From f427ee49d309d8fc33ebf3042c3a775f2f530ded Mon Sep 17 00:00:00 2001
From: Apple
Date: Thu, 19 Nov 2020 01:08:12 +0000
Subject: [PATCH] xnu-7195.50.7.100.1.tar.gz

---
.gitignore | 58 +- EXTERNAL_HEADERS/acpi/Acpi.h | 462 + EXTERNAL_HEADERS/acpi/Acpi_v1.h | 781 + EXTERNAL_HEADERS/architecture/arm/Makefile | 3 +- EXTERNAL_HEADERS/architecture/arm/arm_neon.h | 74267 ---------------- EXTERNAL_HEADERS/corecrypto/cc.h | 43 +- EXTERNAL_HEADERS/corecrypto/cc_config.h | 156 +- EXTERNAL_HEADERS/corecrypto/cc_debug.h | 75 - EXTERNAL_HEADERS/corecrypto/cc_error.h | 40 +- EXTERNAL_HEADERS/corecrypto/cc_fault_canary.h | 30 + EXTERNAL_HEADERS/corecrypto/cc_macros.h | 44 +- EXTERNAL_HEADERS/corecrypto/cc_priv.h | 254 +- .../corecrypto/cc_runtime_config.h | 86 +- EXTERNAL_HEADERS/corecrypto/ccaes.h | 37 +- EXTERNAL_HEADERS/corecrypto/ccasn1.h | 15 +- .../corecrypto/ccchacha20poly1305.h | 16 +- EXTERNAL_HEADERS/corecrypto/cccmac.h | 15 +- EXTERNAL_HEADERS/corecrypto/ccder.h | 317 - EXTERNAL_HEADERS/corecrypto/ccdes.h | 15 +- EXTERNAL_HEADERS/corecrypto/ccdigest.h | 34 +- EXTERNAL_HEADERS/corecrypto/ccdigest_priv.h | 15 +- EXTERNAL_HEADERS/corecrypto/ccdrbg.h | 15 +- EXTERNAL_HEADERS/corecrypto/ccdrbg_impl.h | 15 +- EXTERNAL_HEADERS/corecrypto/cchmac.h | 15 +- EXTERNAL_HEADERS/corecrypto/cckprng.h | 60 +- EXTERNAL_HEADERS/corecrypto/ccmd4.h | 26 + EXTERNAL_HEADERS/corecrypto/ccmd5.h | 26 - EXTERNAL_HEADERS/corecrypto/ccmode.h | 19 +- EXTERNAL_HEADERS/corecrypto/ccmode_factory.h | 137 - EXTERNAL_HEADERS/corecrypto/ccmode_impl.h | 16 +- EXTERNAL_HEADERS/corecrypto/ccmode_siv.h | 170 +- EXTERNAL_HEADERS/corecrypto/ccmode_siv_hmac.h | 26 +- EXTERNAL_HEADERS/corecrypto/ccn.h | 93 +- EXTERNAL_HEADERS/corecrypto/ccpad.h | 15 +- EXTERNAL_HEADERS/corecrypto/ccpbkdf2.h | 42 - EXTERNAL_HEADERS/corecrypto/ccrc4.h | 33 - EXTERNAL_HEADERS/corecrypto/ccrng.h | 17 +- EXTERNAL_HEADERS/corecrypto/ccrsa.h | 561 +- EXTERNAL_HEADERS/corecrypto/ccsha1.h | 16 +- EXTERNAL_HEADERS/corecrypto/ccsha2.h | 28 +- EXTERNAL_HEADERS/corecrypto/cczp.h | 30 +- EXTERNAL_HEADERS/corecrypto/fipspost_trace.h | 15 +- EXTERNAL_HEADERS/img4/api.h | 125 +- EXTERNAL_HEADERS/img4/chip.h | 490 + EXTERNAL_HEADERS/img4/environment.h | 180 - EXTERNAL_HEADERS/img4/firmware.h | 649 + EXTERNAL_HEADERS/img4/image.h | 164 + EXTERNAL_HEADERS/img4/img4.h | 578 - EXTERNAL_HEADERS/img4/nonce.h | 36 +- EXTERNAL_HEADERS/img4/object.h | 68 + EXTERNAL_HEADERS/img4/payload.h | 192 - EXTERNAL_HEADERS/img4/runtime.h | 680 + EXTERNAL_HEADERS/mach-o/Makefile | 1 + EXTERNAL_HEADERS/mach-o/fixup-chains.h | 257 + EXTERNAL_HEADERS/mach-o/loader.h | 18 +- EXTERNAL_HEADERS/ptrauth.h | 54 +- Makefile | 119 +- README.md | 21 + SETUP/config/Makefile | 10 +- SETUP/config/doconf | 27 +- SETUP/decomment/Makefile | 6 +- SETUP/installfile/Makefile | 6 +- SETUP/installfile/installfile.c | 2 +- SETUP/json_compilation_db/Makefile | 6 +- SETUP/kextsymboltool/Makefile | 6 +- SETUP/replacecontents/Makefile | 6 +- SETUP/setsegname/Makefile | 6 +- bsd/Makefile | 13 + bsd/arm/Makefile | 4 + bsd/arm/_param.h | 4 +- bsd/arm/vmparam.h | 20 +- bsd/bsm/audit.h | 23 +- bsd/bsm/audit_kevents.h | 9 +- bsd/bsm/audit_record.h | 234 +- bsd/conf/Makefile | 2 +- bsd/conf/Makefile.arm | 11 +- bsd/conf/Makefile.arm64 | 1 + bsd/conf/Makefile.template | 711 +- bsd/conf/Makefile.x86_64 | 4 - bsd/conf/files | 78 +- bsd/conf/param.c | 17 +- bsd/crypto/Makefile | 4 +- bsd/crypto/{rc4 => entropy}/Makefile | 10 +- bsd/crypto/entropy/diag_entropy_sysctl.c | 56 + .../crypto/entropy/diag_entropy_sysctl.h | 12 +- 
bsd/crypto/rc4/rc4.c | 102 - bsd/crypto/rc4/rc4.h | 52 - bsd/dev/Makefile | 5 + bsd/dev/arm/conf.c | 4 +- bsd/dev/arm/dtrace_isa.c | 45 +- bsd/dev/arm/fasttrap_isa.c | 8 +- bsd/dev/arm/kern_machdep.c | 28 +- bsd/dev/arm/km.c | 15 +- bsd/dev/arm/munge.c | 42 + bsd/dev/arm/stubs.c | 2 +- bsd/dev/arm/sysctl.c | 14 + bsd/dev/arm/systemcalls.c | 214 +- bsd/dev/arm/unix_signal.c | 79 +- bsd/dev/arm64/conf.c | 4 +- bsd/dev/arm64/disassembler.c | 6 +- bsd/dev/arm64/dtrace_isa.c | 55 +- bsd/dev/arm64/fasttrap_isa.c | 13 +- bsd/dev/arm64/fbt_arm.c | 113 +- bsd/dev/arm64/sysctl.c | 168 +- bsd/dev/dtrace/Makefile | 5 + bsd/dev/dtrace/blist.c | 3 + bsd/dev/dtrace/blist.h | 4 + bsd/dev/dtrace/dtrace.c | 314 +- bsd/dev/dtrace/dtrace_glue.c | 75 +- bsd/dev/dtrace/dtrace_subr.c | 45 +- bsd/dev/dtrace/fasttrap.c | 152 +- bsd/dev/dtrace/fbt.c | 43 +- bsd/dev/dtrace/fbt_blacklist.c | 6 +- bsd/dev/dtrace/lockprof.c | 176 +- bsd/dev/dtrace/lockstat.c | 140 +- bsd/dev/dtrace/profile_prvd.c | 32 +- bsd/dev/dtrace/scripts/Makefile | 38 +- bsd/dev/dtrace/scripts/io.d | 48 +- bsd/dev/dtrace/scripts/log_unnest_badness.d | 2 +- bsd/dev/dtrace/scripts/vm_object_ownership.d | 32 + bsd/dev/dtrace/scripts/vmx_compat.d | 175 + bsd/dev/dtrace/sdt.c | 237 +- bsd/dev/dtrace/sdt_subr.c | 13 + bsd/dev/dtrace/systrace.c | 40 +- bsd/dev/i386/conf.c | 4 +- bsd/dev/i386/dis_tables.c | 27 +- bsd/dev/i386/dtrace_isa.c | 88 +- bsd/dev/i386/dtrace_subr_x86.c | 4 +- bsd/dev/i386/fasttrap_isa.c | 12 +- bsd/dev/i386/fbt_x86.c | 4 +- bsd/dev/i386/kern_machdep.c | 8 +- bsd/dev/i386/sysctl.c | 68 +- bsd/dev/i386/systemcalls.c | 6 +- bsd/dev/i386/unix_signal.c | 5 +- bsd/dev/mem.c | 4 +- bsd/dev/memdev.c | 12 +- bsd/dev/monotonic.c | 2 +- bsd/dev/munge.c | 45 + bsd/dev/random/randomdev.c | 58 +- bsd/dev/unix_startup.c | 26 +- bsd/dev/vn/shadow.c | 63 +- bsd/dev/vn/shadow.h | 10 +- bsd/dev/vn/vn.c | 69 +- bsd/i386/Makefile | 2 +- bsd/i386/_param.h | 4 +- bsd/kern/bsd_init.c | 514 +- bsd/kern/bsd_stubs.c | 35 +- bsd/kern/chunklist.c | 182 +- bsd/kern/chunklist.h | 4 +- bsd/kern/decmpfs.c | 310 +- bsd/kern/imageboot.c | 520 +- bsd/kern/kdebug.c | 687 +- bsd/kern/kern_acct.c | 18 +- bsd/kern/kern_aio.c | 1827 +- bsd/kern/kern_asl.c | 15 +- bsd/kern/kern_authorization.c | 6 +- bsd/kern/kern_backtrace.c | 11 +- bsd/kern/kern_clock.c | 4 +- bsd/kern/kern_control.c | 40 +- bsd/kern/kern_core.c | 67 +- bsd/kern/kern_credential.c | 104 +- bsd/kern/kern_cs.c | 397 +- bsd/kern/kern_csr.c | 232 +- bsd/kern/kern_descrip.c | 2412 +- bsd/kern/kern_event.c | 412 +- bsd/kern/kern_exec.c | 962 +- bsd/kern/kern_exit.c | 321 +- bsd/kern/kern_fork.c | 161 +- bsd/kern/kern_guarded.c | 76 +- bsd/kern/kern_kpc.c | 5 +- bsd/kern/kern_ktrace.c | 2 +- bsd/kern/kern_lockf.c | 9 +- bsd/kern/kern_malloc.c | 753 +- bsd/kern/kern_memorystatus.c | 866 +- bsd/kern/kern_memorystatus_freeze.c | 363 +- bsd/kern/kern_memorystatus_notify.c | 312 +- bsd/kern/kern_mib.c | 456 +- bsd/kern/kern_mman.c | 187 +- bsd/kern/kern_newsysctl.c | 102 +- bsd/kern/kern_ntptime.c | 54 +- bsd/kern/kern_persona.c | 66 +- bsd/kern/kern_physio.c | 19 +- bsd/kern/kern_proc.c | 342 +- bsd/kern/kern_prot.c | 64 +- bsd/kern/kern_resource.c | 695 +- bsd/kern/kern_shutdown.c | 12 +- bsd/kern/kern_sig.c | 101 +- bsd/kern/kern_subr.c | 99 +- bsd/kern/kern_symfile.c | 13 +- bsd/kern/kern_synch.c | 8 +- bsd/kern/kern_sysctl.c | 966 +- bsd/kern/kern_time.c | 18 +- bsd/kern/kern_xxx.c | 42 +- bsd/kern/kpi_mbuf.c | 43 - bsd/kern/kpi_socket.c | 37 +- bsd/kern/mach_fat.c | 35 +- bsd/kern/mach_fat.h | 4 +- 
bsd/kern/mach_loader.c | 593 +- bsd/kern/mach_loader.h | 7 +- bsd/kern/mach_process.c | 55 +- bsd/kern/makekdebugevents.py | 38 - bsd/kern/makesyscalls.sh | 14 +- bsd/kern/mcache.c | 44 +- bsd/kern/netboot.c | 53 +- bsd/kern/policy_check.c | 143 +- bsd/kern/posix_sem.c | 115 +- bsd/kern/posix_shm.c | 22 +- bsd/kern/proc_info.c | 432 +- bsd/kern/process_policy.c | 138 +- bsd/kern/qsort.c | 103 +- bsd/kern/socket_info.c | 22 +- bsd/kern/subr_eventhandler.c | 10 +- bsd/kern/subr_log.c | 267 +- bsd/kern/subr_prf.c | 62 +- bsd/kern/subr_sbuf.c | 2346 +- bsd/kern/subr_xxx.c | 19 - bsd/kern/sys_coalition.c | 32 +- bsd/kern/sys_eventlink.c | 88 + bsd/kern/sys_generic.c | 1562 +- bsd/kern/sys_persona.c | 20 +- bsd/kern/sys_pipe.c | 117 +- bsd/kern/sys_reason.c | 107 +- bsd/kern/sys_socket.c | 44 +- bsd/kern/sys_ulock.c | 89 +- bsd/kern/sys_work_interval.c | 24 + bsd/kern/syscalls.master | 693 +- bsd/kern/sysv_ipc.c | 4 +- bsd/kern/sysv_msg.c | 2 +- bsd/kern/sysv_sem.c | 6 +- bsd/kern/sysv_shm.c | 40 +- bsd/kern/trace_codes | 319 +- bsd/kern/tty.c | 144 +- bsd/kern/tty_compat.c | 96 +- bsd/kern/tty_dev.c | 33 +- bsd/kern/tty_dev.h | 3 + bsd/kern/tty_ptmx.c | 35 +- bsd/kern/tty_subr.c | 63 +- bsd/kern/ubc_subr.c | 631 +- bsd/kern/uipc_domain.c | 8 +- bsd/kern/uipc_mbuf.c | 118 +- bsd/kern/uipc_mbuf2.c | 35 +- bsd/kern/uipc_socket.c | 409 +- bsd/kern/uipc_socket2.c | 400 +- bsd/kern/uipc_syscalls.c | 212 +- bsd/kern/uipc_usrreq.c | 438 +- bsd/kern/vsock_domain.c | 1433 + bsd/libkern/libkern.h | 6 +- bsd/libkern/scanc.c | 6 +- bsd/libkern/skpc.c | 76 - bsd/libkern/url_encode.c | 2 +- bsd/machine/Makefile | 7 +- bsd/machine/_param.h | 4 +- bsd/machine/exec.h | 6 +- bsd/machine/machine_types.modulemap | 12 + bsd/machine/param.h | 4 +- bsd/man/man2/Makefile | 2 + bsd/man/man2/chflags.2 | 4 +- bsd/man/man2/fcntl.2 | 8 +- bsd/man/man2/getattrlist.2 | 28 +- bsd/man/man2/i386_get_ldt.2 | 17 +- bsd/man/man2/mmap.2 | 129 +- bsd/man/man2/open.2 | 16 + bsd/man/man2/posix_spawn.2 | 4 + bsd/man/man2/preadv.2 | 1 + bsd/man/man2/pwritev.2 | 1 + bsd/man/man2/read.2 | 41 +- bsd/man/man2/select.2 | 18 +- bsd/man/man2/socket.2 | 3 +- bsd/man/man2/write.2 | 41 +- bsd/man/man3/Makefile | 2 + bsd/man/man3/posix_spawnattr_init.3 | 1 + bsd/man/man3/posix_spawnattr_setarchpref_np.3 | 185 + bsd/man/man3/posix_spawnattr_setbinpref_np.3 | 17 +- bsd/man/man4/Makefile | 4 +- bsd/man/man4/divert.4 | 168 - bsd/man/man4/inet6.4 | 1 - bsd/man/man4/netintro.4 | 24 +- bsd/man/man4/vsock.4 | 205 + bsd/miscfs/Makefile | 6 +- bsd/miscfs/bindfs/Makefile | 22 + bsd/miscfs/bindfs/bind_subr.c | 411 + bsd/miscfs/bindfs/bind_vfsops.c | 559 + bsd/miscfs/bindfs/bind_vnops.c | 701 + bsd/miscfs/bindfs/bindfs.h | 162 + bsd/miscfs/devfs/devfs.h | 2 + bsd/miscfs/devfs/devfs_fdesc_support.c | 41 +- bsd/miscfs/devfs/devfs_tree.c | 12 +- bsd/miscfs/devfs/devfs_vfsops.c | 2 +- bsd/miscfs/devfs/devfs_vnops.c | 14 +- bsd/miscfs/devfs/devfsdefs.h | 4 +- bsd/miscfs/devfs/fdesc.h | 2 +- bsd/miscfs/fifofs/fifo_vnops.c | 12 +- bsd/miscfs/nullfs/null_subr.c | 43 +- bsd/miscfs/nullfs/null_vfsops.c | 40 +- bsd/miscfs/nullfs/null_vnops.c | 149 +- bsd/miscfs/nullfs/nullfs.h | 12 +- bsd/miscfs/specfs/spec_vnops.c | 86 +- bsd/net/Makefile | 6 + bsd/net/altq/altq_qfq.h | 1 - bsd/net/bpf.c | 44 +- bsd/net/bpf_filter.c | 6 +- bsd/net/bpfdesc.h | 3 - bsd/net/bridgestp.c | 1 - bsd/net/classq/classq.c | 2 +- bsd/net/classq/classq.h | 2 +- bsd/net/classq/classq_fq_codel.c | 195 +- bsd/net/classq/classq_fq_codel.h | 17 +- bsd/net/classq/classq_sfb.c | 106 +- 
bsd/net/classq/classq_sfb.h | 3 +- bsd/net/classq/classq_subr.c | 149 +- bsd/net/classq/classq_util.c | 14 +- bsd/net/classq/if_classq.h | 79 +- bsd/net/content_filter.c | 534 +- bsd/net/content_filter.h | 3 +- bsd/net/dlil.c | 1397 +- bsd/net/dlil.h | 76 +- bsd/net/ether_if_module.c | 10 +- bsd/net/ether_inet6_pr_module.c | 2 - bsd/net/ether_inet_pr_module.c | 4 - bsd/net/ethernet.h | 8 + bsd/net/flowadv.c | 36 +- bsd/net/flowadv.h | 3 +- bsd/net/flowhash.c | 89 +- bsd/net/frame802154.c | 22 +- bsd/net/frame802154.h | 6 +- bsd/net/if.c | 255 +- bsd/net/if.h | 11 +- bsd/net/if_6lowpan.c | 33 +- bsd/net/if_bond.c | 67 +- bsd/net/if_bridge.c | 2203 +- bsd/net/if_fake.c | 211 +- bsd/net/if_gif.c | 49 +- bsd/net/if_gif.h | 4 - bsd/net/if_headless.c | 2 +- bsd/net/if_ipsec.c | 251 +- bsd/net/if_llatbl.c | 10 +- bsd/net/if_llatbl.h | 14 +- bsd/net/if_llreach.c | 54 +- bsd/net/if_llreach.h | 9 +- bsd/net/if_loop.c | 20 +- bsd/net/if_low_power_mode.c | 18 +- bsd/net/if_media.h | 2 + bsd/net/if_mib.c | 4 +- bsd/net/if_pflog.c | 2 - bsd/net/if_ports_used.c | 39 +- bsd/net/if_stf.c | 12 - bsd/net/if_utun.c | 235 +- bsd/net/if_var.h | 60 +- bsd/net/if_vlan.c | 71 +- bsd/net/kpi_interface.c | 162 +- bsd/net/kpi_interface.h | 32 +- bsd/net/kpi_interfacefilter.h | 1 + bsd/net/kpi_protocol.c | 38 +- bsd/net/kpi_protocol.h | 1 + bsd/net/lacp.h | 16 +- bsd/net/multi_layer_pkt_log.c | 7 +- bsd/net/multi_layer_pkt_log.h | 6 +- bsd/net/multicast_list.c | 2 +- bsd/net/nat464_utils.c | 61 +- bsd/net/nat464_utils.h | 8 +- bsd/net/ndrv.c | 57 +- bsd/net/necp.c | 734 +- bsd/net/necp.h | 83 +- bsd/net/necp_client.c | 746 +- bsd/net/net_kev.h | 4 + bsd/net/net_perf.c | 2 +- bsd/net/net_str_id.c | 21 +- bsd/net/net_stubs.c | 8 + bsd/net/netsrc.c | 16 +- bsd/net/network_agent.c | 174 +- bsd/net/ntstat.c | 369 +- bsd/net/ntstat.h | 23 +- bsd/net/packet_mangler.c | 26 +- bsd/net/pf.c | 246 +- bsd/net/pf_if.c | 42 +- bsd/net/pf_ioctl.c | 135 +- bsd/net/pf_norm.c | 20 +- bsd/net/pf_osfp.c | 18 +- bsd/net/pf_ruleset.c | 19 - bsd/net/pf_table.c | 30 +- bsd/net/pfvar.h | 48 +- bsd/net/pktap.c | 22 +- bsd/net/pktap.h | 8 +- bsd/net/pktsched/Makefile | 3 +- bsd/net/pktsched/pktsched.c | 134 +- bsd/net/pktsched/pktsched.h | 24 +- bsd/net/pktsched/pktsched_fq_codel.c | 166 +- bsd/net/pktsched/pktsched_fq_codel.h | 32 +- bsd/net/pktsched/pktsched_netem.c | 44 +- bsd/net/pktsched/pktsched_netem.h | 2 - bsd/net/pktsched/pktsched_qfq.c | 1968 - bsd/net/pktsched/pktsched_qfq.h | 295 - bsd/net/pktsched/pktsched_tcq.c | 1103 - bsd/net/pktsched/pktsched_tcq.h | 152 - bsd/net/raw_cb.c | 4 +- bsd/net/raw_usrreq.c | 3 +- bsd/net/restricted_in_port.c | 17 +- bsd/net/route.c | 1054 +- bsd/net/route.h | 10 +- bsd/net/rtsock.c | 44 +- bsd/net/sixxlowpan.c | 6 + bsd/net/skywalk_stubs.c | 27 +- bsd/netinet/Makefile | 13 +- bsd/netinet/flow_divert.c | 1630 +- bsd/netinet/flow_divert.h | 62 +- bsd/netinet/flow_divert_proto.h | 8 +- bsd/netinet/icmp6.h | 27 +- bsd/netinet/icmp_var.h | 5 +- bsd/netinet/igmp.c | 90 +- bsd/netinet/igmp_var.h | 4 +- bsd/netinet/in.c | 94 +- bsd/netinet/in.h | 17 +- bsd/netinet/in_arp.c | 52 +- bsd/netinet/in_cksum.c | 8 +- bsd/netinet/in_gif.c | 6 - bsd/netinet/in_mcast.c | 148 +- bsd/netinet/in_pcb.c | 135 +- bsd/netinet/in_pcb.h | 24 +- bsd/netinet/in_pcblist.c | 82 +- bsd/netinet/in_proto.c | 22 - bsd/netinet/in_systm.h | 2 + bsd/netinet/in_tclass.c | 142 +- bsd/netinet/ip.h | 13 +- bsd/netinet/ip6.h | 6 + bsd/netinet/ip_compat.h | 2 + bsd/netinet/ip_divert.c | 829 - bsd/netinet/ip_divert.h | 122 - 
bsd/netinet/ip_dummynet.c | 2805 +- bsd/netinet/ip_dummynet.h | 28 +- bsd/netinet/ip_ecn.c | 7 - bsd/netinet/ip_encap.c | 5 - bsd/netinet/ip_flowid.h | 9 +- bsd/netinet/ip_fw.h | 329 - bsd/netinet/ip_fw2.c | 4227 - bsd/netinet/ip_fw2.h | 655 - bsd/netinet/ip_fw2_compat.c | 3325 - bsd/netinet/ip_fw2_compat.h | 497 - bsd/netinet/ip_icmp.c | 76 +- bsd/netinet/ip_input.c | 655 +- bsd/netinet/ip_output.c | 489 +- bsd/netinet/ip_var.h | 24 +- bsd/netinet/kpi_ipfilter.c | 12 +- bsd/netinet/kpi_ipfilter.h | 1 + bsd/netinet/lro_ext.h | 65 - bsd/netinet/mptcp.c | 122 +- bsd/netinet/mptcp_opt.c | 58 +- bsd/netinet/mptcp_opt.h | 2 +- bsd/netinet/mptcp_subr.c | 467 +- bsd/netinet/mptcp_timer.c | 7 +- bsd/netinet/mptcp_usrreq.c | 111 +- bsd/netinet/mptcp_var.h | 63 +- bsd/netinet/raw_ip.c | 146 +- bsd/netinet/tcp.h | 38 +- bsd/netinet/tcp_cache.c | 70 +- bsd/netinet/tcp_cc.c | 74 +- bsd/netinet/tcp_cc.h | 11 + bsd/netinet/tcp_cubic.c | 226 +- bsd/netinet/tcp_debug.c | 22 +- bsd/netinet/tcp_debug.h | 2 +- bsd/netinet/tcp_input.c | 1195 +- bsd/netinet/tcp_ledbat.c | 37 +- bsd/netinet/tcp_log.c | 11 +- bsd/netinet/tcp_log.h | 10 +- bsd/netinet/tcp_lro.c | 927 - bsd/netinet/tcp_lro.h | 81 - bsd/netinet/tcp_newreno.c | 61 +- bsd/netinet/tcp_output.c | 1270 +- bsd/netinet/tcp_sack.c | 132 +- bsd/netinet/tcp_seq.h | 2 +- bsd/netinet/tcp_subr.c | 621 +- bsd/netinet/tcp_timer.c | 94 +- bsd/netinet/tcp_timer.h | 10 +- bsd/netinet/tcp_usrreq.c | 248 +- bsd/netinet/tcp_var.h | 255 +- bsd/netinet/udp.h | 7 + bsd/netinet/udp_usrreq.c | 210 +- bsd/netinet/udp_var.h | 2 - bsd/netinet6/Makefile | 6 +- bsd/netinet6/ah.h | 6 +- bsd/netinet6/ah_core.c | 21 +- bsd/netinet6/ah_input.c | 133 +- bsd/netinet6/ah_output.c | 41 +- bsd/netinet6/esp.h | 12 +- bsd/netinet6/esp_chachapoly.c | 12 +- bsd/netinet6/esp_chachapoly.h | 6 +- bsd/netinet6/esp_core.c | 33 +- bsd/netinet6/esp_input.c | 91 +- bsd/netinet6/esp_output.c | 96 +- bsd/netinet6/esp_rijndael.c | 48 +- bsd/netinet6/esp_rijndael.h | 8 +- bsd/netinet6/frag6.c | 247 +- bsd/netinet6/icmp6.c | 138 +- bsd/netinet6/in6.c | 270 +- bsd/netinet6/in6.h | 45 +- bsd/netinet6/in6_cga.c | 14 +- bsd/netinet6/in6_cksum.c | 2 +- bsd/netinet6/in6_gif.c | 10 - bsd/netinet6/in6_ifattach.c | 15 +- bsd/netinet6/in6_ifattach.h | 2 +- bsd/netinet6/in6_mcast.c | 184 +- bsd/netinet6/in6_pcb.c | 26 +- bsd/netinet6/in6_pcb.h | 2 +- bsd/netinet6/in6_proto.c | 118 +- bsd/netinet6/in6_src.c | 64 +- bsd/netinet6/in6_var.h | 79 +- bsd/netinet6/ip6_forward.c | 7 +- bsd/netinet6/ip6_fw.c | 1515 - bsd/netinet6/ip6_fw.h | 348 - bsd/netinet6/ip6_id.c | 2 +- bsd/netinet6/ip6_input.c | 41 +- bsd/netinet6/ip6_output.c | 98 +- bsd/netinet6/ip6_var.h | 33 +- bsd/netinet6/ipsec.c | 234 +- bsd/netinet6/ipsec.h | 16 +- bsd/netinet6/ipsec6.h | 8 +- bsd/netinet6/mld6.c | 82 +- bsd/netinet6/mld6_var.h | 4 +- bsd/netinet6/nd6.c | 779 +- bsd/netinet6/nd6.h | 142 +- bsd/netinet6/nd6_nbr.c | 91 +- bsd/netinet6/nd6_prproxy.c | 112 +- bsd/netinet6/nd6_rti.c | 158 + bsd/netinet6/nd6_rtr.c | 837 +- bsd/netinet6/nd6_send.c | 4 +- bsd/netinet6/nd6_var.h | 3 +- bsd/netinet6/raw_ip6.c | 31 +- bsd/netinet6/route6.c | 2 +- bsd/netinet6/udp6_output.c | 28 +- bsd/netinet6/udp6_usrreq.c | 53 +- bsd/netkey/key.c | 1263 +- bsd/netkey/key.h | 22 - bsd/netkey/key_debug.c | 308 +- bsd/netkey/keydb.c | 2 +- bsd/netkey/keydb.h | 9 +- bsd/netkey/keysock.c | 3 +- bsd/nfs/gss/gss_krb5_mech.c | 71 +- bsd/nfs/gss/gss_krb5_mech.h | 4 +- bsd/nfs/krpc_subr.c | 21 +- bsd/nfs/nfs.h | 117 +- bsd/nfs/nfs4_subs.c | 46 +- bsd/nfs/nfs4_vnops.c | 632 +- 
bsd/nfs/nfs_bio.c | 323 +- bsd/nfs/nfs_boot.c | 74 +- bsd/nfs/nfs_gss.c | 155 +- bsd/nfs/nfs_gss.h | 7 +- bsd/nfs/nfs_ioctl.h | 4 +- bsd/nfs/nfs_lock.c | 3 +- bsd/nfs/nfs_node.c | 106 +- bsd/nfs/nfs_serv.c | 87 +- bsd/nfs/nfs_socket.c | 219 +- bsd/nfs/nfs_subs.c | 198 +- bsd/nfs/nfs_syscalls.c | 69 +- bsd/nfs/nfs_vfsops.c | 272 +- bsd/nfs/nfs_vnops.c | 993 +- bsd/nfs/nfsm_subs.h | 49 +- bsd/nfs/nfsmount.h | 24 +- bsd/nfs/nfsnode.h | 85 +- bsd/nfs/nfsproto.h | 47 +- bsd/nfs/rpcv2.h | 1 + bsd/nfs/xdr_subs.h | 30 +- bsd/pgo/profile_runtime_data.c | 14 + bsd/pthread/priority_private.h | 58 +- bsd/pthread/pthread_priority.c | 10 +- bsd/pthread/pthread_workqueue.c | 150 +- bsd/pthread/workqueue_internal.h | 8 +- bsd/security/audit/audit.c | 16 +- bsd/security/audit/audit.h | 6 +- bsd/security/audit/audit_arg.c | 15 +- bsd/security/audit/audit_bsd.c | 66 +- bsd/security/audit/audit_bsd.h | 6 +- bsd/security/audit/audit_bsm.c | 42 +- bsd/security/audit/audit_mac.c | 49 +- bsd/security/audit/audit_pipe.c | 2 +- bsd/security/audit/audit_private.h | 13 +- bsd/security/audit/audit_session.c | 11 +- bsd/security/audit/audit_syscalls.c | 14 +- bsd/sys/Makefile | 49 +- bsd/sys/_endian.h | 13 + bsd/sys/_select.h | 5 + bsd/sys/_types/Makefile | 6 + bsd/sys/_types/_fd_def.h | 9 +- bsd/sys/_types/_int8_t.h | 2 +- bsd/sys/_types/_ucontext.h | 1 + bsd/sys/_types/_ucontext64.h | 1 + bsd/sys/acct.h | 3 + bsd/sys/aio_kern.h | 45 +- bsd/sys/attr.h | 10 +- bsd/sys/buf_internal.h | 22 +- bsd/sys/callout.h | 86 - bsd/sys/cdefs.h | 70 +- bsd/sys/clist.h | 80 - bsd/sys/codesign.h | 12 +- bsd/sys/conf.h | 12 +- bsd/sys/cprotect.h | 30 +- bsd/sys/csr.h | 53 +- bsd/sys/disk.h | 2 - bsd/sys/dtrace.h | 10 +- bsd/sys/dtrace_glue.h | 4 +- bsd/sys/dtrace_impl.h | 6 + bsd/sys/ev.h | 39 - bsd/sys/event.h | 26 +- bsd/sys/eventhandler.h | 6 +- bsd/sys/fcntl.h | 202 +- bsd/sys/file.h | 1 + bsd/sys/file_internal.h | 279 +- bsd/sys/filedesc.h | 95 +- bsd/sys/fsctl.h | 12 +- bsd/sys/gmon.h | 1 + bsd/sys/imageboot.h | 10 + bsd/sys/imgact.h | 8 + bsd/sys/ioccom.h | 2 +- bsd/sys/kas_info.h | 3 +- bsd/sys/kasl.h | 6 +- bsd/sys/kauth.h | 8 +- bsd/sys/kdebug.h | 78 +- bsd/sys/kdebug_kernel.h | 32 +- bsd/sys/kern_control.h | 26 + bsd/sys/kern_event.h | 18 +- bsd/sys/kern_memorystatus.h | 44 +- bsd/sys/kern_memorystatus_freeze.h | 22 +- bsd/sys/kern_memorystatus_notify.h | 5 +- bsd/sys/kern_sysctl.h | 2 +- bsd/sys/kernel_types.h | 4 + bsd/sys/kpi_mbuf.h | 3 +- bsd/sys/kpi_socket.h | 7 +- bsd/sys/kpi_socketfilter.h | 1 + bsd/sys/linker_set.h | 87 +- bsd/sys/lockstat.h | 18 +- bsd/sys/make_symbol_aliasing.sh | 25 +- bsd/sys/malloc.h | 226 +- bsd/sys/mbuf.h | 63 +- bsd/sys/mcache.h | 2 - bsd/sys/memory_maintenance.h | 2 +- bsd/sys/mman.h | 42 +- bsd/sys/mount.h | 77 +- bsd/sys/mount_internal.h | 78 +- bsd/sys/munge.h | 6 + bsd/sys/namei.h | 2 +- bsd/sys/netboot.h | 4 +- bsd/sys/param.h | 26 +- bsd/sys/persona.h | 22 +- bsd/sys/pipe.h | 14 +- bsd/sys/priv.h | 6 +- bsd/sys/proc.h | 46 +- bsd/sys/proc_info.h | 29 +- bsd/sys/proc_internal.h | 77 +- bsd/sys/proc_require.h | 44 + bsd/sys/process_policy.h | 37 +- bsd/sys/queue.h | 27 +- bsd/sys/reason.h | 8 +- bsd/sys/resource.h | 58 +- bsd/sys/resourcevar.h | 31 +- bsd/sys/sbuf.h | 8 +- bsd/sys/sdt_impl.h | 2 + bsd/sys/shm.h | 4 + bsd/sys/signalvar.h | 4 +- bsd/sys/socket.h | 47 +- bsd/sys/socketvar.h | 93 +- bsd/sys/sockio.h | 11 +- bsd/sys/spawn.h | 8 +- bsd/sys/spawn_internal.h | 27 +- bsd/sys/stackshot.h | 8 +- bsd/sys/stat.h | 4 +- bsd/sys/sys__types.modulemap | 54 + bsd/sys/sys_cdefs.modulemap | 
10 + bsd/sys/sys_domain.h | 2 +- bsd/sys/sys_types.modulemap | 25 + bsd/sys/sysctl.h | 59 +- bsd/sys/sysent.h | 5 +- bsd/sys/syslimits.h | 18 +- bsd/sys/syslog.h | 2 +- bsd/sys/systm.h | 7 +- bsd/sys/time.h | 6 + bsd/sys/tty.h | 8 +- bsd/sys/ttycom.h | 1 + bsd/sys/ubc.h | 5 +- bsd/sys/ubc_internal.h | 49 +- bsd/sys/ucontext.h | 1 - bsd/sys/ucred.h | 6 +- bsd/sys/uio.h | 15 +- bsd/sys/ulock.h | 6 +- bsd/sys/un.h | 1 + bsd/sys/unicode.h | 259 + bsd/sys/unpcb.h | 6 +- bsd/sys/user.h | 1 + bsd/sys/vnode.h | 17 +- bsd/sys/vnode_if.h | 49 +- bsd/sys/vnode_internal.h | 28 +- bsd/sys/vsock.h | 84 + bsd/sys/vsock_domain.h | 76 + bsd/sys/vsock_transport.h | 78 + bsd/sys/work_interval.h | 60 +- bsd/tests/bsd_tests.c | 35 +- bsd/tests/copyio_tests.c | 26 +- bsd/tests/ptrauth_data_tests_sysctl.c | 122 + bsd/vfs/doc_tombstone.c | 17 +- bsd/vfs/kpi_vfs.c | 339 +- bsd/vfs/vfs_attrlist.c | 288 +- bsd/vfs/vfs_bio.c | 173 +- bsd/vfs/vfs_cache.c | 94 +- bsd/vfs/vfs_cluster.c | 369 +- bsd/vfs/vfs_conf.c | 24 + bsd/vfs/vfs_cprotect.c | 110 +- bsd/vfs/vfs_disk_conditioner.c | 19 +- bsd/vfs/vfs_fsevents.c | 209 +- bsd/vfs/vfs_fslog.c | 6 +- bsd/vfs/vfs_init.c | 28 +- bsd/vfs/vfs_lookup.c | 379 +- bsd/vfs/vfs_quota.c | 16 +- bsd/vfs/vfs_subr.c | 1083 +- bsd/vfs/vfs_support.c | 12 +- bsd/vfs/vfs_syscalls.c | 1499 +- bsd/vfs/vfs_unicode.c | 1137 + bsd/vfs/vfs_unicode_data.h | 1255 + bsd/vfs/vfs_utfconv.c | 40 +- bsd/vfs/vfs_vnops.c | 341 +- bsd/vfs/vfs_xattr.c | 248 +- bsd/vfs/vnode_if.c | 18 + bsd/vm/dp_backing_file.c | 1 - bsd/vm/vm_compressor_backing_file.c | 12 +- bsd/vm/vm_unix.c | 1677 +- bsd/vm/vnode_pager.c | 50 +- config/BSDKernel.exports | 16 +- config/IOKit.arm.exports | 31 +- config/IOKit.arm64.MacOSX.exports | 258 + config/IOKit.arm64.exports | 9 +- config/IOKit.arm64.hibernation.MacOSX.exports | 8 + config/IOKit.exports | 774 +- config/IOKit.x86_64.MacOSX.exports | 241 + config/IOKit.x86_64.exports | 275 +- config/Libkern.arm.exports | 2 +- config/Libkern.arm64.MacOSX.exports | 132 + config/Libkern.arm64.exports | 2 +- config/Libkern.exports | 168 +- config/Libkern.x86_64.MacOSX.exports | 125 + config/Libkern.x86_64.exports | 127 +- config/MACFramework.exports | 27 +- config/MACFramework.x86_64.exports | 8 - config/MASTER | 85 +- config/MASTER.arm | 25 +- config/MASTER.arm64 | 25 +- config/MASTER.arm64.BridgeOS | 96 + config/MASTER.arm64.MacOSX | 102 + config/MASTER.arm64.bcm2837 | 23 +- config/MASTER.arm64.iPhoneOS | 96 + config/MASTER.x86_64 | 18 +- config/Mach.exports | 1 + config/Makefile | 58 +- config/MasterVersion | 2 +- config/Private.arm64.MacOSX.exports | 41 + config/Private.arm64.exports | 20 + config/Private.exports | 314 +- config/Private.x86_64.exports | 31 +- config/Unsupported.arm.exports | 2 + config/Unsupported.arm64.MacOSX.exports | 36 + config/Unsupported.arm64.exports | 9 - config/Unsupported.exports | 65 +- config/Unsupported.x86_64.MacOSX.exports | 16 + config/Unsupported.x86_64.exports | 19 +- config/generate_combined_symbolsets_plist.sh | 48 + config/generate_symbolset_plist.sh | 69 + doc/allocators.md | 466 + doc/atomics.md | 20 +- doc/pac.md | 326 + doc/startup.md | 275 + doc/xnu_build_consolidation.md | 142 + iokit/DriverKit/IOKitKeys.h | 13 + iokit/DriverKit/IOMemoryDescriptor.iig | 62 +- iokit/DriverKit/IOService.iig | 26 + iokit/DriverKit/IOTypes.h | 62 +- iokit/DriverKit/IOUserClient.iig | 22 +- iokit/DriverKit/Makefile | 5 +- iokit/DriverKit/OSAction.iig | 10 + iokit/DriverKit/OSObject.iig | 20 +- iokit/IOKit/IOBSD.h | 6 +- iokit/IOKit/IOBufferMemoryDescriptor.h | 19 +- 
iokit/IOKit/IOCPU.h | 13 +- iokit/IOKit/IOCatalogue.h | 27 +- iokit/IOKit/IOCommandGate.h | 5 +- iokit/IOKit/IOCommandPool.h | 12 +- iokit/IOKit/IOCommandQueue.h | 3 +- iokit/IOKit/IOConditionLock.h | 3 +- iokit/IOKit/IODMACommand.h | 34 +- iokit/IOKit/IODMAController.h | 5 +- iokit/IOKit/IODMAEventSource.h | 5 +- iokit/IOKit/IODataQueue.h | 12 +- iokit/IOKit/IODeviceMemory.h | 7 +- iokit/IOKit/IODeviceTreeSupport.h | 5 + iokit/IOKit/IOFilterInterruptEventSource.h | 8 +- iokit/IOKit/IOHibernatePrivate.h | 117 +- iokit/IOKit/IOInterleavedMemoryDescriptor.h | 3 +- iokit/IOKit/IOInterruptController.h | 10 +- iokit/IOKit/IOInterruptEventSource.h | 9 +- iokit/IOKit/IOKernelReporters.h | 35 +- iokit/IOKit/IOKitDebug.h | 2 + iokit/IOKit/IOKitKeys.h | 22 + iokit/IOKit/IOKitKeysPrivate.h | 8 +- iokit/IOKit/IOLib.h | 58 +- iokit/IOKit/IOLocks.h | 22 + iokit/IOKit/IOMapper.h | 5 +- iokit/IOKit/IOMemoryCursor.h | 9 +- iokit/IOKit/IOMemoryDescriptor.h | 97 +- iokit/IOKit/IOMultiMemoryDescriptor.h | 3 +- iokit/IOKit/IONVRAM.h | 93 +- iokit/IOKit/IOPMGR.h | 126 + iokit/IOKit/IOPlatformActions.h | 40 + iokit/IOKit/IOPlatformExpert.h | 65 +- iokit/IOKit/IOPolledInterface.h | 27 +- iokit/IOKit/IORangeAllocator.h | 3 +- iokit/IOKit/IORegistryEntry.h | 220 +- iokit/IOKit/IOReportMacros.h | 12 +- iokit/IOKit/IOService.h | 167 +- iokit/IOKit/IOSharedDataQueue.h | 7 +- iokit/IOKit/IOSubMemoryDescriptor.h | 3 +- iokit/IOKit/IOTimeStamp.h | 125 + iokit/IOKit/IOTimerEventSource.h | 15 +- iokit/IOKit/IOTypes.h | 18 +- iokit/IOKit/IOUserClient.h | 79 +- iokit/IOKit/IOUserServer.h | 30 +- iokit/IOKit/IOWorkLoop.h | 17 +- iokit/IOKit/Makefile | 4 +- iokit/IOKit/PassthruInterruptController.h | 79 + iokit/IOKit/perfcontrol/IOPerfControl.h | 70 +- iokit/IOKit/platform/IOPlatformIO.h | 62 + iokit/IOKit/pwr_mgt/IOPM.h | 1 + iokit/IOKit/pwr_mgt/IOPMLibDefs.h | 3 +- iokit/IOKit/pwr_mgt/IOPMPrivate.h | 35 +- iokit/IOKit/pwr_mgt/IOPMinformeeList.h | 2 +- iokit/IOKit/pwr_mgt/RootDomain.h | 195 +- iokit/Kernel/IOBufferMemoryDescriptor.cpp | 90 +- iokit/Kernel/IOCPU.cpp | 434 +- iokit/Kernel/IOCatalogue.cpp | 333 +- iokit/Kernel/IOCommandGate.cpp | 15 +- iokit/Kernel/IOCommandPool.cpp | 28 +- iokit/Kernel/IOCommandQueue.cpp | 11 +- iokit/Kernel/IODMACommand.cpp | 429 +- iokit/Kernel/IODMAController.cpp | 17 +- iokit/Kernel/IODMAEventSource.cpp | 12 +- iokit/Kernel/IODataQueue.cpp | 22 +- iokit/Kernel/IODeviceTreeSupport.cpp | 63 +- iokit/Kernel/IOEventSource.cpp | 4 + iokit/Kernel/IOFilterInterruptEventSource.cpp | 21 +- iokit/Kernel/IOHibernateIO.cpp | 600 +- iokit/Kernel/IOHibernateInternal.h | 41 +- iokit/Kernel/IOHibernateRestoreKernel.c | 398 +- iokit/Kernel/IOHistogramReporter.cpp | 46 +- iokit/Kernel/IOInterruptAccounting.cpp | 2 + iokit/Kernel/IOInterruptController.cpp | 39 +- iokit/Kernel/IOInterruptEventSource.cpp | 42 +- iokit/Kernel/IOKitDebug.cpp | 137 +- iokit/Kernel/IOKitKernelInternal.h | 45 +- iokit/Kernel/IOLib.cpp | 129 +- iokit/Kernel/IOMapper.cpp | 54 +- iokit/Kernel/IOMemoryCursor.cpp | 35 +- iokit/Kernel/IOMemoryDescriptor.cpp | 1286 +- iokit/Kernel/IONVRAM.cpp | 2094 +- iokit/Kernel/IOPMGR.cpp | 32 + iokit/Kernel/IOPMinformeeList.cpp | 20 +- iokit/Kernel/IOPMrootDomain.cpp | 3628 +- iokit/Kernel/IOPerfControl.cpp | 359 +- iokit/Kernel/IOPlatformActions.cpp | 339 + iokit/Kernel/IOPlatformExpert.cpp | 585 +- .../Kernel/IOPlatformIO.cpp | 44 +- iokit/Kernel/IOPolledInterface.cpp | 110 +- iokit/Kernel/IORegistryEntry.cpp | 200 +- iokit/Kernel/IOReportLegend.cpp | 60 +- iokit/Kernel/IOReporter.cpp | 108 +- 
iokit/Kernel/IOService.cpp | 559 +- iokit/Kernel/IOServicePM.cpp | 512 +- iokit/Kernel/IOServicePMPrivate.h | 113 +- iokit/Kernel/IOServicePrivate.h | 4 +- iokit/Kernel/IOSharedDataQueue.cpp | 24 +- iokit/Kernel/IOSimpleReporter.cpp | 26 +- iokit/Kernel/IOStartIOKit.cpp | 81 +- iokit/Kernel/IOStateReporter.cpp | 29 +- iokit/Kernel/IOStatistics.cpp | 24 +- iokit/Kernel/IOStringFuncs.c | 8 +- iokit/Kernel/IOSubMemoryDescriptor.cpp | 1 + iokit/Kernel/IOTimerEventSource.cpp | 47 +- iokit/Kernel/IOUserClient.cpp | 612 +- iokit/Kernel/IOUserServer.cpp | 907 +- iokit/Kernel/IOWorkLoop.cpp | 45 +- iokit/Kernel/PassthruInterruptController.cpp | 158 + iokit/Kernel/RootDomainUserClient.cpp | 5 +- iokit/Kernel/arm/AppleARMSMP.cpp | 399 + iokit/Kernel/i386/IOKeyStoreHelper.cpp | 149 + .../System/IODataQueueDispatchSourceShared.h | 36 +- iokit/Tests/TestIOMemoryDescriptor.cpp | 66 +- iokit/Tests/Tests.cpp | 277 +- iokit/bsddev/DINetBootHook.cpp | 69 +- iokit/bsddev/DINetBootHook.h | 1 + iokit/bsddev/IOKitBSDInit.cpp | 232 +- iokit/conf/Makefile | 2 +- iokit/conf/Makefile.arm | 65 +- iokit/conf/Makefile.arm64 | 30 +- iokit/conf/Makefile.template | 166 +- iokit/conf/Makefile.x86_64 | 5 + iokit/conf/files | 8 +- iokit/conf/files.arm64 | 3 + iokit/conf/files.x86_64 | 3 - libkdd/kcdata.h | 153 +- libkdd/kcdtypes.c | 8 + libkdd/kdd.xcodeproj/project.pbxproj | 3 + libkdd/tests/kdd_bridge.h | 4 +- libkern/OSKextLib.cpp | 41 +- libkern/OSKextVersion.c | 6 +- libkern/c++/OSArray.cpp | 173 +- libkern/c++/OSCollection.cpp | 16 +- libkern/c++/OSCollectionIterator.cpp | 22 +- libkern/c++/OSData.cpp | 41 +- libkern/c++/OSDictionary.cpp | 210 +- libkern/c++/OSKext.cpp | 5678 +- libkern/c++/OSMetaClass.cpp | 67 +- libkern/c++/OSNumber.cpp | 20 +- libkern/c++/OSObject.cpp | 45 +- libkern/c++/OSOrderedSet.cpp | 133 +- libkern/c++/OSRuntime.cpp | 249 +- libkern/c++/OSSerialize.cpp | 31 +- libkern/c++/OSSerializeBinary.cpp | 49 +- libkern/c++/OSSet.cpp | 101 +- libkern/c++/OSString.cpp | 50 +- libkern/c++/OSSymbol.cpp | 95 +- libkern/c++/OSUnserialize.cpp | 136 +- libkern/c++/OSUnserialize.y | 68 +- libkern/c++/OSUnserializeXML.cpp | 162 +- libkern/c++/OSUnserializeXML.y | 100 +- libkern/c++/priority_queue.cpp | 437 + libkern/conf/Makefile | 2 +- libkern/conf/Makefile.arm | 28 +- libkern/conf/Makefile.arm64 | 18 +- libkern/conf/Makefile.template | 109 +- libkern/conf/Makefile.x86_64 | 20 + libkern/conf/files | 6 +- libkern/crypto/corecrypto_aes.c | 10 +- libkern/crypto/corecrypto_aesxts.c | 2 +- libkern/crypto/corecrypto_md5.c | 2 +- libkern/crypto/corecrypto_sha1.c | 2 +- libkern/firehose/Makefile | 5 +- libkern/firehose/chunk_private.h | 31 +- libkern/firehose/firehose_types_private.h | 3 + libkern/firehose/tracepoint_private.h | 5 +- libkern/gen/OSAtomicOperations.c | 26 +- .../gen/OSSpinLock.c | 26 +- libkern/kxld/Makefile | 11 +- libkern/kxld/kxld_copyright.c | 7 +- libkern/kxld/kxld_reloc.c | 11 +- libkern/kxld/kxld_stubs.c | 82 - libkern/kxld/kxld_sym.c | 8 +- libkern/kxld/kxld_util.c | 39 +- libkern/kxld/kxld_util.h | 3 - libkern/libclosure/runtime.cpp | 168 +- libkern/libkern/Block_private.h | 91 +- libkern/libkern/Makefile | 7 +- libkern/libkern/OSByteOrder.h | 24 +- libkern/libkern/OSDebug.h | 2 +- libkern/libkern/OSKextLib.h | 48 + libkern/libkern/OSKextLibPrivate.h | 46 +- libkern/libkern/OSMalloc.h | 40 +- libkern/libkern/_OSByteOrder.h | 32 +- libkern/libkern/arm/Makefile | 7 + libkern/libkern/arm/OSByteOrder.h | 197 +- libkern/libkern/c++/Makefile | 14 +- libkern/libkern/c++/OSAllocation.h | 60 + 
libkern/libkern/c++/OSArray.h | 28 +- libkern/libkern/c++/OSBoolean.h | 4 +- .../libkern/c++/OSBoundedArray.h | 24 +- libkern/libkern/c++/OSBoundedArrayRef.h | 38 + .../libkern/c++/OSBoundedPtr.h | 39 +- libkern/libkern/c++/OSBoundedPtrFwd.h | 42 + libkern/libkern/c++/OSCPPDebug.h | 16 +- libkern/libkern/c++/OSCollection.h | 17 +- libkern/libkern/c++/OSCollectionIterator.h | 8 +- libkern/libkern/c++/OSData.h | 21 +- libkern/libkern/c++/OSDictionary.h | 39 +- libkern/libkern/c++/OSKext.h | 247 +- libkern/libkern/c++/OSLib.h | 22 +- libkern/libkern/c++/OSMetaClass.h | 485 +- libkern/libkern/c++/OSNumber.h | 8 +- libkern/libkern/c++/OSObject.h | 22 +- libkern/libkern/c++/OSOrderedSet.h | 18 +- libkern/libkern/c++/OSPtr.h | 245 +- libkern/libkern/c++/OSSerialize.h | 21 +- libkern/libkern/c++/OSSet.h | 22 +- libkern/libkern/c++/OSSharedPtr.h | 153 + libkern/libkern/c++/OSString.h | 18 +- libkern/libkern/c++/OSSymbol.h | 14 +- libkern/libkern/c++/OSUnserialize.h | 30 +- libkern/libkern/c++/bounded_array.h | 104 + libkern/libkern/c++/bounded_array_ref.h | 283 + libkern/libkern/c++/bounded_ptr.h | 706 + libkern/libkern/c++/bounded_ptr_fwd.h | 37 + libkern/libkern/c++/intrusive_shared_ptr.h | 619 + libkern/libkern/c++/safe_allocation.h | 457 + libkern/libkern/crypto/aes.h | 10 +- libkern/libkern/crypto/register_crypto.h | 19 - libkern/libkern/img4/interface.h | 352 +- libkern/libkern/kernel_mach_header.h | 9 + libkern/libkern/kext_request_keys.h | 197 +- libkern/libkern/prelink.h | 7 + libkern/libkern/ptrauth_utils.h | 102 + libkern/libkern/section_keywords.h | 34 +- libkern/libkern/tree.h | 21 + libkern/mkext.c | 378 +- libkern/net/inet_ntop.c | 3 +- libkern/net/inet_pton.c | 6 +- libkern/os/Makefile | 23 +- libkern/os/atomic.h | 135 + libkern/os/atomic_private.h | 925 + libkern/os/atomic_private_arch.h | 219 + libkern/os/atomic_private_impl.h | 257 + libkern/os/base.h | 67 +- libkern/os/base_private.h | 43 + libkern/os/cpp_util.h | 68 + libkern/os/log.c | 327 +- libkern/os/log.h | 20 + libkern/os/log_encode.h | 20 +- libkern/os/object.h | 11 +- libkern/os/object_private.h | 148 - libkern/os/refcnt.c | 238 +- libkern/os/refcnt.h | 55 +- libkern/os/refcnt_internal.h | 112 +- libkern/os/smart_ptr.h | 523 - libkern/ptrauth_utils.c | 108 + libkern/stdio/scanf.c | 101 +- libkern/zlib/deflate.c | 4 +- libkern/zlib/deflate.h | 4 +- libkern/zlib/infback.c | 2 + libkern/zlib/inffast.c | 77 +- libkern/zlib/inflate.c | 37 +- libkern/zlib/inftrees.c | 20 +- libkern/zlib/trees.c | 4 +- libkern/zlib/z_crc32.c | 16 +- libsa/bootstrap.cpp | 493 +- libsa/conf/Makefile | 2 +- libsa/conf/Makefile.arm64 | 1 + libsa/conf/Makefile.template | 2 +- .../Libsyscall.xcodeproj/project.pbxproj | 30 +- .../Platforms/DriverKit/arm64/syscall.map | 54 + .../Platforms/DriverKit/x86_64/syscall.map | 6 +- libsyscall/Platforms/MacOSX/arm64/syscall.map | 54 + libsyscall/Platforms/MacOSX/i386/syscall.map | 5 + .../Platforms/MacOSX/x86_64/syscall.map | 6 +- libsyscall/Platforms/iPhoneOS/arm/syscall.map | 6 +- .../Platforms/iPhoneOS/arm64/syscall.map | 2 + libsyscall/custom/SYS.h | 22 +- libsyscall/custom/__vfork.s | 9 +- libsyscall/mach/err_libkern.sub | 8 + libsyscall/mach/mach_eventlink.c | 105 + libsyscall/mach/mach_eventlink.defs | 96 + libsyscall/mach/mach_init.c | 26 +- libsyscall/mach/mach_msg.c | 35 + libsyscall/mach/mach_port.c | 6 +- libsyscall/mach/mach_right.c | 4 +- libsyscall/mach/mach_vm.c | 8 +- libsyscall/mach/string.h | 5 +- libsyscall/wrappers/__get_cpu_capabilities.s | 4 +- libsyscall/wrappers/_libc_funcptr.c | 7 + 
libsyscall/wrappers/_libkernel_init.c | 64 +- libsyscall/wrappers/_libkernel_init.h | 9 + libsyscall/wrappers/cancelable/fcntl-base.c | 3 + .../wrappers/cancelable/open.c | 46 +- libsyscall/wrappers/init_cpu_capabilities.c | 2 +- libsyscall/wrappers/legacy/open.c | 54 - libsyscall/wrappers/libproc/libproc.c | 94 +- libsyscall/wrappers/libproc/libproc.h | 35 + libsyscall/wrappers/mach_absolute_time.s | 16 +- libsyscall/wrappers/mach_continuous_time.c | 3 +- libsyscall/wrappers/mach_get_times.c | 5 - libsyscall/wrappers/open-base.c | 164 + libsyscall/wrappers/proc.c | 2 - libsyscall/wrappers/skywalk/os_channel.c | 2 +- libsyscall/wrappers/skywalk/os_packet.c | 2 +- libsyscall/wrappers/spawn/posix_spawn.c | 299 +- libsyscall/wrappers/spawn/spawn.h | 22 + libsyscall/wrappers/spawn/spawn_private.h | 14 + libsyscall/wrappers/stackshot.c | 32 +- libsyscall/wrappers/string/strings.h | 5 + .../wrappers/system-version-compat-support.c | 54 + .../wrappers/system-version-compat-support.h | 37 + libsyscall/wrappers/system-version-compat.c | 211 + libsyscall/wrappers/unix03/mmap.c | 8 +- libsyscall/wrappers/varargs_wrappers.s | 64 - libsyscall/wrappers/work_interval.c | 24 + libsyscall/xcodescripts/create-syscalls.pl | 28 +- libsyscall/xcodescripts/mach_install_mig.sh | 33 +- makedefs/MakeInc.cmd | 82 +- makedefs/MakeInc.def | 213 +- makedefs/MakeInc.dir | 9 +- makedefs/MakeInc.kernel | 69 +- makedefs/MakeInc.rule | 101 +- makedefs/MakeInc.top | 82 +- osfmk/UserNotification/Makefile | 4 +- osfmk/arm/Makefile | 71 +- osfmk/arm/arm_init.c | 477 +- osfmk/arm/arm_timer.c | 42 +- osfmk/arm/arm_vm_init.c | 55 +- osfmk/arm/atomic.h | 162 - osfmk/arm/caches.c | 82 +- osfmk/arm/caches_asm.s | 75 +- osfmk/arm/caches_internal.h | 14 +- osfmk/arm/caches_macros.s | 83 + osfmk/arm/commpage/commpage.c | 162 +- osfmk/arm/commpage/commpage.h | 3 + osfmk/arm/commpage/commpage_asm.s | 528 + osfmk/arm/cpu.c | 60 +- osfmk/arm/cpu_capabilities.h | 74 +- osfmk/arm/cpu_common.c | 313 +- osfmk/arm/cpu_data.h | 9 +- osfmk/arm/cpu_data_internal.h | 286 +- osfmk/arm/cpu_internal.h | 7 +- osfmk/arm/cpu_x86_64_capabilities.h | 153 + osfmk/arm/cpuid.c | 56 +- osfmk/arm/cpuid.h | 12 +- osfmk/arm/cpuid_internal.h | 3 + osfmk/arm/data.s | 10 + osfmk/arm/dwarf_unwind.h | 117 + osfmk/arm/genassym.c | 28 +- osfmk/arm/kpc_arm.c | 37 +- osfmk/arm/locks.h | 6 +- osfmk/arm/locks_arm.c | 281 +- osfmk/arm/locore.s | 117 +- osfmk/arm/loose_ends.c | 32 +- osfmk/arm/lowglobals.h | 58 +- osfmk/arm/lowmem_vectors.c | 19 +- osfmk/arm/machine_cpuid.c | 4 +- osfmk/arm/machine_routines.c | 301 +- osfmk/arm/machine_routines.h | 394 +- osfmk/arm/machine_routines_asm.s | 56 - osfmk/arm/machine_routines_common.c | 375 +- osfmk/arm/misc_protos.h | 40 +- osfmk/arm/model_dep.c | 202 +- osfmk/arm/pcb.c | 201 +- osfmk/arm/pmap.c | 5237 +- osfmk/arm/pmap.h | 220 +- osfmk/arm/proc_reg.h | 360 +- osfmk/arm/rtclock.c | 15 +- osfmk/arm/smp.h | 1 - osfmk/arm/start.s | 34 +- osfmk/arm/status.c | 793 +- osfmk/arm/status_shared.c | 1 - osfmk/arm/task.h | 26 +- osfmk/arm/thread.h | 50 +- osfmk/arm/trap.c | 14 +- osfmk/arm/trap.h | 6 +- osfmk/arm64/Makefile | 44 +- osfmk/arm64/amcc_rorgn.c | 706 + osfmk/arm64/amcc_rorgn.h | 38 + osfmk/arm64/arm_vm_init.c | 816 +- osfmk/arm64/bsd_arm64.c | 62 +- osfmk/arm64/caches_asm.s | 124 +- osfmk/arm64/copyio.c | 31 +- .../corecrypto/arm64_isa_compatibility.h | 181 + .../arm64/corecrypto/sha256_compress_arm64.s | 405 + osfmk/arm64/cpu.c | 308 +- osfmk/arm64/cswitch.s | 196 +- osfmk/arm64/dwarf_unwind.h | 170 + 
osfmk/arm64/exception_asm.h | 315 +- osfmk/arm64/genassym.c | 66 +- osfmk/arm64/hibernate_arm64.c | 305 + osfmk/arm64/hibernate_ppl_hmac.c | 299 + osfmk/arm64/hibernate_ppl_hmac.h | 59 + osfmk/arm64/hibernate_restore.c | 480 + osfmk/arm64/instructions.h | 166 + osfmk/arm64/kpc.c | 38 +- osfmk/arm64/locore.s | 395 +- osfmk/arm64/loose_ends.c | 43 +- osfmk/arm64/lowglobals.h | 60 +- osfmk/arm64/lowmem_vectors.c | 54 +- osfmk/arm64/machine_remote_time.c | 17 +- osfmk/arm64/machine_routines.c | 1319 +- osfmk/arm64/machine_routines_asm.h | 22 +- osfmk/arm64/machine_routines_asm.s | 311 +- osfmk/arm64/machine_task.c | 2 +- osfmk/arm64/memcmp_zero.s | 145 + osfmk/arm64/monotonic_arm64.c | 171 +- osfmk/arm64/pac_asm.h | 131 + osfmk/arm64/pal_hibernate.h | 127 + osfmk/arm64/pcb.c | 414 +- osfmk/arm64/platform_tests.c | 183 +- osfmk/arm64/proc_reg.h | 554 +- osfmk/arm64/sleh.c | 598 +- osfmk/arm64/start.s | 533 +- osfmk/arm64/status.c | 211 +- osfmk/arm64/tlb.h | 116 +- osfmk/arm64/tunables/tunables.s | 45 + osfmk/arm64/tunables/tunables_h10.s | 76 + osfmk/arm64/tunables/tunables_h11.s | 80 + osfmk/arm64/tunables/tunables_h12.s | 100 + osfmk/arm64/tunables/tunables_h7.s | 57 + osfmk/arm64/tunables/tunables_h8.s | 60 + osfmk/arm64/tunables/tunables_h9.s | 61 + osfmk/atm/Makefile | 8 +- osfmk/atm/atm.c | 1097 +- osfmk/atm/atm_internal.h | 84 +- osfmk/atm/atm_types.defs | 7 +- osfmk/atm/atm_types.h | 7 +- osfmk/bank/Makefile | 8 +- osfmk/bank/bank.c | 116 +- osfmk/bank/bank_internal.h | 8 +- osfmk/conf/Makefile | 2 +- osfmk/conf/Makefile.arm | 13 + osfmk/conf/Makefile.arm64 | 38 +- osfmk/conf/Makefile.template | 363 +- osfmk/conf/Makefile.x86_64 | 4 +- osfmk/conf/files | 43 +- osfmk/conf/files.arm | 1 - osfmk/conf/files.arm64 | 9 +- osfmk/conf/files.x86_64 | 5 +- osfmk/console/serial_console.c | 349 +- osfmk/console/serial_general.c | 4 +- osfmk/console/serial_protos.h | 1 + osfmk/console/video_console.c | 3418 +- osfmk/console/video_console.h | 4 +- osfmk/corecrypto/{cc/src => }/cc_abort.c | 2 +- osfmk/corecrypto/{cc/src => }/cc_clear.c | 9 - osfmk/corecrypto/{cc/src => }/cc_cmp_safe.c | 0 .../{ccsha1/src => }/ccdigest_final_64be.c | 0 osfmk/corecrypto/ccdigest_init.c | 84 + .../{ccsha1/src => }/ccdigest_internal.h | 0 .../{ccdigest/src => }/ccdigest_update.c | 9 +- .../{ccdbrg/src => }/ccdrbg_nisthmac.c | 0 osfmk/corecrypto/{cchmac/src => }/cchmac.c | 0 .../{cchmac/src => }/cchmac_final.c | 0 .../corecrypto/{cchmac/src => }/cchmac_init.c | 0 .../{cchmac/src => }/cchmac_update.c | 0 .../corecrypto/{ccsha2/src => }/ccsha256_K.c | 0 .../corecrypto/{ccsha2/src => }/ccsha256_di.c | 0 .../{ccsha2/src => }/ccsha256_initial_state.c | 0 .../{ccsha2/src => }/ccsha256_ltc_compress.c | 0 .../{ccsha2/src => }/ccsha256_ltc_di.c | 0 .../{ccsha2/src => }/ccsha2_internal.h | 7 +- osfmk/corpses/corpse.c | 14 +- osfmk/corpses/task_corpse.h | 8 +- osfmk/default_pager/Makefile | 8 +- osfmk/device/Makefile | 2 +- osfmk/device/device_init.c | 15 - osfmk/device/iokit_rpc.c | 6 +- osfmk/device/subrs.c | 59 +- osfmk/gssd/Makefile | 12 +- osfmk/i386/AT386/model_dep.c | 408 +- osfmk/i386/Diagnostics.c | 1 + osfmk/i386/Makefile | 1 + osfmk/i386/acpi.c | 273 +- osfmk/i386/acpi.h | 1 + osfmk/i386/asm.h | 8 +- osfmk/i386/bsd_i386.c | 50 +- osfmk/i386/commpage/commpage.c | 63 +- osfmk/i386/commpage/commpage.h | 11 - osfmk/i386/commpage/commpage_asm.s | 28 - osfmk/i386/cpu.c | 28 - osfmk/i386/cpu_capabilities.h | 19 +- osfmk/i386/cpu_data.h | 181 +- osfmk/i386/cpu_number.h | 8 - osfmk/i386/cpu_threads.c | 24 +- 
osfmk/i386/cpu_topology.c | 23 +- osfmk/i386/cpuid.c | 155 +- osfmk/i386/cpuid.h | 32 +- osfmk/i386/endian.h | 2 +- osfmk/i386/fpu.c | 44 +- osfmk/i386/genassym.c | 24 +- osfmk/i386/hibernate_i386.c | 13 +- osfmk/i386/hibernate_restore.c | 31 +- osfmk/i386/hw_defs.h | 5 + osfmk/i386/i386_init.c | 230 +- osfmk/i386/i386_timer.c | 153 +- osfmk/i386/i386_vm_init.c | 55 +- osfmk/i386/lapic.h | 6 +- osfmk/i386/lapic_native.c | 174 +- osfmk/i386/locks.h | 9 +- osfmk/i386/locks_i386.c | 141 +- osfmk/i386/locks_i386_opt.c | 2 +- osfmk/i386/machine_check.c | 5 +- osfmk/i386/machine_routines.c | 298 +- osfmk/i386/machine_routines.h | 44 +- osfmk/i386/misc_protos.h | 14 +- osfmk/i386/mp.c | 52 +- osfmk/i386/mp.h | 1 + osfmk/i386/mp_desc.c | 130 +- osfmk/i386/mp_native.c | 2 +- osfmk/i386/mtrr.c | 6 +- osfmk/i386/pal_hibernate.h | 29 +- osfmk/i386/pal_routines.h | 3 +- osfmk/i386/panic_hooks.c | 3 +- osfmk/i386/panic_notify.c | 77 + osfmk/i386/panic_notify.h | 37 + osfmk/i386/pcb.c | 89 +- osfmk/i386/pcb_native.c | 353 +- osfmk/i386/phys.c | 13 - osfmk/i386/pmCPU.c | 4 +- osfmk/i386/pmap.h | 69 +- osfmk/i386/pmap_internal.h | 31 +- osfmk/i386/pmap_x86_common.c | 284 +- osfmk/i386/proc_reg.h | 94 +- osfmk/i386/rtclock.c | 12 + osfmk/i386/seg.h | 1 + osfmk/i386/smp.h | 3 - osfmk/i386/startup64.c | 265 - osfmk/i386/thread.h | 23 +- osfmk/i386/trap.c | 436 +- osfmk/i386/trap.h | 11 + osfmk/i386/trap_native.c | 4 +- osfmk/i386/tsc.c | 45 +- osfmk/i386/tsc.h | 6 + osfmk/i386/ucode.c | 3 +- osfmk/i386/vmx/vmx_cpu.c | 2 +- osfmk/ipc/flipc.c | 28 +- osfmk/ipc/flipc.h | 8 +- osfmk/ipc/ipc_entry.h | 7 +- osfmk/ipc/ipc_eventlink.c | 1161 + osfmk/ipc/ipc_eventlink.h | 142 + osfmk/ipc/ipc_hash.c | 1 - osfmk/ipc/ipc_importance.c | 65 +- osfmk/ipc/ipc_init.c | 146 +- osfmk/ipc/ipc_init.h | 10 - osfmk/ipc/ipc_kmsg.c | 671 +- osfmk/ipc/ipc_kmsg.h | 76 +- osfmk/ipc/ipc_mqueue.c | 11 +- osfmk/ipc/ipc_mqueue.h | 2 +- osfmk/ipc/ipc_notify.c | 2 - osfmk/ipc/ipc_object.c | 49 +- osfmk/ipc/ipc_object.h | 4 +- osfmk/ipc/ipc_port.c | 191 +- osfmk/ipc/ipc_port.h | 47 +- osfmk/ipc/ipc_pset.c | 8 +- osfmk/ipc/ipc_space.c | 6 +- osfmk/ipc/ipc_space.h | 8 +- osfmk/ipc/ipc_table.c | 46 +- osfmk/ipc/ipc_table.h | 7 +- osfmk/ipc/ipc_voucher.c | 160 +- osfmk/ipc/ipc_voucher.h | 2 - osfmk/ipc/mach_debug.c | 148 +- osfmk/ipc/mach_kernelrpc.c | 138 +- osfmk/ipc/mach_msg.c | 97 +- osfmk/ipc/mach_port.c | 102 +- osfmk/kdp/kdp.c | 1542 +- osfmk/kdp/kdp_core.c | 1066 +- osfmk/kdp/kdp_core.h | 8 +- osfmk/kdp/kdp_dyld.h | 5 + osfmk/kdp/kdp_internal.h | 6 +- osfmk/kdp/kdp_serial.c | 10 +- osfmk/kdp/kdp_udp.c | 144 +- osfmk/kdp/ml/arm/kdp_machdep.c | 72 +- osfmk/kdp/ml/arm/kdp_vm.c | 65 +- osfmk/kdp/ml/i386/kdp_x86_common.c | 8 +- osfmk/kdp/ml/x86_64/kdp_machdep.c | 36 +- osfmk/kdp/ml/x86_64/kdp_vm.c | 12 + osfmk/kdp/processor_core.c | 8 +- osfmk/kern/Makefile | 10 +- osfmk/kern/affinity.c | 20 +- osfmk/kern/arcade.c | 23 +- osfmk/kern/audit_sessionport.c | 2 +- osfmk/kern/backtrace.c | 22 +- osfmk/kern/backtrace.h | 3 +- osfmk/kern/bits.h | 90 +- osfmk/kern/block_hint.h | 4 +- osfmk/kern/bsd_kern.c | 176 +- osfmk/kern/btlog.c | 9 +- osfmk/kern/call_entry.h | 161 - osfmk/kern/cambria_layout.h | 41 + osfmk/kern/clock.c | 143 +- osfmk/kern/clock.h | 7 + osfmk/kern/clock_oldops.c | 21 +- osfmk/kern/coalition.c | 217 +- osfmk/kern/coalition.h | 2 +- osfmk/kern/cpu_quiesce.c | 63 +- osfmk/kern/cs_blobs.h | 34 +- osfmk/kern/debug.c | 504 +- osfmk/kern/debug.h | 65 +- osfmk/kern/ecc.h | 1 - osfmk/kern/ecc_logging.c | 12 +- osfmk/kern/exception.c | 12 +- 
osfmk/kern/exception.h | 4 +- osfmk/kern/gzalloc.c | 560 +- osfmk/kern/host.c | 373 +- osfmk/kern/host.h | 7 +- osfmk/kern/host_notify.c | 48 +- osfmk/kern/host_notify.h | 2 - osfmk/kern/host_statistics.h | 18 +- osfmk/kern/hv_support.h | 55 +- .../kern/{hv_support.c => hv_support_kext.c} | 52 +- osfmk/kern/hv_support_kext.h | 94 + osfmk/kern/ipc_host.c | 3 +- osfmk/kern/ipc_kobject.c | 201 +- osfmk/kern/ipc_kobject.h | 40 +- osfmk/kern/ipc_mig.c | 67 +- osfmk/kern/ipc_sync.c | 2 +- osfmk/kern/ipc_tt.c | 1696 +- osfmk/kern/ipc_tt.h | 66 +- osfmk/kern/kalloc.c | 1369 +- osfmk/kern/kalloc.h | 409 +- osfmk/kern/kcdata.h | 153 +- osfmk/kern/kern_cdata.c | 801 +- osfmk/kern/kern_cdata.h | 64 +- osfmk/kern/kern_stackshot.c | 1156 +- osfmk/kern/kern_types.h | 56 +- osfmk/kern/kext_alloc.c | 66 + osfmk/kern/kext_alloc.h | 4 + osfmk/kern/kpc.h | 55 +- osfmk/kern/kpc_common.c | 70 +- osfmk/kern/kpc_thread.c | 13 +- osfmk/kern/ledger.c | 32 +- osfmk/kern/ledger.h | 2 - osfmk/kern/lock_group.h | 122 +- osfmk/kern/lock_stat.h | 219 +- osfmk/kern/locks.c | 213 +- osfmk/kern/locks.h | 368 +- osfmk/kern/ltable.c | 60 +- osfmk/kern/ltable.h | 9 - osfmk/kern/mach_filter.h | 61 + osfmk/kern/mach_node.c | 23 +- osfmk/kern/machine.c | 102 +- osfmk/kern/machine.h | 7 + osfmk/kern/misc_protos.h | 12 +- osfmk/kern/mk_timer.c | 67 +- osfmk/kern/mk_timer.h | 20 +- osfmk/kern/monotonic.h | 7 - osfmk/kern/mpqueue.h | 8 +- osfmk/kern/mpsc_queue.c | 24 +- osfmk/kern/mpsc_queue.h | 4 +- osfmk/kern/percpu.h | 285 + osfmk/kern/policy_internal.h | 27 +- osfmk/kern/printf.c | 94 +- osfmk/kern/priority.c | 161 +- osfmk/kern/priority_queue.c | 102 - osfmk/kern/priority_queue.h | 948 +- osfmk/kern/processor.c | 427 +- osfmk/kern/processor.h | 382 +- osfmk/kern/processor_data.h | 159 - osfmk/kern/queue.h | 35 +- osfmk/kern/remote_time.c | 75 +- osfmk/kern/remote_time.h | 8 + osfmk/kern/restartable.c | 7 +- osfmk/kern/sched.h | 22 +- osfmk/kern/sched_amp.c | 10 +- osfmk/kern/sched_amp_common.c | 54 +- osfmk/kern/sched_amp_common.h | 2 + osfmk/kern/sched_average.c | 8 +- osfmk/kern/sched_clutch.c | 3261 +- osfmk/kern/sched_clutch.h | 242 +- osfmk/kern/sched_dualq.c | 22 +- osfmk/kern/sched_grrr.c | 23 +- osfmk/kern/sched_multiq.c | 42 +- osfmk/kern/sched_prim.c | 1461 +- osfmk/kern/sched_prim.h | 160 +- osfmk/kern/sched_proto.c | 11 +- osfmk/kern/sched_traditional.c | 22 +- osfmk/kern/sfi.c | 123 +- osfmk/kern/sfi.h | 1 - osfmk/kern/simple_lock.h | 30 +- osfmk/kern/stack.c | 57 +- osfmk/kern/startup.c | 343 +- osfmk/kern/startup.h | 281 +- osfmk/kern/suid_cred.c | 16 +- osfmk/kern/suid_cred.h | 4 +- osfmk/kern/sync_lock.c | 1 - osfmk/kern/sync_sema.c | 78 +- osfmk/kern/sync_sema.h | 4 +- osfmk/kern/syscall_emulation.c | 1 - osfmk/kern/syscall_subr.c | 46 +- osfmk/kern/syscall_sw.c | 17 +- osfmk/kern/task.c | 833 +- osfmk/kern/task.h | 153 +- osfmk/kern/task_policy.c | 245 +- osfmk/kern/telemetry.c | 88 +- osfmk/kern/thread.c | 522 +- osfmk/kern/thread.h | 463 +- osfmk/kern/thread_act.c | 64 +- osfmk/kern/thread_call.c | 1067 +- osfmk/kern/thread_call.h | 65 +- osfmk/kern/thread_group.c | 894 +- osfmk/kern/thread_group.h | 67 + osfmk/kern/thread_policy.c | 130 +- osfmk/kern/timer.c | 8 +- osfmk/kern/timer_call.c | 856 +- osfmk/kern/timer_call.h | 150 +- osfmk/kern/timer_queue.h | 5 - osfmk/kern/tlock.c | 40 +- osfmk/kern/turnstile.c | 277 +- osfmk/kern/turnstile.h | 4 +- osfmk/kern/waitq.c | 70 +- osfmk/kern/waitq.h | 23 +- osfmk/kern/work_interval.c | 726 +- osfmk/kern/work_interval.h | 16 +- osfmk/kern/zalloc.c | 6348 +- 
osfmk/kern/zalloc.h | 1185 +- osfmk/kern/zalloc_internal.h | 560 + osfmk/kern/zcache.c | 824 +- osfmk/kern/{zcache.h => zcache_internal.h} | 112 +- osfmk/kextd/Makefile | 2 +- osfmk/kperf/Makefile | 2 +- osfmk/kperf/action.c | 10 +- osfmk/kperf/arm/kperf_mp.c | 98 - osfmk/kperf/buffer.h | 10 +- osfmk/kperf/callstack.c | 8 +- osfmk/kperf/callstack.h | 4 + osfmk/kperf/kdebug_trigger.c | 37 +- osfmk/kperf/kdebug_trigger.h | 2 +- osfmk/kperf/kperf.c | 179 +- osfmk/kperf/kperf.h | 85 +- osfmk/kperf/kperf_timer.c | 532 - osfmk/kperf/kperf_timer.h | 111 - osfmk/kperf/kperfbsd.c | 78 +- osfmk/kperf/kptimer.c | 734 + osfmk/kperf/kptimer.h | 151 + osfmk/kperf/lazy.c | 3 + osfmk/kperf/pet.c | 615 +- osfmk/kperf/pet.h | 51 +- osfmk/kperf/thread_samplers.c | 9 +- osfmk/kperf/x86_64/kperf_mp.c | 89 - osfmk/libsa/stdlib.h | 3 +- osfmk/libsa/string.h | 41 +- osfmk/libsa/types.h | 1 + osfmk/lockd/Makefile | 2 +- osfmk/mach/Makefile | 14 +- osfmk/mach/arm/Makefile | 2 +- osfmk/mach/arm/_structs.h | 30 +- osfmk/mach/arm/exception.h | 1 + osfmk/mach/arm/sdt_isa.h | 634 +- osfmk/mach/arm/thread_status.h | 69 +- osfmk/mach/arm/traps.h | 32 + osfmk/mach/arm/vm_param.h | 99 +- osfmk/mach/audit_triggers.defs | 10 +- osfmk/mach/audit_triggers_types.h | 1 + osfmk/mach/coalition.h | 7 + osfmk/mach/dyld_kernel_fixups.h | 610 + osfmk/mach/host_priv.defs | 14 +- osfmk/mach/i386/_structs.h | 80 + osfmk/mach/i386/sdt_isa.h | 657 +- osfmk/mach/i386/thread_state.h | 2 +- osfmk/mach/i386/thread_status.h | 19 +- osfmk/mach/i386/vm_param.h | 64 +- osfmk/mach/i386/vm_types.h | 5 - osfmk/mach/kern_return.h | 4 + osfmk/mach/kmod.h | 1 + osfmk/mach/mach_eventlink.defs | 61 + osfmk/mach/mach_eventlink_types.h | 97 + osfmk/mach/mach_param.h | 6 + osfmk/mach/mach_port.defs | 73 +- osfmk/mach/mach_traps.h | 18 +- osfmk/mach/mach_types.defs | 82 + osfmk/mach/mach_types.h | 54 +- osfmk/mach/mach_vm.defs | 31 +- osfmk/mach/machine.h | 21 +- osfmk/mach/machine/sdt.h | 30 + osfmk/mach/machine/sdt_isa.h | 8 +- osfmk/mach/memory_object_types.h | 17 +- osfmk/mach/message.h | 101 +- osfmk/mach/port.h | 9 +- osfmk/mach/processor_set.defs | 8 + osfmk/mach/semaphore.h | 1 + osfmk/mach/shared_memory_server.h | 8 - osfmk/mach/shared_region.h | 55 +- osfmk/mach/syscall_sw.h | 2 +- osfmk/mach/task.defs | 50 +- osfmk/mach/task_info.h | 4 +- osfmk/mach/task_policy.h | 20 +- osfmk/mach/task_special_ports.h | 13 +- osfmk/mach/thread_act.defs | 39 +- osfmk/mach/thread_info.h | 1 + osfmk/mach/thread_policy.h | 11 +- osfmk/mach/thread_special_ports.h | 7 +- osfmk/mach/thread_status.h | 3 + osfmk/mach/vm_param.h | 239 +- osfmk/mach/vm_prot.h | 8 + osfmk/mach/vm_statistics.h | 55 +- osfmk/mach/vm_types.h | 16 +- osfmk/machine/Makefile | 4 +- osfmk/machine/atomic.h | 730 +- osfmk/machine/atomic_impl.h | 220 - osfmk/machine/machine_routines.h | 127 + osfmk/machine/pal_hibernate.h | 130 + osfmk/machine/smp.h | 3 + osfmk/prng/Makefile | 1 + osfmk/prng/entropy.c | 70 + osfmk/prng/entropy.h | 94 + osfmk/prng/prng_random.c | 15 +- osfmk/prng/random.h | 29 - osfmk/tests/bitmap_test.c | 23 + osfmk/tests/kernel_tests.c | 167 +- osfmk/tests/ktest_emit.c | 12 +- osfmk/tests/pmap_tests.c | 20 + osfmk/tests/ptrauth_data_tests.c | 127 + osfmk/tests/vfp_state_test.c | 247 + osfmk/tests/xnupost.h | 2 +- osfmk/vm/bsd_vm.c | 57 +- osfmk/vm/device_vm.c | 44 +- osfmk/vm/memory_object.c | 91 +- osfmk/vm/memory_object.h | 5 - osfmk/vm/pmap.h | 124 +- osfmk/vm/vm32_user.c | 1 - osfmk/vm/vm_apple_protect.c | 33 +- osfmk/vm/vm_compressor.c | 745 +- osfmk/vm/vm_compressor.h | 128 +- 
osfmk/vm/vm_compressor_algorithms.c | 33 +- osfmk/vm/vm_compressor_algorithms.h | 6 +- osfmk/vm/vm_compressor_backing_store.c | 229 +- osfmk/vm/vm_compressor_backing_store.h | 23 +- osfmk/vm/vm_compressor_pager.c | 153 +- osfmk/vm/vm_compressor_pager.h | 6 + osfmk/vm/vm_fault.c | 2306 +- osfmk/vm/vm_fault.h | 6 +- osfmk/vm/vm_fourk_pager.c | 40 +- osfmk/vm/vm_init.c | 141 +- osfmk/vm/vm_init.h | 2 - osfmk/vm/vm_kern.c | 152 +- osfmk/vm/vm_kern.h | 54 +- osfmk/vm/vm_map.c | 3947 +- osfmk/vm/vm_map.h | 272 +- osfmk/vm/vm_map_store.c | 29 +- osfmk/vm/vm_map_store.h | 1 + osfmk/vm/vm_map_store_ll.c | 33 + osfmk/vm/vm_map_store_ll.h | 1 + osfmk/vm/vm_object.c | 805 +- osfmk/vm/vm_object.h | 26 +- osfmk/vm/vm_page.h | 278 +- osfmk/vm/vm_pageout.c | 1414 +- osfmk/vm/vm_pageout.h | 70 +- osfmk/vm/vm_protos.h | 179 +- osfmk/vm/vm_purgeable.c | 4 +- osfmk/vm/vm_resident.c | 569 +- osfmk/vm/vm_shared_region.c | 1923 +- osfmk/vm/vm_shared_region.h | 150 +- osfmk/vm/vm_shared_region_pager.c | 544 +- osfmk/vm/vm_swapfile_pager.c | 28 +- osfmk/vm/vm_tests.c | 962 + osfmk/vm/vm_user.c | 1168 +- osfmk/voucher/Makefile | 8 +- osfmk/voucher/ipc_pthread_priority.c | 1 - osfmk/x86_64/copyio.c | 18 +- osfmk/x86_64/dwarf_unwind.h | 115 + osfmk/x86_64/idt64.s | 211 +- osfmk/x86_64/kpc_x86.c | 64 +- osfmk/x86_64/loose_ends.c | 136 +- osfmk/x86_64/machine_remote_time.c | 10 +- osfmk/x86_64/pmap.c | 255 +- pexpert/arm/pe_consistent_debug.c | 6 +- pexpert/arm/pe_identify_machine.c | 221 +- pexpert/arm/pe_init.c | 132 +- pexpert/arm/pe_kprintf.c | 67 +- pexpert/arm/pe_serial.c | 212 +- pexpert/conf/Makefile | 2 +- pexpert/conf/Makefile.template | 24 +- pexpert/conf/files | 1 + pexpert/gen/bootargs.c | 26 +- pexpert/gen/device_tree.c | 207 +- pexpert/gen/kcformat.c | 228 + pexpert/gen/pe_gen.c | 34 +- pexpert/i386/pe_init.c | 35 +- pexpert/i386/pe_kprintf.c | 63 +- pexpert/pexpert/GearImage.h | 7738 +- pexpert/pexpert/arm/Makefile | 38 +- pexpert/pexpert/arm/S7002.h | 41 - pexpert/pexpert/arm/board_config.h | 19 +- pexpert/pexpert/arm/consistent_debug.h | 5 + pexpert/pexpert/arm/protos.h | 2 +- pexpert/pexpert/arm64/AMCC.h | 27 - pexpert/pexpert/arm64/BCM2837.h | 15 +- pexpert/pexpert/arm64/H7.h | 47 + pexpert/pexpert/arm64/H8.h | 46 + pexpert/pexpert/arm64/H9.h | 52 + pexpert/pexpert/arm64/Makefile | 53 +- pexpert/pexpert/arm64/S8000.h | 17 - pexpert/pexpert/arm64/T7000.h | 19 - pexpert/pexpert/arm64/T8010.h | 22 - pexpert/pexpert/arm64/apple_arm64_common.h | 85 + .../{arm64_common.h => apple_arm64_regs.h} | 150 +- pexpert/pexpert/arm64/board_config.h | 364 +- pexpert/pexpert/arm64/hurricane.h | 27 - pexpert/pexpert/arm64/spr_locks.h | 1 + pexpert/pexpert/arm64/twister.h | 17 - pexpert/pexpert/arm64/typhoon.h | 18 - pexpert/pexpert/device_tree.h | 57 +- pexpert/pexpert/i386/boot.h | 15 +- pexpert/pexpert/i386/efi.h | 4 + pexpert/pexpert/i386/protos.h | 1 + pexpert/pexpert/machine/boot.h | 6 + pexpert/pexpert/machine/protos.h | 3 + pexpert/pexpert/pexpert.h | 96 +- pexpert/pexpert/protos.h | 6 +- san/Makefile | 27 +- san/conf/Makefile | 2 +- san/conf/Makefile.template | 4 +- san/kasan-arm64.c | 30 +- san/kasan-blacklist | 11 +- san/kasan-blacklist-arm64 | 1 + san/kasan-blacklist-x86_64 | 14 +- san/kasan-fakestack.c | 20 +- san/kasan-test.c | 2 +- san/kasan-x86_64.c | 8 +- san/kasan.c | 244 +- san/kasan.h | 9 +- san/kasan_dynamic_blacklist.c | 9 +- san/kasan_internal.h | 33 +- san/ksancov.c | 349 +- san/ksancov.h | 13 + san/ubsan.c | 111 +- san/ubsan.h | 11 +- san/ubsan_log.c | 77 +- security/Makefile | 1 - 
security/_label.h | 8 +- security/conf/Makefile | 2 +- security/conf/Makefile.template | 26 +- security/conf/files | 5 - security/mac.h | 7 - security/mac_alloc.c | 164 - security/mac_alloc.h | 77 - security/mac_audit.c | 7 +- security/mac_base.c | 126 +- security/mac_file.c | 12 +- security/mac_framework.h | 109 +- security/mac_inet.c | 303 - security/mac_internal.h | 26 - security/mac_iokit.c | 10 - security/mac_label.c | 28 +- security/mac_mach.c | 3 +- security/mac_mach_internal.h | 20 + security/mac_net.c | 534 - security/mac_pipe.c | 63 +- security/mac_policy.h | 1386 +- security/mac_process.c | 79 +- security/mac_socket.c | 431 +- security/mac_system.c | 2 +- security/mac_vfs.c | 74 +- tests/Makefile | 384 +- tests/backtracing.c | 194 +- tests/bounded_array.cpp | 11 + tests/bounded_array_ref.cpp | 11 + tests/bounded_array_ref_src/begin_end.cpp | 47 + .../compare.equal.nullptr.cpp | 46 + tests/bounded_array_ref_src/ctor.C_array.cpp | 62 + .../bounded_array_ref_src/ctor.begin_end.cpp | 80 + .../ctor.bounded_array.cpp | 69 + .../ctor.bounded_ptr.cpp | 104 + tests/bounded_array_ref_src/ctor.default.cpp | 37 + tests/bounded_array_ref_src/ctor.raw_ptr.cpp | 121 + tests/bounded_array_ref_src/data.cpp | 46 + tests/bounded_array_ref_src/for_loop.cpp | 30 + tests/bounded_array_ref_src/operator.bool.cpp | 38 + .../operator.subscript.cpp | 35 + tests/bounded_array_ref_src/size.cpp | 45 + tests/bounded_array_ref_src/slice.cpp | 227 + tests/bounded_array_ref_src/test_policy.h | 52 + tests/bounded_array_src/begin_end.cpp | 63 + .../bounded_array_src/ctor.aggregate_init.cpp | 59 + tests/bounded_array_src/ctor.default.cpp | 69 + tests/bounded_array_src/data.cpp | 58 + tests/bounded_array_src/for_loop.cpp | 29 + .../bounded_array_src/operator.subscript.cpp | 56 + tests/bounded_array_src/size.cpp | 31 + tests/bounded_array_src/test_policy.h | 26 + tests/bounded_ptr.cpp | 11 + tests/bounded_ptr_03.cpp | 10 + tests/bounded_ptr_src/arith.add.cpp | 127 + tests/bounded_ptr_src/arith.add_assign.cpp | 200 + tests/bounded_ptr_src/arith.difference.cpp | 122 + tests/bounded_ptr_src/arith.inc_dec.cpp | 147 + tests/bounded_ptr_src/arith.subtract.cpp | 106 + .../bounded_ptr_src/arith.subtract_assign.cpp | 197 + tests/bounded_ptr_src/assign.convert.cpp | 140 + tests/bounded_ptr_src/assign.nullptr.cpp | 55 + tests/bounded_ptr_src/compare.equal.cpp | 152 + .../bounded_ptr_src/compare.equal.nullptr.cpp | 68 + tests/bounded_ptr_src/compare.equal.raw.cpp | 190 + tests/bounded_ptr_src/compare.order.cpp | 168 + tests/bounded_ptr_src/compare.order.raw.cpp | 212 + tests/bounded_ptr_src/ctor.begin_end.cpp | 99 + tests/bounded_ptr_src/ctor.convert.cpp | 142 + tests/bounded_ptr_src/ctor.default.cpp | 34 + tests/bounded_ptr_src/ctor.nullptr.cpp | 97 + tests/bounded_ptr_src/deref.cpp | 171 + tests/bounded_ptr_src/discard_bounds.cpp | 124 + tests/bounded_ptr_src/example.malloc.cpp | 75 + tests/bounded_ptr_src/operator_bool.cpp | 42 + tests/bounded_ptr_src/reinterpret_cast.cpp | 86 + tests/bounded_ptr_src/subscript.cpp | 252 + tests/bounded_ptr_src/test_utils.h | 20 + .../bounded_ptr_src/unsafe_discard_bounds.cpp | 144 + tests/contextswitch.c | 6 +- tests/correct_kernel_booted.c | 167 + tests/decompression_failure.c | 172 + tests/dirtiness_tracking.c | 173 + tests/driverkit/Makefile | 100 + .../Info.plist | 70 + ...intentionally_crashing_driver_56101852.cpp | 30 + ...ally_crashing_driver_56101852.entitlements | 10 + ...intentionally_crashing_driver_56101852.iig | 21 + tests/exc_helpers.c | 256 + tests/exc_helpers.h | 55 + 
tests/fd.c | 94 + tests/fd_aio_fsync_uaf.c | 71 + tests/fduiomove.c | 26 + tests/filter_policy.c | 178 + tests/flow_div_doubleconnect_55917185.c | 63 + tests/fp_exception.c | 195 +- tests/ftruncate.c | 77 + tests/get_shared_cache_address.c | 40 + tests/hv_private.entitlements | 8 + tests/hv_public.entitlements | 8 + tests/hvbench.c | 366 + tests/hvtest_x86.m | 1248 + tests/hvtest_x86_asm.s | 486 + tests/hvtest_x86_guest.c | 77 + tests/hvtest_x86_guest.h | 31 + tests/inspect_port.c | 581 + tests/intrusive_shared_ptr.cpp | 11 + .../abi.callee.raw.cpp | 15 + .../abi.callee.smart.cpp | 18 + .../abi.caller.raw.cpp | 24 + .../abi.caller.smart.cpp | 31 + .../abi.size_alignment.cpp | 142 + tests/intrusive_shared_ptr_src/abi_helper.h | 21 + .../intrusive_shared_ptr_src/assign.copy.cpp | 121 + .../intrusive_shared_ptr_src/assign.move.cpp | 122 + .../assign.nullptr.cpp | 46 + tests/intrusive_shared_ptr_src/cast.const.cpp | 67 + .../cast.reinterpret.cpp | 76 + .../intrusive_shared_ptr_src/cast.static.cpp | 78 + .../compare.equal.cpp | 86 + .../compare.equal.nullptr.cpp | 62 + .../compare.equal.raw.cpp | 107 + tests/intrusive_shared_ptr_src/ctor.copy.cpp | 101 + .../intrusive_shared_ptr_src/ctor.default.cpp | 37 + tests/intrusive_shared_ptr_src/ctor.move.cpp | 115 + .../intrusive_shared_ptr_src/ctor.nullptr.cpp | 33 + .../ctor.ptr.no_retain.cpp | 36 + .../ctor.ptr.retain.cpp | 36 + tests/intrusive_shared_ptr_src/deref.cpp | 38 + tests/intrusive_shared_ptr_src/detach.cpp | 32 + tests/intrusive_shared_ptr_src/dtor.cpp | 38 + tests/intrusive_shared_ptr_src/get.cpp | 50 + .../operator.bool.cpp | 45 + tests/intrusive_shared_ptr_src/reset.cpp | 57 + .../reset.no_retain.cpp | 76 + .../intrusive_shared_ptr_src/reset.retain.cpp | 89 + tests/intrusive_shared_ptr_src/swap.cpp | 96 + tests/intrusive_shared_ptr_src/test_policy.h | 80 + tests/invalid_setaudit_57414044.c | 46 + tests/ioconnectasyncmethod_57641955.c | 29 + tests/ipsec.entitlements | 10 + tests/ipsec.m | 616 + tests/jitbox-entitlements.plist | 9 + tests/jumbo_va_spaces_28530648.c | 12 +- tests/jumbo_va_spaces_52551256.entitlements | 9 + tests/kas_info.c | 255 + tests/kdebug.c | 332 + tests/kernel_symbolication_entitlements.plist | 10 + tests/kernel_uuid_match.c | 194 - tests/kevent_qos.c | 4 +- tests/kpc.c | 24 +- tests/kperf.c | 803 +- tests/kperf_backtracing.c | 38 +- tests/kperf_helpers.c | 14 +- tests/kperf_helpers.h | 4 +- tests/ktrace_helpers.h | 18 +- ...om.apple.xnu.test.turnstile_multihop.plist | 9 +- tests/ldt.c | 18 + tests/mach_eventlink.c | 812 + tests/memcmp_zero.c | 91 + tests/memorystatus_freeze_test.c | 338 +- tests/memorystatus_is_assertion.c | 6 +- tests/memorystatus_vm_map_fork.c | 6 +- tests/memorystatus_zone_test.c | 6 +- tests/mpsc.c | 2 +- tests/net_bridge.c | 31 +- tests/net_tun_pr_35136664.c | 3 +- .../netagent_kctl_header_infodisc_56190773.c | 51 + tests/netagent_race_infodisc_56244905.c | 43 +- tests/no32exec_35914211.c | 104 - tests/no32exec_35914211_helper.c | 17 - tests/os_atomic.cpp | 33 + tests/os_proc.c | 53 +- tests/os_refcnt.c | 72 +- tests/osptr.cpp | 772 - tests/osptr_compat.cpp | 18 + tests/osptr_dumb.cpp | 80 - tests/osptr_helper.cpp | 24 - tests/perf_vmfault.c | 5 +- tests/pfz.c | 287 + tests/pid_for_task_test.c | 17 + tests/pipe_read_infloop_55437634.c | 53 + tests/posix_spawn_archpref.c | 63 + tests/posix_spawn_archpref_helper.c | 20 + tests/preoslog.c | 125 + tests/priority_queue.cpp | 285 + tests/proc_info.c | 2 +- tests/proc_info_udata.c | 9 +- tests/proc_pidpath_audittoken.c | 105 + tests/proc_rlimit.c | 
271 + tests/ptrauth-entitlements.plist | 8 + tests/ptrauth_data_tests.c | 15 + tests/ptrauth_failure.c | 47 + tests/pwrite_avoid_sigxfsz_28581610.c | 8 +- tests/rename_excl.c | 121 + tests/safe_allocation.cpp | 11 + tests/safe_allocation_src/assign.copy.cpp | 16 + tests/safe_allocation_src/assign.move.cpp | 143 + tests/safe_allocation_src/assign.nullptr.cpp | 46 + tests/safe_allocation_src/begin_end.cpp | 71 + .../compare.equal.nullptr.cpp | 45 + tests/safe_allocation_src/ctor.adopt.cpp | 36 + tests/safe_allocation_src/ctor.allocate.cpp | 67 + tests/safe_allocation_src/ctor.copy.cpp | 18 + tests/safe_allocation_src/ctor.default.cpp | 41 + tests/safe_allocation_src/ctor.move.cpp | 105 + tests/safe_allocation_src/ctor.nullptr.cpp | 49 + tests/safe_allocation_src/data.cpp | 40 + tests/safe_allocation_src/dtor.cpp | 50 + tests/safe_allocation_src/operator.bool.cpp | 42 + .../operator.subscript.cpp | 84 + tests/safe_allocation_src/size.cpp | 48 + tests/safe_allocation_src/swap.cpp | 104 + tests/safe_allocation_src/test_utils.h | 97 + tests/safe_allocation_src/usage.for_loop.cpp | 29 + .../usage.two_dimensions.cpp | 39 + tests/sbuf_tests.c | 11 + tests/scanf.c | 195 + tests/sched_cluster_bound_threads.c | 140 + tests/select_stress.c | 416 + tests/settimeofday_29193041.c | 6 - tests/settimeofday_29193041_entitled.c | 6 - tests/shared_cache_reslide_test.c | 49 + tests/shared_cache_tests.c | 5 + tests/sigchld_return.c | 55 +- tests/signal_stack.c | 56 + tests/sioc-if-addr-bounds.c | 262 + tests/sr_entitlement.c | 103 + tests/sr_entitlement.entitlements | 8 + tests/sr_entitlement_helper.c | 15 + tests/stackshot_accuracy.m | 4 +- tests/stackshot_spawn_exit_stress.c | 2 +- tests/stackshot_tests.m | 937 +- tests/stackshot_translated_child.c | 16 + tests/subsystem_root_path-entitlements.plist | 9 + tests/subsystem_root_path.c | 48 + tests/subsystem_root_path.h | 30 + tests/subsystem_root_path_helper.c | 60 + tests/sysctl_hw.c | 20 + tests/sysctl_system_version.c | 21 + tests/system_version_compat.c | 294 + tests/task_filter_msg.c | 76 + tests/task_policy.c | 600 + tests/task_policy_entitlement.plist | 13 + tests/telemetry.c | 4 + tests/test_dext_launch_56101852.c | 101 + tests/test_dext_launch_56101852.entitlements | 8 + tests/test_sysctl_kern_procargs_25397314.m | 343 + tests/thread_group_set_32261625.c | 3 +- tests/thread_set_state_arm64_cpsr.c | 96 + tests/turnstile_multihop.c | 63 +- tests/ulock.c | 92 + tests/vm/entitlement_increased_memory_limit.c | 148 + ...lement_increased_memory_limit.entitlements | 8 + tests/vm/fault_throughput.c | 684 + tests/vm/fault_throughput.lua | 103 + tests/vm/fault_throughput.plist | 47 + tests/vm/kern_max_task_pmem.c | 45 + tests/vm/memorystatus_sort_test.c | 370 + tests/vm/perf_helpers.c | 69 + tests/vm/perf_helpers.h | 34 + tests/vm/perf_madvise.c | 200 + tests/vm/perf_madvise.lua | 69 + tests/vm/vm_allocation.c | 4184 + tests/vm/zone_gc_replenish_test.c | 78 + tests/vm_kern_count_wired_kernelcache.c | 73 + tests/vm_memory_tests_src/common.c | 173 + tests/vm_memory_tests_src/mach_vm_tests.h | 63 + tests/vm_memory_tests_src/main.c | 155 + tests/vm_memory_tests_src/server.c | 67 + tests/vm_memory_tests_src/vm_tests.c | 685 + tests/vm_set_max_addr_test.c | 2 +- tests/vm_spawn_tool.c | 62 + tests/vm_test_code_signing_helper.c | 152 + tests/vm_test_mach_map.c | 1083 + tests/voucher_traps.c | 44 +- tests/vsock.c | 838 + tests/wired_mem_bench.c | 100 - tests/work_interval_test.c | 117 + tests/work_interval_test_unentitled.c | 207 + tests/workq_sigprof.c | 8 +- tests/x18.c 
| 68 + tests/zalloc.c | 15 + tools/cred_dump_creds.c | 2 +- tools/lldbmacros/Makefile | 1 - tools/lldbmacros/atm.py | 96 - tools/lldbmacros/core/cvalue.py | 168 +- tools/lldbmacros/core/kernelcore.py | 20 +- tools/lldbmacros/core/operating_system.py | 53 +- tools/lldbmacros/ioreg.py | 253 +- tools/lldbmacros/ipc.py | 130 +- tools/lldbmacros/kasan.py | 4 +- tools/lldbmacros/kcdata.py | 148 +- tools/lldbmacros/kevent.py | 9 +- tools/lldbmacros/ktrace.py | 299 +- tools/lldbmacros/memory.py | 1060 +- tools/lldbmacros/misc.py | 286 +- tools/lldbmacros/net.py | 48 +- tools/lldbmacros/pmap.py | 468 +- tools/lldbmacros/process.py | 263 +- tools/lldbmacros/scheduler.py | 449 +- tools/lldbmacros/skywalk.py | 52 +- tools/lldbmacros/structanalyze.py | 131 +- tools/lldbmacros/turnstile.py | 4 +- tools/lldbmacros/userspace.py | 7 +- .../lldbmacros/usertaskdebugging/gdbserver.py | 8 +- .../lldbmacros/usertaskdebugging/interface.py | 4 +- .../usertaskdebugging/userprocess.py | 34 +- tools/lldbmacros/usertaskgdbserver.py | 8 +- tools/lldbmacros/workqueue.py | 8 +- tools/lldbmacros/xnu.py | 14 +- tools/lldbmacros/xnudefines.py | 18 +- tools/lldbmacros/zonetriage.py | 6 +- tools/tests/MPMMTest/Makefile | 36 +- tools/tests/Makefile | 13 +- tools/tests/Makefile.common | 25 + tools/tests/TLBcoherency/Makefile | 13 +- tools/tests/affinity/Makefile | 28 +- tools/tests/execperf/Makefile | 15 +- tools/tests/jitter/Makefile | 14 +- tools/tests/mktimer/Makefile | 13 +- tools/tests/perf_index/Makefile | 19 +- tools/tests/perf_index/test_controller.py | 8 +- tools/tests/personas/Makefile | 24 +- tools/tests/superpages/Makefile | 13 +- .../testkext.xcodeproj/project.pbxproj | 2 - tools/tests/zero-to-n/Makefile | 13 +- tools/tests/zero-to-n/zero-to-n.c | 36 +- 2026 files changed, 194346 insertions(+), 182617 deletions(-) create mode 100644 EXTERNAL_HEADERS/acpi/Acpi.h create mode 100644 EXTERNAL_HEADERS/acpi/Acpi_v1.h delete mode 100644 EXTERNAL_HEADERS/architecture/arm/arm_neon.h delete mode 100644 EXTERNAL_HEADERS/corecrypto/cc_debug.h create mode 100644 EXTERNAL_HEADERS/corecrypto/cc_fault_canary.h delete mode 100644 EXTERNAL_HEADERS/corecrypto/ccder.h create mode 100644 EXTERNAL_HEADERS/corecrypto/ccmd4.h delete mode 100644 EXTERNAL_HEADERS/corecrypto/ccmd5.h delete mode 100644 EXTERNAL_HEADERS/corecrypto/ccmode_factory.h delete mode 100644 EXTERNAL_HEADERS/corecrypto/ccpbkdf2.h delete mode 100644 EXTERNAL_HEADERS/corecrypto/ccrc4.h create mode 100644 EXTERNAL_HEADERS/img4/chip.h delete mode 100644 EXTERNAL_HEADERS/img4/environment.h create mode 100644 EXTERNAL_HEADERS/img4/firmware.h create mode 100644 EXTERNAL_HEADERS/img4/image.h delete mode 100644 EXTERNAL_HEADERS/img4/img4.h create mode 100644 EXTERNAL_HEADERS/img4/object.h delete mode 100644 EXTERNAL_HEADERS/img4/payload.h create mode 100644 EXTERNAL_HEADERS/img4/runtime.h create mode 100644 EXTERNAL_HEADERS/mach-o/fixup-chains.h rename bsd/crypto/{rc4 => entropy}/Makefile (69%) create mode 100644 bsd/crypto/entropy/diag_entropy_sysctl.c rename osfmk/kperf/kperf_arch.h => bsd/crypto/entropy/diag_entropy_sysctl.h (82%) delete mode 100644 bsd/crypto/rc4/rc4.c delete mode 100644 bsd/crypto/rc4/rc4.h create mode 100644 bsd/dev/dtrace/scripts/vm_object_ownership.d create mode 100644 bsd/dev/dtrace/scripts/vmx_compat.d delete mode 100755 bsd/kern/makekdebugevents.py create mode 100644 bsd/kern/sys_eventlink.c create mode 100644 bsd/kern/vsock_domain.c delete mode 100644 bsd/libkern/skpc.c create mode 100644 bsd/machine/machine_types.modulemap create mode 100644 
bsd/man/man2/preadv.2 create mode 100644 bsd/man/man2/pwritev.2 create mode 100644 bsd/man/man3/posix_spawnattr_setarchpref_np.3 delete mode 100644 bsd/man/man4/divert.4 create mode 100644 bsd/man/man4/vsock.4 create mode 100644 bsd/miscfs/bindfs/Makefile create mode 100644 bsd/miscfs/bindfs/bind_subr.c create mode 100644 bsd/miscfs/bindfs/bind_vfsops.c create mode 100644 bsd/miscfs/bindfs/bind_vnops.c create mode 100644 bsd/miscfs/bindfs/bindfs.h delete mode 100644 bsd/net/pktsched/pktsched_qfq.c delete mode 100644 bsd/net/pktsched/pktsched_qfq.h delete mode 100644 bsd/net/pktsched/pktsched_tcq.c delete mode 100644 bsd/net/pktsched/pktsched_tcq.h delete mode 100644 bsd/netinet/ip_divert.c delete mode 100644 bsd/netinet/ip_divert.h delete mode 100644 bsd/netinet/ip_fw.h delete mode 100644 bsd/netinet/ip_fw2.c delete mode 100644 bsd/netinet/ip_fw2.h delete mode 100644 bsd/netinet/ip_fw2_compat.c delete mode 100644 bsd/netinet/ip_fw2_compat.h delete mode 100644 bsd/netinet/lro_ext.h delete mode 100644 bsd/netinet/tcp_lro.c delete mode 100644 bsd/netinet/tcp_lro.h delete mode 100644 bsd/netinet6/ip6_fw.c delete mode 100644 bsd/netinet6/ip6_fw.h create mode 100644 bsd/netinet6/nd6_rti.c delete mode 100644 bsd/sys/callout.h delete mode 100644 bsd/sys/clist.h create mode 100644 bsd/sys/proc_require.h create mode 100644 bsd/sys/sys__types.modulemap create mode 100644 bsd/sys/sys_cdefs.modulemap create mode 100644 bsd/sys/sys_types.modulemap create mode 100644 bsd/sys/unicode.h create mode 100644 bsd/sys/vsock.h create mode 100644 bsd/sys/vsock_domain.h create mode 100644 bsd/sys/vsock_transport.h create mode 100644 bsd/tests/ptrauth_data_tests_sysctl.c create mode 100644 bsd/vfs/vfs_unicode.c create mode 100644 bsd/vfs/vfs_unicode_data.h create mode 100644 config/IOKit.arm64.MacOSX.exports create mode 100644 config/IOKit.arm64.hibernation.MacOSX.exports create mode 100644 config/IOKit.x86_64.MacOSX.exports create mode 100644 config/Libkern.arm64.MacOSX.exports create mode 100644 config/Libkern.x86_64.MacOSX.exports create mode 100644 config/MASTER.arm64.BridgeOS create mode 100644 config/MASTER.arm64.MacOSX create mode 100644 config/MASTER.arm64.iPhoneOS create mode 100644 config/Private.arm64.MacOSX.exports create mode 100644 config/Unsupported.arm64.MacOSX.exports create mode 100644 config/Unsupported.x86_64.MacOSX.exports create mode 100755 config/generate_combined_symbolsets_plist.sh create mode 100755 config/generate_symbolset_plist.sh create mode 100644 doc/allocators.md create mode 100644 doc/pac.md create mode 100644 doc/startup.md create mode 100644 doc/xnu_build_consolidation.md create mode 100644 iokit/IOKit/IOPMGR.h create mode 100644 iokit/IOKit/IOPlatformActions.h create mode 100644 iokit/IOKit/PassthruInterruptController.h create mode 100644 iokit/IOKit/platform/IOPlatformIO.h create mode 100644 iokit/Kernel/IOPMGR.cpp create mode 100644 iokit/Kernel/IOPlatformActions.cpp rename libkern/c++/OSCompat.cpp => iokit/Kernel/IOPlatformIO.cpp (64%) create mode 100644 iokit/Kernel/PassthruInterruptController.cpp create mode 100644 iokit/Kernel/arm/AppleARMSMP.cpp create mode 100644 libkern/c++/priority_queue.cpp rename osfmk/corecrypto/ccdigest/src/ccdigest_init.c => libkern/gen/OSSpinLock.c (76%) delete mode 100644 libkern/kxld/kxld_stubs.c create mode 100644 libkern/libkern/c++/OSAllocation.h rename osfmk/corecrypto/ccsha2/src/ccdigest_internal.h => libkern/libkern/c++/OSBoundedArray.h (72%) create mode 100644 libkern/libkern/c++/OSBoundedArrayRef.h rename osfmk/kern/processor_data.c => 
libkern/libkern/c++/OSBoundedPtr.h (64%) create mode 100644 libkern/libkern/c++/OSBoundedPtrFwd.h create mode 100644 libkern/libkern/c++/OSSharedPtr.h create mode 100644 libkern/libkern/c++/bounded_array.h create mode 100644 libkern/libkern/c++/bounded_array_ref.h create mode 100644 libkern/libkern/c++/bounded_ptr.h create mode 100644 libkern/libkern/c++/bounded_ptr_fwd.h create mode 100644 libkern/libkern/c++/intrusive_shared_ptr.h create mode 100644 libkern/libkern/c++/safe_allocation.h create mode 100644 libkern/libkern/ptrauth_utils.h create mode 100644 libkern/os/atomic.h create mode 100644 libkern/os/atomic_private.h create mode 100644 libkern/os/atomic_private_arch.h create mode 100644 libkern/os/atomic_private_impl.h create mode 100644 libkern/os/base_private.h delete mode 100644 libkern/os/object_private.h delete mode 100644 libkern/os/smart_ptr.h create mode 100644 libkern/ptrauth_utils.c create mode 100644 libsyscall/Platforms/DriverKit/arm64/syscall.map create mode 100644 libsyscall/Platforms/MacOSX/arm64/syscall.map create mode 100644 libsyscall/mach/mach_eventlink.c create mode 100644 libsyscall/mach/mach_eventlink.defs rename iokit/Kernel/x86_64/IOSharedLock.s => libsyscall/wrappers/cancelable/open.c (60%) delete mode 100644 libsyscall/wrappers/legacy/open.c create mode 100644 libsyscall/wrappers/open-base.c create mode 100644 libsyscall/wrappers/system-version-compat-support.c create mode 100644 libsyscall/wrappers/system-version-compat-support.h create mode 100644 libsyscall/wrappers/system-version-compat.c create mode 100644 osfmk/arm/caches_macros.s create mode 100644 osfmk/arm/commpage/commpage_asm.s create mode 100644 osfmk/arm/cpu_x86_64_capabilities.h create mode 100644 osfmk/arm/dwarf_unwind.h create mode 100644 osfmk/arm64/amcc_rorgn.c create mode 100644 osfmk/arm64/amcc_rorgn.h create mode 100644 osfmk/arm64/corecrypto/arm64_isa_compatibility.h create mode 100644 osfmk/arm64/corecrypto/sha256_compress_arm64.s create mode 100644 osfmk/arm64/dwarf_unwind.h create mode 100644 osfmk/arm64/hibernate_arm64.c create mode 100644 osfmk/arm64/hibernate_ppl_hmac.c create mode 100644 osfmk/arm64/hibernate_ppl_hmac.h create mode 100644 osfmk/arm64/hibernate_restore.c create mode 100644 osfmk/arm64/instructions.h create mode 100644 osfmk/arm64/memcmp_zero.s create mode 100644 osfmk/arm64/pac_asm.h create mode 100644 osfmk/arm64/pal_hibernate.h create mode 100644 osfmk/arm64/tunables/tunables.s create mode 100644 osfmk/arm64/tunables/tunables_h10.s create mode 100644 osfmk/arm64/tunables/tunables_h11.s create mode 100644 osfmk/arm64/tunables/tunables_h12.s create mode 100644 osfmk/arm64/tunables/tunables_h7.s create mode 100644 osfmk/arm64/tunables/tunables_h8.s create mode 100644 osfmk/arm64/tunables/tunables_h9.s rename osfmk/corecrypto/{cc/src => }/cc_abort.c (97%) rename osfmk/corecrypto/{cc/src => }/cc_clear.c (91%) rename osfmk/corecrypto/{cc/src => }/cc_cmp_safe.c (100%) rename osfmk/corecrypto/{ccsha1/src => }/ccdigest_final_64be.c (100%) create mode 100644 osfmk/corecrypto/ccdigest_init.c rename osfmk/corecrypto/{ccsha1/src => }/ccdigest_internal.h (100%) rename osfmk/corecrypto/{ccdigest/src => }/ccdigest_update.c (92%) rename osfmk/corecrypto/{ccdbrg/src => }/ccdrbg_nisthmac.c (100%) rename osfmk/corecrypto/{cchmac/src => }/cchmac.c (100%) rename osfmk/corecrypto/{cchmac/src => }/cchmac_final.c (100%) rename osfmk/corecrypto/{cchmac/src => }/cchmac_init.c (100%) rename osfmk/corecrypto/{cchmac/src => }/cchmac_update.c (100%) rename osfmk/corecrypto/{ccsha2/src => 
}/ccsha256_K.c (100%) rename osfmk/corecrypto/{ccsha2/src => }/ccsha256_di.c (100%) rename osfmk/corecrypto/{ccsha2/src => }/ccsha256_initial_state.c (100%) rename osfmk/corecrypto/{ccsha2/src => }/ccsha256_ltc_compress.c (100%) rename osfmk/corecrypto/{ccsha2/src => }/ccsha256_ltc_di.c (100%) rename osfmk/corecrypto/{ccsha2/src => }/ccsha2_internal.h (93%) create mode 100644 osfmk/i386/panic_notify.c create mode 100644 osfmk/i386/panic_notify.h delete mode 100644 osfmk/i386/startup64.c create mode 100644 osfmk/ipc/ipc_eventlink.c create mode 100644 osfmk/ipc/ipc_eventlink.h delete mode 100644 osfmk/kern/call_entry.h create mode 100644 osfmk/kern/cambria_layout.h rename osfmk/kern/{hv_support.c => hv_support_kext.c} (85%) create mode 100644 osfmk/kern/hv_support_kext.h create mode 100644 osfmk/kern/mach_filter.h create mode 100644 osfmk/kern/percpu.h delete mode 100644 osfmk/kern/priority_queue.c delete mode 100644 osfmk/kern/processor_data.h create mode 100644 osfmk/kern/zalloc_internal.h rename osfmk/kern/{zcache.h => zcache_internal.h} (74%) delete mode 100644 osfmk/kperf/arm/kperf_mp.c delete mode 100644 osfmk/kperf/kperf_timer.c delete mode 100644 osfmk/kperf/kperf_timer.h create mode 100644 osfmk/kperf/kptimer.c create mode 100644 osfmk/kperf/kptimer.h delete mode 100644 osfmk/kperf/x86_64/kperf_mp.c create mode 100644 osfmk/mach/arm/traps.h create mode 100644 osfmk/mach/audit_triggers_types.h create mode 100644 osfmk/mach/dyld_kernel_fixups.h create mode 100644 osfmk/mach/mach_eventlink.defs create mode 100644 osfmk/mach/mach_eventlink_types.h delete mode 100644 osfmk/machine/atomic_impl.h create mode 100644 osfmk/prng/entropy.c create mode 100644 osfmk/prng/entropy.h create mode 100644 osfmk/tests/ptrauth_data_tests.c create mode 100644 osfmk/tests/vfp_state_test.c create mode 100644 osfmk/vm/vm_tests.c create mode 100644 osfmk/x86_64/dwarf_unwind.h create mode 100644 pexpert/gen/kcformat.c delete mode 100644 pexpert/pexpert/arm/S7002.h delete mode 100644 pexpert/pexpert/arm64/AMCC.h create mode 100644 pexpert/pexpert/arm64/H7.h create mode 100644 pexpert/pexpert/arm64/H8.h create mode 100644 pexpert/pexpert/arm64/H9.h delete mode 100644 pexpert/pexpert/arm64/S8000.h delete mode 100644 pexpert/pexpert/arm64/T7000.h delete mode 100644 pexpert/pexpert/arm64/T8010.h create mode 100644 pexpert/pexpert/arm64/apple_arm64_common.h rename pexpert/pexpert/arm64/{arm64_common.h => apple_arm64_regs.h} (65%) delete mode 100644 pexpert/pexpert/arm64/hurricane.h delete mode 100644 pexpert/pexpert/arm64/twister.h delete mode 100644 pexpert/pexpert/arm64/typhoon.h delete mode 100644 security/mac_alloc.c delete mode 100644 security/mac_alloc.h delete mode 100644 security/mac_inet.c delete mode 100644 security/mac_net.c create mode 100644 tests/bounded_array.cpp create mode 100644 tests/bounded_array_ref.cpp create mode 100644 tests/bounded_array_ref_src/begin_end.cpp create mode 100644 tests/bounded_array_ref_src/compare.equal.nullptr.cpp create mode 100644 tests/bounded_array_ref_src/ctor.C_array.cpp create mode 100644 tests/bounded_array_ref_src/ctor.begin_end.cpp create mode 100644 tests/bounded_array_ref_src/ctor.bounded_array.cpp create mode 100644 tests/bounded_array_ref_src/ctor.bounded_ptr.cpp create mode 100644 tests/bounded_array_ref_src/ctor.default.cpp create mode 100644 tests/bounded_array_ref_src/ctor.raw_ptr.cpp create mode 100644 tests/bounded_array_ref_src/data.cpp create mode 100644 tests/bounded_array_ref_src/for_loop.cpp create mode 100644 
tests/bounded_array_ref_src/operator.bool.cpp create mode 100644 tests/bounded_array_ref_src/operator.subscript.cpp create mode 100644 tests/bounded_array_ref_src/size.cpp create mode 100644 tests/bounded_array_ref_src/slice.cpp create mode 100644 tests/bounded_array_ref_src/test_policy.h create mode 100644 tests/bounded_array_src/begin_end.cpp create mode 100644 tests/bounded_array_src/ctor.aggregate_init.cpp create mode 100644 tests/bounded_array_src/ctor.default.cpp create mode 100644 tests/bounded_array_src/data.cpp create mode 100644 tests/bounded_array_src/for_loop.cpp create mode 100644 tests/bounded_array_src/operator.subscript.cpp create mode 100644 tests/bounded_array_src/size.cpp create mode 100644 tests/bounded_array_src/test_policy.h create mode 100644 tests/bounded_ptr.cpp create mode 100644 tests/bounded_ptr_03.cpp create mode 100644 tests/bounded_ptr_src/arith.add.cpp create mode 100644 tests/bounded_ptr_src/arith.add_assign.cpp create mode 100644 tests/bounded_ptr_src/arith.difference.cpp create mode 100644 tests/bounded_ptr_src/arith.inc_dec.cpp create mode 100644 tests/bounded_ptr_src/arith.subtract.cpp create mode 100644 tests/bounded_ptr_src/arith.subtract_assign.cpp create mode 100644 tests/bounded_ptr_src/assign.convert.cpp create mode 100644 tests/bounded_ptr_src/assign.nullptr.cpp create mode 100644 tests/bounded_ptr_src/compare.equal.cpp create mode 100644 tests/bounded_ptr_src/compare.equal.nullptr.cpp create mode 100644 tests/bounded_ptr_src/compare.equal.raw.cpp create mode 100644 tests/bounded_ptr_src/compare.order.cpp create mode 100644 tests/bounded_ptr_src/compare.order.raw.cpp create mode 100644 tests/bounded_ptr_src/ctor.begin_end.cpp create mode 100644 tests/bounded_ptr_src/ctor.convert.cpp create mode 100644 tests/bounded_ptr_src/ctor.default.cpp create mode 100644 tests/bounded_ptr_src/ctor.nullptr.cpp create mode 100644 tests/bounded_ptr_src/deref.cpp create mode 100644 tests/bounded_ptr_src/discard_bounds.cpp create mode 100644 tests/bounded_ptr_src/example.malloc.cpp create mode 100644 tests/bounded_ptr_src/operator_bool.cpp create mode 100644 tests/bounded_ptr_src/reinterpret_cast.cpp create mode 100644 tests/bounded_ptr_src/subscript.cpp create mode 100644 tests/bounded_ptr_src/test_utils.h create mode 100644 tests/bounded_ptr_src/unsafe_discard_bounds.cpp create mode 100644 tests/correct_kernel_booted.c create mode 100644 tests/decompression_failure.c create mode 100644 tests/dirtiness_tracking.c create mode 100644 tests/driverkit/Makefile create mode 100644 tests/driverkit/test_intentionally_crashing_driver_56101852/Info.plist create mode 100644 tests/driverkit/test_intentionally_crashing_driver_56101852/test_intentionally_crashing_driver_56101852.cpp create mode 100644 tests/driverkit/test_intentionally_crashing_driver_56101852/test_intentionally_crashing_driver_56101852.entitlements create mode 100644 tests/driverkit/test_intentionally_crashing_driver_56101852/test_intentionally_crashing_driver_56101852.iig create mode 100644 tests/exc_helpers.c create mode 100644 tests/exc_helpers.h create mode 100644 tests/fd.c create mode 100644 tests/fd_aio_fsync_uaf.c create mode 100644 tests/fduiomove.c create mode 100644 tests/filter_policy.c create mode 100644 tests/flow_div_doubleconnect_55917185.c create mode 100644 tests/ftruncate.c create mode 100644 tests/get_shared_cache_address.c create mode 100644 tests/hv_private.entitlements create mode 100644 tests/hv_public.entitlements create mode 100644 tests/hvbench.c create mode 100644 
tests/hvtest_x86.m create mode 100644 tests/hvtest_x86_asm.s create mode 100644 tests/hvtest_x86_guest.c create mode 100644 tests/hvtest_x86_guest.h create mode 100644 tests/inspect_port.c create mode 100644 tests/intrusive_shared_ptr.cpp create mode 100644 tests/intrusive_shared_ptr_src/abi.callee.raw.cpp create mode 100644 tests/intrusive_shared_ptr_src/abi.callee.smart.cpp create mode 100644 tests/intrusive_shared_ptr_src/abi.caller.raw.cpp create mode 100644 tests/intrusive_shared_ptr_src/abi.caller.smart.cpp create mode 100644 tests/intrusive_shared_ptr_src/abi.size_alignment.cpp create mode 100644 tests/intrusive_shared_ptr_src/abi_helper.h create mode 100644 tests/intrusive_shared_ptr_src/assign.copy.cpp create mode 100644 tests/intrusive_shared_ptr_src/assign.move.cpp create mode 100644 tests/intrusive_shared_ptr_src/assign.nullptr.cpp create mode 100644 tests/intrusive_shared_ptr_src/cast.const.cpp create mode 100644 tests/intrusive_shared_ptr_src/cast.reinterpret.cpp create mode 100644 tests/intrusive_shared_ptr_src/cast.static.cpp create mode 100644 tests/intrusive_shared_ptr_src/compare.equal.cpp create mode 100644 tests/intrusive_shared_ptr_src/compare.equal.nullptr.cpp create mode 100644 tests/intrusive_shared_ptr_src/compare.equal.raw.cpp create mode 100644 tests/intrusive_shared_ptr_src/ctor.copy.cpp create mode 100644 tests/intrusive_shared_ptr_src/ctor.default.cpp create mode 100644 tests/intrusive_shared_ptr_src/ctor.move.cpp create mode 100644 tests/intrusive_shared_ptr_src/ctor.nullptr.cpp create mode 100644 tests/intrusive_shared_ptr_src/ctor.ptr.no_retain.cpp create mode 100644 tests/intrusive_shared_ptr_src/ctor.ptr.retain.cpp create mode 100644 tests/intrusive_shared_ptr_src/deref.cpp create mode 100644 tests/intrusive_shared_ptr_src/detach.cpp create mode 100644 tests/intrusive_shared_ptr_src/dtor.cpp create mode 100644 tests/intrusive_shared_ptr_src/get.cpp create mode 100644 tests/intrusive_shared_ptr_src/operator.bool.cpp create mode 100644 tests/intrusive_shared_ptr_src/reset.cpp create mode 100644 tests/intrusive_shared_ptr_src/reset.no_retain.cpp create mode 100644 tests/intrusive_shared_ptr_src/reset.retain.cpp create mode 100644 tests/intrusive_shared_ptr_src/swap.cpp create mode 100644 tests/intrusive_shared_ptr_src/test_policy.h create mode 100644 tests/invalid_setaudit_57414044.c create mode 100644 tests/ioconnectasyncmethod_57641955.c create mode 100644 tests/ipsec.entitlements create mode 100644 tests/ipsec.m create mode 100644 tests/jitbox-entitlements.plist create mode 100644 tests/jumbo_va_spaces_52551256.entitlements create mode 100644 tests/kas_info.c create mode 100644 tests/kernel_symbolication_entitlements.plist delete mode 100644 tests/kernel_uuid_match.c create mode 100644 tests/mach_eventlink.c create mode 100644 tests/memcmp_zero.c create mode 100644 tests/netagent_kctl_header_infodisc_56190773.c delete mode 100644 tests/no32exec_35914211.c delete mode 100644 tests/no32exec_35914211_helper.c create mode 100644 tests/os_atomic.cpp delete mode 100644 tests/osptr.cpp create mode 100644 tests/osptr_compat.cpp delete mode 100644 tests/osptr_dumb.cpp delete mode 100644 tests/osptr_helper.cpp create mode 100644 tests/pfz.c create mode 100644 tests/pid_for_task_test.c create mode 100644 tests/pipe_read_infloop_55437634.c create mode 100644 tests/posix_spawn_archpref.c create mode 100644 tests/posix_spawn_archpref_helper.c create mode 100644 tests/preoslog.c create mode 100644 tests/priority_queue.cpp create mode 100644 
tests/proc_pidpath_audittoken.c create mode 100644 tests/proc_rlimit.c create mode 100644 tests/ptrauth-entitlements.plist create mode 100644 tests/ptrauth_data_tests.c create mode 100644 tests/ptrauth_failure.c create mode 100644 tests/rename_excl.c create mode 100644 tests/safe_allocation.cpp create mode 100644 tests/safe_allocation_src/assign.copy.cpp create mode 100644 tests/safe_allocation_src/assign.move.cpp create mode 100644 tests/safe_allocation_src/assign.nullptr.cpp create mode 100644 tests/safe_allocation_src/begin_end.cpp create mode 100644 tests/safe_allocation_src/compare.equal.nullptr.cpp create mode 100644 tests/safe_allocation_src/ctor.adopt.cpp create mode 100644 tests/safe_allocation_src/ctor.allocate.cpp create mode 100644 tests/safe_allocation_src/ctor.copy.cpp create mode 100644 tests/safe_allocation_src/ctor.default.cpp create mode 100644 tests/safe_allocation_src/ctor.move.cpp create mode 100644 tests/safe_allocation_src/ctor.nullptr.cpp create mode 100644 tests/safe_allocation_src/data.cpp create mode 100644 tests/safe_allocation_src/dtor.cpp create mode 100644 tests/safe_allocation_src/operator.bool.cpp create mode 100644 tests/safe_allocation_src/operator.subscript.cpp create mode 100644 tests/safe_allocation_src/size.cpp create mode 100644 tests/safe_allocation_src/swap.cpp create mode 100644 tests/safe_allocation_src/test_utils.h create mode 100644 tests/safe_allocation_src/usage.for_loop.cpp create mode 100644 tests/safe_allocation_src/usage.two_dimensions.cpp create mode 100644 tests/sbuf_tests.c create mode 100644 tests/scanf.c create mode 100644 tests/sched_cluster_bound_threads.c create mode 100644 tests/select_stress.c create mode 100644 tests/shared_cache_reslide_test.c create mode 100644 tests/signal_stack.c create mode 100644 tests/sr_entitlement.c create mode 100644 tests/sr_entitlement.entitlements create mode 100644 tests/sr_entitlement_helper.c create mode 100644 tests/stackshot_translated_child.c create mode 100644 tests/subsystem_root_path-entitlements.plist create mode 100644 tests/subsystem_root_path.c create mode 100644 tests/subsystem_root_path.h create mode 100644 tests/subsystem_root_path_helper.c create mode 100644 tests/sysctl_hw.c create mode 100644 tests/sysctl_system_version.c create mode 100644 tests/system_version_compat.c create mode 100644 tests/task_filter_msg.c create mode 100644 tests/task_policy.c create mode 100644 tests/task_policy_entitlement.plist create mode 100644 tests/test_dext_launch_56101852.c create mode 100644 tests/test_dext_launch_56101852.entitlements create mode 100644 tests/test_sysctl_kern_procargs_25397314.m create mode 100644 tests/thread_set_state_arm64_cpsr.c create mode 100644 tests/ulock.c create mode 100644 tests/vm/entitlement_increased_memory_limit.c create mode 100644 tests/vm/entitlement_increased_memory_limit.entitlements create mode 100644 tests/vm/fault_throughput.c create mode 100755 tests/vm/fault_throughput.lua create mode 100644 tests/vm/fault_throughput.plist create mode 100644 tests/vm/kern_max_task_pmem.c create mode 100644 tests/vm/memorystatus_sort_test.c create mode 100644 tests/vm/perf_helpers.c create mode 100644 tests/vm/perf_helpers.h create mode 100644 tests/vm/perf_madvise.c create mode 100755 tests/vm/perf_madvise.lua create mode 100644 tests/vm/vm_allocation.c create mode 100644 tests/vm/zone_gc_replenish_test.c create mode 100644 tests/vm_kern_count_wired_kernelcache.c create mode 100644 tests/vm_memory_tests_src/common.c create mode 100644 
tests/vm_memory_tests_src/mach_vm_tests.h create mode 100644 tests/vm_memory_tests_src/main.c create mode 100644 tests/vm_memory_tests_src/server.c create mode 100644 tests/vm_memory_tests_src/vm_tests.c create mode 100644 tests/vm_spawn_tool.c create mode 100644 tests/vm_test_code_signing_helper.c create mode 100644 tests/vm_test_mach_map.c create mode 100644 tests/vsock.c delete mode 100644 tests/wired_mem_bench.c create mode 100644 tests/work_interval_test_unentitled.c create mode 100644 tests/x18.c create mode 100644 tests/zalloc.c delete mode 100755 tools/lldbmacros/atm.py diff --git a/.gitignore b/.gitignore index 70d6a4014..1c0f5d497 100644 --- a/.gitignore +++ b/.gitignore @@ -7,65 +7,21 @@ build/ *~ *.swp -# JSON compilation definitions (for the YouCompleteMe vim plugin) +# JSON compilation definitions compile_commands.json +# From the various build systems +*.xcworkspace +xcuserdata +*.pyc + # / /.remotebuild_credential /cscope.* /TAGS /tags -# /libkern/c++/Tests/TestSerialization/test1/test1.xcodeproj/ -/libkern/c++/Tests/TestSerialization/test1/test1.xcodeproj/xcuserdata - -# /libkern/c++/Tests/TestSerialization/test2/test2.xcodeproj/ -/libkern/c++/Tests/TestSerialization/test2/test2.xcodeproj/xcuserdata - -# /libkern/kmod/libkmod.xcodeproj/ -/libkern/kmod/libkmod.xcodeproj/xcuserdata - -# /libkdd/kdd.xcodeproj/ -/libkdd/kdd.xcodeproj/xcuserdata - -# /libsyscall/Libsyscall.xcodeproj/ -/libsyscall/Libsyscall.xcodeproj/xcuserdata -/libsyscall/Libsyscall.xcodeproj/project.xcworkspace - -# /tools/lldbmacros/ -/tools/lldbmacros/*.pyc - -# /tools/lldbmacros/core/ -/tools/lldbmacros/core/*.pyc - -# /tools/lldbmacros/plugins/ -/tools/lldbmacros/plugins/*.pyc - -# /tools/tests/perf_index/PerfIndex_COPS_Module/PerfIndex.xcodeproj/ -/tools/tests/perf_index/PerfIndex_COPS_Module/PerfIndex.xcodeproj/xcuserdata - -# /tools/tests/testkext/testkext.xcodeproj/ -/tools/tests/testkext/testkext.xcodeproj/xcuserdata - -#/tools/tests/unit_tests/cpu_monitor_tests_11646922_src/CatchRN/CatchRN.xcodeproj/ -/tools/tests/unit_tests/cpu_monitor_tests_11646922_src/CatchRN/CatchRN.xcodeproj/xcuserdata - -# /tools/tests/unit_tests/cpu_monitor_tests_11646922_src/cpu_hog/cpu_hog.xcodeproj/ -/tools/tests/unit_tests/cpu_monitor_tests_11646922_src/cpu_hog/cpu_hog.xcodeproj/xcuserdata - -# /tools/tests/unit_tests/mach_test_15789220_src/mach_test.xcodeproj/ -/tools/tests/unit_tests/mach_test_15789220_src/mach_test.xcodeproj/xcuserdata - -# /tools/tests/unit_tests/monitor_stress_12901965_src/monitor_stress.xcodeproj/ -/tools/tests/unit_tests/monitor_stress_12901965_src/monitor_stress.xcodeproj/xcuserdata - -# /tools/tests/unit_tests/monitor_stress_12901965_src/monitor_stress.xcodeproj/project.xcworkspace/ -/tools/tests/unit_tests/monitor_stress_12901965_src/monitor_stress.xcodeproj/project.xcworkspace/xcuserdata - -#/tools/tests/unit_tests/test_14395574/test_14395574.xcodeproj/ -/tools/tests/unit_tests/test_14395574/test_14395574.xcodeproj/xcuserdata - # /tools/tests/zero-to-n /tools/tests/zero-to-n/zn* -# do not add *.orig, *.rej, use `git clean` instead +# XXX do not add *.orig, *.rej, use `git clean` instead diff --git a/EXTERNAL_HEADERS/acpi/Acpi.h b/EXTERNAL_HEADERS/acpi/Acpi.h new file mode 100644 index 000000000..5dcc2a279 --- /dev/null +++ b/EXTERNAL_HEADERS/acpi/Acpi.h @@ -0,0 +1,462 @@ +/****************************************************************************** + * + * Name: actbl.h - Basic ACPI Table Definitions + * $Revision: 1.7 $ + * + 
*****************************************************************************/ + +/****************************************************************************** + * + * 1. Copyright Notice + * + * Some or all of this work - Copyright (c) 1999 - 2006, Intel Corp. + * All rights reserved. + * + * 2. License + * + * 2.1. This is your license from Intel Corp. under its intellectual property + * rights. You may have additional license terms from the party that provided + * you this software, covering your right to use that party's intellectual + * property rights. + * + * 2.2. Intel grants, free of charge, to any person ("Licensee") obtaining a + * copy of the source code appearing in this file ("Covered Code") an + * irrevocable, perpetual, worldwide license under Intel's copyrights in the + * base code distributed originally by Intel ("Original Intel Code") to copy, + * make derivatives, distribute, use and display any portion of the Covered + * Code in any form, with the right to sublicense such rights; and + * + * 2.3. Intel grants Licensee a non-exclusive and non-transferable patent + * license (with the right to sublicense), under only those claims of Intel + * patents that are infringed by the Original Intel Code, to make, use, sell, + * offer to sell, and import the Covered Code and derivative works thereof + * solely to the minimum extent necessary to exercise the above copyright + * license, and in no event shall the patent license extend to any additions + * to or modifications of the Original Intel Code. No other license or right + * is granted directly or by implication, estoppel or otherwise; + * + * The above copyright and patent license is granted only if the following + * conditions are met: + * + * 3. Conditions + * + * 3.1. Redistribution of Source with Rights to Further Distribute Source. + * Redistribution of source code of any substantial portion of the Covered + * Code or modification with rights to further distribute source must include + * the above Copyright Notice, the above License, this list of Conditions, + * and the following Disclaimer and Export Compliance provision. In addition, + * Licensee must cause all Covered Code to which Licensee contributes to + * contain a file documenting the changes Licensee made to create that Covered + * Code and the date of any change. Licensee must include in that file the + * documentation of any changes made by any predecessor Licensee. Licensee + * must include a prominent statement that the modification is derived, + * directly or indirectly, from Original Intel Code. + * + * 3.2. Redistribution of Source with no Rights to Further Distribute Source. + * Redistribution of source code of any substantial portion of the Covered + * Code or modification without rights to further distribute source must + * include the following Disclaimer and Export Compliance provision in the + * documentation and/or other materials provided with distribution. In + * addition, Licensee may not authorize further sublicense of source of any + * portion of the Covered Code, and must include terms to the effect that the + * license from Licensee to its licensee is limited to the intellectual + * property embodied in the software Licensee provides to its licensee, and + * not to intellectual property embodied in modifications its licensee may + * make. + * + * 3.3. Redistribution of Executable. 
Redistribution in executable form of any + * substantial portion of the Covered Code or modification must reproduce the + * above Copyright Notice, and the following Disclaimer and Export Compliance + * provision in the documentation and/or other materials provided with the + * distribution. + * + * 3.4. Intel retains all right, title, and interest in and to the Original + * Intel Code. + * + * 3.5. Neither the name Intel nor any other trademark owned or controlled by + * Intel shall be used in advertising or otherwise to promote the sale, use or + * other dealings in products derived from or relating to the Covered Code + * without prior written authorization from Intel. + * + * 4. Disclaimer and Export Compliance + * + * 4.1. INTEL MAKES NO WARRANTY OF ANY KIND REGARDING ANY SOFTWARE PROVIDED + * HERE. ANY SOFTWARE ORIGINATING FROM INTEL OR DERIVED FROM INTEL SOFTWARE + * IS PROVIDED "AS IS," AND INTEL WILL NOT PROVIDE ANY SUPPORT, ASSISTANCE, + * INSTALLATION, TRAINING OR OTHER SERVICES. INTEL WILL NOT PROVIDE ANY + * UPDATES, ENHANCEMENTS OR EXTENSIONS. INTEL SPECIFICALLY DISCLAIMS ANY + * IMPLIED WARRANTIES OF MERCHANTABILITY, NONINFRINGEMENT AND FITNESS FOR A + * PARTICULAR PURPOSE. + * + * 4.2. IN NO EVENT SHALL INTEL HAVE ANY LIABILITY TO LICENSEE, ITS LICENSEES + * OR ANY OTHER THIRD PARTY, FOR ANY LOST PROFITS, LOST DATA, LOSS OF USE OR + * COSTS OF PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES, OR FOR ANY INDIRECT, + * SPECIAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THIS AGREEMENT, UNDER ANY + * CAUSE OF ACTION OR THEORY OF LIABILITY, AND IRRESPECTIVE OF WHETHER INTEL + * HAS ADVANCE NOTICE OF THE POSSIBILITY OF SUCH DAMAGES. THESE LIMITATIONS + * SHALL APPLY NOTWITHSTANDING THE FAILURE OF THE ESSENTIAL PURPOSE OF ANY + * LIMITED REMEDY. + * + * 4.3. Licensee shall not export, either directly or indirectly, any of this + * software or system incorporating such software without first obtaining any + * required license or other approval from the U. S. Department of Commerce or + * any other agency or department of the United States Government. In the + * event Licensee exports any such software from the United States or + * re-exports any such software from a foreign destination, Licensee shall + * ensure that the distribution and export/re-export of the software is in + * compliance with all laws, regulations, orders, or other restrictions of the + * U.S. Export Administration Regulations. Licensee agrees that neither it nor + * any of its subsidiaries will export/re-export any technical data, process, + * software, or service, directly or indirectly, to any country for which the + * United States government or any agency thereof requires an export license, + * other governmental approval, or letter of assurance, without first obtaining + * such license, approval or letter. + * + *****************************************************************************/ + +#ifndef __ACTBL_H__ +#define __ACTBL_H__ + +/* + * Values for description table header signatures. Useful because they make + * it more difficult to inadvertently type in the wrong signature. 
+ */ +#define DSDT_SIG "DSDT" /* Differentiated System Description Table */ +#define FADT_SIG "FACP" /* Fixed ACPI Description Table */ +#define FACS_SIG "FACS" /* Firmware ACPI Control Structure */ +#define PSDT_SIG "PSDT" /* Persistent System Description Table */ +#define RSDP_SIG "RSD PTR " /* Root System Description Pointer */ +#define RSDT_SIG "RSDT" /* Root System Description Table */ +#define XSDT_SIG "XSDT" /* Extended System Description Table */ +#define SSDT_SIG "SSDT" /* Secondary System Description Table */ +#define RSDP_NAME "RSDP" + + +/* + * All tables and structures must be byte-packed to match the ACPI + * specification, since the tables are provided by the system BIOS + */ +#pragma pack(1) + + +/* + * These are the ACPI tables that are directly consumed by the subsystem. + * + * The RSDP and FACS do not use the common ACPI table header. All other ACPI + * tables use the header. + * + * Note about bitfields: The UINT8 type is used for bitfields in ACPI tables. + * This is the only type that is even remotely portable. Anything else is not + * portable, so do not use any other bitfield types. + */ + +/******************************************************************************* + * + * ACPI Table Header. This common header is used by all tables except the + * RSDP and FACS. The define is used for direct inclusion of header into + * other ACPI tables + * + ******************************************************************************/ + +#define ACPI_TABLE_HEADER_DEF \ +char Signature[4]; /* ASCII table signature */ \ +UINT32 Length; /* Length of table in bytes, including this header */ \ +UINT8 Revision; /* ACPI Specification minor version # */ \ +UINT8 Checksum; /* To make sum of entire table == 0 */ \ +char OemId[6]; /* ASCII OEM identification */ \ +char OemTableId[8]; /* ASCII OEM table identification */ \ +UINT32 OemRevision; /* OEM revision number */ \ +char AslCompilerId[4]; /* ASCII ASL compiler vendor ID */ \ +UINT32 AslCompilerRevision; /* ASL compiler version */ + +typedef struct acpi_table_header { + ACPI_TABLE_HEADER_DEF +} ACPI_TABLE_HEADER; + + +/* + * GAS - Generic Address Structure (ACPI 2.0+) + */ +typedef struct acpi_generic_address { + UINT8 AddressSpaceId; /* Address space where struct or register exists */ + UINT8 RegisterBitWidth; /* Size in bits of given register */ + UINT8 RegisterBitOffset; /* Bit offset within the register */ + UINT8 AccessWidth; /* Minimum Access size (ACPI 3.0) */ + UINT64 Address; /* 64-bit address of struct or register */ +} ACPI_GENERIC_ADDRESS; + + +/******************************************************************************* + * + * RSDP - Root System Description Pointer (Signature is "RSD PTR ") + * + ******************************************************************************/ + +typedef struct rsdp_descriptor { + char Signature[8]; /* ACPI signature, contains "RSD PTR " */ + UINT8 Checksum; /* ACPI 1.0 checksum */ + char OemId[6]; /* OEM identification */ + UINT8 Revision; /* Must be (0) for ACPI 1.0 or (2) for ACPI 2.0+ */ + UINT32 RsdtPhysicalAddress;/* 32-bit physical address of the RSDT */ + UINT32 Length; /* Table length in bytes, including header (ACPI 2.0+) */ + UINT64 XsdtPhysicalAddress;/* 64-bit physical address of the XSDT (ACPI 2.0+) */ + UINT8 ExtendedChecksum; /* Checksum of entire table (ACPI 2.0+) */ + UINT8 Reserved[3]; /* Reserved, must be zero */ +} RSDP_DESCRIPTOR; + +#define ACPI_RSDP_REV0_SIZE 20 /* Size of original ACPI 1.0 RSDP */ + + 
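[Editorial aside, not part of the patch.] The packed layouts above are consumed by summing raw bytes: the ACPI specification requires every table's bytes, including its Checksum field, to sum to zero modulo 256. For the RSDP, the ACPI 1.0 checksum covers only the first 20 bytes (ACPI_RSDP_REV0_SIZE), while the ExtendedChecksum on revision 2+ covers the full Length. A minimal sketch of that validation, using standard C types rather than the kernel's UINT8/UINT32 aliases (the caller shown is hypothetical):

#include <stddef.h>
#include <stdint.h>

/* ACPI checksum rule: all bytes of the covered region sum to 0 (mod 256). */
static int
acpi_region_checksum_ok(const void *region, size_t length)
{
        const uint8_t *bytes = region;
        uint8_t sum = 0;

        for (size_t i = 0; i < length; i++) {
                sum = (uint8_t)(sum + bytes[i]);
        }
        return sum == 0;
}

/*
 * Hypothetical caller: 'rsdp' points at a candidate RSDP_DESCRIPTOR,
 * 'revision' and 'length' are its Revision and Length fields.
 */
static int
rsdp_checksum_ok(const uint8_t *rsdp, uint8_t revision, uint32_t length)
{
        /* Revision 0: only the original 20-byte structure is covered. */
        if (!acpi_region_checksum_ok(rsdp, 20 /* ACPI_RSDP_REV0_SIZE */)) {
                return 0;
        }
        /* Revision 2+: ExtendedChecksum covers the whole table (Length). */
        if (revision >= 2 && !acpi_region_checksum_ok(rsdp, length)) {
                return 0;
        }
        return 1;
}

Any table that begins with ACPI_TABLE_HEADER_DEF (an RSDT, XSDT, FADT, and so on) is checked the same way over its header's Length field.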
+/******************************************************************************* + * + * RSDT/XSDT - Root System Description Tables + * + ******************************************************************************/ + +typedef struct rsdt_descriptor { + ACPI_TABLE_HEADER_DEF + UINT32 TableOffsetEntry[1];/* Array of pointers to ACPI tables */ +} RSDT_DESCRIPTOR; + +typedef struct xsdt_descriptor { + ACPI_TABLE_HEADER_DEF + UINT64 TableOffsetEntry[1];/* Array of pointers to ACPI tables */ +} XSDT_DESCRIPTOR; + + +/******************************************************************************* + * + * FACS - Firmware ACPI Control Structure (FACS) + * + ******************************************************************************/ + +typedef struct facs_descriptor { + char Signature[4]; /* ASCII table signature */ + UINT32 Length; /* Length of structure, in bytes */ + UINT32 HardwareSignature; /* Hardware configuration signature */ + UINT32 FirmwareWakingVector;/* 32-bit physical address of the Firmware Waking Vector */ + UINT32 GlobalLock; /* Global Lock for shared hardware resources */ + + /* Flags (32 bits) */ + + UINT8 S4Bios_f : 1;/* 00: S4BIOS support is present */ + UINT8 : 7;/* 01-07: Reserved, must be zero */ + UINT8 Reserved1[3]; /* 08-31: Reserved, must be zero */ + + UINT64 XFirmwareWakingVector;/* 64-bit version of the Firmware Waking Vector (ACPI 2.0+) */ + UINT8 Version; /* Version of this table (ACPI 2.0+) */ + UINT8 Reserved[31]; /* Reserved, must be zero */ +} FACS_DESCRIPTOR; + +#define ACPI_GLOCK_PENDING 0x01 /* 00: Pending global lock ownership */ +#define ACPI_GLOCK_OWNED 0x02 /* 01: Global lock is owned */ + + +/* + * Common FACS - This is a version-independent FACS structure used for internal use only + */ +typedef struct acpi_common_facs { + UINT32 *GlobalLock; + UINT64 *FirmwareWakingVector; + UINT8 VectorWidth; +} ACPI_COMMON_FACS; + + +/******************************************************************************* + * + * FADT - Fixed ACPI Description Table (Signature "FACP") + * + ******************************************************************************/ + +/* Fields common to all versions of the FADT */ + +#define ACPI_FADT_COMMON \ +ACPI_TABLE_HEADER_DEF \ +UINT32 V1_FirmwareCtrl; /* 32-bit physical address of FACS */ \ +UINT32 V1_Dsdt; /* 32-bit physical address of DSDT */ \ +UINT8 Reserved1; /* System Interrupt Model isn't used in ACPI 2.0*/ \ +UINT8 Prefer_PM_Profile; /* Conveys preferred power management profile to OSPM. 
*/ \ +UINT16 SciInt; /* System vector of SCI interrupt */ \ +UINT32 SmiCmd; /* Port address of SMI command port */ \ +UINT8 AcpiEnable; /* Value to write to smi_cmd to enable ACPI */ \ +UINT8 AcpiDisable; /* Value to write to smi_cmd to disable ACPI */ \ +UINT8 S4BiosReq; /* Value to write to SMI CMD to enter S4BIOS state */ \ +UINT8 PstateCnt; /* Processor performance state control*/ \ +UINT32 V1_Pm1aEvtBlk; /* Port address of Power Mgt 1a Event Reg Blk */ \ +UINT32 V1_Pm1bEvtBlk; /* Port address of Power Mgt 1b Event Reg Blk */ \ +UINT32 V1_Pm1aCntBlk; /* Port address of Power Mgt 1a Control Reg Blk */ \ +UINT32 V1_Pm1bCntBlk; /* Port address of Power Mgt 1b Control Reg Blk */ \ +UINT32 V1_Pm2CntBlk; /* Port address of Power Mgt 2 Control Reg Blk */ \ +UINT32 V1_PmTmrBlk; /* Port address of Power Mgt Timer Ctrl Reg Blk */ \ +UINT32 V1_Gpe0Blk; /* Port addr of General Purpose AcpiEvent 0 Reg Blk */ \ +UINT32 V1_Gpe1Blk; /* Port addr of General Purpose AcpiEvent 1 Reg Blk */ \ +UINT8 Pm1EvtLen; /* Byte Length of ports at pm1X_evt_blk */ \ +UINT8 Pm1CntLen; /* Byte Length of ports at pm1X_cnt_blk */ \ +UINT8 Pm2CntLen; /* Byte Length of ports at pm2_cnt_blk */ \ +UINT8 PmTmLen; /* Byte Length of ports at pm_tm_blk */ \ +UINT8 Gpe0BlkLen; /* Byte Length of ports at gpe0_blk */ \ +UINT8 Gpe1BlkLen; /* Byte Length of ports at gpe1_blk */ \ +UINT8 Gpe1Base; /* Offset in gpe model where gpe1 events start */ \ +UINT8 CstCnt; /* Support for the _CST object and C States change notification.*/ \ +UINT16 Plvl2Lat; /* Worst case HW latency to enter/exit C2 state */ \ +UINT16 Plvl3Lat; /* Worst case HW latency to enter/exit C3 state */ \ +UINT16 FlushSize; /* Processor's memory cache line width, in bytes */ \ +UINT16 FlushStride; /* Number of flush strides that need to be read */ \ +UINT8 DutyOffset; /* Processor's duty cycle index in processor's P_CNT reg*/ \ +UINT8 DutyWidth; /* Processor's duty cycle value bit width in P_CNT register.*/ \ +UINT8 DayAlrm; /* Index to day-of-month alarm in RTC CMOS RAM */ \ +UINT8 MonAlrm; /* Index to month-of-year alarm in RTC CMOS RAM */ \ +UINT8 Century; /* Index to century in RTC CMOS RAM */ \ +UINT16 IapcBootArch; /* IA-PC Boot Architecture Flags. 
See Table 5-10 for description*/ \ +UINT8 Reserved2; /* Reserved, must be zero */ + + +/* + * ACPI 2.0+ FADT + */ +typedef struct fadt_descriptor { + ACPI_FADT_COMMON + + /* Flags (32 bits) */ + + UINT8 WbInvd : 1;/* 00: The wbinvd instruction works properly */ + UINT8 WbInvdFlush : 1;/* 01: The wbinvd flushes but does not invalidate */ + UINT8 ProcC1 : 1;/* 02: All processors support C1 state */ + UINT8 Plvl2Up : 1;/* 03: C2 state works on MP system */ + UINT8 PwrButton : 1;/* 04: Power button is handled as a generic feature */ + UINT8 SleepButton : 1;/* 05: Sleep button is handled as a generic feature, or not present */ + UINT8 FixedRTC : 1;/* 06: RTC wakeup stat not in fixed register space */ + UINT8 Rtcs4 : 1;/* 07: RTC wakeup stat not possible from S4 */ + UINT8 TmrValExt : 1;/* 08: tmr_val is 32 bits 0=24-bits */ + UINT8 DockCap : 1;/* 09: Docking supported */ + UINT8 ResetRegSup : 1;/* 10: System reset via the FADT RESET_REG supported */ + UINT8 SealedCase : 1;/* 11: No internal expansion capabilities and case is sealed */ + UINT8 Headless : 1;/* 12: No local video capabilities or local input devices */ + UINT8 CpuSwSleep : 1;/* 13: Must execute native instruction after writing SLP_TYPx register */ + + UINT8 PciExpWak : 1;/* 14: System supports PCIEXP_WAKE (STS/EN) bits (ACPI 3.0) */ + UINT8 UsePlatformClock : 1;/* 15: OSPM should use platform-provided timer (ACPI 3.0) */ + UINT8 S4RtcStsValid : 1;/* 16: Contents of RTC_STS valid after S4 wake (ACPI 3.0) */ + UINT8 RemotePowerOnCapable : 1;/* 17: System is compatible with remote power on (ACPI 3.0) */ + UINT8 ForceApicClusterModel : 1;/* 18: All local APICs must use cluster model (ACPI 3.0) */ + UINT8 ForceApicPhysicalDestinationMode : 1;/* 19: All local xAPICs must use physical dest mode (ACPI 3.0) */ + UINT8 : 4;/* 20-23: Reserved, must be zero */ + UINT8 Reserved3; /* 24-31: Reserved, must be zero */ + + ACPI_GENERIC_ADDRESS ResetRegister; /* Reset register address in GAS format */ + UINT8 ResetValue; /* Value to write to the ResetRegister port to reset the system */ + UINT8 Reserved4[3]; /* These three bytes must be zero */ + UINT64 XFirmwareCtrl; /* 64-bit physical address of FACS */ + UINT64 XDsdt; /* 64-bit physical address of DSDT */ + ACPI_GENERIC_ADDRESS XPm1aEvtBlk; /* Extended Power Mgt 1a AcpiEvent Reg Blk address */ + ACPI_GENERIC_ADDRESS XPm1bEvtBlk; /* Extended Power Mgt 1b AcpiEvent Reg Blk address */ + ACPI_GENERIC_ADDRESS XPm1aCntBlk; /* Extended Power Mgt 1a Control Reg Blk address */ + ACPI_GENERIC_ADDRESS XPm1bCntBlk; /* Extended Power Mgt 1b Control Reg Blk address */ + ACPI_GENERIC_ADDRESS XPm2CntBlk; /* Extended Power Mgt 2 Control Reg Blk address */ + ACPI_GENERIC_ADDRESS XPmTmrBlk; /* Extended Power Mgt Timer Ctrl Reg Blk address */ + ACPI_GENERIC_ADDRESS XGpe0Blk; /* Extended General Purpose AcpiEvent 0 Reg Blk address */ + ACPI_GENERIC_ADDRESS XGpe1Blk; /* Extended General Purpose AcpiEvent 1 Reg Blk address */ +} FADT_DESCRIPTOR; + + +/* + * "Down-revved" ACPI 2.0 FADT descriptor + * Defined here to allow compiler to generate the length of the struct + */ +typedef struct fadt_descriptor_rev2_minus { + ACPI_FADT_COMMON + UINT32 Flags; + ACPI_GENERIC_ADDRESS ResetRegister; /* Reset register address in GAS format */ + UINT8 ResetValue; /* Value to write to the ResetRegister port to reset the system. 
*/ + UINT8 Reserved7[3]; /* Reserved, must be zero */ +} FADT_DESCRIPTOR_REV2_MINUS; + + +/* + * ACPI 1.0 FADT + * Defined here to allow compiler to generate the length of the struct + */ +typedef struct fadt_descriptor_rev1 { + ACPI_FADT_COMMON + UINT32 Flags; +} FADT_DESCRIPTOR_REV1; + + +/* FADT: Prefered Power Management Profiles */ + +#define PM_UNSPECIFIED 0 +#define PM_DESKTOP 1 +#define PM_MOBILE 2 +#define PM_WORKSTATION 3 +#define PM_ENTERPRISE_SERVER 4 +#define PM_SOHO_SERVER 5 +#define PM_APPLIANCE_PC 6 + +/* FADT: Boot Arch Flags */ + +#define BAF_LEGACY_DEVICES 0x0001 +#define BAF_8042_KEYBOARD_CONTROLLER 0x0002 + +#define FADT2_REVISION_ID 3 +#define FADT2_MINUS_REVISION_ID 2 + + +/* Reset to default packing */ + +#pragma pack() + +/* + * This macro is temporary until the table bitfield flag definitions + * are removed and replaced by a Flags field. + */ +#define ACPI_FLAG_OFFSET(d, f, o) (UINT8) (ACPI_OFFSET (d,f) +\ + sizeof(((d *)0)->f) + o) +/* + * Get the remaining ACPI tables + */ +#include "Acpi_v1.h" + +/* + * ACPI Table information. We save the table address, length, + * and type of memory allocation (mapped or allocated) for each + * table for 1) when we exit, and 2) if a new table is installed + */ +#define ACPI_MEM_NOT_ALLOCATED 0 +#define ACPI_MEM_ALLOCATED 1 +#define ACPI_MEM_MAPPED 2 + +/* Definitions for the Flags bitfield member of ACPI_TABLE_SUPPORT */ + +#define ACPI_TABLE_SINGLE 0x00 +#define ACPI_TABLE_MULTIPLE 0x01 +#define ACPI_TABLE_EXECUTABLE 0x02 + +#define ACPI_TABLE_ROOT 0x00 +#define ACPI_TABLE_PRIMARY 0x10 +#define ACPI_TABLE_SECONDARY 0x20 +#define ACPI_TABLE_ALL 0x30 +#define ACPI_TABLE_TYPE_MASK 0x30 + +/* Data about each known table type */ + +typedef struct acpi_table_support { + char *Name; + char *Signature; + void **GlobalPtr; + UINT8 SigLength; + UINT8 Flags; +} ACPI_TABLE_SUPPORT; + + +/* Macros used to generate offsets to specific table fields */ + +#define ACPI_FACS_OFFSET(f) (UINT8) ACPI_OFFSET (FACS_DESCRIPTOR,f) +#define ACPI_FADT_OFFSET(f) (UINT8) ACPI_OFFSET (FADT_DESCRIPTOR, f) +#define ACPI_GAS_OFFSET(f) (UINT8) ACPI_OFFSET (ACPI_GENERIC_ADDRESS,f) +#define ACPI_HDR_OFFSET(f) (UINT8) ACPI_OFFSET (ACPI_TABLE_HEADER,f) +#define ACPI_RSDP_OFFSET(f) (UINT8) ACPI_OFFSET (RSDP_DESCRIPTOR,f) + +#define ACPI_FADT_FLAG_OFFSET(f, o) ACPI_FLAG_OFFSET (FADT_DESCRIPTOR,f,o) +#define ACPI_FACS_FLAG_OFFSET(f, o) ACPI_FLAG_OFFSET (FACS_DESCRIPTOR,f,o) + +#endif /* __ACTBL_H__ */ diff --git a/EXTERNAL_HEADERS/acpi/Acpi_v1.h b/EXTERNAL_HEADERS/acpi/Acpi_v1.h new file mode 100644 index 000000000..82777e198 --- /dev/null +++ b/EXTERNAL_HEADERS/acpi/Acpi_v1.h @@ -0,0 +1,781 @@ +/****************************************************************************** + * + * Name: actbl1.h - Additional ACPI table definitions + * $Revision: 1.6 $ + * + *****************************************************************************/ + +/****************************************************************************** + * + * 1. Copyright Notice + * + * Some or all of this work - Copyright (c) 1999 - 2006, Intel Corp. + * All rights reserved. + * + * 2. License + * + * 2.1. This is your license from Intel Corp. under its intellectual property + * rights. You may have additional license terms from the party that provided + * you this software, covering your right to use that party's intellectual + * property rights. + * + * 2.2. 
Intel grants, free of charge, to any person ("Licensee") obtaining a + * copy of the source code appearing in this file ("Covered Code") an + * irrevocable, perpetual, worldwide license under Intel's copyrights in the + * base code distributed originally by Intel ("Original Intel Code") to copy, + * make derivatives, distribute, use and display any portion of the Covered + * Code in any form, with the right to sublicense such rights; and + * + * 2.3. Intel grants Licensee a non-exclusive and non-transferable patent + * license (with the right to sublicense), under only those claims of Intel + * patents that are infringed by the Original Intel Code, to make, use, sell, + * offer to sell, and import the Covered Code and derivative works thereof + * solely to the minimum extent necessary to exercise the above copyright + * license, and in no event shall the patent license extend to any additions + * to or modifications of the Original Intel Code. No other license or right + * is granted directly or by implication, estoppel or otherwise; + * + * The above copyright and patent license is granted only if the following + * conditions are met: + * + * 3. Conditions + * + * 3.1. Redistribution of Source with Rights to Further Distribute Source. + * Redistribution of source code of any substantial portion of the Covered + * Code or modification with rights to further distribute source must include + * the above Copyright Notice, the above License, this list of Conditions, + * and the following Disclaimer and Export Compliance provision. In addition, + * Licensee must cause all Covered Code to which Licensee contributes to + * contain a file documenting the changes Licensee made to create that Covered + * Code and the date of any change. Licensee must include in that file the + * documentation of any changes made by any predecessor Licensee. Licensee + * must include a prominent statement that the modification is derived, + * directly or indirectly, from Original Intel Code. + * + * 3.2. Redistribution of Source with no Rights to Further Distribute Source. + * Redistribution of source code of any substantial portion of the Covered + * Code or modification without rights to further distribute source must + * include the following Disclaimer and Export Compliance provision in the + * documentation and/or other materials provided with distribution. In + * addition, Licensee may not authorize further sublicense of source of any + * portion of the Covered Code, and must include terms to the effect that the + * license from Licensee to its licensee is limited to the intellectual + * property embodied in the software Licensee provides to its licensee, and + * not to intellectual property embodied in modifications its licensee may + * make. + * + * 3.3. Redistribution of Executable. Redistribution in executable form of any + * substantial portion of the Covered Code or modification must reproduce the + * above Copyright Notice, and the following Disclaimer and Export Compliance + * provision in the documentation and/or other materials provided with the + * distribution. + * + * 3.4. Intel retains all right, title, and interest in and to the Original + * Intel Code. + * + * 3.5. Neither the name Intel nor any other trademark owned or controlled by + * Intel shall be used in advertising or otherwise to promote the sale, use or + * other dealings in products derived from or relating to the Covered Code + * without prior written authorization from Intel. + * + * 4. Disclaimer and Export Compliance + * + * 4.1. 
INTEL MAKES NO WARRANTY OF ANY KIND REGARDING ANY SOFTWARE PROVIDED + * HERE. ANY SOFTWARE ORIGINATING FROM INTEL OR DERIVED FROM INTEL SOFTWARE + * IS PROVIDED "AS IS," AND INTEL WILL NOT PROVIDE ANY SUPPORT, ASSISTANCE, + * INSTALLATION, TRAINING OR OTHER SERVICES. INTEL WILL NOT PROVIDE ANY + * UPDATES, ENHANCEMENTS OR EXTENSIONS. INTEL SPECIFICALLY DISCLAIMS ANY + * IMPLIED WARRANTIES OF MERCHANTABILITY, NONINFRINGEMENT AND FITNESS FOR A + * PARTICULAR PURPOSE. + * + * 4.2. IN NO EVENT SHALL INTEL HAVE ANY LIABILITY TO LICENSEE, ITS LICENSEES + * OR ANY OTHER THIRD PARTY, FOR ANY LOST PROFITS, LOST DATA, LOSS OF USE OR + * COSTS OF PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES, OR FOR ANY INDIRECT, + * SPECIAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THIS AGREEMENT, UNDER ANY + * CAUSE OF ACTION OR THEORY OF LIABILITY, AND IRRESPECTIVE OF WHETHER INTEL + * HAS ADVANCE NOTICE OF THE POSSIBILITY OF SUCH DAMAGES. THESE LIMITATIONS + * SHALL APPLY NOTWITHSTANDING THE FAILURE OF THE ESSENTIAL PURPOSE OF ANY + * LIMITED REMEDY. + * + * 4.3. Licensee shall not export, either directly or indirectly, any of this + * software or system incorporating such software without first obtaining any + * required license or other approval from the U. S. Department of Commerce or + * any other agency or department of the United States Government. In the + * event Licensee exports any such software from the United States or + * re-exports any such software from a foreign destination, Licensee shall + * ensure that the distribution and export/re-export of the software is in + * compliance with all laws, regulations, orders, or other restrictions of the + * U.S. Export Administration Regulations. Licensee agrees that neither it nor + * any of its subsidiaries will export/re-export any technical data, process, + * software, or service, directly or indirectly, to any country for which the + * United States government or any agency thereof requires an export license, + * other governmental approval, or letter of assurance, without first obtaining + * such license, approval or letter. + * + *****************************************************************************/ + +#ifndef __ACTBL1_H__ +#define __ACTBL1_H__ + + +/******************************************************************************* + * + * Additional ACPI Tables + * + * These tables are not consumed directly by the ACPICA subsystem, but are + * included here to support device drivers and the AML disassembler. + * + ******************************************************************************/ + + +/* + * Values for description table header signatures. Useful because they make + * it more difficult to inadvertently type in the wrong signature. + */ +#define ACPI_SIG_ASF "ASF!" 
/* Alert Standard Format table */ +#define ACPI_SIG_BOOT "BOOT" /* Simple Boot Flag Table */ +#define ACPI_SIG_CPEP "CPEP" /* Corrected Platform Error Polling table */ +#define ACPI_SIG_DBGP "DBGP" /* Debug Port table */ +#define ACPI_SIG_ECDT "ECDT" /* Embedded Controller Boot Resources Table */ +#define ACPI_SIG_HPET "HPET" /* High Precision Event Timer table */ +#define ACPI_SIG_MADT "APIC" /* Multiple APIC Description Table */ +#define ACPI_SIG_MCFG "MCFG" /* PCI Memory Mapped Configuration table */ +#define ACPI_SIG_SBST "SBST" /* Smart Battery Specification Table */ +#define ACPI_SIG_SLIT "SLIT" /* System Locality Distance Information Table */ +#define ACPI_SIG_SPCR "SPCR" /* Serial Port Console Redirection table */ +#define ACPI_SIG_SPMI "SPMI" /* Server Platform Management Interface table */ +#define ACPI_SIG_SRAT "SRAT" /* System Resource Affinity Table */ +#define ACPI_SIG_TCPA "TCPA" /* Trusted Computing Platform Alliance table */ +#define ACPI_SIG_WDRT "WDRT" /* Watchdog Resource Table */ + +/* Legacy names */ + +#define APIC_SIG "APIC" /* Multiple APIC Description Table */ +#define BOOT_SIG "BOOT" /* Simple Boot Flag Table */ +#define SBST_SIG "SBST" /* Smart Battery Specification Table */ + + +/* + * All tables must be byte-packed to match the ACPI specification, since + * the tables are provided by the system BIOS. + */ +#pragma pack(1) + +/* + * Note about bitfields: The UINT8 type is used for bitfields in ACPI tables. + * This is the only type that is even remotely portable. Anything else is not + * portable, so do not use any other bitfield types. + */ + + +/******************************************************************************* + * + * ASF - Alert Standard Format table (Signature "ASF!") + * + ******************************************************************************/ + +typedef struct acpi_table_asf { + ACPI_TABLE_HEADER_DEF +} ACPI_TABLE_ASF; + +#define ACPI_ASF_HEADER_DEF \ +UINT8 Type; \ +UINT8 Reserved; \ +UINT16 Length; + +typedef struct acpi_asf_header { + ACPI_ASF_HEADER_DEF +} ACPI_ASF_HEADER; + + +/* Values for Type field */ + +#define ASF_INFO 0 +#define ASF_ALERT 1 +#define ASF_CONTROL 2 +#define ASF_BOOT 3 +#define ASF_ADDRESS 4 +#define ASF_RESERVED 5 + +/* + * ASF subtables + */ + +/* 0: ASF Information */ + +typedef struct acpi_asf_info { + ACPI_ASF_HEADER_DEF + UINT8 MinResetValue; + UINT8 MinPollInterval; + UINT16 SystemId; + UINT32 MfgId; + UINT8 Flags; + UINT8 Reserved2[3]; +} ACPI_ASF_INFO; + +/* 1: ASF Alerts */ + +typedef struct acpi_asf_alert { + ACPI_ASF_HEADER_DEF + UINT8 AssertMask; + UINT8 DeassertMask; + UINT8 Alerts; + UINT8 DataLength; + UINT8 Array[1]; +} ACPI_ASF_ALERT; + +/* 2: ASF Remote Control */ + +typedef struct acpi_asf_remote { + ACPI_ASF_HEADER_DEF + UINT8 Controls; + UINT8 DataLength; + UINT16 Reserved2; + UINT8 Array[1]; +} ACPI_ASF_REMOTE; + +/* 3: ASF RMCP Boot Options */ + +typedef struct acpi_asf_rmcp { + ACPI_ASF_HEADER_DEF + UINT8 Capabilities[7]; + UINT8 CompletionCode; + UINT32 EnterpriseId; + UINT8 Command; + UINT16 Parameter; + UINT16 BootOptions; + UINT16 OemParameters; +} ACPI_ASF_RMCP; + +/* 4: ASF Address */ + +typedef struct acpi_asf_address { + ACPI_ASF_HEADER_DEF + UINT8 EpromAddress; + UINT8 Devices; + UINT8 SmbusAddresses[1]; +} ACPI_ASF_ADDRESS; + + +/******************************************************************************* + * + * BOOT - Simple Boot Flag Table + * + ******************************************************************************/ + +typedef struct acpi_table_boot { + 
ACPI_TABLE_HEADER_DEF + UINT8 CmosIndex; /* Index in CMOS RAM for the boot register */ + UINT8 Reserved[3]; +} ACPI_TABLE_BOOT; + + +/******************************************************************************* + * + * CPEP - Corrected Platform Error Polling table + * + ******************************************************************************/ + +typedef struct acpi_table_cpep { + ACPI_TABLE_HEADER_DEF + UINT64 Reserved; +} ACPI_TABLE_CPEP; + +/* Subtable */ + +typedef struct acpi_cpep_polling { + UINT8 Type; + UINT8 Length; + UINT8 ProcessorId; /* Processor ID */ + UINT8 ProcessorEid; /* Processor EID */ + UINT32 PollingInterval;/* Polling interval (msec) */ +} ACPI_CPEP_POLLING; + + +/******************************************************************************* + * + * DBGP - Debug Port table + * + ******************************************************************************/ + +typedef struct acpi_table_dbgp { + ACPI_TABLE_HEADER_DEF + UINT8 InterfaceType; /* 0=full 16550, 1=subset of 16550 */ + UINT8 Reserved[3]; + ACPI_GENERIC_ADDRESS DebugPort; +} ACPI_TABLE_DBGP; + + +/******************************************************************************* + * + * ECDT - Embedded Controller Boot Resources Table + * + ******************************************************************************/ + +typedef struct ec_boot_resources { + ACPI_TABLE_HEADER_DEF + ACPI_GENERIC_ADDRESS EcControl; /* Address of EC command/status register */ + ACPI_GENERIC_ADDRESS EcData; /* Address of EC data register */ + UINT32 Uid; /* Unique ID - must be same as the EC _UID method */ + UINT8 GpeBit; /* The GPE for the EC */ + UINT8 EcId[1]; /* Full namepath of the EC in the ACPI namespace */ +} EC_BOOT_RESOURCES; + + +/******************************************************************************* + * + * HPET - High Precision Event Timer table + * + ******************************************************************************/ + +typedef struct acpi_hpet_table { + ACPI_TABLE_HEADER_DEF + UINT32 HardwareId; /* Hardware ID of event timer block */ + ACPI_GENERIC_ADDRESS BaseAddress; /* Address of event timer block */ + UINT8 HpetNumber; /* HPET sequence number */ + UINT16 ClockTick; /* Main counter min tick, periodic mode */ + UINT8 Attributes; +} HPET_TABLE; + +#if 0 /* HPET flags to be converted to macros */ +struct /* Flags (8 bits) */ +{ + UINT8 PageProtect :1;/* 00: No page protection */ + UINT8 PageProtect4 :1;/* 01: 4KB page protected */ + UINT8 PageProtect64 :1;/* 02: 64KB page protected */ + UINT8 :5;/* 03-07: Reserved, must be zero */ +} Flags; +#endif + + +/******************************************************************************* + * + * MADT - Multiple APIC Description Table + * + ******************************************************************************/ + +typedef struct multiple_apic_table { + ACPI_TABLE_HEADER_DEF + UINT32 LocalApicAddress;/* Physical address of local APIC */ + + /* Flags (32 bits) */ + + UINT8 PCATCompat : 1;/* 00: System also has dual 8259s */ + UINT8 : 7;/* 01-07: Reserved, must be zero */ + UINT8 Reserved1[3]; /* 08-31: Reserved, must be zero */ +} MULTIPLE_APIC_TABLE; + +/* Values for MADT PCATCompat */ + +#define DUAL_PIC 0 +#define MULTIPLE_APIC 1 + + +/* Common MADT Sub-table header */ + +#define APIC_HEADER_DEF \ +UINT8 Type; \ +UINT8 Length; + +typedef struct apic_header { + APIC_HEADER_DEF +} APIC_HEADER; + +/* Values for Type in APIC_HEADER */ + +#define APIC_PROCESSOR 0 +#define APIC_IO 1 +#define APIC_XRUPT_OVERRIDE 2 +#define APIC_NMI 3 +#define 
APIC_LOCAL_NMI 4 +#define APIC_ADDRESS_OVERRIDE 5 +#define APIC_IO_SAPIC 6 +#define APIC_LOCAL_SAPIC 7 +#define APIC_XRUPT_SOURCE 8 +#define APIC_RESERVED 9 /* 9 and greater are reserved */ + + +/* Flag definitions for MADT sub-tables */ + +#define ACPI_MADT_IFLAGS /* INTI flags (16 bits) */ \ +UINT8 Polarity : 2; /* 00-01: Polarity of APIC I/O input signals */ \ +UINT8 TriggerMode : 2; /* 02-03: Trigger mode of APIC input signals */ \ +UINT8 : 4; /* 04-07: Reserved, must be zero */ \ +UINT8 Reserved1; /* 08-15: Reserved, must be zero */ + +#define ACPI_MADT_LFLAGS /* Local Sapic flags (32 bits) */ \ +UINT8 ProcessorEnabled: 1; /* 00: Processor is usable if set */ \ +UINT8 : 7; /* 01-07: Reserved, must be zero */ \ +UINT8 Reserved2[3]; /* 08-31: Reserved, must be zero */ + + +/* Values for MPS INTI flags */ + +#define POLARITY_CONFORMS 0 +#define POLARITY_ACTIVE_HIGH 1 +#define POLARITY_RESERVED 2 +#define POLARITY_ACTIVE_LOW 3 + +#define TRIGGER_CONFORMS 0 +#define TRIGGER_EDGE 1 +#define TRIGGER_RESERVED 2 +#define TRIGGER_LEVEL 3 + + +/* + * MADT Sub-tables, correspond to Type in APIC_HEADER + */ + +/* 0: processor APIC */ + +typedef struct madt_processor_apic { + APIC_HEADER_DEF + UINT8 ProcessorId; /* ACPI processor id */ + UINT8 LocalApicId; /* Processor's local APIC id */ + ACPI_MADT_LFLAGS +} MADT_PROCESSOR_APIC; + +/* 1: IO APIC */ + +typedef struct madt_io_apic { + APIC_HEADER_DEF + UINT8 IoApicId; /* I/O APIC ID */ + UINT8 Reserved; /* Reserved - must be zero */ + UINT32 Address; /* APIC physical address */ + UINT32 Interrupt; /* Global system interrupt where INTI lines start */ +} MADT_IO_APIC; + +/* 2: Interrupt Override */ + +typedef struct madt_interrupt_override { + APIC_HEADER_DEF + UINT8 Bus; /* 0 - ISA */ + UINT8 Source; /* Interrupt source (IRQ) */ + UINT32 Interrupt; /* Global system interrupt */ + ACPI_MADT_IFLAGS +} MADT_INTERRUPT_OVERRIDE; + +/* 3: NMI Sources */ + +typedef struct madt_nmi_source { + APIC_HEADER_DEF + ACPI_MADT_IFLAGS + UINT32 Interrupt; /* Global system interrupt */ +} MADT_NMI_SOURCE; + +/* 4: Local APIC NMI */ + +typedef struct madt_local_apic_nmi { + APIC_HEADER_DEF + UINT8 ProcessorId; /* ACPI processor id */ + ACPI_MADT_IFLAGS + UINT8 Lint; /* LINTn to which NMI is connected */ +} MADT_LOCAL_APIC_NMI; + +/* 5: Address Override */ + +typedef struct madt_address_override { + APIC_HEADER_DEF + UINT16 Reserved; /* Reserved, must be zero */ + UINT64 Address; /* APIC physical address */ +} MADT_ADDRESS_OVERRIDE; + +/* 6: I/O Sapic */ + +typedef struct madt_io_sapic { + APIC_HEADER_DEF + UINT8 IoSapicId; /* I/O SAPIC ID */ + UINT8 Reserved; /* Reserved, must be zero */ + UINT32 InterruptBase; /* Glocal interrupt for SAPIC start */ + UINT64 Address; /* SAPIC physical address */ +} MADT_IO_SAPIC; + +/* 7: Local Sapic */ + +typedef struct madt_local_sapic { + APIC_HEADER_DEF + UINT8 ProcessorId; /* ACPI processor id */ + UINT8 LocalSapicId; /* SAPIC ID */ + UINT8 LocalSapicEid; /* SAPIC EID */ + UINT8 Reserved[3]; /* Reserved, must be zero */ + ACPI_MADT_LFLAGS + UINT32 ProcessorUID; /* Numeric UID - ACPI 3.0 */ + char ProcessorUIDString[1];/* String UID - ACPI 3.0 */ +} MADT_LOCAL_SAPIC; + +/* 8: Platform Interrupt Source */ + +typedef struct madt_interrupt_source { + APIC_HEADER_DEF + ACPI_MADT_IFLAGS + UINT8 InterruptType; /* 1=PMI, 2=INIT, 3=corrected */ + UINT8 ProcessorId; /* Processor ID */ + UINT8 ProcessorEid; /* Processor EID */ + UINT8 IoSapicVector; /* Vector value for PMI interrupts */ + UINT32 Interrupt; /* Global system interrupt */ + 
UINT32 Flags; /* Interrupt Source Flags */ +} MADT_INTERRUPT_SOURCE; + + +/******************************************************************************* + * + * MCFG - PCI Memory Mapped Configuration table and sub-table + * + ******************************************************************************/ + +typedef struct acpi_table_mcfg { + ACPI_TABLE_HEADER_DEF + UINT8 Reserved[8]; +} ACPI_TABLE_MCFG; + +typedef struct acpi_mcfg_allocation { + UINT64 BaseAddress; /* Base address, processor-relative */ + UINT16 PciSegment; /* PCI segment group number */ + UINT8 StartBusNumber; /* Starting PCI Bus number */ + UINT8 EndBusNumber; /* Final PCI Bus number */ + UINT32 Reserved; +} ACPI_MCFG_ALLOCATION; + + +/******************************************************************************* + * + * SBST - Smart Battery Specification Table + * + ******************************************************************************/ + +typedef struct smart_battery_table { + ACPI_TABLE_HEADER_DEF + UINT32 WarningLevel; + UINT32 LowLevel; + UINT32 CriticalLevel; +} SMART_BATTERY_TABLE; + + +/******************************************************************************* + * + * SLIT - System Locality Distance Information Table + * + ******************************************************************************/ + +typedef struct system_locality_info { + ACPI_TABLE_HEADER_DEF + UINT64 LocalityCount; + UINT8 Entry[1][1]; +} SYSTEM_LOCALITY_INFO; + + +/******************************************************************************* + * + * SPCR - Serial Port Console Redirection table + * + ******************************************************************************/ + +typedef struct acpi_table_spcr { + ACPI_TABLE_HEADER_DEF + UINT8 InterfaceType; /* 0=full 16550, 1=subset of 16550 */ + UINT8 Reserved[3]; + ACPI_GENERIC_ADDRESS SerialPort; + UINT8 InterruptType; + UINT8 PcInterrupt; + UINT32 Interrupt; + UINT8 BaudRate; + UINT8 Parity; + UINT8 StopBits; + UINT8 FlowControl; + UINT8 TerminalType; + UINT8 Reserved2; + UINT16 PciDeviceId; + UINT16 PciVendorId; + UINT8 PciBus; + UINT8 PciDevice; + UINT8 PciFunction; + UINT32 PciFlags; + UINT8 PciSegment; + UINT32 Reserved3; +} ACPI_TABLE_SPCR; + + +/******************************************************************************* + * + * SPMI - Server Platform Management Interface table + * + ******************************************************************************/ + +typedef struct acpi_table_spmi { + ACPI_TABLE_HEADER_DEF + UINT8 Reserved; + UINT8 InterfaceType; + UINT16 SpecRevision; /* Version of IPMI */ + UINT8 InterruptType; + UINT8 GpeNumber; /* GPE assigned */ + UINT8 Reserved2; + UINT8 PciDeviceFlag; + UINT32 Interrupt; + ACPI_GENERIC_ADDRESS IpmiRegister; + UINT8 PciSegment; + UINT8 PciBus; + UINT8 PciDevice; + UINT8 PciFunction; +} ACPI_TABLE_SPMI; + + +/******************************************************************************* + * + * SRAT - System Resource Affinity Table + * + ******************************************************************************/ + +typedef struct system_resource_affinity { + ACPI_TABLE_HEADER_DEF + UINT32 Reserved1; /* Must be value '1' */ + UINT64 Reserved2; /* Reserved, must be zero */ +} SYSTEM_RESOURCE_AFFINITY; + + +/* SRAT common sub-table header */ + +#define SRAT_SUBTABLE_HEADER \ +UINT8 Type; \ +UINT8 Length; + +/* Values for Type above */ + +#define SRAT_CPU_AFFINITY 0 +#define SRAT_MEMORY_AFFINITY 1 +#define SRAT_RESERVED 2 + + +/* SRAT sub-tables */ + +typedef struct static_resource_alloc { + 
SRAT_SUBTABLE_HEADER + UINT8 ProximityDomainLo; + UINT8 ApicId; + + /* Flags (32 bits) */ + + UINT8 Enabled :1;/* 00: Use affinity structure */ + UINT8 :7;/* 01-07: Reserved, must be zero */ + UINT8 Reserved3[3]; /* 08-31: Reserved, must be zero */ + + UINT8 LocalSapicEid; + UINT8 ProximityDomainHi[3]; + UINT32 Reserved4; /* Reserved, must be zero */ +} STATIC_RESOURCE_ALLOC; + +typedef struct memory_affinity { + SRAT_SUBTABLE_HEADER + UINT32 ProximityDomain; + UINT16 Reserved3; + UINT64 BaseAddress; + UINT64 AddressLength; + UINT32 Reserved4; + + /* Flags (32 bits) */ + + UINT8 Enabled :1;/* 00: Use affinity structure */ + UINT8 HotPluggable :1;/* 01: Memory region is hot pluggable */ + UINT8 NonVolatile :1;/* 02: Memory is non-volatile */ + UINT8 :5;/* 03-07: Reserved, must be zero */ + UINT8 Reserved5[3]; /* 08-31: Reserved, must be zero */ + + UINT64 Reserved6; /* Reserved, must be zero */ +} MEMORY_AFFINITY; + + +/******************************************************************************* + * + * TCPA - Trusted Computing Platform Alliance table + * + ******************************************************************************/ + +typedef struct acpi_table_tcpa { + ACPI_TABLE_HEADER_DEF + UINT16 Reserved; + UINT32 MaxLogLength; /* Maximum length for the event log area */ + UINT64 LogAddress; /* Address of the event log area */ +} ACPI_TABLE_TCPA; + + +/******************************************************************************* + * + * WDRT - Watchdog Resource Table + * + ******************************************************************************/ + +typedef struct acpi_table_wdrt { + ACPI_TABLE_HEADER_DEF + UINT32 HeaderLength; /* Watchdog Header Length */ + UINT8 PciSegment; /* PCI Segment number */ + UINT8 PciBus; /* PCI Bus number */ + UINT8 PciDevice; /* PCI Device number */ + UINT8 PciFunction; /* PCI Function number */ + UINT32 TimerPeriod; /* Period of one timer count (msec) */ + UINT32 MaxCount; /* Maximum counter value supported */ + UINT32 MinCount; /* Minimum counter value */ + UINT8 Flags; + UINT8 Reserved[3]; + UINT32 Entries; /* Number of watchdog entries that follow */ +} ACPI_TABLE_WDRT; + +#if 0 /* Flags, will be converted to macros */ +UINT8 Enabled :1; /* 00: Timer enabled */ +UINT8 :6; /* 01-06: Reserved */ +UINT8 SleepStop :1; /* 07: Timer stopped in sleep state */ +#endif + + +/* Macros used to generate offsets to specific table fields */ + +#define ACPI_ASF0_OFFSET(f) (UINT8) ACPI_OFFSET (ACPI_ASF_INFO,f) +#define ACPI_ASF1_OFFSET(f) (UINT8) ACPI_OFFSET (ACPI_ASF_ALERT,f) +#define ACPI_ASF2_OFFSET(f) (UINT8) ACPI_OFFSET (ACPI_ASF_REMOTE,f) +#define ACPI_ASF3_OFFSET(f) (UINT8) ACPI_OFFSET (ACPI_ASF_RMCP,f) +#define ACPI_ASF4_OFFSET(f) (UINT8) ACPI_OFFSET (ACPI_ASF_ADDRESS,f) +#define ACPI_BOOT_OFFSET(f) (UINT8) ACPI_OFFSET (ACPI_TABLE_BOOT,f) +#define ACPI_CPEP_OFFSET(f) (UINT8) ACPI_OFFSET (ACPI_TABLE_CPEP,f) +#define ACPI_CPEP0_OFFSET(f) (UINT8) ACPI_OFFSET (ACPI_CPEP_POLLING,f) +#define ACPI_DBGP_OFFSET(f) (UINT8) ACPI_OFFSET (ACPI_TABLE_DBGP,f) +#define ACPI_ECDT_OFFSET(f) (UINT8) ACPI_OFFSET (EC_BOOT_RESOURCES,f) +#define ACPI_HPET_OFFSET(f) (UINT8) ACPI_OFFSET (HPET_TABLE,f) +#define ACPI_MADT_OFFSET(f) (UINT8) ACPI_OFFSET (MULTIPLE_APIC_TABLE,f) +#define ACPI_MADT0_OFFSET(f) (UINT8) ACPI_OFFSET (MADT_PROCESSOR_APIC,f) +#define ACPI_MADT1_OFFSET(f) (UINT8) ACPI_OFFSET (MADT_IO_APIC,f) +#define ACPI_MADT2_OFFSET(f) (UINT8) ACPI_OFFSET (MADT_INTERRUPT_OVERRIDE,f) +#define ACPI_MADT3_OFFSET(f) (UINT8) ACPI_OFFSET (MADT_NMI_SOURCE,f) +#define 
ACPI_MADT4_OFFSET(f) (UINT8) ACPI_OFFSET (MADT_LOCAL_APIC_NMI,f) +#define ACPI_MADT5_OFFSET(f) (UINT8) ACPI_OFFSET (MADT_ADDRESS_OVERRIDE,f) +#define ACPI_MADT6_OFFSET(f) (UINT8) ACPI_OFFSET (MADT_IO_SAPIC,f) +#define ACPI_MADT7_OFFSET(f) (UINT8) ACPI_OFFSET (MADT_LOCAL_SAPIC,f) +#define ACPI_MADT8_OFFSET(f) (UINT8) ACPI_OFFSET (MADT_INTERRUPT_SOURCE,f) +#define ACPI_MADTH_OFFSET(f) (UINT8) ACPI_OFFSET (APIC_HEADER,f) +#define ACPI_MCFG_OFFSET(f) (UINT8) ACPI_OFFSET (ACPI_TABLE_MCFG,f) +#define ACPI_MCFG0_OFFSET(f) (UINT8) ACPI_OFFSET (ACPI_MCFG_ALLOCATION,f) +#define ACPI_SBST_OFFSET(f) (UINT8) ACPI_OFFSET (SMART_BATTERY_TABLE,f) +#define ACPI_SLIT_OFFSET(f) (UINT8) ACPI_OFFSET (SYSTEM_LOCALITY_INFO,f) +#define ACPI_SPCR_OFFSET(f) (UINT8) ACPI_OFFSET (ACPI_TABLE_SPCR,f) +#define ACPI_SPMI_OFFSET(f) (UINT8) ACPI_OFFSET (ACPI_TABLE_SPMI,f) +#define ACPI_SRAT_OFFSET(f) (UINT8) ACPI_OFFSET (SYSTEM_RESOURCE_AFFINITY,f) +#define ACPI_SRAT0_OFFSET(f) (UINT8) ACPI_OFFSET (STATIC_RESOURCE_ALLOC,f) +#define ACPI_SRAT1_OFFSET(f) (UINT8) ACPI_OFFSET (MEMORY_AFFINITY,f) +#define ACPI_TCPA_OFFSET(f) (UINT8) ACPI_OFFSET (ACPI_TABLE_TCPA,f) +#define ACPI_WDRT_OFFSET(f) (UINT8) ACPI_OFFSET (ACPI_TABLE_WDRT,f) + + +#define ACPI_HPET_FLAG_OFFSET(f, o) ACPI_FLAG_OFFSET (HPET_TABLE,f,o) +#define ACPI_SRAT0_FLAG_OFFSET(f, o) ACPI_FLAG_OFFSET (STATIC_RESOURCE_ALLOC,f,o) +#define ACPI_SRAT1_FLAG_OFFSET(f, o) ACPI_FLAG_OFFSET (MEMORY_AFFINITY,f,o) +#define ACPI_MADT_FLAG_OFFSET(f, o) ACPI_FLAG_OFFSET (MULTIPLE_APIC_TABLE,f,o) +#define ACPI_MADT0_FLAG_OFFSET(f, o) ACPI_FLAG_OFFSET (MADT_PROCESSOR_APIC,f,o) +#define ACPI_MADT2_FLAG_OFFSET(f, o) ACPI_FLAG_OFFSET (MADT_INTERRUPT_OVERRIDE,f,o) +#define ACPI_MADT3_FLAG_OFFSET(f, o) ACPI_FLAG_OFFSET (MADT_NMI_SOURCE,f,o) +#define ACPI_MADT4_FLAG_OFFSET(f, o) ACPI_FLAG_OFFSET (MADT_LOCAL_APIC_NMI,f,o) +#define ACPI_MADT7_FLAG_OFFSET(f, o) ACPI_FLAG_OFFSET (MADT_LOCAL_SAPIC,f,o) +#define ACPI_MADT8_FLAG_OFFSET(f, o) ACPI_FLAG_OFFSET (MADT_INTERRUPT_SOURCE,f,o) + + +/* Reset to default packing */ + +#pragma pack() + +#endif /* __ACTBL1_H__ */ diff --git a/EXTERNAL_HEADERS/architecture/arm/Makefile b/EXTERNAL_HEADERS/architecture/arm/Makefile index 08f41e365..b3d81657d 100644 --- a/EXTERNAL_HEADERS/architecture/arm/Makefile +++ b/EXTERNAL_HEADERS/architecture/arm/Makefile @@ -6,8 +6,7 @@ export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir include $(MakeInc_cmd) include $(MakeInc_def) -EXPORT_FILES = \ - arm_neon.h \ +EXPORT_FILES = INSTALL_MD_LIST = diff --git a/EXTERNAL_HEADERS/architecture/arm/arm_neon.h b/EXTERNAL_HEADERS/architecture/arm/arm_neon.h deleted file mode 100644 index e294bd96c..000000000 --- a/EXTERNAL_HEADERS/architecture/arm/arm_neon.h +++ /dev/null @@ -1,74267 +0,0 @@ -/*===---- arm_neon.h - ARM Neon intrinsics ---------------------------------=== - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - * THE SOFTWARE. - * - *===-----------------------------------------------------------------------=== - */ - -#ifndef __ARM_NEON_H -#define __ARM_NEON_H - -#if !defined(__ARM_NEON) -#error "NEON support not enabled" -#endif - -#include - -#pragma clang diagnostic push -#pragma clang diagnostic ignored "-Wfloat-equal" -#pragma clang diagnostic ignored "-Wvector-conversion" - -typedef float float32_t; -typedef __fp16 float16_t; -#ifdef __aarch64__ -typedef double float64_t; -#endif - -#ifdef __aarch64__ -typedef uint8_t poly8_t; -typedef uint16_t poly16_t; -typedef uint64_t poly64_t; -typedef __uint128_t poly128_t; -#else -typedef int8_t poly8_t; -typedef int16_t poly16_t; -#endif -typedef __attribute__((neon_vector_type(8))) int8_t int8x8_t; -typedef __attribute__((neon_vector_type(16))) int8_t int8x16_t; -typedef __attribute__((neon_vector_type(4))) int16_t int16x4_t; -typedef __attribute__((neon_vector_type(8))) int16_t int16x8_t; -typedef __attribute__((neon_vector_type(2))) int32_t int32x2_t; -typedef __attribute__((neon_vector_type(4))) int32_t int32x4_t; -typedef __attribute__((neon_vector_type(1))) int64_t int64x1_t; -typedef __attribute__((neon_vector_type(2))) int64_t int64x2_t; -typedef __attribute__((neon_vector_type(8))) uint8_t uint8x8_t; -typedef __attribute__((neon_vector_type(16))) uint8_t uint8x16_t; -typedef __attribute__((neon_vector_type(4))) uint16_t uint16x4_t; -typedef __attribute__((neon_vector_type(8))) uint16_t uint16x8_t; -typedef __attribute__((neon_vector_type(2))) uint32_t uint32x2_t; -typedef __attribute__((neon_vector_type(4))) uint32_t uint32x4_t; -typedef __attribute__((neon_vector_type(1))) uint64_t uint64x1_t; -typedef __attribute__((neon_vector_type(2))) uint64_t uint64x2_t; -typedef __attribute__((neon_vector_type(4))) float16_t float16x4_t; -typedef __attribute__((neon_vector_type(8))) float16_t float16x8_t; -typedef __attribute__((neon_vector_type(2))) float32_t float32x2_t; -typedef __attribute__((neon_vector_type(4))) float32_t float32x4_t; -#ifdef __aarch64__ -typedef __attribute__((neon_vector_type(1))) float64_t float64x1_t; -typedef __attribute__((neon_vector_type(2))) float64_t float64x2_t; -#endif -typedef __attribute__((neon_polyvector_type(8))) poly8_t poly8x8_t; -typedef __attribute__((neon_polyvector_type(16))) poly8_t poly8x16_t; -typedef __attribute__((neon_polyvector_type(4))) poly16_t poly16x4_t; -typedef __attribute__((neon_polyvector_type(8))) poly16_t poly16x8_t; -#ifdef __aarch64__ -typedef __attribute__((neon_polyvector_type(1))) poly64_t poly64x1_t; -typedef __attribute__((neon_polyvector_type(2))) poly64_t poly64x2_t; -#endif - -typedef struct int8x8x2_t { - int8x8_t val[2]; -} int8x8x2_t; - -typedef struct int8x16x2_t { - int8x16_t val[2]; -} int8x16x2_t; - -typedef struct int16x4x2_t { - int16x4_t val[2]; -} int16x4x2_t; - -typedef struct int16x8x2_t { - int16x8_t val[2]; -} int16x8x2_t; - -typedef struct int32x2x2_t { - int32x2_t val[2]; -} int32x2x2_t; - -typedef struct int32x4x2_t { - int32x4_t val[2]; -} int32x4x2_t; - -typedef struct int64x1x2_t { - 
int64x1_t val[2]; -} int64x1x2_t; - -typedef struct int64x2x2_t { - int64x2_t val[2]; -} int64x2x2_t; - -typedef struct uint8x8x2_t { - uint8x8_t val[2]; -} uint8x8x2_t; - -typedef struct uint8x16x2_t { - uint8x16_t val[2]; -} uint8x16x2_t; - -typedef struct uint16x4x2_t { - uint16x4_t val[2]; -} uint16x4x2_t; - -typedef struct uint16x8x2_t { - uint16x8_t val[2]; -} uint16x8x2_t; - -typedef struct uint32x2x2_t { - uint32x2_t val[2]; -} uint32x2x2_t; - -typedef struct uint32x4x2_t { - uint32x4_t val[2]; -} uint32x4x2_t; - -typedef struct uint64x1x2_t { - uint64x1_t val[2]; -} uint64x1x2_t; - -typedef struct uint64x2x2_t { - uint64x2_t val[2]; -} uint64x2x2_t; - -typedef struct float16x4x2_t { - float16x4_t val[2]; -} float16x4x2_t; - -typedef struct float16x8x2_t { - float16x8_t val[2]; -} float16x8x2_t; - -typedef struct float32x2x2_t { - float32x2_t val[2]; -} float32x2x2_t; - -typedef struct float32x4x2_t { - float32x4_t val[2]; -} float32x4x2_t; - -#ifdef __aarch64__ -typedef struct float64x1x2_t { - float64x1_t val[2]; -} float64x1x2_t; - -typedef struct float64x2x2_t { - float64x2_t val[2]; -} float64x2x2_t; - -#endif -typedef struct poly8x8x2_t { - poly8x8_t val[2]; -} poly8x8x2_t; - -typedef struct poly8x16x2_t { - poly8x16_t val[2]; -} poly8x16x2_t; - -typedef struct poly16x4x2_t { - poly16x4_t val[2]; -} poly16x4x2_t; - -typedef struct poly16x8x2_t { - poly16x8_t val[2]; -} poly16x8x2_t; - -#ifdef __aarch64__ -typedef struct poly64x1x2_t { - poly64x1_t val[2]; -} poly64x1x2_t; - -typedef struct poly64x2x2_t { - poly64x2_t val[2]; -} poly64x2x2_t; - -#endif -typedef struct int8x8x3_t { - int8x8_t val[3]; -} int8x8x3_t; - -typedef struct int8x16x3_t { - int8x16_t val[3]; -} int8x16x3_t; - -typedef struct int16x4x3_t { - int16x4_t val[3]; -} int16x4x3_t; - -typedef struct int16x8x3_t { - int16x8_t val[3]; -} int16x8x3_t; - -typedef struct int32x2x3_t { - int32x2_t val[3]; -} int32x2x3_t; - -typedef struct int32x4x3_t { - int32x4_t val[3]; -} int32x4x3_t; - -typedef struct int64x1x3_t { - int64x1_t val[3]; -} int64x1x3_t; - -typedef struct int64x2x3_t { - int64x2_t val[3]; -} int64x2x3_t; - -typedef struct uint8x8x3_t { - uint8x8_t val[3]; -} uint8x8x3_t; - -typedef struct uint8x16x3_t { - uint8x16_t val[3]; -} uint8x16x3_t; - -typedef struct uint16x4x3_t { - uint16x4_t val[3]; -} uint16x4x3_t; - -typedef struct uint16x8x3_t { - uint16x8_t val[3]; -} uint16x8x3_t; - -typedef struct uint32x2x3_t { - uint32x2_t val[3]; -} uint32x2x3_t; - -typedef struct uint32x4x3_t { - uint32x4_t val[3]; -} uint32x4x3_t; - -typedef struct uint64x1x3_t { - uint64x1_t val[3]; -} uint64x1x3_t; - -typedef struct uint64x2x3_t { - uint64x2_t val[3]; -} uint64x2x3_t; - -typedef struct float16x4x3_t { - float16x4_t val[3]; -} float16x4x3_t; - -typedef struct float16x8x3_t { - float16x8_t val[3]; -} float16x8x3_t; - -typedef struct float32x2x3_t { - float32x2_t val[3]; -} float32x2x3_t; - -typedef struct float32x4x3_t { - float32x4_t val[3]; -} float32x4x3_t; - -#ifdef __aarch64__ -typedef struct float64x1x3_t { - float64x1_t val[3]; -} float64x1x3_t; - -typedef struct float64x2x3_t { - float64x2_t val[3]; -} float64x2x3_t; - -#endif -typedef struct poly8x8x3_t { - poly8x8_t val[3]; -} poly8x8x3_t; - -typedef struct poly8x16x3_t { - poly8x16_t val[3]; -} poly8x16x3_t; - -typedef struct poly16x4x3_t { - poly16x4_t val[3]; -} poly16x4x3_t; - -typedef struct poly16x8x3_t { - poly16x8_t val[3]; -} poly16x8x3_t; - -#ifdef __aarch64__ -typedef struct poly64x1x3_t { - poly64x1_t val[3]; -} poly64x1x3_t; - -typedef 
struct poly64x2x3_t { - poly64x2_t val[3]; -} poly64x2x3_t; - -#endif -typedef struct int8x8x4_t { - int8x8_t val[4]; -} int8x8x4_t; - -typedef struct int8x16x4_t { - int8x16_t val[4]; -} int8x16x4_t; - -typedef struct int16x4x4_t { - int16x4_t val[4]; -} int16x4x4_t; - -typedef struct int16x8x4_t { - int16x8_t val[4]; -} int16x8x4_t; - -typedef struct int32x2x4_t { - int32x2_t val[4]; -} int32x2x4_t; - -typedef struct int32x4x4_t { - int32x4_t val[4]; -} int32x4x4_t; - -typedef struct int64x1x4_t { - int64x1_t val[4]; -} int64x1x4_t; - -typedef struct int64x2x4_t { - int64x2_t val[4]; -} int64x2x4_t; - -typedef struct uint8x8x4_t { - uint8x8_t val[4]; -} uint8x8x4_t; - -typedef struct uint8x16x4_t { - uint8x16_t val[4]; -} uint8x16x4_t; - -typedef struct uint16x4x4_t { - uint16x4_t val[4]; -} uint16x4x4_t; - -typedef struct uint16x8x4_t { - uint16x8_t val[4]; -} uint16x8x4_t; - -typedef struct uint32x2x4_t { - uint32x2_t val[4]; -} uint32x2x4_t; - -typedef struct uint32x4x4_t { - uint32x4_t val[4]; -} uint32x4x4_t; - -typedef struct uint64x1x4_t { - uint64x1_t val[4]; -} uint64x1x4_t; - -typedef struct uint64x2x4_t { - uint64x2_t val[4]; -} uint64x2x4_t; - -typedef struct float16x4x4_t { - float16x4_t val[4]; -} float16x4x4_t; - -typedef struct float16x8x4_t { - float16x8_t val[4]; -} float16x8x4_t; - -typedef struct float32x2x4_t { - float32x2_t val[4]; -} float32x2x4_t; - -typedef struct float32x4x4_t { - float32x4_t val[4]; -} float32x4x4_t; - -#ifdef __aarch64__ -typedef struct float64x1x4_t { - float64x1_t val[4]; -} float64x1x4_t; - -typedef struct float64x2x4_t { - float64x2_t val[4]; -} float64x2x4_t; - -#endif -typedef struct poly8x8x4_t { - poly8x8_t val[4]; -} poly8x8x4_t; - -typedef struct poly8x16x4_t { - poly8x16_t val[4]; -} poly8x16x4_t; - -typedef struct poly16x4x4_t { - poly16x4_t val[4]; -} poly16x4x4_t; - -typedef struct poly16x8x4_t { - poly16x8_t val[4]; -} poly16x8x4_t; - -#ifdef __aarch64__ -typedef struct poly64x1x4_t { - poly64x1_t val[4]; -} poly64x1x4_t; - -typedef struct poly64x2x4_t { - poly64x2_t val[4]; -} poly64x2x4_t; - -#endif - -#define __ai static inline __attribute__((__always_inline__, __nodebug__)) - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vabdq_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16_t __ret; - __ret = (uint8x16_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); - return __ret; -} -#else -__ai uint8x16_t vabdq_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __ret; - __ret = (uint8x16_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -__ai uint8x16_t __noswap_vabdq_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16_t __ret; - __ret = (uint8x16_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vabdq_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); - return __ret; -} -#else -__ai uint32x4_t vabdq_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __rev1; __rev1 = 
__builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -__ai uint32x4_t __noswap_vabdq_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vabdq_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); - return __ret; -} -#else -__ai uint16x8_t vabdq_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -__ai uint16x8_t __noswap_vabdq_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vabdq_s8(int8x16_t __p0, int8x16_t __p1) { - int8x16_t __ret; - __ret = (int8x16_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); - return __ret; -} -#else -__ai int8x16_t vabdq_s8(int8x16_t __p0, int8x16_t __p1) { - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __ret; - __ret = (int8x16_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -__ai int8x16_t __noswap_vabdq_s8(int8x16_t __p0, int8x16_t __p1) { - int8x16_t __ret; - __ret = (int8x16_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vabdq_f32(float32x4_t __p0, float32x4_t __p1) { - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 41); - return __ret; -} -#else -__ai float32x4_t vabdq_f32(float32x4_t __p0, float32x4_t __p1) { - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vabdq_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); - return __ret; -} -#else -__ai int32x4_t vabdq_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -__ai int32x4_t 
__noswap_vabdq_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vabdq_s16(int16x8_t __p0, int16x8_t __p1) { - int16x8_t __ret; - __ret = (int16x8_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); - return __ret; -} -#else -__ai int16x8_t vabdq_s16(int16x8_t __p0, int16x8_t __p1) { - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __ret; - __ret = (int16x8_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -__ai int16x8_t __noswap_vabdq_s16(int16x8_t __p0, int16x8_t __p1) { - int16x8_t __ret; - __ret = (int16x8_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vabd_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 16); - return __ret; -} -#else -__ai uint8x8_t vabd_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -__ai uint8x8_t __noswap_vabd_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 16); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vabd_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 18); - return __ret; -} -#else -__ai uint32x2_t vabd_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -__ai uint32x2_t __noswap_vabd_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 18); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vabd_u16(uint16x4_t __p0, uint16x4_t __p1) { - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 17); - return __ret; -} -#else -__ai uint16x4_t vabd_u16(uint16x4_t __p0, uint16x4_t __p1) { - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -__ai uint16x4_t __noswap_vabd_u16(uint16x4_t __p0, uint16x4_t __p1) { - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 17); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x8_t 
vabd_s8(int8x8_t __p0, int8x8_t __p1) { - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 0); - return __ret; -} -#else -__ai int8x8_t vabd_s8(int8x8_t __p0, int8x8_t __p1) { - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -__ai int8x8_t __noswap_vabd_s8(int8x8_t __p0, int8x8_t __p1) { - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vabd_f32(float32x2_t __p0, float32x2_t __p1) { - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 9); - return __ret; -} -#else -__ai float32x2_t vabd_f32(float32x2_t __p0, float32x2_t __p1) { - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vabd_s32(int32x2_t __p0, int32x2_t __p1) { - int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 2); - return __ret; -} -#else -__ai int32x2_t vabd_s32(int32x2_t __p0, int32x2_t __p1) { - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -__ai int32x2_t __noswap_vabd_s32(int32x2_t __p0, int32x2_t __p1) { - int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 2); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vabd_s16(int16x4_t __p0, int16x4_t __p1) { - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 1); - return __ret; -} -#else -__ai int16x4_t vabd_s16(int16x4_t __p0, int16x4_t __p1) { - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -__ai int16x4_t __noswap_vabd_s16(int16x4_t __p0, int16x4_t __p1) { - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vabsq_s8(int8x16_t __p0) { - int8x16_t __ret; - __ret = (int8x16_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 32); - return __ret; -} -#else -__ai int8x16_t vabsq_s8(int8x16_t __p0) { - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __ret; - __ret = (int8x16_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 32); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif 
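
The removed arm_neon.h defines each intrinsic twice: a __LITTLE_ENDIAN__ body that passes its arguments straight to the builtin, and a big-endian body that first reverses lane order with __builtin_shufflevector, performs the operation, and then reverses the result back, so the builtin always sees little-endian lane numbering. The sketch below is illustrative only and not part of the header; the example_u32x4 type and example_vaddq_u32_big_endian helper are hypothetical names, written against clang's generic vector extension rather than the neon_vector_type attribute used above.

/* Illustrative sketch (not from the header): the lane-reversal pattern used
 * by the big-endian variants, applied to a hypothetical 4 x u32 helper. */
typedef __attribute__((__vector_size__(16))) unsigned int example_u32x4;

static inline example_u32x4
example_vaddq_u32_big_endian(example_u32x4 __p0, example_u32x4 __p1)
{
    /* Reverse the lanes so index 0 matches the little-endian view that the
     * operation (here, plain element-wise vector addition) assumes. */
    example_u32x4 __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
    example_u32x4 __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
    example_u32x4 __ret  = __rev0 + __rev1;
    /* Reverse the result back to the caller's lane order. */
    return __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
}

On little-endian targets the same operation reduces to __p0 + __p1, exactly as in the #ifdef __LITTLE_ENDIAN__ branches of the surrounding definitions.
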
- -#ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vabsq_f32(float32x4_t __p0) { - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 41); - return __ret; -} -#else -__ai float32x4_t vabsq_f32(float32x4_t __p0) { - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 41); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vabsq_s32(int32x4_t __p0) { - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 34); - return __ret; -} -#else -__ai int32x4_t vabsq_s32(int32x4_t __p0) { - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 34); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vabsq_s16(int16x8_t __p0) { - int16x8_t __ret; - __ret = (int16x8_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 33); - return __ret; -} -#else -__ai int16x8_t vabsq_s16(int16x8_t __p0) { - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __ret; - __ret = (int16x8_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 33); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vabs_s8(int8x8_t __p0) { - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vabs_v((int8x8_t)__p0, 0); - return __ret; -} -#else -__ai int8x8_t vabs_s8(int8x8_t __p0) { - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vabs_v((int8x8_t)__rev0, 0); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vabs_f32(float32x2_t __p0) { - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vabs_v((int8x8_t)__p0, 9); - return __ret; -} -#else -__ai float32x2_t vabs_f32(float32x2_t __p0) { - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vabs_v((int8x8_t)__rev0, 9); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vabs_s32(int32x2_t __p0) { - int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vabs_v((int8x8_t)__p0, 2); - return __ret; -} -#else -__ai int32x2_t vabs_s32(int32x2_t __p0) { - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vabs_v((int8x8_t)__rev0, 2); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vabs_s16(int16x4_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vabs_v((int8x8_t)__p0, 1); - return __ret; -} -#else -__ai int16x4_t vabs_s16(int16x4_t __p0) { - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vabs_v((int8x8_t)__rev0, 1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vaddq_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16_t __ret; - __ret = __p0 + __p1; - return __ret; -} -#else 
-__ai uint8x16_t vaddq_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __ret; - __ret = __rev0 + __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vaddq_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4_t __ret; - __ret = __p0 + __p1; - return __ret; -} -#else -__ai uint32x4_t vaddq_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint32x4_t __ret; - __ret = __rev0 + __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vaddq_u64(uint64x2_t __p0, uint64x2_t __p1) { - uint64x2_t __ret; - __ret = __p0 + __p1; - return __ret; -} -#else -__ai uint64x2_t vaddq_u64(uint64x2_t __p0, uint64x2_t __p1) { - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint64x2_t __ret; - __ret = __rev0 + __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vaddq_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint16x8_t __ret; - __ret = __p0 + __p1; - return __ret; -} -#else -__ai uint16x8_t vaddq_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __ret; - __ret = __rev0 + __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vaddq_s8(int8x16_t __p0, int8x16_t __p1) { - int8x16_t __ret; - __ret = __p0 + __p1; - return __ret; -} -#else -__ai int8x16_t vaddq_s8(int8x16_t __p0, int8x16_t __p1) { - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __ret; - __ret = __rev0 + __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vaddq_f32(float32x4_t __p0, float32x4_t __p1) { - float32x4_t __ret; - __ret = __p0 + __p1; - return __ret; -} -#else -__ai float32x4_t vaddq_f32(float32x4_t __p0, float32x4_t __p1) { - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - float32x4_t __ret; - __ret = __rev0 + __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vaddq_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4_t __ret; - __ret = __p0 + __p1; - return __ret; -} -#else -__ai int32x4_t vaddq_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __rev1; __rev1 = 
__builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int32x4_t __ret; - __ret = __rev0 + __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vaddq_s64(int64x2_t __p0, int64x2_t __p1) { - int64x2_t __ret; - __ret = __p0 + __p1; - return __ret; -} -#else -__ai int64x2_t vaddq_s64(int64x2_t __p0, int64x2_t __p1) { - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - int64x2_t __ret; - __ret = __rev0 + __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vaddq_s16(int16x8_t __p0, int16x8_t __p1) { - int16x8_t __ret; - __ret = __p0 + __p1; - return __ret; -} -#else -__ai int16x8_t vaddq_s16(int16x8_t __p0, int16x8_t __p1) { - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __ret; - __ret = __rev0 + __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vadd_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x8_t __ret; - __ret = __p0 + __p1; - return __ret; -} -#else -__ai uint8x8_t vadd_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __ret; - __ret = __rev0 + __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vadd_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint32x2_t __ret; - __ret = __p0 + __p1; - return __ret; -} -#else -__ai uint32x2_t vadd_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint32x2_t __ret; - __ret = __rev0 + __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x1_t vadd_u64(uint64x1_t __p0, uint64x1_t __p1) { - uint64x1_t __ret; - __ret = __p0 + __p1; - return __ret; -} -#else -__ai uint64x1_t vadd_u64(uint64x1_t __p0, uint64x1_t __p1) { - uint64x1_t __ret; - __ret = __p0 + __p1; - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vadd_u16(uint16x4_t __p0, uint16x4_t __p1) { - uint16x4_t __ret; - __ret = __p0 + __p1; - return __ret; -} -#else -__ai uint16x4_t vadd_u16(uint16x4_t __p0, uint16x4_t __p1) { - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint16x4_t __ret; - __ret = __rev0 + __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vadd_s8(int8x8_t __p0, int8x8_t __p1) { - int8x8_t __ret; - __ret = __p0 + __p1; - return __ret; -} -#else -__ai int8x8_t vadd_s8(int8x8_t __p0, int8x8_t __p1) { - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __ret; - __ret = __rev0 + __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - 
return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vadd_f32(float32x2_t __p0, float32x2_t __p1) { - float32x2_t __ret; - __ret = __p0 + __p1; - return __ret; -} -#else -__ai float32x2_t vadd_f32(float32x2_t __p0, float32x2_t __p1) { - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - float32x2_t __ret; - __ret = __rev0 + __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vadd_s32(int32x2_t __p0, int32x2_t __p1) { - int32x2_t __ret; - __ret = __p0 + __p1; - return __ret; -} -#else -__ai int32x2_t vadd_s32(int32x2_t __p0, int32x2_t __p1) { - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - int32x2_t __ret; - __ret = __rev0 + __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x1_t vadd_s64(int64x1_t __p0, int64x1_t __p1) { - int64x1_t __ret; - __ret = __p0 + __p1; - return __ret; -} -#else -__ai int64x1_t vadd_s64(int64x1_t __p0, int64x1_t __p1) { - int64x1_t __ret; - __ret = __p0 + __p1; - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vadd_s16(int16x4_t __p0, int16x4_t __p1) { - int16x4_t __ret; - __ret = __p0 + __p1; - return __ret; -} -#else -__ai int16x4_t vadd_s16(int16x4_t __p0, int16x4_t __p1) { - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int16x4_t __ret; - __ret = __rev0 + __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vaddhn_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17); - return __ret; -} -#else -__ai uint16x4_t vaddhn_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vaddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 17); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -__ai uint16x4_t __noswap_vaddhn_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vaddhn_u64(uint64x2_t __p0, uint64x2_t __p1) { - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18); - return __ret; -} -#else -__ai uint32x2_t vaddhn_u64(uint64x2_t __p0, uint64x2_t __p1) { - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vaddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 18); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -__ai uint32x2_t __noswap_vaddhn_u64(uint64x2_t __p0, uint64x2_t __p1) { - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vaddhn_u16(uint16x8_t __p0, uint16x8_t 
__p1) { - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16); - return __ret; -} -#else -__ai uint8x8_t vaddhn_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vaddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 16); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -__ai uint8x8_t __noswap_vaddhn_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vaddhn_s32(int32x4_t __p0, int32x4_t __p1) { - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1); - return __ret; -} -#else -__ai int16x4_t vaddhn_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vaddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -__ai int16x4_t __noswap_vaddhn_s32(int32x4_t __p0, int32x4_t __p1) { - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vaddhn_s64(int64x2_t __p0, int64x2_t __p1) { - int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2); - return __ret; -} -#else -__ai int32x2_t vaddhn_s64(int64x2_t __p0, int64x2_t __p1) { - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vaddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 2); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -__ai int32x2_t __noswap_vaddhn_s64(int64x2_t __p0, int64x2_t __p1) { - int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vaddhn_s16(int16x8_t __p0, int16x8_t __p1) { - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0); - return __ret; -} -#else -__ai int8x8_t vaddhn_s16(int16x8_t __p0, int16x8_t __p1) { - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vaddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 0); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -__ai int8x8_t __noswap_vaddhn_s16(int16x8_t __p0, int16x8_t __p1) { - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vandq_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16_t __ret; - __ret = __p0 & __p1; - return __ret; -} -#else -__ai uint8x16_t vandq_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 
14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __ret; - __ret = __rev0 & __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vandq_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4_t __ret; - __ret = __p0 & __p1; - return __ret; -} -#else -__ai uint32x4_t vandq_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint32x4_t __ret; - __ret = __rev0 & __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vandq_u64(uint64x2_t __p0, uint64x2_t __p1) { - uint64x2_t __ret; - __ret = __p0 & __p1; - return __ret; -} -#else -__ai uint64x2_t vandq_u64(uint64x2_t __p0, uint64x2_t __p1) { - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint64x2_t __ret; - __ret = __rev0 & __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vandq_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint16x8_t __ret; - __ret = __p0 & __p1; - return __ret; -} -#else -__ai uint16x8_t vandq_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __ret; - __ret = __rev0 & __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vandq_s8(int8x16_t __p0, int8x16_t __p1) { - int8x16_t __ret; - __ret = __p0 & __p1; - return __ret; -} -#else -__ai int8x16_t vandq_s8(int8x16_t __p0, int8x16_t __p1) { - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __ret; - __ret = __rev0 & __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vandq_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4_t __ret; - __ret = __p0 & __p1; - return __ret; -} -#else -__ai int32x4_t vandq_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int32x4_t __ret; - __ret = __rev0 & __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vandq_s64(int64x2_t __p0, int64x2_t __p1) { - int64x2_t __ret; - __ret = __p0 & __p1; - return __ret; -} -#else -__ai int64x2_t vandq_s64(int64x2_t __p0, int64x2_t __p1) { - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - int64x2_t __ret; - __ret = __rev0 & __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef 
__LITTLE_ENDIAN__ -__ai int16x8_t vandq_s16(int16x8_t __p0, int16x8_t __p1) { - int16x8_t __ret; - __ret = __p0 & __p1; - return __ret; -} -#else -__ai int16x8_t vandq_s16(int16x8_t __p0, int16x8_t __p1) { - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __ret; - __ret = __rev0 & __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vand_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x8_t __ret; - __ret = __p0 & __p1; - return __ret; -} -#else -__ai uint8x8_t vand_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __ret; - __ret = __rev0 & __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vand_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint32x2_t __ret; - __ret = __p0 & __p1; - return __ret; -} -#else -__ai uint32x2_t vand_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint32x2_t __ret; - __ret = __rev0 & __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x1_t vand_u64(uint64x1_t __p0, uint64x1_t __p1) { - uint64x1_t __ret; - __ret = __p0 & __p1; - return __ret; -} -#else -__ai uint64x1_t vand_u64(uint64x1_t __p0, uint64x1_t __p1) { - uint64x1_t __ret; - __ret = __p0 & __p1; - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vand_u16(uint16x4_t __p0, uint16x4_t __p1) { - uint16x4_t __ret; - __ret = __p0 & __p1; - return __ret; -} -#else -__ai uint16x4_t vand_u16(uint16x4_t __p0, uint16x4_t __p1) { - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint16x4_t __ret; - __ret = __rev0 & __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vand_s8(int8x8_t __p0, int8x8_t __p1) { - int8x8_t __ret; - __ret = __p0 & __p1; - return __ret; -} -#else -__ai int8x8_t vand_s8(int8x8_t __p0, int8x8_t __p1) { - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __ret; - __ret = __rev0 & __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vand_s32(int32x2_t __p0, int32x2_t __p1) { - int32x2_t __ret; - __ret = __p0 & __p1; - return __ret; -} -#else -__ai int32x2_t vand_s32(int32x2_t __p0, int32x2_t __p1) { - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - int32x2_t __ret; - __ret = __rev0 & __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x1_t vand_s64(int64x1_t __p0, int64x1_t __p1) { - int64x1_t __ret; - __ret = __p0 & __p1; - return __ret; -} -#else -__ai 
int64x1_t vand_s64(int64x1_t __p0, int64x1_t __p1) { - int64x1_t __ret; - __ret = __p0 & __p1; - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vand_s16(int16x4_t __p0, int16x4_t __p1) { - int16x4_t __ret; - __ret = __p0 & __p1; - return __ret; -} -#else -__ai int16x4_t vand_s16(int16x4_t __p0, int16x4_t __p1) { - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int16x4_t __ret; - __ret = __rev0 & __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vbicq_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16_t __ret; - __ret = __p0 & ~__p1; - return __ret; -} -#else -__ai uint8x16_t vbicq_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __ret; - __ret = __rev0 & ~__rev1; - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vbicq_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4_t __ret; - __ret = __p0 & ~__p1; - return __ret; -} -#else -__ai uint32x4_t vbicq_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint32x4_t __ret; - __ret = __rev0 & ~__rev1; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vbicq_u64(uint64x2_t __p0, uint64x2_t __p1) { - uint64x2_t __ret; - __ret = __p0 & ~__p1; - return __ret; -} -#else -__ai uint64x2_t vbicq_u64(uint64x2_t __p0, uint64x2_t __p1) { - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint64x2_t __ret; - __ret = __rev0 & ~__rev1; - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vbicq_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint16x8_t __ret; - __ret = __p0 & ~__p1; - return __ret; -} -#else -__ai uint16x8_t vbicq_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __ret; - __ret = __rev0 & ~__rev1; - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vbicq_s8(int8x16_t __p0, int8x16_t __p1) { - int8x16_t __ret; - __ret = __p0 & ~__p1; - return __ret; -} -#else -__ai int8x16_t vbicq_s8(int8x16_t __p0, int8x16_t __p1) { - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __ret; - __ret = __rev0 & ~__rev1; - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vbicq_s32(int32x4_t __p0, int32x4_t 
__p1) { - int32x4_t __ret; - __ret = __p0 & ~__p1; - return __ret; -} -#else -__ai int32x4_t vbicq_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int32x4_t __ret; - __ret = __rev0 & ~__rev1; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vbicq_s64(int64x2_t __p0, int64x2_t __p1) { - int64x2_t __ret; - __ret = __p0 & ~__p1; - return __ret; -} -#else -__ai int64x2_t vbicq_s64(int64x2_t __p0, int64x2_t __p1) { - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - int64x2_t __ret; - __ret = __rev0 & ~__rev1; - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vbicq_s16(int16x8_t __p0, int16x8_t __p1) { - int16x8_t __ret; - __ret = __p0 & ~__p1; - return __ret; -} -#else -__ai int16x8_t vbicq_s16(int16x8_t __p0, int16x8_t __p1) { - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __ret; - __ret = __rev0 & ~__rev1; - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vbic_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x8_t __ret; - __ret = __p0 & ~__p1; - return __ret; -} -#else -__ai uint8x8_t vbic_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __ret; - __ret = __rev0 & ~__rev1; - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vbic_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint32x2_t __ret; - __ret = __p0 & ~__p1; - return __ret; -} -#else -__ai uint32x2_t vbic_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint32x2_t __ret; - __ret = __rev0 & ~__rev1; - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x1_t vbic_u64(uint64x1_t __p0, uint64x1_t __p1) { - uint64x1_t __ret; - __ret = __p0 & ~__p1; - return __ret; -} -#else -__ai uint64x1_t vbic_u64(uint64x1_t __p0, uint64x1_t __p1) { - uint64x1_t __ret; - __ret = __p0 & ~__p1; - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vbic_u16(uint16x4_t __p0, uint16x4_t __p1) { - uint16x4_t __ret; - __ret = __p0 & ~__p1; - return __ret; -} -#else -__ai uint16x4_t vbic_u16(uint16x4_t __p0, uint16x4_t __p1) { - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint16x4_t __ret; - __ret = __rev0 & ~__rev1; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vbic_s8(int8x8_t __p0, int8x8_t __p1) { - int8x8_t __ret; - __ret = __p0 & ~__p1; - return __ret; -} -#else -__ai int8x8_t vbic_s8(int8x8_t __p0, int8x8_t __p1) { - int8x8_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __ret; - __ret = __rev0 & ~__rev1; - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vbic_s32(int32x2_t __p0, int32x2_t __p1) { - int32x2_t __ret; - __ret = __p0 & ~__p1; - return __ret; -} -#else -__ai int32x2_t vbic_s32(int32x2_t __p0, int32x2_t __p1) { - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - int32x2_t __ret; - __ret = __rev0 & ~__rev1; - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x1_t vbic_s64(int64x1_t __p0, int64x1_t __p1) { - int64x1_t __ret; - __ret = __p0 & ~__p1; - return __ret; -} -#else -__ai int64x1_t vbic_s64(int64x1_t __p0, int64x1_t __p1) { - int64x1_t __ret; - __ret = __p0 & ~__p1; - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vbic_s16(int16x4_t __p0, int16x4_t __p1) { - int16x4_t __ret; - __ret = __p0 & ~__p1; - return __ret; -} -#else -__ai int16x4_t vbic_s16(int16x4_t __p0, int16x4_t __p1) { - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int16x4_t __ret; - __ret = __rev0 & ~__rev1; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly8x8_t vbsl_p8(uint8x8_t __p0, poly8x8_t __p1, poly8x8_t __p2) { - poly8x8_t __ret; - __ret = (poly8x8_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 4); - return __ret; -} -#else -__ai poly8x8_t vbsl_p8(uint8x8_t __p0, poly8x8_t __p1, poly8x8_t __p2) { - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x8_t __ret; - __ret = (poly8x8_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 4); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly16x4_t vbsl_p16(uint16x4_t __p0, poly16x4_t __p1, poly16x4_t __p2) { - poly16x4_t __ret; - __ret = (poly16x4_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 5); - return __ret; -} -#else -__ai poly16x4_t vbsl_p16(uint16x4_t __p0, poly16x4_t __p1, poly16x4_t __p2) { - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - poly16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - poly16x4_t __ret; - __ret = (poly16x4_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 5); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly8x16_t vbslq_p8(uint8x16_t __p0, poly8x16_t __p1, poly8x16_t __p2) { - poly8x16_t __ret; - __ret = (poly8x16_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 36); - return __ret; -} -#else -__ai poly8x16_t vbslq_p8(uint8x16_t __p0, poly8x16_t __p1, poly8x16_t __p2) { - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, 
__p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x16_t __ret; - __ret = (poly8x16_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 36); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly16x8_t vbslq_p16(uint16x8_t __p0, poly16x8_t __p1, poly16x8_t __p2) { - poly16x8_t __ret; - __ret = (poly16x8_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 37); - return __ret; -} -#else -__ai poly16x8_t vbslq_p16(uint16x8_t __p0, poly16x8_t __p1, poly16x8_t __p2) { - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - poly16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - poly16x8_t __ret; - __ret = (poly16x8_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 37); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vbslq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { - uint8x16_t __ret; - __ret = (uint8x16_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 48); - return __ret; -} -#else -__ai uint8x16_t vbslq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __ret; - __ret = (uint8x16_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 48); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vbslq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50); - return __ret; -} -#else -__ai uint32x4_t vbslq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vbslq_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { - uint64x2_t __ret; - __ret = (uint64x2_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 51); - return __ret; -} -#else -__ai uint64x2_t vbslq_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - 
uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); - uint64x2_t __ret; - __ret = (uint64x2_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 51); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vbslq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 49); - return __ret; -} -#else -__ai uint16x8_t vbslq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 49); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vbslq_s8(uint8x16_t __p0, int8x16_t __p1, int8x16_t __p2) { - int8x16_t __ret; - __ret = (int8x16_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 32); - return __ret; -} -#else -__ai int8x16_t vbslq_s8(uint8x16_t __p0, int8x16_t __p1, int8x16_t __p2) { - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __ret; - __ret = (int8x16_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 32); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vbslq_f32(uint32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); - return __ret; -} -#else -__ai float32x4_t vbslq_f32(uint32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - float32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vbslq_s32(uint32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34); - return __ret; -} -#else -__ai int32x4_t vbslq_s32(uint32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - int32x4_t __ret; - __ret = (int32x4_t) 
__builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vbslq_s64(uint64x2_t __p0, int64x2_t __p1, int64x2_t __p2) { - int64x2_t __ret; - __ret = (int64x2_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 35); - return __ret; -} -#else -__ai int64x2_t vbslq_s64(uint64x2_t __p0, int64x2_t __p1, int64x2_t __p2) { - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - int64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); - int64x2_t __ret; - __ret = (int64x2_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 35); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vbslq_s16(uint16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { - int16x8_t __ret; - __ret = (int16x8_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 33); - return __ret; -} -#else -__ai int16x8_t vbslq_s16(uint16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __ret; - __ret = (int16x8_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 33); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vbsl_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 16); - return __ret; -} -#else -__ai uint8x8_t vbsl_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 16); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vbsl_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 18); - return __ret; -} -#else -__ai uint32x2_t vbsl_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 18); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x1_t vbsl_u64(uint64x1_t __p0, uint64x1_t __p1, uint64x1_t __p2) { - uint64x1_t __ret; - __ret = (uint64x1_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 19); - return 
__ret; -} -#else -__ai uint64x1_t vbsl_u64(uint64x1_t __p0, uint64x1_t __p1, uint64x1_t __p2) { - uint64x1_t __ret; - __ret = (uint64x1_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 19); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vbsl_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 17); - return __ret; -} -#else -__ai uint16x4_t vbsl_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 17); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vbsl_s8(uint8x8_t __p0, int8x8_t __p1, int8x8_t __p2) { - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 0); - return __ret; -} -#else -__ai int8x8_t vbsl_s8(uint8x8_t __p0, int8x8_t __p1, int8x8_t __p2) { - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 0); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vbsl_f32(uint32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); - return __ret; -} -#else -__ai float32x2_t vbsl_f32(uint32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - float32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vbsl_s32(uint32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { - int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2); - return __ret; -} -#else -__ai int32x2_t vbsl_s32(uint32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); - int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 2); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x1_t vbsl_s64(uint64x1_t __p0, int64x1_t __p1, int64x1_t __p2) { - int64x1_t __ret; - __ret = (int64x1_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 3); - return 
__ret; -} -#else -__ai int64x1_t vbsl_s64(uint64x1_t __p0, int64x1_t __p1, int64x1_t __p2) { - int64x1_t __ret; - __ret = (int64x1_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 3); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vbsl_s16(uint16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 1); - return __ret; -} -#else -__ai int16x4_t vbsl_s16(uint16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vcageq_f32(float32x4_t __p0, float32x4_t __p1) { - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vcageq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); - return __ret; -} -#else -__ai uint32x4_t vcageq_f32(float32x4_t __p0, float32x4_t __p1) { - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vcageq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vcage_f32(float32x2_t __p0, float32x2_t __p1) { - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vcage_v((int8x8_t)__p0, (int8x8_t)__p1, 18); - return __ret; -} -#else -__ai uint32x2_t vcage_f32(float32x2_t __p0, float32x2_t __p1) { - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vcage_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vcagtq_f32(float32x4_t __p0, float32x4_t __p1) { - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vcagtq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); - return __ret; -} -#else -__ai uint32x4_t vcagtq_f32(float32x4_t __p0, float32x4_t __p1) { - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vcagtq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vcagt_f32(float32x2_t __p0, float32x2_t __p1) { - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vcagt_v((int8x8_t)__p0, (int8x8_t)__p1, 18); - return __ret; -} -#else -__ai uint32x2_t vcagt_f32(float32x2_t __p0, float32x2_t __p1) { - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vcagt_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - 
-#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vcaleq_f32(float32x4_t __p0, float32x4_t __p1) { - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vcaleq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); - return __ret; -} -#else -__ai uint32x4_t vcaleq_f32(float32x4_t __p0, float32x4_t __p1) { - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vcaleq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vcale_f32(float32x2_t __p0, float32x2_t __p1) { - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vcale_v((int8x8_t)__p0, (int8x8_t)__p1, 18); - return __ret; -} -#else -__ai uint32x2_t vcale_f32(float32x2_t __p0, float32x2_t __p1) { - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vcale_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vcaltq_f32(float32x4_t __p0, float32x4_t __p1) { - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vcaltq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); - return __ret; -} -#else -__ai uint32x4_t vcaltq_f32(float32x4_t __p0, float32x4_t __p1) { - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vcaltq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vcalt_f32(float32x2_t __p0, float32x2_t __p1) { - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vcalt_v((int8x8_t)__p0, (int8x8_t)__p1, 18); - return __ret; -} -#else -__ai uint32x2_t vcalt_f32(float32x2_t __p0, float32x2_t __p1) { - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vcalt_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vceq_p8(poly8x8_t __p0, poly8x8_t __p1) { - uint8x8_t __ret; - __ret = (uint8x8_t)(__p0 == __p1); - return __ret; -} -#else -__ai uint8x8_t vceq_p8(poly8x8_t __p0, poly8x8_t __p1) { - poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __ret; - __ret = (uint8x8_t)(__rev0 == __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vceqq_p8(poly8x16_t __p0, poly8x16_t __p1) { - uint8x16_t __ret; - __ret = (uint8x16_t)(__p0 == __p1); - return __ret; -} -#else -__ai uint8x16_t vceqq_p8(poly8x16_t __p0, poly8x16_t __p1) { - poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 
10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __ret; - __ret = (uint8x16_t)(__rev0 == __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vceqq_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16_t __ret; - __ret = (uint8x16_t)(__p0 == __p1); - return __ret; -} -#else -__ai uint8x16_t vceqq_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __ret; - __ret = (uint8x16_t)(__rev0 == __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vceqq_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4_t __ret; - __ret = (uint32x4_t)(__p0 == __p1); - return __ret; -} -#else -__ai uint32x4_t vceqq_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint32x4_t __ret; - __ret = (uint32x4_t)(__rev0 == __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vceqq_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0 == __p1); - return __ret; -} -#else -__ai uint16x8_t vceqq_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __ret; - __ret = (uint16x8_t)(__rev0 == __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vceqq_s8(int8x16_t __p0, int8x16_t __p1) { - uint8x16_t __ret; - __ret = (uint8x16_t)(__p0 == __p1); - return __ret; -} -#else -__ai uint8x16_t vceqq_s8(int8x16_t __p0, int8x16_t __p1) { - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __ret; - __ret = (uint8x16_t)(__rev0 == __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vceqq_f32(float32x4_t __p0, float32x4_t __p1) { - uint32x4_t __ret; - __ret = (uint32x4_t)(__p0 == __p1); - return __ret; -} -#else -__ai uint32x4_t vceqq_f32(float32x4_t __p0, float32x4_t __p1) { - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint32x4_t __ret; - __ret = (uint32x4_t)(__rev0 == __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vceqq_s32(int32x4_t __p0, int32x4_t __p1) { - uint32x4_t __ret; - __ret = (uint32x4_t)(__p0 == __p1); - return __ret; -} -#else -__ai uint32x4_t vceqq_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - 
int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint32x4_t __ret; - __ret = (uint32x4_t)(__rev0 == __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vceqq_s16(int16x8_t __p0, int16x8_t __p1) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0 == __p1); - return __ret; -} -#else -__ai uint16x8_t vceqq_s16(int16x8_t __p0, int16x8_t __p1) { - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __ret; - __ret = (uint16x8_t)(__rev0 == __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vceq_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x8_t __ret; - __ret = (uint8x8_t)(__p0 == __p1); - return __ret; -} -#else -__ai uint8x8_t vceq_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __ret; - __ret = (uint8x8_t)(__rev0 == __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vceq_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint32x2_t __ret; - __ret = (uint32x2_t)(__p0 == __p1); - return __ret; -} -#else -__ai uint32x2_t vceq_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint32x2_t __ret; - __ret = (uint32x2_t)(__rev0 == __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vceq_u16(uint16x4_t __p0, uint16x4_t __p1) { - uint16x4_t __ret; - __ret = (uint16x4_t)(__p0 == __p1); - return __ret; -} -#else -__ai uint16x4_t vceq_u16(uint16x4_t __p0, uint16x4_t __p1) { - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint16x4_t __ret; - __ret = (uint16x4_t)(__rev0 == __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vceq_s8(int8x8_t __p0, int8x8_t __p1) { - uint8x8_t __ret; - __ret = (uint8x8_t)(__p0 == __p1); - return __ret; -} -#else -__ai uint8x8_t vceq_s8(int8x8_t __p0, int8x8_t __p1) { - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __ret; - __ret = (uint8x8_t)(__rev0 == __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vceq_f32(float32x2_t __p0, float32x2_t __p1) { - uint32x2_t __ret; - __ret = (uint32x2_t)(__p0 == __p1); - return __ret; -} -#else -__ai uint32x2_t vceq_f32(float32x2_t __p0, float32x2_t __p1) { - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint32x2_t __ret; - __ret = (uint32x2_t)(__rev0 == __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef 
__LITTLE_ENDIAN__ -__ai uint32x2_t vceq_s32(int32x2_t __p0, int32x2_t __p1) { - uint32x2_t __ret; - __ret = (uint32x2_t)(__p0 == __p1); - return __ret; -} -#else -__ai uint32x2_t vceq_s32(int32x2_t __p0, int32x2_t __p1) { - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint32x2_t __ret; - __ret = (uint32x2_t)(__rev0 == __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vceq_s16(int16x4_t __p0, int16x4_t __p1) { - uint16x4_t __ret; - __ret = (uint16x4_t)(__p0 == __p1); - return __ret; -} -#else -__ai uint16x4_t vceq_s16(int16x4_t __p0, int16x4_t __p1) { - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint16x4_t __ret; - __ret = (uint16x4_t)(__rev0 == __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vcgeq_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16_t __ret; - __ret = (uint8x16_t)(__p0 >= __p1); - return __ret; -} -#else -__ai uint8x16_t vcgeq_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __ret; - __ret = (uint8x16_t)(__rev0 >= __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vcgeq_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4_t __ret; - __ret = (uint32x4_t)(__p0 >= __p1); - return __ret; -} -#else -__ai uint32x4_t vcgeq_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint32x4_t __ret; - __ret = (uint32x4_t)(__rev0 >= __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vcgeq_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0 >= __p1); - return __ret; -} -#else -__ai uint16x8_t vcgeq_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __ret; - __ret = (uint16x8_t)(__rev0 >= __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vcgeq_s8(int8x16_t __p0, int8x16_t __p1) { - uint8x16_t __ret; - __ret = (uint8x16_t)(__p0 >= __p1); - return __ret; -} -#else -__ai uint8x16_t vcgeq_s8(int8x16_t __p0, int8x16_t __p1) { - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __ret; - __ret = (uint8x16_t)(__rev0 >= __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t 
vcgeq_f32(float32x4_t __p0, float32x4_t __p1) { - uint32x4_t __ret; - __ret = (uint32x4_t)(__p0 >= __p1); - return __ret; -} -#else -__ai uint32x4_t vcgeq_f32(float32x4_t __p0, float32x4_t __p1) { - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint32x4_t __ret; - __ret = (uint32x4_t)(__rev0 >= __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vcgeq_s32(int32x4_t __p0, int32x4_t __p1) { - uint32x4_t __ret; - __ret = (uint32x4_t)(__p0 >= __p1); - return __ret; -} -#else -__ai uint32x4_t vcgeq_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint32x4_t __ret; - __ret = (uint32x4_t)(__rev0 >= __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vcgeq_s16(int16x8_t __p0, int16x8_t __p1) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0 >= __p1); - return __ret; -} -#else -__ai uint16x8_t vcgeq_s16(int16x8_t __p0, int16x8_t __p1) { - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __ret; - __ret = (uint16x8_t)(__rev0 >= __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vcge_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x8_t __ret; - __ret = (uint8x8_t)(__p0 >= __p1); - return __ret; -} -#else -__ai uint8x8_t vcge_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __ret; - __ret = (uint8x8_t)(__rev0 >= __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vcge_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint32x2_t __ret; - __ret = (uint32x2_t)(__p0 >= __p1); - return __ret; -} -#else -__ai uint32x2_t vcge_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint32x2_t __ret; - __ret = (uint32x2_t)(__rev0 >= __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vcge_u16(uint16x4_t __p0, uint16x4_t __p1) { - uint16x4_t __ret; - __ret = (uint16x4_t)(__p0 >= __p1); - return __ret; -} -#else -__ai uint16x4_t vcge_u16(uint16x4_t __p0, uint16x4_t __p1) { - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint16x4_t __ret; - __ret = (uint16x4_t)(__rev0 >= __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vcge_s8(int8x8_t __p0, int8x8_t __p1) { - uint8x8_t __ret; - __ret = (uint8x8_t)(__p0 >= __p1); - return __ret; -} -#else -__ai uint8x8_t vcge_s8(int8x8_t __p0, int8x8_t __p1) { - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 
4, 3, 2, 1, 0); - int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __ret; - __ret = (uint8x8_t)(__rev0 >= __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vcge_f32(float32x2_t __p0, float32x2_t __p1) { - uint32x2_t __ret; - __ret = (uint32x2_t)(__p0 >= __p1); - return __ret; -} -#else -__ai uint32x2_t vcge_f32(float32x2_t __p0, float32x2_t __p1) { - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint32x2_t __ret; - __ret = (uint32x2_t)(__rev0 >= __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vcge_s32(int32x2_t __p0, int32x2_t __p1) { - uint32x2_t __ret; - __ret = (uint32x2_t)(__p0 >= __p1); - return __ret; -} -#else -__ai uint32x2_t vcge_s32(int32x2_t __p0, int32x2_t __p1) { - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint32x2_t __ret; - __ret = (uint32x2_t)(__rev0 >= __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vcge_s16(int16x4_t __p0, int16x4_t __p1) { - uint16x4_t __ret; - __ret = (uint16x4_t)(__p0 >= __p1); - return __ret; -} -#else -__ai uint16x4_t vcge_s16(int16x4_t __p0, int16x4_t __p1) { - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint16x4_t __ret; - __ret = (uint16x4_t)(__rev0 >= __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vcgtq_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16_t __ret; - __ret = (uint8x16_t)(__p0 > __p1); - return __ret; -} -#else -__ai uint8x16_t vcgtq_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __ret; - __ret = (uint8x16_t)(__rev0 > __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vcgtq_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4_t __ret; - __ret = (uint32x4_t)(__p0 > __p1); - return __ret; -} -#else -__ai uint32x4_t vcgtq_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint32x4_t __ret; - __ret = (uint32x4_t)(__rev0 > __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vcgtq_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0 > __p1); - return __ret; -} -#else -__ai uint16x8_t vcgtq_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __ret; - __ret = (uint16x8_t)(__rev0 > __rev1); - __ret 
= __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vcgtq_s8(int8x16_t __p0, int8x16_t __p1) { - uint8x16_t __ret; - __ret = (uint8x16_t)(__p0 > __p1); - return __ret; -} -#else -__ai uint8x16_t vcgtq_s8(int8x16_t __p0, int8x16_t __p1) { - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __ret; - __ret = (uint8x16_t)(__rev0 > __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vcgtq_f32(float32x4_t __p0, float32x4_t __p1) { - uint32x4_t __ret; - __ret = (uint32x4_t)(__p0 > __p1); - return __ret; -} -#else -__ai uint32x4_t vcgtq_f32(float32x4_t __p0, float32x4_t __p1) { - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint32x4_t __ret; - __ret = (uint32x4_t)(__rev0 > __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vcgtq_s32(int32x4_t __p0, int32x4_t __p1) { - uint32x4_t __ret; - __ret = (uint32x4_t)(__p0 > __p1); - return __ret; -} -#else -__ai uint32x4_t vcgtq_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint32x4_t __ret; - __ret = (uint32x4_t)(__rev0 > __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vcgtq_s16(int16x8_t __p0, int16x8_t __p1) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0 > __p1); - return __ret; -} -#else -__ai uint16x8_t vcgtq_s16(int16x8_t __p0, int16x8_t __p1) { - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __ret; - __ret = (uint16x8_t)(__rev0 > __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vcgt_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x8_t __ret; - __ret = (uint8x8_t)(__p0 > __p1); - return __ret; -} -#else -__ai uint8x8_t vcgt_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __ret; - __ret = (uint8x8_t)(__rev0 > __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vcgt_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint32x2_t __ret; - __ret = (uint32x2_t)(__p0 > __p1); - return __ret; -} -#else -__ai uint32x2_t vcgt_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint32x2_t __ret; - __ret = (uint32x2_t)(__rev0 > __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t 
vcgt_u16(uint16x4_t __p0, uint16x4_t __p1) { - uint16x4_t __ret; - __ret = (uint16x4_t)(__p0 > __p1); - return __ret; -} -#else -__ai uint16x4_t vcgt_u16(uint16x4_t __p0, uint16x4_t __p1) { - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint16x4_t __ret; - __ret = (uint16x4_t)(__rev0 > __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vcgt_s8(int8x8_t __p0, int8x8_t __p1) { - uint8x8_t __ret; - __ret = (uint8x8_t)(__p0 > __p1); - return __ret; -} -#else -__ai uint8x8_t vcgt_s8(int8x8_t __p0, int8x8_t __p1) { - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __ret; - __ret = (uint8x8_t)(__rev0 > __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vcgt_f32(float32x2_t __p0, float32x2_t __p1) { - uint32x2_t __ret; - __ret = (uint32x2_t)(__p0 > __p1); - return __ret; -} -#else -__ai uint32x2_t vcgt_f32(float32x2_t __p0, float32x2_t __p1) { - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint32x2_t __ret; - __ret = (uint32x2_t)(__rev0 > __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vcgt_s32(int32x2_t __p0, int32x2_t __p1) { - uint32x2_t __ret; - __ret = (uint32x2_t)(__p0 > __p1); - return __ret; -} -#else -__ai uint32x2_t vcgt_s32(int32x2_t __p0, int32x2_t __p1) { - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint32x2_t __ret; - __ret = (uint32x2_t)(__rev0 > __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vcgt_s16(int16x4_t __p0, int16x4_t __p1) { - uint16x4_t __ret; - __ret = (uint16x4_t)(__p0 > __p1); - return __ret; -} -#else -__ai uint16x4_t vcgt_s16(int16x4_t __p0, int16x4_t __p1) { - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint16x4_t __ret; - __ret = (uint16x4_t)(__rev0 > __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vcleq_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16_t __ret; - __ret = (uint8x16_t)(__p0 <= __p1); - return __ret; -} -#else -__ai uint8x16_t vcleq_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __ret; - __ret = (uint8x16_t)(__rev0 <= __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vcleq_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4_t __ret; - __ret = (uint32x4_t)(__p0 <= __p1); - return __ret; -} -#else -__ai uint32x4_t vcleq_u32(uint32x4_t __p0, uint32x4_t __p1) { - 
uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint32x4_t __ret; - __ret = (uint32x4_t)(__rev0 <= __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vcleq_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0 <= __p1); - return __ret; -} -#else -__ai uint16x8_t vcleq_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __ret; - __ret = (uint16x8_t)(__rev0 <= __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vcleq_s8(int8x16_t __p0, int8x16_t __p1) { - uint8x16_t __ret; - __ret = (uint8x16_t)(__p0 <= __p1); - return __ret; -} -#else -__ai uint8x16_t vcleq_s8(int8x16_t __p0, int8x16_t __p1) { - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __ret; - __ret = (uint8x16_t)(__rev0 <= __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vcleq_f32(float32x4_t __p0, float32x4_t __p1) { - uint32x4_t __ret; - __ret = (uint32x4_t)(__p0 <= __p1); - return __ret; -} -#else -__ai uint32x4_t vcleq_f32(float32x4_t __p0, float32x4_t __p1) { - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint32x4_t __ret; - __ret = (uint32x4_t)(__rev0 <= __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vcleq_s32(int32x4_t __p0, int32x4_t __p1) { - uint32x4_t __ret; - __ret = (uint32x4_t)(__p0 <= __p1); - return __ret; -} -#else -__ai uint32x4_t vcleq_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint32x4_t __ret; - __ret = (uint32x4_t)(__rev0 <= __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vcleq_s16(int16x8_t __p0, int16x8_t __p1) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0 <= __p1); - return __ret; -} -#else -__ai uint16x8_t vcleq_s16(int16x8_t __p0, int16x8_t __p1) { - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __ret; - __ret = (uint16x8_t)(__rev0 <= __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vcle_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x8_t __ret; - __ret = (uint8x8_t)(__p0 <= __p1); - return __ret; -} -#else -__ai uint8x8_t vcle_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t 
__rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __ret; - __ret = (uint8x8_t)(__rev0 <= __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vcle_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint32x2_t __ret; - __ret = (uint32x2_t)(__p0 <= __p1); - return __ret; -} -#else -__ai uint32x2_t vcle_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint32x2_t __ret; - __ret = (uint32x2_t)(__rev0 <= __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vcle_u16(uint16x4_t __p0, uint16x4_t __p1) { - uint16x4_t __ret; - __ret = (uint16x4_t)(__p0 <= __p1); - return __ret; -} -#else -__ai uint16x4_t vcle_u16(uint16x4_t __p0, uint16x4_t __p1) { - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint16x4_t __ret; - __ret = (uint16x4_t)(__rev0 <= __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vcle_s8(int8x8_t __p0, int8x8_t __p1) { - uint8x8_t __ret; - __ret = (uint8x8_t)(__p0 <= __p1); - return __ret; -} -#else -__ai uint8x8_t vcle_s8(int8x8_t __p0, int8x8_t __p1) { - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __ret; - __ret = (uint8x8_t)(__rev0 <= __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vcle_f32(float32x2_t __p0, float32x2_t __p1) { - uint32x2_t __ret; - __ret = (uint32x2_t)(__p0 <= __p1); - return __ret; -} -#else -__ai uint32x2_t vcle_f32(float32x2_t __p0, float32x2_t __p1) { - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint32x2_t __ret; - __ret = (uint32x2_t)(__rev0 <= __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vcle_s32(int32x2_t __p0, int32x2_t __p1) { - uint32x2_t __ret; - __ret = (uint32x2_t)(__p0 <= __p1); - return __ret; -} -#else -__ai uint32x2_t vcle_s32(int32x2_t __p0, int32x2_t __p1) { - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint32x2_t __ret; - __ret = (uint32x2_t)(__rev0 <= __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vcle_s16(int16x4_t __p0, int16x4_t __p1) { - uint16x4_t __ret; - __ret = (uint16x4_t)(__p0 <= __p1); - return __ret; -} -#else -__ai uint16x4_t vcle_s16(int16x4_t __p0, int16x4_t __p1) { - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint16x4_t __ret; - __ret = (uint16x4_t)(__rev0 <= __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vclsq_s8(int8x16_t __p0) { - int8x16_t __ret; - 
__ret = (int8x16_t) __builtin_neon_vclsq_v((int8x16_t)__p0, 32); - return __ret; -} -#else -__ai int8x16_t vclsq_s8(int8x16_t __p0) { - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __ret; - __ret = (int8x16_t) __builtin_neon_vclsq_v((int8x16_t)__rev0, 32); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vclsq_s32(int32x4_t __p0) { - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vclsq_v((int8x16_t)__p0, 34); - return __ret; -} -#else -__ai int32x4_t vclsq_s32(int32x4_t __p0) { - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vclsq_v((int8x16_t)__rev0, 34); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vclsq_s16(int16x8_t __p0) { - int16x8_t __ret; - __ret = (int16x8_t) __builtin_neon_vclsq_v((int8x16_t)__p0, 33); - return __ret; -} -#else -__ai int16x8_t vclsq_s16(int16x8_t __p0) { - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __ret; - __ret = (int16x8_t) __builtin_neon_vclsq_v((int8x16_t)__rev0, 33); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vcls_s8(int8x8_t __p0) { - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vcls_v((int8x8_t)__p0, 0); - return __ret; -} -#else -__ai int8x8_t vcls_s8(int8x8_t __p0) { - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vcls_v((int8x8_t)__rev0, 0); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vcls_s32(int32x2_t __p0) { - int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vcls_v((int8x8_t)__p0, 2); - return __ret; -} -#else -__ai int32x2_t vcls_s32(int32x2_t __p0) { - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vcls_v((int8x8_t)__rev0, 2); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vcls_s16(int16x4_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vcls_v((int8x8_t)__p0, 1); - return __ret; -} -#else -__ai int16x4_t vcls_s16(int16x4_t __p0) { - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vcls_v((int8x8_t)__rev0, 1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vcltq_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16_t __ret; - __ret = (uint8x16_t)(__p0 < __p1); - return __ret; -} -#else -__ai uint8x16_t vcltq_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __ret; - __ret = (uint8x16_t)(__rev0 < __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - 
-#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vcltq_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4_t __ret; - __ret = (uint32x4_t)(__p0 < __p1); - return __ret; -} -#else -__ai uint32x4_t vcltq_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint32x4_t __ret; - __ret = (uint32x4_t)(__rev0 < __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vcltq_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0 < __p1); - return __ret; -} -#else -__ai uint16x8_t vcltq_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __ret; - __ret = (uint16x8_t)(__rev0 < __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vcltq_s8(int8x16_t __p0, int8x16_t __p1) { - uint8x16_t __ret; - __ret = (uint8x16_t)(__p0 < __p1); - return __ret; -} -#else -__ai uint8x16_t vcltq_s8(int8x16_t __p0, int8x16_t __p1) { - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __ret; - __ret = (uint8x16_t)(__rev0 < __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vcltq_f32(float32x4_t __p0, float32x4_t __p1) { - uint32x4_t __ret; - __ret = (uint32x4_t)(__p0 < __p1); - return __ret; -} -#else -__ai uint32x4_t vcltq_f32(float32x4_t __p0, float32x4_t __p1) { - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint32x4_t __ret; - __ret = (uint32x4_t)(__rev0 < __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vcltq_s32(int32x4_t __p0, int32x4_t __p1) { - uint32x4_t __ret; - __ret = (uint32x4_t)(__p0 < __p1); - return __ret; -} -#else -__ai uint32x4_t vcltq_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint32x4_t __ret; - __ret = (uint32x4_t)(__rev0 < __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vcltq_s16(int16x8_t __p0, int16x8_t __p1) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0 < __p1); - return __ret; -} -#else -__ai uint16x8_t vcltq_s16(int16x8_t __p0, int16x8_t __p1) { - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __ret; - __ret = (uint16x8_t)(__rev0 < __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vclt_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x8_t __ret; - 
__ret = (uint8x8_t)(__p0 < __p1); - return __ret; -} -#else -__ai uint8x8_t vclt_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __ret; - __ret = (uint8x8_t)(__rev0 < __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vclt_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint32x2_t __ret; - __ret = (uint32x2_t)(__p0 < __p1); - return __ret; -} -#else -__ai uint32x2_t vclt_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint32x2_t __ret; - __ret = (uint32x2_t)(__rev0 < __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vclt_u16(uint16x4_t __p0, uint16x4_t __p1) { - uint16x4_t __ret; - __ret = (uint16x4_t)(__p0 < __p1); - return __ret; -} -#else -__ai uint16x4_t vclt_u16(uint16x4_t __p0, uint16x4_t __p1) { - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint16x4_t __ret; - __ret = (uint16x4_t)(__rev0 < __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vclt_s8(int8x8_t __p0, int8x8_t __p1) { - uint8x8_t __ret; - __ret = (uint8x8_t)(__p0 < __p1); - return __ret; -} -#else -__ai uint8x8_t vclt_s8(int8x8_t __p0, int8x8_t __p1) { - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __ret; - __ret = (uint8x8_t)(__rev0 < __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vclt_f32(float32x2_t __p0, float32x2_t __p1) { - uint32x2_t __ret; - __ret = (uint32x2_t)(__p0 < __p1); - return __ret; -} -#else -__ai uint32x2_t vclt_f32(float32x2_t __p0, float32x2_t __p1) { - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint32x2_t __ret; - __ret = (uint32x2_t)(__rev0 < __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vclt_s32(int32x2_t __p0, int32x2_t __p1) { - uint32x2_t __ret; - __ret = (uint32x2_t)(__p0 < __p1); - return __ret; -} -#else -__ai uint32x2_t vclt_s32(int32x2_t __p0, int32x2_t __p1) { - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint32x2_t __ret; - __ret = (uint32x2_t)(__rev0 < __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vclt_s16(int16x4_t __p0, int16x4_t __p1) { - uint16x4_t __ret; - __ret = (uint16x4_t)(__p0 < __p1); - return __ret; -} -#else -__ai uint16x4_t vclt_s16(int16x4_t __p0, int16x4_t __p1) { - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint16x4_t __ret; - __ret = 
(uint16x4_t)(__rev0 < __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vclzq_u8(uint8x16_t __p0) { - uint8x16_t __ret; - __ret = (uint8x16_t) __builtin_neon_vclzq_v((int8x16_t)__p0, 48); - return __ret; -} -#else -__ai uint8x16_t vclzq_u8(uint8x16_t __p0) { - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __ret; - __ret = (uint8x16_t) __builtin_neon_vclzq_v((int8x16_t)__rev0, 48); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vclzq_u32(uint32x4_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vclzq_v((int8x16_t)__p0, 50); - return __ret; -} -#else -__ai uint32x4_t vclzq_u32(uint32x4_t __p0) { - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vclzq_v((int8x16_t)__rev0, 50); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vclzq_u16(uint16x8_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vclzq_v((int8x16_t)__p0, 49); - return __ret; -} -#else -__ai uint16x8_t vclzq_u16(uint16x8_t __p0) { - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vclzq_v((int8x16_t)__rev0, 49); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vclzq_s8(int8x16_t __p0) { - int8x16_t __ret; - __ret = (int8x16_t) __builtin_neon_vclzq_v((int8x16_t)__p0, 32); - return __ret; -} -#else -__ai int8x16_t vclzq_s8(int8x16_t __p0) { - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __ret; - __ret = (int8x16_t) __builtin_neon_vclzq_v((int8x16_t)__rev0, 32); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vclzq_s32(int32x4_t __p0) { - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vclzq_v((int8x16_t)__p0, 34); - return __ret; -} -#else -__ai int32x4_t vclzq_s32(int32x4_t __p0) { - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vclzq_v((int8x16_t)__rev0, 34); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vclzq_s16(int16x8_t __p0) { - int16x8_t __ret; - __ret = (int16x8_t) __builtin_neon_vclzq_v((int8x16_t)__p0, 33); - return __ret; -} -#else -__ai int16x8_t vclzq_s16(int16x8_t __p0) { - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __ret; - __ret = (int16x8_t) __builtin_neon_vclzq_v((int8x16_t)__rev0, 33); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vclz_u8(uint8x8_t __p0) { - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vclz_v((int8x8_t)__p0, 16); - return __ret; -} -#else -__ai uint8x8_t vclz_u8(uint8x8_t __p0) { - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 
1, 0); - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vclz_v((int8x8_t)__rev0, 16); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vclz_u32(uint32x2_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vclz_v((int8x8_t)__p0, 18); - return __ret; -} -#else -__ai uint32x2_t vclz_u32(uint32x2_t __p0) { - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vclz_v((int8x8_t)__rev0, 18); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vclz_u16(uint16x4_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vclz_v((int8x8_t)__p0, 17); - return __ret; -} -#else -__ai uint16x4_t vclz_u16(uint16x4_t __p0) { - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vclz_v((int8x8_t)__rev0, 17); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vclz_s8(int8x8_t __p0) { - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vclz_v((int8x8_t)__p0, 0); - return __ret; -} -#else -__ai int8x8_t vclz_s8(int8x8_t __p0) { - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vclz_v((int8x8_t)__rev0, 0); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vclz_s32(int32x2_t __p0) { - int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vclz_v((int8x8_t)__p0, 2); - return __ret; -} -#else -__ai int32x2_t vclz_s32(int32x2_t __p0) { - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vclz_v((int8x8_t)__rev0, 2); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vclz_s16(int16x4_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vclz_v((int8x8_t)__p0, 1); - return __ret; -} -#else -__ai int16x4_t vclz_s16(int16x4_t __p0) { - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vclz_v((int8x8_t)__rev0, 1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly8x8_t vcnt_p8(poly8x8_t __p0) { - poly8x8_t __ret; - __ret = (poly8x8_t) __builtin_neon_vcnt_v((int8x8_t)__p0, 4); - return __ret; -} -#else -__ai poly8x8_t vcnt_p8(poly8x8_t __p0) { - poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x8_t __ret; - __ret = (poly8x8_t) __builtin_neon_vcnt_v((int8x8_t)__rev0, 4); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly8x16_t vcntq_p8(poly8x16_t __p0) { - poly8x16_t __ret; - __ret = (poly8x16_t) __builtin_neon_vcntq_v((int8x16_t)__p0, 36); - return __ret; -} -#else -__ai poly8x16_t vcntq_p8(poly8x16_t __p0) { - poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x16_t __ret; - __ret = (poly8x16_t) __builtin_neon_vcntq_v((int8x16_t)__rev0, 36); - __ret = __builtin_shufflevector(__ret, 
__ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vcntq_u8(uint8x16_t __p0) { - uint8x16_t __ret; - __ret = (uint8x16_t) __builtin_neon_vcntq_v((int8x16_t)__p0, 48); - return __ret; -} -#else -__ai uint8x16_t vcntq_u8(uint8x16_t __p0) { - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __ret; - __ret = (uint8x16_t) __builtin_neon_vcntq_v((int8x16_t)__rev0, 48); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vcntq_s8(int8x16_t __p0) { - int8x16_t __ret; - __ret = (int8x16_t) __builtin_neon_vcntq_v((int8x16_t)__p0, 32); - return __ret; -} -#else -__ai int8x16_t vcntq_s8(int8x16_t __p0) { - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __ret; - __ret = (int8x16_t) __builtin_neon_vcntq_v((int8x16_t)__rev0, 32); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vcnt_u8(uint8x8_t __p0) { - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vcnt_v((int8x8_t)__p0, 16); - return __ret; -} -#else -__ai uint8x8_t vcnt_u8(uint8x8_t __p0) { - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vcnt_v((int8x8_t)__rev0, 16); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vcnt_s8(int8x8_t __p0) { - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vcnt_v((int8x8_t)__p0, 0); - return __ret; -} -#else -__ai int8x8_t vcnt_s8(int8x8_t __p0) { - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vcnt_v((int8x8_t)__rev0, 0); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly8x16_t vcombine_p8(poly8x8_t __p0, poly8x8_t __p1) { - poly8x16_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); - return __ret; -} -#else -__ai poly8x16_t vcombine_p8(poly8x8_t __p0, poly8x8_t __p1) { - poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x16_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly16x8_t vcombine_p16(poly16x4_t __p0, poly16x4_t __p1) { - poly16x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7); - return __ret; -} -#else -__ai poly16x8_t vcombine_p16(poly16x4_t __p0, poly16x4_t __p1) { - poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - poly16x8_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - 
return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vcombine_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x16_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); - return __ret; -} -#else -__ai uint8x16_t vcombine_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -__ai uint8x16_t __noswap_vcombine_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x16_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vcombine_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint32x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3); - return __ret; -} -#else -__ai uint32x4_t vcombine_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint32x4_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -__ai uint32x4_t __noswap_vcombine_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint32x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vcombine_u64(uint64x1_t __p0, uint64x1_t __p1) { - uint64x2_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 1); - return __ret; -} -#else -__ai uint64x2_t vcombine_u64(uint64x1_t __p0, uint64x1_t __p1) { - uint64x2_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 1); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vcombine_u16(uint16x4_t __p0, uint16x4_t __p1) { - uint16x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7); - return __ret; -} -#else -__ai uint16x8_t vcombine_u16(uint16x4_t __p0, uint16x4_t __p1) { - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint16x8_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -__ai uint16x8_t __noswap_vcombine_u16(uint16x4_t __p0, uint16x4_t __p1) { - uint16x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vcombine_s8(int8x8_t __p0, int8x8_t __p1) { - int8x16_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); - return __ret; -} -#else -__ai int8x16_t vcombine_s8(int8x8_t __p0, int8x8_t __p1) { - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); - 
__ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -__ai int8x16_t __noswap_vcombine_s8(int8x8_t __p0, int8x8_t __p1) { - int8x16_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vcombine_f32(float32x2_t __p0, float32x2_t __p1) { - float32x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3); - return __ret; -} -#else -__ai float32x4_t vcombine_f32(float32x2_t __p0, float32x2_t __p1) { - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - float32x4_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -__ai float32x4_t __noswap_vcombine_f32(float32x2_t __p0, float32x2_t __p1) { - float32x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x8_t vcombine_f16(float16x4_t __p0, float16x4_t __p1) { - float16x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7); - return __ret; -} -#else -__ai float16x8_t vcombine_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - float16x8_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -__ai float16x8_t __noswap_vcombine_f16(float16x4_t __p0, float16x4_t __p1) { - float16x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vcombine_s32(int32x2_t __p0, int32x2_t __p1) { - int32x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3); - return __ret; -} -#else -__ai int32x4_t vcombine_s32(int32x2_t __p0, int32x2_t __p1) { - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - int32x4_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -__ai int32x4_t __noswap_vcombine_s32(int32x2_t __p0, int32x2_t __p1) { - int32x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vcombine_s64(int64x1_t __p0, int64x1_t __p1) { - int64x2_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 1); - return __ret; -} -#else -__ai int64x2_t vcombine_s64(int64x1_t __p0, int64x1_t __p1) { - int64x2_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 1); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vcombine_s16(int16x4_t __p0, int16x4_t __p1) { - int16x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7); - return __ret; -} -#else -__ai int16x8_t vcombine_s16(int16x4_t __p0, int16x4_t __p1) { - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int16x8_t __ret; - __ret = 
__builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -__ai int16x8_t __noswap_vcombine_s16(int16x4_t __p0, int16x4_t __p1) { - int16x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly8x8_t vcreate_p8(uint64_t __p0) { - poly8x8_t __ret; - __ret = (poly8x8_t)(__p0); - return __ret; -} -#else -__ai poly8x8_t vcreate_p8(uint64_t __p0) { - poly8x8_t __ret; - __ret = (poly8x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly16x4_t vcreate_p16(uint64_t __p0) { - poly16x4_t __ret; - __ret = (poly16x4_t)(__p0); - return __ret; -} -#else -__ai poly16x4_t vcreate_p16(uint64_t __p0) { - poly16x4_t __ret; - __ret = (poly16x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vcreate_u8(uint64_t __p0) { - uint8x8_t __ret; - __ret = (uint8x8_t)(__p0); - return __ret; -} -#else -__ai uint8x8_t vcreate_u8(uint64_t __p0) { - uint8x8_t __ret; - __ret = (uint8x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vcreate_u32(uint64_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t)(__p0); - return __ret; -} -#else -__ai uint32x2_t vcreate_u32(uint64_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x1_t vcreate_u64(uint64_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0); - return __ret; -} -#else -__ai uint64x1_t vcreate_u64(uint64_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vcreate_u16(uint64_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t)(__p0); - return __ret; -} -#else -__ai uint16x4_t vcreate_u16(uint64_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vcreate_s8(uint64_t __p0) { - int8x8_t __ret; - __ret = (int8x8_t)(__p0); - return __ret; -} -#else -__ai int8x8_t vcreate_s8(uint64_t __p0) { - int8x8_t __ret; - __ret = (int8x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vcreate_f32(uint64_t __p0) { - float32x2_t __ret; - __ret = (float32x2_t)(__p0); - return __ret; -} -#else -__ai float32x2_t vcreate_f32(uint64_t __p0) { - float32x2_t __ret; - __ret = (float32x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x4_t vcreate_f16(uint64_t __p0) { - float16x4_t __ret; - __ret = (float16x4_t)(__p0); - return __ret; -} -#else -__ai float16x4_t vcreate_f16(uint64_t __p0) { - float16x4_t __ret; - __ret = (float16x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vcreate_s32(uint64_t __p0) { - int32x2_t __ret; - __ret = (int32x2_t)(__p0); - return __ret; -} -#else -__ai int32x2_t vcreate_s32(uint64_t __p0) { - int32x2_t __ret; - __ret = (int32x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x1_t vcreate_s64(uint64_t __p0) { - int64x1_t __ret; - __ret = (int64x1_t)(__p0); - return __ret; -} -#else -__ai int64x1_t vcreate_s64(uint64_t __p0) { - int64x1_t __ret; - __ret = (int64x1_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vcreate_s16(uint64_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t)(__p0); - return __ret; -} -#else -__ai int16x4_t vcreate_s16(uint64_t __p0) { - int16x4_t __ret; - __ret = 
(int16x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vcvtq_f32_u32(uint32x4_t __p0) { - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vcvtq_f32_v((int8x16_t)__p0, 50); - return __ret; -} -#else -__ai float32x4_t vcvtq_f32_u32(uint32x4_t __p0) { - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vcvtq_f32_v((int8x16_t)__rev0, 50); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vcvtq_f32_s32(int32x4_t __p0) { - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vcvtq_f32_v((int8x16_t)__p0, 34); - return __ret; -} -#else -__ai float32x4_t vcvtq_f32_s32(int32x4_t __p0) { - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vcvtq_f32_v((int8x16_t)__rev0, 34); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vcvt_f32_u32(uint32x2_t __p0) { - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vcvt_f32_v((int8x8_t)__p0, 18); - return __ret; -} -#else -__ai float32x2_t vcvt_f32_u32(uint32x2_t __p0) { - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vcvt_f32_v((int8x8_t)__rev0, 18); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vcvt_f32_s32(int32x2_t __p0) { - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vcvt_f32_v((int8x8_t)__p0, 2); - return __ret; -} -#else -__ai float32x2_t vcvt_f32_s32(int32x2_t __p0) { - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vcvt_f32_v((int8x8_t)__rev0, 2); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcvtq_n_f32_u32(__p0, __p1) __extension__ ({ \ - uint32x4_t __s0 = __p0; \ - float32x4_t __ret; \ - __ret = (float32x4_t) __builtin_neon_vcvtq_n_f32_v((int8x16_t)__s0, __p1, 50); \ - __ret; \ -}) -#else -#define vcvtq_n_f32_u32(__p0, __p1) __extension__ ({ \ - uint32x4_t __s0 = __p0; \ - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - float32x4_t __ret; \ - __ret = (float32x4_t) __builtin_neon_vcvtq_n_f32_v((int8x16_t)__rev0, __p1, 50); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcvtq_n_f32_s32(__p0, __p1) __extension__ ({ \ - int32x4_t __s0 = __p0; \ - float32x4_t __ret; \ - __ret = (float32x4_t) __builtin_neon_vcvtq_n_f32_v((int8x16_t)__s0, __p1, 34); \ - __ret; \ -}) -#else -#define vcvtq_n_f32_s32(__p0, __p1) __extension__ ({ \ - int32x4_t __s0 = __p0; \ - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - float32x4_t __ret; \ - __ret = (float32x4_t) __builtin_neon_vcvtq_n_f32_v((int8x16_t)__rev0, __p1, 34); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcvt_n_f32_u32(__p0, __p1) __extension__ ({ \ - uint32x2_t __s0 = __p0; \ - float32x2_t __ret; \ - __ret = (float32x2_t) __builtin_neon_vcvt_n_f32_v((int8x8_t)__s0, __p1, 18); \ - __ret; \ -}) -#else -#define vcvt_n_f32_u32(__p0, __p1) __extension__ ({ \ - uint32x2_t 
__s0 = __p0; \ - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - float32x2_t __ret; \ - __ret = (float32x2_t) __builtin_neon_vcvt_n_f32_v((int8x8_t)__rev0, __p1, 18); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcvt_n_f32_s32(__p0, __p1) __extension__ ({ \ - int32x2_t __s0 = __p0; \ - float32x2_t __ret; \ - __ret = (float32x2_t) __builtin_neon_vcvt_n_f32_v((int8x8_t)__s0, __p1, 2); \ - __ret; \ -}) -#else -#define vcvt_n_f32_s32(__p0, __p1) __extension__ ({ \ - int32x2_t __s0 = __p0; \ - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - float32x2_t __ret; \ - __ret = (float32x2_t) __builtin_neon_vcvt_n_f32_v((int8x8_t)__rev0, __p1, 2); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcvtq_n_s32_f32(__p0, __p1) __extension__ ({ \ - float32x4_t __s0 = __p0; \ - int32x4_t __ret; \ - __ret = (int32x4_t) __builtin_neon_vcvtq_n_s32_v((int8x16_t)__s0, __p1, 34); \ - __ret; \ -}) -#else -#define vcvtq_n_s32_f32(__p0, __p1) __extension__ ({ \ - float32x4_t __s0 = __p0; \ - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - int32x4_t __ret; \ - __ret = (int32x4_t) __builtin_neon_vcvtq_n_s32_v((int8x16_t)__rev0, __p1, 34); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcvt_n_s32_f32(__p0, __p1) __extension__ ({ \ - float32x2_t __s0 = __p0; \ - int32x2_t __ret; \ - __ret = (int32x2_t) __builtin_neon_vcvt_n_s32_v((int8x8_t)__s0, __p1, 2); \ - __ret; \ -}) -#else -#define vcvt_n_s32_f32(__p0, __p1) __extension__ ({ \ - float32x2_t __s0 = __p0; \ - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - int32x2_t __ret; \ - __ret = (int32x2_t) __builtin_neon_vcvt_n_s32_v((int8x8_t)__rev0, __p1, 2); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcvtq_n_u32_f32(__p0, __p1) __extension__ ({ \ - float32x4_t __s0 = __p0; \ - uint32x4_t __ret; \ - __ret = (uint32x4_t) __builtin_neon_vcvtq_n_u32_v((int8x16_t)__s0, __p1, 50); \ - __ret; \ -}) -#else -#define vcvtq_n_u32_f32(__p0, __p1) __extension__ ({ \ - float32x4_t __s0 = __p0; \ - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - uint32x4_t __ret; \ - __ret = (uint32x4_t) __builtin_neon_vcvtq_n_u32_v((int8x16_t)__rev0, __p1, 50); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcvt_n_u32_f32(__p0, __p1) __extension__ ({ \ - float32x2_t __s0 = __p0; \ - uint32x2_t __ret; \ - __ret = (uint32x2_t) __builtin_neon_vcvt_n_u32_v((int8x8_t)__s0, __p1, 18); \ - __ret; \ -}) -#else -#define vcvt_n_u32_f32(__p0, __p1) __extension__ ({ \ - float32x2_t __s0 = __p0; \ - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - uint32x2_t __ret; \ - __ret = (uint32x2_t) __builtin_neon_vcvt_n_u32_v((int8x8_t)__rev0, __p1, 18); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vcvtq_s32_f32(float32x4_t __p0) { - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vcvtq_s32_v((int8x16_t)__p0, 34); - return __ret; -} -#else -__ai int32x4_t vcvtq_s32_f32(float32x4_t __p0) { - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __ret; - __ret = 
(int32x4_t) __builtin_neon_vcvtq_s32_v((int8x16_t)__rev0, 34); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vcvt_s32_f32(float32x2_t __p0) { - int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vcvt_s32_v((int8x8_t)__p0, 2); - return __ret; -} -#else -__ai int32x2_t vcvt_s32_f32(float32x2_t __p0) { - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vcvt_s32_v((int8x8_t)__rev0, 2); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vcvtq_u32_f32(float32x4_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vcvtq_u32_v((int8x16_t)__p0, 50); - return __ret; -} -#else -__ai uint32x4_t vcvtq_u32_f32(float32x4_t __p0) { - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vcvtq_u32_v((int8x16_t)__rev0, 50); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vcvt_u32_f32(float32x2_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vcvt_u32_v((int8x8_t)__p0, 18); - return __ret; -} -#else -__ai uint32x2_t vcvt_u32_f32(float32x2_t __p0) { - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vcvt_u32_v((int8x8_t)__rev0, 18); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdup_lane_p8(__p0, __p1) __extension__ ({ \ - poly8x8_t __s0 = __p0; \ - poly8x8_t __ret; \ - __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \ - __ret; \ -}) -#else -#define vdup_lane_p8(__p0, __p1) __extension__ ({ \ - poly8x8_t __s0 = __p0; \ - poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - poly8x8_t __ret; \ - __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdup_lane_p16(__p0, __p1) __extension__ ({ \ - poly16x4_t __s0 = __p0; \ - poly16x4_t __ret; \ - __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \ - __ret; \ -}) -#else -#define vdup_lane_p16(__p0, __p1) __extension__ ({ \ - poly16x4_t __s0 = __p0; \ - poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - poly16x4_t __ret; \ - __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdupq_lane_p8(__p0, __p1) __extension__ ({ \ - poly8x8_t __s0 = __p0; \ - poly8x16_t __ret; \ - __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \ - __ret; \ -}) -#else -#define vdupq_lane_p8(__p0, __p1) __extension__ ({ \ - poly8x8_t __s0 = __p0; \ - poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - poly8x16_t __ret; \ - __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \ - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 
12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdupq_lane_p16(__p0, __p1) __extension__ ({ \ - poly16x4_t __s0 = __p0; \ - poly16x8_t __ret; \ - __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \ - __ret; \ -}) -#else -#define vdupq_lane_p16(__p0, __p1) __extension__ ({ \ - poly16x4_t __s0 = __p0; \ - poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - poly16x8_t __ret; \ - __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdupq_lane_u8(__p0, __p1) __extension__ ({ \ - uint8x8_t __s0 = __p0; \ - uint8x16_t __ret; \ - __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \ - __ret; \ -}) -#else -#define vdupq_lane_u8(__p0, __p1) __extension__ ({ \ - uint8x8_t __s0 = __p0; \ - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint8x16_t __ret; \ - __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \ - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdupq_lane_u32(__p0, __p1) __extension__ ({ \ - uint32x2_t __s0 = __p0; \ - uint32x4_t __ret; \ - __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \ - __ret; \ -}) -#else -#define vdupq_lane_u32(__p0, __p1) __extension__ ({ \ - uint32x2_t __s0 = __p0; \ - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - uint32x4_t __ret; \ - __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdupq_lane_u64(__p0, __p1) __extension__ ({ \ - uint64x1_t __s0 = __p0; \ - uint64x2_t __ret; \ - __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \ - __ret; \ -}) -#else -#define vdupq_lane_u64(__p0, __p1) __extension__ ({ \ - uint64x1_t __s0 = __p0; \ - uint64x2_t __ret; \ - __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdupq_lane_u16(__p0, __p1) __extension__ ({ \ - uint16x4_t __s0 = __p0; \ - uint16x8_t __ret; \ - __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \ - __ret; \ -}) -#else -#define vdupq_lane_u16(__p0, __p1) __extension__ ({ \ - uint16x4_t __s0 = __p0; \ - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - uint16x8_t __ret; \ - __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdupq_lane_s8(__p0, __p1) __extension__ ({ \ - int8x8_t __s0 = __p0; \ - int8x16_t __ret; \ - __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \ - __ret; \ -}) -#else -#define vdupq_lane_s8(__p0, __p1) __extension__ ({ \ - int8x8_t __s0 = __p0; \ - int8x8_t __rev0; __rev0 = 
__builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - int8x16_t __ret; \ - __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \ - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdupq_lane_f32(__p0, __p1) __extension__ ({ \ - float32x2_t __s0 = __p0; \ - float32x4_t __ret; \ - __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \ - __ret; \ -}) -#else -#define vdupq_lane_f32(__p0, __p1) __extension__ ({ \ - float32x2_t __s0 = __p0; \ - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - float32x4_t __ret; \ - __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdupq_lane_s32(__p0, __p1) __extension__ ({ \ - int32x2_t __s0 = __p0; \ - int32x4_t __ret; \ - __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \ - __ret; \ -}) -#else -#define vdupq_lane_s32(__p0, __p1) __extension__ ({ \ - int32x2_t __s0 = __p0; \ - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - int32x4_t __ret; \ - __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdupq_lane_s64(__p0, __p1) __extension__ ({ \ - int64x1_t __s0 = __p0; \ - int64x2_t __ret; \ - __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \ - __ret; \ -}) -#else -#define vdupq_lane_s64(__p0, __p1) __extension__ ({ \ - int64x1_t __s0 = __p0; \ - int64x2_t __ret; \ - __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdupq_lane_s16(__p0, __p1) __extension__ ({ \ - int16x4_t __s0 = __p0; \ - int16x8_t __ret; \ - __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \ - __ret; \ -}) -#else -#define vdupq_lane_s16(__p0, __p1) __extension__ ({ \ - int16x4_t __s0 = __p0; \ - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - int16x8_t __ret; \ - __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdup_lane_u8(__p0, __p1) __extension__ ({ \ - uint8x8_t __s0 = __p0; \ - uint8x8_t __ret; \ - __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \ - __ret; \ -}) -#else -#define vdup_lane_u8(__p0, __p1) __extension__ ({ \ - uint8x8_t __s0 = __p0; \ - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint8x8_t __ret; \ - __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdup_lane_u32(__p0, __p1) __extension__ ({ \ - uint32x2_t __s0 = __p0; \ - uint32x2_t __ret; \ - __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \ - __ret; \ -}) -#else -#define vdup_lane_u32(__p0, __p1) __extension__ ({ \ - uint32x2_t __s0 = __p0; \ - uint32x2_t __rev0; 
__rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - uint32x2_t __ret; \ - __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdup_lane_u64(__p0, __p1) __extension__ ({ \ - uint64x1_t __s0 = __p0; \ - uint64x1_t __ret; \ - __ret = __builtin_shufflevector(__s0, __s0, __p1); \ - __ret; \ -}) -#else -#define vdup_lane_u64(__p0, __p1) __extension__ ({ \ - uint64x1_t __s0 = __p0; \ - uint64x1_t __ret; \ - __ret = __builtin_shufflevector(__s0, __s0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdup_lane_u16(__p0, __p1) __extension__ ({ \ - uint16x4_t __s0 = __p0; \ - uint16x4_t __ret; \ - __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \ - __ret; \ -}) -#else -#define vdup_lane_u16(__p0, __p1) __extension__ ({ \ - uint16x4_t __s0 = __p0; \ - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - uint16x4_t __ret; \ - __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdup_lane_s8(__p0, __p1) __extension__ ({ \ - int8x8_t __s0 = __p0; \ - int8x8_t __ret; \ - __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \ - __ret; \ -}) -#else -#define vdup_lane_s8(__p0, __p1) __extension__ ({ \ - int8x8_t __s0 = __p0; \ - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - int8x8_t __ret; \ - __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdup_lane_f32(__p0, __p1) __extension__ ({ \ - float32x2_t __s0 = __p0; \ - float32x2_t __ret; \ - __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \ - __ret; \ -}) -#else -#define vdup_lane_f32(__p0, __p1) __extension__ ({ \ - float32x2_t __s0 = __p0; \ - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - float32x2_t __ret; \ - __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdup_lane_s32(__p0, __p1) __extension__ ({ \ - int32x2_t __s0 = __p0; \ - int32x2_t __ret; \ - __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \ - __ret; \ -}) -#else -#define vdup_lane_s32(__p0, __p1) __extension__ ({ \ - int32x2_t __s0 = __p0; \ - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - int32x2_t __ret; \ - __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdup_lane_s64(__p0, __p1) __extension__ ({ \ - int64x1_t __s0 = __p0; \ - int64x1_t __ret; \ - __ret = __builtin_shufflevector(__s0, __s0, __p1); \ - __ret; \ -}) -#else -#define vdup_lane_s64(__p0, __p1) __extension__ ({ \ - int64x1_t __s0 = __p0; \ - int64x1_t __ret; \ - __ret = __builtin_shufflevector(__s0, __s0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdup_lane_s16(__p0, __p1) __extension__ ({ \ - int16x4_t __s0 = __p0; \ - int16x4_t __ret; \ - __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \ - __ret; \ -}) -#else -#define vdup_lane_s16(__p0, __p1) 
__extension__ ({ \ - int16x4_t __s0 = __p0; \ - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - int16x4_t __ret; \ - __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly8x8_t vdup_n_p8(poly8_t __p0) { - poly8x8_t __ret; - __ret = (poly8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; - return __ret; -} -#else -__ai poly8x8_t vdup_n_p8(poly8_t __p0) { - poly8x8_t __ret; - __ret = (poly8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly16x4_t vdup_n_p16(poly16_t __p0) { - poly16x4_t __ret; - __ret = (poly16x4_t) {__p0, __p0, __p0, __p0}; - return __ret; -} -#else -__ai poly16x4_t vdup_n_p16(poly16_t __p0) { - poly16x4_t __ret; - __ret = (poly16x4_t) {__p0, __p0, __p0, __p0}; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly8x16_t vdupq_n_p8(poly8_t __p0) { - poly8x16_t __ret; - __ret = (poly8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; - return __ret; -} -#else -__ai poly8x16_t vdupq_n_p8(poly8_t __p0) { - poly8x16_t __ret; - __ret = (poly8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly16x8_t vdupq_n_p16(poly16_t __p0) { - poly16x8_t __ret; - __ret = (poly16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; - return __ret; -} -#else -__ai poly16x8_t vdupq_n_p16(poly16_t __p0) { - poly16x8_t __ret; - __ret = (poly16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vdupq_n_u8(uint8_t __p0) { - uint8x16_t __ret; - __ret = (uint8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; - return __ret; -} -#else -__ai uint8x16_t vdupq_n_u8(uint8_t __p0) { - uint8x16_t __ret; - __ret = (uint8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vdupq_n_u32(uint32_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t) {__p0, __p0, __p0, __p0}; - return __ret; -} -#else -__ai uint32x4_t vdupq_n_u32(uint32_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t) {__p0, __p0, __p0, __p0}; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vdupq_n_u64(uint64_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t) {__p0, __p0}; - return __ret; -} -#else -__ai uint64x2_t vdupq_n_u64(uint64_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t) {__p0, __p0}; - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vdupq_n_u16(uint16_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; - return __ret; -} -#else -__ai 
uint16x8_t vdupq_n_u16(uint16_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vdupq_n_s8(int8_t __p0) { - int8x16_t __ret; - __ret = (int8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; - return __ret; -} -#else -__ai int8x16_t vdupq_n_s8(int8_t __p0) { - int8x16_t __ret; - __ret = (int8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vdupq_n_f32(float32_t __p0) { - float32x4_t __ret; - __ret = (float32x4_t) {__p0, __p0, __p0, __p0}; - return __ret; -} -#else -__ai float32x4_t vdupq_n_f32(float32_t __p0) { - float32x4_t __ret; - __ret = (float32x4_t) {__p0, __p0, __p0, __p0}; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdupq_n_f16(__p0) __extension__ ({ \ - float16_t __s0 = __p0; \ - float16x8_t __ret; \ - __ret = (float16x8_t) {__s0, __s0, __s0, __s0, __s0, __s0, __s0, __s0}; \ - __ret; \ -}) -#else -#define vdupq_n_f16(__p0) __extension__ ({ \ - float16_t __s0 = __p0; \ - float16x8_t __ret; \ - __ret = (float16x8_t) {__s0, __s0, __s0, __s0, __s0, __s0, __s0, __s0}; \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vdupq_n_s32(int32_t __p0) { - int32x4_t __ret; - __ret = (int32x4_t) {__p0, __p0, __p0, __p0}; - return __ret; -} -#else -__ai int32x4_t vdupq_n_s32(int32_t __p0) { - int32x4_t __ret; - __ret = (int32x4_t) {__p0, __p0, __p0, __p0}; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vdupq_n_s64(int64_t __p0) { - int64x2_t __ret; - __ret = (int64x2_t) {__p0, __p0}; - return __ret; -} -#else -__ai int64x2_t vdupq_n_s64(int64_t __p0) { - int64x2_t __ret; - __ret = (int64x2_t) {__p0, __p0}; - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vdupq_n_s16(int16_t __p0) { - int16x8_t __ret; - __ret = (int16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; - return __ret; -} -#else -__ai int16x8_t vdupq_n_s16(int16_t __p0) { - int16x8_t __ret; - __ret = (int16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vdup_n_u8(uint8_t __p0) { - uint8x8_t __ret; - __ret = (uint8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; - return __ret; -} -#else -__ai uint8x8_t vdup_n_u8(uint8_t __p0) { - uint8x8_t __ret; - __ret = (uint8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vdup_n_u32(uint32_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t) {__p0, __p0}; - return __ret; -} -#else -__ai uint32x2_t vdup_n_u32(uint32_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t) {__p0, __p0}; - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef 
__LITTLE_ENDIAN__ -__ai uint64x1_t vdup_n_u64(uint64_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t) {__p0}; - return __ret; -} -#else -__ai uint64x1_t vdup_n_u64(uint64_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t) {__p0}; - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vdup_n_u16(uint16_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t) {__p0, __p0, __p0, __p0}; - return __ret; -} -#else -__ai uint16x4_t vdup_n_u16(uint16_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t) {__p0, __p0, __p0, __p0}; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vdup_n_s8(int8_t __p0) { - int8x8_t __ret; - __ret = (int8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; - return __ret; -} -#else -__ai int8x8_t vdup_n_s8(int8_t __p0) { - int8x8_t __ret; - __ret = (int8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vdup_n_f32(float32_t __p0) { - float32x2_t __ret; - __ret = (float32x2_t) {__p0, __p0}; - return __ret; -} -#else -__ai float32x2_t vdup_n_f32(float32_t __p0) { - float32x2_t __ret; - __ret = (float32x2_t) {__p0, __p0}; - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdup_n_f16(__p0) __extension__ ({ \ - float16_t __s0 = __p0; \ - float16x4_t __ret; \ - __ret = (float16x4_t) {__s0, __s0, __s0, __s0}; \ - __ret; \ -}) -#else -#define vdup_n_f16(__p0) __extension__ ({ \ - float16_t __s0 = __p0; \ - float16x4_t __ret; \ - __ret = (float16x4_t) {__s0, __s0, __s0, __s0}; \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vdup_n_s32(int32_t __p0) { - int32x2_t __ret; - __ret = (int32x2_t) {__p0, __p0}; - return __ret; -} -#else -__ai int32x2_t vdup_n_s32(int32_t __p0) { - int32x2_t __ret; - __ret = (int32x2_t) {__p0, __p0}; - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x1_t vdup_n_s64(int64_t __p0) { - int64x1_t __ret; - __ret = (int64x1_t) {__p0}; - return __ret; -} -#else -__ai int64x1_t vdup_n_s64(int64_t __p0) { - int64x1_t __ret; - __ret = (int64x1_t) {__p0}; - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vdup_n_s16(int16_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t) {__p0, __p0, __p0, __p0}; - return __ret; -} -#else -__ai int16x4_t vdup_n_s16(int16_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t) {__p0, __p0, __p0, __p0}; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t veorq_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16_t __ret; - __ret = __p0 ^ __p1; - return __ret; -} -#else -__ai uint8x16_t veorq_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __ret; - __ret = __rev0 ^ __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t veorq_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4_t __ret; - __ret = __p0 ^ 
__p1; - return __ret; -} -#else -__ai uint32x4_t veorq_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint32x4_t __ret; - __ret = __rev0 ^ __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t veorq_u64(uint64x2_t __p0, uint64x2_t __p1) { - uint64x2_t __ret; - __ret = __p0 ^ __p1; - return __ret; -} -#else -__ai uint64x2_t veorq_u64(uint64x2_t __p0, uint64x2_t __p1) { - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint64x2_t __ret; - __ret = __rev0 ^ __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t veorq_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint16x8_t __ret; - __ret = __p0 ^ __p1; - return __ret; -} -#else -__ai uint16x8_t veorq_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __ret; - __ret = __rev0 ^ __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x16_t veorq_s8(int8x16_t __p0, int8x16_t __p1) { - int8x16_t __ret; - __ret = __p0 ^ __p1; - return __ret; -} -#else -__ai int8x16_t veorq_s8(int8x16_t __p0, int8x16_t __p1) { - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __ret; - __ret = __rev0 ^ __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t veorq_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4_t __ret; - __ret = __p0 ^ __p1; - return __ret; -} -#else -__ai int32x4_t veorq_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int32x4_t __ret; - __ret = __rev0 ^ __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x2_t veorq_s64(int64x2_t __p0, int64x2_t __p1) { - int64x2_t __ret; - __ret = __p0 ^ __p1; - return __ret; -} -#else -__ai int64x2_t veorq_s64(int64x2_t __p0, int64x2_t __p1) { - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - int64x2_t __ret; - __ret = __rev0 ^ __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x8_t veorq_s16(int16x8_t __p0, int16x8_t __p1) { - int16x8_t __ret; - __ret = __p0 ^ __p1; - return __ret; -} -#else -__ai int16x8_t veorq_s16(int16x8_t __p0, int16x8_t __p1) { - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __ret; - __ret = __rev0 ^ __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 7, 
6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t veor_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x8_t __ret; - __ret = __p0 ^ __p1; - return __ret; -} -#else -__ai uint8x8_t veor_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __ret; - __ret = __rev0 ^ __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t veor_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint32x2_t __ret; - __ret = __p0 ^ __p1; - return __ret; -} -#else -__ai uint32x2_t veor_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint32x2_t __ret; - __ret = __rev0 ^ __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x1_t veor_u64(uint64x1_t __p0, uint64x1_t __p1) { - uint64x1_t __ret; - __ret = __p0 ^ __p1; - return __ret; -} -#else -__ai uint64x1_t veor_u64(uint64x1_t __p0, uint64x1_t __p1) { - uint64x1_t __ret; - __ret = __p0 ^ __p1; - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t veor_u16(uint16x4_t __p0, uint16x4_t __p1) { - uint16x4_t __ret; - __ret = __p0 ^ __p1; - return __ret; -} -#else -__ai uint16x4_t veor_u16(uint16x4_t __p0, uint16x4_t __p1) { - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint16x4_t __ret; - __ret = __rev0 ^ __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x8_t veor_s8(int8x8_t __p0, int8x8_t __p1) { - int8x8_t __ret; - __ret = __p0 ^ __p1; - return __ret; -} -#else -__ai int8x8_t veor_s8(int8x8_t __p0, int8x8_t __p1) { - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __ret; - __ret = __rev0 ^ __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x2_t veor_s32(int32x2_t __p0, int32x2_t __p1) { - int32x2_t __ret; - __ret = __p0 ^ __p1; - return __ret; -} -#else -__ai int32x2_t veor_s32(int32x2_t __p0, int32x2_t __p1) { - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - int32x2_t __ret; - __ret = __rev0 ^ __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x1_t veor_s64(int64x1_t __p0, int64x1_t __p1) { - int64x1_t __ret; - __ret = __p0 ^ __p1; - return __ret; -} -#else -__ai int64x1_t veor_s64(int64x1_t __p0, int64x1_t __p1) { - int64x1_t __ret; - __ret = __p0 ^ __p1; - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x4_t veor_s16(int16x4_t __p0, int16x4_t __p1) { - int16x4_t __ret; - __ret = __p0 ^ __p1; - return __ret; -} -#else -__ai int16x4_t veor_s16(int16x4_t __p0, int16x4_t __p1) { - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); 
- int16x4_t __ret; - __ret = __rev0 ^ __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vext_p8(__p0, __p1, __p2) __extension__ ({ \ - poly8x8_t __s0 = __p0; \ - poly8x8_t __s1 = __p1; \ - poly8x8_t __ret; \ - __ret = (poly8x8_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 4); \ - __ret; \ -}) -#else -#define vext_p8(__p0, __p1, __p2) __extension__ ({ \ - poly8x8_t __s0 = __p0; \ - poly8x8_t __s1 = __p1; \ - poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - poly8x8_t __ret; \ - __ret = (poly8x8_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 4); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vext_p16(__p0, __p1, __p2) __extension__ ({ \ - poly16x4_t __s0 = __p0; \ - poly16x4_t __s1 = __p1; \ - poly16x4_t __ret; \ - __ret = (poly16x4_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 5); \ - __ret; \ -}) -#else -#define vext_p16(__p0, __p1, __p2) __extension__ ({ \ - poly16x4_t __s0 = __p0; \ - poly16x4_t __s1 = __p1; \ - poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - poly16x4_t __ret; \ - __ret = (poly16x4_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 5); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vextq_p8(__p0, __p1, __p2) __extension__ ({ \ - poly8x16_t __s0 = __p0; \ - poly8x16_t __s1 = __p1; \ - poly8x16_t __ret; \ - __ret = (poly8x16_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 36); \ - __ret; \ -}) -#else -#define vextq_p8(__p0, __p1, __p2) __extension__ ({ \ - poly8x16_t __s0 = __p0; \ - poly8x16_t __s1 = __p1; \ - poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - poly8x16_t __ret; \ - __ret = (poly8x16_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 36); \ - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vextq_p16(__p0, __p1, __p2) __extension__ ({ \ - poly16x8_t __s0 = __p0; \ - poly16x8_t __s1 = __p1; \ - poly16x8_t __ret; \ - __ret = (poly16x8_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 37); \ - __ret; \ -}) -#else -#define vextq_p16(__p0, __p1, __p2) __extension__ ({ \ - poly16x8_t __s0 = __p0; \ - poly16x8_t __s1 = __p1; \ - poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - poly16x8_t __ret; \ - __ret = (poly16x8_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 37); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vextq_u8(__p0, __p1, __p2) __extension__ ({ \ - uint8x16_t __s0 = __p0; \ - uint8x16_t __s1 = __p1; \ - uint8x16_t __ret; \ - __ret = (uint8x16_t) __builtin_neon_vextq_v((int8x16_t)__s0, 
(int8x16_t)__s1, __p2, 48); \ - __ret; \ -}) -#else -#define vextq_u8(__p0, __p1, __p2) __extension__ ({ \ - uint8x16_t __s0 = __p0; \ - uint8x16_t __s1 = __p1; \ - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint8x16_t __ret; \ - __ret = (uint8x16_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 48); \ - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vextq_u32(__p0, __p1, __p2) __extension__ ({ \ - uint32x4_t __s0 = __p0; \ - uint32x4_t __s1 = __p1; \ - uint32x4_t __ret; \ - __ret = (uint32x4_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 50); \ - __ret; \ -}) -#else -#define vextq_u32(__p0, __p1, __p2) __extension__ ({ \ - uint32x4_t __s0 = __p0; \ - uint32x4_t __s1 = __p1; \ - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - uint32x4_t __ret; \ - __ret = (uint32x4_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 50); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vextq_u64(__p0, __p1, __p2) __extension__ ({ \ - uint64x2_t __s0 = __p0; \ - uint64x2_t __s1 = __p1; \ - uint64x2_t __ret; \ - __ret = (uint64x2_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 51); \ - __ret; \ -}) -#else -#define vextq_u64(__p0, __p1, __p2) __extension__ ({ \ - uint64x2_t __s0 = __p0; \ - uint64x2_t __s1 = __p1; \ - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - uint64x2_t __ret; \ - __ret = (uint64x2_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 51); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vextq_u16(__p0, __p1, __p2) __extension__ ({ \ - uint16x8_t __s0 = __p0; \ - uint16x8_t __s1 = __p1; \ - uint16x8_t __ret; \ - __ret = (uint16x8_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 49); \ - __ret; \ -}) -#else -#define vextq_u16(__p0, __p1, __p2) __extension__ ({ \ - uint16x8_t __s0 = __p0; \ - uint16x8_t __s1 = __p1; \ - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint16x8_t __ret; \ - __ret = (uint16x8_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 49); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vextq_s8(__p0, __p1, __p2) __extension__ ({ \ - int8x16_t __s0 = __p0; \ - int8x16_t __s1 = __p1; \ - int8x16_t __ret; \ - __ret = (int8x16_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 32); \ - __ret; \ -}) -#else -#define vextq_s8(__p0, __p1, __p2) __extension__ ({ \ - int8x16_t __s0 = __p0; \ - int8x16_t __s1 = __p1; \ - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - int8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 
2, 1, 0); \ - int8x16_t __ret; \ - __ret = (int8x16_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 32); \ - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vextq_f32(__p0, __p1, __p2) __extension__ ({ \ - float32x4_t __s0 = __p0; \ - float32x4_t __s1 = __p1; \ - float32x4_t __ret; \ - __ret = (float32x4_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 41); \ - __ret; \ -}) -#else -#define vextq_f32(__p0, __p1, __p2) __extension__ ({ \ - float32x4_t __s0 = __p0; \ - float32x4_t __s1 = __p1; \ - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - float32x4_t __ret; \ - __ret = (float32x4_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 41); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vextq_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x4_t __s0 = __p0; \ - int32x4_t __s1 = __p1; \ - int32x4_t __ret; \ - __ret = (int32x4_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 34); \ - __ret; \ -}) -#else -#define vextq_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x4_t __s0 = __p0; \ - int32x4_t __s1 = __p1; \ - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - int32x4_t __ret; \ - __ret = (int32x4_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vextq_s64(__p0, __p1, __p2) __extension__ ({ \ - int64x2_t __s0 = __p0; \ - int64x2_t __s1 = __p1; \ - int64x2_t __ret; \ - __ret = (int64x2_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 35); \ - __ret; \ -}) -#else -#define vextq_s64(__p0, __p1, __p2) __extension__ ({ \ - int64x2_t __s0 = __p0; \ - int64x2_t __s1 = __p1; \ - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - int64x2_t __ret; \ - __ret = (int64x2_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 35); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vextq_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x8_t __s0 = __p0; \ - int16x8_t __s1 = __p1; \ - int16x8_t __ret; \ - __ret = (int16x8_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 33); \ - __ret; \ -}) -#else -#define vextq_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x8_t __s0 = __p0; \ - int16x8_t __s1 = __p1; \ - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x8_t __ret; \ - __ret = (int16x8_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vext_u8(__p0, __p1, __p2) __extension__ ({ \ - uint8x8_t __s0 = __p0; \ - uint8x8_t __s1 = __p1; \ - uint8x8_t __ret; \ - __ret = (uint8x8_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 16); \ - __ret; \ -}) -#else 
-#define vext_u8(__p0, __p1, __p2) __extension__ ({ \ - uint8x8_t __s0 = __p0; \ - uint8x8_t __s1 = __p1; \ - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint8x8_t __ret; \ - __ret = (uint8x8_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 16); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vext_u32(__p0, __p1, __p2) __extension__ ({ \ - uint32x2_t __s0 = __p0; \ - uint32x2_t __s1 = __p1; \ - uint32x2_t __ret; \ - __ret = (uint32x2_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 18); \ - __ret; \ -}) -#else -#define vext_u32(__p0, __p1, __p2) __extension__ ({ \ - uint32x2_t __s0 = __p0; \ - uint32x2_t __s1 = __p1; \ - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - uint32x2_t __ret; \ - __ret = (uint32x2_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 18); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vext_u64(__p0, __p1, __p2) __extension__ ({ \ - uint64x1_t __s0 = __p0; \ - uint64x1_t __s1 = __p1; \ - uint64x1_t __ret; \ - __ret = (uint64x1_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \ - __ret; \ -}) -#else -#define vext_u64(__p0, __p1, __p2) __extension__ ({ \ - uint64x1_t __s0 = __p0; \ - uint64x1_t __s1 = __p1; \ - uint64x1_t __ret; \ - __ret = (uint64x1_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vext_u16(__p0, __p1, __p2) __extension__ ({ \ - uint16x4_t __s0 = __p0; \ - uint16x4_t __s1 = __p1; \ - uint16x4_t __ret; \ - __ret = (uint16x4_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 17); \ - __ret; \ -}) -#else -#define vext_u16(__p0, __p1, __p2) __extension__ ({ \ - uint16x4_t __s0 = __p0; \ - uint16x4_t __s1 = __p1; \ - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - uint16x4_t __ret; \ - __ret = (uint16x4_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 17); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vext_s8(__p0, __p1, __p2) __extension__ ({ \ - int8x8_t __s0 = __p0; \ - int8x8_t __s1 = __p1; \ - int8x8_t __ret; \ - __ret = (int8x8_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 0); \ - __ret; \ -}) -#else -#define vext_s8(__p0, __p1, __p2) __extension__ ({ \ - int8x8_t __s0 = __p0; \ - int8x8_t __s1 = __p1; \ - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - int8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - int8x8_t __ret; \ - __ret = (int8x8_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 0); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vext_f32(__p0, __p1, __p2) __extension__ ({ \ - float32x2_t __s0 = __p0; \ - float32x2_t __s1 = __p1; \ - float32x2_t __ret; \ - __ret = (float32x2_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 9); \ - __ret; \ -}) -#else -#define 
vext_f32(__p0, __p1, __p2) __extension__ ({ \ - float32x2_t __s0 = __p0; \ - float32x2_t __s1 = __p1; \ - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - float32x2_t __ret; \ - __ret = (float32x2_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 9); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vext_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x2_t __s0 = __p0; \ - int32x2_t __s1 = __p1; \ - int32x2_t __ret; \ - __ret = (int32x2_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 2); \ - __ret; \ -}) -#else -#define vext_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x2_t __s0 = __p0; \ - int32x2_t __s1 = __p1; \ - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - int32x2_t __ret; \ - __ret = (int32x2_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vext_s64(__p0, __p1, __p2) __extension__ ({ \ - int64x1_t __s0 = __p0; \ - int64x1_t __s1 = __p1; \ - int64x1_t __ret; \ - __ret = (int64x1_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \ - __ret; \ -}) -#else -#define vext_s64(__p0, __p1, __p2) __extension__ ({ \ - int64x1_t __s0 = __p0; \ - int64x1_t __s1 = __p1; \ - int64x1_t __ret; \ - __ret = (int64x1_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vext_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x4_t __s0 = __p0; \ - int16x4_t __s1 = __p1; \ - int16x4_t __ret; \ - __ret = (int16x4_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 1); \ - __ret; \ -}) -#else -#define vext_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x4_t __s0 = __p0; \ - int16x4_t __s1 = __p1; \ - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - int16x4_t __ret; \ - __ret = (int16x4_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly8x8_t vget_high_p8(poly8x16_t __p0) { - poly8x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 8, 9, 10, 11, 12, 13, 14, 15); - return __ret; -} -#else -__ai poly8x8_t vget_high_p8(poly8x16_t __p0) { - poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x8_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev0, 8, 9, 10, 11, 12, 13, 14, 15); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -__ai poly8x8_t __noswap_vget_high_p8(poly8x16_t __p0) { - poly8x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 8, 9, 10, 11, 12, 13, 14, 15); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly16x4_t vget_high_p16(poly16x8_t __p0) { - poly16x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7); - return __ret; -} -#else -__ai poly16x4_t vget_high_p16(poly16x8_t __p0) { - poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - poly16x4_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev0, 4, 5, 
6, 7); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vget_high_u8(uint8x16_t __p0) { - uint8x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 8, 9, 10, 11, 12, 13, 14, 15); - return __ret; -} -#else -__ai uint8x8_t vget_high_u8(uint8x16_t __p0) { - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev0, 8, 9, 10, 11, 12, 13, 14, 15); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -__ai uint8x8_t __noswap_vget_high_u8(uint8x16_t __p0) { - uint8x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 8, 9, 10, 11, 12, 13, 14, 15); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vget_high_u32(uint32x4_t __p0) { - uint32x2_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 2, 3); - return __ret; -} -#else -__ai uint32x2_t vget_high_u32(uint32x4_t __p0) { - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x2_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev0, 2, 3); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -__ai uint32x2_t __noswap_vget_high_u32(uint32x4_t __p0) { - uint32x2_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 2, 3); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x1_t vget_high_u64(uint64x2_t __p0) { - uint64x1_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 1); - return __ret; -} -#else -__ai uint64x1_t vget_high_u64(uint64x2_t __p0) { - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint64x1_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev0, 1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vget_high_u16(uint16x8_t __p0) { - uint16x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7); - return __ret; -} -#else -__ai uint16x4_t vget_high_u16(uint16x8_t __p0) { - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x4_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev0, 4, 5, 6, 7); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -__ai uint16x4_t __noswap_vget_high_u16(uint16x8_t __p0) { - uint16x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vget_high_s8(int8x16_t __p0) { - int8x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 8, 9, 10, 11, 12, 13, 14, 15); - return __ret; -} -#else -__ai int8x8_t vget_high_s8(int8x16_t __p0) { - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev0, 8, 9, 10, 11, 12, 13, 14, 15); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -__ai int8x8_t __noswap_vget_high_s8(int8x16_t __p0) { - int8x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 8, 9, 10, 11, 12, 13, 14, 15); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vget_high_f32(float32x4_t __p0) { - float32x2_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 2, 3); - return __ret; -} -#else -__ai float32x2_t vget_high_f32(float32x4_t __p0) { - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float32x2_t 
__ret; - __ret = __builtin_shufflevector(__rev0, __rev0, 2, 3); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -__ai float32x2_t __noswap_vget_high_f32(float32x4_t __p0) { - float32x2_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 2, 3); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x4_t vget_high_f16(float16x8_t __p0) { - float16x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7); - return __ret; -} -#else -__ai float16x4_t vget_high_f16(float16x8_t __p0) { - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x4_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev0, 4, 5, 6, 7); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -__ai float16x4_t __noswap_vget_high_f16(float16x8_t __p0) { - float16x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vget_high_s32(int32x4_t __p0) { - int32x2_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 2, 3); - return __ret; -} -#else -__ai int32x2_t vget_high_s32(int32x4_t __p0) { - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x2_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev0, 2, 3); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -__ai int32x2_t __noswap_vget_high_s32(int32x4_t __p0) { - int32x2_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 2, 3); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x1_t vget_high_s64(int64x2_t __p0) { - int64x1_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 1); - return __ret; -} -#else -__ai int64x1_t vget_high_s64(int64x2_t __p0) { - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int64x1_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev0, 1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vget_high_s16(int16x8_t __p0) { - int16x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7); - return __ret; -} -#else -__ai int16x4_t vget_high_s16(int16x8_t __p0) { - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x4_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev0, 4, 5, 6, 7); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -__ai int16x4_t __noswap_vget_high_s16(int16x8_t __p0) { - int16x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vget_lane_p8(__p0, __p1) __extension__ ({ \ - poly8x8_t __s0 = __p0; \ - poly8_t __ret; \ - __ret = (poly8_t) __builtin_neon_vget_lane_i8((int8x8_t)__s0, __p1); \ - __ret; \ -}) -#else -#define vget_lane_p8(__p0, __p1) __extension__ ({ \ - poly8x8_t __s0 = __p0; \ - poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - poly8_t __ret; \ - __ret = (poly8_t) __builtin_neon_vget_lane_i8((int8x8_t)__rev0, __p1); \ - __ret; \ -}) -#define __noswap_vget_lane_p8(__p0, __p1) __extension__ ({ \ - poly8x8_t __s0 = __p0; \ - poly8_t __ret; \ - __ret = (poly8_t) __builtin_neon_vget_lane_i8((int8x8_t)__s0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vget_lane_p16(__p0, __p1) __extension__ ({ \ - poly16x4_t __s0 = __p0; \ - poly16_t __ret; \ - __ret = (poly16_t) __builtin_neon_vget_lane_i16((int8x8_t)__s0, __p1); \ - __ret; \ -}) -#else -#define 
vget_lane_p16(__p0, __p1) __extension__ ({ \ - poly16x4_t __s0 = __p0; \ - poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - poly16_t __ret; \ - __ret = (poly16_t) __builtin_neon_vget_lane_i16((int8x8_t)__rev0, __p1); \ - __ret; \ -}) -#define __noswap_vget_lane_p16(__p0, __p1) __extension__ ({ \ - poly16x4_t __s0 = __p0; \ - poly16_t __ret; \ - __ret = (poly16_t) __builtin_neon_vget_lane_i16((int8x8_t)__s0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vgetq_lane_p8(__p0, __p1) __extension__ ({ \ - poly8x16_t __s0 = __p0; \ - poly8_t __ret; \ - __ret = (poly8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__s0, __p1); \ - __ret; \ -}) -#else -#define vgetq_lane_p8(__p0, __p1) __extension__ ({ \ - poly8x16_t __s0 = __p0; \ - poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - poly8_t __ret; \ - __ret = (poly8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__rev0, __p1); \ - __ret; \ -}) -#define __noswap_vgetq_lane_p8(__p0, __p1) __extension__ ({ \ - poly8x16_t __s0 = __p0; \ - poly8_t __ret; \ - __ret = (poly8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__s0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vgetq_lane_p16(__p0, __p1) __extension__ ({ \ - poly16x8_t __s0 = __p0; \ - poly16_t __ret; \ - __ret = (poly16_t) __builtin_neon_vgetq_lane_i16((int8x16_t)__s0, __p1); \ - __ret; \ -}) -#else -#define vgetq_lane_p16(__p0, __p1) __extension__ ({ \ - poly16x8_t __s0 = __p0; \ - poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - poly16_t __ret; \ - __ret = (poly16_t) __builtin_neon_vgetq_lane_i16((int8x16_t)__rev0, __p1); \ - __ret; \ -}) -#define __noswap_vgetq_lane_p16(__p0, __p1) __extension__ ({ \ - poly16x8_t __s0 = __p0; \ - poly16_t __ret; \ - __ret = (poly16_t) __builtin_neon_vgetq_lane_i16((int8x16_t)__s0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vgetq_lane_u8(__p0, __p1) __extension__ ({ \ - uint8x16_t __s0 = __p0; \ - uint8_t __ret; \ - __ret = (uint8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__s0, __p1); \ - __ret; \ -}) -#else -#define vgetq_lane_u8(__p0, __p1) __extension__ ({ \ - uint8x16_t __s0 = __p0; \ - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint8_t __ret; \ - __ret = (uint8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__rev0, __p1); \ - __ret; \ -}) -#define __noswap_vgetq_lane_u8(__p0, __p1) __extension__ ({ \ - uint8x16_t __s0 = __p0; \ - uint8_t __ret; \ - __ret = (uint8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__s0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vgetq_lane_u32(__p0, __p1) __extension__ ({ \ - uint32x4_t __s0 = __p0; \ - uint32_t __ret; \ - __ret = (uint32_t) __builtin_neon_vgetq_lane_i32((int8x16_t)__s0, __p1); \ - __ret; \ -}) -#else -#define vgetq_lane_u32(__p0, __p1) __extension__ ({ \ - uint32x4_t __s0 = __p0; \ - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - uint32_t __ret; \ - __ret = (uint32_t) __builtin_neon_vgetq_lane_i32((int8x16_t)__rev0, __p1); \ - __ret; \ -}) -#define __noswap_vgetq_lane_u32(__p0, __p1) __extension__ ({ \ - uint32x4_t __s0 = __p0; \ - uint32_t __ret; \ - __ret = (uint32_t) __builtin_neon_vgetq_lane_i32((int8x16_t)__s0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vgetq_lane_u64(__p0, __p1) __extension__ ({ \ - uint64x2_t __s0 = __p0; \ - uint64_t 
__ret; \ - __ret = (uint64_t) __builtin_neon_vgetq_lane_i64((int8x16_t)__s0, __p1); \ - __ret; \ -}) -#else -#define vgetq_lane_u64(__p0, __p1) __extension__ ({ \ - uint64x2_t __s0 = __p0; \ - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - uint64_t __ret; \ - __ret = (uint64_t) __builtin_neon_vgetq_lane_i64((int8x16_t)__rev0, __p1); \ - __ret; \ -}) -#define __noswap_vgetq_lane_u64(__p0, __p1) __extension__ ({ \ - uint64x2_t __s0 = __p0; \ - uint64_t __ret; \ - __ret = (uint64_t) __builtin_neon_vgetq_lane_i64((int8x16_t)__s0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vgetq_lane_u16(__p0, __p1) __extension__ ({ \ - uint16x8_t __s0 = __p0; \ - uint16_t __ret; \ - __ret = (uint16_t) __builtin_neon_vgetq_lane_i16((int8x16_t)__s0, __p1); \ - __ret; \ -}) -#else -#define vgetq_lane_u16(__p0, __p1) __extension__ ({ \ - uint16x8_t __s0 = __p0; \ - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint16_t __ret; \ - __ret = (uint16_t) __builtin_neon_vgetq_lane_i16((int8x16_t)__rev0, __p1); \ - __ret; \ -}) -#define __noswap_vgetq_lane_u16(__p0, __p1) __extension__ ({ \ - uint16x8_t __s0 = __p0; \ - uint16_t __ret; \ - __ret = (uint16_t) __builtin_neon_vgetq_lane_i16((int8x16_t)__s0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vgetq_lane_s8(__p0, __p1) __extension__ ({ \ - int8x16_t __s0 = __p0; \ - int8_t __ret; \ - __ret = (int8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__s0, __p1); \ - __ret; \ -}) -#else -#define vgetq_lane_s8(__p0, __p1) __extension__ ({ \ - int8x16_t __s0 = __p0; \ - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - int8_t __ret; \ - __ret = (int8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__rev0, __p1); \ - __ret; \ -}) -#define __noswap_vgetq_lane_s8(__p0, __p1) __extension__ ({ \ - int8x16_t __s0 = __p0; \ - int8_t __ret; \ - __ret = (int8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__s0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vgetq_lane_f32(__p0, __p1) __extension__ ({ \ - float32x4_t __s0 = __p0; \ - float32_t __ret; \ - __ret = (float32_t) __builtin_neon_vgetq_lane_f32((int8x16_t)__s0, __p1); \ - __ret; \ -}) -#else -#define vgetq_lane_f32(__p0, __p1) __extension__ ({ \ - float32x4_t __s0 = __p0; \ - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - float32_t __ret; \ - __ret = (float32_t) __builtin_neon_vgetq_lane_f32((int8x16_t)__rev0, __p1); \ - __ret; \ -}) -#define __noswap_vgetq_lane_f32(__p0, __p1) __extension__ ({ \ - float32x4_t __s0 = __p0; \ - float32_t __ret; \ - __ret = (float32_t) __builtin_neon_vgetq_lane_f32((int8x16_t)__s0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vgetq_lane_f16(__p0, __p1) __extension__ ({ \ - float16x8_t __s0 = __p0; \ - float16_t __ret; \ - __ret = (float16_t) __builtin_neon_vgetq_lane_f16((int8x16_t)__s0, __p1); \ - __ret; \ -}) -#else -#define vgetq_lane_f16(__p0, __p1) __extension__ ({ \ - float16x8_t __s0 = __p0; \ - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16_t __ret; \ - __ret = (float16_t) __builtin_neon_vgetq_lane_f16((int8x16_t)__rev0, __p1); \ - __ret; \ -}) -#define __noswap_vgetq_lane_f16(__p0, __p1) __extension__ ({ \ - float16x8_t __s0 = __p0; \ - float16_t __ret; \ - __ret = (float16_t) __builtin_neon_vgetq_lane_f16((int8x16_t)__s0, __p1); \ - __ret; \ -}) -#endif - -#ifdef 
__LITTLE_ENDIAN__ -#define vgetq_lane_s32(__p0, __p1) __extension__ ({ \ - int32x4_t __s0 = __p0; \ - int32_t __ret; \ - __ret = (int32_t) __builtin_neon_vgetq_lane_i32((int8x16_t)__s0, __p1); \ - __ret; \ -}) -#else -#define vgetq_lane_s32(__p0, __p1) __extension__ ({ \ - int32x4_t __s0 = __p0; \ - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - int32_t __ret; \ - __ret = (int32_t) __builtin_neon_vgetq_lane_i32((int8x16_t)__rev0, __p1); \ - __ret; \ -}) -#define __noswap_vgetq_lane_s32(__p0, __p1) __extension__ ({ \ - int32x4_t __s0 = __p0; \ - int32_t __ret; \ - __ret = (int32_t) __builtin_neon_vgetq_lane_i32((int8x16_t)__s0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vgetq_lane_s64(__p0, __p1) __extension__ ({ \ - int64x2_t __s0 = __p0; \ - int64_t __ret; \ - __ret = (int64_t) __builtin_neon_vgetq_lane_i64((int8x16_t)__s0, __p1); \ - __ret; \ -}) -#else -#define vgetq_lane_s64(__p0, __p1) __extension__ ({ \ - int64x2_t __s0 = __p0; \ - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - int64_t __ret; \ - __ret = (int64_t) __builtin_neon_vgetq_lane_i64((int8x16_t)__rev0, __p1); \ - __ret; \ -}) -#define __noswap_vgetq_lane_s64(__p0, __p1) __extension__ ({ \ - int64x2_t __s0 = __p0; \ - int64_t __ret; \ - __ret = (int64_t) __builtin_neon_vgetq_lane_i64((int8x16_t)__s0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vgetq_lane_s16(__p0, __p1) __extension__ ({ \ - int16x8_t __s0 = __p0; \ - int16_t __ret; \ - __ret = (int16_t) __builtin_neon_vgetq_lane_i16((int8x16_t)__s0, __p1); \ - __ret; \ -}) -#else -#define vgetq_lane_s16(__p0, __p1) __extension__ ({ \ - int16x8_t __s0 = __p0; \ - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16_t __ret; \ - __ret = (int16_t) __builtin_neon_vgetq_lane_i16((int8x16_t)__rev0, __p1); \ - __ret; \ -}) -#define __noswap_vgetq_lane_s16(__p0, __p1) __extension__ ({ \ - int16x8_t __s0 = __p0; \ - int16_t __ret; \ - __ret = (int16_t) __builtin_neon_vgetq_lane_i16((int8x16_t)__s0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vget_lane_u8(__p0, __p1) __extension__ ({ \ - uint8x8_t __s0 = __p0; \ - uint8_t __ret; \ - __ret = (uint8_t) __builtin_neon_vget_lane_i8((int8x8_t)__s0, __p1); \ - __ret; \ -}) -#else -#define vget_lane_u8(__p0, __p1) __extension__ ({ \ - uint8x8_t __s0 = __p0; \ - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint8_t __ret; \ - __ret = (uint8_t) __builtin_neon_vget_lane_i8((int8x8_t)__rev0, __p1); \ - __ret; \ -}) -#define __noswap_vget_lane_u8(__p0, __p1) __extension__ ({ \ - uint8x8_t __s0 = __p0; \ - uint8_t __ret; \ - __ret = (uint8_t) __builtin_neon_vget_lane_i8((int8x8_t)__s0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vget_lane_u32(__p0, __p1) __extension__ ({ \ - uint32x2_t __s0 = __p0; \ - uint32_t __ret; \ - __ret = (uint32_t) __builtin_neon_vget_lane_i32((int8x8_t)__s0, __p1); \ - __ret; \ -}) -#else -#define vget_lane_u32(__p0, __p1) __extension__ ({ \ - uint32x2_t __s0 = __p0; \ - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - uint32_t __ret; \ - __ret = (uint32_t) __builtin_neon_vget_lane_i32((int8x8_t)__rev0, __p1); \ - __ret; \ -}) -#define __noswap_vget_lane_u32(__p0, __p1) __extension__ ({ \ - uint32x2_t __s0 = __p0; \ - uint32_t __ret; \ - __ret = (uint32_t) __builtin_neon_vget_lane_i32((int8x8_t)__s0, __p1); \ - __ret; \ -}) -#endif - -#ifdef 
__LITTLE_ENDIAN__ -#define vget_lane_u64(__p0, __p1) __extension__ ({ \ - uint64x1_t __s0 = __p0; \ - uint64_t __ret; \ - __ret = (uint64_t) __builtin_neon_vget_lane_i64((int8x8_t)__s0, __p1); \ - __ret; \ -}) -#else -#define vget_lane_u64(__p0, __p1) __extension__ ({ \ - uint64x1_t __s0 = __p0; \ - uint64_t __ret; \ - __ret = (uint64_t) __builtin_neon_vget_lane_i64((int8x8_t)__s0, __p1); \ - __ret; \ -}) -#define __noswap_vget_lane_u64(__p0, __p1) __extension__ ({ \ - uint64x1_t __s0 = __p0; \ - uint64_t __ret; \ - __ret = (uint64_t) __builtin_neon_vget_lane_i64((int8x8_t)__s0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vget_lane_u16(__p0, __p1) __extension__ ({ \ - uint16x4_t __s0 = __p0; \ - uint16_t __ret; \ - __ret = (uint16_t) __builtin_neon_vget_lane_i16((int8x8_t)__s0, __p1); \ - __ret; \ -}) -#else -#define vget_lane_u16(__p0, __p1) __extension__ ({ \ - uint16x4_t __s0 = __p0; \ - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - uint16_t __ret; \ - __ret = (uint16_t) __builtin_neon_vget_lane_i16((int8x8_t)__rev0, __p1); \ - __ret; \ -}) -#define __noswap_vget_lane_u16(__p0, __p1) __extension__ ({ \ - uint16x4_t __s0 = __p0; \ - uint16_t __ret; \ - __ret = (uint16_t) __builtin_neon_vget_lane_i16((int8x8_t)__s0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vget_lane_s8(__p0, __p1) __extension__ ({ \ - int8x8_t __s0 = __p0; \ - int8_t __ret; \ - __ret = (int8_t) __builtin_neon_vget_lane_i8((int8x8_t)__s0, __p1); \ - __ret; \ -}) -#else -#define vget_lane_s8(__p0, __p1) __extension__ ({ \ - int8x8_t __s0 = __p0; \ - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - int8_t __ret; \ - __ret = (int8_t) __builtin_neon_vget_lane_i8((int8x8_t)__rev0, __p1); \ - __ret; \ -}) -#define __noswap_vget_lane_s8(__p0, __p1) __extension__ ({ \ - int8x8_t __s0 = __p0; \ - int8_t __ret; \ - __ret = (int8_t) __builtin_neon_vget_lane_i8((int8x8_t)__s0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vget_lane_f32(__p0, __p1) __extension__ ({ \ - float32x2_t __s0 = __p0; \ - float32_t __ret; \ - __ret = (float32_t) __builtin_neon_vget_lane_f32((int8x8_t)__s0, __p1); \ - __ret; \ -}) -#else -#define vget_lane_f32(__p0, __p1) __extension__ ({ \ - float32x2_t __s0 = __p0; \ - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - float32_t __ret; \ - __ret = (float32_t) __builtin_neon_vget_lane_f32((int8x8_t)__rev0, __p1); \ - __ret; \ -}) -#define __noswap_vget_lane_f32(__p0, __p1) __extension__ ({ \ - float32x2_t __s0 = __p0; \ - float32_t __ret; \ - __ret = (float32_t) __builtin_neon_vget_lane_f32((int8x8_t)__s0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vget_lane_f16(__p0, __p1) __extension__ ({ \ - float16x4_t __s0 = __p0; \ - float16_t __ret; \ - __ret = (float16_t) __builtin_neon_vget_lane_f16((int8x8_t)__s0, __p1); \ - __ret; \ -}) -#else -#define vget_lane_f16(__p0, __p1) __extension__ ({ \ - float16x4_t __s0 = __p0; \ - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - float16_t __ret; \ - __ret = (float16_t) __builtin_neon_vget_lane_f16((int8x8_t)__rev0, __p1); \ - __ret; \ -}) -#define __noswap_vget_lane_f16(__p0, __p1) __extension__ ({ \ - float16x4_t __s0 = __p0; \ - float16_t __ret; \ - __ret = (float16_t) __builtin_neon_vget_lane_f16((int8x8_t)__s0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vget_lane_s32(__p0, __p1) __extension__ ({ \ - 
int32x2_t __s0 = __p0; \ - int32_t __ret; \ - __ret = (int32_t) __builtin_neon_vget_lane_i32((int8x8_t)__s0, __p1); \ - __ret; \ -}) -#else -#define vget_lane_s32(__p0, __p1) __extension__ ({ \ - int32x2_t __s0 = __p0; \ - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - int32_t __ret; \ - __ret = (int32_t) __builtin_neon_vget_lane_i32((int8x8_t)__rev0, __p1); \ - __ret; \ -}) -#define __noswap_vget_lane_s32(__p0, __p1) __extension__ ({ \ - int32x2_t __s0 = __p0; \ - int32_t __ret; \ - __ret = (int32_t) __builtin_neon_vget_lane_i32((int8x8_t)__s0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vget_lane_s64(__p0, __p1) __extension__ ({ \ - int64x1_t __s0 = __p0; \ - int64_t __ret; \ - __ret = (int64_t) __builtin_neon_vget_lane_i64((int8x8_t)__s0, __p1); \ - __ret; \ -}) -#else -#define vget_lane_s64(__p0, __p1) __extension__ ({ \ - int64x1_t __s0 = __p0; \ - int64_t __ret; \ - __ret = (int64_t) __builtin_neon_vget_lane_i64((int8x8_t)__s0, __p1); \ - __ret; \ -}) -#define __noswap_vget_lane_s64(__p0, __p1) __extension__ ({ \ - int64x1_t __s0 = __p0; \ - int64_t __ret; \ - __ret = (int64_t) __builtin_neon_vget_lane_i64((int8x8_t)__s0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vget_lane_s16(__p0, __p1) __extension__ ({ \ - int16x4_t __s0 = __p0; \ - int16_t __ret; \ - __ret = (int16_t) __builtin_neon_vget_lane_i16((int8x8_t)__s0, __p1); \ - __ret; \ -}) -#else -#define vget_lane_s16(__p0, __p1) __extension__ ({ \ - int16x4_t __s0 = __p0; \ - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - int16_t __ret; \ - __ret = (int16_t) __builtin_neon_vget_lane_i16((int8x8_t)__rev0, __p1); \ - __ret; \ -}) -#define __noswap_vget_lane_s16(__p0, __p1) __extension__ ({ \ - int16x4_t __s0 = __p0; \ - int16_t __ret; \ - __ret = (int16_t) __builtin_neon_vget_lane_i16((int8x8_t)__s0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly8x8_t vget_low_p8(poly8x16_t __p0) { - poly8x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3, 4, 5, 6, 7); - return __ret; -} -#else -__ai poly8x8_t vget_low_p8(poly8x16_t __p0) { - poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x8_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3, 4, 5, 6, 7); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly16x4_t vget_low_p16(poly16x8_t __p0) { - poly16x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3); - return __ret; -} -#else -__ai poly16x4_t vget_low_p16(poly16x8_t __p0) { - poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - poly16x4_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vget_low_u8(uint8x16_t __p0) { - uint8x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3, 4, 5, 6, 7); - return __ret; -} -#else -__ai uint8x8_t vget_low_u8(uint8x16_t __p0) { - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3, 4, 5, 6, 7); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef 
__LITTLE_ENDIAN__ -__ai uint32x2_t vget_low_u32(uint32x4_t __p0) { - uint32x2_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 0, 1); - return __ret; -} -#else -__ai uint32x2_t vget_low_u32(uint32x4_t __p0) { - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x2_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x1_t vget_low_u64(uint64x2_t __p0) { - uint64x1_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 0); - return __ret; -} -#else -__ai uint64x1_t vget_low_u64(uint64x2_t __p0) { - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint64x1_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev0, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vget_low_u16(uint16x8_t __p0) { - uint16x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3); - return __ret; -} -#else -__ai uint16x4_t vget_low_u16(uint16x8_t __p0) { - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x4_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vget_low_s8(int8x16_t __p0) { - int8x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3, 4, 5, 6, 7); - return __ret; -} -#else -__ai int8x8_t vget_low_s8(int8x16_t __p0) { - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3, 4, 5, 6, 7); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vget_low_f32(float32x4_t __p0) { - float32x2_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 0, 1); - return __ret; -} -#else -__ai float32x2_t vget_low_f32(float32x4_t __p0) { - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float32x2_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x4_t vget_low_f16(float16x8_t __p0) { - float16x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3); - return __ret; -} -#else -__ai float16x4_t vget_low_f16(float16x8_t __p0) { - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x4_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vget_low_s32(int32x4_t __p0) { - int32x2_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 0, 1); - return __ret; -} -#else -__ai int32x2_t vget_low_s32(int32x4_t __p0) { - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x2_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x1_t vget_low_s64(int64x2_t __p0) { - int64x1_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 0); - return __ret; -} -#else -__ai int64x1_t vget_low_s64(int64x2_t __p0) { - int64x2_t __rev0; 
__rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int64x1_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev0, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vget_low_s16(int16x8_t __p0) { - int16x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3); - return __ret; -} -#else -__ai int16x4_t vget_low_s16(int16x8_t __p0) { - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x4_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vhaddq_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16_t __ret; - __ret = (uint8x16_t) __builtin_neon_vhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); - return __ret; -} -#else -__ai uint8x16_t vhaddq_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __ret; - __ret = (uint8x16_t) __builtin_neon_vhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vhaddq_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); - return __ret; -} -#else -__ai uint32x4_t vhaddq_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vhaddq_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); - return __ret; -} -#else -__ai uint16x8_t vhaddq_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vhaddq_s8(int8x16_t __p0, int8x16_t __p1) { - int8x16_t __ret; - __ret = (int8x16_t) __builtin_neon_vhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); - return __ret; -} -#else -__ai int8x16_t vhaddq_s8(int8x16_t __p0, int8x16_t __p1) { - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __ret; - __ret = (int8x16_t) __builtin_neon_vhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vhaddq_s32(int32x4_t 
__p0, int32x4_t __p1) { - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); - return __ret; -} -#else -__ai int32x4_t vhaddq_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vhaddq_s16(int16x8_t __p0, int16x8_t __p1) { - int16x8_t __ret; - __ret = (int16x8_t) __builtin_neon_vhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); - return __ret; -} -#else -__ai int16x8_t vhaddq_s16(int16x8_t __p0, int16x8_t __p1) { - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __ret; - __ret = (int16x8_t) __builtin_neon_vhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vhadd_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 16); - return __ret; -} -#else -__ai uint8x8_t vhadd_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vhadd_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 18); - return __ret; -} -#else -__ai uint32x2_t vhadd_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vhadd_u16(uint16x4_t __p0, uint16x4_t __p1) { - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 17); - return __ret; -} -#else -__ai uint16x4_t vhadd_u16(uint16x4_t __p0, uint16x4_t __p1) { - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vhadd_s8(int8x8_t __p0, int8x8_t __p1) { - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 0); - return __ret; -} -#else -__ai int8x8_t vhadd_s8(int8x8_t __p0, int8x8_t __p1) { - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __rev1; __rev1 = 
__builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vhadd_s32(int32x2_t __p0, int32x2_t __p1) { - int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 2); - return __ret; -} -#else -__ai int32x2_t vhadd_s32(int32x2_t __p0, int32x2_t __p1) { - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vhadd_s16(int16x4_t __p0, int16x4_t __p1) { - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 1); - return __ret; -} -#else -__ai int16x4_t vhadd_s16(int16x4_t __p0, int16x4_t __p1) { - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vhsubq_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16_t __ret; - __ret = (uint8x16_t) __builtin_neon_vhsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); - return __ret; -} -#else -__ai uint8x16_t vhsubq_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __ret; - __ret = (uint8x16_t) __builtin_neon_vhsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vhsubq_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vhsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); - return __ret; -} -#else -__ai uint32x4_t vhsubq_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vhsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vhsubq_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vhsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); - return __ret; -} -#else -__ai uint16x8_t vhsubq_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vhsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); 
- return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vhsubq_s8(int8x16_t __p0, int8x16_t __p1) { - int8x16_t __ret; - __ret = (int8x16_t) __builtin_neon_vhsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); - return __ret; -} -#else -__ai int8x16_t vhsubq_s8(int8x16_t __p0, int8x16_t __p1) { - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __ret; - __ret = (int8x16_t) __builtin_neon_vhsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vhsubq_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vhsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); - return __ret; -} -#else -__ai int32x4_t vhsubq_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vhsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vhsubq_s16(int16x8_t __p0, int16x8_t __p1) { - int16x8_t __ret; - __ret = (int16x8_t) __builtin_neon_vhsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); - return __ret; -} -#else -__ai int16x8_t vhsubq_s16(int16x8_t __p0, int16x8_t __p1) { - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __ret; - __ret = (int16x8_t) __builtin_neon_vhsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vhsub_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vhsub_v((int8x8_t)__p0, (int8x8_t)__p1, 16); - return __ret; -} -#else -__ai uint8x8_t vhsub_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vhsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vhsub_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vhsub_v((int8x8_t)__p0, (int8x8_t)__p1, 18); - return __ret; -} -#else -__ai uint32x2_t vhsub_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vhsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vhsub_u16(uint16x4_t __p0, uint16x4_t __p1) { - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vhsub_v((int8x8_t)__p0, (int8x8_t)__p1, 17); - 
return __ret; -} -#else -__ai uint16x4_t vhsub_u16(uint16x4_t __p0, uint16x4_t __p1) { - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vhsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vhsub_s8(int8x8_t __p0, int8x8_t __p1) { - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vhsub_v((int8x8_t)__p0, (int8x8_t)__p1, 0); - return __ret; -} -#else -__ai int8x8_t vhsub_s8(int8x8_t __p0, int8x8_t __p1) { - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vhsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vhsub_s32(int32x2_t __p0, int32x2_t __p1) { - int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vhsub_v((int8x8_t)__p0, (int8x8_t)__p1, 2); - return __ret; -} -#else -__ai int32x2_t vhsub_s32(int32x2_t __p0, int32x2_t __p1) { - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vhsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vhsub_s16(int16x4_t __p0, int16x4_t __p1) { - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vhsub_v((int8x8_t)__p0, (int8x8_t)__p1, 1); - return __ret; -} -#else -__ai int16x4_t vhsub_s16(int16x4_t __p0, int16x4_t __p1) { - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vhsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1_p8(__p0) __extension__ ({ \ - poly8x8_t __ret; \ - __ret = (poly8x8_t) __builtin_neon_vld1_v(__p0, 4); \ - __ret; \ -}) -#else -#define vld1_p8(__p0) __extension__ ({ \ - poly8x8_t __ret; \ - __ret = (poly8x8_t) __builtin_neon_vld1_v(__p0, 4); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1_p16(__p0) __extension__ ({ \ - poly16x4_t __ret; \ - __ret = (poly16x4_t) __builtin_neon_vld1_v(__p0, 5); \ - __ret; \ -}) -#else -#define vld1_p16(__p0) __extension__ ({ \ - poly16x4_t __ret; \ - __ret = (poly16x4_t) __builtin_neon_vld1_v(__p0, 5); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1q_p8(__p0) __extension__ ({ \ - poly8x16_t __ret; \ - __ret = (poly8x16_t) __builtin_neon_vld1q_v(__p0, 36); \ - __ret; \ -}) -#else -#define vld1q_p8(__p0) __extension__ ({ \ - poly8x16_t __ret; \ - __ret = (poly8x16_t) __builtin_neon_vld1q_v(__p0, 36); \ - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1q_p16(__p0) 
__extension__ ({ \ - poly16x8_t __ret; \ - __ret = (poly16x8_t) __builtin_neon_vld1q_v(__p0, 37); \ - __ret; \ -}) -#else -#define vld1q_p16(__p0) __extension__ ({ \ - poly16x8_t __ret; \ - __ret = (poly16x8_t) __builtin_neon_vld1q_v(__p0, 37); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1q_u8(__p0) __extension__ ({ \ - uint8x16_t __ret; \ - __ret = (uint8x16_t) __builtin_neon_vld1q_v(__p0, 48); \ - __ret; \ -}) -#else -#define vld1q_u8(__p0) __extension__ ({ \ - uint8x16_t __ret; \ - __ret = (uint8x16_t) __builtin_neon_vld1q_v(__p0, 48); \ - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1q_u32(__p0) __extension__ ({ \ - uint32x4_t __ret; \ - __ret = (uint32x4_t) __builtin_neon_vld1q_v(__p0, 50); \ - __ret; \ -}) -#else -#define vld1q_u32(__p0) __extension__ ({ \ - uint32x4_t __ret; \ - __ret = (uint32x4_t) __builtin_neon_vld1q_v(__p0, 50); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1q_u64(__p0) __extension__ ({ \ - uint64x2_t __ret; \ - __ret = (uint64x2_t) __builtin_neon_vld1q_v(__p0, 51); \ - __ret; \ -}) -#else -#define vld1q_u64(__p0) __extension__ ({ \ - uint64x2_t __ret; \ - __ret = (uint64x2_t) __builtin_neon_vld1q_v(__p0, 51); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1q_u16(__p0) __extension__ ({ \ - uint16x8_t __ret; \ - __ret = (uint16x8_t) __builtin_neon_vld1q_v(__p0, 49); \ - __ret; \ -}) -#else -#define vld1q_u16(__p0) __extension__ ({ \ - uint16x8_t __ret; \ - __ret = (uint16x8_t) __builtin_neon_vld1q_v(__p0, 49); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1q_s8(__p0) __extension__ ({ \ - int8x16_t __ret; \ - __ret = (int8x16_t) __builtin_neon_vld1q_v(__p0, 32); \ - __ret; \ -}) -#else -#define vld1q_s8(__p0) __extension__ ({ \ - int8x16_t __ret; \ - __ret = (int8x16_t) __builtin_neon_vld1q_v(__p0, 32); \ - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1q_f32(__p0) __extension__ ({ \ - float32x4_t __ret; \ - __ret = (float32x4_t) __builtin_neon_vld1q_v(__p0, 41); \ - __ret; \ -}) -#else -#define vld1q_f32(__p0) __extension__ ({ \ - float32x4_t __ret; \ - __ret = (float32x4_t) __builtin_neon_vld1q_v(__p0, 41); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1q_f16(__p0) __extension__ ({ \ - float16x8_t __ret; \ - __ret = (float16x8_t) __builtin_neon_vld1q_v(__p0, 40); \ - __ret; \ -}) -#else -#define vld1q_f16(__p0) __extension__ ({ \ - float16x8_t __ret; \ - __ret = (float16x8_t) __builtin_neon_vld1q_v(__p0, 40); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1q_s32(__p0) __extension__ ({ \ - int32x4_t __ret; \ - __ret = (int32x4_t) __builtin_neon_vld1q_v(__p0, 34); \ - __ret; \ -}) -#else -#define vld1q_s32(__p0) __extension__ ({ \ - int32x4_t __ret; \ - __ret = (int32x4_t) __builtin_neon_vld1q_v(__p0, 34); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef 
__LITTLE_ENDIAN__ -#define vld1q_s64(__p0) __extension__ ({ \ - int64x2_t __ret; \ - __ret = (int64x2_t) __builtin_neon_vld1q_v(__p0, 35); \ - __ret; \ -}) -#else -#define vld1q_s64(__p0) __extension__ ({ \ - int64x2_t __ret; \ - __ret = (int64x2_t) __builtin_neon_vld1q_v(__p0, 35); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1q_s16(__p0) __extension__ ({ \ - int16x8_t __ret; \ - __ret = (int16x8_t) __builtin_neon_vld1q_v(__p0, 33); \ - __ret; \ -}) -#else -#define vld1q_s16(__p0) __extension__ ({ \ - int16x8_t __ret; \ - __ret = (int16x8_t) __builtin_neon_vld1q_v(__p0, 33); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1_u8(__p0) __extension__ ({ \ - uint8x8_t __ret; \ - __ret = (uint8x8_t) __builtin_neon_vld1_v(__p0, 16); \ - __ret; \ -}) -#else -#define vld1_u8(__p0) __extension__ ({ \ - uint8x8_t __ret; \ - __ret = (uint8x8_t) __builtin_neon_vld1_v(__p0, 16); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1_u32(__p0) __extension__ ({ \ - uint32x2_t __ret; \ - __ret = (uint32x2_t) __builtin_neon_vld1_v(__p0, 18); \ - __ret; \ -}) -#else -#define vld1_u32(__p0) __extension__ ({ \ - uint32x2_t __ret; \ - __ret = (uint32x2_t) __builtin_neon_vld1_v(__p0, 18); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1_u64(__p0) __extension__ ({ \ - uint64x1_t __ret; \ - __ret = (uint64x1_t) __builtin_neon_vld1_v(__p0, 19); \ - __ret; \ -}) -#else -#define vld1_u64(__p0) __extension__ ({ \ - uint64x1_t __ret; \ - __ret = (uint64x1_t) __builtin_neon_vld1_v(__p0, 19); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1_u16(__p0) __extension__ ({ \ - uint16x4_t __ret; \ - __ret = (uint16x4_t) __builtin_neon_vld1_v(__p0, 17); \ - __ret; \ -}) -#else -#define vld1_u16(__p0) __extension__ ({ \ - uint16x4_t __ret; \ - __ret = (uint16x4_t) __builtin_neon_vld1_v(__p0, 17); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1_s8(__p0) __extension__ ({ \ - int8x8_t __ret; \ - __ret = (int8x8_t) __builtin_neon_vld1_v(__p0, 0); \ - __ret; \ -}) -#else -#define vld1_s8(__p0) __extension__ ({ \ - int8x8_t __ret; \ - __ret = (int8x8_t) __builtin_neon_vld1_v(__p0, 0); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1_f32(__p0) __extension__ ({ \ - float32x2_t __ret; \ - __ret = (float32x2_t) __builtin_neon_vld1_v(__p0, 9); \ - __ret; \ -}) -#else -#define vld1_f32(__p0) __extension__ ({ \ - float32x2_t __ret; \ - __ret = (float32x2_t) __builtin_neon_vld1_v(__p0, 9); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1_f16(__p0) __extension__ ({ \ - float16x4_t __ret; \ - __ret = (float16x4_t) __builtin_neon_vld1_v(__p0, 8); \ - __ret; \ -}) -#else -#define vld1_f16(__p0) __extension__ ({ \ - float16x4_t __ret; \ - __ret = (float16x4_t) __builtin_neon_vld1_v(__p0, 8); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1_s32(__p0) __extension__ ({ \ - int32x2_t __ret; \ - __ret = (int32x2_t) __builtin_neon_vld1_v(__p0, 2); \ - __ret; \ -}) -#else -#define 
vld1_s32(__p0) __extension__ ({ \ - int32x2_t __ret; \ - __ret = (int32x2_t) __builtin_neon_vld1_v(__p0, 2); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1_s64(__p0) __extension__ ({ \ - int64x1_t __ret; \ - __ret = (int64x1_t) __builtin_neon_vld1_v(__p0, 3); \ - __ret; \ -}) -#else -#define vld1_s64(__p0) __extension__ ({ \ - int64x1_t __ret; \ - __ret = (int64x1_t) __builtin_neon_vld1_v(__p0, 3); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1_s16(__p0) __extension__ ({ \ - int16x4_t __ret; \ - __ret = (int16x4_t) __builtin_neon_vld1_v(__p0, 1); \ - __ret; \ -}) -#else -#define vld1_s16(__p0) __extension__ ({ \ - int16x4_t __ret; \ - __ret = (int16x4_t) __builtin_neon_vld1_v(__p0, 1); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1_dup_p8(__p0) __extension__ ({ \ - poly8x8_t __ret; \ - __ret = (poly8x8_t) __builtin_neon_vld1_dup_v(__p0, 4); \ - __ret; \ -}) -#else -#define vld1_dup_p8(__p0) __extension__ ({ \ - poly8x8_t __ret; \ - __ret = (poly8x8_t) __builtin_neon_vld1_dup_v(__p0, 4); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1_dup_p16(__p0) __extension__ ({ \ - poly16x4_t __ret; \ - __ret = (poly16x4_t) __builtin_neon_vld1_dup_v(__p0, 5); \ - __ret; \ -}) -#else -#define vld1_dup_p16(__p0) __extension__ ({ \ - poly16x4_t __ret; \ - __ret = (poly16x4_t) __builtin_neon_vld1_dup_v(__p0, 5); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1q_dup_p8(__p0) __extension__ ({ \ - poly8x16_t __ret; \ - __ret = (poly8x16_t) __builtin_neon_vld1q_dup_v(__p0, 36); \ - __ret; \ -}) -#else -#define vld1q_dup_p8(__p0) __extension__ ({ \ - poly8x16_t __ret; \ - __ret = (poly8x16_t) __builtin_neon_vld1q_dup_v(__p0, 36); \ - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1q_dup_p16(__p0) __extension__ ({ \ - poly16x8_t __ret; \ - __ret = (poly16x8_t) __builtin_neon_vld1q_dup_v(__p0, 37); \ - __ret; \ -}) -#else -#define vld1q_dup_p16(__p0) __extension__ ({ \ - poly16x8_t __ret; \ - __ret = (poly16x8_t) __builtin_neon_vld1q_dup_v(__p0, 37); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1q_dup_u8(__p0) __extension__ ({ \ - uint8x16_t __ret; \ - __ret = (uint8x16_t) __builtin_neon_vld1q_dup_v(__p0, 48); \ - __ret; \ -}) -#else -#define vld1q_dup_u8(__p0) __extension__ ({ \ - uint8x16_t __ret; \ - __ret = (uint8x16_t) __builtin_neon_vld1q_dup_v(__p0, 48); \ - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1q_dup_u32(__p0) __extension__ ({ \ - uint32x4_t __ret; \ - __ret = (uint32x4_t) __builtin_neon_vld1q_dup_v(__p0, 50); \ - __ret; \ -}) -#else -#define vld1q_dup_u32(__p0) __extension__ ({ \ - uint32x4_t __ret; \ - __ret = (uint32x4_t) __builtin_neon_vld1q_dup_v(__p0, 50); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1q_dup_u64(__p0) __extension__ ({ \ - uint64x2_t __ret; \ - __ret = (uint64x2_t) __builtin_neon_vld1q_dup_v(__p0, 51); \ - __ret; \ -}) 
-#else -#define vld1q_dup_u64(__p0) __extension__ ({ \ - uint64x2_t __ret; \ - __ret = (uint64x2_t) __builtin_neon_vld1q_dup_v(__p0, 51); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1q_dup_u16(__p0) __extension__ ({ \ - uint16x8_t __ret; \ - __ret = (uint16x8_t) __builtin_neon_vld1q_dup_v(__p0, 49); \ - __ret; \ -}) -#else -#define vld1q_dup_u16(__p0) __extension__ ({ \ - uint16x8_t __ret; \ - __ret = (uint16x8_t) __builtin_neon_vld1q_dup_v(__p0, 49); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1q_dup_s8(__p0) __extension__ ({ \ - int8x16_t __ret; \ - __ret = (int8x16_t) __builtin_neon_vld1q_dup_v(__p0, 32); \ - __ret; \ -}) -#else -#define vld1q_dup_s8(__p0) __extension__ ({ \ - int8x16_t __ret; \ - __ret = (int8x16_t) __builtin_neon_vld1q_dup_v(__p0, 32); \ - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1q_dup_f32(__p0) __extension__ ({ \ - float32x4_t __ret; \ - __ret = (float32x4_t) __builtin_neon_vld1q_dup_v(__p0, 41); \ - __ret; \ -}) -#else -#define vld1q_dup_f32(__p0) __extension__ ({ \ - float32x4_t __ret; \ - __ret = (float32x4_t) __builtin_neon_vld1q_dup_v(__p0, 41); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1q_dup_f16(__p0) __extension__ ({ \ - float16x8_t __ret; \ - __ret = (float16x8_t) __builtin_neon_vld1q_dup_v(__p0, 40); \ - __ret; \ -}) -#else -#define vld1q_dup_f16(__p0) __extension__ ({ \ - float16x8_t __ret; \ - __ret = (float16x8_t) __builtin_neon_vld1q_dup_v(__p0, 40); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1q_dup_s32(__p0) __extension__ ({ \ - int32x4_t __ret; \ - __ret = (int32x4_t) __builtin_neon_vld1q_dup_v(__p0, 34); \ - __ret; \ -}) -#else -#define vld1q_dup_s32(__p0) __extension__ ({ \ - int32x4_t __ret; \ - __ret = (int32x4_t) __builtin_neon_vld1q_dup_v(__p0, 34); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1q_dup_s64(__p0) __extension__ ({ \ - int64x2_t __ret; \ - __ret = (int64x2_t) __builtin_neon_vld1q_dup_v(__p0, 35); \ - __ret; \ -}) -#else -#define vld1q_dup_s64(__p0) __extension__ ({ \ - int64x2_t __ret; \ - __ret = (int64x2_t) __builtin_neon_vld1q_dup_v(__p0, 35); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1q_dup_s16(__p0) __extension__ ({ \ - int16x8_t __ret; \ - __ret = (int16x8_t) __builtin_neon_vld1q_dup_v(__p0, 33); \ - __ret; \ -}) -#else -#define vld1q_dup_s16(__p0) __extension__ ({ \ - int16x8_t __ret; \ - __ret = (int16x8_t) __builtin_neon_vld1q_dup_v(__p0, 33); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1_dup_u8(__p0) __extension__ ({ \ - uint8x8_t __ret; \ - __ret = (uint8x8_t) __builtin_neon_vld1_dup_v(__p0, 16); \ - __ret; \ -}) -#else -#define vld1_dup_u8(__p0) __extension__ ({ \ - uint8x8_t __ret; \ - __ret = (uint8x8_t) __builtin_neon_vld1_dup_v(__p0, 16); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define 
vld1_dup_u32(__p0) __extension__ ({ \ - uint32x2_t __ret; \ - __ret = (uint32x2_t) __builtin_neon_vld1_dup_v(__p0, 18); \ - __ret; \ -}) -#else -#define vld1_dup_u32(__p0) __extension__ ({ \ - uint32x2_t __ret; \ - __ret = (uint32x2_t) __builtin_neon_vld1_dup_v(__p0, 18); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1_dup_u64(__p0) __extension__ ({ \ - uint64x1_t __ret; \ - __ret = (uint64x1_t) __builtin_neon_vld1_dup_v(__p0, 19); \ - __ret; \ -}) -#else -#define vld1_dup_u64(__p0) __extension__ ({ \ - uint64x1_t __ret; \ - __ret = (uint64x1_t) __builtin_neon_vld1_dup_v(__p0, 19); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1_dup_u16(__p0) __extension__ ({ \ - uint16x4_t __ret; \ - __ret = (uint16x4_t) __builtin_neon_vld1_dup_v(__p0, 17); \ - __ret; \ -}) -#else -#define vld1_dup_u16(__p0) __extension__ ({ \ - uint16x4_t __ret; \ - __ret = (uint16x4_t) __builtin_neon_vld1_dup_v(__p0, 17); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1_dup_s8(__p0) __extension__ ({ \ - int8x8_t __ret; \ - __ret = (int8x8_t) __builtin_neon_vld1_dup_v(__p0, 0); \ - __ret; \ -}) -#else -#define vld1_dup_s8(__p0) __extension__ ({ \ - int8x8_t __ret; \ - __ret = (int8x8_t) __builtin_neon_vld1_dup_v(__p0, 0); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1_dup_f32(__p0) __extension__ ({ \ - float32x2_t __ret; \ - __ret = (float32x2_t) __builtin_neon_vld1_dup_v(__p0, 9); \ - __ret; \ -}) -#else -#define vld1_dup_f32(__p0) __extension__ ({ \ - float32x2_t __ret; \ - __ret = (float32x2_t) __builtin_neon_vld1_dup_v(__p0, 9); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1_dup_f16(__p0) __extension__ ({ \ - float16x4_t __ret; \ - __ret = (float16x4_t) __builtin_neon_vld1_dup_v(__p0, 8); \ - __ret; \ -}) -#else -#define vld1_dup_f16(__p0) __extension__ ({ \ - float16x4_t __ret; \ - __ret = (float16x4_t) __builtin_neon_vld1_dup_v(__p0, 8); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1_dup_s32(__p0) __extension__ ({ \ - int32x2_t __ret; \ - __ret = (int32x2_t) __builtin_neon_vld1_dup_v(__p0, 2); \ - __ret; \ -}) -#else -#define vld1_dup_s32(__p0) __extension__ ({ \ - int32x2_t __ret; \ - __ret = (int32x2_t) __builtin_neon_vld1_dup_v(__p0, 2); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1_dup_s64(__p0) __extension__ ({ \ - int64x1_t __ret; \ - __ret = (int64x1_t) __builtin_neon_vld1_dup_v(__p0, 3); \ - __ret; \ -}) -#else -#define vld1_dup_s64(__p0) __extension__ ({ \ - int64x1_t __ret; \ - __ret = (int64x1_t) __builtin_neon_vld1_dup_v(__p0, 3); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1_dup_s16(__p0) __extension__ ({ \ - int16x4_t __ret; \ - __ret = (int16x4_t) __builtin_neon_vld1_dup_v(__p0, 1); \ - __ret; \ -}) -#else -#define vld1_dup_s16(__p0) __extension__ ({ \ - int16x4_t __ret; \ - __ret = (int16x4_t) __builtin_neon_vld1_dup_v(__p0, 1); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1_lane_p8(__p0, __p1, __p2) __extension__ ({ \ - poly8x8_t __s1 = __p1; \ - poly8x8_t __ret; \ - __ret = (poly8x8_t) 
__builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 4); \ - __ret; \ -}) -#else -#define vld1_lane_p8(__p0, __p1, __p2) __extension__ ({ \ - poly8x8_t __s1 = __p1; \ - poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - poly8x8_t __ret; \ - __ret = (poly8x8_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 4); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1_lane_p16(__p0, __p1, __p2) __extension__ ({ \ - poly16x4_t __s1 = __p1; \ - poly16x4_t __ret; \ - __ret = (poly16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 5); \ - __ret; \ -}) -#else -#define vld1_lane_p16(__p0, __p1, __p2) __extension__ ({ \ - poly16x4_t __s1 = __p1; \ - poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - poly16x4_t __ret; \ - __ret = (poly16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 5); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1q_lane_p8(__p0, __p1, __p2) __extension__ ({ \ - poly8x16_t __s1 = __p1; \ - poly8x16_t __ret; \ - __ret = (poly8x16_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 36); \ - __ret; \ -}) -#else -#define vld1q_lane_p8(__p0, __p1, __p2) __extension__ ({ \ - poly8x16_t __s1 = __p1; \ - poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - poly8x16_t __ret; \ - __ret = (poly8x16_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 36); \ - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1q_lane_p16(__p0, __p1, __p2) __extension__ ({ \ - poly16x8_t __s1 = __p1; \ - poly16x8_t __ret; \ - __ret = (poly16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 37); \ - __ret; \ -}) -#else -#define vld1q_lane_p16(__p0, __p1, __p2) __extension__ ({ \ - poly16x8_t __s1 = __p1; \ - poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - poly16x8_t __ret; \ - __ret = (poly16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 37); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1q_lane_u8(__p0, __p1, __p2) __extension__ ({ \ - uint8x16_t __s1 = __p1; \ - uint8x16_t __ret; \ - __ret = (uint8x16_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 48); \ - __ret; \ -}) -#else -#define vld1q_lane_u8(__p0, __p1, __p2) __extension__ ({ \ - uint8x16_t __s1 = __p1; \ - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint8x16_t __ret; \ - __ret = (uint8x16_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 48); \ - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1q_lane_u32(__p0, __p1, __p2) __extension__ ({ \ - uint32x4_t __s1 = __p1; \ - uint32x4_t __ret; \ - __ret = (uint32x4_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 50); \ - __ret; \ -}) -#else -#define vld1q_lane_u32(__p0, __p1, __p2) __extension__ ({ \ - uint32x4_t __s1 = __p1; \ - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - uint32x4_t __ret; \ - __ret = 
(uint32x4_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 50); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1q_lane_u64(__p0, __p1, __p2) __extension__ ({ \ - uint64x2_t __s1 = __p1; \ - uint64x2_t __ret; \ - __ret = (uint64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 51); \ - __ret; \ -}) -#else -#define vld1q_lane_u64(__p0, __p1, __p2) __extension__ ({ \ - uint64x2_t __s1 = __p1; \ - uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - uint64x2_t __ret; \ - __ret = (uint64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 51); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1q_lane_u16(__p0, __p1, __p2) __extension__ ({ \ - uint16x8_t __s1 = __p1; \ - uint16x8_t __ret; \ - __ret = (uint16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 49); \ - __ret; \ -}) -#else -#define vld1q_lane_u16(__p0, __p1, __p2) __extension__ ({ \ - uint16x8_t __s1 = __p1; \ - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint16x8_t __ret; \ - __ret = (uint16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 49); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1q_lane_s8(__p0, __p1, __p2) __extension__ ({ \ - int8x16_t __s1 = __p1; \ - int8x16_t __ret; \ - __ret = (int8x16_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 32); \ - __ret; \ -}) -#else -#define vld1q_lane_s8(__p0, __p1, __p2) __extension__ ({ \ - int8x16_t __s1 = __p1; \ - int8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - int8x16_t __ret; \ - __ret = (int8x16_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 32); \ - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1q_lane_f32(__p0, __p1, __p2) __extension__ ({ \ - float32x4_t __s1 = __p1; \ - float32x4_t __ret; \ - __ret = (float32x4_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 41); \ - __ret; \ -}) -#else -#define vld1q_lane_f32(__p0, __p1, __p2) __extension__ ({ \ - float32x4_t __s1 = __p1; \ - float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - float32x4_t __ret; \ - __ret = (float32x4_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 41); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1q_lane_f16(__p0, __p1, __p2) __extension__ ({ \ - float16x8_t __s1 = __p1; \ - float16x8_t __ret; \ - __ret = (float16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 40); \ - __ret; \ -}) -#else -#define vld1q_lane_f16(__p0, __p1, __p2) __extension__ ({ \ - float16x8_t __s1 = __p1; \ - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x8_t __ret; \ - __ret = (float16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 40); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1q_lane_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x4_t __s1 = __p1; \ - int32x4_t __ret; \ - __ret = (int32x4_t) 
__builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 34); \ - __ret; \ -}) -#else -#define vld1q_lane_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x4_t __s1 = __p1; \ - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - int32x4_t __ret; \ - __ret = (int32x4_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 34); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1q_lane_s64(__p0, __p1, __p2) __extension__ ({ \ - int64x2_t __s1 = __p1; \ - int64x2_t __ret; \ - __ret = (int64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 35); \ - __ret; \ -}) -#else -#define vld1q_lane_s64(__p0, __p1, __p2) __extension__ ({ \ - int64x2_t __s1 = __p1; \ - int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - int64x2_t __ret; \ - __ret = (int64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 35); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1q_lane_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x8_t __s1 = __p1; \ - int16x8_t __ret; \ - __ret = (int16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 33); \ - __ret; \ -}) -#else -#define vld1q_lane_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x8_t __s1 = __p1; \ - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x8_t __ret; \ - __ret = (int16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 33); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1_lane_u8(__p0, __p1, __p2) __extension__ ({ \ - uint8x8_t __s1 = __p1; \ - uint8x8_t __ret; \ - __ret = (uint8x8_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 16); \ - __ret; \ -}) -#else -#define vld1_lane_u8(__p0, __p1, __p2) __extension__ ({ \ - uint8x8_t __s1 = __p1; \ - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint8x8_t __ret; \ - __ret = (uint8x8_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 16); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1_lane_u32(__p0, __p1, __p2) __extension__ ({ \ - uint32x2_t __s1 = __p1; \ - uint32x2_t __ret; \ - __ret = (uint32x2_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 18); \ - __ret; \ -}) -#else -#define vld1_lane_u32(__p0, __p1, __p2) __extension__ ({ \ - uint32x2_t __s1 = __p1; \ - uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - uint32x2_t __ret; \ - __ret = (uint32x2_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 18); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1_lane_u64(__p0, __p1, __p2) __extension__ ({ \ - uint64x1_t __s1 = __p1; \ - uint64x1_t __ret; \ - __ret = (uint64x1_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 19); \ - __ret; \ -}) -#else -#define vld1_lane_u64(__p0, __p1, __p2) __extension__ ({ \ - uint64x1_t __s1 = __p1; \ - uint64x1_t __ret; \ - __ret = (uint64x1_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 19); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1_lane_u16(__p0, __p1, __p2) __extension__ ({ \ - uint16x4_t __s1 = __p1; \ - uint16x4_t __ret; \ - __ret = (uint16x4_t) __builtin_neon_vld1_lane_v(__p0, 
(int8x8_t)__s1, __p2, 17); \ - __ret; \ -}) -#else -#define vld1_lane_u16(__p0, __p1, __p2) __extension__ ({ \ - uint16x4_t __s1 = __p1; \ - uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - uint16x4_t __ret; \ - __ret = (uint16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 17); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1_lane_s8(__p0, __p1, __p2) __extension__ ({ \ - int8x8_t __s1 = __p1; \ - int8x8_t __ret; \ - __ret = (int8x8_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 0); \ - __ret; \ -}) -#else -#define vld1_lane_s8(__p0, __p1, __p2) __extension__ ({ \ - int8x8_t __s1 = __p1; \ - int8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - int8x8_t __ret; \ - __ret = (int8x8_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 0); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1_lane_f32(__p0, __p1, __p2) __extension__ ({ \ - float32x2_t __s1 = __p1; \ - float32x2_t __ret; \ - __ret = (float32x2_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 9); \ - __ret; \ -}) -#else -#define vld1_lane_f32(__p0, __p1, __p2) __extension__ ({ \ - float32x2_t __s1 = __p1; \ - float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - float32x2_t __ret; \ - __ret = (float32x2_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 9); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1_lane_f16(__p0, __p1, __p2) __extension__ ({ \ - float16x4_t __s1 = __p1; \ - float16x4_t __ret; \ - __ret = (float16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 8); \ - __ret; \ -}) -#else -#define vld1_lane_f16(__p0, __p1, __p2) __extension__ ({ \ - float16x4_t __s1 = __p1; \ - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - float16x4_t __ret; \ - __ret = (float16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 8); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1_lane_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x2_t __s1 = __p1; \ - int32x2_t __ret; \ - __ret = (int32x2_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 2); \ - __ret; \ -}) -#else -#define vld1_lane_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x2_t __s1 = __p1; \ - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - int32x2_t __ret; \ - __ret = (int32x2_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 2); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1_lane_s64(__p0, __p1, __p2) __extension__ ({ \ - int64x1_t __s1 = __p1; \ - int64x1_t __ret; \ - __ret = (int64x1_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 3); \ - __ret; \ -}) -#else -#define vld1_lane_s64(__p0, __p1, __p2) __extension__ ({ \ - int64x1_t __s1 = __p1; \ - int64x1_t __ret; \ - __ret = (int64x1_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 3); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1_lane_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x4_t __s1 = __p1; \ - int16x4_t __ret; \ - __ret = (int16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 1); \ - __ret; \ -}) -#else -#define 
vld1_lane_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x4_t __s1 = __p1; \ - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - int16x4_t __ret; \ - __ret = (int16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 1); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld2_p8(__p0) __extension__ ({ \ - poly8x8x2_t __ret; \ - __builtin_neon_vld2_v(&__ret, __p0, 4); \ - __ret; \ -}) -#else -#define vld2_p8(__p0) __extension__ ({ \ - poly8x8x2_t __ret; \ - __builtin_neon_vld2_v(&__ret, __p0, 4); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld2_p16(__p0) __extension__ ({ \ - poly16x4x2_t __ret; \ - __builtin_neon_vld2_v(&__ret, __p0, 5); \ - __ret; \ -}) -#else -#define vld2_p16(__p0) __extension__ ({ \ - poly16x4x2_t __ret; \ - __builtin_neon_vld2_v(&__ret, __p0, 5); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld2q_p8(__p0) __extension__ ({ \ - poly8x16x2_t __ret; \ - __builtin_neon_vld2q_v(&__ret, __p0, 36); \ - __ret; \ -}) -#else -#define vld2q_p8(__p0) __extension__ ({ \ - poly8x16x2_t __ret; \ - __builtin_neon_vld2q_v(&__ret, __p0, 36); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld2q_p16(__p0) __extension__ ({ \ - poly16x8x2_t __ret; \ - __builtin_neon_vld2q_v(&__ret, __p0, 37); \ - __ret; \ -}) -#else -#define vld2q_p16(__p0) __extension__ ({ \ - poly16x8x2_t __ret; \ - __builtin_neon_vld2q_v(&__ret, __p0, 37); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld2q_u8(__p0) __extension__ ({ \ - uint8x16x2_t __ret; \ - __builtin_neon_vld2q_v(&__ret, __p0, 48); \ - __ret; \ -}) -#else -#define vld2q_u8(__p0) __extension__ ({ \ - uint8x16x2_t __ret; \ - __builtin_neon_vld2q_v(&__ret, __p0, 48); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld2q_u32(__p0) __extension__ ({ \ - uint32x4x2_t __ret; \ - __builtin_neon_vld2q_v(&__ret, __p0, 50); \ - __ret; \ -}) -#else -#define vld2q_u32(__p0) __extension__ ({ \ - uint32x4x2_t __ret; \ - __builtin_neon_vld2q_v(&__ret, __p0, 50); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld2q_u16(__p0) __extension__ ({ \ - uint16x8x2_t __ret; \ - __builtin_neon_vld2q_v(&__ret, __p0, 49); \ - __ret; \ -}) -#else 
-#define vld2q_u16(__p0) __extension__ ({ \ - uint16x8x2_t __ret; \ - __builtin_neon_vld2q_v(&__ret, __p0, 49); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld2q_s8(__p0) __extension__ ({ \ - int8x16x2_t __ret; \ - __builtin_neon_vld2q_v(&__ret, __p0, 32); \ - __ret; \ -}) -#else -#define vld2q_s8(__p0) __extension__ ({ \ - int8x16x2_t __ret; \ - __builtin_neon_vld2q_v(&__ret, __p0, 32); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld2q_f32(__p0) __extension__ ({ \ - float32x4x2_t __ret; \ - __builtin_neon_vld2q_v(&__ret, __p0, 41); \ - __ret; \ -}) -#else -#define vld2q_f32(__p0) __extension__ ({ \ - float32x4x2_t __ret; \ - __builtin_neon_vld2q_v(&__ret, __p0, 41); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld2q_f16(__p0) __extension__ ({ \ - float16x8x2_t __ret; \ - __builtin_neon_vld2q_v(&__ret, __p0, 40); \ - __ret; \ -}) -#else -#define vld2q_f16(__p0) __extension__ ({ \ - float16x8x2_t __ret; \ - __builtin_neon_vld2q_v(&__ret, __p0, 40); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld2q_s32(__p0) __extension__ ({ \ - int32x4x2_t __ret; \ - __builtin_neon_vld2q_v(&__ret, __p0, 34); \ - __ret; \ -}) -#else -#define vld2q_s32(__p0) __extension__ ({ \ - int32x4x2_t __ret; \ - __builtin_neon_vld2q_v(&__ret, __p0, 34); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld2q_s16(__p0) __extension__ ({ \ - int16x8x2_t __ret; \ - __builtin_neon_vld2q_v(&__ret, __p0, 33); \ - __ret; \ -}) -#else -#define vld2q_s16(__p0) __extension__ ({ \ - int16x8x2_t __ret; \ - __builtin_neon_vld2q_v(&__ret, __p0, 33); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld2_u8(__p0) __extension__ ({ \ - uint8x8x2_t __ret; \ - __builtin_neon_vld2_v(&__ret, __p0, 16); \ - __ret; \ -}) -#else -#define vld2_u8(__p0) __extension__ ({ \ - uint8x8x2_t __ret; \ - __builtin_neon_vld2_v(&__ret, __p0, 16); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld2_u32(__p0) __extension__ ({ \ - uint32x2x2_t __ret; \ - __builtin_neon_vld2_v(&__ret, __p0, 18); \ - __ret; \ -}) -#else -#define vld2_u32(__p0) __extension__ ({ \ - uint32x2x2_t __ret; \ - 
__builtin_neon_vld2_v(&__ret, __p0, 18); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld2_u64(__p0) __extension__ ({ \ - uint64x1x2_t __ret; \ - __builtin_neon_vld2_v(&__ret, __p0, 19); \ - __ret; \ -}) -#else -#define vld2_u64(__p0) __extension__ ({ \ - uint64x1x2_t __ret; \ - __builtin_neon_vld2_v(&__ret, __p0, 19); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld2_u16(__p0) __extension__ ({ \ - uint16x4x2_t __ret; \ - __builtin_neon_vld2_v(&__ret, __p0, 17); \ - __ret; \ -}) -#else -#define vld2_u16(__p0) __extension__ ({ \ - uint16x4x2_t __ret; \ - __builtin_neon_vld2_v(&__ret, __p0, 17); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld2_s8(__p0) __extension__ ({ \ - int8x8x2_t __ret; \ - __builtin_neon_vld2_v(&__ret, __p0, 0); \ - __ret; \ -}) -#else -#define vld2_s8(__p0) __extension__ ({ \ - int8x8x2_t __ret; \ - __builtin_neon_vld2_v(&__ret, __p0, 0); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld2_f32(__p0) __extension__ ({ \ - float32x2x2_t __ret; \ - __builtin_neon_vld2_v(&__ret, __p0, 9); \ - __ret; \ -}) -#else -#define vld2_f32(__p0) __extension__ ({ \ - float32x2x2_t __ret; \ - __builtin_neon_vld2_v(&__ret, __p0, 9); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld2_f16(__p0) __extension__ ({ \ - float16x4x2_t __ret; \ - __builtin_neon_vld2_v(&__ret, __p0, 8); \ - __ret; \ -}) -#else -#define vld2_f16(__p0) __extension__ ({ \ - float16x4x2_t __ret; \ - __builtin_neon_vld2_v(&__ret, __p0, 8); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld2_s32(__p0) __extension__ ({ \ - int32x2x2_t __ret; \ - __builtin_neon_vld2_v(&__ret, __p0, 2); \ - __ret; \ -}) -#else -#define vld2_s32(__p0) __extension__ ({ \ - int32x2x2_t __ret; \ - __builtin_neon_vld2_v(&__ret, __p0, 2); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld2_s64(__p0) __extension__ ({ \ - int64x1x2_t __ret; \ - __builtin_neon_vld2_v(&__ret, __p0, 3); \ - __ret; \ -}) -#else -#define vld2_s64(__p0) __extension__ ({ \ - int64x1x2_t __ret; \ - __builtin_neon_vld2_v(&__ret, __p0, 3); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld2_s16(__p0) __extension__ ({ \ - int16x4x2_t __ret; \ - __builtin_neon_vld2_v(&__ret, __p0, 1); \ - __ret; \ -}) -#else -#define vld2_s16(__p0) __extension__ ({ \ - int16x4x2_t __ret; \ - __builtin_neon_vld2_v(&__ret, __p0, 1); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = 
__builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld2_dup_p8(__p0) __extension__ ({ \ - poly8x8x2_t __ret; \ - __builtin_neon_vld2_dup_v(&__ret, __p0, 4); \ - __ret; \ -}) -#else -#define vld2_dup_p8(__p0) __extension__ ({ \ - poly8x8x2_t __ret; \ - __builtin_neon_vld2_dup_v(&__ret, __p0, 4); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld2_dup_p16(__p0) __extension__ ({ \ - poly16x4x2_t __ret; \ - __builtin_neon_vld2_dup_v(&__ret, __p0, 5); \ - __ret; \ -}) -#else -#define vld2_dup_p16(__p0) __extension__ ({ \ - poly16x4x2_t __ret; \ - __builtin_neon_vld2_dup_v(&__ret, __p0, 5); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld2_dup_u8(__p0) __extension__ ({ \ - uint8x8x2_t __ret; \ - __builtin_neon_vld2_dup_v(&__ret, __p0, 16); \ - __ret; \ -}) -#else -#define vld2_dup_u8(__p0) __extension__ ({ \ - uint8x8x2_t __ret; \ - __builtin_neon_vld2_dup_v(&__ret, __p0, 16); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld2_dup_u32(__p0) __extension__ ({ \ - uint32x2x2_t __ret; \ - __builtin_neon_vld2_dup_v(&__ret, __p0, 18); \ - __ret; \ -}) -#else -#define vld2_dup_u32(__p0) __extension__ ({ \ - uint32x2x2_t __ret; \ - __builtin_neon_vld2_dup_v(&__ret, __p0, 18); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld2_dup_u64(__p0) __extension__ ({ \ - uint64x1x2_t __ret; \ - __builtin_neon_vld2_dup_v(&__ret, __p0, 19); \ - __ret; \ -}) -#else -#define vld2_dup_u64(__p0) __extension__ ({ \ - uint64x1x2_t __ret; \ - __builtin_neon_vld2_dup_v(&__ret, __p0, 19); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld2_dup_u16(__p0) __extension__ ({ \ - uint16x4x2_t __ret; \ - __builtin_neon_vld2_dup_v(&__ret, __p0, 17); \ - __ret; \ -}) -#else -#define vld2_dup_u16(__p0) __extension__ ({ \ - uint16x4x2_t __ret; \ - __builtin_neon_vld2_dup_v(&__ret, __p0, 17); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld2_dup_s8(__p0) __extension__ ({ \ - int8x8x2_t __ret; \ - __builtin_neon_vld2_dup_v(&__ret, __p0, 0); \ - __ret; \ -}) -#else -#define vld2_dup_s8(__p0) __extension__ ({ \ - int8x8x2_t __ret; \ - __builtin_neon_vld2_dup_v(&__ret, __p0, 0); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld2_dup_f32(__p0) __extension__ ({ \ - float32x2x2_t __ret; \ - __builtin_neon_vld2_dup_v(&__ret, __p0, 9); \ - __ret; \ -}) -#else -#define vld2_dup_f32(__p0) 
__extension__ ({ \ - float32x2x2_t __ret; \ - __builtin_neon_vld2_dup_v(&__ret, __p0, 9); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld2_dup_f16(__p0) __extension__ ({ \ - float16x4x2_t __ret; \ - __builtin_neon_vld2_dup_v(&__ret, __p0, 8); \ - __ret; \ -}) -#else -#define vld2_dup_f16(__p0) __extension__ ({ \ - float16x4x2_t __ret; \ - __builtin_neon_vld2_dup_v(&__ret, __p0, 8); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld2_dup_s32(__p0) __extension__ ({ \ - int32x2x2_t __ret; \ - __builtin_neon_vld2_dup_v(&__ret, __p0, 2); \ - __ret; \ -}) -#else -#define vld2_dup_s32(__p0) __extension__ ({ \ - int32x2x2_t __ret; \ - __builtin_neon_vld2_dup_v(&__ret, __p0, 2); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld2_dup_s64(__p0) __extension__ ({ \ - int64x1x2_t __ret; \ - __builtin_neon_vld2_dup_v(&__ret, __p0, 3); \ - __ret; \ -}) -#else -#define vld2_dup_s64(__p0) __extension__ ({ \ - int64x1x2_t __ret; \ - __builtin_neon_vld2_dup_v(&__ret, __p0, 3); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld2_dup_s16(__p0) __extension__ ({ \ - int16x4x2_t __ret; \ - __builtin_neon_vld2_dup_v(&__ret, __p0, 1); \ - __ret; \ -}) -#else -#define vld2_dup_s16(__p0) __extension__ ({ \ - int16x4x2_t __ret; \ - __builtin_neon_vld2_dup_v(&__ret, __p0, 1); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld2_lane_p8(__p0, __p1, __p2) __extension__ ({ \ - poly8x8x2_t __s1 = __p1; \ - poly8x8x2_t __ret; \ - __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 4); \ - __ret; \ -}) -#else -#define vld2_lane_p8(__p0, __p1, __p2) __extension__ ({ \ - poly8x8x2_t __s1 = __p1; \ - poly8x8x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - poly8x8x2_t __ret; \ - __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 4); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld2_lane_p16(__p0, __p1, __p2) __extension__ ({ \ - poly16x4x2_t __s1 = __p1; \ - poly16x4x2_t __ret; \ - __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 5); \ - __ret; \ -}) -#else -#define vld2_lane_p16(__p0, __p1, __p2) __extension__ ({ \ - poly16x4x2_t __s1 = __p1; \ - poly16x4x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - poly16x4x2_t __ret; \ - __builtin_neon_vld2_lane_v(&__ret, __p0, 
(int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 5); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld2q_lane_p16(__p0, __p1, __p2) __extension__ ({ \ - poly16x8x2_t __s1 = __p1; \ - poly16x8x2_t __ret; \ - __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 37); \ - __ret; \ -}) -#else -#define vld2q_lane_p16(__p0, __p1, __p2) __extension__ ({ \ - poly16x8x2_t __s1 = __p1; \ - poly16x8x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - poly16x8x2_t __ret; \ - __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 37); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld2q_lane_u32(__p0, __p1, __p2) __extension__ ({ \ - uint32x4x2_t __s1 = __p1; \ - uint32x4x2_t __ret; \ - __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 50); \ - __ret; \ -}) -#else -#define vld2q_lane_u32(__p0, __p1, __p2) __extension__ ({ \ - uint32x4x2_t __s1 = __p1; \ - uint32x4x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - uint32x4x2_t __ret; \ - __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 50); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld2q_lane_u16(__p0, __p1, __p2) __extension__ ({ \ - uint16x8x2_t __s1 = __p1; \ - uint16x8x2_t __ret; \ - __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 49); \ - __ret; \ -}) -#else -#define vld2q_lane_u16(__p0, __p1, __p2) __extension__ ({ \ - uint16x8x2_t __s1 = __p1; \ - uint16x8x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - uint16x8x2_t __ret; \ - __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 49); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld2q_lane_f32(__p0, __p1, __p2) __extension__ ({ \ - float32x4x2_t __s1 = __p1; \ - float32x4x2_t __ret; \ - __builtin_neon_vld2q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 41); \ - __ret; \ -}) -#else -#define vld2q_lane_f32(__p0, __p1, __p2) __extension__ ({ \ - float32x4x2_t __s1 = __p1; \ - float32x4x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - float32x4x2_t __ret; \ - 
__builtin_neon_vld2q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __p2, 41); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld2q_lane_f16(__p0, __p1, __p2) __extension__ ({ \ - float16x8x2_t __s1 = __p1; \ - float16x8x2_t __ret; \ - __builtin_neon_vld2q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 40); \ - __ret; \ -}) -#else -#define vld2q_lane_f16(__p0, __p1, __p2) __extension__ ({ \ - float16x8x2_t __s1 = __p1; \ - float16x8x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x8x2_t __ret; \ - __builtin_neon_vld2q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __p2, 40); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld2q_lane_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x4x2_t __s1 = __p1; \ - int32x4x2_t __ret; \ - __builtin_neon_vld2q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 34); \ - __ret; \ -}) -#else -#define vld2q_lane_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x4x2_t __s1 = __p1; \ - int32x4x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - int32x4x2_t __ret; \ - __builtin_neon_vld2q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __p2, 34); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld2q_lane_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x8x2_t __s1 = __p1; \ - int16x8x2_t __ret; \ - __builtin_neon_vld2q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 33); \ - __ret; \ -}) -#else -#define vld2q_lane_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x8x2_t __s1 = __p1; \ - int16x8x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x8x2_t __ret; \ - __builtin_neon_vld2q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __p2, 33); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld2_lane_u8(__p0, __p1, __p2) __extension__ ({ \ - uint8x8x2_t __s1 = __p1; \ - uint8x8x2_t __ret; \ - __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 16); \ - __ret; \ -}) -#else -#define vld2_lane_u8(__p0, __p1, __p2) __extension__ ({ \ - uint8x8x2_t __s1 = __p1; \ - uint8x8x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - uint8x8x2_t __ret; \ - __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], 
(int8x8_t)__rev1.val[1], __p2, 16); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld2_lane_u32(__p0, __p1, __p2) __extension__ ({ \ - uint32x2x2_t __s1 = __p1; \ - uint32x2x2_t __ret; \ - __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 18); \ - __ret; \ -}) -#else -#define vld2_lane_u32(__p0, __p1, __p2) __extension__ ({ \ - uint32x2x2_t __s1 = __p1; \ - uint32x2x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - uint32x2x2_t __ret; \ - __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 18); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld2_lane_u16(__p0, __p1, __p2) __extension__ ({ \ - uint16x4x2_t __s1 = __p1; \ - uint16x4x2_t __ret; \ - __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 17); \ - __ret; \ -}) -#else -#define vld2_lane_u16(__p0, __p1, __p2) __extension__ ({ \ - uint16x4x2_t __s1 = __p1; \ - uint16x4x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - uint16x4x2_t __ret; \ - __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 17); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld2_lane_s8(__p0, __p1, __p2) __extension__ ({ \ - int8x8x2_t __s1 = __p1; \ - int8x8x2_t __ret; \ - __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 0); \ - __ret; \ -}) -#else -#define vld2_lane_s8(__p0, __p1, __p2) __extension__ ({ \ - int8x8x2_t __s1 = __p1; \ - int8x8x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - int8x8x2_t __ret; \ - __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 0); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld2_lane_f32(__p0, __p1, __p2) __extension__ ({ \ - float32x2x2_t __s1 = __p1; \ - float32x2x2_t __ret; \ - __builtin_neon_vld2_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 9); \ - __ret; \ -}) -#else -#define vld2_lane_f32(__p0, __p1, __p2) __extension__ ({ \ - float32x2x2_t __s1 = __p1; \ - float32x2x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - float32x2x2_t __ret; \ - __builtin_neon_vld2_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __p2, 9); \ - \ - __ret.val[0] = 
__builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld2_lane_f16(__p0, __p1, __p2) __extension__ ({ \ - float16x4x2_t __s1 = __p1; \ - float16x4x2_t __ret; \ - __builtin_neon_vld2_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 8); \ - __ret; \ -}) -#else -#define vld2_lane_f16(__p0, __p1, __p2) __extension__ ({ \ - float16x4x2_t __s1 = __p1; \ - float16x4x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - float16x4x2_t __ret; \ - __builtin_neon_vld2_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __p2, 8); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld2_lane_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x2x2_t __s1 = __p1; \ - int32x2x2_t __ret; \ - __builtin_neon_vld2_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 2); \ - __ret; \ -}) -#else -#define vld2_lane_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x2x2_t __s1 = __p1; \ - int32x2x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - int32x2x2_t __ret; \ - __builtin_neon_vld2_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __p2, 2); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld2_lane_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x4x2_t __s1 = __p1; \ - int16x4x2_t __ret; \ - __builtin_neon_vld2_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 1); \ - __ret; \ -}) -#else -#define vld2_lane_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x4x2_t __s1 = __p1; \ - int16x4x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - int16x4x2_t __ret; \ - __builtin_neon_vld2_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __p2, 1); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld3_p8(__p0) __extension__ ({ \ - poly8x8x3_t __ret; \ - __builtin_neon_vld3_v(&__ret, __p0, 4); \ - __ret; \ -}) -#else -#define vld3_p8(__p0) __extension__ ({ \ - poly8x8x3_t __ret; \ - __builtin_neon_vld3_v(&__ret, __p0, 4); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld3_p16(__p0) __extension__ ({ \ - poly16x4x3_t __ret; \ - __builtin_neon_vld3_v(&__ret, __p0, 5); \ - __ret; \ -}) -#else -#define vld3_p16(__p0) __extension__ ({ \ - poly16x4x3_t __ret; \ - __builtin_neon_vld3_v(&__ret, __p0, 5); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 
2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld3q_p8(__p0) __extension__ ({ \ - poly8x16x3_t __ret; \ - __builtin_neon_vld3q_v(&__ret, __p0, 36); \ - __ret; \ -}) -#else -#define vld3q_p8(__p0) __extension__ ({ \ - poly8x16x3_t __ret; \ - __builtin_neon_vld3q_v(&__ret, __p0, 36); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld3q_p16(__p0) __extension__ ({ \ - poly16x8x3_t __ret; \ - __builtin_neon_vld3q_v(&__ret, __p0, 37); \ - __ret; \ -}) -#else -#define vld3q_p16(__p0) __extension__ ({ \ - poly16x8x3_t __ret; \ - __builtin_neon_vld3q_v(&__ret, __p0, 37); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld3q_u8(__p0) __extension__ ({ \ - uint8x16x3_t __ret; \ - __builtin_neon_vld3q_v(&__ret, __p0, 48); \ - __ret; \ -}) -#else -#define vld3q_u8(__p0) __extension__ ({ \ - uint8x16x3_t __ret; \ - __builtin_neon_vld3q_v(&__ret, __p0, 48); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld3q_u32(__p0) __extension__ ({ \ - uint32x4x3_t __ret; \ - __builtin_neon_vld3q_v(&__ret, __p0, 50); \ - __ret; \ -}) -#else -#define vld3q_u32(__p0) __extension__ ({ \ - uint32x4x3_t __ret; \ - __builtin_neon_vld3q_v(&__ret, __p0, 50); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld3q_u16(__p0) __extension__ ({ \ - uint16x8x3_t __ret; \ - __builtin_neon_vld3q_v(&__ret, __p0, 49); \ - __ret; \ -}) -#else -#define vld3q_u16(__p0) __extension__ ({ \ - uint16x8x3_t __ret; \ - __builtin_neon_vld3q_v(&__ret, __p0, 49); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld3q_s8(__p0) __extension__ ({ \ - int8x16x3_t __ret; \ - __builtin_neon_vld3q_v(&__ret, __p0, 32); \ - __ret; \ -}) -#else -#define vld3q_s8(__p0) __extension__ ({ \ - int8x16x3_t __ret; \ - 
__builtin_neon_vld3q_v(&__ret, __p0, 32); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld3q_f32(__p0) __extension__ ({ \ - float32x4x3_t __ret; \ - __builtin_neon_vld3q_v(&__ret, __p0, 41); \ - __ret; \ -}) -#else -#define vld3q_f32(__p0) __extension__ ({ \ - float32x4x3_t __ret; \ - __builtin_neon_vld3q_v(&__ret, __p0, 41); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld3q_f16(__p0) __extension__ ({ \ - float16x8x3_t __ret; \ - __builtin_neon_vld3q_v(&__ret, __p0, 40); \ - __ret; \ -}) -#else -#define vld3q_f16(__p0) __extension__ ({ \ - float16x8x3_t __ret; \ - __builtin_neon_vld3q_v(&__ret, __p0, 40); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld3q_s32(__p0) __extension__ ({ \ - int32x4x3_t __ret; \ - __builtin_neon_vld3q_v(&__ret, __p0, 34); \ - __ret; \ -}) -#else -#define vld3q_s32(__p0) __extension__ ({ \ - int32x4x3_t __ret; \ - __builtin_neon_vld3q_v(&__ret, __p0, 34); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld3q_s16(__p0) __extension__ ({ \ - int16x8x3_t __ret; \ - __builtin_neon_vld3q_v(&__ret, __p0, 33); \ - __ret; \ -}) -#else -#define vld3q_s16(__p0) __extension__ ({ \ - int16x8x3_t __ret; \ - __builtin_neon_vld3q_v(&__ret, __p0, 33); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld3_u8(__p0) __extension__ ({ \ - uint8x8x3_t __ret; \ - __builtin_neon_vld3_v(&__ret, __p0, 16); \ - __ret; \ -}) -#else -#define vld3_u8(__p0) __extension__ ({ \ - uint8x8x3_t __ret; \ - __builtin_neon_vld3_v(&__ret, __p0, 16); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld3_u32(__p0) __extension__ ({ \ - uint32x2x3_t __ret; \ - __builtin_neon_vld3_v(&__ret, __p0, 18); \ - __ret; \ -}) -#else -#define vld3_u32(__p0) __extension__ ({ \ - uint32x2x3_t 
__ret; \ - __builtin_neon_vld3_v(&__ret, __p0, 18); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld3_u64(__p0) __extension__ ({ \ - uint64x1x3_t __ret; \ - __builtin_neon_vld3_v(&__ret, __p0, 19); \ - __ret; \ -}) -#else -#define vld3_u64(__p0) __extension__ ({ \ - uint64x1x3_t __ret; \ - __builtin_neon_vld3_v(&__ret, __p0, 19); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld3_u16(__p0) __extension__ ({ \ - uint16x4x3_t __ret; \ - __builtin_neon_vld3_v(&__ret, __p0, 17); \ - __ret; \ -}) -#else -#define vld3_u16(__p0) __extension__ ({ \ - uint16x4x3_t __ret; \ - __builtin_neon_vld3_v(&__ret, __p0, 17); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld3_s8(__p0) __extension__ ({ \ - int8x8x3_t __ret; \ - __builtin_neon_vld3_v(&__ret, __p0, 0); \ - __ret; \ -}) -#else -#define vld3_s8(__p0) __extension__ ({ \ - int8x8x3_t __ret; \ - __builtin_neon_vld3_v(&__ret, __p0, 0); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld3_f32(__p0) __extension__ ({ \ - float32x2x3_t __ret; \ - __builtin_neon_vld3_v(&__ret, __p0, 9); \ - __ret; \ -}) -#else -#define vld3_f32(__p0) __extension__ ({ \ - float32x2x3_t __ret; \ - __builtin_neon_vld3_v(&__ret, __p0, 9); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld3_f16(__p0) __extension__ ({ \ - float16x4x3_t __ret; \ - __builtin_neon_vld3_v(&__ret, __p0, 8); \ - __ret; \ -}) -#else -#define vld3_f16(__p0) __extension__ ({ \ - float16x4x3_t __ret; \ - __builtin_neon_vld3_v(&__ret, __p0, 8); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld3_s32(__p0) __extension__ ({ \ - int32x2x3_t __ret; \ - __builtin_neon_vld3_v(&__ret, __p0, 2); \ - __ret; \ -}) -#else -#define vld3_s32(__p0) __extension__ ({ \ - int32x2x3_t __ret; \ - __builtin_neon_vld3_v(&__ret, __p0, 2); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld3_s64(__p0) __extension__ ({ \ - int64x1x3_t __ret; \ - __builtin_neon_vld3_v(&__ret, __p0, 3); \ - __ret; \ -}) -#else -#define vld3_s64(__p0) 
__extension__ ({ \ - int64x1x3_t __ret; \ - __builtin_neon_vld3_v(&__ret, __p0, 3); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld3_s16(__p0) __extension__ ({ \ - int16x4x3_t __ret; \ - __builtin_neon_vld3_v(&__ret, __p0, 1); \ - __ret; \ -}) -#else -#define vld3_s16(__p0) __extension__ ({ \ - int16x4x3_t __ret; \ - __builtin_neon_vld3_v(&__ret, __p0, 1); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld3_dup_p8(__p0) __extension__ ({ \ - poly8x8x3_t __ret; \ - __builtin_neon_vld3_dup_v(&__ret, __p0, 4); \ - __ret; \ -}) -#else -#define vld3_dup_p8(__p0) __extension__ ({ \ - poly8x8x3_t __ret; \ - __builtin_neon_vld3_dup_v(&__ret, __p0, 4); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld3_dup_p16(__p0) __extension__ ({ \ - poly16x4x3_t __ret; \ - __builtin_neon_vld3_dup_v(&__ret, __p0, 5); \ - __ret; \ -}) -#else -#define vld3_dup_p16(__p0) __extension__ ({ \ - poly16x4x3_t __ret; \ - __builtin_neon_vld3_dup_v(&__ret, __p0, 5); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld3_dup_u8(__p0) __extension__ ({ \ - uint8x8x3_t __ret; \ - __builtin_neon_vld3_dup_v(&__ret, __p0, 16); \ - __ret; \ -}) -#else -#define vld3_dup_u8(__p0) __extension__ ({ \ - uint8x8x3_t __ret; \ - __builtin_neon_vld3_dup_v(&__ret, __p0, 16); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld3_dup_u32(__p0) __extension__ ({ \ - uint32x2x3_t __ret; \ - __builtin_neon_vld3_dup_v(&__ret, __p0, 18); \ - __ret; \ -}) -#else -#define vld3_dup_u32(__p0) __extension__ ({ \ - uint32x2x3_t __ret; \ - __builtin_neon_vld3_dup_v(&__ret, __p0, 18); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld3_dup_u64(__p0) __extension__ ({ \ - uint64x1x3_t __ret; \ - __builtin_neon_vld3_dup_v(&__ret, __p0, 19); \ - __ret; \ -}) -#else -#define vld3_dup_u64(__p0) __extension__ ({ \ - uint64x1x3_t __ret; \ - __builtin_neon_vld3_dup_v(&__ret, __p0, 19); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld3_dup_u16(__p0) __extension__ ({ \ - uint16x4x3_t __ret; \ - __builtin_neon_vld3_dup_v(&__ret, __p0, 17); \ - __ret; \ -}) -#else -#define vld3_dup_u16(__p0) __extension__ ({ \ - uint16x4x3_t __ret; \ - 
__builtin_neon_vld3_dup_v(&__ret, __p0, 17); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld3_dup_s8(__p0) __extension__ ({ \ - int8x8x3_t __ret; \ - __builtin_neon_vld3_dup_v(&__ret, __p0, 0); \ - __ret; \ -}) -#else -#define vld3_dup_s8(__p0) __extension__ ({ \ - int8x8x3_t __ret; \ - __builtin_neon_vld3_dup_v(&__ret, __p0, 0); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld3_dup_f32(__p0) __extension__ ({ \ - float32x2x3_t __ret; \ - __builtin_neon_vld3_dup_v(&__ret, __p0, 9); \ - __ret; \ -}) -#else -#define vld3_dup_f32(__p0) __extension__ ({ \ - float32x2x3_t __ret; \ - __builtin_neon_vld3_dup_v(&__ret, __p0, 9); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld3_dup_f16(__p0) __extension__ ({ \ - float16x4x3_t __ret; \ - __builtin_neon_vld3_dup_v(&__ret, __p0, 8); \ - __ret; \ -}) -#else -#define vld3_dup_f16(__p0) __extension__ ({ \ - float16x4x3_t __ret; \ - __builtin_neon_vld3_dup_v(&__ret, __p0, 8); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld3_dup_s32(__p0) __extension__ ({ \ - int32x2x3_t __ret; \ - __builtin_neon_vld3_dup_v(&__ret, __p0, 2); \ - __ret; \ -}) -#else -#define vld3_dup_s32(__p0) __extension__ ({ \ - int32x2x3_t __ret; \ - __builtin_neon_vld3_dup_v(&__ret, __p0, 2); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld3_dup_s64(__p0) __extension__ ({ \ - int64x1x3_t __ret; \ - __builtin_neon_vld3_dup_v(&__ret, __p0, 3); \ - __ret; \ -}) -#else -#define vld3_dup_s64(__p0) __extension__ ({ \ - int64x1x3_t __ret; \ - __builtin_neon_vld3_dup_v(&__ret, __p0, 3); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld3_dup_s16(__p0) __extension__ ({ \ - int16x4x3_t __ret; \ - __builtin_neon_vld3_dup_v(&__ret, __p0, 1); \ - __ret; \ -}) -#else -#define vld3_dup_s16(__p0) __extension__ ({ \ - int16x4x3_t __ret; \ - __builtin_neon_vld3_dup_v(&__ret, __p0, 1); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld3_lane_p8(__p0, __p1, __p2) __extension__ ({ \ - 
poly8x8x3_t __s1 = __p1; \ - poly8x8x3_t __ret; \ - __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 4); \ - __ret; \ -}) -#else -#define vld3_lane_p8(__p0, __p1, __p2) __extension__ ({ \ - poly8x8x3_t __s1 = __p1; \ - poly8x8x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - poly8x8x3_t __ret; \ - __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 4); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld3_lane_p16(__p0, __p1, __p2) __extension__ ({ \ - poly16x4x3_t __s1 = __p1; \ - poly16x4x3_t __ret; \ - __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 5); \ - __ret; \ -}) -#else -#define vld3_lane_p16(__p0, __p1, __p2) __extension__ ({ \ - poly16x4x3_t __s1 = __p1; \ - poly16x4x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ - poly16x4x3_t __ret; \ - __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 5); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld3q_lane_p16(__p0, __p1, __p2) __extension__ ({ \ - poly16x8x3_t __s1 = __p1; \ - poly16x8x3_t __ret; \ - __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 37); \ - __ret; \ -}) -#else -#define vld3q_lane_p16(__p0, __p1, __p2) __extension__ ({ \ - poly16x8x3_t __s1 = __p1; \ - poly16x8x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - poly16x8x3_t __ret; \ - __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 37); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld3q_lane_u32(__p0, __p1, __p2) __extension__ ({ \ - uint32x4x3_t __s1 = __p1; \ - uint32x4x3_t __ret; \ - __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 
(int8x16_t)__s1.val[2], __p2, 50); \ - __ret; \ -}) -#else -#define vld3q_lane_u32(__p0, __p1, __p2) __extension__ ({ \ - uint32x4x3_t __s1 = __p1; \ - uint32x4x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ - uint32x4x3_t __ret; \ - __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 50); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld3q_lane_u16(__p0, __p1, __p2) __extension__ ({ \ - uint16x8x3_t __s1 = __p1; \ - uint16x8x3_t __ret; \ - __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 49); \ - __ret; \ -}) -#else -#define vld3q_lane_u16(__p0, __p1, __p2) __extension__ ({ \ - uint16x8x3_t __s1 = __p1; \ - uint16x8x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - uint16x8x3_t __ret; \ - __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 49); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld3q_lane_f32(__p0, __p1, __p2) __extension__ ({ \ - float32x4x3_t __s1 = __p1; \ - float32x4x3_t __ret; \ - __builtin_neon_vld3q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 41); \ - __ret; \ -}) -#else -#define vld3q_lane_f32(__p0, __p1, __p2) __extension__ ({ \ - float32x4x3_t __s1 = __p1; \ - float32x4x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ - float32x4x3_t __ret; \ - __builtin_neon_vld3q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 41); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld3q_lane_f16(__p0, __p1, __p2) __extension__ ({ \ - float16x8x3_t __s1 = __p1; \ - float16x8x3_t __ret; \ - __builtin_neon_vld3q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 40); \ - __ret; \ -}) -#else -#define vld3q_lane_f16(__p0, __p1, __p2) __extension__ ({ \ - float16x8x3_t __s1 = __p1; \ - float16x8x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - 
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x8x3_t __ret; \ - __builtin_neon_vld3q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 40); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld3q_lane_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x4x3_t __s1 = __p1; \ - int32x4x3_t __ret; \ - __builtin_neon_vld3q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 34); \ - __ret; \ -}) -#else -#define vld3q_lane_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x4x3_t __s1 = __p1; \ - int32x4x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ - int32x4x3_t __ret; \ - __builtin_neon_vld3q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 34); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld3q_lane_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x8x3_t __s1 = __p1; \ - int16x8x3_t __ret; \ - __builtin_neon_vld3q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 33); \ - __ret; \ -}) -#else -#define vld3q_lane_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x8x3_t __s1 = __p1; \ - int16x8x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x8x3_t __ret; \ - __builtin_neon_vld3q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 33); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld3_lane_u8(__p0, __p1, __p2) __extension__ ({ \ - uint8x8x3_t __s1 = __p1; \ - uint8x8x3_t __ret; \ - __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 16); \ - __ret; \ -}) -#else -#define vld3_lane_u8(__p0, __p1, __p2) __extension__ ({ \ - uint8x8x3_t __s1 = __p1; \ - uint8x8x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - uint8x8x3_t __ret; \ - __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 
(int8x8_t)__rev1.val[2], __p2, 16); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld3_lane_u32(__p0, __p1, __p2) __extension__ ({ \ - uint32x2x3_t __s1 = __p1; \ - uint32x2x3_t __ret; \ - __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 18); \ - __ret; \ -}) -#else -#define vld3_lane_u32(__p0, __p1, __p2) __extension__ ({ \ - uint32x2x3_t __s1 = __p1; \ - uint32x2x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ - uint32x2x3_t __ret; \ - __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 18); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld3_lane_u16(__p0, __p1, __p2) __extension__ ({ \ - uint16x4x3_t __s1 = __p1; \ - uint16x4x3_t __ret; \ - __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 17); \ - __ret; \ -}) -#else -#define vld3_lane_u16(__p0, __p1, __p2) __extension__ ({ \ - uint16x4x3_t __s1 = __p1; \ - uint16x4x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ - uint16x4x3_t __ret; \ - __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 17); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld3_lane_s8(__p0, __p1, __p2) __extension__ ({ \ - int8x8x3_t __s1 = __p1; \ - int8x8x3_t __ret; \ - __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 0); \ - __ret; \ -}) -#else -#define vld3_lane_s8(__p0, __p1, __p2) __extension__ ({ \ - int8x8x3_t __s1 = __p1; \ - int8x8x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - int8x8x3_t __ret; \ - __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 0); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = 
__builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld3_lane_f32(__p0, __p1, __p2) __extension__ ({ \ - float32x2x3_t __s1 = __p1; \ - float32x2x3_t __ret; \ - __builtin_neon_vld3_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 9); \ - __ret; \ -}) -#else -#define vld3_lane_f32(__p0, __p1, __p2) __extension__ ({ \ - float32x2x3_t __s1 = __p1; \ - float32x2x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ - float32x2x3_t __ret; \ - __builtin_neon_vld3_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 9); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld3_lane_f16(__p0, __p1, __p2) __extension__ ({ \ - float16x4x3_t __s1 = __p1; \ - float16x4x3_t __ret; \ - __builtin_neon_vld3_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 8); \ - __ret; \ -}) -#else -#define vld3_lane_f16(__p0, __p1, __p2) __extension__ ({ \ - float16x4x3_t __s1 = __p1; \ - float16x4x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ - float16x4x3_t __ret; \ - __builtin_neon_vld3_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 8); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld3_lane_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x2x3_t __s1 = __p1; \ - int32x2x3_t __ret; \ - __builtin_neon_vld3_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 2); \ - __ret; \ -}) -#else -#define vld3_lane_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x2x3_t __s1 = __p1; \ - int32x2x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ - int32x2x3_t __ret; \ - __builtin_neon_vld3_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 2); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld3_lane_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x4x3_t __s1 = __p1; \ - int16x4x3_t __ret; \ - __builtin_neon_vld3_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 1); \ - __ret; \ -}) -#else -#define vld3_lane_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x4x3_t __s1 = __p1; \ - int16x4x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 
1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ - int16x4x3_t __ret; \ - __builtin_neon_vld3_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 1); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld4_p8(__p0) __extension__ ({ \ - poly8x8x4_t __ret; \ - __builtin_neon_vld4_v(&__ret, __p0, 4); \ - __ret; \ -}) -#else -#define vld4_p8(__p0) __extension__ ({ \ - poly8x8x4_t __ret; \ - __builtin_neon_vld4_v(&__ret, __p0, 4); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld4_p16(__p0) __extension__ ({ \ - poly16x4x4_t __ret; \ - __builtin_neon_vld4_v(&__ret, __p0, 5); \ - __ret; \ -}) -#else -#define vld4_p16(__p0) __extension__ ({ \ - poly16x4x4_t __ret; \ - __builtin_neon_vld4_v(&__ret, __p0, 5); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld4q_p8(__p0) __extension__ ({ \ - poly8x16x4_t __ret; \ - __builtin_neon_vld4q_v(&__ret, __p0, 36); \ - __ret; \ -}) -#else -#define vld4q_p8(__p0) __extension__ ({ \ - poly8x16x4_t __ret; \ - __builtin_neon_vld4q_v(&__ret, __p0, 36); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld4q_p16(__p0) __extension__ ({ \ - poly16x8x4_t __ret; \ - __builtin_neon_vld4q_v(&__ret, __p0, 37); \ - __ret; \ -}) -#else -#define vld4q_p16(__p0) __extension__ ({ \ - poly16x8x4_t __ret; \ - __builtin_neon_vld4q_v(&__ret, __p0, 37); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld4q_u8(__p0) __extension__ ({ \ - uint8x16x4_t __ret; \ - __builtin_neon_vld4q_v(&__ret, __p0, 48); \ - __ret; \ -}) -#else 
-#define vld4q_u8(__p0) __extension__ ({ \ - uint8x16x4_t __ret; \ - __builtin_neon_vld4q_v(&__ret, __p0, 48); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld4q_u32(__p0) __extension__ ({ \ - uint32x4x4_t __ret; \ - __builtin_neon_vld4q_v(&__ret, __p0, 50); \ - __ret; \ -}) -#else -#define vld4q_u32(__p0) __extension__ ({ \ - uint32x4x4_t __ret; \ - __builtin_neon_vld4q_v(&__ret, __p0, 50); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld4q_u16(__p0) __extension__ ({ \ - uint16x8x4_t __ret; \ - __builtin_neon_vld4q_v(&__ret, __p0, 49); \ - __ret; \ -}) -#else -#define vld4q_u16(__p0) __extension__ ({ \ - uint16x8x4_t __ret; \ - __builtin_neon_vld4q_v(&__ret, __p0, 49); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld4q_s8(__p0) __extension__ ({ \ - int8x16x4_t __ret; \ - __builtin_neon_vld4q_v(&__ret, __p0, 32); \ - __ret; \ -}) -#else -#define vld4q_s8(__p0) __extension__ ({ \ - int8x16x4_t __ret; \ - __builtin_neon_vld4q_v(&__ret, __p0, 32); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld4q_f32(__p0) __extension__ ({ \ - float32x4x4_t __ret; \ - __builtin_neon_vld4q_v(&__ret, __p0, 41); \ - __ret; \ -}) -#else -#define vld4q_f32(__p0) __extension__ ({ \ - float32x4x4_t __ret; \ - __builtin_neon_vld4q_v(&__ret, __p0, 41); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld4q_f16(__p0) __extension__ ({ \ - float16x8x4_t __ret; \ - __builtin_neon_vld4q_v(&__ret, 
__p0, 40); \ - __ret; \ -}) -#else -#define vld4q_f16(__p0) __extension__ ({ \ - float16x8x4_t __ret; \ - __builtin_neon_vld4q_v(&__ret, __p0, 40); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld4q_s32(__p0) __extension__ ({ \ - int32x4x4_t __ret; \ - __builtin_neon_vld4q_v(&__ret, __p0, 34); \ - __ret; \ -}) -#else -#define vld4q_s32(__p0) __extension__ ({ \ - int32x4x4_t __ret; \ - __builtin_neon_vld4q_v(&__ret, __p0, 34); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld4q_s16(__p0) __extension__ ({ \ - int16x8x4_t __ret; \ - __builtin_neon_vld4q_v(&__ret, __p0, 33); \ - __ret; \ -}) -#else -#define vld4q_s16(__p0) __extension__ ({ \ - int16x8x4_t __ret; \ - __builtin_neon_vld4q_v(&__ret, __p0, 33); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld4_u8(__p0) __extension__ ({ \ - uint8x8x4_t __ret; \ - __builtin_neon_vld4_v(&__ret, __p0, 16); \ - __ret; \ -}) -#else -#define vld4_u8(__p0) __extension__ ({ \ - uint8x8x4_t __ret; \ - __builtin_neon_vld4_v(&__ret, __p0, 16); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld4_u32(__p0) __extension__ ({ \ - uint32x2x4_t __ret; \ - __builtin_neon_vld4_v(&__ret, __p0, 18); \ - __ret; \ -}) -#else -#define vld4_u32(__p0) __extension__ ({ \ - uint32x2x4_t __ret; \ - __builtin_neon_vld4_v(&__ret, __p0, 18); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld4_u64(__p0) __extension__ ({ \ - uint64x1x4_t __ret; \ - __builtin_neon_vld4_v(&__ret, __p0, 19); \ - __ret; \ -}) -#else -#define vld4_u64(__p0) __extension__ ({ \ - uint64x1x4_t __ret; \ - __builtin_neon_vld4_v(&__ret, __p0, 19); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld4_u16(__p0) __extension__ ({ \ - 
uint16x4x4_t __ret; \ - __builtin_neon_vld4_v(&__ret, __p0, 17); \ - __ret; \ -}) -#else -#define vld4_u16(__p0) __extension__ ({ \ - uint16x4x4_t __ret; \ - __builtin_neon_vld4_v(&__ret, __p0, 17); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld4_s8(__p0) __extension__ ({ \ - int8x8x4_t __ret; \ - __builtin_neon_vld4_v(&__ret, __p0, 0); \ - __ret; \ -}) -#else -#define vld4_s8(__p0) __extension__ ({ \ - int8x8x4_t __ret; \ - __builtin_neon_vld4_v(&__ret, __p0, 0); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld4_f32(__p0) __extension__ ({ \ - float32x2x4_t __ret; \ - __builtin_neon_vld4_v(&__ret, __p0, 9); \ - __ret; \ -}) -#else -#define vld4_f32(__p0) __extension__ ({ \ - float32x2x4_t __ret; \ - __builtin_neon_vld4_v(&__ret, __p0, 9); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld4_f16(__p0) __extension__ ({ \ - float16x4x4_t __ret; \ - __builtin_neon_vld4_v(&__ret, __p0, 8); \ - __ret; \ -}) -#else -#define vld4_f16(__p0) __extension__ ({ \ - float16x4x4_t __ret; \ - __builtin_neon_vld4_v(&__ret, __p0, 8); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld4_s32(__p0) __extension__ ({ \ - int32x2x4_t __ret; \ - __builtin_neon_vld4_v(&__ret, __p0, 2); \ - __ret; \ -}) -#else -#define vld4_s32(__p0) __extension__ ({ \ - int32x2x4_t __ret; \ - __builtin_neon_vld4_v(&__ret, __p0, 2); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld4_s64(__p0) __extension__ ({ \ - int64x1x4_t __ret; \ - __builtin_neon_vld4_v(&__ret, __p0, 3); \ - __ret; \ -}) -#else -#define vld4_s64(__p0) __extension__ ({ \ - int64x1x4_t __ret; \ - __builtin_neon_vld4_v(&__ret, __p0, 3); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld4_s16(__p0) __extension__ ({ \ - int16x4x4_t __ret; \ - __builtin_neon_vld4_v(&__ret, __p0, 1); \ - __ret; \ -}) -#else 
-#define vld4_s16(__p0) __extension__ ({ \ - int16x4x4_t __ret; \ - __builtin_neon_vld4_v(&__ret, __p0, 1); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld4_dup_p8(__p0) __extension__ ({ \ - poly8x8x4_t __ret; \ - __builtin_neon_vld4_dup_v(&__ret, __p0, 4); \ - __ret; \ -}) -#else -#define vld4_dup_p8(__p0) __extension__ ({ \ - poly8x8x4_t __ret; \ - __builtin_neon_vld4_dup_v(&__ret, __p0, 4); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld4_dup_p16(__p0) __extension__ ({ \ - poly16x4x4_t __ret; \ - __builtin_neon_vld4_dup_v(&__ret, __p0, 5); \ - __ret; \ -}) -#else -#define vld4_dup_p16(__p0) __extension__ ({ \ - poly16x4x4_t __ret; \ - __builtin_neon_vld4_dup_v(&__ret, __p0, 5); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld4_dup_u8(__p0) __extension__ ({ \ - uint8x8x4_t __ret; \ - __builtin_neon_vld4_dup_v(&__ret, __p0, 16); \ - __ret; \ -}) -#else -#define vld4_dup_u8(__p0) __extension__ ({ \ - uint8x8x4_t __ret; \ - __builtin_neon_vld4_dup_v(&__ret, __p0, 16); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld4_dup_u32(__p0) __extension__ ({ \ - uint32x2x4_t __ret; \ - __builtin_neon_vld4_dup_v(&__ret, __p0, 18); \ - __ret; \ -}) -#else -#define vld4_dup_u32(__p0) __extension__ ({ \ - uint32x2x4_t __ret; \ - __builtin_neon_vld4_dup_v(&__ret, __p0, 18); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld4_dup_u64(__p0) __extension__ ({ \ - uint64x1x4_t __ret; \ - __builtin_neon_vld4_dup_v(&__ret, __p0, 19); \ - __ret; \ -}) -#else -#define vld4_dup_u64(__p0) __extension__ ({ \ - uint64x1x4_t __ret; \ - __builtin_neon_vld4_dup_v(&__ret, __p0, 19); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld4_dup_u16(__p0) __extension__ ({ \ - uint16x4x4_t __ret; 
\ - __builtin_neon_vld4_dup_v(&__ret, __p0, 17); \ - __ret; \ -}) -#else -#define vld4_dup_u16(__p0) __extension__ ({ \ - uint16x4x4_t __ret; \ - __builtin_neon_vld4_dup_v(&__ret, __p0, 17); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld4_dup_s8(__p0) __extension__ ({ \ - int8x8x4_t __ret; \ - __builtin_neon_vld4_dup_v(&__ret, __p0, 0); \ - __ret; \ -}) -#else -#define vld4_dup_s8(__p0) __extension__ ({ \ - int8x8x4_t __ret; \ - __builtin_neon_vld4_dup_v(&__ret, __p0, 0); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld4_dup_f32(__p0) __extension__ ({ \ - float32x2x4_t __ret; \ - __builtin_neon_vld4_dup_v(&__ret, __p0, 9); \ - __ret; \ -}) -#else -#define vld4_dup_f32(__p0) __extension__ ({ \ - float32x2x4_t __ret; \ - __builtin_neon_vld4_dup_v(&__ret, __p0, 9); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld4_dup_f16(__p0) __extension__ ({ \ - float16x4x4_t __ret; \ - __builtin_neon_vld4_dup_v(&__ret, __p0, 8); \ - __ret; \ -}) -#else -#define vld4_dup_f16(__p0) __extension__ ({ \ - float16x4x4_t __ret; \ - __builtin_neon_vld4_dup_v(&__ret, __p0, 8); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld4_dup_s32(__p0) __extension__ ({ \ - int32x2x4_t __ret; \ - __builtin_neon_vld4_dup_v(&__ret, __p0, 2); \ - __ret; \ -}) -#else -#define vld4_dup_s32(__p0) __extension__ ({ \ - int32x2x4_t __ret; \ - __builtin_neon_vld4_dup_v(&__ret, __p0, 2); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld4_dup_s64(__p0) __extension__ ({ \ - int64x1x4_t __ret; \ - __builtin_neon_vld4_dup_v(&__ret, __p0, 3); \ - __ret; \ -}) -#else -#define vld4_dup_s64(__p0) __extension__ ({ \ - int64x1x4_t __ret; \ - __builtin_neon_vld4_dup_v(&__ret, __p0, 3); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld4_dup_s16(__p0) __extension__ ({ \ - int16x4x4_t 
__ret; \ - __builtin_neon_vld4_dup_v(&__ret, __p0, 1); \ - __ret; \ -}) -#else -#define vld4_dup_s16(__p0) __extension__ ({ \ - int16x4x4_t __ret; \ - __builtin_neon_vld4_dup_v(&__ret, __p0, 1); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld4_lane_p8(__p0, __p1, __p2) __extension__ ({ \ - poly8x8x4_t __s1 = __p1; \ - poly8x8x4_t __ret; \ - __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 4); \ - __ret; \ -}) -#else -#define vld4_lane_p8(__p0, __p1, __p2) __extension__ ({ \ - poly8x8x4_t __s1 = __p1; \ - poly8x8x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ - poly8x8x4_t __ret; \ - __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 4); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld4_lane_p16(__p0, __p1, __p2) __extension__ ({ \ - poly16x4x4_t __s1 = __p1; \ - poly16x4x4_t __ret; \ - __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 5); \ - __ret; \ -}) -#else -#define vld4_lane_p16(__p0, __p1, __p2) __extension__ ({ \ - poly16x4x4_t __s1 = __p1; \ - poly16x4x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ - poly16x4x4_t __ret; \ - __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 5); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld4q_lane_p16(__p0, __p1, __p2) __extension__ ({ \ - poly16x8x4_t __s1 = __p1; \ - poly16x8x4_t __ret; \ - __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 37); \ - __ret; 
\ -}) -#else -#define vld4q_lane_p16(__p0, __p1, __p2) __extension__ ({ \ - poly16x8x4_t __s1 = __p1; \ - poly16x8x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ - poly16x8x4_t __ret; \ - __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 37); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld4q_lane_u32(__p0, __p1, __p2) __extension__ ({ \ - uint32x4x4_t __s1 = __p1; \ - uint32x4x4_t __ret; \ - __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 50); \ - __ret; \ -}) -#else -#define vld4q_lane_u32(__p0, __p1, __p2) __extension__ ({ \ - uint32x4x4_t __s1 = __p1; \ - uint32x4x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ - uint32x4x4_t __ret; \ - __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 50); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld4q_lane_u16(__p0, __p1, __p2) __extension__ ({ \ - uint16x8x4_t __s1 = __p1; \ - uint16x8x4_t __ret; \ - __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 49); \ - __ret; \ -}) -#else -#define vld4q_lane_u16(__p0, __p1, __p2) __extension__ ({ \ - uint16x8x4_t __s1 = __p1; \ - uint16x8x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ - uint16x8x4_t __ret; \ - __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 49); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = 
__builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld4q_lane_f32(__p0, __p1, __p2) __extension__ ({ \ - float32x4x4_t __s1 = __p1; \ - float32x4x4_t __ret; \ - __builtin_neon_vld4q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 41); \ - __ret; \ -}) -#else -#define vld4q_lane_f32(__p0, __p1, __p2) __extension__ ({ \ - float32x4x4_t __s1 = __p1; \ - float32x4x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ - float32x4x4_t __ret; \ - __builtin_neon_vld4q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 41); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld4q_lane_f16(__p0, __p1, __p2) __extension__ ({ \ - float16x8x4_t __s1 = __p1; \ - float16x8x4_t __ret; \ - __builtin_neon_vld4q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 40); \ - __ret; \ -}) -#else -#define vld4q_lane_f16(__p0, __p1, __p2) __extension__ ({ \ - float16x8x4_t __s1 = __p1; \ - float16x8x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x8x4_t __ret; \ - __builtin_neon_vld4q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 40); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld4q_lane_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x4x4_t __s1 = __p1; \ - int32x4x4_t __ret; \ - __builtin_neon_vld4q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 34); \ - __ret; \ -}) -#else -#define vld4q_lane_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x4x4_t __s1 = __p1; \ - int32x4x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 
3, 2, 1, 0); \ - int32x4x4_t __ret; \ - __builtin_neon_vld4q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 34); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld4q_lane_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x8x4_t __s1 = __p1; \ - int16x8x4_t __ret; \ - __builtin_neon_vld4q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 33); \ - __ret; \ -}) -#else -#define vld4q_lane_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x8x4_t __s1 = __p1; \ - int16x8x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x8x4_t __ret; \ - __builtin_neon_vld4q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 33); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld4_lane_u8(__p0, __p1, __p2) __extension__ ({ \ - uint8x8x4_t __s1 = __p1; \ - uint8x8x4_t __ret; \ - __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 16); \ - __ret; \ -}) -#else -#define vld4_lane_u8(__p0, __p1, __p2) __extension__ ({ \ - uint8x8x4_t __s1 = __p1; \ - uint8x8x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ - uint8x8x4_t __ret; \ - __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 16); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld4_lane_u32(__p0, __p1, __p2) __extension__ ({ \ - uint32x2x4_t __s1 = __p1; \ - uint32x2x4_t __ret; \ - __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 18); \ - __ret; \ -}) -#else -#define 
vld4_lane_u32(__p0, __p1, __p2) __extension__ ({ \ - uint32x2x4_t __s1 = __p1; \ - uint32x2x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ - uint32x2x4_t __ret; \ - __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 18); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld4_lane_u16(__p0, __p1, __p2) __extension__ ({ \ - uint16x4x4_t __s1 = __p1; \ - uint16x4x4_t __ret; \ - __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 17); \ - __ret; \ -}) -#else -#define vld4_lane_u16(__p0, __p1, __p2) __extension__ ({ \ - uint16x4x4_t __s1 = __p1; \ - uint16x4x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ - uint16x4x4_t __ret; \ - __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 17); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld4_lane_s8(__p0, __p1, __p2) __extension__ ({ \ - int8x8x4_t __s1 = __p1; \ - int8x8x4_t __ret; \ - __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 0); \ - __ret; \ -}) -#else -#define vld4_lane_s8(__p0, __p1, __p2) __extension__ ({ \ - int8x8x4_t __s1 = __p1; \ - int8x8x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ - int8x8x4_t __ret; \ - __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 0); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], 
__ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld4_lane_f32(__p0, __p1, __p2) __extension__ ({ \ - float32x2x4_t __s1 = __p1; \ - float32x2x4_t __ret; \ - __builtin_neon_vld4_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 9); \ - __ret; \ -}) -#else -#define vld4_lane_f32(__p0, __p1, __p2) __extension__ ({ \ - float32x2x4_t __s1 = __p1; \ - float32x2x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ - float32x2x4_t __ret; \ - __builtin_neon_vld4_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 9); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld4_lane_f16(__p0, __p1, __p2) __extension__ ({ \ - float16x4x4_t __s1 = __p1; \ - float16x4x4_t __ret; \ - __builtin_neon_vld4_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 8); \ - __ret; \ -}) -#else -#define vld4_lane_f16(__p0, __p1, __p2) __extension__ ({ \ - float16x4x4_t __s1 = __p1; \ - float16x4x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ - float16x4x4_t __ret; \ - __builtin_neon_vld4_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 8); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld4_lane_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x2x4_t __s1 = __p1; \ - int32x2x4_t __ret; \ - __builtin_neon_vld4_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 2); \ - __ret; \ -}) -#else -#define vld4_lane_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x2x4_t __s1 = __p1; \ - int32x2x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ - int32x2x4_t __ret; \ - __builtin_neon_vld4_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 2); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ - __ret.val[3] = 
__builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld4_lane_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x4x4_t __s1 = __p1; \ - int16x4x4_t __ret; \ - __builtin_neon_vld4_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 1); \ - __ret; \ -}) -#else -#define vld4_lane_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x4x4_t __s1 = __p1; \ - int16x4x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ - int16x4x4_t __ret; \ - __builtin_neon_vld4_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 1); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vmaxq_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16_t __ret; - __ret = (uint8x16_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); - return __ret; -} -#else -__ai uint8x16_t vmaxq_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __ret; - __ret = (uint8x16_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vmaxq_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); - return __ret; -} -#else -__ai uint32x4_t vmaxq_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vmaxq_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); - return __ret; -} -#else -__ai uint16x8_t vmaxq_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vmaxq_s8(int8x16_t __p0, int8x16_t __p1) { - int8x16_t __ret; - __ret = (int8x16_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, 
(int8x16_t)__p1, 32); - return __ret; -} -#else -__ai int8x16_t vmaxq_s8(int8x16_t __p0, int8x16_t __p1) { - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __ret; - __ret = (int8x16_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vmaxq_f32(float32x4_t __p0, float32x4_t __p1) { - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 41); - return __ret; -} -#else -__ai float32x4_t vmaxq_f32(float32x4_t __p0, float32x4_t __p1) { - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vmaxq_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); - return __ret; -} -#else -__ai int32x4_t vmaxq_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vmaxq_s16(int16x8_t __p0, int16x8_t __p1) { - int16x8_t __ret; - __ret = (int16x8_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); - return __ret; -} -#else -__ai int16x8_t vmaxq_s16(int16x8_t __p0, int16x8_t __p1) { - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __ret; - __ret = (int16x8_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vmax_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 16); - return __ret; -} -#else -__ai uint8x8_t vmax_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vmax_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 18); - return __ret; -} -#else -__ai uint32x2_t vmax_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint32x2_t 
__rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vmax_u16(uint16x4_t __p0, uint16x4_t __p1) { - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 17); - return __ret; -} -#else -__ai uint16x4_t vmax_u16(uint16x4_t __p0, uint16x4_t __p1) { - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vmax_s8(int8x8_t __p0, int8x8_t __p1) { - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 0); - return __ret; -} -#else -__ai int8x8_t vmax_s8(int8x8_t __p0, int8x8_t __p1) { - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vmax_f32(float32x2_t __p0, float32x2_t __p1) { - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 9); - return __ret; -} -#else -__ai float32x2_t vmax_f32(float32x2_t __p0, float32x2_t __p1) { - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vmax_s32(int32x2_t __p0, int32x2_t __p1) { - int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 2); - return __ret; -} -#else -__ai int32x2_t vmax_s32(int32x2_t __p0, int32x2_t __p1) { - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vmax_s16(int16x4_t __p0, int16x4_t __p1) { - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 1); - return __ret; -} -#else -__ai int16x4_t vmax_s16(int16x4_t __p0, int16x4_t __p1) { - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vminq_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16_t __ret; - __ret = (uint8x16_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 
48); - return __ret; -} -#else -__ai uint8x16_t vminq_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __ret; - __ret = (uint8x16_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vminq_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); - return __ret; -} -#else -__ai uint32x4_t vminq_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vminq_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); - return __ret; -} -#else -__ai uint16x8_t vminq_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vminq_s8(int8x16_t __p0, int8x16_t __p1) { - int8x16_t __ret; - __ret = (int8x16_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); - return __ret; -} -#else -__ai int8x16_t vminq_s8(int8x16_t __p0, int8x16_t __p1) { - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __ret; - __ret = (int8x16_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vminq_f32(float32x4_t __p0, float32x4_t __p1) { - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 41); - return __ret; -} -#else -__ai float32x4_t vminq_f32(float32x4_t __p0, float32x4_t __p1) { - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vminq_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); - return __ret; -} -#else -__ai int32x4_t vminq_s32(int32x4_t __p0, 
int32x4_t __p1) { - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vminq_s16(int16x8_t __p0, int16x8_t __p1) { - int16x8_t __ret; - __ret = (int16x8_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); - return __ret; -} -#else -__ai int16x8_t vminq_s16(int16x8_t __p0, int16x8_t __p1) { - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __ret; - __ret = (int16x8_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vmin_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 16); - return __ret; -} -#else -__ai uint8x8_t vmin_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vmin_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 18); - return __ret; -} -#else -__ai uint32x2_t vmin_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vmin_u16(uint16x4_t __p0, uint16x4_t __p1) { - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 17); - return __ret; -} -#else -__ai uint16x4_t vmin_u16(uint16x4_t __p0, uint16x4_t __p1) { - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vmin_s8(int8x8_t __p0, int8x8_t __p1) { - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 0); - return __ret; -} -#else -__ai int8x8_t vmin_s8(int8x8_t __p0, int8x8_t __p1) { - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return 
__ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vmin_f32(float32x2_t __p0, float32x2_t __p1) { - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 9); - return __ret; -} -#else -__ai float32x2_t vmin_f32(float32x2_t __p0, float32x2_t __p1) { - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vmin_s32(int32x2_t __p0, int32x2_t __p1) { - int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 2); - return __ret; -} -#else -__ai int32x2_t vmin_s32(int32x2_t __p0, int32x2_t __p1) { - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vmin_s16(int16x4_t __p0, int16x4_t __p1) { - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 1); - return __ret; -} -#else -__ai int16x4_t vmin_s16(int16x4_t __p0, int16x4_t __p1) { - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vmlaq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { - uint8x16_t __ret; - __ret = __p0 + __p1 * __p2; - return __ret; -} -#else -__ai uint8x16_t vmlaq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __ret; - __ret = __rev0 + __rev1 * __rev2; - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vmlaq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { - uint32x4_t __ret; - __ret = __p0 + __p1 * __p2; - return __ret; -} -#else -__ai uint32x4_t vmlaq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - uint32x4_t __ret; - __ret = __rev0 + __rev1 * __rev2; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vmlaq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { - uint16x8_t __ret; - __ret = __p0 + __p1 * __p2; - return __ret; -} -#else -__ai uint16x8_t vmlaq_u16(uint16x8_t __p0, uint16x8_t 
__p1, uint16x8_t __p2) { - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __ret; - __ret = __rev0 + __rev1 * __rev2; - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vmlaq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) { - int8x16_t __ret; - __ret = __p0 + __p1 * __p2; - return __ret; -} -#else -__ai int8x16_t vmlaq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) { - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __ret; - __ret = __rev0 + __rev1 * __rev2; - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vmlaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { - float32x4_t __ret; - __ret = __p0 + __p1 * __p2; - return __ret; -} -#else -__ai float32x4_t vmlaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - float32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - float32x4_t __ret; - __ret = __rev0 + __rev1 * __rev2; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vmlaq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { - int32x4_t __ret; - __ret = __p0 + __p1 * __p2; - return __ret; -} -#else -__ai int32x4_t vmlaq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - int32x4_t __ret; - __ret = __rev0 + __rev1 * __rev2; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vmlaq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { - int16x8_t __ret; - __ret = __p0 + __p1 * __p2; - return __ret; -} -#else -__ai int16x8_t vmlaq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __ret; - __ret = __rev0 + __rev1 * __rev2; - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vmla_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { - uint8x8_t __ret; - __ret = __p0 + __p1 * __p2; - return __ret; -} -#else -__ai uint8x8_t vmla_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 
2, 1, 0); - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __ret; - __ret = __rev0 + __rev1 * __rev2; - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vmla_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { - uint32x2_t __ret; - __ret = __p0 + __p1 * __p2; - return __ret; -} -#else -__ai uint32x2_t vmla_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); - uint32x2_t __ret; - __ret = __rev0 + __rev1 * __rev2; - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vmla_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { - uint16x4_t __ret; - __ret = __p0 + __p1 * __p2; - return __ret; -} -#else -__ai uint16x4_t vmla_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - uint16x4_t __ret; - __ret = __rev0 + __rev1 * __rev2; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vmla_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) { - int8x8_t __ret; - __ret = __p0 + __p1 * __p2; - return __ret; -} -#else -__ai int8x8_t vmla_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) { - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __ret; - __ret = __rev0 + __rev1 * __rev2; - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vmla_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { - float32x2_t __ret; - __ret = __p0 + __p1 * __p2; - return __ret; -} -#else -__ai float32x2_t vmla_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - float32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); - float32x2_t __ret; - __ret = __rev0 + __rev1 * __rev2; - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vmla_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { - int32x2_t __ret; - __ret = __p0 + __p1 * __p2; - return __ret; -} -#else -__ai int32x2_t vmla_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); - int32x2_t __ret; - __ret = __rev0 + __rev1 * __rev2; - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x4_t 
vmla_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { - int16x4_t __ret; - __ret = __p0 + __p1 * __p2; - return __ret; -} -#else -__ai int16x4_t vmla_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - int16x4_t __ret; - __ret = __rev0 + __rev1 * __rev2; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmlaq_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \ - uint32x4_t __s0 = __p0; \ - uint32x4_t __s1 = __p1; \ - uint32x2_t __s2 = __p2; \ - uint32x4_t __ret; \ - __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \ - __ret; \ -}) -#else -#define vmlaq_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \ - uint32x4_t __s0 = __p0; \ - uint32x4_t __s1 = __p1; \ - uint32x2_t __s2 = __p2; \ - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \ - uint32x4_t __ret; \ - __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmlaq_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \ - uint16x8_t __s0 = __p0; \ - uint16x8_t __s1 = __p1; \ - uint16x4_t __s2 = __p2; \ - uint16x8_t __ret; \ - __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \ - __ret; \ -}) -#else -#define vmlaq_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \ - uint16x8_t __s0 = __p0; \ - uint16x8_t __s1 = __p1; \ - uint16x4_t __s2 = __p2; \ - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ - uint16x8_t __ret; \ - __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmlaq_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ - float32x4_t __s0 = __p0; \ - float32x4_t __s1 = __p1; \ - float32x2_t __s2 = __p2; \ - float32x4_t __ret; \ - __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \ - __ret; \ -}) -#else -#define vmlaq_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ - float32x4_t __s0 = __p0; \ - float32x4_t __s1 = __p1; \ - float32x2_t __s2 = __p2; \ - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - float32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \ - float32x4_t __ret; \ - __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmlaq_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ - int32x4_t __s0 = __p0; \ - int32x4_t __s1 = __p1; \ - int32x2_t 
__s2 = __p2; \ - int32x4_t __ret; \ - __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \ - __ret; \ -}) -#else -#define vmlaq_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ - int32x4_t __s0 = __p0; \ - int32x4_t __s1 = __p1; \ - int32x2_t __s2 = __p2; \ - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \ - int32x4_t __ret; \ - __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmlaq_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ - int16x8_t __s0 = __p0; \ - int16x8_t __s1 = __p1; \ - int16x4_t __s2 = __p2; \ - int16x8_t __ret; \ - __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \ - __ret; \ -}) -#else -#define vmlaq_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ - int16x8_t __s0 = __p0; \ - int16x8_t __s1 = __p1; \ - int16x4_t __s2 = __p2; \ - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ - int16x8_t __ret; \ - __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmla_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \ - uint32x2_t __s0 = __p0; \ - uint32x2_t __s1 = __p1; \ - uint32x2_t __s2 = __p2; \ - uint32x2_t __ret; \ - __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \ - __ret; \ -}) -#else -#define vmla_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \ - uint32x2_t __s0 = __p0; \ - uint32x2_t __s1 = __p1; \ - uint32x2_t __s2 = __p2; \ - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \ - uint32x2_t __ret; \ - __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmla_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \ - uint16x4_t __s0 = __p0; \ - uint16x4_t __s1 = __p1; \ - uint16x4_t __s2 = __p2; \ - uint16x4_t __ret; \ - __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \ - __ret; \ -}) -#else -#define vmla_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \ - uint16x4_t __s0 = __p0; \ - uint16x4_t __s1 = __p1; \ - uint16x4_t __s2 = __p2; \ - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ - uint16x4_t __ret; \ - __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmla_lane_f32(__p0, __p1, __p2, __p3) 
__extension__ ({ \ - float32x2_t __s0 = __p0; \ - float32x2_t __s1 = __p1; \ - float32x2_t __s2 = __p2; \ - float32x2_t __ret; \ - __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \ - __ret; \ -}) -#else -#define vmla_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ - float32x2_t __s0 = __p0; \ - float32x2_t __s1 = __p1; \ - float32x2_t __s2 = __p2; \ - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - float32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \ - float32x2_t __ret; \ - __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmla_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ - int32x2_t __s0 = __p0; \ - int32x2_t __s1 = __p1; \ - int32x2_t __s2 = __p2; \ - int32x2_t __ret; \ - __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \ - __ret; \ -}) -#else -#define vmla_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ - int32x2_t __s0 = __p0; \ - int32x2_t __s1 = __p1; \ - int32x2_t __s2 = __p2; \ - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \ - int32x2_t __ret; \ - __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmla_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ - int16x4_t __s0 = __p0; \ - int16x4_t __s1 = __p1; \ - int16x4_t __s2 = __p2; \ - int16x4_t __ret; \ - __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \ - __ret; \ -}) -#else -#define vmla_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ - int16x4_t __s0 = __p0; \ - int16x4_t __s1 = __p1; \ - int16x4_t __s2 = __p2; \ - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ - int16x4_t __ret; \ - __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vmlaq_n_u32(uint32x4_t __p0, uint32x4_t __p1, uint32_t __p2) { - uint32x4_t __ret; - __ret = __p0 + __p1 * (uint32x4_t) {__p2, __p2, __p2, __p2}; - return __ret; -} -#else -__ai uint32x4_t vmlaq_n_u32(uint32x4_t __p0, uint32x4_t __p1, uint32_t __p2) { - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint32x4_t __ret; - __ret = __rev0 + __rev1 * (uint32x4_t) {__p2, __p2, __p2, __p2}; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vmlaq_n_u16(uint16x8_t __p0, uint16x8_t __p1, uint16_t __p2) { - uint16x8_t __ret; - __ret = __p0 + __p1 * (uint16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2}; - return __ret; -} -#else -__ai uint16x8_t vmlaq_n_u16(uint16x8_t __p0, uint16x8_t __p1, uint16_t __p2) { - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 
6, 5, 4, 3, 2, 1, 0); - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __ret; - __ret = __rev0 + __rev1 * (uint16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2}; - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vmlaq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) { - float32x4_t __ret; - __ret = __p0 + __p1 * (float32x4_t) {__p2, __p2, __p2, __p2}; - return __ret; -} -#else -__ai float32x4_t vmlaq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) { - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - float32x4_t __ret; - __ret = __rev0 + __rev1 * (float32x4_t) {__p2, __p2, __p2, __p2}; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vmlaq_n_s32(int32x4_t __p0, int32x4_t __p1, int32_t __p2) { - int32x4_t __ret; - __ret = __p0 + __p1 * (int32x4_t) {__p2, __p2, __p2, __p2}; - return __ret; -} -#else -__ai int32x4_t vmlaq_n_s32(int32x4_t __p0, int32x4_t __p1, int32_t __p2) { - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int32x4_t __ret; - __ret = __rev0 + __rev1 * (int32x4_t) {__p2, __p2, __p2, __p2}; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vmlaq_n_s16(int16x8_t __p0, int16x8_t __p1, int16_t __p2) { - int16x8_t __ret; - __ret = __p0 + __p1 * (int16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2}; - return __ret; -} -#else -__ai int16x8_t vmlaq_n_s16(int16x8_t __p0, int16x8_t __p1, int16_t __p2) { - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __ret; - __ret = __rev0 + __rev1 * (int16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2}; - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vmla_n_u32(uint32x2_t __p0, uint32x2_t __p1, uint32_t __p2) { - uint32x2_t __ret; - __ret = __p0 + __p1 * (uint32x2_t) {__p2, __p2}; - return __ret; -} -#else -__ai uint32x2_t vmla_n_u32(uint32x2_t __p0, uint32x2_t __p1, uint32_t __p2) { - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint32x2_t __ret; - __ret = __rev0 + __rev1 * (uint32x2_t) {__p2, __p2}; - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vmla_n_u16(uint16x4_t __p0, uint16x4_t __p1, uint16_t __p2) { - uint16x4_t __ret; - __ret = __p0 + __p1 * (uint16x4_t) {__p2, __p2, __p2, __p2}; - return __ret; -} -#else -__ai uint16x4_t vmla_n_u16(uint16x4_t __p0, uint16x4_t __p1, uint16_t __p2) { - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint16x4_t __ret; - __ret = __rev0 + __rev1 * (uint16x4_t) {__p2, __p2, __p2, __p2}; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x2_t 
vmla_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) { - float32x2_t __ret; - __ret = __p0 + __p1 * (float32x2_t) {__p2, __p2}; - return __ret; -} -#else -__ai float32x2_t vmla_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) { - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - float32x2_t __ret; - __ret = __rev0 + __rev1 * (float32x2_t) {__p2, __p2}; - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vmla_n_s32(int32x2_t __p0, int32x2_t __p1, int32_t __p2) { - int32x2_t __ret; - __ret = __p0 + __p1 * (int32x2_t) {__p2, __p2}; - return __ret; -} -#else -__ai int32x2_t vmla_n_s32(int32x2_t __p0, int32x2_t __p1, int32_t __p2) { - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - int32x2_t __ret; - __ret = __rev0 + __rev1 * (int32x2_t) {__p2, __p2}; - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vmla_n_s16(int16x4_t __p0, int16x4_t __p1, int16_t __p2) { - int16x4_t __ret; - __ret = __p0 + __p1 * (int16x4_t) {__p2, __p2, __p2, __p2}; - return __ret; -} -#else -__ai int16x4_t vmla_n_s16(int16x4_t __p0, int16x4_t __p1, int16_t __p2) { - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int16x4_t __ret; - __ret = __rev0 + __rev1 * (int16x4_t) {__p2, __p2, __p2, __p2}; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vmlsq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { - uint8x16_t __ret; - __ret = __p0 - __p1 * __p2; - return __ret; -} -#else -__ai uint8x16_t vmlsq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __ret; - __ret = __rev0 - __rev1 * __rev2; - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vmlsq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { - uint32x4_t __ret; - __ret = __p0 - __p1 * __p2; - return __ret; -} -#else -__ai uint32x4_t vmlsq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - uint32x4_t __ret; - __ret = __rev0 - __rev1 * __rev2; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vmlsq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { - uint16x8_t __ret; - __ret = __p0 - __p1 * __p2; - return __ret; -} -#else -__ai uint16x8_t vmlsq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - 
uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __ret; - __ret = __rev0 - __rev1 * __rev2; - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vmlsq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) { - int8x16_t __ret; - __ret = __p0 - __p1 * __p2; - return __ret; -} -#else -__ai int8x16_t vmlsq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) { - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __ret; - __ret = __rev0 - __rev1 * __rev2; - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vmlsq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { - float32x4_t __ret; - __ret = __p0 - __p1 * __p2; - return __ret; -} -#else -__ai float32x4_t vmlsq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - float32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - float32x4_t __ret; - __ret = __rev0 - __rev1 * __rev2; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vmlsq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { - int32x4_t __ret; - __ret = __p0 - __p1 * __p2; - return __ret; -} -#else -__ai int32x4_t vmlsq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - int32x4_t __ret; - __ret = __rev0 - __rev1 * __rev2; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vmlsq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { - int16x8_t __ret; - __ret = __p0 - __p1 * __p2; - return __ret; -} -#else -__ai int16x8_t vmlsq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __ret; - __ret = __rev0 - __rev1 * __rev2; - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vmls_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { - uint8x8_t __ret; - __ret = __p0 - __p1 * __p2; - return __ret; -} -#else -__ai uint8x8_t vmls_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev2; 
__rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __ret; - __ret = __rev0 - __rev1 * __rev2; - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vmls_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { - uint32x2_t __ret; - __ret = __p0 - __p1 * __p2; - return __ret; -} -#else -__ai uint32x2_t vmls_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); - uint32x2_t __ret; - __ret = __rev0 - __rev1 * __rev2; - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vmls_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { - uint16x4_t __ret; - __ret = __p0 - __p1 * __p2; - return __ret; -} -#else -__ai uint16x4_t vmls_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - uint16x4_t __ret; - __ret = __rev0 - __rev1 * __rev2; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vmls_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) { - int8x8_t __ret; - __ret = __p0 - __p1 * __p2; - return __ret; -} -#else -__ai int8x8_t vmls_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) { - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __ret; - __ret = __rev0 - __rev1 * __rev2; - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vmls_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { - float32x2_t __ret; - __ret = __p0 - __p1 * __p2; - return __ret; -} -#else -__ai float32x2_t vmls_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - float32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); - float32x2_t __ret; - __ret = __rev0 - __rev1 * __rev2; - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vmls_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { - int32x2_t __ret; - __ret = __p0 - __p1 * __p2; - return __ret; -} -#else -__ai int32x2_t vmls_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); - int32x2_t __ret; - __ret = __rev0 - __rev1 * __rev2; - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vmls_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { - int16x4_t __ret; - __ret = __p0 - __p1 * __p2; - return 
__ret; -} -#else -__ai int16x4_t vmls_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - int16x4_t __ret; - __ret = __rev0 - __rev1 * __rev2; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmlsq_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \ - uint32x4_t __s0 = __p0; \ - uint32x4_t __s1 = __p1; \ - uint32x2_t __s2 = __p2; \ - uint32x4_t __ret; \ - __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \ - __ret; \ -}) -#else -#define vmlsq_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \ - uint32x4_t __s0 = __p0; \ - uint32x4_t __s1 = __p1; \ - uint32x2_t __s2 = __p2; \ - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \ - uint32x4_t __ret; \ - __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmlsq_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \ - uint16x8_t __s0 = __p0; \ - uint16x8_t __s1 = __p1; \ - uint16x4_t __s2 = __p2; \ - uint16x8_t __ret; \ - __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \ - __ret; \ -}) -#else -#define vmlsq_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \ - uint16x8_t __s0 = __p0; \ - uint16x8_t __s1 = __p1; \ - uint16x4_t __s2 = __p2; \ - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ - uint16x8_t __ret; \ - __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmlsq_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ - float32x4_t __s0 = __p0; \ - float32x4_t __s1 = __p1; \ - float32x2_t __s2 = __p2; \ - float32x4_t __ret; \ - __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \ - __ret; \ -}) -#else -#define vmlsq_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ - float32x4_t __s0 = __p0; \ - float32x4_t __s1 = __p1; \ - float32x2_t __s2 = __p2; \ - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - float32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \ - float32x4_t __ret; \ - __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmlsq_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ - int32x4_t __s0 = __p0; \ - int32x4_t __s1 = __p1; \ - int32x2_t __s2 = __p2; \ - int32x4_t __ret; \ - __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); 
\ - __ret; \ -}) -#else -#define vmlsq_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ - int32x4_t __s0 = __p0; \ - int32x4_t __s1 = __p1; \ - int32x2_t __s2 = __p2; \ - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \ - int32x4_t __ret; \ - __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmlsq_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ - int16x8_t __s0 = __p0; \ - int16x8_t __s1 = __p1; \ - int16x4_t __s2 = __p2; \ - int16x8_t __ret; \ - __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \ - __ret; \ -}) -#else -#define vmlsq_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ - int16x8_t __s0 = __p0; \ - int16x8_t __s1 = __p1; \ - int16x4_t __s2 = __p2; \ - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ - int16x8_t __ret; \ - __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmls_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \ - uint32x2_t __s0 = __p0; \ - uint32x2_t __s1 = __p1; \ - uint32x2_t __s2 = __p2; \ - uint32x2_t __ret; \ - __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \ - __ret; \ -}) -#else -#define vmls_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \ - uint32x2_t __s0 = __p0; \ - uint32x2_t __s1 = __p1; \ - uint32x2_t __s2 = __p2; \ - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \ - uint32x2_t __ret; \ - __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmls_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \ - uint16x4_t __s0 = __p0; \ - uint16x4_t __s1 = __p1; \ - uint16x4_t __s2 = __p2; \ - uint16x4_t __ret; \ - __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \ - __ret; \ -}) -#else -#define vmls_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \ - uint16x4_t __s0 = __p0; \ - uint16x4_t __s1 = __p1; \ - uint16x4_t __s2 = __p2; \ - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ - uint16x4_t __ret; \ - __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmls_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ - float32x2_t __s0 = __p0; \ - float32x2_t __s1 = __p1; \ - float32x2_t __s2 = __p2; \ - float32x2_t __ret; \ 
- __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \ - __ret; \ -}) -#else -#define vmls_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ - float32x2_t __s0 = __p0; \ - float32x2_t __s1 = __p1; \ - float32x2_t __s2 = __p2; \ - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - float32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \ - float32x2_t __ret; \ - __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmls_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ - int32x2_t __s0 = __p0; \ - int32x2_t __s1 = __p1; \ - int32x2_t __s2 = __p2; \ - int32x2_t __ret; \ - __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \ - __ret; \ -}) -#else -#define vmls_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ - int32x2_t __s0 = __p0; \ - int32x2_t __s1 = __p1; \ - int32x2_t __s2 = __p2; \ - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \ - int32x2_t __ret; \ - __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmls_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ - int16x4_t __s0 = __p0; \ - int16x4_t __s1 = __p1; \ - int16x4_t __s2 = __p2; \ - int16x4_t __ret; \ - __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \ - __ret; \ -}) -#else -#define vmls_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ - int16x4_t __s0 = __p0; \ - int16x4_t __s1 = __p1; \ - int16x4_t __s2 = __p2; \ - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ - int16x4_t __ret; \ - __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vmlsq_n_u32(uint32x4_t __p0, uint32x4_t __p1, uint32_t __p2) { - uint32x4_t __ret; - __ret = __p0 - __p1 * (uint32x4_t) {__p2, __p2, __p2, __p2}; - return __ret; -} -#else -__ai uint32x4_t vmlsq_n_u32(uint32x4_t __p0, uint32x4_t __p1, uint32_t __p2) { - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint32x4_t __ret; - __ret = __rev0 - __rev1 * (uint32x4_t) {__p2, __p2, __p2, __p2}; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vmlsq_n_u16(uint16x8_t __p0, uint16x8_t __p1, uint16_t __p2) { - uint16x8_t __ret; - __ret = __p0 - __p1 * (uint16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2}; - return __ret; -} -#else -__ai uint16x8_t vmlsq_n_u16(uint16x8_t __p0, uint16x8_t __p1, uint16_t __p2) { - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t 
__ret; - __ret = __rev0 - __rev1 * (uint16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2}; - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vmlsq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) { - float32x4_t __ret; - __ret = __p0 - __p1 * (float32x4_t) {__p2, __p2, __p2, __p2}; - return __ret; -} -#else -__ai float32x4_t vmlsq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) { - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - float32x4_t __ret; - __ret = __rev0 - __rev1 * (float32x4_t) {__p2, __p2, __p2, __p2}; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vmlsq_n_s32(int32x4_t __p0, int32x4_t __p1, int32_t __p2) { - int32x4_t __ret; - __ret = __p0 - __p1 * (int32x4_t) {__p2, __p2, __p2, __p2}; - return __ret; -} -#else -__ai int32x4_t vmlsq_n_s32(int32x4_t __p0, int32x4_t __p1, int32_t __p2) { - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int32x4_t __ret; - __ret = __rev0 - __rev1 * (int32x4_t) {__p2, __p2, __p2, __p2}; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vmlsq_n_s16(int16x8_t __p0, int16x8_t __p1, int16_t __p2) { - int16x8_t __ret; - __ret = __p0 - __p1 * (int16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2}; - return __ret; -} -#else -__ai int16x8_t vmlsq_n_s16(int16x8_t __p0, int16x8_t __p1, int16_t __p2) { - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __ret; - __ret = __rev0 - __rev1 * (int16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2}; - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vmls_n_u32(uint32x2_t __p0, uint32x2_t __p1, uint32_t __p2) { - uint32x2_t __ret; - __ret = __p0 - __p1 * (uint32x2_t) {__p2, __p2}; - return __ret; -} -#else -__ai uint32x2_t vmls_n_u32(uint32x2_t __p0, uint32x2_t __p1, uint32_t __p2) { - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint32x2_t __ret; - __ret = __rev0 - __rev1 * (uint32x2_t) {__p2, __p2}; - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vmls_n_u16(uint16x4_t __p0, uint16x4_t __p1, uint16_t __p2) { - uint16x4_t __ret; - __ret = __p0 - __p1 * (uint16x4_t) {__p2, __p2, __p2, __p2}; - return __ret; -} -#else -__ai uint16x4_t vmls_n_u16(uint16x4_t __p0, uint16x4_t __p1, uint16_t __p2) { - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint16x4_t __ret; - __ret = __rev0 - __rev1 * (uint16x4_t) {__p2, __p2, __p2, __p2}; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vmls_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) { - float32x2_t __ret; - __ret = __p0 - __p1 * (float32x2_t) {__p2, 
__p2}; - return __ret; -} -#else -__ai float32x2_t vmls_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) { - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - float32x2_t __ret; - __ret = __rev0 - __rev1 * (float32x2_t) {__p2, __p2}; - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vmls_n_s32(int32x2_t __p0, int32x2_t __p1, int32_t __p2) { - int32x2_t __ret; - __ret = __p0 - __p1 * (int32x2_t) {__p2, __p2}; - return __ret; -} -#else -__ai int32x2_t vmls_n_s32(int32x2_t __p0, int32x2_t __p1, int32_t __p2) { - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - int32x2_t __ret; - __ret = __rev0 - __rev1 * (int32x2_t) {__p2, __p2}; - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vmls_n_s16(int16x4_t __p0, int16x4_t __p1, int16_t __p2) { - int16x4_t __ret; - __ret = __p0 - __p1 * (int16x4_t) {__p2, __p2, __p2, __p2}; - return __ret; -} -#else -__ai int16x4_t vmls_n_s16(int16x4_t __p0, int16x4_t __p1, int16_t __p2) { - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int16x4_t __ret; - __ret = __rev0 - __rev1 * (int16x4_t) {__p2, __p2, __p2, __p2}; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly8x8_t vmov_n_p8(poly8_t __p0) { - poly8x8_t __ret; - __ret = (poly8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; - return __ret; -} -#else -__ai poly8x8_t vmov_n_p8(poly8_t __p0) { - poly8x8_t __ret; - __ret = (poly8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly16x4_t vmov_n_p16(poly16_t __p0) { - poly16x4_t __ret; - __ret = (poly16x4_t) {__p0, __p0, __p0, __p0}; - return __ret; -} -#else -__ai poly16x4_t vmov_n_p16(poly16_t __p0) { - poly16x4_t __ret; - __ret = (poly16x4_t) {__p0, __p0, __p0, __p0}; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly8x16_t vmovq_n_p8(poly8_t __p0) { - poly8x16_t __ret; - __ret = (poly8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; - return __ret; -} -#else -__ai poly8x16_t vmovq_n_p8(poly8_t __p0) { - poly8x16_t __ret; - __ret = (poly8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly16x8_t vmovq_n_p16(poly16_t __p0) { - poly16x8_t __ret; - __ret = (poly16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; - return __ret; -} -#else -__ai poly16x8_t vmovq_n_p16(poly16_t __p0) { - poly16x8_t __ret; - __ret = (poly16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vmovq_n_u8(uint8_t __p0) { - uint8x16_t __ret; - __ret = (uint8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, 
__p0, __p0, __p0, __p0, __p0, __p0, __p0}; - return __ret; -} -#else -__ai uint8x16_t vmovq_n_u8(uint8_t __p0) { - uint8x16_t __ret; - __ret = (uint8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vmovq_n_u32(uint32_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t) {__p0, __p0, __p0, __p0}; - return __ret; -} -#else -__ai uint32x4_t vmovq_n_u32(uint32_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t) {__p0, __p0, __p0, __p0}; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vmovq_n_u64(uint64_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t) {__p0, __p0}; - return __ret; -} -#else -__ai uint64x2_t vmovq_n_u64(uint64_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t) {__p0, __p0}; - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vmovq_n_u16(uint16_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; - return __ret; -} -#else -__ai uint16x8_t vmovq_n_u16(uint16_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vmovq_n_s8(int8_t __p0) { - int8x16_t __ret; - __ret = (int8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; - return __ret; -} -#else -__ai int8x16_t vmovq_n_s8(int8_t __p0) { - int8x16_t __ret; - __ret = (int8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vmovq_n_f32(float32_t __p0) { - float32x4_t __ret; - __ret = (float32x4_t) {__p0, __p0, __p0, __p0}; - return __ret; -} -#else -__ai float32x4_t vmovq_n_f32(float32_t __p0) { - float32x4_t __ret; - __ret = (float32x4_t) {__p0, __p0, __p0, __p0}; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmovq_n_f16(__p0) __extension__ ({ \ - float16_t __s0 = __p0; \ - float16x8_t __ret; \ - __ret = (float16x8_t) {__s0, __s0, __s0, __s0, __s0, __s0, __s0, __s0}; \ - __ret; \ -}) -#else -#define vmovq_n_f16(__p0) __extension__ ({ \ - float16_t __s0 = __p0; \ - float16x8_t __ret; \ - __ret = (float16x8_t) {__s0, __s0, __s0, __s0, __s0, __s0, __s0, __s0}; \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vmovq_n_s32(int32_t __p0) { - int32x4_t __ret; - __ret = (int32x4_t) {__p0, __p0, __p0, __p0}; - return __ret; -} -#else -__ai int32x4_t vmovq_n_s32(int32_t __p0) { - int32x4_t __ret; - __ret = (int32x4_t) {__p0, __p0, __p0, __p0}; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vmovq_n_s64(int64_t __p0) { - int64x2_t __ret; - __ret = (int64x2_t) {__p0, __p0}; - return __ret; -} -#else -__ai int64x2_t vmovq_n_s64(int64_t __p0) { - int64x2_t __ret; - __ret = (int64x2_t) 
{__p0, __p0}; - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vmovq_n_s16(int16_t __p0) { - int16x8_t __ret; - __ret = (int16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; - return __ret; -} -#else -__ai int16x8_t vmovq_n_s16(int16_t __p0) { - int16x8_t __ret; - __ret = (int16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vmov_n_u8(uint8_t __p0) { - uint8x8_t __ret; - __ret = (uint8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; - return __ret; -} -#else -__ai uint8x8_t vmov_n_u8(uint8_t __p0) { - uint8x8_t __ret; - __ret = (uint8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vmov_n_u32(uint32_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t) {__p0, __p0}; - return __ret; -} -#else -__ai uint32x2_t vmov_n_u32(uint32_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t) {__p0, __p0}; - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x1_t vmov_n_u64(uint64_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t) {__p0}; - return __ret; -} -#else -__ai uint64x1_t vmov_n_u64(uint64_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t) {__p0}; - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vmov_n_u16(uint16_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t) {__p0, __p0, __p0, __p0}; - return __ret; -} -#else -__ai uint16x4_t vmov_n_u16(uint16_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t) {__p0, __p0, __p0, __p0}; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vmov_n_s8(int8_t __p0) { - int8x8_t __ret; - __ret = (int8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; - return __ret; -} -#else -__ai int8x8_t vmov_n_s8(int8_t __p0) { - int8x8_t __ret; - __ret = (int8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vmov_n_f32(float32_t __p0) { - float32x2_t __ret; - __ret = (float32x2_t) {__p0, __p0}; - return __ret; -} -#else -__ai float32x2_t vmov_n_f32(float32_t __p0) { - float32x2_t __ret; - __ret = (float32x2_t) {__p0, __p0}; - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmov_n_f16(__p0) __extension__ ({ \ - float16_t __s0 = __p0; \ - float16x4_t __ret; \ - __ret = (float16x4_t) {__s0, __s0, __s0, __s0}; \ - __ret; \ -}) -#else -#define vmov_n_f16(__p0) __extension__ ({ \ - float16_t __s0 = __p0; \ - float16x4_t __ret; \ - __ret = (float16x4_t) {__s0, __s0, __s0, __s0}; \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vmov_n_s32(int32_t __p0) { - int32x2_t __ret; - __ret = (int32x2_t) {__p0, __p0}; - return __ret; -} -#else -__ai int32x2_t vmov_n_s32(int32_t __p0) { - int32x2_t __ret; - __ret = (int32x2_t) {__p0, __p0}; - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x1_t vmov_n_s64(int64_t __p0) { - int64x1_t __ret; - __ret = 
(int64x1_t) {__p0}; - return __ret; -} -#else -__ai int64x1_t vmov_n_s64(int64_t __p0) { - int64x1_t __ret; - __ret = (int64x1_t) {__p0}; - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vmov_n_s16(int16_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t) {__p0, __p0, __p0, __p0}; - return __ret; -} -#else -__ai int16x4_t vmov_n_s16(int16_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t) {__p0, __p0, __p0, __p0}; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vmovl_u8(uint8x8_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 49); - return __ret; -} -#else -__ai uint16x8_t vmovl_u8(uint8x8_t __p0) { - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vmovl_v((int8x8_t)__rev0, 49); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -__ai uint16x8_t __noswap_vmovl_u8(uint8x8_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 49); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vmovl_u32(uint32x2_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 51); - return __ret; -} -#else -__ai uint64x2_t vmovl_u32(uint32x2_t __p0) { - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint64x2_t __ret; - __ret = (uint64x2_t) __builtin_neon_vmovl_v((int8x8_t)__rev0, 51); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -__ai uint64x2_t __noswap_vmovl_u32(uint32x2_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 51); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vmovl_u16(uint16x4_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 50); - return __ret; -} -#else -__ai uint32x4_t vmovl_u16(uint16x4_t __p0) { - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vmovl_v((int8x8_t)__rev0, 50); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -__ai uint32x4_t __noswap_vmovl_u16(uint16x4_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 50); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vmovl_s8(int8x8_t __p0) { - int16x8_t __ret; - __ret = (int16x8_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 33); - return __ret; -} -#else -__ai int16x8_t vmovl_s8(int8x8_t __p0) { - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __ret; - __ret = (int16x8_t) __builtin_neon_vmovl_v((int8x8_t)__rev0, 33); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -__ai int16x8_t __noswap_vmovl_s8(int8x8_t __p0) { - int16x8_t __ret; - __ret = (int16x8_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 33); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vmovl_s32(int32x2_t __p0) { - int64x2_t __ret; - __ret = (int64x2_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 35); - return __ret; -} -#else -__ai int64x2_t vmovl_s32(int32x2_t __p0) { - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int64x2_t __ret; - __ret = (int64x2_t) __builtin_neon_vmovl_v((int8x8_t)__rev0, 35); - __ret = 
__builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -__ai int64x2_t __noswap_vmovl_s32(int32x2_t __p0) { - int64x2_t __ret; - __ret = (int64x2_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 35); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vmovl_s16(int16x4_t __p0) { - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 34); - return __ret; -} -#else -__ai int32x4_t vmovl_s16(int16x4_t __p0) { - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vmovl_v((int8x8_t)__rev0, 34); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -__ai int32x4_t __noswap_vmovl_s16(int16x4_t __p0) { - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 34); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vmovn_u32(uint32x4_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 17); - return __ret; -} -#else -__ai uint16x4_t vmovn_u32(uint32x4_t __p0) { - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vmovn_v((int8x16_t)__rev0, 17); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -__ai uint16x4_t __noswap_vmovn_u32(uint32x4_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 17); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vmovn_u64(uint64x2_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 18); - return __ret; -} -#else -__ai uint32x2_t vmovn_u64(uint64x2_t __p0) { - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vmovn_v((int8x16_t)__rev0, 18); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -__ai uint32x2_t __noswap_vmovn_u64(uint64x2_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 18); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vmovn_u16(uint16x8_t __p0) { - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 16); - return __ret; -} -#else -__ai uint8x8_t vmovn_u16(uint16x8_t __p0) { - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vmovn_v((int8x16_t)__rev0, 16); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -__ai uint8x8_t __noswap_vmovn_u16(uint16x8_t __p0) { - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 16); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vmovn_s32(int32x4_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 1); - return __ret; -} -#else -__ai int16x4_t vmovn_s32(int32x4_t __p0) { - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vmovn_v((int8x16_t)__rev0, 1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -__ai int16x4_t __noswap_vmovn_s32(int32x4_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vmovn_s64(int64x2_t __p0) { - int32x2_t 
__ret; - __ret = (int32x2_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 2); - return __ret; -} -#else -__ai int32x2_t vmovn_s64(int64x2_t __p0) { - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vmovn_v((int8x16_t)__rev0, 2); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -__ai int32x2_t __noswap_vmovn_s64(int64x2_t __p0) { - int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 2); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vmovn_s16(int16x8_t __p0) { - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 0); - return __ret; -} -#else -__ai int8x8_t vmovn_s16(int16x8_t __p0) { - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vmovn_v((int8x16_t)__rev0, 0); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -__ai int8x8_t __noswap_vmovn_s16(int16x8_t __p0) { - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vmulq_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16_t __ret; - __ret = __p0 * __p1; - return __ret; -} -#else -__ai uint8x16_t vmulq_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __ret; - __ret = __rev0 * __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vmulq_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4_t __ret; - __ret = __p0 * __p1; - return __ret; -} -#else -__ai uint32x4_t vmulq_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint32x4_t __ret; - __ret = __rev0 * __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vmulq_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint16x8_t __ret; - __ret = __p0 * __p1; - return __ret; -} -#else -__ai uint16x8_t vmulq_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __ret; - __ret = __rev0 * __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vmulq_s8(int8x16_t __p0, int8x16_t __p1) { - int8x16_t __ret; - __ret = __p0 * __p1; - return __ret; -} -#else -__ai int8x16_t vmulq_s8(int8x16_t __p0, int8x16_t __p1) { - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __ret; - __ret = __rev0 * __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef 
__LITTLE_ENDIAN__ -__ai float32x4_t vmulq_f32(float32x4_t __p0, float32x4_t __p1) { - float32x4_t __ret; - __ret = __p0 * __p1; - return __ret; -} -#else -__ai float32x4_t vmulq_f32(float32x4_t __p0, float32x4_t __p1) { - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - float32x4_t __ret; - __ret = __rev0 * __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vmulq_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4_t __ret; - __ret = __p0 * __p1; - return __ret; -} -#else -__ai int32x4_t vmulq_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int32x4_t __ret; - __ret = __rev0 * __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vmulq_s16(int16x8_t __p0, int16x8_t __p1) { - int16x8_t __ret; - __ret = __p0 * __p1; - return __ret; -} -#else -__ai int16x8_t vmulq_s16(int16x8_t __p0, int16x8_t __p1) { - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __ret; - __ret = __rev0 * __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vmul_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x8_t __ret; - __ret = __p0 * __p1; - return __ret; -} -#else -__ai uint8x8_t vmul_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __ret; - __ret = __rev0 * __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vmul_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint32x2_t __ret; - __ret = __p0 * __p1; - return __ret; -} -#else -__ai uint32x2_t vmul_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint32x2_t __ret; - __ret = __rev0 * __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vmul_u16(uint16x4_t __p0, uint16x4_t __p1) { - uint16x4_t __ret; - __ret = __p0 * __p1; - return __ret; -} -#else -__ai uint16x4_t vmul_u16(uint16x4_t __p0, uint16x4_t __p1) { - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint16x4_t __ret; - __ret = __rev0 * __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vmul_s8(int8x8_t __p0, int8x8_t __p1) { - int8x8_t __ret; - __ret = __p0 * __p1; - return __ret; -} -#else -__ai int8x8_t vmul_s8(int8x8_t __p0, int8x8_t __p1) { - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __ret; - __ret = __rev0 * __rev1; - __ret = 
__builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vmul_f32(float32x2_t __p0, float32x2_t __p1) { - float32x2_t __ret; - __ret = __p0 * __p1; - return __ret; -} -#else -__ai float32x2_t vmul_f32(float32x2_t __p0, float32x2_t __p1) { - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - float32x2_t __ret; - __ret = __rev0 * __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vmul_s32(int32x2_t __p0, int32x2_t __p1) { - int32x2_t __ret; - __ret = __p0 * __p1; - return __ret; -} -#else -__ai int32x2_t vmul_s32(int32x2_t __p0, int32x2_t __p1) { - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - int32x2_t __ret; - __ret = __rev0 * __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vmul_s16(int16x4_t __p0, int16x4_t __p1) { - int16x4_t __ret; - __ret = __p0 * __p1; - return __ret; -} -#else -__ai int16x4_t vmul_s16(int16x4_t __p0, int16x4_t __p1) { - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int16x4_t __ret; - __ret = __rev0 * __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly8x8_t vmul_p8(poly8x8_t __p0, poly8x8_t __p1) { - poly8x8_t __ret; - __ret = (poly8x8_t) __builtin_neon_vmul_v((int8x8_t)__p0, (int8x8_t)__p1, 4); - return __ret; -} -#else -__ai poly8x8_t vmul_p8(poly8x8_t __p0, poly8x8_t __p1) { - poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x8_t __ret; - __ret = (poly8x8_t) __builtin_neon_vmul_v((int8x8_t)__rev0, (int8x8_t)__rev1, 4); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly8x16_t vmulq_p8(poly8x16_t __p0, poly8x16_t __p1) { - poly8x16_t __ret; - __ret = (poly8x16_t) __builtin_neon_vmulq_v((int8x16_t)__p0, (int8x16_t)__p1, 36); - return __ret; -} -#else -__ai poly8x16_t vmulq_p8(poly8x16_t __p0, poly8x16_t __p1) { - poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x16_t __ret; - __ret = (poly8x16_t) __builtin_neon_vmulq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 36); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmulq_lane_u32(__p0, __p1, __p2) __extension__ ({ \ - uint32x4_t __s0 = __p0; \ - uint32x2_t __s1 = __p1; \ - uint32x4_t __ret; \ - __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2); \ - __ret; \ -}) -#else -#define vmulq_lane_u32(__p0, __p1, __p2) __extension__ ({ \ - uint32x4_t __s0 = __p0; \ - uint32x2_t __s1 = __p1; \ - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - 
uint32x4_t __ret; \ - __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmulq_lane_u16(__p0, __p1, __p2) __extension__ ({ \ - uint16x8_t __s0 = __p0; \ - uint16x4_t __s1 = __p1; \ - uint16x8_t __ret; \ - __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2); \ - __ret; \ -}) -#else -#define vmulq_lane_u16(__p0, __p1, __p2) __extension__ ({ \ - uint16x8_t __s0 = __p0; \ - uint16x4_t __s1 = __p1; \ - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - uint16x8_t __ret; \ - __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmulq_lane_f32(__p0, __p1, __p2) __extension__ ({ \ - float32x4_t __s0 = __p0; \ - float32x2_t __s1 = __p1; \ - float32x4_t __ret; \ - __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2); \ - __ret; \ -}) -#else -#define vmulq_lane_f32(__p0, __p1, __p2) __extension__ ({ \ - float32x4_t __s0 = __p0; \ - float32x2_t __s1 = __p1; \ - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - float32x4_t __ret; \ - __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmulq_lane_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x4_t __s0 = __p0; \ - int32x2_t __s1 = __p1; \ - int32x4_t __ret; \ - __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2); \ - __ret; \ -}) -#else -#define vmulq_lane_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x4_t __s0 = __p0; \ - int32x2_t __s1 = __p1; \ - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - int32x4_t __ret; \ - __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmulq_lane_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x8_t __s0 = __p0; \ - int16x4_t __s1 = __p1; \ - int16x8_t __ret; \ - __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2); \ - __ret; \ -}) -#else -#define vmulq_lane_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x8_t __s0 = __p0; \ - int16x4_t __s1 = __p1; \ - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - int16x8_t __ret; \ - __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmul_lane_u32(__p0, __p1, __p2) __extension__ ({ \ - uint32x2_t __s0 = __p0; \ - uint32x2_t __s1 = __p1; \ - uint32x2_t __ret; \ - __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2); \ - __ret; \ -}) -#else -#define 
vmul_lane_u32(__p0, __p1, __p2) __extension__ ({ \ - uint32x2_t __s0 = __p0; \ - uint32x2_t __s1 = __p1; \ - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - uint32x2_t __ret; \ - __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmul_lane_u16(__p0, __p1, __p2) __extension__ ({ \ - uint16x4_t __s0 = __p0; \ - uint16x4_t __s1 = __p1; \ - uint16x4_t __ret; \ - __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2); \ - __ret; \ -}) -#else -#define vmul_lane_u16(__p0, __p1, __p2) __extension__ ({ \ - uint16x4_t __s0 = __p0; \ - uint16x4_t __s1 = __p1; \ - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - uint16x4_t __ret; \ - __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmul_lane_f32(__p0, __p1, __p2) __extension__ ({ \ - float32x2_t __s0 = __p0; \ - float32x2_t __s1 = __p1; \ - float32x2_t __ret; \ - __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2); \ - __ret; \ -}) -#else -#define vmul_lane_f32(__p0, __p1, __p2) __extension__ ({ \ - float32x2_t __s0 = __p0; \ - float32x2_t __s1 = __p1; \ - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - float32x2_t __ret; \ - __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmul_lane_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x2_t __s0 = __p0; \ - int32x2_t __s1 = __p1; \ - int32x2_t __ret; \ - __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2); \ - __ret; \ -}) -#else -#define vmul_lane_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x2_t __s0 = __p0; \ - int32x2_t __s1 = __p1; \ - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - int32x2_t __ret; \ - __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmul_lane_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x4_t __s0 = __p0; \ - int16x4_t __s1 = __p1; \ - int16x4_t __ret; \ - __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2); \ - __ret; \ -}) -#else -#define vmul_lane_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x4_t __s0 = __p0; \ - int16x4_t __s1 = __p1; \ - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - int16x4_t __ret; \ - __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vmulq_n_u32(uint32x4_t __p0, uint32_t __p1) { - uint32x4_t __ret; - __ret = __p0 * (uint32x4_t) {__p1, __p1, __p1, __p1}; - return __ret; -} -#else -__ai uint32x4_t vmulq_n_u32(uint32x4_t __p0, uint32_t __p1) 
{ - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __ret; - __ret = __rev0 * (uint32x4_t) {__p1, __p1, __p1, __p1}; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vmulq_n_u16(uint16x8_t __p0, uint16_t __p1) { - uint16x8_t __ret; - __ret = __p0 * (uint16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1}; - return __ret; -} -#else -__ai uint16x8_t vmulq_n_u16(uint16x8_t __p0, uint16_t __p1) { - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __ret; - __ret = __rev0 * (uint16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1}; - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vmulq_n_f32(float32x4_t __p0, float32_t __p1) { - float32x4_t __ret; - __ret = __p0 * (float32x4_t) {__p1, __p1, __p1, __p1}; - return __ret; -} -#else -__ai float32x4_t vmulq_n_f32(float32x4_t __p0, float32_t __p1) { - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float32x4_t __ret; - __ret = __rev0 * (float32x4_t) {__p1, __p1, __p1, __p1}; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vmulq_n_s32(int32x4_t __p0, int32_t __p1) { - int32x4_t __ret; - __ret = __p0 * (int32x4_t) {__p1, __p1, __p1, __p1}; - return __ret; -} -#else -__ai int32x4_t vmulq_n_s32(int32x4_t __p0, int32_t __p1) { - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __ret; - __ret = __rev0 * (int32x4_t) {__p1, __p1, __p1, __p1}; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vmulq_n_s16(int16x8_t __p0, int16_t __p1) { - int16x8_t __ret; - __ret = __p0 * (int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1}; - return __ret; -} -#else -__ai int16x8_t vmulq_n_s16(int16x8_t __p0, int16_t __p1) { - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __ret; - __ret = __rev0 * (int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1}; - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vmul_n_u32(uint32x2_t __p0, uint32_t __p1) { - uint32x2_t __ret; - __ret = __p0 * (uint32x2_t) {__p1, __p1}; - return __ret; -} -#else -__ai uint32x2_t vmul_n_u32(uint32x2_t __p0, uint32_t __p1) { - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint32x2_t __ret; - __ret = __rev0 * (uint32x2_t) {__p1, __p1}; - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vmul_n_u16(uint16x4_t __p0, uint16_t __p1) { - uint16x4_t __ret; - __ret = __p0 * (uint16x4_t) {__p1, __p1, __p1, __p1}; - return __ret; -} -#else -__ai uint16x4_t vmul_n_u16(uint16x4_t __p0, uint16_t __p1) { - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint16x4_t __ret; - __ret = __rev0 * (uint16x4_t) {__p1, __p1, __p1, __p1}; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vmul_n_f32(float32x2_t __p0, float32_t __p1) { - float32x2_t __ret; - __ret = __p0 * (float32x2_t) {__p1, __p1}; - return __ret; -} -#else -__ai float32x2_t 
vmul_n_f32(float32x2_t __p0, float32_t __p1) { - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float32x2_t __ret; - __ret = __rev0 * (float32x2_t) {__p1, __p1}; - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vmul_n_s32(int32x2_t __p0, int32_t __p1) { - int32x2_t __ret; - __ret = __p0 * (int32x2_t) {__p1, __p1}; - return __ret; -} -#else -__ai int32x2_t vmul_n_s32(int32x2_t __p0, int32_t __p1) { - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __ret; - __ret = __rev0 * (int32x2_t) {__p1, __p1}; - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vmul_n_s16(int16x4_t __p0, int16_t __p1) { - int16x4_t __ret; - __ret = __p0 * (int16x4_t) {__p1, __p1, __p1, __p1}; - return __ret; -} -#else -__ai int16x4_t vmul_n_s16(int16x4_t __p0, int16_t __p1) { - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __ret; - __ret = __rev0 * (int16x4_t) {__p1, __p1, __p1, __p1}; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly16x8_t vmull_p8(poly8x8_t __p0, poly8x8_t __p1) { - poly16x8_t __ret; - __ret = (poly16x8_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 37); - return __ret; -} -#else -__ai poly16x8_t vmull_p8(poly8x8_t __p0, poly8x8_t __p1) { - poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - poly16x8_t __ret; - __ret = (poly16x8_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 37); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -__ai poly16x8_t __noswap_vmull_p8(poly8x8_t __p0, poly8x8_t __p1) { - poly16x8_t __ret; - __ret = (poly16x8_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 37); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vmull_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 49); - return __ret; -} -#else -__ai uint16x8_t vmull_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 49); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -__ai uint16x8_t __noswap_vmull_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 49); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vmull_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint64x2_t __ret; - __ret = (uint64x2_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 51); - return __ret; -} -#else -__ai uint64x2_t vmull_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint64x2_t __ret; - __ret = (uint64x2_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 51); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -__ai uint64x2_t 
__noswap_vmull_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint64x2_t __ret; - __ret = (uint64x2_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 51); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vmull_u16(uint16x4_t __p0, uint16x4_t __p1) { - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 50); - return __ret; -} -#else -__ai uint32x4_t vmull_u16(uint16x4_t __p0, uint16x4_t __p1) { - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 50); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -__ai uint32x4_t __noswap_vmull_u16(uint16x4_t __p0, uint16x4_t __p1) { - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 50); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vmull_s8(int8x8_t __p0, int8x8_t __p1) { - int16x8_t __ret; - __ret = (int16x8_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 33); - return __ret; -} -#else -__ai int16x8_t vmull_s8(int8x8_t __p0, int8x8_t __p1) { - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __ret; - __ret = (int16x8_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 33); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -__ai int16x8_t __noswap_vmull_s8(int8x8_t __p0, int8x8_t __p1) { - int16x8_t __ret; - __ret = (int16x8_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 33); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vmull_s32(int32x2_t __p0, int32x2_t __p1) { - int64x2_t __ret; - __ret = (int64x2_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 35); - return __ret; -} -#else -__ai int64x2_t vmull_s32(int32x2_t __p0, int32x2_t __p1) { - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - int64x2_t __ret; - __ret = (int64x2_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 35); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -__ai int64x2_t __noswap_vmull_s32(int32x2_t __p0, int32x2_t __p1) { - int64x2_t __ret; - __ret = (int64x2_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 35); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vmull_s16(int16x4_t __p0, int16x4_t __p1) { - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 34); - return __ret; -} -#else -__ai int32x4_t vmull_s16(int16x4_t __p0, int16x4_t __p1) { - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 34); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -__ai int32x4_t __noswap_vmull_s16(int16x4_t __p0, int16x4_t __p1) { - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 34); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmull_lane_u32(__p0, __p1, __p2) __extension__ ({ \ - 
uint32x2_t __s0 = __p0; \ - uint32x2_t __s1 = __p1; \ - uint64x2_t __ret; \ - __ret = vmull_u32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \ - __ret; \ -}) -#else -#define vmull_lane_u32(__p0, __p1, __p2) __extension__ ({ \ - uint32x2_t __s0 = __p0; \ - uint32x2_t __s1 = __p1; \ - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - uint64x2_t __ret; \ - __ret = __noswap_vmull_u32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmull_lane_u16(__p0, __p1, __p2) __extension__ ({ \ - uint16x4_t __s0 = __p0; \ - uint16x4_t __s1 = __p1; \ - uint32x4_t __ret; \ - __ret = vmull_u16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \ - __ret; \ -}) -#else -#define vmull_lane_u16(__p0, __p1, __p2) __extension__ ({ \ - uint16x4_t __s0 = __p0; \ - uint16x4_t __s1 = __p1; \ - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - uint32x4_t __ret; \ - __ret = __noswap_vmull_u16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmull_lane_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x2_t __s0 = __p0; \ - int32x2_t __s1 = __p1; \ - int64x2_t __ret; \ - __ret = vmull_s32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \ - __ret; \ -}) -#else -#define vmull_lane_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x2_t __s0 = __p0; \ - int32x2_t __s1 = __p1; \ - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - int64x2_t __ret; \ - __ret = __noswap_vmull_s32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmull_lane_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x4_t __s0 = __p0; \ - int16x4_t __s1 = __p1; \ - int32x4_t __ret; \ - __ret = vmull_s16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \ - __ret; \ -}) -#else -#define vmull_lane_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x4_t __s0 = __p0; \ - int16x4_t __s1 = __p1; \ - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - int32x4_t __ret; \ - __ret = __noswap_vmull_s16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vmull_n_u32(uint32x2_t __p0, uint32_t __p1) { - uint64x2_t __ret; - __ret = (uint64x2_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)(uint32x2_t) {__p1, __p1}, 51); - return __ret; -} -#else -__ai uint64x2_t vmull_n_u32(uint32x2_t __p0, uint32_t __p1) { - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint64x2_t __ret; - __ret = (uint64x2_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)(uint32x2_t) {__p1, __p1}, 51); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -__ai uint64x2_t __noswap_vmull_n_u32(uint32x2_t __p0, uint32_t __p1) { - uint64x2_t 
__ret; - __ret = (uint64x2_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)(uint32x2_t) {__p1, __p1}, 51); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vmull_n_u16(uint16x4_t __p0, uint16_t __p1) { - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)(uint16x4_t) {__p1, __p1, __p1, __p1}, 50); - return __ret; -} -#else -__ai uint32x4_t vmull_n_u16(uint16x4_t __p0, uint16_t __p1) { - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)(uint16x4_t) {__p1, __p1, __p1, __p1}, 50); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -__ai uint32x4_t __noswap_vmull_n_u16(uint16x4_t __p0, uint16_t __p1) { - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)(uint16x4_t) {__p1, __p1, __p1, __p1}, 50); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vmull_n_s32(int32x2_t __p0, int32_t __p1) { - int64x2_t __ret; - __ret = (int64x2_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)(int32x2_t) {__p1, __p1}, 35); - return __ret; -} -#else -__ai int64x2_t vmull_n_s32(int32x2_t __p0, int32_t __p1) { - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int64x2_t __ret; - __ret = (int64x2_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)(int32x2_t) {__p1, __p1}, 35); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -__ai int64x2_t __noswap_vmull_n_s32(int32x2_t __p0, int32_t __p1) { - int64x2_t __ret; - __ret = (int64x2_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)(int32x2_t) {__p1, __p1}, 35); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vmull_n_s16(int16x4_t __p0, int16_t __p1) { - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)(int16x4_t) {__p1, __p1, __p1, __p1}, 34); - return __ret; -} -#else -__ai int32x4_t vmull_n_s16(int16x4_t __p0, int16_t __p1) { - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)(int16x4_t) {__p1, __p1, __p1, __p1}, 34); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -__ai int32x4_t __noswap_vmull_n_s16(int16x4_t __p0, int16_t __p1) { - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)(int16x4_t) {__p1, __p1, __p1, __p1}, 34); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly8x8_t vmvn_p8(poly8x8_t __p0) { - poly8x8_t __ret; - __ret = ~__p0; - return __ret; -} -#else -__ai poly8x8_t vmvn_p8(poly8x8_t __p0) { - poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x8_t __ret; - __ret = ~__rev0; - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly8x16_t vmvnq_p8(poly8x16_t __p0) { - poly8x16_t __ret; - __ret = ~__p0; - return __ret; -} -#else -__ai poly8x16_t vmvnq_p8(poly8x16_t __p0) { - poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x16_t __ret; - __ret = ~__rev0; - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vmvnq_u8(uint8x16_t __p0) { - uint8x16_t 
__ret; - __ret = ~__p0; - return __ret; -} -#else -__ai uint8x16_t vmvnq_u8(uint8x16_t __p0) { - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __ret; - __ret = ~__rev0; - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vmvnq_u32(uint32x4_t __p0) { - uint32x4_t __ret; - __ret = ~__p0; - return __ret; -} -#else -__ai uint32x4_t vmvnq_u32(uint32x4_t __p0) { - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __ret; - __ret = ~__rev0; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vmvnq_u16(uint16x8_t __p0) { - uint16x8_t __ret; - __ret = ~__p0; - return __ret; -} -#else -__ai uint16x8_t vmvnq_u16(uint16x8_t __p0) { - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __ret; - __ret = ~__rev0; - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vmvnq_s8(int8x16_t __p0) { - int8x16_t __ret; - __ret = ~__p0; - return __ret; -} -#else -__ai int8x16_t vmvnq_s8(int8x16_t __p0) { - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __ret; - __ret = ~__rev0; - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vmvnq_s32(int32x4_t __p0) { - int32x4_t __ret; - __ret = ~__p0; - return __ret; -} -#else -__ai int32x4_t vmvnq_s32(int32x4_t __p0) { - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __ret; - __ret = ~__rev0; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vmvnq_s16(int16x8_t __p0) { - int16x8_t __ret; - __ret = ~__p0; - return __ret; -} -#else -__ai int16x8_t vmvnq_s16(int16x8_t __p0) { - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __ret; - __ret = ~__rev0; - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vmvn_u8(uint8x8_t __p0) { - uint8x8_t __ret; - __ret = ~__p0; - return __ret; -} -#else -__ai uint8x8_t vmvn_u8(uint8x8_t __p0) { - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __ret; - __ret = ~__rev0; - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vmvn_u32(uint32x2_t __p0) { - uint32x2_t __ret; - __ret = ~__p0; - return __ret; -} -#else -__ai uint32x2_t vmvn_u32(uint32x2_t __p0) { - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint32x2_t __ret; - __ret = ~__rev0; - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vmvn_u16(uint16x4_t __p0) { - uint16x4_t __ret; - __ret = ~__p0; - return __ret; -} -#else -__ai uint16x4_t vmvn_u16(uint16x4_t __p0) { - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint16x4_t __ret; - __ret = ~__rev0; - __ret = 
__builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vmvn_s8(int8x8_t __p0) { - int8x8_t __ret; - __ret = ~__p0; - return __ret; -} -#else -__ai int8x8_t vmvn_s8(int8x8_t __p0) { - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __ret; - __ret = ~__rev0; - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vmvn_s32(int32x2_t __p0) { - int32x2_t __ret; - __ret = ~__p0; - return __ret; -} -#else -__ai int32x2_t vmvn_s32(int32x2_t __p0) { - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __ret; - __ret = ~__rev0; - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vmvn_s16(int16x4_t __p0) { - int16x4_t __ret; - __ret = ~__p0; - return __ret; -} -#else -__ai int16x4_t vmvn_s16(int16x4_t __p0) { - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __ret; - __ret = ~__rev0; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vnegq_s8(int8x16_t __p0) { - int8x16_t __ret; - __ret = -__p0; - return __ret; -} -#else -__ai int8x16_t vnegq_s8(int8x16_t __p0) { - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __ret; - __ret = -__rev0; - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vnegq_f32(float32x4_t __p0) { - float32x4_t __ret; - __ret = -__p0; - return __ret; -} -#else -__ai float32x4_t vnegq_f32(float32x4_t __p0) { - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float32x4_t __ret; - __ret = -__rev0; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vnegq_s32(int32x4_t __p0) { - int32x4_t __ret; - __ret = -__p0; - return __ret; -} -#else -__ai int32x4_t vnegq_s32(int32x4_t __p0) { - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __ret; - __ret = -__rev0; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vnegq_s16(int16x8_t __p0) { - int16x8_t __ret; - __ret = -__p0; - return __ret; -} -#else -__ai int16x8_t vnegq_s16(int16x8_t __p0) { - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __ret; - __ret = -__rev0; - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vneg_s8(int8x8_t __p0) { - int8x8_t __ret; - __ret = -__p0; - return __ret; -} -#else -__ai int8x8_t vneg_s8(int8x8_t __p0) { - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __ret; - __ret = -__rev0; - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vneg_f32(float32x2_t __p0) { - float32x2_t __ret; - __ret = -__p0; - return __ret; -} -#else -__ai float32x2_t vneg_f32(float32x2_t __p0) { - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float32x2_t __ret; - __ret = 
-__rev0; - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vneg_s32(int32x2_t __p0) { - int32x2_t __ret; - __ret = -__p0; - return __ret; -} -#else -__ai int32x2_t vneg_s32(int32x2_t __p0) { - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __ret; - __ret = -__rev0; - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vneg_s16(int16x4_t __p0) { - int16x4_t __ret; - __ret = -__p0; - return __ret; -} -#else -__ai int16x4_t vneg_s16(int16x4_t __p0) { - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __ret; - __ret = -__rev0; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vornq_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16_t __ret; - __ret = __p0 | ~__p1; - return __ret; -} -#else -__ai uint8x16_t vornq_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __ret; - __ret = __rev0 | ~__rev1; - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vornq_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4_t __ret; - __ret = __p0 | ~__p1; - return __ret; -} -#else -__ai uint32x4_t vornq_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint32x4_t __ret; - __ret = __rev0 | ~__rev1; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vornq_u64(uint64x2_t __p0, uint64x2_t __p1) { - uint64x2_t __ret; - __ret = __p0 | ~__p1; - return __ret; -} -#else -__ai uint64x2_t vornq_u64(uint64x2_t __p0, uint64x2_t __p1) { - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint64x2_t __ret; - __ret = __rev0 | ~__rev1; - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vornq_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint16x8_t __ret; - __ret = __p0 | ~__p1; - return __ret; -} -#else -__ai uint16x8_t vornq_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __ret; - __ret = __rev0 | ~__rev1; - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vornq_s8(int8x16_t __p0, int8x16_t __p1) { - int8x16_t __ret; - __ret = __p0 | ~__p1; - return __ret; -} -#else -__ai int8x16_t vornq_s8(int8x16_t __p0, int8x16_t __p1) { - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __ret; - __ret = __rev0 | ~__rev1; 
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vornq_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4_t __ret; - __ret = __p0 | ~__p1; - return __ret; -} -#else -__ai int32x4_t vornq_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int32x4_t __ret; - __ret = __rev0 | ~__rev1; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vornq_s64(int64x2_t __p0, int64x2_t __p1) { - int64x2_t __ret; - __ret = __p0 | ~__p1; - return __ret; -} -#else -__ai int64x2_t vornq_s64(int64x2_t __p0, int64x2_t __p1) { - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - int64x2_t __ret; - __ret = __rev0 | ~__rev1; - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vornq_s16(int16x8_t __p0, int16x8_t __p1) { - int16x8_t __ret; - __ret = __p0 | ~__p1; - return __ret; -} -#else -__ai int16x8_t vornq_s16(int16x8_t __p0, int16x8_t __p1) { - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __ret; - __ret = __rev0 | ~__rev1; - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vorn_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x8_t __ret; - __ret = __p0 | ~__p1; - return __ret; -} -#else -__ai uint8x8_t vorn_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __ret; - __ret = __rev0 | ~__rev1; - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vorn_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint32x2_t __ret; - __ret = __p0 | ~__p1; - return __ret; -} -#else -__ai uint32x2_t vorn_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint32x2_t __ret; - __ret = __rev0 | ~__rev1; - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x1_t vorn_u64(uint64x1_t __p0, uint64x1_t __p1) { - uint64x1_t __ret; - __ret = __p0 | ~__p1; - return __ret; -} -#else -__ai uint64x1_t vorn_u64(uint64x1_t __p0, uint64x1_t __p1) { - uint64x1_t __ret; - __ret = __p0 | ~__p1; - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vorn_u16(uint16x4_t __p0, uint16x4_t __p1) { - uint16x4_t __ret; - __ret = __p0 | ~__p1; - return __ret; -} -#else -__ai uint16x4_t vorn_u16(uint16x4_t __p0, uint16x4_t __p1) { - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint16x4_t __ret; - __ret = __rev0 | ~__rev1; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai 
int8x8_t vorn_s8(int8x8_t __p0, int8x8_t __p1) { - int8x8_t __ret; - __ret = __p0 | ~__p1; - return __ret; -} -#else -__ai int8x8_t vorn_s8(int8x8_t __p0, int8x8_t __p1) { - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __ret; - __ret = __rev0 | ~__rev1; - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vorn_s32(int32x2_t __p0, int32x2_t __p1) { - int32x2_t __ret; - __ret = __p0 | ~__p1; - return __ret; -} -#else -__ai int32x2_t vorn_s32(int32x2_t __p0, int32x2_t __p1) { - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - int32x2_t __ret; - __ret = __rev0 | ~__rev1; - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x1_t vorn_s64(int64x1_t __p0, int64x1_t __p1) { - int64x1_t __ret; - __ret = __p0 | ~__p1; - return __ret; -} -#else -__ai int64x1_t vorn_s64(int64x1_t __p0, int64x1_t __p1) { - int64x1_t __ret; - __ret = __p0 | ~__p1; - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vorn_s16(int16x4_t __p0, int16x4_t __p1) { - int16x4_t __ret; - __ret = __p0 | ~__p1; - return __ret; -} -#else -__ai int16x4_t vorn_s16(int16x4_t __p0, int16x4_t __p1) { - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int16x4_t __ret; - __ret = __rev0 | ~__rev1; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vorrq_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16_t __ret; - __ret = __p0 | __p1; - return __ret; -} -#else -__ai uint8x16_t vorrq_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __ret; - __ret = __rev0 | __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vorrq_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4_t __ret; - __ret = __p0 | __p1; - return __ret; -} -#else -__ai uint32x4_t vorrq_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint32x4_t __ret; - __ret = __rev0 | __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vorrq_u64(uint64x2_t __p0, uint64x2_t __p1) { - uint64x2_t __ret; - __ret = __p0 | __p1; - return __ret; -} -#else -__ai uint64x2_t vorrq_u64(uint64x2_t __p0, uint64x2_t __p1) { - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint64x2_t __ret; - __ret = __rev0 | __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vorrq_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint16x8_t __ret; - __ret = __p0 
| __p1; - return __ret; -} -#else -__ai uint16x8_t vorrq_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __ret; - __ret = __rev0 | __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vorrq_s8(int8x16_t __p0, int8x16_t __p1) { - int8x16_t __ret; - __ret = __p0 | __p1; - return __ret; -} -#else -__ai int8x16_t vorrq_s8(int8x16_t __p0, int8x16_t __p1) { - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __ret; - __ret = __rev0 | __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vorrq_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4_t __ret; - __ret = __p0 | __p1; - return __ret; -} -#else -__ai int32x4_t vorrq_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int32x4_t __ret; - __ret = __rev0 | __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vorrq_s64(int64x2_t __p0, int64x2_t __p1) { - int64x2_t __ret; - __ret = __p0 | __p1; - return __ret; -} -#else -__ai int64x2_t vorrq_s64(int64x2_t __p0, int64x2_t __p1) { - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - int64x2_t __ret; - __ret = __rev0 | __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vorrq_s16(int16x8_t __p0, int16x8_t __p1) { - int16x8_t __ret; - __ret = __p0 | __p1; - return __ret; -} -#else -__ai int16x8_t vorrq_s16(int16x8_t __p0, int16x8_t __p1) { - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __ret; - __ret = __rev0 | __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vorr_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x8_t __ret; - __ret = __p0 | __p1; - return __ret; -} -#else -__ai uint8x8_t vorr_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __ret; - __ret = __rev0 | __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vorr_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint32x2_t __ret; - __ret = __p0 | __p1; - return __ret; -} -#else -__ai uint32x2_t vorr_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint32x2_t __ret; - __ret = __rev0 | __rev1; - __ret = 
__builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x1_t vorr_u64(uint64x1_t __p0, uint64x1_t __p1) { - uint64x1_t __ret; - __ret = __p0 | __p1; - return __ret; -} -#else -__ai uint64x1_t vorr_u64(uint64x1_t __p0, uint64x1_t __p1) { - uint64x1_t __ret; - __ret = __p0 | __p1; - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vorr_u16(uint16x4_t __p0, uint16x4_t __p1) { - uint16x4_t __ret; - __ret = __p0 | __p1; - return __ret; -} -#else -__ai uint16x4_t vorr_u16(uint16x4_t __p0, uint16x4_t __p1) { - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint16x4_t __ret; - __ret = __rev0 | __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vorr_s8(int8x8_t __p0, int8x8_t __p1) { - int8x8_t __ret; - __ret = __p0 | __p1; - return __ret; -} -#else -__ai int8x8_t vorr_s8(int8x8_t __p0, int8x8_t __p1) { - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __ret; - __ret = __rev0 | __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vorr_s32(int32x2_t __p0, int32x2_t __p1) { - int32x2_t __ret; - __ret = __p0 | __p1; - return __ret; -} -#else -__ai int32x2_t vorr_s32(int32x2_t __p0, int32x2_t __p1) { - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - int32x2_t __ret; - __ret = __rev0 | __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x1_t vorr_s64(int64x1_t __p0, int64x1_t __p1) { - int64x1_t __ret; - __ret = __p0 | __p1; - return __ret; -} -#else -__ai int64x1_t vorr_s64(int64x1_t __p0, int64x1_t __p1) { - int64x1_t __ret; - __ret = __p0 | __p1; - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vorr_s16(int16x4_t __p0, int16x4_t __p1) { - int16x4_t __ret; - __ret = __p0 | __p1; - return __ret; -} -#else -__ai int16x4_t vorr_s16(int16x4_t __p0, int16x4_t __p1) { - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int16x4_t __ret; - __ret = __rev0 | __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vpadalq_u8(uint16x8_t __p0, uint8x16_t __p1) { - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vpadalq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); - return __ret; -} -#else -__ai uint16x8_t vpadalq_u8(uint16x8_t __p0, uint8x16_t __p1) { - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vpadalq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vpadalq_u32(uint64x2_t __p0, uint32x4_t __p1) { - uint64x2_t __ret; - __ret = (uint64x2_t) 
__builtin_neon_vpadalq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); - return __ret; -} -#else -__ai uint64x2_t vpadalq_u32(uint64x2_t __p0, uint32x4_t __p1) { - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint64x2_t __ret; - __ret = (uint64x2_t) __builtin_neon_vpadalq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vpadalq_u16(uint32x4_t __p0, uint16x8_t __p1) { - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vpadalq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); - return __ret; -} -#else -__ai uint32x4_t vpadalq_u16(uint32x4_t __p0, uint16x8_t __p1) { - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vpadalq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vpadalq_s8(int16x8_t __p0, int8x16_t __p1) { - int16x8_t __ret; - __ret = (int16x8_t) __builtin_neon_vpadalq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); - return __ret; -} -#else -__ai int16x8_t vpadalq_s8(int16x8_t __p0, int8x16_t __p1) { - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __ret; - __ret = (int16x8_t) __builtin_neon_vpadalq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vpadalq_s32(int64x2_t __p0, int32x4_t __p1) { - int64x2_t __ret; - __ret = (int64x2_t) __builtin_neon_vpadalq_v((int8x16_t)__p0, (int8x16_t)__p1, 35); - return __ret; -} -#else -__ai int64x2_t vpadalq_s32(int64x2_t __p0, int32x4_t __p1) { - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int64x2_t __ret; - __ret = (int64x2_t) __builtin_neon_vpadalq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vpadalq_s16(int32x4_t __p0, int16x8_t __p1) { - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vpadalq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); - return __ret; -} -#else -__ai int32x4_t vpadalq_s16(int32x4_t __p0, int16x8_t __p1) { - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vpadalq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vpadal_u8(uint16x4_t __p0, uint8x8_t __p1) { - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__p1, 17); - return __ret; -} -#else -__ai uint16x4_t vpadal_u8(uint16x4_t __p0, uint8x8_t __p1) { - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, 
__p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vpadal_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x1_t vpadal_u32(uint64x1_t __p0, uint32x2_t __p1) { - uint64x1_t __ret; - __ret = (uint64x1_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__p1, 19); - return __ret; -} -#else -__ai uint64x1_t vpadal_u32(uint64x1_t __p0, uint32x2_t __p1) { - uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint64x1_t __ret; - __ret = (uint64x1_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__rev1, 19); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vpadal_u16(uint32x2_t __p0, uint16x4_t __p1) { - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__p1, 18); - return __ret; -} -#else -__ai uint32x2_t vpadal_u16(uint32x2_t __p0, uint16x4_t __p1) { - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vpadal_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vpadal_s8(int16x4_t __p0, int8x8_t __p1) { - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__p1, 1); - return __ret; -} -#else -__ai int16x4_t vpadal_s8(int16x4_t __p0, int8x8_t __p1) { - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vpadal_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x1_t vpadal_s32(int64x1_t __p0, int32x2_t __p1) { - int64x1_t __ret; - __ret = (int64x1_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__p1, 3); - return __ret; -} -#else -__ai int64x1_t vpadal_s32(int64x1_t __p0, int32x2_t __p1) { - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - int64x1_t __ret; - __ret = (int64x1_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__rev1, 3); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vpadal_s16(int32x2_t __p0, int16x4_t __p1) { - int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__p1, 2); - return __ret; -} -#else -__ai int32x2_t vpadal_s16(int32x2_t __p0, int16x4_t __p1) { - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vpadal_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vpadd_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 16); - return __ret; -} -#else -__ai uint8x8_t vpadd_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __ret; - __ret = 
(uint8x8_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vpadd_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 18); - return __ret; -} -#else -__ai uint32x2_t vpadd_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vpadd_u16(uint16x4_t __p0, uint16x4_t __p1) { - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 17); - return __ret; -} -#else -__ai uint16x4_t vpadd_u16(uint16x4_t __p0, uint16x4_t __p1) { - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vpadd_s8(int8x8_t __p0, int8x8_t __p1) { - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 0); - return __ret; -} -#else -__ai int8x8_t vpadd_s8(int8x8_t __p0, int8x8_t __p1) { - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vpadd_f32(float32x2_t __p0, float32x2_t __p1) { - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 9); - return __ret; -} -#else -__ai float32x2_t vpadd_f32(float32x2_t __p0, float32x2_t __p1) { - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vpadd_s32(int32x2_t __p0, int32x2_t __p1) { - int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 2); - return __ret; -} -#else -__ai int32x2_t vpadd_s32(int32x2_t __p0, int32x2_t __p1) { - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vpadd_s16(int16x4_t __p0, int16x4_t __p1) { - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 1); - return __ret; -} -#else -__ai int16x4_t 
vpadd_s16(int16x4_t __p0, int16x4_t __p1) { - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vpaddlq_u8(uint8x16_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vpaddlq_v((int8x16_t)__p0, 49); - return __ret; -} -#else -__ai uint16x8_t vpaddlq_u8(uint8x16_t __p0) { - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vpaddlq_v((int8x16_t)__rev0, 49); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vpaddlq_u32(uint32x4_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t) __builtin_neon_vpaddlq_v((int8x16_t)__p0, 51); - return __ret; -} -#else -__ai uint64x2_t vpaddlq_u32(uint32x4_t __p0) { - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint64x2_t __ret; - __ret = (uint64x2_t) __builtin_neon_vpaddlq_v((int8x16_t)__rev0, 51); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vpaddlq_u16(uint16x8_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vpaddlq_v((int8x16_t)__p0, 50); - return __ret; -} -#else -__ai uint32x4_t vpaddlq_u16(uint16x8_t __p0) { - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vpaddlq_v((int8x16_t)__rev0, 50); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vpaddlq_s8(int8x16_t __p0) { - int16x8_t __ret; - __ret = (int16x8_t) __builtin_neon_vpaddlq_v((int8x16_t)__p0, 33); - return __ret; -} -#else -__ai int16x8_t vpaddlq_s8(int8x16_t __p0) { - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __ret; - __ret = (int16x8_t) __builtin_neon_vpaddlq_v((int8x16_t)__rev0, 33); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vpaddlq_s32(int32x4_t __p0) { - int64x2_t __ret; - __ret = (int64x2_t) __builtin_neon_vpaddlq_v((int8x16_t)__p0, 35); - return __ret; -} -#else -__ai int64x2_t vpaddlq_s32(int32x4_t __p0) { - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int64x2_t __ret; - __ret = (int64x2_t) __builtin_neon_vpaddlq_v((int8x16_t)__rev0, 35); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vpaddlq_s16(int16x8_t __p0) { - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vpaddlq_v((int8x16_t)__p0, 34); - return __ret; -} -#else -__ai int32x4_t vpaddlq_s16(int16x8_t __p0) { - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vpaddlq_v((int8x16_t)__rev0, 34); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vpaddl_u8(uint8x8_t __p0) { - 
uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vpaddl_v((int8x8_t)__p0, 17); - return __ret; -} -#else -__ai uint16x4_t vpaddl_u8(uint8x8_t __p0) { - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vpaddl_v((int8x8_t)__rev0, 17); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x1_t vpaddl_u32(uint32x2_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t) __builtin_neon_vpaddl_v((int8x8_t)__p0, 19); - return __ret; -} -#else -__ai uint64x1_t vpaddl_u32(uint32x2_t __p0) { - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint64x1_t __ret; - __ret = (uint64x1_t) __builtin_neon_vpaddl_v((int8x8_t)__rev0, 19); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vpaddl_u16(uint16x4_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vpaddl_v((int8x8_t)__p0, 18); - return __ret; -} -#else -__ai uint32x2_t vpaddl_u16(uint16x4_t __p0) { - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vpaddl_v((int8x8_t)__rev0, 18); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vpaddl_s8(int8x8_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vpaddl_v((int8x8_t)__p0, 1); - return __ret; -} -#else -__ai int16x4_t vpaddl_s8(int8x8_t __p0) { - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vpaddl_v((int8x8_t)__rev0, 1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x1_t vpaddl_s32(int32x2_t __p0) { - int64x1_t __ret; - __ret = (int64x1_t) __builtin_neon_vpaddl_v((int8x8_t)__p0, 3); - return __ret; -} -#else -__ai int64x1_t vpaddl_s32(int32x2_t __p0) { - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int64x1_t __ret; - __ret = (int64x1_t) __builtin_neon_vpaddl_v((int8x8_t)__rev0, 3); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vpaddl_s16(int16x4_t __p0) { - int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vpaddl_v((int8x8_t)__p0, 2); - return __ret; -} -#else -__ai int32x2_t vpaddl_s16(int16x4_t __p0) { - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vpaddl_v((int8x8_t)__rev0, 2); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vpmax_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 16); - return __ret; -} -#else -__ai uint8x8_t vpmax_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vpmax_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vpmax_v((int8x8_t)__p0, 
(int8x8_t)__p1, 18); - return __ret; -} -#else -__ai uint32x2_t vpmax_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vpmax_u16(uint16x4_t __p0, uint16x4_t __p1) { - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 17); - return __ret; -} -#else -__ai uint16x4_t vpmax_u16(uint16x4_t __p0, uint16x4_t __p1) { - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vpmax_s8(int8x8_t __p0, int8x8_t __p1) { - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 0); - return __ret; -} -#else -__ai int8x8_t vpmax_s8(int8x8_t __p0, int8x8_t __p1) { - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vpmax_f32(float32x2_t __p0, float32x2_t __p1) { - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 9); - return __ret; -} -#else -__ai float32x2_t vpmax_f32(float32x2_t __p0, float32x2_t __p1) { - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vpmax_s32(int32x2_t __p0, int32x2_t __p1) { - int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 2); - return __ret; -} -#else -__ai int32x2_t vpmax_s32(int32x2_t __p0, int32x2_t __p1) { - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vpmax_s16(int16x4_t __p0, int16x4_t __p1) { - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 1); - return __ret; -} -#else -__ai int16x4_t vpmax_s16(int16x4_t __p0, int16x4_t __p1) { - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - 
return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vpmin_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 16); - return __ret; -} -#else -__ai uint8x8_t vpmin_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vpmin_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 18); - return __ret; -} -#else -__ai uint32x2_t vpmin_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vpmin_u16(uint16x4_t __p0, uint16x4_t __p1) { - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 17); - return __ret; -} -#else -__ai uint16x4_t vpmin_u16(uint16x4_t __p0, uint16x4_t __p1) { - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vpmin_s8(int8x8_t __p0, int8x8_t __p1) { - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 0); - return __ret; -} -#else -__ai int8x8_t vpmin_s8(int8x8_t __p0, int8x8_t __p1) { - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vpmin_f32(float32x2_t __p0, float32x2_t __p1) { - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 9); - return __ret; -} -#else -__ai float32x2_t vpmin_f32(float32x2_t __p0, float32x2_t __p1) { - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vpmin_s32(int32x2_t __p0, int32x2_t __p1) { - int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 2); - return __ret; -} -#else -__ai int32x2_t vpmin_s32(int32x2_t __p0, int32x2_t __p1) { - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); 
- int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vpmin_s16(int16x4_t __p0, int16x4_t __p1) { - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 1); - return __ret; -} -#else -__ai int16x4_t vpmin_s16(int16x4_t __p0, int16x4_t __p1) { - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vqabsq_s8(int8x16_t __p0) { - int8x16_t __ret; - __ret = (int8x16_t) __builtin_neon_vqabsq_v((int8x16_t)__p0, 32); - return __ret; -} -#else -__ai int8x16_t vqabsq_s8(int8x16_t __p0) { - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __ret; - __ret = (int8x16_t) __builtin_neon_vqabsq_v((int8x16_t)__rev0, 32); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vqabsq_s32(int32x4_t __p0) { - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vqabsq_v((int8x16_t)__p0, 34); - return __ret; -} -#else -__ai int32x4_t vqabsq_s32(int32x4_t __p0) { - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vqabsq_v((int8x16_t)__rev0, 34); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vqabsq_s16(int16x8_t __p0) { - int16x8_t __ret; - __ret = (int16x8_t) __builtin_neon_vqabsq_v((int8x16_t)__p0, 33); - return __ret; -} -#else -__ai int16x8_t vqabsq_s16(int16x8_t __p0) { - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __ret; - __ret = (int16x8_t) __builtin_neon_vqabsq_v((int8x16_t)__rev0, 33); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vqabs_s8(int8x8_t __p0) { - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vqabs_v((int8x8_t)__p0, 0); - return __ret; -} -#else -__ai int8x8_t vqabs_s8(int8x8_t __p0) { - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vqabs_v((int8x8_t)__rev0, 0); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vqabs_s32(int32x2_t __p0) { - int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vqabs_v((int8x8_t)__p0, 2); - return __ret; -} -#else -__ai int32x2_t vqabs_s32(int32x2_t __p0) { - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vqabs_v((int8x8_t)__rev0, 2); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vqabs_s16(int16x4_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vqabs_v((int8x8_t)__p0, 
1); - return __ret; -} -#else -__ai int16x4_t vqabs_s16(int16x4_t __p0) { - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vqabs_v((int8x8_t)__rev0, 1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vqaddq_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16_t __ret; - __ret = (uint8x16_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); - return __ret; -} -#else -__ai uint8x16_t vqaddq_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __ret; - __ret = (uint8x16_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vqaddq_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); - return __ret; -} -#else -__ai uint32x4_t vqaddq_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vqaddq_u64(uint64x2_t __p0, uint64x2_t __p1) { - uint64x2_t __ret; - __ret = (uint64x2_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); - return __ret; -} -#else -__ai uint64x2_t vqaddq_u64(uint64x2_t __p0, uint64x2_t __p1) { - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint64x2_t __ret; - __ret = (uint64x2_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vqaddq_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); - return __ret; -} -#else -__ai uint16x8_t vqaddq_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vqaddq_s8(int8x16_t __p0, int8x16_t __p1) { - int8x16_t __ret; - __ret = (int8x16_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); - return __ret; -} -#else -__ai int8x16_t vqaddq_s8(int8x16_t __p0, int8x16_t __p1) { - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 
1, 0); - int8x16_t __ret; - __ret = (int8x16_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vqaddq_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); - return __ret; -} -#else -__ai int32x4_t vqaddq_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -__ai int32x4_t __noswap_vqaddq_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vqaddq_s64(int64x2_t __p0, int64x2_t __p1) { - int64x2_t __ret; - __ret = (int64x2_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 35); - return __ret; -} -#else -__ai int64x2_t vqaddq_s64(int64x2_t __p0, int64x2_t __p1) { - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - int64x2_t __ret; - __ret = (int64x2_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vqaddq_s16(int16x8_t __p0, int16x8_t __p1) { - int16x8_t __ret; - __ret = (int16x8_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); - return __ret; -} -#else -__ai int16x8_t vqaddq_s16(int16x8_t __p0, int16x8_t __p1) { - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __ret; - __ret = (int16x8_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -__ai int16x8_t __noswap_vqaddq_s16(int16x8_t __p0, int16x8_t __p1) { - int16x8_t __ret; - __ret = (int16x8_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vqadd_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 16); - return __ret; -} -#else -__ai uint8x8_t vqadd_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vqadd_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 18); - return __ret; -} -#else -__ai uint32x2_t vqadd_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 
0); - uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x1_t vqadd_u64(uint64x1_t __p0, uint64x1_t __p1) { - uint64x1_t __ret; - __ret = (uint64x1_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 19); - return __ret; -} -#else -__ai uint64x1_t vqadd_u64(uint64x1_t __p0, uint64x1_t __p1) { - uint64x1_t __ret; - __ret = (uint64x1_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 19); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vqadd_u16(uint16x4_t __p0, uint16x4_t __p1) { - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 17); - return __ret; -} -#else -__ai uint16x4_t vqadd_u16(uint16x4_t __p0, uint16x4_t __p1) { - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vqadd_s8(int8x8_t __p0, int8x8_t __p1) { - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 0); - return __ret; -} -#else -__ai int8x8_t vqadd_s8(int8x8_t __p0, int8x8_t __p1) { - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vqadd_s32(int32x2_t __p0, int32x2_t __p1) { - int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 2); - return __ret; -} -#else -__ai int32x2_t vqadd_s32(int32x2_t __p0, int32x2_t __p1) { - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -__ai int32x2_t __noswap_vqadd_s32(int32x2_t __p0, int32x2_t __p1) { - int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 2); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x1_t vqadd_s64(int64x1_t __p0, int64x1_t __p1) { - int64x1_t __ret; - __ret = (int64x1_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 3); - return __ret; -} -#else -__ai int64x1_t vqadd_s64(int64x1_t __p0, int64x1_t __p1) { - int64x1_t __ret; - __ret = (int64x1_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 3); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vqadd_s16(int16x4_t __p0, int16x4_t __p1) { - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 1); - return __ret; -} -#else -__ai int16x4_t vqadd_s16(int16x4_t __p0, int16x4_t __p1) { - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __rev1; __rev1 = 
__builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -__ai int16x4_t __noswap_vqadd_s16(int16x4_t __p0, int16x4_t __p1) { - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vqdmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { - int64x2_t __ret; - __ret = (int64x2_t) __builtin_neon_vqdmlal_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 35); - return __ret; -} -#else -__ai int64x2_t vqdmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); - int64x2_t __ret; - __ret = (int64x2_t) __builtin_neon_vqdmlal_v((int8x16_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 35); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -__ai int64x2_t __noswap_vqdmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { - int64x2_t __ret; - __ret = (int64x2_t) __builtin_neon_vqdmlal_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 35); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vqdmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vqdmlal_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 34); - return __ret; -} -#else -__ai int32x4_t vqdmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vqdmlal_v((int8x16_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 34); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -__ai int32x4_t __noswap_vqdmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vqdmlal_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 34); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqdmlal_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ - int64x2_t __s0 = __p0; \ - int32x2_t __s1 = __p1; \ - int32x2_t __s2 = __p2; \ - int64x2_t __ret; \ - __ret = vqdmlal_s32(__s0, __s1, __builtin_shufflevector(__s2, __s2, __p3, __p3)); \ - __ret; \ -}) -#else -#define vqdmlal_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ - int64x2_t __s0 = __p0; \ - int32x2_t __s1 = __p1; \ - int32x2_t __s2 = __p2; \ - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \ - int64x2_t __ret; \ - __ret = __noswap_vqdmlal_s32(__rev0, __rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqdmlal_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ - int32x4_t __s0 = __p0; \ - int16x4_t __s1 = __p1; \ - int16x4_t __s2 = __p2; \ - int32x4_t __ret; \ - __ret = vqdmlal_s16(__s0, __s1, 
__builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \ - __ret; \ -}) -#else -#define vqdmlal_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ - int32x4_t __s0 = __p0; \ - int16x4_t __s1 = __p1; \ - int16x4_t __s2 = __p2; \ - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ - int32x4_t __ret; \ - __ret = __noswap_vqdmlal_s16(__rev0, __rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vqdmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) { - int64x2_t __ret; - __ret = (int64x2_t) __builtin_neon_vqdmlal_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)(int32x2_t) {__p2, __p2}, 35); - return __ret; -} -#else -__ai int64x2_t vqdmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) { - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - int64x2_t __ret; - __ret = (int64x2_t) __builtin_neon_vqdmlal_v((int8x16_t)__rev0, (int8x8_t)__rev1, (int8x8_t)(int32x2_t) {__p2, __p2}, 35); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -__ai int64x2_t __noswap_vqdmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) { - int64x2_t __ret; - __ret = (int64x2_t) __builtin_neon_vqdmlal_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)(int32x2_t) {__p2, __p2}, 35); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vqdmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) { - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vqdmlal_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)(int16x4_t) {__p2, __p2, __p2, __p2}, 34); - return __ret; -} -#else -__ai int32x4_t vqdmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) { - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vqdmlal_v((int8x16_t)__rev0, (int8x8_t)__rev1, (int8x8_t)(int16x4_t) {__p2, __p2, __p2, __p2}, 34); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -__ai int32x4_t __noswap_vqdmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) { - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vqdmlal_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)(int16x4_t) {__p2, __p2, __p2, __p2}, 34); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vqdmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { - int64x2_t __ret; - __ret = (int64x2_t) __builtin_neon_vqdmlsl_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 35); - return __ret; -} -#else -__ai int64x2_t vqdmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); - int64x2_t __ret; - __ret = (int64x2_t) __builtin_neon_vqdmlsl_v((int8x16_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 35); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -__ai int64x2_t __noswap_vqdmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { - int64x2_t __ret; - __ret = 
(int64x2_t) __builtin_neon_vqdmlsl_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 35); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vqdmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vqdmlsl_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 34); - return __ret; -} -#else -__ai int32x4_t vqdmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vqdmlsl_v((int8x16_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 34); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -__ai int32x4_t __noswap_vqdmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vqdmlsl_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 34); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqdmlsl_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ - int64x2_t __s0 = __p0; \ - int32x2_t __s1 = __p1; \ - int32x2_t __s2 = __p2; \ - int64x2_t __ret; \ - __ret = vqdmlsl_s32(__s0, __s1, __builtin_shufflevector(__s2, __s2, __p3, __p3)); \ - __ret; \ -}) -#else -#define vqdmlsl_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ - int64x2_t __s0 = __p0; \ - int32x2_t __s1 = __p1; \ - int32x2_t __s2 = __p2; \ - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \ - int64x2_t __ret; \ - __ret = __noswap_vqdmlsl_s32(__rev0, __rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqdmlsl_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ - int32x4_t __s0 = __p0; \ - int16x4_t __s1 = __p1; \ - int16x4_t __s2 = __p2; \ - int32x4_t __ret; \ - __ret = vqdmlsl_s16(__s0, __s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \ - __ret; \ -}) -#else -#define vqdmlsl_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ - int32x4_t __s0 = __p0; \ - int16x4_t __s1 = __p1; \ - int16x4_t __s2 = __p2; \ - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ - int32x4_t __ret; \ - __ret = __noswap_vqdmlsl_s16(__rev0, __rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vqdmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) { - int64x2_t __ret; - __ret = (int64x2_t) __builtin_neon_vqdmlsl_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)(int32x2_t) {__p2, __p2}, 35); - return __ret; -} -#else -__ai int64x2_t vqdmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) { - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - int64x2_t __ret; - __ret = (int64x2_t) __builtin_neon_vqdmlsl_v((int8x16_t)__rev0, (int8x8_t)__rev1, (int8x8_t)(int32x2_t) 
{__p2, __p2}, 35); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -__ai int64x2_t __noswap_vqdmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) { - int64x2_t __ret; - __ret = (int64x2_t) __builtin_neon_vqdmlsl_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)(int32x2_t) {__p2, __p2}, 35); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vqdmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) { - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vqdmlsl_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)(int16x4_t) {__p2, __p2, __p2, __p2}, 34); - return __ret; -} -#else -__ai int32x4_t vqdmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) { - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vqdmlsl_v((int8x16_t)__rev0, (int8x8_t)__rev1, (int8x8_t)(int16x4_t) {__p2, __p2, __p2, __p2}, 34); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -__ai int32x4_t __noswap_vqdmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) { - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vqdmlsl_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)(int16x4_t) {__p2, __p2, __p2, __p2}, 34); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vqdmulhq_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vqdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); - return __ret; -} -#else -__ai int32x4_t vqdmulhq_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vqdmulhq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -__ai int32x4_t __noswap_vqdmulhq_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vqdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vqdmulhq_s16(int16x8_t __p0, int16x8_t __p1) { - int16x8_t __ret; - __ret = (int16x8_t) __builtin_neon_vqdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); - return __ret; -} -#else -__ai int16x8_t vqdmulhq_s16(int16x8_t __p0, int16x8_t __p1) { - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __ret; - __ret = (int16x8_t) __builtin_neon_vqdmulhq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -__ai int16x8_t __noswap_vqdmulhq_s16(int16x8_t __p0, int16x8_t __p1) { - int16x8_t __ret; - __ret = (int16x8_t) __builtin_neon_vqdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vqdmulh_s32(int32x2_t __p0, int32x2_t __p1) { - int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vqdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 2); - return __ret; -} -#else -__ai int32x2_t vqdmulh_s32(int32x2_t __p0, int32x2_t __p1) { - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - int32x2_t __ret; - __ret = (int32x2_t) 
__builtin_neon_vqdmulh_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -__ai int32x2_t __noswap_vqdmulh_s32(int32x2_t __p0, int32x2_t __p1) { - int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vqdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 2); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vqdmulh_s16(int16x4_t __p0, int16x4_t __p1) { - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vqdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 1); - return __ret; -} -#else -__ai int16x4_t vqdmulh_s16(int16x4_t __p0, int16x4_t __p1) { - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vqdmulh_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -__ai int16x4_t __noswap_vqdmulh_s16(int16x4_t __p0, int16x4_t __p1) { - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vqdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqdmulhq_lane_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x4_t __s0 = __p0; \ - int32x2_t __s1 = __p1; \ - int32x4_t __ret; \ - __ret = vqdmulhq_s32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \ - __ret; \ -}) -#else -#define vqdmulhq_lane_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x4_t __s0 = __p0; \ - int32x2_t __s1 = __p1; \ - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - int32x4_t __ret; \ - __ret = __noswap_vqdmulhq_s32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqdmulhq_lane_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x8_t __s0 = __p0; \ - int16x4_t __s1 = __p1; \ - int16x8_t __ret; \ - __ret = vqdmulhq_s16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2)); \ - __ret; \ -}) -#else -#define vqdmulhq_lane_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x8_t __s0 = __p0; \ - int16x4_t __s1 = __p1; \ - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - int16x8_t __ret; \ - __ret = __noswap_vqdmulhq_s16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2)); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqdmulh_lane_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x2_t __s0 = __p0; \ - int32x2_t __s1 = __p1; \ - int32x2_t __ret; \ - __ret = vqdmulh_s32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \ - __ret; \ -}) -#else -#define vqdmulh_lane_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x2_t __s0 = __p0; \ - int32x2_t __s1 = __p1; \ - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - int32x2_t __ret; \ - __ret = __noswap_vqdmulh_s32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define 
vqdmulh_lane_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x4_t __s0 = __p0; \ - int16x4_t __s1 = __p1; \ - int16x4_t __ret; \ - __ret = vqdmulh_s16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \ - __ret; \ -}) -#else -#define vqdmulh_lane_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x4_t __s0 = __p0; \ - int16x4_t __s1 = __p1; \ - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - int16x4_t __ret; \ - __ret = __noswap_vqdmulh_s16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vqdmulhq_n_s32(int32x4_t __p0, int32_t __p1) { - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vqdmulhq_v((int8x16_t)__p0, (int8x16_t)(int32x4_t) {__p1, __p1, __p1, __p1}, 34); - return __ret; -} -#else -__ai int32x4_t vqdmulhq_n_s32(int32x4_t __p0, int32_t __p1) { - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vqdmulhq_v((int8x16_t)__rev0, (int8x16_t)(int32x4_t) {__p1, __p1, __p1, __p1}, 34); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vqdmulhq_n_s16(int16x8_t __p0, int16_t __p1) { - int16x8_t __ret; - __ret = (int16x8_t) __builtin_neon_vqdmulhq_v((int8x16_t)__p0, (int8x16_t)(int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1}, 33); - return __ret; -} -#else -__ai int16x8_t vqdmulhq_n_s16(int16x8_t __p0, int16_t __p1) { - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __ret; - __ret = (int16x8_t) __builtin_neon_vqdmulhq_v((int8x16_t)__rev0, (int8x16_t)(int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1}, 33); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vqdmulh_n_s32(int32x2_t __p0, int32_t __p1) { - int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vqdmulh_v((int8x8_t)__p0, (int8x8_t)(int32x2_t) {__p1, __p1}, 2); - return __ret; -} -#else -__ai int32x2_t vqdmulh_n_s32(int32x2_t __p0, int32_t __p1) { - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vqdmulh_v((int8x8_t)__rev0, (int8x8_t)(int32x2_t) {__p1, __p1}, 2); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vqdmulh_n_s16(int16x4_t __p0, int16_t __p1) { - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vqdmulh_v((int8x8_t)__p0, (int8x8_t)(int16x4_t) {__p1, __p1, __p1, __p1}, 1); - return __ret; -} -#else -__ai int16x4_t vqdmulh_n_s16(int16x4_t __p0, int16_t __p1) { - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vqdmulh_v((int8x8_t)__rev0, (int8x8_t)(int16x4_t) {__p1, __p1, __p1, __p1}, 1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vqdmull_s32(int32x2_t __p0, int32x2_t __p1) { - int64x2_t __ret; - __ret = (int64x2_t) __builtin_neon_vqdmull_v((int8x8_t)__p0, (int8x8_t)__p1, 35); - return __ret; -} -#else -__ai int64x2_t vqdmull_s32(int32x2_t __p0, int32x2_t __p1) { - int32x2_t __rev0; __rev0 
= __builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - int64x2_t __ret; - __ret = (int64x2_t) __builtin_neon_vqdmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 35); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -__ai int64x2_t __noswap_vqdmull_s32(int32x2_t __p0, int32x2_t __p1) { - int64x2_t __ret; - __ret = (int64x2_t) __builtin_neon_vqdmull_v((int8x8_t)__p0, (int8x8_t)__p1, 35); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vqdmull_s16(int16x4_t __p0, int16x4_t __p1) { - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vqdmull_v((int8x8_t)__p0, (int8x8_t)__p1, 34); - return __ret; -} -#else -__ai int32x4_t vqdmull_s16(int16x4_t __p0, int16x4_t __p1) { - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vqdmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 34); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -__ai int32x4_t __noswap_vqdmull_s16(int16x4_t __p0, int16x4_t __p1) { - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vqdmull_v((int8x8_t)__p0, (int8x8_t)__p1, 34); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqdmull_lane_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x2_t __s0 = __p0; \ - int32x2_t __s1 = __p1; \ - int64x2_t __ret; \ - __ret = vqdmull_s32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \ - __ret; \ -}) -#else -#define vqdmull_lane_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x2_t __s0 = __p0; \ - int32x2_t __s1 = __p1; \ - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - int64x2_t __ret; \ - __ret = __noswap_vqdmull_s32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqdmull_lane_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x4_t __s0 = __p0; \ - int16x4_t __s1 = __p1; \ - int32x4_t __ret; \ - __ret = vqdmull_s16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \ - __ret; \ -}) -#else -#define vqdmull_lane_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x4_t __s0 = __p0; \ - int16x4_t __s1 = __p1; \ - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - int32x4_t __ret; \ - __ret = __noswap_vqdmull_s16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vqdmull_n_s32(int32x2_t __p0, int32_t __p1) { - int64x2_t __ret; - __ret = (int64x2_t) __builtin_neon_vqdmull_v((int8x8_t)__p0, (int8x8_t)(int32x2_t) {__p1, __p1}, 35); - return __ret; -} -#else -__ai int64x2_t vqdmull_n_s32(int32x2_t __p0, int32_t __p1) { - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int64x2_t __ret; - __ret = (int64x2_t) __builtin_neon_vqdmull_v((int8x8_t)__rev0, (int8x8_t)(int32x2_t) {__p1, __p1}, 35); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -__ai int64x2_t __noswap_vqdmull_n_s32(int32x2_t __p0, int32_t __p1) { - int64x2_t __ret; - __ret = (int64x2_t) __builtin_neon_vqdmull_v((int8x8_t)__p0, 
(int8x8_t)(int32x2_t) {__p1, __p1}, 35); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vqdmull_n_s16(int16x4_t __p0, int16_t __p1) { - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vqdmull_v((int8x8_t)__p0, (int8x8_t)(int16x4_t) {__p1, __p1, __p1, __p1}, 34); - return __ret; -} -#else -__ai int32x4_t vqdmull_n_s16(int16x4_t __p0, int16_t __p1) { - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vqdmull_v((int8x8_t)__rev0, (int8x8_t)(int16x4_t) {__p1, __p1, __p1, __p1}, 34); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -__ai int32x4_t __noswap_vqdmull_n_s16(int16x4_t __p0, int16_t __p1) { - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vqdmull_v((int8x8_t)__p0, (int8x8_t)(int16x4_t) {__p1, __p1, __p1, __p1}, 34); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vqmovn_u32(uint32x4_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 17); - return __ret; -} -#else -__ai uint16x4_t vqmovn_u32(uint32x4_t __p0) { - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vqmovn_v((int8x16_t)__rev0, 17); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -__ai uint16x4_t __noswap_vqmovn_u32(uint32x4_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 17); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vqmovn_u64(uint64x2_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 18); - return __ret; -} -#else -__ai uint32x2_t vqmovn_u64(uint64x2_t __p0) { - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vqmovn_v((int8x16_t)__rev0, 18); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -__ai uint32x2_t __noswap_vqmovn_u64(uint64x2_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 18); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vqmovn_u16(uint16x8_t __p0) { - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 16); - return __ret; -} -#else -__ai uint8x8_t vqmovn_u16(uint16x8_t __p0) { - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vqmovn_v((int8x16_t)__rev0, 16); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -__ai uint8x8_t __noswap_vqmovn_u16(uint16x8_t __p0) { - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 16); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vqmovn_s32(int32x4_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 1); - return __ret; -} -#else -__ai int16x4_t vqmovn_s32(int32x4_t __p0) { - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vqmovn_v((int8x16_t)__rev0, 1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -__ai int16x4_t __noswap_vqmovn_s32(int32x4_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 1); - return __ret; -} -#endif - -#ifdef 
__LITTLE_ENDIAN__ -__ai int32x2_t vqmovn_s64(int64x2_t __p0) { - int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 2); - return __ret; -} -#else -__ai int32x2_t vqmovn_s64(int64x2_t __p0) { - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vqmovn_v((int8x16_t)__rev0, 2); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -__ai int32x2_t __noswap_vqmovn_s64(int64x2_t __p0) { - int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 2); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vqmovn_s16(int16x8_t __p0) { - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 0); - return __ret; -} -#else -__ai int8x8_t vqmovn_s16(int16x8_t __p0) { - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vqmovn_v((int8x16_t)__rev0, 0); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -__ai int8x8_t __noswap_vqmovn_s16(int16x8_t __p0) { - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vqmovun_s32(int32x4_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vqmovun_v((int8x16_t)__p0, 17); - return __ret; -} -#else -__ai uint16x4_t vqmovun_s32(int32x4_t __p0) { - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vqmovun_v((int8x16_t)__rev0, 17); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -__ai uint16x4_t __noswap_vqmovun_s32(int32x4_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vqmovun_v((int8x16_t)__p0, 17); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vqmovun_s64(int64x2_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vqmovun_v((int8x16_t)__p0, 18); - return __ret; -} -#else -__ai uint32x2_t vqmovun_s64(int64x2_t __p0) { - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vqmovun_v((int8x16_t)__rev0, 18); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -__ai uint32x2_t __noswap_vqmovun_s64(int64x2_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vqmovun_v((int8x16_t)__p0, 18); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vqmovun_s16(int16x8_t __p0) { - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vqmovun_v((int8x16_t)__p0, 16); - return __ret; -} -#else -__ai uint8x8_t vqmovun_s16(int16x8_t __p0) { - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vqmovun_v((int8x16_t)__rev0, 16); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -__ai uint8x8_t __noswap_vqmovun_s16(int16x8_t __p0) { - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vqmovun_v((int8x16_t)__p0, 16); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vqnegq_s8(int8x16_t __p0) { - int8x16_t __ret; - __ret = (int8x16_t) __builtin_neon_vqnegq_v((int8x16_t)__p0, 32); - return __ret; -} -#else -__ai int8x16_t vqnegq_s8(int8x16_t __p0) { - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, 
__p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __ret; - __ret = (int8x16_t) __builtin_neon_vqnegq_v((int8x16_t)__rev0, 32); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vqnegq_s32(int32x4_t __p0) { - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vqnegq_v((int8x16_t)__p0, 34); - return __ret; -} -#else -__ai int32x4_t vqnegq_s32(int32x4_t __p0) { - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vqnegq_v((int8x16_t)__rev0, 34); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vqnegq_s16(int16x8_t __p0) { - int16x8_t __ret; - __ret = (int16x8_t) __builtin_neon_vqnegq_v((int8x16_t)__p0, 33); - return __ret; -} -#else -__ai int16x8_t vqnegq_s16(int16x8_t __p0) { - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __ret; - __ret = (int16x8_t) __builtin_neon_vqnegq_v((int8x16_t)__rev0, 33); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vqneg_s8(int8x8_t __p0) { - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vqneg_v((int8x8_t)__p0, 0); - return __ret; -} -#else -__ai int8x8_t vqneg_s8(int8x8_t __p0) { - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vqneg_v((int8x8_t)__rev0, 0); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vqneg_s32(int32x2_t __p0) { - int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vqneg_v((int8x8_t)__p0, 2); - return __ret; -} -#else -__ai int32x2_t vqneg_s32(int32x2_t __p0) { - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vqneg_v((int8x8_t)__rev0, 2); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vqneg_s16(int16x4_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vqneg_v((int8x8_t)__p0, 1); - return __ret; -} -#else -__ai int16x4_t vqneg_s16(int16x4_t __p0) { - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vqneg_v((int8x8_t)__rev0, 1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vqrdmulhq_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); - return __ret; -} -#else -__ai int32x4_t vqrdmulhq_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -__ai int32x4_t __noswap_vqrdmulhq_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); - return __ret; -} -#endif 
- -#ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vqrdmulhq_s16(int16x8_t __p0, int16x8_t __p1) { - int16x8_t __ret; - __ret = (int16x8_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); - return __ret; -} -#else -__ai int16x8_t vqrdmulhq_s16(int16x8_t __p0, int16x8_t __p1) { - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __ret; - __ret = (int16x8_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -__ai int16x8_t __noswap_vqrdmulhq_s16(int16x8_t __p0, int16x8_t __p1) { - int16x8_t __ret; - __ret = (int16x8_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vqrdmulh_s32(int32x2_t __p0, int32x2_t __p1) { - int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vqrdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 2); - return __ret; -} -#else -__ai int32x2_t vqrdmulh_s32(int32x2_t __p0, int32x2_t __p1) { - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vqrdmulh_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -__ai int32x2_t __noswap_vqrdmulh_s32(int32x2_t __p0, int32x2_t __p1) { - int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vqrdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 2); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vqrdmulh_s16(int16x4_t __p0, int16x4_t __p1) { - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vqrdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 1); - return __ret; -} -#else -__ai int16x4_t vqrdmulh_s16(int16x4_t __p0, int16x4_t __p1) { - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vqrdmulh_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -__ai int16x4_t __noswap_vqrdmulh_s16(int16x4_t __p0, int16x4_t __p1) { - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vqrdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqrdmulhq_lane_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x4_t __s0 = __p0; \ - int32x2_t __s1 = __p1; \ - int32x4_t __ret; \ - __ret = vqrdmulhq_s32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \ - __ret; \ -}) -#else -#define vqrdmulhq_lane_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x4_t __s0 = __p0; \ - int32x2_t __s1 = __p1; \ - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - int32x4_t __ret; \ - __ret = __noswap_vqrdmulhq_s32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqrdmulhq_lane_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x8_t __s0 = __p0; \ - int16x4_t __s1 = __p1; \ - int16x8_t __ret; \ - __ret = vqrdmulhq_s16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2, __p2, 
__p2, __p2, __p2)); \ - __ret; \ -}) -#else -#define vqrdmulhq_lane_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x8_t __s0 = __p0; \ - int16x4_t __s1 = __p1; \ - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - int16x8_t __ret; \ - __ret = __noswap_vqrdmulhq_s16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2)); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqrdmulh_lane_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x2_t __s0 = __p0; \ - int32x2_t __s1 = __p1; \ - int32x2_t __ret; \ - __ret = vqrdmulh_s32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \ - __ret; \ -}) -#else -#define vqrdmulh_lane_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x2_t __s0 = __p0; \ - int32x2_t __s1 = __p1; \ - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - int32x2_t __ret; \ - __ret = __noswap_vqrdmulh_s32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqrdmulh_lane_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x4_t __s0 = __p0; \ - int16x4_t __s1 = __p1; \ - int16x4_t __ret; \ - __ret = vqrdmulh_s16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \ - __ret; \ -}) -#else -#define vqrdmulh_lane_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x4_t __s0 = __p0; \ - int16x4_t __s1 = __p1; \ - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - int16x4_t __ret; \ - __ret = __noswap_vqrdmulh_s16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vqrdmulhq_n_s32(int32x4_t __p0, int32_t __p1) { - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__p0, (int8x16_t)(int32x4_t) {__p1, __p1, __p1, __p1}, 34); - return __ret; -} -#else -__ai int32x4_t vqrdmulhq_n_s32(int32x4_t __p0, int32_t __p1) { - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__rev0, (int8x16_t)(int32x4_t) {__p1, __p1, __p1, __p1}, 34); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vqrdmulhq_n_s16(int16x8_t __p0, int16_t __p1) { - int16x8_t __ret; - __ret = (int16x8_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__p0, (int8x16_t)(int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1}, 33); - return __ret; -} -#else -__ai int16x8_t vqrdmulhq_n_s16(int16x8_t __p0, int16_t __p1) { - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __ret; - __ret = (int16x8_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__rev0, (int8x16_t)(int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1}, 33); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vqrdmulh_n_s32(int32x2_t __p0, int32_t __p1) { - int32x2_t __ret; - __ret = (int32x2_t) 
__builtin_neon_vqrdmulh_v((int8x8_t)__p0, (int8x8_t)(int32x2_t) {__p1, __p1}, 2); - return __ret; -} -#else -__ai int32x2_t vqrdmulh_n_s32(int32x2_t __p0, int32_t __p1) { - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vqrdmulh_v((int8x8_t)__rev0, (int8x8_t)(int32x2_t) {__p1, __p1}, 2); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vqrdmulh_n_s16(int16x4_t __p0, int16_t __p1) { - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vqrdmulh_v((int8x8_t)__p0, (int8x8_t)(int16x4_t) {__p1, __p1, __p1, __p1}, 1); - return __ret; -} -#else -__ai int16x4_t vqrdmulh_n_s16(int16x4_t __p0, int16_t __p1) { - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vqrdmulh_v((int8x8_t)__rev0, (int8x8_t)(int16x4_t) {__p1, __p1, __p1, __p1}, 1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vqrshlq_u8(uint8x16_t __p0, int8x16_t __p1) { - uint8x16_t __ret; - __ret = (uint8x16_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); - return __ret; -} -#else -__ai uint8x16_t vqrshlq_u8(uint8x16_t __p0, int8x16_t __p1) { - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __ret; - __ret = (uint8x16_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vqrshlq_u32(uint32x4_t __p0, int32x4_t __p1) { - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); - return __ret; -} -#else -__ai uint32x4_t vqrshlq_u32(uint32x4_t __p0, int32x4_t __p1) { - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vqrshlq_u64(uint64x2_t __p0, int64x2_t __p1) { - uint64x2_t __ret; - __ret = (uint64x2_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); - return __ret; -} -#else -__ai uint64x2_t vqrshlq_u64(uint64x2_t __p0, int64x2_t __p1) { - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint64x2_t __ret; - __ret = (uint64x2_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vqrshlq_u16(uint16x8_t __p0, int16x8_t __p1) { - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); - return __ret; -} -#else -__ai uint16x8_t vqrshlq_u16(uint16x8_t __p0, int16x8_t __p1) { - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 
7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vqrshlq_s8(int8x16_t __p0, int8x16_t __p1) { - int8x16_t __ret; - __ret = (int8x16_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); - return __ret; -} -#else -__ai int8x16_t vqrshlq_s8(int8x16_t __p0, int8x16_t __p1) { - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __ret; - __ret = (int8x16_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vqrshlq_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); - return __ret; -} -#else -__ai int32x4_t vqrshlq_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vqrshlq_s64(int64x2_t __p0, int64x2_t __p1) { - int64x2_t __ret; - __ret = (int64x2_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 35); - return __ret; -} -#else -__ai int64x2_t vqrshlq_s64(int64x2_t __p0, int64x2_t __p1) { - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - int64x2_t __ret; - __ret = (int64x2_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vqrshlq_s16(int16x8_t __p0, int16x8_t __p1) { - int16x8_t __ret; - __ret = (int16x8_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); - return __ret; -} -#else -__ai int16x8_t vqrshlq_s16(int16x8_t __p0, int16x8_t __p1) { - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __ret; - __ret = (int16x8_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vqrshl_u8(uint8x8_t __p0, int8x8_t __p1) { - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 16); - return __ret; -} -#else -__ai uint8x8_t vqrshl_u8(uint8x8_t __p0, int8x8_t __p1) { - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vqrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); 
- return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vqrshl_u32(uint32x2_t __p0, int32x2_t __p1) { - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 18); - return __ret; -} -#else -__ai uint32x2_t vqrshl_u32(uint32x2_t __p0, int32x2_t __p1) { - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vqrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x1_t vqrshl_u64(uint64x1_t __p0, int64x1_t __p1) { - uint64x1_t __ret; - __ret = (uint64x1_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 19); - return __ret; -} -#else -__ai uint64x1_t vqrshl_u64(uint64x1_t __p0, int64x1_t __p1) { - uint64x1_t __ret; - __ret = (uint64x1_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 19); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vqrshl_u16(uint16x4_t __p0, int16x4_t __p1) { - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 17); - return __ret; -} -#else -__ai uint16x4_t vqrshl_u16(uint16x4_t __p0, int16x4_t __p1) { - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vqrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vqrshl_s8(int8x8_t __p0, int8x8_t __p1) { - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 0); - return __ret; -} -#else -__ai int8x8_t vqrshl_s8(int8x8_t __p0, int8x8_t __p1) { - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vqrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vqrshl_s32(int32x2_t __p0, int32x2_t __p1) { - int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 2); - return __ret; -} -#else -__ai int32x2_t vqrshl_s32(int32x2_t __p0, int32x2_t __p1) { - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vqrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x1_t vqrshl_s64(int64x1_t __p0, int64x1_t __p1) { - int64x1_t __ret; - __ret = (int64x1_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 3); - return __ret; -} -#else -__ai int64x1_t vqrshl_s64(int64x1_t __p0, int64x1_t __p1) { - int64x1_t __ret; - __ret = (int64x1_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 3); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vqrshl_s16(int16x4_t __p0, int16x4_t __p1) { - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 1); - 
return __ret; -} -#else -__ai int16x4_t vqrshl_s16(int16x4_t __p0, int16x4_t __p1) { - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vqrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqrshrn_n_u32(__p0, __p1) __extension__ ({ \ - uint32x4_t __s0 = __p0; \ - uint16x4_t __ret; \ - __ret = (uint16x4_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 17); \ - __ret; \ -}) -#else -#define vqrshrn_n_u32(__p0, __p1) __extension__ ({ \ - uint32x4_t __s0 = __p0; \ - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - uint16x4_t __ret; \ - __ret = (uint16x4_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__rev0, __p1, 17); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#define __noswap_vqrshrn_n_u32(__p0, __p1) __extension__ ({ \ - uint32x4_t __s0 = __p0; \ - uint16x4_t __ret; \ - __ret = (uint16x4_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 17); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqrshrn_n_u64(__p0, __p1) __extension__ ({ \ - uint64x2_t __s0 = __p0; \ - uint32x2_t __ret; \ - __ret = (uint32x2_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 18); \ - __ret; \ -}) -#else -#define vqrshrn_n_u64(__p0, __p1) __extension__ ({ \ - uint64x2_t __s0 = __p0; \ - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - uint32x2_t __ret; \ - __ret = (uint32x2_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__rev0, __p1, 18); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#define __noswap_vqrshrn_n_u64(__p0, __p1) __extension__ ({ \ - uint64x2_t __s0 = __p0; \ - uint32x2_t __ret; \ - __ret = (uint32x2_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 18); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqrshrn_n_u16(__p0, __p1) __extension__ ({ \ - uint16x8_t __s0 = __p0; \ - uint8x8_t __ret; \ - __ret = (uint8x8_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 16); \ - __ret; \ -}) -#else -#define vqrshrn_n_u16(__p0, __p1) __extension__ ({ \ - uint16x8_t __s0 = __p0; \ - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint8x8_t __ret; \ - __ret = (uint8x8_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__rev0, __p1, 16); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#define __noswap_vqrshrn_n_u16(__p0, __p1) __extension__ ({ \ - uint16x8_t __s0 = __p0; \ - uint8x8_t __ret; \ - __ret = (uint8x8_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 16); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqrshrn_n_s32(__p0, __p1) __extension__ ({ \ - int32x4_t __s0 = __p0; \ - int16x4_t __ret; \ - __ret = (int16x4_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 1); \ - __ret; \ -}) -#else -#define vqrshrn_n_s32(__p0, __p1) __extension__ ({ \ - int32x4_t __s0 = __p0; \ - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - int16x4_t __ret; \ - __ret = (int16x4_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__rev0, __p1, 1); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#define __noswap_vqrshrn_n_s32(__p0, __p1) __extension__ ({ \ - int32x4_t __s0 = __p0; \ - int16x4_t __ret; \ - __ret = (int16x4_t) 
__builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqrshrn_n_s64(__p0, __p1) __extension__ ({ \ - int64x2_t __s0 = __p0; \ - int32x2_t __ret; \ - __ret = (int32x2_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 2); \ - __ret; \ -}) -#else -#define vqrshrn_n_s64(__p0, __p1) __extension__ ({ \ - int64x2_t __s0 = __p0; \ - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - int32x2_t __ret; \ - __ret = (int32x2_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__rev0, __p1, 2); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#define __noswap_vqrshrn_n_s64(__p0, __p1) __extension__ ({ \ - int64x2_t __s0 = __p0; \ - int32x2_t __ret; \ - __ret = (int32x2_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 2); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqrshrn_n_s16(__p0, __p1) __extension__ ({ \ - int16x8_t __s0 = __p0; \ - int8x8_t __ret; \ - __ret = (int8x8_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 0); \ - __ret; \ -}) -#else -#define vqrshrn_n_s16(__p0, __p1) __extension__ ({ \ - int16x8_t __s0 = __p0; \ - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - int8x8_t __ret; \ - __ret = (int8x8_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__rev0, __p1, 0); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#define __noswap_vqrshrn_n_s16(__p0, __p1) __extension__ ({ \ - int16x8_t __s0 = __p0; \ - int8x8_t __ret; \ - __ret = (int8x8_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqrshrun_n_s32(__p0, __p1) __extension__ ({ \ - int32x4_t __s0 = __p0; \ - uint16x4_t __ret; \ - __ret = (uint16x4_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__s0, __p1, 17); \ - __ret; \ -}) -#else -#define vqrshrun_n_s32(__p0, __p1) __extension__ ({ \ - int32x4_t __s0 = __p0; \ - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - uint16x4_t __ret; \ - __ret = (uint16x4_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__rev0, __p1, 17); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#define __noswap_vqrshrun_n_s32(__p0, __p1) __extension__ ({ \ - int32x4_t __s0 = __p0; \ - uint16x4_t __ret; \ - __ret = (uint16x4_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__s0, __p1, 17); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqrshrun_n_s64(__p0, __p1) __extension__ ({ \ - int64x2_t __s0 = __p0; \ - uint32x2_t __ret; \ - __ret = (uint32x2_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__s0, __p1, 18); \ - __ret; \ -}) -#else -#define vqrshrun_n_s64(__p0, __p1) __extension__ ({ \ - int64x2_t __s0 = __p0; \ - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - uint32x2_t __ret; \ - __ret = (uint32x2_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__rev0, __p1, 18); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#define __noswap_vqrshrun_n_s64(__p0, __p1) __extension__ ({ \ - int64x2_t __s0 = __p0; \ - uint32x2_t __ret; \ - __ret = (uint32x2_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__s0, __p1, 18); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqrshrun_n_s16(__p0, __p1) __extension__ ({ \ - int16x8_t __s0 = __p0; \ - uint8x8_t __ret; \ - __ret = (uint8x8_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__s0, __p1, 16); \ - __ret; \ -}) -#else -#define vqrshrun_n_s16(__p0, __p1) __extension__ ({ \ - int16x8_t __s0 = __p0; \ - 
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint8x8_t __ret; \ - __ret = (uint8x8_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__rev0, __p1, 16); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#define __noswap_vqrshrun_n_s16(__p0, __p1) __extension__ ({ \ - int16x8_t __s0 = __p0; \ - uint8x8_t __ret; \ - __ret = (uint8x8_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__s0, __p1, 16); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vqshlq_u8(uint8x16_t __p0, int8x16_t __p1) { - uint8x16_t __ret; - __ret = (uint8x16_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); - return __ret; -} -#else -__ai uint8x16_t vqshlq_u8(uint8x16_t __p0, int8x16_t __p1) { - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __ret; - __ret = (uint8x16_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vqshlq_u32(uint32x4_t __p0, int32x4_t __p1) { - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); - return __ret; -} -#else -__ai uint32x4_t vqshlq_u32(uint32x4_t __p0, int32x4_t __p1) { - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vqshlq_u64(uint64x2_t __p0, int64x2_t __p1) { - uint64x2_t __ret; - __ret = (uint64x2_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); - return __ret; -} -#else -__ai uint64x2_t vqshlq_u64(uint64x2_t __p0, int64x2_t __p1) { - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint64x2_t __ret; - __ret = (uint64x2_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vqshlq_u16(uint16x8_t __p0, int16x8_t __p1) { - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); - return __ret; -} -#else -__ai uint16x8_t vqshlq_u16(uint16x8_t __p0, int16x8_t __p1) { - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vqshlq_s8(int8x16_t __p0, int8x16_t __p1) { - int8x16_t __ret; - __ret = (int8x16_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); - return __ret; -} -#else -__ai int8x16_t vqshlq_s8(int8x16_t __p0, int8x16_t __p1) { - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 
13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __ret; - __ret = (int8x16_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vqshlq_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); - return __ret; -} -#else -__ai int32x4_t vqshlq_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vqshlq_s64(int64x2_t __p0, int64x2_t __p1) { - int64x2_t __ret; - __ret = (int64x2_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 35); - return __ret; -} -#else -__ai int64x2_t vqshlq_s64(int64x2_t __p0, int64x2_t __p1) { - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - int64x2_t __ret; - __ret = (int64x2_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vqshlq_s16(int16x8_t __p0, int16x8_t __p1) { - int16x8_t __ret; - __ret = (int16x8_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); - return __ret; -} -#else -__ai int16x8_t vqshlq_s16(int16x8_t __p0, int16x8_t __p1) { - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __ret; - __ret = (int16x8_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vqshl_u8(uint8x8_t __p0, int8x8_t __p1) { - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 16); - return __ret; -} -#else -__ai uint8x8_t vqshl_u8(uint8x8_t __p0, int8x8_t __p1) { - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vqshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vqshl_u32(uint32x2_t __p0, int32x2_t __p1) { - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 18); - return __ret; -} -#else -__ai uint32x2_t vqshl_u32(uint32x2_t __p0, int32x2_t __p1) { - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vqshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); - __ret = __builtin_shufflevector(__ret, __ret, 
1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x1_t vqshl_u64(uint64x1_t __p0, int64x1_t __p1) { - uint64x1_t __ret; - __ret = (uint64x1_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 19); - return __ret; -} -#else -__ai uint64x1_t vqshl_u64(uint64x1_t __p0, int64x1_t __p1) { - uint64x1_t __ret; - __ret = (uint64x1_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 19); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vqshl_u16(uint16x4_t __p0, int16x4_t __p1) { - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 17); - return __ret; -} -#else -__ai uint16x4_t vqshl_u16(uint16x4_t __p0, int16x4_t __p1) { - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vqshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vqshl_s8(int8x8_t __p0, int8x8_t __p1) { - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 0); - return __ret; -} -#else -__ai int8x8_t vqshl_s8(int8x8_t __p0, int8x8_t __p1) { - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vqshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vqshl_s32(int32x2_t __p0, int32x2_t __p1) { - int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 2); - return __ret; -} -#else -__ai int32x2_t vqshl_s32(int32x2_t __p0, int32x2_t __p1) { - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vqshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x1_t vqshl_s64(int64x1_t __p0, int64x1_t __p1) { - int64x1_t __ret; - __ret = (int64x1_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 3); - return __ret; -} -#else -__ai int64x1_t vqshl_s64(int64x1_t __p0, int64x1_t __p1) { - int64x1_t __ret; - __ret = (int64x1_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 3); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vqshl_s16(int16x4_t __p0, int16x4_t __p1) { - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 1); - return __ret; -} -#else -__ai int16x4_t vqshl_s16(int16x4_t __p0, int16x4_t __p1) { - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vqshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqshlq_n_u8(__p0, __p1) __extension__ ({ \ - uint8x16_t __s0 = __p0; \ - uint8x16_t __ret; \ - __ret = (uint8x16_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 
48); \ - __ret; \ -}) -#else -#define vqshlq_n_u8(__p0, __p1) __extension__ ({ \ - uint8x16_t __s0 = __p0; \ - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint8x16_t __ret; \ - __ret = (uint8x16_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 48); \ - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqshlq_n_u32(__p0, __p1) __extension__ ({ \ - uint32x4_t __s0 = __p0; \ - uint32x4_t __ret; \ - __ret = (uint32x4_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 50); \ - __ret; \ -}) -#else -#define vqshlq_n_u32(__p0, __p1) __extension__ ({ \ - uint32x4_t __s0 = __p0; \ - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - uint32x4_t __ret; \ - __ret = (uint32x4_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 50); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqshlq_n_u64(__p0, __p1) __extension__ ({ \ - uint64x2_t __s0 = __p0; \ - uint64x2_t __ret; \ - __ret = (uint64x2_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 51); \ - __ret; \ -}) -#else -#define vqshlq_n_u64(__p0, __p1) __extension__ ({ \ - uint64x2_t __s0 = __p0; \ - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - uint64x2_t __ret; \ - __ret = (uint64x2_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 51); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqshlq_n_u16(__p0, __p1) __extension__ ({ \ - uint16x8_t __s0 = __p0; \ - uint16x8_t __ret; \ - __ret = (uint16x8_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 49); \ - __ret; \ -}) -#else -#define vqshlq_n_u16(__p0, __p1) __extension__ ({ \ - uint16x8_t __s0 = __p0; \ - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint16x8_t __ret; \ - __ret = (uint16x8_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 49); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqshlq_n_s8(__p0, __p1) __extension__ ({ \ - int8x16_t __s0 = __p0; \ - int8x16_t __ret; \ - __ret = (int8x16_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 32); \ - __ret; \ -}) -#else -#define vqshlq_n_s8(__p0, __p1) __extension__ ({ \ - int8x16_t __s0 = __p0; \ - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - int8x16_t __ret; \ - __ret = (int8x16_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 32); \ - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqshlq_n_s32(__p0, __p1) __extension__ ({ \ - int32x4_t __s0 = __p0; \ - int32x4_t __ret; \ - __ret = (int32x4_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 34); \ - __ret; \ -}) -#else -#define vqshlq_n_s32(__p0, __p1) __extension__ ({ \ - int32x4_t __s0 = __p0; \ - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - int32x4_t __ret; \ - __ret = (int32x4_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 34); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqshlq_n_s64(__p0, __p1) __extension__ ({ \ - int64x2_t 
__s0 = __p0; \ - int64x2_t __ret; \ - __ret = (int64x2_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 35); \ - __ret; \ -}) -#else -#define vqshlq_n_s64(__p0, __p1) __extension__ ({ \ - int64x2_t __s0 = __p0; \ - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - int64x2_t __ret; \ - __ret = (int64x2_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 35); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqshlq_n_s16(__p0, __p1) __extension__ ({ \ - int16x8_t __s0 = __p0; \ - int16x8_t __ret; \ - __ret = (int16x8_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 33); \ - __ret; \ -}) -#else -#define vqshlq_n_s16(__p0, __p1) __extension__ ({ \ - int16x8_t __s0 = __p0; \ - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x8_t __ret; \ - __ret = (int16x8_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 33); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqshl_n_u8(__p0, __p1) __extension__ ({ \ - uint8x8_t __s0 = __p0; \ - uint8x8_t __ret; \ - __ret = (uint8x8_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 16); \ - __ret; \ -}) -#else -#define vqshl_n_u8(__p0, __p1) __extension__ ({ \ - uint8x8_t __s0 = __p0; \ - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint8x8_t __ret; \ - __ret = (uint8x8_t) __builtin_neon_vqshl_n_v((int8x8_t)__rev0, __p1, 16); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqshl_n_u32(__p0, __p1) __extension__ ({ \ - uint32x2_t __s0 = __p0; \ - uint32x2_t __ret; \ - __ret = (uint32x2_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 18); \ - __ret; \ -}) -#else -#define vqshl_n_u32(__p0, __p1) __extension__ ({ \ - uint32x2_t __s0 = __p0; \ - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - uint32x2_t __ret; \ - __ret = (uint32x2_t) __builtin_neon_vqshl_n_v((int8x8_t)__rev0, __p1, 18); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqshl_n_u64(__p0, __p1) __extension__ ({ \ - uint64x1_t __s0 = __p0; \ - uint64x1_t __ret; \ - __ret = (uint64x1_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 19); \ - __ret; \ -}) -#else -#define vqshl_n_u64(__p0, __p1) __extension__ ({ \ - uint64x1_t __s0 = __p0; \ - uint64x1_t __ret; \ - __ret = (uint64x1_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 19); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqshl_n_u16(__p0, __p1) __extension__ ({ \ - uint16x4_t __s0 = __p0; \ - uint16x4_t __ret; \ - __ret = (uint16x4_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 17); \ - __ret; \ -}) -#else -#define vqshl_n_u16(__p0, __p1) __extension__ ({ \ - uint16x4_t __s0 = __p0; \ - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - uint16x4_t __ret; \ - __ret = (uint16x4_t) __builtin_neon_vqshl_n_v((int8x8_t)__rev0, __p1, 17); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqshl_n_s8(__p0, __p1) __extension__ ({ \ - int8x8_t __s0 = __p0; \ - int8x8_t __ret; \ - __ret = (int8x8_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 0); \ - __ret; \ -}) -#else -#define vqshl_n_s8(__p0, __p1) __extension__ ({ \ - int8x8_t __s0 = __p0; \ - int8x8_t __rev0; 
__rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - int8x8_t __ret; \ - __ret = (int8x8_t) __builtin_neon_vqshl_n_v((int8x8_t)__rev0, __p1, 0); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqshl_n_s32(__p0, __p1) __extension__ ({ \ - int32x2_t __s0 = __p0; \ - int32x2_t __ret; \ - __ret = (int32x2_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 2); \ - __ret; \ -}) -#else -#define vqshl_n_s32(__p0, __p1) __extension__ ({ \ - int32x2_t __s0 = __p0; \ - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - int32x2_t __ret; \ - __ret = (int32x2_t) __builtin_neon_vqshl_n_v((int8x8_t)__rev0, __p1, 2); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqshl_n_s64(__p0, __p1) __extension__ ({ \ - int64x1_t __s0 = __p0; \ - int64x1_t __ret; \ - __ret = (int64x1_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 3); \ - __ret; \ -}) -#else -#define vqshl_n_s64(__p0, __p1) __extension__ ({ \ - int64x1_t __s0 = __p0; \ - int64x1_t __ret; \ - __ret = (int64x1_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 3); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqshl_n_s16(__p0, __p1) __extension__ ({ \ - int16x4_t __s0 = __p0; \ - int16x4_t __ret; \ - __ret = (int16x4_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 1); \ - __ret; \ -}) -#else -#define vqshl_n_s16(__p0, __p1) __extension__ ({ \ - int16x4_t __s0 = __p0; \ - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - int16x4_t __ret; \ - __ret = (int16x4_t) __builtin_neon_vqshl_n_v((int8x8_t)__rev0, __p1, 1); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqshluq_n_s8(__p0, __p1) __extension__ ({ \ - int8x16_t __s0 = __p0; \ - uint8x16_t __ret; \ - __ret = (uint8x16_t) __builtin_neon_vqshluq_n_v((int8x16_t)__s0, __p1, 48); \ - __ret; \ -}) -#else -#define vqshluq_n_s8(__p0, __p1) __extension__ ({ \ - int8x16_t __s0 = __p0; \ - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint8x16_t __ret; \ - __ret = (uint8x16_t) __builtin_neon_vqshluq_n_v((int8x16_t)__rev0, __p1, 48); \ - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqshluq_n_s32(__p0, __p1) __extension__ ({ \ - int32x4_t __s0 = __p0; \ - uint32x4_t __ret; \ - __ret = (uint32x4_t) __builtin_neon_vqshluq_n_v((int8x16_t)__s0, __p1, 50); \ - __ret; \ -}) -#else -#define vqshluq_n_s32(__p0, __p1) __extension__ ({ \ - int32x4_t __s0 = __p0; \ - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - uint32x4_t __ret; \ - __ret = (uint32x4_t) __builtin_neon_vqshluq_n_v((int8x16_t)__rev0, __p1, 50); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqshluq_n_s64(__p0, __p1) __extension__ ({ \ - int64x2_t __s0 = __p0; \ - uint64x2_t __ret; \ - __ret = (uint64x2_t) __builtin_neon_vqshluq_n_v((int8x16_t)__s0, __p1, 51); \ - __ret; \ -}) -#else -#define vqshluq_n_s64(__p0, __p1) __extension__ ({ \ - int64x2_t __s0 = __p0; \ - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - uint64x2_t __ret; \ - __ret = (uint64x2_t) __builtin_neon_vqshluq_n_v((int8x16_t)__rev0, __p1, 51); \ 
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqshluq_n_s16(__p0, __p1) __extension__ ({ \ - int16x8_t __s0 = __p0; \ - uint16x8_t __ret; \ - __ret = (uint16x8_t) __builtin_neon_vqshluq_n_v((int8x16_t)__s0, __p1, 49); \ - __ret; \ -}) -#else -#define vqshluq_n_s16(__p0, __p1) __extension__ ({ \ - int16x8_t __s0 = __p0; \ - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint16x8_t __ret; \ - __ret = (uint16x8_t) __builtin_neon_vqshluq_n_v((int8x16_t)__rev0, __p1, 49); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqshlu_n_s8(__p0, __p1) __extension__ ({ \ - int8x8_t __s0 = __p0; \ - uint8x8_t __ret; \ - __ret = (uint8x8_t) __builtin_neon_vqshlu_n_v((int8x8_t)__s0, __p1, 16); \ - __ret; \ -}) -#else -#define vqshlu_n_s8(__p0, __p1) __extension__ ({ \ - int8x8_t __s0 = __p0; \ - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint8x8_t __ret; \ - __ret = (uint8x8_t) __builtin_neon_vqshlu_n_v((int8x8_t)__rev0, __p1, 16); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqshlu_n_s32(__p0, __p1) __extension__ ({ \ - int32x2_t __s0 = __p0; \ - uint32x2_t __ret; \ - __ret = (uint32x2_t) __builtin_neon_vqshlu_n_v((int8x8_t)__s0, __p1, 18); \ - __ret; \ -}) -#else -#define vqshlu_n_s32(__p0, __p1) __extension__ ({ \ - int32x2_t __s0 = __p0; \ - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - uint32x2_t __ret; \ - __ret = (uint32x2_t) __builtin_neon_vqshlu_n_v((int8x8_t)__rev0, __p1, 18); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqshlu_n_s64(__p0, __p1) __extension__ ({ \ - int64x1_t __s0 = __p0; \ - uint64x1_t __ret; \ - __ret = (uint64x1_t) __builtin_neon_vqshlu_n_v((int8x8_t)__s0, __p1, 19); \ - __ret; \ -}) -#else -#define vqshlu_n_s64(__p0, __p1) __extension__ ({ \ - int64x1_t __s0 = __p0; \ - uint64x1_t __ret; \ - __ret = (uint64x1_t) __builtin_neon_vqshlu_n_v((int8x8_t)__s0, __p1, 19); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqshlu_n_s16(__p0, __p1) __extension__ ({ \ - int16x4_t __s0 = __p0; \ - uint16x4_t __ret; \ - __ret = (uint16x4_t) __builtin_neon_vqshlu_n_v((int8x8_t)__s0, __p1, 17); \ - __ret; \ -}) -#else -#define vqshlu_n_s16(__p0, __p1) __extension__ ({ \ - int16x4_t __s0 = __p0; \ - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - uint16x4_t __ret; \ - __ret = (uint16x4_t) __builtin_neon_vqshlu_n_v((int8x8_t)__rev0, __p1, 17); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqshrn_n_u32(__p0, __p1) __extension__ ({ \ - uint32x4_t __s0 = __p0; \ - uint16x4_t __ret; \ - __ret = (uint16x4_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 17); \ - __ret; \ -}) -#else -#define vqshrn_n_u32(__p0, __p1) __extension__ ({ \ - uint32x4_t __s0 = __p0; \ - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - uint16x4_t __ret; \ - __ret = (uint16x4_t) __builtin_neon_vqshrn_n_v((int8x16_t)__rev0, __p1, 17); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#define __noswap_vqshrn_n_u32(__p0, __p1) __extension__ ({ \ - uint32x4_t __s0 = __p0; \ - uint16x4_t __ret; \ - __ret = 
(uint16x4_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 17); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqshrn_n_u64(__p0, __p1) __extension__ ({ \ - uint64x2_t __s0 = __p0; \ - uint32x2_t __ret; \ - __ret = (uint32x2_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 18); \ - __ret; \ -}) -#else -#define vqshrn_n_u64(__p0, __p1) __extension__ ({ \ - uint64x2_t __s0 = __p0; \ - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - uint32x2_t __ret; \ - __ret = (uint32x2_t) __builtin_neon_vqshrn_n_v((int8x16_t)__rev0, __p1, 18); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#define __noswap_vqshrn_n_u64(__p0, __p1) __extension__ ({ \ - uint64x2_t __s0 = __p0; \ - uint32x2_t __ret; \ - __ret = (uint32x2_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 18); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqshrn_n_u16(__p0, __p1) __extension__ ({ \ - uint16x8_t __s0 = __p0; \ - uint8x8_t __ret; \ - __ret = (uint8x8_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 16); \ - __ret; \ -}) -#else -#define vqshrn_n_u16(__p0, __p1) __extension__ ({ \ - uint16x8_t __s0 = __p0; \ - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint8x8_t __ret; \ - __ret = (uint8x8_t) __builtin_neon_vqshrn_n_v((int8x16_t)__rev0, __p1, 16); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#define __noswap_vqshrn_n_u16(__p0, __p1) __extension__ ({ \ - uint16x8_t __s0 = __p0; \ - uint8x8_t __ret; \ - __ret = (uint8x8_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 16); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqshrn_n_s32(__p0, __p1) __extension__ ({ \ - int32x4_t __s0 = __p0; \ - int16x4_t __ret; \ - __ret = (int16x4_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 1); \ - __ret; \ -}) -#else -#define vqshrn_n_s32(__p0, __p1) __extension__ ({ \ - int32x4_t __s0 = __p0; \ - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - int16x4_t __ret; \ - __ret = (int16x4_t) __builtin_neon_vqshrn_n_v((int8x16_t)__rev0, __p1, 1); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#define __noswap_vqshrn_n_s32(__p0, __p1) __extension__ ({ \ - int32x4_t __s0 = __p0; \ - int16x4_t __ret; \ - __ret = (int16x4_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqshrn_n_s64(__p0, __p1) __extension__ ({ \ - int64x2_t __s0 = __p0; \ - int32x2_t __ret; \ - __ret = (int32x2_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 2); \ - __ret; \ -}) -#else -#define vqshrn_n_s64(__p0, __p1) __extension__ ({ \ - int64x2_t __s0 = __p0; \ - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - int32x2_t __ret; \ - __ret = (int32x2_t) __builtin_neon_vqshrn_n_v((int8x16_t)__rev0, __p1, 2); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#define __noswap_vqshrn_n_s64(__p0, __p1) __extension__ ({ \ - int64x2_t __s0 = __p0; \ - int32x2_t __ret; \ - __ret = (int32x2_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 2); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqshrn_n_s16(__p0, __p1) __extension__ ({ \ - int16x8_t __s0 = __p0; \ - int8x8_t __ret; \ - __ret = (int8x8_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 0); \ - __ret; \ -}) -#else -#define vqshrn_n_s16(__p0, __p1) __extension__ ({ \ - int16x8_t __s0 = __p0; \ - int16x8_t __rev0; __rev0 = 
__builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - int8x8_t __ret; \ - __ret = (int8x8_t) __builtin_neon_vqshrn_n_v((int8x16_t)__rev0, __p1, 0); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#define __noswap_vqshrn_n_s16(__p0, __p1) __extension__ ({ \ - int16x8_t __s0 = __p0; \ - int8x8_t __ret; \ - __ret = (int8x8_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqshrun_n_s32(__p0, __p1) __extension__ ({ \ - int32x4_t __s0 = __p0; \ - uint16x4_t __ret; \ - __ret = (uint16x4_t) __builtin_neon_vqshrun_n_v((int8x16_t)__s0, __p1, 17); \ - __ret; \ -}) -#else -#define vqshrun_n_s32(__p0, __p1) __extension__ ({ \ - int32x4_t __s0 = __p0; \ - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - uint16x4_t __ret; \ - __ret = (uint16x4_t) __builtin_neon_vqshrun_n_v((int8x16_t)__rev0, __p1, 17); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#define __noswap_vqshrun_n_s32(__p0, __p1) __extension__ ({ \ - int32x4_t __s0 = __p0; \ - uint16x4_t __ret; \ - __ret = (uint16x4_t) __builtin_neon_vqshrun_n_v((int8x16_t)__s0, __p1, 17); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqshrun_n_s64(__p0, __p1) __extension__ ({ \ - int64x2_t __s0 = __p0; \ - uint32x2_t __ret; \ - __ret = (uint32x2_t) __builtin_neon_vqshrun_n_v((int8x16_t)__s0, __p1, 18); \ - __ret; \ -}) -#else -#define vqshrun_n_s64(__p0, __p1) __extension__ ({ \ - int64x2_t __s0 = __p0; \ - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - uint32x2_t __ret; \ - __ret = (uint32x2_t) __builtin_neon_vqshrun_n_v((int8x16_t)__rev0, __p1, 18); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#define __noswap_vqshrun_n_s64(__p0, __p1) __extension__ ({ \ - int64x2_t __s0 = __p0; \ - uint32x2_t __ret; \ - __ret = (uint32x2_t) __builtin_neon_vqshrun_n_v((int8x16_t)__s0, __p1, 18); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqshrun_n_s16(__p0, __p1) __extension__ ({ \ - int16x8_t __s0 = __p0; \ - uint8x8_t __ret; \ - __ret = (uint8x8_t) __builtin_neon_vqshrun_n_v((int8x16_t)__s0, __p1, 16); \ - __ret; \ -}) -#else -#define vqshrun_n_s16(__p0, __p1) __extension__ ({ \ - int16x8_t __s0 = __p0; \ - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint8x8_t __ret; \ - __ret = (uint8x8_t) __builtin_neon_vqshrun_n_v((int8x16_t)__rev0, __p1, 16); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#define __noswap_vqshrun_n_s16(__p0, __p1) __extension__ ({ \ - int16x8_t __s0 = __p0; \ - uint8x8_t __ret; \ - __ret = (uint8x8_t) __builtin_neon_vqshrun_n_v((int8x16_t)__s0, __p1, 16); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vqsubq_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16_t __ret; - __ret = (uint8x16_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); - return __ret; -} -#else -__ai uint8x16_t vqsubq_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __ret; - __ret = (uint8x16_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 
7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vqsubq_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); - return __ret; -} -#else -__ai uint32x4_t vqsubq_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vqsubq_u64(uint64x2_t __p0, uint64x2_t __p1) { - uint64x2_t __ret; - __ret = (uint64x2_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); - return __ret; -} -#else -__ai uint64x2_t vqsubq_u64(uint64x2_t __p0, uint64x2_t __p1) { - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint64x2_t __ret; - __ret = (uint64x2_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vqsubq_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); - return __ret; -} -#else -__ai uint16x8_t vqsubq_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vqsubq_s8(int8x16_t __p0, int8x16_t __p1) { - int8x16_t __ret; - __ret = (int8x16_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); - return __ret; -} -#else -__ai int8x16_t vqsubq_s8(int8x16_t __p0, int8x16_t __p1) { - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __ret; - __ret = (int8x16_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vqsubq_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); - return __ret; -} -#else -__ai int32x4_t vqsubq_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -__ai int32x4_t __noswap_vqsubq_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 
34); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vqsubq_s64(int64x2_t __p0, int64x2_t __p1) { - int64x2_t __ret; - __ret = (int64x2_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 35); - return __ret; -} -#else -__ai int64x2_t vqsubq_s64(int64x2_t __p0, int64x2_t __p1) { - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - int64x2_t __ret; - __ret = (int64x2_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vqsubq_s16(int16x8_t __p0, int16x8_t __p1) { - int16x8_t __ret; - __ret = (int16x8_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); - return __ret; -} -#else -__ai int16x8_t vqsubq_s16(int16x8_t __p0, int16x8_t __p1) { - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __ret; - __ret = (int16x8_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -__ai int16x8_t __noswap_vqsubq_s16(int16x8_t __p0, int16x8_t __p1) { - int16x8_t __ret; - __ret = (int16x8_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vqsub_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 16); - return __ret; -} -#else -__ai uint8x8_t vqsub_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vqsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vqsub_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 18); - return __ret; -} -#else -__ai uint32x2_t vqsub_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vqsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x1_t vqsub_u64(uint64x1_t __p0, uint64x1_t __p1) { - uint64x1_t __ret; - __ret = (uint64x1_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 19); - return __ret; -} -#else -__ai uint64x1_t vqsub_u64(uint64x1_t __p0, uint64x1_t __p1) { - uint64x1_t __ret; - __ret = (uint64x1_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 19); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vqsub_u16(uint16x4_t __p0, uint16x4_t __p1) { - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 17); - return __ret; -} -#else -__ai uint16x4_t vqsub_u16(uint16x4_t __p0, uint16x4_t __p1) { - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 
0); - uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vqsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vqsub_s8(int8x8_t __p0, int8x8_t __p1) { - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 0); - return __ret; -} -#else -__ai int8x8_t vqsub_s8(int8x8_t __p0, int8x8_t __p1) { - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vqsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vqsub_s32(int32x2_t __p0, int32x2_t __p1) { - int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 2); - return __ret; -} -#else -__ai int32x2_t vqsub_s32(int32x2_t __p0, int32x2_t __p1) { - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vqsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -__ai int32x2_t __noswap_vqsub_s32(int32x2_t __p0, int32x2_t __p1) { - int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 2); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x1_t vqsub_s64(int64x1_t __p0, int64x1_t __p1) { - int64x1_t __ret; - __ret = (int64x1_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 3); - return __ret; -} -#else -__ai int64x1_t vqsub_s64(int64x1_t __p0, int64x1_t __p1) { - int64x1_t __ret; - __ret = (int64x1_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 3); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vqsub_s16(int16x4_t __p0, int16x4_t __p1) { - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 1); - return __ret; -} -#else -__ai int16x4_t vqsub_s16(int16x4_t __p0, int16x4_t __p1) { - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vqsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -__ai int16x4_t __noswap_vqsub_s16(int16x4_t __p0, int16x4_t __p1) { - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vraddhn_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17); - return __ret; -} -#else -__ai uint16x4_t vraddhn_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vraddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 17); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return 
__ret; -} -__ai uint16x4_t __noswap_vraddhn_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vraddhn_u64(uint64x2_t __p0, uint64x2_t __p1) { - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18); - return __ret; -} -#else -__ai uint32x2_t vraddhn_u64(uint64x2_t __p0, uint64x2_t __p1) { - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vraddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 18); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -__ai uint32x2_t __noswap_vraddhn_u64(uint64x2_t __p0, uint64x2_t __p1) { - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vraddhn_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16); - return __ret; -} -#else -__ai uint8x8_t vraddhn_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vraddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 16); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -__ai uint8x8_t __noswap_vraddhn_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vraddhn_s32(int32x4_t __p0, int32x4_t __p1) { - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1); - return __ret; -} -#else -__ai int16x4_t vraddhn_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vraddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -__ai int16x4_t __noswap_vraddhn_s32(int32x4_t __p0, int32x4_t __p1) { - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vraddhn_s64(int64x2_t __p0, int64x2_t __p1) { - int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2); - return __ret; -} -#else -__ai int32x2_t vraddhn_s64(int64x2_t __p0, int64x2_t __p1) { - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vraddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 2); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -__ai int32x2_t __noswap_vraddhn_s64(int64x2_t __p0, int64x2_t __p1) { - int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2); - return __ret; -} 
-#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vraddhn_s16(int16x8_t __p0, int16x8_t __p1) { - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0); - return __ret; -} -#else -__ai int8x8_t vraddhn_s16(int16x8_t __p0, int16x8_t __p1) { - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vraddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 0); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -__ai int8x8_t __noswap_vraddhn_s16(int16x8_t __p0, int16x8_t __p1) { - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vrecpeq_u32(uint32x4_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vrecpeq_v((int8x16_t)__p0, 50); - return __ret; -} -#else -__ai uint32x4_t vrecpeq_u32(uint32x4_t __p0) { - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vrecpeq_v((int8x16_t)__rev0, 50); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vrecpeq_f32(float32x4_t __p0) { - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vrecpeq_v((int8x16_t)__p0, 41); - return __ret; -} -#else -__ai float32x4_t vrecpeq_f32(float32x4_t __p0) { - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vrecpeq_v((int8x16_t)__rev0, 41); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vrecpe_u32(uint32x2_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vrecpe_v((int8x8_t)__p0, 18); - return __ret; -} -#else -__ai uint32x2_t vrecpe_u32(uint32x2_t __p0) { - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vrecpe_v((int8x8_t)__rev0, 18); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vrecpe_f32(float32x2_t __p0) { - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vrecpe_v((int8x8_t)__p0, 9); - return __ret; -} -#else -__ai float32x2_t vrecpe_f32(float32x2_t __p0) { - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vrecpe_v((int8x8_t)__rev0, 9); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vrecpsq_f32(float32x4_t __p0, float32x4_t __p1) { - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vrecpsq_v((int8x16_t)__p0, (int8x16_t)__p1, 41); - return __ret; -} -#else -__ai float32x4_t vrecpsq_f32(float32x4_t __p0, float32x4_t __p1) { - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vrecpsq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x2_t 
vrecps_f32(float32x2_t __p0, float32x2_t __p1) { - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vrecps_v((int8x8_t)__p0, (int8x8_t)__p1, 9); - return __ret; -} -#else -__ai float32x2_t vrecps_f32(float32x2_t __p0, float32x2_t __p1) { - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vrecps_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly8x8_t vrev16_p8(poly8x8_t __p0) { - poly8x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6); - return __ret; -} -#else -__ai poly8x8_t vrev16_p8(poly8x8_t __p0) { - poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x8_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly8x16_t vrev16q_p8(poly8x16_t __p0) { - poly8x16_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14); - return __ret; -} -#else -__ai poly8x16_t vrev16q_p8(poly8x16_t __p0) { - poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x16_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vrev16q_u8(uint8x16_t __p0) { - uint8x16_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14); - return __ret; -} -#else -__ai uint8x16_t vrev16q_u8(uint8x16_t __p0) { - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vrev16q_s8(int8x16_t __p0) { - int8x16_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14); - return __ret; -} -#else -__ai int8x16_t vrev16q_s8(int8x16_t __p0) { - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vrev16_u8(uint8x8_t __p0) { - uint8x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6); - return __ret; -} -#else -__ai uint8x8_t vrev16_u8(uint8x8_t __p0) { - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - 
-#ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vrev16_s8(int8x8_t __p0) { - int8x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6); - return __ret; -} -#else -__ai int8x8_t vrev16_s8(int8x8_t __p0) { - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly8x8_t vrev32_p8(poly8x8_t __p0) { - poly8x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4); - return __ret; -} -#else -__ai poly8x8_t vrev32_p8(poly8x8_t __p0) { - poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x8_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly16x4_t vrev32_p16(poly16x4_t __p0) { - poly16x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2); - return __ret; -} -#else -__ai poly16x4_t vrev32_p16(poly16x4_t __p0) { - poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - poly16x4_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly8x16_t vrev32q_p8(poly8x16_t __p0) { - poly8x16_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12); - return __ret; -} -#else -__ai poly8x16_t vrev32q_p8(poly8x16_t __p0) { - poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x16_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly16x8_t vrev32q_p16(poly16x8_t __p0) { - poly16x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6); - return __ret; -} -#else -__ai poly16x8_t vrev32q_p16(poly16x8_t __p0) { - poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - poly16x8_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vrev32q_u8(uint8x16_t __p0) { - uint8x16_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12); - return __ret; -} -#else -__ai uint8x16_t vrev32q_u8(uint8x16_t __p0) { - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vrev32q_u16(uint16x8_t __p0) { - uint16x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6); - return __ret; -} -#else -__ai uint16x8_t vrev32q_u16(uint16x8_t 
__p0) { - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vrev32q_s8(int8x16_t __p0) { - int8x16_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12); - return __ret; -} -#else -__ai int8x16_t vrev32q_s8(int8x16_t __p0) { - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vrev32q_s16(int16x8_t __p0) { - int16x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6); - return __ret; -} -#else -__ai int16x8_t vrev32q_s16(int16x8_t __p0) { - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vrev32_u8(uint8x8_t __p0) { - uint8x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4); - return __ret; -} -#else -__ai uint8x8_t vrev32_u8(uint8x8_t __p0) { - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vrev32_u16(uint16x4_t __p0) { - uint16x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2); - return __ret; -} -#else -__ai uint16x4_t vrev32_u16(uint16x4_t __p0) { - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint16x4_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vrev32_s8(int8x8_t __p0) { - int8x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4); - return __ret; -} -#else -__ai int8x8_t vrev32_s8(int8x8_t __p0) { - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vrev32_s16(int16x4_t __p0) { - int16x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2); - return __ret; -} -#else -__ai int16x4_t vrev32_s16(int16x4_t __p0) { - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly8x8_t vrev64_p8(poly8x8_t __p0) { - poly8x8_t __ret; - __ret = __builtin_shufflevector(__p0, 
__p0, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#else -__ai poly8x8_t vrev64_p8(poly8x8_t __p0) { - poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x8_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly16x4_t vrev64_p16(poly16x4_t __p0) { - poly16x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - return __ret; -} -#else -__ai poly16x4_t vrev64_p16(poly16x4_t __p0) { - poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - poly16x4_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly8x16_t vrev64q_p8(poly8x16_t __p0) { - poly8x16_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8); - return __ret; -} -#else -__ai poly8x16_t vrev64q_p8(poly8x16_t __p0) { - poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x16_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev0, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly16x8_t vrev64q_p16(poly16x8_t __p0) { - poly16x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4); - return __ret; -} -#else -__ai poly16x8_t vrev64q_p16(poly16x8_t __p0) { - poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - poly16x8_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vrev64q_u8(uint8x16_t __p0) { - uint8x16_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8); - return __ret; -} -#else -__ai uint8x16_t vrev64q_u8(uint8x16_t __p0) { - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev0, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vrev64q_u32(uint32x4_t __p0) { - uint32x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2); - return __ret; -} -#else -__ai uint32x4_t vrev64q_u32(uint32x4_t __p0) { - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vrev64q_u16(uint16x8_t __p0) { - uint16x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4); - return __ret; -} -#else -__ai uint16x8_t vrev64q_u16(uint16x8_t __p0) { - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __ret; - __ret = __builtin_shufflevector(__rev0, 
__rev0, 3, 2, 1, 0, 7, 6, 5, 4); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vrev64q_s8(int8x16_t __p0) { - int8x16_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8); - return __ret; -} -#else -__ai int8x16_t vrev64q_s8(int8x16_t __p0) { - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev0, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vrev64q_f32(float32x4_t __p0) { - float32x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2); - return __ret; -} -#else -__ai float32x4_t vrev64q_f32(float32x4_t __p0) { - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float32x4_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vrev64q_s32(int32x4_t __p0) { - int32x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2); - return __ret; -} -#else -__ai int32x4_t vrev64q_s32(int32x4_t __p0) { - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vrev64q_s16(int16x8_t __p0) { - int16x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4); - return __ret; -} -#else -__ai int16x8_t vrev64q_s16(int16x8_t __p0) { - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vrev64_u8(uint8x8_t __p0) { - uint8x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#else -__ai uint8x8_t vrev64_u8(uint8x8_t __p0) { - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vrev64_u32(uint32x2_t __p0) { - uint32x2_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 1, 0); - return __ret; -} -#else -__ai uint32x2_t vrev64_u32(uint32x2_t __p0) { - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint32x2_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vrev64_u16(uint16x4_t __p0) { - uint16x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - return __ret; -} -#else -__ai uint16x4_t vrev64_u16(uint16x4_t __p0) { - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint16x4_t __ret; - __ret = 
__builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vrev64_s8(int8x8_t __p0) { - int8x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#else -__ai int8x8_t vrev64_s8(int8x8_t __p0) { - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vrev64_f32(float32x2_t __p0) { - float32x2_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 1, 0); - return __ret; -} -#else -__ai float32x2_t vrev64_f32(float32x2_t __p0) { - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float32x2_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vrev64_s32(int32x2_t __p0) { - int32x2_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 1, 0); - return __ret; -} -#else -__ai int32x2_t vrev64_s32(int32x2_t __p0) { - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vrev64_s16(int16x4_t __p0) { - int16x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - return __ret; -} -#else -__ai int16x4_t vrev64_s16(int16x4_t __p0) { - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vrhaddq_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16_t __ret; - __ret = (uint8x16_t) __builtin_neon_vrhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); - return __ret; -} -#else -__ai uint8x16_t vrhaddq_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __ret; - __ret = (uint8x16_t) __builtin_neon_vrhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vrhaddq_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vrhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); - return __ret; -} -#else -__ai uint32x4_t vrhaddq_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vrhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vrhaddq_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint16x8_t 
__ret; - __ret = (uint16x8_t) __builtin_neon_vrhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); - return __ret; -} -#else -__ai uint16x8_t vrhaddq_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vrhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vrhaddq_s8(int8x16_t __p0, int8x16_t __p1) { - int8x16_t __ret; - __ret = (int8x16_t) __builtin_neon_vrhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); - return __ret; -} -#else -__ai int8x16_t vrhaddq_s8(int8x16_t __p0, int8x16_t __p1) { - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __ret; - __ret = (int8x16_t) __builtin_neon_vrhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vrhaddq_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vrhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); - return __ret; -} -#else -__ai int32x4_t vrhaddq_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vrhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vrhaddq_s16(int16x8_t __p0, int16x8_t __p1) { - int16x8_t __ret; - __ret = (int16x8_t) __builtin_neon_vrhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); - return __ret; -} -#else -__ai int16x8_t vrhaddq_s16(int16x8_t __p0, int16x8_t __p1) { - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __ret; - __ret = (int16x8_t) __builtin_neon_vrhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vrhadd_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vrhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 16); - return __ret; -} -#else -__ai uint8x8_t vrhadd_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vrhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vrhadd_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vrhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 18); - return __ret; -} -#else -__ai uint32x2_t 
vrhadd_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vrhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vrhadd_u16(uint16x4_t __p0, uint16x4_t __p1) { - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vrhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 17); - return __ret; -} -#else -__ai uint16x4_t vrhadd_u16(uint16x4_t __p0, uint16x4_t __p1) { - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vrhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vrhadd_s8(int8x8_t __p0, int8x8_t __p1) { - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vrhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 0); - return __ret; -} -#else -__ai int8x8_t vrhadd_s8(int8x8_t __p0, int8x8_t __p1) { - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vrhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vrhadd_s32(int32x2_t __p0, int32x2_t __p1) { - int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vrhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 2); - return __ret; -} -#else -__ai int32x2_t vrhadd_s32(int32x2_t __p0, int32x2_t __p1) { - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vrhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vrhadd_s16(int16x4_t __p0, int16x4_t __p1) { - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vrhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 1); - return __ret; -} -#else -__ai int16x4_t vrhadd_s16(int16x4_t __p0, int16x4_t __p1) { - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vrhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vrshlq_u8(uint8x16_t __p0, int8x16_t __p1) { - uint8x16_t __ret; - __ret = (uint8x16_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); - return __ret; -} -#else -__ai uint8x16_t vrshlq_u8(uint8x16_t __p0, int8x16_t __p1) { - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __ret; - __ret = (uint8x16_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); - __ret = 
__builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vrshlq_u32(uint32x4_t __p0, int32x4_t __p1) { - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); - return __ret; -} -#else -__ai uint32x4_t vrshlq_u32(uint32x4_t __p0, int32x4_t __p1) { - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vrshlq_u64(uint64x2_t __p0, int64x2_t __p1) { - uint64x2_t __ret; - __ret = (uint64x2_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); - return __ret; -} -#else -__ai uint64x2_t vrshlq_u64(uint64x2_t __p0, int64x2_t __p1) { - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint64x2_t __ret; - __ret = (uint64x2_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vrshlq_u16(uint16x8_t __p0, int16x8_t __p1) { - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); - return __ret; -} -#else -__ai uint16x8_t vrshlq_u16(uint16x8_t __p0, int16x8_t __p1) { - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vrshlq_s8(int8x16_t __p0, int8x16_t __p1) { - int8x16_t __ret; - __ret = (int8x16_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); - return __ret; -} -#else -__ai int8x16_t vrshlq_s8(int8x16_t __p0, int8x16_t __p1) { - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __ret; - __ret = (int8x16_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vrshlq_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); - return __ret; -} -#else -__ai int32x4_t vrshlq_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vrshlq_s64(int64x2_t __p0, int64x2_t __p1) { - int64x2_t 
__ret; - __ret = (int64x2_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 35); - return __ret; -} -#else -__ai int64x2_t vrshlq_s64(int64x2_t __p0, int64x2_t __p1) { - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - int64x2_t __ret; - __ret = (int64x2_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vrshlq_s16(int16x8_t __p0, int16x8_t __p1) { - int16x8_t __ret; - __ret = (int16x8_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); - return __ret; -} -#else -__ai int16x8_t vrshlq_s16(int16x8_t __p0, int16x8_t __p1) { - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __ret; - __ret = (int16x8_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vrshl_u8(uint8x8_t __p0, int8x8_t __p1) { - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 16); - return __ret; -} -#else -__ai uint8x8_t vrshl_u8(uint8x8_t __p0, int8x8_t __p1) { - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vrshl_u32(uint32x2_t __p0, int32x2_t __p1) { - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 18); - return __ret; -} -#else -__ai uint32x2_t vrshl_u32(uint32x2_t __p0, int32x2_t __p1) { - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x1_t vrshl_u64(uint64x1_t __p0, int64x1_t __p1) { - uint64x1_t __ret; - __ret = (uint64x1_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 19); - return __ret; -} -#else -__ai uint64x1_t vrshl_u64(uint64x1_t __p0, int64x1_t __p1) { - uint64x1_t __ret; - __ret = (uint64x1_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 19); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vrshl_u16(uint16x4_t __p0, int16x4_t __p1) { - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 17); - return __ret; -} -#else -__ai uint16x4_t vrshl_u16(uint16x4_t __p0, int16x4_t __p1) { - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x8_t 
vrshl_s8(int8x8_t __p0, int8x8_t __p1) { - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 0); - return __ret; -} -#else -__ai int8x8_t vrshl_s8(int8x8_t __p0, int8x8_t __p1) { - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vrshl_s32(int32x2_t __p0, int32x2_t __p1) { - int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 2); - return __ret; -} -#else -__ai int32x2_t vrshl_s32(int32x2_t __p0, int32x2_t __p1) { - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x1_t vrshl_s64(int64x1_t __p0, int64x1_t __p1) { - int64x1_t __ret; - __ret = (int64x1_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 3); - return __ret; -} -#else -__ai int64x1_t vrshl_s64(int64x1_t __p0, int64x1_t __p1) { - int64x1_t __ret; - __ret = (int64x1_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 3); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vrshl_s16(int16x4_t __p0, int16x4_t __p1) { - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 1); - return __ret; -} -#else -__ai int16x4_t vrshl_s16(int16x4_t __p0, int16x4_t __p1) { - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vrshrq_n_u8(__p0, __p1) __extension__ ({ \ - uint8x16_t __s0 = __p0; \ - uint8x16_t __ret; \ - __ret = (uint8x16_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 48); \ - __ret; \ -}) -#else -#define vrshrq_n_u8(__p0, __p1) __extension__ ({ \ - uint8x16_t __s0 = __p0; \ - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint8x16_t __ret; \ - __ret = (uint8x16_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 48); \ - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vrshrq_n_u32(__p0, __p1) __extension__ ({ \ - uint32x4_t __s0 = __p0; \ - uint32x4_t __ret; \ - __ret = (uint32x4_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 50); \ - __ret; \ -}) -#else -#define vrshrq_n_u32(__p0, __p1) __extension__ ({ \ - uint32x4_t __s0 = __p0; \ - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - uint32x4_t __ret; \ - __ret = (uint32x4_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 50); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vrshrq_n_u64(__p0, __p1) __extension__ ({ 
\ - uint64x2_t __s0 = __p0; \ - uint64x2_t __ret; \ - __ret = (uint64x2_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 51); \ - __ret; \ -}) -#else -#define vrshrq_n_u64(__p0, __p1) __extension__ ({ \ - uint64x2_t __s0 = __p0; \ - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - uint64x2_t __ret; \ - __ret = (uint64x2_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 51); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vrshrq_n_u16(__p0, __p1) __extension__ ({ \ - uint16x8_t __s0 = __p0; \ - uint16x8_t __ret; \ - __ret = (uint16x8_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 49); \ - __ret; \ -}) -#else -#define vrshrq_n_u16(__p0, __p1) __extension__ ({ \ - uint16x8_t __s0 = __p0; \ - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint16x8_t __ret; \ - __ret = (uint16x8_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 49); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vrshrq_n_s8(__p0, __p1) __extension__ ({ \ - int8x16_t __s0 = __p0; \ - int8x16_t __ret; \ - __ret = (int8x16_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 32); \ - __ret; \ -}) -#else -#define vrshrq_n_s8(__p0, __p1) __extension__ ({ \ - int8x16_t __s0 = __p0; \ - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - int8x16_t __ret; \ - __ret = (int8x16_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 32); \ - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vrshrq_n_s32(__p0, __p1) __extension__ ({ \ - int32x4_t __s0 = __p0; \ - int32x4_t __ret; \ - __ret = (int32x4_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 34); \ - __ret; \ -}) -#else -#define vrshrq_n_s32(__p0, __p1) __extension__ ({ \ - int32x4_t __s0 = __p0; \ - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - int32x4_t __ret; \ - __ret = (int32x4_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 34); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vrshrq_n_s64(__p0, __p1) __extension__ ({ \ - int64x2_t __s0 = __p0; \ - int64x2_t __ret; \ - __ret = (int64x2_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 35); \ - __ret; \ -}) -#else -#define vrshrq_n_s64(__p0, __p1) __extension__ ({ \ - int64x2_t __s0 = __p0; \ - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - int64x2_t __ret; \ - __ret = (int64x2_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 35); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vrshrq_n_s16(__p0, __p1) __extension__ ({ \ - int16x8_t __s0 = __p0; \ - int16x8_t __ret; \ - __ret = (int16x8_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 33); \ - __ret; \ -}) -#else -#define vrshrq_n_s16(__p0, __p1) __extension__ ({ \ - int16x8_t __s0 = __p0; \ - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x8_t __ret; \ - __ret = (int16x8_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 33); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vrshr_n_u8(__p0, 
__p1) __extension__ ({ \ - uint8x8_t __s0 = __p0; \ - uint8x8_t __ret; \ - __ret = (uint8x8_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 16); \ - __ret; \ -}) -#else -#define vrshr_n_u8(__p0, __p1) __extension__ ({ \ - uint8x8_t __s0 = __p0; \ - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint8x8_t __ret; \ - __ret = (uint8x8_t) __builtin_neon_vrshr_n_v((int8x8_t)__rev0, __p1, 16); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vrshr_n_u32(__p0, __p1) __extension__ ({ \ - uint32x2_t __s0 = __p0; \ - uint32x2_t __ret; \ - __ret = (uint32x2_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 18); \ - __ret; \ -}) -#else -#define vrshr_n_u32(__p0, __p1) __extension__ ({ \ - uint32x2_t __s0 = __p0; \ - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - uint32x2_t __ret; \ - __ret = (uint32x2_t) __builtin_neon_vrshr_n_v((int8x8_t)__rev0, __p1, 18); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vrshr_n_u64(__p0, __p1) __extension__ ({ \ - uint64x1_t __s0 = __p0; \ - uint64x1_t __ret; \ - __ret = (uint64x1_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 19); \ - __ret; \ -}) -#else -#define vrshr_n_u64(__p0, __p1) __extension__ ({ \ - uint64x1_t __s0 = __p0; \ - uint64x1_t __ret; \ - __ret = (uint64x1_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 19); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vrshr_n_u16(__p0, __p1) __extension__ ({ \ - uint16x4_t __s0 = __p0; \ - uint16x4_t __ret; \ - __ret = (uint16x4_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 17); \ - __ret; \ -}) -#else -#define vrshr_n_u16(__p0, __p1) __extension__ ({ \ - uint16x4_t __s0 = __p0; \ - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - uint16x4_t __ret; \ - __ret = (uint16x4_t) __builtin_neon_vrshr_n_v((int8x8_t)__rev0, __p1, 17); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vrshr_n_s8(__p0, __p1) __extension__ ({ \ - int8x8_t __s0 = __p0; \ - int8x8_t __ret; \ - __ret = (int8x8_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 0); \ - __ret; \ -}) -#else -#define vrshr_n_s8(__p0, __p1) __extension__ ({ \ - int8x8_t __s0 = __p0; \ - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - int8x8_t __ret; \ - __ret = (int8x8_t) __builtin_neon_vrshr_n_v((int8x8_t)__rev0, __p1, 0); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vrshr_n_s32(__p0, __p1) __extension__ ({ \ - int32x2_t __s0 = __p0; \ - int32x2_t __ret; \ - __ret = (int32x2_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 2); \ - __ret; \ -}) -#else -#define vrshr_n_s32(__p0, __p1) __extension__ ({ \ - int32x2_t __s0 = __p0; \ - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - int32x2_t __ret; \ - __ret = (int32x2_t) __builtin_neon_vrshr_n_v((int8x8_t)__rev0, __p1, 2); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vrshr_n_s64(__p0, __p1) __extension__ ({ \ - int64x1_t __s0 = __p0; \ - int64x1_t __ret; \ - __ret = (int64x1_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 3); \ - __ret; \ -}) -#else -#define vrshr_n_s64(__p0, __p1) __extension__ ({ \ - int64x1_t __s0 = __p0; \ 
- int64x1_t __ret; \ - __ret = (int64x1_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 3); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vrshr_n_s16(__p0, __p1) __extension__ ({ \ - int16x4_t __s0 = __p0; \ - int16x4_t __ret; \ - __ret = (int16x4_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 1); \ - __ret; \ -}) -#else -#define vrshr_n_s16(__p0, __p1) __extension__ ({ \ - int16x4_t __s0 = __p0; \ - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - int16x4_t __ret; \ - __ret = (int16x4_t) __builtin_neon_vrshr_n_v((int8x8_t)__rev0, __p1, 1); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vrshrn_n_u32(__p0, __p1) __extension__ ({ \ - uint32x4_t __s0 = __p0; \ - uint16x4_t __ret; \ - __ret = (uint16x4_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 17); \ - __ret; \ -}) -#else -#define vrshrn_n_u32(__p0, __p1) __extension__ ({ \ - uint32x4_t __s0 = __p0; \ - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - uint16x4_t __ret; \ - __ret = (uint16x4_t) __builtin_neon_vrshrn_n_v((int8x16_t)__rev0, __p1, 17); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#define __noswap_vrshrn_n_u32(__p0, __p1) __extension__ ({ \ - uint32x4_t __s0 = __p0; \ - uint16x4_t __ret; \ - __ret = (uint16x4_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 17); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vrshrn_n_u64(__p0, __p1) __extension__ ({ \ - uint64x2_t __s0 = __p0; \ - uint32x2_t __ret; \ - __ret = (uint32x2_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 18); \ - __ret; \ -}) -#else -#define vrshrn_n_u64(__p0, __p1) __extension__ ({ \ - uint64x2_t __s0 = __p0; \ - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - uint32x2_t __ret; \ - __ret = (uint32x2_t) __builtin_neon_vrshrn_n_v((int8x16_t)__rev0, __p1, 18); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#define __noswap_vrshrn_n_u64(__p0, __p1) __extension__ ({ \ - uint64x2_t __s0 = __p0; \ - uint32x2_t __ret; \ - __ret = (uint32x2_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 18); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vrshrn_n_u16(__p0, __p1) __extension__ ({ \ - uint16x8_t __s0 = __p0; \ - uint8x8_t __ret; \ - __ret = (uint8x8_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 16); \ - __ret; \ -}) -#else -#define vrshrn_n_u16(__p0, __p1) __extension__ ({ \ - uint16x8_t __s0 = __p0; \ - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint8x8_t __ret; \ - __ret = (uint8x8_t) __builtin_neon_vrshrn_n_v((int8x16_t)__rev0, __p1, 16); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#define __noswap_vrshrn_n_u16(__p0, __p1) __extension__ ({ \ - uint16x8_t __s0 = __p0; \ - uint8x8_t __ret; \ - __ret = (uint8x8_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 16); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vrshrn_n_s32(__p0, __p1) __extension__ ({ \ - int32x4_t __s0 = __p0; \ - int16x4_t __ret; \ - __ret = (int16x4_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 1); \ - __ret; \ -}) -#else -#define vrshrn_n_s32(__p0, __p1) __extension__ ({ \ - int32x4_t __s0 = __p0; \ - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - int16x4_t __ret; \ - __ret = (int16x4_t) __builtin_neon_vrshrn_n_v((int8x16_t)__rev0, __p1, 1); \ - 
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#define __noswap_vrshrn_n_s32(__p0, __p1) __extension__ ({ \ - int32x4_t __s0 = __p0; \ - int16x4_t __ret; \ - __ret = (int16x4_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vrshrn_n_s64(__p0, __p1) __extension__ ({ \ - int64x2_t __s0 = __p0; \ - int32x2_t __ret; \ - __ret = (int32x2_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 2); \ - __ret; \ -}) -#else -#define vrshrn_n_s64(__p0, __p1) __extension__ ({ \ - int64x2_t __s0 = __p0; \ - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - int32x2_t __ret; \ - __ret = (int32x2_t) __builtin_neon_vrshrn_n_v((int8x16_t)__rev0, __p1, 2); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#define __noswap_vrshrn_n_s64(__p0, __p1) __extension__ ({ \ - int64x2_t __s0 = __p0; \ - int32x2_t __ret; \ - __ret = (int32x2_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 2); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vrshrn_n_s16(__p0, __p1) __extension__ ({ \ - int16x8_t __s0 = __p0; \ - int8x8_t __ret; \ - __ret = (int8x8_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 0); \ - __ret; \ -}) -#else -#define vrshrn_n_s16(__p0, __p1) __extension__ ({ \ - int16x8_t __s0 = __p0; \ - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - int8x8_t __ret; \ - __ret = (int8x8_t) __builtin_neon_vrshrn_n_v((int8x16_t)__rev0, __p1, 0); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#define __noswap_vrshrn_n_s16(__p0, __p1) __extension__ ({ \ - int16x8_t __s0 = __p0; \ - int8x8_t __ret; \ - __ret = (int8x8_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vrsqrteq_u32(uint32x4_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vrsqrteq_v((int8x16_t)__p0, 50); - return __ret; -} -#else -__ai uint32x4_t vrsqrteq_u32(uint32x4_t __p0) { - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vrsqrteq_v((int8x16_t)__rev0, 50); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vrsqrteq_f32(float32x4_t __p0) { - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vrsqrteq_v((int8x16_t)__p0, 41); - return __ret; -} -#else -__ai float32x4_t vrsqrteq_f32(float32x4_t __p0) { - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vrsqrteq_v((int8x16_t)__rev0, 41); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vrsqrte_u32(uint32x2_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vrsqrte_v((int8x8_t)__p0, 18); - return __ret; -} -#else -__ai uint32x2_t vrsqrte_u32(uint32x2_t __p0) { - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vrsqrte_v((int8x8_t)__rev0, 18); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vrsqrte_f32(float32x2_t __p0) { - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vrsqrte_v((int8x8_t)__p0, 9); - return __ret; -} -#else -__ai 
float32x2_t vrsqrte_f32(float32x2_t __p0) { - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vrsqrte_v((int8x8_t)__rev0, 9); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vrsqrtsq_f32(float32x4_t __p0, float32x4_t __p1) { - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vrsqrtsq_v((int8x16_t)__p0, (int8x16_t)__p1, 41); - return __ret; -} -#else -__ai float32x4_t vrsqrtsq_f32(float32x4_t __p0, float32x4_t __p1) { - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vrsqrtsq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vrsqrts_f32(float32x2_t __p0, float32x2_t __p1) { - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vrsqrts_v((int8x8_t)__p0, (int8x8_t)__p1, 9); - return __ret; -} -#else -__ai float32x2_t vrsqrts_f32(float32x2_t __p0, float32x2_t __p1) { - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vrsqrts_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vrsraq_n_u8(__p0, __p1, __p2) __extension__ ({ \ - uint8x16_t __s0 = __p0; \ - uint8x16_t __s1 = __p1; \ - uint8x16_t __ret; \ - __ret = (uint8x16_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 48); \ - __ret; \ -}) -#else -#define vrsraq_n_u8(__p0, __p1, __p2) __extension__ ({ \ - uint8x16_t __s0 = __p0; \ - uint8x16_t __s1 = __p1; \ - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint8x16_t __ret; \ - __ret = (uint8x16_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 48); \ - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vrsraq_n_u32(__p0, __p1, __p2) __extension__ ({ \ - uint32x4_t __s0 = __p0; \ - uint32x4_t __s1 = __p1; \ - uint32x4_t __ret; \ - __ret = (uint32x4_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 50); \ - __ret; \ -}) -#else -#define vrsraq_n_u32(__p0, __p1, __p2) __extension__ ({ \ - uint32x4_t __s0 = __p0; \ - uint32x4_t __s1 = __p1; \ - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - uint32x4_t __ret; \ - __ret = (uint32x4_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 50); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vrsraq_n_u64(__p0, __p1, __p2) __extension__ ({ \ - uint64x2_t __s0 = __p0; \ - uint64x2_t __s1 = __p1; \ - uint64x2_t __ret; \ - __ret = (uint64x2_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 51); \ - __ret; \ -}) -#else -#define 
vrsraq_n_u64(__p0, __p1, __p2) __extension__ ({ \ - uint64x2_t __s0 = __p0; \ - uint64x2_t __s1 = __p1; \ - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - uint64x2_t __ret; \ - __ret = (uint64x2_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 51); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vrsraq_n_u16(__p0, __p1, __p2) __extension__ ({ \ - uint16x8_t __s0 = __p0; \ - uint16x8_t __s1 = __p1; \ - uint16x8_t __ret; \ - __ret = (uint16x8_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 49); \ - __ret; \ -}) -#else -#define vrsraq_n_u16(__p0, __p1, __p2) __extension__ ({ \ - uint16x8_t __s0 = __p0; \ - uint16x8_t __s1 = __p1; \ - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint16x8_t __ret; \ - __ret = (uint16x8_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 49); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vrsraq_n_s8(__p0, __p1, __p2) __extension__ ({ \ - int8x16_t __s0 = __p0; \ - int8x16_t __s1 = __p1; \ - int8x16_t __ret; \ - __ret = (int8x16_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 32); \ - __ret; \ -}) -#else -#define vrsraq_n_s8(__p0, __p1, __p2) __extension__ ({ \ - int8x16_t __s0 = __p0; \ - int8x16_t __s1 = __p1; \ - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - int8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - int8x16_t __ret; \ - __ret = (int8x16_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 32); \ - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vrsraq_n_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x4_t __s0 = __p0; \ - int32x4_t __s1 = __p1; \ - int32x4_t __ret; \ - __ret = (int32x4_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 34); \ - __ret; \ -}) -#else -#define vrsraq_n_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x4_t __s0 = __p0; \ - int32x4_t __s1 = __p1; \ - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - int32x4_t __ret; \ - __ret = (int32x4_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vrsraq_n_s64(__p0, __p1, __p2) __extension__ ({ \ - int64x2_t __s0 = __p0; \ - int64x2_t __s1 = __p1; \ - int64x2_t __ret; \ - __ret = (int64x2_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 35); \ - __ret; \ -}) -#else -#define vrsraq_n_s64(__p0, __p1, __p2) __extension__ ({ \ - int64x2_t __s0 = __p0; \ - int64x2_t __s1 = __p1; \ - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - int64x2_t __ret; \ - __ret = (int64x2_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 
35); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vrsraq_n_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x8_t __s0 = __p0; \ - int16x8_t __s1 = __p1; \ - int16x8_t __ret; \ - __ret = (int16x8_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 33); \ - __ret; \ -}) -#else -#define vrsraq_n_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x8_t __s0 = __p0; \ - int16x8_t __s1 = __p1; \ - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x8_t __ret; \ - __ret = (int16x8_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vrsra_n_u8(__p0, __p1, __p2) __extension__ ({ \ - uint8x8_t __s0 = __p0; \ - uint8x8_t __s1 = __p1; \ - uint8x8_t __ret; \ - __ret = (uint8x8_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 16); \ - __ret; \ -}) -#else -#define vrsra_n_u8(__p0, __p1, __p2) __extension__ ({ \ - uint8x8_t __s0 = __p0; \ - uint8x8_t __s1 = __p1; \ - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint8x8_t __ret; \ - __ret = (uint8x8_t) __builtin_neon_vrsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 16); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vrsra_n_u32(__p0, __p1, __p2) __extension__ ({ \ - uint32x2_t __s0 = __p0; \ - uint32x2_t __s1 = __p1; \ - uint32x2_t __ret; \ - __ret = (uint32x2_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 18); \ - __ret; \ -}) -#else -#define vrsra_n_u32(__p0, __p1, __p2) __extension__ ({ \ - uint32x2_t __s0 = __p0; \ - uint32x2_t __s1 = __p1; \ - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - uint32x2_t __ret; \ - __ret = (uint32x2_t) __builtin_neon_vrsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 18); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vrsra_n_u64(__p0, __p1, __p2) __extension__ ({ \ - uint64x1_t __s0 = __p0; \ - uint64x1_t __s1 = __p1; \ - uint64x1_t __ret; \ - __ret = (uint64x1_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \ - __ret; \ -}) -#else -#define vrsra_n_u64(__p0, __p1, __p2) __extension__ ({ \ - uint64x1_t __s0 = __p0; \ - uint64x1_t __s1 = __p1; \ - uint64x1_t __ret; \ - __ret = (uint64x1_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vrsra_n_u16(__p0, __p1, __p2) __extension__ ({ \ - uint16x4_t __s0 = __p0; \ - uint16x4_t __s1 = __p1; \ - uint16x4_t __ret; \ - __ret = (uint16x4_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 17); \ - __ret; \ -}) -#else -#define vrsra_n_u16(__p0, __p1, __p2) __extension__ ({ \ - uint16x4_t __s0 = __p0; \ - uint16x4_t __s1 = __p1; \ - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - uint16x4_t __ret; \ - __ret = (uint16x4_t) 
__builtin_neon_vrsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 17); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vrsra_n_s8(__p0, __p1, __p2) __extension__ ({ \ - int8x8_t __s0 = __p0; \ - int8x8_t __s1 = __p1; \ - int8x8_t __ret; \ - __ret = (int8x8_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 0); \ - __ret; \ -}) -#else -#define vrsra_n_s8(__p0, __p1, __p2) __extension__ ({ \ - int8x8_t __s0 = __p0; \ - int8x8_t __s1 = __p1; \ - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - int8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - int8x8_t __ret; \ - __ret = (int8x8_t) __builtin_neon_vrsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 0); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vrsra_n_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x2_t __s0 = __p0; \ - int32x2_t __s1 = __p1; \ - int32x2_t __ret; \ - __ret = (int32x2_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 2); \ - __ret; \ -}) -#else -#define vrsra_n_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x2_t __s0 = __p0; \ - int32x2_t __s1 = __p1; \ - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - int32x2_t __ret; \ - __ret = (int32x2_t) __builtin_neon_vrsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vrsra_n_s64(__p0, __p1, __p2) __extension__ ({ \ - int64x1_t __s0 = __p0; \ - int64x1_t __s1 = __p1; \ - int64x1_t __ret; \ - __ret = (int64x1_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \ - __ret; \ -}) -#else -#define vrsra_n_s64(__p0, __p1, __p2) __extension__ ({ \ - int64x1_t __s0 = __p0; \ - int64x1_t __s1 = __p1; \ - int64x1_t __ret; \ - __ret = (int64x1_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vrsra_n_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x4_t __s0 = __p0; \ - int16x4_t __s1 = __p1; \ - int16x4_t __ret; \ - __ret = (int16x4_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 1); \ - __ret; \ -}) -#else -#define vrsra_n_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x4_t __s0 = __p0; \ - int16x4_t __s1 = __p1; \ - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - int16x4_t __ret; \ - __ret = (int16x4_t) __builtin_neon_vrsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vrsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17); - return __ret; -} -#else -__ai uint16x4_t vrsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vrsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 17); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - 
return __ret; -} -__ai uint16x4_t __noswap_vrsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vrsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) { - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18); - return __ret; -} -#else -__ai uint32x2_t vrsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) { - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vrsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 18); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -__ai uint32x2_t __noswap_vrsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) { - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vrsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16); - return __ret; -} -#else -__ai uint8x8_t vrsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vrsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 16); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -__ai uint8x8_t __noswap_vrsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vrsubhn_s32(int32x4_t __p0, int32x4_t __p1) { - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1); - return __ret; -} -#else -__ai int16x4_t vrsubhn_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vrsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -__ai int16x4_t __noswap_vrsubhn_s32(int32x4_t __p0, int32x4_t __p1) { - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vrsubhn_s64(int64x2_t __p0, int64x2_t __p1) { - int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2); - return __ret; -} -#else -__ai int32x2_t vrsubhn_s64(int64x2_t __p0, int64x2_t __p1) { - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vrsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 2); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -__ai int32x2_t __noswap_vrsubhn_s64(int64x2_t __p0, int64x2_t __p1) { - int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2); - return 
__ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vrsubhn_s16(int16x8_t __p0, int16x8_t __p1) { - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0); - return __ret; -} -#else -__ai int8x8_t vrsubhn_s16(int16x8_t __p0, int16x8_t __p1) { - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vrsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 0); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -__ai int8x8_t __noswap_vrsubhn_s16(int16x8_t __p0, int16x8_t __p1) { - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vset_lane_p8(__p0, __p1, __p2) __extension__ ({ \ - poly8_t __s0 = __p0; \ - poly8x8_t __s1 = __p1; \ - poly8x8_t __ret; \ - __ret = (poly8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__s1, __p2); \ - __ret; \ -}) -#else -#define vset_lane_p8(__p0, __p1, __p2) __extension__ ({ \ - poly8_t __s0 = __p0; \ - poly8x8_t __s1 = __p1; \ - poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - poly8x8_t __ret; \ - __ret = (poly8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__rev1, __p2); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#define __noswap_vset_lane_p8(__p0, __p1, __p2) __extension__ ({ \ - poly8_t __s0 = __p0; \ - poly8x8_t __s1 = __p1; \ - poly8x8_t __ret; \ - __ret = (poly8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__s1, __p2); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vset_lane_p16(__p0, __p1, __p2) __extension__ ({ \ - poly16_t __s0 = __p0; \ - poly16x4_t __s1 = __p1; \ - poly16x4_t __ret; \ - __ret = (poly16x4_t) __builtin_neon_vset_lane_i16(__s0, (int8x8_t)__s1, __p2); \ - __ret; \ -}) -#else -#define vset_lane_p16(__p0, __p1, __p2) __extension__ ({ \ - poly16_t __s0 = __p0; \ - poly16x4_t __s1 = __p1; \ - poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - poly16x4_t __ret; \ - __ret = (poly16x4_t) __builtin_neon_vset_lane_i16(__s0, (int8x8_t)__rev1, __p2); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#define __noswap_vset_lane_p16(__p0, __p1, __p2) __extension__ ({ \ - poly16_t __s0 = __p0; \ - poly16x4_t __s1 = __p1; \ - poly16x4_t __ret; \ - __ret = (poly16x4_t) __builtin_neon_vset_lane_i16(__s0, (int8x8_t)__s1, __p2); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vsetq_lane_p8(__p0, __p1, __p2) __extension__ ({ \ - poly8_t __s0 = __p0; \ - poly8x16_t __s1 = __p1; \ - poly8x16_t __ret; \ - __ret = (poly8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__s1, __p2); \ - __ret; \ -}) -#else -#define vsetq_lane_p8(__p0, __p1, __p2) __extension__ ({ \ - poly8_t __s0 = __p0; \ - poly8x16_t __s1 = __p1; \ - poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - poly8x16_t __ret; \ - __ret = (poly8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__rev1, __p2); \ - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#define __noswap_vsetq_lane_p8(__p0, __p1, __p2) __extension__ ({ \ - poly8_t __s0 = __p0; \ - poly8x16_t __s1 = __p1; \ - poly8x16_t __ret; \ 
- __ret = (poly8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__s1, __p2); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vsetq_lane_p16(__p0, __p1, __p2) __extension__ ({ \ - poly16_t __s0 = __p0; \ - poly16x8_t __s1 = __p1; \ - poly16x8_t __ret; \ - __ret = (poly16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int8x16_t)__s1, __p2); \ - __ret; \ -}) -#else -#define vsetq_lane_p16(__p0, __p1, __p2) __extension__ ({ \ - poly16_t __s0 = __p0; \ - poly16x8_t __s1 = __p1; \ - poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - poly16x8_t __ret; \ - __ret = (poly16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int8x16_t)__rev1, __p2); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#define __noswap_vsetq_lane_p16(__p0, __p1, __p2) __extension__ ({ \ - poly16_t __s0 = __p0; \ - poly16x8_t __s1 = __p1; \ - poly16x8_t __ret; \ - __ret = (poly16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int8x16_t)__s1, __p2); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vsetq_lane_u8(__p0, __p1, __p2) __extension__ ({ \ - uint8_t __s0 = __p0; \ - uint8x16_t __s1 = __p1; \ - uint8x16_t __ret; \ - __ret = (uint8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__s1, __p2); \ - __ret; \ -}) -#else -#define vsetq_lane_u8(__p0, __p1, __p2) __extension__ ({ \ - uint8_t __s0 = __p0; \ - uint8x16_t __s1 = __p1; \ - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint8x16_t __ret; \ - __ret = (uint8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__rev1, __p2); \ - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#define __noswap_vsetq_lane_u8(__p0, __p1, __p2) __extension__ ({ \ - uint8_t __s0 = __p0; \ - uint8x16_t __s1 = __p1; \ - uint8x16_t __ret; \ - __ret = (uint8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__s1, __p2); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vsetq_lane_u32(__p0, __p1, __p2) __extension__ ({ \ - uint32_t __s0 = __p0; \ - uint32x4_t __s1 = __p1; \ - uint32x4_t __ret; \ - __ret = (uint32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int8x16_t)__s1, __p2); \ - __ret; \ -}) -#else -#define vsetq_lane_u32(__p0, __p1, __p2) __extension__ ({ \ - uint32_t __s0 = __p0; \ - uint32x4_t __s1 = __p1; \ - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - uint32x4_t __ret; \ - __ret = (uint32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int8x16_t)__rev1, __p2); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#define __noswap_vsetq_lane_u32(__p0, __p1, __p2) __extension__ ({ \ - uint32_t __s0 = __p0; \ - uint32x4_t __s1 = __p1; \ - uint32x4_t __ret; \ - __ret = (uint32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int8x16_t)__s1, __p2); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vsetq_lane_u64(__p0, __p1, __p2) __extension__ ({ \ - uint64_t __s0 = __p0; \ - uint64x2_t __s1 = __p1; \ - uint64x2_t __ret; \ - __ret = (uint64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int8x16_t)__s1, __p2); \ - __ret; \ -}) -#else -#define vsetq_lane_u64(__p0, __p1, __p2) __extension__ ({ \ - uint64_t __s0 = __p0; \ - uint64x2_t __s1 = __p1; \ - uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - uint64x2_t __ret; \ - __ret = (uint64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int8x16_t)__rev1, __p2); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 
0); \ - __ret; \ -}) -#define __noswap_vsetq_lane_u64(__p0, __p1, __p2) __extension__ ({ \ - uint64_t __s0 = __p0; \ - uint64x2_t __s1 = __p1; \ - uint64x2_t __ret; \ - __ret = (uint64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int8x16_t)__s1, __p2); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vsetq_lane_u16(__p0, __p1, __p2) __extension__ ({ \ - uint16_t __s0 = __p0; \ - uint16x8_t __s1 = __p1; \ - uint16x8_t __ret; \ - __ret = (uint16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int8x16_t)__s1, __p2); \ - __ret; \ -}) -#else -#define vsetq_lane_u16(__p0, __p1, __p2) __extension__ ({ \ - uint16_t __s0 = __p0; \ - uint16x8_t __s1 = __p1; \ - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint16x8_t __ret; \ - __ret = (uint16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int8x16_t)__rev1, __p2); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#define __noswap_vsetq_lane_u16(__p0, __p1, __p2) __extension__ ({ \ - uint16_t __s0 = __p0; \ - uint16x8_t __s1 = __p1; \ - uint16x8_t __ret; \ - __ret = (uint16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int8x16_t)__s1, __p2); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vsetq_lane_s8(__p0, __p1, __p2) __extension__ ({ \ - int8_t __s0 = __p0; \ - int8x16_t __s1 = __p1; \ - int8x16_t __ret; \ - __ret = (int8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__s1, __p2); \ - __ret; \ -}) -#else -#define vsetq_lane_s8(__p0, __p1, __p2) __extension__ ({ \ - int8_t __s0 = __p0; \ - int8x16_t __s1 = __p1; \ - int8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - int8x16_t __ret; \ - __ret = (int8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__rev1, __p2); \ - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#define __noswap_vsetq_lane_s8(__p0, __p1, __p2) __extension__ ({ \ - int8_t __s0 = __p0; \ - int8x16_t __s1 = __p1; \ - int8x16_t __ret; \ - __ret = (int8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__s1, __p2); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vsetq_lane_f32(__p0, __p1, __p2) __extension__ ({ \ - float32_t __s0 = __p0; \ - float32x4_t __s1 = __p1; \ - float32x4_t __ret; \ - __ret = (float32x4_t) __builtin_neon_vsetq_lane_f32(__s0, (int8x16_t)__s1, __p2); \ - __ret; \ -}) -#else -#define vsetq_lane_f32(__p0, __p1, __p2) __extension__ ({ \ - float32_t __s0 = __p0; \ - float32x4_t __s1 = __p1; \ - float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - float32x4_t __ret; \ - __ret = (float32x4_t) __builtin_neon_vsetq_lane_f32(__s0, (int8x16_t)__rev1, __p2); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#define __noswap_vsetq_lane_f32(__p0, __p1, __p2) __extension__ ({ \ - float32_t __s0 = __p0; \ - float32x4_t __s1 = __p1; \ - float32x4_t __ret; \ - __ret = (float32x4_t) __builtin_neon_vsetq_lane_f32(__s0, (int8x16_t)__s1, __p2); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vsetq_lane_f16(__p0, __p1, __p2) __extension__ ({ \ - float16_t __s0 = __p0; \ - float16x8_t __s1 = __p1; \ - float16x8_t __ret; \ - __ret = (float16x8_t) __builtin_neon_vsetq_lane_f16(__s0, (int8x16_t)__s1, __p2); \ - __ret; \ -}) -#else -#define vsetq_lane_f16(__p0, __p1, __p2) __extension__ ({ \ - float16_t __s0 = __p0; \ - float16x8_t __s1 = __p1; \ - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, 
__s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x8_t __ret; \ - __ret = (float16x8_t) __builtin_neon_vsetq_lane_f16(__s0, (int8x16_t)__rev1, __p2); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vsetq_lane_s32(__p0, __p1, __p2) __extension__ ({ \ - int32_t __s0 = __p0; \ - int32x4_t __s1 = __p1; \ - int32x4_t __ret; \ - __ret = (int32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int8x16_t)__s1, __p2); \ - __ret; \ -}) -#else -#define vsetq_lane_s32(__p0, __p1, __p2) __extension__ ({ \ - int32_t __s0 = __p0; \ - int32x4_t __s1 = __p1; \ - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - int32x4_t __ret; \ - __ret = (int32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int8x16_t)__rev1, __p2); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#define __noswap_vsetq_lane_s32(__p0, __p1, __p2) __extension__ ({ \ - int32_t __s0 = __p0; \ - int32x4_t __s1 = __p1; \ - int32x4_t __ret; \ - __ret = (int32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int8x16_t)__s1, __p2); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vsetq_lane_s64(__p0, __p1, __p2) __extension__ ({ \ - int64_t __s0 = __p0; \ - int64x2_t __s1 = __p1; \ - int64x2_t __ret; \ - __ret = (int64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int8x16_t)__s1, __p2); \ - __ret; \ -}) -#else -#define vsetq_lane_s64(__p0, __p1, __p2) __extension__ ({ \ - int64_t __s0 = __p0; \ - int64x2_t __s1 = __p1; \ - int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - int64x2_t __ret; \ - __ret = (int64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int8x16_t)__rev1, __p2); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#define __noswap_vsetq_lane_s64(__p0, __p1, __p2) __extension__ ({ \ - int64_t __s0 = __p0; \ - int64x2_t __s1 = __p1; \ - int64x2_t __ret; \ - __ret = (int64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int8x16_t)__s1, __p2); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vsetq_lane_s16(__p0, __p1, __p2) __extension__ ({ \ - int16_t __s0 = __p0; \ - int16x8_t __s1 = __p1; \ - int16x8_t __ret; \ - __ret = (int16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int8x16_t)__s1, __p2); \ - __ret; \ -}) -#else -#define vsetq_lane_s16(__p0, __p1, __p2) __extension__ ({ \ - int16_t __s0 = __p0; \ - int16x8_t __s1 = __p1; \ - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x8_t __ret; \ - __ret = (int16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int8x16_t)__rev1, __p2); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#define __noswap_vsetq_lane_s16(__p0, __p1, __p2) __extension__ ({ \ - int16_t __s0 = __p0; \ - int16x8_t __s1 = __p1; \ - int16x8_t __ret; \ - __ret = (int16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int8x16_t)__s1, __p2); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vset_lane_u8(__p0, __p1, __p2) __extension__ ({ \ - uint8_t __s0 = __p0; \ - uint8x8_t __s1 = __p1; \ - uint8x8_t __ret; \ - __ret = (uint8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__s1, __p2); \ - __ret; \ -}) -#else -#define vset_lane_u8(__p0, __p1, __p2) __extension__ ({ \ - uint8_t __s0 = __p0; \ - uint8x8_t __s1 = __p1; \ - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint8x8_t __ret; \ - __ret = (uint8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__rev1, __p2); \ - __ret = __builtin_shufflevector(__ret, 
__ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#define __noswap_vset_lane_u8(__p0, __p1, __p2) __extension__ ({ \ - uint8_t __s0 = __p0; \ - uint8x8_t __s1 = __p1; \ - uint8x8_t __ret; \ - __ret = (uint8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__s1, __p2); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vset_lane_u32(__p0, __p1, __p2) __extension__ ({ \ - uint32_t __s0 = __p0; \ - uint32x2_t __s1 = __p1; \ - uint32x2_t __ret; \ - __ret = (uint32x2_t) __builtin_neon_vset_lane_i32(__s0, (int8x8_t)__s1, __p2); \ - __ret; \ -}) -#else -#define vset_lane_u32(__p0, __p1, __p2) __extension__ ({ \ - uint32_t __s0 = __p0; \ - uint32x2_t __s1 = __p1; \ - uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - uint32x2_t __ret; \ - __ret = (uint32x2_t) __builtin_neon_vset_lane_i32(__s0, (int8x8_t)__rev1, __p2); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#define __noswap_vset_lane_u32(__p0, __p1, __p2) __extension__ ({ \ - uint32_t __s0 = __p0; \ - uint32x2_t __s1 = __p1; \ - uint32x2_t __ret; \ - __ret = (uint32x2_t) __builtin_neon_vset_lane_i32(__s0, (int8x8_t)__s1, __p2); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vset_lane_u64(__p0, __p1, __p2) __extension__ ({ \ - uint64_t __s0 = __p0; \ - uint64x1_t __s1 = __p1; \ - uint64x1_t __ret; \ - __ret = (uint64x1_t) __builtin_neon_vset_lane_i64(__s0, (int8x8_t)__s1, __p2); \ - __ret; \ -}) -#else -#define vset_lane_u64(__p0, __p1, __p2) __extension__ ({ \ - uint64_t __s0 = __p0; \ - uint64x1_t __s1 = __p1; \ - uint64x1_t __ret; \ - __ret = (uint64x1_t) __builtin_neon_vset_lane_i64(__s0, (int8x8_t)__s1, __p2); \ - __ret; \ -}) -#define __noswap_vset_lane_u64(__p0, __p1, __p2) __extension__ ({ \ - uint64_t __s0 = __p0; \ - uint64x1_t __s1 = __p1; \ - uint64x1_t __ret; \ - __ret = (uint64x1_t) __builtin_neon_vset_lane_i64(__s0, (int8x8_t)__s1, __p2); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vset_lane_u16(__p0, __p1, __p2) __extension__ ({ \ - uint16_t __s0 = __p0; \ - uint16x4_t __s1 = __p1; \ - uint16x4_t __ret; \ - __ret = (uint16x4_t) __builtin_neon_vset_lane_i16(__s0, (int8x8_t)__s1, __p2); \ - __ret; \ -}) -#else -#define vset_lane_u16(__p0, __p1, __p2) __extension__ ({ \ - uint16_t __s0 = __p0; \ - uint16x4_t __s1 = __p1; \ - uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - uint16x4_t __ret; \ - __ret = (uint16x4_t) __builtin_neon_vset_lane_i16(__s0, (int8x8_t)__rev1, __p2); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#define __noswap_vset_lane_u16(__p0, __p1, __p2) __extension__ ({ \ - uint16_t __s0 = __p0; \ - uint16x4_t __s1 = __p1; \ - uint16x4_t __ret; \ - __ret = (uint16x4_t) __builtin_neon_vset_lane_i16(__s0, (int8x8_t)__s1, __p2); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vset_lane_s8(__p0, __p1, __p2) __extension__ ({ \ - int8_t __s0 = __p0; \ - int8x8_t __s1 = __p1; \ - int8x8_t __ret; \ - __ret = (int8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__s1, __p2); \ - __ret; \ -}) -#else -#define vset_lane_s8(__p0, __p1, __p2) __extension__ ({ \ - int8_t __s0 = __p0; \ - int8x8_t __s1 = __p1; \ - int8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - int8x8_t __ret; \ - __ret = (int8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__rev1, __p2); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#define __noswap_vset_lane_s8(__p0, __p1, __p2) __extension__ ({ \ 
- int8_t __s0 = __p0; \ - int8x8_t __s1 = __p1; \ - int8x8_t __ret; \ - __ret = (int8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__s1, __p2); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vset_lane_f32(__p0, __p1, __p2) __extension__ ({ \ - float32_t __s0 = __p0; \ - float32x2_t __s1 = __p1; \ - float32x2_t __ret; \ - __ret = (float32x2_t) __builtin_neon_vset_lane_f32(__s0, (int8x8_t)__s1, __p2); \ - __ret; \ -}) -#else -#define vset_lane_f32(__p0, __p1, __p2) __extension__ ({ \ - float32_t __s0 = __p0; \ - float32x2_t __s1 = __p1; \ - float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - float32x2_t __ret; \ - __ret = (float32x2_t) __builtin_neon_vset_lane_f32(__s0, (int8x8_t)__rev1, __p2); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#define __noswap_vset_lane_f32(__p0, __p1, __p2) __extension__ ({ \ - float32_t __s0 = __p0; \ - float32x2_t __s1 = __p1; \ - float32x2_t __ret; \ - __ret = (float32x2_t) __builtin_neon_vset_lane_f32(__s0, (int8x8_t)__s1, __p2); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vset_lane_f16(__p0, __p1, __p2) __extension__ ({ \ - float16_t __s0 = __p0; \ - float16x4_t __s1 = __p1; \ - float16x4_t __ret; \ - __ret = (float16x4_t) __builtin_neon_vset_lane_f16(__s0, (int8x8_t)__s1, __p2); \ - __ret; \ -}) -#else -#define vset_lane_f16(__p0, __p1, __p2) __extension__ ({ \ - float16_t __s0 = __p0; \ - float16x4_t __s1 = __p1; \ - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - float16x4_t __ret; \ - __ret = (float16x4_t) __builtin_neon_vset_lane_f16(__s0, (int8x8_t)__rev1, __p2); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vset_lane_s32(__p0, __p1, __p2) __extension__ ({ \ - int32_t __s0 = __p0; \ - int32x2_t __s1 = __p1; \ - int32x2_t __ret; \ - __ret = (int32x2_t) __builtin_neon_vset_lane_i32(__s0, (int8x8_t)__s1, __p2); \ - __ret; \ -}) -#else -#define vset_lane_s32(__p0, __p1, __p2) __extension__ ({ \ - int32_t __s0 = __p0; \ - int32x2_t __s1 = __p1; \ - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - int32x2_t __ret; \ - __ret = (int32x2_t) __builtin_neon_vset_lane_i32(__s0, (int8x8_t)__rev1, __p2); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#define __noswap_vset_lane_s32(__p0, __p1, __p2) __extension__ ({ \ - int32_t __s0 = __p0; \ - int32x2_t __s1 = __p1; \ - int32x2_t __ret; \ - __ret = (int32x2_t) __builtin_neon_vset_lane_i32(__s0, (int8x8_t)__s1, __p2); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vset_lane_s64(__p0, __p1, __p2) __extension__ ({ \ - int64_t __s0 = __p0; \ - int64x1_t __s1 = __p1; \ - int64x1_t __ret; \ - __ret = (int64x1_t) __builtin_neon_vset_lane_i64(__s0, (int8x8_t)__s1, __p2); \ - __ret; \ -}) -#else -#define vset_lane_s64(__p0, __p1, __p2) __extension__ ({ \ - int64_t __s0 = __p0; \ - int64x1_t __s1 = __p1; \ - int64x1_t __ret; \ - __ret = (int64x1_t) __builtin_neon_vset_lane_i64(__s0, (int8x8_t)__s1, __p2); \ - __ret; \ -}) -#define __noswap_vset_lane_s64(__p0, __p1, __p2) __extension__ ({ \ - int64_t __s0 = __p0; \ - int64x1_t __s1 = __p1; \ - int64x1_t __ret; \ - __ret = (int64x1_t) __builtin_neon_vset_lane_i64(__s0, (int8x8_t)__s1, __p2); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vset_lane_s16(__p0, __p1, __p2) __extension__ ({ \ - int16_t __s0 = __p0; \ - int16x4_t __s1 = __p1; \ - int16x4_t __ret; \ - __ret = (int16x4_t) 
__builtin_neon_vset_lane_i16(__s0, (int8x8_t)__s1, __p2); \ - __ret; \ -}) -#else -#define vset_lane_s16(__p0, __p1, __p2) __extension__ ({ \ - int16_t __s0 = __p0; \ - int16x4_t __s1 = __p1; \ - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - int16x4_t __ret; \ - __ret = (int16x4_t) __builtin_neon_vset_lane_i16(__s0, (int8x8_t)__rev1, __p2); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#define __noswap_vset_lane_s16(__p0, __p1, __p2) __extension__ ({ \ - int16_t __s0 = __p0; \ - int16x4_t __s1 = __p1; \ - int16x4_t __ret; \ - __ret = (int16x4_t) __builtin_neon_vset_lane_i16(__s0, (int8x8_t)__s1, __p2); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vshlq_u8(uint8x16_t __p0, int8x16_t __p1) { - uint8x16_t __ret; - __ret = (uint8x16_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); - return __ret; -} -#else -__ai uint8x16_t vshlq_u8(uint8x16_t __p0, int8x16_t __p1) { - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __ret; - __ret = (uint8x16_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vshlq_u32(uint32x4_t __p0, int32x4_t __p1) { - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); - return __ret; -} -#else -__ai uint32x4_t vshlq_u32(uint32x4_t __p0, int32x4_t __p1) { - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vshlq_u64(uint64x2_t __p0, int64x2_t __p1) { - uint64x2_t __ret; - __ret = (uint64x2_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); - return __ret; -} -#else -__ai uint64x2_t vshlq_u64(uint64x2_t __p0, int64x2_t __p1) { - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint64x2_t __ret; - __ret = (uint64x2_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vshlq_u16(uint16x8_t __p0, int16x8_t __p1) { - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); - return __ret; -} -#else -__ai uint16x8_t vshlq_u16(uint16x8_t __p0, int16x8_t __p1) { - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vshlq_s8(int8x16_t __p0, int8x16_t __p1) { - int8x16_t __ret; - __ret = (int8x16_t) 
__builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); - return __ret; -} -#else -__ai int8x16_t vshlq_s8(int8x16_t __p0, int8x16_t __p1) { - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __ret; - __ret = (int8x16_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vshlq_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); - return __ret; -} -#else -__ai int32x4_t vshlq_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vshlq_s64(int64x2_t __p0, int64x2_t __p1) { - int64x2_t __ret; - __ret = (int64x2_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 35); - return __ret; -} -#else -__ai int64x2_t vshlq_s64(int64x2_t __p0, int64x2_t __p1) { - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - int64x2_t __ret; - __ret = (int64x2_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vshlq_s16(int16x8_t __p0, int16x8_t __p1) { - int16x8_t __ret; - __ret = (int16x8_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); - return __ret; -} -#else -__ai int16x8_t vshlq_s16(int16x8_t __p0, int16x8_t __p1) { - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __ret; - __ret = (int16x8_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vshl_u8(uint8x8_t __p0, int8x8_t __p1) { - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 16); - return __ret; -} -#else -__ai uint8x8_t vshl_u8(uint8x8_t __p0, int8x8_t __p1) { - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vshl_u32(uint32x2_t __p0, int32x2_t __p1) { - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 18); - return __ret; -} -#else -__ai uint32x2_t vshl_u32(uint32x2_t __p0, int32x2_t __p1) { - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __rev1; __rev1 
= __builtin_shufflevector(__p1, __p1, 1, 0); - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x1_t vshl_u64(uint64x1_t __p0, int64x1_t __p1) { - uint64x1_t __ret; - __ret = (uint64x1_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 19); - return __ret; -} -#else -__ai uint64x1_t vshl_u64(uint64x1_t __p0, int64x1_t __p1) { - uint64x1_t __ret; - __ret = (uint64x1_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 19); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vshl_u16(uint16x4_t __p0, int16x4_t __p1) { - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 17); - return __ret; -} -#else -__ai uint16x4_t vshl_u16(uint16x4_t __p0, int16x4_t __p1) { - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vshl_s8(int8x8_t __p0, int8x8_t __p1) { - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 0); - return __ret; -} -#else -__ai int8x8_t vshl_s8(int8x8_t __p0, int8x8_t __p1) { - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vshl_s32(int32x2_t __p0, int32x2_t __p1) { - int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 2); - return __ret; -} -#else -__ai int32x2_t vshl_s32(int32x2_t __p0, int32x2_t __p1) { - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x1_t vshl_s64(int64x1_t __p0, int64x1_t __p1) { - int64x1_t __ret; - __ret = (int64x1_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 3); - return __ret; -} -#else -__ai int64x1_t vshl_s64(int64x1_t __p0, int64x1_t __p1) { - int64x1_t __ret; - __ret = (int64x1_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 3); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vshl_s16(int16x4_t __p0, int16x4_t __p1) { - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 1); - return __ret; -} -#else -__ai int16x4_t vshl_s16(int16x4_t __p0, int16x4_t __p1) { - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ 
-#define vshlq_n_u8(__p0, __p1) __extension__ ({ \ - uint8x16_t __s0 = __p0; \ - uint8x16_t __ret; \ - __ret = (uint8x16_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 48); \ - __ret; \ -}) -#else -#define vshlq_n_u8(__p0, __p1) __extension__ ({ \ - uint8x16_t __s0 = __p0; \ - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint8x16_t __ret; \ - __ret = (uint8x16_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 48); \ - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vshlq_n_u32(__p0, __p1) __extension__ ({ \ - uint32x4_t __s0 = __p0; \ - uint32x4_t __ret; \ - __ret = (uint32x4_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 50); \ - __ret; \ -}) -#else -#define vshlq_n_u32(__p0, __p1) __extension__ ({ \ - uint32x4_t __s0 = __p0; \ - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - uint32x4_t __ret; \ - __ret = (uint32x4_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 50); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vshlq_n_u64(__p0, __p1) __extension__ ({ \ - uint64x2_t __s0 = __p0; \ - uint64x2_t __ret; \ - __ret = (uint64x2_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 51); \ - __ret; \ -}) -#else -#define vshlq_n_u64(__p0, __p1) __extension__ ({ \ - uint64x2_t __s0 = __p0; \ - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - uint64x2_t __ret; \ - __ret = (uint64x2_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 51); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vshlq_n_u16(__p0, __p1) __extension__ ({ \ - uint16x8_t __s0 = __p0; \ - uint16x8_t __ret; \ - __ret = (uint16x8_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 49); \ - __ret; \ -}) -#else -#define vshlq_n_u16(__p0, __p1) __extension__ ({ \ - uint16x8_t __s0 = __p0; \ - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint16x8_t __ret; \ - __ret = (uint16x8_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 49); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vshlq_n_s8(__p0, __p1) __extension__ ({ \ - int8x16_t __s0 = __p0; \ - int8x16_t __ret; \ - __ret = (int8x16_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 32); \ - __ret; \ -}) -#else -#define vshlq_n_s8(__p0, __p1) __extension__ ({ \ - int8x16_t __s0 = __p0; \ - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - int8x16_t __ret; \ - __ret = (int8x16_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 32); \ - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vshlq_n_s32(__p0, __p1) __extension__ ({ \ - int32x4_t __s0 = __p0; \ - int32x4_t __ret; \ - __ret = (int32x4_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 34); \ - __ret; \ -}) -#else -#define vshlq_n_s32(__p0, __p1) __extension__ ({ \ - int32x4_t __s0 = __p0; \ - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - int32x4_t __ret; \ - __ret = (int32x4_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 34); \ - __ret = 
__builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vshlq_n_s64(__p0, __p1) __extension__ ({ \ - int64x2_t __s0 = __p0; \ - int64x2_t __ret; \ - __ret = (int64x2_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 35); \ - __ret; \ -}) -#else -#define vshlq_n_s64(__p0, __p1) __extension__ ({ \ - int64x2_t __s0 = __p0; \ - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - int64x2_t __ret; \ - __ret = (int64x2_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 35); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vshlq_n_s16(__p0, __p1) __extension__ ({ \ - int16x8_t __s0 = __p0; \ - int16x8_t __ret; \ - __ret = (int16x8_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 33); \ - __ret; \ -}) -#else -#define vshlq_n_s16(__p0, __p1) __extension__ ({ \ - int16x8_t __s0 = __p0; \ - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x8_t __ret; \ - __ret = (int16x8_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 33); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vshl_n_u8(__p0, __p1) __extension__ ({ \ - uint8x8_t __s0 = __p0; \ - uint8x8_t __ret; \ - __ret = (uint8x8_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 16); \ - __ret; \ -}) -#else -#define vshl_n_u8(__p0, __p1) __extension__ ({ \ - uint8x8_t __s0 = __p0; \ - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint8x8_t __ret; \ - __ret = (uint8x8_t) __builtin_neon_vshl_n_v((int8x8_t)__rev0, __p1, 16); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vshl_n_u32(__p0, __p1) __extension__ ({ \ - uint32x2_t __s0 = __p0; \ - uint32x2_t __ret; \ - __ret = (uint32x2_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 18); \ - __ret; \ -}) -#else -#define vshl_n_u32(__p0, __p1) __extension__ ({ \ - uint32x2_t __s0 = __p0; \ - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - uint32x2_t __ret; \ - __ret = (uint32x2_t) __builtin_neon_vshl_n_v((int8x8_t)__rev0, __p1, 18); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vshl_n_u64(__p0, __p1) __extension__ ({ \ - uint64x1_t __s0 = __p0; \ - uint64x1_t __ret; \ - __ret = (uint64x1_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 19); \ - __ret; \ -}) -#else -#define vshl_n_u64(__p0, __p1) __extension__ ({ \ - uint64x1_t __s0 = __p0; \ - uint64x1_t __ret; \ - __ret = (uint64x1_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 19); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vshl_n_u16(__p0, __p1) __extension__ ({ \ - uint16x4_t __s0 = __p0; \ - uint16x4_t __ret; \ - __ret = (uint16x4_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 17); \ - __ret; \ -}) -#else -#define vshl_n_u16(__p0, __p1) __extension__ ({ \ - uint16x4_t __s0 = __p0; \ - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - uint16x4_t __ret; \ - __ret = (uint16x4_t) __builtin_neon_vshl_n_v((int8x8_t)__rev0, __p1, 17); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vshl_n_s8(__p0, __p1) __extension__ ({ \ - int8x8_t __s0 = __p0; \ - int8x8_t __ret; \ - __ret = (int8x8_t) 
__builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 0); \ - __ret; \ -}) -#else -#define vshl_n_s8(__p0, __p1) __extension__ ({ \ - int8x8_t __s0 = __p0; \ - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - int8x8_t __ret; \ - __ret = (int8x8_t) __builtin_neon_vshl_n_v((int8x8_t)__rev0, __p1, 0); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vshl_n_s32(__p0, __p1) __extension__ ({ \ - int32x2_t __s0 = __p0; \ - int32x2_t __ret; \ - __ret = (int32x2_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 2); \ - __ret; \ -}) -#else -#define vshl_n_s32(__p0, __p1) __extension__ ({ \ - int32x2_t __s0 = __p0; \ - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - int32x2_t __ret; \ - __ret = (int32x2_t) __builtin_neon_vshl_n_v((int8x8_t)__rev0, __p1, 2); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vshl_n_s64(__p0, __p1) __extension__ ({ \ - int64x1_t __s0 = __p0; \ - int64x1_t __ret; \ - __ret = (int64x1_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 3); \ - __ret; \ -}) -#else -#define vshl_n_s64(__p0, __p1) __extension__ ({ \ - int64x1_t __s0 = __p0; \ - int64x1_t __ret; \ - __ret = (int64x1_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 3); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vshl_n_s16(__p0, __p1) __extension__ ({ \ - int16x4_t __s0 = __p0; \ - int16x4_t __ret; \ - __ret = (int16x4_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 1); \ - __ret; \ -}) -#else -#define vshl_n_s16(__p0, __p1) __extension__ ({ \ - int16x4_t __s0 = __p0; \ - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - int16x4_t __ret; \ - __ret = (int16x4_t) __builtin_neon_vshl_n_v((int8x8_t)__rev0, __p1, 1); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vshll_n_u8(__p0, __p1) __extension__ ({ \ - uint8x8_t __s0 = __p0; \ - uint16x8_t __ret; \ - __ret = (uint16x8_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 49); \ - __ret; \ -}) -#else -#define vshll_n_u8(__p0, __p1) __extension__ ({ \ - uint8x8_t __s0 = __p0; \ - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint16x8_t __ret; \ - __ret = (uint16x8_t) __builtin_neon_vshll_n_v((int8x8_t)__rev0, __p1, 49); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#define __noswap_vshll_n_u8(__p0, __p1) __extension__ ({ \ - uint8x8_t __s0 = __p0; \ - uint16x8_t __ret; \ - __ret = (uint16x8_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 49); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vshll_n_u32(__p0, __p1) __extension__ ({ \ - uint32x2_t __s0 = __p0; \ - uint64x2_t __ret; \ - __ret = (uint64x2_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 51); \ - __ret; \ -}) -#else -#define vshll_n_u32(__p0, __p1) __extension__ ({ \ - uint32x2_t __s0 = __p0; \ - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - uint64x2_t __ret; \ - __ret = (uint64x2_t) __builtin_neon_vshll_n_v((int8x8_t)__rev0, __p1, 51); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#define __noswap_vshll_n_u32(__p0, __p1) __extension__ ({ \ - uint32x2_t __s0 = __p0; \ - uint64x2_t __ret; \ - __ret = (uint64x2_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 51); \ - __ret; \ -}) -#endif - -#ifdef 
__LITTLE_ENDIAN__ -#define vshll_n_u16(__p0, __p1) __extension__ ({ \ - uint16x4_t __s0 = __p0; \ - uint32x4_t __ret; \ - __ret = (uint32x4_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 50); \ - __ret; \ -}) -#else -#define vshll_n_u16(__p0, __p1) __extension__ ({ \ - uint16x4_t __s0 = __p0; \ - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - uint32x4_t __ret; \ - __ret = (uint32x4_t) __builtin_neon_vshll_n_v((int8x8_t)__rev0, __p1, 50); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#define __noswap_vshll_n_u16(__p0, __p1) __extension__ ({ \ - uint16x4_t __s0 = __p0; \ - uint32x4_t __ret; \ - __ret = (uint32x4_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 50); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vshll_n_s8(__p0, __p1) __extension__ ({ \ - int8x8_t __s0 = __p0; \ - int16x8_t __ret; \ - __ret = (int16x8_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 33); \ - __ret; \ -}) -#else -#define vshll_n_s8(__p0, __p1) __extension__ ({ \ - int8x8_t __s0 = __p0; \ - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x8_t __ret; \ - __ret = (int16x8_t) __builtin_neon_vshll_n_v((int8x8_t)__rev0, __p1, 33); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#define __noswap_vshll_n_s8(__p0, __p1) __extension__ ({ \ - int8x8_t __s0 = __p0; \ - int16x8_t __ret; \ - __ret = (int16x8_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 33); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vshll_n_s32(__p0, __p1) __extension__ ({ \ - int32x2_t __s0 = __p0; \ - int64x2_t __ret; \ - __ret = (int64x2_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 35); \ - __ret; \ -}) -#else -#define vshll_n_s32(__p0, __p1) __extension__ ({ \ - int32x2_t __s0 = __p0; \ - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - int64x2_t __ret; \ - __ret = (int64x2_t) __builtin_neon_vshll_n_v((int8x8_t)__rev0, __p1, 35); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#define __noswap_vshll_n_s32(__p0, __p1) __extension__ ({ \ - int32x2_t __s0 = __p0; \ - int64x2_t __ret; \ - __ret = (int64x2_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 35); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vshll_n_s16(__p0, __p1) __extension__ ({ \ - int16x4_t __s0 = __p0; \ - int32x4_t __ret; \ - __ret = (int32x4_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 34); \ - __ret; \ -}) -#else -#define vshll_n_s16(__p0, __p1) __extension__ ({ \ - int16x4_t __s0 = __p0; \ - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - int32x4_t __ret; \ - __ret = (int32x4_t) __builtin_neon_vshll_n_v((int8x8_t)__rev0, __p1, 34); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#define __noswap_vshll_n_s16(__p0, __p1) __extension__ ({ \ - int16x4_t __s0 = __p0; \ - int32x4_t __ret; \ - __ret = (int32x4_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 34); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vshrq_n_u8(__p0, __p1) __extension__ ({ \ - uint8x16_t __s0 = __p0; \ - uint8x16_t __ret; \ - __ret = (uint8x16_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 48); \ - __ret; \ -}) -#else -#define vshrq_n_u8(__p0, __p1) __extension__ ({ \ - uint8x16_t __s0 = __p0; \ - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint8x16_t __ret; \ - __ret = 
(uint8x16_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 48); \ - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vshrq_n_u32(__p0, __p1) __extension__ ({ \ - uint32x4_t __s0 = __p0; \ - uint32x4_t __ret; \ - __ret = (uint32x4_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 50); \ - __ret; \ -}) -#else -#define vshrq_n_u32(__p0, __p1) __extension__ ({ \ - uint32x4_t __s0 = __p0; \ - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - uint32x4_t __ret; \ - __ret = (uint32x4_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 50); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vshrq_n_u64(__p0, __p1) __extension__ ({ \ - uint64x2_t __s0 = __p0; \ - uint64x2_t __ret; \ - __ret = (uint64x2_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 51); \ - __ret; \ -}) -#else -#define vshrq_n_u64(__p0, __p1) __extension__ ({ \ - uint64x2_t __s0 = __p0; \ - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - uint64x2_t __ret; \ - __ret = (uint64x2_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 51); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vshrq_n_u16(__p0, __p1) __extension__ ({ \ - uint16x8_t __s0 = __p0; \ - uint16x8_t __ret; \ - __ret = (uint16x8_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 49); \ - __ret; \ -}) -#else -#define vshrq_n_u16(__p0, __p1) __extension__ ({ \ - uint16x8_t __s0 = __p0; \ - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint16x8_t __ret; \ - __ret = (uint16x8_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 49); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vshrq_n_s8(__p0, __p1) __extension__ ({ \ - int8x16_t __s0 = __p0; \ - int8x16_t __ret; \ - __ret = (int8x16_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 32); \ - __ret; \ -}) -#else -#define vshrq_n_s8(__p0, __p1) __extension__ ({ \ - int8x16_t __s0 = __p0; \ - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - int8x16_t __ret; \ - __ret = (int8x16_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 32); \ - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vshrq_n_s32(__p0, __p1) __extension__ ({ \ - int32x4_t __s0 = __p0; \ - int32x4_t __ret; \ - __ret = (int32x4_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 34); \ - __ret; \ -}) -#else -#define vshrq_n_s32(__p0, __p1) __extension__ ({ \ - int32x4_t __s0 = __p0; \ - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - int32x4_t __ret; \ - __ret = (int32x4_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 34); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vshrq_n_s64(__p0, __p1) __extension__ ({ \ - int64x2_t __s0 = __p0; \ - int64x2_t __ret; \ - __ret = (int64x2_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 35); \ - __ret; \ -}) -#else -#define vshrq_n_s64(__p0, __p1) __extension__ ({ \ - int64x2_t __s0 = __p0; \ - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - 
int64x2_t __ret; \ - __ret = (int64x2_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 35); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vshrq_n_s16(__p0, __p1) __extension__ ({ \ - int16x8_t __s0 = __p0; \ - int16x8_t __ret; \ - __ret = (int16x8_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 33); \ - __ret; \ -}) -#else -#define vshrq_n_s16(__p0, __p1) __extension__ ({ \ - int16x8_t __s0 = __p0; \ - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x8_t __ret; \ - __ret = (int16x8_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 33); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vshr_n_u8(__p0, __p1) __extension__ ({ \ - uint8x8_t __s0 = __p0; \ - uint8x8_t __ret; \ - __ret = (uint8x8_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 16); \ - __ret; \ -}) -#else -#define vshr_n_u8(__p0, __p1) __extension__ ({ \ - uint8x8_t __s0 = __p0; \ - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint8x8_t __ret; \ - __ret = (uint8x8_t) __builtin_neon_vshr_n_v((int8x8_t)__rev0, __p1, 16); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vshr_n_u32(__p0, __p1) __extension__ ({ \ - uint32x2_t __s0 = __p0; \ - uint32x2_t __ret; \ - __ret = (uint32x2_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 18); \ - __ret; \ -}) -#else -#define vshr_n_u32(__p0, __p1) __extension__ ({ \ - uint32x2_t __s0 = __p0; \ - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - uint32x2_t __ret; \ - __ret = (uint32x2_t) __builtin_neon_vshr_n_v((int8x8_t)__rev0, __p1, 18); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vshr_n_u64(__p0, __p1) __extension__ ({ \ - uint64x1_t __s0 = __p0; \ - uint64x1_t __ret; \ - __ret = (uint64x1_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 19); \ - __ret; \ -}) -#else -#define vshr_n_u64(__p0, __p1) __extension__ ({ \ - uint64x1_t __s0 = __p0; \ - uint64x1_t __ret; \ - __ret = (uint64x1_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 19); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vshr_n_u16(__p0, __p1) __extension__ ({ \ - uint16x4_t __s0 = __p0; \ - uint16x4_t __ret; \ - __ret = (uint16x4_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 17); \ - __ret; \ -}) -#else -#define vshr_n_u16(__p0, __p1) __extension__ ({ \ - uint16x4_t __s0 = __p0; \ - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - uint16x4_t __ret; \ - __ret = (uint16x4_t) __builtin_neon_vshr_n_v((int8x8_t)__rev0, __p1, 17); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vshr_n_s8(__p0, __p1) __extension__ ({ \ - int8x8_t __s0 = __p0; \ - int8x8_t __ret; \ - __ret = (int8x8_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 0); \ - __ret; \ -}) -#else -#define vshr_n_s8(__p0, __p1) __extension__ ({ \ - int8x8_t __s0 = __p0; \ - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - int8x8_t __ret; \ - __ret = (int8x8_t) __builtin_neon_vshr_n_v((int8x8_t)__rev0, __p1, 0); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define 
vshr_n_s32(__p0, __p1) __extension__ ({ \ - int32x2_t __s0 = __p0; \ - int32x2_t __ret; \ - __ret = (int32x2_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 2); \ - __ret; \ -}) -#else -#define vshr_n_s32(__p0, __p1) __extension__ ({ \ - int32x2_t __s0 = __p0; \ - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - int32x2_t __ret; \ - __ret = (int32x2_t) __builtin_neon_vshr_n_v((int8x8_t)__rev0, __p1, 2); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vshr_n_s64(__p0, __p1) __extension__ ({ \ - int64x1_t __s0 = __p0; \ - int64x1_t __ret; \ - __ret = (int64x1_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 3); \ - __ret; \ -}) -#else -#define vshr_n_s64(__p0, __p1) __extension__ ({ \ - int64x1_t __s0 = __p0; \ - int64x1_t __ret; \ - __ret = (int64x1_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 3); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vshr_n_s16(__p0, __p1) __extension__ ({ \ - int16x4_t __s0 = __p0; \ - int16x4_t __ret; \ - __ret = (int16x4_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 1); \ - __ret; \ -}) -#else -#define vshr_n_s16(__p0, __p1) __extension__ ({ \ - int16x4_t __s0 = __p0; \ - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - int16x4_t __ret; \ - __ret = (int16x4_t) __builtin_neon_vshr_n_v((int8x8_t)__rev0, __p1, 1); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vshrn_n_u32(__p0, __p1) __extension__ ({ \ - uint32x4_t __s0 = __p0; \ - uint16x4_t __ret; \ - __ret = (uint16x4_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 17); \ - __ret; \ -}) -#else -#define vshrn_n_u32(__p0, __p1) __extension__ ({ \ - uint32x4_t __s0 = __p0; \ - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - uint16x4_t __ret; \ - __ret = (uint16x4_t) __builtin_neon_vshrn_n_v((int8x16_t)__rev0, __p1, 17); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#define __noswap_vshrn_n_u32(__p0, __p1) __extension__ ({ \ - uint32x4_t __s0 = __p0; \ - uint16x4_t __ret; \ - __ret = (uint16x4_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 17); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vshrn_n_u64(__p0, __p1) __extension__ ({ \ - uint64x2_t __s0 = __p0; \ - uint32x2_t __ret; \ - __ret = (uint32x2_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 18); \ - __ret; \ -}) -#else -#define vshrn_n_u64(__p0, __p1) __extension__ ({ \ - uint64x2_t __s0 = __p0; \ - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - uint32x2_t __ret; \ - __ret = (uint32x2_t) __builtin_neon_vshrn_n_v((int8x16_t)__rev0, __p1, 18); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#define __noswap_vshrn_n_u64(__p0, __p1) __extension__ ({ \ - uint64x2_t __s0 = __p0; \ - uint32x2_t __ret; \ - __ret = (uint32x2_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 18); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vshrn_n_u16(__p0, __p1) __extension__ ({ \ - uint16x8_t __s0 = __p0; \ - uint8x8_t __ret; \ - __ret = (uint8x8_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 16); \ - __ret; \ -}) -#else -#define vshrn_n_u16(__p0, __p1) __extension__ ({ \ - uint16x8_t __s0 = __p0; \ - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint8x8_t __ret; \ - __ret = (uint8x8_t) __builtin_neon_vshrn_n_v((int8x16_t)__rev0, __p1, 16); \ - 
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#define __noswap_vshrn_n_u16(__p0, __p1) __extension__ ({ \ - uint16x8_t __s0 = __p0; \ - uint8x8_t __ret; \ - __ret = (uint8x8_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 16); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vshrn_n_s32(__p0, __p1) __extension__ ({ \ - int32x4_t __s0 = __p0; \ - int16x4_t __ret; \ - __ret = (int16x4_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 1); \ - __ret; \ -}) -#else -#define vshrn_n_s32(__p0, __p1) __extension__ ({ \ - int32x4_t __s0 = __p0; \ - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - int16x4_t __ret; \ - __ret = (int16x4_t) __builtin_neon_vshrn_n_v((int8x16_t)__rev0, __p1, 1); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#define __noswap_vshrn_n_s32(__p0, __p1) __extension__ ({ \ - int32x4_t __s0 = __p0; \ - int16x4_t __ret; \ - __ret = (int16x4_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vshrn_n_s64(__p0, __p1) __extension__ ({ \ - int64x2_t __s0 = __p0; \ - int32x2_t __ret; \ - __ret = (int32x2_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 2); \ - __ret; \ -}) -#else -#define vshrn_n_s64(__p0, __p1) __extension__ ({ \ - int64x2_t __s0 = __p0; \ - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - int32x2_t __ret; \ - __ret = (int32x2_t) __builtin_neon_vshrn_n_v((int8x16_t)__rev0, __p1, 2); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#define __noswap_vshrn_n_s64(__p0, __p1) __extension__ ({ \ - int64x2_t __s0 = __p0; \ - int32x2_t __ret; \ - __ret = (int32x2_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 2); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vshrn_n_s16(__p0, __p1) __extension__ ({ \ - int16x8_t __s0 = __p0; \ - int8x8_t __ret; \ - __ret = (int8x8_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 0); \ - __ret; \ -}) -#else -#define vshrn_n_s16(__p0, __p1) __extension__ ({ \ - int16x8_t __s0 = __p0; \ - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - int8x8_t __ret; \ - __ret = (int8x8_t) __builtin_neon_vshrn_n_v((int8x16_t)__rev0, __p1, 0); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#define __noswap_vshrn_n_s16(__p0, __p1) __extension__ ({ \ - int16x8_t __s0 = __p0; \ - int8x8_t __ret; \ - __ret = (int8x8_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vsli_n_p8(__p0, __p1, __p2) __extension__ ({ \ - poly8x8_t __s0 = __p0; \ - poly8x8_t __s1 = __p1; \ - poly8x8_t __ret; \ - __ret = (poly8x8_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 4); \ - __ret; \ -}) -#else -#define vsli_n_p8(__p0, __p1, __p2) __extension__ ({ \ - poly8x8_t __s0 = __p0; \ - poly8x8_t __s1 = __p1; \ - poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - poly8x8_t __ret; \ - __ret = (poly8x8_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 4); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vsli_n_p16(__p0, __p1, __p2) __extension__ ({ \ - poly16x4_t __s0 = __p0; \ - poly16x4_t __s1 = __p1; \ - poly16x4_t __ret; \ 
- __ret = (poly16x4_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 5); \ - __ret; \ -}) -#else -#define vsli_n_p16(__p0, __p1, __p2) __extension__ ({ \ - poly16x4_t __s0 = __p0; \ - poly16x4_t __s1 = __p1; \ - poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - poly16x4_t __ret; \ - __ret = (poly16x4_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 5); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vsliq_n_p8(__p0, __p1, __p2) __extension__ ({ \ - poly8x16_t __s0 = __p0; \ - poly8x16_t __s1 = __p1; \ - poly8x16_t __ret; \ - __ret = (poly8x16_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 36); \ - __ret; \ -}) -#else -#define vsliq_n_p8(__p0, __p1, __p2) __extension__ ({ \ - poly8x16_t __s0 = __p0; \ - poly8x16_t __s1 = __p1; \ - poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - poly8x16_t __ret; \ - __ret = (poly8x16_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 36); \ - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vsliq_n_p16(__p0, __p1, __p2) __extension__ ({ \ - poly16x8_t __s0 = __p0; \ - poly16x8_t __s1 = __p1; \ - poly16x8_t __ret; \ - __ret = (poly16x8_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 37); \ - __ret; \ -}) -#else -#define vsliq_n_p16(__p0, __p1, __p2) __extension__ ({ \ - poly16x8_t __s0 = __p0; \ - poly16x8_t __s1 = __p1; \ - poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - poly16x8_t __ret; \ - __ret = (poly16x8_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 37); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vsliq_n_u8(__p0, __p1, __p2) __extension__ ({ \ - uint8x16_t __s0 = __p0; \ - uint8x16_t __s1 = __p1; \ - uint8x16_t __ret; \ - __ret = (uint8x16_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 48); \ - __ret; \ -}) -#else -#define vsliq_n_u8(__p0, __p1, __p2) __extension__ ({ \ - uint8x16_t __s0 = __p0; \ - uint8x16_t __s1 = __p1; \ - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint8x16_t __ret; \ - __ret = (uint8x16_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 48); \ - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vsliq_n_u32(__p0, __p1, __p2) __extension__ ({ \ - uint32x4_t __s0 = __p0; \ - uint32x4_t __s1 = __p1; \ - uint32x4_t __ret; \ - __ret = (uint32x4_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 50); \ - __ret; \ -}) -#else -#define vsliq_n_u32(__p0, __p1, __p2) __extension__ ({ \ - uint32x4_t __s0 = __p0; \ - uint32x4_t __s1 = 
__p1; \ - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - uint32x4_t __ret; \ - __ret = (uint32x4_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 50); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vsliq_n_u64(__p0, __p1, __p2) __extension__ ({ \ - uint64x2_t __s0 = __p0; \ - uint64x2_t __s1 = __p1; \ - uint64x2_t __ret; \ - __ret = (uint64x2_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 51); \ - __ret; \ -}) -#else -#define vsliq_n_u64(__p0, __p1, __p2) __extension__ ({ \ - uint64x2_t __s0 = __p0; \ - uint64x2_t __s1 = __p1; \ - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - uint64x2_t __ret; \ - __ret = (uint64x2_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 51); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vsliq_n_u16(__p0, __p1, __p2) __extension__ ({ \ - uint16x8_t __s0 = __p0; \ - uint16x8_t __s1 = __p1; \ - uint16x8_t __ret; \ - __ret = (uint16x8_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 49); \ - __ret; \ -}) -#else -#define vsliq_n_u16(__p0, __p1, __p2) __extension__ ({ \ - uint16x8_t __s0 = __p0; \ - uint16x8_t __s1 = __p1; \ - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint16x8_t __ret; \ - __ret = (uint16x8_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 49); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vsliq_n_s8(__p0, __p1, __p2) __extension__ ({ \ - int8x16_t __s0 = __p0; \ - int8x16_t __s1 = __p1; \ - int8x16_t __ret; \ - __ret = (int8x16_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 32); \ - __ret; \ -}) -#else -#define vsliq_n_s8(__p0, __p1, __p2) __extension__ ({ \ - int8x16_t __s0 = __p0; \ - int8x16_t __s1 = __p1; \ - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - int8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - int8x16_t __ret; \ - __ret = (int8x16_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 32); \ - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vsliq_n_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x4_t __s0 = __p0; \ - int32x4_t __s1 = __p1; \ - int32x4_t __ret; \ - __ret = (int32x4_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 34); \ - __ret; \ -}) -#else -#define vsliq_n_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x4_t __s0 = __p0; \ - int32x4_t __s1 = __p1; \ - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - int32x4_t __ret; \ - __ret = (int32x4_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif 
- -#ifdef __LITTLE_ENDIAN__ -#define vsliq_n_s64(__p0, __p1, __p2) __extension__ ({ \ - int64x2_t __s0 = __p0; \ - int64x2_t __s1 = __p1; \ - int64x2_t __ret; \ - __ret = (int64x2_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 35); \ - __ret; \ -}) -#else -#define vsliq_n_s64(__p0, __p1, __p2) __extension__ ({ \ - int64x2_t __s0 = __p0; \ - int64x2_t __s1 = __p1; \ - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - int64x2_t __ret; \ - __ret = (int64x2_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 35); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vsliq_n_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x8_t __s0 = __p0; \ - int16x8_t __s1 = __p1; \ - int16x8_t __ret; \ - __ret = (int16x8_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 33); \ - __ret; \ -}) -#else -#define vsliq_n_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x8_t __s0 = __p0; \ - int16x8_t __s1 = __p1; \ - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x8_t __ret; \ - __ret = (int16x8_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vsli_n_u8(__p0, __p1, __p2) __extension__ ({ \ - uint8x8_t __s0 = __p0; \ - uint8x8_t __s1 = __p1; \ - uint8x8_t __ret; \ - __ret = (uint8x8_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 16); \ - __ret; \ -}) -#else -#define vsli_n_u8(__p0, __p1, __p2) __extension__ ({ \ - uint8x8_t __s0 = __p0; \ - uint8x8_t __s1 = __p1; \ - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint8x8_t __ret; \ - __ret = (uint8x8_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 16); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vsli_n_u32(__p0, __p1, __p2) __extension__ ({ \ - uint32x2_t __s0 = __p0; \ - uint32x2_t __s1 = __p1; \ - uint32x2_t __ret; \ - __ret = (uint32x2_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 18); \ - __ret; \ -}) -#else -#define vsli_n_u32(__p0, __p1, __p2) __extension__ ({ \ - uint32x2_t __s0 = __p0; \ - uint32x2_t __s1 = __p1; \ - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - uint32x2_t __ret; \ - __ret = (uint32x2_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 18); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vsli_n_u64(__p0, __p1, __p2) __extension__ ({ \ - uint64x1_t __s0 = __p0; \ - uint64x1_t __s1 = __p1; \ - uint64x1_t __ret; \ - __ret = (uint64x1_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \ - __ret; \ -}) -#else -#define vsli_n_u64(__p0, __p1, __p2) __extension__ ({ \ - uint64x1_t __s0 = __p0; \ - uint64x1_t __s1 = __p1; \ - uint64x1_t __ret; \ - __ret = (uint64x1_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \ - 
__ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vsli_n_u16(__p0, __p1, __p2) __extension__ ({ \ - uint16x4_t __s0 = __p0; \ - uint16x4_t __s1 = __p1; \ - uint16x4_t __ret; \ - __ret = (uint16x4_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 17); \ - __ret; \ -}) -#else -#define vsli_n_u16(__p0, __p1, __p2) __extension__ ({ \ - uint16x4_t __s0 = __p0; \ - uint16x4_t __s1 = __p1; \ - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - uint16x4_t __ret; \ - __ret = (uint16x4_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 17); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vsli_n_s8(__p0, __p1, __p2) __extension__ ({ \ - int8x8_t __s0 = __p0; \ - int8x8_t __s1 = __p1; \ - int8x8_t __ret; \ - __ret = (int8x8_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 0); \ - __ret; \ -}) -#else -#define vsli_n_s8(__p0, __p1, __p2) __extension__ ({ \ - int8x8_t __s0 = __p0; \ - int8x8_t __s1 = __p1; \ - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - int8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - int8x8_t __ret; \ - __ret = (int8x8_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 0); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vsli_n_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x2_t __s0 = __p0; \ - int32x2_t __s1 = __p1; \ - int32x2_t __ret; \ - __ret = (int32x2_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 2); \ - __ret; \ -}) -#else -#define vsli_n_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x2_t __s0 = __p0; \ - int32x2_t __s1 = __p1; \ - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - int32x2_t __ret; \ - __ret = (int32x2_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vsli_n_s64(__p0, __p1, __p2) __extension__ ({ \ - int64x1_t __s0 = __p0; \ - int64x1_t __s1 = __p1; \ - int64x1_t __ret; \ - __ret = (int64x1_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \ - __ret; \ -}) -#else -#define vsli_n_s64(__p0, __p1, __p2) __extension__ ({ \ - int64x1_t __s0 = __p0; \ - int64x1_t __s1 = __p1; \ - int64x1_t __ret; \ - __ret = (int64x1_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vsli_n_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x4_t __s0 = __p0; \ - int16x4_t __s1 = __p1; \ - int16x4_t __ret; \ - __ret = (int16x4_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 1); \ - __ret; \ -}) -#else -#define vsli_n_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x4_t __s0 = __p0; \ - int16x4_t __s1 = __p1; \ - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - int16x4_t __ret; \ - __ret = (int16x4_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef 
__LITTLE_ENDIAN__ -#define vsraq_n_u8(__p0, __p1, __p2) __extension__ ({ \ - uint8x16_t __s0 = __p0; \ - uint8x16_t __s1 = __p1; \ - uint8x16_t __ret; \ - __ret = (uint8x16_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 48); \ - __ret; \ -}) -#else -#define vsraq_n_u8(__p0, __p1, __p2) __extension__ ({ \ - uint8x16_t __s0 = __p0; \ - uint8x16_t __s1 = __p1; \ - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint8x16_t __ret; \ - __ret = (uint8x16_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 48); \ - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vsraq_n_u32(__p0, __p1, __p2) __extension__ ({ \ - uint32x4_t __s0 = __p0; \ - uint32x4_t __s1 = __p1; \ - uint32x4_t __ret; \ - __ret = (uint32x4_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 50); \ - __ret; \ -}) -#else -#define vsraq_n_u32(__p0, __p1, __p2) __extension__ ({ \ - uint32x4_t __s0 = __p0; \ - uint32x4_t __s1 = __p1; \ - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - uint32x4_t __ret; \ - __ret = (uint32x4_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 50); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vsraq_n_u64(__p0, __p1, __p2) __extension__ ({ \ - uint64x2_t __s0 = __p0; \ - uint64x2_t __s1 = __p1; \ - uint64x2_t __ret; \ - __ret = (uint64x2_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 51); \ - __ret; \ -}) -#else -#define vsraq_n_u64(__p0, __p1, __p2) __extension__ ({ \ - uint64x2_t __s0 = __p0; \ - uint64x2_t __s1 = __p1; \ - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - uint64x2_t __ret; \ - __ret = (uint64x2_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 51); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vsraq_n_u16(__p0, __p1, __p2) __extension__ ({ \ - uint16x8_t __s0 = __p0; \ - uint16x8_t __s1 = __p1; \ - uint16x8_t __ret; \ - __ret = (uint16x8_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 49); \ - __ret; \ -}) -#else -#define vsraq_n_u16(__p0, __p1, __p2) __extension__ ({ \ - uint16x8_t __s0 = __p0; \ - uint16x8_t __s1 = __p1; \ - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint16x8_t __ret; \ - __ret = (uint16x8_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 49); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vsraq_n_s8(__p0, __p1, __p2) __extension__ ({ \ - int8x16_t __s0 = __p0; \ - int8x16_t __s1 = __p1; \ - int8x16_t __ret; \ - __ret = (int8x16_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 32); \ - __ret; \ -}) -#else -#define vsraq_n_s8(__p0, __p1, __p2) __extension__ ({ \ - int8x16_t __s0 = __p0; \ - 
int8x16_t __s1 = __p1; \ - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - int8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - int8x16_t __ret; \ - __ret = (int8x16_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 32); \ - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vsraq_n_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x4_t __s0 = __p0; \ - int32x4_t __s1 = __p1; \ - int32x4_t __ret; \ - __ret = (int32x4_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 34); \ - __ret; \ -}) -#else -#define vsraq_n_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x4_t __s0 = __p0; \ - int32x4_t __s1 = __p1; \ - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - int32x4_t __ret; \ - __ret = (int32x4_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vsraq_n_s64(__p0, __p1, __p2) __extension__ ({ \ - int64x2_t __s0 = __p0; \ - int64x2_t __s1 = __p1; \ - int64x2_t __ret; \ - __ret = (int64x2_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 35); \ - __ret; \ -}) -#else -#define vsraq_n_s64(__p0, __p1, __p2) __extension__ ({ \ - int64x2_t __s0 = __p0; \ - int64x2_t __s1 = __p1; \ - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - int64x2_t __ret; \ - __ret = (int64x2_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 35); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vsraq_n_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x8_t __s0 = __p0; \ - int16x8_t __s1 = __p1; \ - int16x8_t __ret; \ - __ret = (int16x8_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 33); \ - __ret; \ -}) -#else -#define vsraq_n_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x8_t __s0 = __p0; \ - int16x8_t __s1 = __p1; \ - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x8_t __ret; \ - __ret = (int16x8_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vsra_n_u8(__p0, __p1, __p2) __extension__ ({ \ - uint8x8_t __s0 = __p0; \ - uint8x8_t __s1 = __p1; \ - uint8x8_t __ret; \ - __ret = (uint8x8_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 16); \ - __ret; \ -}) -#else -#define vsra_n_u8(__p0, __p1, __p2) __extension__ ({ \ - uint8x8_t __s0 = __p0; \ - uint8x8_t __s1 = __p1; \ - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint8x8_t __ret; \ - __ret = (uint8x8_t) __builtin_neon_vsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 16); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - 
__ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vsra_n_u32(__p0, __p1, __p2) __extension__ ({ \ - uint32x2_t __s0 = __p0; \ - uint32x2_t __s1 = __p1; \ - uint32x2_t __ret; \ - __ret = (uint32x2_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 18); \ - __ret; \ -}) -#else -#define vsra_n_u32(__p0, __p1, __p2) __extension__ ({ \ - uint32x2_t __s0 = __p0; \ - uint32x2_t __s1 = __p1; \ - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - uint32x2_t __ret; \ - __ret = (uint32x2_t) __builtin_neon_vsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 18); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vsra_n_u64(__p0, __p1, __p2) __extension__ ({ \ - uint64x1_t __s0 = __p0; \ - uint64x1_t __s1 = __p1; \ - uint64x1_t __ret; \ - __ret = (uint64x1_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \ - __ret; \ -}) -#else -#define vsra_n_u64(__p0, __p1, __p2) __extension__ ({ \ - uint64x1_t __s0 = __p0; \ - uint64x1_t __s1 = __p1; \ - uint64x1_t __ret; \ - __ret = (uint64x1_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vsra_n_u16(__p0, __p1, __p2) __extension__ ({ \ - uint16x4_t __s0 = __p0; \ - uint16x4_t __s1 = __p1; \ - uint16x4_t __ret; \ - __ret = (uint16x4_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 17); \ - __ret; \ -}) -#else -#define vsra_n_u16(__p0, __p1, __p2) __extension__ ({ \ - uint16x4_t __s0 = __p0; \ - uint16x4_t __s1 = __p1; \ - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - uint16x4_t __ret; \ - __ret = (uint16x4_t) __builtin_neon_vsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 17); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vsra_n_s8(__p0, __p1, __p2) __extension__ ({ \ - int8x8_t __s0 = __p0; \ - int8x8_t __s1 = __p1; \ - int8x8_t __ret; \ - __ret = (int8x8_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 0); \ - __ret; \ -}) -#else -#define vsra_n_s8(__p0, __p1, __p2) __extension__ ({ \ - int8x8_t __s0 = __p0; \ - int8x8_t __s1 = __p1; \ - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - int8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - int8x8_t __ret; \ - __ret = (int8x8_t) __builtin_neon_vsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 0); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vsra_n_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x2_t __s0 = __p0; \ - int32x2_t __s1 = __p1; \ - int32x2_t __ret; \ - __ret = (int32x2_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 2); \ - __ret; \ -}) -#else -#define vsra_n_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x2_t __s0 = __p0; \ - int32x2_t __s1 = __p1; \ - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - int32x2_t __ret; \ - __ret = (int32x2_t) __builtin_neon_vsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef 
__LITTLE_ENDIAN__ -#define vsra_n_s64(__p0, __p1, __p2) __extension__ ({ \ - int64x1_t __s0 = __p0; \ - int64x1_t __s1 = __p1; \ - int64x1_t __ret; \ - __ret = (int64x1_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \ - __ret; \ -}) -#else -#define vsra_n_s64(__p0, __p1, __p2) __extension__ ({ \ - int64x1_t __s0 = __p0; \ - int64x1_t __s1 = __p1; \ - int64x1_t __ret; \ - __ret = (int64x1_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vsra_n_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x4_t __s0 = __p0; \ - int16x4_t __s1 = __p1; \ - int16x4_t __ret; \ - __ret = (int16x4_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 1); \ - __ret; \ -}) -#else -#define vsra_n_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x4_t __s0 = __p0; \ - int16x4_t __s1 = __p1; \ - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - int16x4_t __ret; \ - __ret = (int16x4_t) __builtin_neon_vsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vsri_n_p8(__p0, __p1, __p2) __extension__ ({ \ - poly8x8_t __s0 = __p0; \ - poly8x8_t __s1 = __p1; \ - poly8x8_t __ret; \ - __ret = (poly8x8_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 4); \ - __ret; \ -}) -#else -#define vsri_n_p8(__p0, __p1, __p2) __extension__ ({ \ - poly8x8_t __s0 = __p0; \ - poly8x8_t __s1 = __p1; \ - poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - poly8x8_t __ret; \ - __ret = (poly8x8_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 4); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vsri_n_p16(__p0, __p1, __p2) __extension__ ({ \ - poly16x4_t __s0 = __p0; \ - poly16x4_t __s1 = __p1; \ - poly16x4_t __ret; \ - __ret = (poly16x4_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 5); \ - __ret; \ -}) -#else -#define vsri_n_p16(__p0, __p1, __p2) __extension__ ({ \ - poly16x4_t __s0 = __p0; \ - poly16x4_t __s1 = __p1; \ - poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - poly16x4_t __ret; \ - __ret = (poly16x4_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 5); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vsriq_n_p8(__p0, __p1, __p2) __extension__ ({ \ - poly8x16_t __s0 = __p0; \ - poly8x16_t __s1 = __p1; \ - poly8x16_t __ret; \ - __ret = (poly8x16_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 36); \ - __ret; \ -}) -#else -#define vsriq_n_p8(__p0, __p1, __p2) __extension__ ({ \ - poly8x16_t __s0 = __p0; \ - poly8x16_t __s1 = __p1; \ - poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - poly8x16_t __ret; \ - __ret = (poly8x16_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 36); \ - __ret = 
__builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vsriq_n_p16(__p0, __p1, __p2) __extension__ ({ \ - poly16x8_t __s0 = __p0; \ - poly16x8_t __s1 = __p1; \ - poly16x8_t __ret; \ - __ret = (poly16x8_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 37); \ - __ret; \ -}) -#else -#define vsriq_n_p16(__p0, __p1, __p2) __extension__ ({ \ - poly16x8_t __s0 = __p0; \ - poly16x8_t __s1 = __p1; \ - poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - poly16x8_t __ret; \ - __ret = (poly16x8_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 37); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vsriq_n_u8(__p0, __p1, __p2) __extension__ ({ \ - uint8x16_t __s0 = __p0; \ - uint8x16_t __s1 = __p1; \ - uint8x16_t __ret; \ - __ret = (uint8x16_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 48); \ - __ret; \ -}) -#else -#define vsriq_n_u8(__p0, __p1, __p2) __extension__ ({ \ - uint8x16_t __s0 = __p0; \ - uint8x16_t __s1 = __p1; \ - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint8x16_t __ret; \ - __ret = (uint8x16_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 48); \ - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vsriq_n_u32(__p0, __p1, __p2) __extension__ ({ \ - uint32x4_t __s0 = __p0; \ - uint32x4_t __s1 = __p1; \ - uint32x4_t __ret; \ - __ret = (uint32x4_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 50); \ - __ret; \ -}) -#else -#define vsriq_n_u32(__p0, __p1, __p2) __extension__ ({ \ - uint32x4_t __s0 = __p0; \ - uint32x4_t __s1 = __p1; \ - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - uint32x4_t __ret; \ - __ret = (uint32x4_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 50); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vsriq_n_u64(__p0, __p1, __p2) __extension__ ({ \ - uint64x2_t __s0 = __p0; \ - uint64x2_t __s1 = __p1; \ - uint64x2_t __ret; \ - __ret = (uint64x2_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 51); \ - __ret; \ -}) -#else -#define vsriq_n_u64(__p0, __p1, __p2) __extension__ ({ \ - uint64x2_t __s0 = __p0; \ - uint64x2_t __s1 = __p1; \ - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - uint64x2_t __ret; \ - __ret = (uint64x2_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 51); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vsriq_n_u16(__p0, __p1, __p2) __extension__ ({ \ - uint16x8_t __s0 = __p0; \ - uint16x8_t __s1 = __p1; \ - uint16x8_t __ret; \ - __ret = (uint16x8_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, 
(int8x16_t)__s1, __p2, 49); \ - __ret; \ -}) -#else -#define vsriq_n_u16(__p0, __p1, __p2) __extension__ ({ \ - uint16x8_t __s0 = __p0; \ - uint16x8_t __s1 = __p1; \ - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint16x8_t __ret; \ - __ret = (uint16x8_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 49); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vsriq_n_s8(__p0, __p1, __p2) __extension__ ({ \ - int8x16_t __s0 = __p0; \ - int8x16_t __s1 = __p1; \ - int8x16_t __ret; \ - __ret = (int8x16_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 32); \ - __ret; \ -}) -#else -#define vsriq_n_s8(__p0, __p1, __p2) __extension__ ({ \ - int8x16_t __s0 = __p0; \ - int8x16_t __s1 = __p1; \ - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - int8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - int8x16_t __ret; \ - __ret = (int8x16_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 32); \ - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vsriq_n_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x4_t __s0 = __p0; \ - int32x4_t __s1 = __p1; \ - int32x4_t __ret; \ - __ret = (int32x4_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 34); \ - __ret; \ -}) -#else -#define vsriq_n_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x4_t __s0 = __p0; \ - int32x4_t __s1 = __p1; \ - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - int32x4_t __ret; \ - __ret = (int32x4_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vsriq_n_s64(__p0, __p1, __p2) __extension__ ({ \ - int64x2_t __s0 = __p0; \ - int64x2_t __s1 = __p1; \ - int64x2_t __ret; \ - __ret = (int64x2_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 35); \ - __ret; \ -}) -#else -#define vsriq_n_s64(__p0, __p1, __p2) __extension__ ({ \ - int64x2_t __s0 = __p0; \ - int64x2_t __s1 = __p1; \ - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - int64x2_t __ret; \ - __ret = (int64x2_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 35); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vsriq_n_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x8_t __s0 = __p0; \ - int16x8_t __s1 = __p1; \ - int16x8_t __ret; \ - __ret = (int16x8_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 33); \ - __ret; \ -}) -#else -#define vsriq_n_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x8_t __s0 = __p0; \ - int16x8_t __s1 = __p1; \ - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x8_t __ret; \ - __ret = (int16x8_t) 
__builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vsri_n_u8(__p0, __p1, __p2) __extension__ ({ \ - uint8x8_t __s0 = __p0; \ - uint8x8_t __s1 = __p1; \ - uint8x8_t __ret; \ - __ret = (uint8x8_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 16); \ - __ret; \ -}) -#else -#define vsri_n_u8(__p0, __p1, __p2) __extension__ ({ \ - uint8x8_t __s0 = __p0; \ - uint8x8_t __s1 = __p1; \ - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint8x8_t __ret; \ - __ret = (uint8x8_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 16); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vsri_n_u32(__p0, __p1, __p2) __extension__ ({ \ - uint32x2_t __s0 = __p0; \ - uint32x2_t __s1 = __p1; \ - uint32x2_t __ret; \ - __ret = (uint32x2_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 18); \ - __ret; \ -}) -#else -#define vsri_n_u32(__p0, __p1, __p2) __extension__ ({ \ - uint32x2_t __s0 = __p0; \ - uint32x2_t __s1 = __p1; \ - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - uint32x2_t __ret; \ - __ret = (uint32x2_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 18); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vsri_n_u64(__p0, __p1, __p2) __extension__ ({ \ - uint64x1_t __s0 = __p0; \ - uint64x1_t __s1 = __p1; \ - uint64x1_t __ret; \ - __ret = (uint64x1_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \ - __ret; \ -}) -#else -#define vsri_n_u64(__p0, __p1, __p2) __extension__ ({ \ - uint64x1_t __s0 = __p0; \ - uint64x1_t __s1 = __p1; \ - uint64x1_t __ret; \ - __ret = (uint64x1_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vsri_n_u16(__p0, __p1, __p2) __extension__ ({ \ - uint16x4_t __s0 = __p0; \ - uint16x4_t __s1 = __p1; \ - uint16x4_t __ret; \ - __ret = (uint16x4_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 17); \ - __ret; \ -}) -#else -#define vsri_n_u16(__p0, __p1, __p2) __extension__ ({ \ - uint16x4_t __s0 = __p0; \ - uint16x4_t __s1 = __p1; \ - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - uint16x4_t __ret; \ - __ret = (uint16x4_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 17); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vsri_n_s8(__p0, __p1, __p2) __extension__ ({ \ - int8x8_t __s0 = __p0; \ - int8x8_t __s1 = __p1; \ - int8x8_t __ret; \ - __ret = (int8x8_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 0); \ - __ret; \ -}) -#else -#define vsri_n_s8(__p0, __p1, __p2) __extension__ ({ \ - int8x8_t __s0 = __p0; \ - int8x8_t __s1 = __p1; \ - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - int8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - int8x8_t __ret; \ - 
__ret = (int8x8_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 0); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vsri_n_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x2_t __s0 = __p0; \ - int32x2_t __s1 = __p1; \ - int32x2_t __ret; \ - __ret = (int32x2_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 2); \ - __ret; \ -}) -#else -#define vsri_n_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x2_t __s0 = __p0; \ - int32x2_t __s1 = __p1; \ - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - int32x2_t __ret; \ - __ret = (int32x2_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vsri_n_s64(__p0, __p1, __p2) __extension__ ({ \ - int64x1_t __s0 = __p0; \ - int64x1_t __s1 = __p1; \ - int64x1_t __ret; \ - __ret = (int64x1_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \ - __ret; \ -}) -#else -#define vsri_n_s64(__p0, __p1, __p2) __extension__ ({ \ - int64x1_t __s0 = __p0; \ - int64x1_t __s1 = __p1; \ - int64x1_t __ret; \ - __ret = (int64x1_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vsri_n_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x4_t __s0 = __p0; \ - int16x4_t __s1 = __p1; \ - int16x4_t __ret; \ - __ret = (int16x4_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 1); \ - __ret; \ -}) -#else -#define vsri_n_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x4_t __s0 = __p0; \ - int16x4_t __s1 = __p1; \ - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - int16x4_t __ret; \ - __ret = (int16x4_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1_p8(__p0, __p1) __extension__ ({ \ - poly8x8_t __s1 = __p1; \ - __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 4); \ -}) -#else -#define vst1_p8(__p0, __p1) __extension__ ({ \ - poly8x8_t __s1 = __p1; \ - poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 4); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1_p16(__p0, __p1) __extension__ ({ \ - poly16x4_t __s1 = __p1; \ - __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 5); \ -}) -#else -#define vst1_p16(__p0, __p1) __extension__ ({ \ - poly16x4_t __s1 = __p1; \ - poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 5); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1q_p8(__p0, __p1) __extension__ ({ \ - poly8x16_t __s1 = __p1; \ - __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 36); \ -}) -#else -#define vst1q_p8(__p0, __p1) __extension__ ({ \ - poly8x16_t __s1 = __p1; \ - poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 36); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1q_p16(__p0, __p1) __extension__ ({ \ - poly16x8_t __s1 = __p1; \ - __builtin_neon_vst1q_v(__p0, 
(int8x16_t)__s1, 37); \ -}) -#else -#define vst1q_p16(__p0, __p1) __extension__ ({ \ - poly16x8_t __s1 = __p1; \ - poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 37); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1q_u8(__p0, __p1) __extension__ ({ \ - uint8x16_t __s1 = __p1; \ - __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 48); \ -}) -#else -#define vst1q_u8(__p0, __p1) __extension__ ({ \ - uint8x16_t __s1 = __p1; \ - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 48); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1q_u32(__p0, __p1) __extension__ ({ \ - uint32x4_t __s1 = __p1; \ - __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 50); \ -}) -#else -#define vst1q_u32(__p0, __p1) __extension__ ({ \ - uint32x4_t __s1 = __p1; \ - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 50); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1q_u64(__p0, __p1) __extension__ ({ \ - uint64x2_t __s1 = __p1; \ - __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 51); \ -}) -#else -#define vst1q_u64(__p0, __p1) __extension__ ({ \ - uint64x2_t __s1 = __p1; \ - uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 51); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1q_u16(__p0, __p1) __extension__ ({ \ - uint16x8_t __s1 = __p1; \ - __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 49); \ -}) -#else -#define vst1q_u16(__p0, __p1) __extension__ ({ \ - uint16x8_t __s1 = __p1; \ - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 49); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1q_s8(__p0, __p1) __extension__ ({ \ - int8x16_t __s1 = __p1; \ - __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 32); \ -}) -#else -#define vst1q_s8(__p0, __p1) __extension__ ({ \ - int8x16_t __s1 = __p1; \ - int8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 32); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1q_f32(__p0, __p1) __extension__ ({ \ - float32x4_t __s1 = __p1; \ - __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 41); \ -}) -#else -#define vst1q_f32(__p0, __p1) __extension__ ({ \ - float32x4_t __s1 = __p1; \ - float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 41); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1q_f16(__p0, __p1) __extension__ ({ \ - float16x8_t __s1 = __p1; \ - __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 40); \ -}) -#else -#define vst1q_f16(__p0, __p1) __extension__ ({ \ - float16x8_t __s1 = __p1; \ - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 40); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1q_s32(__p0, __p1) __extension__ ({ \ - int32x4_t __s1 = __p1; \ - __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 34); \ -}) -#else -#define vst1q_s32(__p0, __p1) __extension__ ({ \ - int32x4_t __s1 = __p1; \ - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 
34); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1q_s64(__p0, __p1) __extension__ ({ \ - int64x2_t __s1 = __p1; \ - __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 35); \ -}) -#else -#define vst1q_s64(__p0, __p1) __extension__ ({ \ - int64x2_t __s1 = __p1; \ - int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 35); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1q_s16(__p0, __p1) __extension__ ({ \ - int16x8_t __s1 = __p1; \ - __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 33); \ -}) -#else -#define vst1q_s16(__p0, __p1) __extension__ ({ \ - int16x8_t __s1 = __p1; \ - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 33); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1_u8(__p0, __p1) __extension__ ({ \ - uint8x8_t __s1 = __p1; \ - __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 16); \ -}) -#else -#define vst1_u8(__p0, __p1) __extension__ ({ \ - uint8x8_t __s1 = __p1; \ - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 16); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1_u32(__p0, __p1) __extension__ ({ \ - uint32x2_t __s1 = __p1; \ - __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 18); \ -}) -#else -#define vst1_u32(__p0, __p1) __extension__ ({ \ - uint32x2_t __s1 = __p1; \ - uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 18); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1_u64(__p0, __p1) __extension__ ({ \ - uint64x1_t __s1 = __p1; \ - __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 19); \ -}) -#else -#define vst1_u64(__p0, __p1) __extension__ ({ \ - uint64x1_t __s1 = __p1; \ - __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 19); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1_u16(__p0, __p1) __extension__ ({ \ - uint16x4_t __s1 = __p1; \ - __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 17); \ -}) -#else -#define vst1_u16(__p0, __p1) __extension__ ({ \ - uint16x4_t __s1 = __p1; \ - uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 17); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1_s8(__p0, __p1) __extension__ ({ \ - int8x8_t __s1 = __p1; \ - __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 0); \ -}) -#else -#define vst1_s8(__p0, __p1) __extension__ ({ \ - int8x8_t __s1 = __p1; \ - int8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 0); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1_f32(__p0, __p1) __extension__ ({ \ - float32x2_t __s1 = __p1; \ - __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 9); \ -}) -#else -#define vst1_f32(__p0, __p1) __extension__ ({ \ - float32x2_t __s1 = __p1; \ - float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 9); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1_f16(__p0, __p1) __extension__ ({ \ - float16x4_t __s1 = __p1; \ - __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 8); \ -}) -#else -#define vst1_f16(__p0, __p1) __extension__ ({ \ - float16x4_t __s1 = __p1; \ - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 8); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1_s32(__p0, __p1) 
__extension__ ({ \ - int32x2_t __s1 = __p1; \ - __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 2); \ -}) -#else -#define vst1_s32(__p0, __p1) __extension__ ({ \ - int32x2_t __s1 = __p1; \ - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 2); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1_s64(__p0, __p1) __extension__ ({ \ - int64x1_t __s1 = __p1; \ - __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 3); \ -}) -#else -#define vst1_s64(__p0, __p1) __extension__ ({ \ - int64x1_t __s1 = __p1; \ - __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 3); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1_s16(__p0, __p1) __extension__ ({ \ - int16x4_t __s1 = __p1; \ - __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 1); \ -}) -#else -#define vst1_s16(__p0, __p1) __extension__ ({ \ - int16x4_t __s1 = __p1; \ - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 1); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1_lane_p8(__p0, __p1, __p2) __extension__ ({ \ - poly8x8_t __s1 = __p1; \ - __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 4); \ -}) -#else -#define vst1_lane_p8(__p0, __p1, __p2) __extension__ ({ \ - poly8x8_t __s1 = __p1; \ - poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 4); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1_lane_p16(__p0, __p1, __p2) __extension__ ({ \ - poly16x4_t __s1 = __p1; \ - __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 5); \ -}) -#else -#define vst1_lane_p16(__p0, __p1, __p2) __extension__ ({ \ - poly16x4_t __s1 = __p1; \ - poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 5); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1q_lane_p8(__p0, __p1, __p2) __extension__ ({ \ - poly8x16_t __s1 = __p1; \ - __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 36); \ -}) -#else -#define vst1q_lane_p8(__p0, __p1, __p2) __extension__ ({ \ - poly8x16_t __s1 = __p1; \ - poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 36); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1q_lane_p16(__p0, __p1, __p2) __extension__ ({ \ - poly16x8_t __s1 = __p1; \ - __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 37); \ -}) -#else -#define vst1q_lane_p16(__p0, __p1, __p2) __extension__ ({ \ - poly16x8_t __s1 = __p1; \ - poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 37); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1q_lane_u8(__p0, __p1, __p2) __extension__ ({ \ - uint8x16_t __s1 = __p1; \ - __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 48); \ -}) -#else -#define vst1q_lane_u8(__p0, __p1, __p2) __extension__ ({ \ - uint8x16_t __s1 = __p1; \ - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 48); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1q_lane_u32(__p0, __p1, __p2) __extension__ ({ \ - uint32x4_t __s1 = __p1; \ - __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 50); \ -}) -#else -#define vst1q_lane_u32(__p0, __p1, 
__p2) __extension__ ({ \ - uint32x4_t __s1 = __p1; \ - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 50); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1q_lane_u64(__p0, __p1, __p2) __extension__ ({ \ - uint64x2_t __s1 = __p1; \ - __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 51); \ -}) -#else -#define vst1q_lane_u64(__p0, __p1, __p2) __extension__ ({ \ - uint64x2_t __s1 = __p1; \ - uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 51); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1q_lane_u16(__p0, __p1, __p2) __extension__ ({ \ - uint16x8_t __s1 = __p1; \ - __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 49); \ -}) -#else -#define vst1q_lane_u16(__p0, __p1, __p2) __extension__ ({ \ - uint16x8_t __s1 = __p1; \ - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 49); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1q_lane_s8(__p0, __p1, __p2) __extension__ ({ \ - int8x16_t __s1 = __p1; \ - __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 32); \ -}) -#else -#define vst1q_lane_s8(__p0, __p1, __p2) __extension__ ({ \ - int8x16_t __s1 = __p1; \ - int8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 32); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1q_lane_f32(__p0, __p1, __p2) __extension__ ({ \ - float32x4_t __s1 = __p1; \ - __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 41); \ -}) -#else -#define vst1q_lane_f32(__p0, __p1, __p2) __extension__ ({ \ - float32x4_t __s1 = __p1; \ - float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 41); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1q_lane_f16(__p0, __p1, __p2) __extension__ ({ \ - float16x8_t __s1 = __p1; \ - __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 40); \ -}) -#else -#define vst1q_lane_f16(__p0, __p1, __p2) __extension__ ({ \ - float16x8_t __s1 = __p1; \ - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 40); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1q_lane_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x4_t __s1 = __p1; \ - __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 34); \ -}) -#else -#define vst1q_lane_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x4_t __s1 = __p1; \ - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 34); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1q_lane_s64(__p0, __p1, __p2) __extension__ ({ \ - int64x2_t __s1 = __p1; \ - __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 35); \ -}) -#else -#define vst1q_lane_s64(__p0, __p1, __p2) __extension__ ({ \ - int64x2_t __s1 = __p1; \ - int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 35); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1q_lane_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x8_t __s1 = __p1; \ - __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 33); \ 
-}) -#else -#define vst1q_lane_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x8_t __s1 = __p1; \ - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 33); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1_lane_u8(__p0, __p1, __p2) __extension__ ({ \ - uint8x8_t __s1 = __p1; \ - __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 16); \ -}) -#else -#define vst1_lane_u8(__p0, __p1, __p2) __extension__ ({ \ - uint8x8_t __s1 = __p1; \ - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 16); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1_lane_u32(__p0, __p1, __p2) __extension__ ({ \ - uint32x2_t __s1 = __p1; \ - __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 18); \ -}) -#else -#define vst1_lane_u32(__p0, __p1, __p2) __extension__ ({ \ - uint32x2_t __s1 = __p1; \ - uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 18); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1_lane_u64(__p0, __p1, __p2) __extension__ ({ \ - uint64x1_t __s1 = __p1; \ - __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 19); \ -}) -#else -#define vst1_lane_u64(__p0, __p1, __p2) __extension__ ({ \ - uint64x1_t __s1 = __p1; \ - __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 19); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1_lane_u16(__p0, __p1, __p2) __extension__ ({ \ - uint16x4_t __s1 = __p1; \ - __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 17); \ -}) -#else -#define vst1_lane_u16(__p0, __p1, __p2) __extension__ ({ \ - uint16x4_t __s1 = __p1; \ - uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 17); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1_lane_s8(__p0, __p1, __p2) __extension__ ({ \ - int8x8_t __s1 = __p1; \ - __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 0); \ -}) -#else -#define vst1_lane_s8(__p0, __p1, __p2) __extension__ ({ \ - int8x8_t __s1 = __p1; \ - int8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 0); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1_lane_f32(__p0, __p1, __p2) __extension__ ({ \ - float32x2_t __s1 = __p1; \ - __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 9); \ -}) -#else -#define vst1_lane_f32(__p0, __p1, __p2) __extension__ ({ \ - float32x2_t __s1 = __p1; \ - float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 9); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1_lane_f16(__p0, __p1, __p2) __extension__ ({ \ - float16x4_t __s1 = __p1; \ - __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 8); \ -}) -#else -#define vst1_lane_f16(__p0, __p1, __p2) __extension__ ({ \ - float16x4_t __s1 = __p1; \ - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 8); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1_lane_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x2_t __s1 = __p1; \ - __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 2); \ -}) -#else -#define vst1_lane_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x2_t __s1 = __p1; \ - int32x2_t __rev1; 
__rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 2); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1_lane_s64(__p0, __p1, __p2) __extension__ ({ \ - int64x1_t __s1 = __p1; \ - __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 3); \ -}) -#else -#define vst1_lane_s64(__p0, __p1, __p2) __extension__ ({ \ - int64x1_t __s1 = __p1; \ - __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 3); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1_lane_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x4_t __s1 = __p1; \ - __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 1); \ -}) -#else -#define vst1_lane_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x4_t __s1 = __p1; \ - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 1); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst2_p8(__p0, __p1) __extension__ ({ \ - poly8x8x2_t __s1 = __p1; \ - __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 4); \ -}) -#else -#define vst2_p8(__p0, __p1) __extension__ ({ \ - poly8x8x2_t __s1 = __p1; \ - poly8x8x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 4); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst2_p16(__p0, __p1) __extension__ ({ \ - poly16x4x2_t __s1 = __p1; \ - __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 5); \ -}) -#else -#define vst2_p16(__p0, __p1) __extension__ ({ \ - poly16x4x2_t __s1 = __p1; \ - poly16x4x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 5); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst2q_p8(__p0, __p1) __extension__ ({ \ - poly8x16x2_t __s1 = __p1; \ - __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 36); \ -}) -#else -#define vst2q_p8(__p0, __p1) __extension__ ({ \ - poly8x16x2_t __s1 = __p1; \ - poly8x16x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 36); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst2q_p16(__p0, __p1) __extension__ ({ \ - poly16x8x2_t __s1 = __p1; \ - __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 37); \ -}) -#else -#define vst2q_p16(__p0, __p1) __extension__ ({ \ - poly16x8x2_t __s1 = __p1; \ - poly16x8x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 37); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst2q_u8(__p0, __p1) __extension__ ({ \ - uint8x16x2_t __s1 = __p1; \ - __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 48); \ -}) -#else -#define 
vst2q_u8(__p0, __p1) __extension__ ({ \ - uint8x16x2_t __s1 = __p1; \ - uint8x16x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 48); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst2q_u32(__p0, __p1) __extension__ ({ \ - uint32x4x2_t __s1 = __p1; \ - __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 50); \ -}) -#else -#define vst2q_u32(__p0, __p1) __extension__ ({ \ - uint32x4x2_t __s1 = __p1; \ - uint32x4x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 50); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst2q_u16(__p0, __p1) __extension__ ({ \ - uint16x8x2_t __s1 = __p1; \ - __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 49); \ -}) -#else -#define vst2q_u16(__p0, __p1) __extension__ ({ \ - uint16x8x2_t __s1 = __p1; \ - uint16x8x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 49); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst2q_s8(__p0, __p1) __extension__ ({ \ - int8x16x2_t __s1 = __p1; \ - __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 32); \ -}) -#else -#define vst2q_s8(__p0, __p1) __extension__ ({ \ - int8x16x2_t __s1 = __p1; \ - int8x16x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 32); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst2q_f32(__p0, __p1) __extension__ ({ \ - float32x4x2_t __s1 = __p1; \ - __builtin_neon_vst2q_v(__p0, __s1.val[0], __s1.val[1], 41); \ -}) -#else -#define vst2q_f32(__p0, __p1) __extension__ ({ \ - float32x4x2_t __s1 = __p1; \ - float32x4x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __builtin_neon_vst2q_v(__p0, __rev1.val[0], __rev1.val[1], 41); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst2q_f16(__p0, __p1) __extension__ ({ \ - float16x8x2_t __s1 = __p1; \ - __builtin_neon_vst2q_v(__p0, __s1.val[0], __s1.val[1], 40); \ -}) -#else -#define vst2q_f16(__p0, __p1) __extension__ ({ \ - float16x8x2_t __s1 = __p1; \ - float16x8x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst2q_v(__p0, __rev1.val[0], __rev1.val[1], 40); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst2q_s32(__p0, __p1) __extension__ ({ \ - int32x4x2_t __s1 = __p1; \ - __builtin_neon_vst2q_v(__p0, __s1.val[0], __s1.val[1], 34); \ 
-}) -#else -#define vst2q_s32(__p0, __p1) __extension__ ({ \ - int32x4x2_t __s1 = __p1; \ - int32x4x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __builtin_neon_vst2q_v(__p0, __rev1.val[0], __rev1.val[1], 34); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst2q_s16(__p0, __p1) __extension__ ({ \ - int16x8x2_t __s1 = __p1; \ - __builtin_neon_vst2q_v(__p0, __s1.val[0], __s1.val[1], 33); \ -}) -#else -#define vst2q_s16(__p0, __p1) __extension__ ({ \ - int16x8x2_t __s1 = __p1; \ - int16x8x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst2q_v(__p0, __rev1.val[0], __rev1.val[1], 33); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst2_u8(__p0, __p1) __extension__ ({ \ - uint8x8x2_t __s1 = __p1; \ - __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 16); \ -}) -#else -#define vst2_u8(__p0, __p1) __extension__ ({ \ - uint8x8x2_t __s1 = __p1; \ - uint8x8x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 16); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst2_u32(__p0, __p1) __extension__ ({ \ - uint32x2x2_t __s1 = __p1; \ - __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 18); \ -}) -#else -#define vst2_u32(__p0, __p1) __extension__ ({ \ - uint32x2x2_t __s1 = __p1; \ - uint32x2x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 18); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst2_u64(__p0, __p1) __extension__ ({ \ - uint64x1x2_t __s1 = __p1; \ - __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 19); \ -}) -#else -#define vst2_u64(__p0, __p1) __extension__ ({ \ - uint64x1x2_t __s1 = __p1; \ - __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 19); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst2_u16(__p0, __p1) __extension__ ({ \ - uint16x4x2_t __s1 = __p1; \ - __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 17); \ -}) -#else -#define vst2_u16(__p0, __p1) __extension__ ({ \ - uint16x4x2_t __s1 = __p1; \ - uint16x4x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 17); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst2_s8(__p0, __p1) __extension__ ({ \ - int8x8x2_t __s1 = __p1; \ - __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 0); \ -}) -#else -#define vst2_s8(__p0, __p1) __extension__ ({ \ - int8x8x2_t __s1 = __p1; \ - int8x8x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], 
(int8x8_t)__rev1.val[1], 0); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst2_f32(__p0, __p1) __extension__ ({ \ - float32x2x2_t __s1 = __p1; \ - __builtin_neon_vst2_v(__p0, __s1.val[0], __s1.val[1], 9); \ -}) -#else -#define vst2_f32(__p0, __p1) __extension__ ({ \ - float32x2x2_t __s1 = __p1; \ - float32x2x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __builtin_neon_vst2_v(__p0, __rev1.val[0], __rev1.val[1], 9); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst2_f16(__p0, __p1) __extension__ ({ \ - float16x4x2_t __s1 = __p1; \ - __builtin_neon_vst2_v(__p0, __s1.val[0], __s1.val[1], 8); \ -}) -#else -#define vst2_f16(__p0, __p1) __extension__ ({ \ - float16x4x2_t __s1 = __p1; \ - float16x4x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __builtin_neon_vst2_v(__p0, __rev1.val[0], __rev1.val[1], 8); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst2_s32(__p0, __p1) __extension__ ({ \ - int32x2x2_t __s1 = __p1; \ - __builtin_neon_vst2_v(__p0, __s1.val[0], __s1.val[1], 2); \ -}) -#else -#define vst2_s32(__p0, __p1) __extension__ ({ \ - int32x2x2_t __s1 = __p1; \ - int32x2x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __builtin_neon_vst2_v(__p0, __rev1.val[0], __rev1.val[1], 2); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst2_s64(__p0, __p1) __extension__ ({ \ - int64x1x2_t __s1 = __p1; \ - __builtin_neon_vst2_v(__p0, __s1.val[0], __s1.val[1], 3); \ -}) -#else -#define vst2_s64(__p0, __p1) __extension__ ({ \ - int64x1x2_t __s1 = __p1; \ - __builtin_neon_vst2_v(__p0, __s1.val[0], __s1.val[1], 3); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst2_s16(__p0, __p1) __extension__ ({ \ - int16x4x2_t __s1 = __p1; \ - __builtin_neon_vst2_v(__p0, __s1.val[0], __s1.val[1], 1); \ -}) -#else -#define vst2_s16(__p0, __p1) __extension__ ({ \ - int16x4x2_t __s1 = __p1; \ - int16x4x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __builtin_neon_vst2_v(__p0, __rev1.val[0], __rev1.val[1], 1); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst2_lane_p8(__p0, __p1, __p2) __extension__ ({ \ - poly8x8x2_t __s1 = __p1; \ - __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 4); \ -}) -#else -#define vst2_lane_p8(__p0, __p1, __p2) __extension__ ({ \ - poly8x8x2_t __s1 = __p1; \ - poly8x8x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 4); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst2_lane_p16(__p0, __p1, __p2) __extension__ ({ \ - poly16x4x2_t __s1 = __p1; \ - __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 5); \ -}) -#else -#define vst2_lane_p16(__p0, __p1, __p2) __extension__ ({ \ - poly16x4x2_t __s1 = __p1; \ - poly16x4x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = 
__builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 5); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst2q_lane_p16(__p0, __p1, __p2) __extension__ ({ \ - poly16x8x2_t __s1 = __p1; \ - __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 37); \ -}) -#else -#define vst2q_lane_p16(__p0, __p1, __p2) __extension__ ({ \ - poly16x8x2_t __s1 = __p1; \ - poly16x8x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 37); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst2q_lane_u32(__p0, __p1, __p2) __extension__ ({ \ - uint32x4x2_t __s1 = __p1; \ - __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 50); \ -}) -#else -#define vst2q_lane_u32(__p0, __p1, __p2) __extension__ ({ \ - uint32x4x2_t __s1 = __p1; \ - uint32x4x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 50); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst2q_lane_u16(__p0, __p1, __p2) __extension__ ({ \ - uint16x8x2_t __s1 = __p1; \ - __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 49); \ -}) -#else -#define vst2q_lane_u16(__p0, __p1, __p2) __extension__ ({ \ - uint16x8x2_t __s1 = __p1; \ - uint16x8x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 49); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst2q_lane_f32(__p0, __p1, __p2) __extension__ ({ \ - float32x4x2_t __s1 = __p1; \ - __builtin_neon_vst2q_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 41); \ -}) -#else -#define vst2q_lane_f32(__p0, __p1, __p2) __extension__ ({ \ - float32x4x2_t __s1 = __p1; \ - float32x4x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __builtin_neon_vst2q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __p2, 41); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst2q_lane_f16(__p0, __p1, __p2) __extension__ ({ \ - float16x8x2_t __s1 = __p1; \ - __builtin_neon_vst2q_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 40); \ -}) -#else -#define vst2q_lane_f16(__p0, __p1, __p2) __extension__ ({ \ - float16x8x2_t __s1 = __p1; \ - float16x8x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst2q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __p2, 40); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst2q_lane_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x4x2_t __s1 = __p1; \ - __builtin_neon_vst2q_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 34); \ -}) -#else -#define vst2q_lane_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x4x2_t 
__s1 = __p1; \ - int32x4x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __builtin_neon_vst2q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __p2, 34); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst2q_lane_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x8x2_t __s1 = __p1; \ - __builtin_neon_vst2q_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 33); \ -}) -#else -#define vst2q_lane_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x8x2_t __s1 = __p1; \ - int16x8x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst2q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __p2, 33); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst2_lane_u8(__p0, __p1, __p2) __extension__ ({ \ - uint8x8x2_t __s1 = __p1; \ - __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 16); \ -}) -#else -#define vst2_lane_u8(__p0, __p1, __p2) __extension__ ({ \ - uint8x8x2_t __s1 = __p1; \ - uint8x8x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 16); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst2_lane_u32(__p0, __p1, __p2) __extension__ ({ \ - uint32x2x2_t __s1 = __p1; \ - __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 18); \ -}) -#else -#define vst2_lane_u32(__p0, __p1, __p2) __extension__ ({ \ - uint32x2x2_t __s1 = __p1; \ - uint32x2x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 18); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst2_lane_u16(__p0, __p1, __p2) __extension__ ({ \ - uint16x4x2_t __s1 = __p1; \ - __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 17); \ -}) -#else -#define vst2_lane_u16(__p0, __p1, __p2) __extension__ ({ \ - uint16x4x2_t __s1 = __p1; \ - uint16x4x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 17); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst2_lane_s8(__p0, __p1, __p2) __extension__ ({ \ - int8x8x2_t __s1 = __p1; \ - __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 0); \ -}) -#else -#define vst2_lane_s8(__p0, __p1, __p2) __extension__ ({ \ - int8x8x2_t __s1 = __p1; \ - int8x8x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 0); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst2_lane_f32(__p0, __p1, __p2) __extension__ ({ \ - float32x2x2_t __s1 = __p1; \ - __builtin_neon_vst2_lane_v(__p0, __s1.val[0], __s1.val[1], 
__p2, 9); \ -}) -#else -#define vst2_lane_f32(__p0, __p1, __p2) __extension__ ({ \ - float32x2x2_t __s1 = __p1; \ - float32x2x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __builtin_neon_vst2_lane_v(__p0, __rev1.val[0], __rev1.val[1], __p2, 9); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst2_lane_f16(__p0, __p1, __p2) __extension__ ({ \ - float16x4x2_t __s1 = __p1; \ - __builtin_neon_vst2_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 8); \ -}) -#else -#define vst2_lane_f16(__p0, __p1, __p2) __extension__ ({ \ - float16x4x2_t __s1 = __p1; \ - float16x4x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __builtin_neon_vst2_lane_v(__p0, __rev1.val[0], __rev1.val[1], __p2, 8); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst2_lane_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x2x2_t __s1 = __p1; \ - __builtin_neon_vst2_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 2); \ -}) -#else -#define vst2_lane_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x2x2_t __s1 = __p1; \ - int32x2x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __builtin_neon_vst2_lane_v(__p0, __rev1.val[0], __rev1.val[1], __p2, 2); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst2_lane_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x4x2_t __s1 = __p1; \ - __builtin_neon_vst2_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 1); \ -}) -#else -#define vst2_lane_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x4x2_t __s1 = __p1; \ - int16x4x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __builtin_neon_vst2_lane_v(__p0, __rev1.val[0], __rev1.val[1], __p2, 1); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst3_p8(__p0, __p1) __extension__ ({ \ - poly8x8x3_t __s1 = __p1; \ - __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 4); \ -}) -#else -#define vst3_p8(__p0, __p1) __extension__ ({ \ - poly8x8x3_t __s1 = __p1; \ - poly8x8x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 4); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst3_p16(__p0, __p1) __extension__ ({ \ - poly16x4x3_t __s1 = __p1; \ - __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 5); \ -}) -#else -#define vst3_p16(__p0, __p1) __extension__ ({ \ - poly16x4x3_t __s1 = __p1; \ - poly16x4x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ - __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 5); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define 
vst3q_p8(__p0, __p1) __extension__ ({ \ - poly8x16x3_t __s1 = __p1; \ - __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 36); \ -}) -#else -#define vst3q_p8(__p0, __p1) __extension__ ({ \ - poly8x16x3_t __s1 = __p1; \ - poly8x16x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 36); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst3q_p16(__p0, __p1) __extension__ ({ \ - poly16x8x3_t __s1 = __p1; \ - __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 37); \ -}) -#else -#define vst3q_p16(__p0, __p1) __extension__ ({ \ - poly16x8x3_t __s1 = __p1; \ - poly16x8x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 37); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst3q_u8(__p0, __p1) __extension__ ({ \ - uint8x16x3_t __s1 = __p1; \ - __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 48); \ -}) -#else -#define vst3q_u8(__p0, __p1) __extension__ ({ \ - uint8x16x3_t __s1 = __p1; \ - uint8x16x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 48); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst3q_u32(__p0, __p1) __extension__ ({ \ - uint32x4x3_t __s1 = __p1; \ - __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 50); \ -}) -#else -#define vst3q_u32(__p0, __p1) __extension__ ({ \ - uint32x4x3_t __s1 = __p1; \ - uint32x4x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ - __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 50); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst3q_u16(__p0, __p1) __extension__ ({ \ - uint16x8x3_t __s1 = __p1; \ - __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 49); \ -}) -#else -#define vst3q_u16(__p0, __p1) __extension__ ({ \ - uint16x8x3_t __s1 = __p1; \ - uint16x8x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = 
__builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 49); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst3q_s8(__p0, __p1) __extension__ ({ \ - int8x16x3_t __s1 = __p1; \ - __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 32); \ -}) -#else -#define vst3q_s8(__p0, __p1) __extension__ ({ \ - int8x16x3_t __s1 = __p1; \ - int8x16x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 32); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst3q_f32(__p0, __p1) __extension__ ({ \ - float32x4x3_t __s1 = __p1; \ - __builtin_neon_vst3q_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 41); \ -}) -#else -#define vst3q_f32(__p0, __p1) __extension__ ({ \ - float32x4x3_t __s1 = __p1; \ - float32x4x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ - __builtin_neon_vst3q_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 41); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst3q_f16(__p0, __p1) __extension__ ({ \ - float16x8x3_t __s1 = __p1; \ - __builtin_neon_vst3q_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 40); \ -}) -#else -#define vst3q_f16(__p0, __p1) __extension__ ({ \ - float16x8x3_t __s1 = __p1; \ - float16x8x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst3q_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 40); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst3q_s32(__p0, __p1) __extension__ ({ \ - int32x4x3_t __s1 = __p1; \ - __builtin_neon_vst3q_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 34); \ -}) -#else -#define vst3q_s32(__p0, __p1) __extension__ ({ \ - int32x4x3_t __s1 = __p1; \ - int32x4x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ - __builtin_neon_vst3q_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 34); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst3q_s16(__p0, __p1) __extension__ ({ \ - int16x8x3_t __s1 = __p1; \ - __builtin_neon_vst3q_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 33); \ -}) -#else -#define vst3q_s16(__p0, __p1) __extension__ ({ \ - int16x8x3_t __s1 = __p1; \ - int16x8x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], 
__s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst3q_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 33); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst3_u8(__p0, __p1) __extension__ ({ \ - uint8x8x3_t __s1 = __p1; \ - __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 16); \ -}) -#else -#define vst3_u8(__p0, __p1) __extension__ ({ \ - uint8x8x3_t __s1 = __p1; \ - uint8x8x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 16); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst3_u32(__p0, __p1) __extension__ ({ \ - uint32x2x3_t __s1 = __p1; \ - __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 18); \ -}) -#else -#define vst3_u32(__p0, __p1) __extension__ ({ \ - uint32x2x3_t __s1 = __p1; \ - uint32x2x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ - __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 18); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst3_u64(__p0, __p1) __extension__ ({ \ - uint64x1x3_t __s1 = __p1; \ - __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 19); \ -}) -#else -#define vst3_u64(__p0, __p1) __extension__ ({ \ - uint64x1x3_t __s1 = __p1; \ - __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 19); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst3_u16(__p0, __p1) __extension__ ({ \ - uint16x4x3_t __s1 = __p1; \ - __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 17); \ -}) -#else -#define vst3_u16(__p0, __p1) __extension__ ({ \ - uint16x4x3_t __s1 = __p1; \ - uint16x4x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ - __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 17); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst3_s8(__p0, __p1) __extension__ ({ \ - int8x8x3_t __s1 = __p1; \ - __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 0); \ -}) -#else -#define vst3_s8(__p0, __p1) __extension__ ({ \ - int8x8x3_t __s1 = __p1; \ - int8x8x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 0); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst3_f32(__p0, __p1) 
__extension__ ({ \ - float32x2x3_t __s1 = __p1; \ - __builtin_neon_vst3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 9); \ -}) -#else -#define vst3_f32(__p0, __p1) __extension__ ({ \ - float32x2x3_t __s1 = __p1; \ - float32x2x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ - __builtin_neon_vst3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 9); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst3_f16(__p0, __p1) __extension__ ({ \ - float16x4x3_t __s1 = __p1; \ - __builtin_neon_vst3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 8); \ -}) -#else -#define vst3_f16(__p0, __p1) __extension__ ({ \ - float16x4x3_t __s1 = __p1; \ - float16x4x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ - __builtin_neon_vst3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 8); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst3_s32(__p0, __p1) __extension__ ({ \ - int32x2x3_t __s1 = __p1; \ - __builtin_neon_vst3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 2); \ -}) -#else -#define vst3_s32(__p0, __p1) __extension__ ({ \ - int32x2x3_t __s1 = __p1; \ - int32x2x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ - __builtin_neon_vst3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 2); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst3_s64(__p0, __p1) __extension__ ({ \ - int64x1x3_t __s1 = __p1; \ - __builtin_neon_vst3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 3); \ -}) -#else -#define vst3_s64(__p0, __p1) __extension__ ({ \ - int64x1x3_t __s1 = __p1; \ - __builtin_neon_vst3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 3); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst3_s16(__p0, __p1) __extension__ ({ \ - int16x4x3_t __s1 = __p1; \ - __builtin_neon_vst3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 1); \ -}) -#else -#define vst3_s16(__p0, __p1) __extension__ ({ \ - int16x4x3_t __s1 = __p1; \ - int16x4x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ - __builtin_neon_vst3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 1); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst3_lane_p8(__p0, __p1, __p2) __extension__ ({ \ - poly8x8x3_t __s1 = __p1; \ - __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 4); \ -}) -#else -#define vst3_lane_p8(__p0, __p1, __p2) __extension__ ({ \ - poly8x8x3_t __s1 = __p1; \ - poly8x8x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 
(int8x8_t)__rev1.val[2], __p2, 4); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst3_lane_p16(__p0, __p1, __p2) __extension__ ({ \ - poly16x4x3_t __s1 = __p1; \ - __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 5); \ -}) -#else -#define vst3_lane_p16(__p0, __p1, __p2) __extension__ ({ \ - poly16x4x3_t __s1 = __p1; \ - poly16x4x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ - __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 5); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst3q_lane_p16(__p0, __p1, __p2) __extension__ ({ \ - poly16x8x3_t __s1 = __p1; \ - __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 37); \ -}) -#else -#define vst3q_lane_p16(__p0, __p1, __p2) __extension__ ({ \ - poly16x8x3_t __s1 = __p1; \ - poly16x8x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 37); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst3q_lane_u32(__p0, __p1, __p2) __extension__ ({ \ - uint32x4x3_t __s1 = __p1; \ - __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 50); \ -}) -#else -#define vst3q_lane_u32(__p0, __p1, __p2) __extension__ ({ \ - uint32x4x3_t __s1 = __p1; \ - uint32x4x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ - __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 50); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst3q_lane_u16(__p0, __p1, __p2) __extension__ ({ \ - uint16x8x3_t __s1 = __p1; \ - __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 49); \ -}) -#else -#define vst3q_lane_u16(__p0, __p1, __p2) __extension__ ({ \ - uint16x8x3_t __s1 = __p1; \ - uint16x8x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 49); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst3q_lane_f32(__p0, __p1, __p2) __extension__ ({ \ - float32x4x3_t __s1 = __p1; \ - __builtin_neon_vst3q_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 41); \ -}) -#else -#define vst3q_lane_f32(__p0, __p1, __p2) __extension__ ({ \ - float32x4x3_t __s1 = __p1; \ - float32x4x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - 
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ - __builtin_neon_vst3q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 41); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst3q_lane_f16(__p0, __p1, __p2) __extension__ ({ \ - float16x8x3_t __s1 = __p1; \ - __builtin_neon_vst3q_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 40); \ -}) -#else -#define vst3q_lane_f16(__p0, __p1, __p2) __extension__ ({ \ - float16x8x3_t __s1 = __p1; \ - float16x8x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst3q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 40); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst3q_lane_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x4x3_t __s1 = __p1; \ - __builtin_neon_vst3q_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 34); \ -}) -#else -#define vst3q_lane_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x4x3_t __s1 = __p1; \ - int32x4x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ - __builtin_neon_vst3q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 34); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst3q_lane_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x8x3_t __s1 = __p1; \ - __builtin_neon_vst3q_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 33); \ -}) -#else -#define vst3q_lane_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x8x3_t __s1 = __p1; \ - int16x8x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst3q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 33); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst3_lane_u8(__p0, __p1, __p2) __extension__ ({ \ - uint8x8x3_t __s1 = __p1; \ - __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 16); \ -}) -#else -#define vst3_lane_u8(__p0, __p1, __p2) __extension__ ({ \ - uint8x8x3_t __s1 = __p1; \ - uint8x8x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 16); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst3_lane_u32(__p0, __p1, __p2) __extension__ ({ \ - uint32x2x3_t __s1 = __p1; \ - __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 18); \ -}) -#else -#define vst3_lane_u32(__p0, __p1, __p2) __extension__ ({ \ - uint32x2x3_t __s1 = __p1; \ - uint32x2x3_t __rev1; \ - 
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ - __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 18); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst3_lane_u16(__p0, __p1, __p2) __extension__ ({ \ - uint16x4x3_t __s1 = __p1; \ - __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 17); \ -}) -#else -#define vst3_lane_u16(__p0, __p1, __p2) __extension__ ({ \ - uint16x4x3_t __s1 = __p1; \ - uint16x4x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ - __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 17); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst3_lane_s8(__p0, __p1, __p2) __extension__ ({ \ - int8x8x3_t __s1 = __p1; \ - __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 0); \ -}) -#else -#define vst3_lane_s8(__p0, __p1, __p2) __extension__ ({ \ - int8x8x3_t __s1 = __p1; \ - int8x8x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 0); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst3_lane_f32(__p0, __p1, __p2) __extension__ ({ \ - float32x2x3_t __s1 = __p1; \ - __builtin_neon_vst3_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 9); \ -}) -#else -#define vst3_lane_f32(__p0, __p1, __p2) __extension__ ({ \ - float32x2x3_t __s1 = __p1; \ - float32x2x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ - __builtin_neon_vst3_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 9); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst3_lane_f16(__p0, __p1, __p2) __extension__ ({ \ - float16x4x3_t __s1 = __p1; \ - __builtin_neon_vst3_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 8); \ -}) -#else -#define vst3_lane_f16(__p0, __p1, __p2) __extension__ ({ \ - float16x4x3_t __s1 = __p1; \ - float16x4x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ - __builtin_neon_vst3_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 8); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst3_lane_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x2x3_t __s1 = __p1; \ - __builtin_neon_vst3_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 2); \ -}) -#else -#define vst3_lane_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x2x3_t __s1 = __p1; \ - int32x2x3_t 
__rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ - __builtin_neon_vst3_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 2); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst3_lane_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x4x3_t __s1 = __p1; \ - __builtin_neon_vst3_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 1); \ -}) -#else -#define vst3_lane_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x4x3_t __s1 = __p1; \ - int16x4x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ - __builtin_neon_vst3_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 1); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst4_p8(__p0, __p1) __extension__ ({ \ - poly8x8x4_t __s1 = __p1; \ - __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 4); \ -}) -#else -#define vst4_p8(__p0, __p1) __extension__ ({ \ - poly8x8x4_t __s1 = __p1; \ - poly8x8x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 4); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst4_p16(__p0, __p1) __extension__ ({ \ - poly16x4x4_t __s1 = __p1; \ - __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 5); \ -}) -#else -#define vst4_p16(__p0, __p1) __extension__ ({ \ - poly16x4x4_t __s1 = __p1; \ - poly16x4x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ - __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 5); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst4q_p8(__p0, __p1) __extension__ ({ \ - poly8x16x4_t __s1 = __p1; \ - __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 36); \ -}) -#else -#define vst4q_p8(__p0, __p1) __extension__ ({ \ - poly8x16x4_t __s1 = __p1; \ - poly8x16x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 
8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 36); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst4q_p16(__p0, __p1) __extension__ ({ \ - poly16x8x4_t __s1 = __p1; \ - __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 37); \ -}) -#else -#define vst4q_p16(__p0, __p1) __extension__ ({ \ - poly16x8x4_t __s1 = __p1; \ - poly16x8x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 37); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst4q_u8(__p0, __p1) __extension__ ({ \ - uint8x16x4_t __s1 = __p1; \ - __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 48); \ -}) -#else -#define vst4q_u8(__p0, __p1) __extension__ ({ \ - uint8x16x4_t __s1 = __p1; \ - uint8x16x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 48); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst4q_u32(__p0, __p1) __extension__ ({ \ - uint32x4x4_t __s1 = __p1; \ - __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 50); \ -}) -#else -#define vst4q_u32(__p0, __p1) __extension__ ({ \ - uint32x4x4_t __s1 = __p1; \ - uint32x4x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ - __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 50); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst4q_u16(__p0, __p1) __extension__ ({ \ - uint16x8x4_t __s1 = __p1; \ - __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 49); \ -}) -#else -#define vst4q_u16(__p0, __p1) __extension__ ({ \ - uint16x8x4_t __s1 = __p1; \ - uint16x8x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 
4, 3, 2, 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 49); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst4q_s8(__p0, __p1) __extension__ ({ \ - int8x16x4_t __s1 = __p1; \ - __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 32); \ -}) -#else -#define vst4q_s8(__p0, __p1) __extension__ ({ \ - int8x16x4_t __s1 = __p1; \ - int8x16x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 32); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst4q_f32(__p0, __p1) __extension__ ({ \ - float32x4x4_t __s1 = __p1; \ - __builtin_neon_vst4q_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 41); \ -}) -#else -#define vst4q_f32(__p0, __p1) __extension__ ({ \ - float32x4x4_t __s1 = __p1; \ - float32x4x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ - __builtin_neon_vst4q_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 41); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst4q_f16(__p0, __p1) __extension__ ({ \ - float16x8x4_t __s1 = __p1; \ - __builtin_neon_vst4q_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 40); \ -}) -#else -#define vst4q_f16(__p0, __p1) __extension__ ({ \ - float16x8x4_t __s1 = __p1; \ - float16x8x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst4q_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 40); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst4q_s32(__p0, __p1) __extension__ ({ \ - int32x4x4_t __s1 = __p1; \ - __builtin_neon_vst4q_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 34); \ -}) -#else -#define vst4q_s32(__p0, __p1) __extension__ ({ \ - int32x4x4_t __s1 = __p1; \ - int32x4x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ - __builtin_neon_vst4q_v(__p0, __rev1.val[0], __rev1.val[1], 
__rev1.val[2], __rev1.val[3], 34); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst4q_s16(__p0, __p1) __extension__ ({ \ - int16x8x4_t __s1 = __p1; \ - __builtin_neon_vst4q_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 33); \ -}) -#else -#define vst4q_s16(__p0, __p1) __extension__ ({ \ - int16x8x4_t __s1 = __p1; \ - int16x8x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst4q_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 33); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst4_u8(__p0, __p1) __extension__ ({ \ - uint8x8x4_t __s1 = __p1; \ - __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 16); \ -}) -#else -#define vst4_u8(__p0, __p1) __extension__ ({ \ - uint8x8x4_t __s1 = __p1; \ - uint8x8x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 16); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst4_u32(__p0, __p1) __extension__ ({ \ - uint32x2x4_t __s1 = __p1; \ - __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 18); \ -}) -#else -#define vst4_u32(__p0, __p1) __extension__ ({ \ - uint32x2x4_t __s1 = __p1; \ - uint32x2x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ - __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 18); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst4_u64(__p0, __p1) __extension__ ({ \ - uint64x1x4_t __s1 = __p1; \ - __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 19); \ -}) -#else -#define vst4_u64(__p0, __p1) __extension__ ({ \ - uint64x1x4_t __s1 = __p1; \ - __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 19); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst4_u16(__p0, __p1) __extension__ ({ \ - uint16x4x4_t __s1 = __p1; \ - __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 17); \ -}) -#else -#define vst4_u16(__p0, __p1) __extension__ ({ \ - uint16x4x4_t __s1 = __p1; \ - uint16x4x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __rev1.val[2] = 
__builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ - __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 17); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst4_s8(__p0, __p1) __extension__ ({ \ - int8x8x4_t __s1 = __p1; \ - __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 0); \ -}) -#else -#define vst4_s8(__p0, __p1) __extension__ ({ \ - int8x8x4_t __s1 = __p1; \ - int8x8x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 0); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst4_f32(__p0, __p1) __extension__ ({ \ - float32x2x4_t __s1 = __p1; \ - __builtin_neon_vst4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 9); \ -}) -#else -#define vst4_f32(__p0, __p1) __extension__ ({ \ - float32x2x4_t __s1 = __p1; \ - float32x2x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ - __builtin_neon_vst4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 9); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst4_f16(__p0, __p1) __extension__ ({ \ - float16x4x4_t __s1 = __p1; \ - __builtin_neon_vst4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 8); \ -}) -#else -#define vst4_f16(__p0, __p1) __extension__ ({ \ - float16x4x4_t __s1 = __p1; \ - float16x4x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ - __builtin_neon_vst4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 8); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst4_s32(__p0, __p1) __extension__ ({ \ - int32x2x4_t __s1 = __p1; \ - __builtin_neon_vst4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 2); \ -}) -#else -#define vst4_s32(__p0, __p1) __extension__ ({ \ - int32x2x4_t __s1 = __p1; \ - int32x2x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ - __builtin_neon_vst4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 2); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst4_s64(__p0, __p1) __extension__ ({ \ - int64x1x4_t __s1 = __p1; \ - __builtin_neon_vst4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 
__s1.val[3], 3); \ -}) -#else -#define vst4_s64(__p0, __p1) __extension__ ({ \ - int64x1x4_t __s1 = __p1; \ - __builtin_neon_vst4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 3); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst4_s16(__p0, __p1) __extension__ ({ \ - int16x4x4_t __s1 = __p1; \ - __builtin_neon_vst4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 1); \ -}) -#else -#define vst4_s16(__p0, __p1) __extension__ ({ \ - int16x4x4_t __s1 = __p1; \ - int16x4x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ - __builtin_neon_vst4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 1); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst4_lane_p8(__p0, __p1, __p2) __extension__ ({ \ - poly8x8x4_t __s1 = __p1; \ - __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 4); \ -}) -#else -#define vst4_lane_p8(__p0, __p1, __p2) __extension__ ({ \ - poly8x8x4_t __s1 = __p1; \ - poly8x8x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 4); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst4_lane_p16(__p0, __p1, __p2) __extension__ ({ \ - poly16x4x4_t __s1 = __p1; \ - __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 5); \ -}) -#else -#define vst4_lane_p16(__p0, __p1, __p2) __extension__ ({ \ - poly16x4x4_t __s1 = __p1; \ - poly16x4x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ - __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 5); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst4q_lane_p16(__p0, __p1, __p2) __extension__ ({ \ - poly16x8x4_t __s1 = __p1; \ - __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 37); \ -}) -#else -#define vst4q_lane_p16(__p0, __p1, __p2) __extension__ ({ \ - poly16x8x4_t __s1 = __p1; \ - poly16x8x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst4q_lane_v(__p0, 
(int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 37); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst4q_lane_u32(__p0, __p1, __p2) __extension__ ({ \ - uint32x4x4_t __s1 = __p1; \ - __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 50); \ -}) -#else -#define vst4q_lane_u32(__p0, __p1, __p2) __extension__ ({ \ - uint32x4x4_t __s1 = __p1; \ - uint32x4x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ - __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 50); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst4q_lane_u16(__p0, __p1, __p2) __extension__ ({ \ - uint16x8x4_t __s1 = __p1; \ - __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 49); \ -}) -#else -#define vst4q_lane_u16(__p0, __p1, __p2) __extension__ ({ \ - uint16x8x4_t __s1 = __p1; \ - uint16x8x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 49); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst4q_lane_f32(__p0, __p1, __p2) __extension__ ({ \ - float32x4x4_t __s1 = __p1; \ - __builtin_neon_vst4q_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 41); \ -}) -#else -#define vst4q_lane_f32(__p0, __p1, __p2) __extension__ ({ \ - float32x4x4_t __s1 = __p1; \ - float32x4x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ - __builtin_neon_vst4q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 41); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst4q_lane_f16(__p0, __p1, __p2) __extension__ ({ \ - float16x8x4_t __s1 = __p1; \ - __builtin_neon_vst4q_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 40); \ -}) -#else -#define vst4q_lane_f16(__p0, __p1, __p2) __extension__ ({ \ - float16x8x4_t __s1 = __p1; \ - float16x8x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst4q_lane_v(__p0, __rev1.val[0], __rev1.val[1], 
__rev1.val[2], __rev1.val[3], __p2, 40); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst4q_lane_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x4x4_t __s1 = __p1; \ - __builtin_neon_vst4q_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 34); \ -}) -#else -#define vst4q_lane_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x4x4_t __s1 = __p1; \ - int32x4x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ - __builtin_neon_vst4q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 34); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst4q_lane_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x8x4_t __s1 = __p1; \ - __builtin_neon_vst4q_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 33); \ -}) -#else -#define vst4q_lane_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x8x4_t __s1 = __p1; \ - int16x8x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst4q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 33); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst4_lane_u8(__p0, __p1, __p2) __extension__ ({ \ - uint8x8x4_t __s1 = __p1; \ - __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 16); \ -}) -#else -#define vst4_lane_u8(__p0, __p1, __p2) __extension__ ({ \ - uint8x8x4_t __s1 = __p1; \ - uint8x8x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 16); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst4_lane_u32(__p0, __p1, __p2) __extension__ ({ \ - uint32x2x4_t __s1 = __p1; \ - __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 18); \ -}) -#else -#define vst4_lane_u32(__p0, __p1, __p2) __extension__ ({ \ - uint32x2x4_t __s1 = __p1; \ - uint32x2x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ - __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 18); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst4_lane_u16(__p0, __p1, __p2) __extension__ ({ 
\ - uint16x4x4_t __s1 = __p1; \ - __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 17); \ -}) -#else -#define vst4_lane_u16(__p0, __p1, __p2) __extension__ ({ \ - uint16x4x4_t __s1 = __p1; \ - uint16x4x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ - __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 17); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst4_lane_s8(__p0, __p1, __p2) __extension__ ({ \ - int8x8x4_t __s1 = __p1; \ - __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 0); \ -}) -#else -#define vst4_lane_s8(__p0, __p1, __p2) __extension__ ({ \ - int8x8x4_t __s1 = __p1; \ - int8x8x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 0); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst4_lane_f32(__p0, __p1, __p2) __extension__ ({ \ - float32x2x4_t __s1 = __p1; \ - __builtin_neon_vst4_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 9); \ -}) -#else -#define vst4_lane_f32(__p0, __p1, __p2) __extension__ ({ \ - float32x2x4_t __s1 = __p1; \ - float32x2x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ - __builtin_neon_vst4_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 9); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst4_lane_f16(__p0, __p1, __p2) __extension__ ({ \ - float16x4x4_t __s1 = __p1; \ - __builtin_neon_vst4_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 8); \ -}) -#else -#define vst4_lane_f16(__p0, __p1, __p2) __extension__ ({ \ - float16x4x4_t __s1 = __p1; \ - float16x4x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ - __builtin_neon_vst4_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 8); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst4_lane_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x2x4_t __s1 = __p1; \ - __builtin_neon_vst4_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 2); \ -}) -#else -#define vst4_lane_s32(__p0, __p1, __p2) 
__extension__ ({ \ - int32x2x4_t __s1 = __p1; \ - int32x2x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ - __builtin_neon_vst4_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 2); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst4_lane_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x4x4_t __s1 = __p1; \ - __builtin_neon_vst4_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 1); \ -}) -#else -#define vst4_lane_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x4x4_t __s1 = __p1; \ - int16x4x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ - __builtin_neon_vst4_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 1); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vsubq_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16_t __ret; - __ret = __p0 - __p1; - return __ret; -} -#else -__ai uint8x16_t vsubq_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __ret; - __ret = __rev0 - __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vsubq_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4_t __ret; - __ret = __p0 - __p1; - return __ret; -} -#else -__ai uint32x4_t vsubq_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint32x4_t __ret; - __ret = __rev0 - __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vsubq_u64(uint64x2_t __p0, uint64x2_t __p1) { - uint64x2_t __ret; - __ret = __p0 - __p1; - return __ret; -} -#else -__ai uint64x2_t vsubq_u64(uint64x2_t __p0, uint64x2_t __p1) { - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint64x2_t __ret; - __ret = __rev0 - __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vsubq_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint16x8_t __ret; - __ret = __p0 - __p1; - return __ret; -} -#else -__ai uint16x8_t vsubq_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __ret; - __ret = __rev0 - __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x16_t 
vsubq_s8(int8x16_t __p0, int8x16_t __p1) { - int8x16_t __ret; - __ret = __p0 - __p1; - return __ret; -} -#else -__ai int8x16_t vsubq_s8(int8x16_t __p0, int8x16_t __p1) { - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __ret; - __ret = __rev0 - __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vsubq_f32(float32x4_t __p0, float32x4_t __p1) { - float32x4_t __ret; - __ret = __p0 - __p1; - return __ret; -} -#else -__ai float32x4_t vsubq_f32(float32x4_t __p0, float32x4_t __p1) { - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - float32x4_t __ret; - __ret = __rev0 - __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vsubq_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4_t __ret; - __ret = __p0 - __p1; - return __ret; -} -#else -__ai int32x4_t vsubq_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int32x4_t __ret; - __ret = __rev0 - __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vsubq_s64(int64x2_t __p0, int64x2_t __p1) { - int64x2_t __ret; - __ret = __p0 - __p1; - return __ret; -} -#else -__ai int64x2_t vsubq_s64(int64x2_t __p0, int64x2_t __p1) { - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - int64x2_t __ret; - __ret = __rev0 - __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vsubq_s16(int16x8_t __p0, int16x8_t __p1) { - int16x8_t __ret; - __ret = __p0 - __p1; - return __ret; -} -#else -__ai int16x8_t vsubq_s16(int16x8_t __p0, int16x8_t __p1) { - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __ret; - __ret = __rev0 - __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vsub_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x8_t __ret; - __ret = __p0 - __p1; - return __ret; -} -#else -__ai uint8x8_t vsub_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __ret; - __ret = __rev0 - __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vsub_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint32x2_t __ret; - __ret = __p0 - __p1; - return __ret; -} -#else -__ai uint32x2_t vsub_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - 
uint32x2_t __ret; - __ret = __rev0 - __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x1_t vsub_u64(uint64x1_t __p0, uint64x1_t __p1) { - uint64x1_t __ret; - __ret = __p0 - __p1; - return __ret; -} -#else -__ai uint64x1_t vsub_u64(uint64x1_t __p0, uint64x1_t __p1) { - uint64x1_t __ret; - __ret = __p0 - __p1; - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vsub_u16(uint16x4_t __p0, uint16x4_t __p1) { - uint16x4_t __ret; - __ret = __p0 - __p1; - return __ret; -} -#else -__ai uint16x4_t vsub_u16(uint16x4_t __p0, uint16x4_t __p1) { - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint16x4_t __ret; - __ret = __rev0 - __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vsub_s8(int8x8_t __p0, int8x8_t __p1) { - int8x8_t __ret; - __ret = __p0 - __p1; - return __ret; -} -#else -__ai int8x8_t vsub_s8(int8x8_t __p0, int8x8_t __p1) { - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __ret; - __ret = __rev0 - __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vsub_f32(float32x2_t __p0, float32x2_t __p1) { - float32x2_t __ret; - __ret = __p0 - __p1; - return __ret; -} -#else -__ai float32x2_t vsub_f32(float32x2_t __p0, float32x2_t __p1) { - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - float32x2_t __ret; - __ret = __rev0 - __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vsub_s32(int32x2_t __p0, int32x2_t __p1) { - int32x2_t __ret; - __ret = __p0 - __p1; - return __ret; -} -#else -__ai int32x2_t vsub_s32(int32x2_t __p0, int32x2_t __p1) { - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - int32x2_t __ret; - __ret = __rev0 - __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x1_t vsub_s64(int64x1_t __p0, int64x1_t __p1) { - int64x1_t __ret; - __ret = __p0 - __p1; - return __ret; -} -#else -__ai int64x1_t vsub_s64(int64x1_t __p0, int64x1_t __p1) { - int64x1_t __ret; - __ret = __p0 - __p1; - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vsub_s16(int16x4_t __p0, int16x4_t __p1) { - int16x4_t __ret; - __ret = __p0 - __p1; - return __ret; -} -#else -__ai int16x4_t vsub_s16(int16x4_t __p0, int16x4_t __p1) { - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int16x4_t __ret; - __ret = __rev0 - __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17); - return __ret; -} -#else -__ai uint16x4_t vsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4_t __rev0; __rev0 
= __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 17); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -__ai uint16x4_t __noswap_vsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) { - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18); - return __ret; -} -#else -__ai uint32x2_t vsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) { - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 18); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -__ai uint32x2_t __noswap_vsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) { - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16); - return __ret; -} -#else -__ai uint8x8_t vsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 16); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -__ai uint8x8_t __noswap_vsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vsubhn_s32(int32x4_t __p0, int32x4_t __p1) { - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1); - return __ret; -} -#else -__ai int16x4_t vsubhn_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -__ai int16x4_t __noswap_vsubhn_s32(int32x4_t __p0, int32x4_t __p1) { - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vsubhn_s64(int64x2_t __p0, int64x2_t __p1) { - int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2); - return __ret; -} -#else -__ai int32x2_t vsubhn_s64(int64x2_t __p0, int64x2_t __p1) { - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vsubhn_v((int8x16_t)__rev0, 
(int8x16_t)__rev1, 2); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -__ai int32x2_t __noswap_vsubhn_s64(int64x2_t __p0, int64x2_t __p1) { - int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vsubhn_s16(int16x8_t __p0, int16x8_t __p1) { - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0); - return __ret; -} -#else -__ai int8x8_t vsubhn_s16(int16x8_t __p0, int16x8_t __p1) { - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 0); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -__ai int8x8_t __noswap_vsubhn_s16(int16x8_t __p0, int16x8_t __p1) { - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vsubl_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint16x8_t __ret; - __ret = vmovl_u8(__p0) - vmovl_u8(__p1); - return __ret; -} -#else -__ai uint16x8_t vsubl_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __ret; - __ret = __noswap_vmovl_u8(__rev0) - __noswap_vmovl_u8(__rev1); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vsubl_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint64x2_t __ret; - __ret = vmovl_u32(__p0) - vmovl_u32(__p1); - return __ret; -} -#else -__ai uint64x2_t vsubl_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint64x2_t __ret; - __ret = __noswap_vmovl_u32(__rev0) - __noswap_vmovl_u32(__rev1); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vsubl_u16(uint16x4_t __p0, uint16x4_t __p1) { - uint32x4_t __ret; - __ret = vmovl_u16(__p0) - vmovl_u16(__p1); - return __ret; -} -#else -__ai uint32x4_t vsubl_u16(uint16x4_t __p0, uint16x4_t __p1) { - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint32x4_t __ret; - __ret = __noswap_vmovl_u16(__rev0) - __noswap_vmovl_u16(__rev1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vsubl_s8(int8x8_t __p0, int8x8_t __p1) { - int16x8_t __ret; - __ret = vmovl_s8(__p0) - vmovl_s8(__p1); - return __ret; -} -#else -__ai int16x8_t vsubl_s8(int8x8_t __p0, int8x8_t __p1) { - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __ret; - __ret = __noswap_vmovl_s8(__rev0) - __noswap_vmovl_s8(__rev1); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vsubl_s32(int32x2_t __p0, 
int32x2_t __p1) { - int64x2_t __ret; - __ret = vmovl_s32(__p0) - vmovl_s32(__p1); - return __ret; -} -#else -__ai int64x2_t vsubl_s32(int32x2_t __p0, int32x2_t __p1) { - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - int64x2_t __ret; - __ret = __noswap_vmovl_s32(__rev0) - __noswap_vmovl_s32(__rev1); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vsubl_s16(int16x4_t __p0, int16x4_t __p1) { - int32x4_t __ret; - __ret = vmovl_s16(__p0) - vmovl_s16(__p1); - return __ret; -} -#else -__ai int32x4_t vsubl_s16(int16x4_t __p0, int16x4_t __p1) { - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int32x4_t __ret; - __ret = __noswap_vmovl_s16(__rev0) - __noswap_vmovl_s16(__rev1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vsubw_u8(uint16x8_t __p0, uint8x8_t __p1) { - uint16x8_t __ret; - __ret = __p0 - vmovl_u8(__p1); - return __ret; -} -#else -__ai uint16x8_t vsubw_u8(uint16x8_t __p0, uint8x8_t __p1) { - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __ret; - __ret = __rev0 - __noswap_vmovl_u8(__rev1); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vsubw_u32(uint64x2_t __p0, uint32x2_t __p1) { - uint64x2_t __ret; - __ret = __p0 - vmovl_u32(__p1); - return __ret; -} -#else -__ai uint64x2_t vsubw_u32(uint64x2_t __p0, uint32x2_t __p1) { - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint64x2_t __ret; - __ret = __rev0 - __noswap_vmovl_u32(__rev1); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vsubw_u16(uint32x4_t __p0, uint16x4_t __p1) { - uint32x4_t __ret; - __ret = __p0 - vmovl_u16(__p1); - return __ret; -} -#else -__ai uint32x4_t vsubw_u16(uint32x4_t __p0, uint16x4_t __p1) { - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint32x4_t __ret; - __ret = __rev0 - __noswap_vmovl_u16(__rev1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vsubw_s8(int16x8_t __p0, int8x8_t __p1) { - int16x8_t __ret; - __ret = __p0 - vmovl_s8(__p1); - return __ret; -} -#else -__ai int16x8_t vsubw_s8(int16x8_t __p0, int8x8_t __p1) { - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __ret; - __ret = __rev0 - __noswap_vmovl_s8(__rev1); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vsubw_s32(int64x2_t __p0, int32x2_t __p1) { - int64x2_t __ret; - __ret = __p0 - vmovl_s32(__p1); - return __ret; -} -#else -__ai int64x2_t vsubw_s32(int64x2_t __p0, int32x2_t __p1) { - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, 
__p0, 1, 0); - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - int64x2_t __ret; - __ret = __rev0 - __noswap_vmovl_s32(__rev1); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vsubw_s16(int32x4_t __p0, int16x4_t __p1) { - int32x4_t __ret; - __ret = __p0 - vmovl_s16(__p1); - return __ret; -} -#else -__ai int32x4_t vsubw_s16(int32x4_t __p0, int16x4_t __p1) { - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int32x4_t __ret; - __ret = __rev0 - __noswap_vmovl_s16(__rev1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly8x8_t vtbl1_p8(poly8x8_t __p0, uint8x8_t __p1) { - poly8x8_t __ret; - __ret = (poly8x8_t) __builtin_neon_vtbl1_v((int8x8_t)__p0, (int8x8_t)__p1, 4); - return __ret; -} -#else -__ai poly8x8_t vtbl1_p8(poly8x8_t __p0, uint8x8_t __p1) { - poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x8_t __ret; - __ret = (poly8x8_t) __builtin_neon_vtbl1_v((int8x8_t)__rev0, (int8x8_t)__rev1, 4); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vtbl1_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vtbl1_v((int8x8_t)__p0, (int8x8_t)__p1, 16); - return __ret; -} -#else -__ai uint8x8_t vtbl1_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vtbl1_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vtbl1_s8(int8x8_t __p0, int8x8_t __p1) { - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vtbl1_v((int8x8_t)__p0, (int8x8_t)__p1, 0); - return __ret; -} -#else -__ai int8x8_t vtbl1_s8(int8x8_t __p0, int8x8_t __p1) { - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vtbl1_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly8x8_t vtbl2_p8(poly8x8x2_t __p0, uint8x8_t __p1) { - poly8x8_t __ret; - __ret = (poly8x8_t) __builtin_neon_vtbl2_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p1, 4); - return __ret; -} -#else -__ai poly8x8_t vtbl2_p8(poly8x8x2_t __p0, uint8x8_t __p1) { - poly8x8x2_t __rev0; - __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0); - __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x8_t __ret; - __ret = (poly8x8_t) __builtin_neon_vtbl2_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev1, 4); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return 
__ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vtbl2_u8(uint8x8x2_t __p0, uint8x8_t __p1) { - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vtbl2_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p1, 16); - return __ret; -} -#else -__ai uint8x8_t vtbl2_u8(uint8x8x2_t __p0, uint8x8_t __p1) { - uint8x8x2_t __rev0; - __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0); - __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vtbl2_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev1, 16); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vtbl2_s8(int8x8x2_t __p0, int8x8_t __p1) { - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vtbl2_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p1, 0); - return __ret; -} -#else -__ai int8x8_t vtbl2_s8(int8x8x2_t __p0, int8x8_t __p1) { - int8x8x2_t __rev0; - __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0); - __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vtbl2_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev1, 0); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly8x8_t vtbl3_p8(poly8x8x3_t __p0, uint8x8_t __p1) { - poly8x8_t __ret; - __ret = (poly8x8_t) __builtin_neon_vtbl3_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p0.val[2], (int8x8_t)__p1, 4); - return __ret; -} -#else -__ai poly8x8_t vtbl3_p8(poly8x8x3_t __p0, uint8x8_t __p1) { - poly8x8x3_t __rev0; - __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0); - __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0); - __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x8_t __ret; - __ret = (poly8x8_t) __builtin_neon_vtbl3_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev0.val[2], (int8x8_t)__rev1, 4); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vtbl3_u8(uint8x8x3_t __p0, uint8x8_t __p1) { - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vtbl3_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p0.val[2], (int8x8_t)__p1, 16); - return __ret; -} -#else -__ai uint8x8_t vtbl3_u8(uint8x8x3_t __p0, uint8x8_t __p1) { - uint8x8x3_t __rev0; - __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0); - __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0); - __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vtbl3_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev0.val[2], (int8x8_t)__rev1, 16); - 
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vtbl3_s8(int8x8x3_t __p0, int8x8_t __p1) { - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vtbl3_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p0.val[2], (int8x8_t)__p1, 0); - return __ret; -} -#else -__ai int8x8_t vtbl3_s8(int8x8x3_t __p0, int8x8_t __p1) { - int8x8x3_t __rev0; - __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0); - __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0); - __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vtbl3_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev0.val[2], (int8x8_t)__rev1, 0); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly8x8_t vtbl4_p8(poly8x8x4_t __p0, uint8x8_t __p1) { - poly8x8_t __ret; - __ret = (poly8x8_t) __builtin_neon_vtbl4_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p0.val[2], (int8x8_t)__p0.val[3], (int8x8_t)__p1, 4); - return __ret; -} -#else -__ai poly8x8_t vtbl4_p8(poly8x8x4_t __p0, uint8x8_t __p1) { - poly8x8x4_t __rev0; - __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0); - __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0); - __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 7, 6, 5, 4, 3, 2, 1, 0); - __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x8_t __ret; - __ret = (poly8x8_t) __builtin_neon_vtbl4_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev0.val[2], (int8x8_t)__rev0.val[3], (int8x8_t)__rev1, 4); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vtbl4_u8(uint8x8x4_t __p0, uint8x8_t __p1) { - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vtbl4_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p0.val[2], (int8x8_t)__p0.val[3], (int8x8_t)__p1, 16); - return __ret; -} -#else -__ai uint8x8_t vtbl4_u8(uint8x8x4_t __p0, uint8x8_t __p1) { - uint8x8x4_t __rev0; - __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0); - __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0); - __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 7, 6, 5, 4, 3, 2, 1, 0); - __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vtbl4_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev0.val[2], (int8x8_t)__rev0.val[3], (int8x8_t)__rev1, 16); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vtbl4_s8(int8x8x4_t __p0, int8x8_t __p1) { - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vtbl4_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p0.val[2], (int8x8_t)__p0.val[3], 
(int8x8_t)__p1, 0); - return __ret; -} -#else -__ai int8x8_t vtbl4_s8(int8x8x4_t __p0, int8x8_t __p1) { - int8x8x4_t __rev0; - __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0); - __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0); - __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 7, 6, 5, 4, 3, 2, 1, 0); - __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vtbl4_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev0.val[2], (int8x8_t)__rev0.val[3], (int8x8_t)__rev1, 0); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly8x8_t vtbx1_p8(poly8x8_t __p0, poly8x8_t __p1, uint8x8_t __p2) { - poly8x8_t __ret; - __ret = (poly8x8_t) __builtin_neon_vtbx1_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 4); - return __ret; -} -#else -__ai poly8x8_t vtbx1_p8(poly8x8_t __p0, poly8x8_t __p1, uint8x8_t __p2) { - poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x8_t __ret; - __ret = (poly8x8_t) __builtin_neon_vtbx1_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 4); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vtbx1_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vtbx1_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 16); - return __ret; -} -#else -__ai uint8x8_t vtbx1_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vtbx1_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 16); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vtbx1_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) { - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vtbx1_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 0); - return __ret; -} -#else -__ai int8x8_t vtbx1_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) { - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vtbx1_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 0); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly8x8_t vtbx2_p8(poly8x8_t __p0, poly8x8x2_t __p1, uint8x8_t __p2) { - poly8x8_t __ret; - __ret = (poly8x8_t) __builtin_neon_vtbx2_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p2, 4); - return 
__ret; -} -#else -__ai poly8x8_t vtbx2_p8(poly8x8_t __p0, poly8x8x2_t __p1, uint8x8_t __p2) { - poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x8x2_t __rev1; - __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); - __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x8_t __ret; - __ret = (poly8x8_t) __builtin_neon_vtbx2_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev2, 4); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vtbx2_u8(uint8x8_t __p0, uint8x8x2_t __p1, uint8x8_t __p2) { - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vtbx2_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p2, 16); - return __ret; -} -#else -__ai uint8x8_t vtbx2_u8(uint8x8_t __p0, uint8x8x2_t __p1, uint8x8_t __p2) { - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8x2_t __rev1; - __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); - __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vtbx2_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev2, 16); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vtbx2_s8(int8x8_t __p0, int8x8x2_t __p1, int8x8_t __p2) { - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vtbx2_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p2, 0); - return __ret; -} -#else -__ai int8x8_t vtbx2_s8(int8x8_t __p0, int8x8x2_t __p1, int8x8_t __p2) { - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8x2_t __rev1; - __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); - __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vtbx2_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev2, 0); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly8x8_t vtbx3_p8(poly8x8_t __p0, poly8x8x3_t __p1, uint8x8_t __p2) { - poly8x8_t __ret; - __ret = (poly8x8_t) __builtin_neon_vtbx3_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p1.val[2], (int8x8_t)__p2, 4); - return __ret; -} -#else -__ai poly8x8_t vtbx3_p8(poly8x8_t __p0, poly8x8x3_t __p1, uint8x8_t __p2) { - poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x8x3_t __rev1; - __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); - __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); - __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 
5, 4, 3, 2, 1, 0); - poly8x8_t __ret; - __ret = (poly8x8_t) __builtin_neon_vtbx3_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev2, 4); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vtbx3_u8(uint8x8_t __p0, uint8x8x3_t __p1, uint8x8_t __p2) { - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vtbx3_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p1.val[2], (int8x8_t)__p2, 16); - return __ret; -} -#else -__ai uint8x8_t vtbx3_u8(uint8x8_t __p0, uint8x8x3_t __p1, uint8x8_t __p2) { - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8x3_t __rev1; - __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); - __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); - __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vtbx3_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev2, 16); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vtbx3_s8(int8x8_t __p0, int8x8x3_t __p1, int8x8_t __p2) { - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vtbx3_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p1.val[2], (int8x8_t)__p2, 0); - return __ret; -} -#else -__ai int8x8_t vtbx3_s8(int8x8_t __p0, int8x8x3_t __p1, int8x8_t __p2) { - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8x3_t __rev1; - __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); - __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); - __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vtbx3_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev2, 0); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly8x8_t vtbx4_p8(poly8x8_t __p0, poly8x8x4_t __p1, uint8x8_t __p2) { - poly8x8_t __ret; - __ret = (poly8x8_t) __builtin_neon_vtbx4_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p1.val[2], (int8x8_t)__p1.val[3], (int8x8_t)__p2, 4); - return __ret; -} -#else -__ai poly8x8_t vtbx4_p8(poly8x8_t __p0, poly8x8x4_t __p1, uint8x8_t __p2) { - poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x8x4_t __rev1; - __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); - __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); - __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); - __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x8_t __ret; - 
__ret = (poly8x8_t) __builtin_neon_vtbx4_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], (int8x8_t)__rev2, 4); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vtbx4_u8(uint8x8_t __p0, uint8x8x4_t __p1, uint8x8_t __p2) { - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vtbx4_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p1.val[2], (int8x8_t)__p1.val[3], (int8x8_t)__p2, 16); - return __ret; -} -#else -__ai uint8x8_t vtbx4_u8(uint8x8_t __p0, uint8x8x4_t __p1, uint8x8_t __p2) { - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8x4_t __rev1; - __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); - __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); - __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); - __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vtbx4_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], (int8x8_t)__rev2, 16); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vtbx4_s8(int8x8_t __p0, int8x8x4_t __p1, int8x8_t __p2) { - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vtbx4_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p1.val[2], (int8x8_t)__p1.val[3], (int8x8_t)__p2, 0); - return __ret; -} -#else -__ai int8x8_t vtbx4_s8(int8x8_t __p0, int8x8x4_t __p1, int8x8_t __p2) { - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8x4_t __rev1; - __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); - __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); - __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); - __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vtbx4_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], (int8x8_t)__rev2, 0); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly8x8x2_t vtrn_p8(poly8x8_t __p0, poly8x8_t __p1) { - poly8x8x2_t __ret; - __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 4); - return __ret; -} -#else -__ai poly8x8x2_t vtrn_p8(poly8x8_t __p0, poly8x8_t __p1) { - poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x8x2_t __ret; - __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 4); - - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); - 
return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly16x4x2_t vtrn_p16(poly16x4_t __p0, poly16x4_t __p1) { - poly16x4x2_t __ret; - __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 5); - return __ret; -} -#else -__ai poly16x4x2_t vtrn_p16(poly16x4_t __p0, poly16x4_t __p1) { - poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - poly16x4x2_t __ret; - __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 5); - - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly8x16x2_t vtrnq_p8(poly8x16_t __p0, poly8x16_t __p1) { - poly8x16x2_t __ret; - __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 36); - return __ret; -} -#else -__ai poly8x16x2_t vtrnq_p8(poly8x16_t __p0, poly8x16_t __p1) { - poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x16x2_t __ret; - __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 36); - - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly16x8x2_t vtrnq_p16(poly16x8_t __p0, poly16x8_t __p1) { - poly16x8x2_t __ret; - __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 37); - return __ret; -} -#else -__ai poly16x8x2_t vtrnq_p16(poly16x8_t __p0, poly16x8_t __p1) { - poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - poly16x8x2_t __ret; - __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 37); - - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x16x2_t vtrnq_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16x2_t __ret; - __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 48); - return __ret; -} -#else -__ai uint8x16x2_t vtrnq_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16x2_t __ret; - __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 48); - - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4x2_t vtrnq_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4x2_t __ret; - __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 50); - return __ret; -} -#else -__ai uint32x4x2_t 
vtrnq_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint32x4x2_t __ret; - __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 50); - - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8x2_t vtrnq_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint16x8x2_t __ret; - __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 49); - return __ret; -} -#else -__ai uint16x8x2_t vtrnq_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8x2_t __ret; - __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 49); - - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x16x2_t vtrnq_s8(int8x16_t __p0, int8x16_t __p1) { - int8x16x2_t __ret; - __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 32); - return __ret; -} -#else -__ai int8x16x2_t vtrnq_s8(int8x16_t __p0, int8x16_t __p1) { - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16x2_t __ret; - __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 32); - - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x4x2_t vtrnq_f32(float32x4_t __p0, float32x4_t __p1) { - float32x4x2_t __ret; - __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 41); - return __ret; -} -#else -__ai float32x4x2_t vtrnq_f32(float32x4_t __p0, float32x4_t __p1) { - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - float32x4x2_t __ret; - __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 41); - - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4x2_t vtrnq_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4x2_t __ret; - __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 34); - return __ret; -} -#else -__ai int32x4x2_t vtrnq_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int32x4x2_t __ret; - __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 34); - - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); - __ret.val[1] = __builtin_shufflevector(__ret.val[1], 
__ret.val[1], 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x8x2_t vtrnq_s16(int16x8_t __p0, int16x8_t __p1) { - int16x8x2_t __ret; - __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 33); - return __ret; -} -#else -__ai int16x8x2_t vtrnq_s16(int16x8_t __p0, int16x8_t __p1) { - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8x2_t __ret; - __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 33); - - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x8x2_t vtrn_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x8x2_t __ret; - __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 16); - return __ret; -} -#else -__ai uint8x8x2_t vtrn_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8x2_t __ret; - __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 16); - - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x2x2_t vtrn_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint32x2x2_t __ret; - __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 18); - return __ret; -} -#else -__ai uint32x2x2_t vtrn_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint32x2x2_t __ret; - __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 18); - - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4x2_t vtrn_u16(uint16x4_t __p0, uint16x4_t __p1) { - uint16x4x2_t __ret; - __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 17); - return __ret; -} -#else -__ai uint16x4x2_t vtrn_u16(uint16x4_t __p0, uint16x4_t __p1) { - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint16x4x2_t __ret; - __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 17); - - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x8x2_t vtrn_s8(int8x8_t __p0, int8x8_t __p1) { - int8x8x2_t __ret; - __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 0); - return __ret; -} -#else -__ai int8x8x2_t vtrn_s8(int8x8_t __p0, int8x8_t __p1) { - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8x2_t __ret; - __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 0); - - __ret.val[0] = 
__builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x2x2_t vtrn_f32(float32x2_t __p0, float32x2_t __p1) { - float32x2x2_t __ret; - __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 9); - return __ret; -} -#else -__ai float32x2x2_t vtrn_f32(float32x2_t __p0, float32x2_t __p1) { - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - float32x2x2_t __ret; - __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 9); - - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x2x2_t vtrn_s32(int32x2_t __p0, int32x2_t __p1) { - int32x2x2_t __ret; - __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 2); - return __ret; -} -#else -__ai int32x2x2_t vtrn_s32(int32x2_t __p0, int32x2_t __p1) { - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - int32x2x2_t __ret; - __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 2); - - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x4x2_t vtrn_s16(int16x4_t __p0, int16x4_t __p1) { - int16x4x2_t __ret; - __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 1); - return __ret; -} -#else -__ai int16x4x2_t vtrn_s16(int16x4_t __p0, int16x4_t __p1) { - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int16x4x2_t __ret; - __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 1); - - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vtst_p8(poly8x8_t __p0, poly8x8_t __p1) { - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 16); - return __ret; -} -#else -__ai uint8x8_t vtst_p8(poly8x8_t __p0, poly8x8_t __p1) { - poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vtst_p16(poly16x4_t __p0, poly16x4_t __p1) { - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 17); - return __ret; -} -#else -__ai uint16x4_t vtst_p16(poly16x4_t __p0, poly16x4_t __p1) { - poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - 
return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vtstq_p8(poly8x16_t __p0, poly8x16_t __p1) { - uint8x16_t __ret; - __ret = (uint8x16_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); - return __ret; -} -#else -__ai uint8x16_t vtstq_p8(poly8x16_t __p0, poly8x16_t __p1) { - poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __ret; - __ret = (uint8x16_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vtstq_p16(poly16x8_t __p0, poly16x8_t __p1) { - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); - return __ret; -} -#else -__ai uint16x8_t vtstq_p16(poly16x8_t __p0, poly16x8_t __p1) { - poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vtstq_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16_t __ret; - __ret = (uint8x16_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); - return __ret; -} -#else -__ai uint8x16_t vtstq_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __ret; - __ret = (uint8x16_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vtstq_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); - return __ret; -} -#else -__ai uint32x4_t vtstq_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vtstq_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); - return __ret; -} -#else -__ai uint16x8_t vtstq_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef 
__LITTLE_ENDIAN__ -__ai uint8x16_t vtstq_s8(int8x16_t __p0, int8x16_t __p1) { - uint8x16_t __ret; - __ret = (uint8x16_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); - return __ret; -} -#else -__ai uint8x16_t vtstq_s8(int8x16_t __p0, int8x16_t __p1) { - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __ret; - __ret = (uint8x16_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vtstq_s32(int32x4_t __p0, int32x4_t __p1) { - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); - return __ret; -} -#else -__ai uint32x4_t vtstq_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vtstq_s16(int16x8_t __p0, int16x8_t __p1) { - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); - return __ret; -} -#else -__ai uint16x8_t vtstq_s16(int16x8_t __p0, int16x8_t __p1) { - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vtst_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 16); - return __ret; -} -#else -__ai uint8x8_t vtst_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vtst_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 18); - return __ret; -} -#else -__ai uint32x2_t vtst_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vtst_u16(uint16x4_t __p0, uint16x4_t __p1) { - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 17); - return __ret; -} -#else -__ai uint16x4_t 
vtst_u16(uint16x4_t __p0, uint16x4_t __p1) { - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vtst_s8(int8x8_t __p0, int8x8_t __p1) { - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 16); - return __ret; -} -#else -__ai uint8x8_t vtst_s8(int8x8_t __p0, int8x8_t __p1) { - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vtst_s32(int32x2_t __p0, int32x2_t __p1) { - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 18); - return __ret; -} -#else -__ai uint32x2_t vtst_s32(int32x2_t __p0, int32x2_t __p1) { - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vtst_s16(int16x4_t __p0, int16x4_t __p1) { - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 17); - return __ret; -} -#else -__ai uint16x4_t vtst_s16(int16x4_t __p0, int16x4_t __p1) { - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly8x8x2_t vuzp_p8(poly8x8_t __p0, poly8x8_t __p1) { - poly8x8x2_t __ret; - __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 4); - return __ret; -} -#else -__ai poly8x8x2_t vuzp_p8(poly8x8_t __p0, poly8x8_t __p1) { - poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x8x2_t __ret; - __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 4); - - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly16x4x2_t vuzp_p16(poly16x4_t __p0, poly16x4_t __p1) { - poly16x4x2_t __ret; - __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 5); - return __ret; -} -#else -__ai poly16x4x2_t vuzp_p16(poly16x4_t __p0, poly16x4_t __p1) { - poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - poly16x4x2_t __ret; - __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 
5); - - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly8x16x2_t vuzpq_p8(poly8x16_t __p0, poly8x16_t __p1) { - poly8x16x2_t __ret; - __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 36); - return __ret; -} -#else -__ai poly8x16x2_t vuzpq_p8(poly8x16_t __p0, poly8x16_t __p1) { - poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x16x2_t __ret; - __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 36); - - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly16x8x2_t vuzpq_p16(poly16x8_t __p0, poly16x8_t __p1) { - poly16x8x2_t __ret; - __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 37); - return __ret; -} -#else -__ai poly16x8x2_t vuzpq_p16(poly16x8_t __p0, poly16x8_t __p1) { - poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - poly16x8x2_t __ret; - __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 37); - - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x16x2_t vuzpq_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16x2_t __ret; - __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 48); - return __ret; -} -#else -__ai uint8x16x2_t vuzpq_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16x2_t __ret; - __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 48); - - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4x2_t vuzpq_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4x2_t __ret; - __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 50); - return __ret; -} -#else -__ai uint32x4x2_t vuzpq_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint32x4x2_t __ret; - __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 50); - - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8x2_t vuzpq_u16(uint16x8_t 
__p0, uint16x8_t __p1) { - uint16x8x2_t __ret; - __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 49); - return __ret; -} -#else -__ai uint16x8x2_t vuzpq_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8x2_t __ret; - __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 49); - - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x16x2_t vuzpq_s8(int8x16_t __p0, int8x16_t __p1) { - int8x16x2_t __ret; - __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 32); - return __ret; -} -#else -__ai int8x16x2_t vuzpq_s8(int8x16_t __p0, int8x16_t __p1) { - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16x2_t __ret; - __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 32); - - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x4x2_t vuzpq_f32(float32x4_t __p0, float32x4_t __p1) { - float32x4x2_t __ret; - __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 41); - return __ret; -} -#else -__ai float32x4x2_t vuzpq_f32(float32x4_t __p0, float32x4_t __p1) { - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - float32x4x2_t __ret; - __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 41); - - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4x2_t vuzpq_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4x2_t __ret; - __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 34); - return __ret; -} -#else -__ai int32x4x2_t vuzpq_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int32x4x2_t __ret; - __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 34); - - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x8x2_t vuzpq_s16(int16x8_t __p0, int16x8_t __p1) { - int16x8x2_t __ret; - __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 33); - return __ret; -} -#else -__ai int16x8x2_t vuzpq_s16(int16x8_t __p0, int16x8_t __p1) { - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8x2_t __ret; - __builtin_neon_vuzpq_v(&__ret, 
(int8x16_t)__rev0, (int8x16_t)__rev1, 33); - - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x8x2_t vuzp_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x8x2_t __ret; - __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 16); - return __ret; -} -#else -__ai uint8x8x2_t vuzp_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8x2_t __ret; - __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 16); - - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x2x2_t vuzp_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint32x2x2_t __ret; - __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 18); - return __ret; -} -#else -__ai uint32x2x2_t vuzp_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint32x2x2_t __ret; - __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 18); - - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4x2_t vuzp_u16(uint16x4_t __p0, uint16x4_t __p1) { - uint16x4x2_t __ret; - __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 17); - return __ret; -} -#else -__ai uint16x4x2_t vuzp_u16(uint16x4_t __p0, uint16x4_t __p1) { - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint16x4x2_t __ret; - __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 17); - - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x8x2_t vuzp_s8(int8x8_t __p0, int8x8_t __p1) { - int8x8x2_t __ret; - __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 0); - return __ret; -} -#else -__ai int8x8x2_t vuzp_s8(int8x8_t __p0, int8x8_t __p1) { - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8x2_t __ret; - __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 0); - - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x2x2_t vuzp_f32(float32x2_t __p0, float32x2_t __p1) { - float32x2x2_t __ret; - __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 9); - return __ret; -} -#else -__ai float32x2x2_t vuzp_f32(float32x2_t __p0, float32x2_t __p1) { - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float32x2_t __rev1; __rev1 
= __builtin_shufflevector(__p1, __p1, 1, 0); - float32x2x2_t __ret; - __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 9); - - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x2x2_t vuzp_s32(int32x2_t __p0, int32x2_t __p1) { - int32x2x2_t __ret; - __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 2); - return __ret; -} -#else -__ai int32x2x2_t vuzp_s32(int32x2_t __p0, int32x2_t __p1) { - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - int32x2x2_t __ret; - __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 2); - - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x4x2_t vuzp_s16(int16x4_t __p0, int16x4_t __p1) { - int16x4x2_t __ret; - __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 1); - return __ret; -} -#else -__ai int16x4x2_t vuzp_s16(int16x4_t __p0, int16x4_t __p1) { - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int16x4x2_t __ret; - __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 1); - - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly8x8x2_t vzip_p8(poly8x8_t __p0, poly8x8_t __p1) { - poly8x8x2_t __ret; - __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 4); - return __ret; -} -#else -__ai poly8x8x2_t vzip_p8(poly8x8_t __p0, poly8x8_t __p1) { - poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x8x2_t __ret; - __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 4); - - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly16x4x2_t vzip_p16(poly16x4_t __p0, poly16x4_t __p1) { - poly16x4x2_t __ret; - __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 5); - return __ret; -} -#else -__ai poly16x4x2_t vzip_p16(poly16x4_t __p0, poly16x4_t __p1) { - poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - poly16x4x2_t __ret; - __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 5); - - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly8x16x2_t vzipq_p8(poly8x16_t __p0, poly8x16_t __p1) { - poly8x16x2_t __ret; - __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 36); - return __ret; -} -#else -__ai poly8x16x2_t vzipq_p8(poly8x16_t __p0, poly8x16_t __p1) { - poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 
7, 6, 5, 4, 3, 2, 1, 0); - poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x16x2_t __ret; - __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 36); - - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly16x8x2_t vzipq_p16(poly16x8_t __p0, poly16x8_t __p1) { - poly16x8x2_t __ret; - __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 37); - return __ret; -} -#else -__ai poly16x8x2_t vzipq_p16(poly16x8_t __p0, poly16x8_t __p1) { - poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - poly16x8x2_t __ret; - __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 37); - - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x16x2_t vzipq_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16x2_t __ret; - __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 48); - return __ret; -} -#else -__ai uint8x16x2_t vzipq_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16x2_t __ret; - __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 48); - - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4x2_t vzipq_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4x2_t __ret; - __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 50); - return __ret; -} -#else -__ai uint32x4x2_t vzipq_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint32x4x2_t __ret; - __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 50); - - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8x2_t vzipq_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint16x8x2_t __ret; - __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 49); - return __ret; -} -#else -__ai uint16x8x2_t vzipq_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8x2_t __ret; - __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 49); - - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 
4, 3, 2, 1, 0); - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x16x2_t vzipq_s8(int8x16_t __p0, int8x16_t __p1) { - int8x16x2_t __ret; - __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 32); - return __ret; -} -#else -__ai int8x16x2_t vzipq_s8(int8x16_t __p0, int8x16_t __p1) { - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16x2_t __ret; - __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 32); - - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x4x2_t vzipq_f32(float32x4_t __p0, float32x4_t __p1) { - float32x4x2_t __ret; - __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 41); - return __ret; -} -#else -__ai float32x4x2_t vzipq_f32(float32x4_t __p0, float32x4_t __p1) { - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - float32x4x2_t __ret; - __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 41); - - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4x2_t vzipq_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4x2_t __ret; - __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 34); - return __ret; -} -#else -__ai int32x4x2_t vzipq_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int32x4x2_t __ret; - __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 34); - - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x8x2_t vzipq_s16(int16x8_t __p0, int16x8_t __p1) { - int16x8x2_t __ret; - __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 33); - return __ret; -} -#else -__ai int16x8x2_t vzipq_s16(int16x8_t __p0, int16x8_t __p1) { - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8x2_t __ret; - __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 33); - - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x8x2_t vzip_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x8x2_t __ret; - __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 16); - return __ret; -} -#else -__ai uint8x8x2_t vzip_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x8_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8x2_t __ret; - __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 16); - - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x2x2_t vzip_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint32x2x2_t __ret; - __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 18); - return __ret; -} -#else -__ai uint32x2x2_t vzip_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint32x2x2_t __ret; - __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 18); - - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4x2_t vzip_u16(uint16x4_t __p0, uint16x4_t __p1) { - uint16x4x2_t __ret; - __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 17); - return __ret; -} -#else -__ai uint16x4x2_t vzip_u16(uint16x4_t __p0, uint16x4_t __p1) { - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint16x4x2_t __ret; - __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 17); - - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x8x2_t vzip_s8(int8x8_t __p0, int8x8_t __p1) { - int8x8x2_t __ret; - __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 0); - return __ret; -} -#else -__ai int8x8x2_t vzip_s8(int8x8_t __p0, int8x8_t __p1) { - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8x2_t __ret; - __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 0); - - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x2x2_t vzip_f32(float32x2_t __p0, float32x2_t __p1) { - float32x2x2_t __ret; - __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 9); - return __ret; -} -#else -__ai float32x2x2_t vzip_f32(float32x2_t __p0, float32x2_t __p1) { - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - float32x2x2_t __ret; - __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 9); - - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x2x2_t vzip_s32(int32x2_t __p0, int32x2_t __p1) { - int32x2x2_t __ret; - __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 2); - return __ret; -} -#else -__ai int32x2x2_t 
vzip_s32(int32x2_t __p0, int32x2_t __p1) { - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - int32x2x2_t __ret; - __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 2); - - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x4x2_t vzip_s16(int16x4_t __p0, int16x4_t __p1) { - int16x4x2_t __ret; - __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 1); - return __ret; -} -#else -__ai int16x4x2_t vzip_s16(int16x4_t __p0, int16x4_t __p1) { - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int16x4x2_t __ret; - __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 1); - - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); - return __ret; -} -#endif - -#if !defined(__aarch64__) -#ifdef __LITTLE_ENDIAN__ -__ai poly8x8_t vreinterpret_p8_p16(poly16x4_t __p0) { - poly8x8_t __ret; - __ret = (poly8x8_t)(__p0); - return __ret; -} -#else -__ai poly8x8_t vreinterpret_p8_p16(poly16x4_t __p0) { - poly8x8_t __ret; - __ret = (poly8x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly8x8_t vreinterpret_p8_u8(uint8x8_t __p0) { - poly8x8_t __ret; - __ret = (poly8x8_t)(__p0); - return __ret; -} -#else -__ai poly8x8_t vreinterpret_p8_u8(uint8x8_t __p0) { - poly8x8_t __ret; - __ret = (poly8x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly8x8_t vreinterpret_p8_u32(uint32x2_t __p0) { - poly8x8_t __ret; - __ret = (poly8x8_t)(__p0); - return __ret; -} -#else -__ai poly8x8_t vreinterpret_p8_u32(uint32x2_t __p0) { - poly8x8_t __ret; - __ret = (poly8x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly8x8_t vreinterpret_p8_u64(uint64x1_t __p0) { - poly8x8_t __ret; - __ret = (poly8x8_t)(__p0); - return __ret; -} -#else -__ai poly8x8_t vreinterpret_p8_u64(uint64x1_t __p0) { - poly8x8_t __ret; - __ret = (poly8x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly8x8_t vreinterpret_p8_u16(uint16x4_t __p0) { - poly8x8_t __ret; - __ret = (poly8x8_t)(__p0); - return __ret; -} -#else -__ai poly8x8_t vreinterpret_p8_u16(uint16x4_t __p0) { - poly8x8_t __ret; - __ret = (poly8x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly8x8_t vreinterpret_p8_s8(int8x8_t __p0) { - poly8x8_t __ret; - __ret = (poly8x8_t)(__p0); - return __ret; -} -#else -__ai poly8x8_t vreinterpret_p8_s8(int8x8_t __p0) { - poly8x8_t __ret; - __ret = (poly8x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly8x8_t vreinterpret_p8_f32(float32x2_t __p0) { - poly8x8_t __ret; - __ret = (poly8x8_t)(__p0); - return __ret; -} -#else -__ai poly8x8_t vreinterpret_p8_f32(float32x2_t __p0) { - poly8x8_t __ret; - __ret = (poly8x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly8x8_t vreinterpret_p8_f16(float16x4_t __p0) { - poly8x8_t __ret; - __ret = (poly8x8_t)(__p0); - return __ret; -} -#else -__ai poly8x8_t vreinterpret_p8_f16(float16x4_t __p0) { - poly8x8_t __ret; - __ret = (poly8x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly8x8_t 
vreinterpret_p8_s32(int32x2_t __p0) { - poly8x8_t __ret; - __ret = (poly8x8_t)(__p0); - return __ret; -} -#else -__ai poly8x8_t vreinterpret_p8_s32(int32x2_t __p0) { - poly8x8_t __ret; - __ret = (poly8x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly8x8_t vreinterpret_p8_s64(int64x1_t __p0) { - poly8x8_t __ret; - __ret = (poly8x8_t)(__p0); - return __ret; -} -#else -__ai poly8x8_t vreinterpret_p8_s64(int64x1_t __p0) { - poly8x8_t __ret; - __ret = (poly8x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly8x8_t vreinterpret_p8_s16(int16x4_t __p0) { - poly8x8_t __ret; - __ret = (poly8x8_t)(__p0); - return __ret; -} -#else -__ai poly8x8_t vreinterpret_p8_s16(int16x4_t __p0) { - poly8x8_t __ret; - __ret = (poly8x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly16x4_t vreinterpret_p16_p8(poly8x8_t __p0) { - poly16x4_t __ret; - __ret = (poly16x4_t)(__p0); - return __ret; -} -#else -__ai poly16x4_t vreinterpret_p16_p8(poly8x8_t __p0) { - poly16x4_t __ret; - __ret = (poly16x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly16x4_t vreinterpret_p16_u8(uint8x8_t __p0) { - poly16x4_t __ret; - __ret = (poly16x4_t)(__p0); - return __ret; -} -#else -__ai poly16x4_t vreinterpret_p16_u8(uint8x8_t __p0) { - poly16x4_t __ret; - __ret = (poly16x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly16x4_t vreinterpret_p16_u32(uint32x2_t __p0) { - poly16x4_t __ret; - __ret = (poly16x4_t)(__p0); - return __ret; -} -#else -__ai poly16x4_t vreinterpret_p16_u32(uint32x2_t __p0) { - poly16x4_t __ret; - __ret = (poly16x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly16x4_t vreinterpret_p16_u64(uint64x1_t __p0) { - poly16x4_t __ret; - __ret = (poly16x4_t)(__p0); - return __ret; -} -#else -__ai poly16x4_t vreinterpret_p16_u64(uint64x1_t __p0) { - poly16x4_t __ret; - __ret = (poly16x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly16x4_t vreinterpret_p16_u16(uint16x4_t __p0) { - poly16x4_t __ret; - __ret = (poly16x4_t)(__p0); - return __ret; -} -#else -__ai poly16x4_t vreinterpret_p16_u16(uint16x4_t __p0) { - poly16x4_t __ret; - __ret = (poly16x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly16x4_t vreinterpret_p16_s8(int8x8_t __p0) { - poly16x4_t __ret; - __ret = (poly16x4_t)(__p0); - return __ret; -} -#else -__ai poly16x4_t vreinterpret_p16_s8(int8x8_t __p0) { - poly16x4_t __ret; - __ret = (poly16x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly16x4_t vreinterpret_p16_f32(float32x2_t __p0) { - poly16x4_t __ret; - __ret = (poly16x4_t)(__p0); - return __ret; -} -#else -__ai poly16x4_t vreinterpret_p16_f32(float32x2_t __p0) { - poly16x4_t __ret; - __ret = (poly16x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly16x4_t vreinterpret_p16_f16(float16x4_t __p0) { - poly16x4_t __ret; - __ret = (poly16x4_t)(__p0); - return __ret; -} -#else -__ai poly16x4_t vreinterpret_p16_f16(float16x4_t __p0) { - poly16x4_t __ret; - __ret = (poly16x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly16x4_t vreinterpret_p16_s32(int32x2_t __p0) { - poly16x4_t __ret; - __ret = (poly16x4_t)(__p0); - return __ret; -} -#else -__ai poly16x4_t vreinterpret_p16_s32(int32x2_t __p0) { - poly16x4_t __ret; - __ret = (poly16x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly16x4_t vreinterpret_p16_s64(int64x1_t __p0) { - poly16x4_t 
__ret; - __ret = (poly16x4_t)(__p0); - return __ret; -} -#else -__ai poly16x4_t vreinterpret_p16_s64(int64x1_t __p0) { - poly16x4_t __ret; - __ret = (poly16x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly16x4_t vreinterpret_p16_s16(int16x4_t __p0) { - poly16x4_t __ret; - __ret = (poly16x4_t)(__p0); - return __ret; -} -#else -__ai poly16x4_t vreinterpret_p16_s16(int16x4_t __p0) { - poly16x4_t __ret; - __ret = (poly16x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly8x16_t vreinterpretq_p8_p16(poly16x8_t __p0) { - poly8x16_t __ret; - __ret = (poly8x16_t)(__p0); - return __ret; -} -#else -__ai poly8x16_t vreinterpretq_p8_p16(poly16x8_t __p0) { - poly8x16_t __ret; - __ret = (poly8x16_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly8x16_t vreinterpretq_p8_u8(uint8x16_t __p0) { - poly8x16_t __ret; - __ret = (poly8x16_t)(__p0); - return __ret; -} -#else -__ai poly8x16_t vreinterpretq_p8_u8(uint8x16_t __p0) { - poly8x16_t __ret; - __ret = (poly8x16_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly8x16_t vreinterpretq_p8_u32(uint32x4_t __p0) { - poly8x16_t __ret; - __ret = (poly8x16_t)(__p0); - return __ret; -} -#else -__ai poly8x16_t vreinterpretq_p8_u32(uint32x4_t __p0) { - poly8x16_t __ret; - __ret = (poly8x16_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly8x16_t vreinterpretq_p8_u64(uint64x2_t __p0) { - poly8x16_t __ret; - __ret = (poly8x16_t)(__p0); - return __ret; -} -#else -__ai poly8x16_t vreinterpretq_p8_u64(uint64x2_t __p0) { - poly8x16_t __ret; - __ret = (poly8x16_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly8x16_t vreinterpretq_p8_u16(uint16x8_t __p0) { - poly8x16_t __ret; - __ret = (poly8x16_t)(__p0); - return __ret; -} -#else -__ai poly8x16_t vreinterpretq_p8_u16(uint16x8_t __p0) { - poly8x16_t __ret; - __ret = (poly8x16_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly8x16_t vreinterpretq_p8_s8(int8x16_t __p0) { - poly8x16_t __ret; - __ret = (poly8x16_t)(__p0); - return __ret; -} -#else -__ai poly8x16_t vreinterpretq_p8_s8(int8x16_t __p0) { - poly8x16_t __ret; - __ret = (poly8x16_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly8x16_t vreinterpretq_p8_f32(float32x4_t __p0) { - poly8x16_t __ret; - __ret = (poly8x16_t)(__p0); - return __ret; -} -#else -__ai poly8x16_t vreinterpretq_p8_f32(float32x4_t __p0) { - poly8x16_t __ret; - __ret = (poly8x16_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly8x16_t vreinterpretq_p8_f16(float16x8_t __p0) { - poly8x16_t __ret; - __ret = (poly8x16_t)(__p0); - return __ret; -} -#else -__ai poly8x16_t vreinterpretq_p8_f16(float16x8_t __p0) { - poly8x16_t __ret; - __ret = (poly8x16_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly8x16_t vreinterpretq_p8_s32(int32x4_t __p0) { - poly8x16_t __ret; - __ret = (poly8x16_t)(__p0); - return __ret; -} -#else -__ai poly8x16_t vreinterpretq_p8_s32(int32x4_t __p0) { - poly8x16_t __ret; - __ret = (poly8x16_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly8x16_t vreinterpretq_p8_s64(int64x2_t __p0) { - poly8x16_t __ret; - __ret = (poly8x16_t)(__p0); - return __ret; -} -#else -__ai poly8x16_t vreinterpretq_p8_s64(int64x2_t __p0) { - poly8x16_t __ret; - __ret = (poly8x16_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly8x16_t vreinterpretq_p8_s16(int16x8_t __p0) { - poly8x16_t __ret; - __ret = 
(poly8x16_t)(__p0); - return __ret; -} -#else -__ai poly8x16_t vreinterpretq_p8_s16(int16x8_t __p0) { - poly8x16_t __ret; - __ret = (poly8x16_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly16x8_t vreinterpretq_p16_p8(poly8x16_t __p0) { - poly16x8_t __ret; - __ret = (poly16x8_t)(__p0); - return __ret; -} -#else -__ai poly16x8_t vreinterpretq_p16_p8(poly8x16_t __p0) { - poly16x8_t __ret; - __ret = (poly16x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly16x8_t vreinterpretq_p16_u8(uint8x16_t __p0) { - poly16x8_t __ret; - __ret = (poly16x8_t)(__p0); - return __ret; -} -#else -__ai poly16x8_t vreinterpretq_p16_u8(uint8x16_t __p0) { - poly16x8_t __ret; - __ret = (poly16x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly16x8_t vreinterpretq_p16_u32(uint32x4_t __p0) { - poly16x8_t __ret; - __ret = (poly16x8_t)(__p0); - return __ret; -} -#else -__ai poly16x8_t vreinterpretq_p16_u32(uint32x4_t __p0) { - poly16x8_t __ret; - __ret = (poly16x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly16x8_t vreinterpretq_p16_u64(uint64x2_t __p0) { - poly16x8_t __ret; - __ret = (poly16x8_t)(__p0); - return __ret; -} -#else -__ai poly16x8_t vreinterpretq_p16_u64(uint64x2_t __p0) { - poly16x8_t __ret; - __ret = (poly16x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly16x8_t vreinterpretq_p16_u16(uint16x8_t __p0) { - poly16x8_t __ret; - __ret = (poly16x8_t)(__p0); - return __ret; -} -#else -__ai poly16x8_t vreinterpretq_p16_u16(uint16x8_t __p0) { - poly16x8_t __ret; - __ret = (poly16x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly16x8_t vreinterpretq_p16_s8(int8x16_t __p0) { - poly16x8_t __ret; - __ret = (poly16x8_t)(__p0); - return __ret; -} -#else -__ai poly16x8_t vreinterpretq_p16_s8(int8x16_t __p0) { - poly16x8_t __ret; - __ret = (poly16x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly16x8_t vreinterpretq_p16_f32(float32x4_t __p0) { - poly16x8_t __ret; - __ret = (poly16x8_t)(__p0); - return __ret; -} -#else -__ai poly16x8_t vreinterpretq_p16_f32(float32x4_t __p0) { - poly16x8_t __ret; - __ret = (poly16x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly16x8_t vreinterpretq_p16_f16(float16x8_t __p0) { - poly16x8_t __ret; - __ret = (poly16x8_t)(__p0); - return __ret; -} -#else -__ai poly16x8_t vreinterpretq_p16_f16(float16x8_t __p0) { - poly16x8_t __ret; - __ret = (poly16x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly16x8_t vreinterpretq_p16_s32(int32x4_t __p0) { - poly16x8_t __ret; - __ret = (poly16x8_t)(__p0); - return __ret; -} -#else -__ai poly16x8_t vreinterpretq_p16_s32(int32x4_t __p0) { - poly16x8_t __ret; - __ret = (poly16x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly16x8_t vreinterpretq_p16_s64(int64x2_t __p0) { - poly16x8_t __ret; - __ret = (poly16x8_t)(__p0); - return __ret; -} -#else -__ai poly16x8_t vreinterpretq_p16_s64(int64x2_t __p0) { - poly16x8_t __ret; - __ret = (poly16x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly16x8_t vreinterpretq_p16_s16(int16x8_t __p0) { - poly16x8_t __ret; - __ret = (poly16x8_t)(__p0); - return __ret; -} -#else -__ai poly16x8_t vreinterpretq_p16_s16(int16x8_t __p0) { - poly16x8_t __ret; - __ret = (poly16x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vreinterpretq_u8_p8(poly8x16_t __p0) { - uint8x16_t __ret; - __ret = 
(uint8x16_t)(__p0); - return __ret; -} -#else -__ai uint8x16_t vreinterpretq_u8_p8(poly8x16_t __p0) { - uint8x16_t __ret; - __ret = (uint8x16_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vreinterpretq_u8_p16(poly16x8_t __p0) { - uint8x16_t __ret; - __ret = (uint8x16_t)(__p0); - return __ret; -} -#else -__ai uint8x16_t vreinterpretq_u8_p16(poly16x8_t __p0) { - uint8x16_t __ret; - __ret = (uint8x16_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vreinterpretq_u8_u32(uint32x4_t __p0) { - uint8x16_t __ret; - __ret = (uint8x16_t)(__p0); - return __ret; -} -#else -__ai uint8x16_t vreinterpretq_u8_u32(uint32x4_t __p0) { - uint8x16_t __ret; - __ret = (uint8x16_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vreinterpretq_u8_u64(uint64x2_t __p0) { - uint8x16_t __ret; - __ret = (uint8x16_t)(__p0); - return __ret; -} -#else -__ai uint8x16_t vreinterpretq_u8_u64(uint64x2_t __p0) { - uint8x16_t __ret; - __ret = (uint8x16_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vreinterpretq_u8_u16(uint16x8_t __p0) { - uint8x16_t __ret; - __ret = (uint8x16_t)(__p0); - return __ret; -} -#else -__ai uint8x16_t vreinterpretq_u8_u16(uint16x8_t __p0) { - uint8x16_t __ret; - __ret = (uint8x16_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vreinterpretq_u8_s8(int8x16_t __p0) { - uint8x16_t __ret; - __ret = (uint8x16_t)(__p0); - return __ret; -} -#else -__ai uint8x16_t vreinterpretq_u8_s8(int8x16_t __p0) { - uint8x16_t __ret; - __ret = (uint8x16_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vreinterpretq_u8_f32(float32x4_t __p0) { - uint8x16_t __ret; - __ret = (uint8x16_t)(__p0); - return __ret; -} -#else -__ai uint8x16_t vreinterpretq_u8_f32(float32x4_t __p0) { - uint8x16_t __ret; - __ret = (uint8x16_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vreinterpretq_u8_f16(float16x8_t __p0) { - uint8x16_t __ret; - __ret = (uint8x16_t)(__p0); - return __ret; -} -#else -__ai uint8x16_t vreinterpretq_u8_f16(float16x8_t __p0) { - uint8x16_t __ret; - __ret = (uint8x16_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vreinterpretq_u8_s32(int32x4_t __p0) { - uint8x16_t __ret; - __ret = (uint8x16_t)(__p0); - return __ret; -} -#else -__ai uint8x16_t vreinterpretq_u8_s32(int32x4_t __p0) { - uint8x16_t __ret; - __ret = (uint8x16_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vreinterpretq_u8_s64(int64x2_t __p0) { - uint8x16_t __ret; - __ret = (uint8x16_t)(__p0); - return __ret; -} -#else -__ai uint8x16_t vreinterpretq_u8_s64(int64x2_t __p0) { - uint8x16_t __ret; - __ret = (uint8x16_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vreinterpretq_u8_s16(int16x8_t __p0) { - uint8x16_t __ret; - __ret = (uint8x16_t)(__p0); - return __ret; -} -#else -__ai uint8x16_t vreinterpretq_u8_s16(int16x8_t __p0) { - uint8x16_t __ret; - __ret = (uint8x16_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vreinterpretq_u32_p8(poly8x16_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t)(__p0); - return __ret; -} -#else -__ai uint32x4_t vreinterpretq_u32_p8(poly8x16_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vreinterpretq_u32_p16(poly16x8_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t)(__p0); 
- return __ret; -} -#else -__ai uint32x4_t vreinterpretq_u32_p16(poly16x8_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vreinterpretq_u32_u8(uint8x16_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t)(__p0); - return __ret; -} -#else -__ai uint32x4_t vreinterpretq_u32_u8(uint8x16_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vreinterpretq_u32_u64(uint64x2_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t)(__p0); - return __ret; -} -#else -__ai uint32x4_t vreinterpretq_u32_u64(uint64x2_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vreinterpretq_u32_u16(uint16x8_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t)(__p0); - return __ret; -} -#else -__ai uint32x4_t vreinterpretq_u32_u16(uint16x8_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vreinterpretq_u32_s8(int8x16_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t)(__p0); - return __ret; -} -#else -__ai uint32x4_t vreinterpretq_u32_s8(int8x16_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vreinterpretq_u32_f32(float32x4_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t)(__p0); - return __ret; -} -#else -__ai uint32x4_t vreinterpretq_u32_f32(float32x4_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vreinterpretq_u32_f16(float16x8_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t)(__p0); - return __ret; -} -#else -__ai uint32x4_t vreinterpretq_u32_f16(float16x8_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vreinterpretq_u32_s32(int32x4_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t)(__p0); - return __ret; -} -#else -__ai uint32x4_t vreinterpretq_u32_s32(int32x4_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vreinterpretq_u32_s64(int64x2_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t)(__p0); - return __ret; -} -#else -__ai uint32x4_t vreinterpretq_u32_s64(int64x2_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vreinterpretq_u32_s16(int16x8_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t)(__p0); - return __ret; -} -#else -__ai uint32x4_t vreinterpretq_u32_s16(int16x8_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vreinterpretq_u64_p8(poly8x16_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t)(__p0); - return __ret; -} -#else -__ai uint64x2_t vreinterpretq_u64_p8(poly8x16_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vreinterpretq_u64_p16(poly16x8_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t)(__p0); - return __ret; -} -#else -__ai uint64x2_t vreinterpretq_u64_p16(poly16x8_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vreinterpretq_u64_u8(uint8x16_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t)(__p0); - 
return __ret; -} -#else -__ai uint64x2_t vreinterpretq_u64_u8(uint8x16_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vreinterpretq_u64_u32(uint32x4_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t)(__p0); - return __ret; -} -#else -__ai uint64x2_t vreinterpretq_u64_u32(uint32x4_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vreinterpretq_u64_u16(uint16x8_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t)(__p0); - return __ret; -} -#else -__ai uint64x2_t vreinterpretq_u64_u16(uint16x8_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vreinterpretq_u64_s8(int8x16_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t)(__p0); - return __ret; -} -#else -__ai uint64x2_t vreinterpretq_u64_s8(int8x16_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vreinterpretq_u64_f32(float32x4_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t)(__p0); - return __ret; -} -#else -__ai uint64x2_t vreinterpretq_u64_f32(float32x4_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vreinterpretq_u64_f16(float16x8_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t)(__p0); - return __ret; -} -#else -__ai uint64x2_t vreinterpretq_u64_f16(float16x8_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vreinterpretq_u64_s32(int32x4_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t)(__p0); - return __ret; -} -#else -__ai uint64x2_t vreinterpretq_u64_s32(int32x4_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vreinterpretq_u64_s64(int64x2_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t)(__p0); - return __ret; -} -#else -__ai uint64x2_t vreinterpretq_u64_s64(int64x2_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vreinterpretq_u64_s16(int16x8_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t)(__p0); - return __ret; -} -#else -__ai uint64x2_t vreinterpretq_u64_s16(int16x8_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vreinterpretq_u16_p8(poly8x16_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0); - return __ret; -} -#else -__ai uint16x8_t vreinterpretq_u16_p8(poly8x16_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vreinterpretq_u16_p16(poly16x8_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0); - return __ret; -} -#else -__ai uint16x8_t vreinterpretq_u16_p16(poly16x8_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vreinterpretq_u16_u8(uint8x16_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0); - return __ret; -} -#else -__ai uint16x8_t vreinterpretq_u16_u8(uint8x16_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vreinterpretq_u16_u32(uint32x4_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0); - 
return __ret; -} -#else -__ai uint16x8_t vreinterpretq_u16_u32(uint32x4_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vreinterpretq_u16_u64(uint64x2_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0); - return __ret; -} -#else -__ai uint16x8_t vreinterpretq_u16_u64(uint64x2_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vreinterpretq_u16_s8(int8x16_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0); - return __ret; -} -#else -__ai uint16x8_t vreinterpretq_u16_s8(int8x16_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vreinterpretq_u16_f32(float32x4_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0); - return __ret; -} -#else -__ai uint16x8_t vreinterpretq_u16_f32(float32x4_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vreinterpretq_u16_f16(float16x8_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0); - return __ret; -} -#else -__ai uint16x8_t vreinterpretq_u16_f16(float16x8_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vreinterpretq_u16_s32(int32x4_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0); - return __ret; -} -#else -__ai uint16x8_t vreinterpretq_u16_s32(int32x4_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vreinterpretq_u16_s64(int64x2_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0); - return __ret; -} -#else -__ai uint16x8_t vreinterpretq_u16_s64(int64x2_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vreinterpretq_u16_s16(int16x8_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0); - return __ret; -} -#else -__ai uint16x8_t vreinterpretq_u16_s16(int16x8_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vreinterpretq_s8_p8(poly8x16_t __p0) { - int8x16_t __ret; - __ret = (int8x16_t)(__p0); - return __ret; -} -#else -__ai int8x16_t vreinterpretq_s8_p8(poly8x16_t __p0) { - int8x16_t __ret; - __ret = (int8x16_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vreinterpretq_s8_p16(poly16x8_t __p0) { - int8x16_t __ret; - __ret = (int8x16_t)(__p0); - return __ret; -} -#else -__ai int8x16_t vreinterpretq_s8_p16(poly16x8_t __p0) { - int8x16_t __ret; - __ret = (int8x16_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vreinterpretq_s8_u8(uint8x16_t __p0) { - int8x16_t __ret; - __ret = (int8x16_t)(__p0); - return __ret; -} -#else -__ai int8x16_t vreinterpretq_s8_u8(uint8x16_t __p0) { - int8x16_t __ret; - __ret = (int8x16_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vreinterpretq_s8_u32(uint32x4_t __p0) { - int8x16_t __ret; - __ret = (int8x16_t)(__p0); - return __ret; -} -#else -__ai int8x16_t vreinterpretq_s8_u32(uint32x4_t __p0) { - int8x16_t __ret; - __ret = (int8x16_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vreinterpretq_s8_u64(uint64x2_t __p0) { - int8x16_t __ret; - __ret = (int8x16_t)(__p0); - return __ret; -} -#else -__ai 
int8x16_t vreinterpretq_s8_u64(uint64x2_t __p0) { - int8x16_t __ret; - __ret = (int8x16_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vreinterpretq_s8_u16(uint16x8_t __p0) { - int8x16_t __ret; - __ret = (int8x16_t)(__p0); - return __ret; -} -#else -__ai int8x16_t vreinterpretq_s8_u16(uint16x8_t __p0) { - int8x16_t __ret; - __ret = (int8x16_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vreinterpretq_s8_f32(float32x4_t __p0) { - int8x16_t __ret; - __ret = (int8x16_t)(__p0); - return __ret; -} -#else -__ai int8x16_t vreinterpretq_s8_f32(float32x4_t __p0) { - int8x16_t __ret; - __ret = (int8x16_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vreinterpretq_s8_f16(float16x8_t __p0) { - int8x16_t __ret; - __ret = (int8x16_t)(__p0); - return __ret; -} -#else -__ai int8x16_t vreinterpretq_s8_f16(float16x8_t __p0) { - int8x16_t __ret; - __ret = (int8x16_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vreinterpretq_s8_s32(int32x4_t __p0) { - int8x16_t __ret; - __ret = (int8x16_t)(__p0); - return __ret; -} -#else -__ai int8x16_t vreinterpretq_s8_s32(int32x4_t __p0) { - int8x16_t __ret; - __ret = (int8x16_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vreinterpretq_s8_s64(int64x2_t __p0) { - int8x16_t __ret; - __ret = (int8x16_t)(__p0); - return __ret; -} -#else -__ai int8x16_t vreinterpretq_s8_s64(int64x2_t __p0) { - int8x16_t __ret; - __ret = (int8x16_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vreinterpretq_s8_s16(int16x8_t __p0) { - int8x16_t __ret; - __ret = (int8x16_t)(__p0); - return __ret; -} -#else -__ai int8x16_t vreinterpretq_s8_s16(int16x8_t __p0) { - int8x16_t __ret; - __ret = (int8x16_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vreinterpretq_f32_p8(poly8x16_t __p0) { - float32x4_t __ret; - __ret = (float32x4_t)(__p0); - return __ret; -} -#else -__ai float32x4_t vreinterpretq_f32_p8(poly8x16_t __p0) { - float32x4_t __ret; - __ret = (float32x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vreinterpretq_f32_p16(poly16x8_t __p0) { - float32x4_t __ret; - __ret = (float32x4_t)(__p0); - return __ret; -} -#else -__ai float32x4_t vreinterpretq_f32_p16(poly16x8_t __p0) { - float32x4_t __ret; - __ret = (float32x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vreinterpretq_f32_u8(uint8x16_t __p0) { - float32x4_t __ret; - __ret = (float32x4_t)(__p0); - return __ret; -} -#else -__ai float32x4_t vreinterpretq_f32_u8(uint8x16_t __p0) { - float32x4_t __ret; - __ret = (float32x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vreinterpretq_f32_u32(uint32x4_t __p0) { - float32x4_t __ret; - __ret = (float32x4_t)(__p0); - return __ret; -} -#else -__ai float32x4_t vreinterpretq_f32_u32(uint32x4_t __p0) { - float32x4_t __ret; - __ret = (float32x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vreinterpretq_f32_u64(uint64x2_t __p0) { - float32x4_t __ret; - __ret = (float32x4_t)(__p0); - return __ret; -} -#else -__ai float32x4_t vreinterpretq_f32_u64(uint64x2_t __p0) { - float32x4_t __ret; - __ret = (float32x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vreinterpretq_f32_u16(uint16x8_t __p0) { - float32x4_t __ret; - __ret = (float32x4_t)(__p0); - return __ret; -} -#else -__ai float32x4_t 
vreinterpretq_f32_u16(uint16x8_t __p0) { - float32x4_t __ret; - __ret = (float32x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vreinterpretq_f32_s8(int8x16_t __p0) { - float32x4_t __ret; - __ret = (float32x4_t)(__p0); - return __ret; -} -#else -__ai float32x4_t vreinterpretq_f32_s8(int8x16_t __p0) { - float32x4_t __ret; - __ret = (float32x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vreinterpretq_f32_f16(float16x8_t __p0) { - float32x4_t __ret; - __ret = (float32x4_t)(__p0); - return __ret; -} -#else -__ai float32x4_t vreinterpretq_f32_f16(float16x8_t __p0) { - float32x4_t __ret; - __ret = (float32x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vreinterpretq_f32_s32(int32x4_t __p0) { - float32x4_t __ret; - __ret = (float32x4_t)(__p0); - return __ret; -} -#else -__ai float32x4_t vreinterpretq_f32_s32(int32x4_t __p0) { - float32x4_t __ret; - __ret = (float32x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vreinterpretq_f32_s64(int64x2_t __p0) { - float32x4_t __ret; - __ret = (float32x4_t)(__p0); - return __ret; -} -#else -__ai float32x4_t vreinterpretq_f32_s64(int64x2_t __p0) { - float32x4_t __ret; - __ret = (float32x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vreinterpretq_f32_s16(int16x8_t __p0) { - float32x4_t __ret; - __ret = (float32x4_t)(__p0); - return __ret; -} -#else -__ai float32x4_t vreinterpretq_f32_s16(int16x8_t __p0) { - float32x4_t __ret; - __ret = (float32x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x8_t vreinterpretq_f16_p8(poly8x16_t __p0) { - float16x8_t __ret; - __ret = (float16x8_t)(__p0); - return __ret; -} -#else -__ai float16x8_t vreinterpretq_f16_p8(poly8x16_t __p0) { - float16x8_t __ret; - __ret = (float16x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x8_t vreinterpretq_f16_p16(poly16x8_t __p0) { - float16x8_t __ret; - __ret = (float16x8_t)(__p0); - return __ret; -} -#else -__ai float16x8_t vreinterpretq_f16_p16(poly16x8_t __p0) { - float16x8_t __ret; - __ret = (float16x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x8_t vreinterpretq_f16_u8(uint8x16_t __p0) { - float16x8_t __ret; - __ret = (float16x8_t)(__p0); - return __ret; -} -#else -__ai float16x8_t vreinterpretq_f16_u8(uint8x16_t __p0) { - float16x8_t __ret; - __ret = (float16x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x8_t vreinterpretq_f16_u32(uint32x4_t __p0) { - float16x8_t __ret; - __ret = (float16x8_t)(__p0); - return __ret; -} -#else -__ai float16x8_t vreinterpretq_f16_u32(uint32x4_t __p0) { - float16x8_t __ret; - __ret = (float16x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x8_t vreinterpretq_f16_u64(uint64x2_t __p0) { - float16x8_t __ret; - __ret = (float16x8_t)(__p0); - return __ret; -} -#else -__ai float16x8_t vreinterpretq_f16_u64(uint64x2_t __p0) { - float16x8_t __ret; - __ret = (float16x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x8_t vreinterpretq_f16_u16(uint16x8_t __p0) { - float16x8_t __ret; - __ret = (float16x8_t)(__p0); - return __ret; -} -#else -__ai float16x8_t vreinterpretq_f16_u16(uint16x8_t __p0) { - float16x8_t __ret; - __ret = (float16x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x8_t vreinterpretq_f16_s8(int8x16_t __p0) { - float16x8_t __ret; - __ret 
= (float16x8_t)(__p0); - return __ret; -} -#else -__ai float16x8_t vreinterpretq_f16_s8(int8x16_t __p0) { - float16x8_t __ret; - __ret = (float16x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x8_t vreinterpretq_f16_f32(float32x4_t __p0) { - float16x8_t __ret; - __ret = (float16x8_t)(__p0); - return __ret; -} -#else -__ai float16x8_t vreinterpretq_f16_f32(float32x4_t __p0) { - float16x8_t __ret; - __ret = (float16x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x8_t vreinterpretq_f16_s32(int32x4_t __p0) { - float16x8_t __ret; - __ret = (float16x8_t)(__p0); - return __ret; -} -#else -__ai float16x8_t vreinterpretq_f16_s32(int32x4_t __p0) { - float16x8_t __ret; - __ret = (float16x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x8_t vreinterpretq_f16_s64(int64x2_t __p0) { - float16x8_t __ret; - __ret = (float16x8_t)(__p0); - return __ret; -} -#else -__ai float16x8_t vreinterpretq_f16_s64(int64x2_t __p0) { - float16x8_t __ret; - __ret = (float16x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x8_t vreinterpretq_f16_s16(int16x8_t __p0) { - float16x8_t __ret; - __ret = (float16x8_t)(__p0); - return __ret; -} -#else -__ai float16x8_t vreinterpretq_f16_s16(int16x8_t __p0) { - float16x8_t __ret; - __ret = (float16x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vreinterpretq_s32_p8(poly8x16_t __p0) { - int32x4_t __ret; - __ret = (int32x4_t)(__p0); - return __ret; -} -#else -__ai int32x4_t vreinterpretq_s32_p8(poly8x16_t __p0) { - int32x4_t __ret; - __ret = (int32x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vreinterpretq_s32_p16(poly16x8_t __p0) { - int32x4_t __ret; - __ret = (int32x4_t)(__p0); - return __ret; -} -#else -__ai int32x4_t vreinterpretq_s32_p16(poly16x8_t __p0) { - int32x4_t __ret; - __ret = (int32x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vreinterpretq_s32_u8(uint8x16_t __p0) { - int32x4_t __ret; - __ret = (int32x4_t)(__p0); - return __ret; -} -#else -__ai int32x4_t vreinterpretq_s32_u8(uint8x16_t __p0) { - int32x4_t __ret; - __ret = (int32x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vreinterpretq_s32_u32(uint32x4_t __p0) { - int32x4_t __ret; - __ret = (int32x4_t)(__p0); - return __ret; -} -#else -__ai int32x4_t vreinterpretq_s32_u32(uint32x4_t __p0) { - int32x4_t __ret; - __ret = (int32x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vreinterpretq_s32_u64(uint64x2_t __p0) { - int32x4_t __ret; - __ret = (int32x4_t)(__p0); - return __ret; -} -#else -__ai int32x4_t vreinterpretq_s32_u64(uint64x2_t __p0) { - int32x4_t __ret; - __ret = (int32x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vreinterpretq_s32_u16(uint16x8_t __p0) { - int32x4_t __ret; - __ret = (int32x4_t)(__p0); - return __ret; -} -#else -__ai int32x4_t vreinterpretq_s32_u16(uint16x8_t __p0) { - int32x4_t __ret; - __ret = (int32x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vreinterpretq_s32_s8(int8x16_t __p0) { - int32x4_t __ret; - __ret = (int32x4_t)(__p0); - return __ret; -} -#else -__ai int32x4_t vreinterpretq_s32_s8(int8x16_t __p0) { - int32x4_t __ret; - __ret = (int32x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vreinterpretq_s32_f32(float32x4_t __p0) { - int32x4_t __ret; - __ret = 
(int32x4_t)(__p0); - return __ret; -} -#else -__ai int32x4_t vreinterpretq_s32_f32(float32x4_t __p0) { - int32x4_t __ret; - __ret = (int32x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vreinterpretq_s32_f16(float16x8_t __p0) { - int32x4_t __ret; - __ret = (int32x4_t)(__p0); - return __ret; -} -#else -__ai int32x4_t vreinterpretq_s32_f16(float16x8_t __p0) { - int32x4_t __ret; - __ret = (int32x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vreinterpretq_s32_s64(int64x2_t __p0) { - int32x4_t __ret; - __ret = (int32x4_t)(__p0); - return __ret; -} -#else -__ai int32x4_t vreinterpretq_s32_s64(int64x2_t __p0) { - int32x4_t __ret; - __ret = (int32x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vreinterpretq_s32_s16(int16x8_t __p0) { - int32x4_t __ret; - __ret = (int32x4_t)(__p0); - return __ret; -} -#else -__ai int32x4_t vreinterpretq_s32_s16(int16x8_t __p0) { - int32x4_t __ret; - __ret = (int32x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vreinterpretq_s64_p8(poly8x16_t __p0) { - int64x2_t __ret; - __ret = (int64x2_t)(__p0); - return __ret; -} -#else -__ai int64x2_t vreinterpretq_s64_p8(poly8x16_t __p0) { - int64x2_t __ret; - __ret = (int64x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vreinterpretq_s64_p16(poly16x8_t __p0) { - int64x2_t __ret; - __ret = (int64x2_t)(__p0); - return __ret; -} -#else -__ai int64x2_t vreinterpretq_s64_p16(poly16x8_t __p0) { - int64x2_t __ret; - __ret = (int64x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vreinterpretq_s64_u8(uint8x16_t __p0) { - int64x2_t __ret; - __ret = (int64x2_t)(__p0); - return __ret; -} -#else -__ai int64x2_t vreinterpretq_s64_u8(uint8x16_t __p0) { - int64x2_t __ret; - __ret = (int64x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vreinterpretq_s64_u32(uint32x4_t __p0) { - int64x2_t __ret; - __ret = (int64x2_t)(__p0); - return __ret; -} -#else -__ai int64x2_t vreinterpretq_s64_u32(uint32x4_t __p0) { - int64x2_t __ret; - __ret = (int64x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vreinterpretq_s64_u64(uint64x2_t __p0) { - int64x2_t __ret; - __ret = (int64x2_t)(__p0); - return __ret; -} -#else -__ai int64x2_t vreinterpretq_s64_u64(uint64x2_t __p0) { - int64x2_t __ret; - __ret = (int64x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vreinterpretq_s64_u16(uint16x8_t __p0) { - int64x2_t __ret; - __ret = (int64x2_t)(__p0); - return __ret; -} -#else -__ai int64x2_t vreinterpretq_s64_u16(uint16x8_t __p0) { - int64x2_t __ret; - __ret = (int64x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vreinterpretq_s64_s8(int8x16_t __p0) { - int64x2_t __ret; - __ret = (int64x2_t)(__p0); - return __ret; -} -#else -__ai int64x2_t vreinterpretq_s64_s8(int8x16_t __p0) { - int64x2_t __ret; - __ret = (int64x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vreinterpretq_s64_f32(float32x4_t __p0) { - int64x2_t __ret; - __ret = (int64x2_t)(__p0); - return __ret; -} -#else -__ai int64x2_t vreinterpretq_s64_f32(float32x4_t __p0) { - int64x2_t __ret; - __ret = (int64x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vreinterpretq_s64_f16(float16x8_t __p0) { - int64x2_t __ret; - __ret = (int64x2_t)(__p0); - return __ret; -} -#else -__ai int64x2_t 
vreinterpretq_s64_f16(float16x8_t __p0) { - int64x2_t __ret; - __ret = (int64x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vreinterpretq_s64_s32(int32x4_t __p0) { - int64x2_t __ret; - __ret = (int64x2_t)(__p0); - return __ret; -} -#else -__ai int64x2_t vreinterpretq_s64_s32(int32x4_t __p0) { - int64x2_t __ret; - __ret = (int64x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vreinterpretq_s64_s16(int16x8_t __p0) { - int64x2_t __ret; - __ret = (int64x2_t)(__p0); - return __ret; -} -#else -__ai int64x2_t vreinterpretq_s64_s16(int16x8_t __p0) { - int64x2_t __ret; - __ret = (int64x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vreinterpretq_s16_p8(poly8x16_t __p0) { - int16x8_t __ret; - __ret = (int16x8_t)(__p0); - return __ret; -} -#else -__ai int16x8_t vreinterpretq_s16_p8(poly8x16_t __p0) { - int16x8_t __ret; - __ret = (int16x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vreinterpretq_s16_p16(poly16x8_t __p0) { - int16x8_t __ret; - __ret = (int16x8_t)(__p0); - return __ret; -} -#else -__ai int16x8_t vreinterpretq_s16_p16(poly16x8_t __p0) { - int16x8_t __ret; - __ret = (int16x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vreinterpretq_s16_u8(uint8x16_t __p0) { - int16x8_t __ret; - __ret = (int16x8_t)(__p0); - return __ret; -} -#else -__ai int16x8_t vreinterpretq_s16_u8(uint8x16_t __p0) { - int16x8_t __ret; - __ret = (int16x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vreinterpretq_s16_u32(uint32x4_t __p0) { - int16x8_t __ret; - __ret = (int16x8_t)(__p0); - return __ret; -} -#else -__ai int16x8_t vreinterpretq_s16_u32(uint32x4_t __p0) { - int16x8_t __ret; - __ret = (int16x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vreinterpretq_s16_u64(uint64x2_t __p0) { - int16x8_t __ret; - __ret = (int16x8_t)(__p0); - return __ret; -} -#else -__ai int16x8_t vreinterpretq_s16_u64(uint64x2_t __p0) { - int16x8_t __ret; - __ret = (int16x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vreinterpretq_s16_u16(uint16x8_t __p0) { - int16x8_t __ret; - __ret = (int16x8_t)(__p0); - return __ret; -} -#else -__ai int16x8_t vreinterpretq_s16_u16(uint16x8_t __p0) { - int16x8_t __ret; - __ret = (int16x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vreinterpretq_s16_s8(int8x16_t __p0) { - int16x8_t __ret; - __ret = (int16x8_t)(__p0); - return __ret; -} -#else -__ai int16x8_t vreinterpretq_s16_s8(int8x16_t __p0) { - int16x8_t __ret; - __ret = (int16x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vreinterpretq_s16_f32(float32x4_t __p0) { - int16x8_t __ret; - __ret = (int16x8_t)(__p0); - return __ret; -} -#else -__ai int16x8_t vreinterpretq_s16_f32(float32x4_t __p0) { - int16x8_t __ret; - __ret = (int16x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vreinterpretq_s16_f16(float16x8_t __p0) { - int16x8_t __ret; - __ret = (int16x8_t)(__p0); - return __ret; -} -#else -__ai int16x8_t vreinterpretq_s16_f16(float16x8_t __p0) { - int16x8_t __ret; - __ret = (int16x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vreinterpretq_s16_s32(int32x4_t __p0) { - int16x8_t __ret; - __ret = (int16x8_t)(__p0); - return __ret; -} -#else -__ai int16x8_t vreinterpretq_s16_s32(int32x4_t __p0) { - int16x8_t __ret; - __ret = 
(int16x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vreinterpretq_s16_s64(int64x2_t __p0) { - int16x8_t __ret; - __ret = (int16x8_t)(__p0); - return __ret; -} -#else -__ai int16x8_t vreinterpretq_s16_s64(int64x2_t __p0) { - int16x8_t __ret; - __ret = (int16x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vreinterpret_u8_p8(poly8x8_t __p0) { - uint8x8_t __ret; - __ret = (uint8x8_t)(__p0); - return __ret; -} -#else -__ai uint8x8_t vreinterpret_u8_p8(poly8x8_t __p0) { - uint8x8_t __ret; - __ret = (uint8x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vreinterpret_u8_p16(poly16x4_t __p0) { - uint8x8_t __ret; - __ret = (uint8x8_t)(__p0); - return __ret; -} -#else -__ai uint8x8_t vreinterpret_u8_p16(poly16x4_t __p0) { - uint8x8_t __ret; - __ret = (uint8x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vreinterpret_u8_u32(uint32x2_t __p0) { - uint8x8_t __ret; - __ret = (uint8x8_t)(__p0); - return __ret; -} -#else -__ai uint8x8_t vreinterpret_u8_u32(uint32x2_t __p0) { - uint8x8_t __ret; - __ret = (uint8x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vreinterpret_u8_u64(uint64x1_t __p0) { - uint8x8_t __ret; - __ret = (uint8x8_t)(__p0); - return __ret; -} -#else -__ai uint8x8_t vreinterpret_u8_u64(uint64x1_t __p0) { - uint8x8_t __ret; - __ret = (uint8x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vreinterpret_u8_u16(uint16x4_t __p0) { - uint8x8_t __ret; - __ret = (uint8x8_t)(__p0); - return __ret; -} -#else -__ai uint8x8_t vreinterpret_u8_u16(uint16x4_t __p0) { - uint8x8_t __ret; - __ret = (uint8x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vreinterpret_u8_s8(int8x8_t __p0) { - uint8x8_t __ret; - __ret = (uint8x8_t)(__p0); - return __ret; -} -#else -__ai uint8x8_t vreinterpret_u8_s8(int8x8_t __p0) { - uint8x8_t __ret; - __ret = (uint8x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vreinterpret_u8_f32(float32x2_t __p0) { - uint8x8_t __ret; - __ret = (uint8x8_t)(__p0); - return __ret; -} -#else -__ai uint8x8_t vreinterpret_u8_f32(float32x2_t __p0) { - uint8x8_t __ret; - __ret = (uint8x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vreinterpret_u8_f16(float16x4_t __p0) { - uint8x8_t __ret; - __ret = (uint8x8_t)(__p0); - return __ret; -} -#else -__ai uint8x8_t vreinterpret_u8_f16(float16x4_t __p0) { - uint8x8_t __ret; - __ret = (uint8x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vreinterpret_u8_s32(int32x2_t __p0) { - uint8x8_t __ret; - __ret = (uint8x8_t)(__p0); - return __ret; -} -#else -__ai uint8x8_t vreinterpret_u8_s32(int32x2_t __p0) { - uint8x8_t __ret; - __ret = (uint8x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vreinterpret_u8_s64(int64x1_t __p0) { - uint8x8_t __ret; - __ret = (uint8x8_t)(__p0); - return __ret; -} -#else -__ai uint8x8_t vreinterpret_u8_s64(int64x1_t __p0) { - uint8x8_t __ret; - __ret = (uint8x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vreinterpret_u8_s16(int16x4_t __p0) { - uint8x8_t __ret; - __ret = (uint8x8_t)(__p0); - return __ret; -} -#else -__ai uint8x8_t vreinterpret_u8_s16(int16x4_t __p0) { - uint8x8_t __ret; - __ret = (uint8x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vreinterpret_u32_p8(poly8x8_t 
__p0) { - uint32x2_t __ret; - __ret = (uint32x2_t)(__p0); - return __ret; -} -#else -__ai uint32x2_t vreinterpret_u32_p8(poly8x8_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vreinterpret_u32_p16(poly16x4_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t)(__p0); - return __ret; -} -#else -__ai uint32x2_t vreinterpret_u32_p16(poly16x4_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vreinterpret_u32_u8(uint8x8_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t)(__p0); - return __ret; -} -#else -__ai uint32x2_t vreinterpret_u32_u8(uint8x8_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vreinterpret_u32_u64(uint64x1_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t)(__p0); - return __ret; -} -#else -__ai uint32x2_t vreinterpret_u32_u64(uint64x1_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vreinterpret_u32_u16(uint16x4_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t)(__p0); - return __ret; -} -#else -__ai uint32x2_t vreinterpret_u32_u16(uint16x4_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vreinterpret_u32_s8(int8x8_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t)(__p0); - return __ret; -} -#else -__ai uint32x2_t vreinterpret_u32_s8(int8x8_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vreinterpret_u32_f32(float32x2_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t)(__p0); - return __ret; -} -#else -__ai uint32x2_t vreinterpret_u32_f32(float32x2_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vreinterpret_u32_f16(float16x4_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t)(__p0); - return __ret; -} -#else -__ai uint32x2_t vreinterpret_u32_f16(float16x4_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vreinterpret_u32_s32(int32x2_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t)(__p0); - return __ret; -} -#else -__ai uint32x2_t vreinterpret_u32_s32(int32x2_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vreinterpret_u32_s64(int64x1_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t)(__p0); - return __ret; -} -#else -__ai uint32x2_t vreinterpret_u32_s64(int64x1_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vreinterpret_u32_s16(int16x4_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t)(__p0); - return __ret; -} -#else -__ai uint32x2_t vreinterpret_u32_s16(int16x4_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x1_t vreinterpret_u64_p8(poly8x8_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0); - return __ret; -} -#else -__ai uint64x1_t vreinterpret_u64_p8(poly8x8_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x1_t vreinterpret_u64_p16(poly16x4_t __p0) { - uint64x1_t __ret; - 
__ret = (uint64x1_t)(__p0); - return __ret; -} -#else -__ai uint64x1_t vreinterpret_u64_p16(poly16x4_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x1_t vreinterpret_u64_u8(uint8x8_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0); - return __ret; -} -#else -__ai uint64x1_t vreinterpret_u64_u8(uint8x8_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x1_t vreinterpret_u64_u32(uint32x2_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0); - return __ret; -} -#else -__ai uint64x1_t vreinterpret_u64_u32(uint32x2_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x1_t vreinterpret_u64_u16(uint16x4_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0); - return __ret; -} -#else -__ai uint64x1_t vreinterpret_u64_u16(uint16x4_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x1_t vreinterpret_u64_s8(int8x8_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0); - return __ret; -} -#else -__ai uint64x1_t vreinterpret_u64_s8(int8x8_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x1_t vreinterpret_u64_f32(float32x2_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0); - return __ret; -} -#else -__ai uint64x1_t vreinterpret_u64_f32(float32x2_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x1_t vreinterpret_u64_f16(float16x4_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0); - return __ret; -} -#else -__ai uint64x1_t vreinterpret_u64_f16(float16x4_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x1_t vreinterpret_u64_s32(int32x2_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0); - return __ret; -} -#else -__ai uint64x1_t vreinterpret_u64_s32(int32x2_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x1_t vreinterpret_u64_s64(int64x1_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0); - return __ret; -} -#else -__ai uint64x1_t vreinterpret_u64_s64(int64x1_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x1_t vreinterpret_u64_s16(int16x4_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0); - return __ret; -} -#else -__ai uint64x1_t vreinterpret_u64_s16(int16x4_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vreinterpret_u16_p8(poly8x8_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t)(__p0); - return __ret; -} -#else -__ai uint16x4_t vreinterpret_u16_p8(poly8x8_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vreinterpret_u16_p16(poly16x4_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t)(__p0); - return __ret; -} -#else -__ai uint16x4_t vreinterpret_u16_p16(poly16x4_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vreinterpret_u16_u8(uint8x8_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t)(__p0); - 
return __ret; -} -#else -__ai uint16x4_t vreinterpret_u16_u8(uint8x8_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vreinterpret_u16_u32(uint32x2_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t)(__p0); - return __ret; -} -#else -__ai uint16x4_t vreinterpret_u16_u32(uint32x2_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vreinterpret_u16_u64(uint64x1_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t)(__p0); - return __ret; -} -#else -__ai uint16x4_t vreinterpret_u16_u64(uint64x1_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vreinterpret_u16_s8(int8x8_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t)(__p0); - return __ret; -} -#else -__ai uint16x4_t vreinterpret_u16_s8(int8x8_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vreinterpret_u16_f32(float32x2_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t)(__p0); - return __ret; -} -#else -__ai uint16x4_t vreinterpret_u16_f32(float32x2_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vreinterpret_u16_f16(float16x4_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t)(__p0); - return __ret; -} -#else -__ai uint16x4_t vreinterpret_u16_f16(float16x4_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vreinterpret_u16_s32(int32x2_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t)(__p0); - return __ret; -} -#else -__ai uint16x4_t vreinterpret_u16_s32(int32x2_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vreinterpret_u16_s64(int64x1_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t)(__p0); - return __ret; -} -#else -__ai uint16x4_t vreinterpret_u16_s64(int64x1_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vreinterpret_u16_s16(int16x4_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t)(__p0); - return __ret; -} -#else -__ai uint16x4_t vreinterpret_u16_s16(int16x4_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vreinterpret_s8_p8(poly8x8_t __p0) { - int8x8_t __ret; - __ret = (int8x8_t)(__p0); - return __ret; -} -#else -__ai int8x8_t vreinterpret_s8_p8(poly8x8_t __p0) { - int8x8_t __ret; - __ret = (int8x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vreinterpret_s8_p16(poly16x4_t __p0) { - int8x8_t __ret; - __ret = (int8x8_t)(__p0); - return __ret; -} -#else -__ai int8x8_t vreinterpret_s8_p16(poly16x4_t __p0) { - int8x8_t __ret; - __ret = (int8x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vreinterpret_s8_u8(uint8x8_t __p0) { - int8x8_t __ret; - __ret = (int8x8_t)(__p0); - return __ret; -} -#else -__ai int8x8_t vreinterpret_s8_u8(uint8x8_t __p0) { - int8x8_t __ret; - __ret = (int8x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vreinterpret_s8_u32(uint32x2_t __p0) { - int8x8_t __ret; - __ret = (int8x8_t)(__p0); - return __ret; -} -#else -__ai int8x8_t vreinterpret_s8_u32(uint32x2_t __p0) { - 
int8x8_t __ret; - __ret = (int8x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vreinterpret_s8_u64(uint64x1_t __p0) { - int8x8_t __ret; - __ret = (int8x8_t)(__p0); - return __ret; -} -#else -__ai int8x8_t vreinterpret_s8_u64(uint64x1_t __p0) { - int8x8_t __ret; - __ret = (int8x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vreinterpret_s8_u16(uint16x4_t __p0) { - int8x8_t __ret; - __ret = (int8x8_t)(__p0); - return __ret; -} -#else -__ai int8x8_t vreinterpret_s8_u16(uint16x4_t __p0) { - int8x8_t __ret; - __ret = (int8x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vreinterpret_s8_f32(float32x2_t __p0) { - int8x8_t __ret; - __ret = (int8x8_t)(__p0); - return __ret; -} -#else -__ai int8x8_t vreinterpret_s8_f32(float32x2_t __p0) { - int8x8_t __ret; - __ret = (int8x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vreinterpret_s8_f16(float16x4_t __p0) { - int8x8_t __ret; - __ret = (int8x8_t)(__p0); - return __ret; -} -#else -__ai int8x8_t vreinterpret_s8_f16(float16x4_t __p0) { - int8x8_t __ret; - __ret = (int8x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vreinterpret_s8_s32(int32x2_t __p0) { - int8x8_t __ret; - __ret = (int8x8_t)(__p0); - return __ret; -} -#else -__ai int8x8_t vreinterpret_s8_s32(int32x2_t __p0) { - int8x8_t __ret; - __ret = (int8x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vreinterpret_s8_s64(int64x1_t __p0) { - int8x8_t __ret; - __ret = (int8x8_t)(__p0); - return __ret; -} -#else -__ai int8x8_t vreinterpret_s8_s64(int64x1_t __p0) { - int8x8_t __ret; - __ret = (int8x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vreinterpret_s8_s16(int16x4_t __p0) { - int8x8_t __ret; - __ret = (int8x8_t)(__p0); - return __ret; -} -#else -__ai int8x8_t vreinterpret_s8_s16(int16x4_t __p0) { - int8x8_t __ret; - __ret = (int8x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vreinterpret_f32_p8(poly8x8_t __p0) { - float32x2_t __ret; - __ret = (float32x2_t)(__p0); - return __ret; -} -#else -__ai float32x2_t vreinterpret_f32_p8(poly8x8_t __p0) { - float32x2_t __ret; - __ret = (float32x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vreinterpret_f32_p16(poly16x4_t __p0) { - float32x2_t __ret; - __ret = (float32x2_t)(__p0); - return __ret; -} -#else -__ai float32x2_t vreinterpret_f32_p16(poly16x4_t __p0) { - float32x2_t __ret; - __ret = (float32x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vreinterpret_f32_u8(uint8x8_t __p0) { - float32x2_t __ret; - __ret = (float32x2_t)(__p0); - return __ret; -} -#else -__ai float32x2_t vreinterpret_f32_u8(uint8x8_t __p0) { - float32x2_t __ret; - __ret = (float32x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vreinterpret_f32_u32(uint32x2_t __p0) { - float32x2_t __ret; - __ret = (float32x2_t)(__p0); - return __ret; -} -#else -__ai float32x2_t vreinterpret_f32_u32(uint32x2_t __p0) { - float32x2_t __ret; - __ret = (float32x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vreinterpret_f32_u64(uint64x1_t __p0) { - float32x2_t __ret; - __ret = (float32x2_t)(__p0); - return __ret; -} -#else -__ai float32x2_t vreinterpret_f32_u64(uint64x1_t __p0) { - float32x2_t __ret; - __ret = (float32x2_t)(__p0); - return __ret; -} -#endif - -#ifdef 
__LITTLE_ENDIAN__ -__ai float32x2_t vreinterpret_f32_u16(uint16x4_t __p0) { - float32x2_t __ret; - __ret = (float32x2_t)(__p0); - return __ret; -} -#else -__ai float32x2_t vreinterpret_f32_u16(uint16x4_t __p0) { - float32x2_t __ret; - __ret = (float32x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vreinterpret_f32_s8(int8x8_t __p0) { - float32x2_t __ret; - __ret = (float32x2_t)(__p0); - return __ret; -} -#else -__ai float32x2_t vreinterpret_f32_s8(int8x8_t __p0) { - float32x2_t __ret; - __ret = (float32x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vreinterpret_f32_f16(float16x4_t __p0) { - float32x2_t __ret; - __ret = (float32x2_t)(__p0); - return __ret; -} -#else -__ai float32x2_t vreinterpret_f32_f16(float16x4_t __p0) { - float32x2_t __ret; - __ret = (float32x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vreinterpret_f32_s32(int32x2_t __p0) { - float32x2_t __ret; - __ret = (float32x2_t)(__p0); - return __ret; -} -#else -__ai float32x2_t vreinterpret_f32_s32(int32x2_t __p0) { - float32x2_t __ret; - __ret = (float32x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vreinterpret_f32_s64(int64x1_t __p0) { - float32x2_t __ret; - __ret = (float32x2_t)(__p0); - return __ret; -} -#else -__ai float32x2_t vreinterpret_f32_s64(int64x1_t __p0) { - float32x2_t __ret; - __ret = (float32x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vreinterpret_f32_s16(int16x4_t __p0) { - float32x2_t __ret; - __ret = (float32x2_t)(__p0); - return __ret; -} -#else -__ai float32x2_t vreinterpret_f32_s16(int16x4_t __p0) { - float32x2_t __ret; - __ret = (float32x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x4_t vreinterpret_f16_p8(poly8x8_t __p0) { - float16x4_t __ret; - __ret = (float16x4_t)(__p0); - return __ret; -} -#else -__ai float16x4_t vreinterpret_f16_p8(poly8x8_t __p0) { - float16x4_t __ret; - __ret = (float16x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x4_t vreinterpret_f16_p16(poly16x4_t __p0) { - float16x4_t __ret; - __ret = (float16x4_t)(__p0); - return __ret; -} -#else -__ai float16x4_t vreinterpret_f16_p16(poly16x4_t __p0) { - float16x4_t __ret; - __ret = (float16x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x4_t vreinterpret_f16_u8(uint8x8_t __p0) { - float16x4_t __ret; - __ret = (float16x4_t)(__p0); - return __ret; -} -#else -__ai float16x4_t vreinterpret_f16_u8(uint8x8_t __p0) { - float16x4_t __ret; - __ret = (float16x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x4_t vreinterpret_f16_u32(uint32x2_t __p0) { - float16x4_t __ret; - __ret = (float16x4_t)(__p0); - return __ret; -} -#else -__ai float16x4_t vreinterpret_f16_u32(uint32x2_t __p0) { - float16x4_t __ret; - __ret = (float16x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x4_t vreinterpret_f16_u64(uint64x1_t __p0) { - float16x4_t __ret; - __ret = (float16x4_t)(__p0); - return __ret; -} -#else -__ai float16x4_t vreinterpret_f16_u64(uint64x1_t __p0) { - float16x4_t __ret; - __ret = (float16x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x4_t vreinterpret_f16_u16(uint16x4_t __p0) { - float16x4_t __ret; - __ret = (float16x4_t)(__p0); - return __ret; -} -#else -__ai float16x4_t vreinterpret_f16_u16(uint16x4_t __p0) { - float16x4_t __ret; - __ret = 
(float16x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x4_t vreinterpret_f16_s8(int8x8_t __p0) { - float16x4_t __ret; - __ret = (float16x4_t)(__p0); - return __ret; -} -#else -__ai float16x4_t vreinterpret_f16_s8(int8x8_t __p0) { - float16x4_t __ret; - __ret = (float16x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x4_t vreinterpret_f16_f32(float32x2_t __p0) { - float16x4_t __ret; - __ret = (float16x4_t)(__p0); - return __ret; -} -#else -__ai float16x4_t vreinterpret_f16_f32(float32x2_t __p0) { - float16x4_t __ret; - __ret = (float16x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x4_t vreinterpret_f16_s32(int32x2_t __p0) { - float16x4_t __ret; - __ret = (float16x4_t)(__p0); - return __ret; -} -#else -__ai float16x4_t vreinterpret_f16_s32(int32x2_t __p0) { - float16x4_t __ret; - __ret = (float16x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x4_t vreinterpret_f16_s64(int64x1_t __p0) { - float16x4_t __ret; - __ret = (float16x4_t)(__p0); - return __ret; -} -#else -__ai float16x4_t vreinterpret_f16_s64(int64x1_t __p0) { - float16x4_t __ret; - __ret = (float16x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x4_t vreinterpret_f16_s16(int16x4_t __p0) { - float16x4_t __ret; - __ret = (float16x4_t)(__p0); - return __ret; -} -#else -__ai float16x4_t vreinterpret_f16_s16(int16x4_t __p0) { - float16x4_t __ret; - __ret = (float16x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vreinterpret_s32_p8(poly8x8_t __p0) { - int32x2_t __ret; - __ret = (int32x2_t)(__p0); - return __ret; -} -#else -__ai int32x2_t vreinterpret_s32_p8(poly8x8_t __p0) { - int32x2_t __ret; - __ret = (int32x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vreinterpret_s32_p16(poly16x4_t __p0) { - int32x2_t __ret; - __ret = (int32x2_t)(__p0); - return __ret; -} -#else -__ai int32x2_t vreinterpret_s32_p16(poly16x4_t __p0) { - int32x2_t __ret; - __ret = (int32x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vreinterpret_s32_u8(uint8x8_t __p0) { - int32x2_t __ret; - __ret = (int32x2_t)(__p0); - return __ret; -} -#else -__ai int32x2_t vreinterpret_s32_u8(uint8x8_t __p0) { - int32x2_t __ret; - __ret = (int32x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vreinterpret_s32_u32(uint32x2_t __p0) { - int32x2_t __ret; - __ret = (int32x2_t)(__p0); - return __ret; -} -#else -__ai int32x2_t vreinterpret_s32_u32(uint32x2_t __p0) { - int32x2_t __ret; - __ret = (int32x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vreinterpret_s32_u64(uint64x1_t __p0) { - int32x2_t __ret; - __ret = (int32x2_t)(__p0); - return __ret; -} -#else -__ai int32x2_t vreinterpret_s32_u64(uint64x1_t __p0) { - int32x2_t __ret; - __ret = (int32x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vreinterpret_s32_u16(uint16x4_t __p0) { - int32x2_t __ret; - __ret = (int32x2_t)(__p0); - return __ret; -} -#else -__ai int32x2_t vreinterpret_s32_u16(uint16x4_t __p0) { - int32x2_t __ret; - __ret = (int32x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vreinterpret_s32_s8(int8x8_t __p0) { - int32x2_t __ret; - __ret = (int32x2_t)(__p0); - return __ret; -} -#else -__ai int32x2_t vreinterpret_s32_s8(int8x8_t __p0) { - int32x2_t __ret; - __ret = (int32x2_t)(__p0); - return __ret; -} -#endif - 
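The helpers removed in this hunk are all of the same shape: a bit-preserving C cast wrapped in an inline function, duplicated into identical little- and big-endian branches. A minimal usage sketch (not taken from the patch) of how callers use these intrinsics, assuming a toolchain that still provides its own <arm_neon.h>:

#include <arm_neon.h>
#include <stdio.h>

int main(void)
{
	/* Two 32-bit lanes, then the same 64 register bits viewed as eight
	 * bytes. As the removed definitions show, vreinterpret_u8_u32 is
	 * only a type-level cast; no data is moved or converted. */
	uint32x2_t words = vdup_n_u32(0x01020304u);
	uint8x8_t  bytes = vreinterpret_u8_u32(words);

	printf("lane0=%u lane3=%u\n",
	    (unsigned)vget_lane_u8(bytes, 0),
	    (unsigned)vget_lane_u8(bytes, 3));
	return 0;
}

Whether lane 0 comes out as 0x04 or 0x01 depends on the target's byte order, which is why the header carries separate __LITTLE_ENDIAN__ branches even when, as in these reinterpret helpers, both branch bodies are identical.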
-#ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vreinterpret_s32_f32(float32x2_t __p0) { - int32x2_t __ret; - __ret = (int32x2_t)(__p0); - return __ret; -} -#else -__ai int32x2_t vreinterpret_s32_f32(float32x2_t __p0) { - int32x2_t __ret; - __ret = (int32x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vreinterpret_s32_f16(float16x4_t __p0) { - int32x2_t __ret; - __ret = (int32x2_t)(__p0); - return __ret; -} -#else -__ai int32x2_t vreinterpret_s32_f16(float16x4_t __p0) { - int32x2_t __ret; - __ret = (int32x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vreinterpret_s32_s64(int64x1_t __p0) { - int32x2_t __ret; - __ret = (int32x2_t)(__p0); - return __ret; -} -#else -__ai int32x2_t vreinterpret_s32_s64(int64x1_t __p0) { - int32x2_t __ret; - __ret = (int32x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vreinterpret_s32_s16(int16x4_t __p0) { - int32x2_t __ret; - __ret = (int32x2_t)(__p0); - return __ret; -} -#else -__ai int32x2_t vreinterpret_s32_s16(int16x4_t __p0) { - int32x2_t __ret; - __ret = (int32x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x1_t vreinterpret_s64_p8(poly8x8_t __p0) { - int64x1_t __ret; - __ret = (int64x1_t)(__p0); - return __ret; -} -#else -__ai int64x1_t vreinterpret_s64_p8(poly8x8_t __p0) { - int64x1_t __ret; - __ret = (int64x1_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x1_t vreinterpret_s64_p16(poly16x4_t __p0) { - int64x1_t __ret; - __ret = (int64x1_t)(__p0); - return __ret; -} -#else -__ai int64x1_t vreinterpret_s64_p16(poly16x4_t __p0) { - int64x1_t __ret; - __ret = (int64x1_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x1_t vreinterpret_s64_u8(uint8x8_t __p0) { - int64x1_t __ret; - __ret = (int64x1_t)(__p0); - return __ret; -} -#else -__ai int64x1_t vreinterpret_s64_u8(uint8x8_t __p0) { - int64x1_t __ret; - __ret = (int64x1_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x1_t vreinterpret_s64_u32(uint32x2_t __p0) { - int64x1_t __ret; - __ret = (int64x1_t)(__p0); - return __ret; -} -#else -__ai int64x1_t vreinterpret_s64_u32(uint32x2_t __p0) { - int64x1_t __ret; - __ret = (int64x1_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x1_t vreinterpret_s64_u64(uint64x1_t __p0) { - int64x1_t __ret; - __ret = (int64x1_t)(__p0); - return __ret; -} -#else -__ai int64x1_t vreinterpret_s64_u64(uint64x1_t __p0) { - int64x1_t __ret; - __ret = (int64x1_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x1_t vreinterpret_s64_u16(uint16x4_t __p0) { - int64x1_t __ret; - __ret = (int64x1_t)(__p0); - return __ret; -} -#else -__ai int64x1_t vreinterpret_s64_u16(uint16x4_t __p0) { - int64x1_t __ret; - __ret = (int64x1_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x1_t vreinterpret_s64_s8(int8x8_t __p0) { - int64x1_t __ret; - __ret = (int64x1_t)(__p0); - return __ret; -} -#else -__ai int64x1_t vreinterpret_s64_s8(int8x8_t __p0) { - int64x1_t __ret; - __ret = (int64x1_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x1_t vreinterpret_s64_f32(float32x2_t __p0) { - int64x1_t __ret; - __ret = (int64x1_t)(__p0); - return __ret; -} -#else -__ai int64x1_t vreinterpret_s64_f32(float32x2_t __p0) { - int64x1_t __ret; - __ret = (int64x1_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x1_t vreinterpret_s64_f16(float16x4_t __p0) { - int64x1_t 
__ret; - __ret = (int64x1_t)(__p0); - return __ret; -} -#else -__ai int64x1_t vreinterpret_s64_f16(float16x4_t __p0) { - int64x1_t __ret; - __ret = (int64x1_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x1_t vreinterpret_s64_s32(int32x2_t __p0) { - int64x1_t __ret; - __ret = (int64x1_t)(__p0); - return __ret; -} -#else -__ai int64x1_t vreinterpret_s64_s32(int32x2_t __p0) { - int64x1_t __ret; - __ret = (int64x1_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x1_t vreinterpret_s64_s16(int16x4_t __p0) { - int64x1_t __ret; - __ret = (int64x1_t)(__p0); - return __ret; -} -#else -__ai int64x1_t vreinterpret_s64_s16(int16x4_t __p0) { - int64x1_t __ret; - __ret = (int64x1_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vreinterpret_s16_p8(poly8x8_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t)(__p0); - return __ret; -} -#else -__ai int16x4_t vreinterpret_s16_p8(poly8x8_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vreinterpret_s16_p16(poly16x4_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t)(__p0); - return __ret; -} -#else -__ai int16x4_t vreinterpret_s16_p16(poly16x4_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vreinterpret_s16_u8(uint8x8_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t)(__p0); - return __ret; -} -#else -__ai int16x4_t vreinterpret_s16_u8(uint8x8_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vreinterpret_s16_u32(uint32x2_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t)(__p0); - return __ret; -} -#else -__ai int16x4_t vreinterpret_s16_u32(uint32x2_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vreinterpret_s16_u64(uint64x1_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t)(__p0); - return __ret; -} -#else -__ai int16x4_t vreinterpret_s16_u64(uint64x1_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vreinterpret_s16_u16(uint16x4_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t)(__p0); - return __ret; -} -#else -__ai int16x4_t vreinterpret_s16_u16(uint16x4_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vreinterpret_s16_s8(int8x8_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t)(__p0); - return __ret; -} -#else -__ai int16x4_t vreinterpret_s16_s8(int8x8_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vreinterpret_s16_f32(float32x2_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t)(__p0); - return __ret; -} -#else -__ai int16x4_t vreinterpret_s16_f32(float32x2_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vreinterpret_s16_f16(float16x4_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t)(__p0); - return __ret; -} -#else -__ai int16x4_t vreinterpret_s16_f16(float16x4_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vreinterpret_s16_s32(int32x2_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t)(__p0); - return __ret; -} -#else -__ai int16x4_t 
vreinterpret_s16_s32(int32x2_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vreinterpret_s16_s64(int64x1_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t)(__p0); - return __ret; -} -#else -__ai int16x4_t vreinterpret_s16_s64(int64x1_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t)(__p0); - return __ret; -} -#endif - -#endif -#if (__ARM_FP & 2) -#ifdef __LITTLE_ENDIAN__ -__ai float16x4_t vcvt_f16_f32(float32x4_t __p0) { - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vcvt_f16_f32((int8x16_t)__p0, 8); - return __ret; -} -#else -__ai float16x4_t vcvt_f16_f32(float32x4_t __p0) { - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vcvt_f16_f32((int8x16_t)__rev0, 8); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -__ai float16x4_t __noswap_vcvt_f16_f32(float32x4_t __p0) { - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vcvt_f16_f32((int8x16_t)__p0, 8); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vcvt_f32_f16(float16x4_t __p0) { - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vcvt_f32_f16((int8x8_t)__p0, 41); - return __ret; -} -#else -__ai float32x4_t vcvt_f32_f16(float16x4_t __p0) { - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vcvt_f32_f16((int8x8_t)__rev0, 41); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -__ai float32x4_t __noswap_vcvt_f32_f16(float16x4_t __p0) { - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vcvt_f32_f16((int8x8_t)__p0, 41); - return __ret; -} -#endif - -#endif -#if __ARM_ARCH >= 8 -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vcvtaq_s32_f32(float32x4_t __p0) { - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vcvtaq_s32_v((int8x16_t)__p0, 34); - return __ret; -} -#else -__ai int32x4_t vcvtaq_s32_f32(float32x4_t __p0) { - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vcvtaq_s32_v((int8x16_t)__rev0, 34); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vcvta_s32_f32(float32x2_t __p0) { - int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vcvta_s32_v((int8x8_t)__p0, 2); - return __ret; -} -#else -__ai int32x2_t vcvta_s32_f32(float32x2_t __p0) { - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vcvta_s32_v((int8x8_t)__rev0, 2); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vcvtaq_u32_f32(float32x4_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vcvtaq_u32_v((int8x16_t)__p0, 50); - return __ret; -} -#else -__ai uint32x4_t vcvtaq_u32_f32(float32x4_t __p0) { - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vcvtaq_u32_v((int8x16_t)__rev0, 50); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vcvta_u32_f32(float32x2_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vcvta_u32_v((int8x8_t)__p0, 18); - return __ret; -} -#else -__ai uint32x2_t 
vcvta_u32_f32(float32x2_t __p0) { - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vcvta_u32_v((int8x8_t)__rev0, 18); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vcvtmq_s32_f32(float32x4_t __p0) { - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vcvtmq_s32_v((int8x16_t)__p0, 34); - return __ret; -} -#else -__ai int32x4_t vcvtmq_s32_f32(float32x4_t __p0) { - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vcvtmq_s32_v((int8x16_t)__rev0, 34); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vcvtm_s32_f32(float32x2_t __p0) { - int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vcvtm_s32_v((int8x8_t)__p0, 2); - return __ret; -} -#else -__ai int32x2_t vcvtm_s32_f32(float32x2_t __p0) { - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vcvtm_s32_v((int8x8_t)__rev0, 2); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vcvtmq_u32_f32(float32x4_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vcvtmq_u32_v((int8x16_t)__p0, 50); - return __ret; -} -#else -__ai uint32x4_t vcvtmq_u32_f32(float32x4_t __p0) { - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vcvtmq_u32_v((int8x16_t)__rev0, 50); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vcvtm_u32_f32(float32x2_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vcvtm_u32_v((int8x8_t)__p0, 18); - return __ret; -} -#else -__ai uint32x2_t vcvtm_u32_f32(float32x2_t __p0) { - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vcvtm_u32_v((int8x8_t)__rev0, 18); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vcvtnq_s32_f32(float32x4_t __p0) { - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vcvtnq_s32_v((int8x16_t)__p0, 34); - return __ret; -} -#else -__ai int32x4_t vcvtnq_s32_f32(float32x4_t __p0) { - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vcvtnq_s32_v((int8x16_t)__rev0, 34); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vcvtn_s32_f32(float32x2_t __p0) { - int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vcvtn_s32_v((int8x8_t)__p0, 2); - return __ret; -} -#else -__ai int32x2_t vcvtn_s32_f32(float32x2_t __p0) { - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vcvtn_s32_v((int8x8_t)__rev0, 2); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vcvtnq_u32_f32(float32x4_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vcvtnq_u32_v((int8x16_t)__p0, 50); - return __ret; -} -#else -__ai uint32x4_t vcvtnq_u32_f32(float32x4_t __p0) { - float32x4_t 
__rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vcvtnq_u32_v((int8x16_t)__rev0, 50); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vcvtn_u32_f32(float32x2_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vcvtn_u32_v((int8x8_t)__p0, 18); - return __ret; -} -#else -__ai uint32x2_t vcvtn_u32_f32(float32x2_t __p0) { - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vcvtn_u32_v((int8x8_t)__rev0, 18); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vcvtpq_s32_f32(float32x4_t __p0) { - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vcvtpq_s32_v((int8x16_t)__p0, 34); - return __ret; -} -#else -__ai int32x4_t vcvtpq_s32_f32(float32x4_t __p0) { - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vcvtpq_s32_v((int8x16_t)__rev0, 34); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vcvtp_s32_f32(float32x2_t __p0) { - int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vcvtp_s32_v((int8x8_t)__p0, 2); - return __ret; -} -#else -__ai int32x2_t vcvtp_s32_f32(float32x2_t __p0) { - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vcvtp_s32_v((int8x8_t)__rev0, 2); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vcvtpq_u32_f32(float32x4_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vcvtpq_u32_v((int8x16_t)__p0, 50); - return __ret; -} -#else -__ai uint32x4_t vcvtpq_u32_f32(float32x4_t __p0) { - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vcvtpq_u32_v((int8x16_t)__rev0, 50); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vcvtp_u32_f32(float32x2_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vcvtp_u32_v((int8x8_t)__p0, 18); - return __ret; -} -#else -__ai uint32x2_t vcvtp_u32_f32(float32x2_t __p0) { - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vcvtp_u32_v((int8x8_t)__rev0, 18); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#endif -#if __ARM_ARCH >= 8 && defined(__ARM_FEATURE_DIRECTED_ROUNDING) -#ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vrndq_f32(float32x4_t __p0) { - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vrndq_v((int8x16_t)__p0, 41); - return __ret; -} -#else -__ai float32x4_t vrndq_f32(float32x4_t __p0) { - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vrndq_v((int8x16_t)__rev0, 41); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vrnd_f32(float32x2_t __p0) { - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vrnd_v((int8x8_t)__p0, 9); - return __ret; -} -#else -__ai float32x2_t vrnd_f32(float32x2_t 
__p0) { - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vrnd_v((int8x8_t)__rev0, 9); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vrndaq_f32(float32x4_t __p0) { - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vrndaq_v((int8x16_t)__p0, 41); - return __ret; -} -#else -__ai float32x4_t vrndaq_f32(float32x4_t __p0) { - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vrndaq_v((int8x16_t)__rev0, 41); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vrnda_f32(float32x2_t __p0) { - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vrnda_v((int8x8_t)__p0, 9); - return __ret; -} -#else -__ai float32x2_t vrnda_f32(float32x2_t __p0) { - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vrnda_v((int8x8_t)__rev0, 9); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vrndmq_f32(float32x4_t __p0) { - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vrndmq_v((int8x16_t)__p0, 41); - return __ret; -} -#else -__ai float32x4_t vrndmq_f32(float32x4_t __p0) { - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vrndmq_v((int8x16_t)__rev0, 41); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vrndm_f32(float32x2_t __p0) { - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vrndm_v((int8x8_t)__p0, 9); - return __ret; -} -#else -__ai float32x2_t vrndm_f32(float32x2_t __p0) { - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vrndm_v((int8x8_t)__rev0, 9); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vrndnq_f32(float32x4_t __p0) { - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vrndnq_v((int8x16_t)__p0, 41); - return __ret; -} -#else -__ai float32x4_t vrndnq_f32(float32x4_t __p0) { - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vrndnq_v((int8x16_t)__rev0, 41); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vrndn_f32(float32x2_t __p0) { - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vrndn_v((int8x8_t)__p0, 9); - return __ret; -} -#else -__ai float32x2_t vrndn_f32(float32x2_t __p0) { - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vrndn_v((int8x8_t)__rev0, 9); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vrndpq_f32(float32x4_t __p0) { - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vrndpq_v((int8x16_t)__p0, 41); - return __ret; -} -#else -__ai float32x4_t vrndpq_f32(float32x4_t __p0) { - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float32x4_t 
__ret; - __ret = (float32x4_t) __builtin_neon_vrndpq_v((int8x16_t)__rev0, 41); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vrndp_f32(float32x2_t __p0) { - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vrndp_v((int8x8_t)__p0, 9); - return __ret; -} -#else -__ai float32x2_t vrndp_f32(float32x2_t __p0) { - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vrndp_v((int8x8_t)__rev0, 9); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vrndxq_f32(float32x4_t __p0) { - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vrndxq_v((int8x16_t)__p0, 41); - return __ret; -} -#else -__ai float32x4_t vrndxq_f32(float32x4_t __p0) { - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vrndxq_v((int8x16_t)__rev0, 41); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vrndx_f32(float32x2_t __p0) { - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vrndx_v((int8x8_t)__p0, 9); - return __ret; -} -#else -__ai float32x2_t vrndx_f32(float32x2_t __p0) { - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vrndx_v((int8x8_t)__rev0, 9); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#endif -#if __ARM_ARCH >= 8 && defined(__ARM_FEATURE_NUMERIC_MAXMIN) -#ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vmaxnmq_f32(float32x4_t __p0, float32x4_t __p1) { - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vmaxnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 41); - return __ret; -} -#else -__ai float32x4_t vmaxnmq_f32(float32x4_t __p0, float32x4_t __p1) { - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vmaxnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vmaxnm_f32(float32x2_t __p0, float32x2_t __p1) { - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vmaxnm_v((int8x8_t)__p0, (int8x8_t)__p1, 9); - return __ret; -} -#else -__ai float32x2_t vmaxnm_f32(float32x2_t __p0, float32x2_t __p1) { - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vmaxnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vminnmq_f32(float32x4_t __p0, float32x4_t __p1) { - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vminnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 41); - return __ret; -} -#else -__ai float32x4_t vminnmq_f32(float32x4_t __p0, float32x4_t __p1) { - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vminnmq_v((int8x16_t)__rev0, 
(int8x16_t)__rev1, 41); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vminnm_f32(float32x2_t __p0, float32x2_t __p1) { - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vminnm_v((int8x8_t)__p0, (int8x8_t)__p1, 9); - return __ret; -} -#else -__ai float32x2_t vminnm_f32(float32x2_t __p0, float32x2_t __p1) { - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vminnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#endif -#if __ARM_ARCH >= 8 && defined(__aarch64__) -#ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vcvtaq_s64_f64(float64x2_t __p0) { - int64x2_t __ret; - __ret = (int64x2_t) __builtin_neon_vcvtaq_s64_v((int8x16_t)__p0, 35); - return __ret; -} -#else -__ai int64x2_t vcvtaq_s64_f64(float64x2_t __p0) { - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int64x2_t __ret; - __ret = (int64x2_t) __builtin_neon_vcvtaq_s64_v((int8x16_t)__rev0, 35); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x1_t vcvta_s64_f64(float64x1_t __p0) { - int64x1_t __ret; - __ret = (int64x1_t) __builtin_neon_vcvta_s64_v((int8x8_t)__p0, 3); - return __ret; -} -#else -__ai int64x1_t vcvta_s64_f64(float64x1_t __p0) { - int64x1_t __ret; - __ret = (int64x1_t) __builtin_neon_vcvta_s64_v((int8x8_t)__p0, 3); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vcvtaq_u64_f64(float64x2_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t) __builtin_neon_vcvtaq_u64_v((int8x16_t)__p0, 51); - return __ret; -} -#else -__ai uint64x2_t vcvtaq_u64_f64(float64x2_t __p0) { - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint64x2_t __ret; - __ret = (uint64x2_t) __builtin_neon_vcvtaq_u64_v((int8x16_t)__rev0, 51); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x1_t vcvta_u64_f64(float64x1_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t) __builtin_neon_vcvta_u64_v((int8x8_t)__p0, 19); - return __ret; -} -#else -__ai uint64x1_t vcvta_u64_f64(float64x1_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t) __builtin_neon_vcvta_u64_v((int8x8_t)__p0, 19); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vcvtmq_s64_f64(float64x2_t __p0) { - int64x2_t __ret; - __ret = (int64x2_t) __builtin_neon_vcvtmq_s64_v((int8x16_t)__p0, 35); - return __ret; -} -#else -__ai int64x2_t vcvtmq_s64_f64(float64x2_t __p0) { - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int64x2_t __ret; - __ret = (int64x2_t) __builtin_neon_vcvtmq_s64_v((int8x16_t)__rev0, 35); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x1_t vcvtm_s64_f64(float64x1_t __p0) { - int64x1_t __ret; - __ret = (int64x1_t) __builtin_neon_vcvtm_s64_v((int8x8_t)__p0, 3); - return __ret; -} -#else -__ai int64x1_t vcvtm_s64_f64(float64x1_t __p0) { - int64x1_t __ret; - __ret = (int64x1_t) __builtin_neon_vcvtm_s64_v((int8x8_t)__p0, 3); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vcvtmq_u64_f64(float64x2_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t) __builtin_neon_vcvtmq_u64_v((int8x16_t)__p0, 51); - return 
__ret; -} -#else -__ai uint64x2_t vcvtmq_u64_f64(float64x2_t __p0) { - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint64x2_t __ret; - __ret = (uint64x2_t) __builtin_neon_vcvtmq_u64_v((int8x16_t)__rev0, 51); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x1_t vcvtm_u64_f64(float64x1_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t) __builtin_neon_vcvtm_u64_v((int8x8_t)__p0, 19); - return __ret; -} -#else -__ai uint64x1_t vcvtm_u64_f64(float64x1_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t) __builtin_neon_vcvtm_u64_v((int8x8_t)__p0, 19); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vcvtnq_s64_f64(float64x2_t __p0) { - int64x2_t __ret; - __ret = (int64x2_t) __builtin_neon_vcvtnq_s64_v((int8x16_t)__p0, 35); - return __ret; -} -#else -__ai int64x2_t vcvtnq_s64_f64(float64x2_t __p0) { - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int64x2_t __ret; - __ret = (int64x2_t) __builtin_neon_vcvtnq_s64_v((int8x16_t)__rev0, 35); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x1_t vcvtn_s64_f64(float64x1_t __p0) { - int64x1_t __ret; - __ret = (int64x1_t) __builtin_neon_vcvtn_s64_v((int8x8_t)__p0, 3); - return __ret; -} -#else -__ai int64x1_t vcvtn_s64_f64(float64x1_t __p0) { - int64x1_t __ret; - __ret = (int64x1_t) __builtin_neon_vcvtn_s64_v((int8x8_t)__p0, 3); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vcvtnq_u64_f64(float64x2_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t) __builtin_neon_vcvtnq_u64_v((int8x16_t)__p0, 51); - return __ret; -} -#else -__ai uint64x2_t vcvtnq_u64_f64(float64x2_t __p0) { - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint64x2_t __ret; - __ret = (uint64x2_t) __builtin_neon_vcvtnq_u64_v((int8x16_t)__rev0, 51); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x1_t vcvtn_u64_f64(float64x1_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t) __builtin_neon_vcvtn_u64_v((int8x8_t)__p0, 19); - return __ret; -} -#else -__ai uint64x1_t vcvtn_u64_f64(float64x1_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t) __builtin_neon_vcvtn_u64_v((int8x8_t)__p0, 19); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vcvtpq_s64_f64(float64x2_t __p0) { - int64x2_t __ret; - __ret = (int64x2_t) __builtin_neon_vcvtpq_s64_v((int8x16_t)__p0, 35); - return __ret; -} -#else -__ai int64x2_t vcvtpq_s64_f64(float64x2_t __p0) { - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int64x2_t __ret; - __ret = (int64x2_t) __builtin_neon_vcvtpq_s64_v((int8x16_t)__rev0, 35); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x1_t vcvtp_s64_f64(float64x1_t __p0) { - int64x1_t __ret; - __ret = (int64x1_t) __builtin_neon_vcvtp_s64_v((int8x8_t)__p0, 3); - return __ret; -} -#else -__ai int64x1_t vcvtp_s64_f64(float64x1_t __p0) { - int64x1_t __ret; - __ret = (int64x1_t) __builtin_neon_vcvtp_s64_v((int8x8_t)__p0, 3); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vcvtpq_u64_f64(float64x2_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t) __builtin_neon_vcvtpq_u64_v((int8x16_t)__p0, 51); - return __ret; -} -#else -__ai uint64x2_t vcvtpq_u64_f64(float64x2_t __p0) { - float64x2_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 1, 0); - uint64x2_t __ret; - __ret = (uint64x2_t) __builtin_neon_vcvtpq_u64_v((int8x16_t)__rev0, 51); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x1_t vcvtp_u64_f64(float64x1_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t) __builtin_neon_vcvtp_u64_v((int8x8_t)__p0, 19); - return __ret; -} -#else -__ai uint64x1_t vcvtp_u64_f64(float64x1_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t) __builtin_neon_vcvtp_u64_v((int8x8_t)__p0, 19); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly8x8_t vreinterpret_p8_p64(poly64x1_t __p0) { - poly8x8_t __ret; - __ret = (poly8x8_t)(__p0); - return __ret; -} -#else -__ai poly8x8_t vreinterpret_p8_p64(poly64x1_t __p0) { - poly8x8_t __ret; - __ret = (poly8x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly8x8_t vreinterpret_p8_p16(poly16x4_t __p0) { - poly8x8_t __ret; - __ret = (poly8x8_t)(__p0); - return __ret; -} -#else -__ai poly8x8_t vreinterpret_p8_p16(poly16x4_t __p0) { - poly8x8_t __ret; - __ret = (poly8x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly8x8_t vreinterpret_p8_u8(uint8x8_t __p0) { - poly8x8_t __ret; - __ret = (poly8x8_t)(__p0); - return __ret; -} -#else -__ai poly8x8_t vreinterpret_p8_u8(uint8x8_t __p0) { - poly8x8_t __ret; - __ret = (poly8x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly8x8_t vreinterpret_p8_u32(uint32x2_t __p0) { - poly8x8_t __ret; - __ret = (poly8x8_t)(__p0); - return __ret; -} -#else -__ai poly8x8_t vreinterpret_p8_u32(uint32x2_t __p0) { - poly8x8_t __ret; - __ret = (poly8x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly8x8_t vreinterpret_p8_u64(uint64x1_t __p0) { - poly8x8_t __ret; - __ret = (poly8x8_t)(__p0); - return __ret; -} -#else -__ai poly8x8_t vreinterpret_p8_u64(uint64x1_t __p0) { - poly8x8_t __ret; - __ret = (poly8x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly8x8_t vreinterpret_p8_u16(uint16x4_t __p0) { - poly8x8_t __ret; - __ret = (poly8x8_t)(__p0); - return __ret; -} -#else -__ai poly8x8_t vreinterpret_p8_u16(uint16x4_t __p0) { - poly8x8_t __ret; - __ret = (poly8x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly8x8_t vreinterpret_p8_s8(int8x8_t __p0) { - poly8x8_t __ret; - __ret = (poly8x8_t)(__p0); - return __ret; -} -#else -__ai poly8x8_t vreinterpret_p8_s8(int8x8_t __p0) { - poly8x8_t __ret; - __ret = (poly8x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly8x8_t vreinterpret_p8_f64(float64x1_t __p0) { - poly8x8_t __ret; - __ret = (poly8x8_t)(__p0); - return __ret; -} -#else -__ai poly8x8_t vreinterpret_p8_f64(float64x1_t __p0) { - poly8x8_t __ret; - __ret = (poly8x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly8x8_t vreinterpret_p8_f32(float32x2_t __p0) { - poly8x8_t __ret; - __ret = (poly8x8_t)(__p0); - return __ret; -} -#else -__ai poly8x8_t vreinterpret_p8_f32(float32x2_t __p0) { - poly8x8_t __ret; - __ret = (poly8x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly8x8_t vreinterpret_p8_f16(float16x4_t __p0) { - poly8x8_t __ret; - __ret = (poly8x8_t)(__p0); - return __ret; -} -#else -__ai poly8x8_t vreinterpret_p8_f16(float16x4_t __p0) { - poly8x8_t __ret; - __ret = (poly8x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly8x8_t vreinterpret_p8_s32(int32x2_t __p0) { - poly8x8_t __ret; - 
__ret = (poly8x8_t)(__p0); - return __ret; -} -#else -__ai poly8x8_t vreinterpret_p8_s32(int32x2_t __p0) { - poly8x8_t __ret; - __ret = (poly8x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly8x8_t vreinterpret_p8_s64(int64x1_t __p0) { - poly8x8_t __ret; - __ret = (poly8x8_t)(__p0); - return __ret; -} -#else -__ai poly8x8_t vreinterpret_p8_s64(int64x1_t __p0) { - poly8x8_t __ret; - __ret = (poly8x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly8x8_t vreinterpret_p8_s16(int16x4_t __p0) { - poly8x8_t __ret; - __ret = (poly8x8_t)(__p0); - return __ret; -} -#else -__ai poly8x8_t vreinterpret_p8_s16(int16x4_t __p0) { - poly8x8_t __ret; - __ret = (poly8x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly64x1_t vreinterpret_p64_p8(poly8x8_t __p0) { - poly64x1_t __ret; - __ret = (poly64x1_t)(__p0); - return __ret; -} -#else -__ai poly64x1_t vreinterpret_p64_p8(poly8x8_t __p0) { - poly64x1_t __ret; - __ret = (poly64x1_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly64x1_t vreinterpret_p64_p16(poly16x4_t __p0) { - poly64x1_t __ret; - __ret = (poly64x1_t)(__p0); - return __ret; -} -#else -__ai poly64x1_t vreinterpret_p64_p16(poly16x4_t __p0) { - poly64x1_t __ret; - __ret = (poly64x1_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly64x1_t vreinterpret_p64_u8(uint8x8_t __p0) { - poly64x1_t __ret; - __ret = (poly64x1_t)(__p0); - return __ret; -} -#else -__ai poly64x1_t vreinterpret_p64_u8(uint8x8_t __p0) { - poly64x1_t __ret; - __ret = (poly64x1_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly64x1_t vreinterpret_p64_u32(uint32x2_t __p0) { - poly64x1_t __ret; - __ret = (poly64x1_t)(__p0); - return __ret; -} -#else -__ai poly64x1_t vreinterpret_p64_u32(uint32x2_t __p0) { - poly64x1_t __ret; - __ret = (poly64x1_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly64x1_t vreinterpret_p64_u64(uint64x1_t __p0) { - poly64x1_t __ret; - __ret = (poly64x1_t)(__p0); - return __ret; -} -#else -__ai poly64x1_t vreinterpret_p64_u64(uint64x1_t __p0) { - poly64x1_t __ret; - __ret = (poly64x1_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly64x1_t vreinterpret_p64_u16(uint16x4_t __p0) { - poly64x1_t __ret; - __ret = (poly64x1_t)(__p0); - return __ret; -} -#else -__ai poly64x1_t vreinterpret_p64_u16(uint16x4_t __p0) { - poly64x1_t __ret; - __ret = (poly64x1_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly64x1_t vreinterpret_p64_s8(int8x8_t __p0) { - poly64x1_t __ret; - __ret = (poly64x1_t)(__p0); - return __ret; -} -#else -__ai poly64x1_t vreinterpret_p64_s8(int8x8_t __p0) { - poly64x1_t __ret; - __ret = (poly64x1_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly64x1_t vreinterpret_p64_f64(float64x1_t __p0) { - poly64x1_t __ret; - __ret = (poly64x1_t)(__p0); - return __ret; -} -#else -__ai poly64x1_t vreinterpret_p64_f64(float64x1_t __p0) { - poly64x1_t __ret; - __ret = (poly64x1_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly64x1_t vreinterpret_p64_f32(float32x2_t __p0) { - poly64x1_t __ret; - __ret = (poly64x1_t)(__p0); - return __ret; -} -#else -__ai poly64x1_t vreinterpret_p64_f32(float32x2_t __p0) { - poly64x1_t __ret; - __ret = (poly64x1_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly64x1_t vreinterpret_p64_f16(float16x4_t __p0) { - poly64x1_t __ret; - __ret = (poly64x1_t)(__p0); - return __ret; -} 
-#else -__ai poly64x1_t vreinterpret_p64_f16(float16x4_t __p0) { - poly64x1_t __ret; - __ret = (poly64x1_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly64x1_t vreinterpret_p64_s32(int32x2_t __p0) { - poly64x1_t __ret; - __ret = (poly64x1_t)(__p0); - return __ret; -} -#else -__ai poly64x1_t vreinterpret_p64_s32(int32x2_t __p0) { - poly64x1_t __ret; - __ret = (poly64x1_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly64x1_t vreinterpret_p64_s64(int64x1_t __p0) { - poly64x1_t __ret; - __ret = (poly64x1_t)(__p0); - return __ret; -} -#else -__ai poly64x1_t vreinterpret_p64_s64(int64x1_t __p0) { - poly64x1_t __ret; - __ret = (poly64x1_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly64x1_t vreinterpret_p64_s16(int16x4_t __p0) { - poly64x1_t __ret; - __ret = (poly64x1_t)(__p0); - return __ret; -} -#else -__ai poly64x1_t vreinterpret_p64_s16(int16x4_t __p0) { - poly64x1_t __ret; - __ret = (poly64x1_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly16x4_t vreinterpret_p16_p8(poly8x8_t __p0) { - poly16x4_t __ret; - __ret = (poly16x4_t)(__p0); - return __ret; -} -#else -__ai poly16x4_t vreinterpret_p16_p8(poly8x8_t __p0) { - poly16x4_t __ret; - __ret = (poly16x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly16x4_t vreinterpret_p16_p64(poly64x1_t __p0) { - poly16x4_t __ret; - __ret = (poly16x4_t)(__p0); - return __ret; -} -#else -__ai poly16x4_t vreinterpret_p16_p64(poly64x1_t __p0) { - poly16x4_t __ret; - __ret = (poly16x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly16x4_t vreinterpret_p16_u8(uint8x8_t __p0) { - poly16x4_t __ret; - __ret = (poly16x4_t)(__p0); - return __ret; -} -#else -__ai poly16x4_t vreinterpret_p16_u8(uint8x8_t __p0) { - poly16x4_t __ret; - __ret = (poly16x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly16x4_t vreinterpret_p16_u32(uint32x2_t __p0) { - poly16x4_t __ret; - __ret = (poly16x4_t)(__p0); - return __ret; -} -#else -__ai poly16x4_t vreinterpret_p16_u32(uint32x2_t __p0) { - poly16x4_t __ret; - __ret = (poly16x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly16x4_t vreinterpret_p16_u64(uint64x1_t __p0) { - poly16x4_t __ret; - __ret = (poly16x4_t)(__p0); - return __ret; -} -#else -__ai poly16x4_t vreinterpret_p16_u64(uint64x1_t __p0) { - poly16x4_t __ret; - __ret = (poly16x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly16x4_t vreinterpret_p16_u16(uint16x4_t __p0) { - poly16x4_t __ret; - __ret = (poly16x4_t)(__p0); - return __ret; -} -#else -__ai poly16x4_t vreinterpret_p16_u16(uint16x4_t __p0) { - poly16x4_t __ret; - __ret = (poly16x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly16x4_t vreinterpret_p16_s8(int8x8_t __p0) { - poly16x4_t __ret; - __ret = (poly16x4_t)(__p0); - return __ret; -} -#else -__ai poly16x4_t vreinterpret_p16_s8(int8x8_t __p0) { - poly16x4_t __ret; - __ret = (poly16x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly16x4_t vreinterpret_p16_f64(float64x1_t __p0) { - poly16x4_t __ret; - __ret = (poly16x4_t)(__p0); - return __ret; -} -#else -__ai poly16x4_t vreinterpret_p16_f64(float64x1_t __p0) { - poly16x4_t __ret; - __ret = (poly16x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly16x4_t vreinterpret_p16_f32(float32x2_t __p0) { - poly16x4_t __ret; - __ret = (poly16x4_t)(__p0); - return __ret; -} -#else -__ai poly16x4_t 
vreinterpret_p16_f32(float32x2_t __p0) { - poly16x4_t __ret; - __ret = (poly16x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly16x4_t vreinterpret_p16_f16(float16x4_t __p0) { - poly16x4_t __ret; - __ret = (poly16x4_t)(__p0); - return __ret; -} -#else -__ai poly16x4_t vreinterpret_p16_f16(float16x4_t __p0) { - poly16x4_t __ret; - __ret = (poly16x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly16x4_t vreinterpret_p16_s32(int32x2_t __p0) { - poly16x4_t __ret; - __ret = (poly16x4_t)(__p0); - return __ret; -} -#else -__ai poly16x4_t vreinterpret_p16_s32(int32x2_t __p0) { - poly16x4_t __ret; - __ret = (poly16x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly16x4_t vreinterpret_p16_s64(int64x1_t __p0) { - poly16x4_t __ret; - __ret = (poly16x4_t)(__p0); - return __ret; -} -#else -__ai poly16x4_t vreinterpret_p16_s64(int64x1_t __p0) { - poly16x4_t __ret; - __ret = (poly16x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly16x4_t vreinterpret_p16_s16(int16x4_t __p0) { - poly16x4_t __ret; - __ret = (poly16x4_t)(__p0); - return __ret; -} -#else -__ai poly16x4_t vreinterpret_p16_s16(int16x4_t __p0) { - poly16x4_t __ret; - __ret = (poly16x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly8x16_t vreinterpretq_p8_p128(poly128_t __p0) { - poly8x16_t __ret; - __ret = (poly8x16_t)(__p0); - return __ret; -} -#else -__ai poly8x16_t vreinterpretq_p8_p128(poly128_t __p0) { - poly8x16_t __ret; - __ret = (poly8x16_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly8x16_t vreinterpretq_p8_p64(poly64x2_t __p0) { - poly8x16_t __ret; - __ret = (poly8x16_t)(__p0); - return __ret; -} -#else -__ai poly8x16_t vreinterpretq_p8_p64(poly64x2_t __p0) { - poly8x16_t __ret; - __ret = (poly8x16_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly8x16_t vreinterpretq_p8_p16(poly16x8_t __p0) { - poly8x16_t __ret; - __ret = (poly8x16_t)(__p0); - return __ret; -} -#else -__ai poly8x16_t vreinterpretq_p8_p16(poly16x8_t __p0) { - poly8x16_t __ret; - __ret = (poly8x16_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly8x16_t vreinterpretq_p8_u8(uint8x16_t __p0) { - poly8x16_t __ret; - __ret = (poly8x16_t)(__p0); - return __ret; -} -#else -__ai poly8x16_t vreinterpretq_p8_u8(uint8x16_t __p0) { - poly8x16_t __ret; - __ret = (poly8x16_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly8x16_t vreinterpretq_p8_u32(uint32x4_t __p0) { - poly8x16_t __ret; - __ret = (poly8x16_t)(__p0); - return __ret; -} -#else -__ai poly8x16_t vreinterpretq_p8_u32(uint32x4_t __p0) { - poly8x16_t __ret; - __ret = (poly8x16_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly8x16_t vreinterpretq_p8_u64(uint64x2_t __p0) { - poly8x16_t __ret; - __ret = (poly8x16_t)(__p0); - return __ret; -} -#else -__ai poly8x16_t vreinterpretq_p8_u64(uint64x2_t __p0) { - poly8x16_t __ret; - __ret = (poly8x16_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly8x16_t vreinterpretq_p8_u16(uint16x8_t __p0) { - poly8x16_t __ret; - __ret = (poly8x16_t)(__p0); - return __ret; -} -#else -__ai poly8x16_t vreinterpretq_p8_u16(uint16x8_t __p0) { - poly8x16_t __ret; - __ret = (poly8x16_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly8x16_t vreinterpretq_p8_s8(int8x16_t __p0) { - poly8x16_t __ret; - __ret = (poly8x16_t)(__p0); - return __ret; -} -#else -__ai poly8x16_t 
vreinterpretq_p8_s8(int8x16_t __p0) { - poly8x16_t __ret; - __ret = (poly8x16_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly8x16_t vreinterpretq_p8_f64(float64x2_t __p0) { - poly8x16_t __ret; - __ret = (poly8x16_t)(__p0); - return __ret; -} -#else -__ai poly8x16_t vreinterpretq_p8_f64(float64x2_t __p0) { - poly8x16_t __ret; - __ret = (poly8x16_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly8x16_t vreinterpretq_p8_f32(float32x4_t __p0) { - poly8x16_t __ret; - __ret = (poly8x16_t)(__p0); - return __ret; -} -#else -__ai poly8x16_t vreinterpretq_p8_f32(float32x4_t __p0) { - poly8x16_t __ret; - __ret = (poly8x16_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly8x16_t vreinterpretq_p8_f16(float16x8_t __p0) { - poly8x16_t __ret; - __ret = (poly8x16_t)(__p0); - return __ret; -} -#else -__ai poly8x16_t vreinterpretq_p8_f16(float16x8_t __p0) { - poly8x16_t __ret; - __ret = (poly8x16_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly8x16_t vreinterpretq_p8_s32(int32x4_t __p0) { - poly8x16_t __ret; - __ret = (poly8x16_t)(__p0); - return __ret; -} -#else -__ai poly8x16_t vreinterpretq_p8_s32(int32x4_t __p0) { - poly8x16_t __ret; - __ret = (poly8x16_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly8x16_t vreinterpretq_p8_s64(int64x2_t __p0) { - poly8x16_t __ret; - __ret = (poly8x16_t)(__p0); - return __ret; -} -#else -__ai poly8x16_t vreinterpretq_p8_s64(int64x2_t __p0) { - poly8x16_t __ret; - __ret = (poly8x16_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly8x16_t vreinterpretq_p8_s16(int16x8_t __p0) { - poly8x16_t __ret; - __ret = (poly8x16_t)(__p0); - return __ret; -} -#else -__ai poly8x16_t vreinterpretq_p8_s16(int16x8_t __p0) { - poly8x16_t __ret; - __ret = (poly8x16_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly128_t vreinterpretq_p128_p8(poly8x16_t __p0) { - poly128_t __ret; - __ret = (poly128_t)(__p0); - return __ret; -} -#else -__ai poly128_t vreinterpretq_p128_p8(poly8x16_t __p0) { - poly128_t __ret; - __ret = (poly128_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly128_t vreinterpretq_p128_p64(poly64x2_t __p0) { - poly128_t __ret; - __ret = (poly128_t)(__p0); - return __ret; -} -#else -__ai poly128_t vreinterpretq_p128_p64(poly64x2_t __p0) { - poly128_t __ret; - __ret = (poly128_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly128_t vreinterpretq_p128_p16(poly16x8_t __p0) { - poly128_t __ret; - __ret = (poly128_t)(__p0); - return __ret; -} -#else -__ai poly128_t vreinterpretq_p128_p16(poly16x8_t __p0) { - poly128_t __ret; - __ret = (poly128_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly128_t vreinterpretq_p128_u8(uint8x16_t __p0) { - poly128_t __ret; - __ret = (poly128_t)(__p0); - return __ret; -} -#else -__ai poly128_t vreinterpretq_p128_u8(uint8x16_t __p0) { - poly128_t __ret; - __ret = (poly128_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly128_t vreinterpretq_p128_u32(uint32x4_t __p0) { - poly128_t __ret; - __ret = (poly128_t)(__p0); - return __ret; -} -#else -__ai poly128_t vreinterpretq_p128_u32(uint32x4_t __p0) { - poly128_t __ret; - __ret = (poly128_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly128_t vreinterpretq_p128_u64(uint64x2_t __p0) { - poly128_t __ret; - __ret = (poly128_t)(__p0); - return __ret; -} -#else -__ai poly128_t 
vreinterpretq_p128_u64(uint64x2_t __p0) { - poly128_t __ret; - __ret = (poly128_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly128_t vreinterpretq_p128_u16(uint16x8_t __p0) { - poly128_t __ret; - __ret = (poly128_t)(__p0); - return __ret; -} -#else -__ai poly128_t vreinterpretq_p128_u16(uint16x8_t __p0) { - poly128_t __ret; - __ret = (poly128_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly128_t vreinterpretq_p128_s8(int8x16_t __p0) { - poly128_t __ret; - __ret = (poly128_t)(__p0); - return __ret; -} -#else -__ai poly128_t vreinterpretq_p128_s8(int8x16_t __p0) { - poly128_t __ret; - __ret = (poly128_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly128_t vreinterpretq_p128_f64(float64x2_t __p0) { - poly128_t __ret; - __ret = (poly128_t)(__p0); - return __ret; -} -#else -__ai poly128_t vreinterpretq_p128_f64(float64x2_t __p0) { - poly128_t __ret; - __ret = (poly128_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly128_t vreinterpretq_p128_f32(float32x4_t __p0) { - poly128_t __ret; - __ret = (poly128_t)(__p0); - return __ret; -} -#else -__ai poly128_t vreinterpretq_p128_f32(float32x4_t __p0) { - poly128_t __ret; - __ret = (poly128_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly128_t vreinterpretq_p128_f16(float16x8_t __p0) { - poly128_t __ret; - __ret = (poly128_t)(__p0); - return __ret; -} -#else -__ai poly128_t vreinterpretq_p128_f16(float16x8_t __p0) { - poly128_t __ret; - __ret = (poly128_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly128_t vreinterpretq_p128_s32(int32x4_t __p0) { - poly128_t __ret; - __ret = (poly128_t)(__p0); - return __ret; -} -#else -__ai poly128_t vreinterpretq_p128_s32(int32x4_t __p0) { - poly128_t __ret; - __ret = (poly128_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly128_t vreinterpretq_p128_s64(int64x2_t __p0) { - poly128_t __ret; - __ret = (poly128_t)(__p0); - return __ret; -} -#else -__ai poly128_t vreinterpretq_p128_s64(int64x2_t __p0) { - poly128_t __ret; - __ret = (poly128_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly128_t vreinterpretq_p128_s16(int16x8_t __p0) { - poly128_t __ret; - __ret = (poly128_t)(__p0); - return __ret; -} -#else -__ai poly128_t vreinterpretq_p128_s16(int16x8_t __p0) { - poly128_t __ret; - __ret = (poly128_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly64x2_t vreinterpretq_p64_p8(poly8x16_t __p0) { - poly64x2_t __ret; - __ret = (poly64x2_t)(__p0); - return __ret; -} -#else -__ai poly64x2_t vreinterpretq_p64_p8(poly8x16_t __p0) { - poly64x2_t __ret; - __ret = (poly64x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly64x2_t vreinterpretq_p64_p128(poly128_t __p0) { - poly64x2_t __ret; - __ret = (poly64x2_t)(__p0); - return __ret; -} -#else -__ai poly64x2_t vreinterpretq_p64_p128(poly128_t __p0) { - poly64x2_t __ret; - __ret = (poly64x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly64x2_t vreinterpretq_p64_p16(poly16x8_t __p0) { - poly64x2_t __ret; - __ret = (poly64x2_t)(__p0); - return __ret; -} -#else -__ai poly64x2_t vreinterpretq_p64_p16(poly16x8_t __p0) { - poly64x2_t __ret; - __ret = (poly64x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly64x2_t vreinterpretq_p64_u8(uint8x16_t __p0) { - poly64x2_t __ret; - __ret = (poly64x2_t)(__p0); - return __ret; -} -#else -__ai poly64x2_t 
vreinterpretq_p64_u8(uint8x16_t __p0) { - poly64x2_t __ret; - __ret = (poly64x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly64x2_t vreinterpretq_p64_u32(uint32x4_t __p0) { - poly64x2_t __ret; - __ret = (poly64x2_t)(__p0); - return __ret; -} -#else -__ai poly64x2_t vreinterpretq_p64_u32(uint32x4_t __p0) { - poly64x2_t __ret; - __ret = (poly64x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly64x2_t vreinterpretq_p64_u64(uint64x2_t __p0) { - poly64x2_t __ret; - __ret = (poly64x2_t)(__p0); - return __ret; -} -#else -__ai poly64x2_t vreinterpretq_p64_u64(uint64x2_t __p0) { - poly64x2_t __ret; - __ret = (poly64x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly64x2_t vreinterpretq_p64_u16(uint16x8_t __p0) { - poly64x2_t __ret; - __ret = (poly64x2_t)(__p0); - return __ret; -} -#else -__ai poly64x2_t vreinterpretq_p64_u16(uint16x8_t __p0) { - poly64x2_t __ret; - __ret = (poly64x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly64x2_t vreinterpretq_p64_s8(int8x16_t __p0) { - poly64x2_t __ret; - __ret = (poly64x2_t)(__p0); - return __ret; -} -#else -__ai poly64x2_t vreinterpretq_p64_s8(int8x16_t __p0) { - poly64x2_t __ret; - __ret = (poly64x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly64x2_t vreinterpretq_p64_f64(float64x2_t __p0) { - poly64x2_t __ret; - __ret = (poly64x2_t)(__p0); - return __ret; -} -#else -__ai poly64x2_t vreinterpretq_p64_f64(float64x2_t __p0) { - poly64x2_t __ret; - __ret = (poly64x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly64x2_t vreinterpretq_p64_f32(float32x4_t __p0) { - poly64x2_t __ret; - __ret = (poly64x2_t)(__p0); - return __ret; -} -#else -__ai poly64x2_t vreinterpretq_p64_f32(float32x4_t __p0) { - poly64x2_t __ret; - __ret = (poly64x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly64x2_t vreinterpretq_p64_f16(float16x8_t __p0) { - poly64x2_t __ret; - __ret = (poly64x2_t)(__p0); - return __ret; -} -#else -__ai poly64x2_t vreinterpretq_p64_f16(float16x8_t __p0) { - poly64x2_t __ret; - __ret = (poly64x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly64x2_t vreinterpretq_p64_s32(int32x4_t __p0) { - poly64x2_t __ret; - __ret = (poly64x2_t)(__p0); - return __ret; -} -#else -__ai poly64x2_t vreinterpretq_p64_s32(int32x4_t __p0) { - poly64x2_t __ret; - __ret = (poly64x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly64x2_t vreinterpretq_p64_s64(int64x2_t __p0) { - poly64x2_t __ret; - __ret = (poly64x2_t)(__p0); - return __ret; -} -#else -__ai poly64x2_t vreinterpretq_p64_s64(int64x2_t __p0) { - poly64x2_t __ret; - __ret = (poly64x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly64x2_t vreinterpretq_p64_s16(int16x8_t __p0) { - poly64x2_t __ret; - __ret = (poly64x2_t)(__p0); - return __ret; -} -#else -__ai poly64x2_t vreinterpretq_p64_s16(int16x8_t __p0) { - poly64x2_t __ret; - __ret = (poly64x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly16x8_t vreinterpretq_p16_p8(poly8x16_t __p0) { - poly16x8_t __ret; - __ret = (poly16x8_t)(__p0); - return __ret; -} -#else -__ai poly16x8_t vreinterpretq_p16_p8(poly8x16_t __p0) { - poly16x8_t __ret; - __ret = (poly16x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly16x8_t vreinterpretq_p16_p128(poly128_t __p0) { - poly16x8_t __ret; - __ret = (poly16x8_t)(__p0); - return __ret; -} -#else -__ai 
poly16x8_t vreinterpretq_p16_p128(poly128_t __p0) { - poly16x8_t __ret; - __ret = (poly16x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly16x8_t vreinterpretq_p16_p64(poly64x2_t __p0) { - poly16x8_t __ret; - __ret = (poly16x8_t)(__p0); - return __ret; -} -#else -__ai poly16x8_t vreinterpretq_p16_p64(poly64x2_t __p0) { - poly16x8_t __ret; - __ret = (poly16x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly16x8_t vreinterpretq_p16_u8(uint8x16_t __p0) { - poly16x8_t __ret; - __ret = (poly16x8_t)(__p0); - return __ret; -} -#else -__ai poly16x8_t vreinterpretq_p16_u8(uint8x16_t __p0) { - poly16x8_t __ret; - __ret = (poly16x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly16x8_t vreinterpretq_p16_u32(uint32x4_t __p0) { - poly16x8_t __ret; - __ret = (poly16x8_t)(__p0); - return __ret; -} -#else -__ai poly16x8_t vreinterpretq_p16_u32(uint32x4_t __p0) { - poly16x8_t __ret; - __ret = (poly16x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly16x8_t vreinterpretq_p16_u64(uint64x2_t __p0) { - poly16x8_t __ret; - __ret = (poly16x8_t)(__p0); - return __ret; -} -#else -__ai poly16x8_t vreinterpretq_p16_u64(uint64x2_t __p0) { - poly16x8_t __ret; - __ret = (poly16x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly16x8_t vreinterpretq_p16_u16(uint16x8_t __p0) { - poly16x8_t __ret; - __ret = (poly16x8_t)(__p0); - return __ret; -} -#else -__ai poly16x8_t vreinterpretq_p16_u16(uint16x8_t __p0) { - poly16x8_t __ret; - __ret = (poly16x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly16x8_t vreinterpretq_p16_s8(int8x16_t __p0) { - poly16x8_t __ret; - __ret = (poly16x8_t)(__p0); - return __ret; -} -#else -__ai poly16x8_t vreinterpretq_p16_s8(int8x16_t __p0) { - poly16x8_t __ret; - __ret = (poly16x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly16x8_t vreinterpretq_p16_f64(float64x2_t __p0) { - poly16x8_t __ret; - __ret = (poly16x8_t)(__p0); - return __ret; -} -#else -__ai poly16x8_t vreinterpretq_p16_f64(float64x2_t __p0) { - poly16x8_t __ret; - __ret = (poly16x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly16x8_t vreinterpretq_p16_f32(float32x4_t __p0) { - poly16x8_t __ret; - __ret = (poly16x8_t)(__p0); - return __ret; -} -#else -__ai poly16x8_t vreinterpretq_p16_f32(float32x4_t __p0) { - poly16x8_t __ret; - __ret = (poly16x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly16x8_t vreinterpretq_p16_f16(float16x8_t __p0) { - poly16x8_t __ret; - __ret = (poly16x8_t)(__p0); - return __ret; -} -#else -__ai poly16x8_t vreinterpretq_p16_f16(float16x8_t __p0) { - poly16x8_t __ret; - __ret = (poly16x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly16x8_t vreinterpretq_p16_s32(int32x4_t __p0) { - poly16x8_t __ret; - __ret = (poly16x8_t)(__p0); - return __ret; -} -#else -__ai poly16x8_t vreinterpretq_p16_s32(int32x4_t __p0) { - poly16x8_t __ret; - __ret = (poly16x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly16x8_t vreinterpretq_p16_s64(int64x2_t __p0) { - poly16x8_t __ret; - __ret = (poly16x8_t)(__p0); - return __ret; -} -#else -__ai poly16x8_t vreinterpretq_p16_s64(int64x2_t __p0) { - poly16x8_t __ret; - __ret = (poly16x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly16x8_t vreinterpretq_p16_s16(int16x8_t __p0) { - poly16x8_t __ret; - __ret = (poly16x8_t)(__p0); - return __ret; -} -#else 
-__ai poly16x8_t vreinterpretq_p16_s16(int16x8_t __p0) { - poly16x8_t __ret; - __ret = (poly16x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vreinterpretq_u8_p8(poly8x16_t __p0) { - uint8x16_t __ret; - __ret = (uint8x16_t)(__p0); - return __ret; -} -#else -__ai uint8x16_t vreinterpretq_u8_p8(poly8x16_t __p0) { - uint8x16_t __ret; - __ret = (uint8x16_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vreinterpretq_u8_p128(poly128_t __p0) { - uint8x16_t __ret; - __ret = (uint8x16_t)(__p0); - return __ret; -} -#else -__ai uint8x16_t vreinterpretq_u8_p128(poly128_t __p0) { - uint8x16_t __ret; - __ret = (uint8x16_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vreinterpretq_u8_p64(poly64x2_t __p0) { - uint8x16_t __ret; - __ret = (uint8x16_t)(__p0); - return __ret; -} -#else -__ai uint8x16_t vreinterpretq_u8_p64(poly64x2_t __p0) { - uint8x16_t __ret; - __ret = (uint8x16_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vreinterpretq_u8_p16(poly16x8_t __p0) { - uint8x16_t __ret; - __ret = (uint8x16_t)(__p0); - return __ret; -} -#else -__ai uint8x16_t vreinterpretq_u8_p16(poly16x8_t __p0) { - uint8x16_t __ret; - __ret = (uint8x16_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vreinterpretq_u8_u32(uint32x4_t __p0) { - uint8x16_t __ret; - __ret = (uint8x16_t)(__p0); - return __ret; -} -#else -__ai uint8x16_t vreinterpretq_u8_u32(uint32x4_t __p0) { - uint8x16_t __ret; - __ret = (uint8x16_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vreinterpretq_u8_u64(uint64x2_t __p0) { - uint8x16_t __ret; - __ret = (uint8x16_t)(__p0); - return __ret; -} -#else -__ai uint8x16_t vreinterpretq_u8_u64(uint64x2_t __p0) { - uint8x16_t __ret; - __ret = (uint8x16_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vreinterpretq_u8_u16(uint16x8_t __p0) { - uint8x16_t __ret; - __ret = (uint8x16_t)(__p0); - return __ret; -} -#else -__ai uint8x16_t vreinterpretq_u8_u16(uint16x8_t __p0) { - uint8x16_t __ret; - __ret = (uint8x16_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vreinterpretq_u8_s8(int8x16_t __p0) { - uint8x16_t __ret; - __ret = (uint8x16_t)(__p0); - return __ret; -} -#else -__ai uint8x16_t vreinterpretq_u8_s8(int8x16_t __p0) { - uint8x16_t __ret; - __ret = (uint8x16_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vreinterpretq_u8_f64(float64x2_t __p0) { - uint8x16_t __ret; - __ret = (uint8x16_t)(__p0); - return __ret; -} -#else -__ai uint8x16_t vreinterpretq_u8_f64(float64x2_t __p0) { - uint8x16_t __ret; - __ret = (uint8x16_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vreinterpretq_u8_f32(float32x4_t __p0) { - uint8x16_t __ret; - __ret = (uint8x16_t)(__p0); - return __ret; -} -#else -__ai uint8x16_t vreinterpretq_u8_f32(float32x4_t __p0) { - uint8x16_t __ret; - __ret = (uint8x16_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vreinterpretq_u8_f16(float16x8_t __p0) { - uint8x16_t __ret; - __ret = (uint8x16_t)(__p0); - return __ret; -} -#else -__ai uint8x16_t vreinterpretq_u8_f16(float16x8_t __p0) { - uint8x16_t __ret; - __ret = (uint8x16_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vreinterpretq_u8_s32(int32x4_t __p0) { - uint8x16_t __ret; - __ret = (uint8x16_t)(__p0); - return __ret; -} -#else -__ai 
uint8x16_t vreinterpretq_u8_s32(int32x4_t __p0) { - uint8x16_t __ret; - __ret = (uint8x16_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vreinterpretq_u8_s64(int64x2_t __p0) { - uint8x16_t __ret; - __ret = (uint8x16_t)(__p0); - return __ret; -} -#else -__ai uint8x16_t vreinterpretq_u8_s64(int64x2_t __p0) { - uint8x16_t __ret; - __ret = (uint8x16_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vreinterpretq_u8_s16(int16x8_t __p0) { - uint8x16_t __ret; - __ret = (uint8x16_t)(__p0); - return __ret; -} -#else -__ai uint8x16_t vreinterpretq_u8_s16(int16x8_t __p0) { - uint8x16_t __ret; - __ret = (uint8x16_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vreinterpretq_u32_p8(poly8x16_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t)(__p0); - return __ret; -} -#else -__ai uint32x4_t vreinterpretq_u32_p8(poly8x16_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vreinterpretq_u32_p128(poly128_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t)(__p0); - return __ret; -} -#else -__ai uint32x4_t vreinterpretq_u32_p128(poly128_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vreinterpretq_u32_p64(poly64x2_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t)(__p0); - return __ret; -} -#else -__ai uint32x4_t vreinterpretq_u32_p64(poly64x2_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vreinterpretq_u32_p16(poly16x8_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t)(__p0); - return __ret; -} -#else -__ai uint32x4_t vreinterpretq_u32_p16(poly16x8_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vreinterpretq_u32_u8(uint8x16_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t)(__p0); - return __ret; -} -#else -__ai uint32x4_t vreinterpretq_u32_u8(uint8x16_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vreinterpretq_u32_u64(uint64x2_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t)(__p0); - return __ret; -} -#else -__ai uint32x4_t vreinterpretq_u32_u64(uint64x2_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vreinterpretq_u32_u16(uint16x8_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t)(__p0); - return __ret; -} -#else -__ai uint32x4_t vreinterpretq_u32_u16(uint16x8_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vreinterpretq_u32_s8(int8x16_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t)(__p0); - return __ret; -} -#else -__ai uint32x4_t vreinterpretq_u32_s8(int8x16_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vreinterpretq_u32_f64(float64x2_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t)(__p0); - return __ret; -} -#else -__ai uint32x4_t vreinterpretq_u32_f64(float64x2_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vreinterpretq_u32_f32(float32x4_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t)(__p0); - return __ret; -} -#else -__ai 
uint32x4_t vreinterpretq_u32_f32(float32x4_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vreinterpretq_u32_f16(float16x8_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t)(__p0); - return __ret; -} -#else -__ai uint32x4_t vreinterpretq_u32_f16(float16x8_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vreinterpretq_u32_s32(int32x4_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t)(__p0); - return __ret; -} -#else -__ai uint32x4_t vreinterpretq_u32_s32(int32x4_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vreinterpretq_u32_s64(int64x2_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t)(__p0); - return __ret; -} -#else -__ai uint32x4_t vreinterpretq_u32_s64(int64x2_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vreinterpretq_u32_s16(int16x8_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t)(__p0); - return __ret; -} -#else -__ai uint32x4_t vreinterpretq_u32_s16(int16x8_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vreinterpretq_u64_p8(poly8x16_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t)(__p0); - return __ret; -} -#else -__ai uint64x2_t vreinterpretq_u64_p8(poly8x16_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vreinterpretq_u64_p128(poly128_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t)(__p0); - return __ret; -} -#else -__ai uint64x2_t vreinterpretq_u64_p128(poly128_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vreinterpretq_u64_p64(poly64x2_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t)(__p0); - return __ret; -} -#else -__ai uint64x2_t vreinterpretq_u64_p64(poly64x2_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vreinterpretq_u64_p16(poly16x8_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t)(__p0); - return __ret; -} -#else -__ai uint64x2_t vreinterpretq_u64_p16(poly16x8_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vreinterpretq_u64_u8(uint8x16_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t)(__p0); - return __ret; -} -#else -__ai uint64x2_t vreinterpretq_u64_u8(uint8x16_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vreinterpretq_u64_u32(uint32x4_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t)(__p0); - return __ret; -} -#else -__ai uint64x2_t vreinterpretq_u64_u32(uint32x4_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vreinterpretq_u64_u16(uint16x8_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t)(__p0); - return __ret; -} -#else -__ai uint64x2_t vreinterpretq_u64_u16(uint16x8_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vreinterpretq_u64_s8(int8x16_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t)(__p0); - return __ret; -} -#else -__ai 
uint64x2_t vreinterpretq_u64_s8(int8x16_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vreinterpretq_u64_f64(float64x2_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t)(__p0); - return __ret; -} -#else -__ai uint64x2_t vreinterpretq_u64_f64(float64x2_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vreinterpretq_u64_f32(float32x4_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t)(__p0); - return __ret; -} -#else -__ai uint64x2_t vreinterpretq_u64_f32(float32x4_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vreinterpretq_u64_f16(float16x8_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t)(__p0); - return __ret; -} -#else -__ai uint64x2_t vreinterpretq_u64_f16(float16x8_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vreinterpretq_u64_s32(int32x4_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t)(__p0); - return __ret; -} -#else -__ai uint64x2_t vreinterpretq_u64_s32(int32x4_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vreinterpretq_u64_s64(int64x2_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t)(__p0); - return __ret; -} -#else -__ai uint64x2_t vreinterpretq_u64_s64(int64x2_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vreinterpretq_u64_s16(int16x8_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t)(__p0); - return __ret; -} -#else -__ai uint64x2_t vreinterpretq_u64_s16(int16x8_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vreinterpretq_u16_p8(poly8x16_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0); - return __ret; -} -#else -__ai uint16x8_t vreinterpretq_u16_p8(poly8x16_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vreinterpretq_u16_p128(poly128_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0); - return __ret; -} -#else -__ai uint16x8_t vreinterpretq_u16_p128(poly128_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vreinterpretq_u16_p64(poly64x2_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0); - return __ret; -} -#else -__ai uint16x8_t vreinterpretq_u16_p64(poly64x2_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vreinterpretq_u16_p16(poly16x8_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0); - return __ret; -} -#else -__ai uint16x8_t vreinterpretq_u16_p16(poly16x8_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vreinterpretq_u16_u8(uint8x16_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0); - return __ret; -} -#else -__ai uint16x8_t vreinterpretq_u16_u8(uint8x16_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vreinterpretq_u16_u32(uint32x4_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0); - return __ret; -} -#else 
-__ai uint16x8_t vreinterpretq_u16_u32(uint32x4_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vreinterpretq_u16_u64(uint64x2_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0); - return __ret; -} -#else -__ai uint16x8_t vreinterpretq_u16_u64(uint64x2_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vreinterpretq_u16_s8(int8x16_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0); - return __ret; -} -#else -__ai uint16x8_t vreinterpretq_u16_s8(int8x16_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vreinterpretq_u16_f64(float64x2_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0); - return __ret; -} -#else -__ai uint16x8_t vreinterpretq_u16_f64(float64x2_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vreinterpretq_u16_f32(float32x4_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0); - return __ret; -} -#else -__ai uint16x8_t vreinterpretq_u16_f32(float32x4_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vreinterpretq_u16_f16(float16x8_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0); - return __ret; -} -#else -__ai uint16x8_t vreinterpretq_u16_f16(float16x8_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vreinterpretq_u16_s32(int32x4_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0); - return __ret; -} -#else -__ai uint16x8_t vreinterpretq_u16_s32(int32x4_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vreinterpretq_u16_s64(int64x2_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0); - return __ret; -} -#else -__ai uint16x8_t vreinterpretq_u16_s64(int64x2_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vreinterpretq_u16_s16(int16x8_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0); - return __ret; -} -#else -__ai uint16x8_t vreinterpretq_u16_s16(int16x8_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vreinterpretq_s8_p8(poly8x16_t __p0) { - int8x16_t __ret; - __ret = (int8x16_t)(__p0); - return __ret; -} -#else -__ai int8x16_t vreinterpretq_s8_p8(poly8x16_t __p0) { - int8x16_t __ret; - __ret = (int8x16_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vreinterpretq_s8_p128(poly128_t __p0) { - int8x16_t __ret; - __ret = (int8x16_t)(__p0); - return __ret; -} -#else -__ai int8x16_t vreinterpretq_s8_p128(poly128_t __p0) { - int8x16_t __ret; - __ret = (int8x16_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vreinterpretq_s8_p64(poly64x2_t __p0) { - int8x16_t __ret; - __ret = (int8x16_t)(__p0); - return __ret; -} -#else -__ai int8x16_t vreinterpretq_s8_p64(poly64x2_t __p0) { - int8x16_t __ret; - __ret = (int8x16_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vreinterpretq_s8_p16(poly16x8_t __p0) { - int8x16_t __ret; - __ret = (int8x16_t)(__p0); - return __ret; -} -#else -__ai int8x16_t 
vreinterpretq_s8_p16(poly16x8_t __p0) { - int8x16_t __ret; - __ret = (int8x16_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vreinterpretq_s8_u8(uint8x16_t __p0) { - int8x16_t __ret; - __ret = (int8x16_t)(__p0); - return __ret; -} -#else -__ai int8x16_t vreinterpretq_s8_u8(uint8x16_t __p0) { - int8x16_t __ret; - __ret = (int8x16_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vreinterpretq_s8_u32(uint32x4_t __p0) { - int8x16_t __ret; - __ret = (int8x16_t)(__p0); - return __ret; -} -#else -__ai int8x16_t vreinterpretq_s8_u32(uint32x4_t __p0) { - int8x16_t __ret; - __ret = (int8x16_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vreinterpretq_s8_u64(uint64x2_t __p0) { - int8x16_t __ret; - __ret = (int8x16_t)(__p0); - return __ret; -} -#else -__ai int8x16_t vreinterpretq_s8_u64(uint64x2_t __p0) { - int8x16_t __ret; - __ret = (int8x16_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vreinterpretq_s8_u16(uint16x8_t __p0) { - int8x16_t __ret; - __ret = (int8x16_t)(__p0); - return __ret; -} -#else -__ai int8x16_t vreinterpretq_s8_u16(uint16x8_t __p0) { - int8x16_t __ret; - __ret = (int8x16_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vreinterpretq_s8_f64(float64x2_t __p0) { - int8x16_t __ret; - __ret = (int8x16_t)(__p0); - return __ret; -} -#else -__ai int8x16_t vreinterpretq_s8_f64(float64x2_t __p0) { - int8x16_t __ret; - __ret = (int8x16_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vreinterpretq_s8_f32(float32x4_t __p0) { - int8x16_t __ret; - __ret = (int8x16_t)(__p0); - return __ret; -} -#else -__ai int8x16_t vreinterpretq_s8_f32(float32x4_t __p0) { - int8x16_t __ret; - __ret = (int8x16_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vreinterpretq_s8_f16(float16x8_t __p0) { - int8x16_t __ret; - __ret = (int8x16_t)(__p0); - return __ret; -} -#else -__ai int8x16_t vreinterpretq_s8_f16(float16x8_t __p0) { - int8x16_t __ret; - __ret = (int8x16_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vreinterpretq_s8_s32(int32x4_t __p0) { - int8x16_t __ret; - __ret = (int8x16_t)(__p0); - return __ret; -} -#else -__ai int8x16_t vreinterpretq_s8_s32(int32x4_t __p0) { - int8x16_t __ret; - __ret = (int8x16_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vreinterpretq_s8_s64(int64x2_t __p0) { - int8x16_t __ret; - __ret = (int8x16_t)(__p0); - return __ret; -} -#else -__ai int8x16_t vreinterpretq_s8_s64(int64x2_t __p0) { - int8x16_t __ret; - __ret = (int8x16_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vreinterpretq_s8_s16(int16x8_t __p0) { - int8x16_t __ret; - __ret = (int8x16_t)(__p0); - return __ret; -} -#else -__ai int8x16_t vreinterpretq_s8_s16(int16x8_t __p0) { - int8x16_t __ret; - __ret = (int8x16_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vreinterpretq_f64_p8(poly8x16_t __p0) { - float64x2_t __ret; - __ret = (float64x2_t)(__p0); - return __ret; -} -#else -__ai float64x2_t vreinterpretq_f64_p8(poly8x16_t __p0) { - float64x2_t __ret; - __ret = (float64x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vreinterpretq_f64_p128(poly128_t __p0) { - float64x2_t __ret; - __ret = (float64x2_t)(__p0); - return __ret; -} -#else -__ai float64x2_t vreinterpretq_f64_p128(poly128_t __p0) { - float64x2_t __ret; - 
__ret = (float64x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vreinterpretq_f64_p64(poly64x2_t __p0) { - float64x2_t __ret; - __ret = (float64x2_t)(__p0); - return __ret; -} -#else -__ai float64x2_t vreinterpretq_f64_p64(poly64x2_t __p0) { - float64x2_t __ret; - __ret = (float64x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vreinterpretq_f64_p16(poly16x8_t __p0) { - float64x2_t __ret; - __ret = (float64x2_t)(__p0); - return __ret; -} -#else -__ai float64x2_t vreinterpretq_f64_p16(poly16x8_t __p0) { - float64x2_t __ret; - __ret = (float64x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vreinterpretq_f64_u8(uint8x16_t __p0) { - float64x2_t __ret; - __ret = (float64x2_t)(__p0); - return __ret; -} -#else -__ai float64x2_t vreinterpretq_f64_u8(uint8x16_t __p0) { - float64x2_t __ret; - __ret = (float64x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vreinterpretq_f64_u32(uint32x4_t __p0) { - float64x2_t __ret; - __ret = (float64x2_t)(__p0); - return __ret; -} -#else -__ai float64x2_t vreinterpretq_f64_u32(uint32x4_t __p0) { - float64x2_t __ret; - __ret = (float64x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vreinterpretq_f64_u64(uint64x2_t __p0) { - float64x2_t __ret; - __ret = (float64x2_t)(__p0); - return __ret; -} -#else -__ai float64x2_t vreinterpretq_f64_u64(uint64x2_t __p0) { - float64x2_t __ret; - __ret = (float64x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vreinterpretq_f64_u16(uint16x8_t __p0) { - float64x2_t __ret; - __ret = (float64x2_t)(__p0); - return __ret; -} -#else -__ai float64x2_t vreinterpretq_f64_u16(uint16x8_t __p0) { - float64x2_t __ret; - __ret = (float64x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vreinterpretq_f64_s8(int8x16_t __p0) { - float64x2_t __ret; - __ret = (float64x2_t)(__p0); - return __ret; -} -#else -__ai float64x2_t vreinterpretq_f64_s8(int8x16_t __p0) { - float64x2_t __ret; - __ret = (float64x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vreinterpretq_f64_f32(float32x4_t __p0) { - float64x2_t __ret; - __ret = (float64x2_t)(__p0); - return __ret; -} -#else -__ai float64x2_t vreinterpretq_f64_f32(float32x4_t __p0) { - float64x2_t __ret; - __ret = (float64x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vreinterpretq_f64_f16(float16x8_t __p0) { - float64x2_t __ret; - __ret = (float64x2_t)(__p0); - return __ret; -} -#else -__ai float64x2_t vreinterpretq_f64_f16(float16x8_t __p0) { - float64x2_t __ret; - __ret = (float64x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vreinterpretq_f64_s32(int32x4_t __p0) { - float64x2_t __ret; - __ret = (float64x2_t)(__p0); - return __ret; -} -#else -__ai float64x2_t vreinterpretq_f64_s32(int32x4_t __p0) { - float64x2_t __ret; - __ret = (float64x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vreinterpretq_f64_s64(int64x2_t __p0) { - float64x2_t __ret; - __ret = (float64x2_t)(__p0); - return __ret; -} -#else -__ai float64x2_t vreinterpretq_f64_s64(int64x2_t __p0) { - float64x2_t __ret; - __ret = (float64x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vreinterpretq_f64_s16(int16x8_t __p0) { - float64x2_t __ret; - __ret = (float64x2_t)(__p0); - return __ret; -} -#else -__ai 
float64x2_t vreinterpretq_f64_s16(int16x8_t __p0) { - float64x2_t __ret; - __ret = (float64x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vreinterpretq_f32_p8(poly8x16_t __p0) { - float32x4_t __ret; - __ret = (float32x4_t)(__p0); - return __ret; -} -#else -__ai float32x4_t vreinterpretq_f32_p8(poly8x16_t __p0) { - float32x4_t __ret; - __ret = (float32x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vreinterpretq_f32_p128(poly128_t __p0) { - float32x4_t __ret; - __ret = (float32x4_t)(__p0); - return __ret; -} -#else -__ai float32x4_t vreinterpretq_f32_p128(poly128_t __p0) { - float32x4_t __ret; - __ret = (float32x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vreinterpretq_f32_p64(poly64x2_t __p0) { - float32x4_t __ret; - __ret = (float32x4_t)(__p0); - return __ret; -} -#else -__ai float32x4_t vreinterpretq_f32_p64(poly64x2_t __p0) { - float32x4_t __ret; - __ret = (float32x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vreinterpretq_f32_p16(poly16x8_t __p0) { - float32x4_t __ret; - __ret = (float32x4_t)(__p0); - return __ret; -} -#else -__ai float32x4_t vreinterpretq_f32_p16(poly16x8_t __p0) { - float32x4_t __ret; - __ret = (float32x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vreinterpretq_f32_u8(uint8x16_t __p0) { - float32x4_t __ret; - __ret = (float32x4_t)(__p0); - return __ret; -} -#else -__ai float32x4_t vreinterpretq_f32_u8(uint8x16_t __p0) { - float32x4_t __ret; - __ret = (float32x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vreinterpretq_f32_u32(uint32x4_t __p0) { - float32x4_t __ret; - __ret = (float32x4_t)(__p0); - return __ret; -} -#else -__ai float32x4_t vreinterpretq_f32_u32(uint32x4_t __p0) { - float32x4_t __ret; - __ret = (float32x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vreinterpretq_f32_u64(uint64x2_t __p0) { - float32x4_t __ret; - __ret = (float32x4_t)(__p0); - return __ret; -} -#else -__ai float32x4_t vreinterpretq_f32_u64(uint64x2_t __p0) { - float32x4_t __ret; - __ret = (float32x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vreinterpretq_f32_u16(uint16x8_t __p0) { - float32x4_t __ret; - __ret = (float32x4_t)(__p0); - return __ret; -} -#else -__ai float32x4_t vreinterpretq_f32_u16(uint16x8_t __p0) { - float32x4_t __ret; - __ret = (float32x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vreinterpretq_f32_s8(int8x16_t __p0) { - float32x4_t __ret; - __ret = (float32x4_t)(__p0); - return __ret; -} -#else -__ai float32x4_t vreinterpretq_f32_s8(int8x16_t __p0) { - float32x4_t __ret; - __ret = (float32x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vreinterpretq_f32_f64(float64x2_t __p0) { - float32x4_t __ret; - __ret = (float32x4_t)(__p0); - return __ret; -} -#else -__ai float32x4_t vreinterpretq_f32_f64(float64x2_t __p0) { - float32x4_t __ret; - __ret = (float32x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vreinterpretq_f32_f16(float16x8_t __p0) { - float32x4_t __ret; - __ret = (float32x4_t)(__p0); - return __ret; -} -#else -__ai float32x4_t vreinterpretq_f32_f16(float16x8_t __p0) { - float32x4_t __ret; - __ret = (float32x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vreinterpretq_f32_s32(int32x4_t __p0) { - 
float32x4_t __ret; - __ret = (float32x4_t)(__p0); - return __ret; -} -#else -__ai float32x4_t vreinterpretq_f32_s32(int32x4_t __p0) { - float32x4_t __ret; - __ret = (float32x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vreinterpretq_f32_s64(int64x2_t __p0) { - float32x4_t __ret; - __ret = (float32x4_t)(__p0); - return __ret; -} -#else -__ai float32x4_t vreinterpretq_f32_s64(int64x2_t __p0) { - float32x4_t __ret; - __ret = (float32x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vreinterpretq_f32_s16(int16x8_t __p0) { - float32x4_t __ret; - __ret = (float32x4_t)(__p0); - return __ret; -} -#else -__ai float32x4_t vreinterpretq_f32_s16(int16x8_t __p0) { - float32x4_t __ret; - __ret = (float32x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x8_t vreinterpretq_f16_p8(poly8x16_t __p0) { - float16x8_t __ret; - __ret = (float16x8_t)(__p0); - return __ret; -} -#else -__ai float16x8_t vreinterpretq_f16_p8(poly8x16_t __p0) { - float16x8_t __ret; - __ret = (float16x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x8_t vreinterpretq_f16_p128(poly128_t __p0) { - float16x8_t __ret; - __ret = (float16x8_t)(__p0); - return __ret; -} -#else -__ai float16x8_t vreinterpretq_f16_p128(poly128_t __p0) { - float16x8_t __ret; - __ret = (float16x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x8_t vreinterpretq_f16_p64(poly64x2_t __p0) { - float16x8_t __ret; - __ret = (float16x8_t)(__p0); - return __ret; -} -#else -__ai float16x8_t vreinterpretq_f16_p64(poly64x2_t __p0) { - float16x8_t __ret; - __ret = (float16x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x8_t vreinterpretq_f16_p16(poly16x8_t __p0) { - float16x8_t __ret; - __ret = (float16x8_t)(__p0); - return __ret; -} -#else -__ai float16x8_t vreinterpretq_f16_p16(poly16x8_t __p0) { - float16x8_t __ret; - __ret = (float16x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x8_t vreinterpretq_f16_u8(uint8x16_t __p0) { - float16x8_t __ret; - __ret = (float16x8_t)(__p0); - return __ret; -} -#else -__ai float16x8_t vreinterpretq_f16_u8(uint8x16_t __p0) { - float16x8_t __ret; - __ret = (float16x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x8_t vreinterpretq_f16_u32(uint32x4_t __p0) { - float16x8_t __ret; - __ret = (float16x8_t)(__p0); - return __ret; -} -#else -__ai float16x8_t vreinterpretq_f16_u32(uint32x4_t __p0) { - float16x8_t __ret; - __ret = (float16x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x8_t vreinterpretq_f16_u64(uint64x2_t __p0) { - float16x8_t __ret; - __ret = (float16x8_t)(__p0); - return __ret; -} -#else -__ai float16x8_t vreinterpretq_f16_u64(uint64x2_t __p0) { - float16x8_t __ret; - __ret = (float16x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x8_t vreinterpretq_f16_u16(uint16x8_t __p0) { - float16x8_t __ret; - __ret = (float16x8_t)(__p0); - return __ret; -} -#else -__ai float16x8_t vreinterpretq_f16_u16(uint16x8_t __p0) { - float16x8_t __ret; - __ret = (float16x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x8_t vreinterpretq_f16_s8(int8x16_t __p0) { - float16x8_t __ret; - __ret = (float16x8_t)(__p0); - return __ret; -} -#else -__ai float16x8_t vreinterpretq_f16_s8(int8x16_t __p0) { - float16x8_t __ret; - __ret = (float16x8_t)(__p0); - return __ret; -} -#endif - -#ifdef 
__LITTLE_ENDIAN__ -__ai float16x8_t vreinterpretq_f16_f64(float64x2_t __p0) { - float16x8_t __ret; - __ret = (float16x8_t)(__p0); - return __ret; -} -#else -__ai float16x8_t vreinterpretq_f16_f64(float64x2_t __p0) { - float16x8_t __ret; - __ret = (float16x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x8_t vreinterpretq_f16_f32(float32x4_t __p0) { - float16x8_t __ret; - __ret = (float16x8_t)(__p0); - return __ret; -} -#else -__ai float16x8_t vreinterpretq_f16_f32(float32x4_t __p0) { - float16x8_t __ret; - __ret = (float16x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x8_t vreinterpretq_f16_s32(int32x4_t __p0) { - float16x8_t __ret; - __ret = (float16x8_t)(__p0); - return __ret; -} -#else -__ai float16x8_t vreinterpretq_f16_s32(int32x4_t __p0) { - float16x8_t __ret; - __ret = (float16x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x8_t vreinterpretq_f16_s64(int64x2_t __p0) { - float16x8_t __ret; - __ret = (float16x8_t)(__p0); - return __ret; -} -#else -__ai float16x8_t vreinterpretq_f16_s64(int64x2_t __p0) { - float16x8_t __ret; - __ret = (float16x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x8_t vreinterpretq_f16_s16(int16x8_t __p0) { - float16x8_t __ret; - __ret = (float16x8_t)(__p0); - return __ret; -} -#else -__ai float16x8_t vreinterpretq_f16_s16(int16x8_t __p0) { - float16x8_t __ret; - __ret = (float16x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vreinterpretq_s32_p8(poly8x16_t __p0) { - int32x4_t __ret; - __ret = (int32x4_t)(__p0); - return __ret; -} -#else -__ai int32x4_t vreinterpretq_s32_p8(poly8x16_t __p0) { - int32x4_t __ret; - __ret = (int32x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vreinterpretq_s32_p128(poly128_t __p0) { - int32x4_t __ret; - __ret = (int32x4_t)(__p0); - return __ret; -} -#else -__ai int32x4_t vreinterpretq_s32_p128(poly128_t __p0) { - int32x4_t __ret; - __ret = (int32x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vreinterpretq_s32_p64(poly64x2_t __p0) { - int32x4_t __ret; - __ret = (int32x4_t)(__p0); - return __ret; -} -#else -__ai int32x4_t vreinterpretq_s32_p64(poly64x2_t __p0) { - int32x4_t __ret; - __ret = (int32x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vreinterpretq_s32_p16(poly16x8_t __p0) { - int32x4_t __ret; - __ret = (int32x4_t)(__p0); - return __ret; -} -#else -__ai int32x4_t vreinterpretq_s32_p16(poly16x8_t __p0) { - int32x4_t __ret; - __ret = (int32x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vreinterpretq_s32_u8(uint8x16_t __p0) { - int32x4_t __ret; - __ret = (int32x4_t)(__p0); - return __ret; -} -#else -__ai int32x4_t vreinterpretq_s32_u8(uint8x16_t __p0) { - int32x4_t __ret; - __ret = (int32x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vreinterpretq_s32_u32(uint32x4_t __p0) { - int32x4_t __ret; - __ret = (int32x4_t)(__p0); - return __ret; -} -#else -__ai int32x4_t vreinterpretq_s32_u32(uint32x4_t __p0) { - int32x4_t __ret; - __ret = (int32x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vreinterpretq_s32_u64(uint64x2_t __p0) { - int32x4_t __ret; - __ret = (int32x4_t)(__p0); - return __ret; -} -#else -__ai int32x4_t vreinterpretq_s32_u64(uint64x2_t __p0) { - int32x4_t __ret; - __ret = (int32x4_t)(__p0); - return __ret; -} -#endif - -#ifdef 
__LITTLE_ENDIAN__ -__ai int32x4_t vreinterpretq_s32_u16(uint16x8_t __p0) { - int32x4_t __ret; - __ret = (int32x4_t)(__p0); - return __ret; -} -#else -__ai int32x4_t vreinterpretq_s32_u16(uint16x8_t __p0) { - int32x4_t __ret; - __ret = (int32x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vreinterpretq_s32_s8(int8x16_t __p0) { - int32x4_t __ret; - __ret = (int32x4_t)(__p0); - return __ret; -} -#else -__ai int32x4_t vreinterpretq_s32_s8(int8x16_t __p0) { - int32x4_t __ret; - __ret = (int32x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vreinterpretq_s32_f64(float64x2_t __p0) { - int32x4_t __ret; - __ret = (int32x4_t)(__p0); - return __ret; -} -#else -__ai int32x4_t vreinterpretq_s32_f64(float64x2_t __p0) { - int32x4_t __ret; - __ret = (int32x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vreinterpretq_s32_f32(float32x4_t __p0) { - int32x4_t __ret; - __ret = (int32x4_t)(__p0); - return __ret; -} -#else -__ai int32x4_t vreinterpretq_s32_f32(float32x4_t __p0) { - int32x4_t __ret; - __ret = (int32x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vreinterpretq_s32_f16(float16x8_t __p0) { - int32x4_t __ret; - __ret = (int32x4_t)(__p0); - return __ret; -} -#else -__ai int32x4_t vreinterpretq_s32_f16(float16x8_t __p0) { - int32x4_t __ret; - __ret = (int32x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vreinterpretq_s32_s64(int64x2_t __p0) { - int32x4_t __ret; - __ret = (int32x4_t)(__p0); - return __ret; -} -#else -__ai int32x4_t vreinterpretq_s32_s64(int64x2_t __p0) { - int32x4_t __ret; - __ret = (int32x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vreinterpretq_s32_s16(int16x8_t __p0) { - int32x4_t __ret; - __ret = (int32x4_t)(__p0); - return __ret; -} -#else -__ai int32x4_t vreinterpretq_s32_s16(int16x8_t __p0) { - int32x4_t __ret; - __ret = (int32x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vreinterpretq_s64_p8(poly8x16_t __p0) { - int64x2_t __ret; - __ret = (int64x2_t)(__p0); - return __ret; -} -#else -__ai int64x2_t vreinterpretq_s64_p8(poly8x16_t __p0) { - int64x2_t __ret; - __ret = (int64x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vreinterpretq_s64_p128(poly128_t __p0) { - int64x2_t __ret; - __ret = (int64x2_t)(__p0); - return __ret; -} -#else -__ai int64x2_t vreinterpretq_s64_p128(poly128_t __p0) { - int64x2_t __ret; - __ret = (int64x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vreinterpretq_s64_p64(poly64x2_t __p0) { - int64x2_t __ret; - __ret = (int64x2_t)(__p0); - return __ret; -} -#else -__ai int64x2_t vreinterpretq_s64_p64(poly64x2_t __p0) { - int64x2_t __ret; - __ret = (int64x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vreinterpretq_s64_p16(poly16x8_t __p0) { - int64x2_t __ret; - __ret = (int64x2_t)(__p0); - return __ret; -} -#else -__ai int64x2_t vreinterpretq_s64_p16(poly16x8_t __p0) { - int64x2_t __ret; - __ret = (int64x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vreinterpretq_s64_u8(uint8x16_t __p0) { - int64x2_t __ret; - __ret = (int64x2_t)(__p0); - return __ret; -} -#else -__ai int64x2_t vreinterpretq_s64_u8(uint8x16_t __p0) { - int64x2_t __ret; - __ret = (int64x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vreinterpretq_s64_u32(uint32x4_t 
__p0) { - int64x2_t __ret; - __ret = (int64x2_t)(__p0); - return __ret; -} -#else -__ai int64x2_t vreinterpretq_s64_u32(uint32x4_t __p0) { - int64x2_t __ret; - __ret = (int64x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vreinterpretq_s64_u64(uint64x2_t __p0) { - int64x2_t __ret; - __ret = (int64x2_t)(__p0); - return __ret; -} -#else -__ai int64x2_t vreinterpretq_s64_u64(uint64x2_t __p0) { - int64x2_t __ret; - __ret = (int64x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vreinterpretq_s64_u16(uint16x8_t __p0) { - int64x2_t __ret; - __ret = (int64x2_t)(__p0); - return __ret; -} -#else -__ai int64x2_t vreinterpretq_s64_u16(uint16x8_t __p0) { - int64x2_t __ret; - __ret = (int64x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vreinterpretq_s64_s8(int8x16_t __p0) { - int64x2_t __ret; - __ret = (int64x2_t)(__p0); - return __ret; -} -#else -__ai int64x2_t vreinterpretq_s64_s8(int8x16_t __p0) { - int64x2_t __ret; - __ret = (int64x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vreinterpretq_s64_f64(float64x2_t __p0) { - int64x2_t __ret; - __ret = (int64x2_t)(__p0); - return __ret; -} -#else -__ai int64x2_t vreinterpretq_s64_f64(float64x2_t __p0) { - int64x2_t __ret; - __ret = (int64x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vreinterpretq_s64_f32(float32x4_t __p0) { - int64x2_t __ret; - __ret = (int64x2_t)(__p0); - return __ret; -} -#else -__ai int64x2_t vreinterpretq_s64_f32(float32x4_t __p0) { - int64x2_t __ret; - __ret = (int64x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vreinterpretq_s64_f16(float16x8_t __p0) { - int64x2_t __ret; - __ret = (int64x2_t)(__p0); - return __ret; -} -#else -__ai int64x2_t vreinterpretq_s64_f16(float16x8_t __p0) { - int64x2_t __ret; - __ret = (int64x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vreinterpretq_s64_s32(int32x4_t __p0) { - int64x2_t __ret; - __ret = (int64x2_t)(__p0); - return __ret; -} -#else -__ai int64x2_t vreinterpretq_s64_s32(int32x4_t __p0) { - int64x2_t __ret; - __ret = (int64x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vreinterpretq_s64_s16(int16x8_t __p0) { - int64x2_t __ret; - __ret = (int64x2_t)(__p0); - return __ret; -} -#else -__ai int64x2_t vreinterpretq_s64_s16(int16x8_t __p0) { - int64x2_t __ret; - __ret = (int64x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vreinterpretq_s16_p8(poly8x16_t __p0) { - int16x8_t __ret; - __ret = (int16x8_t)(__p0); - return __ret; -} -#else -__ai int16x8_t vreinterpretq_s16_p8(poly8x16_t __p0) { - int16x8_t __ret; - __ret = (int16x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vreinterpretq_s16_p128(poly128_t __p0) { - int16x8_t __ret; - __ret = (int16x8_t)(__p0); - return __ret; -} -#else -__ai int16x8_t vreinterpretq_s16_p128(poly128_t __p0) { - int16x8_t __ret; - __ret = (int16x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vreinterpretq_s16_p64(poly64x2_t __p0) { - int16x8_t __ret; - __ret = (int16x8_t)(__p0); - return __ret; -} -#else -__ai int16x8_t vreinterpretq_s16_p64(poly64x2_t __p0) { - int16x8_t __ret; - __ret = (int16x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vreinterpretq_s16_p16(poly16x8_t __p0) { - int16x8_t __ret; - __ret = (int16x8_t)(__p0); - return 
__ret; -} -#else -__ai int16x8_t vreinterpretq_s16_p16(poly16x8_t __p0) { - int16x8_t __ret; - __ret = (int16x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vreinterpretq_s16_u8(uint8x16_t __p0) { - int16x8_t __ret; - __ret = (int16x8_t)(__p0); - return __ret; -} -#else -__ai int16x8_t vreinterpretq_s16_u8(uint8x16_t __p0) { - int16x8_t __ret; - __ret = (int16x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vreinterpretq_s16_u32(uint32x4_t __p0) { - int16x8_t __ret; - __ret = (int16x8_t)(__p0); - return __ret; -} -#else -__ai int16x8_t vreinterpretq_s16_u32(uint32x4_t __p0) { - int16x8_t __ret; - __ret = (int16x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vreinterpretq_s16_u64(uint64x2_t __p0) { - int16x8_t __ret; - __ret = (int16x8_t)(__p0); - return __ret; -} -#else -__ai int16x8_t vreinterpretq_s16_u64(uint64x2_t __p0) { - int16x8_t __ret; - __ret = (int16x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vreinterpretq_s16_u16(uint16x8_t __p0) { - int16x8_t __ret; - __ret = (int16x8_t)(__p0); - return __ret; -} -#else -__ai int16x8_t vreinterpretq_s16_u16(uint16x8_t __p0) { - int16x8_t __ret; - __ret = (int16x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vreinterpretq_s16_s8(int8x16_t __p0) { - int16x8_t __ret; - __ret = (int16x8_t)(__p0); - return __ret; -} -#else -__ai int16x8_t vreinterpretq_s16_s8(int8x16_t __p0) { - int16x8_t __ret; - __ret = (int16x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vreinterpretq_s16_f64(float64x2_t __p0) { - int16x8_t __ret; - __ret = (int16x8_t)(__p0); - return __ret; -} -#else -__ai int16x8_t vreinterpretq_s16_f64(float64x2_t __p0) { - int16x8_t __ret; - __ret = (int16x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vreinterpretq_s16_f32(float32x4_t __p0) { - int16x8_t __ret; - __ret = (int16x8_t)(__p0); - return __ret; -} -#else -__ai int16x8_t vreinterpretq_s16_f32(float32x4_t __p0) { - int16x8_t __ret; - __ret = (int16x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vreinterpretq_s16_f16(float16x8_t __p0) { - int16x8_t __ret; - __ret = (int16x8_t)(__p0); - return __ret; -} -#else -__ai int16x8_t vreinterpretq_s16_f16(float16x8_t __p0) { - int16x8_t __ret; - __ret = (int16x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vreinterpretq_s16_s32(int32x4_t __p0) { - int16x8_t __ret; - __ret = (int16x8_t)(__p0); - return __ret; -} -#else -__ai int16x8_t vreinterpretq_s16_s32(int32x4_t __p0) { - int16x8_t __ret; - __ret = (int16x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vreinterpretq_s16_s64(int64x2_t __p0) { - int16x8_t __ret; - __ret = (int16x8_t)(__p0); - return __ret; -} -#else -__ai int16x8_t vreinterpretq_s16_s64(int64x2_t __p0) { - int16x8_t __ret; - __ret = (int16x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vreinterpret_u8_p8(poly8x8_t __p0) { - uint8x8_t __ret; - __ret = (uint8x8_t)(__p0); - return __ret; -} -#else -__ai uint8x8_t vreinterpret_u8_p8(poly8x8_t __p0) { - uint8x8_t __ret; - __ret = (uint8x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vreinterpret_u8_p64(poly64x1_t __p0) { - uint8x8_t __ret; - __ret = (uint8x8_t)(__p0); - return __ret; -} -#else -__ai uint8x8_t vreinterpret_u8_p64(poly64x1_t __p0) { - 
uint8x8_t __ret; - __ret = (uint8x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vreinterpret_u8_p16(poly16x4_t __p0) { - uint8x8_t __ret; - __ret = (uint8x8_t)(__p0); - return __ret; -} -#else -__ai uint8x8_t vreinterpret_u8_p16(poly16x4_t __p0) { - uint8x8_t __ret; - __ret = (uint8x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vreinterpret_u8_u32(uint32x2_t __p0) { - uint8x8_t __ret; - __ret = (uint8x8_t)(__p0); - return __ret; -} -#else -__ai uint8x8_t vreinterpret_u8_u32(uint32x2_t __p0) { - uint8x8_t __ret; - __ret = (uint8x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vreinterpret_u8_u64(uint64x1_t __p0) { - uint8x8_t __ret; - __ret = (uint8x8_t)(__p0); - return __ret; -} -#else -__ai uint8x8_t vreinterpret_u8_u64(uint64x1_t __p0) { - uint8x8_t __ret; - __ret = (uint8x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vreinterpret_u8_u16(uint16x4_t __p0) { - uint8x8_t __ret; - __ret = (uint8x8_t)(__p0); - return __ret; -} -#else -__ai uint8x8_t vreinterpret_u8_u16(uint16x4_t __p0) { - uint8x8_t __ret; - __ret = (uint8x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vreinterpret_u8_s8(int8x8_t __p0) { - uint8x8_t __ret; - __ret = (uint8x8_t)(__p0); - return __ret; -} -#else -__ai uint8x8_t vreinterpret_u8_s8(int8x8_t __p0) { - uint8x8_t __ret; - __ret = (uint8x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vreinterpret_u8_f64(float64x1_t __p0) { - uint8x8_t __ret; - __ret = (uint8x8_t)(__p0); - return __ret; -} -#else -__ai uint8x8_t vreinterpret_u8_f64(float64x1_t __p0) { - uint8x8_t __ret; - __ret = (uint8x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vreinterpret_u8_f32(float32x2_t __p0) { - uint8x8_t __ret; - __ret = (uint8x8_t)(__p0); - return __ret; -} -#else -__ai uint8x8_t vreinterpret_u8_f32(float32x2_t __p0) { - uint8x8_t __ret; - __ret = (uint8x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vreinterpret_u8_f16(float16x4_t __p0) { - uint8x8_t __ret; - __ret = (uint8x8_t)(__p0); - return __ret; -} -#else -__ai uint8x8_t vreinterpret_u8_f16(float16x4_t __p0) { - uint8x8_t __ret; - __ret = (uint8x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vreinterpret_u8_s32(int32x2_t __p0) { - uint8x8_t __ret; - __ret = (uint8x8_t)(__p0); - return __ret; -} -#else -__ai uint8x8_t vreinterpret_u8_s32(int32x2_t __p0) { - uint8x8_t __ret; - __ret = (uint8x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vreinterpret_u8_s64(int64x1_t __p0) { - uint8x8_t __ret; - __ret = (uint8x8_t)(__p0); - return __ret; -} -#else -__ai uint8x8_t vreinterpret_u8_s64(int64x1_t __p0) { - uint8x8_t __ret; - __ret = (uint8x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vreinterpret_u8_s16(int16x4_t __p0) { - uint8x8_t __ret; - __ret = (uint8x8_t)(__p0); - return __ret; -} -#else -__ai uint8x8_t vreinterpret_u8_s16(int16x4_t __p0) { - uint8x8_t __ret; - __ret = (uint8x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vreinterpret_u32_p8(poly8x8_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t)(__p0); - return __ret; -} -#else -__ai uint32x2_t vreinterpret_u32_p8(poly8x8_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai 
uint32x2_t vreinterpret_u32_p64(poly64x1_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t)(__p0); - return __ret; -} -#else -__ai uint32x2_t vreinterpret_u32_p64(poly64x1_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vreinterpret_u32_p16(poly16x4_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t)(__p0); - return __ret; -} -#else -__ai uint32x2_t vreinterpret_u32_p16(poly16x4_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vreinterpret_u32_u8(uint8x8_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t)(__p0); - return __ret; -} -#else -__ai uint32x2_t vreinterpret_u32_u8(uint8x8_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vreinterpret_u32_u64(uint64x1_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t)(__p0); - return __ret; -} -#else -__ai uint32x2_t vreinterpret_u32_u64(uint64x1_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vreinterpret_u32_u16(uint16x4_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t)(__p0); - return __ret; -} -#else -__ai uint32x2_t vreinterpret_u32_u16(uint16x4_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vreinterpret_u32_s8(int8x8_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t)(__p0); - return __ret; -} -#else -__ai uint32x2_t vreinterpret_u32_s8(int8x8_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vreinterpret_u32_f64(float64x1_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t)(__p0); - return __ret; -} -#else -__ai uint32x2_t vreinterpret_u32_f64(float64x1_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vreinterpret_u32_f32(float32x2_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t)(__p0); - return __ret; -} -#else -__ai uint32x2_t vreinterpret_u32_f32(float32x2_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vreinterpret_u32_f16(float16x4_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t)(__p0); - return __ret; -} -#else -__ai uint32x2_t vreinterpret_u32_f16(float16x4_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vreinterpret_u32_s32(int32x2_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t)(__p0); - return __ret; -} -#else -__ai uint32x2_t vreinterpret_u32_s32(int32x2_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vreinterpret_u32_s64(int64x1_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t)(__p0); - return __ret; -} -#else -__ai uint32x2_t vreinterpret_u32_s64(int64x1_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vreinterpret_u32_s16(int16x4_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t)(__p0); - return __ret; -} -#else -__ai uint32x2_t vreinterpret_u32_s16(int16x4_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x1_t 
vreinterpret_u64_p8(poly8x8_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0); - return __ret; -} -#else -__ai uint64x1_t vreinterpret_u64_p8(poly8x8_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x1_t vreinterpret_u64_p64(poly64x1_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0); - return __ret; -} -#else -__ai uint64x1_t vreinterpret_u64_p64(poly64x1_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x1_t vreinterpret_u64_p16(poly16x4_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0); - return __ret; -} -#else -__ai uint64x1_t vreinterpret_u64_p16(poly16x4_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x1_t vreinterpret_u64_u8(uint8x8_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0); - return __ret; -} -#else -__ai uint64x1_t vreinterpret_u64_u8(uint8x8_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x1_t vreinterpret_u64_u32(uint32x2_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0); - return __ret; -} -#else -__ai uint64x1_t vreinterpret_u64_u32(uint32x2_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x1_t vreinterpret_u64_u16(uint16x4_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0); - return __ret; -} -#else -__ai uint64x1_t vreinterpret_u64_u16(uint16x4_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x1_t vreinterpret_u64_s8(int8x8_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0); - return __ret; -} -#else -__ai uint64x1_t vreinterpret_u64_s8(int8x8_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x1_t vreinterpret_u64_f64(float64x1_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0); - return __ret; -} -#else -__ai uint64x1_t vreinterpret_u64_f64(float64x1_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x1_t vreinterpret_u64_f32(float32x2_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0); - return __ret; -} -#else -__ai uint64x1_t vreinterpret_u64_f32(float32x2_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x1_t vreinterpret_u64_f16(float16x4_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0); - return __ret; -} -#else -__ai uint64x1_t vreinterpret_u64_f16(float16x4_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x1_t vreinterpret_u64_s32(int32x2_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0); - return __ret; -} -#else -__ai uint64x1_t vreinterpret_u64_s32(int32x2_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x1_t vreinterpret_u64_s64(int64x1_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0); - return __ret; -} -#else -__ai uint64x1_t vreinterpret_u64_s64(int64x1_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x1_t 
vreinterpret_u64_s16(int16x4_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0); - return __ret; -} -#else -__ai uint64x1_t vreinterpret_u64_s16(int16x4_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vreinterpret_u16_p8(poly8x8_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t)(__p0); - return __ret; -} -#else -__ai uint16x4_t vreinterpret_u16_p8(poly8x8_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vreinterpret_u16_p64(poly64x1_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t)(__p0); - return __ret; -} -#else -__ai uint16x4_t vreinterpret_u16_p64(poly64x1_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vreinterpret_u16_p16(poly16x4_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t)(__p0); - return __ret; -} -#else -__ai uint16x4_t vreinterpret_u16_p16(poly16x4_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vreinterpret_u16_u8(uint8x8_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t)(__p0); - return __ret; -} -#else -__ai uint16x4_t vreinterpret_u16_u8(uint8x8_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vreinterpret_u16_u32(uint32x2_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t)(__p0); - return __ret; -} -#else -__ai uint16x4_t vreinterpret_u16_u32(uint32x2_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vreinterpret_u16_u64(uint64x1_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t)(__p0); - return __ret; -} -#else -__ai uint16x4_t vreinterpret_u16_u64(uint64x1_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vreinterpret_u16_s8(int8x8_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t)(__p0); - return __ret; -} -#else -__ai uint16x4_t vreinterpret_u16_s8(int8x8_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vreinterpret_u16_f64(float64x1_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t)(__p0); - return __ret; -} -#else -__ai uint16x4_t vreinterpret_u16_f64(float64x1_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vreinterpret_u16_f32(float32x2_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t)(__p0); - return __ret; -} -#else -__ai uint16x4_t vreinterpret_u16_f32(float32x2_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vreinterpret_u16_f16(float16x4_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t)(__p0); - return __ret; -} -#else -__ai uint16x4_t vreinterpret_u16_f16(float16x4_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vreinterpret_u16_s32(int32x2_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t)(__p0); - return __ret; -} -#else -__ai uint16x4_t vreinterpret_u16_s32(int32x2_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t 
vreinterpret_u16_s64(int64x1_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t)(__p0); - return __ret; -} -#else -__ai uint16x4_t vreinterpret_u16_s64(int64x1_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vreinterpret_u16_s16(int16x4_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t)(__p0); - return __ret; -} -#else -__ai uint16x4_t vreinterpret_u16_s16(int16x4_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vreinterpret_s8_p8(poly8x8_t __p0) { - int8x8_t __ret; - __ret = (int8x8_t)(__p0); - return __ret; -} -#else -__ai int8x8_t vreinterpret_s8_p8(poly8x8_t __p0) { - int8x8_t __ret; - __ret = (int8x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vreinterpret_s8_p64(poly64x1_t __p0) { - int8x8_t __ret; - __ret = (int8x8_t)(__p0); - return __ret; -} -#else -__ai int8x8_t vreinterpret_s8_p64(poly64x1_t __p0) { - int8x8_t __ret; - __ret = (int8x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vreinterpret_s8_p16(poly16x4_t __p0) { - int8x8_t __ret; - __ret = (int8x8_t)(__p0); - return __ret; -} -#else -__ai int8x8_t vreinterpret_s8_p16(poly16x4_t __p0) { - int8x8_t __ret; - __ret = (int8x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vreinterpret_s8_u8(uint8x8_t __p0) { - int8x8_t __ret; - __ret = (int8x8_t)(__p0); - return __ret; -} -#else -__ai int8x8_t vreinterpret_s8_u8(uint8x8_t __p0) { - int8x8_t __ret; - __ret = (int8x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vreinterpret_s8_u32(uint32x2_t __p0) { - int8x8_t __ret; - __ret = (int8x8_t)(__p0); - return __ret; -} -#else -__ai int8x8_t vreinterpret_s8_u32(uint32x2_t __p0) { - int8x8_t __ret; - __ret = (int8x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vreinterpret_s8_u64(uint64x1_t __p0) { - int8x8_t __ret; - __ret = (int8x8_t)(__p0); - return __ret; -} -#else -__ai int8x8_t vreinterpret_s8_u64(uint64x1_t __p0) { - int8x8_t __ret; - __ret = (int8x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vreinterpret_s8_u16(uint16x4_t __p0) { - int8x8_t __ret; - __ret = (int8x8_t)(__p0); - return __ret; -} -#else -__ai int8x8_t vreinterpret_s8_u16(uint16x4_t __p0) { - int8x8_t __ret; - __ret = (int8x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vreinterpret_s8_f64(float64x1_t __p0) { - int8x8_t __ret; - __ret = (int8x8_t)(__p0); - return __ret; -} -#else -__ai int8x8_t vreinterpret_s8_f64(float64x1_t __p0) { - int8x8_t __ret; - __ret = (int8x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vreinterpret_s8_f32(float32x2_t __p0) { - int8x8_t __ret; - __ret = (int8x8_t)(__p0); - return __ret; -} -#else -__ai int8x8_t vreinterpret_s8_f32(float32x2_t __p0) { - int8x8_t __ret; - __ret = (int8x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vreinterpret_s8_f16(float16x4_t __p0) { - int8x8_t __ret; - __ret = (int8x8_t)(__p0); - return __ret; -} -#else -__ai int8x8_t vreinterpret_s8_f16(float16x4_t __p0) { - int8x8_t __ret; - __ret = (int8x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vreinterpret_s8_s32(int32x2_t __p0) { - int8x8_t __ret; - __ret = (int8x8_t)(__p0); - return __ret; -} -#else -__ai int8x8_t vreinterpret_s8_s32(int32x2_t __p0) { 
- int8x8_t __ret; - __ret = (int8x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vreinterpret_s8_s64(int64x1_t __p0) { - int8x8_t __ret; - __ret = (int8x8_t)(__p0); - return __ret; -} -#else -__ai int8x8_t vreinterpret_s8_s64(int64x1_t __p0) { - int8x8_t __ret; - __ret = (int8x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vreinterpret_s8_s16(int16x4_t __p0) { - int8x8_t __ret; - __ret = (int8x8_t)(__p0); - return __ret; -} -#else -__ai int8x8_t vreinterpret_s8_s16(int16x4_t __p0) { - int8x8_t __ret; - __ret = (int8x8_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64x1_t vreinterpret_f64_p8(poly8x8_t __p0) { - float64x1_t __ret; - __ret = (float64x1_t)(__p0); - return __ret; -} -#else -__ai float64x1_t vreinterpret_f64_p8(poly8x8_t __p0) { - float64x1_t __ret; - __ret = (float64x1_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64x1_t vreinterpret_f64_p64(poly64x1_t __p0) { - float64x1_t __ret; - __ret = (float64x1_t)(__p0); - return __ret; -} -#else -__ai float64x1_t vreinterpret_f64_p64(poly64x1_t __p0) { - float64x1_t __ret; - __ret = (float64x1_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64x1_t vreinterpret_f64_p16(poly16x4_t __p0) { - float64x1_t __ret; - __ret = (float64x1_t)(__p0); - return __ret; -} -#else -__ai float64x1_t vreinterpret_f64_p16(poly16x4_t __p0) { - float64x1_t __ret; - __ret = (float64x1_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64x1_t vreinterpret_f64_u8(uint8x8_t __p0) { - float64x1_t __ret; - __ret = (float64x1_t)(__p0); - return __ret; -} -#else -__ai float64x1_t vreinterpret_f64_u8(uint8x8_t __p0) { - float64x1_t __ret; - __ret = (float64x1_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64x1_t vreinterpret_f64_u32(uint32x2_t __p0) { - float64x1_t __ret; - __ret = (float64x1_t)(__p0); - return __ret; -} -#else -__ai float64x1_t vreinterpret_f64_u32(uint32x2_t __p0) { - float64x1_t __ret; - __ret = (float64x1_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64x1_t vreinterpret_f64_u64(uint64x1_t __p0) { - float64x1_t __ret; - __ret = (float64x1_t)(__p0); - return __ret; -} -#else -__ai float64x1_t vreinterpret_f64_u64(uint64x1_t __p0) { - float64x1_t __ret; - __ret = (float64x1_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64x1_t vreinterpret_f64_u16(uint16x4_t __p0) { - float64x1_t __ret; - __ret = (float64x1_t)(__p0); - return __ret; -} -#else -__ai float64x1_t vreinterpret_f64_u16(uint16x4_t __p0) { - float64x1_t __ret; - __ret = (float64x1_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64x1_t vreinterpret_f64_s8(int8x8_t __p0) { - float64x1_t __ret; - __ret = (float64x1_t)(__p0); - return __ret; -} -#else -__ai float64x1_t vreinterpret_f64_s8(int8x8_t __p0) { - float64x1_t __ret; - __ret = (float64x1_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64x1_t vreinterpret_f64_f32(float32x2_t __p0) { - float64x1_t __ret; - __ret = (float64x1_t)(__p0); - return __ret; -} -#else -__ai float64x1_t vreinterpret_f64_f32(float32x2_t __p0) { - float64x1_t __ret; - __ret = (float64x1_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64x1_t vreinterpret_f64_f16(float16x4_t __p0) { - float64x1_t __ret; - __ret = (float64x1_t)(__p0); - return __ret; -} -#else -__ai float64x1_t vreinterpret_f64_f16(float16x4_t __p0) { - 
float64x1_t __ret; - __ret = (float64x1_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64x1_t vreinterpret_f64_s32(int32x2_t __p0) { - float64x1_t __ret; - __ret = (float64x1_t)(__p0); - return __ret; -} -#else -__ai float64x1_t vreinterpret_f64_s32(int32x2_t __p0) { - float64x1_t __ret; - __ret = (float64x1_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64x1_t vreinterpret_f64_s64(int64x1_t __p0) { - float64x1_t __ret; - __ret = (float64x1_t)(__p0); - return __ret; -} -#else -__ai float64x1_t vreinterpret_f64_s64(int64x1_t __p0) { - float64x1_t __ret; - __ret = (float64x1_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64x1_t vreinterpret_f64_s16(int16x4_t __p0) { - float64x1_t __ret; - __ret = (float64x1_t)(__p0); - return __ret; -} -#else -__ai float64x1_t vreinterpret_f64_s16(int16x4_t __p0) { - float64x1_t __ret; - __ret = (float64x1_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vreinterpret_f32_p8(poly8x8_t __p0) { - float32x2_t __ret; - __ret = (float32x2_t)(__p0); - return __ret; -} -#else -__ai float32x2_t vreinterpret_f32_p8(poly8x8_t __p0) { - float32x2_t __ret; - __ret = (float32x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vreinterpret_f32_p64(poly64x1_t __p0) { - float32x2_t __ret; - __ret = (float32x2_t)(__p0); - return __ret; -} -#else -__ai float32x2_t vreinterpret_f32_p64(poly64x1_t __p0) { - float32x2_t __ret; - __ret = (float32x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vreinterpret_f32_p16(poly16x4_t __p0) { - float32x2_t __ret; - __ret = (float32x2_t)(__p0); - return __ret; -} -#else -__ai float32x2_t vreinterpret_f32_p16(poly16x4_t __p0) { - float32x2_t __ret; - __ret = (float32x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vreinterpret_f32_u8(uint8x8_t __p0) { - float32x2_t __ret; - __ret = (float32x2_t)(__p0); - return __ret; -} -#else -__ai float32x2_t vreinterpret_f32_u8(uint8x8_t __p0) { - float32x2_t __ret; - __ret = (float32x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vreinterpret_f32_u32(uint32x2_t __p0) { - float32x2_t __ret; - __ret = (float32x2_t)(__p0); - return __ret; -} -#else -__ai float32x2_t vreinterpret_f32_u32(uint32x2_t __p0) { - float32x2_t __ret; - __ret = (float32x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vreinterpret_f32_u64(uint64x1_t __p0) { - float32x2_t __ret; - __ret = (float32x2_t)(__p0); - return __ret; -} -#else -__ai float32x2_t vreinterpret_f32_u64(uint64x1_t __p0) { - float32x2_t __ret; - __ret = (float32x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vreinterpret_f32_u16(uint16x4_t __p0) { - float32x2_t __ret; - __ret = (float32x2_t)(__p0); - return __ret; -} -#else -__ai float32x2_t vreinterpret_f32_u16(uint16x4_t __p0) { - float32x2_t __ret; - __ret = (float32x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vreinterpret_f32_s8(int8x8_t __p0) { - float32x2_t __ret; - __ret = (float32x2_t)(__p0); - return __ret; -} -#else -__ai float32x2_t vreinterpret_f32_s8(int8x8_t __p0) { - float32x2_t __ret; - __ret = (float32x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vreinterpret_f32_f64(float64x1_t __p0) { - float32x2_t __ret; - __ret = (float32x2_t)(__p0); - return __ret; -} -#else -__ai float32x2_t 
vreinterpret_f32_f64(float64x1_t __p0) { - float32x2_t __ret; - __ret = (float32x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vreinterpret_f32_f16(float16x4_t __p0) { - float32x2_t __ret; - __ret = (float32x2_t)(__p0); - return __ret; -} -#else -__ai float32x2_t vreinterpret_f32_f16(float16x4_t __p0) { - float32x2_t __ret; - __ret = (float32x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vreinterpret_f32_s32(int32x2_t __p0) { - float32x2_t __ret; - __ret = (float32x2_t)(__p0); - return __ret; -} -#else -__ai float32x2_t vreinterpret_f32_s32(int32x2_t __p0) { - float32x2_t __ret; - __ret = (float32x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vreinterpret_f32_s64(int64x1_t __p0) { - float32x2_t __ret; - __ret = (float32x2_t)(__p0); - return __ret; -} -#else -__ai float32x2_t vreinterpret_f32_s64(int64x1_t __p0) { - float32x2_t __ret; - __ret = (float32x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vreinterpret_f32_s16(int16x4_t __p0) { - float32x2_t __ret; - __ret = (float32x2_t)(__p0); - return __ret; -} -#else -__ai float32x2_t vreinterpret_f32_s16(int16x4_t __p0) { - float32x2_t __ret; - __ret = (float32x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x4_t vreinterpret_f16_p8(poly8x8_t __p0) { - float16x4_t __ret; - __ret = (float16x4_t)(__p0); - return __ret; -} -#else -__ai float16x4_t vreinterpret_f16_p8(poly8x8_t __p0) { - float16x4_t __ret; - __ret = (float16x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x4_t vreinterpret_f16_p64(poly64x1_t __p0) { - float16x4_t __ret; - __ret = (float16x4_t)(__p0); - return __ret; -} -#else -__ai float16x4_t vreinterpret_f16_p64(poly64x1_t __p0) { - float16x4_t __ret; - __ret = (float16x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x4_t vreinterpret_f16_p16(poly16x4_t __p0) { - float16x4_t __ret; - __ret = (float16x4_t)(__p0); - return __ret; -} -#else -__ai float16x4_t vreinterpret_f16_p16(poly16x4_t __p0) { - float16x4_t __ret; - __ret = (float16x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x4_t vreinterpret_f16_u8(uint8x8_t __p0) { - float16x4_t __ret; - __ret = (float16x4_t)(__p0); - return __ret; -} -#else -__ai float16x4_t vreinterpret_f16_u8(uint8x8_t __p0) { - float16x4_t __ret; - __ret = (float16x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x4_t vreinterpret_f16_u32(uint32x2_t __p0) { - float16x4_t __ret; - __ret = (float16x4_t)(__p0); - return __ret; -} -#else -__ai float16x4_t vreinterpret_f16_u32(uint32x2_t __p0) { - float16x4_t __ret; - __ret = (float16x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x4_t vreinterpret_f16_u64(uint64x1_t __p0) { - float16x4_t __ret; - __ret = (float16x4_t)(__p0); - return __ret; -} -#else -__ai float16x4_t vreinterpret_f16_u64(uint64x1_t __p0) { - float16x4_t __ret; - __ret = (float16x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x4_t vreinterpret_f16_u16(uint16x4_t __p0) { - float16x4_t __ret; - __ret = (float16x4_t)(__p0); - return __ret; -} -#else -__ai float16x4_t vreinterpret_f16_u16(uint16x4_t __p0) { - float16x4_t __ret; - __ret = (float16x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x4_t vreinterpret_f16_s8(int8x8_t __p0) { - float16x4_t __ret; - __ret = (float16x4_t)(__p0); 
- return __ret; -} -#else -__ai float16x4_t vreinterpret_f16_s8(int8x8_t __p0) { - float16x4_t __ret; - __ret = (float16x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x4_t vreinterpret_f16_f64(float64x1_t __p0) { - float16x4_t __ret; - __ret = (float16x4_t)(__p0); - return __ret; -} -#else -__ai float16x4_t vreinterpret_f16_f64(float64x1_t __p0) { - float16x4_t __ret; - __ret = (float16x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x4_t vreinterpret_f16_f32(float32x2_t __p0) { - float16x4_t __ret; - __ret = (float16x4_t)(__p0); - return __ret; -} -#else -__ai float16x4_t vreinterpret_f16_f32(float32x2_t __p0) { - float16x4_t __ret; - __ret = (float16x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x4_t vreinterpret_f16_s32(int32x2_t __p0) { - float16x4_t __ret; - __ret = (float16x4_t)(__p0); - return __ret; -} -#else -__ai float16x4_t vreinterpret_f16_s32(int32x2_t __p0) { - float16x4_t __ret; - __ret = (float16x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x4_t vreinterpret_f16_s64(int64x1_t __p0) { - float16x4_t __ret; - __ret = (float16x4_t)(__p0); - return __ret; -} -#else -__ai float16x4_t vreinterpret_f16_s64(int64x1_t __p0) { - float16x4_t __ret; - __ret = (float16x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x4_t vreinterpret_f16_s16(int16x4_t __p0) { - float16x4_t __ret; - __ret = (float16x4_t)(__p0); - return __ret; -} -#else -__ai float16x4_t vreinterpret_f16_s16(int16x4_t __p0) { - float16x4_t __ret; - __ret = (float16x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vreinterpret_s32_p8(poly8x8_t __p0) { - int32x2_t __ret; - __ret = (int32x2_t)(__p0); - return __ret; -} -#else -__ai int32x2_t vreinterpret_s32_p8(poly8x8_t __p0) { - int32x2_t __ret; - __ret = (int32x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vreinterpret_s32_p64(poly64x1_t __p0) { - int32x2_t __ret; - __ret = (int32x2_t)(__p0); - return __ret; -} -#else -__ai int32x2_t vreinterpret_s32_p64(poly64x1_t __p0) { - int32x2_t __ret; - __ret = (int32x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vreinterpret_s32_p16(poly16x4_t __p0) { - int32x2_t __ret; - __ret = (int32x2_t)(__p0); - return __ret; -} -#else -__ai int32x2_t vreinterpret_s32_p16(poly16x4_t __p0) { - int32x2_t __ret; - __ret = (int32x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vreinterpret_s32_u8(uint8x8_t __p0) { - int32x2_t __ret; - __ret = (int32x2_t)(__p0); - return __ret; -} -#else -__ai int32x2_t vreinterpret_s32_u8(uint8x8_t __p0) { - int32x2_t __ret; - __ret = (int32x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vreinterpret_s32_u32(uint32x2_t __p0) { - int32x2_t __ret; - __ret = (int32x2_t)(__p0); - return __ret; -} -#else -__ai int32x2_t vreinterpret_s32_u32(uint32x2_t __p0) { - int32x2_t __ret; - __ret = (int32x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vreinterpret_s32_u64(uint64x1_t __p0) { - int32x2_t __ret; - __ret = (int32x2_t)(__p0); - return __ret; -} -#else -__ai int32x2_t vreinterpret_s32_u64(uint64x1_t __p0) { - int32x2_t __ret; - __ret = (int32x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vreinterpret_s32_u16(uint16x4_t __p0) { - int32x2_t __ret; - __ret = (int32x2_t)(__p0); - return __ret; -} -#else -__ai 
int32x2_t vreinterpret_s32_u16(uint16x4_t __p0) { - int32x2_t __ret; - __ret = (int32x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vreinterpret_s32_s8(int8x8_t __p0) { - int32x2_t __ret; - __ret = (int32x2_t)(__p0); - return __ret; -} -#else -__ai int32x2_t vreinterpret_s32_s8(int8x8_t __p0) { - int32x2_t __ret; - __ret = (int32x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vreinterpret_s32_f64(float64x1_t __p0) { - int32x2_t __ret; - __ret = (int32x2_t)(__p0); - return __ret; -} -#else -__ai int32x2_t vreinterpret_s32_f64(float64x1_t __p0) { - int32x2_t __ret; - __ret = (int32x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vreinterpret_s32_f32(float32x2_t __p0) { - int32x2_t __ret; - __ret = (int32x2_t)(__p0); - return __ret; -} -#else -__ai int32x2_t vreinterpret_s32_f32(float32x2_t __p0) { - int32x2_t __ret; - __ret = (int32x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vreinterpret_s32_f16(float16x4_t __p0) { - int32x2_t __ret; - __ret = (int32x2_t)(__p0); - return __ret; -} -#else -__ai int32x2_t vreinterpret_s32_f16(float16x4_t __p0) { - int32x2_t __ret; - __ret = (int32x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vreinterpret_s32_s64(int64x1_t __p0) { - int32x2_t __ret; - __ret = (int32x2_t)(__p0); - return __ret; -} -#else -__ai int32x2_t vreinterpret_s32_s64(int64x1_t __p0) { - int32x2_t __ret; - __ret = (int32x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vreinterpret_s32_s16(int16x4_t __p0) { - int32x2_t __ret; - __ret = (int32x2_t)(__p0); - return __ret; -} -#else -__ai int32x2_t vreinterpret_s32_s16(int16x4_t __p0) { - int32x2_t __ret; - __ret = (int32x2_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x1_t vreinterpret_s64_p8(poly8x8_t __p0) { - int64x1_t __ret; - __ret = (int64x1_t)(__p0); - return __ret; -} -#else -__ai int64x1_t vreinterpret_s64_p8(poly8x8_t __p0) { - int64x1_t __ret; - __ret = (int64x1_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x1_t vreinterpret_s64_p64(poly64x1_t __p0) { - int64x1_t __ret; - __ret = (int64x1_t)(__p0); - return __ret; -} -#else -__ai int64x1_t vreinterpret_s64_p64(poly64x1_t __p0) { - int64x1_t __ret; - __ret = (int64x1_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x1_t vreinterpret_s64_p16(poly16x4_t __p0) { - int64x1_t __ret; - __ret = (int64x1_t)(__p0); - return __ret; -} -#else -__ai int64x1_t vreinterpret_s64_p16(poly16x4_t __p0) { - int64x1_t __ret; - __ret = (int64x1_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x1_t vreinterpret_s64_u8(uint8x8_t __p0) { - int64x1_t __ret; - __ret = (int64x1_t)(__p0); - return __ret; -} -#else -__ai int64x1_t vreinterpret_s64_u8(uint8x8_t __p0) { - int64x1_t __ret; - __ret = (int64x1_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x1_t vreinterpret_s64_u32(uint32x2_t __p0) { - int64x1_t __ret; - __ret = (int64x1_t)(__p0); - return __ret; -} -#else -__ai int64x1_t vreinterpret_s64_u32(uint32x2_t __p0) { - int64x1_t __ret; - __ret = (int64x1_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x1_t vreinterpret_s64_u64(uint64x1_t __p0) { - int64x1_t __ret; - __ret = (int64x1_t)(__p0); - return __ret; -} -#else -__ai int64x1_t vreinterpret_s64_u64(uint64x1_t __p0) { - int64x1_t __ret; - __ret = (int64x1_t)(__p0); 
- return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x1_t vreinterpret_s64_u16(uint16x4_t __p0) { - int64x1_t __ret; - __ret = (int64x1_t)(__p0); - return __ret; -} -#else -__ai int64x1_t vreinterpret_s64_u16(uint16x4_t __p0) { - int64x1_t __ret; - __ret = (int64x1_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x1_t vreinterpret_s64_s8(int8x8_t __p0) { - int64x1_t __ret; - __ret = (int64x1_t)(__p0); - return __ret; -} -#else -__ai int64x1_t vreinterpret_s64_s8(int8x8_t __p0) { - int64x1_t __ret; - __ret = (int64x1_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x1_t vreinterpret_s64_f64(float64x1_t __p0) { - int64x1_t __ret; - __ret = (int64x1_t)(__p0); - return __ret; -} -#else -__ai int64x1_t vreinterpret_s64_f64(float64x1_t __p0) { - int64x1_t __ret; - __ret = (int64x1_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x1_t vreinterpret_s64_f32(float32x2_t __p0) { - int64x1_t __ret; - __ret = (int64x1_t)(__p0); - return __ret; -} -#else -__ai int64x1_t vreinterpret_s64_f32(float32x2_t __p0) { - int64x1_t __ret; - __ret = (int64x1_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x1_t vreinterpret_s64_f16(float16x4_t __p0) { - int64x1_t __ret; - __ret = (int64x1_t)(__p0); - return __ret; -} -#else -__ai int64x1_t vreinterpret_s64_f16(float16x4_t __p0) { - int64x1_t __ret; - __ret = (int64x1_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x1_t vreinterpret_s64_s32(int32x2_t __p0) { - int64x1_t __ret; - __ret = (int64x1_t)(__p0); - return __ret; -} -#else -__ai int64x1_t vreinterpret_s64_s32(int32x2_t __p0) { - int64x1_t __ret; - __ret = (int64x1_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x1_t vreinterpret_s64_s16(int16x4_t __p0) { - int64x1_t __ret; - __ret = (int64x1_t)(__p0); - return __ret; -} -#else -__ai int64x1_t vreinterpret_s64_s16(int16x4_t __p0) { - int64x1_t __ret; - __ret = (int64x1_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vreinterpret_s16_p8(poly8x8_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t)(__p0); - return __ret; -} -#else -__ai int16x4_t vreinterpret_s16_p8(poly8x8_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vreinterpret_s16_p64(poly64x1_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t)(__p0); - return __ret; -} -#else -__ai int16x4_t vreinterpret_s16_p64(poly64x1_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vreinterpret_s16_p16(poly16x4_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t)(__p0); - return __ret; -} -#else -__ai int16x4_t vreinterpret_s16_p16(poly16x4_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vreinterpret_s16_u8(uint8x8_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t)(__p0); - return __ret; -} -#else -__ai int16x4_t vreinterpret_s16_u8(uint8x8_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vreinterpret_s16_u32(uint32x2_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t)(__p0); - return __ret; -} -#else -__ai int16x4_t vreinterpret_s16_u32(uint32x2_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x4_t 
vreinterpret_s16_u64(uint64x1_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t)(__p0); - return __ret; -} -#else -__ai int16x4_t vreinterpret_s16_u64(uint64x1_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vreinterpret_s16_u16(uint16x4_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t)(__p0); - return __ret; -} -#else -__ai int16x4_t vreinterpret_s16_u16(uint16x4_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vreinterpret_s16_s8(int8x8_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t)(__p0); - return __ret; -} -#else -__ai int16x4_t vreinterpret_s16_s8(int8x8_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vreinterpret_s16_f64(float64x1_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t)(__p0); - return __ret; -} -#else -__ai int16x4_t vreinterpret_s16_f64(float64x1_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vreinterpret_s16_f32(float32x2_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t)(__p0); - return __ret; -} -#else -__ai int16x4_t vreinterpret_s16_f32(float32x2_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vreinterpret_s16_f16(float16x4_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t)(__p0); - return __ret; -} -#else -__ai int16x4_t vreinterpret_s16_f16(float16x4_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vreinterpret_s16_s32(int32x2_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t)(__p0); - return __ret; -} -#else -__ai int16x4_t vreinterpret_s16_s32(int32x2_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vreinterpret_s16_s64(int64x1_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t)(__p0); - return __ret; -} -#else -__ai int16x4_t vreinterpret_s16_s64(int64x1_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t)(__p0); - return __ret; -} -#endif - -#endif -#if __ARM_ARCH >= 8 && defined(__aarch64__) && defined(__ARM_FEATURE_DIRECTED_ROUNDING) -#ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vrndq_f64(float64x2_t __p0) { - float64x2_t __ret; - __ret = (float64x2_t) __builtin_neon_vrndq_v((int8x16_t)__p0, 42); - return __ret; -} -#else -__ai float64x2_t vrndq_f64(float64x2_t __p0) { - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float64x2_t __ret; - __ret = (float64x2_t) __builtin_neon_vrndq_v((int8x16_t)__rev0, 42); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64x1_t vrnd_f64(float64x1_t __p0) { - float64x1_t __ret; - __ret = (float64x1_t) __builtin_neon_vrnd_v((int8x8_t)__p0, 10); - return __ret; -} -#else -__ai float64x1_t vrnd_f64(float64x1_t __p0) { - float64x1_t __ret; - __ret = (float64x1_t) __builtin_neon_vrnd_v((int8x8_t)__p0, 10); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vrndaq_f64(float64x2_t __p0) { - float64x2_t __ret; - __ret = (float64x2_t) __builtin_neon_vrndaq_v((int8x16_t)__p0, 42); - return __ret; -} -#else -__ai float64x2_t vrndaq_f64(float64x2_t __p0) { - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float64x2_t __ret; - __ret = 
(float64x2_t) __builtin_neon_vrndaq_v((int8x16_t)__rev0, 42); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64x1_t vrnda_f64(float64x1_t __p0) { - float64x1_t __ret; - __ret = (float64x1_t) __builtin_neon_vrnda_v((int8x8_t)__p0, 10); - return __ret; -} -#else -__ai float64x1_t vrnda_f64(float64x1_t __p0) { - float64x1_t __ret; - __ret = (float64x1_t) __builtin_neon_vrnda_v((int8x8_t)__p0, 10); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vrndiq_f64(float64x2_t __p0) { - float64x2_t __ret; - __ret = (float64x2_t) __builtin_neon_vrndiq_v((int8x16_t)__p0, 42); - return __ret; -} -#else -__ai float64x2_t vrndiq_f64(float64x2_t __p0) { - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float64x2_t __ret; - __ret = (float64x2_t) __builtin_neon_vrndiq_v((int8x16_t)__rev0, 42); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vrndiq_f32(float32x4_t __p0) { - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vrndiq_v((int8x16_t)__p0, 41); - return __ret; -} -#else -__ai float32x4_t vrndiq_f32(float32x4_t __p0) { - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vrndiq_v((int8x16_t)__rev0, 41); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64x1_t vrndi_f64(float64x1_t __p0) { - float64x1_t __ret; - __ret = (float64x1_t) __builtin_neon_vrndi_v((int8x8_t)__p0, 10); - return __ret; -} -#else -__ai float64x1_t vrndi_f64(float64x1_t __p0) { - float64x1_t __ret; - __ret = (float64x1_t) __builtin_neon_vrndi_v((int8x8_t)__p0, 10); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vrndi_f32(float32x2_t __p0) { - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vrndi_v((int8x8_t)__p0, 9); - return __ret; -} -#else -__ai float32x2_t vrndi_f32(float32x2_t __p0) { - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vrndi_v((int8x8_t)__rev0, 9); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vrndmq_f64(float64x2_t __p0) { - float64x2_t __ret; - __ret = (float64x2_t) __builtin_neon_vrndmq_v((int8x16_t)__p0, 42); - return __ret; -} -#else -__ai float64x2_t vrndmq_f64(float64x2_t __p0) { - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float64x2_t __ret; - __ret = (float64x2_t) __builtin_neon_vrndmq_v((int8x16_t)__rev0, 42); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64x1_t vrndm_f64(float64x1_t __p0) { - float64x1_t __ret; - __ret = (float64x1_t) __builtin_neon_vrndm_v((int8x8_t)__p0, 10); - return __ret; -} -#else -__ai float64x1_t vrndm_f64(float64x1_t __p0) { - float64x1_t __ret; - __ret = (float64x1_t) __builtin_neon_vrndm_v((int8x8_t)__p0, 10); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vrndnq_f64(float64x2_t __p0) { - float64x2_t __ret; - __ret = (float64x2_t) __builtin_neon_vrndnq_v((int8x16_t)__p0, 42); - return __ret; -} -#else -__ai float64x2_t vrndnq_f64(float64x2_t __p0) { - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float64x2_t __ret; - __ret = (float64x2_t) 
__builtin_neon_vrndnq_v((int8x16_t)__rev0, 42); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64x1_t vrndn_f64(float64x1_t __p0) { - float64x1_t __ret; - __ret = (float64x1_t) __builtin_neon_vrndn_v((int8x8_t)__p0, 10); - return __ret; -} -#else -__ai float64x1_t vrndn_f64(float64x1_t __p0) { - float64x1_t __ret; - __ret = (float64x1_t) __builtin_neon_vrndn_v((int8x8_t)__p0, 10); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vrndpq_f64(float64x2_t __p0) { - float64x2_t __ret; - __ret = (float64x2_t) __builtin_neon_vrndpq_v((int8x16_t)__p0, 42); - return __ret; -} -#else -__ai float64x2_t vrndpq_f64(float64x2_t __p0) { - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float64x2_t __ret; - __ret = (float64x2_t) __builtin_neon_vrndpq_v((int8x16_t)__rev0, 42); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64x1_t vrndp_f64(float64x1_t __p0) { - float64x1_t __ret; - __ret = (float64x1_t) __builtin_neon_vrndp_v((int8x8_t)__p0, 10); - return __ret; -} -#else -__ai float64x1_t vrndp_f64(float64x1_t __p0) { - float64x1_t __ret; - __ret = (float64x1_t) __builtin_neon_vrndp_v((int8x8_t)__p0, 10); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vrndxq_f64(float64x2_t __p0) { - float64x2_t __ret; - __ret = (float64x2_t) __builtin_neon_vrndxq_v((int8x16_t)__p0, 42); - return __ret; -} -#else -__ai float64x2_t vrndxq_f64(float64x2_t __p0) { - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float64x2_t __ret; - __ret = (float64x2_t) __builtin_neon_vrndxq_v((int8x16_t)__rev0, 42); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64x1_t vrndx_f64(float64x1_t __p0) { - float64x1_t __ret; - __ret = (float64x1_t) __builtin_neon_vrndx_v((int8x8_t)__p0, 10); - return __ret; -} -#else -__ai float64x1_t vrndx_f64(float64x1_t __p0) { - float64x1_t __ret; - __ret = (float64x1_t) __builtin_neon_vrndx_v((int8x8_t)__p0, 10); - return __ret; -} -#endif - -#endif -#if __ARM_ARCH >= 8 && defined(__aarch64__) && defined(__ARM_FEATURE_NUMERIC_MAXMIN) -#ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vmaxnmq_f64(float64x2_t __p0, float64x2_t __p1) { - float64x2_t __ret; - __ret = (float64x2_t) __builtin_neon_vmaxnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 42); - return __ret; -} -#else -__ai float64x2_t vmaxnmq_f64(float64x2_t __p0, float64x2_t __p1) { - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - float64x2_t __ret; - __ret = (float64x2_t) __builtin_neon_vmaxnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64x1_t vmaxnm_f64(float64x1_t __p0, float64x1_t __p1) { - float64x1_t __ret; - __ret = (float64x1_t) __builtin_neon_vmaxnm_v((int8x8_t)__p0, (int8x8_t)__p1, 10); - return __ret; -} -#else -__ai float64x1_t vmaxnm_f64(float64x1_t __p0, float64x1_t __p1) { - float64x1_t __ret; - __ret = (float64x1_t) __builtin_neon_vmaxnm_v((int8x8_t)__p0, (int8x8_t)__p1, 10); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vminnmq_f64(float64x2_t __p0, float64x2_t __p1) { - float64x2_t __ret; - __ret = (float64x2_t) __builtin_neon_vminnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 42); - 
return __ret; -} -#else -__ai float64x2_t vminnmq_f64(float64x2_t __p0, float64x2_t __p1) { - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - float64x2_t __ret; - __ret = (float64x2_t) __builtin_neon_vminnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64x1_t vminnm_f64(float64x1_t __p0, float64x1_t __p1) { - float64x1_t __ret; - __ret = (float64x1_t) __builtin_neon_vminnm_v((int8x8_t)__p0, (int8x8_t)__p1, 10); - return __ret; -} -#else -__ai float64x1_t vminnm_f64(float64x1_t __p0, float64x1_t __p1) { - float64x1_t __ret; - __ret = (float64x1_t) __builtin_neon_vminnm_v((int8x8_t)__p0, (int8x8_t)__p1, 10); - return __ret; -} -#endif - -#endif -#if __ARM_FEATURE_CRYPTO -#ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vaesdq_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16_t __ret; - __ret = (uint8x16_t) __builtin_neon_vaesdq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); - return __ret; -} -#else -__ai uint8x16_t vaesdq_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __ret; - __ret = (uint8x16_t) __builtin_neon_vaesdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vaeseq_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16_t __ret; - __ret = (uint8x16_t) __builtin_neon_vaeseq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); - return __ret; -} -#else -__ai uint8x16_t vaeseq_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __ret; - __ret = (uint8x16_t) __builtin_neon_vaeseq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vaesimcq_u8(uint8x16_t __p0) { - uint8x16_t __ret; - __ret = (uint8x16_t) __builtin_neon_vaesimcq_v((int8x16_t)__p0, 48); - return __ret; -} -#else -__ai uint8x16_t vaesimcq_u8(uint8x16_t __p0) { - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __ret; - __ret = (uint8x16_t) __builtin_neon_vaesimcq_v((int8x16_t)__rev0, 48); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vaesmcq_u8(uint8x16_t __p0) { - uint8x16_t __ret; - __ret = (uint8x16_t) __builtin_neon_vaesmcq_v((int8x16_t)__p0, 48); - return __ret; -} -#else -__ai uint8x16_t vaesmcq_u8(uint8x16_t __p0) { - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __ret; - __ret = (uint8x16_t) __builtin_neon_vaesmcq_v((int8x16_t)__rev0, 48); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 
4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vsha1cq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) { - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vsha1cq_u32((int8x16_t)__p0, __p1, (int8x16_t)__p2); - return __ret; -} -#else -__ai uint32x4_t vsha1cq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) { - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vsha1cq_u32((int8x16_t)__rev0, __p1, (int8x16_t)__rev2); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32_t vsha1h_u32(uint32_t __p0) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vsha1h_u32(__p0); - return __ret; -} -#else -__ai uint32_t vsha1h_u32(uint32_t __p0) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vsha1h_u32(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vsha1mq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) { - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vsha1mq_u32((int8x16_t)__p0, __p1, (int8x16_t)__p2); - return __ret; -} -#else -__ai uint32x4_t vsha1mq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) { - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vsha1mq_u32((int8x16_t)__rev0, __p1, (int8x16_t)__rev2); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vsha1pq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) { - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vsha1pq_u32((int8x16_t)__p0, __p1, (int8x16_t)__p2); - return __ret; -} -#else -__ai uint32x4_t vsha1pq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) { - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vsha1pq_u32((int8x16_t)__rev0, __p1, (int8x16_t)__rev2); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vsha1su0q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vsha1su0q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50); - return __ret; -} -#else -__ai uint32x4_t vsha1su0q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vsha1su0q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vsha1su1q_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vsha1su1q_v((int8x16_t)__p0, (int8x16_t)__p1, 50); - return __ret; -} -#else -__ai uint32x4_t vsha1su1q_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vsha1su1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vsha256hq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vsha256hq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50); - return __ret; -} -#else -__ai uint32x4_t vsha256hq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vsha256hq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vsha256h2q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vsha256h2q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50); - return __ret; -} -#else -__ai uint32x4_t vsha256h2q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vsha256h2q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vsha256su0q_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vsha256su0q_v((int8x16_t)__p0, (int8x16_t)__p1, 50); - return __ret; -} -#else -__ai uint32x4_t vsha256su0q_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vsha256su0q_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vsha256su1q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vsha256su1q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50); - return __ret; -} -#else -__ai uint32x4_t vsha256su1q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vsha256su1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#endif -#if defined(__ARM_FEATURE_FMA) -#ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vfmaq_f32(float32x4_t __p0, float32x4_t __p1, 
float32x4_t __p2) { - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vfmaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); - return __ret; -} -#else -__ai float32x4_t vfmaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - float32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vfmaq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -__ai float32x4_t __noswap_vfmaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vfmaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vfma_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vfma_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); - return __ret; -} -#else -__ai float32x2_t vfma_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - float32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vfma_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -__ai float32x2_t __noswap_vfma_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vfma_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vfmsq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { - float32x4_t __ret; - __ret = vfmaq_f32(__p0, -__p1, __p2); - return __ret; -} -#else -__ai float32x4_t vfmsq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - float32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - float32x4_t __ret; - __ret = __noswap_vfmaq_f32(__rev0, -__rev1, __rev2); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vfms_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { - float32x2_t __ret; - __ret = vfma_f32(__p0, -__p1, __p2); - return __ret; -} -#else -__ai float32x2_t vfms_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - float32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); - float32x2_t __ret; - __ret = __noswap_vfma_f32(__rev0, -__rev1, __rev2); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#endif -#if defined(__ARM_FEATURE_FP16_SCALAR_ARITHMETIC) -#ifdef __LITTLE_ENDIAN__ -__ai float16_t vabdh_f16(float16_t __p0, float16_t __p1) { - float16_t __ret; - __ret = (float16_t) __builtin_neon_vabdh_f16(__p0, __p1); - return __ret; -} -#else -__ai 
float16_t vabdh_f16(float16_t __p0, float16_t __p1) { - float16_t __ret; - __ret = (float16_t) __builtin_neon_vabdh_f16(__p0, __p1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16_t vabsh_f16(float16_t __p0) { - float16_t __ret; - __ret = (float16_t) __builtin_neon_vabsh_f16(__p0); - return __ret; -} -#else -__ai float16_t vabsh_f16(float16_t __p0) { - float16_t __ret; - __ret = (float16_t) __builtin_neon_vabsh_f16(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16_t vaddh_f16(float16_t __p0, float16_t __p1) { - float16_t __ret; - __ret = (float16_t) __builtin_neon_vaddh_f16(__p0, __p1); - return __ret; -} -#else -__ai float16_t vaddh_f16(float16_t __p0, float16_t __p1) { - float16_t __ret; - __ret = (float16_t) __builtin_neon_vaddh_f16(__p0, __p1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32_t vcageh_f16(float16_t __p0, float16_t __p1) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vcageh_f16(__p0, __p1); - return __ret; -} -#else -__ai uint32_t vcageh_f16(float16_t __p0, float16_t __p1) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vcageh_f16(__p0, __p1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32_t vcagth_f16(float16_t __p0, float16_t __p1) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vcagth_f16(__p0, __p1); - return __ret; -} -#else -__ai uint32_t vcagth_f16(float16_t __p0, float16_t __p1) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vcagth_f16(__p0, __p1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32_t vcaleh_f16(float16_t __p0, float16_t __p1) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vcaleh_f16(__p0, __p1); - return __ret; -} -#else -__ai uint32_t vcaleh_f16(float16_t __p0, float16_t __p1) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vcaleh_f16(__p0, __p1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32_t vcalth_f16(float16_t __p0, float16_t __p1) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vcalth_f16(__p0, __p1); - return __ret; -} -#else -__ai uint32_t vcalth_f16(float16_t __p0, float16_t __p1) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vcalth_f16(__p0, __p1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32_t vceqh_f16(float16_t __p0, float16_t __p1) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vceqh_f16(__p0, __p1); - return __ret; -} -#else -__ai uint32_t vceqh_f16(float16_t __p0, float16_t __p1) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vceqh_f16(__p0, __p1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32_t vceqzh_f16(float16_t __p0) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vceqzh_f16(__p0); - return __ret; -} -#else -__ai uint32_t vceqzh_f16(float16_t __p0) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vceqzh_f16(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32_t vcgeh_f16(float16_t __p0, float16_t __p1) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vcgeh_f16(__p0, __p1); - return __ret; -} -#else -__ai uint32_t vcgeh_f16(float16_t __p0, float16_t __p1) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vcgeh_f16(__p0, __p1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32_t vcgezh_f16(float16_t __p0) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vcgezh_f16(__p0); - return __ret; -} -#else -__ai uint32_t vcgezh_f16(float16_t __p0) { - uint32_t __ret; - __ret = (uint32_t) 
__builtin_neon_vcgezh_f16(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32_t vcgth_f16(float16_t __p0, float16_t __p1) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vcgth_f16(__p0, __p1); - return __ret; -} -#else -__ai uint32_t vcgth_f16(float16_t __p0, float16_t __p1) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vcgth_f16(__p0, __p1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32_t vcgtzh_f16(float16_t __p0) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vcgtzh_f16(__p0); - return __ret; -} -#else -__ai uint32_t vcgtzh_f16(float16_t __p0) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vcgtzh_f16(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32_t vcleh_f16(float16_t __p0, float16_t __p1) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vcleh_f16(__p0, __p1); - return __ret; -} -#else -__ai uint32_t vcleh_f16(float16_t __p0, float16_t __p1) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vcleh_f16(__p0, __p1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32_t vclezh_f16(float16_t __p0) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vclezh_f16(__p0); - return __ret; -} -#else -__ai uint32_t vclezh_f16(float16_t __p0) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vclezh_f16(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32_t vclth_f16(float16_t __p0, float16_t __p1) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vclth_f16(__p0, __p1); - return __ret; -} -#else -__ai uint32_t vclth_f16(float16_t __p0, float16_t __p1) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vclth_f16(__p0, __p1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32_t vcltzh_f16(float16_t __p0) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vcltzh_f16(__p0); - return __ret; -} -#else -__ai uint32_t vcltzh_f16(float16_t __p0) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vcltzh_f16(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32_t vcvth_s16_f16(float16_t __p0) { - int32_t __ret; - __ret = (int32_t) __builtin_neon_vcvth_s16_f16(__p0); - return __ret; -} -#else -__ai int32_t vcvth_s16_f16(float16_t __p0) { - int32_t __ret; - __ret = (int32_t) __builtin_neon_vcvth_s16_f16(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32_t vcvth_s32_f16(float16_t __p0) { - int32_t __ret; - __ret = (int32_t) __builtin_neon_vcvth_s32_f16(__p0); - return __ret; -} -#else -__ai int32_t vcvth_s32_f16(float16_t __p0) { - int32_t __ret; - __ret = (int32_t) __builtin_neon_vcvth_s32_f16(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32_t vcvth_s64_f16(float16_t __p0) { - int32_t __ret; - __ret = (int32_t) __builtin_neon_vcvth_s64_f16(__p0); - return __ret; -} -#else -__ai int32_t vcvth_s64_f16(float16_t __p0) { - int32_t __ret; - __ret = (int32_t) __builtin_neon_vcvth_s64_f16(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32_t vcvth_u16_f16(float16_t __p0) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vcvth_u16_f16(__p0); - return __ret; -} -#else -__ai uint32_t vcvth_u16_f16(float16_t __p0) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vcvth_u16_f16(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32_t vcvth_u32_f16(float16_t __p0) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vcvth_u32_f16(__p0); - return __ret; -} -#else -__ai uint32_t vcvth_u32_f16(float16_t 
__p0) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vcvth_u32_f16(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32_t vcvth_u64_f16(float16_t __p0) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vcvth_u64_f16(__p0); - return __ret; -} -#else -__ai uint32_t vcvth_u64_f16(float16_t __p0) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vcvth_u64_f16(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32_t vcvtah_s16_f16(float16_t __p0) { - int32_t __ret; - __ret = (int32_t) __builtin_neon_vcvtah_s16_f16(__p0); - return __ret; -} -#else -__ai int32_t vcvtah_s16_f16(float16_t __p0) { - int32_t __ret; - __ret = (int32_t) __builtin_neon_vcvtah_s16_f16(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32_t vcvtah_s32_f16(float16_t __p0) { - int32_t __ret; - __ret = (int32_t) __builtin_neon_vcvtah_s32_f16(__p0); - return __ret; -} -#else -__ai int32_t vcvtah_s32_f16(float16_t __p0) { - int32_t __ret; - __ret = (int32_t) __builtin_neon_vcvtah_s32_f16(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32_t vcvtah_s64_f16(float16_t __p0) { - int32_t __ret; - __ret = (int32_t) __builtin_neon_vcvtah_s64_f16(__p0); - return __ret; -} -#else -__ai int32_t vcvtah_s64_f16(float16_t __p0) { - int32_t __ret; - __ret = (int32_t) __builtin_neon_vcvtah_s64_f16(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32_t vcvtah_u16_f16(float16_t __p0) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vcvtah_u16_f16(__p0); - return __ret; -} -#else -__ai uint32_t vcvtah_u16_f16(float16_t __p0) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vcvtah_u16_f16(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32_t vcvtah_u32_f16(float16_t __p0) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vcvtah_u32_f16(__p0); - return __ret; -} -#else -__ai uint32_t vcvtah_u32_f16(float16_t __p0) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vcvtah_u32_f16(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32_t vcvtah_u64_f16(float16_t __p0) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vcvtah_u64_f16(__p0); - return __ret; -} -#else -__ai uint32_t vcvtah_u64_f16(float16_t __p0) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vcvtah_u64_f16(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16_t vcvth_f16_s32(int32_t __p0) { - float16_t __ret; - __ret = (float16_t) __builtin_neon_vcvth_f16_s32(__p0); - return __ret; -} -#else -__ai float16_t vcvth_f16_s32(int32_t __p0) { - float16_t __ret; - __ret = (float16_t) __builtin_neon_vcvth_f16_s32(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16_t vcvth_f16_s64(int64_t __p0) { - float16_t __ret; - __ret = (float16_t) __builtin_neon_vcvth_f16_s64(__p0); - return __ret; -} -#else -__ai float16_t vcvth_f16_s64(int64_t __p0) { - float16_t __ret; - __ret = (float16_t) __builtin_neon_vcvth_f16_s64(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16_t vcvth_f16_s16(int16_t __p0) { - float16_t __ret; - __ret = (float16_t) __builtin_neon_vcvth_f16_s16(__p0); - return __ret; -} -#else -__ai float16_t vcvth_f16_s16(int16_t __p0) { - float16_t __ret; - __ret = (float16_t) __builtin_neon_vcvth_f16_s16(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16_t vcvth_f16_u32(uint32_t __p0) { - float16_t __ret; - __ret = (float16_t) __builtin_neon_vcvth_f16_u32(__p0); - return __ret; -} -#else 
-__ai float16_t vcvth_f16_u32(uint32_t __p0) { - float16_t __ret; - __ret = (float16_t) __builtin_neon_vcvth_f16_u32(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16_t vcvth_f16_u64(uint64_t __p0) { - float16_t __ret; - __ret = (float16_t) __builtin_neon_vcvth_f16_u64(__p0); - return __ret; -} -#else -__ai float16_t vcvth_f16_u64(uint64_t __p0) { - float16_t __ret; - __ret = (float16_t) __builtin_neon_vcvth_f16_u64(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16_t vcvth_f16_u16(uint16_t __p0) { - float16_t __ret; - __ret = (float16_t) __builtin_neon_vcvth_f16_u16(__p0); - return __ret; -} -#else -__ai float16_t vcvth_f16_u16(uint16_t __p0) { - float16_t __ret; - __ret = (float16_t) __builtin_neon_vcvth_f16_u16(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32_t vcvtmh_s16_f16(float16_t __p0) { - int32_t __ret; - __ret = (int32_t) __builtin_neon_vcvtmh_s16_f16(__p0); - return __ret; -} -#else -__ai int32_t vcvtmh_s16_f16(float16_t __p0) { - int32_t __ret; - __ret = (int32_t) __builtin_neon_vcvtmh_s16_f16(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32_t vcvtmh_s32_f16(float16_t __p0) { - int32_t __ret; - __ret = (int32_t) __builtin_neon_vcvtmh_s32_f16(__p0); - return __ret; -} -#else -__ai int32_t vcvtmh_s32_f16(float16_t __p0) { - int32_t __ret; - __ret = (int32_t) __builtin_neon_vcvtmh_s32_f16(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32_t vcvtmh_s64_f16(float16_t __p0) { - int32_t __ret; - __ret = (int32_t) __builtin_neon_vcvtmh_s64_f16(__p0); - return __ret; -} -#else -__ai int32_t vcvtmh_s64_f16(float16_t __p0) { - int32_t __ret; - __ret = (int32_t) __builtin_neon_vcvtmh_s64_f16(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32_t vcvtmh_u16_f16(float16_t __p0) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vcvtmh_u16_f16(__p0); - return __ret; -} -#else -__ai uint32_t vcvtmh_u16_f16(float16_t __p0) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vcvtmh_u16_f16(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32_t vcvtmh_u32_f16(float16_t __p0) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vcvtmh_u32_f16(__p0); - return __ret; -} -#else -__ai uint32_t vcvtmh_u32_f16(float16_t __p0) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vcvtmh_u32_f16(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32_t vcvtmh_u64_f16(float16_t __p0) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vcvtmh_u64_f16(__p0); - return __ret; -} -#else -__ai uint32_t vcvtmh_u64_f16(float16_t __p0) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vcvtmh_u64_f16(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32_t vcvtnh_s16_f16(float16_t __p0) { - int32_t __ret; - __ret = (int32_t) __builtin_neon_vcvtnh_s16_f16(__p0); - return __ret; -} -#else -__ai int32_t vcvtnh_s16_f16(float16_t __p0) { - int32_t __ret; - __ret = (int32_t) __builtin_neon_vcvtnh_s16_f16(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32_t vcvtnh_s32_f16(float16_t __p0) { - int32_t __ret; - __ret = (int32_t) __builtin_neon_vcvtnh_s32_f16(__p0); - return __ret; -} -#else -__ai int32_t vcvtnh_s32_f16(float16_t __p0) { - int32_t __ret; - __ret = (int32_t) __builtin_neon_vcvtnh_s32_f16(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32_t vcvtnh_s64_f16(float16_t __p0) { - int32_t __ret; - __ret = (int32_t) 
__builtin_neon_vcvtnh_s64_f16(__p0); - return __ret; -} -#else -__ai int32_t vcvtnh_s64_f16(float16_t __p0) { - int32_t __ret; - __ret = (int32_t) __builtin_neon_vcvtnh_s64_f16(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32_t vcvtnh_u16_f16(float16_t __p0) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vcvtnh_u16_f16(__p0); - return __ret; -} -#else -__ai uint32_t vcvtnh_u16_f16(float16_t __p0) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vcvtnh_u16_f16(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32_t vcvtnh_u32_f16(float16_t __p0) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vcvtnh_u32_f16(__p0); - return __ret; -} -#else -__ai uint32_t vcvtnh_u32_f16(float16_t __p0) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vcvtnh_u32_f16(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32_t vcvtnh_u64_f16(float16_t __p0) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vcvtnh_u64_f16(__p0); - return __ret; -} -#else -__ai uint32_t vcvtnh_u64_f16(float16_t __p0) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vcvtnh_u64_f16(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32_t vcvtph_s16_f16(float16_t __p0) { - int32_t __ret; - __ret = (int32_t) __builtin_neon_vcvtph_s16_f16(__p0); - return __ret; -} -#else -__ai int32_t vcvtph_s16_f16(float16_t __p0) { - int32_t __ret; - __ret = (int32_t) __builtin_neon_vcvtph_s16_f16(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32_t vcvtph_s32_f16(float16_t __p0) { - int32_t __ret; - __ret = (int32_t) __builtin_neon_vcvtph_s32_f16(__p0); - return __ret; -} -#else -__ai int32_t vcvtph_s32_f16(float16_t __p0) { - int32_t __ret; - __ret = (int32_t) __builtin_neon_vcvtph_s32_f16(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32_t vcvtph_s64_f16(float16_t __p0) { - int32_t __ret; - __ret = (int32_t) __builtin_neon_vcvtph_s64_f16(__p0); - return __ret; -} -#else -__ai int32_t vcvtph_s64_f16(float16_t __p0) { - int32_t __ret; - __ret = (int32_t) __builtin_neon_vcvtph_s64_f16(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32_t vcvtph_u16_f16(float16_t __p0) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vcvtph_u16_f16(__p0); - return __ret; -} -#else -__ai uint32_t vcvtph_u16_f16(float16_t __p0) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vcvtph_u16_f16(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32_t vcvtph_u32_f16(float16_t __p0) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vcvtph_u32_f16(__p0); - return __ret; -} -#else -__ai uint32_t vcvtph_u32_f16(float16_t __p0) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vcvtph_u32_f16(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32_t vcvtph_u64_f16(float16_t __p0) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vcvtph_u64_f16(__p0); - return __ret; -} -#else -__ai uint32_t vcvtph_u64_f16(float16_t __p0) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vcvtph_u64_f16(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16_t vfmah_f16(float16_t __p0, float16_t __p1, float16_t __p2) { - float16_t __ret; - __ret = (float16_t) __builtin_neon_vfmah_f16(__p0, __p1, __p2); - return __ret; -} -#else -__ai float16_t vfmah_f16(float16_t __p0, float16_t __p1, float16_t __p2) { - float16_t __ret; - __ret = (float16_t) __builtin_neon_vfmah_f16(__p0, __p1, __p2); - return __ret; -} 
-#endif - -#ifdef __LITTLE_ENDIAN__ -#define vfmah_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ - float16_t __s0 = __p0; \ - float16_t __s1 = __p1; \ - float16x4_t __s2 = __p2; \ - float16_t __ret; \ - __ret = (float16_t) __builtin_neon_vfmah_lane_f16(__s0, __s1, (int8x8_t)__s2, __p3); \ - __ret; \ -}) -#else -#define vfmah_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ - float16_t __s0 = __p0; \ - float16_t __s1 = __p1; \ - float16x4_t __s2 = __p2; \ - float16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ - float16_t __ret; \ - __ret = (float16_t) __builtin_neon_vfmah_lane_f16(__s0, __s1, (int8x8_t)__rev2, __p3); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vfmah_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ - float16_t __s0 = __p0; \ - float16_t __s1 = __p1; \ - float16x8_t __s2 = __p2; \ - float16_t __ret; \ - __ret = (float16_t) __builtin_neon_vfmah_laneq_f16(__s0, __s1, (int8x16_t)__s2, __p3); \ - __ret; \ -}) -#else -#define vfmah_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ - float16_t __s0 = __p0; \ - float16_t __s1 = __p1; \ - float16x8_t __s2 = __p2; \ - float16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16_t __ret; \ - __ret = (float16_t) __builtin_neon_vfmah_laneq_f16(__s0, __s1, (int8x16_t)__rev2, __p3); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16_t vmaxh_f16(float16_t __p0, float16_t __p1) { - float16_t __ret; - __ret = (float16_t) __builtin_neon_vmaxh_f16(__p0, __p1); - return __ret; -} -#else -__ai float16_t vmaxh_f16(float16_t __p0, float16_t __p1) { - float16_t __ret; - __ret = (float16_t) __builtin_neon_vmaxh_f16(__p0, __p1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16_t vmaxnmh_f16(float16_t __p0, float16_t __p1) { - float16_t __ret; - __ret = (float16_t) __builtin_neon_vmaxnmh_f16(__p0, __p1); - return __ret; -} -#else -__ai float16_t vmaxnmh_f16(float16_t __p0, float16_t __p1) { - float16_t __ret; - __ret = (float16_t) __builtin_neon_vmaxnmh_f16(__p0, __p1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16_t vminh_f16(float16_t __p0, float16_t __p1) { - float16_t __ret; - __ret = (float16_t) __builtin_neon_vminh_f16(__p0, __p1); - return __ret; -} -#else -__ai float16_t vminh_f16(float16_t __p0, float16_t __p1) { - float16_t __ret; - __ret = (float16_t) __builtin_neon_vminh_f16(__p0, __p1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16_t vminnmh_f16(float16_t __p0, float16_t __p1) { - float16_t __ret; - __ret = (float16_t) __builtin_neon_vminnmh_f16(__p0, __p1); - return __ret; -} -#else -__ai float16_t vminnmh_f16(float16_t __p0, float16_t __p1) { - float16_t __ret; - __ret = (float16_t) __builtin_neon_vminnmh_f16(__p0, __p1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16_t vmulh_f16(float16_t __p0, float16_t __p1) { - float16_t __ret; - __ret = (float16_t) __builtin_neon_vmulh_f16(__p0, __p1); - return __ret; -} -#else -__ai float16_t vmulh_f16(float16_t __p0, float16_t __p1) { - float16_t __ret; - __ret = (float16_t) __builtin_neon_vmulh_f16(__p0, __p1); - return __ret; -} -__ai float16_t __noswap_vmulh_f16(float16_t __p0, float16_t __p1) { - float16_t __ret; - __ret = (float16_t) __builtin_neon_vmulh_f16(__p0, __p1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmulh_lane_f16(__p0_0, __p1_0, __p2_0) __extension__ ({ \ - float16_t __s0_0 = __p0_0; \ - float16x4_t __s1_0 = __p1_0; \ - float16_t __ret_0; \ - __ret_0 = 
vmulh_f16(__s0_0, vget_lane_f16(__s1_0, __p2_0)); \ - __ret_0; \ -}) -#else -#define vmulh_lane_f16(__p0_1, __p1_1, __p2_1) __extension__ ({ \ - float16_t __s0_1 = __p0_1; \ - float16x4_t __s1_1 = __p1_1; \ - float16x4_t __rev1_1; __rev1_1 = __builtin_shufflevector(__s1_1, __s1_1, 3, 2, 1, 0); \ - float16_t __ret_1; \ - __ret_1 = __noswap_vmulh_f16(__s0_1, __noswap_vget_lane_f16(__rev1_1, __p2_1)); \ - __ret_1; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmulh_laneq_f16(__p0_2, __p1_2, __p2_2) __extension__ ({ \ - float16_t __s0_2 = __p0_2; \ - float16x8_t __s1_2 = __p1_2; \ - float16_t __ret_2; \ - __ret_2 = vmulh_f16(__s0_2, vgetq_lane_f16(__s1_2, __p2_2)); \ - __ret_2; \ -}) -#else -#define vmulh_laneq_f16(__p0_3, __p1_3, __p2_3) __extension__ ({ \ - float16_t __s0_3 = __p0_3; \ - float16x8_t __s1_3 = __p1_3; \ - float16x8_t __rev1_3; __rev1_3 = __builtin_shufflevector(__s1_3, __s1_3, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16_t __ret_3; \ - __ret_3 = __noswap_vmulh_f16(__s0_3, __noswap_vgetq_lane_f16(__rev1_3, __p2_3)); \ - __ret_3; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16_t vmulxh_f16(float16_t __p0, float16_t __p1) { - float16_t __ret; - __ret = (float16_t) __builtin_neon_vmulxh_f16(__p0, __p1); - return __ret; -} -#else -__ai float16_t vmulxh_f16(float16_t __p0, float16_t __p1) { - float16_t __ret; - __ret = (float16_t) __builtin_neon_vmulxh_f16(__p0, __p1); - return __ret; -} -__ai float16_t __noswap_vmulxh_f16(float16_t __p0, float16_t __p1) { - float16_t __ret; - __ret = (float16_t) __builtin_neon_vmulxh_f16(__p0, __p1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmulxh_lane_f16(__p0_4, __p1_4, __p2_4) __extension__ ({ \ - float16_t __s0_4 = __p0_4; \ - float16x4_t __s1_4 = __p1_4; \ - float16_t __ret_4; \ - __ret_4 = vmulxh_f16(__s0_4, vget_lane_f16(__s1_4, __p2_4)); \ - __ret_4; \ -}) -#else -#define vmulxh_lane_f16(__p0_5, __p1_5, __p2_5) __extension__ ({ \ - float16_t __s0_5 = __p0_5; \ - float16x4_t __s1_5 = __p1_5; \ - float16x4_t __rev1_5; __rev1_5 = __builtin_shufflevector(__s1_5, __s1_5, 3, 2, 1, 0); \ - float16_t __ret_5; \ - __ret_5 = __noswap_vmulxh_f16(__s0_5, __noswap_vget_lane_f16(__rev1_5, __p2_5)); \ - __ret_5; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmulxh_laneq_f16(__p0_6, __p1_6, __p2_6) __extension__ ({ \ - float16_t __s0_6 = __p0_6; \ - float16x8_t __s1_6 = __p1_6; \ - float16_t __ret_6; \ - __ret_6 = vmulxh_f16(__s0_6, vgetq_lane_f16(__s1_6, __p2_6)); \ - __ret_6; \ -}) -#else -#define vmulxh_laneq_f16(__p0_7, __p1_7, __p2_7) __extension__ ({ \ - float16_t __s0_7 = __p0_7; \ - float16x8_t __s1_7 = __p1_7; \ - float16x8_t __rev1_7; __rev1_7 = __builtin_shufflevector(__s1_7, __s1_7, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16_t __ret_7; \ - __ret_7 = __noswap_vmulxh_f16(__s0_7, __noswap_vgetq_lane_f16(__rev1_7, __p2_7)); \ - __ret_7; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16_t vnegh_f16(float16_t __p0) { - float16_t __ret; - __ret = (float16_t) __builtin_neon_vnegh_f16(__p0); - return __ret; -} -#else -__ai float16_t vnegh_f16(float16_t __p0) { - float16_t __ret; - __ret = (float16_t) __builtin_neon_vnegh_f16(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16_t vrecpeh_f16(float16_t __p0) { - float16_t __ret; - __ret = (float16_t) __builtin_neon_vrecpeh_f16(__p0); - return __ret; -} -#else -__ai float16_t vrecpeh_f16(float16_t __p0) { - float16_t __ret; - __ret = (float16_t) __builtin_neon_vrecpeh_f16(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16_t 
vrecpsh_f16(float16_t __p0, float16_t __p1) { - float16_t __ret; - __ret = (float16_t) __builtin_neon_vrecpsh_f16(__p0, __p1); - return __ret; -} -#else -__ai float16_t vrecpsh_f16(float16_t __p0, float16_t __p1) { - float16_t __ret; - __ret = (float16_t) __builtin_neon_vrecpsh_f16(__p0, __p1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16_t vrecpxh_f16(float16_t __p0) { - float16_t __ret; - __ret = (float16_t) __builtin_neon_vrecpxh_f16(__p0); - return __ret; -} -#else -__ai float16_t vrecpxh_f16(float16_t __p0) { - float16_t __ret; - __ret = (float16_t) __builtin_neon_vrecpxh_f16(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16_t vrndh_f16(float16_t __p0) { - float16_t __ret; - __ret = (float16_t) __builtin_neon_vrndh_f16(__p0); - return __ret; -} -#else -__ai float16_t vrndh_f16(float16_t __p0) { - float16_t __ret; - __ret = (float16_t) __builtin_neon_vrndh_f16(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16_t vrndah_f16(float16_t __p0) { - float16_t __ret; - __ret = (float16_t) __builtin_neon_vrndah_f16(__p0); - return __ret; -} -#else -__ai float16_t vrndah_f16(float16_t __p0) { - float16_t __ret; - __ret = (float16_t) __builtin_neon_vrndah_f16(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16_t vrndih_f16(float16_t __p0) { - float16_t __ret; - __ret = (float16_t) __builtin_neon_vrndih_f16(__p0); - return __ret; -} -#else -__ai float16_t vrndih_f16(float16_t __p0) { - float16_t __ret; - __ret = (float16_t) __builtin_neon_vrndih_f16(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16_t vrndmh_f16(float16_t __p0) { - float16_t __ret; - __ret = (float16_t) __builtin_neon_vrndmh_f16(__p0); - return __ret; -} -#else -__ai float16_t vrndmh_f16(float16_t __p0) { - float16_t __ret; - __ret = (float16_t) __builtin_neon_vrndmh_f16(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16_t vrndnh_f16(float16_t __p0) { - float16_t __ret; - __ret = (float16_t) __builtin_neon_vrndnh_f16(__p0); - return __ret; -} -#else -__ai float16_t vrndnh_f16(float16_t __p0) { - float16_t __ret; - __ret = (float16_t) __builtin_neon_vrndnh_f16(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16_t vrndph_f16(float16_t __p0) { - float16_t __ret; - __ret = (float16_t) __builtin_neon_vrndph_f16(__p0); - return __ret; -} -#else -__ai float16_t vrndph_f16(float16_t __p0) { - float16_t __ret; - __ret = (float16_t) __builtin_neon_vrndph_f16(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16_t vrndxh_f16(float16_t __p0) { - float16_t __ret; - __ret = (float16_t) __builtin_neon_vrndxh_f16(__p0); - return __ret; -} -#else -__ai float16_t vrndxh_f16(float16_t __p0) { - float16_t __ret; - __ret = (float16_t) __builtin_neon_vrndxh_f16(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16_t vrsqrteh_f16(float16_t __p0) { - float16_t __ret; - __ret = (float16_t) __builtin_neon_vrsqrteh_f16(__p0); - return __ret; -} -#else -__ai float16_t vrsqrteh_f16(float16_t __p0) { - float16_t __ret; - __ret = (float16_t) __builtin_neon_vrsqrteh_f16(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16_t vrsqrtsh_f16(float16_t __p0, float16_t __p1) { - float16_t __ret; - __ret = (float16_t) __builtin_neon_vrsqrtsh_f16(__p0, __p1); - return __ret; -} -#else -__ai float16_t vrsqrtsh_f16(float16_t __p0, float16_t __p1) { - float16_t __ret; - __ret = (float16_t) __builtin_neon_vrsqrtsh_f16(__p0, __p1); - return 
__ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16_t vsubh_f16(float16_t __p0, float16_t __p1) { - float16_t __ret; - __ret = (float16_t) __builtin_neon_vsubh_f16(__p0, __p1); - return __ret; -} -#else -__ai float16_t vsubh_f16(float16_t __p0, float16_t __p1) { - float16_t __ret; - __ret = (float16_t) __builtin_neon_vsubh_f16(__p0, __p1); - return __ret; -} -#endif - -#endif -#if defined(__ARM_FEATURE_FP16_SCALAR_ARITHMETIC) && defined(__aarch64__) -#ifdef __LITTLE_ENDIAN__ -#define vcvth_n_f16_s32(__p0, __p1) __extension__ ({ \ - int32_t __s0 = __p0; \ - float16_t __ret; \ - __ret = (float16_t) __builtin_neon_vcvth_n_f16_s32(__s0, __p1); \ - __ret; \ -}) -#else -#define vcvth_n_f16_s32(__p0, __p1) __extension__ ({ \ - int32_t __s0 = __p0; \ - float16_t __ret; \ - __ret = (float16_t) __builtin_neon_vcvth_n_f16_s32(__s0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcvth_n_f16_s64(__p0, __p1) __extension__ ({ \ - int64_t __s0 = __p0; \ - float16_t __ret; \ - __ret = (float16_t) __builtin_neon_vcvth_n_f16_s64(__s0, __p1); \ - __ret; \ -}) -#else -#define vcvth_n_f16_s64(__p0, __p1) __extension__ ({ \ - int64_t __s0 = __p0; \ - float16_t __ret; \ - __ret = (float16_t) __builtin_neon_vcvth_n_f16_s64(__s0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcvth_n_f16_s16(__p0, __p1) __extension__ ({ \ - int16_t __s0 = __p0; \ - float16_t __ret; \ - __ret = (float16_t) __builtin_neon_vcvth_n_f16_s16(__s0, __p1); \ - __ret; \ -}) -#else -#define vcvth_n_f16_s16(__p0, __p1) __extension__ ({ \ - int16_t __s0 = __p0; \ - float16_t __ret; \ - __ret = (float16_t) __builtin_neon_vcvth_n_f16_s16(__s0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcvth_n_f16_u32(__p0, __p1) __extension__ ({ \ - uint32_t __s0 = __p0; \ - float16_t __ret; \ - __ret = (float16_t) __builtin_neon_vcvth_n_f16_u32(__s0, __p1); \ - __ret; \ -}) -#else -#define vcvth_n_f16_u32(__p0, __p1) __extension__ ({ \ - uint32_t __s0 = __p0; \ - float16_t __ret; \ - __ret = (float16_t) __builtin_neon_vcvth_n_f16_u32(__s0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcvth_n_f16_u64(__p0, __p1) __extension__ ({ \ - uint64_t __s0 = __p0; \ - float16_t __ret; \ - __ret = (float16_t) __builtin_neon_vcvth_n_f16_u64(__s0, __p1); \ - __ret; \ -}) -#else -#define vcvth_n_f16_u64(__p0, __p1) __extension__ ({ \ - uint64_t __s0 = __p0; \ - float16_t __ret; \ - __ret = (float16_t) __builtin_neon_vcvth_n_f16_u64(__s0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcvth_n_f16_u16(__p0, __p1) __extension__ ({ \ - uint16_t __s0 = __p0; \ - float16_t __ret; \ - __ret = (float16_t) __builtin_neon_vcvth_n_f16_u16(__s0, __p1); \ - __ret; \ -}) -#else -#define vcvth_n_f16_u16(__p0, __p1) __extension__ ({ \ - uint16_t __s0 = __p0; \ - float16_t __ret; \ - __ret = (float16_t) __builtin_neon_vcvth_n_f16_u16(__s0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcvth_n_s32_f16(__p0, __p1) __extension__ ({ \ - float16_t __s0 = __p0; \ - int32_t __ret; \ - __ret = (int32_t) __builtin_neon_vcvth_n_s32_f16(__s0, __p1); \ - __ret; \ -}) -#else -#define vcvth_n_s32_f16(__p0, __p1) __extension__ ({ \ - float16_t __s0 = __p0; \ - int32_t __ret; \ - __ret = (int32_t) __builtin_neon_vcvth_n_s32_f16(__s0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcvth_n_s32_s64(__p0, __p1) __extension__ ({ \ - int64_t __s0 = __p0; \ - int32_t __ret; \ - __ret = (int32_t) __builtin_neon_vcvth_n_s32_s64(__s0, __p1); \ - 
__ret; \ -}) -#else -#define vcvth_n_s32_s64(__p0, __p1) __extension__ ({ \ - int64_t __s0 = __p0; \ - int32_t __ret; \ - __ret = (int32_t) __builtin_neon_vcvth_n_s32_s64(__s0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcvth_n_s32_s16(__p0, __p1) __extension__ ({ \ - int16_t __s0 = __p0; \ - int32_t __ret; \ - __ret = (int32_t) __builtin_neon_vcvth_n_s32_s16(__s0, __p1); \ - __ret; \ -}) -#else -#define vcvth_n_s32_s16(__p0, __p1) __extension__ ({ \ - int16_t __s0 = __p0; \ - int32_t __ret; \ - __ret = (int32_t) __builtin_neon_vcvth_n_s32_s16(__s0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcvth_n_s32_u32(__p0, __p1) __extension__ ({ \ - uint32_t __s0 = __p0; \ - int32_t __ret; \ - __ret = (int32_t) __builtin_neon_vcvth_n_s32_u32(__s0, __p1); \ - __ret; \ -}) -#else -#define vcvth_n_s32_u32(__p0, __p1) __extension__ ({ \ - uint32_t __s0 = __p0; \ - int32_t __ret; \ - __ret = (int32_t) __builtin_neon_vcvth_n_s32_u32(__s0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcvth_n_s32_u64(__p0, __p1) __extension__ ({ \ - uint64_t __s0 = __p0; \ - int32_t __ret; \ - __ret = (int32_t) __builtin_neon_vcvth_n_s32_u64(__s0, __p1); \ - __ret; \ -}) -#else -#define vcvth_n_s32_u64(__p0, __p1) __extension__ ({ \ - uint64_t __s0 = __p0; \ - int32_t __ret; \ - __ret = (int32_t) __builtin_neon_vcvth_n_s32_u64(__s0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcvth_n_s32_u16(__p0, __p1) __extension__ ({ \ - uint16_t __s0 = __p0; \ - int32_t __ret; \ - __ret = (int32_t) __builtin_neon_vcvth_n_s32_u16(__s0, __p1); \ - __ret; \ -}) -#else -#define vcvth_n_s32_u16(__p0, __p1) __extension__ ({ \ - uint16_t __s0 = __p0; \ - int32_t __ret; \ - __ret = (int32_t) __builtin_neon_vcvth_n_s32_u16(__s0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcvth_n_s64_f16(__p0, __p1) __extension__ ({ \ - float16_t __s0 = __p0; \ - int64_t __ret; \ - __ret = (int64_t) __builtin_neon_vcvth_n_s64_f16(__s0, __p1); \ - __ret; \ -}) -#else -#define vcvth_n_s64_f16(__p0, __p1) __extension__ ({ \ - float16_t __s0 = __p0; \ - int64_t __ret; \ - __ret = (int64_t) __builtin_neon_vcvth_n_s64_f16(__s0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcvth_n_s64_s32(__p0, __p1) __extension__ ({ \ - int32_t __s0 = __p0; \ - int64_t __ret; \ - __ret = (int64_t) __builtin_neon_vcvth_n_s64_s32(__s0, __p1); \ - __ret; \ -}) -#else -#define vcvth_n_s64_s32(__p0, __p1) __extension__ ({ \ - int32_t __s0 = __p0; \ - int64_t __ret; \ - __ret = (int64_t) __builtin_neon_vcvth_n_s64_s32(__s0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcvth_n_s64_s16(__p0, __p1) __extension__ ({ \ - int16_t __s0 = __p0; \ - int64_t __ret; \ - __ret = (int64_t) __builtin_neon_vcvth_n_s64_s16(__s0, __p1); \ - __ret; \ -}) -#else -#define vcvth_n_s64_s16(__p0, __p1) __extension__ ({ \ - int16_t __s0 = __p0; \ - int64_t __ret; \ - __ret = (int64_t) __builtin_neon_vcvth_n_s64_s16(__s0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcvth_n_s64_u32(__p0, __p1) __extension__ ({ \ - uint32_t __s0 = __p0; \ - int64_t __ret; \ - __ret = (int64_t) __builtin_neon_vcvth_n_s64_u32(__s0, __p1); \ - __ret; \ -}) -#else -#define vcvth_n_s64_u32(__p0, __p1) __extension__ ({ \ - uint32_t __s0 = __p0; \ - int64_t __ret; \ - __ret = (int64_t) __builtin_neon_vcvth_n_s64_u32(__s0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcvth_n_s64_u64(__p0, __p1) __extension__ ({ \ - 
uint64_t __s0 = __p0; \ - int64_t __ret; \ - __ret = (int64_t) __builtin_neon_vcvth_n_s64_u64(__s0, __p1); \ - __ret; \ -}) -#else -#define vcvth_n_s64_u64(__p0, __p1) __extension__ ({ \ - uint64_t __s0 = __p0; \ - int64_t __ret; \ - __ret = (int64_t) __builtin_neon_vcvth_n_s64_u64(__s0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcvth_n_s64_u16(__p0, __p1) __extension__ ({ \ - uint16_t __s0 = __p0; \ - int64_t __ret; \ - __ret = (int64_t) __builtin_neon_vcvth_n_s64_u16(__s0, __p1); \ - __ret; \ -}) -#else -#define vcvth_n_s64_u16(__p0, __p1) __extension__ ({ \ - uint16_t __s0 = __p0; \ - int64_t __ret; \ - __ret = (int64_t) __builtin_neon_vcvth_n_s64_u16(__s0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcvth_n_s16_f16(__p0, __p1) __extension__ ({ \ - float16_t __s0 = __p0; \ - int16_t __ret; \ - __ret = (int16_t) __builtin_neon_vcvth_n_s16_f16(__s0, __p1); \ - __ret; \ -}) -#else -#define vcvth_n_s16_f16(__p0, __p1) __extension__ ({ \ - float16_t __s0 = __p0; \ - int16_t __ret; \ - __ret = (int16_t) __builtin_neon_vcvth_n_s16_f16(__s0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcvth_n_s16_s32(__p0, __p1) __extension__ ({ \ - int32_t __s0 = __p0; \ - int16_t __ret; \ - __ret = (int16_t) __builtin_neon_vcvth_n_s16_s32(__s0, __p1); \ - __ret; \ -}) -#else -#define vcvth_n_s16_s32(__p0, __p1) __extension__ ({ \ - int32_t __s0 = __p0; \ - int16_t __ret; \ - __ret = (int16_t) __builtin_neon_vcvth_n_s16_s32(__s0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcvth_n_s16_s64(__p0, __p1) __extension__ ({ \ - int64_t __s0 = __p0; \ - int16_t __ret; \ - __ret = (int16_t) __builtin_neon_vcvth_n_s16_s64(__s0, __p1); \ - __ret; \ -}) -#else -#define vcvth_n_s16_s64(__p0, __p1) __extension__ ({ \ - int64_t __s0 = __p0; \ - int16_t __ret; \ - __ret = (int16_t) __builtin_neon_vcvth_n_s16_s64(__s0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcvth_n_s16_u32(__p0, __p1) __extension__ ({ \ - uint32_t __s0 = __p0; \ - int16_t __ret; \ - __ret = (int16_t) __builtin_neon_vcvth_n_s16_u32(__s0, __p1); \ - __ret; \ -}) -#else -#define vcvth_n_s16_u32(__p0, __p1) __extension__ ({ \ - uint32_t __s0 = __p0; \ - int16_t __ret; \ - __ret = (int16_t) __builtin_neon_vcvth_n_s16_u32(__s0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcvth_n_s16_u64(__p0, __p1) __extension__ ({ \ - uint64_t __s0 = __p0; \ - int16_t __ret; \ - __ret = (int16_t) __builtin_neon_vcvth_n_s16_u64(__s0, __p1); \ - __ret; \ -}) -#else -#define vcvth_n_s16_u64(__p0, __p1) __extension__ ({ \ - uint64_t __s0 = __p0; \ - int16_t __ret; \ - __ret = (int16_t) __builtin_neon_vcvth_n_s16_u64(__s0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcvth_n_s16_u16(__p0, __p1) __extension__ ({ \ - uint16_t __s0 = __p0; \ - int16_t __ret; \ - __ret = (int16_t) __builtin_neon_vcvth_n_s16_u16(__s0, __p1); \ - __ret; \ -}) -#else -#define vcvth_n_s16_u16(__p0, __p1) __extension__ ({ \ - uint16_t __s0 = __p0; \ - int16_t __ret; \ - __ret = (int16_t) __builtin_neon_vcvth_n_s16_u16(__s0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcvth_n_u32_f16(__p0, __p1) __extension__ ({ \ - float16_t __s0 = __p0; \ - uint32_t __ret; \ - __ret = (uint32_t) __builtin_neon_vcvth_n_u32_f16(__s0, __p1); \ - __ret; \ -}) -#else -#define vcvth_n_u32_f16(__p0, __p1) __extension__ ({ \ - float16_t __s0 = __p0; \ - uint32_t __ret; \ - __ret = (uint32_t) __builtin_neon_vcvth_n_u32_f16(__s0, 
__p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcvth_n_u32_s32(__p0, __p1) __extension__ ({ \ - int32_t __s0 = __p0; \ - uint32_t __ret; \ - __ret = (uint32_t) __builtin_neon_vcvth_n_u32_s32(__s0, __p1); \ - __ret; \ -}) -#else -#define vcvth_n_u32_s32(__p0, __p1) __extension__ ({ \ - int32_t __s0 = __p0; \ - uint32_t __ret; \ - __ret = (uint32_t) __builtin_neon_vcvth_n_u32_s32(__s0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcvth_n_u32_s64(__p0, __p1) __extension__ ({ \ - int64_t __s0 = __p0; \ - uint32_t __ret; \ - __ret = (uint32_t) __builtin_neon_vcvth_n_u32_s64(__s0, __p1); \ - __ret; \ -}) -#else -#define vcvth_n_u32_s64(__p0, __p1) __extension__ ({ \ - int64_t __s0 = __p0; \ - uint32_t __ret; \ - __ret = (uint32_t) __builtin_neon_vcvth_n_u32_s64(__s0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcvth_n_u32_s16(__p0, __p1) __extension__ ({ \ - int16_t __s0 = __p0; \ - uint32_t __ret; \ - __ret = (uint32_t) __builtin_neon_vcvth_n_u32_s16(__s0, __p1); \ - __ret; \ -}) -#else -#define vcvth_n_u32_s16(__p0, __p1) __extension__ ({ \ - int16_t __s0 = __p0; \ - uint32_t __ret; \ - __ret = (uint32_t) __builtin_neon_vcvth_n_u32_s16(__s0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcvth_n_u32_u64(__p0, __p1) __extension__ ({ \ - uint64_t __s0 = __p0; \ - uint32_t __ret; \ - __ret = (uint32_t) __builtin_neon_vcvth_n_u32_u64(__s0, __p1); \ - __ret; \ -}) -#else -#define vcvth_n_u32_u64(__p0, __p1) __extension__ ({ \ - uint64_t __s0 = __p0; \ - uint32_t __ret; \ - __ret = (uint32_t) __builtin_neon_vcvth_n_u32_u64(__s0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcvth_n_u32_u16(__p0, __p1) __extension__ ({ \ - uint16_t __s0 = __p0; \ - uint32_t __ret; \ - __ret = (uint32_t) __builtin_neon_vcvth_n_u32_u16(__s0, __p1); \ - __ret; \ -}) -#else -#define vcvth_n_u32_u16(__p0, __p1) __extension__ ({ \ - uint16_t __s0 = __p0; \ - uint32_t __ret; \ - __ret = (uint32_t) __builtin_neon_vcvth_n_u32_u16(__s0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcvth_n_u64_f16(__p0, __p1) __extension__ ({ \ - float16_t __s0 = __p0; \ - uint64_t __ret; \ - __ret = (uint64_t) __builtin_neon_vcvth_n_u64_f16(__s0, __p1); \ - __ret; \ -}) -#else -#define vcvth_n_u64_f16(__p0, __p1) __extension__ ({ \ - float16_t __s0 = __p0; \ - uint64_t __ret; \ - __ret = (uint64_t) __builtin_neon_vcvth_n_u64_f16(__s0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcvth_n_u64_s32(__p0, __p1) __extension__ ({ \ - int32_t __s0 = __p0; \ - uint64_t __ret; \ - __ret = (uint64_t) __builtin_neon_vcvth_n_u64_s32(__s0, __p1); \ - __ret; \ -}) -#else -#define vcvth_n_u64_s32(__p0, __p1) __extension__ ({ \ - int32_t __s0 = __p0; \ - uint64_t __ret; \ - __ret = (uint64_t) __builtin_neon_vcvth_n_u64_s32(__s0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcvth_n_u64_s64(__p0, __p1) __extension__ ({ \ - int64_t __s0 = __p0; \ - uint64_t __ret; \ - __ret = (uint64_t) __builtin_neon_vcvth_n_u64_s64(__s0, __p1); \ - __ret; \ -}) -#else -#define vcvth_n_u64_s64(__p0, __p1) __extension__ ({ \ - int64_t __s0 = __p0; \ - uint64_t __ret; \ - __ret = (uint64_t) __builtin_neon_vcvth_n_u64_s64(__s0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcvth_n_u64_s16(__p0, __p1) __extension__ ({ \ - int16_t __s0 = __p0; \ - uint64_t __ret; \ - __ret = (uint64_t) __builtin_neon_vcvth_n_u64_s16(__s0, __p1); \ - __ret; \ -}) -#else -#define 
vcvth_n_u64_s16(__p0, __p1) __extension__ ({ \ - int16_t __s0 = __p0; \ - uint64_t __ret; \ - __ret = (uint64_t) __builtin_neon_vcvth_n_u64_s16(__s0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcvth_n_u64_u32(__p0, __p1) __extension__ ({ \ - uint32_t __s0 = __p0; \ - uint64_t __ret; \ - __ret = (uint64_t) __builtin_neon_vcvth_n_u64_u32(__s0, __p1); \ - __ret; \ -}) -#else -#define vcvth_n_u64_u32(__p0, __p1) __extension__ ({ \ - uint32_t __s0 = __p0; \ - uint64_t __ret; \ - __ret = (uint64_t) __builtin_neon_vcvth_n_u64_u32(__s0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcvth_n_u64_u16(__p0, __p1) __extension__ ({ \ - uint16_t __s0 = __p0; \ - uint64_t __ret; \ - __ret = (uint64_t) __builtin_neon_vcvth_n_u64_u16(__s0, __p1); \ - __ret; \ -}) -#else -#define vcvth_n_u64_u16(__p0, __p1) __extension__ ({ \ - uint16_t __s0 = __p0; \ - uint64_t __ret; \ - __ret = (uint64_t) __builtin_neon_vcvth_n_u64_u16(__s0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcvth_n_u16_f16(__p0, __p1) __extension__ ({ \ - float16_t __s0 = __p0; \ - uint16_t __ret; \ - __ret = (uint16_t) __builtin_neon_vcvth_n_u16_f16(__s0, __p1); \ - __ret; \ -}) -#else -#define vcvth_n_u16_f16(__p0, __p1) __extension__ ({ \ - float16_t __s0 = __p0; \ - uint16_t __ret; \ - __ret = (uint16_t) __builtin_neon_vcvth_n_u16_f16(__s0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcvth_n_u16_s32(__p0, __p1) __extension__ ({ \ - int32_t __s0 = __p0; \ - uint16_t __ret; \ - __ret = (uint16_t) __builtin_neon_vcvth_n_u16_s32(__s0, __p1); \ - __ret; \ -}) -#else -#define vcvth_n_u16_s32(__p0, __p1) __extension__ ({ \ - int32_t __s0 = __p0; \ - uint16_t __ret; \ - __ret = (uint16_t) __builtin_neon_vcvth_n_u16_s32(__s0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcvth_n_u16_s64(__p0, __p1) __extension__ ({ \ - int64_t __s0 = __p0; \ - uint16_t __ret; \ - __ret = (uint16_t) __builtin_neon_vcvth_n_u16_s64(__s0, __p1); \ - __ret; \ -}) -#else -#define vcvth_n_u16_s64(__p0, __p1) __extension__ ({ \ - int64_t __s0 = __p0; \ - uint16_t __ret; \ - __ret = (uint16_t) __builtin_neon_vcvth_n_u16_s64(__s0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcvth_n_u16_s16(__p0, __p1) __extension__ ({ \ - int16_t __s0 = __p0; \ - uint16_t __ret; \ - __ret = (uint16_t) __builtin_neon_vcvth_n_u16_s16(__s0, __p1); \ - __ret; \ -}) -#else -#define vcvth_n_u16_s16(__p0, __p1) __extension__ ({ \ - int16_t __s0 = __p0; \ - uint16_t __ret; \ - __ret = (uint16_t) __builtin_neon_vcvth_n_u16_s16(__s0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcvth_n_u16_u32(__p0, __p1) __extension__ ({ \ - uint32_t __s0 = __p0; \ - uint16_t __ret; \ - __ret = (uint16_t) __builtin_neon_vcvth_n_u16_u32(__s0, __p1); \ - __ret; \ -}) -#else -#define vcvth_n_u16_u32(__p0, __p1) __extension__ ({ \ - uint32_t __s0 = __p0; \ - uint16_t __ret; \ - __ret = (uint16_t) __builtin_neon_vcvth_n_u16_u32(__s0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcvth_n_u16_u64(__p0, __p1) __extension__ ({ \ - uint64_t __s0 = __p0; \ - uint16_t __ret; \ - __ret = (uint16_t) __builtin_neon_vcvth_n_u16_u64(__s0, __p1); \ - __ret; \ -}) -#else -#define vcvth_n_u16_u64(__p0, __p1) __extension__ ({ \ - uint64_t __s0 = __p0; \ - uint16_t __ret; \ - __ret = (uint16_t) __builtin_neon_vcvth_n_u16_u64(__s0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16_t vdivh_f16(float16_t __p0, float16_t __p1) { 
- float16_t __ret; - __ret = (float16_t) __builtin_neon_vdivh_f16(__p0, __p1); - return __ret; -} -#else -__ai float16_t vdivh_f16(float16_t __p0, float16_t __p1) { - float16_t __ret; - __ret = (float16_t) __builtin_neon_vdivh_f16(__p0, __p1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16_t vfmsh_f16(float16_t __p0, float16_t __p1, float16_t __p2) { - float16_t __ret; - __ret = (float16_t) __builtin_neon_vfmsh_f16(__p0, __p1, __p2); - return __ret; -} -#else -__ai float16_t vfmsh_f16(float16_t __p0, float16_t __p1, float16_t __p2) { - float16_t __ret; - __ret = (float16_t) __builtin_neon_vfmsh_f16(__p0, __p1, __p2); - return __ret; -} -__ai float16_t __noswap_vfmsh_f16(float16_t __p0, float16_t __p1, float16_t __p2) { - float16_t __ret; - __ret = (float16_t) __builtin_neon_vfmsh_f16(__p0, __p1, __p2); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16_t vsqrth_f16(float16_t __p0) { - float16_t __ret; - __ret = (float16_t) __builtin_neon_vsqrth_f16(__p0); - return __ret; -} -#else -__ai float16_t vsqrth_f16(float16_t __p0) { - float16_t __ret; - __ret = (float16_t) __builtin_neon_vsqrth_f16(__p0); - return __ret; -} -#endif - -#endif -#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) -#ifdef __LITTLE_ENDIAN__ -__ai float16x8_t vabdq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 40); - return __ret; -} -#else -__ai float16x8_t vabdq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x4_t vabd_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 8); - return __ret; -} -#else -__ai float16x4_t vabd_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x8_t vabsq_f16(float16x8_t __p0) { - float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 40); - return __ret; -} -#else -__ai float16x8_t vabsq_f16(float16x8_t __p0) { - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 40); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x4_t vabs_f16(float16x4_t __p0) { - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vabs_v((int8x8_t)__p0, 8); - return __ret; -} -#else -__ai float16x4_t vabs_f16(float16x4_t __p0) { - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vabs_v((int8x8_t)__rev0, 8); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 
1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x8_t vaddq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - __ret = __p0 + __p1; - return __ret; -} -#else -__ai float16x8_t vaddq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __ret; - __ret = __rev0 + __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x4_t vadd_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - __ret = __p0 + __p1; - return __ret; -} -#else -__ai float16x4_t vadd_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - float16x4_t __ret; - __ret = __rev0 + __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x8_t vbslq_f16(uint16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { - float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40); - return __ret; -} -#else -__ai float16x8_t vbslq_f16(uint16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 40); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x4_t vbsl_f16(uint16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8); - return __ret; -} -#else -__ai float16x4_t vbsl_f16(uint16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - float16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 8); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vcageq_f16(float16x8_t __p0, float16x8_t __p1) { - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vcageq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); - return __ret; -} -#else -__ai uint16x8_t vcageq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vcageq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vcage_f16(float16x4_t __p0, float16x4_t __p1) { - uint16x4_t 
__ret; - __ret = (uint16x4_t) __builtin_neon_vcage_v((int8x8_t)__p0, (int8x8_t)__p1, 17); - return __ret; -} -#else -__ai uint16x4_t vcage_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vcage_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vcagtq_f16(float16x8_t __p0, float16x8_t __p1) { - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vcagtq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); - return __ret; -} -#else -__ai uint16x8_t vcagtq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vcagtq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vcagt_f16(float16x4_t __p0, float16x4_t __p1) { - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vcagt_v((int8x8_t)__p0, (int8x8_t)__p1, 17); - return __ret; -} -#else -__ai uint16x4_t vcagt_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vcagt_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vcaleq_f16(float16x8_t __p0, float16x8_t __p1) { - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vcaleq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); - return __ret; -} -#else -__ai uint16x8_t vcaleq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vcaleq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vcale_f16(float16x4_t __p0, float16x4_t __p1) { - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vcale_v((int8x8_t)__p0, (int8x8_t)__p1, 17); - return __ret; -} -#else -__ai uint16x4_t vcale_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vcale_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vcaltq_f16(float16x8_t __p0, float16x8_t __p1) { - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vcaltq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); - return __ret; -} -#else -__ai uint16x8_t vcaltq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 
6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vcaltq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vcalt_f16(float16x4_t __p0, float16x4_t __p1) { - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vcalt_v((int8x8_t)__p0, (int8x8_t)__p1, 17); - return __ret; -} -#else -__ai uint16x4_t vcalt_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vcalt_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vceqq_f16(float16x8_t __p0, float16x8_t __p1) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0 == __p1); - return __ret; -} -#else -__ai uint16x8_t vceqq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __ret; - __ret = (uint16x8_t)(__rev0 == __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vceq_f16(float16x4_t __p0, float16x4_t __p1) { - uint16x4_t __ret; - __ret = (uint16x4_t)(__p0 == __p1); - return __ret; -} -#else -__ai uint16x4_t vceq_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint16x4_t __ret; - __ret = (uint16x4_t)(__rev0 == __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vcgeq_f16(float16x8_t __p0, float16x8_t __p1) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0 >= __p1); - return __ret; -} -#else -__ai uint16x8_t vcgeq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __ret; - __ret = (uint16x8_t)(__rev0 >= __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vcge_f16(float16x4_t __p0, float16x4_t __p1) { - uint16x4_t __ret; - __ret = (uint16x4_t)(__p0 >= __p1); - return __ret; -} -#else -__ai uint16x4_t vcge_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint16x4_t __ret; - __ret = (uint16x4_t)(__rev0 >= __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vcgtq_f16(float16x8_t __p0, float16x8_t __p1) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0 > __p1); - return __ret; -} -#else -__ai uint16x8_t vcgtq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 
6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __ret; - __ret = (uint16x8_t)(__rev0 > __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vcgt_f16(float16x4_t __p0, float16x4_t __p1) { - uint16x4_t __ret; - __ret = (uint16x4_t)(__p0 > __p1); - return __ret; -} -#else -__ai uint16x4_t vcgt_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint16x4_t __ret; - __ret = (uint16x4_t)(__rev0 > __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vcleq_f16(float16x8_t __p0, float16x8_t __p1) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0 <= __p1); - return __ret; -} -#else -__ai uint16x8_t vcleq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __ret; - __ret = (uint16x8_t)(__rev0 <= __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vcle_f16(float16x4_t __p0, float16x4_t __p1) { - uint16x4_t __ret; - __ret = (uint16x4_t)(__p0 <= __p1); - return __ret; -} -#else -__ai uint16x4_t vcle_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint16x4_t __ret; - __ret = (uint16x4_t)(__rev0 <= __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vcltq_f16(float16x8_t __p0, float16x8_t __p1) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0 < __p1); - return __ret; -} -#else -__ai uint16x8_t vcltq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __ret; - __ret = (uint16x8_t)(__rev0 < __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vclt_f16(float16x4_t __p0, float16x4_t __p1) { - uint16x4_t __ret; - __ret = (uint16x4_t)(__p0 < __p1); - return __ret; -} -#else -__ai uint16x4_t vclt_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint16x4_t __ret; - __ret = (uint16x4_t)(__rev0 < __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x8_t vcvtq_f16_u16(uint16x8_t __p0) { - float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vcvtq_f16_v((int8x16_t)__p0, 49); - return __ret; -} -#else -__ai float16x8_t vcvtq_f16_u16(uint16x8_t __p0) { - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vcvtq_f16_v((int8x16_t)__rev0, 49); - __ret = 
__builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x8_t vcvtq_f16_s16(int16x8_t __p0) { - float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vcvtq_f16_v((int8x16_t)__p0, 33); - return __ret; -} -#else -__ai float16x8_t vcvtq_f16_s16(int16x8_t __p0) { - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vcvtq_f16_v((int8x16_t)__rev0, 33); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x4_t vcvt_f16_u16(uint16x4_t __p0) { - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vcvt_f16_v((int8x8_t)__p0, 17); - return __ret; -} -#else -__ai float16x4_t vcvt_f16_u16(uint16x4_t __p0) { - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vcvt_f16_v((int8x8_t)__rev0, 17); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x4_t vcvt_f16_s16(int16x4_t __p0) { - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vcvt_f16_v((int8x8_t)__p0, 1); - return __ret; -} -#else -__ai float16x4_t vcvt_f16_s16(int16x4_t __p0) { - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vcvt_f16_v((int8x8_t)__rev0, 1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcvtq_n_f16_u16(__p0, __p1) __extension__ ({ \ - uint16x8_t __s0 = __p0; \ - float16x8_t __ret; \ - __ret = (float16x8_t) __builtin_neon_vcvtq_n_f16_v((int8x16_t)__s0, __p1, 49); \ - __ret; \ -}) -#else -#define vcvtq_n_f16_u16(__p0, __p1) __extension__ ({ \ - uint16x8_t __s0 = __p0; \ - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x8_t __ret; \ - __ret = (float16x8_t) __builtin_neon_vcvtq_n_f16_v((int8x16_t)__rev0, __p1, 49); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcvtq_n_f16_s16(__p0, __p1) __extension__ ({ \ - int16x8_t __s0 = __p0; \ - float16x8_t __ret; \ - __ret = (float16x8_t) __builtin_neon_vcvtq_n_f16_v((int8x16_t)__s0, __p1, 33); \ - __ret; \ -}) -#else -#define vcvtq_n_f16_s16(__p0, __p1) __extension__ ({ \ - int16x8_t __s0 = __p0; \ - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x8_t __ret; \ - __ret = (float16x8_t) __builtin_neon_vcvtq_n_f16_v((int8x16_t)__rev0, __p1, 33); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcvt_n_f16_u16(__p0, __p1) __extension__ ({ \ - uint16x4_t __s0 = __p0; \ - float16x4_t __ret; \ - __ret = (float16x4_t) __builtin_neon_vcvt_n_f16_v((int8x8_t)__s0, __p1, 17); \ - __ret; \ -}) -#else -#define vcvt_n_f16_u16(__p0, __p1) __extension__ ({ \ - uint16x4_t __s0 = __p0; \ - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - float16x4_t __ret; \ - __ret = (float16x4_t) __builtin_neon_vcvt_n_f16_v((int8x8_t)__rev0, __p1, 17); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcvt_n_f16_s16(__p0, __p1) __extension__ ({ \ - 
int16x4_t __s0 = __p0; \ - float16x4_t __ret; \ - __ret = (float16x4_t) __builtin_neon_vcvt_n_f16_v((int8x8_t)__s0, __p1, 1); \ - __ret; \ -}) -#else -#define vcvt_n_f16_s16(__p0, __p1) __extension__ ({ \ - int16x4_t __s0 = __p0; \ - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - float16x4_t __ret; \ - __ret = (float16x4_t) __builtin_neon_vcvt_n_f16_v((int8x8_t)__rev0, __p1, 1); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcvtq_n_s16_f16(__p0, __p1) __extension__ ({ \ - float16x8_t __s0 = __p0; \ - int16x8_t __ret; \ - __ret = (int16x8_t) __builtin_neon_vcvtq_n_s16_v((int8x16_t)__s0, __p1, 33); \ - __ret; \ -}) -#else -#define vcvtq_n_s16_f16(__p0, __p1) __extension__ ({ \ - float16x8_t __s0 = __p0; \ - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x8_t __ret; \ - __ret = (int16x8_t) __builtin_neon_vcvtq_n_s16_v((int8x16_t)__rev0, __p1, 33); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcvt_n_s16_f16(__p0, __p1) __extension__ ({ \ - float16x4_t __s0 = __p0; \ - int16x4_t __ret; \ - __ret = (int16x4_t) __builtin_neon_vcvt_n_s16_v((int8x8_t)__s0, __p1, 1); \ - __ret; \ -}) -#else -#define vcvt_n_s16_f16(__p0, __p1) __extension__ ({ \ - float16x4_t __s0 = __p0; \ - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - int16x4_t __ret; \ - __ret = (int16x4_t) __builtin_neon_vcvt_n_s16_v((int8x8_t)__rev0, __p1, 1); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcvtq_n_u16_f16(__p0, __p1) __extension__ ({ \ - float16x8_t __s0 = __p0; \ - uint16x8_t __ret; \ - __ret = (uint16x8_t) __builtin_neon_vcvtq_n_u16_v((int8x16_t)__s0, __p1, 49); \ - __ret; \ -}) -#else -#define vcvtq_n_u16_f16(__p0, __p1) __extension__ ({ \ - float16x8_t __s0 = __p0; \ - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint16x8_t __ret; \ - __ret = (uint16x8_t) __builtin_neon_vcvtq_n_u16_v((int8x16_t)__rev0, __p1, 49); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcvt_n_u16_f16(__p0, __p1) __extension__ ({ \ - float16x4_t __s0 = __p0; \ - uint16x4_t __ret; \ - __ret = (uint16x4_t) __builtin_neon_vcvt_n_u16_v((int8x8_t)__s0, __p1, 17); \ - __ret; \ -}) -#else -#define vcvt_n_u16_f16(__p0, __p1) __extension__ ({ \ - float16x4_t __s0 = __p0; \ - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - uint16x4_t __ret; \ - __ret = (uint16x4_t) __builtin_neon_vcvt_n_u16_v((int8x8_t)__rev0, __p1, 17); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vcvtq_s16_f16(float16x8_t __p0) { - int16x8_t __ret; - __ret = (int16x8_t) __builtin_neon_vcvtq_s16_v((int8x16_t)__p0, 33); - return __ret; -} -#else -__ai int16x8_t vcvtq_s16_f16(float16x8_t __p0) { - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __ret; - __ret = (int16x8_t) __builtin_neon_vcvtq_s16_v((int8x16_t)__rev0, 33); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vcvt_s16_f16(float16x4_t __p0) { - int16x4_t __ret; - __ret = 
(int16x4_t) __builtin_neon_vcvt_s16_v((int8x8_t)__p0, 1); - return __ret; -} -#else -__ai int16x4_t vcvt_s16_f16(float16x4_t __p0) { - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vcvt_s16_v((int8x8_t)__rev0, 1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vcvtq_u16_f16(float16x8_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vcvtq_u16_v((int8x16_t)__p0, 49); - return __ret; -} -#else -__ai uint16x8_t vcvtq_u16_f16(float16x8_t __p0) { - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vcvtq_u16_v((int8x16_t)__rev0, 49); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vcvt_u16_f16(float16x4_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vcvt_u16_v((int8x8_t)__p0, 17); - return __ret; -} -#else -__ai uint16x4_t vcvt_u16_f16(float16x4_t __p0) { - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vcvt_u16_v((int8x8_t)__rev0, 17); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vcvtaq_s16_f16(float16x8_t __p0) { - int16x8_t __ret; - __ret = (int16x8_t) __builtin_neon_vcvtaq_s16_v((int8x16_t)__p0, 33); - return __ret; -} -#else -__ai int16x8_t vcvtaq_s16_f16(float16x8_t __p0) { - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __ret; - __ret = (int16x8_t) __builtin_neon_vcvtaq_s16_v((int8x16_t)__rev0, 33); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vcvta_s16_f16(float16x4_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vcvta_s16_v((int8x8_t)__p0, 1); - return __ret; -} -#else -__ai int16x4_t vcvta_s16_f16(float16x4_t __p0) { - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vcvta_s16_v((int8x8_t)__rev0, 1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vcvtaq_u16_f16(float16x8_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vcvtaq_u16_v((int8x16_t)__p0, 49); - return __ret; -} -#else -__ai uint16x8_t vcvtaq_u16_f16(float16x8_t __p0) { - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vcvtaq_u16_v((int8x16_t)__rev0, 49); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vcvta_u16_f16(float16x4_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vcvta_u16_v((int8x8_t)__p0, 17); - return __ret; -} -#else -__ai uint16x4_t vcvta_u16_f16(float16x4_t __p0) { - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vcvta_u16_v((int8x8_t)__rev0, 17); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x8_t 
vcvtmq_s16_f16(float16x8_t __p0) { - int16x8_t __ret; - __ret = (int16x8_t) __builtin_neon_vcvtmq_s16_v((int8x16_t)__p0, 33); - return __ret; -} -#else -__ai int16x8_t vcvtmq_s16_f16(float16x8_t __p0) { - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __ret; - __ret = (int16x8_t) __builtin_neon_vcvtmq_s16_v((int8x16_t)__rev0, 33); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vcvtm_s16_f16(float16x4_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vcvtm_s16_v((int8x8_t)__p0, 1); - return __ret; -} -#else -__ai int16x4_t vcvtm_s16_f16(float16x4_t __p0) { - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vcvtm_s16_v((int8x8_t)__rev0, 1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vcvtmq_u16_f16(float16x8_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vcvtmq_u16_v((int8x16_t)__p0, 49); - return __ret; -} -#else -__ai uint16x8_t vcvtmq_u16_f16(float16x8_t __p0) { - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vcvtmq_u16_v((int8x16_t)__rev0, 49); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vcvtm_u16_f16(float16x4_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vcvtm_u16_v((int8x8_t)__p0, 17); - return __ret; -} -#else -__ai uint16x4_t vcvtm_u16_f16(float16x4_t __p0) { - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vcvtm_u16_v((int8x8_t)__rev0, 17); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vcvtnq_s16_f16(float16x8_t __p0) { - int16x8_t __ret; - __ret = (int16x8_t) __builtin_neon_vcvtnq_s16_v((int8x16_t)__p0, 33); - return __ret; -} -#else -__ai int16x8_t vcvtnq_s16_f16(float16x8_t __p0) { - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __ret; - __ret = (int16x8_t) __builtin_neon_vcvtnq_s16_v((int8x16_t)__rev0, 33); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vcvtn_s16_f16(float16x4_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vcvtn_s16_v((int8x8_t)__p0, 1); - return __ret; -} -#else -__ai int16x4_t vcvtn_s16_f16(float16x4_t __p0) { - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vcvtn_s16_v((int8x8_t)__rev0, 1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vcvtnq_u16_f16(float16x8_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vcvtnq_u16_v((int8x16_t)__p0, 49); - return __ret; -} -#else -__ai uint16x8_t vcvtnq_u16_f16(float16x8_t __p0) { - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vcvtnq_u16_v((int8x16_t)__rev0, 49); - __ret = __builtin_shufflevector(__ret, __ret, 7, 
6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vcvtn_u16_f16(float16x4_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vcvtn_u16_v((int8x8_t)__p0, 17); - return __ret; -} -#else -__ai uint16x4_t vcvtn_u16_f16(float16x4_t __p0) { - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vcvtn_u16_v((int8x8_t)__rev0, 17); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vcvtpq_s16_f16(float16x8_t __p0) { - int16x8_t __ret; - __ret = (int16x8_t) __builtin_neon_vcvtpq_s16_v((int8x16_t)__p0, 33); - return __ret; -} -#else -__ai int16x8_t vcvtpq_s16_f16(float16x8_t __p0) { - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __ret; - __ret = (int16x8_t) __builtin_neon_vcvtpq_s16_v((int8x16_t)__rev0, 33); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vcvtp_s16_f16(float16x4_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vcvtp_s16_v((int8x8_t)__p0, 1); - return __ret; -} -#else -__ai int16x4_t vcvtp_s16_f16(float16x4_t __p0) { - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vcvtp_s16_v((int8x8_t)__rev0, 1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vcvtpq_u16_f16(float16x8_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vcvtpq_u16_v((int8x16_t)__p0, 49); - return __ret; -} -#else -__ai uint16x8_t vcvtpq_u16_f16(float16x8_t __p0) { - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vcvtpq_u16_v((int8x16_t)__rev0, 49); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vcvtp_u16_f16(float16x4_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vcvtp_u16_v((int8x8_t)__p0, 17); - return __ret; -} -#else -__ai uint16x4_t vcvtp_u16_f16(float16x4_t __p0) { - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vcvtp_u16_v((int8x8_t)__rev0, 17); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vduph_lane_f16(__p0, __p1) __extension__ ({ \ - float16x4_t __s0 = __p0; \ - float16_t __ret; \ - __ret = (float16_t) __builtin_neon_vduph_lane_f16((int8x8_t)__s0, __p1); \ - __ret; \ -}) -#else -#define vduph_lane_f16(__p0, __p1) __extension__ ({ \ - float16x4_t __s0 = __p0; \ - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - float16_t __ret; \ - __ret = (float16_t) __builtin_neon_vduph_lane_f16((int8x8_t)__rev0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vduph_laneq_f16(__p0, __p1) __extension__ ({ \ - float16x8_t __s0 = __p0; \ - float16_t __ret; \ - __ret = (float16_t) __builtin_neon_vduph_laneq_f16((int8x16_t)__s0, __p1); \ - __ret; \ -}) -#else -#define vduph_laneq_f16(__p0, __p1) __extension__ ({ \ - float16x8_t __s0 = __p0; \ - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 
3, 2, 1, 0); \ - float16_t __ret; \ - __ret = (float16_t) __builtin_neon_vduph_laneq_f16((int8x16_t)__rev0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vextq_f16(__p0, __p1, __p2) __extension__ ({ \ - float16x8_t __s0 = __p0; \ - float16x8_t __s1 = __p1; \ - float16x8_t __ret; \ - __ret = (float16x8_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 40); \ - __ret; \ -}) -#else -#define vextq_f16(__p0, __p1, __p2) __extension__ ({ \ - float16x8_t __s0 = __p0; \ - float16x8_t __s1 = __p1; \ - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x8_t __ret; \ - __ret = (float16x8_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 40); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vext_f16(__p0, __p1, __p2) __extension__ ({ \ - float16x4_t __s0 = __p0; \ - float16x4_t __s1 = __p1; \ - float16x4_t __ret; \ - __ret = (float16x4_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 8); \ - __ret; \ -}) -#else -#define vext_f16(__p0, __p1, __p2) __extension__ ({ \ - float16x4_t __s0 = __p0; \ - float16x4_t __s1 = __p1; \ - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - float16x4_t __ret; \ - __ret = (float16x4_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 8); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x8_t vfmaq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { - float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vfmaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40); - return __ret; -} -#else -__ai float16x8_t vfmaq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vfmaq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 40); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -__ai float16x8_t __noswap_vfmaq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { - float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vfmaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x4_t vfma_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vfma_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8); - return __ret; -} -#else -__ai float16x4_t vfma_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - float16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vfma_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 8); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - 
return __ret; -} -__ai float16x4_t __noswap_vfma_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vfma_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x8_t vmaxq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 40); - return __ret; -} -#else -__ai float16x8_t vmaxq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x4_t vmax_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 8); - return __ret; -} -#else -__ai float16x4_t vmax_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x8_t vmaxnmq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vmaxnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 40); - return __ret; -} -#else -__ai float16x8_t vmaxnmq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vmaxnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x4_t vmaxnm_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vmaxnm_v((int8x8_t)__p0, (int8x8_t)__p1, 8); - return __ret; -} -#else -__ai float16x4_t vmaxnm_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vmaxnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x8_t vminq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 40); - return __ret; -} -#else -__ai float16x8_t vminq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40); - __ret = 
__builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x4_t vmin_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 8); - return __ret; -} -#else -__ai float16x4_t vmin_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x8_t vminnmq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vminnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 40); - return __ret; -} -#else -__ai float16x8_t vminnmq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vminnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x4_t vminnm_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vminnm_v((int8x8_t)__p0, (int8x8_t)__p1, 8); - return __ret; -} -#else -__ai float16x4_t vminnm_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vminnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x8_t vmulq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - __ret = __p0 * __p1; - return __ret; -} -#else -__ai float16x8_t vmulq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __ret; - __ret = __rev0 * __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x4_t vmul_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - __ret = __p0 * __p1; - return __ret; -} -#else -__ai float16x4_t vmul_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - float16x4_t __ret; - __ret = __rev0 * __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmulq_lane_f16(__p0, __p1, __p2) __extension__ ({ \ - float16x8_t __s0 = __p0; \ - float16x4_t __s1 = __p1; \ - float16x8_t __ret; \ - __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2); \ - __ret; \ -}) -#else -#define vmulq_lane_f16(__p0, __p1, __p2) __extension__ ({ \ - float16x8_t __s0 = __p0; \ - 
float16x4_t __s1 = __p1; \ - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - float16x8_t __ret; \ - __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmul_lane_f16(__p0, __p1, __p2) __extension__ ({ \ - float16x4_t __s0 = __p0; \ - float16x4_t __s1 = __p1; \ - float16x4_t __ret; \ - __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2); \ - __ret; \ -}) -#else -#define vmul_lane_f16(__p0, __p1, __p2) __extension__ ({ \ - float16x4_t __s0 = __p0; \ - float16x4_t __s1 = __p1; \ - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - float16x4_t __ret; \ - __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmulq_laneq_f16(__p0, __p1, __p2) __extension__ ({ \ - float16x8_t __s0 = __p0; \ - float16x8_t __s1 = __p1; \ - float16x8_t __ret; \ - __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2); \ - __ret; \ -}) -#else -#define vmulq_laneq_f16(__p0, __p1, __p2) __extension__ ({ \ - float16x8_t __s0 = __p0; \ - float16x8_t __s1 = __p1; \ - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x8_t __ret; \ - __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmul_laneq_f16(__p0, __p1, __p2) __extension__ ({ \ - float16x4_t __s0 = __p0; \ - float16x8_t __s1 = __p1; \ - float16x4_t __ret; \ - __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2); \ - __ret; \ -}) -#else -#define vmul_laneq_f16(__p0, __p1, __p2) __extension__ ({ \ - float16x4_t __s0 = __p0; \ - float16x8_t __s1 = __p1; \ - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x4_t __ret; \ - __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x8_t vmulq_n_f16(float16x8_t __p0, float16_t __p1) { - float16x8_t __ret; - __ret = __p0 * (float16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1}; - return __ret; -} -#else -__ai float16x8_t vmulq_n_f16(float16x8_t __p0, float16_t __p1) { - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __ret; - __ret = __rev0 * (float16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1}; - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x4_t vmul_n_f16(float16x4_t __p0, float16_t __p1) { - float16x4_t __ret; - __ret = __p0 * (float16x4_t) {__p1, __p1, __p1, __p1}; - return __ret; -} 
-#else -__ai float16x4_t vmul_n_f16(float16x4_t __p0, float16_t __p1) { - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __ret; - __ret = __rev0 * (float16x4_t) {__p1, __p1, __p1, __p1}; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x8_t vnegq_f16(float16x8_t __p0) { - float16x8_t __ret; - __ret = -__p0; - return __ret; -} -#else -__ai float16x8_t vnegq_f16(float16x8_t __p0) { - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __ret; - __ret = -__rev0; - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x4_t vneg_f16(float16x4_t __p0) { - float16x4_t __ret; - __ret = -__p0; - return __ret; -} -#else -__ai float16x4_t vneg_f16(float16x4_t __p0) { - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __ret; - __ret = -__rev0; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x4_t vpadd_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 8); - return __ret; -} -#else -__ai float16x4_t vpadd_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x4_t vpmax_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 8); - return __ret; -} -#else -__ai float16x4_t vpmax_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x4_t vpmin_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 8); - return __ret; -} -#else -__ai float16x4_t vpmin_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x8_t vrecpeq_f16(float16x8_t __p0) { - float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vrecpeq_v((int8x16_t)__p0, 40); - return __ret; -} -#else -__ai float16x8_t vrecpeq_f16(float16x8_t __p0) { - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vrecpeq_v((int8x16_t)__rev0, 40); - __ret = __builtin_shufflevector(__ret, __ret, 7, 
6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x4_t vrecpe_f16(float16x4_t __p0) { - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vrecpe_v((int8x8_t)__p0, 8); - return __ret; -} -#else -__ai float16x4_t vrecpe_f16(float16x4_t __p0) { - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vrecpe_v((int8x8_t)__rev0, 8); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x8_t vrecpsq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vrecpsq_v((int8x16_t)__p0, (int8x16_t)__p1, 40); - return __ret; -} -#else -__ai float16x8_t vrecpsq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vrecpsq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x4_t vrecps_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vrecps_v((int8x8_t)__p0, (int8x8_t)__p1, 8); - return __ret; -} -#else -__ai float16x4_t vrecps_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vrecps_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x8_t vrev64q_f16(float16x8_t __p0) { - float16x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4); - return __ret; -} -#else -__ai float16x8_t vrev64q_f16(float16x8_t __p0) { - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x4_t vrev64_f16(float16x4_t __p0) { - float16x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - return __ret; -} -#else -__ai float16x4_t vrev64_f16(float16x4_t __p0) { - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x8_t vrndq_f16(float16x8_t __p0) { - float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vrndq_v((int8x16_t)__p0, 40); - return __ret; -} -#else -__ai float16x8_t vrndq_f16(float16x8_t __p0) { - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vrndq_v((int8x16_t)__rev0, 40); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x4_t vrnd_f16(float16x4_t __p0) { - float16x4_t __ret; - __ret = 
(float16x4_t) __builtin_neon_vrnd_v((int8x8_t)__p0, 8); - return __ret; -} -#else -__ai float16x4_t vrnd_f16(float16x4_t __p0) { - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vrnd_v((int8x8_t)__rev0, 8); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x8_t vrndaq_f16(float16x8_t __p0) { - float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vrndaq_v((int8x16_t)__p0, 40); - return __ret; -} -#else -__ai float16x8_t vrndaq_f16(float16x8_t __p0) { - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vrndaq_v((int8x16_t)__rev0, 40); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x4_t vrnda_f16(float16x4_t __p0) { - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vrnda_v((int8x8_t)__p0, 8); - return __ret; -} -#else -__ai float16x4_t vrnda_f16(float16x4_t __p0) { - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vrnda_v((int8x8_t)__rev0, 8); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x8_t vrndiq_f16(float16x8_t __p0) { - float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vrndiq_v((int8x16_t)__p0, 40); - return __ret; -} -#else -__ai float16x8_t vrndiq_f16(float16x8_t __p0) { - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vrndiq_v((int8x16_t)__rev0, 40); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x4_t vrndi_f16(float16x4_t __p0) { - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vrndi_v((int8x8_t)__p0, 8); - return __ret; -} -#else -__ai float16x4_t vrndi_f16(float16x4_t __p0) { - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vrndi_v((int8x8_t)__rev0, 8); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x8_t vrndmq_f16(float16x8_t __p0) { - float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vrndmq_v((int8x16_t)__p0, 40); - return __ret; -} -#else -__ai float16x8_t vrndmq_f16(float16x8_t __p0) { - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vrndmq_v((int8x16_t)__rev0, 40); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x4_t vrndm_f16(float16x4_t __p0) { - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vrndm_v((int8x8_t)__p0, 8); - return __ret; -} -#else -__ai float16x4_t vrndm_f16(float16x4_t __p0) { - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vrndm_v((int8x8_t)__rev0, 8); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x8_t vrndnq_f16(float16x8_t __p0) { - float16x8_t 
__ret; - __ret = (float16x8_t) __builtin_neon_vrndnq_v((int8x16_t)__p0, 40); - return __ret; -} -#else -__ai float16x8_t vrndnq_f16(float16x8_t __p0) { - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vrndnq_v((int8x16_t)__rev0, 40); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x4_t vrndn_f16(float16x4_t __p0) { - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vrndn_v((int8x8_t)__p0, 8); - return __ret; -} -#else -__ai float16x4_t vrndn_f16(float16x4_t __p0) { - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vrndn_v((int8x8_t)__rev0, 8); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x8_t vrndpq_f16(float16x8_t __p0) { - float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vrndpq_v((int8x16_t)__p0, 40); - return __ret; -} -#else -__ai float16x8_t vrndpq_f16(float16x8_t __p0) { - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vrndpq_v((int8x16_t)__rev0, 40); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x4_t vrndp_f16(float16x4_t __p0) { - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vrndp_v((int8x8_t)__p0, 8); - return __ret; -} -#else -__ai float16x4_t vrndp_f16(float16x4_t __p0) { - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vrndp_v((int8x8_t)__rev0, 8); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x8_t vrndxq_f16(float16x8_t __p0) { - float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vrndxq_v((int8x16_t)__p0, 40); - return __ret; -} -#else -__ai float16x8_t vrndxq_f16(float16x8_t __p0) { - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vrndxq_v((int8x16_t)__rev0, 40); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x4_t vrndx_f16(float16x4_t __p0) { - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vrndx_v((int8x8_t)__p0, 8); - return __ret; -} -#else -__ai float16x4_t vrndx_f16(float16x4_t __p0) { - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vrndx_v((int8x8_t)__rev0, 8); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x8_t vrsqrteq_f16(float16x8_t __p0) { - float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vrsqrteq_v((int8x16_t)__p0, 40); - return __ret; -} -#else -__ai float16x8_t vrsqrteq_f16(float16x8_t __p0) { - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vrsqrteq_v((int8x16_t)__rev0, 40); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai 
float16x4_t vrsqrte_f16(float16x4_t __p0) { - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vrsqrte_v((int8x8_t)__p0, 8); - return __ret; -} -#else -__ai float16x4_t vrsqrte_f16(float16x4_t __p0) { - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vrsqrte_v((int8x8_t)__rev0, 8); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x8_t vrsqrtsq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vrsqrtsq_v((int8x16_t)__p0, (int8x16_t)__p1, 40); - return __ret; -} -#else -__ai float16x8_t vrsqrtsq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vrsqrtsq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x4_t vrsqrts_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vrsqrts_v((int8x8_t)__p0, (int8x8_t)__p1, 8); - return __ret; -} -#else -__ai float16x4_t vrsqrts_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vrsqrts_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x8_t vsubq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - __ret = __p0 - __p1; - return __ret; -} -#else -__ai float16x8_t vsubq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __ret; - __ret = __rev0 - __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x4_t vsub_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - __ret = __p0 - __p1; - return __ret; -} -#else -__ai float16x4_t vsub_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - float16x4_t __ret; - __ret = __rev0 - __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x8x2_t vtrnq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8x2_t __ret; - __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 40); - return __ret; -} -#else -__ai float16x8x2_t vtrnq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8x2_t __ret; - __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 40); - - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 
5, 4, 3, 2, 1, 0); - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x4x2_t vtrn_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4x2_t __ret; - __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 8); - return __ret; -} -#else -__ai float16x4x2_t vtrn_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - float16x4x2_t __ret; - __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 8); - - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x8_t vtrn1q_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14); - return __ret; -} -#else -__ai float16x8_t vtrn1q_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x4_t vtrn1_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6); - return __ret; -} -#else -__ai float16x4_t vtrn1_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - float16x4_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x8_t vtrn2q_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15); - return __ret; -} -#else -__ai float16x8_t vtrn2q_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x4_t vtrn2_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7); - return __ret; -} -#else -__ai float16x4_t vtrn2_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - float16x4_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x8x2_t vuzpq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8x2_t __ret; - __builtin_neon_vuzpq_v(&__ret, 
(int8x16_t)__p0, (int8x16_t)__p1, 40); - return __ret; -} -#else -__ai float16x8x2_t vuzpq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8x2_t __ret; - __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 40); - - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x4x2_t vuzp_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4x2_t __ret; - __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 8); - return __ret; -} -#else -__ai float16x4x2_t vuzp_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - float16x4x2_t __ret; - __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 8); - - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x8_t vuzp1q_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14); - return __ret; -} -#else -__ai float16x8_t vuzp1q_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x4_t vuzp1_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6); - return __ret; -} -#else -__ai float16x4_t vuzp1_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - float16x4_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x8_t vuzp2q_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15); - return __ret; -} -#else -__ai float16x8_t vuzp2q_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x4_t vuzp2_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7); - return __ret; -} -#else -__ai float16x4_t vuzp2_f16(float16x4_t __p0, float16x4_t __p1) 
{ - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - float16x4_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x8x2_t vzipq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8x2_t __ret; - __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 40); - return __ret; -} -#else -__ai float16x8x2_t vzipq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8x2_t __ret; - __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 40); - - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x4x2_t vzip_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4x2_t __ret; - __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 8); - return __ret; -} -#else -__ai float16x4x2_t vzip_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - float16x4x2_t __ret; - __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 8); - - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x8_t vzip1q_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11); - return __ret; -} -#else -__ai float16x8_t vzip1q_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x4_t vzip1_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5); - return __ret; -} -#else -__ai float16x4_t vzip1_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - float16x4_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x8_t vzip2q_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15); - return __ret; -} -#else -__ai float16x8_t vzip2q_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 
6, 5, 4, 3, 2, 1, 0); - float16x8_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x4_t vzip2_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7); - return __ret; -} -#else -__ai float16x4_t vzip2_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - float16x4_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#endif -#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && defined(__aarch64__) -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vceqzq_f16(float16x8_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 49); - return __ret; -} -#else -__ai uint16x8_t vceqzq_f16(float16x8_t __p0) { - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 49); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vceqz_f16(float16x4_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 17); - return __ret; -} -#else -__ai uint16x4_t vceqz_f16(float16x4_t __p0) { - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 17); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vcgezq_f16(float16x8_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 49); - return __ret; -} -#else -__ai uint16x8_t vcgezq_f16(float16x8_t __p0) { - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 49); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vcgez_f16(float16x4_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 17); - return __ret; -} -#else -__ai uint16x4_t vcgez_f16(float16x4_t __p0) { - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vcgez_v((int8x8_t)__rev0, 17); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vcgtzq_f16(float16x8_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 49); - return __ret; -} -#else -__ai uint16x8_t vcgtzq_f16(float16x8_t __p0) { - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 49); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vcgtz_f16(float16x4_t __p0) { - uint16x4_t __ret; - 
__ret = (uint16x4_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 17); - return __ret; -} -#else -__ai uint16x4_t vcgtz_f16(float16x4_t __p0) { - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vcgtz_v((int8x8_t)__rev0, 17); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vclezq_f16(float16x8_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 49); - return __ret; -} -#else -__ai uint16x8_t vclezq_f16(float16x8_t __p0) { - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 49); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vclez_f16(float16x4_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vclez_v((int8x8_t)__p0, 17); - return __ret; -} -#else -__ai uint16x4_t vclez_f16(float16x4_t __p0) { - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vclez_v((int8x8_t)__rev0, 17); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vcltzq_f16(float16x8_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 49); - return __ret; -} -#else -__ai uint16x8_t vcltzq_f16(float16x8_t __p0) { - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 49); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vcltz_f16(float16x4_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 17); - return __ret; -} -#else -__ai uint16x4_t vcltz_f16(float16x4_t __p0) { - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vcltz_v((int8x8_t)__rev0, 17); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x8_t vdivq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - __ret = __p0 / __p1; - return __ret; -} -#else -__ai float16x8_t vdivq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __ret; - __ret = __rev0 / __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x4_t vdiv_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - __ret = __p0 / __p1; - return __ret; -} -#else -__ai float16x4_t vdiv_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - float16x4_t __ret; - __ret = __rev0 / __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -#define 
vfmaq_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ - float16x8_t __s0 = __p0; \ - float16x8_t __s1 = __p1; \ - float16x4_t __s2 = __p2; \ - float16x8_t __ret; \ - __ret = (float16x8_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x8_t)__s2, __p3, 40); \ - __ret; \ -}) -#else -#define vfmaq_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ - float16x8_t __s0 = __p0; \ - float16x8_t __s1 = __p1; \ - float16x4_t __s2 = __p2; \ - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ - float16x8_t __ret; \ - __ret = (float16x8_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x8_t)__rev2, __p3, 40); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#define __noswap_vfmaq_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ - float16x8_t __s0 = __p0; \ - float16x8_t __s1 = __p1; \ - float16x4_t __s2 = __p2; \ - float16x8_t __ret; \ - __ret = (float16x8_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x8_t)__s2, __p3, 40); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vfma_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ - float16x4_t __s0 = __p0; \ - float16x4_t __s1 = __p1; \ - float16x4_t __s2 = __p2; \ - float16x4_t __ret; \ - __ret = (float16x4_t) __builtin_neon_vfma_lane_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x8_t)__s2, __p3, 8); \ - __ret; \ -}) -#else -#define vfma_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ - float16x4_t __s0 = __p0; \ - float16x4_t __s1 = __p1; \ - float16x4_t __s2 = __p2; \ - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - float16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ - float16x4_t __ret; \ - __ret = (float16x4_t) __builtin_neon_vfma_lane_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, __p3, 8); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#define __noswap_vfma_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ - float16x4_t __s0 = __p0; \ - float16x4_t __s1 = __p1; \ - float16x4_t __s2 = __p2; \ - float16x4_t __ret; \ - __ret = (float16x4_t) __builtin_neon_vfma_lane_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x8_t)__s2, __p3, 8); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vfmaq_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ - float16x8_t __s0 = __p0; \ - float16x8_t __s1 = __p1; \ - float16x8_t __s2 = __p2; \ - float16x8_t __ret; \ - __ret = (float16x8_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 40); \ - __ret; \ -}) -#else -#define vfmaq_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ - float16x8_t __s0 = __p0; \ - float16x8_t __s1 = __p1; \ - float16x8_t __s2 = __p2; \ - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x8_t __ret; \ - __ret = (float16x8_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, __p3, 40); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 
1, 0); \ - __ret; \ -}) -#define __noswap_vfmaq_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ - float16x8_t __s0 = __p0; \ - float16x8_t __s1 = __p1; \ - float16x8_t __s2 = __p2; \ - float16x8_t __ret; \ - __ret = (float16x8_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 40); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vfma_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ - float16x4_t __s0 = __p0; \ - float16x4_t __s1 = __p1; \ - float16x8_t __s2 = __p2; \ - float16x4_t __ret; \ - __ret = (float16x4_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__s2, __p3, 8); \ - __ret; \ -}) -#else -#define vfma_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ - float16x4_t __s0 = __p0; \ - float16x4_t __s1 = __p1; \ - float16x8_t __s2 = __p2; \ - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - float16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x4_t __ret; \ - __ret = (float16x4_t) __builtin_neon_vfma_laneq_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x16_t)__rev2, __p3, 8); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#define __noswap_vfma_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ - float16x4_t __s0 = __p0; \ - float16x4_t __s1 = __p1; \ - float16x8_t __s2 = __p2; \ - float16x4_t __ret; \ - __ret = (float16x4_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__s2, __p3, 8); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x8_t vfmaq_n_f16(float16x8_t __p0, float16x8_t __p1, float16_t __p2) { - float16x8_t __ret; - __ret = vfmaq_f16(__p0, __p1, (float16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2}); - return __ret; -} -#else -__ai float16x8_t vfmaq_n_f16(float16x8_t __p0, float16x8_t __p1, float16_t __p2) { - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __ret; - __ret = __noswap_vfmaq_f16(__rev0, __rev1, (float16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2}); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x4_t vfma_n_f16(float16x4_t __p0, float16x4_t __p1, float16_t __p2) { - float16x4_t __ret; - __ret = vfma_f16(__p0, __p1, (float16x4_t) {__p2, __p2, __p2, __p2}); - return __ret; -} -#else -__ai float16x4_t vfma_n_f16(float16x4_t __p0, float16x4_t __p1, float16_t __p2) { - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - float16x4_t __ret; - __ret = __noswap_vfma_f16(__rev0, __rev1, (float16x4_t) {__p2, __p2, __p2, __p2}); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x8_t vfmsq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { - float16x8_t __ret; - __ret = vfmaq_f16(__p0, -__p1, __p2); - return __ret; -} -#else -__ai float16x8_t vfmsq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t 
__rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __ret; - __ret = __noswap_vfmaq_f16(__rev0, -__rev1, __rev2); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x4_t vfms_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { - float16x4_t __ret; - __ret = vfma_f16(__p0, -__p1, __p2); - return __ret; -} -#else -__ai float16x4_t vfms_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - float16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - float16x4_t __ret; - __ret = __noswap_vfma_f16(__rev0, -__rev1, __rev2); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vfmsq_lane_f16(__p0_8, __p1_8, __p2_8, __p3_8) __extension__ ({ \ - float16x8_t __s0_8 = __p0_8; \ - float16x8_t __s1_8 = __p1_8; \ - float16x4_t __s2_8 = __p2_8; \ - float16x8_t __ret_8; \ - __ret_8 = vfmaq_lane_f16(__s0_8, -__s1_8, __s2_8, __p3_8); \ - __ret_8; \ -}) -#else -#define vfmsq_lane_f16(__p0_9, __p1_9, __p2_9, __p3_9) __extension__ ({ \ - float16x8_t __s0_9 = __p0_9; \ - float16x8_t __s1_9 = __p1_9; \ - float16x4_t __s2_9 = __p2_9; \ - float16x8_t __rev0_9; __rev0_9 = __builtin_shufflevector(__s0_9, __s0_9, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x8_t __rev1_9; __rev1_9 = __builtin_shufflevector(__s1_9, __s1_9, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x4_t __rev2_9; __rev2_9 = __builtin_shufflevector(__s2_9, __s2_9, 3, 2, 1, 0); \ - float16x8_t __ret_9; \ - __ret_9 = __noswap_vfmaq_lane_f16(__rev0_9, -__rev1_9, __rev2_9, __p3_9); \ - __ret_9 = __builtin_shufflevector(__ret_9, __ret_9, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_9; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vfms_lane_f16(__p0_10, __p1_10, __p2_10, __p3_10) __extension__ ({ \ - float16x4_t __s0_10 = __p0_10; \ - float16x4_t __s1_10 = __p1_10; \ - float16x4_t __s2_10 = __p2_10; \ - float16x4_t __ret_10; \ - __ret_10 = vfma_lane_f16(__s0_10, -__s1_10, __s2_10, __p3_10); \ - __ret_10; \ -}) -#else -#define vfms_lane_f16(__p0_11, __p1_11, __p2_11, __p3_11) __extension__ ({ \ - float16x4_t __s0_11 = __p0_11; \ - float16x4_t __s1_11 = __p1_11; \ - float16x4_t __s2_11 = __p2_11; \ - float16x4_t __rev0_11; __rev0_11 = __builtin_shufflevector(__s0_11, __s0_11, 3, 2, 1, 0); \ - float16x4_t __rev1_11; __rev1_11 = __builtin_shufflevector(__s1_11, __s1_11, 3, 2, 1, 0); \ - float16x4_t __rev2_11; __rev2_11 = __builtin_shufflevector(__s2_11, __s2_11, 3, 2, 1, 0); \ - float16x4_t __ret_11; \ - __ret_11 = __noswap_vfma_lane_f16(__rev0_11, -__rev1_11, __rev2_11, __p3_11); \ - __ret_11 = __builtin_shufflevector(__ret_11, __ret_11, 3, 2, 1, 0); \ - __ret_11; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vfmsq_laneq_f16(__p0_12, __p1_12, __p2_12, __p3_12) __extension__ ({ \ - float16x8_t __s0_12 = __p0_12; \ - float16x8_t __s1_12 = __p1_12; \ - float16x8_t __s2_12 = __p2_12; \ - float16x8_t __ret_12; \ - __ret_12 = vfmaq_laneq_f16(__s0_12, -__s1_12, __s2_12, __p3_12); \ - __ret_12; \ -}) -#else -#define vfmsq_laneq_f16(__p0_13, __p1_13, __p2_13, __p3_13) __extension__ ({ \ - float16x8_t __s0_13 = __p0_13; \ - float16x8_t __s1_13 = __p1_13; \ - float16x8_t __s2_13 = __p2_13; \ - float16x8_t __rev0_13; __rev0_13 = __builtin_shufflevector(__s0_13, __s0_13, 7, 6, 5, 4, 3, 2, 1, 0); \ - 
float16x8_t __rev1_13; __rev1_13 = __builtin_shufflevector(__s1_13, __s1_13, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x8_t __rev2_13; __rev2_13 = __builtin_shufflevector(__s2_13, __s2_13, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x8_t __ret_13; \ - __ret_13 = __noswap_vfmaq_laneq_f16(__rev0_13, -__rev1_13, __rev2_13, __p3_13); \ - __ret_13 = __builtin_shufflevector(__ret_13, __ret_13, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_13; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vfms_laneq_f16(__p0_14, __p1_14, __p2_14, __p3_14) __extension__ ({ \ - float16x4_t __s0_14 = __p0_14; \ - float16x4_t __s1_14 = __p1_14; \ - float16x8_t __s2_14 = __p2_14; \ - float16x4_t __ret_14; \ - __ret_14 = vfma_laneq_f16(__s0_14, -__s1_14, __s2_14, __p3_14); \ - __ret_14; \ -}) -#else -#define vfms_laneq_f16(__p0_15, __p1_15, __p2_15, __p3_15) __extension__ ({ \ - float16x4_t __s0_15 = __p0_15; \ - float16x4_t __s1_15 = __p1_15; \ - float16x8_t __s2_15 = __p2_15; \ - float16x4_t __rev0_15; __rev0_15 = __builtin_shufflevector(__s0_15, __s0_15, 3, 2, 1, 0); \ - float16x4_t __rev1_15; __rev1_15 = __builtin_shufflevector(__s1_15, __s1_15, 3, 2, 1, 0); \ - float16x8_t __rev2_15; __rev2_15 = __builtin_shufflevector(__s2_15, __s2_15, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x4_t __ret_15; \ - __ret_15 = __noswap_vfma_laneq_f16(__rev0_15, -__rev1_15, __rev2_15, __p3_15); \ - __ret_15 = __builtin_shufflevector(__ret_15, __ret_15, 3, 2, 1, 0); \ - __ret_15; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x8_t vfmsq_n_f16(float16x8_t __p0, float16x8_t __p1, float16_t __p2) { - float16x8_t __ret; - __ret = vfmaq_f16(__p0, -__p1, (float16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2}); - return __ret; -} -#else -__ai float16x8_t vfmsq_n_f16(float16x8_t __p0, float16x8_t __p1, float16_t __p2) { - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __ret; - __ret = __noswap_vfmaq_f16(__rev0, -__rev1, (float16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2}); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x4_t vfms_n_f16(float16x4_t __p0, float16x4_t __p1, float16_t __p2) { - float16x4_t __ret; - __ret = vfma_f16(__p0, -__p1, (float16x4_t) {__p2, __p2, __p2, __p2}); - return __ret; -} -#else -__ai float16x4_t vfms_n_f16(float16x4_t __p0, float16x4_t __p1, float16_t __p2) { - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - float16x4_t __ret; - __ret = __noswap_vfma_f16(__rev0, -__rev1, (float16x4_t) {__p2, __p2, __p2, __p2}); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16_t vmaxnmvq_f16(float16x8_t __p0) { - float16_t __ret; - __ret = (float16_t) __builtin_neon_vmaxnmvq_f16((int8x16_t)__p0); - return __ret; -} -#else -__ai float16_t vmaxnmvq_f16(float16x8_t __p0) { - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16_t __ret; - __ret = (float16_t) __builtin_neon_vmaxnmvq_f16((int8x16_t)__rev0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16_t vmaxnmv_f16(float16x4_t __p0) { - float16_t __ret; - __ret = (float16_t) __builtin_neon_vmaxnmv_f16((int8x8_t)__p0); - return __ret; -} -#else -__ai float16_t vmaxnmv_f16(float16x4_t __p0) { 
- float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16_t __ret; - __ret = (float16_t) __builtin_neon_vmaxnmv_f16((int8x8_t)__rev0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16_t vmaxvq_f16(float16x8_t __p0) { - float16_t __ret; - __ret = (float16_t) __builtin_neon_vmaxvq_f16((int8x16_t)__p0); - return __ret; -} -#else -__ai float16_t vmaxvq_f16(float16x8_t __p0) { - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16_t __ret; - __ret = (float16_t) __builtin_neon_vmaxvq_f16((int8x16_t)__rev0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16_t vmaxv_f16(float16x4_t __p0) { - float16_t __ret; - __ret = (float16_t) __builtin_neon_vmaxv_f16((int8x8_t)__p0); - return __ret; -} -#else -__ai float16_t vmaxv_f16(float16x4_t __p0) { - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16_t __ret; - __ret = (float16_t) __builtin_neon_vmaxv_f16((int8x8_t)__rev0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16_t vminnmvq_f16(float16x8_t __p0) { - float16_t __ret; - __ret = (float16_t) __builtin_neon_vminnmvq_f16((int8x16_t)__p0); - return __ret; -} -#else -__ai float16_t vminnmvq_f16(float16x8_t __p0) { - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16_t __ret; - __ret = (float16_t) __builtin_neon_vminnmvq_f16((int8x16_t)__rev0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16_t vminnmv_f16(float16x4_t __p0) { - float16_t __ret; - __ret = (float16_t) __builtin_neon_vminnmv_f16((int8x8_t)__p0); - return __ret; -} -#else -__ai float16_t vminnmv_f16(float16x4_t __p0) { - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16_t __ret; - __ret = (float16_t) __builtin_neon_vminnmv_f16((int8x8_t)__rev0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16_t vminvq_f16(float16x8_t __p0) { - float16_t __ret; - __ret = (float16_t) __builtin_neon_vminvq_f16((int8x16_t)__p0); - return __ret; -} -#else -__ai float16_t vminvq_f16(float16x8_t __p0) { - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16_t __ret; - __ret = (float16_t) __builtin_neon_vminvq_f16((int8x16_t)__rev0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16_t vminv_f16(float16x4_t __p0) { - float16_t __ret; - __ret = (float16_t) __builtin_neon_vminv_f16((int8x8_t)__p0); - return __ret; -} -#else -__ai float16_t vminv_f16(float16x4_t __p0) { - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16_t __ret; - __ret = (float16_t) __builtin_neon_vminv_f16((int8x8_t)__rev0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x8_t vmulxq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vmulxq_v((int8x16_t)__p0, (int8x16_t)__p1, 40); - return __ret; -} -#else -__ai float16x8_t vmulxq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vmulxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -__ai float16x8_t __noswap_vmulxq_f16(float16x8_t __p0, float16x8_t __p1) { - 
float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vmulxq_v((int8x16_t)__p0, (int8x16_t)__p1, 40); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x4_t vmulx_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vmulx_v((int8x8_t)__p0, (int8x8_t)__p1, 8); - return __ret; -} -#else -__ai float16x4_t vmulx_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vmulx_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -__ai float16x4_t __noswap_vmulx_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vmulx_v((int8x8_t)__p0, (int8x8_t)__p1, 8); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmulxq_lane_f16(__p0, __p1, __p2) __extension__ ({ \ - float16x8_t __s0 = __p0; \ - float16x4_t __s1 = __p1; \ - float16x8_t __ret; \ - __ret = vmulxq_f16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2)); \ - __ret; \ -}) -#else -#define vmulxq_lane_f16(__p0, __p1, __p2) __extension__ ({ \ - float16x8_t __s0 = __p0; \ - float16x4_t __s1 = __p1; \ - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - float16x8_t __ret; \ - __ret = __noswap_vmulxq_f16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2)); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmulx_lane_f16(__p0, __p1, __p2) __extension__ ({ \ - float16x4_t __s0 = __p0; \ - float16x4_t __s1 = __p1; \ - float16x4_t __ret; \ - __ret = vmulx_f16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \ - __ret; \ -}) -#else -#define vmulx_lane_f16(__p0, __p1, __p2) __extension__ ({ \ - float16x4_t __s0 = __p0; \ - float16x4_t __s1 = __p1; \ - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - float16x4_t __ret; \ - __ret = __noswap_vmulx_f16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmulxq_laneq_f16(__p0, __p1, __p2) __extension__ ({ \ - float16x8_t __s0 = __p0; \ - float16x8_t __s1 = __p1; \ - float16x8_t __ret; \ - __ret = vmulxq_f16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2)); \ - __ret; \ -}) -#else -#define vmulxq_laneq_f16(__p0, __p1, __p2) __extension__ ({ \ - float16x8_t __s0 = __p0; \ - float16x8_t __s1 = __p1; \ - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x8_t __ret; \ - __ret = __noswap_vmulxq_f16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2)); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmulx_laneq_f16(__p0, __p1, __p2) 
__extension__ ({ \ - float16x4_t __s0 = __p0; \ - float16x8_t __s1 = __p1; \ - float16x4_t __ret; \ - __ret = vmulx_f16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \ - __ret; \ -}) -#else -#define vmulx_laneq_f16(__p0, __p1, __p2) __extension__ ({ \ - float16x4_t __s0 = __p0; \ - float16x8_t __s1 = __p1; \ - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x4_t __ret; \ - __ret = __noswap_vmulx_f16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x8_t vmulxq_n_f16(float16x8_t __p0, float16_t __p1) { - float16x8_t __ret; - __ret = vmulxq_f16(__p0, (float16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1}); - return __ret; -} -#else -__ai float16x8_t vmulxq_n_f16(float16x8_t __p0, float16_t __p1) { - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __ret; - __ret = __noswap_vmulxq_f16(__rev0, (float16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1}); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x4_t vmulx_n_f16(float16x4_t __p0, float16_t __p1) { - float16x4_t __ret; - __ret = vmulx_f16(__p0, (float16x4_t) {__p1, __p1, __p1, __p1}); - return __ret; -} -#else -__ai float16x4_t vmulx_n_f16(float16x4_t __p0, float16_t __p1) { - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __ret; - __ret = __noswap_vmulx_f16(__rev0, (float16x4_t) {__p1, __p1, __p1, __p1}); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x8_t vpmaxnmq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vpmaxnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 40); - return __ret; -} -#else -__ai float16x8_t vpmaxnmq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vpmaxnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x4_t vpmaxnm_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vpmaxnm_v((int8x8_t)__p0, (int8x8_t)__p1, 8); - return __ret; -} -#else -__ai float16x4_t vpmaxnm_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vpmaxnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x8_t vpminnmq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vpminnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 40); - return __ret; -} -#else -__ai float16x8_t vpminnmq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t 
__rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vpminnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x4_t vpminnm_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vpminnm_v((int8x8_t)__p0, (int8x8_t)__p1, 8); - return __ret; -} -#else -__ai float16x4_t vpminnm_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vpminnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x8_t vsqrtq_f16(float16x8_t __p0) { - float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vsqrtq_v((int8x16_t)__p0, 40); - return __ret; -} -#else -__ai float16x8_t vsqrtq_f16(float16x8_t __p0) { - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vsqrtq_v((int8x16_t)__rev0, 40); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x4_t vsqrt_f16(float16x4_t __p0) { - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vsqrt_v((int8x8_t)__p0, 8); - return __ret; -} -#else -__ai float16x4_t vsqrt_f16(float16x4_t __p0) { - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vsqrt_v((int8x8_t)__rev0, 8); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#endif -#if defined(__ARM_FEATURE_QRDMX) -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vqrdmlahq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { - int32x4_t __ret; - __ret = vqaddq_s32(__p0, vqrdmulhq_s32(__p1, __p2)); - return __ret; -} -#else -__ai int32x4_t vqrdmlahq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - int32x4_t __ret; - __ret = __noswap_vqaddq_s32(__rev0, __noswap_vqrdmulhq_s32(__rev1, __rev2)); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vqrdmlahq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { - int16x8_t __ret; - __ret = vqaddq_s16(__p0, vqrdmulhq_s16(__p1, __p2)); - return __ret; -} -#else -__ai int16x8_t vqrdmlahq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __ret; - __ret = __noswap_vqaddq_s16(__rev0, __noswap_vqrdmulhq_s16(__rev1, __rev2)); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return 
__ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vqrdmlah_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { - int32x2_t __ret; - __ret = vqadd_s32(__p0, vqrdmulh_s32(__p1, __p2)); - return __ret; -} -#else -__ai int32x2_t vqrdmlah_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); - int32x2_t __ret; - __ret = __noswap_vqadd_s32(__rev0, __noswap_vqrdmulh_s32(__rev1, __rev2)); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vqrdmlah_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { - int16x4_t __ret; - __ret = vqadd_s16(__p0, vqrdmulh_s16(__p1, __p2)); - return __ret; -} -#else -__ai int16x4_t vqrdmlah_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - int16x4_t __ret; - __ret = __noswap_vqadd_s16(__rev0, __noswap_vqrdmulh_s16(__rev1, __rev2)); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqrdmlahq_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ - int32x4_t __s0 = __p0; \ - int32x4_t __s1 = __p1; \ - int32x2_t __s2 = __p2; \ - int32x4_t __ret; \ - __ret = vqaddq_s32(__s0, vqrdmulhq_s32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3))); \ - __ret; \ -}) -#else -#define vqrdmlahq_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ - int32x4_t __s0 = __p0; \ - int32x4_t __s1 = __p1; \ - int32x2_t __s2 = __p2; \ - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \ - int32x4_t __ret; \ - __ret = __noswap_vqaddq_s32(__rev0, __noswap_vqrdmulhq_s32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3))); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqrdmlahq_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ - int16x8_t __s0 = __p0; \ - int16x8_t __s1 = __p1; \ - int16x4_t __s2 = __p2; \ - int16x8_t __ret; \ - __ret = vqaddq_s16(__s0, vqrdmulhq_s16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3))); \ - __ret; \ -}) -#else -#define vqrdmlahq_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ - int16x8_t __s0 = __p0; \ - int16x8_t __s1 = __p1; \ - int16x4_t __s2 = __p2; \ - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ - int16x8_t __ret; \ - __ret = __noswap_vqaddq_s16(__rev0, __noswap_vqrdmulhq_s16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3))); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqrdmlah_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ - int32x2_t __s0 = __p0; \ - 
int32x2_t __s1 = __p1; \ - int32x2_t __s2 = __p2; \ - int32x2_t __ret; \ - __ret = vqadd_s32(__s0, vqrdmulh_s32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3))); \ - __ret; \ -}) -#else -#define vqrdmlah_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ - int32x2_t __s0 = __p0; \ - int32x2_t __s1 = __p1; \ - int32x2_t __s2 = __p2; \ - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \ - int32x2_t __ret; \ - __ret = __noswap_vqadd_s32(__rev0, __noswap_vqrdmulh_s32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3))); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqrdmlah_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ - int16x4_t __s0 = __p0; \ - int16x4_t __s1 = __p1; \ - int16x4_t __s2 = __p2; \ - int16x4_t __ret; \ - __ret = vqadd_s16(__s0, vqrdmulh_s16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3))); \ - __ret; \ -}) -#else -#define vqrdmlah_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ - int16x4_t __s0 = __p0; \ - int16x4_t __s1 = __p1; \ - int16x4_t __s2 = __p2; \ - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ - int16x4_t __ret; \ - __ret = __noswap_vqadd_s16(__rev0, __noswap_vqrdmulh_s16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3))); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vqrdmlshq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { - int32x4_t __ret; - __ret = vqsubq_s32(__p0, vqrdmulhq_s32(__p1, __p2)); - return __ret; -} -#else -__ai int32x4_t vqrdmlshq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - int32x4_t __ret; - __ret = __noswap_vqsubq_s32(__rev0, __noswap_vqrdmulhq_s32(__rev1, __rev2)); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vqrdmlshq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { - int16x8_t __ret; - __ret = vqsubq_s16(__p0, vqrdmulhq_s16(__p1, __p2)); - return __ret; -} -#else -__ai int16x8_t vqrdmlshq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __ret; - __ret = __noswap_vqsubq_s16(__rev0, __noswap_vqrdmulhq_s16(__rev1, __rev2)); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vqrdmlsh_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { - int32x2_t __ret; - __ret = vqsub_s32(__p0, vqrdmulh_s32(__p1, __p2)); - return __ret; -} -#else -__ai int32x2_t vqrdmlsh_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { - int32x2_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); - int32x2_t __ret; - __ret = __noswap_vqsub_s32(__rev0, __noswap_vqrdmulh_s32(__rev1, __rev2)); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vqrdmlsh_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { - int16x4_t __ret; - __ret = vqsub_s16(__p0, vqrdmulh_s16(__p1, __p2)); - return __ret; -} -#else -__ai int16x4_t vqrdmlsh_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - int16x4_t __ret; - __ret = __noswap_vqsub_s16(__rev0, __noswap_vqrdmulh_s16(__rev1, __rev2)); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqrdmlshq_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ - int32x4_t __s0 = __p0; \ - int32x4_t __s1 = __p1; \ - int32x2_t __s2 = __p2; \ - int32x4_t __ret; \ - __ret = vqsubq_s32(__s0, vqrdmulhq_s32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3))); \ - __ret; \ -}) -#else -#define vqrdmlshq_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ - int32x4_t __s0 = __p0; \ - int32x4_t __s1 = __p1; \ - int32x2_t __s2 = __p2; \ - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \ - int32x4_t __ret; \ - __ret = __noswap_vqsubq_s32(__rev0, __noswap_vqrdmulhq_s32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3))); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqrdmlshq_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ - int16x8_t __s0 = __p0; \ - int16x8_t __s1 = __p1; \ - int16x4_t __s2 = __p2; \ - int16x8_t __ret; \ - __ret = vqsubq_s16(__s0, vqrdmulhq_s16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3))); \ - __ret; \ -}) -#else -#define vqrdmlshq_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ - int16x8_t __s0 = __p0; \ - int16x8_t __s1 = __p1; \ - int16x4_t __s2 = __p2; \ - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ - int16x8_t __ret; \ - __ret = __noswap_vqsubq_s16(__rev0, __noswap_vqrdmulhq_s16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3))); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqrdmlsh_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ - int32x2_t __s0 = __p0; \ - int32x2_t __s1 = __p1; \ - int32x2_t __s2 = __p2; \ - int32x2_t __ret; \ - __ret = vqsub_s32(__s0, vqrdmulh_s32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3))); \ - __ret; \ -}) -#else -#define vqrdmlsh_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ - int32x2_t __s0 = __p0; \ - int32x2_t __s1 = __p1; \ - int32x2_t __s2 = 
__p2; \ - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \ - int32x2_t __ret; \ - __ret = __noswap_vqsub_s32(__rev0, __noswap_vqrdmulh_s32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3))); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqrdmlsh_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ - int16x4_t __s0 = __p0; \ - int16x4_t __s1 = __p1; \ - int16x4_t __s2 = __p2; \ - int16x4_t __ret; \ - __ret = vqsub_s16(__s0, vqrdmulh_s16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3))); \ - __ret; \ -}) -#else -#define vqrdmlsh_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ - int16x4_t __s0 = __p0; \ - int16x4_t __s1 = __p1; \ - int16x4_t __s2 = __p2; \ - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ - int16x4_t __ret; \ - __ret = __noswap_vqsub_s16(__rev0, __noswap_vqrdmulh_s16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3))); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#endif -#if defined(__ARM_FEATURE_QRDMX) && defined(__aarch64__) -#ifdef __LITTLE_ENDIAN__ -#define vqrdmlahq_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ - int32x4_t __s0 = __p0; \ - int32x4_t __s1 = __p1; \ - int32x4_t __s2 = __p2; \ - int32x4_t __ret; \ - __ret = vqaddq_s32(__s0, vqrdmulhq_s32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3))); \ - __ret; \ -}) -#else -#define vqrdmlahq_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ - int32x4_t __s0 = __p0; \ - int32x4_t __s1 = __p1; \ - int32x4_t __s2 = __p2; \ - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - int32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ - int32x4_t __ret; \ - __ret = __noswap_vqaddq_s32(__rev0, __noswap_vqrdmulhq_s32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3))); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqrdmlahq_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ - int16x8_t __s0 = __p0; \ - int16x8_t __s1 = __p1; \ - int16x8_t __s2 = __p2; \ - int16x8_t __ret; \ - __ret = vqaddq_s16(__s0, vqrdmulhq_s16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3))); \ - __ret; \ -}) -#else -#define vqrdmlahq_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ - int16x8_t __s0 = __p0; \ - int16x8_t __s1 = __p1; \ - int16x8_t __s2 = __p2; \ - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x8_t __ret; \ - __ret = __noswap_vqaddq_s16(__rev0, __noswap_vqrdmulhq_s16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3))); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef 
__LITTLE_ENDIAN__ -#define vqrdmlah_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ - int32x2_t __s0 = __p0; \ - int32x2_t __s1 = __p1; \ - int32x4_t __s2 = __p2; \ - int32x2_t __ret; \ - __ret = vqadd_s32(__s0, vqrdmulh_s32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3))); \ - __ret; \ -}) -#else -#define vqrdmlah_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ - int32x2_t __s0 = __p0; \ - int32x2_t __s1 = __p1; \ - int32x4_t __s2 = __p2; \ - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - int32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ - int32x2_t __ret; \ - __ret = __noswap_vqadd_s32(__rev0, __noswap_vqrdmulh_s32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3))); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqrdmlah_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ - int16x4_t __s0 = __p0; \ - int16x4_t __s1 = __p1; \ - int16x8_t __s2 = __p2; \ - int16x4_t __ret; \ - __ret = vqadd_s16(__s0, vqrdmulh_s16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3))); \ - __ret; \ -}) -#else -#define vqrdmlah_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ - int16x4_t __s0 = __p0; \ - int16x4_t __s1 = __p1; \ - int16x8_t __s2 = __p2; \ - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - int16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x4_t __ret; \ - __ret = __noswap_vqadd_s16(__rev0, __noswap_vqrdmulh_s16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3))); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqrdmlshq_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ - int32x4_t __s0 = __p0; \ - int32x4_t __s1 = __p1; \ - int32x4_t __s2 = __p2; \ - int32x4_t __ret; \ - __ret = vqsubq_s32(__s0, vqrdmulhq_s32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3))); \ - __ret; \ -}) -#else -#define vqrdmlshq_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ - int32x4_t __s0 = __p0; \ - int32x4_t __s1 = __p1; \ - int32x4_t __s2 = __p2; \ - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - int32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ - int32x4_t __ret; \ - __ret = __noswap_vqsubq_s32(__rev0, __noswap_vqrdmulhq_s32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3))); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqrdmlshq_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ - int16x8_t __s0 = __p0; \ - int16x8_t __s1 = __p1; \ - int16x8_t __s2 = __p2; \ - int16x8_t __ret; \ - __ret = vqsubq_s16(__s0, vqrdmulhq_s16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3))); \ - __ret; \ -}) -#else -#define vqrdmlshq_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ - int16x8_t __s0 = __p0; \ - int16x8_t __s1 = __p1; \ - int16x8_t __s2 = __p2; \ - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 
2, 1, 0); \ - int16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x8_t __ret; \ - __ret = __noswap_vqsubq_s16(__rev0, __noswap_vqrdmulhq_s16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3))); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqrdmlsh_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ - int32x2_t __s0 = __p0; \ - int32x2_t __s1 = __p1; \ - int32x4_t __s2 = __p2; \ - int32x2_t __ret; \ - __ret = vqsub_s32(__s0, vqrdmulh_s32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3))); \ - __ret; \ -}) -#else -#define vqrdmlsh_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ - int32x2_t __s0 = __p0; \ - int32x2_t __s1 = __p1; \ - int32x4_t __s2 = __p2; \ - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - int32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ - int32x2_t __ret; \ - __ret = __noswap_vqsub_s32(__rev0, __noswap_vqrdmulh_s32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3))); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqrdmlsh_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ - int16x4_t __s0 = __p0; \ - int16x4_t __s1 = __p1; \ - int16x8_t __s2 = __p2; \ - int16x4_t __ret; \ - __ret = vqsub_s16(__s0, vqrdmulh_s16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3))); \ - __ret; \ -}) -#else -#define vqrdmlsh_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ - int16x4_t __s0 = __p0; \ - int16x4_t __s1 = __p1; \ - int16x8_t __s2 = __p2; \ - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - int16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x4_t __ret; \ - __ret = __noswap_vqsub_s16(__rev0, __noswap_vqrdmulh_s16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3))); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#endif -#if defined(__aarch64__) -#ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vabdq_f64(float64x2_t __p0, float64x2_t __p1) { - float64x2_t __ret; - __ret = (float64x2_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 42); - return __ret; -} -#else -__ai float64x2_t vabdq_f64(float64x2_t __p0, float64x2_t __p1) { - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - float64x2_t __ret; - __ret = (float64x2_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64x1_t vabd_f64(float64x1_t __p0, float64x1_t __p1) { - float64x1_t __ret; - __ret = (float64x1_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 10); - return __ret; -} -#else -__ai float64x1_t vabd_f64(float64x1_t __p0, float64x1_t __p1) { - float64x1_t __ret; - __ret = (float64x1_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 10); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64_t vabdd_f64(float64_t __p0, float64_t __p1) { - float64_t __ret; - __ret = (float64_t) __builtin_neon_vabdd_f64(__p0, __p1); - return 
__ret; -} -#else -__ai float64_t vabdd_f64(float64_t __p0, float64_t __p1) { - float64_t __ret; - __ret = (float64_t) __builtin_neon_vabdd_f64(__p0, __p1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32_t vabds_f32(float32_t __p0, float32_t __p1) { - float32_t __ret; - __ret = (float32_t) __builtin_neon_vabds_f32(__p0, __p1); - return __ret; -} -#else -__ai float32_t vabds_f32(float32_t __p0, float32_t __p1) { - float32_t __ret; - __ret = (float32_t) __builtin_neon_vabds_f32(__p0, __p1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vabsq_f64(float64x2_t __p0) { - float64x2_t __ret; - __ret = (float64x2_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 42); - return __ret; -} -#else -__ai float64x2_t vabsq_f64(float64x2_t __p0) { - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float64x2_t __ret; - __ret = (float64x2_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 42); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vabsq_s64(int64x2_t __p0) { - int64x2_t __ret; - __ret = (int64x2_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 35); - return __ret; -} -#else -__ai int64x2_t vabsq_s64(int64x2_t __p0) { - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int64x2_t __ret; - __ret = (int64x2_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 35); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64x1_t vabs_f64(float64x1_t __p0) { - float64x1_t __ret; - __ret = (float64x1_t) __builtin_neon_vabs_v((int8x8_t)__p0, 10); - return __ret; -} -#else -__ai float64x1_t vabs_f64(float64x1_t __p0) { - float64x1_t __ret; - __ret = (float64x1_t) __builtin_neon_vabs_v((int8x8_t)__p0, 10); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x1_t vabs_s64(int64x1_t __p0) { - int64x1_t __ret; - __ret = (int64x1_t) __builtin_neon_vabs_v((int8x8_t)__p0, 3); - return __ret; -} -#else -__ai int64x1_t vabs_s64(int64x1_t __p0) { - int64x1_t __ret; - __ret = (int64x1_t) __builtin_neon_vabs_v((int8x8_t)__p0, 3); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64_t vabsd_s64(int64_t __p0) { - int64_t __ret; - __ret = (int64_t) __builtin_neon_vabsd_s64(__p0); - return __ret; -} -#else -__ai int64_t vabsd_s64(int64_t __p0) { - int64_t __ret; - __ret = (int64_t) __builtin_neon_vabsd_s64(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vaddq_f64(float64x2_t __p0, float64x2_t __p1) { - float64x2_t __ret; - __ret = __p0 + __p1; - return __ret; -} -#else -__ai float64x2_t vaddq_f64(float64x2_t __p0, float64x2_t __p1) { - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - float64x2_t __ret; - __ret = __rev0 + __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64x1_t vadd_f64(float64x1_t __p0, float64x1_t __p1) { - float64x1_t __ret; - __ret = __p0 + __p1; - return __ret; -} -#else -__ai float64x1_t vadd_f64(float64x1_t __p0, float64x1_t __p1) { - float64x1_t __ret; - __ret = __p0 + __p1; - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64_t vaddd_u64(uint64_t __p0, uint64_t __p1) { - uint64_t __ret; - __ret = (uint64_t) __builtin_neon_vaddd_u64(__p0, __p1); - return __ret; -} -#else -__ai uint64_t vaddd_u64(uint64_t __p0, uint64_t __p1) { - uint64_t __ret; - 
__ret = (uint64_t) __builtin_neon_vaddd_u64(__p0, __p1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64_t vaddd_s64(int64_t __p0, int64_t __p1) { - int64_t __ret; - __ret = (int64_t) __builtin_neon_vaddd_s64(__p0, __p1); - return __ret; -} -#else -__ai int64_t vaddd_s64(int64_t __p0, int64_t __p1) { - int64_t __ret; - __ret = (int64_t) __builtin_neon_vaddd_s64(__p0, __p1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vaddhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { - uint16x8_t __ret; - __ret = vcombine_u16(__p0, vaddhn_u32(__p1, __p2)); - return __ret; -} -#else -__ai uint16x8_t vaddhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - uint16x8_t __ret; - __ret = __noswap_vcombine_u16(__rev0, __noswap_vaddhn_u32(__rev1, __rev2)); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vaddhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { - uint32x4_t __ret; - __ret = vcombine_u32(__p0, vaddhn_u64(__p1, __p2)); - return __ret; -} -#else -__ai uint32x4_t vaddhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); - uint32x4_t __ret; - __ret = __noswap_vcombine_u32(__rev0, __noswap_vaddhn_u64(__rev1, __rev2)); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vaddhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { - uint8x16_t __ret; - __ret = vcombine_u8(__p0, vaddhn_u16(__p1, __p2)); - return __ret; -} -#else -__ai uint8x16_t vaddhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __ret; - __ret = __noswap_vcombine_u8(__rev0, __noswap_vaddhn_u16(__rev1, __rev2)); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vaddhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) { - int16x8_t __ret; - __ret = vcombine_s16(__p0, vaddhn_s32(__p1, __p2)); - return __ret; -} -#else -__ai int16x8_t vaddhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) { - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - int16x8_t __ret; - __ret = __noswap_vcombine_s16(__rev0, __noswap_vaddhn_s32(__rev1, __rev2)); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vaddhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) { - int32x4_t __ret; - __ret = vcombine_s32(__p0, 
vaddhn_s64(__p1, __p2)); - return __ret; -} -#else -__ai int32x4_t vaddhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) { - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - int64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); - int32x4_t __ret; - __ret = __noswap_vcombine_s32(__rev0, __noswap_vaddhn_s64(__rev1, __rev2)); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vaddhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) { - int8x16_t __ret; - __ret = vcombine_s8(__p0, vaddhn_s16(__p1, __p2)); - return __ret; -} -#else -__ai int8x16_t vaddhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) { - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __ret; - __ret = __noswap_vcombine_s8(__rev0, __noswap_vaddhn_s16(__rev1, __rev2)); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16_t vaddlvq_u8(uint8x16_t __p0) { - uint16_t __ret; - __ret = (uint16_t) __builtin_neon_vaddlvq_u8((int8x16_t)__p0); - return __ret; -} -#else -__ai uint16_t vaddlvq_u8(uint8x16_t __p0) { - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint16_t __ret; - __ret = (uint16_t) __builtin_neon_vaddlvq_u8((int8x16_t)__rev0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64_t vaddlvq_u32(uint32x4_t __p0) { - uint64_t __ret; - __ret = (uint64_t) __builtin_neon_vaddlvq_u32((int8x16_t)__p0); - return __ret; -} -#else -__ai uint64_t vaddlvq_u32(uint32x4_t __p0) { - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint64_t __ret; - __ret = (uint64_t) __builtin_neon_vaddlvq_u32((int8x16_t)__rev0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32_t vaddlvq_u16(uint16x8_t __p0) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vaddlvq_u16((int8x16_t)__p0); - return __ret; -} -#else -__ai uint32_t vaddlvq_u16(uint16x8_t __p0) { - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vaddlvq_u16((int8x16_t)__rev0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16_t vaddlvq_s8(int8x16_t __p0) { - int16_t __ret; - __ret = (int16_t) __builtin_neon_vaddlvq_s8((int8x16_t)__p0); - return __ret; -} -#else -__ai int16_t vaddlvq_s8(int8x16_t __p0) { - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int16_t __ret; - __ret = (int16_t) __builtin_neon_vaddlvq_s8((int8x16_t)__rev0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64_t vaddlvq_s32(int32x4_t __p0) { - int64_t __ret; - __ret = (int64_t) __builtin_neon_vaddlvq_s32((int8x16_t)__p0); - return __ret; -} -#else -__ai int64_t vaddlvq_s32(int32x4_t __p0) { - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int64_t __ret; - __ret = (int64_t) __builtin_neon_vaddlvq_s32((int8x16_t)__rev0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32_t 
vaddlvq_s16(int16x8_t __p0) { - int32_t __ret; - __ret = (int32_t) __builtin_neon_vaddlvq_s16((int8x16_t)__p0); - return __ret; -} -#else -__ai int32_t vaddlvq_s16(int16x8_t __p0) { - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int32_t __ret; - __ret = (int32_t) __builtin_neon_vaddlvq_s16((int8x16_t)__rev0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16_t vaddlv_u8(uint8x8_t __p0) { - uint16_t __ret; - __ret = (uint16_t) __builtin_neon_vaddlv_u8((int8x8_t)__p0); - return __ret; -} -#else -__ai uint16_t vaddlv_u8(uint8x8_t __p0) { - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint16_t __ret; - __ret = (uint16_t) __builtin_neon_vaddlv_u8((int8x8_t)__rev0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64_t vaddlv_u32(uint32x2_t __p0) { - uint64_t __ret; - __ret = (uint64_t) __builtin_neon_vaddlv_u32((int8x8_t)__p0); - return __ret; -} -#else -__ai uint64_t vaddlv_u32(uint32x2_t __p0) { - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint64_t __ret; - __ret = (uint64_t) __builtin_neon_vaddlv_u32((int8x8_t)__rev0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32_t vaddlv_u16(uint16x4_t __p0) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vaddlv_u16((int8x8_t)__p0); - return __ret; -} -#else -__ai uint32_t vaddlv_u16(uint16x4_t __p0) { - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vaddlv_u16((int8x8_t)__rev0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16_t vaddlv_s8(int8x8_t __p0) { - int16_t __ret; - __ret = (int16_t) __builtin_neon_vaddlv_s8((int8x8_t)__p0); - return __ret; -} -#else -__ai int16_t vaddlv_s8(int8x8_t __p0) { - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16_t __ret; - __ret = (int16_t) __builtin_neon_vaddlv_s8((int8x8_t)__rev0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64_t vaddlv_s32(int32x2_t __p0) { - int64_t __ret; - __ret = (int64_t) __builtin_neon_vaddlv_s32((int8x8_t)__p0); - return __ret; -} -#else -__ai int64_t vaddlv_s32(int32x2_t __p0) { - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int64_t __ret; - __ret = (int64_t) __builtin_neon_vaddlv_s32((int8x8_t)__rev0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32_t vaddlv_s16(int16x4_t __p0) { - int32_t __ret; - __ret = (int32_t) __builtin_neon_vaddlv_s16((int8x8_t)__p0); - return __ret; -} -#else -__ai int32_t vaddlv_s16(int16x4_t __p0) { - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32_t __ret; - __ret = (int32_t) __builtin_neon_vaddlv_s16((int8x8_t)__rev0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8_t vaddvq_u8(uint8x16_t __p0) { - uint8_t __ret; - __ret = (uint8_t) __builtin_neon_vaddvq_u8((int8x16_t)__p0); - return __ret; -} -#else -__ai uint8_t vaddvq_u8(uint8x16_t __p0) { - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8_t __ret; - __ret = (uint8_t) __builtin_neon_vaddvq_u8((int8x16_t)__rev0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32_t vaddvq_u32(uint32x4_t __p0) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vaddvq_u32((int8x16_t)__p0); - return __ret; -} -#else -__ai uint32_t vaddvq_u32(uint32x4_t __p0) { - uint32x4_t __rev0; __rev0 
= __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vaddvq_u32((int8x16_t)__rev0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64_t vaddvq_u64(uint64x2_t __p0) { - uint64_t __ret; - __ret = (uint64_t) __builtin_neon_vaddvq_u64((int8x16_t)__p0); - return __ret; -} -#else -__ai uint64_t vaddvq_u64(uint64x2_t __p0) { - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint64_t __ret; - __ret = (uint64_t) __builtin_neon_vaddvq_u64((int8x16_t)__rev0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16_t vaddvq_u16(uint16x8_t __p0) { - uint16_t __ret; - __ret = (uint16_t) __builtin_neon_vaddvq_u16((int8x16_t)__p0); - return __ret; -} -#else -__ai uint16_t vaddvq_u16(uint16x8_t __p0) { - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint16_t __ret; - __ret = (uint16_t) __builtin_neon_vaddvq_u16((int8x16_t)__rev0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8_t vaddvq_s8(int8x16_t __p0) { - int8_t __ret; - __ret = (int8_t) __builtin_neon_vaddvq_s8((int8x16_t)__p0); - return __ret; -} -#else -__ai int8_t vaddvq_s8(int8x16_t __p0) { - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8_t __ret; - __ret = (int8_t) __builtin_neon_vaddvq_s8((int8x16_t)__rev0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64_t vaddvq_f64(float64x2_t __p0) { - float64_t __ret; - __ret = (float64_t) __builtin_neon_vaddvq_f64((int8x16_t)__p0); - return __ret; -} -#else -__ai float64_t vaddvq_f64(float64x2_t __p0) { - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float64_t __ret; - __ret = (float64_t) __builtin_neon_vaddvq_f64((int8x16_t)__rev0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32_t vaddvq_f32(float32x4_t __p0) { - float32_t __ret; - __ret = (float32_t) __builtin_neon_vaddvq_f32((int8x16_t)__p0); - return __ret; -} -#else -__ai float32_t vaddvq_f32(float32x4_t __p0) { - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float32_t __ret; - __ret = (float32_t) __builtin_neon_vaddvq_f32((int8x16_t)__rev0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32_t vaddvq_s32(int32x4_t __p0) { - int32_t __ret; - __ret = (int32_t) __builtin_neon_vaddvq_s32((int8x16_t)__p0); - return __ret; -} -#else -__ai int32_t vaddvq_s32(int32x4_t __p0) { - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32_t __ret; - __ret = (int32_t) __builtin_neon_vaddvq_s32((int8x16_t)__rev0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64_t vaddvq_s64(int64x2_t __p0) { - int64_t __ret; - __ret = (int64_t) __builtin_neon_vaddvq_s64((int8x16_t)__p0); - return __ret; -} -#else -__ai int64_t vaddvq_s64(int64x2_t __p0) { - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int64_t __ret; - __ret = (int64_t) __builtin_neon_vaddvq_s64((int8x16_t)__rev0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16_t vaddvq_s16(int16x8_t __p0) { - int16_t __ret; - __ret = (int16_t) __builtin_neon_vaddvq_s16((int8x16_t)__p0); - return __ret; -} -#else -__ai int16_t vaddvq_s16(int16x8_t __p0) { - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16_t __ret; - __ret = (int16_t) __builtin_neon_vaddvq_s16((int8x16_t)__rev0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ 
-__ai uint8_t vaddv_u8(uint8x8_t __p0) { - uint8_t __ret; - __ret = (uint8_t) __builtin_neon_vaddv_u8((int8x8_t)__p0); - return __ret; -} -#else -__ai uint8_t vaddv_u8(uint8x8_t __p0) { - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint8_t __ret; - __ret = (uint8_t) __builtin_neon_vaddv_u8((int8x8_t)__rev0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32_t vaddv_u32(uint32x2_t __p0) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vaddv_u32((int8x8_t)__p0); - return __ret; -} -#else -__ai uint32_t vaddv_u32(uint32x2_t __p0) { - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vaddv_u32((int8x8_t)__rev0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16_t vaddv_u16(uint16x4_t __p0) { - uint16_t __ret; - __ret = (uint16_t) __builtin_neon_vaddv_u16((int8x8_t)__p0); - return __ret; -} -#else -__ai uint16_t vaddv_u16(uint16x4_t __p0) { - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint16_t __ret; - __ret = (uint16_t) __builtin_neon_vaddv_u16((int8x8_t)__rev0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8_t vaddv_s8(int8x8_t __p0) { - int8_t __ret; - __ret = (int8_t) __builtin_neon_vaddv_s8((int8x8_t)__p0); - return __ret; -} -#else -__ai int8_t vaddv_s8(int8x8_t __p0) { - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8_t __ret; - __ret = (int8_t) __builtin_neon_vaddv_s8((int8x8_t)__rev0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32_t vaddv_f32(float32x2_t __p0) { - float32_t __ret; - __ret = (float32_t) __builtin_neon_vaddv_f32((int8x8_t)__p0); - return __ret; -} -#else -__ai float32_t vaddv_f32(float32x2_t __p0) { - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float32_t __ret; - __ret = (float32_t) __builtin_neon_vaddv_f32((int8x8_t)__rev0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32_t vaddv_s32(int32x2_t __p0) { - int32_t __ret; - __ret = (int32_t) __builtin_neon_vaddv_s32((int8x8_t)__p0); - return __ret; -} -#else -__ai int32_t vaddv_s32(int32x2_t __p0) { - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32_t __ret; - __ret = (int32_t) __builtin_neon_vaddv_s32((int8x8_t)__rev0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16_t vaddv_s16(int16x4_t __p0) { - int16_t __ret; - __ret = (int16_t) __builtin_neon_vaddv_s16((int8x8_t)__p0); - return __ret; -} -#else -__ai int16_t vaddv_s16(int16x4_t __p0) { - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16_t __ret; - __ret = (int16_t) __builtin_neon_vaddv_s16((int8x8_t)__rev0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly64x1_t vbsl_p64(uint64x1_t __p0, poly64x1_t __p1, poly64x1_t __p2) { - poly64x1_t __ret; - __ret = (poly64x1_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 6); - return __ret; -} -#else -__ai poly64x1_t vbsl_p64(uint64x1_t __p0, poly64x1_t __p1, poly64x1_t __p2) { - poly64x1_t __ret; - __ret = (poly64x1_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 6); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly64x2_t vbslq_p64(uint64x2_t __p0, poly64x2_t __p1, poly64x2_t __p2) { - poly64x2_t __ret; - __ret = (poly64x2_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 38); - return __ret; -} -#else -__ai 
poly64x2_t vbslq_p64(uint64x2_t __p0, poly64x2_t __p1, poly64x2_t __p2) { - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - poly64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); - poly64x2_t __ret; - __ret = (poly64x2_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 38); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vbslq_f64(uint64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { - float64x2_t __ret; - __ret = (float64x2_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42); - return __ret; -} -#else -__ai float64x2_t vbslq_f64(uint64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - float64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); - float64x2_t __ret; - __ret = (float64x2_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 42); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64x1_t vbsl_f64(uint64x1_t __p0, float64x1_t __p1, float64x1_t __p2) { - float64x1_t __ret; - __ret = (float64x1_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 10); - return __ret; -} -#else -__ai float64x1_t vbsl_f64(uint64x1_t __p0, float64x1_t __p1, float64x1_t __p2) { - float64x1_t __ret; - __ret = (float64x1_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 10); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vcageq_f64(float64x2_t __p0, float64x2_t __p1) { - uint64x2_t __ret; - __ret = (uint64x2_t) __builtin_neon_vcageq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); - return __ret; -} -#else -__ai uint64x2_t vcageq_f64(float64x2_t __p0, float64x2_t __p1) { - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint64x2_t __ret; - __ret = (uint64x2_t) __builtin_neon_vcageq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x1_t vcage_f64(float64x1_t __p0, float64x1_t __p1) { - uint64x1_t __ret; - __ret = (uint64x1_t) __builtin_neon_vcage_v((int8x8_t)__p0, (int8x8_t)__p1, 19); - return __ret; -} -#else -__ai uint64x1_t vcage_f64(float64x1_t __p0, float64x1_t __p1) { - uint64x1_t __ret; - __ret = (uint64x1_t) __builtin_neon_vcage_v((int8x8_t)__p0, (int8x8_t)__p1, 19); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64_t vcaged_f64(float64_t __p0, float64_t __p1) { - uint64_t __ret; - __ret = (uint64_t) __builtin_neon_vcaged_f64(__p0, __p1); - return __ret; -} -#else -__ai uint64_t vcaged_f64(float64_t __p0, float64_t __p1) { - uint64_t __ret; - __ret = (uint64_t) __builtin_neon_vcaged_f64(__p0, __p1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32_t vcages_f32(float32_t __p0, float32_t __p1) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vcages_f32(__p0, __p1); - return __ret; -} -#else -__ai uint32_t vcages_f32(float32_t __p0, float32_t __p1) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vcages_f32(__p0, __p1); - return __ret; -} -#endif - -#ifdef 
__LITTLE_ENDIAN__ -__ai uint64x2_t vcagtq_f64(float64x2_t __p0, float64x2_t __p1) { - uint64x2_t __ret; - __ret = (uint64x2_t) __builtin_neon_vcagtq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); - return __ret; -} -#else -__ai uint64x2_t vcagtq_f64(float64x2_t __p0, float64x2_t __p1) { - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint64x2_t __ret; - __ret = (uint64x2_t) __builtin_neon_vcagtq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x1_t vcagt_f64(float64x1_t __p0, float64x1_t __p1) { - uint64x1_t __ret; - __ret = (uint64x1_t) __builtin_neon_vcagt_v((int8x8_t)__p0, (int8x8_t)__p1, 19); - return __ret; -} -#else -__ai uint64x1_t vcagt_f64(float64x1_t __p0, float64x1_t __p1) { - uint64x1_t __ret; - __ret = (uint64x1_t) __builtin_neon_vcagt_v((int8x8_t)__p0, (int8x8_t)__p1, 19); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64_t vcagtd_f64(float64_t __p0, float64_t __p1) { - uint64_t __ret; - __ret = (uint64_t) __builtin_neon_vcagtd_f64(__p0, __p1); - return __ret; -} -#else -__ai uint64_t vcagtd_f64(float64_t __p0, float64_t __p1) { - uint64_t __ret; - __ret = (uint64_t) __builtin_neon_vcagtd_f64(__p0, __p1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32_t vcagts_f32(float32_t __p0, float32_t __p1) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vcagts_f32(__p0, __p1); - return __ret; -} -#else -__ai uint32_t vcagts_f32(float32_t __p0, float32_t __p1) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vcagts_f32(__p0, __p1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vcaleq_f64(float64x2_t __p0, float64x2_t __p1) { - uint64x2_t __ret; - __ret = (uint64x2_t) __builtin_neon_vcaleq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); - return __ret; -} -#else -__ai uint64x2_t vcaleq_f64(float64x2_t __p0, float64x2_t __p1) { - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint64x2_t __ret; - __ret = (uint64x2_t) __builtin_neon_vcaleq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x1_t vcale_f64(float64x1_t __p0, float64x1_t __p1) { - uint64x1_t __ret; - __ret = (uint64x1_t) __builtin_neon_vcale_v((int8x8_t)__p0, (int8x8_t)__p1, 19); - return __ret; -} -#else -__ai uint64x1_t vcale_f64(float64x1_t __p0, float64x1_t __p1) { - uint64x1_t __ret; - __ret = (uint64x1_t) __builtin_neon_vcale_v((int8x8_t)__p0, (int8x8_t)__p1, 19); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64_t vcaled_f64(float64_t __p0, float64_t __p1) { - uint64_t __ret; - __ret = (uint64_t) __builtin_neon_vcaled_f64(__p0, __p1); - return __ret; -} -#else -__ai uint64_t vcaled_f64(float64_t __p0, float64_t __p1) { - uint64_t __ret; - __ret = (uint64_t) __builtin_neon_vcaled_f64(__p0, __p1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32_t vcales_f32(float32_t __p0, float32_t __p1) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vcales_f32(__p0, __p1); - return __ret; -} -#else -__ai uint32_t vcales_f32(float32_t __p0, float32_t __p1) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vcales_f32(__p0, __p1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai 
uint64x2_t vcaltq_f64(float64x2_t __p0, float64x2_t __p1) { - uint64x2_t __ret; - __ret = (uint64x2_t) __builtin_neon_vcaltq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); - return __ret; -} -#else -__ai uint64x2_t vcaltq_f64(float64x2_t __p0, float64x2_t __p1) { - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint64x2_t __ret; - __ret = (uint64x2_t) __builtin_neon_vcaltq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x1_t vcalt_f64(float64x1_t __p0, float64x1_t __p1) { - uint64x1_t __ret; - __ret = (uint64x1_t) __builtin_neon_vcalt_v((int8x8_t)__p0, (int8x8_t)__p1, 19); - return __ret; -} -#else -__ai uint64x1_t vcalt_f64(float64x1_t __p0, float64x1_t __p1) { - uint64x1_t __ret; - __ret = (uint64x1_t) __builtin_neon_vcalt_v((int8x8_t)__p0, (int8x8_t)__p1, 19); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64_t vcaltd_f64(float64_t __p0, float64_t __p1) { - uint64_t __ret; - __ret = (uint64_t) __builtin_neon_vcaltd_f64(__p0, __p1); - return __ret; -} -#else -__ai uint64_t vcaltd_f64(float64_t __p0, float64_t __p1) { - uint64_t __ret; - __ret = (uint64_t) __builtin_neon_vcaltd_f64(__p0, __p1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32_t vcalts_f32(float32_t __p0, float32_t __p1) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vcalts_f32(__p0, __p1); - return __ret; -} -#else -__ai uint32_t vcalts_f32(float32_t __p0, float32_t __p1) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vcalts_f32(__p0, __p1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x1_t vceq_p64(poly64x1_t __p0, poly64x1_t __p1) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0 == __p1); - return __ret; -} -#else -__ai uint64x1_t vceq_p64(poly64x1_t __p0, poly64x1_t __p1) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0 == __p1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vceqq_p64(poly64x2_t __p0, poly64x2_t __p1) { - uint64x2_t __ret; - __ret = (uint64x2_t)(__p0 == __p1); - return __ret; -} -#else -__ai uint64x2_t vceqq_p64(poly64x2_t __p0, poly64x2_t __p1) { - poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint64x2_t __ret; - __ret = (uint64x2_t)(__rev0 == __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vceqq_u64(uint64x2_t __p0, uint64x2_t __p1) { - uint64x2_t __ret; - __ret = (uint64x2_t)(__p0 == __p1); - return __ret; -} -#else -__ai uint64x2_t vceqq_u64(uint64x2_t __p0, uint64x2_t __p1) { - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint64x2_t __ret; - __ret = (uint64x2_t)(__rev0 == __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vceqq_f64(float64x2_t __p0, float64x2_t __p1) { - uint64x2_t __ret; - __ret = (uint64x2_t)(__p0 == __p1); - return __ret; -} -#else -__ai uint64x2_t vceqq_f64(float64x2_t __p0, float64x2_t __p1) { - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint64x2_t __ret; - __ret = (uint64x2_t)(__rev0 == 
__rev1); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vceqq_s64(int64x2_t __p0, int64x2_t __p1) { - uint64x2_t __ret; - __ret = (uint64x2_t)(__p0 == __p1); - return __ret; -} -#else -__ai uint64x2_t vceqq_s64(int64x2_t __p0, int64x2_t __p1) { - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint64x2_t __ret; - __ret = (uint64x2_t)(__rev0 == __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x1_t vceq_u64(uint64x1_t __p0, uint64x1_t __p1) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0 == __p1); - return __ret; -} -#else -__ai uint64x1_t vceq_u64(uint64x1_t __p0, uint64x1_t __p1) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0 == __p1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x1_t vceq_f64(float64x1_t __p0, float64x1_t __p1) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0 == __p1); - return __ret; -} -#else -__ai uint64x1_t vceq_f64(float64x1_t __p0, float64x1_t __p1) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0 == __p1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x1_t vceq_s64(int64x1_t __p0, int64x1_t __p1) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0 == __p1); - return __ret; -} -#else -__ai uint64x1_t vceq_s64(int64x1_t __p0, int64x1_t __p1) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0 == __p1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64_t vceqd_u64(uint64_t __p0, uint64_t __p1) { - uint64_t __ret; - __ret = (uint64_t) __builtin_neon_vceqd_u64(__p0, __p1); - return __ret; -} -#else -__ai uint64_t vceqd_u64(uint64_t __p0, uint64_t __p1) { - uint64_t __ret; - __ret = (uint64_t) __builtin_neon_vceqd_u64(__p0, __p1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64_t vceqd_s64(int64_t __p0, int64_t __p1) { - int64_t __ret; - __ret = (int64_t) __builtin_neon_vceqd_s64(__p0, __p1); - return __ret; -} -#else -__ai int64_t vceqd_s64(int64_t __p0, int64_t __p1) { - int64_t __ret; - __ret = (int64_t) __builtin_neon_vceqd_s64(__p0, __p1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64_t vceqd_f64(float64_t __p0, float64_t __p1) { - uint64_t __ret; - __ret = (uint64_t) __builtin_neon_vceqd_f64(__p0, __p1); - return __ret; -} -#else -__ai uint64_t vceqd_f64(float64_t __p0, float64_t __p1) { - uint64_t __ret; - __ret = (uint64_t) __builtin_neon_vceqd_f64(__p0, __p1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32_t vceqs_f32(float32_t __p0, float32_t __p1) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vceqs_f32(__p0, __p1); - return __ret; -} -#else -__ai uint32_t vceqs_f32(float32_t __p0, float32_t __p1) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vceqs_f32(__p0, __p1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vceqz_p8(poly8x8_t __p0) { - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 16); - return __ret; -} -#else -__ai uint8x8_t vceqz_p8(poly8x8_t __p0) { - poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 16); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x1_t vceqz_p64(poly64x1_t __p0) { - 
uint64x1_t __ret; - __ret = (uint64x1_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 19); - return __ret; -} -#else -__ai uint64x1_t vceqz_p64(poly64x1_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 19); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vceqz_p16(poly16x4_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 17); - return __ret; -} -#else -__ai uint16x4_t vceqz_p16(poly16x4_t __p0) { - poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 17); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vceqzq_p8(poly8x16_t __p0) { - uint8x16_t __ret; - __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 48); - return __ret; -} -#else -__ai uint8x16_t vceqzq_p8(poly8x16_t __p0) { - poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __ret; - __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 48); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vceqzq_p64(poly64x2_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 51); - return __ret; -} -#else -__ai uint64x2_t vceqzq_p64(poly64x2_t __p0) { - poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint64x2_t __ret; - __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 51); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vceqzq_p16(poly16x8_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 49); - return __ret; -} -#else -__ai uint16x8_t vceqzq_p16(poly16x8_t __p0) { - poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 49); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vceqzq_u8(uint8x16_t __p0) { - uint8x16_t __ret; - __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 48); - return __ret; -} -#else -__ai uint8x16_t vceqzq_u8(uint8x16_t __p0) { - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __ret; - __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 48); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vceqzq_u32(uint32x4_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 50); - return __ret; -} -#else -__ai uint32x4_t vceqzq_u32(uint32x4_t __p0) { - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 50); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vceqzq_u64(uint64x2_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t) 
__builtin_neon_vceqzq_v((int8x16_t)__p0, 51); - return __ret; -} -#else -__ai uint64x2_t vceqzq_u64(uint64x2_t __p0) { - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint64x2_t __ret; - __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 51); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vceqzq_u16(uint16x8_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 49); - return __ret; -} -#else -__ai uint16x8_t vceqzq_u16(uint16x8_t __p0) { - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 49); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vceqzq_s8(int8x16_t __p0) { - uint8x16_t __ret; - __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 48); - return __ret; -} -#else -__ai uint8x16_t vceqzq_s8(int8x16_t __p0) { - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __ret; - __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 48); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vceqzq_f64(float64x2_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 51); - return __ret; -} -#else -__ai uint64x2_t vceqzq_f64(float64x2_t __p0) { - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint64x2_t __ret; - __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 51); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vceqzq_f32(float32x4_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 50); - return __ret; -} -#else -__ai uint32x4_t vceqzq_f32(float32x4_t __p0) { - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 50); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vceqzq_s32(int32x4_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 50); - return __ret; -} -#else -__ai uint32x4_t vceqzq_s32(int32x4_t __p0) { - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 50); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vceqzq_s64(int64x2_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 51); - return __ret; -} -#else -__ai uint64x2_t vceqzq_s64(int64x2_t __p0) { - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint64x2_t __ret; - __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 51); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vceqzq_s16(int16x8_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t) 
__builtin_neon_vceqzq_v((int8x16_t)__p0, 49); - return __ret; -} -#else -__ai uint16x8_t vceqzq_s16(int16x8_t __p0) { - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 49); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vceqz_u8(uint8x8_t __p0) { - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 16); - return __ret; -} -#else -__ai uint8x8_t vceqz_u8(uint8x8_t __p0) { - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 16); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vceqz_u32(uint32x2_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 18); - return __ret; -} -#else -__ai uint32x2_t vceqz_u32(uint32x2_t __p0) { - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 18); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x1_t vceqz_u64(uint64x1_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 19); - return __ret; -} -#else -__ai uint64x1_t vceqz_u64(uint64x1_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 19); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vceqz_u16(uint16x4_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 17); - return __ret; -} -#else -__ai uint16x4_t vceqz_u16(uint16x4_t __p0) { - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 17); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vceqz_s8(int8x8_t __p0) { - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 16); - return __ret; -} -#else -__ai uint8x8_t vceqz_s8(int8x8_t __p0) { - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 16); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x1_t vceqz_f64(float64x1_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 19); - return __ret; -} -#else -__ai uint64x1_t vceqz_f64(float64x1_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 19); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vceqz_f32(float32x2_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 18); - return __ret; -} -#else -__ai uint32x2_t vceqz_f32(float32x2_t __p0) { - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 18); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef 
__LITTLE_ENDIAN__ -__ai uint32x2_t vceqz_s32(int32x2_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 18); - return __ret; -} -#else -__ai uint32x2_t vceqz_s32(int32x2_t __p0) { - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 18); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x1_t vceqz_s64(int64x1_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 19); - return __ret; -} -#else -__ai uint64x1_t vceqz_s64(int64x1_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 19); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vceqz_s16(int16x4_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 17); - return __ret; -} -#else -__ai uint16x4_t vceqz_s16(int16x4_t __p0) { - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 17); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64_t vceqzd_u64(uint64_t __p0) { - uint64_t __ret; - __ret = (uint64_t) __builtin_neon_vceqzd_u64(__p0); - return __ret; -} -#else -__ai uint64_t vceqzd_u64(uint64_t __p0) { - uint64_t __ret; - __ret = (uint64_t) __builtin_neon_vceqzd_u64(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64_t vceqzd_s64(int64_t __p0) { - int64_t __ret; - __ret = (int64_t) __builtin_neon_vceqzd_s64(__p0); - return __ret; -} -#else -__ai int64_t vceqzd_s64(int64_t __p0) { - int64_t __ret; - __ret = (int64_t) __builtin_neon_vceqzd_s64(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64_t vceqzd_f64(float64_t __p0) { - uint64_t __ret; - __ret = (uint64_t) __builtin_neon_vceqzd_f64(__p0); - return __ret; -} -#else -__ai uint64_t vceqzd_f64(float64_t __p0) { - uint64_t __ret; - __ret = (uint64_t) __builtin_neon_vceqzd_f64(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32_t vceqzs_f32(float32_t __p0) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vceqzs_f32(__p0); - return __ret; -} -#else -__ai uint32_t vceqzs_f32(float32_t __p0) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vceqzs_f32(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vcgeq_u64(uint64x2_t __p0, uint64x2_t __p1) { - uint64x2_t __ret; - __ret = (uint64x2_t)(__p0 >= __p1); - return __ret; -} -#else -__ai uint64x2_t vcgeq_u64(uint64x2_t __p0, uint64x2_t __p1) { - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint64x2_t __ret; - __ret = (uint64x2_t)(__rev0 >= __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vcgeq_f64(float64x2_t __p0, float64x2_t __p1) { - uint64x2_t __ret; - __ret = (uint64x2_t)(__p0 >= __p1); - return __ret; -} -#else -__ai uint64x2_t vcgeq_f64(float64x2_t __p0, float64x2_t __p1) { - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint64x2_t __ret; - __ret = (uint64x2_t)(__rev0 >= __rev1); - __ret = 
__builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vcgeq_s64(int64x2_t __p0, int64x2_t __p1) { - uint64x2_t __ret; - __ret = (uint64x2_t)(__p0 >= __p1); - return __ret; -} -#else -__ai uint64x2_t vcgeq_s64(int64x2_t __p0, int64x2_t __p1) { - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint64x2_t __ret; - __ret = (uint64x2_t)(__rev0 >= __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x1_t vcge_u64(uint64x1_t __p0, uint64x1_t __p1) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0 >= __p1); - return __ret; -} -#else -__ai uint64x1_t vcge_u64(uint64x1_t __p0, uint64x1_t __p1) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0 >= __p1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x1_t vcge_f64(float64x1_t __p0, float64x1_t __p1) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0 >= __p1); - return __ret; -} -#else -__ai uint64x1_t vcge_f64(float64x1_t __p0, float64x1_t __p1) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0 >= __p1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x1_t vcge_s64(int64x1_t __p0, int64x1_t __p1) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0 >= __p1); - return __ret; -} -#else -__ai uint64x1_t vcge_s64(int64x1_t __p0, int64x1_t __p1) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0 >= __p1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64_t vcged_s64(int64_t __p0, int64_t __p1) { - int64_t __ret; - __ret = (int64_t) __builtin_neon_vcged_s64(__p0, __p1); - return __ret; -} -#else -__ai int64_t vcged_s64(int64_t __p0, int64_t __p1) { - int64_t __ret; - __ret = (int64_t) __builtin_neon_vcged_s64(__p0, __p1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64_t vcged_u64(uint64_t __p0, uint64_t __p1) { - uint64_t __ret; - __ret = (uint64_t) __builtin_neon_vcged_u64(__p0, __p1); - return __ret; -} -#else -__ai uint64_t vcged_u64(uint64_t __p0, uint64_t __p1) { - uint64_t __ret; - __ret = (uint64_t) __builtin_neon_vcged_u64(__p0, __p1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64_t vcged_f64(float64_t __p0, float64_t __p1) { - uint64_t __ret; - __ret = (uint64_t) __builtin_neon_vcged_f64(__p0, __p1); - return __ret; -} -#else -__ai uint64_t vcged_f64(float64_t __p0, float64_t __p1) { - uint64_t __ret; - __ret = (uint64_t) __builtin_neon_vcged_f64(__p0, __p1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32_t vcges_f32(float32_t __p0, float32_t __p1) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vcges_f32(__p0, __p1); - return __ret; -} -#else -__ai uint32_t vcges_f32(float32_t __p0, float32_t __p1) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vcges_f32(__p0, __p1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vcgezq_s8(int8x16_t __p0) { - uint8x16_t __ret; - __ret = (uint8x16_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 48); - return __ret; -} -#else -__ai uint8x16_t vcgezq_s8(int8x16_t __p0) { - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __ret; - __ret = (uint8x16_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 48); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ 
-__ai uint64x2_t vcgezq_f64(float64x2_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 51); - return __ret; -} -#else -__ai uint64x2_t vcgezq_f64(float64x2_t __p0) { - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint64x2_t __ret; - __ret = (uint64x2_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 51); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vcgezq_f32(float32x4_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 50); - return __ret; -} -#else -__ai uint32x4_t vcgezq_f32(float32x4_t __p0) { - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 50); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vcgezq_s32(int32x4_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 50); - return __ret; -} -#else -__ai uint32x4_t vcgezq_s32(int32x4_t __p0) { - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 50); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vcgezq_s64(int64x2_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 51); - return __ret; -} -#else -__ai uint64x2_t vcgezq_s64(int64x2_t __p0) { - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint64x2_t __ret; - __ret = (uint64x2_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 51); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vcgezq_s16(int16x8_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 49); - return __ret; -} -#else -__ai uint16x8_t vcgezq_s16(int16x8_t __p0) { - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 49); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vcgez_s8(int8x8_t __p0) { - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 16); - return __ret; -} -#else -__ai uint8x8_t vcgez_s8(int8x8_t __p0) { - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vcgez_v((int8x8_t)__rev0, 16); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x1_t vcgez_f64(float64x1_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 19); - return __ret; -} -#else -__ai uint64x1_t vcgez_f64(float64x1_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 19); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vcgez_f32(float32x2_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 18); - return __ret; -} -#else -__ai uint32x2_t vcgez_f32(float32x2_t __p0) { - float32x2_t __rev0; 
__rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vcgez_v((int8x8_t)__rev0, 18); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vcgez_s32(int32x2_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 18); - return __ret; -} -#else -__ai uint32x2_t vcgez_s32(int32x2_t __p0) { - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vcgez_v((int8x8_t)__rev0, 18); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x1_t vcgez_s64(int64x1_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 19); - return __ret; -} -#else -__ai uint64x1_t vcgez_s64(int64x1_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 19); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vcgez_s16(int16x4_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 17); - return __ret; -} -#else -__ai uint16x4_t vcgez_s16(int16x4_t __p0) { - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vcgez_v((int8x8_t)__rev0, 17); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64_t vcgezd_s64(int64_t __p0) { - int64_t __ret; - __ret = (int64_t) __builtin_neon_vcgezd_s64(__p0); - return __ret; -} -#else -__ai int64_t vcgezd_s64(int64_t __p0) { - int64_t __ret; - __ret = (int64_t) __builtin_neon_vcgezd_s64(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64_t vcgezd_f64(float64_t __p0) { - uint64_t __ret; - __ret = (uint64_t) __builtin_neon_vcgezd_f64(__p0); - return __ret; -} -#else -__ai uint64_t vcgezd_f64(float64_t __p0) { - uint64_t __ret; - __ret = (uint64_t) __builtin_neon_vcgezd_f64(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32_t vcgezs_f32(float32_t __p0) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vcgezs_f32(__p0); - return __ret; -} -#else -__ai uint32_t vcgezs_f32(float32_t __p0) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vcgezs_f32(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vcgtq_u64(uint64x2_t __p0, uint64x2_t __p1) { - uint64x2_t __ret; - __ret = (uint64x2_t)(__p0 > __p1); - return __ret; -} -#else -__ai uint64x2_t vcgtq_u64(uint64x2_t __p0, uint64x2_t __p1) { - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint64x2_t __ret; - __ret = (uint64x2_t)(__rev0 > __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vcgtq_f64(float64x2_t __p0, float64x2_t __p1) { - uint64x2_t __ret; - __ret = (uint64x2_t)(__p0 > __p1); - return __ret; -} -#else -__ai uint64x2_t vcgtq_f64(float64x2_t __p0, float64x2_t __p1) { - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint64x2_t __ret; - __ret = (uint64x2_t)(__rev0 > __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai 
uint64x2_t vcgtq_s64(int64x2_t __p0, int64x2_t __p1) { - uint64x2_t __ret; - __ret = (uint64x2_t)(__p0 > __p1); - return __ret; -} -#else -__ai uint64x2_t vcgtq_s64(int64x2_t __p0, int64x2_t __p1) { - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint64x2_t __ret; - __ret = (uint64x2_t)(__rev0 > __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x1_t vcgt_u64(uint64x1_t __p0, uint64x1_t __p1) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0 > __p1); - return __ret; -} -#else -__ai uint64x1_t vcgt_u64(uint64x1_t __p0, uint64x1_t __p1) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0 > __p1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x1_t vcgt_f64(float64x1_t __p0, float64x1_t __p1) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0 > __p1); - return __ret; -} -#else -__ai uint64x1_t vcgt_f64(float64x1_t __p0, float64x1_t __p1) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0 > __p1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x1_t vcgt_s64(int64x1_t __p0, int64x1_t __p1) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0 > __p1); - return __ret; -} -#else -__ai uint64x1_t vcgt_s64(int64x1_t __p0, int64x1_t __p1) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0 > __p1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64_t vcgtd_s64(int64_t __p0, int64_t __p1) { - int64_t __ret; - __ret = (int64_t) __builtin_neon_vcgtd_s64(__p0, __p1); - return __ret; -} -#else -__ai int64_t vcgtd_s64(int64_t __p0, int64_t __p1) { - int64_t __ret; - __ret = (int64_t) __builtin_neon_vcgtd_s64(__p0, __p1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64_t vcgtd_u64(uint64_t __p0, uint64_t __p1) { - uint64_t __ret; - __ret = (uint64_t) __builtin_neon_vcgtd_u64(__p0, __p1); - return __ret; -} -#else -__ai uint64_t vcgtd_u64(uint64_t __p0, uint64_t __p1) { - uint64_t __ret; - __ret = (uint64_t) __builtin_neon_vcgtd_u64(__p0, __p1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64_t vcgtd_f64(float64_t __p0, float64_t __p1) { - uint64_t __ret; - __ret = (uint64_t) __builtin_neon_vcgtd_f64(__p0, __p1); - return __ret; -} -#else -__ai uint64_t vcgtd_f64(float64_t __p0, float64_t __p1) { - uint64_t __ret; - __ret = (uint64_t) __builtin_neon_vcgtd_f64(__p0, __p1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32_t vcgts_f32(float32_t __p0, float32_t __p1) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vcgts_f32(__p0, __p1); - return __ret; -} -#else -__ai uint32_t vcgts_f32(float32_t __p0, float32_t __p1) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vcgts_f32(__p0, __p1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vcgtzq_s8(int8x16_t __p0) { - uint8x16_t __ret; - __ret = (uint8x16_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 48); - return __ret; -} -#else -__ai uint8x16_t vcgtzq_s8(int8x16_t __p0) { - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __ret; - __ret = (uint8x16_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 48); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vcgtzq_f64(float64x2_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t) 
__builtin_neon_vcgtzq_v((int8x16_t)__p0, 51); - return __ret; -} -#else -__ai uint64x2_t vcgtzq_f64(float64x2_t __p0) { - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint64x2_t __ret; - __ret = (uint64x2_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 51); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vcgtzq_f32(float32x4_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 50); - return __ret; -} -#else -__ai uint32x4_t vcgtzq_f32(float32x4_t __p0) { - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 50); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vcgtzq_s32(int32x4_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 50); - return __ret; -} -#else -__ai uint32x4_t vcgtzq_s32(int32x4_t __p0) { - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 50); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vcgtzq_s64(int64x2_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 51); - return __ret; -} -#else -__ai uint64x2_t vcgtzq_s64(int64x2_t __p0) { - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint64x2_t __ret; - __ret = (uint64x2_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 51); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vcgtzq_s16(int16x8_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 49); - return __ret; -} -#else -__ai uint16x8_t vcgtzq_s16(int16x8_t __p0) { - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 49); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vcgtz_s8(int8x8_t __p0) { - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 16); - return __ret; -} -#else -__ai uint8x8_t vcgtz_s8(int8x8_t __p0) { - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vcgtz_v((int8x8_t)__rev0, 16); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x1_t vcgtz_f64(float64x1_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 19); - return __ret; -} -#else -__ai uint64x1_t vcgtz_f64(float64x1_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 19); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vcgtz_f32(float32x2_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 18); - return __ret; -} -#else -__ai uint32x2_t vcgtz_f32(float32x2_t __p0) { - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint32x2_t __ret; - __ret = (uint32x2_t) 
__builtin_neon_vcgtz_v((int8x8_t)__rev0, 18); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vcgtz_s32(int32x2_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 18); - return __ret; -} -#else -__ai uint32x2_t vcgtz_s32(int32x2_t __p0) { - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vcgtz_v((int8x8_t)__rev0, 18); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x1_t vcgtz_s64(int64x1_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 19); - return __ret; -} -#else -__ai uint64x1_t vcgtz_s64(int64x1_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 19); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vcgtz_s16(int16x4_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 17); - return __ret; -} -#else -__ai uint16x4_t vcgtz_s16(int16x4_t __p0) { - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vcgtz_v((int8x8_t)__rev0, 17); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64_t vcgtzd_s64(int64_t __p0) { - int64_t __ret; - __ret = (int64_t) __builtin_neon_vcgtzd_s64(__p0); - return __ret; -} -#else -__ai int64_t vcgtzd_s64(int64_t __p0) { - int64_t __ret; - __ret = (int64_t) __builtin_neon_vcgtzd_s64(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64_t vcgtzd_f64(float64_t __p0) { - uint64_t __ret; - __ret = (uint64_t) __builtin_neon_vcgtzd_f64(__p0); - return __ret; -} -#else -__ai uint64_t vcgtzd_f64(float64_t __p0) { - uint64_t __ret; - __ret = (uint64_t) __builtin_neon_vcgtzd_f64(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32_t vcgtzs_f32(float32_t __p0) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vcgtzs_f32(__p0); - return __ret; -} -#else -__ai uint32_t vcgtzs_f32(float32_t __p0) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vcgtzs_f32(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vcleq_u64(uint64x2_t __p0, uint64x2_t __p1) { - uint64x2_t __ret; - __ret = (uint64x2_t)(__p0 <= __p1); - return __ret; -} -#else -__ai uint64x2_t vcleq_u64(uint64x2_t __p0, uint64x2_t __p1) { - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint64x2_t __ret; - __ret = (uint64x2_t)(__rev0 <= __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vcleq_f64(float64x2_t __p0, float64x2_t __p1) { - uint64x2_t __ret; - __ret = (uint64x2_t)(__p0 <= __p1); - return __ret; -} -#else -__ai uint64x2_t vcleq_f64(float64x2_t __p0, float64x2_t __p1) { - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint64x2_t __ret; - __ret = (uint64x2_t)(__rev0 <= __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vcleq_s64(int64x2_t __p0, int64x2_t __p1) { - uint64x2_t __ret; - __ret = 
(uint64x2_t)(__p0 <= __p1); - return __ret; -} -#else -__ai uint64x2_t vcleq_s64(int64x2_t __p0, int64x2_t __p1) { - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint64x2_t __ret; - __ret = (uint64x2_t)(__rev0 <= __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x1_t vcle_u64(uint64x1_t __p0, uint64x1_t __p1) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0 <= __p1); - return __ret; -} -#else -__ai uint64x1_t vcle_u64(uint64x1_t __p0, uint64x1_t __p1) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0 <= __p1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x1_t vcle_f64(float64x1_t __p0, float64x1_t __p1) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0 <= __p1); - return __ret; -} -#else -__ai uint64x1_t vcle_f64(float64x1_t __p0, float64x1_t __p1) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0 <= __p1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x1_t vcle_s64(int64x1_t __p0, int64x1_t __p1) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0 <= __p1); - return __ret; -} -#else -__ai uint64x1_t vcle_s64(int64x1_t __p0, int64x1_t __p1) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0 <= __p1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64_t vcled_u64(uint64_t __p0, uint64_t __p1) { - uint64_t __ret; - __ret = (uint64_t) __builtin_neon_vcled_u64(__p0, __p1); - return __ret; -} -#else -__ai uint64_t vcled_u64(uint64_t __p0, uint64_t __p1) { - uint64_t __ret; - __ret = (uint64_t) __builtin_neon_vcled_u64(__p0, __p1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64_t vcled_s64(int64_t __p0, int64_t __p1) { - int64_t __ret; - __ret = (int64_t) __builtin_neon_vcled_s64(__p0, __p1); - return __ret; -} -#else -__ai int64_t vcled_s64(int64_t __p0, int64_t __p1) { - int64_t __ret; - __ret = (int64_t) __builtin_neon_vcled_s64(__p0, __p1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64_t vcled_f64(float64_t __p0, float64_t __p1) { - uint64_t __ret; - __ret = (uint64_t) __builtin_neon_vcled_f64(__p0, __p1); - return __ret; -} -#else -__ai uint64_t vcled_f64(float64_t __p0, float64_t __p1) { - uint64_t __ret; - __ret = (uint64_t) __builtin_neon_vcled_f64(__p0, __p1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32_t vcles_f32(float32_t __p0, float32_t __p1) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vcles_f32(__p0, __p1); - return __ret; -} -#else -__ai uint32_t vcles_f32(float32_t __p0, float32_t __p1) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vcles_f32(__p0, __p1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vclezq_s8(int8x16_t __p0) { - uint8x16_t __ret; - __ret = (uint8x16_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 48); - return __ret; -} -#else -__ai uint8x16_t vclezq_s8(int8x16_t __p0) { - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __ret; - __ret = (uint8x16_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 48); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vclezq_f64(float64x2_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 51); - return __ret; -} -#else -__ai uint64x2_t 
vclezq_f64(float64x2_t __p0) { - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint64x2_t __ret; - __ret = (uint64x2_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 51); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vclezq_f32(float32x4_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 50); - return __ret; -} -#else -__ai uint32x4_t vclezq_f32(float32x4_t __p0) { - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 50); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vclezq_s32(int32x4_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 50); - return __ret; -} -#else -__ai uint32x4_t vclezq_s32(int32x4_t __p0) { - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 50); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vclezq_s64(int64x2_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 51); - return __ret; -} -#else -__ai uint64x2_t vclezq_s64(int64x2_t __p0) { - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint64x2_t __ret; - __ret = (uint64x2_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 51); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vclezq_s16(int16x8_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 49); - return __ret; -} -#else -__ai uint16x8_t vclezq_s16(int16x8_t __p0) { - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 49); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vclez_s8(int8x8_t __p0) { - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vclez_v((int8x8_t)__p0, 16); - return __ret; -} -#else -__ai uint8x8_t vclez_s8(int8x8_t __p0) { - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vclez_v((int8x8_t)__rev0, 16); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x1_t vclez_f64(float64x1_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t) __builtin_neon_vclez_v((int8x8_t)__p0, 19); - return __ret; -} -#else -__ai uint64x1_t vclez_f64(float64x1_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t) __builtin_neon_vclez_v((int8x8_t)__p0, 19); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vclez_f32(float32x2_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vclez_v((int8x8_t)__p0, 18); - return __ret; -} -#else -__ai uint32x2_t vclez_f32(float32x2_t __p0) { - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vclez_v((int8x8_t)__rev0, 18); - __ret = __builtin_shufflevector(__ret, 
__ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vclez_s32(int32x2_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vclez_v((int8x8_t)__p0, 18); - return __ret; -} -#else -__ai uint32x2_t vclez_s32(int32x2_t __p0) { - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vclez_v((int8x8_t)__rev0, 18); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x1_t vclez_s64(int64x1_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t) __builtin_neon_vclez_v((int8x8_t)__p0, 19); - return __ret; -} -#else -__ai uint64x1_t vclez_s64(int64x1_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t) __builtin_neon_vclez_v((int8x8_t)__p0, 19); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vclez_s16(int16x4_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vclez_v((int8x8_t)__p0, 17); - return __ret; -} -#else -__ai uint16x4_t vclez_s16(int16x4_t __p0) { - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vclez_v((int8x8_t)__rev0, 17); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64_t vclezd_s64(int64_t __p0) { - int64_t __ret; - __ret = (int64_t) __builtin_neon_vclezd_s64(__p0); - return __ret; -} -#else -__ai int64_t vclezd_s64(int64_t __p0) { - int64_t __ret; - __ret = (int64_t) __builtin_neon_vclezd_s64(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64_t vclezd_f64(float64_t __p0) { - uint64_t __ret; - __ret = (uint64_t) __builtin_neon_vclezd_f64(__p0); - return __ret; -} -#else -__ai uint64_t vclezd_f64(float64_t __p0) { - uint64_t __ret; - __ret = (uint64_t) __builtin_neon_vclezd_f64(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32_t vclezs_f32(float32_t __p0) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vclezs_f32(__p0); - return __ret; -} -#else -__ai uint32_t vclezs_f32(float32_t __p0) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vclezs_f32(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vcltq_u64(uint64x2_t __p0, uint64x2_t __p1) { - uint64x2_t __ret; - __ret = (uint64x2_t)(__p0 < __p1); - return __ret; -} -#else -__ai uint64x2_t vcltq_u64(uint64x2_t __p0, uint64x2_t __p1) { - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint64x2_t __ret; - __ret = (uint64x2_t)(__rev0 < __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vcltq_f64(float64x2_t __p0, float64x2_t __p1) { - uint64x2_t __ret; - __ret = (uint64x2_t)(__p0 < __p1); - return __ret; -} -#else -__ai uint64x2_t vcltq_f64(float64x2_t __p0, float64x2_t __p1) { - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint64x2_t __ret; - __ret = (uint64x2_t)(__rev0 < __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vcltq_s64(int64x2_t __p0, int64x2_t __p1) { - uint64x2_t __ret; - __ret = (uint64x2_t)(__p0 < __p1); - return __ret; -} -#else -__ai uint64x2_t vcltq_s64(int64x2_t __p0, 
int64x2_t __p1) { - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint64x2_t __ret; - __ret = (uint64x2_t)(__rev0 < __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x1_t vclt_u64(uint64x1_t __p0, uint64x1_t __p1) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0 < __p1); - return __ret; -} -#else -__ai uint64x1_t vclt_u64(uint64x1_t __p0, uint64x1_t __p1) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0 < __p1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x1_t vclt_f64(float64x1_t __p0, float64x1_t __p1) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0 < __p1); - return __ret; -} -#else -__ai uint64x1_t vclt_f64(float64x1_t __p0, float64x1_t __p1) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0 < __p1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x1_t vclt_s64(int64x1_t __p0, int64x1_t __p1) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0 < __p1); - return __ret; -} -#else -__ai uint64x1_t vclt_s64(int64x1_t __p0, int64x1_t __p1) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0 < __p1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64_t vcltd_u64(uint64_t __p0, uint64_t __p1) { - uint64_t __ret; - __ret = (uint64_t) __builtin_neon_vcltd_u64(__p0, __p1); - return __ret; -} -#else -__ai uint64_t vcltd_u64(uint64_t __p0, uint64_t __p1) { - uint64_t __ret; - __ret = (uint64_t) __builtin_neon_vcltd_u64(__p0, __p1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64_t vcltd_s64(int64_t __p0, int64_t __p1) { - int64_t __ret; - __ret = (int64_t) __builtin_neon_vcltd_s64(__p0, __p1); - return __ret; -} -#else -__ai int64_t vcltd_s64(int64_t __p0, int64_t __p1) { - int64_t __ret; - __ret = (int64_t) __builtin_neon_vcltd_s64(__p0, __p1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64_t vcltd_f64(float64_t __p0, float64_t __p1) { - uint64_t __ret; - __ret = (uint64_t) __builtin_neon_vcltd_f64(__p0, __p1); - return __ret; -} -#else -__ai uint64_t vcltd_f64(float64_t __p0, float64_t __p1) { - uint64_t __ret; - __ret = (uint64_t) __builtin_neon_vcltd_f64(__p0, __p1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32_t vclts_f32(float32_t __p0, float32_t __p1) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vclts_f32(__p0, __p1); - return __ret; -} -#else -__ai uint32_t vclts_f32(float32_t __p0, float32_t __p1) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vclts_f32(__p0, __p1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vcltzq_s8(int8x16_t __p0) { - uint8x16_t __ret; - __ret = (uint8x16_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 48); - return __ret; -} -#else -__ai uint8x16_t vcltzq_s8(int8x16_t __p0) { - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __ret; - __ret = (uint8x16_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 48); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vcltzq_f64(float64x2_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 51); - return __ret; -} -#else -__ai uint64x2_t vcltzq_f64(float64x2_t __p0) { - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - 
uint64x2_t __ret; - __ret = (uint64x2_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 51); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vcltzq_f32(float32x4_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 50); - return __ret; -} -#else -__ai uint32x4_t vcltzq_f32(float32x4_t __p0) { - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 50); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vcltzq_s32(int32x4_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 50); - return __ret; -} -#else -__ai uint32x4_t vcltzq_s32(int32x4_t __p0) { - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 50); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vcltzq_s64(int64x2_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 51); - return __ret; -} -#else -__ai uint64x2_t vcltzq_s64(int64x2_t __p0) { - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint64x2_t __ret; - __ret = (uint64x2_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 51); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vcltzq_s16(int16x8_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 49); - return __ret; -} -#else -__ai uint16x8_t vcltzq_s16(int16x8_t __p0) { - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 49); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vcltz_s8(int8x8_t __p0) { - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 16); - return __ret; -} -#else -__ai uint8x8_t vcltz_s8(int8x8_t __p0) { - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vcltz_v((int8x8_t)__rev0, 16); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x1_t vcltz_f64(float64x1_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 19); - return __ret; -} -#else -__ai uint64x1_t vcltz_f64(float64x1_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 19); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vcltz_f32(float32x2_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 18); - return __ret; -} -#else -__ai uint32x2_t vcltz_f32(float32x2_t __p0) { - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vcltz_v((int8x8_t)__rev0, 18); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vcltz_s32(int32x2_t 
__p0) { - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 18); - return __ret; -} -#else -__ai uint32x2_t vcltz_s32(int32x2_t __p0) { - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vcltz_v((int8x8_t)__rev0, 18); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x1_t vcltz_s64(int64x1_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 19); - return __ret; -} -#else -__ai uint64x1_t vcltz_s64(int64x1_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 19); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vcltz_s16(int16x4_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 17); - return __ret; -} -#else -__ai uint16x4_t vcltz_s16(int16x4_t __p0) { - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vcltz_v((int8x8_t)__rev0, 17); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64_t vcltzd_s64(int64_t __p0) { - int64_t __ret; - __ret = (int64_t) __builtin_neon_vcltzd_s64(__p0); - return __ret; -} -#else -__ai int64_t vcltzd_s64(int64_t __p0) { - int64_t __ret; - __ret = (int64_t) __builtin_neon_vcltzd_s64(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64_t vcltzd_f64(float64_t __p0) { - uint64_t __ret; - __ret = (uint64_t) __builtin_neon_vcltzd_f64(__p0); - return __ret; -} -#else -__ai uint64_t vcltzd_f64(float64_t __p0) { - uint64_t __ret; - __ret = (uint64_t) __builtin_neon_vcltzd_f64(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32_t vcltzs_f32(float32_t __p0) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vcltzs_f32(__p0); - return __ret; -} -#else -__ai uint32_t vcltzs_f32(float32_t __p0) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vcltzs_f32(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly64x2_t vcombine_p64(poly64x1_t __p0, poly64x1_t __p1) { - poly64x2_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 1); - return __ret; -} -#else -__ai poly64x2_t vcombine_p64(poly64x1_t __p0, poly64x1_t __p1) { - poly64x2_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 1); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vcombine_f64(float64x1_t __p0, float64x1_t __p1) { - float64x2_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 1); - return __ret; -} -#else -__ai float64x2_t vcombine_f64(float64x1_t __p0, float64x1_t __p1) { - float64x2_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 1); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcopyq_lane_p8(__p0_16, __p1_16, __p2_16, __p3_16) __extension__ ({ \ - poly8x16_t __s0_16 = __p0_16; \ - poly8x8_t __s2_16 = __p2_16; \ - poly8x16_t __ret_16; \ - __ret_16 = vsetq_lane_p8(vget_lane_p8(__s2_16, __p3_16), __s0_16, __p1_16); \ - __ret_16; \ -}) -#else -#define vcopyq_lane_p8(__p0_17, __p1_17, __p2_17, __p3_17) __extension__ ({ \ - poly8x16_t __s0_17 = __p0_17; \ - poly8x8_t __s2_17 = __p2_17; \ - poly8x16_t __rev0_17; __rev0_17 = __builtin_shufflevector(__s0_17, __s0_17, 15, 14, 13, 12, 11, 
10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - poly8x8_t __rev2_17; __rev2_17 = __builtin_shufflevector(__s2_17, __s2_17, 7, 6, 5, 4, 3, 2, 1, 0); \ - poly8x16_t __ret_17; \ - __ret_17 = __noswap_vsetq_lane_p8(__noswap_vget_lane_p8(__rev2_17, __p3_17), __rev0_17, __p1_17); \ - __ret_17 = __builtin_shufflevector(__ret_17, __ret_17, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_17; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcopyq_lane_p16(__p0_18, __p1_18, __p2_18, __p3_18) __extension__ ({ \ - poly16x8_t __s0_18 = __p0_18; \ - poly16x4_t __s2_18 = __p2_18; \ - poly16x8_t __ret_18; \ - __ret_18 = vsetq_lane_p16(vget_lane_p16(__s2_18, __p3_18), __s0_18, __p1_18); \ - __ret_18; \ -}) -#else -#define vcopyq_lane_p16(__p0_19, __p1_19, __p2_19, __p3_19) __extension__ ({ \ - poly16x8_t __s0_19 = __p0_19; \ - poly16x4_t __s2_19 = __p2_19; \ - poly16x8_t __rev0_19; __rev0_19 = __builtin_shufflevector(__s0_19, __s0_19, 7, 6, 5, 4, 3, 2, 1, 0); \ - poly16x4_t __rev2_19; __rev2_19 = __builtin_shufflevector(__s2_19, __s2_19, 3, 2, 1, 0); \ - poly16x8_t __ret_19; \ - __ret_19 = __noswap_vsetq_lane_p16(__noswap_vget_lane_p16(__rev2_19, __p3_19), __rev0_19, __p1_19); \ - __ret_19 = __builtin_shufflevector(__ret_19, __ret_19, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_19; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcopyq_lane_u8(__p0_20, __p1_20, __p2_20, __p3_20) __extension__ ({ \ - uint8x16_t __s0_20 = __p0_20; \ - uint8x8_t __s2_20 = __p2_20; \ - uint8x16_t __ret_20; \ - __ret_20 = vsetq_lane_u8(vget_lane_u8(__s2_20, __p3_20), __s0_20, __p1_20); \ - __ret_20; \ -}) -#else -#define vcopyq_lane_u8(__p0_21, __p1_21, __p2_21, __p3_21) __extension__ ({ \ - uint8x16_t __s0_21 = __p0_21; \ - uint8x8_t __s2_21 = __p2_21; \ - uint8x16_t __rev0_21; __rev0_21 = __builtin_shufflevector(__s0_21, __s0_21, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint8x8_t __rev2_21; __rev2_21 = __builtin_shufflevector(__s2_21, __s2_21, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint8x16_t __ret_21; \ - __ret_21 = __noswap_vsetq_lane_u8(__noswap_vget_lane_u8(__rev2_21, __p3_21), __rev0_21, __p1_21); \ - __ret_21 = __builtin_shufflevector(__ret_21, __ret_21, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_21; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcopyq_lane_u32(__p0_22, __p1_22, __p2_22, __p3_22) __extension__ ({ \ - uint32x4_t __s0_22 = __p0_22; \ - uint32x2_t __s2_22 = __p2_22; \ - uint32x4_t __ret_22; \ - __ret_22 = vsetq_lane_u32(vget_lane_u32(__s2_22, __p3_22), __s0_22, __p1_22); \ - __ret_22; \ -}) -#else -#define vcopyq_lane_u32(__p0_23, __p1_23, __p2_23, __p3_23) __extension__ ({ \ - uint32x4_t __s0_23 = __p0_23; \ - uint32x2_t __s2_23 = __p2_23; \ - uint32x4_t __rev0_23; __rev0_23 = __builtin_shufflevector(__s0_23, __s0_23, 3, 2, 1, 0); \ - uint32x2_t __rev2_23; __rev2_23 = __builtin_shufflevector(__s2_23, __s2_23, 1, 0); \ - uint32x4_t __ret_23; \ - __ret_23 = __noswap_vsetq_lane_u32(__noswap_vget_lane_u32(__rev2_23, __p3_23), __rev0_23, __p1_23); \ - __ret_23 = __builtin_shufflevector(__ret_23, __ret_23, 3, 2, 1, 0); \ - __ret_23; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcopyq_lane_u64(__p0_24, __p1_24, __p2_24, __p3_24) __extension__ ({ \ - uint64x2_t __s0_24 = __p0_24; \ - uint64x1_t __s2_24 = __p2_24; \ - uint64x2_t __ret_24; \ - __ret_24 = vsetq_lane_u64(vget_lane_u64(__s2_24, __p3_24), __s0_24, __p1_24); \ - __ret_24; \ -}) -#else -#define vcopyq_lane_u64(__p0_25, __p1_25, __p2_25, __p3_25) __extension__ ({ \ - uint64x2_t __s0_25 = __p0_25; \ - uint64x1_t 
__s2_25 = __p2_25; \ - uint64x2_t __rev0_25; __rev0_25 = __builtin_shufflevector(__s0_25, __s0_25, 1, 0); \ - uint64x2_t __ret_25; \ - __ret_25 = __noswap_vsetq_lane_u64(__noswap_vget_lane_u64(__s2_25, __p3_25), __rev0_25, __p1_25); \ - __ret_25 = __builtin_shufflevector(__ret_25, __ret_25, 1, 0); \ - __ret_25; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcopyq_lane_u16(__p0_26, __p1_26, __p2_26, __p3_26) __extension__ ({ \ - uint16x8_t __s0_26 = __p0_26; \ - uint16x4_t __s2_26 = __p2_26; \ - uint16x8_t __ret_26; \ - __ret_26 = vsetq_lane_u16(vget_lane_u16(__s2_26, __p3_26), __s0_26, __p1_26); \ - __ret_26; \ -}) -#else -#define vcopyq_lane_u16(__p0_27, __p1_27, __p2_27, __p3_27) __extension__ ({ \ - uint16x8_t __s0_27 = __p0_27; \ - uint16x4_t __s2_27 = __p2_27; \ - uint16x8_t __rev0_27; __rev0_27 = __builtin_shufflevector(__s0_27, __s0_27, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint16x4_t __rev2_27; __rev2_27 = __builtin_shufflevector(__s2_27, __s2_27, 3, 2, 1, 0); \ - uint16x8_t __ret_27; \ - __ret_27 = __noswap_vsetq_lane_u16(__noswap_vget_lane_u16(__rev2_27, __p3_27), __rev0_27, __p1_27); \ - __ret_27 = __builtin_shufflevector(__ret_27, __ret_27, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_27; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcopyq_lane_s8(__p0_28, __p1_28, __p2_28, __p3_28) __extension__ ({ \ - int8x16_t __s0_28 = __p0_28; \ - int8x8_t __s2_28 = __p2_28; \ - int8x16_t __ret_28; \ - __ret_28 = vsetq_lane_s8(vget_lane_s8(__s2_28, __p3_28), __s0_28, __p1_28); \ - __ret_28; \ -}) -#else -#define vcopyq_lane_s8(__p0_29, __p1_29, __p2_29, __p3_29) __extension__ ({ \ - int8x16_t __s0_29 = __p0_29; \ - int8x8_t __s2_29 = __p2_29; \ - int8x16_t __rev0_29; __rev0_29 = __builtin_shufflevector(__s0_29, __s0_29, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - int8x8_t __rev2_29; __rev2_29 = __builtin_shufflevector(__s2_29, __s2_29, 7, 6, 5, 4, 3, 2, 1, 0); \ - int8x16_t __ret_29; \ - __ret_29 = __noswap_vsetq_lane_s8(__noswap_vget_lane_s8(__rev2_29, __p3_29), __rev0_29, __p1_29); \ - __ret_29 = __builtin_shufflevector(__ret_29, __ret_29, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_29; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcopyq_lane_f32(__p0_30, __p1_30, __p2_30, __p3_30) __extension__ ({ \ - float32x4_t __s0_30 = __p0_30; \ - float32x2_t __s2_30 = __p2_30; \ - float32x4_t __ret_30; \ - __ret_30 = vsetq_lane_f32(vget_lane_f32(__s2_30, __p3_30), __s0_30, __p1_30); \ - __ret_30; \ -}) -#else -#define vcopyq_lane_f32(__p0_31, __p1_31, __p2_31, __p3_31) __extension__ ({ \ - float32x4_t __s0_31 = __p0_31; \ - float32x2_t __s2_31 = __p2_31; \ - float32x4_t __rev0_31; __rev0_31 = __builtin_shufflevector(__s0_31, __s0_31, 3, 2, 1, 0); \ - float32x2_t __rev2_31; __rev2_31 = __builtin_shufflevector(__s2_31, __s2_31, 1, 0); \ - float32x4_t __ret_31; \ - __ret_31 = __noswap_vsetq_lane_f32(__noswap_vget_lane_f32(__rev2_31, __p3_31), __rev0_31, __p1_31); \ - __ret_31 = __builtin_shufflevector(__ret_31, __ret_31, 3, 2, 1, 0); \ - __ret_31; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcopyq_lane_s32(__p0_32, __p1_32, __p2_32, __p3_32) __extension__ ({ \ - int32x4_t __s0_32 = __p0_32; \ - int32x2_t __s2_32 = __p2_32; \ - int32x4_t __ret_32; \ - __ret_32 = vsetq_lane_s32(vget_lane_s32(__s2_32, __p3_32), __s0_32, __p1_32); \ - __ret_32; \ -}) -#else -#define vcopyq_lane_s32(__p0_33, __p1_33, __p2_33, __p3_33) __extension__ ({ \ - int32x4_t __s0_33 = __p0_33; \ - int32x2_t __s2_33 = __p2_33; \ - int32x4_t __rev0_33; __rev0_33 = 
__builtin_shufflevector(__s0_33, __s0_33, 3, 2, 1, 0); \ - int32x2_t __rev2_33; __rev2_33 = __builtin_shufflevector(__s2_33, __s2_33, 1, 0); \ - int32x4_t __ret_33; \ - __ret_33 = __noswap_vsetq_lane_s32(__noswap_vget_lane_s32(__rev2_33, __p3_33), __rev0_33, __p1_33); \ - __ret_33 = __builtin_shufflevector(__ret_33, __ret_33, 3, 2, 1, 0); \ - __ret_33; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcopyq_lane_s64(__p0_34, __p1_34, __p2_34, __p3_34) __extension__ ({ \ - int64x2_t __s0_34 = __p0_34; \ - int64x1_t __s2_34 = __p2_34; \ - int64x2_t __ret_34; \ - __ret_34 = vsetq_lane_s64(vget_lane_s64(__s2_34, __p3_34), __s0_34, __p1_34); \ - __ret_34; \ -}) -#else -#define vcopyq_lane_s64(__p0_35, __p1_35, __p2_35, __p3_35) __extension__ ({ \ - int64x2_t __s0_35 = __p0_35; \ - int64x1_t __s2_35 = __p2_35; \ - int64x2_t __rev0_35; __rev0_35 = __builtin_shufflevector(__s0_35, __s0_35, 1, 0); \ - int64x2_t __ret_35; \ - __ret_35 = __noswap_vsetq_lane_s64(__noswap_vget_lane_s64(__s2_35, __p3_35), __rev0_35, __p1_35); \ - __ret_35 = __builtin_shufflevector(__ret_35, __ret_35, 1, 0); \ - __ret_35; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcopyq_lane_s16(__p0_36, __p1_36, __p2_36, __p3_36) __extension__ ({ \ - int16x8_t __s0_36 = __p0_36; \ - int16x4_t __s2_36 = __p2_36; \ - int16x8_t __ret_36; \ - __ret_36 = vsetq_lane_s16(vget_lane_s16(__s2_36, __p3_36), __s0_36, __p1_36); \ - __ret_36; \ -}) -#else -#define vcopyq_lane_s16(__p0_37, __p1_37, __p2_37, __p3_37) __extension__ ({ \ - int16x8_t __s0_37 = __p0_37; \ - int16x4_t __s2_37 = __p2_37; \ - int16x8_t __rev0_37; __rev0_37 = __builtin_shufflevector(__s0_37, __s0_37, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x4_t __rev2_37; __rev2_37 = __builtin_shufflevector(__s2_37, __s2_37, 3, 2, 1, 0); \ - int16x8_t __ret_37; \ - __ret_37 = __noswap_vsetq_lane_s16(__noswap_vget_lane_s16(__rev2_37, __p3_37), __rev0_37, __p1_37); \ - __ret_37 = __builtin_shufflevector(__ret_37, __ret_37, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_37; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcopy_lane_p8(__p0_38, __p1_38, __p2_38, __p3_38) __extension__ ({ \ - poly8x8_t __s0_38 = __p0_38; \ - poly8x8_t __s2_38 = __p2_38; \ - poly8x8_t __ret_38; \ - __ret_38 = vset_lane_p8(vget_lane_p8(__s2_38, __p3_38), __s0_38, __p1_38); \ - __ret_38; \ -}) -#else -#define vcopy_lane_p8(__p0_39, __p1_39, __p2_39, __p3_39) __extension__ ({ \ - poly8x8_t __s0_39 = __p0_39; \ - poly8x8_t __s2_39 = __p2_39; \ - poly8x8_t __rev0_39; __rev0_39 = __builtin_shufflevector(__s0_39, __s0_39, 7, 6, 5, 4, 3, 2, 1, 0); \ - poly8x8_t __rev2_39; __rev2_39 = __builtin_shufflevector(__s2_39, __s2_39, 7, 6, 5, 4, 3, 2, 1, 0); \ - poly8x8_t __ret_39; \ - __ret_39 = __noswap_vset_lane_p8(__noswap_vget_lane_p8(__rev2_39, __p3_39), __rev0_39, __p1_39); \ - __ret_39 = __builtin_shufflevector(__ret_39, __ret_39, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_39; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcopy_lane_p16(__p0_40, __p1_40, __p2_40, __p3_40) __extension__ ({ \ - poly16x4_t __s0_40 = __p0_40; \ - poly16x4_t __s2_40 = __p2_40; \ - poly16x4_t __ret_40; \ - __ret_40 = vset_lane_p16(vget_lane_p16(__s2_40, __p3_40), __s0_40, __p1_40); \ - __ret_40; \ -}) -#else -#define vcopy_lane_p16(__p0_41, __p1_41, __p2_41, __p3_41) __extension__ ({ \ - poly16x4_t __s0_41 = __p0_41; \ - poly16x4_t __s2_41 = __p2_41; \ - poly16x4_t __rev0_41; __rev0_41 = __builtin_shufflevector(__s0_41, __s0_41, 3, 2, 1, 0); \ - poly16x4_t __rev2_41; __rev2_41 = __builtin_shufflevector(__s2_41, __s2_41, 3, 2, 1, 0); \ - poly16x4_t 
__ret_41; \ - __ret_41 = __noswap_vset_lane_p16(__noswap_vget_lane_p16(__rev2_41, __p3_41), __rev0_41, __p1_41); \ - __ret_41 = __builtin_shufflevector(__ret_41, __ret_41, 3, 2, 1, 0); \ - __ret_41; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcopy_lane_u8(__p0_42, __p1_42, __p2_42, __p3_42) __extension__ ({ \ - uint8x8_t __s0_42 = __p0_42; \ - uint8x8_t __s2_42 = __p2_42; \ - uint8x8_t __ret_42; \ - __ret_42 = vset_lane_u8(vget_lane_u8(__s2_42, __p3_42), __s0_42, __p1_42); \ - __ret_42; \ -}) -#else -#define vcopy_lane_u8(__p0_43, __p1_43, __p2_43, __p3_43) __extension__ ({ \ - uint8x8_t __s0_43 = __p0_43; \ - uint8x8_t __s2_43 = __p2_43; \ - uint8x8_t __rev0_43; __rev0_43 = __builtin_shufflevector(__s0_43, __s0_43, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint8x8_t __rev2_43; __rev2_43 = __builtin_shufflevector(__s2_43, __s2_43, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint8x8_t __ret_43; \ - __ret_43 = __noswap_vset_lane_u8(__noswap_vget_lane_u8(__rev2_43, __p3_43), __rev0_43, __p1_43); \ - __ret_43 = __builtin_shufflevector(__ret_43, __ret_43, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_43; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcopy_lane_u32(__p0_44, __p1_44, __p2_44, __p3_44) __extension__ ({ \ - uint32x2_t __s0_44 = __p0_44; \ - uint32x2_t __s2_44 = __p2_44; \ - uint32x2_t __ret_44; \ - __ret_44 = vset_lane_u32(vget_lane_u32(__s2_44, __p3_44), __s0_44, __p1_44); \ - __ret_44; \ -}) -#else -#define vcopy_lane_u32(__p0_45, __p1_45, __p2_45, __p3_45) __extension__ ({ \ - uint32x2_t __s0_45 = __p0_45; \ - uint32x2_t __s2_45 = __p2_45; \ - uint32x2_t __rev0_45; __rev0_45 = __builtin_shufflevector(__s0_45, __s0_45, 1, 0); \ - uint32x2_t __rev2_45; __rev2_45 = __builtin_shufflevector(__s2_45, __s2_45, 1, 0); \ - uint32x2_t __ret_45; \ - __ret_45 = __noswap_vset_lane_u32(__noswap_vget_lane_u32(__rev2_45, __p3_45), __rev0_45, __p1_45); \ - __ret_45 = __builtin_shufflevector(__ret_45, __ret_45, 1, 0); \ - __ret_45; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcopy_lane_u64(__p0_46, __p1_46, __p2_46, __p3_46) __extension__ ({ \ - uint64x1_t __s0_46 = __p0_46; \ - uint64x1_t __s2_46 = __p2_46; \ - uint64x1_t __ret_46; \ - __ret_46 = vset_lane_u64(vget_lane_u64(__s2_46, __p3_46), __s0_46, __p1_46); \ - __ret_46; \ -}) -#else -#define vcopy_lane_u64(__p0_47, __p1_47, __p2_47, __p3_47) __extension__ ({ \ - uint64x1_t __s0_47 = __p0_47; \ - uint64x1_t __s2_47 = __p2_47; \ - uint64x1_t __ret_47; \ - __ret_47 = __noswap_vset_lane_u64(__noswap_vget_lane_u64(__s2_47, __p3_47), __s0_47, __p1_47); \ - __ret_47; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcopy_lane_u16(__p0_48, __p1_48, __p2_48, __p3_48) __extension__ ({ \ - uint16x4_t __s0_48 = __p0_48; \ - uint16x4_t __s2_48 = __p2_48; \ - uint16x4_t __ret_48; \ - __ret_48 = vset_lane_u16(vget_lane_u16(__s2_48, __p3_48), __s0_48, __p1_48); \ - __ret_48; \ -}) -#else -#define vcopy_lane_u16(__p0_49, __p1_49, __p2_49, __p3_49) __extension__ ({ \ - uint16x4_t __s0_49 = __p0_49; \ - uint16x4_t __s2_49 = __p2_49; \ - uint16x4_t __rev0_49; __rev0_49 = __builtin_shufflevector(__s0_49, __s0_49, 3, 2, 1, 0); \ - uint16x4_t __rev2_49; __rev2_49 = __builtin_shufflevector(__s2_49, __s2_49, 3, 2, 1, 0); \ - uint16x4_t __ret_49; \ - __ret_49 = __noswap_vset_lane_u16(__noswap_vget_lane_u16(__rev2_49, __p3_49), __rev0_49, __p1_49); \ - __ret_49 = __builtin_shufflevector(__ret_49, __ret_49, 3, 2, 1, 0); \ - __ret_49; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcopy_lane_s8(__p0_50, __p1_50, __p2_50, __p3_50) __extension__ ({ \ - int8x8_t __s0_50 = __p0_50; \ - 
int8x8_t __s2_50 = __p2_50; \ - int8x8_t __ret_50; \ - __ret_50 = vset_lane_s8(vget_lane_s8(__s2_50, __p3_50), __s0_50, __p1_50); \ - __ret_50; \ -}) -#else -#define vcopy_lane_s8(__p0_51, __p1_51, __p2_51, __p3_51) __extension__ ({ \ - int8x8_t __s0_51 = __p0_51; \ - int8x8_t __s2_51 = __p2_51; \ - int8x8_t __rev0_51; __rev0_51 = __builtin_shufflevector(__s0_51, __s0_51, 7, 6, 5, 4, 3, 2, 1, 0); \ - int8x8_t __rev2_51; __rev2_51 = __builtin_shufflevector(__s2_51, __s2_51, 7, 6, 5, 4, 3, 2, 1, 0); \ - int8x8_t __ret_51; \ - __ret_51 = __noswap_vset_lane_s8(__noswap_vget_lane_s8(__rev2_51, __p3_51), __rev0_51, __p1_51); \ - __ret_51 = __builtin_shufflevector(__ret_51, __ret_51, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_51; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcopy_lane_f32(__p0_52, __p1_52, __p2_52, __p3_52) __extension__ ({ \ - float32x2_t __s0_52 = __p0_52; \ - float32x2_t __s2_52 = __p2_52; \ - float32x2_t __ret_52; \ - __ret_52 = vset_lane_f32(vget_lane_f32(__s2_52, __p3_52), __s0_52, __p1_52); \ - __ret_52; \ -}) -#else -#define vcopy_lane_f32(__p0_53, __p1_53, __p2_53, __p3_53) __extension__ ({ \ - float32x2_t __s0_53 = __p0_53; \ - float32x2_t __s2_53 = __p2_53; \ - float32x2_t __rev0_53; __rev0_53 = __builtin_shufflevector(__s0_53, __s0_53, 1, 0); \ - float32x2_t __rev2_53; __rev2_53 = __builtin_shufflevector(__s2_53, __s2_53, 1, 0); \ - float32x2_t __ret_53; \ - __ret_53 = __noswap_vset_lane_f32(__noswap_vget_lane_f32(__rev2_53, __p3_53), __rev0_53, __p1_53); \ - __ret_53 = __builtin_shufflevector(__ret_53, __ret_53, 1, 0); \ - __ret_53; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcopy_lane_s32(__p0_54, __p1_54, __p2_54, __p3_54) __extension__ ({ \ - int32x2_t __s0_54 = __p0_54; \ - int32x2_t __s2_54 = __p2_54; \ - int32x2_t __ret_54; \ - __ret_54 = vset_lane_s32(vget_lane_s32(__s2_54, __p3_54), __s0_54, __p1_54); \ - __ret_54; \ -}) -#else -#define vcopy_lane_s32(__p0_55, __p1_55, __p2_55, __p3_55) __extension__ ({ \ - int32x2_t __s0_55 = __p0_55; \ - int32x2_t __s2_55 = __p2_55; \ - int32x2_t __rev0_55; __rev0_55 = __builtin_shufflevector(__s0_55, __s0_55, 1, 0); \ - int32x2_t __rev2_55; __rev2_55 = __builtin_shufflevector(__s2_55, __s2_55, 1, 0); \ - int32x2_t __ret_55; \ - __ret_55 = __noswap_vset_lane_s32(__noswap_vget_lane_s32(__rev2_55, __p3_55), __rev0_55, __p1_55); \ - __ret_55 = __builtin_shufflevector(__ret_55, __ret_55, 1, 0); \ - __ret_55; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcopy_lane_s64(__p0_56, __p1_56, __p2_56, __p3_56) __extension__ ({ \ - int64x1_t __s0_56 = __p0_56; \ - int64x1_t __s2_56 = __p2_56; \ - int64x1_t __ret_56; \ - __ret_56 = vset_lane_s64(vget_lane_s64(__s2_56, __p3_56), __s0_56, __p1_56); \ - __ret_56; \ -}) -#else -#define vcopy_lane_s64(__p0_57, __p1_57, __p2_57, __p3_57) __extension__ ({ \ - int64x1_t __s0_57 = __p0_57; \ - int64x1_t __s2_57 = __p2_57; \ - int64x1_t __ret_57; \ - __ret_57 = __noswap_vset_lane_s64(__noswap_vget_lane_s64(__s2_57, __p3_57), __s0_57, __p1_57); \ - __ret_57; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcopy_lane_s16(__p0_58, __p1_58, __p2_58, __p3_58) __extension__ ({ \ - int16x4_t __s0_58 = __p0_58; \ - int16x4_t __s2_58 = __p2_58; \ - int16x4_t __ret_58; \ - __ret_58 = vset_lane_s16(vget_lane_s16(__s2_58, __p3_58), __s0_58, __p1_58); \ - __ret_58; \ -}) -#else -#define vcopy_lane_s16(__p0_59, __p1_59, __p2_59, __p3_59) __extension__ ({ \ - int16x4_t __s0_59 = __p0_59; \ - int16x4_t __s2_59 = __p2_59; \ - int16x4_t __rev0_59; __rev0_59 = __builtin_shufflevector(__s0_59, __s0_59, 
3, 2, 1, 0); \ - int16x4_t __rev2_59; __rev2_59 = __builtin_shufflevector(__s2_59, __s2_59, 3, 2, 1, 0); \ - int16x4_t __ret_59; \ - __ret_59 = __noswap_vset_lane_s16(__noswap_vget_lane_s16(__rev2_59, __p3_59), __rev0_59, __p1_59); \ - __ret_59 = __builtin_shufflevector(__ret_59, __ret_59, 3, 2, 1, 0); \ - __ret_59; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcopyq_laneq_p8(__p0_60, __p1_60, __p2_60, __p3_60) __extension__ ({ \ - poly8x16_t __s0_60 = __p0_60; \ - poly8x16_t __s2_60 = __p2_60; \ - poly8x16_t __ret_60; \ - __ret_60 = vsetq_lane_p8(vgetq_lane_p8(__s2_60, __p3_60), __s0_60, __p1_60); \ - __ret_60; \ -}) -#else -#define vcopyq_laneq_p8(__p0_61, __p1_61, __p2_61, __p3_61) __extension__ ({ \ - poly8x16_t __s0_61 = __p0_61; \ - poly8x16_t __s2_61 = __p2_61; \ - poly8x16_t __rev0_61; __rev0_61 = __builtin_shufflevector(__s0_61, __s0_61, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - poly8x16_t __rev2_61; __rev2_61 = __builtin_shufflevector(__s2_61, __s2_61, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - poly8x16_t __ret_61; \ - __ret_61 = __noswap_vsetq_lane_p8(__noswap_vgetq_lane_p8(__rev2_61, __p3_61), __rev0_61, __p1_61); \ - __ret_61 = __builtin_shufflevector(__ret_61, __ret_61, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_61; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcopyq_laneq_p16(__p0_62, __p1_62, __p2_62, __p3_62) __extension__ ({ \ - poly16x8_t __s0_62 = __p0_62; \ - poly16x8_t __s2_62 = __p2_62; \ - poly16x8_t __ret_62; \ - __ret_62 = vsetq_lane_p16(vgetq_lane_p16(__s2_62, __p3_62), __s0_62, __p1_62); \ - __ret_62; \ -}) -#else -#define vcopyq_laneq_p16(__p0_63, __p1_63, __p2_63, __p3_63) __extension__ ({ \ - poly16x8_t __s0_63 = __p0_63; \ - poly16x8_t __s2_63 = __p2_63; \ - poly16x8_t __rev0_63; __rev0_63 = __builtin_shufflevector(__s0_63, __s0_63, 7, 6, 5, 4, 3, 2, 1, 0); \ - poly16x8_t __rev2_63; __rev2_63 = __builtin_shufflevector(__s2_63, __s2_63, 7, 6, 5, 4, 3, 2, 1, 0); \ - poly16x8_t __ret_63; \ - __ret_63 = __noswap_vsetq_lane_p16(__noswap_vgetq_lane_p16(__rev2_63, __p3_63), __rev0_63, __p1_63); \ - __ret_63 = __builtin_shufflevector(__ret_63, __ret_63, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_63; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcopyq_laneq_u8(__p0_64, __p1_64, __p2_64, __p3_64) __extension__ ({ \ - uint8x16_t __s0_64 = __p0_64; \ - uint8x16_t __s2_64 = __p2_64; \ - uint8x16_t __ret_64; \ - __ret_64 = vsetq_lane_u8(vgetq_lane_u8(__s2_64, __p3_64), __s0_64, __p1_64); \ - __ret_64; \ -}) -#else -#define vcopyq_laneq_u8(__p0_65, __p1_65, __p2_65, __p3_65) __extension__ ({ \ - uint8x16_t __s0_65 = __p0_65; \ - uint8x16_t __s2_65 = __p2_65; \ - uint8x16_t __rev0_65; __rev0_65 = __builtin_shufflevector(__s0_65, __s0_65, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint8x16_t __rev2_65; __rev2_65 = __builtin_shufflevector(__s2_65, __s2_65, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint8x16_t __ret_65; \ - __ret_65 = __noswap_vsetq_lane_u8(__noswap_vgetq_lane_u8(__rev2_65, __p3_65), __rev0_65, __p1_65); \ - __ret_65 = __builtin_shufflevector(__ret_65, __ret_65, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_65; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcopyq_laneq_u32(__p0_66, __p1_66, __p2_66, __p3_66) __extension__ ({ \ - uint32x4_t __s0_66 = __p0_66; \ - uint32x4_t __s2_66 = __p2_66; \ - uint32x4_t __ret_66; \ - __ret_66 = vsetq_lane_u32(vgetq_lane_u32(__s2_66, __p3_66), __s0_66, __p1_66); \ - __ret_66; \ -}) -#else -#define 
vcopyq_laneq_u32(__p0_67, __p1_67, __p2_67, __p3_67) __extension__ ({ \ - uint32x4_t __s0_67 = __p0_67; \ - uint32x4_t __s2_67 = __p2_67; \ - uint32x4_t __rev0_67; __rev0_67 = __builtin_shufflevector(__s0_67, __s0_67, 3, 2, 1, 0); \ - uint32x4_t __rev2_67; __rev2_67 = __builtin_shufflevector(__s2_67, __s2_67, 3, 2, 1, 0); \ - uint32x4_t __ret_67; \ - __ret_67 = __noswap_vsetq_lane_u32(__noswap_vgetq_lane_u32(__rev2_67, __p3_67), __rev0_67, __p1_67); \ - __ret_67 = __builtin_shufflevector(__ret_67, __ret_67, 3, 2, 1, 0); \ - __ret_67; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcopyq_laneq_u64(__p0_68, __p1_68, __p2_68, __p3_68) __extension__ ({ \ - uint64x2_t __s0_68 = __p0_68; \ - uint64x2_t __s2_68 = __p2_68; \ - uint64x2_t __ret_68; \ - __ret_68 = vsetq_lane_u64(vgetq_lane_u64(__s2_68, __p3_68), __s0_68, __p1_68); \ - __ret_68; \ -}) -#else -#define vcopyq_laneq_u64(__p0_69, __p1_69, __p2_69, __p3_69) __extension__ ({ \ - uint64x2_t __s0_69 = __p0_69; \ - uint64x2_t __s2_69 = __p2_69; \ - uint64x2_t __rev0_69; __rev0_69 = __builtin_shufflevector(__s0_69, __s0_69, 1, 0); \ - uint64x2_t __rev2_69; __rev2_69 = __builtin_shufflevector(__s2_69, __s2_69, 1, 0); \ - uint64x2_t __ret_69; \ - __ret_69 = __noswap_vsetq_lane_u64(__noswap_vgetq_lane_u64(__rev2_69, __p3_69), __rev0_69, __p1_69); \ - __ret_69 = __builtin_shufflevector(__ret_69, __ret_69, 1, 0); \ - __ret_69; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcopyq_laneq_u16(__p0_70, __p1_70, __p2_70, __p3_70) __extension__ ({ \ - uint16x8_t __s0_70 = __p0_70; \ - uint16x8_t __s2_70 = __p2_70; \ - uint16x8_t __ret_70; \ - __ret_70 = vsetq_lane_u16(vgetq_lane_u16(__s2_70, __p3_70), __s0_70, __p1_70); \ - __ret_70; \ -}) -#else -#define vcopyq_laneq_u16(__p0_71, __p1_71, __p2_71, __p3_71) __extension__ ({ \ - uint16x8_t __s0_71 = __p0_71; \ - uint16x8_t __s2_71 = __p2_71; \ - uint16x8_t __rev0_71; __rev0_71 = __builtin_shufflevector(__s0_71, __s0_71, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint16x8_t __rev2_71; __rev2_71 = __builtin_shufflevector(__s2_71, __s2_71, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint16x8_t __ret_71; \ - __ret_71 = __noswap_vsetq_lane_u16(__noswap_vgetq_lane_u16(__rev2_71, __p3_71), __rev0_71, __p1_71); \ - __ret_71 = __builtin_shufflevector(__ret_71, __ret_71, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_71; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcopyq_laneq_s8(__p0_72, __p1_72, __p2_72, __p3_72) __extension__ ({ \ - int8x16_t __s0_72 = __p0_72; \ - int8x16_t __s2_72 = __p2_72; \ - int8x16_t __ret_72; \ - __ret_72 = vsetq_lane_s8(vgetq_lane_s8(__s2_72, __p3_72), __s0_72, __p1_72); \ - __ret_72; \ -}) -#else -#define vcopyq_laneq_s8(__p0_73, __p1_73, __p2_73, __p3_73) __extension__ ({ \ - int8x16_t __s0_73 = __p0_73; \ - int8x16_t __s2_73 = __p2_73; \ - int8x16_t __rev0_73; __rev0_73 = __builtin_shufflevector(__s0_73, __s0_73, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - int8x16_t __rev2_73; __rev2_73 = __builtin_shufflevector(__s2_73, __s2_73, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - int8x16_t __ret_73; \ - __ret_73 = __noswap_vsetq_lane_s8(__noswap_vgetq_lane_s8(__rev2_73, __p3_73), __rev0_73, __p1_73); \ - __ret_73 = __builtin_shufflevector(__ret_73, __ret_73, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_73; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcopyq_laneq_f32(__p0_74, __p1_74, __p2_74, __p3_74) __extension__ ({ \ - float32x4_t __s0_74 = __p0_74; \ - float32x4_t __s2_74 = __p2_74; \ - float32x4_t __ret_74; \ - __ret_74 = 
vsetq_lane_f32(vgetq_lane_f32(__s2_74, __p3_74), __s0_74, __p1_74); \ - __ret_74; \ -}) -#else -#define vcopyq_laneq_f32(__p0_75, __p1_75, __p2_75, __p3_75) __extension__ ({ \ - float32x4_t __s0_75 = __p0_75; \ - float32x4_t __s2_75 = __p2_75; \ - float32x4_t __rev0_75; __rev0_75 = __builtin_shufflevector(__s0_75, __s0_75, 3, 2, 1, 0); \ - float32x4_t __rev2_75; __rev2_75 = __builtin_shufflevector(__s2_75, __s2_75, 3, 2, 1, 0); \ - float32x4_t __ret_75; \ - __ret_75 = __noswap_vsetq_lane_f32(__noswap_vgetq_lane_f32(__rev2_75, __p3_75), __rev0_75, __p1_75); \ - __ret_75 = __builtin_shufflevector(__ret_75, __ret_75, 3, 2, 1, 0); \ - __ret_75; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcopyq_laneq_s32(__p0_76, __p1_76, __p2_76, __p3_76) __extension__ ({ \ - int32x4_t __s0_76 = __p0_76; \ - int32x4_t __s2_76 = __p2_76; \ - int32x4_t __ret_76; \ - __ret_76 = vsetq_lane_s32(vgetq_lane_s32(__s2_76, __p3_76), __s0_76, __p1_76); \ - __ret_76; \ -}) -#else -#define vcopyq_laneq_s32(__p0_77, __p1_77, __p2_77, __p3_77) __extension__ ({ \ - int32x4_t __s0_77 = __p0_77; \ - int32x4_t __s2_77 = __p2_77; \ - int32x4_t __rev0_77; __rev0_77 = __builtin_shufflevector(__s0_77, __s0_77, 3, 2, 1, 0); \ - int32x4_t __rev2_77; __rev2_77 = __builtin_shufflevector(__s2_77, __s2_77, 3, 2, 1, 0); \ - int32x4_t __ret_77; \ - __ret_77 = __noswap_vsetq_lane_s32(__noswap_vgetq_lane_s32(__rev2_77, __p3_77), __rev0_77, __p1_77); \ - __ret_77 = __builtin_shufflevector(__ret_77, __ret_77, 3, 2, 1, 0); \ - __ret_77; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcopyq_laneq_s64(__p0_78, __p1_78, __p2_78, __p3_78) __extension__ ({ \ - int64x2_t __s0_78 = __p0_78; \ - int64x2_t __s2_78 = __p2_78; \ - int64x2_t __ret_78; \ - __ret_78 = vsetq_lane_s64(vgetq_lane_s64(__s2_78, __p3_78), __s0_78, __p1_78); \ - __ret_78; \ -}) -#else -#define vcopyq_laneq_s64(__p0_79, __p1_79, __p2_79, __p3_79) __extension__ ({ \ - int64x2_t __s0_79 = __p0_79; \ - int64x2_t __s2_79 = __p2_79; \ - int64x2_t __rev0_79; __rev0_79 = __builtin_shufflevector(__s0_79, __s0_79, 1, 0); \ - int64x2_t __rev2_79; __rev2_79 = __builtin_shufflevector(__s2_79, __s2_79, 1, 0); \ - int64x2_t __ret_79; \ - __ret_79 = __noswap_vsetq_lane_s64(__noswap_vgetq_lane_s64(__rev2_79, __p3_79), __rev0_79, __p1_79); \ - __ret_79 = __builtin_shufflevector(__ret_79, __ret_79, 1, 0); \ - __ret_79; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcopyq_laneq_s16(__p0_80, __p1_80, __p2_80, __p3_80) __extension__ ({ \ - int16x8_t __s0_80 = __p0_80; \ - int16x8_t __s2_80 = __p2_80; \ - int16x8_t __ret_80; \ - __ret_80 = vsetq_lane_s16(vgetq_lane_s16(__s2_80, __p3_80), __s0_80, __p1_80); \ - __ret_80; \ -}) -#else -#define vcopyq_laneq_s16(__p0_81, __p1_81, __p2_81, __p3_81) __extension__ ({ \ - int16x8_t __s0_81 = __p0_81; \ - int16x8_t __s2_81 = __p2_81; \ - int16x8_t __rev0_81; __rev0_81 = __builtin_shufflevector(__s0_81, __s0_81, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x8_t __rev2_81; __rev2_81 = __builtin_shufflevector(__s2_81, __s2_81, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x8_t __ret_81; \ - __ret_81 = __noswap_vsetq_lane_s16(__noswap_vgetq_lane_s16(__rev2_81, __p3_81), __rev0_81, __p1_81); \ - __ret_81 = __builtin_shufflevector(__ret_81, __ret_81, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_81; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcopy_laneq_p8(__p0_82, __p1_82, __p2_82, __p3_82) __extension__ ({ \ - poly8x8_t __s0_82 = __p0_82; \ - poly8x16_t __s2_82 = __p2_82; \ - poly8x8_t __ret_82; \ - __ret_82 = vset_lane_p8(vgetq_lane_p8(__s2_82, __p3_82), __s0_82, __p1_82); \ 
- __ret_82; \ -}) -#else -#define vcopy_laneq_p8(__p0_83, __p1_83, __p2_83, __p3_83) __extension__ ({ \ - poly8x8_t __s0_83 = __p0_83; \ - poly8x16_t __s2_83 = __p2_83; \ - poly8x8_t __rev0_83; __rev0_83 = __builtin_shufflevector(__s0_83, __s0_83, 7, 6, 5, 4, 3, 2, 1, 0); \ - poly8x16_t __rev2_83; __rev2_83 = __builtin_shufflevector(__s2_83, __s2_83, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - poly8x8_t __ret_83; \ - __ret_83 = __noswap_vset_lane_p8(__noswap_vgetq_lane_p8(__rev2_83, __p3_83), __rev0_83, __p1_83); \ - __ret_83 = __builtin_shufflevector(__ret_83, __ret_83, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_83; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcopy_laneq_p16(__p0_84, __p1_84, __p2_84, __p3_84) __extension__ ({ \ - poly16x4_t __s0_84 = __p0_84; \ - poly16x8_t __s2_84 = __p2_84; \ - poly16x4_t __ret_84; \ - __ret_84 = vset_lane_p16(vgetq_lane_p16(__s2_84, __p3_84), __s0_84, __p1_84); \ - __ret_84; \ -}) -#else -#define vcopy_laneq_p16(__p0_85, __p1_85, __p2_85, __p3_85) __extension__ ({ \ - poly16x4_t __s0_85 = __p0_85; \ - poly16x8_t __s2_85 = __p2_85; \ - poly16x4_t __rev0_85; __rev0_85 = __builtin_shufflevector(__s0_85, __s0_85, 3, 2, 1, 0); \ - poly16x8_t __rev2_85; __rev2_85 = __builtin_shufflevector(__s2_85, __s2_85, 7, 6, 5, 4, 3, 2, 1, 0); \ - poly16x4_t __ret_85; \ - __ret_85 = __noswap_vset_lane_p16(__noswap_vgetq_lane_p16(__rev2_85, __p3_85), __rev0_85, __p1_85); \ - __ret_85 = __builtin_shufflevector(__ret_85, __ret_85, 3, 2, 1, 0); \ - __ret_85; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcopy_laneq_u8(__p0_86, __p1_86, __p2_86, __p3_86) __extension__ ({ \ - uint8x8_t __s0_86 = __p0_86; \ - uint8x16_t __s2_86 = __p2_86; \ - uint8x8_t __ret_86; \ - __ret_86 = vset_lane_u8(vgetq_lane_u8(__s2_86, __p3_86), __s0_86, __p1_86); \ - __ret_86; \ -}) -#else -#define vcopy_laneq_u8(__p0_87, __p1_87, __p2_87, __p3_87) __extension__ ({ \ - uint8x8_t __s0_87 = __p0_87; \ - uint8x16_t __s2_87 = __p2_87; \ - uint8x8_t __rev0_87; __rev0_87 = __builtin_shufflevector(__s0_87, __s0_87, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint8x16_t __rev2_87; __rev2_87 = __builtin_shufflevector(__s2_87, __s2_87, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint8x8_t __ret_87; \ - __ret_87 = __noswap_vset_lane_u8(__noswap_vgetq_lane_u8(__rev2_87, __p3_87), __rev0_87, __p1_87); \ - __ret_87 = __builtin_shufflevector(__ret_87, __ret_87, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_87; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcopy_laneq_u32(__p0_88, __p1_88, __p2_88, __p3_88) __extension__ ({ \ - uint32x2_t __s0_88 = __p0_88; \ - uint32x4_t __s2_88 = __p2_88; \ - uint32x2_t __ret_88; \ - __ret_88 = vset_lane_u32(vgetq_lane_u32(__s2_88, __p3_88), __s0_88, __p1_88); \ - __ret_88; \ -}) -#else -#define vcopy_laneq_u32(__p0_89, __p1_89, __p2_89, __p3_89) __extension__ ({ \ - uint32x2_t __s0_89 = __p0_89; \ - uint32x4_t __s2_89 = __p2_89; \ - uint32x2_t __rev0_89; __rev0_89 = __builtin_shufflevector(__s0_89, __s0_89, 1, 0); \ - uint32x4_t __rev2_89; __rev2_89 = __builtin_shufflevector(__s2_89, __s2_89, 3, 2, 1, 0); \ - uint32x2_t __ret_89; \ - __ret_89 = __noswap_vset_lane_u32(__noswap_vgetq_lane_u32(__rev2_89, __p3_89), __rev0_89, __p1_89); \ - __ret_89 = __builtin_shufflevector(__ret_89, __ret_89, 1, 0); \ - __ret_89; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcopy_laneq_u64(__p0_90, __p1_90, __p2_90, __p3_90) __extension__ ({ \ - uint64x1_t __s0_90 = __p0_90; \ - uint64x2_t __s2_90 = __p2_90; \ - uint64x1_t __ret_90; \ - __ret_90 = 
vset_lane_u64(vgetq_lane_u64(__s2_90, __p3_90), __s0_90, __p1_90); \ - __ret_90; \ -}) -#else -#define vcopy_laneq_u64(__p0_91, __p1_91, __p2_91, __p3_91) __extension__ ({ \ - uint64x1_t __s0_91 = __p0_91; \ - uint64x2_t __s2_91 = __p2_91; \ - uint64x2_t __rev2_91; __rev2_91 = __builtin_shufflevector(__s2_91, __s2_91, 1, 0); \ - uint64x1_t __ret_91; \ - __ret_91 = __noswap_vset_lane_u64(__noswap_vgetq_lane_u64(__rev2_91, __p3_91), __s0_91, __p1_91); \ - __ret_91; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcopy_laneq_u16(__p0_92, __p1_92, __p2_92, __p3_92) __extension__ ({ \ - uint16x4_t __s0_92 = __p0_92; \ - uint16x8_t __s2_92 = __p2_92; \ - uint16x4_t __ret_92; \ - __ret_92 = vset_lane_u16(vgetq_lane_u16(__s2_92, __p3_92), __s0_92, __p1_92); \ - __ret_92; \ -}) -#else -#define vcopy_laneq_u16(__p0_93, __p1_93, __p2_93, __p3_93) __extension__ ({ \ - uint16x4_t __s0_93 = __p0_93; \ - uint16x8_t __s2_93 = __p2_93; \ - uint16x4_t __rev0_93; __rev0_93 = __builtin_shufflevector(__s0_93, __s0_93, 3, 2, 1, 0); \ - uint16x8_t __rev2_93; __rev2_93 = __builtin_shufflevector(__s2_93, __s2_93, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint16x4_t __ret_93; \ - __ret_93 = __noswap_vset_lane_u16(__noswap_vgetq_lane_u16(__rev2_93, __p3_93), __rev0_93, __p1_93); \ - __ret_93 = __builtin_shufflevector(__ret_93, __ret_93, 3, 2, 1, 0); \ - __ret_93; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcopy_laneq_s8(__p0_94, __p1_94, __p2_94, __p3_94) __extension__ ({ \ - int8x8_t __s0_94 = __p0_94; \ - int8x16_t __s2_94 = __p2_94; \ - int8x8_t __ret_94; \ - __ret_94 = vset_lane_s8(vgetq_lane_s8(__s2_94, __p3_94), __s0_94, __p1_94); \ - __ret_94; \ -}) -#else -#define vcopy_laneq_s8(__p0_95, __p1_95, __p2_95, __p3_95) __extension__ ({ \ - int8x8_t __s0_95 = __p0_95; \ - int8x16_t __s2_95 = __p2_95; \ - int8x8_t __rev0_95; __rev0_95 = __builtin_shufflevector(__s0_95, __s0_95, 7, 6, 5, 4, 3, 2, 1, 0); \ - int8x16_t __rev2_95; __rev2_95 = __builtin_shufflevector(__s2_95, __s2_95, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - int8x8_t __ret_95; \ - __ret_95 = __noswap_vset_lane_s8(__noswap_vgetq_lane_s8(__rev2_95, __p3_95), __rev0_95, __p1_95); \ - __ret_95 = __builtin_shufflevector(__ret_95, __ret_95, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_95; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcopy_laneq_f32(__p0_96, __p1_96, __p2_96, __p3_96) __extension__ ({ \ - float32x2_t __s0_96 = __p0_96; \ - float32x4_t __s2_96 = __p2_96; \ - float32x2_t __ret_96; \ - __ret_96 = vset_lane_f32(vgetq_lane_f32(__s2_96, __p3_96), __s0_96, __p1_96); \ - __ret_96; \ -}) -#else -#define vcopy_laneq_f32(__p0_97, __p1_97, __p2_97, __p3_97) __extension__ ({ \ - float32x2_t __s0_97 = __p0_97; \ - float32x4_t __s2_97 = __p2_97; \ - float32x2_t __rev0_97; __rev0_97 = __builtin_shufflevector(__s0_97, __s0_97, 1, 0); \ - float32x4_t __rev2_97; __rev2_97 = __builtin_shufflevector(__s2_97, __s2_97, 3, 2, 1, 0); \ - float32x2_t __ret_97; \ - __ret_97 = __noswap_vset_lane_f32(__noswap_vgetq_lane_f32(__rev2_97, __p3_97), __rev0_97, __p1_97); \ - __ret_97 = __builtin_shufflevector(__ret_97, __ret_97, 1, 0); \ - __ret_97; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcopy_laneq_s32(__p0_98, __p1_98, __p2_98, __p3_98) __extension__ ({ \ - int32x2_t __s0_98 = __p0_98; \ - int32x4_t __s2_98 = __p2_98; \ - int32x2_t __ret_98; \ - __ret_98 = vset_lane_s32(vgetq_lane_s32(__s2_98, __p3_98), __s0_98, __p1_98); \ - __ret_98; \ -}) -#else -#define vcopy_laneq_s32(__p0_99, __p1_99, __p2_99, __p3_99) __extension__ ({ \ - int32x2_t __s0_99 = 
__p0_99; \ - int32x4_t __s2_99 = __p2_99; \ - int32x2_t __rev0_99; __rev0_99 = __builtin_shufflevector(__s0_99, __s0_99, 1, 0); \ - int32x4_t __rev2_99; __rev2_99 = __builtin_shufflevector(__s2_99, __s2_99, 3, 2, 1, 0); \ - int32x2_t __ret_99; \ - __ret_99 = __noswap_vset_lane_s32(__noswap_vgetq_lane_s32(__rev2_99, __p3_99), __rev0_99, __p1_99); \ - __ret_99 = __builtin_shufflevector(__ret_99, __ret_99, 1, 0); \ - __ret_99; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcopy_laneq_s64(__p0_100, __p1_100, __p2_100, __p3_100) __extension__ ({ \ - int64x1_t __s0_100 = __p0_100; \ - int64x2_t __s2_100 = __p2_100; \ - int64x1_t __ret_100; \ - __ret_100 = vset_lane_s64(vgetq_lane_s64(__s2_100, __p3_100), __s0_100, __p1_100); \ - __ret_100; \ -}) -#else -#define vcopy_laneq_s64(__p0_101, __p1_101, __p2_101, __p3_101) __extension__ ({ \ - int64x1_t __s0_101 = __p0_101; \ - int64x2_t __s2_101 = __p2_101; \ - int64x2_t __rev2_101; __rev2_101 = __builtin_shufflevector(__s2_101, __s2_101, 1, 0); \ - int64x1_t __ret_101; \ - __ret_101 = __noswap_vset_lane_s64(__noswap_vgetq_lane_s64(__rev2_101, __p3_101), __s0_101, __p1_101); \ - __ret_101; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcopy_laneq_s16(__p0_102, __p1_102, __p2_102, __p3_102) __extension__ ({ \ - int16x4_t __s0_102 = __p0_102; \ - int16x8_t __s2_102 = __p2_102; \ - int16x4_t __ret_102; \ - __ret_102 = vset_lane_s16(vgetq_lane_s16(__s2_102, __p3_102), __s0_102, __p1_102); \ - __ret_102; \ -}) -#else -#define vcopy_laneq_s16(__p0_103, __p1_103, __p2_103, __p3_103) __extension__ ({ \ - int16x4_t __s0_103 = __p0_103; \ - int16x8_t __s2_103 = __p2_103; \ - int16x4_t __rev0_103; __rev0_103 = __builtin_shufflevector(__s0_103, __s0_103, 3, 2, 1, 0); \ - int16x8_t __rev2_103; __rev2_103 = __builtin_shufflevector(__s2_103, __s2_103, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x4_t __ret_103; \ - __ret_103 = __noswap_vset_lane_s16(__noswap_vgetq_lane_s16(__rev2_103, __p3_103), __rev0_103, __p1_103); \ - __ret_103 = __builtin_shufflevector(__ret_103, __ret_103, 3, 2, 1, 0); \ - __ret_103; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly64x1_t vcreate_p64(uint64_t __p0) { - poly64x1_t __ret; - __ret = (poly64x1_t)(__p0); - return __ret; -} -#else -__ai poly64x1_t vcreate_p64(uint64_t __p0) { - poly64x1_t __ret; - __ret = (poly64x1_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64x1_t vcreate_f64(uint64_t __p0) { - float64x1_t __ret; - __ret = (float64x1_t)(__p0); - return __ret; -} -#else -__ai float64x1_t vcreate_f64(uint64_t __p0) { - float64x1_t __ret; - __ret = (float64x1_t)(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32_t vcvts_f32_s32(int32_t __p0) { - float32_t __ret; - __ret = (float32_t) __builtin_neon_vcvts_f32_s32(__p0); - return __ret; -} -#else -__ai float32_t vcvts_f32_s32(int32_t __p0) { - float32_t __ret; - __ret = (float32_t) __builtin_neon_vcvts_f32_s32(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32_t vcvts_f32_u32(uint32_t __p0) { - float32_t __ret; - __ret = (float32_t) __builtin_neon_vcvts_f32_u32(__p0); - return __ret; -} -#else -__ai float32_t vcvts_f32_u32(uint32_t __p0) { - float32_t __ret; - __ret = (float32_t) __builtin_neon_vcvts_f32_u32(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vcvt_f32_f64(float64x2_t __p0) { - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vcvt_f32_f64((int8x16_t)__p0, 9); - return __ret; -} -#else -__ai float32x2_t vcvt_f32_f64(float64x2_t __p0) { - float64x2_t 
__rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vcvt_f32_f64((int8x16_t)__rev0, 9); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -__ai float32x2_t __noswap_vcvt_f32_f64(float64x2_t __p0) { - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vcvt_f32_f64((int8x16_t)__p0, 9); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64_t vcvtd_f64_s64(int64_t __p0) { - float64_t __ret; - __ret = (float64_t) __builtin_neon_vcvtd_f64_s64(__p0); - return __ret; -} -#else -__ai float64_t vcvtd_f64_s64(int64_t __p0) { - float64_t __ret; - __ret = (float64_t) __builtin_neon_vcvtd_f64_s64(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64_t vcvtd_f64_u64(uint64_t __p0) { - float64_t __ret; - __ret = (float64_t) __builtin_neon_vcvtd_f64_u64(__p0); - return __ret; -} -#else -__ai float64_t vcvtd_f64_u64(uint64_t __p0) { - float64_t __ret; - __ret = (float64_t) __builtin_neon_vcvtd_f64_u64(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vcvtq_f64_u64(uint64x2_t __p0) { - float64x2_t __ret; - __ret = (float64x2_t) __builtin_neon_vcvtq_f64_v((int8x16_t)__p0, 51); - return __ret; -} -#else -__ai float64x2_t vcvtq_f64_u64(uint64x2_t __p0) { - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float64x2_t __ret; - __ret = (float64x2_t) __builtin_neon_vcvtq_f64_v((int8x16_t)__rev0, 51); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vcvtq_f64_s64(int64x2_t __p0) { - float64x2_t __ret; - __ret = (float64x2_t) __builtin_neon_vcvtq_f64_v((int8x16_t)__p0, 35); - return __ret; -} -#else -__ai float64x2_t vcvtq_f64_s64(int64x2_t __p0) { - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float64x2_t __ret; - __ret = (float64x2_t) __builtin_neon_vcvtq_f64_v((int8x16_t)__rev0, 35); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64x1_t vcvt_f64_u64(uint64x1_t __p0) { - float64x1_t __ret; - __ret = (float64x1_t) __builtin_neon_vcvt_f64_v((int8x8_t)__p0, 19); - return __ret; -} -#else -__ai float64x1_t vcvt_f64_u64(uint64x1_t __p0) { - float64x1_t __ret; - __ret = (float64x1_t) __builtin_neon_vcvt_f64_v((int8x8_t)__p0, 19); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64x1_t vcvt_f64_s64(int64x1_t __p0) { - float64x1_t __ret; - __ret = (float64x1_t) __builtin_neon_vcvt_f64_v((int8x8_t)__p0, 3); - return __ret; -} -#else -__ai float64x1_t vcvt_f64_s64(int64x1_t __p0) { - float64x1_t __ret; - __ret = (float64x1_t) __builtin_neon_vcvt_f64_v((int8x8_t)__p0, 3); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vcvt_f64_f32(float32x2_t __p0) { - float64x2_t __ret; - __ret = (float64x2_t) __builtin_neon_vcvt_f64_f32((int8x8_t)__p0, 42); - return __ret; -} -#else -__ai float64x2_t vcvt_f64_f32(float32x2_t __p0) { - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float64x2_t __ret; - __ret = (float64x2_t) __builtin_neon_vcvt_f64_f32((int8x8_t)__rev0, 42); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -__ai float64x2_t __noswap_vcvt_f64_f32(float32x2_t __p0) { - float64x2_t __ret; - __ret = (float64x2_t) __builtin_neon_vcvt_f64_f32((int8x8_t)__p0, 42); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float16x8_t vcvt_high_f16_f32(float16x4_t __p0, 
float32x4_t __p1) { - float16x8_t __ret; - __ret = vcombine_f16(__p0, vcvt_f16_f32(__p1)); - return __ret; -} -#else -__ai float16x8_t vcvt_high_f16_f32(float16x4_t __p0, float32x4_t __p1) { - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - float16x8_t __ret; - __ret = __noswap_vcombine_f16(__rev0, __noswap_vcvt_f16_f32(__rev1)); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vcvt_high_f32_f16(float16x8_t __p0) { - float32x4_t __ret; - __ret = vcvt_f32_f16(vget_high_f16(__p0)); - return __ret; -} -#else -__ai float32x4_t vcvt_high_f32_f16(float16x8_t __p0) { - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float32x4_t __ret; - __ret = __noswap_vcvt_f32_f16(__noswap_vget_high_f16(__rev0)); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vcvt_high_f32_f64(float32x2_t __p0, float64x2_t __p1) { - float32x4_t __ret; - __ret = vcombine_f32(__p0, vcvt_f32_f64(__p1)); - return __ret; -} -#else -__ai float32x4_t vcvt_high_f32_f64(float32x2_t __p0, float64x2_t __p1) { - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - float32x4_t __ret; - __ret = __noswap_vcombine_f32(__rev0, __noswap_vcvt_f32_f64(__rev1)); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vcvt_high_f64_f32(float32x4_t __p0) { - float64x2_t __ret; - __ret = vcvt_f64_f32(vget_high_f32(__p0)); - return __ret; -} -#else -__ai float64x2_t vcvt_high_f64_f32(float32x4_t __p0) { - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float64x2_t __ret; - __ret = __noswap_vcvt_f64_f32(__noswap_vget_high_f32(__rev0)); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcvts_n_f32_u32(__p0, __p1) __extension__ ({ \ - uint32_t __s0 = __p0; \ - float32_t __ret; \ - __ret = (float32_t) __builtin_neon_vcvts_n_f32_u32(__s0, __p1); \ - __ret; \ -}) -#else -#define vcvts_n_f32_u32(__p0, __p1) __extension__ ({ \ - uint32_t __s0 = __p0; \ - float32_t __ret; \ - __ret = (float32_t) __builtin_neon_vcvts_n_f32_u32(__s0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcvts_n_f32_s32(__p0, __p1) __extension__ ({ \ - int32_t __s0 = __p0; \ - float32_t __ret; \ - __ret = (float32_t) __builtin_neon_vcvts_n_f32_s32(__s0, __p1); \ - __ret; \ -}) -#else -#define vcvts_n_f32_s32(__p0, __p1) __extension__ ({ \ - int32_t __s0 = __p0; \ - float32_t __ret; \ - __ret = (float32_t) __builtin_neon_vcvts_n_f32_s32(__s0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcvtq_n_f64_u64(__p0, __p1) __extension__ ({ \ - uint64x2_t __s0 = __p0; \ - float64x2_t __ret; \ - __ret = (float64x2_t) __builtin_neon_vcvtq_n_f64_v((int8x16_t)__s0, __p1, 51); \ - __ret; \ -}) -#else -#define vcvtq_n_f64_u64(__p0, __p1) __extension__ ({ \ - uint64x2_t __s0 = __p0; \ - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - float64x2_t __ret; \ - __ret = (float64x2_t) __builtin_neon_vcvtq_n_f64_v((int8x16_t)__rev0, __p1, 51); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef 
__LITTLE_ENDIAN__ -#define vcvtq_n_f64_s64(__p0, __p1) __extension__ ({ \ - int64x2_t __s0 = __p0; \ - float64x2_t __ret; \ - __ret = (float64x2_t) __builtin_neon_vcvtq_n_f64_v((int8x16_t)__s0, __p1, 35); \ - __ret; \ -}) -#else -#define vcvtq_n_f64_s64(__p0, __p1) __extension__ ({ \ - int64x2_t __s0 = __p0; \ - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - float64x2_t __ret; \ - __ret = (float64x2_t) __builtin_neon_vcvtq_n_f64_v((int8x16_t)__rev0, __p1, 35); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcvt_n_f64_u64(__p0, __p1) __extension__ ({ \ - uint64x1_t __s0 = __p0; \ - float64x1_t __ret; \ - __ret = (float64x1_t) __builtin_neon_vcvt_n_f64_v((int8x8_t)__s0, __p1, 19); \ - __ret; \ -}) -#else -#define vcvt_n_f64_u64(__p0, __p1) __extension__ ({ \ - uint64x1_t __s0 = __p0; \ - float64x1_t __ret; \ - __ret = (float64x1_t) __builtin_neon_vcvt_n_f64_v((int8x8_t)__s0, __p1, 19); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcvt_n_f64_s64(__p0, __p1) __extension__ ({ \ - int64x1_t __s0 = __p0; \ - float64x1_t __ret; \ - __ret = (float64x1_t) __builtin_neon_vcvt_n_f64_v((int8x8_t)__s0, __p1, 3); \ - __ret; \ -}) -#else -#define vcvt_n_f64_s64(__p0, __p1) __extension__ ({ \ - int64x1_t __s0 = __p0; \ - float64x1_t __ret; \ - __ret = (float64x1_t) __builtin_neon_vcvt_n_f64_v((int8x8_t)__s0, __p1, 3); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcvtd_n_f64_u64(__p0, __p1) __extension__ ({ \ - uint64_t __s0 = __p0; \ - float64_t __ret; \ - __ret = (float64_t) __builtin_neon_vcvtd_n_f64_u64(__s0, __p1); \ - __ret; \ -}) -#else -#define vcvtd_n_f64_u64(__p0, __p1) __extension__ ({ \ - uint64_t __s0 = __p0; \ - float64_t __ret; \ - __ret = (float64_t) __builtin_neon_vcvtd_n_f64_u64(__s0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcvtd_n_f64_s64(__p0, __p1) __extension__ ({ \ - int64_t __s0 = __p0; \ - float64_t __ret; \ - __ret = (float64_t) __builtin_neon_vcvtd_n_f64_s64(__s0, __p1); \ - __ret; \ -}) -#else -#define vcvtd_n_f64_s64(__p0, __p1) __extension__ ({ \ - int64_t __s0 = __p0; \ - float64_t __ret; \ - __ret = (float64_t) __builtin_neon_vcvtd_n_f64_s64(__s0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcvts_n_s32_f32(__p0, __p1) __extension__ ({ \ - float32_t __s0 = __p0; \ - int32_t __ret; \ - __ret = (int32_t) __builtin_neon_vcvts_n_s32_f32(__s0, __p1); \ - __ret; \ -}) -#else -#define vcvts_n_s32_f32(__p0, __p1) __extension__ ({ \ - float32_t __s0 = __p0; \ - int32_t __ret; \ - __ret = (int32_t) __builtin_neon_vcvts_n_s32_f32(__s0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcvtq_n_s64_f64(__p0, __p1) __extension__ ({ \ - float64x2_t __s0 = __p0; \ - int64x2_t __ret; \ - __ret = (int64x2_t) __builtin_neon_vcvtq_n_s64_v((int8x16_t)__s0, __p1, 35); \ - __ret; \ -}) -#else -#define vcvtq_n_s64_f64(__p0, __p1) __extension__ ({ \ - float64x2_t __s0 = __p0; \ - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - int64x2_t __ret; \ - __ret = (int64x2_t) __builtin_neon_vcvtq_n_s64_v((int8x16_t)__rev0, __p1, 35); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcvt_n_s64_f64(__p0, __p1) __extension__ ({ \ - float64x1_t __s0 = __p0; \ - int64x1_t __ret; \ - __ret = (int64x1_t) __builtin_neon_vcvt_n_s64_v((int8x8_t)__s0, __p1, 3); \ - __ret; \ -}) -#else -#define vcvt_n_s64_f64(__p0, __p1) 
__extension__ ({ \ - float64x1_t __s0 = __p0; \ - int64x1_t __ret; \ - __ret = (int64x1_t) __builtin_neon_vcvt_n_s64_v((int8x8_t)__s0, __p1, 3); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcvtd_n_s64_f64(__p0, __p1) __extension__ ({ \ - float64_t __s0 = __p0; \ - int64_t __ret; \ - __ret = (int64_t) __builtin_neon_vcvtd_n_s64_f64(__s0, __p1); \ - __ret; \ -}) -#else -#define vcvtd_n_s64_f64(__p0, __p1) __extension__ ({ \ - float64_t __s0 = __p0; \ - int64_t __ret; \ - __ret = (int64_t) __builtin_neon_vcvtd_n_s64_f64(__s0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcvts_n_u32_f32(__p0, __p1) __extension__ ({ \ - float32_t __s0 = __p0; \ - uint32_t __ret; \ - __ret = (uint32_t) __builtin_neon_vcvts_n_u32_f32(__s0, __p1); \ - __ret; \ -}) -#else -#define vcvts_n_u32_f32(__p0, __p1) __extension__ ({ \ - float32_t __s0 = __p0; \ - uint32_t __ret; \ - __ret = (uint32_t) __builtin_neon_vcvts_n_u32_f32(__s0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcvtq_n_u64_f64(__p0, __p1) __extension__ ({ \ - float64x2_t __s0 = __p0; \ - uint64x2_t __ret; \ - __ret = (uint64x2_t) __builtin_neon_vcvtq_n_u64_v((int8x16_t)__s0, __p1, 51); \ - __ret; \ -}) -#else -#define vcvtq_n_u64_f64(__p0, __p1) __extension__ ({ \ - float64x2_t __s0 = __p0; \ - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - uint64x2_t __ret; \ - __ret = (uint64x2_t) __builtin_neon_vcvtq_n_u64_v((int8x16_t)__rev0, __p1, 51); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcvt_n_u64_f64(__p0, __p1) __extension__ ({ \ - float64x1_t __s0 = __p0; \ - uint64x1_t __ret; \ - __ret = (uint64x1_t) __builtin_neon_vcvt_n_u64_v((int8x8_t)__s0, __p1, 19); \ - __ret; \ -}) -#else -#define vcvt_n_u64_f64(__p0, __p1) __extension__ ({ \ - float64x1_t __s0 = __p0; \ - uint64x1_t __ret; \ - __ret = (uint64x1_t) __builtin_neon_vcvt_n_u64_v((int8x8_t)__s0, __p1, 19); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcvtd_n_u64_f64(__p0, __p1) __extension__ ({ \ - float64_t __s0 = __p0; \ - uint64_t __ret; \ - __ret = (uint64_t) __builtin_neon_vcvtd_n_u64_f64(__s0, __p1); \ - __ret; \ -}) -#else -#define vcvtd_n_u64_f64(__p0, __p1) __extension__ ({ \ - float64_t __s0 = __p0; \ - uint64_t __ret; \ - __ret = (uint64_t) __builtin_neon_vcvtd_n_u64_f64(__s0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32_t vcvts_s32_f32(float32_t __p0) { - int32_t __ret; - __ret = (int32_t) __builtin_neon_vcvts_s32_f32(__p0); - return __ret; -} -#else -__ai int32_t vcvts_s32_f32(float32_t __p0) { - int32_t __ret; - __ret = (int32_t) __builtin_neon_vcvts_s32_f32(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64_t vcvtd_s64_f64(float64_t __p0) { - int64_t __ret; - __ret = (int64_t) __builtin_neon_vcvtd_s64_f64(__p0); - return __ret; -} -#else -__ai int64_t vcvtd_s64_f64(float64_t __p0) { - int64_t __ret; - __ret = (int64_t) __builtin_neon_vcvtd_s64_f64(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vcvtq_s64_f64(float64x2_t __p0) { - int64x2_t __ret; - __ret = (int64x2_t) __builtin_neon_vcvtq_s64_v((int8x16_t)__p0, 35); - return __ret; -} -#else -__ai int64x2_t vcvtq_s64_f64(float64x2_t __p0) { - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int64x2_t __ret; - __ret = (int64x2_t) __builtin_neon_vcvtq_s64_v((int8x16_t)__rev0, 35); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; 
-} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x1_t vcvt_s64_f64(float64x1_t __p0) { - int64x1_t __ret; - __ret = (int64x1_t) __builtin_neon_vcvt_s64_v((int8x8_t)__p0, 3); - return __ret; -} -#else -__ai int64x1_t vcvt_s64_f64(float64x1_t __p0) { - int64x1_t __ret; - __ret = (int64x1_t) __builtin_neon_vcvt_s64_v((int8x8_t)__p0, 3); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32_t vcvts_u32_f32(float32_t __p0) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vcvts_u32_f32(__p0); - return __ret; -} -#else -__ai uint32_t vcvts_u32_f32(float32_t __p0) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vcvts_u32_f32(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64_t vcvtd_u64_f64(float64_t __p0) { - uint64_t __ret; - __ret = (uint64_t) __builtin_neon_vcvtd_u64_f64(__p0); - return __ret; -} -#else -__ai uint64_t vcvtd_u64_f64(float64_t __p0) { - uint64_t __ret; - __ret = (uint64_t) __builtin_neon_vcvtd_u64_f64(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vcvtq_u64_f64(float64x2_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t) __builtin_neon_vcvtq_u64_v((int8x16_t)__p0, 51); - return __ret; -} -#else -__ai uint64x2_t vcvtq_u64_f64(float64x2_t __p0) { - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint64x2_t __ret; - __ret = (uint64x2_t) __builtin_neon_vcvtq_u64_v((int8x16_t)__rev0, 51); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x1_t vcvt_u64_f64(float64x1_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t) __builtin_neon_vcvt_u64_v((int8x8_t)__p0, 19); - return __ret; -} -#else -__ai uint64x1_t vcvt_u64_f64(float64x1_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t) __builtin_neon_vcvt_u64_v((int8x8_t)__p0, 19); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32_t vcvtas_s32_f32(float32_t __p0) { - int32_t __ret; - __ret = (int32_t) __builtin_neon_vcvtas_s32_f32(__p0); - return __ret; -} -#else -__ai int32_t vcvtas_s32_f32(float32_t __p0) { - int32_t __ret; - __ret = (int32_t) __builtin_neon_vcvtas_s32_f32(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64_t vcvtad_s64_f64(float64_t __p0) { - int64_t __ret; - __ret = (int64_t) __builtin_neon_vcvtad_s64_f64(__p0); - return __ret; -} -#else -__ai int64_t vcvtad_s64_f64(float64_t __p0) { - int64_t __ret; - __ret = (int64_t) __builtin_neon_vcvtad_s64_f64(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32_t vcvtas_u32_f32(float32_t __p0) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vcvtas_u32_f32(__p0); - return __ret; -} -#else -__ai uint32_t vcvtas_u32_f32(float32_t __p0) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vcvtas_u32_f32(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64_t vcvtad_u64_f64(float64_t __p0) { - uint64_t __ret; - __ret = (uint64_t) __builtin_neon_vcvtad_u64_f64(__p0); - return __ret; -} -#else -__ai uint64_t vcvtad_u64_f64(float64_t __p0) { - uint64_t __ret; - __ret = (uint64_t) __builtin_neon_vcvtad_u64_f64(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32_t vcvtms_s32_f32(float32_t __p0) { - int32_t __ret; - __ret = (int32_t) __builtin_neon_vcvtms_s32_f32(__p0); - return __ret; -} -#else -__ai int32_t vcvtms_s32_f32(float32_t __p0) { - int32_t __ret; - __ret = (int32_t) __builtin_neon_vcvtms_s32_f32(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64_t 
vcvtmd_s64_f64(float64_t __p0) { - int64_t __ret; - __ret = (int64_t) __builtin_neon_vcvtmd_s64_f64(__p0); - return __ret; -} -#else -__ai int64_t vcvtmd_s64_f64(float64_t __p0) { - int64_t __ret; - __ret = (int64_t) __builtin_neon_vcvtmd_s64_f64(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32_t vcvtms_u32_f32(float32_t __p0) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vcvtms_u32_f32(__p0); - return __ret; -} -#else -__ai uint32_t vcvtms_u32_f32(float32_t __p0) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vcvtms_u32_f32(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64_t vcvtmd_u64_f64(float64_t __p0) { - uint64_t __ret; - __ret = (uint64_t) __builtin_neon_vcvtmd_u64_f64(__p0); - return __ret; -} -#else -__ai uint64_t vcvtmd_u64_f64(float64_t __p0) { - uint64_t __ret; - __ret = (uint64_t) __builtin_neon_vcvtmd_u64_f64(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32_t vcvtns_s32_f32(float32_t __p0) { - int32_t __ret; - __ret = (int32_t) __builtin_neon_vcvtns_s32_f32(__p0); - return __ret; -} -#else -__ai int32_t vcvtns_s32_f32(float32_t __p0) { - int32_t __ret; - __ret = (int32_t) __builtin_neon_vcvtns_s32_f32(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64_t vcvtnd_s64_f64(float64_t __p0) { - int64_t __ret; - __ret = (int64_t) __builtin_neon_vcvtnd_s64_f64(__p0); - return __ret; -} -#else -__ai int64_t vcvtnd_s64_f64(float64_t __p0) { - int64_t __ret; - __ret = (int64_t) __builtin_neon_vcvtnd_s64_f64(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32_t vcvtns_u32_f32(float32_t __p0) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vcvtns_u32_f32(__p0); - return __ret; -} -#else -__ai uint32_t vcvtns_u32_f32(float32_t __p0) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vcvtns_u32_f32(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64_t vcvtnd_u64_f64(float64_t __p0) { - uint64_t __ret; - __ret = (uint64_t) __builtin_neon_vcvtnd_u64_f64(__p0); - return __ret; -} -#else -__ai uint64_t vcvtnd_u64_f64(float64_t __p0) { - uint64_t __ret; - __ret = (uint64_t) __builtin_neon_vcvtnd_u64_f64(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32_t vcvtps_s32_f32(float32_t __p0) { - int32_t __ret; - __ret = (int32_t) __builtin_neon_vcvtps_s32_f32(__p0); - return __ret; -} -#else -__ai int32_t vcvtps_s32_f32(float32_t __p0) { - int32_t __ret; - __ret = (int32_t) __builtin_neon_vcvtps_s32_f32(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64_t vcvtpd_s64_f64(float64_t __p0) { - int64_t __ret; - __ret = (int64_t) __builtin_neon_vcvtpd_s64_f64(__p0); - return __ret; -} -#else -__ai int64_t vcvtpd_s64_f64(float64_t __p0) { - int64_t __ret; - __ret = (int64_t) __builtin_neon_vcvtpd_s64_f64(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32_t vcvtps_u32_f32(float32_t __p0) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vcvtps_u32_f32(__p0); - return __ret; -} -#else -__ai uint32_t vcvtps_u32_f32(float32_t __p0) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vcvtps_u32_f32(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64_t vcvtpd_u64_f64(float64_t __p0) { - uint64_t __ret; - __ret = (uint64_t) __builtin_neon_vcvtpd_u64_f64(__p0); - return __ret; -} -#else -__ai uint64_t vcvtpd_u64_f64(float64_t __p0) { - uint64_t __ret; - __ret = (uint64_t) __builtin_neon_vcvtpd_u64_f64(__p0); - return __ret; -} -#endif - 
-#ifdef __LITTLE_ENDIAN__ -__ai float32_t vcvtxd_f32_f64(float64_t __p0) { - float32_t __ret; - __ret = (float32_t) __builtin_neon_vcvtxd_f32_f64(__p0); - return __ret; -} -#else -__ai float32_t vcvtxd_f32_f64(float64_t __p0) { - float32_t __ret; - __ret = (float32_t) __builtin_neon_vcvtxd_f32_f64(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vcvtx_f32_f64(float64x2_t __p0) { - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vcvtx_f32_v((int8x16_t)__p0, 42); - return __ret; -} -#else -__ai float32x2_t vcvtx_f32_f64(float64x2_t __p0) { - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vcvtx_f32_v((int8x16_t)__rev0, 42); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -__ai float32x2_t __noswap_vcvtx_f32_f64(float64x2_t __p0) { - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vcvtx_f32_v((int8x16_t)__p0, 42); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vcvtx_high_f32_f64(float32x2_t __p0, float64x2_t __p1) { - float32x4_t __ret; - __ret = vcombine_f32(__p0, vcvtx_f32_f64(__p1)); - return __ret; -} -#else -__ai float32x4_t vcvtx_high_f32_f64(float32x2_t __p0, float64x2_t __p1) { - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - float32x4_t __ret; - __ret = __noswap_vcombine_f32(__rev0, __noswap_vcvtx_f32_f64(__rev1)); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vdivq_f64(float64x2_t __p0, float64x2_t __p1) { - float64x2_t __ret; - __ret = __p0 / __p1; - return __ret; -} -#else -__ai float64x2_t vdivq_f64(float64x2_t __p0, float64x2_t __p1) { - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - float64x2_t __ret; - __ret = __rev0 / __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vdivq_f32(float32x4_t __p0, float32x4_t __p1) { - float32x4_t __ret; - __ret = __p0 / __p1; - return __ret; -} -#else -__ai float32x4_t vdivq_f32(float32x4_t __p0, float32x4_t __p1) { - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - float32x4_t __ret; - __ret = __rev0 / __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64x1_t vdiv_f64(float64x1_t __p0, float64x1_t __p1) { - float64x1_t __ret; - __ret = __p0 / __p1; - return __ret; -} -#else -__ai float64x1_t vdiv_f64(float64x1_t __p0, float64x1_t __p1) { - float64x1_t __ret; - __ret = __p0 / __p1; - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vdiv_f32(float32x2_t __p0, float32x2_t __p1) { - float32x2_t __ret; - __ret = __p0 / __p1; - return __ret; -} -#else -__ai float32x2_t vdiv_f32(float32x2_t __p0, float32x2_t __p1) { - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - float32x2_t __ret; - __ret = __rev0 / __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdupb_lane_p8(__p0, __p1) __extension__ ({ \ - poly8x8_t __s0 
= __p0; \ - poly8_t __ret; \ - __ret = (poly8_t) __builtin_neon_vdupb_lane_i8((int8x8_t)__s0, __p1); \ - __ret; \ -}) -#else -#define vdupb_lane_p8(__p0, __p1) __extension__ ({ \ - poly8x8_t __s0 = __p0; \ - poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - poly8_t __ret; \ - __ret = (poly8_t) __builtin_neon_vdupb_lane_i8((int8x8_t)__rev0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vduph_lane_p16(__p0, __p1) __extension__ ({ \ - poly16x4_t __s0 = __p0; \ - poly16_t __ret; \ - __ret = (poly16_t) __builtin_neon_vduph_lane_i16((int8x8_t)__s0, __p1); \ - __ret; \ -}) -#else -#define vduph_lane_p16(__p0, __p1) __extension__ ({ \ - poly16x4_t __s0 = __p0; \ - poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - poly16_t __ret; \ - __ret = (poly16_t) __builtin_neon_vduph_lane_i16((int8x8_t)__rev0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdupb_lane_u8(__p0, __p1) __extension__ ({ \ - uint8x8_t __s0 = __p0; \ - uint8_t __ret; \ - __ret = (uint8_t) __builtin_neon_vdupb_lane_i8((int8x8_t)__s0, __p1); \ - __ret; \ -}) -#else -#define vdupb_lane_u8(__p0, __p1) __extension__ ({ \ - uint8x8_t __s0 = __p0; \ - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint8_t __ret; \ - __ret = (uint8_t) __builtin_neon_vdupb_lane_i8((int8x8_t)__rev0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdups_lane_u32(__p0, __p1) __extension__ ({ \ - uint32x2_t __s0 = __p0; \ - uint32_t __ret; \ - __ret = (uint32_t) __builtin_neon_vdups_lane_i32((int8x8_t)__s0, __p1); \ - __ret; \ -}) -#else -#define vdups_lane_u32(__p0, __p1) __extension__ ({ \ - uint32x2_t __s0 = __p0; \ - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - uint32_t __ret; \ - __ret = (uint32_t) __builtin_neon_vdups_lane_i32((int8x8_t)__rev0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdupd_lane_u64(__p0, __p1) __extension__ ({ \ - uint64x1_t __s0 = __p0; \ - uint64_t __ret; \ - __ret = (uint64_t) __builtin_neon_vdupd_lane_i64((int8x8_t)__s0, __p1); \ - __ret; \ -}) -#else -#define vdupd_lane_u64(__p0, __p1) __extension__ ({ \ - uint64x1_t __s0 = __p0; \ - uint64_t __ret; \ - __ret = (uint64_t) __builtin_neon_vdupd_lane_i64((int8x8_t)__s0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vduph_lane_u16(__p0, __p1) __extension__ ({ \ - uint16x4_t __s0 = __p0; \ - uint16_t __ret; \ - __ret = (uint16_t) __builtin_neon_vduph_lane_i16((int8x8_t)__s0, __p1); \ - __ret; \ -}) -#else -#define vduph_lane_u16(__p0, __p1) __extension__ ({ \ - uint16x4_t __s0 = __p0; \ - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - uint16_t __ret; \ - __ret = (uint16_t) __builtin_neon_vduph_lane_i16((int8x8_t)__rev0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdupb_lane_s8(__p0, __p1) __extension__ ({ \ - int8x8_t __s0 = __p0; \ - int8_t __ret; \ - __ret = (int8_t) __builtin_neon_vdupb_lane_i8((int8x8_t)__s0, __p1); \ - __ret; \ -}) -#else -#define vdupb_lane_s8(__p0, __p1) __extension__ ({ \ - int8x8_t __s0 = __p0; \ - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - int8_t __ret; \ - __ret = (int8_t) __builtin_neon_vdupb_lane_i8((int8x8_t)__rev0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdupd_lane_f64(__p0, __p1) __extension__ ({ \ - float64x1_t __s0 = __p0; \ - float64_t __ret; \ - __ret = 
(float64_t) __builtin_neon_vdupd_lane_f64((int8x8_t)__s0, __p1); \ - __ret; \ -}) -#else -#define vdupd_lane_f64(__p0, __p1) __extension__ ({ \ - float64x1_t __s0 = __p0; \ - float64_t __ret; \ - __ret = (float64_t) __builtin_neon_vdupd_lane_f64((int8x8_t)__s0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdups_lane_f32(__p0, __p1) __extension__ ({ \ - float32x2_t __s0 = __p0; \ - float32_t __ret; \ - __ret = (float32_t) __builtin_neon_vdups_lane_f32((int8x8_t)__s0, __p1); \ - __ret; \ -}) -#else -#define vdups_lane_f32(__p0, __p1) __extension__ ({ \ - float32x2_t __s0 = __p0; \ - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - float32_t __ret; \ - __ret = (float32_t) __builtin_neon_vdups_lane_f32((int8x8_t)__rev0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdups_lane_s32(__p0, __p1) __extension__ ({ \ - int32x2_t __s0 = __p0; \ - int32_t __ret; \ - __ret = (int32_t) __builtin_neon_vdups_lane_i32((int8x8_t)__s0, __p1); \ - __ret; \ -}) -#else -#define vdups_lane_s32(__p0, __p1) __extension__ ({ \ - int32x2_t __s0 = __p0; \ - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - int32_t __ret; \ - __ret = (int32_t) __builtin_neon_vdups_lane_i32((int8x8_t)__rev0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdupd_lane_s64(__p0, __p1) __extension__ ({ \ - int64x1_t __s0 = __p0; \ - int64_t __ret; \ - __ret = (int64_t) __builtin_neon_vdupd_lane_i64((int8x8_t)__s0, __p1); \ - __ret; \ -}) -#else -#define vdupd_lane_s64(__p0, __p1) __extension__ ({ \ - int64x1_t __s0 = __p0; \ - int64_t __ret; \ - __ret = (int64_t) __builtin_neon_vdupd_lane_i64((int8x8_t)__s0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vduph_lane_s16(__p0, __p1) __extension__ ({ \ - int16x4_t __s0 = __p0; \ - int16_t __ret; \ - __ret = (int16_t) __builtin_neon_vduph_lane_i16((int8x8_t)__s0, __p1); \ - __ret; \ -}) -#else -#define vduph_lane_s16(__p0, __p1) __extension__ ({ \ - int16x4_t __s0 = __p0; \ - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - int16_t __ret; \ - __ret = (int16_t) __builtin_neon_vduph_lane_i16((int8x8_t)__rev0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdup_lane_p64(__p0, __p1) __extension__ ({ \ - poly64x1_t __s0 = __p0; \ - poly64x1_t __ret; \ - __ret = __builtin_shufflevector(__s0, __s0, __p1); \ - __ret; \ -}) -#else -#define vdup_lane_p64(__p0, __p1) __extension__ ({ \ - poly64x1_t __s0 = __p0; \ - poly64x1_t __ret; \ - __ret = __builtin_shufflevector(__s0, __s0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdupq_lane_p64(__p0, __p1) __extension__ ({ \ - poly64x1_t __s0 = __p0; \ - poly64x2_t __ret; \ - __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \ - __ret; \ -}) -#else -#define vdupq_lane_p64(__p0, __p1) __extension__ ({ \ - poly64x1_t __s0 = __p0; \ - poly64x2_t __ret; \ - __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdupq_lane_f64(__p0, __p1) __extension__ ({ \ - float64x1_t __s0 = __p0; \ - float64x2_t __ret; \ - __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \ - __ret; \ -}) -#else -#define vdupq_lane_f64(__p0, __p1) __extension__ ({ \ - float64x1_t __s0 = __p0; \ - float64x2_t __ret; \ - __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) 
-#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdupq_lane_f16(__p0, __p1) __extension__ ({ \ - float16x4_t __s0 = __p0; \ - float16x8_t __ret; \ - __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \ - __ret; \ -}) -#else -#define vdupq_lane_f16(__p0, __p1) __extension__ ({ \ - float16x4_t __s0 = __p0; \ - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - float16x8_t __ret; \ - __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdup_lane_f64(__p0, __p1) __extension__ ({ \ - float64x1_t __s0 = __p0; \ - float64x1_t __ret; \ - __ret = __builtin_shufflevector(__s0, __s0, __p1); \ - __ret; \ -}) -#else -#define vdup_lane_f64(__p0, __p1) __extension__ ({ \ - float64x1_t __s0 = __p0; \ - float64x1_t __ret; \ - __ret = __builtin_shufflevector(__s0, __s0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdup_lane_f16(__p0, __p1) __extension__ ({ \ - float16x4_t __s0 = __p0; \ - float16x4_t __ret; \ - __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \ - __ret; \ -}) -#else -#define vdup_lane_f16(__p0, __p1) __extension__ ({ \ - float16x4_t __s0 = __p0; \ - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - float16x4_t __ret; \ - __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdupb_laneq_p8(__p0, __p1) __extension__ ({ \ - poly8x16_t __s0 = __p0; \ - poly8_t __ret; \ - __ret = (poly8_t) __builtin_neon_vdupb_laneq_i8((int8x16_t)__s0, __p1); \ - __ret; \ -}) -#else -#define vdupb_laneq_p8(__p0, __p1) __extension__ ({ \ - poly8x16_t __s0 = __p0; \ - poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - poly8_t __ret; \ - __ret = (poly8_t) __builtin_neon_vdupb_laneq_i8((int8x16_t)__rev0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vduph_laneq_p16(__p0, __p1) __extension__ ({ \ - poly16x8_t __s0 = __p0; \ - poly16_t __ret; \ - __ret = (poly16_t) __builtin_neon_vduph_laneq_i16((int8x16_t)__s0, __p1); \ - __ret; \ -}) -#else -#define vduph_laneq_p16(__p0, __p1) __extension__ ({ \ - poly16x8_t __s0 = __p0; \ - poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - poly16_t __ret; \ - __ret = (poly16_t) __builtin_neon_vduph_laneq_i16((int8x16_t)__rev0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdupb_laneq_u8(__p0, __p1) __extension__ ({ \ - uint8x16_t __s0 = __p0; \ - uint8_t __ret; \ - __ret = (uint8_t) __builtin_neon_vdupb_laneq_i8((int8x16_t)__s0, __p1); \ - __ret; \ -}) -#else -#define vdupb_laneq_u8(__p0, __p1) __extension__ ({ \ - uint8x16_t __s0 = __p0; \ - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint8_t __ret; \ - __ret = (uint8_t) __builtin_neon_vdupb_laneq_i8((int8x16_t)__rev0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdups_laneq_u32(__p0, __p1) __extension__ ({ \ - uint32x4_t __s0 = __p0; \ - uint32_t __ret; \ - __ret = (uint32_t) __builtin_neon_vdups_laneq_i32((int8x16_t)__s0, __p1); \ - __ret; \ -}) -#else -#define vdups_laneq_u32(__p0, __p1) __extension__ ({ \ - 
uint32x4_t __s0 = __p0; \ - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - uint32_t __ret; \ - __ret = (uint32_t) __builtin_neon_vdups_laneq_i32((int8x16_t)__rev0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdupd_laneq_u64(__p0, __p1) __extension__ ({ \ - uint64x2_t __s0 = __p0; \ - uint64_t __ret; \ - __ret = (uint64_t) __builtin_neon_vdupd_laneq_i64((int8x16_t)__s0, __p1); \ - __ret; \ -}) -#else -#define vdupd_laneq_u64(__p0, __p1) __extension__ ({ \ - uint64x2_t __s0 = __p0; \ - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - uint64_t __ret; \ - __ret = (uint64_t) __builtin_neon_vdupd_laneq_i64((int8x16_t)__rev0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vduph_laneq_u16(__p0, __p1) __extension__ ({ \ - uint16x8_t __s0 = __p0; \ - uint16_t __ret; \ - __ret = (uint16_t) __builtin_neon_vduph_laneq_i16((int8x16_t)__s0, __p1); \ - __ret; \ -}) -#else -#define vduph_laneq_u16(__p0, __p1) __extension__ ({ \ - uint16x8_t __s0 = __p0; \ - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint16_t __ret; \ - __ret = (uint16_t) __builtin_neon_vduph_laneq_i16((int8x16_t)__rev0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdupb_laneq_s8(__p0, __p1) __extension__ ({ \ - int8x16_t __s0 = __p0; \ - int8_t __ret; \ - __ret = (int8_t) __builtin_neon_vdupb_laneq_i8((int8x16_t)__s0, __p1); \ - __ret; \ -}) -#else -#define vdupb_laneq_s8(__p0, __p1) __extension__ ({ \ - int8x16_t __s0 = __p0; \ - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - int8_t __ret; \ - __ret = (int8_t) __builtin_neon_vdupb_laneq_i8((int8x16_t)__rev0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdupd_laneq_f64(__p0, __p1) __extension__ ({ \ - float64x2_t __s0 = __p0; \ - float64_t __ret; \ - __ret = (float64_t) __builtin_neon_vdupd_laneq_f64((int8x16_t)__s0, __p1); \ - __ret; \ -}) -#else -#define vdupd_laneq_f64(__p0, __p1) __extension__ ({ \ - float64x2_t __s0 = __p0; \ - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - float64_t __ret; \ - __ret = (float64_t) __builtin_neon_vdupd_laneq_f64((int8x16_t)__rev0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdups_laneq_f32(__p0, __p1) __extension__ ({ \ - float32x4_t __s0 = __p0; \ - float32_t __ret; \ - __ret = (float32_t) __builtin_neon_vdups_laneq_f32((int8x16_t)__s0, __p1); \ - __ret; \ -}) -#else -#define vdups_laneq_f32(__p0, __p1) __extension__ ({ \ - float32x4_t __s0 = __p0; \ - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - float32_t __ret; \ - __ret = (float32_t) __builtin_neon_vdups_laneq_f32((int8x16_t)__rev0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdups_laneq_s32(__p0, __p1) __extension__ ({ \ - int32x4_t __s0 = __p0; \ - int32_t __ret; \ - __ret = (int32_t) __builtin_neon_vdups_laneq_i32((int8x16_t)__s0, __p1); \ - __ret; \ -}) -#else -#define vdups_laneq_s32(__p0, __p1) __extension__ ({ \ - int32x4_t __s0 = __p0; \ - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - int32_t __ret; \ - __ret = (int32_t) __builtin_neon_vdups_laneq_i32((int8x16_t)__rev0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdupd_laneq_s64(__p0, __p1) __extension__ ({ \ - int64x2_t __s0 = __p0; \ - int64_t __ret; \ - __ret = (int64_t) 
__builtin_neon_vdupd_laneq_i64((int8x16_t)__s0, __p1); \ - __ret; \ -}) -#else -#define vdupd_laneq_s64(__p0, __p1) __extension__ ({ \ - int64x2_t __s0 = __p0; \ - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - int64_t __ret; \ - __ret = (int64_t) __builtin_neon_vdupd_laneq_i64((int8x16_t)__rev0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vduph_laneq_s16(__p0, __p1) __extension__ ({ \ - int16x8_t __s0 = __p0; \ - int16_t __ret; \ - __ret = (int16_t) __builtin_neon_vduph_laneq_i16((int8x16_t)__s0, __p1); \ - __ret; \ -}) -#else -#define vduph_laneq_s16(__p0, __p1) __extension__ ({ \ - int16x8_t __s0 = __p0; \ - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16_t __ret; \ - __ret = (int16_t) __builtin_neon_vduph_laneq_i16((int8x16_t)__rev0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdup_laneq_p8(__p0, __p1) __extension__ ({ \ - poly8x16_t __s0 = __p0; \ - poly8x8_t __ret; \ - __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \ - __ret; \ -}) -#else -#define vdup_laneq_p8(__p0, __p1) __extension__ ({ \ - poly8x16_t __s0 = __p0; \ - poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - poly8x8_t __ret; \ - __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdup_laneq_p64(__p0, __p1) __extension__ ({ \ - poly64x2_t __s0 = __p0; \ - poly64x1_t __ret; \ - __ret = __builtin_shufflevector(__s0, __s0, __p1); \ - __ret; \ -}) -#else -#define vdup_laneq_p64(__p0, __p1) __extension__ ({ \ - poly64x2_t __s0 = __p0; \ - poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - poly64x1_t __ret; \ - __ret = __builtin_shufflevector(__rev0, __rev0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdup_laneq_p16(__p0, __p1) __extension__ ({ \ - poly16x8_t __s0 = __p0; \ - poly16x4_t __ret; \ - __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \ - __ret; \ -}) -#else -#define vdup_laneq_p16(__p0, __p1) __extension__ ({ \ - poly16x8_t __s0 = __p0; \ - poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - poly16x4_t __ret; \ - __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdupq_laneq_p8(__p0, __p1) __extension__ ({ \ - poly8x16_t __s0 = __p0; \ - poly8x16_t __ret; \ - __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \ - __ret; \ -}) -#else -#define vdupq_laneq_p8(__p0, __p1) __extension__ ({ \ - poly8x16_t __s0 = __p0; \ - poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - poly8x16_t __ret; \ - __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \ - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdupq_laneq_p64(__p0, __p1) __extension__ ({ \ - poly64x2_t __s0 = __p0; \ - poly64x2_t __ret; 
\ - __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \ - __ret; \ -}) -#else -#define vdupq_laneq_p64(__p0, __p1) __extension__ ({ \ - poly64x2_t __s0 = __p0; \ - poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - poly64x2_t __ret; \ - __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdupq_laneq_p16(__p0, __p1) __extension__ ({ \ - poly16x8_t __s0 = __p0; \ - poly16x8_t __ret; \ - __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \ - __ret; \ -}) -#else -#define vdupq_laneq_p16(__p0, __p1) __extension__ ({ \ - poly16x8_t __s0 = __p0; \ - poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - poly16x8_t __ret; \ - __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdupq_laneq_u8(__p0, __p1) __extension__ ({ \ - uint8x16_t __s0 = __p0; \ - uint8x16_t __ret; \ - __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \ - __ret; \ -}) -#else -#define vdupq_laneq_u8(__p0, __p1) __extension__ ({ \ - uint8x16_t __s0 = __p0; \ - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint8x16_t __ret; \ - __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \ - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdupq_laneq_u32(__p0, __p1) __extension__ ({ \ - uint32x4_t __s0 = __p0; \ - uint32x4_t __ret; \ - __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \ - __ret; \ -}) -#else -#define vdupq_laneq_u32(__p0, __p1) __extension__ ({ \ - uint32x4_t __s0 = __p0; \ - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - uint32x4_t __ret; \ - __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdupq_laneq_u64(__p0, __p1) __extension__ ({ \ - uint64x2_t __s0 = __p0; \ - uint64x2_t __ret; \ - __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \ - __ret; \ -}) -#else -#define vdupq_laneq_u64(__p0, __p1) __extension__ ({ \ - uint64x2_t __s0 = __p0; \ - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - uint64x2_t __ret; \ - __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdupq_laneq_u16(__p0, __p1) __extension__ ({ \ - uint16x8_t __s0 = __p0; \ - uint16x8_t __ret; \ - __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \ - __ret; \ -}) -#else -#define vdupq_laneq_u16(__p0, __p1) __extension__ ({ \ - uint16x8_t __s0 = __p0; \ - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint16x8_t __ret; \ - __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \ - __ret = 
__builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdupq_laneq_s8(__p0, __p1) __extension__ ({ \ - int8x16_t __s0 = __p0; \ - int8x16_t __ret; \ - __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \ - __ret; \ -}) -#else -#define vdupq_laneq_s8(__p0, __p1) __extension__ ({ \ - int8x16_t __s0 = __p0; \ - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - int8x16_t __ret; \ - __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \ - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdupq_laneq_f64(__p0, __p1) __extension__ ({ \ - float64x2_t __s0 = __p0; \ - float64x2_t __ret; \ - __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \ - __ret; \ -}) -#else -#define vdupq_laneq_f64(__p0, __p1) __extension__ ({ \ - float64x2_t __s0 = __p0; \ - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - float64x2_t __ret; \ - __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdupq_laneq_f32(__p0, __p1) __extension__ ({ \ - float32x4_t __s0 = __p0; \ - float32x4_t __ret; \ - __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \ - __ret; \ -}) -#else -#define vdupq_laneq_f32(__p0, __p1) __extension__ ({ \ - float32x4_t __s0 = __p0; \ - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - float32x4_t __ret; \ - __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdupq_laneq_f16(__p0, __p1) __extension__ ({ \ - float16x8_t __s0 = __p0; \ - float16x8_t __ret; \ - __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \ - __ret; \ -}) -#else -#define vdupq_laneq_f16(__p0, __p1) __extension__ ({ \ - float16x8_t __s0 = __p0; \ - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x8_t __ret; \ - __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdupq_laneq_s32(__p0, __p1) __extension__ ({ \ - int32x4_t __s0 = __p0; \ - int32x4_t __ret; \ - __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \ - __ret; \ -}) -#else -#define vdupq_laneq_s32(__p0, __p1) __extension__ ({ \ - int32x4_t __s0 = __p0; \ - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - int32x4_t __ret; \ - __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdupq_laneq_s64(__p0, __p1) __extension__ ({ \ - int64x2_t __s0 = __p0; \ - int64x2_t __ret; \ - __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \ - __ret; \ -}) -#else -#define vdupq_laneq_s64(__p0, __p1) __extension__ ({ \ - int64x2_t __s0 = __p0; \ - 
int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - int64x2_t __ret; \ - __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdupq_laneq_s16(__p0, __p1) __extension__ ({ \ - int16x8_t __s0 = __p0; \ - int16x8_t __ret; \ - __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \ - __ret; \ -}) -#else -#define vdupq_laneq_s16(__p0, __p1) __extension__ ({ \ - int16x8_t __s0 = __p0; \ - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x8_t __ret; \ - __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdup_laneq_u8(__p0, __p1) __extension__ ({ \ - uint8x16_t __s0 = __p0; \ - uint8x8_t __ret; \ - __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \ - __ret; \ -}) -#else -#define vdup_laneq_u8(__p0, __p1) __extension__ ({ \ - uint8x16_t __s0 = __p0; \ - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint8x8_t __ret; \ - __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdup_laneq_u32(__p0, __p1) __extension__ ({ \ - uint32x4_t __s0 = __p0; \ - uint32x2_t __ret; \ - __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \ - __ret; \ -}) -#else -#define vdup_laneq_u32(__p0, __p1) __extension__ ({ \ - uint32x4_t __s0 = __p0; \ - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - uint32x2_t __ret; \ - __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdup_laneq_u64(__p0, __p1) __extension__ ({ \ - uint64x2_t __s0 = __p0; \ - uint64x1_t __ret; \ - __ret = __builtin_shufflevector(__s0, __s0, __p1); \ - __ret; \ -}) -#else -#define vdup_laneq_u64(__p0, __p1) __extension__ ({ \ - uint64x2_t __s0 = __p0; \ - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - uint64x1_t __ret; \ - __ret = __builtin_shufflevector(__rev0, __rev0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdup_laneq_u16(__p0, __p1) __extension__ ({ \ - uint16x8_t __s0 = __p0; \ - uint16x4_t __ret; \ - __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \ - __ret; \ -}) -#else -#define vdup_laneq_u16(__p0, __p1) __extension__ ({ \ - uint16x8_t __s0 = __p0; \ - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint16x4_t __ret; \ - __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdup_laneq_s8(__p0, __p1) __extension__ ({ \ - int8x16_t __s0 = __p0; \ - int8x8_t __ret; \ - __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \ - __ret; \ -}) -#else -#define vdup_laneq_s8(__p0, __p1) __extension__ ({ \ - int8x16_t __s0 = __p0; \ - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, 
__s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - int8x8_t __ret; \ - __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdup_laneq_f64(__p0, __p1) __extension__ ({ \ - float64x2_t __s0 = __p0; \ - float64x1_t __ret; \ - __ret = __builtin_shufflevector(__s0, __s0, __p1); \ - __ret; \ -}) -#else -#define vdup_laneq_f64(__p0, __p1) __extension__ ({ \ - float64x2_t __s0 = __p0; \ - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - float64x1_t __ret; \ - __ret = __builtin_shufflevector(__rev0, __rev0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdup_laneq_f32(__p0, __p1) __extension__ ({ \ - float32x4_t __s0 = __p0; \ - float32x2_t __ret; \ - __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \ - __ret; \ -}) -#else -#define vdup_laneq_f32(__p0, __p1) __extension__ ({ \ - float32x4_t __s0 = __p0; \ - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - float32x2_t __ret; \ - __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdup_laneq_f16(__p0, __p1) __extension__ ({ \ - float16x8_t __s0 = __p0; \ - float16x4_t __ret; \ - __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \ - __ret; \ -}) -#else -#define vdup_laneq_f16(__p0, __p1) __extension__ ({ \ - float16x8_t __s0 = __p0; \ - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x4_t __ret; \ - __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdup_laneq_s32(__p0, __p1) __extension__ ({ \ - int32x4_t __s0 = __p0; \ - int32x2_t __ret; \ - __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \ - __ret; \ -}) -#else -#define vdup_laneq_s32(__p0, __p1) __extension__ ({ \ - int32x4_t __s0 = __p0; \ - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - int32x2_t __ret; \ - __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdup_laneq_s64(__p0, __p1) __extension__ ({ \ - int64x2_t __s0 = __p0; \ - int64x1_t __ret; \ - __ret = __builtin_shufflevector(__s0, __s0, __p1); \ - __ret; \ -}) -#else -#define vdup_laneq_s64(__p0, __p1) __extension__ ({ \ - int64x2_t __s0 = __p0; \ - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - int64x1_t __ret; \ - __ret = __builtin_shufflevector(__rev0, __rev0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdup_laneq_s16(__p0, __p1) __extension__ ({ \ - int16x8_t __s0 = __p0; \ - int16x4_t __ret; \ - __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \ - __ret; \ -}) -#else -#define vdup_laneq_s16(__p0, __p1) __extension__ ({ \ - int16x8_t __s0 = __p0; \ - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x4_t __ret; \ - __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly64x1_t 
vdup_n_p64(poly64_t __p0) { - poly64x1_t __ret; - __ret = (poly64x1_t) {__p0}; - return __ret; -} -#else -__ai poly64x1_t vdup_n_p64(poly64_t __p0) { - poly64x1_t __ret; - __ret = (poly64x1_t) {__p0}; - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly64x2_t vdupq_n_p64(poly64_t __p0) { - poly64x2_t __ret; - __ret = (poly64x2_t) {__p0, __p0}; - return __ret; -} -#else -__ai poly64x2_t vdupq_n_p64(poly64_t __p0) { - poly64x2_t __ret; - __ret = (poly64x2_t) {__p0, __p0}; - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vdupq_n_f64(float64_t __p0) { - float64x2_t __ret; - __ret = (float64x2_t) {__p0, __p0}; - return __ret; -} -#else -__ai float64x2_t vdupq_n_f64(float64_t __p0) { - float64x2_t __ret; - __ret = (float64x2_t) {__p0, __p0}; - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64x1_t vdup_n_f64(float64_t __p0) { - float64x1_t __ret; - __ret = (float64x1_t) {__p0}; - return __ret; -} -#else -__ai float64x1_t vdup_n_f64(float64_t __p0) { - float64x1_t __ret; - __ret = (float64x1_t) {__p0}; - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vext_p64(__p0, __p1, __p2) __extension__ ({ \ - poly64x1_t __s0 = __p0; \ - poly64x1_t __s1 = __p1; \ - poly64x1_t __ret; \ - __ret = (poly64x1_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 6); \ - __ret; \ -}) -#else -#define vext_p64(__p0, __p1, __p2) __extension__ ({ \ - poly64x1_t __s0 = __p0; \ - poly64x1_t __s1 = __p1; \ - poly64x1_t __ret; \ - __ret = (poly64x1_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 6); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vextq_p64(__p0, __p1, __p2) __extension__ ({ \ - poly64x2_t __s0 = __p0; \ - poly64x2_t __s1 = __p1; \ - poly64x2_t __ret; \ - __ret = (poly64x2_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 38); \ - __ret; \ -}) -#else -#define vextq_p64(__p0, __p1, __p2) __extension__ ({ \ - poly64x2_t __s0 = __p0; \ - poly64x2_t __s1 = __p1; \ - poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - poly64x2_t __ret; \ - __ret = (poly64x2_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 38); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vextq_f64(__p0, __p1, __p2) __extension__ ({ \ - float64x2_t __s0 = __p0; \ - float64x2_t __s1 = __p1; \ - float64x2_t __ret; \ - __ret = (float64x2_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 42); \ - __ret; \ -}) -#else -#define vextq_f64(__p0, __p1, __p2) __extension__ ({ \ - float64x2_t __s0 = __p0; \ - float64x2_t __s1 = __p1; \ - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - float64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - float64x2_t __ret; \ - __ret = (float64x2_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 42); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vext_f64(__p0, __p1, __p2) __extension__ ({ \ - float64x1_t __s0 = __p0; \ - float64x1_t __s1 = __p1; \ - float64x1_t __ret; \ - __ret = (float64x1_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 10); \ - __ret; \ -}) -#else -#define vext_f64(__p0, __p1, __p2) __extension__ ({ \ - float64x1_t __s0 
= __p0; \ - float64x1_t __s1 = __p1; \ - float64x1_t __ret; \ - __ret = (float64x1_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 10); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vfmaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { - float64x2_t __ret; - __ret = (float64x2_t) __builtin_neon_vfmaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42); - return __ret; -} -#else -__ai float64x2_t vfmaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - float64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); - float64x2_t __ret; - __ret = (float64x2_t) __builtin_neon_vfmaq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 42); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -__ai float64x2_t __noswap_vfmaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { - float64x2_t __ret; - __ret = (float64x2_t) __builtin_neon_vfmaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64x1_t vfma_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) { - float64x1_t __ret; - __ret = (float64x1_t) __builtin_neon_vfma_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 10); - return __ret; -} -#else -__ai float64x1_t vfma_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) { - float64x1_t __ret; - __ret = (float64x1_t) __builtin_neon_vfma_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 10); - return __ret; -} -__ai float64x1_t __noswap_vfma_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) { - float64x1_t __ret; - __ret = (float64x1_t) __builtin_neon_vfma_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 10); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vfmad_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \ - float64_t __s0 = __p0; \ - float64_t __s1 = __p1; \ - float64x1_t __s2 = __p2; \ - float64_t __ret; \ - __ret = (float64_t) __builtin_neon_vfmad_lane_f64(__s0, __s1, (int8x8_t)__s2, __p3); \ - __ret; \ -}) -#else -#define vfmad_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \ - float64_t __s0 = __p0; \ - float64_t __s1 = __p1; \ - float64x1_t __s2 = __p2; \ - float64_t __ret; \ - __ret = (float64_t) __builtin_neon_vfmad_lane_f64(__s0, __s1, (int8x8_t)__s2, __p3); \ - __ret; \ -}) -#define __noswap_vfmad_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \ - float64_t __s0 = __p0; \ - float64_t __s1 = __p1; \ - float64x1_t __s2 = __p2; \ - float64_t __ret; \ - __ret = (float64_t) __builtin_neon_vfmad_lane_f64(__s0, __s1, (int8x8_t)__s2, __p3); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vfmas_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ - float32_t __s0 = __p0; \ - float32_t __s1 = __p1; \ - float32x2_t __s2 = __p2; \ - float32_t __ret; \ - __ret = (float32_t) __builtin_neon_vfmas_lane_f32(__s0, __s1, (int8x8_t)__s2, __p3); \ - __ret; \ -}) -#else -#define vfmas_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ - float32_t __s0 = __p0; \ - float32_t __s1 = __p1; \ - float32x2_t __s2 = __p2; \ - float32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \ - float32_t __ret; \ - __ret = (float32_t) __builtin_neon_vfmas_lane_f32(__s0, __s1, (int8x8_t)__rev2, __p3); \ - __ret; \ -}) -#define __noswap_vfmas_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ - float32_t __s0 = __p0; \ - 
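/* Illustrative usage sketch, not part of the original header or this patch:
 * vfmaq_f64 computes acc + a*b per lane with a single rounding (fused
 * multiply-add). In the header above, the big-endian variant reverses the
 * input lanes with __builtin_shufflevector, calls the same builtin, and
 * reverses the result back so lane numbering stays consistent. */
#include <arm_neon.h>

static float64x2_t mac_f64(float64x2_t acc, float64x2_t a, float64x2_t b)
{
    return vfmaq_f64(acc, a, b);   /* acc[i] + a[i]*b[i], fused per lane */
}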
float32_t __s1 = __p1; \ - float32x2_t __s2 = __p2; \ - float32_t __ret; \ - __ret = (float32_t) __builtin_neon_vfmas_lane_f32(__s0, __s1, (int8x8_t)__s2, __p3); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vfmaq_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \ - float64x2_t __s0 = __p0; \ - float64x2_t __s1 = __p1; \ - float64x1_t __s2 = __p2; \ - float64x2_t __ret; \ - __ret = (float64x2_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x8_t)__s2, __p3, 42); \ - __ret; \ -}) -#else -#define vfmaq_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \ - float64x2_t __s0 = __p0; \ - float64x2_t __s1 = __p1; \ - float64x1_t __s2 = __p2; \ - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - float64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - float64x2_t __ret; \ - __ret = (float64x2_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x8_t)__s2, __p3, 42); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#define __noswap_vfmaq_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \ - float64x2_t __s0 = __p0; \ - float64x2_t __s1 = __p1; \ - float64x1_t __s2 = __p2; \ - float64x2_t __ret; \ - __ret = (float64x2_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x8_t)__s2, __p3, 42); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vfmaq_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ - float32x4_t __s0 = __p0; \ - float32x4_t __s1 = __p1; \ - float32x2_t __s2 = __p2; \ - float32x4_t __ret; \ - __ret = (float32x4_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x8_t)__s2, __p3, 41); \ - __ret; \ -}) -#else -#define vfmaq_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ - float32x4_t __s0 = __p0; \ - float32x4_t __s1 = __p1; \ - float32x2_t __s2 = __p2; \ - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - float32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \ - float32x4_t __ret; \ - __ret = (float32x4_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x8_t)__rev2, __p3, 41); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#define __noswap_vfmaq_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ - float32x4_t __s0 = __p0; \ - float32x4_t __s1 = __p1; \ - float32x2_t __s2 = __p2; \ - float32x4_t __ret; \ - __ret = (float32x4_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x8_t)__s2, __p3, 41); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vfma_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \ - float64x1_t __s0 = __p0; \ - float64x1_t __s1 = __p1; \ - float64x1_t __s2 = __p2; \ - float64x1_t __ret; \ - __ret = (float64x1_t) __builtin_neon_vfma_lane_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x8_t)__s2, __p3, 10); \ - __ret; \ -}) -#else -#define vfma_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \ - float64x1_t __s0 = __p0; \ - float64x1_t __s1 = __p1; \ - float64x1_t __s2 = __p2; \ - float64x1_t __ret; \ - __ret = (float64x1_t) __builtin_neon_vfma_lane_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x8_t)__s2, __p3, 10); \ - __ret; \ -}) -#define __noswap_vfma_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \ - float64x1_t __s0 = __p0; \ - float64x1_t __s1 = __p1; \ - float64x1_t __s2 = __p2; \ - float64x1_t __ret; \ - __ret = (float64x1_t) __builtin_neon_vfma_lane_v((int8x8_t)__s0, 
(int8x8_t)__s1, (int8x8_t)__s2, __p3, 10); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vfma_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ - float32x2_t __s0 = __p0; \ - float32x2_t __s1 = __p1; \ - float32x2_t __s2 = __p2; \ - float32x2_t __ret; \ - __ret = (float32x2_t) __builtin_neon_vfma_lane_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x8_t)__s2, __p3, 9); \ - __ret; \ -}) -#else -#define vfma_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ - float32x2_t __s0 = __p0; \ - float32x2_t __s1 = __p1; \ - float32x2_t __s2 = __p2; \ - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - float32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \ - float32x2_t __ret; \ - __ret = (float32x2_t) __builtin_neon_vfma_lane_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, __p3, 9); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#define __noswap_vfma_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ - float32x2_t __s0 = __p0; \ - float32x2_t __s1 = __p1; \ - float32x2_t __s2 = __p2; \ - float32x2_t __ret; \ - __ret = (float32x2_t) __builtin_neon_vfma_lane_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x8_t)__s2, __p3, 9); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vfmad_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \ - float64_t __s0 = __p0; \ - float64_t __s1 = __p1; \ - float64x2_t __s2 = __p2; \ - float64_t __ret; \ - __ret = (float64_t) __builtin_neon_vfmad_laneq_f64(__s0, __s1, (int8x16_t)__s2, __p3); \ - __ret; \ -}) -#else -#define vfmad_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \ - float64_t __s0 = __p0; \ - float64_t __s1 = __p1; \ - float64x2_t __s2 = __p2; \ - float64x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \ - float64_t __ret; \ - __ret = (float64_t) __builtin_neon_vfmad_laneq_f64(__s0, __s1, (int8x16_t)__rev2, __p3); \ - __ret; \ -}) -#define __noswap_vfmad_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \ - float64_t __s0 = __p0; \ - float64_t __s1 = __p1; \ - float64x2_t __s2 = __p2; \ - float64_t __ret; \ - __ret = (float64_t) __builtin_neon_vfmad_laneq_f64(__s0, __s1, (int8x16_t)__s2, __p3); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vfmas_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ - float32_t __s0 = __p0; \ - float32_t __s1 = __p1; \ - float32x4_t __s2 = __p2; \ - float32_t __ret; \ - __ret = (float32_t) __builtin_neon_vfmas_laneq_f32(__s0, __s1, (int8x16_t)__s2, __p3); \ - __ret; \ -}) -#else -#define vfmas_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ - float32_t __s0 = __p0; \ - float32_t __s1 = __p1; \ - float32x4_t __s2 = __p2; \ - float32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ - float32_t __ret; \ - __ret = (float32_t) __builtin_neon_vfmas_laneq_f32(__s0, __s1, (int8x16_t)__rev2, __p3); \ - __ret; \ -}) -#define __noswap_vfmas_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ - float32_t __s0 = __p0; \ - float32_t __s1 = __p1; \ - float32x4_t __s2 = __p2; \ - float32_t __ret; \ - __ret = (float32_t) __builtin_neon_vfmas_laneq_f32(__s0, __s1, (int8x16_t)__s2, __p3); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vfmaq_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \ - float64x2_t __s0 = __p0; \ - float64x2_t __s1 = __p1; \ - float64x2_t __s2 = __p2; \ - float64x2_t __ret; \ - __ret = (float64x2_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, 
(int8x16_t)__s2, __p3, 42); \ - __ret; \ -}) -#else -#define vfmaq_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \ - float64x2_t __s0 = __p0; \ - float64x2_t __s1 = __p1; \ - float64x2_t __s2 = __p2; \ - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - float64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - float64x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \ - float64x2_t __ret; \ - __ret = (float64x2_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, __p3, 42); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#define __noswap_vfmaq_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \ - float64x2_t __s0 = __p0; \ - float64x2_t __s1 = __p1; \ - float64x2_t __s2 = __p2; \ - float64x2_t __ret; \ - __ret = (float64x2_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 42); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vfmaq_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ - float32x4_t __s0 = __p0; \ - float32x4_t __s1 = __p1; \ - float32x4_t __s2 = __p2; \ - float32x4_t __ret; \ - __ret = (float32x4_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 41); \ - __ret; \ -}) -#else -#define vfmaq_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ - float32x4_t __s0 = __p0; \ - float32x4_t __s1 = __p1; \ - float32x4_t __s2 = __p2; \ - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - float32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ - float32x4_t __ret; \ - __ret = (float32x4_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, __p3, 41); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#define __noswap_vfmaq_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ - float32x4_t __s0 = __p0; \ - float32x4_t __s1 = __p1; \ - float32x4_t __s2 = __p2; \ - float32x4_t __ret; \ - __ret = (float32x4_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 41); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vfma_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \ - float64x1_t __s0 = __p0; \ - float64x1_t __s1 = __p1; \ - float64x2_t __s2 = __p2; \ - float64x1_t __ret; \ - __ret = (float64x1_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__s2, __p3, 10); \ - __ret; \ -}) -#else -#define vfma_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \ - float64x1_t __s0 = __p0; \ - float64x1_t __s1 = __p1; \ - float64x2_t __s2 = __p2; \ - float64x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \ - float64x1_t __ret; \ - __ret = (float64x1_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__rev2, __p3, 10); \ - __ret; \ -}) -#define __noswap_vfma_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \ - float64x1_t __s0 = __p0; \ - float64x1_t __s1 = __p1; \ - float64x2_t __s2 = __p2; \ - float64x1_t __ret; \ - __ret = (float64x1_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__s2, __p3, 10); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vfma_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ - float32x2_t __s0 = __p0; \ - float32x2_t __s1 = __p1; \ - float32x4_t __s2 = __p2; \ - float32x2_t __ret; \ - __ret = (float32x2_t) 
__builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__s2, __p3, 9); \ - __ret; \ -}) -#else -#define vfma_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ - float32x2_t __s0 = __p0; \ - float32x2_t __s1 = __p1; \ - float32x4_t __s2 = __p2; \ - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - float32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ - float32x2_t __ret; \ - __ret = (float32x2_t) __builtin_neon_vfma_laneq_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x16_t)__rev2, __p3, 9); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#define __noswap_vfma_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ - float32x2_t __s0 = __p0; \ - float32x2_t __s1 = __p1; \ - float32x4_t __s2 = __p2; \ - float32x2_t __ret; \ - __ret = (float32x2_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__s2, __p3, 9); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vfmaq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) { - float64x2_t __ret; - __ret = vfmaq_f64(__p0, __p1, (float64x2_t) {__p2, __p2}); - return __ret; -} -#else -__ai float64x2_t vfmaq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) { - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - float64x2_t __ret; - __ret = __noswap_vfmaq_f64(__rev0, __rev1, (float64x2_t) {__p2, __p2}); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vfmaq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) { - float32x4_t __ret; - __ret = vfmaq_f32(__p0, __p1, (float32x4_t) {__p2, __p2, __p2, __p2}); - return __ret; -} -#else -__ai float32x4_t vfmaq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) { - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - float32x4_t __ret; - __ret = __noswap_vfmaq_f32(__rev0, __rev1, (float32x4_t) {__p2, __p2, __p2, __p2}); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vfma_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) { - float32x2_t __ret; - __ret = vfma_f32(__p0, __p1, (float32x2_t) {__p2, __p2}); - return __ret; -} -#else -__ai float32x2_t vfma_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) { - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - float32x2_t __ret; - __ret = __noswap_vfma_f32(__rev0, __rev1, (float32x2_t) {__p2, __p2}); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vfmsq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { - float64x2_t __ret; - __ret = vfmaq_f64(__p0, -__p1, __p2); - return __ret; -} -#else -__ai float64x2_t vfmsq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - float64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); - float64x2_t __ret; - __ret = __noswap_vfmaq_f64(__rev0, -__rev1, __rev2); - __ret = 
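/* Illustrative sketch (assumed usage, not from the patch): the _lane/_laneq
 * forms of the fused multiply-add take the multiplier from one lane of a
 * vector, so a coefficient can be reused without broadcasting it first. */
#include <arm_neon.h>

static float32x4_t axpy_lane2(float32x4_t acc, float32x4_t x, float32x4_t w)
{
    /* acc[i] += x[i] * w[2] for all four lanes, fused; the lane index must
     * be a compile-time constant. */
    return vfmaq_laneq_f32(acc, x, w, 2);
}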
__builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64x1_t vfms_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) { - float64x1_t __ret; - __ret = vfma_f64(__p0, -__p1, __p2); - return __ret; -} -#else -__ai float64x1_t vfms_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) { - float64x1_t __ret; - __ret = __noswap_vfma_f64(__p0, -__p1, __p2); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vfmsd_lane_f64(__p0_104, __p1_104, __p2_104, __p3_104) __extension__ ({ \ - float64_t __s0_104 = __p0_104; \ - float64_t __s1_104 = __p1_104; \ - float64x1_t __s2_104 = __p2_104; \ - float64_t __ret_104; \ - __ret_104 = vfmad_lane_f64(__s0_104, -__s1_104, __s2_104, __p3_104); \ - __ret_104; \ -}) -#else -#define vfmsd_lane_f64(__p0_105, __p1_105, __p2_105, __p3_105) __extension__ ({ \ - float64_t __s0_105 = __p0_105; \ - float64_t __s1_105 = __p1_105; \ - float64x1_t __s2_105 = __p2_105; \ - float64_t __ret_105; \ - __ret_105 = __noswap_vfmad_lane_f64(__s0_105, -__s1_105, __s2_105, __p3_105); \ - __ret_105; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vfmss_lane_f32(__p0_106, __p1_106, __p2_106, __p3_106) __extension__ ({ \ - float32_t __s0_106 = __p0_106; \ - float32_t __s1_106 = __p1_106; \ - float32x2_t __s2_106 = __p2_106; \ - float32_t __ret_106; \ - __ret_106 = vfmas_lane_f32(__s0_106, -__s1_106, __s2_106, __p3_106); \ - __ret_106; \ -}) -#else -#define vfmss_lane_f32(__p0_107, __p1_107, __p2_107, __p3_107) __extension__ ({ \ - float32_t __s0_107 = __p0_107; \ - float32_t __s1_107 = __p1_107; \ - float32x2_t __s2_107 = __p2_107; \ - float32x2_t __rev2_107; __rev2_107 = __builtin_shufflevector(__s2_107, __s2_107, 1, 0); \ - float32_t __ret_107; \ - __ret_107 = __noswap_vfmas_lane_f32(__s0_107, -__s1_107, __rev2_107, __p3_107); \ - __ret_107; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vfmsq_lane_f64(__p0_108, __p1_108, __p2_108, __p3_108) __extension__ ({ \ - float64x2_t __s0_108 = __p0_108; \ - float64x2_t __s1_108 = __p1_108; \ - float64x1_t __s2_108 = __p2_108; \ - float64x2_t __ret_108; \ - __ret_108 = vfmaq_lane_f64(__s0_108, -__s1_108, __s2_108, __p3_108); \ - __ret_108; \ -}) -#else -#define vfmsq_lane_f64(__p0_109, __p1_109, __p2_109, __p3_109) __extension__ ({ \ - float64x2_t __s0_109 = __p0_109; \ - float64x2_t __s1_109 = __p1_109; \ - float64x1_t __s2_109 = __p2_109; \ - float64x2_t __rev0_109; __rev0_109 = __builtin_shufflevector(__s0_109, __s0_109, 1, 0); \ - float64x2_t __rev1_109; __rev1_109 = __builtin_shufflevector(__s1_109, __s1_109, 1, 0); \ - float64x2_t __ret_109; \ - __ret_109 = __noswap_vfmaq_lane_f64(__rev0_109, -__rev1_109, __s2_109, __p3_109); \ - __ret_109 = __builtin_shufflevector(__ret_109, __ret_109, 1, 0); \ - __ret_109; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vfmsq_lane_f32(__p0_110, __p1_110, __p2_110, __p3_110) __extension__ ({ \ - float32x4_t __s0_110 = __p0_110; \ - float32x4_t __s1_110 = __p1_110; \ - float32x2_t __s2_110 = __p2_110; \ - float32x4_t __ret_110; \ - __ret_110 = vfmaq_lane_f32(__s0_110, -__s1_110, __s2_110, __p3_110); \ - __ret_110; \ -}) -#else -#define vfmsq_lane_f32(__p0_111, __p1_111, __p2_111, __p3_111) __extension__ ({ \ - float32x4_t __s0_111 = __p0_111; \ - float32x4_t __s1_111 = __p1_111; \ - float32x2_t __s2_111 = __p2_111; \ - float32x4_t __rev0_111; __rev0_111 = __builtin_shufflevector(__s0_111, __s0_111, 3, 2, 1, 0); \ - float32x4_t __rev1_111; __rev1_111 = __builtin_shufflevector(__s1_111, __s1_111, 3, 2, 1, 0); \ - 
float32x2_t __rev2_111; __rev2_111 = __builtin_shufflevector(__s2_111, __s2_111, 1, 0); \ - float32x4_t __ret_111; \ - __ret_111 = __noswap_vfmaq_lane_f32(__rev0_111, -__rev1_111, __rev2_111, __p3_111); \ - __ret_111 = __builtin_shufflevector(__ret_111, __ret_111, 3, 2, 1, 0); \ - __ret_111; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vfms_lane_f64(__p0_112, __p1_112, __p2_112, __p3_112) __extension__ ({ \ - float64x1_t __s0_112 = __p0_112; \ - float64x1_t __s1_112 = __p1_112; \ - float64x1_t __s2_112 = __p2_112; \ - float64x1_t __ret_112; \ - __ret_112 = vfma_lane_f64(__s0_112, -__s1_112, __s2_112, __p3_112); \ - __ret_112; \ -}) -#else -#define vfms_lane_f64(__p0_113, __p1_113, __p2_113, __p3_113) __extension__ ({ \ - float64x1_t __s0_113 = __p0_113; \ - float64x1_t __s1_113 = __p1_113; \ - float64x1_t __s2_113 = __p2_113; \ - float64x1_t __ret_113; \ - __ret_113 = __noswap_vfma_lane_f64(__s0_113, -__s1_113, __s2_113, __p3_113); \ - __ret_113; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vfms_lane_f32(__p0_114, __p1_114, __p2_114, __p3_114) __extension__ ({ \ - float32x2_t __s0_114 = __p0_114; \ - float32x2_t __s1_114 = __p1_114; \ - float32x2_t __s2_114 = __p2_114; \ - float32x2_t __ret_114; \ - __ret_114 = vfma_lane_f32(__s0_114, -__s1_114, __s2_114, __p3_114); \ - __ret_114; \ -}) -#else -#define vfms_lane_f32(__p0_115, __p1_115, __p2_115, __p3_115) __extension__ ({ \ - float32x2_t __s0_115 = __p0_115; \ - float32x2_t __s1_115 = __p1_115; \ - float32x2_t __s2_115 = __p2_115; \ - float32x2_t __rev0_115; __rev0_115 = __builtin_shufflevector(__s0_115, __s0_115, 1, 0); \ - float32x2_t __rev1_115; __rev1_115 = __builtin_shufflevector(__s1_115, __s1_115, 1, 0); \ - float32x2_t __rev2_115; __rev2_115 = __builtin_shufflevector(__s2_115, __s2_115, 1, 0); \ - float32x2_t __ret_115; \ - __ret_115 = __noswap_vfma_lane_f32(__rev0_115, -__rev1_115, __rev2_115, __p3_115); \ - __ret_115 = __builtin_shufflevector(__ret_115, __ret_115, 1, 0); \ - __ret_115; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vfmsd_laneq_f64(__p0_116, __p1_116, __p2_116, __p3_116) __extension__ ({ \ - float64_t __s0_116 = __p0_116; \ - float64_t __s1_116 = __p1_116; \ - float64x2_t __s2_116 = __p2_116; \ - float64_t __ret_116; \ - __ret_116 = vfmad_laneq_f64(__s0_116, -__s1_116, __s2_116, __p3_116); \ - __ret_116; \ -}) -#else -#define vfmsd_laneq_f64(__p0_117, __p1_117, __p2_117, __p3_117) __extension__ ({ \ - float64_t __s0_117 = __p0_117; \ - float64_t __s1_117 = __p1_117; \ - float64x2_t __s2_117 = __p2_117; \ - float64x2_t __rev2_117; __rev2_117 = __builtin_shufflevector(__s2_117, __s2_117, 1, 0); \ - float64_t __ret_117; \ - __ret_117 = __noswap_vfmad_laneq_f64(__s0_117, -__s1_117, __rev2_117, __p3_117); \ - __ret_117; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vfmss_laneq_f32(__p0_118, __p1_118, __p2_118, __p3_118) __extension__ ({ \ - float32_t __s0_118 = __p0_118; \ - float32_t __s1_118 = __p1_118; \ - float32x4_t __s2_118 = __p2_118; \ - float32_t __ret_118; \ - __ret_118 = vfmas_laneq_f32(__s0_118, -__s1_118, __s2_118, __p3_118); \ - __ret_118; \ -}) -#else -#define vfmss_laneq_f32(__p0_119, __p1_119, __p2_119, __p3_119) __extension__ ({ \ - float32_t __s0_119 = __p0_119; \ - float32_t __s1_119 = __p1_119; \ - float32x4_t __s2_119 = __p2_119; \ - float32x4_t __rev2_119; __rev2_119 = __builtin_shufflevector(__s2_119, __s2_119, 3, 2, 1, 0); \ - float32_t __ret_119; \ - __ret_119 = __noswap_vfmas_laneq_f32(__s0_119, -__s1_119, __rev2_119, __p3_119); \ - __ret_119; \ -}) -#endif - -#ifdef 
__LITTLE_ENDIAN__ -#define vfmsq_laneq_f64(__p0_120, __p1_120, __p2_120, __p3_120) __extension__ ({ \ - float64x2_t __s0_120 = __p0_120; \ - float64x2_t __s1_120 = __p1_120; \ - float64x2_t __s2_120 = __p2_120; \ - float64x2_t __ret_120; \ - __ret_120 = vfmaq_laneq_f64(__s0_120, -__s1_120, __s2_120, __p3_120); \ - __ret_120; \ -}) -#else -#define vfmsq_laneq_f64(__p0_121, __p1_121, __p2_121, __p3_121) __extension__ ({ \ - float64x2_t __s0_121 = __p0_121; \ - float64x2_t __s1_121 = __p1_121; \ - float64x2_t __s2_121 = __p2_121; \ - float64x2_t __rev0_121; __rev0_121 = __builtin_shufflevector(__s0_121, __s0_121, 1, 0); \ - float64x2_t __rev1_121; __rev1_121 = __builtin_shufflevector(__s1_121, __s1_121, 1, 0); \ - float64x2_t __rev2_121; __rev2_121 = __builtin_shufflevector(__s2_121, __s2_121, 1, 0); \ - float64x2_t __ret_121; \ - __ret_121 = __noswap_vfmaq_laneq_f64(__rev0_121, -__rev1_121, __rev2_121, __p3_121); \ - __ret_121 = __builtin_shufflevector(__ret_121, __ret_121, 1, 0); \ - __ret_121; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vfmsq_laneq_f32(__p0_122, __p1_122, __p2_122, __p3_122) __extension__ ({ \ - float32x4_t __s0_122 = __p0_122; \ - float32x4_t __s1_122 = __p1_122; \ - float32x4_t __s2_122 = __p2_122; \ - float32x4_t __ret_122; \ - __ret_122 = vfmaq_laneq_f32(__s0_122, -__s1_122, __s2_122, __p3_122); \ - __ret_122; \ -}) -#else -#define vfmsq_laneq_f32(__p0_123, __p1_123, __p2_123, __p3_123) __extension__ ({ \ - float32x4_t __s0_123 = __p0_123; \ - float32x4_t __s1_123 = __p1_123; \ - float32x4_t __s2_123 = __p2_123; \ - float32x4_t __rev0_123; __rev0_123 = __builtin_shufflevector(__s0_123, __s0_123, 3, 2, 1, 0); \ - float32x4_t __rev1_123; __rev1_123 = __builtin_shufflevector(__s1_123, __s1_123, 3, 2, 1, 0); \ - float32x4_t __rev2_123; __rev2_123 = __builtin_shufflevector(__s2_123, __s2_123, 3, 2, 1, 0); \ - float32x4_t __ret_123; \ - __ret_123 = __noswap_vfmaq_laneq_f32(__rev0_123, -__rev1_123, __rev2_123, __p3_123); \ - __ret_123 = __builtin_shufflevector(__ret_123, __ret_123, 3, 2, 1, 0); \ - __ret_123; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vfms_laneq_f64(__p0_124, __p1_124, __p2_124, __p3_124) __extension__ ({ \ - float64x1_t __s0_124 = __p0_124; \ - float64x1_t __s1_124 = __p1_124; \ - float64x2_t __s2_124 = __p2_124; \ - float64x1_t __ret_124; \ - __ret_124 = vfma_laneq_f64(__s0_124, -__s1_124, __s2_124, __p3_124); \ - __ret_124; \ -}) -#else -#define vfms_laneq_f64(__p0_125, __p1_125, __p2_125, __p3_125) __extension__ ({ \ - float64x1_t __s0_125 = __p0_125; \ - float64x1_t __s1_125 = __p1_125; \ - float64x2_t __s2_125 = __p2_125; \ - float64x2_t __rev2_125; __rev2_125 = __builtin_shufflevector(__s2_125, __s2_125, 1, 0); \ - float64x1_t __ret_125; \ - __ret_125 = __noswap_vfma_laneq_f64(__s0_125, -__s1_125, __rev2_125, __p3_125); \ - __ret_125; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vfms_laneq_f32(__p0_126, __p1_126, __p2_126, __p3_126) __extension__ ({ \ - float32x2_t __s0_126 = __p0_126; \ - float32x2_t __s1_126 = __p1_126; \ - float32x4_t __s2_126 = __p2_126; \ - float32x2_t __ret_126; \ - __ret_126 = vfma_laneq_f32(__s0_126, -__s1_126, __s2_126, __p3_126); \ - __ret_126; \ -}) -#else -#define vfms_laneq_f32(__p0_127, __p1_127, __p2_127, __p3_127) __extension__ ({ \ - float32x2_t __s0_127 = __p0_127; \ - float32x2_t __s1_127 = __p1_127; \ - float32x4_t __s2_127 = __p2_127; \ - float32x2_t __rev0_127; __rev0_127 = __builtin_shufflevector(__s0_127, __s0_127, 1, 0); \ - float32x2_t __rev1_127; __rev1_127 = 
__builtin_shufflevector(__s1_127, __s1_127, 1, 0); \ - float32x4_t __rev2_127; __rev2_127 = __builtin_shufflevector(__s2_127, __s2_127, 3, 2, 1, 0); \ - float32x2_t __ret_127; \ - __ret_127 = __noswap_vfma_laneq_f32(__rev0_127, -__rev1_127, __rev2_127, __p3_127); \ - __ret_127 = __builtin_shufflevector(__ret_127, __ret_127, 1, 0); \ - __ret_127; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vfmsq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) { - float64x2_t __ret; - __ret = vfmaq_f64(__p0, -__p1, (float64x2_t) {__p2, __p2}); - return __ret; -} -#else -__ai float64x2_t vfmsq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) { - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - float64x2_t __ret; - __ret = __noswap_vfmaq_f64(__rev0, -__rev1, (float64x2_t) {__p2, __p2}); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vfmsq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) { - float32x4_t __ret; - __ret = vfmaq_f32(__p0, -__p1, (float32x4_t) {__p2, __p2, __p2, __p2}); - return __ret; -} -#else -__ai float32x4_t vfmsq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) { - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - float32x4_t __ret; - __ret = __noswap_vfmaq_f32(__rev0, -__rev1, (float32x4_t) {__p2, __p2, __p2, __p2}); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vfms_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) { - float32x2_t __ret; - __ret = vfma_f32(__p0, -__p1, (float32x2_t) {__p2, __p2}); - return __ret; -} -#else -__ai float32x2_t vfms_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) { - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - float32x2_t __ret; - __ret = __noswap_vfma_f32(__rev0, -__rev1, (float32x2_t) {__p2, __p2}); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly64x1_t vget_high_p64(poly64x2_t __p0) { - poly64x1_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 1); - return __ret; -} -#else -__ai poly64x1_t vget_high_p64(poly64x2_t __p0) { - poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - poly64x1_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev0, 1); - return __ret; -} -__ai poly64x1_t __noswap_vget_high_p64(poly64x2_t __p0) { - poly64x1_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64x1_t vget_high_f64(float64x2_t __p0) { - float64x1_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 1); - return __ret; -} -#else -__ai float64x1_t vget_high_f64(float64x2_t __p0) { - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float64x1_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev0, 1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vget_lane_p64(__p0, __p1) __extension__ ({ \ - poly64x1_t __s0 = __p0; \ - poly64_t __ret; \ - __ret = (poly64_t) __builtin_neon_vget_lane_i64((int8x8_t)__s0, __p1); \ - __ret; \ -}) -#else -#define vget_lane_p64(__p0, __p1) __extension__ ({ \ - poly64x1_t __s0 = __p0; \ - 
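/* Illustrative sketch, not part of the patch: vfmsq_f64 computes a - b*c as
 * one fused operation. The definitions above build every vfms form by
 * negating the multiplicand and forwarding to the matching vfma form, which
 * is why each macro passes -__s1. */
#include <arm_neon.h>

static float64x2_t fused_residual(float64x2_t a, float64x2_t b, float64x2_t c)
{
    return vfmsq_f64(a, b, c);   /* a[i] - b[i]*c[i], single rounding */
}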
poly64_t __ret; \ - __ret = (poly64_t) __builtin_neon_vget_lane_i64((int8x8_t)__s0, __p1); \ - __ret; \ -}) -#define __noswap_vget_lane_p64(__p0, __p1) __extension__ ({ \ - poly64x1_t __s0 = __p0; \ - poly64_t __ret; \ - __ret = (poly64_t) __builtin_neon_vget_lane_i64((int8x8_t)__s0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vgetq_lane_p64(__p0, __p1) __extension__ ({ \ - poly64x2_t __s0 = __p0; \ - poly64_t __ret; \ - __ret = (poly64_t) __builtin_neon_vgetq_lane_i64((int8x16_t)__s0, __p1); \ - __ret; \ -}) -#else -#define vgetq_lane_p64(__p0, __p1) __extension__ ({ \ - poly64x2_t __s0 = __p0; \ - poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - poly64_t __ret; \ - __ret = (poly64_t) __builtin_neon_vgetq_lane_i64((int8x16_t)__rev0, __p1); \ - __ret; \ -}) -#define __noswap_vgetq_lane_p64(__p0, __p1) __extension__ ({ \ - poly64x2_t __s0 = __p0; \ - poly64_t __ret; \ - __ret = (poly64_t) __builtin_neon_vgetq_lane_i64((int8x16_t)__s0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vgetq_lane_f64(__p0, __p1) __extension__ ({ \ - float64x2_t __s0 = __p0; \ - float64_t __ret; \ - __ret = (float64_t) __builtin_neon_vgetq_lane_f64((int8x16_t)__s0, __p1); \ - __ret; \ -}) -#else -#define vgetq_lane_f64(__p0, __p1) __extension__ ({ \ - float64x2_t __s0 = __p0; \ - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - float64_t __ret; \ - __ret = (float64_t) __builtin_neon_vgetq_lane_f64((int8x16_t)__rev0, __p1); \ - __ret; \ -}) -#define __noswap_vgetq_lane_f64(__p0, __p1) __extension__ ({ \ - float64x2_t __s0 = __p0; \ - float64_t __ret; \ - __ret = (float64_t) __builtin_neon_vgetq_lane_f64((int8x16_t)__s0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vget_lane_f64(__p0, __p1) __extension__ ({ \ - float64x1_t __s0 = __p0; \ - float64_t __ret; \ - __ret = (float64_t) __builtin_neon_vget_lane_f64((int8x8_t)__s0, __p1); \ - __ret; \ -}) -#else -#define vget_lane_f64(__p0, __p1) __extension__ ({ \ - float64x1_t __s0 = __p0; \ - float64_t __ret; \ - __ret = (float64_t) __builtin_neon_vget_lane_f64((int8x8_t)__s0, __p1); \ - __ret; \ -}) -#define __noswap_vget_lane_f64(__p0, __p1) __extension__ ({ \ - float64x1_t __s0 = __p0; \ - float64_t __ret; \ - __ret = (float64_t) __builtin_neon_vget_lane_f64((int8x8_t)__s0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly64x1_t vget_low_p64(poly64x2_t __p0) { - poly64x1_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 0); - return __ret; -} -#else -__ai poly64x1_t vget_low_p64(poly64x2_t __p0) { - poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - poly64x1_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev0, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64x1_t vget_low_f64(float64x2_t __p0) { - float64x1_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 0); - return __ret; -} -#else -__ai float64x1_t vget_low_f64(float64x2_t __p0) { - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float64x1_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev0, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1_p64(__p0) __extension__ ({ \ - poly64x1_t __ret; \ - __ret = (poly64x1_t) __builtin_neon_vld1_v(__p0, 6); \ - __ret; \ -}) -#else -#define vld1_p64(__p0) __extension__ ({ \ - poly64x1_t __ret; \ - __ret = (poly64x1_t) __builtin_neon_vld1_v(__p0, 6); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define 
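/* Illustrative sketch, not from the patch: vgetq_lane_f64 extracts a single
 * scalar lane, while vget_low_f64/vget_high_f64 split a 128-bit vector into
 * its two 64-bit halves. */
#include <arm_neon.h>

static double horizontal_sum_f64(float64x2_t v)
{
    return vgetq_lane_f64(v, 0) + vgetq_lane_f64(v, 1);
}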
vld1q_p64(__p0) __extension__ ({ \ - poly64x2_t __ret; \ - __ret = (poly64x2_t) __builtin_neon_vld1q_v(__p0, 38); \ - __ret; \ -}) -#else -#define vld1q_p64(__p0) __extension__ ({ \ - poly64x2_t __ret; \ - __ret = (poly64x2_t) __builtin_neon_vld1q_v(__p0, 38); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1q_f64(__p0) __extension__ ({ \ - float64x2_t __ret; \ - __ret = (float64x2_t) __builtin_neon_vld1q_v(__p0, 42); \ - __ret; \ -}) -#else -#define vld1q_f64(__p0) __extension__ ({ \ - float64x2_t __ret; \ - __ret = (float64x2_t) __builtin_neon_vld1q_v(__p0, 42); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1_f64(__p0) __extension__ ({ \ - float64x1_t __ret; \ - __ret = (float64x1_t) __builtin_neon_vld1_v(__p0, 10); \ - __ret; \ -}) -#else -#define vld1_f64(__p0) __extension__ ({ \ - float64x1_t __ret; \ - __ret = (float64x1_t) __builtin_neon_vld1_v(__p0, 10); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1_dup_p64(__p0) __extension__ ({ \ - poly64x1_t __ret; \ - __ret = (poly64x1_t) __builtin_neon_vld1_dup_v(__p0, 6); \ - __ret; \ -}) -#else -#define vld1_dup_p64(__p0) __extension__ ({ \ - poly64x1_t __ret; \ - __ret = (poly64x1_t) __builtin_neon_vld1_dup_v(__p0, 6); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1q_dup_p64(__p0) __extension__ ({ \ - poly64x2_t __ret; \ - __ret = (poly64x2_t) __builtin_neon_vld1q_dup_v(__p0, 38); \ - __ret; \ -}) -#else -#define vld1q_dup_p64(__p0) __extension__ ({ \ - poly64x2_t __ret; \ - __ret = (poly64x2_t) __builtin_neon_vld1q_dup_v(__p0, 38); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1q_dup_f64(__p0) __extension__ ({ \ - float64x2_t __ret; \ - __ret = (float64x2_t) __builtin_neon_vld1q_dup_v(__p0, 42); \ - __ret; \ -}) -#else -#define vld1q_dup_f64(__p0) __extension__ ({ \ - float64x2_t __ret; \ - __ret = (float64x2_t) __builtin_neon_vld1q_dup_v(__p0, 42); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1_dup_f64(__p0) __extension__ ({ \ - float64x1_t __ret; \ - __ret = (float64x1_t) __builtin_neon_vld1_dup_v(__p0, 10); \ - __ret; \ -}) -#else -#define vld1_dup_f64(__p0) __extension__ ({ \ - float64x1_t __ret; \ - __ret = (float64x1_t) __builtin_neon_vld1_dup_v(__p0, 10); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1_lane_p64(__p0, __p1, __p2) __extension__ ({ \ - poly64x1_t __s1 = __p1; \ - poly64x1_t __ret; \ - __ret = (poly64x1_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 6); \ - __ret; \ -}) -#else -#define vld1_lane_p64(__p0, __p1, __p2) __extension__ ({ \ - poly64x1_t __s1 = __p1; \ - poly64x1_t __ret; \ - __ret = (poly64x1_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 6); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1q_lane_p64(__p0, __p1, __p2) __extension__ ({ \ - poly64x2_t __s1 = __p1; \ - poly64x2_t __ret; \ - __ret = (poly64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 38); \ - __ret; \ -}) -#else -#define vld1q_lane_p64(__p0, __p1, __p2) __extension__ ({ \ - poly64x2_t __s1 = __p1; \ - poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - poly64x2_t __ret; \ - __ret = (poly64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 38); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 
0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1q_lane_f64(__p0, __p1, __p2) __extension__ ({ \ - float64x2_t __s1 = __p1; \ - float64x2_t __ret; \ - __ret = (float64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 42); \ - __ret; \ -}) -#else -#define vld1q_lane_f64(__p0, __p1, __p2) __extension__ ({ \ - float64x2_t __s1 = __p1; \ - float64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - float64x2_t __ret; \ - __ret = (float64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 42); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1_lane_f64(__p0, __p1, __p2) __extension__ ({ \ - float64x1_t __s1 = __p1; \ - float64x1_t __ret; \ - __ret = (float64x1_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 10); \ - __ret; \ -}) -#else -#define vld1_lane_f64(__p0, __p1, __p2) __extension__ ({ \ - float64x1_t __s1 = __p1; \ - float64x1_t __ret; \ - __ret = (float64x1_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 10); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1_p8_x2(__p0) __extension__ ({ \ - poly8x8x2_t __ret; \ - __builtin_neon_vld1_x2_v(&__ret, __p0, 4); \ - __ret; \ -}) -#else -#define vld1_p8_x2(__p0) __extension__ ({ \ - poly8x8x2_t __ret; \ - __builtin_neon_vld1_x2_v(&__ret, __p0, 4); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1_p64_x2(__p0) __extension__ ({ \ - poly64x1x2_t __ret; \ - __builtin_neon_vld1_x2_v(&__ret, __p0, 6); \ - __ret; \ -}) -#else -#define vld1_p64_x2(__p0) __extension__ ({ \ - poly64x1x2_t __ret; \ - __builtin_neon_vld1_x2_v(&__ret, __p0, 6); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1_p16_x2(__p0) __extension__ ({ \ - poly16x4x2_t __ret; \ - __builtin_neon_vld1_x2_v(&__ret, __p0, 5); \ - __ret; \ -}) -#else -#define vld1_p16_x2(__p0) __extension__ ({ \ - poly16x4x2_t __ret; \ - __builtin_neon_vld1_x2_v(&__ret, __p0, 5); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1q_p8_x2(__p0) __extension__ ({ \ - poly8x16x2_t __ret; \ - __builtin_neon_vld1q_x2_v(&__ret, __p0, 36); \ - __ret; \ -}) -#else -#define vld1q_p8_x2(__p0) __extension__ ({ \ - poly8x16x2_t __ret; \ - __builtin_neon_vld1q_x2_v(&__ret, __p0, 36); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1q_p64_x2(__p0) __extension__ ({ \ - poly64x2x2_t __ret; \ - __builtin_neon_vld1q_x2_v(&__ret, __p0, 38); \ - __ret; \ -}) -#else -#define vld1q_p64_x2(__p0) __extension__ ({ \ - poly64x2x2_t __ret; \ - __builtin_neon_vld1q_x2_v(&__ret, __p0, 38); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1q_p16_x2(__p0) __extension__ ({ \ - poly16x8x2_t __ret; \ - 
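/* Illustrative sketch (assumed usage, not part of the patch): vld1q_f64
 * loads two consecutive doubles and vld1q_dup_f64 loads one double and
 * broadcasts it; the big-endian branches above re-shuffle the loaded lanes
 * so that lane 0 always holds the element at the lowest address. */
#include <arm_neon.h>

static float64x2_t load_scale(const double *data, const double *scale)
{
    float64x2_t v = vld1q_f64(data);        /* { data[0], data[1] }  */
    float64x2_t k = vld1q_dup_f64(scale);   /* { *scale, *scale }    */
    return vmulq_f64(v, k);
}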
__builtin_neon_vld1q_x2_v(&__ret, __p0, 37); \ - __ret; \ -}) -#else -#define vld1q_p16_x2(__p0) __extension__ ({ \ - poly16x8x2_t __ret; \ - __builtin_neon_vld1q_x2_v(&__ret, __p0, 37); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1q_u8_x2(__p0) __extension__ ({ \ - uint8x16x2_t __ret; \ - __builtin_neon_vld1q_x2_v(&__ret, __p0, 48); \ - __ret; \ -}) -#else -#define vld1q_u8_x2(__p0) __extension__ ({ \ - uint8x16x2_t __ret; \ - __builtin_neon_vld1q_x2_v(&__ret, __p0, 48); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1q_u32_x2(__p0) __extension__ ({ \ - uint32x4x2_t __ret; \ - __builtin_neon_vld1q_x2_v(&__ret, __p0, 50); \ - __ret; \ -}) -#else -#define vld1q_u32_x2(__p0) __extension__ ({ \ - uint32x4x2_t __ret; \ - __builtin_neon_vld1q_x2_v(&__ret, __p0, 50); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1q_u64_x2(__p0) __extension__ ({ \ - uint64x2x2_t __ret; \ - __builtin_neon_vld1q_x2_v(&__ret, __p0, 51); \ - __ret; \ -}) -#else -#define vld1q_u64_x2(__p0) __extension__ ({ \ - uint64x2x2_t __ret; \ - __builtin_neon_vld1q_x2_v(&__ret, __p0, 51); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1q_u16_x2(__p0) __extension__ ({ \ - uint16x8x2_t __ret; \ - __builtin_neon_vld1q_x2_v(&__ret, __p0, 49); \ - __ret; \ -}) -#else -#define vld1q_u16_x2(__p0) __extension__ ({ \ - uint16x8x2_t __ret; \ - __builtin_neon_vld1q_x2_v(&__ret, __p0, 49); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1q_s8_x2(__p0) __extension__ ({ \ - int8x16x2_t __ret; \ - __builtin_neon_vld1q_x2_v(&__ret, __p0, 32); \ - __ret; \ -}) -#else -#define vld1q_s8_x2(__p0) __extension__ ({ \ - int8x16x2_t __ret; \ - __builtin_neon_vld1q_x2_v(&__ret, __p0, 32); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1q_f64_x2(__p0) __extension__ ({ \ - float64x2x2_t __ret; \ - __builtin_neon_vld1q_x2_v(&__ret, __p0, 42); \ - __ret; \ -}) -#else -#define vld1q_f64_x2(__p0) __extension__ ({ \ - float64x2x2_t __ret; \ - __builtin_neon_vld1q_x2_v(&__ret, __p0, 42); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1q_f32_x2(__p0) __extension__ ({ 
\ - float32x4x2_t __ret; \ - __builtin_neon_vld1q_x2_v(&__ret, __p0, 41); \ - __ret; \ -}) -#else -#define vld1q_f32_x2(__p0) __extension__ ({ \ - float32x4x2_t __ret; \ - __builtin_neon_vld1q_x2_v(&__ret, __p0, 41); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1q_f16_x2(__p0) __extension__ ({ \ - float16x8x2_t __ret; \ - __builtin_neon_vld1q_x2_v(&__ret, __p0, 40); \ - __ret; \ -}) -#else -#define vld1q_f16_x2(__p0) __extension__ ({ \ - float16x8x2_t __ret; \ - __builtin_neon_vld1q_x2_v(&__ret, __p0, 40); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1q_s32_x2(__p0) __extension__ ({ \ - int32x4x2_t __ret; \ - __builtin_neon_vld1q_x2_v(&__ret, __p0, 34); \ - __ret; \ -}) -#else -#define vld1q_s32_x2(__p0) __extension__ ({ \ - int32x4x2_t __ret; \ - __builtin_neon_vld1q_x2_v(&__ret, __p0, 34); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1q_s64_x2(__p0) __extension__ ({ \ - int64x2x2_t __ret; \ - __builtin_neon_vld1q_x2_v(&__ret, __p0, 35); \ - __ret; \ -}) -#else -#define vld1q_s64_x2(__p0) __extension__ ({ \ - int64x2x2_t __ret; \ - __builtin_neon_vld1q_x2_v(&__ret, __p0, 35); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1q_s16_x2(__p0) __extension__ ({ \ - int16x8x2_t __ret; \ - __builtin_neon_vld1q_x2_v(&__ret, __p0, 33); \ - __ret; \ -}) -#else -#define vld1q_s16_x2(__p0) __extension__ ({ \ - int16x8x2_t __ret; \ - __builtin_neon_vld1q_x2_v(&__ret, __p0, 33); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1_u8_x2(__p0) __extension__ ({ \ - uint8x8x2_t __ret; \ - __builtin_neon_vld1_x2_v(&__ret, __p0, 16); \ - __ret; \ -}) -#else -#define vld1_u8_x2(__p0) __extension__ ({ \ - uint8x8x2_t __ret; \ - __builtin_neon_vld1_x2_v(&__ret, __p0, 16); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1_u32_x2(__p0) __extension__ ({ \ - uint32x2x2_t __ret; \ - __builtin_neon_vld1_x2_v(&__ret, __p0, 18); \ - __ret; \ -}) -#else -#define vld1_u32_x2(__p0) __extension__ ({ \ - uint32x2x2_t __ret; \ - __builtin_neon_vld1_x2_v(&__ret, __p0, 18); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1_u64_x2(__p0) __extension__ ({ \ - uint64x1x2_t __ret; \ - __builtin_neon_vld1_x2_v(&__ret, __p0, 19); \ - __ret; \ -}) -#else -#define vld1_u64_x2(__p0) 
__extension__ ({ \ - uint64x1x2_t __ret; \ - __builtin_neon_vld1_x2_v(&__ret, __p0, 19); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1_u16_x2(__p0) __extension__ ({ \ - uint16x4x2_t __ret; \ - __builtin_neon_vld1_x2_v(&__ret, __p0, 17); \ - __ret; \ -}) -#else -#define vld1_u16_x2(__p0) __extension__ ({ \ - uint16x4x2_t __ret; \ - __builtin_neon_vld1_x2_v(&__ret, __p0, 17); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1_s8_x2(__p0) __extension__ ({ \ - int8x8x2_t __ret; \ - __builtin_neon_vld1_x2_v(&__ret, __p0, 0); \ - __ret; \ -}) -#else -#define vld1_s8_x2(__p0) __extension__ ({ \ - int8x8x2_t __ret; \ - __builtin_neon_vld1_x2_v(&__ret, __p0, 0); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1_f64_x2(__p0) __extension__ ({ \ - float64x1x2_t __ret; \ - __builtin_neon_vld1_x2_v(&__ret, __p0, 10); \ - __ret; \ -}) -#else -#define vld1_f64_x2(__p0) __extension__ ({ \ - float64x1x2_t __ret; \ - __builtin_neon_vld1_x2_v(&__ret, __p0, 10); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1_f32_x2(__p0) __extension__ ({ \ - float32x2x2_t __ret; \ - __builtin_neon_vld1_x2_v(&__ret, __p0, 9); \ - __ret; \ -}) -#else -#define vld1_f32_x2(__p0) __extension__ ({ \ - float32x2x2_t __ret; \ - __builtin_neon_vld1_x2_v(&__ret, __p0, 9); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1_f16_x2(__p0) __extension__ ({ \ - float16x4x2_t __ret; \ - __builtin_neon_vld1_x2_v(&__ret, __p0, 8); \ - __ret; \ -}) -#else -#define vld1_f16_x2(__p0) __extension__ ({ \ - float16x4x2_t __ret; \ - __builtin_neon_vld1_x2_v(&__ret, __p0, 8); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1_s32_x2(__p0) __extension__ ({ \ - int32x2x2_t __ret; \ - __builtin_neon_vld1_x2_v(&__ret, __p0, 2); \ - __ret; \ -}) -#else -#define vld1_s32_x2(__p0) __extension__ ({ \ - int32x2x2_t __ret; \ - __builtin_neon_vld1_x2_v(&__ret, __p0, 2); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1_s64_x2(__p0) __extension__ ({ \ - int64x1x2_t __ret; \ - __builtin_neon_vld1_x2_v(&__ret, __p0, 3); \ - __ret; \ -}) -#else -#define vld1_s64_x2(__p0) __extension__ ({ \ - int64x1x2_t __ret; \ - __builtin_neon_vld1_x2_v(&__ret, __p0, 3); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1_s16_x2(__p0) __extension__ ({ \ - int16x4x2_t __ret; \ - __builtin_neon_vld1_x2_v(&__ret, __p0, 1); \ - __ret; \ -}) -#else -#define vld1_s16_x2(__p0) __extension__ ({ \ - int16x4x2_t __ret; \ - __builtin_neon_vld1_x2_v(&__ret, __p0, 1); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], 
__ret.val[1], 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1_p8_x3(__p0) __extension__ ({ \ - poly8x8x3_t __ret; \ - __builtin_neon_vld1_x3_v(&__ret, __p0, 4); \ - __ret; \ -}) -#else -#define vld1_p8_x3(__p0) __extension__ ({ \ - poly8x8x3_t __ret; \ - __builtin_neon_vld1_x3_v(&__ret, __p0, 4); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1_p64_x3(__p0) __extension__ ({ \ - poly64x1x3_t __ret; \ - __builtin_neon_vld1_x3_v(&__ret, __p0, 6); \ - __ret; \ -}) -#else -#define vld1_p64_x3(__p0) __extension__ ({ \ - poly64x1x3_t __ret; \ - __builtin_neon_vld1_x3_v(&__ret, __p0, 6); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1_p16_x3(__p0) __extension__ ({ \ - poly16x4x3_t __ret; \ - __builtin_neon_vld1_x3_v(&__ret, __p0, 5); \ - __ret; \ -}) -#else -#define vld1_p16_x3(__p0) __extension__ ({ \ - poly16x4x3_t __ret; \ - __builtin_neon_vld1_x3_v(&__ret, __p0, 5); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1q_p8_x3(__p0) __extension__ ({ \ - poly8x16x3_t __ret; \ - __builtin_neon_vld1q_x3_v(&__ret, __p0, 36); \ - __ret; \ -}) -#else -#define vld1q_p8_x3(__p0) __extension__ ({ \ - poly8x16x3_t __ret; \ - __builtin_neon_vld1q_x3_v(&__ret, __p0, 36); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1q_p64_x3(__p0) __extension__ ({ \ - poly64x2x3_t __ret; \ - __builtin_neon_vld1q_x3_v(&__ret, __p0, 38); \ - __ret; \ -}) -#else -#define vld1q_p64_x3(__p0) __extension__ ({ \ - poly64x2x3_t __ret; \ - __builtin_neon_vld1q_x3_v(&__ret, __p0, 38); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1q_p16_x3(__p0) __extension__ ({ \ - poly16x8x3_t __ret; \ - __builtin_neon_vld1q_x3_v(&__ret, __p0, 37); \ - __ret; \ -}) -#else -#define vld1q_p16_x3(__p0) __extension__ ({ \ - poly16x8x3_t __ret; \ - __builtin_neon_vld1q_x3_v(&__ret, __p0, 37); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1q_u8_x3(__p0) __extension__ ({ \ - uint8x16x3_t __ret; \ - __builtin_neon_vld1q_x3_v(&__ret, __p0, 48); \ - __ret; \ -}) -#else -#define 
vld1q_u8_x3(__p0) __extension__ ({ \ - uint8x16x3_t __ret; \ - __builtin_neon_vld1q_x3_v(&__ret, __p0, 48); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1q_u32_x3(__p0) __extension__ ({ \ - uint32x4x3_t __ret; \ - __builtin_neon_vld1q_x3_v(&__ret, __p0, 50); \ - __ret; \ -}) -#else -#define vld1q_u32_x3(__p0) __extension__ ({ \ - uint32x4x3_t __ret; \ - __builtin_neon_vld1q_x3_v(&__ret, __p0, 50); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1q_u64_x3(__p0) __extension__ ({ \ - uint64x2x3_t __ret; \ - __builtin_neon_vld1q_x3_v(&__ret, __p0, 51); \ - __ret; \ -}) -#else -#define vld1q_u64_x3(__p0) __extension__ ({ \ - uint64x2x3_t __ret; \ - __builtin_neon_vld1q_x3_v(&__ret, __p0, 51); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1q_u16_x3(__p0) __extension__ ({ \ - uint16x8x3_t __ret; \ - __builtin_neon_vld1q_x3_v(&__ret, __p0, 49); \ - __ret; \ -}) -#else -#define vld1q_u16_x3(__p0) __extension__ ({ \ - uint16x8x3_t __ret; \ - __builtin_neon_vld1q_x3_v(&__ret, __p0, 49); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1q_s8_x3(__p0) __extension__ ({ \ - int8x16x3_t __ret; \ - __builtin_neon_vld1q_x3_v(&__ret, __p0, 32); \ - __ret; \ -}) -#else -#define vld1q_s8_x3(__p0) __extension__ ({ \ - int8x16x3_t __ret; \ - __builtin_neon_vld1q_x3_v(&__ret, __p0, 32); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1q_f64_x3(__p0) __extension__ ({ \ - float64x2x3_t __ret; \ - __builtin_neon_vld1q_x3_v(&__ret, __p0, 42); \ - __ret; \ -}) -#else -#define vld1q_f64_x3(__p0) __extension__ ({ \ - float64x2x3_t __ret; \ - __builtin_neon_vld1q_x3_v(&__ret, __p0, 42); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1q_f32_x3(__p0) __extension__ 
({ \ - float32x4x3_t __ret; \ - __builtin_neon_vld1q_x3_v(&__ret, __p0, 41); \ - __ret; \ -}) -#else -#define vld1q_f32_x3(__p0) __extension__ ({ \ - float32x4x3_t __ret; \ - __builtin_neon_vld1q_x3_v(&__ret, __p0, 41); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1q_f16_x3(__p0) __extension__ ({ \ - float16x8x3_t __ret; \ - __builtin_neon_vld1q_x3_v(&__ret, __p0, 40); \ - __ret; \ -}) -#else -#define vld1q_f16_x3(__p0) __extension__ ({ \ - float16x8x3_t __ret; \ - __builtin_neon_vld1q_x3_v(&__ret, __p0, 40); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1q_s32_x3(__p0) __extension__ ({ \ - int32x4x3_t __ret; \ - __builtin_neon_vld1q_x3_v(&__ret, __p0, 34); \ - __ret; \ -}) -#else -#define vld1q_s32_x3(__p0) __extension__ ({ \ - int32x4x3_t __ret; \ - __builtin_neon_vld1q_x3_v(&__ret, __p0, 34); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1q_s64_x3(__p0) __extension__ ({ \ - int64x2x3_t __ret; \ - __builtin_neon_vld1q_x3_v(&__ret, __p0, 35); \ - __ret; \ -}) -#else -#define vld1q_s64_x3(__p0) __extension__ ({ \ - int64x2x3_t __ret; \ - __builtin_neon_vld1q_x3_v(&__ret, __p0, 35); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1q_s16_x3(__p0) __extension__ ({ \ - int16x8x3_t __ret; \ - __builtin_neon_vld1q_x3_v(&__ret, __p0, 33); \ - __ret; \ -}) -#else -#define vld1q_s16_x3(__p0) __extension__ ({ \ - int16x8x3_t __ret; \ - __builtin_neon_vld1q_x3_v(&__ret, __p0, 33); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1_u8_x3(__p0) __extension__ ({ \ - uint8x8x3_t __ret; \ - __builtin_neon_vld1_x3_v(&__ret, __p0, 16); \ - __ret; \ -}) -#else -#define vld1_u8_x3(__p0) __extension__ ({ \ - uint8x8x3_t __ret; \ - __builtin_neon_vld1_x3_v(&__ret, __p0, 16); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1_u32_x3(__p0) __extension__ ({ \ - uint32x2x3_t __ret; \ - 
__builtin_neon_vld1_x3_v(&__ret, __p0, 18); \ - __ret; \ -}) -#else -#define vld1_u32_x3(__p0) __extension__ ({ \ - uint32x2x3_t __ret; \ - __builtin_neon_vld1_x3_v(&__ret, __p0, 18); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1_u64_x3(__p0) __extension__ ({ \ - uint64x1x3_t __ret; \ - __builtin_neon_vld1_x3_v(&__ret, __p0, 19); \ - __ret; \ -}) -#else -#define vld1_u64_x3(__p0) __extension__ ({ \ - uint64x1x3_t __ret; \ - __builtin_neon_vld1_x3_v(&__ret, __p0, 19); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1_u16_x3(__p0) __extension__ ({ \ - uint16x4x3_t __ret; \ - __builtin_neon_vld1_x3_v(&__ret, __p0, 17); \ - __ret; \ -}) -#else -#define vld1_u16_x3(__p0) __extension__ ({ \ - uint16x4x3_t __ret; \ - __builtin_neon_vld1_x3_v(&__ret, __p0, 17); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1_s8_x3(__p0) __extension__ ({ \ - int8x8x3_t __ret; \ - __builtin_neon_vld1_x3_v(&__ret, __p0, 0); \ - __ret; \ -}) -#else -#define vld1_s8_x3(__p0) __extension__ ({ \ - int8x8x3_t __ret; \ - __builtin_neon_vld1_x3_v(&__ret, __p0, 0); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1_f64_x3(__p0) __extension__ ({ \ - float64x1x3_t __ret; \ - __builtin_neon_vld1_x3_v(&__ret, __p0, 10); \ - __ret; \ -}) -#else -#define vld1_f64_x3(__p0) __extension__ ({ \ - float64x1x3_t __ret; \ - __builtin_neon_vld1_x3_v(&__ret, __p0, 10); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1_f32_x3(__p0) __extension__ ({ \ - float32x2x3_t __ret; \ - __builtin_neon_vld1_x3_v(&__ret, __p0, 9); \ - __ret; \ -}) -#else -#define vld1_f32_x3(__p0) __extension__ ({ \ - float32x2x3_t __ret; \ - __builtin_neon_vld1_x3_v(&__ret, __p0, 9); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1_f16_x3(__p0) __extension__ ({ \ - float16x4x3_t __ret; \ - __builtin_neon_vld1_x3_v(&__ret, __p0, 8); \ - __ret; \ -}) -#else -#define vld1_f16_x3(__p0) __extension__ ({ \ - float16x4x3_t __ret; \ - __builtin_neon_vld1_x3_v(&__ret, __p0, 8); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1_s32_x3(__p0) __extension__ ({ \ - int32x2x3_t __ret; \ - __builtin_neon_vld1_x3_v(&__ret, __p0, 2); \ - __ret; \ -}) -#else -#define vld1_s32_x3(__p0) __extension__ ({ \ - 
int32x2x3_t __ret; \ - __builtin_neon_vld1_x3_v(&__ret, __p0, 2); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1_s64_x3(__p0) __extension__ ({ \ - int64x1x3_t __ret; \ - __builtin_neon_vld1_x3_v(&__ret, __p0, 3); \ - __ret; \ -}) -#else -#define vld1_s64_x3(__p0) __extension__ ({ \ - int64x1x3_t __ret; \ - __builtin_neon_vld1_x3_v(&__ret, __p0, 3); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1_s16_x3(__p0) __extension__ ({ \ - int16x4x3_t __ret; \ - __builtin_neon_vld1_x3_v(&__ret, __p0, 1); \ - __ret; \ -}) -#else -#define vld1_s16_x3(__p0) __extension__ ({ \ - int16x4x3_t __ret; \ - __builtin_neon_vld1_x3_v(&__ret, __p0, 1); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1_p8_x4(__p0) __extension__ ({ \ - poly8x8x4_t __ret; \ - __builtin_neon_vld1_x4_v(&__ret, __p0, 4); \ - __ret; \ -}) -#else -#define vld1_p8_x4(__p0) __extension__ ({ \ - poly8x8x4_t __ret; \ - __builtin_neon_vld1_x4_v(&__ret, __p0, 4); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1_p64_x4(__p0) __extension__ ({ \ - poly64x1x4_t __ret; \ - __builtin_neon_vld1_x4_v(&__ret, __p0, 6); \ - __ret; \ -}) -#else -#define vld1_p64_x4(__p0) __extension__ ({ \ - poly64x1x4_t __ret; \ - __builtin_neon_vld1_x4_v(&__ret, __p0, 6); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1_p16_x4(__p0) __extension__ ({ \ - poly16x4x4_t __ret; \ - __builtin_neon_vld1_x4_v(&__ret, __p0, 5); \ - __ret; \ -}) -#else -#define vld1_p16_x4(__p0) __extension__ ({ \ - poly16x4x4_t __ret; \ - __builtin_neon_vld1_x4_v(&__ret, __p0, 5); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1q_p8_x4(__p0) __extension__ ({ \ - poly8x16x4_t __ret; \ - __builtin_neon_vld1q_x4_v(&__ret, __p0, 36); \ - __ret; \ -}) -#else -#define vld1q_p8_x4(__p0) __extension__ ({ \ - poly8x16x4_t __ret; \ - __builtin_neon_vld1q_x4_v(&__ret, __p0, 36); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[3] = 
__builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1q_p64_x4(__p0) __extension__ ({ \ - poly64x2x4_t __ret; \ - __builtin_neon_vld1q_x4_v(&__ret, __p0, 38); \ - __ret; \ -}) -#else -#define vld1q_p64_x4(__p0) __extension__ ({ \ - poly64x2x4_t __ret; \ - __builtin_neon_vld1q_x4_v(&__ret, __p0, 38); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1q_p16_x4(__p0) __extension__ ({ \ - poly16x8x4_t __ret; \ - __builtin_neon_vld1q_x4_v(&__ret, __p0, 37); \ - __ret; \ -}) -#else -#define vld1q_p16_x4(__p0) __extension__ ({ \ - poly16x8x4_t __ret; \ - __builtin_neon_vld1q_x4_v(&__ret, __p0, 37); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1q_u8_x4(__p0) __extension__ ({ \ - uint8x16x4_t __ret; \ - __builtin_neon_vld1q_x4_v(&__ret, __p0, 48); \ - __ret; \ -}) -#else -#define vld1q_u8_x4(__p0) __extension__ ({ \ - uint8x16x4_t __ret; \ - __builtin_neon_vld1q_x4_v(&__ret, __p0, 48); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1q_u32_x4(__p0) __extension__ ({ \ - uint32x4x4_t __ret; \ - __builtin_neon_vld1q_x4_v(&__ret, __p0, 50); \ - __ret; \ -}) -#else -#define vld1q_u32_x4(__p0) __extension__ ({ \ - uint32x4x4_t __ret; \ - __builtin_neon_vld1q_x4_v(&__ret, __p0, 50); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1q_u64_x4(__p0) __extension__ ({ \ - uint64x2x4_t __ret; \ - __builtin_neon_vld1q_x4_v(&__ret, __p0, 51); \ - __ret; \ -}) -#else -#define vld1q_u64_x4(__p0) __extension__ ({ \ - uint64x2x4_t __ret; \ - __builtin_neon_vld1q_x4_v(&__ret, __p0, 51); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ - __ret; \ -}) 
-#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1q_u16_x4(__p0) __extension__ ({ \ - uint16x8x4_t __ret; \ - __builtin_neon_vld1q_x4_v(&__ret, __p0, 49); \ - __ret; \ -}) -#else -#define vld1q_u16_x4(__p0) __extension__ ({ \ - uint16x8x4_t __ret; \ - __builtin_neon_vld1q_x4_v(&__ret, __p0, 49); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1q_s8_x4(__p0) __extension__ ({ \ - int8x16x4_t __ret; \ - __builtin_neon_vld1q_x4_v(&__ret, __p0, 32); \ - __ret; \ -}) -#else -#define vld1q_s8_x4(__p0) __extension__ ({ \ - int8x16x4_t __ret; \ - __builtin_neon_vld1q_x4_v(&__ret, __p0, 32); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1q_f64_x4(__p0) __extension__ ({ \ - float64x2x4_t __ret; \ - __builtin_neon_vld1q_x4_v(&__ret, __p0, 42); \ - __ret; \ -}) -#else -#define vld1q_f64_x4(__p0) __extension__ ({ \ - float64x2x4_t __ret; \ - __builtin_neon_vld1q_x4_v(&__ret, __p0, 42); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1q_f32_x4(__p0) __extension__ ({ \ - float32x4x4_t __ret; \ - __builtin_neon_vld1q_x4_v(&__ret, __p0, 41); \ - __ret; \ -}) -#else -#define vld1q_f32_x4(__p0) __extension__ ({ \ - float32x4x4_t __ret; \ - __builtin_neon_vld1q_x4_v(&__ret, __p0, 41); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1q_f16_x4(__p0) __extension__ ({ \ - float16x8x4_t __ret; \ - __builtin_neon_vld1q_x4_v(&__ret, __p0, 40); \ - __ret; \ -}) -#else -#define vld1q_f16_x4(__p0) __extension__ ({ \ - float16x8x4_t __ret; \ - __builtin_neon_vld1q_x4_v(&__ret, __p0, 40); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define 
vld1q_s32_x4(__p0) __extension__ ({ \ - int32x4x4_t __ret; \ - __builtin_neon_vld1q_x4_v(&__ret, __p0, 34); \ - __ret; \ -}) -#else -#define vld1q_s32_x4(__p0) __extension__ ({ \ - int32x4x4_t __ret; \ - __builtin_neon_vld1q_x4_v(&__ret, __p0, 34); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1q_s64_x4(__p0) __extension__ ({ \ - int64x2x4_t __ret; \ - __builtin_neon_vld1q_x4_v(&__ret, __p0, 35); \ - __ret; \ -}) -#else -#define vld1q_s64_x4(__p0) __extension__ ({ \ - int64x2x4_t __ret; \ - __builtin_neon_vld1q_x4_v(&__ret, __p0, 35); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1q_s16_x4(__p0) __extension__ ({ \ - int16x8x4_t __ret; \ - __builtin_neon_vld1q_x4_v(&__ret, __p0, 33); \ - __ret; \ -}) -#else -#define vld1q_s16_x4(__p0) __extension__ ({ \ - int16x8x4_t __ret; \ - __builtin_neon_vld1q_x4_v(&__ret, __p0, 33); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1_u8_x4(__p0) __extension__ ({ \ - uint8x8x4_t __ret; \ - __builtin_neon_vld1_x4_v(&__ret, __p0, 16); \ - __ret; \ -}) -#else -#define vld1_u8_x4(__p0) __extension__ ({ \ - uint8x8x4_t __ret; \ - __builtin_neon_vld1_x4_v(&__ret, __p0, 16); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1_u32_x4(__p0) __extension__ ({ \ - uint32x2x4_t __ret; \ - __builtin_neon_vld1_x4_v(&__ret, __p0, 18); \ - __ret; \ -}) -#else -#define vld1_u32_x4(__p0) __extension__ ({ \ - uint32x2x4_t __ret; \ - __builtin_neon_vld1_x4_v(&__ret, __p0, 18); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1_u64_x4(__p0) __extension__ ({ \ - uint64x1x4_t __ret; \ - __builtin_neon_vld1_x4_v(&__ret, __p0, 19); \ - __ret; \ -}) -#else -#define vld1_u64_x4(__p0) __extension__ ({ \ - uint64x1x4_t __ret; \ - __builtin_neon_vld1_x4_v(&__ret, __p0, 19); \ - __ret; \ 
-}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1_u16_x4(__p0) __extension__ ({ \ - uint16x4x4_t __ret; \ - __builtin_neon_vld1_x4_v(&__ret, __p0, 17); \ - __ret; \ -}) -#else -#define vld1_u16_x4(__p0) __extension__ ({ \ - uint16x4x4_t __ret; \ - __builtin_neon_vld1_x4_v(&__ret, __p0, 17); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1_s8_x4(__p0) __extension__ ({ \ - int8x8x4_t __ret; \ - __builtin_neon_vld1_x4_v(&__ret, __p0, 0); \ - __ret; \ -}) -#else -#define vld1_s8_x4(__p0) __extension__ ({ \ - int8x8x4_t __ret; \ - __builtin_neon_vld1_x4_v(&__ret, __p0, 0); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1_f64_x4(__p0) __extension__ ({ \ - float64x1x4_t __ret; \ - __builtin_neon_vld1_x4_v(&__ret, __p0, 10); \ - __ret; \ -}) -#else -#define vld1_f64_x4(__p0) __extension__ ({ \ - float64x1x4_t __ret; \ - __builtin_neon_vld1_x4_v(&__ret, __p0, 10); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1_f32_x4(__p0) __extension__ ({ \ - float32x2x4_t __ret; \ - __builtin_neon_vld1_x4_v(&__ret, __p0, 9); \ - __ret; \ -}) -#else -#define vld1_f32_x4(__p0) __extension__ ({ \ - float32x2x4_t __ret; \ - __builtin_neon_vld1_x4_v(&__ret, __p0, 9); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1_f16_x4(__p0) __extension__ ({ \ - float16x4x4_t __ret; \ - __builtin_neon_vld1_x4_v(&__ret, __p0, 8); \ - __ret; \ -}) -#else -#define vld1_f16_x4(__p0) __extension__ ({ \ - float16x4x4_t __ret; \ - __builtin_neon_vld1_x4_v(&__ret, __p0, 8); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1_s32_x4(__p0) __extension__ ({ \ - int32x2x4_t __ret; \ - __builtin_neon_vld1_x4_v(&__ret, __p0, 2); \ - __ret; \ -}) -#else -#define vld1_s32_x4(__p0) __extension__ ({ \ - int32x2x4_t __ret; \ - __builtin_neon_vld1_x4_v(&__ret, __p0, 2); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ - __ret; \ -}) 
-#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1_s64_x4(__p0) __extension__ ({ \ - int64x1x4_t __ret; \ - __builtin_neon_vld1_x4_v(&__ret, __p0, 3); \ - __ret; \ -}) -#else -#define vld1_s64_x4(__p0) __extension__ ({ \ - int64x1x4_t __ret; \ - __builtin_neon_vld1_x4_v(&__ret, __p0, 3); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1_s16_x4(__p0) __extension__ ({ \ - int16x4x4_t __ret; \ - __builtin_neon_vld1_x4_v(&__ret, __p0, 1); \ - __ret; \ -}) -#else -#define vld1_s16_x4(__p0) __extension__ ({ \ - int16x4x4_t __ret; \ - __builtin_neon_vld1_x4_v(&__ret, __p0, 1); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld2_p64(__p0) __extension__ ({ \ - poly64x1x2_t __ret; \ - __builtin_neon_vld2_v(&__ret, __p0, 6); \ - __ret; \ -}) -#else -#define vld2_p64(__p0) __extension__ ({ \ - poly64x1x2_t __ret; \ - __builtin_neon_vld2_v(&__ret, __p0, 6); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld2q_p64(__p0) __extension__ ({ \ - poly64x2x2_t __ret; \ - __builtin_neon_vld2q_v(&__ret, __p0, 38); \ - __ret; \ -}) -#else -#define vld2q_p64(__p0) __extension__ ({ \ - poly64x2x2_t __ret; \ - __builtin_neon_vld2q_v(&__ret, __p0, 38); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld2q_u64(__p0) __extension__ ({ \ - uint64x2x2_t __ret; \ - __builtin_neon_vld2q_v(&__ret, __p0, 51); \ - __ret; \ -}) -#else -#define vld2q_u64(__p0) __extension__ ({ \ - uint64x2x2_t __ret; \ - __builtin_neon_vld2q_v(&__ret, __p0, 51); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld2q_f64(__p0) __extension__ ({ \ - float64x2x2_t __ret; \ - __builtin_neon_vld2q_v(&__ret, __p0, 42); \ - __ret; \ -}) -#else -#define vld2q_f64(__p0) __extension__ ({ \ - float64x2x2_t __ret; \ - __builtin_neon_vld2q_v(&__ret, __p0, 42); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld2q_s64(__p0) __extension__ ({ \ - int64x2x2_t __ret; \ - __builtin_neon_vld2q_v(&__ret, __p0, 35); \ - __ret; \ -}) -#else -#define vld2q_s64(__p0) __extension__ ({ \ - int64x2x2_t __ret; \ - __builtin_neon_vld2q_v(&__ret, __p0, 35); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld2_f64(__p0) __extension__ ({ \ - float64x1x2_t __ret; \ - __builtin_neon_vld2_v(&__ret, __p0, 10); \ - __ret; \ -}) -#else -#define vld2_f64(__p0) __extension__ ({ \ - float64x1x2_t __ret; \ - __builtin_neon_vld2_v(&__ret, __p0, 10); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld2_dup_p64(__p0) __extension__ ({ \ - poly64x1x2_t __ret; \ - __builtin_neon_vld2_dup_v(&__ret, __p0, 6); \ - __ret; \ 
-}) -#else -#define vld2_dup_p64(__p0) __extension__ ({ \ - poly64x1x2_t __ret; \ - __builtin_neon_vld2_dup_v(&__ret, __p0, 6); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld2q_dup_p8(__p0) __extension__ ({ \ - poly8x16x2_t __ret; \ - __builtin_neon_vld2q_dup_v(&__ret, __p0, 36); \ - __ret; \ -}) -#else -#define vld2q_dup_p8(__p0) __extension__ ({ \ - poly8x16x2_t __ret; \ - __builtin_neon_vld2q_dup_v(&__ret, __p0, 36); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld2q_dup_p64(__p0) __extension__ ({ \ - poly64x2x2_t __ret; \ - __builtin_neon_vld2q_dup_v(&__ret, __p0, 38); \ - __ret; \ -}) -#else -#define vld2q_dup_p64(__p0) __extension__ ({ \ - poly64x2x2_t __ret; \ - __builtin_neon_vld2q_dup_v(&__ret, __p0, 38); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld2q_dup_p16(__p0) __extension__ ({ \ - poly16x8x2_t __ret; \ - __builtin_neon_vld2q_dup_v(&__ret, __p0, 37); \ - __ret; \ -}) -#else -#define vld2q_dup_p16(__p0) __extension__ ({ \ - poly16x8x2_t __ret; \ - __builtin_neon_vld2q_dup_v(&__ret, __p0, 37); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld2q_dup_u8(__p0) __extension__ ({ \ - uint8x16x2_t __ret; \ - __builtin_neon_vld2q_dup_v(&__ret, __p0, 48); \ - __ret; \ -}) -#else -#define vld2q_dup_u8(__p0) __extension__ ({ \ - uint8x16x2_t __ret; \ - __builtin_neon_vld2q_dup_v(&__ret, __p0, 48); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld2q_dup_u32(__p0) __extension__ ({ \ - uint32x4x2_t __ret; \ - __builtin_neon_vld2q_dup_v(&__ret, __p0, 50); \ - __ret; \ -}) -#else -#define vld2q_dup_u32(__p0) __extension__ ({ \ - uint32x4x2_t __ret; \ - __builtin_neon_vld2q_dup_v(&__ret, __p0, 50); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld2q_dup_u64(__p0) __extension__ ({ \ - uint64x2x2_t __ret; \ - __builtin_neon_vld2q_dup_v(&__ret, __p0, 51); \ - __ret; \ -}) -#else -#define vld2q_dup_u64(__p0) __extension__ ({ \ - uint64x2x2_t __ret; \ - __builtin_neon_vld2q_dup_v(&__ret, __p0, 51); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld2q_dup_u16(__p0) __extension__ ({ \ - uint16x8x2_t __ret; \ - __builtin_neon_vld2q_dup_v(&__ret, __p0, 49); \ - __ret; \ -}) -#else -#define vld2q_dup_u16(__p0) __extension__ ({ \ - uint16x8x2_t __ret; \ - __builtin_neon_vld2q_dup_v(&__ret, __p0, 49); \ - \ - 
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld2q_dup_s8(__p0) __extension__ ({ \ - int8x16x2_t __ret; \ - __builtin_neon_vld2q_dup_v(&__ret, __p0, 32); \ - __ret; \ -}) -#else -#define vld2q_dup_s8(__p0) __extension__ ({ \ - int8x16x2_t __ret; \ - __builtin_neon_vld2q_dup_v(&__ret, __p0, 32); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld2q_dup_f64(__p0) __extension__ ({ \ - float64x2x2_t __ret; \ - __builtin_neon_vld2q_dup_v(&__ret, __p0, 42); \ - __ret; \ -}) -#else -#define vld2q_dup_f64(__p0) __extension__ ({ \ - float64x2x2_t __ret; \ - __builtin_neon_vld2q_dup_v(&__ret, __p0, 42); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld2q_dup_f32(__p0) __extension__ ({ \ - float32x4x2_t __ret; \ - __builtin_neon_vld2q_dup_v(&__ret, __p0, 41); \ - __ret; \ -}) -#else -#define vld2q_dup_f32(__p0) __extension__ ({ \ - float32x4x2_t __ret; \ - __builtin_neon_vld2q_dup_v(&__ret, __p0, 41); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld2q_dup_f16(__p0) __extension__ ({ \ - float16x8x2_t __ret; \ - __builtin_neon_vld2q_dup_v(&__ret, __p0, 40); \ - __ret; \ -}) -#else -#define vld2q_dup_f16(__p0) __extension__ ({ \ - float16x8x2_t __ret; \ - __builtin_neon_vld2q_dup_v(&__ret, __p0, 40); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld2q_dup_s32(__p0) __extension__ ({ \ - int32x4x2_t __ret; \ - __builtin_neon_vld2q_dup_v(&__ret, __p0, 34); \ - __ret; \ -}) -#else -#define vld2q_dup_s32(__p0) __extension__ ({ \ - int32x4x2_t __ret; \ - __builtin_neon_vld2q_dup_v(&__ret, __p0, 34); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld2q_dup_s64(__p0) __extension__ ({ \ - int64x2x2_t __ret; \ - __builtin_neon_vld2q_dup_v(&__ret, __p0, 35); \ - __ret; \ -}) -#else -#define vld2q_dup_s64(__p0) __extension__ ({ \ - int64x2x2_t __ret; \ - __builtin_neon_vld2q_dup_v(&__ret, __p0, 35); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld2q_dup_s16(__p0) __extension__ ({ \ - int16x8x2_t __ret; \ - __builtin_neon_vld2q_dup_v(&__ret, __p0, 33); \ - __ret; \ -}) -#else -#define vld2q_dup_s16(__p0) __extension__ ({ \ - int16x8x2_t __ret; \ - __builtin_neon_vld2q_dup_v(&__ret, __p0, 33); \ - \ - __ret.val[0] = 
__builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld2_dup_f64(__p0) __extension__ ({ \ - float64x1x2_t __ret; \ - __builtin_neon_vld2_dup_v(&__ret, __p0, 10); \ - __ret; \ -}) -#else -#define vld2_dup_f64(__p0) __extension__ ({ \ - float64x1x2_t __ret; \ - __builtin_neon_vld2_dup_v(&__ret, __p0, 10); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld2_lane_p64(__p0, __p1, __p2) __extension__ ({ \ - poly64x1x2_t __s1 = __p1; \ - poly64x1x2_t __ret; \ - __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 6); \ - __ret; \ -}) -#else -#define vld2_lane_p64(__p0, __p1, __p2) __extension__ ({ \ - poly64x1x2_t __s1 = __p1; \ - poly64x1x2_t __ret; \ - __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 6); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld2q_lane_p8(__p0, __p1, __p2) __extension__ ({ \ - poly8x16x2_t __s1 = __p1; \ - poly8x16x2_t __ret; \ - __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 36); \ - __ret; \ -}) -#else -#define vld2q_lane_p8(__p0, __p1, __p2) __extension__ ({ \ - poly8x16x2_t __s1 = __p1; \ - poly8x16x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - poly8x16x2_t __ret; \ - __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 36); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld2q_lane_p64(__p0, __p1, __p2) __extension__ ({ \ - poly64x2x2_t __s1 = __p1; \ - poly64x2x2_t __ret; \ - __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 38); \ - __ret; \ -}) -#else -#define vld2q_lane_p64(__p0, __p1, __p2) __extension__ ({ \ - poly64x2x2_t __s1 = __p1; \ - poly64x2x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - poly64x2x2_t __ret; \ - __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 38); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld2q_lane_u8(__p0, __p1, __p2) __extension__ ({ \ - uint8x16x2_t __s1 = __p1; \ - uint8x16x2_t __ret; \ - __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 48); \ - __ret; \ -}) -#else -#define vld2q_lane_u8(__p0, __p1, __p2) __extension__ ({ \ - uint8x16x2_t __s1 = __p1; \ - uint8x16x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - 
uint8x16x2_t __ret; \ - __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 48); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld2q_lane_u64(__p0, __p1, __p2) __extension__ ({ \ - uint64x2x2_t __s1 = __p1; \ - uint64x2x2_t __ret; \ - __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 51); \ - __ret; \ -}) -#else -#define vld2q_lane_u64(__p0, __p1, __p2) __extension__ ({ \ - uint64x2x2_t __s1 = __p1; \ - uint64x2x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - uint64x2x2_t __ret; \ - __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 51); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld2q_lane_s8(__p0, __p1, __p2) __extension__ ({ \ - int8x16x2_t __s1 = __p1; \ - int8x16x2_t __ret; \ - __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 32); \ - __ret; \ -}) -#else -#define vld2q_lane_s8(__p0, __p1, __p2) __extension__ ({ \ - int8x16x2_t __s1 = __p1; \ - int8x16x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - int8x16x2_t __ret; \ - __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 32); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld2q_lane_f64(__p0, __p1, __p2) __extension__ ({ \ - float64x2x2_t __s1 = __p1; \ - float64x2x2_t __ret; \ - __builtin_neon_vld2q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 42); \ - __ret; \ -}) -#else -#define vld2q_lane_f64(__p0, __p1, __p2) __extension__ ({ \ - float64x2x2_t __s1 = __p1; \ - float64x2x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - float64x2x2_t __ret; \ - __builtin_neon_vld2q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __p2, 42); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld2q_lane_s64(__p0, __p1, __p2) __extension__ ({ \ - int64x2x2_t __s1 = __p1; \ - int64x2x2_t __ret; \ - __builtin_neon_vld2q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 35); \ - __ret; \ -}) -#else -#define vld2q_lane_s64(__p0, __p1, __p2) __extension__ ({ \ - int64x2x2_t __s1 = __p1; \ - int64x2x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 
0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - int64x2x2_t __ret; \ - __builtin_neon_vld2q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __p2, 35); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld2_lane_u64(__p0, __p1, __p2) __extension__ ({ \ - uint64x1x2_t __s1 = __p1; \ - uint64x1x2_t __ret; \ - __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 19); \ - __ret; \ -}) -#else -#define vld2_lane_u64(__p0, __p1, __p2) __extension__ ({ \ - uint64x1x2_t __s1 = __p1; \ - uint64x1x2_t __ret; \ - __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 19); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld2_lane_f64(__p0, __p1, __p2) __extension__ ({ \ - float64x1x2_t __s1 = __p1; \ - float64x1x2_t __ret; \ - __builtin_neon_vld2_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 10); \ - __ret; \ -}) -#else -#define vld2_lane_f64(__p0, __p1, __p2) __extension__ ({ \ - float64x1x2_t __s1 = __p1; \ - float64x1x2_t __ret; \ - __builtin_neon_vld2_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 10); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld2_lane_s64(__p0, __p1, __p2) __extension__ ({ \ - int64x1x2_t __s1 = __p1; \ - int64x1x2_t __ret; \ - __builtin_neon_vld2_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 3); \ - __ret; \ -}) -#else -#define vld2_lane_s64(__p0, __p1, __p2) __extension__ ({ \ - int64x1x2_t __s1 = __p1; \ - int64x1x2_t __ret; \ - __builtin_neon_vld2_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 3); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld3_p64(__p0) __extension__ ({ \ - poly64x1x3_t __ret; \ - __builtin_neon_vld3_v(&__ret, __p0, 6); \ - __ret; \ -}) -#else -#define vld3_p64(__p0) __extension__ ({ \ - poly64x1x3_t __ret; \ - __builtin_neon_vld3_v(&__ret, __p0, 6); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld3q_p64(__p0) __extension__ ({ \ - poly64x2x3_t __ret; \ - __builtin_neon_vld3q_v(&__ret, __p0, 38); \ - __ret; \ -}) -#else -#define vld3q_p64(__p0) __extension__ ({ \ - poly64x2x3_t __ret; \ - __builtin_neon_vld3q_v(&__ret, __p0, 38); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld3q_u64(__p0) __extension__ ({ \ - uint64x2x3_t __ret; \ - __builtin_neon_vld3q_v(&__ret, __p0, 51); \ - __ret; \ -}) -#else -#define vld3q_u64(__p0) __extension__ ({ \ - uint64x2x3_t __ret; \ - __builtin_neon_vld3q_v(&__ret, __p0, 51); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld3q_f64(__p0) __extension__ ({ \ - float64x2x3_t __ret; \ - __builtin_neon_vld3q_v(&__ret, __p0, 42); \ - __ret; \ -}) -#else -#define vld3q_f64(__p0) __extension__ ({ \ - float64x2x3_t __ret; \ - __builtin_neon_vld3q_v(&__ret, __p0, 42); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - 
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld3q_s64(__p0) __extension__ ({ \ - int64x2x3_t __ret; \ - __builtin_neon_vld3q_v(&__ret, __p0, 35); \ - __ret; \ -}) -#else -#define vld3q_s64(__p0) __extension__ ({ \ - int64x2x3_t __ret; \ - __builtin_neon_vld3q_v(&__ret, __p0, 35); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld3_f64(__p0) __extension__ ({ \ - float64x1x3_t __ret; \ - __builtin_neon_vld3_v(&__ret, __p0, 10); \ - __ret; \ -}) -#else -#define vld3_f64(__p0) __extension__ ({ \ - float64x1x3_t __ret; \ - __builtin_neon_vld3_v(&__ret, __p0, 10); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld3_dup_p64(__p0) __extension__ ({ \ - poly64x1x3_t __ret; \ - __builtin_neon_vld3_dup_v(&__ret, __p0, 6); \ - __ret; \ -}) -#else -#define vld3_dup_p64(__p0) __extension__ ({ \ - poly64x1x3_t __ret; \ - __builtin_neon_vld3_dup_v(&__ret, __p0, 6); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld3q_dup_p8(__p0) __extension__ ({ \ - poly8x16x3_t __ret; \ - __builtin_neon_vld3q_dup_v(&__ret, __p0, 36); \ - __ret; \ -}) -#else -#define vld3q_dup_p8(__p0) __extension__ ({ \ - poly8x16x3_t __ret; \ - __builtin_neon_vld3q_dup_v(&__ret, __p0, 36); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld3q_dup_p64(__p0) __extension__ ({ \ - poly64x2x3_t __ret; \ - __builtin_neon_vld3q_dup_v(&__ret, __p0, 38); \ - __ret; \ -}) -#else -#define vld3q_dup_p64(__p0) __extension__ ({ \ - poly64x2x3_t __ret; \ - __builtin_neon_vld3q_dup_v(&__ret, __p0, 38); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld3q_dup_p16(__p0) __extension__ ({ \ - poly16x8x3_t __ret; \ - __builtin_neon_vld3q_dup_v(&__ret, __p0, 37); \ - __ret; \ -}) -#else -#define vld3q_dup_p16(__p0) __extension__ ({ \ - poly16x8x3_t __ret; \ - __builtin_neon_vld3q_dup_v(&__ret, __p0, 37); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld3q_dup_u8(__p0) __extension__ ({ \ - uint8x16x3_t __ret; \ - __builtin_neon_vld3q_dup_v(&__ret, __p0, 48); \ - __ret; \ -}) -#else -#define vld3q_dup_u8(__p0) __extension__ ({ \ - uint8x16x3_t __ret; \ - __builtin_neon_vld3q_dup_v(&__ret, __p0, 48); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 
14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld3q_dup_u32(__p0) __extension__ ({ \ - uint32x4x3_t __ret; \ - __builtin_neon_vld3q_dup_v(&__ret, __p0, 50); \ - __ret; \ -}) -#else -#define vld3q_dup_u32(__p0) __extension__ ({ \ - uint32x4x3_t __ret; \ - __builtin_neon_vld3q_dup_v(&__ret, __p0, 50); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld3q_dup_u64(__p0) __extension__ ({ \ - uint64x2x3_t __ret; \ - __builtin_neon_vld3q_dup_v(&__ret, __p0, 51); \ - __ret; \ -}) -#else -#define vld3q_dup_u64(__p0) __extension__ ({ \ - uint64x2x3_t __ret; \ - __builtin_neon_vld3q_dup_v(&__ret, __p0, 51); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld3q_dup_u16(__p0) __extension__ ({ \ - uint16x8x3_t __ret; \ - __builtin_neon_vld3q_dup_v(&__ret, __p0, 49); \ - __ret; \ -}) -#else -#define vld3q_dup_u16(__p0) __extension__ ({ \ - uint16x8x3_t __ret; \ - __builtin_neon_vld3q_dup_v(&__ret, __p0, 49); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld3q_dup_s8(__p0) __extension__ ({ \ - int8x16x3_t __ret; \ - __builtin_neon_vld3q_dup_v(&__ret, __p0, 32); \ - __ret; \ -}) -#else -#define vld3q_dup_s8(__p0) __extension__ ({ \ - int8x16x3_t __ret; \ - __builtin_neon_vld3q_dup_v(&__ret, __p0, 32); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld3q_dup_f64(__p0) __extension__ ({ \ - float64x2x3_t __ret; \ - __builtin_neon_vld3q_dup_v(&__ret, __p0, 42); \ - __ret; \ -}) -#else -#define vld3q_dup_f64(__p0) __extension__ ({ \ - float64x2x3_t __ret; \ - __builtin_neon_vld3q_dup_v(&__ret, __p0, 42); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld3q_dup_f32(__p0) __extension__ ({ \ - float32x4x3_t __ret; \ - __builtin_neon_vld3q_dup_v(&__ret, __p0, 41); \ - __ret; \ -}) -#else -#define vld3q_dup_f32(__p0) __extension__ ({ \ - float32x4x3_t 
__ret; \ - __builtin_neon_vld3q_dup_v(&__ret, __p0, 41); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld3q_dup_f16(__p0) __extension__ ({ \ - float16x8x3_t __ret; \ - __builtin_neon_vld3q_dup_v(&__ret, __p0, 40); \ - __ret; \ -}) -#else -#define vld3q_dup_f16(__p0) __extension__ ({ \ - float16x8x3_t __ret; \ - __builtin_neon_vld3q_dup_v(&__ret, __p0, 40); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld3q_dup_s32(__p0) __extension__ ({ \ - int32x4x3_t __ret; \ - __builtin_neon_vld3q_dup_v(&__ret, __p0, 34); \ - __ret; \ -}) -#else -#define vld3q_dup_s32(__p0) __extension__ ({ \ - int32x4x3_t __ret; \ - __builtin_neon_vld3q_dup_v(&__ret, __p0, 34); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld3q_dup_s64(__p0) __extension__ ({ \ - int64x2x3_t __ret; \ - __builtin_neon_vld3q_dup_v(&__ret, __p0, 35); \ - __ret; \ -}) -#else -#define vld3q_dup_s64(__p0) __extension__ ({ \ - int64x2x3_t __ret; \ - __builtin_neon_vld3q_dup_v(&__ret, __p0, 35); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld3q_dup_s16(__p0) __extension__ ({ \ - int16x8x3_t __ret; \ - __builtin_neon_vld3q_dup_v(&__ret, __p0, 33); \ - __ret; \ -}) -#else -#define vld3q_dup_s16(__p0) __extension__ ({ \ - int16x8x3_t __ret; \ - __builtin_neon_vld3q_dup_v(&__ret, __p0, 33); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld3_dup_f64(__p0) __extension__ ({ \ - float64x1x3_t __ret; \ - __builtin_neon_vld3_dup_v(&__ret, __p0, 10); \ - __ret; \ -}) -#else -#define vld3_dup_f64(__p0) __extension__ ({ \ - float64x1x3_t __ret; \ - __builtin_neon_vld3_dup_v(&__ret, __p0, 10); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld3_lane_p64(__p0, __p1, __p2) __extension__ ({ \ - poly64x1x3_t __s1 = __p1; \ - poly64x1x3_t __ret; \ - __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 6); \ - __ret; \ -}) -#else -#define vld3_lane_p64(__p0, __p1, __p2) __extension__ ({ \ - poly64x1x3_t __s1 = __p1; \ - poly64x1x3_t __ret; \ - __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 6); \ - __ret; \ -}) -#endif - -#ifdef 
__LITTLE_ENDIAN__ -#define vld3q_lane_p8(__p0, __p1, __p2) __extension__ ({ \ - poly8x16x3_t __s1 = __p1; \ - poly8x16x3_t __ret; \ - __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 36); \ - __ret; \ -}) -#else -#define vld3q_lane_p8(__p0, __p1, __p2) __extension__ ({ \ - poly8x16x3_t __s1 = __p1; \ - poly8x16x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - poly8x16x3_t __ret; \ - __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 36); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld3q_lane_p64(__p0, __p1, __p2) __extension__ ({ \ - poly64x2x3_t __s1 = __p1; \ - poly64x2x3_t __ret; \ - __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 38); \ - __ret; \ -}) -#else -#define vld3q_lane_p64(__p0, __p1, __p2) __extension__ ({ \ - poly64x2x3_t __s1 = __p1; \ - poly64x2x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ - poly64x2x3_t __ret; \ - __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 38); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld3q_lane_u8(__p0, __p1, __p2) __extension__ ({ \ - uint8x16x3_t __s1 = __p1; \ - uint8x16x3_t __ret; \ - __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 48); \ - __ret; \ -}) -#else -#define vld3q_lane_u8(__p0, __p1, __p2) __extension__ ({ \ - uint8x16x3_t __s1 = __p1; \ - uint8x16x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint8x16x3_t __ret; \ - __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 48); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 
11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld3q_lane_u64(__p0, __p1, __p2) __extension__ ({ \ - uint64x2x3_t __s1 = __p1; \ - uint64x2x3_t __ret; \ - __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 51); \ - __ret; \ -}) -#else -#define vld3q_lane_u64(__p0, __p1, __p2) __extension__ ({ \ - uint64x2x3_t __s1 = __p1; \ - uint64x2x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ - uint64x2x3_t __ret; \ - __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 51); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld3q_lane_s8(__p0, __p1, __p2) __extension__ ({ \ - int8x16x3_t __s1 = __p1; \ - int8x16x3_t __ret; \ - __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 32); \ - __ret; \ -}) -#else -#define vld3q_lane_s8(__p0, __p1, __p2) __extension__ ({ \ - int8x16x3_t __s1 = __p1; \ - int8x16x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - int8x16x3_t __ret; \ - __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 32); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld3q_lane_f64(__p0, __p1, __p2) __extension__ ({ \ - float64x2x3_t __s1 = __p1; \ - float64x2x3_t __ret; \ - __builtin_neon_vld3q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 42); \ - __ret; \ -}) -#else -#define vld3q_lane_f64(__p0, __p1, __p2) __extension__ ({ \ - float64x2x3_t __s1 = __p1; \ - float64x2x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ - float64x2x3_t __ret; \ - __builtin_neon_vld3q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 42); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], 
__ret.val[2], 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld3q_lane_s64(__p0, __p1, __p2) __extension__ ({ \ - int64x2x3_t __s1 = __p1; \ - int64x2x3_t __ret; \ - __builtin_neon_vld3q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 35); \ - __ret; \ -}) -#else -#define vld3q_lane_s64(__p0, __p1, __p2) __extension__ ({ \ - int64x2x3_t __s1 = __p1; \ - int64x2x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ - int64x2x3_t __ret; \ - __builtin_neon_vld3q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 35); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld3_lane_u64(__p0, __p1, __p2) __extension__ ({ \ - uint64x1x3_t __s1 = __p1; \ - uint64x1x3_t __ret; \ - __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 19); \ - __ret; \ -}) -#else -#define vld3_lane_u64(__p0, __p1, __p2) __extension__ ({ \ - uint64x1x3_t __s1 = __p1; \ - uint64x1x3_t __ret; \ - __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 19); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld3_lane_f64(__p0, __p1, __p2) __extension__ ({ \ - float64x1x3_t __s1 = __p1; \ - float64x1x3_t __ret; \ - __builtin_neon_vld3_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 10); \ - __ret; \ -}) -#else -#define vld3_lane_f64(__p0, __p1, __p2) __extension__ ({ \ - float64x1x3_t __s1 = __p1; \ - float64x1x3_t __ret; \ - __builtin_neon_vld3_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 10); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld3_lane_s64(__p0, __p1, __p2) __extension__ ({ \ - int64x1x3_t __s1 = __p1; \ - int64x1x3_t __ret; \ - __builtin_neon_vld3_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 3); \ - __ret; \ -}) -#else -#define vld3_lane_s64(__p0, __p1, __p2) __extension__ ({ \ - int64x1x3_t __s1 = __p1; \ - int64x1x3_t __ret; \ - __builtin_neon_vld3_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 3); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld4_p64(__p0) __extension__ ({ \ - poly64x1x4_t __ret; \ - __builtin_neon_vld4_v(&__ret, __p0, 6); \ - __ret; \ -}) -#else -#define vld4_p64(__p0) __extension__ ({ \ - poly64x1x4_t __ret; \ - __builtin_neon_vld4_v(&__ret, __p0, 6); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld4q_p64(__p0) __extension__ ({ \ - poly64x2x4_t __ret; \ - __builtin_neon_vld4q_v(&__ret, __p0, 38); \ - __ret; \ -}) -#else -#define vld4q_p64(__p0) __extension__ ({ \ - poly64x2x4_t __ret; \ - __builtin_neon_vld4q_v(&__ret, __p0, 38); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld4q_u64(__p0) __extension__ ({ \ - 
uint64x2x4_t __ret; \ - __builtin_neon_vld4q_v(&__ret, __p0, 51); \ - __ret; \ -}) -#else -#define vld4q_u64(__p0) __extension__ ({ \ - uint64x2x4_t __ret; \ - __builtin_neon_vld4q_v(&__ret, __p0, 51); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld4q_f64(__p0) __extension__ ({ \ - float64x2x4_t __ret; \ - __builtin_neon_vld4q_v(&__ret, __p0, 42); \ - __ret; \ -}) -#else -#define vld4q_f64(__p0) __extension__ ({ \ - float64x2x4_t __ret; \ - __builtin_neon_vld4q_v(&__ret, __p0, 42); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld4q_s64(__p0) __extension__ ({ \ - int64x2x4_t __ret; \ - __builtin_neon_vld4q_v(&__ret, __p0, 35); \ - __ret; \ -}) -#else -#define vld4q_s64(__p0) __extension__ ({ \ - int64x2x4_t __ret; \ - __builtin_neon_vld4q_v(&__ret, __p0, 35); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld4_f64(__p0) __extension__ ({ \ - float64x1x4_t __ret; \ - __builtin_neon_vld4_v(&__ret, __p0, 10); \ - __ret; \ -}) -#else -#define vld4_f64(__p0) __extension__ ({ \ - float64x1x4_t __ret; \ - __builtin_neon_vld4_v(&__ret, __p0, 10); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld4_dup_p64(__p0) __extension__ ({ \ - poly64x1x4_t __ret; \ - __builtin_neon_vld4_dup_v(&__ret, __p0, 6); \ - __ret; \ -}) -#else -#define vld4_dup_p64(__p0) __extension__ ({ \ - poly64x1x4_t __ret; \ - __builtin_neon_vld4_dup_v(&__ret, __p0, 6); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld4q_dup_p8(__p0) __extension__ ({ \ - poly8x16x4_t __ret; \ - __builtin_neon_vld4q_dup_v(&__ret, __p0, 36); \ - __ret; \ -}) -#else -#define vld4q_dup_p8(__p0) __extension__ ({ \ - poly8x16x4_t __ret; \ - __builtin_neon_vld4q_dup_v(&__ret, __p0, 36); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld4q_dup_p64(__p0) __extension__ ({ \ - poly64x2x4_t __ret; \ - __builtin_neon_vld4q_dup_v(&__ret, __p0, 38); \ - __ret; \ -}) -#else -#define vld4q_dup_p64(__p0) __extension__ ({ \ - poly64x2x4_t __ret; \ - __builtin_neon_vld4q_dup_v(&__ret, __p0, 38); \ - \ - __ret.val[0] = 
__builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld4q_dup_p16(__p0) __extension__ ({ \ - poly16x8x4_t __ret; \ - __builtin_neon_vld4q_dup_v(&__ret, __p0, 37); \ - __ret; \ -}) -#else -#define vld4q_dup_p16(__p0) __extension__ ({ \ - poly16x8x4_t __ret; \ - __builtin_neon_vld4q_dup_v(&__ret, __p0, 37); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld4q_dup_u8(__p0) __extension__ ({ \ - uint8x16x4_t __ret; \ - __builtin_neon_vld4q_dup_v(&__ret, __p0, 48); \ - __ret; \ -}) -#else -#define vld4q_dup_u8(__p0) __extension__ ({ \ - uint8x16x4_t __ret; \ - __builtin_neon_vld4q_dup_v(&__ret, __p0, 48); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld4q_dup_u32(__p0) __extension__ ({ \ - uint32x4x4_t __ret; \ - __builtin_neon_vld4q_dup_v(&__ret, __p0, 50); \ - __ret; \ -}) -#else -#define vld4q_dup_u32(__p0) __extension__ ({ \ - uint32x4x4_t __ret; \ - __builtin_neon_vld4q_dup_v(&__ret, __p0, 50); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld4q_dup_u64(__p0) __extension__ ({ \ - uint64x2x4_t __ret; \ - __builtin_neon_vld4q_dup_v(&__ret, __p0, 51); \ - __ret; \ -}) -#else -#define vld4q_dup_u64(__p0) __extension__ ({ \ - uint64x2x4_t __ret; \ - __builtin_neon_vld4q_dup_v(&__ret, __p0, 51); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld4q_dup_u16(__p0) __extension__ ({ \ - uint16x8x4_t __ret; \ - __builtin_neon_vld4q_dup_v(&__ret, __p0, 49); \ - __ret; \ -}) -#else -#define vld4q_dup_u16(__p0) __extension__ ({ \ - uint16x8x4_t __ret; \ - __builtin_neon_vld4q_dup_v(&__ret, __p0, 49); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = 
__builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld4q_dup_s8(__p0) __extension__ ({ \ - int8x16x4_t __ret; \ - __builtin_neon_vld4q_dup_v(&__ret, __p0, 32); \ - __ret; \ -}) -#else -#define vld4q_dup_s8(__p0) __extension__ ({ \ - int8x16x4_t __ret; \ - __builtin_neon_vld4q_dup_v(&__ret, __p0, 32); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld4q_dup_f64(__p0) __extension__ ({ \ - float64x2x4_t __ret; \ - __builtin_neon_vld4q_dup_v(&__ret, __p0, 42); \ - __ret; \ -}) -#else -#define vld4q_dup_f64(__p0) __extension__ ({ \ - float64x2x4_t __ret; \ - __builtin_neon_vld4q_dup_v(&__ret, __p0, 42); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld4q_dup_f32(__p0) __extension__ ({ \ - float32x4x4_t __ret; \ - __builtin_neon_vld4q_dup_v(&__ret, __p0, 41); \ - __ret; \ -}) -#else -#define vld4q_dup_f32(__p0) __extension__ ({ \ - float32x4x4_t __ret; \ - __builtin_neon_vld4q_dup_v(&__ret, __p0, 41); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld4q_dup_f16(__p0) __extension__ ({ \ - float16x8x4_t __ret; \ - __builtin_neon_vld4q_dup_v(&__ret, __p0, 40); \ - __ret; \ -}) -#else -#define vld4q_dup_f16(__p0) __extension__ ({ \ - float16x8x4_t __ret; \ - __builtin_neon_vld4q_dup_v(&__ret, __p0, 40); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld4q_dup_s32(__p0) __extension__ ({ \ - int32x4x4_t __ret; \ - __builtin_neon_vld4q_dup_v(&__ret, __p0, 34); \ - __ret; \ -}) -#else -#define vld4q_dup_s32(__p0) __extension__ ({ \ - int32x4x4_t __ret; \ - __builtin_neon_vld4q_dup_v(&__ret, __p0, 34); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], 
__ret.val[1], 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld4q_dup_s64(__p0) __extension__ ({ \ - int64x2x4_t __ret; \ - __builtin_neon_vld4q_dup_v(&__ret, __p0, 35); \ - __ret; \ -}) -#else -#define vld4q_dup_s64(__p0) __extension__ ({ \ - int64x2x4_t __ret; \ - __builtin_neon_vld4q_dup_v(&__ret, __p0, 35); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld4q_dup_s16(__p0) __extension__ ({ \ - int16x8x4_t __ret; \ - __builtin_neon_vld4q_dup_v(&__ret, __p0, 33); \ - __ret; \ -}) -#else -#define vld4q_dup_s16(__p0) __extension__ ({ \ - int16x8x4_t __ret; \ - __builtin_neon_vld4q_dup_v(&__ret, __p0, 33); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld4_dup_f64(__p0) __extension__ ({ \ - float64x1x4_t __ret; \ - __builtin_neon_vld4_dup_v(&__ret, __p0, 10); \ - __ret; \ -}) -#else -#define vld4_dup_f64(__p0) __extension__ ({ \ - float64x1x4_t __ret; \ - __builtin_neon_vld4_dup_v(&__ret, __p0, 10); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld4_lane_p64(__p0, __p1, __p2) __extension__ ({ \ - poly64x1x4_t __s1 = __p1; \ - poly64x1x4_t __ret; \ - __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 6); \ - __ret; \ -}) -#else -#define vld4_lane_p64(__p0, __p1, __p2) __extension__ ({ \ - poly64x1x4_t __s1 = __p1; \ - poly64x1x4_t __ret; \ - __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 6); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld4q_lane_p8(__p0, __p1, __p2) __extension__ ({ \ - poly8x16x4_t __s1 = __p1; \ - poly8x16x4_t __ret; \ - __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 36); \ - __ret; \ -}) -#else -#define vld4q_lane_p8(__p0, __p1, __p2) __extension__ ({ \ - poly8x16x4_t __s1 = __p1; \ - poly8x16x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - poly8x16x4_t __ret; \ - __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 
(int8x16_t)__rev1.val[3], __p2, 36); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld4q_lane_p64(__p0, __p1, __p2) __extension__ ({ \ - poly64x2x4_t __s1 = __p1; \ - poly64x2x4_t __ret; \ - __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 38); \ - __ret; \ -}) -#else -#define vld4q_lane_p64(__p0, __p1, __p2) __extension__ ({ \ - poly64x2x4_t __s1 = __p1; \ - poly64x2x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ - poly64x2x4_t __ret; \ - __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 38); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld4q_lane_u8(__p0, __p1, __p2) __extension__ ({ \ - uint8x16x4_t __s1 = __p1; \ - uint8x16x4_t __ret; \ - __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 48); \ - __ret; \ -}) -#else -#define vld4q_lane_u8(__p0, __p1, __p2) __extension__ ({ \ - uint8x16x4_t __s1 = __p1; \ - uint8x16x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint8x16x4_t __ret; \ - __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 48); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld4q_lane_u64(__p0, __p1, __p2) 
__extension__ ({ \ - uint64x2x4_t __s1 = __p1; \ - uint64x2x4_t __ret; \ - __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 51); \ - __ret; \ -}) -#else -#define vld4q_lane_u64(__p0, __p1, __p2) __extension__ ({ \ - uint64x2x4_t __s1 = __p1; \ - uint64x2x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ - uint64x2x4_t __ret; \ - __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 51); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld4q_lane_s8(__p0, __p1, __p2) __extension__ ({ \ - int8x16x4_t __s1 = __p1; \ - int8x16x4_t __ret; \ - __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 32); \ - __ret; \ -}) -#else -#define vld4q_lane_s8(__p0, __p1, __p2) __extension__ ({ \ - int8x16x4_t __s1 = __p1; \ - int8x16x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - int8x16x4_t __ret; \ - __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 32); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld4q_lane_f64(__p0, __p1, __p2) __extension__ ({ \ - float64x2x4_t __s1 = __p1; \ - float64x2x4_t __ret; \ - __builtin_neon_vld4q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 42); \ - __ret; \ -}) -#else -#define vld4q_lane_f64(__p0, __p1, __p2) __extension__ ({ \ - float64x2x4_t __s1 = __p1; \ - float64x2x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ - 
float64x2x4_t __ret; \ - __builtin_neon_vld4q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 42); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld4q_lane_s64(__p0, __p1, __p2) __extension__ ({ \ - int64x2x4_t __s1 = __p1; \ - int64x2x4_t __ret; \ - __builtin_neon_vld4q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 35); \ - __ret; \ -}) -#else -#define vld4q_lane_s64(__p0, __p1, __p2) __extension__ ({ \ - int64x2x4_t __s1 = __p1; \ - int64x2x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ - int64x2x4_t __ret; \ - __builtin_neon_vld4q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 35); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld4_lane_u64(__p0, __p1, __p2) __extension__ ({ \ - uint64x1x4_t __s1 = __p1; \ - uint64x1x4_t __ret; \ - __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 19); \ - __ret; \ -}) -#else -#define vld4_lane_u64(__p0, __p1, __p2) __extension__ ({ \ - uint64x1x4_t __s1 = __p1; \ - uint64x1x4_t __ret; \ - __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 19); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld4_lane_f64(__p0, __p1, __p2) __extension__ ({ \ - float64x1x4_t __s1 = __p1; \ - float64x1x4_t __ret; \ - __builtin_neon_vld4_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 10); \ - __ret; \ -}) -#else -#define vld4_lane_f64(__p0, __p1, __p2) __extension__ ({ \ - float64x1x4_t __s1 = __p1; \ - float64x1x4_t __ret; \ - __builtin_neon_vld4_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 10); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld4_lane_s64(__p0, __p1, __p2) __extension__ ({ \ - int64x1x4_t __s1 = __p1; \ - int64x1x4_t __ret; \ - __builtin_neon_vld4_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 3); \ - __ret; \ -}) -#else -#define vld4_lane_s64(__p0, __p1, __p2) __extension__ ({ \ - int64x1x4_t __s1 = __p1; \ - int64x1x4_t __ret; \ - __builtin_neon_vld4_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 3); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vldrq_p128(__p0) __extension__ ({ \ - poly128_t __ret; \ - __ret = (poly128_t) __builtin_neon_vldrq_p128(__p0); \ - __ret; \ -}) -#else -#define vldrq_p128(__p0) __extension__ ({ \ - poly128_t __ret; \ - __ret = (poly128_t) 
__builtin_neon_vldrq_p128(__p0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vmaxq_f64(float64x2_t __p0, float64x2_t __p1) { - float64x2_t __ret; - __ret = (float64x2_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 42); - return __ret; -} -#else -__ai float64x2_t vmaxq_f64(float64x2_t __p0, float64x2_t __p1) { - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - float64x2_t __ret; - __ret = (float64x2_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64x1_t vmax_f64(float64x1_t __p0, float64x1_t __p1) { - float64x1_t __ret; - __ret = (float64x1_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 10); - return __ret; -} -#else -__ai float64x1_t vmax_f64(float64x1_t __p0, float64x1_t __p1) { - float64x1_t __ret; - __ret = (float64x1_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 10); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64_t vmaxnmvq_f64(float64x2_t __p0) { - float64_t __ret; - __ret = (float64_t) __builtin_neon_vmaxnmvq_f64((int8x16_t)__p0); - return __ret; -} -#else -__ai float64_t vmaxnmvq_f64(float64x2_t __p0) { - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float64_t __ret; - __ret = (float64_t) __builtin_neon_vmaxnmvq_f64((int8x16_t)__rev0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32_t vmaxnmvq_f32(float32x4_t __p0) { - float32_t __ret; - __ret = (float32_t) __builtin_neon_vmaxnmvq_f32((int8x16_t)__p0); - return __ret; -} -#else -__ai float32_t vmaxnmvq_f32(float32x4_t __p0) { - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float32_t __ret; - __ret = (float32_t) __builtin_neon_vmaxnmvq_f32((int8x16_t)__rev0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32_t vmaxnmv_f32(float32x2_t __p0) { - float32_t __ret; - __ret = (float32_t) __builtin_neon_vmaxnmv_f32((int8x8_t)__p0); - return __ret; -} -#else -__ai float32_t vmaxnmv_f32(float32x2_t __p0) { - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float32_t __ret; - __ret = (float32_t) __builtin_neon_vmaxnmv_f32((int8x8_t)__rev0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8_t vmaxvq_u8(uint8x16_t __p0) { - uint8_t __ret; - __ret = (uint8_t) __builtin_neon_vmaxvq_u8((int8x16_t)__p0); - return __ret; -} -#else -__ai uint8_t vmaxvq_u8(uint8x16_t __p0) { - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8_t __ret; - __ret = (uint8_t) __builtin_neon_vmaxvq_u8((int8x16_t)__rev0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32_t vmaxvq_u32(uint32x4_t __p0) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vmaxvq_u32((int8x16_t)__p0); - return __ret; -} -#else -__ai uint32_t vmaxvq_u32(uint32x4_t __p0) { - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vmaxvq_u32((int8x16_t)__rev0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16_t vmaxvq_u16(uint16x8_t __p0) { - uint16_t __ret; - __ret = (uint16_t) __builtin_neon_vmaxvq_u16((int8x16_t)__p0); - return __ret; -} -#else -__ai uint16_t vmaxvq_u16(uint16x8_t __p0) { - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, 
__p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint16_t __ret; - __ret = (uint16_t) __builtin_neon_vmaxvq_u16((int8x16_t)__rev0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8_t vmaxvq_s8(int8x16_t __p0) { - int8_t __ret; - __ret = (int8_t) __builtin_neon_vmaxvq_s8((int8x16_t)__p0); - return __ret; -} -#else -__ai int8_t vmaxvq_s8(int8x16_t __p0) { - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8_t __ret; - __ret = (int8_t) __builtin_neon_vmaxvq_s8((int8x16_t)__rev0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64_t vmaxvq_f64(float64x2_t __p0) { - float64_t __ret; - __ret = (float64_t) __builtin_neon_vmaxvq_f64((int8x16_t)__p0); - return __ret; -} -#else -__ai float64_t vmaxvq_f64(float64x2_t __p0) { - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float64_t __ret; - __ret = (float64_t) __builtin_neon_vmaxvq_f64((int8x16_t)__rev0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32_t vmaxvq_f32(float32x4_t __p0) { - float32_t __ret; - __ret = (float32_t) __builtin_neon_vmaxvq_f32((int8x16_t)__p0); - return __ret; -} -#else -__ai float32_t vmaxvq_f32(float32x4_t __p0) { - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float32_t __ret; - __ret = (float32_t) __builtin_neon_vmaxvq_f32((int8x16_t)__rev0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32_t vmaxvq_s32(int32x4_t __p0) { - int32_t __ret; - __ret = (int32_t) __builtin_neon_vmaxvq_s32((int8x16_t)__p0); - return __ret; -} -#else -__ai int32_t vmaxvq_s32(int32x4_t __p0) { - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32_t __ret; - __ret = (int32_t) __builtin_neon_vmaxvq_s32((int8x16_t)__rev0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16_t vmaxvq_s16(int16x8_t __p0) { - int16_t __ret; - __ret = (int16_t) __builtin_neon_vmaxvq_s16((int8x16_t)__p0); - return __ret; -} -#else -__ai int16_t vmaxvq_s16(int16x8_t __p0) { - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16_t __ret; - __ret = (int16_t) __builtin_neon_vmaxvq_s16((int8x16_t)__rev0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8_t vmaxv_u8(uint8x8_t __p0) { - uint8_t __ret; - __ret = (uint8_t) __builtin_neon_vmaxv_u8((int8x8_t)__p0); - return __ret; -} -#else -__ai uint8_t vmaxv_u8(uint8x8_t __p0) { - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint8_t __ret; - __ret = (uint8_t) __builtin_neon_vmaxv_u8((int8x8_t)__rev0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32_t vmaxv_u32(uint32x2_t __p0) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vmaxv_u32((int8x8_t)__p0); - return __ret; -} -#else -__ai uint32_t vmaxv_u32(uint32x2_t __p0) { - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vmaxv_u32((int8x8_t)__rev0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16_t vmaxv_u16(uint16x4_t __p0) { - uint16_t __ret; - __ret = (uint16_t) __builtin_neon_vmaxv_u16((int8x8_t)__p0); - return __ret; -} -#else -__ai uint16_t vmaxv_u16(uint16x4_t __p0) { - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint16_t __ret; - __ret = (uint16_t) __builtin_neon_vmaxv_u16((int8x8_t)__rev0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8_t vmaxv_s8(int8x8_t 
__p0) { - int8_t __ret; - __ret = (int8_t) __builtin_neon_vmaxv_s8((int8x8_t)__p0); - return __ret; -} -#else -__ai int8_t vmaxv_s8(int8x8_t __p0) { - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8_t __ret; - __ret = (int8_t) __builtin_neon_vmaxv_s8((int8x8_t)__rev0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32_t vmaxv_f32(float32x2_t __p0) { - float32_t __ret; - __ret = (float32_t) __builtin_neon_vmaxv_f32((int8x8_t)__p0); - return __ret; -} -#else -__ai float32_t vmaxv_f32(float32x2_t __p0) { - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float32_t __ret; - __ret = (float32_t) __builtin_neon_vmaxv_f32((int8x8_t)__rev0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32_t vmaxv_s32(int32x2_t __p0) { - int32_t __ret; - __ret = (int32_t) __builtin_neon_vmaxv_s32((int8x8_t)__p0); - return __ret; -} -#else -__ai int32_t vmaxv_s32(int32x2_t __p0) { - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32_t __ret; - __ret = (int32_t) __builtin_neon_vmaxv_s32((int8x8_t)__rev0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16_t vmaxv_s16(int16x4_t __p0) { - int16_t __ret; - __ret = (int16_t) __builtin_neon_vmaxv_s16((int8x8_t)__p0); - return __ret; -} -#else -__ai int16_t vmaxv_s16(int16x4_t __p0) { - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16_t __ret; - __ret = (int16_t) __builtin_neon_vmaxv_s16((int8x8_t)__rev0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vminq_f64(float64x2_t __p0, float64x2_t __p1) { - float64x2_t __ret; - __ret = (float64x2_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 42); - return __ret; -} -#else -__ai float64x2_t vminq_f64(float64x2_t __p0, float64x2_t __p1) { - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - float64x2_t __ret; - __ret = (float64x2_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64x1_t vmin_f64(float64x1_t __p0, float64x1_t __p1) { - float64x1_t __ret; - __ret = (float64x1_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 10); - return __ret; -} -#else -__ai float64x1_t vmin_f64(float64x1_t __p0, float64x1_t __p1) { - float64x1_t __ret; - __ret = (float64x1_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 10); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64_t vminnmvq_f64(float64x2_t __p0) { - float64_t __ret; - __ret = (float64_t) __builtin_neon_vminnmvq_f64((int8x16_t)__p0); - return __ret; -} -#else -__ai float64_t vminnmvq_f64(float64x2_t __p0) { - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float64_t __ret; - __ret = (float64_t) __builtin_neon_vminnmvq_f64((int8x16_t)__rev0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32_t vminnmvq_f32(float32x4_t __p0) { - float32_t __ret; - __ret = (float32_t) __builtin_neon_vminnmvq_f32((int8x16_t)__p0); - return __ret; -} -#else -__ai float32_t vminnmvq_f32(float32x4_t __p0) { - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float32_t __ret; - __ret = (float32_t) __builtin_neon_vminnmvq_f32((int8x16_t)__rev0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32_t vminnmv_f32(float32x2_t __p0) { - 
float32_t __ret; - __ret = (float32_t) __builtin_neon_vminnmv_f32((int8x8_t)__p0); - return __ret; -} -#else -__ai float32_t vminnmv_f32(float32x2_t __p0) { - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float32_t __ret; - __ret = (float32_t) __builtin_neon_vminnmv_f32((int8x8_t)__rev0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8_t vminvq_u8(uint8x16_t __p0) { - uint8_t __ret; - __ret = (uint8_t) __builtin_neon_vminvq_u8((int8x16_t)__p0); - return __ret; -} -#else -__ai uint8_t vminvq_u8(uint8x16_t __p0) { - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8_t __ret; - __ret = (uint8_t) __builtin_neon_vminvq_u8((int8x16_t)__rev0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32_t vminvq_u32(uint32x4_t __p0) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vminvq_u32((int8x16_t)__p0); - return __ret; -} -#else -__ai uint32_t vminvq_u32(uint32x4_t __p0) { - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vminvq_u32((int8x16_t)__rev0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16_t vminvq_u16(uint16x8_t __p0) { - uint16_t __ret; - __ret = (uint16_t) __builtin_neon_vminvq_u16((int8x16_t)__p0); - return __ret; -} -#else -__ai uint16_t vminvq_u16(uint16x8_t __p0) { - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint16_t __ret; - __ret = (uint16_t) __builtin_neon_vminvq_u16((int8x16_t)__rev0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8_t vminvq_s8(int8x16_t __p0) { - int8_t __ret; - __ret = (int8_t) __builtin_neon_vminvq_s8((int8x16_t)__p0); - return __ret; -} -#else -__ai int8_t vminvq_s8(int8x16_t __p0) { - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8_t __ret; - __ret = (int8_t) __builtin_neon_vminvq_s8((int8x16_t)__rev0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64_t vminvq_f64(float64x2_t __p0) { - float64_t __ret; - __ret = (float64_t) __builtin_neon_vminvq_f64((int8x16_t)__p0); - return __ret; -} -#else -__ai float64_t vminvq_f64(float64x2_t __p0) { - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float64_t __ret; - __ret = (float64_t) __builtin_neon_vminvq_f64((int8x16_t)__rev0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32_t vminvq_f32(float32x4_t __p0) { - float32_t __ret; - __ret = (float32_t) __builtin_neon_vminvq_f32((int8x16_t)__p0); - return __ret; -} -#else -__ai float32_t vminvq_f32(float32x4_t __p0) { - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float32_t __ret; - __ret = (float32_t) __builtin_neon_vminvq_f32((int8x16_t)__rev0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32_t vminvq_s32(int32x4_t __p0) { - int32_t __ret; - __ret = (int32_t) __builtin_neon_vminvq_s32((int8x16_t)__p0); - return __ret; -} -#else -__ai int32_t vminvq_s32(int32x4_t __p0) { - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32_t __ret; - __ret = (int32_t) __builtin_neon_vminvq_s32((int8x16_t)__rev0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16_t vminvq_s16(int16x8_t __p0) { - int16_t __ret; - __ret = (int16_t) __builtin_neon_vminvq_s16((int8x16_t)__p0); - return __ret; -} -#else -__ai int16_t vminvq_s16(int16x8_t 
__p0) { - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16_t __ret; - __ret = (int16_t) __builtin_neon_vminvq_s16((int8x16_t)__rev0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8_t vminv_u8(uint8x8_t __p0) { - uint8_t __ret; - __ret = (uint8_t) __builtin_neon_vminv_u8((int8x8_t)__p0); - return __ret; -} -#else -__ai uint8_t vminv_u8(uint8x8_t __p0) { - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint8_t __ret; - __ret = (uint8_t) __builtin_neon_vminv_u8((int8x8_t)__rev0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32_t vminv_u32(uint32x2_t __p0) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vminv_u32((int8x8_t)__p0); - return __ret; -} -#else -__ai uint32_t vminv_u32(uint32x2_t __p0) { - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vminv_u32((int8x8_t)__rev0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16_t vminv_u16(uint16x4_t __p0) { - uint16_t __ret; - __ret = (uint16_t) __builtin_neon_vminv_u16((int8x8_t)__p0); - return __ret; -} -#else -__ai uint16_t vminv_u16(uint16x4_t __p0) { - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint16_t __ret; - __ret = (uint16_t) __builtin_neon_vminv_u16((int8x8_t)__rev0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8_t vminv_s8(int8x8_t __p0) { - int8_t __ret; - __ret = (int8_t) __builtin_neon_vminv_s8((int8x8_t)__p0); - return __ret; -} -#else -__ai int8_t vminv_s8(int8x8_t __p0) { - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8_t __ret; - __ret = (int8_t) __builtin_neon_vminv_s8((int8x8_t)__rev0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32_t vminv_f32(float32x2_t __p0) { - float32_t __ret; - __ret = (float32_t) __builtin_neon_vminv_f32((int8x8_t)__p0); - return __ret; -} -#else -__ai float32_t vminv_f32(float32x2_t __p0) { - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float32_t __ret; - __ret = (float32_t) __builtin_neon_vminv_f32((int8x8_t)__rev0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32_t vminv_s32(int32x2_t __p0) { - int32_t __ret; - __ret = (int32_t) __builtin_neon_vminv_s32((int8x8_t)__p0); - return __ret; -} -#else -__ai int32_t vminv_s32(int32x2_t __p0) { - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32_t __ret; - __ret = (int32_t) __builtin_neon_vminv_s32((int8x8_t)__rev0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16_t vminv_s16(int16x4_t __p0) { - int16_t __ret; - __ret = (int16_t) __builtin_neon_vminv_s16((int8x8_t)__p0); - return __ret; -} -#else -__ai int16_t vminv_s16(int16x4_t __p0) { - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16_t __ret; - __ret = (int16_t) __builtin_neon_vminv_s16((int8x8_t)__rev0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vmlaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { - float64x2_t __ret; - __ret = __p0 + __p1 * __p2; - return __ret; -} -#else -__ai float64x2_t vmlaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - float64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); - 
float64x2_t __ret; - __ret = __rev0 + __rev1 * __rev2; - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64x1_t vmla_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) { - float64x1_t __ret; - __ret = __p0 + __p1 * __p2; - return __ret; -} -#else -__ai float64x1_t vmla_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) { - float64x1_t __ret; - __ret = __p0 + __p1 * __p2; - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmlaq_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \ - uint32x4_t __s0 = __p0; \ - uint32x4_t __s1 = __p1; \ - uint32x4_t __s2 = __p2; \ - uint32x4_t __ret; \ - __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \ - __ret; \ -}) -#else -#define vmlaq_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \ - uint32x4_t __s0 = __p0; \ - uint32x4_t __s1 = __p1; \ - uint32x4_t __s2 = __p2; \ - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ - uint32x4_t __ret; \ - __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmlaq_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \ - uint16x8_t __s0 = __p0; \ - uint16x8_t __s1 = __p1; \ - uint16x8_t __s2 = __p2; \ - uint16x8_t __ret; \ - __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \ - __ret; \ -}) -#else -#define vmlaq_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \ - uint16x8_t __s0 = __p0; \ - uint16x8_t __s1 = __p1; \ - uint16x8_t __s2 = __p2; \ - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint16x8_t __ret; \ - __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmlaq_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ - float32x4_t __s0 = __p0; \ - float32x4_t __s1 = __p1; \ - float32x4_t __s2 = __p2; \ - float32x4_t __ret; \ - __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \ - __ret; \ -}) -#else -#define vmlaq_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ - float32x4_t __s0 = __p0; \ - float32x4_t __s1 = __p1; \ - float32x4_t __s2 = __p2; \ - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - float32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ - float32x4_t __ret; \ - __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmlaq_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ - int32x4_t __s0 = __p0; \ - int32x4_t __s1 = __p1; \ - int32x4_t __s2 = __p2; \ - int32x4_t __ret; \ - __ret = __s0 + __s1 * 
__builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \ - __ret; \ -}) -#else -#define vmlaq_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ - int32x4_t __s0 = __p0; \ - int32x4_t __s1 = __p1; \ - int32x4_t __s2 = __p2; \ - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - int32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ - int32x4_t __ret; \ - __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmlaq_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ - int16x8_t __s0 = __p0; \ - int16x8_t __s1 = __p1; \ - int16x8_t __s2 = __p2; \ - int16x8_t __ret; \ - __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \ - __ret; \ -}) -#else -#define vmlaq_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ - int16x8_t __s0 = __p0; \ - int16x8_t __s1 = __p1; \ - int16x8_t __s2 = __p2; \ - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x8_t __ret; \ - __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmla_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \ - uint32x2_t __s0 = __p0; \ - uint32x2_t __s1 = __p1; \ - uint32x4_t __s2 = __p2; \ - uint32x2_t __ret; \ - __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \ - __ret; \ -}) -#else -#define vmla_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \ - uint32x2_t __s0 = __p0; \ - uint32x2_t __s1 = __p1; \ - uint32x4_t __s2 = __p2; \ - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ - uint32x2_t __ret; \ - __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmla_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \ - uint16x4_t __s0 = __p0; \ - uint16x4_t __s1 = __p1; \ - uint16x8_t __s2 = __p2; \ - uint16x4_t __ret; \ - __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \ - __ret; \ -}) -#else -#define vmla_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \ - uint16x4_t __s0 = __p0; \ - uint16x4_t __s1 = __p1; \ - uint16x8_t __s2 = __p2; \ - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint16x4_t __ret; \ - __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmla_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ - 
float32x2_t __s0 = __p0; \ - float32x2_t __s1 = __p1; \ - float32x4_t __s2 = __p2; \ - float32x2_t __ret; \ - __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \ - __ret; \ -}) -#else -#define vmla_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ - float32x2_t __s0 = __p0; \ - float32x2_t __s1 = __p1; \ - float32x4_t __s2 = __p2; \ - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - float32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ - float32x2_t __ret; \ - __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmla_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ - int32x2_t __s0 = __p0; \ - int32x2_t __s1 = __p1; \ - int32x4_t __s2 = __p2; \ - int32x2_t __ret; \ - __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \ - __ret; \ -}) -#else -#define vmla_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ - int32x2_t __s0 = __p0; \ - int32x2_t __s1 = __p1; \ - int32x4_t __s2 = __p2; \ - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - int32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ - int32x2_t __ret; \ - __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmla_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ - int16x4_t __s0 = __p0; \ - int16x4_t __s1 = __p1; \ - int16x8_t __s2 = __p2; \ - int16x4_t __ret; \ - __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \ - __ret; \ -}) -#else -#define vmla_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ - int16x4_t __s0 = __p0; \ - int16x4_t __s1 = __p1; \ - int16x8_t __s2 = __p2; \ - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - int16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x4_t __ret; \ - __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vmlaq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) { - float64x2_t __ret; - __ret = __p0 + __p1 * (float64x2_t) {__p2, __p2}; - return __ret; -} -#else -__ai float64x2_t vmlaq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) { - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - float64x2_t __ret; - __ret = __rev0 + __rev1 * (float64x2_t) {__p2, __p2}; - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmlal_high_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \ - uint64x2_t __s0 = __p0; \ - uint32x4_t __s1 = __p1; \ - uint32x2_t __s2 = __p2; \ - uint64x2_t __ret; \ - __ret = __s0 + vmull_u32(vget_high_u32(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3)); \ - __ret; \ -}) -#else -#define vmlal_high_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \ - 
uint64x2_t __s0 = __p0; \ - uint32x4_t __s1 = __p1; \ - uint32x2_t __s2 = __p2; \ - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \ - uint64x2_t __ret; \ - __ret = __rev0 + __noswap_vmull_u32(__noswap_vget_high_u32(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmlal_high_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \ - uint32x4_t __s0 = __p0; \ - uint16x8_t __s1 = __p1; \ - uint16x4_t __s2 = __p2; \ - uint32x4_t __ret; \ - __ret = __s0 + vmull_u16(vget_high_u16(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \ - __ret; \ -}) -#else -#define vmlal_high_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \ - uint32x4_t __s0 = __p0; \ - uint16x8_t __s1 = __p1; \ - uint16x4_t __s2 = __p2; \ - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ - uint32x4_t __ret; \ - __ret = __rev0 + __noswap_vmull_u16(__noswap_vget_high_u16(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmlal_high_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ - int64x2_t __s0 = __p0; \ - int32x4_t __s1 = __p1; \ - int32x2_t __s2 = __p2; \ - int64x2_t __ret; \ - __ret = __s0 + vmull_s32(vget_high_s32(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3)); \ - __ret; \ -}) -#else -#define vmlal_high_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ - int64x2_t __s0 = __p0; \ - int32x4_t __s1 = __p1; \ - int32x2_t __s2 = __p2; \ - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \ - int64x2_t __ret; \ - __ret = __rev0 + __noswap_vmull_s32(__noswap_vget_high_s32(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmlal_high_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ - int32x4_t __s0 = __p0; \ - int16x8_t __s1 = __p1; \ - int16x4_t __s2 = __p2; \ - int32x4_t __ret; \ - __ret = __s0 + vmull_s16(vget_high_s16(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \ - __ret; \ -}) -#else -#define vmlal_high_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ - int32x4_t __s0 = __p0; \ - int16x8_t __s1 = __p1; \ - int16x4_t __s2 = __p2; \ - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ - int32x4_t __ret; \ - __ret = __rev0 + __noswap_vmull_s16(__noswap_vget_high_s16(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmlal_high_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ 
\ - uint64x2_t __s0 = __p0; \ - uint32x4_t __s1 = __p1; \ - uint32x4_t __s2 = __p2; \ - uint64x2_t __ret; \ - __ret = __s0 + vmull_u32(vget_high_u32(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3)); \ - __ret; \ -}) -#else -#define vmlal_high_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \ - uint64x2_t __s0 = __p0; \ - uint32x4_t __s1 = __p1; \ - uint32x4_t __s2 = __p2; \ - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ - uint64x2_t __ret; \ - __ret = __rev0 + __noswap_vmull_u32(__noswap_vget_high_u32(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmlal_high_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \ - uint32x4_t __s0 = __p0; \ - uint16x8_t __s1 = __p1; \ - uint16x8_t __s2 = __p2; \ - uint32x4_t __ret; \ - __ret = __s0 + vmull_u16(vget_high_u16(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \ - __ret; \ -}) -#else -#define vmlal_high_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \ - uint32x4_t __s0 = __p0; \ - uint16x8_t __s1 = __p1; \ - uint16x8_t __s2 = __p2; \ - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint32x4_t __ret; \ - __ret = __rev0 + __noswap_vmull_u16(__noswap_vget_high_u16(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmlal_high_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ - int64x2_t __s0 = __p0; \ - int32x4_t __s1 = __p1; \ - int32x4_t __s2 = __p2; \ - int64x2_t __ret; \ - __ret = __s0 + vmull_s32(vget_high_s32(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3)); \ - __ret; \ -}) -#else -#define vmlal_high_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ - int64x2_t __s0 = __p0; \ - int32x4_t __s1 = __p1; \ - int32x4_t __s2 = __p2; \ - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - int32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ - int64x2_t __ret; \ - __ret = __rev0 + __noswap_vmull_s32(__noswap_vget_high_s32(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmlal_high_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ - int32x4_t __s0 = __p0; \ - int16x8_t __s1 = __p1; \ - int16x8_t __s2 = __p2; \ - int32x4_t __ret; \ - __ret = __s0 + vmull_s16(vget_high_s16(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \ - __ret; \ -}) -#else -#define vmlal_high_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ - int32x4_t __s0 = __p0; \ - int16x8_t __s1 = __p1; \ - int16x8_t __s2 = __p2; \ - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 
0); \ - int32x4_t __ret; \ - __ret = __rev0 + __noswap_vmull_s16(__noswap_vget_high_s16(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmlal_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \ - uint64x2_t __s0 = __p0; \ - uint32x2_t __s1 = __p1; \ - uint32x4_t __s2 = __p2; \ - uint64x2_t __ret; \ - __ret = __s0 + vmull_u32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3)); \ - __ret; \ -}) -#else -#define vmlal_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \ - uint64x2_t __s0 = __p0; \ - uint32x2_t __s1 = __p1; \ - uint32x4_t __s2 = __p2; \ - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ - uint64x2_t __ret; \ - __ret = __rev0 + __noswap_vmull_u32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmlal_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \ - uint32x4_t __s0 = __p0; \ - uint16x4_t __s1 = __p1; \ - uint16x8_t __s2 = __p2; \ - uint32x4_t __ret; \ - __ret = __s0 + vmull_u16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \ - __ret; \ -}) -#else -#define vmlal_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \ - uint32x4_t __s0 = __p0; \ - uint16x4_t __s1 = __p1; \ - uint16x8_t __s2 = __p2; \ - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint32x4_t __ret; \ - __ret = __rev0 + __noswap_vmull_u16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmlal_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ - int64x2_t __s0 = __p0; \ - int32x2_t __s1 = __p1; \ - int32x4_t __s2 = __p2; \ - int64x2_t __ret; \ - __ret = __s0 + vmull_s32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3)); \ - __ret; \ -}) -#else -#define vmlal_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ - int64x2_t __s0 = __p0; \ - int32x2_t __s1 = __p1; \ - int32x4_t __s2 = __p2; \ - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - int32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ - int64x2_t __ret; \ - __ret = __rev0 + __noswap_vmull_s32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmlal_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ - int32x4_t __s0 = __p0; \ - int16x4_t __s1 = __p1; \ - int16x8_t __s2 = __p2; \ - int32x4_t __ret; \ - __ret = __s0 + vmull_s16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \ - __ret; \ -}) -#else -#define vmlal_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ - int32x4_t __s0 = __p0; \ - int16x4_t __s1 = __p1; \ - int16x8_t __s2 = __p2; \ - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - int16x4_t __rev1; __rev1 
= __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - int16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \ - int32x4_t __ret; \ - __ret = __rev0 + __noswap_vmull_s16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vmlsq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { - float64x2_t __ret; - __ret = __p0 - __p1 * __p2; - return __ret; -} -#else -__ai float64x2_t vmlsq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - float64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); - float64x2_t __ret; - __ret = __rev0 - __rev1 * __rev2; - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64x1_t vmls_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) { - float64x1_t __ret; - __ret = __p0 - __p1 * __p2; - return __ret; -} -#else -__ai float64x1_t vmls_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) { - float64x1_t __ret; - __ret = __p0 - __p1 * __p2; - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmlsq_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \ - uint32x4_t __s0 = __p0; \ - uint32x4_t __s1 = __p1; \ - uint32x4_t __s2 = __p2; \ - uint32x4_t __ret; \ - __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \ - __ret; \ -}) -#else -#define vmlsq_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \ - uint32x4_t __s0 = __p0; \ - uint32x4_t __s1 = __p1; \ - uint32x4_t __s2 = __p2; \ - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ - uint32x4_t __ret; \ - __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmlsq_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \ - uint16x8_t __s0 = __p0; \ - uint16x8_t __s1 = __p1; \ - uint16x8_t __s2 = __p2; \ - uint16x8_t __ret; \ - __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \ - __ret; \ -}) -#else -#define vmlsq_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \ - uint16x8_t __s0 = __p0; \ - uint16x8_t __s1 = __p1; \ - uint16x8_t __s2 = __p2; \ - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint16x8_t __ret; \ - __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmlsq_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ - float32x4_t __s0 = __p0; \ - float32x4_t __s1 = __p1; \ - float32x4_t __s2 = __p2; \ - float32x4_t __ret; \ - __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); 
\ - __ret; \ -}) -#else -#define vmlsq_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ - float32x4_t __s0 = __p0; \ - float32x4_t __s1 = __p1; \ - float32x4_t __s2 = __p2; \ - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - float32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ - float32x4_t __ret; \ - __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmlsq_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ - int32x4_t __s0 = __p0; \ - int32x4_t __s1 = __p1; \ - int32x4_t __s2 = __p2; \ - int32x4_t __ret; \ - __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \ - __ret; \ -}) -#else -#define vmlsq_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ - int32x4_t __s0 = __p0; \ - int32x4_t __s1 = __p1; \ - int32x4_t __s2 = __p2; \ - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - int32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ - int32x4_t __ret; \ - __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmlsq_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ - int16x8_t __s0 = __p0; \ - int16x8_t __s1 = __p1; \ - int16x8_t __s2 = __p2; \ - int16x8_t __ret; \ - __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \ - __ret; \ -}) -#else -#define vmlsq_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ - int16x8_t __s0 = __p0; \ - int16x8_t __s1 = __p1; \ - int16x8_t __s2 = __p2; \ - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x8_t __ret; \ - __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmls_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \ - uint32x2_t __s0 = __p0; \ - uint32x2_t __s1 = __p1; \ - uint32x4_t __s2 = __p2; \ - uint32x2_t __ret; \ - __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \ - __ret; \ -}) -#else -#define vmls_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \ - uint32x2_t __s0 = __p0; \ - uint32x2_t __s1 = __p1; \ - uint32x4_t __s2 = __p2; \ - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ - uint32x2_t __ret; \ - __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmls_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \ - uint16x4_t __s0 = __p0; \ - uint16x4_t __s1 = __p1; \ - uint16x8_t __s2 
= __p2; \ - uint16x4_t __ret; \ - __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \ - __ret; \ -}) -#else -#define vmls_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \ - uint16x4_t __s0 = __p0; \ - uint16x4_t __s1 = __p1; \ - uint16x8_t __s2 = __p2; \ - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint16x4_t __ret; \ - __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmls_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ - float32x2_t __s0 = __p0; \ - float32x2_t __s1 = __p1; \ - float32x4_t __s2 = __p2; \ - float32x2_t __ret; \ - __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \ - __ret; \ -}) -#else -#define vmls_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ - float32x2_t __s0 = __p0; \ - float32x2_t __s1 = __p1; \ - float32x4_t __s2 = __p2; \ - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - float32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ - float32x2_t __ret; \ - __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmls_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ - int32x2_t __s0 = __p0; \ - int32x2_t __s1 = __p1; \ - int32x4_t __s2 = __p2; \ - int32x2_t __ret; \ - __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \ - __ret; \ -}) -#else -#define vmls_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ - int32x2_t __s0 = __p0; \ - int32x2_t __s1 = __p1; \ - int32x4_t __s2 = __p2; \ - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - int32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ - int32x2_t __ret; \ - __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmls_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ - int16x4_t __s0 = __p0; \ - int16x4_t __s1 = __p1; \ - int16x8_t __s2 = __p2; \ - int16x4_t __ret; \ - __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \ - __ret; \ -}) -#else -#define vmls_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ - int16x4_t __s0 = __p0; \ - int16x4_t __s1 = __p1; \ - int16x8_t __s2 = __p2; \ - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - int16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x4_t __ret; \ - __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vmlsq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) { - float64x2_t __ret; - __ret = __p0 - __p1 * 
(float64x2_t) {__p2, __p2}; - return __ret; -} -#else -__ai float64x2_t vmlsq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) { - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - float64x2_t __ret; - __ret = __rev0 - __rev1 * (float64x2_t) {__p2, __p2}; - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmlsl_high_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \ - uint64x2_t __s0 = __p0; \ - uint32x4_t __s1 = __p1; \ - uint32x2_t __s2 = __p2; \ - uint64x2_t __ret; \ - __ret = __s0 - vmull_u32(vget_high_u32(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3)); \ - __ret; \ -}) -#else -#define vmlsl_high_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \ - uint64x2_t __s0 = __p0; \ - uint32x4_t __s1 = __p1; \ - uint32x2_t __s2 = __p2; \ - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \ - uint64x2_t __ret; \ - __ret = __rev0 - __noswap_vmull_u32(__noswap_vget_high_u32(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmlsl_high_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \ - uint32x4_t __s0 = __p0; \ - uint16x8_t __s1 = __p1; \ - uint16x4_t __s2 = __p2; \ - uint32x4_t __ret; \ - __ret = __s0 - vmull_u16(vget_high_u16(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \ - __ret; \ -}) -#else -#define vmlsl_high_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \ - uint32x4_t __s0 = __p0; \ - uint16x8_t __s1 = __p1; \ - uint16x4_t __s2 = __p2; \ - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ - uint32x4_t __ret; \ - __ret = __rev0 - __noswap_vmull_u16(__noswap_vget_high_u16(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmlsl_high_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ - int64x2_t __s0 = __p0; \ - int32x4_t __s1 = __p1; \ - int32x2_t __s2 = __p2; \ - int64x2_t __ret; \ - __ret = __s0 - vmull_s32(vget_high_s32(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3)); \ - __ret; \ -}) -#else -#define vmlsl_high_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ - int64x2_t __s0 = __p0; \ - int32x4_t __s1 = __p1; \ - int32x2_t __s2 = __p2; \ - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \ - int64x2_t __ret; \ - __ret = __rev0 - __noswap_vmull_s32(__noswap_vget_high_s32(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmlsl_high_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ - int32x4_t __s0 = __p0; \ - int16x8_t __s1 = __p1; \ - int16x4_t __s2 = __p2; \ - int32x4_t __ret; \ - __ret = __s0 - 
vmull_s16(vget_high_s16(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \ - __ret; \ -}) -#else -#define vmlsl_high_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ - int32x4_t __s0 = __p0; \ - int16x8_t __s1 = __p1; \ - int16x4_t __s2 = __p2; \ - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ - int32x4_t __ret; \ - __ret = __rev0 - __noswap_vmull_s16(__noswap_vget_high_s16(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmlsl_high_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \ - uint64x2_t __s0 = __p0; \ - uint32x4_t __s1 = __p1; \ - uint32x4_t __s2 = __p2; \ - uint64x2_t __ret; \ - __ret = __s0 - vmull_u32(vget_high_u32(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3)); \ - __ret; \ -}) -#else -#define vmlsl_high_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \ - uint64x2_t __s0 = __p0; \ - uint32x4_t __s1 = __p1; \ - uint32x4_t __s2 = __p2; \ - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ - uint64x2_t __ret; \ - __ret = __rev0 - __noswap_vmull_u32(__noswap_vget_high_u32(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmlsl_high_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \ - uint32x4_t __s0 = __p0; \ - uint16x8_t __s1 = __p1; \ - uint16x8_t __s2 = __p2; \ - uint32x4_t __ret; \ - __ret = __s0 - vmull_u16(vget_high_u16(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \ - __ret; \ -}) -#else -#define vmlsl_high_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \ - uint32x4_t __s0 = __p0; \ - uint16x8_t __s1 = __p1; \ - uint16x8_t __s2 = __p2; \ - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint32x4_t __ret; \ - __ret = __rev0 - __noswap_vmull_u16(__noswap_vget_high_u16(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmlsl_high_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ - int64x2_t __s0 = __p0; \ - int32x4_t __s1 = __p1; \ - int32x4_t __s2 = __p2; \ - int64x2_t __ret; \ - __ret = __s0 - vmull_s32(vget_high_s32(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3)); \ - __ret; \ -}) -#else -#define vmlsl_high_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ - int64x2_t __s0 = __p0; \ - int32x4_t __s1 = __p1; \ - int32x4_t __s2 = __p2; \ - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - int32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ - int64x2_t __ret; \ - __ret = __rev0 - __noswap_vmull_s32(__noswap_vget_high_s32(__rev1), 
__builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmlsl_high_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ - int32x4_t __s0 = __p0; \ - int16x8_t __s1 = __p1; \ - int16x8_t __s2 = __p2; \ - int32x4_t __ret; \ - __ret = __s0 - vmull_s16(vget_high_s16(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \ - __ret; \ -}) -#else -#define vmlsl_high_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ - int32x4_t __s0 = __p0; \ - int16x8_t __s1 = __p1; \ - int16x8_t __s2 = __p2; \ - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \ - int32x4_t __ret; \ - __ret = __rev0 - __noswap_vmull_s16(__noswap_vget_high_s16(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmlsl_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \ - uint64x2_t __s0 = __p0; \ - uint32x2_t __s1 = __p1; \ - uint32x4_t __s2 = __p2; \ - uint64x2_t __ret; \ - __ret = __s0 - vmull_u32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3)); \ - __ret; \ -}) -#else -#define vmlsl_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \ - uint64x2_t __s0 = __p0; \ - uint32x2_t __s1 = __p1; \ - uint32x4_t __s2 = __p2; \ - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ - uint64x2_t __ret; \ - __ret = __rev0 - __noswap_vmull_u32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmlsl_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \ - uint32x4_t __s0 = __p0; \ - uint16x4_t __s1 = __p1; \ - uint16x8_t __s2 = __p2; \ - uint32x4_t __ret; \ - __ret = __s0 - vmull_u16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \ - __ret; \ -}) -#else -#define vmlsl_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \ - uint32x4_t __s0 = __p0; \ - uint16x4_t __s1 = __p1; \ - uint16x8_t __s2 = __p2; \ - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint32x4_t __ret; \ - __ret = __rev0 - __noswap_vmull_u16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmlsl_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ - int64x2_t __s0 = __p0; \ - int32x2_t __s1 = __p1; \ - int32x4_t __s2 = __p2; \ - int64x2_t __ret; \ - __ret = __s0 - vmull_s32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3)); \ - __ret; \ -}) -#else -#define vmlsl_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ - int64x2_t __s0 = __p0; \ - int32x2_t __s1 = __p1; \ - int32x4_t __s2 = __p2; \ - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - int32x2_t __rev1; __rev1 = 
__builtin_shufflevector(__s1, __s1, 1, 0); \ - int32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ - int64x2_t __ret; \ - __ret = __rev0 - __noswap_vmull_s32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmlsl_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ - int32x4_t __s0 = __p0; \ - int16x4_t __s1 = __p1; \ - int16x8_t __s2 = __p2; \ - int32x4_t __ret; \ - __ret = __s0 - vmull_s16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \ - __ret; \ -}) -#else -#define vmlsl_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ - int32x4_t __s0 = __p0; \ - int16x4_t __s1 = __p1; \ - int16x8_t __s2 = __p2; \ - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - int16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \ - int32x4_t __ret; \ - __ret = __rev0 - __noswap_vmull_s16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly64x1_t vmov_n_p64(poly64_t __p0) { - poly64x1_t __ret; - __ret = (poly64x1_t) {__p0}; - return __ret; -} -#else -__ai poly64x1_t vmov_n_p64(poly64_t __p0) { - poly64x1_t __ret; - __ret = (poly64x1_t) {__p0}; - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly64x2_t vmovq_n_p64(poly64_t __p0) { - poly64x2_t __ret; - __ret = (poly64x2_t) {__p0, __p0}; - return __ret; -} -#else -__ai poly64x2_t vmovq_n_p64(poly64_t __p0) { - poly64x2_t __ret; - __ret = (poly64x2_t) {__p0, __p0}; - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vmovq_n_f64(float64_t __p0) { - float64x2_t __ret; - __ret = (float64x2_t) {__p0, __p0}; - return __ret; -} -#else -__ai float64x2_t vmovq_n_f64(float64_t __p0) { - float64x2_t __ret; - __ret = (float64x2_t) {__p0, __p0}; - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64x1_t vmov_n_f64(float64_t __p0) { - float64x1_t __ret; - __ret = (float64x1_t) {__p0}; - return __ret; -} -#else -__ai float64x1_t vmov_n_f64(float64_t __p0) { - float64x1_t __ret; - __ret = (float64x1_t) {__p0}; - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vmovl_high_u8(uint8x16_t __p0_128) { - uint16x8_t __ret_128; - uint8x8_t __a1_128 = vget_high_u8(__p0_128); - __ret_128 = (uint16x8_t)(vshll_n_u8(__a1_128, 0)); - return __ret_128; -} -#else -__ai uint16x8_t vmovl_high_u8(uint8x16_t __p0_129) { - uint8x16_t __rev0_129; __rev0_129 = __builtin_shufflevector(__p0_129, __p0_129, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __ret_129; - uint8x8_t __a1_129 = __noswap_vget_high_u8(__rev0_129); - __ret_129 = (uint16x8_t)(__noswap_vshll_n_u8(__a1_129, 0)); - __ret_129 = __builtin_shufflevector(__ret_129, __ret_129, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret_129; -} -__ai uint16x8_t __noswap_vmovl_high_u8(uint8x16_t __p0_130) { - uint16x8_t __ret_130; - uint8x8_t __a1_130 = __noswap_vget_high_u8(__p0_130); - __ret_130 = (uint16x8_t)(__noswap_vshll_n_u8(__a1_130, 0)); - return __ret_130; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vmovl_high_u32(uint32x4_t __p0_131) { - uint64x2_t __ret_131; - uint32x2_t 
__a1_131 = vget_high_u32(__p0_131); - __ret_131 = (uint64x2_t)(vshll_n_u32(__a1_131, 0)); - return __ret_131; -} -#else -__ai uint64x2_t vmovl_high_u32(uint32x4_t __p0_132) { - uint32x4_t __rev0_132; __rev0_132 = __builtin_shufflevector(__p0_132, __p0_132, 3, 2, 1, 0); - uint64x2_t __ret_132; - uint32x2_t __a1_132 = __noswap_vget_high_u32(__rev0_132); - __ret_132 = (uint64x2_t)(__noswap_vshll_n_u32(__a1_132, 0)); - __ret_132 = __builtin_shufflevector(__ret_132, __ret_132, 1, 0); - return __ret_132; -} -__ai uint64x2_t __noswap_vmovl_high_u32(uint32x4_t __p0_133) { - uint64x2_t __ret_133; - uint32x2_t __a1_133 = __noswap_vget_high_u32(__p0_133); - __ret_133 = (uint64x2_t)(__noswap_vshll_n_u32(__a1_133, 0)); - return __ret_133; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vmovl_high_u16(uint16x8_t __p0_134) { - uint32x4_t __ret_134; - uint16x4_t __a1_134 = vget_high_u16(__p0_134); - __ret_134 = (uint32x4_t)(vshll_n_u16(__a1_134, 0)); - return __ret_134; -} -#else -__ai uint32x4_t vmovl_high_u16(uint16x8_t __p0_135) { - uint16x8_t __rev0_135; __rev0_135 = __builtin_shufflevector(__p0_135, __p0_135, 7, 6, 5, 4, 3, 2, 1, 0); - uint32x4_t __ret_135; - uint16x4_t __a1_135 = __noswap_vget_high_u16(__rev0_135); - __ret_135 = (uint32x4_t)(__noswap_vshll_n_u16(__a1_135, 0)); - __ret_135 = __builtin_shufflevector(__ret_135, __ret_135, 3, 2, 1, 0); - return __ret_135; -} -__ai uint32x4_t __noswap_vmovl_high_u16(uint16x8_t __p0_136) { - uint32x4_t __ret_136; - uint16x4_t __a1_136 = __noswap_vget_high_u16(__p0_136); - __ret_136 = (uint32x4_t)(__noswap_vshll_n_u16(__a1_136, 0)); - return __ret_136; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vmovl_high_s8(int8x16_t __p0_137) { - int16x8_t __ret_137; - int8x8_t __a1_137 = vget_high_s8(__p0_137); - __ret_137 = (int16x8_t)(vshll_n_s8(__a1_137, 0)); - return __ret_137; -} -#else -__ai int16x8_t vmovl_high_s8(int8x16_t __p0_138) { - int8x16_t __rev0_138; __rev0_138 = __builtin_shufflevector(__p0_138, __p0_138, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __ret_138; - int8x8_t __a1_138 = __noswap_vget_high_s8(__rev0_138); - __ret_138 = (int16x8_t)(__noswap_vshll_n_s8(__a1_138, 0)); - __ret_138 = __builtin_shufflevector(__ret_138, __ret_138, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret_138; -} -__ai int16x8_t __noswap_vmovl_high_s8(int8x16_t __p0_139) { - int16x8_t __ret_139; - int8x8_t __a1_139 = __noswap_vget_high_s8(__p0_139); - __ret_139 = (int16x8_t)(__noswap_vshll_n_s8(__a1_139, 0)); - return __ret_139; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vmovl_high_s32(int32x4_t __p0_140) { - int64x2_t __ret_140; - int32x2_t __a1_140 = vget_high_s32(__p0_140); - __ret_140 = (int64x2_t)(vshll_n_s32(__a1_140, 0)); - return __ret_140; -} -#else -__ai int64x2_t vmovl_high_s32(int32x4_t __p0_141) { - int32x4_t __rev0_141; __rev0_141 = __builtin_shufflevector(__p0_141, __p0_141, 3, 2, 1, 0); - int64x2_t __ret_141; - int32x2_t __a1_141 = __noswap_vget_high_s32(__rev0_141); - __ret_141 = (int64x2_t)(__noswap_vshll_n_s32(__a1_141, 0)); - __ret_141 = __builtin_shufflevector(__ret_141, __ret_141, 1, 0); - return __ret_141; -} -__ai int64x2_t __noswap_vmovl_high_s32(int32x4_t __p0_142) { - int64x2_t __ret_142; - int32x2_t __a1_142 = __noswap_vget_high_s32(__p0_142); - __ret_142 = (int64x2_t)(__noswap_vshll_n_s32(__a1_142, 0)); - return __ret_142; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vmovl_high_s16(int16x8_t __p0_143) { - int32x4_t __ret_143; - int16x4_t __a1_143 = vget_high_s16(__p0_143); - __ret_143 = 
(int32x4_t)(vshll_n_s16(__a1_143, 0)); - return __ret_143; -} -#else -__ai int32x4_t vmovl_high_s16(int16x8_t __p0_144) { - int16x8_t __rev0_144; __rev0_144 = __builtin_shufflevector(__p0_144, __p0_144, 7, 6, 5, 4, 3, 2, 1, 0); - int32x4_t __ret_144; - int16x4_t __a1_144 = __noswap_vget_high_s16(__rev0_144); - __ret_144 = (int32x4_t)(__noswap_vshll_n_s16(__a1_144, 0)); - __ret_144 = __builtin_shufflevector(__ret_144, __ret_144, 3, 2, 1, 0); - return __ret_144; -} -__ai int32x4_t __noswap_vmovl_high_s16(int16x8_t __p0_145) { - int32x4_t __ret_145; - int16x4_t __a1_145 = __noswap_vget_high_s16(__p0_145); - __ret_145 = (int32x4_t)(__noswap_vshll_n_s16(__a1_145, 0)); - return __ret_145; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vmovn_high_u32(uint16x4_t __p0, uint32x4_t __p1) { - uint16x8_t __ret; - __ret = vcombine_u16(__p0, vmovn_u32(__p1)); - return __ret; -} -#else -__ai uint16x8_t vmovn_high_u32(uint16x4_t __p0, uint32x4_t __p1) { - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint16x8_t __ret; - __ret = __noswap_vcombine_u16(__rev0, __noswap_vmovn_u32(__rev1)); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vmovn_high_u64(uint32x2_t __p0, uint64x2_t __p1) { - uint32x4_t __ret; - __ret = vcombine_u32(__p0, vmovn_u64(__p1)); - return __ret; -} -#else -__ai uint32x4_t vmovn_high_u64(uint32x2_t __p0, uint64x2_t __p1) { - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint32x4_t __ret; - __ret = __noswap_vcombine_u32(__rev0, __noswap_vmovn_u64(__rev1)); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vmovn_high_u16(uint8x8_t __p0, uint16x8_t __p1) { - uint8x16_t __ret; - __ret = vcombine_u8(__p0, vmovn_u16(__p1)); - return __ret; -} -#else -__ai uint8x16_t vmovn_high_u16(uint8x8_t __p0, uint16x8_t __p1) { - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __ret; - __ret = __noswap_vcombine_u8(__rev0, __noswap_vmovn_u16(__rev1)); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vmovn_high_s32(int16x4_t __p0, int32x4_t __p1) { - int16x8_t __ret; - __ret = vcombine_s16(__p0, vmovn_s32(__p1)); - return __ret; -} -#else -__ai int16x8_t vmovn_high_s32(int16x4_t __p0, int32x4_t __p1) { - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int16x8_t __ret; - __ret = __noswap_vcombine_s16(__rev0, __noswap_vmovn_s32(__rev1)); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vmovn_high_s64(int32x2_t __p0, int64x2_t __p1) { - int32x4_t __ret; - __ret = vcombine_s32(__p0, vmovn_s64(__p1)); - return __ret; -} -#else -__ai int32x4_t vmovn_high_s64(int32x2_t __p0, int64x2_t __p1) { - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - int32x4_t __ret; - __ret 
= __noswap_vcombine_s32(__rev0, __noswap_vmovn_s64(__rev1)); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vmovn_high_s16(int8x8_t __p0, int16x8_t __p1) { - int8x16_t __ret; - __ret = vcombine_s8(__p0, vmovn_s16(__p1)); - return __ret; -} -#else -__ai int8x16_t vmovn_high_s16(int8x8_t __p0, int16x8_t __p1) { - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __ret; - __ret = __noswap_vcombine_s8(__rev0, __noswap_vmovn_s16(__rev1)); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vmulq_f64(float64x2_t __p0, float64x2_t __p1) { - float64x2_t __ret; - __ret = __p0 * __p1; - return __ret; -} -#else -__ai float64x2_t vmulq_f64(float64x2_t __p0, float64x2_t __p1) { - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - float64x2_t __ret; - __ret = __rev0 * __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64x1_t vmul_f64(float64x1_t __p0, float64x1_t __p1) { - float64x1_t __ret; - __ret = __p0 * __p1; - return __ret; -} -#else -__ai float64x1_t vmul_f64(float64x1_t __p0, float64x1_t __p1) { - float64x1_t __ret; - __ret = __p0 * __p1; - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmuld_lane_f64(__p0_146, __p1_146, __p2_146) __extension__ ({ \ - float64_t __s0_146 = __p0_146; \ - float64x1_t __s1_146 = __p1_146; \ - float64_t __ret_146; \ - __ret_146 = __s0_146 * vget_lane_f64(__s1_146, __p2_146); \ - __ret_146; \ -}) -#else -#define vmuld_lane_f64(__p0_147, __p1_147, __p2_147) __extension__ ({ \ - float64_t __s0_147 = __p0_147; \ - float64x1_t __s1_147 = __p1_147; \ - float64_t __ret_147; \ - __ret_147 = __s0_147 * __noswap_vget_lane_f64(__s1_147, __p2_147); \ - __ret_147; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmuls_lane_f32(__p0_148, __p1_148, __p2_148) __extension__ ({ \ - float32_t __s0_148 = __p0_148; \ - float32x2_t __s1_148 = __p1_148; \ - float32_t __ret_148; \ - __ret_148 = __s0_148 * vget_lane_f32(__s1_148, __p2_148); \ - __ret_148; \ -}) -#else -#define vmuls_lane_f32(__p0_149, __p1_149, __p2_149) __extension__ ({ \ - float32_t __s0_149 = __p0_149; \ - float32x2_t __s1_149 = __p1_149; \ - float32x2_t __rev1_149; __rev1_149 = __builtin_shufflevector(__s1_149, __s1_149, 1, 0); \ - float32_t __ret_149; \ - __ret_149 = __s0_149 * __noswap_vget_lane_f32(__rev1_149, __p2_149); \ - __ret_149; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmul_lane_f64(__p0, __p1, __p2) __extension__ ({ \ - float64x1_t __s0 = __p0; \ - float64x1_t __s1 = __p1; \ - float64x1_t __ret; \ - __ret = (float64x1_t) __builtin_neon_vmul_lane_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 10); \ - __ret; \ -}) -#else -#define vmul_lane_f64(__p0, __p1, __p2) __extension__ ({ \ - float64x1_t __s0 = __p0; \ - float64x1_t __s1 = __p1; \ - float64x1_t __ret; \ - __ret = (float64x1_t) __builtin_neon_vmul_lane_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 10); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmulq_lane_f64(__p0, __p1, __p2) __extension__ ({ \ - float64x2_t __s0 = __p0; \ - float64x1_t __s1 = __p1; \ - float64x2_t __ret; \ - __ret = __s0 * 
__builtin_shufflevector(__s1, __s1, __p2, __p2); \ - __ret; \ -}) -#else -#define vmulq_lane_f64(__p0, __p1, __p2) __extension__ ({ \ - float64x2_t __s0 = __p0; \ - float64x1_t __s1 = __p1; \ - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - float64x2_t __ret; \ - __ret = __rev0 * __builtin_shufflevector(__s1, __s1, __p2, __p2); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmuld_laneq_f64(__p0_150, __p1_150, __p2_150) __extension__ ({ \ - float64_t __s0_150 = __p0_150; \ - float64x2_t __s1_150 = __p1_150; \ - float64_t __ret_150; \ - __ret_150 = __s0_150 * vgetq_lane_f64(__s1_150, __p2_150); \ - __ret_150; \ -}) -#else -#define vmuld_laneq_f64(__p0_151, __p1_151, __p2_151) __extension__ ({ \ - float64_t __s0_151 = __p0_151; \ - float64x2_t __s1_151 = __p1_151; \ - float64x2_t __rev1_151; __rev1_151 = __builtin_shufflevector(__s1_151, __s1_151, 1, 0); \ - float64_t __ret_151; \ - __ret_151 = __s0_151 * __noswap_vgetq_lane_f64(__rev1_151, __p2_151); \ - __ret_151; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmuls_laneq_f32(__p0_152, __p1_152, __p2_152) __extension__ ({ \ - float32_t __s0_152 = __p0_152; \ - float32x4_t __s1_152 = __p1_152; \ - float32_t __ret_152; \ - __ret_152 = __s0_152 * vgetq_lane_f32(__s1_152, __p2_152); \ - __ret_152; \ -}) -#else -#define vmuls_laneq_f32(__p0_153, __p1_153, __p2_153) __extension__ ({ \ - float32_t __s0_153 = __p0_153; \ - float32x4_t __s1_153 = __p1_153; \ - float32x4_t __rev1_153; __rev1_153 = __builtin_shufflevector(__s1_153, __s1_153, 3, 2, 1, 0); \ - float32_t __ret_153; \ - __ret_153 = __s0_153 * __noswap_vgetq_lane_f32(__rev1_153, __p2_153); \ - __ret_153; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmul_laneq_f64(__p0, __p1, __p2) __extension__ ({ \ - float64x1_t __s0 = __p0; \ - float64x2_t __s1 = __p1; \ - float64x1_t __ret; \ - __ret = (float64x1_t) __builtin_neon_vmul_laneq_v((int8x8_t)__s0, (int8x16_t)__s1, __p2, 10); \ - __ret; \ -}) -#else -#define vmul_laneq_f64(__p0, __p1, __p2) __extension__ ({ \ - float64x1_t __s0 = __p0; \ - float64x2_t __s1 = __p1; \ - float64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - float64x1_t __ret; \ - __ret = (float64x1_t) __builtin_neon_vmul_laneq_v((int8x8_t)__s0, (int8x16_t)__rev1, __p2, 10); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmulq_laneq_u32(__p0, __p1, __p2) __extension__ ({ \ - uint32x4_t __s0 = __p0; \ - uint32x4_t __s1 = __p1; \ - uint32x4_t __ret; \ - __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2); \ - __ret; \ -}) -#else -#define vmulq_laneq_u32(__p0, __p1, __p2) __extension__ ({ \ - uint32x4_t __s0 = __p0; \ - uint32x4_t __s1 = __p1; \ - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - uint32x4_t __ret; \ - __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmulq_laneq_u16(__p0, __p1, __p2) __extension__ ({ \ - uint16x8_t __s0 = __p0; \ - uint16x8_t __s1 = __p1; \ - uint16x8_t __ret; \ - __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2); \ - __ret; \ -}) -#else -#define vmulq_laneq_u16(__p0, __p1, __p2) __extension__ ({ \ - uint16x8_t __s0 = __p0; \ - uint16x8_t __s1 = __p1; \ - uint16x8_t 
__rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint16x8_t __ret; \ - __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmulq_laneq_f64(__p0, __p1, __p2) __extension__ ({ \ - float64x2_t __s0 = __p0; \ - float64x2_t __s1 = __p1; \ - float64x2_t __ret; \ - __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2); \ - __ret; \ -}) -#else -#define vmulq_laneq_f64(__p0, __p1, __p2) __extension__ ({ \ - float64x2_t __s0 = __p0; \ - float64x2_t __s1 = __p1; \ - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - float64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - float64x2_t __ret; \ - __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmulq_laneq_f32(__p0, __p1, __p2) __extension__ ({ \ - float32x4_t __s0 = __p0; \ - float32x4_t __s1 = __p1; \ - float32x4_t __ret; \ - __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2); \ - __ret; \ -}) -#else -#define vmulq_laneq_f32(__p0, __p1, __p2) __extension__ ({ \ - float32x4_t __s0 = __p0; \ - float32x4_t __s1 = __p1; \ - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - float32x4_t __ret; \ - __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmulq_laneq_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x4_t __s0 = __p0; \ - int32x4_t __s1 = __p1; \ - int32x4_t __ret; \ - __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2); \ - __ret; \ -}) -#else -#define vmulq_laneq_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x4_t __s0 = __p0; \ - int32x4_t __s1 = __p1; \ - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - int32x4_t __ret; \ - __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmulq_laneq_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x8_t __s0 = __p0; \ - int16x8_t __s1 = __p1; \ - int16x8_t __ret; \ - __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2); \ - __ret; \ -}) -#else -#define vmulq_laneq_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x8_t __s0 = __p0; \ - int16x8_t __s1 = __p1; \ - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x8_t __ret; \ - __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmul_laneq_u32(__p0, __p1, __p2) __extension__ ({ \ - uint32x2_t __s0 = __p0; \ - uint32x4_t 
__s1 = __p1; \ - uint32x2_t __ret; \ - __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2); \ - __ret; \ -}) -#else -#define vmul_laneq_u32(__p0, __p1, __p2) __extension__ ({ \ - uint32x2_t __s0 = __p0; \ - uint32x4_t __s1 = __p1; \ - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - uint32x2_t __ret; \ - __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmul_laneq_u16(__p0, __p1, __p2) __extension__ ({ \ - uint16x4_t __s0 = __p0; \ - uint16x8_t __s1 = __p1; \ - uint16x4_t __ret; \ - __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2); \ - __ret; \ -}) -#else -#define vmul_laneq_u16(__p0, __p1, __p2) __extension__ ({ \ - uint16x4_t __s0 = __p0; \ - uint16x8_t __s1 = __p1; \ - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint16x4_t __ret; \ - __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmul_laneq_f32(__p0, __p1, __p2) __extension__ ({ \ - float32x2_t __s0 = __p0; \ - float32x4_t __s1 = __p1; \ - float32x2_t __ret; \ - __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2); \ - __ret; \ -}) -#else -#define vmul_laneq_f32(__p0, __p1, __p2) __extension__ ({ \ - float32x2_t __s0 = __p0; \ - float32x4_t __s1 = __p1; \ - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - float32x2_t __ret; \ - __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmul_laneq_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x2_t __s0 = __p0; \ - int32x4_t __s1 = __p1; \ - int32x2_t __ret; \ - __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2); \ - __ret; \ -}) -#else -#define vmul_laneq_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x2_t __s0 = __p0; \ - int32x4_t __s1 = __p1; \ - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - int32x2_t __ret; \ - __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmul_laneq_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x4_t __s0 = __p0; \ - int16x8_t __s1 = __p1; \ - int16x4_t __ret; \ - __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2); \ - __ret; \ -}) -#else -#define vmul_laneq_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x4_t __s0 = __p0; \ - int16x8_t __s1 = __p1; \ - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x4_t __ret; \ - __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64x1_t 
vmul_n_f64(float64x1_t __p0, float64_t __p1) { - float64x1_t __ret; - __ret = (float64x1_t) __builtin_neon_vmul_n_f64((int8x8_t)__p0, __p1); - return __ret; -} -#else -__ai float64x1_t vmul_n_f64(float64x1_t __p0, float64_t __p1) { - float64x1_t __ret; - __ret = (float64x1_t) __builtin_neon_vmul_n_f64((int8x8_t)__p0, __p1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vmulq_n_f64(float64x2_t __p0, float64_t __p1) { - float64x2_t __ret; - __ret = __p0 * (float64x2_t) {__p1, __p1}; - return __ret; -} -#else -__ai float64x2_t vmulq_n_f64(float64x2_t __p0, float64_t __p1) { - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float64x2_t __ret; - __ret = __rev0 * (float64x2_t) {__p1, __p1}; - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly128_t vmull_p64(poly64_t __p0, poly64_t __p1) { - poly128_t __ret; - __ret = (poly128_t) __builtin_neon_vmull_p64(__p0, __p1); - return __ret; -} -#else -__ai poly128_t vmull_p64(poly64_t __p0, poly64_t __p1) { - poly128_t __ret; - __ret = (poly128_t) __builtin_neon_vmull_p64(__p0, __p1); - return __ret; -} -__ai poly128_t __noswap_vmull_p64(poly64_t __p0, poly64_t __p1) { - poly128_t __ret; - __ret = (poly128_t) __builtin_neon_vmull_p64(__p0, __p1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly16x8_t vmull_high_p8(poly8x16_t __p0, poly8x16_t __p1) { - poly16x8_t __ret; - __ret = vmull_p8(vget_high_p8(__p0), vget_high_p8(__p1)); - return __ret; -} -#else -__ai poly16x8_t vmull_high_p8(poly8x16_t __p0, poly8x16_t __p1) { - poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - poly16x8_t __ret; - __ret = __noswap_vmull_p8(__noswap_vget_high_p8(__rev0), __noswap_vget_high_p8(__rev1)); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vmull_high_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint16x8_t __ret; - __ret = vmull_u8(vget_high_u8(__p0), vget_high_u8(__p1)); - return __ret; -} -#else -__ai uint16x8_t vmull_high_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __ret; - __ret = __noswap_vmull_u8(__noswap_vget_high_u8(__rev0), __noswap_vget_high_u8(__rev1)); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vmull_high_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint64x2_t __ret; - __ret = vmull_u32(vget_high_u32(__p0), vget_high_u32(__p1)); - return __ret; -} -#else -__ai uint64x2_t vmull_high_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint64x2_t __ret; - __ret = __noswap_vmull_u32(__noswap_vget_high_u32(__rev0), __noswap_vget_high_u32(__rev1)); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vmull_high_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint32x4_t __ret; - __ret = 
vmull_u16(vget_high_u16(__p0), vget_high_u16(__p1)); - return __ret; -} -#else -__ai uint32x4_t vmull_high_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint32x4_t __ret; - __ret = __noswap_vmull_u16(__noswap_vget_high_u16(__rev0), __noswap_vget_high_u16(__rev1)); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vmull_high_s8(int8x16_t __p0, int8x16_t __p1) { - int16x8_t __ret; - __ret = vmull_s8(vget_high_s8(__p0), vget_high_s8(__p1)); - return __ret; -} -#else -__ai int16x8_t vmull_high_s8(int8x16_t __p0, int8x16_t __p1) { - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __ret; - __ret = __noswap_vmull_s8(__noswap_vget_high_s8(__rev0), __noswap_vget_high_s8(__rev1)); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vmull_high_s32(int32x4_t __p0, int32x4_t __p1) { - int64x2_t __ret; - __ret = vmull_s32(vget_high_s32(__p0), vget_high_s32(__p1)); - return __ret; -} -#else -__ai int64x2_t vmull_high_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int64x2_t __ret; - __ret = __noswap_vmull_s32(__noswap_vget_high_s32(__rev0), __noswap_vget_high_s32(__rev1)); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vmull_high_s16(int16x8_t __p0, int16x8_t __p1) { - int32x4_t __ret; - __ret = vmull_s16(vget_high_s16(__p0), vget_high_s16(__p1)); - return __ret; -} -#else -__ai int32x4_t vmull_high_s16(int16x8_t __p0, int16x8_t __p1) { - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int32x4_t __ret; - __ret = __noswap_vmull_s16(__noswap_vget_high_s16(__rev0), __noswap_vget_high_s16(__rev1)); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly128_t vmull_high_p64(poly64x2_t __p0, poly64x2_t __p1) { - poly128_t __ret; - __ret = vmull_p64((poly64_t)(vget_high_p64(__p0)), (poly64_t)(vget_high_p64(__p1))); - return __ret; -} -#else -__ai poly128_t vmull_high_p64(poly64x2_t __p0, poly64x2_t __p1) { - poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - poly128_t __ret; - __ret = __noswap_vmull_p64((poly64_t)(__noswap_vget_high_p64(__rev0)), (poly64_t)(__noswap_vget_high_p64(__rev1))); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmull_high_lane_u32(__p0, __p1, __p2) __extension__ ({ \ - uint32x4_t __s0 = __p0; \ - uint32x2_t __s1 = __p1; \ - uint64x2_t __ret; \ - __ret = vmull_u32(vget_high_u32(__s0), __builtin_shufflevector(__s1, __s1, __p2, __p2)); \ - __ret; \ -}) -#else -#define vmull_high_lane_u32(__p0, __p1, __p2) __extension__ ({ \ - uint32x4_t __s0 = __p0; \ - uint32x2_t __s1 = __p1; \ - uint32x4_t __rev0; __rev0 = 
__builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - uint64x2_t __ret; \ - __ret = __noswap_vmull_u32(__noswap_vget_high_u32(__rev0), __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmull_high_lane_u16(__p0, __p1, __p2) __extension__ ({ \ - uint16x8_t __s0 = __p0; \ - uint16x4_t __s1 = __p1; \ - uint32x4_t __ret; \ - __ret = vmull_u16(vget_high_u16(__s0), __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \ - __ret; \ -}) -#else -#define vmull_high_lane_u16(__p0, __p1, __p2) __extension__ ({ \ - uint16x8_t __s0 = __p0; \ - uint16x4_t __s1 = __p1; \ - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - uint32x4_t __ret; \ - __ret = __noswap_vmull_u16(__noswap_vget_high_u16(__rev0), __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmull_high_lane_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x4_t __s0 = __p0; \ - int32x2_t __s1 = __p1; \ - int64x2_t __ret; \ - __ret = vmull_s32(vget_high_s32(__s0), __builtin_shufflevector(__s1, __s1, __p2, __p2)); \ - __ret; \ -}) -#else -#define vmull_high_lane_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x4_t __s0 = __p0; \ - int32x2_t __s1 = __p1; \ - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - int64x2_t __ret; \ - __ret = __noswap_vmull_s32(__noswap_vget_high_s32(__rev0), __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmull_high_lane_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x8_t __s0 = __p0; \ - int16x4_t __s1 = __p1; \ - int32x4_t __ret; \ - __ret = vmull_s16(vget_high_s16(__s0), __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \ - __ret; \ -}) -#else -#define vmull_high_lane_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x8_t __s0 = __p0; \ - int16x4_t __s1 = __p1; \ - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - int32x4_t __ret; \ - __ret = __noswap_vmull_s16(__noswap_vget_high_s16(__rev0), __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmull_high_laneq_u32(__p0, __p1, __p2) __extension__ ({ \ - uint32x4_t __s0 = __p0; \ - uint32x4_t __s1 = __p1; \ - uint64x2_t __ret; \ - __ret = vmull_u32(vget_high_u32(__s0), __builtin_shufflevector(__s1, __s1, __p2, __p2)); \ - __ret; \ -}) -#else -#define vmull_high_laneq_u32(__p0, __p1, __p2) __extension__ ({ \ - uint32x4_t __s0 = __p0; \ - uint32x4_t __s1 = __p1; \ - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - uint64x2_t __ret; \ - __ret = __noswap_vmull_u32(__noswap_vget_high_u32(__rev0), __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; 
\ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmull_high_laneq_u16(__p0, __p1, __p2) __extension__ ({ \ - uint16x8_t __s0 = __p0; \ - uint16x8_t __s1 = __p1; \ - uint32x4_t __ret; \ - __ret = vmull_u16(vget_high_u16(__s0), __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \ - __ret; \ -}) -#else -#define vmull_high_laneq_u16(__p0, __p1, __p2) __extension__ ({ \ - uint16x8_t __s0 = __p0; \ - uint16x8_t __s1 = __p1; \ - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint32x4_t __ret; \ - __ret = __noswap_vmull_u16(__noswap_vget_high_u16(__rev0), __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmull_high_laneq_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x4_t __s0 = __p0; \ - int32x4_t __s1 = __p1; \ - int64x2_t __ret; \ - __ret = vmull_s32(vget_high_s32(__s0), __builtin_shufflevector(__s1, __s1, __p2, __p2)); \ - __ret; \ -}) -#else -#define vmull_high_laneq_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x4_t __s0 = __p0; \ - int32x4_t __s1 = __p1; \ - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - int64x2_t __ret; \ - __ret = __noswap_vmull_s32(__noswap_vget_high_s32(__rev0), __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmull_high_laneq_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x8_t __s0 = __p0; \ - int16x8_t __s1 = __p1; \ - int32x4_t __ret; \ - __ret = vmull_s16(vget_high_s16(__s0), __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \ - __ret; \ -}) -#else -#define vmull_high_laneq_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x8_t __s0 = __p0; \ - int16x8_t __s1 = __p1; \ - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - int32x4_t __ret; \ - __ret = __noswap_vmull_s16(__noswap_vget_high_s16(__rev0), __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vmull_high_n_u32(uint32x4_t __p0, uint32_t __p1) { - uint64x2_t __ret; - __ret = vmull_n_u32(vget_high_u32(__p0), __p1); - return __ret; -} -#else -__ai uint64x2_t vmull_high_n_u32(uint32x4_t __p0, uint32_t __p1) { - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint64x2_t __ret; - __ret = __noswap_vmull_n_u32(__noswap_vget_high_u32(__rev0), __p1); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vmull_high_n_u16(uint16x8_t __p0, uint16_t __p1) { - uint32x4_t __ret; - __ret = vmull_n_u16(vget_high_u16(__p0), __p1); - return __ret; -} -#else -__ai uint32x4_t vmull_high_n_u16(uint16x8_t __p0, uint16_t __p1) { - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint32x4_t __ret; - __ret = __noswap_vmull_n_u16(__noswap_vget_high_u16(__rev0), __p1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ 
-__ai int64x2_t vmull_high_n_s32(int32x4_t __p0, int32_t __p1) { - int64x2_t __ret; - __ret = vmull_n_s32(vget_high_s32(__p0), __p1); - return __ret; -} -#else -__ai int64x2_t vmull_high_n_s32(int32x4_t __p0, int32_t __p1) { - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int64x2_t __ret; - __ret = __noswap_vmull_n_s32(__noswap_vget_high_s32(__rev0), __p1); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vmull_high_n_s16(int16x8_t __p0, int16_t __p1) { - int32x4_t __ret; - __ret = vmull_n_s16(vget_high_s16(__p0), __p1); - return __ret; -} -#else -__ai int32x4_t vmull_high_n_s16(int16x8_t __p0, int16_t __p1) { - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int32x4_t __ret; - __ret = __noswap_vmull_n_s16(__noswap_vget_high_s16(__rev0), __p1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmull_laneq_u32(__p0, __p1, __p2) __extension__ ({ \ - uint32x2_t __s0 = __p0; \ - uint32x4_t __s1 = __p1; \ - uint64x2_t __ret; \ - __ret = vmull_u32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \ - __ret; \ -}) -#else -#define vmull_laneq_u32(__p0, __p1, __p2) __extension__ ({ \ - uint32x2_t __s0 = __p0; \ - uint32x4_t __s1 = __p1; \ - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - uint64x2_t __ret; \ - __ret = __noswap_vmull_u32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmull_laneq_u16(__p0, __p1, __p2) __extension__ ({ \ - uint16x4_t __s0 = __p0; \ - uint16x8_t __s1 = __p1; \ - uint32x4_t __ret; \ - __ret = vmull_u16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \ - __ret; \ -}) -#else -#define vmull_laneq_u16(__p0, __p1, __p2) __extension__ ({ \ - uint16x4_t __s0 = __p0; \ - uint16x8_t __s1 = __p1; \ - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint32x4_t __ret; \ - __ret = __noswap_vmull_u16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmull_laneq_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x2_t __s0 = __p0; \ - int32x4_t __s1 = __p1; \ - int64x2_t __ret; \ - __ret = vmull_s32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \ - __ret; \ -}) -#else -#define vmull_laneq_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x2_t __s0 = __p0; \ - int32x4_t __s1 = __p1; \ - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - int64x2_t __ret; \ - __ret = __noswap_vmull_s32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmull_laneq_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x4_t __s0 = __p0; \ - int16x8_t __s1 = __p1; \ - int32x4_t __ret; \ - __ret = vmull_s16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \ - __ret; \ -}) -#else -#define vmull_laneq_s16(__p0, 
__p1, __p2) __extension__ ({ \ - int16x4_t __s0 = __p0; \ - int16x8_t __s1 = __p1; \ - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - int32x4_t __ret; \ - __ret = __noswap_vmull_s16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vmulxq_f64(float64x2_t __p0, float64x2_t __p1) { - float64x2_t __ret; - __ret = (float64x2_t) __builtin_neon_vmulxq_v((int8x16_t)__p0, (int8x16_t)__p1, 42); - return __ret; -} -#else -__ai float64x2_t vmulxq_f64(float64x2_t __p0, float64x2_t __p1) { - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - float64x2_t __ret; - __ret = (float64x2_t) __builtin_neon_vmulxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -__ai float64x2_t __noswap_vmulxq_f64(float64x2_t __p0, float64x2_t __p1) { - float64x2_t __ret; - __ret = (float64x2_t) __builtin_neon_vmulxq_v((int8x16_t)__p0, (int8x16_t)__p1, 42); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vmulxq_f32(float32x4_t __p0, float32x4_t __p1) { - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vmulxq_v((int8x16_t)__p0, (int8x16_t)__p1, 41); - return __ret; -} -#else -__ai float32x4_t vmulxq_f32(float32x4_t __p0, float32x4_t __p1) { - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vmulxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -__ai float32x4_t __noswap_vmulxq_f32(float32x4_t __p0, float32x4_t __p1) { - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vmulxq_v((int8x16_t)__p0, (int8x16_t)__p1, 41); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64x1_t vmulx_f64(float64x1_t __p0, float64x1_t __p1) { - float64x1_t __ret; - __ret = (float64x1_t) __builtin_neon_vmulx_v((int8x8_t)__p0, (int8x8_t)__p1, 10); - return __ret; -} -#else -__ai float64x1_t vmulx_f64(float64x1_t __p0, float64x1_t __p1) { - float64x1_t __ret; - __ret = (float64x1_t) __builtin_neon_vmulx_v((int8x8_t)__p0, (int8x8_t)__p1, 10); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vmulx_f32(float32x2_t __p0, float32x2_t __p1) { - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vmulx_v((int8x8_t)__p0, (int8x8_t)__p1, 9); - return __ret; -} -#else -__ai float32x2_t vmulx_f32(float32x2_t __p0, float32x2_t __p1) { - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vmulx_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -__ai float32x2_t __noswap_vmulx_f32(float32x2_t __p0, float32x2_t __p1) { - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vmulx_v((int8x8_t)__p0, (int8x8_t)__p1, 9); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64_t vmulxd_f64(float64_t __p0, float64_t __p1) { - float64_t __ret; - __ret = (float64_t) 
__builtin_neon_vmulxd_f64(__p0, __p1); - return __ret; -} -#else -__ai float64_t vmulxd_f64(float64_t __p0, float64_t __p1) { - float64_t __ret; - __ret = (float64_t) __builtin_neon_vmulxd_f64(__p0, __p1); - return __ret; -} -__ai float64_t __noswap_vmulxd_f64(float64_t __p0, float64_t __p1) { - float64_t __ret; - __ret = (float64_t) __builtin_neon_vmulxd_f64(__p0, __p1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32_t vmulxs_f32(float32_t __p0, float32_t __p1) { - float32_t __ret; - __ret = (float32_t) __builtin_neon_vmulxs_f32(__p0, __p1); - return __ret; -} -#else -__ai float32_t vmulxs_f32(float32_t __p0, float32_t __p1) { - float32_t __ret; - __ret = (float32_t) __builtin_neon_vmulxs_f32(__p0, __p1); - return __ret; -} -__ai float32_t __noswap_vmulxs_f32(float32_t __p0, float32_t __p1) { - float32_t __ret; - __ret = (float32_t) __builtin_neon_vmulxs_f32(__p0, __p1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmulxd_lane_f64(__p0_154, __p1_154, __p2_154) __extension__ ({ \ - float64_t __s0_154 = __p0_154; \ - float64x1_t __s1_154 = __p1_154; \ - float64_t __ret_154; \ - __ret_154 = vmulxd_f64(__s0_154, vget_lane_f64(__s1_154, __p2_154)); \ - __ret_154; \ -}) -#else -#define vmulxd_lane_f64(__p0_155, __p1_155, __p2_155) __extension__ ({ \ - float64_t __s0_155 = __p0_155; \ - float64x1_t __s1_155 = __p1_155; \ - float64_t __ret_155; \ - __ret_155 = __noswap_vmulxd_f64(__s0_155, __noswap_vget_lane_f64(__s1_155, __p2_155)); \ - __ret_155; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmulxs_lane_f32(__p0_156, __p1_156, __p2_156) __extension__ ({ \ - float32_t __s0_156 = __p0_156; \ - float32x2_t __s1_156 = __p1_156; \ - float32_t __ret_156; \ - __ret_156 = vmulxs_f32(__s0_156, vget_lane_f32(__s1_156, __p2_156)); \ - __ret_156; \ -}) -#else -#define vmulxs_lane_f32(__p0_157, __p1_157, __p2_157) __extension__ ({ \ - float32_t __s0_157 = __p0_157; \ - float32x2_t __s1_157 = __p1_157; \ - float32x2_t __rev1_157; __rev1_157 = __builtin_shufflevector(__s1_157, __s1_157, 1, 0); \ - float32_t __ret_157; \ - __ret_157 = __noswap_vmulxs_f32(__s0_157, __noswap_vget_lane_f32(__rev1_157, __p2_157)); \ - __ret_157; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmulxq_lane_f64(__p0, __p1, __p2) __extension__ ({ \ - float64x2_t __s0 = __p0; \ - float64x1_t __s1 = __p1; \ - float64x2_t __ret; \ - __ret = vmulxq_f64(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \ - __ret; \ -}) -#else -#define vmulxq_lane_f64(__p0, __p1, __p2) __extension__ ({ \ - float64x2_t __s0 = __p0; \ - float64x1_t __s1 = __p1; \ - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - float64x2_t __ret; \ - __ret = __noswap_vmulxq_f64(__rev0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmulxq_lane_f32(__p0, __p1, __p2) __extension__ ({ \ - float32x4_t __s0 = __p0; \ - float32x2_t __s1 = __p1; \ - float32x4_t __ret; \ - __ret = vmulxq_f32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \ - __ret; \ -}) -#else -#define vmulxq_lane_f32(__p0, __p1, __p2) __extension__ ({ \ - float32x4_t __s0 = __p0; \ - float32x2_t __s1 = __p1; \ - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - float32x4_t __ret; \ - __ret = __noswap_vmulxq_f32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \ 
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmulx_lane_f32(__p0, __p1, __p2) __extension__ ({ \ - float32x2_t __s0 = __p0; \ - float32x2_t __s1 = __p1; \ - float32x2_t __ret; \ - __ret = vmulx_f32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \ - __ret; \ -}) -#else -#define vmulx_lane_f32(__p0, __p1, __p2) __extension__ ({ \ - float32x2_t __s0 = __p0; \ - float32x2_t __s1 = __p1; \ - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - float32x2_t __ret; \ - __ret = __noswap_vmulx_f32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmulxd_laneq_f64(__p0_158, __p1_158, __p2_158) __extension__ ({ \ - float64_t __s0_158 = __p0_158; \ - float64x2_t __s1_158 = __p1_158; \ - float64_t __ret_158; \ - __ret_158 = vmulxd_f64(__s0_158, vgetq_lane_f64(__s1_158, __p2_158)); \ - __ret_158; \ -}) -#else -#define vmulxd_laneq_f64(__p0_159, __p1_159, __p2_159) __extension__ ({ \ - float64_t __s0_159 = __p0_159; \ - float64x2_t __s1_159 = __p1_159; \ - float64x2_t __rev1_159; __rev1_159 = __builtin_shufflevector(__s1_159, __s1_159, 1, 0); \ - float64_t __ret_159; \ - __ret_159 = __noswap_vmulxd_f64(__s0_159, __noswap_vgetq_lane_f64(__rev1_159, __p2_159)); \ - __ret_159; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmulxs_laneq_f32(__p0_160, __p1_160, __p2_160) __extension__ ({ \ - float32_t __s0_160 = __p0_160; \ - float32x4_t __s1_160 = __p1_160; \ - float32_t __ret_160; \ - __ret_160 = vmulxs_f32(__s0_160, vgetq_lane_f32(__s1_160, __p2_160)); \ - __ret_160; \ -}) -#else -#define vmulxs_laneq_f32(__p0_161, __p1_161, __p2_161) __extension__ ({ \ - float32_t __s0_161 = __p0_161; \ - float32x4_t __s1_161 = __p1_161; \ - float32x4_t __rev1_161; __rev1_161 = __builtin_shufflevector(__s1_161, __s1_161, 3, 2, 1, 0); \ - float32_t __ret_161; \ - __ret_161 = __noswap_vmulxs_f32(__s0_161, __noswap_vgetq_lane_f32(__rev1_161, __p2_161)); \ - __ret_161; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmulxq_laneq_f64(__p0, __p1, __p2) __extension__ ({ \ - float64x2_t __s0 = __p0; \ - float64x2_t __s1 = __p1; \ - float64x2_t __ret; \ - __ret = vmulxq_f64(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \ - __ret; \ -}) -#else -#define vmulxq_laneq_f64(__p0, __p1, __p2) __extension__ ({ \ - float64x2_t __s0 = __p0; \ - float64x2_t __s1 = __p1; \ - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - float64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - float64x2_t __ret; \ - __ret = __noswap_vmulxq_f64(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmulxq_laneq_f32(__p0, __p1, __p2) __extension__ ({ \ - float32x4_t __s0 = __p0; \ - float32x4_t __s1 = __p1; \ - float32x4_t __ret; \ - __ret = vmulxq_f32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \ - __ret; \ -}) -#else -#define vmulxq_laneq_f32(__p0, __p1, __p2) __extension__ ({ \ - float32x4_t __s0 = __p0; \ - float32x4_t __s1 = __p1; \ - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - float32x4_t __ret; \ - __ret = 
__noswap_vmulxq_f32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmulx_laneq_f32(__p0, __p1, __p2) __extension__ ({ \ - float32x2_t __s0 = __p0; \ - float32x4_t __s1 = __p1; \ - float32x2_t __ret; \ - __ret = vmulx_f32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \ - __ret; \ -}) -#else -#define vmulx_laneq_f32(__p0, __p1, __p2) __extension__ ({ \ - float32x2_t __s0 = __p0; \ - float32x4_t __s1 = __p1; \ - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - float32x2_t __ret; \ - __ret = __noswap_vmulx_f32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vnegq_f64(float64x2_t __p0) { - float64x2_t __ret; - __ret = -__p0; - return __ret; -} -#else -__ai float64x2_t vnegq_f64(float64x2_t __p0) { - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float64x2_t __ret; - __ret = -__rev0; - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vnegq_s64(int64x2_t __p0) { - int64x2_t __ret; - __ret = -__p0; - return __ret; -} -#else -__ai int64x2_t vnegq_s64(int64x2_t __p0) { - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int64x2_t __ret; - __ret = -__rev0; - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64x1_t vneg_f64(float64x1_t __p0) { - float64x1_t __ret; - __ret = -__p0; - return __ret; -} -#else -__ai float64x1_t vneg_f64(float64x1_t __p0) { - float64x1_t __ret; - __ret = -__p0; - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x1_t vneg_s64(int64x1_t __p0) { - int64x1_t __ret; - __ret = -__p0; - return __ret; -} -#else -__ai int64x1_t vneg_s64(int64x1_t __p0) { - int64x1_t __ret; - __ret = -__p0; - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64_t vnegd_s64(int64_t __p0) { - int64_t __ret; - __ret = (int64_t) __builtin_neon_vnegd_s64(__p0); - return __ret; -} -#else -__ai int64_t vnegd_s64(int64_t __p0) { - int64_t __ret; - __ret = (int64_t) __builtin_neon_vnegd_s64(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vpaddq_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16_t __ret; - __ret = (uint8x16_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); - return __ret; -} -#else -__ai uint8x16_t vpaddq_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __ret; - __ret = (uint8x16_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vpaddq_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); - return __ret; -} -#else -__ai uint32x4_t vpaddq_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4_t __rev0; __rev0 
= __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vpaddq_u64(uint64x2_t __p0, uint64x2_t __p1) { - uint64x2_t __ret; - __ret = (uint64x2_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); - return __ret; -} -#else -__ai uint64x2_t vpaddq_u64(uint64x2_t __p0, uint64x2_t __p1) { - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint64x2_t __ret; - __ret = (uint64x2_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vpaddq_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); - return __ret; -} -#else -__ai uint16x8_t vpaddq_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vpaddq_s8(int8x16_t __p0, int8x16_t __p1) { - int8x16_t __ret; - __ret = (int8x16_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); - return __ret; -} -#else -__ai int8x16_t vpaddq_s8(int8x16_t __p0, int8x16_t __p1) { - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __ret; - __ret = (int8x16_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vpaddq_f64(float64x2_t __p0, float64x2_t __p1) { - float64x2_t __ret; - __ret = (float64x2_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 42); - return __ret; -} -#else -__ai float64x2_t vpaddq_f64(float64x2_t __p0, float64x2_t __p1) { - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - float64x2_t __ret; - __ret = (float64x2_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vpaddq_f32(float32x4_t __p0, float32x4_t __p1) { - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 41); - return __ret; -} -#else -__ai float32x4_t vpaddq_f32(float32x4_t __p0, float32x4_t __p1) { - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - float32x4_t __ret; - __ret = (float32x4_t) 
__builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vpaddq_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); - return __ret; -} -#else -__ai int32x4_t vpaddq_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vpaddq_s64(int64x2_t __p0, int64x2_t __p1) { - int64x2_t __ret; - __ret = (int64x2_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 35); - return __ret; -} -#else -__ai int64x2_t vpaddq_s64(int64x2_t __p0, int64x2_t __p1) { - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - int64x2_t __ret; - __ret = (int64x2_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vpaddq_s16(int16x8_t __p0, int16x8_t __p1) { - int16x8_t __ret; - __ret = (int16x8_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); - return __ret; -} -#else -__ai int16x8_t vpaddq_s16(int16x8_t __p0, int16x8_t __p1) { - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __ret; - __ret = (int16x8_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64_t vpaddd_u64(uint64x2_t __p0) { - uint64_t __ret; - __ret = (uint64_t) __builtin_neon_vpaddd_u64((int8x16_t)__p0); - return __ret; -} -#else -__ai uint64_t vpaddd_u64(uint64x2_t __p0) { - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint64_t __ret; - __ret = (uint64_t) __builtin_neon_vpaddd_u64((int8x16_t)__rev0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64_t vpaddd_f64(float64x2_t __p0) { - float64_t __ret; - __ret = (float64_t) __builtin_neon_vpaddd_f64((int8x16_t)__p0); - return __ret; -} -#else -__ai float64_t vpaddd_f64(float64x2_t __p0) { - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float64_t __ret; - __ret = (float64_t) __builtin_neon_vpaddd_f64((int8x16_t)__rev0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64_t vpaddd_s64(int64x2_t __p0) { - int64_t __ret; - __ret = (int64_t) __builtin_neon_vpaddd_s64((int8x16_t)__p0); - return __ret; -} -#else -__ai int64_t vpaddd_s64(int64x2_t __p0) { - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int64_t __ret; - __ret = (int64_t) __builtin_neon_vpaddd_s64((int8x16_t)__rev0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32_t vpadds_f32(float32x2_t __p0) { - float32_t __ret; - __ret = (float32_t) __builtin_neon_vpadds_f32((int8x8_t)__p0); - return __ret; -} -#else -__ai float32_t vpadds_f32(float32x2_t __p0) { - float32x2_t 
__rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float32_t __ret; - __ret = (float32_t) __builtin_neon_vpadds_f32((int8x8_t)__rev0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vpmaxq_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16_t __ret; - __ret = (uint8x16_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); - return __ret; -} -#else -__ai uint8x16_t vpmaxq_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __ret; - __ret = (uint8x16_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vpmaxq_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); - return __ret; -} -#else -__ai uint32x4_t vpmaxq_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vpmaxq_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); - return __ret; -} -#else -__ai uint16x8_t vpmaxq_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vpmaxq_s8(int8x16_t __p0, int8x16_t __p1) { - int8x16_t __ret; - __ret = (int8x16_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); - return __ret; -} -#else -__ai int8x16_t vpmaxq_s8(int8x16_t __p0, int8x16_t __p1) { - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __ret; - __ret = (int8x16_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vpmaxq_f64(float64x2_t __p0, float64x2_t __p1) { - float64x2_t __ret; - __ret = (float64x2_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 42); - return __ret; -} -#else -__ai float64x2_t vpmaxq_f64(float64x2_t __p0, float64x2_t __p1) { - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - float64x2_t __ret; - __ret = (float64x2_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, 
(int8x16_t)__rev1, 42); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vpmaxq_f32(float32x4_t __p0, float32x4_t __p1) { - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 41); - return __ret; -} -#else -__ai float32x4_t vpmaxq_f32(float32x4_t __p0, float32x4_t __p1) { - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vpmaxq_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); - return __ret; -} -#else -__ai int32x4_t vpmaxq_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vpmaxq_s16(int16x8_t __p0, int16x8_t __p1) { - int16x8_t __ret; - __ret = (int16x8_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); - return __ret; -} -#else -__ai int16x8_t vpmaxq_s16(int16x8_t __p0, int16x8_t __p1) { - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __ret; - __ret = (int16x8_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64_t vpmaxqd_f64(float64x2_t __p0) { - float64_t __ret; - __ret = (float64_t) __builtin_neon_vpmaxqd_f64((int8x16_t)__p0); - return __ret; -} -#else -__ai float64_t vpmaxqd_f64(float64x2_t __p0) { - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float64_t __ret; - __ret = (float64_t) __builtin_neon_vpmaxqd_f64((int8x16_t)__rev0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32_t vpmaxs_f32(float32x2_t __p0) { - float32_t __ret; - __ret = (float32_t) __builtin_neon_vpmaxs_f32((int8x8_t)__p0); - return __ret; -} -#else -__ai float32_t vpmaxs_f32(float32x2_t __p0) { - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float32_t __ret; - __ret = (float32_t) __builtin_neon_vpmaxs_f32((int8x8_t)__rev0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vpmaxnmq_f64(float64x2_t __p0, float64x2_t __p1) { - float64x2_t __ret; - __ret = (float64x2_t) __builtin_neon_vpmaxnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 42); - return __ret; -} -#else -__ai float64x2_t vpmaxnmq_f64(float64x2_t __p0, float64x2_t __p1) { - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - float64x2_t __ret; - __ret = (float64x2_t) __builtin_neon_vpmaxnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - 
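The removed definitions in this region are the AArch64-only pairwise intrinsics (vpaddq_*, vpmaxq_*, vpmaxnmq_*, and the vpminq_* family that follows); each intrinsic has a little-endian form that calls the __builtin_neon_* builtin directly and a big-endian form that reverses lane order with __builtin_shufflevector before and after the call. A minimal usage sketch, assuming an AArch64 target with <arm_neon.h> available (the vectors, lane choices, and printed lanes below are illustrative only, not taken from the patch):

#include <arm_neon.h>
#include <stdio.h>

int main(void) {
    /* Two arbitrary illustrative inputs, loaded from arrays. */
    const float av[4] = {1.0f, 2.0f, 3.0f, 4.0f};
    const float bv[4] = {8.0f, 7.0f, 6.0f, 5.0f};
    float32x4_t a = vld1q_f32(av);
    float32x4_t b = vld1q_f32(bv);

    /* vpaddq_f32: pairwise add across the concatenation of a and b:
     * {a0+a1, a2+a3, b0+b1, b2+b3} = {3, 7, 15, 11}. */
    float32x4_t sums = vpaddq_f32(a, b);

    /* vpmaxnmq_f32: pairwise maximum with maxNum semantics (a lone NaN
     * operand loses to a number): {2, 4, 8, 6}. */
    float32x4_t maxes = vpmaxnmq_f32(a, b);

    printf("%f %f\n", vgetq_lane_f32(sums, 0), vgetq_lane_f32(maxes, 3));
    return 0;
}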
-#ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vpmaxnmq_f32(float32x4_t __p0, float32x4_t __p1) { - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vpmaxnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 41); - return __ret; -} -#else -__ai float32x4_t vpmaxnmq_f32(float32x4_t __p0, float32x4_t __p1) { - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vpmaxnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vpmaxnm_f32(float32x2_t __p0, float32x2_t __p1) { - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vpmaxnm_v((int8x8_t)__p0, (int8x8_t)__p1, 9); - return __ret; -} -#else -__ai float32x2_t vpmaxnm_f32(float32x2_t __p0, float32x2_t __p1) { - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vpmaxnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64_t vpmaxnmqd_f64(float64x2_t __p0) { - float64_t __ret; - __ret = (float64_t) __builtin_neon_vpmaxnmqd_f64((int8x16_t)__p0); - return __ret; -} -#else -__ai float64_t vpmaxnmqd_f64(float64x2_t __p0) { - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float64_t __ret; - __ret = (float64_t) __builtin_neon_vpmaxnmqd_f64((int8x16_t)__rev0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32_t vpmaxnms_f32(float32x2_t __p0) { - float32_t __ret; - __ret = (float32_t) __builtin_neon_vpmaxnms_f32((int8x8_t)__p0); - return __ret; -} -#else -__ai float32_t vpmaxnms_f32(float32x2_t __p0) { - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float32_t __ret; - __ret = (float32_t) __builtin_neon_vpmaxnms_f32((int8x8_t)__rev0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vpminq_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16_t __ret; - __ret = (uint8x16_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); - return __ret; -} -#else -__ai uint8x16_t vpminq_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __ret; - __ret = (uint8x16_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vpminq_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); - return __ret; -} -#else -__ai uint32x4_t vpminq_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return 
__ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vpminq_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); - return __ret; -} -#else -__ai uint16x8_t vpminq_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vpminq_s8(int8x16_t __p0, int8x16_t __p1) { - int8x16_t __ret; - __ret = (int8x16_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); - return __ret; -} -#else -__ai int8x16_t vpminq_s8(int8x16_t __p0, int8x16_t __p1) { - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __ret; - __ret = (int8x16_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vpminq_f64(float64x2_t __p0, float64x2_t __p1) { - float64x2_t __ret; - __ret = (float64x2_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 42); - return __ret; -} -#else -__ai float64x2_t vpminq_f64(float64x2_t __p0, float64x2_t __p1) { - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - float64x2_t __ret; - __ret = (float64x2_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vpminq_f32(float32x4_t __p0, float32x4_t __p1) { - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 41); - return __ret; -} -#else -__ai float32x4_t vpminq_f32(float32x4_t __p0, float32x4_t __p1) { - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vpminq_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); - return __ret; -} -#else -__ai int32x4_t vpminq_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vpminq_s16(int16x8_t __p0, int16x8_t __p1) { - int16x8_t __ret; - __ret = (int16x8_t) __builtin_neon_vpminq_v((int8x16_t)__p0, 
(int8x16_t)__p1, 33); - return __ret; -} -#else -__ai int16x8_t vpminq_s16(int16x8_t __p0, int16x8_t __p1) { - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __ret; - __ret = (int16x8_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64_t vpminqd_f64(float64x2_t __p0) { - float64_t __ret; - __ret = (float64_t) __builtin_neon_vpminqd_f64((int8x16_t)__p0); - return __ret; -} -#else -__ai float64_t vpminqd_f64(float64x2_t __p0) { - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float64_t __ret; - __ret = (float64_t) __builtin_neon_vpminqd_f64((int8x16_t)__rev0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32_t vpmins_f32(float32x2_t __p0) { - float32_t __ret; - __ret = (float32_t) __builtin_neon_vpmins_f32((int8x8_t)__p0); - return __ret; -} -#else -__ai float32_t vpmins_f32(float32x2_t __p0) { - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float32_t __ret; - __ret = (float32_t) __builtin_neon_vpmins_f32((int8x8_t)__rev0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vpminnmq_f64(float64x2_t __p0, float64x2_t __p1) { - float64x2_t __ret; - __ret = (float64x2_t) __builtin_neon_vpminnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 42); - return __ret; -} -#else -__ai float64x2_t vpminnmq_f64(float64x2_t __p0, float64x2_t __p1) { - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - float64x2_t __ret; - __ret = (float64x2_t) __builtin_neon_vpminnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vpminnmq_f32(float32x4_t __p0, float32x4_t __p1) { - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vpminnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 41); - return __ret; -} -#else -__ai float32x4_t vpminnmq_f32(float32x4_t __p0, float32x4_t __p1) { - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vpminnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vpminnm_f32(float32x2_t __p0, float32x2_t __p1) { - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vpminnm_v((int8x8_t)__p0, (int8x8_t)__p1, 9); - return __ret; -} -#else -__ai float32x2_t vpminnm_f32(float32x2_t __p0, float32x2_t __p1) { - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vpminnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64_t vpminnmqd_f64(float64x2_t __p0) { - float64_t __ret; - __ret = (float64_t) __builtin_neon_vpminnmqd_f64((int8x16_t)__p0); - return __ret; -} -#else -__ai float64_t vpminnmqd_f64(float64x2_t __p0) { - float64x2_t __rev0; 
__rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float64_t __ret; - __ret = (float64_t) __builtin_neon_vpminnmqd_f64((int8x16_t)__rev0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32_t vpminnms_f32(float32x2_t __p0) { - float32_t __ret; - __ret = (float32_t) __builtin_neon_vpminnms_f32((int8x8_t)__p0); - return __ret; -} -#else -__ai float32_t vpminnms_f32(float32x2_t __p0) { - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float32_t __ret; - __ret = (float32_t) __builtin_neon_vpminnms_f32((int8x8_t)__rev0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vqabsq_s64(int64x2_t __p0) { - int64x2_t __ret; - __ret = (int64x2_t) __builtin_neon_vqabsq_v((int8x16_t)__p0, 35); - return __ret; -} -#else -__ai int64x2_t vqabsq_s64(int64x2_t __p0) { - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int64x2_t __ret; - __ret = (int64x2_t) __builtin_neon_vqabsq_v((int8x16_t)__rev0, 35); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x1_t vqabs_s64(int64x1_t __p0) { - int64x1_t __ret; - __ret = (int64x1_t) __builtin_neon_vqabs_v((int8x8_t)__p0, 3); - return __ret; -} -#else -__ai int64x1_t vqabs_s64(int64x1_t __p0) { - int64x1_t __ret; - __ret = (int64x1_t) __builtin_neon_vqabs_v((int8x8_t)__p0, 3); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8_t vqabsb_s8(int8_t __p0) { - int8_t __ret; - __ret = (int8_t) __builtin_neon_vqabsb_s8(__p0); - return __ret; -} -#else -__ai int8_t vqabsb_s8(int8_t __p0) { - int8_t __ret; - __ret = (int8_t) __builtin_neon_vqabsb_s8(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32_t vqabss_s32(int32_t __p0) { - int32_t __ret; - __ret = (int32_t) __builtin_neon_vqabss_s32(__p0); - return __ret; -} -#else -__ai int32_t vqabss_s32(int32_t __p0) { - int32_t __ret; - __ret = (int32_t) __builtin_neon_vqabss_s32(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64_t vqabsd_s64(int64_t __p0) { - int64_t __ret; - __ret = (int64_t) __builtin_neon_vqabsd_s64(__p0); - return __ret; -} -#else -__ai int64_t vqabsd_s64(int64_t __p0) { - int64_t __ret; - __ret = (int64_t) __builtin_neon_vqabsd_s64(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16_t vqabsh_s16(int16_t __p0) { - int16_t __ret; - __ret = (int16_t) __builtin_neon_vqabsh_s16(__p0); - return __ret; -} -#else -__ai int16_t vqabsh_s16(int16_t __p0) { - int16_t __ret; - __ret = (int16_t) __builtin_neon_vqabsh_s16(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8_t vqaddb_u8(uint8_t __p0, uint8_t __p1) { - uint8_t __ret; - __ret = (uint8_t) __builtin_neon_vqaddb_u8(__p0, __p1); - return __ret; -} -#else -__ai uint8_t vqaddb_u8(uint8_t __p0, uint8_t __p1) { - uint8_t __ret; - __ret = (uint8_t) __builtin_neon_vqaddb_u8(__p0, __p1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32_t vqadds_u32(uint32_t __p0, uint32_t __p1) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vqadds_u32(__p0, __p1); - return __ret; -} -#else -__ai uint32_t vqadds_u32(uint32_t __p0, uint32_t __p1) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vqadds_u32(__p0, __p1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64_t vqaddd_u64(uint64_t __p0, uint64_t __p1) { - uint64_t __ret; - __ret = (uint64_t) __builtin_neon_vqaddd_u64(__p0, __p1); - return __ret; -} -#else -__ai uint64_t vqaddd_u64(uint64_t __p0, uint64_t __p1) { - 
uint64_t __ret; - __ret = (uint64_t) __builtin_neon_vqaddd_u64(__p0, __p1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16_t vqaddh_u16(uint16_t __p0, uint16_t __p1) { - uint16_t __ret; - __ret = (uint16_t) __builtin_neon_vqaddh_u16(__p0, __p1); - return __ret; -} -#else -__ai uint16_t vqaddh_u16(uint16_t __p0, uint16_t __p1) { - uint16_t __ret; - __ret = (uint16_t) __builtin_neon_vqaddh_u16(__p0, __p1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8_t vqaddb_s8(int8_t __p0, int8_t __p1) { - int8_t __ret; - __ret = (int8_t) __builtin_neon_vqaddb_s8(__p0, __p1); - return __ret; -} -#else -__ai int8_t vqaddb_s8(int8_t __p0, int8_t __p1) { - int8_t __ret; - __ret = (int8_t) __builtin_neon_vqaddb_s8(__p0, __p1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32_t vqadds_s32(int32_t __p0, int32_t __p1) { - int32_t __ret; - __ret = (int32_t) __builtin_neon_vqadds_s32(__p0, __p1); - return __ret; -} -#else -__ai int32_t vqadds_s32(int32_t __p0, int32_t __p1) { - int32_t __ret; - __ret = (int32_t) __builtin_neon_vqadds_s32(__p0, __p1); - return __ret; -} -__ai int32_t __noswap_vqadds_s32(int32_t __p0, int32_t __p1) { - int32_t __ret; - __ret = (int32_t) __builtin_neon_vqadds_s32(__p0, __p1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64_t vqaddd_s64(int64_t __p0, int64_t __p1) { - int64_t __ret; - __ret = (int64_t) __builtin_neon_vqaddd_s64(__p0, __p1); - return __ret; -} -#else -__ai int64_t vqaddd_s64(int64_t __p0, int64_t __p1) { - int64_t __ret; - __ret = (int64_t) __builtin_neon_vqaddd_s64(__p0, __p1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16_t vqaddh_s16(int16_t __p0, int16_t __p1) { - int16_t __ret; - __ret = (int16_t) __builtin_neon_vqaddh_s16(__p0, __p1); - return __ret; -} -#else -__ai int16_t vqaddh_s16(int16_t __p0, int16_t __p1) { - int16_t __ret; - __ret = (int16_t) __builtin_neon_vqaddh_s16(__p0, __p1); - return __ret; -} -__ai int16_t __noswap_vqaddh_s16(int16_t __p0, int16_t __p1) { - int16_t __ret; - __ret = (int16_t) __builtin_neon_vqaddh_s16(__p0, __p1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64_t vqdmlals_s32(int64_t __p0, int32_t __p1, int32_t __p2) { - int64_t __ret; - __ret = (int64_t) __builtin_neon_vqdmlals_s32(__p0, __p1, __p2); - return __ret; -} -#else -__ai int64_t vqdmlals_s32(int64_t __p0, int32_t __p1, int32_t __p2) { - int64_t __ret; - __ret = (int64_t) __builtin_neon_vqdmlals_s32(__p0, __p1, __p2); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32_t vqdmlalh_s16(int32_t __p0, int16_t __p1, int16_t __p2) { - int32_t __ret; - __ret = (int32_t) __builtin_neon_vqdmlalh_s16(__p0, __p1, __p2); - return __ret; -} -#else -__ai int32_t vqdmlalh_s16(int32_t __p0, int16_t __p1, int16_t __p2) { - int32_t __ret; - __ret = (int32_t) __builtin_neon_vqdmlalh_s16(__p0, __p1, __p2); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vqdmlal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) { - int64x2_t __ret; - __ret = vqdmlal_s32(__p0, vget_high_s32(__p1), vget_high_s32(__p2)); - return __ret; -} -#else -__ai int64x2_t vqdmlal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) { - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - int64x2_t __ret; - __ret = __noswap_vqdmlal_s32(__rev0, __noswap_vget_high_s32(__rev1), 
__noswap_vget_high_s32(__rev2)); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vqdmlal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) { - int32x4_t __ret; - __ret = vqdmlal_s16(__p0, vget_high_s16(__p1), vget_high_s16(__p2)); - return __ret; -} -#else -__ai int32x4_t vqdmlal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) { - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - int32x4_t __ret; - __ret = __noswap_vqdmlal_s16(__rev0, __noswap_vget_high_s16(__rev1), __noswap_vget_high_s16(__rev2)); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqdmlal_high_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ - int64x2_t __s0 = __p0; \ - int32x4_t __s1 = __p1; \ - int32x2_t __s2 = __p2; \ - int64x2_t __ret; \ - __ret = vqdmlal_s32(__s0, vget_high_s32(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3)); \ - __ret; \ -}) -#else -#define vqdmlal_high_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ - int64x2_t __s0 = __p0; \ - int32x4_t __s1 = __p1; \ - int32x2_t __s2 = __p2; \ - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \ - int64x2_t __ret; \ - __ret = __noswap_vqdmlal_s32(__rev0, __noswap_vget_high_s32(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqdmlal_high_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ - int32x4_t __s0 = __p0; \ - int16x8_t __s1 = __p1; \ - int16x4_t __s2 = __p2; \ - int32x4_t __ret; \ - __ret = vqdmlal_s16(__s0, vget_high_s16(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \ - __ret; \ -}) -#else -#define vqdmlal_high_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ - int32x4_t __s0 = __p0; \ - int16x8_t __s1 = __p1; \ - int16x4_t __s2 = __p2; \ - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ - int32x4_t __ret; \ - __ret = __noswap_vqdmlal_s16(__rev0, __noswap_vget_high_s16(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqdmlal_high_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ - int64x2_t __s0 = __p0; \ - int32x4_t __s1 = __p1; \ - int32x4_t __s2 = __p2; \ - int64x2_t __ret; \ - __ret = vqdmlal_s32(__s0, vget_high_s32(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3)); \ - __ret; \ -}) -#else -#define vqdmlal_high_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ - int64x2_t __s0 = __p0; \ - int32x4_t __s1 = __p1; \ - int32x4_t __s2 = __p2; \ - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - int32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ - int64x2_t 
__ret; \ - __ret = __noswap_vqdmlal_s32(__rev0, __noswap_vget_high_s32(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqdmlal_high_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ - int32x4_t __s0 = __p0; \ - int16x8_t __s1 = __p1; \ - int16x8_t __s2 = __p2; \ - int32x4_t __ret; \ - __ret = vqdmlal_s16(__s0, vget_high_s16(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \ - __ret; \ -}) -#else -#define vqdmlal_high_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ - int32x4_t __s0 = __p0; \ - int16x8_t __s1 = __p1; \ - int16x8_t __s2 = __p2; \ - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \ - int32x4_t __ret; \ - __ret = __noswap_vqdmlal_s16(__rev0, __noswap_vget_high_s16(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vqdmlal_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) { - int64x2_t __ret; - __ret = vqdmlal_n_s32(__p0, vget_high_s32(__p1), __p2); - return __ret; -} -#else -__ai int64x2_t vqdmlal_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) { - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int64x2_t __ret; - __ret = __noswap_vqdmlal_n_s32(__rev0, __noswap_vget_high_s32(__rev1), __p2); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vqdmlal_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) { - int32x4_t __ret; - __ret = vqdmlal_n_s16(__p0, vget_high_s16(__p1), __p2); - return __ret; -} -#else -__ai int32x4_t vqdmlal_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) { - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int32x4_t __ret; - __ret = __noswap_vqdmlal_n_s16(__rev0, __noswap_vget_high_s16(__rev1), __p2); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqdmlals_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ - int64_t __s0 = __p0; \ - int32_t __s1 = __p1; \ - int32x2_t __s2 = __p2; \ - int64_t __ret; \ - __ret = (int64_t) __builtin_neon_vqdmlals_lane_s32(__s0, __s1, (int8x8_t)__s2, __p3); \ - __ret; \ -}) -#else -#define vqdmlals_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ - int64_t __s0 = __p0; \ - int32_t __s1 = __p1; \ - int32x2_t __s2 = __p2; \ - int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \ - int64_t __ret; \ - __ret = (int64_t) __builtin_neon_vqdmlals_lane_s32(__s0, __s1, (int8x8_t)__rev2, __p3); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqdmlalh_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ - int32_t __s0 = __p0; \ - int16_t __s1 = __p1; \ - int16x4_t __s2 = __p2; \ - int32_t __ret; \ - __ret = (int32_t) __builtin_neon_vqdmlalh_lane_s16(__s0, __s1, (int8x8_t)__s2, __p3); \ - __ret; \ -}) -#else -#define vqdmlalh_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ - int32_t __s0 
= __p0; \ - int16_t __s1 = __p1; \ - int16x4_t __s2 = __p2; \ - int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ - int32_t __ret; \ - __ret = (int32_t) __builtin_neon_vqdmlalh_lane_s16(__s0, __s1, (int8x8_t)__rev2, __p3); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqdmlals_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ - int64_t __s0 = __p0; \ - int32_t __s1 = __p1; \ - int32x4_t __s2 = __p2; \ - int64_t __ret; \ - __ret = (int64_t) __builtin_neon_vqdmlals_laneq_s32(__s0, __s1, (int8x16_t)__s2, __p3); \ - __ret; \ -}) -#else -#define vqdmlals_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ - int64_t __s0 = __p0; \ - int32_t __s1 = __p1; \ - int32x4_t __s2 = __p2; \ - int32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ - int64_t __ret; \ - __ret = (int64_t) __builtin_neon_vqdmlals_laneq_s32(__s0, __s1, (int8x16_t)__rev2, __p3); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqdmlalh_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ - int32_t __s0 = __p0; \ - int16_t __s1 = __p1; \ - int16x8_t __s2 = __p2; \ - int32_t __ret; \ - __ret = (int32_t) __builtin_neon_vqdmlalh_laneq_s16(__s0, __s1, (int8x16_t)__s2, __p3); \ - __ret; \ -}) -#else -#define vqdmlalh_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ - int32_t __s0 = __p0; \ - int16_t __s1 = __p1; \ - int16x8_t __s2 = __p2; \ - int16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \ - int32_t __ret; \ - __ret = (int32_t) __builtin_neon_vqdmlalh_laneq_s16(__s0, __s1, (int8x16_t)__rev2, __p3); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqdmlal_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ - int64x2_t __s0 = __p0; \ - int32x2_t __s1 = __p1; \ - int32x4_t __s2 = __p2; \ - int64x2_t __ret; \ - __ret = vqdmlal_s32(__s0, __s1, __builtin_shufflevector(__s2, __s2, __p3, __p3)); \ - __ret; \ -}) -#else -#define vqdmlal_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ - int64x2_t __s0 = __p0; \ - int32x2_t __s1 = __p1; \ - int32x4_t __s2 = __p2; \ - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - int32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ - int64x2_t __ret; \ - __ret = __noswap_vqdmlal_s32(__rev0, __rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqdmlal_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ - int32x4_t __s0 = __p0; \ - int16x4_t __s1 = __p1; \ - int16x8_t __s2 = __p2; \ - int32x4_t __ret; \ - __ret = vqdmlal_s16(__s0, __s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \ - __ret; \ -}) -#else -#define vqdmlal_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ - int32x4_t __s0 = __p0; \ - int16x4_t __s1 = __p1; \ - int16x8_t __s2 = __p2; \ - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - int16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \ - int32x4_t __ret; \ - __ret = __noswap_vqdmlal_s16(__rev0, __rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64_t vqdmlsls_s32(int64_t __p0, int32_t __p1, 
int32_t __p2) { - int64_t __ret; - __ret = (int64_t) __builtin_neon_vqdmlsls_s32(__p0, __p1, __p2); - return __ret; -} -#else -__ai int64_t vqdmlsls_s32(int64_t __p0, int32_t __p1, int32_t __p2) { - int64_t __ret; - __ret = (int64_t) __builtin_neon_vqdmlsls_s32(__p0, __p1, __p2); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32_t vqdmlslh_s16(int32_t __p0, int16_t __p1, int16_t __p2) { - int32_t __ret; - __ret = (int32_t) __builtin_neon_vqdmlslh_s16(__p0, __p1, __p2); - return __ret; -} -#else -__ai int32_t vqdmlslh_s16(int32_t __p0, int16_t __p1, int16_t __p2) { - int32_t __ret; - __ret = (int32_t) __builtin_neon_vqdmlslh_s16(__p0, __p1, __p2); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vqdmlsl_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) { - int64x2_t __ret; - __ret = vqdmlsl_s32(__p0, vget_high_s32(__p1), vget_high_s32(__p2)); - return __ret; -} -#else -__ai int64x2_t vqdmlsl_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) { - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - int64x2_t __ret; - __ret = __noswap_vqdmlsl_s32(__rev0, __noswap_vget_high_s32(__rev1), __noswap_vget_high_s32(__rev2)); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vqdmlsl_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) { - int32x4_t __ret; - __ret = vqdmlsl_s16(__p0, vget_high_s16(__p1), vget_high_s16(__p2)); - return __ret; -} -#else -__ai int32x4_t vqdmlsl_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) { - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - int32x4_t __ret; - __ret = __noswap_vqdmlsl_s16(__rev0, __noswap_vget_high_s16(__rev1), __noswap_vget_high_s16(__rev2)); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqdmlsl_high_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ - int64x2_t __s0 = __p0; \ - int32x4_t __s1 = __p1; \ - int32x2_t __s2 = __p2; \ - int64x2_t __ret; \ - __ret = vqdmlsl_s32(__s0, vget_high_s32(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3)); \ - __ret; \ -}) -#else -#define vqdmlsl_high_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ - int64x2_t __s0 = __p0; \ - int32x4_t __s1 = __p1; \ - int32x2_t __s2 = __p2; \ - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \ - int64x2_t __ret; \ - __ret = __noswap_vqdmlsl_s32(__rev0, __noswap_vget_high_s32(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqdmlsl_high_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ - int32x4_t __s0 = __p0; \ - int16x8_t __s1 = __p1; \ - int16x4_t __s2 = __p2; \ - int32x4_t __ret; \ - __ret = vqdmlsl_s16(__s0, vget_high_s16(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \ - __ret; \ -}) -#else -#define vqdmlsl_high_lane_s16(__p0, __p1, __p2, __p3) 
__extension__ ({ \ - int32x4_t __s0 = __p0; \ - int16x8_t __s1 = __p1; \ - int16x4_t __s2 = __p2; \ - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ - int32x4_t __ret; \ - __ret = __noswap_vqdmlsl_s16(__rev0, __noswap_vget_high_s16(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqdmlsl_high_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ - int64x2_t __s0 = __p0; \ - int32x4_t __s1 = __p1; \ - int32x4_t __s2 = __p2; \ - int64x2_t __ret; \ - __ret = vqdmlsl_s32(__s0, vget_high_s32(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3)); \ - __ret; \ -}) -#else -#define vqdmlsl_high_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ - int64x2_t __s0 = __p0; \ - int32x4_t __s1 = __p1; \ - int32x4_t __s2 = __p2; \ - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - int32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ - int64x2_t __ret; \ - __ret = __noswap_vqdmlsl_s32(__rev0, __noswap_vget_high_s32(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqdmlsl_high_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ - int32x4_t __s0 = __p0; \ - int16x8_t __s1 = __p1; \ - int16x8_t __s2 = __p2; \ - int32x4_t __ret; \ - __ret = vqdmlsl_s16(__s0, vget_high_s16(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \ - __ret; \ -}) -#else -#define vqdmlsl_high_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ - int32x4_t __s0 = __p0; \ - int16x8_t __s1 = __p1; \ - int16x8_t __s2 = __p2; \ - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \ - int32x4_t __ret; \ - __ret = __noswap_vqdmlsl_s16(__rev0, __noswap_vget_high_s16(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vqdmlsl_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) { - int64x2_t __ret; - __ret = vqdmlsl_n_s32(__p0, vget_high_s32(__p1), __p2); - return __ret; -} -#else -__ai int64x2_t vqdmlsl_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) { - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int64x2_t __ret; - __ret = __noswap_vqdmlsl_n_s32(__rev0, __noswap_vget_high_s32(__rev1), __p2); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vqdmlsl_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) { - int32x4_t __ret; - __ret = vqdmlsl_n_s16(__p0, vget_high_s16(__p1), __p2); - return __ret; -} -#else -__ai int32x4_t vqdmlsl_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) { - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - 
int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int32x4_t __ret; - __ret = __noswap_vqdmlsl_n_s16(__rev0, __noswap_vget_high_s16(__rev1), __p2); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqdmlsls_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ - int64_t __s0 = __p0; \ - int32_t __s1 = __p1; \ - int32x2_t __s2 = __p2; \ - int64_t __ret; \ - __ret = (int64_t) __builtin_neon_vqdmlsls_lane_s32(__s0, __s1, (int8x8_t)__s2, __p3); \ - __ret; \ -}) -#else -#define vqdmlsls_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ - int64_t __s0 = __p0; \ - int32_t __s1 = __p1; \ - int32x2_t __s2 = __p2; \ - int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \ - int64_t __ret; \ - __ret = (int64_t) __builtin_neon_vqdmlsls_lane_s32(__s0, __s1, (int8x8_t)__rev2, __p3); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqdmlslh_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ - int32_t __s0 = __p0; \ - int16_t __s1 = __p1; \ - int16x4_t __s2 = __p2; \ - int32_t __ret; \ - __ret = (int32_t) __builtin_neon_vqdmlslh_lane_s16(__s0, __s1, (int8x8_t)__s2, __p3); \ - __ret; \ -}) -#else -#define vqdmlslh_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ - int32_t __s0 = __p0; \ - int16_t __s1 = __p1; \ - int16x4_t __s2 = __p2; \ - int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ - int32_t __ret; \ - __ret = (int32_t) __builtin_neon_vqdmlslh_lane_s16(__s0, __s1, (int8x8_t)__rev2, __p3); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqdmlsls_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ - int64_t __s0 = __p0; \ - int32_t __s1 = __p1; \ - int32x4_t __s2 = __p2; \ - int64_t __ret; \ - __ret = (int64_t) __builtin_neon_vqdmlsls_laneq_s32(__s0, __s1, (int8x16_t)__s2, __p3); \ - __ret; \ -}) -#else -#define vqdmlsls_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ - int64_t __s0 = __p0; \ - int32_t __s1 = __p1; \ - int32x4_t __s2 = __p2; \ - int32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ - int64_t __ret; \ - __ret = (int64_t) __builtin_neon_vqdmlsls_laneq_s32(__s0, __s1, (int8x16_t)__rev2, __p3); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqdmlslh_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ - int32_t __s0 = __p0; \ - int16_t __s1 = __p1; \ - int16x8_t __s2 = __p2; \ - int32_t __ret; \ - __ret = (int32_t) __builtin_neon_vqdmlslh_laneq_s16(__s0, __s1, (int8x16_t)__s2, __p3); \ - __ret; \ -}) -#else -#define vqdmlslh_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ - int32_t __s0 = __p0; \ - int16_t __s1 = __p1; \ - int16x8_t __s2 = __p2; \ - int16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \ - int32_t __ret; \ - __ret = (int32_t) __builtin_neon_vqdmlslh_laneq_s16(__s0, __s1, (int8x16_t)__rev2, __p3); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqdmlsl_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ - int64x2_t __s0 = __p0; \ - int32x2_t __s1 = __p1; \ - int32x4_t __s2 = __p2; \ - int64x2_t __ret; \ - __ret = vqdmlsl_s32(__s0, __s1, __builtin_shufflevector(__s2, __s2, __p3, __p3)); \ - __ret; \ -}) -#else -#define vqdmlsl_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ - int64x2_t __s0 = __p0; \ - int32x2_t __s1 = __p1; \ - int32x4_t __s2 = __p2; \ - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, 
__s1, 1, 0); \ - int32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ - int64x2_t __ret; \ - __ret = __noswap_vqdmlsl_s32(__rev0, __rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqdmlsl_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ - int32x4_t __s0 = __p0; \ - int16x4_t __s1 = __p1; \ - int16x8_t __s2 = __p2; \ - int32x4_t __ret; \ - __ret = vqdmlsl_s16(__s0, __s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \ - __ret; \ -}) -#else -#define vqdmlsl_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ - int32x4_t __s0 = __p0; \ - int16x4_t __s1 = __p1; \ - int16x8_t __s2 = __p2; \ - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - int16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \ - int32x4_t __ret; \ - __ret = __noswap_vqdmlsl_s16(__rev0, __rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32_t vqdmulhs_s32(int32_t __p0, int32_t __p1) { - int32_t __ret; - __ret = (int32_t) __builtin_neon_vqdmulhs_s32(__p0, __p1); - return __ret; -} -#else -__ai int32_t vqdmulhs_s32(int32_t __p0, int32_t __p1) { - int32_t __ret; - __ret = (int32_t) __builtin_neon_vqdmulhs_s32(__p0, __p1); - return __ret; -} -__ai int32_t __noswap_vqdmulhs_s32(int32_t __p0, int32_t __p1) { - int32_t __ret; - __ret = (int32_t) __builtin_neon_vqdmulhs_s32(__p0, __p1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16_t vqdmulhh_s16(int16_t __p0, int16_t __p1) { - int16_t __ret; - __ret = (int16_t) __builtin_neon_vqdmulhh_s16(__p0, __p1); - return __ret; -} -#else -__ai int16_t vqdmulhh_s16(int16_t __p0, int16_t __p1) { - int16_t __ret; - __ret = (int16_t) __builtin_neon_vqdmulhh_s16(__p0, __p1); - return __ret; -} -__ai int16_t __noswap_vqdmulhh_s16(int16_t __p0, int16_t __p1) { - int16_t __ret; - __ret = (int16_t) __builtin_neon_vqdmulhh_s16(__p0, __p1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqdmulhs_lane_s32(__p0_162, __p1_162, __p2_162) __extension__ ({ \ - int32_t __s0_162 = __p0_162; \ - int32x2_t __s1_162 = __p1_162; \ - int32_t __ret_162; \ - __ret_162 = vqdmulhs_s32(__s0_162, vget_lane_s32(__s1_162, __p2_162)); \ - __ret_162; \ -}) -#else -#define vqdmulhs_lane_s32(__p0_163, __p1_163, __p2_163) __extension__ ({ \ - int32_t __s0_163 = __p0_163; \ - int32x2_t __s1_163 = __p1_163; \ - int32x2_t __rev1_163; __rev1_163 = __builtin_shufflevector(__s1_163, __s1_163, 1, 0); \ - int32_t __ret_163; \ - __ret_163 = __noswap_vqdmulhs_s32(__s0_163, __noswap_vget_lane_s32(__rev1_163, __p2_163)); \ - __ret_163; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqdmulhh_lane_s16(__p0_164, __p1_164, __p2_164) __extension__ ({ \ - int16_t __s0_164 = __p0_164; \ - int16x4_t __s1_164 = __p1_164; \ - int16_t __ret_164; \ - __ret_164 = vqdmulhh_s16(__s0_164, vget_lane_s16(__s1_164, __p2_164)); \ - __ret_164; \ -}) -#else -#define vqdmulhh_lane_s16(__p0_165, __p1_165, __p2_165) __extension__ ({ \ - int16_t __s0_165 = __p0_165; \ - int16x4_t __s1_165 = __p1_165; \ - int16x4_t __rev1_165; __rev1_165 = __builtin_shufflevector(__s1_165, __s1_165, 3, 2, 1, 0); \ - int16_t __ret_165; \ - __ret_165 = __noswap_vqdmulhh_s16(__s0_165, 
__noswap_vget_lane_s16(__rev1_165, __p2_165)); \ - __ret_165; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqdmulhs_laneq_s32(__p0_166, __p1_166, __p2_166) __extension__ ({ \ - int32_t __s0_166 = __p0_166; \ - int32x4_t __s1_166 = __p1_166; \ - int32_t __ret_166; \ - __ret_166 = vqdmulhs_s32(__s0_166, vgetq_lane_s32(__s1_166, __p2_166)); \ - __ret_166; \ -}) -#else -#define vqdmulhs_laneq_s32(__p0_167, __p1_167, __p2_167) __extension__ ({ \ - int32_t __s0_167 = __p0_167; \ - int32x4_t __s1_167 = __p1_167; \ - int32x4_t __rev1_167; __rev1_167 = __builtin_shufflevector(__s1_167, __s1_167, 3, 2, 1, 0); \ - int32_t __ret_167; \ - __ret_167 = __noswap_vqdmulhs_s32(__s0_167, __noswap_vgetq_lane_s32(__rev1_167, __p2_167)); \ - __ret_167; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqdmulhh_laneq_s16(__p0_168, __p1_168, __p2_168) __extension__ ({ \ - int16_t __s0_168 = __p0_168; \ - int16x8_t __s1_168 = __p1_168; \ - int16_t __ret_168; \ - __ret_168 = vqdmulhh_s16(__s0_168, vgetq_lane_s16(__s1_168, __p2_168)); \ - __ret_168; \ -}) -#else -#define vqdmulhh_laneq_s16(__p0_169, __p1_169, __p2_169) __extension__ ({ \ - int16_t __s0_169 = __p0_169; \ - int16x8_t __s1_169 = __p1_169; \ - int16x8_t __rev1_169; __rev1_169 = __builtin_shufflevector(__s1_169, __s1_169, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16_t __ret_169; \ - __ret_169 = __noswap_vqdmulhh_s16(__s0_169, __noswap_vgetq_lane_s16(__rev1_169, __p2_169)); \ - __ret_169; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqdmulhq_laneq_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x4_t __s0 = __p0; \ - int32x4_t __s1 = __p1; \ - int32x4_t __ret; \ - __ret = vqdmulhq_s32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \ - __ret; \ -}) -#else -#define vqdmulhq_laneq_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x4_t __s0 = __p0; \ - int32x4_t __s1 = __p1; \ - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - int32x4_t __ret; \ - __ret = __noswap_vqdmulhq_s32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqdmulhq_laneq_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x8_t __s0 = __p0; \ - int16x8_t __s1 = __p1; \ - int16x8_t __ret; \ - __ret = vqdmulhq_s16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2)); \ - __ret; \ -}) -#else -#define vqdmulhq_laneq_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x8_t __s0 = __p0; \ - int16x8_t __s1 = __p1; \ - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x8_t __ret; \ - __ret = __noswap_vqdmulhq_s16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2)); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqdmulh_laneq_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x2_t __s0 = __p0; \ - int32x4_t __s1 = __p1; \ - int32x2_t __ret; \ - __ret = vqdmulh_s32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \ - __ret; \ -}) -#else -#define vqdmulh_laneq_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x2_t __s0 = __p0; \ - int32x4_t __s1 = __p1; \ - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); 
\ - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - int32x2_t __ret; \ - __ret = __noswap_vqdmulh_s32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqdmulh_laneq_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x4_t __s0 = __p0; \ - int16x8_t __s1 = __p1; \ - int16x4_t __ret; \ - __ret = vqdmulh_s16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \ - __ret; \ -}) -#else -#define vqdmulh_laneq_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x4_t __s0 = __p0; \ - int16x8_t __s1 = __p1; \ - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x4_t __ret; \ - __ret = __noswap_vqdmulh_s16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64_t vqdmulls_s32(int32_t __p0, int32_t __p1) { - int64_t __ret; - __ret = (int64_t) __builtin_neon_vqdmulls_s32(__p0, __p1); - return __ret; -} -#else -__ai int64_t vqdmulls_s32(int32_t __p0, int32_t __p1) { - int64_t __ret; - __ret = (int64_t) __builtin_neon_vqdmulls_s32(__p0, __p1); - return __ret; -} -__ai int64_t __noswap_vqdmulls_s32(int32_t __p0, int32_t __p1) { - int64_t __ret; - __ret = (int64_t) __builtin_neon_vqdmulls_s32(__p0, __p1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32_t vqdmullh_s16(int16_t __p0, int16_t __p1) { - int32_t __ret; - __ret = (int32_t) __builtin_neon_vqdmullh_s16(__p0, __p1); - return __ret; -} -#else -__ai int32_t vqdmullh_s16(int16_t __p0, int16_t __p1) { - int32_t __ret; - __ret = (int32_t) __builtin_neon_vqdmullh_s16(__p0, __p1); - return __ret; -} -__ai int32_t __noswap_vqdmullh_s16(int16_t __p0, int16_t __p1) { - int32_t __ret; - __ret = (int32_t) __builtin_neon_vqdmullh_s16(__p0, __p1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vqdmull_high_s32(int32x4_t __p0, int32x4_t __p1) { - int64x2_t __ret; - __ret = vqdmull_s32(vget_high_s32(__p0), vget_high_s32(__p1)); - return __ret; -} -#else -__ai int64x2_t vqdmull_high_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int64x2_t __ret; - __ret = __noswap_vqdmull_s32(__noswap_vget_high_s32(__rev0), __noswap_vget_high_s32(__rev1)); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vqdmull_high_s16(int16x8_t __p0, int16x8_t __p1) { - int32x4_t __ret; - __ret = vqdmull_s16(vget_high_s16(__p0), vget_high_s16(__p1)); - return __ret; -} -#else -__ai int32x4_t vqdmull_high_s16(int16x8_t __p0, int16x8_t __p1) { - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int32x4_t __ret; - __ret = __noswap_vqdmull_s16(__noswap_vget_high_s16(__rev0), __noswap_vget_high_s16(__rev1)); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqdmull_high_lane_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x4_t __s0 = __p0; \ - int32x2_t __s1 = __p1; \ - int64x2_t __ret; \ - __ret = 
vqdmull_s32(vget_high_s32(__s0), __builtin_shufflevector(__s1, __s1, __p2, __p2)); \ - __ret; \ -}) -#else -#define vqdmull_high_lane_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x4_t __s0 = __p0; \ - int32x2_t __s1 = __p1; \ - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - int64x2_t __ret; \ - __ret = __noswap_vqdmull_s32(__noswap_vget_high_s32(__rev0), __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqdmull_high_lane_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x8_t __s0 = __p0; \ - int16x4_t __s1 = __p1; \ - int32x4_t __ret; \ - __ret = vqdmull_s16(vget_high_s16(__s0), __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \ - __ret; \ -}) -#else -#define vqdmull_high_lane_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x8_t __s0 = __p0; \ - int16x4_t __s1 = __p1; \ - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - int32x4_t __ret; \ - __ret = __noswap_vqdmull_s16(__noswap_vget_high_s16(__rev0), __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqdmull_high_laneq_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x4_t __s0 = __p0; \ - int32x4_t __s1 = __p1; \ - int64x2_t __ret; \ - __ret = vqdmull_s32(vget_high_s32(__s0), __builtin_shufflevector(__s1, __s1, __p2, __p2)); \ - __ret; \ -}) -#else -#define vqdmull_high_laneq_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x4_t __s0 = __p0; \ - int32x4_t __s1 = __p1; \ - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - int64x2_t __ret; \ - __ret = __noswap_vqdmull_s32(__noswap_vget_high_s32(__rev0), __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqdmull_high_laneq_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x8_t __s0 = __p0; \ - int16x8_t __s1 = __p1; \ - int32x4_t __ret; \ - __ret = vqdmull_s16(vget_high_s16(__s0), __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \ - __ret; \ -}) -#else -#define vqdmull_high_laneq_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x8_t __s0 = __p0; \ - int16x8_t __s1 = __p1; \ - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - int32x4_t __ret; \ - __ret = __noswap_vqdmull_s16(__noswap_vget_high_s16(__rev0), __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vqdmull_high_n_s32(int32x4_t __p0, int32_t __p1) { - int64x2_t __ret; - __ret = vqdmull_n_s32(vget_high_s32(__p0), __p1); - return __ret; -} -#else -__ai int64x2_t vqdmull_high_n_s32(int32x4_t __p0, int32_t __p1) { - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int64x2_t __ret; - __ret = __noswap_vqdmull_n_s32(__noswap_vget_high_s32(__rev0), __p1); - __ret = __builtin_shufflevector(__ret, __ret, 1, 
0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vqdmull_high_n_s16(int16x8_t __p0, int16_t __p1) { - int32x4_t __ret; - __ret = vqdmull_n_s16(vget_high_s16(__p0), __p1); - return __ret; -} -#else -__ai int32x4_t vqdmull_high_n_s16(int16x8_t __p0, int16_t __p1) { - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int32x4_t __ret; - __ret = __noswap_vqdmull_n_s16(__noswap_vget_high_s16(__rev0), __p1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqdmulls_lane_s32(__p0_170, __p1_170, __p2_170) __extension__ ({ \ - int32_t __s0_170 = __p0_170; \ - int32x2_t __s1_170 = __p1_170; \ - int64_t __ret_170; \ - __ret_170 = vqdmulls_s32(__s0_170, vget_lane_s32(__s1_170, __p2_170)); \ - __ret_170; \ -}) -#else -#define vqdmulls_lane_s32(__p0_171, __p1_171, __p2_171) __extension__ ({ \ - int32_t __s0_171 = __p0_171; \ - int32x2_t __s1_171 = __p1_171; \ - int32x2_t __rev1_171; __rev1_171 = __builtin_shufflevector(__s1_171, __s1_171, 1, 0); \ - int64_t __ret_171; \ - __ret_171 = __noswap_vqdmulls_s32(__s0_171, __noswap_vget_lane_s32(__rev1_171, __p2_171)); \ - __ret_171; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqdmullh_lane_s16(__p0_172, __p1_172, __p2_172) __extension__ ({ \ - int16_t __s0_172 = __p0_172; \ - int16x4_t __s1_172 = __p1_172; \ - int32_t __ret_172; \ - __ret_172 = vqdmullh_s16(__s0_172, vget_lane_s16(__s1_172, __p2_172)); \ - __ret_172; \ -}) -#else -#define vqdmullh_lane_s16(__p0_173, __p1_173, __p2_173) __extension__ ({ \ - int16_t __s0_173 = __p0_173; \ - int16x4_t __s1_173 = __p1_173; \ - int16x4_t __rev1_173; __rev1_173 = __builtin_shufflevector(__s1_173, __s1_173, 3, 2, 1, 0); \ - int32_t __ret_173; \ - __ret_173 = __noswap_vqdmullh_s16(__s0_173, __noswap_vget_lane_s16(__rev1_173, __p2_173)); \ - __ret_173; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqdmulls_laneq_s32(__p0_174, __p1_174, __p2_174) __extension__ ({ \ - int32_t __s0_174 = __p0_174; \ - int32x4_t __s1_174 = __p1_174; \ - int64_t __ret_174; \ - __ret_174 = vqdmulls_s32(__s0_174, vgetq_lane_s32(__s1_174, __p2_174)); \ - __ret_174; \ -}) -#else -#define vqdmulls_laneq_s32(__p0_175, __p1_175, __p2_175) __extension__ ({ \ - int32_t __s0_175 = __p0_175; \ - int32x4_t __s1_175 = __p1_175; \ - int32x4_t __rev1_175; __rev1_175 = __builtin_shufflevector(__s1_175, __s1_175, 3, 2, 1, 0); \ - int64_t __ret_175; \ - __ret_175 = __noswap_vqdmulls_s32(__s0_175, __noswap_vgetq_lane_s32(__rev1_175, __p2_175)); \ - __ret_175; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqdmullh_laneq_s16(__p0_176, __p1_176, __p2_176) __extension__ ({ \ - int16_t __s0_176 = __p0_176; \ - int16x8_t __s1_176 = __p1_176; \ - int32_t __ret_176; \ - __ret_176 = vqdmullh_s16(__s0_176, vgetq_lane_s16(__s1_176, __p2_176)); \ - __ret_176; \ -}) -#else -#define vqdmullh_laneq_s16(__p0_177, __p1_177, __p2_177) __extension__ ({ \ - int16_t __s0_177 = __p0_177; \ - int16x8_t __s1_177 = __p1_177; \ - int16x8_t __rev1_177; __rev1_177 = __builtin_shufflevector(__s1_177, __s1_177, 7, 6, 5, 4, 3, 2, 1, 0); \ - int32_t __ret_177; \ - __ret_177 = __noswap_vqdmullh_s16(__s0_177, __noswap_vgetq_lane_s16(__rev1_177, __p2_177)); \ - __ret_177; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqdmull_laneq_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x2_t __s0 = __p0; \ - int32x4_t __s1 = __p1; \ - int64x2_t __ret; \ - __ret = vqdmull_s32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \ - __ret; \ 
-}) -#else -#define vqdmull_laneq_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x2_t __s0 = __p0; \ - int32x4_t __s1 = __p1; \ - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - int64x2_t __ret; \ - __ret = __noswap_vqdmull_s32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqdmull_laneq_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x4_t __s0 = __p0; \ - int16x8_t __s1 = __p1; \ - int32x4_t __ret; \ - __ret = vqdmull_s16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \ - __ret; \ -}) -#else -#define vqdmull_laneq_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x4_t __s0 = __p0; \ - int16x8_t __s1 = __p1; \ - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - int32x4_t __ret; \ - __ret = __noswap_vqdmull_s16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16_t vqmovns_s32(int32_t __p0) { - int16_t __ret; - __ret = (int16_t) __builtin_neon_vqmovns_s32(__p0); - return __ret; -} -#else -__ai int16_t vqmovns_s32(int32_t __p0) { - int16_t __ret; - __ret = (int16_t) __builtin_neon_vqmovns_s32(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32_t vqmovnd_s64(int64_t __p0) { - int32_t __ret; - __ret = (int32_t) __builtin_neon_vqmovnd_s64(__p0); - return __ret; -} -#else -__ai int32_t vqmovnd_s64(int64_t __p0) { - int32_t __ret; - __ret = (int32_t) __builtin_neon_vqmovnd_s64(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8_t vqmovnh_s16(int16_t __p0) { - int8_t __ret; - __ret = (int8_t) __builtin_neon_vqmovnh_s16(__p0); - return __ret; -} -#else -__ai int8_t vqmovnh_s16(int16_t __p0) { - int8_t __ret; - __ret = (int8_t) __builtin_neon_vqmovnh_s16(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16_t vqmovns_u32(uint32_t __p0) { - uint16_t __ret; - __ret = (uint16_t) __builtin_neon_vqmovns_u32(__p0); - return __ret; -} -#else -__ai uint16_t vqmovns_u32(uint32_t __p0) { - uint16_t __ret; - __ret = (uint16_t) __builtin_neon_vqmovns_u32(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32_t vqmovnd_u64(uint64_t __p0) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vqmovnd_u64(__p0); - return __ret; -} -#else -__ai uint32_t vqmovnd_u64(uint64_t __p0) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vqmovnd_u64(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8_t vqmovnh_u16(uint16_t __p0) { - uint8_t __ret; - __ret = (uint8_t) __builtin_neon_vqmovnh_u16(__p0); - return __ret; -} -#else -__ai uint8_t vqmovnh_u16(uint16_t __p0) { - uint8_t __ret; - __ret = (uint8_t) __builtin_neon_vqmovnh_u16(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vqmovn_high_u32(uint16x4_t __p0, uint32x4_t __p1) { - uint16x8_t __ret; - __ret = vcombine_u16(__p0, vqmovn_u32(__p1)); - return __ret; -} -#else -__ai uint16x8_t vqmovn_high_u32(uint16x4_t __p0, uint32x4_t __p1) { - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint16x8_t 
__ret; - __ret = __noswap_vcombine_u16(__rev0, __noswap_vqmovn_u32(__rev1)); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vqmovn_high_u64(uint32x2_t __p0, uint64x2_t __p1) { - uint32x4_t __ret; - __ret = vcombine_u32(__p0, vqmovn_u64(__p1)); - return __ret; -} -#else -__ai uint32x4_t vqmovn_high_u64(uint32x2_t __p0, uint64x2_t __p1) { - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint32x4_t __ret; - __ret = __noswap_vcombine_u32(__rev0, __noswap_vqmovn_u64(__rev1)); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vqmovn_high_u16(uint8x8_t __p0, uint16x8_t __p1) { - uint8x16_t __ret; - __ret = vcombine_u8(__p0, vqmovn_u16(__p1)); - return __ret; -} -#else -__ai uint8x16_t vqmovn_high_u16(uint8x8_t __p0, uint16x8_t __p1) { - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __ret; - __ret = __noswap_vcombine_u8(__rev0, __noswap_vqmovn_u16(__rev1)); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vqmovn_high_s32(int16x4_t __p0, int32x4_t __p1) { - int16x8_t __ret; - __ret = vcombine_s16(__p0, vqmovn_s32(__p1)); - return __ret; -} -#else -__ai int16x8_t vqmovn_high_s32(int16x4_t __p0, int32x4_t __p1) { - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int16x8_t __ret; - __ret = __noswap_vcombine_s16(__rev0, __noswap_vqmovn_s32(__rev1)); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vqmovn_high_s64(int32x2_t __p0, int64x2_t __p1) { - int32x4_t __ret; - __ret = vcombine_s32(__p0, vqmovn_s64(__p1)); - return __ret; -} -#else -__ai int32x4_t vqmovn_high_s64(int32x2_t __p0, int64x2_t __p1) { - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - int32x4_t __ret; - __ret = __noswap_vcombine_s32(__rev0, __noswap_vqmovn_s64(__rev1)); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vqmovn_high_s16(int8x8_t __p0, int16x8_t __p1) { - int8x16_t __ret; - __ret = vcombine_s8(__p0, vqmovn_s16(__p1)); - return __ret; -} -#else -__ai int8x16_t vqmovn_high_s16(int8x8_t __p0, int16x8_t __p1) { - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __ret; - __ret = __noswap_vcombine_s8(__rev0, __noswap_vqmovn_s16(__rev1)); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16_t vqmovuns_s32(int32_t __p0) { - int16_t __ret; - __ret = (int16_t) __builtin_neon_vqmovuns_s32(__p0); - return __ret; -} -#else -__ai int16_t vqmovuns_s32(int32_t __p0) { - int16_t __ret; - __ret = (int16_t) __builtin_neon_vqmovuns_s32(__p0); - return __ret; -} -#endif - 
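A minimal sketch of the lane-reversal idiom used throughout the big-endian (#else) paths in the wrappers above, assuming clang vector extensions; the int32x2/reverse_lanes names are illustrative only and are not part of arm_neon.h:

#include <stdint.h>
#include <stdio.h>

/* Illustrative 2-lane vector type; arm_neon.h defines its own int32x2_t. */
typedef int32_t int32x2 __attribute__((vector_size(8)));

/* Same idiom as the __rev0/__rev1 temporaries above: swap the two lanes. */
static inline int32x2 reverse_lanes(int32x2 v)
{
    return __builtin_shufflevector(v, v, 1, 0);
}

int main(void)
{
    int32x2 v = {10, 20};

    /* A big-endian wrapper reverses its inputs, applies the builtin to the
     * reversed values, then reverses the result again, so callers observe
     * little-endian lane numbering regardless of target byte order. */
    int32x2 r = reverse_lanes(v);
    printf("%d %d\n", (int)r[0], (int)r[1]);  /* prints: 20 10 */
    return 0;
}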
-#ifdef __LITTLE_ENDIAN__ -__ai int32_t vqmovund_s64(int64_t __p0) { - int32_t __ret; - __ret = (int32_t) __builtin_neon_vqmovund_s64(__p0); - return __ret; -} -#else -__ai int32_t vqmovund_s64(int64_t __p0) { - int32_t __ret; - __ret = (int32_t) __builtin_neon_vqmovund_s64(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8_t vqmovunh_s16(int16_t __p0) { - int8_t __ret; - __ret = (int8_t) __builtin_neon_vqmovunh_s16(__p0); - return __ret; -} -#else -__ai int8_t vqmovunh_s16(int16_t __p0) { - int8_t __ret; - __ret = (int8_t) __builtin_neon_vqmovunh_s16(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vqmovun_high_s32(int16x4_t __p0, int32x4_t __p1) { - uint16x8_t __ret; - __ret = vcombine_u16((uint16x4_t)(__p0), vqmovun_s32(__p1)); - return __ret; -} -#else -__ai uint16x8_t vqmovun_high_s32(int16x4_t __p0, int32x4_t __p1) { - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint16x8_t __ret; - __ret = __noswap_vcombine_u16((uint16x4_t)(__rev0), __noswap_vqmovun_s32(__rev1)); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vqmovun_high_s64(int32x2_t __p0, int64x2_t __p1) { - uint32x4_t __ret; - __ret = vcombine_u32((uint32x2_t)(__p0), vqmovun_s64(__p1)); - return __ret; -} -#else -__ai uint32x4_t vqmovun_high_s64(int32x2_t __p0, int64x2_t __p1) { - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint32x4_t __ret; - __ret = __noswap_vcombine_u32((uint32x2_t)(__rev0), __noswap_vqmovun_s64(__rev1)); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vqmovun_high_s16(int8x8_t __p0, int16x8_t __p1) { - uint8x16_t __ret; - __ret = vcombine_u8((uint8x8_t)(__p0), vqmovun_s16(__p1)); - return __ret; -} -#else -__ai uint8x16_t vqmovun_high_s16(int8x8_t __p0, int16x8_t __p1) { - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __ret; - __ret = __noswap_vcombine_u8((uint8x8_t)(__rev0), __noswap_vqmovun_s16(__rev1)); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vqnegq_s64(int64x2_t __p0) { - int64x2_t __ret; - __ret = (int64x2_t) __builtin_neon_vqnegq_v((int8x16_t)__p0, 35); - return __ret; -} -#else -__ai int64x2_t vqnegq_s64(int64x2_t __p0) { - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int64x2_t __ret; - __ret = (int64x2_t) __builtin_neon_vqnegq_v((int8x16_t)__rev0, 35); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x1_t vqneg_s64(int64x1_t __p0) { - int64x1_t __ret; - __ret = (int64x1_t) __builtin_neon_vqneg_v((int8x8_t)__p0, 3); - return __ret; -} -#else -__ai int64x1_t vqneg_s64(int64x1_t __p0) { - int64x1_t __ret; - __ret = (int64x1_t) __builtin_neon_vqneg_v((int8x8_t)__p0, 3); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8_t vqnegb_s8(int8_t __p0) { - int8_t __ret; - __ret = (int8_t) __builtin_neon_vqnegb_s8(__p0); - return __ret; -} -#else -__ai int8_t vqnegb_s8(int8_t __p0) { - 
int8_t __ret; - __ret = (int8_t) __builtin_neon_vqnegb_s8(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32_t vqnegs_s32(int32_t __p0) { - int32_t __ret; - __ret = (int32_t) __builtin_neon_vqnegs_s32(__p0); - return __ret; -} -#else -__ai int32_t vqnegs_s32(int32_t __p0) { - int32_t __ret; - __ret = (int32_t) __builtin_neon_vqnegs_s32(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64_t vqnegd_s64(int64_t __p0) { - int64_t __ret; - __ret = (int64_t) __builtin_neon_vqnegd_s64(__p0); - return __ret; -} -#else -__ai int64_t vqnegd_s64(int64_t __p0) { - int64_t __ret; - __ret = (int64_t) __builtin_neon_vqnegd_s64(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16_t vqnegh_s16(int16_t __p0) { - int16_t __ret; - __ret = (int16_t) __builtin_neon_vqnegh_s16(__p0); - return __ret; -} -#else -__ai int16_t vqnegh_s16(int16_t __p0) { - int16_t __ret; - __ret = (int16_t) __builtin_neon_vqnegh_s16(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32_t vqrdmulhs_s32(int32_t __p0, int32_t __p1) { - int32_t __ret; - __ret = (int32_t) __builtin_neon_vqrdmulhs_s32(__p0, __p1); - return __ret; -} -#else -__ai int32_t vqrdmulhs_s32(int32_t __p0, int32_t __p1) { - int32_t __ret; - __ret = (int32_t) __builtin_neon_vqrdmulhs_s32(__p0, __p1); - return __ret; -} -__ai int32_t __noswap_vqrdmulhs_s32(int32_t __p0, int32_t __p1) { - int32_t __ret; - __ret = (int32_t) __builtin_neon_vqrdmulhs_s32(__p0, __p1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16_t vqrdmulhh_s16(int16_t __p0, int16_t __p1) { - int16_t __ret; - __ret = (int16_t) __builtin_neon_vqrdmulhh_s16(__p0, __p1); - return __ret; -} -#else -__ai int16_t vqrdmulhh_s16(int16_t __p0, int16_t __p1) { - int16_t __ret; - __ret = (int16_t) __builtin_neon_vqrdmulhh_s16(__p0, __p1); - return __ret; -} -__ai int16_t __noswap_vqrdmulhh_s16(int16_t __p0, int16_t __p1) { - int16_t __ret; - __ret = (int16_t) __builtin_neon_vqrdmulhh_s16(__p0, __p1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqrdmulhs_lane_s32(__p0_178, __p1_178, __p2_178) __extension__ ({ \ - int32_t __s0_178 = __p0_178; \ - int32x2_t __s1_178 = __p1_178; \ - int32_t __ret_178; \ - __ret_178 = vqrdmulhs_s32(__s0_178, vget_lane_s32(__s1_178, __p2_178)); \ - __ret_178; \ -}) -#else -#define vqrdmulhs_lane_s32(__p0_179, __p1_179, __p2_179) __extension__ ({ \ - int32_t __s0_179 = __p0_179; \ - int32x2_t __s1_179 = __p1_179; \ - int32x2_t __rev1_179; __rev1_179 = __builtin_shufflevector(__s1_179, __s1_179, 1, 0); \ - int32_t __ret_179; \ - __ret_179 = __noswap_vqrdmulhs_s32(__s0_179, __noswap_vget_lane_s32(__rev1_179, __p2_179)); \ - __ret_179; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqrdmulhh_lane_s16(__p0_180, __p1_180, __p2_180) __extension__ ({ \ - int16_t __s0_180 = __p0_180; \ - int16x4_t __s1_180 = __p1_180; \ - int16_t __ret_180; \ - __ret_180 = vqrdmulhh_s16(__s0_180, vget_lane_s16(__s1_180, __p2_180)); \ - __ret_180; \ -}) -#else -#define vqrdmulhh_lane_s16(__p0_181, __p1_181, __p2_181) __extension__ ({ \ - int16_t __s0_181 = __p0_181; \ - int16x4_t __s1_181 = __p1_181; \ - int16x4_t __rev1_181; __rev1_181 = __builtin_shufflevector(__s1_181, __s1_181, 3, 2, 1, 0); \ - int16_t __ret_181; \ - __ret_181 = __noswap_vqrdmulhh_s16(__s0_181, __noswap_vget_lane_s16(__rev1_181, __p2_181)); \ - __ret_181; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqrdmulhs_laneq_s32(__p0_182, __p1_182, __p2_182) __extension__ ({ \ - int32_t __s0_182 = __p0_182; \ - 
int32x4_t __s1_182 = __p1_182; \ - int32_t __ret_182; \ - __ret_182 = vqrdmulhs_s32(__s0_182, vgetq_lane_s32(__s1_182, __p2_182)); \ - __ret_182; \ -}) -#else -#define vqrdmulhs_laneq_s32(__p0_183, __p1_183, __p2_183) __extension__ ({ \ - int32_t __s0_183 = __p0_183; \ - int32x4_t __s1_183 = __p1_183; \ - int32x4_t __rev1_183; __rev1_183 = __builtin_shufflevector(__s1_183, __s1_183, 3, 2, 1, 0); \ - int32_t __ret_183; \ - __ret_183 = __noswap_vqrdmulhs_s32(__s0_183, __noswap_vgetq_lane_s32(__rev1_183, __p2_183)); \ - __ret_183; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqrdmulhh_laneq_s16(__p0_184, __p1_184, __p2_184) __extension__ ({ \ - int16_t __s0_184 = __p0_184; \ - int16x8_t __s1_184 = __p1_184; \ - int16_t __ret_184; \ - __ret_184 = vqrdmulhh_s16(__s0_184, vgetq_lane_s16(__s1_184, __p2_184)); \ - __ret_184; \ -}) -#else -#define vqrdmulhh_laneq_s16(__p0_185, __p1_185, __p2_185) __extension__ ({ \ - int16_t __s0_185 = __p0_185; \ - int16x8_t __s1_185 = __p1_185; \ - int16x8_t __rev1_185; __rev1_185 = __builtin_shufflevector(__s1_185, __s1_185, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16_t __ret_185; \ - __ret_185 = __noswap_vqrdmulhh_s16(__s0_185, __noswap_vgetq_lane_s16(__rev1_185, __p2_185)); \ - __ret_185; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqrdmulhq_laneq_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x4_t __s0 = __p0; \ - int32x4_t __s1 = __p1; \ - int32x4_t __ret; \ - __ret = vqrdmulhq_s32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \ - __ret; \ -}) -#else -#define vqrdmulhq_laneq_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x4_t __s0 = __p0; \ - int32x4_t __s1 = __p1; \ - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - int32x4_t __ret; \ - __ret = __noswap_vqrdmulhq_s32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqrdmulhq_laneq_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x8_t __s0 = __p0; \ - int16x8_t __s1 = __p1; \ - int16x8_t __ret; \ - __ret = vqrdmulhq_s16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2)); \ - __ret; \ -}) -#else -#define vqrdmulhq_laneq_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x8_t __s0 = __p0; \ - int16x8_t __s1 = __p1; \ - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x8_t __ret; \ - __ret = __noswap_vqrdmulhq_s16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2)); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqrdmulh_laneq_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x2_t __s0 = __p0; \ - int32x4_t __s1 = __p1; \ - int32x2_t __ret; \ - __ret = vqrdmulh_s32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \ - __ret; \ -}) -#else -#define vqrdmulh_laneq_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x2_t __s0 = __p0; \ - int32x4_t __s1 = __p1; \ - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - int32x2_t __ret; \ - __ret = __noswap_vqrdmulh_s32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); 
\ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqrdmulh_laneq_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x4_t __s0 = __p0; \ - int16x8_t __s1 = __p1; \ - int16x4_t __ret; \ - __ret = vqrdmulh_s16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \ - __ret; \ -}) -#else -#define vqrdmulh_laneq_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x4_t __s0 = __p0; \ - int16x8_t __s1 = __p1; \ - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x4_t __ret; \ - __ret = __noswap_vqrdmulh_s16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8_t vqrshlb_u8(uint8_t __p0, uint8_t __p1) { - uint8_t __ret; - __ret = (uint8_t) __builtin_neon_vqrshlb_u8(__p0, __p1); - return __ret; -} -#else -__ai uint8_t vqrshlb_u8(uint8_t __p0, uint8_t __p1) { - uint8_t __ret; - __ret = (uint8_t) __builtin_neon_vqrshlb_u8(__p0, __p1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32_t vqrshls_u32(uint32_t __p0, uint32_t __p1) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vqrshls_u32(__p0, __p1); - return __ret; -} -#else -__ai uint32_t vqrshls_u32(uint32_t __p0, uint32_t __p1) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vqrshls_u32(__p0, __p1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64_t vqrshld_u64(uint64_t __p0, uint64_t __p1) { - uint64_t __ret; - __ret = (uint64_t) __builtin_neon_vqrshld_u64(__p0, __p1); - return __ret; -} -#else -__ai uint64_t vqrshld_u64(uint64_t __p0, uint64_t __p1) { - uint64_t __ret; - __ret = (uint64_t) __builtin_neon_vqrshld_u64(__p0, __p1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16_t vqrshlh_u16(uint16_t __p0, uint16_t __p1) { - uint16_t __ret; - __ret = (uint16_t) __builtin_neon_vqrshlh_u16(__p0, __p1); - return __ret; -} -#else -__ai uint16_t vqrshlh_u16(uint16_t __p0, uint16_t __p1) { - uint16_t __ret; - __ret = (uint16_t) __builtin_neon_vqrshlh_u16(__p0, __p1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8_t vqrshlb_s8(int8_t __p0, int8_t __p1) { - int8_t __ret; - __ret = (int8_t) __builtin_neon_vqrshlb_s8(__p0, __p1); - return __ret; -} -#else -__ai int8_t vqrshlb_s8(int8_t __p0, int8_t __p1) { - int8_t __ret; - __ret = (int8_t) __builtin_neon_vqrshlb_s8(__p0, __p1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32_t vqrshls_s32(int32_t __p0, int32_t __p1) { - int32_t __ret; - __ret = (int32_t) __builtin_neon_vqrshls_s32(__p0, __p1); - return __ret; -} -#else -__ai int32_t vqrshls_s32(int32_t __p0, int32_t __p1) { - int32_t __ret; - __ret = (int32_t) __builtin_neon_vqrshls_s32(__p0, __p1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64_t vqrshld_s64(int64_t __p0, int64_t __p1) { - int64_t __ret; - __ret = (int64_t) __builtin_neon_vqrshld_s64(__p0, __p1); - return __ret; -} -#else -__ai int64_t vqrshld_s64(int64_t __p0, int64_t __p1) { - int64_t __ret; - __ret = (int64_t) __builtin_neon_vqrshld_s64(__p0, __p1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16_t vqrshlh_s16(int16_t __p0, int16_t __p1) { - int16_t __ret; - __ret = (int16_t) __builtin_neon_vqrshlh_s16(__p0, __p1); - return __ret; -} -#else -__ai int16_t vqrshlh_s16(int16_t __p0, int16_t __p1) { 
- int16_t __ret; - __ret = (int16_t) __builtin_neon_vqrshlh_s16(__p0, __p1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqrshrn_high_n_u32(__p0_186, __p1_186, __p2_186) __extension__ ({ \ - uint16x4_t __s0_186 = __p0_186; \ - uint32x4_t __s1_186 = __p1_186; \ - uint16x8_t __ret_186; \ - __ret_186 = (uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_186), (uint16x4_t)(vqrshrn_n_u32(__s1_186, __p2_186)))); \ - __ret_186; \ -}) -#else -#define vqrshrn_high_n_u32(__p0_187, __p1_187, __p2_187) __extension__ ({ \ - uint16x4_t __s0_187 = __p0_187; \ - uint32x4_t __s1_187 = __p1_187; \ - uint16x4_t __rev0_187; __rev0_187 = __builtin_shufflevector(__s0_187, __s0_187, 3, 2, 1, 0); \ - uint32x4_t __rev1_187; __rev1_187 = __builtin_shufflevector(__s1_187, __s1_187, 3, 2, 1, 0); \ - uint16x8_t __ret_187; \ - __ret_187 = (uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_187), (uint16x4_t)(__noswap_vqrshrn_n_u32(__rev1_187, __p2_187)))); \ - __ret_187 = __builtin_shufflevector(__ret_187, __ret_187, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_187; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqrshrn_high_n_u64(__p0_188, __p1_188, __p2_188) __extension__ ({ \ - uint32x2_t __s0_188 = __p0_188; \ - uint64x2_t __s1_188 = __p1_188; \ - uint32x4_t __ret_188; \ - __ret_188 = (uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_188), (uint32x2_t)(vqrshrn_n_u64(__s1_188, __p2_188)))); \ - __ret_188; \ -}) -#else -#define vqrshrn_high_n_u64(__p0_189, __p1_189, __p2_189) __extension__ ({ \ - uint32x2_t __s0_189 = __p0_189; \ - uint64x2_t __s1_189 = __p1_189; \ - uint32x2_t __rev0_189; __rev0_189 = __builtin_shufflevector(__s0_189, __s0_189, 1, 0); \ - uint64x2_t __rev1_189; __rev1_189 = __builtin_shufflevector(__s1_189, __s1_189, 1, 0); \ - uint32x4_t __ret_189; \ - __ret_189 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_189), (uint32x2_t)(__noswap_vqrshrn_n_u64(__rev1_189, __p2_189)))); \ - __ret_189 = __builtin_shufflevector(__ret_189, __ret_189, 3, 2, 1, 0); \ - __ret_189; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqrshrn_high_n_u16(__p0_190, __p1_190, __p2_190) __extension__ ({ \ - uint8x8_t __s0_190 = __p0_190; \ - uint16x8_t __s1_190 = __p1_190; \ - uint8x16_t __ret_190; \ - __ret_190 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_190), (uint8x8_t)(vqrshrn_n_u16(__s1_190, __p2_190)))); \ - __ret_190; \ -}) -#else -#define vqrshrn_high_n_u16(__p0_191, __p1_191, __p2_191) __extension__ ({ \ - uint8x8_t __s0_191 = __p0_191; \ - uint16x8_t __s1_191 = __p1_191; \ - uint8x8_t __rev0_191; __rev0_191 = __builtin_shufflevector(__s0_191, __s0_191, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint16x8_t __rev1_191; __rev1_191 = __builtin_shufflevector(__s1_191, __s1_191, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint8x16_t __ret_191; \ - __ret_191 = (uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_191), (uint8x8_t)(__noswap_vqrshrn_n_u16(__rev1_191, __p2_191)))); \ - __ret_191 = __builtin_shufflevector(__ret_191, __ret_191, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_191; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqrshrn_high_n_s32(__p0_192, __p1_192, __p2_192) __extension__ ({ \ - int16x4_t __s0_192 = __p0_192; \ - int32x4_t __s1_192 = __p1_192; \ - int16x8_t __ret_192; \ - __ret_192 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_192), (int16x4_t)(vqrshrn_n_s32(__s1_192, __p2_192)))); \ - __ret_192; \ -}) -#else -#define vqrshrn_high_n_s32(__p0_193, __p1_193, __p2_193) __extension__ ({ \ - int16x4_t __s0_193 = __p0_193; \ - int32x4_t __s1_193 = __p1_193; \ - int16x4_t __rev0_193; __rev0_193 = 
__builtin_shufflevector(__s0_193, __s0_193, 3, 2, 1, 0); \ - int32x4_t __rev1_193; __rev1_193 = __builtin_shufflevector(__s1_193, __s1_193, 3, 2, 1, 0); \ - int16x8_t __ret_193; \ - __ret_193 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_193), (int16x4_t)(__noswap_vqrshrn_n_s32(__rev1_193, __p2_193)))); \ - __ret_193 = __builtin_shufflevector(__ret_193, __ret_193, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_193; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqrshrn_high_n_s64(__p0_194, __p1_194, __p2_194) __extension__ ({ \ - int32x2_t __s0_194 = __p0_194; \ - int64x2_t __s1_194 = __p1_194; \ - int32x4_t __ret_194; \ - __ret_194 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_194), (int32x2_t)(vqrshrn_n_s64(__s1_194, __p2_194)))); \ - __ret_194; \ -}) -#else -#define vqrshrn_high_n_s64(__p0_195, __p1_195, __p2_195) __extension__ ({ \ - int32x2_t __s0_195 = __p0_195; \ - int64x2_t __s1_195 = __p1_195; \ - int32x2_t __rev0_195; __rev0_195 = __builtin_shufflevector(__s0_195, __s0_195, 1, 0); \ - int64x2_t __rev1_195; __rev1_195 = __builtin_shufflevector(__s1_195, __s1_195, 1, 0); \ - int32x4_t __ret_195; \ - __ret_195 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_195), (int32x2_t)(__noswap_vqrshrn_n_s64(__rev1_195, __p2_195)))); \ - __ret_195 = __builtin_shufflevector(__ret_195, __ret_195, 3, 2, 1, 0); \ - __ret_195; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqrshrn_high_n_s16(__p0_196, __p1_196, __p2_196) __extension__ ({ \ - int8x8_t __s0_196 = __p0_196; \ - int16x8_t __s1_196 = __p1_196; \ - int8x16_t __ret_196; \ - __ret_196 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_196), (int8x8_t)(vqrshrn_n_s16(__s1_196, __p2_196)))); \ - __ret_196; \ -}) -#else -#define vqrshrn_high_n_s16(__p0_197, __p1_197, __p2_197) __extension__ ({ \ - int8x8_t __s0_197 = __p0_197; \ - int16x8_t __s1_197 = __p1_197; \ - int8x8_t __rev0_197; __rev0_197 = __builtin_shufflevector(__s0_197, __s0_197, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x8_t __rev1_197; __rev1_197 = __builtin_shufflevector(__s1_197, __s1_197, 7, 6, 5, 4, 3, 2, 1, 0); \ - int8x16_t __ret_197; \ - __ret_197 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_197), (int8x8_t)(__noswap_vqrshrn_n_s16(__rev1_197, __p2_197)))); \ - __ret_197 = __builtin_shufflevector(__ret_197, __ret_197, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_197; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqrshrns_n_u32(__p0, __p1) __extension__ ({ \ - uint32_t __s0 = __p0; \ - uint16_t __ret; \ - __ret = (uint16_t) __builtin_neon_vqrshrns_n_u32(__s0, __p1); \ - __ret; \ -}) -#else -#define vqrshrns_n_u32(__p0, __p1) __extension__ ({ \ - uint32_t __s0 = __p0; \ - uint16_t __ret; \ - __ret = (uint16_t) __builtin_neon_vqrshrns_n_u32(__s0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqrshrnd_n_u64(__p0, __p1) __extension__ ({ \ - uint64_t __s0 = __p0; \ - uint32_t __ret; \ - __ret = (uint32_t) __builtin_neon_vqrshrnd_n_u64(__s0, __p1); \ - __ret; \ -}) -#else -#define vqrshrnd_n_u64(__p0, __p1) __extension__ ({ \ - uint64_t __s0 = __p0; \ - uint32_t __ret; \ - __ret = (uint32_t) __builtin_neon_vqrshrnd_n_u64(__s0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqrshrnh_n_u16(__p0, __p1) __extension__ ({ \ - uint16_t __s0 = __p0; \ - uint8_t __ret; \ - __ret = (uint8_t) __builtin_neon_vqrshrnh_n_u16(__s0, __p1); \ - __ret; \ -}) -#else -#define vqrshrnh_n_u16(__p0, __p1) __extension__ ({ \ - uint16_t __s0 = __p0; \ - uint8_t __ret; \ - __ret = (uint8_t) __builtin_neon_vqrshrnh_n_u16(__s0, __p1); \ - 
__ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqrshrns_n_s32(__p0, __p1) __extension__ ({ \ - int32_t __s0 = __p0; \ - int16_t __ret; \ - __ret = (int16_t) __builtin_neon_vqrshrns_n_s32(__s0, __p1); \ - __ret; \ -}) -#else -#define vqrshrns_n_s32(__p0, __p1) __extension__ ({ \ - int32_t __s0 = __p0; \ - int16_t __ret; \ - __ret = (int16_t) __builtin_neon_vqrshrns_n_s32(__s0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqrshrnd_n_s64(__p0, __p1) __extension__ ({ \ - int64_t __s0 = __p0; \ - int32_t __ret; \ - __ret = (int32_t) __builtin_neon_vqrshrnd_n_s64(__s0, __p1); \ - __ret; \ -}) -#else -#define vqrshrnd_n_s64(__p0, __p1) __extension__ ({ \ - int64_t __s0 = __p0; \ - int32_t __ret; \ - __ret = (int32_t) __builtin_neon_vqrshrnd_n_s64(__s0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqrshrnh_n_s16(__p0, __p1) __extension__ ({ \ - int16_t __s0 = __p0; \ - int8_t __ret; \ - __ret = (int8_t) __builtin_neon_vqrshrnh_n_s16(__s0, __p1); \ - __ret; \ -}) -#else -#define vqrshrnh_n_s16(__p0, __p1) __extension__ ({ \ - int16_t __s0 = __p0; \ - int8_t __ret; \ - __ret = (int8_t) __builtin_neon_vqrshrnh_n_s16(__s0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqrshrun_high_n_s32(__p0_198, __p1_198, __p2_198) __extension__ ({ \ - int16x4_t __s0_198 = __p0_198; \ - int32x4_t __s1_198 = __p1_198; \ - int16x8_t __ret_198; \ - __ret_198 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_198), (int16x4_t)(vqrshrun_n_s32(__s1_198, __p2_198)))); \ - __ret_198; \ -}) -#else -#define vqrshrun_high_n_s32(__p0_199, __p1_199, __p2_199) __extension__ ({ \ - int16x4_t __s0_199 = __p0_199; \ - int32x4_t __s1_199 = __p1_199; \ - int16x4_t __rev0_199; __rev0_199 = __builtin_shufflevector(__s0_199, __s0_199, 3, 2, 1, 0); \ - int32x4_t __rev1_199; __rev1_199 = __builtin_shufflevector(__s1_199, __s1_199, 3, 2, 1, 0); \ - int16x8_t __ret_199; \ - __ret_199 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_199), (int16x4_t)(__noswap_vqrshrun_n_s32(__rev1_199, __p2_199)))); \ - __ret_199 = __builtin_shufflevector(__ret_199, __ret_199, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_199; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqrshrun_high_n_s64(__p0_200, __p1_200, __p2_200) __extension__ ({ \ - int32x2_t __s0_200 = __p0_200; \ - int64x2_t __s1_200 = __p1_200; \ - int32x4_t __ret_200; \ - __ret_200 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_200), (int32x2_t)(vqrshrun_n_s64(__s1_200, __p2_200)))); \ - __ret_200; \ -}) -#else -#define vqrshrun_high_n_s64(__p0_201, __p1_201, __p2_201) __extension__ ({ \ - int32x2_t __s0_201 = __p0_201; \ - int64x2_t __s1_201 = __p1_201; \ - int32x2_t __rev0_201; __rev0_201 = __builtin_shufflevector(__s0_201, __s0_201, 1, 0); \ - int64x2_t __rev1_201; __rev1_201 = __builtin_shufflevector(__s1_201, __s1_201, 1, 0); \ - int32x4_t __ret_201; \ - __ret_201 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_201), (int32x2_t)(__noswap_vqrshrun_n_s64(__rev1_201, __p2_201)))); \ - __ret_201 = __builtin_shufflevector(__ret_201, __ret_201, 3, 2, 1, 0); \ - __ret_201; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqrshrun_high_n_s16(__p0_202, __p1_202, __p2_202) __extension__ ({ \ - int8x8_t __s0_202 = __p0_202; \ - int16x8_t __s1_202 = __p1_202; \ - int8x16_t __ret_202; \ - __ret_202 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_202), (int8x8_t)(vqrshrun_n_s16(__s1_202, __p2_202)))); \ - __ret_202; \ -}) -#else -#define vqrshrun_high_n_s16(__p0_203, __p1_203, __p2_203) __extension__ ({ \ - int8x8_t __s0_203 = 
__p0_203; \ - int16x8_t __s1_203 = __p1_203; \ - int8x8_t __rev0_203; __rev0_203 = __builtin_shufflevector(__s0_203, __s0_203, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x8_t __rev1_203; __rev1_203 = __builtin_shufflevector(__s1_203, __s1_203, 7, 6, 5, 4, 3, 2, 1, 0); \ - int8x16_t __ret_203; \ - __ret_203 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_203), (int8x8_t)(__noswap_vqrshrun_n_s16(__rev1_203, __p2_203)))); \ - __ret_203 = __builtin_shufflevector(__ret_203, __ret_203, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_203; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqrshruns_n_s32(__p0, __p1) __extension__ ({ \ - int32_t __s0 = __p0; \ - int16_t __ret; \ - __ret = (int16_t) __builtin_neon_vqrshruns_n_s32(__s0, __p1); \ - __ret; \ -}) -#else -#define vqrshruns_n_s32(__p0, __p1) __extension__ ({ \ - int32_t __s0 = __p0; \ - int16_t __ret; \ - __ret = (int16_t) __builtin_neon_vqrshruns_n_s32(__s0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqrshrund_n_s64(__p0, __p1) __extension__ ({ \ - int64_t __s0 = __p0; \ - int32_t __ret; \ - __ret = (int32_t) __builtin_neon_vqrshrund_n_s64(__s0, __p1); \ - __ret; \ -}) -#else -#define vqrshrund_n_s64(__p0, __p1) __extension__ ({ \ - int64_t __s0 = __p0; \ - int32_t __ret; \ - __ret = (int32_t) __builtin_neon_vqrshrund_n_s64(__s0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqrshrunh_n_s16(__p0, __p1) __extension__ ({ \ - int16_t __s0 = __p0; \ - int8_t __ret; \ - __ret = (int8_t) __builtin_neon_vqrshrunh_n_s16(__s0, __p1); \ - __ret; \ -}) -#else -#define vqrshrunh_n_s16(__p0, __p1) __extension__ ({ \ - int16_t __s0 = __p0; \ - int8_t __ret; \ - __ret = (int8_t) __builtin_neon_vqrshrunh_n_s16(__s0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8_t vqshlb_u8(uint8_t __p0, uint8_t __p1) { - uint8_t __ret; - __ret = (uint8_t) __builtin_neon_vqshlb_u8(__p0, __p1); - return __ret; -} -#else -__ai uint8_t vqshlb_u8(uint8_t __p0, uint8_t __p1) { - uint8_t __ret; - __ret = (uint8_t) __builtin_neon_vqshlb_u8(__p0, __p1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32_t vqshls_u32(uint32_t __p0, uint32_t __p1) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vqshls_u32(__p0, __p1); - return __ret; -} -#else -__ai uint32_t vqshls_u32(uint32_t __p0, uint32_t __p1) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vqshls_u32(__p0, __p1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64_t vqshld_u64(uint64_t __p0, uint64_t __p1) { - uint64_t __ret; - __ret = (uint64_t) __builtin_neon_vqshld_u64(__p0, __p1); - return __ret; -} -#else -__ai uint64_t vqshld_u64(uint64_t __p0, uint64_t __p1) { - uint64_t __ret; - __ret = (uint64_t) __builtin_neon_vqshld_u64(__p0, __p1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16_t vqshlh_u16(uint16_t __p0, uint16_t __p1) { - uint16_t __ret; - __ret = (uint16_t) __builtin_neon_vqshlh_u16(__p0, __p1); - return __ret; -} -#else -__ai uint16_t vqshlh_u16(uint16_t __p0, uint16_t __p1) { - uint16_t __ret; - __ret = (uint16_t) __builtin_neon_vqshlh_u16(__p0, __p1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8_t vqshlb_s8(int8_t __p0, int8_t __p1) { - int8_t __ret; - __ret = (int8_t) __builtin_neon_vqshlb_s8(__p0, __p1); - return __ret; -} -#else -__ai int8_t vqshlb_s8(int8_t __p0, int8_t __p1) { - int8_t __ret; - __ret = (int8_t) __builtin_neon_vqshlb_s8(__p0, __p1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32_t 
vqshls_s32(int32_t __p0, int32_t __p1) { - int32_t __ret; - __ret = (int32_t) __builtin_neon_vqshls_s32(__p0, __p1); - return __ret; -} -#else -__ai int32_t vqshls_s32(int32_t __p0, int32_t __p1) { - int32_t __ret; - __ret = (int32_t) __builtin_neon_vqshls_s32(__p0, __p1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64_t vqshld_s64(int64_t __p0, int64_t __p1) { - int64_t __ret; - __ret = (int64_t) __builtin_neon_vqshld_s64(__p0, __p1); - return __ret; -} -#else -__ai int64_t vqshld_s64(int64_t __p0, int64_t __p1) { - int64_t __ret; - __ret = (int64_t) __builtin_neon_vqshld_s64(__p0, __p1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16_t vqshlh_s16(int16_t __p0, int16_t __p1) { - int16_t __ret; - __ret = (int16_t) __builtin_neon_vqshlh_s16(__p0, __p1); - return __ret; -} -#else -__ai int16_t vqshlh_s16(int16_t __p0, int16_t __p1) { - int16_t __ret; - __ret = (int16_t) __builtin_neon_vqshlh_s16(__p0, __p1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqshlb_n_u8(__p0, __p1) __extension__ ({ \ - uint8_t __s0 = __p0; \ - uint8_t __ret; \ - __ret = (uint8_t) __builtin_neon_vqshlb_n_u8(__s0, __p1); \ - __ret; \ -}) -#else -#define vqshlb_n_u8(__p0, __p1) __extension__ ({ \ - uint8_t __s0 = __p0; \ - uint8_t __ret; \ - __ret = (uint8_t) __builtin_neon_vqshlb_n_u8(__s0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqshls_n_u32(__p0, __p1) __extension__ ({ \ - uint32_t __s0 = __p0; \ - uint32_t __ret; \ - __ret = (uint32_t) __builtin_neon_vqshls_n_u32(__s0, __p1); \ - __ret; \ -}) -#else -#define vqshls_n_u32(__p0, __p1) __extension__ ({ \ - uint32_t __s0 = __p0; \ - uint32_t __ret; \ - __ret = (uint32_t) __builtin_neon_vqshls_n_u32(__s0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqshld_n_u64(__p0, __p1) __extension__ ({ \ - uint64_t __s0 = __p0; \ - uint64_t __ret; \ - __ret = (uint64_t) __builtin_neon_vqshld_n_u64(__s0, __p1); \ - __ret; \ -}) -#else -#define vqshld_n_u64(__p0, __p1) __extension__ ({ \ - uint64_t __s0 = __p0; \ - uint64_t __ret; \ - __ret = (uint64_t) __builtin_neon_vqshld_n_u64(__s0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqshlh_n_u16(__p0, __p1) __extension__ ({ \ - uint16_t __s0 = __p0; \ - uint16_t __ret; \ - __ret = (uint16_t) __builtin_neon_vqshlh_n_u16(__s0, __p1); \ - __ret; \ -}) -#else -#define vqshlh_n_u16(__p0, __p1) __extension__ ({ \ - uint16_t __s0 = __p0; \ - uint16_t __ret; \ - __ret = (uint16_t) __builtin_neon_vqshlh_n_u16(__s0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqshlb_n_s8(__p0, __p1) __extension__ ({ \ - int8_t __s0 = __p0; \ - int8_t __ret; \ - __ret = (int8_t) __builtin_neon_vqshlb_n_s8(__s0, __p1); \ - __ret; \ -}) -#else -#define vqshlb_n_s8(__p0, __p1) __extension__ ({ \ - int8_t __s0 = __p0; \ - int8_t __ret; \ - __ret = (int8_t) __builtin_neon_vqshlb_n_s8(__s0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqshls_n_s32(__p0, __p1) __extension__ ({ \ - int32_t __s0 = __p0; \ - int32_t __ret; \ - __ret = (int32_t) __builtin_neon_vqshls_n_s32(__s0, __p1); \ - __ret; \ -}) -#else -#define vqshls_n_s32(__p0, __p1) __extension__ ({ \ - int32_t __s0 = __p0; \ - int32_t __ret; \ - __ret = (int32_t) __builtin_neon_vqshls_n_s32(__s0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqshld_n_s64(__p0, __p1) __extension__ ({ \ - int64_t __s0 = __p0; \ - int64_t __ret; \ - __ret = (int64_t) __builtin_neon_vqshld_n_s64(__s0, __p1); \ - __ret; \ -}) 
-#else -#define vqshld_n_s64(__p0, __p1) __extension__ ({ \ - int64_t __s0 = __p0; \ - int64_t __ret; \ - __ret = (int64_t) __builtin_neon_vqshld_n_s64(__s0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqshlh_n_s16(__p0, __p1) __extension__ ({ \ - int16_t __s0 = __p0; \ - int16_t __ret; \ - __ret = (int16_t) __builtin_neon_vqshlh_n_s16(__s0, __p1); \ - __ret; \ -}) -#else -#define vqshlh_n_s16(__p0, __p1) __extension__ ({ \ - int16_t __s0 = __p0; \ - int16_t __ret; \ - __ret = (int16_t) __builtin_neon_vqshlh_n_s16(__s0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqshlub_n_s8(__p0, __p1) __extension__ ({ \ - int8_t __s0 = __p0; \ - int8_t __ret; \ - __ret = (int8_t) __builtin_neon_vqshlub_n_s8(__s0, __p1); \ - __ret; \ -}) -#else -#define vqshlub_n_s8(__p0, __p1) __extension__ ({ \ - int8_t __s0 = __p0; \ - int8_t __ret; \ - __ret = (int8_t) __builtin_neon_vqshlub_n_s8(__s0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqshlus_n_s32(__p0, __p1) __extension__ ({ \ - int32_t __s0 = __p0; \ - int32_t __ret; \ - __ret = (int32_t) __builtin_neon_vqshlus_n_s32(__s0, __p1); \ - __ret; \ -}) -#else -#define vqshlus_n_s32(__p0, __p1) __extension__ ({ \ - int32_t __s0 = __p0; \ - int32_t __ret; \ - __ret = (int32_t) __builtin_neon_vqshlus_n_s32(__s0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqshlud_n_s64(__p0, __p1) __extension__ ({ \ - int64_t __s0 = __p0; \ - int64_t __ret; \ - __ret = (int64_t) __builtin_neon_vqshlud_n_s64(__s0, __p1); \ - __ret; \ -}) -#else -#define vqshlud_n_s64(__p0, __p1) __extension__ ({ \ - int64_t __s0 = __p0; \ - int64_t __ret; \ - __ret = (int64_t) __builtin_neon_vqshlud_n_s64(__s0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqshluh_n_s16(__p0, __p1) __extension__ ({ \ - int16_t __s0 = __p0; \ - int16_t __ret; \ - __ret = (int16_t) __builtin_neon_vqshluh_n_s16(__s0, __p1); \ - __ret; \ -}) -#else -#define vqshluh_n_s16(__p0, __p1) __extension__ ({ \ - int16_t __s0 = __p0; \ - int16_t __ret; \ - __ret = (int16_t) __builtin_neon_vqshluh_n_s16(__s0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqshrn_high_n_u32(__p0_204, __p1_204, __p2_204) __extension__ ({ \ - uint16x4_t __s0_204 = __p0_204; \ - uint32x4_t __s1_204 = __p1_204; \ - uint16x8_t __ret_204; \ - __ret_204 = (uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_204), (uint16x4_t)(vqshrn_n_u32(__s1_204, __p2_204)))); \ - __ret_204; \ -}) -#else -#define vqshrn_high_n_u32(__p0_205, __p1_205, __p2_205) __extension__ ({ \ - uint16x4_t __s0_205 = __p0_205; \ - uint32x4_t __s1_205 = __p1_205; \ - uint16x4_t __rev0_205; __rev0_205 = __builtin_shufflevector(__s0_205, __s0_205, 3, 2, 1, 0); \ - uint32x4_t __rev1_205; __rev1_205 = __builtin_shufflevector(__s1_205, __s1_205, 3, 2, 1, 0); \ - uint16x8_t __ret_205; \ - __ret_205 = (uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_205), (uint16x4_t)(__noswap_vqshrn_n_u32(__rev1_205, __p2_205)))); \ - __ret_205 = __builtin_shufflevector(__ret_205, __ret_205, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_205; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqshrn_high_n_u64(__p0_206, __p1_206, __p2_206) __extension__ ({ \ - uint32x2_t __s0_206 = __p0_206; \ - uint64x2_t __s1_206 = __p1_206; \ - uint32x4_t __ret_206; \ - __ret_206 = (uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_206), (uint32x2_t)(vqshrn_n_u64(__s1_206, __p2_206)))); \ - __ret_206; \ -}) -#else -#define vqshrn_high_n_u64(__p0_207, __p1_207, __p2_207) __extension__ ({ \ - 
uint32x2_t __s0_207 = __p0_207; \ - uint64x2_t __s1_207 = __p1_207; \ - uint32x2_t __rev0_207; __rev0_207 = __builtin_shufflevector(__s0_207, __s0_207, 1, 0); \ - uint64x2_t __rev1_207; __rev1_207 = __builtin_shufflevector(__s1_207, __s1_207, 1, 0); \ - uint32x4_t __ret_207; \ - __ret_207 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_207), (uint32x2_t)(__noswap_vqshrn_n_u64(__rev1_207, __p2_207)))); \ - __ret_207 = __builtin_shufflevector(__ret_207, __ret_207, 3, 2, 1, 0); \ - __ret_207; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqshrn_high_n_u16(__p0_208, __p1_208, __p2_208) __extension__ ({ \ - uint8x8_t __s0_208 = __p0_208; \ - uint16x8_t __s1_208 = __p1_208; \ - uint8x16_t __ret_208; \ - __ret_208 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_208), (uint8x8_t)(vqshrn_n_u16(__s1_208, __p2_208)))); \ - __ret_208; \ -}) -#else -#define vqshrn_high_n_u16(__p0_209, __p1_209, __p2_209) __extension__ ({ \ - uint8x8_t __s0_209 = __p0_209; \ - uint16x8_t __s1_209 = __p1_209; \ - uint8x8_t __rev0_209; __rev0_209 = __builtin_shufflevector(__s0_209, __s0_209, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint16x8_t __rev1_209; __rev1_209 = __builtin_shufflevector(__s1_209, __s1_209, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint8x16_t __ret_209; \ - __ret_209 = (uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_209), (uint8x8_t)(__noswap_vqshrn_n_u16(__rev1_209, __p2_209)))); \ - __ret_209 = __builtin_shufflevector(__ret_209, __ret_209, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_209; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqshrn_high_n_s32(__p0_210, __p1_210, __p2_210) __extension__ ({ \ - int16x4_t __s0_210 = __p0_210; \ - int32x4_t __s1_210 = __p1_210; \ - int16x8_t __ret_210; \ - __ret_210 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_210), (int16x4_t)(vqshrn_n_s32(__s1_210, __p2_210)))); \ - __ret_210; \ -}) -#else -#define vqshrn_high_n_s32(__p0_211, __p1_211, __p2_211) __extension__ ({ \ - int16x4_t __s0_211 = __p0_211; \ - int32x4_t __s1_211 = __p1_211; \ - int16x4_t __rev0_211; __rev0_211 = __builtin_shufflevector(__s0_211, __s0_211, 3, 2, 1, 0); \ - int32x4_t __rev1_211; __rev1_211 = __builtin_shufflevector(__s1_211, __s1_211, 3, 2, 1, 0); \ - int16x8_t __ret_211; \ - __ret_211 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_211), (int16x4_t)(__noswap_vqshrn_n_s32(__rev1_211, __p2_211)))); \ - __ret_211 = __builtin_shufflevector(__ret_211, __ret_211, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_211; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqshrn_high_n_s64(__p0_212, __p1_212, __p2_212) __extension__ ({ \ - int32x2_t __s0_212 = __p0_212; \ - int64x2_t __s1_212 = __p1_212; \ - int32x4_t __ret_212; \ - __ret_212 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_212), (int32x2_t)(vqshrn_n_s64(__s1_212, __p2_212)))); \ - __ret_212; \ -}) -#else -#define vqshrn_high_n_s64(__p0_213, __p1_213, __p2_213) __extension__ ({ \ - int32x2_t __s0_213 = __p0_213; \ - int64x2_t __s1_213 = __p1_213; \ - int32x2_t __rev0_213; __rev0_213 = __builtin_shufflevector(__s0_213, __s0_213, 1, 0); \ - int64x2_t __rev1_213; __rev1_213 = __builtin_shufflevector(__s1_213, __s1_213, 1, 0); \ - int32x4_t __ret_213; \ - __ret_213 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_213), (int32x2_t)(__noswap_vqshrn_n_s64(__rev1_213, __p2_213)))); \ - __ret_213 = __builtin_shufflevector(__ret_213, __ret_213, 3, 2, 1, 0); \ - __ret_213; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqshrn_high_n_s16(__p0_214, __p1_214, __p2_214) __extension__ ({ \ - int8x8_t __s0_214 = __p0_214; \ - int16x8_t 
__s1_214 = __p1_214; \ - int8x16_t __ret_214; \ - __ret_214 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_214), (int8x8_t)(vqshrn_n_s16(__s1_214, __p2_214)))); \ - __ret_214; \ -}) -#else -#define vqshrn_high_n_s16(__p0_215, __p1_215, __p2_215) __extension__ ({ \ - int8x8_t __s0_215 = __p0_215; \ - int16x8_t __s1_215 = __p1_215; \ - int8x8_t __rev0_215; __rev0_215 = __builtin_shufflevector(__s0_215, __s0_215, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x8_t __rev1_215; __rev1_215 = __builtin_shufflevector(__s1_215, __s1_215, 7, 6, 5, 4, 3, 2, 1, 0); \ - int8x16_t __ret_215; \ - __ret_215 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_215), (int8x8_t)(__noswap_vqshrn_n_s16(__rev1_215, __p2_215)))); \ - __ret_215 = __builtin_shufflevector(__ret_215, __ret_215, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_215; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqshrns_n_u32(__p0, __p1) __extension__ ({ \ - uint32_t __s0 = __p0; \ - uint16_t __ret; \ - __ret = (uint16_t) __builtin_neon_vqshrns_n_u32(__s0, __p1); \ - __ret; \ -}) -#else -#define vqshrns_n_u32(__p0, __p1) __extension__ ({ \ - uint32_t __s0 = __p0; \ - uint16_t __ret; \ - __ret = (uint16_t) __builtin_neon_vqshrns_n_u32(__s0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqshrnd_n_u64(__p0, __p1) __extension__ ({ \ - uint64_t __s0 = __p0; \ - uint32_t __ret; \ - __ret = (uint32_t) __builtin_neon_vqshrnd_n_u64(__s0, __p1); \ - __ret; \ -}) -#else -#define vqshrnd_n_u64(__p0, __p1) __extension__ ({ \ - uint64_t __s0 = __p0; \ - uint32_t __ret; \ - __ret = (uint32_t) __builtin_neon_vqshrnd_n_u64(__s0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqshrnh_n_u16(__p0, __p1) __extension__ ({ \ - uint16_t __s0 = __p0; \ - uint8_t __ret; \ - __ret = (uint8_t) __builtin_neon_vqshrnh_n_u16(__s0, __p1); \ - __ret; \ -}) -#else -#define vqshrnh_n_u16(__p0, __p1) __extension__ ({ \ - uint16_t __s0 = __p0; \ - uint8_t __ret; \ - __ret = (uint8_t) __builtin_neon_vqshrnh_n_u16(__s0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqshrns_n_s32(__p0, __p1) __extension__ ({ \ - int32_t __s0 = __p0; \ - int16_t __ret; \ - __ret = (int16_t) __builtin_neon_vqshrns_n_s32(__s0, __p1); \ - __ret; \ -}) -#else -#define vqshrns_n_s32(__p0, __p1) __extension__ ({ \ - int32_t __s0 = __p0; \ - int16_t __ret; \ - __ret = (int16_t) __builtin_neon_vqshrns_n_s32(__s0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqshrnd_n_s64(__p0, __p1) __extension__ ({ \ - int64_t __s0 = __p0; \ - int32_t __ret; \ - __ret = (int32_t) __builtin_neon_vqshrnd_n_s64(__s0, __p1); \ - __ret; \ -}) -#else -#define vqshrnd_n_s64(__p0, __p1) __extension__ ({ \ - int64_t __s0 = __p0; \ - int32_t __ret; \ - __ret = (int32_t) __builtin_neon_vqshrnd_n_s64(__s0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqshrnh_n_s16(__p0, __p1) __extension__ ({ \ - int16_t __s0 = __p0; \ - int8_t __ret; \ - __ret = (int8_t) __builtin_neon_vqshrnh_n_s16(__s0, __p1); \ - __ret; \ -}) -#else -#define vqshrnh_n_s16(__p0, __p1) __extension__ ({ \ - int16_t __s0 = __p0; \ - int8_t __ret; \ - __ret = (int8_t) __builtin_neon_vqshrnh_n_s16(__s0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqshrun_high_n_s32(__p0_216, __p1_216, __p2_216) __extension__ ({ \ - int16x4_t __s0_216 = __p0_216; \ - int32x4_t __s1_216 = __p1_216; \ - int16x8_t __ret_216; \ - __ret_216 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_216), (int16x4_t)(vqshrun_n_s32(__s1_216, __p2_216)))); \ - 
__ret_216; \ -}) -#else -#define vqshrun_high_n_s32(__p0_217, __p1_217, __p2_217) __extension__ ({ \ - int16x4_t __s0_217 = __p0_217; \ - int32x4_t __s1_217 = __p1_217; \ - int16x4_t __rev0_217; __rev0_217 = __builtin_shufflevector(__s0_217, __s0_217, 3, 2, 1, 0); \ - int32x4_t __rev1_217; __rev1_217 = __builtin_shufflevector(__s1_217, __s1_217, 3, 2, 1, 0); \ - int16x8_t __ret_217; \ - __ret_217 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_217), (int16x4_t)(__noswap_vqshrun_n_s32(__rev1_217, __p2_217)))); \ - __ret_217 = __builtin_shufflevector(__ret_217, __ret_217, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_217; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqshrun_high_n_s64(__p0_218, __p1_218, __p2_218) __extension__ ({ \ - int32x2_t __s0_218 = __p0_218; \ - int64x2_t __s1_218 = __p1_218; \ - int32x4_t __ret_218; \ - __ret_218 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_218), (int32x2_t)(vqshrun_n_s64(__s1_218, __p2_218)))); \ - __ret_218; \ -}) -#else -#define vqshrun_high_n_s64(__p0_219, __p1_219, __p2_219) __extension__ ({ \ - int32x2_t __s0_219 = __p0_219; \ - int64x2_t __s1_219 = __p1_219; \ - int32x2_t __rev0_219; __rev0_219 = __builtin_shufflevector(__s0_219, __s0_219, 1, 0); \ - int64x2_t __rev1_219; __rev1_219 = __builtin_shufflevector(__s1_219, __s1_219, 1, 0); \ - int32x4_t __ret_219; \ - __ret_219 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_219), (int32x2_t)(__noswap_vqshrun_n_s64(__rev1_219, __p2_219)))); \ - __ret_219 = __builtin_shufflevector(__ret_219, __ret_219, 3, 2, 1, 0); \ - __ret_219; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqshrun_high_n_s16(__p0_220, __p1_220, __p2_220) __extension__ ({ \ - int8x8_t __s0_220 = __p0_220; \ - int16x8_t __s1_220 = __p1_220; \ - int8x16_t __ret_220; \ - __ret_220 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_220), (int8x8_t)(vqshrun_n_s16(__s1_220, __p2_220)))); \ - __ret_220; \ -}) -#else -#define vqshrun_high_n_s16(__p0_221, __p1_221, __p2_221) __extension__ ({ \ - int8x8_t __s0_221 = __p0_221; \ - int16x8_t __s1_221 = __p1_221; \ - int8x8_t __rev0_221; __rev0_221 = __builtin_shufflevector(__s0_221, __s0_221, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x8_t __rev1_221; __rev1_221 = __builtin_shufflevector(__s1_221, __s1_221, 7, 6, 5, 4, 3, 2, 1, 0); \ - int8x16_t __ret_221; \ - __ret_221 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_221), (int8x8_t)(__noswap_vqshrun_n_s16(__rev1_221, __p2_221)))); \ - __ret_221 = __builtin_shufflevector(__ret_221, __ret_221, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_221; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqshruns_n_s32(__p0, __p1) __extension__ ({ \ - int32_t __s0 = __p0; \ - int16_t __ret; \ - __ret = (int16_t) __builtin_neon_vqshruns_n_s32(__s0, __p1); \ - __ret; \ -}) -#else -#define vqshruns_n_s32(__p0, __p1) __extension__ ({ \ - int32_t __s0 = __p0; \ - int16_t __ret; \ - __ret = (int16_t) __builtin_neon_vqshruns_n_s32(__s0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqshrund_n_s64(__p0, __p1) __extension__ ({ \ - int64_t __s0 = __p0; \ - int32_t __ret; \ - __ret = (int32_t) __builtin_neon_vqshrund_n_s64(__s0, __p1); \ - __ret; \ -}) -#else -#define vqshrund_n_s64(__p0, __p1) __extension__ ({ \ - int64_t __s0 = __p0; \ - int32_t __ret; \ - __ret = (int32_t) __builtin_neon_vqshrund_n_s64(__s0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqshrunh_n_s16(__p0, __p1) __extension__ ({ \ - int16_t __s0 = __p0; \ - int8_t __ret; \ - __ret = (int8_t) __builtin_neon_vqshrunh_n_s16(__s0, 
__p1); \ - __ret; \ -}) -#else -#define vqshrunh_n_s16(__p0, __p1) __extension__ ({ \ - int16_t __s0 = __p0; \ - int8_t __ret; \ - __ret = (int8_t) __builtin_neon_vqshrunh_n_s16(__s0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8_t vqsubb_u8(uint8_t __p0, uint8_t __p1) { - uint8_t __ret; - __ret = (uint8_t) __builtin_neon_vqsubb_u8(__p0, __p1); - return __ret; -} -#else -__ai uint8_t vqsubb_u8(uint8_t __p0, uint8_t __p1) { - uint8_t __ret; - __ret = (uint8_t) __builtin_neon_vqsubb_u8(__p0, __p1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32_t vqsubs_u32(uint32_t __p0, uint32_t __p1) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vqsubs_u32(__p0, __p1); - return __ret; -} -#else -__ai uint32_t vqsubs_u32(uint32_t __p0, uint32_t __p1) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vqsubs_u32(__p0, __p1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64_t vqsubd_u64(uint64_t __p0, uint64_t __p1) { - uint64_t __ret; - __ret = (uint64_t) __builtin_neon_vqsubd_u64(__p0, __p1); - return __ret; -} -#else -__ai uint64_t vqsubd_u64(uint64_t __p0, uint64_t __p1) { - uint64_t __ret; - __ret = (uint64_t) __builtin_neon_vqsubd_u64(__p0, __p1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16_t vqsubh_u16(uint16_t __p0, uint16_t __p1) { - uint16_t __ret; - __ret = (uint16_t) __builtin_neon_vqsubh_u16(__p0, __p1); - return __ret; -} -#else -__ai uint16_t vqsubh_u16(uint16_t __p0, uint16_t __p1) { - uint16_t __ret; - __ret = (uint16_t) __builtin_neon_vqsubh_u16(__p0, __p1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8_t vqsubb_s8(int8_t __p0, int8_t __p1) { - int8_t __ret; - __ret = (int8_t) __builtin_neon_vqsubb_s8(__p0, __p1); - return __ret; -} -#else -__ai int8_t vqsubb_s8(int8_t __p0, int8_t __p1) { - int8_t __ret; - __ret = (int8_t) __builtin_neon_vqsubb_s8(__p0, __p1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32_t vqsubs_s32(int32_t __p0, int32_t __p1) { - int32_t __ret; - __ret = (int32_t) __builtin_neon_vqsubs_s32(__p0, __p1); - return __ret; -} -#else -__ai int32_t vqsubs_s32(int32_t __p0, int32_t __p1) { - int32_t __ret; - __ret = (int32_t) __builtin_neon_vqsubs_s32(__p0, __p1); - return __ret; -} -__ai int32_t __noswap_vqsubs_s32(int32_t __p0, int32_t __p1) { - int32_t __ret; - __ret = (int32_t) __builtin_neon_vqsubs_s32(__p0, __p1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64_t vqsubd_s64(int64_t __p0, int64_t __p1) { - int64_t __ret; - __ret = (int64_t) __builtin_neon_vqsubd_s64(__p0, __p1); - return __ret; -} -#else -__ai int64_t vqsubd_s64(int64_t __p0, int64_t __p1) { - int64_t __ret; - __ret = (int64_t) __builtin_neon_vqsubd_s64(__p0, __p1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16_t vqsubh_s16(int16_t __p0, int16_t __p1) { - int16_t __ret; - __ret = (int16_t) __builtin_neon_vqsubh_s16(__p0, __p1); - return __ret; -} -#else -__ai int16_t vqsubh_s16(int16_t __p0, int16_t __p1) { - int16_t __ret; - __ret = (int16_t) __builtin_neon_vqsubh_s16(__p0, __p1); - return __ret; -} -__ai int16_t __noswap_vqsubh_s16(int16_t __p0, int16_t __p1) { - int16_t __ret; - __ret = (int16_t) __builtin_neon_vqsubh_s16(__p0, __p1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly8x8_t vqtbl1_p8(poly8x16_t __p0, uint8x8_t __p1) { - poly8x8_t __ret; - __ret = (poly8x8_t) __builtin_neon_vqtbl1_v((int8x16_t)__p0, (int8x8_t)__p1, 4); - return __ret; -} -#else -__ai poly8x8_t vqtbl1_p8(poly8x16_t __p0, 
uint8x8_t __p1) { - poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x8_t __ret; - __ret = (poly8x8_t) __builtin_neon_vqtbl1_v((int8x16_t)__rev0, (int8x8_t)__rev1, 4); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly8x16_t vqtbl1q_p8(poly8x16_t __p0, uint8x16_t __p1) { - poly8x16_t __ret; - __ret = (poly8x16_t) __builtin_neon_vqtbl1q_v((int8x16_t)__p0, (int8x16_t)__p1, 36); - return __ret; -} -#else -__ai poly8x16_t vqtbl1q_p8(poly8x16_t __p0, uint8x16_t __p1) { - poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x16_t __ret; - __ret = (poly8x16_t) __builtin_neon_vqtbl1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, 36); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vqtbl1q_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16_t __ret; - __ret = (uint8x16_t) __builtin_neon_vqtbl1q_v((int8x16_t)__p0, (int8x16_t)__p1, 48); - return __ret; -} -#else -__ai uint8x16_t vqtbl1q_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __ret; - __ret = (uint8x16_t) __builtin_neon_vqtbl1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vqtbl1q_s8(int8x16_t __p0, int8x16_t __p1) { - int8x16_t __ret; - __ret = (int8x16_t) __builtin_neon_vqtbl1q_v((int8x16_t)__p0, (int8x16_t)__p1, 32); - return __ret; -} -#else -__ai int8x16_t vqtbl1q_s8(int8x16_t __p0, int8x16_t __p1) { - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __ret; - __ret = (int8x16_t) __builtin_neon_vqtbl1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vqtbl1_u8(uint8x16_t __p0, uint8x8_t __p1) { - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vqtbl1_v((int8x16_t)__p0, (int8x8_t)__p1, 16); - return __ret; -} -#else -__ai uint8x8_t vqtbl1_u8(uint8x16_t __p0, uint8x8_t __p1) { - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vqtbl1_v((int8x16_t)__rev0, (int8x8_t)__rev1, 16); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vqtbl1_s8(int8x16_t __p0, int8x8_t __p1) { - int8x8_t __ret; - __ret = 
(int8x8_t) __builtin_neon_vqtbl1_v((int8x16_t)__p0, (int8x8_t)__p1, 0); - return __ret; -} -#else -__ai int8x8_t vqtbl1_s8(int8x16_t __p0, int8x8_t __p1) { - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vqtbl1_v((int8x16_t)__rev0, (int8x8_t)__rev1, 0); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly8x8_t vqtbl2_p8(poly8x16x2_t __p0, uint8x8_t __p1) { - poly8x8_t __ret; - __ret = (poly8x8_t) __builtin_neon_vqtbl2_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x8_t)__p1, 4); - return __ret; -} -#else -__ai poly8x8_t vqtbl2_p8(poly8x16x2_t __p0, uint8x8_t __p1) { - poly8x16x2_t __rev0; - __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x8_t __ret; - __ret = (poly8x8_t) __builtin_neon_vqtbl2_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x8_t)__rev1, 4); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly8x16_t vqtbl2q_p8(poly8x16x2_t __p0, uint8x16_t __p1) { - poly8x16_t __ret; - __ret = (poly8x16_t) __builtin_neon_vqtbl2q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p1, 36); - return __ret; -} -#else -__ai poly8x16_t vqtbl2q_p8(poly8x16x2_t __p0, uint8x16_t __p1) { - poly8x16x2_t __rev0; - __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x16_t __ret; - __ret = (poly8x16_t) __builtin_neon_vqtbl2q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev1, 36); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vqtbl2q_u8(uint8x16x2_t __p0, uint8x16_t __p1) { - uint8x16_t __ret; - __ret = (uint8x16_t) __builtin_neon_vqtbl2q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p1, 48); - return __ret; -} -#else -__ai uint8x16_t vqtbl2q_u8(uint8x16x2_t __p0, uint8x16_t __p1) { - uint8x16x2_t __rev0; - __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __ret; - __ret = (uint8x16_t) __builtin_neon_vqtbl2q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev1, 48); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vqtbl2q_s8(int8x16x2_t __p0, int8x16_t __p1) { - 
int8x16_t __ret; - __ret = (int8x16_t) __builtin_neon_vqtbl2q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p1, 32); - return __ret; -} -#else -__ai int8x16_t vqtbl2q_s8(int8x16x2_t __p0, int8x16_t __p1) { - int8x16x2_t __rev0; - __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __ret; - __ret = (int8x16_t) __builtin_neon_vqtbl2q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev1, 32); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vqtbl2_u8(uint8x16x2_t __p0, uint8x8_t __p1) { - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vqtbl2_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x8_t)__p1, 16); - return __ret; -} -#else -__ai uint8x8_t vqtbl2_u8(uint8x16x2_t __p0, uint8x8_t __p1) { - uint8x16x2_t __rev0; - __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vqtbl2_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x8_t)__rev1, 16); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vqtbl2_s8(int8x16x2_t __p0, int8x8_t __p1) { - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vqtbl2_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x8_t)__p1, 0); - return __ret; -} -#else -__ai int8x8_t vqtbl2_s8(int8x16x2_t __p0, int8x8_t __p1) { - int8x16x2_t __rev0; - __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vqtbl2_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x8_t)__rev1, 0); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly8x8_t vqtbl3_p8(poly8x16x3_t __p0, uint8x8_t __p1) { - poly8x8_t __ret; - __ret = (poly8x8_t) __builtin_neon_vqtbl3_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x8_t)__p1, 4); - return __ret; -} -#else -__ai poly8x8_t vqtbl3_p8(poly8x16x3_t __p0, uint8x8_t __p1) { - poly8x16x3_t __rev0; - __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x8_t __ret; - __ret = (poly8x8_t) 
__builtin_neon_vqtbl3_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x8_t)__rev1, 4); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly8x16_t vqtbl3q_p8(poly8x16x3_t __p0, uint8x16_t __p1) { - poly8x16_t __ret; - __ret = (poly8x16_t) __builtin_neon_vqtbl3q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p1, 36); - return __ret; -} -#else -__ai poly8x16_t vqtbl3q_p8(poly8x16x3_t __p0, uint8x16_t __p1) { - poly8x16x3_t __rev0; - __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x16_t __ret; - __ret = (poly8x16_t) __builtin_neon_vqtbl3q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev1, 36); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vqtbl3q_u8(uint8x16x3_t __p0, uint8x16_t __p1) { - uint8x16_t __ret; - __ret = (uint8x16_t) __builtin_neon_vqtbl3q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p1, 48); - return __ret; -} -#else -__ai uint8x16_t vqtbl3q_u8(uint8x16x3_t __p0, uint8x16_t __p1) { - uint8x16x3_t __rev0; - __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __ret; - __ret = (uint8x16_t) __builtin_neon_vqtbl3q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev1, 48); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vqtbl3q_s8(int8x16x3_t __p0, int8x16_t __p1) { - int8x16_t __ret; - __ret = (int8x16_t) __builtin_neon_vqtbl3q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p1, 32); - return __ret; -} -#else -__ai int8x16_t vqtbl3q_s8(int8x16x3_t __p0, int8x16_t __p1) { - int8x16x3_t __rev0; - __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __ret; - __ret = (int8x16_t) __builtin_neon_vqtbl3q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], 
(int8x16_t)__rev1, 32); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vqtbl3_u8(uint8x16x3_t __p0, uint8x8_t __p1) { - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vqtbl3_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x8_t)__p1, 16); - return __ret; -} -#else -__ai uint8x8_t vqtbl3_u8(uint8x16x3_t __p0, uint8x8_t __p1) { - uint8x16x3_t __rev0; - __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vqtbl3_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x8_t)__rev1, 16); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vqtbl3_s8(int8x16x3_t __p0, int8x8_t __p1) { - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vqtbl3_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x8_t)__p1, 0); - return __ret; -} -#else -__ai int8x8_t vqtbl3_s8(int8x16x3_t __p0, int8x8_t __p1) { - int8x16x3_t __rev0; - __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vqtbl3_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x8_t)__rev1, 0); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly8x8_t vqtbl4_p8(poly8x16x4_t __p0, uint8x8_t __p1) { - poly8x8_t __ret; - __ret = (poly8x8_t) __builtin_neon_vqtbl4_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x8_t)__p1, 4); - return __ret; -} -#else -__ai poly8x8_t vqtbl4_p8(poly8x16x4_t __p0, uint8x8_t __p1) { - poly8x16x4_t __rev0; - __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x8_t __ret; - __ret = (poly8x8_t) __builtin_neon_vqtbl4_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev0.val[3], (int8x8_t)__rev1, 4); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return 
__ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly8x16_t vqtbl4q_p8(poly8x16x4_t __p0, uint8x16_t __p1) { - poly8x16_t __ret; - __ret = (poly8x16_t) __builtin_neon_vqtbl4q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x16_t)__p1, 36); - return __ret; -} -#else -__ai poly8x16_t vqtbl4q_p8(poly8x16x4_t __p0, uint8x16_t __p1) { - poly8x16x4_t __rev0; - __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x16_t __ret; - __ret = (poly8x16_t) __builtin_neon_vqtbl4q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev0.val[3], (int8x16_t)__rev1, 36); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vqtbl4q_u8(uint8x16x4_t __p0, uint8x16_t __p1) { - uint8x16_t __ret; - __ret = (uint8x16_t) __builtin_neon_vqtbl4q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x16_t)__p1, 48); - return __ret; -} -#else -__ai uint8x16_t vqtbl4q_u8(uint8x16x4_t __p0, uint8x16_t __p1) { - uint8x16x4_t __rev0; - __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __ret; - __ret = (uint8x16_t) __builtin_neon_vqtbl4q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev0.val[3], (int8x16_t)__rev1, 48); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vqtbl4q_s8(int8x16x4_t __p0, int8x16_t __p1) { - int8x16_t __ret; - __ret = (int8x16_t) __builtin_neon_vqtbl4q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x16_t)__p1, 32); - return __ret; -} -#else -__ai int8x16_t vqtbl4q_s8(int8x16x4_t __p0, int8x16_t __p1) { - int8x16x4_t __rev0; - __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 
2, 1, 0); - int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __ret; - __ret = (int8x16_t) __builtin_neon_vqtbl4q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev0.val[3], (int8x16_t)__rev1, 32); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vqtbl4_u8(uint8x16x4_t __p0, uint8x8_t __p1) { - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vqtbl4_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x8_t)__p1, 16); - return __ret; -} -#else -__ai uint8x8_t vqtbl4_u8(uint8x16x4_t __p0, uint8x8_t __p1) { - uint8x16x4_t __rev0; - __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vqtbl4_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev0.val[3], (int8x8_t)__rev1, 16); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vqtbl4_s8(int8x16x4_t __p0, int8x8_t __p1) { - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vqtbl4_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x8_t)__p1, 0); - return __ret; -} -#else -__ai int8x8_t vqtbl4_s8(int8x16x4_t __p0, int8x8_t __p1) { - int8x16x4_t __rev0; - __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vqtbl4_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev0.val[3], (int8x8_t)__rev1, 0); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly8x8_t vqtbx1_p8(poly8x8_t __p0, poly8x16_t __p1, uint8x8_t __p2) { - poly8x8_t __ret; - __ret = (poly8x8_t) __builtin_neon_vqtbx1_v((int8x8_t)__p0, (int8x16_t)__p1, (int8x8_t)__p2, 4); - return __ret; -} -#else -__ai poly8x8_t vqtbx1_p8(poly8x8_t __p0, poly8x16_t __p1, uint8x8_t __p2) { - poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, 
__p2, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x8_t __ret; - __ret = (poly8x8_t) __builtin_neon_vqtbx1_v((int8x8_t)__rev0, (int8x16_t)__rev1, (int8x8_t)__rev2, 4); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly8x16_t vqtbx1q_p8(poly8x16_t __p0, poly8x16_t __p1, uint8x16_t __p2) { - poly8x16_t __ret; - __ret = (poly8x16_t) __builtin_neon_vqtbx1q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 36); - return __ret; -} -#else -__ai poly8x16_t vqtbx1q_p8(poly8x16_t __p0, poly8x16_t __p1, uint8x16_t __p2) { - poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x16_t __ret; - __ret = (poly8x16_t) __builtin_neon_vqtbx1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 36); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vqtbx1q_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { - uint8x16_t __ret; - __ret = (uint8x16_t) __builtin_neon_vqtbx1q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 48); - return __ret; -} -#else -__ai uint8x16_t vqtbx1q_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __ret; - __ret = (uint8x16_t) __builtin_neon_vqtbx1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 48); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vqtbx1q_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) { - int8x16_t __ret; - __ret = (int8x16_t) __builtin_neon_vqtbx1q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 32); - return __ret; -} -#else -__ai int8x16_t vqtbx1q_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) { - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __ret; - __ret = (int8x16_t) __builtin_neon_vqtbx1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 32); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vqtbx1_u8(uint8x8_t __p0, uint8x16_t __p1, uint8x8_t __p2) { - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vqtbx1_v((int8x8_t)__p0, (int8x16_t)__p1, (int8x8_t)__p2, 16); - return __ret; -} -#else -__ai uint8x8_t vqtbx1_u8(uint8x8_t __p0, uint8x16_t __p1, uint8x8_t __p2) { - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - 
uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vqtbx1_v((int8x8_t)__rev0, (int8x16_t)__rev1, (int8x8_t)__rev2, 16); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vqtbx1_s8(int8x8_t __p0, int8x16_t __p1, int8x8_t __p2) { - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vqtbx1_v((int8x8_t)__p0, (int8x16_t)__p1, (int8x8_t)__p2, 0); - return __ret; -} -#else -__ai int8x8_t vqtbx1_s8(int8x8_t __p0, int8x16_t __p1, int8x8_t __p2) { - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vqtbx1_v((int8x8_t)__rev0, (int8x16_t)__rev1, (int8x8_t)__rev2, 0); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly8x8_t vqtbx2_p8(poly8x8_t __p0, poly8x16x2_t __p1, uint8x8_t __p2) { - poly8x8_t __ret; - __ret = (poly8x8_t) __builtin_neon_vqtbx2_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x8_t)__p2, 4); - return __ret; -} -#else -__ai poly8x8_t vqtbx2_p8(poly8x8_t __p0, poly8x16x2_t __p1, uint8x8_t __p2) { - poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x16x2_t __rev1; - __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x8_t __ret; - __ret = (poly8x8_t) __builtin_neon_vqtbx2_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x8_t)__rev2, 4); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly8x16_t vqtbx2q_p8(poly8x16_t __p0, poly8x16x2_t __p1, uint8x16_t __p2) { - poly8x16_t __ret; - __ret = (poly8x16_t) __builtin_neon_vqtbx2q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p2, 36); - return __ret; -} -#else -__ai poly8x16_t vqtbx2q_p8(poly8x16_t __p0, poly8x16x2_t __p1, uint8x16_t __p2) { - poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x16x2_t __rev1; - __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x16_t __ret; - __ret = (poly8x16_t) __builtin_neon_vqtbx2q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev2, 36); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai 
uint8x16_t vqtbx2q_u8(uint8x16_t __p0, uint8x16x2_t __p1, uint8x16_t __p2) { - uint8x16_t __ret; - __ret = (uint8x16_t) __builtin_neon_vqtbx2q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p2, 48); - return __ret; -} -#else -__ai uint8x16_t vqtbx2q_u8(uint8x16_t __p0, uint8x16x2_t __p1, uint8x16_t __p2) { - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16x2_t __rev1; - __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __ret; - __ret = (uint8x16_t) __builtin_neon_vqtbx2q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev2, 48); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vqtbx2q_s8(int8x16_t __p0, int8x16x2_t __p1, int8x16_t __p2) { - int8x16_t __ret; - __ret = (int8x16_t) __builtin_neon_vqtbx2q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p2, 32); - return __ret; -} -#else -__ai int8x16_t vqtbx2q_s8(int8x16_t __p0, int8x16x2_t __p1, int8x16_t __p2) { - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16x2_t __rev1; - __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __ret; - __ret = (int8x16_t) __builtin_neon_vqtbx2q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev2, 32); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vqtbx2_u8(uint8x8_t __p0, uint8x16x2_t __p1, uint8x8_t __p2) { - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vqtbx2_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x8_t)__p2, 16); - return __ret; -} -#else -__ai uint8x8_t vqtbx2_u8(uint8x8_t __p0, uint8x16x2_t __p1, uint8x8_t __p2) { - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16x2_t __rev1; - __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vqtbx2_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x8_t)__rev2, 16); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vqtbx2_s8(int8x8_t __p0, int8x16x2_t __p1, int8x8_t __p2) { - int8x8_t __ret; - __ret = (int8x8_t) 
__builtin_neon_vqtbx2_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x8_t)__p2, 0); - return __ret; -} -#else -__ai int8x8_t vqtbx2_s8(int8x8_t __p0, int8x16x2_t __p1, int8x8_t __p2) { - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16x2_t __rev1; - __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vqtbx2_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x8_t)__rev2, 0); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly8x8_t vqtbx3_p8(poly8x8_t __p0, poly8x16x3_t __p1, uint8x8_t __p2) { - poly8x8_t __ret; - __ret = (poly8x8_t) __builtin_neon_vqtbx3_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x8_t)__p2, 4); - return __ret; -} -#else -__ai poly8x8_t vqtbx3_p8(poly8x8_t __p0, poly8x16x3_t __p1, uint8x8_t __p2) { - poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x16x3_t __rev1; - __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x8_t __ret; - __ret = (poly8x8_t) __builtin_neon_vqtbx3_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x8_t)__rev2, 4); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly8x16_t vqtbx3q_p8(poly8x16_t __p0, poly8x16x3_t __p1, uint8x16_t __p2) { - poly8x16_t __ret; - __ret = (poly8x16_t) __builtin_neon_vqtbx3q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p2, 36); - return __ret; -} -#else -__ai poly8x16_t vqtbx3q_p8(poly8x16_t __p0, poly8x16x3_t __p1, uint8x16_t __p2) { - poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x16x3_t __rev1; - __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x16_t __ret; - __ret = (poly8x16_t) __builtin_neon_vqtbx3q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev2, 36); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t 
vqtbx3q_u8(uint8x16_t __p0, uint8x16x3_t __p1, uint8x16_t __p2) { - uint8x16_t __ret; - __ret = (uint8x16_t) __builtin_neon_vqtbx3q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p2, 48); - return __ret; -} -#else -__ai uint8x16_t vqtbx3q_u8(uint8x16_t __p0, uint8x16x3_t __p1, uint8x16_t __p2) { - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16x3_t __rev1; - __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __ret; - __ret = (uint8x16_t) __builtin_neon_vqtbx3q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev2, 48); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vqtbx3q_s8(int8x16_t __p0, int8x16x3_t __p1, int8x16_t __p2) { - int8x16_t __ret; - __ret = (int8x16_t) __builtin_neon_vqtbx3q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p2, 32); - return __ret; -} -#else -__ai int8x16_t vqtbx3q_s8(int8x16_t __p0, int8x16x3_t __p1, int8x16_t __p2) { - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16x3_t __rev1; - __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __ret; - __ret = (int8x16_t) __builtin_neon_vqtbx3q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev2, 32); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vqtbx3_u8(uint8x8_t __p0, uint8x16x3_t __p1, uint8x8_t __p2) { - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vqtbx3_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x8_t)__p2, 16); - return __ret; -} -#else -__ai uint8x8_t vqtbx3_u8(uint8x8_t __p0, uint8x16x3_t __p1, uint8x8_t __p2) { - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16x3_t __rev1; - __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev2; __rev2 = 
__builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vqtbx3_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x8_t)__rev2, 16); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vqtbx3_s8(int8x8_t __p0, int8x16x3_t __p1, int8x8_t __p2) { - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vqtbx3_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x8_t)__p2, 0); - return __ret; -} -#else -__ai int8x8_t vqtbx3_s8(int8x8_t __p0, int8x16x3_t __p1, int8x8_t __p2) { - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16x3_t __rev1; - __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vqtbx3_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x8_t)__rev2, 0); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly8x8_t vqtbx4_p8(poly8x8_t __p0, poly8x16x4_t __p1, uint8x8_t __p2) { - poly8x8_t __ret; - __ret = (poly8x8_t) __builtin_neon_vqtbx4_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x8_t)__p2, 4); - return __ret; -} -#else -__ai poly8x8_t vqtbx4_p8(poly8x8_t __p0, poly8x16x4_t __p1, uint8x8_t __p2) { - poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x16x4_t __rev1; - __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x8_t __ret; - __ret = (poly8x8_t) __builtin_neon_vqtbx4_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], (int8x8_t)__rev2, 4); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly8x16_t vqtbx4q_p8(poly8x16_t __p0, poly8x16x4_t __p1, uint8x16_t __p2) { - poly8x16_t __ret; - __ret = (poly8x16_t) __builtin_neon_vqtbx4q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x16_t)__p2, 36); - return __ret; -} -#else -__ai poly8x16_t vqtbx4q_p8(poly8x16_t __p0, poly8x16x4_t __p1, uint8x16_t __p2) { - poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x16x4_t __rev1; - 
__rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x16_t __ret; - __ret = (poly8x16_t) __builtin_neon_vqtbx4q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], (int8x16_t)__rev2, 36); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vqtbx4q_u8(uint8x16_t __p0, uint8x16x4_t __p1, uint8x16_t __p2) { - uint8x16_t __ret; - __ret = (uint8x16_t) __builtin_neon_vqtbx4q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x16_t)__p2, 48); - return __ret; -} -#else -__ai uint8x16_t vqtbx4q_u8(uint8x16_t __p0, uint8x16x4_t __p1, uint8x16_t __p2) { - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16x4_t __rev1; - __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __ret; - __ret = (uint8x16_t) __builtin_neon_vqtbx4q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], (int8x16_t)__rev2, 48); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vqtbx4q_s8(int8x16_t __p0, int8x16x4_t __p1, int8x16_t __p2) { - int8x16_t __ret; - __ret = (int8x16_t) __builtin_neon_vqtbx4q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x16_t)__p2, 32); - return __ret; -} -#else -__ai int8x16_t vqtbx4q_s8(int8x16_t __p0, int8x16x4_t __p1, int8x16_t __p2) { - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16x4_t __rev1; - __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __rev2; __rev2 
= __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __ret; - __ret = (int8x16_t) __builtin_neon_vqtbx4q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], (int8x16_t)__rev2, 32); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vqtbx4_u8(uint8x8_t __p0, uint8x16x4_t __p1, uint8x8_t __p2) { - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vqtbx4_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x8_t)__p2, 16); - return __ret; -} -#else -__ai uint8x8_t vqtbx4_u8(uint8x8_t __p0, uint8x16x4_t __p1, uint8x8_t __p2) { - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16x4_t __rev1; - __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vqtbx4_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], (int8x8_t)__rev2, 16); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vqtbx4_s8(int8x8_t __p0, int8x16x4_t __p1, int8x8_t __p2) { - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vqtbx4_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x8_t)__p2, 0); - return __ret; -} -#else -__ai int8x8_t vqtbx4_s8(int8x8_t __p0, int8x16x4_t __p1, int8x8_t __p2) { - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16x4_t __rev1; - __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vqtbx4_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], (int8x8_t)__rev2, 0); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vraddhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { - uint16x8_t __ret; - __ret = vcombine_u16(__p0, vraddhn_u32(__p1, __p2)); - return __ret; -} -#else -__ai uint16x8_t vraddhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { - 
uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - uint16x8_t __ret; - __ret = __noswap_vcombine_u16(__rev0, __noswap_vraddhn_u32(__rev1, __rev2)); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vraddhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { - uint32x4_t __ret; - __ret = vcombine_u32(__p0, vraddhn_u64(__p1, __p2)); - return __ret; -} -#else -__ai uint32x4_t vraddhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); - uint32x4_t __ret; - __ret = __noswap_vcombine_u32(__rev0, __noswap_vraddhn_u64(__rev1, __rev2)); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vraddhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { - uint8x16_t __ret; - __ret = vcombine_u8(__p0, vraddhn_u16(__p1, __p2)); - return __ret; -} -#else -__ai uint8x16_t vraddhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __ret; - __ret = __noswap_vcombine_u8(__rev0, __noswap_vraddhn_u16(__rev1, __rev2)); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vraddhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) { - int16x8_t __ret; - __ret = vcombine_s16(__p0, vraddhn_s32(__p1, __p2)); - return __ret; -} -#else -__ai int16x8_t vraddhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) { - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - int16x8_t __ret; - __ret = __noswap_vcombine_s16(__rev0, __noswap_vraddhn_s32(__rev1, __rev2)); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vraddhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) { - int32x4_t __ret; - __ret = vcombine_s32(__p0, vraddhn_s64(__p1, __p2)); - return __ret; -} -#else -__ai int32x4_t vraddhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) { - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - int64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); - int32x4_t __ret; - __ret = __noswap_vcombine_s32(__rev0, __noswap_vraddhn_s64(__rev1, __rev2)); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vraddhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) { - int8x16_t __ret; - __ret = vcombine_s8(__p0, vraddhn_s16(__p1, __p2)); - 
return __ret; -} -#else -__ai int8x16_t vraddhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) { - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __ret; - __ret = __noswap_vcombine_s8(__rev0, __noswap_vraddhn_s16(__rev1, __rev2)); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly8x8_t vrbit_p8(poly8x8_t __p0) { - poly8x8_t __ret; - __ret = (poly8x8_t) __builtin_neon_vrbit_v((int8x8_t)__p0, 4); - return __ret; -} -#else -__ai poly8x8_t vrbit_p8(poly8x8_t __p0) { - poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x8_t __ret; - __ret = (poly8x8_t) __builtin_neon_vrbit_v((int8x8_t)__rev0, 4); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly8x16_t vrbitq_p8(poly8x16_t __p0) { - poly8x16_t __ret; - __ret = (poly8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__p0, 36); - return __ret; -} -#else -__ai poly8x16_t vrbitq_p8(poly8x16_t __p0) { - poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x16_t __ret; - __ret = (poly8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__rev0, 36); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vrbitq_u8(uint8x16_t __p0) { - uint8x16_t __ret; - __ret = (uint8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__p0, 48); - return __ret; -} -#else -__ai uint8x16_t vrbitq_u8(uint8x16_t __p0) { - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __ret; - __ret = (uint8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__rev0, 48); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vrbitq_s8(int8x16_t __p0) { - int8x16_t __ret; - __ret = (int8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__p0, 32); - return __ret; -} -#else -__ai int8x16_t vrbitq_s8(int8x16_t __p0) { - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __ret; - __ret = (int8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__rev0, 32); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vrbit_u8(uint8x8_t __p0) { - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vrbit_v((int8x8_t)__p0, 16); - return __ret; -} -#else -__ai uint8x8_t vrbit_u8(uint8x8_t __p0) { - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vrbit_v((int8x8_t)__rev0, 16); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vrbit_s8(int8x8_t __p0) { - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vrbit_v((int8x8_t)__p0, 0); - return __ret; -} -#else -__ai int8x8_t vrbit_s8(int8x8_t __p0) { - int8x8_t 
__rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vrbit_v((int8x8_t)__rev0, 0); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vrecpeq_f64(float64x2_t __p0) { - float64x2_t __ret; - __ret = (float64x2_t) __builtin_neon_vrecpeq_v((int8x16_t)__p0, 42); - return __ret; -} -#else -__ai float64x2_t vrecpeq_f64(float64x2_t __p0) { - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float64x2_t __ret; - __ret = (float64x2_t) __builtin_neon_vrecpeq_v((int8x16_t)__rev0, 42); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64x1_t vrecpe_f64(float64x1_t __p0) { - float64x1_t __ret; - __ret = (float64x1_t) __builtin_neon_vrecpe_v((int8x8_t)__p0, 10); - return __ret; -} -#else -__ai float64x1_t vrecpe_f64(float64x1_t __p0) { - float64x1_t __ret; - __ret = (float64x1_t) __builtin_neon_vrecpe_v((int8x8_t)__p0, 10); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64_t vrecped_f64(float64_t __p0) { - float64_t __ret; - __ret = (float64_t) __builtin_neon_vrecped_f64(__p0); - return __ret; -} -#else -__ai float64_t vrecped_f64(float64_t __p0) { - float64_t __ret; - __ret = (float64_t) __builtin_neon_vrecped_f64(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32_t vrecpes_f32(float32_t __p0) { - float32_t __ret; - __ret = (float32_t) __builtin_neon_vrecpes_f32(__p0); - return __ret; -} -#else -__ai float32_t vrecpes_f32(float32_t __p0) { - float32_t __ret; - __ret = (float32_t) __builtin_neon_vrecpes_f32(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vrecpsq_f64(float64x2_t __p0, float64x2_t __p1) { - float64x2_t __ret; - __ret = (float64x2_t) __builtin_neon_vrecpsq_v((int8x16_t)__p0, (int8x16_t)__p1, 42); - return __ret; -} -#else -__ai float64x2_t vrecpsq_f64(float64x2_t __p0, float64x2_t __p1) { - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - float64x2_t __ret; - __ret = (float64x2_t) __builtin_neon_vrecpsq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64x1_t vrecps_f64(float64x1_t __p0, float64x1_t __p1) { - float64x1_t __ret; - __ret = (float64x1_t) __builtin_neon_vrecps_v((int8x8_t)__p0, (int8x8_t)__p1, 10); - return __ret; -} -#else -__ai float64x1_t vrecps_f64(float64x1_t __p0, float64x1_t __p1) { - float64x1_t __ret; - __ret = (float64x1_t) __builtin_neon_vrecps_v((int8x8_t)__p0, (int8x8_t)__p1, 10); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64_t vrecpsd_f64(float64_t __p0, float64_t __p1) { - float64_t __ret; - __ret = (float64_t) __builtin_neon_vrecpsd_f64(__p0, __p1); - return __ret; -} -#else -__ai float64_t vrecpsd_f64(float64_t __p0, float64_t __p1) { - float64_t __ret; - __ret = (float64_t) __builtin_neon_vrecpsd_f64(__p0, __p1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32_t vrecpss_f32(float32_t __p0, float32_t __p1) { - float32_t __ret; - __ret = (float32_t) __builtin_neon_vrecpss_f32(__p0, __p1); - return __ret; -} -#else -__ai float32_t vrecpss_f32(float32_t __p0, float32_t __p1) { - float32_t __ret; - __ret = (float32_t) __builtin_neon_vrecpss_f32(__p0, __p1); - return __ret; 
-} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64_t vrecpxd_f64(float64_t __p0) { - float64_t __ret; - __ret = (float64_t) __builtin_neon_vrecpxd_f64(__p0); - return __ret; -} -#else -__ai float64_t vrecpxd_f64(float64_t __p0) { - float64_t __ret; - __ret = (float64_t) __builtin_neon_vrecpxd_f64(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32_t vrecpxs_f32(float32_t __p0) { - float32_t __ret; - __ret = (float32_t) __builtin_neon_vrecpxs_f32(__p0); - return __ret; -} -#else -__ai float32_t vrecpxs_f32(float32_t __p0) { - float32_t __ret; - __ret = (float32_t) __builtin_neon_vrecpxs_f32(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64_t vrshld_u64(uint64_t __p0, uint64_t __p1) { - uint64_t __ret; - __ret = (uint64_t) __builtin_neon_vrshld_u64(__p0, __p1); - return __ret; -} -#else -__ai uint64_t vrshld_u64(uint64_t __p0, uint64_t __p1) { - uint64_t __ret; - __ret = (uint64_t) __builtin_neon_vrshld_u64(__p0, __p1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64_t vrshld_s64(int64_t __p0, int64_t __p1) { - int64_t __ret; - __ret = (int64_t) __builtin_neon_vrshld_s64(__p0, __p1); - return __ret; -} -#else -__ai int64_t vrshld_s64(int64_t __p0, int64_t __p1) { - int64_t __ret; - __ret = (int64_t) __builtin_neon_vrshld_s64(__p0, __p1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vrshrd_n_u64(__p0, __p1) __extension__ ({ \ - uint64_t __s0 = __p0; \ - uint64_t __ret; \ - __ret = (uint64_t) __builtin_neon_vrshrd_n_u64(__s0, __p1); \ - __ret; \ -}) -#else -#define vrshrd_n_u64(__p0, __p1) __extension__ ({ \ - uint64_t __s0 = __p0; \ - uint64_t __ret; \ - __ret = (uint64_t) __builtin_neon_vrshrd_n_u64(__s0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vrshrd_n_s64(__p0, __p1) __extension__ ({ \ - int64_t __s0 = __p0; \ - int64_t __ret; \ - __ret = (int64_t) __builtin_neon_vrshrd_n_s64(__s0, __p1); \ - __ret; \ -}) -#else -#define vrshrd_n_s64(__p0, __p1) __extension__ ({ \ - int64_t __s0 = __p0; \ - int64_t __ret; \ - __ret = (int64_t) __builtin_neon_vrshrd_n_s64(__s0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vrshrn_high_n_u32(__p0_222, __p1_222, __p2_222) __extension__ ({ \ - uint16x4_t __s0_222 = __p0_222; \ - uint32x4_t __s1_222 = __p1_222; \ - uint16x8_t __ret_222; \ - __ret_222 = (uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_222), (uint16x4_t)(vrshrn_n_u32(__s1_222, __p2_222)))); \ - __ret_222; \ -}) -#else -#define vrshrn_high_n_u32(__p0_223, __p1_223, __p2_223) __extension__ ({ \ - uint16x4_t __s0_223 = __p0_223; \ - uint32x4_t __s1_223 = __p1_223; \ - uint16x4_t __rev0_223; __rev0_223 = __builtin_shufflevector(__s0_223, __s0_223, 3, 2, 1, 0); \ - uint32x4_t __rev1_223; __rev1_223 = __builtin_shufflevector(__s1_223, __s1_223, 3, 2, 1, 0); \ - uint16x8_t __ret_223; \ - __ret_223 = (uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_223), (uint16x4_t)(__noswap_vrshrn_n_u32(__rev1_223, __p2_223)))); \ - __ret_223 = __builtin_shufflevector(__ret_223, __ret_223, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_223; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vrshrn_high_n_u64(__p0_224, __p1_224, __p2_224) __extension__ ({ \ - uint32x2_t __s0_224 = __p0_224; \ - uint64x2_t __s1_224 = __p1_224; \ - uint32x4_t __ret_224; \ - __ret_224 = (uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_224), (uint32x2_t)(vrshrn_n_u64(__s1_224, __p2_224)))); \ - __ret_224; \ -}) -#else -#define vrshrn_high_n_u64(__p0_225, __p1_225, __p2_225) __extension__ ({ \ - uint32x2_t __s0_225 = 
__p0_225; \ - uint64x2_t __s1_225 = __p1_225; \ - uint32x2_t __rev0_225; __rev0_225 = __builtin_shufflevector(__s0_225, __s0_225, 1, 0); \ - uint64x2_t __rev1_225; __rev1_225 = __builtin_shufflevector(__s1_225, __s1_225, 1, 0); \ - uint32x4_t __ret_225; \ - __ret_225 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_225), (uint32x2_t)(__noswap_vrshrn_n_u64(__rev1_225, __p2_225)))); \ - __ret_225 = __builtin_shufflevector(__ret_225, __ret_225, 3, 2, 1, 0); \ - __ret_225; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vrshrn_high_n_u16(__p0_226, __p1_226, __p2_226) __extension__ ({ \ - uint8x8_t __s0_226 = __p0_226; \ - uint16x8_t __s1_226 = __p1_226; \ - uint8x16_t __ret_226; \ - __ret_226 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_226), (uint8x8_t)(vrshrn_n_u16(__s1_226, __p2_226)))); \ - __ret_226; \ -}) -#else -#define vrshrn_high_n_u16(__p0_227, __p1_227, __p2_227) __extension__ ({ \ - uint8x8_t __s0_227 = __p0_227; \ - uint16x8_t __s1_227 = __p1_227; \ - uint8x8_t __rev0_227; __rev0_227 = __builtin_shufflevector(__s0_227, __s0_227, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint16x8_t __rev1_227; __rev1_227 = __builtin_shufflevector(__s1_227, __s1_227, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint8x16_t __ret_227; \ - __ret_227 = (uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_227), (uint8x8_t)(__noswap_vrshrn_n_u16(__rev1_227, __p2_227)))); \ - __ret_227 = __builtin_shufflevector(__ret_227, __ret_227, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_227; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vrshrn_high_n_s32(__p0_228, __p1_228, __p2_228) __extension__ ({ \ - int16x4_t __s0_228 = __p0_228; \ - int32x4_t __s1_228 = __p1_228; \ - int16x8_t __ret_228; \ - __ret_228 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_228), (int16x4_t)(vrshrn_n_s32(__s1_228, __p2_228)))); \ - __ret_228; \ -}) -#else -#define vrshrn_high_n_s32(__p0_229, __p1_229, __p2_229) __extension__ ({ \ - int16x4_t __s0_229 = __p0_229; \ - int32x4_t __s1_229 = __p1_229; \ - int16x4_t __rev0_229; __rev0_229 = __builtin_shufflevector(__s0_229, __s0_229, 3, 2, 1, 0); \ - int32x4_t __rev1_229; __rev1_229 = __builtin_shufflevector(__s1_229, __s1_229, 3, 2, 1, 0); \ - int16x8_t __ret_229; \ - __ret_229 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_229), (int16x4_t)(__noswap_vrshrn_n_s32(__rev1_229, __p2_229)))); \ - __ret_229 = __builtin_shufflevector(__ret_229, __ret_229, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_229; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vrshrn_high_n_s64(__p0_230, __p1_230, __p2_230) __extension__ ({ \ - int32x2_t __s0_230 = __p0_230; \ - int64x2_t __s1_230 = __p1_230; \ - int32x4_t __ret_230; \ - __ret_230 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_230), (int32x2_t)(vrshrn_n_s64(__s1_230, __p2_230)))); \ - __ret_230; \ -}) -#else -#define vrshrn_high_n_s64(__p0_231, __p1_231, __p2_231) __extension__ ({ \ - int32x2_t __s0_231 = __p0_231; \ - int64x2_t __s1_231 = __p1_231; \ - int32x2_t __rev0_231; __rev0_231 = __builtin_shufflevector(__s0_231, __s0_231, 1, 0); \ - int64x2_t __rev1_231; __rev1_231 = __builtin_shufflevector(__s1_231, __s1_231, 1, 0); \ - int32x4_t __ret_231; \ - __ret_231 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_231), (int32x2_t)(__noswap_vrshrn_n_s64(__rev1_231, __p2_231)))); \ - __ret_231 = __builtin_shufflevector(__ret_231, __ret_231, 3, 2, 1, 0); \ - __ret_231; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vrshrn_high_n_s16(__p0_232, __p1_232, __p2_232) __extension__ ({ \ - int8x8_t __s0_232 = __p0_232; \ - int16x8_t __s1_232 = __p1_232; \ 
- int8x16_t __ret_232; \ - __ret_232 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_232), (int8x8_t)(vrshrn_n_s16(__s1_232, __p2_232)))); \ - __ret_232; \ -}) -#else -#define vrshrn_high_n_s16(__p0_233, __p1_233, __p2_233) __extension__ ({ \ - int8x8_t __s0_233 = __p0_233; \ - int16x8_t __s1_233 = __p1_233; \ - int8x8_t __rev0_233; __rev0_233 = __builtin_shufflevector(__s0_233, __s0_233, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x8_t __rev1_233; __rev1_233 = __builtin_shufflevector(__s1_233, __s1_233, 7, 6, 5, 4, 3, 2, 1, 0); \ - int8x16_t __ret_233; \ - __ret_233 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_233), (int8x8_t)(__noswap_vrshrn_n_s16(__rev1_233, __p2_233)))); \ - __ret_233 = __builtin_shufflevector(__ret_233, __ret_233, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_233; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vrsqrteq_f64(float64x2_t __p0) { - float64x2_t __ret; - __ret = (float64x2_t) __builtin_neon_vrsqrteq_v((int8x16_t)__p0, 42); - return __ret; -} -#else -__ai float64x2_t vrsqrteq_f64(float64x2_t __p0) { - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float64x2_t __ret; - __ret = (float64x2_t) __builtin_neon_vrsqrteq_v((int8x16_t)__rev0, 42); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64x1_t vrsqrte_f64(float64x1_t __p0) { - float64x1_t __ret; - __ret = (float64x1_t) __builtin_neon_vrsqrte_v((int8x8_t)__p0, 10); - return __ret; -} -#else -__ai float64x1_t vrsqrte_f64(float64x1_t __p0) { - float64x1_t __ret; - __ret = (float64x1_t) __builtin_neon_vrsqrte_v((int8x8_t)__p0, 10); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64_t vrsqrted_f64(float64_t __p0) { - float64_t __ret; - __ret = (float64_t) __builtin_neon_vrsqrted_f64(__p0); - return __ret; -} -#else -__ai float64_t vrsqrted_f64(float64_t __p0) { - float64_t __ret; - __ret = (float64_t) __builtin_neon_vrsqrted_f64(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32_t vrsqrtes_f32(float32_t __p0) { - float32_t __ret; - __ret = (float32_t) __builtin_neon_vrsqrtes_f32(__p0); - return __ret; -} -#else -__ai float32_t vrsqrtes_f32(float32_t __p0) { - float32_t __ret; - __ret = (float32_t) __builtin_neon_vrsqrtes_f32(__p0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vrsqrtsq_f64(float64x2_t __p0, float64x2_t __p1) { - float64x2_t __ret; - __ret = (float64x2_t) __builtin_neon_vrsqrtsq_v((int8x16_t)__p0, (int8x16_t)__p1, 42); - return __ret; -} -#else -__ai float64x2_t vrsqrtsq_f64(float64x2_t __p0, float64x2_t __p1) { - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - float64x2_t __ret; - __ret = (float64x2_t) __builtin_neon_vrsqrtsq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64x1_t vrsqrts_f64(float64x1_t __p0, float64x1_t __p1) { - float64x1_t __ret; - __ret = (float64x1_t) __builtin_neon_vrsqrts_v((int8x8_t)__p0, (int8x8_t)__p1, 10); - return __ret; -} -#else -__ai float64x1_t vrsqrts_f64(float64x1_t __p0, float64x1_t __p1) { - float64x1_t __ret; - __ret = (float64x1_t) __builtin_neon_vrsqrts_v((int8x8_t)__p0, (int8x8_t)__p1, 10); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64_t vrsqrtsd_f64(float64_t __p0, float64_t __p1) { - float64_t __ret; - __ret = (float64_t) 
__builtin_neon_vrsqrtsd_f64(__p0, __p1); - return __ret; -} -#else -__ai float64_t vrsqrtsd_f64(float64_t __p0, float64_t __p1) { - float64_t __ret; - __ret = (float64_t) __builtin_neon_vrsqrtsd_f64(__p0, __p1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32_t vrsqrtss_f32(float32_t __p0, float32_t __p1) { - float32_t __ret; - __ret = (float32_t) __builtin_neon_vrsqrtss_f32(__p0, __p1); - return __ret; -} -#else -__ai float32_t vrsqrtss_f32(float32_t __p0, float32_t __p1) { - float32_t __ret; - __ret = (float32_t) __builtin_neon_vrsqrtss_f32(__p0, __p1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vrsrad_n_u64(__p0, __p1, __p2) __extension__ ({ \ - uint64_t __s0 = __p0; \ - uint64_t __s1 = __p1; \ - uint64_t __ret; \ - __ret = (uint64_t) __builtin_neon_vrsrad_n_u64(__s0, __s1, __p2); \ - __ret; \ -}) -#else -#define vrsrad_n_u64(__p0, __p1, __p2) __extension__ ({ \ - uint64_t __s0 = __p0; \ - uint64_t __s1 = __p1; \ - uint64_t __ret; \ - __ret = (uint64_t) __builtin_neon_vrsrad_n_u64(__s0, __s1, __p2); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vrsrad_n_s64(__p0, __p1, __p2) __extension__ ({ \ - int64_t __s0 = __p0; \ - int64_t __s1 = __p1; \ - int64_t __ret; \ - __ret = (int64_t) __builtin_neon_vrsrad_n_s64(__s0, __s1, __p2); \ - __ret; \ -}) -#else -#define vrsrad_n_s64(__p0, __p1, __p2) __extension__ ({ \ - int64_t __s0 = __p0; \ - int64_t __s1 = __p1; \ - int64_t __ret; \ - __ret = (int64_t) __builtin_neon_vrsrad_n_s64(__s0, __s1, __p2); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vrsubhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { - uint16x8_t __ret; - __ret = vcombine_u16(__p0, vrsubhn_u32(__p1, __p2)); - return __ret; -} -#else -__ai uint16x8_t vrsubhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - uint16x8_t __ret; - __ret = __noswap_vcombine_u16(__rev0, __noswap_vrsubhn_u32(__rev1, __rev2)); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vrsubhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { - uint32x4_t __ret; - __ret = vcombine_u32(__p0, vrsubhn_u64(__p1, __p2)); - return __ret; -} -#else -__ai uint32x4_t vrsubhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); - uint32x4_t __ret; - __ret = __noswap_vcombine_u32(__rev0, __noswap_vrsubhn_u64(__rev1, __rev2)); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vrsubhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { - uint8x16_t __ret; - __ret = vcombine_u8(__p0, vrsubhn_u16(__p1, __p2)); - return __ret; -} -#else -__ai uint8x16_t vrsubhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 
5, 4, 3, 2, 1, 0); - uint8x16_t __ret; - __ret = __noswap_vcombine_u8(__rev0, __noswap_vrsubhn_u16(__rev1, __rev2)); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vrsubhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) { - int16x8_t __ret; - __ret = vcombine_s16(__p0, vrsubhn_s32(__p1, __p2)); - return __ret; -} -#else -__ai int16x8_t vrsubhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) { - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - int16x8_t __ret; - __ret = __noswap_vcombine_s16(__rev0, __noswap_vrsubhn_s32(__rev1, __rev2)); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vrsubhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) { - int32x4_t __ret; - __ret = vcombine_s32(__p0, vrsubhn_s64(__p1, __p2)); - return __ret; -} -#else -__ai int32x4_t vrsubhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) { - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - int64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); - int32x4_t __ret; - __ret = __noswap_vcombine_s32(__rev0, __noswap_vrsubhn_s64(__rev1, __rev2)); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vrsubhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) { - int8x16_t __ret; - __ret = vcombine_s8(__p0, vrsubhn_s16(__p1, __p2)); - return __ret; -} -#else -__ai int8x16_t vrsubhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) { - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __ret; - __ret = __noswap_vcombine_s8(__rev0, __noswap_vrsubhn_s16(__rev1, __rev2)); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vset_lane_p64(__p0, __p1, __p2) __extension__ ({ \ - poly64_t __s0 = __p0; \ - poly64x1_t __s1 = __p1; \ - poly64x1_t __ret; \ - __ret = (poly64x1_t) __builtin_neon_vset_lane_i64(__s0, (int8x8_t)__s1, __p2); \ - __ret; \ -}) -#else -#define vset_lane_p64(__p0, __p1, __p2) __extension__ ({ \ - poly64_t __s0 = __p0; \ - poly64x1_t __s1 = __p1; \ - poly64x1_t __ret; \ - __ret = (poly64x1_t) __builtin_neon_vset_lane_i64(__s0, (int8x8_t)__s1, __p2); \ - __ret; \ -}) -#define __noswap_vset_lane_p64(__p0, __p1, __p2) __extension__ ({ \ - poly64_t __s0 = __p0; \ - poly64x1_t __s1 = __p1; \ - poly64x1_t __ret; \ - __ret = (poly64x1_t) __builtin_neon_vset_lane_i64(__s0, (int8x8_t)__s1, __p2); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vsetq_lane_p64(__p0, __p1, __p2) __extension__ ({ \ - poly64_t __s0 = __p0; \ - poly64x2_t __s1 = __p1; \ - poly64x2_t __ret; \ - __ret = (poly64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int8x16_t)__s1, __p2); \ - __ret; \ -}) -#else -#define vsetq_lane_p64(__p0, __p1, __p2) __extension__ ({ \ - poly64_t 
__s0 = __p0; \ - poly64x2_t __s1 = __p1; \ - poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - poly64x2_t __ret; \ - __ret = (poly64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int8x16_t)__rev1, __p2); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#define __noswap_vsetq_lane_p64(__p0, __p1, __p2) __extension__ ({ \ - poly64_t __s0 = __p0; \ - poly64x2_t __s1 = __p1; \ - poly64x2_t __ret; \ - __ret = (poly64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int8x16_t)__s1, __p2); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vsetq_lane_f64(__p0, __p1, __p2) __extension__ ({ \ - float64_t __s0 = __p0; \ - float64x2_t __s1 = __p1; \ - float64x2_t __ret; \ - __ret = (float64x2_t) __builtin_neon_vsetq_lane_f64(__s0, (int8x16_t)__s1, __p2); \ - __ret; \ -}) -#else -#define vsetq_lane_f64(__p0, __p1, __p2) __extension__ ({ \ - float64_t __s0 = __p0; \ - float64x2_t __s1 = __p1; \ - float64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - float64x2_t __ret; \ - __ret = (float64x2_t) __builtin_neon_vsetq_lane_f64(__s0, (int8x16_t)__rev1, __p2); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#define __noswap_vsetq_lane_f64(__p0, __p1, __p2) __extension__ ({ \ - float64_t __s0 = __p0; \ - float64x2_t __s1 = __p1; \ - float64x2_t __ret; \ - __ret = (float64x2_t) __builtin_neon_vsetq_lane_f64(__s0, (int8x16_t)__s1, __p2); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vset_lane_f64(__p0, __p1, __p2) __extension__ ({ \ - float64_t __s0 = __p0; \ - float64x1_t __s1 = __p1; \ - float64x1_t __ret; \ - __ret = (float64x1_t) __builtin_neon_vset_lane_f64(__s0, (int8x8_t)__s1, __p2); \ - __ret; \ -}) -#else -#define vset_lane_f64(__p0, __p1, __p2) __extension__ ({ \ - float64_t __s0 = __p0; \ - float64x1_t __s1 = __p1; \ - float64x1_t __ret; \ - __ret = (float64x1_t) __builtin_neon_vset_lane_f64(__s0, (int8x8_t)__s1, __p2); \ - __ret; \ -}) -#define __noswap_vset_lane_f64(__p0, __p1, __p2) __extension__ ({ \ - float64_t __s0 = __p0; \ - float64x1_t __s1 = __p1; \ - float64x1_t __ret; \ - __ret = (float64x1_t) __builtin_neon_vset_lane_f64(__s0, (int8x8_t)__s1, __p2); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64_t vshld_u64(uint64_t __p0, uint64_t __p1) { - uint64_t __ret; - __ret = (uint64_t) __builtin_neon_vshld_u64(__p0, __p1); - return __ret; -} -#else -__ai uint64_t vshld_u64(uint64_t __p0, uint64_t __p1) { - uint64_t __ret; - __ret = (uint64_t) __builtin_neon_vshld_u64(__p0, __p1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64_t vshld_s64(int64_t __p0, int64_t __p1) { - int64_t __ret; - __ret = (int64_t) __builtin_neon_vshld_s64(__p0, __p1); - return __ret; -} -#else -__ai int64_t vshld_s64(int64_t __p0, int64_t __p1) { - int64_t __ret; - __ret = (int64_t) __builtin_neon_vshld_s64(__p0, __p1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vshld_n_u64(__p0, __p1) __extension__ ({ \ - uint64_t __s0 = __p0; \ - uint64_t __ret; \ - __ret = (uint64_t) __builtin_neon_vshld_n_u64(__s0, __p1); \ - __ret; \ -}) -#else -#define vshld_n_u64(__p0, __p1) __extension__ ({ \ - uint64_t __s0 = __p0; \ - uint64_t __ret; \ - __ret = (uint64_t) __builtin_neon_vshld_n_u64(__s0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vshld_n_s64(__p0, __p1) __extension__ ({ \ - int64_t __s0 = __p0; \ - int64_t __ret; \ - __ret = (int64_t) __builtin_neon_vshld_n_s64(__s0, __p1); \ - __ret; \ -}) -#else -#define vshld_n_s64(__p0, __p1) 
__extension__ ({ \ - int64_t __s0 = __p0; \ - int64_t __ret; \ - __ret = (int64_t) __builtin_neon_vshld_n_s64(__s0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vshll_high_n_u8(__p0_234, __p1_234) __extension__ ({ \ - uint8x16_t __s0_234 = __p0_234; \ - uint16x8_t __ret_234; \ - __ret_234 = (uint16x8_t)(vshll_n_u8(vget_high_u8(__s0_234), __p1_234)); \ - __ret_234; \ -}) -#else -#define vshll_high_n_u8(__p0_235, __p1_235) __extension__ ({ \ - uint8x16_t __s0_235 = __p0_235; \ - uint8x16_t __rev0_235; __rev0_235 = __builtin_shufflevector(__s0_235, __s0_235, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint16x8_t __ret_235; \ - __ret_235 = (uint16x8_t)(__noswap_vshll_n_u8(__noswap_vget_high_u8(__rev0_235), __p1_235)); \ - __ret_235 = __builtin_shufflevector(__ret_235, __ret_235, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_235; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vshll_high_n_u32(__p0_236, __p1_236) __extension__ ({ \ - uint32x4_t __s0_236 = __p0_236; \ - uint64x2_t __ret_236; \ - __ret_236 = (uint64x2_t)(vshll_n_u32(vget_high_u32(__s0_236), __p1_236)); \ - __ret_236; \ -}) -#else -#define vshll_high_n_u32(__p0_237, __p1_237) __extension__ ({ \ - uint32x4_t __s0_237 = __p0_237; \ - uint32x4_t __rev0_237; __rev0_237 = __builtin_shufflevector(__s0_237, __s0_237, 3, 2, 1, 0); \ - uint64x2_t __ret_237; \ - __ret_237 = (uint64x2_t)(__noswap_vshll_n_u32(__noswap_vget_high_u32(__rev0_237), __p1_237)); \ - __ret_237 = __builtin_shufflevector(__ret_237, __ret_237, 1, 0); \ - __ret_237; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vshll_high_n_u16(__p0_238, __p1_238) __extension__ ({ \ - uint16x8_t __s0_238 = __p0_238; \ - uint32x4_t __ret_238; \ - __ret_238 = (uint32x4_t)(vshll_n_u16(vget_high_u16(__s0_238), __p1_238)); \ - __ret_238; \ -}) -#else -#define vshll_high_n_u16(__p0_239, __p1_239) __extension__ ({ \ - uint16x8_t __s0_239 = __p0_239; \ - uint16x8_t __rev0_239; __rev0_239 = __builtin_shufflevector(__s0_239, __s0_239, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint32x4_t __ret_239; \ - __ret_239 = (uint32x4_t)(__noswap_vshll_n_u16(__noswap_vget_high_u16(__rev0_239), __p1_239)); \ - __ret_239 = __builtin_shufflevector(__ret_239, __ret_239, 3, 2, 1, 0); \ - __ret_239; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vshll_high_n_s8(__p0_240, __p1_240) __extension__ ({ \ - int8x16_t __s0_240 = __p0_240; \ - int16x8_t __ret_240; \ - __ret_240 = (int16x8_t)(vshll_n_s8(vget_high_s8(__s0_240), __p1_240)); \ - __ret_240; \ -}) -#else -#define vshll_high_n_s8(__p0_241, __p1_241) __extension__ ({ \ - int8x16_t __s0_241 = __p0_241; \ - int8x16_t __rev0_241; __rev0_241 = __builtin_shufflevector(__s0_241, __s0_241, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x8_t __ret_241; \ - __ret_241 = (int16x8_t)(__noswap_vshll_n_s8(__noswap_vget_high_s8(__rev0_241), __p1_241)); \ - __ret_241 = __builtin_shufflevector(__ret_241, __ret_241, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_241; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vshll_high_n_s32(__p0_242, __p1_242) __extension__ ({ \ - int32x4_t __s0_242 = __p0_242; \ - int64x2_t __ret_242; \ - __ret_242 = (int64x2_t)(vshll_n_s32(vget_high_s32(__s0_242), __p1_242)); \ - __ret_242; \ -}) -#else -#define vshll_high_n_s32(__p0_243, __p1_243) __extension__ ({ \ - int32x4_t __s0_243 = __p0_243; \ - int32x4_t __rev0_243; __rev0_243 = __builtin_shufflevector(__s0_243, __s0_243, 3, 2, 1, 0); \ - int64x2_t __ret_243; \ - __ret_243 = (int64x2_t)(__noswap_vshll_n_s32(__noswap_vget_high_s32(__rev0_243), __p1_243)); \ - 
__ret_243 = __builtin_shufflevector(__ret_243, __ret_243, 1, 0); \ - __ret_243; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vshll_high_n_s16(__p0_244, __p1_244) __extension__ ({ \ - int16x8_t __s0_244 = __p0_244; \ - int32x4_t __ret_244; \ - __ret_244 = (int32x4_t)(vshll_n_s16(vget_high_s16(__s0_244), __p1_244)); \ - __ret_244; \ -}) -#else -#define vshll_high_n_s16(__p0_245, __p1_245) __extension__ ({ \ - int16x8_t __s0_245 = __p0_245; \ - int16x8_t __rev0_245; __rev0_245 = __builtin_shufflevector(__s0_245, __s0_245, 7, 6, 5, 4, 3, 2, 1, 0); \ - int32x4_t __ret_245; \ - __ret_245 = (int32x4_t)(__noswap_vshll_n_s16(__noswap_vget_high_s16(__rev0_245), __p1_245)); \ - __ret_245 = __builtin_shufflevector(__ret_245, __ret_245, 3, 2, 1, 0); \ - __ret_245; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vshrd_n_u64(__p0, __p1) __extension__ ({ \ - uint64_t __s0 = __p0; \ - uint64_t __ret; \ - __ret = (uint64_t) __builtin_neon_vshrd_n_u64(__s0, __p1); \ - __ret; \ -}) -#else -#define vshrd_n_u64(__p0, __p1) __extension__ ({ \ - uint64_t __s0 = __p0; \ - uint64_t __ret; \ - __ret = (uint64_t) __builtin_neon_vshrd_n_u64(__s0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vshrd_n_s64(__p0, __p1) __extension__ ({ \ - int64_t __s0 = __p0; \ - int64_t __ret; \ - __ret = (int64_t) __builtin_neon_vshrd_n_s64(__s0, __p1); \ - __ret; \ -}) -#else -#define vshrd_n_s64(__p0, __p1) __extension__ ({ \ - int64_t __s0 = __p0; \ - int64_t __ret; \ - __ret = (int64_t) __builtin_neon_vshrd_n_s64(__s0, __p1); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vshrn_high_n_u32(__p0_246, __p1_246, __p2_246) __extension__ ({ \ - uint16x4_t __s0_246 = __p0_246; \ - uint32x4_t __s1_246 = __p1_246; \ - uint16x8_t __ret_246; \ - __ret_246 = (uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_246), (uint16x4_t)(vshrn_n_u32(__s1_246, __p2_246)))); \ - __ret_246; \ -}) -#else -#define vshrn_high_n_u32(__p0_247, __p1_247, __p2_247) __extension__ ({ \ - uint16x4_t __s0_247 = __p0_247; \ - uint32x4_t __s1_247 = __p1_247; \ - uint16x4_t __rev0_247; __rev0_247 = __builtin_shufflevector(__s0_247, __s0_247, 3, 2, 1, 0); \ - uint32x4_t __rev1_247; __rev1_247 = __builtin_shufflevector(__s1_247, __s1_247, 3, 2, 1, 0); \ - uint16x8_t __ret_247; \ - __ret_247 = (uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_247), (uint16x4_t)(__noswap_vshrn_n_u32(__rev1_247, __p2_247)))); \ - __ret_247 = __builtin_shufflevector(__ret_247, __ret_247, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_247; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vshrn_high_n_u64(__p0_248, __p1_248, __p2_248) __extension__ ({ \ - uint32x2_t __s0_248 = __p0_248; \ - uint64x2_t __s1_248 = __p1_248; \ - uint32x4_t __ret_248; \ - __ret_248 = (uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_248), (uint32x2_t)(vshrn_n_u64(__s1_248, __p2_248)))); \ - __ret_248; \ -}) -#else -#define vshrn_high_n_u64(__p0_249, __p1_249, __p2_249) __extension__ ({ \ - uint32x2_t __s0_249 = __p0_249; \ - uint64x2_t __s1_249 = __p1_249; \ - uint32x2_t __rev0_249; __rev0_249 = __builtin_shufflevector(__s0_249, __s0_249, 1, 0); \ - uint64x2_t __rev1_249; __rev1_249 = __builtin_shufflevector(__s1_249, __s1_249, 1, 0); \ - uint32x4_t __ret_249; \ - __ret_249 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_249), (uint32x2_t)(__noswap_vshrn_n_u64(__rev1_249, __p2_249)))); \ - __ret_249 = __builtin_shufflevector(__ret_249, __ret_249, 3, 2, 1, 0); \ - __ret_249; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vshrn_high_n_u16(__p0_250, __p1_250, __p2_250) 
__extension__ ({ \ - uint8x8_t __s0_250 = __p0_250; \ - uint16x8_t __s1_250 = __p1_250; \ - uint8x16_t __ret_250; \ - __ret_250 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_250), (uint8x8_t)(vshrn_n_u16(__s1_250, __p2_250)))); \ - __ret_250; \ -}) -#else -#define vshrn_high_n_u16(__p0_251, __p1_251, __p2_251) __extension__ ({ \ - uint8x8_t __s0_251 = __p0_251; \ - uint16x8_t __s1_251 = __p1_251; \ - uint8x8_t __rev0_251; __rev0_251 = __builtin_shufflevector(__s0_251, __s0_251, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint16x8_t __rev1_251; __rev1_251 = __builtin_shufflevector(__s1_251, __s1_251, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint8x16_t __ret_251; \ - __ret_251 = (uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_251), (uint8x8_t)(__noswap_vshrn_n_u16(__rev1_251, __p2_251)))); \ - __ret_251 = __builtin_shufflevector(__ret_251, __ret_251, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_251; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vshrn_high_n_s32(__p0_252, __p1_252, __p2_252) __extension__ ({ \ - int16x4_t __s0_252 = __p0_252; \ - int32x4_t __s1_252 = __p1_252; \ - int16x8_t __ret_252; \ - __ret_252 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_252), (int16x4_t)(vshrn_n_s32(__s1_252, __p2_252)))); \ - __ret_252; \ -}) -#else -#define vshrn_high_n_s32(__p0_253, __p1_253, __p2_253) __extension__ ({ \ - int16x4_t __s0_253 = __p0_253; \ - int32x4_t __s1_253 = __p1_253; \ - int16x4_t __rev0_253; __rev0_253 = __builtin_shufflevector(__s0_253, __s0_253, 3, 2, 1, 0); \ - int32x4_t __rev1_253; __rev1_253 = __builtin_shufflevector(__s1_253, __s1_253, 3, 2, 1, 0); \ - int16x8_t __ret_253; \ - __ret_253 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_253), (int16x4_t)(__noswap_vshrn_n_s32(__rev1_253, __p2_253)))); \ - __ret_253 = __builtin_shufflevector(__ret_253, __ret_253, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_253; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vshrn_high_n_s64(__p0_254, __p1_254, __p2_254) __extension__ ({ \ - int32x2_t __s0_254 = __p0_254; \ - int64x2_t __s1_254 = __p1_254; \ - int32x4_t __ret_254; \ - __ret_254 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_254), (int32x2_t)(vshrn_n_s64(__s1_254, __p2_254)))); \ - __ret_254; \ -}) -#else -#define vshrn_high_n_s64(__p0_255, __p1_255, __p2_255) __extension__ ({ \ - int32x2_t __s0_255 = __p0_255; \ - int64x2_t __s1_255 = __p1_255; \ - int32x2_t __rev0_255; __rev0_255 = __builtin_shufflevector(__s0_255, __s0_255, 1, 0); \ - int64x2_t __rev1_255; __rev1_255 = __builtin_shufflevector(__s1_255, __s1_255, 1, 0); \ - int32x4_t __ret_255; \ - __ret_255 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_255), (int32x2_t)(__noswap_vshrn_n_s64(__rev1_255, __p2_255)))); \ - __ret_255 = __builtin_shufflevector(__ret_255, __ret_255, 3, 2, 1, 0); \ - __ret_255; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vshrn_high_n_s16(__p0_256, __p1_256, __p2_256) __extension__ ({ \ - int8x8_t __s0_256 = __p0_256; \ - int16x8_t __s1_256 = __p1_256; \ - int8x16_t __ret_256; \ - __ret_256 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_256), (int8x8_t)(vshrn_n_s16(__s1_256, __p2_256)))); \ - __ret_256; \ -}) -#else -#define vshrn_high_n_s16(__p0_257, __p1_257, __p2_257) __extension__ ({ \ - int8x8_t __s0_257 = __p0_257; \ - int16x8_t __s1_257 = __p1_257; \ - int8x8_t __rev0_257; __rev0_257 = __builtin_shufflevector(__s0_257, __s0_257, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x8_t __rev1_257; __rev1_257 = __builtin_shufflevector(__s1_257, __s1_257, 7, 6, 5, 4, 3, 2, 1, 0); \ - int8x16_t __ret_257; \ - __ret_257 = 
(int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_257), (int8x8_t)(__noswap_vshrn_n_s16(__rev1_257, __p2_257)))); \ - __ret_257 = __builtin_shufflevector(__ret_257, __ret_257, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_257; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vslid_n_u64(__p0, __p1, __p2) __extension__ ({ \ - uint64_t __s0 = __p0; \ - uint64_t __s1 = __p1; \ - uint64_t __ret; \ - __ret = (uint64_t) __builtin_neon_vslid_n_u64(__s0, __s1, __p2); \ - __ret; \ -}) -#else -#define vslid_n_u64(__p0, __p1, __p2) __extension__ ({ \ - uint64_t __s0 = __p0; \ - uint64_t __s1 = __p1; \ - uint64_t __ret; \ - __ret = (uint64_t) __builtin_neon_vslid_n_u64(__s0, __s1, __p2); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vslid_n_s64(__p0, __p1, __p2) __extension__ ({ \ - int64_t __s0 = __p0; \ - int64_t __s1 = __p1; \ - int64_t __ret; \ - __ret = (int64_t) __builtin_neon_vslid_n_s64(__s0, __s1, __p2); \ - __ret; \ -}) -#else -#define vslid_n_s64(__p0, __p1, __p2) __extension__ ({ \ - int64_t __s0 = __p0; \ - int64_t __s1 = __p1; \ - int64_t __ret; \ - __ret = (int64_t) __builtin_neon_vslid_n_s64(__s0, __s1, __p2); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vsli_n_p64(__p0, __p1, __p2) __extension__ ({ \ - poly64x1_t __s0 = __p0; \ - poly64x1_t __s1 = __p1; \ - poly64x1_t __ret; \ - __ret = (poly64x1_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 6); \ - __ret; \ -}) -#else -#define vsli_n_p64(__p0, __p1, __p2) __extension__ ({ \ - poly64x1_t __s0 = __p0; \ - poly64x1_t __s1 = __p1; \ - poly64x1_t __ret; \ - __ret = (poly64x1_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 6); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vsliq_n_p64(__p0, __p1, __p2) __extension__ ({ \ - poly64x2_t __s0 = __p0; \ - poly64x2_t __s1 = __p1; \ - poly64x2_t __ret; \ - __ret = (poly64x2_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 38); \ - __ret; \ -}) -#else -#define vsliq_n_p64(__p0, __p1, __p2) __extension__ ({ \ - poly64x2_t __s0 = __p0; \ - poly64x2_t __s1 = __p1; \ - poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - poly64x2_t __ret; \ - __ret = (poly64x2_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 38); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8_t vsqaddb_u8(uint8_t __p0, uint8_t __p1) { - uint8_t __ret; - __ret = (uint8_t) __builtin_neon_vsqaddb_u8(__p0, __p1); - return __ret; -} -#else -__ai uint8_t vsqaddb_u8(uint8_t __p0, uint8_t __p1) { - uint8_t __ret; - __ret = (uint8_t) __builtin_neon_vsqaddb_u8(__p0, __p1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32_t vsqadds_u32(uint32_t __p0, uint32_t __p1) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vsqadds_u32(__p0, __p1); - return __ret; -} -#else -__ai uint32_t vsqadds_u32(uint32_t __p0, uint32_t __p1) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vsqadds_u32(__p0, __p1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64_t vsqaddd_u64(uint64_t __p0, uint64_t __p1) { - uint64_t __ret; - __ret = (uint64_t) __builtin_neon_vsqaddd_u64(__p0, __p1); - return __ret; -} -#else -__ai uint64_t vsqaddd_u64(uint64_t __p0, uint64_t __p1) { - uint64_t __ret; - __ret = (uint64_t) __builtin_neon_vsqaddd_u64(__p0, __p1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ 
-__ai uint16_t vsqaddh_u16(uint16_t __p0, uint16_t __p1) { - uint16_t __ret; - __ret = (uint16_t) __builtin_neon_vsqaddh_u16(__p0, __p1); - return __ret; -} -#else -__ai uint16_t vsqaddh_u16(uint16_t __p0, uint16_t __p1) { - uint16_t __ret; - __ret = (uint16_t) __builtin_neon_vsqaddh_u16(__p0, __p1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vsqaddq_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16_t __ret; - __ret = (uint8x16_t) __builtin_neon_vsqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); - return __ret; -} -#else -__ai uint8x16_t vsqaddq_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __ret; - __ret = (uint8x16_t) __builtin_neon_vsqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vsqaddq_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vsqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); - return __ret; -} -#else -__ai uint32x4_t vsqaddq_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vsqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vsqaddq_u64(uint64x2_t __p0, uint64x2_t __p1) { - uint64x2_t __ret; - __ret = (uint64x2_t) __builtin_neon_vsqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); - return __ret; -} -#else -__ai uint64x2_t vsqaddq_u64(uint64x2_t __p0, uint64x2_t __p1) { - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint64x2_t __ret; - __ret = (uint64x2_t) __builtin_neon_vsqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vsqaddq_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vsqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); - return __ret; -} -#else -__ai uint16x8_t vsqaddq_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vsqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vsqadd_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vsqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 16); - return __ret; -} -#else -__ai uint8x8_t vsqadd_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __ret; - __ret = 
(uint8x8_t) __builtin_neon_vsqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vsqadd_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vsqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 18); - return __ret; -} -#else -__ai uint32x2_t vsqadd_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vsqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x1_t vsqadd_u64(uint64x1_t __p0, uint64x1_t __p1) { - uint64x1_t __ret; - __ret = (uint64x1_t) __builtin_neon_vsqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 19); - return __ret; -} -#else -__ai uint64x1_t vsqadd_u64(uint64x1_t __p0, uint64x1_t __p1) { - uint64x1_t __ret; - __ret = (uint64x1_t) __builtin_neon_vsqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 19); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vsqadd_u16(uint16x4_t __p0, uint16x4_t __p1) { - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vsqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 17); - return __ret; -} -#else -__ai uint16x4_t vsqadd_u16(uint16x4_t __p0, uint16x4_t __p1) { - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vsqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vsqrtq_f64(float64x2_t __p0) { - float64x2_t __ret; - __ret = (float64x2_t) __builtin_neon_vsqrtq_v((int8x16_t)__p0, 42); - return __ret; -} -#else -__ai float64x2_t vsqrtq_f64(float64x2_t __p0) { - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float64x2_t __ret; - __ret = (float64x2_t) __builtin_neon_vsqrtq_v((int8x16_t)__rev0, 42); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vsqrtq_f32(float32x4_t __p0) { - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vsqrtq_v((int8x16_t)__p0, 41); - return __ret; -} -#else -__ai float32x4_t vsqrtq_f32(float32x4_t __p0) { - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vsqrtq_v((int8x16_t)__rev0, 41); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64x1_t vsqrt_f64(float64x1_t __p0) { - float64x1_t __ret; - __ret = (float64x1_t) __builtin_neon_vsqrt_v((int8x8_t)__p0, 10); - return __ret; -} -#else -__ai float64x1_t vsqrt_f64(float64x1_t __p0) { - float64x1_t __ret; - __ret = (float64x1_t) __builtin_neon_vsqrt_v((int8x8_t)__p0, 10); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vsqrt_f32(float32x2_t __p0) { - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vsqrt_v((int8x8_t)__p0, 9); - return __ret; -} -#else -__ai float32x2_t vsqrt_f32(float32x2_t __p0) { - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float32x2_t __ret; - __ret = 
(float32x2_t) __builtin_neon_vsqrt_v((int8x8_t)__rev0, 9); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vsrad_n_u64(__p0, __p1, __p2) __extension__ ({ \ - uint64_t __s0 = __p0; \ - uint64_t __s1 = __p1; \ - uint64_t __ret; \ - __ret = (uint64_t) __builtin_neon_vsrad_n_u64(__s0, __s1, __p2); \ - __ret; \ -}) -#else -#define vsrad_n_u64(__p0, __p1, __p2) __extension__ ({ \ - uint64_t __s0 = __p0; \ - uint64_t __s1 = __p1; \ - uint64_t __ret; \ - __ret = (uint64_t) __builtin_neon_vsrad_n_u64(__s0, __s1, __p2); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vsrad_n_s64(__p0, __p1, __p2) __extension__ ({ \ - int64_t __s0 = __p0; \ - int64_t __s1 = __p1; \ - int64_t __ret; \ - __ret = (int64_t) __builtin_neon_vsrad_n_s64(__s0, __s1, __p2); \ - __ret; \ -}) -#else -#define vsrad_n_s64(__p0, __p1, __p2) __extension__ ({ \ - int64_t __s0 = __p0; \ - int64_t __s1 = __p1; \ - int64_t __ret; \ - __ret = (int64_t) __builtin_neon_vsrad_n_s64(__s0, __s1, __p2); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vsrid_n_u64(__p0, __p1, __p2) __extension__ ({ \ - uint64_t __s0 = __p0; \ - uint64_t __s1 = __p1; \ - uint64_t __ret; \ - __ret = (uint64_t) __builtin_neon_vsrid_n_u64(__s0, __s1, __p2); \ - __ret; \ -}) -#else -#define vsrid_n_u64(__p0, __p1, __p2) __extension__ ({ \ - uint64_t __s0 = __p0; \ - uint64_t __s1 = __p1; \ - uint64_t __ret; \ - __ret = (uint64_t) __builtin_neon_vsrid_n_u64(__s0, __s1, __p2); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vsrid_n_s64(__p0, __p1, __p2) __extension__ ({ \ - int64_t __s0 = __p0; \ - int64_t __s1 = __p1; \ - int64_t __ret; \ - __ret = (int64_t) __builtin_neon_vsrid_n_s64(__s0, __s1, __p2); \ - __ret; \ -}) -#else -#define vsrid_n_s64(__p0, __p1, __p2) __extension__ ({ \ - int64_t __s0 = __p0; \ - int64_t __s1 = __p1; \ - int64_t __ret; \ - __ret = (int64_t) __builtin_neon_vsrid_n_s64(__s0, __s1, __p2); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vsri_n_p64(__p0, __p1, __p2) __extension__ ({ \ - poly64x1_t __s0 = __p0; \ - poly64x1_t __s1 = __p1; \ - poly64x1_t __ret; \ - __ret = (poly64x1_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 6); \ - __ret; \ -}) -#else -#define vsri_n_p64(__p0, __p1, __p2) __extension__ ({ \ - poly64x1_t __s0 = __p0; \ - poly64x1_t __s1 = __p1; \ - poly64x1_t __ret; \ - __ret = (poly64x1_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 6); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vsriq_n_p64(__p0, __p1, __p2) __extension__ ({ \ - poly64x2_t __s0 = __p0; \ - poly64x2_t __s1 = __p1; \ - poly64x2_t __ret; \ - __ret = (poly64x2_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 38); \ - __ret; \ -}) -#else -#define vsriq_n_p64(__p0, __p1, __p2) __extension__ ({ \ - poly64x2_t __s0 = __p0; \ - poly64x2_t __s1 = __p1; \ - poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - poly64x2_t __ret; \ - __ret = (poly64x2_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 38); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1_p64(__p0, __p1) __extension__ ({ \ - poly64x1_t __s1 = __p1; \ - __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 6); \ -}) -#else -#define vst1_p64(__p0, __p1) __extension__ ({ \ - poly64x1_t __s1 = __p1; \ - __builtin_neon_vst1_v(__p0, 
(int8x8_t)__s1, 6); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1q_p64(__p0, __p1) __extension__ ({ \ - poly64x2_t __s1 = __p1; \ - __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 38); \ -}) -#else -#define vst1q_p64(__p0, __p1) __extension__ ({ \ - poly64x2_t __s1 = __p1; \ - poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 38); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1q_f64(__p0, __p1) __extension__ ({ \ - float64x2_t __s1 = __p1; \ - __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 42); \ -}) -#else -#define vst1q_f64(__p0, __p1) __extension__ ({ \ - float64x2_t __s1 = __p1; \ - float64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 42); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1_f64(__p0, __p1) __extension__ ({ \ - float64x1_t __s1 = __p1; \ - __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 10); \ -}) -#else -#define vst1_f64(__p0, __p1) __extension__ ({ \ - float64x1_t __s1 = __p1; \ - __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 10); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1_lane_p64(__p0, __p1, __p2) __extension__ ({ \ - poly64x1_t __s1 = __p1; \ - __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 6); \ -}) -#else -#define vst1_lane_p64(__p0, __p1, __p2) __extension__ ({ \ - poly64x1_t __s1 = __p1; \ - __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 6); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1q_lane_p64(__p0, __p1, __p2) __extension__ ({ \ - poly64x2_t __s1 = __p1; \ - __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 38); \ -}) -#else -#define vst1q_lane_p64(__p0, __p1, __p2) __extension__ ({ \ - poly64x2_t __s1 = __p1; \ - poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 38); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1q_lane_f64(__p0, __p1, __p2) __extension__ ({ \ - float64x2_t __s1 = __p1; \ - __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 42); \ -}) -#else -#define vst1q_lane_f64(__p0, __p1, __p2) __extension__ ({ \ - float64x2_t __s1 = __p1; \ - float64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 42); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1_lane_f64(__p0, __p1, __p2) __extension__ ({ \ - float64x1_t __s1 = __p1; \ - __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 10); \ -}) -#else -#define vst1_lane_f64(__p0, __p1, __p2) __extension__ ({ \ - float64x1_t __s1 = __p1; \ - __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 10); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1_p8_x2(__p0, __p1) __extension__ ({ \ - poly8x8x2_t __s1 = __p1; \ - __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 4); \ -}) -#else -#define vst1_p8_x2(__p0, __p1) __extension__ ({ \ - poly8x8x2_t __s1 = __p1; \ - poly8x8x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 4); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1_p64_x2(__p0, __p1) __extension__ ({ \ - poly64x1x2_t __s1 = __p1; \ - __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 6); \ -}) -#else -#define 
vst1_p64_x2(__p0, __p1) __extension__ ({ \ - poly64x1x2_t __s1 = __p1; \ - __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 6); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1_p16_x2(__p0, __p1) __extension__ ({ \ - poly16x4x2_t __s1 = __p1; \ - __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 5); \ -}) -#else -#define vst1_p16_x2(__p0, __p1) __extension__ ({ \ - poly16x4x2_t __s1 = __p1; \ - poly16x4x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 5); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1q_p8_x2(__p0, __p1) __extension__ ({ \ - poly8x16x2_t __s1 = __p1; \ - __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 36); \ -}) -#else -#define vst1q_p8_x2(__p0, __p1) __extension__ ({ \ - poly8x16x2_t __s1 = __p1; \ - poly8x16x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 36); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1q_p64_x2(__p0, __p1) __extension__ ({ \ - poly64x2x2_t __s1 = __p1; \ - __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 38); \ -}) -#else -#define vst1q_p64_x2(__p0, __p1) __extension__ ({ \ - poly64x2x2_t __s1 = __p1; \ - poly64x2x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 38); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1q_p16_x2(__p0, __p1) __extension__ ({ \ - poly16x8x2_t __s1 = __p1; \ - __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 37); \ -}) -#else -#define vst1q_p16_x2(__p0, __p1) __extension__ ({ \ - poly16x8x2_t __s1 = __p1; \ - poly16x8x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 37); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1q_u8_x2(__p0, __p1) __extension__ ({ \ - uint8x16x2_t __s1 = __p1; \ - __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 48); \ -}) -#else -#define vst1q_u8_x2(__p0, __p1) __extension__ ({ \ - uint8x16x2_t __s1 = __p1; \ - uint8x16x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 48); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1q_u32_x2(__p0, __p1) __extension__ ({ \ - uint32x4x2_t __s1 = __p1; \ - __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 50); \ -}) -#else -#define vst1q_u32_x2(__p0, __p1) __extension__ ({ \ - 
uint32x4x2_t __s1 = __p1; \ - uint32x4x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 50); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1q_u64_x2(__p0, __p1) __extension__ ({ \ - uint64x2x2_t __s1 = __p1; \ - __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 51); \ -}) -#else -#define vst1q_u64_x2(__p0, __p1) __extension__ ({ \ - uint64x2x2_t __s1 = __p1; \ - uint64x2x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 51); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1q_u16_x2(__p0, __p1) __extension__ ({ \ - uint16x8x2_t __s1 = __p1; \ - __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 49); \ -}) -#else -#define vst1q_u16_x2(__p0, __p1) __extension__ ({ \ - uint16x8x2_t __s1 = __p1; \ - uint16x8x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 49); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1q_s8_x2(__p0, __p1) __extension__ ({ \ - int8x16x2_t __s1 = __p1; \ - __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 32); \ -}) -#else -#define vst1q_s8_x2(__p0, __p1) __extension__ ({ \ - int8x16x2_t __s1 = __p1; \ - int8x16x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 32); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1q_f64_x2(__p0, __p1) __extension__ ({ \ - float64x2x2_t __s1 = __p1; \ - __builtin_neon_vst1q_x2_v(__p0, __s1.val[0], __s1.val[1], 42); \ -}) -#else -#define vst1q_f64_x2(__p0, __p1) __extension__ ({ \ - float64x2x2_t __s1 = __p1; \ - float64x2x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __builtin_neon_vst1q_x2_v(__p0, __rev1.val[0], __rev1.val[1], 42); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1q_f32_x2(__p0, __p1) __extension__ ({ \ - float32x4x2_t __s1 = __p1; \ - __builtin_neon_vst1q_x2_v(__p0, __s1.val[0], __s1.val[1], 41); \ -}) -#else -#define vst1q_f32_x2(__p0, __p1) __extension__ ({ \ - float32x4x2_t __s1 = __p1; \ - float32x4x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __builtin_neon_vst1q_x2_v(__p0, __rev1.val[0], __rev1.val[1], 41); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1q_f16_x2(__p0, __p1) __extension__ ({ \ - float16x8x2_t __s1 = __p1; \ - __builtin_neon_vst1q_x2_v(__p0, __s1.val[0], __s1.val[1], 40); \ -}) -#else -#define vst1q_f16_x2(__p0, __p1) __extension__ ({ \ - float16x8x2_t __s1 = __p1; \ - 
float16x8x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst1q_x2_v(__p0, __rev1.val[0], __rev1.val[1], 40); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1q_s32_x2(__p0, __p1) __extension__ ({ \ - int32x4x2_t __s1 = __p1; \ - __builtin_neon_vst1q_x2_v(__p0, __s1.val[0], __s1.val[1], 34); \ -}) -#else -#define vst1q_s32_x2(__p0, __p1) __extension__ ({ \ - int32x4x2_t __s1 = __p1; \ - int32x4x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __builtin_neon_vst1q_x2_v(__p0, __rev1.val[0], __rev1.val[1], 34); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1q_s64_x2(__p0, __p1) __extension__ ({ \ - int64x2x2_t __s1 = __p1; \ - __builtin_neon_vst1q_x2_v(__p0, __s1.val[0], __s1.val[1], 35); \ -}) -#else -#define vst1q_s64_x2(__p0, __p1) __extension__ ({ \ - int64x2x2_t __s1 = __p1; \ - int64x2x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __builtin_neon_vst1q_x2_v(__p0, __rev1.val[0], __rev1.val[1], 35); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1q_s16_x2(__p0, __p1) __extension__ ({ \ - int16x8x2_t __s1 = __p1; \ - __builtin_neon_vst1q_x2_v(__p0, __s1.val[0], __s1.val[1], 33); \ -}) -#else -#define vst1q_s16_x2(__p0, __p1) __extension__ ({ \ - int16x8x2_t __s1 = __p1; \ - int16x8x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst1q_x2_v(__p0, __rev1.val[0], __rev1.val[1], 33); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1_u8_x2(__p0, __p1) __extension__ ({ \ - uint8x8x2_t __s1 = __p1; \ - __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 16); \ -}) -#else -#define vst1_u8_x2(__p0, __p1) __extension__ ({ \ - uint8x8x2_t __s1 = __p1; \ - uint8x8x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 16); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1_u32_x2(__p0, __p1) __extension__ ({ \ - uint32x2x2_t __s1 = __p1; \ - __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 18); \ -}) -#else -#define vst1_u32_x2(__p0, __p1) __extension__ ({ \ - uint32x2x2_t __s1 = __p1; \ - uint32x2x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 18); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1_u64_x2(__p0, __p1) __extension__ ({ \ - uint64x1x2_t __s1 = __p1; \ - __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 19); \ -}) -#else -#define vst1_u64_x2(__p0, __p1) __extension__ ({ \ - uint64x1x2_t __s1 = __p1; \ - __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 19); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1_u16_x2(__p0, 
__p1) __extension__ ({ \ - uint16x4x2_t __s1 = __p1; \ - __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 17); \ -}) -#else -#define vst1_u16_x2(__p0, __p1) __extension__ ({ \ - uint16x4x2_t __s1 = __p1; \ - uint16x4x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 17); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1_s8_x2(__p0, __p1) __extension__ ({ \ - int8x8x2_t __s1 = __p1; \ - __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 0); \ -}) -#else -#define vst1_s8_x2(__p0, __p1) __extension__ ({ \ - int8x8x2_t __s1 = __p1; \ - int8x8x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 0); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1_f64_x2(__p0, __p1) __extension__ ({ \ - float64x1x2_t __s1 = __p1; \ - __builtin_neon_vst1_x2_v(__p0, __s1.val[0], __s1.val[1], 10); \ -}) -#else -#define vst1_f64_x2(__p0, __p1) __extension__ ({ \ - float64x1x2_t __s1 = __p1; \ - __builtin_neon_vst1_x2_v(__p0, __s1.val[0], __s1.val[1], 10); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1_f32_x2(__p0, __p1) __extension__ ({ \ - float32x2x2_t __s1 = __p1; \ - __builtin_neon_vst1_x2_v(__p0, __s1.val[0], __s1.val[1], 9); \ -}) -#else -#define vst1_f32_x2(__p0, __p1) __extension__ ({ \ - float32x2x2_t __s1 = __p1; \ - float32x2x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __builtin_neon_vst1_x2_v(__p0, __rev1.val[0], __rev1.val[1], 9); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1_f16_x2(__p0, __p1) __extension__ ({ \ - float16x4x2_t __s1 = __p1; \ - __builtin_neon_vst1_x2_v(__p0, __s1.val[0], __s1.val[1], 8); \ -}) -#else -#define vst1_f16_x2(__p0, __p1) __extension__ ({ \ - float16x4x2_t __s1 = __p1; \ - float16x4x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __builtin_neon_vst1_x2_v(__p0, __rev1.val[0], __rev1.val[1], 8); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1_s32_x2(__p0, __p1) __extension__ ({ \ - int32x2x2_t __s1 = __p1; \ - __builtin_neon_vst1_x2_v(__p0, __s1.val[0], __s1.val[1], 2); \ -}) -#else -#define vst1_s32_x2(__p0, __p1) __extension__ ({ \ - int32x2x2_t __s1 = __p1; \ - int32x2x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __builtin_neon_vst1_x2_v(__p0, __rev1.val[0], __rev1.val[1], 2); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1_s64_x2(__p0, __p1) __extension__ ({ \ - int64x1x2_t __s1 = __p1; \ - __builtin_neon_vst1_x2_v(__p0, __s1.val[0], __s1.val[1], 3); \ -}) -#else -#define vst1_s64_x2(__p0, __p1) __extension__ ({ \ - int64x1x2_t __s1 = __p1; \ - __builtin_neon_vst1_x2_v(__p0, __s1.val[0], __s1.val[1], 3); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1_s16_x2(__p0, __p1) __extension__ ({ \ - int16x4x2_t __s1 = __p1; \ - 
__builtin_neon_vst1_x2_v(__p0, __s1.val[0], __s1.val[1], 1); \ -}) -#else -#define vst1_s16_x2(__p0, __p1) __extension__ ({ \ - int16x4x2_t __s1 = __p1; \ - int16x4x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __builtin_neon_vst1_x2_v(__p0, __rev1.val[0], __rev1.val[1], 1); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1_p8_x3(__p0, __p1) __extension__ ({ \ - poly8x8x3_t __s1 = __p1; \ - __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 4); \ -}) -#else -#define vst1_p8_x3(__p0, __p1) __extension__ ({ \ - poly8x8x3_t __s1 = __p1; \ - poly8x8x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 4); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1_p64_x3(__p0, __p1) __extension__ ({ \ - poly64x1x3_t __s1 = __p1; \ - __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 6); \ -}) -#else -#define vst1_p64_x3(__p0, __p1) __extension__ ({ \ - poly64x1x3_t __s1 = __p1; \ - __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 6); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1_p16_x3(__p0, __p1) __extension__ ({ \ - poly16x4x3_t __s1 = __p1; \ - __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 5); \ -}) -#else -#define vst1_p16_x3(__p0, __p1) __extension__ ({ \ - poly16x4x3_t __s1 = __p1; \ - poly16x4x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ - __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 5); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1q_p8_x3(__p0, __p1) __extension__ ({ \ - poly8x16x3_t __s1 = __p1; \ - __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 36); \ -}) -#else -#define vst1q_p8_x3(__p0, __p1) __extension__ ({ \ - poly8x16x3_t __s1 = __p1; \ - poly8x16x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 36); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1q_p64_x3(__p0, __p1) __extension__ ({ \ - poly64x2x3_t __s1 = __p1; \ - __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 38); \ -}) -#else -#define vst1q_p64_x3(__p0, __p1) __extension__ ({ \ - poly64x2x3_t __s1 = __p1; \ - poly64x2x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], 
__s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ - __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 38); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1q_p16_x3(__p0, __p1) __extension__ ({ \ - poly16x8x3_t __s1 = __p1; \ - __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 37); \ -}) -#else -#define vst1q_p16_x3(__p0, __p1) __extension__ ({ \ - poly16x8x3_t __s1 = __p1; \ - poly16x8x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 37); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1q_u8_x3(__p0, __p1) __extension__ ({ \ - uint8x16x3_t __s1 = __p1; \ - __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 48); \ -}) -#else -#define vst1q_u8_x3(__p0, __p1) __extension__ ({ \ - uint8x16x3_t __s1 = __p1; \ - uint8x16x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 48); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1q_u32_x3(__p0, __p1) __extension__ ({ \ - uint32x4x3_t __s1 = __p1; \ - __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 50); \ -}) -#else -#define vst1q_u32_x3(__p0, __p1) __extension__ ({ \ - uint32x4x3_t __s1 = __p1; \ - uint32x4x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ - __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 50); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1q_u64_x3(__p0, __p1) __extension__ ({ \ - uint64x2x3_t __s1 = __p1; \ - __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 51); \ -}) -#else -#define vst1q_u64_x3(__p0, __p1) __extension__ ({ \ - uint64x2x3_t __s1 = __p1; \ - uint64x2x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ - __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 51); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1q_u16_x3(__p0, __p1) __extension__ ({ \ - uint16x8x3_t __s1 = __p1; \ - __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 
(int8x16_t)__s1.val[2], 49); \ -}) -#else -#define vst1q_u16_x3(__p0, __p1) __extension__ ({ \ - uint16x8x3_t __s1 = __p1; \ - uint16x8x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 49); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1q_s8_x3(__p0, __p1) __extension__ ({ \ - int8x16x3_t __s1 = __p1; \ - __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 32); \ -}) -#else -#define vst1q_s8_x3(__p0, __p1) __extension__ ({ \ - int8x16x3_t __s1 = __p1; \ - int8x16x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 32); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1q_f64_x3(__p0, __p1) __extension__ ({ \ - float64x2x3_t __s1 = __p1; \ - __builtin_neon_vst1q_x3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 42); \ -}) -#else -#define vst1q_f64_x3(__p0, __p1) __extension__ ({ \ - float64x2x3_t __s1 = __p1; \ - float64x2x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ - __builtin_neon_vst1q_x3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 42); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1q_f32_x3(__p0, __p1) __extension__ ({ \ - float32x4x3_t __s1 = __p1; \ - __builtin_neon_vst1q_x3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 41); \ -}) -#else -#define vst1q_f32_x3(__p0, __p1) __extension__ ({ \ - float32x4x3_t __s1 = __p1; \ - float32x4x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ - __builtin_neon_vst1q_x3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 41); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1q_f16_x3(__p0, __p1) __extension__ ({ \ - float16x8x3_t __s1 = __p1; \ - __builtin_neon_vst1q_x3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 40); \ -}) -#else -#define vst1q_f16_x3(__p0, __p1) __extension__ ({ \ - float16x8x3_t __s1 = __p1; \ - float16x8x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst1q_x3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 40); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1q_s32_x3(__p0, __p1) __extension__ ({ \ - int32x4x3_t __s1 = __p1; \ - __builtin_neon_vst1q_x3_v(__p0, 
__s1.val[0], __s1.val[1], __s1.val[2], 34); \ -}) -#else -#define vst1q_s32_x3(__p0, __p1) __extension__ ({ \ - int32x4x3_t __s1 = __p1; \ - int32x4x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ - __builtin_neon_vst1q_x3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 34); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1q_s64_x3(__p0, __p1) __extension__ ({ \ - int64x2x3_t __s1 = __p1; \ - __builtin_neon_vst1q_x3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 35); \ -}) -#else -#define vst1q_s64_x3(__p0, __p1) __extension__ ({ \ - int64x2x3_t __s1 = __p1; \ - int64x2x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ - __builtin_neon_vst1q_x3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 35); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1q_s16_x3(__p0, __p1) __extension__ ({ \ - int16x8x3_t __s1 = __p1; \ - __builtin_neon_vst1q_x3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 33); \ -}) -#else -#define vst1q_s16_x3(__p0, __p1) __extension__ ({ \ - int16x8x3_t __s1 = __p1; \ - int16x8x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst1q_x3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 33); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1_u8_x3(__p0, __p1) __extension__ ({ \ - uint8x8x3_t __s1 = __p1; \ - __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 16); \ -}) -#else -#define vst1_u8_x3(__p0, __p1) __extension__ ({ \ - uint8x8x3_t __s1 = __p1; \ - uint8x8x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 16); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1_u32_x3(__p0, __p1) __extension__ ({ \ - uint32x2x3_t __s1 = __p1; \ - __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 18); \ -}) -#else -#define vst1_u32_x3(__p0, __p1) __extension__ ({ \ - uint32x2x3_t __s1 = __p1; \ - uint32x2x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ - __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 18); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1_u64_x3(__p0, __p1) __extension__ ({ \ - uint64x1x3_t __s1 = __p1; \ - __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 19); \ -}) -#else -#define vst1_u64_x3(__p0, __p1) __extension__ ({ 
\ - uint64x1x3_t __s1 = __p1; \ - __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 19); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1_u16_x3(__p0, __p1) __extension__ ({ \ - uint16x4x3_t __s1 = __p1; \ - __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 17); \ -}) -#else -#define vst1_u16_x3(__p0, __p1) __extension__ ({ \ - uint16x4x3_t __s1 = __p1; \ - uint16x4x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ - __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 17); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1_s8_x3(__p0, __p1) __extension__ ({ \ - int8x8x3_t __s1 = __p1; \ - __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 0); \ -}) -#else -#define vst1_s8_x3(__p0, __p1) __extension__ ({ \ - int8x8x3_t __s1 = __p1; \ - int8x8x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 0); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1_f64_x3(__p0, __p1) __extension__ ({ \ - float64x1x3_t __s1 = __p1; \ - __builtin_neon_vst1_x3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 10); \ -}) -#else -#define vst1_f64_x3(__p0, __p1) __extension__ ({ \ - float64x1x3_t __s1 = __p1; \ - __builtin_neon_vst1_x3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 10); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1_f32_x3(__p0, __p1) __extension__ ({ \ - float32x2x3_t __s1 = __p1; \ - __builtin_neon_vst1_x3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 9); \ -}) -#else -#define vst1_f32_x3(__p0, __p1) __extension__ ({ \ - float32x2x3_t __s1 = __p1; \ - float32x2x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ - __builtin_neon_vst1_x3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 9); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1_f16_x3(__p0, __p1) __extension__ ({ \ - float16x4x3_t __s1 = __p1; \ - __builtin_neon_vst1_x3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 8); \ -}) -#else -#define vst1_f16_x3(__p0, __p1) __extension__ ({ \ - float16x4x3_t __s1 = __p1; \ - float16x4x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ - __builtin_neon_vst1_x3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 8); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1_s32_x3(__p0, __p1) __extension__ ({ \ - int32x2x3_t __s1 = __p1; \ - __builtin_neon_vst1_x3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 2); \ -}) -#else -#define vst1_s32_x3(__p0, __p1) __extension__ ({ \ - int32x2x3_t __s1 = __p1; \ - 
int32x2x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ - __builtin_neon_vst1_x3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 2); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1_s64_x3(__p0, __p1) __extension__ ({ \ - int64x1x3_t __s1 = __p1; \ - __builtin_neon_vst1_x3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 3); \ -}) -#else -#define vst1_s64_x3(__p0, __p1) __extension__ ({ \ - int64x1x3_t __s1 = __p1; \ - __builtin_neon_vst1_x3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 3); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1_s16_x3(__p0, __p1) __extension__ ({ \ - int16x4x3_t __s1 = __p1; \ - __builtin_neon_vst1_x3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 1); \ -}) -#else -#define vst1_s16_x3(__p0, __p1) __extension__ ({ \ - int16x4x3_t __s1 = __p1; \ - int16x4x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ - __builtin_neon_vst1_x3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 1); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1_p8_x4(__p0, __p1) __extension__ ({ \ - poly8x8x4_t __s1 = __p1; \ - __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 4); \ -}) -#else -#define vst1_p8_x4(__p0, __p1) __extension__ ({ \ - poly8x8x4_t __s1 = __p1; \ - poly8x8x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 4); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1_p64_x4(__p0, __p1) __extension__ ({ \ - poly64x1x4_t __s1 = __p1; \ - __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 6); \ -}) -#else -#define vst1_p64_x4(__p0, __p1) __extension__ ({ \ - poly64x1x4_t __s1 = __p1; \ - __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 6); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1_p16_x4(__p0, __p1) __extension__ ({ \ - poly16x4x4_t __s1 = __p1; \ - __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 5); \ -}) -#else -#define vst1_p16_x4(__p0, __p1) __extension__ ({ \ - poly16x4x4_t __s1 = __p1; \ - poly16x4x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ - __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 
5); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1q_p8_x4(__p0, __p1) __extension__ ({ \ - poly8x16x4_t __s1 = __p1; \ - __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 36); \ -}) -#else -#define vst1q_p8_x4(__p0, __p1) __extension__ ({ \ - poly8x16x4_t __s1 = __p1; \ - poly8x16x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 36); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1q_p64_x4(__p0, __p1) __extension__ ({ \ - poly64x2x4_t __s1 = __p1; \ - __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 38); \ -}) -#else -#define vst1q_p64_x4(__p0, __p1) __extension__ ({ \ - poly64x2x4_t __s1 = __p1; \ - poly64x2x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ - __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 38); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1q_p16_x4(__p0, __p1) __extension__ ({ \ - poly16x8x4_t __s1 = __p1; \ - __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 37); \ -}) -#else -#define vst1q_p16_x4(__p0, __p1) __extension__ ({ \ - poly16x8x4_t __s1 = __p1; \ - poly16x8x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 37); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1q_u8_x4(__p0, __p1) __extension__ ({ \ - uint8x16x4_t __s1 = __p1; \ - __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 48); \ -}) -#else -#define vst1q_u8_x4(__p0, __p1) __extension__ ({ \ - uint8x16x4_t __s1 = __p1; \ - uint8x16x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[3] = 
__builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 48); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1q_u32_x4(__p0, __p1) __extension__ ({ \ - uint32x4x4_t __s1 = __p1; \ - __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 50); \ -}) -#else -#define vst1q_u32_x4(__p0, __p1) __extension__ ({ \ - uint32x4x4_t __s1 = __p1; \ - uint32x4x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ - __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 50); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1q_u64_x4(__p0, __p1) __extension__ ({ \ - uint64x2x4_t __s1 = __p1; \ - __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 51); \ -}) -#else -#define vst1q_u64_x4(__p0, __p1) __extension__ ({ \ - uint64x2x4_t __s1 = __p1; \ - uint64x2x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ - __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 51); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1q_u16_x4(__p0, __p1) __extension__ ({ \ - uint16x8x4_t __s1 = __p1; \ - __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 49); \ -}) -#else -#define vst1q_u16_x4(__p0, __p1) __extension__ ({ \ - uint16x8x4_t __s1 = __p1; \ - uint16x8x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 49); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1q_s8_x4(__p0, __p1) __extension__ ({ \ - int8x16x4_t __s1 = __p1; \ - __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 32); \ -}) -#else -#define vst1q_s8_x4(__p0, __p1) __extension__ ({ \ - int8x16x4_t __s1 = __p1; \ - int8x16x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 
11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 32); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1q_f64_x4(__p0, __p1) __extension__ ({ \ - float64x2x4_t __s1 = __p1; \ - __builtin_neon_vst1q_x4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 42); \ -}) -#else -#define vst1q_f64_x4(__p0, __p1) __extension__ ({ \ - float64x2x4_t __s1 = __p1; \ - float64x2x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ - __builtin_neon_vst1q_x4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 42); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1q_f32_x4(__p0, __p1) __extension__ ({ \ - float32x4x4_t __s1 = __p1; \ - __builtin_neon_vst1q_x4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 41); \ -}) -#else -#define vst1q_f32_x4(__p0, __p1) __extension__ ({ \ - float32x4x4_t __s1 = __p1; \ - float32x4x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ - __builtin_neon_vst1q_x4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 41); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1q_f16_x4(__p0, __p1) __extension__ ({ \ - float16x8x4_t __s1 = __p1; \ - __builtin_neon_vst1q_x4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 40); \ -}) -#else -#define vst1q_f16_x4(__p0, __p1) __extension__ ({ \ - float16x8x4_t __s1 = __p1; \ - float16x8x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst1q_x4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 40); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1q_s32_x4(__p0, __p1) __extension__ ({ \ - int32x4x4_t __s1 = __p1; \ - __builtin_neon_vst1q_x4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 34); \ -}) -#else -#define vst1q_s32_x4(__p0, __p1) __extension__ ({ \ - int32x4x4_t __s1 = __p1; \ - int32x4x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ - __builtin_neon_vst1q_x4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 34); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1q_s64_x4(__p0, __p1) __extension__ ({ \ - int64x2x4_t __s1 = __p1; \ - 
__builtin_neon_vst1q_x4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 35); \ -}) -#else -#define vst1q_s64_x4(__p0, __p1) __extension__ ({ \ - int64x2x4_t __s1 = __p1; \ - int64x2x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ - __builtin_neon_vst1q_x4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 35); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1q_s16_x4(__p0, __p1) __extension__ ({ \ - int16x8x4_t __s1 = __p1; \ - __builtin_neon_vst1q_x4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 33); \ -}) -#else -#define vst1q_s16_x4(__p0, __p1) __extension__ ({ \ - int16x8x4_t __s1 = __p1; \ - int16x8x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst1q_x4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 33); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1_u8_x4(__p0, __p1) __extension__ ({ \ - uint8x8x4_t __s1 = __p1; \ - __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 16); \ -}) -#else -#define vst1_u8_x4(__p0, __p1) __extension__ ({ \ - uint8x8x4_t __s1 = __p1; \ - uint8x8x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 16); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1_u32_x4(__p0, __p1) __extension__ ({ \ - uint32x2x4_t __s1 = __p1; \ - __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 18); \ -}) -#else -#define vst1_u32_x4(__p0, __p1) __extension__ ({ \ - uint32x2x4_t __s1 = __p1; \ - uint32x2x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ - __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 18); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1_u64_x4(__p0, __p1) __extension__ ({ \ - uint64x1x4_t __s1 = __p1; \ - __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 19); \ -}) -#else -#define vst1_u64_x4(__p0, __p1) __extension__ ({ \ - uint64x1x4_t __s1 = __p1; \ - __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], 
(int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 19); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1_u16_x4(__p0, __p1) __extension__ ({ \ - uint16x4x4_t __s1 = __p1; \ - __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 17); \ -}) -#else -#define vst1_u16_x4(__p0, __p1) __extension__ ({ \ - uint16x4x4_t __s1 = __p1; \ - uint16x4x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ - __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 17); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1_s8_x4(__p0, __p1) __extension__ ({ \ - int8x8x4_t __s1 = __p1; \ - __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 0); \ -}) -#else -#define vst1_s8_x4(__p0, __p1) __extension__ ({ \ - int8x8x4_t __s1 = __p1; \ - int8x8x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 0); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1_f64_x4(__p0, __p1) __extension__ ({ \ - float64x1x4_t __s1 = __p1; \ - __builtin_neon_vst1_x4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 10); \ -}) -#else -#define vst1_f64_x4(__p0, __p1) __extension__ ({ \ - float64x1x4_t __s1 = __p1; \ - __builtin_neon_vst1_x4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 10); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1_f32_x4(__p0, __p1) __extension__ ({ \ - float32x2x4_t __s1 = __p1; \ - __builtin_neon_vst1_x4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 9); \ -}) -#else -#define vst1_f32_x4(__p0, __p1) __extension__ ({ \ - float32x2x4_t __s1 = __p1; \ - float32x2x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ - __builtin_neon_vst1_x4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 9); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1_f16_x4(__p0, __p1) __extension__ ({ \ - float16x4x4_t __s1 = __p1; \ - __builtin_neon_vst1_x4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 8); \ -}) -#else -#define vst1_f16_x4(__p0, __p1) __extension__ ({ \ - float16x4x4_t __s1 = __p1; \ - float16x4x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ - __rev1.val[3] = 
__builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ - __builtin_neon_vst1_x4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 8); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1_s32_x4(__p0, __p1) __extension__ ({ \ - int32x2x4_t __s1 = __p1; \ - __builtin_neon_vst1_x4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 2); \ -}) -#else -#define vst1_s32_x4(__p0, __p1) __extension__ ({ \ - int32x2x4_t __s1 = __p1; \ - int32x2x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ - __builtin_neon_vst1_x4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 2); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1_s64_x4(__p0, __p1) __extension__ ({ \ - int64x1x4_t __s1 = __p1; \ - __builtin_neon_vst1_x4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 3); \ -}) -#else -#define vst1_s64_x4(__p0, __p1) __extension__ ({ \ - int64x1x4_t __s1 = __p1; \ - __builtin_neon_vst1_x4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 3); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1_s16_x4(__p0, __p1) __extension__ ({ \ - int16x4x4_t __s1 = __p1; \ - __builtin_neon_vst1_x4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 1); \ -}) -#else -#define vst1_s16_x4(__p0, __p1) __extension__ ({ \ - int16x4x4_t __s1 = __p1; \ - int16x4x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ - __builtin_neon_vst1_x4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 1); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst2_p64(__p0, __p1) __extension__ ({ \ - poly64x1x2_t __s1 = __p1; \ - __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 6); \ -}) -#else -#define vst2_p64(__p0, __p1) __extension__ ({ \ - poly64x1x2_t __s1 = __p1; \ - __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 6); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst2q_p64(__p0, __p1) __extension__ ({ \ - poly64x2x2_t __s1 = __p1; \ - __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 38); \ -}) -#else -#define vst2q_p64(__p0, __p1) __extension__ ({ \ - poly64x2x2_t __s1 = __p1; \ - poly64x2x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 38); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst2q_u64(__p0, __p1) __extension__ ({ \ - uint64x2x2_t __s1 = __p1; \ - __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 51); \ -}) -#else -#define vst2q_u64(__p0, __p1) __extension__ ({ \ - uint64x2x2_t __s1 = __p1; \ - uint64x2x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 
51); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst2q_f64(__p0, __p1) __extension__ ({ \ - float64x2x2_t __s1 = __p1; \ - __builtin_neon_vst2q_v(__p0, __s1.val[0], __s1.val[1], 42); \ -}) -#else -#define vst2q_f64(__p0, __p1) __extension__ ({ \ - float64x2x2_t __s1 = __p1; \ - float64x2x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __builtin_neon_vst2q_v(__p0, __rev1.val[0], __rev1.val[1], 42); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst2q_s64(__p0, __p1) __extension__ ({ \ - int64x2x2_t __s1 = __p1; \ - __builtin_neon_vst2q_v(__p0, __s1.val[0], __s1.val[1], 35); \ -}) -#else -#define vst2q_s64(__p0, __p1) __extension__ ({ \ - int64x2x2_t __s1 = __p1; \ - int64x2x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __builtin_neon_vst2q_v(__p0, __rev1.val[0], __rev1.val[1], 35); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst2_f64(__p0, __p1) __extension__ ({ \ - float64x1x2_t __s1 = __p1; \ - __builtin_neon_vst2_v(__p0, __s1.val[0], __s1.val[1], 10); \ -}) -#else -#define vst2_f64(__p0, __p1) __extension__ ({ \ - float64x1x2_t __s1 = __p1; \ - __builtin_neon_vst2_v(__p0, __s1.val[0], __s1.val[1], 10); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst2_lane_p64(__p0, __p1, __p2) __extension__ ({ \ - poly64x1x2_t __s1 = __p1; \ - __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 6); \ -}) -#else -#define vst2_lane_p64(__p0, __p1, __p2) __extension__ ({ \ - poly64x1x2_t __s1 = __p1; \ - __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 6); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst2q_lane_p8(__p0, __p1, __p2) __extension__ ({ \ - poly8x16x2_t __s1 = __p1; \ - __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 36); \ -}) -#else -#define vst2q_lane_p8(__p0, __p1, __p2) __extension__ ({ \ - poly8x16x2_t __s1 = __p1; \ - poly8x16x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 36); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst2q_lane_p64(__p0, __p1, __p2) __extension__ ({ \ - poly64x2x2_t __s1 = __p1; \ - __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 38); \ -}) -#else -#define vst2q_lane_p64(__p0, __p1, __p2) __extension__ ({ \ - poly64x2x2_t __s1 = __p1; \ - poly64x2x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 38); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst2q_lane_u8(__p0, __p1, __p2) __extension__ ({ \ - uint8x16x2_t __s1 = __p1; \ - __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 48); \ -}) -#else -#define vst2q_lane_u8(__p0, __p1, __p2) __extension__ ({ \ - uint8x16x2_t __s1 = __p1; \ - uint8x16x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 
12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 48); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst2q_lane_u64(__p0, __p1, __p2) __extension__ ({ \ - uint64x2x2_t __s1 = __p1; \ - __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 51); \ -}) -#else -#define vst2q_lane_u64(__p0, __p1, __p2) __extension__ ({ \ - uint64x2x2_t __s1 = __p1; \ - uint64x2x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 51); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst2q_lane_s8(__p0, __p1, __p2) __extension__ ({ \ - int8x16x2_t __s1 = __p1; \ - __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 32); \ -}) -#else -#define vst2q_lane_s8(__p0, __p1, __p2) __extension__ ({ \ - int8x16x2_t __s1 = __p1; \ - int8x16x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 32); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst2q_lane_f64(__p0, __p1, __p2) __extension__ ({ \ - float64x2x2_t __s1 = __p1; \ - __builtin_neon_vst2q_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 42); \ -}) -#else -#define vst2q_lane_f64(__p0, __p1, __p2) __extension__ ({ \ - float64x2x2_t __s1 = __p1; \ - float64x2x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __builtin_neon_vst2q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __p2, 42); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst2q_lane_s64(__p0, __p1, __p2) __extension__ ({ \ - int64x2x2_t __s1 = __p1; \ - __builtin_neon_vst2q_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 35); \ -}) -#else -#define vst2q_lane_s64(__p0, __p1, __p2) __extension__ ({ \ - int64x2x2_t __s1 = __p1; \ - int64x2x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __builtin_neon_vst2q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __p2, 35); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst2_lane_u64(__p0, __p1, __p2) __extension__ ({ \ - uint64x1x2_t __s1 = __p1; \ - __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 19); \ -}) -#else -#define vst2_lane_u64(__p0, __p1, __p2) __extension__ ({ \ - uint64x1x2_t __s1 = __p1; \ - __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 19); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst2_lane_f64(__p0, __p1, __p2) __extension__ ({ \ - float64x1x2_t __s1 = __p1; \ - __builtin_neon_vst2_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 10); \ -}) -#else -#define vst2_lane_f64(__p0, __p1, __p2) __extension__ ({ \ - float64x1x2_t __s1 = __p1; \ - __builtin_neon_vst2_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 10); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ 
-#define vst2_lane_s64(__p0, __p1, __p2) __extension__ ({ \ - int64x1x2_t __s1 = __p1; \ - __builtin_neon_vst2_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 3); \ -}) -#else -#define vst2_lane_s64(__p0, __p1, __p2) __extension__ ({ \ - int64x1x2_t __s1 = __p1; \ - __builtin_neon_vst2_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 3); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst3_p64(__p0, __p1) __extension__ ({ \ - poly64x1x3_t __s1 = __p1; \ - __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 6); \ -}) -#else -#define vst3_p64(__p0, __p1) __extension__ ({ \ - poly64x1x3_t __s1 = __p1; \ - __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 6); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst3q_p64(__p0, __p1) __extension__ ({ \ - poly64x2x3_t __s1 = __p1; \ - __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 38); \ -}) -#else -#define vst3q_p64(__p0, __p1) __extension__ ({ \ - poly64x2x3_t __s1 = __p1; \ - poly64x2x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ - __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 38); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst3q_u64(__p0, __p1) __extension__ ({ \ - uint64x2x3_t __s1 = __p1; \ - __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 51); \ -}) -#else -#define vst3q_u64(__p0, __p1) __extension__ ({ \ - uint64x2x3_t __s1 = __p1; \ - uint64x2x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ - __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 51); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst3q_f64(__p0, __p1) __extension__ ({ \ - float64x2x3_t __s1 = __p1; \ - __builtin_neon_vst3q_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 42); \ -}) -#else -#define vst3q_f64(__p0, __p1) __extension__ ({ \ - float64x2x3_t __s1 = __p1; \ - float64x2x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ - __builtin_neon_vst3q_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 42); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst3q_s64(__p0, __p1) __extension__ ({ \ - int64x2x3_t __s1 = __p1; \ - __builtin_neon_vst3q_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 35); \ -}) -#else -#define vst3q_s64(__p0, __p1) __extension__ ({ \ - int64x2x3_t __s1 = __p1; \ - int64x2x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ - __builtin_neon_vst3q_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 35); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst3_f64(__p0, __p1) __extension__ ({ \ - float64x1x3_t __s1 = __p1; \ - __builtin_neon_vst3_v(__p0, 
__s1.val[0], __s1.val[1], __s1.val[2], 10); \ -}) -#else -#define vst3_f64(__p0, __p1) __extension__ ({ \ - float64x1x3_t __s1 = __p1; \ - __builtin_neon_vst3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 10); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst3_lane_p64(__p0, __p1, __p2) __extension__ ({ \ - poly64x1x3_t __s1 = __p1; \ - __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 6); \ -}) -#else -#define vst3_lane_p64(__p0, __p1, __p2) __extension__ ({ \ - poly64x1x3_t __s1 = __p1; \ - __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 6); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst3q_lane_p8(__p0, __p1, __p2) __extension__ ({ \ - poly8x16x3_t __s1 = __p1; \ - __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 36); \ -}) -#else -#define vst3q_lane_p8(__p0, __p1, __p2) __extension__ ({ \ - poly8x16x3_t __s1 = __p1; \ - poly8x16x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 36); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst3q_lane_p64(__p0, __p1, __p2) __extension__ ({ \ - poly64x2x3_t __s1 = __p1; \ - __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 38); \ -}) -#else -#define vst3q_lane_p64(__p0, __p1, __p2) __extension__ ({ \ - poly64x2x3_t __s1 = __p1; \ - poly64x2x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ - __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 38); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst3q_lane_u8(__p0, __p1, __p2) __extension__ ({ \ - uint8x16x3_t __s1 = __p1; \ - __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 48); \ -}) -#else -#define vst3q_lane_u8(__p0, __p1, __p2) __extension__ ({ \ - uint8x16x3_t __s1 = __p1; \ - uint8x16x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 48); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst3q_lane_u64(__p0, __p1, __p2) __extension__ ({ \ - uint64x2x3_t __s1 = __p1; \ - __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 51); \ -}) -#else -#define vst3q_lane_u64(__p0, __p1, __p2) __extension__ ({ \ - uint64x2x3_t __s1 = __p1; \ - uint64x2x3_t __rev1; \ - 
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ - __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 51); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst3q_lane_s8(__p0, __p1, __p2) __extension__ ({ \ - int8x16x3_t __s1 = __p1; \ - __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 32); \ -}) -#else -#define vst3q_lane_s8(__p0, __p1, __p2) __extension__ ({ \ - int8x16x3_t __s1 = __p1; \ - int8x16x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 32); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst3q_lane_f64(__p0, __p1, __p2) __extension__ ({ \ - float64x2x3_t __s1 = __p1; \ - __builtin_neon_vst3q_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 42); \ -}) -#else -#define vst3q_lane_f64(__p0, __p1, __p2) __extension__ ({ \ - float64x2x3_t __s1 = __p1; \ - float64x2x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ - __builtin_neon_vst3q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 42); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst3q_lane_s64(__p0, __p1, __p2) __extension__ ({ \ - int64x2x3_t __s1 = __p1; \ - __builtin_neon_vst3q_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 35); \ -}) -#else -#define vst3q_lane_s64(__p0, __p1, __p2) __extension__ ({ \ - int64x2x3_t __s1 = __p1; \ - int64x2x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ - __builtin_neon_vst3q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 35); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst3_lane_u64(__p0, __p1, __p2) __extension__ ({ \ - uint64x1x3_t __s1 = __p1; \ - __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 19); \ -}) -#else -#define vst3_lane_u64(__p0, __p1, __p2) __extension__ ({ \ - uint64x1x3_t __s1 = __p1; \ - __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 19); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst3_lane_f64(__p0, __p1, __p2) __extension__ ({ \ - float64x1x3_t __s1 = __p1; \ - __builtin_neon_vst3_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 10); \ -}) -#else -#define vst3_lane_f64(__p0, __p1, __p2) __extension__ ({ \ - float64x1x3_t __s1 = __p1; \ - __builtin_neon_vst3_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 10); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst3_lane_s64(__p0, __p1, __p2) __extension__ ({ \ - 
int64x1x3_t __s1 = __p1; \ - __builtin_neon_vst3_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 3); \ -}) -#else -#define vst3_lane_s64(__p0, __p1, __p2) __extension__ ({ \ - int64x1x3_t __s1 = __p1; \ - __builtin_neon_vst3_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 3); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst4_p64(__p0, __p1) __extension__ ({ \ - poly64x1x4_t __s1 = __p1; \ - __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 6); \ -}) -#else -#define vst4_p64(__p0, __p1) __extension__ ({ \ - poly64x1x4_t __s1 = __p1; \ - __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 6); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst4q_p64(__p0, __p1) __extension__ ({ \ - poly64x2x4_t __s1 = __p1; \ - __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 38); \ -}) -#else -#define vst4q_p64(__p0, __p1) __extension__ ({ \ - poly64x2x4_t __s1 = __p1; \ - poly64x2x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ - __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 38); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst4q_u64(__p0, __p1) __extension__ ({ \ - uint64x2x4_t __s1 = __p1; \ - __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 51); \ -}) -#else -#define vst4q_u64(__p0, __p1) __extension__ ({ \ - uint64x2x4_t __s1 = __p1; \ - uint64x2x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ - __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 51); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst4q_f64(__p0, __p1) __extension__ ({ \ - float64x2x4_t __s1 = __p1; \ - __builtin_neon_vst4q_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 42); \ -}) -#else -#define vst4q_f64(__p0, __p1) __extension__ ({ \ - float64x2x4_t __s1 = __p1; \ - float64x2x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ - __builtin_neon_vst4q_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 42); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst4q_s64(__p0, __p1) __extension__ ({ \ - int64x2x4_t __s1 = __p1; \ - __builtin_neon_vst4q_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 35); \ -}) -#else -#define vst4q_s64(__p0, __p1) __extension__ ({ \ - int64x2x4_t __s1 = __p1; \ - int64x2x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] 
= __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ - __builtin_neon_vst4q_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 35); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst4_f64(__p0, __p1) __extension__ ({ \ - float64x1x4_t __s1 = __p1; \ - __builtin_neon_vst4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 10); \ -}) -#else -#define vst4_f64(__p0, __p1) __extension__ ({ \ - float64x1x4_t __s1 = __p1; \ - __builtin_neon_vst4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 10); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst4_lane_p64(__p0, __p1, __p2) __extension__ ({ \ - poly64x1x4_t __s1 = __p1; \ - __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 6); \ -}) -#else -#define vst4_lane_p64(__p0, __p1, __p2) __extension__ ({ \ - poly64x1x4_t __s1 = __p1; \ - __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 6); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst4q_lane_p8(__p0, __p1, __p2) __extension__ ({ \ - poly8x16x4_t __s1 = __p1; \ - __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 36); \ -}) -#else -#define vst4q_lane_p8(__p0, __p1, __p2) __extension__ ({ \ - poly8x16x4_t __s1 = __p1; \ - poly8x16x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 36); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst4q_lane_p64(__p0, __p1, __p2) __extension__ ({ \ - poly64x2x4_t __s1 = __p1; \ - __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 38); \ -}) -#else -#define vst4q_lane_p64(__p0, __p1, __p2) __extension__ ({ \ - poly64x2x4_t __s1 = __p1; \ - poly64x2x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ - __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 38); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst4q_lane_u8(__p0, __p1, __p2) __extension__ ({ \ - uint8x16x4_t __s1 = __p1; \ - __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 48); \ -}) -#else -#define vst4q_lane_u8(__p0, __p1, __p2) __extension__ ({ \ - uint8x16x4_t __s1 = __p1; \ - uint8x16x4_t __rev1; \ - __rev1.val[0] = 
__builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 48); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst4q_lane_u64(__p0, __p1, __p2) __extension__ ({ \ - uint64x2x4_t __s1 = __p1; \ - __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 51); \ -}) -#else -#define vst4q_lane_u64(__p0, __p1, __p2) __extension__ ({ \ - uint64x2x4_t __s1 = __p1; \ - uint64x2x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ - __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 51); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst4q_lane_s8(__p0, __p1, __p2) __extension__ ({ \ - int8x16x4_t __s1 = __p1; \ - __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 32); \ -}) -#else -#define vst4q_lane_s8(__p0, __p1, __p2) __extension__ ({ \ - int8x16x4_t __s1 = __p1; \ - int8x16x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 32); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst4q_lane_f64(__p0, __p1, __p2) __extension__ ({ \ - float64x2x4_t __s1 = __p1; \ - __builtin_neon_vst4q_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 42); \ -}) -#else -#define vst4q_lane_f64(__p0, __p1, __p2) __extension__ ({ \ - float64x2x4_t __s1 = __p1; \ - float64x2x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ - __builtin_neon_vst4q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 42); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst4q_lane_s64(__p0, __p1, __p2) __extension__ ({ \ - int64x2x4_t __s1 = __p1; \ - __builtin_neon_vst4q_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 
35); \ -}) -#else -#define vst4q_lane_s64(__p0, __p1, __p2) __extension__ ({ \ - int64x2x4_t __s1 = __p1; \ - int64x2x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ - __builtin_neon_vst4q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 35); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst4_lane_u64(__p0, __p1, __p2) __extension__ ({ \ - uint64x1x4_t __s1 = __p1; \ - __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 19); \ -}) -#else -#define vst4_lane_u64(__p0, __p1, __p2) __extension__ ({ \ - uint64x1x4_t __s1 = __p1; \ - __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 19); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst4_lane_f64(__p0, __p1, __p2) __extension__ ({ \ - float64x1x4_t __s1 = __p1; \ - __builtin_neon_vst4_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 10); \ -}) -#else -#define vst4_lane_f64(__p0, __p1, __p2) __extension__ ({ \ - float64x1x4_t __s1 = __p1; \ - __builtin_neon_vst4_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 10); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst4_lane_s64(__p0, __p1, __p2) __extension__ ({ \ - int64x1x4_t __s1 = __p1; \ - __builtin_neon_vst4_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 3); \ -}) -#else -#define vst4_lane_s64(__p0, __p1, __p2) __extension__ ({ \ - int64x1x4_t __s1 = __p1; \ - __builtin_neon_vst4_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 3); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vstrq_p128(__p0, __p1) __extension__ ({ \ - poly128_t __s1 = __p1; \ - __builtin_neon_vstrq_p128(__p0, __s1); \ -}) -#else -#define vstrq_p128(__p0, __p1) __extension__ ({ \ - poly128_t __s1 = __p1; \ - __builtin_neon_vstrq_p128(__p0, __s1); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64_t vsubd_u64(uint64_t __p0, uint64_t __p1) { - uint64_t __ret; - __ret = (uint64_t) __builtin_neon_vsubd_u64(__p0, __p1); - return __ret; -} -#else -__ai uint64_t vsubd_u64(uint64_t __p0, uint64_t __p1) { - uint64_t __ret; - __ret = (uint64_t) __builtin_neon_vsubd_u64(__p0, __p1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64_t vsubd_s64(int64_t __p0, int64_t __p1) { - int64_t __ret; - __ret = (int64_t) __builtin_neon_vsubd_s64(__p0, __p1); - return __ret; -} -#else -__ai int64_t vsubd_s64(int64_t __p0, int64_t __p1) { - int64_t __ret; - __ret = (int64_t) __builtin_neon_vsubd_s64(__p0, __p1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vsubq_f64(float64x2_t __p0, float64x2_t __p1) { - float64x2_t __ret; - __ret = __p0 - __p1; - return __ret; -} -#else -__ai float64x2_t vsubq_f64(float64x2_t __p0, float64x2_t __p1) { - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - float64x2_t __ret; - __ret = __rev0 - __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64x1_t vsub_f64(float64x1_t __p0, float64x1_t __p1) { - float64x1_t __ret; - __ret = __p0 - 
__p1; - return __ret; -} -#else -__ai float64x1_t vsub_f64(float64x1_t __p0, float64x1_t __p1) { - float64x1_t __ret; - __ret = __p0 - __p1; - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vsubhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { - uint16x8_t __ret; - __ret = vcombine_u16(__p0, vsubhn_u32(__p1, __p2)); - return __ret; -} -#else -__ai uint16x8_t vsubhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - uint16x8_t __ret; - __ret = __noswap_vcombine_u16(__rev0, __noswap_vsubhn_u32(__rev1, __rev2)); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vsubhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { - uint32x4_t __ret; - __ret = vcombine_u32(__p0, vsubhn_u64(__p1, __p2)); - return __ret; -} -#else -__ai uint32x4_t vsubhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); - uint32x4_t __ret; - __ret = __noswap_vcombine_u32(__rev0, __noswap_vsubhn_u64(__rev1, __rev2)); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vsubhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { - uint8x16_t __ret; - __ret = vcombine_u8(__p0, vsubhn_u16(__p1, __p2)); - return __ret; -} -#else -__ai uint8x16_t vsubhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __ret; - __ret = __noswap_vcombine_u8(__rev0, __noswap_vsubhn_u16(__rev1, __rev2)); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vsubhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) { - int16x8_t __ret; - __ret = vcombine_s16(__p0, vsubhn_s32(__p1, __p2)); - return __ret; -} -#else -__ai int16x8_t vsubhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) { - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - int16x8_t __ret; - __ret = __noswap_vcombine_s16(__rev0, __noswap_vsubhn_s32(__rev1, __rev2)); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vsubhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) { - int32x4_t __ret; - __ret = vcombine_s32(__p0, vsubhn_s64(__p1, __p2)); - return __ret; -} -#else -__ai int32x4_t vsubhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) { - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 
1, 0); - int64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); - int32x4_t __ret; - __ret = __noswap_vcombine_s32(__rev0, __noswap_vsubhn_s64(__rev1, __rev2)); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vsubhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) { - int8x16_t __ret; - __ret = vcombine_s8(__p0, vsubhn_s16(__p1, __p2)); - return __ret; -} -#else -__ai int8x16_t vsubhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) { - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __ret; - __ret = __noswap_vcombine_s8(__rev0, __noswap_vsubhn_s16(__rev1, __rev2)); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vsubl_high_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint16x8_t __ret; - __ret = vmovl_high_u8(__p0) - vmovl_high_u8(__p1); - return __ret; -} -#else -__ai uint16x8_t vsubl_high_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __ret; - __ret = __noswap_vmovl_high_u8(__rev0) - __noswap_vmovl_high_u8(__rev1); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vsubl_high_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint64x2_t __ret; - __ret = vmovl_high_u32(__p0) - vmovl_high_u32(__p1); - return __ret; -} -#else -__ai uint64x2_t vsubl_high_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint64x2_t __ret; - __ret = __noswap_vmovl_high_u32(__rev0) - __noswap_vmovl_high_u32(__rev1); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vsubl_high_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint32x4_t __ret; - __ret = vmovl_high_u16(__p0) - vmovl_high_u16(__p1); - return __ret; -} -#else -__ai uint32x4_t vsubl_high_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint32x4_t __ret; - __ret = __noswap_vmovl_high_u16(__rev0) - __noswap_vmovl_high_u16(__rev1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vsubl_high_s8(int8x16_t __p0, int8x16_t __p1) { - int16x8_t __ret; - __ret = vmovl_high_s8(__p0) - vmovl_high_s8(__p1); - return __ret; -} -#else -__ai int16x8_t vsubl_high_s8(int8x16_t __p0, int8x16_t __p1) { - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __ret; - __ret = __noswap_vmovl_high_s8(__rev0) - 
__noswap_vmovl_high_s8(__rev1); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vsubl_high_s32(int32x4_t __p0, int32x4_t __p1) { - int64x2_t __ret; - __ret = vmovl_high_s32(__p0) - vmovl_high_s32(__p1); - return __ret; -} -#else -__ai int64x2_t vsubl_high_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int64x2_t __ret; - __ret = __noswap_vmovl_high_s32(__rev0) - __noswap_vmovl_high_s32(__rev1); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vsubl_high_s16(int16x8_t __p0, int16x8_t __p1) { - int32x4_t __ret; - __ret = vmovl_high_s16(__p0) - vmovl_high_s16(__p1); - return __ret; -} -#else -__ai int32x4_t vsubl_high_s16(int16x8_t __p0, int16x8_t __p1) { - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int32x4_t __ret; - __ret = __noswap_vmovl_high_s16(__rev0) - __noswap_vmovl_high_s16(__rev1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vsubw_high_u8(uint16x8_t __p0, uint8x16_t __p1) { - uint16x8_t __ret; - __ret = __p0 - vmovl_high_u8(__p1); - return __ret; -} -#else -__ai uint16x8_t vsubw_high_u8(uint16x8_t __p0, uint8x16_t __p1) { - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __ret; - __ret = __rev0 - __noswap_vmovl_high_u8(__rev1); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vsubw_high_u32(uint64x2_t __p0, uint32x4_t __p1) { - uint64x2_t __ret; - __ret = __p0 - vmovl_high_u32(__p1); - return __ret; -} -#else -__ai uint64x2_t vsubw_high_u32(uint64x2_t __p0, uint32x4_t __p1) { - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint64x2_t __ret; - __ret = __rev0 - __noswap_vmovl_high_u32(__rev1); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vsubw_high_u16(uint32x4_t __p0, uint16x8_t __p1) { - uint32x4_t __ret; - __ret = __p0 - vmovl_high_u16(__p1); - return __ret; -} -#else -__ai uint32x4_t vsubw_high_u16(uint32x4_t __p0, uint16x8_t __p1) { - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint32x4_t __ret; - __ret = __rev0 - __noswap_vmovl_high_u16(__rev1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vsubw_high_s8(int16x8_t __p0, int8x16_t __p1) { - int16x8_t __ret; - __ret = __p0 - vmovl_high_s8(__p1); - return __ret; -} -#else -__ai int16x8_t vsubw_high_s8(int16x8_t __p0, int8x16_t __p1) { - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 
0); - int16x8_t __ret; - __ret = __rev0 - __noswap_vmovl_high_s8(__rev1); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vsubw_high_s32(int64x2_t __p0, int32x4_t __p1) { - int64x2_t __ret; - __ret = __p0 - vmovl_high_s32(__p1); - return __ret; -} -#else -__ai int64x2_t vsubw_high_s32(int64x2_t __p0, int32x4_t __p1) { - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int64x2_t __ret; - __ret = __rev0 - __noswap_vmovl_high_s32(__rev1); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vsubw_high_s16(int32x4_t __p0, int16x8_t __p1) { - int32x4_t __ret; - __ret = __p0 - vmovl_high_s16(__p1); - return __ret; -} -#else -__ai int32x4_t vsubw_high_s16(int32x4_t __p0, int16x8_t __p1) { - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int32x4_t __ret; - __ret = __rev0 - __noswap_vmovl_high_s16(__rev1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly8x8_t vtrn1_p8(poly8x8_t __p0, poly8x8_t __p1) { - poly8x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14); - return __ret; -} -#else -__ai poly8x8_t vtrn1_p8(poly8x8_t __p0, poly8x8_t __p1) { - poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x8_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly16x4_t vtrn1_p16(poly16x4_t __p0, poly16x4_t __p1) { - poly16x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6); - return __ret; -} -#else -__ai poly16x4_t vtrn1_p16(poly16x4_t __p0, poly16x4_t __p1) { - poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - poly16x4_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly8x16_t vtrn1q_p8(poly8x16_t __p0, poly8x16_t __p1) { - poly8x16_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30); - return __ret; -} -#else -__ai poly8x16_t vtrn1q_p8(poly8x16_t __p0, poly8x16_t __p1) { - poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x16_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly64x2_t vtrn1q_p64(poly64x2_t __p0, poly64x2_t __p1) { - poly64x2_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 2); - return __ret; -} -#else -__ai poly64x2_t 
vtrn1q_p64(poly64x2_t __p0, poly64x2_t __p1) { - poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - poly64x2_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly16x8_t vtrn1q_p16(poly16x8_t __p0, poly16x8_t __p1) { - poly16x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14); - return __ret; -} -#else -__ai poly16x8_t vtrn1q_p16(poly16x8_t __p0, poly16x8_t __p1) { - poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - poly16x8_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vtrn1q_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30); - return __ret; -} -#else -__ai uint8x16_t vtrn1q_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vtrn1q_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6); - return __ret; -} -#else -__ai uint32x4_t vtrn1q_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint32x4_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vtrn1q_u64(uint64x2_t __p0, uint64x2_t __p1) { - uint64x2_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 2); - return __ret; -} -#else -__ai uint64x2_t vtrn1q_u64(uint64x2_t __p0, uint64x2_t __p1) { - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint64x2_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vtrn1q_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint16x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14); - return __ret; -} -#else -__ai uint16x8_t vtrn1q_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14); - __ret = 
__builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vtrn1q_s8(int8x16_t __p0, int8x16_t __p1) { - int8x16_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30); - return __ret; -} -#else -__ai int8x16_t vtrn1q_s8(int8x16_t __p0, int8x16_t __p1) { - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vtrn1q_f64(float64x2_t __p0, float64x2_t __p1) { - float64x2_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 2); - return __ret; -} -#else -__ai float64x2_t vtrn1q_f64(float64x2_t __p0, float64x2_t __p1) { - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - float64x2_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vtrn1q_f32(float32x4_t __p0, float32x4_t __p1) { - float32x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6); - return __ret; -} -#else -__ai float32x4_t vtrn1q_f32(float32x4_t __p0, float32x4_t __p1) { - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - float32x4_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vtrn1q_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6); - return __ret; -} -#else -__ai int32x4_t vtrn1q_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int32x4_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vtrn1q_s64(int64x2_t __p0, int64x2_t __p1) { - int64x2_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 2); - return __ret; -} -#else -__ai int64x2_t vtrn1q_s64(int64x2_t __p0, int64x2_t __p1) { - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - int64x2_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vtrn1q_s16(int16x8_t __p0, int16x8_t __p1) { - int16x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14); - return __ret; -} -#else -__ai int16x8_t vtrn1q_s16(int16x8_t __p0, int16x8_t __p1) { - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - 
int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vtrn1_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14); - return __ret; -} -#else -__ai uint8x8_t vtrn1_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vtrn1_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint32x2_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 2); - return __ret; -} -#else -__ai uint32x2_t vtrn1_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint32x2_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vtrn1_u16(uint16x4_t __p0, uint16x4_t __p1) { - uint16x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6); - return __ret; -} -#else -__ai uint16x4_t vtrn1_u16(uint16x4_t __p0, uint16x4_t __p1) { - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint16x4_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vtrn1_s8(int8x8_t __p0, int8x8_t __p1) { - int8x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14); - return __ret; -} -#else -__ai int8x8_t vtrn1_s8(int8x8_t __p0, int8x8_t __p1) { - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vtrn1_f32(float32x2_t __p0, float32x2_t __p1) { - float32x2_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 2); - return __ret; -} -#else -__ai float32x2_t vtrn1_f32(float32x2_t __p0, float32x2_t __p1) { - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - float32x2_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vtrn1_s32(int32x2_t __p0, int32x2_t __p1) { - int32x2_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 2); - return __ret; -} -#else -__ai int32x2_t vtrn1_s32(int32x2_t __p0, int32x2_t __p1) { - int32x2_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - int32x2_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vtrn1_s16(int16x4_t __p0, int16x4_t __p1) { - int16x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6); - return __ret; -} -#else -__ai int16x4_t vtrn1_s16(int16x4_t __p0, int16x4_t __p1) { - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int16x4_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly8x8_t vtrn2_p8(poly8x8_t __p0, poly8x8_t __p1) { - poly8x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15); - return __ret; -} -#else -__ai poly8x8_t vtrn2_p8(poly8x8_t __p0, poly8x8_t __p1) { - poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x8_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly16x4_t vtrn2_p16(poly16x4_t __p0, poly16x4_t __p1) { - poly16x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7); - return __ret; -} -#else -__ai poly16x4_t vtrn2_p16(poly16x4_t __p0, poly16x4_t __p1) { - poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - poly16x4_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly8x16_t vtrn2q_p8(poly8x16_t __p0, poly8x16_t __p1) { - poly8x16_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31); - return __ret; -} -#else -__ai poly8x16_t vtrn2q_p8(poly8x16_t __p0, poly8x16_t __p1) { - poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x16_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly64x2_t vtrn2q_p64(poly64x2_t __p0, poly64x2_t __p1) { - poly64x2_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 1, 3); - return __ret; -} -#else -__ai poly64x2_t vtrn2q_p64(poly64x2_t __p0, poly64x2_t __p1) { - poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - poly64x2_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly16x8_t vtrn2q_p16(poly16x8_t __p0, poly16x8_t __p1) { - poly16x8_t 
__ret; - __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15); - return __ret; -} -#else -__ai poly16x8_t vtrn2q_p16(poly16x8_t __p0, poly16x8_t __p1) { - poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - poly16x8_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vtrn2q_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31); - return __ret; -} -#else -__ai uint8x16_t vtrn2q_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vtrn2q_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7); - return __ret; -} -#else -__ai uint32x4_t vtrn2q_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint32x4_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vtrn2q_u64(uint64x2_t __p0, uint64x2_t __p1) { - uint64x2_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 1, 3); - return __ret; -} -#else -__ai uint64x2_t vtrn2q_u64(uint64x2_t __p0, uint64x2_t __p1) { - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint64x2_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vtrn2q_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint16x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15); - return __ret; -} -#else -__ai uint16x8_t vtrn2q_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vtrn2q_s8(int8x16_t __p0, int8x16_t __p1) { - int8x16_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31); - return __ret; -} -#else -__ai int8x16_t vtrn2q_s8(int8x16_t __p0, int8x16_t __p1) { - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 
9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vtrn2q_f64(float64x2_t __p0, float64x2_t __p1) { - float64x2_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 1, 3); - return __ret; -} -#else -__ai float64x2_t vtrn2q_f64(float64x2_t __p0, float64x2_t __p1) { - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - float64x2_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vtrn2q_f32(float32x4_t __p0, float32x4_t __p1) { - float32x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7); - return __ret; -} -#else -__ai float32x4_t vtrn2q_f32(float32x4_t __p0, float32x4_t __p1) { - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - float32x4_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vtrn2q_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7); - return __ret; -} -#else -__ai int32x4_t vtrn2q_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int32x4_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vtrn2q_s64(int64x2_t __p0, int64x2_t __p1) { - int64x2_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 1, 3); - return __ret; -} -#else -__ai int64x2_t vtrn2q_s64(int64x2_t __p0, int64x2_t __p1) { - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - int64x2_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vtrn2q_s16(int16x8_t __p0, int16x8_t __p1) { - int16x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15); - return __ret; -} -#else -__ai int16x8_t vtrn2q_s16(int16x8_t __p0, int16x8_t __p1) { - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vtrn2_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15); - return __ret; -} 
-#else -__ai uint8x8_t vtrn2_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vtrn2_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint32x2_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 1, 3); - return __ret; -} -#else -__ai uint32x2_t vtrn2_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint32x2_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vtrn2_u16(uint16x4_t __p0, uint16x4_t __p1) { - uint16x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7); - return __ret; -} -#else -__ai uint16x4_t vtrn2_u16(uint16x4_t __p0, uint16x4_t __p1) { - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint16x4_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vtrn2_s8(int8x8_t __p0, int8x8_t __p1) { - int8x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15); - return __ret; -} -#else -__ai int8x8_t vtrn2_s8(int8x8_t __p0, int8x8_t __p1) { - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vtrn2_f32(float32x2_t __p0, float32x2_t __p1) { - float32x2_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 1, 3); - return __ret; -} -#else -__ai float32x2_t vtrn2_f32(float32x2_t __p0, float32x2_t __p1) { - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - float32x2_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vtrn2_s32(int32x2_t __p0, int32x2_t __p1) { - int32x2_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 1, 3); - return __ret; -} -#else -__ai int32x2_t vtrn2_s32(int32x2_t __p0, int32x2_t __p1) { - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - int32x2_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vtrn2_s16(int16x4_t __p0, int16x4_t __p1) { - int16x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7); - return __ret; -} -#else -__ai int16x4_t 
vtrn2_s16(int16x4_t __p0, int16x4_t __p1) { - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int16x4_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x1_t vtst_p64(poly64x1_t __p0, poly64x1_t __p1) { - uint64x1_t __ret; - __ret = (uint64x1_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 19); - return __ret; -} -#else -__ai uint64x1_t vtst_p64(poly64x1_t __p0, poly64x1_t __p1) { - uint64x1_t __ret; - __ret = (uint64x1_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 19); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vtstq_p64(poly64x2_t __p0, poly64x2_t __p1) { - uint64x2_t __ret; - __ret = (uint64x2_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); - return __ret; -} -#else -__ai uint64x2_t vtstq_p64(poly64x2_t __p0, poly64x2_t __p1) { - poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint64x2_t __ret; - __ret = (uint64x2_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vtstq_u64(uint64x2_t __p0, uint64x2_t __p1) { - uint64x2_t __ret; - __ret = (uint64x2_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); - return __ret; -} -#else -__ai uint64x2_t vtstq_u64(uint64x2_t __p0, uint64x2_t __p1) { - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint64x2_t __ret; - __ret = (uint64x2_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vtstq_s64(int64x2_t __p0, int64x2_t __p1) { - uint64x2_t __ret; - __ret = (uint64x2_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); - return __ret; -} -#else -__ai uint64x2_t vtstq_s64(int64x2_t __p0, int64x2_t __p1) { - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint64x2_t __ret; - __ret = (uint64x2_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x1_t vtst_u64(uint64x1_t __p0, uint64x1_t __p1) { - uint64x1_t __ret; - __ret = (uint64x1_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 19); - return __ret; -} -#else -__ai uint64x1_t vtst_u64(uint64x1_t __p0, uint64x1_t __p1) { - uint64x1_t __ret; - __ret = (uint64x1_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 19); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x1_t vtst_s64(int64x1_t __p0, int64x1_t __p1) { - uint64x1_t __ret; - __ret = (uint64x1_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 19); - return __ret; -} -#else -__ai uint64x1_t vtst_s64(int64x1_t __p0, int64x1_t __p1) { - uint64x1_t __ret; - __ret = (uint64x1_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 19); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64_t vtstd_u64(uint64_t __p0, uint64_t __p1) { - uint64_t 
__ret; - __ret = (uint64_t) __builtin_neon_vtstd_u64(__p0, __p1); - return __ret; -} -#else -__ai uint64_t vtstd_u64(uint64_t __p0, uint64_t __p1) { - uint64_t __ret; - __ret = (uint64_t) __builtin_neon_vtstd_u64(__p0, __p1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64_t vtstd_s64(int64_t __p0, int64_t __p1) { - int64_t __ret; - __ret = (int64_t) __builtin_neon_vtstd_s64(__p0, __p1); - return __ret; -} -#else -__ai int64_t vtstd_s64(int64_t __p0, int64_t __p1) { - int64_t __ret; - __ret = (int64_t) __builtin_neon_vtstd_s64(__p0, __p1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8_t vuqaddb_s8(int8_t __p0, int8_t __p1) { - int8_t __ret; - __ret = (int8_t) __builtin_neon_vuqaddb_s8(__p0, __p1); - return __ret; -} -#else -__ai int8_t vuqaddb_s8(int8_t __p0, int8_t __p1) { - int8_t __ret; - __ret = (int8_t) __builtin_neon_vuqaddb_s8(__p0, __p1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32_t vuqadds_s32(int32_t __p0, int32_t __p1) { - int32_t __ret; - __ret = (int32_t) __builtin_neon_vuqadds_s32(__p0, __p1); - return __ret; -} -#else -__ai int32_t vuqadds_s32(int32_t __p0, int32_t __p1) { - int32_t __ret; - __ret = (int32_t) __builtin_neon_vuqadds_s32(__p0, __p1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64_t vuqaddd_s64(int64_t __p0, int64_t __p1) { - int64_t __ret; - __ret = (int64_t) __builtin_neon_vuqaddd_s64(__p0, __p1); - return __ret; -} -#else -__ai int64_t vuqaddd_s64(int64_t __p0, int64_t __p1) { - int64_t __ret; - __ret = (int64_t) __builtin_neon_vuqaddd_s64(__p0, __p1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16_t vuqaddh_s16(int16_t __p0, int16_t __p1) { - int16_t __ret; - __ret = (int16_t) __builtin_neon_vuqaddh_s16(__p0, __p1); - return __ret; -} -#else -__ai int16_t vuqaddh_s16(int16_t __p0, int16_t __p1) { - int16_t __ret; - __ret = (int16_t) __builtin_neon_vuqaddh_s16(__p0, __p1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vuqaddq_s8(int8x16_t __p0, int8x16_t __p1) { - int8x16_t __ret; - __ret = (int8x16_t) __builtin_neon_vuqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); - return __ret; -} -#else -__ai int8x16_t vuqaddq_s8(int8x16_t __p0, int8x16_t __p1) { - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __ret; - __ret = (int8x16_t) __builtin_neon_vuqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vuqaddq_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vuqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); - return __ret; -} -#else -__ai int32x4_t vuqaddq_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vuqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vuqaddq_s64(int64x2_t __p0, int64x2_t __p1) { - int64x2_t __ret; - __ret = (int64x2_t) __builtin_neon_vuqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 
35); - return __ret; -} -#else -__ai int64x2_t vuqaddq_s64(int64x2_t __p0, int64x2_t __p1) { - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - int64x2_t __ret; - __ret = (int64x2_t) __builtin_neon_vuqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vuqaddq_s16(int16x8_t __p0, int16x8_t __p1) { - int16x8_t __ret; - __ret = (int16x8_t) __builtin_neon_vuqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); - return __ret; -} -#else -__ai int16x8_t vuqaddq_s16(int16x8_t __p0, int16x8_t __p1) { - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __ret; - __ret = (int16x8_t) __builtin_neon_vuqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vuqadd_s8(int8x8_t __p0, int8x8_t __p1) { - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vuqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 0); - return __ret; -} -#else -__ai int8x8_t vuqadd_s8(int8x8_t __p0, int8x8_t __p1) { - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vuqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vuqadd_s32(int32x2_t __p0, int32x2_t __p1) { - int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vuqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 2); - return __ret; -} -#else -__ai int32x2_t vuqadd_s32(int32x2_t __p0, int32x2_t __p1) { - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vuqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x1_t vuqadd_s64(int64x1_t __p0, int64x1_t __p1) { - int64x1_t __ret; - __ret = (int64x1_t) __builtin_neon_vuqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 3); - return __ret; -} -#else -__ai int64x1_t vuqadd_s64(int64x1_t __p0, int64x1_t __p1) { - int64x1_t __ret; - __ret = (int64x1_t) __builtin_neon_vuqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 3); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vuqadd_s16(int16x4_t __p0, int16x4_t __p1) { - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vuqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 1); - return __ret; -} -#else -__ai int16x4_t vuqadd_s16(int16x4_t __p0, int16x4_t __p1) { - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vuqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly8x8_t vuzp1_p8(poly8x8_t __p0, poly8x8_t __p1) { - poly8x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 
10, 12, 14); - return __ret; -} -#else -__ai poly8x8_t vuzp1_p8(poly8x8_t __p0, poly8x8_t __p1) { - poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x8_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly16x4_t vuzp1_p16(poly16x4_t __p0, poly16x4_t __p1) { - poly16x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6); - return __ret; -} -#else -__ai poly16x4_t vuzp1_p16(poly16x4_t __p0, poly16x4_t __p1) { - poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - poly16x4_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly8x16_t vuzp1q_p8(poly8x16_t __p0, poly8x16_t __p1) { - poly8x16_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30); - return __ret; -} -#else -__ai poly8x16_t vuzp1q_p8(poly8x16_t __p0, poly8x16_t __p1) { - poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x16_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly64x2_t vuzp1q_p64(poly64x2_t __p0, poly64x2_t __p1) { - poly64x2_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 2); - return __ret; -} -#else -__ai poly64x2_t vuzp1q_p64(poly64x2_t __p0, poly64x2_t __p1) { - poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - poly64x2_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly16x8_t vuzp1q_p16(poly16x8_t __p0, poly16x8_t __p1) { - poly16x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14); - return __ret; -} -#else -__ai poly16x8_t vuzp1q_p16(poly16x8_t __p0, poly16x8_t __p1) { - poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - poly16x8_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vuzp1q_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30); - return __ret; -} -#else -__ai uint8x16_t vuzp1q_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev1; __rev1 = 
__builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vuzp1q_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6); - return __ret; -} -#else -__ai uint32x4_t vuzp1q_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint32x4_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vuzp1q_u64(uint64x2_t __p0, uint64x2_t __p1) { - uint64x2_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 2); - return __ret; -} -#else -__ai uint64x2_t vuzp1q_u64(uint64x2_t __p0, uint64x2_t __p1) { - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint64x2_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vuzp1q_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint16x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14); - return __ret; -} -#else -__ai uint16x8_t vuzp1q_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vuzp1q_s8(int8x16_t __p0, int8x16_t __p1) { - int8x16_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30); - return __ret; -} -#else -__ai int8x16_t vuzp1q_s8(int8x16_t __p0, int8x16_t __p1) { - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vuzp1q_f64(float64x2_t __p0, float64x2_t __p1) { - float64x2_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 2); - return __ret; -} -#else -__ai float64x2_t vuzp1q_f64(float64x2_t __p0, float64x2_t __p1) { - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - float64x2_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ 
-__ai float32x4_t vuzp1q_f32(float32x4_t __p0, float32x4_t __p1) { - float32x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6); - return __ret; -} -#else -__ai float32x4_t vuzp1q_f32(float32x4_t __p0, float32x4_t __p1) { - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - float32x4_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vuzp1q_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6); - return __ret; -} -#else -__ai int32x4_t vuzp1q_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int32x4_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vuzp1q_s64(int64x2_t __p0, int64x2_t __p1) { - int64x2_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 2); - return __ret; -} -#else -__ai int64x2_t vuzp1q_s64(int64x2_t __p0, int64x2_t __p1) { - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - int64x2_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vuzp1q_s16(int16x8_t __p0, int16x8_t __p1) { - int16x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14); - return __ret; -} -#else -__ai int16x8_t vuzp1q_s16(int16x8_t __p0, int16x8_t __p1) { - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vuzp1_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14); - return __ret; -} -#else -__ai uint8x8_t vuzp1_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vuzp1_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint32x2_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 2); - return __ret; -} -#else -__ai uint32x2_t vuzp1_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint32x2_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - 
-#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vuzp1_u16(uint16x4_t __p0, uint16x4_t __p1) { - uint16x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6); - return __ret; -} -#else -__ai uint16x4_t vuzp1_u16(uint16x4_t __p0, uint16x4_t __p1) { - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint16x4_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vuzp1_s8(int8x8_t __p0, int8x8_t __p1) { - int8x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14); - return __ret; -} -#else -__ai int8x8_t vuzp1_s8(int8x8_t __p0, int8x8_t __p1) { - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vuzp1_f32(float32x2_t __p0, float32x2_t __p1) { - float32x2_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 2); - return __ret; -} -#else -__ai float32x2_t vuzp1_f32(float32x2_t __p0, float32x2_t __p1) { - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - float32x2_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vuzp1_s32(int32x2_t __p0, int32x2_t __p1) { - int32x2_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 2); - return __ret; -} -#else -__ai int32x2_t vuzp1_s32(int32x2_t __p0, int32x2_t __p1) { - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - int32x2_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vuzp1_s16(int16x4_t __p0, int16x4_t __p1) { - int16x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6); - return __ret; -} -#else -__ai int16x4_t vuzp1_s16(int16x4_t __p0, int16x4_t __p1) { - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int16x4_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly8x8_t vuzp2_p8(poly8x8_t __p0, poly8x8_t __p1) { - poly8x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15); - return __ret; -} -#else -__ai poly8x8_t vuzp2_p8(poly8x8_t __p0, poly8x8_t __p1) { - poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x8_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif 
- -#ifdef __LITTLE_ENDIAN__ -__ai poly16x4_t vuzp2_p16(poly16x4_t __p0, poly16x4_t __p1) { - poly16x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7); - return __ret; -} -#else -__ai poly16x4_t vuzp2_p16(poly16x4_t __p0, poly16x4_t __p1) { - poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - poly16x4_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly8x16_t vuzp2q_p8(poly8x16_t __p0, poly8x16_t __p1) { - poly8x16_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31); - return __ret; -} -#else -__ai poly8x16_t vuzp2q_p8(poly8x16_t __p0, poly8x16_t __p1) { - poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x16_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly64x2_t vuzp2q_p64(poly64x2_t __p0, poly64x2_t __p1) { - poly64x2_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 1, 3); - return __ret; -} -#else -__ai poly64x2_t vuzp2q_p64(poly64x2_t __p0, poly64x2_t __p1) { - poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - poly64x2_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly16x8_t vuzp2q_p16(poly16x8_t __p0, poly16x8_t __p1) { - poly16x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15); - return __ret; -} -#else -__ai poly16x8_t vuzp2q_p16(poly16x8_t __p0, poly16x8_t __p1) { - poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - poly16x8_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vuzp2q_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31); - return __ret; -} -#else -__ai uint8x16_t vuzp2q_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vuzp2q_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4_t __ret; - __ret = 
__builtin_shufflevector(__p0, __p1, 1, 3, 5, 7); - return __ret; -} -#else -__ai uint32x4_t vuzp2q_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint32x4_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vuzp2q_u64(uint64x2_t __p0, uint64x2_t __p1) { - uint64x2_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 1, 3); - return __ret; -} -#else -__ai uint64x2_t vuzp2q_u64(uint64x2_t __p0, uint64x2_t __p1) { - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint64x2_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vuzp2q_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint16x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15); - return __ret; -} -#else -__ai uint16x8_t vuzp2q_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vuzp2q_s8(int8x16_t __p0, int8x16_t __p1) { - int8x16_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31); - return __ret; -} -#else -__ai int8x16_t vuzp2q_s8(int8x16_t __p0, int8x16_t __p1) { - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vuzp2q_f64(float64x2_t __p0, float64x2_t __p1) { - float64x2_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 1, 3); - return __ret; -} -#else -__ai float64x2_t vuzp2q_f64(float64x2_t __p0, float64x2_t __p1) { - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - float64x2_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vuzp2q_f32(float32x4_t __p0, float32x4_t __p1) { - float32x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7); - return __ret; -} -#else -__ai float32x4_t vuzp2q_f32(float32x4_t __p0, float32x4_t __p1) { - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - float32x4_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 1, 
3, 5, 7); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vuzp2q_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7); - return __ret; -} -#else -__ai int32x4_t vuzp2q_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int32x4_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vuzp2q_s64(int64x2_t __p0, int64x2_t __p1) { - int64x2_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 1, 3); - return __ret; -} -#else -__ai int64x2_t vuzp2q_s64(int64x2_t __p0, int64x2_t __p1) { - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - int64x2_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vuzp2q_s16(int16x8_t __p0, int16x8_t __p1) { - int16x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15); - return __ret; -} -#else -__ai int16x8_t vuzp2q_s16(int16x8_t __p0, int16x8_t __p1) { - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vuzp2_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15); - return __ret; -} -#else -__ai uint8x8_t vuzp2_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vuzp2_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint32x2_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 1, 3); - return __ret; -} -#else -__ai uint32x2_t vuzp2_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint32x2_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vuzp2_u16(uint16x4_t __p0, uint16x4_t __p1) { - uint16x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7); - return __ret; -} -#else -__ai uint16x4_t vuzp2_u16(uint16x4_t __p0, uint16x4_t __p1) { - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint16x4_t __ret; - __ret = __builtin_shufflevector(__rev0, 
__rev1, 1, 3, 5, 7); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vuzp2_s8(int8x8_t __p0, int8x8_t __p1) { - int8x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15); - return __ret; -} -#else -__ai int8x8_t vuzp2_s8(int8x8_t __p0, int8x8_t __p1) { - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vuzp2_f32(float32x2_t __p0, float32x2_t __p1) { - float32x2_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 1, 3); - return __ret; -} -#else -__ai float32x2_t vuzp2_f32(float32x2_t __p0, float32x2_t __p1) { - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - float32x2_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vuzp2_s32(int32x2_t __p0, int32x2_t __p1) { - int32x2_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 1, 3); - return __ret; -} -#else -__ai int32x2_t vuzp2_s32(int32x2_t __p0, int32x2_t __p1) { - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - int32x2_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vuzp2_s16(int16x4_t __p0, int16x4_t __p1) { - int16x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7); - return __ret; -} -#else -__ai int16x4_t vuzp2_s16(int16x4_t __p0, int16x4_t __p1) { - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int16x4_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly8x8_t vzip1_p8(poly8x8_t __p0, poly8x8_t __p1) { - poly8x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11); - return __ret; -} -#else -__ai poly8x8_t vzip1_p8(poly8x8_t __p0, poly8x8_t __p1) { - poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x8_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly16x4_t vzip1_p16(poly16x4_t __p0, poly16x4_t __p1) { - poly16x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5); - return __ret; -} -#else -__ai poly16x4_t vzip1_p16(poly16x4_t __p0, poly16x4_t __p1) { - poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - poly16x4_t __ret; - __ret = __builtin_shufflevector(__rev0, 
__rev1, 0, 4, 1, 5); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly8x16_t vzip1q_p8(poly8x16_t __p0, poly8x16_t __p1) { - poly8x16_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23); - return __ret; -} -#else -__ai poly8x16_t vzip1q_p8(poly8x16_t __p0, poly8x16_t __p1) { - poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x16_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly64x2_t vzip1q_p64(poly64x2_t __p0, poly64x2_t __p1) { - poly64x2_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 2); - return __ret; -} -#else -__ai poly64x2_t vzip1q_p64(poly64x2_t __p0, poly64x2_t __p1) { - poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - poly64x2_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly16x8_t vzip1q_p16(poly16x8_t __p0, poly16x8_t __p1) { - poly16x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11); - return __ret; -} -#else -__ai poly16x8_t vzip1q_p16(poly16x8_t __p0, poly16x8_t __p1) { - poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - poly16x8_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vzip1q_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23); - return __ret; -} -#else -__ai uint8x16_t vzip1q_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vzip1q_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5); - return __ret; -} -#else -__ai uint32x4_t vzip1q_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint32x4_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai 
uint64x2_t vzip1q_u64(uint64x2_t __p0, uint64x2_t __p1) { - uint64x2_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 2); - return __ret; -} -#else -__ai uint64x2_t vzip1q_u64(uint64x2_t __p0, uint64x2_t __p1) { - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint64x2_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vzip1q_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint16x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11); - return __ret; -} -#else -__ai uint16x8_t vzip1q_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vzip1q_s8(int8x16_t __p0, int8x16_t __p1) { - int8x16_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23); - return __ret; -} -#else -__ai int8x16_t vzip1q_s8(int8x16_t __p0, int8x16_t __p1) { - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vzip1q_f64(float64x2_t __p0, float64x2_t __p1) { - float64x2_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 2); - return __ret; -} -#else -__ai float64x2_t vzip1q_f64(float64x2_t __p0, float64x2_t __p1) { - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - float64x2_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vzip1q_f32(float32x4_t __p0, float32x4_t __p1) { - float32x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5); - return __ret; -} -#else -__ai float32x4_t vzip1q_f32(float32x4_t __p0, float32x4_t __p1) { - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - float32x4_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vzip1q_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5); - return __ret; -} -#else -__ai int32x4_t vzip1q_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - 
int32x4_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vzip1q_s64(int64x2_t __p0, int64x2_t __p1) { - int64x2_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 2); - return __ret; -} -#else -__ai int64x2_t vzip1q_s64(int64x2_t __p0, int64x2_t __p1) { - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - int64x2_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vzip1q_s16(int16x8_t __p0, int16x8_t __p1) { - int16x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11); - return __ret; -} -#else -__ai int16x8_t vzip1q_s16(int16x8_t __p0, int16x8_t __p1) { - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vzip1_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11); - return __ret; -} -#else -__ai uint8x8_t vzip1_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vzip1_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint32x2_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 2); - return __ret; -} -#else -__ai uint32x2_t vzip1_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint32x2_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vzip1_u16(uint16x4_t __p0, uint16x4_t __p1) { - uint16x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5); - return __ret; -} -#else -__ai uint16x4_t vzip1_u16(uint16x4_t __p0, uint16x4_t __p1) { - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint16x4_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vzip1_s8(int8x8_t __p0, int8x8_t __p1) { - int8x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11); - return __ret; -} -#else -__ai int8x8_t vzip1_s8(int8x8_t __p0, int8x8_t __p1) { - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, 
__p1, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vzip1_f32(float32x2_t __p0, float32x2_t __p1) { - float32x2_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 2); - return __ret; -} -#else -__ai float32x2_t vzip1_f32(float32x2_t __p0, float32x2_t __p1) { - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - float32x2_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vzip1_s32(int32x2_t __p0, int32x2_t __p1) { - int32x2_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 2); - return __ret; -} -#else -__ai int32x2_t vzip1_s32(int32x2_t __p0, int32x2_t __p1) { - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - int32x2_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vzip1_s16(int16x4_t __p0, int16x4_t __p1) { - int16x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5); - return __ret; -} -#else -__ai int16x4_t vzip1_s16(int16x4_t __p0, int16x4_t __p1) { - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int16x4_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly8x8_t vzip2_p8(poly8x8_t __p0, poly8x8_t __p1) { - poly8x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15); - return __ret; -} -#else -__ai poly8x8_t vzip2_p8(poly8x8_t __p0, poly8x8_t __p1) { - poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x8_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly16x4_t vzip2_p16(poly16x4_t __p0, poly16x4_t __p1) { - poly16x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7); - return __ret; -} -#else -__ai poly16x4_t vzip2_p16(poly16x4_t __p0, poly16x4_t __p1) { - poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - poly16x4_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly8x16_t vzip2q_p8(poly8x16_t __p0, poly8x16_t __p1) { - poly8x16_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31); - return __ret; -} -#else -__ai poly8x16_t vzip2q_p8(poly8x16_t __p0, poly8x16_t __p1) { - poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 
8, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x16_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly64x2_t vzip2q_p64(poly64x2_t __p0, poly64x2_t __p1) { - poly64x2_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 1, 3); - return __ret; -} -#else -__ai poly64x2_t vzip2q_p64(poly64x2_t __p0, poly64x2_t __p1) { - poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - poly64x2_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly16x8_t vzip2q_p16(poly16x8_t __p0, poly16x8_t __p1) { - poly16x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15); - return __ret; -} -#else -__ai poly16x8_t vzip2q_p16(poly16x8_t __p0, poly16x8_t __p1) { - poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - poly16x8_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vzip2q_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31); - return __ret; -} -#else -__ai uint8x16_t vzip2q_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vzip2q_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7); - return __ret; -} -#else -__ai uint32x4_t vzip2q_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint32x4_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vzip2q_u64(uint64x2_t __p0, uint64x2_t __p1) { - uint64x2_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 1, 3); - return __ret; -} -#else -__ai uint64x2_t vzip2q_u64(uint64x2_t __p0, uint64x2_t __p1) { - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint64x2_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); - __ret = __builtin_shufflevector(__ret, 
__ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vzip2q_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint16x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15); - return __ret; -} -#else -__ai uint16x8_t vzip2q_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vzip2q_s8(int8x16_t __p0, int8x16_t __p1) { - int8x16_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31); - return __ret; -} -#else -__ai int8x16_t vzip2q_s8(int8x16_t __p0, int8x16_t __p1) { - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vzip2q_f64(float64x2_t __p0, float64x2_t __p1) { - float64x2_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 1, 3); - return __ret; -} -#else -__ai float64x2_t vzip2q_f64(float64x2_t __p0, float64x2_t __p1) { - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - float64x2_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vzip2q_f32(float32x4_t __p0, float32x4_t __p1) { - float32x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7); - return __ret; -} -#else -__ai float32x4_t vzip2q_f32(float32x4_t __p0, float32x4_t __p1) { - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - float32x4_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vzip2q_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7); - return __ret; -} -#else -__ai int32x4_t vzip2q_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int32x4_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vzip2q_s64(int64x2_t __p0, int64x2_t __p1) { - int64x2_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 1, 3); - return __ret; -} -#else -__ai int64x2_t vzip2q_s64(int64x2_t __p0, int64x2_t __p1) { - int64x2_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 1, 0); - int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - int64x2_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vzip2q_s16(int16x8_t __p0, int16x8_t __p1) { - int16x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15); - return __ret; -} -#else -__ai int16x8_t vzip2q_s16(int16x8_t __p0, int16x8_t __p1) { - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vzip2_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15); - return __ret; -} -#else -__ai uint8x8_t vzip2_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vzip2_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint32x2_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 1, 3); - return __ret; -} -#else -__ai uint32x2_t vzip2_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint32x2_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vzip2_u16(uint16x4_t __p0, uint16x4_t __p1) { - uint16x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7); - return __ret; -} -#else -__ai uint16x4_t vzip2_u16(uint16x4_t __p0, uint16x4_t __p1) { - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint16x4_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vzip2_s8(int8x8_t __p0, int8x8_t __p1) { - int8x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15); - return __ret; -} -#else -__ai int8x8_t vzip2_s8(int8x8_t __p0, int8x8_t __p1) { - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vzip2_f32(float32x2_t __p0, float32x2_t __p1) { - float32x2_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 1, 3); - return __ret; -} -#else -__ai 
float32x2_t vzip2_f32(float32x2_t __p0, float32x2_t __p1) { - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - float32x2_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vzip2_s32(int32x2_t __p0, int32x2_t __p1) { - int32x2_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 1, 3); - return __ret; -} -#else -__ai int32x2_t vzip2_s32(int32x2_t __p0, int32x2_t __p1) { - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - int32x2_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vzip2_s16(int16x4_t __p0, int16x4_t __p1) { - int16x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7); - return __ret; -} -#else -__ai int16x4_t vzip2_s16(int16x4_t __p0, int16x4_t __p1) { - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int16x4_t __ret; - __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#endif -#ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vabaq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { - uint8x16_t __ret; - __ret = __p0 + vabdq_u8(__p1, __p2); - return __ret; -} -#else -__ai uint8x16_t vabaq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __ret; - __ret = __rev0 + __noswap_vabdq_u8(__rev1, __rev2); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vabaq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { - uint32x4_t __ret; - __ret = __p0 + vabdq_u32(__p1, __p2); - return __ret; -} -#else -__ai uint32x4_t vabaq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - uint32x4_t __ret; - __ret = __rev0 + __noswap_vabdq_u32(__rev1, __rev2); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vabaq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { - uint16x8_t __ret; - __ret = __p0 + vabdq_u16(__p1, __p2); - return __ret; -} -#else -__ai uint16x8_t vabaq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 
1, 0); - uint16x8_t __ret; - __ret = __rev0 + __noswap_vabdq_u16(__rev1, __rev2); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vabaq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) { - int8x16_t __ret; - __ret = __p0 + vabdq_s8(__p1, __p2); - return __ret; -} -#else -__ai int8x16_t vabaq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) { - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __ret; - __ret = __rev0 + __noswap_vabdq_s8(__rev1, __rev2); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vabaq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { - int32x4_t __ret; - __ret = __p0 + vabdq_s32(__p1, __p2); - return __ret; -} -#else -__ai int32x4_t vabaq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - int32x4_t __ret; - __ret = __rev0 + __noswap_vabdq_s32(__rev1, __rev2); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vabaq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { - int16x8_t __ret; - __ret = __p0 + vabdq_s16(__p1, __p2); - return __ret; -} -#else -__ai int16x8_t vabaq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __ret; - __ret = __rev0 + __noswap_vabdq_s16(__rev1, __rev2); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vaba_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { - uint8x8_t __ret; - __ret = __p0 + vabd_u8(__p1, __p2); - return __ret; -} -#else -__ai uint8x8_t vaba_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __ret; - __ret = __rev0 + __noswap_vabd_u8(__rev1, __rev2); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vaba_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { - uint32x2_t __ret; - __ret = __p0 + vabd_u32(__p1, __p2); - return __ret; -} -#else -__ai uint32x2_t vaba_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, 
__p2, 1, 0); - uint32x2_t __ret; - __ret = __rev0 + __noswap_vabd_u32(__rev1, __rev2); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vaba_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { - uint16x4_t __ret; - __ret = __p0 + vabd_u16(__p1, __p2); - return __ret; -} -#else -__ai uint16x4_t vaba_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - uint16x4_t __ret; - __ret = __rev0 + __noswap_vabd_u16(__rev1, __rev2); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vaba_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) { - int8x8_t __ret; - __ret = __p0 + vabd_s8(__p1, __p2); - return __ret; -} -#else -__ai int8x8_t vaba_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) { - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __ret; - __ret = __rev0 + __noswap_vabd_s8(__rev1, __rev2); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vaba_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { - int32x2_t __ret; - __ret = __p0 + vabd_s32(__p1, __p2); - return __ret; -} -#else -__ai int32x2_t vaba_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); - int32x2_t __ret; - __ret = __rev0 + __noswap_vabd_s32(__rev1, __rev2); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vaba_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { - int16x4_t __ret; - __ret = __p0 + vabd_s16(__p1, __p2); - return __ret; -} -#else -__ai int16x4_t vaba_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - int16x4_t __ret; - __ret = __rev0 + __noswap_vabd_s16(__rev1, __rev2); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vabdl_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint16x8_t __ret; - __ret = (uint16x8_t)(vmovl_u8((uint8x8_t)(vabd_u8(__p0, __p1)))); - return __ret; -} -#else -__ai uint16x8_t vabdl_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __ret; - __ret = (uint16x8_t)(__noswap_vmovl_u8((uint8x8_t)(__noswap_vabd_u8(__rev0, __rev1)))); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -__ai uint16x8_t __noswap_vabdl_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint16x8_t __ret; - 
__ret = (uint16x8_t)(__noswap_vmovl_u8((uint8x8_t)(__noswap_vabd_u8(__p0, __p1)))); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vabdl_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint64x2_t __ret; - __ret = (uint64x2_t)(vmovl_u32((uint32x2_t)(vabd_u32(__p0, __p1)))); - return __ret; -} -#else -__ai uint64x2_t vabdl_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint64x2_t __ret; - __ret = (uint64x2_t)(__noswap_vmovl_u32((uint32x2_t)(__noswap_vabd_u32(__rev0, __rev1)))); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -__ai uint64x2_t __noswap_vabdl_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint64x2_t __ret; - __ret = (uint64x2_t)(__noswap_vmovl_u32((uint32x2_t)(__noswap_vabd_u32(__p0, __p1)))); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vabdl_u16(uint16x4_t __p0, uint16x4_t __p1) { - uint32x4_t __ret; - __ret = (uint32x4_t)(vmovl_u16((uint16x4_t)(vabd_u16(__p0, __p1)))); - return __ret; -} -#else -__ai uint32x4_t vabdl_u16(uint16x4_t __p0, uint16x4_t __p1) { - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint32x4_t __ret; - __ret = (uint32x4_t)(__noswap_vmovl_u16((uint16x4_t)(__noswap_vabd_u16(__rev0, __rev1)))); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -__ai uint32x4_t __noswap_vabdl_u16(uint16x4_t __p0, uint16x4_t __p1) { - uint32x4_t __ret; - __ret = (uint32x4_t)(__noswap_vmovl_u16((uint16x4_t)(__noswap_vabd_u16(__p0, __p1)))); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vabdl_s8(int8x8_t __p0, int8x8_t __p1) { - int16x8_t __ret; - __ret = (int16x8_t)(vmovl_u8((uint8x8_t)(vabd_s8(__p0, __p1)))); - return __ret; -} -#else -__ai int16x8_t vabdl_s8(int8x8_t __p0, int8x8_t __p1) { - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __ret; - __ret = (int16x8_t)(__noswap_vmovl_u8((uint8x8_t)(__noswap_vabd_s8(__rev0, __rev1)))); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -__ai int16x8_t __noswap_vabdl_s8(int8x8_t __p0, int8x8_t __p1) { - int16x8_t __ret; - __ret = (int16x8_t)(__noswap_vmovl_u8((uint8x8_t)(__noswap_vabd_s8(__p0, __p1)))); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vabdl_s32(int32x2_t __p0, int32x2_t __p1) { - int64x2_t __ret; - __ret = (int64x2_t)(vmovl_u32((uint32x2_t)(vabd_s32(__p0, __p1)))); - return __ret; -} -#else -__ai int64x2_t vabdl_s32(int32x2_t __p0, int32x2_t __p1) { - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - int64x2_t __ret; - __ret = (int64x2_t)(__noswap_vmovl_u32((uint32x2_t)(__noswap_vabd_s32(__rev0, __rev1)))); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -__ai int64x2_t __noswap_vabdl_s32(int32x2_t __p0, int32x2_t __p1) { - int64x2_t __ret; - __ret = (int64x2_t)(__noswap_vmovl_u32((uint32x2_t)(__noswap_vabd_s32(__p0, __p1)))); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vabdl_s16(int16x4_t __p0, int16x4_t __p1) { - int32x4_t __ret; - __ret = (int32x4_t)(vmovl_u16((uint16x4_t)(vabd_s16(__p0, __p1)))); 
- return __ret; -} -#else -__ai int32x4_t vabdl_s16(int16x4_t __p0, int16x4_t __p1) { - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int32x4_t __ret; - __ret = (int32x4_t)(__noswap_vmovl_u16((uint16x4_t)(__noswap_vabd_s16(__rev0, __rev1)))); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -__ai int32x4_t __noswap_vabdl_s16(int16x4_t __p0, int16x4_t __p1) { - int32x4_t __ret; - __ret = (int32x4_t)(__noswap_vmovl_u16((uint16x4_t)(__noswap_vabd_s16(__p0, __p1)))); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vaddl_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint16x8_t __ret; - __ret = vmovl_u8(__p0) + vmovl_u8(__p1); - return __ret; -} -#else -__ai uint16x8_t vaddl_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __ret; - __ret = __noswap_vmovl_u8(__rev0) + __noswap_vmovl_u8(__rev1); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vaddl_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint64x2_t __ret; - __ret = vmovl_u32(__p0) + vmovl_u32(__p1); - return __ret; -} -#else -__ai uint64x2_t vaddl_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint64x2_t __ret; - __ret = __noswap_vmovl_u32(__rev0) + __noswap_vmovl_u32(__rev1); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vaddl_u16(uint16x4_t __p0, uint16x4_t __p1) { - uint32x4_t __ret; - __ret = vmovl_u16(__p0) + vmovl_u16(__p1); - return __ret; -} -#else -__ai uint32x4_t vaddl_u16(uint16x4_t __p0, uint16x4_t __p1) { - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint32x4_t __ret; - __ret = __noswap_vmovl_u16(__rev0) + __noswap_vmovl_u16(__rev1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vaddl_s8(int8x8_t __p0, int8x8_t __p1) { - int16x8_t __ret; - __ret = vmovl_s8(__p0) + vmovl_s8(__p1); - return __ret; -} -#else -__ai int16x8_t vaddl_s8(int8x8_t __p0, int8x8_t __p1) { - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __ret; - __ret = __noswap_vmovl_s8(__rev0) + __noswap_vmovl_s8(__rev1); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vaddl_s32(int32x2_t __p0, int32x2_t __p1) { - int64x2_t __ret; - __ret = vmovl_s32(__p0) + vmovl_s32(__p1); - return __ret; -} -#else -__ai int64x2_t vaddl_s32(int32x2_t __p0, int32x2_t __p1) { - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - int64x2_t __ret; - __ret = __noswap_vmovl_s32(__rev0) + __noswap_vmovl_s32(__rev1); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai 
int32x4_t vaddl_s16(int16x4_t __p0, int16x4_t __p1) { - int32x4_t __ret; - __ret = vmovl_s16(__p0) + vmovl_s16(__p1); - return __ret; -} -#else -__ai int32x4_t vaddl_s16(int16x4_t __p0, int16x4_t __p1) { - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int32x4_t __ret; - __ret = __noswap_vmovl_s16(__rev0) + __noswap_vmovl_s16(__rev1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vaddw_u8(uint16x8_t __p0, uint8x8_t __p1) { - uint16x8_t __ret; - __ret = __p0 + vmovl_u8(__p1); - return __ret; -} -#else -__ai uint16x8_t vaddw_u8(uint16x8_t __p0, uint8x8_t __p1) { - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __ret; - __ret = __rev0 + __noswap_vmovl_u8(__rev1); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vaddw_u32(uint64x2_t __p0, uint32x2_t __p1) { - uint64x2_t __ret; - __ret = __p0 + vmovl_u32(__p1); - return __ret; -} -#else -__ai uint64x2_t vaddw_u32(uint64x2_t __p0, uint32x2_t __p1) { - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint64x2_t __ret; - __ret = __rev0 + __noswap_vmovl_u32(__rev1); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vaddw_u16(uint32x4_t __p0, uint16x4_t __p1) { - uint32x4_t __ret; - __ret = __p0 + vmovl_u16(__p1); - return __ret; -} -#else -__ai uint32x4_t vaddw_u16(uint32x4_t __p0, uint16x4_t __p1) { - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint32x4_t __ret; - __ret = __rev0 + __noswap_vmovl_u16(__rev1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vaddw_s8(int16x8_t __p0, int8x8_t __p1) { - int16x8_t __ret; - __ret = __p0 + vmovl_s8(__p1); - return __ret; -} -#else -__ai int16x8_t vaddw_s8(int16x8_t __p0, int8x8_t __p1) { - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __ret; - __ret = __rev0 + __noswap_vmovl_s8(__rev1); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vaddw_s32(int64x2_t __p0, int32x2_t __p1) { - int64x2_t __ret; - __ret = __p0 + vmovl_s32(__p1); - return __ret; -} -#else -__ai int64x2_t vaddw_s32(int64x2_t __p0, int32x2_t __p1) { - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - int64x2_t __ret; - __ret = __rev0 + __noswap_vmovl_s32(__rev1); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vaddw_s16(int32x4_t __p0, int16x4_t __p1) { - int32x4_t __ret; - __ret = __p0 + vmovl_s16(__p1); - return __ret; -} -#else -__ai int32x4_t vaddw_s16(int32x4_t __p0, int16x4_t __p1) { - int32x4_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int32x4_t __ret; - __ret = __rev0 + __noswap_vmovl_s16(__rev1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vmlal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { - uint16x8_t __ret; - __ret = __p0 + vmull_u8(__p1, __p2); - return __ret; -} -#else -__ai uint16x8_t vmlal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __ret; - __ret = __rev0 + __noswap_vmull_u8(__rev1, __rev2); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -__ai uint16x8_t __noswap_vmlal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { - uint16x8_t __ret; - __ret = __p0 + __noswap_vmull_u8(__p1, __p2); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vmlal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { - uint64x2_t __ret; - __ret = __p0 + vmull_u32(__p1, __p2); - return __ret; -} -#else -__ai uint64x2_t vmlal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); - uint64x2_t __ret; - __ret = __rev0 + __noswap_vmull_u32(__rev1, __rev2); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -__ai uint64x2_t __noswap_vmlal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { - uint64x2_t __ret; - __ret = __p0 + __noswap_vmull_u32(__p1, __p2); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vmlal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { - uint32x4_t __ret; - __ret = __p0 + vmull_u16(__p1, __p2); - return __ret; -} -#else -__ai uint32x4_t vmlal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - uint32x4_t __ret; - __ret = __rev0 + __noswap_vmull_u16(__rev1, __rev2); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -__ai uint32x4_t __noswap_vmlal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { - uint32x4_t __ret; - __ret = __p0 + __noswap_vmull_u16(__p1, __p2); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vmlal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) { - int16x8_t __ret; - __ret = __p0 + vmull_s8(__p1, __p2); - return __ret; -} -#else -__ai int16x8_t vmlal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) { - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __ret; - __ret = __rev0 + __noswap_vmull_s8(__rev1, __rev2); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -__ai int16x8_t 
__noswap_vmlal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) { - int16x8_t __ret; - __ret = __p0 + __noswap_vmull_s8(__p1, __p2); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { - int64x2_t __ret; - __ret = __p0 + vmull_s32(__p1, __p2); - return __ret; -} -#else -__ai int64x2_t vmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); - int64x2_t __ret; - __ret = __rev0 + __noswap_vmull_s32(__rev1, __rev2); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -__ai int64x2_t __noswap_vmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { - int64x2_t __ret; - __ret = __p0 + __noswap_vmull_s32(__p1, __p2); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { - int32x4_t __ret; - __ret = __p0 + vmull_s16(__p1, __p2); - return __ret; -} -#else -__ai int32x4_t vmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - int32x4_t __ret; - __ret = __rev0 + __noswap_vmull_s16(__rev1, __rev2); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -__ai int32x4_t __noswap_vmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { - int32x4_t __ret; - __ret = __p0 + __noswap_vmull_s16(__p1, __p2); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmlal_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \ - uint64x2_t __s0 = __p0; \ - uint32x2_t __s1 = __p1; \ - uint32x2_t __s2 = __p2; \ - uint64x2_t __ret; \ - __ret = __s0 + vmull_u32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3)); \ - __ret; \ -}) -#else -#define vmlal_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \ - uint64x2_t __s0 = __p0; \ - uint32x2_t __s1 = __p1; \ - uint32x2_t __s2 = __p2; \ - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \ - uint64x2_t __ret; \ - __ret = __rev0 + __noswap_vmull_u32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmlal_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \ - uint32x4_t __s0 = __p0; \ - uint16x4_t __s1 = __p1; \ - uint16x4_t __s2 = __p2; \ - uint32x4_t __ret; \ - __ret = __s0 + vmull_u16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \ - __ret; \ -}) -#else -#define vmlal_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \ - uint32x4_t __s0 = __p0; \ - uint16x4_t __s1 = __p1; \ - uint16x4_t __s2 = __p2; \ - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ - uint32x4_t __ret; \ - __ret = __rev0 + __noswap_vmull_u16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \ - __ret = 
__builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmlal_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ - int64x2_t __s0 = __p0; \ - int32x2_t __s1 = __p1; \ - int32x2_t __s2 = __p2; \ - int64x2_t __ret; \ - __ret = __s0 + vmull_s32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3)); \ - __ret; \ -}) -#else -#define vmlal_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ - int64x2_t __s0 = __p0; \ - int32x2_t __s1 = __p1; \ - int32x2_t __s2 = __p2; \ - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \ - int64x2_t __ret; \ - __ret = __rev0 + __noswap_vmull_s32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmlal_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ - int32x4_t __s0 = __p0; \ - int16x4_t __s1 = __p1; \ - int16x4_t __s2 = __p2; \ - int32x4_t __ret; \ - __ret = __s0 + vmull_s16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \ - __ret; \ -}) -#else -#define vmlal_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ - int32x4_t __s0 = __p0; \ - int16x4_t __s1 = __p1; \ - int16x4_t __s2 = __p2; \ - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ - int32x4_t __ret; \ - __ret = __rev0 + __noswap_vmull_s16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vmlal_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2) { - uint64x2_t __ret; - __ret = __p0 + vmull_u32(__p1, (uint32x2_t) {__p2, __p2}); - return __ret; -} -#else -__ai uint64x2_t vmlal_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2) { - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint64x2_t __ret; - __ret = __rev0 + __noswap_vmull_u32(__rev1, (uint32x2_t) {__p2, __p2}); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -__ai uint64x2_t __noswap_vmlal_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2) { - uint64x2_t __ret; - __ret = __p0 + __noswap_vmull_u32(__p1, (uint32x2_t) {__p2, __p2}); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vmlal_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2) { - uint32x4_t __ret; - __ret = __p0 + vmull_u16(__p1, (uint16x4_t) {__p2, __p2, __p2, __p2}); - return __ret; -} -#else -__ai uint32x4_t vmlal_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2) { - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint32x4_t __ret; - __ret = __rev0 + __noswap_vmull_u16(__rev1, (uint16x4_t) {__p2, __p2, __p2, __p2}); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -__ai uint32x4_t __noswap_vmlal_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2) { - uint32x4_t __ret; - __ret = __p0 + __noswap_vmull_u16(__p1, (uint16x4_t) {__p2, __p2, __p2, __p2}); - return __ret; -} 
-#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) { - int64x2_t __ret; - __ret = __p0 + vmull_s32(__p1, (int32x2_t) {__p2, __p2}); - return __ret; -} -#else -__ai int64x2_t vmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) { - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - int64x2_t __ret; - __ret = __rev0 + __noswap_vmull_s32(__rev1, (int32x2_t) {__p2, __p2}); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -__ai int64x2_t __noswap_vmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) { - int64x2_t __ret; - __ret = __p0 + __noswap_vmull_s32(__p1, (int32x2_t) {__p2, __p2}); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) { - int32x4_t __ret; - __ret = __p0 + vmull_s16(__p1, (int16x4_t) {__p2, __p2, __p2, __p2}); - return __ret; -} -#else -__ai int32x4_t vmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) { - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int32x4_t __ret; - __ret = __rev0 + __noswap_vmull_s16(__rev1, (int16x4_t) {__p2, __p2, __p2, __p2}); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -__ai int32x4_t __noswap_vmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) { - int32x4_t __ret; - __ret = __p0 + __noswap_vmull_s16(__p1, (int16x4_t) {__p2, __p2, __p2, __p2}); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vmlsl_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { - uint16x8_t __ret; - __ret = __p0 - vmull_u8(__p1, __p2); - return __ret; -} -#else -__ai uint16x8_t vmlsl_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __ret; - __ret = __rev0 - __noswap_vmull_u8(__rev1, __rev2); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -__ai uint16x8_t __noswap_vmlsl_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { - uint16x8_t __ret; - __ret = __p0 - __noswap_vmull_u8(__p1, __p2); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vmlsl_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { - uint64x2_t __ret; - __ret = __p0 - vmull_u32(__p1, __p2); - return __ret; -} -#else -__ai uint64x2_t vmlsl_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); - uint64x2_t __ret; - __ret = __rev0 - __noswap_vmull_u32(__rev1, __rev2); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -__ai uint64x2_t __noswap_vmlsl_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { - uint64x2_t __ret; - __ret = __p0 - __noswap_vmull_u32(__p1, __p2); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vmlsl_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { - uint32x4_t __ret; - __ret = __p0 - vmull_u16(__p1, __p2); - return __ret; -} 
-#else -__ai uint32x4_t vmlsl_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - uint32x4_t __ret; - __ret = __rev0 - __noswap_vmull_u16(__rev1, __rev2); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -__ai uint32x4_t __noswap_vmlsl_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { - uint32x4_t __ret; - __ret = __p0 - __noswap_vmull_u16(__p1, __p2); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vmlsl_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) { - int16x8_t __ret; - __ret = __p0 - vmull_s8(__p1, __p2); - return __ret; -} -#else -__ai int16x8_t vmlsl_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) { - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __ret; - __ret = __rev0 - __noswap_vmull_s8(__rev1, __rev2); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -__ai int16x8_t __noswap_vmlsl_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) { - int16x8_t __ret; - __ret = __p0 - __noswap_vmull_s8(__p1, __p2); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { - int64x2_t __ret; - __ret = __p0 - vmull_s32(__p1, __p2); - return __ret; -} -#else -__ai int64x2_t vmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); - int64x2_t __ret; - __ret = __rev0 - __noswap_vmull_s32(__rev1, __rev2); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -__ai int64x2_t __noswap_vmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { - int64x2_t __ret; - __ret = __p0 - __noswap_vmull_s32(__p1, __p2); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { - int32x4_t __ret; - __ret = __p0 - vmull_s16(__p1, __p2); - return __ret; -} -#else -__ai int32x4_t vmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - int32x4_t __ret; - __ret = __rev0 - __noswap_vmull_s16(__rev1, __rev2); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -__ai int32x4_t __noswap_vmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { - int32x4_t __ret; - __ret = __p0 - __noswap_vmull_s16(__p1, __p2); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmlsl_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \ - uint64x2_t __s0 = __p0; \ - uint32x2_t __s1 = __p1; \ - uint32x2_t __s2 = __p2; \ - uint64x2_t __ret; \ - __ret = __s0 - vmull_u32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3)); \ - __ret; \ -}) -#else -#define vmlsl_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \ - 
uint64x2_t __s0 = __p0; \ - uint32x2_t __s1 = __p1; \ - uint32x2_t __s2 = __p2; \ - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \ - uint64x2_t __ret; \ - __ret = __rev0 - __noswap_vmull_u32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmlsl_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \ - uint32x4_t __s0 = __p0; \ - uint16x4_t __s1 = __p1; \ - uint16x4_t __s2 = __p2; \ - uint32x4_t __ret; \ - __ret = __s0 - vmull_u16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \ - __ret; \ -}) -#else -#define vmlsl_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \ - uint32x4_t __s0 = __p0; \ - uint16x4_t __s1 = __p1; \ - uint16x4_t __s2 = __p2; \ - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ - uint32x4_t __ret; \ - __ret = __rev0 - __noswap_vmull_u16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmlsl_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ - int64x2_t __s0 = __p0; \ - int32x2_t __s1 = __p1; \ - int32x2_t __s2 = __p2; \ - int64x2_t __ret; \ - __ret = __s0 - vmull_s32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3)); \ - __ret; \ -}) -#else -#define vmlsl_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ - int64x2_t __s0 = __p0; \ - int32x2_t __s1 = __p1; \ - int32x2_t __s2 = __p2; \ - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \ - int64x2_t __ret; \ - __ret = __rev0 - __noswap_vmull_s32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmlsl_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ - int32x4_t __s0 = __p0; \ - int16x4_t __s1 = __p1; \ - int16x4_t __s2 = __p2; \ - int32x4_t __ret; \ - __ret = __s0 - vmull_s16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \ - __ret; \ -}) -#else -#define vmlsl_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ - int32x4_t __s0 = __p0; \ - int16x4_t __s1 = __p1; \ - int16x4_t __s2 = __p2; \ - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ - int32x4_t __ret; \ - __ret = __rev0 - __noswap_vmull_s16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vmlsl_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2) { - uint64x2_t __ret; - __ret = __p0 - vmull_u32(__p1, (uint32x2_t) {__p2, __p2}); - return __ret; -} -#else -__ai uint64x2_t vmlsl_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2) { - 
uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint64x2_t __ret; - __ret = __rev0 - __noswap_vmull_u32(__rev1, (uint32x2_t) {__p2, __p2}); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -__ai uint64x2_t __noswap_vmlsl_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2) { - uint64x2_t __ret; - __ret = __p0 - __noswap_vmull_u32(__p1, (uint32x2_t) {__p2, __p2}); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vmlsl_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2) { - uint32x4_t __ret; - __ret = __p0 - vmull_u16(__p1, (uint16x4_t) {__p2, __p2, __p2, __p2}); - return __ret; -} -#else -__ai uint32x4_t vmlsl_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2) { - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint32x4_t __ret; - __ret = __rev0 - __noswap_vmull_u16(__rev1, (uint16x4_t) {__p2, __p2, __p2, __p2}); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -__ai uint32x4_t __noswap_vmlsl_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2) { - uint32x4_t __ret; - __ret = __p0 - __noswap_vmull_u16(__p1, (uint16x4_t) {__p2, __p2, __p2, __p2}); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) { - int64x2_t __ret; - __ret = __p0 - vmull_s32(__p1, (int32x2_t) {__p2, __p2}); - return __ret; -} -#else -__ai int64x2_t vmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) { - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - int64x2_t __ret; - __ret = __rev0 - __noswap_vmull_s32(__rev1, (int32x2_t) {__p2, __p2}); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -__ai int64x2_t __noswap_vmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) { - int64x2_t __ret; - __ret = __p0 - __noswap_vmull_s32(__p1, (int32x2_t) {__p2, __p2}); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) { - int32x4_t __ret; - __ret = __p0 - vmull_s16(__p1, (int16x4_t) {__p2, __p2, __p2, __p2}); - return __ret; -} -#else -__ai int32x4_t vmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) { - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int32x4_t __ret; - __ret = __rev0 - __noswap_vmull_s16(__rev1, (int16x4_t) {__p2, __p2, __p2, __p2}); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -__ai int32x4_t __noswap_vmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) { - int32x4_t __ret; - __ret = __p0 - __noswap_vmull_s16(__p1, (int16x4_t) {__p2, __p2, __p2, __p2}); - return __ret; -} -#endif - -#if defined(__ARM_FEATURE_FP16_SCALAR_ARITHMETIC) -#ifdef __LITTLE_ENDIAN__ -#define vfmsh_lane_f16(__p0_258, __p1_258, __p2_258, __p3_258) __extension__ ({ \ - float16_t __s0_258 = __p0_258; \ - float16_t __s1_258 = __p1_258; \ - float16x4_t __s2_258 = __p2_258; \ - float16_t __ret_258; \ - __ret_258 = vfmsh_f16(__s0_258, __s1_258, vget_lane_f16(__s2_258, __p3_258)); \ - __ret_258; \ -}) -#else -#define vfmsh_lane_f16(__p0_259, __p1_259, __p2_259, __p3_259) __extension__ ({ \ - float16_t __s0_259 = __p0_259; \ - 
float16_t __s1_259 = __p1_259; \ - float16x4_t __s2_259 = __p2_259; \ - float16x4_t __rev2_259; __rev2_259 = __builtin_shufflevector(__s2_259, __s2_259, 3, 2, 1, 0); \ - float16_t __ret_259; \ - __ret_259 = __noswap_vfmsh_f16(__s0_259, __s1_259, __noswap_vget_lane_f16(__rev2_259, __p3_259)); \ - __ret_259; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vfmsh_laneq_f16(__p0_260, __p1_260, __p2_260, __p3_260) __extension__ ({ \ - float16_t __s0_260 = __p0_260; \ - float16_t __s1_260 = __p1_260; \ - float16x8_t __s2_260 = __p2_260; \ - float16_t __ret_260; \ - __ret_260 = vfmsh_f16(__s0_260, __s1_260, vgetq_lane_f16(__s2_260, __p3_260)); \ - __ret_260; \ -}) -#else -#define vfmsh_laneq_f16(__p0_261, __p1_261, __p2_261, __p3_261) __extension__ ({ \ - float16_t __s0_261 = __p0_261; \ - float16_t __s1_261 = __p1_261; \ - float16x8_t __s2_261 = __p2_261; \ - float16x8_t __rev2_261; __rev2_261 = __builtin_shufflevector(__s2_261, __s2_261, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16_t __ret_261; \ - __ret_261 = __noswap_vfmsh_f16(__s0_261, __s1_261, __noswap_vgetq_lane_f16(__rev2_261, __p3_261)); \ - __ret_261; \ -}) -#endif - -#endif -#if defined(__ARM_FEATURE_QRDMX) && defined(__aarch64__) -#ifdef __LITTLE_ENDIAN__ -__ai int32_t vqrdmlahs_s32(int32_t __p0, int32_t __p1, int32_t __p2) { - int32_t __ret; - __ret = vqadds_s32(__p0, vqrdmulhs_s32(__p1, __p2)); - return __ret; -} -#else -__ai int32_t vqrdmlahs_s32(int32_t __p0, int32_t __p1, int32_t __p2) { - int32_t __ret; - __ret = __noswap_vqadds_s32(__p0, __noswap_vqrdmulhs_s32(__p1, __p2)); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16_t vqrdmlahh_s16(int16_t __p0, int16_t __p1, int16_t __p2) { - int16_t __ret; - __ret = vqaddh_s16(__p0, vqrdmulhh_s16(__p1, __p2)); - return __ret; -} -#else -__ai int16_t vqrdmlahh_s16(int16_t __p0, int16_t __p1, int16_t __p2) { - int16_t __ret; - __ret = __noswap_vqaddh_s16(__p0, __noswap_vqrdmulhh_s16(__p1, __p2)); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqrdmlahs_lane_s32(__p0_262, __p1_262, __p2_262, __p3_262) __extension__ ({ \ - int32_t __s0_262 = __p0_262; \ - int32_t __s1_262 = __p1_262; \ - int32x2_t __s2_262 = __p2_262; \ - int32_t __ret_262; \ - __ret_262 = vqadds_s32(__s0_262, vqrdmulhs_s32(__s1_262, vget_lane_s32(__s2_262, __p3_262))); \ - __ret_262; \ -}) -#else -#define vqrdmlahs_lane_s32(__p0_263, __p1_263, __p2_263, __p3_263) __extension__ ({ \ - int32_t __s0_263 = __p0_263; \ - int32_t __s1_263 = __p1_263; \ - int32x2_t __s2_263 = __p2_263; \ - int32x2_t __rev2_263; __rev2_263 = __builtin_shufflevector(__s2_263, __s2_263, 1, 0); \ - int32_t __ret_263; \ - __ret_263 = __noswap_vqadds_s32(__s0_263, __noswap_vqrdmulhs_s32(__s1_263, __noswap_vget_lane_s32(__rev2_263, __p3_263))); \ - __ret_263; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqrdmlahh_lane_s16(__p0_264, __p1_264, __p2_264, __p3_264) __extension__ ({ \ - int16_t __s0_264 = __p0_264; \ - int16_t __s1_264 = __p1_264; \ - int16x4_t __s2_264 = __p2_264; \ - int16_t __ret_264; \ - __ret_264 = vqaddh_s16(__s0_264, vqrdmulhh_s16(__s1_264, vget_lane_s16(__s2_264, __p3_264))); \ - __ret_264; \ -}) -#else -#define vqrdmlahh_lane_s16(__p0_265, __p1_265, __p2_265, __p3_265) __extension__ ({ \ - int16_t __s0_265 = __p0_265; \ - int16_t __s1_265 = __p1_265; \ - int16x4_t __s2_265 = __p2_265; \ - int16x4_t __rev2_265; __rev2_265 = __builtin_shufflevector(__s2_265, __s2_265, 3, 2, 1, 0); \ - int16_t __ret_265; \ - __ret_265 = __noswap_vqaddh_s16(__s0_265, __noswap_vqrdmulhh_s16(__s1_265, 
__noswap_vget_lane_s16(__rev2_265, __p3_265))); \ - __ret_265; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqrdmlahs_laneq_s32(__p0_266, __p1_266, __p2_266, __p3_266) __extension__ ({ \ - int32_t __s0_266 = __p0_266; \ - int32_t __s1_266 = __p1_266; \ - int32x4_t __s2_266 = __p2_266; \ - int32_t __ret_266; \ - __ret_266 = vqadds_s32(__s0_266, vqrdmulhs_s32(__s1_266, vgetq_lane_s32(__s2_266, __p3_266))); \ - __ret_266; \ -}) -#else -#define vqrdmlahs_laneq_s32(__p0_267, __p1_267, __p2_267, __p3_267) __extension__ ({ \ - int32_t __s0_267 = __p0_267; \ - int32_t __s1_267 = __p1_267; \ - int32x4_t __s2_267 = __p2_267; \ - int32x4_t __rev2_267; __rev2_267 = __builtin_shufflevector(__s2_267, __s2_267, 3, 2, 1, 0); \ - int32_t __ret_267; \ - __ret_267 = __noswap_vqadds_s32(__s0_267, __noswap_vqrdmulhs_s32(__s1_267, __noswap_vgetq_lane_s32(__rev2_267, __p3_267))); \ - __ret_267; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqrdmlahh_laneq_s16(__p0_268, __p1_268, __p2_268, __p3_268) __extension__ ({ \ - int16_t __s0_268 = __p0_268; \ - int16_t __s1_268 = __p1_268; \ - int16x8_t __s2_268 = __p2_268; \ - int16_t __ret_268; \ - __ret_268 = vqaddh_s16(__s0_268, vqrdmulhh_s16(__s1_268, vgetq_lane_s16(__s2_268, __p3_268))); \ - __ret_268; \ -}) -#else -#define vqrdmlahh_laneq_s16(__p0_269, __p1_269, __p2_269, __p3_269) __extension__ ({ \ - int16_t __s0_269 = __p0_269; \ - int16_t __s1_269 = __p1_269; \ - int16x8_t __s2_269 = __p2_269; \ - int16x8_t __rev2_269; __rev2_269 = __builtin_shufflevector(__s2_269, __s2_269, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16_t __ret_269; \ - __ret_269 = __noswap_vqaddh_s16(__s0_269, __noswap_vqrdmulhh_s16(__s1_269, __noswap_vgetq_lane_s16(__rev2_269, __p3_269))); \ - __ret_269; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32_t vqrdmlshs_s32(int32_t __p0, int32_t __p1, int32_t __p2) { - int32_t __ret; - __ret = vqsubs_s32(__p0, vqrdmulhs_s32(__p1, __p2)); - return __ret; -} -#else -__ai int32_t vqrdmlshs_s32(int32_t __p0, int32_t __p1, int32_t __p2) { - int32_t __ret; - __ret = __noswap_vqsubs_s32(__p0, __noswap_vqrdmulhs_s32(__p1, __p2)); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16_t vqrdmlshh_s16(int16_t __p0, int16_t __p1, int16_t __p2) { - int16_t __ret; - __ret = vqsubh_s16(__p0, vqrdmulhh_s16(__p1, __p2)); - return __ret; -} -#else -__ai int16_t vqrdmlshh_s16(int16_t __p0, int16_t __p1, int16_t __p2) { - int16_t __ret; - __ret = __noswap_vqsubh_s16(__p0, __noswap_vqrdmulhh_s16(__p1, __p2)); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqrdmlshs_lane_s32(__p0_270, __p1_270, __p2_270, __p3_270) __extension__ ({ \ - int32_t __s0_270 = __p0_270; \ - int32_t __s1_270 = __p1_270; \ - int32x2_t __s2_270 = __p2_270; \ - int32_t __ret_270; \ - __ret_270 = vqsubs_s32(__s0_270, vqrdmulhs_s32(__s1_270, vget_lane_s32(__s2_270, __p3_270))); \ - __ret_270; \ -}) -#else -#define vqrdmlshs_lane_s32(__p0_271, __p1_271, __p2_271, __p3_271) __extension__ ({ \ - int32_t __s0_271 = __p0_271; \ - int32_t __s1_271 = __p1_271; \ - int32x2_t __s2_271 = __p2_271; \ - int32x2_t __rev2_271; __rev2_271 = __builtin_shufflevector(__s2_271, __s2_271, 1, 0); \ - int32_t __ret_271; \ - __ret_271 = __noswap_vqsubs_s32(__s0_271, __noswap_vqrdmulhs_s32(__s1_271, __noswap_vget_lane_s32(__rev2_271, __p3_271))); \ - __ret_271; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqrdmlshh_lane_s16(__p0_272, __p1_272, __p2_272, __p3_272) __extension__ ({ \ - int16_t __s0_272 = __p0_272; \ - int16_t __s1_272 = __p1_272; \ - int16x4_t __s2_272 = __p2_272; \ - 
int16_t __ret_272; \ - __ret_272 = vqsubh_s16(__s0_272, vqrdmulhh_s16(__s1_272, vget_lane_s16(__s2_272, __p3_272))); \ - __ret_272; \ -}) -#else -#define vqrdmlshh_lane_s16(__p0_273, __p1_273, __p2_273, __p3_273) __extension__ ({ \ - int16_t __s0_273 = __p0_273; \ - int16_t __s1_273 = __p1_273; \ - int16x4_t __s2_273 = __p2_273; \ - int16x4_t __rev2_273; __rev2_273 = __builtin_shufflevector(__s2_273, __s2_273, 3, 2, 1, 0); \ - int16_t __ret_273; \ - __ret_273 = __noswap_vqsubh_s16(__s0_273, __noswap_vqrdmulhh_s16(__s1_273, __noswap_vget_lane_s16(__rev2_273, __p3_273))); \ - __ret_273; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqrdmlshs_laneq_s32(__p0_274, __p1_274, __p2_274, __p3_274) __extension__ ({ \ - int32_t __s0_274 = __p0_274; \ - int32_t __s1_274 = __p1_274; \ - int32x4_t __s2_274 = __p2_274; \ - int32_t __ret_274; \ - __ret_274 = vqsubs_s32(__s0_274, vqrdmulhs_s32(__s1_274, vgetq_lane_s32(__s2_274, __p3_274))); \ - __ret_274; \ -}) -#else -#define vqrdmlshs_laneq_s32(__p0_275, __p1_275, __p2_275, __p3_275) __extension__ ({ \ - int32_t __s0_275 = __p0_275; \ - int32_t __s1_275 = __p1_275; \ - int32x4_t __s2_275 = __p2_275; \ - int32x4_t __rev2_275; __rev2_275 = __builtin_shufflevector(__s2_275, __s2_275, 3, 2, 1, 0); \ - int32_t __ret_275; \ - __ret_275 = __noswap_vqsubs_s32(__s0_275, __noswap_vqrdmulhs_s32(__s1_275, __noswap_vgetq_lane_s32(__rev2_275, __p3_275))); \ - __ret_275; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqrdmlshh_laneq_s16(__p0_276, __p1_276, __p2_276, __p3_276) __extension__ ({ \ - int16_t __s0_276 = __p0_276; \ - int16_t __s1_276 = __p1_276; \ - int16x8_t __s2_276 = __p2_276; \ - int16_t __ret_276; \ - __ret_276 = vqsubh_s16(__s0_276, vqrdmulhh_s16(__s1_276, vgetq_lane_s16(__s2_276, __p3_276))); \ - __ret_276; \ -}) -#else -#define vqrdmlshh_laneq_s16(__p0_277, __p1_277, __p2_277, __p3_277) __extension__ ({ \ - int16_t __s0_277 = __p0_277; \ - int16_t __s1_277 = __p1_277; \ - int16x8_t __s2_277 = __p2_277; \ - int16x8_t __rev2_277; __rev2_277 = __builtin_shufflevector(__s2_277, __s2_277, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16_t __ret_277; \ - __ret_277 = __noswap_vqsubh_s16(__s0_277, __noswap_vqrdmulhh_s16(__s1_277, __noswap_vgetq_lane_s16(__rev2_277, __p3_277))); \ - __ret_277; \ -}) -#endif - -#endif -#if defined(__aarch64__) -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vabdl_high_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint16x8_t __ret; - __ret = vabdl_u8(vget_high_u8(__p0), vget_high_u8(__p1)); - return __ret; -} -#else -__ai uint16x8_t vabdl_high_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __ret; - __ret = __noswap_vabdl_u8(__noswap_vget_high_u8(__rev0), __noswap_vget_high_u8(__rev1)); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vabdl_high_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint64x2_t __ret; - __ret = vabdl_u32(vget_high_u32(__p0), vget_high_u32(__p1)); - return __ret; -} -#else -__ai uint64x2_t vabdl_high_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint64x2_t __ret; - __ret = __noswap_vabdl_u32(__noswap_vget_high_u32(__rev0), 
__noswap_vget_high_u32(__rev1)); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vabdl_high_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint32x4_t __ret; - __ret = vabdl_u16(vget_high_u16(__p0), vget_high_u16(__p1)); - return __ret; -} -#else -__ai uint32x4_t vabdl_high_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint32x4_t __ret; - __ret = __noswap_vabdl_u16(__noswap_vget_high_u16(__rev0), __noswap_vget_high_u16(__rev1)); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vabdl_high_s8(int8x16_t __p0, int8x16_t __p1) { - int16x8_t __ret; - __ret = vabdl_s8(vget_high_s8(__p0), vget_high_s8(__p1)); - return __ret; -} -#else -__ai int16x8_t vabdl_high_s8(int8x16_t __p0, int8x16_t __p1) { - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __ret; - __ret = __noswap_vabdl_s8(__noswap_vget_high_s8(__rev0), __noswap_vget_high_s8(__rev1)); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vabdl_high_s32(int32x4_t __p0, int32x4_t __p1) { - int64x2_t __ret; - __ret = vabdl_s32(vget_high_s32(__p0), vget_high_s32(__p1)); - return __ret; -} -#else -__ai int64x2_t vabdl_high_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int64x2_t __ret; - __ret = __noswap_vabdl_s32(__noswap_vget_high_s32(__rev0), __noswap_vget_high_s32(__rev1)); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vabdl_high_s16(int16x8_t __p0, int16x8_t __p1) { - int32x4_t __ret; - __ret = vabdl_s16(vget_high_s16(__p0), vget_high_s16(__p1)); - return __ret; -} -#else -__ai int32x4_t vabdl_high_s16(int16x8_t __p0, int16x8_t __p1) { - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int32x4_t __ret; - __ret = __noswap_vabdl_s16(__noswap_vget_high_s16(__rev0), __noswap_vget_high_s16(__rev1)); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vaddl_high_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint16x8_t __ret; - __ret = vmovl_high_u8(__p0) + vmovl_high_u8(__p1); - return __ret; -} -#else -__ai uint16x8_t vaddl_high_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __ret; - __ret = __noswap_vmovl_high_u8(__rev0) + __noswap_vmovl_high_u8(__rev1); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vaddl_high_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint64x2_t 
__ret; - __ret = vmovl_high_u32(__p0) + vmovl_high_u32(__p1); - return __ret; -} -#else -__ai uint64x2_t vaddl_high_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint64x2_t __ret; - __ret = __noswap_vmovl_high_u32(__rev0) + __noswap_vmovl_high_u32(__rev1); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vaddl_high_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint32x4_t __ret; - __ret = vmovl_high_u16(__p0) + vmovl_high_u16(__p1); - return __ret; -} -#else -__ai uint32x4_t vaddl_high_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint32x4_t __ret; - __ret = __noswap_vmovl_high_u16(__rev0) + __noswap_vmovl_high_u16(__rev1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vaddl_high_s8(int8x16_t __p0, int8x16_t __p1) { - int16x8_t __ret; - __ret = vmovl_high_s8(__p0) + vmovl_high_s8(__p1); - return __ret; -} -#else -__ai int16x8_t vaddl_high_s8(int8x16_t __p0, int8x16_t __p1) { - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __ret; - __ret = __noswap_vmovl_high_s8(__rev0) + __noswap_vmovl_high_s8(__rev1); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vaddl_high_s32(int32x4_t __p0, int32x4_t __p1) { - int64x2_t __ret; - __ret = vmovl_high_s32(__p0) + vmovl_high_s32(__p1); - return __ret; -} -#else -__ai int64x2_t vaddl_high_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int64x2_t __ret; - __ret = __noswap_vmovl_high_s32(__rev0) + __noswap_vmovl_high_s32(__rev1); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vaddl_high_s16(int16x8_t __p0, int16x8_t __p1) { - int32x4_t __ret; - __ret = vmovl_high_s16(__p0) + vmovl_high_s16(__p1); - return __ret; -} -#else -__ai int32x4_t vaddl_high_s16(int16x8_t __p0, int16x8_t __p1) { - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int32x4_t __ret; - __ret = __noswap_vmovl_high_s16(__rev0) + __noswap_vmovl_high_s16(__rev1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vaddw_high_u8(uint16x8_t __p0, uint8x16_t __p1) { - uint16x8_t __ret; - __ret = __p0 + vmovl_high_u8(__p1); - return __ret; -} -#else -__ai uint16x8_t vaddw_high_u8(uint16x8_t __p0, uint8x16_t __p1) { - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __ret; - __ret = __rev0 + 
__noswap_vmovl_high_u8(__rev1); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vaddw_high_u32(uint64x2_t __p0, uint32x4_t __p1) { - uint64x2_t __ret; - __ret = __p0 + vmovl_high_u32(__p1); - return __ret; -} -#else -__ai uint64x2_t vaddw_high_u32(uint64x2_t __p0, uint32x4_t __p1) { - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint64x2_t __ret; - __ret = __rev0 + __noswap_vmovl_high_u32(__rev1); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vaddw_high_u16(uint32x4_t __p0, uint16x8_t __p1) { - uint32x4_t __ret; - __ret = __p0 + vmovl_high_u16(__p1); - return __ret; -} -#else -__ai uint32x4_t vaddw_high_u16(uint32x4_t __p0, uint16x8_t __p1) { - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint32x4_t __ret; - __ret = __rev0 + __noswap_vmovl_high_u16(__rev1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vaddw_high_s8(int16x8_t __p0, int8x16_t __p1) { - int16x8_t __ret; - __ret = __p0 + vmovl_high_s8(__p1); - return __ret; -} -#else -__ai int16x8_t vaddw_high_s8(int16x8_t __p0, int8x16_t __p1) { - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __ret; - __ret = __rev0 + __noswap_vmovl_high_s8(__rev1); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vaddw_high_s32(int64x2_t __p0, int32x4_t __p1) { - int64x2_t __ret; - __ret = __p0 + vmovl_high_s32(__p1); - return __ret; -} -#else -__ai int64x2_t vaddw_high_s32(int64x2_t __p0, int32x4_t __p1) { - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int64x2_t __ret; - __ret = __rev0 + __noswap_vmovl_high_s32(__rev1); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vaddw_high_s16(int32x4_t __p0, int16x8_t __p1) { - int32x4_t __ret; - __ret = __p0 + vmovl_high_s16(__p1); - return __ret; -} -#else -__ai int32x4_t vaddw_high_s16(int32x4_t __p0, int16x8_t __p1) { - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int32x4_t __ret; - __ret = __rev0 + __noswap_vmovl_high_s16(__rev1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcopyq_lane_p64(__p0_278, __p1_278, __p2_278, __p3_278) __extension__ ({ \ - poly64x2_t __s0_278 = __p0_278; \ - poly64x1_t __s2_278 = __p2_278; \ - poly64x2_t __ret_278; \ - __ret_278 = vsetq_lane_p64(vget_lane_p64(__s2_278, __p3_278), __s0_278, __p1_278); \ - __ret_278; \ -}) -#else -#define vcopyq_lane_p64(__p0_279, __p1_279, __p2_279, __p3_279) __extension__ ({ \ - poly64x2_t __s0_279 = __p0_279; \ - poly64x1_t __s2_279 = __p2_279; \ - poly64x2_t __rev0_279; __rev0_279 = __builtin_shufflevector(__s0_279, 
__s0_279, 1, 0); \ - poly64x2_t __ret_279; \ - __ret_279 = __noswap_vsetq_lane_p64(__noswap_vget_lane_p64(__s2_279, __p3_279), __rev0_279, __p1_279); \ - __ret_279 = __builtin_shufflevector(__ret_279, __ret_279, 1, 0); \ - __ret_279; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcopyq_lane_f64(__p0_280, __p1_280, __p2_280, __p3_280) __extension__ ({ \ - float64x2_t __s0_280 = __p0_280; \ - float64x1_t __s2_280 = __p2_280; \ - float64x2_t __ret_280; \ - __ret_280 = vsetq_lane_f64(vget_lane_f64(__s2_280, __p3_280), __s0_280, __p1_280); \ - __ret_280; \ -}) -#else -#define vcopyq_lane_f64(__p0_281, __p1_281, __p2_281, __p3_281) __extension__ ({ \ - float64x2_t __s0_281 = __p0_281; \ - float64x1_t __s2_281 = __p2_281; \ - float64x2_t __rev0_281; __rev0_281 = __builtin_shufflevector(__s0_281, __s0_281, 1, 0); \ - float64x2_t __ret_281; \ - __ret_281 = __noswap_vsetq_lane_f64(__noswap_vget_lane_f64(__s2_281, __p3_281), __rev0_281, __p1_281); \ - __ret_281 = __builtin_shufflevector(__ret_281, __ret_281, 1, 0); \ - __ret_281; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcopy_lane_p64(__p0_282, __p1_282, __p2_282, __p3_282) __extension__ ({ \ - poly64x1_t __s0_282 = __p0_282; \ - poly64x1_t __s2_282 = __p2_282; \ - poly64x1_t __ret_282; \ - __ret_282 = vset_lane_p64(vget_lane_p64(__s2_282, __p3_282), __s0_282, __p1_282); \ - __ret_282; \ -}) -#else -#define vcopy_lane_p64(__p0_283, __p1_283, __p2_283, __p3_283) __extension__ ({ \ - poly64x1_t __s0_283 = __p0_283; \ - poly64x1_t __s2_283 = __p2_283; \ - poly64x1_t __ret_283; \ - __ret_283 = __noswap_vset_lane_p64(__noswap_vget_lane_p64(__s2_283, __p3_283), __s0_283, __p1_283); \ - __ret_283; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcopy_lane_f64(__p0_284, __p1_284, __p2_284, __p3_284) __extension__ ({ \ - float64x1_t __s0_284 = __p0_284; \ - float64x1_t __s2_284 = __p2_284; \ - float64x1_t __ret_284; \ - __ret_284 = vset_lane_f64(vget_lane_f64(__s2_284, __p3_284), __s0_284, __p1_284); \ - __ret_284; \ -}) -#else -#define vcopy_lane_f64(__p0_285, __p1_285, __p2_285, __p3_285) __extension__ ({ \ - float64x1_t __s0_285 = __p0_285; \ - float64x1_t __s2_285 = __p2_285; \ - float64x1_t __ret_285; \ - __ret_285 = __noswap_vset_lane_f64(__noswap_vget_lane_f64(__s2_285, __p3_285), __s0_285, __p1_285); \ - __ret_285; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcopyq_laneq_p64(__p0_286, __p1_286, __p2_286, __p3_286) __extension__ ({ \ - poly64x2_t __s0_286 = __p0_286; \ - poly64x2_t __s2_286 = __p2_286; \ - poly64x2_t __ret_286; \ - __ret_286 = vsetq_lane_p64(vgetq_lane_p64(__s2_286, __p3_286), __s0_286, __p1_286); \ - __ret_286; \ -}) -#else -#define vcopyq_laneq_p64(__p0_287, __p1_287, __p2_287, __p3_287) __extension__ ({ \ - poly64x2_t __s0_287 = __p0_287; \ - poly64x2_t __s2_287 = __p2_287; \ - poly64x2_t __rev0_287; __rev0_287 = __builtin_shufflevector(__s0_287, __s0_287, 1, 0); \ - poly64x2_t __rev2_287; __rev2_287 = __builtin_shufflevector(__s2_287, __s2_287, 1, 0); \ - poly64x2_t __ret_287; \ - __ret_287 = __noswap_vsetq_lane_p64(__noswap_vgetq_lane_p64(__rev2_287, __p3_287), __rev0_287, __p1_287); \ - __ret_287 = __builtin_shufflevector(__ret_287, __ret_287, 1, 0); \ - __ret_287; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcopyq_laneq_f64(__p0_288, __p1_288, __p2_288, __p3_288) __extension__ ({ \ - float64x2_t __s0_288 = __p0_288; \ - float64x2_t __s2_288 = __p2_288; \ - float64x2_t __ret_288; \ - __ret_288 = vsetq_lane_f64(vgetq_lane_f64(__s2_288, __p3_288), __s0_288, __p1_288); \ - __ret_288; \ -}) -#else 
-#define vcopyq_laneq_f64(__p0_289, __p1_289, __p2_289, __p3_289) __extension__ ({ \ - float64x2_t __s0_289 = __p0_289; \ - float64x2_t __s2_289 = __p2_289; \ - float64x2_t __rev0_289; __rev0_289 = __builtin_shufflevector(__s0_289, __s0_289, 1, 0); \ - float64x2_t __rev2_289; __rev2_289 = __builtin_shufflevector(__s2_289, __s2_289, 1, 0); \ - float64x2_t __ret_289; \ - __ret_289 = __noswap_vsetq_lane_f64(__noswap_vgetq_lane_f64(__rev2_289, __p3_289), __rev0_289, __p1_289); \ - __ret_289 = __builtin_shufflevector(__ret_289, __ret_289, 1, 0); \ - __ret_289; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcopy_laneq_p64(__p0_290, __p1_290, __p2_290, __p3_290) __extension__ ({ \ - poly64x1_t __s0_290 = __p0_290; \ - poly64x2_t __s2_290 = __p2_290; \ - poly64x1_t __ret_290; \ - __ret_290 = vset_lane_p64(vgetq_lane_p64(__s2_290, __p3_290), __s0_290, __p1_290); \ - __ret_290; \ -}) -#else -#define vcopy_laneq_p64(__p0_291, __p1_291, __p2_291, __p3_291) __extension__ ({ \ - poly64x1_t __s0_291 = __p0_291; \ - poly64x2_t __s2_291 = __p2_291; \ - poly64x2_t __rev2_291; __rev2_291 = __builtin_shufflevector(__s2_291, __s2_291, 1, 0); \ - poly64x1_t __ret_291; \ - __ret_291 = __noswap_vset_lane_p64(__noswap_vgetq_lane_p64(__rev2_291, __p3_291), __s0_291, __p1_291); \ - __ret_291; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcopy_laneq_f64(__p0_292, __p1_292, __p2_292, __p3_292) __extension__ ({ \ - float64x1_t __s0_292 = __p0_292; \ - float64x2_t __s2_292 = __p2_292; \ - float64x1_t __ret_292; \ - __ret_292 = vset_lane_f64(vgetq_lane_f64(__s2_292, __p3_292), __s0_292, __p1_292); \ - __ret_292; \ -}) -#else -#define vcopy_laneq_f64(__p0_293, __p1_293, __p2_293, __p3_293) __extension__ ({ \ - float64x1_t __s0_293 = __p0_293; \ - float64x2_t __s2_293 = __p2_293; \ - float64x2_t __rev2_293; __rev2_293 = __builtin_shufflevector(__s2_293, __s2_293, 1, 0); \ - float64x1_t __ret_293; \ - __ret_293 = __noswap_vset_lane_f64(__noswap_vgetq_lane_f64(__rev2_293, __p3_293), __s0_293, __p1_293); \ - __ret_293; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vmlal_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) { - uint16x8_t __ret; - __ret = vmlal_u8(__p0, vget_high_u8(__p1), vget_high_u8(__p2)); - return __ret; -} -#else -__ai uint16x8_t vmlal_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) { - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __ret; - __ret = __noswap_vmlal_u8(__rev0, __noswap_vget_high_u8(__rev1), __noswap_vget_high_u8(__rev2)); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vmlal_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2) { - uint64x2_t __ret; - __ret = vmlal_u32(__p0, vget_high_u32(__p1), vget_high_u32(__p2)); - return __ret; -} -#else -__ai uint64x2_t vmlal_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2) { - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - uint64x2_t __ret; - __ret = __noswap_vmlal_u32(__rev0, __noswap_vget_high_u32(__rev1), 
__noswap_vget_high_u32(__rev2)); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vmlal_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2) { - uint32x4_t __ret; - __ret = vmlal_u16(__p0, vget_high_u16(__p1), vget_high_u16(__p2)); - return __ret; -} -#else -__ai uint32x4_t vmlal_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2) { - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - uint32x4_t __ret; - __ret = __noswap_vmlal_u16(__rev0, __noswap_vget_high_u16(__rev1), __noswap_vget_high_u16(__rev2)); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vmlal_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) { - int16x8_t __ret; - __ret = vmlal_s8(__p0, vget_high_s8(__p1), vget_high_s8(__p2)); - return __ret; -} -#else -__ai int16x8_t vmlal_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) { - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __ret; - __ret = __noswap_vmlal_s8(__rev0, __noswap_vget_high_s8(__rev1), __noswap_vget_high_s8(__rev2)); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vmlal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) { - int64x2_t __ret; - __ret = vmlal_s32(__p0, vget_high_s32(__p1), vget_high_s32(__p2)); - return __ret; -} -#else -__ai int64x2_t vmlal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) { - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - int64x2_t __ret; - __ret = __noswap_vmlal_s32(__rev0, __noswap_vget_high_s32(__rev1), __noswap_vget_high_s32(__rev2)); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vmlal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) { - int32x4_t __ret; - __ret = vmlal_s16(__p0, vget_high_s16(__p1), vget_high_s16(__p2)); - return __ret; -} -#else -__ai int32x4_t vmlal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) { - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - int32x4_t __ret; - __ret = __noswap_vmlal_s16(__rev0, __noswap_vget_high_s16(__rev1), __noswap_vget_high_s16(__rev2)); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vmlal_high_n_u32(uint64x2_t __p0, uint32x4_t __p1, uint32_t __p2) { - uint64x2_t __ret; - __ret = vmlal_n_u32(__p0, vget_high_u32(__p1), __p2); - return __ret; -} -#else -__ai uint64x2_t vmlal_high_n_u32(uint64x2_t __p0, uint32x4_t __p1, uint32_t __p2) { - 
uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint64x2_t __ret; - __ret = __noswap_vmlal_n_u32(__rev0, __noswap_vget_high_u32(__rev1), __p2); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vmlal_high_n_u16(uint32x4_t __p0, uint16x8_t __p1, uint16_t __p2) { - uint32x4_t __ret; - __ret = vmlal_n_u16(__p0, vget_high_u16(__p1), __p2); - return __ret; -} -#else -__ai uint32x4_t vmlal_high_n_u16(uint32x4_t __p0, uint16x8_t __p1, uint16_t __p2) { - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint32x4_t __ret; - __ret = __noswap_vmlal_n_u16(__rev0, __noswap_vget_high_u16(__rev1), __p2); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vmlal_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) { - int64x2_t __ret; - __ret = vmlal_n_s32(__p0, vget_high_s32(__p1), __p2); - return __ret; -} -#else -__ai int64x2_t vmlal_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) { - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int64x2_t __ret; - __ret = __noswap_vmlal_n_s32(__rev0, __noswap_vget_high_s32(__rev1), __p2); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vmlal_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) { - int32x4_t __ret; - __ret = vmlal_n_s16(__p0, vget_high_s16(__p1), __p2); - return __ret; -} -#else -__ai int32x4_t vmlal_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) { - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int32x4_t __ret; - __ret = __noswap_vmlal_n_s16(__rev0, __noswap_vget_high_s16(__rev1), __p2); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vmlsl_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) { - uint16x8_t __ret; - __ret = vmlsl_u8(__p0, vget_high_u8(__p1), vget_high_u8(__p2)); - return __ret; -} -#else -__ai uint16x8_t vmlsl_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) { - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __ret; - __ret = __noswap_vmlsl_u8(__rev0, __noswap_vget_high_u8(__rev1), __noswap_vget_high_u8(__rev2)); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vmlsl_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2) { - uint64x2_t __ret; - __ret = vmlsl_u32(__p0, vget_high_u32(__p1), vget_high_u32(__p2)); - return __ret; -} -#else -__ai uint64x2_t vmlsl_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2) { - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint32x4_t __rev1; __rev1 = 
__builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - uint64x2_t __ret; - __ret = __noswap_vmlsl_u32(__rev0, __noswap_vget_high_u32(__rev1), __noswap_vget_high_u32(__rev2)); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vmlsl_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2) { - uint32x4_t __ret; - __ret = vmlsl_u16(__p0, vget_high_u16(__p1), vget_high_u16(__p2)); - return __ret; -} -#else -__ai uint32x4_t vmlsl_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2) { - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - uint32x4_t __ret; - __ret = __noswap_vmlsl_u16(__rev0, __noswap_vget_high_u16(__rev1), __noswap_vget_high_u16(__rev2)); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vmlsl_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) { - int16x8_t __ret; - __ret = vmlsl_s8(__p0, vget_high_s8(__p1), vget_high_s8(__p2)); - return __ret; -} -#else -__ai int16x8_t vmlsl_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) { - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __ret; - __ret = __noswap_vmlsl_s8(__rev0, __noswap_vget_high_s8(__rev1), __noswap_vget_high_s8(__rev2)); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vmlsl_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) { - int64x2_t __ret; - __ret = vmlsl_s32(__p0, vget_high_s32(__p1), vget_high_s32(__p2)); - return __ret; -} -#else -__ai int64x2_t vmlsl_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) { - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - int64x2_t __ret; - __ret = __noswap_vmlsl_s32(__rev0, __noswap_vget_high_s32(__rev1), __noswap_vget_high_s32(__rev2)); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vmlsl_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) { - int32x4_t __ret; - __ret = vmlsl_s16(__p0, vget_high_s16(__p1), vget_high_s16(__p2)); - return __ret; -} -#else -__ai int32x4_t vmlsl_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) { - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - int32x4_t __ret; - __ret = __noswap_vmlsl_s16(__rev0, __noswap_vget_high_s16(__rev1), __noswap_vget_high_s16(__rev2)); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vmlsl_high_n_u32(uint64x2_t __p0, 
uint32x4_t __p1, uint32_t __p2) { - uint64x2_t __ret; - __ret = vmlsl_n_u32(__p0, vget_high_u32(__p1), __p2); - return __ret; -} -#else -__ai uint64x2_t vmlsl_high_n_u32(uint64x2_t __p0, uint32x4_t __p1, uint32_t __p2) { - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint64x2_t __ret; - __ret = __noswap_vmlsl_n_u32(__rev0, __noswap_vget_high_u32(__rev1), __p2); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vmlsl_high_n_u16(uint32x4_t __p0, uint16x8_t __p1, uint16_t __p2) { - uint32x4_t __ret; - __ret = vmlsl_n_u16(__p0, vget_high_u16(__p1), __p2); - return __ret; -} -#else -__ai uint32x4_t vmlsl_high_n_u16(uint32x4_t __p0, uint16x8_t __p1, uint16_t __p2) { - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint32x4_t __ret; - __ret = __noswap_vmlsl_n_u16(__rev0, __noswap_vget_high_u16(__rev1), __p2); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vmlsl_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) { - int64x2_t __ret; - __ret = vmlsl_n_s32(__p0, vget_high_s32(__p1), __p2); - return __ret; -} -#else -__ai int64x2_t vmlsl_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) { - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int64x2_t __ret; - __ret = __noswap_vmlsl_n_s32(__rev0, __noswap_vget_high_s32(__rev1), __p2); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vmlsl_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) { - int32x4_t __ret; - __ret = vmlsl_n_s16(__p0, vget_high_s16(__p1), __p2); - return __ret; -} -#else -__ai int32x4_t vmlsl_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) { - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int32x4_t __ret; - __ret = __noswap_vmlsl_n_s16(__rev0, __noswap_vget_high_s16(__rev1), __p2); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmulx_lane_f64(__p0_294, __p1_294, __p2_294) __extension__ ({ \ - float64x1_t __s0_294 = __p0_294; \ - float64x1_t __s1_294 = __p1_294; \ - float64x1_t __ret_294; \ - float64_t __x_294 = vget_lane_f64(__s0_294, 0); \ - float64_t __y_294 = vget_lane_f64(__s1_294, __p2_294); \ - float64_t __z_294 = vmulxd_f64(__x_294, __y_294); \ - __ret_294 = vset_lane_f64(__z_294, __s0_294, __p2_294); \ - __ret_294; \ -}) -#else -#define vmulx_lane_f64(__p0_295, __p1_295, __p2_295) __extension__ ({ \ - float64x1_t __s0_295 = __p0_295; \ - float64x1_t __s1_295 = __p1_295; \ - float64x1_t __ret_295; \ - float64_t __x_295 = __noswap_vget_lane_f64(__s0_295, 0); \ - float64_t __y_295 = __noswap_vget_lane_f64(__s1_295, __p2_295); \ - float64_t __z_295 = __noswap_vmulxd_f64(__x_295, __y_295); \ - __ret_295 = __noswap_vset_lane_f64(__z_295, __s0_295, __p2_295); \ - __ret_295; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmulx_laneq_f64(__p0_296, __p1_296, __p2_296) __extension__ ({ \ - float64x1_t __s0_296 = __p0_296; \ - float64x2_t __s1_296 = 
__p1_296; \ - float64x1_t __ret_296; \ - float64_t __x_296 = vget_lane_f64(__s0_296, 0); \ - float64_t __y_296 = vgetq_lane_f64(__s1_296, __p2_296); \ - float64_t __z_296 = vmulxd_f64(__x_296, __y_296); \ - __ret_296 = vset_lane_f64(__z_296, __s0_296, 0); \ - __ret_296; \ -}) -#else -#define vmulx_laneq_f64(__p0_297, __p1_297, __p2_297) __extension__ ({ \ - float64x1_t __s0_297 = __p0_297; \ - float64x2_t __s1_297 = __p1_297; \ - float64x2_t __rev1_297; __rev1_297 = __builtin_shufflevector(__s1_297, __s1_297, 1, 0); \ - float64x1_t __ret_297; \ - float64_t __x_297 = __noswap_vget_lane_f64(__s0_297, 0); \ - float64_t __y_297 = __noswap_vgetq_lane_f64(__rev1_297, __p2_297); \ - float64_t __z_297 = __noswap_vmulxd_f64(__x_297, __y_297); \ - __ret_297 = __noswap_vset_lane_f64(__z_297, __s0_297, 0); \ - __ret_297; \ -}) -#endif - -#endif -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vabal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { - uint16x8_t __ret; - __ret = __p0 + vabdl_u8(__p1, __p2); - return __ret; -} -#else -__ai uint16x8_t vabal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __ret; - __ret = __rev0 + __noswap_vabdl_u8(__rev1, __rev2); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -__ai uint16x8_t __noswap_vabal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { - uint16x8_t __ret; - __ret = __p0 + __noswap_vabdl_u8(__p1, __p2); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vabal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { - uint64x2_t __ret; - __ret = __p0 + vabdl_u32(__p1, __p2); - return __ret; -} -#else -__ai uint64x2_t vabal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); - uint64x2_t __ret; - __ret = __rev0 + __noswap_vabdl_u32(__rev1, __rev2); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -__ai uint64x2_t __noswap_vabal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { - uint64x2_t __ret; - __ret = __p0 + __noswap_vabdl_u32(__p1, __p2); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vabal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { - uint32x4_t __ret; - __ret = __p0 + vabdl_u16(__p1, __p2); - return __ret; -} -#else -__ai uint32x4_t vabal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - uint32x4_t __ret; - __ret = __rev0 + __noswap_vabdl_u16(__rev1, __rev2); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -__ai uint32x4_t __noswap_vabal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { - uint32x4_t __ret; - __ret = __p0 + __noswap_vabdl_u16(__p1, __p2); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vabal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) { - int16x8_t __ret; - __ret = __p0 + 
vabdl_s8(__p1, __p2); - return __ret; -} -#else -__ai int16x8_t vabal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) { - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __ret; - __ret = __rev0 + __noswap_vabdl_s8(__rev1, __rev2); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -__ai int16x8_t __noswap_vabal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) { - int16x8_t __ret; - __ret = __p0 + __noswap_vabdl_s8(__p1, __p2); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vabal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { - int64x2_t __ret; - __ret = __p0 + vabdl_s32(__p1, __p2); - return __ret; -} -#else -__ai int64x2_t vabal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); - int64x2_t __ret; - __ret = __rev0 + __noswap_vabdl_s32(__rev1, __rev2); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -__ai int64x2_t __noswap_vabal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { - int64x2_t __ret; - __ret = __p0 + __noswap_vabdl_s32(__p1, __p2); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vabal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { - int32x4_t __ret; - __ret = __p0 + vabdl_s16(__p1, __p2); - return __ret; -} -#else -__ai int32x4_t vabal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - int32x4_t __ret; - __ret = __rev0 + __noswap_vabdl_s16(__rev1, __rev2); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -__ai int32x4_t __noswap_vabal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { - int32x4_t __ret; - __ret = __p0 + __noswap_vabdl_s16(__p1, __p2); - return __ret; -} -#endif - -#if defined(__aarch64__) -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vabal_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) { - uint16x8_t __ret; - __ret = vabal_u8(__p0, vget_high_u8(__p1), vget_high_u8(__p2)); - return __ret; -} -#else -__ai uint16x8_t vabal_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) { - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __ret; - __ret = __noswap_vabal_u8(__rev0, __noswap_vget_high_u8(__rev1), __noswap_vget_high_u8(__rev2)); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vabal_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2) { - uint64x2_t __ret; - __ret = vabal_u32(__p0, vget_high_u32(__p1), vget_high_u32(__p2)); - return __ret; -} -#else -__ai uint64x2_t vabal_high_u32(uint64x2_t __p0, uint32x4_t __p1, 
uint32x4_t __p2) { - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - uint64x2_t __ret; - __ret = __noswap_vabal_u32(__rev0, __noswap_vget_high_u32(__rev1), __noswap_vget_high_u32(__rev2)); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vabal_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2) { - uint32x4_t __ret; - __ret = vabal_u16(__p0, vget_high_u16(__p1), vget_high_u16(__p2)); - return __ret; -} -#else -__ai uint32x4_t vabal_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2) { - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - uint32x4_t __ret; - __ret = __noswap_vabal_u16(__rev0, __noswap_vget_high_u16(__rev1), __noswap_vget_high_u16(__rev2)); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vabal_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) { - int16x8_t __ret; - __ret = vabal_s8(__p0, vget_high_s8(__p1), vget_high_s8(__p2)); - return __ret; -} -#else -__ai int16x8_t vabal_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) { - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __ret; - __ret = __noswap_vabal_s8(__rev0, __noswap_vget_high_s8(__rev1), __noswap_vget_high_s8(__rev2)); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vabal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) { - int64x2_t __ret; - __ret = vabal_s32(__p0, vget_high_s32(__p1), vget_high_s32(__p2)); - return __ret; -} -#else -__ai int64x2_t vabal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) { - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - int64x2_t __ret; - __ret = __noswap_vabal_s32(__rev0, __noswap_vget_high_s32(__rev1), __noswap_vget_high_s32(__rev2)); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vabal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) { - int32x4_t __ret; - __ret = vabal_s16(__p0, vget_high_s16(__p1), vget_high_s16(__p2)); - return __ret; -} -#else -__ai int32x4_t vabal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) { - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - int32x4_t __ret; - __ret = __noswap_vabal_s16(__rev0, __noswap_vget_high_s16(__rev1), __noswap_vget_high_s16(__rev2)); - __ret = __builtin_shufflevector(__ret, __ret, 3, 
2, 1, 0); - return __ret; -} -#endif - -#endif - -#undef __ai - -#pragma clang diagnostic pop - -#endif /* __ARM_NEON_H */ diff --git a/EXTERNAL_HEADERS/corecrypto/cc.h b/EXTERNAL_HEADERS/corecrypto/cc.h index 4b2a6dec2..4ea8e63d0 100644 --- a/EXTERNAL_HEADERS/corecrypto/cc.h +++ b/EXTERNAL_HEADERS/corecrypto/cc.h @@ -1,11 +1,12 @@ -/* - * cc.h - * corecrypto - * - * Created on 12/16/2010 - * - * Copyright (c) 2010,2011,2012,2014,2015 Apple Inc. All rights reserved. +/* Copyright (c) (2010,2011,2012,2014,2015,2016,2017,2018,2019) Apple Inc. All rights reserved. * + * corecrypto is licensed under Apple Inc.’s Internal Use License Agreement (which + * is contained in the License.txt file distributed with corecrypto) and only to + * people who accept that license. IMPORTANT: Any license rights granted to you by + * Apple Inc. (if any) are limited to internal use within your organization only on + * devices and computers you own or control, for the sole purpose of verifying the + * security characteristics and correct functioning of the Apple Software. You may + * not, directly or indirectly, redistribute the Apple Software or any portions thereof. */ #ifndef _CORECRYPTO_CC_H_ @@ -44,6 +45,10 @@ __CC_BRIDGEOS_DEPRECATED(bridgeos_version, replacement_message) #define cc_concat_(a, b) a##b #define cc_concat(a, b) cc_concat_(a, b) +#if defined(_MSC_VER) +#define __asm__(x) +#endif + /* Manage asserts here because a few functions in header public files do use asserts */ #if CORECRYPTO_DEBUG #define cc_assert(x) assert(x) @@ -72,9 +77,9 @@ uint8_t b[_alignment_]; \ } CC_ALIGNED(_alignment_) #if defined(__BIGGEST_ALIGNMENT__) -#define CC_MAX_ALIGNMENT __BIGGEST_ALIGNMENT__ +#define CC_MAX_ALIGNMENT ((size_t)__BIGGEST_ALIGNMENT__) #else -#define CC_MAX_ALIGNMENT 16 +#define CC_MAX_ALIGNMENT ((size_t)16) #endif /* pads a given size to be a multiple of the biggest alignment for any type */ @@ -95,13 +100,17 @@ uint8_t b[_alignment_]; \ #if defined(_MSC_VER) #include #define cc_ctx_decl(_type_, _size_, _name_) _type_ * _name_ = (_type_ *) _alloca(sizeof(_type_) * cc_ctx_n(_type_, _size_) ) +#define cc_ctx_decl_field(_type_, _size_, _name_) _type_ _name_ [cc_ctx_n(_type_, _size_)] #else -#define cc_ctx_decl(_type_, _size_, _name_) _type_ _name_ [cc_ctx_n(_type_, _size_)] +// FIXME this pragma is the wrong fix for VLA usage, but since this API is central to corecrypto it's difficult to remove VLAs. The macro is then used in many other projects who don't need to be warned about VLAs at the moment. It's therefore desirable to silence the diagnostic and let corecrypto deal with removing VLAs. +#define cc_ctx_decl(_type_, _size_, _name_) \ + _Pragma("GCC diagnostic push") \ + _Pragma("GCC diagnostic ignored \"-Wvla\"") \ + _type_ _name_ [cc_ctx_n(_type_, _size_)] \ + _Pragma("GCC diagnostic pop") +#define cc_ctx_decl_field cc_ctx_decl #endif -// cc_zero is deprecated, please use cc_clear instead. -#define cc_zero(_size_,_data_) _Pragma ("corecrypto deprecation warning \"'cc_zero' macro is deprecated. Use 'cc_clear' instead.\"") cc_clear(_size_,_data_) - /*! @brief cc_clear(len, dst) zeroizes array dst and it will not be optimized out. @discussion It is used to clear sensitive data, particularly when the are defined in the stack @@ -111,6 +120,14 @@ uint8_t b[_alignment_]; \ CC_NONNULL((2)) void cc_clear(size_t len, void *dst); +// cc_zero is deprecated, please use cc_clear instead. 
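For illustration, a minimal sketch of how the cc_ctx_decl and cc_clear primitives above are typically used together (the deprecated cc_zero shim that follows is just an inline wrapper around cc_clear); the context type, byte count, and function name here are arbitrary.

#include <corecrypto/cc.h>

static void scratch_ctx_example(size_t ctx_nbytes)
{
    /* Declares a stack array of uint64_t units covering ctx_nbytes bytes;
     * on clang this is a VLA, hence the -Wvla pragma dance in cc_ctx_decl. */
    cc_ctx_decl(uint64_t, ctx_nbytes, ctx);

    /* ... fill ctx with key material or intermediate state ... */

    /* Zeroize before the frame goes away; unlike a bare memset, cc_clear is
     * specified not to be optimized out. */
    cc_clear(ctx_nbytes, ctx);
}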
+cc_deprecate_with_replacement("cc_clear", 13.0, 10.15, 13.0, 6.0, 4.0) +CC_NONNULL_ALL CC_INLINE +void cc_zero(size_t len, void *dst) +{ + cc_clear(len, dst); +} + #define cc_copy(_size_, _dst_, _src_) memcpy(_dst_, _src_, _size_) CC_INLINE CC_NONNULL((2, 3, 4)) diff --git a/EXTERNAL_HEADERS/corecrypto/cc_config.h b/EXTERNAL_HEADERS/corecrypto/cc_config.h index 5fb183288..2ce76e2bd 100644 --- a/EXTERNAL_HEADERS/corecrypto/cc_config.h +++ b/EXTERNAL_HEADERS/corecrypto/cc_config.h @@ -1,11 +1,12 @@ -/* - * cc_config.h - * corecrypto - * - * Created on 11/16/2010 - * - * Copyright (c) 2010,2011,2012,2013,2014,2015 Apple Inc. All rights reserved. +/* Copyright (c) (2010,2011,2012,2013,2014,2015,2016,2017,2018,2019,2020) Apple Inc. All rights reserved. * + * corecrypto is licensed under Apple Inc.’s Internal Use License Agreement (which + * is contained in the License.txt file distributed with corecrypto) and only to + * people who accept that license. IMPORTANT: Any license rights granted to you by + * Apple Inc. (if any) are limited to internal use within your organization only on + * devices and computers you own or control, for the sole purpose of verifying the + * security characteristics and correct functioning of the Apple Software. You may + * not, directly or indirectly, redistribute the Apple Software or any portions thereof. */ #ifndef _CORECRYPTO_CC_CONFIG_H_ @@ -50,7 +51,7 @@ //Do not set these macros to 1, unless you are developing/testing for Windows under macOS #define CORECRYPTO_SIMULATE_WINDOWS_ENVIRONMENT 0 -#define CORECRYPTO_HACK_FOR_WINDOWS_DEVELOPMENT 0 //to be removed after port corecrypto to Windows +#define CORECRYPTO_HACK_FOR_WINDOWS_DEVELOPMENT 0 #if (defined(DEBUG) && (DEBUG)) || defined(_DEBUG) //MSVC defines _DEBUG /* CC_DEBUG is already used in CommonCrypto */ @@ -70,12 +71,24 @@ #define CC_KERNEL 0 #endif -#if defined(__linux__) || CORECRYPTO_SIMULATE_POSIX_ENVIRONMENT +#if defined(LINUX_SGX) && (LINUX_SGX) + #define CC_SGX 1 +#else + #define CC_SGX 0 +#endif + +#if (defined(__linux__) && !(CC_SGX)) || CORECRYPTO_SIMULATE_POSIX_ENVIRONMENT #define CC_LINUX 1 #else #define CC_LINUX 0 #endif +#if defined(__ANDROID__) && (__ANDROID__) + #define CC_ANDROID 1 +#else + #define CC_ANDROID 0 +#endif + #if defined(USE_L4) && (USE_L4) #define CC_USE_L4 1 #else @@ -124,6 +137,72 @@ #define CC_IBOOT 0 #endif +#if defined(TARGET_OS_BRIDGE) + #define CC_BRIDGE TARGET_OS_BRIDGE +#else + #define CC_BRIDGE 0 +#endif + +// Check if we're running on a generic, userspace platform, i.e., not in the kernel, SEP, etc. +#ifndef CC_GENERIC_PLATFORM + #define CC_GENERIC_PLATFORM \ + (!CC_RTKIT && !CC_KERNEL && !CC_USE_L4 && \ + !CC_RTKITROM && !CC_EFI && !CC_IBOOT && \ + !CC_USE_SEPROM && !CC_ANDROID && !CC_LINUX && \ + !CC_BRIDGE) +#endif + +// Check for availability of internal Darwin SPIs. +#ifndef CC_DARWIN_SPIS_AVAILABLE + #if defined(__has_include) + #define CC_DARWIN_SPIS_AVAILABLE __has_include() + #else + #define CC_DARWIN_SPIS_AVAILABLE 0 + #endif +#endif + +// Check for open source builds + +// ccringbuffer availability +// Only enable the ccringbuffer data structure in generic, userspace builds where memory allocation is not an issue. +#ifndef CC_RINGBUFFER_AVAILABLE + #define CC_RINGBUFFER_AVAILABLE (CC_GENERIC_PLATFORM && CC_DARWIN_SPIS_AVAILABLE && !CC_OPEN_SOURCE) +#endif + +// os_log integration +// Only enable logging support in generic, userspace builds with the desired Darwin SPIs. 
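To make the gating above concrete, this is roughly how the cascade resolves for a plain userspace Darwin build (a representative case only, not checked against any particular SDK); the logging gate defined next follows the same pattern.

/* CC_KERNEL, CC_USE_L4, CC_RTKIT, CC_RTKITROM, CC_EFI, CC_IBOOT, CC_USE_SEPROM,
 * CC_ANDROID, CC_LINUX and CC_BRIDGE all evaluate to 0, so CC_GENERIC_PLATFORM
 * is 1. With the Darwin SPI header available, CC_RINGBUFFER_AVAILABLE then
 * reduces to !CC_OPEN_SOURCE, and CC_LOGGING_AVAILABLE below reduces the same way. */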
+#ifndef CC_LOGGING_AVAILABLE + #define CC_LOGGING_AVAILABLE (CC_GENERIC_PLATFORM && CC_DARWIN_SPIS_AVAILABLE && !CC_OPEN_SOURCE) +#endif + +// FeatureFlag integration +// Only enable feature flag support in generic, userspace builds with the desired Darwin SPIs. +// This requires linking against libsystem_featureflags to function correctly. +#ifndef CC_FEATURE_FLAGS_AVAILABLE + #if defined(__has_include) + #define CC_FEATURE_FLAGS_AVAILABLE __has_include() + #else + #define CC_FEATURE_FLAGS_AVAILABLE 0 + #endif +#endif + +// Macro to determine if a specific feature is available. +// Turn off all features at compile time if desired and avoid the runtime check by changing this +// definition to 0. Limit this functionality to the same environments wherein the ringbuffer is available. +#ifndef CC_FEATURE_ENABLED + #if (CC_RINGBUFFER_AVAILABLE && CC_FEATURE_FLAGS_AVAILABLE && !defined(__i386__)) + #define CC_FEATURE_ENABLED(FEATURE) os_feature_enabled(Cryptography, FEATURE) + #else + #define CC_FEATURE_ENABLED(FEATURE) 0 + #endif +#endif + +// Trace usage of deprecated or obscure functions. For now, this is +// completely disabled. +#ifndef CC_LOG_TRACE + #define CC_LOG_TRACE 0 +#endif + // Defined by the XNU build scripts // Applies to code embedded in XNU but NOT to the kext #if defined(XNU_KERNEL_PRIVATE) @@ -293,7 +372,6 @@ #define CC_DISABLE_RSAKEYGEN 0 /* default */ #endif -// see rdar://problem/26636018 #if (CCN_UNIT_SIZE == 8) && !( defined(_MSC_VER) && defined(__clang__)) #define CCEC25519_CURVE25519_64BIT 1 #else @@ -307,7 +385,7 @@ #endif // Enable assembler in Linux if CC_LINUX_ASM is defined -#if CC_LINUX && defined(CC_LINUX_ASM) && CC_LINUX_ASM +#if (CC_LINUX || CC_SGX) && defined(CC_LINUX_ASM) && CC_LINUX_ASM #define CC_USE_ASM 1 #endif @@ -321,6 +399,8 @@ #endif #endif +#define CC_CACHE_DESCRIPTORS CC_KERNEL + //-(1) ARM V7 #if defined(_ARM_ARCH_7) && __clang__ && CC_USE_ASM #define CCN_DEDICATED_SQR CC_SMALL_CODE @@ -341,7 +421,7 @@ #else #define CCN_SHIFT_LEFT_ASM 0 #endif - #define CCN_MOD_224_ASM 1 + #define CCN_MULMOD_224_ASM 1 #define CCN_MULMOD_256_ASM 1 #define CCAES_ARM_ASM 1 #define CCAES_INTEL_ASM 0 @@ -368,7 +448,7 @@ //-(2) ARM 64 #elif defined(__arm64__) && __clang__ && CC_USE_ASM #define CCN_DEDICATED_SQR CC_SMALL_CODE - #define CCN_MUL_KARATSUBA 1 // 4*n CCN_UNIT extra memory required. + #define CCN_MUL_KARATSUBA 0 // 4*n CCN_UNIT extra memory required. #define CCN_ADD_ASM 1 #define CCN_SUB_ASM 1 #define CCN_MUL_ASM 1 @@ -381,7 +461,7 @@ #define CCN_SET_ASM 0 #define CCN_SHIFT_RIGHT_ASM 1 #define CCN_SHIFT_LEFT_ASM 1 - #define CCN_MOD_224_ASM 0 + #define CCN_MULMOD_224_ASM 1 #define CCN_MULMOD_256_ASM 1 #define CCAES_ARM_ASM 1 #define CCAES_INTEL_ASM 0 @@ -398,7 +478,7 @@ //-(3) Intel 32/64 #elif (defined(__x86_64__) || defined(__i386__)) && __clang__ && CC_USE_ASM #define CCN_DEDICATED_SQR 1 - #define CCN_MUL_KARATSUBA 1 // 4*n CCN_UNIT extra memory required. + #define CCN_MUL_KARATSUBA 0 // 4*n CCN_UNIT extra memory required. /* These assembly routines only work for a single CCN_UNIT_SIZE. 
*/ #if (defined(__x86_64__) && CCN_UNIT_SIZE == 8) || (defined(__i386__) && CCN_UNIT_SIZE == 4) #define CCN_ADD_ASM 1 @@ -422,10 +502,16 @@ #define CCN_SHIFT_LEFT_ASM 0 #endif - #define CCN_MOD_224_ASM 0 - #define CCN_MULMOD_256_ASM 0 - #define CCN_ADDMUL1_ASM 0 - #define CCN_MUL1_ASM 0 + #define CCN_MULMOD_224_ASM 0 + #if defined(__x86_64__) && CCN_UNIT_SIZE == 8 && !CC_SGX + #define CCN_MULMOD_256_ASM 1 + #define CCN_ADDMUL1_ASM 1 + #define CCN_MUL1_ASM 1 + #else + #define CCN_MULMOD_256_ASM 0 + #define CCN_ADDMUL1_ASM 0 + #define CCN_MUL1_ASM 0 + #endif #define CCN_ADD1_ASM 0 #define CCN_SUB1_ASM 0 #define CCN_SET_ASM 0 @@ -448,7 +534,7 @@ #else #define CCN_DEDICATED_SQR 0 //when assembly is off and 128-bit integers are not supported, dedicated square is off. This is the case on Windows #endif - #define CCN_MUL_KARATSUBA 1 // 4*n CCN_UNIT extra memory required. + #define CCN_MUL_KARATSUBA 0 // 4*n CCN_UNIT extra memory required. #define CCN_ADD_ASM 0 #define CCN_SUB_ASM 0 #define CCN_MUL_ASM 0 @@ -461,7 +547,7 @@ #define CCN_SET_ASM 0 #define CCN_SHIFT_RIGHT_ASM 0 #define CCN_SHIFT_LEFT_ASM 0 - #define CCN_MOD_224_ASM 0 + #define CCN_MULMOD_224_ASM 0 #define CCN_MULMOD_256_ASM 0 #define CCAES_ARM_ASM 0 #define CCAES_INTEL_ASM 0 @@ -520,18 +606,24 @@ // Bridge differences between MachO and ELF compiler/assemblers. */ -#if CC_USE_ASM -#if CC_LINUX +#if CC_LINUX || CC_SGX #define CC_ASM_SECTION_CONST .rodata #define CC_ASM_PRIVATE_EXTERN .hidden +#if CC_LINUX +// We need to be sure that assembler can access relocated C +// symbols. Sad but this is the quickest way to do that, at least with +// our current linux compiler (clang-3.4). +#define CC_C_LABEL(_sym) _sym@PLT +#else /* CC_SGX */ #define CC_C_LABEL(_sym) _sym -#else /* !CC_LINUX */ +#endif +#define _IMM(x) $(x) +#else /* !CC_LINUX && !CC_SGX */ #define CC_ASM_SECTION_CONST .const #define CC_ASM_PRIVATE_EXTERN .private_extern #define CC_C_LABEL(_sym) _##_sym -#endif /* !CC_LINUX */ -#endif /* CC_USE_ASM */ - +#define _IMM(x) $$(x) +#endif /* !CC_LINUX && !CC_SGX */ // Enable FIPSPOST function tracing only when supported. */ #ifdef CORECRYPTO_POST_TRACE @@ -540,4 +632,14 @@ #define CC_FIPSPOST_TRACE 0 #endif +#ifndef CC_INTERNAL_SDK +#if __has_include() +#define CC_INTERNAL_SDK 1 +#elif __has_include() +#define CC_INTERNAL_SDK 1 +#else +#define CC_INTERNAL_SDK 0 +#endif +#endif + #endif /* _CORECRYPTO_CC_CONFIG_H_ */ diff --git a/EXTERNAL_HEADERS/corecrypto/cc_debug.h b/EXTERNAL_HEADERS/corecrypto/cc_debug.h deleted file mode 100644 index 8cd85e279..000000000 --- a/EXTERNAL_HEADERS/corecrypto/cc_debug.h +++ /dev/null @@ -1,75 +0,0 @@ -/* - * cc_debug.h - * corecrypto - * - * Created on 01/25/2012 - * - * Copyright (c) 2012,2014,2015 Apple Inc. All rights reserved. - * - */ - -//debug configuration header file -#ifndef _CORECRYPTO_CCN_DEBUG_H_ -#define _CORECRYPTO_CCN_DEBUG_H_ - -#include - -// DO NOT INCLUDE this HEADER file in CoreCrypto files added for XNU project or headers -// included by external clients. - -// ======================== -// Printf for corecrypto -// ======================== -#if CC_KERNEL - #include - #define cc_printf(x...) kprintf(x) - #if !CONFIG_EMBEDDED - extern int printf(const char *format, ...) __printflike(1,2); - #endif -#elif CC_USE_S3 || CC_IBOOT || CC_RTKIT || CC_RTKITROM - #include - #define cc_printf(x...) printf(x) -#elif defined(__ANDROID_API__) - #include - #define cc_printf(x...) __android_log_print(ANDROID_LOG_DEBUG, "corecrypto", x); -#else - #include - #define cc_printf(x...) 
fprintf(stderr, x) -#endif - -// ======================== -// Integer types -// ======================== - -#if CC_KERNEL -/* Those are not defined in libkern */ -#define PRIx64 "llx" -#define PRIx32 "x" -#define PRIx16 "hx" -#define PRIx8 "hhx" -#else -#include -#endif - -#if CCN_UNIT_SIZE == 8 -#define CCPRIx_UNIT ".016" PRIx64 -#elif CCN_UNIT_SIZE == 4 -#define CCPRIx_UNIT ".08" PRIx32 -#elif CCN_UNIT_SIZE == 2 -#define CCPRIx_UNIT ".04" PRIx16 -#elif CCN_UNIT_SIZE == 1 -#define CCPRIx_UNIT ".02" PRIx8 -#else -#error invalid CCN_UNIT_SIZE -#endif - -// ======================== -// Print utilities for corecrypto -// ======================== - -#include - -/* Print a byte array of arbitrary size */ -void cc_print(const char *label, size_t count, const uint8_t *s); - -#endif /* _CORECRYPTO_CCN_DEBUG_H_ */ diff --git a/EXTERNAL_HEADERS/corecrypto/cc_error.h b/EXTERNAL_HEADERS/corecrypto/cc_error.h index b382cc5c1..0942ab98f 100644 --- a/EXTERNAL_HEADERS/corecrypto/cc_error.h +++ b/EXTERNAL_HEADERS/corecrypto/cc_error.h @@ -1,11 +1,12 @@ -/* - * cc_error.h - * corecrypto - * - * Created on 11/14/2017 - * - * Copyright (c) 2017 Apple Inc. All rights reserved. +/* Copyright (c) (2017,2018,2019,2020) Apple Inc. All rights reserved. * + * corecrypto is licensed under Apple Inc.’s Internal Use License Agreement (which + * is contained in the License.txt file distributed with corecrypto) and only to + * people who accept that license. IMPORTANT: Any license rights granted to you by + * Apple Inc. (if any) are limited to internal use within your organization only on + * devices and computers you own or control, for the sole purpose of verifying the + * security characteristics and correct functioning of the Apple Software. You may + * not, directly or indirectly, redistribute the Apple Software or any portions thereof. */ #ifndef _CORECRYPTO_CC_ERROR_H_ @@ -140,6 +141,31 @@ enum { CCMODE_NONCE_EMPTY = -102, CCMODE_AD_EMPTY = -103, CCMODE_DECRYPTION_OR_VERIFICATION_ERR=-104, + CCMODE_BUFFER_OUT_IN_OVERLAP = -105, + + // Error codes for Secret Sharing + CCSS_ELEMENT_TOO_LARGE_FOR_FIELD = -120, + CCSS_NOT_ENOUGH_SHARES = -121, + CCSS_TOO_MANY_SHARES = -122, + CCSS_IMPROPER_DEGREE = -123, + CCSS_TWO_SHARES_FOR_SAME_X = -124, + CCSS_THRESHOLD_NOT_LARGE_ENOUGH = -125, + CCSS_SHARE_BAG_FULL = -126, + CCSS_SHARE_ALREADY_PRESENT_IN_SHARE_BAG = -127, + CCSS_THRESHOLD_LARGER_OR_EQUAL_TO_FIELD = -128, + CCSS_TOO_MANY_SHARES_REQUESTED = -129, + CCSS_FIELD_MISMATCH = -130, + CCSS_INDEX_OUT_OF_RANGE = -131, + + CCSAE_NOT_ENOUGH_COMMIT_PARTIAL_CALLS = -132, + CCSAE_GENERATE_COMMIT_CALL_AGAIN = -133, + + CCERR_VALID_SIGNATURE = CCERR_OK, + CCERR_INVALID_SIGNATURE = -146, + + CCERR_IOSERVICE_GETMATCHING = -147, + CCERR_IOSERVICE_OPEN = -148, + CCERR_IOCONNECT_CALL = -149, }; #define CCDRBG_STATUS_OK CCERR_OK diff --git a/EXTERNAL_HEADERS/corecrypto/cc_fault_canary.h b/EXTERNAL_HEADERS/corecrypto/cc_fault_canary.h new file mode 100644 index 000000000..4ec10cdfa --- /dev/null +++ b/EXTERNAL_HEADERS/corecrypto/cc_fault_canary.h @@ -0,0 +1,30 @@ +/* Copyright (c) (2020) Apple Inc. All rights reserved. + * + * corecrypto is licensed under Apple Inc.’s Internal Use License Agreement (which + * is contained in the License.txt file distributed with corecrypto) and only to + * people who accept that license. IMPORTANT: Any license rights granted to you by + * Apple Inc. 
(if any) are limited to internal use within your organization only on + * devices and computers you own or control, for the sole purpose of verifying the + * security characteristics and correct functioning of the Apple Software. You may + * not, directly or indirectly, redistribute the Apple Software or any portions thereof. + */ + + +#ifndef corecrypto_cc_fault_canary_h +#define corecrypto_cc_fault_canary_h + +#include "cc.h" + +#define CC_FAULT_CANARY_SIZE 16 +typedef uint8_t cc_fault_canary_t[CC_FAULT_CANARY_SIZE]; + +extern const cc_fault_canary_t CCEC_FAULT_CANARY; +extern const cc_fault_canary_t CCRSA_PKCS1_FAULT_CANARY; +extern const cc_fault_canary_t CCRSA_PSS_FAULT_CANARY; + +#define CC_FAULT_CANARY_MEMCPY(_dst_, _src_) memcpy(_dst_, _src_, CC_FAULT_CANARY_SIZE) +#define CC_FAULT_CANARY_CLEAR(_name_) memset(_name_, 0x00, CC_FAULT_CANARY_SIZE) + +#define CC_FAULT_CANARY_EQUAL(_a_, _b_) (cc_cmp_safe(CC_FAULT_CANARY_SIZE, _a_, _b_) == 0) + +#endif /* corecrypto_cc_fault_canary_h */ diff --git a/EXTERNAL_HEADERS/corecrypto/cc_macros.h b/EXTERNAL_HEADERS/corecrypto/cc_macros.h index f678f944d..b1d3fe1fd 100644 --- a/EXTERNAL_HEADERS/corecrypto/cc_macros.h +++ b/EXTERNAL_HEADERS/corecrypto/cc_macros.h @@ -1,11 +1,12 @@ -/* - * cc_macros.h - * corecrypto - * - * Created on 01/11/2012 - * - * Copyright (c) 2012,2015 Apple Inc. All rights reserved. +/* Copyright (c) (2012,2015,2016,2017,2019) Apple Inc. All rights reserved. * + * corecrypto is licensed under Apple Inc.’s Internal Use License Agreement (which + * is contained in the License.txt file distributed with corecrypto) and only to + * people who accept that license. IMPORTANT: Any license rights granted to you by + * Apple Inc. (if any) are limited to internal use within your organization only on + * devices and computers you own or control, for the sole purpose of verifying the + * security characteristics and correct functioning of the Apple Software. You may + * not, directly or indirectly, redistribute the Apple Software or any portions thereof. */ #ifndef _CORECRYPTO_CC_MACROS_H_ @@ -117,4 +118,33 @@ CC_UNUSED static char *cc_strstr(const char *file) { #endif #endif +#ifndef cc_require_or_return +#if (__CC_DEBUG_ASSERT_PRODUCTION_CODE) || (!CORECRYPTO_DEBUG_ENABLE_CC_REQUIRE_PRINTS) + #if defined(_WIN32) && defined (__clang__) + #define cc_require_or_return(assertion, value) \ + do { \ + if (!(assertion) ) { \ + return value; \ + } \ + } while ( 0 ) + #else + #define cc_require_or_return(assertion, value) \ + do { \ + if ( __builtin_expect(!(assertion), 0) ) { \ + return value; \ + } \ + } while ( 0 ) + #endif +#else + #define cc_require_or_return(assertion, value) \ + do { \ + if ( __builtin_expect(!(assertion), 0) ) { \ + __CC_DEBUG_REQUIRE_MESSAGE(__CC_DEBUG_ASSERT_COMPONENT_NAME_STRING, \ + #assertion, #exceptionLabel, 0, __FILE__, __LINE__, 0); \ + return value; \ + } \ + } while ( 0 ) +#endif +#endif + #endif /* _CORECRYPTO_CC_MACROS_H_ */ diff --git a/EXTERNAL_HEADERS/corecrypto/cc_priv.h b/EXTERNAL_HEADERS/corecrypto/cc_priv.h index 6a201eade..544813fad 100644 --- a/EXTERNAL_HEADERS/corecrypto/cc_priv.h +++ b/EXTERNAL_HEADERS/corecrypto/cc_priv.h @@ -1,17 +1,19 @@ -/* - * cc_priv.h - * corecrypto - * - * Created on 12/01/2010 - * - * Copyright (c) 2010,2011,2012,2014,2015 Apple Inc. All rights reserved. +/* Copyright (c) (2010,2011,2012,2014,2015,2016,2017,2018,2019,2020) Apple Inc. All rights reserved. 
* + * corecrypto is licensed under Apple Inc.’s Internal Use License Agreement (which + * is contained in the License.txt file distributed with corecrypto) and only to + * people who accept that license. IMPORTANT: Any license rights granted to you by + * Apple Inc. (if any) are limited to internal use within your organization only on + * devices and computers you own or control, for the sole purpose of verifying the + * security characteristics and correct functioning of the Apple Software. You may + * not, directly or indirectly, redistribute the Apple Software or any portions thereof. */ #ifndef _CORECRYPTO_CC_PRIV_H_ #define _CORECRYPTO_CC_PRIV_H_ #include +#include #include // Fork handlers for the stateful components of corecrypto. @@ -68,12 +70,12 @@ void cc_atfork_child(void); */ -// RTKitOSPlatform should replace CC_MEMCPY with memcpy +// RTKitOSPlatform should replace CC_MEMCPY with memcpy #define CC_MEMCPY(D,S,L) cc_memcpy((D),(S),(L)) #define CC_MEMMOVE(D,S,L) cc_memmove((D),(S),(L)) #define CC_MEMSET(D,V,L) cc_memset((D),(V),(L)) -#if __has_builtin(__builtin___memcpy_chk) && !CC_RTKIT +#if __has_builtin(__builtin___memcpy_chk) && !defined(_MSC_VER) #define cc_memcpy(dst, src, len) __builtin___memcpy_chk((dst), (src), (len), __builtin_object_size((dst), 1)) #define cc_memcpy_nochk(dst, src, len) __builtin___memcpy_chk((dst), (src), (len), __builtin_object_size((dst), 0)) #else @@ -81,13 +83,13 @@ void cc_atfork_child(void); #define cc_memcpy_nochk(dst, src, len) memcpy((dst), (src), (len)) #endif -#if __has_builtin(__builtin___memmove_chk) && !CC_RTKIT +#if __has_builtin(__builtin___memmove_chk) && !defined(_MSC_VER) #define cc_memmove(dst, src, len) __builtin___memmove_chk((dst), (src), (len), __builtin_object_size((dst), 1)) #else #define cc_memmove(dst, src, len) memmove((dst), (src), (len)) #endif -#if __has_builtin(__builtin___memset_chk) && !CC_RTKIT +#if __has_builtin(__builtin___memset_chk) && !defined(_MSC_VER) #define cc_memset(dst, val, len) __builtin___memset_chk((dst), (val), (len), __builtin_object_size((dst), 1)) #else #define cc_memset(dst, val, len) memset((dst), (val), (len)) @@ -455,30 +457,7 @@ do { \ /* If you find yourself seeing this warning, file a radar for someone to * check whether or not __builtin_clz() generates a constant-time * implementation on the architecture you are targeting. If it does, append - * the name of that architecture to the list of "safe" architectures above. */ */ -#endif - - -#if defined(_WIN32) - -#include -#include - -CC_INLINE CC_CONST unsigned clz64_win(uint64_t value) -{ - DWORD leading_zero; - _BitScanReverse64(&leading_zero, value); - return 63 - leading_zero; -} - - -CC_INLINE CC_CONST unsigned clz32_win(uint32_t value) -{ - DWORD leading_zero; - _BitScanReverse(&leading_zero, value); - return 31 - leading_zero; -} - + * the name of that architecture to the list of "safe" architectures above. */ #endif CC_INLINE CC_CONST unsigned cc_clz32_fallback(uint32_t data) @@ -515,6 +494,40 @@ CC_INLINE CC_CONST unsigned cc_clz64_fallback(uint64_t data) return b; } +CC_INLINE CC_CONST unsigned cc_ctz32_fallback(uint32_t data) +{ + unsigned int b = 0; + unsigned int bit = 0; + // Work from MSB to LSB + for (int i = 31; i >= 0; i--) { + bit = (data >> i) & 1; + // If the bit is 0, update the "trailing zero bits" counter. + b += (1 - bit); + /* If the bit is 0, (bit - 1) is 0xffff... therefore b is retained. + * If the bit is 1, (bit - 1) is 0 therefore b is set to 0. 
+ */ + b &= (bit - 1); + } + return b; +} + +CC_INLINE CC_CONST unsigned cc_ctz64_fallback(uint64_t data) +{ + unsigned int b = 0; + unsigned int bit = 0; + // Work from MSB to LSB + for (int i = 63; i >= 0; i--) { + bit = (data >> i) & 1; + // If the bit is 0, update the "trailing zero bits" counter. + b += (1 - bit); + /* If the bit is 0, (bit - 1) is 0xffff... therefore b is retained. + * If the bit is 1, (bit - 1) is 0 therefore b is set to 0. + */ + b &= (bit - 1); + } + return b; +} + /*! @function cc_clz32 @abstract Count leading zeros of a nonzero 32-bit value @@ -526,8 +539,9 @@ CC_INLINE CC_CONST unsigned cc_clz64_fallback(uint64_t data) @discussion @p data is assumed to be nonzero. */ CC_INLINE CC_CONST unsigned cc_clz32(uint32_t data) { + cc_assert(data != 0); #if defined(_WIN32) - return clz32_win(data); + return cc_clz32_fallback(data); #elif defined(__x86_64__) || defined(__i386__) || defined(__arm64__) || defined(__arm__) || defined(__GNUC__) cc_static_assert(sizeof(unsigned) == 4, "clz relies on an unsigned int being 4 bytes"); return (unsigned)__builtin_clz(data); @@ -547,8 +561,9 @@ CC_INLINE CC_CONST unsigned cc_clz32(uint32_t data) { @discussion @p data is assumed to be nonzero. */ CC_INLINE CC_CONST unsigned cc_clz64(uint64_t data) { + cc_assert(data != 0); #if defined(_WIN32) - return clz64_win(data); + return cc_clz64_fallback(data); #elif defined(__x86_64__) || defined(__i386__) || defined(__arm64__) || defined(__arm__) || defined(__GNUC__) return (unsigned)__builtin_clzll(data); #else @@ -556,6 +571,145 @@ CC_INLINE CC_CONST unsigned cc_clz64(uint64_t data) { #endif } +/*! + @function cc_ctz32 + @abstract Count trailing zeros of a nonzero 32-bit value + + @param data A nonzero 32-bit value + + @result Count of trailing zeros of @p data + + @discussion @p data is assumed to be nonzero. +*/ +CC_INLINE CC_CONST unsigned cc_ctz32(uint32_t data) { + cc_assert(data != 0); +#if defined(_WIN32) + return cc_ctz32_fallback(data); +#elif defined(__x86_64__) || defined(__i386__) || defined(__arm64__) || defined(__arm__) || defined(__GNUC__) + cc_static_assert(sizeof(unsigned) == 4, "ctz relies on an unsigned int being 4 bytes"); + return (unsigned)__builtin_ctz(data); +#else + return cc_ctz32_fallback(data); +#endif +} + +/*! + @function cc_ctz64 + @abstract Count trailing zeros of a nonzero 64-bit value + + @param data A nonzero 64-bit value + + @result Count of trailing zeros of @p data + + @discussion @p data is assumed to be nonzero. +*/ +CC_INLINE CC_CONST unsigned cc_ctz64(uint64_t data) { + cc_assert(data != 0); +#if defined(_WIN32) + return cc_ctz64_fallback(data); +#elif defined(__x86_64__) || defined(__i386__) || defined(__arm64__) || defined(__arm__) || defined(__GNUC__) + return (unsigned)__builtin_ctzll(data); +#else + return cc_ctz64_fallback(data); +#endif +} + +/*! + @function cc_ffs32_fallback + @abstract Find first bit set in a 32-bit value + + @param data A 32-bit value + + @result One plus the index of the least-significant bit set in @p data or, if @p data is zero, zero + */ +CC_INLINE CC_CONST unsigned cc_ffs32_fallback(int32_t data) +{ + unsigned b = 0; + unsigned bit = 0; + unsigned seen = 0; + + // Work from LSB to MSB + for (int i = 0; i < 32; i++) { + bit = ((uint32_t)data >> i) & 1; + + // Track whether we've seen a 1 bit. + seen |= bit; + + // If the bit is 0 and we haven't seen a 1 yet, increment b. + b += (1 - bit) & (seen - 1); + } + + // If we saw a 1, return b + 1, else 0. + return (~(seen - 1)) & (b + 1); +} + +/*! 
+ @function cc_ffs64_fallback + @abstract Find first bit set in a 64-bit value + + @param data A 64-bit value + + @result One plus the index of the least-significant bit set in @p data or, if @p data is zero, zero + */ +CC_INLINE CC_CONST unsigned cc_ffs64_fallback(int64_t data) +{ + unsigned b = 0; + unsigned bit = 0; + unsigned seen = 0; + + // Work from LSB to MSB + for (int i = 0; i < 64; i++) { + bit = ((uint64_t)data >> i) & 1; + + // Track whether we've seen a 1 bit. + seen |= bit; + + // If the bit is 0 and we haven't seen a 1 yet, increment b. + b += (1 - bit) & (seen - 1); + } + + // If we saw a 1, return b + 1, else 0. + return (~(seen - 1)) & (b + 1); +} + +/*! + @function cc_ffs32 + @abstract Find first bit set in a 32-bit value + + @param data A 32-bit value + + @result One plus the index of the least-significant bit set in @p data or, if @p data is zero, zero + */ +CC_INLINE CC_CONST unsigned cc_ffs32(int32_t data) +{ + cc_static_assert(sizeof(int) == 4, "ffs relies on an int being 4 bytes"); +#ifdef _WIN32 + return cc_ffs32_fallback(data); +#else + return (unsigned)__builtin_ffs(data); +#endif +} + +/*! + @function cc_ffs64 + @abstract Find first bit set in a 64-bit value + + @param data A 64-bit value + + @result One plus the index of the least-significant bit set in @p data or, if @p data is zero, zero + */ +CC_INLINE CC_CONST unsigned cc_ffs64(int64_t data) +{ +#ifdef _WIN32 + return cc_ffs64_fallback(data); +#else + return (unsigned)__builtin_ffsll(data); +#endif +} + +#define cc_add_overflow __builtin_add_overflow +#define cc_mul_overflow __builtin_mul_overflow + /* HEAVISIDE_STEP (shifted by one) function f(x): x->0, when x=0 x->1, when x>0 @@ -576,8 +730,6 @@ CC_INLINE CC_CONST unsigned cc_clz64(uint64_t data) { #define CC_CARRY_2BITS(x) (((x>>1) | x) & 0x1) #define CC_CARRY_3BITS(x) (((x>>2) | (x>>1) | x) & 0x1) -/* Set a variable to the biggest power of 2 which can be represented */ -#define MAX_POWER_OF_2(x) ((__typeof__(x))1<<(8*sizeof(x)-1)) #define cc_ceiling(a,b) (((a)+((b)-1))/(b)) #define CC_BITLEN_TO_BYTELEN(x) cc_ceiling((x), 8) @@ -597,11 +749,11 @@ void *cc_muxp(int s, const void *a, const void *b); @param s Selection parameter s. Must be 0 or 1. @param r Output, set to a if s=1, or b if s=0. */ -#define CC_MUXU(r, s, a, b) \ -{ \ - __typeof__(r) _cond = ((__typeof__(r))(s)-(__typeof__(r))1); \ - r = (~_cond&(a))|(_cond&(b)); \ -} +#define CC_MUXU(r, s, a, b) \ + { \ + __typeof__(r) _cond = (__typeof__(r))((s)-1); \ + r = (~_cond & (a)) | (_cond & (b)); \ + } #define CC_PROVIDES_ABORT (!(CC_USE_SEPROM || CC_USE_S3 || CC_BASEBAND || CC_EFI || CC_IBOOT || CC_RTKITROM)) @@ -641,6 +793,20 @@ void cc_try_abort(CC_UNUSED const char *msg) #endif +#if __has_builtin(__builtin_expect) + #define CC_UNLIKELY(cond) __builtin_expect(cond, 0) +#else + #define CC_UNLIKELY(cond) cond +#endif + +CC_INLINE +void cc_try_abort_if(bool condition, const char *msg) +{ + if (CC_UNLIKELY(condition)) { + cc_try_abort(msg); + } +} + /* Unfortunately, since we export this symbol, this declaration needs to be in a public header to satisfy TAPI. diff --git a/EXTERNAL_HEADERS/corecrypto/cc_runtime_config.h b/EXTERNAL_HEADERS/corecrypto/cc_runtime_config.h index 996accee1..6ef5a19c9 100644 --- a/EXTERNAL_HEADERS/corecrypto/cc_runtime_config.h +++ b/EXTERNAL_HEADERS/corecrypto/cc_runtime_config.h @@ -1,11 +1,12 @@ -/* - * cc_runtime_config.h - * corecrypto - * - * Created on 09/18/2012 - * - * Copyright (c) 2012,2014,2015 Apple Inc. All rights reserved. 
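A few concrete values for the bit-scan and constant-time helpers added to cc_priv.h above, assuming only the semantics documented in their comments (the wrapper function name is arbitrary).

#include <corecrypto/cc_priv.h>

static uint32_t mux_example(void)
{
    /* For x = 0x00010000 (only bit 16 set):
     *   cc_clz32(x)       == 15   15 leading zero bits
     *   cc_ctz32(x)       == 16   16 trailing zero bits
     *   cc_ffs32(0x10000) == 17   1 + index of the least-significant set bit
     *   cc_ffs32(0)       == 0    zero input is legal only for the ffs variants */
    uint32_t r;
    /* CC_MUXU is a branch-free select: s must be 0 or 1; r = a when s == 1, b when s == 0. */
    CC_MUXU(r, 1, 0xAAAAAAAAu, 0x55555555u);
    return r; /* 0xAAAAAAAA */
}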
+/* Copyright (c) (2012,2014,2015,2016,2017,2018,2019,2020) Apple Inc. All rights reserved. * + * corecrypto is licensed under Apple Inc.’s Internal Use License Agreement (which + * is contained in the License.txt file distributed with corecrypto) and only to + * people who accept that license. IMPORTANT: Any license rights granted to you by + * Apple Inc. (if any) are limited to internal use within your organization only on + * devices and computers you own or control, for the sole purpose of verifying the + * security characteristics and correct functioning of the Apple Software. You may + * not, directly or indirectly, redistribute the Apple Software or any portions thereof. */ #ifndef CORECRYPTO_CC_RUNTIME_CONFIG_H_ @@ -20,43 +21,68 @@ #if CC_KERNEL #include #define CC_HAS_RDRAND() ((cpuid_features() & CPUID_FEATURE_RDRAND) != 0) -#elif CC_XNU_KERNEL_AVAILABLE - #include - - extern int _cpu_capabilities; - #define CC_HAS_RDRAND() (_cpu_capabilities & kHasRDRAND) -#else - #define CC_HAS_RDRAND() 0 -#endif - -#if (CCSHA1_VNG_INTEL || CCSHA2_VNG_INTEL || CCAES_INTEL_ASM) - -#if CC_KERNEL - #include #define CC_HAS_AESNI() ((cpuid_features() & CPUID_FEATURE_AES) != 0) #define CC_HAS_SupplementalSSE3() ((cpuid_features() & CPUID_FEATURE_SSSE3) != 0) #define CC_HAS_AVX1() ((cpuid_features() & CPUID_FEATURE_AVX1_0) != 0) #define CC_HAS_AVX2() ((cpuid_info()->cpuid_leaf7_features & CPUID_LEAF7_FEATURE_AVX2) != 0) #define CC_HAS_AVX512_AND_IN_KERNEL() ((cpuid_info()->cpuid_leaf7_features & CPUID_LEAF7_FEATURE_AVX512F) !=0) + #define CC_HAS_BMI2() ((cpuid_info()->cpuid_leaf7_features & CPUID_LEAF7_FEATURE_BMI2) != 0) + #define CC_HAS_ADX() ((cpuid_info()->cpuid_leaf7_features & CPUID_LEAF7_FEATURE_ADX) != 0) -#elif CC_XNU_KERNEL_AVAILABLE +#elif CC_XNU_KERNEL_AVAILABLE && CC_INTERNAL_SDK #include - - extern int _cpu_capabilities; - #define CC_HAS_AESNI() (_cpu_capabilities & kHasAES) - #define CC_HAS_SupplementalSSE3() (_cpu_capabilities & kHasSupplementalSSE3) - #define CC_HAS_AVX1() (_cpu_capabilities & kHasAVX1_0) - #define CC_HAS_AVX2() (_cpu_capabilities & kHasAVX2_0) + #define CC_HAS_RDRAND() (_get_cpu_capabilities() & kHasRDRAND) + #define CC_HAS_AESNI() (_get_cpu_capabilities() & kHasAES) + #define CC_HAS_SupplementalSSE3() (_get_cpu_capabilities() & kHasSupplementalSSE3) + #define CC_HAS_AVX1() (_get_cpu_capabilities() & kHasAVX1_0) + #define CC_HAS_AVX2() (_get_cpu_capabilities() & kHasAVX2_0) #define CC_HAS_AVX512_AND_IN_KERNEL() 0 -#else + #define CC_HAS_BMI2() (_get_cpu_capabilities() & kHasBMI2) + #define CC_HAS_ADX() (_get_cpu_capabilities() & kHasADX) + +#elif CC_SGX +// SGX has no cpuid function, so these will fail #define CC_HAS_AESNI() 0 #define CC_HAS_SupplementalSSE3() 0 #define CC_HAS_AVX1() 0 #define CC_HAS_AVX2() 0 - #define CC_HAS_AVX512_AND_IN_KERNEL() 0 + #define CC_HAS_AVX512_AND_IN_KERNEL() 0 + #define CC_HAS_BMI2() 0 + #define CC_HAS_RDRAND() 0 + #define CC_HAS_ADX() 0 +#else + #define CC_HAS_AESNI() __builtin_cpu_supports("aes") + #define CC_HAS_SupplementalSSE3() __builtin_cpu_supports("ssse3") + #define CC_HAS_AVX1() __builtin_cpu_supports("avx") + #define CC_HAS_AVX2() __builtin_cpu_supports("avx2") + #define CC_HAS_AVX512_AND_IN_KERNEL() 0 + #define CC_HAS_BMI2() __builtin_cpu_supports("bmi2") +#if CC_LINUX || !CC_INTERNAL_SDK + #include + #include + + CC_INLINE bool _cpu_supports_rdrand() + { + unsigned int eax, ebx, ecx, edx; + __cpuid(1, eax, ebx, ecx, edx); + return ecx & bit_RDRND; + } + + CC_INLINE bool _cpu_supports_adx() + { + unsigned int eax, ebx, ecx, 
edx; + __cpuid_count(7, 0, eax, ebx, ecx, edx); + return ebx & bit_ADX; + } + + #define CC_HAS_RDRAND() _cpu_supports_rdrand() + #define CC_HAS_ADX() _cpu_supports_adx() +#else + #define CC_HAS_RDRAND() 0 + #define CC_HAS_ADX() 0 #endif -#endif // (CCSHA1_VNG_INTEL || CCSHA2_VNG_INTEL || CCAES_INTEL_ASM) +#endif #endif // defined(__x86_64__) || defined(__i386__) diff --git a/EXTERNAL_HEADERS/corecrypto/ccaes.h b/EXTERNAL_HEADERS/corecrypto/ccaes.h index 9c664b842..6c65c4060 100644 --- a/EXTERNAL_HEADERS/corecrypto/ccaes.h +++ b/EXTERNAL_HEADERS/corecrypto/ccaes.h @@ -1,11 +1,12 @@ -/* - * ccaes.h - * corecrypto - * - * Created on 12/10/2010 - * - * Copyright (c) 2010,2011,2012,2013,2015 Apple Inc. All rights reserved. +/* Copyright (c) (2010,2011,2012,2013,2015,2016,2017,2018,2019) Apple Inc. All rights reserved. * + * corecrypto is licensed under Apple Inc.’s Internal Use License Agreement (which + * is contained in the License.txt file distributed with corecrypto) and only to + * people who accept that license. IMPORTANT: Any license rights granted to you by + * Apple Inc. (if any) are limited to internal use within your organization only on + * devices and computers you own or control, for the sole purpose of verifying the + * security characteristics and correct functioning of the Apple Software. You may + * not, directly or indirectly, redistribute the Apple Software or any portions thereof. */ #ifndef _CORECRYPTO_CCAES_H_ @@ -61,27 +62,18 @@ extern const struct ccmode_ctr *ccaes_ios_mux_ctr_crypt_mode(void); #endif #if CCAES_INTEL_ASM -//extern const struct ccmode_ecb ccaes_intel_ecb_encrypt_mode; -//extern const struct ccmode_ecb ccaes_intel_ecb_decrypt_mode; - extern const struct ccmode_ecb ccaes_intel_ecb_encrypt_opt_mode; extern const struct ccmode_ecb ccaes_intel_ecb_encrypt_aesni_mode; extern const struct ccmode_ecb ccaes_intel_ecb_decrypt_opt_mode; extern const struct ccmode_ecb ccaes_intel_ecb_decrypt_aesni_mode; -//extern const struct ccmode_cbc ccaes_intel_cbc_encrypt_mode; -//extern const struct ccmode_cbc ccaes_intel_cbc_decrypt_mode; - extern const struct ccmode_cbc ccaes_intel_cbc_encrypt_opt_mode; extern const struct ccmode_cbc ccaes_intel_cbc_encrypt_aesni_mode; extern const struct ccmode_cbc ccaes_intel_cbc_decrypt_opt_mode; extern const struct ccmode_cbc ccaes_intel_cbc_decrypt_aesni_mode; -//extern const struct ccmode_xts ccaes_intel_xts_encrypt_mode; -//extern const struct ccmode_xts ccaes_intel_xts_decrypt_mode; - extern const struct ccmode_xts ccaes_intel_xts_encrypt_opt_mode; extern const struct ccmode_xts ccaes_intel_xts_encrypt_aesni_mode; @@ -125,4 +117,17 @@ const struct ccmode_siv *ccaes_siv_decrypt_mode(void); const struct ccmode_siv_hmac *ccaes_siv_hmac_sha256_encrypt_mode(void); const struct ccmode_siv_hmac *ccaes_siv_hmac_sha256_decrypt_mode(void); +/*! + @function ccaes_unwind + @abstract "Unwind" an AES encryption key to the equivalent decryption key. + + @param key_nbytes Length in bytes of both the input and output keys + @param key The input AES encryption key + @param out The output AES decryption key + + @result @p CCERR_OK iff successful. + @discussion Only AES256 (i.e. 32-byte) keys are supported. This function is not necessary in typical AES usage; consult the maintainers before using it. 
+*/ +int ccaes_unwind(size_t key_nbytes, const void *key, void *out); + #endif /* _CORECRYPTO_CCAES_H_ */ diff --git a/EXTERNAL_HEADERS/corecrypto/ccasn1.h b/EXTERNAL_HEADERS/corecrypto/ccasn1.h index 75aac6e68..0e7081b52 100644 --- a/EXTERNAL_HEADERS/corecrypto/ccasn1.h +++ b/EXTERNAL_HEADERS/corecrypto/ccasn1.h @@ -1,11 +1,12 @@ -/* - * ccasn1.h - * corecrypto - * - * Created on 11/16/2010 - * - * Copyright (c) 2010,2011,2012,2015 Apple Inc. All rights reserved. +/* Copyright (c) (2010,2011,2012,2015,2016,2017,2018,2019) Apple Inc. All rights reserved. * + * corecrypto is licensed under Apple Inc.’s Internal Use License Agreement (which + * is contained in the License.txt file distributed with corecrypto) and only to + * people who accept that license. IMPORTANT: Any license rights granted to you by + * Apple Inc. (if any) are limited to internal use within your organization only on + * devices and computers you own or control, for the sole purpose of verifying the + * security characteristics and correct functioning of the Apple Software. You may + * not, directly or indirectly, redistribute the Apple Software or any portions thereof. */ #ifndef _CORECRYPTO_CCASN1_H_ diff --git a/EXTERNAL_HEADERS/corecrypto/ccchacha20poly1305.h b/EXTERNAL_HEADERS/corecrypto/ccchacha20poly1305.h index 4ca59e63b..f2ce5e265 100644 --- a/EXTERNAL_HEADERS/corecrypto/ccchacha20poly1305.h +++ b/EXTERNAL_HEADERS/corecrypto/ccchacha20poly1305.h @@ -1,9 +1,13 @@ -/* - ccchacha20poly1305.h - corecrypto - - Copyright 2014 Apple Inc. All rights reserved. -*/ +/* Copyright (c) (2016,2017,2018,2019) Apple Inc. All rights reserved. + * + * corecrypto is licensed under Apple Inc.’s Internal Use License Agreement (which + * is contained in the License.txt file distributed with corecrypto) and only to + * people who accept that license. IMPORTANT: Any license rights granted to you by + * Apple Inc. (if any) are limited to internal use within your organization only on + * devices and computers you own or control, for the sole purpose of verifying the + * security characteristics and correct functioning of the Apple Software. You may + * not, directly or indirectly, redistribute the Apple Software or any portions thereof. + */ #ifndef _CORECRYPTO_CCCHACHA20POLY1305_H_ #define _CORECRYPTO_CCCHACHA20POLY1305_H_ diff --git a/EXTERNAL_HEADERS/corecrypto/cccmac.h b/EXTERNAL_HEADERS/corecrypto/cccmac.h index e29e543dd..1b85184ad 100644 --- a/EXTERNAL_HEADERS/corecrypto/cccmac.h +++ b/EXTERNAL_HEADERS/corecrypto/cccmac.h @@ -1,11 +1,12 @@ -/* - * cccmac.h - * corecrypto - * - * Created on 11/07/2013 - * - * Copyright (c) 2013,2014,2015 Apple Inc. All rights reserved. +/* Copyright (c) (2013,2014,2015,2016,2017,2019) Apple Inc. All rights reserved. * + * corecrypto is licensed under Apple Inc.’s Internal Use License Agreement (which + * is contained in the License.txt file distributed with corecrypto) and only to + * people who accept that license. IMPORTANT: Any license rights granted to you by + * Apple Inc. (if any) are limited to internal use within your organization only on + * devices and computers you own or control, for the sole purpose of verifying the + * security characteristics and correct functioning of the Apple Software. You may + * not, directly or indirectly, redistribute the Apple Software or any portions thereof. 
*/ #ifndef _CORECRYPTO_cccmac_H_ diff --git a/EXTERNAL_HEADERS/corecrypto/ccder.h b/EXTERNAL_HEADERS/corecrypto/ccder.h deleted file mode 100644 index 5bd102962..000000000 --- a/EXTERNAL_HEADERS/corecrypto/ccder.h +++ /dev/null @@ -1,317 +0,0 @@ -/* - * ccder.h - * corecrypto - * - * Created on 03/14/2012 - * - * Copyright (c) 2012,2013,2014,2015 Apple Inc. All rights reserved. - * - */ - -#ifndef _CORECRYPTO_CCDER_H_ -#define _CORECRYPTO_CCDER_H_ - -#include -#include - -#define CCDER_MULTIBYTE_TAGS 1 - -#ifdef CCDER_MULTIBYTE_TAGS -typedef unsigned long ccder_tag; -#else -typedef uint8_t ccder_tag; -#endif - -/* DER types to be used with ccder_decode and ccder_encode functions. */ -#define CCDER_EOL CCASN1_EOL -#define CCDER_BOOLEAN CCASN1_BOOLEAN -#define CCDER_INTEGER CCASN1_INTEGER -#define CCDER_BIT_STRING CCASN1_BIT_STRING -#define CCDER_OCTET_STRING CCASN1_OCTET_STRING -#define CCDER_NULL CCASN1_NULL -#define CCDER_OBJECT_IDENTIFIER CCASN1_OBJECT_IDENTIFIER -#define CCDER_OBJECT_DESCRIPTOR CCASN1_OBJECT_DESCRIPTOR - /* External or instance-of 0x08 */ -#define CCDER_REAL CCASN1_REAL -#define CCDER_ENUMERATED CCASN1_ENUMERATED -#define CCDER_EMBEDDED_PDV CCASN1_EMBEDDED_PDV -#define CCDER_UTF8_STRING CCASN1_UTF8_STRING - /* 0x0d */ - /* 0x0e */ - /* 0x0f */ -#define CCDER_SEQUENCE CCASN1_SEQUENCE -#define CCDER_SET CCASN1_SET -#define CCDER_NUMERIC_STRING CCASN1_NUMERIC_STRING -#define CCDER_PRINTABLE_STRING CCASN1_PRINTABLE_STRING -#define CCDER_T61_STRING CCASN1_T61_STRING -#define CCDER_VIDEOTEX_STRING CCASN1_VIDEOTEX_STRING -#define CCDER_IA5_STRING CCASN1_IA5_STRING -#define CCDER_UTC_TIME CCASN1_UTC_TIME -#define CCDER_GENERALIZED_TIME CCASN1_GENERALIZED_TIME -#define CCDER_GRAPHIC_STRING CCASN1_GRAPHIC_STRING -#define CCDER_VISIBLE_STRING CCASN1_VISIBLE_STRING -#define CCDER_GENERAL_STRING CCASN1_GENERAL_STRING -#define CCDER_UNIVERSAL_STRING CCASN1_UNIVERSAL_STRING - /* 0x1d */ -#define CCDER_BMP_STRING CCASN1_BMP_STRING -#define CCDER_HIGH_TAG_NUMBER CCASN1_HIGH_TAG_NUMBER -#define CCDER_TELETEX_STRING CCDER_T61_STRING - -#ifdef CCDER_MULTIBYTE_TAGS -#define CCDER_TAG_MASK ((ccder_tag)~0) -#define CCDER_TAGNUM_MASK ((ccder_tag)~((ccder_tag)7 << (sizeof(ccder_tag) * 8 - 3))) - -#define CCDER_METHOD_MASK ((ccder_tag)1 << (sizeof(ccder_tag) * 8 - 3)) -#define CCDER_PRIMITIVE ((ccder_tag)0 << (sizeof(ccder_tag) * 8 - 3)) -#define CCDER_CONSTRUCTED ((ccder_tag)1 << (sizeof(ccder_tag) * 8 - 3)) - -#define CCDER_CLASS_MASK ((ccder_tag)3 << (sizeof(ccder_tag) * 8 - 2)) -#define CCDER_UNIVERSAL ((ccder_tag)0 << (sizeof(ccder_tag) * 8 - 2)) -#define CCDER_APPLICATION ((ccder_tag)1 << (sizeof(ccder_tag) * 8 - 2)) -#define CCDER_CONTEXT_SPECIFIC ((ccder_tag)2 << (sizeof(ccder_tag) * 8 - 2)) -#define CCDER_PRIVATE ((ccder_tag)3 << (sizeof(ccder_tag) * 8 - 2)) -#else /* !CCDER_MULTIBYTE_TAGS */ -#define CCDER_TAG_MASK CCASN1_TAG_MASK -#define CCDER_TAGNUM_MASK CCASN1_TAGNUM_MASK - -#define CCDER_METHOD_MASK CCASN1_METHOD_MASK -#define CCDER_PRIMITIVE CCASN1_PRIMITIVE -#define CCDER_CONSTRUCTED CCASN1_CONSTRUCTED - -#define CCDER_CLASS_MASK CCASN1_CLASS_MASK -#define CCDER_UNIVERSAL CCASN1_UNIVERSAL -#define CCDER_APPLICATION CCASN1_APPLICATION -#define CCDER_CONTEXT_SPECIFIC CCASN1_CONTEXT_SPECIFIC -#define CCDER_PRIVATE CCASN1_PRIVATE -#endif /* !CCDER_MULTIBYTE_TAGS */ -#define CCDER_CONSTRUCTED_SET (CCDER_SET | CCDER_CONSTRUCTED) -#define CCDER_CONSTRUCTED_SEQUENCE (CCDER_SEQUENCE | CCDER_CONSTRUCTED) - - -// MARK: ccder_sizeof_ functions - -/* Returns the size of an asn1 encoded item 
of length l in bytes. */ -CC_CONST -size_t ccder_sizeof(ccder_tag tag, size_t len); - -CC_PURE -size_t ccder_sizeof_implicit_integer(ccder_tag implicit_tag, - cc_size n, const cc_unit *s); - -CC_PURE -size_t ccder_sizeof_implicit_octet_string(ccder_tag implicit_tag, - cc_size n, const cc_unit *s); - -CC_CONST -size_t ccder_sizeof_implicit_raw_octet_string(ccder_tag implicit_tag, - size_t s_size); -CC_CONST -size_t ccder_sizeof_implicit_uint64(ccder_tag implicit_tag, uint64_t value); - -CC_PURE -size_t ccder_sizeof_integer(cc_size n, const cc_unit *s); - -CC_CONST -size_t ccder_sizeof_len(size_t len); - -CC_PURE -size_t ccder_sizeof_octet_string(cc_size n, const cc_unit *s); - -CC_PURE -size_t ccder_sizeof_oid(ccoid_t oid); - -CC_CONST -size_t ccder_sizeof_raw_octet_string(size_t s_size); - -CC_CONST -size_t ccder_sizeof_tag(ccder_tag tag); - -CC_CONST -size_t ccder_sizeof_uint64(uint64_t value); - -// MARK: ccder_encode_ functions. - -/* Encode a tag backwards, der_end should point to one byte past the end of - destination for the tag, returns a pointer to the first byte of the tag. - Returns NULL if there is an encoding error. */ -CC_NONNULL((2)) -uint8_t *ccder_encode_tag(ccder_tag tag, const uint8_t *der, uint8_t *der_end); - -/* Returns a pointer to the start of the len field. returns NULL if there - is an encoding error. */ -CC_NONNULL((2)) -uint8_t * -ccder_encode_len(size_t len, const uint8_t *der, uint8_t *der_end); - -/* der_end should point to the first byte of the content of this der item. */ -CC_NONNULL((3)) -uint8_t * -ccder_encode_tl(ccder_tag tag, size_t len, const uint8_t *der, uint8_t *der_end); - -CC_PURE CC_NONNULL((2)) -uint8_t * -ccder_encode_body_nocopy(size_t size, const uint8_t *der, uint8_t *der_end); - -/* Encode the tag and length of a constructed object. der is the lower - bound, der_end is one byte paste where we want to write the length and - body_end is one byte past the end of the body of the der object we are - encoding the tag and length of. */ -CC_NONNULL((2, 3)) -uint8_t * -ccder_encode_constructed_tl(ccder_tag tag, const uint8_t *body_end, - const uint8_t *der, uint8_t *der_end); - -/* Encodes oid into der and returns - der + ccder_sizeof_oid(oid). 
*/ -CC_NONNULL((1, 2)) -uint8_t *ccder_encode_oid(ccoid_t oid, const uint8_t *der, uint8_t *der_end); - -CC_NONNULL((3, 4)) -uint8_t *ccder_encode_implicit_integer(ccder_tag implicit_tag, - cc_size n, const cc_unit *s, - const uint8_t *der, uint8_t *der_end); - -CC_NONNULL((2, 3)) -uint8_t *ccder_encode_integer(cc_size n, const cc_unit *s, - const uint8_t *der, uint8_t *der_end); - -CC_NONNULL((3)) -uint8_t *ccder_encode_implicit_uint64(ccder_tag implicit_tag, - uint64_t value, - const uint8_t *der, uint8_t *der_end); - -CC_NONNULL((2)) -uint8_t *ccder_encode_uint64(uint64_t value, - const uint8_t *der, uint8_t *der_end); - -CC_NONNULL((3, 4)) -uint8_t *ccder_encode_implicit_octet_string(ccder_tag implicit_tag, - cc_size n, const cc_unit *s, - const uint8_t *der, - uint8_t *der_end); - -CC_NONNULL((2, 3)) -uint8_t *ccder_encode_octet_string(cc_size n, const cc_unit *s, - const uint8_t *der, uint8_t *der_end); - -CC_NONNULL((3, 4)) -uint8_t *ccder_encode_implicit_raw_octet_string(ccder_tag implicit_tag, - size_t s_size, const uint8_t *s, - const uint8_t *der, - uint8_t *der_end); - -CC_NONNULL((2, 3)) -uint8_t *ccder_encode_raw_octet_string(size_t s_size, const uint8_t *s, - const uint8_t *der, uint8_t *der_end); - -size_t ccder_encode_eckey_size(size_t priv_size, ccoid_t oid, size_t pub_size); - -CC_NONNULL((2, 5, 6, 7)) -uint8_t *ccder_encode_eckey(size_t priv_size, const uint8_t *priv_key, - ccoid_t oid, - size_t pub_size, const uint8_t *pub_key, - uint8_t *der, uint8_t *der_end); - -/* ccder_encode_body COPIES the body into the der. - It's inefficient – especially when you already have to convert to get to - the form for the body. - see encode integer for the right way to unify conversion and insertion */ -CC_NONNULL((3)) -uint8_t * -ccder_encode_body(size_t size, const uint8_t* body, - const uint8_t *der, uint8_t *der_end); - -// MARK: ccder_decode_ functions. - -/* Returns a pointer to the start of the length field, and returns the decoded tag in tag. - returns NULL if there is a decoding error. */ -CC_NONNULL((1, 3)) -const uint8_t *ccder_decode_tag(ccder_tag *tagp, const uint8_t *der, const uint8_t *der_end); - -CC_NONNULL((1, 3)) -const uint8_t *ccder_decode_len(size_t *lenp, const uint8_t *der, const uint8_t *der_end); - -/* Returns a pointer to the start of the der object, and returns the length in len. - returns NULL if there is a decoding error. */ -CC_NONNULL((2, 4)) -const uint8_t *ccder_decode_tl(ccder_tag expected_tag, size_t *lenp, - const uint8_t *der, const uint8_t *der_end); - -CC_NONNULL((2, 4)) -const uint8_t * -ccder_decode_constructed_tl(ccder_tag expected_tag, const uint8_t **body_end, - const uint8_t *der, const uint8_t *der_end); - -CC_NONNULL((1, 3)) -const uint8_t * -ccder_decode_sequence_tl(const uint8_t **body_end, - const uint8_t *der, const uint8_t *der_end); - -/*! - @function ccder_decode_uint_n - @abstract length in cc_unit of a der unsigned integer after skipping the leading zeroes - - @param der Beginning of input DER buffer - @param der_end End of input DER buffer - @param n Output the number of cc_unit required to represent the number - - @result First byte after the parsed integer or - NULL if the integer is not valid (negative) or reach der_end when reading the integer - */ - -CC_NONNULL((3)) -const uint8_t *ccder_decode_uint_n(cc_size *n, - const uint8_t *der, const uint8_t *der_end); - -/*! 
- @function ccder_decode_uint - @abstract Represent in cc_unit a der unsigned integer after skipping the leading zeroes - - @param der Beginning of input DER buffer - @param der_end End of input DER buffer - @param n Number of cc_unit allocated for r - @param r Allocated array of cc_unit to copy the integer into. - - @result First byte after the parsed integer or - NULL if the integer is not valid (negative) - reach der_end when reading the integer - n cc_unit is not enough to represent the integer - */ -CC_NONNULL((4)) -const uint8_t *ccder_decode_uint(cc_size n, cc_unit *r, - const uint8_t *der, const uint8_t *der_end); - -CC_NONNULL((3)) -const uint8_t *ccder_decode_uint64(uint64_t* r, - const uint8_t *der, const uint8_t *der_end); - -/* Decode SEQUENCE { r, s -- (unsigned)integer } in der into r and s. - Returns NULL on decode errors, returns pointer just past the end of the - sequence of integers otherwise. */ -CC_NONNULL((2, 3, 5)) -const uint8_t *ccder_decode_seqii(cc_size n, cc_unit *r, cc_unit *s, - const uint8_t *der, const uint8_t *der_end); -CC_NONNULL((1, 3)) -const uint8_t *ccder_decode_oid(ccoid_t *oidp, - const uint8_t *der, const uint8_t *der_end); - -CC_NONNULL((1, 2, 4)) -const uint8_t *ccder_decode_bitstring(const uint8_t **bit_string, - size_t *bit_length, - const uint8_t *der, const uint8_t *der_end); - -CC_NONNULL((1, 2, 3, 4, 5, 6, 8)) -const uint8_t *ccder_decode_eckey(uint64_t *version, - size_t *priv_size, const uint8_t **priv_key, - ccoid_t *oid, - size_t *pub_size, const uint8_t **pub_key, - const uint8_t *der, const uint8_t *der_end); - -#define CC_EC_OID_SECP192R1 {((unsigned char *)"\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x01")} -#define CC_EC_OID_SECP256R1 {((unsigned char *)"\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07")} -#define CC_EC_OID_SECP224R1 {((unsigned char *)"\x06\x05\x2B\x81\x04\x00\x21")} -#define CC_EC_OID_SECP384R1 {((unsigned char *)"\x06\x05\x2B\x81\x04\x00\x22")} -#define CC_EC_OID_SECP521R1 {((unsigned char *)"\x06\x05\x2B\x81\x04\x00\x23")} - - -#endif /* _CORECRYPTO_CCDER_H_ */ diff --git a/EXTERNAL_HEADERS/corecrypto/ccdes.h b/EXTERNAL_HEADERS/corecrypto/ccdes.h index 31b5dadbf..3441ded70 100644 --- a/EXTERNAL_HEADERS/corecrypto/ccdes.h +++ b/EXTERNAL_HEADERS/corecrypto/ccdes.h @@ -1,11 +1,12 @@ -/* - * ccdes.h - * corecrypto - * - * Created on 12/20/2010 - * - * Copyright (c) 2010,2012,2015 Apple Inc. All rights reserved. +/* Copyright (c) (2010,2012,2015,2016,2017,2018,2019) Apple Inc. All rights reserved. * + * corecrypto is licensed under Apple Inc.’s Internal Use License Agreement (which + * is contained in the License.txt file distributed with corecrypto) and only to + * people who accept that license. IMPORTANT: Any license rights granted to you by + * Apple Inc. (if any) are limited to internal use within your organization only on + * devices and computers you own or control, for the sole purpose of verifying the + * security characteristics and correct functioning of the Apple Software. You may + * not, directly or indirectly, redistribute the Apple Software or any portions thereof. */ diff --git a/EXTERNAL_HEADERS/corecrypto/ccdigest.h b/EXTERNAL_HEADERS/corecrypto/ccdigest.h index fa2b765f9..ce84aa8d4 100644 --- a/EXTERNAL_HEADERS/corecrypto/ccdigest.h +++ b/EXTERNAL_HEADERS/corecrypto/ccdigest.h @@ -1,11 +1,12 @@ -/* - * ccdigest.h - * corecrypto - * - * Created on 11/30/2010 - * - * Copyright (c) 2010,2011,2012,2014,2015 Apple Inc. All rights reserved. +/* Copyright (c) (2010,2011,2012,2014,2015,2016,2017,2018,2019) Apple Inc. 
All rights reserved. * + * corecrypto is licensed under Apple Inc.’s Internal Use License Agreement (which + * is contained in the License.txt file distributed with corecrypto) and only to + * people who accept that license. IMPORTANT: Any license rights granted to you by + * Apple Inc. (if any) are limited to internal use within your organization only on + * devices and computers you own or control, for the sole purpose of verifying the + * security characteristics and correct functioning of the Apple Software. You may + * not, directly or indirectly, redistribute the Apple Software or any portions thereof. */ #ifndef _CORECRYPTO_CCDIGEST_H_ @@ -100,14 +101,15 @@ void ccdigest(const struct ccdigest_info *di, size_t len, #define OID_DEF(_VALUE_) ((const unsigned char *)_VALUE_) -#define CC_DIGEST_OID_MD2 OID_DEF("\x06\x08\x2A\x86\x48\x86\xF7\x0D\x02\x02") -#define CC_DIGEST_OID_MD4 OID_DEF("\x06\x08\x2A\x86\x48\x86\xF7\x0D\x02\x04") -#define CC_DIGEST_OID_MD5 OID_DEF("\x06\x08\x2A\x86\x48\x86\xF7\x0D\x02\x05") -#define CC_DIGEST_OID_SHA1 OID_DEF("\x06\x05\x2b\x0e\x03\x02\x1a") -#define CC_DIGEST_OID_SHA224 OID_DEF("\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x04") -#define CC_DIGEST_OID_SHA256 OID_DEF("\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x01") -#define CC_DIGEST_OID_SHA384 OID_DEF("\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x02") -#define CC_DIGEST_OID_SHA512 OID_DEF("\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x03") -#define CC_DIGEST_OID_RMD160 OID_DEF("\x06\x05\x2B\x24\x03\x02\x01") +#define CC_DIGEST_OID_MD2 OID_DEF("\x06\x08\x2A\x86\x48\x86\xF7\x0D\x02\x02") +#define CC_DIGEST_OID_MD4 OID_DEF("\x06\x08\x2A\x86\x48\x86\xF7\x0D\x02\x04") +#define CC_DIGEST_OID_MD5 OID_DEF("\x06\x08\x2A\x86\x48\x86\xF7\x0D\x02\x05") +#define CC_DIGEST_OID_SHA1 OID_DEF("\x06\x05\x2b\x0e\x03\x02\x1a") +#define CC_DIGEST_OID_SHA224 OID_DEF("\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x04") +#define CC_DIGEST_OID_SHA256 OID_DEF("\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x01") +#define CC_DIGEST_OID_SHA384 OID_DEF("\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x02") +#define CC_DIGEST_OID_SHA512 OID_DEF("\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x03") +#define CC_DIGEST_OID_SHA512_256 OID_DEF("\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x06") +#define CC_DIGEST_OID_RMD160 OID_DEF("\x06\x05\x2B\x24\x03\x02\x01") #endif /* _CORECRYPTO_CCDIGEST_H_ */ diff --git a/EXTERNAL_HEADERS/corecrypto/ccdigest_priv.h b/EXTERNAL_HEADERS/corecrypto/ccdigest_priv.h index 8061c5faf..fd80de818 100644 --- a/EXTERNAL_HEADERS/corecrypto/ccdigest_priv.h +++ b/EXTERNAL_HEADERS/corecrypto/ccdigest_priv.h @@ -1,11 +1,12 @@ -/* - * ccdigest_priv.h - * corecrypto - * - * Created on 12/07/2010 - * - * Copyright (c) 2010,2011,2012,2015 Apple Inc. All rights reserved. +/* Copyright (c) (2010,2011,2012,2015,2016,2017,2018,2019) Apple Inc. All rights reserved. * + * corecrypto is licensed under Apple Inc.’s Internal Use License Agreement (which + * is contained in the License.txt file distributed with corecrypto) and only to + * people who accept that license. IMPORTANT: Any license rights granted to you by + * Apple Inc. (if any) are limited to internal use within your organization only on + * devices and computers you own or control, for the sole purpose of verifying the + * security characteristics and correct functioning of the Apple Software. You may + * not, directly or indirectly, redistribute the Apple Software or any portions thereof. 
*/ #ifndef _CORECRYPTO_CCDIGEST_PRIV_H_ diff --git a/EXTERNAL_HEADERS/corecrypto/ccdrbg.h b/EXTERNAL_HEADERS/corecrypto/ccdrbg.h index 14db0a16b..1cbcca0a1 100644 --- a/EXTERNAL_HEADERS/corecrypto/ccdrbg.h +++ b/EXTERNAL_HEADERS/corecrypto/ccdrbg.h @@ -1,11 +1,12 @@ -/* - * ccdrbg.h - * corecrypto - * - * Created on 08/17/2010 - * - * Copyright (c) 2010,2011,2012,2014,2015 Apple Inc. All rights reserved. +/* Copyright (c) (2010,2011,2012,2014,2015,2016,2017,2018,2019) Apple Inc. All rights reserved. * + * corecrypto is licensed under Apple Inc.’s Internal Use License Agreement (which + * is contained in the License.txt file distributed with corecrypto) and only to + * people who accept that license. IMPORTANT: Any license rights granted to you by + * Apple Inc. (if any) are limited to internal use within your organization only on + * devices and computers you own or control, for the sole purpose of verifying the + * security characteristics and correct functioning of the Apple Software. You may + * not, directly or indirectly, redistribute the Apple Software or any portions thereof. */ /*! diff --git a/EXTERNAL_HEADERS/corecrypto/ccdrbg_impl.h b/EXTERNAL_HEADERS/corecrypto/ccdrbg_impl.h index 499f58792..263dded51 100644 --- a/EXTERNAL_HEADERS/corecrypto/ccdrbg_impl.h +++ b/EXTERNAL_HEADERS/corecrypto/ccdrbg_impl.h @@ -1,11 +1,12 @@ -/* - * ccdrbg_impl.h - * corecrypto - * - * Created on 01/03/2012 - * - * Copyright (c) 2012,2015 Apple Inc. All rights reserved. +/* Copyright (c) (2012,2015,2016,2019) Apple Inc. All rights reserved. * + * corecrypto is licensed under Apple Inc.’s Internal Use License Agreement (which + * is contained in the License.txt file distributed with corecrypto) and only to + * people who accept that license. IMPORTANT: Any license rights granted to you by + * Apple Inc. (if any) are limited to internal use within your organization only on + * devices and computers you own or control, for the sole purpose of verifying the + * security characteristics and correct functioning of the Apple Software. You may + * not, directly or indirectly, redistribute the Apple Software or any portions thereof. */ #ifndef _CORECRYPTO_CCDRBG_IMPL_H_ diff --git a/EXTERNAL_HEADERS/corecrypto/cchmac.h b/EXTERNAL_HEADERS/corecrypto/cchmac.h index 3b6ac339b..4493016c8 100644 --- a/EXTERNAL_HEADERS/corecrypto/cchmac.h +++ b/EXTERNAL_HEADERS/corecrypto/cchmac.h @@ -1,11 +1,12 @@ -/* - * cchmac.h - * corecrypto - * - * Created on 12/07/2010 - * - * Copyright (c) 2010,2011,2012,2014,2015 Apple Inc. All rights reserved. +/* Copyright (c) (2010,2011,2012,2014,2015,2016,2017,2018,2019) Apple Inc. All rights reserved. * + * corecrypto is licensed under Apple Inc.’s Internal Use License Agreement (which + * is contained in the License.txt file distributed with corecrypto) and only to + * people who accept that license. IMPORTANT: Any license rights granted to you by + * Apple Inc. (if any) are limited to internal use within your organization only on + * devices and computers you own or control, for the sole purpose of verifying the + * security characteristics and correct functioning of the Apple Software. You may + * not, directly or indirectly, redistribute the Apple Software or any portions thereof. 
*/ #ifndef _CORECRYPTO_CCHMAC_H_ diff --git a/EXTERNAL_HEADERS/corecrypto/cckprng.h b/EXTERNAL_HEADERS/corecrypto/cckprng.h index edcff9a61..0c97177ff 100644 --- a/EXTERNAL_HEADERS/corecrypto/cckprng.h +++ b/EXTERNAL_HEADERS/corecrypto/cckprng.h @@ -1,11 +1,12 @@ -/* - * cckprng.h - * corecrypto - * - * Created on 12/7/2017 - * - * Copyright (c) 2017 Apple Inc. All rights reserved. +/* Copyright (c) (2018,2019) Apple Inc. All rights reserved. * + * corecrypto is licensed under Apple Inc.’s Internal Use License Agreement (which + * is contained in the License.txt file distributed with corecrypto) and only to + * people who accept that license. IMPORTANT: Any license rights granted to you by + * Apple Inc. (if any) are limited to internal use within your organization only on + * devices and computers you own or control, for the sole purpose of verifying the + * security characteristics and correct functioning of the Apple Software. You may + * not, directly or indirectly, redistribute the Apple Software or any portions thereof. */ #ifndef _CORECRYPTO_CCKPRNG_H_ @@ -15,25 +16,6 @@ #include -#define CCKPRNG_YARROW 0 - -#if CCKPRNG_YARROW - -typedef struct PRNG *PrngRef; - -struct cckprng_ctx { - PrngRef prng; - uint64_t bytes_since_entropy; - uint64_t bytes_generated; -}; - -#define CCKPRNG_ENTROPY_INTERVAL (1 << 14) -#define CCKPRNG_RESEED_NTICKS 50 - -typedef struct cckprng_ctx *cckprng_ctx_t; - -#else - // This is a Fortuna-inspired PRNG. While it differs from Fortuna in // many minor details, the biggest difference is its support for // multiple independent output generators. This is to make it suitable @@ -138,7 +120,7 @@ struct cckprng_diag { // Diagnostics corresponding to individual output generators unsigned ngens; - struct cckprng_gen_diag *gens; + CC_ALIGNED(8) struct cckprng_gen_diag *gens; // Diagnostics corresponding to internal entropy pools struct cckprng_pool_diag pools[CCKPRNG_NPOOLS]; @@ -156,6 +138,16 @@ struct cckprng_lock_ctx { cckprng_lock_mutex mutex; }; +#elif CC_ANDROID || CC_LINUX + +#include + +typedef pthread_mutex_t cckprng_lock_mutex; + +struct cckprng_lock_ctx { + cckprng_lock_mutex mutex; +}; + #else #include @@ -224,17 +216,16 @@ struct cckprng_sched_ctx { // A counter governing the set of entropy pools to drain uint64_t reseed_sched; - // A timestamp from the last reseed - uint64_t reseed_last; - // An index used to add entropy to pools in a round-robin style unsigned pool_idx; }; struct cckprng_ctx { - // The master secret of the PRNG - uint8_t seed[CCKPRNG_SEED_NBYTES]; + struct cckprng_key_ctx key; + + // A counter used in CTR mode (with the master secret) + uint8_t ctr[16]; // State used to schedule entropy consumption and reseeds struct cckprng_sched_ctx sched; @@ -245,6 +236,9 @@ struct cckprng_ctx { // The maximum number of generators that may be allocated unsigned max_ngens; + // The actual number of generators that have been initialized + unsigned ngens; + // An array of output generators (allocated dynamically) of length max_ngens struct cckprng_gen_ctx *gens; @@ -276,8 +270,6 @@ struct cckprng_funcs { void (*generate)(struct cckprng_ctx *ctx, unsigned gen_idx, size_t nbytes, void *out); }; -#endif - /* @function cckprng_init @abstract Initialize a kernel PRNG context. diff --git a/EXTERNAL_HEADERS/corecrypto/ccmd4.h b/EXTERNAL_HEADERS/corecrypto/ccmd4.h new file mode 100644 index 000000000..15e9cf605 --- /dev/null +++ b/EXTERNAL_HEADERS/corecrypto/ccmd4.h @@ -0,0 +1,26 @@ +/* Copyright (c) (2010,2015,2017,2018,2019) Apple Inc. All rights reserved. 
+ * + * corecrypto is licensed under Apple Inc.’s Internal Use License Agreement (which + * is contained in the License.txt file distributed with corecrypto) and only to + * people who accept that license. IMPORTANT: Any license rights granted to you by + * Apple Inc. (if any) are limited to internal use within your organization only on + * devices and computers you own or control, for the sole purpose of verifying the + * security characteristics and correct functioning of the Apple Software. You may + * not, directly or indirectly, redistribute the Apple Software or any portions thereof. + */ + +#ifndef _CORECRYPTO_CCMD4_H_ +#define _CORECRYPTO_CCMD4_H_ + +#include + +#define CCMD4_BLOCK_SIZE 64 +#define CCMD4_OUTPUT_SIZE 16 +#define CCMD4_STATE_SIZE 16 + +extern const struct ccdigest_info ccmd4_ltc_di; + +/* default is libtomcrypt */ +#define ccmd4_di ccmd4_ltc_di + +#endif /* _CORECRYPTO_CCMD4_H_ */ diff --git a/EXTERNAL_HEADERS/corecrypto/ccmd5.h b/EXTERNAL_HEADERS/corecrypto/ccmd5.h deleted file mode 100644 index 7e97a76f2..000000000 --- a/EXTERNAL_HEADERS/corecrypto/ccmd5.h +++ /dev/null @@ -1,26 +0,0 @@ -/* - * ccmd5.h - * corecrypto - * - * Created on 12/06/2010 - * - * Copyright (c) 2010,2011,2012,2015 Apple Inc. All rights reserved. - * - */ - -#ifndef _CORECRYPTO_CCMD5_H_ -#define _CORECRYPTO_CCMD5_H_ - -#include - -#define CCMD5_BLOCK_SIZE 64 -#define CCMD5_OUTPUT_SIZE 16 -#define CCMD5_STATE_SIZE 16 - -/* Selector */ -const struct ccdigest_info *ccmd5_di(void); - -/* Implementations */ -extern const struct ccdigest_info ccmd5_ltc_di; - -#endif /* _CORECRYPTO_CCMD5_H_ */ diff --git a/EXTERNAL_HEADERS/corecrypto/ccmode.h b/EXTERNAL_HEADERS/corecrypto/ccmode.h index f4aa20a99..e01908eb1 100644 --- a/EXTERNAL_HEADERS/corecrypto/ccmode.h +++ b/EXTERNAL_HEADERS/corecrypto/ccmode.h @@ -1,11 +1,12 @@ -/* - * ccmode.h - * corecrypto - * - * Created on 12/07/2010 - * - * Copyright (c) 2010,2011,2012,2014,2015 Apple Inc. All rights reserved. +/* Copyright (c) (2010,2011,2012,2014,2015,2016,2017,2018,2019) Apple Inc. All rights reserved. * + * corecrypto is licensed under Apple Inc.’s Internal Use License Agreement (which + * is contained in the License.txt file distributed with corecrypto) and only to + * people who accept that license. IMPORTANT: Any license rights granted to you by + * Apple Inc. (if any) are limited to internal use within your organization only on + * devices and computers you own or control, for the sole purpose of verifying the + * security characteristics and correct functioning of the Apple Software. You may + * not, directly or indirectly, redistribute the Apple Software or any portions thereof. */ #ifndef _CORECRYPTO_CCMODE_H_ @@ -58,10 +59,6 @@ ccecb_one_shot(const struct ccmode_ecb *mode, size_t key_len, const void *key, s /* CBC mode. */ -/* The CBC interface changed due to rdar://11468135. This macros is to indicate - to client which CBC API is implemented. Clients can support old versions of - corecrypto at build time using this. - */ #define __CC_HAS_FIX_FOR_11468135__ 1 /* Declare a cbc key named _name_. Pass the size field of a struct ccmode_cbc diff --git a/EXTERNAL_HEADERS/corecrypto/ccmode_factory.h b/EXTERNAL_HEADERS/corecrypto/ccmode_factory.h deleted file mode 100644 index aa8cb0527..000000000 --- a/EXTERNAL_HEADERS/corecrypto/ccmode_factory.h +++ /dev/null @@ -1,137 +0,0 @@ -/* - * ccmode_factory.h - * corecrypto - * - * Created on 01/21/2011 - * - * Copyright (c) 2011,2012,2013,2014,2015 Apple Inc. All rights reserved. 
- * - */ - -#ifndef _CORECRYPTO_CCMODE_FACTORY_H_ -#define _CORECRYPTO_CCMODE_FACTORY_H_ - -#include /* TODO: Remove dependency on this header. */ -#include - -/* Functions defined in this file are only to be used - within corecrypto files. - */ - -/* Use these function to runtime initialize a ccmode_cbc decrypt object (for - example if it's part of a larger structure). Normally you would pass a - ecb decrypt mode implementation of some underlying algorithm as the ecb - parameter. */ -void ccmode_factory_cbc_decrypt(struct ccmode_cbc *cbc, - const struct ccmode_ecb *ecb); - -/* Use these function to runtime initialize a ccmode_cbc encrypt object (for - example if it's part of a larger structure). Normally you would pass a - ecb encrypt mode implementation of some underlying algorithm as the ecb - parameter. */ -void ccmode_factory_cbc_encrypt(struct ccmode_cbc *cbc, - const struct ccmode_ecb *ecb); - - -/* Use these function to runtime initialize a ccmode_cfb decrypt object (for - example if it's part of a larger structure). Normally you would pass a - ecb encrypt mode implementation of some underlying algorithm as the ecb - parameter. */ -void ccmode_factory_cfb_decrypt(struct ccmode_cfb *cfb, - const struct ccmode_ecb *ecb); - -/* Use these function to runtime initialize a ccmode_cfb encrypt object (for - example if it's part of a larger structure). Normally you would pass a - ecb encrypt mode implementation of some underlying algorithm as the ecb - parameter. */ -void ccmode_factory_cfb_encrypt(struct ccmode_cfb *cfb, - const struct ccmode_ecb *ecb); - -/* Use these function to runtime initialize a ccmode_cfb8 decrypt object (for - example if it's part of a larger structure). Normally you would pass a - ecb decrypt mode implementation of some underlying algorithm as the ecb - parameter. */ -void ccmode_factory_cfb8_decrypt(struct ccmode_cfb8 *cfb8, - const struct ccmode_ecb *ecb); - -/* Use these function to runtime initialize a ccmode_cfb8 encrypt object (for - example if it's part of a larger structure). Normally you would pass a - ecb encrypt mode implementation of some underlying algorithm as the ecb - parameter. */ -void ccmode_factory_cfb8_encrypt(struct ccmode_cfb8 *cfb8, - const struct ccmode_ecb *ecb); - -/* Use these function to runtime initialize a ccmode_ctr decrypt object (for - example if it's part of a larger structure). Normally you would pass a - ecb encrypt mode implementation of some underlying algorithm as the ecb - parameter. */ -void ccmode_factory_ctr_crypt(struct ccmode_ctr *ctr, - const struct ccmode_ecb *ecb); - -/* Use these function to runtime initialize a ccmode_gcm decrypt object (for - example if it's part of a larger structure). For GCM you always pass a - ecb encrypt mode implementation of some underlying algorithm as the ecb - parameter. */ -void ccmode_factory_gcm_decrypt(struct ccmode_gcm *gcm, - const struct ccmode_ecb *ecb_encrypt); - -/* Use these function to runtime initialize a ccmode_gcm encrypt object (for - example if it's part of a larger structure). For GCM you always pass a - ecb encrypt mode implementation of some underlying algorithm as the ecb - parameter. */ -void ccmode_factory_gcm_encrypt(struct ccmode_gcm *gcm, - const struct ccmode_ecb *ecb_encrypt); - -/* Use these function to runtime initialize a ccmode_ccm decrypt object (for - example if it's part of a larger structure). For CCM you always pass a - ecb encrypt mode implementation of some underlying algorithm as the ecb - parameter. 
*/ - -void ccmode_factory_ccm_decrypt(struct ccmode_ccm *ccm, - const struct ccmode_ecb *ecb_encrypt); - -/* Use these function to runtime initialize a ccmode_ccm encrypt object (for - example if it's part of a larger structure). For CCM you always pass a - ecb encrypt mode implementation of some underlying algorithm as the ecb - parameter. */ -void ccmode_factory_ccm_encrypt(struct ccmode_ccm *ccm, - const struct ccmode_ecb *ecb_encrypt); - -/* Use these function to runtime initialize a ccmode_ofb encrypt object (for - example if it's part of a larger structure). Normally you would pass a - ecb encrypt mode implementation of some underlying algorithm as the ecb - parameter. */ -void ccmode_factory_ofb_crypt(struct ccmode_ofb *ofb, - const struct ccmode_ecb *ecb); - -/* Use these function to runtime initialize a ccmode_omac decrypt object (for - example if it's part of a larger structure). Normally you would pass a - ecb decrypt mode implementation of some underlying algorithm as the ecb - parameter. */ -void ccmode_factory_omac_decrypt(struct ccmode_omac *omac, - const struct ccmode_ecb *ecb); - -/* Use these function to runtime initialize a ccmode_omac encrypt object (for - example if it's part of a larger structure). Normally you would pass a - ecb encrypt mode implementation of some underlying algorithm as the ecb - parameter. */ -void ccmode_factory_omac_encrypt(struct ccmode_omac *omac, - const struct ccmode_ecb *ecb); - -/* Use these function to runtime initialize a ccmode_xts decrypt object (for - example if it's part of a larger structure). Normally you would pass a - ecb decrypt mode implementation of some underlying algorithm as the ecb - parameter. */ -void ccmode_factory_xts_decrypt(struct ccmode_xts *xts, - const struct ccmode_ecb *ecb, - const struct ccmode_ecb *ecb_encrypt); - -/* Use these function to runtime initialize a ccmode_xts encrypt object (for - example if it's part of a larger structure). Normally you would pass a - ecb encrypt mode implementation of some underlying algorithm as the ecb - parameter. */ -void ccmode_factory_xts_encrypt(struct ccmode_xts *xts, - const struct ccmode_ecb *ecb, - const struct ccmode_ecb *ecb_encrypt); - -#endif /* _CORECRYPTO_CCMODE_FACTORY_H_ */ diff --git a/EXTERNAL_HEADERS/corecrypto/ccmode_impl.h b/EXTERNAL_HEADERS/corecrypto/ccmode_impl.h index a0c6e24bc..849881ed4 100644 --- a/EXTERNAL_HEADERS/corecrypto/ccmode_impl.h +++ b/EXTERNAL_HEADERS/corecrypto/ccmode_impl.h @@ -1,11 +1,12 @@ -/* - * ccmode_impl.h - * corecrypto - * - * Created on 12/07/2010 - * - * Copyright (c) 2012,2015 Apple Inc. All rights reserved. +/* Copyright (c) (2010,2011,2012,2015,2016,2017,2018,2019) Apple Inc. All rights reserved. * + * corecrypto is licensed under Apple Inc.’s Internal Use License Agreement (which + * is contained in the License.txt file distributed with corecrypto) and only to + * people who accept that license. IMPORTANT: Any license rights granted to you by + * Apple Inc. (if any) are limited to internal use within your organization only on + * devices and computers you own or control, for the sole purpose of verifying the + * security characteristics and correct functioning of the Apple Software. You may + * not, directly or indirectly, redistribute the Apple Software or any portions thereof. 
*/ #ifndef _CORECRYPTO_CCMODE_IMPL_H_ @@ -25,6 +26,7 @@ struct ccmode_ecb { size_t key_nbytes, const void *key); int (*ecb)(const ccecb_ctx *ctx, size_t nblocks, const void *in, void *out); + void (*roundkey)(const ccecb_ctx *ctx, unsigned r, void *key); }; /*! diff --git a/EXTERNAL_HEADERS/corecrypto/ccmode_siv.h b/EXTERNAL_HEADERS/corecrypto/ccmode_siv.h index 1b05c638e..5d40c1dd1 100644 --- a/EXTERNAL_HEADERS/corecrypto/ccmode_siv.h +++ b/EXTERNAL_HEADERS/corecrypto/ccmode_siv.h @@ -1,11 +1,12 @@ -/* - * ccmode_siv.h - * corecrypto - * - * Created on 11/13/2015 - * - * Copyright (c) 2015 Apple Inc. All rights reserved. +/* Copyright (c) (2015,2016,2017,2018,2019) Apple Inc. All rights reserved. * + * corecrypto is licensed under Apple Inc.’s Internal Use License Agreement (which + * is contained in the License.txt file distributed with corecrypto) and only to + * people who accept that license. IMPORTANT: Any license rights granted to you by + * Apple Inc. (if any) are limited to internal use within your organization only on + * devices and computers you own or control, for the sole purpose of verifying the + * security characteristics and correct functioning of the Apple Software. You may + * not, directly or indirectly, redistribute the Apple Software or any portions thereof. */ #ifndef _CORECRYPTO_CCMODE_SIV_H_ @@ -48,17 +49,44 @@ CC_INLINE size_t ccsiv_context_size(const struct ccmode_siv *mode) { return mode->size; } +/*! +@function ccsiv_block_size +@abstract Return the block_size = block_length = tag_length used in the mode. + +@param mode ccsiv mode descriptor + +@discussion Used to return the current block size of the SIV mode. Note that the tag in this mode is an output of the underlying blockcipher and therefore the tag length corresponds to the block size. +*/ CC_INLINE size_t ccsiv_block_size(const struct ccmode_siv *mode) { return mode->block_size; } +/*! + @function ccsiv_ciphertext_size + @abstract Return size of the ciphertext (which is the ciphertext and corresponding tag) given the mode and plaintext length + + @param mode ccsiv mode descriptor + @param plaintext_size Size of the plaintext + + @discussion returns the length of the aead ciphertext that the context will generate which includes both the encrypted plaintext and tag. + */ CC_INLINE size_t ccsiv_ciphertext_size(const struct ccmode_siv *mode, size_t plaintext_size) { return plaintext_size + mode->cbc->block_size; } +/*! + @function ccsiv_plaintext_size + @abstract Return size of plaintext given a ciphertext length and mode. + + @param mode ccsiv mode descriptor + @param ciphertext_size Size of the ciphertext + + @discussion returns the length of the plaintext which results from the decryption of a ciphertext of the corresponding size (here ciphertext size includes the tag). + */ + CC_INLINE size_t ccsiv_plaintext_size(const struct ccmode_siv *mode, size_t ciphertext_size) { @@ -68,43 +96,155 @@ CC_INLINE size_t ccsiv_plaintext_size(const struct ccmode_siv *mode, return ciphertext_size - mode->cbc->block_size; } -// Supported key sizes are 32, 48, 64 bytes +/*! + @function ccsiv_init + @abstract Initialize a context for ccsiv with an associated mode and given key. + + @param mode Descriptor for the mode + @param ctx Allocated context to be initialized + @param key_byte_len Length of the key: Supported key sizes are 32, 48, 64 bytes. + @param key key for siv. All bits of this key should be random.
(See discussion) + + @discussion In order to compute SIV_Enc_k(a1,...,am, n, x) where ai is the ith piece of associated data, n is a nonce and x is a plaintext, we use the following sequence of calls: + + + @code + ccsiv_init(...) + ccsiv_aad(...) (may be called zero or more times) + ccsiv_set_nonce(...) + ccsiv_crypt(...) + @endcode + + To reuse the context for additional encryptions, follow this sequence: + + @code + ccsiv_reset(...) + ccsiv_aad(...) (may be called zero or more times) + ccsiv_set_nonce(...) + ccsiv_crypt(...) + @endcode + +Importantly, all the bits in the key need to be random. Duplicating a smaller key to achieve a longer key length will result in an insecure implementation. + */ CC_INLINE int ccsiv_init(const struct ccmode_siv *mode, ccsiv_ctx *ctx, size_t key_byte_len, const uint8_t *key) { return mode->init(mode, ctx, key_byte_len, key); } -// Process nonce. it is actually just an authenticated data +/*! + @function ccsiv_set_nonce + @abstract Add the nonce to the siv's computation of the tag. Changes the internal state of the context + so that after the call only a crypt or reset call is permitted. + + @param mode Descriptor for the mode + @param ctx Initialized ctx + @param nbytes Length of the current nonce data being added + @param in Nonce data to be authenticated. + + @discussion The nonce is a special form of authenticated data. If provided (a call to ccsiv_set_nonce is optional) it allows + randomization of the ciphertext (preventing deterministic encryption). While the length of the nonce is not limited, the + amount of entropy that can be provided is limited by the number of bits in the block of the associated block-cipher. + */ CC_INLINE int ccsiv_set_nonce(const struct ccmode_siv *mode, ccsiv_ctx *ctx, size_t nbytes, const uint8_t *in) { return mode->set_nonce(ctx, nbytes, in); } -// Process authenticated data. Taken into account for authentication but not -// encrypted +/*! + @function ccsiv_aad + @abstract Add the next piece of associated data to the SIV's computation of the tag. + @param mode Descriptor for the mode + @param ctx Initialized ctx + @param nbytes Length of the current associated data being added + @param in Associated data to be authenticated. + + @discussion Adds the associated data given by in to the computation of the tag. Note this call is optional and no associated data needs to be provided. Multiple pieces of associated data can be provided by multiple calls to this function. Note the associated data in this case is simply computed as the concatenation of all of the associated data inputs. + */ CC_INLINE int ccsiv_aad(const struct ccmode_siv *mode, ccsiv_ctx *ctx, size_t nbytes, const uint8_t *in) { return mode->auth(ctx, nbytes, in); } -// Encryption data. Authenticated and encrypted. -// Encrypt/Decrypt can only be called once +/*! + @function ccsiv_crypt + @abstract Depending on mode, 1) Encrypts a plaintext, or 2) Decrypts a ciphertext + + @param mode Descriptor for the mode + @param ctx Initialized ctx + @param nbytes Case 1) Length of the current plaintext + Case 2) Length of the current ciphertext (block length + plaintext length). + @param in Case 1) Plaintext + Case 2) Ciphertext + @param out Case 1) Tag+ciphertext (buffer should be already allocated and of length block_length+plaintext_length.)
+ Case 2) Plaintext (buffer should be already allocated and of length ciphertext_length - block_length) + + @discussion Depending on whether mode has been set up to encrypt or decrypt, this function + 1) Encrypts the plaintext given as input in, and provides the ciphertext (which is a concatenation of the cbc-tag + followed by the encrypted plaintext) as output out. 2) Decrypts the plaintext from the input ciphertext at in (which again is the cbc-tag, followed by encrypted plaintext), and then verifies that the computed and provided tags match. + + This function is only called once. If one wishes to compute another (en)/(de)cryption, one resets the state with + ccsiv_reset, and then begins the process again. There is no way to stream large plaintext/ciphertext inputs into the + function. + + In the case of a decryption, if there is a failure in verifying the computed tag against the provided tag (embedded in the ciphertext), then a decryption/verification + failure is returned, and any internally computed plaintexts and tags are zeroed out. + Lastly, the context's internal state is reset, so that a new decryption/encryption can be commenced. + + Decryption can be done in place in memory by setting in=out. Encryption cannot be done in place. However, if one is trying to minimize memory usage one can set out = in - block_length, which results in the ciphertext being encrypted in place, and the IV being prepended before the ciphertext. + */ CC_INLINE int ccsiv_crypt(const struct ccmode_siv *mode, ccsiv_ctx *ctx, size_t nbytes, const uint8_t *in, uint8_t *out) { return mode->crypt(ctx, nbytes, in, out); } -// Clear all context for reuse. +/*! + @function ccsiv_reset + @abstract Resets the state of the ccsiv_ctx ctx, maintaining the key, but preparing the + ctx to perform a new Associated Data Authenticated (En)/(De)cryption. + @param mode Descriptor for the mode + @param ctx Initialized ctx + */ CC_INLINE int ccsiv_reset(const struct ccmode_siv *mode, ccsiv_ctx *ctx) { return mode->reset(ctx); } -// One shot with only one vector of adata +/*! + @function ccsiv_one_shot + @abstract A simplified but more constrained way of performing an AES SIV (en)/(de)cryption. It is limited because only + one piece of associated data may be provided. + + @param mode Descriptor for the mode + @param key_len Length of the key: Supported key sizes are 32, 48, 64 bytes + @param key key for siv + @param nonce_nbytes Length of the current nonce data being added + @param nonce Nonce data to be authenticated. + @param adata_nbytes Length of the associated data. + @param adata Associated data to be authenticated. + @param in_nbytes Length of either the plaintext (for encryption) or ciphertext (for decryption), in the latter case the length includes the length of the tag. + @param in Plaintext or ciphertext. Note that the ciphertext includes a tag of length tag_length prepended to it. + @param out Buffer to hold ciphertext/plaintext. (Note Ciphertext is of size plaintext_length + block_length and plaintext is of size ciphertext_length - block_length, as the tag has the length of one block.) + It must be the case that out <= in - block_length || out >= in + plaintext_length + + @discussion Decryption can be done in place in memory by setting in=out. Encryption cannot be done in place. However, if one is trying to minimize memory usage + one can set out = in - block_length, which results in the ciphertext being encrypted in place, and the IV being prepended before the ciphertext.
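To make the ccsiv_init / ccsiv_aad / ccsiv_set_nonce / ccsiv_crypt sequence documented above concrete, here is a hedged sketch of a single AES-SIV encryption. It assumes the ccaes_siv_encrypt_mode() accessor from ccaes.h earlier in this patch, a 32-byte key, and the usual ccsiv_ctx_decl/ccsiv_ctx_clear declaration macros; error handling is abbreviated.

#include <corecrypto/ccaes.h>
#include <corecrypto/ccmode_siv.h>

static int example_siv_encrypt(const uint8_t key[32],
                               size_t nonce_nbytes, const uint8_t *nonce,
                               size_t adata_nbytes, const uint8_t *adata,
                               size_t pt_nbytes, const uint8_t *pt,
                               uint8_t *out /* ccsiv_ciphertext_size(mode, pt_nbytes) bytes */)
{
    const struct ccmode_siv *mode = ccaes_siv_encrypt_mode();
    ccsiv_ctx_decl(ccsiv_context_size(mode), ctx);

    int rc = ccsiv_init(mode, ctx, 32, key);
    if (rc == 0 && adata_nbytes > 0) {
        rc = ccsiv_aad(mode, ctx, adata_nbytes, adata);       /* optional, repeatable */
    }
    if (rc == 0 && nonce_nbytes > 0) {
        rc = ccsiv_set_nonce(mode, ctx, nonce_nbytes, nonce); /* optional */
    }
    if (rc == 0) {
        rc = ccsiv_crypt(mode, ctx, pt_nbytes, pt, out);      /* writes tag || ciphertext */
    }
    ccsiv_ctx_clear(ccsiv_context_size(mode), ctx);
    return rc;
}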
+ + Suppose the block length is 16 bytes long (AES) and the plaintext is of length 20, then we could set in = 16, out = 0 and let the bytes of the plaintext be denoted as P_1...P_20 + then memory is depicted as: + | 0 = ? | 1 = ? | ... | 15 = ? | 16 = P_1 | ... | 35 = P_20 | + | | | | | + V V V V V + |IV_1 | IV_2 | ... | IV_16 | C_1 | ... | C_20 | + +Note that the ciphertext itself is encrypted in place, but the IV prefixes the ciphertext. + + + */ + CC_INLINE int ccsiv_one_shot(const struct ccmode_siv *mode, size_t key_len, const uint8_t *key, unsigned nonce_nbytes, const uint8_t* nonce, diff --git a/EXTERNAL_HEADERS/corecrypto/ccmode_siv_hmac.h b/EXTERNAL_HEADERS/corecrypto/ccmode_siv_hmac.h index 2cbc9a131..eba951c50 100644 --- a/EXTERNAL_HEADERS/corecrypto/ccmode_siv_hmac.h +++ b/EXTERNAL_HEADERS/corecrypto/ccmode_siv_hmac.h @@ -1,7 +1,13 @@ -// -// ccmode_siv_hmac.h -// corecrypto -// +/* Copyright (c) (2019) Apple Inc. All rights reserved. + * + * corecrypto is licensed under Apple Inc.’s Internal Use License Agreement (which + * is contained in the License.txt file distributed with corecrypto) and only to + * people who accept that license. IMPORTANT: Any license rights granted to you by + * Apple Inc. (if any) are limited to internal use within your organization only on + * devices and computers you own or control, for the sole purpose of verifying the + * security characteristics and correct functioning of the Apple Software. You may + * not, directly or indirectly, redistribute the Apple Software or any portions thereof. + */ // Created by Apple on 12/10/18. // @@ -83,9 +89,9 @@ size_t ccsiv_hmac_ciphertext_size(ccsiv_hmac_ctx *ctx, size_t plaintext_size); @abstract Return size of plaintext given a ciphertext length and mode. @param ctx Current siv_hmac context that has been previously initialized - @param ciphertext_size Size of the ciphertext + @param ciphertext_size Size of the ciphertext (which includes the tag) - @discussion returns the length of the aead ciphertext which is both the encrypted plaintext and tag length together. + @discussion returns the length of the plaintext which results from the decryption of a ciphertext of the corresponding size (here ciphertext size includes the tag). */ size_t ccsiv_hmac_plaintext_size(ccsiv_hmac_ctx *ctx, size_t ciphertext_size); @@ -155,7 +161,9 @@ int ccsiv_hmac_set_nonce(const struct ccmode_siv_hmac *mode, ccsiv_hmac_ctx *ctx @discussion This function is only called once. If one wishes to compute another (en)/(de)cryption, one resets the state with ccsiv_hmac_reset, and then begins the process again. There is no way to stream large plaintext/ciphertext inputs into the function. - + @param out Case 1) Tag + Ciphertext (buffer should be already allocated and of length tag + plaintext length) + Case 2) Plaintext (buffer should be already allocated and of length ciphertext - tag length) + In the case of a decryption, if there is a failure in verifying the computed tag against the provided tag (embedded in the ciphertext), then a decryption/verification failure is returned, and any internally computed plaintexts and tags are zeroed out. Lastly, the context's internal state is reset, so that a new decryption/encryption can be commenced. @@ -185,8 +193,8 @@ int ccsiv_hmac_reset(const struct ccmode_siv_hmac *mode, ccsiv_hmac_ctx *ctx); @param adata_nbytes Length of the associated data. @param adata Associated data to be authenticated.
@param in_nbytes Length of either the plaintext (for encryption) or ciphertext (for decryption) - @param in plaintext or ciphertext. Note that the ciphertext includes a tag of length tag_length prepended to - it. + @param in plaintext or ciphertext. Note that the ciphertext includes a tag of length tag_length prepended to it. + @param out Buffer to hold ciphertext/plaintext. (Note Ciphertext is of size plaintext length + tag_length and plaintext is of length ciphertext - tag_length.) */ // One shot AEAD with only one input for adata, and a nonce. diff --git a/EXTERNAL_HEADERS/corecrypto/ccn.h b/EXTERNAL_HEADERS/corecrypto/ccn.h index 778f3e5cf..4b2593689 100644 --- a/EXTERNAL_HEADERS/corecrypto/ccn.h +++ b/EXTERNAL_HEADERS/corecrypto/ccn.h @@ -1,11 +1,12 @@ -/* - * ccn.h - * corecrypto - * - * Created on 11/16/2010 - * - * Copyright (c) 2010,2011,2012,2013,2014,2015 Apple Inc. All rights reserved. +/* Copyright (c) (2010,2011,2012,2013,2014,2015,2016,2017,2018,2019,2020) Apple Inc. All rights reserved. * + * corecrypto is licensed under Apple Inc.’s Internal Use License Agreement (which + * is contained in the License.txt file distributed with corecrypto) and only to + * people who accept that license. IMPORTANT: Any license rights granted to you by + * Apple Inc. (if any) are limited to internal use within your organization only on + * devices and computers you own or control, for the sole purpose of verifying the + * security characteristics and correct functioning of the Apple Software. You may + * not, directly or indirectly, redistribute the Apple Software or any portions thereof. */ #ifndef _CORECRYPTO_CCN_H_ @@ -66,6 +67,7 @@ typedef uint16_t cc_dunit; // 16 bit double width unit #define CCN_UNIT_MASK ((cc_unit)~0) #define CCN_UNIT_LOWER_HALF_MASK ((CCN_UNIT_MASK) >> (CCN_UNIT_BITS/2)) #define CCN_UNIT_UPPER_HALF_MASK (~CCN_UNIT_LOWER_HALF_MASK) +#define CCN_UNIT_HALF_BITS (CCN_UNIT_BITS / 2) typedef struct { cc_unit *start; // First cc_unit of the workspace @@ -98,11 +100,11 @@ typedef struct { #define ccn_sizeof_size(_size_) ccn_sizeof_n(ccn_nof_size(_size_)) /* Returns the value of bit _k_ of _ccn_, both are only evaluated once. 
*/ -#define ccn_bit(_ccn_, _k_) ({__typeof__ (_k_) __k = (_k_); \ +#define ccn_bit(_ccn_, _k_) ({size_t __k = (size_t)(_k_); \ 1 & ((_ccn_)[ __k >> CCN_LOG2_BITS_PER_UNIT] >> (__k & (CCN_UNIT_BITS - 1)));}) /* Set the value of bit _k_ of _ccn_ to the value _v_ */ -#define ccn_set_bit(_ccn_, _k_, _v_) ({__typeof__ (_k_) __k = (_k_); \ +#define ccn_set_bit(_ccn_, _k_, _v_) ({size_t __k = (size_t)(_k_); \ if (_v_) \ (_ccn_)[ __k >> CCN_LOG2_BITS_PER_UNIT] |= CC_UNIT_C(1) << (__k & (CCN_UNIT_BITS - 1)); \ else \ @@ -252,19 +254,11 @@ typedef struct { CCN192_C(c7,c6,c5,c4,c3,c2,c1,c0,b7,b6,b5,b4,b3,b2,b1,b0,a7,a6,a5,a4,a3,a2,a1,a0),\ CCN64_C(d7,d6,d5,d4,d3,d2,d1,d0) -#define CCN264_C(e0,d7,d6,d5,d4,d3,d2,d1,d0,c7,c6,c5,c4,c3,c2,c1,c0,b7,b6,b5,b4,b3,b2,b1,b0,a7,a6,a5,a4,a3,a2,a1,a0) \ - CCN256_C(d7,d6,d5,d4,d3,d2,d1,d0,c7,c6,c5,c4,c3,c2,c1,c0,b7,b6,b5,b4,b3,b2,b1,b0,a7,a6,a5,a4,a3,a2,a1,a0),\ - CCN8_C(e0) - #define CCN384_C(f7,f6,f5,f4,f3,f2,f1,f0,e7,e6,e5,e4,e3,e2,e1,e0,d7,d6,d5,d4,d3,d2,d1,d0,c7,c6,c5,c4,c3,c2,c1,c0,b7,b6,b5,b4,b3,b2,b1,b0,a7,a6,a5,a4,a3,a2,a1,a0) \ CCN256_C(d7,d6,d5,d4,d3,d2,d1,d0,c7,c6,c5,c4,c3,c2,c1,c0,b7,b6,b5,b4,b3,b2,b1,b0,a7,a6,a5,a4,a3,a2,a1,a0),\ CCN64_C(e7,e6,e5,e4,e3,e2,e1,e0),\ CCN64_C(f7,f6,f5,f4,f3,f2,f1,f0) -#define CCN392_C(g0,f7,f6,f5,f4,f3,f2,f1,f0,e7,e6,e5,e4,e3,e2,e1,e0,d7,d6,d5,d4,d3,d2,d1,d0,c7,c6,c5,c4,c3,c2,c1,c0,b7,b6,b5,b4,b3,b2,b1,b0,a7,a6,a5,a4,a3,a2,a1,a0) \ - CCN384_C(f7,f6,f5,f4,f3,f2,f1,f0,e7,e6,e5,e4,e3,e2,e1,e0,d7,d6,d5,d4,d3,d2,d1,d0,c7,c6,c5,c4,c3,c2,c1,c0,b7,b6,b5,b4,b3,b2,b1,b0,a7,a6,a5,a4,a3,a2,a1,a0),\ - CCN8_C(g0) - #define CCN528_C(i1,i0,h7,h6,h5,h4,h3,h2,h1,h0,g7,g6,g5,g4,g3,g2,g1,g0,f7,f6,f5,f4,f3,f2,f1,f0,e7,e6,e5,e4,e3,e2,e1,e0,d7,d6,d5,d4,d3,d2,d1,d0,c7,c6,c5,c4,c3,c2,c1,c0,b7,b6,b5,b4,b3,b2,b1,b0,a7,a6,a5,a4,a3,a2,a1,a0) \ CCN256_C(d7,d6,d5,d4,d3,d2,d1,d0,c7,c6,c5,c4,c3,c2,c1,c0,b7,b6,b5,b4,b3,b2,b1,b0,a7,a6,a5,a4,a3,a2,a1,a0),\ CCN256_C(h7,h6,h5,h4,h3,h2,h1,h0,g7,g6,g5,g4,g3,g2,g1,g0,f7,f6,f5,f4,f3,f2,f1,f0,e7,e6,e5,e4,e3,e2,e1,e0),\ @@ -314,18 +308,21 @@ size_t ccn_bitlen(cc_size n, const cc_unit *s); CC_PURE CC_NONNULL((2, 3)) int ccn_cmp(cc_size n, const cc_unit *s, const cc_unit *t) __asm__("_ccn_cmp"); -/* s < t -> return - 1 | s == t -> return 0 | s > t -> return 1 - { N bit, M bit -> int } N = ns * sizeof(cc_unit) * 8 M = nt * sizeof(cc_unit) * 8 */ -CC_INLINE CC_NONNULL((2, 4)) -int ccn_cmpn(cc_size ns, const cc_unit *s, - cc_size nt, const cc_unit *t) { - if (ns > nt) { - return 1; - } else if (ns < nt) { - return -1; - } - return ccn_cmp(ns, s, t); -} +/*! @function ccn_cmpn + @abstract Compares the values of two big ints of different lengths. + + @discussion The execution time does not depend on the values of either s or t. + The function does not hide ns, nt, or whether ns > nt. + + @param ns Length of s + @param s First integer + @param nt Length of t + @param t Second integer + + @return 1 if s > t, -1 if s < t, 0 otherwise. + */ +CC_NONNULL_ALL +int ccn_cmpn(cc_size ns, const cc_unit *s, cc_size nt, const cc_unit *t); /* s - t -> r return 1 iff t > s { N bit, N bit -> N bit } N = n * sizeof(cc_unit) * 8 */ @@ -389,16 +386,16 @@ cc_unit ccn_addmul1(cc_size n, cc_unit *r, const cc_unit *s, const cc_unit v); /*! 
@function ccn_read_uint @abstract Copy big endian integer and represent it in cc_units - + @param n Input allocated size of the cc_unit output array r @param r Ouput cc_unit array for unsigned integer @param data_nbytes Input byte size of data @param data Input unsigned integer represented in big endian - + @result r is initialized with the big unsigned number - + @return 0 if no error, !=0 if the big number cannot be represented in the allocated cc_unit array. - + @discussion The execution pattern of this function depends on both n and data_nbytes but not on data values except the handling of the error case. */ @@ -414,12 +411,12 @@ int ccn_read_uint(cc_size n, cc_unit *r, size_t data_nbytes, const uint8_t *data /*! @function ccn_write_uint_size @abstract Compute the minimum size required to store an big integer - + @param n Input size of the cc_unit array representing the input @param s Input cc_unit array - + @result Return value is the exact byte size of the big integer - + @discussion The execution flow is independent on the value of the big integer. However, the use of the returned value may leak the position of the most significant byte @@ -429,7 +426,7 @@ CC_PURE CC_NONNULL((2)) size_t ccn_write_uint_size(cc_size n, const cc_unit *s); /*! @function ccn_write_uint @abstract Serialize the big integer into a big endian byte buffer - + @param n Input size of the cc_unit array representing the input @param s Input cc_unit array @param out_size Size of the output buffer @@ -449,17 +446,17 @@ void ccn_write_uint(cc_size n, const cc_unit *s, size_t out_size, void *out); /*! @function ccn_write_uint_padded_ct @abstract Serialize the big integer into a big endian byte buffer - + @param n Input size of the cc_unit array representing the input @param s Input cc_unit array @param out_size Size of the output buffer @param out Output byte array of size at least out_size - + @return number of leading zero bytes in case of success, a negative error value in case of failure - + @result This function writes exactly out_size byte, padding with zeroes when necessary. This function DOES NOT support truncation and returns an error if out_size < ccn_write_uint_size - + @discussion The execution flow of function is independent on the value of the big integer However, the processing of the return value by the caller may expose the position of the most significant byte @@ -472,17 +469,17 @@ int ccn_write_uint_padded_ct(cc_size n, const cc_unit *s, size_t out_size, uint8 @abstract Serialize the big integer into a big endian byte buffer Not recommended, for most cases ccn_write_uint_padded_ct is more appropriate Sensitive big integers are exposed since the processing expose the position of the MS byte - + @param n Input size of the cc_unit array representing the input @param s Input cc_unit array @param out_size Size of the output buffer @param out Output byte array of size at least out_size - + @return number of leading zero bytes - + @result This function writes exactly out_size byte, padding with zeroes when necessary. 
This function DOES support truncation when out_size r { n bit -> n bit } */ CC_NONNULL((2, 3)) -void ccn_set(cc_size n, cc_unit *r, const cc_unit *s) __asm__("_ccn_set"); +void ccn_set(cc_size n, cc_unit *r, const cc_unit *s); CC_INLINE CC_NONNULL((2)) void ccn_zero(cc_size n, cc_unit *r) { @@ -541,18 +538,16 @@ void ccn_zero_multi(cc_size n, cc_unit *r, ...); CC_INLINE CC_NONNULL((2)) void ccn_seti(cc_size n, cc_unit *r, cc_unit v) { - /* assert(n > 0); */ + assert(n > 0); r[0] = v; ccn_zero(n - 1, r + 1); } CC_INLINE CC_NONNULL((2, 4)) void ccn_setn(cc_size n, cc_unit *r, const cc_size s_size, const cc_unit *s) { - /* FIXME: assert not available in kernel. assert(n > 0); assert(s_size > 0); assert(s_size <= n); - */ ccn_set(s_size, r, s); ccn_zero(n - s_size, r + s_size); } diff --git a/EXTERNAL_HEADERS/corecrypto/ccpad.h b/EXTERNAL_HEADERS/corecrypto/ccpad.h index 5f8e3c38c..7cbe895c0 100644 --- a/EXTERNAL_HEADERS/corecrypto/ccpad.h +++ b/EXTERNAL_HEADERS/corecrypto/ccpad.h @@ -1,11 +1,12 @@ -/* - * ccpad.h - * corecrypto - * - * Created on 12/07/2010 - * - * Copyright (c) 2010,2011,2012,2014,2015 Apple Inc. All rights reserved. +/* Copyright (c) (2010,2011,2012,2014,2015,2019) Apple Inc. All rights reserved. * + * corecrypto is licensed under Apple Inc.’s Internal Use License Agreement (which + * is contained in the License.txt file distributed with corecrypto) and only to + * people who accept that license. IMPORTANT: Any license rights granted to you by + * Apple Inc. (if any) are limited to internal use within your organization only on + * devices and computers you own or control, for the sole purpose of verifying the + * security characteristics and correct functioning of the Apple Software. You may + * not, directly or indirectly, redistribute the Apple Software or any portions thereof. */ #ifndef _CORECRYPTO_CCPAD_H_ diff --git a/EXTERNAL_HEADERS/corecrypto/ccpbkdf2.h b/EXTERNAL_HEADERS/corecrypto/ccpbkdf2.h deleted file mode 100644 index bf1cd8f5a..000000000 --- a/EXTERNAL_HEADERS/corecrypto/ccpbkdf2.h +++ /dev/null @@ -1,42 +0,0 @@ -/* - * ccpbkdf2.h - * corecrypto - * - * Created on 12/15/2010 - * - * Copyright (c) 2010,2011,2012,2015 Apple Inc. All rights reserved. - * - */ - -#ifndef _CORECRYPTO_CCPBKDF2_H_ -#define _CORECRYPTO_CCPBKDF2_H_ - -#include - -/*! @function ccpbkdf2_hmac - @abstract perform a pbkdf2 using HMAC(di) for the PRF (see PKCS#5 for specification) - @discussion This performs a standard PBKDF2 transformation of password and salt through -an HMAC PRF of the callers slection (any Digest, typically SHA-1) returning dkLen bytes -containing the entropy. - -Considerations: -The salt used should be at least 8 bytes long. Each session should use it's own salt. -We use the password as the key for the HMAC and the running data as the text for the HMAC to make a PRF. -SHA-1 is a good hash to use for the core of the HMAC PRF. - @param di digest info defining the digest type to use in the PRF. 
- @param passwordLen amount of data to be fed in - @param password data to be fed into the PBKDF - @param saltLen length of the salt - @param salt salt to be used in pbkdf - @param iterations itrations to go - @param dkLen length of the results - @param dk buffer for the results of the PBKDF tranformation, must be dkLen big - - */ -int ccpbkdf2_hmac(const struct ccdigest_info *di, - size_t passwordLen, const void *password, - size_t saltLen, const void *salt, - size_t iterations, - size_t dkLen, void *dk); - -#endif /* _CORECRYPTO_CCPBKDF2_H_ */ diff --git a/EXTERNAL_HEADERS/corecrypto/ccrc4.h b/EXTERNAL_HEADERS/corecrypto/ccrc4.h deleted file mode 100644 index eaf644d1d..000000000 --- a/EXTERNAL_HEADERS/corecrypto/ccrc4.h +++ /dev/null @@ -1,33 +0,0 @@ -/* - * ccrc4.h - * corecrypto - * - * Created on 12/22/2010 - * - * Copyright (c) 2010,2011,2012,2013,2014,2015 Apple Inc. All rights reserved. - * - */ - -#ifndef _CORECRYPTO_CCRC4_H_ -#define _CORECRYPTO_CCRC4_H_ - -#include - -cc_aligned_struct(16) ccrc4_ctx; - -/* Declare a rc4 key named _name_. Pass the size field of a struct ccmode_ecb - for _size_. */ -#define ccrc4_ctx_decl(_size_, _name_) cc_ctx_decl(ccrc4_ctx, _size_, _name_) -#define ccrc4_ctx_clear(_size_, _name_) cc_clear(_size_, _name_) - -struct ccrc4_info { - size_t size; /* first argument to ccrc4_ctx_decl(). */ - void (*init)(ccrc4_ctx *ctx, size_t key_len, const void *key); - void (*crypt)(ccrc4_ctx *ctx, size_t nbytes, const void *in, void *out); -}; - -const struct ccrc4_info *ccrc4(void); - -extern const struct ccrc4_info ccrc4_eay; - -#endif /* _CORECRYPTO_CCRC4_H_ */ diff --git a/EXTERNAL_HEADERS/corecrypto/ccrng.h b/EXTERNAL_HEADERS/corecrypto/ccrng.h index 731f3e7bc..4582ddab6 100644 --- a/EXTERNAL_HEADERS/corecrypto/ccrng.h +++ b/EXTERNAL_HEADERS/corecrypto/ccrng.h @@ -1,11 +1,12 @@ -/* - * ccrng.h - * corecrypto - * - * Created on 12/13/2010 - * - * Copyright (c) 2010,2011,2013,2014,2015 Apple Inc. All rights reserved. +/* Copyright (c) (2010,2011,2012,2013,2014,2015,2016,2017,2018,2019) Apple Inc. All rights reserved. * + * corecrypto is licensed under Apple Inc.’s Internal Use License Agreement (which + * is contained in the License.txt file distributed with corecrypto) and only to + * people who accept that license. IMPORTANT: Any license rights granted to you by + * Apple Inc. (if any) are limited to internal use within your organization only on + * devices and computers you own or control, for the sole purpose of verifying the + * security characteristics and correct functioning of the Apple Software. You may + * not, directly or indirectly, redistribute the Apple Software or any portions thereof. */ #ifndef _CORECRYPTO_CCRNG_H_ @@ -35,7 +36,7 @@ struct ccrng_state { @discussion - It is significantly faster than using the system /dev/random - - FIPS Compliant: NIST SP800-80A + FIPS 140-2 + - FIPS Compliant: NIST SP800-90A + FIPS 140-2 - Seeded from the system entropy. - Provides at least 128bit security if the system provide 2bit of entropy / byte. - Entropy accumulation diff --git a/EXTERNAL_HEADERS/corecrypto/ccrsa.h b/EXTERNAL_HEADERS/corecrypto/ccrsa.h index a2baa932b..d0b459fda 100644 --- a/EXTERNAL_HEADERS/corecrypto/ccrsa.h +++ b/EXTERNAL_HEADERS/corecrypto/ccrsa.h @@ -1,11 +1,12 @@ -/* - * ccrsa.h - * corecrypto - * - * Created on 11/16/2010 - * - * Copyright (c) 2010,2011,2012,2014,2015 Apple Inc. All rights reserved. +/* Copyright (c) (2010,2011,2012,2014,2015,2016,2017,2018,2019,2020) Apple Inc. All rights reserved. 
* + * corecrypto is licensed under Apple Inc.’s Internal Use License Agreement (which + * is contained in the License.txt file distributed with corecrypto) and only to + * people who accept that license. IMPORTANT: Any license rights granted to you by + * Apple Inc. (if any) are limited to internal use within your organization only on + * devices and computers you own or control, for the sole purpose of verifying the + * security characteristics and correct functioning of the Apple Software. You may + * not, directly or indirectly, redistribute the Apple Software or any portions thereof. */ #ifndef _CORECRYPTO_CCRSA_H_ @@ -15,6 +16,7 @@ #include #include #include +#include #include // Apple does not generate keys of greater than 4096 bits @@ -38,7 +40,7 @@ typedef struct ccrsa_pub_ctx* ccrsa_pub_ctx_t; typedef struct ccrsa_priv_ctx* ccrsa_priv_ctx_t; /* - public key cczp d=e^-1 mod phi(m) priv key cczp priv key cczq dp, dq, qinv + public key cczp d=e^-1 mod lambda(m) priv key cczp priv key cczq dp, dq, qinv | | | | | | | | | | +-------+------+-------+------++------++-------+------+---------++-------+------+---------++-------+-------+---------+ @@ -101,7 +103,12 @@ ccrsa_pub_ctx_t ccrsa_ctx_public(ccrsa_full_ctx_t fk) { return (ccrsa_pub_ctx_t) fk; } -/* Return exact key bit size */ +/*! +@function ccrsa_pubkeylength +@abstract Compute the actual bit length of the RSA key (bit length of the modulus) +@param pubk An initialized RSA public key +@result bit length of the RSA key +*/ CC_NONNULL_ALL size_t ccrsa_pubkeylength(ccrsa_pub_ctx_t pubk); @@ -109,131 +116,356 @@ size_t ccrsa_pubkeylength(ccrsa_pub_ctx_t pubk); #define CCRSA_PKCS1_PAD_SIGN 1 #define CCRSA_PKCS1_PAD_ENCRYPT 2 -/* Initialize key based on modulus and e as cc_unit. key->zp.n must already be set. */ -CC_NONNULL((1, 2, 3)) -int ccrsa_init_pub(ccrsa_pub_ctx_t key, const cc_unit *modulus, - const cc_unit *e); - -/*! - @function ccrsa_make_priv - @abstract Initialize public and private key based on modulus and e, p and q as big endian byte arrays; - - @param full_ctx Initialized context with full_ctx->zp.n already set to 2*ccn_nof_size(p_mbytes) - @param exp_mbytes Number of bytes in big endian e. - @param exp_in pointer to big endian exponent e (may have leading 0's). - @param p_mbytes Number of bytes in big endian p. - @param p_in Pointer to the rsa p. - @param q_mbytes Number of bytes in big endian q. - @param q_in Pointer to the rsa q. - @result 0 iff successful. - - @discussion full_ctx->zp.n must already be set to 2*ccn_nof_size(p_mbytes), witt the expectation that p_mbytes>q_mbytes. - e is the public exponent, and exp_mbytes<= 2*p_mbytes. - The output is a fully formed rsa context with N=pq, d=e^{-1} mod phi(N), and appropriate inverses of different associated values precomputed - to speed computation. - */ - +/*! +@function ccrsa_init_pub +@abstract Initialize an RSA public key structure based on modulus and exponent. Values are copied into the structure. +@param pubk allocated public key structure (see requirements below) +@param modulus cc_unit array of the modulus +@param exponent cc_unit array of the exponent +@result CCERR_OK if no error + +@discussion ccrsa_ctx_n(pubk) must have been initialized based on the modulus size, typically using ccn_nof_size(mod_nbytes). + The public key structure pubk is typically allocated with ccrsa_pub_ctx_decl(ccn_sizeof_size(mod_nbytes), pubk); +*/ +CC_NONNULL_ALL +int ccrsa_init_pub(ccrsa_pub_ctx_t pubk, const cc_unit *modulus, + const cc_unit *exponent); + +/*! 
@function ccrsa_make_priv + @abstract Initializes an RSA public and private key given the public + exponent e and prime factors p and q. + + @param full_ctx Initialized context with ccrsa_ctx_n(full_ctx) set to 2*ccn_nof_size(p_nbytes) + @param e_nbytes Number of bytes of public exponent e. + @param e_bytes Public exponent e in Big Endian. + @param p_nbytes Number of bytes of prime factor p. + @param p_bytes Prime factor p in Big Endian. + @param q_nbytes Number of bytes of prime factor q. + @param q_bytes Prime factor q in Big Endian. + + @return 0 iff successful. + + @discussion ccrsa_ctx_n(full_ctx) must already be set to 2*ccn_nof_size(p_mbytes), with the expectation that p_nbytes>q_nbytes. + e is the public exponent, and e_nbytes<= 2*p_nbytes. + The output is a fully formed RSA context with N=pq, d=e^{-1} mod lambda(N), and appropriate inverses of different associated values precomputed + to speed computation. +*/ int ccrsa_make_priv(ccrsa_full_ctx_t full_ctx, - size_t exp_mbytes, - const uint8_t *exp_in, - size_t p_mbytes, - const uint8_t *p_in, - size_t q_mbytes, - const uint8_t *q_in); - -/* Initialize key based on modulus and e as big endian byte array - key->zp.n must already be set. */ + size_t e_nbytes, const uint8_t *e_bytes, + size_t p_nbytes, const uint8_t *p_bytes, + size_t q_nbytes, const uint8_t *q_bytes); + +/*! @function ccrsa_recover_priv + @abstract Initializes an RSA public and private key given the modulus m, + the public exponent e and the private exponent d. + + @discussion Follows the algorithm described by + NIST SP 800-56B, Appendix C, "Prime Factory Recovery". + + @param full_ctx Initialized context with ccrsa_ctx_n(full_ctx) set to ccn_nof_size(m_nbytes) + @param m_nbytes Number of bytes of modulus m. + @param m_bytes Modulus m in Big Endian. + @param e_nbytes Number of bytes of public exponent e. + @param e_bytes Public exponent e in Big Endian. + @param d_nbytes Number of bytes of private exponent d. + @param d_bytes Private exponent d in Big Endian. + @param rng RNG instance. + + @return 0 iff successful. +*/ +int ccrsa_recover_priv(ccrsa_full_ctx_t full_ctx, + size_t m_nbytes, const uint8_t *m_bytes, + size_t e_nbytes, const uint8_t *e_bytes, + size_t d_nbytes, const uint8_t *d_bytes, + struct ccrng_state *rng); + +/*! +@function ccrsa_make_pub +@abstract Initialize public key based on modulus and public exponent as big endian byte arrays; + +@param pubk allocated public key structure (see requirements below) +@param exp_nbytes Number of bytes in big endian exponent. +@param exp Pointer to big endian exponent e (may have leading 0's). +@param mod_nbytes Number of bytes in big endian modulus. +@param mod Pointer to big endian to rsa modulus N. +@result 0 iff successful. + +@discussion ccrsa_ctx_n(pubk) must have been initialized based on the modulus size, typically using ccn_nof_size(mod_nbytes). + The public key structure pubk is typically allocated with ccrsa_pub_ctx_decl(ccn_sizeof_size(mod_nbytes), pubk); +*/ + CC_NONNULL((1, 3, 5)) int ccrsa_make_pub(ccrsa_pub_ctx_t pubk, - size_t exp_nbytes, const uint8_t *exp, - size_t mod_nbytes, const uint8_t *mod); + size_t exp_nbytes, const uint8_t *exp, + size_t mod_nbytes, const uint8_t *mod); -/* Do a public key crypto operation (typically verify or encrypt) on in and put - the result in out. Both in and out should be cc_unit aligned and - ccrsa_key_n(key) units long. Clients should use ccn_read_uint() to - convert bytes to a cc_unit to use for this API.*/ +/*! 
+@function ccrsa_pub_crypt +@abstract Perform an RSA public key operation: (in)^e mod m +@param key initialized public key defining e and m +@param out result of the operation, at least ccrsa_key_n(key) cc_units must have been allocated +@param in base of the exponentiation, of size ccrsa_key_n(key) +@result CCERR_OK if no error + +@discussion Input to this function must not be secrets as the execution flow may expose their values + Clients can use ccn_read_uint() to convert bytes to cc_units to use for this API. +*/ CC_NONNULL((1, 2, 3)) int ccrsa_pub_crypt(ccrsa_pub_ctx_t key, cc_unit *out, const cc_unit *in); -/* Generate an nbit rsa key pair in key, which should be allocated using - ccrsa_full_ctx_decl(ccn_sizeof(1024), rsa_ctx). The unsigned big endian - byte array exponent e of length e_size is used as the exponent. It's an - error to call this function with an exponent larger than nbits. rng - must be a pointer to an initialized struct ccrng_state. */ -CC_NONNULL((2, 4, 5)) -int ccrsa_generate_key(size_t nbits, ccrsa_full_ctx_t rsa_ctx, - size_t e_size, const void *e, struct ccrng_state *rng) CC_WARN_RESULT; - -/* Generate RSA key in conformance with FIPS186-4 standard. - The first RNG `rng` will be used to generate p and q. - The second RNG `rng_mr` will be used only for primality testing. - This is relevant only for testing, just pass the same RNG twice. */ -CC_NONNULL((2, 4, 5, 6)) -int +/*! +@function ccrsa_generate_key +@abstract Generate a nbit RSA key pair. + +@param nbits Bit size requested for the key +@param fk Allocated context where the generated key will be stored +@param e_nbytes Byte size of the input public exponent +@param e_bytes Input public exponent in big endian. Recommend value is {0x01, 0x00, 0x01} +@param rng Random Number generator used. +@result CCERR_OK if no error + +@discussion + fk should be allocated using ccrsa_full_ctx_decl(ccn_sizeof(nbits), fk). + The unsigned big endian byte array exponent e of length e_size is used as the exponent. It's an error to call this function with an exponent larger than nbits +*/ +CC_NONNULL_ALL +int ccrsa_generate_key(size_t nbits, ccrsa_full_ctx_t fk, + size_t e_nbytes, const void *e_bytes, struct ccrng_state *rng) CC_WARN_RESULT; + +/*! +@function ccrsa_generate_fips186_key +@abstract Generate a nbit RSA key pair in conformance with FIPS186-4 standard. + +@param nbits Bit size requested for the key +@param fk Allocated context where the generated key will be stored +@param e_nbytes Byte size of the input public exponent +@param e_bytes Input public exponent in big endian. Recommend value is {0x01, 0x00, 0x01} +@param rng Random Number generator used for p and q +@param rng_mr Random Number generator only used for the primality check +@result CCERR_OK if no error + +@discussion + fk should be allocated using ccrsa_full_ctx_decl(ccn_sizeof(nbits), fk). + rng and rng_mr shoud be set to the same value. The distinction is only relevant for testing +*/ +CC_NONNULL_ALL int ccrsa_generate_fips186_key(size_t nbits, ccrsa_full_ctx_t fk, - size_t e_size, const void *eBytes, + size_t e_nbytes, const void *e_bytes, struct ccrng_state *rng, struct ccrng_state *rng_mr) CC_WARN_RESULT; + + /* Construct RSA key from fix input in conformance with FIPS186-4 standard */ + +/*! 
+@function ccrsa_make_fips186_key +@abstract Initialize an RSA full key from explicit inputs necessary for validating conformance to FIPS186-4 + +@param nbits size in bits of the key to construct +@param e_n Size in cc_unit of the public exponent +@param e Public exponent represented in cc_units +@param xp1_nbytes Size in byte of the first seed for the construction of p +@param xp1 First seed for the construction of p +@param xp2_nbytes Size in byte of the second seed for the construction of p +@param xp2 Second seed for the construction of p +@param xp_nbytes Size in byte of the large seed for the construction of p +@param xp large seed for the construction of p +@param xq1_nbytes Size in byte of the first seed for the construction of q +@param xq1 First seed for the construction of q +@param xq2_nbytes Size in byte of the second seed for the construction of q +@param xq2 Second seed for the construction of q +@param xq_nbytes Size in byte of the large seed for the construction of q +@param xq large seed for the construction of q +@param fk Allocated context where the output constructed key is stored +@param np Pointer to the size in cc_unit of the buffer for the output prime factor p. Updated with actual size. +@param r_p Copy of the output prime factor p +@param nq Pointer to the size in cc_unit of the buffer for the output prime factor q. Updated with actual size. +@param r_q Copy of the output prime factor q +@param nm Pointer to the size in cc_unit of the buffer for the output modulus m=p*q. Updated with actual size. +@param r_m Copy of the output modulus m=p*q +@param nd Pointer to the size in cc_unit of the buffer for the output private exponent d. Updated with actual size. +@param r_d Copy of the output private exponent d +@result 0 iff successful. + + @discussion + fk should be allocated using ccrsa_full_ctx_decl(ccn_sizeof(nbits), fk). +*/ + CC_NONNULL((3, 5, 7, 9, 11, 13, 15, 16)) int ccrsa_make_fips186_key(size_t nbits, const cc_size e_n, const cc_unit *e, - const cc_size xp1Len, const cc_unit *xp1, const cc_size xp2Len, const cc_unit *xp2, - const cc_size xpLen, const cc_unit *xp, - const cc_size xq1Len, const cc_unit *xq1, const cc_size xq2Len, const cc_unit *xq2, - const cc_size xqLen, const cc_unit *xq, + const cc_size xp1_nbytes, const cc_unit *xp1, const cc_size xp2_nbytes, const cc_unit *xp2, + const cc_size xp_nbytes, const cc_unit *xp, + const cc_size xq1_nbytes, const cc_unit *xq1, const cc_size xq2_nbytes, const cc_unit *xq2, + const cc_size xq_nbytes, const cc_unit *xq, ccrsa_full_ctx_t fk, cc_size *np, cc_unit *r_p, cc_size *nq, cc_unit *r_q, cc_size *nm, cc_unit *r_m, cc_size *nd, cc_unit *r_d); +/* + Signing and Verification algorithms +*/ + /*! - * @brief ccrsa_sign_pss() generates RSASSA-PSS signature in PKCS1-V2 format - * - * note that in RSASSA-PSS, salt length is part of the signature as specified in ASN1 - * RSASSA-PSS-params ::= SEQUENCE { - * hashAlgorithm [0] HashAlgorithm DEFAULT sha1, - * maskGenAlgorithm [1] MaskGenAlgorithm DEFAULT mgf1SHA1, - * saltLength [2] INTEGER DEFAULT 20, - * trailerField [3] TrailerField DEFAULT trailerFieldBC - * - * - * FIPS 186-4 for RSASSA-PSS: - * .... Both signature schemes are approved for use, but additional constraints are imposed beyond those specified in PKCS #1 v2.1..... 
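
As a rough sketch of the key-generation path documented above, the following assumes the default RNG accessor ccrng() from ccrng.h and the allocation pattern quoted in the discussion; the helper name, key size, and exponent value are illustrative only:

    #include <corecrypto/ccrng.h>
    #include <corecrypto/ccrsa.h>

    static int rsa_keygen_sketch(void)
    {
        int err = 0;
        struct ccrng_state *rng = ccrng(&err);      /* default RNG, assumed from ccrng.h */
        if (rng == NULL) {
            return err;
        }

        const uint8_t e[] = { 0x01, 0x00, 0x01 };   /* e = 65537, the recommended value */
        ccrsa_full_ctx_decl(ccn_sizeof(2048), fk);  /* allocation per the discussion above */

        err = ccrsa_generate_key(2048, fk, sizeof(e), e, rng);
        if (err != 0) {
            return err;
        }

        /* ccrsa_pubkeylength() reports the exact bit length of the modulus. */
        return (ccrsa_pubkeylength(ccrsa_ctx_public(fk)) == 2048) ? 0 : -1;
    }
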
- * - * • If nlen = 1024 bits (i.e., 128 bytes), and the output length of the approved hash function output block is 512 bits (i.e., 64 bytes), then the length (in bytes) of the salt (sLen) shall satisfy 0 ≤ sLen ≤ hLen – 2, - * • Otherwise, the length (in bytes) of the salt (sLen) shall satisfy 0 ≤ sLen ≤ hLen, where hLen is the length of the hash function output block (in bytes). - * - * - * • CAVS test vectors are not very useful in the case of RSA-PSS, because they only validate the exponentiation part of the signature. See: http://csrc.nist.gov/groups/STM/cavp/documents/components/RSA2SP1VS.pdf - * - * @param key The RSA key - * @param hashAlgorithm The hash algorithm used to generate mHash from the original message. It is also used inside the PSS encoding function. This is also the hash function to be used in the mask generation function (MGF) - * @param MgfHashAlgorithm The hash algorithm for thr mask generation function - * @param rng Random number geberator to generate salt in PSS encoding - * @param saltSize Intended length of the salt - * @param hSize Length of message hash . Must be equal to hashAlgorithm->output_size - * @param mHash The input that needs to be signed. This is the hash of message M with length of hLen - * - * @param sig The signature output - * @param sigSize The length of generated signature in bytes, which equals the size of the RSA modulus. - * @return 0:ok, non-zero:error +@function ccrsa_sign_pss + +@brief ccrsa_sign_pss() generates RSASSA-PSS signature in PKCS1-V2 format given an input digest + +@param key The RSA key +@param hashAlgorithm The hash algorithm used to generate mHash from the original message. It is also used inside the PSS encoding function. +@param MgfHashAlgorithm The hash algorithm for thr mask generation function +@param rng Random number geberator to generate salt in PSS encoding +@param salt_nbytes Intended length of the salt +@param digest_nbytes Length of message hash . Must be equal to hashAlgorithm->output_size +@param digest The input that needs to be signed. This is the hash of message M with length of hLen +@param sig_nbytes The length of generated signature in bytes, which equals the size of the RSA modulus. +@param sig The signature output +@return 0:ok, non-zero:error + +@discussion + note that in RSASSA-PSS, salt length is part of the signature as specified in ASN1 + RSASSA-PSS-params ::= SEQUENCE { + hashAlgorithm [0] HashAlgorithm DEFAULT sha1, + maskGenAlgorithm [1] MaskGenAlgorithm DEFAULT mgf1SHA1, + saltLength [2] INTEGER DEFAULT 20, + trailerField [3] TrailerField DEFAULT trailerFieldBC + + • If nlen = 1024 bits (i.e., 128 bytes), and the output length of the approved hash function output block is 512 bits (i.e., 64 bytes), then the length (in bytes) of the salt (sLen) shall satisfy 0 ≤ sLen ≤ hLen – 2, + • Otherwise, the length (in bytes) of the salt (sLen) shall satisfy 0 ≤ sLen ≤ hLen, where hLen is the length of the hash function output block (in bytes). */ -CC_NONNULL((2, 3, 5, 7, 8, 9)) +CC_NONNULL((1, 2, 3, 5, 7, 8, 9)) int ccrsa_sign_pss(ccrsa_full_ctx_t key, const struct ccdigest_info* hashAlgorithm, const struct ccdigest_info* MgfHashAlgorithm, - size_t saltSize, struct ccrng_state *rng, - size_t hSize, const uint8_t *mHash, - size_t *sigSize, uint8_t *sig); + size_t salt_nbytes, struct ccrng_state *rng, + size_t digest_nbytes, const uint8_t *digest, + size_t *sig_nbytes, uint8_t *sig); + +/*! 
+@function ccrsa_sign_pss_msg + +@brief ccrsa_sign_pss_msg() generates a RSASSA-PSS signature in PKCS1-V2 format given an input message + +@param key The RSA key +@param hashAlgorithm The hash algorithm used to generate mHash from the input message. It is also used inside the PSS encoding function. +@param MgfHashAlgorithm The hash algorithm for thr mask generation function +@param rng Random number generator to generate salt in PSS encoding +@param salt_nbytes Intended length of the salt +@param msg_nbytes Length of message. +@param msg The input that needs to be signed. This will be hashed using `hashAlgorithm` +@param sig_nbytes The length of generated signature in bytes, which equals the size of the RSA modulus. +@param sig The signature output +@return 0:ok, non-zero:error + +@discussion + note that in RSASSA-PSS, salt length is part of the signature as specified in ASN1 + RSASSA-PSS-params ::= SEQUENCE { + hashAlgorithm [0] HashAlgorithm DEFAULT sha1, + maskGenAlgorithm [1] MaskGenAlgorithm DEFAULT mgf1SHA1, + saltLength [2] INTEGER DEFAULT 20, + trailerField [3] TrailerField DEFAULT trailerFieldBC + + • If nlen = 1024 bits (i.e., 128 bytes), and the output length of the approved hash function output block is 512 bits (i.e., 64 bytes), then the length (in bytes) of the salt (sLen) shall satisfy 0 ≤ sLen ≤ hLen – 2, + • Otherwise, the length (in bytes) of the salt (sLen) shall satisfy 0 ≤ sLen ≤ hLen, where hLen is the length of the hash function output block (in bytes). + */ +CC_NONNULL((1, 2, 3, 5, 7, 8, 9)) +int ccrsa_sign_pss_msg(ccrsa_full_ctx_t key, + const struct ccdigest_info* hashAlgorithm, const struct ccdigest_info* MgfHashAlgorithm, + size_t salt_nbytes, struct ccrng_state *rng, + size_t msg_nbytes, const uint8_t *msg, + size_t *sig_nbytes, uint8_t *sig); + +/*! +@function ccrsa_verify_pss + +@brief ccrsa_verify_pss() verifies RSASSA-PSS signature in PKCS1-V2 format + +@param key The RSA public key +@param hashAlgorithm The hash algorithm used to generate mHash from the original message. It is also used inside the PSS encoding function. +@param MgfHashAlgorithm The hash algorithm for the mask generation function +@param digest_nbytes Length of message hash . Must be equal to hashAlgorithm->output_size +@param digest The signed message hash +@param sig_nbytes The length of generated signature in bytes, which equals the size of the RSA modulus. +@param sig The signature to verify +@param salt_nbytes Length of the salt as used during signature generation. Mismatch would result in the signature being considered invalid +@param valid Input boolean used to indicate a valid signature. + +@result 0 && valid == True indicates a valid signature. If return != 0 or valid == False, the signature is invalid. +*/ CC_NONNULL((2, 3, 5, 7, 9)) int ccrsa_verify_pss(ccrsa_pub_ctx_t key, - const struct ccdigest_info* di, const struct ccdigest_info* MgfDi, - size_t digestSize, const uint8_t *digest, - size_t sigSize, const uint8_t *sig, - size_t saltSize, bool *valid); + const struct ccdigest_info* hashAlgorithm, + const struct ccdigest_info* MgfHashAlgorithm, + size_t digest_nbytes, const uint8_t *digest, + size_t sig_nbytes, const uint8_t *sig, + size_t salt_nbytes, bool *valid) +cc_deprecate_with_replacement("ccrsa_verify_pss_digest", 13.0, 10.15, 13.0, 6.0, 4.0); + +/*! 
+@function ccrsa_verify_pss_digest + +@brief ccrsa_verify_pss_digest() verifies RSASSA-PSS signature in PKCS1-V2 format, given the digest + +@param key The RSA public key +@param di The hash algorithm used to generate the hash of the message. +@param mgfdi The hash algorithm for the mask generation function +@param digest_nbytes Length of digest. Must be equal to di->output_size +@param digest The signed message hash +@param sig_nbytes The length of generated signature in bytes, which equals the size of the RSA modulus. +@param sig The signature to verify +@param salt_nbytes Length of the salt as used during signature generation. +@param fault_canary_out OPTIONAL cc_fault_canary_t (see discussion) + +@result CCERR_SIGNATURE_VALID on signature success. + CCERR_SIGNATURE_INVALID on signature failure. + other on some other signature verification issue. + +@discussion If the fault_canary_out argument is not NULL, the value `CCRSA_PSS_FAULT_CANARY` will be placed into fault_canary_out + if the salted input hash is equal to the decoded hash (which strongly implies the signature is valid). Callers can then securely compare this output buffer against CCRSA_PSS_FAULT_CANARY, using CC_FAULT_CANARY_EQUAL, as an additional check of signature validity: if the two canary values are equal, the signature is valid otherwise it is not. If the signature is valid and the canary values are NOT equal this may indicate a potentially injected computational fault. +*/ + +CC_NONNULL((1, 2, 3, 5, 7)) +int ccrsa_verify_pss_digest(ccrsa_pub_ctx_t key, + const struct ccdigest_info* di, + const struct ccdigest_info* mgfdi, + size_t digest_nbytes, const uint8_t *digest, + size_t sig_nbytes, const uint8_t *sig, + size_t salt_nbytes, cc_fault_canary_t fault_canary_out); + +/*! +@function ccrsa_verify_pss_msg + +@brief ccrsa_verify_pss_msg() verifies RSASSA-PSS signature in PKCS1-V2 format, given the message + +@param key The RSA public key +@param di The hash algorithm used to generate the hash of the message. +@param mgfdi The hash algorithm for the mask generation function +@param msg_nbytes Length of message +@param msg The signed message +@param sig_nbytes The length of generated signature in bytes, which equals the size of the RSA modulus. +@param sig The signature to verify +@param salt_nbytes Length of the salt as used during signature generation. +@param fault_canary_out OPTIONAL cc_fault_canary_t (see discussion) + +@result CCERR_SIGNATURE_VALID on signature success. + CCERR_SIGNATURE_INVALID on signature failure. + other on some other signature verification issue. + +@discussion If the fault_canary_out argument is not NULL, the value `CCRSA_PSS_FAULT_CANARY` will be placed into fault_canary_out +if the salted input hash is equal to the decoded hash (which strongly implies the signature is valid). Callers can then securely compare this output buffer against CCRSA_PSS_FAULT_CANARY, using CC_FAULT_CANARY_EQUAL, as an additional check of signature validity: if the two canary values are equal, the signature is valid otherwise it is not. If the signature is valid and the canary values are NOT equal this may indicate a potentially injected computational fault. +*/ + +CC_NONNULL((1, 2, 3, 5, 7)) +int ccrsa_verify_pss_msg(ccrsa_pub_ctx_t key, + const struct ccdigest_info* di, + const struct ccdigest_info* mgfdi, + size_t msg_nbytes, const uint8_t *msg, + size_t sig_nbytes, const uint8_t *sig, + size_t salt_nbytes, cc_fault_canary_t fault_canary_out); + /*! 
@function ccrsa_sign_pkcs1v15 @@ -243,12 +475,12 @@ int ccrsa_verify_pss(ccrsa_pub_ctx_t key, @param oid OID describing the type of digest passed in @param digest_len Byte length of the digest @param digest Byte array of digest_len bytes containing the digest - @param sig_len Pointer to the number of byte allocate for sig. + @param sig_len Pointer to the number of bytes allocated for sig. Output the exact size of the signature. @param sig Pointer to the allocated buffer of size *sig_len for the output signature - @result 0 iff successful. + @result CCERR_OK iff successful. @discussion Null OID is a special case, required to support RFC 4346 where the padding is based on SHA1+MD5. In general it is not recommended to use a NULL OID, @@ -260,6 +492,29 @@ int ccrsa_sign_pkcs1v15(ccrsa_full_ctx_t key, const uint8_t *oid, size_t digest_len, const uint8_t *digest, size_t *sig_len, uint8_t *sig); +/*! + @function ccrsa_sign_pkcs1v15_msg + @abstract RSA signature with PKCS#1 v1.5 format per PKCS#1 v2.2 + + @param key Full key + @param di Digest context + @param msg_len Byte length of the message to sign + @param msg Byte array of msg_len bytes containing the message. Will be hashed with di. + @param sig_len Pointer to the number of bytes allocated for sig. + Output the exact size of the signature. + @param sig Pointer to the allocated buffer of size *sig_len + for the output signature + + @result CCERR_OK iff successful. + + @discussion Null OID is not supported by this API. + + */ +CC_NONNULL((1, 2, 4, 5, 6)) +int ccrsa_sign_pkcs1v15_msg(ccrsa_full_ctx_t key, const struct ccdigest_info* di, + size_t msg_len, const uint8_t *msg, + size_t *sig_len, uint8_t *sig); + /*! @function ccrsa_verify_pkcs1v15 @@ -269,11 +524,12 @@ int ccrsa_sign_pkcs1v15(ccrsa_full_ctx_t key, const uint8_t *oid, @param oid OID describing the type of digest passed in @param digest_len Byte length of the digest @param digest Byte array of digest_len bytes containing the digest - @param sig_len Number of byte of the signature sig. + @param sig_len Number of bytes of the signature sig. @param sig Pointer to the signature buffer of sig_len @param valid Output boolean, true if the signature is valid. - @result 0 iff successful. + @result A return value of 0 and valid = True indicates a valid signature. + A non-zero return value or valid = False indicates an invalid signature. @discussion Null OID is a special case, required to support RFC 4346 where the padding is based on SHA1+MD5. In general it is not @@ -286,6 +542,58 @@ int ccrsa_verify_pkcs1v15(ccrsa_pub_ctx_t key, const uint8_t *oid, size_t sig_len, const uint8_t *sig, bool *valid); +/*! + @function ccrsa_verify_pkcs1v15_digest + @abstract RSA signature with PKCS#1 v1.5 format per PKCS#1 v2.2, given a digest + + @param key Public key + @param oid OID describing the type of digest passed in + @param digest_len Byte length of the digest + @param digest Byte array of digest_len bytes containing the digest + @param sig_len Number of bytes of the signature sig. + @param sig Pointer to the signature buffer of sig_len + @param fault_canary_out OPTIONAL cc_fault_canary_t + + @result CCERR_VALID_SIGNATURE if a valid signature. + CCERR_INVALID_SIGNATURE if an invalid signature. + Other if the verification procedure failed. + + @discussion If the fault_canary_out argument is not NULL, the value `CCRSA_PKCS1_FAULT_CANARY` will be placed into fault_canary_out + if the input hash is equal to the decoded hash (which strongly implies the signature is valid). 
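
A hedged sketch of the canary check described in this discussion, assuming a SHA-256 digest and the cc_fault_canary.h comparison macro named here; the helper and its argument layout are illustrative:

    #include <stdbool.h>
    #include <corecrypto/ccrsa.h>
    #include <corecrypto/ccsha2.h>

    static bool verify_pkcs1v15_with_canary(ccrsa_pub_ctx_t pubk,
        const uint8_t *digest, size_t sig_len, const uint8_t *sig)
    {
        cc_fault_canary_t canary;
        int rv = ccrsa_verify_pkcs1v15_digest(pubk, ccoid_sha256,
            CCSHA256_OUTPUT_SIZE, digest, sig_len, sig, canary);

        /* Accept only if the return code and the canary agree; a mismatch
         * may indicate an injected computational fault. */
        return rv == CCERR_VALID_SIGNATURE &&
            CC_FAULT_CANARY_EQUAL(canary, CCRSA_PKCS1_FAULT_CANARY);
    }
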
Callers can then securely compare this output buffer against CCRSA_PKCS1_FAULT_CANARY, using CC_FAULT_CANARY_EQUAL, as an additional check of signature validity: if the two canary values are equal, the signature is valid otherwise it is not. If the signature is valid and the canary values are NOT equal this may indicate a potentially injected computational fault. +*/ +CC_NONNULL((1, 4, 6)) +int ccrsa_verify_pkcs1v15_digest(ccrsa_pub_ctx_t key, const uint8_t *oid, + size_t digest_len, const uint8_t *digest, + size_t sig_len, const uint8_t *sig, + cc_fault_canary_t fault_canary_out); + +/*! + @function ccrsa_verify_pkcs1v15_msg + @abstract RSA signature with PKCS#1 v1.5 format per PKCS#1 v2.2 + + @param key Public key + @param di Hash function + @param msg_len Byte length of the digest + @param msg Byte array of digest_len bytes containing the digest + @param sig_len Number of bytes of the signature sig. + @param sig Pointer to the signature buffer of sig_len + @param fault_canary_out OPTIONAL cc_fault_canary_t + + @result CCERR_VALID_SIGNATURE if a valid signature. + CCERR_INVALID_SIGNATURE if an invalid signature. + Other if the verification procedure failed. + + @discussion Null OID is not supported by this API. + If the fault_canary_out argument is not NULL, the value `CCRSA_PKCS1_FAULT_CANARY` will + be placed into fault_canary_out if the input hash is equal to the decoded hash (which strongly + implies the signature is valid). Callers can then securely compare this output buffer against CCRSA_PKCS1_FAULT_CANARY, using CC_FAULT_CANARY_EQUAL, as an additional check of signature validity: if the two canary values are equal, the signature is valid otherwise it is not. If the signature is valid and the canary values are NOT equal this may indicate a potentially injected computational fault. +*/ +CC_NONNULL((1, 2, 4, 6)) +int ccrsa_verify_pkcs1v15_msg(ccrsa_pub_ctx_t key, const struct ccdigest_info* di, + size_t msg_len, const uint8_t *msg, + size_t sig_len, const uint8_t *sig, + cc_fault_canary_t fault_canary_out); + /*! @function ccder_encode_rsa_pub_size @abstract Calculate size of public key export format data package. @@ -541,10 +849,41 @@ int ccrsa_import_priv(ccrsa_full_ctx_t key, size_t inlen, const uint8_t *der) { return (ccder_decode_rsa_priv(key, der, der+inlen) == NULL); } - +/*! +@function ccrsa_get_pubkey_components +@abstract Copy each component of the public key to the given buffers + +@param pubkey Public key +@param modulus Buffer to the output buffer for the modulus +@param modulusLength Pointer to the byte size allocated for the modulus, updated with actual output size +@param exponent Buffer to the output buffer for the exponent +@param exponentLength Pointer to the byte size allocated for the exponent, updated with actual output size + +@return 0 is success, not 0 in case of error + +@discussion if either allocated buffer length is insufficient, the function returns an error +*/ CC_NONNULL((1, 2)) int ccrsa_get_pubkey_components(const ccrsa_pub_ctx_t pubkey, uint8_t *modulus, size_t *modulusLength, uint8_t *exponent, size_t *exponentLength); +/*! 
+@function ccrsa_get_fullkey_components +@abstract Copy each component of the public key to the given buffers + +@param key Full key +@param modulus Buffer to the output buffer for the modulus +@param modulusLength Pointer to the byte size allocated for the modulus, updated with actual output size +@param exponent Buffer to the output buffer for the exponent +@param exponentLength Pointer to the byte size allocated for the exponent, updated with actual output size +@param p Buffer to the output buffer for the first prime factor of the modulus +@param pLength Pointer to the byte size allocated for the prime factor, updated with actual output size +@param q Buffer to the output buffer for the second prime factor of the modulus +@param qLength Pointer to the byte size allocated for the prime factor, updated with actual output size + +@return 0 is success, not 0 in case of error + +@discussion if either allocated buffer length is insufficient, the function returns an error +*/ CC_NONNULL((1, 2)) int ccrsa_get_fullkey_components(const ccrsa_full_ctx_t key, uint8_t *modulus, size_t *modulusLength, uint8_t *exponent, size_t *exponentLength, uint8_t *p, size_t *pLength, uint8_t *q, size_t *qLength); diff --git a/EXTERNAL_HEADERS/corecrypto/ccsha1.h b/EXTERNAL_HEADERS/corecrypto/ccsha1.h index 4dc3c5194..c2198ad95 100644 --- a/EXTERNAL_HEADERS/corecrypto/ccsha1.h +++ b/EXTERNAL_HEADERS/corecrypto/ccsha1.h @@ -1,11 +1,12 @@ -/* - * ccsha1.h - * corecrypto - * - * Created on 12/01/2010 - * - * Copyright (c) 2010,2011,2012,2014,2015 Apple Inc. All rights reserved. +/* Copyright (c) (2010,2011,2012,2014,2015,2016,2017,2018,2019) Apple Inc. All rights reserved. * + * corecrypto is licensed under Apple Inc.’s Internal Use License Agreement (which + * is contained in the License.txt file distributed with corecrypto) and only to + * people who accept that license. IMPORTANT: Any license rights granted to you by + * Apple Inc. (if any) are limited to internal use within your organization only on + * devices and computers you own or control, for the sole purpose of verifying the + * security characteristics and correct functioning of the Apple Software. You may + * not, directly or indirectly, redistribute the Apple Software or any portions thereof. */ #ifndef _CORECRYPTO_CCSHA1_H_ @@ -33,7 +34,6 @@ extern const struct ccdigest_info ccsha1_vng_intel_SupplementalSSE3_di; extern const struct ccdigest_info ccsha1_vng_arm_di; #endif -/* TODO: Placeholders */ #define ccoid_sha1 ((unsigned char *)"\x06\x05\x2b\x0e\x03\x02\x1a") #define ccoid_sha1_len 7 diff --git a/EXTERNAL_HEADERS/corecrypto/ccsha2.h b/EXTERNAL_HEADERS/corecrypto/ccsha2.h index e80c70e9e..7246269b3 100644 --- a/EXTERNAL_HEADERS/corecrypto/ccsha2.h +++ b/EXTERNAL_HEADERS/corecrypto/ccsha2.h @@ -1,11 +1,12 @@ -/* - * ccsha2.h - * corecrypto - * - * Created on 12/03/2010 - * - * Copyright (c) 2010,2011,2012,2014,2015 Apple Inc. All rights reserved. +/* Copyright (c) (2010,2011,2012,2014,2015,2016,2017,2018,2019) Apple Inc. All rights reserved. * + * corecrypto is licensed under Apple Inc.’s Internal Use License Agreement (which + * is contained in the License.txt file distributed with corecrypto) and only to + * people who accept that license. IMPORTANT: Any license rights granted to you by + * Apple Inc. (if any) are limited to internal use within your organization only on + * devices and computers you own or control, for the sole purpose of verifying the + * security characteristics and correct functioning of the Apple Software. 
You may + * not, directly or indirectly, redistribute the Apple Software or any portions thereof. */ #ifndef _CORECRYPTO_CCSHA2_H_ @@ -18,8 +19,8 @@ const struct ccdigest_info *ccsha224_di(void); const struct ccdigest_info *ccsha256_di(void); const struct ccdigest_info *ccsha384_di(void); const struct ccdigest_info *ccsha512_di(void); +const struct ccdigest_info *ccsha512_256_di(void); // SHA512/256 (cf FIPS 180-4 https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.180-4.pdf) -/* TODO: Placeholders */ #define ccoid_sha224 ((unsigned char *)"\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x04") #define ccoid_sha224_len 11 @@ -32,6 +33,8 @@ const struct ccdigest_info *ccsha512_di(void); #define ccoid_sha512 ((unsigned char *)"\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x03") #define ccoid_sha512_len 11 +#define ccoid_sha512_256 ((unsigned char *)"\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x06") +#define ccoid_sha512_256_len 11 /* SHA256 */ #define CCSHA256_BLOCK_SIZE 64 @@ -50,6 +53,7 @@ extern const struct ccdigest_info ccsha256_vng_arm64neon_di; #endif // CC_ACCELERATECRYPTO extern const struct ccdigest_info ccsha384_vng_arm_di; extern const struct ccdigest_info ccsha512_vng_arm_di; +extern const struct ccdigest_info ccsha512_256_vng_arm_di; #endif /* SHA224 */ @@ -62,6 +66,14 @@ extern const struct ccdigest_info ccsha224_ltc_di; #define CCSHA512_STATE_SIZE 64 extern const struct ccdigest_info ccsha512_ltc_di; +/* SHA512/256 */ +// FIPS 180-4 +// https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.180-4.pdf +#define CCSHA512_256_BLOCK_SIZE 128 +#define CCSHA512_256_OUTPUT_SIZE 32 +#define CCSHA512_256_STATE_SIZE 64 +extern const struct ccdigest_info ccsha512_256_ltc_di; + /* SHA384 */ #define CCSHA384_OUTPUT_SIZE 48 extern const struct ccdigest_info ccsha384_ltc_di; diff --git a/EXTERNAL_HEADERS/corecrypto/cczp.h b/EXTERNAL_HEADERS/corecrypto/cczp.h index e77f6b863..98ab852fb 100644 --- a/EXTERNAL_HEADERS/corecrypto/cczp.h +++ b/EXTERNAL_HEADERS/corecrypto/cczp.h @@ -1,11 +1,12 @@ -/* - * cczp.h - * corecrypto - * - * Created on 11/16/2010 - * - * Copyright (c) 2010,2011,2012,2013,2014,2015 Apple Inc. All rights reserved. +/* Copyright (c) (2010,2011,2012,2013,2014,2015,2016,2017,2018,2019) Apple Inc. All rights reserved. * + * corecrypto is licensed under Apple Inc.’s Internal Use License Agreement (which + * is contained in the License.txt file distributed with corecrypto) and only to + * people who accept that license. IMPORTANT: Any license rights granted to you by + * Apple Inc. (if any) are limited to internal use within your organization only on + * devices and computers you own or control, for the sole purpose of verifying the + * security characteristics and correct functioning of the Apple Software. You may + * not, directly or indirectly, redistribute the Apple Software or any portions thereof. 
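
A minimal sketch of the newly exposed SHA-512/256 descriptor, assuming the one-shot ccdigest() helper from ccdigest.h; the function name is illustrative:

    #include <corecrypto/ccdigest.h>
    #include <corecrypto/ccsha2.h>

    static void sha512_256_sketch(const void *msg, size_t msg_len,
        uint8_t out[CCSHA512_256_OUTPUT_SIZE])
    {
        const struct ccdigest_info *di = ccsha512_256_di();
        ccdigest(di, msg_len, msg, out);    /* one-shot digest, 32-byte output */
    }
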
*/ #ifndef _CORECRYPTO_CCZP_H_ @@ -27,16 +28,16 @@ struct cczp; typedef struct cczp *cczp_t; typedef const struct cczp *cczp_const_t; -typedef void (*ccmod_func_t)(cc_ws_t ws, cczp_const_t zp, cc_unit *t, const cc_unit *x, const cc_unit *y); +struct cczp_funcs; +typedef const struct cczp_funcs *cczp_funcs_t; // keep cczp_hd and cczp structures consistent // cczp_hd is typecasted to cczp to read EC curve params -// options field is to specify Montgomery arithmetic, bit field, etc // make sure n is the first element see ccrsa_ctx_n macro #define __CCZP_HEADER_ELEMENTS_DEFINITIONS(pre) \ cc_size pre##n; \ - cc_unit pre##options; \ - ccmod_func_t pre##mulmod_prime; + cc_unit pre##bitlen; \ + cczp_funcs_t pre##funcs; #define __CCZP_ELEMENTS_DEFINITIONS(pre) \ __CCZP_HEADER_ELEMENTS_DEFINITIONS(pre) \ @@ -66,6 +67,7 @@ struct cczp { #define CCZP_N(ZP) ((ZP)->n) #define CCZP_PRIME(ZP) ((ZP)->ccn) +#define CCZP_BITLEN(ZP) ((ZP)->bitlen) #define CCZP_RECIP(ZP) ((ZP)->ccn + CCZP_N(ZP)) CC_NONNULL((1)) CC_INLINE cc_size cczp_n(cczp_const_t zp) { @@ -77,6 +79,12 @@ CC_NONNULL((1)) CC_INLINE const cc_unit *cczp_prime(cczp_const_t zp) return zp->ccn; } +CC_NONNULL((1)) CC_INLINE size_t cczp_bitlen(cczp_const_t zp) +{ + cc_assert(ccn_bitlen(cczp_n(zp), cczp_prime(zp)) == CCZP_BITLEN(zp)); + return (size_t)CCZP_BITLEN(zp); +} + /* Return a pointer to the Reciprocal or Montgomery constant of zp, which is allocated cczp_n(zp) + 1 units long. */ CC_NONNULL((1)) CC_INLINE const cc_unit *cczp_recip(cczp_const_t zp) diff --git a/EXTERNAL_HEADERS/corecrypto/fipspost_trace.h b/EXTERNAL_HEADERS/corecrypto/fipspost_trace.h index c236bebd7..cb120a7c6 100644 --- a/EXTERNAL_HEADERS/corecrypto/fipspost_trace.h +++ b/EXTERNAL_HEADERS/corecrypto/fipspost_trace.h @@ -1,11 +1,12 @@ -/* - * fipspost_trace.h - * corecrypto - * - * Created on 01/25/2017 - * - * Copyright (c) 2017 Apple Inc. All rights reserved. +/* Copyright (c) (2017,2019) Apple Inc. All rights reserved. * + * corecrypto is licensed under Apple Inc.’s Internal Use License Agreement (which + * is contained in the License.txt file distributed with corecrypto) and only to + * people who accept that license. IMPORTANT: Any license rights granted to you by + * Apple Inc. (if any) are limited to internal use within your organization only on + * devices and computers you own or control, for the sole purpose of verifying the + * security characteristics and correct functioning of the Apple Software. You may + * not, directly or indirectly, redistribute the Apple Software or any portions thereof. */ #ifndef _CORECRYPTO_FIPSPOST_TRACE_H_ diff --git a/EXTERNAL_HEADERS/img4/api.h b/EXTERNAL_HEADERS/img4/api.h index 861f4e38e..8e3bac797 100644 --- a/EXTERNAL_HEADERS/img4/api.h +++ b/EXTERNAL_HEADERS/img4/api.h @@ -6,15 +6,16 @@ #define __IMG4_API_H #ifndef __IMG4_INDIRECT -#error "Please #include instead of this file directly" +#error "Please #include instead of this file directly" #endif // __IMG4_INDIRECT -#if IMG4_TAPI #include -#endif +#include +#include #ifndef KERNEL #include +#include #endif #if !XNU_KERNEL_PRIVATE @@ -40,38 +41,116 @@ * individual preprocessor macros in this header that declare new behavior as * required. 
*/ -#define IMG4_API_VERSION (20191001u) +#define IMG4_API_VERSION (20200724u) -#if !defined(KERNEL) && !IMG4_PROJECT_BUILD +#if IMG4_TAPI || (!defined(KERNEL) && !IMG4_PROJECT_BUILD) #define IMG4_API_AVAILABLE_20180112 \ - __API_UNAVAILABLE(macos) \ - API_AVAILABLE(ios(12.0), tvos(12.0), watchos(5.0)) + API_AVAILABLE( \ + macos(10.15), \ + ios(12.0), \ + tvos(12.0), \ + watchos(5.0)) + +#define IMG4_API_AVAILABLE_20180112_DEPRECATED \ + API_DEPRECATED_WITH_REPLACEMENT( \ + "img4_firmware_t", \ + macos(10.15, 11.0), \ + ios(12.0, 14.0), \ + tvos(12.0, 14.0), \ + watchos(5.0, 7.0)) #define IMG4_API_AVAILABLE_20181004 \ - __API_UNAVAILABLE(macos) \ - API_AVAILABLE(ios(12.2), tvos(12.2), watchos(5.2)) + API_DEPRECATED_WITH_REPLACEMENT( \ + "img4_firmware_t", \ + macos(10.15, 11.0), \ + ios(12.2, 14.0), \ + tvos(12.2, 14.0), \ + watchos(5.2, 7.0)) + +// This API version introduced the nonce manager which was not deprecated when +// the new API was introduced. #define IMG4_API_AVAILABLE_20181106 \ - __API_UNAVAILABLE(macos) \ - API_AVAILABLE(ios(12.2), tvos(12.2), watchos(5.2)) + API_AVAILABLE( \ + macos(10.15), \ + ios(12.2), \ + tvos(12.2), \ + watchos(5.2)) +#define IMG4_API_AVAILABLE_20181106_DEPRECATED \ + API_DEPRECATED_WITH_REPLACEMENT( \ + "img4_firmware_t", \ + macos(10.15, 11.0), \ + ios(12.2, 14.0), \ + tvos(12.2, 14.0), \ + watchos(5.2, 7.0)) #define IMG4_API_AVAILABLE_20190125 \ - API_AVAILABLE(macos(10.15), ios(13.0), tvos(13.0), watchos(6.0)) + API_DEPRECATED_WITH_REPLACEMENT( \ + "img4_firmware_t", \ + macos(10.15, 11.0), \ + ios(13.0, 14.0), \ + tvos(13.0, 14.0), \ + watchos(6.0, 7.0)) #define IMG4_API_AVAILABLE_20191001 \ - API_AVAILABLE(macos(10.15.2), ios(13.3), tvos(13.3), watchos(6.1.1)) + API_DEPRECATED_WITH_REPLACEMENT( \ + "img4_firmware_t", \ + macos(10.15.2, 11.0), \ + ios(13.3, 14.0), \ + tvos(13.3, 14.0), \ + watchos(6.1.1, 7.0)) +#define IMG4_API_AVAILABLE_20191108 \ + API_DEPRECATED_WITH_REPLACEMENT( \ + "img4_firmware_t", \ + macos(11.0, 11.0), \ + ios(14.0, 14.0), \ + tvos(14.0, 14.0), \ + watchos(7.0, 7.0)) +#define IMG4_API_AVAILABLE_20200221 \ + API_DEPRECATED_WITH_REPLACEMENT( \ + "img4_firmware_t", \ + macos(11.0, 11.0), \ + ios(14.0, 14.0), \ + tvos(14.0, 14.0), \ + watchos(7.0, 7.0)) +#define IMG4_API_AVAILABLE_20200310 \ + API_DEPRECATED_WITH_REPLACEMENT( \ + "img4_firmware_t", \ + macos(11.0, 11.0), \ + ios(14.0, 14.0), \ + tvos(14.0, 14.0), \ + watchos(7.0, 7.0)) +#define IMG4_API_AVAILABLE_20200508 \ + API_AVAILABLE( \ + macos(11.0), \ + ios(14.0), \ + tvos(14.0), \ + watchos(7.0), \ + bridgeos(5.0)) +#define IMG4_API_AVAILABLE_20200608 \ + API_AVAILABLE( \ + macos(11.0), \ + ios(14.0), \ + tvos(14.0), \ + watchos(7.0), \ + bridgeos(5.0)) +#define IMG4_API_AVAILABLE_20200724 \ + API_AVAILABLE( \ + macos(11.0), \ + ios(14.0), \ + tvos(14.0), \ + watchos(7.0), \ + bridgeos(5.0)) #else #define IMG4_API_AVAILABLE_20180112 +#define IMG4_API_AVAILABLE_20180112_DEPRECATED #define IMG4_API_AVAILABLE_20181004 #define IMG4_API_AVAILABLE_20181106 +#define IMG4_API_AVAILABLE_20181106_DEPRECATED #define IMG4_API_AVAILABLE_20190125 #define IMG4_API_AVAILABLE_20191001 -#endif // !defined(KERNEL) && !IMG4_PROJECT_BUILD - -#if !defined(OS_CLOSED_ENUM) -#define OS_CLOSED_ENUM(_name, _type, ...) \ - OS_ENUM(_name, _type, ## __VA_ARGS__) -#endif - -#if !defined(OS_CLOSED_OPTIONS) -#define OS_CLOSED_OPTIONS(_name, _type, ...) 
\ - OS_ENUM(_name, _type, ## __VA_ARGS__) +#define IMG4_API_AVAILABLE_20191108 +#define IMG4_API_AVAILABLE_20200221 +#define IMG4_API_AVAILABLE_20200310 +#define IMG4_API_AVAILABLE_20200508 +#define IMG4_API_AVAILABLE_20200608 +#define IMG4_API_AVAILABLE_20200724 #endif /*! diff --git a/EXTERNAL_HEADERS/img4/chip.h b/EXTERNAL_HEADERS/img4/chip.h new file mode 100644 index 000000000..004bbe300 --- /dev/null +++ b/EXTERNAL_HEADERS/img4/chip.h @@ -0,0 +1,490 @@ +/*! + * @header + * Supported chip environments. + */ +#ifndef __IMG4_CHIP_H +#define __IMG4_CHIP_H + +#ifndef __IMG4_INDIRECT +#error "Please #include instead of this file directly" +#endif // __IMG4_INDIRECT + +#if IMG4_TAPI +#include "tapi.h" +#endif + +OS_ASSUME_NONNULL_BEGIN + +/*! + * @typedef img4_chip_t + * An opaque type describing a destination chip environment for the firmware + * image. + */ +IMG4_API_AVAILABLE_20200508 +typedef struct _img4_chip img4_chip_t; + +/*! + * @typedef img4_chip_select_array_t + * A type representing a list of chips from which the implementation may select. + */ +IMG4_API_AVAILABLE_20200724 +typedef const img4_chip_t *_Nullable const *img4_chip_select_array_t; + +/*! + * @const IMG4_CHIP_INSTANCE_STRUCT_VERSION + * The version of the {@link img4_chip_instance_t} supported by the + * implementation. + */ +#define IMG4_CHIP_INSTANCE_STRUCT_VERSION (1u) + +/*! + * @typedef img4_chip_instance_omit_t + * A bitfield describing omitted identifiers from a chip instance. + * + * @const IMG4_CHIP_INSTANCE_OMIT_CEPO + * The chip instance has no epoch. + * + * @const IMG4_CHIP_INSTANCE_OMIT_BORD + * The chip instance has no board identifier. + * + * @const IMG4_CHIP_INSTANCE_OMIT_CHIP + * The chip instance has no chip identifier. + * + * @const IMG4_CHIP_INSTANCE_OMIT_SDOM + * The chip instance has no security domain. + * + * @const IMG4_CHIP_INSTANCE_OMIT_ECID + * The chip instance has no unique chip identifier. + * + * @const IMG4_CHIP_INSTANCE_OMIT_CPRO + * The chip instance has no certificate production status. + * + * @const IMG4_CHIP_INSTANCE_OMIT_CSEC + * The chip instance has no certificate security mode. + * + * @const IMG4_CHIP_INSTANCE_OMIT_EPRO + * The chip instance has no effective production status. + * + * @const IMG4_CHIP_INSTANCE_OMIT_ESEC + * The chip instance has no effective security mode. + * + * @const IMG4_CHIP_INSTANCE_OMIT_IUOU + * The chip instance has no internal-use-only-unit property. + * + * @const IMG4_CHIP_INSTANCE_OMIT_RSCH + * The chip instance has no research fusing state. + * + * @const IMG4_CHIP_INSTANCE_OMIT_EUOU + * The chip instance has no engineering-use-only-unit property. + */ +OS_CLOSED_OPTIONS(img4_chip_instance_omit, uint64_t, + IMG4_CHIP_INSTANCE_OMIT_CEPO = (1 << 0), + IMG4_CHIP_INSTANCE_OMIT_BORD = (1 << 1), + IMG4_CHIP_INSTANCE_OMIT_CHIP = (1 << 2), + IMG4_CHIP_INSTANCE_OMIT_SDOM = (1 << 3), + IMG4_CHIP_INSTANCE_OMIT_ECID = (1 << 4), + IMG4_CHIP_INSTANCE_OMIT_CPRO = (1 << 5), + IMG4_CHIP_INSTANCE_OMIT_CSEC = (1 << 6), + IMG4_CHIP_INSTANCE_OMIT_EPRO = (1 << 7), + IMG4_CHIP_INSTANCE_OMIT_ESEC = (1 << 8), + IMG4_CHIP_INSTANCE_OMIT_IUOU = (1 << 9), + IMG4_CHIP_INSTANCE_OMIT_RSCH = (1 << 10), + IMG4_CHIP_INSTANCE_OMIT_EUOU = (1 << 11), +); + +/*! + * @typedef img4_chip_instance_t + * An structure describing an instance of a chip. + * + * @field chid_version + * The version of the structure. Initialize to + * {@link IMG4_CHIP_INSTANCE_STRUCT_VERSION}. + * + * @field chid_chip_family + * The chip family of which this is an instance. 
+ * + * @field chid_omit + * The identifiers which are absent from the chip instance. + * + * @field chid_cepo + * The certificate epoch of the chip instance. + * + * @field chid_bord + * The board identifier of the chip instance. + * + * @field chid_chip + * The chip identifier of the chip instance. + * + * @field chid_sdom + * The security domain of the chip instance. + * + * @field chid_ecid + * The unique chip identifier of the chip instance. + * + * @field chid_cpro + * The certificate production status of the chip instance. + * + * @field chid_csec + * The certificate security mode of the chip instance. + * + * @field chid_epro + * The effective production status of the chip instance. + * + * @field chid_esec + * The effective security mode of the chip instance. + * + * @field chid_iuou + * The internal use-only unit status of the chip instance. + * + * @field chid_rsch + * The research mode of the chip instance. + * + * @field chid_euou + * The engineering use-only unit status of the chip instance. + * + * Added in version 1 of the structure. + */ +IMG4_API_AVAILABLE_20200508 +typedef struct _img4_chip_instance { + img4_struct_version_t chid_version; + const img4_chip_t *chid_chip_family; + img4_chip_instance_omit_t chid_omit; + uint32_t chid_cepo; + uint32_t chid_bord; + uint32_t chid_chip; + uint32_t chid_sdom; + uint64_t chid_ecid; + bool chid_cpro; + bool chid_csec; + bool chid_epro; + bool chid_esec; + bool chid_iuou; + bool chid_rsch; + bool chid_euou; +} img4_chip_instance_t; + +/*! + * @const IMG4_CHIP_AP_SHA1 + * The Application Processor on an Apple ARM SoC with an embedded sha1 + * certifcate chain. + * + * This chip environment represents one unique instance of such a chip. + */ +#if !XNU_KERNEL_PRIVATE +IMG4_API_AVAILABLE_20200508 +OS_EXPORT +const img4_chip_t _img4_chip_ap_sha1; +#define IMG4_CHIP_AP_SHA1 (&_img4_chip_ap_sha1) +#else +#define IMG4_CHIP_AP_SHA1 (img4if->i4if_v7.chip_ap_sha1) +#endif + +/*! + * @const IMG4_CHIP_AP_SHA2_384 + * The Application Processor on an Apple ARM SoC with an embedded sha2-384 + * certifcate chain. + * + * This chip environment represents one unique instance of such a chip. + */ +#if !XNU_KERNEL_PRIVATE +IMG4_API_AVAILABLE_20200508 +OS_EXPORT +const img4_chip_t _img4_chip_ap_sha2_384; +#define IMG4_CHIP_AP_SHA2_384 (&_img4_chip_ap_sha2_384) +#else +#define IMG4_CHIP_AP_SHA2_384 (img4if->i4if_v7.chip_ap_sha2_384) +#endif + +/*! + * @const IMG4_CHIP_AP_HYBRID + * An Intel x86 processor whose chain of trust is rooted in an + * {@link IMG4_CHIP_AP_SHA2_384} environment. Firmwares executed on this chip + * are authenticated against the characteristics of the corresponding AP chip + * environment. + * + * This chip environment represents one unique instance of such a chip pair. + */ +#if !XNU_KERNEL_PRIVATE +IMG4_API_AVAILABLE_20200508 +OS_EXPORT +const img4_chip_t _img4_chip_ap_hybrid; +#define IMG4_CHIP_AP_HYBRID (&_img4_chip_ap_hybrid) +#else +#define IMG4_CHIP_AP_HYBRID (img4if->i4if_v7.chip_ap_hybrid) +#endif + +/*! + * @const IMG4_CHIP_AP_REDUCED + * An Application Processor on an Apple ARM SoC operating in a reduced security + * configuration. + * + * This chip cannot be uniquely identified. + */ +#if !XNU_KERNEL_PRIVATE +IMG4_API_AVAILABLE_20200508 +OS_EXPORT +const img4_chip_t _img4_chip_ap_reduced; +#define IMG4_CHIP_AP_REDUCED (&_img4_chip_ap_reduced) +#else +#define IMG4_CHIP_AP_REDUCED (img4if->i4if_v7.chip_ap_reduced) +#endif + +/*! 
+ * @const IMG4_CHIP_AP_PERMISSIVE + * An Application Processor on an Apple ARM SoC operating with no secure boot + * enforcement. + * + * This chip cannot be uniquely identified. + */ +#if !XNU_KERNEL_PRIVATE +IMG4_API_AVAILABLE_20200508 +OS_EXPORT +const img4_chip_t _img4_chip_ap_permissive; +#define IMG4_CHIP_AP_PERMISSIVE (&_img4_chip_ap_permissive) +#else +#define IMG4_CHIP_AP_PERMISSIVE (img4if->i4if_v8.chip_ap_permissive) +#endif + +/*! + * @const IMG4_CHIP_AP_HYBRID_MEDIUM + * An Intel x86 processor whose chain of trust is rooted in an + * {@link IMG4_CHIP_AP_SHA2_384} environment and is operating in a "medium + * security" mode due to a user-approved security degradation. + * + * This chip cannot be uniquely identified. + */ +#if !XNU_KERNEL_PRIVATE +IMG4_API_AVAILABLE_20200508 +OS_EXPORT +const img4_chip_t _img4_chip_ap_hybrid_medium; +#define IMG4_CHIP_AP_HYBRID_MEDIUM (&_img4_chip_ap_hybrid_medium) +#else +#define IMG4_CHIP_AP_HYBRID_MEDIUM (img4if->i4if_v8.chip_ap_hybrid_medium) +#endif + +/*! + * @const IMG4_CHIP_AP_HYBRID_RELAXED + * An Intel x86 processor whose chain of trust is rooted in an + * {@link IMG4_CHIP_AP_SHA2_384} environment and is operating with no secure + * boot enforcement due to a user-approved security degradation. + * + * This chip cannot be uniquely identified. + */ +#if !XNU_KERNEL_PRIVATE +IMG4_API_AVAILABLE_20200508 +OS_EXPORT +const img4_chip_t _img4_chip_ap_hybrid_relaxed; +#define IMG4_CHIP_AP_HYBRID_RELAXED (&_img4_chip_ap_hybrid_relaxed) +#else +#define IMG4_CHIP_AP_HYBRID_RELAXED (img4if->i4if_v8.chip_ap_hybrid_relaxed) +#endif + +/*! + * @const IMG4_CHIP_AP_SOFTWARE_FF00 + * A software-defined chip environment whose firmwares are executed on any + * Application Processor on an Apple ARM SoC. The firmwares are loadable trust + * caches shipped with OTA update brains. + * + * This chip cannot be uniquely identified. + */ +#if !XNU_KERNEL_PRIVATE +IMG4_API_AVAILABLE_20200508 +OS_EXPORT +const img4_chip_t _img4_chip_ap_software_ff00; +#define IMG4_CHIP_AP_SOFTWARE_FF00 (&_img4_chip_ap_software_ff00) +#else +#define IMG4_CHIP_AP_SOFTWARE_FF00 (img4if->i4if_v7.chip_ap_software_ff00) +#endif + +/*! + * @const IMG4_CHIP_AP_SOFTWARE_FF01 + * A software-defined chip environment whose firmwares are executed on any + * Application Processor on an Apple ARM SoC. The firmwares are loadable trust + * caches which are shipped in the Install Assistant and loaded by an + * unprivileged trampoline. + * + * This chip cannot be uniquely identified. + */ +#if !XNU_KERNEL_PRIVATE +IMG4_API_AVAILABLE_20200508 +OS_EXPORT +const img4_chip_t _img4_chip_ap_software_ff01; +#define IMG4_CHIP_AP_SOFTWARE_FF01 (&_img4_chip_ap_software_ff01) +#else +#define IMG4_CHIP_AP_SOFTWARE_FF01 (img4if->i4if_v7.chip_ap_software_ff01) +#endif + +/*! + * @const IMG4_CHIP_X86 + * An Intel x86 processor which cannot be uniquely identified. + */ +#if !XNU_KERNEL_PRIVATE +IMG4_API_AVAILABLE_20200508 +OS_EXPORT +const img4_chip_t _img4_chip_x86; +#define IMG4_CHIP_X86 (&_img4_chip_x86) +#else +#define IMG4_CHIP_X86 (img4if->i4if_v7.chip_x86) +#endif + +/*! + * @const IMG4_CHIP_X86_SOFTWARE_8012 + * A software-defined chip environment describing a virtualized x86 processor. + * Since the virtual machine is at the mercy of the VM, support for any sort of + * chip identity may not be available. 
Therefore this environment is returned + * from {@link img4_chip_select_personalized_ap} and + * {@link img4_chip_select_effective_ap} when it is called on a virtual machine + * so that the appropriate chip environment is present entirely in software. + * + * This environment provides an equivalent software identity to that of + * the {@link IMG4_CHIP_X86} chip environment on non-Gibraltar Macs. + * + * @discussion + * Do not use this environment directly. + */ +#if !XNU_KERNEL_PRIVATE +IMG4_API_AVAILABLE_20200508 +OS_EXPORT +const img4_chip_t _img4_chip_x86_software_8012; +#define IMG4_CHIP_X86_SOFTWARE_8012 (&_img4_chip_x86_software_8012) +#else +#define IMG4_CHIP_X86_SOFTWARE_8012 (img4if->i4if_v7.chip_x86_software_8012) +#endif + +/*! + * @function img4_chip_init_from_buff + * Initializes a buffer as a chip object. + * + * @param buff + * A pointer to the storage to use for the chip object. + * + * @param len + * The size of the buffer. + * + * @discussion + * The caller is expected to pass a buffer that is "big enough". If the provided + * buffer is too small, the implementation will abort the caller. + * + * @example + * + * uint8_t _buff[IMG4_CHIP_SIZE_RECOMMENDED]; + * img4_chip_t *chip = NULL; + * + * chip = img4_chip_init_from_buff(_buff, sizeof(_buff)); + */ +#if !XNU_KERNEL_PRIVATE +IMG4_API_AVAILABLE_20200508 +OS_EXPORT OS_WARN_RESULT OS_NONNULL1 +img4_chip_t * +img4_chip_init_from_buff(void *buff, size_t len); +#else +#define img4_chip_init_from_buff (img4if->i4if_v7.chip_init_from_buff) +#endif + +/*! + * @function img4_chip_select_personalized_ap + * Returns the chip appropriate for personalized verification against the host + * AP. + * + * @result + * The personalized chip environment for the host which corresponds to its + * silicon identity. + */ +#if !XNU_KERNEL_PRIVATE +IMG4_API_AVAILABLE_20200508 +OS_EXPORT OS_WARN_RESULT +const img4_chip_t * +img4_chip_select_personalized_ap(void); +#else +#define img4_chip_select_personalized_ap(...) \ + (img4if->i4if_v7.chip_select_personalized_ap(__VA_ARGS__)) +#endif + +/*! + * @function img4_chip_select_effective_ap + * Returns the chip appropriate for verification against the host AP. + * + * @result + * The currently enforced chip environment for the host. This interface is + * generally only useful on the AP. + */ +#if !XNU_KERNEL_PRIVATE +IMG4_API_AVAILABLE_20200508 +OS_EXPORT OS_WARN_RESULT +const img4_chip_t * +img4_chip_select_effective_ap(void); +#else +#define img4_chip_select_effective_ap(...) \ + (img4if->i4if_v7.chip_select_effective_ap(__VA_ARGS__)) +#endif + +/*! + * @function img4_chip_instantiate + * Returns an instantiation of the given chip using the default runtime where + * necessary. + * + * @param chip + * The chip to instantiate. + * + * @param chip_instance + * Upon successful return, storage to be populated with the instantiated chip. + * Upon failure, the contents of this storage are undefined. + * + * @result + * Upon success, zero is returned. 
Otherwise, one of the following error codes + * will be returned: + * + * [EXDEV] There was an error querying the runtime's identity oracle + * [ENODATA] The expected property in the runtime's identity oracle was + * of an unexpected type + * [EOVERFLOW] The expected property in the runtime's identity oracle had + * a value that was too large to be represented in the + * expected type + */ +#if !XNU_KERNEL_PRIVATE +IMG4_API_AVAILABLE_20200508 +OS_EXPORT OS_WARN_RESULT OS_NONNULL1 OS_NONNULL2 +errno_t +img4_chip_instantiate(const img4_chip_t *chip, + img4_chip_instance_t *chip_instance); +#else +#define img4_chip_instantiate(...) \ + (img4if->i4if_v7.chip_instantiate(__VA_ARGS__)) +#endif + +/*! + * @function img4_chip_custom + * Returns a custom chip derived from the given chip instance. The + * {@link chid_chip_family} field of the given instance will be used as a + * template from which to derive the new chip. + * + * @param chip_instance + * The instance of the custom chip. + * + * The memory referenced by this pointer must be static or otherwise guaranteed + * to be valid for the duration of the caller's use of the custom chip. + * + * @param chip + * A pointer to storage for the new custom chip. + * + * The memory referenced by this pointer must be static or otherwise guaranteed + * to be valid for the duration of the caller's use of the custom chip. + * + * This pointer should be obtained as the result of a call to + * {@link img4_chip_init_from_buff}. + * + * @result + * A new custom chip. + */ +#if !XNU_KERNEL_PRIVATE +IMG4_API_AVAILABLE_20200508 +OS_EXPORT OS_WARN_RESULT OS_NONNULL1 +const img4_chip_t * +img4_chip_custom(const img4_chip_instance_t *chip_instance, img4_chip_t *chip); +#else +#define img4_chip_custom(...) (img4if->i4if_v7.chip_custom(__VA_ARGS__)) +#endif + +OS_ASSUME_NONNULL_END + +#endif // __IMG4_CHIP_H diff --git a/EXTERNAL_HEADERS/img4/environment.h b/EXTERNAL_HEADERS/img4/environment.h deleted file mode 100644 index 98d202444..000000000 --- a/EXTERNAL_HEADERS/img4/environment.h +++ /dev/null @@ -1,180 +0,0 @@ -/*! - * @header - * Image4 environments. - */ -#ifndef __IMG4_ENVIRONMENT_H -#define __IMG4_ENVIRONMENT_H - -#ifndef __IMG4_INDIRECT -#error "Please #include instead of this file directly" -#endif // __IMG4_INDIRECT - -#if IMG4_TAPI -#include "tapi.h" -#endif - -/*! - * @const IMG4_IDENTITY_VERSION - * The version of the {@link img4_identity_t} supported by the implementation. - */ -#define IMG4_IDENTITY_VERSION (0u) - -/*! - * @const IMG4_ENVIRONMENT_LENGTH - * The minimum length for an allocation which can accommodate an - * img4_environment_t structure. This is the minimum length which must be given - * to {@link img4_environment_init_identity}. - */ -#define IMG4_ENVIRONMENT_LENGTH (160ul) - -/*! - * @const IMG4_IDENTITY_CRYPTO_SHA1 - * The device-tree string indicating that the identity requires SHA1. - */ -#define IMG4_IDENTITY_CRYPTO_SHA1 "sha1" - -/*! - * @const IMG4_IDENTITY_CRYPTO_SHA2_384 - * The device-tree string indicating that the identity requires SHA2-384. - */ -#define IMG4_IDENTITY_CRYPTO_SHA2_384 "sha2-384" - -/*! - * @typedef img4_environment_t - * An opaque type describing an Image4 environment. - */ -typedef struct _img4_environment img4_environment_t; - -/*! - * @typedef img4_identity_t - * A structure describing a specific Image4 identity comprised of user-supplied - * identifiers. 
- * - * @field i4id_version - * The version of the identity structure; initialize to - * {@link IMG4_IDENTITY_VERSION} - * - * @field i4id_algo - * A string identifying the chosen crypto algorithm as represented in the device - * tree. Currently valid values are: - * - * - {@link IMG4_IDENTITY_CRYPTO_SHA1} - * - {@link IMG4_IDENTITY_CRYPTO_SHA2_384} - * - * @field i4id_cepo - * The minimum certificate epoch required, - * - * @field i4id_bord - * The board identifier. - * - * @field i4id_chip - * The chip identifier. - * - * @field i4id_ecid - * The unique chip identifier. - * - * @field i4id_sdom - * The security domain. - * - * @field i4id_cpro - * The certificate production status. - * - * @field i4id_csec - * The certificate security mode. - * - * @field i4id_epro - * The effective production status. - * - * @field i4id_esec - * The effective security mode. - */ -IMG4_API_AVAILABLE_20191001 -typedef struct _img4_identity { - img4_struct_version_t i4id_version; - char i4id_algo[12]; - uint32_t i4id_cepo; - uint32_t i4id_bord; - uint32_t i4id_chip; - uint64_t i4id_ecid; - uint32_t i4id_sdom; - bool i4id_cpro; - bool i4id_csec; - bool i4id_epro; - bool i4id_esec; -} img4_identity_t; - -/*! - * @const IMG4_ENVIRONMENT_PLATFORM - * The environment for the host that uses the default platform implementation to - * resolve the environment. This is the environment against which manifests are - * personalized. - */ -#if !XNU_KERNEL_PRIVATE -IMG4_API_AVAILABLE_20180112 -OS_EXPORT -const struct _img4_environment _img4_environment_platform; -#define IMG4_ENVIRONMENT_PLATFORM (&_img4_environment_platform) -#else -#define IMG4_ENVIRONMENT_PLATFORM (img4if->i4if_environment_platform) -#endif - - -/*! - * @const IMG4_ENVIRONMENT_TRUST_CACHE - * The software environment for globally-signed loadable trust caches. This - * environment should be used as a fallback when validation against the platform - * fails, and the caller is handling a loadable trust cache. - */ -#if !XNU_KERNEL_PRIVATE -IMG4_API_AVAILABLE_20181004 -OS_EXPORT -const struct _img4_environment _img4_environment_trust_cache; -#define IMG4_ENVIRONMENT_TRUST_CACHE (&_img4_environment_trust_cache) -#else -#define IMG4_ENVIRONMENT_TRUST_CACHE (img4if->i4if_environment_trust_cache) -#endif - -/*! - * @function img4_environment_init_identity - * Initializes a caller-supplied environment with custom identity information. - * This may be used for performing test evaluations or evaluations against - * environments not yet supported by the implementation. - * - * @param i4e - * A pointer to the storage which will hold the custom environment. - * - * @param len - * The length of the storage referenced by {@link i4e}. This must be at least - * {@link IMG4_ENVIRONMENT_LENGTH} bytes. - * - * @param i4id - * The identity with which to initialize the environment. The resulting - * environment object will provide these identitifers to the evaluator. - * - * @result - * Upon success, zero is returned. The implementation may also return one of the - * following error codes directly: - * - * [EOVERFLOW] The length provided is insufficient to initialize an - * environment structure - * - * @discussion - * When the resulting environment is given to {@link img4_get_trusted_payload} - * or {@link img4_get_trusted_external_payload}, the trust evaluation proceeds - * as though it were creating a new chain of trust and therefore acts as though - * {@link I4F_FIRST_STAGE} was given to {@link img4_init}. 
No prior stage of - * secure boot will be consulted for evaluation, and mix-n-match will be - * presumed to be permitted. - */ -#if !XNU_KERNEL_PRIVATE -IMG4_API_AVAILABLE_20191001 -OS_EXPORT OS_WARN_RESULT OS_NONNULL1 OS_NONNULL3 -errno_t -img4_environment_init_identity(img4_environment_t *i4e, size_t len, - const img4_identity_t *i4id); -#else -#define img4_environment_init_identity(...) \ - (img4if->i4if_v4.environment_init_identity(__VA_ARGS__)) -#endif - -#endif // __IMG4_ENVIRONMENT_H diff --git a/EXTERNAL_HEADERS/img4/firmware.h b/EXTERNAL_HEADERS/img4/firmware.h new file mode 100644 index 000000000..b581e340b --- /dev/null +++ b/EXTERNAL_HEADERS/img4/firmware.h @@ -0,0 +1,649 @@ +/*! + * @header + * Interfaces for manipulating Image4 firmware objects. + */ +#ifndef __IMG4_FIRMWARE_H +#define __IMG4_FIRMWARE_H + +#include +#include +#include +#include + +__BEGIN_DECLS; + +#if !KERNEL +#include +#endif + +#if !_DARWIN_BUILDING_PROJECT_APPLEIMAGE4 +#if __has_include() && !KERNEL +#include +#elif XNU_KERNEL_PRIVATE +// There is no linker set header in the KDK, and the one from the SDK is not +// safe for kexts to use. +// +// +#include +#else +#define LINKER_SET_ENTRY(...) +#endif +#endif // !_DARWIN_BUILDING_PROJECT_APPLEIMAGE4 + +/*! + * @discussion + * When used from the pmap layer, this header pulls in the types from libsa, + * which conflict with the BSD sys/types.h header that we need to pull in. But + * we only need it for the errno_t typedef and the vnode_t typedef. So when + * building MACH_KERNEL_PRIVATE, we do two things: + * + * 1. Explicitly pull in , so we get errno_t and + * nothing else (no transitive #include's) + * 2. #define _SYS_TYPES_H_ before #includ'ing so that + * we don't get the transitive #include of but we still get + * the definitions we need + */ +#if MACH_KERNEL_PRIVATE +#define _SYS_TYPES_H_ 1 +#include +#include +#else +#include +#include +#endif + +#define __IMG4_INDIRECT 1 +#include + +#if IMG4_TAPI +#include "tapi.h" +#endif + +OS_ASSUME_NONNULL_BEGIN + +/*! + * @typedef img4_4cc_t + * A type which represents a four-character code (4cc) that identifies the + * firmware. These 4cc's are statically assigned and correspond to long-form tag + * names -- e.g. the 4cc 'krnl' corresponds to the "KernelCache" tag. + */ +IMG4_API_AVAILABLE_20200508 +typedef uint32_t img4_4cc_t; + +/*! + * @typedef img4_buff_t + * A structure describing a buffer. See {@link _img4_buff}. + */ +IMG4_API_AVAILABLE_20200508 +typedef struct _img4_buff img4_buff_t; + +/*! + * @typedef img4_firmware_t + * An opaque type describing an Image4 firmware object. + */ +IMG4_API_AVAILABLE_20200508 +typedef struct _img4_firmware *img4_firmware_t; + +/*! + * @typedef img4_image_t + * An opaque type describing an authenticated Image4 firmware image. + */ +IMG4_API_AVAILABLE_20200508 +typedef struct _img4_image *img4_image_t; + +/*! + * @typedef img4_runtime_t + * A structure describing required primitives in the operating environment's + * runtime. See {@link _img4_runtime}. + */ +IMG4_API_AVAILABLE_20200508 +typedef struct _img4_runtime img4_runtime_t; + +OS_ASSUME_NONNULL_END + +#if !_DARWIN_BUILDING_PROJECT_APPLEIMAGE4 || IMG4_TAPI +#define __IMG4_INDIRECT 1 +#include +#include +#include +#include +#include +#endif + +OS_ASSUME_NONNULL_BEGIN + +/*! + * @typedef img4_firmware_authenticated_execute_t + * A firmware execution function. This function is called when the firmware has + * been successfully authenticated and is ready for execution. 
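+ *
+ * A minimal, illustrative sketch of such a callback (the function and
+ * variable names here are placeholders, not part of the interface):
+ *
+ *     static void
+ *     _example_execute(const img4_firmware_t fw, img4_image_t image,
+ *             errno_t error, void *ctx)
+ *     {
+ *         if (error != 0) {
+ *             // authentication failed -- see the error codes below
+ *             return;
+ *         }
+ *         if (image) {
+ *             // the authenticated payload may now be mapped or executed
+ *             const img4_buff_t *payload = img4_image_get_bytes(image);
+ *             (void)payload;
+ *         }
+ *         (void)fw;
+ *         (void)ctx;
+ *     }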
+ * + * @param fw + * The firmware which has been authenticated. + * + * @param image + * The resulting firmware image that may be executed. The implementation will + * pass NULL if there was a failure. + * + * This object is automatically freed by the implementation upon return. + * + * @param error + * An error code describing the result of the authentication. If authentication + * was successful, the implementation will pass zero. Otherwise, one of the + * following error codes will be provided: + * + * [EILSEQ] The firmware data is not valid Image4 data -- this will not + * be passed for firmwares created with + * {@link IMG4_FIRMWARE_FLAG_BARE} + * [EFTYPE] The attached manifest is not a valid Image4 manifest + * [ENOENT] The attached manifest does not authenticate this type of + * firmware + * [EAUTH] The attached manifest is not authentic (i.e. was not signed + * by an Apple CA) + * [EACCES] The given chip does not satisfy the constraints of the + * attached manifest + * [ESTALE] The manifest has been invalidated and is no longer valid for + * the provided chip + * [ENOEXEC] The firmware has been corrupted, or the given chip does not + * satisfy the constraints of the corresponding object in the + * attached manifest + * + * @param _ctx + * The user-provided context pointer. + */ +IMG4_API_AVAILABLE_20200508 +typedef void (*img4_firmware_authenticated_execute_t)( + const img4_firmware_t fw, + img4_image_t _Nullable image, + errno_t error, + void *_ctx +); + +/*! + * @define IMG4_FIRMWARE_EXECUTION_CONTEXT_STRUCT_VERSION + * The version of the {@link img4_firmware_execution_context_t} structure + * supported by the implementation. + */ +#define IMG4_FIRMWARE_EXECUTION_CONTEXT_STRUCT_VERSION (0u) + +/*! + * @typedef img4_firmware_execution_context_t + * A structure describing the context in which a firmware is to be executed. + * + * @field i4fex_version + * The version of the structure supported by the implementation. Initialize to + * {@link IMG4_FIRMWARE_EXECUTION_CONTEXT_STRUCT_VERSION}. + * + * @field i4fex_execute + * A pointer to the firmware execution function. + * + * @field i4fex_context + * A caller-provided context pointer that will be passed to functions invoked + * from the execution context. + */ +IMG4_API_AVAILABLE_20200508 +typedef struct _img4_firmware_execution_context { + img4_struct_version_t i4fex_version; + img4_firmware_authenticated_execute_t i4fex_execute; + void *i4fex_context; +} img4_firmware_execution_context_t; + +/*! + * @typedef img4_firmware_flags_t + * A bitfield modifying the behavior of an {@link img4_firmware_t} object. + * + * @const IMG4_FIRMWARE_FLAG_INIT + * No bits set. This value is suitable for initialization purposes. + * + * @const IMG4_FIRMWARE_FLAG_ATTACHED_MANIFEST + * The manifest authenticating the firmware is attached (i.e. the buffer given + * represents a .img4 file). + * + * @const IMG4_FIRMWARE_FLAG_BARE + * The firmware image is not wrapped with an Image4 payload structure. This flag + * is mutually exclusive with {@link IMG4_FIRMWARE_FLAG_ATTACHED_MANIFEST}, and + * if both are present, the implementation's behavior is undefined. + * + * @const IMG4_FIRMWARE_FLAG_SUBSEQUENT_STAGE + * The firmware image extends an existing chain of trust. If set, the + * runtime must provide a {@link i4rt_get_digest} function which returns a + * digest for {@link IMG4_IDENTIFIER_CHMH}. + * + * If set, the firmware may optionally provide a {@link i4rt_get_bool} function + * which returns a value for {@link IMG4_IDENTIFIER_AMNM}. 
+ * + * @const IMG4_FIRMWARE_FLAG_RESPECT_AMNM + * Forces the implementation to respect the manifest's AMNM entitlement if it is + * present, even if the validation is creating a new chain of trust. This is + * technically maybe sort of against the Image4 spec, but it is useful for + * certain internal workflows (cf. v2.3, §2.2.10). + * + * This flag has no effect if {@link IMG4_FIRMWARE_FLAG_SUBSEQUENT_STAGE} is + * also passed. + */ +IMG4_API_AVAILABLE_20200508 +OS_CLOSED_OPTIONS(img4_firmware_flags, uint64_t, + IMG4_FIRMWARE_FLAG_INIT, + IMG4_FIRMWARE_FLAG_ATTACHED_MANIFEST = (1 << 0), + IMG4_FIRMWARE_FLAG_BARE = (1 << 1), + IMG4_FIRMWARE_FLAG_SUBSEQUENT_STAGE = (1 << 2), + IMG4_FIRMWARE_FLAG_RESPECT_AMNM = (1 << 3), +); + +/*! + * @function img4_firmware_new + * Allocate and initialize a new firmware object. + * + * @param rt + * The runtime in which to initialize the object. + * + * @param _4cc + * The 4cc which distinguishes the firmware. + * + * @param buff + * A buffer containing a valid Image4 payload (usually the contents of either a + * .im4p or .img4 file). + * + * Upon return, the destructor in the buffer is replaced with NULL, and the + * implementation assumes responsibility for deallocating the underlying memory. + * + * @param flags + * Flags modifying the behavior of the object. + * + * @result + * A new firmware object or NULL if there was an allocation failure. + * + * @discussion + * The resulting object assumes ownership of the given buffer. + * + * In the Darwin userspace runtime, NULL will not be returned. + */ +#if !XNU_KERNEL_PRIVATE +IMG4_API_AVAILABLE_20200508 +OS_EXPORT OS_WARN_RESULT OS_MALLOC OS_NONNULL1 OS_NONNULL2 OS_NONNULL4 +img4_firmware_t _Nullable +img4_firmware_new(const img4_runtime_t *rt, + const img4_firmware_execution_context_t *exec, + img4_4cc_t _4cc, + img4_buff_t *buff, + img4_firmware_flags_t flags); +#else +#define img4_firmware_new(...) (img4if->i4if_v7.firmware_new(__VA_ARGS__)) +#endif + +/*! + * @function img4_firmware_new_from_vnode_4xnu + * Allocate and initialize a new firmware object from a vnode. + * + * @param rt + * The runtime in which to initialize the object. This interface is only + * supported with the Darwin kernel runtime. If any other runtime is provided, + * the implementation's behavior is undefined. + * + * @param _4cc + * The 4cc which distinguishes the firmware. + * + * @param vn + * A vnode representing a valid Image4 payload (usually the contents of either a + * .im4p or .img4 file). + * + * @param flags + * Flags modifying the behavior of the object. + * + * @result + * A new firmware object or NULL if there was an allocation failure. + * + * @discussion + * Verification of a vnode is performed by reading in chunks of data, updating + * an ongoing hash operation with that data, and then discarding it. Therefore, + * firmware objects created in this manner can only guarantee their validity at + * the time the check was performed since the vnode's contents are not kept in + * memory and may be tampered with after validation has been performed. + * + * As such, on successful execution, the image passed to the + * {@link img4_firmware_authenticated_execute_t} function of the execution + * context is NULL. + * + * Firmwares created with this interface cannot be created with the + * {@link IMG4_FIRMWARE_FLAG_ATTACHED_MANIFEST} flag. 
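+ *
+ * @example
+ * A minimal, illustrative sketch (error handling elided; rt, vn, exec and
+ * manifest_buff are assumed to exist and are not defined by this interface):
+ *
+ *     img4_firmware_t fw = NULL;
+ *
+ *     fw = img4_firmware_new_from_vnode_4xnu(rt, &exec, kImg4Tag_krnl, vn,
+ *             IMG4_FIRMWARE_FLAG_INIT);
+ *     if (fw) {
+ *         img4_firmware_attach_manifest(fw, &manifest_buff);
+ *         img4_firmware_execute(fw, img4_chip_select_effective_ap(), NULL);
+ *         img4_firmware_destroy(&fw);
+ *     }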
+ */ +#if KERNEL +#if !XNU_KERNEL_PRIVATE +IMG4_API_AVAILABLE_20200508 +OS_EXPORT OS_WARN_RESULT OS_MALLOC OS_NONNULL1 OS_NONNULL2 OS_NONNULL4 +img4_firmware_t _Nullable +img4_firmware_new_from_vnode_4xnu(const img4_runtime_t *rt, + const img4_firmware_execution_context_t *exec, + img4_4cc_t _4cc, + vnode_t vn, + img4_firmware_flags_t flags); +#else +#define img4_firmware_new_from_vnode_4xnu(...) \ + (img4if->i4if_v7.firmware_new_from_vnode_4xnu(__VA_ARGS__)) +#endif // !XNU_KERNEL_PRIVATE +#endif // !KERNEL + +/*! + * @function img4_firmware_new_from_fd_4MSM + * Allocate and initialize a new firmware object from a file descriptor. + * + * @param rt + * The runtime in which to initialize the object. This interface is only + * supported with the Darwin userspace runtime. If any other runtime is + * provided, the implementation's behavior is undefined. + * + * @param _4cc + * The 4cc which distinguishes the firmware. + * + * @param fd + * A pointer to a file descriptor representing a valid Image4 payload (usually + * the contents of either a .im4p or .img4 file). The object assumes ownership + * of the descriptor, and upon return, the value referenced by the pointer will + * be set to -1. + * + * @param flags + * Flags modifying the behavior of the object. + * + * @result + * A new firmware object. The implementation will not return NULL. + * + * @discussion + * This interface is the userspace equivalent of + * {@link img4_firmware_new_from_vnode_4xnu}, and all the same caveats apply. + */ +#if !KERNEL +IMG4_API_AVAILABLE_20200508 +OS_EXPORT OS_WARN_RESULT OS_MALLOC OS_NONNULL1 OS_NONNULL2 +img4_firmware_t +img4_firmware_new_from_fd_4MSM(const img4_runtime_t *rt, + const img4_firmware_execution_context_t *exec, + img4_4cc_t _4cc, + os_fd_t *fd, + img4_firmware_flags_t flags); +#endif + +/*! + * @function img4_firmware_init_from_buff + * Initializes a buffer as a firmware object. This interface is useful for + * runtimes which do not provide for dynamic memory allocation. + * + * @param storage + * A pointer to the storage to use for the firmware object. + * + * @param len + * The size of the buffer. + * + * @discussion + * The caller is expected to pass a buffer that is "big enough". If the provided + * buffer is too small, the implementation will abort the caller. + * + * @example + * + * uint8_t _buff[IMG4_FIRMWARE_SIZE_RECOMMENDED]; + * img4_firmware_t fw = NULL; + * + * fw = img4_firmware_init_from_buff(_buff, sizeof(_buff)); + * img4_firmware_init(fw, IMG4_RUNTIME_DEFAULT, &exec_context, + * kImg4Tag_krnl, fw_buff, 0); + */ +#if !XNU_KERNEL_PRIVATE +IMG4_API_AVAILABLE_20200508 +OS_EXPORT OS_WARN_RESULT OS_NONNULL1 +img4_firmware_t +img4_firmware_init_from_buff(void *buff, size_t len); +#else +#define img4_firmware_init_from_buff(...) \ + (img4if->i4if_v7.firmware_init_from_buff(__VA_ARGS__)) +#endif + +/*! + * @function img4_firmware_init + * Initialize a firmware object. + * + * @param fw + * A pointer to the storage for the firmware object. This pointer should refer + * to a region of memory that is sufficient to hold a {@link img4_firmware_t} + * object. This size should be queried with the {@link i4rt_object_size} + * function of the runtime. + * + * @param rt + * The runtime in which to initialize the object. + * + * @param _4cc + * The 4cc which distinguishes the firmware. + * + * @param buff + * A buffer containing a valid Image4 payload (usually the contents of either a + * .im4p or .img4 file). 
+ * + * Upon return, the destructor in the buffer is replaced with NULL, and the + * implementation assumes responsibility for deallocating the underlying memory. + * + * @param flags + * Flags modifying the behavior of the object. + * + * @discussion + * The resulting object assumes ownership of the given buffer. This routine + * should only be used when dynamic memory allocation is not available in the + * runtime. Otherwise, use {@link img4_firmware_new}. + */ +#if !XNU_KERNEL_PRIVATE +IMG4_API_AVAILABLE_20200508 +OS_EXPORT OS_NONNULL1 OS_NONNULL2 OS_NONNULL3 OS_NONNULL5 +void +img4_firmware_init(img4_firmware_t fw, + const img4_runtime_t *rt, + const img4_firmware_execution_context_t *exec, + img4_4cc_t _4cc, + img4_buff_t *buff, + img4_firmware_flags_t flags); +#else +#define img4_firmware_init(...) (img4if->i4if_v7.firmware_init(__VA_ARGS__)) +#endif + +/*! + * @function img4_firmware_attach_manifest + * Attaches a signed manifest to the firmware. + * + * @param fw + * The firmware to manipulate. + * + * @param buff + * A buffer containing a valid Image4 manifest (usually the contents of either a + * .im4m or .img4 file). + * + * Upon return, the destructor in the buffer is replaced with NULL, and the + * implementation assumes responsibility for deallocating the underlying memory. + * + * @discussion + * If this interface is called on a firmware created with the + * {@link IMG4_FIRMWARE_FLAG_ATTACHED_MANIFEST} flag, the implementation's + * behavior is undefined. + * + * This interface must be called on any firmware created with the + * {@link IMG4_FIRMWARE_FLAG_BARE} flag. + * + * The object assumes ownership of the given buffer. + */ +#if !XNU_KERNEL_PRIVATE +IMG4_API_AVAILABLE_20200508 +OS_EXPORT OS_NONNULL1 OS_NONNULL2 +void +img4_firmware_attach_manifest(img4_firmware_t fw, + img4_buff_t *buff); +#else +#define img4_firmware_attach_manifest(...) \ + (img4if->i4if_v7.firmware_attach_manifest(__VA_ARGS__)) +#endif + +/*! + * @function img4_firmware_select_chip + * Returns the chip from the provided array which may be used to authenticate + * the firmware. + * + * @param fw + * The firmware to query. + * + * @param acceptable_chips + * An array of chips the caller finds acceptable to verify the firmware. + * + * @param acceptable_chips_cnt + * The number of elements in {@link acceptable_chips}. + * + * @result + * If the manifest may be authenticated by the certificate chain associated with + * one of the manifests provided in {@link acceptable_chips}, that chip is + * returned. If the manifest cannot be authenticated with any of the provided + * chips, NULL is returned. + * + * @discussion + * The result of calling this function on a firmware which does not have a + * manifest attached is undefined. + * + * If multiple chips may be used to authenticate the firmware, the + * implementation does not define which of those chips will be returned. + * + * If the firmware was created without the + * {@link IMG4_FIRMWARE_FLAG_SUBSEQUENT_STAGE} flag, this function will return + * NULL. This function cannot be used to establish new trust chains, only to + * verify an existing one. + */ +#if !XNU_KERNEL_PRIVATE +IMG4_API_AVAILABLE_20200724 +OS_EXPORT OS_WARN_RESULT +const img4_chip_t *_Nullable +img4_firmware_select_chip(const img4_firmware_t fw, + const img4_chip_select_array_t _Nonnull acceptable_chips, + size_t acceptable_chips_cnt); +#else +#define img4_firmware_select_chip(...) \ + (img4if->i4if_v10.firmware_select_chip(__VA_ARGS__)) +#endif + +/*! 
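+ * A minimal, illustrative sketch of using img4_firmware_select_chip() above;
+ * fw is assumed to have been created with
+ * IMG4_FIRMWARE_FLAG_SUBSEQUENT_STAGE and to already have a manifest
+ * attached:
+ *
+ *     const img4_chip_t *acceptable[] = {
+ *         IMG4_CHIP_AP_SHA2_384,
+ *         IMG4_CHIP_AP_REDUCED,
+ *     };
+ *     const img4_chip_t *chip = NULL;
+ *
+ *     chip = img4_firmware_select_chip(fw, acceptable,
+ *             sizeof(acceptable) / sizeof(acceptable[0]));
+ *     if (chip) {
+ *         // chip may be passed to img4_firmware_evaluate() or
+ *         // img4_firmware_execute()
+ *     }
+ */
+
+/*!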
+ * @function img4_firmware_execute + * Authenticate the firmware and execute it within its context. + * + * @param fw + * The firmware to execute. + * + * @param chip + * The chip on which to execute the firmware. + * + * @param nonce + * The nonce to use for authentication. May be NULL if the chip environment does + * not maintain an anti-replay token or if a chained evaluation is being + * performed. + * + * @discussion + * The implementation will always invoke the + * {@link img4_firmware_authenticated_execute_t} provided in the execution + * context with either a successful result or a failure. All error handling must + * take place in that context. + * + * The {@link img4_firmware_authenticated_execute_t} is called before the + * implementation returns. + * + * The result of executing a firmware without a manifest attached (either via + * {@link img4_firmware_attach_manifest} or by creating the firmware with the + * {@link IMG4_FIRMWARE_FLAG_ATTACHED_MANIFEST} flag set) is undefined. + */ +#if !XNU_KERNEL_PRIVATE +IMG4_API_AVAILABLE_20200508 +OS_EXPORT OS_NONNULL1 OS_NONNULL2 +void +img4_firmware_execute(img4_firmware_t fw, + const img4_chip_t *chip, + const img4_nonce_t *_Nullable nonce); +#else +#define img4_firmware_execute(...) \ + (img4if->i4if_v7.firmware_execute(__VA_ARGS__)) +#endif + +/*! + * @function img4_firmware_evaluate + * Evaluate the firmware for authenticity. + * + * @param fw + * The firmware to evaluate. + * + * @param chip + * The chip on which to evaluate the firmware. + * + * @param nonce + * The nonce to use for authentication. May be NULL if the chip environment does + * not maintain an anti-replay token or if a chained evaluation is being + * performed. + * + * @result + * An error code describing the result of the authentication. If authentication + * was successful, zero is returned. Otherwise, one of the following error codes + * will be returned: + * + * [EILSEQ] The firmware data is not valid Image4 data -- this will not + * be returned for firmwares created with + * {@link IMG4_FIRMWARE_FLAG_BARE} + * [EFTYPE] The attached manifest is not a valid Image4 manifest + * [ENOENT] The attached manifest does not authenticate this type of + * firmware + * [EAUTH] The attached manifest is not authentic (i.e. was not signed + * by an Apple CA) + * [EACCES] The given chip does not satisfy the constraints of the + * attached manifest + * [ESTALE] The manifest has been invalidated and is no longer valid for + * the provided chip + * [ENOEXEC] The firmware has been corrupted, or the given chip does not + * satisfy the constraints of the corresponding object in the + * attached manifest + * + * @discussion + * This interface should be used when the caller is only concerned with the + * authenticity and integrity of the firmware image and does not intend to + * execute it. + * + * The result of evaluating a firmware without a manifest attached (either via + * {@link img4_firmware_attach_manifest} or by creating the firmware with the + * {@link IMG4_FIRMWARE_FLAG_ATTACHED_MANIFEST} flag set) is undefined. + */ +#if !XNU_KERNEL_PRIVATE +IMG4_API_AVAILABLE_20200608 +OS_EXPORT OS_WARN_RESULT OS_NONNULL1 OS_NONNULL2 +errno_t +img4_firmware_evaluate(img4_firmware_t fw, + const img4_chip_t *chip, + const img4_nonce_t *_Nullable nonce); +#else +#define img4_firmware_evaluate(...) \ + (img4if->i4if_v9.firmware_evaluate(__VA_ARGS__)) +#endif + +/*! 
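+ * A minimal, illustrative sketch of authenticating a .img4 file without
+ * executing it, using img4_firmware_evaluate() above; exec and fw_buff are
+ * assumed to exist, and the anti-replay nonce is elided for brevity:
+ *
+ *     img4_firmware_t fw = NULL;
+ *     errno_t err = 0;
+ *
+ *     fw = img4_firmware_new(IMG4_RUNTIME_DEFAULT, &exec, kImg4Tag_krnl,
+ *             &fw_buff, IMG4_FIRMWARE_FLAG_ATTACHED_MANIFEST);
+ *     err = img4_firmware_evaluate(fw, img4_chip_select_personalized_ap(),
+ *             NULL);
+ *     img4_firmware_destroy(&fw);
+ */
+
+/*!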
+ * @function img4_firmware_destroy + * Destroys a firmware object and releases the associated resources according to + * the runtime's specification. + * + * @param fw + * A pointer to the firmware object. + * + * Upon return, this will be replaced with a known-invalid pointer value. This + * parameter may be NULL in which case the implementation will return + * immediately. + * + * @discussion + * The implementation will invoke the provided deallocation function of the + * buffer object underlying the firmware. + */ +#if !XNU_KERNEL_PRIVATE +IMG4_API_AVAILABLE_20200508 +OS_EXPORT +void +img4_firmware_destroy(img4_firmware_t _Nonnull *_Nullable fw); +#else +#define img4_firmware_destroy(...) \ + (img4if->i4if_v7.firmware_destroy(__VA_ARGS__)) +#endif + +OS_ASSUME_NONNULL_END + +__END_DECLS; + +#endif // __IMG4_FIRMWARE_H diff --git a/EXTERNAL_HEADERS/img4/image.h b/EXTERNAL_HEADERS/img4/image.h new file mode 100644 index 000000000..f40ef2857 --- /dev/null +++ b/EXTERNAL_HEADERS/img4/image.h @@ -0,0 +1,164 @@ +/*! + * @header + * Interfaces for manipulating Image4 firmware images. + */ +#ifndef __IMG4_IMAGE_H +#define __IMG4_IMAGE_H + +#ifndef __IMG4_INDIRECT +#error "Please #include instead of this file directly" +#endif // __IMG4_INDIRECT + +#if IMG4_TAPI +#include "tapi.h" +#endif + +OS_ASSUME_NONNULL_BEGIN + +/*! + * @function img4_image_get_bytes + * Returns the authenticated payload from an Image4 image. + * + * @param image + * The image to query. + * + * @result + * A buffer which describes the authenticated payload. + */ +#if !XNU_KERNEL_PRIVATE +IMG4_API_AVAILABLE_20200508 +OS_EXPORT OS_WARN_RESULT OS_NONNULL1 +const img4_buff_t * +img4_image_get_bytes(img4_image_t image); +#else +#define img4_image_get_bytes(...) (img4if->i4if_v7.image_get_bytes(__VA_ARGS__)) +#endif + +/*! + * @function img4_image_get_property_bool + * Retrieves the Boolean value for the requested image property. + * + * @param image + * The image to query. + * + * @param _4cc + * The 4cc of the desired image property. + * + * @param storage + * A pointer to storage for a Boolean value. + * + * @result + * If the property is present for the image, a pointer to the storage provided + * in {@link storage}. If the property is not present in the image or its value + * is not a Boolean, NULL is returned. + * + * @discussion + * If the property is present for the image, a pointer to the storage provided + * in {@link storage}. If the property is not present in the image or its value + * is not a Boolean, NULL is returned. + */ +#if !XNU_KERNEL_PRIVATE +IMG4_API_AVAILABLE_20200508 +OS_EXPORT OS_WARN_RESULT OS_NONNULL1 OS_NONNULL3 +const bool * +img4_image_get_property_bool(img4_image_t image, + img4_4cc_t _4cc, + bool *storage); +#else +#define img4_image_get_property_bool(...) \ + (img4if->i4if_v7.image_get_property_bool(__VA_ARGS__)) +#endif + +/*! + * @function img4_image_get_property_uint32 + * Retrieves the unsigned 32-bit integer value for the requested image property. + * + * @param image + * The image to query. + * + * @param _4cc + * The 4cc of the desired image property. + * + * @param storage + * A pointer to storage for a 32-bit unsigned integer value. + * + * @result + * If the property is present for the image, a pointer to the storage provided + * in {@link storage}. If the property is not present in the image or its value + * is not an unsigned 32-bit integer, NULL is returned. 
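+ *
+ * @example
+ * A minimal, illustrative sketch (the 4cc used here is a placeholder, not a
+ * property defined by this header):
+ *
+ *     uint32_t storage = 0;
+ *     const uint32_t *value = NULL;
+ *
+ *     value = img4_image_get_property_uint32(image, 'abcd', &storage);
+ *     if (value) {
+ *         // the image carried the property; *value and storage hold it
+ *     }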
+ */ +#if !XNU_KERNEL_PRIVATE +IMG4_API_AVAILABLE_20200508 +OS_EXPORT OS_WARN_RESULT OS_NONNULL1 OS_NONNULL3 +const uint32_t * +img4_image_get_property_uint32(img4_image_t image, + img4_4cc_t _4cc, + uint32_t *storage); +#else +#define img4_image_get_property_uint32(...) \ + (img4if->i4if_v7.image_get_property_uint32(__VA_ARGS__)) +#endif + +/*! + * @function img4_image_get_property_uint64 + * Retrieves the unsigned 64-bit integer value for the requested image property. + * + * @param image + * The image to query. + * + * @param _4cc + * The 4cc of the desired image property. + * + * @param storage + * A pointer to storage for a 64-bit unsigned integer value. + * + * @result + * If the property is present for the image, a pointer to the storage provided + * in {@link storage}. If the property is not present in the image or its value + * is not an unsigned 64-bit integer, NULL is returned. + */ +#if !XNU_KERNEL_PRIVATE +IMG4_API_AVAILABLE_20200508 +OS_EXPORT OS_WARN_RESULT OS_NONNULL1 OS_NONNULL3 +const uint64_t * +img4_image_get_property_uint64(img4_image_t image, + img4_4cc_t _4cc, + uint64_t *storage); +#else +#define img4_image_get_property_uint64(...) \ + (img4if->i4if_v7.image_get_property_uint64(__VA_ARGS__)) +#endif + +/*! + * @function img4_image_get_property_data + * Retrieves the buffer value for the requested image property. + * + * @param image + * The image to query. + * + * @param _4cc + * The 4cc of the desired image property. + * + * @param storage + * A pointer to storage for a buffer value. + * + * @result + * If the property is present for the image, a pointer to the storage provided + * in {@link storage}. If the property is not present in the image or its value + * is not a data, NULL is returned. + */ +#if !XNU_KERNEL_PRIVATE +IMG4_API_AVAILABLE_20200508 +OS_EXPORT OS_WARN_RESULT OS_NONNULL1 OS_NONNULL3 +const img4_buff_t * +img4_image_get_property_data(img4_image_t image, + img4_4cc_t _4cc, + img4_buff_t *storage); +#else +#define img4_image_get_property_data(...) \ + (img4if->i4if_v7.image_get_property_data(__VA_ARGS__)) +#endif + +OS_ASSUME_NONNULL_END + +#endif // __IMG4_IMAGE_H diff --git a/EXTERNAL_HEADERS/img4/img4.h b/EXTERNAL_HEADERS/img4/img4.h deleted file mode 100644 index 3d995b10f..000000000 --- a/EXTERNAL_HEADERS/img4/img4.h +++ /dev/null @@ -1,578 +0,0 @@ -/*! - * @header - * Image4 interfaces. These interfaces encapsulate the basic concepts required - * for authenticating and validating Image4 manifests as being authoritative. - * These concepts are: - * - * @section Environment - * An environment is a description of a host comprised of hardware identifiers - * and policy configurations. For example, the environment of an iPhone may - * include the following hardware identifiers (among others): - * - * ChipID - * A number identifying the chip design. - * - * BoardID - * A number identifying the board. - * - * UniqueChipID / ECID - * A number uniquely identifying a specific instance of a chip. - * - * The environment also includes policy information derived by previous stages - * of secure boot. Examples of such policy are: - * - * Mix-n-Match Prevention - * Whether firmware payloads from multiple, valid secure boot manifests - * should be prevented from being executed on the host environment. The - * default is true. - * - * @section Manifest - * An Image4 manifest is a set of constraints that describe a host environment. - * For example, a manifest may have been signed such that it is only valid for a - * single host environment. 
In this case, the manifest may include specific - * values for ChipID, BoardID, UniqueChipID, etc. Such a manifest is said to be - * personalized for that environment. - * - * If an environment meets the constraints in a manifest, that manifest is said - * to be authoritative over the environment. - * - * The manifest also includes one or more objects which may be executed in the - * environment. - * - * @section Object - * An object is a description of a payload. An object can describe any payload, - * not just the payload that is in the Image4. An object describes a payload by - * means of its digest. Examples of objects present in a secure boot manifest - * are the kernelcache and the static trust cache. - * - * If an authoritative manifest accurately describes an object, then that object - * may be executed in the host environment. The mechanics of execution typically - * involve mapping its payload into a privileged memory region. For example, - * when the kernelcache is executed, its payload bytes are mapped into the range - * of memory associated with supervisor mode. - * - * Payload - * A payload is the raw sequence of bytes that is described by an object. When - * described via an Image4 object, payloads are first wrapped in Image4 encoding - * to associate a tag with them. The resulting series of bytes is what is - * contained in a .im4p file. - * - * An Image4 file may only contain a single payload (even though a manifest may - * describe multiple payloads through multiple objects). - * - * Tag - * A tag is a FourCC which can identify any of the following: - * - * - an object property (e.g. the 'DGST' property) - * - a manifest property (e.g. the 'BORD' property) - * - a certificate property - * - a type of object (e.g. 'krnl') - * - * Tags comprised of all-caps are reserved for the Image4 specification. - * - * @section Secure Boot Policy - * Manifests are evaluated with the Secure Boot evaluation policy. Broadly - * speaking, this policy: - * - * - enforces that manifest identifiers match the host's silicon - * identifiers, - * - enforces that the epoch of the certificate which signed the manifest is - * greater than or equal to the host silicon's epoch - * - enforces that the current manifest is the same one that was used in the - * previous stage of Secure Boot unless mix-n-match is allowed - * - * For manifests which lack a CHMH property, mix-n-match policy is enforced as - * follows - * - * (1) If the previous stage of Secure Boot disallows mix-n-match and the - * manifest does not possess the AMNM entitlement, the hash of the - * manifest will be enforced against the hash of the manifest which was - * evaluated by the previous stage of Secure Boot. - * - * (2) If the previous stage of Secure Boot allows mix-n-match or the manifest - * possesses the AMNM entitlement, the manifest's constraints will be - * enforced on the environment, but the manifest will not be expected to - * be consistent with the manifest evaluated in the previous stage of - * Secure Boot, i.e. the hash of the previous manifest will not be - * enforced against the manifest being evaluated. - * - * Enforcement of the manifest's constraints will include the value of the - * BNCH tag in the manifest, if any. Therefore the caller should always - * provide a nonce value to the implementation via {@link img4_set_nonce} - * if this option is used. 
- * - * For manifests which possess a CHMH property, mix-n-match policy is enforced - * as follows: - * - * (1) If the previous stage of Secure Boot disallows mix-n-match or the - * manifest does not possess the AMNM entitlement, the value of the CHMH - * property will be enforced against the hash of the manifest which was - * evaluated by the previous stage of Secure Boot. - * - * (2) If the previous stage of Secure Boot allows mix-n-match and the - * manifest possesses the AMNM entitlement, all of the manifest's - * constraints will be enforced on the environment except for the CHMH - * constraint, which will be ignored. - * - * Enforcement of the manifest's constraints will include the value of the - * BNCH tag in the manifest, if any. Therefore the caller should always - * provide a nonce value to the implementation via {@link img4_set_nonce} - * if this option is used. - * - * The CHMH policy may be expressed as the following truth table: - * - * AMNM [manifest] Verify Manifest Hash [environment] Enforce CHMH - * 0 0 Y - * 0 1 Y - * 1 0 N - * 1 1 Y - */ - - -#ifndef __IMG4_H -#define __IMG4_H - -#include -#include -#include -#include - -#if KERNEL -#if !defined(OS_CLOSED_ENUM) -#define OS_CLOSED_ENUM(...) OS_ENUM(__VA_ARGS__) -#endif - -#if !defined(OS_OPTIONS) -#define OS_OPTIONS(...) OS_ENUM(__VA_ARGS__) -#endif - -#if !defined(OS_CLOSED_OPTIONS) -#define OS_CLOSED_OPTIONS(...) OS_ENUM(__VA_ARGS__) -#endif -#endif - -#define __IMG4_INDIRECT 1 - -/* - * When used from the pmap layer, this header pulls in the types from libsa, - * which conflict with the BSD sys/types.h header that we need to pull in. But - * we only need it for the errno_t typedef and the vnode_t typedef. So when - * building MACH_KERNEL_PRIVATE, we do two things: - * - * 1. Explicitly pull in , so we get errno_t and - * nothing else (no transitive #include's) - * 2. #define _SYS_TYPES_H_ before #includ'ing so that - * we don't get the transitive #include of but we still get - * the definitions we need - */ -#if MACH_KERNEL_PRIVATE -#define _SYS_TYPES_H_ 1 -#include -#include -#else -#include -#include -#endif - -#if !IMG4_PROJECT_BUILD -#include -#endif - -__BEGIN_DECLS; - -/*! - * @typedef img4_tag_t - * A type describing an Image4 tag. - */ -IMG4_API_AVAILABLE_20180112 -typedef uint32_t img4_tag_t; - -/*! - * @typedef img4_section_t - * A type describing the sections of an Image4 object. - * - * @const IMG4_SECTION_MANIFEST - * The manifest section. - * - * @const IMG4_SECTION_OBJECT - * The object section. - * - * @const IMG4_SECTION_RESTOREINFO - * The restore info section. - */ -OS_ENUM(img4_section, uint8_t, - IMG4_SECTION_MANIFEST, - IMG4_SECTION_OBJECT, - IMG4_SECTION_RESTOREINFO, -) IMG4_API_AVAILABLE_20180112; - -/*! - * @typedef img4_destructor_t - * A type describing a destructor routine for an Image4 object. - * - * @param ptr - * A pointer to the buffer to dispose of. - * - * @param len - * The length of the buffer. - */ -IMG4_API_AVAILABLE_20180112 -typedef void (*img4_destructor_t)( - void *ptr, - size_t len); - -/*! - * @typedef img4_flags_t - * A flagset modifying the behavior of an {@link img4_t}. - * - * @const I4F_INIT - * No flags set. This value is suitable for initialization purposes. - * - * @const I4F_TRUST_MANIFEST - * Causes the implementation to bypass trust evaluation for the manifest, i.e. - * it will not verify that a manifest has been signed by Apple before trusting - * it. 
- * - * This option is for testing purposes only and is not respected on the RELEASE - * variant of the implementation. - * - * @const I4F_FORCE_MIXNMATCH - * Causes the implementation to bypass mix-n-match policy evaluation and always - * allow mix-n-match, irrespective of the previous boot stage's conclusion or - * manifest policy. This also allows replay of manifests whose personalization - * has been invalidated by rolling the nonce. - * - * This option is for testing purposes only and is not respected on the RELEASE - * variant of the implementation. - * - * @const I4F_FIRST_STAGE - * Indicates that the manifest being evaluated is the first link in the secure - * boot chain. This causes the implementation to enforce the manifest directly - * on the environment rather than requiring that a previous stage has already - * done so by way of checking the previous stage's boot manifest hash. In effect - * this disables the mix-n-match enforcement policy. - * - * The critical difference between this flag and {@link I4F_FORCE_MIXNMATCH} is - * that this flag will cause the entire manifest to be enforced on the - * environment, including the anti-replay token in BNCH. - * {@link I4F_FORCE_MIXNMATCH} will ignore the nonce. - * - * It is illegal to use a manifest which possesses a CHMH tag as a first-stage - * manifest. - */ -OS_CLOSED_OPTIONS(img4_flags, uint64_t, - I4F_INIT = 0, - I4F_TRUST_MANIFEST = (1 << 0), - I4F_FORCE_MIXNMATCH = (1 << 1), - I4F_FIRST_STAGE = (1 << 2), -) IMG4_API_AVAILABLE_20180112; - -typedef char _img4_opaque_data_64[696]; - -typedef char _img4_opaque_data_32[520]; - -/*! - * @typedef img4_t - * An opaque structure representing Image4 data. The Image4 data must contain a - * manifest and may optionally contain a payload. Neither this type nor the APIs - * APIs which manipulate it are thread-safe. - */ -IMG4_API_AVAILABLE_20180112 -typedef struct _img4 { -#if __ILP64__ || __LP64__ - _img4_opaque_data_64 __opaque; -#else - _img4_opaque_data_32 __opaque; -#endif -} img4_t; - -typedef char _img4_payload_opaque_data_64[504]; - -#if __ARM_ARCH_7A__ || __ARM_ARCH_7S__ || __ARM_ARCH_7K__ || \ - __ARM64_ARCH_8_32__ || __i386__ -typedef char _img4_payload_opaque_data_32[328]; -#else -typedef char _img4_payload_opaque_data_32[332]; -#endif - -/*! - * @typedef img4_payload_t - * An opaque structure describing Image4 payload data. Neither this type nor the - * APIs which manipulate it are thread-safe. - */ -IMG4_API_AVAILABLE_20180112 -typedef struct _img4_payload { -#if __ILP64__ || __LP64__ - _img4_payload_opaque_data_64 __opaque; -#else - _img4_payload_opaque_data_32 __opaque; -#endif -} img4_payload_t; - -#if !IMG4_PROJECT_BUILD -#include -#include -#include -#endif - -#if IMG4_TAPI -#include "environment.h" -#include "nonce.h" -#include "payload.h" -#endif - -/*! - * @function img4_init - * Initializes an Image4. - * - * @param i4 - * A pointer to the storage to initialize. - * - * @param flags - * Flags to modify initialization. - * - * @param bytes - * The Image4 data from which to initialize. If a destructor is provided, - * control of this buffer transfers to the Image4. - * - * @param len - * The length of the Image4 data. - * - * @param destructor - * A destructor for the Image4 data. May be NULL if the buffer does not require - * explicit deallocation (e.g. because the buffer is stack data). - * - * @result - * Upon success, zero is returned. 
The implementation may also return one of the - * following error codes directly: - * - * [EILSEQ] The data is not valid Image4 data - * [EFTYPE] The data does not contain an Image4 manifest - * - * @discussion - * The bytes given to this routine must represent an Image4 manifest. They may - * optionally also represent an Image4 payload. - */ -#if !XNU_KERNEL_PRIVATE -IMG4_API_AVAILABLE_20180112 -OS_EXPORT OS_WARN_RESULT OS_NONNULL1 OS_NONNULL3 -errno_t -img4_init(img4_t *i4, img4_flags_t flags, const uint8_t *bytes, size_t len, - img4_destructor_t destructor); -#else -#define img4_init(...) (img4if->i4if_init(__VA_ARGS__)) -#endif - -/*! - * @function img4_set_nonce - * Sets the anti-reply token to be used during manifest enforcement. This value - * will be compared against the value of the manifest's BNCH property. - * - * @param i4 - * The Image4 to modify. - * - * @param bytes - * The bytes which comprise the anti-replay token. - * - * @param len - * The length of the anti-replay token. - * - * @discussion - * If a nonce is not set prior to a call to either - * {@link img4_get_trusted_payload} or - * {@link img4_get_trusted_external_payload}, the implementation will act as - * though there is no nonce in the environment. Therefore, any manifests which - * have a BNCH property constraint will fail to validate. - */ -#if !XNU_KERNEL_PRIVATE -IMG4_API_AVAILABLE_20180112 -OS_EXPORT OS_NONNULL1 OS_NONNULL2 -void -img4_set_nonce(img4_t *i4, const void *bytes, size_t len); -#else -#define img4_set_nonce(...) (img4if->i4if_set_nonce(__VA_ARGS__)) -#endif - -/*! - * @function img4_set_nonce_domain - * Sets the nonce domain to be consulted for the anti-replay token during - * manifest enforcement. - * - * @param i4 - * The Image4 to modify. - * - * @param nd - * The nonce domain to use for anti-replay. - * - * @discussion - * See discussion for {@link img4_set_nonce}. - */ -#if !XNU_KERNEL_PRIVATE -IMG4_API_AVAILABLE_20181106 -OS_EXPORT OS_NONNULL1 OS_NONNULL2 -void -img4_set_nonce_domain(img4_t *i4, const img4_nonce_domain_t *nd); -#else -#define img4_set_nonce_domain(...) \ - (img4if->i4if_v1.set_nonce_domain(__VA_ARGS__)) -#endif - -/*! - * @function img4_get_trusted_payload - * Obtains the trusted payload bytes from the Image4. - * - * @param i4 - * The Image4 to query. - * - * @param tag - * The tag for the payload to obtain. - * - * @param env - * The environment against which to validate the Image4. - * - * @param bytes - * A pointer to the storage where the pointer to the payload buffer will be - * written on success. - * - * @param len - * A pointer to the storage where the length of the payload buffer will be - * written on success. - * - * @result - * Upon success, zero is returned. The implementation may also return one of the - * following error codes directly: - * - * [ENOENT] The Image4 does not contain a payload for the specified tag - * [EAUTH] The Image4 manifest was not authentic - * [EACCES] The environment given does not satisfy the manifest - * constraints - * [ESTALE] The nonce specified is not valid - * [EACCES] The environment and manifest do not agree on a digest - * algorithm - * [EILSEQ] The payload for the given tag does not match its description - * in the manifest - * [EIO] The payload could not be fetched - * - * @discussion - * This routine will perform the following validation: - * - * 1. Validate that the Image4 manifest is authentic (i.e. was signed by - * Apple) - * 2. Validate that the given environment satisfies the constraints in the - * manifest - * 3. 
Validate that the measurement of the payload for the given tag matches - * the measurement in the manifest - * - * If any one of these validation checks fails, the payload is considered - * untrustworthy and is not returned. - */ -#if !XNU_KERNEL_PRIVATE -IMG4_API_AVAILABLE_20180112 -OS_EXPORT OS_WARN_RESULT OS_NONNULL1 OS_NONNULL3 OS_NONNULL4 OS_NONNULL5 -errno_t -img4_get_trusted_payload(img4_t *i4, img4_tag_t tag, - const img4_environment_t *env, const uint8_t **bytes, size_t *len); -#else -#define img4_get_trusted_payload(...) \ - (img4if->i4if_get_trusted_payload(__VA_ARGS__)) -#endif - -/*! - * @function img4_get_trusted_external_payload - * Obtains the trusted payload bytes from the external Image4 payload after - * validating them against the object description in the Image4's manifest. - * - * @param i4 - * The Image4 to query. - * - * @param payload - * The payload to validate. - * - * @param env - * The environment against which to validate the Image4. - * - * @param bytes - * A pointer to the storage where the pointer to the payload buffer will be - * written on success. - * - * If the payload objects was initialized with - * {@link img4_payload_init_with_vnode_4xnu}, this parameter should be NULL, as - * there will be no in-memory buffer to return. - * - * @param len - * A pointer to the storage where the length of the payload buffer will be - * written on success. - * - * If the payload objects was initialized with - * {@link img4_payload_init_with_vnode_4xnu}, this parameter should be NULL, as - * there will be no in-memory buffer to return. - * - * @result - * Upon success, zero is returned. The implementation may also return one of the - * following error codes directly: - * - * [ENOENT] The Image4 does not contain an object describing the given - * payload - * [EAUTH] The Image4 manifest was not authentic - * [EACCES] The environment given does not satisfy the manifest - * constraints - * [ESTALE] The nonce specified is not valid - * [EACCES] The environment and manifest do not agree on a digest - * algorithm - * [EILSEQ] The payload for the given tag does not match its description - * in the manifest - * [EIO] The payload could not be fetched - * [EIO] The payload was initialized with - * {@link img4_payload_init_with_vnode_4xnu}, and reading from - * the vnode stalled repeatedly beyond the implementation's - * tolerance - * - * If the payload was initialized with - * {@link img4_payload_init_with_vnode_4xnu}, any error returned by - * {@link vnode_getattr} or {@link vn_rdwr} may be returned. - * - * If the payload was initialized with - * {@link img4_payload_init_with_fd_4MSM}, any error returned by stat(2), - * read(2), or malloc(3) may be returned. - * - * Otherwise, an error from the underlying Image4 implementation will be - * returned. - * - * @discussion - * This routine performs the same validation steps as - * {@link img4_get_trusted_payload} and has the same caveats. - */ -#if !XNU_KERNEL_PRIVATE -IMG4_API_AVAILABLE_20180112 -OS_EXPORT OS_WARN_RESULT OS_NONNULL1 OS_NONNULL2 OS_NONNULL3 -errno_t -img4_get_trusted_external_payload(img4_t *i4, img4_payload_t *payload, - const img4_environment_t *env, const uint8_t **bytes, size_t *len); -#else -#define img4_get_trusted_external_payload(...) \ - (img4if->i4if_get_trusted_external_payload(__VA_ARGS__)) -#endif - -/*! - * @function img4_destroy - * Destroys an Image4 and disposes of associated resources. - * - * @param i4 - * The Image4 to destroy. 
- * - * @discussion - * The destructor passed to {@link img4_init} is called as a result of this - * routine, if any was set. - */ -#if !XNU_KERNEL_PRIVATE -IMG4_API_AVAILABLE_20180112 -OS_EXPORT OS_NONNULL1 -void -img4_destroy(img4_t *i4); -#else -#define img4_destroy(...) (img4if->i4if_destroy(__VA_ARGS__)) -#endif - -__END_DECLS; - -#endif // __IMG4_H diff --git a/EXTERNAL_HEADERS/img4/nonce.h b/EXTERNAL_HEADERS/img4/nonce.h index 93c10c0c9..bc0987ed2 100644 --- a/EXTERNAL_HEADERS/img4/nonce.h +++ b/EXTERNAL_HEADERS/img4/nonce.h @@ -46,7 +46,7 @@ #define __IMG4_NONCE_H #ifndef __IMG4_INDIRECT -#error "Please #include instead of this file directly" +#error "Please #include instead of this file directly" #endif // __IMG4_INDIRECT #if IMG4_TAPI @@ -61,18 +61,19 @@ IMG4_API_AVAILABLE_20181106 typedef struct _img4_nonce_domain img4_nonce_domain_t; /*! - * @const IMG4_NONCE_VERSION + * @const IMG4_NONCE_STRUCT_VERSION * The version of the {@link img4_nonce_t} structure supported by the * implementation. */ -#define IMG4_NONCE_VERSION ((img4_struct_version_t)0) +#define IMG4_NONCE_STRUCT_VERSION ((img4_struct_version_t)0) +#define IMG4_NONCE_VERSION IMG4_NONCE_STRUCT_VERSION /*! * @const IMG4_NONCE_MAX_LENGTH * The maximum length of a nonce. Currently, this is the length of a SHA2-384 * hash. */ -#define IMG4_NONCE_MAX_LENGTH (48) +#define IMG4_NONCE_MAX_LENGTH (48u) /*! * @typedef img4_nonce_t @@ -101,14 +102,27 @@ typedef struct _img4_nonce { * {@link i4n_version} field is properly initialized. */ #if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L -#define IMG4_NONCE_INIT (img4_nonce_t){.i4n_version = IMG4_NONCE_VERSION} +#define IMG4_NONCE_INIT (img4_nonce_t){.i4n_version = IMG4_NONCE_STRUCT_VERSION} #elif defined(__cplusplus) && __cplusplus >= 201103L -#define IMG4_NONCE_INIT (img4_nonce_t{IMG4_NONCE_VERSION}) +#define IMG4_NONCE_INIT (img4_nonce_t{IMG4_NONCE_STRUCT_VERSION}) #elif defined(__cplusplus) #define IMG4_NONCE_INIT \ - (img4_nonce_t((img4_nonce_t){IMG4_NONCE_VERSION})) + (img4_nonce_t((img4_nonce_t){IMG4_NONCE_STRUCT_VERSION})) #else -#define IMG4_NONCE_INIT {IMG4_NONCE_VERSION} +#define IMG4_NONCE_INIT {IMG4_NONCE_STRUCT_VERSION} +#endif + +/*! + * @const IMG4_NONCE_ZERO + * A convenience initializer for {@link img4_nonce_t} which initializes a 48- + * byte nonce of all zeroes. + */ +#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L +#define IMG4_NONCE_ZERO (img4_nonce_t){ \ + .i4n_version = IMG4_NONCE_STRUCT_VERSION, \ + .i4n_nonce = {0}, \ + .i4n_length = IMG4_NONCE_MAX_LENGTH, \ +} #endif /*! @@ -162,7 +176,7 @@ OS_EXPORT const struct _img4_nonce_domain _img4_nonce_domain_cryptex; #define IMG4_NONCE_DOMAIN_CRYPTEX (&_img4_nonce_domain_cryptex) #else -#define IMG4_NONCE_DOMAIN_CRYPTEX (img4if->i4if_v1.nonce_domain_cryptex) +#define IMG4_NONCE_DOMAIN_CRYPTEX (img4if->i4if_v3.nonce_domain_cryptex) #endif /*! @@ -193,7 +207,7 @@ errno_t img4_nonce_domain_copy_nonce(const img4_nonce_domain_t *nd, img4_nonce_t *n); #else #define img4_nonce_domain_copy_nonce(...) \ - (i4if->i4if_v1.nonce_domain_copy_nonce(__VA_ARGS__)) + (img4if->i4if_v1.nonce_domain_copy_nonce(__VA_ARGS__)) #endif /*! @@ -219,7 +233,7 @@ errno_t img4_nonce_domain_roll_nonce(const img4_nonce_domain_t *nd); #else #define img4_nonce_domain_roll_nonce(...) 
\ - (i4if->i4if_v1.nonce_domain_roll_nonce(__VA_ARGS__)) + (img4if->i4if_v1.nonce_domain_roll_nonce(__VA_ARGS__)) #endif #endif // __IMG4_NONCE_H diff --git a/EXTERNAL_HEADERS/img4/object.h b/EXTERNAL_HEADERS/img4/object.h new file mode 100644 index 000000000..d25351bb4 --- /dev/null +++ b/EXTERNAL_HEADERS/img4/object.h @@ -0,0 +1,68 @@ +/*! + * @header + * Image4 object specifications. + */ +#ifndef __IMG4_OBJECT_H +#define __IMG4_OBJECT_H + +#ifndef __IMG4_INDIRECT +#error "Please #include instead of this file directly" +#endif // __IMG4_INDIRECT + +#if IMG4_TAPI +#include "tapi.h" +#endif + +OS_ASSUME_NONNULL_BEGIN + +/*! + * @typedef img4_object_spec_t + * An opaque type which describes information about Image4 objects for use by + * the runtime. + */ +IMG4_API_AVAILABLE_20200508 +typedef struct _img4_object_spec img4_object_spec_t; + +/*! + * @const IMG4_FIRMWARE_SPEC + * The object specification for an {@link img4_firmware_t} object. + */ +#if !XNU_KERNEL_PRIVATE +IMG4_API_AVAILABLE_20200508 +OS_EXPORT +const img4_object_spec_t _img4_firmware_spec; +#define IMG4_FIRMWARE_SPEC (&_img4_firmware_spec) +#else +#define IMG4_FIRMWARE_SPEC (img4if->i4if_v7.firmware_spec) +#endif + +/*! + * @const IMG4_FIRMWARE_SIZE_RECOMMENDED + * A constant describing the recommended stack allocation required for a + * {@link img4_firmware_t} object. + */ +#define IMG4_FIRMWARE_SIZE_RECOMMENDED (1280u) + +/*! + * @const IMG4_CHIP_SPEC + * The object specification for an {@link img4_chip_t} object. + */ +#if !XNU_KERNEL_PRIVATE +IMG4_API_AVAILABLE_20200508 +OS_EXPORT +const img4_object_spec_t _img4_chip_spec; +#define IMG4_CHIP_SPEC (&_img4_chip_spec) +#else +#define IMG4_CHIP_SPEC (img4if->i4if_v7.chip_spec) +#endif + +/*! + * @const IMG4_CHIP_SIZE_RECOMMENDED + * A constant describing the recommended stack allocation required for a + * {@link img4_chip_t} object. + */ +#define IMG4_CHIP_SIZE_RECOMMENDED (256u) + +OS_ASSUME_NONNULL_END + +#endif // __IMG4_OBJECT_H diff --git a/EXTERNAL_HEADERS/img4/payload.h b/EXTERNAL_HEADERS/img4/payload.h deleted file mode 100644 index 8196742f0..000000000 --- a/EXTERNAL_HEADERS/img4/payload.h +++ /dev/null @@ -1,192 +0,0 @@ -/*! - * @header - * Image4 payload interfaces. These interfaces provide a lightweight type for - * working with an Image4 payload that is described by a separate manifest (e.g. - * a .im4p file whose contents are described by an object in a manifest from a - * .im4m file). - * - * No direct access is provided to the raw payload bytes encapsulated by the - * Image4 payload by design. The intent is that in order to access the raw - * bytes, the payload object must be validated against a manifest object using - * the {@link img4_get_trusted_external_payload} interface. - */ -#ifndef __IMG4_PAYLOAD_H -#define __IMG4_PAYLOAD_H - -#ifndef __IMG4_INDIRECT -#error "Please #include instead of this file directly" -#endif // __IMG4_INDIRECT - -#if IMG4_TAPI -#include "tapi.h" -#endif - -/*! - * @typedef img4_payload_flags_t - * Flags modifying the behavior of an Image4 payload object. - * - * @const I4PLF_INIT - * No flags set. This value is suitable for initialization purposes. - * - * @const I4PLF_UNWRAPPED - * Indicates that the payload bytes are not wrapped in an Image4 payload object - * (.im4p file). If this flag is given, the payload tag is ignored. 
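To make the nonce interfaces shown above concrete, here is a minimal, non-normative sketch of a userspace caller reading and then rolling the cryptex nonce. The function name print_and_roll_cryptex_nonce and the hex-dump are illustrative only; the struct fields and the copy/roll calls are the ones documented above.

#include <stdio.h>
#include <errno.h>

static errno_t
print_and_roll_cryptex_nonce(void)
{
	img4_nonce_t n = IMG4_NONCE_INIT;
	errno_t rc;

	rc = img4_nonce_domain_copy_nonce(IMG4_NONCE_DOMAIN_CRYPTEX, &n);
	if (rc != 0) {
		return rc;
	}

	for (size_t i = 0; i < n.i4n_length; i++) {
		printf("%02x", n.i4n_nonce[i]);
	}
	printf("\n");

	/* roll (invalidate) the current nonce for this domain */
	return img4_nonce_domain_roll_nonce(IMG4_NONCE_DOMAIN_CRYPTEX);
}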
- * - * This should be used in scenarios such as x86 SecureBoot, which use Image4 to - * describe portable executable files which must be fed directly to the firmware - * and cannot tolerate being wrapped in an intermediary format. - */ -OS_CLOSED_OPTIONS(img4_payload_flags, uint64_t, - I4PLF_INIT = 0, - I4PLF_UNWRAPPED = (1 << 0), -); - -/*! - * @function img4_payload_init - * Initializes an Image4 payload object. - * - * @param i4p - * A pointer to the payload object to initialize. - * - * @param tag - * The expected tag for the payload. - * - * @param flags - * Flags modifying the behavior of the payload object. - * - * @param bytes - * The buffer containing the Image4 payload. - * - * @param len - * The length of the buffer. - * - * @param destructor - * A pointer to a routine to dispose of the buffer. May be NULL if the buffer - * does not require explicit disposal (e.g. the buffer is stack memory). - * - * @result - * Upon success, zero is returned. Otherwise, one of the following error codes: - * - * [EILSEQ] The data is not valid Image4 data - * [EFTYPE] The data does not contain an Image4 payload - * [ENOENT] The bytes do not contain a payload for the specified tag - */ -#if !XNU_KERNEL_PRIVATE -IMG4_API_AVAILABLE_20180112 -OS_EXPORT OS_WARN_RESULT OS_NONNULL1 OS_NONNULL4 -errno_t -img4_payload_init(img4_payload_t *i4p, img4_tag_t tag, - img4_payload_flags_t flags, const uint8_t *bytes, size_t len, - img4_destructor_t destructor); -#else -#define img4_payload_init(...) img4if->i4if_payload_init(__VA_ARGS__) -#endif - -/*! - * @function img4_payload_init_with_vnode_4xnu - * Initializes an Image4 payload object from a vnode. - * - * @param i4p - * A pointer to the payload object to initialize. - * - * @param tag - * The expected tag for the payload. - * - * @param vn - * The vnode from which to initialize the payload. - * - * @param flags - * Flags modifying the behavior of the payload object. - * - * @result - * Upon success, zero is returned. Otherwise, one of the following error codes: - * - * [ENOENT] The vnode is either dead or in the process of being - * recycled - * [EIO] Reading from the vnode stalled repeatedly beyond the - * implementation's tolerance - * - * Additionally, the implementation may return any error that vnode_ref() may - * return. - * - * @discussion - * Verification of a vnode is performed by reading in chunks of data, updating - * an ongoing hash operation with that data, and then discarding it. Therefore, - * payload objects created in this manner can only guarantee their validity at - * the time the check was performed since the vnode's contents are not kept in - * memory and may be tampered with after validation has been performed. - * - * Additionally, this operation requires the payload to be unwrapped, as it does - * not parse or recognize any Image4 payload wrapper. Payloads created with this - * interface are therefore implicitly created with the {@link I4PLF_UNWRAPPED} - * flag. - */ - -#if KERNEL -#if !XNU_KERNEL_PRIVATE -IMG4_API_AVAILABLE_20180112 -OS_EXPORT OS_WARN_RESULT OS_NONNULL1 OS_NONNULL3 -errno_t -img4_payload_init_with_vnode_4xnu(img4_payload_t *i4p, img4_tag_t tag, - vnode_t vn, img4_payload_flags_t flags); -#else -#define img4_payload_init_with_vnode_4xnu(...) \ - (img4if->i4if_v2.payload_init_with_vnode_4xnu(__VA_ARGS__)) -#endif // !XNU_KERNEL_PRIVATE -#endif // KERNEL - -/*! - * @function img4_payload_init_with_fd_4MSM - * Initializes an Image4 payload object from a file descriptor. 
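Below is a small, non-normative sketch of how userspace code might use the file-descriptor initializer described next. The wrapper name payload_from_file is invented; the assumption that the caller may close its own descriptor afterwards rests on the dup(2) note in that description.

#include <fcntl.h>
#include <unistd.h>
#include <errno.h>

static errno_t
payload_from_file(img4_payload_t *i4p, img4_tag_t tag, const char *path)
{
	errno_t rc;
	int fd = open(path, O_RDONLY);

	if (fd < 0) {
		return errno;
	}

	/* like the vnode variant, this path treats the file as an
	 * unwrapped payload (I4PLF_UNWRAPPED is implied per the
	 * discussion of the vnode initializer above) */
	rc = img4_payload_init_with_fd_4MSM(i4p, tag, fd, I4PLF_INIT);

	/* the implementation dup(2)s the descriptor, so (assumption based
	 * on the dup(2) note) the original can be closed regardless */
	close(fd);
	return rc;
}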
- * - * @param i4p - * A pointer to the payload object to initialize. - * - * @param tag - * The expected tag for the payload. - * - * @param fd - * The file descriptor from which to initialize the payload. - * - * @param flags - * Flags modifying the behavior of the payload object. - * - * @result - * Upon success, zero is returned. Otherwise, the implementation may return any - * errno that is set by the dup(2) system call. - * - * @discussion - * This interface is a userspace equivalent to - * {@link img4_payload_init_with_vnode_4xnu}, and all the same caveats apply. - */ - -#if !KERNEL -IMG4_API_AVAILABLE_20180112 -OS_EXPORT OS_WARN_RESULT OS_NONNULL1 -errno_t -img4_payload_init_with_fd_4MSM(img4_payload_t *i4p, img4_tag_t tag, - int fd, img4_payload_flags_t flags); -#endif // KERNEL - -/*! - * @function img4_payload_destroy - * Disposes of the resources associated with the payload object. - * - * @param i4p - * The payload object of which to dispose. - * - * @discussion - * This routine does not deallocate the storage for the payload object itself, - * only the associated resources. This routine will cause the destructor given - * in {@link img4_payload_init} to be called, if any. - */ -#if !XNU_KERNEL_PRIVATE -IMG4_API_AVAILABLE_20180112 -OS_EXPORT OS_NONNULL1 -void -img4_payload_destroy(img4_payload_t *i4p); -#else -#define img4_payload_destroy(...) img4if->i4if_payload_destroy(__VA_ARGS__) -#endif - -#endif // __IMG4_PAYLOAD_H diff --git a/EXTERNAL_HEADERS/img4/runtime.h b/EXTERNAL_HEADERS/img4/runtime.h new file mode 100644 index 000000000..a8934de99 --- /dev/null +++ b/EXTERNAL_HEADERS/img4/runtime.h @@ -0,0 +1,680 @@ +/*! + * @header + * Image4 runtime interfaces. + */ +#ifndef __IMG4_RUNTIME_H +#define __IMG4_RUNTIME_H + +#ifndef __IMG4_INDIRECT +#error "Please #include instead of this file directly" +#endif // __IMG4_INDIRECT + +#if IMG4_TAPI +#include "tapi.h" +#endif + +OS_ASSUME_NONNULL_BEGIN + +/*! + * @typedef img4_identifier_t + * An enumeration describing identifiers in the Image4 specification. + * + * @const IMG4_IDENTIFIER_CEPO + * The chip epoch as documented in 2.1.1. Authoritative manifests will specify a + * certificate epoch which is greater than or equal to that of the chip. + * + * Unsigned 32-bit integer. + * + * @const IMG4_IDENTIFIER_BORD + * The board identifier as documented in 2.1.3. Authoritative manifests will + * specify a board identifier which is equal to that of the chip. + * + * Unsigned 32-bit integer. + * + * @const IMG4_IDENTIFIER_CHIP + * The chip identifier as documented in 2.1.2. Authoritative manifests will + * specify a chip identifier which is equal to that of the chip. + * + * Unsigned 32-bit integer. + * + * @const IMG4_IDENTIFIER_SDOM + * The security domain as documented in 2.1.5. Authoritative manifests will + * specify a security domain which is equal to that of the chip. + * + * Unsigned 32-bit integer. + * + * @const IMG4_IDENTIFIER_ECID + * The unique chip identifier as documented in 2.1.4. Authoritative manifests + * will specify a unique chip identifier which is equal to that of the chip. + * + * Unsigned 64-bit integer. + * + * @const IMG4_IDENTIFIER_CPRO + * The certificate production status as documented in 2.1.6. Authoritative + * manifests will specify a certificate production status which is equal to that + * of the chip. + * + * Boolean. + * + * @const IMG4_IDENTIFIER_CSEC + * The certificate security mode as documented in 2.1.7.
Authoritative manifests + * will specify a certificate security mode which is equal to that of the chip. + * + * Boolean. + * + * @const IMG4_IDENTIFIER_EPRO + * The effective production status as documented in 2.1.23. Unless the chip + * environment supports demotion, this will always be the same as + * {@link IMG4_IDENTIFIER_CPRO}. An executable firmware in an authoritative + * manifest will specify an EPRO object property which is equal to that of the + * chip post-demotion. + * + * Boolean. + * + * @const IMG4_IDENTIFIER_ESEC + * The effective security mode as documented in 2.1.25. Unless the chip + * environment supports demotion, this will always be the same as + * {@link IMG4_IDENTIFIER_CSEC}. An executable firmware in an authoritative + * manifest will specify an ESEC object property which is equal to that of the + * chip post-demotion. + * + * Boolean. + * + * @const IMG4_IDENTIFIER_IUOU + * The "internal use only unit" property. Indicates whether the chip is present + * on a server-side authlist which permits installing builds which are otherwise + * restricted to parts whose CPRO is 0. This property is only published by macOS + * devices whose root of trust is in an arm coprocessor (e.g. T2). + * + * Authoritative manifests will specify an internal-use-only-build property + * which, if true, is equal to the internal-use-only-unit property of the chip. + * If the internal-use-only-build property is false, then there is no constraint + * on the chip's internal-use-only-unit property. + * + * Boolean. + * + * @const IMG4_IDENTIFIER_RSCH + * The research fusing status. Indicates whether the chip is intended for + * security research to be performed by external parties. Authoritative + * manifests will specify a research fusing state which is equal to that of the + * chip. + * + * Boolean. + * + * @const IMG4_IDENTIFIER_CHMH + * The chained manifest hash from the previous stage of secure boot as described + * in 2.2.11. An authoritative manifest will either + * + * - specify a manifest hash which is equal to that of the previous secure + * boot stage's manifest + * - itself have a manifest hash which is equal to that of the previous + * secure boot stage's manifest + * + * If the previous stage of secure boot enabled mix-n-match, there is no + * constraint on the previous stage's manifest hash. + * + * Manifests which specify this property cannot be used to create new trust + * chains -- they may only extend existing ones. + * + * Digest. + * + * @const IMG4_IDENTIFIER_AMNM + * The allow-mix-n-match status of the chip. If mix-n-match is enabled, secure + * boot will permit different manifests to be used at each stage of boot. If the + * chip environment allows mix-n-match, evaluation will not require an anti- + * replay token to be specified, and any chained manifest hash constraints are + * ignored. + * + * Boolean. + * + * @const IMG4_IDENTIFIER_EUOU + * The engineering-use-only-unit status of the chip. This is in effect an alias + * for the {@link IMG4_IDENTIFIER_IUOU} property. Either property being present + * in the environment will satisfy a manifest's iuob constraint. + * + * Boolean. + * + * @const _IMG4_IDENTIFIER_CNT + * A convenience value representing the number of known identifiers. 
+ */ +IMG4_API_AVAILABLE_20200508 +OS_CLOSED_ENUM(img4_identifier, uint64_t, + IMG4_IDENTIFIER_CEPO, + IMG4_IDENTIFIER_BORD, + IMG4_IDENTIFIER_CHIP, + IMG4_IDENTIFIER_SDOM, + IMG4_IDENTIFIER_ECID, + IMG4_IDENTIFIER_CPRO, + IMG4_IDENTIFIER_CSEC, + IMG4_IDENTIFIER_EPRO, + IMG4_IDENTIFIER_ESEC, + IMG4_IDENTIFIER_IUOU, + IMG4_IDENTIFIER_RSCH, + IMG4_IDENTIFIER_CHMH, + IMG4_IDENTIFIER_AMNM, + IMG4_IDENTIFIER_EUOU, + _IMG4_IDENTIFIER_CNT, +); + +/*! + * @const IMG4_DGST_STRUCT_VERSION + * The version of the {@link img4_dgst_t} structure supported by the + * implementation. + */ +#define IMG4_DGST_STRUCT_VERSION (0u) + +/*! + * @const IMG4_DGST_MAX_LEN + * The maximum length of a digest representable by an {@link img4_dgst_t}. + */ +#define IMG4_DGST_MAX_LEN (48u) + +/*! + * @typedef img4_dgst_t + * A structure representing an Image4 digest. + * + * @field i4d_version + * The version of the structure. Initialize to {@link IMG4_DGST_STRUCT_VERSION}. + * + * @field i4d_len + * The length of the digest. + * + * @field i4d_bytes + * The digest bytes. + */ +IMG4_API_AVAILABLE_20200508 +typedef struct _img4_dgst { + img4_struct_version_t i4d_version; + size_t i4d_len; + uint8_t i4d_bytes[IMG4_DGST_MAX_LEN]; +} img4_dgst_t; + +/*! + * @const IMG4_DGST_INIT + * A convenience initializer for an {@link img4_dgst_t} structure. + */ +#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L +#define IMG4_DGST_INIT (img4_dgst_t){ \ + .i4d_version = IMG4_DGST_STRUCT_VERSION, \ + .i4d_len = 0, \ + .i4d_bytes = {0}, \ +} +#elif defined(__cplusplus) && __cplusplus >= 201103L +#define IMG4_DGST_INIT (img4_dgst_t{ \ + IMG4_DGST_STRUCT_VERSION, \ + 0, \ + {0}, \ +}) +#elif defined(__cplusplus) +#define IMG4_DGST_INIT (img4_dgst_t((img4_dgst_t){ \ + IMG4_DGST_STRUCT_VERSION, \ + 0, \ + {0}, \ +})) +#else +#define IMG4_DGST_INIT {IMG4_DGST_STRUCT_VERSION} +#endif + +/*! + * @typedef img4_runtime_init_t + * A function which initializes the runtime. + * + * @param rt + * The runtime for which the function is being invoked. + * + * @discussion + * This function is called by the implementation prior to any other runtime + * function being called. The implementation will ensure that it is called only + * once. Any runtime with an initialization function must be registered with the + * {@link IMG4_RUNTIME_REGISTER} macro. + */ +IMG4_API_AVAILABLE_20200508 +typedef void (*img4_runtime_init_t)( + const img4_runtime_t *rt +); + +/*! + * @typedef img4_runtime_alloc_t + * An allocation function. + * + * @param rt + * The runtime for which the function is being invoked. + * + * @param n + * The number of bytes to allocate. + * + * @result + * A pointer to the new allocation, or NULL if there was an allocation failure. + */ +IMG4_API_AVAILABLE_20200508 +typedef void *_Nullable (*img4_runtime_alloc_t)( + const img4_runtime_t *rt, + size_t n +); + +/*! + * @typedef img4_runtime_dealloc_t + * A deallocation function. + * + * @param rt + * The runtime for which the function is being invoked. + * + * @param p + * A pointer to the allocation to free. The callee is expected to return + * immediately if NULL is passed. + * + * @param n + * The size of the allocation. Not all implementations may require this + * information to be specified. + */ +IMG4_API_AVAILABLE_20200508 +typedef void (*img4_runtime_dealloc_t)( + const img4_runtime_t *rt, + void *_Nullable p, + size_t n +); + +/*! + * @typedef img4_log_level_t + * An enumeration describing the importance/severity of a log message.
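To make the allocator callback shapes above concrete, here is a hedged sketch of how a Darwin userspace runtime might back them with malloc/free. The my_rt_* names are invented; only the typedefs above are taken from the header.

#include <stdlib.h>

static void *
my_rt_alloc(const img4_runtime_t *rt, size_t n)
{
	(void)rt;
	return malloc(n);
}

static void
my_rt_dealloc(const img4_runtime_t *rt, void *p, size_t n)
{
	(void)rt;
	(void)n;	/* free(3) does not need the allocation size */
	free(p);
}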
+ * + * @const IMG4_LOG_LEVEL_ERROR + * A fatal condition which will cause the implementation to abort its current + * operation. + * + * @const IMG4_LOG_LEVEL_INFO + * Information that may be of interest to the system operator. + * + * @const IMG4_LOG_LEVEL_DEBUG + * Information that may be of interest to the maintainer. + * + * @const _IMG4_LOG_LEVEL_CNT + * A convenience constant indicating the number of log levels. + */ +IMG4_API_AVAILABLE_20200508 +OS_CLOSED_ENUM(img4_log_level, uint64_t, + IMG4_LOG_LEVEL_ERROR, + IMG4_LOG_LEVEL_INFO, + IMG4_LOG_LEVEL_DEBUG, + _IMG4_LOG_LEVEL_CNT, +); + +/*! + * @typedef img4_runtime_log_t + * A function which writes log messages. + * + * @param rt + * The runtime for which the function is being invoked. + * + * @param handle + * An implementation-specific handle for the log message. + * + * @param level + * The log level of the message. The implementation is free to determine whether + * a given message is worthy of record. + * + * @param fmt + * A printf(3)-style format string. + * + * @param ... + * Arguments to be interpreted by the format string according to the + * specifications in printf(3). + */ +OS_FORMAT_PRINTF(4, 5) +IMG4_API_AVAILABLE_20200508 +typedef void (*img4_runtime_log_t)( + const img4_runtime_t *rt, + void *_Nullable handle, + img4_log_level_t level, + const char *fmt, + ... +); + +/*! + * @typedef img4_runtime_log_handle_t + * A function which returns a log handle. + * + * @param rt + * The runtime for which the function is being invoked. + * + * @result + * A runtime-specific log handle that will be passed to the logging function. + */ +IMG4_API_AVAILABLE_20200508 +typedef void *_Nullable (*img4_runtime_log_handle_t)( + const img4_runtime_t *rt +); + +/*! + * @typedef img4_runtime_get_identifier_bool_t + * A function which retrieves a Boolean Image4 identifier. + * + * @param rt + * The runtime for which the function is being invoked. + * + * @param chip + * The chip for which to retrieve the identifier. + * + * @param identifier + * The identifier to retrieve. + * + * @param value + * Upon successful return, storage which is populated with the retrieved value. + * + * @result + * Upon success, the callee is expected to return zero. Otherwise, the callee + * may return one of the following error codes: + * + * [ENOTSUP] The identifier cannot be queried in the runtime + * [ENOENT] The identifier was not found in the runtime's identity + * oracle + * [ENODEV] There was an error querying the runtime's identity oracle + */ +IMG4_API_AVAILABLE_20200508 +typedef errno_t (*img4_runtime_get_identifier_bool_t)( + const img4_runtime_t *rt, + const img4_chip_t *chip, + img4_identifier_t identifier, + bool *value +); + +/*! + * @typedef img4_runtime_get_identifier_uint32_t + * A function which retrieves an unsigned 32-bit integer Image4 identifier. + * + * @param rt + * The runtime for which the function is being invoked. + * + * @param chip + * The chip for which to retrieve the identifier. + * + * @param identifier + * The identifier to retrieve. + * + * @param value + * Upon successful return, storage which is populated with the retrieved value. + * + * @result + * Upon success, the callee is expected to return zero.
Otherwise, the callee + * may return one of the following error codes: + * + * [ENOTSUP] The identifier cannot be queried in the runtime + * [ENOENT] The identifier was not found in the runtime's identity + * oracle + * [ENODEV] There was an error querying the runtime's identity oracle + */ +IMG4_API_AVAILABLE_20200508 +typedef errno_t (*img4_runtime_get_identifier_uint32_t)( + const img4_runtime_t *rt, + const img4_chip_t *chip, + img4_identifier_t identifier, + uint32_t *value +); + +/*! + * @typedef img4_runtime_get_identifier_uint64_t + * A function which retrieves an unsigned 64-bit integer Image4 identifier. + * + * @param rt + * The runtime for which the function is being invoked. + * + * @param chip + * The chip for which to retrieve the identifier. + * + * @param identifier + * The identifier to retrieve. + * + * @param value + * Upon successful return, storage which is populated with the retrieved value. + * + * @result + * Upon success, the callee is expected to return zero. Otherwise, the callee + * may return one of the following error codes: + * + * [ENOTSUP] The identifier cannot be queried in the runtime + * [ENOENT] The identifier was not found in the runtime's identity + * oracle + * [ENODEV] There was an error querying the runtime's identity oracle + */ +IMG4_API_AVAILABLE_20200508 +typedef errno_t (*img4_runtime_get_identifier_uint64_t)( + const img4_runtime_t *rt, + const img4_chip_t *chip, + img4_identifier_t identifier, + uint64_t *value +); + +/*! + * @typedef img4_runtime_get_identifier_digest_t + * A function which retrieves a digest Image4 identifier. + * + * @param rt + * The runtime for which the function is being invoked. + * + * @param chip + * The chip for which to retrieve the identifier. + * + * @param identifier + * The identifier to retrieve. + * + * @param value + * Upon successful return, storage which is populated with the retrieved value. + * + * @result + * Upon success, the callee is expected to return zero. Otherwise, the callee + * may return one of the following error codes: + * + * [ENOTSUP] The identifier cannot be queried in the runtime + * [ENOENT] The identifier was not found in the runtime's identity + * oracle + * [ENODEV] There was an error querying the runtime's identity oracle + */ +IMG4_API_AVAILABLE_20200508 +typedef errno_t (*img4_runtime_get_identifier_digest_t)( + const img4_runtime_t *rt, + const img4_chip_t *chip, + img4_identifier_t identifier, + img4_dgst_t *value +); + +/*! + * @define IMG4_BUFF_STRUCT_VERSION + * The version of the {@link img4_buff_t} structure supported by the + * implementation. + */ +#define IMG4_BUFF_STRUCT_VERSION (0u) + +/*! + * @struct _img4_buff + * A structure describing a buffer. + * + * @field i4b_version + * The version of the structure. Initialize to {@link IMG4_BUFF_STRUCT_VERSION}. + * + * @field i4b_bytes + * A pointer to the buffer. + * + * @field i4b_len + * The length of the buffer. + * + * @field i4b_dealloc + * The deallocation function for the buffer. May be NULL if the underlying + * memory does not require cleanup. When the implementation invokes this + * function, it will always pass {@link IMG4_RUNTIME_DEFAULT}, and the callee + * should not consult this parameter for any reason. + */ +struct _img4_buff { + img4_struct_version_t i4b_version; + uint8_t *i4b_bytes; + size_t i4b_len; + img4_runtime_dealloc_t _Nullable i4b_dealloc; +} IMG4_API_AVAILABLE_20200508; + +/*! + * @const IMG4_BUFF_INIT + * A convenience initializer for the {@link img4_buff_t} structure. 
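A sketch of one identifier callback, again with an invented my_rt_* name: a runtime whose identity oracle can only answer the ECID query might implement the 64-bit getter as follows, returning ENOTSUP for everything else as the result documentation above allows. The zero value written here is a placeholder, not a real chip identity.

static errno_t
my_rt_get_identifier_uint64(const img4_runtime_t *rt, const img4_chip_t *chip,
    img4_identifier_t identifier, uint64_t *value)
{
	(void)rt;
	(void)chip;

	switch (identifier) {
	case IMG4_IDENTIFIER_ECID:
		*value = 0;	/* placeholder: a real oracle would read the fused ECID */
		return 0;
	default:
		return ENOTSUP;	/* identifier cannot be queried in this runtime */
	}
}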
+ */ +#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L +#define IMG4_BUFF_INIT (img4_buff_t){ \ + .i4b_version = IMG4_BUFF_STRUCT_VERSION, \ + .i4b_len = 0, \ + .i4b_bytes = NULL, \ + .i4b_dealloc = NULL, \ +} +#elif defined(__cplusplus) && __cplusplus >= 201103L +#define IMG4_BUFF_INIT (img4_buff_t{ \ + IMG4_BUFF_STRUCT_VERSION, \ + NULL, \ + 0, \ + NULL, \ +}) +#elif defined(__cplusplus) +#define IMG4_BUFF_INIT (img4_buff_t((img4_buff_t){ \ + IMG4_BUFF_STRUCT_VERSION, \ + NULL, \ + 0, \ + NULL, \ +})) +#else +#define IMG4_BUFF_INIT {IMG4_BUFF_STRUCT_VERSION} +#endif + +/*! + * @define IMG4_RUNTIME_STRUCT_VERSION + * The version of the {@link img4_runtime_t} structure supported by the + * implementation. + */ +#define IMG4_RUNTIME_STRUCT_VERSION (1u) + +/*! + * @struct _img4_runtime + * A structure describing required primitives in the operating environment's + * runtime. + * + * @field i4rt_version + * The version of the structure supported by the implementation. In a custom + * execution context, initialize to {@link IMG4_RUNTIME_STRUCT_VERSION}. + * + * @field i4rt_name + * A string describing the environment. + * + * @field i4rt_init + * The runtime initialization function. See discussion in + * {@link img4_runtime_init_t}. + * + * @field i4rt_alloc + * The allocation function for the environment (e.g. in Darwin userspace, this + * would be a pointer to malloc(3)). + * + * @field i4rt_dealloc + * The deallocation function for the environment (e.g. in Darwin userspace, this + * would be a pointer to free(3)). + * + * @field i4rt_log + * The function which logs messages from the implementation. + * + * @field i4rt_log_handle + * The function which returns the handle to be passed to the logging function. + * + * @field i4rt_get_identifier_bool + * The function which returns Boolean identifiers. + * + * @field i4rt_get_identifier_uint32 + * The function which returns unsigned 32-bit integer identifiers. + * + * @field i4rt_get_identifier_uint64 + * The function which returns unsigned 64-bit integer identifiers. + * + * @field i4rt_get_identifier_digest + * The function which returns digest identifiers. + * + * @field i4rt_context + * A user-defined context pointer. + */ +struct _img4_runtime { + img4_struct_version_t i4rt_version; + const char *i4rt_name; + img4_runtime_init_t _Nullable i4rt_init; + img4_runtime_alloc_t i4rt_alloc; + img4_runtime_dealloc_t i4rt_dealloc; + img4_runtime_log_t i4rt_log; + img4_runtime_log_handle_t i4rt_log_handle; + img4_runtime_get_identifier_bool_t i4rt_get_identifier_bool; + img4_runtime_get_identifier_uint32_t i4rt_get_identifier_uint32; + img4_runtime_get_identifier_uint64_t i4rt_get_identifier_uint64; + img4_runtime_get_identifier_digest_t i4rt_get_identifier_digest; + void *_Nullable i4rt_context; +} IMG4_API_AVAILABLE_20200508; + +/*! + * @function IMG4_RUNTIME_REGISTER + * Registers a runtime with the module implementation such that its + * initialization function can be called. In environments which support dynamic + * library linkage, only runtimes registered from the main executable image can + * be discovered by the implementation. + * + * @param _rt + * The img4_runtime_t structure to register. + */ +#define IMG4_RUNTIME_REGISTER(_rt) LINKER_SET_ENTRY(__img4_rt, _rt); + +/*! + * @const IMG4_RUNTIME_DEFAULT + * The default runtime for the current operating environment. 
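Tying the pieces together, a hypothetical out-of-tree runtime could be assembled and registered roughly as below. This is a sketch only: it reuses the illustrative my_rt_alloc, my_rt_dealloc and my_rt_get_identifier_uint64 callbacks sketched earlier, stubs the remaining callbacks, and assumes that IMG4_RUNTIME_REGISTER may be applied to a file-scope object as its definition above suggests.

#include <stdarg.h>
#include <stdbool.h>
#include <stdio.h>
#include <errno.h>

static void
my_rt_log(const img4_runtime_t *rt, void *handle, img4_log_level_t level,
    const char *fmt, ...)
{
	va_list ap;

	(void)rt;
	(void)handle;
	(void)level;	/* a real runtime would filter on the level */
	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
}

static void *
my_rt_log_handle(const img4_runtime_t *rt)
{
	(void)rt;
	return NULL;
}

static errno_t
my_rt_get_identifier_bool(const img4_runtime_t *rt, const img4_chip_t *chip,
    img4_identifier_t identifier, bool *value)
{
	(void)rt; (void)chip; (void)identifier; (void)value;
	return ENOTSUP;
}

static errno_t
my_rt_get_identifier_uint32(const img4_runtime_t *rt, const img4_chip_t *chip,
    img4_identifier_t identifier, uint32_t *value)
{
	(void)rt; (void)chip; (void)identifier; (void)value;
	return ENOTSUP;
}

static errno_t
my_rt_get_identifier_digest(const img4_runtime_t *rt, const img4_chip_t *chip,
    img4_identifier_t identifier, img4_dgst_t *value)
{
	(void)rt; (void)chip; (void)identifier; (void)value;
	return ENOTSUP;
}

static const img4_runtime_t my_runtime = {
	.i4rt_version = IMG4_RUNTIME_STRUCT_VERSION,
	.i4rt_name = "my-runtime",
	.i4rt_init = NULL,
	.i4rt_alloc = my_rt_alloc,
	.i4rt_dealloc = my_rt_dealloc,
	.i4rt_log = my_rt_log,
	.i4rt_log_handle = my_rt_log_handle,
	.i4rt_get_identifier_bool = my_rt_get_identifier_bool,
	.i4rt_get_identifier_uint32 = my_rt_get_identifier_uint32,
	.i4rt_get_identifier_uint64 = my_rt_get_identifier_uint64,
	.i4rt_get_identifier_digest = my_rt_get_identifier_digest,
	.i4rt_context = NULL,
};

IMG4_RUNTIME_REGISTER(my_runtime);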
+ */ +#if !XNU_KERNEL_PRIVATE +IMG4_API_AVAILABLE_20200508 +OS_EXPORT +const img4_runtime_t _img4_runtime_default; +#define IMG4_RUNTIME_DEFAULT (&_img4_runtime_default) +#else +#define IMG4_RUNTIME_DEFAULT (img4if->i4if_v7.runtime_default) +#endif + +/*! + * @const IMG4_RUNTIME_PMAP_CS + * The runtime for the xnu pmap monitor. This runtime is not available outside + * the kernel-proper. On architectures which do not have an xnu monitor, this + * is merely an alias for the default kernel runtime. + */ +#if XNU_KERNEL_PRIVATE +#define IMG4_RUNTIME_PMAP_CS (img4if->i4if_v7.runtime_pmap_cs) +#endif + +/*! + * @const IMG4_RUNTIME_RESTORE + * The runtime for the restore ramdisk. This runtime is not available outside + * of the Darwin userspace library. + */ +#if !KERNEL +IMG4_API_AVAILABLE_20200508 +OS_EXPORT +const img4_runtime_t _img4_runtime_restore; +#define IMG4_RUNTIME_RESTORE (&_img4_runtime_restore) +#endif + +/*! + * @function img4_buff_dealloc + * Deallocates a buffer according to its deallocation function. + * + * @param buff + * A pointer to the buffer. This parameter may be NULL, in + * which case the implementation will return immediately. + * + * @discussion + * This interface will always invoke the deallocation callback with + * {@link IMG4_RUNTIME_DEFAULT}. The callee should not consult this parameter + * for any reason. + */ +#if !XNU_KERNEL_PRIVATE +IMG4_API_AVAILABLE_20200508 +OS_EXPORT +void +img4_buff_dealloc(img4_buff_t *_Nullable buff); +#else +#define img4_buff_dealloc(...) (img4if->i4if_v7.buff_dealloc(__VA_ARGS__)) +#endif + +OS_ASSUME_NONNULL_END + +#endif // __IMG4_RUNTIME_H diff --git a/EXTERNAL_HEADERS/mach-o/Makefile b/EXTERNAL_HEADERS/mach-o/Makefile index 98d2a59ba..e816a0e89 100644 --- a/EXTERNAL_HEADERS/mach-o/Makefile +++ b/EXTERNAL_HEADERS/mach-o/Makefile @@ -8,6 +8,7 @@ include $(MakeInc_def) EXPORT_FILES = \ fat.h \ + fixup-chains.h \ loader.h \ nlist.h \ reloc.h diff --git a/EXTERNAL_HEADERS/mach-o/fixup-chains.h b/EXTERNAL_HEADERS/mach-o/fixup-chains.h new file mode 100644 index 000000000..bba0dd994 --- /dev/null +++ b/EXTERNAL_HEADERS/mach-o/fixup-chains.h @@ -0,0 +1,257 @@ +/* -*- mode: C++; c-basic-offset: 4; tab-width: 4 -*- + * + * Copyright (c) 2018 Apple Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License.
+ * + * @APPLE_LICENSE_HEADER_END@ + */ + +#ifndef __MACH_O_FIXUP_CHAINS__ +#define __MACH_O_FIXUP_CHAINS__ 5 + + +#include + + +//#define LC_DYLD_EXPORTS_TRIE 0x80000033 // used with linkedit_data_command +//#define LC_DYLD_CHAINED_FIXUPS 0x80000034 // used with linkedit_data_command, payload is dyld_chained_fixups_header + + +// header of the LC_DYLD_CHAINED_FIXUPS payload +struct dyld_chained_fixups_header +{ + uint32_t fixups_version; // 0 + uint32_t starts_offset; // offset of dyld_chained_starts_in_image in chain_data + uint32_t imports_offset; // offset of imports table in chain_data + uint32_t symbols_offset; // offset of symbol strings in chain_data + uint32_t imports_count; // number of imported symbol names + uint32_t imports_format; // DYLD_CHAINED_IMPORT* + uint32_t symbols_format; // 0 => uncompressed, 1 => zlib compressed +}; + +// This struct is embedded in LC_DYLD_CHAINED_FIXUPS payload +struct dyld_chained_starts_in_image +{ + uint32_t seg_count; + uint32_t seg_info_offset[1]; // each entry is offset into this struct for that segment + // followed by pool of dyld_chain_starts_in_segment data +}; + +// This struct is embedded in dyld_chain_starts_in_image +// and passed down to the kernel for page-in linking +struct dyld_chained_starts_in_segment +{ + uint32_t size; // size of this (amount kernel needs to copy) + uint16_t page_size; // 0x1000 or 0x4000 + uint16_t pointer_format; // DYLD_CHAINED_PTR_* + uint64_t segment_offset; // offset in memory to start of segment + uint32_t max_valid_pointer; // for 32-bit OS, any value beyond this is not a pointer + uint16_t page_count; // how many pages are in array + uint16_t page_start[1]; // each entry is offset in each page of first element in chain + // or DYLD_CHAINED_PTR_START_NONE if no fixups on page + // uint16_t chain_starts[1]; // some 32-bit formats may require multiple starts per page. 
+ // for those, if high bit is set in page_starts[], then it + // is index into chain_starts[] which is a list of starts + // the last of which has the high bit set +}; + +enum { + DYLD_CHAINED_PTR_START_NONE = 0xFFFF, // used in page_start[] to denote a page with no fixups + DYLD_CHAINED_PTR_START_MULTI = 0x8000, // used in page_start[] to denote a page which has multiple starts + DYLD_CHAINED_PTR_START_LAST = 0x8000, // used in chain_starts[] to denote last start in list for page +}; + +// This struct is embedded in __TEXT,__chain_starts section in firmware +struct dyld_chained_starts_offsets +{ + uint32_t pointer_format; // DYLD_CHAINED_PTR_32_FIRMWARE + uint32_t starts_count; // number of starts in array + uint32_t chain_starts[1]; // array chain start offsets +}; + + +// values for dyld_chained_starts_in_segment.pointer_format +enum { + DYLD_CHAINED_PTR_ARM64E = 1, // stride 8, unauth target is vmaddr + DYLD_CHAINED_PTR_64 = 2, // target is vmaddr + DYLD_CHAINED_PTR_32 = 3, + DYLD_CHAINED_PTR_32_CACHE = 4, + DYLD_CHAINED_PTR_32_FIRMWARE = 5, + DYLD_CHAINED_PTR_64_OFFSET = 6, // target is vm offset + DYLD_CHAINED_PTR_ARM64E_OFFSET = 7, // old name + DYLD_CHAINED_PTR_ARM64E_KERNEL = 7, // stride 4, unauth target is vm offset + DYLD_CHAINED_PTR_64_KERNEL_CACHE = 8, + DYLD_CHAINED_PTR_ARM64E_USERLAND = 9, // stride 8, unauth target is vm offset + DYLD_CHAINED_PTR_ARM64E_FIRMWARE = 10, // stride 4, unauth target is vmaddr + DYLD_CHAINED_PTR_X86_64_KERNEL_CACHE = 11, // stride 1, x86_64 kernel caches +}; + + +// DYLD_CHAINED_PTR_ARM64E +struct dyld_chained_ptr_arm64e_rebase +{ + uint64_t target : 43, + high8 : 8, + next : 11, // 4 or 8-byte stride + bind : 1, // == 0 + auth : 1; // == 0 +}; + +// DYLD_CHAINED_PTR_ARM64E +struct dyld_chained_ptr_arm64e_bind +{ + uint64_t ordinal : 16, + zero : 16, + addend : 19, // +/-256K + next : 11, // 4 or 8-byte stride + bind : 1, // == 1 + auth : 1; // == 0 +}; + +// DYLD_CHAINED_PTR_ARM64E +struct dyld_chained_ptr_arm64e_auth_rebase +{ + uint64_t target : 32, // runtimeOffset + diversity : 16, + addrDiv : 1, + key : 2, + next : 11, // 4 or 8-byte stride + bind : 1, // == 0 + auth : 1; // == 1 +}; + +// DYLD_CHAINED_PTR_ARM64E +struct dyld_chained_ptr_arm64e_auth_bind +{ + uint64_t ordinal : 16, + zero : 16, + diversity : 16, + addrDiv : 1, + key : 2, + next : 11, // 4 or 8-byte stride + bind : 1, // == 1 + auth : 1; // == 1 +}; + +// DYLD_CHAINED_PTR_64/DYLD_CHAINED_PTR_64_OFFSET +struct dyld_chained_ptr_64_rebase +{ + uint64_t target : 36, // 64GB max image size (DYLD_CHAINED_PTR_64 => vmAddr, DYLD_CHAINED_PTR_64_OFFSET => runtimeOffset) + high8 : 8, // top 8 bits set to this (DYLD_CHAINED_PTR_64 => after slide added, DYLD_CHAINED_PTR_64_OFFSET => before slide added) + reserved : 7, // all zeros + next : 12, // 4-byte stride + bind : 1; // == 0 +}; + +// DYLD_CHAINED_PTR_64 +struct dyld_chained_ptr_64_bind +{ + uint64_t ordinal : 24, + addend : 8, // 0 thru 255 + reserved : 19, // all zeros + next : 12, // 4-byte stride + bind : 1; // == 1 +}; + +// DYLD_CHAINED_PTR_64_KERNEL_CACHE, DYLD_CHAINED_PTR_X86_64_KERNEL_CACHE +struct dyld_chained_ptr_64_kernel_cache_rebase +{ + uint64_t target : 30, // basePointers[cacheLevel] + target + cacheLevel : 2, // what level of cache to bind to (indexes a mach_header array) + diversity : 16, + addrDiv : 1, + key : 2, + next : 12, // 1 or 4-byte stride + isAuth : 1; // 0 -> not authenticated.
1 -> authenticated +}; + +// DYLD_CHAINED_PTR_32 +// Note: for DYLD_CHAINED_PTR_32 some non-pointer values are co-opted into the chain +// as out of range rebases. If an entry in the chain is > max_valid_pointer, then it +// is not a pointer. To restore the value, subtract off the bias, which is +// (64MB+max_valid_pointer)/2. +struct dyld_chained_ptr_32_rebase +{ + uint32_t target : 26, // vmaddr, 64MB max image size + next : 5, // 4-byte stride + bind : 1; // == 0 +}; + +// DYLD_CHAINED_PTR_32 +struct dyld_chained_ptr_32_bind +{ + uint32_t ordinal : 20, + addend : 6, // 0 thru 63 + next : 5, // 4-byte stride + bind : 1; // == 1 +}; + +// DYLD_CHAINED_PTR_32_CACHE +struct dyld_chained_ptr_32_cache_rebase +{ + uint32_t target : 30, // 1GB max dyld cache TEXT and DATA + next : 2; // 4-byte stride +}; + + +// DYLD_CHAINED_PTR_32_FIRMWARE +struct dyld_chained_ptr_32_firmware_rebase +{ + uint32_t target : 26, // 64MB max firmware TEXT and DATA + next : 6; // 4-byte stride +}; + + + +// values for dyld_chained_fixups_header.imports_format +enum { + DYLD_CHAINED_IMPORT = 1, + DYLD_CHAINED_IMPORT_ADDEND = 2, + DYLD_CHAINED_IMPORT_ADDEND64 = 3, +}; + +// DYLD_CHAINED_IMPORT +struct dyld_chained_import +{ + uint32_t lib_ordinal : 8, + weak_import : 1, + name_offset : 23; +}; + +// DYLD_CHAINED_IMPORT_ADDEND +struct dyld_chained_import_addend +{ + uint32_t lib_ordinal : 8, + weak_import : 1, + name_offset : 23; + int32_t addend; +}; + +// DYLD_CHAINED_IMPORT_ADDEND64 +struct dyld_chained_import_addend64 +{ + uint64_t lib_ordinal : 16, + weak_import : 1, + reserved : 15, + name_offset : 32; + uint64_t addend; +}; + +#endif // __MACH_O_FIXUP_CHAINS__ + diff --git a/EXTERNAL_HEADERS/mach-o/loader.h b/EXTERNAL_HEADERS/mach-o/loader.h index 64f6bac73..e17106362 100644 --- a/EXTERNAL_HEADERS/mach-o/loader.h +++ b/EXTERNAL_HEADERS/mach-o/loader.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999-2010 Apple Inc. All Rights Reserved. + * Copyright (c) 1999-2019 Apple Inc. All Rights Reserved. 
* * @APPLE_LICENSE_HEADER_START@ * @@ -120,6 +120,7 @@ struct mach_header_64 { #define MH_DSYM 0xa /* companion file with only debug */ /* sections */ #define MH_KEXT_BUNDLE 0xb /* x86_64 kexts */ +#define MH_FILESET 0xc /* set of mach-o's */ /* Constants for the flags field of the mach_header */ #define MH_NOUNDEFS 0x1 /* the object file has no undefined @@ -322,6 +323,7 @@ struct load_command { #define LC_BUILD_VERSION 0x32 /* build for platform min OS version */ #define LC_DYLD_EXPORTS_TRIE (0x33 | LC_REQ_DYLD) /* used with linkedit_data_command, payload is trie */ #define LC_DYLD_CHAINED_FIXUPS (0x34 | LC_REQ_DYLD) /* used with linkedit_data_command */ +#define LC_FILESET_ENTRY (0x35 | LC_REQ_DYLD) /* used with fileset_entry_command */ /* * A variable length string in a load command is represented by an lc_str @@ -622,6 +624,9 @@ struct section_64 { /* for 64-bit architectures */ /* option to ld(1) for MH_EXECUTE and */ /* FVMLIB file types only */ +#define SEG_LINKINFO "__LINKINFO" /* the segment overlapping with linkedit */ + /* containing linking information */ + #define SEG_UNIXSTACK "__UNIXSTACK" /* the unix stack segment */ #define SEG_IMPORT "__IMPORT" /* the segment for the self (dyld) */ @@ -1196,6 +1201,15 @@ struct linkedit_data_command { uint32_t datasize; /* file size of data in __LINKEDIT segment */ }; +struct fileset_entry_command { + uint32_t cmd; /* LC_FILESET_ENTRY */ + uint32_t cmdsize; /* includes id string */ + uint64_t vmaddr; /* memory address of the dylib */ + uint64_t fileoff; /* file offset of the dylib */ + union lc_str entry_id; /* contained entry id */ + uint32_t reserved; /* entry_id is 32-bits long, so this is the reserved padding */ +}; + /* * The encryption_info_command contains the file offset and size of an * of an encrypted segment. @@ -1264,7 +1278,7 @@ struct build_tool_version { #define PLATFORM_TVOS 3 #define PLATFORM_WATCHOS 4 #define PLATFORM_BRIDGEOS 5 -#define PLATFORM_IOSMAC 6 +#define PLATFORM_MACCATALYST 6 #define PLATFORM_IOSSIMULATOR 7 #define PLATFORM_TVOSSIMULATOR 8 #define PLATFORM_WATCHOSSIMULATOR 9 diff --git a/EXTERNAL_HEADERS/ptrauth.h b/EXTERNAL_HEADERS/ptrauth.h index e7d7dfd9b..a8d0564f0 100644 --- a/EXTERNAL_HEADERS/ptrauth.h +++ b/EXTERNAL_HEADERS/ptrauth.h @@ -156,13 +156,24 @@ typedef uintptr_t ptrauth_generic_signature_t; The argument must be a type. */ -#if __has_feature(ptrauth_type_discriminator) +#if __has_builtin(__builtin_ptrauth_type_discriminator) #define ptrauth_type_discriminator(__type) \ __builtin_ptrauth_type_discriminator(__type) #else #define ptrauth_type_discriminator(__type) ((uintptr_t)0) #endif +/* Compute the constant discriminator used by Clang to sign pointers with the + given C function pointer type. + + A call to this function is an integer constant expression*/ +#if __has_feature(ptrauth_function_pointer_type_discrimination) +#define ptrauth_function_pointer_type_discriminator(__type) \ + __builtin_ptrauth_type_discriminator(__type) +#else +#define ptrauth_function_pointer_type_discriminator(__type) ((uintptr_t)0) +#endif + /* Add a signature to the given pointer value using a specific key, using the given extra data as a salt to the signing process. @@ -232,6 +243,22 @@ typedef uintptr_t ptrauth_generic_signature_t; #define ptrauth_auth_function(__value, __old_key, __old_data) \ ptrauth_auth_and_resign(__value, __old_key, __old_data, ptrauth_key_function_pointer, 0) +/* Cast a pointer to the given type without changing any signature. + + The type must have the same size as a pointer type. 
+ The type of value must have the same size as a pointer type, and will be + converted to an rvalue prior to the cast. + The result has type given by the first argument. + + The result has an identical bit-pattern to the input pointer. */ +#define ptrauth_nop_cast(__type, __value) \ + ({ union { \ + typeof(__value) __fptr; \ + typeof(__type) __opaque; \ + } __storage; \ + __storage.__fptr = (__value); \ + __storage.__opaque; }) + /* Authenticate a data pointer. The value must be an expression of non-function pointer type. @@ -317,16 +344,23 @@ typedef uintptr_t ptrauth_generic_signature_t; #else -#define ptrauth_strip(__value, __key) __value -#define ptrauth_blend_discriminator(__pointer, __integer) ((uintptr_t)0) +#define ptrauth_strip(__value, __key) ({ (void)__key; __value; }) +#define ptrauth_blend_discriminator(__pointer, __integer) ({ (void)__pointer; (void)__integer; (uintptr_t)0; }) #define ptrauth_type_discriminator(__type) ((uintptr_t)0) -#define ptrauth_sign_constant(__value, __key, __data) __value -#define ptrauth_sign_unauthenticated(__value, __key, __data) __value -#define ptrauth_auth_and_resign(__value, __old_key, __old_data, __new_key, __new_data) __value -#define ptrauth_auth_function(__value, __old_key, __old_data) __value -#define ptrauth_auth_data(__value, __old_key, __old_data) __value -#define ptrauth_string_discriminator(__string) ((int)0) -#define ptrauth_sign_generic_data(__value, __data) ((ptrauth_generic_signature_t)0) +#define ptrauth_function_pointer_type_discriminator(__type) ((uintptr_t)0) +#define ptrauth_sign_constant(__value, __key, __data) ({ (void)__key; (void)__data; __value; }) +#define ptrauth_sign_unauthenticated(__value, __key, __data) ({ (void)__key; (void)__data; __value; }) +#define ptrauth_auth_and_resign(__value, __old_key, __old_data, __new_key, __new_data) ({ \ + (void)__old_key; \ + (void)__old_data; \ + (void)__new_key; \ + (void)__new_data; \ + __value; }) +#define ptrauth_auth_function(__value, __old_key, __old_data) ({ (void)__old_key; (void)__old_data; __value; }) +#define ptrauth_nop_cast(__type, __value) ((__type)__value) +#define ptrauth_auth_data(__value, __old_key, __old_data) ({ (void)__old_key; (void)__old_data; __value; }) +#define ptrauth_string_discriminator(__string) ({ (void)__string; (int)0; }) +#define ptrauth_sign_generic_data(__value, __data) ({ (void)__value; (void)__data; (ptrauth_generic_signature_t)0; }) #define __ptrauth_function_pointer #define __ptrauth_return_address diff --git a/Makefile b/Makefile index 64822cdf0..30e496301 100644 --- a/Makefile +++ b/Makefile @@ -36,12 +36,16 @@ export MakeInc_dir=${VERSDIR}/makedefs/MakeInc.dir # systems. All xnu variants start with MakeInc_top. # -ifeq ($(findstring Libsyscall,$(RC_ProjectName)),Libsyscall) +ifneq ($(findstring Libsyscall,$(RC_ProjectName)),) include $(MakeInc_cmd) -ifneq ($(findstring Libsyscall_,$(RC_ProjectName)),) -TARGET=-target $(RC_ProjectName) +ifeq ($(RC_ProjectName),Libsyscall_headers_Sim) +TARGET=-target Libsyscall_headers_Sim +endif + +ifeq ($(RC_ProjectName),Libsyscall_driverkit) +TARGET=-target Libsyscall_driverkit endif default: install @@ -68,35 +72,35 @@ clean: installsrc: pax -rw . $(SRCROOT) -else ifeq ($(RC_ProjectName),libkxld) +else ifneq ($(findstring libkxld_host,$(RC_ProjectName)),) include $(MakeInc_cmd) default: install installhdrs install clean: - $(MAKE) -C libkern/kxld $@ USE_APPLE_PB_SUPPORT=all + $(MAKE) -C libkern/kxld $@ USE_APPLE_PB_SUPPORT=all PRODUCT_TYPE=ARCHIVE installsrc: $(_v)$(MKDIR) $(SRCROOT) $(_v)$(FIND) -x . \! 
\( \( -name BUILD -o -name .svn -o -name .git -o -name cscope.\* -o -name \*~ \) -prune \) -print0 | $(PAX) -rw -p a -d0 $(SRCROOT) $(_v)$(CHMOD) -R go+rX $(SRCROOT) -else ifeq ($(RC_ProjectName),libkxld_host) +else ifneq ($(findstring libkxld,$(RC_ProjectName)),) include $(MakeInc_cmd) default: install installhdrs install clean: - $(MAKE) -C libkern/kxld $@ USE_APPLE_PB_SUPPORT=all PRODUCT_TYPE=ARCHIVE + $(MAKE) -C libkern/kxld $@ USE_APPLE_PB_SUPPORT=all installsrc: $(_v)$(MKDIR) $(SRCROOT) $(_v)$(FIND) -x . \! \( \( -name BUILD -o -name .svn -o -name .git -o -name cscope.\* -o -name \*~ \) -prune \) -print0 | $(PAX) -rw -p a -d0 $(SRCROOT) $(_v)$(CHMOD) -R go+rX $(SRCROOT) -else ifeq ($(RC_ProjectName),libkmod) +else ifneq ($(findstring libkmod,$(RC_ProjectName)),) default: install @@ -115,7 +119,7 @@ clean: installsrc: pax -rw . $(SRCROOT) -else ifeq ($(RC_ProjectName),xnu_tests) +else ifneq ($(findstring xnu_tests,$(RC_ProjectName)),) export SYSCTL_HW_PHYSICALCPU := $(shell /usr/sbin/sysctl -n hw.physicalcpu) export SYSCTL_HW_LOGICALCPU := $(shell /usr/sbin/sysctl -n hw.logicalcpu) @@ -129,6 +133,23 @@ install: xnu_tests clean: +installsrc: + pax -rw . $(SRCROOT) + +else ifeq ($(RC_ProjectName),xnu_tests_driverkit) + +export SYSCTL_HW_PHYSICALCPU := $(shell /usr/sbin/sysctl -n hw.physicalcpu) +export SYSCTL_HW_LOGICALCPU := $(shell /usr/sbin/sysctl -n hw.logicalcpu) +MAKEJOBS := --jobs=$(shell expr $(SYSCTL_HW_LOGICALCPU) + 1) + +default: install + +installhdrs: + +install: xnu_tests_driverkit + +clean: + installsrc: pax -rw . $(SRCROOT) @@ -154,29 +175,30 @@ export SYSCTL_HW_PHYSICALCPU := $(shell /usr/sbin/sysctl -n hw.physicalcpu) export SYSCTL_HW_LOGICALCPU := $(shell /usr/sbin/sysctl -n hw.logicalcpu) MAKEJOBS := --jobs=$(shell expr $(SYSCTL_HW_LOGICALCPU) + 1) -TOP_TARGETS = \ - clean \ - installsrc \ - exporthdrs \ - all all_desktop all_embedded \ - all_release_embedded all_development_embedded \ - installhdrs installhdrs_desktop installhdrs_embedded \ - installhdrs_release_embedded installhdrs_development_embedded \ - install install_desktop install_embedded \ - install_release_embedded install_development_embedded \ - installopensource \ - cscope tags TAGS checkstyle restyle check_uncrustify uncrustify \ +TOP_TARGETS = \ + clean \ + installsrc \ + exporthdrs \ + all all_desktop all_embedded \ + all_release_embedded all_development_embedded \ + installhdrs installhdrs_desktop installhdrs_embedded \ + installhdrs_release_embedded installhdrs_development_embedded \ + install install_desktop install_embedded \ + install_release_embedded install_development_embedded \ + install_kernels \ + installopensource \ + cscope tags TAGS checkstyle restyle check_uncrustify uncrustify \ help DEFAULT_TARGET = all # Targets for internal build system debugging -TOP_TARGETS += \ - print_exports print_exports_first_build_config \ - setup \ - build \ - config \ - install_textfiles \ +TOP_TARGETS += \ + print_exports print_exports_first_build_config \ + setup \ + build \ + config \ + install_textfiles \ install_config ifeq ($(BUILD_JSON_COMPILATION_DATABASE),1) @@ -235,6 +257,10 @@ COMP_SUBDIRS_ARM64 = $(ALL_SUBDIRS) INSTTEXTFILES_SUBDIRS = \ bsd +INSTTEXTFILES_SUBDIRS_X86_64 = $(INSTTEXTFILES_SUBDIRS) +INSTTEXTFILES_SUBDIRS_X86_64H = $(INSTTEXTFILES_SUBDIRS) +INSTTEXTFILES_SUBDIRS_ARM = $(INSTTEXTFILES_SUBDIRS) +INSTTEXTFILES_SUBDIRS_ARM64 = $(INSTTEXTFILES_SUBDIRS) include $(MakeInc_kernel) include $(MakeInc_rule) @@ -286,3 +312,42 @@ xnu_tests: SRCROOT=$(SRCROOT)/tools/tests $(MAKE) -C 
$(SRCROOT)/tests $(if $(filter -j,$(MAKEFLAGS)),,$(MAKEJOBS)) \ SRCROOT=$(SRCROOT)/tests + +xnu_tests_driverkit: + $(MAKE) -C $(SRCROOT)/tests/driverkit $(if $(filter -j,$(MAKEFLAGS)),,$(MAKEJOBS)) \ + SRCROOT=$(SRCROOT)/tests/driverkit + +# +# The "analyze" target defined below invokes Clang Static Analyzer +# with a predefined set of checks and options for the project. +# + +# By default, analysis results are available in BUILD/StaticAnalyzer. +# Set this variable in your make invocation to use a different directory. +# Note that these results are only deleted when the build directory +# is cleaned. They aren't deleted every time the analyzer is re-run, +# but they are deleted after "make clean". +STATIC_ANALYZER_OUTPUT_DIR ?= $(SRCROOT)/BUILD/StaticAnalyzer + +# By default, the default make target is analyzed. You can analyze +# other targets by setting this variable in your make invocation. +STATIC_ANALYZER_TARGET ?= + +# You can pass additional flags to scan-build by setting this variable +# in your make invocation. For example, you can enable additional checks. +STATIC_ANALYZER_EXTRA_FLAGS ?= + +analyze: + # This is where the reports are going to be available. + # Old reports are deleted on make clean only. + mkdir -p $(STATIC_ANALYZER_OUTPUT_DIR) + + # Recursively build the requested target under scan-build. + # Exclude checks that weren't deemed to be security critical, + # like null pointer dereferences. + xcrun scan-build -o $(STATIC_ANALYZER_OUTPUT_DIR) \ + -disable-checker deadcode.DeadStores \ + -disable-checker core.NullDereference \ + -disable-checker core.DivideZero \ + $(STATIC_ANALYZER_EXTRA_FLAGS) \ + make $(STATIC_ANALYZER_TARGET) diff --git a/README.md b/README.md index bb146bea3..be335d459 100644 --- a/README.md +++ b/README.md @@ -58,6 +58,19 @@ Note: This will also create a bootable image, kernel.[config], and a kernel binary with symbols, kernel.[config].unstripped. +To install the kernel into a DSTROOT, use the `install_kernels` target: + + $ make install_kernels DSTROOT=/tmp/xnu-dst + +Hint: +For a more satisfying kernel debugging experience, with access to all +local variables and arguments, but without all the extra checks of the +DEBUG kernel, add something like: + CFLAGS_DEVELOPMENTARM64="-O0 -g -DKERNEL_STACK_MULTIPLIER=2" + CXXFLAGS_DEVELOPMENTARM64="-O0 -g -DKERNEL_STACK_MULTIPLIER=2" +to your make command. +Replace DEVELOPMENT and ARM64 with the appropriate build and platform. + + * To build with RELEASE kernel configuration @@ -265,6 +278,14 @@ member file lists and their default location are described below - Definition - EXPORT_MI_LIST = ${KERNELFILES} ${PRIVATE_KERNELFILES} + g. `INSTALL_MODULEMAP_INCDIR_MI_LIST` : Installs module map file to a + location that is available to everyone in user level, installing at the + root of INCDIR.
+ Locations - + $(DSTROOT)/usr/include + Definition - + INSTALL_MODULEMAP_INCDIR_MI_LIST = ${MODULEMAP_INCDIR_FILES} + If you want to install the header file in a sub-directory of the paths described in (1), specify the directory name using two variables `INSTALL_MI_DIR` and `EXPORT_MI_DIR` as follows - diff --git a/SETUP/config/Makefile b/SETUP/config/Makefile index fb79f3fcd..52dd35045 100644 --- a/SETUP/config/Makefile +++ b/SETUP/config/Makefile @@ -17,21 +17,21 @@ WARNFLAGS = -Wall LDFLAGS = -isysroot $(HOST_SDKROOT) -mmacosx-version-min=$(HOST_OS_VERSION) config: $(OBJS) - $(call makelog,$(ColorH)HOST_LD$(Color0) $(ColorF)$@$(Color0)) + @$(LOG_HOST_LD) "$@" $(_v)$(HOST_CC) $(LDFLAGS) -o $@ $^ - $(call makelog,$(ColorH)HOST_CODESIGN$(Color0) $(ColorF)$@$(Color0)) + @$(LOG_HOST_CODESIGN) "$@" $(_v)env CODESIGN_ALLOCATE=$(HOST_CODESIGN_ALLOCATE) $(HOST_CODESIGN) -s - $@ %.o: %.c - $(call makelog,$(ColorH)HOST_CC$(Color0) $(ColorF)$@$(Color0)) + @$(LOG_HOST_CC) "$@" $(_v)$(HOST_CC) $(WARNFLAGS) $(CFLAGS) -c -o $@ $< parser.c: parser.y - $(call makelog,$(ColorH)HOST_BISON$(Color0) $(ColorF)$@$(Color0)) + @$(LOG_HOST_BISON) "$@" $(_v)$(HOST_BISON) -y -d -d -o $@ $< lexer.yy.c: lexer.l - $(call makelog,$(ColorH)HOST_FLEX$(Color0) $(ColorF)$@$(Color0)) + @$(LOG_HOST_FLEX) "$@" $(_v)env M4=$(HOST_GM4) $(HOST_FLEX) --header-file=lexer.yy.h -o $@ $< main.o mkheaders.o mkioconf.o mkmakefile.o lexer.yy.c: parser.c diff --git a/SETUP/config/doconf b/SETUP/config/doconf index 8e2a5a1ed..7b0d5ace8 100755 --- a/SETUP/config/doconf +++ b/SETUP/config/doconf @@ -78,6 +78,7 @@ unset beverbose unset MACHINE unset profile unset SOC_CONFIG +unset PLATFORM while ($#argv >= 1) if ("$argv[1]" =~ -*) then @@ -102,6 +103,14 @@ while ($#argv >= 1) set SOC_CONFIG="$argv[2]" shift breaksw + case "-platform": + if ($#argv < 2) then + echo "${prog}: missing argument to ${argv[1]}" + exit 1 + endif + set PLATFORM="$argv[2]" + shift + breaksw case "-d": if ($#argv < 2) then echo "${prog}: missing argument to ${argv[1]}" @@ -155,22 +164,32 @@ set cpu=`echo $MACHINE | tr A-Z a-z` set ID=`echo $MACHINE | tr a-z A-Z` set MASTER_DIR=${MASTER_CONF_DIR} set MASTER = ${MASTER_DIR}/MASTER -set MASTER_CPU=${MASTER}.${cpu} -set MASTER_CPU_PER_SOC=${MASTER}.${cpu}.${SOC_CONFIG} -if (-f $MASTER_CPU_PER_SOC) set MASTER_CPU = ${MASTER_CPU_PER_SOC} + +foreach master_file (${MASTER}.${cpu}.${SOC_CONFIG}.${PLATFORM} ${MASTER}.${cpu}.${SOC_CONFIG} ${MASTER}.${cpu}.${PLATFORM} ${MASTER}.${cpu}) + if (-f $master_file) then + set MASTER_CPU = $master_file + break + endif +end + +if ($?beverbose) then + echo MASTER_CPU=$MASTER_CPU +endif foreach SYS ($argv) set SYSID=${SYS}_${ID} set SYSCONF=$OBJDIR/config.$SYSID set BLDDIR=$OBJDIR if ($?beverbose) then - echo "[ generating $SYSID from $MASTER_DIR/MASTER{,.$cpu}{,.local} ]" + echo "[ generating $SYSID from $MASTER_DIR/MASTER{,.$cpu}{,.local} ]" endif echo +$SYS \ | \ cat $MASTER $MASTER_CPU - \ $MASTER $MASTER_CPU \ | \ + unifdef -t -DPLATFORM_${PLATFORM} -DCPU_$cpu -DSOC_CONFIG_${SOC_CONFIG} -DSYS_${SYS} - \ + | \ sed -n \ -e "/^+/{" \ -e "s;[-+];#&;gp" \ diff --git a/SETUP/decomment/Makefile b/SETUP/decomment/Makefile index a22212f6e..6d3ffff40 100644 --- a/SETUP/decomment/Makefile +++ b/SETUP/decomment/Makefile @@ -15,13 +15,13 @@ WARNFLAGS = -Wall LDFLAGS = -isysroot $(HOST_SDKROOT) -mmacosx-version-min=$(HOST_OS_VERSION) decomment: $(OBJS) - $(call makelog,$(ColorH)HOST_LD$(Color0) $(ColorF)$@$(Color0)) + @$(LOG_HOST_LD) "$@" $(_v)$(HOST_CC) $(LDFLAGS) -o $@ $^ - $(call 
makelog,$(ColorH)HOST_CODESIGN$(Color0) $(ColorF)$@$(Color0)) + @$(LOG_HOST_CODESIGN) "$@" $(_v)env CODESIGN_ALLOCATE=$(HOST_CODESIGN_ALLOCATE) $(HOST_CODESIGN) -s - $@ %.o: %.c - $(call makelog,$(ColorH)HOST_CC$(Color0) $(ColorF)$@$(Color0)) + @$(LOG_HOST_CC) "$@" $(_v)$(HOST_CC) $(WARNFLAGS) $(CFLAGS) -c -o $@ $< do_build_setup:: decomment diff --git a/SETUP/installfile/Makefile b/SETUP/installfile/Makefile index 4ad7a7498..0e946f8ac 100644 --- a/SETUP/installfile/Makefile +++ b/SETUP/installfile/Makefile @@ -15,13 +15,13 @@ WARNFLAGS = -Wall LDFLAGS = -isysroot $(HOST_SDKROOT) -mmacosx-version-min=$(HOST_OS_VERSION) installfile: $(OBJS) - $(call makelog,$(ColorH)HOST_LD$(Color0) $(ColorF)$@$(Color0)) + @$(LOG_HOST_LD) "$@" $(_v)$(HOST_CC) $(LDFLAGS) -o $@ $^ - $(call makelog,$(ColorH)HOST_CODESIGN$(Color0) $(ColorF)$@$(Color0)) + @$(LOG_HOST_CODESIGN) "$@" $(_v)env CODESIGN_ALLOCATE=$(HOST_CODESIGN_ALLOCATE) $(HOST_CODESIGN) -s - $@ %.o: %.c - $(call makelog,$(ColorH)HOST_CC$(Color0) $(ColorF)$@$(Color0)) + @$(LOG_HOST_CC) "$@" $(_v)$(HOST_CC) $(WARNFLAGS) $(CFLAGS) -c -o $@ $< do_build_setup:: installfile diff --git a/SETUP/installfile/installfile.c b/SETUP/installfile/installfile.c index fa2cbe1c9..8e1c9142f 100644 --- a/SETUP/installfile/installfile.c +++ b/SETUP/installfile/installfile.c @@ -84,7 +84,7 @@ main(int argc, char * argv[]) src = argv[0]; dst = argv[1]; - srcfd = open(src, O_RDONLY | O_SYMLINK, 0); + srcfd = open(src, O_RDONLY, 0); if (srcfd < 0) { err(EX_NOINPUT, "open(%s)", src); } diff --git a/SETUP/json_compilation_db/Makefile b/SETUP/json_compilation_db/Makefile index 18af26bdd..1db5e8a61 100644 --- a/SETUP/json_compilation_db/Makefile +++ b/SETUP/json_compilation_db/Makefile @@ -15,13 +15,13 @@ WARNFLAGS = -Wall LDFLAGS = -isysroot $(HOST_SDKROOT) -mmacosx-version-min=$(HOST_OS_VERSION) json_compilation_db: $(OBJS) - $(call makelog,$(ColorH)HOST_LD$(Color0) $(ColorF)$@$(Color0)) + @$(LOG_HOST_LD) "$@" $(_v)$(HOST_CC) $(LDFLAGS) -o $@ $^ - $(call makelog,$(ColorH)HOST_CODESIGN$(Color0) $(ColorF)$@$(Color0)) + @$(LOG_HOST_CODESIGN) "$@" $(_v)env CODESIGN_ALLOCATE=$(HOST_CODESIGN_ALLOCATE) $(HOST_CODESIGN) -s - $@ %.o: %.c - $(call makelog,$(ColorH)HOST_CC$(Color0) $(ColorF)$@$(Color0)) + @$(LOG_HOST_CC) "$@" $(_v)$(HOST_CC) $(WARNFLAGS) $(CFLAGS) -c -o $@ $< do_build_setup:: json_compilation_db diff --git a/SETUP/kextsymboltool/Makefile b/SETUP/kextsymboltool/Makefile index dde295bae..45d768983 100644 --- a/SETUP/kextsymboltool/Makefile +++ b/SETUP/kextsymboltool/Makefile @@ -15,13 +15,13 @@ WARNFLAGS = -Wall LDFLAGS = -isysroot $(HOST_SDKROOT) -mmacosx-version-min=$(HOST_OS_VERSION) -lstdc++ kextsymboltool: $(OBJS) - $(call makelog,$(ColorH)HOST_LD$(Color0) $(ColorF)$@$(Color0)) + @$(LOG_HOST_LD) "$@" $(_v)$(HOST_CC) $(LDFLAGS) -o $@ $^ - $(call makelog,$(ColorH)HOST_CODESIGN$(Color0) $(ColorF)$@$(Color0)) + @$(LOG_HOST_CODESIGN) "$@" $(_v)env CODESIGN_ALLOCATE=$(HOST_CODESIGN_ALLOCATE) $(HOST_CODESIGN) -s - $@ %.o: %.c - $(call makelog,$(ColorH)HOST_CC$(Color0) $(ColorF)$@$(Color0)) + @$(LOG_HOST_CC) "$@" $(_v)$(HOST_CC) $(WARNFLAGS) $(CFLAGS) -c -o $@ $< do_build_setup:: kextsymboltool diff --git a/SETUP/replacecontents/Makefile b/SETUP/replacecontents/Makefile index 45459e48b..016cfeccc 100644 --- a/SETUP/replacecontents/Makefile +++ b/SETUP/replacecontents/Makefile @@ -15,13 +15,13 @@ WARNFLAGS = -Wall LDFLAGS = -isysroot $(HOST_SDKROOT) -mmacosx-version-min=$(HOST_OS_VERSION) replacecontents: $(OBJS) - $(call makelog,$(ColorH)HOST_LD$(Color0) 
$(ColorF)$@$(Color0)) + @$(LOG_HOST_LD) "$@" $(_v)$(HOST_CC) $(LDFLAGS) -o $@ $^ - $(call makelog,$(ColorH)HOST_CODESIGN$(Color0) $(ColorF)$@$(Color0)) + @$(LOG_HOST_CODESIGN) "$@" $(_v)env CODESIGN_ALLOCATE=$(HOST_CODESIGN_ALLOCATE) $(HOST_CODESIGN) -s - $@ %.o: %.c - $(call makelog,$(ColorH)HOST_CC$(Color0) $(ColorF)$@$(Color0)) + @$(LOG_HOST_CC) "$@" $(_v)$(HOST_CC) $(WARNFLAGS) $(CFLAGS) -c -o $@ $< do_build_setup:: replacecontents diff --git a/SETUP/setsegname/Makefile b/SETUP/setsegname/Makefile index 70a55a7b5..39607f1f0 100644 --- a/SETUP/setsegname/Makefile +++ b/SETUP/setsegname/Makefile @@ -15,13 +15,13 @@ WARNFLAGS = -Wall LDFLAGS = -isysroot $(HOST_SDKROOT) -mmacosx-version-min=$(HOST_OS_VERSION) setsegname: $(OBJS) - $(call makelog,$(ColorH)HOST_LD$(Color0) $(ColorF)$@$(Color0)) + @$(LOG_HOST_LD) "$@" $(_v)$(HOST_CC) $(LDFLAGS) -o $@ $^ - $(call makelog,$(ColorH)HOST_CODESIGN$(Color0) $(ColorF)$@$(Color0)) + @$(LOG_HOST_CODESIGN) "$@" $(_v)env CODESIGN_ALLOCATE=$(HOST_CODESIGN_ALLOCATE) $(HOST_CODESIGN) -s - $@ %.o: %.c - $(call makelog,$(ColorH)HOST_CC$(Color0) $(ColorF)$@$(Color0)) + @$(LOG_HOST_CC) "$@" $(_v)$(HOST_CC) $(WARNFLAGS) $(CFLAGS) -c -o $@ $< do_build_setup:: setsegname diff --git a/bsd/Makefile b/bsd/Makefile index 22db7cb4d..a11cb1920 100644 --- a/bsd/Makefile +++ b/bsd/Makefile @@ -44,6 +44,7 @@ INSTINC_SUBDIRS_ARM64 = \ EXPINC_SUBDIRS = \ bsm \ + crypto/entropy \ dev \ libkern \ machine \ @@ -81,5 +82,17 @@ INSTTEXTFILES_SUBDIRS = \ man \ sys +INSTTEXTFILES_SUBDIRS_X86_64 = \ + dev + +INSTTEXTFILES_SUBDIRS_X86_64H = \ + dev + +INSTTEXTFILES_SUBDIRS_ARM = \ + dev + +INSTTEXTFILES_SUBDIRS_ARM64 = \ + dev + include $(MakeInc_rule) include $(MakeInc_dir) diff --git a/bsd/arm/Makefile b/bsd/arm/Makefile index f9ab9a989..19fd88f53 100644 --- a/bsd/arm/Makefile +++ b/bsd/arm/Makefile @@ -12,6 +12,9 @@ DATAFILES = \ types.h vmparam.h _types.h _param.h \ _mcontext.h +DRIVERKIT_DATAFILES = \ + limits.h _limits.h types.h _types.h endian.h + PRIVATE_DATAFILES = \ disklabel.h @@ -22,6 +25,7 @@ KERNELFILES = \ _mcontext.h INSTALL_MD_LIST = ${DATAFILES} +INSTALL_DRIVERKIT_MD_LIST = ${DRIVERKIT_DATAFILES} INSTALL_MD_LCL_LIST = ${PRIVATE_DATAFILES} INSTALL_MD_DIR = arm diff --git a/bsd/arm/_param.h b/bsd/arm/_param.h index 1a8787637..abfdc7d0e 100644 --- a/bsd/arm/_param.h +++ b/bsd/arm/_param.h @@ -13,10 +13,10 @@ * cast to any desired pointer type. 
*/ #define __DARWIN_ALIGNBYTES (sizeof(__darwin_size_t) - 1) -#define __DARWIN_ALIGN(p) ((__darwin_size_t)((char *)(__darwin_size_t)(p) + __DARWIN_ALIGNBYTES) &~ __DARWIN_ALIGNBYTES) +#define __DARWIN_ALIGN(p) ((__darwin_size_t)((__darwin_size_t)(p) + __DARWIN_ALIGNBYTES) &~ __DARWIN_ALIGNBYTES) #define __DARWIN_ALIGNBYTES32 (sizeof(__uint32_t) - 1) -#define __DARWIN_ALIGN32(p) ((__darwin_size_t)((char *)(__darwin_size_t)(p) + __DARWIN_ALIGNBYTES32) &~ __DARWIN_ALIGNBYTES32) +#define __DARWIN_ALIGN32(p) ((__darwin_size_t)((__darwin_size_t)(p) + __DARWIN_ALIGNBYTES32) &~ __DARWIN_ALIGNBYTES32) #endif /* _ARM__PARAM_H_ */ diff --git a/bsd/arm/vmparam.h b/bsd/arm/vmparam.h index cfa45d66e..085f13968 100644 --- a/bsd/arm/vmparam.h +++ b/bsd/arm/vmparam.h @@ -7,6 +7,10 @@ #include +#ifndef KERNEL +#include +#endif + #define USRSTACK (0x27E00000) /* ASLR slides stack down by up to 1MB */ #define USRSTACK64 (0x000000016FE00000ULL) @@ -20,11 +24,23 @@ #define MAXDSIZ (RLIM_INFINITY) /* max data size */ #endif #ifndef DFLSSIZ +/* XXX stack size default is a platform property: use getrlimit(2) */ +#if (defined(TARGET_OS_OSX) && (TARGET_OS_OSX != 0)) || \ + (defined(KERNEL) && !defined(CONFIG_EMBEDDED) || (CONFIG_EMBEDDED == 0)) +#define DFLSSIZ (8*1024*1024 - 16*1024) +#else #define DFLSSIZ (1024*1024 - 16*1024) /* initial stack size limit */ -#endif +#endif /* TARGET_OS_OSX .. || XNU_KERNEL_PRIVATE .. */ +#endif /* DFLSSIZ */ #ifndef MAXSSIZ +/* XXX stack size limit is a platform property: use getrlimit(2) */ +#if (defined(TARGET_OS_OSX) && (TARGET_OS_OSX != 0)) || \ + (defined(KERNEL) && !defined(CONFIG_EMBEDDED) || (CONFIG_EMBEDDED == 0)) +#define MAXSSIZ (64*1024*1024) /* max stack size */ +#else #define MAXSSIZ (1024*1024) /* max stack size */ -#endif +#endif /* TARGET_OS_OSX .. || XNU_KERNEL_PRIVATE .. */ +#endif /* MAXSSIZ */ #ifndef DFLCSIZ #define DFLCSIZ (0) /* initial core size limit */ #endif diff --git a/bsd/bsm/audit.h b/bsd/bsm/audit.h index 2bac16e91..3352f6b33 100644 --- a/bsd/bsm/audit.h +++ b/bsd/bsm/audit.h @@ -332,13 +332,24 @@ struct au_evclass_map { }; typedef struct au_evclass_map au_evclass_map_t; + +#if !defined(_KERNEL) && !defined(KERNEL) +#include +#define __AUDIT_API_DEPRECATED __API_DEPRECATED("audit is deprecated", macos(10.4, 10.16)) +#else +#define __AUDIT_API_DEPRECATED +#endif + /* * Audit system calls. */ #if !defined(_KERNEL) && !defined(KERNEL) -int audit(const void *, int); -int auditon(int, void *, int); -int auditctl(const char *); +int audit(const void *, int) +__AUDIT_API_DEPRECATED; +int auditon(int, void *, int) +__AUDIT_API_DEPRECATED; +int auditctl(const char *) +__AUDIT_API_DEPRECATED; int getauid(au_id_t *); int setauid(const au_id_t *); int getaudit_addr(struct auditinfo_addr *, int); @@ -360,8 +371,10 @@ __OSX_AVAILABLE_BUT_DEPRECATED(__MAC_10_0, __MAC_10_8, __IPHONE_2_0, __IPHONE_6_0); #else -int getaudit(struct auditinfo *); -int setaudit(const struct auditinfo *); +int getaudit(struct auditinfo *) +__AUDIT_API_DEPRECATED; +int setaudit(const struct auditinfo *) +__AUDIT_API_DEPRECATED; #endif /* !__APPLE__ */ #ifdef __APPLE_API_PRIVATE diff --git a/bsd/bsm/audit_kevents.h b/bsd/bsm/audit_kevents.h index a484e8528..c76aa9844 100644 --- a/bsd/bsm/audit_kevents.h +++ b/bsd/bsm/audit_kevents.h @@ -615,6 +615,8 @@ #define AUE_FMOUNT 43213 /* Darwin. */ #define AUE_FSGETPATH_EXTENDED 43214 /* Darwin. */ #define AUE_DBGPORTFORPID 43215 /* Darwin-specific. */ +#define AUE_PREADV 43216 /* Darwin. */ +#define AUE_PWRITEV 43217 /* Darwin. 
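Note on the `__DARWIN_ALIGN`/`__DARWIN_ALIGN32` change above: with the intermediate `(char *)` cast removed, the macros are pure integer round-up-to-alignment arithmetic on `__darwin_size_t`. The following is an illustrative sketch only (not part of the patch), using plain `uintptr_t` in place of the `__darwin_*` types to show the rounding behaviour:

/*
 * Sketch of the round-up arithmetic used by __DARWIN_ALIGN, with
 * uintptr_t standing in for __darwin_size_t.
 */
#include <stdint.h>
#include <stdio.h>

#define ALIGNBYTES  (sizeof(uintptr_t) - 1)            /* 7 on LP64 */
#define ALIGN_UP(p) (((uintptr_t)(p) + ALIGNBYTES) & ~ALIGNBYTES)

int
main(void)
{
	/* 0x1003 rounds up to 0x1008 on LP64 (8-byte alignment). */
	printf("0x%lx -> 0x%lx\n", (unsigned long)0x1003,
	    (unsigned long)ALIGN_UP(0x1003));
	/* Already-aligned values are left unchanged. */
	printf("0x%lx -> 0x%lx\n", (unsigned long)0x1008,
	    (unsigned long)ALIGN_UP(0x1008));
	return 0;
}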
*/ #define AUE_SESSION_START 44901 /* Darwin. */ #define AUE_SESSION_UPDATE 44902 /* Darwin. */ @@ -754,12 +756,10 @@ #define AUE_MADVISE AUE_NULL #define AUE_MINCORE AUE_NULL #define AUE_MKCOMPLEX AUE_NULL -#define AUE_MODWATCH AUE_NULL #define AUE_MSGCL AUE_NULL #define AUE_MSYNC AUE_NULL #define AUE_NECP AUE_NULL #define AUE_NETAGENT AUE_NULL -#define AUE_PREADV AUE_NULL #define AUE_PROCINFO AUE_NULL #define AUE_PTHREADCANCELED AUE_NULL #define AUE_PTHREADCHDIR AUE_NULL @@ -774,7 +774,6 @@ #define AUE_PTHREADMUTEXINIT AUE_NULL #define AUE_PTHREADMUTEXTRYLOCK AUE_NULL #define AUE_PTHREADMUTEXUNLOCK AUE_NULL -#define AUE_PWRITEV AUE_NULL #define AUE_REMOVEXATTR AUE_NULL #define AUE_SBRK AUE_NULL #define AUE_SELECT AUE_NULL @@ -810,9 +809,7 @@ #define AUE_SYSCALL AUE_NULL #define AUE_TABLE AUE_NULL #define AUE_VMPRESSUREMONITOR AUE_NULL -#define AUE_WAITEVENT AUE_NULL #define AUE_WAITID AUE_NULL -#define AUE_WATCHEVENT AUE_NULL #define AUE_WORKQOPEN AUE_NULL #define AUE_WORKQOPS AUE_NULL #define AUE_WORKLOOPCTL AUE_NULL @@ -821,5 +818,7 @@ #define AUE_NEXUS AUE_NULL #define AUE_CHANNEL AUE_NULL #define AUE_NET AUE_NULL +#define AUE_TASKREADFORPID AUE_NULL +#define AUE_TASKINSPECTFORPID AUE_NULL #endif /* !_BSM_AUDIT_KEVENTS_H_ */ diff --git a/bsd/bsm/audit_record.h b/bsd/bsm/audit_record.h index 8cd2cebec..dbf095086 100644 --- a/bsd/bsm/audit_record.h +++ b/bsd/bsm/audit_record.h @@ -176,6 +176,15 @@ #define AUT_TRAILER_MAGIC 0xb105 +#if !defined(_KERNEL) && !defined(KERNEL) +#include +#define __AUDIT_API_DEPRECATED __API_DEPRECATED("audit is deprecated", macos(10.4, 10.16)) +#define __AUDIT_API_14_DEPRECATED __API_DEPRECATED("audit is deprecated", macos(10.14, 10.16)) +#else +#define __AUDIT_API_DEPRECATED +#define __AUDIT_API_14_DEPRECATED +#endif + /* BSM library calls */ __BEGIN_DECLS @@ -193,118 +202,193 @@ struct sockaddr_un; struct vnode_au_info; #endif -int au_open(void); -int au_write(int d, token_t *m); -int au_close(int d, int keep, short event); -int au_close_buffer(int d, short event, u_char *buffer, size_t *buflen); -int au_close_token(token_t *tok, u_char *buffer, size_t *buflen); +int au_open(void) +__AUDIT_API_DEPRECATED; +int au_write(int d, token_t *m) +__AUDIT_API_DEPRECATED; +int au_close(int d, int keep, short event) +__AUDIT_API_DEPRECATED; +int au_close_buffer(int d, short event, u_char *buffer, size_t *buflen) +__AUDIT_API_DEPRECATED; +int au_close_token(token_t *tok, u_char *buffer, size_t *buflen) +__AUDIT_API_DEPRECATED; -token_t *au_to_file(const char *file, struct timeval tm); +token_t *au_to_file(const char *file, struct timeval tm) +__AUDIT_API_DEPRECATED; token_t *au_to_header32_tm(int rec_size, au_event_t e_type, au_emod_t e_mod, - struct timeval tm); + struct timeval tm) +__AUDIT_API_DEPRECATED; token_t *au_to_header32_ex_tm(int rec_size, au_event_t e_type, au_emod_t e_mod, - struct timeval tm, struct auditinfo_addr *aia); + struct timeval tm, struct auditinfo_addr *aia) +__AUDIT_API_DEPRECATED; token_t *au_to_header64_tm(int rec_size, au_event_t e_type, au_emod_t e_mod, - struct timeval tm); + struct timeval tm) +__AUDIT_API_DEPRECATED; #if !defined(KERNEL) && !defined(_KERNEL) -token_t *au_to_header(int rec_size, au_event_t e_type, au_emod_t e_mod); -token_t *au_to_header_ex(int rec_size, au_event_t e_type, au_emod_t e_mod); -token_t *au_to_header32(int rec_size, au_event_t e_type, au_emod_t e_mod); -token_t *au_to_header64(int rec_size, au_event_t e_type, au_emod_t e_mod); -token_t *au_to_header32_ex(int rec_size, au_event_t e_type, au_emod_t e_mod); 
+token_t *au_to_header(int rec_size, au_event_t e_type, au_emod_t e_mod) +__AUDIT_API_DEPRECATED; +token_t *au_to_header_ex(int rec_size, au_event_t e_type, au_emod_t e_mod) +__AUDIT_API_DEPRECATED; +token_t *au_to_header32(int rec_size, au_event_t e_type, au_emod_t e_mod) +__AUDIT_API_DEPRECATED; +token_t *au_to_header64(int rec_size, au_event_t e_type, au_emod_t e_mod) +__AUDIT_API_DEPRECATED; +token_t *au_to_header32_ex(int rec_size, au_event_t e_type, au_emod_t e_mod) +__AUDIT_API_DEPRECATED; #endif -token_t *au_to_me(void); -token_t *au_to_arg(char n, const char *text, uint32_t v); -token_t *au_to_arg32(char n, const char *text, uint32_t v); -token_t *au_to_arg64(char n, const char *text, uint64_t v); +token_t *au_to_me(void) +__AUDIT_API_DEPRECATED; +token_t *au_to_arg(char n, const char *text, uint32_t v) +__AUDIT_API_DEPRECATED; +token_t *au_to_arg32(char n, const char *text, uint32_t v) +__AUDIT_API_DEPRECATED; +token_t *au_to_arg64(char n, const char *text, uint64_t v) +__AUDIT_API_DEPRECATED; #if defined(_KERNEL) || defined(KERNEL) -token_t *au_to_attr(struct vnode_au_info *vni); -token_t *au_to_attr32(struct vnode_au_info *vni); -token_t *au_to_attr64(struct vnode_au_info *vni); +token_t *au_to_attr(struct vnode_au_info *vni) +__AUDIT_API_DEPRECATED; +token_t *au_to_attr32(struct vnode_au_info *vni) +__AUDIT_API_DEPRECATED; +token_t *au_to_attr64(struct vnode_au_info *vni) +__AUDIT_API_DEPRECATED; #endif token_t *au_to_data(char unit_print, char unit_type, char unit_count, - const char *p); -token_t *au_to_exit(int retval, int err); -token_t *au_to_groups(int *groups); -token_t *au_to_newgroups(uint16_t n, gid_t *groups); -token_t *au_to_in_addr(struct in_addr *internet_addr); -token_t *au_to_in_addr_ex(struct in6_addr *internet_addr); -token_t *au_to_ip(struct ip *ip); -token_t *au_to_ipc(char type, int id); -token_t *au_to_ipc_perm(struct ipc_perm *perm); -token_t *au_to_iport(uint16_t iport); -token_t *au_to_opaque(const char *data, uint16_t bytes); -token_t *au_to_path(const char *path); + const char *p) +__AUDIT_API_DEPRECATED; +token_t *au_to_exit(int retval, int err) +__AUDIT_API_DEPRECATED; +token_t *au_to_groups(int *groups) +__AUDIT_API_DEPRECATED; +token_t *au_to_newgroups(uint16_t n, gid_t *groups) +__AUDIT_API_DEPRECATED; +token_t *au_to_in_addr(struct in_addr *internet_addr) +__AUDIT_API_DEPRECATED; +token_t *au_to_in_addr_ex(struct in6_addr *internet_addr) +__AUDIT_API_DEPRECATED; +token_t *au_to_ip(struct ip *ip) +__AUDIT_API_DEPRECATED; +token_t *au_to_ipc(char type, int id) +__AUDIT_API_DEPRECATED; +token_t *au_to_ipc_perm(struct ipc_perm *perm) +__AUDIT_API_DEPRECATED; +token_t *au_to_iport(uint16_t iport) +__AUDIT_API_DEPRECATED; +token_t *au_to_opaque(const char *data, uint16_t bytes) +__AUDIT_API_DEPRECATED; +token_t *au_to_path(const char *path) +__AUDIT_API_DEPRECATED; token_t *au_to_process(au_id_t auid, uid_t euid, gid_t egid, uid_t ruid, - gid_t rgid, pid_t pid, au_asid_t sid, au_tid_t *tid); + gid_t rgid, pid_t pid, au_asid_t sid, au_tid_t *tid) +__AUDIT_API_DEPRECATED; token_t *au_to_process32(au_id_t auid, uid_t euid, gid_t egid, uid_t ruid, - gid_t rgid, pid_t pid, au_asid_t sid, au_tid_t *tid); + gid_t rgid, pid_t pid, au_asid_t sid, au_tid_t *tid) +__AUDIT_API_DEPRECATED; token_t *au_to_process64(au_id_t auid, uid_t euid, gid_t egid, uid_t ruid, - gid_t rgid, pid_t pid, au_asid_t sid, au_tid_t *tid); + gid_t rgid, pid_t pid, au_asid_t sid, au_tid_t *tid) +__AUDIT_API_DEPRECATED; token_t *au_to_process_ex(au_id_t auid, uid_t euid, gid_t egid, 
uid_t ruid, - gid_t rgid, pid_t pid, au_asid_t sid, au_tid_addr_t *tid); + gid_t rgid, pid_t pid, au_asid_t sid, au_tid_addr_t *tid) +__AUDIT_API_DEPRECATED; token_t *au_to_process32_ex(au_id_t auid, uid_t euid, gid_t egid, uid_t ruid, gid_t rgid, pid_t pid, au_asid_t sid, - au_tid_addr_t *tid); + au_tid_addr_t *tid) +__AUDIT_API_DEPRECATED; token_t *au_to_process64_ex(au_id_t auid, uid_t euid, gid_t egid, uid_t ruid, - gid_t rgid, pid_t pid, au_asid_t sid, au_tid_addr_t *tid); -token_t *au_to_return(char status, uint32_t ret); -token_t *au_to_return32(char status, uint32_t ret); -token_t *au_to_return64(char status, uint64_t ret); -token_t *au_to_seq(long audit_count); + gid_t rgid, pid_t pid, au_asid_t sid, au_tid_addr_t *tid) +__AUDIT_API_DEPRECATED; +token_t *au_to_return(char status, uint32_t ret) +__AUDIT_API_DEPRECATED; +token_t *au_to_return32(char status, uint32_t ret) +__AUDIT_API_DEPRECATED; +token_t *au_to_return64(char status, uint64_t ret) +__AUDIT_API_DEPRECATED; +token_t *au_to_seq(long audit_count) +__AUDIT_API_DEPRECATED; token_t *au_to_socket_ex(u_short so_domain, u_short so_type, - struct sockaddr *sa_local, struct sockaddr *sa_remote); -token_t *au_to_sock_inet(struct sockaddr_in *so); -token_t *au_to_sock_inet32(struct sockaddr_in *so); -token_t *au_to_sock_inet128(struct sockaddr_in6 *so); -token_t *au_to_sock_unix(struct sockaddr_un *so); + struct sockaddr *sa_local, struct sockaddr *sa_remote) +__AUDIT_API_DEPRECATED; +token_t *au_to_sock_inet(struct sockaddr_in *so) +__AUDIT_API_DEPRECATED; +token_t *au_to_sock_inet32(struct sockaddr_in *so) +__AUDIT_API_DEPRECATED; +token_t *au_to_sock_inet128(struct sockaddr_in6 *so) +__AUDIT_API_DEPRECATED; +token_t *au_to_sock_unix(struct sockaddr_un *so) +__AUDIT_API_DEPRECATED; token_t *au_to_subject(au_id_t auid, uid_t euid, gid_t egid, uid_t ruid, - gid_t rgid, pid_t pid, au_asid_t sid, au_tid_t *tid); + gid_t rgid, pid_t pid, au_asid_t sid, au_tid_t *tid) +__AUDIT_API_DEPRECATED; token_t *au_to_subject32(au_id_t auid, uid_t euid, gid_t egid, uid_t ruid, - gid_t rgid, pid_t pid, au_asid_t sid, au_tid_t *tid); + gid_t rgid, pid_t pid, au_asid_t sid, au_tid_t *tid) +__AUDIT_API_DEPRECATED; token_t *au_to_subject64(au_id_t auid, uid_t euid, gid_t egid, uid_t ruid, - gid_t rgid, pid_t pid, au_asid_t sid, au_tid_t *tid); + gid_t rgid, pid_t pid, au_asid_t sid, au_tid_t *tid) +__AUDIT_API_DEPRECATED; token_t *au_to_subject_ex(au_id_t auid, uid_t euid, gid_t egid, uid_t ruid, - gid_t rgid, pid_t pid, au_asid_t sid, au_tid_addr_t *tid); + gid_t rgid, pid_t pid, au_asid_t sid, au_tid_addr_t *tid) +__AUDIT_API_DEPRECATED; token_t *au_to_subject32_ex(au_id_t auid, uid_t euid, gid_t egid, uid_t ruid, - gid_t rgid, pid_t pid, au_asid_t sid, au_tid_addr_t *tid); + gid_t rgid, pid_t pid, au_asid_t sid, au_tid_addr_t *tid) +__AUDIT_API_DEPRECATED; token_t *au_to_subject64_ex(au_id_t auid, uid_t euid, gid_t egid, uid_t ruid, - gid_t rgid, pid_t pid, au_asid_t sid, au_tid_addr_t *tid); + gid_t rgid, pid_t pid, au_asid_t sid, au_tid_addr_t *tid) +__AUDIT_API_DEPRECATED; #if defined(_KERNEL) || defined(KERNEL) -token_t *au_to_exec_args(char *args, int argc); -token_t *au_to_exec_env(char *envs, int envc); -token_t *au_to_certificate_hash(char *hash, int hashc); -token_t *au_to_krb5_principal(char *principal, int princ); +token_t *au_to_exec_args(char *args, int argc) +__AUDIT_API_DEPRECATED; +token_t *au_to_exec_env(char *envs, int envc) +__AUDIT_API_DEPRECATED; +token_t *au_to_certificate_hash(char *hash, int hashc) 
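The audit prototype changes above all follow one pattern: for user-space builds `__AUDIT_API_DEPRECATED` expands to an availability-style deprecation attribute (per the macro definition earlier in this hunk), while kernel builds see an empty macro. A minimal sketch of how such an attribute surfaces at compile time is below; the `MY_AUDIT_API_DEPRECATED` macro and `my_auditon` function are stand-ins invented for illustration, using the portable `deprecated` attribute rather than the SDK availability macros:

/*
 * Sketch only: mirrors the __AUDIT_API_DEPRECATED pattern with a
 * stand-in attribute so it builds outside the Apple SDK.
 */
#include <stddef.h>

#if !defined(KERNEL)
#define MY_AUDIT_API_DEPRECATED \
	__attribute__((deprecated("audit is deprecated")))
#else
#define MY_AUDIT_API_DEPRECATED
#endif

/* Prototype annotated the same way the BSM prototypes are annotated. */
int my_auditon(int cmd, void *data, int length) MY_AUDIT_API_DEPRECATED;

int
my_auditon(int cmd, void *data, int length)
{
	(void)cmd; (void)data; (void)length;
	return -1;
}

int
main(void)
{
	/* clang/gcc emit -Wdeprecated-declarations for this call. */
	return my_auditon(0, NULL, 0);
}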
+__AUDIT_API_14_DEPRECATED; +token_t *au_to_krb5_principal(char *principal, int princ) +__AUDIT_API_14_DEPRECATED; #else -token_t *au_to_exec_args(char **argv); -token_t *au_to_exec_env(char **envp); -token_t *au_to_certificate_hash(char **hash); -token_t *au_to_krb5_principal(char **principal); +token_t *au_to_exec_args(char **argv) +__AUDIT_API_DEPRECATED; +token_t *au_to_exec_env(char **envp) +__AUDIT_API_DEPRECATED; +token_t *au_to_certificate_hash(char **hash) +__AUDIT_API_14_DEPRECATED; +token_t *au_to_krb5_principal(char **principal) +__AUDIT_API_14_DEPRECATED; #endif -token_t *au_to_text(const char *text); -token_t *au_to_kevent(struct kevent *kev); -token_t *au_to_trailer(int rec_size); -token_t *au_to_zonename(const char *zonename); +token_t *au_to_text(const char *text) +__AUDIT_API_DEPRECATED; +token_t *au_to_kevent(struct kevent *kev) +__AUDIT_API_DEPRECATED; +token_t *au_to_trailer(int rec_size) +__AUDIT_API_DEPRECATED; +token_t *au_to_zonename(const char *zonename) +__AUDIT_API_DEPRECATED; token_t *au_to_identity(uint32_t signer_type, const char* signing_id, u_char signing_id_trunc, const char* team_id, u_char team_id_trunc, - uint8_t* cdhash, uint16_t cdhash_len); + uint8_t* cdhash, uint16_t cdhash_len) +__AUDIT_API_14_DEPRECATED; /* * BSM library routines for converting between local and BSM constant spaces. */ -int au_bsm_to_domain(u_short bsm_domain, int *local_domainp); -int au_bsm_to_errno(u_char bsm_error, int *errorp); -int au_bsm_to_fcntl_cmd(u_short bsm_fcntl_cmd, int *local_fcntl_cmdp); +int au_bsm_to_domain(u_short bsm_domain, int *local_domainp) +__AUDIT_API_DEPRECATED; +int au_bsm_to_errno(u_char bsm_error, int *errorp) +__AUDIT_API_DEPRECATED; +int au_bsm_to_fcntl_cmd(u_short bsm_fcntl_cmd, int *local_fcntl_cmdp) +__AUDIT_API_DEPRECATED; int au_bsm_to_socket_type(u_short bsm_socket_type, - int *local_socket_typep); -u_short au_domain_to_bsm(int local_domain); -u_char au_errno_to_bsm(int local_errno); -u_short au_fcntl_cmd_to_bsm(int local_fcntl_command); -u_short au_socket_type_to_bsm(int local_socket_type); + int *local_socket_typep) +__AUDIT_API_DEPRECATED; +u_short au_domain_to_bsm(int local_domain) +__AUDIT_API_DEPRECATED; +u_char au_errno_to_bsm(int local_errno) +__AUDIT_API_DEPRECATED; +u_short au_fcntl_cmd_to_bsm(int local_fcntl_command) +__AUDIT_API_DEPRECATED; +u_short au_socket_type_to_bsm(int local_socket_type) +__AUDIT_API_DEPRECATED; __END_DECLS diff --git a/bsd/conf/Makefile b/bsd/conf/Makefile index 05c4b79cf..51eddb889 100644 --- a/bsd/conf/Makefile +++ b/bsd/conf/Makefile @@ -23,7 +23,7 @@ endif $(TARGET)/$(CURRENT_KERNEL_CONFIG)/Makefile: $(SRCROOT)/SETUP/config/doconf $(OBJROOT)/SETUP/config $(DOCONFDEPS) $(_v)$(MKDIR) $(TARGET)/$(CURRENT_KERNEL_CONFIG) - $(_v)$(SRCROOT)/SETUP/config/doconf -c -cpu $(DOCONF_ARCH_CONFIG_LC) -soc $(CURRENT_MACHINE_CONFIG_LC) -d $(TARGET)/$(CURRENT_KERNEL_CONFIG) -s $(SOURCE) -m $(MASTERCONFDIR) $(CURRENT_KERNEL_CONFIG) + $(_v)$(SRCROOT)/SETUP/config/doconf -c -cpu $(DOCONF_ARCH_CONFIG_LC) -soc $(CURRENT_MACHINE_CONFIG_LC) -platform $(PLATFORM) -d $(TARGET)/$(CURRENT_KERNEL_CONFIG) -s $(SOURCE) -m $(MASTERCONFDIR) $(CURRENT_KERNEL_CONFIG) do_all: $(TARGET)/$(CURRENT_KERNEL_CONFIG)/Makefile $(_v)${MAKE} \ diff --git a/bsd/conf/Makefile.arm b/bsd/conf/Makefile.arm index 9141afe61..a6b55625e 100644 --- a/bsd/conf/Makefile.arm +++ b/bsd/conf/Makefile.arm @@ -2,12 +2,17 @@ #BEGIN Machine dependent Makefile fragment for arm ###################################################################### -# Files that currently 
violate cast alignment checks at build time -fbt_arm.o_CFLAGS_ADD += -Wno-cast-qual - # Inline assembly doesn't interact well with LTO fbt_arm.o_CFLAGS_ADD += $(CFLAGS_NOLTO_FLAG) +# +# Diagnostic opt-outs. We need to make this list empty. +# +# DO NOT ADD MORE HERE. +# +# -Wno-shorten-64-to-32 +kern_mman.o_CFLAGS_ADD += -Wno-shorten-64-to-32 +vm_compressor_backing_file.o_CFLAGS_ADD += -Wno-shorten-64-to-32 ###################################################################### #END Machine dependent Makefile fragment for arm ###################################################################### diff --git a/bsd/conf/Makefile.arm64 b/bsd/conf/Makefile.arm64 index c22cdd613..333857edb 100644 --- a/bsd/conf/Makefile.arm64 +++ b/bsd/conf/Makefile.arm64 @@ -5,6 +5,7 @@ # Inline assembly doesn't interact well with LTO fbt_arm.o_CFLAGS_ADD += $(CFLAGS_NOLTO_FLAG) + ###################################################################### #END Machine dependent Makefile fragment for arm ###################################################################### diff --git a/bsd/conf/Makefile.template b/bsd/conf/Makefile.template index fa831c803..56588cf82 100644 --- a/bsd/conf/Makefile.template +++ b/bsd/conf/Makefile.template @@ -1,5 +1,5 @@ # -# Copyright (c) 2000-2016 Apple Inc. All rights reserved. +# Copyright (c) 2000-2020 Apple Inc. All rights reserved. # # @APPLE_LICENSE_HEADER_START@ # @@ -40,7 +40,8 @@ include $(MakeInc_def) # CFLAGS+= -include meta_features.h -DDRIVER_PRIVATE \ -D_KERNEL_BUILD -DKERNEL_BUILD -DMACH_KERNEL -DBSD_BUILD \ - -DBSD_KERNEL_PRIVATE -DLP64_DEBUG=0 + -DBSD_KERNEL_PRIVATE -DLP64_DEBUG=0 \ + -Warray-bounds-pointer-arithmetic SFLAGS+= -include meta_features.h # @@ -72,195 +73,511 @@ COMP_SUBDIRS = %MACHDEP +tcp_cubic.o_CFLAGS_ADD += -Wno-error=implicit-int-float-conversion + # -# Machine-independent per-file flags +# Diagnostic opt-outs. We need to make this list empty. # +# DO NOT ADD MORE HERE. 
+# +# -Wno-address-of-packed-member +ah_core.o_CFLAGS_ADD += -Wno-address-of-packed-member +ah_input.o_CFLAGS_ADD += -Wno-address-of-packed-member +dlil.o_CFLAGS_ADD += -Wno-address-of-packed-member +esp_input.o_CFLAGS_ADD += -Wno-address-of-packed-member +esp_output.o_CFLAGS_ADD += -Wno-address-of-packed-member +frag6.o_CFLAGS_ADD += -Wno-address-of-packed-member +icmp6.o_CFLAGS_ADD += -Wno-address-of-packed-member +if_stf.o_CFLAGS_ADD += -Wno-address-of-packed-member +in6_cksum.o_CFLAGS_ADD += -Wno-address-of-packed-member +in6_gif.o_CFLAGS_ADD += -Wno-address-of-packed-member +ip6_forward.o_CFLAGS_ADD += -Wno-address-of-packed-member +ip6_input.o_CFLAGS_ADD += -Wno-address-of-packed-member +ip6_output.o_CFLAGS_ADD += -Wno-address-of-packed-member +ipsec.o_CFLAGS_ADD += -Wno-address-of-packed-member +iptap.o_CFLAGS_ADD += -Wno-address-of-packed-member +mld6.o_CFLAGS_ADD += -Wno-address-of-packed-member +mptcp_opt.o_CFLAGS_ADD += -Wno-address-of-packed-member +nd6.o_CFLAGS_ADD += -Wno-address-of-packed-member +nd6_nbr.o_CFLAGS_ADD += -Wno-address-of-packed-member +nd6_prproxy.o_CFLAGS_ADD += -Wno-address-of-packed-member +nd6_rtr.o_CFLAGS_ADD += -Wno-address-of-packed-member +necp.o_CFLAGS_ADD += -Wno-address-of-packed-member +packet_mangler.o_CFLAGS_ADD += -Wno-address-of-packed-member +pf_norm.o_CFLAGS_ADD += -Wno-address-of-packed-member +pktap.o_CFLAGS_ADD += -Wno-address-of-packed-member +raw_ip6.o_CFLAGS_ADD += -Wno-address-of-packed-member +sixxlowpan.o_CFLAGS_ADD += -Wno-address-of-packed-member +tcp_input.o_CFLAGS_ADD += -Wno-address-of-packed-member +tcp_subr.o_CFLAGS_ADD += -Wno-address-of-packed-member +udp6_output.o_CFLAGS_ADD += -Wno-address-of-packed-member +udp6_usrreq.o_CFLAGS_ADD += -Wno-address-of-packed-member +udp_usrreq.o_CFLAGS_ADD += -Wno-address-of-packed-member +# -Wno-cast-align +audit_bsm_token.o_CFLAGS_ADD += -Wno-cast-align +audit_pipe.o_CFLAGS_ADD += -Wno-cast-align +audit_session.o_CFLAGS_ADD += -Wno-cast-align +bind_vnops.o_CFLAGS_ADD += -Wno-cast-align +dtrace.o_CFLAGS_ADD += -Wno-cast-align +fasttrap.o_CFLAGS_ADD += -Wno-cast-align +fasttrap_isa.o_CFLAGS_ADD += -Wno-cast-align +fbt.o_CFLAGS_ADD += -Wno-cast-align +fbt_arm.o_CFLAGS_ADD += -Wno-cast-align +fbt_x86.o_CFLAGS_ADD += -Wno-cast-align +if_bond.o_CFLAGS_ADD += -Wno-cast-align +ip_dummynet.o_CFLAGS_ADD += -Wno-cast-align +kern_credential.o_CFLAGS_ADD += -Wno-cast-align +kern_descrip.o_CFLAGS_ADD += -Wno-cast-align +kern_event.o_CFLAGS_ADD += -Wno-cast-align +kern_exec.o_CFLAGS_ADD += -Wno-cast-align +kern_guarded.o_CFLAGS_ADD += -Wno-cast-align +kern_lockf.o_CFLAGS_ADD += -Wno-cast-align +kern_subr.o_CFLAGS_ADD += -Wno-cast-align +km.o_CFLAGS_ADD += -Wno-cast-align +mach_loader.o_CFLAGS_ADD += -Wno-cast-align +memdev.o_CFLAGS_ADD += -Wno-cast-align +munge.o_CFLAGS_ADD += -Wno-cast-align +nfs4_subs.o_CFLAGS_ADD += -Wno-cast-align +nfs4_vnops.o_CFLAGS_ADD += -Wno-cast-align +nfs_boot.o_CFLAGS_ADD += -Wno-cast-align +nfs_gss.o_CFLAGS_ADD += -Wno-cast-align +nfs_serv.o_CFLAGS_ADD += -Wno-cast-align +nfs_socket.o_CFLAGS_ADD += -Wno-cast-align +nfs_srvcache.o_CFLAGS_ADD += -Wno-cast-align +nfs_subs.o_CFLAGS_ADD += -Wno-cast-align +nfs_syscalls.o_CFLAGS_ADD += -Wno-cast-align +nfs_vfsops.o_CFLAGS_ADD += -Wno-cast-align +nfs_vnops.o_CFLAGS_ADD += -Wno-cast-align +proc_info.o_CFLAGS_ADD += -Wno-cast-align +qsort.o_CFLAGS_ADD += -Wno-cast-align +sdt.o_CFLAGS_ADD += -Wno-cast-align +sdt_arm.o_CFLAGS_ADD += -Wno-cast-align +shadow.o_CFLAGS_ADD += -Wno-cast-align +spec_vnops.o_CFLAGS_ADD += -Wno-cast-align 
+subr_log.o_CFLAGS_ADD += -Wno-cast-align +sys_generic.o_CFLAGS_ADD += -Wno-cast-align +sys_pipe.o_CFLAGS_ADD += -Wno-cast-align +systemcalls.o_CFLAGS_ADD += -Wno-cast-align +systrace.o_CFLAGS_ADD += -Wno-cast-align +tty.o_CFLAGS_ADD += -Wno-cast-align +tty_compat.o_CFLAGS_ADD += -Wno-cast-align +tty_dev.o_CFLAGS_ADD += -Wno-cast-align +ubc_subr.o_CFLAGS_ADD += -Wno-cast-align +uipc_mbuf.o_CFLAGS_ADD += -Wno-cast-align +uipc_usrreq.o_CFLAGS_ADD += -Wno-cast-align +vfs_attrlist.o_CFLAGS_ADD += -Wno-cast-align +vfs_fsevents.o_CFLAGS_ADD += -Wno-cast-align +vfs_syscalls.o_CFLAGS_ADD += -Wno-cast-align +vfs_utfconv.o_CFLAGS_ADD += -Wno-cast-align +vfs_vnops.o_CFLAGS_ADD += -Wno-cast-align +vfs_xattr.o_CFLAGS_ADD += -Wno-cast-align +vn.o_CFLAGS_ADD += -Wno-cast-align +# -Wno-cast-qual +dis_tables.o_CFLAGS_ADD += -Wno-cast-qual +# -Wno-format +dlil.o_CFLAGS_ADD += -Wno-format +in_rmx.o_CFLAGS_ADD += -Wno-format +in6_rmx.o_CFLAGS_ADD += -Wno-format +route.o_CFLAGS_ADD += -Wno-format +# -Wno-format-extra-args +dlil.o_CFLAGS_ADD += -Wno-format-extra-args +kpi_interface.o_CFLAGS_ADD += -Wno-format-extra-args +in_rmx.o_CFLAGS_ADD += -Wno-format-extra-args +in6_rmx.o_CFLAGS_ADD += -Wno-format-extra-args +route.o_CFLAGS_ADD += -Wno-format-extra-args +# -Wno-format-invalid-specifier +dlil.o_CFLAGS_ADD += -Wno-format-invalid-specifier +in_rmx.o_CFLAGS_ADD += -Wno-format-invalid-specifier +in6_rmx.o_CFLAGS_ADD += -Wno-format-invalid-specifier +kpi_interface.o_CFLAGS_ADD += -Wno-format-invalid-specifier +route.o_CFLAGS_ADD += -Wno-format-invalid-specifier +# -Wno-implicit-int-conversion +audit.o_CFLAGS_ADD += -Wno-implicit-int-conversion +audit_bsm.o_CFLAGS_ADD += -Wno-implicit-int-conversion +audit_bsm_errno.o_CFLAGS_ADD += -Wno-implicit-int-conversion +audit_bsm_token.o_CFLAGS_ADD += -Wno-implicit-int-conversion +audit_worker.o_CFLAGS_ADD += -Wno-implicit-int-conversion +bpf.o_CFLAGS_ADD += -Wno-implicit-int-conversion +content_filter.o_CFLAGS_ADD += -Wno-implicit-int-conversion +devfs_fdesc_support.o_CFLAGS_ADD += -Wno-implicit-int-conversion +devfs_vnops.o_CFLAGS_ADD += -Wno-implicit-int-conversion +dis_tables.o_CFLAGS_ADD += -Wno-implicit-int-conversion +dtrace.o_CFLAGS_ADD += -Wno-implicit-int-conversion +fasttrap.o_CFLAGS_ADD += -Wno-implicit-int-conversion +fasttrap_isa.o_CFLAGS_ADD += -Wno-implicit-int-conversion +fbt_arm.o_CFLAGS_ADD += -Wno-implicit-int-conversion +flow_agg.o_CFLAGS_ADD += -Wno-implicit-int-conversion +flowadv.o_CFLAGS_ADD += -Wno-implicit-int-conversion +gss_krb5_mech.o_CFLAGS_ADD += -Wno-implicit-int-conversion +if.o_CFLAGS_ADD += -Wno-implicit-int-conversion +if_bond.o_CFLAGS_ADD += -Wno-implicit-int-conversion +if_bridge.o_CFLAGS_ADD += -Wno-implicit-int-conversion +if_fake.o_CFLAGS_ADD += -Wno-implicit-int-conversion +if_ipsec.o_CFLAGS_ADD += -Wno-implicit-int-conversion +if_stf.o_CFLAGS_ADD += -Wno-implicit-int-conversion +if_utun.o_CFLAGS_ADD += -Wno-implicit-int-conversion +if_vlan.o_CFLAGS_ADD += -Wno-implicit-int-conversion +in6.o_CFLAGS_ADD += -Wno-implicit-int-conversion +in6_gif.o_CFLAGS_ADD += -Wno-implicit-int-conversion +in6_pcb.o_CFLAGS_ADD += -Wno-implicit-int-conversion +in_gif.o_CFLAGS_ADD += -Wno-implicit-int-conversion +ip_input.o_CFLAGS_ADD += -Wno-implicit-int-conversion +km.o_CFLAGS_ADD += -Wno-implicit-int-conversion +kpi_mbuf.o_CFLAGS_ADD += -Wno-implicit-int-conversion +necp.o_CFLAGS_ADD += -Wno-implicit-int-conversion +network_agent.o_CFLAGS_ADD += -Wno-implicit-int-conversion +nfs4_vnops.o_CFLAGS_ADD += -Wno-implicit-int-conversion 
+nfs_boot.o_CFLAGS_ADD += -Wno-implicit-int-conversion +nfs_gss.o_CFLAGS_ADD += -Wno-implicit-int-conversion +nfs_node.o_CFLAGS_ADD += -Wno-implicit-int-conversion +nfs_socket.o_CFLAGS_ADD += -Wno-implicit-int-conversion +nfs_subs.o_CFLAGS_ADD += -Wno-implicit-int-conversion +nfs_syscalls.o_CFLAGS_ADD += -Wno-implicit-int-conversion +nfs_vfsops.o_CFLAGS_ADD += -Wno-implicit-int-conversion +nfs_vnops.o_CFLAGS_ADD += -Wno-implicit-int-conversion +ntstat.o_CFLAGS_ADD += -Wno-implicit-int-conversion +null_vnops.o_CFLAGS_ADD += -Wno-implicit-int-conversion +pf.o_CFLAGS_ADD += -Wno-implicit-int-conversion +pf_norm.o_CFLAGS_ADD += -Wno-implicit-int-conversion +radix.o_CFLAGS_ADD += -Wno-implicit-int-conversion +route.o_CFLAGS_ADD += -Wno-implicit-int-conversion +shadow.o_CFLAGS_ADD += -Wno-implicit-int-conversion +skpc.o_CFLAGS_ADD += -Wno-implicit-int-conversion +spec_vnops.o_CFLAGS_ADD += -Wno-implicit-int-conversion +systemcalls.o_CFLAGS_ADD += -Wno-implicit-int-conversion +systrace.o_CFLAGS_ADD += -Wno-implicit-int-conversion +sysv_msg.o_CFLAGS_ADD += -Wno-implicit-int-conversion +sysv_sem.o_CFLAGS_ADD += -Wno-implicit-int-conversion +tcp_input.o_CFLAGS_ADD += -Wno-implicit-int-conversion +uipc_mbuf.o_CFLAGS_ADD += -Wno-implicit-int-conversion +uipc_socket.o_CFLAGS_ADD += -Wno-implicit-int-conversion +vfs_quota.o_CFLAGS_ADD += -Wno-implicit-int-conversion +vsock_domain.o_CFLAGS_ADD += -Wno-implicit-int-conversion +# -Wno-shorten-64-to-32 +audit_bsm.o_CFLAGS_ADD += -Wno-shorten-64-to-32 +audit_bsm_token.o_CFLAGS_ADD += -Wno-shorten-64-to-32 +audit_pipe.o_CFLAGS_ADD += -Wno-shorten-64-to-32 +audit_session.o_CFLAGS_ADD += -Wno-shorten-64-to-32 +audit_syscalls.o_CFLAGS_ADD += -Wno-shorten-64-to-32 +audit_worker.o_CFLAGS_ADD += -Wno-shorten-64-to-32 +bind_vnops.o_CFLAGS_ADD += -Wno-shorten-64-to-32 +bpf.o_CFLAGS_ADD += -Wno-shorten-64-to-32 +devfs_fdesc_support.o_CFLAGS_ADD += -Wno-shorten-64-to-32 +devfs_tree.o_CFLAGS_ADD += -Wno-shorten-64-to-32 +devfs_vnops.o_CFLAGS_ADD += -Wno-shorten-64-to-32 +devtimer.o_CFLAGS_ADD += -Wno-shorten-64-to-32 +dis_tables.o_CFLAGS_ADD += -Wno-shorten-64-to-32 +doc_tombstone.o_CFLAGS_ADD += -Wno-shorten-64-to-32 +dtrace.o_CFLAGS_ADD += -Wno-shorten-64-to-32 +dtrace_ptss.o_CFLAGS_ADD += -Wno-shorten-64-to-32 +esp_chachapoly.o_CFLAGS_ADD += -Wno-shorten-64-to-32 +fasttrap.o_CFLAGS_ADD += -Wno-shorten-64-to-32 +fasttrap_isa.o_CFLAGS_ADD += -Wno-shorten-64-to-32 +gss_krb5_mech.o_CFLAGS_ADD += -Wno-shorten-64-to-32 +if.o_CFLAGS_ADD += -Wno-shorten-64-to-32 +if_bond.o_CFLAGS_ADD += -Wno-shorten-64-to-32 +if_bridge.o_CFLAGS_ADD += -Wno-shorten-64-to-32 +if_fake.o_CFLAGS_ADD += -Wno-shorten-64-to-32 +if_gif.o_CFLAGS_ADD += -Wno-shorten-64-to-32 +if_ipsec.o_CFLAGS_ADD += -Wno-shorten-64-to-32 +if_stf.o_CFLAGS_ADD += -Wno-shorten-64-to-32 +if_utun.o_CFLAGS_ADD += -Wno-shorten-64-to-32 +if_vlan.o_CFLAGS_ADD += -Wno-shorten-64-to-32 +in6.o_CFLAGS_ADD += -Wno-shorten-64-to-32 +in6_rmx.o_CFLAGS_ADD += -Wno-shorten-64-to-32 +in_rmx.o_CFLAGS_ADD += -Wno-shorten-64-to-32 +ip_dummynet.o_CFLAGS_ADD += -Wno-shorten-64-to-32 +ip_input.o_CFLAGS_ADD += -Wno-shorten-64-to-32 +iptap.o_CFLAGS_ADD += -Wno-shorten-64-to-32 +kern_fork.o_CFLAGS_ADD += -Wno-shorten-64-to-32 +kern_malloc.o_CFLAGS_ADD += -Wno-shorten-64-to-32 +kern_overrides.o_CFLAGS_ADD += -Wno-shorten-64-to-32 + +kpi_mbuf.o_CFLAGS_ADD += -Wno-shorten-64-to-32 +krpc_subr.o_CFLAGS_ADD += -Wno-shorten-64-to-32 +lockprof.o_CFLAGS_ADD += -Wno-shorten-64-to-32 +mem.o_CFLAGS_ADD += -Wno-shorten-64-to-32 +memdev.o_CFLAGS_ADD += 
-Wno-shorten-64-to-32 +nd6_rtr.o_CFLAGS_ADD += -Wno-shorten-64-to-32 +necp.o_CFLAGS_ADD += -Wno-shorten-64-to-32 +necp_client.o_CFLAGS_ADD += -Wno-shorten-64-to-32 +network_agent.o_CFLAGS_ADD += -Wno-shorten-64-to-32 +nfs4_subs.o_CFLAGS_ADD += -Wno-shorten-64-to-32 +nfs4_vnops.o_CFLAGS_ADD += -Wno-shorten-64-to-32 +nfs_bio.o_CFLAGS_ADD += -Wno-shorten-64-to-32 +nfs_boot.o_CFLAGS_ADD += -Wno-shorten-64-to-32 +nfs_gss.o_CFLAGS_ADD += -Wno-shorten-64-to-32 +nfs_lock.o_CFLAGS_ADD += -Wno-shorten-64-to-32 +nfs_node.o_CFLAGS_ADD += -Wno-shorten-64-to-32 +nfs_serv.o_CFLAGS_ADD += -Wno-shorten-64-to-32 +nfs_socket.o_CFLAGS_ADD += -Wno-shorten-64-to-32 +nfs_subs.o_CFLAGS_ADD += -Wno-shorten-64-to-32 +nfs_syscalls.o_CFLAGS_ADD += -Wno-shorten-64-to-32 +nfs_vfsops.o_CFLAGS_ADD += -Wno-shorten-64-to-32 +nfs_vnops.o_CFLAGS_ADD += -Wno-shorten-64-to-32 +ntstat.o_CFLAGS_ADD += -Wno-shorten-64-to-32 +null_vnops.o_CFLAGS_ADD += -Wno-shorten-64-to-32 +pf.o_CFLAGS_ADD += -Wno-shorten-64-to-32 +pf_norm.o_CFLAGS_ADD += -Wno-shorten-64-to-32 +pf_pbuf.o_CFLAGS_ADD += -Wno-shorten-64-to-32 +profile_prvd.o_CFLAGS_ADD += -Wno-shorten-64-to-32 +profile_runtime.o_CFLAGS_ADD += -Wno-shorten-64-to-32 +radix.o_CFLAGS_ADD += -Wno-shorten-64-to-32 +route.o_CFLAGS_ADD += -Wno-shorten-64-to-32 +sdt.o_CFLAGS_ADD += -Wno-shorten-64-to-32 +shadow.o_CFLAGS_ADD += -Wno-shorten-64-to-32 +skpc.o_CFLAGS_ADD += -Wno-shorten-64-to-32 +spec_vnops.o_CFLAGS_ADD += -Wno-shorten-64-to-32 +stubs.o_CFLAGS_ADD += -Wno-shorten-64-to-32 +systemcalls.o_CFLAGS_ADD += -Wno-shorten-64-to-32 +sysv_msg.o_CFLAGS_ADD += -Wno-shorten-64-to-32 +sysv_sem.o_CFLAGS_ADD += -Wno-shorten-64-to-32 +sysv_shm.o_CFLAGS_ADD += -Wno-shorten-64-to-32 +tcp_input.o_CFLAGS_ADD += -Wno-shorten-64-to-32 +uipc_mbuf.o_CFLAGS_ADD += -Wno-shorten-64-to-32 +uipc_socket.o_CFLAGS_ADD += -Wno-shorten-64-to-32 +unix_signal.o_CFLAGS_ADD += -Wno-shorten-64-to-32 +ux_exception.o_CFLAGS_ADD += -Wno-shorten-64-to-32 +vfs_cluster.o_CFLAGS_ADD += -Wno-shorten-64-to-32 +vfs_quota.o_CFLAGS_ADD += -Wno-shorten-64-to-32 +vfs_subr.o_CFLAGS_ADD += -Wno-shorten-64-to-32 +vfs_support.o_CFLAGS_ADD += -Wno-shorten-64-to-32 +vn.o_CFLAGS_ADD += -Wno-shorten-64-to-32 +xcpm.o_CFLAGS_ADD += -Wno-shorten-64-to-32 +# -Wno-sign-compare +dest6.o_CFLAGS_ADD += -Wno-sign-compare +dhcp_options.o_CFLAGS_ADD += -Wno-sign-compare +esp_chachapoly.o_CFLAGS_ADD += -Wno-sign-compare +frag6.o_CFLAGS_ADD += -Wno-sign-compare +in6.o_CFLAGS_ADD += -Wno-sign-compare +in6_cksum.o_CFLAGS_ADD += -Wno-sign-compare +in6_proto.o_CFLAGS_ADD += -Wno-sign-compare +ip_dummynet.o_CFLAGS_ADD += -Wno-sign-compare +ip_input.o_CFLAGS_ADD += -Wno-sign-compare +radix.o_CFLAGS_ADD += -Wno-sign-compare +route.o_CFLAGS_ADD += -Wno-sign-compare +route6.o_CFLAGS_ADD += -Wno-sign-compare +scope6.o_CFLAGS_ADD += -Wno-sign-compare +tcp_input.o_CFLAGS_ADD += -Wno-sign-compare +uipc_mbuf.o_CFLAGS_ADD += -Wno-sign-compare +uipc_socket.o_CFLAGS_ADD += -Wno-sign-compare +# -Wno-sign-conversion +audit.o_CFLAGS_ADD += -Wno-sign-conversion +audit_arg.o_CFLAGS_ADD += -Wno-sign-conversion +audit_bsd.o_CFLAGS_ADD += -Wno-sign-conversion +audit_bsm.o_CFLAGS_ADD += -Wno-sign-conversion +audit_bsm_klib.o_CFLAGS_ADD += -Wno-sign-conversion +audit_bsm_token.o_CFLAGS_ADD += -Wno-sign-conversion +audit_mac.o_CFLAGS_ADD += -Wno-sign-conversion +audit_pipe.o_CFLAGS_ADD += -Wno-sign-conversion +audit_session.o_CFLAGS_ADD += -Wno-sign-conversion +audit_syscalls.o_CFLAGS_ADD += -Wno-sign-conversion +audit_worker.o_CFLAGS_ADD += -Wno-sign-conversion 
+bind_vfsops.o_CFLAGS_ADD += -Wno-sign-conversion +bind_vnops.o_CFLAGS_ADD += -Wno-sign-conversion +blist.o_CFLAGS_ADD += -Wno-sign-conversion +bpf.o_CFLAGS_ADD += -Wno-sign-conversion +bsd_init.o_CFLAGS_ADD += -Wno-sign-conversion +bsd_stubs.o_CFLAGS_ADD += -Wno-sign-conversion +cbrtf.o_CFLAGS_ADD += -Wno-sign-conversion +chunklist.o_CFLAGS_ADD += -Wno-sign-conversion +content_filter.o_CFLAGS_ADD += -Wno-sign-conversion +cpu_in_cksum_gen.o_CFLAGS_ADD += -Wno-sign-conversion +cuckoo_hashtable_test.o_CFLAGS_ADD += -Wno-sign-conversion +decmpfs.o_CFLAGS_ADD += -Wno-sign-conversion +dest6.o_CFLAGS_ADD += -Wno-sign-conversion +devfs_fdesc_support.o_CFLAGS_ADD += -Wno-sign-conversion +devfs_tree.o_CFLAGS_ADD += -Wno-sign-conversion +devfs_vfsops.o_CFLAGS_ADD += -Wno-sign-conversion +devfs_vnops.o_CFLAGS_ADD += -Wno-sign-conversion +devtimer.o_CFLAGS_ADD += -Wno-sign-conversion +dhcp_options.o_CFLAGS_ADD += -Wno-sign-conversion +dis_tables.o_CFLAGS_ADD += -Wno-sign-conversion +disassembler.o_CFLAGS_ADD += -Wno-sign-conversion +doc_tombstone.o_CFLAGS_ADD += -Wno-sign-conversion +dtrace.o_CFLAGS_ADD += -Wno-sign-conversion +dtrace_glue.o_CFLAGS_ADD += -Wno-sign-conversion +dtrace_isa.o_CFLAGS_ADD += -Wno-sign-conversion +dtrace_ptss.o_CFLAGS_ADD += -Wno-sign-conversion +dtrace_subr.o_CFLAGS_ADD += -Wno-sign-conversion +dtrace_subr_arm.o_CFLAGS_ADD += -Wno-sign-conversion +ether_if_module.o_CFLAGS_ADD += -Wno-sign-conversion +ether_inet6_pr_module.o_CFLAGS_ADD += -Wno-sign-conversion +ether_inet_pr_module.o_CFLAGS_ADD += -Wno-sign-conversion +fasttrap.o_CFLAGS_ADD += -Wno-sign-conversion +fasttrap_isa.o_CFLAGS_ADD += -Wno-sign-conversion +fbt.o_CFLAGS_ADD += -Wno-sign-conversion +fbt_arm.o_CFLAGS_ADD += -Wno-sign-conversion +fbt_x86.o_CFLAGS_ADD += -Wno-sign-conversion +fifo_vnops.o_CFLAGS_ADD += -Wno-sign-conversion +flowhash.o_CFLAGS_ADD += -Wno-sign-conversion +frag6.o_CFLAGS_ADD += -Wno-sign-conversion +gss_krb5_mech.o_CFLAGS_ADD += -Wno-sign-conversion +if.o_CFLAGS_ADD += -Wno-sign-conversion +if_bond.o_CFLAGS_ADD += -Wno-sign-conversion +if_bridge.o_CFLAGS_ADD += -Wno-sign-conversion +if_fake.o_CFLAGS_ADD += -Wno-sign-conversion +if_gif.o_CFLAGS_ADD += -Wno-sign-conversion +if_ipsec.o_CFLAGS_ADD += -Wno-sign-conversion +if_llatbl.o_CFLAGS_ADD += -Wno-sign-conversion +if_loop.o_CFLAGS_ADD += -Wno-sign-conversion +if_low_power_mode.o_CFLAGS_ADD += -Wno-sign-conversion +if_pflog.o_CFLAGS_ADD += -Wno-sign-conversion +if_stf.o_CFLAGS_ADD += -Wno-sign-conversion +if_utun.o_CFLAGS_ADD += -Wno-sign-conversion +if_vlan.o_CFLAGS_ADD += -Wno-sign-conversion +imageboot.o_CFLAGS_ADD += -Wno-sign-conversion +in6.o_CFLAGS_ADD += -Wno-sign-conversion +in6_cga.o_CFLAGS_ADD += -Wno-sign-conversion +in6_cksum.o_CFLAGS_ADD += -Wno-sign-conversion +in6_gif.o_CFLAGS_ADD += -Wno-sign-conversion +in6_pcb.o_CFLAGS_ADD += -Wno-sign-conversion +in6_proto.o_CFLAGS_ADD += -Wno-sign-conversion +in6_rmx.o_CFLAGS_ADD += -Wno-sign-conversion +in_gif.o_CFLAGS_ADD += -Wno-sign-conversion +in_rmx.o_CFLAGS_ADD += -Wno-sign-conversion +instr_size.o_CFLAGS_ADD += -Wno-sign-conversion +ip6_forward.o_CFLAGS_ADD += -Wno-sign-conversion +ip6_id.o_CFLAGS_ADD += -Wno-sign-conversion +ip_dummynet.o_CFLAGS_ADD += -Wno-sign-conversion +ip_input.o_CFLAGS_ADD += -Wno-sign-conversion +iptap.o_CFLAGS_ADD += -Wno-sign-conversion +kern_acct.o_CFLAGS_ADD += -Wno-sign-conversion +kern_authorization.o_CFLAGS_ADD += -Wno-sign-conversion +kern_clock.o_CFLAGS_ADD += -Wno-sign-conversion +kern_control.o_CFLAGS_ADD += -Wno-sign-conversion 
+kern_core.o_CFLAGS_ADD += -Wno-sign-conversion +kern_credential.o_CFLAGS_ADD += -Wno-sign-conversion +kern_cs.o_CFLAGS_ADD += -Wno-sign-conversion +kern_descrip.o_CFLAGS_ADD += -Wno-sign-conversion +kern_event.o_CFLAGS_ADD += -Wno-sign-conversion +kern_exit.o_CFLAGS_ADD += -Wno-sign-conversion +kern_fork.o_CFLAGS_ADD += -Wno-sign-conversion +kern_guarded.o_CFLAGS_ADD += -Wno-sign-conversion +kern_kpc.o_CFLAGS_ADD += -Wno-sign-conversion +kern_lockf.o_CFLAGS_ADD += -Wno-sign-conversion +kern_malloc.o_CFLAGS_ADD += -Wno-sign-conversion +kern_memorystatus.o_CFLAGS_ADD += -Wno-sign-conversion +kern_memorystatus_freeze.o_CFLAGS_ADD += -Wno-sign-conversion +kern_memorystatus_notify.o_CFLAGS_ADD += -Wno-sign-conversion +kern_mib.o_CFLAGS_ADD += -Wno-sign-conversion +kern_mman.o_CFLAGS_ADD += -Wno-sign-conversion +kern_ntptime.o_CFLAGS_ADD += -Wno-sign-conversion +kern_overrides.o_CFLAGS_ADD += -Wno-sign-conversion +kern_persona.o_CFLAGS_ADD += -Wno-sign-conversion +kern_physio.o_CFLAGS_ADD += -Wno-sign-conversion +kern_proc.o_CFLAGS_ADD += -Wno-sign-conversion +kern_prot.o_CFLAGS_ADD += -Wno-sign-conversion +kern_resource.o_CFLAGS_ADD += -Wno-sign-conversion +kern_shutdown.o_CFLAGS_ADD += -Wno-sign-conversion +kern_sig.o_CFLAGS_ADD += -Wno-sign-conversion +kern_subr.o_CFLAGS_ADD += -Wno-sign-conversion +kern_symfile.o_CFLAGS_ADD += -Wno-sign-conversion +kern_synch.o_CFLAGS_ADD += -Wno-sign-conversion +kern_time.o_CFLAGS_ADD += -Wno-sign-conversion +km.o_CFLAGS_ADD += -Wno-sign-conversion +kpi_mbuf.o_CFLAGS_ADD += -Wno-sign-conversion +kpi_protocol.o_CFLAGS_ADD += -Wno-sign-conversion +kpi_socketfilter.o_CFLAGS_ADD += -Wno-sign-conversion +kpi_vfs.o_CFLAGS_ADD += -Wno-sign-conversion +krpc_subr.o_CFLAGS_ADD += -Wno-sign-conversion +lockprof.o_CFLAGS_ADD += -Wno-sign-conversion +mach_loader.o_CFLAGS_ADD += -Wno-sign-conversion +mach_process.o_CFLAGS_ADD += -Wno-sign-conversion +mem.o_CFLAGS_ADD += -Wno-sign-conversion +memdev.o_CFLAGS_ADD += -Wno-sign-conversion +monotonic.o_CFLAGS_ADD += -Wno-sign-conversion +munge.o_CFLAGS_ADD += -Wno-sign-conversion +nd6_prproxy.o_CFLAGS_ADD += -Wno-sign-conversion +nd6_send.o_CFLAGS_ADD += -Wno-sign-conversion +necp.o_CFLAGS_ADD += -Wno-sign-conversion +necp_client.o_CFLAGS_ADD += -Wno-sign-conversion +netboot.o_CFLAGS_ADD += -Wno-sign-conversion +network_agent.o_CFLAGS_ADD += -Wno-sign-conversion +nfs4_subs.o_CFLAGS_ADD += -Wno-sign-conversion +nfs4_vnops.o_CFLAGS_ADD += -Wno-sign-conversion +nfs_bio.o_CFLAGS_ADD += -Wno-sign-conversion +nfs_boot.o_CFLAGS_ADD += -Wno-sign-conversion +nfs_gss.o_CFLAGS_ADD += -Wno-sign-conversion +nfs_lock.o_CFLAGS_ADD += -Wno-sign-conversion +nfs_node.o_CFLAGS_ADD += -Wno-sign-conversion +nfs_serv.o_CFLAGS_ADD += -Wno-sign-conversion +nfs_socket.o_CFLAGS_ADD += -Wno-sign-conversion +nfs_subs.o_CFLAGS_ADD += -Wno-sign-conversion +nfs_syscalls.o_CFLAGS_ADD += -Wno-sign-conversion +nfs_upcall.o_CFLAGS_ADD += -Wno-sign-conversion +nfs_vfsops.o_CFLAGS_ADD += -Wno-sign-conversion +nfs_vnops.o_CFLAGS_ADD += -Wno-sign-conversion +ntstat.o_CFLAGS_ADD += -Wno-sign-conversion +null_vfsops.o_CFLAGS_ADD += -Wno-sign-conversion +null_vnops.o_CFLAGS_ADD += -Wno-sign-conversion +packet_mangler.o_CFLAGS_ADD += -Wno-sign-conversion +pf.o_CFLAGS_ADD += -Wno-sign-conversion +pf_norm.o_CFLAGS_ADD += -Wno-sign-conversion +pf_pbuf.o_CFLAGS_ADD += -Wno-sign-conversion +pf_table.o_CFLAGS_ADD += -Wno-sign-conversion +posix_sem.o_CFLAGS_ADD += -Wno-sign-conversion +posix_shm.o_CFLAGS_ADD += -Wno-sign-conversion +process_policy.o_CFLAGS_ADD += 
-Wno-sign-conversion +profile_prvd.o_CFLAGS_ADD += -Wno-sign-conversion +profile_runtime.o_CFLAGS_ADD += -Wno-sign-conversion +radix.o_CFLAGS_ADD += -Wno-sign-conversion +randomdev.o_CFLAGS_ADD += -Wno-sign-conversion +route.o_CFLAGS_ADD += -Wno-sign-conversion +route6.o_CFLAGS_ADD += -Wno-sign-conversion +routefs_ops.o_CFLAGS_ADD += -Wno-sign-conversion +scope6.o_CFLAGS_ADD += -Wno-sign-conversion +sdt.o_CFLAGS_ADD += -Wno-sign-conversion +sdt_arm.o_CFLAGS_ADD += -Wno-sign-conversion +sdt_x86.o_CFLAGS_ADD += -Wno-sign-conversion +shadow.o_CFLAGS_ADD += -Wno-sign-conversion +sixxlowpan.o_CFLAGS_ADD += -Wno-sign-conversion +spec_vnops.o_CFLAGS_ADD += -Wno-sign-conversion +subr_log.o_CFLAGS_ADD += -Wno-sign-conversion +subr_prf.o_CFLAGS_ADD += -Wno-sign-conversion +sys_coalition.o_CFLAGS_ADD += -Wno-sign-conversion +sys_generic.o_CFLAGS_ADD += -Wno-sign-conversion +sys_pipe.o_CFLAGS_ADD += -Wno-sign-conversion +sys_reason.o_CFLAGS_ADD += -Wno-sign-conversion +sys_ulock.o_CFLAGS_ADD += -Wno-sign-conversion +systemcalls.o_CFLAGS_ADD += -Wno-sign-conversion +systrace.o_CFLAGS_ADD += -Wno-sign-conversion +sysv_msg.o_CFLAGS_ADD += -Wno-sign-conversion +sysv_sem.o_CFLAGS_ADD += -Wno-sign-conversion +sysv_shm.o_CFLAGS_ADD += -Wno-sign-conversion +tcp_cc.o_CFLAGS_ADD += -Wno-sign-conversion +tcp_cubic.o_CFLAGS_ADD += -Wno-sign-conversion +tcp_input.o_CFLAGS_ADD += -Wno-sign-conversion +ubc_subr.o_CFLAGS_ADD += -Wno-sign-conversion +uipc_mbuf.o_CFLAGS_ADD += -Wno-sign-conversion +uipc_socket.o_CFLAGS_ADD += -Wno-sign-conversion +unix_signal.o_CFLAGS_ADD += -Wno-sign-conversion +unix_startup.o_CFLAGS_ADD += -Wno-sign-conversion +ux_exception.o_CFLAGS_ADD += -Wno-sign-conversion +vfs_attrlist.o_CFLAGS_ADD += -Wno-sign-conversion +vfs_bio.o_CFLAGS_ADD += -Wno-sign-conversion +vfs_cache.o_CFLAGS_ADD += -Wno-sign-conversion +vfs_cluster.o_CFLAGS_ADD += -Wno-sign-conversion +vfs_cprotect.o_CFLAGS_ADD += -Wno-sign-conversion +vfs_disk_conditioner.o_CFLAGS_ADD += -Wno-sign-conversion +vfs_fsevents.o_CFLAGS_ADD += -Wno-sign-conversion +vfs_fslog.o_CFLAGS_ADD += -Wno-sign-conversion +vfs_init.o_CFLAGS_ADD += -Wno-sign-conversion +vfs_lookup.o_CFLAGS_ADD += -Wno-sign-conversion +vfs_quota.o_CFLAGS_ADD += -Wno-sign-conversion +vfs_subr.o_CFLAGS_ADD += -Wno-sign-conversion +vfs_syscalls.o_CFLAGS_ADD += -Wno-sign-conversion +vfs_utfconv.o_CFLAGS_ADD += -Wno-sign-conversion +vfs_vnops.o_CFLAGS_ADD += -Wno-sign-conversion +vfs_xattr.o_CFLAGS_ADD += -Wno-sign-conversion +vm_compressor_backing_file.o_CFLAGS_ADD += -Wno-sign-conversion +vm_unix.o_CFLAGS_ADD += -Wno-sign-conversion +vn.o_CFLAGS_ADD += -Wno-sign-conversion +vnode_pager.o_CFLAGS_ADD += -Wno-sign-conversion -dp_backing_file.o_CFLAGS_ADD += -Wshorten-64-to-32 -ubc_subr.o_CFLAGS_ADD += -Wshorten-64-to-32 -vnode_pager.o_CFLAGS_ADD += -Wshorten-64-to-32 -vm_unix.o_CFLAGS_ADD += -Wshorten-64-to-32 -pthread_synch.o_CFLAGS_ADD += -Wno-unused-parameter -Wno-missing-prototypes -pthread_support.o_CFLAGS_ADD += -Wno-unused-parameter -Wno-missing-prototypes - -# Objects that don't want -Wsign-compare -OBJS_NO_SIGN_COMPARE = \ - radix.o \ - route.o \ - rtsock.o \ - dhcp_options.o \ - igmp.o \ - in_cksum.o \ - ip_divert.o \ - ip_dummynet.o \ - ip_flow.o \ - ip_fw2.o \ - ip_fw2_compat.o \ - ip_icmp.o \ - ip_input.o \ - ip_output.o \ - raw_ip.o \ - tcp_cache.o \ - tcp_input.o \ - tcp_output.o \ - tcp_subr.o \ - tcp_usrreq.o \ - tcp_timer.o \ - udp_usrreq.o \ - ah_input.o \ - ah_core.o \ - ah_output.o \ - esp_core.o \ - esp_input.o \ - esp_output.o \ - esp_rijndael.o \ - 
esp_chachapoly.o \ - ipsec.o \ - dest6.o \ - frag6.o \ - icmp6.o \ - in6.o \ - in6_src.o \ - in6_cksum.o \ - ip6_fw.o \ - ip6_forward.o \ - in6_ifattach.o \ - ip6_input.o \ - ip6_output.o \ - in6_proto.o \ - mld6.o \ - nd6.o \ - nd6_nbr.o \ - nd6_prproxy.o \ - nd6_rtr.o \ - raw_ip6.o \ - route6.o \ - scope6.o \ - udp6_usrreq.o \ - key.o \ - keysock.o \ - keydb.o \ - des_setkey.o \ - uipc_mbuf.o \ - uipc_mbuf2.o \ - uipc_socket.o \ - uipc_socket2.o - -$(foreach file,$(OBJS_NO_SIGN_COMPARE),$(eval $(call add_perfile_cflags,$(file),-Wno-sign-compare))) - -# Objects that don't want -Wcast-align warning (8474835) -OBJS_NO_CAST_ALIGN = \ - audit_bsm_token.o \ - audit_pipe.o \ - audit_session.o \ - bsd_i386.o \ - decmpfs.o \ - dtrace.o \ - fasttrap.o \ - fasttrap_isa.o \ - fbt.o \ - fbt_arm.o \ - fbt_x86.o \ - if_bond.o \ - ip6_fw.o \ - ip_dummynet.o \ - ip_fw2.o \ - kern_credential.o \ - kern_descrip.o \ - kern_event.o \ - kern_exec.o \ - kern_lockf.o \ - kern_subr.o \ - km.o \ - lockstat.o \ - mach_loader.o \ - memdev.o \ - nfs4_subs.o \ - nfs4_vnops.o \ - nfs_boot.o \ - nfs_gss.o \ - nfs_serv.o \ - nfs_socket.o \ - nfs_srvcache.o \ - nfs_subs.o \ - nfs_syscalls.o \ - nfs_vfsops.o \ - nfs_vnops.o \ - proc_info.o \ - pthread_synch.o \ - qsort.o \ - sdt.o \ - shadow.o \ - spec_vnops.o \ - subr_log.o \ - subr_prof.o \ - sys_generic.o \ - sys_pipe.o \ - systemcalls.o \ - systrace.o \ - tcp_lro.o \ - tty.o \ - tty_compat.o \ - tty_dev.o \ - tty_ptmx.o \ - tty_pty.o \ - ubc_subr.o \ - uipc_usrreq.o \ - vfs_attrlist.o \ - vfs_fsevents.o \ - vfs_lookup.o \ - vfs_syscalls.o \ - vfs_utfconv.o \ - vfs_vnops.o \ - vfs_xattr.o \ - vn.o \ - munge.o \ - aes.o \ - aeskey.o \ - sdt_arm.o \ - uipc_mbuf.o \ - kern_guarded.o - -$(foreach file,$(OBJS_NO_CAST_ALIGN),$(eval $(call add_perfile_cflags,$(file),-Wno-cast-align))) - -# Relax -Waddress-of-packed-member on networking (28123676) -OBJS_NO_PACKED_ADDRESS = \ - ah_core.o \ - ah_input.o \ - dlil.o \ - esp_input.o \ - esp_output.o \ - frag6.o \ - icmp6.o \ - if_stf.o \ - in6_cksum.o \ - in6_gif.o \ - ip6_forward.o \ - ip6_input.o \ - ip6_output.o \ - iptap.o \ - ipsec.o \ - mld6.o \ - mptcp_opt.o \ - nat464_utils.o \ - nd6.o \ - nd6_nbr.o \ - nd6_prproxy.o \ - nd6_rtr.o \ - necp.o \ - packet_mangler.o \ - pf.o \ - pf_norm.o \ - pktap.o \ - raw_ip6.o \ - tcp_input.o \ - tcp_subr.o \ - udp6_output.o \ - udp6_usrreq.o \ - udp_usrreq.o \ - sixxlowpan.o - -$(foreach file,$(OBJS_NO_PACKED_ADDRESS),$(eval $(call add_perfile_cflags,$(file),-Wno-address-of-packed-member))) +# +# Machine-independent per-file flags +# # # This rule insures that the subr_prof.c does NOT get compiled with @@ -284,27 +601,27 @@ $(SOBJS): .SFLAGS $(_v)$(REPLACECONTENTS) $@ $(S_KCC) $(SFLAGS) $(INCFLAGS) $(COMPONENT).filelist: $(OBJS) - $(call makelog,$(ColorL)LDFILELIST$(Color0) $(ColorLF)$(COMPONENT)$(Color0)) + @$(LOG_LDFILELIST) $(COMPONENT) $(_v)for obj in ${OBJS}; do \ $(ECHO) $(TARGET)/$(CURRENT_KERNEL_CONFIG)/$${obj}; \ done > $(COMPONENT).filelist MAKESYSCALLS = $(SRCROOT)/bsd/kern/makesyscalls.sh -init_sysent.c: $(SRCROOT)/bsd/kern/syscalls.master $(MAKESYSCALLS) - $(call makelog,[$(CMD_MC)] $(ColorH)GENERATING$(Color0) $(ColorLF)$@$(Color0) from $(ColorF)$<$(Color0)) +init_sysent.c: $(TARGET)/bsd.syscalls.master + @$(LOG_GENERATE) "$@$(Color0) from $(ColorF)$( /dev/null -syscalls.c: $(SRCROOT)/bsd/kern/syscalls.master $(MAKESYSCALLS) - $(call makelog,[$(CMD_MC)] $(ColorH)GENERATING$(Color0) $(ColorLF)$@$(Color0) from $(ColorF)$<$(Color0)) +syscalls.c: $(TARGET)/bsd.syscalls.master + 
@$(LOG_GENERATE) "$@$(Color0) from $(ColorF)$( /dev/null -audit_kevents.c: $(SRCROOT)/bsd/kern/syscalls.master $(MAKESYSCALLS) - $(call makelog,[$(CMD_MC)] $(ColorH)GENERATING$(Color0) $(ColorLF)$@$(Color0) from $(ColorF)$<$(Color0)) +audit_kevents.c: $(TARGET)/bsd.syscalls.master + @$(LOG_GENERATE) "$@$(Color0) from $(ColorF)$( /dev/null -systrace_args.c: $(SRCROOT)/bsd/kern/syscalls.master $(MAKESYSCALLS) - $(call makelog,[$(CMD_MC)] $(ColorH)GENERATING$(Color0) $(ColorLF)$@$(Color0) from $(ColorF)$<$(Color0)) +systrace_args.c: $(TARGET)/bsd.syscalls.master + @$(LOG_GENERATE) "$@$(Color0) from $(ColorF)$( /dev/null do_all: $(COMPONENT).filelist diff --git a/bsd/conf/Makefile.x86_64 b/bsd/conf/Makefile.x86_64 index c397a6e9b..fd1778557 100644 --- a/bsd/conf/Makefile.x86_64 +++ b/bsd/conf/Makefile.x86_64 @@ -2,10 +2,6 @@ #BEGIN Machine dependent Makefile fragment for x86_64 ###################################################################### -# Files to build with certain warnings turned off -dis_tables.o_CFLAGS_ADD += -Wno-cast-qual -fbt_x86.o_CFLAGS_ADD += -Wno-cast-qual - # Inline assembly doesn't interact well with LTO fbt_x86.o_CFLAGS_ADD += $(CFLAGS_NOLTO_FLAG) # Taking the address of labels doesn't work with LTO (9524055) diff --git a/bsd/conf/files b/bsd/conf/files index 99c4f51bc..258176a7e 100644 --- a/bsd/conf/files +++ b/bsd/conf/files @@ -9,8 +9,6 @@ OPTIONS/hw_ast optional hw_ast OPTIONS/hw_footprint optional hw_footprint OPTIONS/config_macf optional config_macf OPTIONS/config_macf_socket_subset optional config_macf_socket_subset -OPTIONS/config_macf_socket optional config_macf_socket -OPTIONS/config_macf_net optional config_macf_net OPTIONS/mach_assert optional mach_assert OPTIONS/mach_compat optional mach_compat OPTIONS/mach_counters optional mach_counters @@ -55,7 +53,6 @@ OPTIONS/config_ecc_logging optional config_ecc_logging # OPTIONS/networking optional networking OPTIONS/inet optional inet -OPTIONS/inet6 optional inet6 OPTIONS/ipv6send optional ipv6send OPTIONS/ether optional ether OPTIONS/vlan optional vlan @@ -157,7 +154,6 @@ bsd/libkern/crc16.c standard bsd/libkern/crc32.c standard bsd/libkern/random.c standard bsd/libkern/scanc.c standard -bsd/libkern/skpc.c standard bsd/libkern/strsep.c standard bsd/libkern/bcd.c standard bsd/libkern/memchr.c standard @@ -177,6 +173,7 @@ bsd/vfs/vfs_subr.c standard bsd/vfs/vfs_syscalls.c standard bsd/vfs/vfs_support.c standard bsd/vfs/vfs_utfconv.c standard +bsd/vfs/vfs_unicode.c standard bsd/vfs/vfs_vnops.c standard bsd/vfs/vfs_xattr.c standard bsd/vfs/vnode_if.c standard @@ -213,7 +210,7 @@ bsd/net/init.c optional sockets bsd/net/dlil.c optional networking bsd/net/ether_if_module.c optional ether bsd/net/ether_inet_pr_module.c optional ether inet -bsd/net/ether_inet6_pr_module.c optional ether inet6 +bsd/net/ether_inet6_pr_module.c optional ether inet bsd/net/if_loop.c optional loop bsd/net/if_mib.c optional networking bsd/net/if_vlan.c optional vlan @@ -275,8 +272,6 @@ bsd/net/classq/classq_util.c optional networking bsd/net/classq/classq_fq_codel.c optional networking bsd/net/pktsched/pktsched.c optional networking -bsd/net/pktsched/pktsched_qfq.c optional networking -bsd/net/pktsched/pktsched_tcq.c optional networking bsd/net/pktsched/pktsched_fq_codel.c optional networking bsd/net/pktsched/pktsched_netem.c optional networking @@ -311,7 +306,6 @@ bsd/netinet/tcp_cc.c optional inet bsd/netinet/tcp_newreno.c optional inet bsd/netinet/tcp_cubic.c optional inet bsd/netinet/cbrtf.c optional inet -bsd/netinet/tcp_lro.c optional 
inet bsd/netinet/tcp_ledbat.c optional inet bsd/netinet/tcp_log.c optional inet bsd/netinet/udp_usrreq.c optional inet @@ -336,34 +330,35 @@ bsd/netinet6/esp_output.c optional ipsec ipsec_esp bsd/netinet6/esp_rijndael.c optional ipsec ipsec_esp bsd/netinet6/esp_chachapoly.c optional ipsec ipsec_esp bsd/netinet6/ipsec.c optional ipsec -bsd/netinet6/dest6.c optional inet6 -bsd/netinet6/frag6.c optional inet6 -bsd/netinet6/icmp6.c optional inet6 -bsd/netinet6/in6.c optional inet6 -bsd/netinet6/in6_cga.c optional inet6 ipv6send -bsd/netinet6/in6_cksum.c optional inet6 -bsd/netinet6/in6_gif.c optional gif inet6 -bsd/netinet6/ip6_forward.c optional inet6 -bsd/netinet6/in6_ifattach.c optional inet6 -bsd/netinet6/ip6_input.c optional inet6 -bsd/netinet6/ip6_output.c optional inet6 -bsd/netinet6/in6_src.c optional inet6 -bsd/netinet6/in6_mcast.c optional inet6 -bsd/netinet6/in6_pcb.c optional inet6 -bsd/netinet6/in6_proto.c optional inet6 -bsd/netinet6/in6_rmx.c optional inet6 -bsd/netinet6/mld6.c optional inet6 -bsd/netinet6/nd6.c optional inet6 -bsd/netinet6/nd6_nbr.c optional inet6 -bsd/netinet6/nd6_prproxy.c optional inet6 -bsd/netinet6/nd6_rtr.c optional inet6 -bsd/netinet6/nd6_send.c optional inet6 ipv6send -bsd/netinet6/raw_ip6.c optional inet6 -bsd/netinet6/route6.c optional inet6 -bsd/netinet6/scope6.c optional inet6 -bsd/netinet6/udp6_output.c optional inet6 -bsd/netinet6/udp6_usrreq.c optional inet6 -bsd/netinet6/ip6_id.c optional inet6 +bsd/netinet6/dest6.c optional inet +bsd/netinet6/frag6.c optional inet +bsd/netinet6/icmp6.c optional inet +bsd/netinet6/in6.c optional inet +bsd/netinet6/in6_cga.c optional inet +bsd/netinet6/in6_cksum.c optional inet +bsd/netinet6/in6_gif.c optional gif inet +bsd/netinet6/ip6_forward.c optional inet +bsd/netinet6/in6_ifattach.c optional inet +bsd/netinet6/ip6_input.c optional inet +bsd/netinet6/ip6_output.c optional inet +bsd/netinet6/in6_src.c optional inet +bsd/netinet6/in6_mcast.c optional inet +bsd/netinet6/in6_pcb.c optional inet +bsd/netinet6/in6_proto.c optional inet +bsd/netinet6/in6_rmx.c optional inet +bsd/netinet6/mld6.c optional inet +bsd/netinet6/nd6.c optional inet +bsd/netinet6/nd6_nbr.c optional inet +bsd/netinet6/nd6_prproxy.c optional inet +bsd/netinet6/nd6_rtr.c optional inet +bsd/netinet6/nd6_rti.c optional inet +bsd/netinet6/nd6_send.c optional inet +bsd/netinet6/raw_ip6.c optional inet +bsd/netinet6/route6.c optional inet +bsd/netinet6/scope6.c optional inet +bsd/netinet6/udp6_output.c optional inet +bsd/netinet6/udp6_usrreq.c optional inet +bsd/netinet6/ip6_id.c optional inet bsd/net/sixxlowpan.c optional sixlowpan bsd/net/frame802154.c optional sixlowpan @@ -374,9 +369,9 @@ bsd/netkey/key_debug.c optional ipsec bsd/netkey/keysock.c optional ipsec bsd/netkey/keydb.c optional ipsec -bsd/net/multi_layer_pkt_log.c optional inet inet6 ipsec ipsec_esp +bsd/net/multi_layer_pkt_log.c optional inet inet ipsec ipsec_esp -bsd/crypto/rc4/rc4.c optional crypto +bsd/crypto/entropy/diag_entropy_sysctl.c standard #bsd/netpm/pm_aTT.c optional pm #bsd/netpm/pm_ams.c optional pm @@ -462,6 +457,7 @@ bsd/kern/subr_log.c standard bsd/kern/subr_prf.c standard bsd/kern/subr_sbuf.c standard bsd/kern/subr_xxx.c standard +bsd/kern/sys_eventlink.c standard bsd/kern/sys_generic.c standard bsd/kern/sys_pipe.c standard bsd/kern/sys_socket.c optional sockets @@ -488,6 +484,7 @@ bsd/kern/uipc_socket.c optional sockets bsd/kern/uipc_socket2.c optional sockets bsd/kern/uipc_syscalls.c optional sockets bsd/kern/uipc_usrreq.c optional sockets 
+bsd/kern/vsock_domain.c optional sockets bsd/kern/sysv_ipc.c standard bsd/kern/sysv_shm.c standard bsd/kern/sysv_sem.c standard @@ -534,8 +531,13 @@ bsd/miscfs/nullfs/null_subr.c optional nullfs bsd/miscfs/nullfs/null_vfsops.c optional nullfs bsd/miscfs/nullfs/null_vnops.c optional nullfs +bsd/miscfs/bindfs/bind_subr.c optional bindfs +bsd/miscfs/bindfs/bind_vfsops.c optional bindfs +bsd/miscfs/bindfs/bind_vnops.c optional bindfs + bsd/tests/bsd_tests.c optional config_xnupost bsd/tests/copyio_tests.c optional config_xnupost bsd/tests/pmap_test_sysctl.c optional config_xnupost +bsd/tests/ptrauth_data_tests_sysctl.c optional config_xnupost bsd/net/skywalk_stubs.c standard diff --git a/bsd/conf/param.c b/bsd/conf/param.c index 687822177..c2e5f75a4 100644 --- a/bsd/conf/param.c +++ b/bsd/conf/param.c @@ -70,8 +70,6 @@ #include #include #include -#include -#include #include #include #include @@ -82,7 +80,7 @@ struct timezone tz = { .tz_minuteswest = 0, .tz_dsttime = 0 }; -#if CONFIG_EMBEDDED +#if !defined(__x86_64__) #define NPROC 1000 /* Account for TOTAL_CORPSES_ALLOWED by making this slightly lower than we can. */ #define NPROC_PER_UID 950 #else @@ -95,8 +93,8 @@ struct timezone tz = { .tz_minuteswest = 0, .tz_dsttime = 0 }; int maxproc = NPROC; int maxprocperuid = NPROC_PER_UID; -#if CONFIG_EMBEDDED -int hard_maxproc = NPROC; /* hardcoded limit -- for embedded the number of processes is limited by the ASID space */ +#if !defined(__x86_64__) +int hard_maxproc = NPROC; /* hardcoded limit -- for ARM the number of processes is limited by the ASID space */ #else int hard_maxproc = HNPROC; /* hardcoded limit */ #endif @@ -121,14 +119,5 @@ int aio_max_requests = CONFIG_AIO_MAX; int aio_max_requests_per_process = CONFIG_AIO_PROCESS_MAX; int aio_worker_threads = CONFIG_AIO_THREAD_COUNT; -/* - * These have to be allocated somewhere; allocating - * them here forces loader errors if this file is omitted - * (if they've been externed everywhere else; hah!). - */ -struct callout *callout; -struct cblock *cfree; -struct cblock *cfreelist = NULL; -int cfreecount = 0; struct buf *buf_headers; struct domains_head domains = TAILQ_HEAD_INITIALIZER(domains); diff --git a/bsd/crypto/Makefile b/bsd/crypto/Makefile index 08f2ff7e5..2479c10a9 100644 --- a/bsd/crypto/Makefile +++ b/bsd/crypto/Makefile @@ -6,8 +6,8 @@ export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir include $(MakeInc_cmd) include $(MakeInc_def) -INSTINC_SUBDIRS = \ - rc4 \ +EXPINC_SUBDIRS = \ + entropy \ PRIVATE_DATAFILES = \ sha1.h \ diff --git a/bsd/crypto/rc4/Makefile b/bsd/crypto/entropy/Makefile similarity index 69% rename from bsd/crypto/rc4/Makefile rename to bsd/crypto/entropy/Makefile index 49f4d8129..a3ad2c884 100644 --- a/bsd/crypto/rc4/Makefile +++ b/bsd/crypto/entropy/Makefile @@ -6,14 +6,14 @@ export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir include $(MakeInc_cmd) include $(MakeInc_def) -PRIVATE_DATAFILES = \ - rc4.h +DATAFILES = \ + diag_entropy_sysctl.h -INSTALL_MI_DIR = crypto +INSTALL_MI_LIST = -EXPORT_MI_DIR = ${INSTALL_MI_DIR} +EXPORT_MI_LIST = ${DATAFILES} -INSTALL_KF_MI_LCL_LIST = ${PRIVATE_DATAFILES} +EXPORT_MI_DIR = crypto/entropy include $(MakeInc_rule) include $(MakeInc_dir) diff --git a/bsd/crypto/entropy/diag_entropy_sysctl.c b/bsd/crypto/entropy/diag_entropy_sysctl.c new file mode 100644 index 000000000..af5329859 --- /dev/null +++ b/bsd/crypto/entropy/diag_entropy_sysctl.c @@ -0,0 +1,56 @@ +/* + * Copyright (c) 2019 Apple Inc. All rights reserved. 
+ * + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. + * + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ + */ + +#include +#include +#include + +extern entropy_data_t EntropyData; + +static int +sysctl_entropy_collect(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req) +{ + if (!req->oldptr || req->oldlen > EntropyData.buffer_size) { + return EINVAL; + } + return SYSCTL_OUT(req, EntropyData.buffer, req->oldlen); +} + +SYSCTL_NODE(_kern, OID_AUTO, entropy, CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_NOAUTO, 0, NULL); +// Get current size of entropy buffer in bytes +SYSCTL_UINT(_kern_entropy, OID_AUTO, entropy_buffer_size, CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_NOAUTO, &EntropyData.buffer_size, 0, NULL); +// Collect contents from entropy buffer +SYSCTL_PROC(_kern_entropy, OID_AUTO, entropy_collect, CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_NOAUTO, NULL, 0, sysctl_entropy_collect, "-", NULL); + +void +register_entropy_sysctl(void) +{ + sysctl_register_oid(&sysctl__kern_entropy); + sysctl_register_oid(&sysctl__kern_entropy_entropy_buffer_size); + sysctl_register_oid(&sysctl__kern_entropy_entropy_collect); +} diff --git a/osfmk/kperf/kperf_arch.h b/bsd/crypto/entropy/diag_entropy_sysctl.h similarity index 82% rename from osfmk/kperf/kperf_arch.h rename to bsd/crypto/entropy/diag_entropy_sysctl.h index 6c84d89f5..c05650e79 100644 --- a/osfmk/kperf/kperf_arch.h +++ b/bsd/crypto/entropy/diag_entropy_sysctl.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011-2016 Apple Inc. All rights reserved. + * Copyright (c) 2019 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -25,12 +25,10 @@ * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -#ifndef KPERF_ARCH_H -#define KPERF_ARCH_H -struct kperf_timer; -bool kperf_mp_broadcast_other_running(struct kperf_timer *trigger); +#ifndef _SYS_CRYPTO_ENTROPY_DIAG_ENTROPY_SYSCTL_H_ +#define _SYS_CRYPTO_ENTROPY_DIAG_ENTROPY_SYSCTL_H_ -void kperf_signal_handler(unsigned int cpu_number); +void register_entropy_sysctl(void); -#endif /* KPERF_ARCH_H */ +#endif diff --git a/bsd/crypto/rc4/rc4.c b/bsd/crypto/rc4/rc4.c deleted file mode 100644 index da9559fea..000000000 --- a/bsd/crypto/rc4/rc4.c +++ /dev/null @@ -1,102 +0,0 @@ -/* - * rc4.c - * - * Copyright (c) 1996-2000 Whistle Communications, Inc. - * All rights reserved. 
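/*
 * [Editor's note -- illustrative sketch, not part of the patch.]
 * The new bsd/crypto/entropy/diag_entropy_sysctl.c added above publishes two
 * diagnostic nodes, kern.entropy.entropy_buffer_size and
 * kern.entropy.entropy_collect, but only once register_entropy_sysctl() has
 * run (both OIDs are CTLFLAG_NOAUTO, so they are not registered automatically).
 * Assuming they are registered on a given boot, a userspace reader might
 * drain the buffer roughly as sketched here.
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/sysctl.h>

int
main(void)
{
	unsigned int buf_size = 0;
	size_t len = sizeof(buf_size);

	/* 1. Ask how large the in-kernel entropy buffer currently is (bytes). */
	if (sysctlbyname("kern.entropy.entropy_buffer_size", &buf_size, &len, NULL, 0) != 0) {
		perror("entropy_buffer_size");  /* likely not registered on this boot */
		return 1;
	}

	/* 2. Read at most that many bytes; larger requests fail with EINVAL. */
	void *buf = malloc(buf_size);
	size_t want = buf_size;
	if (buf != NULL &&
	    sysctlbyname("kern.entropy.entropy_collect", buf, &want, NULL, 0) == 0) {
		printf("collected %zu bytes from the entropy buffer\n", want);
	}
	free(buf);
	return 0;
}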
- * - * Subject to the following obligations and disclaimer of warranty, use and - * redistribution of this software, in source or object code forms, with or - * without modifications are expressly permitted by Whistle Communications; - * provided, however, that: - * 1. Any and all reproductions of the source or object code must include the - * copyright notice above and the following disclaimer of warranties; and - * 2. No rights are granted, in any manner or form, to use Whistle - * Communications, Inc. trademarks, including the mark "WHISTLE - * COMMUNICATIONS" on advertising, endorsements, or otherwise except as - * such appears in the above copyright notice or in the software. - * - * THIS SOFTWARE IS BEING PROVIDED BY WHISTLE COMMUNICATIONS "AS IS", AND - * TO THE MAXIMUM EXTENT PERMITTED BY LAW, WHISTLE COMMUNICATIONS MAKES NO - * REPRESENTATIONS OR WARRANTIES, EXPRESS OR IMPLIED, REGARDING THIS SOFTWARE, - * INCLUDING WITHOUT LIMITATION, ANY AND ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT. - * WHISTLE COMMUNICATIONS DOES NOT WARRANT, GUARANTEE, OR MAKE ANY - * REPRESENTATIONS REGARDING THE USE OF, OR THE RESULTS OF THE USE OF THIS - * SOFTWARE IN TERMS OF ITS CORRECTNESS, ACCURACY, RELIABILITY OR OTHERWISE. - * IN NO EVENT SHALL WHISTLE COMMUNICATIONS BE LIABLE FOR ANY DAMAGES - * RESULTING FROM OR ARISING OUT OF ANY USE OF THIS SOFTWARE, INCLUDING - * WITHOUT LIMITATION, ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, - * PUNITIVE, OR CONSEQUENTIAL DAMAGES, PROCUREMENT OF SUBSTITUTE GOODS OR - * SERVICES, LOSS OF USE, DATA OR PROFITS, HOWEVER CAUSED AND UNDER ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF WHISTLE COMMUNICATIONS IS ADVISED OF THE POSSIBILITY - * OF SUCH DAMAGE. - * - * $FreeBSD: src/sys/crypto/rc4/rc4.c,v 1.2.2.1 2000/04/18 04:48:31 archie Exp $ - */ - -#include -#include - -static __inline void -swap_bytes(u_char *a, u_char *b) -{ - u_char temp; - - temp = *a; - *a = *b; - *b = temp; -} - -/* - * Initialize an RC4 state buffer using the supplied key, - * which can have arbitrary length. - */ -void -rc4_init(struct rc4_state *const state, const u_char *key, int keylen) -{ - u_char j; - int i; - - /* Initialize state with identity permutation */ - for (i = 0; i < 256; i++) { - state->perm[i] = (u_char)i; - } - state->index1 = 0; - state->index2 = 0; - - /* Randomize the permutation using key data */ - for (j = i = 0; i < 256; i++) { - j += state->perm[i] + key[i % keylen]; - swap_bytes(&state->perm[i], &state->perm[j]); - } -} - -/* - * Encrypt some data using the supplied RC4 state buffer. - * The input and output buffers may be the same buffer. - * Since RC4 is a stream cypher, this function is used - * for both encryption and decryption. 
- */ -void -rc4_crypt(struct rc4_state *const state, - const u_char *inbuf, u_char *outbuf, int buflen) -{ - int i; - u_char j; - - for (i = 0; i < buflen; i++) { - /* Update modification indicies */ - state->index1++; - state->index2 += state->perm[state->index1]; - - /* Modify permutation */ - swap_bytes(&state->perm[state->index1], - &state->perm[state->index2]); - - /* Encrypt/decrypt next byte */ - j = state->perm[state->index1] + state->perm[state->index2]; - outbuf[i] = inbuf[i] ^ state->perm[j]; - } -} diff --git a/bsd/crypto/rc4/rc4.h b/bsd/crypto/rc4/rc4.h deleted file mode 100644 index 40a1f4fae..000000000 --- a/bsd/crypto/rc4/rc4.h +++ /dev/null @@ -1,52 +0,0 @@ -/* - * rc4.h - * - * Copyright (c) 1996-2000 Whistle Communications, Inc. - * All rights reserved. - * - * Subject to the following obligations and disclaimer of warranty, use and - * redistribution of this software, in source or object code forms, with or - * without modifications are expressly permitted by Whistle Communications; - * provided, however, that: - * 1. Any and all reproductions of the source or object code must include the - * copyright notice above and the following disclaimer of warranties; and - * 2. No rights are granted, in any manner or form, to use Whistle - * Communications, Inc. trademarks, including the mark "WHISTLE - * COMMUNICATIONS" on advertising, endorsements, or otherwise except as - * such appears in the above copyright notice or in the software. - * - * THIS SOFTWARE IS BEING PROVIDED BY WHISTLE COMMUNICATIONS "AS IS", AND - * TO THE MAXIMUM EXTENT PERMITTED BY LAW, WHISTLE COMMUNICATIONS MAKES NO - * REPRESENTATIONS OR WARRANTIES, EXPRESS OR IMPLIED, REGARDING THIS SOFTWARE, - * INCLUDING WITHOUT LIMITATION, ANY AND ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT. - * WHISTLE COMMUNICATIONS DOES NOT WARRANT, GUARANTEE, OR MAKE ANY - * REPRESENTATIONS REGARDING THE USE OF, OR THE RESULTS OF THE USE OF THIS - * SOFTWARE IN TERMS OF ITS CORRECTNESS, ACCURACY, RELIABILITY OR OTHERWISE. - * IN NO EVENT SHALL WHISTLE COMMUNICATIONS BE LIABLE FOR ANY DAMAGES - * RESULTING FROM OR ARISING OUT OF ANY USE OF THIS SOFTWARE, INCLUDING - * WITHOUT LIMITATION, ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, - * PUNITIVE, OR CONSEQUENTIAL DAMAGES, PROCUREMENT OF SUBSTITUTE GOODS OR - * SERVICES, LOSS OF USE, DATA OR PROFITS, HOWEVER CAUSED AND UNDER ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF WHISTLE COMMUNICATIONS IS ADVISED OF THE POSSIBILITY - * OF SUCH DAMAGE. 
- * - * $FreeBSD: src/sys/crypto/rc4/rc4.h,v 1.2.2.1 2000/04/18 04:48:32 archie Exp $ - */ - -#ifndef _SYS_CRYPTO_RC4_RC4_H_ -#define _SYS_CRYPTO_RC4_RC4_H_ - -struct rc4_state { - u_char perm[256]; - u_char index1; - u_char index2; -}; - -extern void rc4_init(struct rc4_state *state, const u_char *key, int keylen); -extern void rc4_crypt(struct rc4_state *state, - const u_char *inbuf, u_char *outbuf, int buflen); - -#endif diff --git a/bsd/dev/Makefile b/bsd/dev/Makefile index 8f67f466b..076f0be99 100644 --- a/bsd/dev/Makefile +++ b/bsd/dev/Makefile @@ -10,5 +10,10 @@ EXPINC_SUBDIRS = random INSTTEXTFILES_SUBDIRS = dtrace +INSTTEXTFILES_SUBDIRS_X86_64 = dtrace +INSTTEXTFILES_SUBDIRS_X86_64H = dtrace +INSTTEXTFILES_SUBDIRS_ARM = dtrace +INSTTEXTFILES_SUBDIRS_ARM64 = dtrace + include $(MakeInc_rule) include $(MakeInc_dir) diff --git a/bsd/dev/arm/conf.c b/bsd/dev/arm/conf.c index 8925f9070..d3d0e5466 100644 --- a/bsd/dev/arm/conf.c +++ b/bsd/dev/arm/conf.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2017 Apple Inc. All rights reserved. + * Copyright (c) 2000-2020 Apple Inc. All rights reserved. */ /* * Copyright (c) 1997 by Apple Computer, Inc., all rights reserved @@ -236,7 +236,7 @@ isdisk(dev_t dev, int type) if (maj == NODEV) { break; } - /* FALL THROUGH */ + OS_FALLTHROUGH; case VBLK: if (bdevsw[maj].d_type == D_DISK) { return 1; diff --git a/bsd/dev/arm/dtrace_isa.c b/bsd/dev/arm/dtrace_isa.c index 1f8dbd2ef..c77f08a64 100644 --- a/bsd/dev/arm/dtrace_isa.c +++ b/bsd/dev/arm/dtrace_isa.c @@ -66,21 +66,13 @@ int dtrace_arm_condition_true(int condition, int cpsr); inline void dtrace_membar_producer(void) { -#if __ARM_SMP__ __asm__ volatile ("dmb ish" : : : "memory"); -#else - __asm__ volatile ("nop" : : : "memory"); -#endif } inline void dtrace_membar_consumer(void) { -#if __ARM_SMP__ __asm__ volatile ("dmb ish" : : : "memory"); -#else - __asm__ volatile ("nop" : : : "memory"); -#endif } /* @@ -98,7 +90,6 @@ dtrace_getipl(void) return ml_at_interrupt_context() ? 1 : 0; } -#if __ARM_SMP__ /* * MP coordination */ @@ -125,7 +116,6 @@ xcRemote(void *foo) thread_wakeup((event_t) &dt_xc_sync); } } -#endif /* * dtrace_xcall() is not called from probe context. 
@@ -133,7 +123,6 @@ xcRemote(void *foo) void dtrace_xcall(processorid_t cpu, dtrace_xcall_t f, void *arg) { -#if __ARM_SMP__ /* Only one dtrace_xcall in flight allowed */ lck_mtx_lock(&dt_xc_lock); @@ -147,14 +136,6 @@ dtrace_xcall(processorid_t cpu, dtrace_xcall_t f, void *arg) lck_mtx_unlock(&dt_xc_lock); return; -#else -#pragma unused(cpu) - /* On uniprocessor systems, the cpu should always be either ourselves or all */ - ASSERT(cpu == CPU->cpu_id || cpu == DTRACE_CPUALL); - - (*f)(arg); - return; -#endif } /* @@ -163,9 +144,7 @@ dtrace_xcall(processorid_t cpu, dtrace_xcall_t f, void *arg) void dtrace_isa_init(void) { -#if __ARM_SMP__ lck_mtx_init(&dt_xc_lock, dtrace_lck_grp, dtrace_lck_attr); -#endif return; } @@ -189,12 +168,21 @@ dtrace_getreg(struct regs * savearea, uint_t reg) return (uint64_t) ((unsigned int *) (&(regs->r)))[reg]; } +uint64_t +dtrace_getvmreg(uint_t ndx) +{ +#pragma unused(ndx) + DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); + return 0; +} + #define RETURN_OFFSET 4 static int dtrace_getustack_common(uint64_t * pcstack, int pcstack_limit, user_addr_t pc, user_addr_t sp) { + volatile uint16_t *flags = (volatile uint16_t *) &cpu_core[CPU->cpu_id].cpuc_dtrace_flags; int ret = 0; ASSERT(pcstack == NULL || pcstack_limit > 0); @@ -215,6 +203,12 @@ dtrace_getustack_common(uint64_t * pcstack, int pcstack_limit, user_addr_t pc, pc = dtrace_fuword32((sp + RETURN_OFFSET)); sp = dtrace_fuword32(sp); + + /* Truncate ustack if the iterator causes fault. */ + if (*flags & CPU_DTRACE_FAULT) { + *flags &= ~CPU_DTRACE_FAULT; + break; + } } return ret; @@ -425,18 +419,11 @@ dtrace_getufpstack(uint64_t * pcstack, uint64_t * fpstack, int pcstack_limit) sp = dtrace_fuword32(sp); } -#if 0 - /* XXX ARMTODO*/ - /* - * This is totally bogus: if we faulted, we're going to clear - * the fault and break. This is to deal with the apparently - * broken Java stacks on x86. - */ + /* Truncate ustack if the iterator causes fault. */ if (*flags & CPU_DTRACE_FAULT) { *flags &= ~CPU_DTRACE_FAULT; break; } -#endif } zero: diff --git a/bsd/dev/arm/fasttrap_isa.c b/bsd/dev/arm/fasttrap_isa.c index 08f831a01..ea4426151 100644 --- a/bsd/dev/arm/fasttrap_isa.c +++ b/bsd/dev/arm/fasttrap_isa.c @@ -296,13 +296,13 @@ fasttrap_return_common(proc_t *p, arm_saved_state_t *regs, user_addr_t pc, user_ else { retire_tp = 0; } -#ifndef CONFIG_EMBEDDED +#if defined(XNU_TARGET_OS_OSX) if (ISSET(current_proc()->p_lflag, P_LNOATTACH)) { dtrace_probe(dtrace_probeid_error, 0 /* state */, id->fti_probe->ftp_id, 1 /* ndx */, -1 /* offset */, DTRACEFLT_UPRIV); #else if (FALSE) { -#endif +#endif /* defined(XNU_TARGET_OS_OSX) */ } else { dtrace_probe(id->fti_probe->ftp_id, pc - id->fti_probe->ftp_faddr, @@ -506,13 +506,13 @@ fasttrap_pid_probe(arm_saved_state_t *regs) for (id = tp->ftt_ids; id != NULL; id = id->fti_next) { fasttrap_probe_t *probe = id->fti_probe; -#ifndef CONFIG_EMBEDDED +#if defined(XNU_TARGET_OS_OSX) if (ISSET(current_proc()->p_lflag, P_LNOATTACH)) { dtrace_probe(dtrace_probeid_error, 0 /* state */, probe->ftp_id, 1 /* ndx */, -1 /* offset */, DTRACEFLT_UPRIV); #else if (FALSE) { -#endif +#endif /* defined(XNU_TARGET_OS_OSX) */ } else { if (probe->ftp_prov->ftp_provider_type == DTFTP_PROVIDER_ONESHOT) { if (os_atomic_xchg(&probe->ftp_triggered, 1, relaxed)) { diff --git a/bsd/dev/arm/kern_machdep.c b/bsd/dev/arm/kern_machdep.c index 076f3abd8..9caed58b1 100644 --- a/bsd/dev/arm/kern_machdep.c +++ b/bsd/dev/arm/kern_machdep.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2007 Apple Inc. All rights reserved. 
+ * Copyright (c) 2000-2020 Apple Inc. All rights reserved. */ /* * Copyright (C) 1990, NeXT, Inc. @@ -13,6 +13,7 @@ #include #include #include +#include #include #include @@ -35,7 +36,8 @@ cpu_subtype32() return 0; } } -#endif /* __arm64__*/ + +#endif /* __arm64__ */ /********************************************************************** * Routine: grade_binary() @@ -46,7 +48,7 @@ cpu_subtype32() * not acceptable. **********************************************************************/ int -grade_binary(cpu_type_t exectype, cpu_subtype_t execsubtype, bool allow_simulator_binary __unused) +grade_binary(cpu_type_t exectype, cpu_subtype_t execsubtype, cpu_subtype_t execfeatures __unused, bool allow_simulator_binary __unused) { #if __arm64__ cpu_subtype_t hostsubtype = @@ -69,6 +71,7 @@ grade_binary(cpu_type_t exectype, cpu_subtype_t execsubtype, bool allow_simulato break; } /* switch (hostsubtype) */ + break; #else /* __arm64__ */ @@ -166,22 +169,3 @@ v7: return 0; } - -boolean_t -pie_required(cpu_type_t exectype, cpu_subtype_t execsubtype) -{ - switch (exectype) { -#if __arm64__ - case CPU_TYPE_ARM64: - return TRUE; -#endif /* __arm64__ */ - - case CPU_TYPE_ARM: - switch (execsubtype) { - case CPU_SUBTYPE_ARM_V7K: - return TRUE; - } - break; - } - return FALSE; -} diff --git a/bsd/dev/arm/km.c b/bsd/dev/arm/km.c index 92a26ca84..e22bf3606 100644 --- a/bsd/dev/arm/km.c +++ b/bsd/dev/arm/km.c @@ -67,7 +67,7 @@ kminit(void) * cdevsw interface to km driver. */ int -kmopen(dev_t dev, int flag, __unused int devtype, proc_t pp) +kmopen(dev_t dev, __unused int flag, __unused int devtype, proc_t pp) { int unit; struct tty *tp; @@ -115,10 +115,6 @@ kmopen(dev_t dev, int flag, __unused int devtype, proc_t pp) tty_unlock(tp); /* XXX race window */ - if (flag & O_POPUP) { - PE_initialize_console(0, kPETextScreen); - } - bzero(&video, sizeof(video)); PE_current_console(&video); @@ -128,8 +124,13 @@ kmopen(dev_t dev, int flag, __unused int devtype, proc_t pp) wp->ws_col = 80; wp->ws_row = 24; } else if (video.v_width != 0 && video.v_height != 0) { - wp->ws_col = video.v_width / wp->ws_xpixel; - wp->ws_row = video.v_height / wp->ws_ypixel; + unsigned long ws_col = video.v_width / wp->ws_xpixel; + unsigned long ws_row = video.v_height / wp->ws_ypixel; + + assert((ws_col <= USHRT_MAX) && (ws_row <= USHRT_MAX)); + + wp->ws_col = (unsigned short)ws_col; + wp->ws_row = (unsigned short)ws_row; } else { wp->ws_col = 100; wp->ws_row = 36; diff --git a/bsd/dev/arm/munge.c b/bsd/dev/arm/munge.c index 094970e28..af050d7ee 100644 --- a/bsd/dev/arm/munge.c +++ b/bsd/dev/arm/munge.c @@ -371,6 +371,12 @@ munge_wlll(const void *regs, void *args) return error; } +int +munge_wlllww(const void *regs, void *args) +{ + return munge_wllll(regs, args); +} + int munge_wllll(const void *regs, void *args) { @@ -437,6 +443,26 @@ munge_wwwlww(const void *regs, void *args) } } +int +munge_wwwlwww(const void *regs, void *args) +{ + if (REGS_TO_STYLE(regs) == kDirect) { + memcpy(args, regs, 9 * sizeof(uint32_t)); + } else { + DECLARE_AND_CAST(regs, args, ss, uu_args); + + uu_args[0] = ss->r[1]; // w + uu_args[1] = ss->r[2]; // w + uu_args[2] = ss->r[3]; // w + uu_args[4] = ss->r[4]; // l + uu_args[5] = ss->r[5]; // + uu_args[6] = ss->r[6]; // w + uu_args[7] = ss->r[7]; // w + uu_args[8] = ss->r[8]; // w + } + return 0; +} + int munge_wwwl(const void *regs, void *args) { @@ -489,6 +515,22 @@ munge_wwwwlw(const void *regs, void *args) } } +int +munge_wwwwllww(const void *regs, void *args) +{ + if (REGS_TO_STYLE(regs) == kDirect) { + return 
marshal_no_pad(regs, args, 10); + } else { + DECLARE_AND_CAST(regs, args, ss, uu_args); + int error = munge_wwwwl(regs, args); // wwwwl + if (error) { + return error; + } + return copyin(ss->sp + ARG_SP_BYTE_OFFSET, // lww + uu_args + 6, 4 * sizeof(uint32_t)); + } +} + int munge_wwwwwl(const void *regs, void *args) { diff --git a/bsd/dev/arm/stubs.c b/bsd/dev/arm/stubs.c index a76f54b60..7675bb322 100644 --- a/bsd/dev/arm/stubs.c +++ b/bsd/dev/arm/stubs.c @@ -45,7 +45,7 @@ copyoutstr(const void *from, user_addr_t to, size_t maxlen, size_t * lencopied) error = ENAMETOOLONG; } - len = min(maxlen, slen); + len = MIN(maxlen, slen); if (copyout(from, to, len)) { error = EFAULT; } diff --git a/bsd/dev/arm/sysctl.c b/bsd/dev/arm/sysctl.c index c5ae6100b..c0105a0ca 100644 --- a/bsd/dev/arm/sysctl.c +++ b/bsd/dev/arm/sysctl.c @@ -189,3 +189,17 @@ static SYSCTL_INT(_machdep, OID_AUTO, lck_mtx_adaptive_spin_mode, CTLFLAG_RW, &lck_mtx_adaptive_spin_mode, 0, "Enable adaptive spin behavior for kernel mutexes"); + +static int +virtual_address_size SYSCTL_HANDLER_ARGS +{ +#pragma unused(arg1, arg2, oidp) + int return_value = 32 - (TTBCR_N_SETUP & TTBCR_N_MASK); + return SYSCTL_OUT(req, &return_value, sizeof(return_value)); +} + +static +SYSCTL_PROC(_machdep, OID_AUTO, virtual_address_size, + CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, + 0, 0, virtual_address_size, "I", + "Number of addressable bits in userspace virtual addresses"); diff --git a/bsd/dev/arm/systemcalls.c b/bsd/dev/arm/systemcalls.c index 36deb9bff..5bd9c690c 100644 --- a/bsd/dev/arm/systemcalls.c +++ b/bsd/dev/arm/systemcalls.c @@ -37,42 +37,42 @@ #if CONFIG_DTRACE extern int32_t dtrace_systrace_syscall(struct proc *, void *, int *); extern void dtrace_systrace_syscall_return(unsigned short, int, int *); -#endif /* CONFIG_DTRACE */ +#endif /* CONFIG_DTRACE */ extern void unix_syscall(struct arm_saved_state * regs, thread_t thread_act, - struct uthread * uthread, struct proc * proc); - -static int arm_get_syscall_args(uthread_t, struct arm_saved_state *, struct sysent *); -static int arm_get_u32_syscall_args(uthread_t, arm_saved_state32_t *, struct sysent *); -static void arm_prepare_u32_syscall_return(struct sysent *, arm_saved_state_t *, uthread_t, int); -static void arm_prepare_syscall_return(struct sysent *, struct arm_saved_state *, uthread_t, int); -static int arm_get_syscall_number(struct arm_saved_state *); -static void arm_trace_unix_syscall(int, struct arm_saved_state *); -static void arm_clear_syscall_error(struct arm_saved_state *); -#define save_r0 r[0] -#define save_r1 r[1] -#define save_r2 r[2] -#define save_r3 r[3] -#define save_r4 r[4] -#define save_r5 r[5] -#define save_r6 r[6] -#define save_r7 r[7] -#define save_r8 r[8] -#define save_r9 r[9] -#define save_r10 r[10] -#define save_r11 r[11] -#define save_r12 r[12] -#define save_r13 r[13] + struct uthread * uthread, struct proc * proc); + +static int arm_get_syscall_args(uthread_t, struct arm_saved_state *, const struct sysent *); +static int arm_get_u32_syscall_args(uthread_t, arm_saved_state32_t *, const struct sysent *); +static void arm_prepare_u32_syscall_return(const struct sysent *, arm_saved_state_t *, uthread_t, int); +static void arm_prepare_syscall_return(const struct sysent *, struct arm_saved_state *, uthread_t, int); +static unsigned short arm_get_syscall_number(struct arm_saved_state *); +static void arm_trace_unix_syscall(int, struct arm_saved_state *); +static void arm_clear_syscall_error(struct arm_saved_state *); +#define save_r0 r[0] +#define save_r1 r[1] 
+#define save_r2 r[2] +#define save_r3 r[3] +#define save_r4 r[4] +#define save_r5 r[5] +#define save_r6 r[6] +#define save_r7 r[7] +#define save_r8 r[8] +#define save_r9 r[9] +#define save_r10 r[10] +#define save_r11 r[11] +#define save_r12 r[12] +#define save_r13 r[13] #if COUNT_SYSCALLS -__XNU_PRIVATE_EXTERN int do_count_syscalls = 1; -__XNU_PRIVATE_EXTERN int syscalls_log[SYS_MAXSYSCALL]; +__XNU_PRIVATE_EXTERN int do_count_syscalls = 1; +__XNU_PRIVATE_EXTERN int syscalls_log[SYS_MAXSYSCALL]; #endif #define code_is_kdebug_trace(code) (((code) == SYS_kdebug_trace) || \ - ((code) == SYS_kdebug_trace64) || \ - ((code) == SYS_kdebug_trace_string)) + ((code) == SYS_kdebug_trace64) || \ + ((code) == SYS_kdebug_trace_string)) /* * Function: unix_syscall @@ -86,15 +86,15 @@ __attribute__((noreturn)) #endif void unix_syscall( - struct arm_saved_state * state, - __unused thread_t thread_act, - struct uthread * uthread, - struct proc * proc) + struct arm_saved_state * state, + __unused thread_t thread_act, + struct uthread * uthread, + struct proc * proc) { - struct sysent *callp; + const struct sysent *callp; int error; unsigned short code, syscode; - pid_t pid; + pid_t pid; #if defined(__arm__) assert(is_saved_state32(state)); @@ -104,16 +104,17 @@ unix_syscall( code = arm_get_syscall_number(state); -#define unix_syscall_kprintf(x...) /* kprintf("unix_syscall: " x) */ +#define unix_syscall_kprintf(x...) /* kprintf("unix_syscall: " x) */ if (kdebug_enable && !code_is_kdebug_trace(code)) { arm_trace_unix_syscall(code, state); } - if ((uthread->uu_flag & UT_VFORK)) + if ((uthread->uu_flag & UT_VFORK)) { proc = current_proc(); + } - syscode = (code < nsysent) ? code : SYS_invalid; + syscode = (code < nsysent) ? code : SYS_invalid; callp = &sysent[syscode]; /* @@ -159,27 +160,28 @@ unix_syscall( uthread->uu_vpindex = 0; #endif unix_syscall_kprintf("code %d (pid %d - %s, tid %lld)\n", code, - pid, proc->p_comm, thread_tid(current_thread())); + pid, proc->p_comm, thread_tid(current_thread())); #if CONFIG_MACF if (__improbable(proc->syscall_filter_mask != NULL && !bitstr_test(proc->syscall_filter_mask, syscode))) { error = mac_proc_check_syscall_unix(proc, syscode); - if (error) + if (error) { goto skip_syscall; + } } #endif /* CONFIG_MACF */ AUDIT_SYSCALL_ENTER(code, proc, uthread); - error = (*(callp->sy_call)) (proc, &uthread->uu_arg[0], &(uthread->uu_rval[0])); + error = (*(callp->sy_call))(proc, &uthread->uu_arg[0], &(uthread->uu_rval[0])); AUDIT_SYSCALL_EXIT(code, proc, uthread, error); #if CONFIG_MACF skip_syscall: #endif /* CONFIG_MACF */ - unix_syscall_kprintf("code %d, error %d, results %x, %x (pid %d - %s, tid %lld)\n", code, error, - uthread->uu_rval[0], uthread->uu_rval[1], - pid, get_bsdtask_info(current_task()) ? proc->p_comm : "unknown" , thread_tid(current_thread())); + unix_syscall_kprintf("code %d, error %d, results %x, %x (pid %d - %s, tid %lld)\n", code, error, + uthread->uu_rval[0], uthread->uu_rval[1], + pid, get_bsdtask_info(current_task()) ? 
proc->p_comm : "unknown", thread_tid(current_thread())); #ifdef JOE_DEBUG if (uthread->uu_iocount) { @@ -212,7 +214,7 @@ skip_syscall: } if (kdebug_enable && !code_is_kdebug_trace(code)) { KDBG_RELEASE(BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_END, - error, uthread->uu_rval[0], uthread->uu_rval[1], pid); + error, uthread->uu_rval[0], uthread->uu_rval[1], pid); } #if PROC_REF_DEBUG @@ -234,10 +236,10 @@ unix_syscall_return(int error) struct proc *proc; struct arm_saved_state *regs; unsigned short code; - struct sysent *callp; + const struct sysent *callp; -#define unix_syscall_return_kprintf(x...) /* kprintf("unix_syscall_retur - * n: " x) */ +#define unix_syscall_return_kprintf(x...) /* kprintf("unix_syscall_retur + * n: " x) */ thread_act = current_thread(); proc = current_proc(); @@ -248,8 +250,9 @@ unix_syscall_return(int error) callp = (code >= nsysent) ? &sysent[SYS_invalid] : &sysent[code]; #if CONFIG_DTRACE - if (callp->sy_call == dtrace_systrace_syscall) + if (callp->sy_call == dtrace_systrace_syscall) { dtrace_systrace_syscall_return( code, error, uthread->uu_rval ); + } #endif /* CONFIG_DTRACE */ #if DEBUG || DEVELOPMENT kern_allocation_name_t @@ -287,7 +290,7 @@ unix_syscall_return(int error) } static void -arm_prepare_u32_syscall_return(struct sysent *callp, arm_saved_state_t *regs, uthread_t uthread, int error) +arm_prepare_u32_syscall_return(const struct sysent *callp, arm_saved_state_t *regs, uthread_t uthread, int error) { assert(is_saved_state32(regs)); @@ -302,7 +305,7 @@ arm_prepare_u32_syscall_return(struct sysent *callp, arm_saved_state_t *regs, ut /* set the carry bit to execute cerror routine */ ss32->cpsr |= PSR_CF; unix_syscall_return_kprintf("error: setting carry to trigger cerror call\n"); - } else { /* (not error) */ + } else { /* (not error) */ switch (callp->sy_return_type) { case _SYSCALL_RET_INT_T: case _SYSCALL_RET_UINT_T: @@ -325,11 +328,10 @@ arm_prepare_u32_syscall_return(struct sysent *callp, arm_saved_state_t *regs, ut } } /* else (error == EJUSTRETURN) { nothing } */ - } static void -arm_trace_u32_unix_syscall(int code, arm_saved_state32_t *regs) +arm_trace_u32_unix_syscall(int code, arm_saved_state32_t *regs) { bool indirect = (regs->save_r12 == 0); if (indirect) { @@ -342,40 +344,42 @@ arm_trace_u32_unix_syscall(int code, arm_saved_state32_t *regs) } static void -arm_clear_u32_syscall_error(arm_saved_state32_t *regs) +arm_clear_u32_syscall_error(arm_saved_state32_t *regs) { regs->cpsr &= ~PSR_CF; -} +} #if defined(__arm__) static int -arm_get_syscall_args(uthread_t uthread, struct arm_saved_state *state, struct sysent *callp) +arm_get_syscall_args(uthread_t uthread, struct arm_saved_state *state, const struct sysent *callp) { assert(is_saved_state32(state)); return arm_get_u32_syscall_args(uthread, saved_state32(state), callp); } #if __arm__ && (__BIGGEST_ALIGNMENT__ > 4) -/* +/* * For armv7k, the alignment constraints of the ABI mean we don't know how the userspace - * arguments are arranged without knowing the the prototype of the syscall. So we use mungers + * arguments are arranged without knowing the the prototype of the syscall. So we use mungers * to marshal the userspace data into the uu_arg. This also means we need the same convention * as mach syscalls. That means we use r8 to pass arguments in the BSD case as well. 
*/ static int -arm_get_u32_syscall_args(uthread_t uthread, arm_saved_state32_t *regs, struct sysent *callp) +arm_get_u32_syscall_args(uthread_t uthread, arm_saved_state32_t *regs, const struct sysent *callp) { sy_munge_t *munger; /* This check is probably not very useful since these both come from build-time */ - if (callp->sy_arg_bytes > sizeof(uthread->uu_arg)) + if (callp->sy_arg_bytes > sizeof(uthread->uu_arg)) { return -1; + } /* get the munger and use it to marshal in the data from userspace */ munger = callp->sy_arg_munge32; - if (munger == NULL || (callp->sy_arg_bytes == 0)) + if (munger == NULL || (callp->sy_arg_bytes == 0)) { return 0; + } return munger(regs, uthread->uu_arg); } @@ -387,14 +391,14 @@ arm_get_u32_syscall_args(uthread_t uthread, arm_saved_state32_t *regs, struct sy * arguments from a 32-bit userland out to 64-bit. */ static int -arm_get_u32_syscall_args(uthread_t uthread, arm_saved_state32_t *regs, struct sysent *callp) +arm_get_u32_syscall_args(uthread_t uthread, arm_saved_state32_t *regs, const struct sysent *callp) { int regparams; int flavor = (regs->save_r12 == 0 ? 1 : 0); - + regparams = (7 - flavor); /* Indirect value consumes a register */ - assert((unsigned) callp->sy_arg_bytes <= sizeof (uthread->uu_arg)); + assert((unsigned) callp->sy_arg_bytes <= sizeof(uthread->uu_arg)); if (callp->sy_arg_bytes <= (sizeof(uint32_t) * regparams)) { /* @@ -407,9 +411,9 @@ arm_get_u32_syscall_args(uthread_t uthread, arm_saved_state32_t *regs, struct sy * the remainder from the stack (offset by the 7 regs therein). */ unix_syscall_kprintf("%s: spillover...\n", __FUNCTION__); - memcpy(&uthread->uu_arg[0] , ®s->r[flavor], regparams * sizeof(int)); - if (copyin((user_addr_t)regs->sp + 7 * sizeof(int), (int *)&uthread->uu_arg[0] + regparams, - (callp->sy_arg_bytes - (sizeof(uint32_t) * regparams))) != 0) { + memcpy(&uthread->uu_arg[0], ®s->r[flavor], regparams * sizeof(int)); + if (copyin((user_addr_t)regs->sp + 7 * sizeof(int), (int *)&uthread->uu_arg[0] + regparams, + (callp->sy_arg_bytes - (sizeof(uint32_t) * regparams))) != 0) { return -1; } } else { @@ -420,18 +424,18 @@ arm_get_u32_syscall_args(uthread_t uthread, arm_saved_state32_t *regs, struct sy } #endif -static int +static unsigned short arm_get_syscall_number(struct arm_saved_state *regs) { if (regs->save_r12 != 0) { - return regs->save_r12; + return (unsigned short)regs->save_r12; } else { - return regs->save_r0; + return (unsigned short)regs->save_r0; } } static void -arm_prepare_syscall_return(struct sysent *callp, struct arm_saved_state *state, uthread_t uthread, int error) +arm_prepare_syscall_return(const struct sysent *callp, struct arm_saved_state *state, uthread_t uthread, int error) { assert(is_saved_state32(state)); arm_prepare_u32_syscall_return(callp, state, uthread, error); @@ -445,18 +449,18 @@ arm_trace_unix_syscall(int code, struct arm_saved_state *state) } static void -arm_clear_syscall_error(struct arm_saved_state * state) +arm_clear_syscall_error(struct arm_saved_state * state) { assert(is_saved_state32(state)); arm_clear_u32_syscall_error(saved_state32(state)); } #elif defined(__arm64__) -static void arm_prepare_u64_syscall_return(struct sysent *, arm_saved_state_t *, uthread_t, int); -static int arm_get_u64_syscall_args(uthread_t, arm_saved_state64_t *, struct sysent *); +static void arm_prepare_u64_syscall_return(const struct sysent *, arm_saved_state_t *, uthread_t, int); +static int arm_get_u64_syscall_args(uthread_t, arm_saved_state64_t *, const struct sysent *); static int 
-arm_get_syscall_args(uthread_t uthread, struct arm_saved_state *state, struct sysent *callp) +arm_get_syscall_args(uthread_t uthread, struct arm_saved_state *state, const struct sysent *callp) { if (is_saved_state32(state)) { return arm_get_u32_syscall_args(uthread, saved_state32(state), callp); @@ -466,23 +470,23 @@ arm_get_syscall_args(uthread_t uthread, struct arm_saved_state *state, struct sy } /* - * 64-bit: all arguments in registers. We're willing to use x9, a temporary - * register per the ABI, to pass an argument to the kernel for one case, + * 64-bit: all arguments in registers. We're willing to use x9, a temporary + * register per the ABI, to pass an argument to the kernel for one case, * an indirect syscall with 8 arguments. No munging required, as all arguments * are in 64-bit wide registers already. */ static int -arm_get_u64_syscall_args(uthread_t uthread, arm_saved_state64_t *regs, struct sysent *callp) +arm_get_u64_syscall_args(uthread_t uthread, arm_saved_state64_t *regs, const struct sysent *callp) { int indirect_offset; - + #if CONFIG_REQUIRES_U32_MUNGING sy_munge_t *mungerp; #endif indirect_offset = (regs->x[ARM64_SYSCALL_CODE_REG_NUM] == 0) ? 1 : 0; - /* + /* * Everything should fit in registers for now. */ if (callp->sy_narg > (int)(sizeof(uthread->uu_arg) / sizeof(uthread->uu_arg[0]))) { @@ -517,13 +521,13 @@ arm_get_u64_syscall_args(uthread_t uthread, arm_saved_state64_t *regs, struct sy return 0; } /* - * When the kernel is running AArch64, munge arguments from 32-bit + * When the kernel is running AArch64, munge arguments from 32-bit * userland out to 64-bit. * * flavor == 1 indicates an indirect syscall. */ static int -arm_get_u32_syscall_args(uthread_t uthread, arm_saved_state32_t *regs, struct sysent *callp) +arm_get_u32_syscall_args(uthread_t uthread, arm_saved_state32_t *regs, const struct sysent *callp) { int regparams; #if CONFIG_REQUIRES_U32_MUNGING @@ -535,7 +539,7 @@ arm_get_u32_syscall_args(uthread_t uthread, arm_saved_state32_t *regs, struct sy regparams = (7 - flavor); /* Indirect value consumes a register */ - assert((unsigned) callp->sy_arg_bytes <= sizeof (uthread->uu_arg)); + assert((unsigned) callp->sy_arg_bytes <= sizeof(uthread->uu_arg)); if (callp->sy_arg_bytes <= (sizeof(uint32_t) * regparams)) { /* @@ -548,9 +552,9 @@ arm_get_u32_syscall_args(uthread_t uthread, arm_saved_state32_t *regs, struct sy * the remainder from the stack (offset by the 7 regs therein). 
*/ unix_syscall_kprintf("%s: spillover...\n", __FUNCTION__); - memcpy(&uthread->uu_arg[0] , ®s->r[flavor], regparams * sizeof(int)); - if (copyin((user_addr_t)regs->sp + 7 * sizeof(int), (int *)&uthread->uu_arg[0] + regparams, - (callp->sy_arg_bytes - (sizeof(uint32_t) * regparams))) != 0) { + memcpy(&uthread->uu_arg[0], ®s->r[flavor], regparams * sizeof(int)); + if (copyin((user_addr_t)regs->sp + 7 * sizeof(int), (int *)&uthread->uu_arg[0] + regparams, + (callp->sy_arg_bytes - (sizeof(uint32_t) * regparams))) != 0) { return -1; } } else { @@ -566,30 +570,28 @@ arm_get_u32_syscall_args(uthread_t uthread, arm_saved_state32_t *regs, struct sy #endif return 0; - } -static int +static unsigned short arm_get_syscall_number(struct arm_saved_state *state) { if (is_saved_state32(state)) { if (saved_state32(state)->save_r12 != 0) { - return saved_state32(state)->save_r12; - } else { - return saved_state32(state)->save_r0; + return (unsigned short)saved_state32(state)->save_r12; + } else { + return (unsigned short)saved_state32(state)->save_r0; } } else { if (saved_state64(state)->x[ARM64_SYSCALL_CODE_REG_NUM] != 0) { - return saved_state64(state)->x[ARM64_SYSCALL_CODE_REG_NUM]; - } else { - return saved_state64(state)->x[0]; + return (unsigned short)saved_state64(state)->x[ARM64_SYSCALL_CODE_REG_NUM]; + } else { + return (unsigned short)saved_state64(state)->x[0]; } } - } static void -arm_prepare_syscall_return(struct sysent *callp, struct arm_saved_state *state, uthread_t uthread, int error) +arm_prepare_syscall_return(const struct sysent *callp, struct arm_saved_state *state, uthread_t uthread, int error) { if (is_saved_state32(state)) { arm_prepare_u32_syscall_return(callp, state, uthread, error); @@ -599,7 +601,7 @@ arm_prepare_syscall_return(struct sysent *callp, struct arm_saved_state *state, } static void -arm_prepare_u64_syscall_return(struct sysent *callp, arm_saved_state_t *regs, uthread_t uthread, int error) +arm_prepare_u64_syscall_return(const struct sysent *callp, arm_saved_state_t *regs, uthread_t uthread, int error) { assert(is_saved_state64(regs)); @@ -611,14 +613,14 @@ arm_prepare_u64_syscall_return(struct sysent *callp, arm_saved_state_t *regs, ut if (error) { ss64->x[0] = error; ss64->x[1] = 0; - /* + /* * Set the carry bit to execute cerror routine. - * ARM64_TODO: should we have a separate definition? + * ARM64_TODO: should we have a separate definition? * The bits are the same. */ ss64->cpsr |= PSR_CF; unix_syscall_return_kprintf("error: setting carry to trigger cerror call\n"); - } else { /* (not error) */ + } else { /* (not error) */ switch (callp->sy_return_type) { case _SYSCALL_RET_INT_T: ss64->x[0] = uthread->uu_rval[0]; @@ -645,11 +647,9 @@ arm_prepare_u64_syscall_return(struct sysent *callp, arm_saved_state_t *regs, ut } } /* else (error == EJUSTRETURN) { nothing } */ - - } static void -arm_trace_u64_unix_syscall(int code, arm_saved_state64_t *regs) +arm_trace_u64_unix_syscall(int code, arm_saved_state64_t *regs) { bool indirect = (regs->x[ARM64_SYSCALL_CODE_REG_NUM] == 0); if (indirect) { @@ -674,15 +674,15 @@ arm_trace_unix_syscall(int code, struct arm_saved_state *state) static void arm_clear_u64_syscall_error(arm_saved_state64_t *regs) { - /* - * ARM64_TODO: should we have a separate definition? - * The bits are the same. + /* + * ARM64_TODO: should we have a separate definition? + * The bits are the same. 
*/ regs->cpsr &= ~PSR_CF; } static void -arm_clear_syscall_error(struct arm_saved_state * state) +arm_clear_syscall_error(struct arm_saved_state * state) { if (is_saved_state32(state)) { arm_clear_u32_syscall_error(saved_state32(state)); @@ -691,6 +691,6 @@ arm_clear_syscall_error(struct arm_saved_state * state) } } -#else +#else #error Unknown architecture. #endif diff --git a/bsd/dev/arm/unix_signal.c b/bsd/dev/arm/unix_signal.c index 1e3bb03f9..8ad193ca6 100644 --- a/bsd/dev/arm/unix_signal.c +++ b/bsd/dev/arm/unix_signal.c @@ -44,9 +44,11 @@ typedef struct mcontext32 mcontext32_t; typedef struct mcontext64 mcontext64_t; /* Signal handler flavors supported */ -/* These defns should match the Libc implmn */ +/* These defns should match the libplatform implmn */ #define UC_TRAD 1 #define UC_FLAVOR 30 +#define UC_SET_ALT_STACK 0x40000000 +#define UC_RESET_ALT_STACK 0x80000000 /* The following are valid mcontext sizes */ #define UC_FLAVOR_SIZE32 ((ARM_THREAD_STATE_COUNT + ARM_EXCEPTION_STATE_COUNT + ARM_VFP_STATE_COUNT) * sizeof(int)) @@ -56,6 +58,8 @@ typedef struct mcontext64 mcontext64_t; #define C_64_REDZONE_LEN 128 #endif +#define TRUNC_TO_16_BYTES(addr) (addr & ~0xf) + static int sendsig_get_state32(thread_t th_act, arm_thread_state_t *ts, mcontext32_t *mcp) { @@ -243,7 +247,7 @@ sendsig_do_dtrace(uthread_t ut, user_siginfo_t *sinfo, int sig, user_addr_t catc /* XXX truncates faulting address to uintptr_t */ DTRACE_PROC3(signal__handle, int, sig, siginfo_t *, &(ut->t_dtrace_siginfo), - void (*)(void), CAST_DOWN(sig_t, catcher)); + void (*)(void), CAST_DOWN(uintptr_t, catcher)); } #endif @@ -309,21 +313,29 @@ sendsig( } trampact = ps->ps_trampact[sig]; - oonstack = ps->ps_sigstk.ss_flags & SA_ONSTACK; + oonstack = ut->uu_sigstk.ss_flags & SA_ONSTACK; /* * Get sundry thread state. */ if (proc_is64bit_data(p)) { #ifdef __arm64__ - if (sendsig_get_state64(th_act, &ts.ts64.ss, &user_frame.uf64.mctx) != 0) { + int ret = 0; + if ((ret = sendsig_get_state64(th_act, &ts.ts64.ss, &user_frame.uf64.mctx)) != 0) { +#if DEVELOPMENT || DEBUG + printf("process [%s][%d] sendsig_get_state64 failed with ret %d, expected 0", p->p_comm, p->p_pid, ret); +#endif goto bad2; } #else panic("Shouldn't have 64-bit thread states on a 32-bit kernel."); #endif } else { - if (sendsig_get_state32(th_act, &ts.ts32.ss, &user_frame.uf32.mctx) != 0) { + int ret = 0; + if ((ret = sendsig_get_state32(th_act, &ts.ts32.ss, &user_frame.uf32.mctx)) != 0) { +#if DEVELOPMENT || DEBUG + printf("process [%s][%d] sendsig_get_state32 failed with ret %d, expected 0", p->p_comm, p->p_pid, ret); +#endif goto bad2; } } @@ -331,12 +343,13 @@ sendsig( /* * Figure out where our new stack lives. 
*/ - if ((ps->ps_flags & SAS_ALTSTACK) && !oonstack && + if ((ut->uu_flag & UT_ALTSTACK) && !oonstack && (ps->ps_sigonstack & sigmask(sig))) { - sp = ps->ps_sigstk.ss_sp; - sp += ps->ps_sigstk.ss_size; - stack_size = ps->ps_sigstk.ss_size; - ps->ps_sigstk.ss_flags |= SA_ONSTACK; + sp = ut->uu_sigstk.ss_sp; + stack_size = ut->uu_sigstk.ss_size; + + sp += stack_size; + ut->uu_sigstk.ss_flags |= SA_ONSTACK; } else { /* * Get stack pointer, and allocate enough space @@ -345,17 +358,27 @@ sendsig( if (proc_is64bit_data(p)) { #if defined(__arm64__) sp = CAST_USER_ADDR_T(ts.ts64.ss.sp); - sp = (sp - sizeof(user_frame.uf64) - C_64_REDZONE_LEN) & ~0xf; /* Make sure to align to 16 bytes and respect red zone */ #else panic("Shouldn't have 64-bit thread states on a 32-bit kernel."); #endif } else { sp = CAST_USER_ADDR_T(ts.ts32.ss.sp); - sp -= sizeof(user_frame.uf32); + } + } + + /* Make sure to move stack pointer down for room for metadata */ + if (proc_is64bit_data(p)) { +#if defined(__arm64__) + sp = (sp - sizeof(user_frame.uf64) - C_64_REDZONE_LEN); + sp = TRUNC_TO_16_BYTES(sp); +#else + panic("Shouldn't have 64-bit thread states on a 32-bit kernel."); +#endif + } else { + sp -= sizeof(user_frame.uf32); #if defined(__arm__) && (__BIGGEST_ALIGNMENT__ > 4) - sp &= ~0xf; /* Make sure to align to 16 bytes for armv7k */ + sp = TRUNC_TO_16_BYTES(sp); /* Only for armv7k */ #endif - } } proc_unlock(p); @@ -550,13 +573,20 @@ sendsig( assert(kr == KERN_SUCCESS); token = (user64_addr_t)token_uctx ^ (user64_addr_t)ps->ps_sigreturn_token; - if (copyout(&user_frame.uf64, sp, sizeof(user_frame.uf64)) != 0) { + int ret = 0; + if ((ret = copyout(&user_frame.uf64, sp, sizeof(user_frame.uf64))) != 0) { +#if DEVELOPMENT || DEBUG + printf("process [%s][%d] copyout of user_frame to (sp, size) = (0x%llx, %zu) failed with ret %d, expected 0\n", p->p_comm, p->p_pid, sp, sizeof(user_frame.uf64), ret); +#endif goto bad; } - if (sendsig_set_thread_state64(&ts.ts64.ss, + if ((kr = sendsig_set_thread_state64(&ts.ts64.ss, catcher, infostyle, sig, (user64_addr_t)&((struct user_sigframe64*)sp)->sinfo, - (user64_addr_t)p_uctx, token, trampact, sp, th_act) != KERN_SUCCESS) { + (user64_addr_t)p_uctx, token, trampact, sp, th_act)) != KERN_SUCCESS) { +#if DEVELOPMENT || DEBUG + printf("process [%s][%d] sendsig_set_thread_state64 failed with kr %d, expected 0", p->p_comm, p->p_pid, kr); +#endif goto bad; } @@ -758,6 +788,17 @@ sigreturn( /* see osfmk/kern/restartable.c */ act_set_ast_reset_pcs(th_act); + /* + * If we are being asked to change the altstack flag on the thread, we + * just set/reset it and return (the uap->uctx is not used). + */ + if ((unsigned int)uap->infostyle == UC_SET_ALT_STACK) { + ut->uu_sigstk.ss_flags |= SA_ONSTACK; + return 0; + } else if ((unsigned int)uap->infostyle == UC_RESET_ALT_STACK) { + ut->uu_sigstk.ss_flags &= ~SA_ONSTACK; + return 0; + } if (proc_is64bit_data(p)) { #if defined(__arm64__) @@ -782,9 +823,9 @@ sigreturn( } if ((onstack & 01)) { - p->p_sigacts->ps_sigstk.ss_flags |= SA_ONSTACK; + ut->uu_sigstk.ss_flags |= SA_ONSTACK; } else { - p->p_sigacts->ps_sigstk.ss_flags &= ~SA_ONSTACK; + ut->uu_sigstk.ss_flags &= ~SA_ONSTACK; } ut->uu_sigmask = sigmask & ~sigcantmask; diff --git a/bsd/dev/arm64/conf.c b/bsd/dev/arm64/conf.c index 06062ce28..270be780a 100644 --- a/bsd/dev/arm64/conf.c +++ b/bsd/dev/arm64/conf.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2017 Apple Inc. All rights reserved. + * Copyright (c) 2000-2020 Apple Inc. All rights reserved. 
*/ /* * Copyright (c) 1997 by Apple Computer, Inc., all rights reserved @@ -236,7 +236,7 @@ isdisk(dev_t dev, int type) if (maj == NODEV) { break; } - /* FALL THROUGH */ + OS_FALLTHROUGH; case VBLK: if (bdevsw[maj].d_type == D_DISK) { return 1; diff --git a/bsd/dev/arm64/disassembler.c b/bsd/dev/arm64/disassembler.c index c9cb73582..5bd343913 100644 --- a/bsd/dev/arm64/disassembler.c +++ b/bsd/dev/arm64/disassembler.c @@ -6,12 +6,12 @@ #include -int dtrace_decode_arm64(uint32_t instr); +uint8_t dtrace_decode_arm64(uint32_t instr); struct arm64_decode_entry { uint32_t mask; uint32_t value; - uint32_t type; + uint8_t type; }; struct arm64_decode_entry arm64_decode_table[] = { @@ -43,7 +43,7 @@ struct arm64_decode_entry arm64_decode_table[] = { #define NUM_DECODE_ENTRIES (sizeof(arm64_decode_table) / sizeof(struct arm64_decode_entry)) -int +uint8_t dtrace_decode_arm64(uint32_t instr) { unsigned i; diff --git a/bsd/dev/arm64/dtrace_isa.c b/bsd/dev/arm64/dtrace_isa.c index 56d1729f5..494bb7fad 100644 --- a/bsd/dev/arm64/dtrace_isa.c +++ b/bsd/dev/arm64/dtrace_isa.c @@ -41,6 +41,7 @@ #include #include #include +#include #include #include /* for thread_wakeup() */ #include @@ -70,21 +71,13 @@ struct frame { inline void dtrace_membar_producer(void) { -#if __ARM_SMP__ __asm__ volatile ("dmb ish" : : : "memory"); -#else - __asm__ volatile ("nop" : : : "memory"); -#endif } inline void dtrace_membar_consumer(void) { -#if __ARM_SMP__ __asm__ volatile ("dmb ish" : : : "memory"); -#else - __asm__ volatile ("nop" : : : "memory"); -#endif } /* @@ -102,7 +95,6 @@ dtrace_getipl(void) return ml_at_interrupt_context() ? 1 : 0; } -#if __ARM_SMP__ /* * MP coordination */ @@ -129,7 +121,6 @@ xcRemote(void *foo) thread_wakeup((event_t) &dt_xc_sync); } } -#endif /* * dtrace_xcall() is not called from probe context. @@ -137,7 +128,6 @@ xcRemote(void *foo) void dtrace_xcall(processorid_t cpu, dtrace_xcall_t f, void *arg) { -#if __ARM_SMP__ /* Only one dtrace_xcall in flight allowed */ lck_mtx_lock(&dt_xc_lock); @@ -151,14 +141,6 @@ dtrace_xcall(processorid_t cpu, dtrace_xcall_t f, void *arg) lck_mtx_unlock(&dt_xc_lock); return; -#else -#pragma unused(cpu) - /* On uniprocessor systems, the cpu should always be either ourselves or all */ - ASSERT(cpu == CPU->cpu_id || cpu == DTRACE_CPUALL); - - (*f)(arg); - return; -#endif } /* @@ -202,12 +184,21 @@ dtrace_getreg(struct regs * savearea, uint_t reg) return (uint64_t)get_saved_state_reg(regs, reg); } +uint64_t +dtrace_getvmreg(uint_t ndx) +{ +#pragma unused(ndx) + DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); + return 0; +} + #define RETURN_OFFSET64 8 static int dtrace_getustack_common(uint64_t * pcstack, int pcstack_limit, user_addr_t pc, user_addr_t sp) { + volatile uint16_t *flags = (volatile uint16_t *) &cpu_core[CPU->cpu_id].cpuc_dtrace_flags; int ret = 0; ASSERT(pcstack == NULL || pcstack_limit > 0); @@ -228,6 +219,12 @@ dtrace_getustack_common(uint64_t * pcstack, int pcstack_limit, user_addr_t pc, pc = dtrace_fuword64((sp + RETURN_OFFSET64)); sp = dtrace_fuword64(sp); + + /* Truncate ustack if the iterator causes fault. 
*/ + if (*flags & CPU_DTRACE_FAULT) { + *flags &= ~CPU_DTRACE_FAULT; + break; + } } return ret; @@ -236,9 +233,9 @@ dtrace_getustack_common(uint64_t * pcstack, int pcstack_limit, user_addr_t pc, void dtrace_getupcstack(uint64_t * pcstack, int pcstack_limit) { - thread_t thread = current_thread(); - savearea_t *regs; - user_addr_t pc, sp, fp; + thread_t thread = current_thread(); + savearea_t *regs; + user_addr_t pc, sp, fp; volatile uint16_t *flags = (volatile uint16_t *) &cpu_core[CPU->cpu_id].cpuc_dtrace_flags; int n; @@ -271,7 +268,10 @@ dtrace_getupcstack(uint64_t * pcstack, int pcstack_limit) pc = get_saved_state_pc(regs); sp = get_saved_state_sp(regs); - fp = get_saved_state_fp(regs); + + { + fp = get_saved_state_fp(regs); + } if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) { *pcstack++ = (uint64_t) pc; @@ -439,18 +439,11 @@ dtrace_getufpstack(uint64_t * pcstack, uint64_t * fpstack, int pcstack_limit) sp = dtrace_fuword64(sp); } -#if 0 - /* XXX ARMTODO*/ - /* - * This is totally bogus: if we faulted, we're going to clear - * the fault and break. This is to deal with the apparently - * broken Java stacks on x86. - */ + /* Truncate ustack if the iterator causes fault. */ if (*flags & CPU_DTRACE_FAULT) { *flags &= ~CPU_DTRACE_FAULT; break; } -#endif } zero: diff --git a/bsd/dev/arm64/fasttrap_isa.c b/bsd/dev/arm64/fasttrap_isa.c index b547aa992..e7f58c5e4 100644 --- a/bsd/dev/arm64/fasttrap_isa.c +++ b/bsd/dev/arm64/fasttrap_isa.c @@ -45,7 +45,7 @@ extern dtrace_id_t dtrace_probeid_error; /* Solaris proc_t is the struct. Darwin's proc_t is a pointer to it. */ #define proc_t struct proc /* Steer clear of the Darwin typedef for proc_t */ -extern int dtrace_decode_arm64(uint32_t instr); +extern uint8_t dtrace_decode_arm64(uint32_t instr); #define IS_ARM64_NOP(x) ((x) == 0xD503201F) /* Marker for is-enabled probes */ @@ -236,13 +236,13 @@ fasttrap_return_common(proc_t *p, arm_saved_state_t *regs, user_addr_t pc, user_ retire_tp = 0; } -#ifndef CONFIG_EMBEDDED +#if defined(XNU_TARGET_OS_OSX) if (ISSET(current_proc()->p_lflag, P_LNOATTACH)) { dtrace_probe(dtrace_probeid_error, 0 /* state */, id->fti_probe->ftp_id, 1 /* ndx */, -1 /* offset */, DTRACEFLT_UPRIV); #else if (FALSE) { -#endif +#endif /* defined(XNU_TARGET_OS_OSX) */ } else { dtrace_probe(probe->ftp_id, pc - id->fti_probe->ftp_faddr, @@ -933,11 +933,10 @@ fasttrap_pid_probe(arm_saved_state_t *state) dtrace_icookie_t cookie; uint_t is_enabled = 0; int was_simulated, retire_tp = 1; - int is_64_bit = is_saved_state64(state); uint64_t pc = get_saved_state_pc(state); - assert(is_64_bit); + assert(is_saved_state64(state)); uthread_t uthread = (uthread_t) get_bsdthread_info(current_thread()); @@ -1018,13 +1017,13 @@ fasttrap_pid_probe(arm_saved_state_t *state) for (id = tp->ftt_ids; id != NULL; id = id->fti_next) { fasttrap_probe_t *probe = id->fti_probe; -#ifndef CONFIG_EMBEDDED +#if defined(XNU_TARGET_OS_OSX) if (ISSET(current_proc()->p_lflag, P_LNOATTACH)) { dtrace_probe(dtrace_probeid_error, 0 /* state */, probe->ftp_id, 1 /* ndx */, -1 /* offset */, DTRACEFLT_UPRIV); #else if (FALSE) { -#endif +#endif /* defined(XNU_TARGET_OS_OSX) */ } else { if (probe->ftp_prov->ftp_provider_type == DTFTP_PROVIDER_ONESHOT) { if (os_atomic_xchg(&probe->ftp_triggered, 1, relaxed)) { diff --git a/bsd/dev/arm64/fbt_arm.c b/bsd/dev/arm64/fbt_arm.c index 4cff0d3f6..15b2a33cf 100644 --- a/bsd/dev/arm64/fbt_arm.c +++ b/bsd/dev/arm64/fbt_arm.c @@ -119,77 +119,55 @@ fbt_invop(uintptr_t addr, uintptr_t * stack, uintptr_t rval) CPU->cpu_dtrace_invop_underway = 1; 
/* Race not possible on * this per-cpu state */ + /* + * Stack looks like this: + * + * [Higher addresses] + * + * Frame of caller + * Extra args for callee + * ------------------------ + * fbt entry probe: + * Frame from traced function: + * fbt return probe: + * Missing as the return probe has already popped the frame in the callee and + * traps with LR set to the return address in caller. + * ------------------------ + * arm_context_t + * ------------------------ + * Frame from trap handler: + * The traced function has either never pushed the frame + * or already popped it. So there is no frame in the + * backtrace pointing to the frame on the stack containing + * the LR in the caller. + * ------------------------ + * | + * | + * | stack grows this way + * | + * | + * v + * [Lower addresses] + * + * cpu_dtrace_caller compensates for fact that the LR is not stored on stack as explained + * above. When walking the stack, when we reach the frame where we extract a PC in the + * patched function, we put the cpu_dtrace_caller in the backtrace instead. The next + * frame we extract will be in the caller's caller, so we output a backtrace starting + * at the caller and going sequentially up the stack. + */ + arm_saved_state_t *regs = (arm_saved_state_t *)(&((arm_context_t *)stack)->ss); + + CPU->cpu_dtrace_caller = get_saved_state_lr(regs); + + /* When fbt_roffset is non-zero, we know we are handling a return probe point. */ if (fbt->fbtp_roffset == 0) { - /* - * Stack looks like this: - * - * [Higher addresses] - * - * Frame of caller - * Extra args for callee - * ------------------------ - * Frame from traced function: - * ------------------------ - * arm_context_t - * ------------------------ - * Frame from trap handler: - * The traced function never got to mov fp, sp, - * so there is no frame in the backtrace pointing - * to the frame on the stack containing the LR in the - * caller. - * ------------------------ - * | - * | - * | stack grows this way - * | - * | - * v - * [Lower addresses] - */ - - arm_saved_state_t *regs = (arm_saved_state_t *)(&((arm_context_t *)stack)->ss); - - /* - * cpu_dtrace_caller compensates for fact that the traced function never got to update its fp. - * When walking the stack, when we reach the frame where we extract a PC in the patched - * function, we put the cpu_dtrace_caller in the backtrace instead. The next frame we extract - * will be in the caller's caller, so we output a backtrace starting at the caller and going - * sequentially up the stack. - */ - CPU->cpu_dtrace_caller = get_saved_state_lr(regs); dtrace_probe(fbt->fbtp_id, get_saved_state_reg(regs, 0), get_saved_state_reg(regs, 1), get_saved_state_reg(regs, 2), get_saved_state_reg(regs, 3), get_saved_state_reg(regs, 4)); - CPU->cpu_dtrace_caller = 0; } else { - /* - * When fbtp_roffset is non-zero, we know we are handling a return probe point. - * - * - * Stack looks like this, as we've already popped the frame in the traced callee, and - * we trap with lr set to the return address in the caller. 
- * [Higher addresses] - * - * Frame of caller - * Extra args for callee - * ------------------------ - * arm_context_t - * ------------------------ - * Frame from trap handler: - * ------------------------ - * | - * | - * | stack grows this way - * | - * | - * v - * [Lower addresses] - */ - arm_saved_state_t *regs = (arm_saved_state_t *)(&((arm_context_t *)stack)->ss); - - CPU->cpu_dtrace_caller = get_saved_state_lr(regs); dtrace_probe(fbt->fbtp_id, fbt->fbtp_roffset, rval, 0, 0, 0); - CPU->cpu_dtrace_caller = 0; } + + CPU->cpu_dtrace_caller = 0; CPU->cpu_dtrace_invop_underway = 0; } @@ -221,7 +199,8 @@ fbt_perfCallback( if (FBT_EXCEPTION_CODE == trapno && !IS_USER_TRAP(regs)) { boolean_t oldlevel = 0; machine_inst_t emul = 0; - uint64_t sp, lr, imm; + uint64_t sp, lr; + uint32_t imm; oldlevel = ml_set_interrupts_enabled(FALSE); @@ -514,7 +493,7 @@ again: newfbt->fbtp_ctl = ctl; newfbt->fbtp_loadcnt = ctl->mod_loadcnt; - ASSERT(FBT_IS_ARM64_RET(theInstr)); + ASSERT(FBT_IS_ARM64_RET(theInstr) || FBT_IS_ARM64_B_INSTR(theInstr)); newfbt->fbtp_rval = DTRACE_INVOP_RET; newfbt->fbtp_roffset = (uintptr_t) ((uint8_t*) instr - (uint8_t *)symbolStart); newfbt->fbtp_savedval = theInstr; diff --git a/bsd/dev/arm64/sysctl.c b/bsd/dev/arm64/sysctl.c index fd7055cb7..ef46d1af7 100644 --- a/bsd/dev/arm64/sysctl.c +++ b/bsd/dev/arm64/sysctl.c @@ -12,6 +12,10 @@ #include #include +#if HYPERVISOR +#include +#endif + extern uint64_t wake_abstime; extern int lck_mtx_adaptive_spin_mode; @@ -183,6 +187,8 @@ make_brand_string SYSCTL_HANDLER_ARGS impl = "ARM architecture"; break; } + + char buf[80]; snprintf(buf, sizeof(buf), "%s processor", impl); return SYSCTL_OUT(req, buf, strlen(buf) + 1); @@ -192,11 +198,26 @@ SYSCTL_PROC(_machdep_cpu, OID_AUTO, brand_string, CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0, make_brand_string, "A", "CPU brand string"); + static SYSCTL_INT(_machdep, OID_AUTO, lck_mtx_adaptive_spin_mode, CTLFLAG_RW, &lck_mtx_adaptive_spin_mode, 0, "Enable adaptive spin behavior for kernel mutexes"); +static int +virtual_address_size SYSCTL_HANDLER_ARGS +{ +#pragma unused(arg1, arg2, oidp) + int return_value = 64 - T0SZ_BOOT; + return SYSCTL_OUT(req, &return_value, sizeof(return_value)); +} + +static +SYSCTL_PROC(_machdep, OID_AUTO, virtual_address_size, + CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, + 0, 0, virtual_address_size, "I", + "Number of addressable bits in userspace virtual addresses"); + #if DEVELOPMENT || DEBUG extern uint64_t TLockTimeOut; @@ -204,118 +225,87 @@ SYSCTL_QUAD(_machdep, OID_AUTO, tlto, CTLFLAG_RW | CTLFLAG_LOCKED, &TLockTimeOut, "Ticket spinlock timeout (MATUs): use with care"); -static int -sysctl_sysreg_vbar_el1 SYSCTL_HANDLER_ARGS -{ -#pragma unused(arg1, arg2, oidp) - uint64_t return_value = __builtin_arm_rsr64("VBAR_EL1"); - return SYSCTL_OUT(req, &return_value, sizeof(return_value)); -} - /* - * machdep.cpu.sysreg_vbar_el1 - * - * ARM64: Vector Base Address Register. - * Read from the current CPU's system registers. + * macro to generate a sysctl machdep.cpu.sysreg_* for a given system register + * using __builtin_arm_rsr64. 
*/ -SYSCTL_PROC(_machdep_cpu, OID_AUTO, sysreg_vbar_el1, - CTLFLAG_RD | CTLTYPE_QUAD | CTLFLAG_LOCKED, - 0, 0, sysctl_sysreg_vbar_el1, "Q", - "VBAR_EL1 register on the current CPU"); - -static int -sysctl_sysreg_mair_el1 SYSCTL_HANDLER_ARGS -{ -#pragma unused(arg1, arg2, oidp) - uint64_t return_value = __builtin_arm_rsr64("MAIR_EL1"); - return SYSCTL_OUT(req, &return_value, sizeof(return_value)); -} - +#define SYSCTL_PROC_MACHDEP_CPU_SYSREG(name) \ +static int \ +sysctl_sysreg_##name SYSCTL_HANDLER_ARGS \ +{ \ +_Pragma("unused(arg1, arg2, oidp)") \ + uint64_t return_value = __builtin_arm_rsr64(#name); \ + return SYSCTL_OUT(req, &return_value, sizeof(return_value)); \ +} \ +SYSCTL_PROC(_machdep_cpu, OID_AUTO, sysreg_##name, \ + CTLFLAG_RD | CTLTYPE_QUAD | CTLFLAG_LOCKED, \ + 0, 0, sysctl_sysreg_##name, "Q", \ + #name " register on the current CPU"); + + +// CPU system registers +// ARM64: AArch64 Vector Base Address Register +SYSCTL_PROC_MACHDEP_CPU_SYSREG(VBAR_EL1); +// ARM64: AArch64 Memory Attribute Indirection Register +SYSCTL_PROC_MACHDEP_CPU_SYSREG(MAIR_EL1); +// ARM64: AArch64 Translation table base register 1 +SYSCTL_PROC_MACHDEP_CPU_SYSREG(TTBR1_EL1); +// ARM64: AArch64 System Control Register +SYSCTL_PROC_MACHDEP_CPU_SYSREG(SCTLR_EL1); +// ARM64: AArch64 Translation Control Register +SYSCTL_PROC_MACHDEP_CPU_SYSREG(TCR_EL1); +// ARM64: AArch64 Memory Model Feature Register 0 +SYSCTL_PROC_MACHDEP_CPU_SYSREG(ID_AA64MMFR0_EL1); +// ARM64: AArch64 Instruction Set Attribute Register 1 +SYSCTL_PROC_MACHDEP_CPU_SYSREG(ID_AA64ISAR1_EL1); /* - * machdep.cpu.sysreg_mair_el1 + * ARM64: AArch64 Guarded Execution Mode GENTER Vector * - * ARM64: Memory Attribute Indirection Register. - * Read from the current CPU's system registers. + * Workaround for pre-H13, since register cannot be read unless in guarded + * mode, thus expose software convention that GXF_ENTRY_EL1 is always set + * to the address of the gxf_ppl_entry_handler. */ -SYSCTL_PROC(_machdep_cpu, OID_AUTO, sysreg_mair_el1, - CTLFLAG_RD | CTLTYPE_QUAD | CTLFLAG_LOCKED, - 0, 0, sysctl_sysreg_mair_el1, "Q", - "MAIR_EL1 register on the current CPU"); +#endif /* DEVELOPMENT || DEBUG */ -static int -sysctl_sysreg_ttbr1_el1 SYSCTL_HANDLER_ARGS -{ -#pragma unused(arg1, arg2, oidp) - uint64_t return_value = __builtin_arm_rsr64("TTBR1_EL1"); - return SYSCTL_OUT(req, &return_value, sizeof(return_value)); -} +#if HYPERVISOR +SYSCTL_NODE(_kern, OID_AUTO, hv, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "Hypervisor info"); -/* - * machdep.cpu.sysreg_ttbr1_el1 - * - * ARM64: Translation table base register 1. - * Read from the current CPU's system registers. - */ -SYSCTL_PROC(_machdep_cpu, OID_AUTO, sysreg_ttbr1_el1, - CTLFLAG_RD | CTLTYPE_QUAD | CTLFLAG_LOCKED, - 0, 0, sysctl_sysreg_ttbr1_el1, "Q", - "TTBR1_EL1 register on the current CPU"); +SYSCTL_INT(_kern_hv, OID_AUTO, supported, + CTLFLAG_KERN | CTLFLAG_RD | CTLFLAG_LOCKED, + &hv_support_available, 0, ""); -static int -sysctl_sysreg_sctlr_el1 SYSCTL_HANDLER_ARGS -{ -#pragma unused(arg1, arg2, oidp) - uint64_t return_value = __builtin_arm_rsr64("SCTLR_EL1"); - return SYSCTL_OUT(req, &return_value, sizeof(return_value)); -} +extern unsigned int arm64_num_vmids; -/* - * machdep.cpu.sysreg_sctlr_el1 - * - * ARM64: System Control Register. - * Read from the current CPU's system registers. 
- */ -SYSCTL_PROC(_machdep_cpu, OID_AUTO, sysreg_sctlr_el1, - CTLFLAG_RD | CTLTYPE_QUAD | CTLFLAG_LOCKED, - 0, 0, sysctl_sysreg_sctlr_el1, "Q", - "SCTLR_EL1 register on the current CPU"); +SYSCTL_UINT(_kern_hv, OID_AUTO, max_address_spaces, + CTLFLAG_KERN | CTLFLAG_RD | CTLFLAG_LOCKED, + &arm64_num_vmids, 0, ""); + +extern uint64_t pmap_ipa_size(uint64_t granule); static int -sysctl_sysreg_tcr_el1 SYSCTL_HANDLER_ARGS +sysctl_ipa_size_16k SYSCTL_HANDLER_ARGS { #pragma unused(arg1, arg2, oidp) - uint64_t return_value = __builtin_arm_rsr64("TCR_EL1"); + uint64_t return_value = pmap_ipa_size(16384); return SYSCTL_OUT(req, &return_value, sizeof(return_value)); } -/* - * machdep.cpu.sysreg_tcr_el1 - * - * ARM64: Translation Control Register. - * Read from the current CPU's system registers. - */ -SYSCTL_PROC(_machdep_cpu, OID_AUTO, sysreg_tcr_el1, +SYSCTL_PROC(_kern_hv, OID_AUTO, ipa_size_16k, CTLFLAG_RD | CTLTYPE_QUAD | CTLFLAG_LOCKED, - 0, 0, sysctl_sysreg_tcr_el1, "Q", - "TCR_EL1 register on the current CPU"); + 0, 0, sysctl_ipa_size_16k, "P", + "Maximum size allowed for 16K-page guest IPA spaces"); static int -sysctl_sysreg_id_aa64mmfr0_el1 SYSCTL_HANDLER_ARGS +sysctl_ipa_size_4k SYSCTL_HANDLER_ARGS { #pragma unused(arg1, arg2, oidp) - uint64_t return_value = __builtin_arm_rsr64("ID_AA64MMFR0_EL1"); + uint64_t return_value = pmap_ipa_size(4096); return SYSCTL_OUT(req, &return_value, sizeof(return_value)); } -/* - * machdep.cpu.sysreg_id_aa64mmfr0_el1 - * - * ARM64: AArch64 Memory Model Feature Register 0. - * Read from the current CPU's system registers. - */ -SYSCTL_PROC(_machdep_cpu, OID_AUTO, sysreg_id_aa64mmfr0_el1, +SYSCTL_PROC(_kern_hv, OID_AUTO, ipa_size_4k, CTLFLAG_RD | CTLTYPE_QUAD | CTLFLAG_LOCKED, - 0, 0, sysctl_sysreg_id_aa64mmfr0_el1, "Q", - "ID_AA64MMFR0_EL1 register on the current CPU"); + 0, 0, sysctl_ipa_size_4k, "P", + "Maximum size allowed for 4K-page guest IPA spaces"); -#endif +#endif // HYPERVISOR diff --git a/bsd/dev/dtrace/Makefile b/bsd/dev/dtrace/Makefile index a44fc6864..d0ed71bf4 100644 --- a/bsd/dev/dtrace/Makefile +++ b/bsd/dev/dtrace/Makefile @@ -8,5 +8,10 @@ include $(MakeInc_def) INSTTEXTFILES_SUBDIRS = scripts +INSTTEXTFILES_SUBDIRS_X86_64 = scripts +INSTTEXTFILES_SUBDIRS_X86_64H = scripts +INSTTEXTFILES_SUBDIRS_ARM = scripts +INSTTEXTFILES_SUBDIRS_ARM64 = scripts + include $(MakeInc_rule) include $(MakeInc_dir) diff --git a/bsd/dev/dtrace/blist.c b/bsd/dev/dtrace/blist.c index 6d219f95d..3fefa43b9 100644 --- a/bsd/dev/dtrace/blist.c +++ b/bsd/dev/dtrace/blist.c @@ -71,7 +71,10 @@ typedef unsigned int u_daddr_t; #include "blist.h" #include +#if !defined(__APPLE__) #define SWAPBLK_NONE ((daddr_t)-1) +#endif + #define malloc _MALLOC #define free _FREE #define M_SWAP M_TEMP diff --git a/bsd/dev/dtrace/blist.h b/bsd/dev/dtrace/blist.h index b670ca156..578cea2cc 100644 --- a/bsd/dev/dtrace/blist.h +++ b/bsd/dev/dtrace/blist.h @@ -90,6 +90,10 @@ typedef struct blist { #define BLIST_MAX_ALLOC BLIST_BMAP_RADIX +#if defined(__APPLE__) +#define SWAPBLK_NONE ((daddr_t)-1) +#endif /* __APPLE__ */ + extern blist_t blist_create(daddr_t blocks); extern void blist_destroy(blist_t blist); extern daddr_t blist_alloc(blist_t blist, daddr_t count); diff --git a/bsd/dev/dtrace/dtrace.c b/bsd/dev/dtrace/dtrace.c index 8b315b4b2..36d4f8223 100644 --- a/bsd/dev/dtrace/dtrace.c +++ b/bsd/dev/dtrace/dtrace.c @@ -109,6 +109,9 @@ #include #include + +extern addr64_t kvtophys(vm_offset_t va); + extern uint32_t pmap_find_phys(void *, uint64_t); extern boolean_t pmap_valid_page(uint32_t); 
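/*
 * [Editorial note, not part of the patch] The kvtophys() declaration added above backs the
 * new DIF_SUBR_KVTOPHYS subroutine introduced further down in this file, alongside the new
 * physmem read/write subroutines. All three are compiled in only on DEBUG or DEVELOPMENT
 * kernels; release kernels flag them as illegal DIF operations, and the physmem variants
 * additionally require destructive privileges. Condensed from the hunks below:
 *
 *	case DIF_SUBR_KVTOPHYS: {
 *	#if DEBUG || DEVELOPMENT
 *		regs[rd] = kvtophys(tupregs[0].dttk_value);
 *	#else
 *		DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
 *	#endif
 *		break;
 *	}
 */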
extern void OSKextRegisterKextsWithDTrace(void); @@ -154,7 +157,7 @@ extern void dtrace_proc_exit(proc_t*); */ uint64_t dtrace_buffer_memory_maxsize = 0; /* initialized in dtrace_init */ uint64_t dtrace_buffer_memory_inuse = 0; -int dtrace_destructive_disallow = 0; +int dtrace_destructive_disallow = 1; dtrace_optval_t dtrace_nonroot_maxsize = (16 * 1024 * 1024); size_t dtrace_difo_maxsize = (256 * 1024); dtrace_optval_t dtrace_dof_maxsize = (512 * 1024); @@ -252,7 +255,8 @@ static uint8_t dtrace_kerneluuid[16]; /* the 128-bit uuid */ * 20k elements allocated, the space saved is substantial. */ -struct zone *dtrace_probe_t_zone; +static ZONE_DECLARE(dtrace_probe_t_zone, "dtrace.dtrace_probe_t", + sizeof(dtrace_probe_t), ZC_NONE); static int dtrace_module_unloaded(struct kmod_info *kmod); @@ -2540,6 +2544,7 @@ dtrace_aggregate_sum(uint64_t *oval, uint64_t nval, uint64_t arg) * failure; if there is no space in the aggregation buffer, the data will be * dropped, and a corresponding counter incremented. */ +__attribute__((noinline)) static void dtrace_aggregate(dtrace_aggregation_t *agg, dtrace_buffer_t *dbuf, intptr_t offset, dtrace_buffer_t *buf, uint64_t expr, uint64_t arg) @@ -2902,7 +2907,7 @@ dtrace_speculation_commit(dtrace_state_t *state, processorid_t cpu, new = DTRACESPEC_COMMITTING; break; } - /*FALLTHROUGH*/ + OS_FALLTHROUGH; case DTRACESPEC_ACTIVEMANY: new = DTRACESPEC_COMMITTINGMANY; @@ -3012,6 +3017,7 @@ out: * do nothing. The state of the specified speculation is transitioned * according to the state transition diagram outlined in */ +__attribute__((noinline)) static void dtrace_speculation_discard(dtrace_state_t *state, processorid_t cpu, dtrace_specid_t which) @@ -3175,6 +3181,7 @@ dtrace_speculation_clean(dtrace_state_t *state) * the active CPU is not the specified CPU -- the speculation will be * atomically transitioned into the ACTIVEMANY state. */ +__attribute__((noinline)) static dtrace_buffer_t * dtrace_speculation_buffer(dtrace_state_t *state, processorid_t cpuid, dtrace_specid_t which) @@ -3356,6 +3363,20 @@ dtrace_dif_variable(dtrace_mstate_t *mstate, dtrace_state_t *state, uint64_t v, return (dtrace_getreg(find_user_regs(thread), ndx)); } + case DIF_VAR_VMREGS: { + uint64_t rval; + + if (!dtrace_priv_kernel(state)) + return (0); + + DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); + + rval = dtrace_getvmreg(ndx); + + DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); + + return (rval); + } case DIF_VAR_CURTHREAD: if (!dtrace_priv_kernel(state)) @@ -3388,6 +3409,14 @@ dtrace_dif_variable(dtrace_mstate_t *mstate, dtrace_state_t *state, uint64_t v, } return (mstate->dtms_machtimestamp); + case DIF_VAR_MACHCTIMESTAMP: + if (!(mstate->dtms_present & DTRACE_MSTATE_MACHCTIMESTAMP)) { + mstate->dtms_machctimestamp = mach_continuous_time(); + mstate->dtms_present |= DTRACE_MSTATE_MACHCTIMESTAMP; + } + return (mstate->dtms_machctimestamp); + + case DIF_VAR_CPU: return ((uint64_t) dtrace_get_thread_last_cpu_id(current_thread())); @@ -3570,7 +3599,8 @@ dtrace_dif_variable(dtrace_mstate_t *mstate, dtrace_state_t *state, uint64_t v, case DIF_VAR_EXECNAME: { char *xname = (char *)mstate->dtms_scratch_ptr; - size_t scratch_size = MAXCOMLEN+1; + char *pname = proc_best_name(curproc); + size_t scratch_size = sizeof(proc_name_t); /* The scratch allocation's lifetime is that of the clause. 
*/ if (!DTRACE_INSCRATCH(mstate, scratch_size)) { @@ -3582,7 +3612,7 @@ dtrace_dif_variable(dtrace_mstate_t *mstate, dtrace_state_t *state, uint64_t v, return (0); mstate->dtms_scratch_ptr += scratch_size; - proc_selfname( xname, scratch_size ); + strlcpy(xname, pname, scratch_size); return ((uint64_t)(uintptr_t)xname); } @@ -3984,7 +4014,7 @@ dtrace_json(uint64_t size, uintptr_t json, char *elemlist, int nelems, return (NULL); } - /* FALLTHRU */ + OS_FALLTHROUGH; case DTRACE_JSON_NUMBER_FRAC: if (cc == 'e' || cc == 'E') { *dd++ = cc; @@ -3998,7 +4028,7 @@ dtrace_json(uint64_t size, uintptr_t json, char *elemlist, int nelems, */ return (NULL); } - /* FALLTHRU */ + OS_FALLTHROUGH; case DTRACE_JSON_NUMBER_EXP: if (isdigit(cc) || cc == '+' || cc == '-') { *dd++ = cc; @@ -4414,6 +4444,9 @@ dtrace_dif_subr(uint_t subr, uint_t rd, uint64_t *regs, break; /* Can't climb process tree any further. */ p = (struct proc *)dtrace_loadptr((uintptr_t)&(p->p_pptr)); +#if __has_feature(ptrauth_calls) + p = ptrauth_strip(p, ptrauth_key_process_independent_data); +#endif if (*flags & CPU_DTRACE_FAULT) break; } @@ -5575,6 +5608,7 @@ inetout: regs[rd] = (uintptr_t)end + 1; break; } + case DIF_SUBR_STRIP: if (!dtrace_is_valid_ptrauth_key(tupregs[1].dttk_value)) { DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); @@ -5605,8 +5639,10 @@ inetout: regs[rd] = (uintptr_t)end + 1; break; } - if (dtrace_destructive_disallow) + if (dtrace_destructive_disallow || + !dtrace_priv_kernel_destructive(state)) { return; + } debugid = tupregs[0].dttk_value; for (i = 0; i < nargs - 1; i++) @@ -5622,8 +5658,10 @@ inetout: regs[rd] = (uintptr_t)end + 1; break; } - if (dtrace_destructive_disallow) + if (dtrace_destructive_disallow || + !dtrace_priv_kernel_destructive(state)) { return; + } uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; uint32_t debugid = tupregs[0].dttk_value; @@ -5646,7 +5684,48 @@ inetout: regs[rd] = (uintptr_t)end + 1; break; } -#endif + + case DIF_SUBR_MTONS: + absolutetime_to_nanoseconds(tupregs[0].dttk_value, ®s[rd]); + + break; + case DIF_SUBR_PHYSMEM_READ: { +#if DEBUG || DEVELOPMENT + if (dtrace_destructive_disallow || + !dtrace_priv_kernel_destructive(state)) { + return; + } + regs[rd] = dtrace_physmem_read(tupregs[0].dttk_value, + tupregs[1].dttk_value); +#else + DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); +#endif /* DEBUG || DEVELOPMENT */ + break; + } + case DIF_SUBR_PHYSMEM_WRITE: { +#if DEBUG || DEVELOPMENT + if (dtrace_destructive_disallow || + !dtrace_priv_kernel_destructive(state)) { + return; + } + + dtrace_physmem_write(tupregs[0].dttk_value, + tupregs[1].dttk_value, (size_t)tupregs[2].dttk_value); +#else + DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); +#endif /* DEBUG || DEVELOPMENT */ + break; + } + + case DIF_SUBR_KVTOPHYS: { +#if DEBUG || DEVELOPMENT + regs[rd] = kvtophys(tupregs[0].dttk_value); +#else + DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); +#endif /* DEBUG || DEVELOPMENT */ + break; + } +#endif /* defined(__APPLE__) */ } } @@ -5828,7 +5907,7 @@ dtrace_dif_emulate(dtrace_difo_t *difo, dtrace_mstate_t *mstate, *illval = regs[r1]; break; } - /*FALLTHROUGH*/ + OS_FALLTHROUGH; case DIF_OP_LDSB: regs[rd] = (int8_t)dtrace_load8(regs[r1]); break; @@ -5838,7 +5917,7 @@ dtrace_dif_emulate(dtrace_difo_t *difo, dtrace_mstate_t *mstate, *illval = regs[r1]; break; } - /*FALLTHROUGH*/ + OS_FALLTHROUGH; case DIF_OP_LDSH: regs[rd] = (int16_t)dtrace_load16(regs[r1]); break; @@ -5848,7 +5927,7 @@ dtrace_dif_emulate(dtrace_difo_t *difo, dtrace_mstate_t *mstate, *illval = regs[r1]; break; } - /*FALLTHROUGH*/ + OS_FALLTHROUGH; 
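/*
 * [Editorial note, not part of the patch] The bare FALLTHROUGH comments throughout this
 * emulator switch are being replaced with XNU's OS_FALLTHROUGH marker, which on compilers
 * that support it is expected to expand to the fallthrough attribute, letting
 * -Wimplicit-fallthrough verify that each fall-through is intentional. The surrounding
 * hunks all follow this shape (labels below are hypothetical):
 *
 *	switch (op) {
 *	case LOAD_CHECKED:
 *		if (!address_ok(addr)) {
 *			break;			// reject; do not perform the load
 *		}
 *		OS_FALLTHROUGH;			// deliberately continue into the plain load
 *	case LOAD_PLAIN:
 *		value = load(addr);
 *		break;
 *	}
 */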
case DIF_OP_LDSW: regs[rd] = (int32_t)dtrace_load32(regs[r1]); break; @@ -5858,7 +5937,7 @@ dtrace_dif_emulate(dtrace_difo_t *difo, dtrace_mstate_t *mstate, *illval = regs[r1]; break; } - /*FALLTHROUGH*/ + OS_FALLTHROUGH; case DIF_OP_LDUB: regs[rd] = dtrace_load8(regs[r1]); break; @@ -5868,7 +5947,7 @@ dtrace_dif_emulate(dtrace_difo_t *difo, dtrace_mstate_t *mstate, *illval = regs[r1]; break; } - /*FALLTHROUGH*/ + OS_FALLTHROUGH; case DIF_OP_LDUH: regs[rd] = dtrace_load16(regs[r1]); break; @@ -5878,7 +5957,7 @@ dtrace_dif_emulate(dtrace_difo_t *difo, dtrace_mstate_t *mstate, *illval = regs[r1]; break; } - /*FALLTHROUGH*/ + OS_FALLTHROUGH; case DIF_OP_LDUW: regs[rd] = dtrace_load32(regs[r1]); break; @@ -5888,7 +5967,7 @@ dtrace_dif_emulate(dtrace_difo_t *difo, dtrace_mstate_t *mstate, *illval = regs[r1]; break; } - /*FALLTHROUGH*/ + OS_FALLTHROUGH; case DIF_OP_LDX: regs[rd] = dtrace_load64(regs[r1]); break; @@ -6473,6 +6552,7 @@ dtrace_dif_emulate(dtrace_difo_t *difo, dtrace_mstate_t *mstate, return (0); } +__attribute__((noinline)) static void dtrace_action_breakpoint(dtrace_ecb_t *ecb) { @@ -6535,6 +6615,7 @@ dtrace_action_breakpoint(dtrace_ecb_t *ecb) debug_enter(c); } +__attribute__((noinline)) static void dtrace_action_panic(dtrace_ecb_t *ecb) { @@ -6643,6 +6724,7 @@ dtrace_action_pidresume(uint64_t pid) } } +__attribute__((noinline)) static void dtrace_action_chill(dtrace_mstate_t *mstate, hrtime_t val) { @@ -6688,6 +6770,7 @@ dtrace_action_chill(dtrace_mstate_t *mstate, hrtime_t val) cpu->cpu_dtrace_chilled += val; } +__attribute__((noinline)) static void dtrace_action_ustack(dtrace_mstate_t *mstate, dtrace_state_t *state, uint64_t *buf, uint64_t arg) @@ -6801,6 +6884,7 @@ out: mstate->dtms_scratch_ptr = old; } +__attribute__((noinline)) static void dtrace_store_by_ref(dtrace_difo_t *dp, caddr_t tomax, size_t size, size_t *valoffsp, uint64_t *valp, uint64_t end, int intuple, int dtkind) @@ -6931,7 +7015,19 @@ dtrace_probe(dtrace_id_t id, uint64_t arg0, uint64_t arg1, hrtime_t now; cookie = dtrace_probe_enter(id); + + /* Ensure that probe id is valid. 
*/ + if (id - 1 >= (dtrace_id_t)dtrace_nprobes) { + dtrace_probe_exit(cookie); + return; + } + probe = dtrace_probes[id - 1]; + if (probe == NULL) { + dtrace_probe_exit(cookie); + return; + } + cpuid = CPU->cpu_id; onintr = CPU_ON_INTR(CPU); @@ -8213,12 +8309,12 @@ top: case '\\': if ((c = *p++) == '\0') return (0); - /*FALLTHRU*/ + OS_FALLTHROUGH; default: if (c != s1) return (0); - /*FALLTHRU*/ + OS_FALLTHROUGH; case '?': if (s1 != '\0') @@ -9788,11 +9884,17 @@ dtrace_difo_validate(dtrace_difo_t *dp, dtrace_vstate_t *vstate, uint_t nregs, if (rd == 0) err += efunc(pc, "cannot write to %%r0\n"); - if (subr == DIF_SUBR_COPYOUT || - subr == DIF_SUBR_COPYOUTSTR || - subr == DIF_SUBR_KDEBUG_TRACE || - subr == DIF_SUBR_KDEBUG_TRACE_STRING) { + switch (subr) { + case DIF_SUBR_COPYOUT: + case DIF_SUBR_COPYOUTSTR: + case DIF_SUBR_KDEBUG_TRACE: + case DIF_SUBR_KDEBUG_TRACE_STRING: + case DIF_SUBR_PHYSMEM_READ: + case DIF_SUBR_PHYSMEM_WRITE: dp->dtdo_destructive = 1; + break; + default: + break; } break; case DIF_OP_PUSHTR: @@ -10114,34 +10216,34 @@ dtrace_difo_validate_helper(dtrace_difo_t *dp) break; case DIF_OP_CALL: - if (subr == DIF_SUBR_ALLOCA || - subr == DIF_SUBR_BCOPY || - subr == DIF_SUBR_COPYIN || - subr == DIF_SUBR_COPYINTO || - subr == DIF_SUBR_COPYINSTR || - subr == DIF_SUBR_INDEX || - subr == DIF_SUBR_INET_NTOA || - subr == DIF_SUBR_INET_NTOA6 || - subr == DIF_SUBR_INET_NTOP || - subr == DIF_SUBR_JSON || - subr == DIF_SUBR_LLTOSTR || - subr == DIF_SUBR_STRTOLL || - subr == DIF_SUBR_RINDEX || - subr == DIF_SUBR_STRCHR || - subr == DIF_SUBR_STRJOIN || - subr == DIF_SUBR_STRRCHR || - subr == DIF_SUBR_STRSTR || - subr == DIF_SUBR_KDEBUG_TRACE || - subr == DIF_SUBR_KDEBUG_TRACE_STRING || - subr == DIF_SUBR_HTONS || - subr == DIF_SUBR_HTONL || - subr == DIF_SUBR_HTONLL || - subr == DIF_SUBR_NTOHS || - subr == DIF_SUBR_NTOHL || - subr == DIF_SUBR_NTOHLL) + switch (subr) { + case DIF_SUBR_ALLOCA: + case DIF_SUBR_BCOPY: + case DIF_SUBR_COPYIN: + case DIF_SUBR_COPYINTO: + case DIF_SUBR_COPYINSTR: + case DIF_SUBR_HTONS: + case DIF_SUBR_HTONL: + case DIF_SUBR_HTONLL: + case DIF_SUBR_INDEX: + case DIF_SUBR_INET_NTOA: + case DIF_SUBR_INET_NTOA6: + case DIF_SUBR_INET_NTOP: + case DIF_SUBR_JSON: + case DIF_SUBR_LLTOSTR: + case DIF_SUBR_NTOHS: + case DIF_SUBR_NTOHL: + case DIF_SUBR_NTOHLL: + case DIF_SUBR_RINDEX: + case DIF_SUBR_STRCHR: + case DIF_SUBR_STRTOLL: + case DIF_SUBR_STRJOIN: + case DIF_SUBR_STRRCHR: + case DIF_SUBR_STRSTR: break; - - err += efunc(pc, "invalid subr %u\n", subr); + default: + err += efunc(pc, "invalid subr %u\n", subr); + } break; default: @@ -11329,7 +11431,7 @@ dtrace_ecb_action_add(dtrace_ecb_t *ecb, dtrace_actdesc_t *desc) (char *)(uintptr_t)arg); } - /*FALLTHROUGH*/ + OS_FALLTHROUGH; case DTRACEACT_LIBACT: case DTRACEACT_TRACEMEM: case DTRACEACT_TRACEMEM_DYNSIZE: @@ -11368,7 +11470,7 @@ dtrace_ecb_action_add(dtrace_ecb_t *ecb, dtrace_actdesc_t *desc) arg = DTRACE_USTACK_ARG(nframes, strsize); - /*FALLTHROUGH*/ + OS_FALLTHROUGH; case DTRACEACT_USTACK: if (desc->dtad_kind != DTRACEACT_JSTACK && (nframes = DTRACE_USTACK_NFRAMES(arg)) == 0) { @@ -13999,7 +14101,7 @@ dtrace_state_create(dev_t *devp, cred_t *cr, dtrace_state_t **new_state) state->dts_epid = DTRACE_EPIDNONE + 1; (void) snprintf(c, sizeof (c), "dtrace_aggid_%d", minor); - state->dts_aggid_arena = vmem_create(c, (void *)1, UINT32_MAX, 1, + state->dts_aggid_arena = vmem_create(c, (void *)1, INT32_MAX, 1, NULL, NULL, NULL, 0, VM_SLEEP | VMC_IDENTIFIER); if (devp != NULL) { @@ -14693,12 +14795,7 @@ 
dtrace_state_option(dtrace_state_t *state, dtrace_optid_t option, switch (option) { case DTRACEOPT_DESTRUCTIVE: - /* - * Prevent consumers from enabling destructive actions if DTrace - * is running in a restricted environment, or if actions are - * disallowed. - */ - if (dtrace_is_restricted() || dtrace_destructive_disallow) + if (dtrace_destructive_disallow) return (EACCES); state->dts_cred.dcr_destructive = 1; @@ -15060,6 +15157,7 @@ dtrace_helper_trace(dtrace_helper_action_t *helper, } } +__attribute__((noinline)) static uint64_t dtrace_helper(int which, dtrace_mstate_t *mstate, dtrace_state_t *state, uint64_t arg0, uint64_t arg1) @@ -16790,7 +16888,9 @@ dtrace_module_loaded(struct kmod_info *kmod, uint32_t flag) } /* We will instrument the module immediately using kernel symbols */ - ctl->mod_flags |= MODCTL_HAS_KERNEL_SYMBOLS; + if (!(flag & KMOD_DTRACE_NO_KERNEL_SYMS)) { + ctl->mod_flags |= MODCTL_HAS_KERNEL_SYMBOLS; + } lck_mtx_unlock(&dtrace_lock); @@ -17150,7 +17250,7 @@ dtrace_attach(dev_info_t *devi) LCK_MTX_ASSERT(&cpu_lock, LCK_MTX_ASSERT_OWNED); - dtrace_arena = vmem_create("dtrace", (void *)1, UINT32_MAX, 1, + dtrace_arena = vmem_create("dtrace", (void *)1, INT32_MAX, 1, NULL, NULL, NULL, 0, VM_SLEEP | VMC_IDENTIFIER); dtrace_state_cache = kmem_cache_create("dtrace_state_cache", @@ -18987,26 +19087,20 @@ helper_ioctl(dev_t dev, u_long cmd, caddr_t data, int fflag, struct proc *p) #define HELPER_MAJOR -24 /* let the kernel pick the device number */ -/* - * A struct describing which functions will get invoked for certain - * actions. - */ -static struct cdevsw helper_cdevsw = -{ - helper_open, /* open */ - helper_close, /* close */ - eno_rdwrt, /* read */ - eno_rdwrt, /* write */ - helper_ioctl, /* ioctl */ - (stop_fcn_t *)nulldev, /* stop */ - (reset_fcn_t *)nulldev, /* reset */ - NULL, /* tty's */ - eno_select, /* select */ - eno_mmap, /* mmap */ - eno_strat, /* strategy */ - eno_getc, /* getc */ - eno_putc, /* putc */ - 0 /* type */ +const static struct cdevsw helper_cdevsw = +{ + .d_open = helper_open, + .d_close = helper_close, + .d_read = eno_rdwrt, + .d_write = eno_rdwrt, + .d_ioctl = helper_ioctl, + .d_stop = (stop_fcn_t *)nulldev, + .d_reset = (reset_fcn_t *)nulldev, + .d_select = eno_select, + .d_mmap = eno_mmap, + .d_strategy = eno_strat, + .d_reserved_1 = eno_getc, + .d_reserved_2 = eno_putc, }; static int helper_majdevno = 0; @@ -19090,22 +19184,20 @@ dtrace_ast(void) #define DTRACE_MAJOR -24 /* let the kernel pick the device number */ -static struct cdevsw dtrace_cdevsw = -{ - _dtrace_open, /* open */ - _dtrace_close, /* close */ - eno_rdwrt, /* read */ - eno_rdwrt, /* write */ - _dtrace_ioctl, /* ioctl */ - (stop_fcn_t *)nulldev, /* stop */ - (reset_fcn_t *)nulldev, /* reset */ - NULL, /* tty's */ - eno_select, /* select */ - eno_mmap, /* mmap */ - eno_strat, /* strategy */ - eno_getc, /* getc */ - eno_putc, /* putc */ - 0 /* type */ +static const struct cdevsw dtrace_cdevsw = +{ + .d_open = _dtrace_open, + .d_close = _dtrace_close, + .d_read = eno_rdwrt, + .d_write = eno_rdwrt, + .d_ioctl = _dtrace_ioctl, + .d_stop = (stop_fcn_t *)nulldev, + .d_reset = (reset_fcn_t *)nulldev, + .d_select = eno_select, + .d_mmap = eno_mmap, + .d_strategy = eno_strat, + .d_reserved_1 = eno_getc, + .d_reserved_2 = eno_putc, }; lck_attr_t* dtrace_lck_attr; @@ -19131,16 +19223,23 @@ void dtrace_init( void ) { if (0 == gDTraceInited) { - int i, ncpu; + unsigned int i, ncpu; size_t size = sizeof(dtrace_buffer_memory_maxsize); + /* + * Disable destructive actions when dtrace is running 
+ * in a restricted environment + */ + dtrace_destructive_disallow = dtrace_is_restricted() && + !dtrace_are_restrictions_relaxed(); + /* * DTrace allocates buffers based on the maximum number * of enabled cpus. This call avoids any race when finding * that count. */ ASSERT(dtrace_max_cpus == 0); - ncpu = dtrace_max_cpus = ml_get_max_cpus(); + ncpu = dtrace_max_cpus = ml_wait_max_cpus(); /* * Retrieve the size of the physical memory in order to define @@ -19178,19 +19277,11 @@ dtrace_init( void ) return; } - /* - * Allocate the dtrace_probe_t zone - */ - dtrace_probe_t_zone = zinit(sizeof(dtrace_probe_t), - 1024 * sizeof(dtrace_probe_t), - sizeof(dtrace_probe_t), - "dtrace.dtrace_probe_t"); - /* * Create the dtrace lock group and attrs. */ dtrace_lck_attr = lck_attr_alloc_init(); - dtrace_lck_grp_attr= lck_grp_attr_alloc_init(); + dtrace_lck_grp_attr= lck_grp_attr_alloc_init(); dtrace_lck_grp = lck_grp_alloc_init("dtrace", dtrace_lck_grp_attr); /* @@ -19258,11 +19349,10 @@ dtrace_init( void ) * makes no sense... */ if (!PE_parse_boot_argn("dtrace_dof_mode", &dtrace_dof_mode, sizeof (dtrace_dof_mode))) { -#if CONFIG_EMBEDDED - /* Disable DOF mode by default for performance reasons */ - dtrace_dof_mode = DTRACE_DOF_MODE_NEVER; -#else +#if defined(XNU_TARGET_OS_OSX) dtrace_dof_mode = DTRACE_DOF_MODE_LAZY_ON; +#else + dtrace_dof_mode = DTRACE_DOF_MODE_NEVER; #endif } diff --git a/bsd/dev/dtrace/dtrace_glue.c b/bsd/dev/dtrace/dtrace_glue.c index cd047e8d9..ffbd0bb15 100644 --- a/bsd/dev/dtrace/dtrace_glue.c +++ b/bsd/dev/dtrace/dtrace_glue.c @@ -56,6 +56,8 @@ /* Solaris proc_t is the struct. Darwin's proc_t is a pointer to it. */ #define proc_t struct proc /* Steer clear of the Darwin typedef for proc_t */ +KALLOC_HEAP_DEFINE(KHEAP_DTRACE, "dtrace", KHEAP_ID_DEFAULT); + void dtrace_sprlock(proc_t *p) { @@ -636,10 +638,9 @@ dt_kmem_alloc_site(size_t size, int kmflag, vm_allocation_site_t *site) /* * We ignore the M_NOWAIT bit in kmflag (all of kmflag, in fact). - * Requests larger than 8K with M_NOWAIT fail in kalloc_canblock. + * Requests larger than 8K with M_NOWAIT fail in kalloc_ext. */ - vm_size_t vsize = size; - return kalloc_canblock(&vsize, TRUE, site); + return kalloc_ext(KHEAP_DTRACE, size, Z_WAITOK, site).addr; } void * @@ -649,35 +650,15 @@ dt_kmem_zalloc_site(size_t size, int kmflag, vm_allocation_site_t *site) /* * We ignore the M_NOWAIT bit in kmflag (all of kmflag, in fact). - * Requests larger than 8K with M_NOWAIT fail in kalloc_canblock. + * Requests larger than 8K with M_NOWAIT fail in kalloc_ext. */ - vm_size_t vsize = size; - void* buf = kalloc_canblock(&vsize, TRUE, site); - - if (!buf) { - return NULL; - } - - bzero(buf, size); - - return buf; + return kalloc_ext(KHEAP_DTRACE, size, Z_WAITOK | Z_ZERO, site).addr; } void dt_kmem_free(void *buf, size_t size) { -#pragma unused(size) - /* - * DTrace relies on this, its doing a lot of NULL frees. - * A null free causes the debug builds to panic. 
- */ - if (buf == NULL) { - return; - } - - ASSERT(size > 0); - - kfree(buf, size); + kheap_free(KHEAP_DTRACE, buf, size); } @@ -819,12 +800,13 @@ vmem_create(const char *name, void *base, size_t size, size_t quantum, void *ign ASSERT(NULL == ignore6); ASSERT(NULL == source); ASSERT(0 == qcache_max); + ASSERT(size <= INT32_MAX); ASSERT(vmflag & VMC_IDENTIFIER); size = MIN(128, size); /* Clamp to 128 initially, since the underlying data structure is pre-allocated */ - p->blist = bl = blist_create( size ); - blist_free(bl, 0, size); + p->blist = bl = blist_create((daddr_t)size); + blist_free(bl, 0, (daddr_t)size); if (base) { blist_alloc( bl, (daddr_t)(uintptr_t)base ); /* Chomp off initial ID(s) */ } @@ -841,11 +823,11 @@ vmem_alloc(vmem_t *vmp, size_t size, int vmflag) p = blist_alloc(bl, (daddr_t)size); - if ((daddr_t)-1 == p) { + if (p == SWAPBLK_NONE) { blist_resize(&bl, (bl->bl_blocks) << 1, 1); q->blist = bl; p = blist_alloc(bl, (daddr_t)size); - if ((daddr_t)-1 == p) { + if (p == SWAPBLK_NONE) { panic("vmem_alloc: failure after blist_resize!"); } } @@ -1335,39 +1317,6 @@ cmn_err( int level, const char *format, ... ) uprintf("\n"); } -/* - * History: - * 2002-01-24 gvdl Initial implementation of strstr - */ - -__private_extern__ const char * -strstr(const char *in, const char *str) -{ - char c; - size_t len; - if (!in || !str) { - return in; - } - - c = *str++; - if (!c) { - return (const char *) in; // Trivial empty string case - } - len = strlen(str); - do { - char sc; - - do { - sc = *in++; - if (!sc) { - return (char *) 0; - } - } while (sc != c); - } while (strncmp(in, str, len) != 0); - - return (const char *) (in - 1); -} - const void* bsearch(const void *key, const void *base0, size_t nmemb, size_t size, int (*compar)(const void *, const void *)) { diff --git a/bsd/dev/dtrace/dtrace_subr.c b/bsd/dev/dtrace/dtrace_subr.c index 5f28ca810..2ac848429 100644 --- a/bsd/dev/dtrace/dtrace_subr.c +++ b/bsd/dev/dtrace/dtrace_subr.c @@ -39,6 +39,10 @@ #if CONFIG_CSR #include #include + +#if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR) +extern bool csr_unsafe_kernel_text; +#endif #endif /* @@ -203,7 +207,7 @@ dtrace_proc_waitfor(dtrace_procdesc_t* pdesc) { * Never trust user input, compute the length of the process name and ensure the * string is null terminated. 
*/ - pdesc->p_name_length = strnlen(pdesc->p_name, sizeof(pdesc->p_name)); + pdesc->p_name_length = (int) strnlen(pdesc->p_name, sizeof(pdesc->p_name)); if (pdesc->p_name_length >= (int) sizeof(pdesc->p_name)) return -1; @@ -329,6 +333,45 @@ dtrace_is_valid_ptrauth_key(uint64_t key) #endif /* __has_feature(ptrauth_calls) */ } +uint64_t +dtrace_physmem_read(uint64_t addr, size_t size) +{ + switch (size) { + case 1: + return (uint64_t)ml_phys_read_byte_64((addr64_t)addr); + case 2: + return (uint64_t)ml_phys_read_half_64((addr64_t)addr); + case 4: + return (uint64_t)ml_phys_read_64((addr64_t)addr); + case 8: + return (uint64_t)ml_phys_read_double_64((addr64_t)addr); + } + DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); + + return (0); +} + +void +dtrace_physmem_write(uint64_t addr, uint64_t data, size_t size) +{ + switch (size) { + case 1: + ml_phys_write_byte_64((addr64_t)addr, (unsigned int)data); + break; + case 2: + ml_phys_write_half_64((addr64_t)addr, (unsigned int)data); + break; + case 4: + ml_phys_write_64((addr64_t)addr, (unsigned int)data); + break; + case 8: + ml_phys_write_double_64((addr64_t)addr, (unsigned long long)data); + break; + default: + DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); + } +} + static minor_t next_minor = 0; static dtrace_state_t* dtrace_clients[DTRACE_NCLIENTS] = {NULL}; diff --git a/bsd/dev/dtrace/fasttrap.c b/bsd/dev/dtrace/fasttrap.c index e90e109f0..b519a7a3b 100644 --- a/bsd/dev/dtrace/fasttrap.c +++ b/bsd/dev/dtrace/fasttrap.c @@ -207,7 +207,8 @@ static void fasttrap_proc_release(fasttrap_proc_t *); * 20k elements allocated, the space saved is substantial. */ -struct zone *fasttrap_tracepoint_t_zone; +ZONE_DECLARE(fasttrap_tracepoint_t_zone, "dtrace.fasttrap_tracepoint_t", + sizeof(fasttrap_tracepoint_t), ZC_NONE); /* * APPLE NOTE: fasttrap_probe_t's are variable in size. Some quick profiling has shown @@ -561,6 +562,57 @@ fasttrap_pid_cleanup(uint32_t work) lck_mtx_unlock(&fasttrap_cleanup_mtx); } +static int +fasttrap_setdebug(proc_t *p) +{ + LCK_MTX_ASSERT(&p->p_mlock, LCK_MTX_ASSERT_OWNED); + + /* + * CS_KILL and CS_HARD will cause code-signing to kill the process + * when the process text is modified, so register the intent + * to allow invalid access beforehand. + */ + if ((p->p_csflags & (CS_KILL|CS_HARD))) { + proc_unlock(p); + for (int i = 0; i < DTRACE_NCLIENTS; i++) { + dtrace_state_t *state = dtrace_state_get(i); + if (state == NULL) + continue; + if (state->dts_cred.dcr_cred == NULL) + continue; + /* + * The get_task call flags whether the process should + * be flagged to have the cs_allow_invalid call + * succeed. We want the best credential that any dtrace + * client has, so try all of them. + */ + + /* + * mac_proc_check_get_task() can trigger upcalls. It's + * not safe to hold proc references accross upcalls, so + * just drop the reference. Given the context, it + * should not be possible for the process to actually + * disappear. + */ + struct proc_ident pident = proc_ident(p); + sprunlock(p); + p = PROC_NULL; + + mac_proc_check_get_task(state->dts_cred.dcr_cred, &pident); + + p = sprlock(pident.p_pid); + if (p == PROC_NULL) { + return (ESRCH); + } + } + int rc = cs_allow_invalid(p); + proc_lock(p); + if (rc == 0) { + return (EACCES); + } + } + return (0); +} /* * This is called from cfork() via dtrace_fasttrap_fork(). 
The child @@ -598,6 +650,13 @@ fasttrap_fork(proc_t *p, proc_t *cp) return; } + proc_lock(cp); + if (fasttrap_setdebug(cp) == ESRCH) { + printf("fasttrap_fork: failed to re-acquire proc\n"); + return; + } + proc_unlock(cp); + /* * Iterate over every tracepoint looking for ones that belong to the * parent process, and remove each from the child process. @@ -1146,24 +1205,23 @@ fasttrap_pid_enable(void *arg, dtrace_id_t id, void *parg) } proc_lock(p); + int p_pid = proc_pid(p); - if ((p->p_csflags & (CS_KILL|CS_HARD))) { + rc = fasttrap_setdebug(p); + switch (rc) { + case EACCES: proc_unlock(p); - for (i = 0; i < DTRACE_NCLIENTS; i++) { - dtrace_state_t *state = dtrace_state_get(i); - if (state == NULL) - continue; - if (state->dts_cred.dcr_cred == NULL) - continue; - mac_proc_check_get_task(state->dts_cred.dcr_cred, p); - } - rc = cs_allow_invalid(p); - if (rc == 0) { - sprunlock(p); - cmn_err(CE_WARN, "process doesn't allow invalid code pages, failing to install fasttrap probe\n"); - return (0); - } - proc_lock(p); + sprunlock(p); + cmn_err(CE_WARN, "Failed to install fasttrap probe for pid %d: " + "Process does not allow invalid code pages\n", p_pid); + return (0); + case ESRCH: + cmn_err(CE_WARN, "Failed to install fasttrap probe for pid %d: " + "Failed to re-acquire process\n", p_pid); + return (0); + default: + assert(rc == 0); + break; } /* @@ -2616,12 +2674,13 @@ static int _fasttrap_ioctl(dev_t dev, u_long cmd, caddr_t data, int fflag, struct proc *p) { int err, rv = 0; - user_addr_t uaddrp; + user_addr_t uaddrp; - if (proc_is64bit(p)) - uaddrp = *(user_addr_t *)data; - else - uaddrp = (user_addr_t) *(uint32_t *)data; + if (proc_is64bit(p)) { + uaddrp = *(user_addr_t *)data; + } else { + uaddrp = (user_addr_t) *(uint32_t *)data; + } err = fasttrap_ioctl(dev, cmd, uaddrp, fflag, CRED(), &rv); @@ -2640,27 +2699,20 @@ static int fasttrap_inited = 0; #define FASTTRAP_MAJOR -24 /* let the kernel pick the device number */ -/* - * A struct describing which functions will get invoked for certain - * actions. - */ - -static struct cdevsw fasttrap_cdevsw = +static const struct cdevsw fasttrap_cdevsw = { - _fasttrap_open, /* open */ - eno_opcl, /* close */ - eno_rdwrt, /* read */ - eno_rdwrt, /* write */ - _fasttrap_ioctl, /* ioctl */ - (stop_fcn_t *)nulldev, /* stop */ - (reset_fcn_t *)nulldev, /* reset */ - NULL, /* tty's */ - eno_select, /* select */ - eno_mmap, /* mmap */ - eno_strat, /* strategy */ - eno_getc, /* getc */ - eno_putc, /* putc */ - 0 /* type */ + .d_open = _fasttrap_open, + .d_close = eno_opcl, + .d_read = eno_rdwrt, + .d_write = eno_rdwrt, + .d_ioctl = _fasttrap_ioctl, + .d_stop = (stop_fcn_t *)nulldev, + .d_reset = (reset_fcn_t *)nulldev, + .d_select = eno_select, + .d_mmap = eno_mmap, + .d_strategy = eno_strat, + .d_reserved_1 = eno_getc, + .d_reserved_2 = eno_putc, }; void fasttrap_init(void); @@ -2688,25 +2740,15 @@ fasttrap_init( void ) return; } - /* - * Allocate the fasttrap_tracepoint_t zone - */ - fasttrap_tracepoint_t_zone = zinit(sizeof(fasttrap_tracepoint_t), - 1024 * sizeof(fasttrap_tracepoint_t), - sizeof(fasttrap_tracepoint_t), - "dtrace.fasttrap_tracepoint_t"); - /* * fasttrap_probe_t's are variable in size. We use an array of zones to * cover the most common sizes. 
*/ int i; for (i=1; i #include +#include + /* #include */ struct savearea_t; /* Used anonymously */ @@ -150,7 +152,7 @@ fbt_enable(void *arg, dtrace_id_t id, void *parg) continue; } - dtrace_casptr(&tempDTraceTrapHook, NULL, fbt_perfCallback); + dtrace_casptr(&tempDTraceTrapHook, NULL, ptrauth_nop_cast(void *, &fbt_perfCallback)); if (tempDTraceTrapHook != (perfCallback)fbt_perfCallback) { if (fbt_verbose) { cmn_err(CE_NOTE, "fbt_enable is failing for probe %s " @@ -272,7 +274,7 @@ fbt_resume(void *arg, dtrace_id_t id, void *parg) continue; } - dtrace_casptr(&tempDTraceTrapHook, NULL, fbt_perfCallback); + dtrace_casptr(&tempDTraceTrapHook, NULL, ptrauth_nop_cast(void *, &fbt_perfCallback)); if (tempDTraceTrapHook != (perfCallback)fbt_perfCallback) { if (fbt_verbose) { cmn_err(CE_NOTE, "fbt_resume is failing for probe %s " @@ -478,6 +480,10 @@ fbt_provide_module(void *arg, struct modctl *ctl) ASSERT(dtrace_kernel_symbol_mode != DTRACE_KERNEL_SYMBOLS_NEVER); LCK_MTX_ASSERT(&mod_lock, LCK_MTX_ASSERT_OWNED); + if (dtrace_fbt_probes_restricted()) { + return; + } + // Update the "ignore blacklist" bit if (ignore_fbt_blacklist) { ctl->mod_flags |= MODCTL_FBT_PROVIDE_BLACKLISTED_PROBES; @@ -577,27 +583,20 @@ _fbt_open(dev_t dev, int flags, int devtype, struct proc *p) #define FBT_MAJOR -24 /* let the kernel pick the device number */ - -/* - * A struct describing which functions will get invoked for certain - * actions. - */ -static struct cdevsw fbt_cdevsw = +static const struct cdevsw fbt_cdevsw = { - _fbt_open, /* open */ - eno_opcl, /* close */ - eno_rdwrt, /* read */ - eno_rdwrt, /* write */ - eno_ioctl, /* ioctl */ - (stop_fcn_t *)nulldev, /* stop */ - (reset_fcn_t *)nulldev, /* reset */ - NULL, /* tty's */ - eno_select, /* select */ - eno_mmap, /* mmap */ - eno_strat, /* strategy */ - eno_getc, /* getc */ - eno_putc, /* putc */ - 0 /* type */ + .d_open = _fbt_open, + .d_close = eno_opcl, + .d_read = eno_rdwrt, + .d_write = eno_rdwrt, + .d_ioctl = eno_ioctl, + .d_stop = (stop_fcn_t *)nulldev, + .d_reset = (reset_fcn_t *)nulldev, + .d_select = eno_select, + .d_mmap = eno_mmap, + .d_strategy = eno_strat, + .d_reserved_1 = eno_getc, + .d_reserved_2 = eno_putc, }; #undef kmem_alloc /* from its binding to dt_kmem_alloc glue */ diff --git a/bsd/dev/dtrace/fbt_blacklist.c b/bsd/dev/dtrace/fbt_blacklist.c index 8b7d371cf..a65e6a477 100644 --- a/bsd/dev/dtrace/fbt_blacklist.c +++ b/bsd/dev/dtrace/fbt_blacklist.c @@ -172,6 +172,7 @@ const char * fbt_blacklist[] = CLOSURE(mt_update_thread) CRITICAL(nanoseconds_to_absolutetime) CRITICAL(nanotime_to_absolutetime) + CRITICAL(no_asts) CRITICAL(ovbcopy) CRITICAL(packA) X86_ONLY(pal_) @@ -187,7 +188,9 @@ const char * fbt_blacklist[] = X86_ONLY(pmTimerSave) X86_ONLY(pmUnRegister) X86_ONLY(pmap64_pdpt) + CLOSURE(pmap_find_pa) CLOSURE(pmap_find_phys) + ARM_ONLY(pmap_get_cpu_data) CLOSURE(pmap_get_mapwindow) CLOSURE(pmap_pde) CLOSURE(pmap_pde_internal0) @@ -196,12 +199,13 @@ const char * fbt_blacklist[] = CLOSURE(pmap_pte_internal) CLOSURE(pmap_put_mapwindow) CLOSURE(pmap_valid_page) + CLOSURE(pmap_vtophys) X86_ONLY(pms) CRITICAL(power_management_init) CRITICAL(preemption_underflow_panic) CLOSURE(prf) + CLOSURE(proc_best_name) CLOSURE(proc_is64bit) - CLOSURE(proc_selfname) CRITICAL(rbtrace_bt) CRITICAL(register_cpu_setup_func) CRITICAL(ret64_iret) diff --git a/bsd/dev/dtrace/lockprof.c b/bsd/dev/dtrace/lockprof.c index 12f777ae2..52ec74632 100644 --- a/bsd/dev/dtrace/lockprof.c +++ b/bsd/dev/dtrace/lockprof.c @@ -25,6 +25,12 @@ * * 
@APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ +#include + +#include +#include +#include +#include #include #include #include @@ -32,16 +38,10 @@ #if LOCK_STATS -#define SPIN_HELD 0 -#define SPIN_MISS 1 -#define SPIN_SPIN 2 - -#define SPIN_HELD_PREFIX "spin-held-" -#define SPIN_MISS_PREFIX "spin-miss-" -#define SPIN_SPIN_PREFIX "spin-spin-" +#define LP_NODE "lockprof" -#define LOCKGROUPSTAT_AFRAMES 1 -#define LOCKGROUPSTAT_LEN 64 +#define LOCKPROF_AFRAMES 3 +#define LOCKPROF_LEN 64 static dtrace_provider_id_t lockprof_id; @@ -49,30 +49,62 @@ decl_lck_mtx_data(extern, lck_grp_lock); extern queue_head_t lck_grp_queue; extern unsigned int lck_grp_cnt; +extern void lck_grp_reference(lck_grp_t *grp); +extern void lck_grp_deallocate(lck_grp_t *grp); + #define LOCKPROF_MAX 10000 /* maximum number of lockprof probes */ static uint32_t lockprof_count; /* current number of lockprof probes */ +enum probe_flags { + /* + * Counts time spent spinning/blocking + */ + TIME_EVENT = 0x01, + /* + * Requires LCK_GRP_ATTR_STAT to be set on the lock + * group, either via lck_grp_attr_setsta on the lock group, + * or globally via the lcks=3 boot-arg + */ + STAT_NEEDED = 0x02 +}; + static const struct { - int kind; const char *prefix; - bool time_event; -} events[] = { - {SPIN_HELD, SPIN_HELD_PREFIX, false}, - {SPIN_MISS, SPIN_MISS_PREFIX, false}, - {SPIN_SPIN, SPIN_SPIN_PREFIX, true}, - {0, NULL, false} + int flags; + size_t count_offset; + size_t stat_offset; +} probes[] = { + {"spin-held-", 0, offsetof(lck_grp_t, lck_grp_spincnt), offsetof(lck_grp_stats_t, lgss_spin_held)}, + {"spin-miss-", 0, offsetof(lck_grp_t, lck_grp_spincnt), offsetof(lck_grp_stats_t, lgss_spin_miss)}, + {"spin-spin-", TIME_EVENT, offsetof(lck_grp_t, lck_grp_spincnt), offsetof(lck_grp_stats_t, lgss_spin_spin)}, + {"ticket-held-", 0, offsetof(lck_grp_t, lck_grp_ticketcnt), offsetof(lck_grp_stats_t, lgss_ticket_held)}, + {"ticket-miss-", 0, offsetof(lck_grp_t, lck_grp_ticketcnt), offsetof(lck_grp_stats_t, lgss_ticket_miss)}, + {"ticket-spin-", TIME_EVENT, offsetof(lck_grp_t, lck_grp_ticketcnt), offsetof(lck_grp_stats_t, lgss_ticket_spin)}, +#if HAS_EXT_MUTEXES + {"adaptive-held-", STAT_NEEDED, offsetof(lck_grp_t, lck_grp_mtxcnt), offsetof(lck_grp_stats_t, lgss_mtx_held)}, + {"adaptive-miss-", STAT_NEEDED, offsetof(lck_grp_t, lck_grp_mtxcnt), offsetof(lck_grp_stats_t, lgss_mtx_miss)}, + {"adaptive-wait-", STAT_NEEDED, offsetof(lck_grp_t, lck_grp_mtxcnt), offsetof(lck_grp_stats_t, lgss_mtx_wait)}, + {"adaptive-direct-wait-", STAT_NEEDED, offsetof(lck_grp_t, lck_grp_mtxcnt), offsetof(lck_grp_stats_t, lgss_mtx_direct_wait)}, +#endif /* HAS_EXT_MUTEXES */ + {NULL, false, 0, 0} }; +/* + * Default defined probes for counting events + */ const static int hold_defaults[] = { - 100, 1000 + 10000 /* 10000 events */ }; +/* + * Default defined probes for time events + */ const static struct { unsigned int time; const char *suffix; uint64_t mult; } cont_defaults[] = { - {100, "ms", NANOSEC / MILLISEC} + {100, "ms", NANOSEC / MILLISEC} /* 100 ms */ }; typedef struct lockprof_probe { @@ -88,25 +120,37 @@ lockprof_invoke(lck_grp_t *grp, lck_grp_stat_t *stat, uint64_t val) dtrace_probe(stat->lgs_probeid, (uintptr_t)grp, val, 0, 0, 0); } +static int +lockprof_lock_count(lck_grp_t *grp, int kind) +{ + return *(int*)((void*)(grp) + probes[kind].count_offset); +} + static void probe_create(int kind, const char *suffix, const char *grp_name, uint64_t count, uint64_t mult) { - char name[LOCKGROUPSTAT_LEN]; + char name[LOCKPROF_LEN]; lck_mtx_lock(&lck_grp_lock); 
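/*
 * [Editorial sketch, not part of the patch] The reworked probes[] table above drives the
 * lockprof provider off offsetof(): each probe kind records where its per-group lock count
 * and its lck_grp_stats_t member live, so lockprof_lock_count() and lockprof_stat() can use
 * plain pointer arithmetic instead of a switch per kind. A self-contained userspace
 * illustration of the same table-of-offsets pattern, with hypothetical struct and field
 * names:
 */
#include <stddef.h>
#include <stdio.h>

struct demo_grp {
	int spin_cnt;
	int ticket_cnt;
};

static const struct {
	const char *prefix;
	size_t count_offset;
} demo_probes[] = {
	{ "spin-",   offsetof(struct demo_grp, spin_cnt) },
	{ "ticket-", offsetof(struct demo_grp, ticket_cnt) },
};

static int
demo_lock_count(const struct demo_grp *grp, int kind)
{
	/* Same shape as lockprof_lock_count(): base pointer plus the recorded offset. */
	return *(const int *)((const char *)grp + demo_probes[kind].count_offset);
}

int
main(void)
{
	struct demo_grp g = { .spin_cnt = 3, .ticket_cnt = 7 };

	printf("%s groups: %d\n", demo_probes[1].prefix, demo_lock_count(&g, 1));
	return 0;
}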
lck_grp_t *grp = (lck_grp_t*)queue_first(&lck_grp_queue); uint64_t limit = count * mult; - if (events[kind].time_event) { + if (probes[kind].flags & TIME_EVENT) { nanoseconds_to_absolutetime(limit, &limit); } for (unsigned int i = 0; i < lck_grp_cnt; i++, grp = (lck_grp_t*)queue_next((queue_entry_t)grp)) { if (!grp_name || grp_name[0] == '\0' || strcmp(grp_name, grp->lck_grp_name) == 0) { - snprintf(name, sizeof(name), "%s%llu%s", events[kind].prefix, count, suffix ?: ""); + snprintf(name, sizeof(name), "%s%llu%s", probes[kind].prefix, count, suffix ?: ""); if (dtrace_probe_lookup(lockprof_id, grp->lck_grp_name, NULL, name) != 0) { continue; } + if (lockprof_lock_count(grp, kind) == 0) { + continue; + } + if ((probes[kind].flags & STAT_NEEDED) && !(grp->lck_grp_attr & LCK_GRP_ATTR_STAT)) { + continue; + } if (lockprof_count >= LOCKPROF_MAX) { break; } @@ -116,8 +160,10 @@ probe_create(int kind, const char *suffix, const char *grp_name, uint64_t count, probe->lockprof_limit = limit; probe->lockprof_grp = grp; + lck_grp_reference(grp); + probe->lockprof_id = dtrace_probe_create(lockprof_id, grp->lck_grp_name, NULL, name, - LOCKGROUPSTAT_AFRAMES, probe); + LOCKPROF_AFRAMES, probe); lockprof_count++; } @@ -129,15 +175,22 @@ static void lockprof_provide(void *arg, const dtrace_probedesc_t *desc) { #pragma unused(arg) - size_t event_id, i, len; + size_t event_id, i, j, len; if (desc == NULL) { for (i = 0; i < sizeof(hold_defaults) / sizeof(hold_defaults[0]); i++) { - probe_create(SPIN_HELD, NULL, NULL, hold_defaults[i], 1); - probe_create(SPIN_MISS, NULL, NULL, hold_defaults[i], 1); + for (j = 0; probes[j].prefix != NULL; j++) { + if (!(probes[j].flags & TIME_EVENT)) { + probe_create(j, NULL, NULL, hold_defaults[i], 1); + } + } } for (i = 0; i < sizeof(cont_defaults) / sizeof(cont_defaults[0]); i++) { - probe_create(SPIN_SPIN, cont_defaults[i].suffix, NULL, cont_defaults[i].time, cont_defaults[i].mult); + for (j = 0; probes[j].prefix != NULL; j++) { + if (probes[j].flags & TIME_EVENT) { + probe_create(j, cont_defaults[i].suffix, NULL, cont_defaults[i].time, cont_defaults[i].mult); + } + } } return; } @@ -160,16 +213,16 @@ lockprof_provide(void *arg, const dtrace_probedesc_t *desc) name = desc->dtpd_name; - for (event_id = 0; events[event_id].prefix != NULL; event_id++) { - len = strlen(events[event_id].prefix); + for (event_id = 0; probes[event_id].prefix != NULL; event_id++) { + len = strlen(probes[event_id].prefix); - if (strncmp(name, events[event_id].prefix, len) != 0) { + if (strncmp(name, probes[event_id].prefix, len) != 0) { continue; } break; } - if (events[event_id].prefix == NULL) { + if (probes[event_id].prefix == NULL) { return; } @@ -200,7 +253,7 @@ lockprof_provide(void *arg, const dtrace_probedesc_t *desc) return; } - if (events[event_id].time_event) { + if (probes[event_id].flags & TIME_EVENT) { for (i = 0, mult = 0; suffixes[i].name != NULL; i++) { if (strncasecmp(suffixes[i].name, suffix, strlen(suffixes[i].name) + 1) == 0) { mult = suffixes[i].mult; @@ -214,23 +267,14 @@ lockprof_provide(void *arg, const dtrace_probedesc_t *desc) return; } - probe_create(events[event_id].kind, suffix, desc->dtpd_mod, val, mult); + probe_create(event_id, suffix, desc->dtpd_mod, val, mult); } static lck_grp_stat_t* lockprof_stat(lck_grp_t *grp, int kind) { - switch (kind) { - case SPIN_HELD: - return &grp->lck_grp_stats.lgss_spin_held; - case SPIN_MISS: - return &grp->lck_grp_stats.lgss_spin_miss; - case SPIN_SPIN: - return &grp->lck_grp_stats.lgss_spin_spin; - default: - return NULL; - } + 
return (lck_grp_stat_t*)((void*)&grp->lck_grp_stats + probes[kind].stat_offset); } static int @@ -294,6 +338,7 @@ lockprof_destroy(void *arg, dtrace_id_t id, void *parg) { #pragma unused(arg, id) lockprof_probe_t *probe = (lockprof_probe_t*)parg; + lck_grp_deallocate(probe->lockprof_grp); kmem_free(probe, sizeof(lockprof_probe_t)); lockprof_count--; } @@ -338,14 +383,55 @@ static dtrace_pops_t lockprof_pops = { .dtps_usermode = NULL, .dtps_destroy = lockprof_destroy }; + +static int +_lockprof_open(dev_t dev, int flags, int devtype, struct proc *p) +{ +#pragma unused(dev,flags,devtype,p) + return 0; +} + +static const struct cdevsw lockprof_cdevsw = +{ + .d_open = _lockprof_open, + .d_close = eno_opcl, + .d_read = eno_rdwrt, + .d_write = eno_rdwrt, + .d_ioctl = eno_ioctl, + .d_stop = (stop_fcn_t *)nulldev, + .d_reset = (reset_fcn_t *)nulldev, + .d_select = eno_select, + .d_mmap = eno_mmap, + .d_strategy = eno_strat, + .d_reserved_1 = eno_getc, + .d_reserved_2 = eno_putc, +}; + + #endif /* LOCK_STATS */ void lockprof_init(void); void lockprof_init(void) { #if LOCK_STATS - dtrace_register("lockprof", &lockprof_attr, - DTRACE_PRIV_KERNEL, NULL, - &lockprof_pops, NULL, &lockprof_id); + int majorno = cdevsw_add(-1, &lockprof_cdevsw); + + if (majorno < 0) { + panic("dtrace: failed to allocate a major number"); + return; + } + + if (dtrace_register(LP_NODE, &lockprof_attr, DTRACE_PRIV_KERNEL, + NULL, &lockprof_pops, NULL, &lockprof_id) != 0) { + panic("dtrace: failed to register lockprof provider"); + } + + dev_t dev = makedev(majorno, 0); + + if (devfs_make_node( dev, DEVFS_CHAR, UID_ROOT, GID_WHEEL, 0666, + LP_NODE, 0 ) == NULL) { + panic("dtrace: devfs_make_node failed for lockprof"); + } + #endif /* LOCK_STATS */ } diff --git a/bsd/dev/dtrace/lockstat.c b/bsd/dev/dtrace/lockstat.c index 8c44121a6..b5a669e44 100644 --- a/bsd/dev/dtrace/lockstat.c +++ b/bsd/dev/dtrace/lockstat.c @@ -41,8 +41,6 @@ #include -#include - #define membar_producer dtrace_membar_producer #define PROBE_ARGS0(a, b, c, d, e) "\000" @@ -57,23 +55,12 @@ #define LOCKSTAT_PROBE(func, name, probe, ...) 
\ {func, name, probe, DTRACE_IDNONE, PROBE_ARGS(__VA_ARGS__)} -/* - * Hot patch values, x86 - */ #if defined(__x86_64__) -#define NOP 0x90 -#define RET 0xc3 #define LOCKSTAT_AFRAMES 1 -#elif defined(__arm__) -#define NOP 0xE1A00000 -#define BXLR 0xE12FFF1E -#define LOCKSTAT_AFRAMES 2 -#elif defined(__arm64__) -#define NOP 0xD503201F -#define RET 0xD65f03c0 +#elif defined(__arm__) || defined(__arm64__) #define LOCKSTAT_AFRAMES 2 #else -#error "not ported to this architecture" +#error "architecture not supported" #endif typedef struct lockstat_probe { @@ -86,84 +73,49 @@ typedef struct lockstat_probe { lockstat_probe_t lockstat_probes[] = { -#if defined(__x86_64__) - /* Only provide implemented probes for each architecture */ + // Mutex probes LOCKSTAT_PROBE(LS_LCK_MTX_LOCK, LSA_ACQUIRE, LS_LCK_MTX_LOCK_ACQUIRE, "lck_mtx_t"), LOCKSTAT_PROBE(LS_LCK_MTX_LOCK, LSA_SPIN, LS_LCK_MTX_LOCK_SPIN, "lck_mtx_t", "uint64_t"), LOCKSTAT_PROBE(LS_LCK_MTX_LOCK, LSA_BLOCK, LS_LCK_MTX_LOCK_BLOCK, "lck_mtx_t", "uint64_t"), LOCKSTAT_PROBE(LS_LCK_MTX_TRY_LOCK, LSA_ACQUIRE, LS_LCK_MTX_TRY_LOCK_ACQUIRE, "lck_mtx_t"), LOCKSTAT_PROBE(LS_LCK_MTX_TRY_SPIN_LOCK, LSA_ACQUIRE, LS_LCK_MTX_TRY_SPIN_LOCK_ACQUIRE, "lck_mtx_t"), LOCKSTAT_PROBE(LS_LCK_MTX_UNLOCK, LSA_RELEASE, LS_LCK_MTX_UNLOCK_RELEASE, "lck_mtx_t"), - LOCKSTAT_PROBE(LS_LCK_MTX_EXT_LOCK, LSA_ACQUIRE, LS_LCK_MTX_EXT_LOCK_ACQUIRE, "lck_mtx_t"), + LOCKSTAT_PROBE(LS_LCK_MTX_LOCK_SPIN_LOCK, LSA_ACQUIRE, LS_LCK_MTX_LOCK_SPIN_ACQUIRE, "lck_mtx_t"), + // Extended mutexes are only implemented on Intel +#if defined(__x86_64__) LOCKSTAT_PROBE(LS_LCK_MTX_EXT_LOCK, LSA_SPIN, LS_LCK_MTX_EXT_LOCK_SPIN, "lck_mtx_t", "uint64_t"), + LOCKSTAT_PROBE(LS_LCK_MTX_EXT_LOCK, LSA_ACQUIRE, LS_LCK_MTX_EXT_LOCK_ACQUIRE, "lck_mtx_t"), LOCKSTAT_PROBE(LS_LCK_MTX_EXT_LOCK, LSA_BLOCK, LS_LCK_MTX_EXT_LOCK_BLOCK, "lck_mtx_t", "uint64_t"), -// LOCKSTAT_PROBE(LS_LCK_MTX_EXT_TRY_LOCK, LSA_ACQUIRE, LS_LCK_MTX_TRY_EXT_LOCK_ACQUIRE) LOCKSTAT_PROBE(LS_LCK_MTX_EXT_UNLOCK, LSA_RELEASE, LS_LCK_MTX_EXT_UNLOCK_RELEASE, "lck_mtx_t"), - LOCKSTAT_PROBE(LS_LCK_MTX_LOCK_SPIN_LOCK, LSA_ACQUIRE, LS_LCK_MTX_LOCK_SPIN_ACQUIRE, "lck_mtx_t"), +#endif + + // RW lock probes // TODO: This should not be a uint64_t ! 
LOCKSTAT_PROBE(LS_LCK_RW_LOCK_SHARED, LSR_ACQUIRE, LS_LCK_RW_LOCK_SHARED_ACQUIRE, "lck_rw_t", "uint64_t"), LOCKSTAT_PROBE(LS_LCK_RW_LOCK_SHARED, LSR_BLOCK, LS_LCK_RW_LOCK_SHARED_BLOCK, "lck_rw_t", "uint64_t", "_Bool", "_Bool", "int"), LOCKSTAT_PROBE(LS_LCK_RW_LOCK_SHARED, LSR_SPIN, LS_LCK_RW_LOCK_SHARED_SPIN, "lck_rw_t", "uint64_t", "_Bool", "_Bool", "int"), - // TODO: This should NOT be a uint64_t LOCKSTAT_PROBE(LS_LCK_RW_LOCK_EXCL, LSR_ACQUIRE, LS_LCK_RW_LOCK_EXCL_ACQUIRE, "lck_rw_t", "uint64_t"), + // TODO: This should NOT be a uint64_t LOCKSTAT_PROBE(LS_LCK_RW_LOCK_EXCL, LSR_BLOCK, LS_LCK_RW_LOCK_EXCL_BLOCK, "lck_rw_t", "uint64_t", "_Bool", "_Bool", "int"), LOCKSTAT_PROBE(LS_LCK_RW_LOCK_EXCL, LSR_SPIN, LS_LCK_RW_LOCK_EXCL_SPIN, "lck_rw_t", "uint64_t", "int"), LOCKSTAT_PROBE(LS_LCK_RW_DONE, LSR_RELEASE, LS_LCK_RW_DONE_RELEASE, "lck_rw_t", "_Bool"), // TODO : This should NOT be a uint64_t LOCKSTAT_PROBE(LS_LCK_RW_TRY_LOCK_SHARED, LSR_ACQUIRE, LS_LCK_RW_TRY_LOCK_SHARED_ACQUIRE, "lck_rw_t", "uint64_t"), - // See above LOCKSTAT_PROBE(LS_LCK_RW_TRY_LOCK_EXCL, LSR_ACQUIRE, LS_LCK_RW_TRY_LOCK_EXCL_ACQUIRE, "lck_rw_t", "uint64_t"), LOCKSTAT_PROBE(LS_LCK_RW_LOCK_SHARED_TO_EXCL, LSR_UPGRADE, LS_LCK_RW_LOCK_SHARED_TO_EXCL_UPGRADE, "lck_rw_t", "_Bool"), LOCKSTAT_PROBE(LS_LCK_RW_LOCK_SHARED_TO_EXCL, LSR_SPIN, LS_LCK_RW_LOCK_SHARED_TO_EXCL_SPIN, "lck_rw_t", "uint64_t"), LOCKSTAT_PROBE(LS_LCK_RW_LOCK_SHARED_TO_EXCL, LSR_BLOCK, LS_LCK_RW_LOCK_SHARED_TO_EXCL_BLOCK, "lck_rw_t", "uint64_t", "_Bool", "_Bool", "int"), - LOCKSTAT_PROBE(LS_LCK_RW_LOCK_EXCL_TO_SHARED, LSR_DOWNGRADE, LS_LCK_RW_LOCK_EXCL_TO_SHARED_DOWNGRADE, "lck_rw_t"), + + // Spin lock probes //TODO : Separate the probes for the hw_bit from the probe for the normal hw locks LOCKSTAT_PROBE(LS_LCK_SPIN_LOCK, LSS_ACQUIRE, LS_LCK_SPIN_LOCK_ACQUIRE, "hw_lock_t"), LOCKSTAT_PROBE(LS_LCK_SPIN_LOCK, LSS_SPIN, LS_LCK_SPIN_LOCK_SPIN, "hw_lock_t", "uint64_t", "uint64_t"), LOCKSTAT_PROBE(LS_LCK_SPIN_UNLOCK, LSS_RELEASE, LS_LCK_SPIN_UNLOCK_RELEASE, "hw_lock_t"), -#elif defined(__arm__) || defined(__arm64__) - LOCKSTAT_PROBE(LS_LCK_MTX_LOCK, LSA_ACQUIRE, LS_LCK_MTX_LOCK_ACQUIRE, "lck_mtx_t"), -// LOCKSTAT_PROBE(LS_LCK_MTX_LOCK, LSA_SPIN, LS_LCK_MTX_LOCK_SPIN, "lck_mtx_t", "uint64_t"), - LOCKSTAT_PROBE(LS_LCK_MTX_LOCK, LSA_BLOCK, LS_LCK_MTX_LOCK_BLOCK, "lck_mtx_t", "uint64_t"), - LOCKSTAT_PROBE(LS_LCK_MTX_TRY_LOCK, LSA_ACQUIRE, LS_LCK_MTX_TRY_LOCK_ACQUIRE, "lck_mtx_t"), -// LOCKSTAT_PROBE(LS_LCK_MTX_TRY_SPIN_LOCK, LSA_ACQUIRE, LS_LCK_MTX_TRY_SPIN_LOCK_ACQUIRE, "lck_mtx_t"), - LOCKSTAT_PROBE(LS_LCK_MTX_UNLOCK, LSA_RELEASE, LS_LCK_MTX_UNLOCK_RELEASE, "lck_mtx_t"), - LOCKSTAT_PROBE(LS_LCK_MTX_EXT_LOCK, LSA_ACQUIRE, LS_LCK_MTX_EXT_LOCK_ACQUIRE, "lck_mtx_t"), -// LOCKSTAT_PROBE(LS_LCK_MTX_EXT_LOCK, LSA_SPIN, LS_LCK_MTX_EXT_LOCK_SPIN, "lck_mtx_t", "uint64_t"), - LOCKSTAT_PROBE(LS_LCK_MTX_EXT_LOCK, LSA_BLOCK, LS_LCK_MTX_EXT_LOCK_BLOCK, "lck_mtx_t", "uint64_t"), -// LOCKSTAT_PROBE(LS_LCK_MTX_EXT_TRY_LOCK, LSA_ACQUIRE, LS_LCK_MTX_TRY_EXT_LOCK_ACQUIRE) -// LOCKSTAT_PROBE(LS_LCK_MTX_EXT_UNLOCK, LSA_RELEASE, LS_LCK_MTX_EXT_UNLOCK_RELEASE, "lck_mtx_t"), -// LOCKSTAT_PROBE(LS_LCK_MTX_LOCK_SPIN_LOCK, LSA_ACQUIRE, LS_LCK_MTX_LOCK_SPIN_ACQUIRE, "lck_mtx_t"), - LOCKSTAT_PROBE(LS_LCK_RW_LOCK_SHARED, LSR_ACQUIRE, LS_LCK_RW_LOCK_SHARED_ACQUIRE, "lck_rw_t", "uint64_t"), - LOCKSTAT_PROBE(LS_LCK_RW_LOCK_SHARED, LSR_BLOCK, LS_LCK_RW_LOCK_SHARED_BLOCK, "lck_rw_t", "uint64_t", "_Bool", "_Bool", "int"), - LOCKSTAT_PROBE(LS_LCK_RW_LOCK_SHARED, LSR_SPIN, LS_LCK_RW_LOCK_SHARED_SPIN, 
"lck_rw_t", "uint64_t", "_Bool", "_Bool", "int"), - LOCKSTAT_PROBE(LS_LCK_RW_LOCK_EXCL, LSR_ACQUIRE, LS_LCK_RW_LOCK_EXCL_ACQUIRE, "lck_rw_t", "uint64_t"), - LOCKSTAT_PROBE(LS_LCK_RW_LOCK_EXCL, LSR_BLOCK, LS_LCK_RW_LOCK_EXCL_BLOCK, "lck_rw_t", "uint64_t", "_Bool", "_Bool", "int"), - LOCKSTAT_PROBE(LS_LCK_RW_LOCK_EXCL, LSR_SPIN, LS_LCK_RW_LOCK_EXCL_SPIN, "lck_rw_t", "uint64_t", "int"), - LOCKSTAT_PROBE(LS_LCK_RW_DONE, LSR_RELEASE, LS_LCK_RW_DONE_RELEASE, "lck_rw_t", "_Bool"), - // TODO : This should NOT be a uint64_t - LOCKSTAT_PROBE(LS_LCK_RW_TRY_LOCK_SHARED, LSR_ACQUIRE, LS_LCK_RW_TRY_LOCK_SHARED_ACQUIRE, "lck_rw_t", "uint64_t"), - // See above - LOCKSTAT_PROBE(LS_LCK_RW_TRY_LOCK_EXCL, LSR_ACQUIRE, LS_LCK_RW_TRY_LOCK_EXCL_ACQUIRE, "lck_rw_t", "uint64_t"), - LOCKSTAT_PROBE(LS_LCK_RW_LOCK_SHARED_TO_EXCL, LSR_UPGRADE, LS_LCK_RW_LOCK_SHARED_TO_EXCL_UPGRADE, "lck_rw_t", "_Bool"), - LOCKSTAT_PROBE(LS_LCK_RW_LOCK_SHARED_TO_EXCL, LSR_SPIN, LS_LCK_RW_LOCK_SHARED_TO_EXCL_SPIN, "lck_rw_t", "uint64_t"), - LOCKSTAT_PROBE(LS_LCK_RW_LOCK_SHARED_TO_EXCL, LSR_BLOCK, LS_LCK_RW_LOCK_SHARED_TO_EXCL_BLOCK, "lck_rw_t", "uint64_t", "_Bool", "_Bool", "int"), LOCKSTAT_PROBE(LS_LCK_RW_LOCK_EXCL_TO_SHARED, LSR_DOWNGRADE, LS_LCK_RW_LOCK_EXCL_TO_SHARED_DOWNGRADE, "lck_rw_t"), - //TODO : Separate the probes for the hw_bit from the probe for the normal hw locks - LOCKSTAT_PROBE(LS_LCK_SPIN_LOCK, LSS_ACQUIRE, LS_LCK_SPIN_LOCK_ACQUIRE, "hw_lock_t"), - LOCKSTAT_PROBE(LS_LCK_SPIN_LOCK, LSS_SPIN, LS_LCK_SPIN_LOCK_SPIN, "hw_lock_t", "uint64_t", "uint64_t"), - LOCKSTAT_PROBE(LS_LCK_SPIN_UNLOCK, LSS_RELEASE, LS_LCK_SPIN_UNLOCK_RELEASE, "hw_lock_t"), -#endif - /* Interlock measurements would be nice, but later */ - -#ifdef LATER - LOCKSTAT_PROBE(LS_LCK_RW_LOCK_EXCL_TO_SHARED, LSA_ILK_SPIN, LS_LCK_RW_LOCK_EXCL_TO_SHARED_ILK_SPIN), - LOCKSTAT_PROBE(LS_LCK_MTX_LOCK, LSA_ILK_SPIN, LS_LCK_MTX_LOCK_ILK_SPIN), - LOCKSTAT_PROBE(LS_LCK_MTX_EXT_LOCK, LSA_ILK_SPIN, LS_LCK_MTX_EXT_LOCK_ILK_SPIN), - LOCKSTAT_PROBE(LS_LCK_RW_TRY_LOCK_EXCL, LSA_ILK_SPIN, LS_LCK_RW_TRY_LOCK_EXCL_ILK_SPIN), - LOCKSTAT_PROBE(LS_LCK_RW_TRY_LOCK_SHARED, LSA_SPIN, LS_LCK_RW_TRY_LOCK_SHARED_SPIN) -#endif - + // Ticket lock probes + LOCKSTAT_PROBE(LS_LCK_TICKET_LOCK, LST_ACQUIRE, LS_LCK_TICKET_LOCK_ACQUIRE, "lck_ticket_t"), + LOCKSTAT_PROBE(LS_LCK_TICKET_LOCK, LST_RELEASE, LS_LCK_TICKET_LOCK_RELEASE, "lck_ticket_t"), + LOCKSTAT_PROBE(LS_LCK_TICKET_LOCK, LST_SPIN, LS_LCK_TICKET_LOCK_SPIN, "lck_ticket_t"), { NULL, NULL, 0, 0, NULL } @@ -188,7 +140,6 @@ lockstat_enable(void *arg, dtrace_id_t id, void *parg) lockstat_probemap[probe->lsp_probe] = id; membar_producer(); - membar_producer(); return 0; } @@ -199,26 +150,11 @@ lockstat_disable(void *arg, dtrace_id_t id, void *parg) #pragma unused(arg, id) /* __APPLE__ */ lockstat_probe_t *probe = parg; - int i; ASSERT(lockstat_probemap[probe->lsp_probe]); lockstat_probemap[probe->lsp_probe] = 0; membar_producer(); - - /* - * See if we have any probes left enabled. - */ - for (i = 0; i < LS_NPROBES; i++) { - if (lockstat_probemap[i]) { - /* - * This probe is still enabled. We don't need to deal - * with waiting for all threads to be out of the - * lockstat critical sections; just return. 
- */ - return; - } - } } /*ARGSUSED*/ @@ -249,7 +185,7 @@ lockstat_provide(void *arg, const dtrace_probedesc_t *desc) static void lockstat_destroy(void *arg, dtrace_id_t id, void *parg) { -#pragma unused(arg, id) /* __APPLE__ */ +#pragma unused(arg, id) lockstat_probe_t *probe = parg; @@ -318,37 +254,28 @@ lockstat_attach(dev_info_t *devi) return DDI_SUCCESS; } -d_open_t _lockstat_open; - -int +static int _lockstat_open(dev_t dev, int flags, int devtype, struct proc *p) { #pragma unused(dev,flags,devtype,p) return 0; } -#define LOCKSTAT_MAJOR -24 /* let the kernel pick the device number */ -/* - * A struct describing which functions will get invoked for certain - * actions. - */ -static struct cdevsw lockstat_cdevsw = +static const struct cdevsw lockstat_cdevsw = { - _lockstat_open, /* open */ - eno_opcl, /* close */ - eno_rdwrt, /* read */ - eno_rdwrt, /* write */ - eno_ioctl, /* ioctl */ - (stop_fcn_t *)nulldev, /* stop */ - (reset_fcn_t *)nulldev, /* reset */ - NULL, /* tty's */ - eno_select, /* select */ - eno_mmap, /* mmap */ - eno_strat, /* strategy */ - eno_getc, /* getc */ - eno_putc, /* putc */ - 0 /* type */ + .d_open = _lockstat_open, + .d_close = eno_opcl, + .d_read = eno_rdwrt, + .d_write = eno_rdwrt, + .d_ioctl = eno_ioctl, + .d_stop = (stop_fcn_t *)nulldev, + .d_reset = (reset_fcn_t *)nulldev, + .d_select = eno_select, + .d_mmap = eno_mmap, + .d_strategy = eno_strat, + .d_reserved_1 = eno_getc, + .d_reserved_2 = eno_putc, }; void lockstat_init( void ); @@ -356,7 +283,7 @@ void lockstat_init( void ); void lockstat_init( void ) { - int majdevno = cdevsw_add(LOCKSTAT_MAJOR, &lockstat_cdevsw); + int majdevno = cdevsw_add(-1, &lockstat_cdevsw); if (majdevno < 0) { printf("lockstat_init: failed to allocate a major number!\n"); @@ -365,4 +292,3 @@ lockstat_init( void ) lockstat_attach((dev_info_t*)(uintptr_t)majdevno); } -#undef LOCKSTAT_MAJOR diff --git a/bsd/dev/dtrace/profile_prvd.c b/bsd/dev/dtrace/profile_prvd.c index 2294eedfd..450a69f4b 100644 --- a/bsd/dev/dtrace/profile_prvd.c +++ b/bsd/dev/dtrace/profile_prvd.c @@ -711,26 +711,20 @@ _profile_open(dev_t dev, int flags, int devtype, struct proc *p) #define PROFILE_MAJOR -24 /* let the kernel pick the device number */ -/* - * A struct describing which functions will get invoked for certain - * actions. 
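The hunk that follows, like several later ones in this patch, replaces positional initialization of a struct cdevsw with C99 designated initializers and marks the table const, so every callback is named explicitly and any omitted member is zero-initialized. A minimal standalone sketch of the same idiom, using a hypothetical struct example_devsw rather than the kernel's real cdevsw layout:

#include <stdio.h>

/* Hypothetical device-switch-like structure, not the kernel's cdevsw. */
struct example_devsw {
    int (*d_open)(int unit);
    int (*d_close)(int unit);
    int d_type;
};

static int example_open(int unit)  { printf("open %d\n", unit);  return 0; }
static int example_close(int unit) { printf("close %d\n", unit); return 0; }

/* Positional style: every slot must appear, in declaration order. */
static const struct example_devsw positional = {
    example_open,   /* open  */
    example_close,  /* close */
    0               /* type  */
};

/* Designated style: fields are named; unnamed fields are zero-initialized. */
static const struct example_devsw designated = {
    .d_open  = example_open,
    .d_close = example_close,
};

int main(void)
{
    positional.d_open(1);
    designated.d_close(1);
    return 0;
}

Besides readability, the const qualifier lets these tables live in read-only data, which is why the patch also adds const to the mdevbdevsw, mdevcdevsw, and mt_cdevsw definitions further down.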
- */ -static struct cdevsw profile_cdevsw = +static const struct cdevsw profile_cdevsw = { - _profile_open, /* open */ - eno_opcl, /* close */ - eno_rdwrt, /* read */ - eno_rdwrt, /* write */ - eno_ioctl, /* ioctl */ - (stop_fcn_t *)nulldev, /* stop */ - (reset_fcn_t *)nulldev, /* reset */ - NULL, /* tty's */ - eno_select, /* select */ - eno_mmap, /* mmap */ - eno_strat, /* strategy */ - eno_getc, /* getc */ - eno_putc, /* putc */ - 0 /* type */ + .d_open = _profile_open, + .d_close = eno_opcl, + .d_read = eno_rdwrt, + .d_write = eno_rdwrt, + .d_ioctl = eno_ioctl, + .d_stop = (stop_fcn_t *)nulldev, + .d_reset = (reset_fcn_t *)nulldev, + .d_select = eno_select, + .d_mmap = eno_mmap, + .d_strategy = eno_strat, + .d_reserved_1 = eno_getc, + .d_reserved_2 = eno_putc, }; void diff --git a/bsd/dev/dtrace/scripts/Makefile b/bsd/dev/dtrace/scripts/Makefile index 58fc8b304..4927b28b8 100644 --- a/bsd/dev/dtrace/scripts/Makefile +++ b/bsd/dev/dtrace/scripts/Makefile @@ -6,7 +6,7 @@ export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir include $(MakeInc_cmd) include $(MakeInc_def) -INSTALL_DTRACE_SCRIPTS_LIST = \ +INSTALL_DTRACE_MI_SCRIPTS_LIST = \ darwin.d \ errno.d \ io.d \ @@ -19,27 +19,39 @@ INSTALL_DTRACE_SCRIPTS_LIST = \ INSTALL_DTRACE_LIBEXEC_LIST = \ log_unnest_badness.d \ - vm_map_delete_permanent.d + vm_map_delete_permanent.d \ + vm_object_ownership.d ifneq ($(filter $(SUPPORTED_EMBEDDED_PLATFORMS),$(PLATFORM)),) -INSTALL_DTRACE_SCRIPTS_LIST += mptcp.d +INSTALL_DTRACE_MI_SCRIPTS_LIST += mptcp.d endif ifeq ($(CURRENT_ARCH_CONFIG),ARM64) -INSTALL_DTRACE_SCRIPTS_LIST += regs_arm64.d ptrauth_arm64.d +INSTALL_DTRACE_MD_SCRIPTS_LIST = regs_arm64.d ptrauth_arm64.d +INSTALL_DTRACE_MD_DIR = arm64 else ifeq ($(CURRENT_ARCH_CONFIG),ARM) -INSTALL_DTRACE_SCRIPTS_LIST += regs_arm.d +INSTALL_DTRACE_MD_SCRIPTS_LIST = regs_arm.d +INSTALL_DTRACE_MD_DIR = arm else -INSTALL_DTRACE_SCRIPTS_LIST += regs_x86_64.d +INSTALL_DTRACE_MD_SCRIPTS_LIST = regs_x86_64.d vmx_compat.d +INSTALL_DTRACE_MD_DIR = x86_64 endif -INSTALL_DTRACE_SCRIPTS_FILES = \ - $(addprefix $(DSTROOT)/$(INSTALL_DTRACE_SCRIPTS_DIR)/, $(INSTALL_DTRACE_SCRIPTS_LIST)) +INSTALL_DTRACE_MI_SCRIPTS_FILES = \ + $(addprefix $(DSTROOT)/$(INSTALL_DTRACE_SCRIPTS_DIR)/, $(INSTALL_DTRACE_MI_SCRIPTS_LIST)) -$(INSTALL_DTRACE_SCRIPTS_FILES): $(DSTROOT)/$(INSTALL_DTRACE_SCRIPTS_DIR)/% : % +INSTALL_DTRACE_MD_SCRIPTS_FILES = \ + $(addprefix $(DSTROOT)/$(INSTALL_DTRACE_SCRIPTS_DIR)/$(INSTALL_DTRACE_MD_DIR)/, $(INSTALL_DTRACE_MD_SCRIPTS_LIST)) + +$(INSTALL_DTRACE_MD_SCRIPTS_FILES): $(DSTROOT)/$(INSTALL_DTRACE_SCRIPTS_DIR)/$(INSTALL_DTRACE_MD_DIR)/% : % + $(_v)$(MKDIR) $(DSTROOT)/$(INSTALL_DTRACE_SCRIPTS_DIR)/$(INSTALL_DTRACE_MD_DIR) + @$(LOG) INSTALL $(@F) + $(_v)$(INSTALL) $(DATA_INSTALL_FLAGS) $< $@ + +$(INSTALL_DTRACE_MI_SCRIPTS_FILES): $(DSTROOT)/$(INSTALL_DTRACE_SCRIPTS_DIR)/% : % $(_v)$(MKDIR) $(DSTROOT)/$(INSTALL_DTRACE_SCRIPTS_DIR) - $(call makelog,INSTALL $(@F)) + @$(LOG) INSTALL $(@F) $(_v)$(INSTALL) $(DATA_INSTALL_FLAGS) $< $@ INSTALL_DTRACE_LIBEXEC_FILES = \ @@ -47,10 +59,12 @@ INSTALL_DTRACE_LIBEXEC_FILES = \ $(INSTALL_DTRACE_LIBEXEC_FILES): $(DSTROOT)/$(INSTALL_DTRACE_LIBEXEC_DIR)/% : % $(_v)$(MKDIR) $(DSTROOT)/$(INSTALL_DTRACE_LIBEXEC_DIR) - $(call makelog,INSTALL $(@F)) + @$(LOG) INSTALL $(@F) $(_v)$(INSTALL) $(EXEC_INSTALL_FLAGS) $< $@ -do_textfiles_install:: $(INSTALL_DTRACE_SCRIPTS_FILES) $(INSTALL_DTRACE_LIBEXEC_FILES) +do_textfiles_install_mi:: $(INSTALL_DTRACE_MI_SCRIPTS_FILES) $(INSTALL_DTRACE_LIBEXEC_FILES) + +do_textfiles_install_md:: 
$(INSTALL_DTRACE_MD_SCRIPTS_FILES) include $(MakeInc_rule) include $(MakeInc_dir) diff --git a/bsd/dev/dtrace/scripts/io.d b/bsd/dev/dtrace/scripts/io.d index f295f1026..ebe226362 100644 --- a/bsd/dev/dtrace/scripts/io.d +++ b/bsd/dev/dtrace/scripts/io.d @@ -2,13 +2,13 @@ * Copyright (c) 2007 Apple, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ - * + * * The contents of this file constitute Original Code as defined in and * are subject to the Apple Public Source License Version 1.1 (the * "License"). You may not use this file except in compliance with the * License. Please obtain a copy of the License at * http://www.apple.com/publicsource and read it before using this file. - * + * * This Original Code and all software distributed under the License are * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -16,7 +16,7 @@ * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the * License for the specific language governing rights and limitations * under the License. - * + * * @APPLE_LICENSE_HEADER_END@ */ @@ -76,7 +76,7 @@ translator bufinfo_t < struct buf *B > { b_iodone = (caddr_t)B->b_iodone; b_error = B->b_error; b_edev = B->b_dev; -}; +}; typedef struct devinfo { int dev_major; /* major number */ @@ -113,16 +113,16 @@ typedef struct fileinfo { translator fileinfo_t < struct buf *B > { fi_name = B->b_vp->v_name == NULL ? "" : B->b_vp->v_name; - fi_dirname = B->b_vp->v_parent == NULL ? "" : + fi_dirname = B->b_vp->v_parent == NULL ? "" : (B->b_vp->v_parent->v_name == NULL ? "" : B->b_vp->v_parent->v_name); - fi_pathname = strjoin("??/", + fi_pathname = strjoin("??/", strjoin(B->b_vp->v_parent == NULL ? "" : (B->b_vp->v_parent->v_name == NULL ? "" : B->b_vp->v_parent->v_name), strjoin("/", B->b_vp->v_name == NULL ? "" : B->b_vp->v_name))); - fi_offset = B->b_upl == NULL ? -1 : ((upl_t)B->b_upl)->offset; + fi_offset = B->b_upl == NULL ? -1 : ((upl_t)B->b_upl)->u_offset; fi_fs = B->b_vp->v_mount->mnt_vtable->vfc_name; @@ -137,17 +137,17 @@ translator fileinfo_t < struct buf *B > { * flags behave as a bit-field *except* for O_RDONLY, O_WRONLY, and O_RDWR. * To test the open mode, you write code similar to that used with the fcntl(2) * F_GET[X]FL command, such as: if ((fi_oflags & O_ACCMODE) == O_WRONLY). - */ + */ inline int O_ACCMODE = 0x0003; -#pragma D binding "1.1" O_ACCMODE - +#pragma D binding "1.1" O_ACCMODE + inline int O_RDONLY = 0x0000; #pragma D binding "1.1" O_RDONLY inline int O_WRONLY = 0x0001; #pragma D binding "1.1" O_WRONLY inline int O_RDWR = 0x0002; #pragma D binding "1.1" O_RDWR - + inline int O_NONBLOCK = 0x0004; #pragma D binding "1.1" O_NONBLOCK inline int O_APPEND = 0x0008; @@ -176,12 +176,14 @@ inline int O_DIRECTORY = 0x100000; #pragma D binding "1.1" O_DIRECTORY inline int O_SYMLINK = 0x200000; #pragma D binding "1.1" O_SYMLINK +inline int O_NOFOLLOW_ANY = 0x20000000; +#pragma D binding "1.1" O_NOFOLLOW_ANY /* From bsd/sys/file_internal.h */ inline int DTYPE_VNODE = 1; #pragma D binding "1.1" DTYPE_VNODE inline int DTYPE_SOCKET = 2; -#pragma D binding "1.1" DTYPE_SOCKET +#pragma D binding "1.1" DTYPE_SOCKET inline int DTYPE_PSXSHM = 3; #pragma D binding "1.1" DTYPE_PSXSHM inline int DTYPE_PSXSEM = 4; @@ -207,18 +209,18 @@ translator fileinfo_t < struct fileglob *F > { fi_dirname = (F == NULL) ? "" : F->fg_ops->fo_type != DTYPE_VNODE ? "" : - ((struct vnode *)F->fg_data)->v_parent == NULL ? 
"" : - (((struct vnode *)F->fg_data)->v_parent->v_name == NULL ? "" : + ((struct vnode *)F->fg_data)->v_parent == NULL ? "" : + (((struct vnode *)F->fg_data)->v_parent->v_name == NULL ? "" : ((struct vnode *)F->fg_data)->v_parent->v_name); fi_pathname = (F == NULL) ? "" : F->fg_ops->fo_type != DTYPE_VNODE ? "" : - strjoin("??/", + strjoin("??/", strjoin(((struct vnode *)F->fg_data)->v_parent == NULL ? "" : - (((struct vnode *)F->fg_data)->v_parent->v_name == NULL ? "" : + (((struct vnode *)F->fg_data)->v_parent->v_name == NULL ? "" : ((struct vnode *)F->fg_data)->v_parent->v_name), strjoin("/", - ((struct vnode *)F->fg_data)->v_name == NULL ? "" : + ((struct vnode *)F->fg_data)->v_name == NULL ? "" : ((struct vnode *)F->fg_data)->v_name))); fi_offset = (F == NULL) ? 0 : @@ -230,16 +232,16 @@ translator fileinfo_t < struct fileglob *F > { fi_mount = (F == NULL) ? "" : F->fg_ops->fo_type != DTYPE_VNODE ? "" : - ((struct vnode *)F->fg_data)->v_mount->mnt_vnodecovered == NULL ? "/" : + ((struct vnode *)F->fg_data)->v_mount->mnt_vnodecovered == NULL ? "/" : ((struct vnode *)F->fg_data)->v_mount->mnt_vnodecovered->v_name; - fi_oflags = (F == NULL) ? 0 : + fi_oflags = (F == NULL) ? 0 : F->fg_flag - 1; /* Subtract one to map FREAD/FWRITE bitfield to O_RD/WR open() flags. */ }; inline fileinfo_t fds[int fd] = xlate ( - (fd >= 0 && fd <= curproc->p_fd->fd_lastfile) ? - (struct fileglob *)(curproc->p_fd->fd_ofiles[fd]->f_fglob) : + (fd >= 0 && fd <= curproc->p_fd->fd_lastfile) ? + (struct fileglob *)(curproc->p_fd->fd_ofiles[fd]->fp_glob) : (struct fileglob *)NULL); #pragma D attributes Stable/Stable/Common fds @@ -249,10 +251,10 @@ inline fileinfo_t fds[int fd] = xlate ( translator fileinfo_t < struct vnode *V > { fi_name = V->v_name == NULL ? "" : V->v_name; - fi_dirname = V->v_parent == NULL ? "" : + fi_dirname = V->v_parent == NULL ? "" : (V->v_parent->v_name == NULL ? "" : V->v_parent->v_name); - fi_pathname = strjoin("??/", + fi_pathname = strjoin("??/", strjoin(V->v_parent == NULL ? "" : (V->v_parent->v_name == NULL ? 
"" : V->v_parent->v_name), strjoin("/", diff --git a/bsd/dev/dtrace/scripts/log_unnest_badness.d b/bsd/dev/dtrace/scripts/log_unnest_badness.d index 2e8e0d003..458676da5 100644 --- a/bsd/dev/dtrace/scripts/log_unnest_badness.d +++ b/bsd/dev/dtrace/scripts/log_unnest_badness.d @@ -3,7 +3,7 @@ vminfo::log_unnest_badness: { printf("%d[%s]: unexpected unnest(0x%llx, 0x%llx) below 0x%llx", - $pid, + pid, execname, (uint64_t) arg1, (uint64_t) arg2, diff --git a/bsd/dev/dtrace/scripts/vm_object_ownership.d b/bsd/dev/dtrace/scripts/vm_object_ownership.d new file mode 100644 index 000000000..93b217ace --- /dev/null +++ b/bsd/dev/dtrace/scripts/vm_object_ownership.d @@ -0,0 +1,32 @@ +#!/usr/sbin/dtrace -s + +vminfo:::object_ownership_change +{ + old_owner = (task_t)arg1; + if (old_owner == 0) { + old_pid = -1; + old_name = "(nil)"; + } else { + old_proc = (proc_t)old_owner->bsd_info; + old_pid = old_proc->p_pid; + old_name = old_proc->p_comm; + } + new_owner = (task_t)arg4; + if (new_owner == 0) { + new_pid = -1; + new_name = "(nil)"; + } else { + new_proc = (proc_t)new_owner->bsd_info; + new_pid = new_proc->p_pid; + new_name = new_proc->p_comm; + } + + printf("%d[%s] object 0x%p id 0x%x purgeable:%d owner:0x%p (%d[%s]) tag:%d nofootprint:%d -> owner:0x%p (%d[%s]) tag:%d nofootprint:%d", + pid, execname, arg0, arg7, ((vm_object_t)arg0)->purgable, + old_owner, old_pid, old_name, + arg2, arg3, + new_owner, new_pid, new_name, + arg5, arg6); + stack(); + ustack(); +} diff --git a/bsd/dev/dtrace/scripts/vmx_compat.d b/bsd/dev/dtrace/scripts/vmx_compat.d new file mode 100644 index 000000000..b39a5d34e --- /dev/null +++ b/bsd/dev/dtrace/scripts/vmx_compat.d @@ -0,0 +1,175 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + * + * Copyright 2011 Joyent, Inc. All rights reserved. + */ + +/* + * This file delivers VMX description that is compatible with DTrace + * upstream. 
+ */ + +enum vmregs_vmx { + VMX_VIRTUAL_PROCESSOR_ID = 0x00000000, + VMX_GUEST_ES_SELECTOR = 0x00000800, + VMX_GUEST_CS_SELECTOR = 0x00000802, + VMX_GUEST_SS_SELECTOR = 0x00000804, + VMX_GUEST_DS_SELECTOR = 0x00000806, + VMX_GUEST_FS_SELECTOR = 0x00000808, + VMX_GUEST_GS_SELECTOR = 0x0000080a, + VMX_GUEST_LDTR_SELECTOR = 0x0000080c, + VMX_GUEST_TR_SELECTOR = 0x0000080e, + VMX_HOST_ES_SELECTOR = 0x00000c00, + VMX_HOST_CS_SELECTOR = 0x00000c02, + VMX_HOST_SS_SELECTOR = 0x00000c04, + VMX_HOST_DS_SELECTOR = 0x00000c06, + VMX_HOST_FS_SELECTOR = 0x00000c08, + VMX_HOST_GS_SELECTOR = 0x00000c0a, + VMX_HOST_TR_SELECTOR = 0x00000c0c, + VMX_IO_BITMAP_A = 0x00002000, + VMX_IO_BITMAP_A_HIGH = 0x00002001, + VMX_IO_BITMAP_B = 0x00002002, + VMX_IO_BITMAP_B_HIGH = 0x00002003, + VMX_MSR_BITMAP = 0x00002004, + VMX_MSR_BITMAP_HIGH = 0x00002005, + VMX_VM_EXIT_MSR_STORE_ADDR = 0x00002006, + VMX_VM_EXIT_MSR_STORE_ADDR_HIGH = 0x00002007, + VMX_VM_EXIT_MSR_LOAD_ADDR = 0x00002008, + VMX_VM_EXIT_MSR_LOAD_ADDR_HIGH = 0x00002009, + VMX_VM_ENTRY_MSR_LOAD_ADDR = 0x0000200a, + VMX_VM_ENTRY_MSR_LOAD_ADDR_HIGH = 0x0000200b, + VMX_TSC_OFFSET = 0x00002010, + VMX_TSC_OFFSET_HIGH = 0x00002011, + VMX_VIRTUAL_APIC_PAGE_ADDR = 0x00002012, + VMX_VIRTUAL_APIC_PAGE_ADDR_HIGH = 0x00002013, + VMX_APIC_ACCESS_ADDR = 0x00002014, + VMX_APIC_ACCESS_ADDR_HIGH = 0x00002015, + VMX_EPT_POINTER = 0x0000201a, + VMX_EPT_POINTER_HIGH = 0x0000201b, + VMX_GUEST_PHYSICAL_ADDRESS = 0x00002400, + VMX_GUEST_PHYSICAL_ADDRESS_HIGH = 0x00002401, + VMX_VMCS_LINK_POINTER = 0x00002800, + VMX_VMCS_LINK_POINTER_HIGH = 0x00002801, + VMX_GUEST_IA32_DEBUGCTL = 0x00002802, + VMX_GUEST_IA32_DEBUGCTL_HIGH = 0x00002803, + VMX_GUEST_IA32_PAT = 0x00002804, + VMX_GUEST_IA32_PAT_HIGH = 0x00002805, + VMX_GUEST_PDPTR0 = 0x0000280a, + VMX_GUEST_PDPTR0_HIGH = 0x0000280b, + VMX_GUEST_PDPTR1 = 0x0000280c, + VMX_GUEST_PDPTR1_HIGH = 0x0000280d, + VMX_GUEST_PDPTR2 = 0x0000280e, + VMX_GUEST_PDPTR2_HIGH = 0x0000280f, + VMX_GUEST_PDPTR3 = 0x00002810, + VMX_GUEST_PDPTR3_HIGH = 0x00002811, + VMX_HOST_IA32_PAT = 0x00002c00, + VMX_HOST_IA32_PAT_HIGH = 0x00002c01, + VMX_PIN_BASED_VM_EXEC_CONTROL = 0x00004000, + VMX_CPU_BASED_VM_EXEC_CONTROL = 0x00004002, + VMX_EXCEPTION_BITMAP = 0x00004004, + VMX_PAGE_FAULT_ERROR_CODE_MASK = 0x00004006, + VMX_PAGE_FAULT_ERROR_CODE_MATCH = 0x00004008, + VMX_CR3_TARGET_COUNT = 0x0000400a, + VMX_VM_EXIT_CONTROLS = 0x0000400c, + VMX_VM_EXIT_MSR_STORE_COUNT = 0x0000400e, + VMX_VM_EXIT_MSR_LOAD_COUNT = 0x00004010, + VMX_VM_ENTRY_CONTROLS = 0x00004012, + VMX_VM_ENTRY_MSR_LOAD_COUNT = 0x00004014, + VMX_VM_ENTRY_INTR_INFO_FIELD = 0x00004016, + VMX_VM_ENTRY_EXCEPTION_ERROR_CODE = 0x00004018, + VMX_VM_ENTRY_INSTRUCTION_LEN = 0x0000401a, + VMX_TPR_THRESHOLD = 0x0000401c, + VMX_SECONDARY_VM_EXEC_CONTROL = 0x0000401e, + VMX_PLE_GAP = 0x00004020, + VMX_PLE_WINDOW = 0x00004022, + VMX_VM_INSTRUCTION_ERROR = 0x00004400, + VMX_VM_EXIT_REASON = 0x00004402, + VMX_VM_EXIT_INTR_INFO = 0x00004404, + VMX_VM_EXIT_INTR_ERROR_CODE = 0x00004406, + VMX_IDT_VECTORING_INFO_FIELD = 0x00004408, + VMX_IDT_VECTORING_ERROR_CODE = 0x0000440a, + VMX_VM_EXIT_INSTRUCTION_LEN = 0x0000440c, + VMX_VMX_INSTRUCTION_INFO = 0x0000440e, + VMX_GUEST_ES_LIMIT = 0x00004800, + VMX_GUEST_CS_LIMIT = 0x00004802, + VMX_GUEST_SS_LIMIT = 0x00004804, + VMX_GUEST_DS_LIMIT = 0x00004806, + VMX_GUEST_FS_LIMIT = 0x00004808, + VMX_GUEST_GS_LIMIT = 0x0000480a, + VMX_GUEST_LDTR_LIMIT = 0x0000480c, + VMX_GUEST_TR_LIMIT = 0x0000480e, + VMX_GUEST_GDTR_LIMIT = 0x00004810, + VMX_GUEST_IDTR_LIMIT = 0x00004812, + 
VMX_GUEST_ES_AR_BYTES = 0x00004814, + VMX_GUEST_CS_AR_BYTES = 0x00004816, + VMX_GUEST_SS_AR_BYTES = 0x00004818, + VMX_GUEST_DS_AR_BYTES = 0x0000481a, + VMX_GUEST_FS_AR_BYTES = 0x0000481c, + VMX_GUEST_GS_AR_BYTES = 0x0000481e, + VMX_GUEST_LDTR_AR_BYTES = 0x00004820, + VMX_GUEST_TR_AR_BYTES = 0x00004822, + VMX_GUEST_INTERRUPTIBILITY_INFO = 0x00004824, + VMX_GUEST_ACTIVITY_STATE = 0X00004826, + VMX_GUEST_SYSENTER_CS = 0x0000482A, + VMX_HOST_IA32_SYSENTER_CS = 0x00004c00, + VMX_CR0_GUEST_HOST_MASK = 0x00006000, + VMX_CR4_GUEST_HOST_MASK = 0x00006002, + VMX_CR0_READ_SHADOW = 0x00006004, + VMX_CR4_READ_SHADOW = 0x00006006, + VMX_CR3_TARGET_VALUE0 = 0x00006008, + VMX_CR3_TARGET_VALUE1 = 0x0000600a, + VMX_CR3_TARGET_VALUE2 = 0x0000600c, + VMX_CR3_TARGET_VALUE3 = 0x0000600e, + VMX_EXIT_QUALIFICATION = 0x00006400, + VMX_GUEST_LINEAR_ADDRESS = 0x0000640a, + VMX_GUEST_CR0 = 0x00006800, + VMX_GUEST_CR3 = 0x00006802, + VMX_GUEST_CR4 = 0x00006804, + VMX_GUEST_ES_BASE = 0x00006806, + VMX_GUEST_CS_BASE = 0x00006808, + VMX_GUEST_SS_BASE = 0x0000680a, + VMX_GUEST_DS_BASE = 0x0000680c, + VMX_GUEST_FS_BASE = 0x0000680e, + VMX_GUEST_GS_BASE = 0x00006810, + VMX_GUEST_LDTR_BASE = 0x00006812, + VMX_GUEST_TR_BASE = 0x00006814, + VMX_GUEST_GDTR_BASE = 0x00006816, + VMX_GUEST_IDTR_BASE = 0x00006818, + VMX_GUEST_DR7 = 0x0000681a, + VMX_GUEST_RSP = 0x0000681c, + VMX_GUEST_RIP = 0x0000681e, + VMX_GUEST_RFLAGS = 0x00006820, + VMX_GUEST_PENDING_DBG_EXCEPTIONS = 0x00006822, + VMX_GUEST_SYSENTER_ESP = 0x00006824, + VMX_GUEST_SYSENTER_EIP = 0x00006826, + VMX_HOST_CR0 = 0x00006c00, + VMX_HOST_CR3 = 0x00006c02, + VMX_HOST_CR4 = 0x00006c04, + VMX_HOST_FS_BASE = 0x00006c06, + VMX_HOST_GS_BASE = 0x00006c08, + VMX_HOST_TR_BASE = 0x00006c0a, + VMX_HOST_GDTR_BASE = 0x00006c0c, + VMX_HOST_IDTR_BASE = 0x00006c0e, + VMX_HOST_IA32_SYSENTER_ESP = 0x00006c10, + VMX_HOST_IA32_SYSENTER_EIP = 0x00006c12, + VMX_HOST_RSP = 0x00006c14, + VMX_HOST_RIP = 0x00006c16 +}; + diff --git a/bsd/dev/dtrace/sdt.c b/bsd/dev/dtrace/sdt.c index 1a38e614b..a3722f61e 100644 --- a/bsd/dev/dtrace/sdt.c +++ b/bsd/dev/dtrace/sdt.c @@ -32,9 +32,9 @@ #include #include -#if CONFIG_EMBEDDED +#if defined(__arm__) || defined(__arm64__) #include -#endif +#endif /* defined(__arm__) || defined(__arm64__) */ #include #include @@ -44,6 +44,8 @@ #include extern int dtrace_kernel_symbol_mode; +#include + /* #include sdt_probes; sdpd != NULL; sdpd = sdpd->sdpd_next) { - const char *name = sdpd->sdpd_name, *func; - char *nname; - int i, j; + const char *func; dtrace_id_t id; - for (prov = sdt_providers; prov->sdtp_prefix != NULL; prov++) { - const char *prefpart, *prefix = prov->sdtp_prefix; - - if ((prefpart = strstr(name, prefix))) { - name = prefpart + strlen(prefix); + /* Validate probe's provider name. Do not provide probes for unknown providers. 
*/ + for (prov = sdt_providers; prov->sdtp_name != NULL; prov++) { + if (strcmp(prov->sdtp_prefix, sdpd->sdpd_prov) == 0) { break; } } - nname = kmem_alloc(len = strlen(name) + 1, KM_SLEEP); - - for (i = 0, j = 0; name[j] != '\0'; i++) { - if (name[j] == '_' && name[j + 1] == '_') { - nname[i] = '-'; - j += 2; - } else { - nname[i] = name[j++]; - } + if (prov->sdtp_name == NULL) { + printf("Ignoring probes from unsupported provider %s\n", sdpd->sdpd_prov); + continue; } - nname[i] = '\0'; - sdp = kmem_zalloc(sizeof(sdt_probe_t), KM_SLEEP); sdp->sdp_loadcnt = ctl->mod_loadcnt; sdp->sdp_ctl = ctl; - sdp->sdp_name = nname; - sdp->sdp_namelen = len; + sdp->sdp_name = kmem_alloc(strlen(sdpd->sdpd_name) + 1, KM_SLEEP); + strncpy(sdp->sdp_name, sdpd->sdpd_name, strlen(sdpd->sdpd_name) + 1); + sdp->sdp_namelen = strlen(sdpd->sdpd_name) + 1; sdp->sdp_provider = prov; - func = sdpd->sdpd_func; - - if (func == NULL) { - func = ""; - } + func = (sdpd->sdpd_func != NULL) ? sdpd->sdpd_func : ""; /* * We have our provider. Now create the probe. */ if ((id = dtrace_probe_lookup(prov->sdtp_id, modname, - func, nname)) != DTRACE_IDNONE) { + func, sdp->sdp_name)) != DTRACE_IDNONE) { old = dtrace_probe_arg(prov->sdtp_id, id); ASSERT(old != NULL); @@ -158,13 +144,14 @@ __sdt_provide_module(void *arg, struct modctl *ctl) old->sdp_next = sdp; } else { sdp->sdp_id = dtrace_probe_create(prov->sdtp_id, - modname, func, nname, SDT_AFRAMES, sdp); + modname, func, sdp->sdp_name, SDT_AFRAMES, sdp); mp->sdt_nprobes++; } #if 0 - printf("__sdt_provide_module: sdpd=0x%p sdp=0x%p name=%s, id=%d\n", sdpd, sdp, nname, sdp->sdp_id); + printf("__sdt_provide_module: sdpd=0x%p sdp=0x%p name=%s, id=%d\n", sdpd, sdp, + sdp->sdp_name, sdp->sdp_id); #endif sdp->sdp_hashnext = @@ -264,7 +251,7 @@ sdt_enable(void *arg, dtrace_id_t id, void *parg) goto err; } - dtrace_casptr(&tempDTraceTrapHook, NULL, fbt_perfCallback); + dtrace_casptr(&tempDTraceTrapHook, NULL, ptrauth_nop_cast(void *, &fbt_perfCallback)); if (tempDTraceTrapHook != (perfCallback)fbt_perfCallback) { if (sdt_verbose) { cmn_err(CE_NOTE, "sdt_enable is failing for probe %s " @@ -418,26 +405,20 @@ _sdt_open(dev_t dev, int flags, int devtype, struct proc *p) #define SDT_MAJOR -24 /* let the kernel pick the device number */ -/* - * A struct describing which functions will get invoked for certain - * actions. - */ -static struct cdevsw sdt_cdevsw = +static const struct cdevsw sdt_cdevsw = { - _sdt_open, /* open */ - eno_opcl, /* close */ - eno_rdwrt, /* read */ - eno_rdwrt, /* write */ - eno_ioctl, /* ioctl */ - (stop_fcn_t *)nulldev, /* stop */ - (reset_fcn_t *)nulldev, /* reset */ - NULL, /* tty's */ - eno_select, /* select */ - eno_mmap, /* mmap */ - eno_strat, /* strategy */ - eno_getc, /* getc */ - eno_putc, /* putc */ - 0 /* type */ + .d_open = _sdt_open, + .d_close = eno_opcl, + .d_read = eno_rdwrt, + .d_write = eno_rdwrt, + .d_ioctl = eno_ioctl, + .d_stop = (stop_fcn_t *)nulldev, + .d_reset = (reset_fcn_t *)nulldev, + .d_select = eno_select, + .d_mmap = eno_mmap, + .d_strategy = eno_strat, + .d_reserved_1 = eno_getc, + .d_reserved_2 = eno_putc, }; static struct modctl g_sdt_kernctl; @@ -446,6 +427,38 @@ static struct module g_sdt_mach_module; #include #include +/* + * Represents single record in __DATA,__sdt section. 
+ */ +typedef struct dtrace_sdt_def { + uintptr_t dsd_addr; /* probe site location */ + const char *dsd_prov; /* provider's name */ + const char *dsd_name; /* probe's name */ +} __attribute__((__packed__)) dtrace_sdt_def_t; + +/* + * Creates a copy of name and unescapes '-' characters. + */ +static char * +sdt_strdup_name(const char *name) +{ + size_t len = strlen(name) + 1; + size_t i, j; + char *nname = kmem_alloc(len, KM_SLEEP); + + for (i = 0, j = 0; name[j] != '\0'; i++) { + if (name[j] == '_' && name[j + 1] == '_') { + nname[i] = '-'; + j += 2; + } else { + nname[i] = name[j++]; + } + } + + nname[i] = '\0'; + return nname; +} + void sdt_early_init( void ) { @@ -459,10 +472,12 @@ sdt_early_init( void ) kernel_mach_header_t *mh; struct load_command *cmd; kernel_segment_command_t *orig_ts = NULL, *orig_le = NULL; + kernel_section_t *orig_dt = NULL; struct symtab_command *orig_st = NULL; kernel_nlist_t *sym = NULL; char *strings; unsigned int i; + unsigned int len; g_sdt_mach_module.sdt_nprobes = 0; g_sdt_mach_module.sdt_probes = NULL; @@ -499,6 +514,12 @@ sdt_early_init( void ) cmd = (struct load_command *) ((uintptr_t) cmd + cmd->cmdsize); } + /* Locate DTrace SDT section in the object. */ + if ((orig_dt = getsectbyname("__DATA", "__sdt")) == NULL) { + printf("DTrace section not found.\n"); + return; + } + if ((orig_ts == NULL) || (orig_st == NULL) || (orig_le == NULL)) { return; } @@ -506,88 +527,68 @@ sdt_early_init( void ) sym = (kernel_nlist_t *)(orig_le->vmaddr + orig_st->symoff - orig_le->fileoff); strings = (char *)(orig_le->vmaddr + orig_st->stroff - orig_le->fileoff); - for (i = 0; i < orig_st->nsyms; i++) { - uint8_t n_type = sym[i].n_type & (N_TYPE | N_EXT); - char *name = strings + sym[i].n_un.n_strx; - const char *prev_name; + /* + * Iterate over SDT section and establish all SDT probes. + */ + dtrace_sdt_def_t *sdtdef = (dtrace_sdt_def_t *)(orig_dt->addr); + for (size_t k = 0; k < orig_dt->size / sizeof(dtrace_sdt_def_t); k++, sdtdef++) { + const char *funcname; unsigned long best; - unsigned int j; - /* Check that the symbol is a global and that it has a name. */ - if (((N_SECT | N_EXT) != n_type && (N_ABS | N_EXT) != n_type)) { - continue; - } - - if (0 == sym[i].n_un.n_strx) { /* iff a null, "", name. */ - continue; - } - - /* Lop off omnipresent leading underscore. */ - if (*name == '_') { - name += 1; - } + sdt_probedesc_t *sdpd = kmem_alloc(sizeof(sdt_probedesc_t), KM_SLEEP); - if (strncmp(name, DTRACE_PROBE_PREFIX, sizeof(DTRACE_PROBE_PREFIX) - 1) == 0) { - sdt_probedesc_t *sdpd = kmem_alloc(sizeof(sdt_probedesc_t), KM_SLEEP); - int len = strlen(name) + 1; + /* Unescape probe name and keep a note of the size of original memory allocation. */ + sdpd->sdpd_name = sdt_strdup_name(sdtdef->dsd_name); + sdpd->sdpd_namelen = strlen(sdtdef->dsd_name) + 1; - sdpd->sdpd_name = kmem_alloc(len, KM_SLEEP); - strncpy(sdpd->sdpd_name, name, len); /* NUL termination is ensured. */ + /* Used only for provider structure lookup so there is no need to make dynamic copy. */ + sdpd->sdpd_prov = sdtdef->dsd_prov; - prev_name = ""; - best = 0; + /* + * Find the symbol immediately preceding the sdt probe site just discovered, + * that symbol names the function containing the sdt probe. + */ + funcname = ""; + for (i = 0; i < orig_st->nsyms; i++) { + uint8_t jn_type = sym[i].n_type & N_TYPE; + char *jname = strings + sym[i].n_un.n_strx; - /* - * Find the symbol immediately preceding the sdt probe site just discovered, - * that symbol names the function containing the sdt probe. 
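The rewritten sdt_early_init above no longer scans symbol names for a probe prefix; it walks fixed-size records emitted into a dedicated __DATA,__sdt section, each record giving the probe site address, provider name, and probe name. A standalone sketch of that walk-a-section-of-records pattern, using a hypothetical sdt_record_t and an in-memory array standing in for the Mach-O section:

#include <stdio.h>
#include <stdint.h>

/* Hypothetical record, mirroring the shape of dtrace_sdt_def_t in the patch. */
typedef struct sdt_record {
    uintptr_t   addr;   /* probe site location */
    const char *prov;   /* provider name */
    const char *name;   /* probe name */
} sdt_record_t;

/* Stand-in for the contents of the __DATA,__sdt section. */
static const sdt_record_t section[] = {
    { 0x1000, "vminfo", "zalloc" },
    { 0x1040, "hv",     "guest-enter" },
};

int main(void)
{
    /* Same iteration shape as the kernel loop: section size / record size. */
    size_t nrecords = sizeof(section) / sizeof(section[0]);

    for (size_t k = 0; k < nrecords; k++) {
        const sdt_record_t *r = &section[k];
        printf("probe %s:%s at site 0x%lx\n", r->prov, r->name,
            (unsigned long)r->addr);
    }
    return 0;
}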
- */ - for (j = 0; j < orig_st->nsyms; j++) { - uint8_t jn_type = sym[j].n_type & N_TYPE; - char *jname = strings + sym[j].n_un.n_strx; - - if ((N_SECT != jn_type && N_ABS != jn_type)) { - continue; - } + if ((N_SECT != jn_type && N_ABS != jn_type)) { + continue; + } - if (0 == sym[j].n_un.n_strx) { /* iff a null, "", name. */ - continue; - } + if (0 == sym[i].n_un.n_strx) { /* iff a null, "", name. */ + continue; + } - if (*jname == '_') { - jname += 1; - } + if (*jname == '_') { + jname += 1; + } - if (*(unsigned long *)sym[i].n_value <= (unsigned long)sym[j].n_value) { - continue; - } + if (sdtdef->dsd_addr <= (unsigned long)sym[i].n_value) { + continue; + } - if ((unsigned long)sym[j].n_value > best) { - best = (unsigned long)sym[j].n_value; - prev_name = jname; - } + if ((unsigned long)sym[i].n_value > best) { + best = (unsigned long)sym[i].n_value; + funcname = jname; } + } - sdpd->sdpd_func = kmem_alloc((len = strlen(prev_name) + 1), KM_SLEEP); - strncpy(sdpd->sdpd_func, prev_name, len); /* NUL termination is ensured. */ + len = strlen(funcname) + 1; + sdpd->sdpd_func = kmem_alloc(len, KM_SLEEP); + strncpy(sdpd->sdpd_func, funcname, len); - sdpd->sdpd_offset = *(unsigned long *)sym[i].n_value; + sdpd->sdpd_offset = sdtdef->dsd_addr; #if defined(__arm__) - /* PR8353094 - mask off thumb-bit */ - sdpd->sdpd_offset &= ~0x1U; + /* PR8353094 - mask off thumb-bit */ + sdpd->sdpd_offset &= ~0x1U; #elif defined(__arm64__) - sdpd->sdpd_offset &= ~0x1LU; + sdpd->sdpd_offset &= ~0x1LU; #endif /* __arm__ */ -#if 0 - printf("sdt_init: sdpd_offset=0x%lx, n_value=0x%lx, name=%s\n", - sdpd->sdpd_offset, *(unsigned long *)sym[i].n_value, name); -#endif - - sdpd->sdpd_next = g_sdt_mach_module.sdt_probes; - g_sdt_mach_module.sdt_probes = sdpd; - } else { - prev_name = name; - } + sdpd->sdpd_next = g_sdt_mach_module.sdt_probes; + g_sdt_mach_module.sdt_probes = sdpd; } } } @@ -630,7 +631,7 @@ sdt_provide_module(void *arg, struct modctl *ctl) sdt_probedesc_t *sdpd = g_sdt_mach_module.sdt_probes; while (sdpd) { sdt_probedesc_t *this_sdpd = sdpd; - kmem_free((void *)sdpd->sdpd_name, strlen(sdpd->sdpd_name) + 1); + kmem_free((void *)sdpd->sdpd_name, sdpd->sdpd_namelen); kmem_free((void *)sdpd->sdpd_func, strlen(sdpd->sdpd_func) + 1); sdpd = sdpd->sdpd_next; kmem_free((void *)this_sdpd, sizeof(sdt_probedesc_t)); diff --git a/bsd/dev/dtrace/sdt_subr.c b/bsd/dev/dtrace/sdt_subr.c index c9c52fb6a..28e92734f 100644 --- a/bsd/dev/dtrace/sdt_subr.c +++ b/bsd/dev/dtrace/sdt_subr.c @@ -73,6 +73,14 @@ static dtrace_pattr_t sdt_attr = { { DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_ISA }, }; +static dtrace_pattr_t hv_attr = { + { DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_ISA }, + { DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN }, + { DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN }, + { DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN }, + { DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_ISA }, +}; + sdt_provider_t sdt_providers[] = { { "vtrace", "__vtrace____", &vtrace_attr, 0 }, { "sysinfo", "__cpu_sysinfo____", &info_attr, 0 }, @@ -95,6 +103,7 @@ sdt_provider_t sdt_providers[] = { #if KASAN { "kasan", "__kasan____", &stab_attr, 0 }, #endif + { "hv", "__hv____", &hv_attr, 0 }, { NULL, NULL, NULL, 0 } }; @@ -959,6 +968,10 @@ sdt_argdesc_t sdt_args[] = { {"vminfo", "zalloc", 1, 1, "void*", "void*" }, {"vminfo", "zfree", 0, 0, "zone_t", "zone_t" }, {"vminfo", "zfree", 1, 1, "void*", 
"void*" }, + {"hv", "guest-enter", 0, 0, "uint32_t", "uint32_t" }, + {"hv", "guest-enter", 1, 1, "uint64_t *", "guest_regs_t *" }, + {"hv", "guest-exit", 0, 0, "uint32_t", "uint32_t" }, + {"hv", "guest-exit", 1, 1, "uint64_t *", "guest_regs_t *" }, { NULL, NULL, 0, 0, NULL, NULL } }; diff --git a/bsd/dev/dtrace/systrace.c b/bsd/dev/dtrace/systrace.c index ef85a1fca..0e483d760 100644 --- a/bsd/dev/dtrace/systrace.c +++ b/bsd/dev/dtrace/systrace.c @@ -23,6 +23,8 @@ * Use is subject to license terms. */ +#include + #include #include @@ -378,7 +380,7 @@ static dtrace_provider_id_t systrace_id; #define systrace_init _systrace_init static void -systrace_init(struct sysent *actual, systrace_sysent_t **interposed) +systrace_init(const struct sysent *actual, systrace_sysent_t **interposed) { systrace_sysent_t *ssysent = *interposed; /* Avoid sysent shadow warning * from bsd/sys/sysent.h */ @@ -390,7 +392,7 @@ systrace_init(struct sysent *actual, systrace_sysent_t **interposed) } for (i = 0; i < NSYSCALL; i++) { - struct sysent *a = &actual[i]; + const struct sysent *a = &actual[i]; systrace_sysent_t *s = &ssysent[i]; if (LOADABLE_SYSCALL(a) && !LOADED_SYSCALL(a)) { @@ -487,7 +489,7 @@ systrace_enable(void *arg, dtrace_id_t id, void *parg) lck_mtx_lock(&dtrace_systrace_lock); if (sysent[sysnum].sy_callc == systrace_sysent[sysnum].stsy_underlying) { - vm_offset_t dss = (vm_offset_t)&dtrace_systrace_syscall; + vm_offset_t dss = ptrauth_nop_cast(vm_offset_t, &dtrace_systrace_syscall); ml_nofault_copy((vm_offset_t)&dss, (vm_offset_t)&sysent[sysnum].sy_callc, sizeof(vm_offset_t)); } lck_mtx_unlock(&dtrace_systrace_lock); @@ -843,7 +845,7 @@ machtrace_enable(void *arg, dtrace_id_t id, void *parg) lck_mtx_lock(&dtrace_systrace_lock); if (mach_trap_table[sysnum].mach_trap_function == machtrace_sysent[sysnum].stsy_underlying) { - vm_offset_t dss = (vm_offset_t)&dtrace_machtrace_syscall; + vm_offset_t dss = ptrauth_nop_cast(vm_offset_t, &dtrace_machtrace_syscall); ml_nofault_copy((vm_offset_t)&dss, (vm_offset_t)&mach_trap_table[sysnum].mach_trap_function, sizeof(vm_offset_t)); } @@ -928,26 +930,20 @@ _systrace_open(dev_t dev, int flags, int devtype, struct proc *p) #define SYSTRACE_MAJOR -24 /* let the kernel pick the device number */ -/* - * A struct describing which functions will get invoked for certain - * actions. - */ static struct cdevsw systrace_cdevsw = { - _systrace_open, /* open */ - eno_opcl, /* close */ - eno_rdwrt, /* read */ - eno_rdwrt, /* write */ - eno_ioctl, /* ioctl */ - (stop_fcn_t *)nulldev, /* stop */ - (reset_fcn_t *)nulldev, /* reset */ - NULL, /* tty's */ - eno_select, /* select */ - eno_mmap, /* mmap */ - eno_strat, /* strategy */ - eno_getc, /* getc */ - eno_putc, /* putc */ - 0 /* type */ + .d_open = _systrace_open, + .d_close = eno_opcl, + .d_read = eno_rdwrt, + .d_write = eno_rdwrt, + .d_ioctl = eno_ioctl, + .d_stop = (stop_fcn_t *)nulldev, + .d_reset = (reset_fcn_t *)nulldev, + .d_select = eno_select, + .d_mmap = eno_mmap, + .d_strategy = eno_strat, + .d_reserved_1 = eno_getc, + .d_reserved_2 = eno_putc, }; void systrace_init( void ); diff --git a/bsd/dev/i386/conf.c b/bsd/dev/i386/conf.c index e81719bf6..b2ad22676 100644 --- a/bsd/dev/i386/conf.c +++ b/bsd/dev/i386/conf.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997-2017 Apple Computer, Inc. All rights reserved. + * Copyright (c) 1997-2020 Apple Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -259,7 +259,7 @@ isdisk(dev_t dev, int type) if (maj == NODEV) { break; } - /* FALL THROUGH */ + OS_FALLTHROUGH; case VBLK: if (bdevsw[maj].d_type == D_DISK) { return 1; diff --git a/bsd/dev/i386/dis_tables.c b/bsd/dev/i386/dis_tables.c index f167167ca..db7a139d0 100644 --- a/bsd/dev/i386/dis_tables.c +++ b/bsd/dev/i386/dis_tables.c @@ -3913,7 +3913,7 @@ mm_shift: /* accumulator to memory operand */ case AO: vbit = 1; - /*FALLTHROUGH*/ + OS_FALLTHROUGH; /* memory operand to accumulator */ case OA: @@ -3929,7 +3929,7 @@ mm_shift: /* segment register to memory or register operand */ case SM: vbit = 1; - /*FALLTHROUGH*/ + OS_FALLTHROUGH; /* memory or register operand to segment register */ case MS: @@ -3999,12 +3999,12 @@ just_mem: break; } - /*FALLTHROUGH*/ + OS_FALLTHROUGH; /* prefetch instruction - memory operand, but no memory acess */ case PREF: NOMEM; - /*FALLTHROUGH*/ + OS_FALLTHROUGH; /* single memory or register operand */ case M: @@ -4048,7 +4048,7 @@ just_mem: NOMEM; break; } - /*FALLTHROUGH*/ + OS_FALLTHROUGH; case SVM: if (mode == 3) { #if DIS_TEXT @@ -4086,7 +4086,7 @@ just_mem: NOMEM; break; } - /*FALLTHROUGH*/ + OS_FALLTHROUGH; case MONITOR_MWAIT: if (mode == 3) { if (r_m == 0) { @@ -4119,7 +4119,7 @@ just_mem: goto error; } } - /*FALLTHROUGH*/ + OS_FALLTHROUGH; case XGETBV_XSETBV: if (mode == 3) { if (r_m == 0) { @@ -4139,7 +4139,7 @@ just_mem: } } - /*FALLTHROUGH*/ + OS_FALLTHROUGH; case MO: /* Similar to M, but only memory (no direct registers) */ wbit = LONG_OPND; @@ -4156,21 +4156,21 @@ just_mem: case 2: vbit = 1; - /*FALLTHROUGH*/ + OS_FALLTHROUGH; case 0: wbit = CONTROL_OPND; break; case 3: vbit = 1; - /*FALLTHROUGH*/ + OS_FALLTHROUGH; case 1: wbit = DEBUG_OPND; break; case 6: vbit = 1; - /*FALLTHROUGH*/ + OS_FALLTHROUGH; case 4: wbit = TEST_OPND; break; @@ -4751,7 +4751,7 @@ xmmprm: if (dp->it_invalid32 && cpu_mode != SIZE64) goto error; NOMEM; - /*FALLTHROUGH*/ + OS_FALLTHROUGH; case IMPLMEM: break; @@ -4793,7 +4793,7 @@ xmmprm: /* float reg to float reg, with ret bit present */ case FF: vbit = opcode2 >> 2 & 0x1; /* vbit = 1: st -> st(i) */ - /*FALLTHROUGH*/ + OS_FALLTHROUGH; case FFC: /* case for vbit always = 0 */ #ifdef DIS_TEXT x->d86_numopnds = 2; @@ -5289,6 +5289,7 @@ L_VEX_RM: case PREFIX: case UNKNOWN: NOMEM; + OS_FALLTHROUGH; default: goto error; } /* end switch */ diff --git a/bsd/dev/i386/dtrace_isa.c b/bsd/dev/i386/dtrace_isa.c index 6785dc536..d0ac4f6c8 100644 --- a/bsd/dev/i386/dtrace_isa.c +++ b/bsd/dev/i386/dtrace_isa.c @@ -49,6 +49,7 @@ typedef x86_saved_state_t savearea_t; #include #include #include +#include /* * APPLE NOTE: The regmap is used to decode which 64bit uregs[] register @@ -260,6 +261,38 @@ dtrace_getreg(struct regs *savearea, uint_t reg) } } +uint64_t +dtrace_getvmreg(uint_t ndx) +{ + uint64_t reg = 0; + bool failed = false; + + /* Any change in the vmread final opcode must be reflected in dtrace_handle_trap below. */ + __asm__ __volatile__( + "vmread %2, %0\n" + "ja 1f\n" + "mov $1, %1\n" + "1:\n" + : "=a" (reg), "+r" (failed) : "D" ((uint64_t)ndx)); + + /* + * Check for fault in vmreg first. If DTrace has recovered the fault cause by + * vmread above then the value in failed will be unreliable. + */ + if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ILLOP)) { + return 0; + } + + /* If vmread succeeded but failed because CF or ZS is 1 report fail. 
*/ + if (failed) { + DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR); + cpu_core[CPU->cpu_id].cpuc_dtrace_illval = ndx; + return 0; + } + + return reg; +} + #define RETURN_OFFSET 4 #define RETURN_OFFSET64 8 @@ -267,10 +300,10 @@ static int dtrace_getustack_common(uint64_t *pcstack, int pcstack_limit, user_addr_t pc, user_addr_t sp) { -#if 0 volatile uint16_t *flags = (volatile uint16_t *)&cpu_core[CPU->cpu_id].cpuc_dtrace_flags; +#if 0 uintptr_t oldcontext = lwp->lwp_oldcontext; /* XXX signal stack crawl */ size_t s1, s2; #endif @@ -333,17 +366,11 @@ dtrace_getustack_common(uint64_t *pcstack, int pcstack_limit, user_addr_t pc, } } -#if 0 /* XXX */ - /* - * This is totally bogus: if we faulted, we're going to clear - * the fault and break. This is to deal with the apparently - * broken Java stacks on x86. - */ + /* Truncate ustack if the iterator causes fault. */ if (*flags & CPU_DTRACE_FAULT) { *flags &= ~CPU_DTRACE_FAULT; break; } -#endif } return (ret); @@ -357,6 +384,7 @@ static int dtrace_adjust_stack(uint64_t **pcstack, int *pcstack_limit, user_addr_t *pc, user_addr_t sp) { + volatile uint16_t *flags = (volatile uint16_t *) &cpu_core[CPU->cpu_id].cpuc_dtrace_flags; int64_t missing_tos; int rc = 0; boolean_t is64Bit = proc_is64bit(current_proc()); @@ -381,6 +409,11 @@ dtrace_adjust_stack(uint64_t **pcstack, int *pcstack_limit, user_addr_t *pc, *pc = dtrace_fuword64(sp); else *pc = dtrace_fuword32(sp); + + /* Truncate ustack if the iterator causes fault. */ + if (*flags & CPU_DTRACE_FAULT) { + *flags &= ~CPU_DTRACE_FAULT; + } } else { /* * We might have a top of stack override, in which case we just @@ -639,17 +672,11 @@ dtrace_getufpstack(uint64_t *pcstack, uint64_t *fpstack, int pcstack_limit) } } -#if 0 /* XXX */ - /* - * This is totally bogus: if we faulted, we're going to clear - * the fault and break. This is to deal with the apparently - * broken Java stacks on x86. - */ + /* Truncate ustack if the iterator causes fault. */ if (*flags & CPU_DTRACE_FAULT) { *flags &= ~CPU_DTRACE_FAULT; break; } -#endif } zero: @@ -840,3 +867,34 @@ dtrace_toxic_ranges(void (*func)(uintptr_t base, uintptr_t limit)) func(VM_MAX_KERNEL_ADDRESS + 1, ~(uintptr_t)0); } +/* + * Trap Safety + */ +extern boolean_t dtrace_handle_trap(int, x86_saved_state_t *); + +boolean_t +dtrace_handle_trap(int trapno, x86_saved_state_t *state) +{ + x86_saved_state64_t *saved_state = saved_state64(state); + + if (!DTRACE_CPUFLAG_ISSET(CPU_DTRACE_NOFAULT)) { + return FALSE; + } + + /* + * General purpose solution would require pulling in disassembler. Right now there + * is only one specific case to be handled so it is hardcoded here. 
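dtrace_getvmreg above issues a vmread that can raise an invalid-opcode fault when no VMCS is active, and the matching dtrace_handle_trap recognizes that one hard-coded instruction encoding and skips it by advancing RIP, leaving a per-CPU flag set for the caller to inspect. A simplified, hypothetical sketch of that "attempt the read, then check the recovery flag before the instruction's own failure bit" control flow, with the privileged instruction replaced by an ordinary helper so the example stays portable:

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

/* Hypothetical stand-in for the per-CPU DTrace illegal-op flag. */
static bool cpu_flag_illop;

/* Pretend privileged read; a real trap handler would set the flag and
 * skip the faulting instruction instead of taking this branch. */
static uint64_t
attempt_privileged_read(uint32_t field, bool *failed)
{
    if (field > 0x100) {          /* simulate a field that traps */
        cpu_flag_illop = true;
        return 0;
    }
    *failed = false;
    return 0x1234;                /* pretend register value */
}

static uint64_t
safe_read(uint32_t field)
{
    bool failed = true;
    uint64_t value = attempt_privileged_read(field, &failed);

    /* Check the recovery flag first: if the read trapped and was skipped,
     * both 'failed' and 'value' are unreliable, as the patch notes. */
    if (cpu_flag_illop) {
        cpu_flag_illop = false;
        return 0;
    }
    if (failed) {                 /* instruction ran but reported failure */
        return 0;
    }
    return value;
}

int main(void)
{
    printf("ok field:  0x%llx\n", (unsigned long long)safe_read(0x2));
    printf("bad field: 0x%llx\n", (unsigned long long)safe_read(0x200));
    return 0;
}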
+ */ + if (trapno == T_INVALID_OPCODE) { + uint8_t *inst = (uint8_t *)saved_state->isf.rip; + + /* vmread %rdi, %rax */ + if (inst[0] == 0x0f && inst[1] == 0x78 && inst[2] == 0xf8) { + DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); + saved_state->isf.rip += 3; + return TRUE; + } + } + + return FALSE; +} diff --git a/bsd/dev/i386/dtrace_subr_x86.c b/bsd/dev/i386/dtrace_subr_x86.c index e78af6efc..cbf970c88 100644 --- a/bsd/dev/i386/dtrace_subr_x86.c +++ b/bsd/dev/i386/dtrace_subr_x86.c @@ -126,7 +126,7 @@ dtrace_user_probe(x86_saved_state_t *regs) if (regs64) { regs64->isf.rip = npc; } else { - regs32->eip = npc; + regs32->eip = (uint32_t)npc; } return KERN_SUCCESS; } @@ -145,7 +145,7 @@ dtrace_user_probe(x86_saved_state_t *regs) if (regs64) { regs64->isf.rip = npc; } else { - regs32->eip = npc; + regs32->eip = (uint32_t)npc; } return KERN_SUCCESS; diff --git a/bsd/dev/i386/fasttrap_isa.c b/bsd/dev/i386/fasttrap_isa.c index 6801862e0..3577b3109 100644 --- a/bsd/dev/i386/fasttrap_isa.c +++ b/bsd/dev/i386/fasttrap_isa.c @@ -313,22 +313,22 @@ fasttrap_tracepoint_init(proc_t *p, fasttrap_tracepoint_t *tp, user_addr_t pc, switch (instr[start]) { case FASTTRAP_PREFIX_SS: seg++; - /*FALLTHRU*/ + OS_FALLTHROUGH; case FASTTRAP_PREFIX_GS: seg++; - /*FALLTHRU*/ + OS_FALLTHROUGH; case FASTTRAP_PREFIX_FS: seg++; - /*FALLTHRU*/ + OS_FALLTHROUGH; case FASTTRAP_PREFIX_ES: seg++; - /*FALLTHRU*/ + OS_FALLTHROUGH; case FASTTRAP_PREFIX_DS: seg++; - /*FALLTHRU*/ + OS_FALLTHROUGH; case FASTTRAP_PREFIX_CS: seg++; - /*FALLTHRU*/ + OS_FALLTHROUGH; case FASTTRAP_PREFIX_OPERAND: case FASTTRAP_PREFIX_ADDRESS: case FASTTRAP_PREFIX_LOCK: diff --git a/bsd/dev/i386/fbt_x86.c b/bsd/dev/i386/fbt_x86.c index 0b7d9516e..e81527473 100644 --- a/bsd/dev/i386/fbt_x86.c +++ b/bsd/dev/i386/fbt_x86.c @@ -48,6 +48,8 @@ #include #include +#include + #define DTRACE_INVOP_NOP_SKIP 1 #define DTRACE_INVOP_MOVL_ESP_EBP 10 @@ -126,9 +128,7 @@ fbt_invop(uintptr_t addr, uintptr_t *state, uintptr_t rval) } #define IS_USER_TRAP(regs) (regs && (((regs)->isf.cs & 3) != 0)) -#define T_INVALID_OPCODE 6 #define FBT_EXCEPTION_CODE T_INVALID_OPCODE -#define T_PREEMPT 255 kern_return_t fbt_perfCallback( diff --git a/bsd/dev/i386/kern_machdep.c b/bsd/dev/i386/kern_machdep.c index 1512e6b0c..890c15f25 100644 --- a/bsd/dev/i386/kern_machdep.c +++ b/bsd/dev/i386/kern_machdep.c @@ -52,7 +52,7 @@ extern int bootarg_no32exec; /* bsd_init.c */ * by 32-bit binaries. 0 means unsupported. **********************************************************************/ int -grade_binary(cpu_type_t exectype, cpu_subtype_t execsubtype, bool allow_simulator_binary __unused) +grade_binary(cpu_type_t exectype, cpu_subtype_t execsubtype, cpu_subtype_t execfeatures __unused, bool allow_simulator_binary __unused) { cpu_subtype_t hostsubtype = cpu_subtype(); @@ -86,9 +86,3 @@ grade_binary(cpu_type_t exectype, cpu_subtype_t execsubtype, bool allow_simulato return 0; } - -boolean_t -pie_required(cpu_type_t exectype __unused, cpu_subtype_t execsubtype __unused) -{ - return FALSE; -} diff --git a/bsd/dev/i386/sysctl.c b/bsd/dev/i386/sysctl.c index 567c5817e..944df9f64 100644 --- a/bsd/dev/i386/sysctl.c +++ b/bsd/dev/i386/sysctl.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003-2019 Apple Inc. All rights reserved. + * Copyright (c) 2003-2020 Apple Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -40,6 +40,7 @@ #include #include #include +#include static int @@ -177,7 +178,7 @@ cpu_leaf7_features SYSCTL_HANDLER_ARGS if (leaf7_extfeatures != 0) { strlcat(buf, " ", sizeof(buf)); cpuid_get_leaf7_extfeature_names(leaf7_extfeatures, buf + strlen(buf), - sizeof(buf) - strlen(buf)); + (unsigned int)(sizeof(buf) - strlen(buf))); } return SYSCTL_OUT(req, buf, strlen(buf) + 1); @@ -247,19 +248,26 @@ cpu_flex_ratio_max SYSCTL_HANDLER_ARGS static int cpu_ucode_update SYSCTL_HANDLER_ARGS { - __unused struct sysctl_oid *unused_oidp = oidp; - __unused void *unused_arg1 = arg1; - __unused int unused_arg2 = arg2; +#pragma unused(oidp, arg1, arg2) uint64_t addr; int error; + /* Can't update microcode from within a VM. */ + + if (cpuid_features() & CPUID_FEATURE_VMM) { + return ENODEV; + } + + if (req->newptr == USER_ADDR_NULL) { + return EINVAL; + } + error = SYSCTL_IN(req, &addr, sizeof(addr)); if (error) { return error; } - int ret = ucode_interface(addr); - return ret; + return ucode_interface(addr); } extern uint64_t panic_restart_timeout; @@ -271,12 +279,16 @@ panic_set_restart_timeout(__unused struct sysctl_oid *oidp, __unused void *arg1, if (panic_restart_timeout) { absolutetime_to_nanoseconds(panic_restart_timeout, &nstime); - old_value = nstime / NSEC_PER_SEC; + old_value = (int)MIN(nstime / NSEC_PER_SEC, INT_MAX); } - error = sysctl_io_number(req, old_value, sizeof(int), &new_value, &changed); + error = sysctl_io_number(req, old_value, sizeof(old_value), &new_value, &changed); if (error == 0 && changed) { - nanoseconds_to_absolutetime(((uint64_t)new_value) * NSEC_PER_SEC, &panic_restart_timeout); + if (new_value >= 0) { + nanoseconds_to_absolutetime(((uint64_t)new_value) * NSEC_PER_SEC, &panic_restart_timeout); + } else { + error = EDOM; + } } return error; } @@ -304,6 +316,26 @@ misc_interrupt_latency_max(__unused struct sysctl_oid *oidp, __unused void *arg1 } #if DEVELOPMENT || DEBUG +/* + * Populates a string with each CPU's tsc synch delta. + */ +static int +x86_cpu_tsc_deltas(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req) +{ + int err; + uint32_t ncpus = ml_wait_max_cpus(); + uint32_t buflen = (2 /* hex digits */ * sizeof(uint64_t) + 3 /* for "0x" + " " */) * ncpus + 1; + char *buf = kalloc(buflen); + + cpu_data_tsc_sync_deltas_string(buf, buflen, 0, ncpus - 1); + + err = sysctl_io_string(req, buf, buflen, 0, 0); + + kfree(buf, buflen); + + return err; +} + /* * Triggers a machine-check exception - for a suitably configured kernel only. 
*/ @@ -813,6 +845,11 @@ SYSCTL_QUAD(_machdep_tsc, OID_AUTO, at_boot, CTLFLAG_RD | CTLFLAG_LOCKED, &tsc_at_boot, ""); SYSCTL_QUAD(_machdep_tsc, OID_AUTO, rebase_abs_time, CTLFLAG_RD | CTLFLAG_LOCKED, &tsc_rebase_abs_time, ""); +#if DEVELOPMENT || DEBUG +SYSCTL_PROC(_machdep_tsc, OID_AUTO, synch_deltas, + CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_LOCKED, + 0, 0, x86_cpu_tsc_deltas, "A", "TSC synch deltas"); +#endif SYSCTL_NODE(_machdep_tsc, OID_AUTO, nanotime, CTLFLAG_RD | CTLFLAG_LOCKED, NULL, "TSC to ns conversion"); @@ -839,6 +876,10 @@ SYSCTL_NODE(_machdep, OID_AUTO, misc, CTLFLAG_RW | CTLFLAG_LOCKED, 0, extern uint32_t mp_interrupt_watchdog_events; SYSCTL_UINT(_machdep_misc, OID_AUTO, interrupt_watchdog_events, CTLFLAG_RW | CTLFLAG_LOCKED, &mp_interrupt_watchdog_events, 0, ""); + +extern int insnstream_force_cacheline_mismatch; +SYSCTL_INT(_machdep_misc, OID_AUTO, insnstream_force_clmismatch, + CTLFLAG_RW | CTLFLAG_LOCKED, &insnstream_force_cacheline_mismatch, 0, ""); #endif @@ -852,6 +893,11 @@ SYSCTL_PROC(_machdep_misc, OID_AUTO, interrupt_latency_max, 0, 0, misc_interrupt_latency_max, "A", "Maximum Interrupt latency"); +extern boolean_t is_x2apic; +SYSCTL_INT(_machdep, OID_AUTO, x2apic_enabled, + CTLFLAG_KERN | CTLFLAG_RD | CTLFLAG_LOCKED, + &is_x2apic, 0, ""); + #if DEVELOPMENT || DEBUG SYSCTL_PROC(_machdep_misc, OID_AUTO, machine_check_panic, CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_LOCKED, @@ -960,7 +1006,7 @@ misc_nmis(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int ar { int new = 0, old = 0, changed = 0, error; - old = NMI_count; + old = (int)MIN(NMI_count, INT_MAX); error = sysctl_io_number(req, old, sizeof(int), &new, &changed); if (error == 0 && changed) { diff --git a/bsd/dev/i386/systemcalls.c b/bsd/dev/i386/systemcalls.c index a5a7255bd..8a12ad5a3 100644 --- a/bsd/dev/i386/systemcalls.c +++ b/bsd/dev/i386/systemcalls.c @@ -91,7 +91,7 @@ unix_syscall(x86_saved_state_t *state) thread_t thread; void *vt; unsigned int code, syscode; - struct sysent *callp; + const struct sysent *callp; int error; vm_offset_t params; @@ -291,7 +291,7 @@ unix_syscall64(x86_saved_state_t *state) thread_t thread; void *vt; unsigned int code, syscode; - struct sysent *callp; + const struct sysent *callp; int args_in_regs; boolean_t args_start_at_rdi; int error; @@ -511,7 +511,7 @@ unix_syscall_return(int error) struct uthread *uthread; struct proc *p; unsigned int code; - struct sysent *callp; + const struct sysent *callp; thread = current_thread(); uthread = get_bsdthread_info(thread); diff --git a/bsd/dev/i386/unix_signal.c b/bsd/dev/i386/unix_signal.c index 724a1d210..1a531a286 100644 --- a/bsd/dev/i386/unix_signal.c +++ b/bsd/dev/i386/unix_signal.c @@ -791,6 +791,8 @@ sigreturn(struct proc *p, struct sigreturn_args *uap, __unused int *retval) ut = (struct uthread *)get_bsdthread_info(thread); + /* see osfmk/kern/restartable.c */ + act_set_ast_reset_pcs(thread); /* * If we are being asked to change the altstack flag on the thread, we * just set/reset it and return (the uap->uctx is not used). 
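Among the sysctl additions above are a read-only machdep.x2apic_enabled integer and, on DEVELOPMENT and DEBUG kernels only, a machdep.tsc.synch_deltas string built by x86_cpu_tsc_deltas. A small user-space sketch of querying them with sysctlbyname; the names are derived from the node/OID pairs in the hunks, and the deltas entry is expected to be absent on RELEASE kernels:

#include <stdio.h>
#include <sys/types.h>
#include <sys/sysctl.h>

int main(void)
{
    int x2apic = 0;
    size_t len = sizeof(x2apic);

    if (sysctlbyname("machdep.x2apic_enabled", &x2apic, &len, NULL, 0) == 0) {
        printf("x2APIC enabled: %d\n", x2apic);
    }

    char deltas[1024];
    len = sizeof(deltas);
    /* Only present on DEVELOPMENT/DEBUG kernels; expect ENOENT elsewhere. */
    if (sysctlbyname("machdep.tsc.synch_deltas", deltas, &len, NULL, 0) == 0) {
        printf("TSC synch deltas: %s\n", deltas);
    } else {
        perror("machdep.tsc.synch_deltas");
    }
    return 0;
}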
@@ -803,9 +805,6 @@ sigreturn(struct proc *p, struct sigreturn_args *uap, __unused int *retval) return 0; } - /* see osfmk/kern/restartable.c */ - act_set_ast_reset_pcs(thread); - bzero(mctxp, sizeof(*mctxp)); sig_xstate = current_xstate(); diff --git a/bsd/dev/mem.c b/bsd/dev/mem.c index 758c358f6..06a5c7432 100644 --- a/bsd/dev/mem.c +++ b/bsd/dev/mem.c @@ -154,7 +154,7 @@ mmioctl(dev_t dev, u_long cmd, __unused caddr_t data, int mmrw(dev_t dev, struct uio *uio, enum uio_rw rw) { - unsigned int c; + user_size_t c; int error = 0; while (uio_resid(uio) > 0) { @@ -230,7 +230,7 @@ mmrw(dev_t dev, struct uio *uio, enum uio_rw rw) break; } - c = min(uio_curriovlen(uio), PAGE_SIZE); + c = MIN(uio_curriovlen(uio), PAGE_SIZE); error = uiomove(devzerobuf, (int)c, uio); if (error) { break; diff --git a/bsd/dev/memdev.c b/bsd/dev/memdev.c index 184862aa2..fd7e8fa3d 100644 --- a/bsd/dev/memdev.c +++ b/bsd/dev/memdev.c @@ -130,7 +130,7 @@ extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va); * D_CANFREE We support B_FREEBUF */ -static struct bdevsw mdevbdevsw = { +static const struct bdevsw mdevbdevsw = { .d_open = mdevopen, .d_close = mdevclose, .d_strategy = mdevstrategy, @@ -140,7 +140,7 @@ static struct bdevsw mdevbdevsw = { .d_type = D_DISK, }; -static struct cdevsw mdevcdevsw = { +static const struct cdevsw mdevcdevsw = { .d_open = mdevopen, .d_close = mdevclose, .d_read = mdevrw, @@ -239,7 +239,7 @@ mdevrw(dev_t dev, struct uio *uio, __unused int ioflag) uio->uio_segflg = UIO_PHYS_USERSPACE; } } - status = uiomove64(mdata, uio_resid(uio), uio); /* Move the data */ + status = uiomove64(mdata, (int)uio_resid(uio), uio); /* Move the data */ uio->uio_segflg = saveflag; /* Restore the flag */ return status; @@ -280,7 +280,7 @@ mdevstrategy(struct buf *bp) } if ((blkoff + buf_count(bp)) > (mdev[devid].mdSize << 12)) { /* Will this read go past end? */ - buf_setcount(bp, ((mdev[devid].mdSize << 12) - blkoff)); /* Yes, trim to max */ + buf_setcount(bp, (uint32_t)((mdev[devid].mdSize << 12) - blkoff)); /* Yes, trim to max */ } /* * make sure the buffer's data area is @@ -308,7 +308,7 @@ mdevstrategy(struct buf *bp) } paddr = (addr64_t)(((addr64_t)pp << 12) | (addr64_t)(vaddr & 4095)); /* Get actual address */ bcopy_phys(fvaddr, paddr, csize); /* Copy this on in */ - mapping_set_mod(paddr >> 12); /* Make sure we know that it is modified */ + mapping_set_mod((ppnum_t)(paddr >> 12)); /* Make sure we know that it is modified */ left = left - csize; /* Calculate what is left */ vaddr = vaddr + csize; /* Move to next sink address */ @@ -441,7 +441,7 @@ mdevioctl(dev_t dev, u_long cmd, caddr_t data, __unused int flag, } memdev_info->mi_mdev = TRUE; memdev_info->mi_phys = (mdev[devid].mdFlags & mdPhys) ? 
TRUE : FALSE; - memdev_info->mi_base = mdev[devid].mdBase; + memdev_info->mi_base = (uint32_t)mdev[devid].mdBase; memdev_info->mi_size = mdev[devid].mdSize; break; diff --git a/bsd/dev/monotonic.c b/bsd/dev/monotonic.c index 6c445d7a3..6fc42ef7f 100644 --- a/bsd/dev/monotonic.c +++ b/bsd/dev/monotonic.c @@ -47,7 +47,7 @@ static int mt_cdev_ioctl(dev_t dev, unsigned long cmd, char *uptr, int fflag, #define MT_NODE "monotonic" -static struct cdevsw mt_cdevsw = { +static const struct cdevsw mt_cdevsw = { .d_open = mt_cdev_open, .d_close = mt_cdev_close, .d_ioctl = mt_cdev_ioctl, diff --git a/bsd/dev/munge.c b/bsd/dev/munge.c index 7d2433cd1..ced3cded2 100644 --- a/bsd/dev/munge.c +++ b/bsd/dev/munge.c @@ -232,6 +232,20 @@ munge_wlll(void *args) out_args[0] = in_args[0]; } +void +munge_wlllww(void *args) +{ + volatile uint64_t *out_args = (volatile uint64_t*)args; + volatile uint32_t *in_args = (volatile uint32_t*)args; + + out_args[5] = in_args[8]; + out_args[4] = in_args[7]; + out_args[3] = *(volatile uint64_t*)&in_args[5]; + out_args[2] = *(volatile uint64_t*)&in_args[3]; + out_args[1] = *(volatile uint64_t*)&in_args[1]; + out_args[0] = in_args[0]; +} + void munge_wllll(void *args) { @@ -300,6 +314,21 @@ munge_wwwlww(void *args) out_args[0] = in_args[0]; } +void +munge_wwwlwww(void *args) +{ + volatile uint64_t *out_args = (volatile uint64_t*)args; + volatile uint32_t *in_args = (volatile uint32_t*)args; + + out_args[6] = in_args[7]; + out_args[5] = in_args[6]; + out_args[4] = in_args[5]; + out_args[3] = *(volatile uint64_t*)&in_args[3]; + out_args[2] = in_args[2]; + out_args[1] = in_args[1]; + out_args[0] = in_args[0]; +} + void munge_wwwl(void *args) { @@ -326,6 +355,22 @@ munge_wwwwlw(void *args) out_args[0] = in_args[0]; } +void +munge_wwwwllww(void *args) +{ + volatile uint64_t *out_args = (volatile uint64_t*)args; + volatile uint32_t *in_args = (volatile uint32_t*)args; + + out_args[7] = in_args[9]; + out_args[6] = in_args[8]; + out_args[5] = *(volatile uint64_t*)&in_args[6]; + out_args[4] = *(volatile uint64_t*)&in_args[4]; + out_args[3] = in_args[3]; + out_args[2] = in_args[2]; + out_args[1] = in_args[1]; + out_args[0] = in_args[0]; +} + void munge_wwwwl(void *args) { diff --git a/bsd/dev/random/randomdev.c b/bsd/dev/random/randomdev.c index 7e5e10e3e..5d0ebe81d 100644 --- a/bsd/dev/random/randomdev.c +++ b/bsd/dev/random/randomdev.c @@ -56,26 +56,20 @@ d_ioctl_t random_ioctl; -/* - * A struct describing which functions will get invoked for certain - * actions. 
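The new munge_wlllww, munge_wwwlwww, and munge_wwwwllww routines above follow the existing munge convention: a packed array of 32-bit user arguments ("w") and 64-bit arguments ("l") is widened in place into an array of 64-bit kernel arguments, working from the last argument backwards so no source word is overwritten before it has been read. A toy sketch of the same back-to-front widening for a hypothetical "wlw" layout (word, long, word), not one of the kernel's munge_* routines:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

static void
munge_wlw(void *args)
{
    volatile uint64_t *out_args = (volatile uint64_t *)args;
    volatile uint32_t *in_args  = (volatile uint32_t *)args;
    uint64_t l;

    /* Read the 64-bit argument before its bytes are overwritten below. */
    memcpy(&l, (const void *)&in_args[1], sizeof(l));

    out_args[2] = in_args[3];   /* trailing word, written first */
    out_args[1] = l;
    out_args[0] = in_args[0];   /* leading word, written last */
}

int main(void)
{
    uint64_t buf[3] = { 0 };
    uint32_t *in = (uint32_t *)buf;
    uint64_t l = 0x1122334455667788ULL;

    /* Packed 32-bit layout: w=7, l, w=9. */
    in[0] = 7;
    memcpy(&in[1], &l, sizeof(l));
    in[3] = 9;

    munge_wlw(buf);
    printf("%llu 0x%llx %llu\n", (unsigned long long)buf[0],
        (unsigned long long)buf[1], (unsigned long long)buf[2]);
    return 0;
}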
- */ -static struct cdevsw random_cdevsw = +static const struct cdevsw random_cdevsw = { - random_open, /* open */ - random_close, /* close */ - random_read, /* read */ - random_write, /* write */ - random_ioctl, /* ioctl */ - (stop_fcn_t *)nulldev, /* stop */ - (reset_fcn_t *)nulldev, /* reset */ - NULL, /* tty's */ - eno_select, /* select */ - eno_mmap, /* mmap */ - eno_strat, /* strategy */ - eno_getc, /* getc */ - eno_putc, /* putc */ - 0 /* type */ + .d_open = random_open, + .d_close = random_close, + .d_read = random_read, + .d_write = random_write, + .d_ioctl = random_ioctl, + .d_stop = (stop_fcn_t *)nulldev, + .d_reset = (reset_fcn_t *)nulldev, + .d_select = eno_select, + .d_mmap = eno_mmap, + .d_strategy = eno_strat, + .d_reserved_1 = eno_getc, + .d_reserved_2 = eno_putc, }; @@ -173,15 +167,14 @@ random_write(dev_t dev, struct uio *uio, __unused int ioflag) /* Security server is sending us entropy */ - while (uio_resid(uio) > 0 && retCode == 0) { + while ((size_t)uio_resid(uio) > 0 && retCode == 0) { /* get the user's data */ - int bytesToInput = MIN(uio_resid(uio), - (user_ssize_t) sizeof(rdBuffer)); - retCode = uiomove(rdBuffer, bytesToInput, uio); + size_t bytesToInput = MIN((size_t)uio_resid(uio), sizeof(rdBuffer)); + retCode = uiomove(rdBuffer, (int)bytesToInput, uio); if (retCode != 0) { break; } - retCode = write_random(rdBuffer, bytesToInput); + retCode = write_random(rdBuffer, (u_int)bytesToInput); if (retCode != 0) { break; } @@ -199,18 +192,17 @@ random_read(__unused dev_t dev, struct uio *uio, __unused int ioflag) int retCode = 0; char buffer[512]; - user_ssize_t bytes_remaining = uio_resid(uio); + size_t bytes_remaining = (size_t)uio_resid(uio); while (bytes_remaining > 0 && retCode == 0) { - int bytesToRead = MIN(bytes_remaining, - (user_ssize_t) sizeof(buffer)); - read_random(buffer, bytesToRead); + size_t bytesToRead = MIN(bytes_remaining, sizeof(buffer)); + read_random(buffer, (u_int)bytesToRead); - retCode = uiomove(buffer, bytesToRead, uio); + retCode = uiomove(buffer, (int)bytesToRead, uio); if (retCode != 0) { break; } - bytes_remaining = uio_resid(uio); + bytes_remaining = (size_t)uio_resid(uio); } return retCode; @@ -232,10 +224,10 @@ int getentropy(__unused struct proc * p, struct getentropy_args *gap, __unused int * ret) { user_addr_t user_addr; - uint32_t user_size; + user_size_t user_size; char buffer[256]; - user_addr = (vm_map_offset_t)gap->buffer; + user_addr = (user_addr_t)gap->buffer; user_size = gap->size; /* Can't request more than 256 random bytes * at once. 
Complying with openbsd getentropy() @@ -243,6 +235,6 @@ getentropy(__unused struct proc * p, struct getentropy_args *gap, __unused int * if (user_size > sizeof(buffer)) { return EINVAL; } - read_random(buffer, user_size); + read_random(buffer, (u_int)user_size); return copyout(buffer, user_addr, user_size); } diff --git a/bsd/dev/unix_startup.c b/bsd/dev/unix_startup.c index 439eb25f9..474da06aa 100644 --- a/bsd/dev/unix_startup.c +++ b/bsd/dev/unix_startup.c @@ -34,6 +34,7 @@ #include +#include #include #include @@ -41,7 +42,6 @@ #include #include #include -#include #include #include #include @@ -57,7 +57,7 @@ extern uint32_t kern_maxvnodes; extern vm_map_t mb_map; -#if INET || INET6 +#if INET extern uint32_t tcp_sendspace; extern uint32_t tcp_recvspace; #endif @@ -88,9 +88,11 @@ SYSCTL_INT(_kern, OID_AUTO, nbuf, CTLFLAG_RD | CTLFLAG_LOCKED, &nbuf_headers, 0, SYSCTL_INT(_kern, OID_AUTO, maxnbuf, CTLFLAG_RW | CTLFLAG_LOCKED | CTLFLAG_KERN, &max_nbuf_headers, 0, ""); __private_extern__ int customnbuf = 0; -int serverperfmode = 0; /* Flag indicates a server boot when set */ int ncl = 0; +/* Indicates a server boot when set */ +TUNABLE(int, serverperfmode, "serverperfmode", 0); + #if SOCKETS static unsigned int mbuf_poolsz; #endif @@ -110,9 +112,9 @@ bsd_startupearly(void) /* clip the number of buf headers upto 16k */ if (max_nbuf_headers == 0) { - max_nbuf_headers = atop_kernel(sane_size / 50); /* Get 2% of ram, but no more than we can map */ + max_nbuf_headers = (int)atop_kernel(sane_size / 50); /* Get 2% of ram, but no more than we can map */ } - if ((customnbuf == 0) && (max_nbuf_headers > 16384)) { + if ((customnbuf == 0) && ((unsigned int)max_nbuf_headers > 16384)) { max_nbuf_headers = 16384; } if (max_nbuf_headers < CONFIG_MIN_NBUF) { @@ -121,8 +123,8 @@ bsd_startupearly(void) /* clip the number of hash elements to 200000 */ if ((customnbuf == 0) && nbuf_hashelements == 0) { - nbuf_hashelements = atop_kernel(sane_size / 50); - if (nbuf_hashelements > 200000) { + nbuf_hashelements = (int)atop_kernel(sane_size / 50); + if ((unsigned int)nbuf_hashelements > 200000) { nbuf_hashelements = 200000; } } else { @@ -177,7 +179,7 @@ bsd_startupearly(void) nmbclusters = bsd_mbuf_cluster_reserve(NULL) / MCLBYTES; -#if INET || INET6 +#if INET if ((scale = nmbclusters / NMBCLUSTERS) > 1) { tcp_sendspace *= scale; tcp_recvspace *= scale; @@ -189,7 +191,7 @@ bsd_startupearly(void) tcp_recvspace = maxspace; } } -#endif /* INET || INET6 */ +#endif /* INET */ } #endif /* SOCKETS */ @@ -204,7 +206,7 @@ bsd_startupearly(void) * CONFIG_VNODES is set to 263168 for "medium" configurations (the default) * but can be smaller or larger. 
*/ - desiredvnodes = (sane_size / 65536) + 1024; + desiredvnodes = (int)(sane_size / 65536) + 1024; #ifdef CONFIG_VNODES if (desiredvnodes > CONFIG_VNODES) { desiredvnodes = CONFIG_VNODES; @@ -300,7 +302,7 @@ bsd_mbuf_cluster_reserve(boolean_t *overridden) if ((nmbclusters = ncl) == 0) { /* Auto-configure the mbuf pool size */ - nmbclusters = mbuf_default_ncl(serverperfmode, sane_size); + nmbclusters = mbuf_default_ncl(mem_actual); } else { /* Make sure it's not odd in case ncl is manually set */ if (nmbclusters & 0x1) { @@ -314,7 +316,7 @@ bsd_mbuf_cluster_reserve(boolean_t *overridden) } /* Round it down to nearest multiple of PAGE_SIZE */ - nmbclusters = P2ROUNDDOWN(nmbclusters, NCLPG); + nmbclusters = (unsigned int)P2ROUNDDOWN(nmbclusters, NCLPG); } mbuf_poolsz = nmbclusters << MCLSHIFT; done: diff --git a/bsd/dev/vn/shadow.c b/bsd/dev/vn/shadow.c index 50e391264..717b9dee0 100644 --- a/bsd/dev/vn/shadow.c +++ b/bsd/dev/vn/shadow.c @@ -99,7 +99,7 @@ struct shadow_map { typedef struct { - uint32_t byte; + uint64_t byte; uint32_t bit; } bitmap_offset_t; @@ -151,7 +151,7 @@ bitmap_offset(off_t where) * units, using longs, then a short, then a byte, then bits. */ static void -bitmap_set(u_char * map, uint32_t start_bit, uint32_t bit_count) +bitmap_set(u_char * map, off_t start_bit, size_t bit_count) { bitmap_offset_t start; bitmap_offset_t end; @@ -159,7 +159,7 @@ bitmap_set(u_char * map, uint32_t start_bit, uint32_t bit_count) start = bitmap_offset(start_bit); end = bitmap_offset(start_bit + bit_count); if (start.byte < end.byte) { - uint32_t n_bytes; + uint64_t n_bytes; if (start.bit) { map[start.byte] |= byte_set_bits(start.bit, NBBY - 1); @@ -210,7 +210,7 @@ end: */ static uint32_t -bitmap_get(u_char * map, uint32_t start_bit, uint32_t bit_count, +bitmap_get(u_char * map, off_t start_bit, size_t bit_count, boolean_t * ret_is_set) { uint32_t count; @@ -226,7 +226,7 @@ bitmap_get(u_char * map, uint32_t start_bit, uint32_t bit_count, count = 0; if (start.byte < end.byte) { - uint32_t n_bytes; + uint64_t n_bytes; if (start.bit) { /* try to align to a byte */ for (i = start.bit; i < NBBY; i++) { @@ -311,9 +311,9 @@ done: } static __inline__ band_number_t -shadow_map_block_to_band(shadow_map_t * map, uint32_t block) +shadow_map_block_to_band(shadow_map_t * map, off_t block) { - return block / map->blocks_per_band; + return (band_number_t)(block / map->blocks_per_band); } /* @@ -341,7 +341,7 @@ shadow_map_mapped_band(shadow_map_t * map, band_number_t band, /* remember the zero'th band */ map->zeroth_band = band; } - *mapped_band = map->bands[band] = map->next_band++; + *mapped_band = map->bands[band] = (band_number_t)map->next_band++; is_mapped = TRUE; } } else { @@ -360,16 +360,16 @@ shadow_map_mapped_band(shadow_map_t * map, band_number_t band, * * If called with is_write = TRUE, this function will map bands as it goes. 
*/ -static uint32_t -shadow_map_contiguous(shadow_map_t * map, uint32_t start_block, - uint32_t num_blocks, boolean_t is_write) +static off_t +shadow_map_contiguous(shadow_map_t * map, off_t start_block, + size_t num_blocks, boolean_t is_write) { band_number_t band = shadow_map_block_to_band(map, start_block); - uint32_t end_block = start_block + num_blocks; + off_t end_block = start_block + num_blocks; boolean_t is_mapped; band_number_t mapped_band; - uint32_t ret_end_block = end_block; - uint32_t p; + off_t ret_end_block = end_block; + off_t p; is_mapped = shadow_map_mapped_band(map, band, is_write, &mapped_band); if (is_write == FALSE && is_mapped == FALSE) { @@ -418,7 +418,7 @@ shadow_map_contiguous(shadow_map_t * map, uint32_t start_block, * particularly since most of the bits will be zero. * A sparse bitmap would really help in this case. */ -static __inline__ uint32_t +static __inline__ size_t block_bitmap_size(off_t file_size, uint32_t block_size) { off_t blocks = howmany(file_size, block_size); @@ -447,15 +447,15 @@ block_bitmap_size(off_t file_size, uint32_t block_size) * should be read. */ boolean_t -shadow_map_read(shadow_map_t * map, uint32_t block_offset, uint32_t block_count, - uint32_t * incr_block_offset, uint32_t * incr_block_count) +shadow_map_read(shadow_map_t * map, off_t block_offset, size_t block_count, + off_t * incr_block_offset, size_t * incr_block_count) { boolean_t written = FALSE; uint32_t n_blocks; if (block_offset >= map->file_size_blocks || (block_offset + block_count) > map->file_size_blocks) { - printf("shadow_map_read: request (%d, %d) exceeds file size %d\n", + printf("shadow_map_read: request (%lld, %lu) exceeds file size %d\n", block_offset, block_count, map->file_size_blocks); *incr_block_count = 0; } @@ -466,7 +466,7 @@ shadow_map_read(shadow_map_t * map, uint32_t block_offset, uint32_t block_count, *incr_block_offset = block_offset; } else { /* start has been written, and therefore mapped */ band_number_t mapped_band; - uint32_t band_limit; + off_t band_limit; mapped_band = map->bands[shadow_map_block_to_band(map, block_offset)]; *incr_block_offset = mapped_band * map->blocks_per_band @@ -498,17 +498,17 @@ shadow_map_read(shadow_map_t * map, uint32_t block_offset, uint32_t block_count, * TRUE if the shadow file was grown, FALSE otherwise. 
*/ boolean_t -shadow_map_write(shadow_map_t * map, uint32_t block_offset, - uint32_t block_count, uint32_t * incr_block_offset, - uint32_t * incr_block_count) +shadow_map_write(shadow_map_t * map, off_t block_offset, + size_t block_count, off_t * incr_block_offset, + size_t * incr_block_count) { - uint32_t band_limit; + off_t band_limit; band_number_t mapped_band; boolean_t shadow_grew = FALSE; if (block_offset >= map->file_size_blocks || (block_offset + block_count) > map->file_size_blocks) { - printf("shadow_map_write: request (%d, %d) exceeds file size %d\n", + printf("shadow_map_write: request (%lld, %zu) exceeds file size %d\n", block_offset, block_count, map->file_size_blocks); *incr_block_count = 0; } @@ -530,7 +530,7 @@ shadow_map_write(shadow_map_t * map, uint32_t block_offset, } boolean_t -shadow_map_is_written(shadow_map_t * map, uint32_t block_offset) +shadow_map_is_written(shadow_map_t * map, off_t block_offset) { bitmap_offset_t b; @@ -564,7 +564,7 @@ shadow_map_create(off_t file_size, off_t shadow_size, uint32_t band_size, uint32_t block_size) { void * block_bitmap = NULL; - uint32_t bitmap_size; + size_t bitmap_size; band_number_t * bands = NULL; shadow_map_t * map; uint32_t n_bands = 0; @@ -573,12 +573,13 @@ shadow_map_create(off_t file_size, off_t shadow_size, band_size = BAND_SIZE_DEFAULT; } - n_bands = howmany(file_size, band_size); - if (n_bands > (BAND_MAX + 1)) { - printf("file is too big: %d > %d\n", - n_bands, BAND_MAX); + off_t many = howmany(file_size, band_size); + if (many > (BAND_MAX + 1)) { + printf("file is too big: %lld > %d\n", + many, BAND_MAX); goto failure; } + n_bands = (uint32_t)many; /* create a block bitmap, one bit per block */ bitmap_size = block_bitmap_size(file_size, block_size); @@ -608,7 +609,7 @@ shadow_map_create(off_t file_size, off_t shadow_size, map->file_size_blocks = n_bands * map->blocks_per_band; map->next_band = 0; map->zeroth_band = -1; - map->shadow_size_bands = howmany(shadow_size, band_size); + map->shadow_size_bands = (uint32_t)howmany(shadow_size, band_size); map->block_size = block_size; return map; diff --git a/bsd/dev/vn/shadow.h b/bsd/dev/vn/shadow.h index 72d68d3a3..2e6f5e600 100644 --- a/bsd/dev/vn/shadow.h +++ b/bsd/dev/vn/shadow.h @@ -36,13 +36,13 @@ typedef struct shadow_map shadow_map_t; boolean_t -shadow_map_read(shadow_map_t * map, uint32_t block_offset, uint32_t block_count, - uint32_t * incr_block_offset, uint32_t * incr_block_count); +shadow_map_read(shadow_map_t * map, off_t block_offset, size_t block_count, + off_t * incr_block_offset, size_t * incr_block_count); boolean_t -shadow_map_write(shadow_map_t * map, uint32_t block_offset, uint32_t block_count, - uint32_t * incr_block_offset, uint32_t * incr_block_count); +shadow_map_write(shadow_map_t * map, off_t block_offset, size_t block_count, + off_t * incr_block_offset, size_t * incr_block_count); boolean_t -shadow_map_is_written(shadow_map_t * map, uint32_t block_offset); +shadow_map_is_written(shadow_map_t * map, off_t block_offset); uint32_t shadow_map_shadow_size(shadow_map_t * map); diff --git a/bsd/dev/vn/vn.c b/bsd/dev/vn/vn.c index a3926c5db..b0ef8539f 100644 --- a/bsd/dev/vn/vn.c +++ b/bsd/dev/vn/vn.c @@ -141,7 +141,7 @@ static int vndevice_cdev_major; * D_CANFREE We support B_FREEBUF */ -static struct bdevsw vn_bdevsw = { +static const struct bdevsw vn_bdevsw = { .d_open = vnopen, .d_close = vnclose, .d_strategy = vnstrategy, @@ -151,7 +151,7 @@ static struct bdevsw vn_bdevsw = { .d_type = D_DISK, }; -static struct cdevsw vn_cdevsw = { +static const 
struct cdevsw vn_cdevsw = { .d_open = vnopen, .d_close = vnclose, .d_read = vnread, @@ -302,9 +302,9 @@ vnread_shadow(struct vn_softc * vn, struct uio *uio, int ioflag, orig_offset = offset = uio_offset(uio); while (resid > 0) { - u_int32_t remainder; - u_int32_t this_block_number; - u_int32_t this_block_count; + u_int32_t remainder; + off_t this_block_number; + size_t this_block_count; off_t this_offset; user_ssize_t this_resid; struct vnode * vp; @@ -321,7 +321,7 @@ vnread_shadow(struct vn_softc * vn, struct uio *uio, int ioflag, } /* read the blocks (or parts thereof) */ - this_offset = (off_t)this_block_number * blocksize + remainder; + this_offset = this_block_number * blocksize + remainder; uio_setoffset(uio, this_offset); this_resid = this_block_count * blocksize - remainder; if (this_resid > resid) { @@ -349,7 +349,7 @@ vnread_shadow(struct vn_softc * vn, struct uio *uio, int ioflag, static int vncopy_block_to_shadow(struct vn_softc * vn, vfs_context_t ctx, - u_int32_t file_block, u_int32_t shadow_block) + off_t file_block, off_t shadow_block) { int error; char * tmpbuf; @@ -360,14 +360,14 @@ vncopy_block_to_shadow(struct vn_softc * vn, vfs_context_t ctx, } /* read one block from file at file_block offset */ error = file_io(vn->sc_vp, ctx, UIO_READ, - tmpbuf, (off_t)file_block * vn->sc_secsize, + tmpbuf, file_block * vn->sc_secsize, vn->sc_secsize, NULL); if (error) { goto done; } /* write one block to shadow file at shadow_block offset */ error = file_io(vn->sc_shadow_vp, ctx, UIO_WRITE, - tmpbuf, (off_t)shadow_block * vn->sc_secsize, + tmpbuf, shadow_block * vn->sc_secsize, vn->sc_secsize, NULL); done: FREE(tmpbuf, M_TEMP); @@ -393,11 +393,11 @@ vnwrite_shadow(struct vn_softc * vn, struct uio *uio, int ioflag, while (resid > 0) { int flags = 0; - u_int32_t offset_block_number; - u_int32_t remainder; - u_int32_t resid_block_count; - u_int32_t shadow_block_count; - u_int32_t shadow_block_number; + off_t offset_block_number; + u_int32_t remainder; + size_t resid_block_count; + size_t shadow_block_count; + off_t shadow_block_number; user_ssize_t this_resid; /* figure out which blocks to write */ @@ -433,15 +433,14 @@ vnwrite_shadow(struct vn_softc * vn, struct uio *uio, int ioflag, #endif } /* write the blocks (or parts thereof) */ - uio_setoffset(uio, (off_t) - shadow_block_number * blocksize + remainder); - this_resid = (off_t)shadow_block_count * blocksize - remainder; + uio_setoffset(uio, shadow_block_number * blocksize + remainder); + this_resid = shadow_block_count * blocksize - remainder; if (this_resid >= resid) { this_resid = resid; if ((flags & FLAGS_LAST_BLOCK_PARTIAL) != 0) { /* copy the last block to the shadow */ - u_int32_t d; - u_int32_t s; + off_t d; + off_t s; s = offset_block_number + resid_block_count - 1; @@ -450,7 +449,7 @@ vnwrite_shadow(struct vn_softc * vn, struct uio *uio, int ioflag, error = vncopy_block_to_shadow(vn, ctx, s, d); if (error) { printf("vnwrite_shadow: failed to copy" - " block %u to shadow block %u\n", + " block %lld to shadow block %lld\n", s, d); break; } @@ -464,7 +463,7 @@ vnwrite_shadow(struct vn_softc * vn, struct uio *uio, int ioflag, shadow_block_number); if (error) { printf("vnwrite_shadow: failed to" - " copy block %u to shadow block %u\n", + " copy block %lld to shadow block %lld\n", offset_block_number, shadow_block_number); break; @@ -645,17 +644,17 @@ shadow_read(struct vn_softc * vn, struct buf * bp, char * base, { u_int32_t blocksize = vn->sc_secsize; int error = 0; - u_int32_t offset; + off_t offset; boolean_t read_shadow; - 
u_int32_t resid; + size_t resid; u_int32_t start = 0; offset = buf_blkno(bp); resid = buf_resid(bp) / blocksize; while (resid > 0) { user_ssize_t temp_resid; - u_int32_t this_offset; - u_int32_t this_resid; + off_t this_offset; + size_t this_resid; struct vnode * vp; read_shadow = shadow_map_read(vn->sc_shadow_map, @@ -667,7 +666,7 @@ shadow_read(struct vn_softc * vn, struct buf * bp, char * base, vp = vn->sc_vp; } error = file_io(vp, ctx, UIO_READ, base + start, - (off_t)this_offset * blocksize, + this_offset * blocksize, (user_ssize_t)this_resid * blocksize, &temp_resid); if (error) { @@ -682,7 +681,7 @@ shadow_read(struct vn_softc * vn, struct buf * bp, char * base, offset += this_resid; start += this_resid * blocksize; } - buf_setresid(bp, resid * blocksize); + buf_setresid(bp, (uint32_t)(resid * blocksize)); return error; } @@ -692,17 +691,17 @@ shadow_write(struct vn_softc * vn, struct buf * bp, char * base, { u_int32_t blocksize = vn->sc_secsize; int error = 0; - u_int32_t offset; + off_t offset; boolean_t shadow_grew; - u_int32_t resid; + size_t resid; u_int32_t start = 0; offset = buf_blkno(bp); resid = buf_resid(bp) / blocksize; while (resid > 0) { user_ssize_t temp_resid; - u_int32_t this_offset; - u_int32_t this_resid; + off_t this_offset; + size_t this_resid; shadow_grew = shadow_map_write(vn->sc_shadow_map, offset, resid, @@ -718,7 +717,7 @@ shadow_write(struct vn_softc * vn, struct buf * bp, char * base, } error = file_io(vn->sc_shadow_vp, ctx, UIO_WRITE, base + start, - (off_t)this_offset * blocksize, + this_offset * blocksize, (user_ssize_t)this_resid * blocksize, &temp_resid); if (error) { @@ -733,7 +732,7 @@ shadow_write(struct vn_softc * vn, struct buf * bp, char * base, offset += this_resid; start += this_resid * blocksize; } - buf_setresid(bp, resid * blocksize); + buf_setresid(bp, (uint32_t)(resid * blocksize)); return error; } @@ -757,7 +756,7 @@ vn_readwrite_io(struct vn_softc * vn, struct buf * bp, vfs_context_t ctx) iov_base, (off_t)buf_blkno(bp) * vn->sc_secsize, buf_resid(bp), &temp_resid); - buf_setresid(bp, temp_resid); + buf_setresid(bp, (uint32_t)temp_resid); } else { if (buf_flags(bp) & B_READ) { error = shadow_read(vn, bp, iov_base, ctx); @@ -816,7 +815,7 @@ vnstrategy(struct buf *bp) * If the request crosses EOF, truncate the request. */ if ((blk_num + sz) > 0 && ((u_int64_t)(blk_num + sz)) > vn->sc_size) { - buf_setcount(bp, (vn->sc_size - blk_num) * vn->sc_secsize); + buf_setcount(bp, (uint32_t)((vn->sc_size - blk_num) * vn->sc_secsize)); buf_setresid(bp, buf_count(bp)); } vp = vn->sc_vp; diff --git a/bsd/i386/Makefile b/bsd/i386/Makefile index ab4a4ac86..f38f8c0b5 100644 --- a/bsd/i386/Makefile +++ b/bsd/i386/Makefile @@ -13,7 +13,7 @@ DATAFILES = \ _mcontext.h DRIVERKIT_DATAFILES = \ - limits.h types.h _types.h + limits.h types.h _types.h endian.h PRIVATE_DATAFILES = \ disklabel.h diff --git a/bsd/i386/_param.h b/bsd/i386/_param.h index 3a0ac8bba..1ab450d5d 100644 --- a/bsd/i386/_param.h +++ b/bsd/i386/_param.h @@ -37,10 +37,10 @@ * cast to any desired pointer type. 
*/ #define __DARWIN_ALIGNBYTES (sizeof(__darwin_size_t) - 1) -#define __DARWIN_ALIGN(p) ((__darwin_size_t)((char *)(__darwin_size_t)(p) + __DARWIN_ALIGNBYTES) &~ __DARWIN_ALIGNBYTES) +#define __DARWIN_ALIGN(p) ((__darwin_size_t)((__darwin_size_t)(p) + __DARWIN_ALIGNBYTES) &~ __DARWIN_ALIGNBYTES) #define __DARWIN_ALIGNBYTES32 (sizeof(__uint32_t) - 1) -#define __DARWIN_ALIGN32(p) ((__darwin_size_t)((char *)(__darwin_size_t)(p) + __DARWIN_ALIGNBYTES32) &~ __DARWIN_ALIGNBYTES32) +#define __DARWIN_ALIGN32(p) ((__darwin_size_t)((__darwin_size_t)(p) + __DARWIN_ALIGNBYTES32) &~ __DARWIN_ALIGNBYTES32) #endif /* _I386__PARAM_H_ */ diff --git a/bsd/kern/bsd_init.c b/bsd/kern/bsd_init.c index 887cb454b..c204382af 100644 --- a/bsd/kern/bsd_init.c +++ b/bsd/kern/bsd_init.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2019 Apple Inc. All rights reserved. + * Copyright (c) 2000-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -89,7 +89,6 @@ #include #include #include -#include #include #include #include @@ -106,7 +105,7 @@ #include #include #include -#include +#include #include /* for ux_handler_setup() */ #include @@ -127,7 +126,6 @@ #include #include #include /* for thread_resume() */ -#include /* for ubc_init() */ #include /* for mcache_init() */ #include /* for mbinit() */ #include /* for knote_init() */ @@ -139,13 +137,11 @@ #include /* for dlil_init() */ #include /* for proto_kpi_init() */ #include /* for iptap_init() */ -#include /* for pipeinit() */ #include /* for socketinit() */ #include /* for domaininit() */ #include /* for thread_wakeup() */ #include /* for ether_family_init() */ #include /* for gif_init() */ -#include /* for vnode_pager_bootstrap() */ #include /* for devfs_kernel_mount() */ #include /* for kmem_suballoc() */ #include /* for psem_lock_init() */ @@ -169,6 +165,7 @@ #include /* for assert() */ #include /* for init_system_override() */ #include /* for lf_init() */ +#include #include @@ -206,21 +203,21 @@ void IOKitInitializeTime(void); /* XXX */ void IOSleep(unsigned int); /* XXX */ void loopattach(void); /* XXX */ -const char copyright[] = +const char *const copyright = "Copyright (c) 1982, 1986, 1989, 1991, 1993\n\t" "The Regents of the University of California. " "All rights reserved.\n\n"; /* Components of the first process -- never freed. 
*/ -struct proc proc0; +struct proc proc0 = { .p_comm = "kernel_task", .p_name = "kernel_task" }; struct session session0; struct pgrp pgrp0; struct filedesc filedesc0; struct plimit limit0; struct pstats pstats0; struct sigacts sigacts0; -proc_t kernproc; -proc_t initproc; +SECURITY_READ_ONLY_LATE(proc_t) kernproc = &proc0; +proc_t XNU_PTRAUTH_SIGNED_PTR("initproc") initproc; long tk_cancc; long tk_nin; @@ -246,14 +243,11 @@ lck_mtx_t domainname_lock; char rootdevice[DEVMAXNAMESIZE]; -#if KMEMSTATS -struct kmemstats kmemstats[M_LAST]; -#endif - struct vnode *rootvp; +bool rootvp_is_ssd = false; int boothowto; int minimalboot = 0; -#if CONFIG_EMBEDDED +#if CONFIG_DARKBOOT int darkboot = 0; #endif @@ -272,9 +266,9 @@ __private_extern__ int os_reason_debug_disabled = 0; /* disable asserts for when extern kern_return_t IOFindBSDRoot(char *, unsigned int, dev_t *, u_int32_t *); extern void IOSecureBSDRoot(const char * rootName); extern kern_return_t IOKitBSDInit(void ); +extern boolean_t IOSetRecoveryBoot(bsd_bootfail_mode_t, uuid_t, boolean_t); extern void kminit(void); extern void file_lock_init(void); -extern void kmeminit(void); extern void bsd_bufferinit(void); extern void oslog_setsize(int size); extern void throttle_init(void); @@ -288,7 +282,6 @@ extern int mount_locker_protoboot(const char *fsname, const char *mntpoint, const char *pbdevpath); #endif -extern int serverperfmode; extern int ncl; #if DEVELOPMENT || DEBUG extern int syscallfilter_disable; @@ -325,6 +318,7 @@ __private_extern__ int bootarg_no_vnode_drain = 0; __private_extern__ int bootarg_disable_aslr = 0; #endif + /* * Allow an alternate dyld to be used for testing. */ @@ -351,6 +345,13 @@ static void process_name(const char *, proc_t); static void setconf(void); +#if CONFIG_BASESYSTEMROOT +static int bsd_find_basesystem_dmg(char *bsdmgpath_out, bool *rooted_dmg); +static boolean_t bsdmgroot_bootable(void); +#endif // CONFIG_BASESYSTEMROOT + +static bool bsd_rooted_ramdisk(void); + #if SYSV_SHM extern void sysv_shm_lock_init(void); #endif @@ -405,6 +406,8 @@ struct rlimit vm_initial_limit_stack = { .rlim_cur = DFLSSIZ, .rlim_max = MAXSSI struct rlimit vm_initial_limit_data = { .rlim_cur = DFLDSIZ, .rlim_max = MAXDSIZ }; struct rlimit vm_initial_limit_core = { .rlim_cur = DFLCSIZ, .rlim_max = MAXCSIZ }; +extern struct os_refgrp rlimit_refgrp; + extern thread_t cloneproc(task_t, coalition_t, proc_t, int, int); extern int (*mountroot)(void); @@ -428,9 +431,60 @@ lck_mtx_t * sysctl_debug_test_stackshot_owner_init_mtx; extern lck_mtx_t * execargs_cache_lock; +#if XNU_TARGET_OS_OSX /* hook called after root is mounted XXX temporary hack */ void (*mountroot_post_hook)(void); void (*unmountroot_pre_hook)(void); +#endif +void set_rootvnode(vnode_t); + +extern lck_rw_t * rootvnode_rw_lock; + +/* called with an iocount and usecount on new_rootvnode */ +void +set_rootvnode(vnode_t new_rootvnode) +{ + mount_t new_mount = (new_rootvnode != NULL) ? new_rootvnode->v_mount : NULL; + vnode_t new_devvp = (new_mount != NULL) ? 
new_mount->mnt_devvp : NULL; + vnode_t old_rootvnode = rootvnode; + + new_rootvnode->v_flag |= VROOT; + rootvp = new_devvp; + rootvnode = new_rootvnode; + filedesc0.fd_cdir = new_rootvnode; + if (new_devvp != NULL) { + rootdev = vnode_specrdev(new_devvp); + } else if (new_mount != NULL) { + rootdev = vfs_statfs(new_mount)->f_fsid.val[0]; /* like ATTR_CMN_DEVID */ + } else { + rootdev = NODEV; + } + + if (old_rootvnode) { + vnode_rele(old_rootvnode); + } +} + +#define RAMDEV "md0" + +bool +bsd_rooted_ramdisk(void) +{ + bool is_ramdisk = false; + char *dev_path = zalloc(ZV_NAMEI); + if (dev_path == NULL) { + panic("failed to allocate devpath string! \n"); + } + + if (PE_parse_boot_argn("rd", dev_path, MAXPATHLEN)) { + if (strncmp(dev_path, RAMDEV, strlen(RAMDEV)) == 0) { + is_ramdisk = true; + } + } + + zfree(ZV_NAMEI, dev_path); + return is_ramdisk; +} /* * This function is called before IOKit initialization, so that globals @@ -467,6 +521,7 @@ bsd_init(void) kern_return_t ret; struct ucred temp_cred; struct posix_cred temp_pcred; + vnode_t init_rootvnode = NULLVP; #if CONFIG_NETBOOT || CONFIG_IMAGEBOOT boolean_t netboot = FALSE; #endif @@ -476,7 +531,7 @@ bsd_init(void) char *pbdevp = NULL; char pbdevpath[64]; char pbfsname[MFSNAMELEN]; - char *slash_dev = NULL; + const char *slash_dev = NULL; #endif #define DEBUG_BSDINIT 0 @@ -491,9 +546,6 @@ bsd_init(void) printf(copyright); - bsd_init_kprintf("calling kmeminit\n"); - kmeminit(); - bsd_init_kprintf("calling parse_bsd_args\n"); parse_bsd_args(); @@ -513,8 +565,6 @@ bsd_init(void) /* Initialize the ttys (MUST be before kminit()/bsd_autoconf()!)*/ tty_init(); - kernproc = &proc0; /* implicitly bzero'ed */ - /* kernel_task->proc = kernproc; */ set_bsdtask_info(kernel_task, (void *)kernproc); @@ -556,8 +606,8 @@ bsd_init(void) execargs_cache_lock = lck_mtx_alloc_init(proc_lck_grp, proc_lck_attr); execargs_cache_size = bsd_simul_execs; execargs_free_count = bsd_simul_execs; - execargs_cache = (vm_offset_t *)kalloc(bsd_simul_execs * sizeof(vm_offset_t)); - bzero(execargs_cache, bsd_simul_execs * sizeof(vm_offset_t)); + execargs_cache = zalloc_permanent(bsd_simul_execs * sizeof(vm_offset_t), + ZALIGN(vm_offset_t)); if (current_task() != kernel_task) { printf("bsd_init: We have a problem, " @@ -673,10 +723,11 @@ bsd_init(void) ut->uu_context.vc_ucred = kernproc->p_ucred; ut->uu_context.vc_thread = current_thread(); + vfs_set_context_kernel(&ut->uu_context); + TAILQ_INIT(&kernproc->p_aio_activeq); TAILQ_INIT(&kernproc->p_aio_doneq); kernproc->p_aio_total_count = 0; - kernproc->p_aio_active_count = 0; bsd_init_kprintf("calling file_lock_init\n"); file_lock_init(); @@ -687,7 +738,7 @@ bsd_init(void) /* Create the file descriptor table. */ kernproc->p_fd = &filedesc0; - filedesc0.fd_cmask = cmask; + filedesc0.fd_cmask = (mode_t)cmask; filedesc0.fd_knlistsize = 0; filedesc0.fd_knlist = NULL; filedesc0.fd_knhash = NULL; @@ -697,7 +748,7 @@ bsd_init(void) /* Create the limits structures. 
*/ kernproc->p_limit = &limit0; - for (i = 0; i < sizeof(kernproc->p_rlimit) / sizeof(kernproc->p_rlimit[0]); i++) { + for (i = 0; i < sizeof(kernproc->p_limit->pl_rlimit) / sizeof(kernproc->p_limit->pl_rlimit[0]); i++) { limit0.pl_rlimit[i].rlim_cur = limit0.pl_rlimit[i].rlim_max = RLIM_INFINITY; } @@ -707,10 +758,11 @@ bsd_init(void) limit0.pl_rlimit[RLIMIT_STACK] = vm_initial_limit_stack; limit0.pl_rlimit[RLIMIT_DATA] = vm_initial_limit_data; limit0.pl_rlimit[RLIMIT_CORE] = vm_initial_limit_core; - limit0.pl_refcnt = 1; + os_ref_init_count(&limit0.pl_refcnt, &rlimit_refgrp, 1); kernproc->p_stats = &pstats0; kernproc->p_sigacts = &sigacts0; + kernproc->p_subsystem_root_path = NULL; /* * Charge root for one process: launchd. @@ -759,9 +811,6 @@ bsd_init(void) bsd_init_kprintf("calling IOKitInitializeTime\n"); IOKitInitializeTime(); - bsd_init_kprintf("calling ubc_init\n"); - ubc_init(); - /* Initialize the file systems. */ bsd_init_kprintf("calling vfsinit\n"); vfsinit(); @@ -808,10 +857,6 @@ bsd_init(void) bsd_init_kprintf("calling aio_init\n"); aio_init(); - /* Initialize pipes */ - bsd_init_kprintf("calling pipeinit\n"); - pipeinit(); - /* Initialize SysV shm subsystem locks; the subsystem proper is * initialized through a sysctl. */ @@ -865,6 +910,13 @@ bsd_init(void) flow_divert_init(); #endif /* FLOW_DIVERT */ #endif /* SOCKETS */ +#if NETWORKING +#if NECP + /* Initialize Network Extension Control Policies */ + necp_init(); +#endif + netagent_init(); +#endif /* NETWORKING */ kernproc->p_fd->fd_cdir = NULL; kernproc->p_fd->fd_rdir = NULL; @@ -926,10 +978,6 @@ bsd_init(void) #endif /* ETHER */ #if NETWORKING - /* Call any kext code that wants to run just after network init */ - bsd_init_kprintf("calling net_init_run\n"); - net_init_run(); - #if CONTENT_FILTER cfil_init(); #endif @@ -938,14 +986,9 @@ bsd_init(void) pkt_mnglr_init(); #endif -#if NECP - /* Initialize Network Extension Control Policies */ - necp_init(); -#endif - - netagent_init(); - - /* register user tunnel kernel control handler */ + /* + * Register subsystems with kernel control handlers + */ utun_register_control(); #if IPSEC ipsec_register_control(); @@ -956,10 +999,14 @@ bsd_init(void) #if MPTCP mptcp_control_register(); #endif /* MPTCP */ -#endif /* NETWORKING */ - bsd_init_kprintf("calling vnode_pager_bootstrap\n"); - vnode_pager_bootstrap(); + /* + * The the networking stack is now initialized so it is a good time to call + * the clients that are waiting for the networking stack to be usable. + */ + bsd_init_kprintf("calling net_init_run\n"); + net_init_run(); +#endif /* NETWORKING */ bsd_init_kprintf("calling inittodr\n"); inittodr(0); @@ -1004,13 +1051,34 @@ bsd_init(void) bsd_init_kprintf("calling VFS_ROOT\n"); /* Get the vnode for '/'. Set fdp->fd_fd.fd_cdir to reference it. 
*/ - if (VFS_ROOT(mountlist.tqh_first, &rootvnode, &context)) { + if (VFS_ROOT(mountlist.tqh_first, &init_rootvnode, &context)) { panic("bsd_init: cannot find root vnode: %s", PE_boot_args()); } - rootvnode->v_flag |= VROOT; - (void)vnode_ref(rootvnode); - (void)vnode_put(rootvnode); - filedesc0.fd_cdir = rootvnode; + (void)vnode_ref(init_rootvnode); + (void)vnode_put(init_rootvnode); + + lck_rw_lock_exclusive(rootvnode_rw_lock); + set_rootvnode(init_rootvnode); + lck_rw_unlock_exclusive(rootvnode_rw_lock); + init_rootvnode = NULLVP; /* use rootvnode after this point */ + + + if (!bsd_rooted_ramdisk()) { +#if CONFIG_IMAGEBOOT +#if XNU_TARGET_OS_OSX && defined(__arm64__) + /* Apple Silicon MacOS */ + if (!imageboot_desired()) { + /* enforce sealedness */ + int autherr = VNOP_IOCTL(rootvnode, FSIOC_KERNEL_ROOTAUTH, NULL, 0, vfs_context_kernel()); + if (autherr) { + panic("rootvp not authenticated after mounting \n"); + } + } +#endif // TARGET_OS_OSX && arm64 +#endif // config_imageboot + /* Otherwise, noop */ + } + #if CONFIG_NETBOOT if (netboot) { @@ -1065,6 +1133,7 @@ bsd_init(void) strlcpy(pbfsname, pbmnt->mnt_vtable->vfc_name, sizeof(pbfsname)); #endif + /* * See if a system disk image is present. If so, mount it and * switch the root vnode to point to it @@ -1102,13 +1171,42 @@ bsd_init(void) } #endif /* DEVFS */ - if (vfs_mount_rosv_data()) { - panic("failed to mount data volume!"); - } +#if CONFIG_BASESYSTEMROOT +#if CONFIG_IMAGEBOOT + if (bsdmgroot_bootable()) { + int error; + bool rooted_dmg = false; + + printf("trying to find and mount BaseSystem dmg as root volume\n"); +#if DEVELOPMENT || DEBUG + printf("(set boot-arg -nobsdmgroot to avoid this)\n"); +#endif // DEVELOPMENT || DEBUG - if (vfs_mount_vm()) { - printf("failed to mount vm volume!"); + char *dmgpath = NULL; + dmgpath = zalloc_flags(ZV_NAMEI, Z_ZERO | Z_WAITOK); + if (dmgpath == NULL) { + panic("%s: M_NAMEI zone exhausted", __FUNCTION__); + } + + error = bsd_find_basesystem_dmg(dmgpath, &rooted_dmg); + if (error) { + bsd_init_kprintf("failed to to find BaseSystem dmg: error = %d\n", error); + } else { + PE_parse_boot_argn("bsdmgpath", dmgpath, sizeof(dmgpath)); + + bsd_init_kprintf("found BaseSystem dmg at: %s\n", dmgpath); + + error = imageboot_pivot_image(dmgpath, IMAGEBOOT_DMG, "/System/Volumes/BaseSystem", "System/Volumes/macOS", rooted_dmg); + if (error) { + bsd_init_kprintf("couldn't mount BaseSystem dmg: error = %d", error); + } + } + zfree(ZV_NAMEI, dmgpath); } +#else /* CONFIG_IMAGEBOOT */ +#error CONFIG_BASESYSTEMROOT requires CONFIG_IMAGEBOOT +#endif /* CONFIG_IMAGEBOOT */ +#endif /* CONFIG_BASESYSTEMROOT */ #if CONFIG_LOCKERBOOT /* @@ -1137,10 +1235,12 @@ bsd_init(void) bsd_init_kprintf("calling mountroot_post_hook\n"); +#if XNU_TARGET_OS_OSX /* invoke post-root-mount hook */ if (mountroot_post_hook != NULL) { mountroot_post_hook(); } +#endif #if 0 /* not yet */ consider_zone_gc(FALSE); @@ -1233,6 +1333,15 @@ setconf(void) #endif } +/* + * Boot into the flavor of Recovery dictated by `mode`. 
+ */ +boolean_t +bsd_boot_to_recovery(bsd_bootfail_mode_t mode, uuid_t volume_uuid, boolean_t reboot) +{ + return IOSetRecoveryBoot(mode, volume_uuid, reboot); +} + void bsd_utaskbootstrap(void) { @@ -1314,6 +1423,8 @@ parse_bsd_args(void) } #endif + + PE_parse_boot_argn("ncl", &ncl, sizeof(ncl)); if (PE_parse_boot_argn("nbuf", &max_nbuf_headers, sizeof(max_nbuf_headers))) { @@ -1345,7 +1456,7 @@ parse_bsd_args(void) bootarg_no_vnode_drain = 1; } -#if CONFIG_EMBEDDED +#if CONFIG_DARKBOOT /* * The darkboot flag is specified by the bootloader and is stored in * boot_args->bootFlags. This flag is available starting revision 2. @@ -1420,6 +1531,285 @@ parse_bsd_args(void) #endif /* DEVELOPMENT || DEBUG */ } +#if CONFIG_BASESYSTEMROOT + +extern const char* IOGetBootUUID(void); +extern const char* IOGetApfsPrebootUUID(void); + +// Get the UUID of the Preboot (and Recovery) folder associated with the +// current boot volume, if applicable. The meaning of the UUID can be +// filesystem-dependent and not all kinds of boots will have a UUID. +// If available, the string will be returned. It does not need to be +// deallocate. (Future: if we need to return the string as a copy that the +// caller must free, we'll introduce a new functcion for that.) +// NULL will be returned if the current boot has no applicable Preboot UUID. +static +const char * +get_preboot_uuid(void) +{ + const char *maybe_uuid_string; + + // try IOGetApfsPrebootUUID + maybe_uuid_string = IOGetApfsPrebootUUID(); + if (maybe_uuid_string) { + uuid_t maybe_uuid; + int error = uuid_parse(maybe_uuid_string, maybe_uuid); + if (error == 0) { + return maybe_uuid_string; + } + } + + // try IOGetBootUUID + maybe_uuid_string = IOGetBootUUID(); + if (maybe_uuid_string) { + uuid_t maybe_uuid; + int error = uuid_parse(maybe_uuid_string, maybe_uuid); + if (error == 0) { + return maybe_uuid_string; + } + } + + // didn't find it + return NULL; +} + +#if defined(__arm64__) +extern const char *IOGetBootObjectsPath(void); +#endif + +// Find the BaseSystem.dmg to be used as the initial root volume during certain +// kinds of boots. +// This may mount volumes and lookup vnodes. +// The DEVELOPMENT kernel will look for BaseSystem.rooted.dmg first. +// If it returns 0 (no error), then it also writes the absolute path to the +// BaseSystem.dmg into its argument (which must be a char[MAXPATHLEN]). 
+static +int +bsd_find_basesystem_dmg(char *bsdmgpath_out, bool *rooted_dmg) +{ + int error; + size_t len; + char *dmgbasepath; + char *dmgpath; + + dmgbasepath = zalloc_flags(ZV_NAMEI, Z_ZERO | Z_WAITOK); + dmgpath = zalloc_flags(ZV_NAMEI, Z_ZERO | Z_WAITOK); + vnode_t imagevp = NULLVP; + + //must provide output bool + if (rooted_dmg) { + *rooted_dmg = false; + } else { + error = EINVAL; + goto done; + } + + error = vfs_mount_recovery(); + if (error) { + goto done; + } + + len = strlcpy(dmgbasepath, "/System/Volumes/Recovery/", MAXPATHLEN); + if (len > MAXPATHLEN) { + error = ENAMETOOLONG; + goto done; + } + +#if defined(__arm64__) + const char *boot_obj_path = IOGetBootObjectsPath(); + if (boot_obj_path) { + if (boot_obj_path[0] == '/') { + dmgbasepath[len - 1] = '\0'; + } + + len = strlcat(dmgbasepath, boot_obj_path, MAXPATHLEN); + if (len > MAXPATHLEN) { + error = ENAMETOOLONG; + goto done; + } + + len = strlcat(dmgbasepath, "/usr/standalone/firmware/", MAXPATHLEN); + if (len > MAXPATHLEN) { + error = ENAMETOOLONG; + goto done; + } + +#if DEVELOPMENT || DEBUG + len = strlcpy(dmgpath, dmgbasepath, MAXPATHLEN); + if (len > MAXPATHLEN) { + error = ENAMETOOLONG; + goto done; + } + + len = strlcat(dmgpath, "arm64eBaseSystem.rooted.dmg", MAXPATHLEN); + if (len > MAXPATHLEN) { + error = ENAMETOOLONG; + goto done; + } + + error = vnode_lookup(dmgpath, 0, &imagevp, vfs_context_kernel()); + if (error == 0) { + *rooted_dmg = true; + goto done; + } + memset(dmgpath, 0, MAXPATHLEN); +#endif // DEVELOPMENT || DEBUG + + len = strlcpy(dmgpath, dmgbasepath, MAXPATHLEN); + if (len > MAXPATHLEN) { + error = ENAMETOOLONG; + goto done; + } + + len = strlcat(dmgpath, "arm64eBaseSystem.dmg", MAXPATHLEN); + if (len > MAXPATHLEN) { + error = ENAMETOOLONG; + goto done; + } + + error = vnode_lookup(dmgpath, 0, &imagevp, vfs_context_kernel()); + if (error == 0) { + goto done; + } + memset(dmgpath, 0, MAXPATHLEN); + dmgbasepath[strlen("/System/Volumes/Recovery/")] = '\0'; + } +#endif // __arm64__ + + const char *preboot_uuid = get_preboot_uuid(); + if (preboot_uuid == NULL) { + // no preboot? bail out + return EINVAL; + } + + len = strlcat(dmgbasepath, preboot_uuid, MAXPATHLEN); + if (len > MAXPATHLEN) { + error = ENAMETOOLONG; + goto done; + } + +#if DEVELOPMENT || DEBUG + // Try BaseSystem.rooted.dmg + len = strlcpy(dmgpath, dmgbasepath, MAXPATHLEN); + if (len > MAXPATHLEN) { + error = ENAMETOOLONG; + goto done; + } + + len = strlcat(dmgpath, "/BaseSystem.rooted.dmg", MAXPATHLEN); + if (len > MAXPATHLEN) { + error = ENAMETOOLONG; + goto done; + } + + error = vnode_lookup(dmgpath, 0, &imagevp, vfs_context_kernel()); + if (error == 0) { + // we found it! success! + *rooted_dmg = true; + goto done; + } +#endif // DEVELOPMENT || DEBUG + + // Try BaseSystem.dmg + len = strlcpy(dmgpath, dmgbasepath, MAXPATHLEN); + if (len > MAXPATHLEN) { + error = ENAMETOOLONG; + goto done; + } + + len = strlcat(dmgpath, "/BaseSystem.dmg", MAXPATHLEN); + if (len > MAXPATHLEN) { + error = ENAMETOOLONG; + goto done; + } + + error = vnode_lookup(dmgpath, 0, &imagevp, vfs_context_kernel()); + if (error == 0) { + // success! 
+ goto done; + } + +done: + if (error == 0) { + strlcpy(bsdmgpath_out, dmgpath, MAXPATHLEN); + } else { + bsd_init_kprintf("%s: error %d\n", __func__, error); + } + if (imagevp != NULLVP) { + vnode_put(imagevp); + } + zfree(ZV_NAMEI, dmgpath); + zfree(ZV_NAMEI, dmgbasepath); + return error; +} + +static boolean_t +bsdmgroot_bootable(void) +{ +#if defined(__arm64__) +#define BSDMGROOT_DEFAULT true +#else +#define BSDMGROOT_DEFAULT false +#endif + + boolean_t resolved = BSDMGROOT_DEFAULT; + + boolean_t boot_arg_bsdmgroot = false; + boolean_t boot_arg_nobsdmgroot = false; + int error; + mount_t mp; + boolean_t root_part_of_volume_group = false; + struct vfs_attr vfsattr; + + mp = rootvnode->v_mount; + VFSATTR_INIT(&vfsattr); + VFSATTR_WANTED(&vfsattr, f_capabilities); + + boot_arg_bsdmgroot = PE_parse_boot_argn("-bsdmgroot", NULL, 0); + boot_arg_nobsdmgroot = PE_parse_boot_argn("-nobsdmgroot", NULL, 0); + + error = vfs_getattr(mp, &vfsattr, vfs_context_kernel()); + if (!error && VFSATTR_IS_SUPPORTED(&vfsattr, f_capabilities)) { + if ((vfsattr.f_capabilities.capabilities[VOL_CAPABILITIES_FORMAT] & VOL_CAP_FMT_VOL_GROUPS) && + (vfsattr.f_capabilities.valid[VOL_CAPABILITIES_FORMAT] & VOL_CAP_FMT_VOL_GROUPS)) { + root_part_of_volume_group = true; + } + } + + boolean_t singleuser = (boothowto & RB_SINGLE) != 0; + + // Start with the #defined default above. + // If booting to single-user mode, default to false, because single- + // user mode inside the BaseSystem is probably not what's wanted. + // If the 'yes' boot-arg is set, we'll allow that even in single-user + // mode, we'll assume you know what you're doing. + // The 'no' boot-arg overpowers the 'yes' boot-arg. + // In any case, we will not attempt to root from BaseSystem if the + // original (booter-chosen) root volume isn't in a volume group. + // This is just out of an abundance of caution: if the boot environment + // seems to be "something other than a standard install", + // we'll be conservative in messing with the root volume. 
+ + if (singleuser) { + resolved = false; + } + + if (boot_arg_bsdmgroot) { + resolved = true; + } + + if (boot_arg_nobsdmgroot) { + resolved = false; + } + + if (!root_part_of_volume_group) { + resolved = false; + } + + return resolved; +} +#endif // CONFIG_BASESYSTEMROOT + void bsd_exec_setup(int scale) { diff --git a/bsd/kern/bsd_stubs.c b/bsd/kern/bsd_stubs.c index f73834598..3b8290fc5 100644 --- a/bsd/kern/bsd_stubs.c +++ b/bsd/kern/bsd_stubs.c @@ -118,8 +118,8 @@ current_proc(void) /* Device switch add delete routines */ -struct bdevsw nobdev = NO_BDEVICE; -struct cdevsw nocdev = NO_CDEVICE; +const struct bdevsw nobdev = NO_BDEVICE; +const struct cdevsw nocdev = NO_CDEVICE; /* * if index is -1, return a free slot if avaliable * else see whether the index is free @@ -142,7 +142,7 @@ bdevsw_isfree(int index) } devsw = &bdevsw[index]; for (; index < nblkdev; index++, devsw++) { - if (memcmp((char *)devsw, (char *)&nobdev, sizeof(struct bdevsw)) == 0) { + if (memcmp((const char *)devsw, (const char *)&nobdev, sizeof(struct bdevsw)) == 0) { break; } } @@ -153,7 +153,7 @@ bdevsw_isfree(int index) } devsw = &bdevsw[index]; - if ((memcmp((char *)devsw, (char *)&nobdev, sizeof(struct bdevsw)) != 0)) { + if ((memcmp((const char *)devsw, (const char *)&nobdev, sizeof(struct bdevsw)) != 0)) { return -1; } return index; @@ -169,7 +169,7 @@ bdevsw_isfree(int index) * instead of starting at 0 */ int -bdevsw_add(int index, struct bdevsw * bsw) +bdevsw_add(int index, const struct bdevsw * bsw) { lck_mtx_lock_spin(&devsw_lock_list_mtx); index = bdevsw_isfree(index); @@ -186,7 +186,7 @@ bdevsw_add(int index, struct bdevsw * bsw) * else -1 */ int -bdevsw_remove(int index, struct bdevsw * bsw) +bdevsw_remove(int index, const struct bdevsw * bsw) { struct bdevsw * devsw; @@ -196,7 +196,7 @@ bdevsw_remove(int index, struct bdevsw * bsw) devsw = &bdevsw[index]; lck_mtx_lock_spin(&devsw_lock_list_mtx); - if ((memcmp((char *)devsw, (char *)bsw, sizeof(struct bdevsw)) != 0)) { + if ((memcmp((const char *)devsw, (const char *)bsw, sizeof(struct bdevsw)) != 0)) { index = -1; } else { bdevsw[index] = nobdev; @@ -227,7 +227,7 @@ cdevsw_isfree(int index) } devsw = &cdevsw[index]; for (; index < nchrdev; index++, devsw++) { - if (memcmp((char *)devsw, (char *)&nocdev, sizeof(struct cdevsw)) == 0) { + if (memcmp((const char *)devsw, (const char *)&nocdev, sizeof(struct cdevsw)) == 0) { break; } } @@ -238,7 +238,7 @@ cdevsw_isfree(int index) } devsw = &cdevsw[index]; - if ((memcmp((char *)devsw, (char *)&nocdev, sizeof(struct cdevsw)) != 0)) { + if ((memcmp((const char *)devsw, (const char *)&nocdev, sizeof(struct cdevsw)) != 0)) { return -1; } return index; @@ -259,7 +259,7 @@ cdevsw_isfree(int index) * before them. -24 is currently a safe starting point. 
*/ int -cdevsw_add(int index, struct cdevsw * csw) +cdevsw_add(int index, const struct cdevsw * csw) { lck_mtx_lock_spin(&devsw_lock_list_mtx); index = cdevsw_isfree(index); @@ -276,7 +276,7 @@ cdevsw_add(int index, struct cdevsw * csw) * else -1 */ int -cdevsw_remove(int index, struct cdevsw * csw) +cdevsw_remove(int index, const struct cdevsw * csw) { struct cdevsw * devsw; @@ -286,7 +286,7 @@ cdevsw_remove(int index, struct cdevsw * csw) devsw = &cdevsw[index]; lck_mtx_lock_spin(&devsw_lock_list_mtx); - if ((memcmp((char *)devsw, (char *)csw, sizeof(struct cdevsw)) != 0)) { + if ((memcmp((const char *)devsw, (const char *)csw, sizeof(struct cdevsw)) != 0)) { index = -1; } else { cdevsw[index] = nocdev; @@ -303,7 +303,7 @@ cdev_set_bdev(int cdev, int bdev) } int -cdevsw_add_with_bdev(int index, struct cdevsw * csw, int bdev) +cdevsw_add_with_bdev(int index, const struct cdevsw * csw, int bdev) { index = cdevsw_add(index, csw); if (index < 0) { @@ -317,7 +317,7 @@ cdevsw_add_with_bdev(int index, struct cdevsw * csw, int bdev) } int -cdevsw_setkqueueok(int maj, struct cdevsw * csw, int extra_flags) +cdevsw_setkqueueok(int maj, const struct cdevsw * csw, int extra_flags) { struct cdevsw * devsw; uint64_t flags = CDEVSW_SELECT_KQUEUE; @@ -327,7 +327,7 @@ cdevsw_setkqueueok(int maj, struct cdevsw * csw, int extra_flags) } devsw = &cdevsw[maj]; - if ((memcmp((char *)devsw, (char *)csw, sizeof(struct cdevsw)) != 0)) { + if ((memcmp((const char *)devsw, (const char *)csw, sizeof(struct cdevsw)) != 0)) { return -1; } @@ -346,9 +346,10 @@ cdevsw_setkqueueok(int maj, struct cdevsw * csw, int extra_flags) * the NULL character in the hostname. */ int -bsd_hostname(char * buf, int bufsize, int * len) +bsd_hostname(char *buf, size_t bufsize, size_t *len) { - int ret, hnlen; + int ret; + size_t hnlen; /* * "hostname" is null-terminated */ diff --git a/bsd/kern/chunklist.c b/bsd/kern/chunklist.c index ed93a2fd9..be51d58c1 100644 --- a/bsd/kern/chunklist.c +++ b/bsd/kern/chunklist.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2019-2020 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -36,6 +36,7 @@ #include #include #include +#include #include #include @@ -52,16 +53,26 @@ #include -extern int read_file(const char *path, void **bufp, size_t *bufszp); /* implemented in imageboot.c */ -extern vnode_t imgboot_get_image_file(const char *path, off_t *fsize, int *errp); /* implemented in imageboot.c */ - #define AUTHDBG(fmt, args...) do { printf("%s: " fmt "\n", __func__, ##args); } while (0) #define AUTHPRNT(fmt, args...) 
do { printf("%s: " fmt "\n", __func__, ##args); } while (0) -#define kfree_safe(x) do { if ((x)) { kfree_addr((x)); (x) = NULL; } } while (0) +#define kheap_free_safe(h, x, l) do { if ((x)) { kheap_free(h, x, l); (x) = NULL; } } while (0) static const char *libkern_path = "/System/Library/Extensions/System.kext/PlugIns/Libkern.kext/Libkern"; static const char *libkern_bundle = "com.apple.kpi.libkern"; +extern boolean_t kernelcache_uuid_valid; +extern uuid_t kernelcache_uuid; + +#if DEBUG +static const char *bootkc_path = "/System/Library/KernelCollections/BootKernelExtensions.kc.debug"; +#elif KASAN +static const char *bootkc_path = "/System/Library/KernelCollections/BootKernelExtensions.kc.kasan"; +#elif DEVELOPMENT +static const char *bootkc_path = "/System/Library/KernelCollections/BootKernelExtensions.kc.development"; +#else +static const char *bootkc_path = "/System/Library/KernelCollections/BootKernelExtensions.kc"; +#endif + /* * Rev1 chunklist handling */ @@ -84,50 +95,32 @@ key_byteswap(void *_dst, const void *_src, size_t len) } static int -construct_chunklist_path(const char *root_path, char **bufp) +construct_chunklist_path(char path[static MAXPATHLEN], const char *root_path) { - int err = 0; - char *path = NULL; size_t len = 0; - path = kalloc(MAXPATHLEN); - if (path == NULL) { - AUTHPRNT("failed to allocate space for chunklist path"); - err = ENOMEM; - goto out; - } - len = strnlen(root_path, MAXPATHLEN); if (len < MAXPATHLEN && len > strlen(".dmg")) { /* correctly terminated string with space for extension */ } else { AUTHPRNT("malformed root path"); - err = EOVERFLOW; - goto out; + return EOVERFLOW; } len = strlcpy(path, root_path, MAXPATHLEN); if (len >= MAXPATHLEN) { AUTHPRNT("root path is too long"); - err = EOVERFLOW; - goto out; + return EOVERFLOW; } path[len - strlen(".dmg")] = '\0'; len = strlcat(path, ".chunklist", MAXPATHLEN); if (len >= MAXPATHLEN) { AUTHPRNT("chunklist path is too long"); - err = EOVERFLOW; - goto out; + return EOVERFLOW; } -out: - if (err) { - kfree_safe(path); - } else { - *bufp = path; - } - return err; + return 0; } static int @@ -138,16 +131,20 @@ validate_signature(const uint8_t *key_msb, size_t keylen, uint8_t *sig_msb, size uint8_t *sig = NULL; const uint8_t exponent[] = { 0x01, 0x00, 0x01 }; - uint8_t *modulus = kalloc(keylen); - rsa_pub_ctx *rsa_ctx = kalloc(sizeof(rsa_pub_ctx)); - sig = kalloc(siglen); + rsa_pub_ctx *rsa_ctx; + uint8_t *modulus; + + + modulus = kheap_alloc(KHEAP_TEMP, keylen, Z_WAITOK | Z_ZERO); + rsa_ctx = kheap_alloc(KHEAP_TEMP, sizeof(rsa_pub_ctx), + Z_WAITOK | Z_ZERO); + sig = kheap_alloc(KHEAP_TEMP, siglen, Z_WAITOK | Z_ZERO); if (modulus == NULL || rsa_ctx == NULL || sig == NULL) { err = ENOMEM; goto out; } - bzero(rsa_ctx, sizeof(rsa_pub_ctx)); key_byteswap(modulus, key_msb, keylen); key_byteswap(sig, sig_msb, siglen); @@ -170,9 +167,9 @@ validate_signature(const uint8_t *key_msb, size_t keylen, uint8_t *sig_msb, size } out: - kfree_safe(sig); - kfree_safe(rsa_ctx); - kfree_safe(modulus); + kheap_free_safe(KHEAP_TEMP, sig, siglen); + kheap_free_safe(KHEAP_TEMP, rsa_ctx, sizeof(*rsa_ctx)); + kheap_free_safe(KHEAP_TEMP, modulus, keylen); if (err) { return err; @@ -223,7 +220,7 @@ validate_root_image(const char *root_path, void *chunklist) if (!buf) { /* allocate buffer based on first chunk size */ - buf = kalloc(chk->chunk_size); + buf = kheap_alloc(KHEAP_TEMP, chk->chunk_size, Z_WAITOK); if (buf == NULL) { err = ENOMEM; goto out; @@ -237,7 +234,8 @@ validate_root_image(const char *root_path, void *chunklist) goto 
out; } - err = vn_rdwr(UIO_READ, vp, (caddr_t)buf, chk->chunk_size, offset, UIO_SYSSPACE, IO_NODELOCKED, kerncred, &resid, p); + err = vn_rdwr(UIO_READ, vp, (caddr_t)buf, chk->chunk_size, + offset, UIO_SYSSPACE, IO_NODELOCKED, kerncred, &resid, p); if (err) { AUTHPRNT("vn_rdrw fail (err = %d, resid = %d)", err, resid); goto out; @@ -276,7 +274,7 @@ validate_root_image(const char *root_path, void *chunklist) } out: - kfree_safe(buf); + kheap_free_safe(KHEAP_TEMP, buf, bufsz); if (doclose) { VNOP_CLOSE(vp, FREAD, ctx); } @@ -573,14 +571,16 @@ out: * Authenticate a given DMG file using chunklist */ int -authenticate_root_with_chunklist(const char *root_path) +authenticate_root_with_chunklist(const char *rootdmg_path, boolean_t *out_enforced) { char *chunklist_path = NULL; void *chunklist_buf = NULL; size_t chunklist_len = 32 * 1024 * 1024UL; + boolean_t enforced = TRUE; int err = 0; - err = construct_chunklist_path(root_path, &chunklist_path); + chunklist_path = zalloc(ZV_NAMEI); + err = construct_chunklist_path(chunklist_path, rootdmg_path); if (err) { AUTHPRNT("failed creating chunklist path"); goto out; @@ -593,7 +593,7 @@ authenticate_root_with_chunklist(const char *root_path) * the chunklist. */ AUTHDBG("reading chunklist"); - err = read_file(chunklist_path, &chunklist_buf, &chunklist_len); + err = imageboot_read_file(KHEAP_TEMP, chunklist_path, &chunklist_buf, &chunklist_len); if (err) { AUTHPRNT("failed to read chunklist"); goto out; @@ -608,7 +608,7 @@ authenticate_root_with_chunklist(const char *root_path) AUTHDBG("successfully validated chunklist"); AUTHDBG("validating root image against chunklist"); - err = validate_root_image(root_path, chunklist_buf); + err = validate_root_image(rootdmg_path, chunklist_buf); if (err) { AUTHPRNT("failed to validate root image against chunklist (%d)", err); goto out; @@ -618,8 +618,80 @@ authenticate_root_with_chunklist(const char *root_path) AUTHDBG("root image authenticated"); out: - kfree_safe(chunklist_buf); - kfree_safe(chunklist_path); +#if CONFIG_CSR + if (err && (csr_check(CSR_ALLOW_ANY_RECOVERY_OS) == 0)) { + AUTHPRNT("CSR_ALLOW_ANY_RECOVERY_OS set, allowing unauthenticated root image"); + err = 0; + enforced = FALSE; + } +#endif + + if (out_enforced != NULL) { + *out_enforced = enforced; + } + kheap_free_safe(KHEAP_TEMP, chunklist_buf, chunklist_len); + zfree(ZV_NAMEI, chunklist_path); + return err; +} + +int +authenticate_root_version_check(void) +{ + kc_format_t kc_format; + if (PE_get_primary_kc_format(&kc_format) && kc_format == KCFormatFileset) { + return authenticate_bootkc_uuid(); + } else { + return authenticate_libkern_uuid(); + } +} + +/* + * Check that the UUID of the boot KC currently loaded matches the one on disk. + */ +int +authenticate_bootkc_uuid(void) +{ + int err = 0; + void *buf = NULL; + size_t bufsz = 1 * 1024 * 1024UL; + + /* get the UUID of the bootkc in /S/L/KC */ + err = imageboot_read_file(KHEAP_TEMP, bootkc_path, &buf, &bufsz); + if (err) { + goto out; + } + + unsigned long uuidsz = 0; + const uuid_t *img_uuid = getuuidfromheader_safe(buf, bufsz, &uuidsz); + if (img_uuid == NULL || uuidsz != sizeof(uuid_t)) { + AUTHPRNT("invalid UUID (sz = %lu)", uuidsz); + err = EINVAL; + goto out; + } + + if (!kernelcache_uuid_valid) { + AUTHPRNT("Boot KC UUID was not set at boot."); + err = EINVAL; + goto out; + } + + /* ... 
and compare them */ + if (bcmp(&kernelcache_uuid, img_uuid, uuidsz) != 0) { + AUTHPRNT("UUID of running bootkc does not match %s", bootkc_path); + + uuid_string_t img_uuid_str, live_uuid_str; + uuid_unparse(*img_uuid, img_uuid_str); + uuid_unparse(kernelcache_uuid, live_uuid_str); + AUTHPRNT("loaded bootkc UUID = %s", live_uuid_str); + AUTHPRNT("on-disk bootkc UUID = %s", img_uuid_str); + + err = EINVAL; + goto out; + } + + /* UUID matches! */ +out: + kheap_free_safe(KHEAP_TEMP, buf, bufsz); return err; } @@ -627,18 +699,34 @@ out: * Check that the UUID of the libkern currently loaded matches the one on disk. */ int -authenticate_root_version_check(void) +authenticate_libkern_uuid(void) { int err = 0; void *buf = NULL; size_t bufsz = 4 * 1024 * 1024UL; /* get the UUID of the libkern in /S/L/E */ - err = read_file(libkern_path, &buf, &bufsz); + err = imageboot_read_file(KHEAP_TEMP, libkern_path, &buf, &bufsz); if (err) { goto out; } + if (fatfile_validate_fatarches((vm_offset_t)buf, bufsz) == LOAD_SUCCESS) { + struct fat_header *fat_header = buf; + struct fat_arch fat_arch; + if (fatfile_getbestarch((vm_offset_t)fat_header, bufsz, NULL, &fat_arch, FALSE) != LOAD_SUCCESS) { + err = EINVAL; + goto out; + } + kheap_free_safe(KHEAP_TEMP, buf, bufsz); + buf = NULL; + bufsz = MIN(fat_arch.size, 4 * 1024 * 1024UL); + err = imageboot_read_file_from_offset(KHEAP_TEMP, libkern_path, fat_arch.offset, &buf, &bufsz); + if (err) { + goto out; + } + } + unsigned long uuidsz = 0; const uuid_t *img_uuid = getuuidfromheader_safe(buf, bufsz, &uuidsz); if (img_uuid == NULL || uuidsz != sizeof(uuid_t)) { @@ -671,6 +759,6 @@ authenticate_root_version_check(void) /* UUID matches! */ out: - kfree_safe(buf); + kheap_free_safe(KHEAP_TEMP, buf, bufsz); return err; } diff --git a/bsd/kern/chunklist.h b/bsd/kern/chunklist.h index 7a1042e75..adffd552e 100644 --- a/bsd/kern/chunklist.h +++ b/bsd/kern/chunklist.h @@ -89,6 +89,8 @@ struct chunklist_pubkey { const uint8_t key[CHUNKLIST_PUBKEY_LEN]; }; -int authenticate_root_with_chunklist(const char *root_path); +int authenticate_root_with_chunklist(const char *rootdmg_path, boolean_t *out_enforced); int authenticate_root_version_check(void); +int authenticate_bootkc_uuid(void); +int authenticate_libkern_uuid(void); #endif /* _CHUNKLIST_H */ diff --git a/bsd/kern/decmpfs.c b/bsd/kern/decmpfs.c index fd532f100..4040d9b6a 100644 --- a/bsd/kern/decmpfs.c +++ b/bsd/kern/decmpfs.c @@ -72,6 +72,8 @@ UNUSED_SYMBOL(decmpfs_validate_compressed_file) #include #include +#include + #pragma mark --- debugging --- #define COMPRESSION_DEBUG 0 @@ -94,6 +96,7 @@ baseName(const char *path) return ret; } +#if COMPRESSION_DEBUG static char* vnpath(vnode_t vp, char *path, int len) { @@ -103,9 +106,14 @@ vnpath(vnode_t vp, char *path, int len) path[origlen - 1] = 0; return path; } +#endif #define ErrorLog(x, args...) printf("%s:%d:%s: " x, baseName(__FILE__), __LINE__, __FUNCTION__, ## args) +#if COMPRESSION_DEBUG #define ErrorLogWithPath(x, args...) do { char *path; MALLOC(path, char *, PATH_MAX, M_TEMP, M_WAITOK); printf("%s:%d:%s: %s: " x, baseName(__FILE__), __LINE__, __FUNCTION__, vnpath(vp, path, PATH_MAX), ## args); FREE(path, M_TEMP); } while(0) +#else +#define ErrorLogWithPath(x, args...) 
do { (void*)vp; printf("%s:%d:%s: %s: " x, baseName(__FILE__), __LINE__, __FUNCTION__, "", ## args); } while(0) +#endif #if COMPRESSION_DEBUG #define DebugLog ErrorLog @@ -201,7 +209,7 @@ _free(char *ret, __unused int type, const char *file, int line) static lck_grp_t *decmpfs_lockgrp; -SECURITY_READ_ONLY_EARLY(static decmpfs_registration *) decompressors[CMP_MAX]; /* the registered compressors */ +static const decmpfs_registration *decompressors[CMP_MAX]; /* the registered compressors */ static lck_rw_t * decompressorsLock; static int decompress_channel; /* channel used by decompress_file to wake up waiters */ static lck_mtx_t *decompress_channel_mtx; @@ -210,16 +218,13 @@ vfs_context_t decmpfs_ctx; #pragma mark --- decmp_get_func --- -#define offsetof_func(func) ((uintptr_t)(&(((decmpfs_registration*)NULL)->func))) +#define offsetof_func(func) ((uintptr_t)offsetof(decmpfs_registration, func)) static void * -_func_from_offset(uint32_t type, uintptr_t offset) +_func_from_offset(uint32_t type, uintptr_t offset, uint32_t discriminator) { /* get the function at the given offset in the registration for the given type */ const decmpfs_registration *reg = decompressors[type]; - const char *regChar = (const char*)reg; - const char *func = ®Char[offset]; - void * const * funcPtr = (void * const *) func; switch (reg->decmpfs_registration) { case DECMPFS_REGISTRATION_VERSION_V1: @@ -236,7 +241,12 @@ _func_from_offset(uint32_t type, uintptr_t offset) return NULL; } - return funcPtr[0]; + void *ptr = *(void * const *)((const void *)reg + offset); + if (ptr != NULL) { + /* Resign as a function-in-void* */ + ptr = ptrauth_auth_and_resign(ptr, ptrauth_key_asia, discriminator, ptrauth_key_asia, 0); + } + return ptr; } extern void IOServicePublishResource( const char * property, boolean_t value ); @@ -244,7 +254,7 @@ extern boolean_t IOServiceWaitForMatchingResource( const char * property, uint64 extern boolean_t IOCatalogueMatchingDriversPresent( const char * property ); static void * -_decmp_get_func(vnode_t vp, uint32_t type, uintptr_t offset) +_decmp_get_func(vnode_t vp, uint32_t type, uintptr_t offset, uint32_t discriminator) { /* * this function should be called while holding a shared lock to decompressorsLock, @@ -257,7 +267,7 @@ _decmp_get_func(vnode_t vp, uint32_t type, uintptr_t offset) if (decompressors[type] != NULL) { // the compressor has already registered but the function might be null - return _func_from_offset(type, offset); + return _func_from_offset(type, offset, discriminator); } // does IOKit know about a kext that is supposed to provide this type? 
@@ -291,7 +301,7 @@ _decmp_get_func(vnode_t vp, uint32_t type, uintptr_t offset) return NULL; } // it's now registered, so let's return the function - return _func_from_offset(type, offset); + return _func_from_offset(type, offset, discriminator); } // the compressor hasn't registered, so it never will unless someone manually kextloads it @@ -299,7 +309,7 @@ _decmp_get_func(vnode_t vp, uint32_t type, uintptr_t offset) return NULL; } -#define decmp_get_func(vp, type, func) ((typeof(((decmpfs_registration*)NULL)->func))_decmp_get_func(vp, type, offsetof_func(func))) +#define decmp_get_func(vp, type, func) (typeof(decompressors[0]->func))_decmp_get_func(vp, type, offsetof_func(func), ptrauth_function_pointer_type_discriminator(typeof(decompressors[0]->func))) #pragma mark --- utilities --- @@ -322,18 +332,19 @@ vnsize(vnode_t vp, uint64_t *size) #pragma mark --- cnode routines --- +ZONE_DECLARE(decmpfs_cnode_zone, "decmpfs_cnode", + sizeof(struct decmpfs_cnode), ZC_NONE); + decmpfs_cnode * decmpfs_cnode_alloc(void) { - decmpfs_cnode *dp; - MALLOC_ZONE(dp, decmpfs_cnode *, sizeof(decmpfs_cnode), M_DECMPFS_CNODE, M_WAITOK); - return dp; + return zalloc(decmpfs_cnode_zone); } void decmpfs_cnode_free(decmpfs_cnode *dp) { - FREE_ZONE(dp, sizeof(*dp), M_DECMPFS_CNODE); + zfree(decmpfs_cnode_zone, dp); } void @@ -419,7 +430,7 @@ decmpfs_cnode_set_vnode_state(decmpfs_cnode *cp, uint32_t state, int skiplock) if (!skiplock) { decmpfs_lock_compressed_data(cp, 1); } - cp->cmp_state = state; + cp->cmp_state = (uint8_t)state; if (state == FILE_TYPE_UNKNOWN) { /* clear out the compression type too */ cp->cmp_type = 0; @@ -447,7 +458,7 @@ decmpfs_cnode_set_vnode_minimal_xattr(decmpfs_cnode *cp, int minimal_xattr, int if (!skiplock) { decmpfs_lock_compressed_data(cp, 1); } - cp->cmp_minimal_xattr = minimal_xattr; + cp->cmp_minimal_xattr = !!minimal_xattr; if (!skiplock) { decmpfs_unlock_compressed_data(cp, 1); } @@ -624,7 +635,7 @@ decmpfs_fetch_compressed_header(vnode_t vp, decmpfs_cnode *cp, decmpfs_header ** goto out; } hdr = (decmpfs_header*)data; - hdr->attr_size = attr_size; + hdr->attr_size = (uint32_t)attr_size; /* swap the fields to native endian */ hdr->compression_magic = OSSwapLittleToHostInt32(hdr->compression_magic); hdr->compression_type = OSSwapLittleToHostInt32(hdr->compression_type); @@ -1209,7 +1220,7 @@ decmpfs_fetch_uncompressed_data(vnode_t vp, decmpfs_cnode *cp, decmpfs_header *h *bytes_read = 0; - if ((uint64_t)offset >= hdr->uncompressed_size) { + if (offset >= (off_t)hdr->uncompressed_size) { /* reading past end of file; nothing to do */ err = 0; goto out; @@ -1219,9 +1230,9 @@ decmpfs_fetch_uncompressed_data(vnode_t vp, decmpfs_cnode *cp, decmpfs_header *h err = EINVAL; goto out; } - if ((uint64_t)(offset + size) > hdr->uncompressed_size) { + if (hdr->uncompressed_size - offset < size) { /* adjust size so we don't read past the end of the file */ - size = hdr->uncompressed_size - offset; + size = (user_ssize_t)(hdr->uncompressed_size - offset); } if (size == 0) { /* nothing to read */ @@ -1253,7 +1264,8 @@ decmpfs_fetch_uncompressed_data(vnode_t vp, decmpfs_cnode *cp, decmpfs_header *h #if !defined(__i386__) && !defined(__x86_64__) int i; for (i = 0; i < nvec; i++) { - flush_dcache64((addr64_t)(uintptr_t)vec[i].buf, vec[i].size, FALSE); + assert(vec[i].size >= 0 && vec[i].size <= UINT_MAX); + flush_dcache64((addr64_t)(uintptr_t)vec[i].buf, (unsigned int)vec[i].size, FALSE); } #endif } @@ -1289,13 +1301,13 @@ commit_upl(upl_t upl, upl_offset_t pl_offset, size_t uplSize, int flags, 
int abo /* commit the upl pages */ if (abort) { VerboseLog("aborting upl, flags 0x%08x\n", flags); - kr = ubc_upl_abort_range(upl, pl_offset, uplSize, flags); + kr = ubc_upl_abort_range(upl, pl_offset, (upl_size_t)uplSize, flags); if (kr != KERN_SUCCESS) { ErrorLog("ubc_upl_abort_range error %d\n", (int)kr); } } else { VerboseLog("committing upl, flags 0x%08x\n", flags | UPL_COMMIT_CLEAR_DIRTY); - kr = ubc_upl_commit_range(upl, pl_offset, uplSize, flags | UPL_COMMIT_CLEAR_DIRTY | UPL_COMMIT_WRITTEN_BY_KERNEL); + kr = ubc_upl_commit_range(upl, pl_offset, (upl_size_t)uplSize, flags | UPL_COMMIT_CLEAR_DIRTY | UPL_COMMIT_WRITTEN_BY_KERNEL); if (kr != KERN_SUCCESS) { ErrorLog("ubc_upl_commit_range error %d\n", (int)kr); } @@ -1318,10 +1330,14 @@ decmpfs_pagein_compressed(struct vnop_pagein_args *ap, int *is_compressed, decmp int flags = ap->a_flags; off_t uplPos = 0; user_ssize_t uplSize = 0; + size_t verify_block_size = 0; void *data = NULL; decmpfs_header *hdr = NULL; uint64_t cachedSize = 0; int cmpdata_locked = 0; + bool file_tail_page_valid = false; + int num_valid_pages = 0; + int num_invalid_pages = 0; if (!decmpfs_trylock_compressed_data(cp, 0)) { return EAGAIN; @@ -1346,6 +1362,29 @@ decmpfs_pagein_compressed(struct vnop_pagein_args *ap, int *is_compressed, decmp goto out; } + /* + * If the verify block size is larger than the page size, the UPL needs + * to be aligned to it, Since the UPL has been created by the filesystem, + * we will only check if the passed in UPL length conforms to the + * alignment requirements. + */ + err = VNOP_VERIFY(vp, f_offset, NULL, 0, &verify_block_size, + VNODE_VERIFY_DEFAULT, NULL); + if (err) { + goto out; + } else if (verify_block_size) { + if (verify_block_size & (verify_block_size - 1)) { + ErrorLogWithPath("verify block size is not power of 2, no verification will be done\n"); + err = EINVAL; + } else if (size % verify_block_size) { + ErrorLogWithPath("upl size is not a multiple of verify block size\n"); + err = EINVAL; + } + if (err) { + goto out; + } + } + #if CONFIG_IOSCHED /* Mark the UPL as the requesting UPL for decompression */ upl_mark_decmp(pl); @@ -1368,7 +1407,7 @@ decmpfs_pagein_compressed(struct vnop_pagein_args *ap, int *is_compressed, decmp /* clip the size to the size of the file */ if ((uint64_t)uplPos + uplSize > cachedSize) { /* truncate the read to the size of the file */ - uplSize = cachedSize - uplPos; + uplSize = (user_ssize_t)(cachedSize - uplPos); } /* do the fetch */ @@ -1376,8 +1415,10 @@ decmpfs_pagein_compressed(struct vnop_pagein_args *ap, int *is_compressed, decmp decompress: /* the mapped data pointer points to the first page of the page list, so we want to start filling in at an offset of pl_offset */ - vec.buf = (char*)data + pl_offset; - vec.size = size; + vec = (decmpfs_vector) { + .buf = (char*)data + pl_offset, + .size = size, + }; uint64_t did_read = 0; if (decmpfs_fast_get_state(cp) == FILE_IS_CONVERTING) { @@ -1387,9 +1428,136 @@ decompress: * pretend that it succeeded but don't do anything since we're just going to write over the pages anyway */ err = 0; - did_read = 0; } else { - err = decmpfs_fetch_uncompressed_data(vp, cp, hdr, uplPos, uplSize, 1, &vec, &did_read); + if (!verify_block_size || (verify_block_size <= PAGE_SIZE)) { + err = decmpfs_fetch_uncompressed_data(vp, cp, hdr, uplPos, uplSize, 1, &vec, &did_read); + } else { + off_t l_uplPos = uplPos; + off_t l_pl_offset = pl_offset; + user_ssize_t l_uplSize = uplSize; + upl_page_info_t *pl_info = ubc_upl_pageinfo(pl); + + err = 0; + /* + * When the 
system page size is less than the "verify block size", + * the UPL passed may not consist solely of absent pages. + * We have to detect the "absent" pages and only decompress + * into those absent/invalid page ranges. + * + * Things that will change in each iteration of the loop : + * + * l_pl_offset = where we are inside the UPL [0, caller_upl_created_size) + * l_uplPos = the file offset the l_pl_offset corresponds to. + * l_uplSize = the size of the upl still unprocessed; + * + * In this picture, we have to do the transfer on 2 ranges + * (One 2 page range and one 3 page range) and the loop + * below will skip the first two pages and then identify + * the next two as invalid and fill those in and + * then skip the next one and then do the last pages. + * + * uplPos(file_offset) + * | uplSize + * 0 V<--------------> file_size + * |---------------------------------------------------> + * | | |V|V|I|I|V|I|I|I| + * ^ + * | upl + * <-------------------> + * | + * pl_offset + * + * uplSize will be clipped in case the UPL range exceeds + * the file size. + * + */ + while (l_uplSize) { + uint64_t l_did_read = 0; + int pl_offset_pg = (int)(l_pl_offset / PAGE_SIZE); + int pages_left_in_upl; + int start_pg; + int last_pg; + + /* + * l_uplSize may start off less than the size of the upl, + * we have to round it up to PAGE_SIZE to calculate + * how many more pages are left. + */ + pages_left_in_upl = (int)(round_page((vm_offset_t)l_uplSize) / PAGE_SIZE); + + /* + * scan from the beginning of the upl looking for the first + * non-valid page.... this will become the first page in + * the request we're going to make to + * 'decmpfs_fetch_uncompressed_data'... if all + * of the pages are valid, we won't call through + * to 'decmpfs_fetch_uncompressed_data' + */ + for (start_pg = 0; start_pg < pages_left_in_upl; start_pg++) { + if (!upl_valid_page(pl_info, pl_offset_pg + start_pg)) { + break; + } + } + + num_valid_pages += start_pg; + + /* + * scan from the starting invalid page looking for + * a valid page before the end of the upl is + * reached, if we find one, then it will be the + * last page of the request to 'decmpfs_fetch_uncompressed_data' + */ + for (last_pg = start_pg; last_pg < pages_left_in_upl; last_pg++) { + if (upl_valid_page(pl_info, pl_offset_pg + last_pg)) { + break; + } + } + + if (start_pg < last_pg) { + off_t inval_offset = start_pg * PAGE_SIZE; + int inval_pages = last_pg - start_pg; + int inval_size = inval_pages * PAGE_SIZE; + decmpfs_vector l_vec; + + num_invalid_pages += inval_pages; + if (inval_offset) { + did_read += inval_offset; + l_pl_offset += inval_offset; + l_uplPos += inval_offset; + l_uplSize -= inval_offset; + } + + l_vec = (decmpfs_vector) { + .buf = (char*)data + l_pl_offset, + .size = inval_size, + }; + + err = decmpfs_fetch_uncompressed_data(vp, cp, hdr, l_uplPos, + MIN(l_uplSize, inval_size), 1, &l_vec, &l_did_read); + + if (!err && (l_did_read != inval_size) && (l_uplSize > inval_size)) { + ErrorLogWithPath("Unexpected size fetch of decompressed data, l_uplSize = %d, l_did_read = %d, inval_size = %d\n", + (int)l_uplSize, (int)l_did_read, (int)inval_size); + err = EINVAL; + } + } else { + /* no invalid pages left */ + l_did_read = l_uplSize; + if (uplSize < size) { + file_tail_page_valid = true; + } + } + + if (err) { + break; + } + + did_read += l_did_read; + l_pl_offset += l_did_read; + l_uplPos += l_did_read; + l_uplSize -= l_did_read; + } + } } if (err) { DebugLogWithPath("decmpfs_fetch_uncompressed_data err %d\n", err); @@ -1412,8 +1580,19 @@ decompress: /* 
zero out whatever we didn't read, and zero out the end of the last page(s) */ uint64_t total_size = (size + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1); - if (did_read < total_size) { - memset((char*)vec.buf + did_read, 0, total_size - did_read); + if (did_read < total_size && !(verify_block_size && err)) { + uint64_t rounded_up_did_read = file_tail_page_valid ? (uint64_t)(round_page((vm_offset_t)did_read)) : did_read; + memset((char*)vec.buf + rounded_up_did_read, 0, (size_t)(total_size - rounded_up_did_read)); + } + + if (!err && verify_block_size) { + size_t cur_verify_block_size = verify_block_size; + + if ((err = VNOP_VERIFY(vp, uplPos, vec.buf, size, &cur_verify_block_size, 0, NULL))) { + ErrorLogWithPath("Verification failed with error %d, uplPos = %lld, uplSize = %d, did_read = %d, total_size = %d, valid_pages = %d, invalid_pages = %d, tail_page_valid = %d\n", + err, (long long)uplPos, (int)uplSize, (int)did_read, (int)total_size, num_valid_pages, num_invalid_pages, file_tail_page_valid); + } + /* XXX : If the verify block size changes, redo the read */ } #if CONFIG_IOSCHED @@ -1426,7 +1605,7 @@ decompress: } else { if (!err) { /* commit our pages */ - kr = commit_upl(pl, pl_offset, total_size, UPL_COMMIT_FREE_ON_EMPTY, 0); + kr = commit_upl(pl, pl_offset, (size_t)total_size, UPL_COMMIT_FREE_ON_EMPTY, 0); } } @@ -1478,6 +1657,8 @@ decmpfs_read_compressed(struct vnop_read_args *ap, int *is_compressed, decmpfs_c uint64_t cachedSize = 0; off_t uioPos = 0; user_ssize_t uioRemaining = 0; + size_t verify_block_size = 0; + size_t alignment_size = PAGE_SIZE; int cmpdata_locked = 0; decmpfs_lock_compressed_data(cp, 0); cmpdata_locked = 1; @@ -1490,11 +1671,11 @@ decmpfs_read_compressed(struct vnop_read_args *ap, int *is_compressed, decmpfs_c if ((uint64_t)uplPos + uplSize > cachedSize) { /* truncate the read to the size of the file */ - uplSize = cachedSize - uplPos; + uplSize = (user_ssize_t)(cachedSize - uplPos); } /* give the cluster layer a chance to fill in whatever it already has */ - countInt = (uplSize > INT_MAX) ? INT_MAX : uplSize; + countInt = (uplSize > INT_MAX) ? 
INT_MAX : (int)uplSize; err = cluster_copy_ubc_data(vp, uio, &countInt, 0); if (err != 0) { goto out; @@ -1505,7 +1686,7 @@ decmpfs_read_compressed(struct vnop_read_args *ap, int *is_compressed, decmpfs_c uioRemaining = uio_resid(uio); if ((uint64_t)uioPos + uioRemaining > cachedSize) { /* truncate the read to the size of the file */ - uioRemaining = cachedSize - uioPos; + uioRemaining = (user_ssize_t)(cachedSize - uioPos); } if (uioRemaining <= 0) { @@ -1540,7 +1721,7 @@ decmpfs_read_compressed(struct vnop_read_args *ap, int *is_compressed, decmpfs_c /* clip the adjusted size to the size of the file */ if ((uint64_t)uplPos + uplSize > cachedSize) { /* truncate the read to the size of the file */ - uplSize = cachedSize - uplPos; + uplSize = (user_ssize_t)(cachedSize - uplPos); } if (uplSize <= 0) { @@ -1553,13 +1734,27 @@ decmpfs_read_compressed(struct vnop_read_args *ap, int *is_compressed, decmpfs_c * make sure we're on page boundaries */ - if (uplPos & (PAGE_SIZE - 1)) { + /* If the verify block size is larger than the page size, the UPL needs to aligned to it */ + err = VNOP_VERIFY(vp, uplPos, NULL, 0, &verify_block_size, VNODE_VERIFY_DEFAULT, NULL); + if (err) { + goto out; + } else if (verify_block_size) { + if (verify_block_size & (verify_block_size - 1)) { + ErrorLogWithPath("verify block size is not power of 2, no verification will be done\n"); + verify_block_size = 0; + } else if (verify_block_size > PAGE_SIZE) { + alignment_size = verify_block_size; + } + } + + if (uplPos & (alignment_size - 1)) { /* round position down to page boundary */ - uplSize += (uplPos & (PAGE_SIZE - 1)); - uplPos &= ~(PAGE_SIZE - 1); + uplSize += (uplPos & (alignment_size - 1)); + uplPos &= ~(alignment_size - 1); } - /* round size up to page multiple */ - uplSize = (uplSize + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1); + + /* round size up to alignement_size multiple */ + uplSize = (uplSize + (alignment_size - 1)) & ~(alignment_size - 1); VerboseLogWithPath("new uplPos %lld uplSize %lld\n", (uint64_t)uplPos, (uint64_t)uplSize); @@ -1578,7 +1773,7 @@ decmpfs_read_compressed(struct vnop_read_args *ap, int *is_compressed, decmpfs_c } /* create the upl */ - kr = ubc_create_upl_kernel(vp, curUplPos, curUplSize, &upl, &pli, UPL_SET_LITE, VM_KERN_MEMORY_FILE); + kr = ubc_create_upl_kernel(vp, curUplPos, (int)curUplSize, &upl, &pli, UPL_SET_LITE, VM_KERN_MEMORY_FILE); if (kr != KERN_SUCCESS) { ErrorLogWithPath("ubc_create_upl error %d\n", (int)kr); err = EINVAL; @@ -1644,8 +1839,19 @@ decompress: kr = KERN_FAILURE; did_read = 0; } + /* zero out the remainder of the last page */ - memset((char*)data + did_read, 0, curUplSize - did_read); + memset((char*)data + did_read, 0, (size_t)(curUplSize - did_read)); + if (!err && verify_block_size) { + size_t cur_verify_block_size = verify_block_size; + + if ((err = VNOP_VERIFY(vp, curUplPos, data, curUplSize, &cur_verify_block_size, 0, NULL))) { + ErrorLogWithPath("Verification failed with error %d\n", err); + abort_read = 1; + } + /* XXX : If the verify block size changes, redo the read */ + } + kr = ubc_upl_unmap(upl); if (kr == KERN_SUCCESS) { if (abort_read) { @@ -1657,6 +1863,9 @@ decompress: if (uplOff < 0) { ErrorLogWithPath("uplOff %lld should never be negative\n", (int64_t)uplOff); err = EINVAL; + } else if (uplOff > INT_MAX) { + ErrorLogWithPath("uplOff %lld too large\n", (int64_t)uplOff); + err = EINVAL; } else { off_t count = curUplPos + curUplSize - uioPos; if (count < 0) { @@ -1665,9 +1874,10 @@ decompress: if (count > uioRemaining) { count = uioRemaining; } - int 
io_resid = count; - err = cluster_copy_upl_data(uio, upl, uplOff, &io_resid); - int copied = count - io_resid; + int icount = (count > INT_MAX) ? INT_MAX : (int)count; + int io_resid = icount; + err = cluster_copy_upl_data(uio, upl, (int)uplOff, &io_resid); + int copied = icount - io_resid; VerboseLogWithPath("uplOff %lld count %lld copied %lld\n", (uint64_t)uplOff, (uint64_t)count, (uint64_t)copied); if (err) { ErrorLogWithPath("cluster_copy_upl_data err %d\n", err); @@ -1806,7 +2016,7 @@ decmpfs_decompress_file(vnode_t vp, decmpfs_cnode *cp, off_t toSize, int truncat uint32_t old_state = 0; uint32_t new_state = 0; int update_file_state = 0; - int allocSize = 0; + size_t allocSize = 0; decmpfs_header *hdr = NULL; int cmpdata_locked = 0; off_t remaining = 0; @@ -1885,7 +2095,7 @@ decompress: toSize = hdr->uncompressed_size; } - allocSize = MIN(64 * 1024, toSize); + allocSize = MIN(64 * 1024, (size_t)toSize); MALLOC(data, char *, allocSize, M_TEMP, M_WAITOK); if (!data) { err = ENOMEM; @@ -1911,7 +2121,7 @@ decompress: /* loop decompressing data from the file and writing it into the data fork */ uint64_t bytes_read = 0; - decmpfs_vector vec = { .buf = data, .size = MIN(allocSize, remaining) }; + decmpfs_vector vec = { .buf = data, .size = (user_ssize_t)MIN(allocSize, remaining) }; err = decmpfs_fetch_uncompressed_data(vp, cp, hdr, offset, vec.size, 1, &vec, &bytes_read); if (err != 0) { ErrorLogWithPath("decmpfs_fetch_uncompressed_data err %d\n", err); @@ -1924,7 +2134,7 @@ decompress: } uio_reset(uio_w, offset, UIO_SYSSPACE, UIO_WRITE); - err = uio_addiov(uio_w, CAST_USER_ADDR_T(data), bytes_read); + err = uio_addiov(uio_w, CAST_USER_ADDR_T(data), (user_size_t)bytes_read); if (err != 0) { ErrorLogWithPath("uio_addiov err %d\n", err); err = ENOMEM; diff --git a/bsd/kern/imageboot.c b/bsd/kern/imageboot.c index 207d1fe0e..36a275c68 100644 --- a/bsd/kern/imageboot.c +++ b/bsd/kern/imageboot.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2006 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2006-2020 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -32,6 +32,7 @@ #include #include #include +#include #include #include #include @@ -46,10 +47,11 @@ #include #include #include +#include #if CONFIG_IMAGEBOOT_IMG4 #include -#include +#include #endif #include @@ -79,15 +81,17 @@ typedef struct _locker_mount_args { #define AUTHDBG(fmt, args...) do { printf("%s: " fmt "\n", __func__, ##args); } while (0) #define AUTHPRNT(fmt, args...) 
do { printf("%s: " fmt "\n", __func__, ##args); } while (0) -#define kfree_safe(x) do { if ((x)) { kfree_addr((x)); (x) = NULL; } } while (0) +#define kheap_free_safe(h, x, l) do { if ((x)) { kheap_free(h, x, l); (x) = NULL; } } while (0) +extern int di_root_image_ext(const char *path, char *devname, size_t devsz, dev_t *dev_p, bool removable); extern int di_root_image(const char *path, char *devname, size_t devsz, dev_t *dev_p); extern int di_root_ramfile_buf(void *buf, size_t bufsz, char *devname, size_t devsz, dev_t *dev_p); static boolean_t imageboot_setup_new(imageboot_type_t type); -vnode_t imgboot_get_image_file(const char *path, off_t *fsize, int *errp); /* may be required by chunklist.c */ -int read_file(const char *path, void **bufp, size_t *bufszp); /* may be required by chunklist.c */ +void *ubc_getobject_from_filename(const char *filename, struct vnode **vpp, off_t *file_size); + +extern lck_rw_t * rootvnode_rw_lock; #define kIBFilePrefix "file://" @@ -106,27 +110,25 @@ vnode_get_and_drop_always(vnode_t vp) vnode_put(vp); } -__private_extern__ imageboot_type_t -imageboot_needed(void) +__private_extern__ bool +imageboot_desired(void) { - imageboot_type_t result = IMAGEBOOT_NONE; - char *root_path = NULL; - - DBG_TRACE("%s: checking for presence of root path\n", __FUNCTION__); - - MALLOC_ZONE(root_path, caddr_t, MAXPATHLEN, M_NAMEI, M_WAITOK); - if (root_path == NULL) { - panic("%s: M_NAMEI zone exhausted", __FUNCTION__); - } - -#if CONFIG_LOCKERBOOT - if (PE_parse_boot_argn(IMAGEBOOT_LOCKER_ARG, root_path, MAXPATHLEN)) { - result = IMAGEBOOT_LOCKER; - goto out; - } -#endif + bool do_imageboot = false; - /* Check for first layer */ + char *root_path = NULL; + root_path = zalloc(ZV_NAMEI); + /* + * Check for first layer DMG rooting. + * + * Note that here we are principally concerned with whether or not we + * SHOULD try to imageboot, not whether or not we are going to be able to. + * + * If NONE of the boot-args are present, then assume that image-rooting + * is not requested. + * + * [!! Note parens guard the entire logically OR'd set of statements, below. It validates + * that NONE of the below-mentioned boot-args is present...!!] 
+ */ if (!(PE_parse_boot_argn("rp0", root_path, MAXPATHLEN) || #if CONFIG_IMAGEBOOT_IMG4 PE_parse_boot_argn("arp0", root_path, MAXPATHLEN) || @@ -134,16 +136,37 @@ imageboot_needed(void) PE_parse_boot_argn("rp", root_path, MAXPATHLEN) || PE_parse_boot_argn(IMAGEBOOT_ROOT_ARG, root_path, MAXPATHLEN) || PE_parse_boot_argn(IMAGEBOOT_AUTHROOT_ARG, root_path, MAXPATHLEN))) { - goto out; + /* explicitly set to false */ + do_imageboot = false; + } else { + /* now sanity check the file-path format */ + if (imageboot_format_is_valid(root_path)) { + DBG_TRACE("%s: Found %s\n", __FUNCTION__, root_path); + /* root_path looks good and we have one of the aforementioned bootargs */ + do_imageboot = true; + } else { + /* explicitly set to false */ + do_imageboot = false; + } } - /* Sanity-check first layer */ - if (imageboot_format_is_valid(root_path)) { - DBG_TRACE("%s: Found %s\n", __FUNCTION__, root_path); - } else { + zfree(ZV_NAMEI, root_path); + return do_imageboot; +} + +__private_extern__ imageboot_type_t +imageboot_needed(void) +{ + imageboot_type_t result = IMAGEBOOT_NONE; + char *root_path = NULL; + + DBG_TRACE("%s: checking for presence of root path\n", __FUNCTION__); + + if (!imageboot_desired()) { goto out; } + root_path = zalloc(ZV_NAMEI); result = IMAGEBOOT_DMG; /* Check for second layer */ @@ -161,11 +184,235 @@ imageboot_needed(void) } out: - FREE_ZONE(root_path, MAXPATHLEN, M_NAMEI); - + if (root_path != NULL) { + zfree(ZV_NAMEI, root_path); + } return result; } +extern bool IOBaseSystemARVRootHashAvailable(void); + + +/* + * Mounts new filesystem based on image path, and pivots it to the root. + * The image to be mounted is located at image_path. + * It will be mounted at mount_path. + * The vfs_switch_root operation will be performed. + * After the pivot, the outgoing root filesystem (the filesystem at root when + * this function begins) will be at outgoing_root_path. If `rooted_dmg` is true, + * then ignore then chunklisted or authAPFS checks on this image + */ +__private_extern__ int +imageboot_pivot_image(const char *image_path, imageboot_type_t type, const char *mount_path, + const char *outgoing_root_path, const bool rooted_dmg) +{ + int error; + boolean_t authenticated_dmg_chunklist = false; + vnode_t mount_vp = NULLVP; + errno_t rootauth; + + + if (type != IMAGEBOOT_DMG) { + panic("not supported"); + } + + /* + * Check that the image file actually exists. + * We also need to find the mount it's on, to mark it as backing the + * root. + */ + vnode_t imagevp = NULLVP; + error = vnode_lookup(image_path, 0, &imagevp, vfs_context_kernel()); + if (error) { + printf("%s: image file not found or couldn't be read: %d\n", __FUNCTION__, error); + /* + * bail out here to short-circuit out of panic logic below. + * Failure to find the pivot-image should not be a fatal condition (ENOENT) + * since it may result in natural consequences (ergo, cannot unlock filevault prompt). + */ + return error; + } + + /* + * load the disk image and obtain its device. + * di_root_image's name and the names of its arguments suggest it has + * to be mounted at the root, but that's not actually needed. + * We just need to obtain the device info. 
+ */ + + dev_t dev; + char devname[DEVMAXNAMESIZE]; + + error = di_root_image_ext(image_path, devname, DEVMAXNAMESIZE, &dev, true); + if (error) { + panic("%s: di_root_image failed: %d\n", __FUNCTION__, error); + } + + printf("%s: attached disk image %s as %s\n", __FUNCTION__, image_path, devname); + + +#if CONFIG_IMAGEBOOT_CHUNKLIST + if ((rooted_dmg == false) && !IOBaseSystemARVRootHashAvailable()) { + error = authenticate_root_with_chunklist(image_path, NULL); + if (error == 0) { + printf("authenticated root-dmg via chunklist...\n"); + authenticated_dmg_chunklist = true; + } else { + /* root hash was not available, and image is NOT chunklisted? */ + printf("failed to chunklist-authenticate root-dmg @ %s\n", image_path); + } + } +#endif + + char fulldevname[DEVMAXNAMESIZE + 5]; // "/dev/" + strlcpy(fulldevname, "/dev/", sizeof(fulldevname)); + strlcat(fulldevname, devname, sizeof(fulldevname)); + + /* + * mount expects another layer of indirection (because it expects to + * be getting a user_addr_t of a char *. + * Make a pointer-to-pointer on our stack. It won't use this + * address after it returns so this should be safe. + */ + char *fulldevnamep = &(fulldevname[0]); + char **fulldevnamepp = &fulldevnamep; + +#define PIVOTMNT "/System/Volumes/BaseSystem" + + + /* Attempt to mount as HFS; if it fails, then try as APFS */ + printf("%s: attempting to mount as hfs...\n", __FUNCTION__); + error = kernel_mount("hfs", NULLVP, NULLVP, PIVOTMNT, fulldevnamepp, 0, (MNT_RDONLY | MNT_DONTBROWSE), (KERNEL_MOUNT_NOAUTH | KERNEL_MOUNT_BASESYSTEMROOT), vfs_context_kernel()); + if (error) { + printf("mount failed: %d\n", error); + printf("%s: attempting to mount as apfs...\n", __FUNCTION__); + error = kernel_mount("apfs", NULLVP, NULLVP, PIVOTMNT, fulldevnamepp, 0, (MNT_RDONLY | MNT_DONTBROWSE), (KERNEL_MOUNT_NOAUTH | KERNEL_MOUNT_BASESYSTEMROOT), vfs_context_kernel()); + } + + /* If we didn't mount as either HFS or APFS, then bail out */ + if (error) { + /* + * Note that for this particular failure case (failure to mount), the disk image + * being attached may have failed to quiesce within the alloted time out (20-30 sec). + * For example, it may be still probing, or APFS container enumeration may have not + * completed. If so, then we may have fallen into this particular error case. However, + * failure to complete matching should be an exceptional case as 30 sec. is quite a + * long time to wait for matching to complete (which would have occurred in + * di_root_image_ext). + */ +#if defined(__arm64__) && XNU_TARGET_OS_OSX + panic("%s: failed to mount pivot image(%d)!", __FUNCTION__, error); +#endif + printf("%s: failed to mount pivot image(%d) !", __FUNCTION__, error); + goto done; + } + + /* otherwise, if the mount succeeded, then assert that the DMG is authenticated (either chunklist or authapfs) */ + error = vnode_lookup(PIVOTMNT, 0, &mount_vp, vfs_context_kernel()); + if (error) { +#if defined(__arm64__) && XNU_TARGET_OS_OSX + panic("%s: failed to lookup pivot root (%d) !", __FUNCTION__, error); +#endif + printf("%s: failed to lookup pivot root (%d)!", __FUNCTION__, error); + goto done; + } + + /* the 0x1 implies base system */ + rootauth = VNOP_IOCTL(mount_vp, FSIOC_KERNEL_ROOTAUTH, (caddr_t)0x1, 0, vfs_context_kernel()); + if (rootauth) { + printf("BS-DMG failed to authenticate intra-FS \n"); + /* + * If we are using a custom rooted DMG, or if we have already authenticated + * the DMG via chunklist, then it is permissible to use. 
+ */ + if (rooted_dmg || authenticated_dmg_chunklist) { + rootauth = 0; + } + error = rootauth; + } + vnode_put(mount_vp); + mount_vp = NULLVP; + + if (error) { + /* + * Failure here exclusively means that the mount failed to authenticate. + * This means that the disk image either was not sealed (authapfs), or it was + * not hosted on a chunklisted DMG. Both scenarios may be fatal depending + * on the platform. + */ +#if defined(__arm64__) && XNU_TARGET_OS_OSX + panic("%s: could not authenticate the pivot image: %d. giving up.\n", __FUNCTION__, error); +#endif + printf("%s: could not authenticate the pivot image: %d. giving up.\n", __FUNCTION__, error); + goto done; + } + + if (rootvnode) { + mount_t root_mp = vnode_mount(rootvnode); + if (root_mp && (root_mp->mnt_kern_flag & MNTK_SSD)) { + rootvp_is_ssd = true; + } + } + /* + * pivot the incoming and outgoing filesystems + */ + error = vfs_switch_root(mount_path, outgoing_root_path, 0); + if (error) { + panic("%s: vfs_switch_root failed: %d\n", __FUNCTION__, error); + } + + /* + * Mark the filesystem containing the image as backing root, so it + * won't be unmountable. + * + * vfs_switch_root() clears this flag, so we have to set it after + * the pivot call. + * If the system later pivots out of the image, vfs_switch_root + * will clear it again, so the backing filesystem can be unmounted. + */ + mount_t imagemp = imagevp->v_mount; + lck_rw_lock_exclusive(&imagemp->mnt_rwlock); + imagemp->mnt_kern_flag |= MNTK_BACKS_ROOT; + lck_rw_done(&imagemp->mnt_rwlock); + + error = 0; + + /* + * Note that we do NOT change kern.bootuuid here - + * imageboot_mount_image() does, but imageboot_pivot_image() doesn't. + * imageboot_mount_image() is used when the root volume uuid was + * "always supposed to be" the one inside the dmg. imageboot_pivot_ + * image() is used when the true root volume just needs to be + * obscured for a moment by the dmg. + */ + +done: + if (imagevp != NULLVP) { + vnode_put(imagevp); + } + return error; +} + +/* kern_sysctl.c */ +extern uuid_string_t fake_bootuuid; + +static void +set_fake_bootuuid(mount_t mp) +{ + struct vfs_attr va; + VFSATTR_INIT(&va); + VFSATTR_WANTED(&va, f_uuid); + + if (vfs_getattr(mp, &va, vfs_context_current()) != 0) { + return; + } + + if (!VFSATTR_IS_SUPPORTED(&va, f_uuid)) { + return; + } + + uuid_unparse(va.f_uuid, fake_bootuuid); +} /* * Swaps in new root filesystem based on image path. @@ -173,6 +420,8 @@ out: * tagged MNTK_BACKS_ROOT, MNT_ROOTFS is cleared on it, and * "rootvnode" is reset. Root vnode of currentroot filesystem * is returned with usecount (no iocount). + * kern.bootuuid is arranged to return the UUID of the mounted image. (If + * we did nothing here, it would be the UUID of the image source volume.) */ __private_extern__ int imageboot_mount_image(const char *root_path, int height, imageboot_type_t type) @@ -207,7 +456,8 @@ imageboot_mount_image(const char *root_path, int height, imageboot_type_t type) } #if CONFIG_LOCKERBOOT else if (type == IMAGEBOOT_LOCKER) { - locker_mount_args_t *mntargs = kalloc(sizeof(*mntargs)); + locker_mount_args_t *mntargs = kheap_alloc(KHEAP_TEMP, + sizeof(*mntargs), Z_WAITOK); if (!mntargs) { panic("could not alloc mount args"); } @@ -221,7 +471,7 @@ imageboot_mount_image(const char *root_path, int height, imageboot_type_t type) if (error) { panic("failed to mount locker: %d", error); } - kfree(mntargs, sizeof(*mntargs)); + kheap_free(KHEAP_TEMP, mntargs, sizeof(*mntargs)); /* Clear the old mount association. 
*/ old_rootvnode->v_mountedhere = NULL; @@ -247,16 +497,19 @@ imageboot_mount_image(const char *root_path, int height, imageboot_type_t type) mount_list_remove(old_rootfs); mount_lock(old_rootfs); -#ifdef CONFIG_IMGSRC_ACCESS old_rootfs->mnt_kern_flag |= MNTK_BACKS_ROOT; -#endif /* CONFIG_IMGSRC_ACCESS */ old_rootfs->mnt_flag &= ~MNT_ROOTFS; mount_unlock(old_rootfs); } + vnode_ref(newdp); + vnode_put(newdp); + + lck_rw_lock_exclusive(rootvnode_rw_lock); /* switch to the new rootvnode */ if (update_rootvnode) { rootvnode = newdp; + set_fake_bootuuid(rootvnode->v_mount); } new_rootfs = rootvnode->v_mount; @@ -264,9 +517,9 @@ imageboot_mount_image(const char *root_path, int height, imageboot_type_t type) new_rootfs->mnt_flag |= MNT_ROOTFS; mount_unlock(new_rootfs); - vnode_ref(newdp); - vnode_put(newdp); filedesc0.fd_cdir = newdp; + lck_rw_unlock_exclusive(rootvnode_rw_lock); + DBG_TRACE("%s: root switched\n", __FUNCTION__); if (old_rootvnode != NULL) { @@ -284,8 +537,53 @@ imageboot_mount_image(const char *root_path, int height, imageboot_type_t type) return 0; } +/* + * Return a memory object for given file path. + * Also returns a vnode reference for the given file path. + */ +void * +ubc_getobject_from_filename(const char *filename, struct vnode **vpp, off_t *file_size) +{ + int err = 0; + struct nameidata ndp = {}; + struct vnode *vp = NULL; + off_t fsize = 0; + vfs_context_t ctx = vfs_context_kernel(); + void *control = NULL; + + NDINIT(&ndp, LOOKUP, OP_OPEN, LOCKLEAF, UIO_SYSSPACE, CAST_USER_ADDR_T(filename), ctx); + if ((err = namei(&ndp)) != 0) { + goto errorout; + } + nameidone(&ndp); + vp = ndp.ni_vp; + + if ((err = vnode_size(vp, &fsize, ctx)) != 0) { + goto errorout; + } + + if (fsize < 0) { + goto errorout; + } + + control = ubc_getobject(vp, UBC_FLAGS_NONE); + if (control == NULL) { + goto errorout; + } + + *file_size = fsize; + *vpp = vp; + vp = NULL; + +errorout: + if (vp) { + vnode_put(vp); + } + return control; +} + int -read_file(const char *path, void **bufp, size_t *bufszp) +imageboot_read_file_from_offset(kalloc_heap_t kheap, const char *path, off_t offset, void **bufp, size_t *bufszp) { int err = 0; struct nameidata ndp = {}; @@ -326,20 +624,22 @@ read_file(const char *path, void **bufp, size_t *bufszp) fsize = *bufszp; } - buf = kalloc(fsize); + fsize = (off_t)MIN((size_t)fsize, INT_MAX); + + buf = kheap_alloc(kheap, (size_t)fsize, Z_WAITOK); if (buf == NULL) { err = ENOMEM; goto out; } - if ((err = vn_rdwr(UIO_READ, vp, (caddr_t)buf, fsize, 0, UIO_SYSSPACE, IO_NODELOCKED, kerncred, &resid, p)) != 0) { - AUTHPRNT("Cannot read %d bytes from %s - %d", (int)fsize, path, err); + if ((err = vn_rdwr(UIO_READ, vp, (caddr_t)buf, (int)fsize, offset, UIO_SYSSPACE, IO_NODELOCKED, kerncred, &resid, p)) != 0) { + AUTHPRNT("Cannot read %d bytes at offset %d from %s - %d", (int)fsize, (int)offset, path, err); goto out; } if (resid) { /* didnt get everything we wanted */ - AUTHPRNT("Short read of %d bytes from %s - %d", (int)fsize, path, resid); + AUTHPRNT("Short read of %d bytes at offset %d from %s - %d", (int)fsize, (int)offset, path, resid); err = EINVAL; goto out; } @@ -354,15 +654,21 @@ out: } if (err) { - kfree_safe(buf); + kheap_free_safe(kheap, buf, (size_t)fsize); } else { *bufp = buf; - *bufszp = fsize; + *bufszp = (size_t)fsize; } return err; } +int +imageboot_read_file(kalloc_heap_t kheap, const char *path, void **bufp, size_t *bufszp) +{ + return imageboot_read_file_from_offset(kheap, path, 0, bufp, bufszp); +} + #if CONFIG_IMAGEBOOT_IMG4 || CONFIG_IMAGEBOOT_CHUNKLIST 
vnode_t imgboot_get_image_file(const char *path, off_t *fsize, int *errp) @@ -402,10 +708,10 @@ imgboot_get_image_file(const char *path, off_t *fsize, int *errp) #define APTICKET_NAME "apticket.der" static char * -imgboot_get_apticket_path(const char *rootpath) +imgboot_get_apticket_path(const char *rootpath, size_t *sz) { - size_t plen = strlen(rootpath) + sizeof(APTICKET_NAME); - char *path = kalloc(plen); + size_t plen = strlen(rootpath) + sizeof(APTICKET_NAME) + 1; + char *path = kheap_alloc(KHEAP_TEMP, plen, Z_WAITOK); if (path) { char *slash; @@ -419,6 +725,8 @@ imgboot_get_apticket_path(const char *rootpath) } strlcpy(slash, APTICKET_NAME, sizeof(APTICKET_NAME) + 1); } + + *sz = plen; return path; } @@ -426,12 +734,18 @@ static int authenticate_root_with_img4(const char *rootpath) { errno_t rv; - img4_t i4; - img4_payload_t i4pl; vnode_t vp; + size_t ticket_pathsz = 0; char *ticket_path; - size_t tcksz = 0; - void *tckbuf = NULL; + img4_buff_t tck = IMG4_BUFF_INIT; + img4_firmware_execution_context_t exec = { + .i4fex_version = IMG4_FIRMWARE_EXECUTION_CONTEXT_STRUCT_VERSION, + .i4fex_execute = NULL, + .i4fex_context = NULL, + }; + img4_firmware_t fw = NULL; + img4_firmware_flags_t fw_flags = IMG4_FIRMWARE_FLAG_BARE | + IMG4_FIRMWARE_FLAG_SUBSEQUENT_STAGE; DBG_TRACE("Check %s\n", rootpath); @@ -440,50 +754,44 @@ authenticate_root_with_img4(const char *rootpath) return EAGAIN; } - ticket_path = imgboot_get_apticket_path(rootpath); + ticket_path = imgboot_get_apticket_path(rootpath, &ticket_pathsz); if (ticket_path == NULL) { AUTHPRNT("Cannot construct ticket path - out of memory"); return ENOMEM; } - rv = read_file(ticket_path, &tckbuf, &tcksz); + rv = imageboot_read_file(KHEAP_TEMP, ticket_path, (void **)&tck.i4b_bytes, &tck.i4b_len); if (rv) { AUTHPRNT("Cannot get a ticket from %s - %d\n", ticket_path, rv); goto out_with_ticket_path; } - DBG_TRACE("Got %d bytes of manifest from %s\n", (int)tcksz, ticket_path); - - rv = img4_init(&i4, 0, tckbuf, tcksz, NULL); - if (rv) { - AUTHPRNT("Cannot initialise verification handle - error %d", rv); - goto out_with_ticket_bytes; - } + DBG_TRACE("Got %lu bytes of manifest from %s\n", tck.i4b_len, ticket_path); vp = imgboot_get_image_file(rootpath, NULL, &rv); if (vp == NULL) { /* Error message had been printed already */ - goto out; + rv = EIO; + goto out_with_ticket_bytes; } - rv = img4_payload_init_with_vnode_4xnu(&i4pl, 'rosi', vp, I4PLF_UNWRAPPED); - if (rv) { - AUTHPRNT("failed to init payload: %d", rv); - goto out; + fw = img4_firmware_new_from_vnode_4xnu(IMG4_RUNTIME_DEFAULT, &exec, 'rosi', + vp, fw_flags); + if (!fw) { + AUTHPRNT("Could not allocate new firmware"); + rv = ENOMEM; + goto out_with_ticket_bytes; } - rv = img4_get_trusted_external_payload(&i4, &i4pl, IMG4_ENVIRONMENT_PPL, NULL, NULL); - if (rv) { - AUTHPRNT("failed to validate root image %s: %d", rootpath, rv); - } + img4_firmware_attach_manifest(fw, &tck); + rv = img4_firmware_evaluate(fw, img4_chip_select_personalized_ap(), NULL); - img4_payload_destroy(&i4pl); -out: - img4_destroy(&i4); out_with_ticket_bytes: - kfree_safe(tckbuf); + kheap_free_safe(KHEAP_TEMP, tck.i4b_bytes, tck.i4b_len); out_with_ticket_path: - kfree_safe(ticket_path); + kheap_free_safe(KHEAP_TEMP, ticket_path, ticket_pathsz); + + img4_firmware_destroy(&fw); return rv; } #endif /* CONFIG_IMAGEBOOT_IMG4 */ @@ -501,12 +809,13 @@ imageboot_mount_ramdisk(const char *path) void *buf = NULL; dev_t dev; vnode_t newdp; + vnode_t tvp; mount_t new_rootfs; /* Read our target image from disk */ - err = 
read_file(path, &buf, &bufsz); + err = imageboot_read_file(KHEAP_DATA_BUFFERS, path, &buf, &bufsz); if (err) { - printf("%s: failed: read_file() = %d\n", __func__, err); + printf("%s: failed: imageboot_read_file() = %d\n", __func__, err); goto out; } DBG_TRACE("%s: read '%s' sz = %lu\n", __func__, path, bufsz); @@ -534,10 +843,16 @@ imageboot_mount_ramdisk(const char *path) #endif /* ... and unmount everything */ - vnode_get_and_drop_always(rootvnode); + vfs_unmountall(); + + lck_rw_lock_exclusive(rootvnode_rw_lock); filedesc0.fd_cdir = NULL; + tvp = rootvnode; rootvnode = NULL; - vfs_unmountall(); + rootvp = NULLVP; + rootdev = NODEV; + lck_rw_unlock_exclusive(rootvnode_rw_lock); + vnode_get_and_drop_always(tvp); /* Attach the ramfs image ... */ err = di_root_ramfile_buf(buf, bufsz, rootdevice, DEVMAXNAMESIZE, &dev); @@ -559,6 +874,9 @@ imageboot_mount_ramdisk(const char *path) if (VFS_ROOT(TAILQ_LAST(&mountlist, mntlist), &newdp, vfs_context_kernel())) { panic("%s: cannot find root vnode", __func__); } + vnode_ref(newdp); + + lck_rw_lock_exclusive(rootvnode_rw_lock); rootvnode = newdp; rootvnode->v_flag |= VROOT; new_rootfs = rootvnode->v_mount; @@ -566,15 +884,18 @@ imageboot_mount_ramdisk(const char *path) new_rootfs->mnt_flag |= MNT_ROOTFS; mount_unlock(new_rootfs); - vnode_ref(newdp); - vnode_put(newdp); + set_fake_bootuuid(new_rootfs); + filedesc0.fd_cdir = newdp; + lck_rw_unlock_exclusive(rootvnode_rw_lock); + + vnode_put(newdp); DBG_TRACE("%s: root switched\n", __func__); out: if (err) { - kfree_safe(buf); + kheap_free_safe(KHEAP_DATA_BUFFERS, buf, bufsz); } return err; } @@ -586,7 +907,7 @@ out: * Caller is expected to check if the pointers are different. */ static char * -url_to_path(char *url_path) +url_to_path(char *url_path, size_t *sz) { char *path = url_path; size_t len = strlen(kIBFilePrefix); @@ -598,12 +919,13 @@ url_to_path(char *url_path) len = strlen(url_path); if (len) { /* Make a copy of the path to URL-decode */ - path = kalloc(len + 1); + path = kheap_alloc(KHEAP_TEMP, len + 1, Z_WAITOK); if (path == NULL) { panic("imageboot path allocation failed - cannot allocate %d bytes\n", (int)len); } strlcpy(path, url_path, len + 1); + *sz = len + 1; url_decode(path); } else { panic("Bogus imageboot path URL - missing path\n"); @@ -625,7 +947,7 @@ imageboot_setup_new(imageboot_type_t type) boolean_t auth_root = TRUE; boolean_t ramdisk_root = FALSE; - MALLOC_ZONE(root_path, caddr_t, MAXPATHLEN, M_NAMEI, M_WAITOK); + root_path = zalloc(ZV_NAMEI); assert(root_path != NULL); #if CONFIG_LOCKERBOOT @@ -675,21 +997,20 @@ imageboot_setup_new(imageboot_type_t type) printf("%s: root image URL is '%s'\n", __func__, root_path); -#if CONFIG_CSR - if (auth_root && (csr_check(CSR_ALLOW_ANY_RECOVERY_OS) == 0)) { - AUTHPRNT("CSR_ALLOW_ANY_RECOVERY_OS set, skipping root image authentication"); - auth_root = FALSE; - } -#endif - /* Make a copy of the path to URL-decode */ - char *path = url_to_path(root_path); + size_t pathsz; + char *path = url_to_path(root_path, &pathsz); assert(path); #if CONFIG_IMAGEBOOT_CHUNKLIST if (auth_root) { + /* + * This updates auth_root to reflect whether chunklist was + * actually enforced. In effect, this clears auth_root if + * CSR_ALLOW_ANY_RECOVERY_OS allowed an invalid image. 
+ */ AUTHDBG("authenticating root image at %s", path); - error = authenticate_root_with_chunklist(path); + error = authenticate_root_with_chunklist(path, &auth_root); if (error) { panic("root image authentication failed (err = %d)\n", error); } @@ -704,7 +1025,7 @@ imageboot_setup_new(imageboot_type_t type) } if (path != root_path) { - kfree_safe(path); + kheap_free_safe(KHEAP_TEMP, path, pathsz); } if (error) { @@ -728,7 +1049,7 @@ imageboot_setup_new(imageboot_type_t type) done = TRUE; out: - FREE_ZONE(root_path, MAXPATHLEN, M_NAMEI); + zfree(ZV_NAMEI, root_path); return done; } @@ -756,7 +1077,7 @@ imageboot_setup(imageboot_type_t type) return; } - MALLOC_ZONE(root_path, caddr_t, MAXPATHLEN, M_NAMEI, M_WAITOK); + root_path = zalloc(ZV_NAMEI); assert(root_path != NULL); /* @@ -768,7 +1089,8 @@ imageboot_setup(imageboot_type_t type) */ #if CONFIG_IMAGEBOOT_IMG4 if (PE_parse_boot_argn("arp0", root_path, MAXPATHLEN)) { - char *path = url_to_path(root_path); + size_t pathsz; + char *path = url_to_path(root_path, &pathsz); assert(path); @@ -776,7 +1098,7 @@ imageboot_setup(imageboot_type_t type) panic("Root image %s does not match the manifest\n", root_path); } if (path != root_path) { - kfree_safe(path); + kheap_free_safe(KHEAP_TEMP, path, pathsz); } } else #endif /* CONFIG_IMAGEBOOT_IMG4 */ @@ -811,7 +1133,7 @@ imageboot_setup(imageboot_type_t type) } done: - FREE_ZONE(root_path, MAXPATHLEN, M_NAMEI); + zfree(ZV_NAMEI, root_path); DBG_TRACE("%s: exit\n", __FUNCTION__); diff --git a/bsd/kern/kdebug.c b/bsd/kern/kdebug.c index eb78ca89a..f0ca4b75c 100644 --- a/bsd/kern/kdebug.c +++ b/bsd/kern/kdebug.c @@ -56,6 +56,7 @@ #include #include #include +#include #include #include #include @@ -81,6 +82,9 @@ #include #include +extern unsigned int wake_nkdbufs; +extern unsigned int trace_wrap; + /* * IOP(s) * @@ -241,7 +245,6 @@ static void kdbg_disable_typefilter(void); */ void task_act_iterate_wth_args(task_t, void (*)(thread_t, void *), void *); -int cpu_number(void); /* XXX include path broken */ void commpage_update_kdebug_state(void); /* XXX sign */ extern int log_leaks; @@ -264,13 +267,6 @@ kdbg_timestamp(void) static int kdbg_debug = 0; -#if KDEBUG_MOJO_TRACE -#include -static void kdebug_serial_print( /* forward */ - uint32_t, uint32_t, uint64_t, - uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t); -#endif - int kdbg_control(int *, u_int, user_addr_t, size_t *); static int kdbg_read(user_addr_t, size_t *, vnode_t, vfs_context_t, uint32_t); @@ -296,9 +292,8 @@ static void kdbg_wakeup(void); int kdbg_cpumap_init_internal(kd_iop_t* iops, uint32_t cpu_count, uint8_t** cpumap, uint32_t* cpumap_size); -static kd_threadmap *kdbg_thrmap_init_internal(unsigned int count, - unsigned int *mapsize, - unsigned int *mapcount); +static kd_threadmap *kdbg_thrmap_init_internal(size_t max_count, + vm_size_t *map_size, vm_size_t *map_count); static bool kdebug_current_proc_enabled(uint32_t debugid); static errno_t kdebug_check_trace_string(uint32_t debugid, uint64_t str_id); @@ -328,16 +323,16 @@ unsigned int kdebug_enable = 0; #define KD_EARLY_BUFFER_SIZE (16 * 1024) #define KD_EARLY_BUFFER_NBUFS (KD_EARLY_BUFFER_SIZE / sizeof(kd_buf)) -#if CONFIG_EMBEDDED +#if defined(__x86_64__) +__attribute__((aligned(KD_EARLY_BUFFER_SIZE))) +static kd_buf kd_early_buffer[KD_EARLY_BUFFER_NBUFS]; +#else /* defined(__x86_64__) */ /* - * On embedded, the space for this is carved out by osfmk/arm/data.s -- clang + * On ARM, the space for this is carved out by osfmk/arm/data.s -- clang * has problems aligning to greater than 
4K. */ extern kd_buf kd_early_buffer[KD_EARLY_BUFFER_NBUFS]; -#else /* CONFIG_EMBEDDED */ -__attribute__((aligned(KD_EARLY_BUFFER_SIZE))) -static kd_buf kd_early_buffer[KD_EARLY_BUFFER_NBUFS]; -#endif /* !CONFIG_EMBEDDED */ +#endif /* !defined(__x86_64__) */ static unsigned int kd_early_index = 0; static bool kd_early_overflow = false; @@ -449,8 +444,8 @@ static lck_spin_t * kdw_spin_lock; static lck_spin_t * kds_spin_lock; kd_threadmap *kd_mapptr = 0; -unsigned int kd_mapsize = 0; -unsigned int kd_mapcount = 0; +vm_size_t kd_mapsize = 0; +vm_size_t kd_mapcount = 0; off_t RAW_file_offset = 0; int RAW_file_written = 0; @@ -479,20 +474,17 @@ static uint64_t g_str_id_signature = (0x70acULL << STR_ID_SIG_OFFSET); #define BSC_SysCall 0x040c0000 #define MACH_SysCall 0x010c0000 -/* task to string structure */ -struct tts { - task_t task; /* from procs task */ - pid_t pid; /* from procs p_pid */ - char task_comm[20];/* from procs p_comm */ +struct kd_task_name { + task_t ktn_task; + pid_t ktn_pid; + char ktn_name[20]; }; -typedef struct tts tts_t; - -struct krt { - kd_threadmap *map; /* pointer to the map buffer */ - int count; - int maxcount; - struct tts *atts; +struct kd_resolver { + kd_threadmap *krs_map; + vm_size_t krs_count; + vm_size_t krs_maxcount; + struct kd_task_name *krs_task; }; /* @@ -620,22 +612,26 @@ static uint32_t kdbg_cpu_count(bool early_trace) { if (early_trace) { -#if CONFIG_EMBEDDED - return ml_get_cpu_count(); -#else +#if defined(__x86_64__) return max_ncpus; -#endif +#else /* defined(__x86_64__) */ + return ml_get_cpu_count(); +#endif /* !defined(__x86_64__) */ } +#if defined(__x86_64__) host_basic_info_data_t hinfo; mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT; host_info((host_t)1 /* BSD_HOST */, HOST_BASIC_INFO, (host_info_t)&hinfo, &count); assert(hinfo.logical_cpu_max > 0); return hinfo.logical_cpu_max; +#else /* defined(__x86_64__) */ + return ml_get_topology_info()->max_cpu_id + 1; +#endif /* !defined(__x86_64__) */ } #if MACH_ASSERT -#if CONFIG_EMBEDDED + static bool kdbg_iop_list_is_valid(kd_iop_t* iop) { @@ -658,19 +654,6 @@ kdbg_iop_list_is_valid(kd_iop_t* iop) return true; } -static bool -kdbg_iop_list_contains_cpu_id(kd_iop_t* list, uint32_t cpu_id) -{ - while (list) { - if (list->cpu_id == cpu_id) { - return true; - } - list = list->next; - } - - return false; -} -#endif /* CONFIG_EMBEDDED */ #endif /* MACH_ASSERT */ static void @@ -800,9 +783,7 @@ create_buffers(bool early_trace) */ kd_ctrl_page.kdebug_iops = kd_iops; -#if CONFIG_EMBEDDED assert(kdbg_iop_list_is_valid(kd_ctrl_page.kdebug_iops)); -#endif /* * If the list is valid, it is sorted, newest -> oldest. 
Each iop entry @@ -871,13 +852,15 @@ create_buffers(bool early_trace) for (i = 0; i < n_storage_buffers; i++) { struct kd_storage *kds; - int n_elements; - int n; + uint16_t n_elements; + static_assert(N_STORAGE_UNITS_PER_BUFFER <= UINT16_MAX); + assert(kd_bufs[i].kdsb_size <= N_STORAGE_UNITS_PER_BUFFER * + sizeof(struct kd_storage)); n_elements = kd_bufs[i].kdsb_size / sizeof(struct kd_storage); kds = kd_bufs[i].kdsb_addr; - for (n = 0; n < n_elements; n++) { + for (uint16_t n = 0; n < n_elements; n++) { kds[n].kds_next.buffer_index = kd_ctrl_page.kds_free_list.buffer_index; kds[n].kds_next.offset = kd_ctrl_page.kds_free_list.offset; @@ -1209,19 +1192,6 @@ record_event: goto out1; } -#if CONFIG_EMBEDDED - /* - * When start_kern_tracing is called by the kernel to trace very - * early kernel events, it saves data to a secondary buffer until - * it is possible to initialize ktrace, and then dumps the events - * into the ktrace buffer using this method. In this case, iops will - * be NULL, and the coreid will be zero. It is not possible to have - * a valid IOP coreid of zero, so pass if both iops is NULL and coreid - * is zero. - */ - assert(kdbg_iop_list_contains_cpu_id(kd_ctrl_page.kdebug_iops, coreid) || (kd_ctrl_page.kdebug_iops == NULL && coreid == 0)); -#endif - disable_preemption(); if (kd_ctrl_page.enabled == 0) { @@ -1231,13 +1201,6 @@ record_event: kdbp = &kdbip[coreid]; timestamp &= KDBG_TIMESTAMP_MASK; -#if KDEBUG_MOJO_TRACE - if (kdebug_enable & KDEBUG_ENABLE_SERIAL) { - kdebug_serial_print(coreid, debugid, timestamp, - arg1, arg2, arg3, arg4, threadid); - } -#endif - retry_q: kds_raw = kdbp->kd_list_tail; @@ -1410,14 +1373,6 @@ record_event: cpu = cpu_number(); kdbp = &kdbip[cpu]; -#if KDEBUG_MOJO_TRACE - if (kdebug_enable & KDEBUG_ENABLE_SERIAL) { - kdebug_serial_print(cpu, debugid, - kdbg_timestamp() & KDBG_TIMESTAMP_MASK, - arg1, arg2, arg3, arg4, arg5); - } -#endif - retry_q: kds_raw = kdbp->kd_list_tail; @@ -1614,7 +1569,7 @@ kernel_debug_early( /* If early tracing is over, use the normal path. 
*/ if (kd_early_done) { - KERNEL_DEBUG_CONSTANT(debugid, arg1, arg2, arg3, arg4, 0); + KDBG_RELEASE(debugid, arg1, arg2, arg3, arg4); return; } @@ -1649,17 +1604,12 @@ kernel_debug_early_end(void) /* reset the current oldest time to allow early events */ kd_ctrl_page.oldest_time = 0; -#if !CONFIG_EMBEDDED +#if defined(__x86_64__) /* Fake sentinel marking the start of kernel time relative to TSC */ - kernel_debug_enter(0, - TRACE_TIMESTAMPS, - 0, - (uint32_t)(tsc_rebase_abs_time >> 32), - (uint32_t)tsc_rebase_abs_time, - tsc_at_boot, - 0, - 0); -#endif + kernel_debug_enter(0, TRACE_TIMESTAMPS, 0, + (uint32_t)(tsc_rebase_abs_time >> 32), (uint32_t)tsc_rebase_abs_time, + tsc_at_boot, 0, 0); +#endif /* defined(__x86_64__) */ for (unsigned int i = 0; i < kd_early_index; i++) { kernel_debug_enter(0, kd_early_buffer[i].debugid, @@ -2183,7 +2133,8 @@ kdbg_trace_data(struct proc *proc, long *arg_pid, long *arg_uniqueid) *arg_uniqueid = 0; } else { *arg_pid = proc->p_pid; - *arg_uniqueid = proc->p_uniqueid; + /* Fit in a trace point */ + *arg_uniqueid = (long)proc->p_uniqueid; if ((uint64_t) *arg_uniqueid != proc->p_uniqueid) { *arg_uniqueid = 0; } @@ -2220,32 +2171,6 @@ kdbg_trace_string(struct proc *proc, long *arg1, long *arg2, long *arg3, *arg4 = args[3]; } -static void -kdbg_resolve_map(thread_t th_act, void *opaque) -{ - kd_threadmap *mapptr; - krt_t *t = (krt_t *)opaque; - - if (t->count < t->maxcount) { - mapptr = &t->map[t->count]; - mapptr->thread = (uintptr_t)thread_tid(th_act); - - (void) strlcpy(mapptr->command, t->atts->task_comm, - sizeof(t->atts->task_comm)); - /* - * Some kernel threads have no associated pid. - * We still need to mark the entry as valid. - */ - if (t->atts->pid) { - mapptr->valid = t->atts->pid; - } else { - mapptr->valid = 1; - } - - t->count++; - } -} - /* * * Writes a cpumap for the given iops_list/cpu_count to the provided buffer. @@ -2336,99 +2261,119 @@ kdbg_thrmap_init(void) } } -static kd_threadmap * -kdbg_thrmap_init_internal(unsigned int count, unsigned int *mapsize, unsigned int *mapcount) +static void +kd_resolve_map(thread_t thread, void *opaque) { - kd_threadmap *mapptr; - proc_t p; - struct krt akrt; - int tts_count = 0; /* number of task-to-string structures */ - struct tts *tts_mapptr; - unsigned int tts_mapsize = 0; - vm_offset_t kaddr; - - assert(mapsize != NULL); - assert(mapcount != NULL); + struct kd_resolver *resolve = opaque; - *mapcount = threads_count; - tts_count = tasks_count; + if (resolve->krs_count < resolve->krs_maxcount) { + kd_threadmap *map = &resolve->krs_map[resolve->krs_count]; + struct kd_task_name *task_name = resolve->krs_task; + map->thread = (uintptr_t)thread_tid(thread); - /* - * The proc count could change during buffer allocation, - * so introduce a small fudge factor to bump up the - * buffer sizes. This gives new tasks some chance of - * making into the tables. Bump up by 25%. - */ - *mapcount += *mapcount / 4; - tts_count += tts_count / 4; - - *mapsize = *mapcount * sizeof(kd_threadmap); - - if (count && count < *mapcount) { - return 0; - } - - if ((kmem_alloc(kernel_map, &kaddr, (vm_size_t)*mapsize, VM_KERN_MEMORY_DIAG) == KERN_SUCCESS)) { - bzero((void *)kaddr, *mapsize); - mapptr = (kd_threadmap *)kaddr; - } else { - return 0; + (void)strlcpy(map->command, task_name->ktn_name, sizeof(map->command)); + /* + * Kernel threads should still be marked with non-zero valid bit. + */ + pid_t pid = resolve->krs_task->ktn_pid; + map->valid = pid == 0 ? 
1 : pid; + resolve->krs_count++; } +} - tts_mapsize = tts_count * sizeof(struct tts); - - if ((kmem_alloc(kernel_map, &kaddr, (vm_size_t)tts_mapsize, VM_KERN_MEMORY_DIAG) == KERN_SUCCESS)) { - bzero((void *)kaddr, tts_mapsize); - tts_mapptr = (struct tts *)kaddr; - } else { - kmem_free(kernel_map, (vm_offset_t)mapptr, *mapsize); - - return 0; - } +static vm_size_t +kd_resolve_tasks(struct kd_task_name *task_names, vm_size_t ntasks) +{ + vm_size_t i = 0; + proc_t p = PROC_NULL; - /* - * Save the proc's name and take a reference for each task associated - * with a valid process. - */ proc_list_lock(); - - int i = 0; ALLPROC_FOREACH(p) { - if (i >= tts_count) { + if (i >= ntasks) { break; } - if (p->p_lflag & P_LEXIT) { - continue; - } - if (p->task) { + /* + * Only record processes that can be referenced and are not exiting. + */ + if (p->task && (p->p_lflag & P_LEXIT) == 0) { task_reference(p->task); - tts_mapptr[i].task = p->task; - tts_mapptr[i].pid = p->p_pid; - (void)strlcpy(tts_mapptr[i].task_comm, proc_best_name(p), sizeof(tts_mapptr[i].task_comm)); + task_names[i].ktn_task = p->task; + task_names[i].ktn_pid = p->p_pid; + (void)strlcpy(task_names[i].ktn_name, proc_best_name(p), + sizeof(task_names[i].ktn_name)); i++; } } - tts_count = i; - proc_list_unlock(); + return i; +} + +static vm_size_t +kd_resolve_threads(kd_threadmap *map, struct kd_task_name *task_names, + vm_size_t ntasks, vm_size_t nthreads) +{ + struct kd_resolver resolver = { + .krs_map = map, .krs_count = 0, .krs_maxcount = nthreads, + }; + + for (int i = 0; i < ntasks; i++) { + struct kd_task_name *cur_task = &task_names[i]; + resolver.krs_task = cur_task; + task_act_iterate_wth_args(cur_task->ktn_task, kd_resolve_map, + &resolver); + task_deallocate(cur_task->ktn_task); + } + + return resolver.krs_count; +} + +static kd_threadmap * +kdbg_thrmap_init_internal(size_t maxthreads, vm_size_t *mapsize, + vm_size_t *mapcount) +{ + kd_threadmap *thread_map = NULL; + struct kd_task_name *task_names; + vm_size_t names_size = 0; + + assert(mapsize != NULL); + assert(mapcount != NULL); + + vm_size_t nthreads = threads_count; + vm_size_t ntasks = tasks_count; + /* - * Initialize thread map data + * Allow 25% more threads and tasks to be created between now and taking the + * proc_list_lock. */ - akrt.map = mapptr; - akrt.count = 0; - akrt.maxcount = *mapcount; + if (os_add_overflow(nthreads, nthreads / 4, &nthreads) || + os_add_overflow(ntasks, ntasks / 4, &ntasks)) { + return NULL; + } - for (i = 0; i < tts_count; i++) { - akrt.atts = &tts_mapptr[i]; - task_act_iterate_wth_args(tts_mapptr[i].task, kdbg_resolve_map, &akrt); - task_deallocate((task_t)tts_mapptr[i].task); + *mapcount = nthreads; + if (os_mul_overflow(nthreads, sizeof(kd_threadmap), mapsize)) { + return NULL; + } + if (os_mul_overflow(ntasks, sizeof(task_names[0]), &names_size)) { + return NULL; } - kmem_free(kernel_map, (vm_offset_t)tts_mapptr, tts_mapsize); - *mapcount = akrt.count; + /* + * Wait until the out-parameters have been filled with the needed size to + * do the bounds checking on the provided maximum. 
+ */ + if (maxthreads != 0 && maxthreads < nthreads) { + return NULL; + } - return mapptr; + thread_map = kalloc_tag(*mapsize, VM_KERN_MEMORY_DIAG); + bzero(thread_map, *mapsize); + task_names = kheap_alloc(KHEAP_TEMP, names_size, Z_WAITOK | Z_ZERO); + ntasks = kd_resolve_tasks(task_names, ntasks); + *mapcount = kd_resolve_threads(thread_map, task_names, ntasks, nthreads); + kheap_free(KHEAP_TEMP, task_names, names_size); + return thread_map; } static void @@ -2492,11 +2437,13 @@ kdebug_reset(void) void kdebug_free_early_buf(void) { -#if !CONFIG_EMBEDDED - /* Must be done with the buffer, so release it back to the VM. - * On embedded targets this buffer is freed when the BOOTDATA segment is freed. */ +#if defined(__x86_64__) + /* + * Make Intel aware that the early buffer is no longer being used. ARM + * handles this as part of the BOOTDATA segment. + */ ml_static_mfree((vm_offset_t)&kd_early_buffer, sizeof(kd_early_buffer)); -#endif +#endif /* defined(__x86_64__) */ } int @@ -2793,7 +2740,8 @@ kdbg_setreg(kd_regtype * kdr) static int kdbg_write_to_vnode(caddr_t buffer, size_t size, vnode_t vp, vfs_context_t ctx, off_t file_offset) { - return vn_rdwr(UIO_WRITE, vp, buffer, size, file_offset, UIO_SYSSPACE, IO_NODELOCKED | IO_UNIT, + assert(size < INT_MAX); + return vn_rdwr(UIO_WRITE, vp, buffer, (int)size, file_offset, UIO_SYSSPACE, IO_NODELOCKED | IO_UNIT, vfs_context_ucred(ctx), (int *) 0, vfs_context_proc(ctx)); } @@ -2839,25 +2787,16 @@ kdbg_write_v3_chunk_to_fd(uint32_t tag, uint32_t sub_tag, uint64_t length, void vnode_t vp; p = current_proc(); - proc_fdlock(p); - if ((fp_lookup(p, fd, &fp, 1))) { - proc_fdunlock(p); - return EFAULT; + if (fp_get_ftype(p, fd, DTYPE_VNODE, EBADF, &fp)) { + return EBADF; } + vp = fp->fp_glob->fg_data; context.vc_thread = current_thread(); - context.vc_ucred = fp->f_fglob->fg_cred; - - if (FILEGLOB_DTYPE(fp->f_fglob) != DTYPE_VNODE) { - fp_drop(p, fd, fp, 1); - proc_fdunlock(p); - return EBADF; - } - vp = (struct vnode *) fp->f_fglob->fg_data; - proc_fdunlock(p); + context.vc_ucred = fp->fp_glob->fg_cred; if ((vnode_getwithref(vp)) == 0) { - RAW_file_offset = fp->f_fglob->fg_offset; + RAW_file_offset = fp->fp_glob->fg_offset; kd_chunk_header_v3 chunk_header = { .tag = tag, @@ -2875,7 +2814,7 @@ kdbg_write_v3_chunk_to_fd(uint32_t tag, uint32_t sub_tag, uint64_t length, void RAW_file_offset += payload_size; } - fp->f_fglob->fg_offset = RAW_file_offset; + fp->fp_glob->fg_offset = RAW_file_offset; vnode_put(vp); } @@ -2953,7 +2892,10 @@ kdbg_write_v3_header(user_addr_t user_header, size_t *user_header_size, int fd) ret = EINVAL; goto bail; } - thrmap_size = kd_mapcount * sizeof(kd_threadmap); + if (os_mul_overflow(kd_mapcount, sizeof(kd_threadmap), &thrmap_size)) { + ret = ERANGE; + goto bail; + } mach_timebase_info_data_t timebase = {0, 0}; clock_timebase_info(&timebase); @@ -2980,7 +2922,7 @@ kdbg_write_v3_header(user_addr_t user_header, size_t *user_header_size, int fd) // If its a buffer, check if we have enough space to copy the header and the maps. 
if (user_header) { - bytes_needed = header.length + thrmap_size + (2 * sizeof(kd_chunk_header_v3)); + bytes_needed = (size_t)header.length + thrmap_size + (2 * sizeof(kd_chunk_header_v3)); if (*user_header_size < bytes_needed) { ret = EINVAL; goto bail; @@ -3089,12 +3031,11 @@ int kdbg_readcurthrmap(user_addr_t buffer, size_t *bufsize) { kd_threadmap *mapptr; - unsigned int mapsize; - unsigned int mapcount; - unsigned int count = 0; + vm_size_t mapsize; + vm_size_t mapcount; int ret = 0; + size_t count = *bufsize / sizeof(kd_threadmap); - count = *bufsize / sizeof(kd_threadmap); *bufsize = 0; if ((mapptr = kdbg_thrmap_init_internal(count, &mapsize, &mapcount))) { @@ -3104,7 +3045,7 @@ kdbg_readcurthrmap(user_addr_t buffer, size_t *bufsize) *bufsize = (mapcount * sizeof(kd_threadmap)); } - kmem_free(kernel_map, (vm_offset_t)mapptr, mapsize); + kfree(mapptr, mapsize); } else { ret = EINVAL; } @@ -3124,12 +3065,20 @@ kdbg_write_v1_header(bool write_thread_map, vnode_t vp, vfs_context_t ctx) uint32_t extra_thread_count = 0; uint32_t cpumap_size; size_t map_size = 0; - size_t map_count = 0; + uint32_t map_count = 0; if (write_thread_map) { assert(kd_ctrl_page.kdebug_flags & KDBG_MAPINIT); - map_count = kd_mapcount; - map_size = map_count * sizeof(kd_threadmap); + if (kd_mapcount > UINT32_MAX) { + return ERANGE; + } + map_count = (uint32_t)kd_mapcount; + if (os_mul_overflow(map_count, sizeof(kd_threadmap), &map_size)) { + return ERANGE; + } + if (map_size >= INT_MAX) { + return ERANGE; + } } /* @@ -3150,7 +3099,7 @@ kdbg_write_v1_header(bool write_thread_map, vnode_t vp, vfs_context_t ctx) assert(vp); assert(ctx); - pad_size = PAGE_16KB - ((sizeof(RAW_header) + map_size) & PAGE_MASK_64); + pad_size = PAGE_16KB - ((sizeof(RAW_header) + map_size) & PAGE_MASK); cpumap_size = sizeof(kd_cpumap_header) + kd_ctrl_page.kdebug_cpus * sizeof(kd_cpumap); if (cpumap_size > pad_size) { @@ -3182,7 +3131,7 @@ kdbg_write_v1_header(bool write_thread_map, vnode_t vp, vfs_context_t ctx) header.TOD_secs = secs; header.TOD_usecs = usecs; - ret = vn_rdwr(UIO_WRITE, vp, (caddr_t)&header, sizeof(RAW_header), RAW_file_offset, + ret = vn_rdwr(UIO_WRITE, vp, (caddr_t)&header, (int)sizeof(RAW_header), RAW_file_offset, UIO_SYSSPACE, IO_NODELOCKED | IO_UNIT, vfs_context_ucred(ctx), (int *) 0, vfs_context_proc(ctx)); if (ret) { goto write_error; @@ -3191,7 +3140,8 @@ kdbg_write_v1_header(bool write_thread_map, vnode_t vp, vfs_context_t ctx) RAW_file_written += sizeof(RAW_header); if (write_thread_map) { - ret = vn_rdwr(UIO_WRITE, vp, (caddr_t)kd_mapptr, map_size, RAW_file_offset, + assert(map_size < INT_MAX); + ret = vn_rdwr(UIO_WRITE, vp, (caddr_t)kd_mapptr, (int)map_size, RAW_file_offset, UIO_SYSSPACE, IO_NODELOCKED | IO_UNIT, vfs_context_ucred(ctx), (int *) 0, vfs_context_proc(ctx)); if (ret) { goto write_error; @@ -3203,16 +3153,16 @@ kdbg_write_v1_header(bool write_thread_map, vnode_t vp, vfs_context_t ctx) if (extra_thread_count) { pad_size = extra_thread_count * sizeof(kd_threadmap); - pad_buf = kalloc(pad_size); + pad_buf = kheap_alloc(KHEAP_TEMP, pad_size, Z_WAITOK | Z_ZERO); if (!pad_buf) { ret = ENOMEM; goto write_error; } - memset(pad_buf, 0, pad_size); - ret = vn_rdwr(UIO_WRITE, vp, (caddr_t)pad_buf, pad_size, RAW_file_offset, + assert(pad_size < INT_MAX); + ret = vn_rdwr(UIO_WRITE, vp, (caddr_t)pad_buf, (int)pad_size, RAW_file_offset, UIO_SYSSPACE, IO_NODELOCKED | IO_UNIT, vfs_context_ucred(ctx), (int *) 0, vfs_context_proc(ctx)); - kfree(pad_buf, pad_size); + kheap_free(KHEAP_TEMP, pad_buf, pad_size); if (ret) 
{ goto write_error; } @@ -3221,14 +3171,13 @@ kdbg_write_v1_header(bool write_thread_map, vnode_t vp, vfs_context_t ctx) RAW_file_written += pad_size; } - pad_size = PAGE_SIZE - (RAW_file_offset & PAGE_MASK_64); + pad_size = PAGE_SIZE - (RAW_file_offset & PAGE_MASK); if (pad_size) { - pad_buf = (char *)kalloc(pad_size); + pad_buf = (char *)kheap_alloc(KHEAP_TEMP, pad_size, Z_WAITOK | Z_ZERO); if (!pad_buf) { ret = ENOMEM; goto write_error; } - memset(pad_buf, 0, pad_size); /* * embed a cpumap in the padding bytes. @@ -3240,9 +3189,10 @@ kdbg_write_v1_header(bool write_thread_map, vnode_t vp, vfs_context_t ctx) memset(pad_buf, 0, pad_size); } - ret = vn_rdwr(UIO_WRITE, vp, (caddr_t)pad_buf, pad_size, RAW_file_offset, + assert(pad_size < INT_MAX); + ret = vn_rdwr(UIO_WRITE, vp, (caddr_t)pad_buf, (int)pad_size, RAW_file_offset, UIO_SYSSPACE, IO_NODELOCKED | IO_UNIT, vfs_context_ucred(ctx), (int *) 0, vfs_context_proc(ctx)); - kfree(pad_buf, pad_size); + kheap_free(KHEAP_TEMP, pad_buf, pad_size); if (ret) { goto write_error; } @@ -3262,7 +3212,7 @@ kdbg_clear_thread_map(void) if (kd_ctrl_page.kdebug_flags & KDBG_MAPINIT) { assert(kd_mapptr != NULL); - kmem_free(kernel_map, (vm_offset_t)kd_mapptr, kd_mapsize); + kfree(kd_mapptr, kd_mapsize); kd_mapptr = NULL; kd_mapsize = 0; kd_mapcount = 0; @@ -3372,7 +3322,8 @@ kdbg_set_nkdbufs(unsigned int req_nkdbufs) * Only allow allocation up to half the available memory (sane_size). */ uint64_t max_nkdbufs = (sane_size / 2) / sizeof(kd_buf); - nkdbufs = (req_nkdbufs > max_nkdbufs) ? max_nkdbufs : req_nkdbufs; + nkdbufs = (req_nkdbufs > max_nkdbufs) ? (unsigned int)max_nkdbufs : + req_nkdbufs; } /* @@ -3533,8 +3484,8 @@ kdbg_control(int *name, u_int namelen, user_addr_t where, size_t *sizep) memset(&kd_bufinfo, 0, sizeof(kd_bufinfo)); kd_bufinfo.nkdbufs = nkdbufs; - kd_bufinfo.nkdthreads = kd_mapcount; - + kd_bufinfo.nkdthreads = kd_mapcount < INT_MAX ? 
(int)kd_mapcount : + INT_MAX; if ((kd_ctrl_page.kdebug_slowcheck & SLOW_NOLOG)) { kd_bufinfo.nolog = 1; } else { @@ -3660,26 +3611,18 @@ kdbg_control(int *name, u_int namelen, user_addr_t where, size_t *sizep) p = current_proc(); fd = value; - proc_fdlock(p); - if ((ret = fp_lookup(p, fd, &fp, 1))) { - proc_fdunlock(p); - break; - } - context.vc_thread = current_thread(); - context.vc_ucred = fp->f_fglob->fg_cred; - - if (FILEGLOB_DTYPE(fp->f_fglob) != DTYPE_VNODE) { - fp_drop(p, fd, fp, 1); - proc_fdunlock(p); + if (fp_get_ftype(p, fd, DTYPE_VNODE, EBADF, &fp)) { ret = EBADF; break; } - vp = (struct vnode *)fp->f_fglob->fg_data; - proc_fdunlock(p); + + vp = fp->fp_glob->fg_data; + context.vc_thread = current_thread(); + context.vc_ucred = fp->fp_glob->fg_cred; if ((ret = vnode_getwithref(vp)) == 0) { - RAW_file_offset = fp->f_fglob->fg_offset; + RAW_file_offset = fp->fp_glob->fg_offset; if (name[0] == KERN_KDWRITETR || name[0] == KERN_KDWRITETR_V3) { number = nkdbufs * sizeof(kd_buf); @@ -3700,7 +3643,7 @@ kdbg_control(int *name, u_int namelen, user_addr_t where, size_t *sizep) ret = kdbg_write_thread_map(vp, &context); } } - fp->f_fglob->fg_offset = RAW_file_offset; + fp->fp_glob->fg_offset = RAW_file_offset; vnode_put(vp); } fp_drop(p, fd, fp, 0); @@ -3774,7 +3717,7 @@ out: int kdbg_read(user_addr_t buffer, size_t *number, vnode_t vp, vfs_context_t ctx, uint32_t file_version) { - unsigned int count; + size_t count; unsigned int cpu, min_cpu; uint64_t barrier_min = 0, barrier_max = 0, t, earliest_time; int error = 0; @@ -3786,14 +3729,14 @@ kdbg_read(user_addr_t buffer, size_t *number, vnode_t vp, vfs_context_t ctx, uin struct kd_storage *kdsp_actual; struct kd_bufinfo *kdbp; struct kd_bufinfo *min_kdbp; - uint32_t tempbuf_count; + size_t tempbuf_count; uint32_t tempbuf_number; uint32_t old_kdebug_flags; uint32_t old_kdebug_slowcheck; bool out_of_events = false; bool wrapped = false; - assert(number); + assert(number != NULL); count = *number / sizeof(kd_buf); *number = 0; @@ -4039,10 +3982,17 @@ next_event: if (traced_retrograde) { continue; } + if (kdbg_debug) { + printf("kdebug: RETRO EVENT: debugid %#8x: " + "time %lld from CPU %u " + "(barrier at time %lld)\n", + kdsp_actual->kds_records[rcursor].debugid, + t, cpu, barrier_min); + } kdbg_set_timestamp_and_cpu(tempbuf, min_kdbp->kd_prev_timebase, kdbg_get_cpu(tempbuf)); tempbuf->arg1 = tempbuf->debugid; - tempbuf->arg2 = earliest_time; + tempbuf->arg2 = (kd_buf_argtype)earliest_time; tempbuf->arg3 = 0; tempbuf->arg4 = 0; tempbuf->debugid = TRACE_RETROGRADE_EVENTS; @@ -4230,27 +4180,15 @@ kdbg_test(size_t flavor) #undef KDEBUG_TEST_CODE void -kdebug_init(unsigned int n_events, char *filter_desc, bool wrapping) +kdebug_init(unsigned int n_events, char *filter_desc, enum kdebug_opts opts) { assert(filter_desc != NULL); -#if defined(__x86_64__) - /* only trace MACH events when outputting kdebug to serial */ - if (kdebug_serial) { - n_events = 1; - if (filter_desc[0] == '\0') { - filter_desc[0] = 'C'; - filter_desc[1] = '1'; - filter_desc[2] = '\0'; - } - } -#endif /* defined(__x86_64__) */ - if (log_leaks && n_events == 0) { n_events = 200000; } - kdebug_trace_start(n_events, filter_desc, wrapping, false); + kdebug_trace_start(n_events, filter_desc, opts); } static void @@ -4269,7 +4207,7 @@ kdbg_set_typefilter_string(const char *filter_desc) if (filter_desc[0] >= '0' && filter_desc[0] <= '9') { unsigned long csc = strtoul(filter_desc, NULL, 0); if (filter_desc != end && csc <= KDBG_CSC_MAX) { - typefilter_allow_csc(kdbg_typefilter, 
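/*
 * A condensed sketch of the file-descriptor pattern both kdebug call sites
 * above now share: fp_get_ftype() combines the lookup with the DTYPE_VNODE
 * check, removing the old proc_fdlock()/fp_lookup()/FILEGLOB_DTYPE() sequence.
 * The helper name is illustrative and error handling is abbreviated; the
 * individual calls are the ones used in the change.
 */
static int
kdbg_with_vnode_fd(proc_t p, int fd)
{
	struct fileproc *fp;
	struct vfs_context context;
	vnode_t vp;
	int error;

	if (fp_get_ftype(p, fd, DTYPE_VNODE, EBADF, &fp)) {
		return EBADF;
	}
	vp = fp->fp_glob->fg_data;
	context.vc_thread = current_thread();
	context.vc_ucred = fp->fp_glob->fg_cred;

	if ((error = vnode_getwithref(vp)) == 0) {
		/* ... vn_rdwr() against vp with vfs_context_ucred(&context),
		 *     tracking fp->fp_glob->fg_offset ... */
		vnode_put(vp);
	}
	fp_drop(p, fd, fp, 0);
	return error;
}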
csc); + typefilter_allow_csc(kdbg_typefilter, (uint16_t)csc); } return; } @@ -4279,35 +4217,36 @@ kdbg_set_typefilter_string(const char *filter_desc) char filter_type = filter_desc[0]; if (filter_type != 'C' && filter_type != 'S') { + printf("kdebug: unexpected filter type `%c'\n", filter_type); return; } filter_desc++; allow_value = strtoul(filter_desc, &end, 0); if (filter_desc == end) { - /* cannot parse as integer */ + printf("kdebug: cannot parse `%s' as integer\n", filter_desc); return; } switch (filter_type) { case 'C': - if (allow_value <= KDBG_CLASS_MAX) { - typefilter_allow_class(kdbg_typefilter, allow_value); - } else { - /* illegal class */ + if (allow_value > KDBG_CLASS_MAX) { + printf("kdebug: class 0x%lx is invalid\n", allow_value); return; } + printf("kdebug: C 0x%lx\n", allow_value); + typefilter_allow_class(kdbg_typefilter, (uint8_t)allow_value); break; case 'S': - if (allow_value <= KDBG_CSC_MAX) { - typefilter_allow_csc(kdbg_typefilter, allow_value); - } else { - /* illegal class subclass */ + if (allow_value > KDBG_CSC_MAX) { + printf("kdebug: class-subclass 0x%lx is invalid\n", allow_value); return; } + printf("kdebug: S 0x%lx\n", allow_value); + typefilter_allow_csc(kdbg_typefilter, (uint16_t)allow_value); break; default: - return; + __builtin_unreachable(); } /* advance to next filter entry */ @@ -4318,13 +4257,23 @@ kdbg_set_typefilter_string(const char *filter_desc) } } +uint64_t +kdebug_wake(void) +{ + if (!wake_nkdbufs) { + return 0; + } + uint64_t start = mach_absolute_time(); + kdebug_trace_start(wake_nkdbufs, NULL, trace_wrap ? KDOPT_WRAPPING : 0); + return mach_absolute_time() - start; +} + /* - * This function is meant to be called from the bootstrap thread or coming out - * of acpi_idle_kernel. + * This function is meant to be called from the bootstrap thread or kdebug_wake. */ void kdebug_trace_start(unsigned int n_events, const char *filter_desc, - bool wrapping, bool at_wake) + enum kdebug_opts opts) { if (!n_events) { kd_early_done = true; @@ -4341,7 +4290,7 @@ kdebug_trace_start(unsigned int n_events, const char *filter_desc, kernel_debug_string_early("start_kern_tracing"); - if (kdbg_reinit(true)) { + if (kdbg_reinit((opts & KDOPT_ATBOOT))) { printf("error from kdbg_reinit, kernel tracing not started\n"); goto out; } @@ -4350,7 +4299,7 @@ kdebug_trace_start(unsigned int n_events, const char *filter_desc, * Wrapping is disabled because boot and wake tracing is interested in * the earliest events, at the expense of later ones. */ - if (!wrapping) { + if (!(opts & KDOPT_WRAPPING)) { uint32_t old1, old2; (void)disable_wrap(&old1, &old2); } @@ -4368,14 +4317,13 @@ kdebug_trace_start(unsigned int n_events, const char *filter_desc, */ bool s = ml_set_interrupts_enabled(false); - if (at_wake) { + if (!(opts & KDOPT_ATBOOT)) { kdbg_thrmap_init(); } - kdbg_set_tracing_enabled(true, KDEBUG_ENABLE_TRACE | (kdebug_serial ? - KDEBUG_ENABLE_SERIAL : 0)); + kdbg_set_tracing_enabled(true, KDEBUG_ENABLE_TRACE); - if (!at_wake) { + if ((opts & KDOPT_ATBOOT)) { /* * Transfer all very early events from the static buffer into the real * buffers. 
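/*
 * A short sketch of the option-bit interface that replaces the old
 * (wrapping, at_wake) boolean pair in kdebug_trace_start().  The wake-path
 * call is the one added above in kdebug_wake(); the boot-path call is a
 * hypothetical illustration of how KDOPT_ATBOOT composes with it.
 */
	/* coming out of sleep: wrap only if "trace_wrap" was requested */
	kdebug_trace_start(wake_nkdbufs, NULL, trace_wrap ? KDOPT_WRAPPING : 0);

	/*
	 * at boot (hypothetical caller): keep the earliest events and transfer
	 * the static early-boot buffer once the real buffers exist
	 */
	kdebug_trace_start(n_events, filter_desc, KDOPT_ATBOOT);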
@@ -4385,14 +4333,8 @@ kdebug_trace_start(unsigned int n_events, const char *filter_desc, ml_set_interrupts_enabled(s); - printf("kernel tracing started with %u events\n", n_events); - -#if KDEBUG_MOJO_TRACE - if (kdebug_serial) { - printf("serial output enabled with %lu named events\n", - sizeof(kd_events) / sizeof(kd_event_t)); - } -#endif /* KDEBUG_MOJO_TRACE */ + printf("kernel tracing started with %u events, filter = %s\n", n_events, + filter_desc ?: "none"); out: ktrace_end_single_threaded(); @@ -4456,7 +4398,7 @@ kdbg_dump_trace_to_file(const char *filename) .debugid = TRACE_WRITING_EVENTS | DBG_FUNC_END, .arg1 = write_size, .arg2 = ret, - .arg5 = thread_tid(current_thread()), + .arg5 = (kd_buf_argtype)thread_tid(current_thread()), }; kdbg_set_timestamp_and_cpu(&end_event, kdbg_timestamp(), cpu_number()); @@ -4504,150 +4446,3 @@ SYSCTL_QUAD(_kern_kdbg, OID_AUTO, oldest_time, CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED, &kd_ctrl_page.oldest_time, "Find the oldest timestamp still in trace"); - -#if KDEBUG_MOJO_TRACE -static kd_event_t * -binary_search(uint32_t id) -{ - int low, high, mid; - - low = 0; - high = (int)(sizeof(kd_events) / sizeof(kd_event_t)) - 1; - - while (true) { - mid = (low + high) / 2; - - if (low > high) { - return NULL; /* failed */ - } else if (low + 1 >= high) { - /* We have a match */ - if (kd_events[high].id == id) { - return &kd_events[high]; - } else if (kd_events[low].id == id) { - return &kd_events[low]; - } else { - return NULL; /* search failed */ - } - } else if (id < kd_events[mid].id) { - high = mid; - } else { - low = mid; - } - } -} - -/* - * Look up event id to get name string. - * Using a per-cpu cache of a single entry - * before resorting to a binary search of the full table. - */ -#define NCACHE 1 -static kd_event_t *last_hit[MAX_CPUS]; -static kd_event_t * -event_lookup_cache(uint32_t cpu, uint32_t id) -{ - if (last_hit[cpu] == NULL || last_hit[cpu]->id != id) { - last_hit[cpu] = binary_search(id); - } - return last_hit[cpu]; -} - -static uint64_t kd_last_timstamp; - -static void -kdebug_serial_print( - uint32_t cpunum, - uint32_t debugid, - uint64_t timestamp, - uintptr_t arg1, - uintptr_t arg2, - uintptr_t arg3, - uintptr_t arg4, - uintptr_t threadid - ) -{ - char kprintf_line[192]; - char event[40]; - uint64_t us = timestamp / NSEC_PER_USEC; - uint64_t us_tenth = (timestamp % NSEC_PER_USEC) / 100; - uint64_t delta = timestamp - kd_last_timstamp; - uint64_t delta_us = delta / NSEC_PER_USEC; - uint64_t delta_us_tenth = (delta % NSEC_PER_USEC) / 100; - uint32_t event_id = debugid & KDBG_EVENTID_MASK; - const char *command; - const char *bra; - const char *ket; - kd_event_t *ep; - - /* event time and delta from last */ - snprintf(kprintf_line, sizeof(kprintf_line), - "%11llu.%1llu %8llu.%1llu ", - us, us_tenth, delta_us, delta_us_tenth); - - - /* event (id or name) - start prefixed by "[", end postfixed by "]" */ - bra = (debugid & DBG_FUNC_START) ? "[" : " "; - ket = (debugid & DBG_FUNC_END) ? "]" : " "; - ep = event_lookup_cache(cpunum, event_id); - if (ep) { - if (strlen(ep->name) < sizeof(event) - 3) { - snprintf(event, sizeof(event), "%s%s%s", - bra, ep->name, ket); - } else { - snprintf(event, sizeof(event), "%s%x(name too long)%s", - bra, event_id, ket); - } - } else { - snprintf(event, sizeof(event), "%s%x%s", - bra, event_id, ket); - } - snprintf(kprintf_line + strlen(kprintf_line), - sizeof(kprintf_line) - strlen(kprintf_line), - "%-40s ", event); - - /* arg1 .. 
arg4 with special cases for strings */ - switch (event_id) { - case VFS_LOOKUP: - case VFS_LOOKUP_DONE: - if (debugid & DBG_FUNC_START) { - /* arg1 hex then arg2..arg4 chars */ - snprintf(kprintf_line + strlen(kprintf_line), - sizeof(kprintf_line) - strlen(kprintf_line), - "%-16lx %-8s%-8s%-8s ", - arg1, (char*)&arg2, (char*)&arg3, (char*)&arg4); - break; - } - /* else fall through for arg1..arg4 chars */ - case TRACE_STRING_EXEC: - case TRACE_STRING_NEWTHREAD: - case TRACE_INFO_STRING: - snprintf(kprintf_line + strlen(kprintf_line), - sizeof(kprintf_line) - strlen(kprintf_line), - "%-8s%-8s%-8s%-8s ", - (char*)&arg1, (char*)&arg2, (char*)&arg3, (char*)&arg4); - break; - default: - snprintf(kprintf_line + strlen(kprintf_line), - sizeof(kprintf_line) - strlen(kprintf_line), - "%-16lx %-16lx %-16lx %-16lx", - arg1, arg2, arg3, arg4); - } - - /* threadid, cpu and command name */ - if (threadid == (uintptr_t)thread_tid(current_thread()) && - current_proc() && - current_proc()->p_comm[0]) { - command = current_proc()->p_comm; - } else { - command = "-"; - } - snprintf(kprintf_line + strlen(kprintf_line), - sizeof(kprintf_line) - strlen(kprintf_line), - " %-16lx %-2d %s\n", - threadid, cpunum, command); - - kprintf("%s", kprintf_line); - kd_last_timstamp = timestamp; -} - -#endif diff --git a/bsd/kern/kern_acct.c b/bsd/kern/kern_acct.c index b9a3dde38..fd3172f7f 100644 --- a/bsd/kern/kern_acct.c +++ b/bsd/kern/kern_acct.c @@ -268,14 +268,14 @@ acct_process(proc_t p) /* (2) The amount of user and system time that was used */ calcru(p, &ut, &st, NULL); - an_acct.ac_utime = encode_comp_t(ut.tv_sec, ut.tv_usec); - an_acct.ac_stime = encode_comp_t(st.tv_sec, st.tv_usec); + an_acct.ac_utime = encode_comp_t((uint32_t)ut.tv_sec, ut.tv_usec); + an_acct.ac_stime = encode_comp_t((uint32_t)st.tv_sec, st.tv_usec); /* (3) The elapsed time the commmand ran (and its starting time) */ - an_acct.ac_btime = p->p_start.tv_sec; + an_acct.ac_btime = (u_int32_t)p->p_start.tv_sec; microtime(&tmp); timevalsub(&tmp, &p->p_start); - an_acct.ac_etime = encode_comp_t(tmp.tv_sec, tmp.tv_usec); + an_acct.ac_etime = encode_comp_t((uint32_t)tmp.tv_sec, tmp.tv_usec); /* (4) The average amount of memory used */ proc_lock(p); @@ -284,15 +284,15 @@ acct_process(proc_t p) r = &rup; tmp = ut; timevaladd(&tmp, &st); - t = tmp.tv_sec * hz + tmp.tv_usec / tick; + t = (int)(tmp.tv_sec * hz + tmp.tv_usec / tick); if (t) { - an_acct.ac_mem = (r->ru_ixrss + r->ru_idrss + r->ru_isrss) / t; + an_acct.ac_mem = (u_int16_t)((r->ru_ixrss + r->ru_idrss + r->ru_isrss) / t); } else { an_acct.ac_mem = 0; } /* (5) The number of disk I/O operations done */ - an_acct.ac_io = encode_comp_t(r->ru_inblock + r->ru_oublock, 0); + an_acct.ac_io = encode_comp_t((uint32_t)(r->ru_inblock + r->ru_oublock), 0); /* (6) The UID and GID of the process */ safecred = kauth_cred_proc_ref(p); @@ -316,7 +316,7 @@ acct_process(proc_t p) } /* (8) The boolean flags that tell how the process terminated, etc. */ - an_acct.ac_flag = p->p_acflag; + an_acct.ac_flag = (u_int8_t)p->p_acflag; /* * Now, just write the accounting information to the file. @@ -369,7 +369,7 @@ encode_comp_t(uint32_t s, uint32_t us) /* Clean it up and polish it off. */ exp <<= MANTSIZE; /* Shift the exponent into place */ exp += s; /* and add on the mantissa. */ - return exp; + return (comp_t)exp; } /* diff --git a/bsd/kern/kern_aio.c b/bsd/kern/kern_aio.c index 4a07657cb..8a4bedecf 100644 --- a/bsd/kern/kern_aio.c +++ b/bsd/kern/kern_aio.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003-2016 Apple Inc. 
All rights reserved. + * Copyright (c) 2003-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -70,41 +70,115 @@ #include -#include +#include #include -#define AIO_work_queued 1 -#define AIO_worker_wake 2 -#define AIO_completion_sig 3 -#define AIO_completion_cleanup_wait 4 -#define AIO_completion_cleanup_wake 5 +#define AIO_work_queued 1 +#define AIO_worker_wake 2 +#define AIO_completion_sig 3 +#define AIO_completion_cleanup_wait 4 +#define AIO_completion_cleanup_wake 5 #define AIO_completion_suspend_wake 6 -#define AIO_fsync_delay 7 -#define AIO_cancel 10 -#define AIO_cancel_async_workq 11 -#define AIO_cancel_sync_workq 12 -#define AIO_cancel_activeq 13 -#define AIO_cancel_doneq 14 -#define AIO_fsync 20 -#define AIO_read 30 -#define AIO_write 40 -#define AIO_listio 50 -#define AIO_error 60 -#define AIO_error_val 61 -#define AIO_error_activeq 62 -#define AIO_error_workq 63 -#define AIO_return 70 -#define AIO_return_val 71 -#define AIO_return_activeq 72 -#define AIO_return_workq 73 -#define AIO_exec 80 -#define AIO_exit 90 -#define AIO_exit_sleep 91 -#define AIO_close 100 -#define AIO_close_sleep 101 -#define AIO_suspend 110 -#define AIO_suspend_sleep 111 -#define AIO_worker_thread 120 +#define AIO_fsync_delay 7 +#define AIO_cancel 10 +#define AIO_cancel_async_workq 11 +#define AIO_cancel_sync_workq 12 +#define AIO_cancel_activeq 13 +#define AIO_cancel_doneq 14 +#define AIO_fsync 20 +#define AIO_read 30 +#define AIO_write 40 +#define AIO_listio 50 +#define AIO_error 60 +#define AIO_error_val 61 +#define AIO_error_activeq 62 +#define AIO_error_workq 63 +#define AIO_return 70 +#define AIO_return_val 71 +#define AIO_return_activeq 72 +#define AIO_return_workq 73 +#define AIO_exec 80 +#define AIO_exit 90 +#define AIO_exit_sleep 91 +#define AIO_close 100 +#define AIO_close_sleep 101 +#define AIO_suspend 110 +#define AIO_suspend_sleep 111 +#define AIO_worker_thread 120 + +__options_decl(aio_entry_flags_t, uint32_t, { + AIO_READ = 0x00000001, /* a read */ + AIO_WRITE = 0x00000002, /* a write */ + AIO_FSYNC = 0x00000004, /* aio_fsync with op = O_SYNC */ + AIO_DSYNC = 0x00000008, /* aio_fsync with op = O_DSYNC (not supported yet) */ + AIO_LIO = 0x00000010, /* lio_listio generated IO */ + AIO_LIO_WAIT = 0x00000020, /* lio_listio is waiting on the leader */ + + /* + * These flags mean that this entry is blocking either: + * - close (AIO_CLOSE_WAIT) + * - exit or exec (AIO_EXIT_WAIT) + * + * These flags are mutually exclusive, and the AIO_EXIT_WAIT variant + * will also neuter notifications in do_aio_completion_and_unlock(). + */ + AIO_CLOSE_WAIT = 0x00004000, + AIO_EXIT_WAIT = 0x00008000, +}); + +/*! @struct aio_workq_entry + * + * @discussion + * This represents a piece of aio/lio work. + * + * The ownership rules go as follows: + * + * - the "proc" owns one refcount on the entry (from creation), while it is + * enqueued on the aio_activeq and then the aio_doneq. + * + * either aio_return() (user read the status) or _aio_exit() (the process + * died) will dequeue the entry and consume this ref. + * + * - the async workqueue owns one refcount once the work is submitted, + * which is consumed in do_aio_completion_and_unlock(). + * + * This ref protects the entry for the the end of + * do_aio_completion_and_unlock() (when signal delivery happens). + * + * - lio_listio() for batches picks one of the entries to be the "leader" + * of the batch. 
Each work item will have a refcount on its leader + * so that the accounting of the batch completion can be done on the leader + * (to be able to decrement lio_pending). + * + * This ref is consumed in do_aio_completion_and_unlock() as well. + * + * - lastly, in lio_listio() when the LIO_WAIT behavior is requested, + * an extra ref is taken in this syscall as it needs to keep accessing + * the leader "lio_pending" field until it hits 0. + */ +struct aio_workq_entry { + /* queue lock */ + TAILQ_ENTRY(aio_workq_entry) aio_workq_link; + + /* Proc lock */ + TAILQ_ENTRY(aio_workq_entry) aio_proc_link; /* p_aio_activeq or p_aio_doneq */ + user_ssize_t returnval; /* return value from read / write request */ + errno_t errorval; /* error value from read / write request */ + os_refcnt_t aio_refcount; + aio_entry_flags_t flags; + + int lio_pending; /* pending I/Os in lio group, only on leader */ + struct aio_workq_entry *lio_leader; /* pointer to the lio leader, can be self */ + + /* Initialized and never changed, safe to access */ + struct proc *procp; /* user proc that queued this request */ + user_addr_t uaiocbp; /* pointer passed in from user land */ + struct user_aiocb aiocb; /* copy of aiocb from user land */ + thread_t thread; /* thread that queued this request */ + + /* Initialized, and possibly freed by aio_work_thread() or at free if cancelled */ + vm_map_t aio_map; /* user land map we have a reference to */ +}; /* * aio requests queue up on the aio_async_workq or lio_sync_workq (for @@ -117,16 +191,13 @@ */ typedef struct aio_workq { TAILQ_HEAD(, aio_workq_entry) aioq_entries; - int aioq_count; - lck_mtx_t aioq_mtx; + lck_spin_t aioq_lock; struct waitq aioq_waitq; } *aio_workq_t; #define AIO_NUM_WORK_QUEUES 1 struct aio_anchor_cb { - volatile int32_t aio_inflight_count; /* entries that have been taken from a workq */ - volatile int32_t aio_done_count; /* entries on all done queues (proc.aio_doneq) */ - volatile int32_t aio_total_count; /* total extant entries */ + os_atomic(int) aio_total_count; /* total extant entries */ /* Hash table of queues here */ int aio_num_workqs; @@ -134,21 +205,13 @@ struct aio_anchor_cb { }; typedef struct aio_anchor_cb aio_anchor_cb; -struct aio_lio_context { - int io_waiter; - int io_issued; - int io_completed; -}; -typedef struct aio_lio_context aio_lio_context; - - /* * Notes on aio sleep / wake channels. * We currently pick a couple fields within the proc structure that will allow * us sleep channels that currently do not collide with any other kernel routines. * At this time, for binary compatibility reasons, we cannot create new proc fields. 
*/ -#define AIO_SUSPEND_SLEEP_CHAN p_aio_active_count +#define AIO_SUSPEND_SLEEP_CHAN p_aio_activeq #define AIO_CLEANUP_SLEEP_CHAN p_aio_total_count #define ASSERT_AIO_FROM_PROC(aiop, theproc) \ @@ -162,67 +225,43 @@ typedef struct aio_lio_context aio_lio_context; static void aio_proc_lock(proc_t procp); static void aio_proc_lock_spin(proc_t procp); static void aio_proc_unlock(proc_t procp); -static lck_mtx_t* aio_proc_mutex(proc_t procp); -static void aio_proc_move_done_locked(proc_t procp, aio_workq_entry *entryp); -static void aio_proc_remove_done_locked(proc_t procp, aio_workq_entry *entryp); -static int aio_get_process_count(proc_t procp ); -static int aio_active_requests_for_process(proc_t procp ); -static int aio_proc_active_requests_for_file(proc_t procp, int fd); -static boolean_t is_already_queued(proc_t procp, user_addr_t aiocbp ); -static boolean_t should_cancel(aio_workq_entry *entryp, user_addr_t aiocbp, int fd); - -static void aio_entry_lock(aio_workq_entry *entryp); -static void aio_entry_lock_spin(aio_workq_entry *entryp); +static lck_mtx_t *aio_proc_mutex(proc_t procp); +static bool aio_has_active_requests_for_process(proc_t procp); +static bool aio_proc_has_active_requests_for_file(proc_t procp, int fd); +static boolean_t is_already_queued(proc_t procp, user_addr_t aiocbp); + static aio_workq_t aio_entry_workq(aio_workq_entry *entryp); -static lck_mtx_t* aio_entry_mutex(__unused aio_workq_entry *entryp); static void aio_workq_remove_entry_locked(aio_workq_t queue, aio_workq_entry *entryp); static void aio_workq_add_entry_locked(aio_workq_t queue, aio_workq_entry *entryp); -static void aio_entry_ref_locked(aio_workq_entry *entryp); -static void aio_entry_unref_locked(aio_workq_entry *entryp); static void aio_entry_ref(aio_workq_entry *entryp); static void aio_entry_unref(aio_workq_entry *entryp); -static void aio_entry_update_for_cancel(aio_workq_entry *entryp, boolean_t cancelled, - int wait_for_completion, boolean_t disable_notification); -static int aio_entry_try_workq_remove(aio_workq_entry *entryp); -static boolean_t aio_delay_fsync_request( aio_workq_entry *entryp ); -static int aio_free_request(aio_workq_entry *entryp); +static bool aio_entry_try_workq_remove(aio_workq_entry *entryp); +static boolean_t aio_delay_fsync_request(aio_workq_entry *entryp); +static void aio_free_request(aio_workq_entry *entryp); static void aio_workq_init(aio_workq_t wq); static void aio_workq_lock_spin(aio_workq_t wq); static void aio_workq_unlock(aio_workq_t wq); -static lck_mtx_t* aio_workq_mutex(aio_workq_t wq); - -static void aio_work_thread( void ); -static aio_workq_entry *aio_get_some_work( void ); - -static int aio_get_all_queues_count( void ); -static int aio_queue_async_request(proc_t procp, user_addr_t aiocbp, int kindOfIO ); -static int aio_validate( aio_workq_entry *entryp ); -static int aio_increment_total_count(void); -static int aio_decrement_total_count(void); - -static int do_aio_cancel_locked(proc_t p, int fd, user_addr_t aiocbp, int wait_for_completion, boolean_t disable_notification ); -static void do_aio_completion( aio_workq_entry *entryp ); -static int do_aio_fsync( aio_workq_entry *entryp ); -static int do_aio_read( aio_workq_entry *entryp ); -static int do_aio_write( aio_workq_entry *entryp ); -static void do_munge_aiocb_user32_to_user( struct user32_aiocb *my_aiocbp, struct user_aiocb *the_user_aiocbp ); -static void do_munge_aiocb_user64_to_user( struct user64_aiocb *my_aiocbp, struct user_aiocb *the_user_aiocbp ); -static int lio_create_entry(proc_t procp, - 
user_addr_t aiocbp, - void *group_tag, - aio_workq_entry **entrypp ); -static aio_workq_entry *aio_create_queue_entry(proc_t procp, - user_addr_t aiocbp, - void *group_tag, - int kindOfIO); -static user_addr_t *aio_copy_in_list(proc_t procp, user_addr_t aiocblist, int nent); -static void free_lio_context(aio_lio_context* context); -static void aio_enqueue_work( proc_t procp, aio_workq_entry *entryp, int proc_locked); - -#define ASSERT_AIO_PROC_LOCK_OWNED(p) lck_mtx_assert(aio_proc_mutex((p)), LCK_MTX_ASSERT_OWNED) -#define ASSERT_AIO_WORKQ_LOCK_OWNED(q) lck_mtx_assert(aio_workq_mutex((q)), LCK_MTX_ASSERT_OWNED) -#define ASSERT_AIO_ENTRY_LOCK_OWNED(e) lck_mtx_assert(aio_entry_mutex((e)), LCK_MTX_ASSERT_OWNED) +static lck_spin_t *aio_workq_lock(aio_workq_t wq); + +static void aio_work_thread(void *arg, wait_result_t wr); +static aio_workq_entry *aio_get_some_work(void); + +static int aio_queue_async_request(proc_t procp, user_addr_t aiocbp, aio_entry_flags_t); +static int aio_validate(proc_t, aio_workq_entry *entryp); + +static int do_aio_cancel_locked(proc_t p, int fd, user_addr_t aiocbp, aio_entry_flags_t); +static void do_aio_completion_and_unlock(proc_t p, aio_workq_entry *entryp); +static int do_aio_fsync(aio_workq_entry *entryp); +static int do_aio_read(aio_workq_entry *entryp); +static int do_aio_write(aio_workq_entry *entryp); +static void do_munge_aiocb_user32_to_user(struct user32_aiocb *my_aiocbp, struct user_aiocb *the_user_aiocbp); +static void do_munge_aiocb_user64_to_user(struct user64_aiocb *my_aiocbp, struct user_aiocb *the_user_aiocbp); +static aio_workq_entry *aio_create_queue_entry(proc_t procp, user_addr_t aiocbp, aio_entry_flags_t); +static int aio_copy_in_list(proc_t, user_addr_t, user_addr_t *, int); + +#define ASSERT_AIO_PROC_LOCK_OWNED(p) LCK_MTX_ASSERT(aio_proc_mutex(p), LCK_MTX_ASSERT_OWNED) +#define ASSERT_AIO_WORKQ_LOCK_OWNED(q) LCK_SPIN_ASSERT(aio_workq_lock(q), LCK_ASSERT_OWNED) /* * EXTERNAL PROTOTYPES @@ -231,13 +270,10 @@ static void aio_enqueue_work( proc_t procp, aio_workq_entry *entryp, /* in ...bsd/kern/sys_generic.c */ extern int dofileread(vfs_context_t ctx, struct fileproc *fp, user_addr_t bufp, user_size_t nbyte, - off_t offset, int flags, user_ssize_t *retval ); + off_t offset, int flags, user_ssize_t *retval); extern int dofilewrite(vfs_context_t ctx, struct fileproc *fp, user_addr_t bufp, user_size_t nbyte, off_t offset, - int flags, user_ssize_t *retval ); -#if DEBUG -static uint32_t lio_contexts_alloced = 0; -#endif /* DEBUG */ + int flags, user_ssize_t *retval); /* * aio external global variables. @@ -250,33 +286,16 @@ extern int aio_worker_threads; /* AIO_THREAD_COUNT - configurab /* * aio static variables. 
*/ -static aio_anchor_cb aio_anchor; -static lck_grp_t *aio_proc_lock_grp; -static lck_grp_t *aio_entry_lock_grp; -static lck_grp_t *aio_queue_lock_grp; -static lck_attr_t *aio_lock_attr; -static lck_grp_attr_t *aio_lock_grp_attr; -static struct zone *aio_workq_zonep; -static lck_mtx_t aio_entry_mtx; -static lck_mtx_t aio_proc_mtx; - -static void -aio_entry_lock(__unused aio_workq_entry *entryp) -{ - lck_mtx_lock(&aio_entry_mtx); -} - -static void -aio_entry_lock_spin(__unused aio_workq_entry *entryp) -{ - lck_mtx_lock_spin(&aio_entry_mtx); -} +static aio_anchor_cb aio_anchor = { + .aio_num_workqs = AIO_NUM_WORK_QUEUES, +}; +os_refgrp_decl(static, aio_refgrp, "aio", NULL); +static LCK_GRP_DECLARE(aio_proc_lock_grp, "aio_proc"); +static LCK_GRP_DECLARE(aio_queue_lock_grp, "aio_queue"); +static LCK_MTX_DECLARE(aio_proc_mtx, &aio_proc_lock_grp); -static void -aio_entry_unlock(__unused aio_workq_entry *entryp) -{ - lck_mtx_unlock(&aio_entry_mtx); -} +static ZONE_DECLARE(aio_workq_zonep, "aiowq", sizeof(aio_workq_entry), + ZC_ZFREE_CLEARMEM); /* Hash */ static aio_workq_t @@ -285,18 +304,11 @@ aio_entry_workq(__unused aio_workq_entry *entryp) return &aio_anchor.aio_async_workqs[0]; } -static lck_mtx_t* -aio_entry_mutex(__unused aio_workq_entry *entryp) -{ - return &aio_entry_mtx; -} - static void aio_workq_init(aio_workq_t wq) { TAILQ_INIT(&wq->aioq_entries); - wq->aioq_count = 0; - lck_mtx_init(&wq->aioq_mtx, aio_queue_lock_grp, aio_lock_attr); + lck_spin_init(&wq->aioq_lock, &aio_queue_lock_grp, LCK_ATTR_NULL); waitq_init(&wq->aioq_waitq, SYNC_POLICY_FIFO); } @@ -314,12 +326,7 @@ aio_workq_remove_entry_locked(aio_workq_t queue, aio_workq_entry *entryp) } TAILQ_REMOVE(&queue->aioq_entries, entryp, aio_workq_link); - queue->aioq_count--; entryp->aio_workq_link.tqe_prev = NULL; /* Not on a workq */ - - if (queue->aioq_count < 0) { - panic("Negative count on a queue.\n"); - } } static void @@ -328,10 +335,6 @@ aio_workq_add_entry_locked(aio_workq_t queue, aio_workq_entry *entryp) ASSERT_AIO_WORKQ_LOCK_OWNED(queue); TAILQ_INSERT_TAIL(&queue->aioq_entries, entryp, aio_workq_link); - if (queue->aioq_count < 0) { - panic("Negative count on a queue.\n"); - } - queue->aioq_count++; } static void @@ -346,24 +349,57 @@ aio_proc_lock_spin(proc_t procp) lck_mtx_lock_spin(aio_proc_mutex(procp)); } -static void -aio_proc_move_done_locked(proc_t procp, aio_workq_entry *entryp) +static bool +aio_has_any_work(void) +{ + return os_atomic_load(&aio_anchor.aio_total_count, relaxed) != 0; +} + +static bool +aio_try_proc_insert_active_locked(proc_t procp, aio_workq_entry *entryp) { + int old, new; + ASSERT_AIO_PROC_LOCK_OWNED(procp); - TAILQ_REMOVE(&procp->p_aio_activeq, entryp, aio_proc_link ); - TAILQ_INSERT_TAIL( &procp->p_aio_doneq, entryp, aio_proc_link); - procp->p_aio_active_count--; - OSIncrementAtomic(&aio_anchor.aio_done_count); + if (procp->p_aio_total_count >= aio_max_requests_per_process) { + return false; + } + + if (is_already_queued(procp, entryp->uaiocbp)) { + return false; + } + + os_atomic_rmw_loop(&aio_anchor.aio_total_count, old, new, relaxed, { + if (old >= aio_max_requests) { + os_atomic_rmw_loop_give_up(return false); + } + new = old + 1; + }); + + TAILQ_INSERT_TAIL(&procp->p_aio_activeq, entryp, aio_proc_link); + procp->p_aio_total_count++; + return true; +} + +static void +aio_proc_move_done_locked(proc_t procp, aio_workq_entry *entryp) +{ + TAILQ_REMOVE(&procp->p_aio_activeq, entryp, aio_proc_link); + TAILQ_INSERT_TAIL(&procp->p_aio_doneq, entryp, aio_proc_link); } static void 
aio_proc_remove_done_locked(proc_t procp, aio_workq_entry *entryp) { TAILQ_REMOVE(&procp->p_aio_doneq, entryp, aio_proc_link); - OSDecrementAtomic(&aio_anchor.aio_done_count); - aio_decrement_total_count(); - procp->p_aio_total_count--; + entryp->aio_proc_link.tqe_prev = NULL; + if (os_atomic_dec_orig(&aio_anchor.aio_total_count, relaxed) <= 0) { + panic("Negative total AIO count!\n"); + } + if (procp->p_aio_total_count-- <= 0) { + panic("proc %p: p_aio_total_count accounting mismatch", procp); + } } static void @@ -378,76 +414,21 @@ aio_proc_mutex(proc_t procp) return &procp->p_mlock; } -static void -aio_entry_ref_locked(aio_workq_entry *entryp) -{ - ASSERT_AIO_ENTRY_LOCK_OWNED(entryp); - - if (entryp->aio_refcount < 0) { - panic("AIO workq entry with a negative refcount.\n"); - } - entryp->aio_refcount++; -} - - -/* Return 1 if you've freed it */ -static void -aio_entry_unref_locked(aio_workq_entry *entryp) -{ - ASSERT_AIO_ENTRY_LOCK_OWNED(entryp); - - entryp->aio_refcount--; - if (entryp->aio_refcount < 0) { - panic("AIO workq entry with a negative refcount.\n"); - } -} - static void aio_entry_ref(aio_workq_entry *entryp) { - aio_entry_lock_spin(entryp); - aio_entry_ref_locked(entryp); - aio_entry_unlock(entryp); + os_ref_retain(&entryp->aio_refcount); } + static void aio_entry_unref(aio_workq_entry *entryp) { - aio_entry_lock_spin(entryp); - aio_entry_unref_locked(entryp); - - if ((entryp->aio_refcount == 0) && ((entryp->flags & AIO_DO_FREE) != 0)) { - aio_entry_unlock(entryp); + if (os_ref_release(&entryp->aio_refcount) == 0) { aio_free_request(entryp); - } else { - aio_entry_unlock(entryp); - } - - return; -} - -static void -aio_entry_update_for_cancel(aio_workq_entry *entryp, boolean_t cancelled, int wait_for_completion, boolean_t disable_notification) -{ - aio_entry_lock_spin(entryp); - - if (cancelled) { - aio_entry_ref_locked(entryp); - entryp->errorval = ECANCELED; - entryp->returnval = -1; - } - - if (wait_for_completion) { - entryp->flags |= wait_for_completion; /* flag for special completion processing */ - } - - if (disable_notification) { - entryp->flags |= AIO_DISABLE; /* Don't want a signal */ } - - aio_entry_unlock(entryp); } -static int +static bool aio_entry_try_workq_remove(aio_workq_entry *entryp) { /* Can only be cancelled if it's still on a work queue */ @@ -460,31 +441,31 @@ aio_entry_try_workq_remove(aio_workq_entry *entryp) if (entryp->aio_workq_link.tqe_prev != NULL) { aio_workq_remove_entry_locked(queue, entryp); aio_workq_unlock(queue); - return 1; + return true; } else { aio_workq_unlock(queue); } } - return 0; + return false; } static void aio_workq_lock_spin(aio_workq_t wq) { - lck_mtx_lock_spin(aio_workq_mutex(wq)); + lck_spin_lock(aio_workq_lock(wq)); } static void aio_workq_unlock(aio_workq_t wq) { - lck_mtx_unlock(aio_workq_mutex(wq)); + lck_spin_unlock(aio_workq_lock(wq)); } -static lck_mtx_t* -aio_workq_mutex(aio_workq_t wq) +static lck_spin_t* +aio_workq_lock(aio_workq_t wq) { - return &wq->aioq_mtx; + return &wq->aioq_lock; } /* @@ -495,16 +476,16 @@ aio_workq_mutex(aio_workq_t wq) * descriptor are cancelled (if possible). 
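/*
 * A user-space sketch of the admission check aio_try_proc_insert_active_locked()
 * performs above with os_atomic_rmw_loop(): increment a shared counter only
 * while it is still below the limit, and give up otherwise.  C11 atomics are
 * used here as a stand-in for the kernel's os_atomic primitives.
 */
#include <stdatomic.h>
#include <stdbool.h>

static _Atomic int total_count;

static bool
try_admit(int limit)
{
	int old = atomic_load_explicit(&total_count, memory_order_relaxed);

	do {
		if (old >= limit) {
			return false;   /* the os_atomic_rmw_loop_give_up() case */
		}
	} while (!atomic_compare_exchange_weak_explicit(&total_count, &old,
	    old + 1, memory_order_relaxed, memory_order_relaxed));

	return true;
}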
*/ int -aio_cancel(proc_t p, struct aio_cancel_args *uap, int *retval ) +aio_cancel(proc_t p, struct aio_cancel_args *uap, int *retval) { struct user_aiocb my_aiocb; int result; KERNEL_DEBUG(BSDDBG_CODE(DBG_BSD_AIO, AIO_cancel) | DBG_FUNC_START, - VM_KERNEL_ADDRPERM(p), uap->uaiocbp, 0, 0, 0); + VM_KERNEL_ADDRPERM(p), uap->aiocbp, 0, 0, 0); /* quick check to see if there are any async IO requests queued up */ - if (aio_get_all_queues_count() < 1) { + if (!aio_has_any_work()) { result = 0; *retval = AIO_ALLDONE; goto ExitRoutine; @@ -515,16 +496,16 @@ aio_cancel(proc_t p, struct aio_cancel_args *uap, int *retval ) if (proc_is64bit(p)) { struct user64_aiocb aiocb64; - result = copyin( uap->aiocbp, &aiocb64, sizeof(aiocb64)); + result = copyin(uap->aiocbp, &aiocb64, sizeof(aiocb64)); if (result == 0) { do_munge_aiocb_user64_to_user(&aiocb64, &my_aiocb); } } else { struct user32_aiocb aiocb32; - result = copyin( uap->aiocbp, &aiocb32, sizeof(aiocb32)); + result = copyin(uap->aiocbp, &aiocb32, sizeof(aiocb32)); if (result == 0) { - do_munge_aiocb_user32_to_user( &aiocb32, &my_aiocb ); + do_munge_aiocb_user32_to_user(&aiocb32, &my_aiocb); } } @@ -544,7 +525,7 @@ aio_cancel(proc_t p, struct aio_cancel_args *uap, int *retval ) } aio_proc_lock(p); - result = do_aio_cancel_locked( p, uap->fd, uap->aiocbp, 0, FALSE ); + result = do_aio_cancel_locked(p, uap->fd, uap->aiocbp, 0); ASSERT_AIO_PROC_LOCK_OWNED(p); aio_proc_unlock(p); @@ -558,10 +539,10 @@ aio_cancel(proc_t p, struct aio_cancel_args *uap, int *retval ) ExitRoutine: KERNEL_DEBUG(BSDDBG_CODE(DBG_BSD_AIO, AIO_cancel) | DBG_FUNC_END, - VM_KERNEL_ADDRPERM(p), uap->uaiocbp, result, 0, 0); + VM_KERNEL_ADDRPERM(p), uap->aiocbp, result, 0, 0); return result; -} /* aio_cancel */ +} /* @@ -570,12 +551,12 @@ ExitRoutine: * THIS MAY BLOCK. */ __private_extern__ void -_aio_close(proc_t p, int fd ) +_aio_close(proc_t p, int fd) { int error; /* quick check to see if there are any async IO requests queued up */ - if (aio_get_all_queues_count() < 1) { + if (!aio_has_any_work()) { return; } @@ -584,7 +565,7 @@ _aio_close(proc_t p, int fd ) /* cancel all async IO requests on our todo queues for this file descriptor */ aio_proc_lock(p); - error = do_aio_cancel_locked( p, fd, 0, AIO_CLOSE_WAIT, FALSE ); + error = do_aio_cancel_locked(p, fd, USER_ADDR_NULL, AIO_CLOSE_WAIT); ASSERT_AIO_PROC_LOCK_OWNED(p); if (error == AIO_NOTCANCELED) { /* @@ -599,8 +580,8 @@ _aio_close(proc_t p, int fd ) KERNEL_DEBUG(BSDDBG_CODE(DBG_BSD_AIO, AIO_close_sleep) | DBG_FUNC_NONE, VM_KERNEL_ADDRPERM(p), fd, 0, 0, 0); - while (aio_proc_active_requests_for_file(p, fd) > 0) { - msleep(&p->AIO_CLEANUP_SLEEP_CHAN, aio_proc_mutex(p), PRIBIO, "aio_close", 0 ); + while (aio_proc_has_active_requests_for_file(p, fd)) { + msleep(&p->AIO_CLEANUP_SLEEP_CHAN, aio_proc_mutex(p), PRIBIO, "aio_close", 0); } } @@ -608,9 +589,7 @@ _aio_close(proc_t p, int fd ) KERNEL_DEBUG(BSDDBG_CODE(DBG_BSD_AIO, AIO_close) | DBG_FUNC_END, VM_KERNEL_ADDRPERM(p), fd, 0, 0, 0); - - return; -} /* _aio_close */ +} /* @@ -620,44 +599,43 @@ _aio_close(proc_t p, int fd ) * fdatasync, or sync). 
*/ int -aio_error(proc_t p, struct aio_error_args *uap, int *retval ) +aio_error(proc_t p, struct aio_error_args *uap, int *retval) { aio_workq_entry *entryp; int error; KERNEL_DEBUG(BSDDBG_CODE(DBG_BSD_AIO, AIO_error) | DBG_FUNC_START, - VM_KERNEL_ADDRPERM(p), uap->uaiocbp, 0, 0, 0); + VM_KERNEL_ADDRPERM(p), uap->aiocbp, 0, 0, 0); /* see if there are any aios to check */ - if (aio_get_all_queues_count() < 1) { + if (!aio_has_any_work()) { return EINVAL; } aio_proc_lock(p); /* look for a match on our queue of async IO requests that have completed */ - TAILQ_FOREACH( entryp, &p->p_aio_doneq, aio_proc_link) { + TAILQ_FOREACH(entryp, &p->p_aio_doneq, aio_proc_link) { if (entryp->uaiocbp == uap->aiocbp) { ASSERT_AIO_FROM_PROC(entryp, p); - aio_entry_lock_spin(entryp); *retval = entryp->errorval; error = 0; - aio_entry_unlock(entryp); + KERNEL_DEBUG(BSDDBG_CODE(DBG_BSD_AIO, AIO_error_val) | DBG_FUNC_NONE, - VM_KERNEL_ADDRPERM(p), uap->uaiocbp, *retval, 0, 0); + VM_KERNEL_ADDRPERM(p), uap->aiocbp, *retval, 0, 0); goto ExitRoutine; } } /* look for a match on our queue of active async IO requests */ - TAILQ_FOREACH( entryp, &p->p_aio_activeq, aio_proc_link) { + TAILQ_FOREACH(entryp, &p->p_aio_activeq, aio_proc_link) { if (entryp->uaiocbp == uap->aiocbp) { ASSERT_AIO_FROM_PROC(entryp, p); *retval = EINPROGRESS; error = 0; KERNEL_DEBUG(BSDDBG_CODE(DBG_BSD_AIO, AIO_error_activeq) | DBG_FUNC_NONE, - VM_KERNEL_ADDRPERM(p), uap->uaiocbp, *retval, 0, 0); + VM_KERNEL_ADDRPERM(p), uap->aiocbp, *retval, 0, 0); goto ExitRoutine; } } @@ -666,11 +644,11 @@ aio_error(proc_t p, struct aio_error_args *uap, int *retval ) ExitRoutine: KERNEL_DEBUG(BSDDBG_CODE(DBG_BSD_AIO, AIO_error) | DBG_FUNC_END, - VM_KERNEL_ADDRPERM(p), uap->uaiocbp, error, 0, 0); + VM_KERNEL_ADDRPERM(p), uap->aiocbp, error, 0, 0); aio_proc_unlock(p); return error; -} /* aio_error */ +} /* @@ -681,13 +659,13 @@ ExitRoutine: * fdatasync() call. */ int -aio_fsync(proc_t p, struct aio_fsync_args *uap, int *retval ) +aio_fsync(proc_t p, struct aio_fsync_args *uap, int *retval) { + aio_entry_flags_t fsync_kind; int error; - int fsync_kind; KERNEL_DEBUG(BSDDBG_CODE(DBG_BSD_AIO, AIO_fsync) | DBG_FUNC_START, - VM_KERNEL_ADDRPERM(p), uap->uaiocbp, uap->op, 0, 0); + VM_KERNEL_ADDRPERM(p), uap->aiocbp, uap->op, 0, 0); *retval = 0; /* 0 := O_SYNC for binary backward compatibility with Panther */ @@ -701,17 +679,17 @@ aio_fsync(proc_t p, struct aio_fsync_args *uap, int *retval ) goto ExitRoutine; } - error = aio_queue_async_request( p, uap->aiocbp, fsync_kind ); + error = aio_queue_async_request(p, uap->aiocbp, fsync_kind); if (error != 0) { *retval = -1; } ExitRoutine: KERNEL_DEBUG(BSDDBG_CODE(DBG_BSD_AIO, AIO_fsync) | DBG_FUNC_END, - VM_KERNEL_ADDRPERM(p), uap->uaiocbp, error, 0, 0); + VM_KERNEL_ADDRPERM(p), uap->aiocbp, error, 0, 0); return error; -} /* aio_fsync */ +} /* aio_read - asynchronously read uap->aiocbp->aio_nbytes bytes from the @@ -719,25 +697,25 @@ ExitRoutine: * (uap->aiocbp->aio_buf). 
*/ int -aio_read(proc_t p, struct aio_read_args *uap, int *retval ) +aio_read(proc_t p, struct aio_read_args *uap, int *retval) { int error; KERNEL_DEBUG(BSDDBG_CODE(DBG_BSD_AIO, AIO_read) | DBG_FUNC_START, - VM_KERNEL_ADDRPERM(p), uap->uaiocbp, 0, 0, 0); + VM_KERNEL_ADDRPERM(p), uap->aiocbp, 0, 0, 0); *retval = 0; - error = aio_queue_async_request( p, uap->aiocbp, AIO_READ ); + error = aio_queue_async_request(p, uap->aiocbp, AIO_READ); if (error != 0) { *retval = -1; } KERNEL_DEBUG(BSDDBG_CODE(DBG_BSD_AIO, AIO_read) | DBG_FUNC_END, - VM_KERNEL_ADDRPERM(p), uap->uaiocbp, error, 0, 0); + VM_KERNEL_ADDRPERM(p), uap->aiocbp, error, 0, 0); return error; -} /* aio_read */ +} /* @@ -748,79 +726,60 @@ aio_read(proc_t p, struct aio_read_args *uap, int *retval ) * held for async IO call associated with the given aiocb pointer. */ int -aio_return(proc_t p, struct aio_return_args *uap, user_ssize_t *retval ) +aio_return(proc_t p, struct aio_return_args *uap, user_ssize_t *retval) { aio_workq_entry *entryp; - int error; - boolean_t proc_lock_held = FALSE; + int error = EINVAL; KERNEL_DEBUG(BSDDBG_CODE(DBG_BSD_AIO, AIO_return) | DBG_FUNC_START, - VM_KERNEL_ADDRPERM(p), uap->uaiocbp, 0, 0, 0); + VM_KERNEL_ADDRPERM(p), uap->aiocbp, 0, 0, 0); /* See if there are any entries to check */ - if (aio_get_all_queues_count() < 1) { - error = EINVAL; + if (!aio_has_any_work()) { goto ExitRoutine; } aio_proc_lock(p); - proc_lock_held = TRUE; *retval = 0; /* look for a match on our queue of async IO requests that have completed */ - TAILQ_FOREACH( entryp, &p->p_aio_doneq, aio_proc_link) { + TAILQ_FOREACH(entryp, &p->p_aio_doneq, aio_proc_link) { ASSERT_AIO_FROM_PROC(entryp, p); if (entryp->uaiocbp == uap->aiocbp) { /* Done and valid for aio_return(), pull it off the list */ aio_proc_remove_done_locked(p, entryp); - /* Drop the proc lock, but keep the entry locked */ - aio_entry_lock(entryp); - aio_proc_unlock(p); - proc_lock_held = FALSE; - *retval = entryp->returnval; error = 0; + aio_proc_unlock(p); - /* No references and off all lists, safe to free */ - if (entryp->aio_refcount == 0) { - aio_entry_unlock(entryp); - aio_free_request(entryp); - } else { - /* Whoever has the refcount will have to free it */ - entryp->flags |= AIO_DO_FREE; - aio_entry_unlock(entryp); - } - + aio_entry_unref(entryp); KERNEL_DEBUG(BSDDBG_CODE(DBG_BSD_AIO, AIO_return_val) | DBG_FUNC_NONE, - VM_KERNEL_ADDRPERM(p), uap->uaiocbp, *retval, 0, 0); + VM_KERNEL_ADDRPERM(p), uap->aiocbp, *retval, 0, 0); goto ExitRoutine; } } /* look for a match on our queue of active async IO requests */ - TAILQ_FOREACH( entryp, &p->p_aio_activeq, aio_proc_link) { + TAILQ_FOREACH(entryp, &p->p_aio_activeq, aio_proc_link) { ASSERT_AIO_FROM_PROC(entryp, p); if (entryp->uaiocbp == uap->aiocbp) { error = EINPROGRESS; KERNEL_DEBUG(BSDDBG_CODE(DBG_BSD_AIO, AIO_return_activeq) | DBG_FUNC_NONE, - VM_KERNEL_ADDRPERM(p), uap->uaiocbp, *retval, 0, 0); - goto ExitRoutine; + VM_KERNEL_ADDRPERM(p), uap->aiocbp, *retval, 0, 0); + break; } } - error = EINVAL; + aio_proc_unlock(p); ExitRoutine: - if (proc_lock_held) { - aio_proc_unlock(p); - } KERNEL_DEBUG(BSDDBG_CODE(DBG_BSD_AIO, AIO_return) | DBG_FUNC_END, - VM_KERNEL_ADDRPERM(p), uap->uaiocbp, error, 0, 0); + VM_KERNEL_ADDRPERM(p), uap->aiocbp, error, 0, 0); return error; -} /* aio_return */ +} /* @@ -831,33 +790,33 @@ ExitRoutine: * This routine MAY block! 
*/ __private_extern__ void -_aio_exec(proc_t p ) +_aio_exec(proc_t p) { KERNEL_DEBUG(BSDDBG_CODE(DBG_BSD_AIO, AIO_exec) | DBG_FUNC_START, VM_KERNEL_ADDRPERM(p), 0, 0, 0, 0); - _aio_exit( p ); + _aio_exit(p); KERNEL_DEBUG(BSDDBG_CODE(DBG_BSD_AIO, AIO_exec) | DBG_FUNC_END, VM_KERNEL_ADDRPERM(p), 0, 0, 0, 0); -} /* _aio_exec */ +} /* * _aio_exit - internal function used to clean up async IO requests for - * a process that is terminating (via exit() or exec() ). We cancel any async IOs + * a process that is terminating (via exit() or exec()). We cancel any async IOs * we can and wait for those already active. We also disable signaling * for cancelled or active aio requests that complete. This routine MAY block! */ __private_extern__ void -_aio_exit(proc_t p ) +_aio_exit(proc_t p) { + TAILQ_HEAD(, aio_workq_entry) tofree = TAILQ_HEAD_INITIALIZER(tofree); + aio_workq_entry *entryp, *tmp; int error; - aio_workq_entry *entryp; - /* quick check to see if there are any async IO requests queued up */ - if (aio_get_all_queues_count() < 1) { + if (!aio_has_any_work()) { return; } @@ -870,7 +829,7 @@ _aio_exit(proc_t p ) * cancel async IO requests on the todo work queue and wait for those * already active to complete. */ - error = do_aio_cancel_locked( p, 0, 0, AIO_EXIT_WAIT, TRUE ); + error = do_aio_cancel_locked(p, -1, USER_ADDR_NULL, AIO_EXIT_WAIT); ASSERT_AIO_PROC_LOCK_OWNED(p); if (error == AIO_NOTCANCELED) { /* @@ -885,62 +844,51 @@ _aio_exit(proc_t p ) KERNEL_DEBUG(BSDDBG_CODE(DBG_BSD_AIO, AIO_exit_sleep) | DBG_FUNC_NONE, VM_KERNEL_ADDRPERM(p), 0, 0, 0, 0); - while (p->p_aio_active_count != 0) { - msleep(&p->AIO_CLEANUP_SLEEP_CHAN, aio_proc_mutex(p), PRIBIO, "aio_exit", 0 ); + while (aio_has_active_requests_for_process(p)) { + msleep(&p->AIO_CLEANUP_SLEEP_CHAN, aio_proc_mutex(p), PRIBIO, "aio_exit", 0); } } - if (p->p_aio_active_count != 0) { - panic("Exiting process has %d active AIOs after cancellation has completed.\n", p->p_aio_active_count); - } + assert(!aio_has_active_requests_for_process(p)); /* release all aio resources used by this process */ - entryp = TAILQ_FIRST( &p->p_aio_doneq ); - while (entryp != NULL) { + TAILQ_FOREACH_SAFE(entryp, &p->p_aio_doneq, aio_proc_link, tmp) { ASSERT_AIO_FROM_PROC(entryp, p); - aio_workq_entry *next_entryp; - next_entryp = TAILQ_NEXT( entryp, aio_proc_link); aio_proc_remove_done_locked(p, entryp); - - /* we cannot free requests that are still completing */ - aio_entry_lock_spin(entryp); - if (entryp->aio_refcount == 0) { - aio_proc_unlock(p); - aio_entry_unlock(entryp); - aio_free_request(entryp); - - /* need to start over since aio_doneq may have been */ - /* changed while we were away. 
*/ - aio_proc_lock(p); - entryp = TAILQ_FIRST( &p->p_aio_doneq ); - continue; - } else { - /* whoever has the reference will have to do the free */ - entryp->flags |= AIO_DO_FREE; - } - - aio_entry_unlock(entryp); - entryp = next_entryp; + TAILQ_INSERT_TAIL(&tofree, entryp, aio_proc_link); } aio_proc_unlock(p); + /* free all the entries outside of the aio_proc_lock() */ + TAILQ_FOREACH_SAFE(entryp, &tofree, aio_proc_link, tmp) { + entryp->aio_proc_link.tqe_prev = NULL; + aio_entry_unref(entryp); + } + KERNEL_DEBUG(BSDDBG_CODE(DBG_BSD_AIO, AIO_exit) | DBG_FUNC_END, VM_KERNEL_ADDRPERM(p), 0, 0, 0, 0); -} /* _aio_exit */ +} -static boolean_t -should_cancel(aio_workq_entry *entryp, user_addr_t aiocbp, int fd) +static bool +should_cancel(aio_workq_entry *entryp, int fd, user_addr_t aiocbp, + aio_entry_flags_t reason) { - if ((aiocbp == USER_ADDR_NULL && fd == 0) || - (aiocbp != USER_ADDR_NULL && entryp->uaiocbp == aiocbp) || - (aiocbp == USER_ADDR_NULL && fd == entryp->aiocb.aio_fildes)) { - return TRUE; + if (reason & AIO_EXIT_WAIT) { + /* caller is _aio_exit() */ + return true; } - - return FALSE; + if (fd != entryp->aiocb.aio_fildes) { + /* not the file we're looking for */ + return false; + } + /* + * aio_cancel() or _aio_close() cancel + * everything for a given fd when aiocbp is NULL + */ + return aiocbp == USER_ADDR_NULL || entryp->uaiocbp == aiocbp; } /* @@ -955,89 +903,81 @@ should_cancel(aio_workq_entry *entryp, user_addr_t aiocbp, int fd) * target async IO requests, and AIO_ALLDONE if all target async IO requests * were already complete. * WARNING - do not deference aiocbp in this routine, it may point to user - * land data that has not been copied in (when called from aio_cancel() ) + * land data that has not been copied in (when called from aio_cancel()) * * Called with proc locked, and returns the same way. */ static int do_aio_cancel_locked(proc_t p, int fd, user_addr_t aiocbp, - int wait_for_completion, boolean_t disable_notification ) + aio_entry_flags_t reason) { - ASSERT_AIO_PROC_LOCK_OWNED(p); - - aio_workq_entry *entryp; - int result; + bool multiple_matches = (aiocbp == USER_ADDR_NULL); + aio_workq_entry *entryp, *tmp; + int result; - result = -1; + ASSERT_AIO_PROC_LOCK_OWNED(p); /* look for a match on our queue of async todo work. */ - entryp = TAILQ_FIRST(&p->p_aio_activeq); - while (entryp != NULL) { +again: + result = -1; + TAILQ_FOREACH_SAFE(entryp, &p->p_aio_activeq, aio_proc_link, tmp) { ASSERT_AIO_FROM_PROC(entryp, p); - aio_workq_entry *next_entryp; - next_entryp = TAILQ_NEXT( entryp, aio_proc_link); - if (!should_cancel(entryp, aiocbp, fd)) { - entryp = next_entryp; + if (!should_cancel(entryp, fd, aiocbp, reason)) { continue; } - /* Can only be cancelled if it's still on a work queue */ - if (aio_entry_try_workq_remove(entryp) != 0) { - /* Have removed from workq. Update entry state and take a ref */ - aio_entry_update_for_cancel(entryp, TRUE, 0, disable_notification); + if (reason) { + /* mark the entry as blocking close or exit/exec */ + entryp->flags |= reason; + if ((entryp->flags & AIO_EXIT_WAIT) && (entryp->flags & AIO_CLOSE_WAIT)) { + panic("Close and exit flags set at the same time\n"); + } + } - /* Put on the proc done queue and update counts, then unlock the proc */ - aio_proc_move_done_locked(p, entryp); - aio_proc_unlock(p); + /* Can only be cancelled if it's still on a work queue */ + if (aio_entry_try_workq_remove(entryp)) { + entryp->errorval = ECANCELED; + entryp->returnval = -1; /* Now it's officially cancelled. 
Do the completion */ - result = AIO_CANCELED; KERNEL_DEBUG(BSDDBG_CODE(DBG_BSD_AIO, AIO_cancel_async_workq) | DBG_FUNC_NONE, VM_KERNEL_ADDRPERM(p), VM_KERNEL_ADDRPERM(entryp->uaiocbp), fd, 0, 0); - do_aio_completion(entryp); + do_aio_completion_and_unlock(p, entryp); - /* This will free if the aio_return() has already happened ... */ - aio_entry_unref(entryp); aio_proc_lock(p); - if (aiocbp != USER_ADDR_NULL) { - return result; + if (multiple_matches) { + /* + * Restart from the head of the proc active queue since it + * may have been changed while we were away doing completion + * processing. + * + * Note that if we found an uncancellable AIO before, we will + * either find it again or discover that it's been completed, + * so resetting the result will not cause us to return success + * despite outstanding AIOs. + */ + goto again; } - /* - * Restart from the head of the proc active queue since it - * may have been changed while we were away doing completion - * processing. - * - * Note that if we found an uncancellable AIO before, we will - * either find it again or discover that it's been completed, - * so resetting the result will not cause us to return success - * despite outstanding AIOs. - */ - entryp = TAILQ_FIRST(&p->p_aio_activeq); - result = -1; /* As if beginning anew */ - } else { - /* - * It's been taken off the active queue already, i.e. is in flight. - * All we can do is ask for notification. - */ - result = AIO_NOTCANCELED; - - KERNEL_DEBUG(BSDDBG_CODE(DBG_BSD_AIO, AIO_cancel_activeq) | DBG_FUNC_NONE, - VM_KERNEL_ADDRPERM(p), VM_KERNEL_ADDRPERM(entryp->uaiocbp), - fd, 0, 0); + return AIO_CANCELED; + } - /* Mark for waiting and such; will not take a ref if "cancelled" arg is FALSE */ - aio_entry_update_for_cancel(entryp, FALSE, wait_for_completion, disable_notification); + /* + * It's been taken off the active queue already, i.e. is in flight. + * All we can do is ask for notification. + */ + KERNEL_DEBUG(BSDDBG_CODE(DBG_BSD_AIO, AIO_cancel_activeq) | DBG_FUNC_NONE, + VM_KERNEL_ADDRPERM(p), VM_KERNEL_ADDRPERM(entryp->uaiocbp), + fd, 0, 0); - if (aiocbp != USER_ADDR_NULL) { - return result; - } - entryp = next_entryp; + result = AIO_NOTCANCELED; + if (!multiple_matches) { + return result; } - } /* while... */ + } /* * if we didn't find any matches on the todo or active queues then look for a @@ -1049,13 +989,13 @@ do_aio_cancel_locked(proc_t p, int fd, user_addr_t aiocbp, if (result == -1) { TAILQ_FOREACH(entryp, &p->p_aio_doneq, aio_proc_link) { ASSERT_AIO_FROM_PROC(entryp, p); - if (should_cancel(entryp, aiocbp, fd)) { - result = AIO_ALLDONE; + if (should_cancel(entryp, fd, aiocbp, reason)) { KERNEL_DEBUG(BSDDBG_CODE(DBG_BSD_AIO, AIO_cancel_doneq) | DBG_FUNC_NONE, VM_KERNEL_ADDRPERM(p), VM_KERNEL_ADDRPERM(entryp->uaiocbp), fd, 0, 0); - if (aiocbp != USER_ADDR_NULL) { + result = AIO_ALLDONE; + if (!multiple_matches) { return result; } } @@ -1064,7 +1004,6 @@ do_aio_cancel_locked(proc_t p, int fd, user_addr_t aiocbp, return result; } -/* do_aio_cancel_locked */ /* @@ -1077,7 +1016,7 @@ do_aio_cancel_locked(proc_t p, int fd, user_addr_t aiocbp, * woke us up. 
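/*
 * A generic sketch of the restart-from-the-head pattern do_aio_cancel_locked()
 * relies on above: once the lock protecting the queue is dropped mid-scan
 * (here, to run completion processing), any saved iterator may be stale, so a
 * scan that must visit every match starts over from the head.  The types and
 * the matches() callback are illustrative only.
 */
#include <stdbool.h>
#include <sys/queue.h>

struct item {
	TAILQ_ENTRY(item) link;
};
TAILQ_HEAD(item_list, item);

static void
cancel_matching(struct item_list *list, bool (*matches)(struct item *),
    bool cancel_all)
{
	struct item *it, *tmp;

again:
	TAILQ_FOREACH_SAFE(it, list, link, tmp) {
		if (!matches(it)) {
			continue;
		}
		TAILQ_REMOVE(list, it, link);
		/* the queue lock would be dropped here to complete/free "it" ... */
		/* ... and re-taken; the saved "tmp" iterator can no longer be trusted */
		if (cancel_all) {
			goto again;   /* rescan from the head */
		}
		return;
	}
}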
*/ int -aio_suspend(proc_t p, struct aio_suspend_args *uap, int *retval ) +aio_suspend(proc_t p, struct aio_suspend_args *uap, int *retval) { __pthread_testcancel(1); return aio_suspend_nocancel(p, (struct aio_suspend_nocancel_args *)uap, retval); @@ -1085,14 +1024,15 @@ aio_suspend(proc_t p, struct aio_suspend_args *uap, int *retval ) int -aio_suspend_nocancel(proc_t p, struct aio_suspend_nocancel_args *uap, int *retval ) +aio_suspend_nocancel(proc_t p, struct aio_suspend_nocancel_args *uap, int *retval) { int error; - int i, count; + int i; uint64_t abstime; struct user_timespec ts; aio_workq_entry *entryp; user_addr_t *aiocbpp; + size_t aiocbpp_size; KERNEL_DEBUG(BSDDBG_CODE(DBG_BSD_AIO, AIO_suspend) | DBG_FUNC_START, VM_KERNEL_ADDRPERM(p), uap->nent, 0, 0, 0); @@ -1101,13 +1041,13 @@ aio_suspend_nocancel(proc_t p, struct aio_suspend_nocancel_args *uap, int *retva abstime = 0; aiocbpp = NULL; - count = aio_get_all_queues_count(); - if (count < 1) { + if (!aio_has_any_work()) { error = EINVAL; goto ExitThisRoutine; } - if (uap->nent < 1 || uap->nent > aio_max_requests_per_process) { + if (uap->nent < 1 || uap->nent > aio_max_requests_per_process || + os_mul_overflow(sizeof(user_addr_t), uap->nent, &aiocbpp_size)) { error = EINVAL; goto ExitThisRoutine; } @@ -1115,14 +1055,14 @@ aio_suspend_nocancel(proc_t p, struct aio_suspend_nocancel_args *uap, int *retva if (uap->timeoutp != USER_ADDR_NULL) { if (proc_is64bit(p)) { struct user64_timespec temp; - error = copyin( uap->timeoutp, &temp, sizeof(temp)); + error = copyin(uap->timeoutp, &temp, sizeof(temp)); if (error == 0) { - ts.tv_sec = temp.tv_sec; - ts.tv_nsec = temp.tv_nsec; + ts.tv_sec = (user_time_t)temp.tv_sec; + ts.tv_nsec = (user_long_t)temp.tv_nsec; } } else { struct user32_timespec temp; - error = copyin( uap->timeoutp, &temp, sizeof(temp)); + error = copyin(uap->timeoutp, &temp, sizeof(temp)); if (error == 0) { ts.tv_sec = temp.tv_sec; ts.tv_nsec = temp.tv_nsec; @@ -1139,12 +1079,12 @@ aio_suspend_nocancel(proc_t p, struct aio_suspend_nocancel_args *uap, int *retva } nanoseconds_to_absolutetime((uint64_t)ts.tv_sec * NSEC_PER_SEC + ts.tv_nsec, - &abstime ); - clock_absolutetime_interval_to_deadline( abstime, &abstime ); + &abstime); + clock_absolutetime_interval_to_deadline(abstime, &abstime); } - aiocbpp = aio_copy_in_list(p, uap->aiocblist, uap->nent); - if (aiocbpp == NULL) { + aiocbpp = kheap_alloc(KHEAP_TEMP, aiocbpp_size, Z_WAITOK); + if (aiocbpp == NULL || aio_copy_in_list(p, uap->aiocblist, aiocbpp, uap->nent)) { error = EAGAIN; goto ExitThisRoutine; } @@ -1162,7 +1102,7 @@ check_for_our_aiocbp: } /* return immediately if any aio request in the list is done */ - TAILQ_FOREACH( entryp, &p->p_aio_doneq, aio_proc_link) { + TAILQ_FOREACH(entryp, &p->p_aio_doneq, aio_proc_link) { ASSERT_AIO_FROM_PROC(entryp, p); if (entryp->uaiocbp == aiocbp) { aio_proc_unlock(p); @@ -1171,7 +1111,7 @@ check_for_our_aiocbp: goto ExitThisRoutine; } } - } /* for ( ; i < uap->nent; ) */ + } KERNEL_DEBUG(BSDDBG_CODE(DBG_BSD_AIO, AIO_suspend_sleep) | DBG_FUNC_NONE, VM_KERNEL_ADDRPERM(p), uap->nent, 0, 0, 0); @@ -1183,7 +1123,8 @@ check_for_our_aiocbp: * timeout expires, we get a wakeup call from aio_work_thread(). */ - error = msleep1(&p->AIO_SUSPEND_SLEEP_CHAN, aio_proc_mutex(p), PCATCH | PWAIT | PDROP, "aio_suspend", abstime); /* XXX better priority? */ + error = msleep1(&p->AIO_SUSPEND_SLEEP_CHAN, aio_proc_mutex(p), + PCATCH | PWAIT | PDROP, "aio_suspend", abstime); if (error == 0) { /* * got our wakeup call from aio_work_thread(). 
@@ -1204,14 +1145,14 @@ check_for_our_aiocbp: ExitThisRoutine: if (aiocbpp != NULL) { - FREE( aiocbpp, M_TEMP ); + kheap_free(KHEAP_TEMP, aiocbpp, aiocbpp_size); } KERNEL_DEBUG(BSDDBG_CODE(DBG_BSD_AIO, AIO_suspend) | DBG_FUNC_END, VM_KERNEL_ADDRPERM(p), uap->nent, error, 0, 0); return error; -} /* aio_suspend */ +} /* aio_write - asynchronously write uap->aiocbp->aio_nbytes bytes to the @@ -1225,37 +1166,29 @@ aio_write(proc_t p, struct aio_write_args *uap, int *retval __unused) int error; KERNEL_DEBUG(BSDDBG_CODE(DBG_BSD_AIO, AIO_write) | DBG_FUNC_START, - VM_KERNEL_ADDRPERM(p), uap->uaiocbp, 0, 0, 0); + VM_KERNEL_ADDRPERM(p), uap->aiocbp, 0, 0, 0); - error = aio_queue_async_request( p, uap->aiocbp, AIO_WRITE ); + error = aio_queue_async_request(p, uap->aiocbp, AIO_WRITE); KERNEL_DEBUG(BSDDBG_CODE(DBG_BSD_AIO, AIO_write) | DBG_FUNC_END, - VM_KERNEL_ADDRPERM(p), uap->uaiocbp, error, 0, 0); + VM_KERNEL_ADDRPERM(p), uap->aiocbp, error, 0, 0); return error; -} /* aio_write */ +} -static user_addr_t * -aio_copy_in_list(proc_t procp, user_addr_t aiocblist, int nent) +static int +aio_copy_in_list(proc_t procp, user_addr_t aiocblist, user_addr_t *aiocbpp, + int nent) { - user_addr_t *aiocbpp; - int i, result; - - /* we reserve enough space for largest possible pointer size */ - MALLOC( aiocbpp, user_addr_t *, (nent * sizeof(user_addr_t)), M_TEMP, M_WAITOK ); - if (aiocbpp == NULL) { - goto err; - } + int result; /* copyin our aiocb pointers from list */ - result = copyin( aiocblist, aiocbpp, + result = copyin(aiocblist, aiocbpp, proc_is64bit(procp) ? (nent * sizeof(user64_addr_t)) : (nent * sizeof(user32_addr_t))); if (result) { - FREE( aiocbpp, M_TEMP ); - aiocbpp = NULL; - goto err; + return result; } /* @@ -1268,13 +1201,12 @@ aio_copy_in_list(proc_t procp, user_addr_t aiocblist, int nent) user32_addr_t *my_ptrp = ((user32_addr_t *)aiocbpp) + (nent - 1); user_addr_t *my_addrp = aiocbpp + (nent - 1); - for (i = 0; i < nent; i++, my_ptrp--, my_addrp--) { + for (int i = 0; i < nent; i++, my_ptrp--, my_addrp--) { *my_addrp = (user_addr_t) (*my_ptrp); } } -err: - return aiocbpp; + return 0; } @@ -1298,9 +1230,10 @@ aio_copy_in_sigev(proc_t procp, user_addr_t sigp, struct user_sigevent *sigev) * sigev_value yet in the aio context. */ if (proc_is64bit(procp)) { +#if __LP64__ struct user64_sigevent sigevent64; - result = copyin( sigp, &sigevent64, sizeof(sigevent64)); + result = copyin(sigp, &sigevent64, sizeof(sigevent64)); if (result == 0) { sigev->sigev_notify = sigevent64.sigev_notify; sigev->sigev_signo = sigevent64.sigev_signo; @@ -1308,10 +1241,13 @@ aio_copy_in_sigev(proc_t procp, user_addr_t sigp, struct user_sigevent *sigev) sigev->sigev_notify_function = sigevent64.sigev_notify_function; sigev->sigev_notify_attributes = sigevent64.sigev_notify_attributes; } +#else + panic("64bit process on 32bit kernel is not supported"); +#endif } else { struct user32_sigevent sigevent32; - result = copyin( sigp, &sigevent32, sizeof(sigevent32)); + result = copyin(sigp, &sigevent32, sizeof(sigevent32)); if (result == 0) { sigev->sigev_notify = sigevent32.sigev_notify; sigev->sigev_signo = sigevent32.sigev_signo; @@ -1337,7 +1273,7 @@ out: * with no [RTS] (RalTime Signal) option group support. 
*/ static int -aio_sigev_validate( const struct user_sigevent *sigev ) +aio_sigev_validate(const struct user_sigevent *sigev) { switch (sigev->sigev_notify) { case SIGEV_SIGNAL: @@ -1368,7 +1304,7 @@ aio_sigev_validate( const struct user_sigevent *sigev ) /* - * aio_enqueue_work + * aio_try_enqueue_work_locked * * Queue up the entry on the aio asynchronous work queue in priority order * based on the relative priority of the request. We calculate the relative @@ -1376,8 +1312,9 @@ aio_sigev_validate( const struct user_sigevent *sigev ) * * Parameters: procp Process queueing the I/O * entryp The work queue entry being queued + * leader The work leader if any * - * Returns: (void) No failure modes + * Returns: Wether the enqueue was successful * * Notes: This function is used for both lio_listio and aio * @@ -1385,88 +1322,41 @@ aio_sigev_validate( const struct user_sigevent *sigev ) * rather than process priority, but we don't maintain the * adjusted priority for threads the POSIX way. * - * * Called with proc locked. */ -static void -aio_enqueue_work( proc_t procp, aio_workq_entry *entryp, int proc_locked) +static bool +aio_try_enqueue_work_locked(proc_t procp, aio_workq_entry *entryp, + aio_workq_entry *leader) { -#if 0 - aio_workq_entry *my_entryp; /* used for insertion sort */ -#endif /* 0 */ aio_workq_t queue = aio_entry_workq(entryp); - if (proc_locked == 0) { - aio_proc_lock(procp); - } - ASSERT_AIO_PROC_LOCK_OWNED(procp); /* Onto proc queue */ - TAILQ_INSERT_TAIL(&procp->p_aio_activeq, entryp, aio_proc_link); - procp->p_aio_active_count++; - procp->p_aio_total_count++; + if (!aio_try_proc_insert_active_locked(procp, entryp)) { + return false; + } + + if (leader) { + aio_entry_ref(leader); /* consumed in do_aio_completion_and_unlock */ + leader->lio_pending++; + entryp->lio_leader = leader; + } /* And work queue */ + aio_entry_ref(entryp); /* consumed in do_aio_completion_and_unlock */ aio_workq_lock_spin(queue); aio_workq_add_entry_locked(queue, entryp); waitq_wakeup64_one(&queue->aioq_waitq, CAST_EVENT64_T(queue), THREAD_AWAKENED, WAITQ_ALL_PRIORITIES); aio_workq_unlock(queue); - KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_AIO, AIO_work_queued) | DBG_FUNC_START, VM_KERNEL_ADDRPERM(procp), VM_KERNEL_ADDRPERM(entryp->uaiocbp), - entryp->flags, entryp->aiocb.aio_fildes, 0 ); + entryp->flags, entryp->aiocb.aio_fildes, 0); KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_AIO, AIO_work_queued) | DBG_FUNC_END, entryp->aiocb.aio_offset, 0, entryp->aiocb.aio_nbytes, 0, 0); - - if (proc_locked == 0) { - aio_proc_unlock(procp); - } - -#if 0 - /* - * Procedure: - * - * (1) The nice value is in the range PRIO_MIN..PRIO_MAX [-20..20] - * (2) The normalized nice value is in the range 0..((2 * NZERO) - 1) - * which is [0..39], with 0 not being used. In nice values, the - * lower the nice value, the higher the priority. - * (3) The normalized scheduling prioritiy is the highest nice value - * minus the current nice value. In I/O scheduling priority, the - * higher the value the lower the priority, so it is the inverse - * of the nice value (the higher the number, the higher the I/O - * priority). 
- * (4) From the normalized scheduling priority, we subtract the - * request priority to get the request priority value number; - * this means that requests are only capable of depressing their - * priority relative to other requests, - */ - entryp->priority = (((2 * NZERO) - 1) - procp->p_nice); - - /* only premit depressing the priority */ - if (entryp->aiocb.aio_reqprio < 0) { - entryp->aiocb.aio_reqprio = 0; - } - if (entryp->aiocb.aio_reqprio > 0) { - entryp->priority -= entryp->aiocb.aio_reqprio; - if (entryp->priority < 0) { - entryp->priority = 0; - } - } - - /* Insertion sort the entry; lowest ->priority to highest */ - TAILQ_FOREACH(my_entryp, &aio_anchor.aio_async_workq, aio_workq_link) { - if (entryp->priority <= my_entryp->priority) { - TAILQ_INSERT_BEFORE(my_entryp, entryp, aio_workq_link); - break; - } - } - if (my_entryp == NULL) { - TAILQ_INSERT_TAIL( &aio_anchor.aio_async_workq, entryp, aio_workq_link ); - } -#endif /* 0 */ + return true; } @@ -1480,79 +1370,24 @@ aio_enqueue_work( proc_t procp, aio_workq_entry *entryp, int proc_locked) * released by the aio_return call. */ int -lio_listio(proc_t p, struct lio_listio_args *uap, int *retval ) +lio_listio(proc_t p, struct lio_listio_args *uap, int *retval __unused) { - int i; - int call_result; - int result; - int old_count; - aio_workq_entry **entryp_listp; - user_addr_t *aiocbpp; - struct user_sigevent aiosigev; - aio_lio_context *lio_context; - boolean_t free_context = FALSE; + aio_workq_entry *entries[AIO_LISTIO_MAX] = { }; + user_addr_t aiocbpp[AIO_LISTIO_MAX]; + struct user_sigevent aiosigev = { }; + int result = 0; + int lio_count = 0; KERNEL_DEBUG(BSDDBG_CODE(DBG_BSD_AIO, AIO_listio) | DBG_FUNC_START, VM_KERNEL_ADDRPERM(p), uap->nent, uap->mode, 0, 0); - entryp_listp = NULL; - lio_context = NULL; - aiocbpp = NULL; - call_result = -1; - *retval = -1; if (!(uap->mode == LIO_NOWAIT || uap->mode == LIO_WAIT)) { - call_result = EINVAL; + result = EINVAL; goto ExitRoutine; } if (uap->nent < 1 || uap->nent > AIO_LISTIO_MAX) { - call_result = EINVAL; - goto ExitRoutine; - } - - /* - * allocate a list of aio_workq_entry pointers that we will use - * to queue up all our requests at once while holding our lock. - */ - MALLOC( entryp_listp, void *, (uap->nent * sizeof(aio_workq_entry *)), M_TEMP, M_WAITOK ); - if (entryp_listp == NULL) { - call_result = EAGAIN; - goto ExitRoutine; - } - - /* - * lio_context ownership rules go as follow: - * - * - when the mode is LIO_WAIT, and that the AIOs aren't cancelled, - * this function will perform the deallocation. - * - * - when the mode is LIO_WAIT but AIOs are cancelled, then io_waiter is - * forced to '0' (pretending the mode is LIO_NOWAIT) and the ownership is - * handed over to the async path. - * - * - when the mode is LIO_NOWAIT, then the aio thread is responsible for - * cleaning up the context. - * - * However, there is a last case, which is when none of the commands pass - * preflight and no submission is done, in this case this function is - * responsible for cleanup. 
- */ - MALLOC( lio_context, aio_lio_context*, sizeof(aio_lio_context), M_TEMP, M_WAITOK ); - if (lio_context == NULL) { - call_result = EAGAIN; - goto ExitRoutine; - } - -#if DEBUG - OSIncrementAtomic(&lio_contexts_alloced); -#endif /* DEBUG */ - - free_context = TRUE; - bzero(lio_context, sizeof(aio_lio_context)); - - aiocbpp = aio_copy_in_list(p, uap->aiocblist, uap->nent); - if (aiocbpp == NULL) { - call_result = EAGAIN; + result = EINVAL; goto ExitRoutine; } @@ -1560,155 +1395,111 @@ lio_listio(proc_t p, struct lio_listio_args *uap, int *retval ) * Use sigevent passed in to lio_listio for each of our calls, but * only do completion notification after the last request completes. */ - bzero(&aiosigev, sizeof(aiosigev)); - /* Only copy in an sigev if the user supplied one */ if (uap->sigp != USER_ADDR_NULL) { - call_result = aio_copy_in_sigev(p, uap->sigp, &aiosigev); - if (call_result) { + result = aio_copy_in_sigev(p, uap->sigp, &aiosigev); + if (result) { goto ExitRoutine; } - call_result = aio_sigev_validate(&aiosigev); - if (call_result) { + result = aio_sigev_validate(&aiosigev); + if (result) { goto ExitRoutine; } } - /* process list of aio requests */ - free_context = FALSE; - lio_context->io_issued = uap->nent; - lio_context->io_waiter = uap->mode == LIO_WAIT ? 1 : 0; /* Should it be freed by last AIO */ - for (i = 0; i < uap->nent; i++) { - user_addr_t my_aiocbp; - aio_workq_entry *entryp; + if (aio_copy_in_list(p, uap->aiocblist, aiocbpp, uap->nent)) { + result = EAGAIN; + goto ExitRoutine; + } - *(entryp_listp + i) = NULL; - my_aiocbp = *(aiocbpp + i); + /* + * allocate/parse all entries + */ + for (int i = 0; i < uap->nent; i++) { + aio_workq_entry *entryp; /* NULL elements are legal so check for 'em */ - if (my_aiocbp == USER_ADDR_NULL) { - aio_proc_lock_spin(p); - if (--lio_context->io_issued == 0) { - /* no submission made, needs cleanup */ - free_context = TRUE; - } - aio_proc_unlock(p); + if (aiocbpp[i] == USER_ADDR_NULL) { continue; } - /* - * We use lio_context to mark IO requests for delayed completion - * processing which means we wait until all IO requests in the - * group have completed before we either return to the caller - * when mode is LIO_WAIT or signal user when mode is LIO_NOWAIT. - * - * We use the address of the lio_context for this, since it is - * unique in the address space. 
- */ - result = lio_create_entry( p, my_aiocbp, lio_context, (entryp_listp + i)); - if (result != 0 && call_result == -1) { - call_result = result; - } - - /* NULL elements are legal so check for 'em */ - entryp = *(entryp_listp + i); + entryp = aio_create_queue_entry(p, aiocbpp[i], AIO_LIO); if (entryp == NULL) { - aio_proc_lock_spin(p); - if (--lio_context->io_issued == 0) { - /* no submission made, needs cleanup */ - free_context = TRUE; - } - aio_proc_unlock(p); - continue; + result = EAGAIN; + goto ExitRoutine; } + /* + * This refcount is cleaned up on exit if the entry + * isn't submitted + */ + entries[lio_count++] = entryp; if (uap->mode == LIO_NOWAIT) { /* Set signal hander, if any */ entryp->aiocb.aio_sigevent = aiosigev; - } else { - /* flag that this thread blocks pending completion */ - entryp->flags |= AIO_LIO_NOTIFY; - } - - /* check our aio limits to throttle bad or rude user land behavior */ - old_count = aio_increment_total_count(); - - aio_proc_lock_spin(p); - if (old_count >= aio_max_requests || - aio_get_process_count( entryp->procp ) >= aio_max_requests_per_process || - is_already_queued( entryp->procp, entryp->uaiocbp ) == TRUE) { - if (--lio_context->io_issued == 0) { - /* no submission made, needs cleanup */ - free_context = TRUE; - } - aio_proc_unlock(p); - - aio_decrement_total_count(); - - if (call_result == -1) { - call_result = EAGAIN; - } - aio_free_request(entryp); - entryp_listp[i] = NULL; - continue; } - - lck_mtx_convert_spin(aio_proc_mutex(p)); - aio_enqueue_work(p, entryp, 1); - aio_proc_unlock(p); } - if (free_context) { - /* no submission was made, just exit */ + if (lio_count == 0) { + /* There's nothing to submit */ goto ExitRoutine; } + /* + * Past this point we're commited and will not bail out + * + * - keep a reference on the leader for LIO_WAIT + * - perform the submissions and optionally wait + */ + + aio_workq_entry *leader = entries[0]; if (uap->mode == LIO_WAIT) { - aio_proc_lock_spin(p); + aio_entry_ref(leader); /* consumed below */ + } + + aio_proc_lock_spin(p); + + for (int i = 0; i < lio_count; i++) { + if (aio_try_enqueue_work_locked(p, entries[i], leader)) { + entries[i] = NULL; /* the entry was submitted */ + } else { + result = EAGAIN; + } + } - while (lio_context->io_completed < lio_context->io_issued) { - result = msleep(lio_context, aio_proc_mutex(p), PCATCH | PRIBIO | PSPIN, "lio_listio", 0); + if (uap->mode == LIO_WAIT && result == 0) { + leader->flags |= AIO_LIO_WAIT; + while (leader->lio_pending) { /* If we were interrupted, fail out (even if all finished) */ - if (result != 0) { - call_result = EINTR; + if (msleep(leader, aio_proc_mutex(p), + PCATCH | PRIBIO | PSPIN, "lio_listio", 0) != 0) { + result = EINTR; break; } } - if (lio_context->io_completed == lio_context->io_issued) { - /* If all IOs have finished must free it */ - free_context = TRUE; - } else { - /* handoff to the async codepath for clean up */ - assert(call_result == EINTR); - lio_context->io_waiter = 0; - } - - aio_proc_unlock(p); + leader->flags &= ~AIO_LIO_WAIT; } - /* call_result == -1 means we had no trouble queueing up requests */ - if (call_result == -1) { - call_result = 0; - *retval = 0; + aio_proc_unlock(p); + + if (uap->mode == LIO_WAIT) { + aio_entry_unref(leader); } ExitRoutine: - if (entryp_listp != NULL) { - FREE( entryp_listp, M_TEMP ); - } - if (aiocbpp != NULL) { - FREE( aiocbpp, M_TEMP ); - } - if (free_context) { - free_lio_context(lio_context); + /* Consume unsubmitted entries */ + for (int i = 0; i < lio_count; i++) { + if (entries[i]) { 
+ aio_entry_unref(entries[i]); + } } KERNEL_DEBUG(BSDDBG_CODE(DBG_BSD_AIO, AIO_listio) | DBG_FUNC_END, - VM_KERNEL_ADDRPERM(p), call_result, 0, 0, 0); + VM_KERNEL_ADDRPERM(p), result, 0, 0, 0); - return call_result; -} /* lio_listio */ + return result; +} /* @@ -1718,7 +1509,7 @@ ExitRoutine: */ __attribute__((noreturn)) static void -aio_work_thread(void) +aio_work_thread(void *arg __unused, wait_result_t wr __unused) { aio_workq_entry *entryp; int error; @@ -1726,6 +1517,7 @@ aio_work_thread(void) vm_map_t oldmap = VM_MAP_NULL; task_t oldaiotask = TASK_NULL; struct uthread *uthreadp = NULL; + proc_t p = NULL; for (;;) { /* @@ -1733,6 +1525,7 @@ aio_work_thread(void) * sleeps until work is available. */ entryp = aio_get_some_work(); + p = entryp->procp; KERNEL_DEBUG(BSDDBG_CODE(DBG_BSD_AIO, AIO_worker_thread) | DBG_FUNC_START, VM_KERNEL_ADDRPERM(p), VM_KERNEL_ADDRPERM(entryp->uaiocbp), @@ -1743,68 +1536,51 @@ aio_work_thread(void) * of the IO. Note: don't need to have the entryp locked, * because the proc and map don't change until it's freed. */ - currentmap = get_task_map((current_proc())->task ); + currentmap = get_task_map((current_proc())->task); if (currentmap != entryp->aio_map) { uthreadp = (struct uthread *) get_bsdthread_info(current_thread()); oldaiotask = uthreadp->uu_aio_task; - uthreadp->uu_aio_task = entryp->procp->task; - oldmap = vm_map_switch( entryp->aio_map ); + /* + * workq entries at this stage cause _aio_exec() and _aio_exit() to + * block until we hit `do_aio_completion_and_unlock()` below, + * which means that it is safe to dereference p->task without + * holding a lock or taking references. + */ + uthreadp->uu_aio_task = p->task; + oldmap = vm_map_switch(entryp->aio_map); } if ((entryp->flags & AIO_READ) != 0) { - error = do_aio_read( entryp ); + error = do_aio_read(entryp); } else if ((entryp->flags & AIO_WRITE) != 0) { - error = do_aio_write( entryp ); + error = do_aio_write(entryp); } else if ((entryp->flags & (AIO_FSYNC | AIO_DSYNC)) != 0) { - error = do_aio_fsync( entryp ); + error = do_aio_fsync(entryp); } else { - printf( "%s - unknown aio request - flags 0x%02X \n", - __FUNCTION__, entryp->flags ); error = EINVAL; } /* Restore old map */ if (currentmap != entryp->aio_map) { - (void) vm_map_switch( oldmap ); + vm_map_switch(oldmap); uthreadp->uu_aio_task = oldaiotask; } + /* liberate unused map */ + vm_map_deallocate(entryp->aio_map); + entryp->aio_map = VM_MAP_NULL; + KERNEL_DEBUG(SDDBG_CODE(DBG_BSD_AIO, AIO_worker_thread) | DBG_FUNC_END, VM_KERNEL_ADDRPERM(p), VM_KERNEL_ADDRPERM(entryp->uaiocbp), entryp->errorval, entryp->returnval, 0); - - /* XXX COUNTS */ - aio_entry_lock_spin(entryp); - entryp->errorval = error; - aio_entry_unlock(entryp); - /* we're done with the IO request so pop it off the active queue and */ /* push it on the done queue */ - aio_proc_lock(entryp->procp); - aio_proc_move_done_locked(entryp->procp, entryp); - aio_proc_unlock(entryp->procp); - - OSDecrementAtomic(&aio_anchor.aio_inflight_count); - - /* remove our reference to the user land map. 
*/ - if (VM_MAP_NULL != entryp->aio_map) { - vm_map_t my_map; - - my_map = entryp->aio_map; - entryp->aio_map = VM_MAP_NULL; - vm_map_deallocate( my_map ); - } - - /* Provide notifications */ - do_aio_completion( entryp ); - - /* Will free if needed */ - aio_entry_unref(entryp); - } /* for ( ;; ) */ - - /* NOT REACHED */ -} /* aio_work_thread */ + aio_proc_lock(p); + entryp->errorval = error; + do_aio_completion_and_unlock(p, entryp); + } +} /* @@ -1814,7 +1590,7 @@ aio_work_thread(void) * NOTE - AIO_LOCK must be held by caller */ static aio_workq_entry * -aio_get_some_work( void ) +aio_get_some_work(void) { aio_workq_entry *entryp = NULL; aio_workq_t queue = NULL; @@ -1822,9 +1598,6 @@ aio_get_some_work( void ) /* Just one queue for the moment. In the future there will be many. */ queue = &aio_anchor.aio_async_workqs[0]; aio_workq_lock_spin(queue); - if (queue->aioq_count == 0) { - goto nowork; - } /* * Hold the queue lock. @@ -1832,20 +1605,11 @@ aio_get_some_work( void ) * pop some work off the work queue and add to our active queue * Always start with the queue lock held. */ - for (;;) { + while ((entryp = TAILQ_FIRST(&queue->aioq_entries))) { /* * Pull of of work queue. Once it's off, it can't be cancelled, * so we can take our ref once we drop the queue lock. */ - entryp = TAILQ_FIRST(&queue->aioq_entries); - - /* - * If there's no work or only fsyncs that need delay, go to sleep - * and then start anew from aio_work_thread - */ - if (entryp == NULL) { - goto nowork; - } aio_workq_remove_entry_locked(queue, entryp); @@ -1861,7 +1625,7 @@ aio_get_some_work( void ) * in this proc's queue. */ aio_proc_lock_spin(entryp->procp); - if (aio_delay_fsync_request( entryp )) { + if (aio_delay_fsync_request(entryp)) { /* It needs to be delayed. Put it back on the end of the work queue */ KERNEL_DEBUG(BSDDBG_CODE(DBG_BSD_AIO, AIO_fsync_delay) | DBG_FUNC_NONE, VM_KERNEL_ADDRPERM(p), VM_KERNEL_ADDRPERM(entryp->uaiocbp), @@ -1876,22 +1640,15 @@ aio_get_some_work( void ) aio_proc_unlock(entryp->procp); } - break; + return entryp; } - aio_entry_ref(entryp); - - OSIncrementAtomic(&aio_anchor.aio_inflight_count); - return entryp; - -nowork: /* We will wake up when someone enqueues something */ waitq_assert_wait64(&queue->aioq_waitq, CAST_EVENT64_T(queue), THREAD_UNINT, 0); aio_workq_unlock(queue); - thread_block((thread_continue_t)aio_work_thread ); + thread_block(aio_work_thread); - // notreached - return NULL; + __builtin_unreachable(); } /* @@ -1900,7 +1657,7 @@ nowork: * not been completed. 
*/ static boolean_t -aio_delay_fsync_request( aio_workq_entry *entryp ) +aio_delay_fsync_request(aio_workq_entry *entryp) { if (proc_in_teardown(entryp->procp)) { /* @@ -1915,77 +1672,53 @@ aio_delay_fsync_request( aio_workq_entry *entryp ) } return TRUE; -} /* aio_delay_fsync_request */ +} static aio_workq_entry * -aio_create_queue_entry(proc_t procp, user_addr_t aiocbp, void *group_tag, int kindOfIO) +aio_create_queue_entry(proc_t procp, user_addr_t aiocbp, aio_entry_flags_t flags) { aio_workq_entry *entryp; - int result = 0; - - entryp = (aio_workq_entry *) zalloc( aio_workq_zonep ); - if (entryp == NULL) { - result = EAGAIN; - goto error_exit; - } - bzero( entryp, sizeof(*entryp)); - - /* fill in the rest of the aio_workq_entry */ + entryp = zalloc_flags(aio_workq_zonep, Z_WAITOK | Z_ZERO); entryp->procp = procp; entryp->uaiocbp = aiocbp; - entryp->flags |= kindOfIO; - entryp->group_tag = group_tag; - entryp->aio_map = VM_MAP_NULL; - entryp->aio_refcount = 0; + entryp->flags = flags; + /* consumed in aio_return or _aio_exit */ + os_ref_init(&entryp->aio_refcount, &aio_refgrp); if (proc_is64bit(procp)) { struct user64_aiocb aiocb64; - result = copyin( aiocbp, &aiocb64, sizeof(aiocb64)); - if (result == 0) { - do_munge_aiocb_user64_to_user(&aiocb64, &entryp->aiocb); + if (copyin(aiocbp, &aiocb64, sizeof(aiocb64)) != 0) { + goto error_exit; } + do_munge_aiocb_user64_to_user(&aiocb64, &entryp->aiocb); } else { struct user32_aiocb aiocb32; - result = copyin( aiocbp, &aiocb32, sizeof(aiocb32)); - if (result == 0) { - do_munge_aiocb_user32_to_user( &aiocb32, &entryp->aiocb ); + if (copyin(aiocbp, &aiocb32, sizeof(aiocb32)) != 0) { + goto error_exit; } + do_munge_aiocb_user32_to_user(&aiocb32, &entryp->aiocb); } - if (result != 0) { - result = EAGAIN; + /* do some more validation on the aiocb and embedded file descriptor */ + if (aio_validate(procp, entryp) != 0) { goto error_exit; } /* get a reference to the user land map in order to keep it around */ - entryp->aio_map = get_task_map( procp->task ); - vm_map_reference( entryp->aio_map ); - - /* do some more validation on the aiocb and embedded file descriptor */ - result = aio_validate( entryp ); - if (result != 0) { - goto error_exit_with_ref; - } + entryp->aio_map = get_task_map(procp->task); + vm_map_reference(entryp->aio_map); /* get a reference on the current_thread, which is passed in vfs_context. */ entryp->thread = current_thread(); - thread_reference( entryp->thread ); + thread_reference(entryp->thread); return entryp; -error_exit_with_ref: - if (VM_MAP_NULL != entryp->aio_map) { - vm_map_deallocate( entryp->aio_map ); - } error_exit: - if (result && entryp != NULL) { - zfree( aio_workq_zonep, entryp ); - entryp = NULL; - } - - return entryp; + zfree(aio_workq_zonep, entryp); + return NULL; } @@ -1996,43 +1729,23 @@ error_exit: * processing the request. 
*/ static int -aio_queue_async_request(proc_t procp, user_addr_t aiocbp, int kindOfIO ) +aio_queue_async_request(proc_t procp, user_addr_t aiocbp, + aio_entry_flags_t flags) { aio_workq_entry *entryp; int result; - int old_count; - - old_count = aio_increment_total_count(); - if (old_count >= aio_max_requests) { - result = EAGAIN; - goto error_noalloc; - } - entryp = aio_create_queue_entry( procp, aiocbp, 0, kindOfIO); + entryp = aio_create_queue_entry(procp, aiocbp, flags); if (entryp == NULL) { result = EAGAIN; goto error_noalloc; } - aio_proc_lock_spin(procp); - - if (is_already_queued( entryp->procp, entryp->uaiocbp ) == TRUE) { - result = EAGAIN; - goto error_exit; - } - - /* check our aio limits to throttle bad or rude user land behavior */ - if (aio_get_process_count( procp ) >= aio_max_requests_per_process) { - printf("aio_queue_async_request(): too many in flight for proc: %d.\n", procp->p_aio_total_count); + if (!aio_try_enqueue_work_locked(procp, entryp, NULL)) { result = EAGAIN; goto error_exit; } - - /* Add the IO to proc and work queues, wake up threads as appropriate */ - lck_mtx_convert_spin(aio_proc_mutex(procp)); - aio_enqueue_work(procp, entryp, 1); - aio_proc_unlock(procp); return 0; @@ -2043,79 +1756,9 @@ error_exit: */ aio_proc_unlock(procp); aio_free_request(entryp); - error_noalloc: - aio_decrement_total_count(); - return result; -} /* aio_queue_async_request */ - - -/* - * lio_create_entry - * - * Allocate an aio_workq_entry and fill it in. If all goes well return 0 - * and pass the aio_workq_entry pointer back to our caller. - * - * Parameters: procp The process makign the request - * aiocbp The aio context buffer pointer - * group_tag The group tag used to indicate a - * group of operations has completed - * entrypp Pointer to the pointer to receive the - * address of the created aio_workq_entry - * - * Returns: 0 Successfully created - * EAGAIN Try again (usually resource shortage) - * - * - * Notes: We get a reference to our caller's user land map in order - * to keep it around while we are processing the request. - * - * lio_listio calls behave differently at completion they do - * completion notification when all async IO requests have - * completed. We use group_tag to tag IO requests that behave - * in the delay notification manner. - * - * All synchronous operations are considered to not have a - * signal routine associated with them (sigp == USER_ADDR_NULL). - */ -static int -lio_create_entry(proc_t procp, user_addr_t aiocbp, void *group_tag, - aio_workq_entry **entrypp ) -{ - aio_workq_entry *entryp; - int result; - - entryp = aio_create_queue_entry( procp, aiocbp, group_tag, AIO_LIO); - if (entryp == NULL) { - result = EAGAIN; - goto error_exit; - } - - /* - * Look for lio_listio LIO_NOP requests and ignore them; this is - * not really an error, but we need to free our aio_workq_entry. - */ - if (entryp->aiocb.aio_lio_opcode == LIO_NOP) { - result = 0; - goto error_exit; - } - - *entrypp = entryp; - return 0; - -error_exit: - - if (entryp != NULL) { - /* - * This entry has not been queued up so no worries about - * unlocked state and aio_map - */ - aio_free_request(entryp); - } - - return result; -} /* lio_create_entry */ +} /* @@ -2123,10 +1766,13 @@ error_exit: * free the work queue entry resources. The entry is off all lists * and has zero refcount, so no one can have a pointer to it. 
*/ - -static int +static void aio_free_request(aio_workq_entry *entryp) { + if (entryp->aio_proc_link.tqe_prev || entryp->aio_workq_link.tqe_prev) { + panic("aio_workq_entry %p being freed while still enqueued", entryp); + } + /* remove our reference to the user land map. */ if (VM_MAP_NULL != entryp->aio_map) { vm_map_deallocate(entryp->aio_map); @@ -2134,15 +1780,11 @@ aio_free_request(aio_workq_entry *entryp) /* remove our reference to thread which enqueued the request */ if (NULL != entryp->thread) { - thread_deallocate( entryp->thread ); + thread_deallocate(entryp->thread); } - entryp->aio_refcount = -1; /* A bit of poisoning in case of bad refcounting. */ - - zfree( aio_workq_zonep, entryp ); - - return 0; -} /* aio_free_request */ + zfree(aio_workq_zonep, entryp); +} /* @@ -2151,7 +1793,7 @@ aio_free_request(aio_workq_entry *entryp) * validate the aiocb passed in by one of the aio syscalls. */ static int -aio_validate( aio_workq_entry *entryp ) +aio_validate(proc_t p, aio_workq_entry *entryp) { struct fileproc *fp; int flag; @@ -2192,118 +1834,49 @@ aio_validate( aio_workq_entry *entryp ) /* validate the file descriptor and that the file was opened * for the appropriate read / write access. */ - proc_fdlock(entryp->procp); - - result = fp_lookup( entryp->procp, entryp->aiocb.aio_fildes, &fp, 1); - if (result == 0) { - if ((fp->f_fglob->fg_flag & flag) == 0) { - /* we don't have read or write access */ - result = EBADF; - } else if (FILEGLOB_DTYPE(fp->f_fglob) != DTYPE_VNODE) { - /* this is not a file */ - result = ESPIPE; - } else { - fp->f_flags |= FP_AIOISSUED; - } + proc_fdlock(p); - fp_drop(entryp->procp, entryp->aiocb.aio_fildes, fp, 1); - } else { + fp = fp_get_noref_locked(p, entryp->aiocb.aio_fildes); + if (fp == NULL) { result = EBADF; + } else if ((fp->fp_glob->fg_flag & flag) == 0) { + /* we don't have read or write access */ + result = EBADF; + } else if (FILEGLOB_DTYPE(fp->fp_glob) != DTYPE_VNODE) { + /* this is not a file */ + result = ESPIPE; + } else { + fp->fp_flags |= FP_AIOISSUED; } - proc_fdunlock(entryp->procp); + proc_fdunlock(p); return result; -} /* aio_validate */ - -static int -aio_increment_total_count(void) -{ - return OSIncrementAtomic(&aio_anchor.aio_total_count); -} - -static int -aio_decrement_total_count(void) -{ - int old = OSDecrementAtomic(&aio_anchor.aio_total_count); - if (old <= 0) { - panic("Negative total AIO count!\n"); - } - - return old; } -static int -aio_get_process_count(proc_t procp) -{ - return procp->p_aio_total_count; -} /* aio_get_process_count */ - -static int -aio_get_all_queues_count( void ) -{ - return aio_anchor.aio_total_count; -} /* aio_get_all_queues_count */ - - /* - * do_aio_completion. Handle async IO completion. + * do_aio_completion_and_unlock. Handle async IO completion. */ static void -do_aio_completion( aio_workq_entry *entryp ) +do_aio_completion_and_unlock(proc_t p, aio_workq_entry *entryp) { - boolean_t lastLioCompleted = FALSE; - aio_lio_context *lio_context = NULL; - int waiter = 0; - - lio_context = (aio_lio_context *)entryp->group_tag; + aio_workq_entry *leader = entryp->lio_leader; + int lio_pending = 0; + bool do_signal = false; - if (lio_context != NULL) { - aio_proc_lock_spin(entryp->procp); + ASSERT_AIO_PROC_LOCK_OWNED(p); - /* Account for this I/O completing. */ - lio_context->io_completed++; + aio_proc_move_done_locked(p, entryp); - /* Are we done with this lio context? 
*/ - if (lio_context->io_issued == lio_context->io_completed) { - lastLioCompleted = TRUE; + if (leader) { + lio_pending = --leader->lio_pending; + if (lio_pending < 0) { + panic("lio_pending accounting mistake"); } - - waiter = lio_context->io_waiter; - - /* explicit wakeup of lio_listio() waiting in LIO_WAIT */ - if ((entryp->flags & AIO_LIO_NOTIFY) && (lastLioCompleted) && (waiter != 0)) { - /* wake up the waiter */ - wakeup(lio_context); + if (lio_pending == 0 && (leader->flags & AIO_LIO_WAIT)) { + wakeup(leader); } - - aio_proc_unlock(entryp->procp); - } - - if (entryp->aiocb.aio_sigevent.sigev_notify == SIGEV_SIGNAL && - (entryp->flags & AIO_DISABLE) == 0) { - boolean_t performSignal = FALSE; - if (lio_context == NULL) { - performSignal = TRUE; - } else { - /* - * If this was the last request in the group and a signal - * is desired, send one. - */ - performSignal = lastLioCompleted; - } - - if (performSignal) { - KERNEL_DEBUG(BSDDBG_CODE(DBG_BSD_AIO, AIO_completion_sig) | DBG_FUNC_NONE, - VM_KERNEL_ADDRPERM(p), VM_KERNEL_ADDRPERM(entryp->uaiocbp), - entryp->aiocb.aio_sigevent.sigev_signo, 0, 0); - - psignal( entryp->procp, entryp->aiocb.aio_sigevent.sigev_signo ); - } - } - - if ((entryp->flags & AIO_EXIT_WAIT) && (entryp->flags & AIO_CLOSE_WAIT)) { - panic("Close and exit flags set at the same time\n"); + entryp->lio_leader = NULL; /* no dangling pointers please */ } /* @@ -2314,57 +1887,56 @@ do_aio_completion( aio_workq_entry *entryp ) * none then wakeup using the AIO_CLEANUP_SLEEP_CHAN tsleep channel. * If there are some still active then do nothing - we only want to * wakeup when all active aio requests for the process are complete. - * - * Don't need to lock the entry or proc to check the cleanup flag. It can only be - * set for cancellation, while the entryp is still on a proc list; now it's - * off, so that flag is already set if it's going to be. */ - if ((entryp->flags & AIO_EXIT_WAIT) != 0) { - int active_requests; - + if (__improbable(entryp->flags & AIO_EXIT_WAIT)) { KERNEL_DEBUG(BSDDBG_CODE(DBG_BSD_AIO, AIO_completion_cleanup_wait) | DBG_FUNC_NONE, VM_KERNEL_ADDRPERM(p), VM_KERNEL_ADDRPERM(entryp->uaiocbp), 0, 0, 0); - aio_proc_lock_spin(entryp->procp); - active_requests = aio_active_requests_for_process( entryp->procp ); - if (active_requests < 1) { + if (!aio_has_active_requests_for_process(p)) { /* * no active aio requests for this process, continue exiting. In this * case, there should be no one else waiting ont he proc in AIO... */ - wakeup_one((caddr_t)&entryp->procp->AIO_CLEANUP_SLEEP_CHAN); - aio_proc_unlock(entryp->procp); + wakeup_one((caddr_t)&p->AIO_CLEANUP_SLEEP_CHAN); KERNEL_DEBUG(BSDDBG_CODE(DBG_BSD_AIO, AIO_completion_cleanup_wake) | DBG_FUNC_NONE, VM_KERNEL_ADDRPERM(p), VM_KERNEL_ADDRPERM(entryp->uaiocbp), 0, 0, 0); - } else { - aio_proc_unlock(entryp->procp); } + } else if (entryp->aiocb.aio_sigevent.sigev_notify == SIGEV_SIGNAL) { + /* + * If this was the last request in the group, or not part of + * a group, and that a signal is desired, send one. 
+ */ + do_signal = (lio_pending == 0); } - if ((entryp->flags & AIO_CLOSE_WAIT) != 0) { - int active_requests; - + if (__improbable(entryp->flags & AIO_CLOSE_WAIT)) { KERNEL_DEBUG(BSDDBG_CODE(DBG_BSD_AIO, AIO_completion_cleanup_wait) | DBG_FUNC_NONE, VM_KERNEL_ADDRPERM(p), VM_KERNEL_ADDRPERM(entryp->uaiocbp), 0, 0, 0); - aio_proc_lock_spin(entryp->procp); - active_requests = aio_proc_active_requests_for_file( entryp->procp, entryp->aiocb.aio_fildes); - if (active_requests < 1) { + if (!aio_proc_has_active_requests_for_file(p, entryp->aiocb.aio_fildes)) { /* Can't wakeup_one(); multiple closes might be in progress. */ - wakeup(&entryp->procp->AIO_CLEANUP_SLEEP_CHAN); - aio_proc_unlock(entryp->procp); + wakeup(&p->AIO_CLEANUP_SLEEP_CHAN); KERNEL_DEBUG(BSDDBG_CODE(DBG_BSD_AIO, AIO_completion_cleanup_wake) | DBG_FUNC_NONE, VM_KERNEL_ADDRPERM(p), VM_KERNEL_ADDRPERM(entryp->uaiocbp), 0, 0, 0); - } else { - aio_proc_unlock(entryp->procp); } } + + aio_proc_unlock(p); + + if (do_signal) { + KERNEL_DEBUG(BSDDBG_CODE(DBG_BSD_AIO, AIO_completion_sig) | DBG_FUNC_NONE, + VM_KERNEL_ADDRPERM(p), VM_KERNEL_ADDRPERM(entryp->uaiocbp), + entryp->aiocb.aio_sigevent.sigev_signo, 0, 0); + + psignal(p, entryp->aiocb.aio_sigevent.sigev_signo); + } + /* * A thread in aio_suspend() wants to known about completed IOs. If it checked * the done list before we moved our AIO there, then it already asserted its wait, @@ -2372,135 +1944,130 @@ do_aio_completion( aio_workq_entry *entryp ) * we did our move, then it already has seen the AIO that we moved. Herego, we * can do our wakeup without holding the lock. */ - wakeup((caddr_t) &entryp->procp->AIO_SUSPEND_SLEEP_CHAN ); + wakeup(&p->AIO_SUSPEND_SLEEP_CHAN); KERNEL_DEBUG(BSDDBG_CODE(DBG_BSD_AIO, AIO_completion_suspend_wake) | DBG_FUNC_NONE, VM_KERNEL_ADDRPERM(p), VM_KERNEL_ADDRPERM(entryp->uaiocbp), 0, 0, 0); - /* - * free the LIO context if the last lio completed and no thread is - * waiting - */ - if (lastLioCompleted && (waiter == 0)) { - free_lio_context(lio_context); + aio_entry_unref(entryp); /* see aio_try_enqueue_work_locked */ + if (leader) { + aio_entry_unref(leader); /* see lio_listio */ } -} /* do_aio_completion */ +} /* * do_aio_read */ static int -do_aio_read( aio_workq_entry *entryp ) +do_aio_read(aio_workq_entry *entryp) { - struct fileproc *fp; - int error; - struct vfs_context context; + struct proc *p = entryp->procp; + struct fileproc *fp; + int error; - if ((error = fp_lookup(entryp->procp, entryp->aiocb.aio_fildes, &fp, 0))) { + if ((error = fp_lookup(p, entryp->aiocb.aio_fildes, &fp, 0))) { return error; } - if ((fp->f_fglob->fg_flag & FREAD) == 0) { - fp_drop(entryp->procp, entryp->aiocb.aio_fildes, fp, 0); - return EBADF; - } - context.vc_thread = entryp->thread; /* XXX */ - context.vc_ucred = fp->f_fglob->fg_cred; + if (fp->fp_glob->fg_flag & FREAD) { + struct vfs_context context = { + .vc_thread = entryp->thread, /* XXX */ + .vc_ucred = fp->fp_glob->fg_cred, + }; - error = dofileread(&context, fp, - entryp->aiocb.aio_buf, - entryp->aiocb.aio_nbytes, - entryp->aiocb.aio_offset, FOF_OFFSET, - &entryp->returnval); - fp_drop(entryp->procp, entryp->aiocb.aio_fildes, fp, 0); + error = dofileread(&context, fp, + entryp->aiocb.aio_buf, + entryp->aiocb.aio_nbytes, + entryp->aiocb.aio_offset, FOF_OFFSET, + &entryp->returnval); + } else { + error = EBADF; + } + fp_drop(p, entryp->aiocb.aio_fildes, fp, 0); return error; -} /* do_aio_read */ +} /* * do_aio_write */ static int -do_aio_write( aio_workq_entry *entryp ) +do_aio_write(aio_workq_entry *entryp) { - 
struct fileproc *fp; - int error, flags; - struct vfs_context context; + struct proc *p = entryp->procp; + struct fileproc *fp; + int error; - if ((error = fp_lookup(entryp->procp, entryp->aiocb.aio_fildes, &fp, 0))) { + if ((error = fp_lookup(p, entryp->aiocb.aio_fildes, &fp, 0))) { return error; } - if ((fp->f_fglob->fg_flag & FWRITE) == 0) { - fp_drop(entryp->procp, entryp->aiocb.aio_fildes, fp, 0); - return EBADF; - } - - flags = FOF_PCRED; - if ((fp->f_fglob->fg_flag & O_APPEND) == 0) { - flags |= FOF_OFFSET; - } - context.vc_thread = entryp->thread; /* XXX */ - context.vc_ucred = fp->f_fglob->fg_cred; + if (fp->fp_glob->fg_flag & FWRITE) { + struct vfs_context context = { + .vc_thread = entryp->thread, /* XXX */ + .vc_ucred = fp->fp_glob->fg_cred, + }; + int flags = FOF_PCRED; - /* NB: tell dofilewrite the offset, and to use the proc cred */ - error = dofilewrite(&context, - fp, - entryp->aiocb.aio_buf, - entryp->aiocb.aio_nbytes, - entryp->aiocb.aio_offset, - flags, - &entryp->returnval); + if ((fp->fp_glob->fg_flag & O_APPEND) == 0) { + flags |= FOF_OFFSET; + } - if (entryp->returnval) { - fp_drop_written(entryp->procp, entryp->aiocb.aio_fildes, fp); + /* NB: tell dofilewrite the offset, and to use the proc cred */ + error = dofilewrite(&context, + fp, + entryp->aiocb.aio_buf, + entryp->aiocb.aio_nbytes, + entryp->aiocb.aio_offset, + flags, + &entryp->returnval); } else { - fp_drop(entryp->procp, entryp->aiocb.aio_fildes, fp, 0); + error = EBADF; } + fp_drop(p, entryp->aiocb.aio_fildes, fp, 0); return error; -} /* do_aio_write */ +} /* - * aio_active_requests_for_process - return number of active async IO - * requests for the given process. + * aio_has_active_requests_for_process - return whether the process has active + * requests pending. */ -static int -aio_active_requests_for_process(proc_t procp) +static bool +aio_has_active_requests_for_process(proc_t procp) { - return procp->p_aio_active_count; -} /* aio_active_requests_for_process */ + return !TAILQ_EMPTY(&procp->p_aio_activeq); +} /* * Called with the proc locked. 
*/ -static int -aio_proc_active_requests_for_file(proc_t procp, int fd) +static bool +aio_proc_has_active_requests_for_file(proc_t procp, int fd) { - int count = 0; aio_workq_entry *entryp; + TAILQ_FOREACH(entryp, &procp->p_aio_activeq, aio_proc_link) { if (entryp->aiocb.aio_fildes == fd) { - count++; + return true; } } - return count; -} /* aio_active_requests_for_process */ - + return false; +} /* * do_aio_fsync */ static int -do_aio_fsync( aio_workq_entry *entryp ) +do_aio_fsync(aio_workq_entry *entryp) { - struct vfs_context context; - struct vnode *vp; - struct fileproc *fp; + struct proc *p = entryp->procp; + struct vnode *vp; + struct fileproc *fp; int sync_flag; int error; @@ -2524,28 +2091,29 @@ do_aio_fsync( aio_workq_entry *entryp ) sync_flag = MNT_DWAIT; } - error = fp_getfvp( entryp->procp, entryp->aiocb.aio_fildes, &fp, &vp); - if (error == 0) { - if ((error = vnode_getwithref(vp))) { - fp_drop(entryp->procp, entryp->aiocb.aio_fildes, fp, 0); - entryp->returnval = -1; - return error; - } - context.vc_thread = current_thread(); - context.vc_ucred = fp->f_fglob->fg_cred; + error = fp_get_ftype(p, entryp->aiocb.aio_fildes, DTYPE_VNODE, ENOTSUP, &fp); + if (error != 0) { + entryp->returnval = -1; + return error; + } + vp = fp->fp_glob->fg_data; - error = VNOP_FSYNC( vp, sync_flag, &context); + if ((error = vnode_getwithref(vp)) == 0) { + struct vfs_context context = { + .vc_thread = entryp->thread, /* XXX */ + .vc_ucred = fp->fp_glob->fg_cred, + }; - (void)vnode_put(vp); + error = VNOP_FSYNC(vp, sync_flag, &context); - fp_drop(entryp->procp, entryp->aiocb.aio_fildes, fp, 0); - } - if (error != 0) { + (void)vnode_put(vp); + } else { entryp->returnval = -1; } + fp_drop(p, entryp->aiocb.aio_fildes, fp, 0); return error; -} /* do_aio_fsync */ +} /* @@ -2564,7 +2132,7 @@ is_already_queued(proc_t procp, user_addr_t aiocbp) result = FALSE; /* look for matches on our queue of async IO requests that have completed */ - TAILQ_FOREACH( entryp, &procp->p_aio_doneq, aio_proc_link ) { + TAILQ_FOREACH(entryp, &procp->p_aio_doneq, aio_proc_link) { if (aiocbp == entryp->uaiocbp) { result = TRUE; goto ExitThisRoutine; @@ -2572,7 +2140,7 @@ is_already_queued(proc_t procp, user_addr_t aiocbp) } /* look for matches on our queue of active async IO requests */ - TAILQ_FOREACH( entryp, &procp->p_aio_activeq, aio_proc_link ) { + TAILQ_FOREACH(entryp, &procp->p_aio_activeq, aio_proc_link) { if (aiocbp == entryp->uaiocbp) { result = TRUE; goto ExitThisRoutine; @@ -2581,59 +2149,28 @@ is_already_queued(proc_t procp, user_addr_t aiocbp) ExitThisRoutine: return result; -} /* is_already_queued */ - - -static void -free_lio_context(aio_lio_context* context) -{ -#if DEBUG - OSDecrementAtomic(&lio_contexts_alloced); -#endif /* DEBUG */ - - FREE( context, M_TEMP ); -} /* free_lio_context */ +} /* * aio initialization */ __private_extern__ void -aio_init( void ) +aio_init(void) { - int i; - - aio_lock_grp_attr = lck_grp_attr_alloc_init(); - aio_proc_lock_grp = lck_grp_alloc_init("aio_proc", aio_lock_grp_attr);; - aio_entry_lock_grp = lck_grp_alloc_init("aio_entry", aio_lock_grp_attr);; - aio_queue_lock_grp = lck_grp_alloc_init("aio_queue", aio_lock_grp_attr);; - aio_lock_attr = lck_attr_alloc_init(); - - lck_mtx_init(&aio_entry_mtx, aio_entry_lock_grp, aio_lock_attr); - lck_mtx_init(&aio_proc_mtx, aio_proc_lock_grp, aio_lock_attr); - - aio_anchor.aio_inflight_count = 0; - aio_anchor.aio_done_count = 0; - aio_anchor.aio_total_count = 0; - aio_anchor.aio_num_workqs = AIO_NUM_WORK_QUEUES; - - for (i = 0; i < 
AIO_NUM_WORK_QUEUES; i++) { + for (int i = 0; i < AIO_NUM_WORK_QUEUES; i++) { aio_workq_init(&aio_anchor.aio_async_workqs[i]); } - - i = sizeof(aio_workq_entry); - aio_workq_zonep = zinit( i, i * aio_max_requests, i * aio_max_requests, "aiowq" ); - - _aio_create_worker_threads( aio_worker_threads ); -} /* aio_init */ + _aio_create_worker_threads(aio_worker_threads); +} /* * aio worker threads created here. */ __private_extern__ void -_aio_create_worker_threads( int num ) +_aio_create_worker_threads(int num) { int i; @@ -2641,15 +2178,13 @@ _aio_create_worker_threads( int num ) for (i = 0; i < num; i++) { thread_t myThread; - if (KERN_SUCCESS != kernel_thread_start((thread_continue_t)aio_work_thread, NULL, &myThread)) { - printf( "%s - failed to create a work thread \n", __FUNCTION__ ); + if (KERN_SUCCESS != kernel_thread_start(aio_work_thread, NULL, &myThread)) { + printf("%s - failed to create a work thread \n", __FUNCTION__); } else { thread_deallocate(myThread); } } - - return; -} /* _aio_create_worker_threads */ +} /* * Return the current activation utask @@ -2668,7 +2203,7 @@ get_aiotask(void) * aiocb (in our case that is a user_aiocb) */ static void -do_munge_aiocb_user32_to_user( struct user32_aiocb *my_aiocbp, struct user_aiocb *the_user_aiocbp ) +do_munge_aiocb_user32_to_user(struct user32_aiocb *my_aiocbp, struct user_aiocb *the_user_aiocbp) { the_user_aiocbp->aio_fildes = my_aiocbp->aio_fildes; the_user_aiocbp->aio_offset = my_aiocbp->aio_offset; @@ -2697,9 +2232,13 @@ do_munge_aiocb_user32_to_user( struct user32_aiocb *my_aiocbp, struct user_aiocb /* Similar for 64-bit user process, so that we don't need to satisfy * the alignment constraints of the original user64_aiocb */ +#if !__LP64__ +__dead2 +#endif static void -do_munge_aiocb_user64_to_user( struct user64_aiocb *my_aiocbp, struct user_aiocb *the_user_aiocbp ) +do_munge_aiocb_user64_to_user(struct user64_aiocb *my_aiocbp, struct user_aiocb *the_user_aiocbp) { +#if __LP64__ the_user_aiocbp->aio_fildes = my_aiocbp->aio_fildes; the_user_aiocbp->aio_offset = my_aiocbp->aio_offset; the_user_aiocbp->aio_buf = my_aiocbp->aio_buf; @@ -2715,4 +2254,8 @@ do_munge_aiocb_user64_to_user( struct user64_aiocb *my_aiocbp, struct user_aiocb my_aiocbp->aio_sigevent.sigev_notify_function; the_user_aiocbp->aio_sigevent.sigev_notify_attributes = my_aiocbp->aio_sigevent.sigev_notify_attributes; +#else +#pragma unused(my_aiocbp, the_user_aiocbp) + panic("64bit process on 32bit kernel is not supported"); +#endif } diff --git a/bsd/kern/kern_asl.c b/bsd/kern/kern_asl.c index a005c1055..c9e1cdfea 100644 --- a/bsd/kern/kern_asl.c +++ b/bsd/kern/kern_asl.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019 Apple Inc. All rights reserved. + * Copyright (c) 2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -78,13 +78,12 @@ * to truncate the output. 
*/ static int -kern_asl_msg_va(int level, const char *facility, int num_pairs, va_list vargs) +kern_asl_msg_va(int level, const char *facility, size_t num_pairs, va_list vargs) { int err = 0; char fmt[MAX_FMT_LEN]; /* Format string to use with vaddlog */ - int calc_pairs = 0; + size_t calc_pairs = 0; size_t len; - int i; /* Mask extra bits, if any, from priority level */ level = LOG_PRI(level); @@ -120,7 +119,7 @@ kern_asl_msg_va(int level, const char *facility, int num_pairs, va_list vargs) /* Append format strings [%s %s] for the key-value pairs in vargs */ len = MAX_FMT_LEN - KASL_NEWLINE_CHAR_LEN; - for (i = 0; i < calc_pairs; i++) { + for (size_t i = 0; i < calc_pairs; i++) { (void) strlcat(fmt, KASL_KEYVAL_FMT, len); } @@ -141,7 +140,7 @@ kern_asl_msg_va(int level, const char *facility, int num_pairs, va_list vargs) } int -kern_asl_msg(int level, const char *facility, int num_pairs, ...) +kern_asl_msg(int level, const char *facility, size_t num_pairs, ...) { int err; va_list ap; @@ -164,9 +163,9 @@ kern_asl_msg(int level, const char *facility, int num_pairs, ...) * buflen - size of buffer that contains the string */ int -escape_str(char *str, int len, int buflen) +escape_str(char *str, size_t len, size_t buflen) { - int count; + size_t count; char *src, *dst; /* Count number of characters to escape */ diff --git a/bsd/kern/kern_authorization.c b/bsd/kern/kern_authorization.c index 574d4b198..e1000e3d4 100644 --- a/bsd/kern/kern_authorization.c +++ b/bsd/kern/kern_authorization.c @@ -983,7 +983,7 @@ kauth_copyinfilesec(user_addr_t xsecurity, kauth_filesec_t *xsecdestpp) { int error; kauth_filesec_t fsec; - u_int32_t count; + size_t count; size_t copysize; error = 0; @@ -1006,14 +1006,14 @@ kauth_copyinfilesec(user_addr_t xsecurity, kauth_filesec_t *xsecdestpp) */ { user_addr_t known_bound = (xsecurity & PAGE_MASK) + KAUTH_FILESEC_SIZE(0); - user_addr_t uaddr = mach_vm_round_page(known_bound); + user_addr_t uaddr = (user_addr_t)mach_vm_round_page(known_bound); count = (uaddr - known_bound) / sizeof(struct kauth_ace); } if (count > 32) { count = 32; } restart: - if ((fsec = kauth_filesec_alloc(count)) == NULL) { + if ((fsec = kauth_filesec_alloc((int)count)) == NULL) { error = ENOMEM; goto out; } diff --git a/bsd/kern/kern_backtrace.c b/bsd/kern/kern_backtrace.c index f51656aa1..008c28268 100644 --- a/bsd/kern/kern_backtrace.c +++ b/bsd/kern/kern_backtrace.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, 2019 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2016-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -54,7 +54,7 @@ backtrace_sysctl SYSCTL_HANDLER_ARGS #pragma unused(oidp, arg2) uintptr_t type = (uintptr_t)arg1; uintptr_t *bt = NULL; - uint32_t bt_len = 0, bt_filled = 0; + unsigned int bt_len = 0, bt_filled = 0; size_t bt_size = 0; int error = 0; @@ -66,13 +66,12 @@ backtrace_sysctl SYSCTL_HANDLER_ARGS return EFAULT; } - bt_len = req->oldlen > MAX_BACKTRACE ? MAX_BACKTRACE : req->oldlen; + bt_len = req->oldlen > MAX_BACKTRACE ? 
MAX_BACKTRACE : (unsigned int)req->oldlen; bt_size = sizeof(bt[0]) * bt_len; - bt = kalloc(bt_size); + bt = kheap_alloc(KHEAP_TEMP, bt_size, Z_WAITOK | Z_ZERO); if (!bt) { return ENOBUFS; } - memset(bt, 0, bt_size); bt_filled = backtrace_user(bt, bt_len, &error, NULL, NULL); if (error != 0) { goto out; @@ -86,7 +85,7 @@ backtrace_sysctl SYSCTL_HANDLER_ARGS req->oldidx = bt_filled; out: - kfree(bt, bt_size); + kheap_free(KHEAP_TEMP, bt, bt_size); return error; } diff --git a/bsd/kern/kern_clock.c b/bsd/kern/kern_clock.c index a9c778a64..226f90417 100644 --- a/bsd/kern/kern_clock.c +++ b/bsd/kern/kern_clock.c @@ -242,7 +242,7 @@ hzto(struct timeval *tv) ticks = 0x7fffffff; } - return ticks; + return (int)ticks; } /* @@ -346,6 +346,6 @@ get_procrustime(time_value_t *tv) st = p->p_stats->p_ru.ru_stime; //proc_unlock(p); - tv->seconds = st.tv_sec; + tv->seconds = (integer_t)st.tv_sec; tv->microseconds = st.tv_usec; } diff --git a/bsd/kern/kern_control.c b/bsd/kern/kern_control.c index 0151fac5e..dec9a91a9 100644 --- a/bsd/kern/kern_control.c +++ b/bsd/kern/kern_control.c @@ -72,6 +72,7 @@ struct kctl { u_int32_t sendbufsize; /* request more than the default buffer size */ /* Dispatch functions */ + ctl_setup_func setup; /* Setup contact */ ctl_bind_func bind; /* Prepare contact */ ctl_connect_func connect; /* Make contact */ ctl_disconnect_func disconnect; /* Break contact */ @@ -127,7 +128,7 @@ struct ctl_cb { * Definitions and vars for we support */ -static u_int32_t ctl_maxunit = 65536; +const u_int32_t ctl_maxunit = 65536; static lck_grp_attr_t *ctl_lck_grp_attr = 0; static lck_attr_t *ctl_lck_attr = 0; static lck_grp_t *ctl_lck_grp = 0; @@ -504,6 +505,12 @@ ctl_setup_kctl(struct socket *so, struct sockaddr *nam, struct proc *p) lck_mtx_unlock(ctl_mtx); return EBUSY; } + } else if (kctl->setup != NULL) { + error = (*kctl->setup)(&sa.sc_unit, &kcb->userdata); + if (error != 0) { + lck_mtx_unlock(ctl_mtx); + return error; + } } else { /* Find an unused ID, assumes control IDs are in order */ u_int32_t unit = 1; @@ -546,13 +553,13 @@ ctl_setup_kctl(struct socket *so, struct sockaddr *nam, struct proc *p) sbmaxsize = (u_quad_t)sb_max * MCLBYTES / (MSIZE + MCLBYTES); if (kctl->sendbufsize > sbmaxsize) { - sendbufsize = sbmaxsize; + sendbufsize = (u_int32_t)sbmaxsize; } else { sendbufsize = kctl->sendbufsize; } if (kctl->recvbufsize > sbmaxsize) { - recvbufsize = sbmaxsize; + recvbufsize = (u_int32_t)sbmaxsize; } else { recvbufsize = kctl->recvbufsize; } @@ -942,7 +949,7 @@ ctl_send_list(struct socket *so, int flags, struct mbuf *m, } static errno_t -ctl_rcvbspace(struct socket *so, u_int32_t datasize, +ctl_rcvbspace(struct socket *so, size_t datasize, u_int32_t kctlflags, u_int32_t flags) { struct sockbuf *sb = &so->so_rcv; @@ -966,7 +973,7 @@ ctl_rcvbspace(struct socket *so, u_int32_t datasize, error = 0; } } else { - u_int32_t autorcvbuf_max; + size_t autorcvbuf_max; /* * Allow overcommit of 25% @@ -981,10 +988,10 @@ ctl_rcvbspace(struct socket *so, u_int32_t datasize, /* * Grow with a little bit of leeway */ - u_int32_t grow = datasize - space + MSIZE; + size_t grow = datasize - space + MSIZE; + u_int32_t cc = (u_int32_t)MIN(MIN((sb->sb_hiwat + grow), autorcvbuf_max), UINT32_MAX); - if (sbreserve(sb, - min((sb->sb_hiwat + grow), autorcvbuf_max)) == 1) { + if (sbreserve(sb, cc) == 1) { if (sb->sb_hiwat > ctl_autorcvbuf_high) { ctl_autorcvbuf_high = sb->sb_hiwat; } @@ -1229,7 +1236,7 @@ ctl_enqueuedata(void *kctlref, u_int32_t unit, void *data, size_t len, if (mlen + curlen > len) { mlen = len 
- curlen; } - n->m_len = mlen; + n->m_len = (int32_t)mlen; bcopy((char *)data + curlen, n->m_data, mlen); curlen += mlen; } @@ -1675,6 +1682,7 @@ ctl_register(struct kern_ctl_reg *userkctl, kern_ctl_ref *kctlref) u_int32_t id = 1; size_t name_len; int is_extended = 0; + int is_setup = 0; if (userkctl == NULL) { /* sanity check */ return EINVAL; @@ -1772,6 +1780,7 @@ ctl_register(struct kern_ctl_reg *userkctl, kern_ctl_ref *kctlref) } is_extended = (userkctl->ctl_flags & CTL_FLAG_REG_EXTENDED); + is_setup = (userkctl->ctl_flags & CTL_FLAG_REG_SETUP); strlcpy(kctl->name, userkctl->ctl_name, MAX_KCTL_NAME); kctl->flags = userkctl->ctl_flags; @@ -1792,6 +1801,9 @@ ctl_register(struct kern_ctl_reg *userkctl, kern_ctl_ref *kctlref) kctl->recvbufsize = userkctl->ctl_recvsize; } + if (is_setup) { + kctl->setup = userkctl->ctl_setup; + } kctl->bind = userkctl->ctl_bind; kctl->connect = userkctl->ctl_connect; kctl->disconnect = userkctl->ctl_disconnect; @@ -2155,7 +2167,7 @@ kctl_reg_list SYSCTL_HANDLER_ARGS { #pragma unused(oidp, arg1, arg2) int error = 0; - int n, i; + u_int64_t i, n; struct xsystmgen xsg; void *buf = NULL; struct kctl *kctl; @@ -2171,7 +2183,7 @@ kctl_reg_list SYSCTL_HANDLER_ARGS n = kctlstat.kcs_reg_count; if (req->oldptr == USER_ADDR_NULL) { - req->oldidx = (n + n / 8) * sizeof(struct xkctl_reg); + req->oldidx = (size_t)(n + n / 8) * sizeof(struct xkctl_reg); goto done; } if (req->newptr != USER_ADDR_NULL) { @@ -2194,7 +2206,6 @@ kctl_reg_list SYSCTL_HANDLER_ARGS goto done; } - i = 0; for (i = 0, kctl = TAILQ_FIRST(&ctl_head); i < n && kctl != NULL; i++, kctl = TAILQ_NEXT(kctl, next)) { @@ -2265,7 +2276,7 @@ kctl_pcblist SYSCTL_HANDLER_ARGS { #pragma unused(oidp, arg1, arg2) int error = 0; - int n, i; + u_int64_t n, i; struct xsystmgen xsg; void *buf = NULL; struct kctl *kctl; @@ -2284,7 +2295,7 @@ kctl_pcblist SYSCTL_HANDLER_ARGS n = kctlstat.kcs_pcbcount; if (req->oldptr == USER_ADDR_NULL) { - req->oldidx = (n + n / 8) * item_size; + req->oldidx = (size_t)(n + n / 8) * item_size; goto done; } if (req->newptr != USER_ADDR_NULL) { @@ -2307,7 +2318,6 @@ kctl_pcblist SYSCTL_HANDLER_ARGS goto done; } - i = 0; for (i = 0, kctl = TAILQ_FIRST(&ctl_head); i < n && kctl != NULL; kctl = TAILQ_NEXT(kctl, next)) { diff --git a/bsd/kern/kern_core.c b/bsd/kern/kern_core.c index bbf2fcac5..a4a3ee6cf 100644 --- a/bsd/kern/kern_core.c +++ b/bsd/kern/kern_core.c @@ -104,10 +104,10 @@ int mynum_flavors = 2; typedef struct { vm_offset_t header; - int hoffset; + size_t hoffset; mythread_state_flavor_t *flavors; - int tstate_size; - int flavor_count; + size_t tstate_size; + size_t flavor_count; } tir_t; extern int freespace_mb(vnode_t vp); @@ -173,7 +173,7 @@ static void collectth_state(thread_t th_act, void *tirp) { vm_offset_t header; - int hoffset, i; + size_t hoffset, i; mythread_state_flavor_t *flavors; struct thread_command *tc; tir_t *t = (tir_t *)tirp; @@ -187,8 +187,8 @@ collectth_state(thread_t th_act, void *tirp) tc = (struct thread_command *) (header + hoffset); tc->cmd = LC_THREAD; - tc->cmdsize = sizeof(struct thread_command) - + t->tstate_size; + tc->cmdsize = (uint32_t)(sizeof(struct thread_command) + + t->tstate_size); hoffset += sizeof(struct thread_command); /* * Follow with a struct thread_state_flavor and @@ -238,9 +238,9 @@ coredump(proc_t core_proc, uint32_t reserve_mb, int coredump_flags) kauth_cred_t cred = vfs_context_ucred(ctx); int error = 0; struct vnode_attr va; - int thread_count, segment_count; - int command_size, header_size, tstate_size; - int hoffset; + size_t 
thread_count, segment_count; + size_t command_size, header_size, tstate_size; + size_t hoffset; off_t foffset; mach_vm_offset_t vmoffset; vm_offset_t header; @@ -254,7 +254,7 @@ coredump(proc_t core_proc, uint32_t reserve_mb, int coredump_flags) char *name = NULL; mythread_state_flavor_t flavors[MAX_TSTATE_FLAVORS]; vm_size_t mapsize; - int i; + size_t i; uint32_t nesting_depth = 0; kern_return_t kret; struct vm_region_submap_info_64 vbr; @@ -295,7 +295,7 @@ coredump(proc_t core_proc, uint32_t reserve_mb, int coredump_flags) mapsize = get_vmmap_size(map); if (((coredump_flags & COREDUMP_IGNORE_ULIMIT) == 0) && - (mapsize >= core_proc->p_rlimit[RLIMIT_CORE].rlim_cur)) { + (mapsize >= proc_limitgetcur(core_proc, RLIMIT_CORE, FALSE))) { error = EFAULT; goto out2; } @@ -358,11 +358,34 @@ coredump(proc_t core_proc, uint32_t reserve_mb, int coredump_flags) tstate_size += sizeof(mythread_state_flavor_t) + (flavors[i].count * sizeof(int)); } - command_size = segment_count * segment_command_sz + - thread_count * sizeof(struct thread_command) + - tstate_size * thread_count; - header_size = command_size + mach_header_sz; + { + size_t lhs; + size_t rhs; + + /* lhs = segment_count * segment_command_sz */ + if (os_mul_overflow(segment_count, segment_command_sz, &lhs)) { + error = ENOMEM; + goto out; + } + + /* rhs = (tstate_size + sizeof(struct thread_command)) * thread_count */ + if (os_add_and_mul_overflow(tstate_size, sizeof(struct thread_command), thread_count, &rhs)) { + error = ENOMEM; + goto out; + } + + /* command_size = lhs + rhs */ + if (os_add_overflow(lhs, rhs, &command_size)) { + error = ENOMEM; + goto out; + } + } + + if (os_add_overflow(command_size, mach_header_sz, &header_size)) { + error = ENOMEM; + goto out; + } if (kmem_alloc(kernel_map, &header, (vm_size_t)header_size, VM_KERN_MEMORY_DIAG) != KERN_SUCCESS) { error = ENOMEM; @@ -378,8 +401,8 @@ coredump(proc_t core_proc, uint32_t reserve_mb, int coredump_flags) mh64->cputype = process_cpu_type(core_proc); mh64->cpusubtype = process_cpu_subtype(core_proc); mh64->filetype = MH_CORE; - mh64->ncmds = segment_count + thread_count; - mh64->sizeofcmds = command_size; + mh64->ncmds = (uint32_t)(segment_count + thread_count); + mh64->sizeofcmds = (uint32_t)command_size; mh64->reserved = 0; /* 8 byte alignment */ } else { mh = (struct mach_header *)header; @@ -387,8 +410,8 @@ coredump(proc_t core_proc, uint32_t reserve_mb, int coredump_flags) mh->cputype = process_cpu_type(core_proc); mh->cpusubtype = process_cpu_subtype(core_proc); mh->filetype = MH_CORE; - mh->ncmds = segment_count + thread_count; - mh->sizeofcmds = command_size; + mh->ncmds = (uint32_t)(segment_count + thread_count); + mh->sizeofcmds = (uint32_t)command_size; } hoffset = mach_header_sz; /* offset into header */ @@ -463,8 +486,8 @@ coredump(proc_t core_proc, uint32_t reserve_mb, int coredump_flags) sc->cmdsize = sizeof(struct segment_command); /* segment name is zeroed by kmem_alloc */ sc->segname[0] = 0; - sc->vmaddr = CAST_DOWN_EXPLICIT(vm_offset_t, vmoffset); - sc->vmsize = CAST_DOWN_EXPLICIT(vm_size_t, vmsize); + sc->vmaddr = CAST_DOWN_EXPLICIT(uint32_t, vmoffset); + sc->vmsize = CAST_DOWN_EXPLICIT(uint32_t, vmsize); sc->fileoff = CAST_DOWN_EXPLICIT(uint32_t, foffset); /* will never truncate */ sc->filesize = CAST_DOWN_EXPLICIT(uint32_t, vmsize); /* will never truncate */ sc->maxprot = maxprot; @@ -525,7 +548,7 @@ coredump(proc_t core_proc, uint32_t reserve_mb, int coredump_flags) * Write out the Mach header at the beginning of the * file. 
OK to use a 32 bit write for this. */ - error = vn_rdwr(UIO_WRITE, vp, (caddr_t)header, header_size, (off_t)0, + error = vn_rdwr(UIO_WRITE, vp, (caddr_t)header, (int)MIN(header_size, INT_MAX), (off_t)0, UIO_SYSSPACE, IO_NOCACHE | IO_NODELOCKED | IO_UNIT, cred, (int *) 0, core_proc); kmem_free(kernel_map, header, header_size); diff --git a/bsd/kern/kern_credential.c b/bsd/kern/kern_credential.c index 643b1cebb..c3eb07764 100644 --- a/bsd/kern/kern_credential.c +++ b/bsd/kern/kern_credential.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2004-2011 Apple Inc. All rights reserved. + * Copyright (c) 2004-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -216,7 +216,7 @@ struct kauth_identity { int ki_valid; uid_t ki_uid; gid_t ki_gid; - int ki_supgrpcnt; + uint32_t ki_supgrpcnt; gid_t ki_supgrps[NGROUPS]; guid_t ki_guid; ntsid_t ki_ntsid; @@ -242,7 +242,7 @@ static int kauth_identity_cachemax = KAUTH_IDENTITY_CACHEMAX_DEFAULT; static int kauth_identity_count; static struct kauth_identity *kauth_identity_alloc(uid_t uid, gid_t gid, guid_t *guidp, time_t guid_expiry, - ntsid_t *ntsidp, time_t ntsid_expiry, int supgrpcnt, gid_t *supgrps, time_t groups_expiry, + ntsid_t *ntsidp, time_t ntsid_expiry, size_t supgrpcnt, gid_t *supgrps, time_t groups_expiry, const char *name, int nametype); static void kauth_identity_register_and_free(struct kauth_identity *kip); static void kauth_identity_updatecache(struct kauth_identity_extlookup *elp, struct kauth_identity *kip, uint64_t extend_data); @@ -282,6 +282,7 @@ static void kauth_groups_trimcache(int newsize); #define KAUTH_CRED_TABLE_SIZE 128 +ZONE_DECLARE(ucred_zone, "cred", sizeof(struct ucred), ZC_ZFREE_CLEARMEM); LIST_HEAD(kauth_cred_entry_head, ucred); static struct kauth_cred_entry_head kauth_cred_table_anchor[KAUTH_CRED_TABLE_SIZE]; @@ -379,6 +380,28 @@ kauth_resolver_init(void) kauth_resolver_mtx = lck_mtx_alloc_init(kauth_lck_grp, 0 /*LCK_ATTR_NULL*/); } +/* + * kauth_resolver_identity_reset + * + * Description: Reset the identity of the external resolver in certain + * controlled circumstances. + * + * Parameters: None. + * + * Returns: Nothing. 
+ */ +void +kauth_resolver_identity_reset(void) +{ + KAUTH_RESOLVER_LOCK(); + if (kauth_resolver_identity != 0) { + printf("kauth external resolver %d failed to de-register.\n", + kauth_resolver_identity); + kauth_resolver_identity = 0; + kauth_resolver_registered = 0; + } + KAUTH_RESOLVER_UNLOCK(); +} /* * kauth_resolver_submit @@ -624,7 +647,7 @@ identitysvc(__unused struct proc *p, struct identitysvc_args *uap, __unused int3 * external resolution timeout */ if (message > 30 && message < 10000) { - kauth_resolver_timeout = message; + kauth_resolver_timeout = (int)message; KAUTH_DEBUG("RESOLVER - new resolver changes timeout to %d seconds\n", (int)message); } kauth_resolver_identity = new_id; @@ -974,7 +997,7 @@ kauth_resolver_complete(user_addr_t message) once = 1; } } - /* FALLTHROUGH */ + OS_FALLTHROUGH; case KAUTH_EXTLOOKUP_SUCCESS: break; @@ -1169,7 +1192,7 @@ kauth_identity_init(void) */ static struct kauth_identity * kauth_identity_alloc(uid_t uid, gid_t gid, guid_t *guidp, time_t guid_expiry, - ntsid_t *ntsidp, time_t ntsid_expiry, int supgrpcnt, gid_t *supgrps, time_t groups_expiry, + ntsid_t *ntsidp, time_t ntsid_expiry, size_t supgrpcnt, gid_t *supgrps, time_t groups_expiry, const char *name, int nametype) { struct kauth_identity *kip; @@ -1192,17 +1215,16 @@ kauth_identity_alloc(uid_t uid, gid_t gid, guid_t *guidp, time_t guid_expiry, /* * A malicious/faulty resolver could return bad values */ - assert(supgrpcnt >= 0); assert(supgrpcnt <= NGROUPS); assert(supgrps != NULL); - if ((supgrpcnt < 0) || (supgrpcnt > NGROUPS) || (supgrps == NULL)) { + if ((supgrpcnt > NGROUPS) || (supgrps == NULL)) { return NULL; } if (kip->ki_valid & KI_VALID_GID) { panic("can't allocate kauth identity with both gid and supplementary groups"); } - kip->ki_supgrpcnt = supgrpcnt; + kip->ki_supgrpcnt = (uint32_t)supgrpcnt; memcpy(kip->ki_supgrps, supgrps, sizeof(supgrps[0]) * supgrpcnt); kip->ki_valid |= KI_VALID_GROUPS; } @@ -1364,7 +1386,7 @@ kauth_identity_updatecache(struct kauth_identity_extlookup *elp, struct kauth_id */ if (elp->el_flags & (KAUTH_EXTLOOKUP_VALID_PWNAM | KAUTH_EXTLOOKUP_VALID_GRNAM)) { const char *tmp = CAST_DOWN(const char *, extend_data); - speculative_name = vfs_addname(tmp, strnlen(tmp, MAXPATHLEN - 1), 0, 0); + speculative_name = vfs_addname(tmp, (uint32_t)strnlen(tmp, MAXPATHLEN - 1), 0, 0); } /* user identity? */ @@ -2200,7 +2222,7 @@ kauth_cred_cache_lookup(int from, int to, void *src, void *dst) * kauth_cred_cache_lookup below. */ struct supgroups { - int *count; + size_t *count; gid_t *groups; }; @@ -2222,7 +2244,7 @@ struct supgroups { * */ static int -kauth_cred_uid2groups(uid_t *uid, gid_t *groups, int *gcount) +kauth_cred_uid2groups(uid_t *uid, gid_t *groups, size_t *gcount) { int rv; @@ -2863,11 +2885,11 @@ kauth_cred_cache_lookup(int from, int to, void *src, void *dst) * expiration. */ if (ki.ki_supgrpcnt > NGROUPS) { - panic("kauth data structure corrupted. kauth identity 0x%p with %d groups, greater than max of %d", + panic("kauth data structure corrupted. 
kauth identity 0x%p with %u groups, greater than max of %d", &ki, ki.ki_supgrpcnt, NGROUPS); } - el.el_sup_grp_cnt = ki.ki_supgrpcnt; + el.el_sup_grp_cnt = (uint32_t)ki.ki_supgrpcnt; memcpy(el.el_sup_groups, ki.ki_supgrps, sizeof(el.el_sup_groups[0]) * ki.ki_supgrpcnt); /* Let the resolver know these were the previous valid groups */ @@ -2937,7 +2959,7 @@ found: break; case KI_VALID_GROUPS: { struct supgroups *gp = (struct supgroups *)dst; - u_int32_t limit = ki.ki_supgrpcnt; + size_t limit = ki.ki_supgrpcnt; if (gp->count) { limit = MIN(ki.ki_supgrpcnt, *gp->count); @@ -3826,17 +3848,13 @@ kauth_cred_alloc(void) { kauth_cred_t newcred; - MALLOC_ZONE(newcred, kauth_cred_t, sizeof(*newcred), M_CRED, M_WAITOK | M_ZERO); - assert(newcred); - if (newcred != 0) { - posix_cred_t newpcred = posix_cred_get(newcred); - newcred->cr_audit.as_aia_p = audit_default_aia_p; - /* must do this, or cred has same group membership as uid 0 */ - newpcred->cr_gmuid = KAUTH_UID_NONE; + newcred = zalloc_flags(ucred_zone, Z_WAITOK | Z_ZERO); + posix_cred_get(newcred)->cr_gmuid = KAUTH_UID_NONE; + newcred->cr_audit.as_aia_p = audit_default_aia_p; + /* must do this, or cred has same group membership as uid 0 */ #if CONFIG_MACF - mac_cred_label_init(newcred); + mac_cred_label_init(newcred); #endif - } return newcred; } @@ -3856,7 +3874,7 @@ kauth_cred_free(kauth_cred_t cred) mac_cred_label_destroy(cred); #endif AUDIT_SESSION_UNREF(cred); - FREE_ZONE(cred, sizeof(*cred), M_CRED); + zfree(ucred_zone, cred); } /* @@ -4011,7 +4029,7 @@ kauth_cred_setresuid(kauth_cred_t cred, uid_t ruid, uid_t euid, uid_t svuid, uid * Look up in cred hash table to see if we have a matching credential * with the new values; this is done by calling kauth_cred_update(). */ - bcopy(cred, &temp_cred, sizeof(temp_cred)); + temp_cred = *cred; if (euid != KAUTH_UID_NONE) { temp_pcred->cr_uid = euid; } @@ -4086,7 +4104,7 @@ kauth_cred_setresgid(kauth_cred_t cred, gid_t rgid, gid_t egid, gid_t svgid) * Look up in cred hash table to see if we have a matching credential * with the new values; this is done by calling kauth_cred_update(). */ - bcopy(cred, &temp_cred, sizeof(temp_cred)); + temp_cred = *cred; if (egid != KAUTH_GID_NONE) { /* displacing a supplementary group opts us out of memberd */ if (kauth_cred_change_egid(&temp_cred, egid)) { @@ -4158,14 +4176,16 @@ kauth_cred_setresgid(kauth_cred_t cred, gid_t rgid, gid_t egid, gid_t svgid) * to be the caller's problem. */ kauth_cred_t -kauth_cred_setgroups(kauth_cred_t cred, gid_t *groups, int groupcount, uid_t gmuid) +kauth_cred_setgroups(kauth_cred_t cred, gid_t *groups, size_t groupcount, uid_t gmuid) { - int i; + size_t i; struct ucred temp_cred; posix_cred_t temp_pcred = posix_cred_get(&temp_cred); posix_cred_t pcred; NULLCRED_CHECK(cred); + assert(groupcount <= NGROUPS); + groupcount = MIN(groupcount, NGROUPS); pcred = posix_cred_get(cred); @@ -4192,9 +4212,9 @@ kauth_cred_setgroups(kauth_cred_t cred, gid_t *groups, int groupcount, uid_t gmu * opt-out of memberd processing using setgroups(), and an opt-in * using initgroups(). This is required for POSIX conformance. 
*/ - bcopy(cred, &temp_cred, sizeof(temp_cred)); - temp_pcred->cr_ngroups = groupcount; - bcopy(groups, temp_pcred->cr_groups, sizeof(temp_pcred->cr_groups)); + temp_cred = *cred; + temp_pcred->cr_ngroups = (short)groupcount; + bcopy(groups, temp_pcred->cr_groups, groupcount * sizeof(temp_pcred->cr_groups[0])); temp_pcred->cr_gmuid = gmuid; if (gmuid == KAUTH_UID_NONE) { temp_pcred->cr_flags |= CRF_NOMEMBERD; @@ -4217,9 +4237,9 @@ SYSCTL_INT(_kern, OID_AUTO, ds_supgroups_supported, CTLFLAG_RW | CTLFLAG_LOCKED, #endif int -kauth_cred_getgroups(kauth_cred_t cred, gid_t *grouplist, int *countp) +kauth_cred_getgroups(kauth_cred_t cred, gid_t *grouplist, size_t *countp) { - int limit = NGROUPS; + size_t limit = NGROUPS; posix_cred_t pcred; pcred = posix_cred_get(cred); @@ -4401,7 +4421,7 @@ kauth_cred_setsvuidgid(kauth_cred_t cred, uid_t uid, gid_t gid) /* look up in cred hash table to see if we have a matching credential * with new values. */ - bcopy(cred, &temp_cred, sizeof(temp_cred)); + temp_cred = *cred; temp_pcred->cr_svuid = uid; temp_pcred->cr_svgid = gid; @@ -4447,7 +4467,7 @@ kauth_cred_setauditinfo(kauth_cred_t cred, au_session_t *auditinfo_p) return cred; } - bcopy(cred, &temp_cred, sizeof(temp_cred)); + temp_cred = *cred; bcopy(auditinfo_p, &temp_cred.cr_audit, sizeof(temp_cred.cr_audit)); return kauth_cred_update(cred, &temp_cred, FALSE); @@ -4482,7 +4502,7 @@ kauth_cred_label_update(kauth_cred_t cred, struct label *label) kauth_cred_t newcred; struct ucred temp_cred; - bcopy(cred, &temp_cred, sizeof(temp_cred)); + temp_cred = *cred; mac_cred_label_init(&temp_cred); mac_cred_label_associate(cred, &temp_cred); @@ -4534,7 +4554,7 @@ kauth_cred_label_update_execve(kauth_cred_t cred, vfs_context_t ctx, kauth_cred_t newcred; struct ucred temp_cred; - bcopy(cred, &temp_cred, sizeof(temp_cred)); + temp_cred = *cred; mac_cred_label_init(&temp_cred); mac_cred_label_associate(cred, &temp_cred); @@ -5006,7 +5026,7 @@ kauth_cred_copy_real(kauth_cred_t cred) * Look up in cred hash table to see if we have a matching credential * with the new values. 
*/ - bcopy(cred, &temp_cred, sizeof(temp_cred)); + temp_cred = *cred; temp_pcred->cr_uid = pcred->cr_ruid; /* displacing a supplementary group opts us out of memberd */ if (kauth_cred_change_egid(&temp_cred, pcred->cr_rgid)) { @@ -5539,7 +5559,7 @@ struct debug_ucred { uid_t cr_uid; /* effective user id */ uid_t cr_ruid; /* real user id */ uid_t cr_svuid; /* saved user id */ - short cr_ngroups; /* number of groups in advisory list */ + u_short cr_ngroups; /* number of groups in advisory list */ gid_t cr_groups[NGROUPS]; /* advisory group list */ gid_t cr_rgid; /* real group id */ gid_t cr_svgid; /* saved group id */ @@ -5860,8 +5880,8 @@ posix_cred_access(kauth_cred_t cred, id_t object_uid, id_t object_gid, mode_t ob { int is_member; mode_t mode_owner = (object_mode & S_IRWXU); - mode_t mode_group = (object_mode & S_IRWXG) << 3; - mode_t mode_world = (object_mode & S_IRWXO) << 6; + mode_t mode_group = (mode_t)((object_mode & S_IRWXG) << 3); + mode_t mode_world = (mode_t)((object_mode & S_IRWXO) << 6); /* * Check first for owner rights diff --git a/bsd/kern/kern_cs.c b/bsd/kern/kern_cs.c index 4a9fbc3ff..ad1eb76ef 100644 --- a/bsd/kern/kern_cs.c +++ b/bsd/kern/kern_cs.c @@ -59,6 +59,7 @@ #include #include +#include #include #include @@ -72,6 +73,8 @@ #include #include +#include + unsigned long cs_procs_killed = 0; unsigned long cs_procs_invalidated = 0; @@ -85,7 +88,22 @@ int cs_debug_fail_on_unsigned_code = 0; unsigned int cs_debug_unsigned_exec_failures = 0; unsigned int cs_debug_unsigned_mmap_failures = 0; +#if CONFIG_ENFORCE_SIGNED_CODE +#define DEFAULT_CS_SYSTEM_ENFORCEMENT_ENABLE 1 +#define DEFAULT_CS_PROCESS_ENFORCEMENT_ENABLE 1 +#else +#define DEFAULT_CS_SYSTEM_ENFORCEMENT_ENABLE 1 +#define DEFAULT_CS_PROCESS_ENFORCEMENT_ENABLE 0 +#endif + +#if CONFIG_ENFORCE_LIBRARY_VALIDATION +#define DEFAULT_CS_LIBRARY_VA_ENABLE 1 +#else +#define DEFAULT_CS_LIBRARY_VA_ENABLE 0 +#endif + #if SECURE_KERNEL + /* * Here we split cs_enforcement_enable into cs_system_enforcement_enable and cs_process_enforcement_enable * @@ -99,28 +117,17 @@ unsigned int cs_debug_unsigned_mmap_failures = 0; * (On iOS and related, both of these are set by default. On macOS, only cs_system_enforcement_enable * is set by default. Processes can then be opted into code signing enforcement on a case by case basis.) 
*/ -const int cs_system_enforcement_enable = 1; -const int cs_process_enforcement_enable = 1; -const int cs_library_val_enable = 1; +SECURITY_READ_ONLY_EARLY(int) cs_system_enforcement_enable = DEFAULT_CS_SYSTEM_ENFORCEMENT_ENABLE; +SECURITY_READ_ONLY_EARLY(int) cs_process_enforcement_enable = DEFAULT_CS_PROCESS_ENFORCEMENT_ENABLE; +SECURITY_READ_ONLY_EARLY(int) cs_library_val_enable = DEFAULT_CS_LIBRARY_VA_ENABLE; + #else /* !SECURE_KERNEL */ int cs_enforcement_panic = 0; int cs_relax_platform_task_ports = 0; -#if CONFIG_ENFORCE_SIGNED_CODE -#define DEFAULT_CS_SYSTEM_ENFORCEMENT_ENABLE 1 -#define DEFAULT_CS_PROCESS_ENFORCEMENT_ENABLE 1 -#else -#define DEFAULT_CS_SYSTEM_ENFORCEMENT_ENABLE 1 -#define DEFAULT_CS_PROCESS_ENFORCEMENT_ENABLE 0 -#endif SECURITY_READ_ONLY_LATE(int) cs_system_enforcement_enable = DEFAULT_CS_SYSTEM_ENFORCEMENT_ENABLE; SECURITY_READ_ONLY_LATE(int) cs_process_enforcement_enable = DEFAULT_CS_PROCESS_ENFORCEMENT_ENABLE; -#if CONFIG_ENFORCE_LIBRARY_VALIDATION -#define DEFAULT_CS_LIBRARY_VA_ENABLE 1 -#else -#define DEFAULT_CS_LIBRARY_VA_ENABLE 0 -#endif SECURITY_READ_ONLY_LATE(int) cs_library_val_enable = DEFAULT_CS_LIBRARY_VA_ENABLE; #endif /* !SECURE_KERNEL */ @@ -152,7 +159,8 @@ SYSCTL_INT(_vm, OID_AUTO, cs_library_validation, CTLFLAG_RD | CTLFLAG_LOCKED, &c int panic_on_cs_killed = 0; -void +__startup_func +static void cs_init(void) { #if MACH_ASSERT @@ -191,6 +199,7 @@ cs_init(void) cs_lockgrp = lck_grp_alloc_init("KERNCS", attr); lck_grp_attr_free(attr); } +STARTUP(CODESIGNING, STARTUP_RANK_FIRST, cs_init); int cs_allow_invalid(struct proc *p) @@ -237,7 +246,6 @@ cs_invalid_page(addr64_t vaddr, boolean_t *cs_killed) { struct proc *p; int send_kill = 0, retval = 0, verbose = cs_debug; - uint32_t csflags; p = current_proc(); @@ -272,9 +280,9 @@ cs_invalid_page(addr64_t vaddr, boolean_t *cs_killed) p->p_csflags &= ~CS_VALID; cs_procs_invalidated++; verbose = 1; + cs_process_invalidated(NULL); } } - csflags = p->p_csflags; proc_unlock(p); if (verbose) { @@ -295,10 +303,29 @@ cs_invalid_page(addr64_t vaddr, boolean_t *cs_killed) *cs_killed = FALSE; } - return retval; } +/* + * Called after a process got its CS_VALID bit removed, either by + * a previous call to cs_invalid_page, or through other means. + * Called from fault handler with vm object lock held. + * Called with proc lock held for current_proc or, if passed in, p, + * to ensure MACF hook can suspend the task before other threads + * can access the memory that is paged in after cs_invalid_page + * returns 0 due to missing CS_HARD|CS_KILL. + */ +void +cs_process_invalidated(struct proc * __unused p) +{ +#if CONFIG_MACF + if (p == NULL) { + p = current_proc(); + } + mac_proc_notify_cs_invalidated(p); +#endif +} + /* * Assumes p (if passed in) is locked with proc_lock(). */ @@ -333,6 +360,13 @@ cs_system_enforcement(void) return cs_system_enforcement_enable ? 1 : 0; } +int +cs_vm_supports_4k_translations(void) +{ + return 0; +} + + /* * Returns whether a given process is still valid. 
*/ @@ -491,7 +525,7 @@ csproc_get_blob(struct proc *p) return NULL; } - return ubc_cs_blob_get(p->p_textvp, -1, p->p_textoff); + return ubc_cs_blob_get(p->p_textvp, -1, -1, p->p_textoff); } /* @@ -503,7 +537,7 @@ csproc_get_blob(struct proc *p) struct cs_blob * csvnode_get_blob(struct vnode *vp, off_t offset) { - return ubc_cs_blob_get(vp, -1, offset); + return ubc_cs_blob_get(vp, -1, -1, offset); } /* @@ -550,6 +584,11 @@ csblob_get_identity(struct cs_blob *csblob) const uint8_t * csblob_get_cdhash(struct cs_blob *csblob) { + ptrauth_utils_auth_blob_generic(csblob->csb_cdhash, + sizeof(csblob->csb_cdhash), + OS_PTRAUTH_DISCRIMINATOR("cs_blob.csb_cd_signature"), + PTRAUTH_ADDR_DIVERSIFY, + csblob->csb_cdhash_signature); return csblob->csb_cdhash; } @@ -604,6 +643,19 @@ csproc_get_teamid(struct proc *p) return csblob_get_teamid(csblob); } +const char * +csproc_get_identity(struct proc *p) +{ + struct cs_blob *csblob = NULL; + + csblob = csproc_get_blob(p); + if (csblob == NULL) { + return NULL; + } + + return csblob_get_identity(csblob); +} + /* * Function: csproc_get_signer_type * @@ -638,7 +690,7 @@ csvnode_get_teamid(struct vnode *vp, off_t offset) return NULL; } - csblob = ubc_cs_blob_get(vp, -1, offset); + csblob = ubc_cs_blob_get(vp, -1, -1, offset); if (csblob == NULL) { return NULL; } @@ -701,6 +753,7 @@ csproc_disable_enforcement(struct proc* __unused p) if (p != NULL) { proc_lock(p); p->p_csflags &= (~CS_ENFORCEMENT); + vm_map_cs_enforcement_set(get_task_map(p->task), FALSE); proc_unlock(p); } #endif @@ -805,6 +858,49 @@ out: return platform_binary; } +int +csfg_get_supplement_platform_binary(struct fileglob *fg __unused) +{ +#if CONFIG_SUPPLEMENTAL_SIGNATURES + int platform_binary = 0; + struct ubc_info *uip; + vnode_t vp; + + if (FILEGLOB_DTYPE(fg) != DTYPE_VNODE) { + return 0; + } + + vp = (struct vnode *)fg->fg_data; + if (vp == NULL) { + return 0; + } + + vnode_lock(vp); + if (!UBCINFOEXISTS(vp)) { + goto out; + } + + uip = vp->v_ubcinfo; + if (uip == NULL) { + goto out; + } + + if (uip->cs_blob_supplement == NULL) { + goto out; + } + + platform_binary = uip->cs_blob_supplement->csb_platform_binary; +out: + vnode_unlock(vp); + + return platform_binary; +#else + // Supplemental signatures are only allowed in CONFIG_SUPPLEMENTAL_SIGNATURES + // Return false if anyone asks about them + return 0; +#endif +} + uint8_t * csfg_get_cdhash(struct fileglob *fg, uint64_t offset, size_t *cdhash_size) { @@ -820,15 +916,87 @@ csfg_get_cdhash(struct fileglob *fg, uint64_t offset, size_t *cdhash_size) } struct cs_blob *csblob = NULL; - if ((csblob = ubc_cs_blob_get(vp, -1, offset)) == NULL) { + if ((csblob = ubc_cs_blob_get(vp, -1, -1, offset)) == NULL) { return NULL; } if (cdhash_size) { *cdhash_size = CS_CDHASH_LEN; } + ptrauth_utils_auth_blob_generic(csblob->csb_cdhash, + sizeof(csblob->csb_cdhash), + OS_PTRAUTH_DISCRIMINATOR("cs_blob.csb_cd_signature"), + PTRAUTH_ADDR_DIVERSIFY, + csblob->csb_cdhash_signature); + return csblob->csb_cdhash; +} + +uint8_t * +csfg_get_supplement_cdhash(struct fileglob *fg __unused, uint64_t offset __unused, size_t *cdhash_size __unused) +{ +#if CONFIG_SUPPLEMENTAL_SIGNATURES + vnode_t vp; + + if (FILEGLOB_DTYPE(fg) != DTYPE_VNODE) { + return NULL; + } + vp = (struct vnode *)fg->fg_data; + if (vp == NULL) { + return NULL; + } + + struct cs_blob *csblob = NULL; + if ((csblob = ubc_cs_blob_get_supplement(vp, offset)) == NULL) { + return NULL; + } + + if (cdhash_size) { + *cdhash_size = CS_CDHASH_LEN; + } + ptrauth_utils_auth_blob_generic(csblob->csb_cdhash, + 
sizeof(csblob->csb_cdhash), + OS_PTRAUTH_DISCRIMINATOR("cs_blob.csb_cd_signature"), + PTRAUTH_ADDR_DIVERSIFY, + csblob->csb_cdhash_signature); return csblob->csb_cdhash; +#else + // Supplemental signatures are only available in CONFIG_SUPPLEMENTAL_SIGNATURES + // return NULL if anyone asks about them + return NULL; +#endif +} + +const uint8_t * +csfg_get_supplement_linkage_cdhash(struct fileglob *fg __unused, uint64_t offset __unused, size_t *cdhash_size __unused) +{ +#if CONFIG_SUPPLEMENTAL_SIGNATURES + vnode_t vp; + + if (FILEGLOB_DTYPE(fg) != DTYPE_VNODE) { + return NULL; + } + + vp = (struct vnode *)fg->fg_data; + if (vp == NULL) { + return NULL; + } + + struct cs_blob *csblob = NULL; + if ((csblob = ubc_cs_blob_get_supplement(vp, offset)) == NULL) { + return NULL; + } + + if (cdhash_size) { + *cdhash_size = CS_CDHASH_LEN; + } + + return csblob->csb_linkage; +#else + // Supplemental signatures are only available in CONFIG_SUPPLEMENTAL_SIGNATURES + // return NULL if anyone asks about them + return NULL; +#endif } /* @@ -876,6 +1044,49 @@ out: return signer_type; } +unsigned int +csfg_get_supplement_signer_type(struct fileglob *fg __unused) +{ +#if CONFIG_SUPPLEMENTAL_SIGNATURES + struct ubc_info *uip; + unsigned int signer_type = CS_SIGNER_TYPE_UNKNOWN; + vnode_t vp; + + if (FILEGLOB_DTYPE(fg) != DTYPE_VNODE) { + return CS_SIGNER_TYPE_UNKNOWN; + } + + vp = (struct vnode *)fg->fg_data; + if (vp == NULL) { + return CS_SIGNER_TYPE_UNKNOWN; + } + + vnode_lock(vp); + if (!UBCINFOEXISTS(vp)) { + goto out; + } + + uip = vp->v_ubcinfo; + if (uip == NULL) { + goto out; + } + + if (uip->cs_blob_supplement == NULL) { + goto out; + } + + signer_type = uip->cs_blob_supplement->csb_signer_type; +out: + vnode_unlock(vp); + + return signer_type; +#else + // Supplemental signatures are only available in CONFIG_SUPPLEMENTAL_SIGNATURES + // Return unknown if anyone asks + return CS_SIGNER_TYPE_UNKNOWN; +#endif +} + /* * Function: csfg_get_teamid * @@ -921,6 +1132,49 @@ out: return str; } +const char * +csfg_get_supplement_teamid(struct fileglob *fg __unused) +{ +#if CONFIG_SUPPLEMENTAL_SIGNATURES + struct ubc_info *uip; + const char *str = NULL; + vnode_t vp; + + if (FILEGLOB_DTYPE(fg) != DTYPE_VNODE) { + return NULL; + } + + vp = (struct vnode *)fg->fg_data; + if (vp == NULL) { + return NULL; + } + + vnode_lock(vp); + if (!UBCINFOEXISTS(vp)) { + goto out; + } + + uip = vp->v_ubcinfo; + if (uip == NULL) { + goto out; + } + + if (uip->cs_blob_supplement == NULL) { + goto out; + } + + str = uip->cs_blob_supplement->csb_supplement_teamid; +out: + vnode_unlock(vp); + + return str; +#else + // Supplemental Signatures are only available in CONFIG_SUPPLEMENTAL_SIGNATURES + // Return NULL if anyone asks + return NULL; +#endif +} + /* * Function: csfg_get_prod_signed * @@ -967,6 +1221,51 @@ out: return prod_signed; } +int +csfg_get_supplement_prod_signed(struct fileglob *fg __unused) +{ +#if CONFIG_SUPPLEMENTAL_SIGNATURES + struct ubc_info *uip; + vnode_t vp; + int prod_signed = 0; + + if (FILEGLOB_DTYPE(fg) != DTYPE_VNODE) { + return 0; + } + + vp = (struct vnode *)fg->fg_data; + if (vp == NULL) { + return 0; + } + + vnode_lock(vp); + if (!UBCINFOEXISTS(vp)) { + goto out; + } + + uip = vp->v_ubcinfo; + if (uip == NULL) { + goto out; + } + + if (uip->cs_blob_supplement == NULL) { + goto out; + } + + /* It is OK to extract the flag from the first blob + * because all blobs of a vnode must have the same cs_flags */ + prod_signed = (uip->cs_blob_supplement->csb_flags & CS_DEV_CODE) == 0; +out: + vnode_unlock(vp); + + 
return prod_signed; +#else + // Supplemental signatures are only available in CONFIG_SUPPLEMENTAL_SIGNATURES + // Indicate development signed if anyone tries to ask about one. + return 0; +#endif +} + /* * Function: csfg_get_identity * @@ -988,7 +1287,7 @@ csfg_get_identity(struct fileglob *fg, off_t offset) return NULL; } - csblob = ubc_cs_blob_get(vp, -1, offset); + csblob = ubc_cs_blob_get(vp, -1, -1, offset); if (csblob == NULL) { return NULL; } @@ -1033,7 +1332,7 @@ csvnode_get_platform_identifier(struct vnode *vp, off_t offset) struct cs_blob *csblob; const CS_CodeDirectory *code_dir; - csblob = ubc_cs_blob_get(vp, -1, offset); + csblob = ubc_cs_blob_get(vp, -1, -1, offset); if (csblob == NULL) { return 0; } @@ -1106,7 +1405,8 @@ csfg_get_path(struct fileglob *fg, char *path, int *len) return vn_getpath(vp, path, len); } -/* Retrieve the entitlements blob for a process. +/* + * Retrieve the entitlements blob for a vnode * Returns: * EINVAL no text vnode associated with the process * EBADEXEC invalid code signing data @@ -1116,30 +1416,46 @@ csfg_get_path(struct fileglob *fg, char *path, int *len) * entitlements blob if found; or will be set to NULL/zero * if there were no entitlements. */ - int -cs_entitlements_blob_get(proc_t p, void **out_start, size_t *out_length) +cs_entitlements_blob_get_vnode(vnode_t vnode, off_t offset, void **out_start, size_t *out_length) { struct cs_blob *csblob; *out_start = NULL; *out_length = 0; - if ((p->p_csflags & CS_SIGNED) == 0) { - return 0; - } - - if (NULL == p->p_textvp) { + if (vnode == NULL) { return EINVAL; } - if ((csblob = ubc_cs_blob_get(p->p_textvp, -1, p->p_textoff)) == NULL) { + if ((csblob = ubc_cs_blob_get(vnode, -1, -1, offset)) == NULL) { return 0; } return csblob_get_entitlements(csblob, out_start, out_length); } +/* + * Retrieve the entitlements blob for a process. + * Returns: + * EINVAL no text vnode associated with the process + * EBADEXEC invalid code signing data + * 0 no error occurred + * + * On success, out_start and out_length will point to the + * entitlements blob if found; or will be set to NULL/zero + * if there were no entitlements. 
+ */ +int +cs_entitlements_blob_get(proc_t p, void **out_start, size_t *out_length) +{ + if ((p->p_csflags & CS_SIGNED) == 0) { + return 0; + } + + return cs_entitlements_blob_get_vnode(p->p_textvp, p->p_textoff, out_start, out_length); +} + /* Retrieve the cached entitlements for a process * Returns: @@ -1165,7 +1481,7 @@ cs_entitlements_dictionary_copy(proc_t p, void **entitlements) return EINVAL; } - if ((csblob = ubc_cs_blob_get(p->p_textvp, -1, p->p_textoff)) == NULL) { + if ((csblob = ubc_cs_blob_get(p->p_textvp, -1, -1, p->p_textoff)) == NULL) { return 0; } @@ -1192,7 +1508,7 @@ cs_identity_get(proc_t p) return NULL; } - if ((csblob = ubc_cs_blob_get(p->p_textvp, -1, p->p_textoff)) == NULL) { + if ((csblob = ubc_cs_blob_get(p->p_textvp, -1, -1, p->p_textoff)) == NULL) { return NULL; } @@ -1220,7 +1536,7 @@ cs_blob_get(proc_t p, void **out_start, size_t *out_length) return EINVAL; } - if ((csblob = ubc_cs_blob_get(p->p_textvp, -1, p->p_textoff)) == NULL) { + if ((csblob = ubc_cs_blob_get(p->p_textvp, -1, -1, p->p_textoff)) == NULL) { return 0; } @@ -1247,9 +1563,14 @@ cs_get_cdhash(struct proc *p) return NULL; } - if ((csblob = ubc_cs_blob_get(p->p_textvp, -1, p->p_textoff)) == NULL) { + if ((csblob = ubc_cs_blob_get(p->p_textvp, -1, -1, p->p_textoff)) == NULL) { return NULL; } + ptrauth_utils_auth_blob_generic(csblob->csb_cdhash, + sizeof(csblob->csb_cdhash), + OS_PTRAUTH_DISCRIMINATOR("cs_blob.csb_cd_signature"), + PTRAUTH_ADDR_DIVERSIFY, + csblob->csb_cdhash_signature); return csblob->csb_cdhash; } diff --git a/bsd/kern/kern_csr.c b/bsd/kern/kern_csr.c index 6423d23ca..29f2d080c 100644 --- a/bsd/kern/kern_csr.c +++ b/bsd/kern/kern_csr.c @@ -33,11 +33,233 @@ #include #include +#if CONFIG_CSR_FROM_DT + +/* + * New style CSR for non-x86 platforms, using Per-OS Security Policy + * (POSP) + */ + +#include +#include + +#if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR) +#include +#endif + +static SECURITY_READ_ONLY_LATE(csr_config_t) csr_config = 0; + +// WARNING: Used extremely early during boot. See csr_bootstrap(). +static bool +_csr_get_dt_bool(DTEntry *entry, char const *name, bool *out) +{ + const uint32_t *value; + unsigned int size; + + if (SecureDTGetProperty(*entry, name, (const void**)&value, &size) != kSuccess) { + return false; + } + + if (size != sizeof(uint32_t)) { + panic("unexpected size %xu for bool property '%s'", size, name); + } + + *out = (bool)*value; + return true; +} + +// WARNING: Used extremely early during boot. See csr_bootstrap(). +static bool +_csr_get_dt_uint64(DTEntry *entry, char const *name, uint64_t *out) +{ + const uint64_t *value; + unsigned int size; + + if (SecureDTGetProperty(*entry, name, (const void**)&value, &size) != kSuccess) { + return false; + } + + if (size != sizeof(uint64_t)) { + panic("unexpected size %xu for uint64 property '%s'", size, name); + } + + *out = *value; + return true; +} + +// WARNING: Used extremely early during boot. See csr_bootstrap(). 
+static bool +_csr_dt_string_is_equal(DTEntry *entry, const char *name, const char *str) +{ + const void *value; + unsigned size; + size_t str_size; + + str_size = strlen(str) + 1; + return entry != NULL && + SecureDTGetProperty(*entry, name, &value, &size) == kSuccess && + value != NULL && + size == str_size && + strncmp(str, value, str_size) == 0; +} + +static bool +_csr_is_recovery_environment(void) +{ + DTEntry chosen; + + return SecureDTLookupEntry(0, "/chosen", &chosen) == kSuccess && + _csr_dt_string_is_equal(&chosen, "osenvironment", "recoveryos"); +} + +static bool +_csr_is_iuou_or_iuos_device(void) +{ + DTEntry entry; + bool bool_value; + + return (SecureDTLookupEntry(0, "/chosen", &entry) == kSuccess && + (_csr_get_dt_bool(&entry, "internal-use-only-unit", &bool_value) && bool_value)) || + (SecureDTLookupEntry(0, "/chosen/manifest-properties", &entry) == kSuccess && + (_csr_get_dt_bool(&entry, "iuos", &bool_value) && bool_value)); +} + +static bool +_csr_should_allow_device_configuration(void) +{ + /* + * Allow CSR_ALLOW_DEVICE_CONFIGURATION if the device is running in a + * restore environment, or if the "csr-allow-device-configuration" + * property is set in the device tree. + */ + DTEntry chosen; + bool bool_value; + + return _csr_is_recovery_environment() || ( + SecureDTLookupEntry(0, "/chosen", &chosen) == kSuccess && + _csr_get_dt_bool(&chosen, "csr-allow-device-configuration", &bool_value) && bool_value); +} + +/* + * Initialize CSR from the Device Tree. + * + * WARNING: csr_bootstrap() is called extremely early in the kernel + * startup process in kernel_startup_bootstrap(), which happens + * before even the vm or pmap layer are initialized. + * + * It is marked as STARTUP_RANK_FIRST so that it is called before panic_init(), + * which runs during STARTUP_RANK_MIDDLE. This is necessary because panic_init() + * calls csr_check() to determine whether the device is configured to allow + * kernel debugging. + * + * Only do things here that don't require any dynamic memory (other + * than the stack). Parsing boot-args, walking the device tree and + * setting global variables is fine, most other things are not. Defer + * those other things with global variables, if necessary. + * + */ +__startup_func +static void +csr_bootstrap(void) +{ + DTEntry entry; + uint64_t uint64_value; + bool config_active = false; + bool bool_value; + + csr_config = 0; // start out fully restrictive + + if (SecureDTLookupEntry(0, "/chosen/asmb", &entry) == kSuccess && + _csr_get_dt_uint64(&entry, "lp-sip0", &uint64_value)) { + csr_config = (uint32_t)uint64_value; // Currently only 32 bits used. + config_active = true; + } + + /* + * If the device is an Internal Use Only Unit (IUOU) or if it is running a + * build that is signed with the Internal Use Only Software (IUOS) tag, then + * allow the preservation of the CSR_ALLOW_APPLE_INTERNAL bit. Otherwise, + * forcefully remove the bit on boot. + */ + if (!_csr_is_iuou_or_iuos_device()) { + csr_config &= ~CSR_ALLOW_APPLE_INTERNAL; + } else if (!config_active) { + // If there is no custom configuration present, infer the AppleInternal + // bit on IUOU or IUOS devices. + csr_config |= CSR_ALLOW_APPLE_INTERNAL; + } + + if (_csr_should_allow_device_configuration()) { + csr_config |= CSR_ALLOW_DEVICE_CONFIGURATION; + } + + // The CSR_ALLOW_UNAUTHENTICATED_ROOT flag must be synthesized from sip1 + // in the local boot policy. 
+ if (_csr_get_dt_bool(&entry, "lp-sip1", &bool_value) && bool_value) { + csr_config |= CSR_ALLOW_UNAUTHENTICATED_ROOT; + } else { + csr_config &= ~CSR_ALLOW_UNAUTHENTICATED_ROOT; + } + +#if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR) + // Check whether we have to disable CTRR. + // lp-sip2 in the local boot policy is the bit driving this, + // which csrutil also sets implicitly when e.g. requesting kernel debugging. + csr_unsafe_kernel_text = _csr_get_dt_bool(&entry, "lp-sip2", &bool_value) && bool_value; +#endif +} +STARTUP(TUNABLES, STARTUP_RANK_FIRST, csr_bootstrap); + +int +csr_get_active_config(csr_config_t * config) +{ + *config = (csr_config & CSR_VALID_FLAGS); + + return 0; +} + +int +csr_check(csr_config_t mask) +{ + csr_config_t config; + int ret = csr_get_active_config(&config); + + if (ret != 0) { + return ret; + } + + // CSR_ALLOW_KERNEL_DEBUGGER needs to be allowed when SIP is disabled + // to allow 3rd-party developers to debug their kexts. Use + // CSR_ALLOW_UNTRUSTED_KEXTS as a proxy for "SIP is disabled" on the + // grounds that you can do the same damage with a kernel debugger as + // you can with an untrusted kext. + if ((config & (CSR_ALLOW_UNTRUSTED_KEXTS | CSR_ALLOW_APPLE_INTERNAL)) != 0) { + config |= CSR_ALLOW_KERNEL_DEBUGGER; + } + + return ((config & mask) == mask) ? 0 : EPERM; +} + +#else + +/* + * Old style CSR for x86 platforms, using NVRAM values + */ + +#include + /* enable enforcement by default */ -static int csr_allow_all = 0; +static SECURITY_READ_ONLY_LATE(int) csr_allow_all = 0; -void -csr_init(void) +/* + * Initialize csr_allow_all from device boot state. + * + * Needs to be run before panic_init() since panic_init() + * calls into csr_check() and runs during STARTUP_RANK_MIDDLE. + */ +__startup_func +static void +csr_bootstrap(void) { boot_args *args = (boot_args *)PE_state.bootArgs; if (args->flags & kBootArgsFlagCSRBoot) { @@ -45,6 +267,8 @@ csr_init(void) csr_allow_all = 1; } } +STARTUP(TUNABLES, STARTUP_RANK_FIRST, csr_bootstrap); + int csr_get_active_config(csr_config_t *config) @@ -93,6 +317,8 @@ csr_check(csr_config_t mask) return ret; } +#endif /* CONFIG_CSR_FROM_DT */ + /* * Syscall stubs */ diff --git a/bsd/kern/kern_descrip.c b/bsd/kern/kern_descrip.c index c17f84143..efca619e8 100644 --- a/bsd/kern/kern_descrip.c +++ b/bsd/kern/kern_descrip.c @@ -99,50 +99,43 @@ #include #include #include - -#include - #include #include #include #include #include #include +#include + #include #include #include -#include - -#include - #include -#include +#include #include -#include +#include #if CONFIG_MACF #include #endif +#include +#include +#include + #define IPC_KMSG_FLAGS_ALLOW_IMMOVABLE_SEND 0x1 kern_return_t ipc_object_copyin(ipc_space_t, mach_port_name_t, mach_msg_type_name_t, ipc_port_t *, mach_port_context_t, mach_msg_guard_flags_t *, uint32_t); void ipc_port_release_send(ipc_port_t); -struct psemnode; -struct pshmnode; - +static void fileproc_drain(proc_t, struct fileproc *); static int finishdup(proc_t p, struct filedesc *fdp, int old, int new, int flags, int32_t *retval); -int falloc_locked(proc_t p, struct fileproc **resultfp, int *resultfd, vfs_context_t ctx, int locked); -void fg_drop(struct fileproc * fp); -void fg_free(struct fileglob *fg); -void fg_ref(struct fileproc * fp); void fileport_releasefg(struct fileglob *fg); -/* flags for close_internal_locked */ +/* flags for fp_close_and_unlock */ #define FD_DUP2RESV 1 /* We don't want these exported */ @@ -150,8 +143,7 @@ void fileport_releasefg(struct fileglob *fg); 
__private_extern__ int unlink1(vfs_context_t, vnode_t, user_addr_t, enum uio_seg, int); -static void _fdrelse(struct proc * p, int fd); - +static void fdrelse(struct proc * p, int fd); extern void file_lock_init(void); @@ -172,32 +164,169 @@ extern struct waitq select_conflict_queue; #define APFSIOC_REVERT_TO_SNAPSHOT _IOW('J', 1, u_int64_t) #endif -#define f_flag f_fglob->fg_flag -#define f_type f_fglob->fg_ops->fo_type -#define f_msgcount f_fglob->fg_msgcount -#define f_cred f_fglob->fg_cred -#define f_ops f_fglob->fg_ops -#define f_offset f_fglob->fg_offset -#define f_data f_fglob->fg_data +#define f_flag fp_glob->fg_flag +#define f_type fp_glob->fg_ops->fo_type +#define f_cred fp_glob->fg_cred +#define f_ops fp_glob->fg_ops +#define f_offset fp_glob->fg_offset +#define f_data fp_glob->fg_data #define CHECK_ADD_OVERFLOW_INT64L(x, y) \ (((((x) > 0) && ((y) > 0) && ((x) > LLONG_MAX - (y))) || \ (((x) < 0) && ((y) < 0) && ((x) < LLONG_MIN - (y)))) \ ? 1 : 0) + +ZONE_DECLARE(fg_zone, "fileglob", + sizeof(struct fileglob), ZC_NOENCRYPT | ZC_ZFREE_CLEARMEM); +ZONE_DECLARE(fp_zone, "fileproc", + sizeof(struct fileproc), ZC_NOENCRYPT | ZC_ZFREE_CLEARMEM); +ZONE_DECLARE(fdp_zone, "filedesc", + sizeof(struct filedesc), ZC_NOENCRYPT | ZC_ZFREE_CLEARMEM); + /* * Descriptor management. */ -struct fmsglist fmsghead; /* head of list of open files */ -struct fmsglist fmsg_ithead; /* head of list of open files */ int nfiles; /* actual number of open files */ +/* + * "uninitialized" ops -- ensure FILEGLOB_DTYPE(fg) always exists + */ +static const struct fileops uninitops; - +os_refgrp_decl(, f_refgrp, "files refcounts", NULL); lck_grp_attr_t * file_lck_grp_attr; lck_grp_t * file_lck_grp; lck_attr_t * file_lck_attr; -lck_mtx_t * uipc_lock; +#pragma mark fileglobs + +/*! + * @function fg_free + * + * @brief + * Free a file structure. + */ +static void +fg_free(struct fileglob *fg) +{ + os_atomic_dec(&nfiles, relaxed); + + if (fg->fg_vn_data) { + fg_vn_data_free(fg->fg_vn_data); + fg->fg_vn_data = NULL; + } + + if (IS_VALID_CRED(fg->fg_cred)) { + kauth_cred_unref(&fg->fg_cred); + } + lck_mtx_destroy(&fg->fg_lock, file_lck_grp); + +#if CONFIG_MACF + mac_file_label_destroy(fg); +#endif + zfree(fg_zone, fg); +} + +OS_ALWAYS_INLINE +void +fg_ref(struct fileglob *fg) +{ + os_ref_retain_raw(&fg->fg_count, &f_refgrp); +} + +int +fg_drop(proc_t p, struct fileglob *fg) +{ + struct vnode *vp; + struct vfs_context context; + int error = 0; + + if (fg == NULL) { + return 0; + } + + /* Set up context with cred stashed in fg */ + if (p == current_proc()) { + context.vc_thread = current_thread(); + } else { + context.vc_thread = NULL; + } + context.vc_ucred = fg->fg_cred; + + /* + * POSIX record locking dictates that any close releases ALL + * locks owned by this process. This is handled by setting + * a flag in the unlock to free ONLY locks obeying POSIX + * semantics, and not to free BSD-style file locks. + * If the descriptor was in a message, POSIX-style locks + * aren't passed with the descriptor. 
+ */ + if (p && DTYPE_VNODE == FILEGLOB_DTYPE(fg) && + (p->p_ladvflag & P_LADVLOCK)) { + struct flock lf = { + .l_whence = SEEK_SET, + .l_type = F_UNLCK, + }; + + vp = (struct vnode *)fg->fg_data; + if ((error = vnode_getwithref(vp)) == 0) { + (void)VNOP_ADVLOCK(vp, (caddr_t)p, F_UNLCK, &lf, F_POSIX, &context, NULL); + (void)vnode_put(vp); + } + } + + if (os_ref_release_raw(&fg->fg_count, &f_refgrp) == 0) { + /* + * Since we ensure that fg->fg_ops is always initialized, + * it is safe to invoke fo_close on the fg + */ + error = fo_close(fg, &context); + + fg_free(fg); + } + + return error; +} + +/* + * fg_get_vnode + * + * Description: Return vnode associated with the file structure, if + * any. The lifetime of the returned vnode is bound to + * the lifetime of the file structure. + * + * Parameters: fg Pointer to fileglob to + * inspect + * + * Returns: vnode_t + */ +vnode_t +fg_get_vnode(struct fileglob *fg) +{ + if (FILEGLOB_DTYPE(fg) == DTYPE_VNODE) { + return (vnode_t)fg->fg_data; + } else { + return NULL; + } +} + +bool +fg_sendable(struct fileglob *fg) +{ + switch (FILEGLOB_DTYPE(fg)) { + case DTYPE_VNODE: + case DTYPE_SOCKET: + case DTYPE_PIPE: + case DTYPE_PSXSHM: + case DTYPE_NETPOLICY: + return (fg->fg_lflags & FG_CONFINED) == 0; + + default: + return false; + } +} +#pragma mark fileprocs + /* * check_file_seek_range * @@ -277,8 +406,6 @@ file_lock_init(void) /* Allocate file lock attribute */ file_lck_attr = lck_attr_alloc_init(); - - uipc_lock = lck_mtx_alloc_init(file_lck_grp, file_lck_attr); } @@ -353,6 +480,56 @@ proc_fdunlock(proc_t p) lck_mtx_unlock(&p->p_fdmlock); } +struct fdt_iterator +fdt_next(proc_t p, int fd, bool only_settled) +{ + struct fdt_iterator it; + struct filedesc *fdp = p->p_fd; + struct fileproc *fp; + int nfds = min(fdp->fd_lastfile + 1, fdp->fd_nfiles); + + while (++fd < nfds) { + fp = fdp->fd_ofiles[fd]; + if (fp == NULL || fp->fp_glob == NULL) { + continue; + } + if (only_settled && (fdp->fd_ofileflags[fd] & UF_RESERVED)) { + continue; + } + it.fdti_fd = fd; + it.fdti_fp = fp; + return it; + } + + it.fdti_fd = nfds; + it.fdti_fp = NULL; + return it; +} + +struct fdt_iterator +fdt_prev(proc_t p, int fd, bool only_settled) +{ + struct fdt_iterator it; + struct filedesc *fdp = p->p_fd; + struct fileproc *fp; + + while (--fd >= 0) { + fp = fdp->fd_ofiles[fd]; + if (fp == NULL || fp->fp_glob == NULL) { + continue; + } + if (only_settled && (fdp->fd_ofileflags[fd] & UF_RESERVED)) { + continue; + } + it.fdti_fd = fd; + it.fdti_fp = fp; + return it; + } + + it.fdti_fd = -1; + it.fdti_fp = NULL; + return it; +} /* * System calls on descriptors. 
@@ -360,7 +537,7 @@ proc_fdunlock(proc_t p) /* - * getdtablesize + * sys_getdtablesize * * Description: Returns the per process maximum size of the descriptor table * @@ -373,29 +550,21 @@ proc_fdunlock(proc_t p) * *retval (modified) Size of dtable */ int -getdtablesize(proc_t p, __unused struct getdtablesize_args *uap, int32_t *retval) +sys_getdtablesize(proc_t p, __unused struct getdtablesize_args *uap, int32_t *retval) { - proc_fdlock_spin(p); - *retval = min((int)p->p_rlimit[RLIMIT_NOFILE].rlim_cur, maxfiles); - proc_fdunlock(p); + *retval = (int32_t)MIN(proc_limitgetcur(p, RLIMIT_NOFILE, TRUE), maxfilesperproc); return 0; } -void +static void procfdtbl_reservefd(struct proc * p, int fd) { p->p_fd->fd_ofiles[fd] = NULL; p->p_fd->fd_ofileflags[fd] |= UF_RESERVED; } -void -procfdtbl_markclosefd(struct proc * p, int fd) -{ - p->p_fd->fd_ofileflags[fd] |= (UF_RESERVED | UF_CLOSING); -} - void procfdtbl_releasefd(struct proc * p, int fd, struct fileproc * fp) { @@ -409,15 +578,14 @@ procfdtbl_releasefd(struct proc * p, int fd, struct fileproc * fp) } } -void +static void procfdtbl_waitfd(struct proc * p, int fd) { p->p_fd->fd_ofileflags[fd] |= UF_RESVWAIT; msleep(&p->p_fd, &p->p_fdmlock, PRIBIO, "ftbl_waitfd", NULL); } - -void +static void procfdtbl_clearfd(struct proc * p, int fd) { int waiting; @@ -431,7 +599,7 @@ procfdtbl_clearfd(struct proc * p, int fd) } /* - * _fdrelse + * fdrelse * * Description: Inline utility function to free an fd in a filedesc * @@ -445,7 +613,7 @@ procfdtbl_clearfd(struct proc * p, int fd) * the caller */ static void -_fdrelse(struct proc * p, int fd) +fdrelse(struct proc * p, int fd) { struct filedesc *fdp = p->p_fd; int nfd = 0; @@ -488,7 +656,6 @@ fd_rdwr( uio_t auio = NULL; char uio_buf[UIO_SIZEOF(1)]; struct vfs_context context = *(vfs_context_current()); - bool wrote_some = false; p = current_proc(); @@ -497,7 +664,12 @@ fd_rdwr( return error; } - if (fp->f_type != DTYPE_VNODE && fp->f_type != DTYPE_PIPE && fp->f_type != DTYPE_SOCKET) { + switch (FILEGLOB_DTYPE(fp->fp_glob)) { + case DTYPE_VNODE: + case DTYPE_PIPE: + case DTYPE_SOCKET: + break; + default: error = EINVAL; goto out; } @@ -511,7 +683,7 @@ fd_rdwr( goto out; } - context.vc_ucred = fp->f_fglob->fg_cred; + context.vc_ucred = fp->fp_glob->fg_cred; if (UIO_SEG_IS_USER_SPACE(segflg)) { spacetype = proc_is64bit(p) ? UIO_USERSPACE64 : UIO_USERSPACE32; @@ -521,7 +693,7 @@ fd_rdwr( auio = uio_createwithbuffer(1, offset, spacetype, rw, &uio_buf[0], sizeof(uio_buf)); - uio_addiov(auio, base, len); + uio_addiov(auio, (user_addr_t)base, (user_size_t)len); if (!(io_flg & IO_APPEND)) { flags = FOF_OFFSET; @@ -530,32 +702,27 @@ fd_rdwr( if (rw == UIO_WRITE) { user_ssize_t orig_resid = uio_resid(auio); error = fo_write(fp, auio, flags, &context); - wrote_some = uio_resid(auio) < orig_resid; + if (uio_resid(auio) < orig_resid) { + os_atomic_or(&fp->fp_glob->fg_flag, FWASWRITTEN, relaxed); + } } else { error = fo_read(fp, auio, flags, &context); } if (aresid) { *aresid = uio_resid(auio); - } else { - if (uio_resid(auio) && error == 0) { - error = EIO; - } + } else if (uio_resid(auio) && error == 0) { + error = EIO; } out: - if (wrote_some) { - fp_drop_written(p, fd, fp); - } else { - fp_drop(p, fd, fp, 0); - } - + fp_drop(p, fd, fp, 0); return error; } /* - * dup + * sys_dup * * Description: Duplicate a file descriptor. 
* @@ -570,7 +737,7 @@ out: * *retval (modified) The new descriptor */ int -dup(proc_t p, struct dup_args *uap, int32_t *retval) +sys_dup(proc_t p, struct dup_args *uap, int32_t *retval) { struct filedesc *fdp = p->p_fd; int old = uap->fd; @@ -597,7 +764,7 @@ dup(proc_t p, struct dup_args *uap, int32_t *retval) fp_drop(p, old, fp, 1); proc_fdunlock(p); - if (ENTR_SHOULDTRACE && fp->f_type == DTYPE_SOCKET) { + if (ENTR_SHOULDTRACE && FILEGLOB_DTYPE(fp->fp_glob) == DTYPE_SOCKET) { KERNEL_ENERGYTRACE(kEnTrActKernSocket, DBG_FUNC_START, new, 0, (int64_t)VM_KERNEL_ADDRPERM(fp->f_data)); } @@ -606,7 +773,7 @@ dup(proc_t p, struct dup_args *uap, int32_t *retval) } /* - * dup2 + * sys_dup2 * * Description: Duplicate a file descriptor to a particular value. * @@ -622,12 +789,18 @@ dup(proc_t p, struct dup_args *uap, int32_t *retval) * *retval (modified) The new descriptor */ int -dup2(proc_t p, struct dup2_args *uap, int32_t *retval) +sys_dup2(proc_t p, struct dup2_args *uap, int32_t *retval) +{ + return dup2(p, uap->from, uap->to, retval); +} + +int +dup2(proc_t p, int old, int new, int *retval) { struct filedesc *fdp = p->p_fd; - int old = uap->from, new = uap->to; - int i, error; struct fileproc *fp, *nfp; + int i, error; + rlim_t nofile = proc_limitgetcur(p, RLIMIT_NOFILE, TRUE); proc_fdlock(p); @@ -643,8 +816,8 @@ startover: return error; } if (new < 0 || - (rlim_t)new >= p->p_rlimit[RLIMIT_NOFILE].rlim_cur || - new >= maxfiles) { + (rlim_t)new >= nofile || + new >= maxfilesperproc) { fp_drop(p, old, fp, 1); proc_fdunlock(p); return EBADF; @@ -667,7 +840,7 @@ startover: } } else { closeit: - while ((fdp->fd_ofileflags[new] & UF_RESERVED) == UF_RESERVED) { + if ((fdp->fd_ofileflags[new] & UF_RESERVED) == UF_RESERVED) { fp_drop(p, old, fp, 1); procfdtbl_waitfd(p, new); #if DIAGNOSTIC @@ -676,22 +849,17 @@ closeit: goto startover; } - if ((fdp->fd_ofiles[new] != NULL) && - ((error = fp_lookup(p, new, &nfp, 1)) == 0)) { - fp_drop(p, old, fp, 1); + if ((nfp = fdp->fd_ofiles[new]) != NULL) { if (FP_ISGUARDED(nfp, GUARD_CLOSE)) { + fp_drop(p, old, fp, 1); error = fp_guard_exception(p, new, nfp, kGUARD_EXC_CLOSE); - (void) fp_drop(p, new, nfp, 1); proc_fdunlock(p); return error; } - (void)close_internal_locked(p, new, nfp, FD_DUP2RESV); -#if DIAGNOSTIC - proc_fdlock_assert(p, LCK_MTX_ASSERT_OWNED); -#endif - procfdtbl_clearfd(p, new); - goto startover; + (void)fp_close_and_unlock(p, new, nfp, FD_DUP2RESV); + proc_fdlock(p); + assert(fdp->fd_ofileflags[new] & UF_RESERVED); } else { #if DIAGNOSTIC if (fdp->fd_ofiles[new] != NULL) { @@ -700,10 +868,6 @@ closeit: #endif procfdtbl_reservefd(p, new); } - -#if DIAGNOSTIC - proc_fdlock_assert(p, LCK_MTX_ASSERT_OWNED); -#endif } #if DIAGNOSTIC if (fdp->fd_ofiles[new] != 0) { @@ -743,15 +907,17 @@ closeit: * blocking operation. */ int -fcntl(proc_t p, struct fcntl_args *uap, int32_t *retval) +sys_fcntl(proc_t p, struct fcntl_args *uap, int32_t *retval) { __pthread_testcancel(1); - return fcntl_nocancel(p, (struct fcntl_nocancel_args *)uap, retval); + return sys_fcntl_nocancel(p, (struct fcntl_nocancel_args *)uap, retval); } +#define ACCOUNT_OPENFROM_ENTITLEMENT \ + "com.apple.private.vfs.role-account-openfrom" /* - * fcntl_nocancel + * sys_fcntl_nocancel * * Description: A non-cancel-testing file control system call. * @@ -794,6 +960,7 @@ fcntl(proc_t p, struct fcntl_args *uap, int32_t *retval) * VNOP_ALLOCATE:??? * [F_SETSIZE,F_RDADVISE] * EBADF + * EINVAL * copyin:EFAULT * vnode_getwithref:??? 
* [F_RDAHEAD,F_NOCACHE] @@ -805,13 +972,14 @@ fcntl(proc_t p, struct fcntl_args *uap, int32_t *retval) * *retval (modified) fcntl return value (if any) */ int -fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval) +sys_fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval) { int fd = uap->fd; struct filedesc *fdp = p->p_fd; struct fileproc *fp; char *pop; struct vnode *vp = NULLVP; /* for AUDIT_ARG() at end */ + unsigned int oflags, nflags; int i, tmp, error, error2, flg = 0; struct flock fl = {}; struct flocktimeout fltimeout; @@ -823,10 +991,14 @@ fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval) unsigned int fflag; user_addr_t argp; boolean_t is64bit; + rlim_t nofile; + int has_entitlement = 0; AUDIT_ARG(fd, uap->fd); AUDIT_ARG(cmd, uap->cmd); + nofile = proc_limitgetcur(p, RLIMIT_NOFILE, TRUE); + proc_fdlock(p); if ((error = fp_lookup(p, fd, &fp, 1))) { proc_fdunlock(p); @@ -851,16 +1023,16 @@ fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval) argp = CAST_USER_ADDR_T((uint32_t)uap->arg); } - pop = &fdp->fd_ofileflags[fd]; - #if CONFIG_MACF - error = mac_file_check_fcntl(proc_ucred(p), fp->f_fglob, uap->cmd, + error = mac_file_check_fcntl(proc_ucred(p), fp->fp_glob, uap->cmd, uap->arg); if (error) { goto out; } #endif + pop = &fdp->fd_ofileflags[fd]; + switch (uap->cmd) { case F_DUPFD: case F_DUPFD_CLOEXEC: @@ -870,8 +1042,8 @@ fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval) } newmin = CAST_DOWN_EXPLICIT(int, uap->arg); /* arg is an int, so we won't lose bits */ AUDIT_ARG(value32, newmin); - if ((u_int)newmin >= p->p_rlimit[RLIMIT_NOFILE].rlim_cur || - newmin >= maxfiles) { + if ((rlim_t)newmin >= nofile || + newmin >= maxfilesperproc) { error = EINVAL; goto out; } @@ -888,7 +1060,7 @@ fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval) goto out; case F_SETFD: - AUDIT_ARG(value32, uap->arg); + AUDIT_ARG(value32, (uint32_t)uap->arg); if (uap->arg & FD_CLOEXEC) { *pop |= UF_EXCLOSE; } else { @@ -908,21 +1080,29 @@ fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval) goto out; case F_SETFL: - fp->f_flag &= ~FCNTLFLAGS; + // FIXME (rdar://54898652) + // + // this code is broken if fnctl(F_SETFL), ioctl() are + // called concurrently for the same fileglob. + tmp = CAST_DOWN_EXPLICIT(int, uap->arg); /* arg is an int, so we won't lose bits */ AUDIT_ARG(value32, tmp); - fp->f_flag |= FFLAGS(tmp) & FCNTLFLAGS; - tmp = fp->f_flag & FNONBLOCK; + + os_atomic_rmw_loop(&fp->f_flag, oflags, nflags, relaxed, { + nflags = oflags & ~FCNTLFLAGS; + nflags |= FFLAGS(tmp) & FCNTLFLAGS; + }); + tmp = nflags & FNONBLOCK; error = fo_ioctl(fp, FIONBIO, (caddr_t)&tmp, &context); if (error) { goto out; } - tmp = fp->f_flag & FASYNC; + tmp = nflags & FASYNC; error = fo_ioctl(fp, FIOASYNC, (caddr_t)&tmp, &context); if (!error) { goto out; } - fp->f_flag &= ~FNONBLOCK; + os_atomic_andnot(&fp->f_flag, FNONBLOCK, relaxed); tmp = 0; (void)fo_ioctl(fp, FIONBIO, (caddr_t)&tmp, &context); goto out; @@ -974,7 +1154,7 @@ fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval) error = EINVAL; #endif } else { - struct fileglob *fg = fp->f_fglob; + struct fileglob *fg = fp->fp_glob; lck_mtx_lock_spin(&fg->fg_lock); if (tmp) { @@ -997,7 +1177,7 @@ fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval) error = EINVAL; #endif } else { - *retval = (fp->f_fglob->fg_lflags & FG_NOSIGPIPE) ? 
+ *retval = (fp->fp_glob->fg_lflags & FG_NOSIGPIPE) ? 1 : 0; error = 0; } @@ -1012,12 +1192,12 @@ fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval) * mechanism to move the descriptor elsewhere will fail. */ if (CAST_DOWN_EXPLICIT(int, uap->arg)) { - struct fileglob *fg = fp->f_fglob; + struct fileglob *fg = fp->fp_glob; lck_mtx_lock_spin(&fg->fg_lock); if (fg->fg_lflags & FG_CONFINED) { error = 0; - } else if (1 != fg->fg_count) { + } else if (1 != os_ref_get_count_raw(&fg->fg_count)) { error = EAGAIN; /* go close the dup .. */ } else if (UF_FORKCLOSE == (*pop & UF_FORKCLOSE)) { fg->fg_lflags |= FG_CONFINED; @@ -1036,7 +1216,7 @@ fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval) goto out; case F_GETCONFINED: - *retval = (fp->f_fglob->fg_lflags & FG_CONFINED) ? 1 : 0; + *retval = (fp->fp_glob->fg_lflags & FG_CONFINED) ? 1 : 0; error = 0; goto out; @@ -1045,7 +1225,7 @@ fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval) case F_OFD_SETLKWTIMEOUT: case F_OFD_SETLKW: flg |= F_WAIT; - /* Fall into F_SETLK */ + OS_FALLTHROUGH; case F_SETLK: case F_OFD_SETLK: @@ -1090,7 +1270,7 @@ fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval) } #if CONFIG_MACF - error = mac_file_check_lock(proc_ucred(p), fp->f_fglob, + error = mac_file_check_lock(proc_ucred(p), fp->fp_glob, F_SETLK, &fl); if (error) { (void)vnode_put(vp); @@ -1108,7 +1288,7 @@ fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval) error = EBADF; break; } - error = VNOP_ADVLOCK(vp, (caddr_t)fp->f_fglob, + error = VNOP_ADVLOCK(vp, (caddr_t)fp->fp_glob, F_SETLK, &fl, flg, &context, timeout); break; case F_WRLCK: @@ -1116,11 +1296,11 @@ fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval) error = EBADF; break; } - error = VNOP_ADVLOCK(vp, (caddr_t)fp->f_fglob, + error = VNOP_ADVLOCK(vp, (caddr_t)fp->fp_glob, F_SETLK, &fl, flg, &context, timeout); break; case F_UNLCK: - error = VNOP_ADVLOCK(vp, (caddr_t)fp->f_fglob, + error = VNOP_ADVLOCK(vp, (caddr_t)fp->fp_glob, F_UNLCK, &fl, F_OFD_LOCK, &context, timeout); break; @@ -1130,7 +1310,7 @@ fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval) } if (0 == error && (F_RDLCK == fl.l_type || F_WRLCK == fl.l_type)) { - struct fileglob *fg = fp->f_fglob; + struct fileglob *fg = fp->fp_glob; /* * arrange F_UNLCK on last close (once @@ -1152,7 +1332,7 @@ fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval) break; } // XXX UInt32 unsafe for LP64 kernel - OSBitOrAtomic(P_LADVLOCK, &p->p_ladvflag); + os_atomic_or(&p->p_ladvflag, P_LADVLOCK, relaxed); error = VNOP_ADVLOCK(vp, (caddr_t)p, F_SETLK, &fl, flg, &context, timeout); break; @@ -1162,7 +1342,7 @@ fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval) break; } // XXX UInt32 unsafe for LP64 kernel - OSBitOrAtomic(P_LADVLOCK, &p->p_ladvflag); + os_atomic_or(&p->p_ladvflag, P_LADVLOCK, relaxed); error = VNOP_ADVLOCK(vp, (caddr_t)p, F_SETLK, &fl, flg, &context, timeout); break; @@ -1236,17 +1416,17 @@ fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval) } #if CONFIG_MACF - error = mac_file_check_lock(proc_ucred(p), fp->f_fglob, + error = mac_file_check_lock(proc_ucred(p), fp->fp_glob, uap->cmd, &fl); if (error == 0) #endif switch (uap->cmd) { case F_OFD_GETLK: - error = VNOP_ADVLOCK(vp, (caddr_t)fp->f_fglob, + error = VNOP_ADVLOCK(vp, (caddr_t)fp->fp_glob, F_GETLK, &fl, F_OFD_LOCK, &context, NULL); break; case F_OFD_GETLKPID: - error = 
VNOP_ADVLOCK(vp, (caddr_t)fp->f_fglob, + error = VNOP_ADVLOCK(vp, (caddr_t)fp->fp_glob, F_GETLKPID, &fl, F_OFD_LOCK, &context, NULL); break; default: @@ -1377,7 +1557,7 @@ fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval) } #if CONFIG_MACF - if ((error = mac_vnode_check_write(&context, fp->f_fglob->fg_cred, vp))) { + if ((error = mac_vnode_check_write(&context, fp->fp_glob->fg_cred, vp))) { (void)vnode_put(vp); goto outdrop; } @@ -1488,7 +1668,7 @@ fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval) #if CONFIG_MACF error = mac_vnode_check_truncate(&context, - fp->f_fglob->fg_cred, vp); + fp->fp_glob->fg_cred, vp); if (error) { (void)vnode_put(vp); goto outdrop; @@ -1496,21 +1676,25 @@ fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval) #endif /* * Make sure that we are root. Growing a file - * without zero filling the data is a security hole - * root would have access anyway so we'll allow it + * without zero filling the data is a security hole. */ if (!kauth_cred_issuser(kauth_cred_get())) { error = EACCES; } else { /* - * set the file size + * Require privilege to change file size without zerofill, + * else will change the file size and zerofill it. */ - error = vnode_setsize(vp, offset, IO_NOZEROFILL, - &context); + error = priv_check_cred(kauth_cred_get(), PRIV_VFS_SETSIZE, 0); + if (error == 0) { + error = vnode_setsize(vp, offset, IO_NOZEROFILL, &context); + } else { + error = vnode_setsize(vp, offset, 0, &context); + } #if CONFIG_MACF if (error == 0) { - mac_vnode_notify_truncate(&context, fp->f_fglob->fg_cred, vp); + mac_vnode_notify_truncate(&context, fp->fp_glob->fg_cred, vp); } #endif } @@ -1524,11 +1708,10 @@ fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval) goto out; } if (uap->arg) { - fp->f_fglob->fg_flag &= ~FNORDAHEAD; + os_atomic_andnot(&fp->fp_glob->fg_flag, FNORDAHEAD, relaxed); } else { - fp->f_fglob->fg_flag |= FNORDAHEAD; + os_atomic_or(&fp->fp_glob->fg_flag, FNORDAHEAD, relaxed); } - goto out; case F_NOCACHE: @@ -1537,11 +1720,10 @@ fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval) goto out; } if (uap->arg) { - fp->f_fglob->fg_flag |= FNOCACHE; + os_atomic_or(&fp->fp_glob->fg_flag, FNOCACHE, relaxed); } else { - fp->f_fglob->fg_flag &= ~FNOCACHE; + os_atomic_andnot(&fp->fp_glob->fg_flag, FNOCACHE, relaxed); } - goto out; case F_NODIRECT: @@ -1550,11 +1732,10 @@ fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval) goto out; } if (uap->arg) { - fp->f_fglob->fg_flag |= FNODIRECT; + os_atomic_or(&fp->fp_glob->fg_flag, FNODIRECT, relaxed); } else { - fp->f_fglob->fg_flag &= ~FNODIRECT; + os_atomic_andnot(&fp->fp_glob->fg_flag, FNODIRECT, relaxed); } - goto out; case F_SINGLE_WRITER: @@ -1563,11 +1744,10 @@ fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval) goto out; } if (uap->arg) { - fp->f_fglob->fg_flag |= FSINGLE_WRITER; + os_atomic_or(&fp->fp_glob->fg_flag, FSINGLE_WRITER, relaxed); } else { - fp->f_fglob->fg_flag &= ~FSINGLE_WRITER; + os_atomic_andnot(&fp->fp_glob->fg_flag, FSINGLE_WRITER, relaxed); } - goto out; case F_GLOBAL_NOCACHE: @@ -1625,6 +1805,10 @@ fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval) if ((error = copyin(argp, (caddr_t)&ra_struct, sizeof(ra_struct)))) { goto outdrop; } + if (ra_struct.ra_offset < 0 || ra_struct.ra_count < 0) { + error = EINVAL; + goto outdrop; + } if ((error = vnode_getwithref(vp)) == 0) { error = VNOP_IOCTL(vp, F_RDADVISE, 
(caddr_t)&ra_struct, 0, &context); @@ -1694,7 +1878,7 @@ fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval) goto outdrop; } - a_size = MIN((uint64_t)l2p_struct.l2p_contigbytes, SIZE_MAX); + a_size = (size_t)MIN((uint64_t)l2p_struct.l2p_contigbytes, SIZE_MAX); } else { a_size = devBlockSize; } @@ -1772,17 +1956,17 @@ fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval) proc_fdunlock(p); pathlen = MAXPATHLEN; - pathbufp = kalloc(MAXPATHLEN); + pathbufp = zalloc(ZV_NAMEI); if ((error = copyinstr(argp, pathbufp, MAXPATHLEN, &pathlen)) == 0) { if ((error = vnode_getwithref(vp)) == 0) { AUDIT_ARG(text, pathbufp); - error = vn_path_package_check(vp, pathbufp, pathlen, retval); + error = vn_path_package_check(vp, pathbufp, (int)pathlen, retval); (void)vnode_put(vp); } } - kfree(pathbufp, MAXPATHLEN); + zfree(ZV_NAMEI, pathbufp); goto outdrop; } @@ -1836,6 +2020,15 @@ fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval) goto outdrop; } + /* + * Only entitled apps may use the credentials of the thread + * that opened the file descriptor. + * Non-entitled threads will use their own context. + */ + if (IOTaskHasEntitlement(current_task(), ACCOUNT_OPENFROM_ENTITLEMENT)) { + has_entitlement = 1; + } + /* Get flags, mode and pathname arguments. */ if (IS_64BIT_PROCESS(p)) { error = copyin(argp, &fopen, sizeof(fopen)); @@ -1860,11 +2053,11 @@ fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval) /* Start the lookup relative to the file descriptor's vnode. */ NDINIT(&nd, LOOKUP, OP_OPEN, USEDVP | FOLLOW | AUDITVNPATH1, UIO_USERSPACE, - fopen.o_pathname, &context); + fopen.o_pathname, has_entitlement ? &context : vfs_context_current()); nd.ni_dvp = vp; - error = open1(&context, &nd, fopen.o_flags, &va, - fileproc_alloc_init, NULL, retval); + error = open1(has_entitlement ? &context : vfs_context_current(), + &nd, fopen.o_flags, &va, fileproc_alloc_init, NULL, retval); vnode_put(vp); break; @@ -1896,6 +2089,15 @@ fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval) goto outdrop; } + /* + * Only entitled apps may use the credentials of the thread + * that opened the file descriptor. + * Non-entitled threads will use their own context. + */ + if (IOTaskHasEntitlement(current_task(), ACCOUNT_OPENFROM_ENTITLEMENT)) { + has_entitlement = 1; + } + /* Get flags, mode and pathname arguments. */ if (IS_64BIT_PROCESS(p)) { pathname = (user_addr_t)argp; @@ -1904,7 +2106,8 @@ fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval) } /* Start the lookup relative to the file descriptor's vnode. */ - error = unlink1(&context, vp, pathname, UIO_USERSPACE, 0); + error = unlink1(has_entitlement ? &context : vfs_context_current(), + vp, pathname, UIO_USERSPACE, 0); vnode_put(vp); break; @@ -1914,6 +2117,7 @@ fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval) case F_ADDFILESIGS: case F_ADDFILESIGS_FOR_DYLD_SIM: case F_ADDFILESIGS_RETURN: + case F_ADDFILESIGS_INFO: { struct cs_blob *blob = NULL; struct user_fsignatures fs; @@ -1921,6 +2125,9 @@ fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval) vm_offset_t kernel_blob_addr; vm_size_t kernel_blob_size; int blob_add_flags = 0; + const size_t sizeof_fs = (uap->cmd == F_ADDFILESIGS_INFO ? 
+ offsetof(struct user_fsignatures, fs_cdhash /* first output element */) : + offsetof(struct user_fsignatures, fs_fsignatures_size /* compat */)); if (fp->f_type != DTYPE_VNODE) { error = EBADF; @@ -1944,8 +2151,14 @@ fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval) } if (IS_64BIT_PROCESS(p)) { - error = copyin(argp, &fs, sizeof(fs)); + error = copyin(argp, &fs, sizeof_fs); } else { + if (uap->cmd == F_ADDFILESIGS_INFO) { + error = EINVAL; + vnode_put(vp); + goto outdrop; + } + struct user32_fsignatures fs32; error = copyin(argp, &fs32, sizeof(fs32)); @@ -1962,11 +2175,11 @@ fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval) /* * First check if we have something loaded a this offset */ - blob = ubc_cs_blob_get(vp, CPU_TYPE_ANY, fs.fs_file_start); + blob = ubc_cs_blob_get(vp, CPU_TYPE_ANY, CPU_SUBTYPE_ANY, fs.fs_file_start); if (blob != NULL) { /* If this is for dyld_sim revalidate the blob */ if (uap->cmd == F_ADDFILESIGS_FOR_DYLD_SIM) { - error = ubc_cs_blob_revalidate(vp, blob, NULL, blob_add_flags); + error = ubc_cs_blob_revalidate(vp, blob, NULL, blob_add_flags, proc_platform(p)); if (error) { blob = NULL; if (error != EAGAIN) { @@ -2006,13 +2219,13 @@ fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval) error = copyin(fs.fs_blob_start, (void *) kernel_blob_addr, fs.fs_blob_size); - } else { /* F_ADDFILESIGS || F_ADDFILESIGS_RETURN || F_ADDFILESIGS_FOR_DYLD_SIM */ + } else { /* F_ADDFILESIGS || F_ADDFILESIGS_RETURN || F_ADDFILESIGS_FOR_DYLD_SIM || F_ADDFILESIGS_INFO */ int resid; error = vn_rdwr(UIO_READ, vp, (caddr_t) kernel_blob_addr, - kernel_blob_size, + (int)kernel_blob_size, fs.fs_file_start + fs.fs_blob_start, UIO_SYSSPACE, 0, @@ -2034,7 +2247,9 @@ fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval) blob = NULL; error = ubc_cs_blob_add(vp, + proc_platform(p), CPU_TYPE_ANY, /* not for a specific architecture */ + CPU_SUBTYPE_ANY, fs.fs_file_start, &kernel_blob_addr, kernel_blob_size, @@ -2057,7 +2272,8 @@ fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval) } } - if (uap->cmd == F_ADDFILESIGS_RETURN || uap->cmd == F_ADDFILESIGS_FOR_DYLD_SIM) { + if (uap->cmd == F_ADDFILESIGS_RETURN || uap->cmd == F_ADDFILESIGS_FOR_DYLD_SIM || + uap->cmd == F_ADDFILESIGS_INFO) { /* * The first element of the structure is a * off_t that happen to have the same size for @@ -2068,25 +2284,167 @@ fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval) end_offset = blob->csb_end_offset; } error = copyout(&end_offset, argp, sizeof(end_offset)); + + if (error) { + vnode_put(vp); + goto outdrop; + } + } + + if (uap->cmd == F_ADDFILESIGS_INFO) { + /* Return information. What we copy out depends on the size of the + * passed in structure, to keep binary compatibility. 
*/ + + if (fs.fs_fsignatures_size >= sizeof(struct user_fsignatures)) { + // enough room for fs_cdhash[20]+fs_hash_type + + if (blob != NULL) { + error = copyout(blob->csb_cdhash, + (vm_address_t)argp + offsetof(struct user_fsignatures, fs_cdhash), + USER_FSIGNATURES_CDHASH_LEN); + if (error) { + vnode_put(vp); + goto outdrop; + } + int hashtype = cs_hash_type(blob->csb_hashtype); + error = copyout(&hashtype, + (vm_address_t)argp + offsetof(struct user_fsignatures, fs_hash_type), + sizeof(int)); + if (error) { + vnode_put(vp); + goto outdrop; + } + } + } } (void) vnode_put(vp); break; } - case F_GETCODEDIR: - case F_FINDSIGS: { - error = ENOTSUP; - goto out; - } - case F_CHECK_LV: { - struct fileglob *fg; - fchecklv_t lv = {}; +#if CONFIG_SUPPLEMENTAL_SIGNATURES + case F_ADDFILESUPPL: + { + struct vnode *ivp; + struct cs_blob *blob = NULL; + struct user_fsupplement fs; + int orig_fd; + struct fileproc* orig_fp = NULL; + kern_return_t kr; + vm_offset_t kernel_blob_addr; + vm_size_t kernel_blob_size; - if (fp->f_type != DTYPE_VNODE) { - error = EBADF; + if (!IS_64BIT_PROCESS(p)) { + error = EINVAL; + goto out; // drop fp and unlock fds + } + + if (fp->f_type != DTYPE_VNODE) { + error = EBADF; + goto out; + } + + error = copyin(argp, &fs, sizeof(fs)); + if (error) { + goto out; + } + + orig_fd = fs.fs_orig_fd; + if ((error = fp_lookup(p, orig_fd, &orig_fp, 1))) { + printf("CODE SIGNING: Failed to find original file for supplemental signature attachment\n"); + goto out; + } + + if (orig_fp->f_type != DTYPE_VNODE) { + error = EBADF; + fp_drop(p, orig_fd, orig_fp, 1); + goto out; + } + + ivp = (struct vnode *)orig_fp->f_data; + + vp = (struct vnode *)fp->f_data; + + proc_fdunlock(p); + + error = vnode_getwithref(ivp); + if (error) { + fp_drop(p, orig_fd, orig_fp, 0); + goto outdrop; //drop fp + } + + error = vnode_getwithref(vp); + if (error) { + vnode_put(ivp); + fp_drop(p, orig_fd, orig_fp, 0); + goto outdrop; + } + + if (fs.fs_blob_size > CS_MAX_BLOB_SIZE) { + error = E2BIG; + goto dropboth; // drop iocounts on vp and ivp, drop orig_fp then drop fp via outdrop + } + + kernel_blob_size = CAST_DOWN(vm_size_t, fs.fs_blob_size); + kr = ubc_cs_blob_allocate(&kernel_blob_addr, &kernel_blob_size); + if (kr != KERN_SUCCESS) { + error = ENOMEM; + goto dropboth; + } + + int resid; + error = vn_rdwr(UIO_READ, vp, + (caddr_t)kernel_blob_addr, (int)kernel_blob_size, + fs.fs_file_start + fs.fs_blob_start, + UIO_SYSSPACE, 0, + kauth_cred_get(), &resid, p); + if ((error == 0) && resid) { + /* kernel_blob_size rounded to a page size, but signature may be at end of file */ + memset((void *)(kernel_blob_addr + (kernel_blob_size - resid)), 0x0, resid); + } + + if (error) { + ubc_cs_blob_deallocate(kernel_blob_addr, + kernel_blob_size); + goto dropboth; + } + + error = ubc_cs_blob_add_supplement(vp, ivp, fs.fs_file_start, + &kernel_blob_addr, kernel_blob_size, &blob); + + /* ubc_blob_add_supplement() has consumed kernel_blob_addr if it is zeroed */ + if (error) { + if (kernel_blob_addr) { + ubc_cs_blob_deallocate(kernel_blob_addr, + kernel_blob_size); + } + goto dropboth; + } + vnode_put(ivp); + vnode_put(vp); + fp_drop(p, orig_fd, orig_fp, 0); + break; + +dropboth: + vnode_put(ivp); + vnode_put(vp); + fp_drop(p, orig_fd, orig_fp, 0); + goto outdrop; + } +#endif + case F_GETCODEDIR: + case F_FINDSIGS: { + error = ENOTSUP; + goto out; + } + case F_CHECK_LV: { + struct fileglob *fg; + fchecklv_t lv = {}; + + if (fp->f_type != DTYPE_VNODE) { + error = EBADF; goto out; } - fg = fp->f_fglob; + fg = fp->fp_glob; 
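Most of the vnode-backed fcntl commands in this function share one shape: validate fp->f_type while the proc_fdlock is still held, cache the vnode, drop the lock before doing any I/O, take a vnode iocount, do the command-specific work, then leave through outdrop so the fileproc reference is released. The following is a minimal sketch of that shape; F_EXAMPLE_CMD is a hypothetical command used only for illustration, everything else is taken from the surrounding code.

	case F_EXAMPLE_CMD:             /* hypothetical, for illustration only */
		if (fp->f_type != DTYPE_VNODE) {
			error = EBADF;
			goto out;       /* error paths before unlocking go to "out" */
		}
		vp = (struct vnode *)fp->f_data;
		proc_fdunlock(p);       /* never touch the filesystem under the fdlock */

		if ((error = vnode_getwithref(vp)) != 0) {
			goto outdrop;   /* "outdrop" only drops the fileproc reference */
		}
		/* ... command-specific work against vp ... */
		(void)vnode_put(vp);
		goto outdrop;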
proc_fdunlock(p); if (IS_64BIT_PROCESS(p)) { @@ -2110,6 +2468,53 @@ fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval) break; } + case F_GETSIGSINFO: { + struct cs_blob *blob = NULL; + fgetsigsinfo_t sigsinfo = {}; + + if (fp->f_type != DTYPE_VNODE) { + error = EBADF; + goto out; + } + vp = (struct vnode *)fp->f_data; + proc_fdunlock(p); + + error = vnode_getwithref(vp); + if (error) { + goto outdrop; + } + + error = copyin(argp, &sigsinfo, sizeof(sigsinfo)); + if (error) { + vnode_put(vp); + goto outdrop; + } + + blob = ubc_cs_blob_get(vp, CPU_TYPE_ANY, CPU_SUBTYPE_ANY, sigsinfo.fg_file_start); + if (blob == NULL) { + error = ENOENT; + vnode_put(vp); + goto outdrop; + } + switch (sigsinfo.fg_info_request) { + case GETSIGSINFO_PLATFORM_BINARY: + sigsinfo.fg_sig_is_platform = blob->csb_platform_binary; + error = copyout(&sigsinfo.fg_sig_is_platform, + (vm_address_t)argp + offsetof(struct fgetsigsinfo, fg_sig_is_platform), + sizeof(sigsinfo.fg_sig_is_platform)); + if (error) { + vnode_put(vp); + goto outdrop; + } + break; + default: + error = EINVAL; + vnode_put(vp); + goto outdrop; + } + vnode_put(vp); + break; + } #if CONFIG_PROTECT case F_GETPROTECTIONCLASS: { if (fp->f_type != DTYPE_VNODE) { @@ -2308,7 +2713,7 @@ fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval) #if CONFIG_MACF /* Re-do MAC checks against the new FD, pass in a fake argument */ - error = mac_file_check_fcntl(proc_ucred(p), fp2->f_fglob, uap->cmd, 0); + error = mac_file_check_fcntl(proc_ucred(p), fp2->fp_glob, uap->cmd, 0); if (error) { fp_drop(p, fd2, fp2, 1); goto out; @@ -2675,6 +3080,7 @@ fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval) case FIOPINSWAP: case F_MARKDEPENDENCY: case TIOCREVOKE: + case TIOCREVOKECLEAR: error = EINVAL; goto out; default: @@ -2712,7 +3118,8 @@ fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval) memp = NULL; if (size > sizeof(stkbuf)) { - if ((memp = (caddr_t)kalloc(size)) == 0) { + memp = (caddr_t)kheap_alloc(KHEAP_TEMP, size, Z_WAITOK); + if (memp == 0) { (void)vnode_put(vp); error = ENOMEM; goto outdrop; @@ -2729,7 +3136,7 @@ fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval) if (error) { (void)vnode_put(vp); if (memp) { - kfree(memp, size); + kheap_free(KHEAP_TEMP, memp, size); } goto outdrop; } @@ -2769,7 +3176,7 @@ fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval) error = copyout(data, argp, size); } if (memp) { - kfree(memp, size); + kheap_free(KHEAP_TEMP, memp, size); } } break; @@ -2829,12 +3236,10 @@ finishdup(proc_t p, fdrelse(p, new); return EBADF; } - fg_ref(ofp); #if CONFIG_MACF - error = mac_file_check_dup(proc_ucred(p), ofp->f_fglob, new); + error = mac_file_check_dup(proc_ucred(p), ofp->fp_glob, new); if (error) { - fg_drop(ofp); fdrelse(p, new); return error; } @@ -2847,12 +3252,12 @@ finishdup(proc_t p, proc_fdlock(p); if (nfp == NULL) { - fg_drop(ofp); fdrelse(p, new); return ENOMEM; } - nfp->f_fglob = ofp->f_fglob; + fg_ref(ofp->fp_glob); + nfp->fp_glob = ofp->fp_glob; #if DIAGNOSTIC if (fdp->fd_ofiles[new] != 0) { @@ -2874,7 +3279,7 @@ finishdup(proc_t p, /* - * close + * sys_close * * Description: The implementation of the close(2) system call * @@ -2891,114 +3296,100 @@ finishdup(proc_t p, * close function */ int -close(proc_t p, struct close_args *uap, int32_t *retval) +sys_close(proc_t p, struct close_args *uap, __unused int32_t *retval) { __pthread_testcancel(1); - return close_nocancel(p, (struct close_nocancel_args 
*)uap, retval); + return close_nocancel(p, uap->fd); } +int +sys_close_nocancel(proc_t p, struct close_nocancel_args *uap, __unused int32_t *retval) +{ + return close_nocancel(p, uap->fd); +} int -close_nocancel(proc_t p, struct close_nocancel_args *uap, __unused int32_t *retval) +close_nocancel(proc_t p, int fd) { struct fileproc *fp; - int fd = uap->fd; - int error; AUDIT_SYSCLOSE(p, fd); proc_fdlock(p); - - if ((error = fp_lookup(p, fd, &fp, 1))) { + if ((fp = fp_get_noref_locked(p, fd)) == NULL) { proc_fdunlock(p); - return error; + return EBADF; } if (FP_ISGUARDED(fp, GUARD_CLOSE)) { - error = fp_guard_exception(p, fd, fp, kGUARD_EXC_CLOSE); - (void) fp_drop(p, fd, fp, 1); + int error = fp_guard_exception(p, fd, fp, kGUARD_EXC_CLOSE); proc_fdunlock(p); return error; } - error = close_internal_locked(p, fd, fp, 0); - - proc_fdunlock(p); - - return error; + return fp_close_and_unlock(p, fd, fp, 0); } -/* - * close_internal_locked - * - * Close a file descriptor. - * - * Parameters: p Process in whose per process file table - * the close is to occur - * fd fd to be closed - * fp fileproc associated with the fd - * - * Returns: 0 Success - * EBADF fd already in close wait state - * closef_locked:??? Anything returnable by a per-fileops - * close function - * - * Locks: Assumes proc_fdlock for process is held by the caller and returns - * with lock held - * - * Notes: This function may drop and reacquire this lock; it is unsafe - * for a caller to assume that other state protected by the lock - * has not been subsequently changed out from under it. - */ int -close_internal_locked(proc_t p, int fd, struct fileproc *fp, int flags) +fp_close_and_unlock(proc_t p, int fd, struct fileproc *fp, int flags) { struct filedesc *fdp = p->p_fd; - int error = 0; - int resvfd = flags & FD_DUP2RESV; - + struct fileglob *fg = fp->fp_glob; #if DIAGNOSTIC proc_fdlock_assert(p, LCK_MTX_ASSERT_OWNED); #endif - /* Keep people from using the filedesc while we are closing it */ - procfdtbl_markclosefd(p, fd); - - - if ((fp->f_flags & FP_CLOSING) == FP_CLOSING) { - panic("close_internal_locked: being called on already closing fd"); - } - - -#if DIAGNOSTIC - if ((fdp->fd_ofileflags[fd] & UF_RESERVED) == 0) { - panic("close_internal: unreserved fileflags with fd %d", fd); + /* + * Keep most people from finding the filedesc while we are closing it. + * + * Callers are: + * + * - dup2() which always waits for UF_RESERVED to clear + * + * - close/guarded_close/... who will fail the fileproc lookup if + * UF_RESERVED is set, + * + * - fdexec()/fdfree() who only run once all threads in the proc + * are properly canceled, hence no fileproc in this proc should + * be in flux. + * + * Which means that neither UF_RESERVED nor UF_CLOSING should be set. + * + * Callers of fp_get_noref_locked_with_iocount() can still find + * this entry so that they can drop their I/O reference despite + * not having remembered the fileproc pointer (namely select() and + * file_drop()). 
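In outline, the sequence the new fp_close_and_unlock() implements is the following; this is a condensed sketch of the hunks that follow, not additional code in the patch.

	p->p_fd->fd_ofileflags[fd] |= (UF_RESERVED | UF_CLOSING); /* hide the fd from lookups */
	/* kauth/MAC close notification and AIO cancellation happen here if needed */
	knote_fdclose(p, fd);           /* detach knotes while the fdlock is held */
	fileproc_drain(p, fp);          /* wait out any remaining fp_iocount holders */
	fdrelse(p, fd);                 /* or keep the slot reserved for FD_DUP2RESV */
	proc_fdunlock(p);
	fileproc_free(fp);
	return fg_drop(p, fg);          /* the last fileglob reference runs fo_close() */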
+ */ + if (p->p_fd->fd_ofileflags[fd] & (UF_RESERVED | UF_CLOSING)) { + panic("%s: called with fileproc in flux (%d/:%p)", + __func__, fd, fp); } -#endif - - fp->f_flags |= FP_CLOSING; + p->p_fd->fd_ofileflags[fd] |= (UF_RESERVED | UF_CLOSING); - if ((fp->f_flags & FP_AIOISSUED) || kauth_authorize_fileop_has_listeners()) { + if ((fp->fp_flags & FP_AIOISSUED) || kauth_authorize_fileop_has_listeners()) { proc_fdunlock(p); - if ((fp->f_type == DTYPE_VNODE) && kauth_authorize_fileop_has_listeners()) { + if ((FILEGLOB_DTYPE(fg) == DTYPE_VNODE) && kauth_authorize_fileop_has_listeners()) { /* * call out to allow 3rd party notification of close. * Ignore result of kauth_authorize_fileop call. */ - if (vnode_getwithref((vnode_t)fp->f_data) == 0) { + if (vnode_getwithref((vnode_t)fg->fg_data) == 0) { u_int fileop_flags = 0; - if ((fp->f_flags & FP_WRITTEN) != 0) { + if (fg->fg_flag & FWASWRITTEN) { fileop_flags |= KAUTH_FILEOP_CLOSE_MODIFIED; } - kauth_authorize_fileop(fp->f_fglob->fg_cred, KAUTH_FILEOP_CLOSE, - (uintptr_t)fp->f_data, (uintptr_t)fileop_flags); - vnode_put((vnode_t)fp->f_data); + kauth_authorize_fileop(fg->fg_cred, KAUTH_FILEOP_CLOSE, + (uintptr_t)fg->fg_data, (uintptr_t)fileop_flags); +#if CONFIG_MACF + mac_file_notify_close(proc_ucred(p), fp->fp_glob); +#endif + vnode_put((vnode_t)fg->fg_data); } } - if (fp->f_flags & FP_AIOISSUED) { + if (fp->fp_flags & FP_AIOISSUED) { /* * cancel all async IO requests that can be cancelled. */ @@ -3012,51 +3403,30 @@ close_internal_locked(proc_t p, int fd, struct fileproc *fp, int flags) knote_fdclose(p, fd); } - /* release the ref returned from fp_lookup before calling drain */ - (void) os_ref_release_locked(&fp->f_iocount); fileproc_drain(p, fp); - if (fp->f_flags & FP_WAITEVENT) { - (void)waitevent_close(p, fp); - } - - if (resvfd == 0) { - _fdrelse(p, fd); + if (flags & FD_DUP2RESV) { + fdp->fd_ofiles[fd] = NULL; + fdp->fd_ofileflags[fd] &= ~(UF_CLOSING | UF_EXCLOSE | UF_FORKCLOSE); } else { - procfdtbl_reservefd(p, fd); + fdrelse(p, fd); } - if (ENTR_SHOULDTRACE && fp->f_type == DTYPE_SOCKET) { - KERNEL_ENERGYTRACE(kEnTrActKernSocket, DBG_FUNC_END, - fd, 0, (int64_t)VM_KERNEL_ADDRPERM(fp->f_data)); - } + proc_fdunlock(p); - error = closef_locked(fp, fp->f_fglob, p); - if ((fp->f_flags & FP_WAITCLOSE) == FP_WAITCLOSE) { - wakeup(&fp->f_flags); + if (ENTR_SHOULDTRACE && FILEGLOB_DTYPE(fg) == DTYPE_SOCKET) { + KERNEL_ENERGYTRACE(kEnTrActKernSocket, DBG_FUNC_END, + fd, 0, (int64_t)VM_KERNEL_ADDRPERM(fg->fg_data)); } - fp->f_flags &= ~(FP_WAITCLOSE | FP_CLOSING); - - proc_fdunlock(p); fileproc_free(fp); - proc_fdlock(p); - -#if DIAGNOSTIC - if (resvfd != 0) { - if ((fdp->fd_ofileflags[fd] & UF_RESERVED) == 0) { - panic("close with reserved fd returns with freed fd:%d: proc: %p", fd, p); - } - } -#endif - - return error; + return fg_drop(p, fg); } /* - * fstat1 + * fstat * * Description: Return status information about a file descriptor. * @@ -3090,7 +3460,7 @@ close_internal_locked(proc_t p, int fd, struct fileproc *fp, int flags) * XXX fileops instead. 
*/ static int -fstat1(proc_t p, int fd, user_addr_t ub, user_addr_t xsecurity, user_addr_t xsecurity_size, int isstat64) +fstat(proc_t p, int fd, user_addr_t ub, user_addr_t xsecurity, user_addr_t xsecurity_size, int isstat64) { struct fileproc *fp; union { @@ -3133,7 +3503,7 @@ fstat1(proc_t p, int fd, user_addr_t ub, user_addr_t xsecurity, user_addr_t xsec */ if (xsecurity == USER_ADDR_NULL) { error = vn_stat_noauth((vnode_t)data, sbptr, NULL, isstat64, 0, ctx, - fp->f_fglob->fg_cred); + fp->fp_glob->fg_cred); } else { error = vn_stat((vnode_t)data, sbptr, &fsec, isstat64, 0, ctx); } @@ -3234,7 +3604,7 @@ out: /* - * fstat_extended + * sys_fstat_extended * * Description: Extended version of fstat supporting returning extended * security information @@ -3247,17 +3617,17 @@ out: * uap->xsecurity_size The size of xsecurity, or 0 * * Returns: 0 Success - * !0 Errno (see fstat1) + * !0 Errno (see fstat) */ int -fstat_extended(proc_t p, struct fstat_extended_args *uap, __unused int32_t *retval) +sys_fstat_extended(proc_t p, struct fstat_extended_args *uap, __unused int32_t *retval) { - return fstat1(p, uap->fd, uap->ub, uap->xsecurity, uap->xsecurity_size, 0); + return fstat(p, uap->fd, uap->ub, uap->xsecurity, uap->xsecurity_size, 0); } /* - * fstat + * sys_fstat * * Description: Get file status for the file associated with fd * @@ -3266,17 +3636,17 @@ fstat_extended(proc_t p, struct fstat_extended_args *uap, __unused int32_t *retv * uap->ub The user stat buffer * * Returns: 0 Success - * !0 Errno (see fstat1) + * !0 Errno (see fstat) */ int -fstat(proc_t p, struct fstat_args *uap, __unused int32_t *retval) +sys_fstat(proc_t p, struct fstat_args *uap, __unused int32_t *retval) { - return fstat1(p, uap->fd, uap->ub, 0, 0, 0); + return fstat(p, uap->fd, uap->ub, 0, 0, 0); } /* - * fstat64_extended + * sys_fstat64_extended * * Description: Extended version of fstat64 supporting returning extended * security information @@ -3289,17 +3659,17 @@ fstat(proc_t p, struct fstat_args *uap, __unused int32_t *retval) * uap->xsecurity_size The size of xsecurity, or 0 * * Returns: 0 Success - * !0 Errno (see fstat1) + * !0 Errno (see fstat) */ int -fstat64_extended(proc_t p, struct fstat64_extended_args *uap, __unused int32_t *retval) +sys_fstat64_extended(proc_t p, struct fstat64_extended_args *uap, __unused int32_t *retval) { - return fstat1(p, uap->fd, uap->ub, uap->xsecurity, uap->xsecurity_size, 1); + return fstat(p, uap->fd, uap->ub, uap->xsecurity, uap->xsecurity_size, 1); } /* - * fstat64 + * sys_fstat64 * * Description: Get 64 bit version of the file status for the file associated * with fd @@ -3309,17 +3679,17 @@ fstat64_extended(proc_t p, struct fstat64_extended_args *uap, __unused int32_t * * uap->ub The user stat buffer * * Returns: 0 Success - * !0 Errno (see fstat1) + * !0 Errno (see fstat) */ int -fstat64(proc_t p, struct fstat64_args *uap, __unused int32_t *retval) +sys_fstat64(proc_t p, struct fstat64_args *uap, __unused int32_t *retval) { - return fstat1(p, uap->fd, uap->ub, 0, 0, 1); + return fstat(p, uap->fd, uap->ub, 0, 0, 1); } /* - * fpathconf + * sys_fpathconf * * Description: Return pathconf information about a file descriptor. 
* @@ -3338,7 +3708,7 @@ fstat64(proc_t p, struct fstat64_args *uap, __unused int32_t *retval) * *retval (modified) Returned information (numeric) */ int -fpathconf(proc_t p, struct fpathconf_args *uap, int32_t *retval) +sys_fpathconf(proc_t p, struct fpathconf_args *uap, int32_t *retval) { int fd = uap->fd; struct fileproc *fp; @@ -3426,9 +3796,13 @@ fdalloc(proc_t p, int want, int *result) { struct filedesc *fdp = p->p_fd; int i; - int lim, last, numfiles, oldnfiles; + int last, numfiles, oldnfiles; struct fileproc **newofiles, **ofiles; char *newofileflags; + rlim_t lim; + rlim_t nofile = proc_limitgetcur(p, RLIMIT_NOFILE, TRUE); + + nofile = MIN(nofile, INT_MAX); /* * Search for a free descriptor starting at the higher @@ -3439,9 +3813,9 @@ fdalloc(proc_t p, int want, int *result) proc_fdlock_assert(p, LCK_MTX_ASSERT_OWNED); #endif - lim = min((int)p->p_rlimit[RLIMIT_NOFILE].rlim_cur, maxfiles); + lim = MIN(nofile, maxfilesperproc); for (;;) { - last = min(fdp->fd_nfiles, lim); + last = (int)MIN((unsigned int)fdp->fd_nfiles, (unsigned int)lim); if ((i = want) < fdp->fd_freefile) { i = fdp->fd_freefile; } @@ -3462,7 +3836,7 @@ fdalloc(proc_t p, int want, int *result) /* * No space in current array. Expand? */ - if (fdp->fd_nfiles >= lim) { + if ((rlim_t)fdp->fd_nfiles >= lim) { return EMFILE; } if (fdp->fd_nfiles < NDEXTENT) { @@ -3471,18 +3845,18 @@ fdalloc(proc_t p, int want, int *result) numfiles = 2 * fdp->fd_nfiles; } /* Enforce lim */ - if (numfiles > lim) { - numfiles = lim; + if ((rlim_t)numfiles > lim) { + numfiles = (int)lim; } proc_fdunlock(p); - MALLOC_ZONE(newofiles, struct fileproc **, + MALLOC(newofiles, struct fileproc **, numfiles * OFILESIZE, M_OFILETABL, M_WAITOK); proc_fdlock(p); if (newofiles == NULL) { return ENOMEM; } if (fdp->fd_nfiles >= numfiles) { - FREE_ZONE(newofiles, numfiles * OFILESIZE, M_OFILETABL); + FREE(newofiles, M_OFILETABL); continue; } newofileflags = (char *) &newofiles[numfiles]; @@ -3505,7 +3879,7 @@ fdalloc(proc_t p, int want, int *result) fdp->fd_ofiles = newofiles; fdp->fd_ofileflags = newofileflags; fdp->fd_nfiles = numfiles; - FREE_ZONE(ofiles, oldnfiles * OFILESIZE, M_OFILETABL); + FREE(ofiles, M_OFILETABL); fdexpand++; } } @@ -3534,9 +3908,11 @@ fdavail(proc_t p, int n) struct filedesc *fdp = p->p_fd; struct fileproc **fpp; char *flags; - int i, lim; + int i; + int lim; + rlim_t nofile = proc_limitgetcur(p, RLIMIT_NOFILE, TRUE); - lim = min((int)p->p_rlimit[RLIMIT_NOFILE].rlim_cur, maxfiles); + lim = (int)MIN(nofile, maxfilesperproc); if ((i = lim - fdp->fd_nfiles) > 0 && (n -= i) <= 0) { return 1; } @@ -3551,61 +3927,61 @@ fdavail(proc_t p, int n) } -/* - * fdrelse - * - * Description: Legacy KPI wrapper function for _fdrelse - * - * Parameters: p Process in which fd lives - * fd fd to free - * - * Returns: void - * - * Locks: Assumes proc_fdlock for process is held by the caller - */ -void -fdrelse(proc_t p, int fd) +struct fileproc * +fp_get_noref_locked(proc_t p, int fd) { - _fdrelse(p, fd); + struct filedesc *fdp = p->p_fd; + struct fileproc *fp; + + if (fd < 0 || fd >= fdp->fd_nfiles || + (fp = fdp->fd_ofiles[fd]) == NULL || + (fdp->fd_ofileflags[fd] & UF_RESERVED)) { + return NULL; + } + return fp; } +struct fileproc * +fp_get_noref_locked_with_iocount(proc_t p, int fd) +{ + struct filedesc *fdp = p->p_fd; + struct fileproc *fp = NULL; + + if (fd < 0 || fd >= fdp->fd_nfiles || + (fp = fdp->fd_ofiles[fd]) == NULL || + os_ref_get_count(&fp->fp_iocount) <= 1 || + ((fdp->fd_ofileflags[fd] & UF_RESERVED) && + !(fdp->fd_ofileflags[fd] & 
UF_CLOSING))) { + panic("%s: caller without an ioccount on fileproc (%d/:%p)", + __func__, fd, fp); + } + + return fp; +} -/* - * fdgetf_noref - * - * Description: Get the fileproc pointer for the given fd from the per process - * open file table without taking an explicit reference on it. - * - * Parameters: p Process containing fd - * fd fd to obtain fileproc for - * resultfp Pointer to pointer return area - * - * Returns: 0 Success - * EBADF - * - * Implicit returns: - * *resultfp (modified) Pointer to fileproc pointer - * - * Locks: Assumes proc_fdlock for process is held by the caller - * - * Notes: Because there is no reference explicitly taken, the returned - * fileproc pointer is only valid so long as the proc_fdlock - * remains held by the caller. - */ int -fdgetf_noref(proc_t p, int fd, struct fileproc **resultfp) +fp_get_ftype(proc_t p, int fd, file_type_t ftype, int err, struct fileproc **fpp) { struct filedesc *fdp = p->p_fd; struct fileproc *fp; + proc_fdlock_spin(p); if (fd < 0 || fd >= fdp->fd_nfiles || (fp = fdp->fd_ofiles[fd]) == NULL || (fdp->fd_ofileflags[fd] & UF_RESERVED)) { + proc_fdunlock(p); return EBADF; } - if (resultfp) { - *resultfp = fp; + + if (fp->f_type != ftype) { + proc_fdunlock(p); + return err; } + + os_ref_retain_locked(&fp->fp_iocount); + proc_fdunlock(p); + + *fpp = fp; return 0; } @@ -3615,7 +3991,7 @@ fdgetf_noref(proc_t p, int fd, struct fileproc **resultfp) * * Description: Get fileproc and vnode pointer for a given fd from the per * process open file table of the specified process, and if - * successful, increment the f_iocount + * successful, increment the fp_iocount * * Parameters: p Process in which fd lives * fd fd to get information for @@ -3640,393 +4016,70 @@ fdgetf_noref(proc_t p, int fd, struct fileproc **resultfp) int fp_getfvp(proc_t p, int fd, struct fileproc **resultfp, struct vnode **resultvp) { - struct filedesc *fdp = p->p_fd; struct fileproc *fp; + int error; - proc_fdlock_spin(p); - if (fd < 0 || fd >= fdp->fd_nfiles || - (fp = fdp->fd_ofiles[fd]) == NULL || - (fdp->fd_ofileflags[fd] & UF_RESERVED)) { - proc_fdunlock(p); - return EBADF; - } - if (fp->f_type != DTYPE_VNODE) { - proc_fdunlock(p); - return ENOTSUP; - } - os_ref_retain_locked(&fp->f_iocount); - - if (resultfp) { - *resultfp = fp; - } - if (resultvp) { - *resultvp = (struct vnode *)fp->f_data; + error = fp_get_ftype(p, fd, DTYPE_VNODE, ENOTSUP, &fp); + if (error == 0) { + if (resultfp) { + *resultfp = fp; + } + if (resultvp) { + *resultvp = (struct vnode *)fp->f_data; + } } - proc_fdunlock(p); - return 0; + return error; } /* - * fp_getfvpandvid + * fp_get_pipe_id * - * Description: Get fileproc, vnode pointer, and vid for a given fd from the - * per process open file table of the specified process, and if - * successful, increment the f_iocount + * Description: Get pipe id for a given fd from the per process open file table + * of the specified process. 
* * Parameters: p Process in which fd lives * fd fd to get information for - * resultfp Pointer to result fileproc - * pointer area, or 0 if none - * resultvp Pointer to result vnode pointer - * area, or 0 if none - * vidp Pointer to resuld vid area + * result_pipe_id Pointer to result pipe id * * Returns: 0 Success - * EBADF Bad file descriptor - * ENOTSUP fd does not refer to a vnode + * EIVAL NULL pointer arguments passed + * fp_lookup:EBADF Bad file descriptor + * ENOTSUP fd does not refer to a pipe * * Implicit returns: - * *resultfp (modified) Fileproc pointer - * *resultvp (modified) vnode pointer - * *vidp vid value - * - * Notes: The resultfp and resultvp fields are optional, and may be - * independently specified as NULL to skip returning information + * *result_pipe_id (modified) pipe id * * Locks: Internally takes and releases proc_fdlock */ int -fp_getfvpandvid(proc_t p, int fd, struct fileproc **resultfp, - struct vnode **resultvp, uint32_t *vidp) -{ - struct filedesc *fdp = p->p_fd; - struct fileproc *fp; - - proc_fdlock_spin(p); - if (fd < 0 || fd >= fdp->fd_nfiles || - (fp = fdp->fd_ofiles[fd]) == NULL || - (fdp->fd_ofileflags[fd] & UF_RESERVED)) { - proc_fdunlock(p); - return EBADF; - } - if (fp->f_type != DTYPE_VNODE) { - proc_fdunlock(p); - return ENOTSUP; - } - os_ref_retain_locked(&fp->f_iocount); - - if (resultfp) { - *resultfp = fp; - } - if (resultvp) { - *resultvp = (struct vnode *)fp->f_data; - } - if (vidp) { - *vidp = (uint32_t)vnode_vid((struct vnode *)fp->f_data); - } - proc_fdunlock(p); - - return 0; -} - - -/* - * fp_getfsock - * - * Description: Get fileproc and socket pointer for a given fd from the - * per process open file table of the specified process, and if - * successful, increment the f_iocount - * - * Parameters: p Process in which fd lives - * fd fd to get information for - * resultfp Pointer to result fileproc - * pointer area, or 0 if none - * results Pointer to result socket - * pointer area, or 0 if none - * - * Returns: EBADF The file descriptor is invalid - * EOPNOTSUPP The file descriptor is not a socket - * 0 Success - * - * Implicit returns: - * *resultfp (modified) Fileproc pointer - * *results (modified) socket pointer - * - * Notes: EOPNOTSUPP should probably be ENOTSOCK; this function is only - * ever called from accept1(). 
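The per-type getters that used to be open-coded (fp_getfsock and friends, removed below) are folded into the fp_get_ftype() helper introduced above. A former fp_getfsock() caller would now read roughly as follows; the sketch assumes the caller does not hold the fdlock, which matches how fp_get_ftype() is written, and accept1() itself is not part of this hunk.

	struct fileproc *fp;
	struct socket *so;
	int error;

	error = fp_get_ftype(p, fd, DTYPE_SOCKET, EOPNOTSUPP, &fp);
	if (error == 0) {
		so = (struct socket *)fp->f_data; /* stable while the fp_iocount is held */
		/* ... use the socket ... */
		fp_drop(p, fd, fp, 0);            /* release the fp_iocount taken above */
	}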
- */ -int -fp_getfsock(proc_t p, int fd, struct fileproc **resultfp, - struct socket **results) -{ - struct filedesc *fdp = p->p_fd; - struct fileproc *fp; - - proc_fdlock_spin(p); - if (fd < 0 || fd >= fdp->fd_nfiles || - (fp = fdp->fd_ofiles[fd]) == NULL || - (fdp->fd_ofileflags[fd] & UF_RESERVED)) { - proc_fdunlock(p); - return EBADF; - } - if (fp->f_type != DTYPE_SOCKET) { - proc_fdunlock(p); - return EOPNOTSUPP; - } - os_ref_retain_locked(&fp->f_iocount); - - if (resultfp) { - *resultfp = fp; - } - if (results) { - *results = (struct socket *)fp->f_data; - } - proc_fdunlock(p); - - return 0; -} - - -/* - * fp_getfkq - * - * Description: Get fileproc and kqueue pointer for a given fd from the - * per process open file table of the specified process, and if - * successful, increment the f_iocount - * - * Parameters: p Process in which fd lives - * fd fd to get information for - * resultfp Pointer to result fileproc - * pointer area, or 0 if none - * resultkq Pointer to result kqueue - * pointer area, or 0 if none - * - * Returns: EBADF The file descriptor is invalid - * EBADF The file descriptor is not a socket - * 0 Success - * - * Implicit returns: - * *resultfp (modified) Fileproc pointer - * *resultkq (modified) kqueue pointer - * - * Notes: The second EBADF should probably be something else to make - * the error condition distinct. - */ -int -fp_getfkq(proc_t p, int fd, struct fileproc **resultfp, - struct kqueue **resultkq) -{ - struct filedesc *fdp = p->p_fd; - struct fileproc *fp; - - proc_fdlock_spin(p); - if (fd < 0 || fd >= fdp->fd_nfiles || - (fp = fdp->fd_ofiles[fd]) == NULL || - (fdp->fd_ofileflags[fd] & UF_RESERVED)) { - proc_fdunlock(p); - return EBADF; - } - if (fp->f_type != DTYPE_KQUEUE) { - proc_fdunlock(p); - return EBADF; - } - os_ref_retain_locked(&fp->f_iocount); - - if (resultfp) { - *resultfp = fp; - } - if (resultkq) { - *resultkq = (struct kqueue *)fp->f_data; - } - proc_fdunlock(p); - - return 0; -} - - -/* - * fp_getfpshm - * - * Description: Get fileproc and POSIX shared memory pointer for a given fd - * from the per process open file table of the specified process - * and if successful, increment the f_iocount - * - * Parameters: p Process in which fd lives - * fd fd to get information for - * resultfp Pointer to result fileproc - * pointer area, or 0 if none - * resultpshm Pointer to result POSIX - * shared memory pointer - * pointer area, or 0 if none - * - * Returns: EBADF The file descriptor is invalid - * EBADF The file descriptor is not a POSIX - * shared memory area - * 0 Success - * - * Implicit returns: - * *resultfp (modified) Fileproc pointer - * *resultpshm (modified) POSIX shared memory pointer - * - * Notes: The second EBADF should probably be something else to make - * the error condition distinct. 
- */ -int -fp_getfpshm(proc_t p, int fd, struct fileproc **resultfp, - struct pshmnode **resultpshm) +fp_get_pipe_id(proc_t p, int fd, uint64_t *result_pipe_id) { - struct filedesc *fdp = p->p_fd; - struct fileproc *fp; - - proc_fdlock_spin(p); - if (fd < 0 || fd >= fdp->fd_nfiles || - (fp = fdp->fd_ofiles[fd]) == NULL || - (fdp->fd_ofileflags[fd] & UF_RESERVED)) { - proc_fdunlock(p); - return EBADF; - } - if (fp->f_type != DTYPE_PSXSHM) { - proc_fdunlock(p); - return EBADF; - } - os_ref_retain_locked(&fp->f_iocount); + struct fileproc *fp = FILEPROC_NULL; + struct fileglob *fg = NULL; + int error = 0; - if (resultfp) { - *resultfp = fp; - } - if (resultpshm) { - *resultpshm = (struct pshmnode *)fp->f_data; + if (p == NULL || result_pipe_id == NULL) { + return EINVAL; } - proc_fdunlock(p); - - return 0; -} - - -/* - * fp_getfsem - * - * Description: Get fileproc and POSIX semaphore pointer for a given fd from - * the per process open file table of the specified process - * and if successful, increment the f_iocount - * - * Parameters: p Process in which fd lives - * fd fd to get information for - * resultfp Pointer to result fileproc - * pointer area, or 0 if none - * resultpsem Pointer to result POSIX - * semaphore pointer area, or - * 0 if none - * - * Returns: EBADF The file descriptor is invalid - * EBADF The file descriptor is not a POSIX - * semaphore - * 0 Success - * - * Implicit returns: - * *resultfp (modified) Fileproc pointer - * *resultpsem (modified) POSIX semaphore pointer - * - * Notes: The second EBADF should probably be something else to make - * the error condition distinct. - * - * In order to support unnamed POSIX semaphores, the named - * POSIX semaphores will have to move out of the per-process - * open filetable, and into a global table that is shared with - * unnamed POSIX semaphores, since unnamed POSIX semaphores - * are typically used by declaring instances in shared memory, - * and there's no other way to do this without changing the - * underlying type, which would introduce binary compatibility - * issues. - */ -int -fp_getfpsem(proc_t p, int fd, struct fileproc **resultfp, - struct psemnode **resultpsem) -{ - struct filedesc *fdp = p->p_fd; - struct fileproc *fp; - proc_fdlock_spin(p); - if (fd < 0 || fd >= fdp->fd_nfiles || - (fp = fdp->fd_ofiles[fd]) == NULL || - (fdp->fd_ofileflags[fd] & UF_RESERVED)) { - proc_fdunlock(p); - return EBADF; - } - if (fp->f_type != DTYPE_PSXSEM) { + proc_fdlock(p); + if ((error = fp_lookup(p, fd, &fp, 1))) { proc_fdunlock(p); - return EBADF; - } - os_ref_retain_locked(&fp->f_iocount); - - if (resultfp) { - *resultfp = fp; - } - if (resultpsem) { - *resultpsem = (struct psemnode *)fp->f_data; + return error; } - proc_fdunlock(p); - - return 0; -} - - -/* - * fp_getfpipe - * - * Description: Get fileproc and pipe pointer for a given fd from the - * per process open file table of the specified process - * and if successful, increment the f_iocount - * - * Parameters: p Process in which fd lives - * fd fd to get information for - * resultfp Pointer to result fileproc - * pointer area, or 0 if none - * resultpipe Pointer to result pipe - * pointer area, or 0 if none - * - * Returns: EBADF The file descriptor is invalid - * EBADF The file descriptor is not a socket - * 0 Success - * - * Implicit returns: - * *resultfp (modified) Fileproc pointer - * *resultpipe (modified) pipe pointer - * - * Notes: The second EBADF should probably be something else to make - * the error condition distinct. 
- */ -int -fp_getfpipe(proc_t p, int fd, struct fileproc **resultfp, - struct pipe **resultpipe) -{ - struct filedesc *fdp = p->p_fd; - struct fileproc *fp; + fg = fp->fp_glob; - proc_fdlock_spin(p); - if (fd < 0 || fd >= fdp->fd_nfiles || - (fp = fdp->fd_ofiles[fd]) == NULL || - (fdp->fd_ofileflags[fd] & UF_RESERVED)) { - proc_fdunlock(p); - return EBADF; - } - if (fp->f_type != DTYPE_PIPE) { - proc_fdunlock(p); - return EBADF; + if (FILEGLOB_DTYPE(fg) == DTYPE_PIPE) { + *result_pipe_id = pipe_id((struct pipe*)fg->fg_data); + } else { + error = ENOTSUP; } - os_ref_retain_locked(&fp->f_iocount); - if (resultfp) { - *resultfp = fp; - } - if (resultpipe) { - *resultpipe = (struct pipe *)fp->f_data; - } + fp_drop(p, fd, fp, 1); proc_fdunlock(p); - - return 0; + return error; } @@ -4035,7 +4088,7 @@ fp_getfpipe(proc_t p, int fd, struct fileproc **resultfp, * * Description: Get fileproc pointer for a given fd from the per process * open file table of the specified process and if successful, - * increment the f_iocount + * increment the fp_iocount * * Parameters: p Process in which fd lives * fd fd to get information for @@ -4071,7 +4124,7 @@ fp_lookup(proc_t p, int fd, struct fileproc **resultfp, int locked) } return EBADF; } - os_ref_retain_locked(&fp->f_iocount); + os_ref_retain_locked(&fp->fp_iocount); if (resultfp) { *resultfp = fp; @@ -4114,29 +4167,29 @@ fp_tryswap(proc_t p, int fd, struct fileproc *nfp) } /* * At this point, our caller (change_guardedfd_np) has - * one f_iocount reference, and we just took another + * one fp_iocount reference, and we just took another * one to begin the replacement. * fp and nfp have a +1 reference from allocation. - * Thus if no-one else is looking, f_iocount should be 3. + * Thus if no-one else is looking, fp_iocount should be 3. */ - if (os_ref_get_count(&fp->f_iocount) < 3 || - 1 != os_ref_get_count(&nfp->f_iocount)) { - panic("%s: f_iocount", __func__); - } else if (3 == os_ref_get_count(&fp->f_iocount)) { + if (os_ref_get_count(&fp->fp_iocount) < 3 || + 1 != os_ref_get_count(&nfp->fp_iocount)) { + panic("%s: fp_iocount", __func__); + } else if (3 == os_ref_get_count(&fp->fp_iocount)) { /* Copy the contents of *fp, preserving the "type" of *nfp */ - nfp->f_flags = (nfp->f_flags & FP_TYPEMASK) | - (fp->f_flags & ~FP_TYPEMASK); - os_ref_retain_locked(&nfp->f_iocount); - os_ref_retain_locked(&nfp->f_iocount); - nfp->f_fglob = fp->f_fglob; - nfp->f_wset = fp->f_wset; + nfp->fp_flags = (nfp->fp_flags & FP_TYPEMASK) | + (fp->fp_flags & ~FP_TYPEMASK); + os_ref_retain_locked(&nfp->fp_iocount); + os_ref_retain_locked(&nfp->fp_iocount); + nfp->fp_glob = fp->fp_glob; + nfp->fp_wset = fp->fp_wset; p->p_fd->fd_ofiles[fd] = nfp; fp_drop(p, fd, nfp, 1); - os_ref_release_live(&fp->f_iocount); - os_ref_release_live(&fp->f_iocount); + os_ref_release_live(&fp->fp_iocount); + os_ref_release_live(&fp->fp_iocount); fileproc_free(fp); } else { /* @@ -4158,78 +4211,6 @@ fp_tryswap(proc_t p, int fd, struct fileproc *nfp) } -/* - * fp_drop_written - * - * Description: Set the FP_WRITTEN flag on the fileproc and drop the I/O - * reference previously taken by calling fp_lookup et. al. 
- * - * Parameters: p Process in which the fd lives - * fd fd associated with the fileproc - * fp fileproc on which to set the - * flag and drop the reference - * - * Returns: 0 Success - * fp_drop:EBADF Bad file descriptor - * - * Locks: This function internally takes and drops the proc_fdlock for - * the supplied process - * - * Notes: The fileproc must correspond to the fd in the supplied proc - */ -int -fp_drop_written(proc_t p, int fd, struct fileproc *fp) -{ - int error; - - proc_fdlock_spin(p); - - fp->f_flags |= FP_WRITTEN; - - error = fp_drop(p, fd, fp, 1); - - proc_fdunlock(p); - - return error; -} - - -/* - * fp_drop_event - * - * Description: Set the FP_WAITEVENT flag on the fileproc and drop the I/O - * reference previously taken by calling fp_lookup et. al. - * - * Parameters: p Process in which the fd lives - * fd fd associated with the fileproc - * fp fileproc on which to set the - * flag and drop the reference - * - * Returns: 0 Success - * fp_drop:EBADF Bad file descriptor - * - * Locks: This function internally takes and drops the proc_fdlock for - * the supplied process - * - * Notes: The fileproc must correspond to the fd in the supplied proc - */ -int -fp_drop_event(proc_t p, int fd, struct fileproc *fp) -{ - int error; - - proc_fdlock_spin(p); - - fp->f_flags |= FP_WAITEVENT; - - error = fp_drop(p, fd, fp, 1); - - proc_fdunlock(p); - - return error; -} - - /* * fp_drop * @@ -4272,9 +4253,9 @@ fp_drop(proc_t p, int fd, struct fileproc *fp, int locked) return EBADF; } - if (1 == os_ref_release_locked(&fp->f_iocount)) { - if (fp->f_flags & FP_SELCONFLICT) { - fp->f_flags &= ~FP_SELCONFLICT; + if (1 == os_ref_release_locked(&fp->fp_iocount)) { + if (fp->fp_flags & FP_SELCONFLICT) { + fp->fp_flags &= ~FP_SELCONFLICT; } if (p->p_fpdrainwait) { @@ -4313,7 +4294,7 @@ fp_drop(proc_t p, int fd, struct fileproc *fp, int locked) * Locks: This function internally takes and drops the proc_fdlock for * the current process * - * Notes: If successful, this function increments the f_iocount on the + * Notes: If successful, this function increments the fp_iocount on the * fd's corresponding fileproc. * * The fileproc referenced is not returned; because of this, care @@ -4322,7 +4303,7 @@ fp_drop(proc_t p, int fd, struct fileproc *fp, int locked) * not be recoverable from the vnode, if there is a subsequent * close that destroys the associate fileproc. The caller should * therefore retain their own reference on the fileproc so that - * the f_iocount can be dropped subsequently. Failure to do this + * the fp_iocount can be dropped subsequently. Failure to do this * can result in the returned pointer immediately becoming invalid * following the call. 
* @@ -4331,29 +4312,9 @@ fp_drop(proc_t p, int fd, struct fileproc *fp, int locked) int file_vnode(int fd, struct vnode **vpp) { - proc_t p = current_proc(); - struct fileproc *fp; - int error; - - proc_fdlock_spin(p); - if ((error = fp_lookup(p, fd, &fp, 1))) { - proc_fdunlock(p); - return error; - } - if (fp->f_type != DTYPE_VNODE) { - fp_drop(p, fd, fp, 1); - proc_fdunlock(p); - return EINVAL; - } - if (vpp != NULL) { - *vpp = (struct vnode *)fp->f_data; - } - proc_fdunlock(p); - - return 0; + return file_vnode_withvid(fd, vpp, NULL); } - /* * file_vnode_withvid * @@ -4375,7 +4336,7 @@ file_vnode(int fd, struct vnode **vpp) * Locks: This function internally takes and drops the proc_fdlock for * the current process * - * Notes: If successful, this function increments the f_iocount on the + * Notes: If successful, this function increments the fp_iocount on the * fd's corresponding fileproc. * * The fileproc referenced is not returned; because of this, care @@ -4384,45 +4345,30 @@ file_vnode(int fd, struct vnode **vpp) * not be recoverable from the vnode, if there is a subsequent * close that destroys the associate fileproc. The caller should * therefore retain their own reference on the fileproc so that - * the f_iocount can be dropped subsequently. Failure to do this + * the fp_iocount can be dropped subsequently. Failure to do this * can result in the returned pointer immediately becoming invalid * following the call. * * Use of this function is discouraged. */ int -file_vnode_withvid(int fd, struct vnode **vpp, uint32_t * vidp) +file_vnode_withvid(int fd, struct vnode **vpp, uint32_t *vidp) { - proc_t p = current_proc(); struct fileproc *fp; - vnode_t vp; int error; - proc_fdlock_spin(p); - if ((error = fp_lookup(p, fd, &fp, 1))) { - proc_fdunlock(p); - return error; - } - if (fp->f_type != DTYPE_VNODE) { - fp_drop(p, fd, fp, 1); - proc_fdunlock(p); - return EINVAL; - } - vp = (struct vnode *)fp->f_data; - if (vpp != NULL) { - *vpp = vp; - } - - if ((vidp != NULL) && (vp != NULLVP)) { - *vidp = (uint32_t)vp->v_id; + error = fp_get_ftype(current_proc(), fd, DTYPE_VNODE, EINVAL, &fp); + if (error == 0) { + if (vpp) { + *vpp = fp->f_data; + } + if (vidp) { + *vidp = vnode_vid(fp->f_data); + } } - - proc_fdunlock(p); - - return 0; + return error; } - /* * file_socket * @@ -4442,7 +4388,7 @@ file_vnode_withvid(int fd, struct vnode **vpp, uint32_t * vidp) * Locks: This function internally takes and drops the proc_fdlock for * the current process * - * Notes: If successful, this function increments the f_iocount on the + * Notes: If successful, this function increments the fp_iocount on the * fd's corresponding fileproc. * * The fileproc referenced is not returned; because of this, care @@ -4451,7 +4397,7 @@ file_vnode_withvid(int fd, struct vnode **vpp, uint32_t * vidp) * not be recoverable from the socket, if there is a subsequent * close that destroys the associate fileproc. The caller should * therefore retain their own reference on the fileproc so that - * the f_iocount can be dropped subsequently. Failure to do this + * the fp_iocount can be dropped subsequently. Failure to do this * can result in the returned pointer immediately becoming invalid * following the call. 
* @@ -4460,24 +4406,16 @@ file_vnode_withvid(int fd, struct vnode **vpp, uint32_t * vidp) int file_socket(int fd, struct socket **sp) { - proc_t p = current_proc(); struct fileproc *fp; int error; - proc_fdlock_spin(p); - if ((error = fp_lookup(p, fd, &fp, 1))) { - proc_fdunlock(p); - return error; - } - if (fp->f_type != DTYPE_SOCKET) { - fp_drop(p, fd, fp, 1); - proc_fdunlock(p); - return ENOTSOCK; + error = fp_get_ftype(current_proc(), fd, DTYPE_SOCKET, ENOTSOCK, &fp); + if (error == 0) { + if (sp) { + *sp = (struct socket *)fp->f_data; + } } - *sp = (struct socket *)fp->f_data; - proc_fdunlock(p); - - return 0; + return error; } @@ -4500,27 +4438,23 @@ file_socket(int fd, struct socket **sp) * * Locks: This function internally takes and drops the proc_fdlock for * the current process - * - * Notes: This function will internally increment and decrement the - * f_iocount of the fileproc as part of its operation. */ int file_flags(int fd, int *flags) { proc_t p = current_proc(); struct fileproc *fp; - int error; + int error = EBADF; proc_fdlock_spin(p); - if ((error = fp_lookup(p, fd, &fp, 1))) { - proc_fdunlock(p); - return error; + fp = fp_get_noref_locked(p, fd); + if (fp) { + *flags = (int)fp->f_flag; + error = 0; } - *flags = (int)fp->f_flag; - fp_drop(p, fd, fp, 1); proc_fdunlock(p); - return 0; + return error; } @@ -4535,28 +4469,17 @@ file_flags(int fd, int *flags) * to be dropped * * Returns: 0 Success - * EBADF Bad file descriptor * * Description: Given an fd, look it up in the current process's per process - * open file table, and drop it's fileproc's f_iocount by one + * open file table, and drop it's fileproc's fp_iocount by one * * Notes: This is intended as a corresponding operation to the functions * file_vnode() and file_socket() operations. * - * Technically, the close reference is supposed to be protected - * by a fileproc_drain(), however, a drain will only block if - * the fd refers to a character device, and that device has had - * preparefileread() called on it. If it refers to something - * other than a character device, then the drain will occur and - * block each close attempt, rather than merely the last close. - * - * Since it's possible for an fd that refers to a character - * device to have an intermediate close followed by an open to - * cause a different file to correspond to that descriptor, - * unless there was a cautionary reference taken on the fileproc, - * this is an inherently unsafe function. This happens in the - * case where multiple fd's in a process refer to the same - * character device (e.g. stdin/out/err pointing to a tty, etc.). + * If the caller can't possibly hold an I/O reference, + * this function will panic the kernel rather than allowing + * for memory corruption. Callers should always call this + * because they acquired an I/O reference on this file before. * * Use of this function is discouraged. 
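The point of the rewritten Notes: file_drop() now asserts, via fp_get_noref_locked_with_iocount(), that the caller really does hold an I/O reference, so its only legitimate use is as the closing half of a file_vnode()/file_socket() pairing. A minimal sketch of that pairing, assuming a plain vnode-backed fd:

	vnode_t vp;
	int error;

	error = file_vnode(fd, &vp);    /* takes an fp_iocount on success */
	if (error == 0) {
		/*
		 * Use vp here; take a vnode iocount of your own if it must
		 * remain valid after file_drop().
		 */
		(void)file_drop(fd);    /* releases the fp_iocount */
	}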
*/ @@ -4568,17 +4491,11 @@ file_drop(int fd) int needwakeup = 0; proc_fdlock_spin(p); - if (fd < 0 || fd >= p->p_fd->fd_nfiles || - (fp = p->p_fd->fd_ofiles[fd]) == NULL || - ((p->p_fd->fd_ofileflags[fd] & UF_RESERVED) && - !(p->p_fd->fd_ofileflags[fd] & UF_CLOSING))) { - proc_fdunlock(p); - return EBADF; - } + fp = fp_get_noref_locked_with_iocount(p, fd); - if (1 == os_ref_release_locked(&fp->f_iocount)) { - if (fp->f_flags & FP_SELCONFLICT) { - fp->f_flags &= ~FP_SELCONFLICT; + if (1 == os_ref_release_locked(&fp->fp_iocount)) { + if (fp->fp_flags & FP_SELCONFLICT) { + fp->fp_flags &= ~FP_SELCONFLICT; } if (p->p_fpdrainwait) { @@ -4595,71 +4512,9 @@ file_drop(int fd) } -static int falloc_withalloc_locked(proc_t, struct fileproc **, int *, - vfs_context_t, struct fileproc * (*)(void *), void *, int); - -/* - * falloc - * - * Description: Allocate an entry in the per process open file table and - * return the corresponding fileproc and fd. - * - * Parameters: p The process in whose open file - * table the fd is to be allocated - * resultfp Pointer to fileproc pointer - * return area - * resultfd Pointer to fd return area - * ctx VFS context - * - * Returns: 0 Success - * falloc:ENFILE Too many open files in system - * falloc:EMFILE Too many open files in process - * falloc:ENOMEM M_FILEPROC or M_FILEGLOB zone - * exhausted - * - * Implicit returns: - * *resultfd (modified) Returned fileproc pointer - * *resultfd (modified) Returned fd - * - * Locks: This function takes and drops the proc_fdlock; if this lock - * is already held, use falloc_locked() instead. - * - * Notes: This function takes separate process and context arguments - * solely to support kern_exec.c; otherwise, it would take - * neither, and expect falloc_locked() to use the - * vfs_context_current() routine internally. - */ -int -falloc(proc_t p, struct fileproc **resultfp, int *resultfd, vfs_context_t ctx) -{ - return falloc_withalloc(p, resultfp, resultfd, ctx, - fileproc_alloc_init, NULL); -} - -/* - * Like falloc, but including the fileproc allocator and create-args - */ -int -falloc_withalloc(proc_t p, struct fileproc **resultfp, int *resultfd, - vfs_context_t ctx, fp_allocfn_t fp_zalloc, void *arg) -{ - int error; - - proc_fdlock(p); - error = falloc_withalloc_locked(p, - resultfp, resultfd, ctx, fp_zalloc, arg, 1); - proc_fdunlock(p); - - return error; -} - -/* - * "uninitialized" ops -- ensure fg->fg_ops->fo_type always exists - */ -static const struct fileops uninitops; /* - * falloc_locked + * falloc_withalloc * * Create a new open file structure and allocate * a file descriptor for the process that refers to it. @@ -4675,70 +4530,50 @@ static const struct fileops uninitops; * return area * resultfd Pointer to fd return area * ctx VFS context - * locked Flag to indicate whether the - * caller holds proc_fdlock + * fp_zalloc fileproc allocator to use + * crarg allocator args * * Returns: 0 Success * ENFILE Too many open files in system * fdalloc:EMFILE Too many open files in process - * ENOMEM M_FILEPROC or M_FILEGLOB zone + * fdalloc:ENOMEM M_OFILETABL zone exhausted + * ENOMEM fp_zone or fg_zone zone * exhausted - * fdalloc:ENOMEM * * Implicit returns: * *resultfd (modified) Returned fileproc pointer * *resultfd (modified) Returned fd * - * Locks: If the parameter 'locked' is zero, this function takes and - * drops the proc_fdlock; if non-zero, the caller must hold the - * lock. - * - * Notes: If you intend to use a non-zero 'locked' parameter, use the - * utility function falloc() instead. 
- * - * This function takes separate process and context arguments + * Notes: This function takes separate process and context arguments * solely to support kern_exec.c; otherwise, it would take * neither, and use the vfs_context_current() routine internally. */ int -falloc_locked(proc_t p, struct fileproc **resultfp, int *resultfd, - vfs_context_t ctx, int locked) -{ - return falloc_withalloc_locked(p, resultfp, resultfd, ctx, - fileproc_alloc_init, NULL, locked); -} - -static int -falloc_withalloc_locked(proc_t p, struct fileproc **resultfp, int *resultfd, - vfs_context_t ctx, fp_allocfn_t fp_zalloc, void *crarg, - int locked) +falloc_withalloc(proc_t p, struct fileproc **resultfp, int *resultfd, + vfs_context_t ctx, fp_allocfn_t fp_zalloc, void *crarg) { struct fileproc *fp; struct fileglob *fg; int error, nfd; + /* Make sure we don't go beyond the system-wide limit */ if (nfiles >= maxfiles) { tablefull("file"); return ENFILE; } - if (!locked) { - proc_fdlock(p); - } + proc_fdlock(p); + /* fdalloc will make sure the process stays below per-process limit */ if ((error = fdalloc(p, 0, &nfd))) { - if (!locked) { - proc_fdunlock(p); - } + proc_fdunlock(p); return error; } #if CONFIG_MACF error = mac_file_check_create(proc_ucred(p)); if (error) { - if (!locked) { - proc_fdunlock(p); - } + proc_fdunlock(p); return error; } #endif @@ -4753,113 +4588,50 @@ falloc_withalloc_locked(proc_t p, struct fileproc **resultfp, int *resultfd, fp = (*fp_zalloc)(crarg); if (fp == NULL) { - if (locked) { - proc_fdlock(p); - } - return ENOMEM; - } - MALLOC_ZONE(fg, struct fileglob *, sizeof(struct fileglob), M_FILEGLOB, M_WAITOK); - if (fg == NULL) { - fileproc_free(fp); - if (locked) { - proc_fdlock(p); - } return ENOMEM; } - bzero(fg, sizeof(struct fileglob)); + fg = zalloc_flags(fg_zone, Z_WAITOK | Z_ZERO); lck_mtx_init(&fg->fg_lock, file_lck_grp, file_lck_attr); - os_ref_retain_locked(&fp->f_iocount); - fg->fg_count = 1; + os_ref_retain_locked(&fp->fp_iocount); + os_ref_init_raw(&fg->fg_count, &f_refgrp); fg->fg_ops = &uninitops; - fp->f_fglob = fg; + fp->fp_glob = fg; #if CONFIG_MACF mac_file_label_init(fg); #endif kauth_cred_ref(ctx->vc_ucred); - proc_fdlock(p); - fp->f_cred = ctx->vc_ucred; #if CONFIG_MACF mac_file_label_associate(fp->f_cred, fg); #endif - OSAddAtomic(1, &nfiles); - - p->p_fd->fd_ofiles[nfd] = fp; - - if (!locked) { - proc_fdunlock(p); - } - - if (resultfp) { - *resultfp = fp; - } - if (resultfd) { - *resultfd = nfd; - } - - return 0; -} + os_atomic_inc(&nfiles, relaxed); + proc_fdlock(p); -/* - * fg_free - * - * Description: Free a file structure; drop the global open file count, and - * drop the credential reference, if the fileglob has one, and - * destroy the instance mutex before freeing - * - * Parameters: fg Pointer to fileglob to be - * freed - * - * Returns: void - */ -void -fg_free(struct fileglob *fg) -{ - OSAddAtomic(-1, &nfiles); + p->p_fd->fd_ofiles[nfd] = fp; - if (fg->fg_vn_data) { - fg_vn_data_free(fg->fg_vn_data); - fg->fg_vn_data = NULL; - } + proc_fdunlock(p); - if (IS_VALID_CRED(fg->fg_cred)) { - kauth_cred_unref(&fg->fg_cred); + if (resultfp) { + *resultfp = fp; + } + if (resultfd) { + *resultfd = nfd; } - lck_mtx_destroy(&fg->fg_lock, file_lck_grp); -#if CONFIG_MACF - mac_file_label_destroy(fg); -#endif - FREE_ZONE(fg, sizeof *fg, M_FILEGLOB); + return 0; } - -/* - * fg_get_vnode - * - * Description: Return vnode associated with the file structure, if - * any. The lifetime of the returned vnode is bound to - * the lifetime of the file structure. 
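One change worth calling out in falloc_withalloc() above: the hand-rolled fileglob allocation is replaced by a zeroing zone allocation and an os_refcnt-managed reference count. Both halves of this sketch are lifted from the hunk and shown side by side only for contrast.

	/* before: */
	MALLOC_ZONE(fg, struct fileglob *, sizeof(struct fileglob), M_FILEGLOB, M_WAITOK);
	bzero(fg, sizeof(struct fileglob));
	fg->fg_count = 1;

	/* after: */
	fg = zalloc_flags(fg_zone, Z_WAITOK | Z_ZERO);
	os_ref_init_raw(&fg->fg_count, &f_refgrp);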
- * - * Parameters: fg Pointer to fileglob to - * inspect - * - * Returns: vnode_t - */ -vnode_t -fg_get_vnode(struct fileglob *fg) +int +falloc(proc_t p, struct fileproc **resultfp, int *resultfd, vfs_context_t ctx) { - if (FILEGLOB_DTYPE(fg) == DTYPE_VNODE) { - return (vnode_t)fg->fg_data; - } else { - return NULL; - } + return falloc_withalloc(p, resultfp, resultfd, ctx, + fileproc_alloc_init, NULL); } /* @@ -4931,32 +4703,11 @@ fdexec(proc_t p, short flags, int self_exec) if ( ((*flagp & (UF_RESERVED | UF_EXCLOSE)) == UF_EXCLOSE) #if CONFIG_MACF - || (fp && mac_file_check_inherit(proc_ucred(p), fp->f_fglob)) + || (fp && mac_file_check_inherit(proc_ucred(p), fp->fp_glob)) #endif ) { - procfdtbl_clearfd(p, i); - if (i == fdp->fd_lastfile && i > 0) { - fdp->fd_lastfile--; - } - if (i < fdp->fd_freefile) { - fdp->fd_freefile = i; - } - - /* - * Wait for any third party viewers (e.g., lsof) - * to release their references to this fileproc. - */ - while (os_ref_get_count(&fp->f_iocount) > 1) { - p->p_fpdrainwait = 1; - msleep(&p->p_fpdrainwait, &p->p_fdmlock, PRIBIO, - "fpdrain", NULL); - } - if (fp->f_flags & FP_WAITEVENT) { - (void)waitevent_close(p, fp); - } - closef_locked(fp, fp->f_fglob, p); - - fileproc_free(fp); + fp_close_and_unlock(p, i, fp, 0); + proc_fdlock(p); } } @@ -5019,11 +4770,7 @@ fdcopy(proc_t p, vnode_t uth_cdir) struct fileproc *ofp, *fp; vnode_t v_dir; - MALLOC_ZONE(newfdp, struct filedesc *, - sizeof(*newfdp), M_FILEDESC, M_WAITOK); - if (newfdp == NULL) { - return NULL; - } + newfdp = zalloc(fdp_zone); proc_fdlock(p); @@ -5097,7 +4844,7 @@ fdcopy(proc_t p, vnode_t uth_cdir) if (newfdp->fd_cdir) { vnode_rele(newfdp->fd_cdir); } - FREE_ZONE(newfdp, sizeof *newfdp, M_FILEDESC); + zfree(fdp_zone, newfdp); return NULL; } @@ -5122,7 +4869,7 @@ fdcopy(proc_t p, vnode_t uth_cdir) } proc_fdunlock(p); - MALLOC_ZONE(newfdp->fd_ofiles, struct fileproc **, + MALLOC(newfdp->fd_ofiles, struct fileproc **, i * OFILESIZE, M_OFILETABL, M_WAITOK); if (newfdp->fd_ofiles == NULL) { if (newfdp->fd_cdir) { @@ -5132,7 +4879,7 @@ fdcopy(proc_t p, vnode_t uth_cdir) vnode_rele(newfdp->fd_rdir); } - FREE_ZONE(newfdp, sizeof(*newfdp), M_FILEDESC); + zfree(fdp_zone, newfdp); return NULL; } (void) memset(newfdp->fd_ofiles, 0, i * OFILESIZE); @@ -5154,7 +4901,7 @@ fdcopy(proc_t p, vnode_t uth_cdir) flags = &newfdp->fd_ofileflags[newfdp->fd_lastfile]; for (i = newfdp->fd_lastfile; i >= 0; i--, fpp--, flags--) { if ((ofp = *fpp) != NULL && - 0 == (ofp->f_fglob->fg_lflags & FG_CONFINED) && + 0 == (ofp->fp_glob->fg_lflags & FG_CONFINED) && 0 == (*flags & (UF_FORKCLOSE | UF_RESERVED))) { #if DEBUG if (FILEPROC_TYPE(ofp) != FTYPE_SIMPLE) { @@ -5169,10 +4916,10 @@ fdcopy(proc_t p, vnode_t uth_cdir) */ *fpp = NULL; } else { - fp->f_flags |= - (ofp->f_flags & ~FP_TYPEMASK); - fp->f_fglob = ofp->f_fglob; - (void)fg_ref(fp); + fp->fp_flags |= + (ofp->fp_flags & ~FP_TYPEMASK); + fp->fp_glob = ofp->fp_glob; + fg_ref(fp->fp_glob); *fpp = fp; } } else { @@ -5265,18 +5012,11 @@ fdfree(proc_t p) if (fdp->fd_ofileflags[i] & UF_RESERVED) { panic("fdfree: found fp with UF_RESERVED"); } - - fileproc_drain(p, fp); - procfdtbl_reservefd(p, i); - - if (fp->f_flags & FP_WAITEVENT) { - (void)waitevent_close(p, fp); - } - (void) closef_locked(fp, fp->f_fglob, p); - fileproc_free(fp); + fp_close_and_unlock(p, i, fp, 0); + proc_fdlock(p); } } - FREE_ZONE(fdp->fd_ofiles, fdp->fd_nfiles * OFILESIZE, M_OFILETABL); + FREE(fdp->fd_ofiles, M_OFILETABL); fdp->fd_ofiles = NULL; fdp->fd_nfiles = 0; } @@ -5306,117 +5046,15 @@ 
fdfree(proc_t p) for (uint32_t j = 0; j <= fdp->fd_kqhashmask; j++) { assert(LIST_EMPTY(&fdp->fd_kqhash[j])); } - FREE(fdp->fd_kqhash, M_KQUEUE); + hashdestroy(fdp->fd_kqhash, M_KQUEUE, fdp->fd_kqhashmask); } lck_mtx_destroy(&fdp->fd_kqhashlock, proc_kqhashlock_grp); lck_mtx_destroy(&fdp->fd_knhashlock, proc_knhashlock_grp); - FREE_ZONE(fdp, sizeof(*fdp), M_FILEDESC); -} - -/* - * closef_locked - * - * Description: Internal form of closef; called with proc_fdlock held - * - * Parameters: fp Pointer to fileproc for fd - * fg Pointer to fileglob for fd - * p Pointer to proc structure - * - * Returns: 0 Success - * closef_finish:??? Anything returnable by a per-fileops - * close function - * - * Note: Decrements reference count on file structure; if this was the - * last reference, then closef_finish() is called - * - * p and fp are allowed to be NULL when closing a file that was - * being passed in a message (but only if we are called when this - * is NOT the last reference). - */ -int -closef_locked(struct fileproc *fp, struct fileglob *fg, proc_t p) -{ - struct vnode *vp; - struct flock lf; - struct vfs_context context; - int error; - - if (fg == NULL) { - return 0; - } - - /* Set up context with cred stashed in fg */ - if (p == current_proc()) { - context.vc_thread = current_thread(); - } else { - context.vc_thread = NULL; - } - context.vc_ucred = fg->fg_cred; - - /* - * POSIX record locking dictates that any close releases ALL - * locks owned by this process. This is handled by setting - * a flag in the unlock to free ONLY locks obeying POSIX - * semantics, and not to free BSD-style file locks. - * If the descriptor was in a message, POSIX-style locks - * aren't passed with the descriptor. - */ - if (p && (p->p_ladvflag & P_LADVLOCK) && - DTYPE_VNODE == FILEGLOB_DTYPE(fg)) { - proc_fdunlock(p); - - lf.l_whence = SEEK_SET; - lf.l_start = 0; - lf.l_len = 0; - lf.l_type = F_UNLCK; - vp = (struct vnode *)fg->fg_data; - - if ((error = vnode_getwithref(vp)) == 0) { - (void) VNOP_ADVLOCK(vp, (caddr_t)p, F_UNLCK, &lf, F_POSIX, &context, NULL); - (void)vnode_put(vp); - } - proc_fdlock(p); - } - lck_mtx_lock_spin(&fg->fg_lock); - fg->fg_count--; - - if (fg->fg_count > 0) { - lck_mtx_unlock(&fg->fg_lock); - return 0; - } -#if DIAGNOSTIC - if (fg->fg_count != 0) { - panic("fg %p: being freed with bad fg_count (%d)", fg, fg->fg_count); - } -#endif - - if (fp && (fp->f_flags & FP_WRITTEN)) { - fg->fg_flag |= FWASWRITTEN; - } - - fg->fg_lflags |= FG_TERM; - lck_mtx_unlock(&fg->fg_lock); - - if (p) { - proc_fdunlock(p); - } - - /* Since we ensure that fg->fg_ops is always initialized, - * it is safe to invoke fo_close on the fg */ - error = fo_close(fg, &context); - - fg_free(fg); - - if (p) { - proc_fdlock(p); - } - - return error; + zfree(fdp_zone, fdp); } - /* * fileproc_drain * @@ -5442,24 +5080,37 @@ void fileproc_drain(proc_t p, struct fileproc * fp) { struct vfs_context context; + thread_t thread; + bool is_current_proc; + + is_current_proc = (p == current_proc()); - context.vc_thread = proc_thread(p); /* XXX */ - context.vc_ucred = fp->f_fglob->fg_cred; + if (!is_current_proc) { + proc_lock(p); + thread = proc_thread(p); /* XXX */ + thread_reference(thread); + proc_unlock(p); + } else { + thread = current_thread(); + } + + context.vc_thread = thread; + context.vc_ucred = fp->fp_glob->fg_cred; /* Set the vflag for drain */ fileproc_modify_vflags(fp, FPV_DRAIN, FALSE); - while (os_ref_get_count(&fp->f_iocount) > 1) { + while (os_ref_get_count(&fp->fp_iocount) > 1) { 
lck_mtx_convert_spin(&p->p_fdmlock); fo_drain(fp, &context); - if ((fp->f_flags & FP_INSELECT) == FP_INSELECT) { - if (waitq_wakeup64_all((struct waitq *)fp->f_wset, NO_EVENT64, + if ((fp->fp_flags & FP_INSELECT) == FP_INSELECT) { + if (waitq_wakeup64_all((struct waitq *)fp->fp_wset, NO_EVENT64, THREAD_INTERRUPTED, WAITQ_ALL_PRIORITIES) == KERN_INVALID_ARGUMENT) { - panic("bad wait queue for waitq_wakeup64_all %p (fp:%p)", fp->f_wset, fp); + panic("bad wait queue for waitq_wakeup64_all %p (fp:%p)", fp->fp_wset, fp); } } - if ((fp->f_flags & FP_SELCONFLICT) == FP_SELCONFLICT) { + if ((fp->fp_flags & FP_SELCONFLICT) == FP_SELCONFLICT) { if (waitq_wakeup64_all(&select_conflict_queue, NO_EVENT64, THREAD_INTERRUPTED, WAITQ_ALL_PRIORITIES) == KERN_INVALID_ARGUMENT) { panic("bad select_conflict_queue"); @@ -5470,12 +5121,16 @@ fileproc_drain(proc_t p, struct fileproc * fp) msleep(&p->p_fpdrainwait, &p->p_fdmlock, PRIBIO, "fpdrain", NULL); } #if DIAGNOSTIC - if ((fp->f_flags & FP_INSELECT) != 0) { + if ((fp->fp_flags & FP_INSELECT) != 0) { panic("FP_INSELECT set on drained fp"); } #endif - if ((fp->f_flags & FP_SELCONFLICT) == FP_SELCONFLICT) { - fp->f_flags &= ~FP_SELCONFLICT; + if ((fp->fp_flags & FP_SELCONFLICT) == FP_SELCONFLICT) { + fp->fp_flags &= ~FP_SELCONFLICT; + } + + if (!is_current_proc) { + thread_deallocate(thread); } } @@ -5498,14 +5153,14 @@ fp_free(proc_t p, int fd, struct fileproc * fp) fdrelse(p, fd); proc_fdunlock(p); - fg_free(fp->f_fglob); - os_ref_release_live(&fp->f_iocount); + fg_free(fp->fp_glob); + os_ref_release_live(&fp->fp_iocount); fileproc_free(fp); } /* - * flock + * sys_flock * * Description: Apply an advisory lock on a file descriptor. * @@ -5528,7 +5183,7 @@ fp_free(proc_t p, int fd, struct fileproc * fp) * the entire file (l_whence = SEEK_SET, l_start = 0, l_len = 0). */ int -flock(proc_t p, struct flock_args *uap, __unused int32_t *retval) +sys_flock(proc_t p, struct flock_args *uap, __unused int32_t *retval) { int fd = uap->fd; int how = uap->how; @@ -5552,8 +5207,7 @@ flock(proc_t p, struct flock_args *uap, __unused int32_t *retval) lf.l_len = 0; if (how & LOCK_UN) { lf.l_type = F_UNLCK; - fp->f_flag &= ~FHASLOCK; - error = VNOP_ADVLOCK(vp, (caddr_t)fp->f_fglob, F_UNLCK, &lf, F_FLOCK, ctx, NULL); + error = VNOP_ADVLOCK(vp, (caddr_t)fp->fp_glob, F_UNLCK, &lf, F_FLOCK, ctx, NULL); goto out; } if (how & LOCK_EX) { @@ -5565,16 +5219,16 @@ flock(proc_t p, struct flock_args *uap, __unused int32_t *retval) goto out; } #if CONFIG_MACF - error = mac_file_check_lock(proc_ucred(p), fp->f_fglob, F_SETLK, &lf); + error = mac_file_check_lock(proc_ucred(p), fp->fp_glob, F_SETLK, &lf); if (error) { goto out; } #endif - error = VNOP_ADVLOCK(vp, (caddr_t)fp->f_fglob, F_SETLK, &lf, + error = VNOP_ADVLOCK(vp, (caddr_t)fp->fp_glob, F_SETLK, &lf, (how & LOCK_NB ? F_FLOCK : F_FLOCK | F_WAIT), ctx, NULL); if (!error) { - fp->f_flag |= FHASLOCK; + os_atomic_or(&fp->fp_glob->fg_flag, FWASLOCKED, relaxed); } out: (void)vnode_put(vp); @@ -5584,7 +5238,7 @@ out1: } /* - * fileport_makeport + * sys_fileport_makeport * * Description: Obtain a Mach send right for a given file descriptor. * @@ -5602,7 +5256,7 @@ out1: * On success, name of send right is stored at user-specified address. 
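The sys_fileport_makeport() hunk below also reorders the reference counting: fg_ref(fg) is now taken only after fileport_alloc() succeeds, so the error path no longer needs a compensating fg_drop(). A minimal userland model of that ordering, with made-up names standing in for the fileglob and the fileport:

#include <stdatomic.h>
#include <stdlib.h>

struct obj {
    atomic_int refs;                 /* stands in for fg_count */
};

struct port {                        /* stands in for the Mach fileport */
    struct obj *owner_ref;
};

static void obj_ref(struct obj *o)  { atomic_fetch_add(&o->refs, 1); }
static void obj_drop(struct obj *o) { atomic_fetch_sub(&o->refs, 1); }

static struct port *
port_alloc(struct obj *o)
{
    (void)o;
    return calloc(1, sizeof(struct port));   /* may fail, like fileport_alloc */
}

/*
 * Take the reference the port will own only once the port exists, so the
 * failure path has nothing to undo.
 */
static int
make_port(struct obj *o, struct port **out)
{
    struct port *p = port_alloc(o);
    if (p == NULL) {
        return -1;                   /* EAGAIN analogue: no reference leaked */
    }
    obj_ref(o);                      /* dropped when the port is deallocated */
    p->owner_ref = o;
    *out = p;
    return 0;
}

static void
port_dealloc(struct port *p)
{
    obj_drop(p->owner_ref);          /* the port's reference goes away with it */
    free(p);
}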
*/ int -fileport_makeport(proc_t p, struct fileport_makeport_args *uap, +sys_fileport_makeport(proc_t p, struct fileport_makeport_args *uap, __unused int *retval) { int err; @@ -5619,7 +5273,8 @@ fileport_makeport(proc_t p, struct fileport_makeport_args *uap, goto out_unlock; } - if (!file_issendable(p, fp)) { + fg = fp->fp_glob; + if (!fg_sendable(fg)) { err = EINVAL; goto out_unlock; } @@ -5629,20 +5284,18 @@ fileport_makeport(proc_t p, struct fileport_makeport_args *uap, goto out_unlock; } - /* Dropped when port is deallocated */ - fg = fp->f_fglob; - fg_ref(fp); - proc_fdunlock(p); /* Allocate and initialize a port */ fileport = fileport_alloc(fg); if (fileport == IPC_PORT_NULL) { err = EAGAIN; - fg_drop(fp); goto out; } + /* Dropped when port is deallocated */ + fg_ref(fg); + /* Add an entry. Deallocates port on failure. */ name = ipc_port_copyout_send(fileport, get_task_ipcspace(p->task)); if (!MACH_PORT_VALID(name)) { @@ -5682,13 +5335,11 @@ out: void fileport_releasefg(struct fileglob *fg) { - (void)closef_locked(NULL, fg, PROC_NULL); - - return; + (void)fg_drop(PROC_NULL, fg); } /* - * fileport_makefd_internal + * fileport_makefd * * Description: Obtain the file descriptor for a given Mach send right. * @@ -5701,7 +5352,7 @@ fileport_releasefg(struct fileglob *fg) * *retval (modified) The new descriptor */ int -fileport_makefd_internal(proc_t p, ipc_port_t port, int uf_flags, int *retval) +fileport_makefd(proc_t p, ipc_port_t port, int uf_flags, int *retval) { struct fileglob *fg; struct fileproc *fp = FILEPROC_NULL; @@ -5720,20 +5371,19 @@ fileport_makefd_internal(proc_t p, ipc_port_t port, int uf_flags, int *retval) goto out; } - fp->f_fglob = fg; - fg_ref(fp); - proc_fdlock(p); err = fdalloc(p, 0, &fd); if (err != 0) { proc_fdunlock(p); - fg_drop(fp); goto out; } if (uf_flags) { *fdflags(p, fd) |= uf_flags; } + fp->fp_glob = fg; + fg_ref(fg); + procfdtbl_releasefd(p, fd, fp); proc_fdunlock(p); @@ -5748,7 +5398,7 @@ out: } /* - * fileport_makefd + * sys_fileport_makefd * * Description: Obtain the file descriptor for a given Mach send right. * @@ -5764,7 +5414,7 @@ out: * *retval (modified) The new descriptor */ int -fileport_makefd(proc_t p, struct fileport_makefd_args *uap, int32_t *retval) +sys_fileport_makefd(proc_t p, struct fileport_makefd_args *uap, int32_t *retval) { ipc_port_t port = IPC_PORT_NULL; mach_port_name_t send = uap->port; @@ -5775,7 +5425,7 @@ fileport_makefd(proc_t p, struct fileport_makefd_args *uap, int32_t *retval) send, MACH_MSG_TYPE_COPY_SEND, &port, 0, NULL, IPC_KMSG_FLAGS_ALLOW_IMMOVABLE_SEND); if (res == KERN_SUCCESS) { - err = fileport_makefd_internal(p, port, UF_EXCLOSE, retval); + err = fileport_makefd(p, port, UF_EXCLOSE, retval); } else { err = EINVAL; } @@ -5835,7 +5485,7 @@ dupfdopen(struct filedesc *fdp, int indx, int dfd, int flags, int error) return EBADF; } #if CONFIG_MACF - myerror = mac_file_check_dup(proc_ucred(p), wfp->f_fglob, dfd); + myerror = mac_file_check_dup(proc_ucred(p), wfp->fp_glob, dfd); if (myerror) { proc_fdunlock(p); return myerror; @@ -5871,12 +5521,12 @@ dupfdopen(struct filedesc *fdp, int indx, int dfd, int flags, int error) if (indx > fdp->fd_lastfile) { fdp->fd_lastfile = indx; } - (void)fg_ref(wfp); - if (fp->f_fglob) { - fg_free(fp->f_fglob); + if (fp->fp_glob) { + fg_free(fp->fp_glob); } - fp->f_fglob = wfp->f_fglob; + fg_ref(wfp->fp_glob); + fp->fp_glob = wfp->fp_glob; fdp->fd_ofileflags[indx] = fdp->fd_ofileflags[dfd] | (flags & O_CLOEXEC) ? 
UF_EXCLOSE : 0; @@ -5892,199 +5542,6 @@ dupfdopen(struct filedesc *fdp, int indx, int dfd, int flags, int error) } -/* - * fg_ref - * - * Description: Add a reference to a fileglob by fileproc - * - * Parameters: fp fileproc containing fileglob - * pointer - * - * Returns: void - * - * Notes: XXX Should use OSAddAtomic? - */ -void -fg_ref(struct fileproc * fp) -{ - struct fileglob *fg; - - fg = fp->f_fglob; - - lck_mtx_lock_spin(&fg->fg_lock); - -#if DIAGNOSTIC - if ((fp->f_flags & ~((unsigned int)FP_VALID_FLAGS)) != 0) { - panic("fg_ref: invalid bits on fp %p", fp); - } - - if (fg->fg_count == 0) { - panic("fg_ref: adding fgcount to zeroed fg: fp %p fg %p", - fp, fg); - } -#endif - fg->fg_count++; - lck_mtx_unlock(&fg->fg_lock); -} - - -/* - * fg_drop - * - * Description: Remove a reference to a fileglob by fileproc - * - * Parameters: fp fileproc containing fileglob - * pointer - * - * Returns: void - * - * Notes: XXX Should use OSAddAtomic? - */ -void -fg_drop(struct fileproc * fp) -{ - struct fileglob *fg; - - fg = fp->f_fglob; - lck_mtx_lock_spin(&fg->fg_lock); - fg->fg_count--; - lck_mtx_unlock(&fg->fg_lock); -} - -#if SOCKETS -/* - * fg_insertuipc_mark - * - * Description: Mark fileglob for insertion onto message queue if needed - * Also takes fileglob reference - * - * Parameters: fg Fileglob pointer to insert - * - * Returns: true, if the fileglob needs to be inserted onto msg queue - * - * Locks: Takes and drops fg_lock, potentially many times - */ -boolean_t -fg_insertuipc_mark(struct fileglob * fg) -{ - boolean_t insert = FALSE; - - lck_mtx_lock_spin(&fg->fg_lock); - while (fg->fg_lflags & FG_RMMSGQ) { - lck_mtx_convert_spin(&fg->fg_lock); - - fg->fg_lflags |= FG_WRMMSGQ; - msleep(&fg->fg_lflags, &fg->fg_lock, 0, "fg_insertuipc", NULL); - } - - fg->fg_count++; - fg->fg_msgcount++; - if (fg->fg_msgcount == 1) { - fg->fg_lflags |= FG_INSMSGQ; - insert = TRUE; - } - lck_mtx_unlock(&fg->fg_lock); - return insert; -} - -/* - * fg_insertuipc - * - * Description: Insert marked fileglob onto message queue - * - * Parameters: fg Fileglob pointer to insert - * - * Returns: void - * - * Locks: Takes and drops fg_lock & uipc_lock - * DO NOT call this function with proc_fdlock held as unp_gc() - * can potentially try to acquire proc_fdlock, which can result - * in a deadlock if this function is in unp_gc_wait(). 
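The fg_insertuipc_mark()/fg_insertuipc() pair removed from this file below is a two-phase pattern: decide under the per-object lock whether an insertion is needed, then perform the list insertion later under the list lock only — never while holding proc_fdlock, for the deadlock reason the original comment spells out. A loose pthread sketch of that shape, with illustrative names:

#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

struct node {
    pthread_mutex_t lock;            /* per-object lock (fg_lock analogue) */
    int  msgcount;
    bool pending_insert;
    struct node *next;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER; /* uipc_lock analogue */
static struct node *list_head;

/* Phase 1: decide under the object lock whether an insert is needed. */
static bool
mark_for_insert(struct node *n)
{
    bool insert;
    pthread_mutex_lock(&n->lock);
    insert = (++n->msgcount == 1);
    n->pending_insert = insert;
    pthread_mutex_unlock(&n->lock);
    return insert;
}

/* Phase 2: do the insertion holding only the list lock.  The caller must
 * not hold any lock the list's consumers might need — the point of the
 * "DO NOT call with proc_fdlock held" warning above. */
static void
do_insert(struct node *n)
{
    if (!n->pending_insert) {
        return;
    }
    pthread_mutex_lock(&list_lock);
    n->next = list_head;
    list_head = n;
    pthread_mutex_unlock(&list_lock);

    pthread_mutex_lock(&n->lock);
    n->pending_insert = false;
    pthread_mutex_unlock(&n->lock);
}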
- */ -void -fg_insertuipc(struct fileglob * fg) -{ - if (fg->fg_lflags & FG_INSMSGQ) { - lck_mtx_lock_spin(uipc_lock); - unp_gc_wait(); - LIST_INSERT_HEAD(&fmsghead, fg, f_msglist); - lck_mtx_unlock(uipc_lock); - lck_mtx_lock(&fg->fg_lock); - fg->fg_lflags &= ~FG_INSMSGQ; - if (fg->fg_lflags & FG_WINSMSGQ) { - fg->fg_lflags &= ~FG_WINSMSGQ; - wakeup(&fg->fg_lflags); - } - lck_mtx_unlock(&fg->fg_lock); - } -} - -/* - * fg_removeuipc_mark - * - * Description: Mark the fileglob for removal from message queue if needed - * Also releases fileglob message queue reference - * - * Parameters: fg Fileglob pointer to remove - * - * Returns: true, if the fileglob needs to be removed from msg queue - * - * Locks: Takes and drops fg_lock, potentially many times - */ -boolean_t -fg_removeuipc_mark(struct fileglob * fg) -{ - boolean_t remove = FALSE; - - lck_mtx_lock_spin(&fg->fg_lock); - while (fg->fg_lflags & FG_INSMSGQ) { - lck_mtx_convert_spin(&fg->fg_lock); - - fg->fg_lflags |= FG_WINSMSGQ; - msleep(&fg->fg_lflags, &fg->fg_lock, 0, "fg_removeuipc", NULL); - } - fg->fg_msgcount--; - if (fg->fg_msgcount == 0) { - fg->fg_lflags |= FG_RMMSGQ; - remove = TRUE; - } - lck_mtx_unlock(&fg->fg_lock); - return remove; -} - -/* - * fg_removeuipc - * - * Description: Remove marked fileglob from message queue - * - * Parameters: fg Fileglob pointer to remove - * - * Returns: void - * - * Locks: Takes and drops fg_lock & uipc_lock - * DO NOT call this function with proc_fdlock held as unp_gc() - * can potentially try to acquire proc_fdlock, which can result - * in a deadlock if this function is in unp_gc_wait(). - */ -void -fg_removeuipc(struct fileglob * fg) -{ - if (fg->fg_lflags & FG_RMMSGQ) { - lck_mtx_lock_spin(uipc_lock); - unp_gc_wait(); - LIST_REMOVE(fg, f_msglist); - lck_mtx_unlock(uipc_lock); - lck_mtx_lock(&fg->fg_lock); - fg->fg_lflags &= ~FG_RMMSGQ; - if (fg->fg_lflags & FG_WRMMSGQ) { - fg->fg_lflags &= ~FG_WRMMSGQ; - wakeup(&fg->fg_lflags); - } - lck_mtx_unlock(&fg->fg_lock); - } -} -#endif /* SOCKETS */ - /* * fo_read * @@ -6284,41 +5741,12 @@ fo_no_kqfilter(struct fileproc *fp, struct knote *kn, struct kevent_qos_s *kev) } -/* - * The ability to send a file descriptor to another - * process is opt-in by file type. 
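file_issendable() is removed below in favor of the fileglob-based fg_sendable() used earlier in this diff. The policy it describes — sendability is opt-in by descriptor type and denied for confined fileglobs — has roughly this shape (illustrative names, not the kernel's own code):

#include <stdbool.h>

/* Descriptor types, mirroring the cases the original switch listed. */
enum ftype { FT_VNODE, FT_SOCKET, FT_PIPE, FT_PSXSHM, FT_NETPOLICY,
             FT_KQUEUE, FT_FSEVENTS, FT_PSXSEM };

#define FLAG_CONFINED 0x1            /* stands in for FG_CONFINED */

struct fglob {
    enum ftype   type;
    unsigned int lflags;
};

/*
 * Sendability is opt-in by type, and even an opt-in type is refused when
 * the description is confined to its owning process.
 */
static bool
sendable(const struct fglob *fg)
{
    switch (fg->type) {
    case FT_VNODE:
    case FT_SOCKET:
    case FT_PIPE:
    case FT_PSXSHM:
    case FT_NETPOLICY:
        return (fg->lflags & FLAG_CONFINED) == 0;
    default:
        return false;                /* kqueues, fsevents, POSIX semaphores */
    }
}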
- */ -boolean_t -file_issendable(proc_t p, struct fileproc *fp) -{ - proc_fdlock_assert(p, LCK_MTX_ASSERT_OWNED); - - switch (fp->f_type) { - case DTYPE_VNODE: - case DTYPE_SOCKET: - case DTYPE_PIPE: - case DTYPE_PSXSHM: - case DTYPE_NETPOLICY: - return 0 == (fp->f_fglob->fg_lflags & FG_CONFINED); - default: - /* DTYPE_KQUEUE, DTYPE_FSEVENTS, DTYPE_PSXSEM */ - return FALSE; - } -} - -os_refgrp_decl(, f_iocount_refgrp, "f_iocount", NULL); - struct fileproc * fileproc_alloc_init(__unused void *arg) { - struct fileproc *fp; - - MALLOC_ZONE(fp, struct fileproc *, sizeof(*fp), M_FILEPROC, M_WAITOK); - if (fp) { - bzero(fp, sizeof(*fp)); - os_ref_init(&fp->f_iocount, &f_iocount_refgrp); - } + struct fileproc *fp = zalloc_flags(fp_zone, Z_WAITOK | Z_ZERO); + os_ref_init(&fp->fp_iocount, &f_refgrp); return fp; } @@ -6326,7 +5754,7 @@ fileproc_alloc_init(__unused void *arg) void fileproc_free(struct fileproc *fp) { - os_ref_count_t __unused refc = os_ref_release(&fp->f_iocount); + os_ref_count_t __unused refc = os_ref_release(&fp->fp_iocount); #if DEVELOPMENT || DEBUG if (0 != refc) { panic("%s: pid %d refc: %u != 0", @@ -6335,13 +5763,13 @@ fileproc_free(struct fileproc *fp) #endif switch (FILEPROC_TYPE(fp)) { case FTYPE_SIMPLE: - FREE_ZONE(fp, sizeof(*fp), M_FILEPROC); + zfree(fp_zone, fp); break; case FTYPE_GUARDED: guarded_fileproc_free(fp); break; default: - panic("%s: corrupt fp %p flags %x", __func__, fp, fp->f_flags); + panic("%s: corrupt fp %p flags %x", __func__, fp, fp->fp_flags); } } @@ -6349,14 +5777,14 @@ void fileproc_modify_vflags(struct fileproc *fp, fileproc_vflags_t vflags, boolean_t clearflags) { if (clearflags) { - os_atomic_andnot(&fp->f_vflags, vflags, relaxed); + os_atomic_andnot(&fp->fp_vflags, vflags, relaxed); } else { - os_atomic_or(&fp->f_vflags, vflags, relaxed); + os_atomic_or(&fp->fp_vflags, vflags, relaxed); } } fileproc_vflags_t fileproc_get_vflags(struct fileproc *fp) { - return os_atomic_load(&fp->f_vflags, relaxed); + return os_atomic_load(&fp->fp_vflags, relaxed); } diff --git a/bsd/kern/kern_event.c b/bsd/kern/kern_event.c index 8f4d2207b..0593bcb08 100644 --- a/bsd/kern/kern_event.c +++ b/bsd/kern/kern_event.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2019 Apple Inc. All rights reserved. + * Copyright (c) 2000-2020 Apple Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -117,7 +117,18 @@ #include #endif +#if DEVELOPMENT || DEBUG +#define KEVENT_PANIC_ON_WORKLOOP_OWNERSHIP_LEAK (1U << 0) +#define KEVENT_PANIC_ON_NON_ENQUEUED_PROCESS (1U << 1) +TUNABLE(uint32_t, kevent_debug_flags, "kevent_debug", 0); +#endif + +static LCK_GRP_DECLARE(kq_lck_grp, "kqueue"); +SECURITY_READ_ONLY_EARLY(vm_packing_params_t) kn_kq_packing_params = + VM_PACKING_PARAMS(KNOTE_KQ_PACKED); + extern mach_port_name_t ipc_entry_name_mask(mach_port_name_t name); /* osfmk/ipc/ipc_entry.h */ +extern int cansignal(struct proc *, kauth_cred_t, struct proc *, int); /* bsd/kern/kern_sig.c */ #define KEV_EVTID(code) BSDDBG_CODE(DBG_BSD_KEVENT, (code)) @@ -223,32 +234,14 @@ static void knote_drop(kqueue_t kqu, struct knote *kn, struct knote_lock_ctx *kn static void knote_adjust_qos(struct kqueue *kq, struct knote *kn, int result); static void knote_reset_priority(kqueue_t kqu, struct knote *kn, pthread_priority_t pp); -static zone_t knote_zone; -static zone_t kqfile_zone; -static zone_t kqworkq_zone; -static zone_t kqworkloop_zone; -#if DEVELOPMENT || DEBUG -#define KEVENT_PANIC_ON_WORKLOOP_OWNERSHIP_LEAK (1U << 0) -#define KEVENT_PANIC_ON_NON_ENQUEUED_PROCESS (1U << 1) -#define KEVENT_PANIC_BOOT_ARG_INITIALIZED (1U << 31) - -#define KEVENT_PANIC_DEFAULT_VALUE (0) -static uint32_t -kevent_debug_flags(void) -{ - static uint32_t flags = KEVENT_PANIC_DEFAULT_VALUE; - - if ((flags & KEVENT_PANIC_BOOT_ARG_INITIALIZED) == 0) { - uint32_t value = 0; - if (!PE_parse_boot_argn("kevent_debug", &value, sizeof(value))) { - value = KEVENT_PANIC_DEFAULT_VALUE; - } - value |= KEVENT_PANIC_BOOT_ARG_INITIALIZED; - os_atomic_store(&flags, value, relaxed); - } - return flags; -} -#endif +static ZONE_DECLARE(knote_zone, "knote zone", + sizeof(struct knote), ZC_CACHING | ZC_ZFREE_CLEARMEM); +static ZONE_DECLARE(kqfile_zone, "kqueue file zone", + sizeof(struct kqfile), ZC_ZFREE_CLEARMEM); +static ZONE_DECLARE(kqworkq_zone, "kqueue workq zone", + sizeof(struct kqworkq), ZC_ZFREE_CLEARMEM); +static ZONE_DECLARE(kqworkloop_zone, "kqueue workloop zone", + sizeof(struct kqworkloop), ZC_CACHING | ZC_ZFREE_CLEARMEM); #define KN_HASH(val, mask) (((val) ^ (val >> 8)) & (mask)) @@ -433,9 +426,6 @@ kqr_kqueue(proc_t p, workq_threadreq_t kqr) * by calling the filter to get a [consistent] snapshot of that * data. 
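Several subsystems in this patch replace runtime zinit() setup with static ZONE_DECLARE() definitions and allocate with zalloc_flags(..., Z_WAITOK | Z_ZERO) instead of zalloc() followed by bzero() — see the knote/kqueue zones declared above and the removal of knote_init()'s zinit calls further down. A toy userland cache capturing the "always hand back zeroed objects" contract; the structure and names here are illustrative only:

#include <stdlib.h>
#include <string.h>

/* Fixed-size object cache that always returns zeroed memory, standing in
 * for zalloc_flags(zone, Z_WAITOK | Z_ZERO).  Real zones can also scrub on
 * free (ZC_ZFREE_CLEARMEM); this sketch clears on both sides for clarity.
 * objsize must be at least sizeof(void *). */
struct cache {
    size_t objsize;
    void  *freelist;
};

static void *
cache_alloc(struct cache *c)
{
    void *p = c->freelist;
    if (p != NULL) {
        c->freelist = *(void **)p;     /* pop from the free list */
    } else {
        p = malloc(c->objsize);
        if (p == NULL) {
            return NULL;
        }
    }
    memset(p, 0, c->objsize);          /* callers never see stale contents */
    return p;
}

static void
cache_free(struct cache *c, void *p)
{
    memset(p, 0, c->objsize);          /* scrub before recycling */
    *(void **)p = c->freelist;
    c->freelist = p;
}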
*/ -static lck_grp_attr_t *kq_lck_grp_attr; -static lck_grp_t *kq_lck_grp; -static lck_attr_t *kq_lck_attr; static inline void kqlock(kqueue_t kqu) @@ -930,10 +920,10 @@ SECURITY_READ_ONLY_EARLY(static struct filterops) file_filtops = { #pragma mark kqread_filtops -#define f_flag f_fglob->fg_flag -#define f_ops f_fglob->fg_ops -#define f_data f_fglob->fg_data -#define f_lflags f_fglob->fg_lflags +#define f_flag fp_glob->fg_flag +#define f_ops fp_glob->fg_ops +#define f_data fp_glob->fg_data +#define f_lflags fp_glob->fg_lflags static void filt_kqdetach(struct knote *kn) @@ -1006,7 +996,7 @@ filt_procattach(struct knote *kn, __unused struct kevent_qos_s *kev) return 0; } - p = proc_find(kn->kn_id); + p = proc_find((int)kn->kn_id); if (p == NULL) { knote_set_error(kn, ESRCH); return 0; @@ -1025,6 +1015,9 @@ filt_procattach(struct knote *kn, __unused struct kevent_qos_s *kev) (p->p_oppid == selfpid)) { break; /* parent-in-waiting => ok */ } + if (cansignal(current_proc(), kauth_cred_get(), p, SIGKILL)) { + break; /* allowed to signal => ok */ + } proc_rele(p); knote_set_error(kn, EACCES); return 0; @@ -1241,7 +1234,7 @@ struct filt_timer_params { * kn->kn_ext[1] leeway value * kn->kn_sdata interval timer: the interval * absolute/deadline timer: 0 - * kn->kn_hook32 timer state + * kn->kn_hook32 timer state (with gencount) * * TIMER_IDLE: * The timer has either never been scheduled or been cancelled. @@ -1262,6 +1255,8 @@ struct filt_timer_params { #define TIMER_ARMED 0x1 #define TIMER_FIRED 0x2 #define TIMER_IMMEDIATE 0x3 +#define TIMER_STATE_MASK 0x3 +#define TIMER_GEN_INC 0x4 static void filt_timer_set_params(struct knote *kn, struct filt_timer_params *params) @@ -1476,13 +1471,14 @@ filt_timervalidate(const struct kevent_qos_s *kev, * filt_timerexpire - the timer callout routine */ static void -filt_timerexpire(void *knx, __unused void *spare) +filt_timerexpire(void *knx, void *state_on_arm) { struct knote *kn = knx; - int v; - if (os_atomic_cmpxchgv(&kn->kn_hook32, TIMER_ARMED, TIMER_FIRED, - &v, relaxed)) { + uint32_t state = (uint32_t)(uintptr_t)state_on_arm; + uint32_t fired_state = state ^ TIMER_ARMED ^ TIMER_FIRED; + + if (os_atomic_cmpxchg(&kn->kn_hook32, state, fired_state, relaxed)) { // our f_event always would say FILTER_ACTIVE, // so be leaner and just do it. struct kqueue *kq = knote_get_kq(kn); @@ -1491,22 +1487,9 @@ filt_timerexpire(void *knx, __unused void *spare) kqunlock(kq); } else { /* - * From TIMER_ARMED, the only allowed transition are: - * - to TIMER_FIRED through the timer callout just above - * - to TIMER_IDLE due to filt_timercancel() which will wait for the - * timer callout (and any possible invocation of filt_timerexpire) to - * have finished before the state is changed again. + * The timer has been reprogrammed or canceled since it was armed, + * and this is a late firing for the timer, just ignore it. 
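The reworked timer code keeps the two low bits of kn_hook32 as the state and uses the remaining bits as a generation counter (TIMER_GEN_INC), so a late firing from a previous arming can be recognized and ignored. A standalone C11 sketch of the arm/expire handshake, with renamed constants — a model of the idea, not kernel code:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define T_IDLE       0x0u
#define T_ARMED      0x1u
#define T_FIRED      0x2u
#define T_STATE_MASK 0x3u
#define T_GEN_INC    0x4u   /* generation counter lives above the 2 state bits */

/* Arm: bump the generation, set ARMED, and remember the exact word the
 * callout is expected to observe when it fires. */
static uint32_t
timer_arm(_Atomic uint32_t *word)
{
    uint32_t state = atomic_load_explicit(word, memory_order_relaxed);
    state = (state & ~T_STATE_MASK) + T_GEN_INC + T_ARMED;
    atomic_store_explicit(word, state, memory_order_relaxed);
    return state;            /* handed to the callout as its "armed" token */
}

/* Expire: only a callout holding the current generation may move
 * ARMED -> FIRED; a stale callout's compare-and-swap fails and the late
 * firing is simply ignored. */
static bool
timer_expire(_Atomic uint32_t *word, uint32_t armed_token)
{
    uint32_t fired = armed_token ^ T_ARMED ^ T_FIRED;   /* flip 01 -> 10, keep gen */
    return atomic_compare_exchange_strong_explicit(word, &armed_token, fired,
        memory_order_relaxed, memory_order_relaxed);
}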
*/ - assert(v == TIMER_IDLE); - } -} - -static void -filt_timercancel(struct knote *kn) -{ - if (os_atomic_xchg(&kn->kn_hook32, TIMER_IDLE, relaxed) == TIMER_ARMED) { - /* cancel the thread call and wait for any filt_timerexpire in flight */ - thread_call_cancel_wait(kn->kn_thcall); } } @@ -1541,12 +1524,11 @@ filt_timerarm(struct knote *kn) { uint64_t deadline = kn->kn_ext[0]; uint64_t leeway = kn->kn_ext[1]; + uint32_t state; int filter_flags = kn->kn_sfflags; unsigned int timer_flags = 0; - assert(os_atomic_load(&kn->kn_hook32, relaxed) == TIMER_IDLE); - if (filter_flags & NOTE_CRITICAL) { timer_flags |= THREAD_CALL_DELAY_USER_CRITICAL; } else if (filter_flags & NOTE_BACKGROUND) { @@ -1563,9 +1545,56 @@ filt_timerarm(struct knote *kn) timer_flags |= THREAD_CALL_CONTINUOUS; } - os_atomic_store(&kn->kn_hook32, TIMER_ARMED, relaxed); - thread_call_enter_delayed_with_leeway(kn->kn_thcall, NULL, - deadline, leeway, timer_flags); + /* + * Move to ARMED. + * + * We increase the gencount, and setup the thread call with this expected + * state. It means that if there was a previous generation of the timer in + * flight that needs to be ignored, then 3 things are possible: + * + * - the timer fires first, filt_timerexpire() and sets the state to FIRED + * but we clobber it with ARMED and a new gencount. The knote will still + * be activated, but filt_timerprocess() which is serialized with this + * call will not see the FIRED bit set and will not deliver an event. + * + * - this code runs first, but filt_timerexpire() comes second. Because it + * knows an old gencount, it will debounce and not activate the knote. + * + * - filt_timerexpire() wasn't in flight yet, and thread_call_enter below + * will just cancel it properly. + * + * This is important as userspace expects to never be woken up for past + * timers after filt_timertouch ran. + */ + state = os_atomic_load(&kn->kn_hook32, relaxed); + state &= ~TIMER_STATE_MASK; + state += TIMER_GEN_INC + TIMER_ARMED; + os_atomic_store(&kn->kn_hook32, state, relaxed); + + thread_call_enter_delayed_with_leeway(kn->kn_thcall, + (void *)(uintptr_t)state, deadline, leeway, timer_flags); +} + +/* + * Mark a timer as "already fired" when it is being reprogrammed + * + * If there is a timer in flight, this will do a best effort at canceling it, + * but will not wait. If the thread call was in flight, having set the + * TIMER_IMMEDIATE bit will debounce a filt_timerexpire() racing with this + * cancelation. 
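filt_timerfire_immediate() relies on TIMER_IMMEDIATE being equal to TIMER_STATE_MASK, so a single atomic OR forces any state to IMMEDIATE while leaving the generation bits alone; a racing filt_timerexpire() then fails its compare-and-swap and debounces itself. A minimal model of that transition (constants renamed, not the kernel's code):

#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define T_ARMED      0x1u
#define T_IMMEDIATE  0x3u
#define T_STATE_MASK 0x3u

static_assert(T_IMMEDIATE == T_STATE_MASK,
    "an atomic OR of the mask always lands on IMMEDIATE");

/* Reprogramming a possibly in-flight timer: one atomic OR forces the state
 * to IMMEDIATE without touching the generation bits.  Only if the old
 * state was still ARMED is a best-effort, non-waiting cancel of the
 * callout worth attempting. */
static bool
timer_fire_immediate(_Atomic uint32_t *word)
{
    uint32_t old = atomic_fetch_or_explicit(word, T_IMMEDIATE,
        memory_order_relaxed);
    return (old & T_STATE_MASK) == T_ARMED;   /* caller may cancel the callout */
}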
+ */ +static void +filt_timerfire_immediate(struct knote *kn) +{ + uint32_t state; + + static_assert(TIMER_IMMEDIATE == TIMER_STATE_MASK, + "validate that this atomic or will transition to IMMEDIATE"); + state = os_atomic_or_orig(&kn->kn_hook32, TIMER_IMMEDIATE, relaxed); + + if ((state & TIMER_STATE_MASK) == TIMER_ARMED) { + thread_call_cancel(kn->kn_thcall); + } } /* @@ -1655,12 +1684,11 @@ filt_timertouch(struct knote *kn, struct kevent_qos_s *kev) } /* capture the new values used to compute deadline */ - filt_timercancel(kn); filt_timer_set_params(kn, ¶ms); kn->kn_sfflags = kev->fflags; if (filt_timer_is_ready(kn)) { - os_atomic_store(&kn->kn_hook32, TIMER_IMMEDIATE, relaxed); + filt_timerfire_immediate(kn); return FILTER_ACTIVE | FILTER_UPDATE_REQ_QOS; } else { filt_timerarm(kn); @@ -1678,6 +1706,8 @@ filt_timertouch(struct knote *kn, struct kevent_qos_s *kev) static int filt_timerprocess(struct knote *kn, struct kevent_qos_s *kev) { + uint32_t state = os_atomic_load(&kn->kn_hook32, relaxed); + /* * filt_timerprocess is serialized with any filter routine except for * filt_timerexpire which atomically does a TIMER_ARMED -> TIMER_FIRED @@ -1687,7 +1717,7 @@ filt_timerprocess(struct knote *kn, struct kevent_qos_s *kev) * whether we see any of the "FIRED" state, and if we do, it is safe to * do simple state machine transitions. */ - switch (os_atomic_load(&kn->kn_hook32, relaxed)) { + switch (state & TIMER_STATE_MASK) { case TIMER_IDLE: case TIMER_ARMED: /* @@ -1697,7 +1727,7 @@ filt_timerprocess(struct knote *kn, struct kevent_qos_s *kev) return 0; } - os_atomic_store(&kn->kn_hook32, TIMER_IDLE, relaxed); + os_atomic_store(&kn->kn_hook32, state & ~TIMER_STATE_MASK, relaxed); /* * Copy out the interesting kevent state, @@ -1972,7 +2002,7 @@ again: goto again; } } - /* FALLTHROUGH */ + OS_FALLTHROUGH; default: goto out; } @@ -2197,7 +2227,7 @@ static int filt_wlupdate_sync_ipc(struct kqworkloop *kqwl, struct knote *kn, struct kevent_qos_s *kev, int op) { - uint64_t uaddr = kev->ext[EV_EXTIDX_WL_ADDR]; + user_addr_t uaddr = (user_addr_t) kev->ext[EV_EXTIDX_WL_ADDR]; uint64_t kdata = kev->ext[EV_EXTIDX_WL_VALUE]; uint64_t mask = kev->ext[EV_EXTIDX_WL_MASK]; uint64_t udata = 0; @@ -2243,7 +2273,7 @@ again: goto again; } } - /* FALLTHROUGH */ + OS_FALLTHROUGH; default: goto out; } @@ -2462,13 +2492,16 @@ void kdp_workloop_sync_wait_find_owner(__assert_only thread_t thread, event64_t event, thread_waitinfo_t *waitinfo) { + extern zone_t thread_zone; struct knote *kn = (struct knote *)event; - assert(kdp_is_in_zone(kn, "knote zone")); + + zone_require(knote_zone, kn); assert(kn->kn_thread == thread); struct kqueue *kq = knote_get_kq(kn); - assert(kdp_is_in_zone(kq, "kqueue workloop zone")); + + zone_require(kqworkloop_zone, kq); assert(kq->kq_state & KQ_WORKLOOP); struct kqworkloop *kqwl = (struct kqworkloop *)kq; @@ -2477,13 +2510,12 @@ kdp_workloop_sync_wait_find_owner(__assert_only thread_t thread, thread_t kqwl_owner = kqwl->kqwl_owner; if (kqwl_owner != THREAD_NULL) { - assert(kdp_is_in_zone(kqwl_owner, "threads")); - + zone_require(thread_zone, kqwl_owner); waitinfo->owner = thread_tid(kqwl->kqwl_owner); } else if (kqr_thread_requested_pending(kqr)) { waitinfo->owner = STACKSHOT_WAITOWNER_THREQUESTED; } else if (kqr->tr_state >= WORKQ_TR_STATE_BINDING) { - assert(kdp_is_in_zone(kqr->tr_thread, "threads")); + zone_require(thread_zone, kqr->tr_thread); waitinfo->owner = thread_tid(kqr->tr_thread); } else { waitinfo->owner = 0; @@ -2663,7 +2695,7 @@ filt_wlprocess(struct knote *kn, struct 
kevent_qos_s *kev) knote_activate(kqwl, kn, FILTER_ACTIVE); } else { #if DEBUG || DEVELOPMENT - if (kevent_debug_flags() & KEVENT_PANIC_ON_NON_ENQUEUED_PROCESS) { + if (kevent_debug_flags & KEVENT_PANIC_ON_NON_ENQUEUED_PROCESS) { /* * see src/queue_internal.h in libdispatch */ @@ -2841,7 +2873,7 @@ kqueue_destroy(kqueue_t kqu, zone_t zone) * kq_lock. */ waitq_set_deinit(&kqu.kq->kq_wqs); - lck_spin_destroy(&kqu.kq->kq_lock, kq_lck_grp); + lck_spin_destroy(&kqu.kq->kq_lock, &kq_lck_grp); zfree(zone, kqu.kq); } @@ -2856,7 +2888,7 @@ static kqueue_t kqueue_init(kqueue_t kqu, waitq_set_prepost_hook_t *hook, int policy) { waitq_set_init(&kqu.kq->kq_wqs, policy, NULL, hook); - lck_spin_init(&kqu.kq->kq_lock, kq_lck_grp, kq_lck_attr); + lck_spin_init(&kqu.kq->kq_lock, &kq_lck_grp, LCK_ATTR_NULL); return kqu; } @@ -2947,17 +2979,12 @@ kqueue_alloc(struct proc *p) { struct kqfile *kqf; - kqf = (struct kqfile *)zalloc(kqfile_zone); - if (__improbable(kqf == NULL)) { - return NULL; - } - bzero(kqf, sizeof(struct kqfile)); - /* * kqfiles are created with kqueue() so we need to wait for * the first kevent syscall to know which bit among * KQ_KEV_{32,64,QOS} will be set in kqf_state */ + kqf = zalloc_flags(kqfile_zone, Z_WAITOK | Z_ZERO); kqf->kqf_p = p; TAILQ_INIT_AFTER_BZERO(&kqf->kqf_queue); TAILQ_INIT_AFTER_BZERO(&kqf->kqf_suppressed); @@ -3051,11 +3078,7 @@ kqworkq_alloc(struct proc *p, unsigned int flags) { struct kqworkq *kqwq, *tmp; - kqwq = (struct kqworkq *)zalloc(kqworkq_zone); - if (__improbable(kqwq == NULL)) { - return NULL; - } - bzero(kqwq, sizeof(struct kqworkq)); + kqwq = zalloc_flags(kqworkq_zone, Z_WAITOK | Z_ZERO); assert((flags & KEVENT_FLAG_LEGACY32) == 0); if (flags & KEVENT_FLAG_LEGACY64) { @@ -3086,7 +3109,7 @@ kqworkq_alloc(struct proc *p, unsigned int flags) if (i != KQWQ_QOS_MANAGER) { kqwq->kqwq_request[i].tr_flags |= WORKQ_TR_FLAG_OVERCOMMIT; } - kqwq->kqwq_request[i].tr_kq_qos_index = i; + kqwq->kqwq_request[i].tr_kq_qos_index = (kq_index_t)i; } kqueue_init(kqwq, &kqwq->kqwq_waitq_hook, SYNC_POLICY_FIFO); @@ -3175,7 +3198,7 @@ kqworkloop_hash_init(struct filedesc *fdp) fdp->fd_kqhashmask = alloc_mask; } else { kqhash_unlock(fdp); - FREE(alloc_hash, M_KQUEUE); + hashdestroy(alloc_hash, M_KQUEUE, alloc_mask); kqhash_lock(fdp); } } @@ -3244,7 +3267,7 @@ kqworkloop_dealloc(struct kqworkloop *kqwl, kqworkloop_dealloc_flags_t flags, assert(kqwl->kqwl_owner == THREAD_NULL); assert(kqwl->kqwl_turnstile == TURNSTILE_NULL); - lck_spin_destroy(&kqwl->kqwl_statelock, kq_lck_grp); + lck_spin_destroy(&kqwl->kqwl_statelock, &kq_lck_grp); kqueue_destroy(kqwl, kqworkloop_zone); } @@ -3258,8 +3281,6 @@ static void kqworkloop_init(struct kqworkloop *kqwl, proc_t p, kqueue_id_t id, workq_threadreq_param_t *trp) { - bzero(kqwl, sizeof(struct kqworkloop)); - kqwl->kqwl_state = KQ_WORKLOOP | KQ_DYNAMIC | KQ_KEV_QOS; kqwl->kqwl_retains = 1; /* donate a retain to creator */ kqwl->kqwl_dynamicid = id; @@ -3285,7 +3306,7 @@ kqworkloop_init(struct kqworkloop *kqwl, proc_t p, } TAILQ_INIT_AFTER_BZERO(&kqwl->kqwl_suppressed); - lck_spin_init(&kqwl->kqwl_statelock, kq_lck_grp, kq_lck_attr); + lck_spin_init(&kqwl->kqwl_statelock, &kq_lck_grp, LCK_ATTR_NULL); kqueue_init(kqwl, &kqwl->kqwl_waitq_hook, SYNC_POLICY_FIFO); } @@ -3361,7 +3382,7 @@ kqworkloop_get_or_create(struct proc *p, kqueue_id_t id, * then try to allocate one without blocking. 
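kqworkloop_get_or_create() prefers a Z_NOWAIT allocation while the kq hash lock is held, and otherwise drops the lock, allocates with Z_WAITOK, and retries the lookup in case another thread raced in. A self-contained userland sketch of that find-or-create loop, with trivial stand-in allocators and a one-slot table (all names invented for illustration):

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t hash_lock = PTHREAD_MUTEX_INITIALIZER;

/* Trivial stand-ins: a one-slot "hash" and two allocators. */
static void *single_obj;
static long  single_id = -1;

static void *hash_lookup_locked(long id)          { return id == single_id ? single_obj : NULL; }
static void  hash_insert_locked(long id, void *o) { single_id = id; single_obj = o; }
static void *alloc_nowait(void) { return NULL; }          /* pretend the pool is empty */
static void *alloc_waitok(void) { return malloc(64); }    /* may sleep in a real kernel */

/*
 * Find-or-create: try a non-blocking allocation while the lock is held; if
 * that fails, drop the lock, allocate for real, and retry the lookup.
 */
static void *
get_or_create(long id)
{
    void *prealloc = NULL;

    for (;;) {
        pthread_mutex_lock(&hash_lock);
        void *obj = hash_lookup_locked(id);
        if (obj != NULL) {
            pthread_mutex_unlock(&hash_lock);
            free(prealloc);                  /* lost the race, discard ours */
            return obj;
        }
        if (prealloc == NULL) {
            prealloc = alloc_nowait();       /* cheap attempt, cannot sleep */
        }
        if (prealloc != NULL) {
            hash_insert_locked(id, prealloc);
            pthread_mutex_unlock(&hash_lock);
            return prealloc;
        }
        pthread_mutex_unlock(&hash_lock);
        prealloc = alloc_waitok();           /* blocking allocation, lock dropped */
        if (prealloc == NULL) {
            return NULL;                     /* ENOMEM analogue */
        }
    }
}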
*/ if (__probable(alloc_kqwl == NULL)) { - alloc_kqwl = (struct kqworkloop *)zalloc_noblock(kqworkloop_zone); + alloc_kqwl = zalloc_flags(kqworkloop_zone, Z_NOWAIT | Z_ZERO); } if (__probable(alloc_kqwl)) { kqworkloop_init(alloc_kqwl, p, id, trp); @@ -3378,10 +3399,7 @@ kqworkloop_get_or_create(struct proc *p, kqueue_id_t id, */ kqhash_unlock(fdp); - alloc_kqwl = (struct kqworkloop *)zalloc(kqworkloop_zone); - if (__improbable(!alloc_kqwl)) { - return ENOMEM; - } + alloc_kqwl = zalloc_flags(kqworkloop_zone, Z_WAITOK | Z_ZERO); } kqhash_unlock(fdp); @@ -3441,6 +3459,7 @@ knotes_dealloc(proc_t p) struct kqueue *kq; struct knote *kn; struct klist *kn_hash = NULL; + u_long kn_hashmask; int i; /* Close all the fd-indexed knotes up front */ @@ -3475,15 +3494,15 @@ knotes_dealloc(proc_t p) } } kn_hash = fdp->fd_knhash; + kn_hashmask = fdp->fd_knhashmask; fdp->fd_knhashmask = 0; fdp->fd_knhash = NULL; } knhash_unlock(fdp); - /* free the kn_hash table */ if (kn_hash) { - FREE(kn_hash, M_KQUEUE); + hashdestroy(kn_hash, M_KQUEUE, kn_hashmask); } proc_fdlock(p); @@ -3617,7 +3636,7 @@ kevent_register_wait_block(struct turnstile *ts, thread_t thread, turnstile_update_inheritor_complete(ts, TURNSTILE_INTERLOCK_HELD); kqunlock(cont_args->kqwl); cont_args->handoff_thread = thread; - thread_handoff_parameter(thread, cont, cont_args); + thread_handoff_parameter(thread, cont, cont_args, THREAD_HANDOFF_NONE); } /* @@ -3746,7 +3765,7 @@ restart: /* grab a file reference for the new knote */ if (fops->f_isfd) { - if ((error = fp_lookup(p, kev->ident, &knote_fp, 0)) != 0) { + if ((error = fp_lookup(p, (int)kev->ident, &knote_fp, 0)) != 0) { goto out; } } @@ -3755,14 +3774,14 @@ restart: if (kn == NULL) { error = ENOMEM; if (knote_fp != NULL) { - fp_drop(p, kev->ident, knote_fp, 0); + fp_drop(p, (int)kev->ident, knote_fp, 0); } goto out; } kn->kn_fp = knote_fp; kn->kn_is_fd = fops->f_isfd; - kn->kn_kq_packed = (intptr_t)(struct kqueue *)kq; + kn->kn_kq_packed = VM_PACK_POINTER((vm_offset_t)kq, KNOTE_KQ_PACKED); kn->kn_status = 0; /* was vanish support requested */ @@ -3771,7 +3790,7 @@ restart: kn->kn_status |= KN_REQVANISH; } - /* snapshot matching/dispatching protcol flags into knote */ + /* snapshot matching/dispatching protocol flags into knote */ if (kev->flags & EV_DISABLE) { kn->kn_status |= KN_DISABLED; } @@ -3797,7 +3816,7 @@ restart: if (error) { knote_free(kn); if (knote_fp != NULL) { - fp_drop(p, kev->ident, knote_fp, 0); + fp_drop(p, (int)kev->ident, knote_fp, 0); } if (error == ERESTART) { @@ -3829,7 +3848,7 @@ restart: * Failed to attach correctly, so drop. 
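The kn_kq_packed change above now stores the kqueue pointer via VM_PACK_POINTER rather than a raw intptr_t, trading pointer bits for known alignment and a bounded address range. A toy illustration of the idea — the base, shift, and widths here are invented for the example and do not reflect KNOTE_KQ_PACKED's real parameters:

#include <assert.h>
#include <stdint.h>

#define PACK_BASE   0x100000000ull   /* hypothetical start of the window */
#define PACK_SHIFT  4                /* objects assumed 16-byte aligned */

/* Pack: if every object lives in a known window and is aligned, the
 * pointer can be stored as (ptr - base) >> shift.  With a 32-bit packed
 * value and a shift of 4 this covers a 64 GiB window. */
static uint32_t
pack_ptr(uintptr_t p)
{
    assert(p >= PACK_BASE && (p & ((1u << PACK_SHIFT) - 1)) == 0);
    return (uint32_t)((p - PACK_BASE) >> PACK_SHIFT);
}

static uintptr_t
unpack_ptr(uint32_t packed)
{
    return PACK_BASE + ((uintptr_t)packed << PACK_SHIFT);
}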
*/ kn->kn_filtid = EVFILTID_DETACHED; - error = kn->kn_sdata; + error = (int)kn->kn_sdata; knote_drop(kq, kn, &knlc); result = 0; goto out; @@ -4038,7 +4057,7 @@ knote_process(struct knote *kn, kevent_ctx_t kectx, knote_suppress(kq, kn); if (kn->kn_status & (KN_DEFERDELETE | KN_VANISHED)) { - int kev_flags = EV_DISPATCH2 | EV_ONESHOT; + uint16_t kev_flags = EV_DISPATCH2 | EV_ONESHOT; if (kn->kn_status & KN_DEFERDELETE) { kev_flags |= EV_DELETE; } else { @@ -4623,11 +4642,11 @@ kqueue_workloop_ctl_internal(proc_t p, uintptr_t cmd, uint64_t __unused options, if (params->kqwlp_flags & KQ_WORKLOOP_CREATE_SCHED_PRI) { trp.trp_flags |= TRP_PRIORITY; - trp.trp_pri = params->kqwlp_sched_pri; + trp.trp_pri = (uint8_t)params->kqwlp_sched_pri; } if (params->kqwlp_flags & KQ_WORKLOOP_CREATE_SCHED_POL) { trp.trp_flags |= TRP_POLICY; - trp.trp_pol = params->kqwlp_sched_pol; + trp.trp_pol = (uint8_t)params->kqwlp_sched_pol; } if (params->kqwlp_flags & KQ_WORKLOOP_CREATE_CPU_PERCENT) { trp.trp_flags |= TRP_CPUPERCENT; @@ -4915,7 +4934,7 @@ kqueue_kqfilter(struct fileproc *fp, struct knote *kn, static int kqueue_drain(struct fileproc *fp, __unused vfs_context_t ctx) { - struct kqfile *kqf = (struct kqfile *)fp->f_fglob->fg_data; + struct kqfile *kqf = (struct kqfile *)fp->fp_glob->fg_data; assert((kqf->kqf_state & KQ_WORKQ) == 0); @@ -5354,7 +5373,7 @@ kqworkloop_update_threads_qos(struct kqworkloop *kqwl, int op, kq_index_t qos) case KQWL_UTQ_PARKING: case KQWL_UTQ_UNBINDING: kqr->tr_kq_override_index = qos; - /* FALLTHROUGH */ + OS_FALLTHROUGH; case KQWL_UTQ_RECOMPUTE_WAKEUP_QOS: if (op == KQWL_UTQ_RECOMPUTE_WAKEUP_QOS) { assert(qos == THREAD_QOS_UNSPECIFIED); @@ -5404,7 +5423,7 @@ recompute: * suppressed knote pushing on the kqueue. */ if (kqwl->kqwl_wakeup_indexes > (1 << qos)) { - qos = fls(kqwl->kqwl_wakeup_indexes) - 1; /* fls is 1-based */ + qos = (uint8_t)(fls(kqwl->kqwl_wakeup_indexes) - 1); /* fls is 1-based */ } if (kqr->tr_kq_override_index < qos) { kqr->tr_kq_override_index = qos; @@ -5854,7 +5873,7 @@ knote_reset_priority(kqueue_t kqu, struct knote *kn, pthread_priority_t pp) qos = THREAD_QOS_UNSPECIFIED; } - kn->kn_qos = pp; + kn->kn_qos = (int32_t)pp; if ((kn->kn_status & KN_MERGE_QOS) == 0 || qos > kn->kn_qos_override) { /* Never lower QoS when in "Merge" mode */ @@ -6195,7 +6214,7 @@ restart: kqunlock(kq); knote_fops(kn)->f_detach(kn); if (kn->kn_is_fd) { - fp_drop(p, kn->kn_id, kn->kn_fp, 0); + fp_drop(p, (int)kn->kn_id, kn->kn_fp, 0); } kn->kn_filtid = EVFILTID_DETACHED; kqlock(kq); @@ -6291,6 +6310,7 @@ kq_add_knote(struct kqueue *kq, struct knote *kn, struct knote_lock_ctx *knlc, struct klist *list = NULL; int ret = 0; bool is_fd = kn->kn_is_fd; + uint64_t nofile = proc_limitgetcur(p, RLIMIT_NOFILE, TRUE); if (is_fd) { proc_fdlock(p); @@ -6329,8 +6349,9 @@ kq_add_knote(struct kqueue *kq, struct knote *kn, struct knote_lock_ctx *knlc, if ((u_int)fdp->fd_knlistsize <= kn->kn_id) { u_int size = 0; - if (kn->kn_id >= (uint64_t)p->p_rlimit[RLIMIT_NOFILE].rlim_cur - || kn->kn_id >= (uint64_t)maxfiles) { + /* Make sure that fd stays below current process's soft limit AND system allowed per-process limits */ + if (kn->kn_id >= (uint64_t) nofile + || kn->kn_id >= (uint64_t)maxfilesperproc) { ret = EINVAL; goto out_locked; } @@ -6746,7 +6767,7 @@ knote_drop(struct kqueue *kq, struct knote *kn, struct knote_lock_ctx *knlc) /* kq may be freed when kq_remove_knote() returns */ kq_remove_knote(kq, kn, p, knlc); if (kn->kn_is_fd && ((kn->kn_status & KN_VANISHED) == 0)) { - fp_drop(p, kn->kn_id, 
kn->kn_fp, 0); + fp_drop(p, (int)kn->kn_id, kn->kn_fp, 0); } knote_free(kn); @@ -6755,31 +6776,9 @@ knote_drop(struct kqueue *kq, struct knote *kn, struct knote_lock_ctx *knlc) void knote_init(void) { - knote_zone = zinit(sizeof(struct knote), 8192 * sizeof(struct knote), - 8192, "knote zone"); - zone_change(knote_zone, Z_CACHING_ENABLED, TRUE); - - kqfile_zone = zinit(sizeof(struct kqfile), 8192 * sizeof(struct kqfile), - 8192, "kqueue file zone"); - - kqworkq_zone = zinit(sizeof(struct kqworkq), 8192 * sizeof(struct kqworkq), - 8192, "kqueue workq zone"); - - kqworkloop_zone = zinit(sizeof(struct kqworkloop), 8192 * sizeof(struct kqworkloop), - 8192, "kqueue workloop zone"); - zone_change(kqworkloop_zone, Z_CACHING_ENABLED, TRUE); - - /* allocate kq lock group attribute and group */ - kq_lck_grp_attr = lck_grp_attr_alloc_init(); - - kq_lck_grp = lck_grp_alloc_init("kqueue", kq_lck_grp_attr); - - /* Allocate kq lock attribute */ - kq_lck_attr = lck_attr_alloc_init(); - #if CONFIG_MEMORYSTATUS /* Initialize the memorystatus list lock */ - memorystatus_kevent_init(kq_lck_grp, kq_lck_attr); + memorystatus_kevent_init(&kq_lck_grp, LCK_ATTR_NULL); #endif } SYSINIT(knote, SI_SUB_PSEUDO, SI_ORDER_ANY, knote_init, NULL); @@ -6793,9 +6792,7 @@ knote_fops(struct knote *kn) static struct knote * knote_alloc(void) { - struct knote *kn = ((struct knote *)zalloc(knote_zone)); - bzero(kn, sizeof(struct knote)); - return kn; + return zalloc_flags(knote_zone, Z_WAITOK | Z_ZERO); } static void @@ -6841,15 +6838,16 @@ kevent_adjust_flags_for_proc(proc_t p, int flags) OS_NOINLINE static int kevent_get_kqfile(struct proc *p, int fd, int flags, - struct fileproc **fp, struct kqueue **kqp) + struct fileproc **fpp, struct kqueue **kqp) { int error = 0; struct kqueue *kq; - error = fp_getfkq(p, fd, fp, &kq); + error = fp_get_ftype(p, fd, DTYPE_KQUEUE, EBADF, fpp); if (__improbable(error)) { return error; } + kq = (struct kqueue *)(*fpp)->f_data; uint16_t kq_state = os_atomic_load(&kq->kq_state, relaxed); if (__improbable((kq_state & (KQ_KEV32 | KQ_KEV64 | KQ_KEV_QOS)) == 0)) { @@ -6874,7 +6872,7 @@ kevent_get_kqfile(struct proc *p, int fd, int flags, */ if (__improbable((bool)(flags & KEVENT_FLAG_LEGACY32) != (bool)(kq_state & KQ_KEV32))) { - fp_drop(p, fd, *fp, 0); + fp_drop(p, fd, *fpp, 0); return EINVAL; } @@ -7110,7 +7108,7 @@ kevent_legacy_copyout(struct kevent_qos_s *kevp, user_addr_t *addrp, unsigned in .flags = kevp->flags, .fflags = kevp->fflags, .data = (int64_t) kevp->data, - .udata = kevp->udata, + .udata = (user_addr_t) kevp->udata, }; advance = sizeof(kev64); error = copyout((caddr_t)&kev64, *addrp, advance); @@ -7121,7 +7119,7 @@ kevent_legacy_copyout(struct kevent_qos_s *kevp, user_addr_t *addrp, unsigned in .flags = kevp->flags, .fflags = kevp->fflags, .data = (int32_t)kevp->data, - .udata = kevp->udata, + .udata = (uint32_t)kevp->udata, }; advance = sizeof(kev32); error = copyout((caddr_t)&kev32, *addrp, advance); @@ -7677,7 +7675,7 @@ kevent_internal(kqueue_t kqu, noutputs++; } } else if (kev.flags & EV_ERROR) { - error = kev.data; + error = (int)kev.data; } nchanges--; } @@ -7941,8 +7939,8 @@ kevent_legacy_get_deadline(int flags, user_addr_t utimeout, uint64_t *deadline) if (__improbable(error)) { return error; } - ts.tv_sec = ts64.tv_sec; - ts.tv_nsec = ts64.tv_nsec; + ts.tv_sec = (unsigned long)ts64.tv_sec; + ts.tv_nsec = (long)ts64.tv_nsec; } else { struct user32_timespec ts32; int error = copyin(utimeout, &ts32, sizeof(ts32)); @@ -8070,11 +8068,8 @@ kevent64(struct proc *p, struct 
kevent64_args *uap, int32_t *retval) #define ADVANCE64(p, n) (void*)((char *)(p) + ROUNDUP64(n)) #endif -static lck_grp_attr_t *kev_lck_grp_attr; -static lck_attr_t *kev_lck_attr; -static lck_grp_t *kev_lck_grp; -static decl_lck_rw_data(, kev_lck_data); -static lck_rw_t *kev_rwlock = &kev_lck_data; +static LCK_GRP_DECLARE(kev_lck_grp, "Kernel Event Protocol"); +static LCK_RW_DECLARE(kev_rwlock, &kev_lck_grp); static int kev_attach(struct socket *so, int proto, struct proc *p); static int kev_detach(struct socket *so); @@ -8238,11 +8233,11 @@ event_sofreelastref(struct socket *so) lck_mtx_unlock(&(ev_pcb->evp_mtx)); LCK_MTX_ASSERT(&(ev_pcb->evp_mtx), LCK_MTX_ASSERT_NOTOWNED); - lck_rw_lock_exclusive(kev_rwlock); + lck_rw_lock_exclusive(&kev_rwlock); LIST_REMOVE(ev_pcb, evp_link); kevtstat.kes_pcbcount--; kevtstat.kes_gencnt++; - lck_rw_done(kev_rwlock); + lck_rw_done(&kev_rwlock); kev_delete(ev_pcb); sofreelastref(so, 1); @@ -8256,9 +8251,8 @@ struct kern_event_head kern_event_head; static u_int32_t static_event_id = 0; -#define EVPCB_ZONE_MAX 65536 -#define EVPCB_ZONE_NAME "kerneventpcb" -static struct zone *ev_pcb_zone; +static ZONE_DECLARE(ev_pcb_zone, "kerneventpcb", + sizeof(struct kern_event_pcb), ZC_ZFREE_CLEARMEM); /* * Install the protosw's for the NKE manager. Invoked at extension load time @@ -8272,43 +8266,9 @@ kern_event_init(struct domain *dp) VERIFY(!(dp->dom_flags & DOM_INITIALIZED)); VERIFY(dp == systemdomain); - kev_lck_grp_attr = lck_grp_attr_alloc_init(); - if (kev_lck_grp_attr == NULL) { - panic("%s: lck_grp_attr_alloc_init failed\n", __func__); - /* NOTREACHED */ - } - - kev_lck_grp = lck_grp_alloc_init("Kernel Event Protocol", - kev_lck_grp_attr); - if (kev_lck_grp == NULL) { - panic("%s: lck_grp_alloc_init failed\n", __func__); - /* NOTREACHED */ - } - - kev_lck_attr = lck_attr_alloc_init(); - if (kev_lck_attr == NULL) { - panic("%s: lck_attr_alloc_init failed\n", __func__); - /* NOTREACHED */ - } - - lck_rw_init(kev_rwlock, kev_lck_grp, kev_lck_attr); - if (kev_rwlock == NULL) { - panic("%s: lck_mtx_alloc_init failed\n", __func__); - /* NOTREACHED */ - } - for (i = 0, pr = &eventsw[0]; i < event_proto_count; i++, pr++) { net_add_proto(pr, dp, 1); } - - ev_pcb_zone = zinit(sizeof(struct kern_event_pcb), - EVPCB_ZONE_MAX * sizeof(struct kern_event_pcb), 0, EVPCB_ZONE_NAME); - if (ev_pcb_zone == NULL) { - panic("%s: failed allocating ev_pcb_zone", __func__); - /* NOTREACHED */ - } - zone_change(ev_pcb_zone, Z_EXPAND, TRUE); - zone_change(ev_pcb_zone, Z_CALLERACCT, TRUE); } static int @@ -8322,21 +8282,18 @@ kev_attach(struct socket *so, __unused int proto, __unused struct proc *p) return error; } - if ((ev_pcb = (struct kern_event_pcb *)zalloc(ev_pcb_zone)) == NULL) { - return ENOBUFS; - } - bzero(ev_pcb, sizeof(struct kern_event_pcb)); - lck_mtx_init(&ev_pcb->evp_mtx, kev_lck_grp, kev_lck_attr); + ev_pcb = zalloc_flags(ev_pcb_zone, Z_WAITOK | Z_ZERO); + lck_mtx_init(&ev_pcb->evp_mtx, &kev_lck_grp, LCK_ATTR_NULL); ev_pcb->evp_socket = so; ev_pcb->evp_vendor_code_filter = 0xffffffff; so->so_pcb = (caddr_t) ev_pcb; - lck_rw_lock_exclusive(kev_rwlock); + lck_rw_lock_exclusive(&kev_rwlock); LIST_INSERT_HEAD(&kern_event_head, ev_pcb, evp_link); kevtstat.kes_pcbcount++; kevtstat.kes_gencnt++; - lck_rw_done(kev_rwlock); + lck_rw_done(&kev_rwlock); return error; } @@ -8345,7 +8302,7 @@ static void kev_delete(struct kern_event_pcb *ev_pcb) { VERIFY(ev_pcb != NULL); - lck_mtx_destroy(&ev_pcb->evp_mtx, kev_lck_grp); + lck_mtx_destroy(&ev_pcb->evp_mtx, &kev_lck_grp); 
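The kernel-event protocol code in this hunk drops its hand-rolled lck_grp/lck_attr setup for statically declared LCK_GRP_DECLARE/LCK_RW_DECLARE locks while keeping the same usage pattern: exclusive holds to mutate the pcb list (attach/detach) and shared holds for posting events and reading stats. A small pthread analogue of that reader/writer split, with illustrative names:

#include <pthread.h>
#include <stddef.h>

/* Statically initialized reader/writer lock: the userland analogue of a
 * LCK_RW_DECLARE that needs no init code at all. */
static pthread_rwlock_t pcb_rwlock = PTHREAD_RWLOCK_INITIALIZER;

struct pcb { struct pcb *next; int events_posted; };
static struct pcb *pcb_list;

/* Writers (attach/detach) take the lock exclusively to edit the list. */
static void
pcb_insert(struct pcb *p)
{
    pthread_rwlock_wrlock(&pcb_rwlock);
    p->next = pcb_list;
    pcb_list = p;
    pthread_rwlock_unlock(&pcb_rwlock);
}

/* Readers (posting an event, dumping stats) only need shared access. */
static void
pcb_post_all(void)
{
    pthread_rwlock_rdlock(&pcb_rwlock);
    for (struct pcb *p = pcb_list; p != NULL; p = p->next) {
        p->events_posted++;          /* per-pcb state guarded separately */
    }
    pthread_rwlock_unlock(&pcb_rwlock);
}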
zfree(ev_pcb_zone, ev_pcb); } @@ -8454,7 +8411,7 @@ kev_post_msg(struct kev_msg *event_msg) ev->event_code = event_msg->event_code; m->m_len = total_size; - lck_rw_lock_shared(kev_rwlock); + lck_rw_lock_shared(&kev_rwlock); for (ev_pcb = LIST_FIRST(&kern_event_head); ev_pcb; ev_pcb = LIST_NEXT(ev_pcb, evp_link)) { @@ -8490,7 +8447,7 @@ kev_post_msg(struct kev_msg *event_msg) os_atomic_inc(&kevtstat.kes_nomem, relaxed); m_free(m); lck_mtx_unlock(&ev_pcb->evp_mtx); - lck_rw_done(kev_rwlock); + lck_rw_done(&kev_rwlock); return ENOMEM; } if (sbappendrecord(&ev_pcb->evp_socket->so_rcv, m2)) { @@ -8509,7 +8466,7 @@ kev_post_msg(struct kev_msg *event_msg) lck_mtx_unlock(&ev_pcb->evp_mtx); } m_free(m); - lck_rw_done(kev_rwlock); + lck_rw_done(&kev_rwlock); return 0; } @@ -8561,7 +8518,7 @@ kevt_getstat SYSCTL_HANDLER_ARGS #pragma unused(oidp, arg1, arg2) int error = 0; - lck_rw_lock_shared(kev_rwlock); + lck_rw_lock_shared(&kev_rwlock); if (req->newptr != USER_ADDR_NULL) { error = EPERM; @@ -8575,7 +8532,7 @@ kevt_getstat SYSCTL_HANDLER_ARGS error = SYSCTL_OUT(req, &kevtstat, MIN(sizeof(struct kevtstat), req->oldlen)); done: - lck_rw_done(kev_rwlock); + lck_rw_done(&kev_rwlock); return error; } @@ -8585,7 +8542,7 @@ kevt_pcblist SYSCTL_HANDLER_ARGS { #pragma unused(oidp, arg1, arg2) int error = 0; - int n, i; + uint64_t n, i; struct xsystmgen xsg; void *buf = NULL; size_t item_size = ROUNDUP64(sizeof(struct xkevtpcb)) + @@ -8599,12 +8556,12 @@ kevt_pcblist SYSCTL_HANDLER_ARGS return ENOMEM; } - lck_rw_lock_shared(kev_rwlock); + lck_rw_lock_shared(&kev_rwlock); n = kevtstat.kes_pcbcount; if (req->oldptr == USER_ADDR_NULL) { - req->oldidx = (n + n / 8) * item_size; + req->oldidx = (size_t) ((n + n / 8) * item_size); goto done; } if (req->newptr != USER_ADDR_NULL) { @@ -8684,7 +8641,11 @@ kevt_pcblist SYSCTL_HANDLER_ARGS } done: - lck_rw_done(kev_rwlock); + lck_rw_done(&kev_rwlock); + + if (buf != NULL) { + FREE(buf, M_TEMP); + } return error; } @@ -8869,19 +8830,18 @@ kevent_copyout_proc_dynkqids(void *proc, user_addr_t ubuf, uint32_t ubufsize, goto out; } - buflen = min(ubuflen, PROC_PIDDYNKQUEUES_MAX); + buflen = MIN(ubuflen, PROC_PIDDYNKQUEUES_MAX); if (ubuflen != 0) { if (os_mul_overflow(sizeof(kqueue_id_t), buflen, &bufsize)) { err = ERANGE; goto out; } - kq_ids = kalloc(bufsize); + kq_ids = kheap_alloc(KHEAP_TEMP, bufsize, Z_WAITOK | Z_ZERO); if (!kq_ids) { err = ENOMEM; goto out; } - bzero(kq_ids, bufsize); } kqhash_lock(fdp); @@ -8904,7 +8864,7 @@ kevent_copyout_proc_dynkqids(void *proc, user_addr_t ubuf, uint32_t ubufsize, if (kq_ids) { size_t copysize; - if (os_mul_overflow(sizeof(kqueue_id_t), min(buflen, nkqueues), ©size)) { + if (os_mul_overflow(sizeof(kqueue_id_t), MIN(buflen, nkqueues), ©size)) { err = ERANGE; goto out; } @@ -8915,7 +8875,7 @@ kevent_copyout_proc_dynkqids(void *proc, user_addr_t ubuf, uint32_t ubufsize, out: if (kq_ids) { - kfree(kq_ids, bufsize); + kheap_free(KHEAP_TEMP, kq_ids, bufsize); } if (!err) { @@ -8993,14 +8953,14 @@ pid_kqueue_extinfo(proc_t p, struct kqueue *kq, user_addr_t ubuf, struct kevent_extinfo *kqext = NULL; /* arbitrary upper limit to cap kernel memory usage, copyout size, etc. 
*/ - buflen = min(buflen, PROC_PIDFDKQUEUE_KNOTES_MAX); + buflen = MIN(buflen, PROC_PIDFDKQUEUE_KNOTES_MAX); - kqext = kalloc(buflen * sizeof(struct kevent_extinfo)); + kqext = kheap_alloc(KHEAP_TEMP, + buflen * sizeof(struct kevent_extinfo), Z_WAITOK | Z_ZERO); if (kqext == NULL) { err = ENOMEM; goto out; } - bzero(kqext, buflen * sizeof(struct kevent_extinfo)); proc_fdlock(p); for (i = 0; i < fdp->fd_knlistsize; i++) { @@ -9018,17 +8978,17 @@ pid_kqueue_extinfo(proc_t p, struct kqueue *kq, user_addr_t ubuf, } } - assert(bufsize >= sizeof(struct kevent_extinfo) * min(buflen, nknotes)); - err = copyout(kqext, ubuf, sizeof(struct kevent_extinfo) * min(buflen, nknotes)); + assert(bufsize >= sizeof(struct kevent_extinfo) * MIN(buflen, nknotes)); + err = copyout(kqext, ubuf, sizeof(struct kevent_extinfo) * MIN(buflen, nknotes)); out: if (kqext) { - kfree(kqext, buflen * sizeof(struct kevent_extinfo)); + kheap_free(KHEAP_TEMP, kqext, buflen * sizeof(struct kevent_extinfo)); kqext = NULL; } if (!err) { - *retval = min(nknotes, PROC_PIDFDKQUEUE_KNOTES_MAX); + *retval = (int32_t)MIN(nknotes, PROC_PIDFDKQUEUE_KNOTES_MAX); } return err; } @@ -9054,12 +9014,12 @@ klist_copy_udata(struct klist *list, uint64_t *buf, } int -kevent_proc_copy_uptrs(void *proc, uint64_t *buf, int bufsize) +kevent_proc_copy_uptrs(void *proc, uint64_t *buf, uint32_t bufsize) { proc_t p = (proc_t)proc; struct filedesc *fdp = p->p_fd; unsigned int nuptrs = 0; - unsigned long buflen = bufsize / sizeof(uint64_t); + unsigned int buflen = bufsize / sizeof(uint64_t); struct kqworkloop *kqwl; if (buflen > 0) { diff --git a/bsd/kern/kern_exec.c b/bsd/kern/kern_exec.c index a5704a7f2..db94eab6a 100644 --- a/bsd/kern/kern_exec.c +++ b/bsd/kern/kern_exec.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2019 Apple Inc. All rights reserved. + * Copyright (c) 2000-2020 Apple Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -116,7 +116,7 @@ #include #include - +#include #include #include @@ -177,6 +177,11 @@ extern boolean_t vm_darkwake_mode; extern int bootarg_execfailurereports; /* bsd_init.c */ +boolean_t unentitled_ios_sim_launch = FALSE; + +#if __has_feature(ptrauth_calls) && defined(XNU_TARGET_OS_OSX) +static TUNABLE(bool, bootarg_arm64e_preview_abi, "-arm64e_preview_abi", false); +#endif /* __has_feature(ptrauth_calls) && defined(XNU_TARGET_OS_OSX) */ #if CONFIG_DTRACE /* Do not include dtrace.h, it redefines kmem_[alloc/free] */ @@ -192,6 +197,34 @@ static void (*dtrace_proc_waitfor_hook)(proc_t) = NULL; #include #endif +#if __has_feature(ptrauth_calls) +static int vm_shared_region_per_team_id = 1; +static int vm_shared_region_by_entitlement = 1; + +/* Flag to control whether shared cache randomized resliding is enabled */ +#if DEVELOPMENT || DEBUG || XNU_TARGET_OS_IOS +static int vm_shared_region_reslide_aslr = 1; +#else /* DEVELOPMENT || DEBUG || XNU_TARGET_OS_IOS */ +static int vm_shared_region_reslide_aslr = 0; +#endif /* DEVELOPMENT || DEBUG || XNU_TARGET_OS_IOS */ +/* + * Flag to control what processes should get shared cache randomize resliding + * after a fault in the shared cache region: + * + * 0 - all processes get a new randomized slide + * 1 - only platform processes get a new randomized slide + */ +int vm_shared_region_reslide_restrict = 1; + +#if DEVELOPMENT || DEBUG +SYSCTL_INT(_vm, OID_AUTO, vm_shared_region_per_team_id, CTLFLAG_RW, &vm_shared_region_per_team_id, 0, ""); +SYSCTL_INT(_vm, OID_AUTO, vm_shared_region_by_entitlement, CTLFLAG_RW, &vm_shared_region_by_entitlement, 0, ""); +SYSCTL_INT(_vm, OID_AUTO, vm_shared_region_reslide_restrict, CTLFLAG_RW, &vm_shared_region_reslide_restrict, 0, ""); +SYSCTL_INT(_vm, OID_AUTO, vm_shared_region_reslide_aslr, CTLFLAG_RW, &vm_shared_region_reslide_aslr, 0, ""); +#endif + +#endif /* __has_feature(ptrauth_calls) */ + /* support for child creation in exec after vfork */ thread_t fork_create_child(task_t parent_task, coalition_t *parent_coalition, @@ -204,13 +237,18 @@ void vfork_exit(proc_t p, int rv); extern void proc_apply_task_networkbg_internal(proc_t, thread_t); extern void task_set_did_exec_flag(task_t task); extern void task_clear_exec_copy_flag(task_t task); -proc_t proc_exec_switch_task(proc_t p, task_t old_task, task_t new_task, thread_t new_thread); +proc_t proc_exec_switch_task(proc_t p, task_t old_task, task_t new_task, thread_t new_thread, void **inherit); boolean_t task_is_active(task_t); boolean_t thread_is_active(thread_t thread); void thread_copy_resource_info(thread_t dst_thread, thread_t src_thread); void *ipc_importance_exec_switch_task(task_t old_task, task_t new_task); extern void ipc_importance_release(void *elem); extern boolean_t task_has_watchports(task_t task); +extern void task_set_no_smt(task_t task); +#if defined(HAS_APPLE_PAC) +char *task_get_vm_shared_region_id_and_jop_pid(task_t task, uint64_t *jop_pid); +#endif +task_t convert_port_to_task(ipc_port_t port); /* * Mach things for which prototypes are unavailable from Mach headers @@ -268,10 +306,16 @@ extern vm_map_t bsd_pageable_map; extern const struct fileops vnops; extern int nextpidversion; + #define USER_ADDR_ALIGN(addr, val) \ ( ( (user_addr_t)(addr) + (val) - 1) \ & ~((val) - 1) ) +/* + * For subsystem root support + */ +#define SPAWN_SUBSYSTEM_ROOT_ENTITLEMENT "com.apple.private.spawn-subsystem-root" + /* Platform Code Exec Logging */ static int platform_exec_logging = 0; @@ -309,7 +353,7 @@ static 
void exec_prefault_data(proc_t, struct image_params *, load_result_t *); static errno_t exec_handle_port_actions(struct image_params *imgp, struct exec_port_actions *port_actions); static errno_t exec_handle_spawnattr_policy(proc_t p, thread_t thread, int psa_apptype, uint64_t psa_qos_clamp, - uint64_t psa_darwin_role, struct exec_port_actions *port_actions); + task_role_t psa_darwin_role, struct exec_port_actions *port_actions); static void exec_port_actions_destroy(struct exec_port_actions *port_actions); /* @@ -330,6 +374,7 @@ static void exec_port_actions_destroy(struct exec_port_actions *port_actions); * (imgp->ip_strspace) updated byte count of space remaining * (imgp->ip_argspace) updated byte count of space in NCARGS */ +__attribute__((noinline)) static int exec_add_user_string(struct image_params *imgp, user_addr_t str, int seg, boolean_t is_ncargs) { @@ -600,7 +645,7 @@ exec_shell_imgact(struct image_params *imgp) } *interp = '\0'; -#if !SECURE_KERNEL +#if CONFIG_SETUID /* * If we have an SUID or SGID script, create a file descriptor * from the vnode and pass /dev/fd/%d instead of the actual @@ -618,9 +663,9 @@ exec_shell_imgact(struct image_params *imgp) return error; } - fp->f_fglob->fg_flag = FREAD; - fp->f_fglob->fg_ops = &vnops; - fp->f_fglob->fg_data = (caddr_t)imgp->ip_vp; + fp->fp_glob->fg_flag = FREAD; + fp->fp_glob->fg_ops = &vnops; + fp->fp_glob->fg_data = (caddr_t)imgp->ip_vp; proc_fdlock(p); procfdtbl_releasefd(p, fd, NULL); @@ -630,7 +675,7 @@ exec_shell_imgact(struct image_params *imgp) imgp->ip_interp_sugid_fd = fd; } -#endif +#endif /* CONFIG_SETUID */ return -3; } @@ -701,6 +746,8 @@ exec_fat_imgact(struct image_params *imgp) /* Check each preference listed against all arches in header */ for (pr = 0; pr < NBINPREFS; pr++) { cpu_type_t pref = psa->psa_binprefs[pr]; + cpu_type_t subpref = psa->psa_subcpuprefs[pr]; + if (pref == 0) { /* No suitable arch in the pref list */ error = EBADARCH; @@ -713,6 +760,7 @@ exec_fat_imgact(struct image_params *imgp) } lret = fatfile_getbestarch_for_cputype(pref, + subpref, (vm_offset_t)fat_header, PAGE_SIZE, imgp, @@ -732,7 +780,8 @@ regular_grading: lret = fatfile_getbestarch((vm_offset_t)fat_header, PAGE_SIZE, imgp, - &fat_arch); + &fat_arch, + (p->p_flag & P_AFFINITY) != 0); if (lret != LOAD_SUCCESS) { error = load_return_to_errno(lret); goto bad; @@ -785,7 +834,7 @@ activate_exec_state(task_t task, proc_t p, thread_t thread, load_result_t *resul if (result->threadstate) { uint32_t *ts = result->threadstate; - uint32_t total_size = result->threadstate_sz; + uint32_t total_size = (uint32_t)result->threadstate_sz; while (total_size > 0) { uint32_t flavor = *ts++; @@ -831,6 +880,83 @@ set_proc_name(struct image_params *imgp, proc_t p) p->p_comm[imgp->ip_ndp->ni_cnd.cn_namelen] = '\0'; } +#if __has_feature(ptrauth_calls) +/** + * Returns a team ID string that may be used to assign a shared region. + * + * Platform binaries do not have team IDs and will return NULL. Non-platform + * binaries without a team ID will be assigned an artificial team ID of "" + * (empty string) so that they will not be assigned to the default shared + * region. 
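 * Illustrative examples of the return value (the team ID below is made up and is
 * not taken from this change):
 *   platform binary                               -> NULL (keeps the default shared region)
 *   signed non-platform binary, team "ABCDE12345" -> "ABCDE12345"
 *   non-platform binary without a team ID         -> ""  (empty string, distinct from NULL)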
+ * + * @param imgp image parameter block + * @return NULL if this is a platform binary, or an appropriate team ID string + * otherwise + */ +static inline const char * +get_teamid_for_shared_region(struct image_params *imgp) +{ + assert(imgp->ip_vp != NULL); + + const char *ret = csvnode_get_teamid(imgp->ip_vp, imgp->ip_arch_offset); + if (ret) { + return ret; + } + + struct cs_blob *blob = csvnode_get_blob(imgp->ip_vp, imgp->ip_arch_offset); + if (csblob_get_platform_binary(blob)) { + return NULL; + } else { + static const char *NO_TEAM_ID = ""; + return NO_TEAM_ID; + } +} + +/** + * Determines whether ptrauth should be enabled for the provided arm64 CPU subtype. + * + * @param cpusubtype Mach-O style CPU subtype + * @return whether the CPU subtype matches arm64e with the current ptrauth ABI + */ +static inline bool +arm64_cpusubtype_uses_ptrauth(cpu_subtype_t cpusubtype) +{ + return (cpusubtype & ~CPU_SUBTYPE_MASK) == CPU_SUBTYPE_ARM64E && + CPU_SUBTYPE_ARM64_PTR_AUTH_VERSION(cpusubtype) == CPU_SUBTYPE_ARM64_PTR_AUTH_CURRENT_VERSION; +} + +#endif /* __has_feature(ptrauth_calls) */ + +/** + * Returns whether a type/subtype slice matches the requested + * type/subtype. + * + * @param mask Bits to mask from the requested/tested cpu type + * @param req_cpu Requested cpu type + * @param req_subcpu Requested cpu subtype + * @param test_cpu Tested slice cpu type + * @param test_subcpu Tested slice cpu subtype + */ +boolean_t +binary_match(cpu_type_t mask, cpu_type_t req_cpu, + cpu_subtype_t req_subcpu, cpu_type_t test_cpu, + cpu_subtype_t test_subcpu) +{ + if ((test_cpu & ~mask) != (req_cpu & ~mask)) { + return FALSE; + } + + test_subcpu &= ~CPU_SUBTYPE_MASK; + req_subcpu &= ~CPU_SUBTYPE_MASK; + + if (test_subcpu != req_subcpu && req_subcpu != (CPU_SUBTYPE_ANY & ~CPU_SUBTYPE_MASK)) { + return FALSE; + } + + return TRUE; +} + + /* * exec_mach_imgact * @@ -873,6 +999,7 @@ exec_mach_imgact(struct image_params *imgp) int vfexec = (imgp->ip_flags & IMGPF_VFORK_EXEC); int exec = (imgp->ip_flags & IMGPF_EXEC); os_reason_t exec_failure_reason = OS_REASON_NULL; + boolean_t reslide = FALSE; /* * make sure it's a Mach-O 1.0 or Mach-O 2.0 binary; the difference @@ -917,12 +1044,15 @@ exec_mach_imgact(struct image_params *imgp) imgp->ip_flags |= IMGPF_IS_64BIT_ADDR | IMGPF_IS_64BIT_DATA; } + /* If posix_spawn binprefs exist, respect those prefs. */ psa = (struct _posix_spawnattr *) imgp->ip_px_sa; if (psa != NULL && psa->psa_binprefs[0] != 0) { int pr = 0; for (pr = 0; pr < NBINPREFS; pr++) { cpu_type_t pref = psa->psa_binprefs[pr]; + cpu_subtype_t subpref = psa->psa_subcpuprefs[pr]; + if (pref == 0) { /* No suitable arch in the pref list */ error = EBADARCH; @@ -934,8 +1064,8 @@ exec_mach_imgact(struct image_params *imgp) goto grade; } - if (pref == imgp->ip_origcputype) { - /* We have a match! 
*/ + if (binary_match(CPU_ARCH_MASK, pref, subpref, + imgp->ip_origcputype, imgp->ip_origcpusubtype)) { goto grade; } } @@ -943,7 +1073,8 @@ exec_mach_imgact(struct image_params *imgp) goto bad; } grade: - if (!grade_binary(imgp->ip_origcputype, imgp->ip_origcpusubtype & ~CPU_SUBTYPE_MASK, TRUE)) { + if (!grade_binary(imgp->ip_origcputype, imgp->ip_origcpusubtype & ~CPU_SUBTYPE_MASK, + imgp->ip_origcpusubtype & CPU_SUBTYPE_MASK, TRUE)) { error = EBADARCH; goto bad; } @@ -963,9 +1094,9 @@ grade: assert(mach_header->cputype == CPU_TYPE_ARM64 ); - if (((mach_header->cputype == CPU_TYPE_ARM64 && - (mach_header->cpusubtype & ~CPU_SUBTYPE_MASK) == CPU_SUBTYPE_ARM64E) - ) && (CPU_SUBTYPE_ARM64_PTR_AUTH_VERSION(mach_header->cpusubtype) == 0)) { + if ((mach_header->cputype == CPU_TYPE_ARM64 && + arm64_cpusubtype_uses_ptrauth(mach_header->cpusubtype)) + ) { imgp->ip_flags &= ~IMGPF_NOJOP; } else { imgp->ip_flags |= IMGPF_NOJOP; @@ -1051,21 +1182,28 @@ grade: } proc_lock(p); - p->p_cputype = imgp->ip_origcputype; - p->p_cpusubtype = imgp->ip_origcpusubtype; + { + p->p_cputype = imgp->ip_origcputype; + p->p_cpusubtype = imgp->ip_origcpusubtype; + } p->p_platform = load_result.ip_platform; + p->p_min_sdk = load_result.lr_min_sdk; p->p_sdk = load_result.lr_sdk; + vm_map_set_user_wire_limit(map, (vm_size_t)proc_limitgetcur(p, RLIMIT_MEMLOCK, FALSE)); +#if XNU_TARGET_OS_OSX + if (p->p_platform == PLATFORM_IOS) { + vm_map_mark_alien(map); + } +#endif /* XNU_TARGET_OS_OSX */ proc_unlock(p); - vm_map_set_user_wire_limit(map, p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur); - /* * Set code-signing flags if this binary is signed, or if parent has * requested them on exec. */ if (load_result.csflags & CS_VALID) { imgp->ip_csflags |= load_result.csflags & - (CS_VALID | CS_SIGNED | CS_DEV_CODE | + (CS_VALID | CS_SIGNED | CS_DEV_CODE | CS_LINKER_SIGNED | CS_HARD | CS_KILL | CS_RESTRICT | CS_ENFORCEMENT | CS_REQUIRE_LV | CS_FORCED_LV | CS_ENTITLEMENTS_VALIDATED | CS_DYLD_PLATFORM | CS_RUNTIME | CS_ENTITLEMENT_FLAGS | @@ -1095,25 +1233,131 @@ grade: } } +#if __has_feature(ptrauth_calls) && defined(XNU_TARGET_OS_OSX) /* - * Set up the system reserved areas in the new address space. + * ptrauth version 0 is a preview ABI. Developers can opt into running + * their own arm64e binaries for local testing, with the understanding + * that future OSes may break ABI. 
*/ - int cpu_subtype; - cpu_subtype = 0; /* all cpu_subtypes use the same shared region */ -#if defined(HAS_APPLE_PAC) + if ((imgp->ip_origcpusubtype & ~CPU_SUBTYPE_MASK) == CPU_SUBTYPE_ARM64E && + CPU_SUBTYPE_ARM64_PTR_AUTH_VERSION(imgp->ip_origcpusubtype) == 0 && + !load_result.platform_binary && + !bootarg_arm64e_preview_abi) { + static bool logged_once = false; + set_proc_name(imgp, p); + + printf("%s: not running binary \"%s\" built against preview arm64e ABI\n", __func__, p->p_name); + if (!os_atomic_xchg(&logged_once, true, relaxed)) { + printf("%s: (to allow this, add \"-arm64e_preview_abi\" to boot-args)\n", __func__); + } + + exec_failure_reason = os_reason_create(OS_REASON_EXEC, EXEC_EXIT_REASON_BAD_MACHO); + if (bootarg_execfailurereports) { + exec_failure_reason->osr_flags |= OS_REASON_FLAG_GENERATE_CRASH_REPORT; + exec_failure_reason->osr_flags |= OS_REASON_FLAG_CONSISTENT_FAILURE; + } + goto badtoolate; + } + + if ((imgp->ip_origcpusubtype & ~CPU_SUBTYPE_MASK) != CPU_SUBTYPE_ARM64E && + imgp->ip_origcputype == CPU_TYPE_ARM64 && + load_result.platform_binary && + (imgp->ip_flags & IMGPF_DRIVER) != 0) { + set_proc_name(imgp, p); + printf("%s: disallowing arm64 platform driverkit binary \"%s\", should be arm64e\n", __func__, p->p_name); + exec_failure_reason = os_reason_create(OS_REASON_EXEC, EXEC_EXIT_REASON_BAD_MACHO); + if (bootarg_execfailurereports) { + exec_failure_reason->osr_flags |= OS_REASON_FLAG_GENERATE_CRASH_REPORT; + exec_failure_reason->osr_flags |= OS_REASON_FLAG_CONSISTENT_FAILURE; + } + goto badtoolate; + } +#endif /* __has_feature(ptrauth_calls) && defined(XNU_TARGET_OS_OSX) */ + + /* + * Set up the shared cache region in the new process. + * + * Normally there is a single shared region per architecture. + * However on systems with Pointer Authentication, we can create + * multiple shared caches with the amount of sharing determined + * by team-id or entitlement. Inherited shared region IDs are used + * for system processes that need to match and be able to inspect + * a pre-existing task. + */ + int cpu_subtype = 0; /* all cpu_subtypes use the same shared region */ +#if __has_feature(ptrauth_calls) + char *shared_region_id = NULL; + size_t len; + char *base; + const char *cbase; +#define TEAM_ID_PREFIX "T-" +#define ENTITLE_PREFIX "E-" +#define SR_PREFIX_LEN 2 +#define SR_ENTITLEMENT "com.apple.pac.shared_region_id" + if (cpu_type() == CPU_TYPE_ARM64 && - (p->p_cpusubtype & ~CPU_SUBTYPE_MASK) == CPU_SUBTYPE_ARM64E) { + arm64_cpusubtype_uses_ptrauth(p->p_cpusubtype) && + (imgp->ip_flags & IMGPF_NOJOP) == 0) { assertf(p->p_cputype == CPU_TYPE_ARM64, "p %p cpu_type() 0x%x p->p_cputype 0x%x p->p_cpusubtype 0x%x", p, cpu_type(), p->p_cputype, p->p_cpusubtype); + /* * arm64e uses pointer authentication, so request a separate * shared region for this CPU subtype. */ cpu_subtype = p->p_cpusubtype & ~CPU_SUBTYPE_MASK; + + /* + * Determine which shared cache to select based on being told, + * matching a team-id or matching an entitlement. 
+ */ + if (imgp->ip_inherited_shared_region_id) { + len = strlen(imgp->ip_inherited_shared_region_id); + shared_region_id = kheap_alloc(KHEAP_DATA_BUFFERS, + len + 1, Z_WAITOK); + memcpy(shared_region_id, imgp->ip_inherited_shared_region_id, len + 1); + } else if ((cbase = get_teamid_for_shared_region(imgp)) != NULL) { + len = strlen(cbase); + if (vm_shared_region_per_team_id) { + shared_region_id = kheap_alloc(KHEAP_DATA_BUFFERS, + len + SR_PREFIX_LEN + 1, Z_WAITOK); + memcpy(shared_region_id, TEAM_ID_PREFIX, SR_PREFIX_LEN); + memcpy(shared_region_id + SR_PREFIX_LEN, cbase, len + 1); + } + } else if ((base = IOVnodeGetEntitlement(imgp->ip_vp, + (int64_t)imgp->ip_arch_offset, SR_ENTITLEMENT)) != NULL) { + len = strlen(base); + if (vm_shared_region_by_entitlement) { + shared_region_id = kheap_alloc(KHEAP_DATA_BUFFERS, + len + SR_PREFIX_LEN + 1, Z_WAITOK); + memcpy(shared_region_id, ENTITLE_PREFIX, SR_PREFIX_LEN); + memcpy(shared_region_id + SR_PREFIX_LEN, base, len + 1); + } + /* Discard the copy of the entitlement */ + kheap_free(KHEAP_DATA_BUFFERS, base, len + 1); + } + } + + if (imgp->ip_flags & IMGPF_RESLIDE) { + reslide = TRUE; + } + + /* use "" as the default shared_region_id */ + if (shared_region_id == NULL) { + shared_region_id = kheap_alloc(KHEAP_DATA_BUFFERS, 1, Z_WAITOK); + *shared_region_id = 0; } -#endif /* HAS_APPLE_PAC */ - vm_map_exec(map, task, load_result.is_64bit_addr, (void *)p->p_fd->fd_rdir, cpu_type(), cpu_subtype); + + /* ensure there's a unique pointer signing key for this shared_region_id */ + shared_region_key_alloc(shared_region_id, + imgp->ip_inherited_shared_region_id != NULL, imgp->ip_inherited_jop_pid); + task_set_shared_region_id(task, shared_region_id); + shared_region_id = NULL; +#endif /* __has_feature(ptrauth_calls) */ + + int cputype = cpu_type(); + vm_map_exec(map, task, load_result.is_64bit_addr, (void *)p->p_fd->fd_rdir, cputype, cpu_subtype, reslide); /* * Close file descriptors which specify close-on-exec. @@ -1241,8 +1485,8 @@ grade: thread_setuserstack(thread, ap); } - if (load_result.dynlinker) { - uint64_t ap; + if (load_result.dynlinker || load_result.is_cambria) { + user_addr_t ap; int new_ptr_size = (imgp->ip_flags & IMGPF_IS_64BIT_ADDR) ? 8 : 4; /* Adjust the stack */ @@ -1266,6 +1510,7 @@ grade: load_result.all_image_info_size); } + /* Avoid immediate VM faults back into kernel */ exec_prefault_data(p, imgp, &load_result); @@ -1369,8 +1614,8 @@ grade: uintptr_t fsid = 0, fileid = 0; if (imgp->ip_vattr) { uint64_t fsid64 = vnode_get_va_fsid(imgp->ip_vattr); - fsid = fsid64; - fileid = imgp->ip_vattr->va_fileid; + fsid = (uintptr_t)fsid64; + fileid = (uintptr_t)imgp->ip_vattr->va_fileid; // check for (unexpected) overflow and trace zero in that case if (fsid != fsid64 || fileid != imgp->ip_vattr->va_fileid) { fsid = fileid = 0; @@ -1387,6 +1632,7 @@ grade: args[2], args[3], (uintptr_t)thread_tid(thread)); } + /* * If posix_spawned with the START_SUSPENDED flag, stop the * process before it runs. 
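/*
 * Illustrative sketch (not from the xnu change above): the shared-region ID strings
 * that the preceding hunk builds with kheap_alloc(KHEAP_DATA_BUFFERS, ...) reduce to
 * a few shapes. The helper below only shows the naming scheme in plain C -- snprintf
 * stands in for the kernel allocator, and the vm_shared_region_per_team_id /
 * vm_shared_region_by_entitlement tunables are omitted. The team ID and the
 * com.apple.pac.shared_region_id entitlement value in the comment are made up.
 *
 *   inherited from a PSPA_PTRAUTH_TASK_PORT donor task -> copied verbatim
 *   team ID "ABCDE12345"                               -> "T-ABCDE12345"
 *   entitlement value "my-daemons"                     -> "E-my-daemons"
 *   none of the above                                  -> ""  (default region)
 */
#include <stdio.h>
#include <stddef.h>

static void
sketch_shared_region_id(char *out, size_t outlen, const char *inherited,
    const char *team_id, const char *entitlement_value)
{
	if (inherited != NULL) {
		snprintf(out, outlen, "%s", inherited);            /* keep the donor task's ID */
	} else if (team_id != NULL) {
		snprintf(out, outlen, "T-%s", team_id);            /* TEAM_ID_PREFIX */
	} else if (entitlement_value != NULL) {
		snprintf(out, outlen, "E-%s", entitlement_value);  /* ENTITLE_PREFIX */
	} else if (outlen > 0) {
		out[0] = '\0';                                     /* default shared region */
	}
}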
@@ -1725,11 +1971,11 @@ exec_validate_spawnattr_policy(int psa_apptype) */ static errno_t exec_handle_spawnattr_policy(proc_t p, thread_t thread, int psa_apptype, uint64_t psa_qos_clamp, - uint64_t psa_darwin_role, struct exec_port_actions *port_actions) + task_role_t psa_darwin_role, struct exec_port_actions *port_actions) { int apptype = TASK_APPTYPE_NONE; int qos_clamp = THREAD_QOS_UNSPECIFIED; - int role = TASK_UNSPECIFIED; + task_role_t role = TASK_UNSPECIFIED; if ((psa_apptype & POSIX_SPAWN_PROC_TYPE_MASK) != 0) { int proctype = psa_apptype & POSIX_SPAWN_PROC_TYPE_MASK; @@ -1750,11 +1996,6 @@ exec_handle_spawnattr_policy(proc_t p, thread_t thread, int psa_apptype, uint64_ case POSIX_SPAWN_PROC_TYPE_APP_DEFAULT: apptype = TASK_APPTYPE_APP_DEFAULT; break; -#if !CONFIG_EMBEDDED - case POSIX_SPAWN_PROC_TYPE_APP_TAL: - apptype = TASK_APPTYPE_APP_TAL; - break; -#endif /* !CONFIG_EMBEDDED */ case POSIX_SPAWN_PROC_TYPE_DRIVER: apptype = TASK_APPTYPE_DRIVER; break; @@ -1863,6 +2104,7 @@ exec_handle_port_actions(struct image_params *imgp, kern_return_t kr; boolean_t task_has_watchport_boost = task_has_watchports(current_task()); boolean_t in_exec = (imgp->ip_flags & IMGPF_EXEC); + int ptrauth_task_port_count = 0; boolean_t suid_cred_specified = FALSE; for (i = 0; i < pacts->pspa_count; i++) { @@ -1888,6 +2130,13 @@ exec_handle_port_actions(struct image_params *imgp, } break; + case PSPA_PTRAUTH_TASK_PORT: + if (++ptrauth_task_port_count > 1) { + ret = EINVAL; + goto done; + } + break; + case PSPA_SUID_CRED: /* Only a single suid credential can be specified. */ if (suid_cred_specified) { @@ -1985,6 +2234,29 @@ exec_handle_port_actions(struct image_params *imgp, actions->registered_array[registered_i++] = port; break; + case PSPA_PTRAUTH_TASK_PORT: +#if defined(HAS_APPLE_PAC) + { + task_t ptr_auth_task = convert_port_to_task(port); + + if (ptr_auth_task == TASK_NULL) { + ret = EINVAL; + break; + } + + imgp->ip_inherited_shared_region_id = + task_get_vm_shared_region_id_and_jop_pid(ptr_auth_task, + &imgp->ip_inherited_jop_pid); + + /* Deallocate task ref returned by convert_port_to_task */ + task_deallocate(ptr_auth_task); + } +#endif /* HAS_APPLE_PAC */ + + /* consume the port right in case of success */ + ipc_port_release_send(port); + break; + case PSPA_SUID_CRED: imgp->ip_sc_port = port; break; @@ -2055,8 +2327,6 @@ exec_handle_file_actions(struct image_params *imgp, short psa_flags) struct vnode_attr *vap; struct nameidata *ndp; int mode = psfa->psfaa_openargs.psfao_mode; - struct dup2_args dup2a; - struct close_nocancel_args ca; int origfd; MALLOC(bufp, char *, sizeof(*vap) + sizeof(*ndp), M_TEMP, M_WAITOK | M_ZERO); @@ -2084,7 +2354,7 @@ exec_handle_file_actions(struct image_params *imgp, short psa_flags) psfa->psfaa_openargs.psfao_oflag, vap, fileproc_alloc_init, NULL, - ival); + &origfd); FREE(bufp, M_TEMP); @@ -2096,27 +2366,17 @@ exec_handle_file_actions(struct image_params *imgp, short psa_flags) * reworking all the open code to preallocate fd * slots, and internally taking one as an argument. */ - if (error || ival[0] == psfa->psfaa_filedes) { + if (error || origfd == psfa->psfaa_filedes) { break; } - origfd = ival[0]; /* * If we didn't fall out from an error, we ended up * with the wrong fd; so now we've got to try to dup2 * it to the right one. 
*/ - dup2a.from = origfd; - dup2a.to = psfa->psfaa_filedes; - - /* - * The dup2() system call implementation sets - * ival to newfd in the success case, but we - * can ignore that, since if we didn't get the - * fd we wanted, the error will stop us. - */ AUDIT_SUBCALL_ENTER(DUP2, p, uthread); - error = dup2(p, &dup2a, ival); + error = dup2(p, origfd, psfa->psfaa_filedes, ival); AUDIT_SUBCALL_EXIT(uthread, error); if (error) { break; @@ -2125,28 +2385,16 @@ exec_handle_file_actions(struct image_params *imgp, short psa_flags) /* * Finally, close the original fd. */ - ca.fd = origfd; - AUDIT_SUBCALL_ENTER(CLOSE, p, uthread); - error = close_nocancel(p, &ca, ival); + error = close_nocancel(p, origfd); AUDIT_SUBCALL_EXIT(uthread, error); } break; case PSFA_DUP2: { - struct dup2_args dup2a; - - dup2a.from = psfa->psfaa_filedes; - dup2a.to = psfa->psfaa_dup2args.psfad_newfiledes; - - /* - * The dup2() system call implementation sets - * ival to newfd in the success case, but we - * can ignore that, since if we didn't get the - * fd we wanted, the error will stop us. - */ AUDIT_SUBCALL_ENTER(DUP2, p, uthread); - error = dup2(p, &dup2a, ival); + error = dup2(p, psfa->psfaa_filedes, + psfa->psfaa_dup2args.psfad_newfiledes, ival); AUDIT_SUBCALL_EXIT(uthread, error); } break; @@ -2154,8 +2402,7 @@ exec_handle_file_actions(struct image_params *imgp, short psa_flags) case PSFA_FILEPORT_DUP2: { ipc_port_t port; kern_return_t kr; - struct dup2_args dup2a; - struct close_nocancel_args ca; + int origfd; if (!MACH_PORT_VALID(psfa->psfaa_fileport)) { error = EINVAL; @@ -2171,44 +2418,39 @@ exec_handle_file_actions(struct image_params *imgp, short psa_flags) break; } - error = fileport_makefd_internal(p, port, 0, ival); + error = fileport_makefd(p, port, 0, &origfd); if (IPC_PORT_NULL != port) { ipc_port_release_send(port); } - if (error || ival[0] == psfa->psfaa_dup2args.psfad_newfiledes) { + if (error || origfd == psfa->psfaa_dup2args.psfad_newfiledes) { break; } - dup2a.from = ca.fd = ival[0]; - dup2a.to = psfa->psfaa_dup2args.psfad_newfiledes; AUDIT_SUBCALL_ENTER(DUP2, p, uthread); - error = dup2(p, &dup2a, ival); + error = dup2(p, origfd, + psfa->psfaa_dup2args.psfad_newfiledes, ival); AUDIT_SUBCALL_EXIT(uthread, error); if (error) { break; } AUDIT_SUBCALL_ENTER(CLOSE, p, uthread); - error = close_nocancel(p, &ca, ival); + error = close_nocancel(p, origfd); AUDIT_SUBCALL_EXIT(uthread, error); } break; case PSFA_CLOSE: { - struct close_nocancel_args ca; - - ca.fd = psfa->psfaa_filedes; - AUDIT_SUBCALL_ENTER(CLOSE, p, uthread); - error = close_nocancel(p, &ca, ival); + error = close_nocancel(p, psfa->psfaa_filedes); AUDIT_SUBCALL_EXIT(uthread, error); } break; case PSFA_INHERIT: { - struct fcntl_nocancel_args fcntla; + struct fileproc *fp; /* * Check to see if the descriptor exists, and @@ -2217,18 +2459,18 @@ exec_handle_file_actions(struct image_params *imgp, short psa_flags) * Attempting to "inherit" a guarded fd will * result in a error. 
*/ - fcntla.fd = psfa->psfaa_filedes; - fcntla.cmd = F_GETFD; - if ((error = fcntl_nocancel(p, &fcntla, ival)) != 0) { - break; - } - if ((ival[0] & FD_CLOEXEC) == FD_CLOEXEC) { - fcntla.fd = psfa->psfaa_filedes; - fcntla.cmd = F_SETFD; - fcntla.arg = ival[0] & ~FD_CLOEXEC; - error = fcntl_nocancel(p, &fcntla, ival); + proc_fdlock(p); + if ((fp = fp_get_noref_locked(p, psfa->psfaa_filedes)) == NULL) { + error = EBADF; + } else if (FILEPROC_TYPE(fp) == FTYPE_GUARDED) { + error = fp_guard_exception(p, psfa->psfaa_filedes, + fp, kGUARD_EXC_NOCLOEXEC); + } else { + p->p_fd->fd_ofileflags[psfa->psfaa_filedes] &= ~UF_EXCLOSE; + error = 0; } + proc_fdunlock(p); } break; @@ -2241,14 +2483,20 @@ exec_handle_file_actions(struct image_params *imgp, short psa_flags) * context of UIO_SYSSPACE, and casts the address * argument to a user_addr_t. */ - struct nameidata nd; + struct nameidata *nd; + nd = kheap_alloc(KHEAP_TEMP, sizeof(*nd), Z_WAITOK | Z_ZERO); + if (nd == NULL) { + error = ENOMEM; + break; + } AUDIT_SUBCALL_ENTER(CHDIR, p, uthread); - NDINIT(&nd, LOOKUP, OP_CHDIR, FOLLOW | AUDITVNPATH1, UIO_SYSSPACE, + NDINIT(nd, LOOKUP, OP_CHDIR, FOLLOW | AUDITVNPATH1, UIO_SYSSPACE, CAST_USER_ADDR_T(psfa->psfaa_chdirargs.psfac_path), imgp->ip_vfs_context); - error = chdir_internal(p, imgp->ip_vfs_context, &nd, 0); + error = chdir_internal(p, imgp->ip_vfs_context, nd, 0); + kheap_free(KHEAP_TEMP, nd, sizeof(*nd)); AUDIT_SUBCALL_EXIT(uthread, error); } break; @@ -2304,7 +2552,7 @@ exec_handle_file_actions(struct image_params *imgp, short psa_flags) case PSFA_DUP2: case PSFA_FILEPORT_DUP2: fd = psfa->psfaa_dup2args.psfad_newfiledes; - /*FALLTHROUGH*/ + OS_FALLTHROUGH; case PSFA_OPEN: case PSFA_INHERIT: *fdflags(p, fd) |= UF_INHERIT; @@ -2346,7 +2594,7 @@ exec_spawnattr_getmacpolicyinfo(const void *macextensions, const char *policynam const _ps_mac_policy_extension_t *extension = &psmx->psmx_extensions[i]; if (strncmp(extension->policyname, policyname, sizeof(extension->policyname)) == 0) { if (lenp != NULL) { - *lenp = extension->datalen; + *lenp = (size_t)extension->datalen; } return extension->datap; } @@ -2397,8 +2645,13 @@ spawn_copyin_macpolicyinfo(const struct user__posix_spawn_args_desc *px_args, _p _ps_mac_policy_extension_t *extension = &psmx->psmx_extensions[copycnt]; void *data = NULL; - MALLOC(data, void *, extension->datalen, M_TEMP, M_WAITOK); - if ((error = copyin(extension->data, data, extension->datalen)) != 0) { +#if !__LP64__ + if (extension->data > UINT32_MAX) { + goto bad; + } +#endif + MALLOC(data, void *, (size_t)extension->datalen, M_TEMP, M_WAITOK); + if ((error = copyin((user_addr_t)extension->data, data, (size_t)extension->datalen)) != 0) { FREE(data, M_TEMP); goto bad; } @@ -2484,7 +2737,7 @@ spawn_validate_persona(struct _posix_spawn_persona_info *px_persona) } } if (px_persona->pspi_flags & POSIX_SPAWN_PERSONA_GROUPS) { - unsigned ngroups = 0; + size_t ngroups = 0; gid_t groups[NGROUPS_MAX]; if (persona_get_groups(persona, &ngroups, groups, @@ -2577,9 +2830,9 @@ out: #if __arm64__ extern int legacy_footprint_entitlement_mode; static inline void -proc_legacy_footprint_entitled(proc_t p, task_t task, const char *caller) +proc_legacy_footprint_entitled(proc_t p, task_t task) { -#pragma unused(p, caller) +#pragma unused(p) boolean_t legacy_footprint_entitled; switch (legacy_footprint_entitlement_mode) { @@ -2608,9 +2861,9 @@ proc_legacy_footprint_entitled(proc_t p, task_t task, const char *caller) } static inline void -proc_ios13extended_footprint_entitled(proc_t p, task_t task, 
const char *caller) +proc_ios13extended_footprint_entitled(proc_t p, task_t task) { -#pragma unused(p, caller) +#pragma unused(p) boolean_t ios13extended_footprint_entitled; /* the entitlement grants a footprint limit increase */ @@ -2620,8 +2873,50 @@ proc_ios13extended_footprint_entitled(proc_t p, task_t task, const char *caller) task_set_ios13extended_footprint_limit(task); } } +static inline void +proc_increased_memory_limit_entitled(proc_t p, task_t task) +{ + static const char kIncreasedMemoryLimitEntitlement[] = "com.apple.developer.kernel.increased-memory-limit"; + bool entitled = false; + + entitled = IOTaskHasEntitlement(task, kIncreasedMemoryLimitEntitlement); + if (entitled) { + memorystatus_act_on_entitled_task_limit(p); + } +} + +/* + * Check for any of the various entitlements that permit a higher + * task footprint limit or alternate accounting and apply them. + */ +static inline void +proc_footprint_entitlement_hacks(proc_t p, task_t task) +{ + proc_legacy_footprint_entitled(p, task); + proc_ios13extended_footprint_entitled(p, task); + proc_increased_memory_limit_entitled(p, task); +} #endif /* __arm64__ */ +#if CONFIG_MACF +/* + * Processes with certain entitlements are granted a jumbo-size VM map. + */ +static inline void +proc_apply_jit_and_jumbo_va_policies(proc_t p, task_t task) +{ + bool jit_entitled; + jit_entitled = (mac_proc_check_map_anon(p, 0, 0, 0, MAP_JIT, NULL) == 0); + if (jit_entitled || (IOTaskHasEntitlement(task, + "com.apple.developer.kernel.extended-virtual-addressing"))) { + vm_map_set_jumbo(get_task_map(task)); + if (jit_entitled) { + vm_map_set_jit_entitled(get_task_map(task)); + } + } +} +#endif /* CONFIG_MACF */ + /* * Apply a modification on the proc's kauth cred until it converges. * @@ -2738,6 +3033,7 @@ posix_spawn(proc_t ap, struct posix_spawn_args *uap, int32_t *retval) user_addr_t pid = uap->pid; int ival[2]; /* dummy retval for setpgid() */ char *bufp = NULL; + char *subsystem_root_path = NULL; struct image_params *imgp; struct vnode_attr *vap; struct vnode_attr *origvap; @@ -2791,6 +3087,9 @@ posix_spawn(proc_t ap, struct posix_spawn_args *uap, int32_t *retval) imgp->ip_px_pcred_info = NULL; imgp->ip_cs_error = OS_REASON_NULL; imgp->ip_simulator_binary = IMGPF_SB_DEFAULT; + imgp->ip_subsystem_root_path = NULL; + imgp->ip_inherited_shared_region_id = NULL; + imgp->ip_inherited_jop_pid = 0; if (uap->adesc != USER_ADDR_NULL) { if (is_64) { @@ -2818,6 +3117,8 @@ posix_spawn(proc_t ap, struct posix_spawn_args *uap, int32_t *retval) px_args.persona_info = CAST_USER_ADDR_T(px_args32.persona_info); px_args.posix_cred_info_size = px_args32.posix_cred_info_size; px_args.posix_cred_info = CAST_USER_ADDR_T(px_args32.posix_cred_info); + px_args.subsystem_root_path_size = px_args32.subsystem_root_path_size; + px_args.subsystem_root_path = CAST_USER_ADDR_T(px_args32.subsystem_root_path); } if (error) { goto bad; @@ -2840,7 +3141,7 @@ posix_spawn(proc_t ap, struct posix_spawn_args *uap, int32_t *retval) } if (px_args.file_actions_size != 0) { /* Limit file_actions to allowed number of open files */ - int maxfa = (p->p_limit ? p->p_rlimit[RLIMIT_NOFILE].rlim_cur : NOFILE); + rlim_t maxfa = (p->p_limit ? 
MIN(proc_limitgetcur(p, RLIMIT_NOFILE, TRUE), maxfilesperproc) : NOFILE); size_t maxfa_size = PSF_ACTIONS_SIZE(maxfa); if (px_args.file_actions_size < PSF_ACTIONS_SIZE(1) || maxfa_size == 0 || px_args.file_actions_size > maxfa_size) { @@ -2959,6 +3260,31 @@ posix_spawn(proc_t ap, struct posix_spawn_args *uap, int32_t *retval) } } #endif /* CONFIG_MACF */ + if ((px_args.subsystem_root_path_size > 0) && (px_args.subsystem_root_path_size <= MAXPATHLEN)) { + /* + * If a valid-looking subsystem root has been + * specified... + */ + if (IOTaskHasEntitlement(old_task, SPAWN_SUBSYSTEM_ROOT_ENTITLEMENT)) { + /* + * ...AND the parent has the entitlement, copy + * the subsystem root path in. + */ + MALLOC(subsystem_root_path, char *, px_args.subsystem_root_path_size, M_SBUF, M_WAITOK | M_ZERO | M_NULL); + + if (subsystem_root_path == NULL) { + error = ENOMEM; + goto bad; + } + + if ((error = copyin(px_args.subsystem_root_path, subsystem_root_path, px_args.subsystem_root_path_size))) { + goto bad; + } + + /* Paranoia */ + subsystem_root_path[px_args.subsystem_root_path_size - 1] = 0; + } + } } /* set uthread to parent */ @@ -2976,6 +3302,10 @@ posix_spawn(proc_t ap, struct posix_spawn_args *uap, int32_t *retval) if (imgp->ip_px_sa != NULL) { struct _posix_spawnattr *psa = (struct _posix_spawnattr *) imgp->ip_px_sa; + if ((psa->psa_options & PSA_OPTION_PLUGIN_HOST_DISABLE_A_KEYS) == PSA_OPTION_PLUGIN_HOST_DISABLE_A_KEYS) { + imgp->ip_flags |= IMGPF_PLUGIN_HOST_DISABLE_A_KEYS; + } + if ((error = exec_validate_spawnattr_policy(psa->psa_apptype)) != 0) { goto bad; } @@ -3021,7 +3351,8 @@ posix_spawn(proc_t ap, struct posix_spawn_args *uap, int32_t *retval) * privileged coalition to spawn processes * into coalitions other than their own */ - if (!task_is_in_privileged_coalition(p->task, i)) { + if (!task_is_in_privileged_coalition(p->task, i) && + !IOTaskHasEntitlement(p->task, COALITION_SPAWN_ENTITLEMENT)) { coal_dbg("ERROR: %d not in privilegd " "coalition of type %d", p->p_pid, i); @@ -3175,6 +3506,16 @@ do_fork1: } assert(p != NULL); + if (subsystem_root_path) { + /* If a subsystem root was specified, swap it in */ + char * old_subsystem_root_path = p->p_subsystem_root_path; + p->p_subsystem_root_path = subsystem_root_path; + subsystem_root_path = old_subsystem_root_path; + } + + /* We'll need the subsystem root for setting up Apple strings */ + imgp->ip_subsystem_root_path = p->p_subsystem_root_path; + context.vc_thread = imgp->ip_new_thread; context.vc_ucred = p->p_ucred; /* XXX must NOT be kauth_cred_get() */ @@ -3317,6 +3658,12 @@ do_fork1: } #endif /* !SECURE_KERNEL */ +#if __has_feature(ptrauth_calls) + if (vm_shared_region_reslide_aslr && is_64 && (px_sa.psa_flags & _POSIX_SPAWN_RESLIDE)) { + imgp->ip_flags |= IMGPF_RESLIDE; + } +#endif /* __has_feature(ptrauth_calls) */ + if ((px_sa.psa_apptype & POSIX_SPAWN_PROC_TYPE_MASK) == POSIX_SPAWN_PROC_TYPE_DRIVER) { imgp->ip_flags |= IMGPF_DRIVER; @@ -3357,23 +3704,16 @@ do_fork1: */ error = exec_activate_image(imgp); #if defined(HAS_APPLE_PAC) + ml_task_set_jop_pid_from_shared_region(new_task); ml_task_set_disable_user_jop(new_task, imgp->ip_flags & IMGPF_NOJOP ? TRUE : FALSE); ml_thread_set_disable_user_jop(imgp->ip_new_thread, imgp->ip_flags & IMGPF_NOJOP ? 
TRUE : FALSE); + ml_thread_set_jop_pid(imgp->ip_new_thread, new_task); #endif if (error == 0 && !spawn_no_exec) { - p = proc_exec_switch_task(p, old_task, new_task, imgp->ip_new_thread); + p = proc_exec_switch_task(p, old_task, new_task, imgp->ip_new_thread, &inherit); /* proc ref returned */ should_release_proc_ref = TRUE; - - /* - * Need to transfer pending watch port boosts to the new task while still making - * sure that the old task remains in the importance linkage. Create an importance - * linkage from old task to new task, then switch the task importance base - * of old task and new task. After the switch the port watch boost will be - * boosting the new task and new task will be donating importance to old task. - */ - inherit = ipc_importance_exec_switch_task(old_task, new_task); } if (error == 0) { @@ -3457,7 +3797,7 @@ do_fork1: * * Userland gives us interval in seconds, and the kernel SPI expects nanoseconds. */ - if (px_sa.psa_cpumonitor_percent != 0) { + if ((px_sa.psa_cpumonitor_percent != 0) && (px_sa.psa_cpumonitor_percent < UINT8_MAX)) { /* * Always treat a CPU monitor activation coming from spawn as entitled. Requiring * an entitlement to configure the monitor a certain way seems silly, since @@ -3465,7 +3805,7 @@ do_fork1: */ error = proc_set_task_ruse_cpu(p->task, TASK_POLICY_RESOURCE_ATTRIBUTE_NOTIFY_EXC, - px_sa.psa_cpumonitor_percent, + (uint8_t)px_sa.psa_cpumonitor_percent, px_sa.psa_cpumonitor_interval * NSEC_PER_SEC, 0, TRUE); } @@ -3484,11 +3824,11 @@ bad: if (error == 0) { /* reset delay idle sleep status if set */ -#if !CONFIG_EMBEDDED +#if CONFIG_DELAY_IDLE_SLEEP if ((p->p_flag & P_DELAYIDLESLEEP) == P_DELAYIDLESLEEP) { OSBitAndAtomic(~((uint32_t)P_DELAYIDLESLEEP), &p->p_flag); } -#endif /* !CONFIG_EMBEDDED */ +#endif /* CONFIG_DELAY_IDLE_SLEEP */ /* upon successful spawn, re/set the proc control state */ if (imgp->ip_px_sa != NULL) { switch (px_sa.psa_pcontrol) { @@ -3572,6 +3912,12 @@ bad: if (imgp->ip_px_sa != NULL && px_sa.psa_thread_limit > 0) { task_set_thread_limit(new_task, (uint16_t)px_sa.psa_thread_limit); } + + /* Disable wakeup monitoring for DriverKit processes */ + if (px_sa.psa_apptype == POSIX_SPAWN_PROC_TYPE_DRIVER) { + uint32_t flags = WAKEMON_DISABLE; + task_wakeups_monitor_ctl(new_task, &flags, NULL); + } } /* @@ -3619,8 +3965,7 @@ bad: } #if __arm64__ - proc_legacy_footprint_entitled(p, new_task, __FUNCTION__); - proc_ios13extended_footprint_entitled(p, new_task, __FUNCTION__); + proc_footprint_entitlement_hacks(p, new_task); #endif /* __arm64__ */ #if __has_feature(ptrauth_calls) @@ -3676,7 +4021,18 @@ bad: struct _posix_spawnattr *psa = (struct _posix_spawnattr *) imgp->ip_px_sa; if (psa->psa_max_addr) { - vm_map_set_max_addr(get_task_map(new_task), psa->psa_max_addr); + vm_map_set_max_addr(get_task_map(new_task), (vm_map_offset_t)psa->psa_max_addr); + } + } + + if (error == 0 && imgp->ip_px_sa != NULL) { + struct _posix_spawnattr *psa = (struct _posix_spawnattr *) imgp->ip_px_sa; + + if (psa->psa_no_smt) { + task_set_no_smt(new_task); + } + if (psa->psa_tecs) { + task_set_tecs(new_task); } } @@ -3686,14 +4042,7 @@ bad: task_set_main_thread_qos(new_task, main_thread); #if CONFIG_MACF - /* - * Processes with the MAP_JIT entitlement are permitted to have - * a jumbo-size map. 
- */ - if (mac_proc_check_map_anon(p, 0, 0, 0, MAP_JIT, NULL) == 0) { - vm_map_set_jumbo(get_task_map(new_task)); - vm_map_set_jit_entitled(get_task_map(new_task)); - } + proc_apply_jit_and_jumbo_va_policies(p, new_task); #endif /* CONFIG_MACF */ } @@ -3753,6 +4102,10 @@ bad: if (imgp->ip_px_pcred_info != NULL) { FREE(imgp->ip_px_pcred_info, M_TEMP); } + + if (subsystem_root_path != NULL) { + FREE(subsystem_root_path, M_SBUF); + } #if CONFIG_MACF if (imgp->ip_px_smpx != NULL) { spawn_free_macpolicyinfo(imgp->ip_px_smpx); @@ -3767,6 +4120,11 @@ bad: os_reason_free(imgp->ip_cs_error); imgp->ip_cs_error = OS_REASON_NULL; } + if (imgp->ip_inherited_shared_region_id != NULL) { + kheap_free(KHEAP_DATA_BUFFERS, imgp->ip_inherited_shared_region_id, + strlen(imgp->ip_inherited_shared_region_id) + 1); + imgp->ip_inherited_shared_region_id = NULL; + } #endif if (imgp->ip_sc_port != NULL) { ipc_port_release_send(imgp->ip_sc_port); @@ -3923,6 +4281,7 @@ bad: * old_task task before exec * new_task task after exec * new_thread thread in new task + * inherit resulting importance linkage * * Returns: proc. * @@ -3939,7 +4298,8 @@ bad: * error and let the terminated process complete exec and die. */ proc_t -proc_exec_switch_task(proc_t p, task_t old_task, task_t new_task, thread_t new_thread) +proc_exec_switch_task(proc_t p, task_t old_task, task_t new_task, thread_t new_thread, + void **inherit) { int error = 0; boolean_t task_active; @@ -4028,6 +4388,20 @@ proc_exec_switch_task(proc_t p, task_t old_task, task_t new_task, thread_t new_t task_copy_fields_for_exec(new_task, old_task); + /* Transfer sandbox filter bits to new_task. */ + task_transfer_mach_filter_bits(new_task, old_task); + + /* + * Need to transfer pending watch port boosts to the new task + * while still making sure that the old task remains in the + * importance linkage. Create an importance linkage from old task + * to new task, then switch the task importance base of old task + * and new task. After the switch the port watch boost will be + * boosting the new task and new task will be donating importance + * to old task. + */ + *inherit = ipc_importance_exec_switch_task(old_task, new_task); + proc_transend(p, 1); } } @@ -4153,6 +4527,7 @@ __mac_execve(proc_t p, struct __mac_execve_args *uap, int32_t *retval) imgp->ip_mac_return = 0; imgp->ip_cs_error = OS_REASON_NULL; imgp->ip_simulator_binary = IMGPF_SB_DEFAULT; + imgp->ip_subsystem_root_path = NULL; #if CONFIG_MACF if (uap->mac_p != USER_ADDR_NULL) { @@ -4212,6 +4587,8 @@ __mac_execve(proc_t p, struct __mac_execve_args *uap, int32_t *retval) context.vc_thread = imgp->ip_new_thread; } + imgp->ip_subsystem_root_path = p->p_subsystem_root_path; + error = exec_activate_image(imgp); /* thread and task ref returned for vfexec case */ @@ -4228,18 +4605,9 @@ __mac_execve(proc_t p, struct __mac_execve_args *uap, int32_t *retval) } if (!error && !in_vfexec) { - p = proc_exec_switch_task(p, old_task, new_task, imgp->ip_new_thread); + p = proc_exec_switch_task(p, old_task, new_task, imgp->ip_new_thread, &inherit); /* proc ref returned */ should_release_proc_ref = TRUE; - - /* - * Need to transfer pending watch port boosts to the new task while still making - * sure that the old task remains in the importance linkage. Create an importance - * linkage from old task to new task, then switch the task importance base - * of old task and new task. After the switch the port watch boost will be - * boosting the new task and new task will be donating importance to old task. 
- */ - inherit = ipc_importance_exec_switch_task(old_task, new_task); } kauth_cred_unref(&context.vc_ucred); @@ -4257,6 +4625,13 @@ __mac_execve(proc_t p, struct __mac_execve_args *uap, int32_t *retval) error = check_for_signature(p, imgp); } +#if defined(HAS_APPLE_PAC) + if (imgp->ip_new_thread && !error) { + ml_task_set_jop_pid_from_shared_region(new_task); + ml_thread_set_jop_pid(imgp->ip_new_thread, new_task); + } +#endif /* defined(HAS_APPLE_PAC) */ + /* flag exec has occurred, notify only if it has not failed due to FP Key error */ if (exec_done && ((p->p_lflag & P_LTERM_DECRYPTFAIL) == 0)) { proc_knote(p, NOTE_EXEC); @@ -4299,8 +4674,7 @@ __mac_execve(proc_t p, struct __mac_execve_args *uap, int32_t *retval) proc_transend(p, 0); #if __arm64__ - proc_legacy_footprint_entitled(p, new_task, __FUNCTION__); - proc_ios13extended_footprint_entitled(p, new_task, __FUNCTION__); + proc_footprint_entitlement_hacks(p, new_task); #endif /* __arm64__ */ /* Sever any extant thread affinity */ @@ -4328,14 +4702,7 @@ __mac_execve(proc_t p, struct __mac_execve_args *uap, int32_t *retval) #endif /* CONFIG_ARCADE */ #if CONFIG_MACF - /* - * Processes with the MAP_JIT entitlement are permitted to have - * a jumbo-size map. - */ - if (mac_proc_check_map_anon(p, 0, 0, 0, MAP_JIT, NULL) == 0) { - vm_map_set_jumbo(get_task_map(new_task)); - vm_map_set_jit_entitled(get_task_map(new_task)); - } + proc_apply_jit_and_jumbo_va_policies(p, new_task); #endif /* CONFIG_MACF */ if (vm_darkwake_mode == TRUE) { @@ -4577,7 +4944,7 @@ copyoutptr(user_addr_t ua, user_addr_t ptr, int ptr_size) * to a pointer boundary so that the exec_path, env[i], and argv[i] pointers * which preceed it on the stack are properly aligned. */ - +__attribute__((noinline)) static int exec_copyout_strings(struct image_params *imgp, user_addr_t *stackp) { @@ -4585,7 +4952,7 @@ exec_copyout_strings(struct image_params *imgp, user_addr_t *stackp) int ptr_size = (imgp->ip_flags & IMGPF_IS_64BIT_ADDR) ? 8 : 4; int ptr_area_size; void *ptr_buffer_start, *ptr_buffer; - int string_size; + size_t string_size; user_addr_t string_area; /* *argv[], *env[] */ user_addr_t ptr_area; /* argv[], env[], applev[] */ @@ -4781,6 +5148,7 @@ bad: * Note: The argument and environment vectors are user space pointers * to arrays of user space pointers. 
*/ +__attribute__((noinline)) static int exec_extract_strings(struct image_params *imgp) { @@ -5013,6 +5381,8 @@ bad: extern user32_addr_t commpage_text32_location; extern user64_addr_t commpage_text64_location; +extern uuid_string_t bootsessionuuid_string; + #define MAIN_STACK_VALUES 4 #define MAIN_STACK_KEY "main_stack=" @@ -5020,6 +5390,12 @@ extern user64_addr_t commpage_text64_location; #define DYLD_FSID_KEY "dyld_file=" #define CDHASH_KEY "executable_cdhash=" #define DYLD_FLAGS_KEY "dyld_flags=" +#define SUBSYSTEM_ROOT_PATH_KEY "subsystem_root_path=" +#define APP_BOOT_SESSION_KEY "executable_boothash=" +#if __has_feature(ptrauth_calls) +#define PTRAUTH_DISABLED_FLAG "ptrauth_disabled=1" +#define DYLD_ARM64E_ABI_KEY "arm64e_abi=" +#endif /* __has_feature(ptrauth_calls) */ #define FSID_MAX_STRING "0x1234567890abcdef,0x1234567890abcdef" @@ -5045,9 +5421,9 @@ exec_add_entropy_key(struct image_params *imgp, } int len = scnprintf(str, sizeof(str), "%s0x%llx", key, entropy[0]); - int remaining = sizeof(str) - len; + size_t remaining = sizeof(str) - len; for (int i = 1; i < values && remaining > 0; ++i) { - int start = sizeof(str) - remaining; + size_t start = sizeof(str) - remaining; len = scnprintf(&str[start], remaining, ",0x%llx", entropy[i]); remaining -= len; } @@ -5059,9 +5435,18 @@ exec_add_entropy_key(struct image_params *imgp, * Build up the contents of the apple[] string vector */ #if (DEVELOPMENT || DEBUG) -uint64_t dyld_flags = 0; +extern uint64_t dyld_flags; #endif +#if __has_feature(ptrauth_calls) +static inline bool +is_arm64e_running_as_arm64(const struct image_params *imgp) +{ + return (imgp->ip_origcpusubtype & ~CPU_SUBTYPE_MASK) == CPU_SUBTYPE_ARM64E && + (imgp->ip_flags & IMGPF_NOJOP); +} +#endif /* __has_feature(ptrauth_calls) */ + static int exec_add_apple_strings(struct image_params *imgp, const load_result_t *load_result) @@ -5077,12 +5462,14 @@ exec_add_apple_strings(struct image_params *imgp, char pfz_string[strlen(PFZ_KEY) + HEX_STR_LEN + 1]; if (img_ptr_size == 8) { - snprintf(pfz_string, sizeof(pfz_string), PFZ_KEY "0x%llx", commpage_text64_location); + __assert_only size_t ret = snprintf(pfz_string, sizeof(pfz_string), PFZ_KEY "0x%llx", commpage_text64_location); + assert(ret < sizeof(pfz_string)); } else { snprintf(pfz_string, sizeof(pfz_string), PFZ_KEY "0x%x", commpage_text32_location); } error = exec_add_user_string(imgp, CAST_USER_ADDR_T(pfz_string), UIO_SYSSPACE, FALSE); if (error) { + printf("Failed to add the pfz string with error %d\n", error); goto bad; } imgp->ip_applec++; @@ -5196,6 +5583,26 @@ exec_add_apple_strings(struct image_params *imgp, goto bad; } imgp->ip_applec++; + + /* hash together cd-hash and boot-session-uuid */ + uint8_t sha_digest[SHA256_DIGEST_LENGTH]; + SHA256_CTX sha_ctx; + SHA256_Init(&sha_ctx); + SHA256_Update(&sha_ctx, bootsessionuuid_string, sizeof(bootsessionuuid_string)); + SHA256_Update(&sha_ctx, cdhash, sizeof(cdhash)); + SHA256_Final(sha_digest, &sha_ctx); + char app_boot_string[strlen(APP_BOOT_SESSION_KEY) + 2 * SHA1_RESULTLEN + 1]; + strncpy(app_boot_string, APP_BOOT_SESSION_KEY, sizeof(app_boot_string)); + char *s = app_boot_string + sizeof(APP_BOOT_SESSION_KEY) - 1; + for (int i = 0; i < SHA1_RESULTLEN; i++) { + snprintf(s, 3, "%02x", (int) sha_digest[i]); + s += 2; + } + error = exec_add_user_string(imgp, CAST_USER_ADDR_T(app_boot_string), UIO_SYSSPACE, FALSE); + if (error) { + goto bad; + } + imgp->ip_applec++; } #if (DEVELOPMENT || DEBUG) if (dyld_flags) { @@ -5208,6 +5615,49 @@ exec_add_apple_strings(struct 
image_params *imgp, imgp->ip_applec++; } #endif + if (imgp->ip_subsystem_root_path) { + size_t buffer_len = MAXPATHLEN + strlen(SUBSYSTEM_ROOT_PATH_KEY); + char subsystem_root_path_string[buffer_len]; + int required_len = snprintf(subsystem_root_path_string, buffer_len, SUBSYSTEM_ROOT_PATH_KEY "%s", imgp->ip_subsystem_root_path); + + if (((size_t)required_len >= buffer_len) || (required_len < 0)) { + error = ENAMETOOLONG; + goto bad; + } + + error = exec_add_user_string(imgp, CAST_USER_ADDR_T(subsystem_root_path_string), UIO_SYSSPACE, FALSE); + if (error) { + goto bad; + } + + imgp->ip_applec++; + } +#if __has_feature(ptrauth_calls) + if (is_arm64e_running_as_arm64(imgp)) { + error = exec_add_user_string(imgp, CAST_USER_ADDR_T(PTRAUTH_DISABLED_FLAG), UIO_SYSSPACE, FALSE); + if (error) { + goto bad; + } + + imgp->ip_applec++; + } +#endif /* __has_feature(ptrauth_calls) */ + + +#if __has_feature(ptrauth_calls) && defined(XNU_TARGET_OS_OSX) + { + char dyld_abi_string[strlen(DYLD_ARM64E_ABI_KEY) + 8]; + strlcpy(dyld_abi_string, DYLD_ARM64E_ABI_KEY, sizeof(dyld_abi_string)); + bool allowAll = bootarg_arm64e_preview_abi; + strlcat(dyld_abi_string, (allowAll ? "all" : "os"), sizeof(dyld_abi_string)); + error = exec_add_user_string(imgp, CAST_USER_ADDR_T(dyld_abi_string), UIO_SYSSPACE, FALSE); + if (error) { + goto bad; + } + + imgp->ip_applec++; + } +#endif /* Align the tail of the combined applev area */ while (imgp->ip_strspace % img_ptr_size != 0) { @@ -5219,8 +5669,6 @@ bad: return error; } -#define unix_stack_size(p) (p->p_rlimit[RLIMIT_STACK].rlim_cur) - /* * exec_check_permissions * @@ -5279,7 +5727,14 @@ exec_check_permissions(struct image_params *imgp) } imgp->ip_arch_offset = (user_size_t)0; +#if __LP64__ imgp->ip_arch_size = vap->va_data_size; +#else + if (vap->va_data_size > UINT32_MAX) { + return ENOEXEC; + } + imgp->ip_arch_size = (user_size_t)vap->va_data_size; +#endif /* Disable setuid-ness for traced programs or if MNT_NOSUID */ if ((vp->v_mount->mnt_flag & MNT_NOSUID) || (p->p_lflag & P_LTRACED)) { @@ -5322,7 +5777,6 @@ exec_check_permissions(struct image_params *imgp) vnode_unlock(vp); #endif - /* XXX May want to indicate to underlying FS that vnode is open */ return error; @@ -5355,6 +5809,7 @@ exec_check_permissions(struct image_params *imgp) * P_SUGID bit potentially modified * Potentially modified */ +__attribute__((noinline)) static int exec_handle_sugid(struct image_params *imgp) { @@ -5416,7 +5871,7 @@ exec_handle_sugid(struct image_params *imgp) handle_mac_transition: #endif -#if !SECURE_KERNEL +#if CONFIG_SETUID /* * Replace the credential with a copy of itself if euid or * egid change. @@ -5482,7 +5937,7 @@ handle_mac_transition: imgp->ip_origvattr->va_gid); }); } -#endif /* !SECURE_KERNEL */ +#endif /* CONFIG_SETUID */ #if CONFIG_MACF /* @@ -5569,7 +6024,7 @@ handle_mac_transition: * to libc. 
*/ for (i = 0; i < 3; i++) { - if (p->p_fd->fd_ofiles[i] != NULL) { + if (fp_get_noref_locked(p, i) != NULL) { continue; } @@ -5615,7 +6070,7 @@ handle_mac_transition: break; } - struct fileglob *fg = fp->f_fglob; + struct fileglob *fg = fp->fp_glob; fg->fg_flag = flag; fg->fg_ops = &vnops; @@ -5694,6 +6149,7 @@ handle_mac_transition: * Returns: KERN_SUCCESS Stack successfully created * !KERN_SUCCESS Mach failure code */ +__attribute__((noinline)) static kern_return_t create_unix_stack(vm_map_t map, load_result_t* load_result, proc_t p) @@ -5705,11 +6161,14 @@ create_unix_stack(vm_map_t map, load_result_t* load_result, mach_vm_address_t user_stack = load_result->user_stack; proc_lock(p); - p->user_stack = user_stack; + p->user_stack = (uintptr_t)user_stack; if (load_result->custom_stack) { p->p_lflag |= P_LCUSTOM_STACK; } proc_unlock(p); + if (vm_map_page_shift(map) < (int)PAGE_SHIFT) { + DEBUG4K_LOAD("map %p user_stack 0x%llx custom %d user_stack_alloc_size 0x%llx\n", map, user_stack, load_result->custom_stack, load_result->user_stack_alloc_size); + } if (load_result->user_stack_alloc_size > 0) { /* @@ -5722,7 +6181,8 @@ create_unix_stack(vm_map_t map, load_result_t* load_result, if (mach_vm_round_page_overflow(load_result->user_stack_alloc_size, &size)) { return KERN_INVALID_ARGUMENT; } - addr = mach_vm_trunc_page(load_result->user_stack - size); + addr = vm_map_trunc_page(load_result->user_stack - size, + vm_map_page_mask(map)); kr = mach_vm_allocate_kernel(map, &addr, size, VM_FLAGS_FIXED, VM_MEMORY_STACK); if (kr != KERN_SUCCESS) { @@ -5735,24 +6195,22 @@ create_unix_stack(vm_map_t map, load_result_t* load_result, } user_stack = addr + size; - load_result->user_stack = user_stack; + load_result->user_stack = (user_addr_t)user_stack; proc_lock(p); - p->user_stack = user_stack; + p->user_stack = (uintptr_t)user_stack; proc_unlock(p); } - load_result->user_stack_alloc = addr; + load_result->user_stack_alloc = (user_addr_t)addr; /* * And prevent access to what's above the current stack * size limit for this process. 
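 * (Worked example with made-up numbers: if the stack allocation "size" is 64 MB and
 * the RLIMIT_STACK current limit is 8 MB, prot_size below comes out to
 * trunc_page(64 MB - 8 MB) = 56 MB -- the part of the allocation beyond what the
 * process may currently use.)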
*/ if (load_result->user_stack_size == 0) { - proc_list_lock(); - load_result->user_stack_size = unix_stack_size(p); - proc_list_unlock(); - prot_size = mach_vm_trunc_page(size - load_result->user_stack_size); + load_result->user_stack_size = proc_limitgetcur(p, RLIMIT_STACK, TRUE); + prot_size = vm_map_trunc_page(size - load_result->user_stack_size, vm_map_page_mask(map)); } else { prot_size = PAGE_SIZE; } @@ -5881,10 +6339,10 @@ load_init_program_at_path(proc_t p, user_addr_t scratch_addr, const char* path) static const char * init_programs[] = { #if DEBUG - "/usr/local/sbin/launchd.debug", + "/usr/appleinternal/sbin/launchd.debug", #endif #if DEVELOPMENT || DEBUG - "/usr/local/sbin/launchd.development", + "/usr/appleinternal/sbin/launchd.development", #endif "/sbin/launchd", }; @@ -5913,9 +6371,9 @@ static const char * init_programs[] = { * * DEBUG DEVELOPMENT RELEASE PATH * ---------------------------------------------------------------------------------- - * 1 1 NA /usr/local/sbin/launchd.$LAUNCHDSUFFIX - * 2 NA NA /usr/local/sbin/launchd.debug - * 3 2 NA /usr/local/sbin/launchd.development + * 1 1 NA /usr/appleinternal/sbin/launchd.$LAUNCHDSUFFIX + * 2 NA NA /usr/appleinternal/sbin/launchd.debug + * 3 2 NA /usr/appleinternal/sbin/launchd.development * 4 3 1 /sbin/launchd */ void @@ -5932,7 +6390,18 @@ load_init_program(proc_t p) (void) memorystatus_init_at_boot_snapshot(); #endif /* CONFIG_MEMORYSTATUS */ +#if __has_feature(ptrauth_calls) + PE_parse_boot_argn("vm_shared_region_per_team_id", &vm_shared_region_per_team_id, sizeof(vm_shared_region_per_team_id)); + PE_parse_boot_argn("vm_shared_region_by_entitlement", &vm_shared_region_by_entitlement, sizeof(vm_shared_region_by_entitlement)); + PE_parse_boot_argn("vm_shared_region_reslide_aslr", &vm_shared_region_reslide_aslr, sizeof(vm_shared_region_reslide_aslr)); + PE_parse_boot_argn("vm_shared_region_reslide_restrict", &vm_shared_region_reslide_restrict, sizeof(vm_shared_region_reslide_restrict)); +#endif /* __has_feature(ptrauth_calls) */ + #if DEBUG || DEVELOPMENT +#if XNU_TARGET_OS_OSX + PE_parse_boot_argn("unentitled_ios_sim_launch", &unentitled_ios_sim_launch, sizeof(unentitled_ios_sim_launch)); +#endif /* XNU_TARGET_OS_OSX */ + /* Check for boot-arg suffix first */ char launchd_suffix[64]; if (PE_parse_boot_argn("launchdsuffix", launchd_suffix, sizeof(launchd_suffix))) { @@ -5949,14 +6418,14 @@ load_init_program(proc_t p) panic("Process 1 exec of launchd.release failed, errno %d", error); } else { - strlcpy(launchd_path, "/usr/local/sbin/launchd.", sizeof(launchd_path)); + strlcpy(launchd_path, "/usr/appleinternal/sbin/launchd.", sizeof(launchd_path)); strlcat(launchd_path, launchd_suffix, sizeof(launchd_path)); printf("load_init_program: attempting to load %s\n", launchd_path); error = load_init_program_at_path(p, (user_addr_t)scratch_addr, launchd_path); if (!error) { return; - } else { + } else if (error != ENOENT) { printf("load_init_program: failed loading %s: errno %d\n", launchd_path, error); } } @@ -5969,7 +6438,7 @@ load_init_program(proc_t p) error = load_init_program_at_path(p, (user_addr_t)scratch_addr, init_programs[i]); if (!error) { return; - } else { + } else if (error != ENOENT) { printf("load_init_program: failed loading %s: errno %d\n", init_programs[i], error); } } @@ -6321,6 +6790,12 @@ check_for_signature(proc_t p, struct image_params *imgp) if (p->p_csflags & (CS_HARD | CS_KILL)) { vm_map_switch_protect(get_task_map(p->task), TRUE); } + /* set the cs_enforced flags in the map */ + if (p->p_csflags & 
CS_ENFORCEMENT) { + vm_map_cs_enforcement_set(get_task_map(p->task), TRUE); + } else { + vm_map_cs_enforcement_set(get_task_map(p->task), FALSE); + } /* * image activation may be failed due to policy @@ -6343,6 +6818,47 @@ check_for_signature(proc_t p, struct image_params *imgp) goto done; } +#if XNU_TARGET_OS_OSX + /* Check for platform passed in spawn attr if iOS binary is being spawned */ + if (proc_platform(p) == PLATFORM_IOS) { + struct _posix_spawnattr *psa = (struct _posix_spawnattr *) imgp->ip_px_sa; + if (psa == NULL || psa->psa_platform == 0) { + boolean_t no_sandbox_entitled = FALSE; +#if DEBUG || DEVELOPMENT + /* + * Allow iOS binaries to spawn on internal systems + * if no-sandbox entitlement is present of unentitled_ios_sim_launch + * boot-arg set to true + */ + if (unentitled_ios_sim_launch) { + no_sandbox_entitled = TRUE; + } else { + no_sandbox_entitled = IOVnodeHasEntitlement(imgp->ip_vp, + (int64_t)imgp->ip_arch_offset, "com.apple.private.security.no-sandbox"); + } +#endif /* DEBUG || DEVELOPMENT */ + if (!no_sandbox_entitled) { + signature_failure_reason = os_reason_create(OS_REASON_EXEC, + EXEC_EXIT_REASON_WRONG_PLATFORM); + error = EACCES; + goto done; + } + printf("Allowing spawn of iOS binary %s since it has " + "com.apple.private.security.no-sandbox entitlement or unentitled_ios_sim_launch " + "boot-arg set to true\n", p->p_name); + } else if (psa->psa_platform != PLATFORM_IOS) { + /* Simulator binary spawned with wrong platform */ + signature_failure_reason = os_reason_create(OS_REASON_EXEC, + EXEC_EXIT_REASON_WRONG_PLATFORM); + error = EACCES; + goto done; + } else { + printf("Allowing spawn of iOS binary %s since correct platform was passed in spawn\n", + p->p_name); + } + } +#endif /* XNU_TARGET_OS_OSX */ + /* If the code signature came through the image activation path, we skip the * taskgated / externally attached path. */ if (imgp->ip_csflags & CS_SIGNED) { @@ -6398,7 +6914,7 @@ check_for_signature(proc_t p, struct image_params *imgp) /* Only do this if exec_resettextvp() did not fail */ if (p->p_textvp != NULLVP) { - csb = ubc_cs_blob_get(p->p_textvp, -1, p->p_textoff); + csb = ubc_cs_blob_get(p->p_textvp, -1, -1, p->p_textoff); if (csb != NULL) { /* As the enforcement we can do here is very limited, we only allow things that @@ -6408,7 +6924,7 @@ check_for_signature(proc_t p, struct image_params *imgp) if ( /* Revalidate the blob if necessary through bumped generation count. */ (ubc_cs_generation_check(p->p_textvp) == 0 || - ubc_cs_blob_revalidate(p->p_textvp, csb, imgp, 0) == 0) && + ubc_cs_blob_revalidate(p->p_textvp, csb, imgp, 0, proc_platform(p)) == 0) && /* Only CS_ADHOC, no CS_KILL, CS_HARD etc. */ (csb->csb_flags & CS_ALLOWED_MACHO) == CS_ADHOC && /* If it has a CMS blob, it's not adhoc. The CS_ADHOC flag can lie. */ @@ -6485,21 +7001,31 @@ done: * in the process' page tables, we prefault some pages if * possible. Errors are non-fatal. */ -static void +#ifndef PREVENT_CALLER_STACK_USE +#define PREVENT_CALLER_STACK_USE __attribute__((noinline)) +#endif +static void PREVENT_CALLER_STACK_USE exec_prefault_data(proc_t p __unused, struct image_params *imgp, load_result_t *load_result) { int ret; size_t expected_all_image_infos_size; + kern_return_t kr; /* * Prefault executable or dyld entry point. 
*/ - vm_fault(current_map(), + if (vm_map_page_shift(current_map()) < (int)PAGE_SHIFT) { + DEBUG4K_LOAD("entry_point 0x%llx\n", (uint64_t)load_result->entry_point); + } + kr = vm_fault(current_map(), vm_map_trunc_page(load_result->entry_point, vm_map_page_mask(current_map())), VM_PROT_READ | VM_PROT_EXECUTE, FALSE, VM_KERN_MEMORY_NONE, THREAD_UNINT, NULL, 0); + if (kr != KERN_SUCCESS) { + DEBUG4K_ERROR("map %p va 0x%llx -> 0x%x\n", current_map(), (uint64_t)vm_map_trunc_page(load_result->entry_point, vm_map_page_mask(current_map())), kr); + } if (imgp->ip_flags & IMGPF_IS_64BIT_ADDR) { expected_all_image_infos_size = sizeof(struct user64_dyld_all_image_infos); @@ -6520,23 +7046,35 @@ exec_prefault_data(proc_t p __unused, struct image_params *imgp, load_result_t * * Pre-fault to avoid copyin() going through the trap handler * and recovery path. */ - vm_fault(current_map(), + if (vm_map_page_shift(current_map()) < (int)PAGE_SHIFT) { + DEBUG4K_LOAD("all_image_info_addr 0x%llx\n", load_result->all_image_info_addr); + } + kr = vm_fault(current_map(), vm_map_trunc_page(load_result->all_image_info_addr, vm_map_page_mask(current_map())), VM_PROT_READ | VM_PROT_WRITE, FALSE, VM_KERN_MEMORY_NONE, THREAD_UNINT, NULL, 0); + if (kr != KERN_SUCCESS) { +// printf("%s:%d map %p va 0x%llx -> 0x%x\n", __FUNCTION__, __LINE__, current_map(), vm_map_trunc_page(load_result->all_image_info_addr, vm_map_page_mask(current_map())), kr); + } if ((load_result->all_image_info_addr & PAGE_MASK) + expected_all_image_infos_size > PAGE_SIZE) { /* all_image_infos straddles a page */ - vm_fault(current_map(), + kr = vm_fault(current_map(), vm_map_trunc_page(load_result->all_image_info_addr + expected_all_image_infos_size - 1, vm_map_page_mask(current_map())), VM_PROT_READ | VM_PROT_WRITE, FALSE, VM_KERN_MEMORY_NONE, THREAD_UNINT, NULL, 0); + if (kr != KERN_SUCCESS) { +// printf("%s:%d map %p va 0x%llx -> 0x%x\n", __FUNCTION__, __LINE__, current_map(), vm_map_trunc_page(load_result->all_image_info_addr + expected_all_image_infos_size -1, vm_map_page_mask(current_map())), kr); + } } - ret = copyin(load_result->all_image_info_addr, + if (vm_map_page_shift(current_map()) < (int)PAGE_SHIFT) { + DEBUG4K_LOAD("copyin(0x%llx, 0x%lx)\n", load_result->all_image_info_addr, expected_all_image_infos_size); + } + ret = copyin((user_addr_t)load_result->all_image_info_addr, &all_image_infos, expected_all_image_infos_size); if (ret == 0 && all_image_infos.infos32.version >= DYLD_ALL_IMAGE_INFOS_ADDRESS_MINIMUM_VERSION) { @@ -6547,10 +7085,10 @@ exec_prefault_data(proc_t p __unused, struct image_params *imgp, load_result_t * user_addr_t dyld_slide_amount; if (imgp->ip_flags & IMGPF_IS_64BIT_ADDR) { - notification_address = all_image_infos.infos64.notification; - dyld_image_address = all_image_infos.infos64.dyldImageLoadAddress; - dyld_version_address = all_image_infos.infos64.dyldVersion; - dyld_all_image_infos_address = all_image_infos.infos64.dyldAllImageInfosAddress; + notification_address = (user_addr_t)all_image_infos.infos64.notification; + dyld_image_address = (user_addr_t)all_image_infos.infos64.dyldImageLoadAddress; + dyld_version_address = (user_addr_t)all_image_infos.infos64.dyldVersion; + dyld_all_image_infos_address = (user_addr_t)all_image_infos.infos64.dyldAllImageInfosAddress; } else { notification_address = all_image_infos.infos32.notification; dyld_image_address = all_image_infos.infos32.dyldImageLoadAddress; @@ -6571,7 +7109,7 @@ exec_prefault_data(proc_t p __unused, struct image_params *imgp, load_result_t * * 
"dyld_slide_amount" will be 0, if we were to consult it again. */ - dyld_slide_amount = load_result->all_image_info_addr - dyld_all_image_infos_address; + dyld_slide_amount = (user_addr_t)load_result->all_image_info_addr - dyld_all_image_infos_address; #if 0 kprintf("exec_prefault: 0x%016llx 0x%08x 0x%016llx 0x%016llx 0x%016llx 0x%016llx\n", @@ -6583,30 +7121,54 @@ exec_prefault_data(proc_t p __unused, struct image_params *imgp, load_result_t * (uint64_t)dyld_all_image_infos_address); #endif - vm_fault(current_map(), + if (vm_map_page_shift(current_map()) < (int)PAGE_SHIFT) { + DEBUG4K_LOAD("notification_address 0x%llx dyld_slide_amount 0x%llx\n", (uint64_t)notification_address, (uint64_t)dyld_slide_amount); + } + kr = vm_fault(current_map(), vm_map_trunc_page(notification_address + dyld_slide_amount, vm_map_page_mask(current_map())), VM_PROT_READ | VM_PROT_EXECUTE, FALSE, VM_KERN_MEMORY_NONE, THREAD_UNINT, NULL, 0); - vm_fault(current_map(), + if (kr != KERN_SUCCESS) { +// printf("%s:%d map %p va 0x%llx -> 0x%x\n", __FUNCTION__, __LINE__, current_map(), vm_map_trunc_page(notification_address + dyld_slide_amount, vm_map_page_mask(current_map())), kr); + } + if (vm_map_page_shift(current_map()) < (int)PAGE_SHIFT) { + DEBUG4K_LOAD("dyld_image_address 0x%llx dyld_slide_amount 0x%llx\n", (uint64_t)dyld_image_address, (uint64_t)dyld_slide_amount); + } + kr = vm_fault(current_map(), vm_map_trunc_page(dyld_image_address + dyld_slide_amount, vm_map_page_mask(current_map())), VM_PROT_READ | VM_PROT_EXECUTE, FALSE, VM_KERN_MEMORY_NONE, THREAD_UNINT, NULL, 0); - vm_fault(current_map(), + if (kr != KERN_SUCCESS) { +// printf("%s:%d map %p va 0x%llx -> 0x%x\n", __FUNCTION__, __LINE__, current_map(), vm_map_trunc_page(dyld_image_address + dyld_slide_amount, vm_map_page_mask(current_map())), kr); + } + if (vm_map_page_shift(current_map()) < (int)PAGE_SHIFT) { + DEBUG4K_LOAD("dyld_version_address 0x%llx dyld_slide_amount 0x%llx\n", (uint64_t)dyld_version_address, (uint64_t)dyld_slide_amount); + } + kr = vm_fault(current_map(), vm_map_trunc_page(dyld_version_address + dyld_slide_amount, vm_map_page_mask(current_map())), VM_PROT_READ, FALSE, VM_KERN_MEMORY_NONE, THREAD_UNINT, NULL, 0); - vm_fault(current_map(), + if (kr != KERN_SUCCESS) { +// printf("%s:%d map %p va 0x%llx -> 0x%x\n", __FUNCTION__, __LINE__, current_map(), vm_map_trunc_page(dyld_version_address + dyld_slide_amount, vm_map_page_mask(current_map())), kr); + } + if (vm_map_page_shift(current_map()) < (int)PAGE_SHIFT) { + DEBUG4K_LOAD("dyld_all_image_infos_address 0x%llx dyld_slide_amount 0x%llx\n", (uint64_t)dyld_version_address, (uint64_t)dyld_slide_amount); + } + kr = vm_fault(current_map(), vm_map_trunc_page(dyld_all_image_infos_address + dyld_slide_amount, vm_map_page_mask(current_map())), VM_PROT_READ | VM_PROT_WRITE, FALSE, VM_KERN_MEMORY_NONE, THREAD_UNINT, NULL, 0); + if (kr != KERN_SUCCESS) { +// printf("%s:%d map %p va 0x%llx -> 0x%x\n", __FUNCTION__, __LINE__, current_map(), vm_map_trunc_page(dyld_all_image_infos_address + dyld_slide_amount, vm_map_page_mask(current_map())), kr); + } } } } diff --git a/bsd/kern/kern_exit.c b/bsd/kern/kern_exit.c index 7349f618f..c38e6a898 100644 --- a/bsd/kern/kern_exit.c +++ b/bsd/kern/kern_exit.c @@ -178,6 +178,9 @@ extern uint64_t get_task_phys_footprint_limit(task_t); int proc_list_uptrs(void *p, uint64_t *udata_buffer, int size); extern uint64_t task_corpse_get_crashed_thread_id(task_t corpse_task); +ZONE_DECLARE(zombie_zone, "zombie", + sizeof(struct rusage_superset), ZC_NOENCRYPT); + /* * 
Things which should have prototypes in headers, but don't @@ -214,7 +217,7 @@ siginfo_user_to_user32(user_siginfo_t *in, user32_siginfo_t *out) out->si_addr = CAST_DOWN_EXPLICIT(user32_addr_t, in->si_addr); /* following cast works for sival_int because of padding */ out->si_value.sival_ptr = CAST_DOWN_EXPLICIT(user32_addr_t, in->si_value.sival_ptr); - out->si_band = in->si_band; /* range reduction */ + out->si_band = (user32_long_t)in->si_band; /* range reduction */ } void @@ -339,6 +342,14 @@ populate_corpse_crashinfo(proc_t p, task_t corpse_task, struct rusage_superset * uint64_t ledger_network_nonvolatile; uint64_t ledger_network_nonvolatile_compressed; uint64_t ledger_wired_mem; + uint64_t ledger_tagged_footprint; + uint64_t ledger_tagged_footprint_compressed; + uint64_t ledger_media_footprint; + uint64_t ledger_media_footprint_compressed; + uint64_t ledger_graphics_footprint; + uint64_t ledger_graphics_footprint_compressed; + uint64_t ledger_neural_footprint; + uint64_t ledger_neural_footprint_compressed; void *crash_info_ptr = task_get_corpseinfo(corpse_task); @@ -404,16 +415,13 @@ populate_corpse_crashinfo(proc_t p, task_t corpse_task, struct rusage_superset * } if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_PROC_PATH, MAXPATHLEN, &uaddr)) { - char *buf = (char *) kalloc(MAXPATHLEN); - if (buf != NULL) { - bzero(buf, MAXPATHLEN); - proc_pidpathinfo_internal(p, 0, buf, MAXPATHLEN, &retval); - kcdata_memcpy(crash_info_ptr, uaddr, buf, MAXPATHLEN); - kfree(buf, MAXPATHLEN); - } + char *buf = zalloc_flags(ZV_NAMEI, Z_WAITOK | Z_ZERO); + proc_pidpathinfo_internal(p, 0, buf, MAXPATHLEN, &retval); + kcdata_memcpy(crash_info_ptr, uaddr, buf, MAXPATHLEN); + zfree(ZV_NAMEI, buf); } - pflags = p->p_flag & (P_LP64 | P_SUGID); + pflags = p->p_flag & (P_LP64 | P_SUGID | P_TRANSLATED); if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_PROC_FLAGS, sizeof(pflags), &uaddr)) { kcdata_memcpy(crash_info_ptr, uaddr, &pflags, sizeof(pflags)); } @@ -543,6 +551,50 @@ populate_corpse_crashinfo(proc_t p, task_t corpse_task, struct rusage_superset * kcdata_memcpy(crash_info_ptr, uaddr, &p->p_memlimit_increase, sizeof(p->p_memlimit_increase)); } + if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_TAGGED_FOOTPRINT, sizeof(ledger_tagged_footprint), &uaddr)) { + ledger_tagged_footprint = get_task_tagged_footprint(corpse_task); + kcdata_memcpy(crash_info_ptr, uaddr, &ledger_tagged_footprint, sizeof(ledger_tagged_footprint)); + } + + if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_TAGGED_FOOTPRINT_COMPRESSED, sizeof(ledger_tagged_footprint_compressed), &uaddr)) { + ledger_tagged_footprint_compressed = get_task_tagged_footprint_compressed(corpse_task); + kcdata_memcpy(crash_info_ptr, uaddr, &ledger_tagged_footprint_compressed, sizeof(ledger_tagged_footprint_compressed)); + } + + if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_MEDIA_FOOTPRINT, sizeof(ledger_media_footprint), &uaddr)) { + ledger_media_footprint = get_task_media_footprint(corpse_task); + kcdata_memcpy(crash_info_ptr, uaddr, &ledger_media_footprint, sizeof(ledger_media_footprint)); + } + + if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_MEDIA_FOOTPRINT_COMPRESSED, sizeof(ledger_media_footprint_compressed), &uaddr)) { + ledger_media_footprint_compressed = get_task_media_footprint_compressed(corpse_task); + kcdata_memcpy(crash_info_ptr, uaddr, &ledger_media_footprint_compressed, 
sizeof(ledger_media_footprint_compressed)); + } + + if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_GRAPHICS_FOOTPRINT, sizeof(ledger_graphics_footprint), &uaddr)) { + ledger_graphics_footprint = get_task_graphics_footprint(corpse_task); + kcdata_memcpy(crash_info_ptr, uaddr, &ledger_graphics_footprint, sizeof(ledger_graphics_footprint)); + } + + if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_GRAPHICS_FOOTPRINT_COMPRESSED, sizeof(ledger_graphics_footprint_compressed), &uaddr)) { + ledger_graphics_footprint_compressed = get_task_graphics_footprint_compressed(corpse_task); + kcdata_memcpy(crash_info_ptr, uaddr, &ledger_graphics_footprint_compressed, sizeof(ledger_graphics_footprint_compressed)); + } + + if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_NEURAL_FOOTPRINT, sizeof(ledger_neural_footprint), &uaddr)) { + ledger_neural_footprint = get_task_neural_footprint(corpse_task); + kcdata_memcpy(crash_info_ptr, uaddr, &ledger_neural_footprint, sizeof(ledger_neural_footprint)); + } + + if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_NEURAL_FOOTPRINT_COMPRESSED, sizeof(ledger_neural_footprint_compressed), &uaddr)) { + ledger_neural_footprint_compressed = get_task_neural_footprint_compressed(corpse_task); + kcdata_memcpy(crash_info_ptr, uaddr, &ledger_neural_footprint_compressed, sizeof(ledger_neural_footprint_compressed)); + } + + if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_MEMORYSTATUS_EFFECTIVE_PRIORITY, sizeof(p->p_memstat_effectivepriority), &uaddr)) { + kcdata_memcpy(crash_info_ptr, uaddr, &p->p_memstat_effectivepriority, sizeof(p->p_memstat_effectivepriority)); + } + if (p->p_exit_reason != OS_REASON_NULL && reason == OS_REASON_NULL) { reason = p->p_exit_reason; } @@ -558,7 +610,7 @@ populate_corpse_crashinfo(proc_t p, task_t corpse_task, struct rusage_superset * } if (reason->osr_kcd_buf != 0) { - uint32_t reason_buf_size = kcdata_memory_get_used_bytes(&reason->osr_kcd_descriptor); + uint32_t reason_buf_size = (uint32_t)kcdata_memory_get_used_bytes(&reason->osr_kcd_descriptor); assert(reason_buf_size != 0); if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, KCDATA_TYPE_NESTED_KCDATA, reason_buf_size, &uaddr)) { @@ -785,7 +837,7 @@ void exit(proc_t p, struct exit_args *uap, int *retval) { p->p_xhighbits = ((uint32_t)(uap->rval) & 0xFF000000) >> 24; - exit1(p, W_EXITCODE(uap->rval, 0), retval); + exit1(p, W_EXITCODE((uint32_t)uap->rval, 0), retval); thread_exception_return(); /* NOTREACHED */ @@ -1048,29 +1100,26 @@ skipcheck: * * If the zombie allocation fails, just punt the stats. */ - MALLOC_ZONE(rup, struct rusage_superset *, - sizeof(*rup), M_ZOMBIE, M_WAITOK); - if (rup != NULL) { - gather_rusage_info(p, &rup->ri, RUSAGE_INFO_CURRENT); - rup->ri.ri_phys_footprint = 0; - rup->ri.ri_proc_exit_abstime = mach_absolute_time(); + rup = zalloc(zombie_zone); + gather_rusage_info(p, &rup->ri, RUSAGE_INFO_CURRENT); + rup->ri.ri_phys_footprint = 0; + rup->ri.ri_proc_exit_abstime = mach_absolute_time(); + /* + * Make the rusage_info visible to external observers + * only after it has been completely filled in. + */ + p->p_ru = rup; - /* - * Make the rusage_info visible to external observers - * only after it has been completely filled in. 
- */ - p->p_ru = rup; - } if (create_corpse) { int est_knotes = 0, num_knotes = 0; uint64_t *buffer = NULL; - int buf_size = 0; + uint32_t buf_size = 0; /* Get all the udata pointers from kqueue */ est_knotes = kevent_proc_copy_uptrs(p, NULL, 0); if (est_knotes > 0) { - buf_size = (est_knotes + 32) * sizeof(uint64_t); - buffer = (uint64_t *) kalloc(buf_size); + buf_size = (uint32_t)((est_knotes + 32) * sizeof(uint64_t)); + buffer = kheap_alloc(KHEAP_TEMP, buf_size, Z_WAITOK); num_knotes = kevent_proc_copy_uptrs(p, buffer, buf_size); if (num_knotes > est_knotes + 32) { num_knotes = est_knotes + 32; @@ -1082,7 +1131,7 @@ skipcheck: populate_corpse_crashinfo(p, p->task, rup, code, subcode, buffer, num_knotes, NULL); if (buffer != NULL) { - kfree(buffer, buf_size); + kheap_free(KHEAP_TEMP, buffer, buf_size); } } /* @@ -1258,17 +1307,6 @@ proc_exit(proc_t p) if ((tp != TTY_NULL) && (tp->t_session == sessp)) { session_unlock(sessp); - /* - * We're going to SIGHUP the foreground process - * group. It can't change from this point on - * until the revoke is complete. - * The process group changes under both the tty - * lock and proc_list_lock but we need only one - */ - tty_lock(tp); - ttysetpgrphup(tp); - tty_unlock(tp); - tty_pgsignal(tp, SIGHUP, 1); session_lock(sessp); @@ -1290,7 +1328,8 @@ proc_exit(proc_t p) (void) ttywait(tp); tty_unlock(tp); } - context.vc_thread = proc_thread(p); /* XXX */ + + context.vc_thread = NULL; context.vc_ucred = kauth_cred_proc_ref(p); VNOP_REVOKE(ttyvp, REVOKEALL, &context); if (cttyflag) { @@ -1308,14 +1347,6 @@ proc_exit(proc_t p) ttyvp = NULLVP; } if (tp) { - /* - * This is cleared even if not set. This is also done in - * spec_close to ensure that the flag is cleared. - */ - tty_lock(tp); - ttyclrpgrphup(tp); - tty_unlock(tp); - ttyfree(tp); } } @@ -1329,7 +1360,13 @@ proc_exit(proc_t p) fixjobc(p, pg, 0); pg_rele(pg); - p->p_rlimit[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY; + /* + * Change RLIMIT_FSIZE for accounting/debugging. proc_limitsetcur_internal() will COW the current plimit + * before making changes if the current plimit is shared. The COW'ed plimit will be freed + * below by calling proc_limitdrop(). + */ + proc_limitsetcur_internal(p, RLIMIT_FSIZE, RLIM_INFINITY); + (void)acct_process(p); proc_list_lock(); @@ -1486,14 +1523,13 @@ proc_exit(proc_t p) /* * Other substructures are freed from wait(). */ - FREE_ZONE(p->p_stats, sizeof *p->p_stats, M_PSTATS); + zfree(proc_stats_zone, p->p_stats); p->p_stats = NULL; - FREE_ZONE(p->p_sigacts, sizeof *p->p_sigacts, M_SIGACTS); + zfree(proc_sigacts_zone, p->p_sigacts); p->p_sigacts = NULL; - proc_limitdrop(p, 1); - p->p_limit = NULL; + proc_limitdrop(p); /* * Finish up by terminating the task @@ -1740,7 +1776,7 @@ reap_child_locked(proc_t parent, proc_t child, int deadparent, int reparentedtoi ruadd(&parent->p_stats->p_cru, &child->p_ru->ru); update_rusage_info_child(&parent->p_stats->ri_child, &child->p_ru->ri); proc_unlock(parent); - FREE_ZONE(child->p_ru, sizeof *child->p_ru, M_ZOMBIE); + zfree(zombie_zone, child->p_ru); child->p_ru = NULL; } else { printf("Warning : lost p_ru for %s\n", child->p_comm); @@ -1765,15 +1801,6 @@ reap_child_locked(proc_t parent, proc_t child, int deadparent, int reparentedtoi os_reason_free(child->p_exit_reason); - /* - * Free up credentials. - */ - if (IS_VALID_CRED(child->p_ucred)) { - kauth_cred_unref(&child->p_ucred); - } - - /* XXXX Note NOT SAFE TO USE p_ucred from this point onwards */ - /* * Finally finished with old proc entry. 
* Unlink it from its process group and free it. @@ -1807,6 +1834,14 @@ reap_child_locked(proc_t parent, proc_t child, int deadparent, int reparentedtoi proc_list_unlock(); + /* + * Free up credentials. + */ + if (IS_VALID_CRED(child->p_ucred)) { + kauth_cred_t tmp_ucred = child->p_ucred; + kauth_cred_unref(&tmp_ucred); + child->p_ucred = NOCRED; + } lck_mtx_destroy(&child->p_mlock, proc_mlock_grp); lck_mtx_destroy(&child->p_ucred_mlock, proc_ucred_mlock_grp); @@ -1816,7 +1851,7 @@ reap_child_locked(proc_t parent, proc_t child, int deadparent, int reparentedtoi #endif lck_spin_destroy(&child->p_slock, proc_slock_grp); - FREE_ZONE(child, sizeof *child, M_PROC); + zfree(proc_zone, child); if ((locked == 1) && (droplock == 0)) { proc_list_lock(); } @@ -2187,14 +2222,20 @@ loop1: #endif siginfo.si_signo = SIGCHLD; siginfo.si_pid = p->p_pid; - siginfo.si_status = (WEXITSTATUS(p->p_xstat) & 0x00FFFFFF) | (((uint32_t)(p->p_xhighbits) << 24) & 0xFF000000); - p->p_xhighbits = 0; + + /* If the child terminated abnormally due to a signal, the signum + * needs to be preserved in the exit status. + */ if (WIFSIGNALED(p->p_xstat)) { siginfo.si_code = WCOREDUMP(p->p_xstat) ? CLD_DUMPED : CLD_KILLED; + siginfo.si_status = WTERMSIG(p->p_xstat); } else { siginfo.si_code = CLD_EXITED; + siginfo.si_status = WEXITSTATUS(p->p_xstat) & 0x00FFFFFF; } + siginfo.si_status |= (((uint32_t)(p->p_xhighbits) << 24) & 0xFF000000); + p->p_xhighbits = 0; if ((error = copyoutsiginfo(&siginfo, caller64, uap->infop)) != 0) { @@ -2484,9 +2525,7 @@ vfork_exit_internal(proc_t p, int rv, int forceexit) struct session *sessp; struct rusage_superset *rup; - /* XXX Zombie allocation may fail, in which case stats get lost */ - MALLOC_ZONE(rup, struct rusage_superset *, - sizeof(*rup), M_ZOMBIE, M_WAITOK); + rup = zalloc(zombie_zone); proc_refdrain(p); @@ -2498,88 +2537,7 @@ vfork_exit_internal(proc_t p, int rv, int forceexit) sessp = proc_session(p); if (SESS_LEADER(p, sessp)) { - if (sessp->s_ttyvp != NULLVP) { - struct vnode *ttyvp; - int ttyvid; - int cttyflag = 0; - struct vfs_context context; - struct tty *tp; - - /* - * Controlling process. - * Signal foreground pgrp, - * drain controlling terminal - * and revoke access to controlling terminal. - */ - session_lock(sessp); - tp = SESSION_TP(sessp); - if ((tp != TTY_NULL) && (tp->t_session == sessp)) { - session_unlock(sessp); - - /* - * We're going to SIGHUP the foreground process - * group. It can't change from this point on - * until the revoke is complete. - * The process group changes under both the tty - * lock and proc_list_lock but we need only one - */ - tty_lock(tp); - ttysetpgrphup(tp); - tty_unlock(tp); - - tty_pgsignal(tp, SIGHUP, 1); - - session_lock(sessp); - tp = SESSION_TP(sessp); - } - cttyflag = sessp->s_flags & S_CTTYREF; - sessp->s_flags &= ~S_CTTYREF; - ttyvp = sessp->s_ttyvp; - ttyvid = sessp->s_ttyvid; - sessp->s_ttyvp = NULL; - sessp->s_ttyvid = 0; - sessp->s_ttyp = TTY_NULL; - sessp->s_ttypgrpid = NO_PID; - session_unlock(sessp); - - if ((ttyvp != NULLVP) && (vnode_getwithvid(ttyvp, ttyvid) == 0)) { - if (tp != TTY_NULL) { - tty_lock(tp); - (void) ttywait(tp); - tty_unlock(tp); - } - context.vc_thread = proc_thread(p); /* XXX */ - context.vc_ucred = kauth_cred_proc_ref(p); - VNOP_REVOKE(ttyvp, REVOKEALL, &context); - if (cttyflag) { - /* - * Release the extra usecount taken in cttyopen. - * usecount should be released after VNOP_REVOKE is called. 
- * This usecount was taken to ensure that - * the VNOP_REVOKE results in a close to - * the tty since cttyclose is a no-op. - */ - vnode_rele(ttyvp); - } - vnode_put(ttyvp); - kauth_cred_unref(&context.vc_ucred); - ttyvp = NULLVP; - } - if (tp) { - /* - * This is cleared even if not set. This is also done in - * spec_close to ensure that the flag is cleared. - */ - tty_lock(tp); - ttyclrpgrphup(tp); - tty_unlock(tp); - - ttyfree(tp); - } - } - session_lock(sessp); - sessp->s_leader = NULL; - session_unlock(sessp); + panic("vfork child is session leader"); } session_rele(sessp); @@ -2587,9 +2545,15 @@ vfork_exit_internal(proc_t p, int rv, int forceexit) fixjobc(p, pg, 0); pg_rele(pg); - p->p_rlimit[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY; + /* + * Change RLIMIT_FSIZE for accounting/debugging. proc_limitsetcur_internal() will COW the current plimit + * before making changes if the current plimit is shared. The COW'ed plimit will be freed + * below by calling proc_limitdrop(). + */ + proc_limitsetcur_internal(p, RLIMIT_FSIZE, RLIM_INFINITY); proc_list_lock(); + proc_childdrainstart(p); while ((q = p->p_children.lh_first) != NULL) { if (q->p_stat == SZOMB) { @@ -2755,14 +2719,16 @@ vfork_exit_internal(proc_t p, int rv, int forceexit) /* * Other substructures are freed from wait(). */ - FREE_ZONE(p->p_stats, sizeof *p->p_stats, M_PSTATS); + zfree(proc_stats_zone, p->p_stats); p->p_stats = NULL; - FREE_ZONE(p->p_sigacts, sizeof *p->p_sigacts, M_SIGACTS); + zfree(proc_sigacts_zone, p->p_sigacts); p->p_sigacts = NULL; - proc_limitdrop(p, 1); - p->p_limit = NULL; + FREE(p->p_subsystem_root_path, M_SBUF); + p->p_subsystem_root_path = NULL; + + proc_limitdrop(p); /* * Finish up by terminating the task @@ -2874,28 +2840,28 @@ munge_user32_rusage(struct rusage *a_rusage_p, struct user32_rusage *a_user_rusa bzero(a_user_rusage_p, sizeof(struct user32_rusage)); /* timeval changes size, so utime and stime need special handling */ - a_user_rusage_p->ru_utime.tv_sec = a_rusage_p->ru_utime.tv_sec; + a_user_rusage_p->ru_utime.tv_sec = (user32_time_t)a_rusage_p->ru_utime.tv_sec; a_user_rusage_p->ru_utime.tv_usec = a_rusage_p->ru_utime.tv_usec; - a_user_rusage_p->ru_stime.tv_sec = a_rusage_p->ru_stime.tv_sec; + a_user_rusage_p->ru_stime.tv_sec = (user32_time_t)a_rusage_p->ru_stime.tv_sec; a_user_rusage_p->ru_stime.tv_usec = a_rusage_p->ru_stime.tv_usec; /* * everything else can be a direct assign. 
We currently ignore * the loss of precision */ - a_user_rusage_p->ru_maxrss = a_rusage_p->ru_maxrss; - a_user_rusage_p->ru_ixrss = a_rusage_p->ru_ixrss; - a_user_rusage_p->ru_idrss = a_rusage_p->ru_idrss; - a_user_rusage_p->ru_isrss = a_rusage_p->ru_isrss; - a_user_rusage_p->ru_minflt = a_rusage_p->ru_minflt; - a_user_rusage_p->ru_majflt = a_rusage_p->ru_majflt; - a_user_rusage_p->ru_nswap = a_rusage_p->ru_nswap; - a_user_rusage_p->ru_inblock = a_rusage_p->ru_inblock; - a_user_rusage_p->ru_oublock = a_rusage_p->ru_oublock; - a_user_rusage_p->ru_msgsnd = a_rusage_p->ru_msgsnd; - a_user_rusage_p->ru_msgrcv = a_rusage_p->ru_msgrcv; - a_user_rusage_p->ru_nsignals = a_rusage_p->ru_nsignals; - a_user_rusage_p->ru_nvcsw = a_rusage_p->ru_nvcsw; - a_user_rusage_p->ru_nivcsw = a_rusage_p->ru_nivcsw; + a_user_rusage_p->ru_maxrss = (user32_long_t)a_rusage_p->ru_maxrss; + a_user_rusage_p->ru_ixrss = (user32_long_t)a_rusage_p->ru_ixrss; + a_user_rusage_p->ru_idrss = (user32_long_t)a_rusage_p->ru_idrss; + a_user_rusage_p->ru_isrss = (user32_long_t)a_rusage_p->ru_isrss; + a_user_rusage_p->ru_minflt = (user32_long_t)a_rusage_p->ru_minflt; + a_user_rusage_p->ru_majflt = (user32_long_t)a_rusage_p->ru_majflt; + a_user_rusage_p->ru_nswap = (user32_long_t)a_rusage_p->ru_nswap; + a_user_rusage_p->ru_inblock = (user32_long_t)a_rusage_p->ru_inblock; + a_user_rusage_p->ru_oublock = (user32_long_t)a_rusage_p->ru_oublock; + a_user_rusage_p->ru_msgsnd = (user32_long_t)a_rusage_p->ru_msgsnd; + a_user_rusage_p->ru_msgrcv = (user32_long_t)a_rusage_p->ru_msgrcv; + a_user_rusage_p->ru_nsignals = (user32_long_t)a_rusage_p->ru_nsignals; + a_user_rusage_p->ru_nvcsw = (user32_long_t)a_rusage_p->ru_nvcsw; + a_user_rusage_p->ru_nivcsw = (user32_long_t)a_rusage_p->ru_nivcsw; } void @@ -2923,14 +2889,11 @@ exit_with_pac_exception(proc_t p, exception_type_t exception, mach_exception_cod struct uthread *ut = get_bsdthread_info(self); os_reason_t exception_reason = os_reason_create(OS_REASON_PAC_EXCEPTION, (uint64_t)code); - if (exception_reason == OS_REASON_NULL) { - printf("exit_with_pac_exception: failed to allocate exit reason\n"); - } else { - exception_reason->osr_flags |= OS_REASON_FLAG_GENERATE_CRASH_REPORT; - ut->uu_exception = exception; - ut->uu_code = code; - ut->uu_subcode = subcode; - } + assert(exception_reason != OS_REASON_NULL); + exception_reason->osr_flags |= OS_REASON_FLAG_GENERATE_CRASH_REPORT; + ut->uu_exception = exception; + ut->uu_code = code; + ut->uu_subcode = subcode; return exit_with_reason(p, W_EXITCODE(0, SIGKILL), (int *)NULL, TRUE, FALSE, 0, exception_reason); diff --git a/bsd/kern/kern_fork.c b/bsd/kern/kern_fork.c index e8de4d1c2..b9475aed1 100644 --- a/bsd/kern/kern_fork.c +++ b/bsd/kern/kern_fork.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2019 Apple Inc. All rights reserved. + * Copyright (c) 2000-2020 Apple Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -126,8 +126,6 @@ static void (*dtrace_proc_waitfor_hook)(proc_t) = NULL; #include -#include - #if CONFIG_MACF #include #include @@ -169,6 +167,20 @@ thread_t fork_create_child(task_t parent_task, void proc_vfork_begin(proc_t parent_proc); void proc_vfork_end(proc_t parent_proc); +static LCK_GRP_DECLARE(rethrottle_lock_grp, "rethrottle"); +static ZONE_DECLARE(uthread_zone, "uthreads", + sizeof(struct uthread), ZC_ZFREE_CLEARMEM); + +SECURITY_READ_ONLY_LATE(zone_t) proc_zone; +ZONE_INIT(&proc_zone, "proc", sizeof(struct proc), ZC_ZFREE_CLEARMEM, + ZONE_ID_PROC, NULL); + +ZONE_DECLARE(proc_stats_zone, "pstats", + sizeof(struct pstats), ZC_NOENCRYPT | ZC_ZFREE_CLEARMEM); + +ZONE_DECLARE(proc_sigacts_zone, "sigacts", + sizeof(struct sigacts), ZC_NOENCRYPT); + #define DOFORK 0x1 /* fork() system call */ #define DOVFORK 0x2 /* vfork() system call */ @@ -382,9 +394,10 @@ fork1(proc_t parent_proc, thread_t *child_threadp, int kind, coalition_t *coalit proc_t child_proc = NULL; /* set in switch, but compiler... */ thread_t child_thread = NULL; uid_t uid; - int count; + size_t count; int err = 0; int spawn = 0; + rlim_t rlimit_nproc_cur; /* * Although process entries are dynamically created, we still keep @@ -396,7 +409,7 @@ fork1(proc_t parent_proc, thread_t *child_threadp, int kind, coalition_t *coalit uid = kauth_getruid(); proc_list_lock(); if ((nprocs >= maxproc - 1 && uid != 0) || nprocs >= maxproc) { -#if (DEVELOPMENT || DEBUG) && CONFIG_EMBEDDED +#if (DEVELOPMENT || DEBUG) && !defined(XNU_TARGET_OS_OSX) /* * On the development kernel, panic so that the fact that we hit * the process limit is obvious, as this may very well wedge the @@ -417,9 +430,10 @@ fork1(proc_t parent_proc, thread_t *child_threadp, int kind, coalition_t *coalit * (locking protection is provided by list lock held in chgproccnt) */ count = chgproccnt(uid, 1); + rlimit_nproc_cur = proc_limitgetcur(parent_proc, RLIMIT_NPROC, TRUE); if (uid != 0 && - (rlim_t)count > parent_proc->p_rlimit[RLIMIT_NPROC].rlim_cur) { -#if (DEVELOPMENT || DEBUG) && CONFIG_EMBEDDED + (rlim_t)count > rlimit_nproc_cur) { +#if (DEVELOPMENT || DEBUG) && !defined(XNU_TARGET_OS_OSX) /* * On the development kernel, panic so that the fact that we hit * the per user process limit is obvious. This may be less dire @@ -575,7 +589,7 @@ fork1(proc_t parent_proc, thread_t *child_threadp, int kind, coalition_t *coalit */ spawn = 1; - /* FALLSTHROUGH */ + OS_FALLTHROUGH; case PROC_CREATE_FORK: /* @@ -1080,8 +1094,7 @@ forkproc_free(proc_t p) * need to free it. If it's a shared copy, we need to drop our * reference on it. 
*/ - proc_limitdrop(p, 0); - p->p_limit = NULL; + proc_limitdrop(p); #if SYSV_SHM /* Need to drop references to the shared memory segment(s), if any */ @@ -1120,7 +1133,9 @@ forkproc_free(proc_t p) lck_spin_destroy(&p->p_slock, proc_slock_grp); /* Release the credential reference */ - kauth_cred_unref(&p->p_ucred); + kauth_cred_t tmp_ucred = p->p_ucred; + kauth_cred_unref(&tmp_ucred); + p->p_ucred = tmp_ucred; proc_list_lock(); /* Decrement the count of processes in the system */ @@ -1134,13 +1149,15 @@ forkproc_free(proc_t p) thread_call_free(p->p_rcall); /* Free allocated memory */ - FREE_ZONE(p->p_sigacts, sizeof *p->p_sigacts, M_SIGACTS); + zfree(proc_sigacts_zone, p->p_sigacts); p->p_sigacts = NULL; - FREE_ZONE(p->p_stats, sizeof *p->p_stats, M_PSTATS); + zfree(proc_stats_zone, p->p_stats); p->p_stats = NULL; + FREE(p->p_subsystem_root_path, M_SBUF); + p->p_subsystem_root_path = NULL; proc_checkdeadrefs(p); - FREE_ZONE(p, sizeof *p, M_PROC); + zfree(proc_zone, p); } @@ -1168,42 +1185,18 @@ forkproc(proc_t parent_proc) int error = 0; struct session *sessp; uthread_t parent_uthread = (uthread_t)get_bsdthread_info(current_thread()); + rlim_t rlimit_cpu_cur; - MALLOC_ZONE(child_proc, proc_t, sizeof *child_proc, M_PROC, M_WAITOK); - if (child_proc == NULL) { - printf("forkproc: M_PROC zone exhausted\n"); - goto bad; - } - /* zero it out as we need to insert in hash */ - bzero(child_proc, sizeof *child_proc); - - MALLOC_ZONE(child_proc->p_stats, struct pstats *, - sizeof *child_proc->p_stats, M_PSTATS, M_WAITOK); - if (child_proc->p_stats == NULL) { - printf("forkproc: M_SUBPROC zone exhausted (p_stats)\n"); - FREE_ZONE(child_proc, sizeof *child_proc, M_PROC); - child_proc = NULL; - goto bad; - } - MALLOC_ZONE(child_proc->p_sigacts, struct sigacts *, - sizeof *child_proc->p_sigacts, M_SIGACTS, M_WAITOK); - if (child_proc->p_sigacts == NULL) { - printf("forkproc: M_SUBPROC zone exhausted (p_sigacts)\n"); - FREE_ZONE(child_proc->p_stats, sizeof *child_proc->p_stats, M_PSTATS); - child_proc->p_stats = NULL; - FREE_ZONE(child_proc, sizeof *child_proc, M_PROC); - child_proc = NULL; - goto bad; - } + child_proc = zalloc_flags(proc_zone, Z_WAITOK | Z_ZERO); + child_proc->p_stats = zalloc_flags(proc_stats_zone, Z_WAITOK | Z_ZERO); + child_proc->p_sigacts = zalloc_flags(proc_sigacts_zone, Z_WAITOK); /* allocate a callout for use by interval timers */ child_proc->p_rcall = thread_call_allocate((thread_call_func_t)realitexpire, child_proc); if (child_proc->p_rcall == NULL) { - FREE_ZONE(child_proc->p_sigacts, sizeof *child_proc->p_sigacts, M_SIGACTS); - child_proc->p_sigacts = NULL; - FREE_ZONE(child_proc->p_stats, sizeof *child_proc->p_stats, M_PSTATS); - child_proc->p_stats = NULL; - FREE_ZONE(child_proc, sizeof *child_proc, M_PROC); + zfree(proc_sigacts_zone, child_proc->p_sigacts); + zfree(proc_stats_zone, child_proc->p_stats); + zfree(proc_zone, child_proc); child_proc = NULL; goto bad; } @@ -1282,17 +1275,33 @@ retry: __nochk_bcopy(&parent_proc->p_startcopy, &child_proc->p_startcopy, (unsigned) ((caddr_t)&child_proc->p_endcopy - (caddr_t)&child_proc->p_startcopy)); +#if defined(HAS_APPLE_PAC) + /* + * The p_textvp and p_pgrp pointers are address-diversified by PAC, so we must + * resign them here for the new proc + */ + if (parent_proc->p_textvp) { + child_proc->p_textvp = parent_proc->p_textvp; + } + + if (parent_proc->p_pgrp) { + child_proc->p_pgrp = parent_proc->p_pgrp; + } +#endif /* defined(HAS_APPLE_PAC) */ + + child_proc->p_sessionid = parent_proc->p_sessionid; + /* * Some flags are 
inherited from the parent. * Duplicate sub-structures as needed. * Increase reference counts on shared objects. * The p_stats and p_sigacts substructs are set in vm_fork. */ -#if !CONFIG_EMBEDDED - child_proc->p_flag = (parent_proc->p_flag & (P_LP64 | P_DISABLE_ASLR | P_DELAYIDLESLEEP | P_SUGID)); -#else /* !CONFIG_EMBEDDED */ - child_proc->p_flag = (parent_proc->p_flag & (P_LP64 | P_DISABLE_ASLR | P_SUGID)); -#endif /* !CONFIG_EMBEDDED */ +#if CONFIG_DELAY_IDLE_SLEEP + child_proc->p_flag = (parent_proc->p_flag & (P_LP64 | P_TRANSLATED | P_DISABLE_ASLR | P_DELAYIDLESLEEP | P_SUGID | P_AFFINITY)); +#else /* CONFIG_DELAY_IDLE_SLEEP */ + child_proc->p_flag = (parent_proc->p_flag & (P_LP64 | P_TRANSLATED | P_DISABLE_ASLR | P_SUGID)); +#endif /* CONFIG_DELAY_IDLE_SLEEP */ child_proc->p_vfs_iopolicy = (parent_proc->p_vfs_iopolicy & (P_VFS_IOPOLICY_VALID_MASK)); @@ -1348,19 +1357,19 @@ retry: (void)shmfork(parent_proc, child_proc); } #endif + /* - * inherit the limit structure to child + * Child inherits the parent's plimit */ proc_limitfork(parent_proc, child_proc); - if (child_proc->p_limit->pl_rlimit[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) { - uint64_t rlim_cur = child_proc->p_limit->pl_rlimit[RLIMIT_CPU].rlim_cur; - child_proc->p_rlim_cpu.tv_sec = (rlim_cur > __INT_MAX__) ? __INT_MAX__ : rlim_cur; + rlimit_cpu_cur = proc_limitgetcur(child_proc, RLIMIT_CPU, TRUE); + if (rlimit_cpu_cur != RLIM_INFINITY) { + child_proc->p_rlim_cpu.tv_sec = (rlimit_cpu_cur > __INT_MAX__) ? __INT_MAX__ : rlimit_cpu_cur; } /* Intialize new process stats, including start time */ /* non-zeroed portion contains garbage AFAICT */ - bzero(child_proc->p_stats, sizeof(*child_proc->p_stats)); microtime_with_abstime(&child_proc->p_start, &child_proc->p_stats->ps_start); if (parent_proc->p_sigacts != NULL) { @@ -1451,6 +1460,12 @@ retry: child_proc->p_memstat_idledeadline = 0; #endif /* CONFIG_MEMORYSTATUS */ + if (parent_proc->p_subsystem_root_path) { + size_t parent_length = strlen(parent_proc->p_subsystem_root_path) + 1; + MALLOC(child_proc->p_subsystem_root_path, char *, parent_length, M_SBUF, M_WAITOK | M_ZERO); + memcpy(child_proc->p_subsystem_root_path, parent_proc->p_subsystem_root_path, parent_length); + } + bad: return child_proc; } @@ -1504,29 +1519,6 @@ proc_ucred_unlock(proc_t p) lck_mtx_unlock(&p->p_ucred_mlock); } -#include - -struct zone *uthread_zone = NULL; - -static lck_grp_t *rethrottle_lock_grp; -static lck_attr_t *rethrottle_lock_attr; -static lck_grp_attr_t *rethrottle_lock_grp_attr; - -static void -uthread_zone_init(void) -{ - assert(uthread_zone == NULL); - - rethrottle_lock_grp_attr = lck_grp_attr_alloc_init(); - rethrottle_lock_grp = lck_grp_alloc_init("rethrottle", rethrottle_lock_grp_attr); - rethrottle_lock_attr = lck_attr_alloc_init(); - - uthread_zone = zinit(sizeof(struct uthread), - thread_max * sizeof(struct uthread), - THREAD_CHUNK * sizeof(struct uthread), - "uthreads"); -} - void * uthread_alloc(task_t task, thread_t thread, int noinherit) { @@ -1535,19 +1527,14 @@ uthread_alloc(task_t task, thread_t thread, int noinherit) uthread_t uth_parent; void *ut; - if (uthread_zone == NULL) { - uthread_zone_init(); - } - - ut = (void *)zalloc(uthread_zone); - bzero(ut, sizeof(struct uthread)); + ut = zalloc_flags(uthread_zone, Z_WAITOK | Z_ZERO); p = (proc_t) get_bsdtask_info(task); uth = (uthread_t)ut; uth->uu_thread = thread; - lck_spin_init(&uth->uu_rethrottle_lock, rethrottle_lock_grp, - rethrottle_lock_attr); + lck_spin_init(&uth->uu_rethrottle_lock, &rethrottle_lock_grp, + LCK_ATTR_NULL); /* 
* Thread inherits credential from the creating thread, if both @@ -1759,7 +1746,7 @@ uthread_zone_free(void *uthread) uth->t_tombstone = NULL; } - lck_spin_destroy(&uth->uu_rethrottle_lock, rethrottle_lock_grp); + lck_spin_destroy(&uth->uu_rethrottle_lock, &rethrottle_lock_grp); uthread_cleanup_name(uthread); /* and free the uthread itself */ diff --git a/bsd/kern/kern_guarded.c b/bsd/kern/kern_guarded.c index c78c64673..5e10308d8 100644 --- a/bsd/kern/kern_guarded.c +++ b/bsd/kern/kern_guarded.c @@ -57,12 +57,11 @@ #endif -#define f_flag f_fglob->fg_flag -#define f_type f_fglob->fg_ops->fo_type +#define f_flag fp_glob->fg_flag extern int dofilewrite(vfs_context_t ctx, struct fileproc *fp, user_addr_t bufp, user_size_t nbyte, off_t offset, int flags, user_ssize_t *retval ); -extern int wr_uio(struct proc *p, struct fileproc *fp, uio_t uio, user_ssize_t *retval); +extern int do_uiowrite(struct proc *p, struct fileproc *fp, uio_t uio, int flags, user_ssize_t *retval); /* * Experimental guarded file descriptor support. @@ -77,7 +76,7 @@ kern_return_t task_violated_guard(mach_exception_code_t, mach_exception_subcode_ * guarded_fileproc structs which implement guarded fds. The latter * struct (below) embeds the former. * - * The two types should be distinguished by the "type" portion of f_flags. + * The two types should be distinguished by the "type" portion of fp_flags. * There's also a magic number to help catch misuse and bugs. * * This is a bit unpleasant, but results from the desire to allow @@ -104,10 +103,6 @@ struct gfp_crarg { u_int gca_attrs; }; -#ifdef OS_REFCNT_DEBUG -extern struct os_refgrp f_iocount_refgrp; -#endif - static struct fileproc * guarded_fileproc_alloc_init(void *crarg) { @@ -121,8 +116,8 @@ guarded_fileproc_alloc_init(void *crarg) bzero(gfp, sizeof(*gfp)); struct fileproc *fp = &gfp->gf_fileproc; - os_ref_init(&fp->f_iocount, &f_iocount_refgrp); - fp->f_flags = FTYPE_GUARDED; + os_ref_init(&fp->fp_iocount, &f_refgrp); + fp->fp_flags = FTYPE_GUARDED; gfp->gf_magic = GUARDED_FILEPROC_MAGIC; gfp->gf_guard = aarg->gca_guard; @@ -138,7 +133,7 @@ guarded_fileproc_free(struct fileproc *fp) if (FILEPROC_TYPE(fp) != FTYPE_GUARDED || GUARDED_FILEPROC_MAGIC != gfp->gf_magic) { - panic("%s: corrupt fp %p flags %x", __func__, fp, fp->f_flags); + panic("%s: corrupt fp %p flags %x", __func__, fp, fp->fp_flags); } kfree(gfp, sizeof(*gfp)); @@ -192,7 +187,7 @@ fp_isguarded(struct fileproc *fp, u_int attrs) if (GUARDED_FILEPROC_MAGIC != gfp->gf_magic) { panic("%s: corrupt gfp %p flags %x", - __func__, gfp, fp->f_flags); + __func__, gfp, fp->fp_flags); } return (attrs & gfp->gf_attrs) == attrs; } @@ -205,7 +200,7 @@ int fp_guard_exception(proc_t p, int fd, struct fileproc *fp, u_int flavor) { if (FILEPROC_TYPE(fp) != FTYPE_GUARDED) { - panic("%s corrupt fp %p flags %x", __func__, fp, fp->f_flags); + panic("%s corrupt fp %p flags %x", __func__, fp, fp->fp_flags); } struct guarded_fileproc *gfp = FP_TO_GFP(fp); @@ -447,9 +442,8 @@ guarded_close_np(proc_t p, struct guarded_close_np_args *uap, proc_fdunlock(p); return error; } - error = close_internal_locked(p, fd, GFP_TO_FP(gfp), 0); - proc_fdunlock(p); - return error; + fp_drop(p, fd, GFP_TO_FP(gfp), 1); + return fp_close_and_unlock(p, fd, GFP_TO_FP(gfp), 0); } /* @@ -589,7 +583,7 @@ restart: if (GUARDED_FILEPROC_MAGIC != gfp->gf_magic) { panic("%s: corrupt gfp %p flags %x", - __func__, gfp, fp->f_flags); + __func__, gfp, fp->fp_flags); } if (oldg == gfp->gf_guard && @@ -619,7 +613,7 @@ restart: /* * Add a guard to a previously unguarded 
descriptor */ - switch (FILEGLOB_DTYPE(fp->f_fglob)) { + switch (FILEGLOB_DTYPE(fp->fp_glob)) { case DTYPE_VNODE: case DTYPE_PIPE: case DTYPE_SOCKET: @@ -653,7 +647,7 @@ restart: FDFLAGS_SET(p, fd, UF_EXCLOSE); (void) fp_drop(p, fd, nfp, 1); break; - case EKEEPLOOKING: /* f_iocount indicates a collision */ + case EKEEPLOOKING: /* fp_iocount indicates a collision */ (void) fp_drop(p, fd, fp, 1); fileproc_free(nfp); goto restart; @@ -682,7 +676,7 @@ restart: if (GUARDED_FILEPROC_MAGIC != gfp->gf_magic) { panic("%s: corrupt gfp %p flags %x", - __func__, gfp, fp->f_flags); + __func__, gfp, fp->fp_flags); } if (oldg != gfp->gf_guard || @@ -706,7 +700,7 @@ restart: (nfdflags & FD_CLOEXEC) ? UF_EXCLOSE : 0); (void) fp_drop(p, fd, nfp, 1); break; - case EKEEPLOOKING: /* f_iocount indicates collision */ + case EKEEPLOOKING: /* fp_iocount indicates collision */ (void) fp_drop(p, fd, fp, 1); fileproc_free(nfp); goto restart; @@ -745,7 +739,6 @@ guarded_write_np(struct proc *p, struct guarded_write_np_args *uap, user_ssize_t guardid_t uguard; struct fileproc *fp; struct guarded_fileproc *gfp; - bool wrote_some = false; AUDIT_ARG(fd, fd); @@ -763,17 +756,14 @@ guarded_write_np(struct proc *p, struct guarded_write_np_args *uap, user_ssize_t error = EBADF; } else { struct vfs_context context = *(vfs_context_current()); - context.vc_ucred = fp->f_fglob->fg_cred; + context.vc_ucred = fp->fp_glob->fg_cred; error = dofilewrite(&context, fp, uap->cbuf, uap->nbyte, (off_t)-1, 0, retval); - wrote_some = *retval > 0; - } - if (wrote_some) { - fp_drop_written(p, fd, fp); - } else { - fp_drop(p, fd, fp, 0); } + + fp_drop(p, fd, fp, 0); + return error; } @@ -792,7 +782,6 @@ guarded_pwrite_np(struct proc *p, struct guarded_pwrite_np_args *uap, user_ssize vnode_t vp = (vnode_t)0; guardid_t uguard; struct guarded_fileproc *gfp; - bool wrote_some = false; AUDIT_ARG(fd, fd); @@ -810,13 +799,13 @@ guarded_pwrite_np(struct proc *p, struct guarded_pwrite_np_args *uap, user_ssize error = EBADF; } else { struct vfs_context context = *vfs_context_current(); - context.vc_ucred = fp->f_fglob->fg_cred; + context.vc_ucred = fp->fp_glob->fg_cred; - if (fp->f_type != DTYPE_VNODE) { + if (FILEGLOB_DTYPE(fp->fp_glob) != DTYPE_VNODE) { error = ESPIPE; goto errout; } - vp = (vnode_t)fp->f_fglob->fg_data; + vp = (vnode_t)fp->fp_glob->fg_data; if (vnode_isfifo(vp)) { error = ESPIPE; goto errout; @@ -832,14 +821,9 @@ guarded_pwrite_np(struct proc *p, struct guarded_pwrite_np_args *uap, user_ssize error = dofilewrite(&context, fp, uap->buf, uap->nbyte, uap->offset, FOF_OFFSET, retval); - wrote_some = *retval > 0; } errout: - if (wrote_some) { - fp_drop_written(p, fd, fp); - } else { - fp_drop(p, fd, fp, 0); - } + fp_drop(p, fd, fp, 0); KERNEL_DEBUG_CONSTANT((BSDDBG_CODE(DBG_BSD_SC_EXTENDED_INFO, SYS_guarded_pwrite_np) | DBG_FUNC_NONE), uap->fd, uap->nbyte, (unsigned int)((uap->offset >> 32)), (unsigned int)(uap->offset), 0); @@ -863,7 +847,6 @@ guarded_writev_np(struct proc *p, struct guarded_writev_np_args *uap, user_ssize struct user_iovec *iovp; guardid_t uguard; struct guarded_fileproc *gfp; - bool wrote_some = false; AUDIT_ARG(fd, uap->fd); @@ -912,15 +895,10 @@ guarded_writev_np(struct proc *p, struct guarded_writev_np_args *uap, user_ssize if ((fp->f_flag & FWRITE) == 0) { error = EBADF; } else { - error = wr_uio(p, fp, auio, retval); - wrote_some = *retval > 0; + error = do_uiowrite(p, fp, auio, 0, retval); } - if (wrote_some) { - fp_drop_written(p, uap->fd, fp); - } else { - fp_drop(p, uap->fd, fp, 0); - } + fp_drop(p, uap->fd, 
fp, 0); ExitThisRoutine: if (auio != NULL) { uio_free(auio); @@ -1100,7 +1078,7 @@ vnguard_sysc_getguardattr(proc_t p, struct vnguard_getattr *vga) return error; } do { - struct fileglob *fg = fp->f_fglob; + struct fileglob *fg = fp->fp_glob; if (FILEGLOB_DTYPE(fg) != DTYPE_VNODE) { error = EBADF; break; @@ -1162,7 +1140,7 @@ vnguard_sysc_setguard(proc_t p, const struct vnguard_set *vns) error = EBADF; break; } - struct fileglob *fg = fp->f_fglob; + struct fileglob *fg = fp->fp_glob; if (FILEGLOB_DTYPE(fg) != DTYPE_VNODE) { error = EBADF; break; diff --git a/bsd/kern/kern_kpc.c b/bsd/kern/kern_kpc.c index 5e23e548e..e60018e65 100644 --- a/bsd/kern/kern_kpc.c +++ b/bsd/kern/kern_kpc.c @@ -81,8 +81,6 @@ kpc_init(void) lck_mtx_init(&sysctl_lock, sysctl_lckgrp, LCK_ATTR_NULL); kpc_arch_init(); - kpc_common_init(); - kpc_thread_init(); kpc_initted = 1; } @@ -105,7 +103,8 @@ kpc_get_bigarray(uint32_t *size_out) * Another element is needed to hold the CPU number when getting counter * values. */ - bigarray = kalloc_tag(size, VM_KERN_MEMORY_DIAG); + bigarray = kheap_alloc_tag(KHEAP_DATA_BUFFERS, size, + Z_WAITOK, VM_KERN_MEMORY_DIAG); assert(bigarray != NULL); return bigarray; } diff --git a/bsd/kern/kern_ktrace.c b/bsd/kern/kern_ktrace.c index 672bd151e..c36219ae7 100644 --- a/bsd/kern/kern_ktrace.c +++ b/bsd/kern/kern_ktrace.c @@ -330,7 +330,7 @@ ktrace_disable(enum ktrace_state state_to_match) { if (ktrace_state == state_to_match) { kernel_debug_disable(); - kperf_sampling_disable(); + kperf_disable_sampling(); } } diff --git a/bsd/kern/kern_lockf.c b/bsd/kern/kern_lockf.c index d67a8f84b..782346ed2 100644 --- a/bsd/kern/kern_lockf.c +++ b/bsd/kern/kern_lockf.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2019-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -99,12 +99,11 @@ static int lockf_debug = 0; /* was 2, could be 3 ;-) */ SYSCTL_INT(_debug, OID_AUTO, lockf_debug, CTLFLAG_RW | CTLFLAG_LOCKED, &lockf_debug, 0, ""); /* - * If there is no mask bit selector, or there is one, and the selector is - * set, then output the debugging diagnostic. + * If the selector is set, then output the debugging diagnostic. */ #define LOCKF_DEBUG(mask, ...) \ do { \ - if (!(mask) || ((mask) & lockf_debug)) { \ + if ((mask) & lockf_debug) { \ printf("%s>", __FUNCTION__); \ printf(__VA_ARGS__); \ } \ @@ -298,7 +297,7 @@ lf_advlock(struct vnop_advlock_args *ap) lock->lf_head = head; lock->lf_next = (struct lockf *)0; TAILQ_INIT(&lock->lf_blkhd); - lock->lf_flags = ap->a_flags; + lock->lf_flags = (short)ap->a_flags; #if IMPORTANCE_INHERITANCE lock->lf_boosted = LF_NOT_BOOSTED; #endif diff --git a/bsd/kern/kern_malloc.c b/bsd/kern/kern_malloc.c index c9c87bc16..199f0eb7a 100644 --- a/bsd/kern/kern_malloc.c +++ b/bsd/kern/kern_malloc.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2013 Apple Inc. All rights reserved. + * Copyright (c) 2000-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -67,519 +67,25 @@ * Version 2.0. */ -#include -#include - -#include -#include - -#include -#include - -#include -#include -#include -#include -#include - -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -#include -#include -#include -#include -#include - -#include - #include #include -void kmeminit(void); - -/* Strings corresponding to types of memory. 
- * Must be in synch with the #defines is sys/malloc.h - * NOTE - the reason we pass null strings in some cases is to reduce of foot - * print as much as possible for systems where a tiny kernel is needed. - * todo - We should probably redesign this and use enums for our types and only - * include types needed for that configuration of the kernel. This can't be - * done without some kind of kpi since several types are hardwired and exported - * (for example see types M_UDFMNT, M_TEMP, etc in sys/malloc.h) - */ -const char *memname[] = { - "free", /* 0 M_FREE */ - "mbuf", /* 1 M_MBUF */ - "devbuf", /* 2 M_DEVBUF */ - "socket", /* 3 M_SOCKET */ - "pcb", /* 4 M_PCB */ - "routetbl", /* 5 M_RTABLE */ - "hosttbl", /* 6 M_HTABLE */ - "fragtbl", /* 7 M_FTABLE */ - "zombie", /* 8 M_ZOMBIE */ - "ifaddr", /* 9 M_IFADDR */ - "soopts", /* 10 M_SOOPTS */ - "soname", /* 11 M_SONAME */ - "namei", /* 12 M_NAMEI */ - "gprof", /* 13 M_GPROF */ - "ioctlops", /* 14 M_IOCTLOPS */ - "mapmem", /* 15 M_MAPMEM */ - "cred", /* 16 M_CRED */ - "pgrp", /* 17 M_PGRP */ - "session", /* 18 M_SESSION */ - "iov32", /* 19 M_IOV32 */ - "mount", /* 20 M_MOUNT */ - "fhandle", /* 21 M_FHANDLE */ -#if CONFIG_NFS - "NFS req", /* 22 M_NFSREQ */ - "NFS mount", /* 23 M_NFSMNT */ - "NFS node", /* 24 M_NFSNODE */ -#else - "", /* 22 M_NFSREQ */ - "", /* 23 M_NFSMNT */ - "", /* 24 M_NFSNODE */ -#endif - "vnodes", /* 25 M_VNODE */ - "namecache", /* 26 M_CACHE */ -#if QUOTA - "UFS quota", /* 27 M_DQUOT */ -#else - "", /* 27 M_DQUOT */ -#endif - "proc uuid policy", /* 28 M_PROC_UUID_POLICY */ -#if (SYSV_SEM || SYSV_MSG || SYSV_SHM) - "shm", /* 29 M_SHM */ -#else - "", /* 29 M_SHM */ -#endif - "plimit", /* 30 M_VMMAP */ - "sigacts", /* 31 M_VMMAPENT */ - "VM object", /* 32 M_VMOBJ */ - "VM objhash", /* 33 M_VMOBJHASH */ - "VM pmap", /* 34 M_VMPMAP */ - "VM pvmap", /* 35 M_VMPVENT */ - "VM pager", /* 36 M_VMPAGER */ - "VM pgdata", /* 37 M_VMPGDATA */ - "fileproc", /* 38 M_FILEPROC */ - "file desc", /* 39 M_FILEDESC */ - "lockf", /* 40 M_LOCKF */ - "proc", /* 41 M_PROC */ - "pstats", /* 42 M_SUBPROC */ - "LFS segment", /* 43 M_SEGMENT */ - "LFS node", /* 44 M_LFSNODE */ - "", /* 45 M_FFSNODE */ - "MFS node", /* 46 M_MFSNODE */ - "NQNFS Lease", /* 47 M_NQLEASE */ - "NQNFS Host", /* 48 M_NQMHOST */ - "Export Host", /* 49 M_NETADDR */ -#if CONFIG_NFS - "NFS srvsock", /* 50 M_NFSSVC */ - "NFS uid", /* 51 M_NFSUID */ - "NFS daemon", /* 52 M_NFSD */ -#else - "", /* 50 M_NFSSVC */ - "", /* 51 M_NFSUID */ - "", /* 52 M_NFSD */ -#endif - "ip_moptions", /* 53 M_IPMOPTS */ - "in_multi", /* 54 M_IPMADDR */ - "ether_multi", /* 55 M_IFMADDR */ - "mrt", /* 56 M_MRTABLE */ - "", /* 57 unused entry */ - "", /* 58 unused entry */ -#if CONFIG_NFS - "NFSV3 srvdesc",/* 59 M_NFSRVDESC */ - "NFSV3 diroff", /* 60 M_NFSDIROFF */ - "NFSV3 bigfh", /* 61 M_NFSBIGFH */ -#else - "", /* 59 M_NFSRVDESC */ - "", /* 60 M_NFSDIROFF */ - "", /* 61 M_NFSBIGFH */ -#endif - "MSDOSFS mount",/* 62 M_MSDOSFSMNT */ - "MSDOSFS fat", /* 63 M_MSDOSFSFAT */ - "MSDOSFS node", /* 64 M_MSDOSFSNODE */ - "ttys", /* 65 M_TTYS */ - "exec", /* 66 M_EXEC */ - "miscfs mount", /* 67 M_MISCFSMNT */ - "miscfs node", /* 68 M_MISCFSNODE */ - "adosfs mount", /* 69 M_ADOSFSMNT */ - "adosfs node", /* 70 M_ADOSFSNODE */ - "adosfs anode", /* 71 M_ANODE */ - "buf hdrs", /* 72 M_BUFHDR */ - "ofile tabl", /* 73 M_OFILETABL */ - "mbuf clust", /* 74 M_MCLUST */ - "", /* 75 unused */ - "", /* 76 unused */ - "", /* 77 unused */ - "", /* 78 unused */ - "", /* 79 unused */ - "temp", /* 80 M_TEMP */ - "key mgmt", /* 81 
M_SECA */ - "DEVFS", /* 82 M_DEVFS */ - "IpFw/IpAcct", /* 83 M_IPFW */ - "UDF node", /* 84 M_UDFNODE */ - "UDF mount", /* 85 M_UDFMNT */ -#if INET6 - "IPv6 NDP", /* 86 M_IP6NDP */ - "IPv6 options", /* 87 M_IP6OPT */ - "IPv6 Misc", /* 88 M_IP6MISC */ -#else - "", /* 86 M_IP6NDP */ - "", /* 87 M_IP6OPT */ - "", /* 88 M_IP6MISC */ -#endif - "TCP Segment Q",/* 89 M_TSEGQ */ - "IGMP state", /* 90 M_IGMP */ - "", /* 91 unused */ - "", /* 92 unused */ - "specinfo", /* 93 M_SPECINFO */ - "kqueue", /* 94 M_KQUEUE */ - "", /* 95 unused */ - "cluster_read", /* 96 M_CLRDAHEAD */ - "cluster_write",/* 97 M_CLWRBEHIND */ - "iov64", /* 98 M_IOV64 */ - "fileglob", /* 99 M_FILEGLOB */ - "kauth", /* 100 M_KAUTH */ - "dummynet", /* 101 M_DUMMYNET */ - "", /* 102 M_UNSAFEFS */ - "macpipelabel", /* 103 M_MACPIPELABEL */ - "mactemp", /* 104 M_MACTEMP */ - "sbuf", /* 105 M_SBUF */ - "extattr", /* 106 M_EXTATTR */ - "select", /* 107 M_SELECT */ -#if TRAFFIC_MGT - "traffic_mgt", /* 108 M_TRAFFIC_MGT */ -#else - "", /* 108 M_TRAFFIC_MGT */ -#endif -#if FS_COMPRESSION - "decmpfs_cnode",/* 109 M_DECMPFS_CNODE */ -#else - "", /* 109 M_DECMPFS_CNODE */ -#endif /* FS_COMPRESSION */ - "ipmfilter", /* 110 M_INMFILTER */ - "ipmsource", /* 111 M_IPMSOURCE */ - "in6mfilter", /* 112 M_IN6MFILTER */ - "ip6mopts", /* 113 M_IP6MOPTS */ - "ip6msource", /* 114 M_IP6MSOURCE */ -#if FLOW_DIVERT - "flow_divert_pcb", /* 115 M_FLOW_DIVERT_PCB */ - "flow_divert_group", /* 116 M_FLOW_DIVERT_GROUP */ -#else - "", /* 115 M_FLOW_DIVERT_PCB */ - "", /* 116 M_FLOW_DIVERT_GROUP */ -#endif - "ip6cga", /* 117 M_IP6CGA */ -#if NECP - "necp", /* 118 M_NECP */ - "necp_session_policy", /* 119 M_NECP_SESSION_POLICY */ - "necp_socket_policy", /* 120 M_NECP_SOCKET_POLICY */ - "necp_ip_policy", /* 121 M_NECP_IP_POLICY */ -#else - "", /* 118 M_NECP */ - "", /* 119 M_NECP_SESSION_POLICY */ - "", /* 120 M_NECP_SOCKET_POLICY */ - "", /* 121 M_NECP_IP_POLICY */ -#endif - "fdvnodedata" /* 122 M_FD_VN_DATA */ - "fddirbuf", /* 123 M_FD_DIRBUF */ - "netagent", /* 124 M_NETAGENT */ - "Event Handler",/* 125 M_EVENTHANDLER */ - "Link Layer Table", /* 126 M_LLTABLE */ - "Network Work Queue", /* 127 M_NWKWQ */ - "Content Filter", /* 128 M_CFIL */ - "" -}; - -/* for use with kmzones.kz_zalloczone */ -#define KMZ_CREATEZONE_ACCT ((void *)-3) -#define KMZ_CREATEZONE ((void *)-2) -#define KMZ_LOOKUPZONE ((void *)-1) -#define KMZ_MALLOC ((void *)0) -#define KMZ_SHAREZONE ((void *)1) - -struct kmzones { - size_t kz_elemsize; - void *kz_zalloczone; - boolean_t kz_noencrypt; -} kmzones[M_LAST] = { -#define SOS(sname) sizeof (struct sname) -#define SOX(sname) -1 - { -1, 0, FALSE }, /* 0 M_FREE */ - { MSIZE, KMZ_CREATEZONE, FALSE }, /* 1 M_MBUF */ - { 0, KMZ_MALLOC, FALSE }, /* 2 M_DEVBUF */ - { SOS(socket), KMZ_CREATEZONE, TRUE }, /* 3 M_SOCKET */ - { SOS(inpcb), KMZ_LOOKUPZONE, TRUE }, /* 4 M_PCB */ - { M_MBUF, KMZ_SHAREZONE, FALSE }, /* 5 M_RTABLE */ - { M_MBUF, KMZ_SHAREZONE, FALSE }, /* 6 M_HTABLE */ - { M_MBUF, KMZ_SHAREZONE, FALSE }, /* 7 M_FTABLE */ - { SOS(rusage), KMZ_CREATEZONE, TRUE }, /* 8 M_ZOMBIE */ - { 0, KMZ_MALLOC, FALSE }, /* 9 M_IFADDR */ - { M_MBUF, KMZ_SHAREZONE, FALSE }, /* 10 M_SOOPTS */ - { 0, KMZ_MALLOC, FALSE }, /* 11 M_SONAME */ - { MAXPATHLEN, KMZ_CREATEZONE, FALSE }, /* 12 M_NAMEI */ - { 0, KMZ_MALLOC, FALSE }, /* 13 M_GPROF */ - { 0, KMZ_MALLOC, FALSE }, /* 14 M_IOCTLOPS */ - { 0, KMZ_MALLOC, FALSE }, /* 15 M_MAPMEM */ - { SOS(ucred), KMZ_CREATEZONE, FALSE }, /* 16 M_CRED */ - { SOS(pgrp), KMZ_CREATEZONE, FALSE }, /* 17 M_PGRP */ - { SOS(session), 
KMZ_CREATEZONE, FALSE }, /* 18 M_SESSION */ - { SOS(user32_iovec), KMZ_LOOKUPZONE, FALSE }, /* 19 M_IOV32 */ - { SOS(mount), KMZ_CREATEZONE, FALSE }, /* 20 M_MOUNT */ - { 0, KMZ_MALLOC, FALSE }, /* 21 M_FHANDLE */ -#if CONFIG_NFS - { SOS(nfsreq), KMZ_CREATEZONE, FALSE }, /* 22 M_NFSREQ */ - { SOS(nfsmount), KMZ_CREATEZONE, FALSE }, /* 23 M_NFSMNT */ - { SOS(nfsnode), KMZ_CREATEZONE, FALSE }, /* 24 M_NFSNODE */ -#else - { 0, KMZ_MALLOC, FALSE }, /* 22 M_NFSREQ */ - { 0, KMZ_MALLOC, FALSE }, /* 23 M_NFSMNT */ - { 0, KMZ_MALLOC, FALSE }, /* 24 M_NFSNODE */ -#endif - { SOS(vnode), KMZ_CREATEZONE, TRUE }, /* 25 M_VNODE */ - { SOS(namecache), KMZ_CREATEZONE, FALSE }, /* 26 M_CACHE */ -#if QUOTA - { SOX(dquot), KMZ_LOOKUPZONE, FALSE }, /* 27 M_DQUOT */ -#else - { 0, KMZ_MALLOC, FALSE }, /* 27 M_DQUOT */ -#endif - { 0, KMZ_MALLOC, FALSE }, /* 28 M_PROC_UUID_POLICY */ - { 0, KMZ_MALLOC, FALSE }, /* 29 M_SHM */ - { SOS(plimit), KMZ_CREATEZONE, TRUE }, /* 30 M_PLIMIT */ - { SOS(sigacts), KMZ_CREATEZONE_ACCT, TRUE }, /* 31 M_SIGACTS */ - { 0, KMZ_MALLOC, FALSE }, /* 32 M_VMOBJ */ - { 0, KMZ_MALLOC, FALSE }, /* 33 M_VMOBJHASH */ - { 0, KMZ_MALLOC, FALSE }, /* 34 M_VMPMAP */ - { 0, KMZ_MALLOC, FALSE }, /* 35 M_VMPVENT */ - { 0, KMZ_MALLOC, FALSE }, /* 36 M_VMPAGER */ - { 0, KMZ_MALLOC, FALSE }, /* 37 M_VMPGDATA */ - { SOS(fileproc), KMZ_CREATEZONE_ACCT, TRUE }, /* 38 M_FILEPROC */ - { SOS(filedesc), KMZ_CREATEZONE_ACCT, TRUE }, /* 39 M_FILEDESC */ - { SOX(lockf), KMZ_CREATEZONE_ACCT, TRUE }, /* 40 M_LOCKF */ - { SOS(proc), KMZ_CREATEZONE, FALSE }, /* 41 M_PROC */ - { SOS(pstats), KMZ_CREATEZONE, TRUE }, /* 42 M_PSTATS */ - { 0, KMZ_MALLOC, FALSE }, /* 43 M_SEGMENT */ - { M_FFSNODE, KMZ_SHAREZONE, FALSE }, /* 44 M_LFSNODE */ - { 0, KMZ_MALLOC, FALSE }, /* 45 M_FFSNODE */ - { M_FFSNODE, KMZ_SHAREZONE, FALSE }, /* 46 M_MFSNODE */ - { 0, KMZ_MALLOC, FALSE }, /* 47 M_NQLEASE */ - { 0, KMZ_MALLOC, FALSE }, /* 48 M_NQMHOST */ - { 0, KMZ_MALLOC, FALSE }, /* 49 M_NETADDR */ -#if CONFIG_NFS - { SOX(nfsrv_sock), - KMZ_CREATEZONE_ACCT, FALSE }, /* 50 M_NFSSVC */ - { 0, KMZ_MALLOC, FALSE }, /* 51 M_NFSUID */ - { SOX(nfsrvcache), - KMZ_CREATEZONE_ACCT, FALSE }, /* 52 M_NFSD */ -#else - { 0, KMZ_MALLOC, FALSE }, /* 50 M_NFSSVC */ - { 0, KMZ_MALLOC, FALSE }, /* 51 M_NFSUID */ - { 0, KMZ_MALLOC, FALSE }, /* 52 M_NFSD */ -#endif - { SOX(ip_moptions), - KMZ_LOOKUPZONE, FALSE }, /* 53 M_IPMOPTS */ - { SOX(in_multi), KMZ_LOOKUPZONE, FALSE }, /* 54 M_IPMADDR */ - { SOX(ether_multi), - KMZ_LOOKUPZONE, FALSE }, /* 55 M_IFMADDR */ - { SOX(mrt), KMZ_CREATEZONE, TRUE }, /* 56 M_MRTABLE */ - { 0, KMZ_MALLOC, FALSE }, /* 57 unused entry */ - { 0, KMZ_MALLOC, FALSE }, /* 58 unused entry */ -#if CONFIG_NFS - { SOS(nfsrv_descript), - KMZ_CREATEZONE_ACCT, FALSE }, /* 59 M_NFSRVDESC */ - { SOS(nfsdmap), KMZ_CREATEZONE, FALSE }, /* 60 M_NFSDIROFF */ - { SOS(fhandle), KMZ_LOOKUPZONE, FALSE }, /* 61 M_NFSBIGFH */ -#else - { 0, KMZ_MALLOC, FALSE }, /* 59 M_NFSRVDESC */ - { 0, KMZ_MALLOC, FALSE }, /* 60 M_NFSDIROFF */ - { 0, KMZ_MALLOC, FALSE }, /* 61 M_NFSBIGFH */ -#endif - { 0, KMZ_MALLOC, FALSE }, /* 62 M_MSDOSFSMNT */ - { 0, KMZ_MALLOC, FALSE }, /* 63 M_MSDOSFSFAT */ - { 0, KMZ_MALLOC, FALSE }, /* 64 M_MSDOSFSNODE */ - { SOS(tty), KMZ_CREATEZONE, FALSE }, /* 65 M_TTYS */ - { 0, KMZ_MALLOC, FALSE }, /* 66 M_EXEC */ - { 0, KMZ_MALLOC, FALSE }, /* 67 M_MISCFSMNT */ - { 0, KMZ_MALLOC, FALSE }, /* 68 M_MISCFSNODE */ - { 0, KMZ_MALLOC, FALSE }, /* 69 M_ADOSFSMNT */ - { 0, KMZ_MALLOC, FALSE }, /* 70 M_ADOSFSNODE */ - { 0, KMZ_MALLOC, FALSE }, 
/* 71 M_ANODE */ - { 0, KMZ_MALLOC, TRUE }, /* 72 M_BUFHDR */ - { (NDFILE * OFILESIZE), - KMZ_CREATEZONE_ACCT, FALSE }, /* 73 M_OFILETABL */ - { MCLBYTES, KMZ_CREATEZONE, FALSE }, /* 74 M_MCLUST */ - { 0, KMZ_MALLOC, FALSE }, /* 75 unused */ - { 0, KMZ_MALLOC, FALSE }, /* 76 unused */ - { 0, KMZ_MALLOC, FALSE }, /* 77 unused */ - { 0, KMZ_MALLOC, FALSE }, /* 78 unused */ - { 0, KMZ_MALLOC, FALSE }, /* 79 unused */ - { 0, KMZ_MALLOC, FALSE }, /* 80 M_TEMP */ - { 0, KMZ_MALLOC, FALSE }, /* 81 M_SECA */ - { 0, KMZ_MALLOC, FALSE }, /* 82 M_DEVFS */ - { 0, KMZ_MALLOC, FALSE }, /* 83 M_IPFW */ - { 0, KMZ_MALLOC, FALSE }, /* 84 M_UDFNODE */ - { 0, KMZ_MALLOC, FALSE }, /* 85 M_UDFMOUNT */ - { 0, KMZ_MALLOC, FALSE }, /* 86 M_IP6NDP */ - { 0, KMZ_MALLOC, FALSE }, /* 87 M_IP6OPT */ - { 0, KMZ_MALLOC, FALSE }, /* 88 M_IP6MISC */ - { 0, KMZ_MALLOC, FALSE }, /* 89 M_TSEGQ */ - { 0, KMZ_MALLOC, FALSE }, /* 90 M_IGMP */ - { 0, KMZ_MALLOC, FALSE }, /* 91 unused */ - { 0, KMZ_MALLOC, FALSE }, /* 92 unused */ - { SOS(specinfo), KMZ_CREATEZONE, TRUE }, /* 93 M_SPECINFO */ - { SOS(kqueue), KMZ_CREATEZONE, FALSE }, /* 94 M_KQUEUE */ - { 0, KMZ_MALLOC, FALSE }, /* 95 unused */ - { SOS(cl_readahead), KMZ_CREATEZONE, TRUE }, /* 96 M_CLRDAHEAD */ - { SOS(cl_writebehind), KMZ_CREATEZONE, TRUE }, /* 97 M_CLWRBEHIND */ - { SOS(user64_iovec), KMZ_LOOKUPZONE, FALSE }, /* 98 M_IOV64 */ - { SOS(fileglob), KMZ_CREATEZONE, TRUE }, /* 99 M_FILEGLOB */ - { 0, KMZ_MALLOC, FALSE }, /* 100 M_KAUTH */ - { 0, KMZ_MALLOC, FALSE }, /* 101 M_DUMMYNET */ - { 0, KMZ_MALLOC, FALSE }, /* 102 M_UNSAFEFS */ - { 0, KMZ_MALLOC, FALSE }, /* 103 M_MACPIPELABEL */ - { 0, KMZ_MALLOC, FALSE }, /* 104 M_MACTEMP */ - { 0, KMZ_MALLOC, FALSE }, /* 105 M_SBUF */ - { 0, KMZ_MALLOC, FALSE }, /* 106 M_HFS_EXTATTR */ - { 0, KMZ_MALLOC, FALSE }, /* 107 M_SELECT */ - { 0, KMZ_MALLOC, FALSE }, /* 108 M_TRAFFIC_MGT */ -#if FS_COMPRESSION - { SOS(decmpfs_cnode), KMZ_CREATEZONE, FALSE}, /* 109 M_DECMPFS_CNODE */ -#else - { 0, KMZ_MALLOC, FALSE }, /* 109 M_DECMPFS_CNODE */ -#endif /* FS_COMPRESSION */ - { 0, KMZ_MALLOC, FALSE }, /* 110 M_INMFILTER */ - { 0, KMZ_MALLOC, FALSE }, /* 111 M_IPMSOURCE */ - { 0, KMZ_MALLOC, FALSE }, /* 112 M_IN6MFILTER */ - { 0, KMZ_MALLOC, FALSE }, /* 113 M_IP6MOPTS */ - { 0, KMZ_MALLOC, FALSE }, /* 114 M_IP6MSOURCE */ -#if FLOW_DIVERT - { SOS(flow_divert_pcb), KMZ_CREATEZONE, TRUE }, /* 115 M_FLOW_DIVERT_PCB */ - { SOS(flow_divert_group), KMZ_CREATEZONE, TRUE }, /* 116 M_FLOW_DIVERT_GROUP */ -#else - { 0, KMZ_MALLOC, FALSE }, /* 115 M_FLOW_DIVERT_PCB */ - { 0, KMZ_MALLOC, FALSE }, /* 116 M_FLOW_DIVERT_GROUP */ -#endif /* FLOW_DIVERT */ - { 0, KMZ_MALLOC, FALSE }, /* 117 M_IP6CGA */ - { 0, KMZ_MALLOC, FALSE }, /* 118 M_NECP */ -#if NECP - { SOS(necp_session_policy), KMZ_CREATEZONE, TRUE }, /* 119 M_NECP_SESSION_POLICY */ - { SOS(necp_kernel_socket_policy), KMZ_CREATEZONE, TRUE }, /* 120 M_NECP_SOCKET_POLICY */ - { SOS(necp_kernel_ip_output_policy), KMZ_CREATEZONE, TRUE }, /* 121 M_NECP_IP_POLICY */ -#else - { 0, KMZ_MALLOC, FALSE }, /* 119 M_NECP_SESSION_POLICY */ - { 0, KMZ_MALLOC, FALSE }, /* 120 M_NECP_SOCKET_POLICY */ - { 0, KMZ_MALLOC, FALSE }, /* 121 M_NECP_IP_POLICY */ -#endif /* NECP */ - { 0, KMZ_MALLOC, FALSE }, /* 122 M_FD_VN_DATA */ - { 0, KMZ_MALLOC, FALSE }, /* 123 M_FD_DIRBUF */ - { 0, KMZ_MALLOC, FALSE }, /* 124 M_NETAGENT */ - { 0, KMZ_MALLOC, FALSE }, /* 125 M_EVENTHANDLER */ - { 0, KMZ_MALLOC, FALSE }, /* 126 M_LLTABLE */ - { 0, KMZ_MALLOC, FALSE }, /* 127 M_NWKWQ */ - { 0, KMZ_MALLOC, FALSE }, /* 128 M_CFIL */ 
-#undef SOS -#undef SOX -}; - -extern zone_t kalloc_zone(vm_size_t); /* XXX */ - -/* - * Initialize the kernel memory allocator - */ -void -kmeminit(void) -{ - struct kmzones *kmz; - - if ((sizeof(kmzones) / sizeof(kmzones[0])) != (sizeof(memname) / sizeof(memname[0]))) { - panic("kmeminit: kmzones has %lu elements but memname has %lu\n", - (sizeof(kmzones) / sizeof(kmzones[0])), (sizeof(memname) / sizeof(memname[0]))); - } - - kmz = kmzones; - while (kmz < &kmzones[M_LAST]) { -/* XXX */ - if (kmz->kz_elemsize == (size_t)(-1)) { - ; - } else -/* XXX */ - if (kmz->kz_zalloczone == KMZ_CREATEZONE || - kmz->kz_zalloczone == KMZ_CREATEZONE_ACCT) { - kmz->kz_zalloczone = zinit(kmz->kz_elemsize, - 1024 * 1024, PAGE_SIZE, - memname[kmz - kmzones]); - zone_change(kmz->kz_zalloczone, Z_CALLERACCT, - (kmz->kz_zalloczone == KMZ_CREATEZONE_ACCT)); - - if (kmz->kz_noencrypt == TRUE) { - zone_change(kmz->kz_zalloczone, Z_NOENCRYPT, TRUE); - } - } else if (kmz->kz_zalloczone == KMZ_LOOKUPZONE) { - kmz->kz_zalloczone = kalloc_zone(kmz->kz_elemsize); - } - - kmz++; - } +#include +#include - kmz = kmzones; - while (kmz < &kmzones[M_LAST]) { -/* XXX */ - if (kmz->kz_elemsize == (size_t)(-1)) { - ; - } else -/* XXX */ - if (kmz->kz_zalloczone == KMZ_SHAREZONE) { - kmz->kz_zalloczone = - kmzones[kmz->kz_elemsize].kz_zalloczone; - kmz->kz_elemsize = - kmzones[kmz->kz_elemsize].kz_elemsize; - } +#include - kmz++; - } -} +ZONE_VIEW_DEFINE(ZV_NAMEI, "vfs.namei", KHEAP_ID_DATA_BUFFERS, MAXPATHLEN); -void * -_MALLOC_external( - size_t size, - int type, - int flags); -void * -_MALLOC_external( - size_t size, - int type, - int flags) -{ - static vm_allocation_site_t site = { .tag = VM_KERN_MEMORY_KALLOC, .flags = VM_TAG_BT }; - return __MALLOC(size, type, flags, &site); -} - -void * -__MALLOC( +static void * +__MALLOC_ext( size_t size, int type, int flags, - vm_allocation_site_t *site) + vm_allocation_site_t *site, + kalloc_heap_t heap) { void *addr = NULL; - vm_size_t msize = size; if (type >= M_LAST) { panic("_malloc TYPE"); @@ -589,192 +95,93 @@ __MALLOC( return NULL; } - if (msize != size) { - panic("Requested size to __MALLOC is too large (%llx)!\n", (uint64_t)size); - } + static_assert(sizeof(vm_size_t) == sizeof(size_t)); + static_assert(M_WAITOK == Z_WAITOK); + static_assert(M_NOWAIT == Z_NOWAIT); + static_assert(M_ZERO == Z_ZERO); - if (flags & M_NOWAIT) { - addr = (void *)kalloc_canblock(&msize, FALSE, site); - } else { - addr = (void *)kalloc_canblock(&msize, TRUE, site); - if (addr == NULL) { - /* - * We get here when the caller told us to block waiting for memory, but - * kalloc said there's no memory left to get. Generally, this means there's a - * leak or the caller asked for an impossibly large amount of memory. If the caller - * is expecting a NULL return code then it should explicitly set the flag M_NULL. - * If the caller isn't expecting a NULL return code, we just panic. This is less - * than ideal, but returning NULL when the caller isn't expecting it doesn't help - * since the majority of callers don't check the return value and will just - * dereference the pointer and trap anyway. We may as well get a more - * descriptive message out while we can. 
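The replacement path above leans on the static_asserts: because M_WAITOK, M_NOWAIT and M_ZERO have the same values as Z_WAITOK, Z_NOWAIT and Z_ZERO, __MALLOC_ext() can hand the caller's flag word to kalloc_ext() after a simple mask. A minimal sketch of what a legacy _MALLOC(size, M_TEMP, M_WAITOK | M_ZERO) now reduces to (the wrapper name is made up for illustration; KHEAP_DEFAULT, kalloc_ext() and its .addr result field are taken from this patch):

    /* Sketch only: the legacy malloc path expressed as a direct kalloc_ext() call. */
    static void *
    example_legacy_alloc(size_t size, vm_allocation_site_t *site)
    {
        /* M_WAITOK | M_ZERO passes through unchanged because the M_* and Z_* bits match. */
        return kalloc_ext(KHEAP_DEFAULT, size, Z_WAITOK | Z_ZERO, site).addr;
    }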
- */ - if (flags & M_NULL) { - return NULL; - } - panic("_MALLOC: kalloc returned NULL (potential leak), size %llu", (uint64_t) size); - } - } - if (!addr) { - return 0; + addr = kalloc_ext(heap, size, + flags & (M_WAITOK | M_NOWAIT | M_ZERO), site).addr; + if (__probable(addr)) { + return addr; } - if (flags & M_ZERO) { - bzero(addr, size); + if (flags & (M_NOWAIT | M_NULL)) { + return NULL; } - return addr; + /* + * We get here when the caller told us to block waiting for memory, but + * kalloc said there's no memory left to get. Generally, this means there's a + * leak or the caller asked for an impossibly large amount of memory. If the caller + * is expecting a NULL return code then it should explicitly set the flag M_NULL. + * If the caller isn't expecting a NULL return code, we just panic. This is less + * than ideal, but returning NULL when the caller isn't expecting it doesn't help + * since the majority of callers don't check the return value and will just + * dereference the pointer and trap anyway. We may as well get a more + * descriptive message out while we can. + */ + panic("_MALLOC: kalloc returned NULL (potential leak), size %llu", (uint64_t) size); } -void -_FREE( - void *addr, - int type) +void * +__MALLOC(size_t size, int type, int flags, vm_allocation_site_t *site) { - if (type >= M_LAST) { - panic("_free TYPE"); - } - - if (!addr) { - return; /* correct (convenient bsd kernel legacy) */ - } - kfree_addr(addr); + return __MALLOC_ext(size, type, flags, site, KHEAP_DEFAULT); } void * __REALLOC( void *addr, size_t size, - int type, + int type __unused, int flags, vm_allocation_site_t *site) { - void *newaddr; - size_t alloc; - - /* realloc(NULL, ...) is equivalent to malloc(...) */ - if (addr == NULL) { - return __MALLOC(size, type, flags, site); - } + addr = kheap_realloc_addr(KHEAP_DEFAULT, addr, size, + flags & (M_WAITOK | M_NOWAIT | M_ZERO), site).addr; - alloc = kalloc_size(addr); - /* - * Find out the size of the bucket in which the new sized allocation - * would land. If it matches the bucket of the original allocation, - * simply return the address. 
- */ - if (kalloc_bucket_size(size) == alloc) { - if (flags & M_ZERO) { - if (alloc < size) { - bzero(addr + alloc, (size - alloc)); - } else { - bzero(addr + size, (alloc - size)); - } - } + if (__probable(addr)) { return addr; } - /* Allocate a new, bigger (or smaller) block */ - if ((newaddr = __MALLOC(size, type, flags, site)) == NULL) { + if (flags & (M_NOWAIT | M_NULL)) { return NULL; } - /* Copy over original contents */ - bcopy(addr, newaddr, MIN(size, alloc)); - _FREE(addr, type); - - return newaddr; + panic("_REALLOC: kalloc returned NULL (potential leak), size %llu", (uint64_t) size); } void * -_MALLOC_ZONE_external( - size_t size, - int type, - int flags); +_MALLOC_external(size_t size, int type, int flags); void * -_MALLOC_ZONE_external( - size_t size, - int type, - int flags) +_MALLOC_external(size_t size, int type, int flags) { - return __MALLOC_ZONE(size, type, flags, NULL); + static vm_allocation_site_t site = { + .tag = VM_KERN_MEMORY_KALLOC, + .flags = VM_TAG_BT, + }; + return __MALLOC_ext(size, type, flags, &site, KHEAP_KEXT); } -void * -__MALLOC_ZONE( - size_t size, - int type, - int flags, - vm_allocation_site_t *site) +void +_FREE_external(void *addr, int type); +void +_FREE_external(void *addr, int type __unused) { - struct kmzones *kmz; - void *elem; - - if (type >= M_LAST) { - panic("_malloc_zone TYPE"); - } - - kmz = &kmzones[type]; - if (kmz->kz_zalloczone == KMZ_MALLOC) { - panic("_malloc_zone ZONE: type = %d", type); - } - -/* XXX */ - if (kmz->kz_elemsize == (size_t)(-1)) { - panic("_malloc_zone XXX"); - } -/* XXX */ - if (size == kmz->kz_elemsize) { - if (flags & M_NOWAIT) { - elem = (void *)zalloc_noblock(kmz->kz_zalloczone); - } else { - elem = (void *)zalloc(kmz->kz_zalloczone); - } - } else { - vm_size_t kalloc_size = size; - if (size > kalloc_size) { - elem = NULL; - } else if (flags & M_NOWAIT) { - elem = (void *)kalloc_canblock(&kalloc_size, FALSE, site); - } else { - elem = (void *)kalloc_canblock(&kalloc_size, TRUE, site); - } - } - - if (elem && (flags & M_ZERO)) { - bzero(elem, size); - } - - return elem; + /* + * hashinit and other functions allocate on behalf of kexts and do not have + * a matching hashdestroy, so we sadly have to allow this for now. 
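The situation that comment describes, seen from a kext (hypothetical caller code, not part of this patch): hashinit() allocates the table on the kext's behalf, and the kext later frees the bare pointer with no size or heap information, so the size-less free has to be honored from whichever heap actually backed the allocation; hence the KHEAP_ANY free below.

    /* Hypothetical kext fragment that forces the KHEAP_ANY free path. */
    u_long hash_mask;
    void  *hash_table = hashinit(64, M_TEMP, &hash_mask); /* allocated on the kext's behalf */
    /* ... populate and use the table; there is no matching hashdestroy() call ... */
    _FREE(hash_table, M_TEMP); /* no size, no heap: the free must resolve the allocation itself */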
+ */ + kheap_free_addr(KHEAP_ANY, addr); } void -_FREE_ZONE( - void *elem, - size_t size, - int type) +_FREE_ZONE_external(void *elem, size_t size, int type); +void +_FREE_ZONE_external(void *elem, size_t size, int type __unused) { - struct kmzones *kmz; - - if (type >= M_LAST) { - panic("FREE_SIZE"); - } - - kmz = &kmzones[type]; - if (kmz->kz_zalloczone == KMZ_MALLOC) { - panic("free_zone ZONE"); - } - -/* XXX */ - if (kmz->kz_elemsize == (size_t)(-1)) { - panic("FREE_SIZE XXX"); - } -/* XXX */ - if (size == kmz->kz_elemsize) { - zfree(kmz->kz_zalloczone, elem); - } else { - kfree(elem, size); - } + (kheap_free)(KHEAP_KEXT, elem, size); } #if DEBUG || DEVELOPMENT @@ -976,3 +383,43 @@ sysctl_zones_collectable_bytes SYSCTL_HANDLER_ARGS SYSCTL_PROC(_kern, OID_AUTO, zones_collectable_bytes, CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, 0, &sysctl_zones_collectable_bytes, "Q", "Collectable memory in zones"); + + +#if DEBUG || DEVELOPMENT + +static int +sysctl_zone_gc_replenish_test SYSCTL_HANDLER_ARGS +{ +#pragma unused(oidp, arg1, arg2) + /* require setting this sysctl to prevent sysctl -a from running this */ + if (!req->newptr) { + return 0; + } + + int ret_val = 0; + zone_gc_replenish_test(); + return SYSCTL_OUT(req, &ret_val, sizeof(ret_val)); +} + +static int +sysctl_zone_alloc_replenish_test SYSCTL_HANDLER_ARGS +{ +#pragma unused(oidp, arg1, arg2) + /* require setting this sysctl to prevent sysctl -a from running this */ + if (!req->newptr) { + return 0; + } + + int ret_val = 0; + zone_alloc_replenish_test(); + return SYSCTL_OUT(req, &ret_val, sizeof(ret_val)); +} + +SYSCTL_PROC(_kern, OID_AUTO, zone_gc_replenish_test, + CTLTYPE_INT | CTLFLAG_MASKED | CTLFLAG_LOCKED | CTLFLAG_WR, + 0, 0, &sysctl_zone_gc_replenish_test, "I", "Test zone GC replenish"); +SYSCTL_PROC(_kern, OID_AUTO, zone_alloc_replenish_test, + CTLTYPE_INT | CTLFLAG_MASKED | CTLFLAG_LOCKED | CTLFLAG_WR, + 0, 0, &sysctl_zone_alloc_replenish_test, "I", "Test zone alloc replenish"); + +#endif /* DEBUG || DEVELOPMENT */ diff --git a/bsd/kern/kern_memorystatus.c b/bsd/kern/kern_memorystatus.c index 81f60d5e8..3d8198474 100644 --- a/bsd/kern/kern_memorystatus.c +++ b/bsd/kern/kern_memorystatus.c @@ -38,8 +38,6 @@ #include #include -#include - #include #include #include @@ -61,12 +59,15 @@ #include #include #include +#include #include #include #include #include #include +#include + #if CONFIG_FREEZE #include #endif /* CONFIG_FREEZE */ @@ -227,6 +228,21 @@ unsigned long sysproc_aging_aggr_threshold_percentage = 7; */ memorystatus_jetsam_snapshot_t *memorystatus_jetsam_snapshot; memorystatus_jetsam_snapshot_t *memorystatus_jetsam_snapshot_copy; + +#if CONFIG_FREEZE +memorystatus_jetsam_snapshot_t *memorystatus_jetsam_snapshot_freezer; +/* + * The size of the freezer snapshot is given by memorystatus_jetsam_snapshot_max / JETSAM_SNAPSHOT_FREEZER_MAX_FACTOR + * The freezer snapshot can be much smaller than the default snapshot + * because it only includes apps that have been killed and dasd consumes it every 30 minutes. + * Since the snapshots are always wired we don't want to overallocate too much. 
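The wiring concern in that comment is what drives the sizing: the freezer snapshot gets memorystatus_jetsam_snapshot_max / JETSAM_SNAPSHOT_FREEZER_MAX_FACTOR entries rather than a full-size buffer. The computation memorystatus_init() performs later in this patch reduces to the following standalone recap (a plain restatement for clarity, not kernel code):

    /* Recap of the freezer snapshot sizing; the factor of 20 comes from this patch. */
    unsigned int freezer_max  = memorystatus_jetsam_snapshot_max / JETSAM_SNAPSHOT_FREEZER_MAX_FACTOR;
    unsigned int freezer_size = sizeof(memorystatus_jetsam_snapshot_t) +
        sizeof(memorystatus_jetsam_snapshot_entry_t) * freezer_max;
    /* For illustration: a 1000-entry main snapshot yields a 50-entry freezer snapshot. */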
+ */ +#define JETSAM_SNAPSHOT_FREEZER_MAX_FACTOR 20 +unsigned int memorystatus_jetsam_snapshot_freezer_max; +unsigned int memorystatus_jetsam_snapshot_freezer_size; +TUNABLE(bool, memorystatus_jetsam_use_freezer_snapshot, "kern.jetsam_user_freezer_snapshot", true); +#endif /* CONFIG_FREEZE */ + unsigned int memorystatus_jetsam_snapshot_count = 0; unsigned int memorystatus_jetsam_snapshot_copy_count = 0; unsigned int memorystatus_jetsam_snapshot_max = 0; @@ -234,10 +250,31 @@ unsigned int memorystatus_jetsam_snapshot_size = 0; uint64_t memorystatus_jetsam_snapshot_last_timestamp = 0; uint64_t memorystatus_jetsam_snapshot_timeout = 0; +#if DEVELOPMENT || DEBUG +/* + * On development and debug kernels, we allow one pid to take ownership + * of the memorystatus snapshot (via memorystatus_control). + * If there's an owner, then only they may consume the snapshot. + * This is used when testing the snapshot interface to avoid racing with other + * processes on the system that consume snapshots. + */ +static pid_t memorystatus_snapshot_owner = 0; +SYSCTL_INT(_kern, OID_AUTO, memorystatus_snapshot_owner, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, &memorystatus_snapshot_owner, 0, ""); +#endif /* DEVELOPMENT || DEBUG */ +static void memorystatus_init_jetsam_snapshot_header(memorystatus_jetsam_snapshot_t *snapshot); + /* General memorystatus stuff */ uint64_t memorystatus_sysprocs_idle_delay_time = 0; uint64_t memorystatus_apps_idle_delay_time = 0; +/* Some devices give entitled apps a higher memory limit */ +#if __arm64__ +int32_t memorystatus_entitled_max_task_footprint_mb = 0; + +#if DEVELOPMENT || DEBUG +SYSCTL_INT(_kern, OID_AUTO, entitled_max_task_pmem, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, &memorystatus_entitled_max_task_footprint_mb, 0, ""); +#endif /* DEVELOPMENT || DEBUG */ +#endif /* __arm64__ */ static lck_grp_attr_t *memorystatus_jetsam_fg_band_lock_grp_attr; static lck_grp_t *memorystatus_jetsam_fg_band_lock_grp; @@ -481,7 +518,7 @@ sysctl_jetsam_set_sysprocs_idle_delay_time SYSCTL_HANDLER_ARGS uint64_t old_time_in_ns = 0; absolutetime_to_nanoseconds(memorystatus_sysprocs_idle_delay_time, &old_time_in_ns); - old_time_in_secs = old_time_in_ns / NSEC_PER_SEC; + old_time_in_secs = (int) (old_time_in_ns / NSEC_PER_SEC); error = sysctl_io_number(req, old_time_in_secs, sizeof(int), &val, NULL); if (error || !req->newptr) { @@ -511,7 +548,7 @@ sysctl_jetsam_set_apps_idle_delay_time SYSCTL_HANDLER_ARGS uint64_t old_time_in_ns = 0; absolutetime_to_nanoseconds(memorystatus_apps_idle_delay_time, &old_time_in_ns); - old_time_in_secs = old_time_in_ns / NSEC_PER_SEC; + old_time_in_secs = (int) (old_time_in_ns / NSEC_PER_SEC); error = sysctl_io_number(req, old_time_in_secs, sizeof(int), &val, NULL); if (error || !req->newptr) { @@ -541,7 +578,6 @@ static int memorystatus_highwater_enabled = 1; /* Update the cached memlimit da static boolean_t proc_jetsam_state_is_active_locked(proc_t); #if __arm64__ -#if CONFIG_MEMORYSTATUS int legacy_footprint_bonus_mb = 50; /* This value was chosen after looking at the top 30 apps * that needed the additional room in their footprint when * the 'correct' accounting methods were applied to them. 
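The entitled memory limit declared above is populated from the entitled_max_task_pmem boot-arg (or the kern.entitled_max_task_pmem device default) during memorystatus_init() later in this patch, and the DEVELOPMENT/DEBUG sysctl makes it visible for testing. A minimal user-space read of that sysctl, assuming the standard sysctlbyname() interface and a kernel built with the sysctl compiled in:

    /* Hypothetical test-tool snippet: read the entitled per-task limit in MB. */
    #include <stdint.h>
    #include <sys/sysctl.h>

    int32_t entitled_mb = 0;
    size_t  len = sizeof(entitled_mb);
    if (sysctlbyname("kern.entitled_max_task_pmem", &entitled_mb, &len, NULL, 0) == 0) {
        /* A value of 0 means the entitlement is not supported on this device. */
    }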
@@ -550,18 +586,19 @@ int legacy_footprint_bonus_mb = 50; /* This value was chosen after looking at th #if DEVELOPMENT || DEBUG SYSCTL_INT(_kern, OID_AUTO, legacy_footprint_bonus_mb, CTLFLAG_RW | CTLFLAG_LOCKED, &legacy_footprint_bonus_mb, 0, ""); #endif /* DEVELOPMENT || DEBUG */ - -void -memorystatus_act_on_legacy_footprint_entitlement(proc_t p, boolean_t footprint_increase) +/* + * Raise the inactive and active memory limits to new values. + * Will only raise the limits and will do nothing if either of the current + * limits are 0. + * Caller must hold the proc_list_lock + */ +static void +memorystatus_raise_memlimit(proc_t p, int new_memlimit_active, int new_memlimit_inactive) { int memlimit_mb_active = 0, memlimit_mb_inactive = 0; - boolean_t memlimit_active_is_fatal = FALSE, memlimit_inactive_is_fatal = 0, use_active_limit = FALSE; + boolean_t memlimit_active_is_fatal = FALSE, memlimit_inactive_is_fatal = FALSE, use_active_limit = FALSE; - if (p == NULL) { - return; - } - - proc_list_lock(); + LCK_MTX_ASSERT(proc_list_mlock, LCK_MTX_ASSERT_OWNED); if (p->p_memstat_memlimit_active > 0) { memlimit_mb_active = p->p_memstat_memlimit_active; @@ -573,7 +610,6 @@ memorystatus_act_on_legacy_footprint_entitlement(proc_t p, boolean_t footprint_i * a special value only used internally * to test 'no limits'. */ - proc_list_unlock(); return; } @@ -587,24 +623,11 @@ memorystatus_act_on_legacy_footprint_entitlement(proc_t p, boolean_t footprint_i * a special value only used internally * to test 'no limits'. */ - proc_list_unlock(); return; } - if (footprint_increase) { - memlimit_mb_active += legacy_footprint_bonus_mb; - memlimit_mb_inactive += legacy_footprint_bonus_mb; - } else { - memlimit_mb_active -= legacy_footprint_bonus_mb; - if (memlimit_mb_active == max_task_footprint_mb) { - memlimit_mb_active = -1; /* reverting back to default system limit */ - } - - memlimit_mb_inactive -= legacy_footprint_bonus_mb; - if (memlimit_mb_inactive == max_task_footprint_mb) { - memlimit_mb_inactive = -1; /* reverting back to default system limit */ - } - } + memlimit_mb_active = MAX(new_memlimit_active, memlimit_mb_active); + memlimit_mb_inactive = MAX(new_memlimit_inactive, memlimit_mb_inactive); memlimit_active_is_fatal = (p->p_memstat_state & P_MEMSTAT_MEMLIMIT_ACTIVE_FATAL); memlimit_inactive_is_fatal = (p->p_memstat_state & P_MEMSTAT_MEMLIMIT_INACTIVE_FATAL); @@ -619,7 +642,6 @@ memorystatus_act_on_legacy_footprint_entitlement(proc_t p, boolean_t footprint_i CACHE_INACTIVE_LIMITS_LOCKED(p, memlimit_inactive_is_fatal); } - if (memorystatus_highwater_enabled) { task_set_phys_footprint_limit_internal(p->task, (p->p_memstat_memlimit > 0) ? p->p_memstat_memlimit : -1, @@ -627,19 +649,14 @@ memorystatus_act_on_legacy_footprint_entitlement(proc_t p, boolean_t footprint_i use_active_limit, /*active limit?*/ (use_active_limit ? 
memlimit_active_is_fatal : memlimit_inactive_is_fatal)); } - - proc_list_unlock(); } void -memorystatus_act_on_ios13extended_footprint_entitlement(proc_t p) +memorystatus_act_on_legacy_footprint_entitlement(proc_t p, boolean_t footprint_increase) { int memlimit_mb_active = 0, memlimit_mb_inactive = 0; - boolean_t memlimit_active_is_fatal = FALSE, memlimit_inactive_is_fatal = 0, use_active_limit = FALSE; - if (max_mem < 1500ULL * 1024 * 1024 || - max_mem > 2ULL * 1024 * 1024 * 1024) { - /* ios13extended_footprint is only for 2GB devices */ + if (p == NULL) { return; } @@ -673,50 +690,54 @@ memorystatus_act_on_ios13extended_footprint_entitlement(proc_t p) return; } - /* limit to "almost 2GB" */ - int ios13extended_footprint_mb = 1800; - if (memlimit_mb_active > ios13extended_footprint_mb) { - /* do not lower the current limit */ - proc_list_unlock(); - return; - } - memlimit_mb_active = ios13extended_footprint_mb; - memlimit_mb_inactive = ios13extended_footprint_mb; - - memlimit_active_is_fatal = (p->p_memstat_state & P_MEMSTAT_MEMLIMIT_ACTIVE_FATAL); - memlimit_inactive_is_fatal = (p->p_memstat_state & P_MEMSTAT_MEMLIMIT_INACTIVE_FATAL); - - SET_ACTIVE_LIMITS_LOCKED(p, memlimit_mb_active, memlimit_active_is_fatal); - SET_INACTIVE_LIMITS_LOCKED(p, memlimit_mb_inactive, memlimit_inactive_is_fatal); - - if (proc_jetsam_state_is_active_locked(p) == TRUE) { - use_active_limit = TRUE; - CACHE_ACTIVE_LIMITS_LOCKED(p, memlimit_active_is_fatal); + if (footprint_increase) { + memlimit_mb_active += legacy_footprint_bonus_mb; + memlimit_mb_inactive += legacy_footprint_bonus_mb; } else { - CACHE_INACTIVE_LIMITS_LOCKED(p, memlimit_inactive_is_fatal); + memlimit_mb_active -= legacy_footprint_bonus_mb; + if (memlimit_mb_active == max_task_footprint_mb) { + memlimit_mb_active = -1; /* reverting back to default system limit */ + } + + memlimit_mb_inactive -= legacy_footprint_bonus_mb; + if (memlimit_mb_inactive == max_task_footprint_mb) { + memlimit_mb_inactive = -1; /* reverting back to default system limit */ + } } + memorystatus_raise_memlimit(p, memlimit_mb_active, memlimit_mb_inactive); + proc_list_unlock(); +} - if (memorystatus_highwater_enabled) { - task_set_phys_footprint_limit_internal(p->task, - (p->p_memstat_memlimit > 0) ? p->p_memstat_memlimit : -1, - NULL, /*return old value */ - use_active_limit, /*active limit?*/ - (use_active_limit ? memlimit_active_is_fatal : memlimit_inactive_is_fatal)); +void +memorystatus_act_on_ios13extended_footprint_entitlement(proc_t p) +{ + if (max_mem < 1500ULL * 1024 * 1024 || + max_mem > 2ULL * 1024 * 1024 * 1024) { + /* ios13extended_footprint is only for 2GB devices */ + return; } - + /* limit to "almost 2GB" */ + proc_list_lock(); + memorystatus_raise_memlimit(p, 1800, 1800); proc_list_unlock(); } -#endif /* CONFIG_MEMORYSTATUS */ +void +memorystatus_act_on_entitled_task_limit(proc_t p) +{ + if (memorystatus_entitled_max_task_footprint_mb == 0) { + // Entitlement is not supported on this device. 
+ return; + } + proc_list_lock(); + memorystatus_raise_memlimit(p, memorystatus_entitled_max_task_footprint_mb, memorystatus_entitled_max_task_footprint_mb); + proc_list_unlock(); +} #endif /* __arm64__ */ -#if CONFIG_EMBEDDED - SYSCTL_INT(_kern, OID_AUTO, memorystatus_level, CTLFLAG_RD | CTLFLAG_LOCKED, &memorystatus_level, 0, ""); -#endif /* CONFIG_EMBEDDED */ - int memorystatus_get_level(__unused struct proc *p, struct memorystatus_get_level_args *args, __unused int *ret) { @@ -805,7 +826,7 @@ static void memorystatus_get_task_phys_footprint_page_counts(task_t task, uint64_t *internal_pages, uint64_t *internal_compressed_pages, uint64_t *purgeable_nonvolatile_pages, uint64_t *purgeable_nonvolatile_compressed_pages, uint64_t *alternate_accounting_pages, uint64_t *alternate_accounting_compressed_pages, - uint64_t *iokit_mapped_pages, uint64_t *page_table_pages); + uint64_t *iokit_mapped_pages, uint64_t *page_table_pages, uint64_t *frozen_to_swap_pages); static void memorystatus_get_task_memory_region_count(task_t task, uint64_t *count); @@ -838,6 +859,13 @@ extern unsigned int vm_page_inactive_count; extern unsigned int vm_page_throttled_count; extern unsigned int vm_page_purgeable_count; extern unsigned int vm_page_wire_count; +extern unsigned int vm_page_speculative_count; + +#if CONFIG_JETSAM +#define MEMORYSTATUS_LOG_AVAILABLE_PAGES memorystatus_available_pages +#else /* CONFIG_JETSAM */ +#define MEMORYSTATUS_LOG_AVAILABLE_PAGES (vm_page_active_count + vm_page_inactive_count + vm_page_free_count + vm_page_speculative_count) +#endif /* CONFIG_JETSAM */ #if CONFIG_SECLUDED_MEMORY extern unsigned int vm_page_secluded_count; extern unsigned int vm_page_secluded_count_over_target; @@ -894,7 +922,7 @@ lck_grp_attr_t *disconnect_page_mappings_lck_grp_attr; lck_grp_t *disconnect_page_mappings_lck_grp; static lck_mtx_t disconnect_page_mappings_mutex; -extern boolean_t kill_on_no_paging_space; +extern bool kill_on_no_paging_space; #endif /* DEVELOPMENT || DEBUG */ @@ -1132,6 +1160,39 @@ SYSCTL_PROC(_kern, OID_AUTO, memorystatus_disconnect_page_mappings, CTLTYPE_INT #endif /* DEVELOPMENT || DEBUG */ +/* + * Sorts the given bucket. + * + * Input: + * bucket_index - jetsam priority band to be sorted. + * sort_order - JETSAM_SORT_xxx from kern_memorystatus.h + * Currently sort_order is only meaningful when handling + * coalitions. + * + * proc_list_lock must be held by the caller. + */ +static void +memorystatus_sort_bucket_locked(unsigned int bucket_index, int sort_order) +{ + LCK_MTX_ASSERT(proc_list_mlock, LCK_MTX_ASSERT_OWNED); + if (memstat_bucket[bucket_index].count == 0) { + return; + } + + switch (bucket_index) { + case JETSAM_PRIORITY_FOREGROUND: + if (memorystatus_sort_by_largest_coalition_locked(bucket_index, sort_order) == 0) { + /* + * Fall back to per process sorting when zero coalitions are found. + */ + memorystatus_sort_by_largest_process_locked(bucket_index); + } + break; + default: + memorystatus_sort_by_largest_process_locked(bucket_index); + break; + } +} /* * Picks the sorting routine for a given jetsam priority band. @@ -1174,25 +1235,7 @@ memorystatus_sort_bucket(unsigned int bucket_index, int sort_order) #endif proc_list_lock(); - - if (memstat_bucket[bucket_index].count == 0) { - proc_list_unlock(); - return 0; - } - - switch (bucket_index) { - case JETSAM_PRIORITY_FOREGROUND: - if (memorystatus_sort_by_largest_coalition_locked(bucket_index, coal_sort_order) == 0) { - /* - * Fall back to per process sorting when zero coalitions are found. 
- */ - memorystatus_sort_by_largest_process_locked(bucket_index); - } - break; - default: - memorystatus_sort_by_largest_process_locked(bucket_index); - break; - } + memorystatus_sort_bucket_locked(bucket_index, coal_sort_order); proc_list_unlock(); return 0; @@ -1327,6 +1370,11 @@ int fast_jetsam_enabled = 1; int fast_jetsam_enabled = 0; #endif /* __AMP__ */ +#if CONFIG_DIRTYSTATUS_TRACKING +int dirtystatus_tracking_enabled = 0; +SYSCTL_INT(_kern, OID_AUTO, dirtystatus_tracking_enabled, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, &dirtystatus_tracking_enabled, 0, ""); +#endif + /* Routine to find the jetsam state structure for the current jetsam thread */ static inline struct jetsam_thread_state * jetsam_current_thread(void) @@ -1363,7 +1411,7 @@ memorystatus_init(void) lck_mtx_init(&disconnect_page_mappings_mutex, disconnect_page_mappings_lck_grp, NULL); - if (kill_on_no_paging_space == TRUE) { + if (kill_on_no_paging_space) { max_kill_priority = JETSAM_PRIORITY_MAX; } #endif @@ -1466,11 +1514,11 @@ memorystatus_init(void) PE_get_default("kern.jetsam_idle_snapshot", &memorystatus_idle_snapshot, sizeof(memorystatus_idle_snapshot)); } - memorystatus_delta = delta_percentage * atop_64(max_mem) / 100; - memorystatus_available_pages_critical_idle_offset = idle_offset_percentage * atop_64(max_mem) / 100; - memorystatus_available_pages_critical_base = (critical_threshold_percentage / delta_percentage) * memorystatus_delta; - memorystatus_policy_more_free_offset_pages = (policy_more_free_offset_percentage / delta_percentage) * memorystatus_delta; - memorystatus_sysproc_aging_aggr_pages = sysproc_aging_aggr_threshold_percentage * atop_64(max_mem) / 100; + memorystatus_delta = (unsigned int) (delta_percentage * atop_64(max_mem) / 100); + memorystatus_available_pages_critical_idle_offset = (unsigned int) (idle_offset_percentage * atop_64(max_mem) / 100); + memorystatus_available_pages_critical_base = (unsigned int) ((critical_threshold_percentage / delta_percentage) * memorystatus_delta); + memorystatus_policy_more_free_offset_pages = (unsigned int) ((policy_more_free_offset_percentage / delta_percentage) * memorystatus_delta); + memorystatus_sysproc_aging_aggr_pages = (unsigned int) (sysproc_aging_aggr_threshold_percentage * atop_64(max_mem) / 100); /* Jetsam Loop Detection */ if (max_mem <= (512 * 1024 * 1024)) { @@ -1488,29 +1536,53 @@ memorystatus_init(void) #endif /* CONFIG_JETSAM */ +#if __arm64__ + if (!PE_parse_boot_argn("entitled_max_task_pmem", &memorystatus_entitled_max_task_footprint_mb, + sizeof(memorystatus_entitled_max_task_footprint_mb))) { + if (!PE_get_default("kern.entitled_max_task_pmem", &memorystatus_entitled_max_task_footprint_mb, + sizeof(memorystatus_entitled_max_task_footprint_mb))) { + // entitled_max_task_pmem is not supported on this system. + memorystatus_entitled_max_task_footprint_mb = 0; + } + } + if (memorystatus_entitled_max_task_footprint_mb > max_mem / (1UL << 20) || memorystatus_entitled_max_task_footprint_mb < 0) { + os_log_with_startup_serial(OS_LOG_DEFAULT, "Invalid value (%d) for entitled_max_task_pmem. 
Setting to 0", + memorystatus_entitled_max_task_footprint_mb); + } +#endif /* __arm64__ */ + memorystatus_jetsam_snapshot_max = maxproc; memorystatus_jetsam_snapshot_size = sizeof(memorystatus_jetsam_snapshot_t) + (sizeof(memorystatus_jetsam_snapshot_entry_t) * memorystatus_jetsam_snapshot_max); - memorystatus_jetsam_snapshot = - (memorystatus_jetsam_snapshot_t*)kalloc(memorystatus_jetsam_snapshot_size); + memorystatus_jetsam_snapshot = kalloc_flags(memorystatus_jetsam_snapshot_size, Z_WAITOK | Z_ZERO); if (!memorystatus_jetsam_snapshot) { panic("Could not allocate memorystatus_jetsam_snapshot"); } - memorystatus_jetsam_snapshot_copy = - (memorystatus_jetsam_snapshot_t*)kalloc(memorystatus_jetsam_snapshot_size); + memorystatus_jetsam_snapshot_copy = kalloc_flags(memorystatus_jetsam_snapshot_size, Z_WAITOK | Z_ZERO); if (!memorystatus_jetsam_snapshot_copy) { panic("Could not allocate memorystatus_jetsam_snapshot_copy"); } +#if CONFIG_FREEZE + memorystatus_jetsam_snapshot_freezer_max = memorystatus_jetsam_snapshot_max / JETSAM_SNAPSHOT_FREEZER_MAX_FACTOR; + memorystatus_jetsam_snapshot_freezer_size = sizeof(memorystatus_jetsam_snapshot_t) + + (sizeof(memorystatus_jetsam_snapshot_entry_t) * memorystatus_jetsam_snapshot_freezer_max); + + memorystatus_jetsam_snapshot_freezer = kalloc_flags(memorystatus_jetsam_snapshot_freezer_size, Z_WAITOK | Z_ZERO); + if (!memorystatus_jetsam_snapshot_freezer) { + panic("Could not allocate memorystatus_jetsam_snapshot_freezer"); + } +#endif /* CONFIG_FREEZE */ + nanoseconds_to_absolutetime((uint64_t)JETSAM_SNAPSHOT_TIMEOUT_SECS * NSEC_PER_SEC, &memorystatus_jetsam_snapshot_timeout); memset(&memorystatus_at_boot_snapshot, 0, sizeof(memorystatus_jetsam_snapshot_t)); #if CONFIG_FREEZE - memorystatus_freeze_threshold = (freeze_threshold_percentage / delta_percentage) * memorystatus_delta; + memorystatus_freeze_threshold = (unsigned int) ((freeze_threshold_percentage / delta_percentage) * memorystatus_delta); #endif /* Check the boot-arg to see if fast jetsam is allowed */ @@ -1535,7 +1607,8 @@ memorystatus_init(void) } /* Initialize the jetsam_threads state array */ - jetsam_threads = kalloc(sizeof(struct jetsam_thread_state) * max_jetsam_threads); + jetsam_threads = zalloc_permanent(sizeof(struct jetsam_thread_state) * + max_jetsam_threads, ZALIGN(struct jetsam_thread_state)); /* Initialize all the jetsam threads */ for (i = 0; i < max_jetsam_threads; i++) { @@ -1594,7 +1667,7 @@ memorystatus_do_kill(proc_t p, uint32_t cause, os_reason_t jetsam_reason, uint64 printf("memorystatus: killing process %d [%s] in high band %s (%d) - memorystatus_available_pages: %llu\n", p->p_pid, (*p->p_name ? 
p->p_name : "unknown"), memorystatus_priority_band_name(p->p_memstat_effectivepriority), p->p_memstat_effectivepriority, - (uint64_t)memorystatus_available_pages); + (uint64_t)MEMORYSTATUS_LOG_AVAILABLE_PAGES); } /* @@ -2636,6 +2709,12 @@ memorystatus_remove(proc_t p) } #endif +#if DEVELOPMENT || DEBUG + if (p->p_pid == memorystatus_snapshot_owner) { + memorystatus_snapshot_owner = 0; + } +#endif /* DEVELOPMENT || DEBUG */ + if (p) { ret = 0; } else { @@ -2934,6 +3013,10 @@ memorystatus_dirty_set(proc_t p, boolean_t self, uint32_t pcontrol) boolean_t reschedule = FALSE; boolean_t was_dirty = FALSE; boolean_t now_dirty = FALSE; +#if CONFIG_DIRTYSTATUS_TRACKING + boolean_t notify_change = FALSE; + dirty_status_change_event_t change_event; +#endif MEMORYSTATUS_DEBUG(1, "memorystatus_dirty_set(): %d %d 0x%x 0x%x\n", self, p->p_pid, pcontrol, p->p_memstat_dirty); KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_MEMSTAT, BSD_MEMSTAT_DIRTY_SET), p->p_pid, self, pcontrol, 0, 0); @@ -3001,6 +3084,20 @@ memorystatus_dirty_set(proc_t p, boolean_t self, uint32_t pcontrol) if ((was_dirty == TRUE && now_dirty == FALSE) || (was_dirty == FALSE && now_dirty == TRUE)) { +#if CONFIG_DIRTYSTATUS_TRACKING + if (dirtystatus_tracking_enabled) { + uint32_t pages = 0; + memorystatus_get_task_page_counts(p->task, &pages, NULL, NULL); + change_event.dsc_pid = p->p_pid; + change_event.dsc_event_type = (now_dirty == TRUE) ? kDirtyStatusChangedDirty : kDirtyStatusChangedClean; + change_event.dsc_time = mach_absolute_time(); + change_event.dsc_pages = pages; + change_event.dsc_priority = p->p_memstat_effectivepriority; + strlcpy(&change_event.dsc_process_name[0], p->p_name, sizeof(change_event.dsc_process_name)); + notify_change = TRUE; + } +#endif + /* Manage idle exit deferral, if applied */ if ((p->p_memstat_dirty & P_DIRTY_IDLE_EXIT_ENABLED) == P_DIRTY_IDLE_EXIT_ENABLED) { /* @@ -3154,6 +3251,13 @@ memorystatus_dirty_set(proc_t p, boolean_t self, uint32_t pcontrol) exit: proc_list_unlock(); +#if CONFIG_DIRTYSTATUS_TRACKING + // Before returning, let's notify the dirtiness status if we have to + if (notify_change) { + memorystatus_send_dirty_status_change_note(&change_event, sizeof(change_event)); + } +#endif + return ret; } @@ -3256,8 +3360,21 @@ memorystatus_on_terminate(proc_t p) p->p_memstat_dirty |= P_DIRTY_TERMINATED; - if ((p->p_memstat_dirty & (P_DIRTY_TRACK | P_DIRTY_IS_DIRTY)) == P_DIRTY_TRACK) { - /* Clean; mark as terminated and issue SIGKILL */ + if (((p->p_memstat_dirty & (P_DIRTY_TRACK | P_DIRTY_IS_DIRTY)) == P_DIRTY_TRACK) || + (p->p_memstat_state & P_MEMSTAT_SUSPENDED)) { + /* + * Mark as terminated and issue SIGKILL if:- + * - process is clean, or, + * - if process is dirty but suspended. This case is likely + * an extension because apps don't opt into dirty-tracking + * and daemons aren't suspended. + */ +#if DEVELOPMENT || DEBUG + if (p->p_memstat_state & P_MEMSTAT_SUSPENDED) { + os_log(OS_LOG_DEFAULT, "memorystatus: sending suspended process %s (pid %d) SIGKILL", + (*p->p_name ? 
p->p_name : "unknown"), p->p_pid); + } +#endif /* DEVELOPMENT || DEBUG */ sig = SIGKILL; } else { /* Dirty, terminated, or state tracking is unsupported; issue SIGTERM to allow cleanup */ @@ -3284,6 +3401,8 @@ memorystatus_on_suspend(proc_t p) proc_list_unlock(); } +extern uint64_t memorystatus_thaw_count_since_boot; + void memorystatus_on_resume(proc_t p) { @@ -3315,6 +3434,7 @@ memorystatus_on_resume(proc_t p) p->p_memstat_thaw_count++; memorystatus_thaw_count++; + memorystatus_thaw_count_since_boot++; } memorystatus_suspended_count--; @@ -3487,26 +3607,21 @@ memorystatus_thread_block(uint32_t interval_ms, thread_continue_t continuation) static boolean_t memorystatus_avail_pages_below_pressure(void) { -#if CONFIG_EMBEDDED -/* - * Instead of CONFIG_EMBEDDED for these *avail_pages* routines, we should - * key off of the system having dynamic swap support. With full swap support, - * the system shouldn't really need to worry about various page thresholds. - */ +#if CONFIG_JETSAM return memorystatus_available_pages <= memorystatus_available_pages_pressure; -#else /* CONFIG_EMBEDDED */ +#else /* CONFIG_JETSAM */ return FALSE; -#endif /* CONFIG_EMBEDDED */ +#endif /* CONFIG_JETSAM */ } static boolean_t memorystatus_avail_pages_below_critical(void) { -#if CONFIG_EMBEDDED +#if CONFIG_JETSAM return memorystatus_available_pages <= memorystatus_available_pages_critical; -#else /* CONFIG_EMBEDDED */ +#else /* CONFIG_JETSAM */ return FALSE; -#endif /* CONFIG_EMBEDDED */ +#endif /* CONFIG_JETSAM */ } static boolean_t @@ -3519,7 +3634,7 @@ memorystatus_post_snapshot(int32_t priority, uint32_t cause) } else { is_idle_priority = (priority == JETSAM_PRIORITY_IDLE || priority == JETSAM_PRIORITY_IDLE_DEFERRED); } -#if CONFIG_EMBEDDED +#if CONFIG_JETSAM #pragma unused(cause) /* * Don't generate logs for steady-state idle-exit kills, @@ -3529,7 +3644,7 @@ memorystatus_post_snapshot(int32_t priority, uint32_t cause) return !is_idle_priority || memorystatus_idle_snapshot; -#else /* CONFIG_EMBEDDED */ +#else /* CONFIG_JETSAM */ /* * Don't generate logs for steady-state idle-exit kills, * unless @@ -3541,20 +3656,20 @@ memorystatus_post_snapshot(int32_t priority, uint32_t cause) boolean_t snapshot_eligible_kill_cause = (is_reason_thrashing(cause) || is_reason_zone_map_exhaustion(cause)); return !is_idle_priority || memorystatus_idle_snapshot || snapshot_eligible_kill_cause; -#endif /* CONFIG_EMBEDDED */ +#endif /* CONFIG_JETSAM */ } static boolean_t memorystatus_action_needed(void) { -#if CONFIG_EMBEDDED +#if CONFIG_JETSAM return is_reason_thrashing(kill_under_pressure_cause) || is_reason_zone_map_exhaustion(kill_under_pressure_cause) || memorystatus_available_pages <= memorystatus_available_pages_pressure; -#else /* CONFIG_EMBEDDED */ +#else /* CONFIG_JETSAM */ return is_reason_thrashing(kill_under_pressure_cause) || is_reason_zone_map_exhaustion(kill_under_pressure_cause); -#endif /* CONFIG_EMBEDDED */ +#endif /* CONFIG_JETSAM */ } static boolean_t @@ -3655,11 +3770,28 @@ memorystatus_aggressive_jetsam_needed_sysproc_aging(__unused int jld_eval_aggres #if DEVELOPMENT || DEBUG printf("memorystatus: aggressive%d: [%s] Bad Candidate Threshold Check (total: %d, bad: %d, threshold: %d %%); Memory Pressure Check (available_pgs: %llu, threshold_pgs: %llu)\n", jld_eval_aggressive_count, aggressive_jetsam_needed ? 
"PASSED" : "FAILED", *total_candidates, bad_candidates, - kJetsamHighRelaunchCandidatesThreshold, (uint64_t)memorystatus_available_pages, (uint64_t)memorystatus_sysproc_aging_aggr_pages); + kJetsamHighRelaunchCandidatesThreshold, (uint64_t)MEMORYSTATUS_LOG_AVAILABLE_PAGES, (uint64_t)memorystatus_sysproc_aging_aggr_pages); #endif /* DEVELOPMENT || DEBUG */ return aggressive_jetsam_needed; } +/* + * Gets memory back from various system caches. + * Called before jetsamming in the foreground band in the hope that we'll + * avoid a jetsam. + */ +static void +memorystatus_approaching_fg_band(boolean_t *corpse_list_purged) +{ + assert(corpse_list_purged != NULL); + pmap_release_pages_fast(); + memorystatus_issue_fg_band_notify(); + if (total_corpses_count() > 0 && !*corpse_list_purged) { + task_purge_all_corpses(); + *corpse_list_purged = TRUE; + } +} + static boolean_t memorystatus_aggressive_jetsam_needed_default(__unused int jld_eval_aggressive_count, int *jld_idle_kills, int jld_idle_kill_candidates, int *total_candidates, int *elevated_bucket_count) { @@ -3811,17 +3943,7 @@ memorystatus_act_aggressive(uint32_t cause, os_reason_t jetsam_reason, int *jld_ jld_eval_aggressive_count++; if (jld_eval_aggressive_count == memorystatus_jld_eval_aggressive_count) { - memorystatus_issue_fg_band_notify(); - - /* - * If we reach this aggressive cycle, corpses might be causing memory pressure. - * So, in an effort to avoid jetsams in the FG band, we will attempt to purge - * corpse memory prior to this final march through JETSAM_PRIORITY_UI_SUPPORT. - */ - if (total_corpses_count() > 0 && !*corpse_list_purged) { - task_purge_all_corpses(); - *corpse_list_purged = TRUE; - } + memorystatus_approaching_fg_band(corpse_list_purged); } else if (jld_eval_aggressive_count > memorystatus_jld_eval_aggressive_count) { /* * Bump up the jetsam priority limit (eg: the bucket index) @@ -3930,13 +4052,16 @@ memorystatus_thread(void *param __unused, wait_result_t wr __unused) } else { jetsam_thread->limit_to_low_bands = TRUE; } +#if CONFIG_THREAD_GROUPS + thread_group_vm_add(); +#endif thread_set_thread_name(current_thread(), name); jetsam_thread->inited = TRUE; memorystatus_thread_block(0, memorystatus_thread); } KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_MEMSTAT, BSD_MEMSTAT_SCAN) | DBG_FUNC_START, - memorystatus_available_pages, memorystatus_jld_enabled, memorystatus_jld_eval_period_msecs, memorystatus_jld_eval_aggressive_count, 0); + MEMORYSTATUS_LOG_AVAILABLE_PAGES, memorystatus_jld_enabled, memorystatus_jld_eval_period_msecs, memorystatus_jld_eval_aggressive_count, 0); /* * Jetsam aware version. @@ -4041,11 +4166,7 @@ memorystatus_thread(void *param __unused, wait_result_t wr __unused) * anybody wanting to know this. */ if (priority >= JETSAM_PRIORITY_UI_SUPPORT) { - memorystatus_issue_fg_band_notify(); - if (total_corpses_count() > 0 && !corpse_list_purged) { - task_purge_all_corpses(); - corpse_list_purged = TRUE; - } + memorystatus_approaching_fg_band(&corpse_list_purged); } goto done; } @@ -4053,7 +4174,9 @@ memorystatus_thread(void *param __unused, wait_result_t wr __unused) if (memorystatus_avail_pages_below_critical()) { /* * Still under pressure and unable to kill a process - purge corpse memory + * and get everything back from the pmap. 
*/ + pmap_release_pages_fast(); if (total_corpses_count() > 0) { task_purge_all_corpses(); corpse_list_purged = TRUE; @@ -4063,7 +4186,7 @@ memorystatus_thread(void *param __unused, wait_result_t wr __unused) /* * Still under pressure and unable to kill a process - panic */ - panic("memorystatus_jetsam_thread: no victim! available pages:%llu\n", (uint64_t)memorystatus_available_pages); + panic("memorystatus_jetsam_thread: no victim! available pages:%llu\n", (uint64_t)MEMORYSTATUS_LOG_AVAILABLE_PAGES); } } @@ -4114,7 +4237,7 @@ done: } KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_MEMSTAT, BSD_MEMSTAT_SCAN) | DBG_FUNC_END, - memorystatus_available_pages, total_memory_reclaimed, 0, 0, 0); + MEMORYSTATUS_LOG_AVAILABLE_PAGES, total_memory_reclaimed, 0, 0, 0); memorystatus_thread_block(0, memorystatus_thread); } @@ -4163,7 +4286,7 @@ memorystatus_on_ledger_footprint_exceeded(boolean_t warning, boolean_t memlimit_ * This is a warning path which implies that the current process is close, but has * not yet exceeded its per-process memory limit. */ - if (memorystatus_warn_process(p->p_pid, memlimit_is_active, memlimit_is_fatal, FALSE /* not exceeded */) != TRUE) { + if (memorystatus_warn_process(p, memlimit_is_active, memlimit_is_fatal, FALSE /* not exceeded */) != TRUE) { /* Print warning, since it's possible that task has not registered for pressure notifications */ os_log(OS_LOG_DEFAULT, "memorystatus_on_ledger_footprint_exceeded: failed to warn the current task (%d exiting, or no handler registered?).\n", p->p_pid); } @@ -4200,7 +4323,7 @@ memorystatus_on_ledger_footprint_exceeded(boolean_t warning, boolean_t memlimit_ * This path implies the current process has exceeded a non-fatal (soft) memory limit. * Failure to send note is ignored here. */ - (void)memorystatus_warn_process(p->p_pid, memlimit_is_active, memlimit_is_fatal, TRUE /* exceeded */); + (void)memorystatus_warn_process(p, memlimit_is_active, memlimit_is_fatal, TRUE /* exceeded */); #endif /* VM_PRESSURE_EVENTS */ } @@ -4377,7 +4500,7 @@ memorystatus_kill_specific_process(pid_t victim_pid, uint32_t cause, os_reason_t os_log_with_startup_serial(OS_LOG_DEFAULT, "%lu.%03d memorystatus: killing_specific_process pid %d [%s] (%s %d) %lluKB - memorystatus_available_pages: %llu\n", (unsigned long)tv_sec, tv_msec, victim_pid, ((p && *p->p_name) ? p->p_name : "unknown"), memorystatus_kill_cause_name[cause], (p ? p->p_memstat_effectivepriority: -1), - footprint_of_killed_proc >> 10, (uint64_t)memorystatus_available_pages); + footprint_of_killed_proc >> 10, (uint64_t)MEMORYSTATUS_LOG_AVAILABLE_PAGES); proc_rele(p); @@ -4556,11 +4679,9 @@ set_vm_map_fork_pidwatch(task_t task, uint64_t x) * By default, a vm_map_fork is allowed to proceed. * * A few simple policy assumptions: - * Desktop platform is not considered in this path. - * The vm_map_fork is always allowed. - * * If the device has a zero system-wide task limit, - * then the vm_map_fork is allowed. + * then the vm_map_fork is allowed. macOS always has a zero + * system wide task limit (unless overriden by a boot-arg). 
* * And if a process's memory footprint calculates less * than or equal to quarter of the system-wide task limit, @@ -4574,8 +4695,6 @@ memorystatus_allowed_vm_map_fork(task_t task) { boolean_t is_allowed = TRUE; /* default */ -#if CONFIG_EMBEDDED - uint64_t footprint_in_bytes; uint64_t max_allowed_bytes; @@ -4602,7 +4721,6 @@ memorystatus_allowed_vm_map_fork(task_t task) set_vm_map_fork_pidwatch(task, MEMORYSTATUS_VM_MAP_FORK_NOT_ALLOWED); return !is_allowed; } -#endif /* CONFIG_EMBEDDED */ set_vm_map_fork_pidwatch(task, MEMORYSTATUS_VM_MAP_FORK_ALLOWED); return is_allowed; @@ -4637,7 +4755,7 @@ memorystatus_get_task_phys_footprint_page_counts(task_t task, uint64_t *internal_pages, uint64_t *internal_compressed_pages, uint64_t *purgeable_nonvolatile_pages, uint64_t *purgeable_nonvolatile_compressed_pages, uint64_t *alternate_accounting_pages, uint64_t *alternate_accounting_compressed_pages, - uint64_t *iokit_mapped_pages, uint64_t *page_table_pages) + uint64_t *iokit_mapped_pages, uint64_t *page_table_pages, uint64_t *frozen_to_swap_pages) { assert(task); @@ -4672,6 +4790,57 @@ memorystatus_get_task_phys_footprint_page_counts(task_t task, if (page_table_pages) { *page_table_pages = (get_task_page_table(task) / PAGE_SIZE_64); } + +#if CONFIG_FREEZE + if (frozen_to_swap_pages) { + *frozen_to_swap_pages = (get_task_frozen_to_swap(task) / PAGE_SIZE_64); + } +#else /* CONFIG_FREEZE */ +#pragma unused(frozen_to_swap_pages) +#endif /* CONFIG_FREEZE */ +} + +#if CONFIG_FREEZE +/* + * Copies the source entry into the destination snapshot. + * Returns true on success. Fails if the destination snapshot is full. + * Caller must hold the proc list lock. + */ +static bool +memorystatus_jetsam_snapshot_copy_entry_locked(memorystatus_jetsam_snapshot_t *dst_snapshot, unsigned int dst_snapshot_size, const memorystatus_jetsam_snapshot_entry_t *src_entry) +{ + LCK_MTX_ASSERT(proc_list_mlock, LCK_MTX_ASSERT_OWNED); + assert(dst_snapshot); + + if (dst_snapshot->entry_count == dst_snapshot_size) { + /* Destination snapshot is full. Can not be updated until it is consumed. 
*/ + return false; + } + if (dst_snapshot->entry_count == 0) { + memorystatus_init_jetsam_snapshot_header(dst_snapshot); + } + memorystatus_jetsam_snapshot_entry_t *dst_entry = &dst_snapshot->entries[dst_snapshot->entry_count++]; + memcpy(dst_entry, src_entry, sizeof(memorystatus_jetsam_snapshot_entry_t)); + return true; +} +#endif /* CONFIG_FREEZE */ + +static bool +memorystatus_init_jetsam_snapshot_entry_with_kill_locked(memorystatus_jetsam_snapshot_t *snapshot, proc_t p, uint32_t kill_cause, uint64_t killtime, memorystatus_jetsam_snapshot_entry_t **entry) +{ + LCK_MTX_ASSERT(proc_list_mlock, LCK_MTX_ASSERT_OWNED); + memorystatus_jetsam_snapshot_entry_t *snapshot_list = snapshot->entries; + size_t i = snapshot->entry_count; + + if (memorystatus_init_jetsam_snapshot_entry_locked(p, &snapshot_list[i], (snapshot->js_gencount)) == TRUE) { + *entry = &snapshot_list[i]; + (*entry)->killed = kill_cause; + (*entry)->jse_killtime = killtime; + + snapshot->entry_count = i + 1; + return true; + } + return false; } /* @@ -4687,6 +4856,9 @@ memorystatus_update_jetsam_snapshot_entry_locked(proc_t p, uint32_t kill_cause, memorystatus_jetsam_snapshot_entry_t *snapshot_list = NULL; unsigned int i; +#if CONFIG_FREEZE + bool copied_to_freezer_snapshot = false; +#endif /* CONFIG_FREEZE */ LCK_MTX_ASSERT(proc_list_mlock, LCK_MTX_ASSERT_OWNED); @@ -4695,7 +4867,7 @@ memorystatus_update_jetsam_snapshot_entry_locked(proc_t p, uint32_t kill_cause, * No active snapshot. * Nothing to do. */ - return; + goto exit; } /* @@ -4768,11 +4940,12 @@ memorystatus_update_jetsam_snapshot_entry_locked(proc_t p, uint32_t kill_cause, uint64_t alternate_accounting_compressed_pages = 0; uint64_t iokit_mapped_pages = 0; uint64_t page_table_pages = 0; + uint64_t frozen_to_swap_pages = 0; memorystatus_get_task_phys_footprint_page_counts(p->task, &internal_pages, &internal_compressed_pages, &purgeable_nonvolatile_pages, &purgeable_nonvolatile_compressed_pages, &alternate_accounting_pages, &alternate_accounting_compressed_pages, - &iokit_mapped_pages, &page_table_pages); + &iokit_mapped_pages, &page_table_pages, &frozen_to_swap_pages); entry->jse_internal_pages = internal_pages; entry->jse_internal_compressed_pages = internal_compressed_pages; @@ -4782,6 +4955,7 @@ memorystatus_update_jetsam_snapshot_entry_locked(proc_t p, uint32_t kill_cause, entry->jse_alternate_accounting_compressed_pages = alternate_accounting_compressed_pages; entry->jse_iokit_mapped_pages = iokit_mapped_pages; entry->jse_page_table_pages = page_table_pages; + entry->jse_frozen_to_swap_pages = frozen_to_swap_pages; uint64_t region_count = 0; memorystatus_get_task_memory_region_count(p->task, ®ion_count); @@ -4804,15 +4978,8 @@ memorystatus_update_jetsam_snapshot_entry_locked(proc_t p, uint32_t kill_cause, */ assert(memorystatus_jetsam_snapshot_count == snapshot->entry_count); - unsigned int next = memorystatus_jetsam_snapshot_count; - - if (memorystatus_init_jetsam_snapshot_entry_locked(p, &snapshot_list[next], (snapshot->js_gencount)) == TRUE) { - entry = &snapshot_list[next]; - entry->killed = kill_cause; - entry->jse_killtime = killtime; - - snapshot->entry_count = ++next; - memorystatus_jetsam_snapshot_count = next; + if (memorystatus_init_jetsam_snapshot_entry_with_kill_locked(snapshot, p, kill_cause, killtime, &entry)) { + memorystatus_jetsam_snapshot_count++; if (memorystatus_jetsam_snapshot_count >= memorystatus_jetsam_snapshot_max) { /* @@ -4828,7 +4995,23 @@ memorystatus_update_jetsam_snapshot_entry_locked(proc_t p, uint32_t kill_cause, } exit: - if 
(entry == NULL) { + if (entry) { +#if CONFIG_FREEZE + if (memorystatus_jetsam_use_freezer_snapshot && isApp(p)) { + /* This is an app kill. Record it in the freezer snapshot so dasd can incorporate this in its recommendations. */ + copied_to_freezer_snapshot = memorystatus_jetsam_snapshot_copy_entry_locked(memorystatus_jetsam_snapshot_freezer, memorystatus_jetsam_snapshot_freezer_max, entry); + if (copied_to_freezer_snapshot && memorystatus_jetsam_snapshot_freezer->entry_count == memorystatus_jetsam_snapshot_freezer_max) { + /* + * We just used the last slot in the freezer snapshot buffer. + * We only want to log it once... so we do it here + * when we notice we've hit the max. + */ + os_log_error(OS_LOG_DEFAULT, "memorystatus: WARNING freezer snapshot buffer is full, count %zu", + memorystatus_jetsam_snapshot_freezer->entry_count); + } + } +#endif /* CONFIG_FREEZE */ + } else { /* * If we reach here, the snapshot buffer could not be updated. * Most likely, the buffer is full, in which case we would have @@ -4840,6 +5023,25 @@ exit: MEMORYSTATUS_DEBUG(4, "memorystatus_update_jetsam_snapshot_entry_locked: failed to update pid %d, priority %d, count %d\n", p->p_pid, p->p_memstat_effectivepriority, memorystatus_jetsam_snapshot_count); + +#if CONFIG_FREEZE + /* We still attempt to record this in the freezer snapshot */ + if (memorystatus_jetsam_use_freezer_snapshot && isApp(p)) { + snapshot = memorystatus_jetsam_snapshot_freezer; + if (snapshot->entry_count < memorystatus_jetsam_snapshot_freezer_max) { + copied_to_freezer_snapshot = memorystatus_init_jetsam_snapshot_entry_with_kill_locked(snapshot, p, kill_cause, killtime, &entry); + if (copied_to_freezer_snapshot && memorystatus_jetsam_snapshot_freezer->entry_count == memorystatus_jetsam_snapshot_freezer_max) { + /* + * We just used the last slot in the freezer snapshot buffer. + * We only want to log it once... so we do it here + * when we notice we've hit the max. 
+ */ + os_log_error(OS_LOG_DEFAULT, "memorystatus: WARNING freezer snapshot buffer is full, count %zu", + memorystatus_jetsam_snapshot_freezer->entry_count); + } + } + } +#endif /* CONFIG_FREEZE */ } return; @@ -4926,6 +5128,7 @@ memorystatus_init_jetsam_snapshot_entry_locked(proc_t p, memorystatus_jetsam_sna uint64_t alternate_accounting_compressed_pages = 0; uint64_t iokit_mapped_pages = 0; uint64_t page_table_pages = 0; + uint64_t frozen_to_swap_pages = 0; uint64_t region_count = 0; uint64_t cids[COALITION_NUM_TYPES]; @@ -4943,7 +5146,7 @@ memorystatus_init_jetsam_snapshot_entry_locked(proc_t p, memorystatus_jetsam_sna memorystatus_get_task_phys_footprint_page_counts(p->task, &internal_pages, &internal_compressed_pages, &purgeable_nonvolatile_pages, &purgeable_nonvolatile_compressed_pages, &alternate_accounting_pages, &alternate_accounting_compressed_pages, - &iokit_mapped_pages, &page_table_pages); + &iokit_mapped_pages, &page_table_pages, &frozen_to_swap_pages); entry->jse_internal_pages = internal_pages; entry->jse_internal_compressed_pages = internal_compressed_pages; @@ -4953,6 +5156,7 @@ memorystatus_init_jetsam_snapshot_entry_locked(proc_t p, memorystatus_jetsam_sna entry->jse_alternate_accounting_compressed_pages = alternate_accounting_compressed_pages; entry->jse_iokit_mapped_pages = iokit_mapped_pages; entry->jse_page_table_pages = page_table_pages; + entry->jse_frozen_to_swap_pages = frozen_to_swap_pages; memorystatus_get_task_memory_region_count(p->task, ®ion_count); entry->jse_memory_region_count = region_count; @@ -5034,6 +5238,15 @@ memorystatus_init_at_boot_snapshot() memorystatus_at_boot_snapshot.snapshot_time = mach_absolute_time(); } +static void +memorystatus_init_jetsam_snapshot_header(memorystatus_jetsam_snapshot_t *snapshot) +{ + memorystatus_init_snapshot_vmstats(snapshot); + snapshot->snapshot_time = mach_absolute_time(); + snapshot->notification_time = 0; + snapshot->js_gencount = 0; +} + static void memorystatus_init_jetsam_snapshot_locked(memorystatus_jetsam_snapshot_t *od_snapshot, uint32_t ods_list_count ) { @@ -5062,13 +5275,7 @@ memorystatus_init_jetsam_snapshot_locked(memorystatus_jetsam_snapshot_t *od_snap snapshot_max = memorystatus_jetsam_snapshot_max; } - /* - * Init the snapshot header information - */ - memorystatus_init_snapshot_vmstats(snapshot); - snapshot->snapshot_time = mach_absolute_time(); - snapshot->notification_time = 0; - snapshot->js_gencount = 0; + memorystatus_init_jetsam_snapshot_header(snapshot); next_p = memorystatus_get_first_proc_locked(&b, TRUE); while (next_p) { @@ -5101,7 +5308,7 @@ memorystatus_init_jetsam_snapshot_locked(memorystatus_jetsam_snapshot_t *od_snap #if CONFIG_JETSAM static int -memorystatus_cmd_set_panic_bits(user_addr_t buffer, uint32_t buffer_size) +memorystatus_cmd_set_panic_bits(user_addr_t buffer, size_t buffer_size) { int ret; memorystatus_jetsam_panic_options_t debug; @@ -5126,17 +5333,113 @@ memorystatus_cmd_set_panic_bits(user_addr_t buffer, uint32_t buffer_size) } #endif /* CONFIG_JETSAM */ +/* + * Verify that the given bucket has been sorted correctly. + * + * Walks through the bucket and verifies that all pids in the + * expected_order buffer are in that bucket and in the same + * relative order. + * + * The proc_list_lock must be held by the caller. 
+ */ +static int +memorystatus_verify_sort_order(unsigned int bucket_index, pid_t *expected_order, size_t num_pids) +{ + LCK_MTX_ASSERT(proc_list_mlock, LCK_MTX_ASSERT_OWNED); + + int error = 0; + proc_t p = NULL; + size_t i = 0; + + /* + * NB: We allow other procs to be mixed in within the expected ones. + * We just need the expected procs to be in the right order relative to each other. + */ + p = memorystatus_get_first_proc_locked(&bucket_index, FALSE); + while (p) { + if (p->p_pid == expected_order[i]) { + i++; + } + if (i == num_pids) { + break; + } + p = memorystatus_get_next_proc_locked(&bucket_index, p, FALSE); + } + if (i != num_pids) { + char buffer[128]; + size_t len = sizeof(buffer); + size_t buffer_idx = 0; + os_log_error(OS_LOG_DEFAULT, "memorystatus_verify_sort_order: Processes in bucket %d were not sorted properly\n", bucket_index); + for (i = 0; i < num_pids; i++) { + int num_written = snprintf(buffer + buffer_idx, len - buffer_idx, "%d,", expected_order[i]); + if (num_written <= 0) { + break; + } + if (buffer_idx + (unsigned int) num_written >= len) { + break; + } + buffer_idx += num_written; + } + os_log_error(OS_LOG_DEFAULT, "memorystatus_verify_sort_order: Expected order [%s]", buffer); + memset(buffer, 0, len); + buffer_idx = 0; + p = memorystatus_get_first_proc_locked(&bucket_index, FALSE); + i = 0; + os_log_error(OS_LOG_DEFAULT, "memorystatus_verify_sort_order: Actual order:"); + while (p) { + int num_written; + if (buffer_idx == 0) { + num_written = snprintf(buffer + buffer_idx, len - buffer_idx, "%zu: %d,", i, p->p_pid); + } else { + num_written = snprintf(buffer + buffer_idx, len - buffer_idx, "%d,", p->p_pid); + } + if (num_written <= 0) { + break; + } + buffer_idx += (unsigned int) num_written; + assert(buffer_idx <= len); + if (i % 10 == 0) { + os_log_error(OS_LOG_DEFAULT, "memorystatus_verify_sort_order: %s", buffer); + buffer_idx = 0; + } + p = memorystatus_get_next_proc_locked(&bucket_index, p, FALSE); + i++; + } + if (buffer_idx != 0) { + os_log_error(OS_LOG_DEFAULT, "memorystatus_verify_sort_order: %s", buffer); + } + error = EINVAL; + } + return error; +} + /* * Triggers a sort_order on a specified jetsam priority band. * This is for testing only, used to force a path through the sort * function. */ static int -memorystatus_cmd_test_jetsam_sort(int priority, int sort_order) +memorystatus_cmd_test_jetsam_sort(int priority, + int sort_order, + user_addr_t expected_order_user, + size_t expected_order_user_len) { int error = 0; - unsigned int bucket_index = 0; + static size_t kMaxPids = 8; + pid_t expected_order[kMaxPids]; + size_t copy_size = sizeof(expected_order); + size_t num_pids; + + if (expected_order_user_len < copy_size) { + copy_size = expected_order_user_len; + } + num_pids = copy_size / sizeof(pid_t); + + error = copyin(expected_order_user, expected_order, copy_size); + if (error != 0) { + return error; + } if (priority == -1) { /* Use as shorthand for default priority */ @@ -5145,7 +5448,19 @@ memorystatus_cmd_test_jetsam_sort(int priority, int sort_order) bucket_index = (unsigned int)priority; } - error = memorystatus_sort_bucket(bucket_index, sort_order); + /* + * Acquire lock before sorting so we can check the sort order + * while still holding the lock. 
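With the expected-order buffer plumbed through, the sort test can be driven end to end from a test harness: pass the pids you expect to come out first, and the kernel verifies their relative order while still holding the proc list lock. A hedged sketch of the user-space side, assuming the private memorystatus_control() syscall wrapper and the MEMORYSTATUS_CMD_TEST_JETSAM_SORT command from the memorystatus headers (DEVELOPMENT/DEBUG kernels only; the prototype and constants are assumptions from those headers, not from this hunk):

    /* Hypothetical test harness: sort the foreground band and verify the expected pid order. */
    pid_t expected[3] = { pid_a, pid_b, pid_c };      /* placeholder pids from the test setup */
    int rc = memorystatus_control(MEMORYSTATUS_CMD_TEST_JETSAM_SORT,
                 JETSAM_PRIORITY_FOREGROUND,          /* priority band to sort */
                 0,                                   /* sort_order (a JETSAM_SORT_* value) */
                 expected, sizeof(expected));         /* expected relative pid order */
    /* A mismatch surfaces as an EINVAL failure, with the actual order logged by the kernel. */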
+ */ + proc_list_lock(); + + memorystatus_sort_bucket_locked(bucket_index, sort_order); + + if (expected_order_user != CAST_USER_ADDR_T(NULL) && expected_order_user_len > 0) { + error = memorystatus_verify_sort_order(bucket_index, expected_order, num_pids); + } + + proc_list_unlock(); return error; } @@ -5287,7 +5602,7 @@ memorystatus_kill_proc(proc_t p, uint32_t cause, os_reason_t jetsam_reason, bool (unsigned long)tv_sec, tv_msec, kill_reason_string, aPid, ((p && *p->p_name) ? p->p_name : "unknown"), memorystatus_kill_cause_name[cause], aPid_ep, - (*footprint_of_killed_proc) >> 10, (uint64_t)memorystatus_available_pages); + (*footprint_of_killed_proc) >> 10, (uint64_t)MEMORYSTATUS_LOG_AVAILABLE_PAGES); return retval; } @@ -5312,7 +5627,7 @@ memorystatus_kill_top_process(boolean_t any, boolean_t sort_flag, uint32_t cause #endif KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_MEMSTAT, BSD_MEMSTAT_JETSAM) | DBG_FUNC_START, - memorystatus_available_pages, 0, 0, 0, 0); + MEMORYSTATUS_LOG_AVAILABLE_PAGES, 0, 0, 0, 0); #if CONFIG_JETSAM @@ -5495,7 +5810,7 @@ exit: } KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_MEMSTAT, BSD_MEMSTAT_JETSAM) | DBG_FUNC_END, - memorystatus_available_pages, killed ? aPid : 0, killed, *memory_reclaimed, 0); + MEMORYSTATUS_LOG_AVAILABLE_PAGES, killed ? aPid : 0, killed, *memory_reclaimed, 0); return killed; } @@ -5524,7 +5839,7 @@ memorystatus_kill_processes_aggressive(uint32_t cause, int aggr_count, *memory_reclaimed = 0; KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_MEMSTAT, BSD_MEMSTAT_JETSAM) | DBG_FUNC_START, - memorystatus_available_pages, priority_max, 0, 0, 0); + MEMORYSTATUS_LOG_AVAILABLE_PAGES, priority_max, 0, 0, 0); if (priority_max >= JETSAM_PRIORITY_FOREGROUND) { /* @@ -5638,7 +5953,7 @@ memorystatus_kill_processes_aggressive(uint32_t cause, int aggr_count, (unsigned long)tv_sec, tv_msec, ((aPid_ep == JETSAM_PRIORITY_IDLE) ? "killing_idle_process_aggressive" : "killing_top_process_aggressive"), aggr_count, aPid, (*p->p_name ? p->p_name : "unknown"), - memorystatus_kill_cause_name[cause], aPid_ep, (uint64_t)memorystatus_available_pages); + memorystatus_kill_cause_name[cause], aPid_ep, (uint64_t)MEMORYSTATUS_LOG_AVAILABLE_PAGES); memorystatus_level_snapshot = memorystatus_level; @@ -5723,7 +6038,7 @@ exit: } KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_MEMSTAT, BSD_MEMSTAT_JETSAM) | DBG_FUNC_END, - memorystatus_available_pages, 0, kill_count, *memory_reclaimed, 0); + MEMORYSTATUS_LOG_AVAILABLE_PAGES, 0, kill_count, *memory_reclaimed, 0); if (kill_count > 0) { return TRUE; @@ -5742,7 +6057,7 @@ memorystatus_kill_hiwat_proc(uint32_t *errors, boolean_t *purged, uint64_t *memo uint32_t aPid_ep; os_reason_t jetsam_reason = OS_REASON_NULL; KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_MEMSTAT, BSD_MEMSTAT_JETSAM_HIWAT) | DBG_FUNC_START, - memorystatus_available_pages, 0, 0, 0, 0); + MEMORYSTATUS_LOG_AVAILABLE_PAGES, 0, 0, 0, 0); jetsam_reason = os_reason_create(OS_REASON_JETSAM, JETSAM_REASON_MEMORY_HIGHWATER); if (jetsam_reason == OS_REASON_NULL) { @@ -5866,7 +6181,7 @@ exit: } KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_MEMSTAT, BSD_MEMSTAT_JETSAM_HIWAT) | DBG_FUNC_END, - memorystatus_available_pages, killed ? aPid : 0, killed, *memory_reclaimed, 0); + MEMORYSTATUS_LOG_AVAILABLE_PAGES, killed ? 
aPid : 0, killed, *memory_reclaimed, 0); return killed; } @@ -5893,7 +6208,7 @@ memorystatus_kill_elevated_process(uint32_t cause, os_reason_t jetsam_reason, un KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_MEMSTAT, BSD_MEMSTAT_JETSAM) | DBG_FUNC_START, - memorystatus_available_pages, 0, 0, 0, 0); + MEMORYSTATUS_LOG_AVAILABLE_PAGES, 0, 0, 0, 0); #if CONFIG_FREEZE boolean_t consider_frozen_only = FALSE; @@ -5938,7 +6253,7 @@ memorystatus_kill_elevated_process(uint32_t cause, os_reason_t jetsam_reason, un MEMORYSTATUS_DEBUG(1, "jetsam: elevated%d process pid %d [%s] - memorystatus_available_pages: %d\n", aggr_count, aPid, (*p->p_name ? p->p_name : "unknown"), - memorystatus_available_pages); + MEMORYSTATUS_LOG_AVAILABLE_PAGES); #endif /* DEVELOPMENT || DEBUG */ if (memorystatus_jetsam_snapshot_count == 0) { @@ -5970,7 +6285,7 @@ memorystatus_kill_elevated_process(uint32_t cause, os_reason_t jetsam_reason, un aggr_count, aPid, ((p && *p->p_name) ? p->p_name : "unknown"), memorystatus_kill_cause_name[cause], aPid_ep, - footprint_of_killed_proc >> 10, (uint64_t)memorystatus_available_pages); + footprint_of_killed_proc >> 10, (uint64_t)MEMORYSTATUS_LOG_AVAILABLE_PAGES); /* Success? */ if (killed) { @@ -6023,7 +6338,7 @@ exit: } KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_MEMSTAT, BSD_MEMSTAT_JETSAM) | DBG_FUNC_END, - memorystatus_available_pages, killed ? aPid : 0, kill_count, *memory_reclaimed, 0); + MEMORYSTATUS_LOG_AVAILABLE_PAGES, killed ? aPid : 0, kill_count, *memory_reclaimed, 0); return killed; } @@ -6169,13 +6484,11 @@ memorystatus_get_priority_list(memorystatus_priority_entry_t **list_ptr, size_t return EINVAL; } - *list_ptr = (memorystatus_priority_entry_t*)kalloc(*list_size); + *list_ptr = kheap_alloc(KHEAP_TEMP, *list_size, Z_WAITOK | Z_ZERO); if (!*list_ptr) { return ENOMEM; } - memset(*list_ptr, 0, *list_size); - *buffer_size = *list_size; *list_size = 0; @@ -6279,12 +6592,13 @@ memorystatus_cmd_get_priority_list(pid_t pid, user_addr_t buffer, size_t buffer_ } if (list) { - kfree(list, buffer_size); + kheap_free(KHEAP_TEMP, list, buffer_size); } } if (error == 0) { - *retval = list_size; + assert(list_size <= INT32_MAX); + *retval = (int32_t) list_size; } return error; @@ -6344,7 +6658,7 @@ memorystatus_update_levels_locked(boolean_t critical_only) } #if VM_PRESSURE_EVENTS - memorystatus_available_pages_pressure = pressure_threshold_percentage * (atop_64(max_mem) / 100); + memorystatus_available_pages_pressure = (int32_t)(pressure_threshold_percentage * (atop_64(max_mem) / 100)); #endif } @@ -6475,6 +6789,36 @@ memorystatus_get_jetsam_snapshot_copy(memorystatus_jetsam_snapshot_t **snapshot, return 0; } +#if CONFIG_FREEZE +static int +memorystatus_get_jetsam_snapshot_freezer(memorystatus_jetsam_snapshot_t **snapshot, size_t *snapshot_size, boolean_t size_only) +{ + size_t input_size = *snapshot_size; + + if (memorystatus_jetsam_snapshot_freezer->entry_count > 0) { + *snapshot_size = sizeof(memorystatus_jetsam_snapshot_t) + (sizeof(memorystatus_jetsam_snapshot_entry_t) * (memorystatus_jetsam_snapshot_freezer->entry_count)); + } else { + *snapshot_size = 0; + } + assert(*snapshot_size <= memorystatus_jetsam_snapshot_freezer_size); + + if (size_only) { + return 0; + } + + if (input_size < *snapshot_size) { + return EINVAL; + } + + *snapshot = memorystatus_jetsam_snapshot_freezer; + + MEMORYSTATUS_DEBUG(7, "memorystatus_get_jetsam_snapshot_freezer: returned inputsize (%ld), snapshot_size(%ld), listcount(%ld)\n", + (long)input_size, (long)*snapshot_size, 
(long)memorystatus_jetsam_snapshot_freezer->entry_count); + + return 0; +} +#endif /* CONFIG_FREEZE */ + static int memorystatus_get_on_demand_snapshot(memorystatus_jetsam_snapshot_t **snapshot, size_t *snapshot_size, boolean_t size_only) { @@ -6501,7 +6845,7 @@ memorystatus_get_on_demand_snapshot(memorystatus_jetsam_snapshot_t **snapshot, s /* * Allocate and initialize a snapshot buffer. */ - ods = (memorystatus_jetsam_snapshot_t *)kalloc(*snapshot_size); + ods = kalloc(*snapshot_size); if (!ods) { return ENOMEM; } @@ -6562,6 +6906,9 @@ memorystatus_cmd_get_jetsam_snapshot(int32_t flags, user_addr_t buffer, size_t b boolean_t is_default_snapshot = FALSE; boolean_t is_on_demand_snapshot = FALSE; boolean_t is_at_boot_snapshot = FALSE; +#if CONFIG_FREEZE + bool is_freezer_snapshot = false; +#endif /* CONFIG_FREEZE */ memorystatus_jetsam_snapshot_t *snapshot; size_only = ((buffer == USER_ADDR_NULL) ? TRUE : FALSE); @@ -6571,7 +6918,7 @@ memorystatus_cmd_get_jetsam_snapshot(int32_t flags, user_addr_t buffer, size_t b is_default_snapshot = TRUE; error = memorystatus_get_jetsam_snapshot(&snapshot, &buffer_size, size_only); } else { - if (flags & ~(MEMORYSTATUS_SNAPSHOT_ON_DEMAND | MEMORYSTATUS_SNAPSHOT_AT_BOOT | MEMORYSTATUS_SNAPSHOT_COPY)) { + if (flags & ~(MEMORYSTATUS_SNAPSHOT_ON_DEMAND | MEMORYSTATUS_SNAPSHOT_AT_BOOT | MEMORYSTATUS_SNAPSHOT_COPY | MEMORYSTATUS_FLAGS_SNAPSHOT_FREEZER)) { /* * Unsupported bit set in flag. */ @@ -6597,6 +6944,11 @@ memorystatus_cmd_get_jetsam_snapshot(int32_t flags, user_addr_t buffer, size_t b error = memorystatus_get_at_boot_snapshot(&snapshot, &buffer_size, size_only); } else if (flags & MEMORYSTATUS_SNAPSHOT_COPY) { error = memorystatus_get_jetsam_snapshot_copy(&snapshot, &buffer_size, size_only); +#if CONFIG_FREEZE + } else if (flags & MEMORYSTATUS_FLAGS_SNAPSHOT_FREEZER) { + is_freezer_snapshot = true; + error = memorystatus_get_jetsam_snapshot_freezer(&snapshot, &buffer_size, size_only); +#endif /* CONFIG_FREEZE */ } else { /* * Invalid flag setting. @@ -6619,20 +6971,40 @@ memorystatus_cmd_get_jetsam_snapshot(int32_t flags, user_addr_t buffer, size_t b * there is nothing to clear or update. * If working with a copy of the snapshot * there is nothing to clear or update. + * If working with the freezer snapshot + * clearing the buffer means, reset the count. */ if (!size_only) { if ((error = copyout(snapshot, buffer, buffer_size)) == 0) { +#if CONFIG_FREEZE + if (is_default_snapshot || is_freezer_snapshot) { +#else if (is_default_snapshot) { +#endif /* CONFIG_FREEZE */ /* * The jetsam snapshot is never freed, its count is simply reset. * However, we make a copy for any parties that might be interested * in the previous fully populated snapshot. */ proc_list_lock(); - memcpy(memorystatus_jetsam_snapshot_copy, memorystatus_jetsam_snapshot, memorystatus_jetsam_snapshot_size); - memorystatus_jetsam_snapshot_copy_count = memorystatus_jetsam_snapshot_count; - snapshot->entry_count = memorystatus_jetsam_snapshot_count = 0; - memorystatus_jetsam_snapshot_last_timestamp = 0; +#if DEVELOPMENT || DEBUG + if (memorystatus_snapshot_owner != 0 && memorystatus_snapshot_owner != current_proc()->p_pid) { + /* Snapshot is currently owned by someone else. Don't consume it. 
*/ + proc_list_unlock(); + goto out; + } +#endif /* (DEVELOPMENT || DEBUG)*/ + if (is_default_snapshot) { + memcpy(memorystatus_jetsam_snapshot_copy, memorystatus_jetsam_snapshot, memorystatus_jetsam_snapshot_size); + memorystatus_jetsam_snapshot_copy_count = memorystatus_jetsam_snapshot_count; + snapshot->entry_count = memorystatus_jetsam_snapshot_count = 0; + memorystatus_jetsam_snapshot_last_timestamp = 0; + } +#if CONFIG_FREEZE + else if (is_freezer_snapshot) { + memorystatus_jetsam_snapshot_freezer->entry_count = 0; + } +#endif /* CONFIG_FREEZE */ proc_list_unlock(); } } @@ -6648,13 +7020,47 @@ memorystatus_cmd_get_jetsam_snapshot(int32_t flags, user_addr_t buffer, size_t b } } +out: if (error == 0) { - *retval = buffer_size; + assert(buffer_size <= INT32_MAX); + *retval = (int32_t) buffer_size; } -out: return error; } +#if DEVELOPMENT || DEBUG +static int +memorystatus_cmd_set_jetsam_snapshot_ownership(int32_t flags) +{ + int error = EINVAL; + proc_t caller = current_proc(); + assert(caller != kernproc); + proc_list_lock(); + if (flags & MEMORYSTATUS_FLAGS_SNAPSHOT_TAKE_OWNERSHIP) { + if (memorystatus_snapshot_owner == 0) { + memorystatus_snapshot_owner = caller->p_pid; + error = 0; + } else if (memorystatus_snapshot_owner == caller->p_pid) { + error = 0; + } else { + /* We don't allow ownership to be taken from another proc. */ + error = EBUSY; + } + } else if (flags & MEMORYSTATUS_FLAGS_SNAPSHOT_DROP_OWNERSHIP) { + if (memorystatus_snapshot_owner == caller->p_pid) { + memorystatus_snapshot_owner = 0; + error = 0; + } else if (memorystatus_snapshot_owner != 0) { + /* We don't allow ownership to be taken from another proc. */ + error = EPERM; + } + } + proc_list_unlock(); + + return error; +} +#endif /* DEVELOPMENT || DEBUG */ + /* * Routine: memorystatus_cmd_grp_set_priorities * Purpose: Update priorities for a group of processes. 
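 *           The buffer is a packed array of memorystatus_properties_entry_v1_t
 *           records; the kernel derives the entry count from buffer_size, so the
 *           buffer must hold at least one full entry. A rough caller sketch,
 *           where the command spelling and the entry fields used are assumptions
 *           (only MEMORYSTATUS_FLAGS_GRP_SET_PRIORITY appears in this hunk):
 *
 *               memorystatus_properties_entry_v1_t entries[2] = {{ 0 }};
 *               entries[0].pid = pid_a;  entries[0].priority = prio_a;
 *               entries[1].pid = pid_b;  entries[1].priority = prio_b;
 *               memorystatus_control(MEMORYSTATUS_CMD_GRP_SET_PROPERTIES, 0,
 *                   MEMORYSTATUS_FLAGS_GRP_SET_PRIORITY,
 *                   entries, sizeof(entries));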
@@ -6700,7 +7106,7 @@ memorystatus_cmd_grp_set_priorities(user_addr_t buffer, size_t buffer_size) int error = 0; memorystatus_properties_entry_v1_t *entries = NULL; - uint32_t entry_count = 0; + size_t entry_count = 0; /* This will be the ordered proc list */ typedef struct memorystatus_internal_properties { @@ -6712,7 +7118,7 @@ memorystatus_cmd_grp_set_priorities(user_addr_t buffer, size_t buffer_size) size_t table_size = 0; uint32_t table_count = 0; - uint32_t i = 0; + size_t i = 0; uint32_t bucket_index = 0; boolean_t head_insert; int32_t new_priority; @@ -6726,7 +7132,13 @@ memorystatus_cmd_grp_set_priorities(user_addr_t buffer, size_t buffer_size) } entry_count = (buffer_size / sizeof(memorystatus_properties_entry_v1_t)); - if ((entries = (memorystatus_properties_entry_v1_t *)kalloc(buffer_size)) == NULL) { + if (entry_count == 0) { + /* buffer size was not large enough for a single entry */ + error = EINVAL; + goto out; + } + + if ((entries = kheap_alloc(KHEAP_TEMP, buffer_size, Z_WAITOK)) == NULL) { error = ENOMEM; goto out; } @@ -6768,11 +7180,10 @@ memorystatus_cmd_grp_set_priorities(user_addr_t buffer, size_t buffer_size) } table_size = sizeof(memorystatus_internal_properties_t) * entry_count; - if ((table = (memorystatus_internal_properties_t *)kalloc(table_size)) == NULL) { + if ((table = kheap_alloc(KHEAP_TEMP, table_size, Z_WAITOK | Z_ZERO)) == NULL) { error = ENOMEM; goto out; } - memset(table, 0, table_size); /* @@ -6843,10 +7254,10 @@ out: KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_MEMSTAT, BSD_MEMSTAT_GRP_SET_PROP) | DBG_FUNC_END, MEMORYSTATUS_FLAGS_GRP_SET_PRIORITY, entry_count, table_count, 0, 0); if (entries) { - kfree(entries, buffer_size); + kheap_free(KHEAP_TEMP, entries, buffer_size); } if (table) { - kfree(table, table_size); + kheap_free(KHEAP_TEMP, table, table_size); } return error; @@ -6860,7 +7271,7 @@ memorystatus_cmd_grp_set_probabilities(user_addr_t buffer, size_t buffer_size) { int error = 0; memorystatus_properties_entry_v1_t *entries = NULL; - uint32_t entry_count = 0, i = 0; + size_t entry_count = 0, i = 0; memorystatus_internal_probabilities_t *tmp_table_new = NULL, *tmp_table_old = NULL; size_t tmp_table_new_size = 0, tmp_table_old_size = 0; @@ -6872,7 +7283,7 @@ memorystatus_cmd_grp_set_probabilities(user_addr_t buffer, size_t buffer_size) entry_count = (buffer_size / sizeof(memorystatus_properties_entry_v1_t)); - if ((entries = (memorystatus_properties_entry_v1_t *) kalloc(buffer_size)) == NULL) { + if ((entries = kheap_alloc(KHEAP_TEMP, buffer_size, Z_WAITOK)) == NULL) { error = ENOMEM; goto out; } @@ -6911,11 +7322,10 @@ memorystatus_cmd_grp_set_probabilities(user_addr_t buffer, size_t buffer_size) tmp_table_new_size = sizeof(memorystatus_internal_probabilities_t) * entry_count; - if ((tmp_table_new = (memorystatus_internal_probabilities_t *) kalloc(tmp_table_new_size)) == NULL) { + if ((tmp_table_new = kalloc_flags(tmp_table_new_size, Z_WAITOK | Z_ZERO)) == NULL) { error = ENOMEM; goto out; } - memset(tmp_table_new, 0, tmp_table_new_size); proc_list_lock(); @@ -6940,7 +7350,7 @@ out: KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_MEMSTAT, BSD_MEMSTAT_GRP_SET_PROP) | DBG_FUNC_END, MEMORYSTATUS_FLAGS_GRP_SET_PROBABILITY, entry_count, tmp_table_new_size, 0, 0); if (entries) { - kfree(entries, buffer_size); + kheap_free(KHEAP_TEMP, entries, buffer_size); entries = NULL; } @@ -7518,6 +7928,11 @@ memorystatus_control(struct proc *p __unused, struct memorystatus_control_args * case MEMORYSTATUS_CMD_GET_JETSAM_SNAPSHOT: error = 
memorystatus_cmd_get_jetsam_snapshot((int32_t)args->flags, args->buffer, args->buffersize, ret); break; +#if DEVELOPMENT || DEBUG + case MEMORYSTATUS_CMD_SET_JETSAM_SNAPSHOT_OWNERSHIP: + error = memorystatus_cmd_set_jetsam_snapshot_ownership((int32_t) args->flags); + break; +#endif case MEMORYSTATUS_CMD_GET_PRESSURE_STATUS: error = memorystatus_cmd_get_pressure_status(ret); break; @@ -7550,7 +7965,7 @@ memorystatus_control(struct proc *p __unused, struct memorystatus_control_args * error = memorystatus_kill_process_sync(args->pid, kMemorystatusKilled, jetsam_reason) ? 0 : EINVAL; break; case MEMORYSTATUS_CMD_TEST_JETSAM_SORT: - error = memorystatus_cmd_test_jetsam_sort(args->pid, (int32_t)args->flags); + error = memorystatus_cmd_test_jetsam_sort(args->pid, (int32_t)args->flags, args->buffer, args->buffersize); break; #if CONFIG_JETSAM case MEMORYSTATUS_CMD_SET_JETSAM_PANIC_BITS: @@ -7609,11 +8024,9 @@ memorystatus_control(struct proc *p __unused, struct memorystatus_control_args * error = memorystatus_get_process_is_freezable(args->pid, ret); break; -#if DEVELOPMENT || DEBUG case MEMORYSTATUS_CMD_FREEZER_CONTROL: error = memorystatus_freezer_control(args->flags, args->buffer, args->buffersize, ret); break; -#endif /* DEVELOPMENT || DEBUG */ #endif /* CONFIG_FREEZE */ #if CONFIG_JETSAM @@ -7621,7 +8034,7 @@ memorystatus_control(struct proc *p __unused, struct memorystatus_control_args * case MEMORYSTATUS_CMD_INCREASE_JETSAM_TASK_LIMIT: error = memorystatus_cmd_increase_jetsam_task_limit(args->pid, args->flags); break; -#endif /* DEVELOPMENT */ +#endif /* DEVELOPMENT || DEBUG */ #endif /* CONFIG_JETSAM */ default: @@ -8039,12 +8452,13 @@ memorystatus_update_priority_for_appnap(proc_t p, boolean_t is_appnap) } uint64_t -memorystatus_available_memory_internal(proc_t p) +memorystatus_available_memory_internal(struct proc *p) { #ifdef XNU_TARGET_OS_OSX - #pragma unused(p) - return 0; -#else + if (p->p_memstat_memlimit <= 0) { + return 0; + } +#endif /* XNU_TARGET_OS_OSX */ const uint64_t footprint_in_bytes = get_task_phys_footprint(p->task); int32_t memlimit_mb; int64_t memlimit_bytes; @@ -8069,7 +8483,6 @@ memorystatus_available_memory_internal(proc_t p) rc = memlimit_bytes - footprint_in_bytes; return (rc >= 0) ? rc : 0; -#endif } int @@ -8099,7 +8512,8 @@ memorystatus_cmd_increase_jetsam_task_limit(pid_t pid, uint32_t byte_increase) } const uint32_t current_memlimit_increase = roundToNearestMB(p->p_memlimit_increase); - const uint32_t page_aligned_increase = round_page(p->p_memlimit_increase + byte_increase); /* round to page */ + /* round to page */ + const int32_t page_aligned_increase = (int32_t) MIN(round_page(p->p_memlimit_increase + byte_increase), INT32_MAX); proc_list_lock(); diff --git a/bsd/kern/kern_memorystatus_freeze.c b/bsd/kern/kern_memorystatus_freeze.c index a6e720285..08c86e4f6 100644 --- a/bsd/kern/kern_memorystatus_freeze.c +++ b/bsd/kern/kern_memorystatus_freeze.c @@ -38,8 +38,6 @@ #include #include -#include - #include #include #include @@ -66,6 +64,8 @@ #include #include +#include + #if CONFIG_FREEZE #include #endif /* CONFIG_FREEZE */ @@ -125,7 +125,8 @@ unsigned int memorystatus_frozen_shared_mb = 0; unsigned int memorystatus_frozen_shared_mb_max = 0; unsigned int memorystatus_freeze_shared_mb_per_process_max = 0; /* Max. MB allowed per process to be freezer-eligible. */ unsigned int memorystatus_freeze_private_shared_pages_ratio = 2; /* Ratio of private:shared pages for a process to be freezer-eligible. 
*/ -unsigned int memorystatus_thaw_count = 0; +unsigned int memorystatus_thaw_count = 0; /* # of thaws in the current freezer interval */ +uint64_t memorystatus_thaw_count_since_boot = 0; /* The number of thaws since boot */ unsigned int memorystatus_refreeze_eligible_count = 0; /* # of processes currently thawed i.e. have state on disk & in-memory */ /* Freezer counters collected for telemtry */ @@ -173,6 +174,14 @@ static struct memorystatus_freezer_stats_t { * on our NAND budget if we did swap out these pages. */ uint64_t mfs_shared_pages_skipped; + + /* + * A running sum of the total number of bytes sent to NAND during + * refreeze operations since boot. + */ + uint64_t mfs_bytes_refrozen; + /* The number of refreeze operations since boot */ + uint64_t mfs_refreeze_count; } memorystatus_freezer_stats = {0}; #endif /* XNU_KERNEL_PRIVATE */ @@ -181,6 +190,7 @@ static inline boolean_t memorystatus_can_freeze_processes(void); static boolean_t memorystatus_can_freeze(boolean_t *memorystatus_freeze_swap_low); static boolean_t memorystatus_is_process_eligible_for_freeze(proc_t p); static void memorystatus_freeze_thread(void *param __unused, wait_result_t wr __unused); +static void memorystatus_freeze_start_normal_throttle_interval(uint32_t new_budget, mach_timespec_t start_ts); void memorystatus_disable_freeze(void); @@ -205,6 +215,10 @@ extern int i_coal_jetsam_get_taskrole(coalition_t coal, task_t task); static void memorystatus_freeze_update_throttle(uint64_t *budget_pages_allowed); static void memorystatus_demote_frozen_processes(boolean_t force_one); +/* + * Converts the freezer_error_code into a string and updates freezer error counts. + */ +static void memorystatus_freezer_stringify_error(const int freezer_error_code, char* buffer, size_t len); static uint64_t memorystatus_freezer_thread_next_run_ts = 0; @@ -212,8 +226,40 @@ static uint64_t memorystatus_freezer_thread_next_run_ts = 0; SYSCTL_UINT(_kern, OID_AUTO, memorystatus_freeze_count, CTLFLAG_RD | CTLFLAG_LOCKED, &memorystatus_frozen_count, 0, ""); SYSCTL_UINT(_kern, OID_AUTO, memorystatus_thaw_count, CTLFLAG_RD | CTLFLAG_LOCKED, &memorystatus_thaw_count, 0, ""); +SYSCTL_QUAD(_kern, OID_AUTO, memorystatus_thaw_count_since_boot, CTLFLAG_RD | CTLFLAG_LOCKED, &memorystatus_thaw_count_since_boot, ""); SYSCTL_QUAD(_kern, OID_AUTO, memorystatus_freeze_pageouts, CTLFLAG_RD | CTLFLAG_LOCKED, &memorystatus_freeze_pageouts, ""); +#if DEVELOPMENT || DEBUG +static int sysctl_memorystatus_freeze_budget_pages_remaining SYSCTL_HANDLER_ARGS +{ + #pragma unused(arg1, arg2, oidp) + int error, changed; + uint64_t new_budget = memorystatus_freeze_budget_pages_remaining; + mach_timespec_t now_ts; + clock_sec_t sec; + clock_nsec_t nsec; + + lck_mtx_lock(&freezer_mutex); + + error = sysctl_io_number(req, memorystatus_freeze_budget_pages_remaining, sizeof(uint64_t), &new_budget, &changed); + if (changed) { + /* Start a new interval with this budget. 
*/ + clock_get_system_nanotime(&sec, &nsec); + now_ts.tv_sec = (unsigned int)(MIN(sec, UINT32_MAX)); + now_ts.tv_nsec = nsec; + memorystatus_freeze_start_normal_throttle_interval((uint32_t) MIN(new_budget, UINT32_MAX), now_ts); + /* Don't carry over any excess pageouts since we're forcing a new budget */ + normal_throttle_window->pageouts = 0; + memorystatus_freeze_budget_pages_remaining = normal_throttle_window->max_pageouts; + } + + lck_mtx_unlock(&freezer_mutex); + return error; +} + +SYSCTL_PROC(_kern, OID_AUTO, memorystatus_freeze_budget_pages_remaining, CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED, 0, 0, &sysctl_memorystatus_freeze_budget_pages_remaining, "Q", ""); +#else /* DEVELOPMENT || DEBUG */ SYSCTL_QUAD(_kern, OID_AUTO, memorystatus_freeze_budget_pages_remaining, CTLFLAG_RD | CTLFLAG_LOCKED, &memorystatus_freeze_budget_pages_remaining, ""); +#endif /* DEVELOPMENT || DEBUG */ SYSCTL_QUAD(_kern, OID_AUTO, memorystatus_freezer_error_excess_shared_memory_count, CTLFLAG_RD | CTLFLAG_LOCKED, &memorystatus_freezer_stats.mfs_error_excess_shared_memory_count, ""); SYSCTL_QUAD(_kern, OID_AUTO, memorystatus_freezer_error_low_private_shared_ratio_count, CTLFLAG_RD | CTLFLAG_LOCKED, &memorystatus_freezer_stats.mfs_error_low_private_shared_ratio_count, ""); SYSCTL_QUAD(_kern, OID_AUTO, memorystatus_freezer_error_no_compressor_space_count, CTLFLAG_RD | CTLFLAG_LOCKED, &memorystatus_freezer_stats.mfs_error_no_compressor_space_count, ""); @@ -226,6 +272,9 @@ SYSCTL_QUAD(_kern, OID_AUTO, memorystatus_freezer_below_threshold_count, CTLFLAG SYSCTL_QUAD(_kern, OID_AUTO, memorystatus_freezer_skipped_full_count, CTLFLAG_RD | CTLFLAG_LOCKED, &memorystatus_freezer_stats.mfs_skipped_full_count, ""); SYSCTL_QUAD(_kern, OID_AUTO, memorystatus_freezer_skipped_shared_mb_high_count, CTLFLAG_RD | CTLFLAG_LOCKED, &memorystatus_freezer_stats.mfs_skipped_shared_mb_high_count, ""); SYSCTL_QUAD(_kern, OID_AUTO, memorystatus_freezer_shared_pages_skipped, CTLFLAG_RD | CTLFLAG_LOCKED, &memorystatus_freezer_stats.mfs_shared_pages_skipped, ""); +SYSCTL_QUAD(_kern, OID_AUTO, memorystatus_freezer_bytes_refrozen, CTLFLAG_RD | CTLFLAG_LOCKED, &memorystatus_freezer_stats.mfs_bytes_refrozen, ""); +SYSCTL_QUAD(_kern, OID_AUTO, memorystatus_freezer_refreeze_count, CTLFLAG_RD | CTLFLAG_LOCKED, &memorystatus_freezer_stats.mfs_refreeze_count, ""); + /* * Calculates the hit rate for the freezer. @@ -255,12 +304,14 @@ static int sysctl_memorystatus_freezer_thaw_percentage SYSCTL_HANDLER_ARGS } proc_list_unlock(); if (frozen_count > 0) { - thaw_percentage = 100 * thaw_count / frozen_count; + assert(thaw_count <= frozen_count); + thaw_percentage = (int)(100 * thaw_count / frozen_count); } return sysctl_handle_int(oidp, &thaw_percentage, 0, req); } SYSCTL_PROC(_kern, OID_AUTO, memorystatus_freezer_thaw_percentage, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0, &sysctl_memorystatus_freezer_thaw_percentage, "I", ""); +#define FREEZER_ERROR_STRING_LENGTH 128 #if DEVELOPMENT || DEBUG @@ -307,6 +358,7 @@ boolean_t memorystatus_freeze_to_memory = FALSE; SYSCTL_UINT(_kern, OID_AUTO, memorystatus_freeze_to_memory, CTLFLAG_RW | CTLFLAG_LOCKED, &memorystatus_freeze_to_memory, 0, ""); #define VM_PAGES_FOR_ALL_PROCS (2) + /* * Manual trigger of freeze and thaw for dev / debug kernels only. 
*/ @@ -397,25 +449,8 @@ again: } if (error) { - char reason[128]; - if (freezer_error_code == FREEZER_ERROR_EXCESS_SHARED_MEMORY) { - memorystatus_freezer_stats.mfs_error_excess_shared_memory_count++; - strlcpy(reason, "too much shared memory", 128); - } - - if (freezer_error_code == FREEZER_ERROR_LOW_PRIVATE_SHARED_RATIO) { - memorystatus_freezer_stats.mfs_error_low_private_shared_ratio_count++; - strlcpy(reason, "low private-shared pages ratio", 128); - } - - if (freezer_error_code == FREEZER_ERROR_NO_COMPRESSOR_SPACE) { - memorystatus_freezer_stats.mfs_error_no_compressor_space_count++; - strlcpy(reason, "no compressor space", 128); - } - - if (freezer_error_code == FREEZER_ERROR_NO_SWAP_SPACE) { - strlcpy(reason, "no swap space", 128); - } + char reason[FREEZER_ERROR_STRING_LENGTH]; + memorystatus_freezer_stringify_error(freezer_error_code, reason, sizeof(reason)); printf("sysctl_freeze: task_freeze failed: %s\n", reason); @@ -430,6 +465,12 @@ again: if ((p->p_memstat_state & P_MEMSTAT_FROZEN) == 0) { p->p_memstat_state |= P_MEMSTAT_FROZEN; memorystatus_frozen_count++; + } else { + // This was a re-freeze + if (VM_CONFIG_FREEZER_SWAP_IS_ACTIVE) { + memorystatus_freezer_stats.mfs_bytes_refrozen += dirty * PAGE_SIZE; + memorystatus_freezer_stats.mfs_refreeze_count++; + } } p->p_memstat_frozen_count++; @@ -616,10 +657,10 @@ memorystatus_freezer_get_status(user_addr_t buffer, size_t buffer_size, int32_t uint32_t proc_count = 0, freeze_eligible_proc_considered = 0, band = 0, xpc_index = 0, leader_index = 0; global_freezable_status_t *list_head; proc_freezable_status_t *list_entry, *list_entry_start; - size_t list_size = 0; + size_t list_size = 0, entry_count = 0; proc_t p, leader_proc; memstat_bucket_t *bucket; - uint32_t state = 0, pages = 0, entry_count = 0; + uint32_t state = 0, pages = 0; boolean_t try_freeze = TRUE, xpc_skip_size_probability_check = FALSE; int error = 0, probability_of_use = 0; pid_t leader_pid = 0; @@ -635,13 +676,11 @@ memorystatus_freezer_get_status(user_addr_t buffer, size_t buffer_size, int32_t return EINVAL; } - list_head = (global_freezable_status_t*)kalloc(list_size); + list_head = kheap_alloc(KHEAP_TEMP, list_size, Z_WAITOK | Z_ZERO); if (list_head == NULL) { return ENOMEM; } - memset(list_head, 0, list_size); - list_size = sizeof(global_freezable_status_t); proc_list_lock(); @@ -854,37 +893,92 @@ continue_eval: } } - buffer_size = list_size; + buffer_size = MIN(list_size, INT32_MAX); error = copyout(list_head, buffer, buffer_size); if (error == 0) { - *retval = buffer_size; + *retval = (int32_t) buffer_size; } else { *retval = 0; } list_size = sizeof(global_freezable_status_t) + (sizeof(proc_freezable_status_t) * MAX_FREEZABLE_PROCESSES); - kfree(list_head, list_size); + kheap_free(KHEAP_TEMP, list_head, list_size); MEMORYSTATUS_DEBUG(1, "memorystatus_freezer_get_status: returning %d (%lu - size)\n", error, (unsigned long)*list_size); return error; } +#endif /* DEVELOPMENT || DEBUG */ + +/* + * Get a list of all processes in the freezer band which are currently frozen. + * Used by powerlog to collect analytics on frozen process. 
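 *
 * Expected usage from userspace, as a sketch. The command, flag and structure
 * fields come from this file; the memorystatus_control() wrapper signature and
 * the record_frozen_proc() helper are placeholders:
 *
 *     global_frozen_procs_t procs = { 0 };
 *     int copied = memorystatus_control(MEMORYSTATUS_CMD_FREEZER_CONTROL, 0,
 *         FREEZER_CONTROL_GET_PROCS, &procs, sizeof(procs));
 *     for (uint32_t i = 0; copied > 0 && i < procs.gfp_num_frozen; i++) {
 *         record_frozen_proc(procs.gfp_procs[i].fp_pid,
 *             procs.gfp_procs[i].fp_name);
 *     }
 *
 * A buffer smaller than sizeof(global_frozen_procs_t) is rejected with EINVAL;
 * on success, retval reports how many bytes were copied out.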
+ */ +static int +memorystatus_freezer_get_procs(user_addr_t buffer, size_t buffer_size, int32_t *retval) +{ + global_frozen_procs_t *frozen_procs = NULL; + uint32_t band = memorystatus_freeze_jetsam_band; + proc_t p; + uint32_t state; + int error; + if (VM_CONFIG_FREEZER_SWAP_IS_ACTIVE == FALSE) { + return ENOTSUP; + } + if (buffer_size < sizeof(global_frozen_procs_t)) { + return EINVAL; + } + frozen_procs = kheap_alloc(KHEAP_TEMP, sizeof(global_frozen_procs_t), + Z_WAITOK | Z_ZERO); + if (frozen_procs == NULL) { + return ENOMEM; + } + + proc_list_lock(); + p = memorystatus_get_first_proc_locked(&band, FALSE); + while (p && frozen_procs->gfp_num_frozen < FREEZER_CONTROL_GET_PROCS_MAX_COUNT) { + state = p->p_memstat_state; + if (state & P_MEMSTAT_FROZEN) { + frozen_procs->gfp_procs[frozen_procs->gfp_num_frozen].fp_pid = p->p_pid; + strlcpy(frozen_procs->gfp_procs[frozen_procs->gfp_num_frozen].fp_name, + p->p_name, sizeof(proc_name_t)); + frozen_procs->gfp_num_frozen++; + } + p = memorystatus_get_next_proc_locked(&band, p, FALSE); + } + proc_list_unlock(); + + buffer_size = MIN(buffer_size, sizeof(global_frozen_procs_t)); + error = copyout(frozen_procs, buffer, buffer_size); + if (error == 0) { + *retval = (int32_t) buffer_size; + } else { + *retval = 0; + } + kheap_free(KHEAP_TEMP, frozen_procs, sizeof(global_frozen_procs_t)); + + return error; +} + int memorystatus_freezer_control(int32_t flags, user_addr_t buffer, size_t buffer_size, int32_t *retval) { int err = ENOTSUP; +#if DEVELOPMENT || DEBUG if (flags == FREEZER_CONTROL_GET_STATUS) { err = memorystatus_freezer_get_status(buffer, buffer_size, retval); } +#endif /* DEVELOPMENT || DEBUG */ + if (flags == FREEZER_CONTROL_GET_PROCS) { + err = memorystatus_freezer_get_procs(buffer, buffer_size, retval); + } return err; } -#endif /* DEVELOPMENT || DEBUG */ - extern void vm_swap_consider_defragmenting(int); extern boolean_t memorystatus_kill_elevated_process(uint32_t, os_reason_t, unsigned int, int, uint32_t *, uint64_t *); @@ -1117,8 +1211,10 @@ memorystatus_is_process_eligible_for_freeze(proc_t p) LCK_MTX_ASSERT(proc_list_mlock, LCK_MTX_ASSERT_OWNED); boolean_t should_freeze = FALSE; - uint32_t state = 0, entry_count = 0, pages = 0, i = 0; + uint32_t state = 0, pages = 0; int probability_of_use = 0; + size_t entry_count = 0, i = 0; + bool first_consideration = true; state = p->p_memstat_state; @@ -1195,13 +1291,23 @@ memorystatus_is_process_eligible_for_freeze(proc_t p) * This proc is a suspended application. * We're interested in tracking what percentage of these * actually get frozen. + * To avoid skewing the metrics towards processes which + * are considered more frequently, we only track failures once + * per process. */ - memorystatus_freezer_stats.mfs_process_considered_count++; + first_consideration = !(state & P_MEMSTAT_FREEZE_CONSIDERED); + + if (first_consideration) { + memorystatus_freezer_stats.mfs_process_considered_count++; + p->p_memstat_state |= P_MEMSTAT_FREEZE_CONSIDERED; + } /* Only freeze applications meeting our minimum resident page criteria */ memorystatus_get_task_page_counts(p->task, &pages, NULL, NULL); if (pages < memorystatus_freeze_pages_min) { - memorystatus_freezer_stats.mfs_error_below_min_pages_count++; + if (first_consideration) { + memorystatus_freezer_stats.mfs_error_below_min_pages_count++; + } goto out; } @@ -1211,7 +1317,9 @@ memorystatus_is_process_eligible_for_freeze(proc_t p) * memorystatus_freeze_top_process holds the proc_list_lock while it traverses the bands. 
*/ if ((p->p_listflag & P_LIST_EXITED) != 0) { - memorystatus_freezer_stats.mfs_error_other_count++; + if (first_consideration) { + memorystatus_freezer_stats.mfs_error_other_count++; + } goto out; } @@ -1228,13 +1336,23 @@ memorystatus_is_process_eligible_for_freeze(proc_t p) } if (probability_of_use == 0) { - memorystatus_freezer_stats.mfs_error_low_probability_of_use_count++; + if (first_consideration) { + memorystatus_freezer_stats.mfs_error_low_probability_of_use_count++; + } goto out; } } should_freeze = TRUE; out: + if (should_freeze && !first_consideration && !(state & P_MEMSTAT_FROZEN)) { + /* + * We're freezing this for the first time and we previously considered it ineligible. + * Bump the considered count so that we track this as 1 failure + * and 1 success. + */ + memorystatus_freezer_stats.mfs_process_considered_count++; + } return should_freeze; } @@ -1288,7 +1406,8 @@ memorystatus_freeze_process_sync(proc_t p) if (p != NULL) { uint32_t purgeable, wired, clean, dirty, shared; - uint32_t max_pages, i; + uint32_t i; + uint64_t max_pages; aPid = p->p_pid; @@ -1314,7 +1433,8 @@ memorystatus_freeze_process_sync(proc_t p) KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_MEMSTAT, BSD_MEMSTAT_FREEZE) | DBG_FUNC_START, memorystatus_available_pages, 0, 0, 0, 0); - ret = task_freeze(p->task, &purgeable, &wired, &clean, &dirty, max_pages, &shared, &freezer_error_code, FALSE /* eval only */); + max_pages = MIN(max_pages, UINT32_MAX); + ret = task_freeze(p->task, &purgeable, &wired, &clean, &dirty, (uint32_t) max_pages, &shared, &freezer_error_code, FALSE /* eval only */); if (ret == KERN_SUCCESS || freezer_error_code == FREEZER_ERROR_LOW_PRIVATE_SHARED_RATIO) { memorystatus_freezer_stats.mfs_shared_pages_skipped += shared; } @@ -1341,6 +1461,12 @@ memorystatus_freeze_process_sync(proc_t p) if ((p->p_memstat_state & P_MEMSTAT_FROZEN) == 0) { p->p_memstat_state |= P_MEMSTAT_FROZEN; memorystatus_frozen_count++; + } else { + // This was a re-freeze + if (VM_CONFIG_FREEZER_SWAP_IS_ACTIVE) { + memorystatus_freezer_stats.mfs_bytes_refrozen += dirty * PAGE_SIZE; + memorystatus_freezer_stats.mfs_refreeze_count++; + } } p->p_memstat_frozen_count++; @@ -1385,26 +1511,8 @@ memorystatus_freeze_process_sync(proc_t p) */ } } else { - char reason[128]; - if (freezer_error_code == FREEZER_ERROR_EXCESS_SHARED_MEMORY) { - memorystatus_freezer_stats.mfs_error_excess_shared_memory_count++; - strlcpy(reason, "too much shared memory", 128); - } - - if (freezer_error_code == FREEZER_ERROR_LOW_PRIVATE_SHARED_RATIO) { - memorystatus_freezer_stats.mfs_error_low_private_shared_ratio_count++; - strlcpy(reason, "low private-shared pages ratio", 128); - } - - if (freezer_error_code == FREEZER_ERROR_NO_COMPRESSOR_SPACE) { - memorystatus_freezer_stats.mfs_error_no_compressor_space_count++; - strlcpy(reason, "no compressor space", 128); - } - - if (freezer_error_code == FREEZER_ERROR_NO_SWAP_SPACE) { - memorystatus_freezer_stats.mfs_error_no_swap_space_count++; - strlcpy(reason, "no swap space", 128); - } + char reason[FREEZER_ERROR_STRING_LENGTH]; + memorystatus_freezer_stringify_error(freezer_error_code, reason, sizeof(reason)); os_log_with_startup_serial(OS_LOG_DEFAULT, "memorystatus: freezing (specific) pid %d [%s]...skipped (%s)", aPid, ((p && *p->p_name) ? 
p->p_name : "unknown"), reason); @@ -1460,7 +1568,7 @@ freeze_process: while (next_p) { kern_return_t kr; uint32_t purgeable, wired, clean, dirty, shared; - uint32_t max_pages = 0; + uint64_t max_pages = 0; int freezer_error_code = 0; p = next_p; @@ -1569,7 +1677,8 @@ freeze_process: KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_MEMSTAT, BSD_MEMSTAT_FREEZE) | DBG_FUNC_START, memorystatus_available_pages, 0, 0, 0, 0); - kr = task_freeze(p->task, &purgeable, &wired, &clean, &dirty, max_pages, &shared, &freezer_error_code, FALSE /* eval only */); + max_pages = MIN(max_pages, UINT32_MAX); + kr = task_freeze(p->task, &purgeable, &wired, &clean, &dirty, (uint32_t) max_pages, &shared, &freezer_error_code, FALSE /* eval only */); if (kr == KERN_SUCCESS || freezer_error_code == FREEZER_ERROR_LOW_PRIVATE_SHARED_RATIO) { memorystatus_freezer_stats.mfs_shared_pages_skipped += shared; } @@ -1595,6 +1704,12 @@ freeze_process: if ((p->p_memstat_state & P_MEMSTAT_FROZEN) == 0) { p->p_memstat_state |= P_MEMSTAT_FROZEN; memorystatus_frozen_count++; + } else { + // This was a re-freeze + if (VM_CONFIG_FREEZER_SWAP_IS_ACTIVE) { + memorystatus_freezer_stats.mfs_bytes_refrozen += dirty * PAGE_SIZE; + memorystatus_freezer_stats.mfs_refreeze_count++; + } } p->p_memstat_frozen_count++; @@ -1730,29 +1845,11 @@ freeze_process: p->p_memstat_state |= P_MEMSTAT_FREEZE_IGNORE; } - char reason[128]; - if (freezer_error_code == FREEZER_ERROR_EXCESS_SHARED_MEMORY) { - memorystatus_freezer_stats.mfs_error_excess_shared_memory_count++; - strlcpy(reason, "too much shared memory", 128); - } - - if (freezer_error_code == FREEZER_ERROR_LOW_PRIVATE_SHARED_RATIO) { - memorystatus_freezer_stats.mfs_error_low_private_shared_ratio_count++; - strlcpy(reason, "low private-shared pages ratio", 128); - } + char reason[FREEZER_ERROR_STRING_LENGTH]; + memorystatus_freezer_stringify_error(freezer_error_code, reason, sizeof(reason)); - if (freezer_error_code == FREEZER_ERROR_NO_COMPRESSOR_SPACE) { - memorystatus_freezer_stats.mfs_error_no_compressor_space_count++; - strlcpy(reason, "no compressor space", 128); - } - - if (freezer_error_code == FREEZER_ERROR_NO_SWAP_SPACE) { - memorystatus_freezer_stats.mfs_error_no_swap_space_count++; - strlcpy(reason, "no swap space", 128); - } - - os_log_with_startup_serial(OS_LOG_DEFAULT, "memorystatus: freezing (%s) pid %d [%s]...skipped (%s)\n", - (coal == NULL ? "general" : "coalition-driven"), aPid, ((p && *p->p_name) ? p->p_name : "unknown"), reason); + os_log_with_startup_serial(OS_LOG_DEFAULT, "memorystatus: %sfreezing (%s) pid %d [%s]...skipped (%s)\n", + refreeze_processes? "re" : "", (coal == NULL ? "general" : "coalition-driven"), aPid, ((p && *p->p_name) ? p->p_name : "unknown"), reason); proc_rele_locked(p); @@ -1995,17 +2092,17 @@ memorystatus_freeze_calculate_new_budget( unsigned int interval_duration_min, uint32_t rollover) { - uint64_t freeze_daily_budget = 0; - unsigned int daily_budget_pageouts = 0; - unsigned int freeze_daily_pageouts_max = 0; + uint64_t freeze_daily_budget = 0, freeze_daily_budget_mb = 0, daily_budget_pageouts = 0, budget_missed = 0, freeze_daily_pageouts_max = 0, new_budget = 0; const static unsigned int kNumSecondsInDay = 60 * 60 * 24; /* Precision factor for days_missed. 2 decimal points. 
*/ const static unsigned int kFixedPointFactor = 100; - unsigned int days_missed, budget_missed; + unsigned int days_missed; /* Get the daily budget from the storage layer */ if (vm_swap_max_budget(&freeze_daily_budget)) { - memorystatus_freeze_daily_mb_max = (freeze_daily_budget / (1024 * 1024)); + freeze_daily_budget_mb = freeze_daily_budget / (1024 * 1024); + assert(freeze_daily_budget_mb <= UINT32_MAX); + memorystatus_freeze_daily_mb_max = (unsigned int) freeze_daily_budget_mb; os_log_with_startup_serial(OS_LOG_DEFAULT, "memorystatus: memorystatus_freeze_daily_mb_max set to %dMB\n", memorystatus_freeze_daily_mb_max); } /* Calculate the daily pageout budget */ @@ -2020,7 +2117,54 @@ memorystatus_freeze_calculate_new_budget( */ days_missed = time_since_last_interval_expired_sec * kFixedPointFactor / kNumSecondsInDay; budget_missed = days_missed * freeze_daily_pageouts_max / kFixedPointFactor; - return rollover + daily_budget_pageouts + budget_missed; + new_budget = rollover + daily_budget_pageouts + budget_missed; + return (uint32_t) MIN(new_budget, UINT32_MAX); +} + +static void +memorystatus_freezer_stringify_error( + const int freezer_error_code, + char* buffer, + size_t len) +{ + if (freezer_error_code == FREEZER_ERROR_EXCESS_SHARED_MEMORY) { + memorystatus_freezer_stats.mfs_error_excess_shared_memory_count++; + strlcpy(buffer, "too much shared memory", len); + } else if (freezer_error_code == FREEZER_ERROR_LOW_PRIVATE_SHARED_RATIO) { + memorystatus_freezer_stats.mfs_error_low_private_shared_ratio_count++; + strlcpy(buffer, "low private-shared pages ratio", len); + } else if (freezer_error_code == FREEZER_ERROR_NO_COMPRESSOR_SPACE) { + memorystatus_freezer_stats.mfs_error_no_compressor_space_count++; + strlcpy(buffer, "no compressor space", len); + } else if (freezer_error_code == FREEZER_ERROR_NO_SWAP_SPACE) { + memorystatus_freezer_stats.mfs_error_no_swap_space_count++; + strlcpy(buffer, "no swap space", len); + } else { + strlcpy(buffer, "unknown error", len); + } +} + +/* + * Start a new normal throttle interval with the given budget. + * Caller must hold the freezer mutex + */ +static void +memorystatus_freeze_start_normal_throttle_interval(uint32_t new_budget, mach_timespec_t start_ts) +{ + LCK_MTX_ASSERT(&freezer_mutex, LCK_MTX_ASSERT_OWNED); + + normal_throttle_window->max_pageouts = new_budget; + normal_throttle_window->ts.tv_sec = normal_throttle_window->mins * 60; + normal_throttle_window->ts.tv_nsec = 0; + ADD_MACH_TIMESPEC(&normal_throttle_window->ts, &start_ts); + /* Since we update the throttle stats pre-freeze, adjust for overshoot here */ + if (normal_throttle_window->pageouts > normal_throttle_window->max_pageouts) { + normal_throttle_window->pageouts -= normal_throttle_window->max_pageouts; + } else { + normal_throttle_window->pageouts = 0; + } + /* Ensure the normal window is now active. 
*/ + memorystatus_freeze_degradation = FALSE; } #if DEVELOPMENT || DEBUG @@ -2075,6 +2219,7 @@ memorystatus_freeze_update_throttle(uint64_t *budget_pages_allowed) LCK_MTX_ASSERT(proc_list_mlock, LCK_MTX_ASSERT_NOTOWNED); unsigned int freeze_daily_pageouts_max = 0; + uint32_t budget_rollover = 0; #if DEVELOPMENT || DEBUG if (!memorystatus_freeze_throttle_enabled) { @@ -2087,7 +2232,7 @@ memorystatus_freeze_update_throttle(uint64_t *budget_pages_allowed) #endif clock_get_system_nanotime(&sec, &nsec); - now_ts.tv_sec = sec; + now_ts.tv_sec = (unsigned int)(MIN(sec, UINT32_MAX)); now_ts.tv_nsec = nsec; struct throttle_interval_t *interval = NULL; @@ -2096,7 +2241,6 @@ memorystatus_freeze_update_throttle(uint64_t *budget_pages_allowed) interval = degraded_throttle_window; if (CMP_MACH_TIMESPEC(&now_ts, &interval->ts) >= 0) { - memorystatus_freeze_degradation = FALSE; interval->pageouts = 0; interval->max_pageouts = 0; } else { @@ -2110,19 +2254,14 @@ memorystatus_freeze_update_throttle(uint64_t *budget_pages_allowed) /* How long has it been since the previous interval expired? */ mach_timespec_t expiration_period_ts = now_ts; SUB_MACH_TIMESPEC(&expiration_period_ts, &interval->ts); - - interval->max_pageouts = memorystatus_freeze_calculate_new_budget( - expiration_period_ts.tv_sec, interval->burst_multiple, - interval->mins, interval->max_pageouts - interval->pageouts); - interval->ts.tv_sec = interval->mins * 60; - interval->ts.tv_nsec = 0; - ADD_MACH_TIMESPEC(&interval->ts, &now_ts); - /* Since we update the throttle stats pre-freeze, adjust for overshoot here */ - if (interval->pageouts > interval->max_pageouts) { - interval->pageouts -= interval->max_pageouts; - } else { - interval->pageouts = 0; - } + /* Get unused budget. Clamp to 0. We'll adjust for overused budget in the next interval. */ + budget_rollover = interval->pageouts > interval->max_pageouts ? + 0 : interval->max_pageouts - interval->pageouts; + + memorystatus_freeze_start_normal_throttle_interval(memorystatus_freeze_calculate_new_budget( + expiration_period_ts.tv_sec, interval->burst_multiple, + interval->mins, budget_rollover), + now_ts); *budget_pages_allowed = interval->max_pageouts; memorystatus_freezer_stats.mfs_shared_pages_skipped = 0; @@ -2136,20 +2275,6 @@ memorystatus_freeze_update_throttle(uint64_t *budget_pages_allowed) * - the current budget left is below our normal budget expectations. */ -#if DEVELOPMENT || DEBUG - /* - * This can only happen in the INTERNAL configs because we allow modifying the daily budget for testing. - */ - - if (freeze_daily_pageouts_max > interval->max_pageouts) { - /* - * We just bumped the daily budget. Re-evaluate our normal window params. - */ - interval->max_pageouts = (interval->burst_multiple * (((uint64_t)interval->mins * freeze_daily_pageouts_max) / NORMAL_WINDOW_MINS)); - memorystatus_freeze_degradation = FALSE; //we'll re-evaluate this below... - } -#endif /* DEVELOPMENT || DEBUG */ - if (memorystatus_freeze_degradation == FALSE) { if (interval->pageouts >= interval->max_pageouts) { *budget_pages_allowed = 0; diff --git a/bsd/kern/kern_memorystatus_notify.c b/bsd/kern/kern_memorystatus_notify.c index c5be3d0d8..83262fec4 100644 --- a/bsd/kern/kern_memorystatus_notify.c +++ b/bsd/kern/kern_memorystatus_notify.c @@ -29,7 +29,6 @@ #include #include -#include #include #include #include @@ -131,11 +130,11 @@ kern_return_t memorystatus_update_vm_pressure(boolean_t target_foreground_proces /* * This value is the threshold that a process must meet to be considered for scavenging. 
*/ -#if CONFIG_EMBEDDED -#define VM_PRESSURE_MINIMUM_RSIZE 6 /* MB */ -#else /* CONFIG_EMBEDDED */ +#if XNU_TARGET_OS_OSX #define VM_PRESSURE_MINIMUM_RSIZE 10 /* MB */ -#endif /* CONFIG_EMBEDDED */ +#else /* XNU_TARGET_OS_OSX */ +#define VM_PRESSURE_MINIMUM_RSIZE 6 /* MB */ +#endif /* XNU_TARGET_OS_OSX */ static uint32_t vm_pressure_task_footprint_min = VM_PRESSURE_MINIMUM_RSIZE; @@ -283,7 +282,7 @@ filt_memorystatustouch(struct knote *kn, struct kevent_qos_s *kev) prev_kn_sfflags = kn->kn_sfflags; kn->kn_sfflags = (kev->fflags & EVFILT_MEMORYSTATUS_ALL_MASK); -#if !CONFIG_EMBEDDED +#if XNU_TARGET_OS_OSX /* * Only on desktop do we restrict notifications to * one per active/inactive state (soft limits only). @@ -337,7 +336,7 @@ filt_memorystatustouch(struct knote *kn, struct kevent_qos_s *kev) kn->kn_sfflags |= NOTE_MEMORYSTATUS_PROC_LIMIT_CRITICAL_INACTIVE; } } -#endif /* !CONFIG_EMBEDDED */ +#endif /* XNU_TARGET_OS_OSX */ /* * reset the output flags based on a @@ -398,7 +397,7 @@ memorystatus_knote_register(struct knote *kn) * Support only userspace visible flags. */ if ((kn->kn_sfflags & EVFILT_MEMORYSTATUS_ALL_MASK) == (unsigned int) kn->kn_sfflags) { -#if !CONFIG_EMBEDDED +#if XNU_TARGET_OS_OSX if (kn->kn_sfflags & NOTE_MEMORYSTATUS_PROC_LIMIT_WARN) { kn->kn_sfflags |= NOTE_MEMORYSTATUS_PROC_LIMIT_WARN_ACTIVE; kn->kn_sfflags |= NOTE_MEMORYSTATUS_PROC_LIMIT_WARN_INACTIVE; @@ -408,7 +407,7 @@ memorystatus_knote_register(struct knote *kn) kn->kn_sfflags |= NOTE_MEMORYSTATUS_PROC_LIMIT_CRITICAL_ACTIVE; kn->kn_sfflags |= NOTE_MEMORYSTATUS_PROC_LIMIT_CRITICAL_INACTIVE; } -#endif /* !CONFIG_EMBEDDED */ +#endif /* XNU_TARGET_OS_OSX */ KNOTE_ATTACH(&memorystatus_klist, kn); } else { @@ -432,15 +431,15 @@ memorystatus_knote_unregister(struct knote *kn __unused) #if CONFIG_MEMORYSTATUS -int -memorystatus_send_note(int event_code, void *data, size_t data_length) +static inline int +memorystatus_send_note_internal(int event_code, int subclass, void *data, uint32_t data_length) { int ret; struct kev_msg ev_msg; ev_msg.vendor_code = KEV_VENDOR_APPLE; ev_msg.kev_class = KEV_SYSTEM_CLASS; - ev_msg.kev_subclass = KEV_MEMORYSTATUS_SUBCLASS; + ev_msg.kev_subclass = subclass; ev_msg.event_code = event_code; @@ -456,13 +455,32 @@ memorystatus_send_note(int event_code, void *data, size_t data_length) return ret; } +int +memorystatus_send_note(int event_code, void *data, uint32_t data_length) +{ + return memorystatus_send_note_internal(event_code, KEV_MEMORYSTATUS_SUBCLASS, data, data_length); +} + +int +memorystatus_send_dirty_status_change_note(void *data, uint32_t data_length) +{ + return memorystatus_send_note_internal(kDirtyStatusChangeNote, KEV_DIRTYSTATUS_SUBCLASS, data, data_length); +} + boolean_t -memorystatus_warn_process(pid_t pid, __unused boolean_t is_active, __unused boolean_t is_fatal, boolean_t limit_exceeded) +memorystatus_warn_process(const proc_t p, __unused boolean_t is_active, __unused boolean_t is_fatal, boolean_t limit_exceeded) { + /* + * This function doesn't take a reference to p or lock it. So it better be the current process. + */ + assert(p == current_proc()); + pid_t pid = p->p_pid; boolean_t ret = FALSE; boolean_t found_knote = FALSE; struct knote *kn = NULL; int send_knote_count = 0; + uint32_t platform; + platform = proc_platform(p); /* * See comment in sysctl_memorystatus_vm_pressure_send. @@ -484,133 +502,146 @@ memorystatus_warn_process(pid_t pid, __unused boolean_t is_active, __unused bool * filt_memorystatus(). 
*/ -#if CONFIG_EMBEDDED - if (!limit_exceeded) { - /* - * Intentionally set either the unambiguous limit warning, - * the system-wide critical or the system-wide warning - * notification bit. - */ - - if (kn->kn_sfflags & NOTE_MEMORYSTATUS_PROC_LIMIT_WARN) { - kn->kn_fflags = NOTE_MEMORYSTATUS_PROC_LIMIT_WARN; - found_knote = TRUE; - send_knote_count++; - } else if (kn->kn_sfflags & NOTE_MEMORYSTATUS_PRESSURE_CRITICAL) { - kn->kn_fflags = NOTE_MEMORYSTATUS_PRESSURE_CRITICAL; - found_knote = TRUE; - send_knote_count++; - } else if (kn->kn_sfflags & NOTE_MEMORYSTATUS_PRESSURE_WARN) { - kn->kn_fflags = NOTE_MEMORYSTATUS_PRESSURE_WARN; - found_knote = TRUE; - send_knote_count++; - } - } else { - /* - * Send this notification when a process has exceeded a soft limit. - */ - if (kn->kn_sfflags & NOTE_MEMORYSTATUS_PROC_LIMIT_CRITICAL) { - kn->kn_fflags = NOTE_MEMORYSTATUS_PROC_LIMIT_CRITICAL; - found_knote = TRUE; - send_knote_count++; - } - } -#else /* CONFIG_EMBEDDED */ - if (!limit_exceeded) { - /* - * Processes on desktop are not expecting to handle a system-wide - * critical or system-wide warning notification from this path. - * Intentionally set only the unambiguous limit warning here. - * - * If the limit is soft, however, limit this to one notification per - * active/inactive limit (per each registered listener). - */ - - if (kn->kn_sfflags & NOTE_MEMORYSTATUS_PROC_LIMIT_WARN) { - found_knote = TRUE; - if (!is_fatal) { - /* - * Restrict proc_limit_warn notifications when - * non-fatal (soft) limit is at play. - */ - if (is_active) { - if (kn->kn_sfflags & NOTE_MEMORYSTATUS_PROC_LIMIT_WARN_ACTIVE) { - /* - * Mark this knote for delivery. - */ - kn->kn_fflags = NOTE_MEMORYSTATUS_PROC_LIMIT_WARN; - /* - * And suppress it from future notifications. - */ - kn->kn_sfflags &= ~NOTE_MEMORYSTATUS_PROC_LIMIT_WARN_ACTIVE; - send_knote_count++; + /* + * The type of notification and the frequency are different between + * embedded and desktop. + * + * Embedded processes register for global pressure notifications + * (NOTE_MEMORYSTATUS_PRESSURE_WARN | NOTE_MEMORYSTATUS_PRESSURE_CRITICAL) via UIKit + * (see applicationDidReceiveMemoryWarning in UIKit). We'll warn them here if + * they are near there memory limit. filt_memorystatus() will warn them based + * on the system pressure level. + * + * On desktop, (NOTE_MEMORYSTATUS_PRESSURE_WARN | NOTE_MEMORYSTATUS_PRESSURE_CRITICAL) + * are only expected to fire for system level warnings. Desktop procesess + * register for NOTE_MEMORYSTATUS_PROC_LIMIT_WARN + * if they want to be warned when they approach their limit + * and for NOTE_MEMORYSTATUS_PROC_LIMIT_CRITICAL to be warned when they + * exceed their limit. + * + * On embedded we continuously warn processes that are approaching their + * memory limit. However on desktop, we only send one warning while + * the process is active/inactive if the limit is soft.. + * + */ + if (platform == PLATFORM_MACOS || platform == PLATFORM_MACCATALYST || platform == PLATFORM_DRIVERKIT) { + if (!limit_exceeded) { + if (kn->kn_sfflags & NOTE_MEMORYSTATUS_PROC_LIMIT_WARN) { + found_knote = TRUE; + if (!is_fatal) { + /* + * Restrict proc_limit_warn notifications when + * non-fatal (soft) limit is at play. + */ + if (is_active) { + if (kn->kn_sfflags & NOTE_MEMORYSTATUS_PROC_LIMIT_WARN_ACTIVE) { + /* + * Mark this knote for delivery. + */ + kn->kn_fflags = NOTE_MEMORYSTATUS_PROC_LIMIT_WARN; + /* + * And suppress it from future notifications. 
+ */ + kn->kn_sfflags &= ~NOTE_MEMORYSTATUS_PROC_LIMIT_WARN_ACTIVE; + send_knote_count++; + } + } else { + if (kn->kn_sfflags & NOTE_MEMORYSTATUS_PROC_LIMIT_WARN_INACTIVE) { + /* + * Mark this knote for delivery. + */ + kn->kn_fflags = NOTE_MEMORYSTATUS_PROC_LIMIT_WARN; + /* + * And suppress it from future notifications. + */ + kn->kn_sfflags &= ~NOTE_MEMORYSTATUS_PROC_LIMIT_WARN_INACTIVE; + send_knote_count++; + } } } else { - if (kn->kn_sfflags & NOTE_MEMORYSTATUS_PROC_LIMIT_WARN_INACTIVE) { - /* - * Mark this knote for delivery. - */ - kn->kn_fflags = NOTE_MEMORYSTATUS_PROC_LIMIT_WARN; - /* - * And suppress it from future notifications. - */ - kn->kn_sfflags &= ~NOTE_MEMORYSTATUS_PROC_LIMIT_WARN_INACTIVE; - send_knote_count++; - } + /* + * No restriction on proc_limit_warn notifications when + * fatal (hard) limit is at play. + */ + kn->kn_fflags = NOTE_MEMORYSTATUS_PROC_LIMIT_WARN; + send_knote_count++; } - } else { - /* - * No restriction on proc_limit_warn notifications when - * fatal (hard) limit is at play. - */ - kn->kn_fflags = NOTE_MEMORYSTATUS_PROC_LIMIT_WARN; - send_knote_count++; } - } - } else { - /* - * Send this notification when a process has exceeded a soft limit, - */ - - if (kn->kn_sfflags & NOTE_MEMORYSTATUS_PROC_LIMIT_CRITICAL) { - found_knote = TRUE; - if (!is_fatal) { - /* - * Restrict critical notifications for soft limits. - */ - - if (is_active) { - if (kn->kn_sfflags & NOTE_MEMORYSTATUS_PROC_LIMIT_CRITICAL_ACTIVE) { - /* - * Suppress future proc_limit_critical notifications - * for the active soft limit. - */ - kn->kn_sfflags &= ~NOTE_MEMORYSTATUS_PROC_LIMIT_CRITICAL_ACTIVE; - kn->kn_fflags = NOTE_MEMORYSTATUS_PROC_LIMIT_CRITICAL; - send_knote_count++; + } else { + /* + * Send this notification when a process has exceeded a soft limit, + */ + + if (kn->kn_sfflags & NOTE_MEMORYSTATUS_PROC_LIMIT_CRITICAL) { + found_knote = TRUE; + if (!is_fatal) { + /* + * Restrict critical notifications for soft limits. + */ + + if (is_active) { + if (kn->kn_sfflags & NOTE_MEMORYSTATUS_PROC_LIMIT_CRITICAL_ACTIVE) { + /* + * Suppress future proc_limit_critical notifications + * for the active soft limit. + */ + kn->kn_sfflags &= ~NOTE_MEMORYSTATUS_PROC_LIMIT_CRITICAL_ACTIVE; + kn->kn_fflags = NOTE_MEMORYSTATUS_PROC_LIMIT_CRITICAL; + send_knote_count++; + } + } else { + if (kn->kn_sfflags & NOTE_MEMORYSTATUS_PROC_LIMIT_CRITICAL_INACTIVE) { + /* + * Suppress future proc_limit_critical_notifications + * for the inactive soft limit. + */ + kn->kn_sfflags &= ~NOTE_MEMORYSTATUS_PROC_LIMIT_CRITICAL_INACTIVE; + kn->kn_fflags = NOTE_MEMORYSTATUS_PROC_LIMIT_CRITICAL; + send_knote_count++; + } } } else { - if (kn->kn_sfflags & NOTE_MEMORYSTATUS_PROC_LIMIT_CRITICAL_INACTIVE) { - /* - * Suppress future proc_limit_critical_notifications - * for the inactive soft limit. - */ - kn->kn_sfflags &= ~NOTE_MEMORYSTATUS_PROC_LIMIT_CRITICAL_INACTIVE; - kn->kn_fflags = NOTE_MEMORYSTATUS_PROC_LIMIT_CRITICAL; - send_knote_count++; - } + /* + * We should never be trying to send a critical notification for + * a hard limit... the process would be killed before it could be + * received. + */ + panic("Caught sending pid %d a critical warning for a fatal limit.\n", pid); } - } else { - /* - * We should never be trying to send a critical notification for - * a hard limit... the process would be killed before it could be - * received. 
- */ - panic("Caught sending pid %d a critical warning for a fatal limit.\n", pid); + } + } + } else { + if (!limit_exceeded) { + /* + * Intentionally set either the unambiguous limit warning, + * the system-wide critical or the system-wide warning + * notification bit. + */ + + if (kn->kn_sfflags & NOTE_MEMORYSTATUS_PROC_LIMIT_WARN) { + kn->kn_fflags = NOTE_MEMORYSTATUS_PROC_LIMIT_WARN; + found_knote = TRUE; + send_knote_count++; + } else if (kn->kn_sfflags & NOTE_MEMORYSTATUS_PRESSURE_CRITICAL) { + kn->kn_fflags = NOTE_MEMORYSTATUS_PRESSURE_CRITICAL; + found_knote = TRUE; + send_knote_count++; + } else if (kn->kn_sfflags & NOTE_MEMORYSTATUS_PRESSURE_WARN) { + kn->kn_fflags = NOTE_MEMORYSTATUS_PRESSURE_WARN; + found_knote = TRUE; + send_knote_count++; + } + } else { + /* + * Send this notification when a process has exceeded a soft limit. + */ + if (kn->kn_sfflags & NOTE_MEMORYSTATUS_PROC_LIMIT_CRITICAL) { + kn->kn_fflags = NOTE_MEMORYSTATUS_PROC_LIMIT_CRITICAL; + found_knote = TRUE; + send_knote_count++; } } } -#endif /* CONFIG_EMBEDDED */ } } @@ -810,8 +841,6 @@ vm_pressure_select_optimal_candidate_to_notify(struct klist *candidate_list, int { struct knote *kn = NULL, *kn_max = NULL; uint64_t resident_max = 0;/* MB */ - struct timeval curr_tstamp = {0, 0}; - int elapsed_msecs = 0; int selected_task_importance = 0; static int pressure_snapshot = -1; boolean_t pressure_increase = FALSE; @@ -846,8 +875,6 @@ vm_pressure_select_optimal_candidate_to_notify(struct klist *candidate_list, int selected_task_importance = 0; } - microuptime(&curr_tstamp); - SLIST_FOREACH(kn, candidate_list, kn_selnext) { uint64_t resident_size = 0;/* MB */ proc_t p = PROC_NULL; @@ -877,9 +904,6 @@ vm_pressure_select_optimal_candidate_to_notify(struct klist *candidate_list, int t = (struct task *)(p->task); - timevalsub(&curr_tstamp, &p->vm_pressure_last_notify_tstamp); - elapsed_msecs = curr_tstamp.tv_sec * 1000 + curr_tstamp.tv_usec / 1000; - vm_pressure_level_t dispatch_level = convert_internal_pressure_level_to_dispatch_level(level); if ((kn->kn_sfflags & dispatch_level) == 0) { @@ -895,11 +919,11 @@ vm_pressure_select_optimal_candidate_to_notify(struct klist *candidate_list, int } #endif /* CONFIG_MEMORYSTATUS */ -#if CONFIG_EMBEDDED - curr_task_importance = p->p_memstat_effectivepriority; -#else /* CONFIG_EMBEDDED */ +#if XNU_TARGET_OS_OSX curr_task_importance = task_importance_estimate(t); -#endif /* CONFIG_EMBEDDED */ +#else /* XNU_TARGET_OS_OSX */ + curr_task_importance = p->p_memstat_effectivepriority; +#endif /* XNU_TARGET_OS_OSX */ /* * Privileged listeners are only considered in the multi-level pressure scheme @@ -1022,7 +1046,7 @@ memorystatus_update_vm_pressure(boolean_t target_foreground_process) boolean_t smoothing_window_started = FALSE; struct timeval smoothing_window_start_tstamp = {0, 0}; struct timeval curr_tstamp = {0, 0}; - int elapsed_msecs = 0; + int64_t elapsed_msecs = 0; uint64_t curr_ts = mach_absolute_time(); #if !CONFIG_JETSAM @@ -1321,7 +1345,7 @@ static int sysctl_memorystatus_vm_pressure_level SYSCTL_HANDLER_ARGS { #pragma unused(arg1, arg2, oidp) -#if CONFIG_EMBEDDED +#if !XNU_TARGET_OS_OSX int error = 0; error = priv_check_cred(kauth_cred_get(), PRIV_VM_PRESSURE, 0); @@ -1329,7 +1353,7 @@ sysctl_memorystatus_vm_pressure_level SYSCTL_HANDLER_ARGS return error; } -#endif /* CONFIG_EMBEDDED */ +#endif /* !XNU_TARGET_OS_OSX */ uint32_t dispatch_level = convert_internal_pressure_level_to_dispatch_level(memorystatus_vm_pressure_level); return SYSCTL_OUT(req, &dispatch_level, 
sizeof(dispatch_level)); diff --git a/bsd/kern/kern_mib.c b/bsd/kern/kern_mib.c index c09b4217b..85ffba28d 100644 --- a/bsd/kern/kern_mib.c +++ b/bsd/kern/kern_mib.c @@ -102,6 +102,9 @@ #include #include #include +#include +#include +#include extern vm_map_t bsd_pageable_map; @@ -111,6 +114,7 @@ extern vm_map_t bsd_pageable_map; #include #include +#include #include #include @@ -132,12 +136,14 @@ extern vm_map_t bsd_pageable_map; /* XXX This should be in a BSD accessible Mach header, but isn't. */ extern unsigned int vm_page_wire_count; -static int cputype, cpusubtype, cputhreadtype, cpufamily, cpu64bit; +static int cputhreadtype, cpu64bit; static uint64_t cacheconfig[10], cachesize[10]; static int packages; static char * osenvironment; static uint32_t osenvironment_size = 0; +static int osenvironment_initialized = 0; + static uint32_t ephemeral_storage = 0; static uint32_t use_recovery_securityd = 0; @@ -182,6 +188,10 @@ SYSCTL_NODE(_kern, OID_AUTO, bridge, CTLFLAG_RW | CTLFLAG_LOCKED, 0, #define HW_LOCAL_PHYSICALCPUMAX (3 | CTLHW_LOCAL) #define HW_LOCAL_LOGICALCPU (4 | CTLHW_LOCAL) #define HW_LOCAL_LOGICALCPUMAX (5 | CTLHW_LOCAL) +#define HW_LOCAL_CPUTYPE (6 | CTLHW_LOCAL) +#define HW_LOCAL_CPUSUBTYPE (7 | CTLHW_LOCAL) +#define HW_LOCAL_CPUFAMILY (8 | CTLHW_LOCAL) +#define HW_LOCAL_CPUSUBFAMILY (9 | CTLHW_LOCAL) /* @@ -256,6 +266,46 @@ sysctl_hw_generic(__unused struct sysctl_oid *oidp, __unused void *arg1, } else { return EINVAL; } + case HW_LOCAL_CPUTYPE: + if (kret == KERN_SUCCESS) { + return SYSCTL_RETURN(req, hinfo.cpu_type); + } else { + return EINVAL; + } + case HW_LOCAL_CPUSUBTYPE: + if (kret == KERN_SUCCESS) { + return SYSCTL_RETURN(req, hinfo.cpu_subtype); + } else { + return EINVAL; + } + case HW_LOCAL_CPUFAMILY: + { + int cpufamily = 0; +#if defined (__i386__) || defined (__x86_64__) + cpufamily = cpuid_cpufamily(); +#elif defined(__arm__) || defined(__arm64__) + { + cpufamily = cpuid_get_cpufamily(); + } +#else +#error unknown architecture +#endif + return SYSCTL_RETURN(req, cpufamily); + } + case HW_LOCAL_CPUSUBFAMILY: + { + int cpusubfamily = 0; +#if defined (__i386__) || defined (__x86_64__) + cpusubfamily = CPUSUBFAMILY_UNKNOWN; +#elif defined(__arm__) || defined(__arm64__) + { + cpusubfamily = cpuid_get_cpusubfamily(); + } +#else +#error unknown architecture +#endif + return SYSCTL_RETURN(req, cpusubfamily); + } case HW_PAGESIZE: { vm_map_t map = get_task_map(current_task()); @@ -264,36 +314,83 @@ sysctl_hw_generic(__unused struct sysctl_oid *oidp, __unused void *arg1, break; } case HW_CACHELINE: - val = cpu_info.cache_line_size; + val = (int)cpu_info.cache_line_size; qval = (long long)val; break; case HW_L1ICACHESIZE: - val = cpu_info.l1_icache_size; - qval = (long long)val; + val = (int)cpu_info.l1_icache_size; + qval = (long long)cpu_info.l1_icache_size; break; case HW_L1DCACHESIZE: - val = cpu_info.l1_dcache_size; - qval = (long long)val; + val = (int)cpu_info.l1_dcache_size; + qval = (long long)cpu_info.l1_dcache_size; break; case HW_L2CACHESIZE: - if (cpu_info.l2_cache_size == 0xFFFFFFFF) { + if (cpu_info.l2_cache_size == UINT32_MAX) { return EINVAL; } - val = cpu_info.l2_cache_size; - qval = (long long)val; + val = (int)cpu_info.l2_cache_size; + qval = (long long)cpu_info.l2_cache_size; break; case HW_L3CACHESIZE: - if (cpu_info.l3_cache_size == 0xFFFFFFFF) { + if (cpu_info.l3_cache_size == UINT32_MAX) { return EINVAL; } - val = cpu_info.l3_cache_size; - qval = (long long)val; + val = (int)cpu_info.l3_cache_size; + qval = (long long)cpu_info.l3_cache_size; break; 
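	/*
	 * The HW_LOCAL_CPUTYPE/CPUSUBTYPE/CPUFAMILY/CPUSUBFAMILY selectors handled
	 * above are normally exposed to userspace as hw.cputype, hw.cpusubtype,
	 * hw.cpufamily and hw.cpusubfamily; the OID registrations are outside this
	 * hunk, so those names are assumptions here. A minimal reader sketch:
	 *
	 *     #include <sys/types.h>
	 *     #include <sys/sysctl.h>
	 *
	 *     int family = 0;
	 *     size_t len = sizeof(family);
	 *     if (sysctlbyname("hw.cpufamily", &family, &len, NULL, 0) == 0) {
	 *         compare family against the CPUFAMILY_* values in <mach/machine.h>
	 *     }
	 */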
+ case HW_TARGET: + bzero(dummy, sizeof(dummy)); + if (!PEGetTargetName(dummy, 64)) { + return EINVAL; + } + dummy[64] = 0; + return SYSCTL_OUT(req, dummy, strlen(dummy) + 1); + case HW_PRODUCT: + bzero(dummy, sizeof(dummy)); + if (!PEGetProductName(dummy, 64)) { + return EINVAL; + } + dummy[64] = 0; + return SYSCTL_OUT(req, dummy, strlen(dummy) + 1); + + /* + * Deprecated variables. We still support these for + * backwards compatibility purposes only. + */ +#if XNU_TARGET_OS_OSX && defined(__arm64__) + /* The following two are kludged for backward + * compatibility. Use hw.product/hw.target for something + * consistent instead. */ - /* - * Deprecated variables. We still support these for - * backwards compatibility purposes only. - */ + case HW_MACHINE: + bzero(dummy, sizeof(dummy)); + if (proc_platform(req->p) == PLATFORM_IOS) { + /* iOS-on-Mac processes don't expect the macOS kind of + * hw.machine, e.g. "arm64", but are used to seeing + * a product string on iOS, which we here hardcode + * to return as "iPad8,6" for compatibility. + * + * Another reason why hw.machine and hw.model are + * trouble and hw.target+hw.product should be used + * instead. + */ + + strlcpy(dummy, "iPad8,6", sizeof(dummy)); + } + else { + strlcpy(dummy, "arm64", sizeof(dummy)); + } + dummy[64] = 0; + return SYSCTL_OUT(req, dummy, strlen(dummy) + 1); + case HW_MODEL: + bzero(dummy, sizeof(dummy)); + if (!PEGetProductName(dummy, 64)) { + return EINVAL; + } + dummy[64] = 0; + return SYSCTL_OUT(req, dummy, strlen(dummy) + 1); +#else case HW_MACHINE: bzero(dummy, sizeof(dummy)); if (!PEGetMachineName(dummy, 64)) { @@ -308,9 +405,10 @@ sysctl_hw_generic(__unused struct sysctl_oid *oidp, __unused void *arg1, } dummy[64] = 0; return SYSCTL_OUT(req, dummy, strlen(dummy) + 1); +#endif case HW_USERMEM: { - int usermem = mem_size - vm_page_wire_count * page_size; + int usermem = (int)(mem_size - vm_page_wire_count * page_size); return SYSCTL_RETURN(req, usermem); } @@ -325,12 +423,12 @@ sysctl_hw_generic(__unused struct sysctl_oid *oidp, __unused void *arg1, return SYSCTL_RETURN(req, vector); } case HW_L2SETTINGS: - if (cpu_info.l2_cache_size == 0xFFFFFFFF) { + if (cpu_info.l2_cache_size == UINT32_MAX) { return EINVAL; } return SYSCTL_RETURN(req, cpu_info.l2_settings); case HW_L3SETTINGS: - if (cpu_info.l3_cache_size == 0xFFFFFFFF) { + if (cpu_info.l3_cache_size == UINT32_MAX) { return EINVAL; } return SYSCTL_RETURN(req, cpu_info.l3_settings); @@ -377,10 +475,29 @@ sysctl_tbfrequency return sysctl_io_number(req, l, sizeof(l), NULL, NULL); } +void +sysctl_set_osenvironment(unsigned int size, const void* value) +{ + if (osenvironment_size == 0 && size > 0) { + MALLOC(osenvironment, char *, size, M_TEMP, M_WAITOK); + if (osenvironment) { + memcpy(osenvironment, value, size); + osenvironment_size = size; + } + } +} + +void +sysctl_unblock_osenvironment(void) +{ + os_atomic_inc(&osenvironment_initialized, relaxed); + thread_wakeup((event_t) &osenvironment_initialized); +} + /* * Create sysctl entries coming from device tree. * - * Entries from device tree are loaded here because DTLookupEntry() only works before + * Entries from device tree are loaded here because SecureDTLookupEntry() only works before * PE_init_iokit(). Doing this also avoids the extern-C hackery to access these entries * from IORegistry (which requires C++). 
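Illustration (not part of the change itself): the block above adds hw.target (backed by PEGetTargetName()) and hw.product (backed by PEGetProductName()), and on macOS/arm64 keeps hw.machine and hw.model only for backward compatibility, hard-coding "iPad8,6" for iOS-on-Mac processes. A minimal userspace sketch of the usual two-call pattern for the new string OIDs (helper name and error handling are illustrative only):

#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/sysctl.h>

/* Query a string sysctl: the first call obtains the length, the second fetches the data. */
static char *
copy_sysctl_string(const char *name)
{
	size_t len = 0;
	char *buf;

	if (sysctlbyname(name, NULL, &len, NULL, 0) != 0) {
		return NULL;
	}
	buf = malloc(len);
	if (buf != NULL && sysctlbyname(name, buf, &len, NULL, 0) != 0) {
		free(buf);
		return NULL;
	}
	return buf;
}

int
main(void)
{
	/* Prefer hw.target/hw.product over the deprecated hw.machine/hw.model. */
	char *target = copy_sysctl_string("hw.target");
	char *product = copy_sysctl_string("hw.product");

	printf("target=%s product=%s\n",
	    target != NULL ? target : "(unavailable)",
	    product != NULL ? product : "(unavailable)");
	free(target);
	free(product);
	return 0;
}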
*/ @@ -388,15 +505,15 @@ void sysctl_load_devicetree_entries(void) { DTEntry chosen; - void *value; + void const *value; unsigned int size; - if (kSuccess != DTLookupEntry(0, "/chosen", &chosen)) { + if (kSuccess != SecureDTLookupEntry(0, "/chosen", &chosen)) { return; } /* load osenvironment */ - if (kSuccess == DTGetProperty(chosen, "osenvironment", (void **) &value, &size)) { + if (kSuccess == SecureDTGetProperty(chosen, "osenvironment", (void const **) &value, &size)) { MALLOC(osenvironment, char *, size, M_TEMP, M_WAITOK); if (osenvironment) { memcpy(osenvironment, value, size); @@ -405,17 +522,17 @@ sysctl_load_devicetree_entries(void) } /* load ephemeral_storage */ - if (kSuccess == DTGetProperty(chosen, "ephemeral-storage", (void **) &value, &size)) { + if (kSuccess == SecureDTGetProperty(chosen, "ephemeral-storage", (void const **) &value, &size)) { if (size == sizeof(uint32_t)) { - ephemeral_storage = *(uint32_t *)value; + ephemeral_storage = *(uint32_t const *)value; property_existence.ephemeral_storage = 1; } } /* load use_recovery_securityd */ - if (kSuccess == DTGetProperty(chosen, "use-recovery-securityd", (void **) &value, &size)) { + if (kSuccess == SecureDTGetProperty(chosen, "use-recovery-securityd", (void const **) &value, &size)) { if (size == sizeof(uint32_t)) { - use_recovery_securityd = *(uint32_t *)value; + use_recovery_securityd = *(uint32_t const *)value; property_existence.use_recovery_securityd = 1; } } @@ -425,6 +542,18 @@ static int sysctl_osenvironment (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req) { +#if defined(__x86_64__) +#if (DEVELOPMENT || DEBUG) + if (os_atomic_load(&osenvironment_initialized, relaxed) == 0) { + assert_wait((event_t) &osenvironment_initialized, THREAD_UNINT); + if (os_atomic_load(&osenvironment_initialized, relaxed) != 0) { + clear_wait(current_thread(), THREAD_AWAKENED); + } else { + (void) thread_block(THREAD_CONTINUE_NULL); + } + } +#endif +#endif if (osenvironment_size > 0) { return SYSCTL_OUT(req, osenvironment, osenvironment_size); } else { @@ -454,6 +583,102 @@ sysctl_use_recovery_securityd } } +static int +sysctl_use_kernelmanagerd +(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req) +{ +#if CONFIG_ARROW + static int use_kernelmanagerd = 1; +#else + static int use_kernelmanagerd = 0; +#endif + static bool once = false; + + if (!once) { + kc_format_t kc_format; + PE_get_primary_kc_format(&kc_format); + if (kc_format == KCFormatFileset) { + use_kernelmanagerd = 1; + } else { + PE_parse_boot_argn("kernelmanagerd", &use_kernelmanagerd, sizeof(use_kernelmanagerd)); + } + once = true; + } + return SYSCTL_OUT(req, &use_kernelmanagerd, sizeof(use_kernelmanagerd)); +} + +#define HW_LOCAL_FREQUENCY 1 +#define HW_LOCAL_FREQUENCY_MIN 2 +#define HW_LOCAL_FREQUENCY_MAX 3 +#define HW_LOCAL_FREQUENCY_CLOCK_RATE 4 + +static int +sysctl_bus_frequency +(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, __unused struct sysctl_req *req) +{ + +#if DEBUG || DEVELOPMENT || (!defined(__arm__) && !defined(__arm64__)) + switch (arg2) { + case HW_LOCAL_FREQUENCY: + return SYSCTL_RETURN(req, gPEClockFrequencyInfo.bus_frequency_hz); + case HW_LOCAL_FREQUENCY_MIN: + return SYSCTL_RETURN(req, gPEClockFrequencyInfo.bus_frequency_min_hz); + case HW_LOCAL_FREQUENCY_MAX: + return SYSCTL_RETURN(req, gPEClockFrequencyInfo.bus_frequency_max_hz); + case HW_LOCAL_FREQUENCY_CLOCK_RATE: + return SYSCTL_OUT(req, 
&gPEClockFrequencyInfo.bus_clock_rate_hz, sizeof(int)); + default: + return EINVAL; + } +#else + return ENOENT; +#endif +} + +static int +sysctl_cpu_frequency +(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, __unused struct sysctl_req *req) +{ + +#if DEBUG || DEVELOPMENT || (!defined(__arm__) && !defined(__arm64__)) + switch (arg2) { + case HW_LOCAL_FREQUENCY: + return SYSCTL_RETURN(req, gPEClockFrequencyInfo.cpu_frequency_hz); + case HW_LOCAL_FREQUENCY_MIN: + return SYSCTL_RETURN(req, gPEClockFrequencyInfo.cpu_frequency_min_hz); + case HW_LOCAL_FREQUENCY_MAX: + return SYSCTL_RETURN(req, gPEClockFrequencyInfo.cpu_frequency_max_hz); + case HW_LOCAL_FREQUENCY_CLOCK_RATE: + return SYSCTL_OUT(req, &gPEClockFrequencyInfo.cpu_clock_rate_hz, sizeof(int)); + default: + return EINVAL; + } +#else + return ENOENT; +#endif +} + +/* + * This sysctl will signal to userspace that a serial console is desired: + * + * hw.serialdebugmode = 1 will load the serial console job in the multi-user session; + * hw.serialdebugmode = 2 will load the serial console job in the base system as well + */ +static int +sysctl_serialdebugmode +(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req) +{ + uint32_t serial_boot_arg; + int serialdebugmode = 0; + + if (PE_parse_boot_argn("serial", &serial_boot_arg, sizeof(serial_boot_arg)) && + (serial_boot_arg & SERIALMODE_OUTPUT) && (serial_boot_arg & SERIALMODE_INPUT)) { + serialdebugmode = (serial_boot_arg & SERIALMODE_BASE_TTY) ? 2 : 1; + } + + return sysctl_io_number(req, serialdebugmode, sizeof(serialdebugmode), NULL, NULL); +} + /* * hw.* MIB variables. */ @@ -464,22 +689,21 @@ SYSCTL_PROC(_hw, OID_AUTO, physicalcpu_max, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_K SYSCTL_PROC(_hw, OID_AUTO, logicalcpu, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_LOCAL_LOGICALCPU, sysctl_hw_generic, "I", ""); SYSCTL_PROC(_hw, OID_AUTO, logicalcpu_max, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_LOCAL_LOGICALCPUMAX, sysctl_hw_generic, "I", ""); SYSCTL_INT(_hw, HW_BYTEORDER, byteorder, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (int *)NULL, BYTE_ORDER, ""); -SYSCTL_INT(_hw, OID_AUTO, cputype, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &cputype, 0, ""); -SYSCTL_INT(_hw, OID_AUTO, cpusubtype, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &cpusubtype, 0, ""); +SYSCTL_PROC(_hw, OID_AUTO, cputype, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_LOCAL_CPUTYPE, sysctl_hw_generic, "I", ""); +SYSCTL_PROC(_hw, OID_AUTO, cpusubtype, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_LOCAL_CPUSUBTYPE, sysctl_hw_generic, "I", ""); SYSCTL_INT(_hw, OID_AUTO, cpu64bit_capable, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &cpu64bit, 0, ""); -SYSCTL_INT(_hw, OID_AUTO, cpufamily, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &cpufamily, 0, ""); +SYSCTL_PROC(_hw, OID_AUTO, cpufamily, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_LOCAL_CPUFAMILY, sysctl_hw_generic, "I", ""); +SYSCTL_PROC(_hw, OID_AUTO, cpusubfamily, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_LOCAL_CPUSUBFAMILY, sysctl_hw_generic, "I", ""); SYSCTL_OPAQUE(_hw, OID_AUTO, cacheconfig, CTLFLAG_RD | CTLFLAG_LOCKED, &cacheconfig, sizeof(cacheconfig), "Q", ""); SYSCTL_OPAQUE(_hw, OID_AUTO, cachesize, CTLFLAG_RD | CTLFLAG_LOCKED, &cachesize, sizeof(cachesize), "Q", ""); SYSCTL_PROC(_hw, OID_AUTO, pagesize, CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, 0, sysctl_pagesize, 
"Q", ""); SYSCTL_PROC(_hw, OID_AUTO, pagesize32, CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, 0, sysctl_pagesize32, "Q", ""); -#if DEBUG || DEVELOPMENT || (!defined(__arm__) && !defined(__arm64__)) -SYSCTL_QUAD(_hw, OID_AUTO, busfrequency, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gPEClockFrequencyInfo.bus_frequency_hz, ""); -SYSCTL_QUAD(_hw, OID_AUTO, busfrequency_min, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gPEClockFrequencyInfo.bus_frequency_min_hz, ""); -SYSCTL_QUAD(_hw, OID_AUTO, busfrequency_max, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gPEClockFrequencyInfo.bus_frequency_max_hz, ""); -SYSCTL_QUAD(_hw, OID_AUTO, cpufrequency, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gPEClockFrequencyInfo.cpu_frequency_hz, ""); -SYSCTL_QUAD(_hw, OID_AUTO, cpufrequency_min, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gPEClockFrequencyInfo.cpu_frequency_min_hz, ""); -SYSCTL_QUAD(_hw, OID_AUTO, cpufrequency_max, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gPEClockFrequencyInfo.cpu_frequency_max_hz, ""); -#endif +SYSCTL_PROC(_hw, OID_AUTO, busfrequency, CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_LOCAL_FREQUENCY, sysctl_bus_frequency, "Q", ""); +SYSCTL_PROC(_hw, OID_AUTO, busfrequency_min, CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_LOCAL_FREQUENCY_MIN, sysctl_bus_frequency, "Q", ""); +SYSCTL_PROC(_hw, OID_AUTO, busfrequency_max, CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_LOCAL_FREQUENCY_MAX, sysctl_bus_frequency, "Q", ""); +SYSCTL_PROC(_hw, OID_AUTO, cpufrequency, CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_LOCAL_FREQUENCY, sysctl_cpu_frequency, "Q", ""); +SYSCTL_PROC(_hw, OID_AUTO, cpufrequency_min, CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_LOCAL_FREQUENCY_MIN, sysctl_cpu_frequency, "Q", ""); +SYSCTL_PROC(_hw, OID_AUTO, cpufrequency_max, CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_LOCAL_FREQUENCY_MAX, sysctl_cpu_frequency, "Q", ""); SYSCTL_PROC(_hw, OID_AUTO, cachelinesize, CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_CACHELINE | CTLHW_RETQUAD, sysctl_hw_generic, "Q", ""); SYSCTL_PROC(_hw, OID_AUTO, l1icachesize, CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_L1ICACHESIZE | CTLHW_RETQUAD, sysctl_hw_generic, "Q", ""); SYSCTL_PROC(_hw, OID_AUTO, l1dcachesize, CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_L1DCACHESIZE | CTLHW_RETQUAD, sysctl_hw_generic, "Q", ""); @@ -495,19 +719,24 @@ SYSCTL_QUAD(_hw, OID_AUTO, prffrequency_max, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG SYSCTL_QUAD(_hw, OID_AUTO, fixfrequency, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gPEClockFrequencyInfo.fix_frequency_hz, ""); #endif /* __arm__ || __arm64__ */ SYSCTL_PROC(_hw, OID_AUTO, tbfrequency, CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, 0, sysctl_tbfrequency, "Q", ""); +#if XNU_TARGET_OS_OSX +SYSCTL_QUAD(_hw, HW_MEMSIZE, memsize, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &max_mem_actual, ""); +#else SYSCTL_QUAD(_hw, HW_MEMSIZE, memsize, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &max_mem, ""); +#endif /* XNU_TARGET_OS_OSX */ SYSCTL_INT(_hw, OID_AUTO, packages, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &packages, 0, ""); SYSCTL_PROC(_hw, OID_AUTO, osenvironment, CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, 0, sysctl_osenvironment, "A", ""); SYSCTL_PROC(_hw, OID_AUTO, ephemeral_storage, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, 0, sysctl_ephemeral_storage, 
"I", ""); SYSCTL_PROC(_hw, OID_AUTO, use_recovery_securityd, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, 0, sysctl_use_recovery_securityd, "I", ""); +SYSCTL_PROC(_hw, OID_AUTO, use_kernelmanagerd, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, 0, sysctl_use_kernelmanagerd, "I", ""); +SYSCTL_PROC(_hw, OID_AUTO, serialdebugmode, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0, sysctl_serialdebugmode, "I", ""); /* * Optional CPU features can register nodes below hw.optional. * * If the feature is not present, the node should either not be registered, - * or it should return -1. If the feature is present, the node should return - * 0. If the feature is present and its use is advised, the node should - * return 1. + * or it should return 0. If the feature is present, the node should return + * 1. */ SYSCTL_NODE(_hw, OID_AUTO, optional, CTLFLAG_RW | CTLFLAG_LOCKED, NULL, "optional features"); @@ -530,11 +759,10 @@ SYSCTL_NODE(_hw, OID_AUTO, features, CTLFLAG_RD | CTLFLAG_LOCKED, NULL, "hardwar * * The *_compat nodes are *NOT* visible within the kernel. */ + SYSCTL_PROC(_hw, HW_PAGESIZE, pagesize_compat, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_PAGESIZE, sysctl_hw_generic, "I", ""); -#if DEBUG || DEVELOPMENT || (!defined(__arm__) && !defined(__arm64__)) -SYSCTL_COMPAT_INT(_hw, HW_BUS_FREQ, busfrequency_compat, CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, &gPEClockFrequencyInfo.bus_clock_rate_hz, 0, ""); -SYSCTL_COMPAT_INT(_hw, HW_CPU_FREQ, cpufrequency_compat, CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, &gPEClockFrequencyInfo.cpu_clock_rate_hz, 0, ""); -#endif +SYSCTL_PROC(_hw, HW_BUS_FREQ, busfrequency_compat, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_LOCAL_FREQUENCY_CLOCK_RATE, sysctl_bus_frequency, "I", ""); +SYSCTL_PROC(_hw, HW_CPU_FREQ, cpufrequency_compat, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_LOCAL_FREQUENCY_CLOCK_RATE, sysctl_cpu_frequency, "I", ""); SYSCTL_PROC(_hw, HW_CACHELINE, cachelinesize_compat, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_CACHELINE, sysctl_hw_generic, "I", ""); SYSCTL_PROC(_hw, HW_L1ICACHESIZE, l1icachesize_compat, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_L1ICACHESIZE, sysctl_hw_generic, "I", ""); SYSCTL_PROC(_hw, HW_L1DCACHESIZE, l1dcachesize_compat, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_L1DCACHESIZE, sysctl_hw_generic, "I", ""); @@ -543,6 +771,8 @@ SYSCTL_PROC(_hw, HW_L3CACHESIZE, l3cachesize_compat, CTLTYPE_INT | CTLFLAG_RD | SYSCTL_COMPAT_INT(_hw, HW_TB_FREQ, tbfrequency_compat, CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, &gPEClockFrequencyInfo.timebase_frequency_hz, 0, ""); SYSCTL_PROC(_hw, HW_MACHINE, machine, CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_MACHINE, sysctl_hw_generic, "A", ""); SYSCTL_PROC(_hw, HW_MODEL, model, CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_MODEL, sysctl_hw_generic, "A", ""); +SYSCTL_PROC(_hw, HW_TARGET, target, CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_TARGET, sysctl_hw_generic, "A", ""); +SYSCTL_PROC(_hw, HW_PRODUCT, product, CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_PRODUCT, sysctl_hw_generic, "A", ""); SYSCTL_COMPAT_UINT(_hw, HW_PHYSMEM, physmem, CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, &mem_size, 0, ""); SYSCTL_PROC(_hw, HW_USERMEM, usermem, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_USERMEM, sysctl_hw_generic, 
"I", ""); SYSCTL_PROC(_hw, HW_EPOCH, epoch, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_EPOCH, sysctl_hw_generic, "I", ""); @@ -551,64 +781,74 @@ SYSCTL_PROC(_hw, HW_L2SETTINGS, l2settings, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_M SYSCTL_PROC(_hw, HW_L3SETTINGS, l3settings, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_L3SETTINGS, sysctl_hw_generic, "I", ""); SYSCTL_INT(_hw, OID_AUTO, cputhreadtype, CTLFLAG_RD | CTLFLAG_NOAUTO | CTLFLAG_KERN | CTLFLAG_LOCKED, &cputhreadtype, 0, ""); -#if defined(__i386__) || defined(__x86_64__) +#if defined(__i386__) || defined(__x86_64__) || CONFIG_X86_64_COMPAT static int sysctl_cpu_capability (__unused struct sysctl_oid *oidp, void *arg1, __unused int arg2, struct sysctl_req *req) { + uint64_t caps; + caps = _get_cpu_capabilities(); + uint64_t mask = (uint64_t) (uintptr_t) arg1; - boolean_t is_capable = (_get_cpu_capabilities() & mask) != 0; + boolean_t is_capable = (caps & mask) != 0; return SYSCTL_OUT(req, &is_capable, sizeof(is_capable)); } +#define capability(name) name -SYSCTL_PROC(_hw_optional, OID_AUTO, mmx, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) kHasMMX, 0, sysctl_cpu_capability, "I", ""); -SYSCTL_PROC(_hw_optional, OID_AUTO, sse, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) kHasSSE, 0, sysctl_cpu_capability, "I", ""); -SYSCTL_PROC(_hw_optional, OID_AUTO, sse2, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) kHasSSE2, 0, sysctl_cpu_capability, "I", ""); -SYSCTL_PROC(_hw_optional, OID_AUTO, sse3, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) kHasSSE3, 0, sysctl_cpu_capability, "I", ""); -SYSCTL_PROC(_hw_optional, OID_AUTO, supplementalsse3, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) kHasSupplementalSSE3, 0, sysctl_cpu_capability, "I", ""); -SYSCTL_PROC(_hw_optional, OID_AUTO, sse4_1, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) kHasSSE4_1, 0, sysctl_cpu_capability, "I", ""); -SYSCTL_PROC(_hw_optional, OID_AUTO, sse4_2, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) kHasSSE4_2, 0, sysctl_cpu_capability, "I", ""); + +SYSCTL_PROC(_hw_optional, OID_AUTO, mmx, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasMMX), 0, sysctl_cpu_capability, "I", ""); +SYSCTL_PROC(_hw_optional, OID_AUTO, sse, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasSSE), 0, sysctl_cpu_capability, "I", ""); +SYSCTL_PROC(_hw_optional, OID_AUTO, sse2, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasSSE2), 0, sysctl_cpu_capability, "I", ""); +SYSCTL_PROC(_hw_optional, OID_AUTO, sse3, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasSSE3), 0, sysctl_cpu_capability, "I", ""); +SYSCTL_PROC(_hw_optional, OID_AUTO, supplementalsse3, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasSupplementalSSE3), 0, sysctl_cpu_capability, "I", ""); +SYSCTL_PROC(_hw_optional, OID_AUTO, sse4_1, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasSSE4_1), 0, sysctl_cpu_capability, "I", ""); +SYSCTL_PROC(_hw_optional, OID_AUTO, sse4_2, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasSSE4_2), 0, sysctl_cpu_capability, "I", ""); /* "x86_64" is actually a preprocessor symbol on the x86_64 kernel, so we have to hack this */ #undef x86_64 -SYSCTL_PROC(_hw_optional, OID_AUTO, x86_64, 
CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) k64Bit, 0, sysctl_cpu_capability, "I", ""); -SYSCTL_PROC(_hw_optional, OID_AUTO, aes, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) kHasAES, 0, sysctl_cpu_capability, "I", ""); -SYSCTL_PROC(_hw_optional, OID_AUTO, avx1_0, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) kHasAVX1_0, 0, sysctl_cpu_capability, "I", ""); -SYSCTL_PROC(_hw_optional, OID_AUTO, rdrand, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) kHasRDRAND, 0, sysctl_cpu_capability, "I", ""); -SYSCTL_PROC(_hw_optional, OID_AUTO, f16c, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) kHasF16C, 0, sysctl_cpu_capability, "I", ""); -SYSCTL_PROC(_hw_optional, OID_AUTO, enfstrg, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) kHasENFSTRG, 0, sysctl_cpu_capability, "I", ""); -SYSCTL_PROC(_hw_optional, OID_AUTO, fma, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) kHasFMA, 0, sysctl_cpu_capability, "I", ""); -SYSCTL_PROC(_hw_optional, OID_AUTO, avx2_0, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) kHasAVX2_0, 0, sysctl_cpu_capability, "I", ""); -SYSCTL_PROC(_hw_optional, OID_AUTO, bmi1, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) kHasBMI1, 0, sysctl_cpu_capability, "I", ""); -SYSCTL_PROC(_hw_optional, OID_AUTO, bmi2, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) kHasBMI2, 0, sysctl_cpu_capability, "I", ""); -SYSCTL_PROC(_hw_optional, OID_AUTO, rtm, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) kHasRTM, 0, sysctl_cpu_capability, "I", ""); -SYSCTL_PROC(_hw_optional, OID_AUTO, hle, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) kHasHLE, 0, sysctl_cpu_capability, "I", ""); -SYSCTL_PROC(_hw_optional, OID_AUTO, adx, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) kHasADX, 0, sysctl_cpu_capability, "I", ""); -SYSCTL_PROC(_hw_optional, OID_AUTO, mpx, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) kHasMPX, 0, sysctl_cpu_capability, "I", ""); -SYSCTL_PROC(_hw_optional, OID_AUTO, sgx, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) kHasSGX, 0, sysctl_cpu_capability, "I", ""); -SYSCTL_PROC(_hw_optional, OID_AUTO, avx512f, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) kHasAVX512F, 0, sysctl_cpu_capability, "I", ""); -SYSCTL_PROC(_hw_optional, OID_AUTO, avx512cd, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) kHasAVX512CD, 0, sysctl_cpu_capability, "I", ""); -SYSCTL_PROC(_hw_optional, OID_AUTO, avx512dq, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) kHasAVX512DQ, 0, sysctl_cpu_capability, "I", ""); -SYSCTL_PROC(_hw_optional, OID_AUTO, avx512bw, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) kHasAVX512BW, 0, sysctl_cpu_capability, "I", ""); -SYSCTL_PROC(_hw_optional, OID_AUTO, avx512vl, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) kHasAVX512VL, 0, sysctl_cpu_capability, "I", ""); -SYSCTL_PROC(_hw_optional, OID_AUTO, avx512ifma, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) kHasAVX512IFMA, 0, sysctl_cpu_capability, "I", ""); -SYSCTL_PROC(_hw_optional, OID_AUTO, avx512vbmi, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) kHasAVX512VBMI, 0, sysctl_cpu_capability, "I", ""); -#elif defined (__arm__) || defined (__arm64__) -int watchpoint_flag = -1; -int breakpoint_flag = -1; 
-int gNeon = -1; -int gNeonHpfp = -1; -int gNeonFp16 = -1; +SYSCTL_PROC(_hw_optional, OID_AUTO, x86_64, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(k64Bit), 0, sysctl_cpu_capability, "I", ""); +SYSCTL_PROC(_hw_optional, OID_AUTO, aes, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasAES), 0, sysctl_cpu_capability, "I", ""); +SYSCTL_PROC(_hw_optional, OID_AUTO, avx1_0, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasAVX1_0), 0, sysctl_cpu_capability, "I", ""); +SYSCTL_PROC(_hw_optional, OID_AUTO, rdrand, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasRDRAND), 0, sysctl_cpu_capability, "I", ""); +SYSCTL_PROC(_hw_optional, OID_AUTO, f16c, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasF16C), 0, sysctl_cpu_capability, "I", ""); +SYSCTL_PROC(_hw_optional, OID_AUTO, enfstrg, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasENFSTRG), 0, sysctl_cpu_capability, "I", ""); +SYSCTL_PROC(_hw_optional, OID_AUTO, fma, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasFMA), 0, sysctl_cpu_capability, "I", ""); +SYSCTL_PROC(_hw_optional, OID_AUTO, avx2_0, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasAVX2_0), 0, sysctl_cpu_capability, "I", ""); +SYSCTL_PROC(_hw_optional, OID_AUTO, bmi1, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasBMI1), 0, sysctl_cpu_capability, "I", ""); +SYSCTL_PROC(_hw_optional, OID_AUTO, bmi2, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasBMI2), 0, sysctl_cpu_capability, "I", ""); +SYSCTL_PROC(_hw_optional, OID_AUTO, rtm, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasRTM), 0, sysctl_cpu_capability, "I", ""); +SYSCTL_PROC(_hw_optional, OID_AUTO, hle, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasHLE), 0, sysctl_cpu_capability, "I", ""); +SYSCTL_PROC(_hw_optional, OID_AUTO, adx, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasADX), 0, sysctl_cpu_capability, "I", ""); +SYSCTL_PROC(_hw_optional, OID_AUTO, mpx, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasMPX), 0, sysctl_cpu_capability, "I", ""); +SYSCTL_PROC(_hw_optional, OID_AUTO, sgx, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasSGX), 0, sysctl_cpu_capability, "I", ""); +SYSCTL_PROC(_hw_optional, OID_AUTO, avx512f, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasAVX512F), 0, sysctl_cpu_capability, "I", ""); +SYSCTL_PROC(_hw_optional, OID_AUTO, avx512cd, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasAVX512CD), 0, sysctl_cpu_capability, "I", ""); +SYSCTL_PROC(_hw_optional, OID_AUTO, avx512dq, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasAVX512DQ), 0, sysctl_cpu_capability, "I", ""); +SYSCTL_PROC(_hw_optional, OID_AUTO, avx512bw, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasAVX512BW), 0, sysctl_cpu_capability, "I", ""); +SYSCTL_PROC(_hw_optional, OID_AUTO, avx512vl, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasAVX512VL), 0, sysctl_cpu_capability, "I", ""); +SYSCTL_PROC(_hw_optional, OID_AUTO, avx512ifma, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | 
CTLFLAG_LOCKED, (void *) capability(kHasAVX512IFMA), 0, sysctl_cpu_capability, "I", ""); +SYSCTL_PROC(_hw_optional, OID_AUTO, avx512vbmi, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) capability(kHasAVX512VBMI), 0, sysctl_cpu_capability, "I", ""); +#undef capability +#endif /* !__i386__ && !__x86_64 && !CONFIG_X86_64_COMPAT */ + +#if defined (__arm__) || defined (__arm64__) +int watchpoint_flag = 0; +int breakpoint_flag = 0; +int gNeon = 0; +int gNeonHpfp = 0; +int gNeonFp16 = 0; int gARMv81Atomics = 0; int gARMv8Crc32 = 0; int gARMv82FHM = 0; +int gARMv82SHA512 = 0; +int gARMv82SHA3 = 0; #if defined (__arm__) int arm64_flag = 0; #elif defined (__arm64__) /* end __arm__*/ int arm64_flag = 1; #else /* end __arm64__*/ -int arm64_flag = -1; +int arm64_flag = 0; #endif SYSCTL_INT(_hw_optional, OID_AUTO, watchpoint, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &watchpoint_flag, 0, ""); @@ -619,6 +859,35 @@ SYSCTL_INT(_hw_optional, OID_AUTO, neon_fp16, CTLFLAG_RD | CTLFLAG_KERN | CTLFLA SYSCTL_INT(_hw_optional, OID_AUTO, armv8_1_atomics, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARMv81Atomics, 0, ""); SYSCTL_INT(_hw_optional, OID_AUTO, armv8_crc32, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARMv8Crc32, 0, ""); SYSCTL_INT(_hw_optional, OID_AUTO, armv8_2_fhm, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARMv82FHM, 0, ""); +SYSCTL_INT(_hw_optional, OID_AUTO, armv8_2_sha512, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARMv82SHA512, 0, ""); +SYSCTL_INT(_hw_optional, OID_AUTO, armv8_2_sha3, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gARMv82SHA3, 0, ""); + +#if DEBUG || DEVELOPMENT +#if __ARM_KERNEL_PROTECT__ +static int arm_kernel_protect = 1; +#else +static int arm_kernel_protect = 0; +#endif +SYSCTL_INT(_hw_optional, OID_AUTO, arm_kernel_protect, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &arm_kernel_protect, 0, ""); +#endif + +#if DEBUG || DEVELOPMENT +#if __ARM_WKDM_POPCNT__ +static int wkdm_popcount = 1; +#else +static int wkdm_popcount = 0; +#endif +SYSCTL_INT(_hw_optional, OID_AUTO, wkdm_popcount, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &wkdm_popcount, 0, ""); +#endif + +#if DEBUG || DEVELOPMENT +#if __has_feature(ptrauth_calls) +static int ptrauth = 1; +#else +static int ptrauth = 0; +#endif +SYSCTL_INT(_hw_optional, OID_AUTO, ptrauth, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &ptrauth, 0, ""); +#endif /* * Without this little ifdef dance, the preprocessor replaces "arm64" with "1", @@ -631,10 +900,7 @@ SYSCTL_INT(_hw_optional, OID_AUTO, arm64, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LO #else SYSCTL_INT(_hw_optional, OID_AUTO, arm64, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &arm64_flag, 0, ""); #endif - -#else -#error Unsupported arch -#endif /* !__i386__ && !__x86_64 && !__arm__ && ! __arm64__ */ +#endif /* !__arm__ && ! 
__arm64__ */ /****************************************************************************** @@ -646,13 +912,11 @@ SYSCTL_INT(_hw_optional, OID_AUTO, arm64, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LO void sysctl_mib_init(void) { - cputype = cpu_type(); - cpusubtype = cpu_subtype(); cputhreadtype = cpu_threadtype(); #if defined(__i386__) || defined (__x86_64__) cpu64bit = (_get_cpu_capabilities() & k64Bit) == k64Bit; #elif defined(__arm__) || defined (__arm64__) - cpu64bit = (cputype & CPU_ARCH_ABI64) == CPU_ARCH_ABI64; + cpu64bit = (cpu_type() & CPU_ARCH_ABI64) == CPU_ARCH_ABI64; #else #error Unsupported arch #endif @@ -670,9 +934,6 @@ sysctl_mib_init(void) } #if defined (__i386__) || defined (__x86_64__) - /* hw.cpufamily */ - cpufamily = cpuid_cpufamily(); - /* hw.cacheconfig */ cacheconfig[0] = ml_cpu_cache_sharing(0); cacheconfig[1] = ml_cpu_cache_sharing(1); @@ -688,13 +949,10 @@ sysctl_mib_init(void) cachesize[4] = 0; /* hw.packages */ - packages = roundup(ml_cpu_cache_sharing(0), cpuid_info()->thread_count) - / cpuid_info()->thread_count; + packages = (int)(roundup(ml_cpu_cache_sharing(0), cpuid_info()->thread_count) + / cpuid_info()->thread_count); #elif defined(__arm__) || defined(__arm64__) /* end __i386 */ - - cpufamily = cpuid_get_cpufamily(); - watchpoint_flag = arm_debug_info()->num_watchpoint_pairs; breakpoint_flag = arm_debug_info()->num_breakpoint_pairs; @@ -703,7 +961,7 @@ sysctl_mib_init(void) gNeonHpfp = mvfp_info->neon_hpfp; gNeonFp16 = mvfp_info->neon_fp16; - cacheconfig[0] = ml_get_max_cpus(); + cacheconfig[0] = ml_wait_max_cpus(); cacheconfig[1] = 1; cacheconfig[2] = cache_info()->c_l2size ? 1:0; cacheconfig[3] = 0; diff --git a/bsd/kern/kern_mman.c b/bsd/kern/kern_mman.c index de0e20667..9e9e8f8f2 100644 --- a/bsd/kern/kern_mman.c +++ b/bsd/kern/kern_mman.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2007-2019 Apple Inc. All Rights Reserved. + * Copyright (c) 2007-2020 Apple Inc. All Rights Reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -113,6 +113,7 @@ #include #include #include +#include #include @@ -133,6 +134,34 @@ #endif #include +/* + * this function implements the same logic as dyld's "dyld_fall_2020_os_versions" + * from dyld_priv.h. this way we can consistently deny / allow allocations based + * on SDK version at fall 2020 level. Compare output to proc_sdk(current_proc()) + */ +static uint32_t +proc_2020_fall_os_sdk(void) +{ + switch (current_proc()->p_platform) { + case PLATFORM_MACOS: + return 0x000a1000; // DYLD_MACOSX_VERSION_10_16 + case PLATFORM_IOS: + case PLATFORM_IOSSIMULATOR: + case PLATFORM_MACCATALYST: + return 0x000e0000; // DYLD_IOS_VERSION_14_0 + case PLATFORM_BRIDGEOS: + return 0x00050000; // DYLD_BRIDGEOS_VERSION_5_0 + case PLATFORM_TVOS: + case PLATFORM_TVOSSIMULATOR: + return 0x000e0000; // DYLD_TVOS_VERSION_14_0 + case PLATFORM_WATCHOS: + case PLATFORM_WATCHOSSIMULATOR: + return 0x00070000; // DYLD_WATCHOS_VERSION_7_0 + default: + return 0; + } +} + /* * XXX Internally, we use VM_PROT_* somewhat interchangeably, but the correct * XXX usage is PROT_* from an interface perspective. 
Thus the values of @@ -204,6 +233,37 @@ mmap(proc_t p, struct mmap_args *uap, user_addr_t *retval) flags = uap->flags; vp = NULLVP; + /* + * verify no unknown flags are passed in, and if any are, + * fail out early to make sure the logic below never has to deal + * with invalid flag values + */ + if (flags & ~(MAP_SHARED | + MAP_PRIVATE | + MAP_COPY | + MAP_FIXED | + MAP_RENAME | + MAP_NORESERVE | + MAP_RESERVED0080 | //grandfathered in as accepted and ignored + MAP_NOEXTEND | + MAP_HASSEMAPHORE | + MAP_NOCACHE | + MAP_JIT | + MAP_FILE | + MAP_ANON | + MAP_RESILIENT_CODESIGN | + MAP_RESILIENT_MEDIA | +#if XNU_TARGET_OS_OSX + MAP_32BIT | +#endif + MAP_TRANSLATED_ALLOW_EXECUTE | + MAP_UNIX03)) { + if (proc_sdk(current_proc()) >= proc_2020_fall_os_sdk()) { + return EINVAL; + } + } + + /* * The vm code does not have prototypes & compiler doesn't do * the right thing when you cast 64bit value and pass it in function @@ -217,6 +277,32 @@ mmap(proc_t p, struct mmap_args *uap, user_addr_t *retval) return EINVAL; } + if (flags & MAP_UNIX03) { + vm_map_offset_t offset_alignment_mask; + + /* + * Enforce UNIX03 compliance. + */ + + if (vm_map_is_exotic(current_map())) { + offset_alignment_mask = 0xFFF; + } else { + offset_alignment_mask = vm_map_page_mask(current_map()); + } + if (file_pos & offset_alignment_mask) { + /* file offset should be page-aligned */ + return EINVAL; + } + if (!(flags & (MAP_PRIVATE | MAP_SHARED))) { + /* need either MAP_PRIVATE or MAP_SHARED */ + return EINVAL; + } + if (user_size == 0) { + /* mapping length should not be 0 */ + return EINVAL; + } + } + /* * Align the file position to a page boundary, * and save its page offset component. @@ -230,6 +316,7 @@ mmap(proc_t p, struct mmap_args *uap, user_addr_t *retval) user_size = vm_map_round_page(user_size, vm_map_page_mask(user_map)); /* hi end */ + if (flags & MAP_JIT) { if ((flags & MAP_FIXED) || (flags & MAP_SHARED) || @@ -248,7 +335,12 @@ mmap(proc_t p, struct mmap_args *uap, user_addr_t *retval) } } if (flags & MAP_RESILIENT_CODESIGN) { - if (prot & (VM_PROT_WRITE | VM_PROT_EXECUTE)) { + int reject_prot = ((flags & MAP_PRIVATE) ? VM_PROT_EXECUTE : (VM_PROT_WRITE | VM_PROT_EXECUTE)); + if (prot & reject_prot) { + /* + * Quick sanity check. maxprot is calculated below and + * we will test it again. + */ return EPERM; } } @@ -342,6 +434,7 @@ mmap(proc_t p, struct mmap_args *uap, user_addr_t *retval) handle = NULL; file_pos = 0; + pageoff = 0; mapanon = 1; } else { struct vnode_attr va; @@ -360,7 +453,7 @@ mmap(proc_t p, struct mmap_args *uap, user_addr_t *retval) return err; } fpref = 1; - switch (FILEGLOB_DTYPE(fp->f_fglob)) { + switch (FILEGLOB_DTYPE(fp->fp_glob)) { case DTYPE_PSXSHM: uap->addr = (user_addr_t)user_addr; uap->len = (user_size_t)user_size; @@ -375,7 +468,7 @@ mmap(proc_t p, struct mmap_args *uap, user_addr_t *retval) error = EINVAL; goto bad; } - vp = (struct vnode *)fp->f_fglob->fg_data; + vp = (struct vnode *)fp->fp_glob->fg_data; error = vnode_getwithref(vp); if (error != 0) { goto bad; @@ -417,8 +510,8 @@ mmap(proc_t p, struct mmap_args *uap, user_addr_t *retval) * credentials do we use for determination? What if * proc does a setuid? */ - maxprot = VM_PROT_EXECUTE; /* ??? */ - if (fp->f_fglob->fg_flag & FREAD) { + maxprot = VM_PROT_EXECUTE; /* TODO: Remove this and restrict maxprot? 
*/ + if (fp->fp_glob->fg_flag & FREAD) { maxprot |= VM_PROT_READ; } else if (prot & PROT_READ) { (void)vnode_put(vp); @@ -434,7 +527,7 @@ mmap(proc_t p, struct mmap_args *uap, user_addr_t *retval) */ if ((flags & MAP_SHARED) != 0) { - if ((fp->f_fglob->fg_flag & FWRITE) != 0 && + if ((fp->fp_glob->fg_flag & FWRITE) != 0 && /* * Do not allow writable mappings of * swap files (see vm_swapfile_pager.c). @@ -470,7 +563,8 @@ mmap(proc_t p, struct mmap_args *uap, user_addr_t *retval) handle = (void *)vp; #if CONFIG_MACF error = mac_file_check_mmap(vfs_context_ucred(ctx), - fp->f_fglob, prot, flags, file_pos, &maxprot); + fp->fp_glob, prot, flags, file_pos + pageoff, + &maxprot); if (error) { (void)vnode_put(vp); goto bad; @@ -479,8 +573,13 @@ mmap(proc_t p, struct mmap_args *uap, user_addr_t *retval) /* * Consult the file system to determine if this * particular file object can be mapped. + * + * N.B. If MAP_PRIVATE (i.e. CoW) has been specified, + * then we don't check for writeability on the file + * object, because it will only ever see reads. */ - error = VNOP_MMAP_CHECK(vp, prot, ctx); + error = VNOP_MMAP_CHECK(vp, (flags & MAP_PRIVATE) ? + (prot & ~PROT_WRITE) : prot, ctx); if (error) { (void)vnode_put(vp); goto bad; @@ -551,6 +650,7 @@ mmap(proc_t p, struct mmap_args *uap, user_addr_t *retval) vmk_flags.vmkf_map_jit = TRUE; } + if (flags & MAP_RESILIENT_CODESIGN) { alloc_flags |= VM_FLAGS_RESILIENT_CODESIGN; } @@ -558,7 +658,8 @@ mmap(proc_t p, struct mmap_args *uap, user_addr_t *retval) alloc_flags |= VM_FLAGS_RESILIENT_MEDIA; } -#ifndef CONFIG_EMBEDDED +#if XNU_TARGET_OS_OSX + /* macOS-specific MAP_32BIT flag handling */ if (flags & MAP_32BIT) { vmk_flags.vmkf_32bit_map_va = TRUE; } @@ -590,6 +691,7 @@ mmap(proc_t p, struct mmap_args *uap, user_addr_t *retval) } #endif /* radar 3777787 */ map_anon_retry: + result = vm_map_enter_mem_object(user_map, &user_addr, user_size, 0, alloc_flags, vmk_flags, @@ -666,7 +768,17 @@ map_anon_retry: map_file_retry: if (flags & MAP_RESILIENT_CODESIGN) { - if (prot & (VM_PROT_WRITE | VM_PROT_EXECUTE)) { + int reject_prot = ((flags & MAP_PRIVATE) ? VM_PROT_EXECUTE : (VM_PROT_WRITE | VM_PROT_EXECUTE)); + if (prot & reject_prot) { + /* + * Would like to use (prot | maxprot) here + * but the assignment of VM_PROT_EXECUTE + * to maxprot above would always fail the test. + * + * Skipping the check is ok, however, because we + * restrict maxprot to prot just below in this + * block. + */ assert(!mapanon); vnode_put(vp); error = EPERM; @@ -1109,18 +1221,33 @@ mincore(__unused proc_t p, struct mincore_args *uap, __unused int32_t *retval) vm_map_t map = VM_MAP_NULL; user_addr_t vec = 0; int error = 0; - int lastvecindex = 0; + int64_t lastvecindex = 0; int mincoreinfo = 0; int pqueryinfo = 0; - unsigned int pqueryinfo_vec_size = 0; + uint64_t pqueryinfo_vec_size = 0; vm_page_info_basic_t info = NULL; mach_msg_type_number_t count = 0; char *kernel_vec = NULL; uint64_t req_vec_size_pages = 0, cur_vec_size_pages = 0, vecindex = 0; kern_return_t kr = KERN_SUCCESS; + int effective_page_shift, effective_page_size; map = current_map(); + /* + * On systems with 4k kernel space and 16k user space, we will + * use the kernel page size to report back the residency information. + * This is for backwards compatibility since we already have + * processes that depend on this behavior. 
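Illustration (not part of the change itself): the backward-compatibility comment above is easier to follow with concrete numbers. The reworked mincore() walks the range in units of effective_page_size and indexes the result vector by (addr - first_addr) >> effective_page_shift, where effective_page_shift is the smaller of the user map's page shift and the kernel PAGE_SHIFT. On an assumed 4 KiB-kernel/16 KiB-user configuration that means one vector byte per 4 KiB, as this small arithmetic sketch shows:

#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	/* Illustrative numbers only: a 64 KiB range passed to mincore(). */
	uint64_t range = 64 * 1024;

	/* The kernel reports residency at the smaller of the two page granularities. */
	unsigned int shift_4k = 12;    /* 4 KiB kernel pages */
	unsigned int shift_16k = 14;   /* 16 KiB user pages */

	printf("vector entries at 4 KiB granularity:  %llu\n",
	    (unsigned long long)(range >> shift_4k));   /* 16 bytes written to vec */
	printf("vector entries at 16 KiB granularity: %llu\n",
	    (unsigned long long)(range >> shift_16k));  /* 4 bytes written to vec */
	return 0;
}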
+ */ + if (vm_map_page_shift(map) < PAGE_SHIFT) { + effective_page_shift = vm_map_page_shift(map); + effective_page_size = vm_map_page_size(map); + } else { + effective_page_shift = PAGE_SHIFT; + effective_page_size = PAGE_SIZE; + } + /* * Make sure that the addresses presented are valid for user * mode. @@ -1143,8 +1270,8 @@ mincore(__unused proc_t p, struct mincore_args *uap, __unused int32_t *retval) * range in chunks of 'cur_vec_size'. */ - req_vec_size_pages = (end - addr) >> PAGE_SHIFT; - cur_vec_size_pages = MIN(req_vec_size_pages, (MAX_PAGE_RANGE_QUERY >> PAGE_SHIFT)); + req_vec_size_pages = (end - addr) >> effective_page_shift; + cur_vec_size_pages = MIN(req_vec_size_pages, (MAX_PAGE_RANGE_QUERY >> effective_page_shift)); kernel_vec = (void*) _MALLOC(cur_vec_size_pages * sizeof(char), M_TEMP, M_WAITOK | M_ZERO); @@ -1166,12 +1293,13 @@ mincore(__unused proc_t p, struct mincore_args *uap, __unused int32_t *retval) } while (addr < end) { - cur_end = addr + (cur_vec_size_pages * PAGE_SIZE_64); + cur_end = addr + (cur_vec_size_pages * effective_page_size); count = VM_PAGE_INFO_BASIC_COUNT; kr = vm_map_page_range_info_internal(map, addr, cur_end, + effective_page_shift, VM_PAGE_INFO_BASIC, (vm_page_info_t) info, &count); @@ -1184,7 +1312,8 @@ mincore(__unused proc_t p, struct mincore_args *uap, __unused int32_t *retval) * up the pages elsewhere. */ lastvecindex = -1; - for (; addr < cur_end; addr += PAGE_SIZE) { + + for (; addr < cur_end; addr += effective_page_size) { pqueryinfo = info[lastvecindex + 1].disposition; mincoreinfo = 0; @@ -1210,7 +1339,7 @@ mincore(__unused proc_t p, struct mincore_args *uap, __unused int32_t *retval) /* * calculate index into user supplied byte vector */ - vecindex = (addr - first_addr) >> PAGE_SHIFT; + vecindex = (addr - first_addr) >> effective_page_shift; kernel_vec[vecindex] = (char)mincoreinfo; lastvecindex = vecindex; } @@ -1231,8 +1360,8 @@ mincore(__unused proc_t p, struct mincore_args *uap, __unused int32_t *retval) * - starting address */ vec += cur_vec_size_pages * sizeof(char); - req_vec_size_pages = (end - addr) >> PAGE_SHIFT; - cur_vec_size_pages = MIN(req_vec_size_pages, (MAX_PAGE_RANGE_QUERY >> PAGE_SHIFT)); + req_vec_size_pages = (end - addr) >> effective_page_shift; + cur_vec_size_pages = MIN(req_vec_size_pages, (MAX_PAGE_RANGE_QUERY >> effective_page_shift)); first_addr = addr; } @@ -1363,10 +1492,11 @@ mremap_encrypted(__unused struct proc *p, struct mremap_encrypted_args *uap, __u } switch (cryptid) { - case 0: + case CRYPTID_NO_ENCRYPTION: /* not encrypted, just an empty load command */ return 0; - case 1: + case CRYPTID_APP_ENCRYPTION: + case CRYPTID_MODEL_ENCRYPTION: cryptname = "com.apple.unfree"; break; case 0x10: @@ -1390,16 +1520,12 @@ mremap_encrypted(__unused struct proc *p, struct mremap_encrypted_args *uap, __u vp = (vnode_t)vnodeaddr; if ((vnode_getwithvid(vp, vid)) == 0) { - MALLOC_ZONE(vpath, char *, MAXPATHLEN, M_NAMEI, M_WAITOK); - if (vpath == NULL) { - vnode_put(vp); - return ENOMEM; - } + vpath = zalloc(ZV_NAMEI); len = MAXPATHLEN; ret = vn_getpath(vp, vpath, &len); if (ret) { - FREE_ZONE(vpath, MAXPATHLEN, M_NAMEI); + zfree(ZV_NAMEI, vpath); vnode_put(vp); return ret; } @@ -1431,7 +1557,7 @@ mremap_encrypted(__unused struct proc *p, struct mremap_encrypted_args *uap, __u __FUNCTION__, vpath, result); } #endif /* VM_MAP_DEBUG_APPLE_PROTECT */ - FREE_ZONE(vpath, MAXPATHLEN, M_NAMEI); + zfree(ZV_NAMEI, vpath); if (result) { printf("%s: unable to create decrypter %s, kr=%d\n", @@ -1451,7 +1577,8 @@ 
mremap_encrypted(__unused struct proc *p, struct mremap_encrypted_args *uap, __u user_addr, user_addr + user_size, crypto_backing_offset, - &crypt_info); + &crypt_info, + cryptid); if (result) { printf("%s: mapping failed with %d\n", __FUNCTION__, result); } diff --git a/bsd/kern/kern_newsysctl.c b/bsd/kern/kern_newsysctl.c index 07cd0e082..b6765a202 100644 --- a/bsd/kern/kern_newsysctl.c +++ b/bsd/kern/kern_newsysctl.c @@ -85,6 +85,7 @@ #endif #if defined(HAS_APPLE_PAC) +#include #include #endif /* defined(HAS_APPLE_PAC) */ @@ -115,7 +116,7 @@ STATIC int sysctl_sysctl_next_ls(struct sysctl_oid_list *lsp, struct sysctl_oid **oidpp); STATIC int sysctl_old_kernel(struct sysctl_req *req, const void *p, size_t l); STATIC int sysctl_new_kernel(struct sysctl_req *req, void *p, size_t l); -STATIC int name2oid(char *name, int *oid, u_int *len); +STATIC int name2oid(char *name, int *oid, size_t *len); STATIC int sysctl_sysctl_name2oid(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req); STATIC int sysctl_sysctl_next(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req); @@ -125,9 +126,9 @@ STATIC int sysctl_new_user(struct sysctl_req *req, void *p, size_t l); STATIC void sysctl_create_user_req(struct sysctl_req *req, struct proc *p, user_addr_t oldp, size_t oldlen, user_addr_t newp, size_t newlen); -STATIC int sysctl_root(boolean_t from_kernel, boolean_t string_is_canonical, char *namestring, size_t namestringlen, int *name, u_int namelen, struct sysctl_req *req); +STATIC int sysctl_root(boolean_t from_kernel, boolean_t string_is_canonical, char *namestring, size_t namestringlen, int *name, size_t namelen, struct sysctl_req *req); -int kernel_sysctl(struct proc *p, int *name, u_int namelen, void *old, size_t *oldlenp, void *new, size_t newlen); +int kernel_sysctl(struct proc *p, int *name, size_t namelen, void *old, size_t *oldlenp, void *new, size_t newlen); int kernel_sysctlbyname(const char *name, void *oldp, size_t *oldlenp, void *newp, size_t newlen); int userland_sysctl(boolean_t string_is_canonical, char *namestring, size_t namestringlen, @@ -158,6 +159,7 @@ sysctl_register_oid(struct sysctl_oid *new_oidp) * structure was changed for a necessary reason). */ if (!(new_oidp->oid_kind & CTLFLAG_OID2)) { +#if __x86_64__ /* * XXX: M_TEMP is perhaps not the most apropriate zone, as it * XXX: will subject us to use-after-free by other consumers. @@ -173,7 +175,10 @@ sysctl_register_oid(struct sysctl_oid *new_oidp) * Note: We may want to set the oid_descr to the * oid_name (or "") at some future date. */ - memcpy(oidp, new_oidp, offsetof(struct sysctl_oid, oid_descr)); + *oidp = *new_oidp; +#else + panic("Old style sysctl without a version number isn't supported"); +#endif } else { /* It's a later version; handle the versions we know about */ switch (new_oidp->oid_version) { @@ -214,30 +219,17 @@ sysctl_register_oid(struct sysctl_oid *new_oidp) #if defined(HAS_APPLE_PAC) if (oidp->oid_handler) { - /* - * Dereference function-pointer-signed oid_handler to prevent an - * attacker with the ability to observe the result of the - * auth_and_resign below from trying all possible inputs until an auth - * succeeds. - */ - if (__builtin_expect(!*(uintptr_t*)ptrauth_auth_data((void*) - oidp->oid_handler, ptrauth_key_function_pointer, 0), 0)) { - /* - * This is necessary to force the dereference but will never - * actually be reached, dereferencing an invalidly signed pointer - * will trap before getting here (and the codegen is nicer than - * with a panic). 
- */ - __builtin_trap(); - } /* * Sign oid_handler address-discriminated upon installation to make it - * harder to replace with an arbitrary function pointer. + * harder to replace with an arbitrary function pointer. Blend with + * a hash of oid_arg1 for robustness against memory corruption. */ oidp->oid_handler = ptrauth_auth_and_resign(oidp->oid_handler, - ptrauth_key_function_pointer, 0, ptrauth_key_function_pointer, + ptrauth_key_function_pointer, + ptrauth_function_pointer_type_discriminator(typeof(oidp->oid_handler)), + ptrauth_key_function_pointer, ptrauth_blend_discriminator(&oidp->oid_handler, - ptrauth_string_discriminator("oid_handler"))); + os_hash_kernel_pointer(oidp->oid_arg1))); } #endif /* defined(HAS_APPLE_PAC) */ @@ -267,12 +259,17 @@ void sysctl_unregister_oid(struct sysctl_oid *oidp) { struct sysctl_oid *removed_oidp = NULL; /* OID removed from tree */ +#if __x86_64__ struct sysctl_oid *old_oidp = NULL; /* OID compatibility copy */ +#else + struct sysctl_oid *const old_oidp = NULL; +#endif /* Get the write lock to modify the geometry */ lck_rw_lock_exclusive(sysctl_geometry_lock); if (!(oidp->oid_kind & CTLFLAG_OID2)) { +#if __x86_64__ /* * We're using a copy so we can get the new fields in an * old structure, so we have to iterate to compare the @@ -288,6 +285,9 @@ sysctl_unregister_oid(struct sysctl_oid *oidp) SLIST_REMOVE(old_oidp->oid_parent, old_oidp, sysctl_oid, oid_link); removed_oidp = old_oidp; } +#else + panic("Old style sysctl without a version number isn't supported"); +#endif } else { /* It's a later version; handle the versions we know about */ switch (oidp->oid_version) { @@ -308,26 +308,12 @@ sysctl_unregister_oid(struct sysctl_oid *oidp) * Revert address-discriminated signing performed by * sysctl_register_oid() (in case this oid is registered again). */ - removed_oidp->oid_handler = ptrauth_auth_function(removed_oidp->oid_handler, + removed_oidp->oid_handler = ptrauth_auth_and_resign(removed_oidp->oid_handler, ptrauth_key_function_pointer, ptrauth_blend_discriminator(&removed_oidp->oid_handler, - ptrauth_string_discriminator("oid_handler"))); - /* - * Dereference the function-pointer-signed result to prevent an - * attacker with the ability to observe the result of the - * auth_and_resign above from trying all possible inputs until an auth - * succeeds. - */ - if (__builtin_expect(!*(uintptr_t*)ptrauth_auth_data((void*) - removed_oidp->oid_handler, ptrauth_key_function_pointer, 0), 0)) { - /* - * This is necessary to force the dereference but will never - * actually be reached, dereferencing an invalidly signed pointer - * will trap before getting here (and the codegen is nicer than - * with a panic). 
- */ - __builtin_trap(); - } + os_hash_kernel_pointer(removed_oidp->oid_arg1)), + ptrauth_key_function_pointer, + ptrauth_function_pointer_type_discriminator(typeof(removed_oidp->oid_handler))); } #endif /* defined(HAS_APPLE_PAC) */ @@ -346,9 +332,11 @@ sysctl_unregister_oid(struct sysctl_oid *oidp) /* Release the write lock */ lck_rw_unlock_exclusive(sysctl_geometry_lock); - /* If it was allocated, free it after dropping the lock */ if (old_oidp != NULL) { +#if __x86_64__ + /* If it was allocated, free it after dropping the lock */ FREE(old_oidp, M_TEMP); +#endif } } @@ -492,12 +480,13 @@ int sysctl_io_string(struct sysctl_req *req, char *pValue, size_t valueSize, int trunc, int *changed) { int error; + size_t len = strlen(pValue) + 1; if (changed) { *changed = 0; } - if (trunc && req->oldptr && req->oldlen && (req->oldlen < strlen(pValue) + 1)) { + if (trunc && req->oldptr && req->oldlen && (req->oldlen < len)) { /* If trunc != 0, if you give it a too small (but larger than * 0 bytes) buffer, instead of returning ENOMEM, it truncates the * returned string to the buffer size. This preserves the semantics @@ -511,7 +500,7 @@ sysctl_io_string(struct sysctl_req *req, char *pValue, size_t valueSize, int tru } } else { /* Copy string out */ - error = SYSCTL_OUT(req, pValue, strlen(pValue) + 1); + error = SYSCTL_OUT(req, pValue, len); } /* error or no new value */ @@ -975,9 +964,9 @@ SYSCTL_NODE(_sysctl, 2, next, CTLFLAG_RD | CTLFLAG_LOCKED, sysctl_sysctl_next, " * Locks: Assumes sysctl_geometry_lock is held prior to calling */ STATIC int -name2oid(char *name, int *oid, u_int *len) +name2oid(char *name, int *oid, size_t *len) { - int i; + char i; struct sysctl_oid *oidp; struct sysctl_oid_list *lsp = &sysctl__children; char *p; @@ -1011,7 +1000,7 @@ name2oid(char *name, int *oid, u_int *len) *oid++ = oidp->oid_number; (*len)++; - if (!i) { + if (i == '\0') { return 0; } @@ -1083,7 +1072,7 @@ sysctl_sysctl_name2oid(__unused struct sysctl_oid *oidp, __unused void *arg1, { char *p; int error, oid[CTL_MAXNAME] = {}; - u_int len = 0; /* set by name2oid() */ + size_t len = 0; /* set by name2oid() */ if (req->newlen < 1) { return ENOENT; @@ -1374,7 +1363,7 @@ sysctl_new_kernel(struct sysctl_req *req, void *p, size_t l) } int -kernel_sysctl(struct proc *p, int *name, u_int namelen, void *old, size_t *oldlenp, void *new, size_t newlen) +kernel_sysctl(struct proc *p, int *name, size_t namelen, void *old, size_t *oldlenp, void *new, size_t newlen) { int error = 0; struct sysctl_req req; @@ -1465,7 +1454,7 @@ sysctl_new_user(struct sysctl_req *req, void *p, size_t l) */ int -sysctl_root(boolean_t from_kernel, boolean_t string_is_canonical, char *namestring, size_t namestringlen, int *name, u_int namelen, struct sysctl_req *req) +sysctl_root(boolean_t from_kernel, boolean_t string_is_canonical, char *namestring, size_t namestringlen, int *name, size_t namelen, struct sysctl_req *req) { u_int indx; int i; @@ -1655,14 +1644,16 @@ found: /* * oid_handler is signed address-discriminated by sysctl_register_oid(). 
*/ - oid_handler = ptrauth_auth_function(oid_handler, + oid_handler = ptrauth_auth_and_resign(oid_handler, ptrauth_key_function_pointer, ptrauth_blend_discriminator(&oid->oid_handler, - ptrauth_string_discriminator("oid_handler"))); + os_hash_kernel_pointer(oid->oid_arg1)), + ptrauth_key_function_pointer, + ptrauth_function_pointer_type_discriminator(typeof(oid_handler))); #endif /* defined(HAS_APPLE_PAC) */ if ((oid->oid_kind & CTLTYPE) == CTLTYPE_NODE) { - i = oid_handler(oid, name + indx, namelen - indx, req); + i = oid_handler(oid, name + indx, (int)(namelen - indx), req); } else { i = oid_handler(oid, oid->oid_arg1, oid->oid_arg2, req); } @@ -1945,10 +1936,13 @@ kernel_sysctlbyname(const char *name, void *oldp, size_t *oldlenp, void *newp, s oidlen = sizeof(oid); error = kernel_sysctl(current_proc(), name2mib_oid, 2, oid, &oidlen, __DECONST(void *, name), strlen(name)); oidlen /= sizeof(int); + if (oidlen > UINT_MAX) { + error = EDOM; + } /* now use the OID */ if (error == 0) { - error = kernel_sysctl(current_proc(), oid, oidlen, oldp, oldlenp, newp, newlen); + error = kernel_sysctl(current_proc(), oid, (u_int)oidlen, oldp, oldlenp, newp, newlen); } return error; } diff --git a/bsd/kern/kern_ntptime.c b/bsd/kern/kern_ntptime.c index 2ad397e74..0ae62258f 100644 --- a/bsd/kern/kern_ntptime.c +++ b/bsd/kern/kern_ntptime.c @@ -278,11 +278,11 @@ ntp_gettime(struct proc *p, struct ntp_gettime_args *uap, __unused int32_t *retv error = copyout(&user_ntv, uap->ntvp, sizeof(user_ntv)); } else { struct user32_ntptimeval user_ntv = {}; - user_ntv.time.tv_sec = ntv.time.tv_sec; - user_ntv.time.tv_nsec = ntv.time.tv_nsec; - user_ntv.maxerror = ntv.maxerror; - user_ntv.esterror = ntv.esterror; - user_ntv.tai = ntv.tai; + user_ntv.time.tv_sec = (user32_long_t)ntv.time.tv_sec; + user_ntv.time.tv_nsec = (user32_long_t)ntv.time.tv_nsec; + user_ntv.maxerror = (user32_long_t)ntv.maxerror; + user_ntv.esterror = (user32_long_t)ntv.esterror; + user_ntv.tai = (user32_long_t)ntv.tai; user_ntv.time_state = ntv.time_state; error = copyout(&user_ntv, uap->ntvp, sizeof(user_ntv)); } @@ -309,14 +309,14 @@ ntp_adjtime(struct proc *p, struct ntp_adjtime_args *uap, int32_t *retval) struct user64_timex user_ntv; error = copyin(uap->tp, &user_ntv, sizeof(user_ntv)); ntv.modes = user_ntv.modes; - ntv.offset = user_ntv.offset; - ntv.freq = user_ntv.freq; - ntv.maxerror = user_ntv.maxerror; - ntv.esterror = user_ntv.esterror; + ntv.offset = (long)user_ntv.offset; + ntv.freq = (long)user_ntv.freq; + ntv.maxerror = (long)user_ntv.maxerror; + ntv.esterror = (long)user_ntv.esterror; ntv.status = user_ntv.status; - ntv.constant = user_ntv.constant; - ntv.precision = user_ntv.precision; - ntv.tolerance = user_ntv.tolerance; + ntv.constant = (long)user_ntv.constant; + ntv.precision = (long)user_ntv.precision; + ntv.tolerance = (long)user_ntv.tolerance; } else { struct user32_timex user_ntv; error = copyin(uap->tp, &user_ntv, sizeof(user_ntv)); @@ -458,7 +458,11 @@ ntp_adjtime(struct proc *p, struct ntp_adjtime_args *uap, int32_t *retval) } else { user_ntv.offset = L_GINT(time_offset) / 1000; } - user_ntv.freq = L_GINT((time_freq / 1000LL) << 16); + if (time_freq > 0) { + user_ntv.freq = L_GINT(((int64_t)(time_freq / 1000LL)) << 16); + } else { + user_ntv.freq = -L_GINT(((int64_t)(-(time_freq) / 1000LL)) << 16); + } user_ntv.maxerror = time_maxerror; user_ntv.esterror = time_esterror; user_ntv.status = time_status; @@ -483,15 +487,19 @@ ntp_adjtime(struct proc *p, struct ntp_adjtime_args *uap, int32_t *retval) } else { 
user_ntv.offset = L_GINT(time_offset) / 1000; } - user_ntv.freq = L_GINT((time_freq / 1000LL) << 16); - user_ntv.maxerror = time_maxerror; - user_ntv.esterror = time_esterror; + if (time_freq > 0) { + user_ntv.freq = L_GINT((time_freq / 1000LL) << 16); + } else { + user_ntv.freq = -L_GINT((-(time_freq) / 1000LL) << 16); + } + user_ntv.maxerror = (user32_long_t)time_maxerror; + user_ntv.esterror = (user32_long_t)time_esterror; user_ntv.status = time_status; - user_ntv.constant = time_constant; + user_ntv.constant = (user32_long_t)time_constant; if (time_status & STA_NANO) { - user_ntv.precision = time_precision; + user_ntv.precision = (user32_long_t)time_precision; } else { - user_ntv.precision = time_precision / 1000; + user_ntv.precision = (user32_long_t)(time_precision / 1000); } user_ntv.tolerance = MAXFREQ * SCALE_PPM; @@ -559,7 +567,7 @@ ntp_update_second(int64_t *adjustment, clock_sec_t secs) } else if (time_adjtime < -500) { tickrate = -500; } else { - tickrate = time_adjtime; + tickrate = (int)time_adjtime; } time_adjtime -= tickrate; L_LINT(ftemp, tickrate * 1000); @@ -683,7 +691,7 @@ kern_adjtime(struct timeval *delta) #endif NTP_UNLOCK(enable); - atv.tv_sec = ltr / (int64_t)USEC_PER_SEC; + atv.tv_sec = (__darwin_time_t)(ltr / (int64_t)USEC_PER_SEC); atv.tv_usec = ltr % (int64_t)USEC_PER_SEC; if (atv.tv_usec < 0) { atv.tv_usec += (suseconds_t)USEC_PER_SEC; @@ -719,7 +727,7 @@ adjtime(struct proc *p, struct adjtime_args *uap, __unused int32_t *retval) if (IS_64BIT_PROCESS(p)) { struct user64_timeval user_atv; error = copyin(uap->delta, &user_atv, sizeof(user_atv)); - atv.tv_sec = user_atv.tv_sec; + atv.tv_sec = (__darwin_time_t)user_atv.tv_sec; atv.tv_usec = user_atv.tv_usec; } else { struct user32_timeval user_atv; @@ -741,7 +749,7 @@ adjtime(struct proc *p, struct adjtime_args *uap, __unused int32_t *retval) error = copyout(&user_atv, uap->olddelta, sizeof(user_atv)); } else { struct user32_timeval user_atv = {}; - user_atv.tv_sec = atv.tv_sec; + user_atv.tv_sec = (user32_time_t)atv.tv_sec; user_atv.tv_usec = atv.tv_usec; error = copyout(&user_atv, uap->olddelta, sizeof(user_atv)); } diff --git a/bsd/kern/kern_persona.c b/bsd/kern/kern_persona.c index 7fc207026..b3470216a 100644 --- a/bsd/kern/kern_persona.c +++ b/bsd/kern/kern_persona.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015 Apple Inc. All rights reserved. + * Copyright (c) 2015-2020 Apple Inc. All rights reserved. 
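Both ntp_adjtime() output paths above stop computing L_GINT((time_freq / 1000LL) << 16) directly and instead split on the sign of time_freq. Left-shifting a negative signed integer is undefined behavior in C, so the shift is now applied only to a non-negative quantity and the result negated afterwards. A standalone illustration of the idiom (the function and sample values are hypothetical; only the shift pattern is taken from the patch):

#include <stdint.h>
#include <stdio.h>

/* Scale a signed frequency value by 2^16 without left-shifting a negative
 * operand, which would be undefined behavior for signed integers. */
static int64_t
scale_freq(int64_t freq)
{
	if (freq > 0) {
		return (freq / 1000LL) << 16;
	}
	return -((-freq / 1000LL) << 16);
}

int
main(void)
{
	printf("%lld %lld\n",
	    (long long)scale_freq(5000000),     /* 327680000 */
	    (long long)scale_freq(-5000000));   /* -327680000 */
	return 0;
}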
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -66,10 +66,10 @@ static LIST_HEAD(personalist, persona) all_personas; static uint32_t g_total_personas; -uint32_t g_max_personas = MAX_PERSONAS; +const uint32_t g_max_personas = MAX_PERSONAS; struct persona *system_persona = NULL; struct persona *proxy_system_persona = NULL; -#if CONFIG_EMBEDDED +#if !defined(XNU_TARGET_OS_OSX) int unique_persona = 1; #else int unique_persona = 0; @@ -77,17 +77,15 @@ int unique_persona = 0; static uid_t g_next_persona_id; -lck_mtx_t all_personas_lock; -lck_attr_t *persona_lck_attr; -lck_grp_t *persona_lck_grp; -lck_grp_attr_t *persona_lck_grp_attr; +LCK_GRP_DECLARE(persona_lck_grp, "personas"); +LCK_MTX_DECLARE(all_personas_lock, &persona_lck_grp); os_refgrp_decl(static, persona_refgrp, "persona", NULL); -static zone_t persona_zone; +static ZONE_DECLARE(persona_zone, "personas", sizeof(struct persona), ZC_ZFREE_CLEARMEM); kauth_cred_t g_default_persona_cred; -extern struct auditinfo_addr *audit_default_aia_p; +extern struct auditinfo_addr * const audit_default_aia_p; #define lock_personas() lck_mtx_lock(&all_personas_lock) #define unlock_personas() lck_mtx_unlock(&all_personas_lock) @@ -111,18 +109,6 @@ personas_bootstrap(void) g_next_persona_id = FIRST_PERSONA_ID; - persona_lck_grp_attr = lck_grp_attr_alloc_init(); - - persona_lck_grp = lck_grp_alloc_init("personas", persona_lck_grp_attr); - persona_lck_attr = lck_attr_alloc_init(); - - lck_mtx_init(&all_personas_lock, persona_lck_grp, persona_lck_attr); - - persona_zone = zinit(sizeof(struct persona), - MAX_PERSONAS * sizeof(struct persona), - MAX_PERSONAS, "personas"); - assert(persona_zone != NULL); - /* * setup the default credentials that a persona temporarily * inherits (to work around kauth APIs) @@ -149,7 +135,7 @@ personas_bootstrap(void) } struct persona * -persona_alloc(uid_t id, const char *login, int type, char *path, int *error) +persona_alloc(uid_t id, const char *login, persona_type_t type, char *path, int *error) { struct persona *persona; int err = 0; @@ -191,7 +177,7 @@ persona_alloc(uid_t id, const char *login, int type, char *path, int *error) persona_dbg("Starting persona allocation for: '%s'", persona->pna_login); LIST_INIT(&persona->pna_members); - lck_mtx_init(&persona->pna_lock, persona_lck_grp, persona_lck_attr); + lck_mtx_init(&persona->pna_lock, &persona_lck_grp, LCK_ATTR_NULL); os_ref_init(&persona->pna_refcount, &persona_refgrp); /* @@ -525,7 +511,7 @@ persona_put(struct persona *persona) persona_mkinvalid(persona); } if (persona->pna_path != NULL) { - FREE_ZONE(persona->pna_path, MAXPATHLEN, M_NAMEI); + zfree(ZV_NAMEI, persona->pna_path); } persona_unlock(persona); unlock_personas(); @@ -601,7 +587,7 @@ persona_lookup_and_invalidate(uid_t id) } int -persona_find_by_type(int persona_type, struct persona **persona, size_t *plen) +persona_find_by_type(persona_type_t persona_type, struct persona **persona, size_t *plen) { return persona_find_all(NULL, PERSONA_ID_NONE, persona_type, persona, plen); } @@ -614,7 +600,7 @@ persona_find(const char *login, uid_t uid, } int -persona_find_all(const char *login, uid_t uid, int persona_type, +persona_find_all(const char *login, uid_t uid, persona_type_t persona_type, struct persona **persona, size_t *plen) { struct persona *tmp; @@ -785,7 +771,7 @@ proc_reset_persona_internal(proc_t p, persona_reset_op_t op, switch (op) { case PROC_REMOVE_PERSONA: old_persona = p->p_persona; - /* fall through */ + OS_FALLTHROUGH; case PROC_RESET_OLD_PERSONA: break; default: @@ -796,6 +782,7 @@ 
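The kern_persona.c changes above replace the boot-time lck_grp_alloc_init()/lck_attr_alloc_init()/lck_mtx_init()/zinit() sequence with the compile-time LCK_GRP_DECLARE, LCK_MTX_DECLARE and ZONE_DECLARE macros, so the lock group, mutex and zone exist before any initializer runs and personas_bootstrap() shrinks accordingly. A rough sketch of a hypothetical subsystem adopting the same pattern (struct gadget and all gadget_* names are invented; header paths are approximate):

#include <kern/locks.h>
#include <kern/zalloc.h>

struct gadget {
	lck_mtx_t g_lock;
	int       g_state;
};

/* Statically registered lock group, mutex and zone -- no *_alloc_init()
 * or zinit() calls in a bootstrap routine. */
LCK_GRP_DECLARE(gadget_lck_grp, "gadgets");
LCK_MTX_DECLARE(gadget_list_lock, &gadget_lck_grp);
static ZONE_DECLARE(gadget_zone, "gadgets",
    sizeof(struct gadget), ZC_ZFREE_CLEARMEM);

static int gadget_count;

static struct gadget *
gadget_alloc(void)
{
	/* Z_ZERO replaces the field-by-field zeroing callers used to do. */
	struct gadget *g = zalloc_flags(gadget_zone, Z_WAITOK | Z_ZERO);

	lck_mtx_init(&g->g_lock, &gadget_lck_grp, LCK_ATTR_NULL);

	lck_mtx_lock(&gadget_list_lock);
	gadget_count++;
	lck_mtx_unlock(&gadget_list_lock);
	return g;
}

static void
gadget_free(struct gadget *g)
{
	lck_mtx_lock(&gadget_list_lock);
	gadget_count--;
	lck_mtx_unlock(&gadget_list_lock);

	lck_mtx_destroy(&g->g_lock, &gadget_lck_grp);
	zfree(gadget_zone, g);
}

ZC_ZFREE_CLEARMEM additionally asks the zone allocator to scrub elements on free, which hardens against stale-data leaks from recycled zone memory.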
proc_reset_persona_internal(proc_t p, persona_reset_op_t op, /* unlock the new persona (locked on entry) */ persona_unlock(new_persona); /* lock the old persona and the process */ + assert(old_persona != NULL); persona_lock(old_persona); proc_lock(p); @@ -831,7 +818,8 @@ proc_set_cred_internal(proc_t p, struct persona *persona, struct persona *old_persona = NULL; kauth_cred_t my_cred, my_new_cred; uid_t old_uid, new_uid; - int count; + size_t count; + rlim_t nproc = proc_limitgetcur(p, RLIMIT_NPROC, TRUE); /* * This operation must be done under the proc trans lock @@ -878,12 +866,14 @@ proc_set_cred_internal(proc_t p, struct persona *persona, * the process or changing its credentials. */ if (new_uid != 0 && - (rlim_t)chgproccnt(new_uid, 0) > p->p_rlimit[RLIMIT_NPROC].rlim_cur) { + (rlim_t)chgproccnt(new_uid, 0) > nproc) { pna_err("PID:%d hit proc rlimit in new persona(%d): %s", p->p_pid, new_uid, persona_desc(persona, 1)); *rlim_error = EACCES; - (void)proc_reset_persona_internal(p, PROC_RESET_OLD_PERSONA, - old_persona, persona); + if (old_persona) { + (void)proc_reset_persona_internal(p, PROC_RESET_OLD_PERSONA, + old_persona, persona); + } kauth_cred_unref(&my_new_cred); return NULL; } @@ -943,7 +933,7 @@ set_proc_cred: if (new_uid != old_uid) { count = chgproccnt(old_uid, -1); - persona_dbg("Decrement %s:%d proc_count to: %d", + persona_dbg("Decrement %s:%d proc_count to: %lu", old_persona ? "Persona" : "UID", old_uid, count); /* @@ -952,7 +942,7 @@ set_proc_cred: * as in fork1() */ count = chgproccnt(new_uid, 1); - persona_dbg("Increment Persona:%d (UID:%d) proc_count to: %d", + persona_dbg("Increment Persona:%d (UID:%d) proc_count to: %lu", new_uid, kauth_cred_getuid(my_new_cred), count); } @@ -1288,7 +1278,7 @@ persona_get_gid(struct persona *persona) } int -persona_set_groups(struct persona *persona, gid_t *groups, unsigned ngroups, uid_t gmuid) +persona_set_groups(struct persona *persona, gid_t *groups, size_t ngroups, uid_t gmuid) { int ret = 0; kauth_cred_t my_cred, new_cred; @@ -1312,7 +1302,7 @@ persona_set_groups(struct persona *persona, gid_t *groups, unsigned ngroups, uid my_cred = persona->pna_cred; kauth_cred_ref(my_cred); - new_cred = kauth_cred_setgroups(my_cred, groups, (int)ngroups, gmuid); + new_cred = kauth_cred_setgroups(my_cred, groups, ngroups, gmuid); if (new_cred != my_cred) { persona->pna_cred = new_cred; } @@ -1324,7 +1314,7 @@ out_unlock: } int -persona_get_groups(struct persona *persona, unsigned *ngroups, gid_t *groups, unsigned groups_sz) +persona_get_groups(struct persona *persona, size_t *ngroups, gid_t *groups, size_t groups_sz) { int ret = EINVAL; if (!persona || !persona->pna_cred || !groups || !ngroups || groups_sz > NGROUPS) { @@ -1335,9 +1325,9 @@ persona_get_groups(struct persona *persona, unsigned *ngroups, gid_t *groups, un persona_lock(persona); if (persona_valid(persona)) { - int kauth_ngroups = (int)groups_sz; + size_t kauth_ngroups = groups_sz; kauth_cred_getgroups(persona->pna_cred, groups, &kauth_ngroups); - *ngroups = (unsigned)kauth_ngroups; + *ngroups = (uint32_t)kauth_ngroups; ret = 0; } persona_unlock(persona); diff --git a/bsd/kern/kern_physio.c b/bsd/kern/kern_physio.c index ccab3bbc3..3d8b00ef3 100644 --- a/bsd/kern/kern_physio.c +++ b/bsd/kern/kern_physio.c @@ -88,7 +88,8 @@ physio( void (*f_strategy)(buf_t), int blocksize) { struct proc *p = current_proc(); - int error, i, buf_allocated, todo, iosize; + int error, i, buf_allocated, todo; + size_t iosize; int orig_bflags = 0; int64_t done; @@ -156,9 +157,11 @@ physio( void 
(*f_strategy)(buf_t), * of the 'while' loop. */ while (uio_resid(uio) > 0) { - if ((iosize = uio_curriovlen(uio)) > MAXPHYSIO_WIRED) { + iosize = uio_curriovlen(uio); + if (iosize > MAXPHYSIO_WIRED) { iosize = MAXPHYSIO_WIRED; } + /* * make sure we're set to issue a fresh I/O * in the right direction @@ -167,7 +170,8 @@ physio( void (*f_strategy)(buf_t), /* [set up the buffer for a maximum-sized transfer] */ buf_setblkno(bp, uio_offset(uio) / blocksize); - buf_setcount(bp, iosize); + assert(iosize <= UINT32_MAX); + buf_setcount(bp, (uint32_t)iosize); buf_setdataptr(bp, (uintptr_t)CAST_DOWN(caddr_t, uio_curriovbase(uio))); /* @@ -187,7 +191,7 @@ physio( void (*f_strategy)(buf_t), error = vslock(CAST_USER_ADDR_T(buf_dataptr(bp)), (user_size_t)todo); if (error) { - goto done; + goto finished; } } @@ -213,18 +217,19 @@ physio( void (*f_strategy)(buf_t), * of data to transfer] */ done = buf_count(bp) - buf_resid(bp); - uio_update(uio, done); + assert(0 <= done && done <= UINT32_MAX); + uio_update(uio, (user_size_t)done); /* * Now, check for an error. * Also, handle weird end-of-disk semantics. */ if (error || done < todo) { - goto done; + goto finished; } } -done: +finished: if (buf_allocated) { buf_free(bp); } else { diff --git a/bsd/kern/kern_proc.c b/bsd/kern/kern_proc.c index 01f548e79..21fa06635 100644 --- a/bsd/kern/kern_proc.c +++ b/bsd/kern/kern_proc.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2019 Apple Inc. All rights reserved. + * Copyright (c) 2000-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -113,7 +113,9 @@ #include #include #include +#include #include /* IOTaskHasEntitlement() */ +#include /* ipc_kobject_set_kobjidx() */ #ifdef CONFIG_32BIT_TELEMETRY #include @@ -129,6 +131,7 @@ #if CONFIG_MACF #include +#include #endif #include @@ -143,7 +146,7 @@ struct uidinfo { LIST_ENTRY(uidinfo) ui_hash; uid_t ui_uid; - long ui_proccnt; + size_t ui_proccnt; }; #define UIHASH(uid) (&uihashtbl[(uid) & uihash]) LIST_HEAD(uihashhead, uidinfo) * uihashtbl; @@ -176,10 +179,10 @@ int syscallfilter_disable = 0; /* Name to give to core files */ #if defined(XNU_TARGET_OS_BRIDGE) __XNU_PRIVATE_EXTERN char corefilename[MAXPATHLEN + 1] = {"/private/var/internal/%N.core"}; -#elif CONFIG_EMBEDDED -__XNU_PRIVATE_EXTERN char corefilename[MAXPATHLEN + 1] = {"/private/var/cores/%N.core"}; -#else +#elif defined(XNU_TARGET_OS_OSX) __XNU_PRIVATE_EXTERN char corefilename[MAXPATHLEN + 1] = {"/cores/core.%P"}; +#else +__XNU_PRIVATE_EXTERN char corefilename[MAXPATHLEN + 1] = {"/private/var/cores/%N.core"}; #endif #endif @@ -187,6 +190,11 @@ __XNU_PRIVATE_EXTERN char corefilename[MAXPATHLEN + 1] = {"/cores/core.%P"}; #include #endif +ZONE_DECLARE(pgrp_zone, "pgrp", + sizeof(struct pgrp), ZC_ZFREE_CLEARMEM); +ZONE_DECLARE(session_zone, "session", + sizeof(struct session), ZC_ZFREE_CLEARMEM); + typedef uint64_t unaligned_u64 __attribute__((aligned(1))); static void orphanpg(struct pgrp * pg); @@ -194,7 +202,9 @@ void proc_name_kdp(task_t t, char * buf, int size); boolean_t proc_binary_uuid_kdp(task_t task, uuid_t uuid); int proc_threadname_kdp(void * uth, char * buf, size_t size); void proc_starttime_kdp(void * p, unaligned_u64 *tv_sec, unaligned_u64 *tv_usec, unaligned_u64 *abstime); +void proc_archinfo_kdp(void* p, cpu_type_t* cputype, cpu_subtype_t* cpusubtype); char * proc_name_address(void * p); +char * proc_longname_address(void *); static void pgrp_add(struct pgrp * pgrp, proc_t parent, proc_t child); static void pgrp_remove(proc_t p); @@ -246,13 +256,13 @@ 
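In the physio() hunk above the per-iteration transfer size is now kept in a size_t and only narrowed at the buf_setcount()/uio_update() boundaries, with an assert that the value actually fits; the same clamp-or-assert-then-cast shape recurs throughout this patch wherever 64-bit quantities feed 32-bit fields. A tiny standalone version of the idiom (the helper name and the ceiling value are illustrative, not xnu's):

#include <assert.h>
#include <stdint.h>
#include <stddef.h>

#define MAX_WIRED_IO  (16u * 1024 * 1024)   /* illustrative ceiling, not MAXPHYSIO_WIRED's real value */

/* Clamp a request to the wired-I/O ceiling, then narrow it for a 32-bit
 * byte-count field, asserting that the narrowed value is exact. */
static uint32_t
clamp_io_size(size_t iosize)
{
	if (iosize > MAX_WIRED_IO) {
		iosize = MAX_WIRED_IO;
	}
	assert(iosize <= UINT32_MAX);
	return (uint32_t)iosize;
}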
procinit(void) * a given user is using. This routine protects the uihash * with the list lock */ -int +size_t chgproccnt(uid_t uid, int diff) { struct uidinfo *uip; struct uidinfo *newuip = NULL; struct uihashhead *uipp; - int retval; + size_t retval; again: proc_list_lock(); @@ -269,13 +279,10 @@ again: proc_list_unlock(); goto out; } - if (uip->ui_proccnt < 0) { - panic("chgproccnt: procs < 0"); - } LIST_REMOVE(uip, ui_hash); retval = 0; proc_list_unlock(); - FREE_ZONE(uip, sizeof(*uip), M_PROC); + FREE(uip, M_PROC); goto out; } if (diff <= 0) { @@ -297,14 +304,14 @@ again: goto out; } proc_list_unlock(); - MALLOC_ZONE(newuip, struct uidinfo *, sizeof(*uip), M_PROC, M_WAITOK); + MALLOC(newuip, struct uidinfo *, sizeof(*uip), M_PROC, M_WAITOK); if (newuip == NULL) { panic("chgproccnt: M_PROC zone depleted"); } goto again; out: if (newuip != NULL) { - FREE_ZONE(newuip, sizeof(*uip), M_PROC); + FREE(newuip, M_PROC); } return retval; } @@ -425,6 +432,41 @@ proc_findthread(thread_t thread) return p; } +/* + * Returns process identity of a given process. Calling this function is not + * racy for a current process or if a reference to the process is held. + */ +struct proc_ident +proc_ident(proc_t p) +{ + struct proc_ident ident = { + .p_pid = proc_pid(p), + .p_uniqueid = proc_uniqueid(p), + .p_idversion = proc_pidversion(p), + }; + + return ident; +} + +proc_t +proc_find_ident(struct proc_ident const *ident) +{ + proc_t proc = PROC_NULL; + + proc = proc_find(ident->p_pid); + if (proc == PROC_NULL) { + return PROC_NULL; + } + + if (proc_uniqueid(proc) != ident->p_uniqueid || + proc_pidversion(proc) != ident->p_idversion) { + proc_rele(proc); + return PROC_NULL; + } + + return proc; +} + void uthread_reset_proc_refcount(void *uthread) { @@ -797,6 +839,20 @@ proc_checkdeadrefs(__unused proc_t p) #endif } + +__attribute__((always_inline, visibility("hidden"))) +void +proc_require(proc_t proc, proc_require_flags_t flags) +{ + if ((flags & PROC_REQUIRE_ALLOW_NULL) && proc == PROC_NULL) { + return; + } + if ((flags & PROC_REQUIRE_ALLOW_KERNPROC) && proc == &proc0) { + return; + } + zone_id_require(ZONE_ID_PROC, sizeof(struct proc), proc); +} + int proc_pid(proc_t p) { @@ -824,6 +880,17 @@ proc_original_ppid(proc_t p) return -1; } +int +proc_starttime(proc_t p, struct timeval *tv) +{ + if (p != NULL && tv != NULL) { + tv->tv_sec = p->p_start.tv_sec; + tv->tv_usec = p->p_start.tv_usec; + return 0; + } + return EINVAL; +} + int proc_selfpid(void) { @@ -853,7 +920,7 @@ proc_csflags(proc_t p, uint64_t *flags) } uint32_t -proc_platform(proc_t p) +proc_platform(const proc_t p) { if (p != NULL) { return p->p_platform; @@ -861,6 +928,15 @@ proc_platform(proc_t p) return (uint32_t)-1; } +uint32_t +proc_min_sdk(proc_t p) +{ + if (p != NULL) { + return p->p_min_sdk; + } + return (uint32_t)-1; +} + uint32_t proc_sdk(proc_t p) { @@ -1029,16 +1105,32 @@ proc_starttime_kdp(void *p, unaligned_u64 *tv_sec, unaligned_u64 *tv_usec, unali } } +void +proc_archinfo_kdp(void* p, cpu_type_t* cputype, cpu_subtype_t* cpusubtype) +{ + proc_t pp = (proc_t)p; + if (pp != PROC_NULL) { + *cputype = pp->p_cputype; + *cpusubtype = pp->p_cpusubtype; + } +} + char * proc_name_address(void *p) { return &((proc_t)p)->p_comm[0]; } +char * +proc_longname_address(void *p) +{ + return &((proc_t)p)->p_name[0]; +} + char * proc_best_name(proc_t p) { - if (p->p_name[0] != 0) { + if (p->p_name[0] != '\0') { return &p->p_name[0]; } return &p->p_comm[0]; @@ -1152,6 +1244,8 @@ proc_task(proc_t proc) thread_t proc_thread(proc_t proc) { + 
LCK_MTX_ASSERT(&proc->p_mlock, LCK_MTX_ASSERT_OWNED); + uthread_t uth = TAILQ_FIRST(&proc->p_uthlist); if (uth != NULL) { @@ -1189,6 +1283,15 @@ proc_is64bit_data(proc_t p) return (int)task_get_64bit_data(p->task); } +int +proc_isinitproc(proc_t p) +{ + if (initproc == NULL) { + return 0; + } + return p == initproc; +} + int proc_pidversion(proc_t p) { @@ -1553,7 +1656,6 @@ pinsertchild(proc_t parent, proc_t child) struct pgrp * pg; LIST_INIT(&child->p_children); - TAILQ_INIT(&child->p_evlist); child->p_pptr = parent; child->p_ppid = parent->p_pid; child->p_original_ppid = parent->p_pid; @@ -1616,11 +1718,7 @@ enterpgrp(proc_t p, pid_t pgid, int mksess) panic("enterpgrp: new pgrp and pid != pgid"); } #endif - MALLOC_ZONE(pgrp, struct pgrp *, sizeof(struct pgrp), M_PGRP, - M_WAITOK); - if (pgrp == NULL) { - panic("enterpgrp: M_PGRP zone depleted"); - } + pgrp = zalloc_flags(pgrp_zone, Z_WAITOK | Z_ZERO); if ((np = proc_find(savepid)) == NULL || np != p) { if (np != PROC_NULL) { proc_rele(np); @@ -1631,7 +1729,7 @@ enterpgrp(proc_t p, pid_t pgid, int mksess) if (procsp != SESSION_NULL) { session_rele(procsp); } - FREE_ZONE(pgrp, sizeof(struct pgrp), M_PGRP); + zfree(pgrp_zone, pgrp); return ESRCH; } proc_rele(np); @@ -1641,18 +1739,10 @@ enterpgrp(proc_t p, pid_t pgid, int mksess) /* * new session */ - MALLOC_ZONE(sess, struct session *, - sizeof(struct session), M_SESSION, M_WAITOK); - if (sess == NULL) { - panic("enterpgrp: M_SESSION zone depleted"); - } + sess = zalloc_flags(session_zone, Z_WAITOK | Z_ZERO); sess->s_leader = p; sess->s_sid = p->p_pid; sess->s_count = 1; - sess->s_ttyvp = NULL; - sess->s_ttyp = TTY_NULL; - sess->s_flags = 0; - sess->s_listflags = 0; sess->s_ttypgrpid = NO_PID; lck_mtx_init(&sess->s_mlock, proc_mlock_grp, proc_lck_attr); @@ -1664,6 +1754,7 @@ enterpgrp(proc_t p, pid_t pgid, int mksess) LIST_INSERT_HEAD(SESSHASH(sess->s_sid), sess, s_hash); proc_list_unlock(); pgrp->pg_session = sess; + p->p_sessionid = sess->s_sid; #if DIAGNOSTIC if (p != current_proc()) { panic("enterpgrp: mksession and p != curproc"); @@ -1672,6 +1763,7 @@ enterpgrp(proc_t p, pid_t pgid, int mksess) } else { proc_list_lock(); pgrp->pg_session = procsp; + p->p_sessionid = procsp->s_sid; if ((pgrp->pg_session->s_listflags & (S_LIST_TERM | S_LIST_DEAD)) != 0) { panic("enterpgrp: providing ref to terminating session "); @@ -1684,11 +1776,8 @@ enterpgrp(proc_t p, pid_t pgid, int mksess) lck_mtx_init(&pgrp->pg_mlock, proc_mlock_grp, proc_lck_attr); LIST_INIT(&pgrp->pg_members); - pgrp->pg_membercnt = 0; - pgrp->pg_jobc = 0; proc_list_lock(); pgrp->pg_refcount = 1; - pgrp->pg_listflags = 0; LIST_INSERT_HEAD(PGRPHASH(pgid), pgrp, pg_hash); proc_list_unlock(); } else if (pgrp == mypgrp) { @@ -1810,12 +1899,12 @@ pgdelete_dropref(struct pgrp *pgrp) proc_list_unlock(); lck_mtx_destroy(&sessp->s_mlock, proc_mlock_grp); - FREE_ZONE(sessp, sizeof(struct session), M_SESSION); + zfree(session_zone, sessp); } else { proc_list_unlock(); } lck_mtx_destroy(&pgrp->pg_mlock, proc_mlock_grp); - FREE_ZONE(pgrp, sizeof(*pgrp), M_PGRP); + zfree(pgrp_zone, pgrp); } @@ -1976,11 +2065,11 @@ static u_int pidlist_alloc(pidlist_t *pl, u_int needed) { while (pl->pl_nalloc < needed) { - pidlist_entry_t *pe = kalloc(sizeof(*pe)); + pidlist_entry_t *pe = kheap_alloc(KHEAP_TEMP, sizeof(*pe), + Z_WAITOK | Z_ZERO); if (NULL == pe) { panic("no space for pidlist entry"); } - pe->pe_nused = 0; SLIST_INSERT_HEAD(&pl->pl_head, pe, pe_link); pl->pl_nalloc += (sizeof(pe->pe_pid) / sizeof(pe->pe_pid[0])); } @@ -1993,7 +2082,7 @@ 
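The proc_ident()/proc_find_ident() pair added earlier in this kern_proc.c diff snapshots a process's pid, unique id and pid version; a later proc_find_ident() only succeeds if all three still match, which protects deferred work against pid reuse. A sketch of the intended usage from a hypothetical caller (struct deferred_kill and both functions are invented; header placement is approximate):

#include <sys/proc.h>        /* proc_ident(), proc_find_ident() */
#include <sys/signalvar.h>   /* psignal() */
#include <sys/errno.h>

/* Hypothetical record describing work to run against a process later. */
struct deferred_kill {
	struct proc_ident dk_ident;
};

static void
deferred_kill_arm(struct deferred_kill *dk, proc_t p)
{
	/* Snapshot is safe here because the caller holds a reference to p
	 * (or p is the current process). */
	dk->dk_ident = proc_ident(p);
}

static int
deferred_kill_fire(struct deferred_kill *dk)
{
	/* Re-resolve the identity; this fails cleanly if the pid has been
	 * recycled by an unrelated process in the meantime. */
	proc_t p = proc_find_ident(&dk->dk_ident);

	if (p == PROC_NULL) {
		return ESRCH;
	}
	psignal(p, SIGKILL);
	proc_rele(p);
	return 0;
}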
pidlist_free(pidlist_t *pl) pidlist_entry_t *pe; while (NULL != (pe = SLIST_FIRST(&pl->pl_head))) { SLIST_FIRST(&pl->pl_head) = SLIST_NEXT(pe, pe_link); - kfree(pe, sizeof(*pe)); + kheap_free(KHEAP_TEMP, pe, sizeof(*pe)); } pl->pl_nalloc = 0; } @@ -2092,12 +2181,38 @@ out: pidlist_free(pl); } +boolean_t +proc_is_translated(proc_t p __unused) +{ + return 0; +} + int proc_is_classic(proc_t p __unused) { return 0; } +bool +proc_is_exotic( + proc_t p) +{ + if (p == NULL) { + return false; + } + return task_is_exotic(proc_task(p)); +} + +bool +proc_is_alien( + proc_t p) +{ + if (p == NULL) { + return false; + } + return task_is_alien(proc_task(p)); +} + /* XXX Why does this function exist? Need to kill it off... */ proc_t current_proc_EXTERNAL(void) @@ -2111,6 +2226,12 @@ proc_is_forcing_hfs_case_sensitivity(proc_t p) return (p->p_vfs_iopolicy & P_VFS_IOPOLICY_FORCE_HFS_CASE_SENSITIVITY) ? 1 : 0; } +bool +proc_ignores_content_protection(proc_t p) +{ + return os_atomic_load(&p->p_vfs_iopolicy, relaxed) & P_VFS_IOPOLICY_IGNORE_CONTENT_PROTECTION; +} + #if CONFIG_COREDUMP /* * proc_core_name(name, uid, pid) @@ -2346,6 +2467,7 @@ csops_internal(pid_t pid, int ops, user_addr_t uaddr, user_size_t usersize, user proc_lock(pt); if ((pt->p_csflags & CS_VALID) == CS_VALID) { /* is currently valid */ pt->p_csflags &= ~CS_VALID; /* set invalid */ + cs_process_invalidated(pt); if ((pt->p_csflags & CS_KILL) == CS_KILL) { pt->p_csflags |= CS_KILLED; proc_unlock(pt); @@ -2464,6 +2586,10 @@ csops_internal(pid_t pid, int ops, user_addr_t uaddr, user_size_t usersize, user proc_lock(pt); if (pt->p_csflags & CS_VALID) { + if ((flags & CS_ENFORCEMENT) && + !(pt->p_csflags & CS_ENFORCEMENT)) { + vm_map_cs_enforcement_set(get_task_map(pt->task), TRUE); + } pt->p_csflags |= flags; } else { error = EINVAL; @@ -2485,8 +2611,8 @@ csops_internal(pid_t pid, int ops, user_addr_t uaddr, user_size_t usersize, user * your application without library validation, or * fork an untrusted child. 
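proc_ignores_content_protection() above reads a policy bit from p_vfs_iopolicy with a relaxed os_atomic_load(); the matching setter added later in this patch (iopolicysys_vfs_ignore_content_protection()) flips the bit with os_atomic_or()/os_atomic_andnot(), so no process lock is needed around the flag. A minimal sketch of that lock-free flag idiom on a stand-in structure (the struct, the flag value and the header path are assumptions):

#include <os/atomic_private.h>   /* os_atomic_or/andnot/load */
#include <stdint.h>
#include <stdbool.h>

#define FAKE_IOPOL_IGNORE_CP  0x0040u   /* illustrative bit, not the real P_VFS_IOPOLICY_* value */

struct fake_proc {
	uint16_t p_vfs_iopolicy;
};

static void
iopolicy_flag_set(struct fake_proc *p, bool on)
{
	if (on) {
		os_atomic_or(&p->p_vfs_iopolicy, FAKE_IOPOL_IGNORE_CP, relaxed);
	} else {
		os_atomic_andnot(&p->p_vfs_iopolicy, FAKE_IOPOL_IGNORE_CP, relaxed);
	}
}

static bool
iopolicy_flag_get(struct fake_proc *p)
{
	return (os_atomic_load(&p->p_vfs_iopolicy, relaxed) & FAKE_IOPOL_IGNORE_CP) != 0;
}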
*/ -#ifdef CONFIG_EMBEDDED - // On embedded platforms, we don't support dropping LV +#if !defined(XNU_TARGET_OS_OSX) + // We only support dropping library validation on macOS error = ENOTSUP; #else /* @@ -2564,7 +2690,7 @@ csops_internal(pid_t pid, int ops, user_addr_t uaddr, user_size_t usersize, user } length = strlen(identity) + 1; /* include NUL */ - idlen = htonl(length + sizeof(fakeheader)); + idlen = htonl((uint32_t)(length + sizeof(fakeheader))); memcpy(&fakeheader[4], &idlen, sizeof(idlen)); error = copyout(fakeheader, uaddr, sizeof(fakeheader)); @@ -2699,13 +2825,13 @@ proc_iterate( switch (callout_ret) { case PROC_RETURNED_DONE: proc_rele(p); - /* FALLTHROUGH */ + OS_FALLTHROUGH; case PROC_CLAIMED_DONE: goto out; case PROC_RETURNED: proc_rele(p); - /* FALLTHROUGH */ + OS_FALLTHROUGH; case PROC_CLAIMED: break; default: @@ -2723,13 +2849,13 @@ proc_iterate( switch (callout_ret) { case PROC_RETURNED_DONE: proc_drop_zombref(p); - /* FALLTHROUGH */ + OS_FALLTHROUGH; case PROC_CLAIMED_DONE: goto out; case PROC_RETURNED: proc_drop_zombref(p); - /* FALLTHROUGH */ + OS_FALLTHROUGH; case PROC_CLAIMED: break; default: @@ -2837,13 +2963,13 @@ proc_childrenwalk( switch (callout_ret) { case PROC_RETURNED_DONE: proc_rele(p); - /* FALLTHROUGH */ + OS_FALLTHROUGH; case PROC_CLAIMED_DONE: goto out; case PROC_RETURNED: proc_rele(p); - /* FALLTHROUGH */ + OS_FALLTHROUGH; case PROC_CLAIMED: break; default: @@ -2930,12 +3056,12 @@ pgrp_iterate( switch (callout_ret) { case PROC_RETURNED: proc_rele(p); - /* FALLTHROUGH */ + OS_FALLTHROUGH; case PROC_CLAIMED: break; case PROC_RETURNED_DONE: proc_rele(p); - /* FALLTHROUGH */ + OS_FALLTHROUGH; case PROC_CLAIMED_DONE: goto out; @@ -2956,6 +3082,7 @@ pgrp_add(struct pgrp * pgrp, struct proc * parent, struct proc * child) proc_list_lock(); child->p_pgrp = pgrp; child->p_pgrpid = pgrp->pg_id; + child->p_sessionid = pgrp->pg_session->s_sid; child->p_listflag |= P_LIST_INPGRP; /* * When pgrp is being freed , a process can still @@ -3077,6 +3204,7 @@ pgrp_replace(struct proc * p, struct pgrp * newpg) proc_list_lock(); p->p_pgrp = newpg; p->p_pgrpid = newpg->pg_id; + p->p_sessionid = newpg->pg_session->s_sid; p->p_listflag |= P_LIST_INPGRP; /* * When pgrp is being freed , a process can still @@ -3229,7 +3357,7 @@ session_rele(struct session *sess) } proc_list_unlock(); lck_mtx_destroy(&sess->s_mlock, proc_mlock_grp); - FREE_ZONE(sess, sizeof(struct session), M_SESSION); + zfree(session_zone, sess); } else { proc_list_unlock(); } @@ -3381,15 +3509,7 @@ proc_pgrpid(proc_t p) pid_t proc_sessionid(proc_t p) { - pid_t sid = -1; - struct session * sessp = proc_session(p); - - if (sessp != SESSION_NULL) { - sid = sessp->s_sid; - session_rele(sessp); - } - - return sid; + return p->p_sessionid; } pid_t @@ -3606,10 +3726,6 @@ extern uint64_t vm_compressor_pages_compressed(void); struct timeval last_no_space_action = {.tv_sec = 0, .tv_usec = 0}; -#if DEVELOPMENT || DEBUG -extern boolean_t kill_on_no_paging_space; -#endif /* DEVELOPMENT || DEBUG */ - #define MB_SIZE (1024 * 1024ULL) boolean_t memorystatus_kill_on_VM_compressor_space_shortage(boolean_t); @@ -3833,14 +3949,20 @@ proc_send_synchronous_EXC_RESOURCE(proc_t p) return FALSE; } +#if CONFIG_MACF size_t proc_get_syscall_filter_mask_size(int which) { - if (which == SYSCALL_MASK_UNIX) { + switch (which) { + case SYSCALL_MASK_UNIX: return nsysent; + case SYSCALL_MASK_MACH: + return mach_trap_count; + case SYSCALL_MASK_KOBJ: + return mach_kobj_count; + default: + return 0; } - - return 0; } int @@ -3849,18 +3971,94 @@ 
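proc_get_syscall_filter_mask_size() above now reports a per-class table size (UNIX syscalls, Mach traps, Mach kernel-object routines), and the reworked proc_set_syscall_filter_mask() that follows validates a caller-supplied mask against that size before installing it. A hedged sketch of how a policy module might build and install a one-byte-per-entry UNIX mask; the allocation heap, the ownership comment and the bit meaning are assumptions, only the two proc_*_syscall_filter_mask calls come from this patch:

#include <sys/proc.h>
#include <kern/kalloc.h>

/* Hypothetical: filter exactly one UNIX syscall number for process p. */
static int
install_unix_filter(proc_t p, int target_syscall)
{
	size_t len = proc_get_syscall_filter_mask_size(SYSCALL_MASK_UNIX);
	unsigned char *mask;
	int error;

	if (len == 0 || target_syscall < 0 || (size_t)target_syscall >= len) {
		return EINVAL;
	}

	mask = kheap_alloc(KHEAP_DEFAULT, len, Z_WAITOK | Z_ZERO);
	if (mask == NULL) {
		return ENOMEM;
	}
	mask[target_syscall] = 1;   /* assumed encoding: non-zero marks a filtered entry */

	error = proc_set_syscall_filter_mask(p, SYSCALL_MASK_UNIX, mask, len);
	if (error != 0) {
		kheap_free(KHEAP_DEFAULT, mask, len);
	}
	/* On success the kernel appears to keep the pointer (the hunk below
	 * stores maskptr directly), so the buffer must not be freed here. */
	return error;
}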
proc_set_syscall_filter_mask(proc_t p, int which, unsigned char *maskptr, size_t #if DEVELOPMENT || DEBUG if (syscallfilter_disable) { printf("proc_set_syscall_filter_mask: attempt to set policy for pid %d, but disabled by boot-arg\n", proc_pid(p)); - return KERN_SUCCESS; + return 0; } #endif // DEVELOPMENT || DEBUG - if (which != SYSCALL_MASK_UNIX || - (maskptr != NULL && masklen != nsysent)) { + switch (which) { + case SYSCALL_MASK_UNIX: + if (maskptr != NULL && masklen != nsysent) { + return EINVAL; + } + p->syscall_filter_mask = maskptr; + break; + case SYSCALL_MASK_MACH: + if (maskptr != NULL && masklen != (size_t)mach_trap_count) { + return EINVAL; + } + mac_task_set_mach_filter_mask(p->task, maskptr); + break; + case SYSCALL_MASK_KOBJ: + if (maskptr != NULL && masklen != (size_t)mach_kobj_count) { + return EINVAL; + } + mac_task_set_kobj_filter_mask(p->task, maskptr); + break; + default: + return EINVAL; + } + + return 0; +} + +int +proc_set_syscall_filter_callbacks(syscall_filter_cbs_t cbs) +{ + if (cbs->version != SYSCALL_FILTER_CALLBACK_VERSION) { + return EINVAL; + } + + /* XXX register unix filter callback instead of using MACF hook. */ + + if (cbs->mach_filter_cbfunc || cbs->kobj_filter_cbfunc) { + if (mac_task_register_filter_callbacks(cbs->mach_filter_cbfunc, + cbs->kobj_filter_cbfunc) != 0) { + return EPERM; + } + } + + return 0; +} + +int +proc_set_syscall_filter_index(int which, int num, int index) +{ + switch (which) { + case SYSCALL_MASK_KOBJ: + if (ipc_kobject_set_kobjidx(num, index) != 0) { + return ENOENT; + } + break; + default: + return EINVAL; + } + + return 0; +} +#endif /* CONFIG_MACF */ + +int +proc_set_filter_message_flag(proc_t p, boolean_t flag) +{ + if (p == PROC_NULL) { return EINVAL; } - p->syscall_filter_mask = maskptr; + task_set_filter_msg_flag(proc_task(p), flag); - return KERN_SUCCESS; + return 0; +} + +int +proc_get_filter_message_flag(proc_t p, boolean_t *flag) +{ + if (p == PROC_NULL || flag == NULL) { + return EINVAL; + } + + *flag = task_get_filter_msg_flag(proc_task(p)); + + return 0; } bool diff --git a/bsd/kern/kern_prot.c b/bsd/kern/kern_prot.c index 4a4a662ed..d689b70d5 100644 --- a/bsd/kern/kern_prot.c +++ b/bsd/kern/kern_prot.c @@ -2033,6 +2033,30 @@ set_security_token(proc_t p) return set_security_token_task_internal(p, p->task); } +static void +proc_calc_audit_token(proc_t p, kauth_cred_t my_cred, audit_token_t *audit_token) +{ + posix_cred_t my_pcred = posix_cred_get(my_cred); + + /* + * The current layout of the Mach audit token explicitly + * adds these fields. But nobody should rely on such + * a literal representation. Instead, the BSM library + * provides a function to convert an audit token into + * a BSM subject. Use of that mechanism will isolate + * the user of the trailer from future representation + * changes. 
+ */ + audit_token->val[0] = my_cred->cr_audit.as_aia_p->ai_auid; + audit_token->val[1] = my_pcred->cr_uid; + audit_token->val[2] = my_pcred->cr_gid; + audit_token->val[3] = my_pcred->cr_ruid; + audit_token->val[4] = my_pcred->cr_rgid; + audit_token->val[5] = p->p_pid; + audit_token->val[6] = my_cred->cr_audit.as_aia_p->ai_asid; + audit_token->val[7] = p->p_idversion; +} + /* * Set the secrity token of the task with current euid and eguid * The function takes a proc and a task, where proc->task might point to a @@ -2042,10 +2066,9 @@ set_security_token(proc_t p) int set_security_token_task_internal(proc_t p, void *t) { + kauth_cred_t my_cred; security_token_t sec_token; audit_token_t audit_token; - kauth_cred_t my_cred; - posix_cred_t my_pcred; host_priv_t host_priv; task_t task = t; @@ -2064,7 +2087,8 @@ set_security_token_task_internal(proc_t p, void *t) } my_cred = kauth_cred_proc_ref(p); - my_pcred = posix_cred_get(my_cred); + + proc_calc_audit_token(p, my_cred, &audit_token); /* XXX mach_init doesn't have a p_ucred when it calls this function */ if (IS_VALID_CRED(my_cred)) { @@ -2075,24 +2099,6 @@ set_security_token_task_internal(proc_t p, void *t) sec_token.val[1] = 0; } - /* - * The current layout of the Mach audit token explicitly - * adds these fields. But nobody should rely on such - * a literal representation. Instead, the BSM library - * provides a function to convert an audit token into - * a BSM subject. Use of that mechanism will isolate - * the user of the trailer from future representation - * changes. - */ - audit_token.val[0] = my_cred->cr_audit.as_aia_p->ai_auid; - audit_token.val[1] = my_pcred->cr_uid; - audit_token.val[2] = my_pcred->cr_gid; - audit_token.val[3] = my_pcred->cr_ruid; - audit_token.val[4] = my_pcred->cr_rgid; - audit_token.val[5] = p->p_pid; - audit_token.val[6] = my_cred->cr_audit.as_aia_p->ai_asid; - audit_token.val[7] = p->p_idversion; - host_priv = (sec_token.val[0]) ? HOST_PRIV_NULL : host_priv_self(); #if CONFIG_MACF if (host_priv != HOST_PRIV_NULL && mac_system_check_host_priv(my_cred)) { @@ -2115,6 +2121,22 @@ set_security_token_task_internal(proc_t p, void *t) host_priv) != KERN_SUCCESS; } +void +proc_parent_audit_token(proc_t p, audit_token_t *token_out) +{ + proc_t parent; + kauth_cred_t my_cred; + + proc_list_lock(); + + parent = p->p_pptr; + my_cred = kauth_cred_proc_ref(parent); + proc_calc_audit_token(parent, my_cred, token_out); + kauth_cred_unref(&my_cred); + + proc_list_unlock(); +} + int get_audit_token_pid(audit_token_t *audit_token); diff --git a/bsd/kern/kern_resource.c b/bsd/kern/kern_resource.c index 7978cff02..662846584 100644 --- a/bsd/kern/kern_resource.c +++ b/bsd/kern/kern_resource.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2018 Apple Inc. All rights reserved. + * Copyright (c) 2000-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -141,6 +141,11 @@ int proc_get_rusage(proc_t p, int flavor, user_addr_t buffer, __unused int is_zo rlim_t maxdmap = MAXDSIZ; /* XXX */ rlim_t maxsmap = MAXSSIZ - PAGE_MAX_SIZE; /* XXX */ +/* For plimit reference count */ +os_refgrp_decl(, rlimit_refgrp, "plimit_refcnt", NULL); + +ZONE_DECLARE(plimit_zone, "plimit", sizeof(struct plimit), ZC_NOENCRYPT); + /* * Limits on the number of open files per process, and the number * of child processes per process. 
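The kern_prot.c hunks above pull the audit-token layout into a single helper, proc_calc_audit_token(), shared by set_security_token_task_internal() and the new proc_parent_audit_token(). As the retained comment stresses, consumers should not depend on the literal field layout; the toy reader below indexes the token only to show which values land where (the function is hypothetical, header placement is approximate, and real consumers should translate tokens through the BSM library):

#include <sys/proc.h>
#include <sys/systm.h>      /* printf() */
#include <mach/message.h>   /* audit_token_t */

static void
log_parent_identity(proc_t p)
{
	audit_token_t token;

	proc_parent_audit_token(p, &token);

	/* Per the layout above: val[0] = auid, val[5] = pid, val[7] = pidversion. */
	printf("parent pid %u (pidversion %u), audit uid %u\n",
	    token.val[5], token.val[7], token.val[0]);
}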
@@ -556,7 +561,7 @@ donice(struct proc *curp, struct proc *chgp, int n) } #endif proc_lock(chgp); - chgp->p_nice = n; + chgp->p_nice = (char)n; proc_unlock(chgp); (void)resetpriority(chgp); out: @@ -647,7 +652,7 @@ proc_set_darwin_role(proc_t curp, proc_t targetp, int priority) goto out; } - integer_t role = 0; + task_role_t role = TASK_UNSPECIFIED; if ((error = proc_darwin_role_to_task_role(priority, &role))) { goto out; @@ -778,12 +783,10 @@ static void do_background_socket(struct proc *p, thread_t thread) { #if SOCKETS - struct filedesc *fdp; - struct fileproc *fp; - int i = 0; - int background = false; + struct fileproc *fp; + int background = false; #if NECP - int update_necp = false; + int update_necp = false; #endif /* NECP */ proc_fdlock(p); @@ -801,20 +804,14 @@ do_background_socket(struct proc *p, thread_t thread) * to do here for the PRIO_DARWIN_THREAD case. */ if (thread == THREAD_NULL) { - fdp = p->p_fd; - - for (i = 0; i < fdp->fd_nfiles; i++) { - fp = fdp->fd_ofiles[i]; - if (fp == NULL || (fdp->fd_ofileflags[i] & UF_RESERVED) != 0) { - continue; - } - if (FILEGLOB_DTYPE(fp->f_fglob) == DTYPE_SOCKET) { - struct socket *sockp = (struct socket *)fp->f_fglob->fg_data; + fdt_foreach(fp, p) { + if (FILEGLOB_DTYPE(fp->fp_glob) == DTYPE_SOCKET) { + struct socket *sockp = (struct socket *)fp->fp_glob->fg_data; socket_set_traffic_mgt_flags(sockp, TRAFFIC_MGT_SO_BACKGROUND); sockp->so_background_thread = NULL; } #if NECP - else if (FILEGLOB_DTYPE(fp->f_fglob) == DTYPE_NETPOLICY) { + else if (FILEGLOB_DTYPE(fp->fp_glob) == DTYPE_NETPOLICY) { if (necp_set_client_as_background(p, fp, background)) { update_necp = true; } @@ -828,16 +825,11 @@ do_background_socket(struct proc *p, thread_t thread) * could potentially clear TRAFFIC_MGT_SO_BACKGROUND bit for * sockets created by other threads within this process. */ - fdp = p->p_fd; - for (i = 0; i < fdp->fd_nfiles; i++) { - struct socket *sockp; + fdt_foreach(fp, p) { + struct socket *sockp; - fp = fdp->fd_ofiles[i]; - if (fp == NULL || (fdp->fd_ofileflags[i] & UF_RESERVED) != 0) { - continue; - } - if (FILEGLOB_DTYPE(fp->f_fglob) == DTYPE_SOCKET) { - sockp = (struct socket *)fp->f_fglob->fg_data; + if (FILEGLOB_DTYPE(fp->fp_glob) == DTYPE_SOCKET) { + sockp = (struct socket *)fp->fp_glob->fg_data; /* skip if only clearing this thread's sockets */ if ((thread) && (sockp->so_background_thread != thread)) { continue; @@ -846,7 +838,7 @@ do_background_socket(struct proc *p, thread_t thread) sockp->so_background_thread = NULL; } #if NECP - else if (FILEGLOB_DTYPE(fp->f_fglob) == DTYPE_NETPOLICY) { + else if (FILEGLOB_DTYPE(fp->fp_glob) == DTYPE_NETPOLICY) { if (necp_set_client_as_background(p, fp, background)) { update_necp = true; } @@ -935,7 +927,6 @@ setrlimit(struct proc *p, struct setrlimit_args *uap, __unused int32_t *retval) /* * Returns: 0 Success * EINVAL - * ENOMEM Cannot copy limit structure * suser:EPERM * * Notes: EINVAL is returned both for invalid arguments, and in the @@ -943,62 +934,75 @@ setrlimit(struct proc *p, struct setrlimit_args *uap, __unused int32_t *retval) * in excess of the requested limit. */ int -dosetrlimit(struct proc *p, u_int which, struct rlimit *limp) +dosetrlimit(struct proc *p, u_int which, struct rlimit *newrlim) { - struct rlimit *alimp; - int error; - kern_return_t kr; - int posix = (which & _RLIMIT_POSIX_FLAG) ? 1 : 0; + struct rlimit rlim; + int error; + kern_return_t kr; + int posix = (which & _RLIMIT_POSIX_FLAG) ? 
1 : 0; /* Mask out POSIX flag, saved above */ which &= ~_RLIMIT_POSIX_FLAG; + /* Unknown resource */ if (which >= RLIM_NLIMITS) { return EINVAL; } - alimp = &p->p_rlimit[which]; - if (limp->rlim_cur > limp->rlim_max) { - return EINVAL; - } + /* + * Take a snapshot of the current rlimit values and read this throughout + * this routine. This minimizes the critical sections and allow other + * processes in the system to access the plimit while we are in the + * middle of this setrlimit call. + */ + proc_lock(p); + rlim = p->p_limit->pl_rlimit[which]; + proc_unlock(p); - if (limp->rlim_cur > alimp->rlim_max || - limp->rlim_max > alimp->rlim_max) { - if ((error = suser(kauth_cred_get(), &p->p_acflag))) { - return error; - } + error = 0; + /* Sanity check: new soft limit cannot exceed new hard limit */ + if (newrlim->rlim_cur > newrlim->rlim_max) { + error = EINVAL; + } + /* + * Sanity check: only super-user may raise the hard limit. + * newrlim->rlim_cur > rlim.rlim_max implies that the call is increasing the hard limit as well. + */ + else if (newrlim->rlim_cur > rlim.rlim_max || newrlim->rlim_max > rlim.rlim_max) { + /* suser() returns 0 if the calling thread is super user. */ + error = suser(kauth_cred_get(), &p->p_acflag); } - proc_limitblock(p); - - if ((error = proc_limitreplace(p)) != 0) { - proc_limitunblock(p); + if (error) { + /* Invalid setrlimit request: EINVAL or EPERM */ return error; } - alimp = &p->p_rlimit[which]; + /* Only one thread is able to change the current process's rlimit values */ + proc_lock(p); + proc_limitblock(p); + proc_unlock(p); + /* We have the reader lock of the process's plimit so it's safe to read the rlimit values */ switch (which) { case RLIMIT_CPU: - if (limp->rlim_cur == RLIM_INFINITY) { + if (newrlim->rlim_cur == RLIM_INFINITY) { task_vtimer_clear(p->task, TASK_VTIMER_RLIM); timerclear(&p->p_rlim_cpu); } else { task_absolutetime_info_data_t tinfo; - mach_msg_type_number_t count; - struct timeval ttv, tv; - clock_sec_t tv_sec; - clock_usec_t tv_usec; + mach_msg_type_number_t count; + struct timeval ttv, tv; + clock_sec_t tv_sec; + clock_usec_t tv_usec; count = TASK_ABSOLUTETIME_INFO_COUNT; - task_info(p->task, TASK_ABSOLUTETIME_INFO, - (task_info_t)&tinfo, &count); - absolutetime_to_microtime(tinfo.total_user + tinfo.total_system, - &tv_sec, &tv_usec); + task_info(p->task, TASK_ABSOLUTETIME_INFO, (task_info_t)&tinfo, &count); + absolutetime_to_microtime(tinfo.total_user + tinfo.total_system, &tv_sec, &tv_usec); ttv.tv_sec = tv_sec; ttv.tv_usec = tv_usec; - tv.tv_sec = (limp->rlim_cur > __INT_MAX__ ? __INT_MAX__ : limp->rlim_cur); + tv.tv_sec = (newrlim->rlim_cur > __INT_MAX__ ? 
__INT_MAX__ : (__darwin_time_t)newrlim->rlim_cur); tv.tv_usec = 0; timersub(&tv, &ttv, &p->p_rlim_cpu); @@ -1016,11 +1020,11 @@ dosetrlimit(struct proc *p, u_int which, struct rlimit *limp) break; case RLIMIT_DATA: - if (limp->rlim_cur > maxdmap) { - limp->rlim_cur = maxdmap; + if (newrlim->rlim_cur > maxdmap) { + newrlim->rlim_cur = maxdmap; } - if (limp->rlim_max > maxdmap) { - limp->rlim_max = maxdmap; + if (newrlim->rlim_max > maxdmap) { + newrlim->rlim_max = maxdmap; } break; @@ -1032,8 +1036,8 @@ dosetrlimit(struct proc *p, u_int which, struct rlimit *limp) } /* Disallow illegal stack size instead of clipping */ - if (limp->rlim_cur > maxsmap || - limp->rlim_max > maxsmap) { + if (newrlim->rlim_cur > maxsmap || + newrlim->rlim_max > maxsmap) { if (posix) { error = EINVAL; goto out; @@ -1043,11 +1047,11 @@ dosetrlimit(struct proc *p, u_int which, struct rlimit *limp) * doing previous implementation (< 10.5) when caller * is non-POSIX conforming. */ - if (limp->rlim_cur > maxsmap) { - limp->rlim_cur = maxsmap; + if (newrlim->rlim_cur > maxsmap) { + newrlim->rlim_cur = maxsmap; } - if (limp->rlim_max > maxsmap) { - limp->rlim_max = maxsmap; + if (newrlim->rlim_max > maxsmap) { + newrlim->rlim_max = maxsmap; } } } @@ -1057,26 +1061,24 @@ dosetrlimit(struct proc *p, u_int which, struct rlimit *limp) * "rlim_cur" bytes accessible. If stack limit is going * up make more accessible, if going down make inaccessible. */ - if (limp->rlim_cur > alimp->rlim_cur) { - user_addr_t addr; - user_size_t size; + if (newrlim->rlim_cur > rlim.rlim_cur) { + mach_vm_offset_t addr; + mach_vm_size_t size; /* grow stack */ - size = round_page_64(limp->rlim_cur); - size -= round_page_64(alimp->rlim_cur); + size = round_page_64(newrlim->rlim_cur); + size -= round_page_64(rlim.rlim_cur); - addr = p->user_stack - round_page_64(limp->rlim_cur); - kr = mach_vm_protect(current_map(), - addr, size, - FALSE, VM_PROT_DEFAULT); + addr = (mach_vm_offset_t)(p->user_stack - round_page_64(newrlim->rlim_cur)); + kr = mach_vm_protect(current_map(), addr, size, FALSE, VM_PROT_DEFAULT); if (kr != KERN_SUCCESS) { error = EINVAL; goto out; } - } else if (limp->rlim_cur < alimp->rlim_cur) { - user_addr_t addr; - user_size_t size; - user_addr_t cur_sp; + } else if (newrlim->rlim_cur < rlim.rlim_cur) { + mach_vm_offset_t addr; + mach_vm_size_t size; + uint64_t cur_sp; /* shrink stack */ @@ -1085,17 +1087,13 @@ dosetrlimit(struct proc *p, u_int which, struct rlimit *limp) * with current stack usage. * Get the current thread's stack pointer... */ - cur_sp = thread_adjuserstack(current_thread(), - 0); + cur_sp = thread_adjuserstack(current_thread(), 0); if (cur_sp <= p->user_stack && - cur_sp > (p->user_stack - - round_page_64(alimp->rlim_cur))) { + cur_sp > (p->user_stack - round_page_64(rlim.rlim_cur))) { /* stack pointer is in main stack */ - if (cur_sp <= (p->user_stack - - round_page_64(limp->rlim_cur))) { + if (cur_sp <= (p->user_stack - round_page_64(newrlim->rlim_cur))) { /* - * New limit would cause - * current usage to be invalid: + * New limit would cause current usage to be invalid: * reject new limit. 
*/ error = EINVAL; @@ -1107,14 +1105,12 @@ dosetrlimit(struct proc *p, u_int which, struct rlimit *limp) goto out; } - size = round_page_64(alimp->rlim_cur); - size -= round_page_64(limp->rlim_cur); + size = round_page_64(rlim.rlim_cur); + size -= round_page_64(rlim.rlim_cur); - addr = p->user_stack - round_page_64(alimp->rlim_cur); + addr = (mach_vm_offset_t)(p->user_stack - round_page_64(rlim.rlim_cur)); - kr = mach_vm_protect(current_map(), - addr, size, - FALSE, VM_PROT_NONE); + kr = mach_vm_protect(current_map(), addr, size, FALSE, VM_PROT_NONE); if (kr != KERN_SUCCESS) { error = EINVAL; goto out; @@ -1126,39 +1122,9 @@ dosetrlimit(struct proc *p, u_int which, struct rlimit *limp) case RLIMIT_NOFILE: /* - * Only root can set the maxfiles limits, as it is - * systemwide resource. If we are expecting POSIX behavior, - * instead of clamping the value, return EINVAL. We do this - * because historically, people have been able to attempt to - * set RLIM_INFINITY to get "whatever the maximum is". + * Nothing to be done here as we already performed the sanity checks before entering the switch code block. + * The real NOFILE limits enforced by the kernel is capped at MIN(RLIMIT_NOFILE, maxfilesperproc) */ - if (kauth_cred_issuser(kauth_cred_get())) { - if (limp->rlim_cur != alimp->rlim_cur && - limp->rlim_cur > (rlim_t)maxfiles) { - if (posix) { - error = EINVAL; - goto out; - } - limp->rlim_cur = maxfiles; - } - if (limp->rlim_max != alimp->rlim_max && - limp->rlim_max > (rlim_t)maxfiles) { - limp->rlim_max = maxfiles; - } - } else { - if (limp->rlim_cur != alimp->rlim_cur && - limp->rlim_cur > (rlim_t)maxfilesperproc) { - if (posix) { - error = EINVAL; - goto out; - } - limp->rlim_cur = maxfilesperproc; - } - if (limp->rlim_max != alimp->rlim_max && - limp->rlim_max > (rlim_t)maxfilesperproc) { - limp->rlim_max = maxfilesperproc; - } - } break; case RLIMIT_NPROC: @@ -1168,18 +1134,18 @@ dosetrlimit(struct proc *p, u_int which, struct rlimit *limp) * maxprocperuid (presumably less than maxproc). */ if (kauth_cred_issuser(kauth_cred_get())) { - if (limp->rlim_cur > (rlim_t)maxproc) { - limp->rlim_cur = maxproc; + if (newrlim->rlim_cur > (rlim_t)maxproc) { + newrlim->rlim_cur = maxproc; } - if (limp->rlim_max > (rlim_t)maxproc) { - limp->rlim_max = maxproc; + if (newrlim->rlim_max > (rlim_t)maxproc) { + newrlim->rlim_max = maxproc; } } else { - if (limp->rlim_cur > (rlim_t)maxprocperuid) { - limp->rlim_cur = maxprocperuid; + if (newrlim->rlim_cur > (rlim_t)maxprocperuid) { + newrlim->rlim_cur = maxprocperuid; } - if (limp->rlim_max > (rlim_t)maxprocperuid) { - limp->rlim_max = maxprocperuid; + if (newrlim->rlim_max > (rlim_t)maxprocperuid) { + newrlim->rlim_max = maxprocperuid; } } break; @@ -1188,16 +1154,35 @@ dosetrlimit(struct proc *p, u_int which, struct rlimit *limp) /* * Tell the Mach VM layer about the new limit value. */ - - vm_map_set_user_wire_limit(current_map(), limp->rlim_cur); + newrlim->rlim_cur = (vm_size_t)newrlim->rlim_cur; + vm_map_set_user_wire_limit(current_map(), (vm_size_t)newrlim->rlim_cur); break; } /* switch... */ - proc_lock(p); - *alimp = *limp; - proc_unlock(p); + + /* Everything checks out and we are now ready to update the rlimit */ error = 0; + out: - proc_limitunblock(p); + + if (error == 0) { + /* + * COW the current plimit if it's shared, otherwise update it in place. + * Finally unblock other threads wishing to change plimit. 
+ */ + proc_lock(p); + proc_limitupdate(p, newrlim, (uint8_t)which); + proc_limitunblock(p); + proc_unlock(p); + } else { + /* + * This setrlimit has failed, just leave the plimit as is and unblock other + * threads wishing to change plimit. + */ + proc_lock(p); + proc_limitunblock(p); + proc_unlock(p); + } + return error; } @@ -1284,7 +1269,7 @@ calcru(struct proc *p, struct timeval *up, struct timeval *sp, struct timeval *i p->p_stats->p_ru.ru_nivcsw = 0; } - p->p_stats->p_ru.ru_maxrss = tinfo.resident_size_max; + p->p_stats->p_ru.ru_maxrss = (long)tinfo.resident_size_max; } } @@ -1382,110 +1367,237 @@ update_rusage_info_child(struct rusage_info_child *ri, rusage_info_current *ri_c ri_current->ri_proc_start_abstime) + ri_current->ri_child_elapsed_abstime); } -void -proc_limitget(proc_t p, int which, struct rlimit * limp) +/* + * Reading soft limit from specified resource. + */ +rlim_t +proc_limitgetcur(proc_t p, int which, boolean_t to_lock_proc) { - proc_list_lock(); - limp->rlim_cur = p->p_rlimit[which].rlim_cur; - limp->rlim_max = p->p_rlimit[which].rlim_max; - proc_list_unlock(); -} + rlim_t rlim_cur; + assert(p); + assert(which < RLIM_NLIMITS); + /* + * Serialize access to the process's plimit pointer for concurrent threads. + */ + if (to_lock_proc) { + lck_mtx_assert(&p->p_mlock, LCK_MTX_ASSERT_NOTOWNED); + proc_lock(p); + } + + rlim_cur = p->p_limit->pl_rlimit[which].rlim_cur; + + if (to_lock_proc) { + proc_unlock(p); + } + + return rlim_cur; +} + +/* + * Writing soft limit to specified resource. This is an internal function + * used only by proc_exit and vfork_exit_internal to update RLIMIT_FSIZE in + * place without invoking setrlimit. + */ void -proc_limitdrop(proc_t p, int exiting) +proc_limitsetcur_internal(proc_t p, int which, rlim_t value) { - struct plimit * freelim = NULL; - struct plimit * freeoldlim = NULL; + struct rlimit rlim; - proc_list_lock(); + assert(p); + assertf(which == RLIMIT_FSIZE, "%s only supports RLIMIT_FSIZE\n", __FUNCTION__); - if (--p->p_limit->pl_refcnt == 0) { - freelim = p->p_limit; - p->p_limit = NULL; - } - if ((exiting != 0) && (p->p_olimit != NULL) && (--p->p_olimit->pl_refcnt == 0)) { - freeoldlim = p->p_olimit; - p->p_olimit = NULL; - } - proc_list_unlock(); - if (freelim != NULL) { - FREE_ZONE(freelim, sizeof *p->p_limit, M_PLIMIT); - } - if (freeoldlim != NULL) { - FREE_ZONE(freeoldlim, sizeof *p->p_olimit, M_PLIMIT); - } + proc_lock(p); + + /* Only one thread is able to change rlimit values at a time */ + proc_limitblock(p); + + /* Prepare an rlimit for proc_limitupdate */ + rlim = p->p_limit->pl_rlimit[which]; + rlim.rlim_cur = value; + + /* + * proc_limitupdate will COW the current plimit and update specified the soft limit + * if the plimit is shared, otherwise it will update the soft limit in place. 
+ */ + proc_limitupdate(p, &rlim, (uint8_t)which); + + /* Unblock other threads wishing to change plimit */ + proc_limitunblock(p); + + proc_unlock(p); } +void +proc_limitget(proc_t p, int which, struct rlimit * limp) +{ + assert(p); + assert(limp); + assert(which < RLIM_NLIMITS); + + /* Protect writes to the process's plimit pointer issued by concurrent threads */ + proc_lock(p); + + limp->rlim_cur = p->p_limit->pl_rlimit[which].rlim_cur; + limp->rlim_max = p->p_limit->pl_rlimit[which].rlim_max; + + proc_unlock(p); +} void proc_limitfork(proc_t parent, proc_t child) { - proc_list_lock(); + assert(parent && child); + + proc_lock(parent); + + /* Child proc inherits parent's plimit */ child->p_limit = parent->p_limit; - child->p_limit->pl_refcnt++; - child->p_olimit = NULL; - proc_list_unlock(); + + /* Increment refcnt of the shared plimit */ + os_ref_retain(&parent->p_limit->pl_refcnt); + + proc_unlock(parent); } void -proc_limitblock(proc_t p) +proc_limitdrop(proc_t p) { + struct plimit *free_plim = NULL; + os_ref_count_t refcnt; + proc_lock(p); + + /* Drop the plimit reference before exiting the system */ + refcnt = os_ref_release(&p->p_limit->pl_refcnt); + if (refcnt == 0) { + free_plim = p->p_limit; + } + + p->p_limit = NULL; + proc_unlock(p); + + /* We are the last user of this plimit, free it now. */ + if (free_plim != NULL) { + zfree(plimit_zone, free_plim); + } +} + +/* + * proc_limitblock/unblock are used to serialize access to plimit + * from concurrent threads within the same process. + * Callers must be holding the proc lock to enter, return with + * the proc lock locked + */ +void +proc_limitblock(proc_t p) +{ + lck_mtx_assert(&p->p_mlock, LCK_MTX_ASSERT_OWNED); + while (p->p_lflag & P_LLIMCHANGE) { p->p_lflag |= P_LLIMWAIT; - msleep(&p->p_olimit, &p->p_mlock, 0, "proc_limitblock", NULL); + msleep(&p->p_limit, &p->p_mlock, 0, "proc_limitblock", NULL); } p->p_lflag |= P_LLIMCHANGE; - proc_unlock(p); } - +/* + * Callers must be holding the proc lock to enter, return with + * the proc lock locked + */ void proc_limitunblock(proc_t p) { - proc_lock(p); + lck_mtx_assert(&p->p_mlock, LCK_MTX_ASSERT_OWNED); + p->p_lflag &= ~P_LLIMCHANGE; if (p->p_lflag & P_LLIMWAIT) { p->p_lflag &= ~P_LLIMWAIT; - wakeup(&p->p_olimit); + wakeup(&p->p_limit); } - proc_unlock(p); } -/* This is called behind serialization provided by proc_limitblock/unlbock */ -int -proc_limitreplace(proc_t p) +/* + * Change the rlimit values of process "p" to "rlim" for resource "which". + * + * If the current plimit is shared by multiple processes (refcnt > 1): + * this routine replaces the process's original plimit with a new plimit, + * update the requeted rlimit values, and free the original plimit if this + * process is the last user. + * + * If the current plimit is used only by the calling process (refcnt == 1): + * this routine updates the new rlimit values in place. + * + * Note: caller must be holding the proc lock before entering this routine. + * This routine allocates and frees kernel memory without holding the proc lock + * to minimize contention, and returns with the proc lock held. 
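proc_limitblock()/proc_limitunblock() above now assert that the proc lock is held and sleep on &p->p_limit (p_olimit is gone), and every writer wraps proc_limitupdate() in the same bracket, as dosetrlimit() and proc_limitsetcur_internal() do earlier in this file. Condensed into a hypothetical helper, the calling convention is roughly (header placement approximate):

#include <sys/proc_internal.h>   /* proc_lock(), struct plimit */
#include <sys/resource.h>

/* Hypothetical helper that raises one soft limit using the new protocol. */
static void
example_set_soft_limit(proc_t p, uint8_t which, rlim_t value)
{
	struct rlimit rlim;

	proc_lock(p);
	proc_limitblock(p);                     /* one writer at a time */

	rlim = p->p_limit->pl_rlimit[which];    /* snapshot under the proc lock */
	rlim.rlim_cur = value;

	proc_limitupdate(p, &rlim, which);      /* COWs the plimit if it is shared */

	proc_limitunblock(p);                   /* wake any waiting writers */
	proc_unlock(p);
}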
+ */ +void +proc_limitupdate(proc_t p, struct rlimit *rlim, uint8_t which) { - struct plimit *copy; - + struct plimit *copy_plim; + struct plimit *free_plim; + os_ref_count_t refcnt; - proc_list_lock(); + assert(p && p->p_limit); + assert(rlim); + assert(which < RLIM_NLIMITS); + lck_mtx_assert(&p->p_mlock, LCK_MTX_ASSERT_OWNED); - if (p->p_limit->pl_refcnt == 1) { - proc_list_unlock(); - return 0; + /* + * If we are the only user of this plimit, don't bother allocating a plimit + * before making changes. Just modify the rlimit values in place. + */ + refcnt = os_ref_get_count(&p->p_limit->pl_refcnt); + if (refcnt == 1) { + p->p_limit->pl_rlimit[which] = *rlim; + return; } - proc_list_unlock(); + /* + * Allocating a new plimit for this process to apply the requested rlimit values. + * Not holding the lock on the original plimit gives other processes in the system + * a chance to access the plimit while we wait for memory below. + * + * The default zalloc should always succeed when WAIT flag. + */ + proc_unlock(p); + copy_plim = zalloc(plimit_zone); + + /* Copy the current p_limit */ + proc_lock(p); + bcopy(p->p_limit->pl_rlimit, copy_plim->pl_rlimit, sizeof(struct rlimit) * RLIM_NLIMITS); - MALLOC_ZONE(copy, struct plimit *, - sizeof(struct plimit), M_PLIMIT, M_WAITOK); - if (copy == NULL) { - return ENOMEM; + /* + * Drop our reference to the old plimit. Other processes sharing the old plimit could + * have exited the system when we wait for memory for the new plimit above, thus, we + * need to check the refcnt again and free the old plimit if this process is the last + * user. Also since we are holding the proc lock here, it's impossible for another threads + * to dereference the plimit, so it's safe to free the old plimit memory. + */ + free_plim = NULL; + refcnt = os_ref_release(&p->p_limit->pl_refcnt); + if (refcnt == 0) { + free_plim = p->p_limit; } + /* Initialize the newly allocated plimit */ + os_ref_init_count(©_plim->pl_refcnt, &rlimit_refgrp, 1); - proc_list_lock(); - bcopy(p->p_limit->pl_rlimit, copy->pl_rlimit, - sizeof(struct rlimit) * RLIM_NLIMITS); - copy->pl_refcnt = 1; - /* hang on to reference to old till process exits */ - p->p_olimit = p->p_limit; - p->p_limit = copy; - proc_list_unlock(); + /* Apply new rlimit values */ + copy_plim->pl_rlimit[which] = *rlim; - return 0; + /* All set, update the process's plimit pointer to the new plimit. 
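proc_limitupdate() above (its final pointer swap continues just below), together with proc_limitfork() and proc_limitdrop(), turns the plimit into a zone-backed, os_ref-counted structure that is copied only while it is shared. Reduced to a hypothetical refcounted settings blob, the lifecycle looks roughly like this; all settings_* names are invented and the sketch omits the proc-lock/proc_limitblock serialization the real code relies on:

#include <os/refcnt.h>
#include <kern/zalloc.h>

struct settings {
	os_refcnt_t s_refcnt;
	int         s_values[8];
};

os_refgrp_decl(static, settings_refgrp, "settings", NULL);
static ZONE_DECLARE(settings_zone, "settings",
    sizeof(struct settings), ZC_NOENCRYPT);

/* fork(): the child shares the parent's settings, so just take a reference. */
static struct settings *
settings_share(struct settings *s)
{
	os_ref_retain(&s->s_refcnt);
	return s;
}

/* Write path: update in place when we are the sole owner, otherwise copy. */
static struct settings *
settings_modify(struct settings *s, int idx, int value)
{
	struct settings *copy;

	if (os_ref_get_count(&s->s_refcnt) == 1) {
		s->s_values[idx] = value;
		return s;
	}

	copy = zalloc_flags(settings_zone, Z_WAITOK);
	*copy = *s;
	os_ref_init_count(&copy->s_refcnt, &settings_refgrp, 1);
	copy->s_values[idx] = value;

	/* Drop our share of the old blob; free it if we were the last user. */
	if (os_ref_release(&s->s_refcnt) == 0) {
		zfree(settings_zone, s);
	}
	return copy;
}

/* exit(): drop the reference, freeing on the last release. */
static void
settings_drop(struct settings *s)
{
	if (os_ref_release(&s->s_refcnt) == 0) {
		zfree(settings_zone, s);
	}
}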
*/ + p->p_limit = copy_plim; + proc_unlock(p); + + if (free_plim != NULL) { + zfree(plimit_zone, free_plim); + } + + /* Return with proc->p_mlock locked */ + proc_lock(p); } static int @@ -1498,6 +1610,10 @@ static int iopolicysys_vfs_materialize_dataless_files(struct proc *p, int cmd, int scope, int policy, struct _iopol_param_t *iop_param); static int iopolicysys_vfs_statfs_no_data_volume(struct proc *p, int cmd, int scope, int policy, struct _iopol_param_t *iop_param); +static int +iopolicysys_vfs_trigger_resolve(struct proc *p, int cmd, int scope, int policy, struct _iopol_param_t *iop_param); +static int +iopolicysys_vfs_ignore_content_protection(struct proc *p, int cmd, int scope, int policy, struct _iopol_param_t *iop_param); /* * iopolicysys @@ -1555,6 +1671,19 @@ iopolicysys(struct proc *p, struct iopolicysys_args *uap, int32_t *retval) if (error) { goto out; } + break; + case IOPOL_TYPE_VFS_TRIGGER_RESOLVE: + error = iopolicysys_vfs_trigger_resolve(p, uap->cmd, iop_param.iop_scope, iop_param.iop_policy, &iop_param); + if (error) { + goto out; + } + break; + case IOPOL_TYPE_VFS_IGNORE_CONTENT_PROTECTION: + error = iopolicysys_vfs_ignore_content_protection(p, uap->cmd, iop_param.iop_scope, iop_param.iop_policy, &iop_param); + if (error) { + goto out; + } + break; default: error = EINVAL; goto out; @@ -1609,7 +1738,7 @@ iopolicysys_disk(struct proc *p __unused, int cmd, int scope, int policy, struct error = EIDRM; break; } - /* otherwise, fall through to the error case. */ + OS_FALLTHROUGH; default: error = EINVAL; goto out; @@ -1618,15 +1747,15 @@ iopolicysys_disk(struct proc *p __unused, int cmd, int scope, int policy, struct break; case IOPOL_SCOPE_DARWIN_BG: -#if CONFIG_EMBEDDED - /* Embedded doesn't want this as BG is always IOPOL_THROTTLE */ +#if !defined(XNU_TARGET_OS_OSX) + /* We don't want this on platforms outside of macOS as BG is always IOPOL_THROTTLE */ error = ENOTSUP; goto out; -#else /* CONFIG_EMBEDDED */ +#else /* !defined(XNU_TARGET_OS_OSX) */ thread = THREAD_NULL; policy_flavor = TASK_POLICY_DARWIN_BG_IOPOL; break; -#endif /* CONFIG_EMBEDDED */ +#endif /* !defined(XNU_TARGET_OS_OSX) */ default: error = EINVAL; @@ -2030,6 +2159,136 @@ out: return error; } +static int +iopolicysys_vfs_trigger_resolve(struct proc *p __unused, int cmd, + int scope, int policy, struct _iopol_param_t *iop_param) +{ + int error = 0; + + /* Validate scope */ + switch (scope) { + case IOPOL_SCOPE_PROCESS: + /* Only process OK */ + break; + default: + error = EINVAL; + goto out; + } + + /* Validate policy */ + if (cmd == IOPOL_CMD_SET) { + switch (policy) { + case IOPOL_VFS_TRIGGER_RESOLVE_DEFAULT: + /* fall-through */ + case IOPOL_VFS_TRIGGER_RESOLVE_OFF: + /* These policies are OK */ + break; + default: + error = EINVAL; + goto out; + } + } + + /* Perform command */ + switch (cmd) { + case IOPOL_CMD_SET: + switch (policy) { + case IOPOL_VFS_TRIGGER_RESOLVE_DEFAULT: + OSBitAndAtomic16(~((uint32_t)P_VFS_IOPOLICY_TRIGGER_RESOLVE_DISABLE), &p->p_vfs_iopolicy); + break; + case IOPOL_VFS_TRIGGER_RESOLVE_OFF: + OSBitOrAtomic16((uint32_t)P_VFS_IOPOLICY_TRIGGER_RESOLVE_DISABLE, &p->p_vfs_iopolicy); + break; + default: + error = EINVAL; + goto out; + } + + break; + case IOPOL_CMD_GET: + iop_param->iop_policy = (p->p_vfs_iopolicy & P_VFS_IOPOLICY_TRIGGER_RESOLVE_DISABLE) + ? 
IOPOL_VFS_TRIGGER_RESOLVE_OFF + : IOPOL_VFS_TRIGGER_RESOLVE_DEFAULT; + break; + default: + error = EINVAL; /* unknown command */ + break; + } + +out: + return error; +} + +static int +iopolicysys_vfs_ignore_content_protection(struct proc *p, int cmd, int scope, + int policy, struct _iopol_param_t *iop_param) +{ + int error = 0; + + /* Validate scope */ + switch (scope) { + case IOPOL_SCOPE_PROCESS: + /* Only process OK */ + break; + default: + error = EINVAL; + goto out; + } + + /* Validate policy */ + if (cmd == IOPOL_CMD_SET) { + switch (policy) { + case IOPOL_VFS_CONTENT_PROTECTION_DEFAULT: + OS_FALLTHROUGH; + case IOPOL_VFS_CONTENT_PROTECTION_IGNORE: + /* These policies are OK */ + break; + default: + error = EINVAL; + goto out; + } + } + + /* Perform command */ + switch (cmd) { + case IOPOL_CMD_SET: + if (0 == kauth_cred_issuser(kauth_cred_get())) { + /* If it's a non-root process, it needs to have the entitlement to set the policy */ + boolean_t entitled = FALSE; + entitled = IOTaskHasEntitlement(current_task(), "com.apple.private.iopol.case_sensitivity"); + if (!entitled) { + error = EPERM; + goto out; + } + } + + switch (policy) { + case IOPOL_VFS_CONTENT_PROTECTION_DEFAULT: + os_atomic_andnot(&p->p_vfs_iopolicy, P_VFS_IOPOLICY_IGNORE_CONTENT_PROTECTION, relaxed); + break; + case IOPOL_VFS_CONTENT_PROTECTION_IGNORE: + os_atomic_or(&p->p_vfs_iopolicy, P_VFS_IOPOLICY_IGNORE_CONTENT_PROTECTION, relaxed); + break; + default: + error = EINVAL; + goto out; + } + + break; + case IOPOL_CMD_GET: + iop_param->iop_policy = (os_atomic_load(&p->p_vfs_iopolicy, relaxed) & P_VFS_IOPOLICY_IGNORE_CONTENT_PROTECTION) + ? IOPOL_VFS_CONTENT_PROTECTION_IGNORE + : IOPOL_VFS_CONTENT_PROTECTION_DEFAULT; + break; + default: + error = EINVAL; /* unknown command */ + break; + } + +out: + return error; +} + /* BSD call back function for task_policy networking changes */ void proc_apply_task_networkbg(void * bsd_info, thread_t thread) @@ -2056,6 +2315,13 @@ gather_rusage_info(proc_t p, rusage_info_current *ru, int flavor) assert(p->p_stats != NULL); memset(ru, 0, sizeof(*ru)); switch (flavor) { + case RUSAGE_INFO_V5: +#if !XNU_TARGET_OS_OSX && __has_feature(ptrauth_calls) + if (vm_shared_region_is_reslide(p->task)) { + ru->ri_flags |= RU_PROC_RUNS_RESLIDE; + } +#endif /* !XNU_TARGET_OS_OSX && __has_feature(ptrauth_calls) */ + OS_FALLTHROUGH; case RUSAGE_INFO_V4: ru->ri_logical_writes = get_task_logical_writes(p->task, FALSE); ru->ri_lifetime_max_phys_footprint = get_task_phys_footprint_lifetime_max(p->task); @@ -2063,16 +2329,16 @@ gather_rusage_info(proc_t p, rusage_info_current *ru, int flavor) ru->ri_interval_max_phys_footprint = get_task_phys_footprint_interval_max(p->task, FALSE); #endif fill_task_monotonic_rusage(p->task, ru); - /* fall through */ + OS_FALLTHROUGH; case RUSAGE_INFO_V3: fill_task_qos_rusage(p->task, ru); fill_task_billed_usage(p->task, ru); - /* fall through */ + OS_FALLTHROUGH; case RUSAGE_INFO_V2: fill_task_io_rusage(p->task, ru); - /* fall through */ + OS_FALLTHROUGH; case RUSAGE_INFO_V1: /* @@ -2089,7 +2355,7 @@ gather_rusage_info(proc_t p, rusage_info_current *ru, int flavor) ru->ri_child_elapsed_abstime = ri_child->ri_child_elapsed_abstime; proc_unlock(p); - /* fall through */ + OS_FALLTHROUGH; case RUSAGE_INFO_V0: proc_getexecutableuuid(p, (unsigned char *)&ru->ri_uuid, sizeof(ru->ri_uuid)); @@ -2127,6 +2393,9 @@ proc_get_rusage(proc_t p, int flavor, user_addr_t buffer, __unused int is_zombie size = sizeof(struct rusage_info_v4); break; + case RUSAGE_INFO_V5: + size = sizeof(struct 
rusage_info_v5); + break; default: return EINVAL; } @@ -2229,7 +2498,7 @@ proc_rlimit_control(__unused struct proc *p, struct proc_rlimit_control_args *ua error = copyout(&wakeupmon_args, uap->arg, sizeof(wakeupmon_args)); break; case RLIMIT_CPU_USAGE_MONITOR: - cpumon_flags = uap->arg; // XXX temporarily stashing flags in argp (12592127) + cpumon_flags = (uint32_t)uap->arg; // XXX temporarily stashing flags in argp (12592127) error = mach_to_bsd_rv(task_cpu_usage_monitor_ctl(targetp->task, &cpumon_flags)); break; case RLIMIT_THREAD_CPULIMITS: @@ -2258,7 +2527,7 @@ proc_rlimit_control(__unused struct proc *p, struct proc_rlimit_control_args *ua #if CONFIG_LEDGER_INTERVAL_MAX case RLIMIT_FOOTPRINT_INTERVAL: - footprint_interval_flags = uap->arg; // XXX temporarily stashing flags in argp (12592127) + footprint_interval_flags = (uint32_t)uap->arg; // XXX temporarily stashing flags in argp (12592127) /* * There is currently only one option for this flavor. */ diff --git a/bsd/kern/kern_shutdown.c b/bsd/kern/kern_shutdown.c index 1e0027f95..3bd774db5 100644 --- a/bsd/kern/kern_shutdown.c +++ b/bsd/kern/kern_shutdown.c @@ -42,8 +42,6 @@ #include #include #include -#include -#include #include #include #include @@ -74,8 +72,10 @@ uint32_t system_inshutdown = 0; +#if XNU_TARGET_OS_OSX /* XXX should be in a header file somewhere, but isn't */ extern void (*unmountroot_pre_hook)(void); +#endif unsigned int proc_shutdown_exitcount = 0; @@ -226,9 +226,11 @@ reboot_kernel(int howto, char *message) halt_log_enter("audit_shutdown", 0, mach_absolute_time() - startTime); #endif +#if XNU_TARGET_OS_OSX if (unmountroot_pre_hook != NULL) { unmountroot_pre_hook(); } +#endif startTime = mach_absolute_time(); sync((proc_t)NULL, (void *)NULL, (int *)NULL); @@ -392,7 +394,7 @@ sd_callback1(proc_t p, void * args) int countproc = sd->countproc; proc_lock(p); - p->p_shutdownstate = setsdstate; + p->p_shutdownstate = (char)setsdstate; if (p->p_stat != SZOMB) { proc_unlock(p); if (countproc != 0) { @@ -440,7 +442,7 @@ sd_callback2(proc_t p, void * args) int countproc = sd->countproc; proc_lock(p); - p->p_shutdownstate = setsdstate; + p->p_shutdownstate = (char)setsdstate; if (p->p_stat != SZOMB) { proc_unlock(p); if (countproc != 0) { @@ -469,7 +471,7 @@ sd_callback3(proc_t p, void * args) int setsdstate = sd->setsdstate; proc_lock(p); - p->p_shutdownstate = setsdstate; + p->p_shutdownstate = (char)setsdstate; if (p->p_stat != SZOMB) { /* * NOTE: following code ignores sig_lock and plays diff --git a/bsd/kern/kern_sig.c b/bsd/kern/kern_sig.c index dc26af6f3..3c9cb1fee 100644 --- a/bsd/kern/kern_sig.c +++ b/bsd/kern/kern_sig.c @@ -131,6 +131,7 @@ extern kern_return_t get_signalact(task_t, thread_t *, int); extern unsigned int get_useraddr(void); extern boolean_t task_did_exec(task_t task); extern boolean_t task_is_exec_copy(task_t task); +extern void vm_shared_region_reslide_stale(void); /* * --- @@ -233,8 +234,8 @@ static void sigaltstack_user64_to_kern(struct user64_sigaltstack *in, struct kern_sigaltstack *out) { out->ss_flags = in->ss_flags; - out->ss_size = in->ss_size; - out->ss_sp = in->ss_sp; + out->ss_size = (user_size_t)in->ss_size; + out->ss_sp = (user_addr_t)in->ss_sp; } static void @@ -271,8 +272,8 @@ __sigaction_user32_to_kern(struct __user32_sigaction *in, struct __kern_sigactio static void __sigaction_user64_to_kern(struct __user64_sigaction *in, struct __kern_sigaction *out) { - out->__sigaction_u.__sa_handler = in->__sigaction_u.__sa_handler; - out->sa_tramp = in->sa_tramp; + 
out->__sigaction_u.__sa_handler = (user_addr_t)in->__sigaction_u.__sa_handler; + out->sa_tramp = (user_addr_t)in->sa_tramp; out->sa_mask = in->sa_mask; out->sa_flags = in->sa_flags; @@ -808,7 +809,7 @@ execsigs(proc_t p, thread_t thread) * and are now ignored by default). */ while (p->p_sigcatch) { - nc = ffs((long)p->p_sigcatch); + nc = ffs((unsigned int)p->p_sigcatch); mask = sigmask(nc); p->p_sigcatch &= ~mask; if (sigprop[nc] & SA_IGNORE) { @@ -1103,8 +1104,8 @@ __old_semwait_signal_nocancel(proc_t p, struct __old_semwait_signal_nocancel_arg if (IS_64BIT_PROCESS(p)) { struct user64_timespec ts64; error = copyin(uap->ts, &ts64, sizeof(ts64)); - ts.tv_sec = ts64.tv_sec; - ts.tv_nsec = ts64.tv_nsec; + ts.tv_sec = (user_time_t)ts64.tv_sec; + ts.tv_nsec = (user_long_t)ts64.tv_nsec; } else { struct user32_timespec ts32; error = copyin(uap->ts, &ts32, sizeof(ts32)); @@ -1123,8 +1124,8 @@ __old_semwait_signal_nocancel(proc_t p, struct __old_semwait_signal_nocancel_arg } if (uap->relative) { - then.tv_sec = ts.tv_sec; - then.tv_nsec = ts.tv_nsec; + then.tv_sec = (unsigned int)ts.tv_sec; + then.tv_nsec = (clock_res_t)ts.tv_nsec; } else { nanotime(&now); @@ -1135,8 +1136,8 @@ __old_semwait_signal_nocancel(proc_t p, struct __old_semwait_signal_nocancel_arg then.tv_sec = 0; then.tv_nsec = 0; } else { - then.tv_sec = ts.tv_sec - now.tv_sec; - then.tv_nsec = ts.tv_nsec - now.tv_nsec; + then.tv_sec = (unsigned int)(ts.tv_sec - now.tv_sec); + then.tv_nsec = (clock_res_t)(ts.tv_nsec - now.tv_nsec); if (then.tv_nsec < 0) { then.tv_nsec += NSEC_PER_SEC; then.tv_sec--; @@ -1197,7 +1198,7 @@ __semwait_signal_nocancel(__unused proc_t p, struct __semwait_signal_nocancel_ar boolean_t truncated_timeout = FALSE; if (uap->timeout) { - ts.tv_sec = uap->tv_sec; + ts.tv_sec = (user_time_t)uap->tv_sec; ts.tv_nsec = uap->tv_nsec; if ((ts.tv_sec & 0xFFFFFFFF00000000ULL) != 0) { @@ -1207,8 +1208,8 @@ __semwait_signal_nocancel(__unused proc_t p, struct __semwait_signal_nocancel_ar } if (uap->relative) { - then.tv_sec = ts.tv_sec; - then.tv_nsec = ts.tv_nsec; + then.tv_sec = (unsigned int)ts.tv_sec; + then.tv_nsec = (clock_res_t)ts.tv_nsec; } else { nanotime(&now); @@ -1219,8 +1220,8 @@ __semwait_signal_nocancel(__unused proc_t p, struct __semwait_signal_nocancel_ar then.tv_sec = 0; then.tv_nsec = 0; } else { - then.tv_sec = ts.tv_sec - now.tv_sec; - then.tv_nsec = ts.tv_nsec - now.tv_nsec; + then.tv_sec = (unsigned int)(ts.tv_sec - now.tv_sec); + then.tv_nsec = (clock_res_t)(ts.tv_nsec - now.tv_nsec); if (then.tv_nsec < 0) { then.tv_nsec += NSEC_PER_SEC; then.tv_sec--; @@ -1629,7 +1630,8 @@ build_userspace_exit_reason(uint32_t reason_namespace, uint64_t reason_code, use } if (reason_string != USER_ADDR_NULL) { - reason_user_desc = (char *) kalloc(EXIT_REASON_USER_DESC_MAX_LEN); + reason_user_desc = kheap_alloc(KHEAP_TEMP, + EXIT_REASON_USER_DESC_MAX_LEN, Z_WAITOK); if (reason_user_desc != NULL) { error = copyinstr(reason_string, (void *) reason_user_desc, @@ -1644,7 +1646,8 @@ build_userspace_exit_reason(uint32_t reason_namespace, uint64_t reason_code, use user_data_to_copy += reason_user_desc_len; } else { exit_reason->osr_flags |= OS_REASON_FLAG_FAILED_DATA_COPYIN; - kfree(reason_user_desc, EXIT_REASON_USER_DESC_MAX_LEN); + kheap_free(KHEAP_TEMP, reason_user_desc, + EXIT_REASON_USER_DESC_MAX_LEN); reason_user_desc = NULL; reason_user_desc_len = 0; } @@ -1666,10 +1669,10 @@ build_userspace_exit_reason(uint32_t reason_namespace, uint64_t reason_code, use if (reason_user_desc != NULL && reason_user_desc_len != 0) { if 
(KERN_SUCCESS == kcdata_get_memory_addr(&exit_reason->osr_kcd_descriptor, EXIT_REASON_USER_DESC, - reason_user_desc_len, + (uint32_t)reason_user_desc_len, &data_addr)) { kcdata_memcpy(&exit_reason->osr_kcd_descriptor, (mach_vm_address_t) data_addr, - reason_user_desc, reason_user_desc_len); + reason_user_desc, (uint32_t)reason_user_desc_len); } else { printf("build_userspace_exit_reason: failed to allocate space for reason string\n"); goto out_failed_copyin; @@ -1695,7 +1698,7 @@ build_userspace_exit_reason(uint32_t reason_namespace, uint64_t reason_code, use } if (reason_user_desc != NULL) { - kfree(reason_user_desc, EXIT_REASON_USER_DESC_MAX_LEN); + kheap_free(KHEAP_TEMP, reason_user_desc, EXIT_REASON_USER_DESC_MAX_LEN); reason_user_desc = NULL; reason_user_desc_len = 0; } @@ -1705,7 +1708,7 @@ build_userspace_exit_reason(uint32_t reason_namespace, uint64_t reason_code, use out_failed_copyin: if (reason_user_desc != NULL) { - kfree(reason_user_desc, EXIT_REASON_USER_DESC_MAX_LEN); + kheap_free(KHEAP_TEMP, reason_user_desc, EXIT_REASON_USER_DESC_MAX_LEN); reason_user_desc = NULL; reason_user_desc_len = 0; } @@ -2001,6 +2004,46 @@ threadsignal(thread_t sig_actthread, int signum, mach_exception_code_t code, boo signal_setast(sig_actthread); } +/* Called with proc locked */ +static void +set_thread_extra_flags(struct uthread *uth, os_reason_t reason) +{ + extern int vm_shared_region_reslide_restrict; + assert(uth != NULL); + /* + * Check whether the userland fault address falls within the shared + * region and notify userland if so. This allows launchd to apply + * special policies around this fault type. + */ + if (reason->osr_namespace == OS_REASON_SIGNAL && + reason->osr_code == SIGSEGV) { + mach_vm_address_t fault_address = uth->uu_subcode; + +#if defined(__arm64__) + /* taken from osfmk/arm/misc_protos.h */ + #define TBI_MASK 0xff00000000000000 + #define tbi_clear(addr) ((addr) & ~(TBI_MASK)) + fault_address = tbi_clear(fault_address); +#endif /* __arm64__ */ + + if (fault_address >= SHARED_REGION_BASE && + fault_address <= SHARED_REGION_BASE + SHARED_REGION_SIZE) { + /* + * Always report whether the fault happened within the shared cache + * region, but only stale the slide if the resliding is extended + * to all processes or if the process faulting is a platform one. 
+ */ + reason->osr_flags |= OS_REASON_FLAG_SHAREDREGION_FAULT; + +#if __has_feature(ptrauth_calls) + if (!vm_shared_region_reslide_restrict || csproc_get_platform_binary(current_proc())) { + vm_shared_region_reslide_stale(); + } +#endif /* __has_feature(ptrauth_calls) */ + } + } +} + void set_thread_exit_reason(void *th, void *reason, boolean_t proc_locked) { @@ -2021,6 +2064,8 @@ set_thread_exit_reason(void *th, void *reason, boolean_t proc_locked) proc_lock(targ_proc); } + set_thread_extra_flags(targ_uth, exit_reason); + if (targ_uth->uu_exit_reason == OS_REASON_NULL) { targ_uth->uu_exit_reason = exit_reason; } else { @@ -2137,13 +2182,13 @@ build_signal_reason(int signum, const char *procname) truncated_procname[proc_name_length - 1] = '\0'; kcdata_memcpy(&signal_reason->osr_kcd_descriptor, data_addr, truncated_procname, - strlen((char *) &truncated_procname)); + (uint32_t)strlen((char *) &truncated_procname)); } else if (*sender_proc->p_name) { kcdata_memcpy(&signal_reason->osr_kcd_descriptor, data_addr, &sender_proc->p_name, sizeof(sender_proc->p_name)); } else { kcdata_memcpy(&signal_reason->osr_kcd_descriptor, data_addr, &default_sender_procname, - strlen(default_sender_procname) + 1); + (uint32_t)strlen(default_sender_procname) + 1); } } else { printf("build_signal_reason: exceeded space in signal reason buf, unable to log procname\n"); @@ -2707,7 +2752,7 @@ psignal_with_reason(proc_t p, int signum, struct os_reason *signal_reason) } void -psignal_sigkill_with_reason(proc_t p, struct os_reason *signal_reason) +psignal_sigkill_with_reason(struct proc *p, struct os_reason *signal_reason) { psignal_internal(p, NULL, NULL, 0, SIGKILL, signal_reason); } @@ -2808,7 +2853,7 @@ issignal_locked(proc_t p) goto out; } - signum = ffs((long)sigbits); + signum = ffs((unsigned int)sigbits); mask = sigmask(signum); prop = sigprop[signum]; @@ -3065,7 +3110,7 @@ CURSIG(proc_t p) return retnum; } - signum = ffs((long)sigbits); + signum = ffs((unsigned int)sigbits); mask = sigmask(signum); prop = sigprop[signum]; sigbits &= ~mask; /* take the signal out */ @@ -3505,7 +3550,7 @@ bsd_ast(thread_t thread) } if (ut->t_dtrace_resumepid) { - proc_t resumeproc = proc_find(ut->t_dtrace_resumepid); + proc_t resumeproc = proc_find((int)ut->t_dtrace_resumepid); ut->t_dtrace_resumepid = 0; if (resumeproc != PROC_NULL) { proc_lock(resumeproc); diff --git a/bsd/kern/kern_subr.c b/bsd/kern/kern_subr.c index 9988a3a3d..5ee595e58 100644 --- a/bsd/kern/kern_subr.c +++ b/bsd/kern/kern_subr.c @@ -165,7 +165,7 @@ uiomove64(const addr64_t c_cp, int n, struct uio *uio) KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_START, (int)cp, (uintptr_t)uio->uio_iovs.uiovp->iov_base, acnt, 0, 0); - error = copyout( CAST_DOWN(caddr_t, cp), uio->uio_iovs.uiovp->iov_base, acnt ); + error = copyout( CAST_DOWN(caddr_t, cp), uio->uio_iovs.uiovp->iov_base, (size_t)acnt ); KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_END, (int)cp, (uintptr_t)uio->uio_iovs.uiovp->iov_base, acnt, 0, 0); @@ -173,7 +173,7 @@ uiomove64(const addr64_t c_cp, int n, struct uio *uio) KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_START, (uintptr_t)uio->uio_iovs.uiovp->iov_base, (int)cp, acnt, 0, 0); - error = copyin(uio->uio_iovs.uiovp->iov_base, CAST_DOWN(caddr_t, cp), acnt); + error = copyin(uio->uio_iovs.uiovp->iov_base, CAST_DOWN(caddr_t, cp), (size_t)acnt); KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_END, (uintptr_t)uio->uio_iovs.uiovp->iov_base, (int)cp, acnt, 0, 0); @@ -187,21 +187,23 @@ 
uiomove64(const addr64_t c_cp, int n, struct uio *uio) case UIO_SYSSPACE: if (uio->uio_rw == UIO_READ) { error = copywithin(CAST_DOWN(caddr_t, cp), CAST_DOWN(caddr_t, uio->uio_iovs.kiovp->iov_base), - acnt); + (size_t)acnt); } else { error = copywithin(CAST_DOWN(caddr_t, uio->uio_iovs.kiovp->iov_base), CAST_DOWN(caddr_t, cp), - acnt); + (size_t)acnt); } break; case UIO_PHYS_USERSPACE64: case UIO_PHYS_USERSPACE32: case UIO_PHYS_USERSPACE: + acnt = MIN(acnt, UINT_MAX); + if (uio->uio_rw == UIO_READ) { KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_START, (int)cp, (uintptr_t)uio->uio_iovs.uiovp->iov_base, acnt, 1, 0); - error = copypv((addr64_t)cp, uio->uio_iovs.uiovp->iov_base, acnt, cppvPsrc | cppvNoRefSrc); + error = copypv((addr64_t)cp, uio->uio_iovs.uiovp->iov_base, (unsigned int)acnt, cppvPsrc | cppvNoRefSrc); if (error) { /* Copy physical to virtual */ error = EFAULT; } @@ -212,7 +214,7 @@ uiomove64(const addr64_t c_cp, int n, struct uio *uio) KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_START, (uintptr_t)uio->uio_iovs.uiovp->iov_base, (int)cp, acnt, 1, 0); - error = copypv(uio->uio_iovs.uiovp->iov_base, (addr64_t)cp, acnt, cppvPsnk | cppvNoRefSrc | cppvNoModSnk); + error = copypv(uio->uio_iovs.uiovp->iov_base, (addr64_t)cp, (unsigned int)acnt, cppvPsnk | cppvNoRefSrc | cppvNoModSnk); if (error) { /* Copy virtual to physical */ error = EFAULT; } @@ -226,11 +228,13 @@ uiomove64(const addr64_t c_cp, int n, struct uio *uio) break; case UIO_PHYS_SYSSPACE: + acnt = MIN(acnt, UINT_MAX); + if (uio->uio_rw == UIO_READ) { KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_START, (int)cp, (uintptr_t)uio->uio_iovs.kiovp->iov_base, acnt, 2, 0); - error = copypv((addr64_t)cp, uio->uio_iovs.kiovp->iov_base, acnt, cppvKmap | cppvPsrc | cppvNoRefSrc); + error = copypv((addr64_t)cp, uio->uio_iovs.kiovp->iov_base, (unsigned int)acnt, cppvKmap | cppvPsrc | cppvNoRefSrc); if (error) { /* Copy physical to virtual */ error = EFAULT; } @@ -241,7 +245,7 @@ uiomove64(const addr64_t c_cp, int n, struct uio *uio) KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_START, (uintptr_t)uio->uio_iovs.kiovp->iov_base, (int)cp, acnt, 2, 0); - error = copypv(uio->uio_iovs.kiovp->iov_base, (addr64_t)cp, acnt, cppvKmap | cppvPsnk | cppvNoRefSrc | cppvNoModSnk); + error = copypv(uio->uio_iovs.kiovp->iov_base, (addr64_t)cp, (unsigned int)acnt, cppvKmap | cppvPsnk | cppvNoRefSrc | cppvNoModSnk); if (error) { /* Copy virtual to physical */ error = EFAULT; } @@ -257,7 +261,7 @@ uiomove64(const addr64_t c_cp, int n, struct uio *uio) default: break; } - uio_update(uio, acnt); + uio_update(uio, (user_size_t)acnt); cp += acnt; n -= acnt; } @@ -295,7 +299,7 @@ ureadc(int c, struct uio *uio) case UIO_SYSSPACE32: case UIO_SYSSPACE: - *(CAST_DOWN(caddr_t, uio->uio_iovs.kiovp->iov_base)) = c; + *(CAST_DOWN(caddr_t, uio->uio_iovs.kiovp->iov_base)) = (char)c; break; default: @@ -305,34 +309,38 @@ ureadc(int c, struct uio *uio) return 0; } +LIST_HEAD(generic_hash_head, generic); + /* * General routine to allocate a hash table. 
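A minimal illustrative sketch, not part of the patch itself, of how the reworked hashinit()/hashdestroy() pair in the hunk below is typically consumed: the requested element count is rounded down to a power of two via fls(), *hashmask comes back as size - 1 for masking hash values into bucket indices, and Z_ZERO leaves every bucket list empty. The entry type, the helper name and the M_TEMP tag are assumptions made only for this example.

#include <sys/systm.h>
#include <sys/queue.h>
#include <sys/malloc.h>

struct example_entry {
	LIST_ENTRY(example_entry) link;
	u_long key;
};
LIST_HEAD(example_head, example_entry);

static void
example_hash_usage(void)
{
	u_long mask;
	/* 100 elements: fls(100) == 7, so 64 buckets and mask == 63 */
	struct example_head *table = hashinit(100, M_TEMP, &mask);

	if (table != NULL) {
		u_long key = 0x5f3759df;
		struct example_head *bucket = &table[key & mask];
		(void)bucket; /* LIST_INSERT_HEAD(bucket, entry, link), lookups, ... */
		hashdestroy(table, M_TEMP, mask);
	}
}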
*/ void * -hashinit(int elements, int type, u_long *hashmask) +hashinit(int elements, int type __unused, u_long *hashmask) { - long hashsize; - LIST_HEAD(generic, generic) * hashtbl; - int i; + struct generic_hash_head *hashtbl; + vm_size_t hashsize; if (elements <= 0) { panic("hashinit: bad cnt"); } - for (hashsize = 1; hashsize <= elements; hashsize <<= 1) { - continue; - } - hashsize >>= 1; - MALLOC(hashtbl, struct generic *, - hashsize * sizeof(*hashtbl), type, M_WAITOK | M_ZERO); + + hashsize = 1UL << (fls(elements) - 1); + hashtbl = kheap_alloc(KHEAP_DEFAULT, hashsize * sizeof(*hashtbl), + Z_WAITOK | Z_ZERO); if (hashtbl != NULL) { - for (i = 0; i < hashsize; i++) { - LIST_INIT(&hashtbl[i]); - } *hashmask = hashsize - 1; } return hashtbl; } +void +hashdestroy(void *hash, int type __unused, u_long hashmask) +{ + struct generic_hash_head *hashtbl = hash; + assert(powerof2(hashmask + 1)); + kheap_free(KHEAP_DEFAULT, hashtbl, (hashmask + 1) * sizeof(*hashtbl)); +} + /* * uio_resid - return the residual IO value for the given uio_t */ @@ -629,32 +637,29 @@ uio_createwithbuffer( int a_iovcount, /* number of iovecs */ uio_t my_uio = (uio_t) a_buf_p; size_t my_size; + assert(a_iovcount >= 0 && a_iovcount <= UIO_MAXIOV); + if (a_iovcount < 0 || a_iovcount > UIO_MAXIOV) { + return NULL; + } + my_size = UIO_SIZEOF(a_iovcount); + assert(a_buffer_size >= my_size); if (a_buffer_size < my_size) { -#if DEBUG - panic("%s :%d - a_buffer_size is too small\n", __FILE__, __LINE__); -#endif /* DEBUG */ return NULL; } my_size = a_buffer_size; -#if DEBUG - if (my_uio == 0) { - panic("%s :%d - could not allocate uio_t\n", __FILE__, __LINE__); - } - if (!IS_VALID_UIO_SEGFLG(a_spacetype)) { - panic("%s :%d - invalid address space type\n", __FILE__, __LINE__); - } - if (!(a_iodirection == UIO_READ || a_iodirection == UIO_WRITE)) { - panic("%s :%d - invalid IO direction flag\n", __FILE__, __LINE__); - } - if (a_iovcount > UIO_MAXIOV) { - panic("%s :%d - invalid a_iovcount\n", __FILE__, __LINE__); + assert(my_size <= INT_MAX); + if (my_size > INT_MAX) { + return NULL; } -#endif /* DEBUG */ + + assert(my_uio != NULL); + assert(IS_VALID_UIO_SEGFLG(a_spacetype)); + assert(a_iodirection == UIO_READ || a_iodirection == UIO_WRITE); bzero(my_uio, my_size); - my_uio->uio_size = my_size; + my_uio->uio_size = (int)my_size; /* * we use uio_segflg to indicate if the uio_t is the new format or @@ -774,7 +779,7 @@ uio_reset( uio_t a_uio, my_old_flags = a_uio->uio_flags; my_max_iovs = a_uio->uio_max_iovs; bzero(a_uio, my_size); - a_uio->uio_size = my_size; + a_uio->uio_size = (int)my_size; /* * we use uio_segflg to indicate if the uio_t is the new format or @@ -919,10 +924,10 @@ uio_getiov( uio_t a_uio, } } else { if (a_baseaddr_p != NULL) { - *a_baseaddr_p = a_uio->uio_iovs.kiovp[a_index].iov_base; + *a_baseaddr_p = (user_addr_t)a_uio->uio_iovs.kiovp[a_index].iov_base; } if (a_length_p != NULL) { - *a_length_p = a_uio->uio_iovs.kiovp[a_index].iov_len; + *a_length_p = (user_size_t)a_uio->uio_iovs.kiovp[a_index].iov_len; } } @@ -960,7 +965,7 @@ uio_calculateresid( uio_t a_uio ) } } } - a_uio->uio_resid_64 = resid; + a_uio->uio_resid_64 = (user_size_t)resid; /* position to first non zero length iovec (4235922) */ while (a_uio->uio_iovcnt > 0 && a_uio->uio_iovs.uiovp->iov_len == 0) { @@ -982,7 +987,7 @@ uio_calculateresid( uio_t a_uio ) } } } - a_uio->uio_resid_64 = resid; + a_uio->uio_resid_64 = (user_size_t)resid; /* position to first non zero length iovec (4235922) */ while (a_uio->uio_iovcnt > 0 && 
a_uio->uio_iovs.kiovp->iov_len == 0) { @@ -1195,8 +1200,8 @@ copyin_user_iovec_array(user_addr_t uaddr, int spacetype, int count, struct user for (i = count - 1; i >= 0; i--) { if (spacetype == UIO_USERSPACE64) { struct user64_iovec iovec = ((struct user64_iovec *)dst)[i]; - dst[i].iov_base = iovec.iov_base; - dst[i].iov_len = iovec.iov_len; + dst[i].iov_base = (user_addr_t)iovec.iov_base; + dst[i].iov_len = (user_size_t)iovec.iov_len; } else { struct user32_iovec iovec = ((struct user32_iovec *)dst)[i]; dst[i].iov_base = iovec.iov_base; diff --git a/bsd/kern/kern_symfile.c b/bsd/kern/kern_symfile.c index 94b6a8975..720ebf9bf 100644 --- a/bsd/kern/kern_symfile.c +++ b/bsd/kern/kern_symfile.c @@ -162,7 +162,7 @@ kern_ioctl_file_extents(struct kern_direct_file_io_ref_t * ref, u_long theIoctl, fileblk = blkno * ref->blksize; } else if ((ref->vp->v_type == VBLK) || (ref->vp->v_type == VCHR)) { fileblk = offset; - filechunk = ref->filelength; + filechunk = (unsigned long)((ref->filelength > ULONG_MAX) ? ULONG_MAX: ref->filelength); } if (DKIOCUNMAP == theIoctl) { @@ -234,7 +234,8 @@ kern_open_file_for_direct_io(const char * name, uint32_t blksize; off_t maxiocount, count, segcount, wbctotal; boolean_t locked = FALSE; - int fmode, cmode; + int fmode; + mode_t cmode; struct nameidata nd; u_int32_t ndflags; off_t mpFree; @@ -435,7 +436,7 @@ kern_open_file_for_direct_io(const char * name, fileblk = blkno * ref->blksize; } else if ((ref->vp->v_type == VBLK) || (ref->vp->v_type == VCHR)) { fileblk = f_offset; - filechunk = f_offset ? 0 : ref->filelength; + filechunk = f_offset ? 0 : (unsigned long)ref->filelength; } physoffset = 0; @@ -621,8 +622,9 @@ out: int kern_write_file(struct kern_direct_file_io_ref_t * ref, off_t offset, void * addr, size_t len, int ioflag) { + assert(len <= INT32_MAX); return vn_rdwr(UIO_WRITE, ref->vp, - addr, len, offset, + addr, (int)len, offset, UIO_SYSSPACE, ioflag | IO_SYNC | IO_NODELOCKED | IO_UNIT, vfs_context_ucred(ref->ctx), (int *) 0, vfs_context_proc(ref->ctx)); @@ -631,8 +633,9 @@ kern_write_file(struct kern_direct_file_io_ref_t * ref, off_t offset, void * add int kern_read_file(struct kern_direct_file_io_ref_t * ref, off_t offset, void * addr, size_t len, int ioflag) { + assert(len <= INT32_MAX); return vn_rdwr(UIO_READ, ref->vp, - addr, len, offset, + addr, (int)len, offset, UIO_SYSSPACE, ioflag | IO_SYNC | IO_NODELOCKED | IO_UNIT, vfs_context_ucred(ref->ctx), (int *) 0, vfs_context_proc(ref->ctx)); diff --git a/bsd/kern/kern_synch.c b/bsd/kern/kern_synch.c index 019952e73..9b2d1517b 100644 --- a/bsd/kern/kern_synch.c +++ b/bsd/kern/kern_synch.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2016 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2020 Apple Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -88,7 +88,7 @@ _sleep_continue( __unused void *parameter, wait_result_t wresult) if (!catch) { break; } - /* else fall through */ + OS_FALLTHROUGH; case THREAD_INTERRUPTED: if (catch) { if (thread_should_abort(self)) { @@ -257,7 +257,7 @@ _sleep( block: if ((thread_continue_t)continuation != THREAD_CONTINUE_NULL) { ut->uu_continuation = continuation; - ut->uu_pri = pri; + ut->uu_pri = (uint16_t)pri; ut->uu_mtx = mtx; (void) thread_block(_sleep_continue); /* NOTREACHED */ @@ -288,7 +288,7 @@ block: if (catch != THREAD_ABORTSAFE) { break; } - /* else fall through */ + OS_FALLTHROUGH; case THREAD_INTERRUPTED: if (catch == THREAD_ABORTSAFE) { if (thread_should_abort(self)) { diff --git a/bsd/kern/kern_sysctl.c b/bsd/kern/kern_sysctl.c index a6702441c..4c07b8ce9 100644 --- a/bsd/kern/kern_sysctl.c +++ b/bsd/kern/kern_sysctl.c @@ -111,6 +111,7 @@ #include #include +#include #include #include #include @@ -146,6 +147,11 @@ #include #include #include +#include +#include +#if CONFIG_CSR +#include +#endif #if defined(__i386__) || defined(__x86_64__) #include @@ -255,6 +261,9 @@ STATIC int sysdoproc_filt_KERN_PROC_UID(proc_t p, void * arg); STATIC int sysdoproc_filt_KERN_PROC_RUID(proc_t p, void * arg); int sysdoproc_callback(proc_t p, void *arg); +#if CONFIG_THREAD_GROUPS && (DEVELOPMENT || DEBUG) +STATIC int sysctl_get_thread_group_id SYSCTL_HANDLER_ARGS; +#endif /* forward declarations for non-static STATIC */ STATIC void fill_loadavg64(struct loadavg *la, struct user64_loadavg *la64); @@ -266,9 +275,9 @@ STATIC int sysctl_kdebug_ops SYSCTL_HANDLER_ARGS; #if COUNT_SYSCALLS STATIC int sysctl_docountsyscalls SYSCTL_HANDLER_ARGS; #endif /* COUNT_SYSCALLS */ -#if !CONFIG_EMBEDDED +#if defined(XNU_TARGET_OS_OSX) STATIC int sysctl_doprocargs SYSCTL_HANDLER_ARGS; -#endif /* !CONFIG_EMBEDDED */ +#endif /* defined(XNU_TARGET_OS_OSX) */ STATIC int sysctl_doprocargs2 SYSCTL_HANDLER_ARGS; STATIC int sysctl_prochandle SYSCTL_HANDLER_ARGS; STATIC int sysctl_aiomax(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req); @@ -283,6 +292,7 @@ STATIC int sysctl_domainname(struct sysctl_oid *oidp, void *arg1, int arg2, stru STATIC int sysctl_hostname(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req); STATIC int sysctl_procname(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req); STATIC int sysctl_boottime(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req); +STATIC int sysctl_bootuuid(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req); STATIC int sysctl_symfile(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req); #if CONFIG_NFS_CLIENT STATIC int sysctl_netboot(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req); @@ -727,14 +737,14 @@ sysdoproc_filt_KERN_PROC_RUID(proc_t p, void * arg) /* * try over estimating by 5 procs */ -#define KERN_PROCSLOP (5 * sizeof (struct kinfo_proc)) +#define KERN_PROCSLOP (5 * sizeof(struct kinfo_proc)) struct sysdoproc_args { - int buflen; - void *kprocp; + size_t buflen; + void *kprocp; boolean_t is_64_bit; - user_addr_t dp; + user_addr_t dp; size_t needed; - int sizeof_kproc; + unsigned int sizeof_kproc; int *errorp; int uidcheck; int ruidcheck; @@ -787,7 +797,7 @@ sysctl_prochandle SYSCTL_HANDLER_ARGS user_addr_t dp = where; size_t needed = 0; - int buflen = where != USER_ADDR_NULL ? req->oldlen : 0; + size_t buflen = where != USER_ADDR_NULL ? 
req->oldlen : 0; int error = 0; boolean_t is_64_bit = proc_is64bit(current_proc()); struct user32_kinfo_proc user32_kproc; @@ -880,6 +890,7 @@ sysctl_prochandle SYSCTL_HANDLER_ARGS return 0; } + /* * We specify the subcommand code for multiple nodes as the 'req->arg2' value * in the sysctl declaration itself, which comes into the handler function @@ -978,9 +989,10 @@ fill_user32_eproc(proc_t p, struct user32_eproc *__restrict ep) ep->e_pcred.p_svgid = kauth_cred_getsvgid(my_cred); /* A fake historical *kauth_cred_t */ - ep->e_ucred.cr_ref = os_atomic_load(&my_cred->cr_ref, relaxed); + unsigned long refcnt = os_atomic_load(&my_cred->cr_ref, relaxed); + ep->e_ucred.cr_ref = (uint32_t)MIN(refcnt, UINT32_MAX); ep->e_ucred.cr_uid = kauth_cred_getuid(my_cred); - ep->e_ucred.cr_ngroups = posix_cred_get(my_cred)->cr_ngroups; + ep->e_ucred.cr_ngroups = (short)posix_cred_get(my_cred)->cr_ngroups; bcopy(posix_cred_get(my_cred)->cr_groups, ep->e_ucred.cr_groups, NGROUPS * sizeof(gid_t)); @@ -1038,9 +1050,10 @@ fill_user64_eproc(proc_t p, struct user64_eproc *__restrict ep) ep->e_pcred.p_svgid = kauth_cred_getsvgid(my_cred); /* A fake historical *kauth_cred_t */ - ep->e_ucred.cr_ref = os_atomic_load(&my_cred->cr_ref, relaxed); + unsigned long refcnt = os_atomic_load(&my_cred->cr_ref, relaxed); + ep->e_ucred.cr_ref = (uint32_t)MIN(refcnt, UINT32_MAX); ep->e_ucred.cr_uid = kauth_cred_getuid(my_cred); - ep->e_ucred.cr_ngroups = posix_cred_get(my_cred)->cr_ngroups; + ep->e_ucred.cr_ngroups = (short)posix_cred_get(my_cred)->cr_ngroups; bcopy(posix_cred_get(my_cred)->cr_groups, ep->e_ucred.cr_groups, NGROUPS * sizeof(gid_t)); @@ -1073,7 +1086,7 @@ fill_user64_eproc(proc_t p, struct user64_eproc *__restrict ep) STATIC void fill_user32_externproc(proc_t p, struct user32_extern_proc *__restrict exp) { - exp->p_starttime.tv_sec = p->p_start.tv_sec; + exp->p_starttime.tv_sec = (user32_time_t)p->p_start.tv_sec; exp->p_starttime.tv_usec = p->p_start.tv_usec; exp->p_flag = p->p_flag; if (p->p_lflag & P_LTRACED) { @@ -1115,7 +1128,7 @@ fill_user32_externproc(proc_t p, struct user32_extern_proc *__restrict exp) exp->p_priority = p->p_priority; exp->p_nice = p->p_nice; bcopy(&p->p_comm, &exp->p_comm, MAXCOMLEN); - exp->p_xstat = p->p_xstat; + exp->p_xstat = (u_short)MIN(p->p_xstat, USHRT_MAX); exp->p_acflag = p->p_acflag; } @@ -1163,7 +1176,7 @@ fill_user64_externproc(proc_t p, struct user64_extern_proc *__restrict exp) exp->p_priority = p->p_priority; exp->p_nice = p->p_nice; bcopy(&p->p_comm, &exp->p_comm, MAXCOMLEN); - exp->p_xstat = p->p_xstat; + exp->p_xstat = (u_short)MIN(p->p_xstat, USHRT_MAX); exp->p_acflag = p->p_acflag; } @@ -1244,7 +1257,7 @@ SYSCTL_PROC(_kern, KERN_KDEBUG, kdebug, CTLTYPE_NODE | CTLFLAG_RD | CTLFLAG_LOCK ""); -#if !CONFIG_EMBEDDED +#if defined(XNU_TARGET_OS_OSX) /* * Return the top *sizep bytes of the user stack, or the entire area of the * user stack down through the saved exec_path, whichever is smaller. 
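The hunks below rework sysctl_procargsx(), the kernel side of KERN_PROCARGS and KERN_PROCARGS2, so that environment variables can be omitted from the copyout. For context, a minimal userland sketch of the consumer side using the standard two-call size-then-copy pattern; the helper name and error handling are illustrative only. The returned buffer begins with argc, followed by the executable path and the NUL-separated argument strings (environment variables only when the caller is allowed to see them).

#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int
dump_procargs2(pid_t pid)
{
	int mib[3] = { CTL_KERN, KERN_PROCARGS2, (int)pid };
	size_t len = 0;
	char *buf;
	int argc;

	/* First call: the kernel reports how large a buffer is needed. */
	if (sysctl(mib, 3, NULL, &len, NULL, 0) != 0) {
		return -1;
	}
	if ((buf = malloc(len)) == NULL) {
		return -1;
	}
	/* Second call: copy out the argument area itself. */
	if (sysctl(mib, 3, buf, &len, NULL, 0) != 0) {
		free(buf);
		return -1;
	}
	memcpy(&argc, buf, sizeof(argc));
	printf("pid %d: argc=%d, %zu bytes of argument data\n", (int)pid, argc, len);
	free(buf);
	return 0;
}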
@@ -1276,7 +1289,7 @@ SYSCTL_PROC(_kern, KERN_PROCARGS, procargs, CTLTYPE_NODE | CTLFLAG_RD | CTLFLAG_ sysctl_doprocargs, /* Handler function */ NULL, /* Data pointer */ ""); -#endif /* !CONFIG_EMBEDDED */ +#endif /* defined(XNU_TARGET_OS_OSX) */ STATIC int sysctl_doprocargs2 SYSCTL_HANDLER_ARGS @@ -1306,31 +1319,40 @@ SYSCTL_PROC(_kern, KERN_PROCARGS2, procargs2, CTLTYPE_NODE | CTLFLAG_RD | CTLFLA NULL, /* Data pointer */ ""); +#define SYSCTL_PROCARGS_READ_ENVVARS_ENTITLEMENT "com.apple.private.read-environment-variables" STATIC int sysctl_procargsx(int *name, u_int namelen, user_addr_t where, size_t *sizep, proc_t cur_proc, int argc_yes) { - proc_t p; - int buflen = where != USER_ADDR_NULL ? *sizep : 0; + assert(sizep != NULL); + proc_t p = NULL; + size_t buflen = where != USER_ADDR_NULL ? *sizep : 0; int error = 0; - struct _vm_map *proc_map; + struct _vm_map *proc_map = NULL; struct task * task; - vm_map_copy_t tmp; + vm_map_copy_t tmp = NULL; user_addr_t arg_addr; size_t arg_size; caddr_t data; size_t argslen = 0; - int size; - vm_size_t alloc_size = 0; - vm_offset_t copy_start, copy_end; + size_t size = 0; + vm_offset_t copy_start = 0, copy_end; + vm_offset_t smallbuffer_start; kern_return_t ret; int pid; kauth_cred_t my_cred; uid_t uid; int argc = -1; + size_t argvsize; + size_t remaining; + size_t current_arg_index; + size_t current_arg_len; + const char * current_arg; + bool omit_env_vars = true; if (namelen < 1) { - return EINVAL; + error = EINVAL; + goto finish; } if (argc_yes) { @@ -1341,9 +1363,9 @@ sysctl_procargsx(int *name, u_int namelen, user_addr_t where, /* is not NULL then the caller wants us to return the length needed to */ /* hold the data we would return */ if (where != USER_ADDR_NULL && (buflen <= 0 || buflen > ARG_MAX)) { - return EINVAL; + error = EINVAL; + goto finish; } - arg_size = buflen; /* * Lookup process by pid @@ -1351,7 +1373,29 @@ sysctl_procargsx(int *name, u_int namelen, user_addr_t where, pid = name[0]; p = proc_find(pid); if (p == NULL) { - return EINVAL; + error = EINVAL; + goto finish; + } + + /* Allow reading environment variables if any of the following are true: + * - kernel is DEVELOPMENT || DEBUG + * - target process is same as current_proc() + * - target process is not cs_restricted + * - SIP is off + * - caller has an entitlement + */ + +#if DEVELOPMENT || DEBUG + omit_env_vars = false; +#endif + if (p == current_proc() || + !cs_restricted(p) || +#if CONFIG_CSR + csr_check(CSR_ALLOW_UNRESTRICTED_DTRACE) == 0 || +#endif + IOTaskHasEntitlement(current_task(), SYSCTL_PROCARGS_READ_ENVVARS_ENTITLEMENT) + ) { + omit_env_vars = false; } /* @@ -1365,31 +1409,31 @@ sysctl_procargsx(int *name, u_int namelen, user_addr_t where, */ if (!p->user_stack) { - proc_rele(p); - return EINVAL; + error = EINVAL; + goto finish; } - if (where == USER_ADDR_NULL) { - /* caller only wants to know length of proc args data */ - if (sizep == NULL) { - proc_rele(p); - return EFAULT; - } + /* save off argc before releasing the proc */ + argc = p->p_argc; - size = p->p_argslen; - proc_rele(p); - if (argc_yes) { - size += sizeof(int); - } else { - /* - * old PROCARGS will return the executable's path and plus some - * extra space for work alignment and data tags - */ - size += PATH_MAX + (6 * sizeof(int)); - } - size += (size & (sizeof(int) - 1)) ? 
(sizeof(int) - (size & (sizeof(int) - 1))) : 0; - *sizep = size; - return 0; + argslen = p->p_argslen; + + /* + * When these sysctls were introduced, the first string in the strings + * section was just the bare path of the executable. However, for security + * reasons we now prefix this string with executable_path= so it can be + * parsed getenv style. To avoid binary compatability issues with exising + * callers of this sysctl, we strip it off here. + * (rdar://problem/13746466) + */ +#define EXECUTABLE_KEY "executable_path=" + argslen -= strlen(EXECUTABLE_KEY); + + if (where == USER_ADDR_NULL && !omit_env_vars) { + /* caller only wants to know length of proc args data. + * If we don't need to omit environment variables, we can skip + * copying the target process stack */ + goto calculate_size; } my_cred = kauth_cred_proc_ref(p); @@ -1398,13 +1442,11 @@ sysctl_procargsx(int *name, u_int namelen, user_addr_t where, if ((uid != kauth_cred_getuid(kauth_cred_get())) && suser(kauth_cred_get(), &cur_proc->p_acflag)) { - proc_rele(p); - return EINVAL; + error = EINVAL; + goto finish; } - if ((u_int)arg_size > p->p_argslen) { - arg_size = round_page(p->p_argslen); - } + arg_size = round_page(argslen); arg_addr = p->user_stack - arg_size; @@ -1415,14 +1457,10 @@ sysctl_procargsx(int *name, u_int namelen, user_addr_t where, */ task = p->task; if (task == NULL) { - proc_rele(p); - return EINVAL; + error = EINVAL; + goto finish; } - /* save off argc before releasing the proc */ - argc = p->p_argc; - - argslen = p->p_argslen; /* * Once we have a task reference we can convert that into a * map reference, which we will use in the calls below. The @@ -1432,29 +1470,30 @@ sysctl_procargsx(int *name, u_int namelen, user_addr_t where, */ task_reference(task); proc_rele(p); + p = NULL; proc_map = get_task_map_reference(task); task_deallocate(task); if (proc_map == NULL) { - return EINVAL; + error = EINVAL; + goto finish; } - alloc_size = round_page(arg_size); - ret = kmem_alloc(kernel_map, ©_start, alloc_size, VM_KERN_MEMORY_BSD); + ret = kmem_alloc(kernel_map, ©_start, arg_size, VM_KERN_MEMORY_BSD); if (ret != KERN_SUCCESS) { - vm_map_deallocate(proc_map); - return ENOMEM; + error = ENOMEM; + goto finish; } - bzero((void *)copy_start, alloc_size); + bzero((void *)copy_start, arg_size); - copy_end = round_page(copy_start + arg_size); + /* End of buffer should be page aligned */ + assert(copy_start + arg_size == round_page(copy_start + arg_size)); + copy_end = copy_start + arg_size; if (vm_map_copyin(proc_map, (vm_map_address_t)arg_addr, (vm_map_size_t)arg_size, FALSE, &tmp) != KERN_SUCCESS) { - vm_map_deallocate(proc_map); - kmem_free(kernel_map, copy_start, - round_page(arg_size)); - return EIO; + error = EIO; + goto finish; } /* @@ -1462,36 +1501,102 @@ sysctl_procargsx(int *name, u_int namelen, user_addr_t where, * map, we can release the reference to it. 
*/ vm_map_deallocate(proc_map); + proc_map = NULL; if (vm_map_copy_overwrite(kernel_map, (vm_map_address_t)copy_start, tmp, (vm_map_size_t) arg_size, FALSE) != KERN_SUCCESS) { - kmem_free(kernel_map, copy_start, - round_page(arg_size)); - vm_map_copy_discard(tmp); - return EIO; + error = EIO; + goto finish; + } + /* tmp was consumed */ + tmp = NULL; + + if (omit_env_vars) { + argvsize = 0; + + /* Iterate over everything in argv, plus one for the bare executable path */ + for (current_arg_index = 0; current_arg_index < argc + 1 && argvsize < argslen; ++current_arg_index) { + current_arg = (const char *)(copy_end - argslen) + argvsize; + remaining = argslen - argvsize; + current_arg_len = strnlen(current_arg, remaining); + if (current_arg_len < remaining) { + /* We have space for the null terminator */ + current_arg_len += 1; + + if (current_arg_index == 0) { + /* The bare executable path may have multiple null bytes after it for alignment */ + while (current_arg_len < remaining && current_arg[current_arg_len] == 0) { + current_arg_len += 1; + } + } + } + argvsize += current_arg_len; + } + assert(argvsize <= argslen); + + /* Adjust argslen and copy_end to make the copyout range extend to the end of argv */ + copy_end = copy_end - argslen + argvsize; + argslen = argvsize; + } + + if (where == USER_ADDR_NULL) { + /* Skip copyout */ + goto calculate_size; } - if (arg_size > argslen) { + if (buflen >= argslen) { data = (caddr_t) (copy_end - argslen); size = argslen; } else { - data = (caddr_t) (copy_end - arg_size); - size = arg_size; - } - - /* - * When these sysctls were introduced, the first string in the strings - * section was just the bare path of the executable. However, for security - * reasons we now prefix this string with executable_path= so it can be - * parsed getenv style. To avoid binary compatability issues with exising - * callers of this sysctl, we strip it off here if present. - * (rdar://problem/13746466) - */ -#define EXECUTABLE_KEY "executable_path=" - if (strncmp(EXECUTABLE_KEY, data, strlen(EXECUTABLE_KEY)) == 0) { - data += strlen(EXECUTABLE_KEY); - size -= strlen(EXECUTABLE_KEY); + /* + * Before rdar://25397314, this function contained incorrect logic when buflen is less + * than argslen. The problem was that it copied in `buflen` bytes from the end of the target + * process user stack into the beginning of a buffer of size round_page(buflen), and then + * copied out `buflen` bytes from the end of this buffer. The effect of this was that + * the caller of this sysctl would get zeros at the end of their buffer. + * + * To preserve this behavior, bzero everything from copy_end-round_page(buflen)+buflen to the + * end of the buffer. This emulates copying in only `buflen` bytes. + * + * + * In the old code: + * + * copy_start .... size: round_page(buflen) .... copy_end + * [---copied in data (size: buflen)---|--- zeros ----------] + * ^ + * data = copy_end - buflen + * + * + * In the new code: + * copy_start .... size: round_page(p->argslen) .... full copy_end + * ^ ....................... p->argslen ...............................^ + * ^ ^ truncated copy_end ^ + * ^ ^ ^ ^ + * ^ ................ argslen ........................ 
^ + * ^ ^ ^ ^ + * [-------copied in data (size: round_page(p->argslen))-------:----env vars---] + * ^ ^ + * ^ data = copy_end - buflen + * smallbuffer_start = max(copy_end - round_page(buflen), copy_start) + * + * + * Full copy_end: copy_end calculated from copy_start + round_page(p->argslen) + * Truncated copy_end: copy_end after truncation to remove environment variables. + * + * If environment variables were omitted, then we use the truncated copy_end, otherwise + * we use full copy_end. + * + * smallbuffer_start: represents where copy_start would be in the old code. + * data: The beginning of the region we copyout + */ + smallbuffer_start = copy_end - round_page(buflen); + if (smallbuffer_start < copy_start) { + smallbuffer_start = copy_start; + } + bzero((void *)(smallbuffer_start + buflen), copy_end - (smallbuffer_start + buflen)); + data = (caddr_t) (copy_end - buflen); + size = buflen; } if (argc_yes) { @@ -1510,12 +1615,12 @@ sysctl_procargsx(int *name, u_int namelen, user_addr_t where, * * Note: we keep all pointers&sizes aligned to word boundries */ - if ((!error) && (buflen > 0 && (u_int)buflen > argslen)) { + if ((!error) && (buflen > 0 && (u_int)buflen > size)) { int binPath_sz, alignedBinPath_sz = 0; int extraSpaceNeeded, addThis; user_addr_t placeHere; char * str = (char *) data; - int max_len = size; + size_t max_len = size; /* Some apps are really bad about messing up their stacks * So, we have to be extra careful about getting the length @@ -1552,7 +1657,7 @@ sysctl_procargsx(int *name, u_int namelen, user_addr_t where, extraSpaceNeeded = alignedBinPath_sz + addThis + binPath_sz + (4 * sizeof(int)); /* is there is room to tack on argv[0]? */ - if ((buflen & ~(sizeof(int) - 1)) >= (argslen + extraSpaceNeeded)) { + if ((buflen & ~(sizeof(int) - 1)) >= (size + extraSpaceNeeded)) { placeHere += addThis; suword(placeHere, 0); placeHere += sizeof(int); @@ -1570,17 +1675,38 @@ sysctl_procargsx(int *name, u_int namelen, user_addr_t where, } } - if (copy_start != (vm_offset_t) 0) { - kmem_free(kernel_map, copy_start, copy_end - copy_start); - } - if (error) { - return error; +calculate_size: + /* Size has already been calculated for the where != NULL case */ + if (where == USER_ADDR_NULL) { + size = argslen; + if (argc_yes) { + size += sizeof(int); + } else { + /* + * old PROCARGS will return the executable's path and plus some + * extra space for work alignment and data tags + */ + size += PATH_MAX + (6 * sizeof(int)); + } + size += (size & (sizeof(int) - 1)) ? (sizeof(int) - (size & (sizeof(int) - 1))) : 0; } - if (where != USER_ADDR_NULL) { - *sizep = size; + *sizep = size; + +finish: + if (p != NULL) { + proc_rele(p); } - return 0; + if (tmp != NULL) { + vm_map_copy_discard(tmp); + } + if (proc_map != NULL) { + vm_map_deallocate(proc_map); + } + if (copy_start != (vm_offset_t) 0) { + kmem_free(kernel_map, copy_start, arg_size); + } + return error; } @@ -1829,28 +1955,111 @@ SYSCTL_PROC(_kern, KERN_OSVERSION, osversion, osversion, 256 /* OSVERSIZE*/, sysctl_osversion, "A", ""); -char osproductversion[48] = { '\0' }; - -STATIC int -sysctl_osproductversion(__unused struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req) +static bool +_already_set_or_not_launchd(struct sysctl_req *req, char *val) { if (req->newptr != 0) { /* * Can only ever be set by launchd, and only once at boot. 
*/ - if (req->p->p_pid != 1 || osproductversion[0] != '\0') { - return EPERM; + if (req->p->p_pid != 1 || val[0] != '\0') { + return true; } } + return false; +} + +#if XNU_TARGET_OS_OSX +static int +sysctl_system_version_compat +(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req) +{ + int oldval = (task_has_system_version_compat_enabled(current_task())); + int new_value = 0, changed = 0; + + int error = sysctl_io_number(req, oldval, sizeof(int), &new_value, &changed); + if (changed) { + task_set_system_version_compat_enabled(current_task(), (new_value)); + } + return error; +} + +SYSCTL_PROC(_kern, OID_AUTO, system_version_compat, + CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED, + 0, 0, sysctl_system_version_compat, "A", ""); + +char osproductversioncompat[48] = { '\0' }; + +static int +sysctl_osproductversioncompat(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req) +{ + if (_already_set_or_not_launchd(req, osproductversioncompat)) { + return EPERM; + } + return sysctl_handle_string(oidp, arg1, arg2, req); +} + + +SYSCTL_PROC(_kern, OID_AUTO, osproductversioncompat, + CTLFLAG_RW | CTLFLAG_KERN | CTLTYPE_STRING | CTLFLAG_LOCKED, + osproductversioncompat, sizeof(osproductversioncompat), + sysctl_osproductversioncompat, "A", "The ProductVersion from SystemVersionCompat.plist"); +#endif + +char osproductversion[48] = { '\0' }; + +static int +sysctl_osproductversion(__unused struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req) +{ + if (_already_set_or_not_launchd(req, osproductversion)) { + return EPERM; + } +#if !XNU_TARGET_OS_OSX return sysctl_handle_string(oidp, arg1, arg2, req); +#else + if (task_has_system_version_compat_enabled(current_task()) && (osproductversioncompat[0] != '\0')) { + return sysctl_handle_string(oidp, osproductversioncompat, arg2, req); + } else { + return sysctl_handle_string(oidp, arg1, arg2, req); + } +#endif } +#if XNU_TARGET_OS_OSX +static_assert(sizeof(osproductversioncompat) == sizeof(osproductversion), + "osproductversion size matches osproductversioncompat size"); +#endif + SYSCTL_PROC(_kern, OID_AUTO, osproductversion, CTLFLAG_RW | CTLFLAG_KERN | CTLTYPE_STRING | CTLFLAG_LOCKED, osproductversion, sizeof(osproductversion), sysctl_osproductversion, "A", "The ProductVersion from SystemVersion.plist"); +char osreleasetype[48] = { '\0' }; + +STATIC int +sysctl_osreleasetype(__unused struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req) +{ + if (_already_set_or_not_launchd(req, osreleasetype)) { + return EPERM; + } + return sysctl_handle_string(oidp, arg1, arg2, req); +} + +void reset_osreleasetype(void); + +void +reset_osreleasetype(void) +{ + memset(osreleasetype, 0, sizeof(osreleasetype)); +} + +SYSCTL_PROC(_kern, OID_AUTO, osreleasetype, + CTLFLAG_RW | CTLFLAG_KERN | CTLTYPE_STRING | CTLFLAG_LOCKED, + osreleasetype, sizeof(osreleasetype), + sysctl_osreleasetype, "A", "The ReleaseType from SystemVersion.plist"); + static uint64_t iossupportversion_string[48]; STATIC int @@ -1880,7 +2089,10 @@ sysctl_osvariant_status(__unused struct sysctl_oid *oidp, void *arg1, int arg2, { if (req->newptr != 0) { /* - * Can only ever be set by launchd, and only once at boot. + * Can only ever be set by launchd, and only once. + * Reset by usrctl() -> reset_osvariant_status() during + * userspace reboot, since userspace could reboot into + * a different variant. 
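A minimal userland sketch, not part of the patch, of how the version sysctls defined above behave: any process may read them through sysctlbyname(), writes are accepted only from launchd (pid 1) and only while the value is still unset, and on macOS a task with system_version_compat enabled is served the SystemVersionCompat string instead.

#include <sys/sysctl.h>
#include <stdio.h>

static void
show_product_version(void)
{
	char vers[48]; /* matches the kernel-side osproductversion buffer */
	size_t len = sizeof(vers);

	if (sysctlbyname("kern.osproductversion", vers, &len, NULL, 0) == 0) {
		printf("ProductVersion: %s\n", vers);
	}
	/* Setting it from an ordinary process fails with EPERM. */
}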
*/ if (req->p->p_pid != 1 || osvariant_status != 0) { return EPERM; @@ -1895,11 +2107,19 @@ SYSCTL_PROC(_kern, OID_AUTO, osvariant_status, &osvariant_status, sizeof(osvariant_status), sysctl_osvariant_status, "Q", "Opaque flags used to cache OS variant information"); +void reset_osvariant_status(void); + +void +reset_osvariant_status(void) +{ + osvariant_status = 0; +} + extern void commpage_update_dyld_flags(uint64_t); -static uint64_t dyld_system_flags = 0; +uint64_t dyld_flags = 0; STATIC int -sysctl_dyld_system_flags(__unused struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req) +sysctl_dyld_flags(__unused struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req) { /* * Can only ever be set by launchd, possibly several times @@ -1911,15 +2131,15 @@ sysctl_dyld_system_flags(__unused struct sysctl_oid *oidp, void *arg1, int arg2, int res = sysctl_handle_quad(oidp, arg1, arg2, req); if (req->newptr && res == 0) { - commpage_update_dyld_flags(osvariant_status); + commpage_update_dyld_flags(dyld_flags); } return res; } -SYSCTL_PROC(_kern, OID_AUTO, dyld_system_flags, +SYSCTL_PROC(_kern, OID_AUTO, dyld_flags, CTLFLAG_RW | CTLTYPE_QUAD | CTLFLAG_LOCKED | CTLFLAG_MASKED, - &dyld_system_flags, sizeof(dyld_system_flags), - sysctl_dyld_system_flags, "Q", "Opaque flags used to cache dyld system-wide configuration"); + &dyld_flags, sizeof(dyld_flags), + sysctl_dyld_flags, "Q", "Opaque flags used to cache dyld system-wide configuration"); #if defined(XNU_TARGET_OS_BRIDGE) char macosproductversion[MACOS_VERS_LEN] = { '\0' }; @@ -1967,6 +2187,67 @@ SYSCTL_PROC(_kern, OID_AUTO, kernelcacheuuid, kernelcache_uuid_string, sizeof(kernelcache_uuid_string), sysctl_kernelcacheuuid, "A", ""); +STATIC int +sysctl_systemfilesetuuid(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req) +{ + int rval = ENOENT; + if (pageablekc_uuid_valid) { + rval = sysctl_handle_string(oidp, arg1, arg2, req); + } + return rval; +} + +SYSCTL_PROC(_kern, OID_AUTO, systemfilesetuuid, + CTLFLAG_RD | CTLFLAG_KERN | CTLTYPE_STRING | CTLFLAG_LOCKED, + pageablekc_uuid_string, sizeof(pageablekc_uuid_string), + sysctl_systemfilesetuuid, "A", ""); + +STATIC int +sysctl_auxiliaryfilesetuuid(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req) +{ + int rval = ENOENT; + if (auxkc_uuid_valid) { + rval = sysctl_handle_string(oidp, arg1, arg2, req); + } + return rval; +} + +SYSCTL_PROC(_kern, OID_AUTO, auxiliaryfilesetuuid, + CTLFLAG_RD | CTLFLAG_KERN | CTLTYPE_STRING | CTLFLAG_LOCKED, + auxkc_uuid_string, sizeof(auxkc_uuid_string), + sysctl_auxiliaryfilesetuuid, "A", ""); + +STATIC int +sysctl_filesetuuid(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req) +{ + int rval = ENOENT; + kc_format_t kcformat; + kernel_mach_header_t *mh; + void *uuid = NULL; + unsigned long uuidlen = 0; + uuid_string_t uuid_str; + + if (!PE_get_primary_kc_format(&kcformat) || kcformat != KCFormatFileset) { + return rval; + } + + mh = (kernel_mach_header_t *)PE_get_kc_header(KCKindPrimary); + uuid = getuuidfromheader(mh, &uuidlen); + + if ((uuid != NULL) && (uuidlen == sizeof(uuid_t))) { + uuid_unparse_upper(*(uuid_t *)uuid, uuid_str); + rval = sysctl_io_string(req, (char *)uuid_str, sizeof(uuid_str), 0, NULL); + } + + return rval; +} + +SYSCTL_PROC(_kern, OID_AUTO, filesetuuid, + CTLFLAG_RD | CTLFLAG_KERN | CTLTYPE_STRING | CTLFLAG_LOCKED, + NULL, 0, + sysctl_filesetuuid, "A", ""); + + SYSCTL_INT(_kern, KERN_MAXFILES, maxfiles, CTLFLAG_RW | CTLFLAG_KERN | 
CTLFLAG_LOCKED, &maxfiles, 0, ""); @@ -2057,6 +2338,16 @@ SYSCTL_INT(_kern, OID_AUTO, sched_allow_NO_SMT_threads, &sched_allow_NO_SMT_threads, 0, ""); #if (DEVELOPMENT || DEBUG) +extern int smt_sched_bonus_16ths; +SYSCTL_INT(_kern, OID_AUTO, smt_sched_bonus_16ths, + CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED, + &smt_sched_bonus_16ths, 0, ""); + +extern int smt_timeshare_enabled; +SYSCTL_INT(_kern, OID_AUTO, sched_smt_timeshare_enable, + CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED, + &smt_timeshare_enabled, 0, ""); + extern int sched_smt_balance; SYSCTL_INT(_kern, OID_AUTO, sched_smt_balance, CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED, @@ -2163,6 +2454,23 @@ SYSCTL_INT(_kern, OID_AUTO, legacy_footprint_entitlement_mode, &legacy_footprint_entitlement_mode, 0, ""); #endif /* __arm64__ */ +static int +sysctl_kern_sched_rt_n_backup_processors(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req) +{ + int new_value, changed; + int old_value = sched_get_rt_n_backup_processors(); + int error = sysctl_io_number(req, old_value, sizeof(int), &new_value, &changed); + if (changed) { + sched_set_rt_n_backup_processors(new_value); + } + + return error; +} + +SYSCTL_PROC(_kern, OID_AUTO, sched_rt_n_backup_processors, + CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, + 0, 0, sysctl_kern_sched_rt_n_backup_processors, "I", ""); + #endif /* (DEVELOPMENT || DEBUG) */ STATIC int @@ -2305,6 +2613,7 @@ SYSCTL_STRING(_kern, OID_AUTO, bootsessionuuid, CTLFLAG_RD | CTLFLAG_LOCKED, &bootsessionuuid_string, sizeof(bootsessionuuid_string), ""); + STATIC int sysctl_boottime (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req) @@ -2320,7 +2629,7 @@ sysctl_boottime return sysctl_io_opaque(req, &t, sizeof(t), NULL); } else { struct user32_timeval t = {}; - t.tv_sec = tv.tv_sec; + t.tv_sec = (user32_time_t)tv.tv_sec; t.tv_usec = tv.tv_usec; return sysctl_io_opaque(req, &t, sizeof(t), NULL); } @@ -2330,6 +2639,107 @@ SYSCTL_PROC(_kern, KERN_BOOTTIME, boottime, CTLTYPE_STRUCT | CTLFLAG_KERN | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0, sysctl_boottime, "S,timeval", ""); +extern const char* IOGetBootUUID(void); + +/* non-static: written by imageboot.c */ +uuid_string_t fake_bootuuid; + +STATIC int +sysctl_bootuuid +(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req) +{ + int error = ENOENT; + + /* check the first byte to see if the string has been + * populated. this is a uuid_STRING_t, this check would + * not work with a uuid_t. 
+ */ + if (fake_bootuuid[0] != '\0') { + error = sysctl_io_string(req, fake_bootuuid, 0, 0, NULL); + goto out; + } + + const char *uuid_string = IOGetBootUUID(); + if (uuid_string) { + uuid_t boot_uuid; + error = uuid_parse(uuid_string, boot_uuid); + if (!error) { + error = sysctl_io_string(req, __DECONST(char *, uuid_string), 0, 0, NULL); + } + } + +out: + return error; +} + +SYSCTL_PROC(_kern, OID_AUTO, bootuuid, + CTLTYPE_STRING | CTLFLAG_KERN | CTLFLAG_RD | CTLFLAG_LOCKED, + 0, 0, sysctl_bootuuid, "A", ""); + + +extern const char* IOGetApfsPrebootUUID(void); +extern const char *IOGetAssociatedApfsVolgroupUUID(void); + +STATIC int +sysctl_apfsprebootuuid +(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req) +{ + int error = ENOENT; + + const char *uuid_string = IOGetApfsPrebootUUID(); + if (uuid_string) { + uuid_t apfs_preboot_uuid; + error = uuid_parse(uuid_string, apfs_preboot_uuid); + if (!error) { + error = sysctl_io_string(req, __DECONST(char *, uuid_string), 0, 0, NULL); + } + } + + return error; +} + +SYSCTL_PROC(_kern, OID_AUTO, apfsprebootuuid, + CTLTYPE_STRING | CTLFLAG_KERN | CTLFLAG_RD | CTLFLAG_LOCKED, + 0, 0, sysctl_apfsprebootuuid, "A", ""); + +STATIC int +sysctl_targetsystemvolgroupuuid +(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req) +{ + int error = ENOENT; + + const char *uuid_string = IOGetApfsPrebootUUID(); + if (uuid_string) { + uuid_t apfs_preboot_uuid; + error = uuid_parse(uuid_string, apfs_preboot_uuid); + if (!error) { + error = sysctl_io_string(req, __DECONST(char *, uuid_string), 0, 0, NULL); + } + } else { + /* + * In special boot modes, such as kcgen-mode, the + * apfs-preboot-uuid property will not be set. Instead, a + * different property, associated-volume-group, will be set + * which indicates the UUID of the VolumeGroup containing the + * system volume into which you will boot. 
+ */ + uuid_string = IOGetAssociatedApfsVolgroupUUID(); + if (uuid_string) { + uuid_t apfs_preboot_uuid; + error = uuid_parse(uuid_string, apfs_preboot_uuid); + if (!error) { + error = sysctl_io_string(req, __DECONST(char *, uuid_string), 0, 0, NULL); + } + } + } + + return error; +} + +SYSCTL_PROC(_kern, OID_AUTO, targetsystemvolgroupuuid, + CTLTYPE_STRING | CTLFLAG_KERN | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, + 0, 0, sysctl_targetsystemvolgroupuuid, "A", ""); + STATIC int sysctl_symfile (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req) @@ -2886,9 +3296,7 @@ SYSCTL_PROC(_debug, OID_AUTO, toggle_address_reuse, CTLFLAG_ANYBODY | CTLTYPE_IN #ifdef CONFIG_XNUPOST -extern int xnupost_export_testdata(void *outp, uint32_t size, uint32_t *lenp); extern uint32_t xnupost_get_estimated_testdata_size(void); - extern int xnupost_reset_all_tests(void); STATIC int @@ -3226,7 +3634,7 @@ sysctl_swapusage xsu.xsu_total = swap_total; xsu.xsu_avail = swap_avail; xsu.xsu_used = swap_total - swap_avail; - xsu.xsu_pagesize = swap_pagesize; + xsu.xsu_pagesize = (u_int32_t)MIN(swap_pagesize, UINT32_MAX); xsu.xsu_encrypted = swap_encrypted; return sysctl_io_opaque(req, &xsu, sizeof(xsu), NULL); } @@ -3368,6 +3776,7 @@ out: return error; } + STATIC int sysctl_sysctl_native(__unused struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req) @@ -3383,7 +3792,7 @@ sysctl_sysctl_native(__unused struct sysctl_oid *oidp, void *arg1, int arg2, } return SYSCTL_OUT(req, &res, sizeof(res)); } -SYSCTL_PROC(_sysctl, OID_AUTO, proc_native, CTLTYPE_NODE | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0, sysctl_sysctl_native, "I", "proc_native"); +SYSCTL_PROC(_sysctl, OID_AUTO, proc_native, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0, sysctl_sysctl_native, "I", "proc_native"); STATIC int sysctl_sysctl_cputype(__unused struct sysctl_oid *oidp, void *arg1, int arg2, @@ -3460,6 +3869,29 @@ SYSCTL_PROC(_kern, OID_AUTO, slide, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0, sysctl_slide, "I", ""); +/* User address of the PFZ */ +#if DEBUG || DEVELOPMENT +extern user32_addr_t commpage_text32_location; +extern user64_addr_t commpage_text64_location; + +STATIC int +sysctl_pfz_start SYSCTL_HANDLER_ARGS +{ +#pragma unused(oidp, arg1, arg2) + +#ifdef __LP64__ + return sysctl_io_number(req, commpage_text64_location, sizeof(user64_addr_t), NULL, NULL); +#else + return sysctl_io_number(req, commpage_text32_location, sizeof(user32_addr_t), NULL, NULL); +#endif +} + +SYSCTL_PROC(_kern, OID_AUTO, pfz, + CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED | CTLFLAG_MASKED, + 0, 0, sysctl_pfz_start, "I", ""); +#endif + + /* * Limit on total memory users can wire. * @@ -3475,8 +3907,10 @@ SYSCTL_PROC(_kern, OID_AUTO, slide, vm_map_size_t vm_global_user_wire_limit; vm_map_size_t vm_per_task_user_wire_limit; -extern uint64_t max_mem; +extern uint64_t max_mem_actual, max_mem; +uint64_t vm_add_wire_count_over_global_limit; +uint64_t vm_add_wire_count_over_user_limit; /* * We used to have a global in the kernel called vm_global_no_user_wire_limit which was the inverse * of vm_global_user_wire_limit. 
But maintaining both of those is silly, and vm_global_user_wire_limit is the @@ -3491,14 +3925,18 @@ sysctl_global_no_user_wire_amount(__unused struct sysctl_oid *oidp, __unused voi vm_map_size_t new_value; int changed; int error; + uint64_t config_memsize = max_mem; +#if defined(XNU_TARGET_OS_OSX) + config_memsize = max_mem_actual; +#endif /* defined(XNU_TARGET_OS_OSX) */ - old_value = max_mem - vm_global_user_wire_limit; + old_value = (vm_map_size_t)(config_memsize - vm_global_user_wire_limit); error = sysctl_io_number(req, old_value, sizeof(vm_map_size_t), &new_value, &changed); if (changed) { - if ((uint64_t)new_value > max_mem) { + if ((uint64_t)new_value > config_memsize) { error = EINVAL; } else { - vm_global_user_wire_limit = max_mem - new_value; + vm_global_user_wire_limit = (vm_map_size_t)(config_memsize - new_value); } } return error; @@ -3516,8 +3954,36 @@ SYSCTL_QUAD(_vm, OID_AUTO, user_wire_limit, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_per SYSCTL_PROC(_vm, OID_AUTO, global_no_user_wire_amount, CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED, 0, 0, &sysctl_global_no_user_wire_amount, "Q", ""); #endif +/* + * Relaxed atomic RW of a 64bit value via sysctl. + */ +STATIC int +sysctl_r_64bit_atomic(uint64_t *ptr, struct sysctl_req *req) +{ + uint64_t old_value; + uint64_t new_value; + int error; + + old_value = os_atomic_load_wide(ptr, relaxed); + error = sysctl_io_number(req, old_value, sizeof(vm_map_size_t), &new_value, NULL); + return error; +} +STATIC int +sysctl_add_wire_count_over_global_limit(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req) +{ + return sysctl_r_64bit_atomic(&vm_add_wire_count_over_global_limit, req); +} +STATIC int +sysctl_add_wire_count_over_user_limit(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req) +{ + return sysctl_r_64bit_atomic(&vm_add_wire_count_over_user_limit, req); +} + +SYSCTL_PROC(_vm, OID_AUTO, add_wire_count_over_global_limit, CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0, &sysctl_add_wire_count_over_global_limit, "Q", ""); +SYSCTL_PROC(_vm, OID_AUTO, add_wire_count_over_user_limit, CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0, &sysctl_add_wire_count_over_user_limit, "Q", ""); + #if DEVELOPMENT || DEBUG -/* These sysyctls are used to test the wired limit. */ +/* These sysctls are used to test the wired limit. 
*/ extern unsigned int vm_page_wire_count; extern uint32_t vm_lopage_free_count; SYSCTL_INT(_vm, OID_AUTO, page_wire_count, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_wire_count, 0, ""); @@ -3688,6 +4154,36 @@ SYSCTL_INT(_vm, OID_AUTO, compressor_thread_minpages1, CTLFLAG_RD | CTLFLAG_LOCK SYSCTL_INT(_vm, OID_AUTO, compressor_thread_maxpages0, CTLFLAG_RD | CTLFLAG_LOCKED, &vmct_stats.vmct_maxpages[0], 0, ""); SYSCTL_INT(_vm, OID_AUTO, compressor_thread_maxpages1, CTLFLAG_RD | CTLFLAG_LOCKED, &vmct_stats.vmct_maxpages[1], 0, ""); +int vm_compressor_injected_error_count; + +SYSCTL_INT(_vm, OID_AUTO, compressor_injected_error_count, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_compressor_injected_error_count, 0, ""); + +static int +sysctl_compressor_inject_error(__unused struct sysctl_oid *oidp, + __unused void *arg1, __unused int arg2, struct sysctl_req *req) +{ + int result; + vm_address_t va = 0; + int changed; + + result = sysctl_io_number(req, va, sizeof(va), &va, &changed); + if (result == 0 && changed) { + result = vm_map_inject_error(current_map(), va); + if (result == 0) { + /* + * Count the number of errors injected successfully to detect + * situations where corruption was caused by improper use of this + * sysctl. + */ + os_atomic_inc(&vm_compressor_injected_error_count, relaxed); + } + } + return result; +} + +SYSCTL_PROC(_vm, OID_AUTO, compressor_inject_error, CTLTYPE_QUAD | CTLFLAG_LOCKED | CTLFLAG_RW, + 0, 0, sysctl_compressor_inject_error, "Q", "flips a bit in a compressed page for the current task"); + #endif SYSCTL_QUAD(_vm, OID_AUTO, lz4_compressions, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_stats.lz4_compressions, ""); @@ -3843,6 +4339,9 @@ extern uint32_t vm_grab_anon_nops; SYSCTL_INT(_vm, OID_AUTO, vm_grab_anon_overrides, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_pageout_debug.vm_grab_anon_overrides, 0, ""); SYSCTL_INT(_vm, OID_AUTO, vm_grab_anon_nops, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_pageout_debug.vm_grab_anon_nops, 0, ""); +extern int vm_page_delayed_work_ctx_needed; +SYSCTL_INT(_vm, OID_AUTO, vm_page_needed_delayed_work_ctx, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_delayed_work_ctx_needed, 0, ""); + /* log message counters for persistence mode */ extern uint32_t oslog_p_total_msgcount; extern uint32_t oslog_p_metadata_saved_msgcount; @@ -3851,6 +4350,8 @@ extern uint32_t oslog_p_error_count; extern uint32_t oslog_p_saved_msgcount; extern uint32_t oslog_p_dropped_msgcount; extern uint32_t oslog_p_boot_dropped_msgcount; +extern uint32_t oslog_p_coprocessor_total_msgcount; +extern uint32_t oslog_p_coprocessor_dropped_msgcount; /* log message counters for streaming mode */ extern uint32_t oslog_s_total_msgcount; @@ -3859,6 +4360,15 @@ extern uint32_t oslog_s_error_count; extern uint32_t oslog_s_streamed_msgcount; extern uint32_t oslog_s_dropped_msgcount; +/* log message counters for msgbuf logging */ +extern uint32_t oslog_msgbuf_msgcount; +extern uint32_t oslog_msgbuf_dropped_msgcount; +extern uint32_t oslog_msgbuf_dropped_charcount; + +/* log message counters for vaddlog logging */ +extern uint32_t vaddlog_msgcount; +extern uint32_t vaddlog_msgcount_dropped; + SYSCTL_UINT(_debug, OID_AUTO, oslog_p_total_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_p_total_msgcount, 0, ""); SYSCTL_UINT(_debug, OID_AUTO, oslog_p_metadata_saved_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_p_metadata_saved_msgcount, 0, ""); SYSCTL_UINT(_debug, OID_AUTO, oslog_p_metadata_dropped_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_p_metadata_dropped_msgcount, 0, ""); @@ 
-3866,6 +4376,8 @@ SYSCTL_UINT(_debug, OID_AUTO, oslog_p_error_count, CTLFLAG_ANYBODY | CTLFLAG_RD SYSCTL_UINT(_debug, OID_AUTO, oslog_p_saved_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_p_saved_msgcount, 0, ""); SYSCTL_UINT(_debug, OID_AUTO, oslog_p_dropped_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_p_dropped_msgcount, 0, ""); SYSCTL_UINT(_debug, OID_AUTO, oslog_p_boot_dropped_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_p_boot_dropped_msgcount, 0, ""); +SYSCTL_UINT(_debug, OID_AUTO, oslog_p_coprocessor_total_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_p_coprocessor_total_msgcount, 0, ""); +SYSCTL_UINT(_debug, OID_AUTO, oslog_p_coprocessor_dropped_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_p_coprocessor_dropped_msgcount, 0, ""); SYSCTL_UINT(_debug, OID_AUTO, oslog_s_total_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_s_total_msgcount, 0, ""); SYSCTL_UINT(_debug, OID_AUTO, oslog_s_metadata_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_s_metadata_msgcount, 0, ""); @@ -3873,6 +4385,12 @@ SYSCTL_UINT(_debug, OID_AUTO, oslog_s_error_count, CTLFLAG_ANYBODY | CTLFLAG_RD SYSCTL_UINT(_debug, OID_AUTO, oslog_s_streamed_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_s_streamed_msgcount, 0, ""); SYSCTL_UINT(_debug, OID_AUTO, oslog_s_dropped_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_s_dropped_msgcount, 0, ""); +SYSCTL_UINT(_debug, OID_AUTO, oslog_msgbuf_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_msgbuf_msgcount, 0, ""); +SYSCTL_UINT(_debug, OID_AUTO, oslog_msgbuf_dropped_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_msgbuf_dropped_msgcount, 0, ""); +SYSCTL_UINT(_debug, OID_AUTO, oslog_msgbuf_dropped_charcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &oslog_msgbuf_dropped_charcount, 0, ""); + +SYSCTL_UINT(_debug, OID_AUTO, vaddlog_msgcount, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &vaddlog_msgcount, 0, ""); +SYSCTL_UINT(_debug, OID_AUTO, vaddlog_msgcount_dropped, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, &vaddlog_msgcount_dropped, 0, ""); #endif /* DEVELOPMENT || DEBUG */ @@ -3905,6 +4423,28 @@ SYSCTL_INT(_kern, OID_AUTO, ipc_portbt, CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED, &ipc_portbt, 0, ""); +/* + * Mach message signature validation control and outputs + */ +extern unsigned int ikm_signature_failures; +SYSCTL_INT(_kern, OID_AUTO, ikm_signature_failures, + CTLFLAG_RD | CTLFLAG_LOCKED, &ikm_signature_failures, 0, "Message signature failure count"); +extern unsigned int ikm_signature_failure_id; +SYSCTL_INT(_kern, OID_AUTO, ikm_signature_failure_id, + CTLFLAG_RD | CTLFLAG_LOCKED, &ikm_signature_failure_id, 0, "Message signature failure count"); + +#if (DEVELOPMENT || DEBUG) +extern unsigned int ikm_signature_panic_disable; +SYSCTL_INT(_kern, OID_AUTO, ikm_signature_panic_disable, + CTLFLAG_RW | CTLFLAG_LOCKED, &ikm_signature_panic_disable, 0, "Message signature failure mode"); +extern unsigned int ikm_signature_header_failures; +SYSCTL_INT(_kern, OID_AUTO, ikm_signature_header_failures, + CTLFLAG_RD | CTLFLAG_LOCKED, &ikm_signature_header_failures, 0, "Message header signature failure count"); +extern unsigned int ikm_signature_trailer_failures; +SYSCTL_INT(_kern, OID_AUTO, ikm_signature_trailer_failures, + CTLFLAG_RD | CTLFLAG_LOCKED, &ikm_signature_trailer_failures, 0, "Message trailer signature failure count"); +#endif + /* * Scheduler sysctls */ @@ -3941,18 +4481,14 
@@ SYSCTL_PROC(_kern, OID_AUTO, cpu_checkin_interval, /* - * Only support runtime modification on embedded platforms - * with development config enabled + * Only support runtime modification on development / debug */ -#if CONFIG_EMBEDDED -#if !SECURE_KERNEL +#if DEVELOPMENT || DEBUG extern int precise_user_kernel_time; SYSCTL_INT(_kern, OID_AUTO, precise_user_kernel_time, CTLFLAG_RW | CTLFLAG_LOCKED, &precise_user_kernel_time, 0, "Precise accounting of kernel vs. user time"); -#endif -#endif - +#endif /* DEVELOPMENT || DEBUG */ /* Parameters related to timer coalescing tuning, to be replaced * with a dedicated systemcall in the future. @@ -4138,9 +4674,43 @@ SYSCTL_PROC(_machdep, OID_AUTO, user_idle_level, SYSCTL_INT(_kern, OID_AUTO, hv_support, CTLFLAG_KERN | CTLFLAG_RD | CTLFLAG_LOCKED, &hv_support_available, 0, ""); + +SYSCTL_INT(_kern, OID_AUTO, hv_disable, + CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED, + &hv_disable, 0, ""); +#endif + +#if DEVELOPMENT || DEBUG +extern uint64_t driverkit_checkin_timed_out; +SYSCTL_QUAD(_kern, OID_AUTO, driverkit_checkin_timed_out, + CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED, + &driverkit_checkin_timed_out, "timestamp of dext checkin timeout"); #endif -#if CONFIG_EMBEDDED +static int +hv_vmm_present SYSCTL_HANDLER_ARGS +{ + __unused struct sysctl_oid *unused_oidp = oidp; + __unused void *unused_arg1 = arg1; + __unused int unused_arg2 = arg2; + + int hv_vmm_present = 0; + +#if defined (__arm64__) + /* Need a way to determine if ARM xnu is running as a guest */ +#elif defined (__x86_64__) + hv_vmm_present = cpuid_vmm_present(); +#endif + + return SYSCTL_OUT(req, &hv_vmm_present, sizeof(hv_vmm_present)); +} + +SYSCTL_PROC(_kern, OID_AUTO, hv_vmm_present, + CTLTYPE_INT | CTLFLAG_ANYBODY | CTLFLAG_KERN | CTLFLAG_LOCKED, + 0, 0, + hv_vmm_present, "I", ""); + +#if CONFIG_DARKBOOT STATIC int sysctl_darkboot SYSCTL_HANDLER_ARGS { @@ -4210,7 +4780,7 @@ exit: SYSCTL_PROC(_kern, OID_AUTO, darkboot, CTLFLAG_KERN | CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED | CTLFLAG_ANYBODY, 0, 0, sysctl_darkboot, "I", ""); -#endif +#endif /* CONFIG_DARKBOOT */ #if DEVELOPMENT || DEBUG #include @@ -4225,7 +4795,7 @@ kern_sysent_write(__unused struct sysctl_oid *oidp, __unused void *arg1, __unuse error = sysctl_io_number(req, old_value, sizeof(uint64_t), &new_value, &changed); if ((error == 0) && changed) { - volatile uint32_t *wraddr = (uint32_t *) &sysent[0]; + volatile uint32_t *wraddr = __DECONST(uint32_t *, &sysent[0]); *wraddr = 0; printf("sysent[0] write succeeded\n"); } @@ -4398,7 +4968,35 @@ SYSCTL_INT(_kern, OID_AUTO, exc_resource_threads_enabled, CTLFLAG_RD | CTLFLAG_L #endif /* DEVELOPMENT || DEBUG */ +#if CONFIG_THREAD_GROUPS +#if DEVELOPMENT || DEBUG + +static int +sysctl_get_thread_group_id SYSCTL_HANDLER_ARGS +{ +#pragma unused(arg1, arg2, oidp) + uint64_t thread_group_id = thread_group_get_id(thread_group_get(current_thread())); + return SYSCTL_OUT(req, &thread_group_id, sizeof(thread_group_id)); +} + +SYSCTL_PROC(_kern, OID_AUTO, thread_group_id, CTLFLAG_RD | CTLFLAG_LOCKED | CTLTYPE_QUAD, + 0, 0, &sysctl_get_thread_group_id, "I", "thread group id of the thread"); + +STATIC int +sysctl_thread_group_count(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req) +{ + int value = thread_group_count(); + return sysctl_io_number(req, value, sizeof(value), NULL, NULL); +} + +SYSCTL_PROC(_kern, OID_AUTO, thread_group_count, CTLFLAG_RD | CTLFLAG_LOCKED | CTLFLAG_KERN, + 0, 0, &sysctl_thread_group_count, "I", "count of thread groups"); 
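(Editorial note, not part of the patch.) The thread-group sysctls registered just above are plain read-only MIB entries, so on a DEVELOPMENT or DEBUG kernel built with CONFIG_THREAD_GROUPS they can be read from user space with the standard sysctlbyname(3) interface. The following is a minimal, illustrative sketch only; the names kern.thread_group_id (a 64-bit value) and kern.thread_group_count (an int) are taken directly from the SYSCTL_PROC declarations in the hunk above, and nothing else is assumed about the interface.

#include <stdio.h>
#include <stdint.h>
#include <sys/types.h>
#include <sys/sysctl.h>

int
main(void)
{
    uint64_t tg_id = 0;
    int tg_count = 0;
    size_t len;

    /* kern.thread_group_id: thread group of the calling thread (64-bit) */
    len = sizeof(tg_id);
    if (sysctlbyname("kern.thread_group_id", &tg_id, &len, NULL, 0) == 0) {
        printf("thread group id: %llu\n", (unsigned long long)tg_id);
    }

    /* kern.thread_group_count: number of thread groups on the system */
    len = sizeof(tg_count);
    if (sysctlbyname("kern.thread_group_count", &tg_count, &len, NULL, 0) == 0) {
        printf("thread group count: %d\n", tg_count);
    }
    return 0;
}

On a RELEASE kernel, or one built without CONFIG_THREAD_GROUPS, these lookups simply fail with ENOENT, so callers should treat the values as optional diagnostics rather than guaranteed interfaces.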
+ +#endif /* DEVELOPMENT || DEBUG */ +const uint32_t thread_groups_supported = 1; +#else /* CONFIG_THREAD_GROUPS */ const uint32_t thread_groups_supported = 0; +#endif /* CONFIG_THREAD_GROUPS */ STATIC int sysctl_thread_groups_supported(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req) @@ -4424,7 +5022,7 @@ sysctl_grade_cputype SYSCTL_HANDLER_ARGS return error; } - return_value = grade_binary(type_tuple[0], type_tuple[1], FALSE); + return_value = grade_binary(type_tuple[0], type_tuple[1] & ~CPU_SUBTYPE_MASK, type_tuple[1] & CPU_SUBTYPE_MASK, FALSE); error = SYSCTL_OUT(req, &return_value, sizeof(return_value)); @@ -4440,9 +5038,20 @@ SYSCTL_PROC(_kern, OID_AUTO, grade_cputype, 0, 0, &sysctl_grade_cputype, "S", "grade value of cpu_type_t+cpu_sub_type_t"); +extern boolean_t allow_direct_handoff; +SYSCTL_INT(_kern, OID_AUTO, direct_handoff, + CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED, + &allow_direct_handoff, 0, "Enable direct handoff for realtime threads"); #if DEVELOPMENT || DEBUG +SYSCTL_LONG(_kern, OID_AUTO, phys_carveout_pa, CTLFLAG_RD | CTLFLAG_LOCKED, + &phys_carveout_pa, + "base physical address of the phys_carveout_mb boot-arg region"); +SYSCTL_LONG(_kern, OID_AUTO, phys_carveout_size, CTLFLAG_RD | CTLFLAG_LOCKED, + &phys_carveout_size, + "size in bytes of the phys_carveout_mb boot-arg region"); + extern void do_cseg_wedge_thread(void); extern void do_cseg_unwedge_thread(void); @@ -4496,13 +5105,6 @@ unwedge_thread SYSCTL_HANDLER_ARGS SYSCTL_PROC(_kern, OID_AUTO, unwedge_thread, CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED, 0, 0, unwedge_thread, "I", "unwedge the thread wedged by kern.wedge_thread"); -SYSCTL_LONG(_kern, OID_AUTO, phys_carveout_pa, CTLFLAG_RD | CTLFLAG_LOCKED, - &phys_carveout_pa, - "base physical address of the phys_carveout_mb boot-arg region"); -SYSCTL_LONG(_kern, OID_AUTO, phys_carveout_size, CTLFLAG_RD | CTLFLAG_LOCKED, - &phys_carveout_size, - "size in bytes of the phys_carveout_mb boot-arg region"); - static int wedge_thread SYSCTL_HANDLER_ARGS { @@ -4525,23 +5127,24 @@ wedge_thread SYSCTL_HANDLER_ARGS return 0; } -SYSCTL_PROC(_kern, OID_AUTO, wedge_thread, CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED, 0, 0, wedge_thread, "I", "wedge this thread so it cannot be cleaned up"); - -extern unsigned long -total_corpses_count(void); - -static int -sysctl_total_corpses_count SYSCTL_HANDLER_ARGS; +SYSCTL_PROC(_kern, OID_AUTO, wedge_thread, + CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED, 0, 0, wedge_thread, "I", + "wedge this thread so it cannot be cleaned up"); static int sysctl_total_corpses_count SYSCTL_HANDLER_ARGS { #pragma unused(oidp, arg1, arg2) - int corpse_count = total_corpses_count(); - return sysctl_io_opaque(req, &corpse_count, sizeof(int), NULL); + extern unsigned long total_corpses_count(void); + + unsigned long corpse_count_long = total_corpses_count(); + unsigned int corpse_count = (unsigned int)MIN(corpse_count_long, UINT_MAX); + return sysctl_io_opaque(req, &corpse_count, sizeof(corpse_count), NULL); } -SYSCTL_PROC(_kern, OID_AUTO, total_corpses_count, CTLFLAG_RD | CTLFLAG_ANYBODY | CTLFLAG_LOCKED, 0, 0, sysctl_total_corpses_count, "I", "total corpses on the system"); +SYSCTL_PROC(_kern, OID_AUTO, total_corpses_count, + CTLFLAG_RD | CTLFLAG_ANYBODY | CTLFLAG_LOCKED, 0, 0, + sysctl_total_corpses_count, "I", "total corpses on the system"); static int sysctl_turnstile_test_prim_lock SYSCTL_HANDLER_ARGS; @@ -4660,7 +5263,7 @@ sysctl_get_test_mtx_stats SYSCTL_HANDLER_ARGS int size, buffer_size, error; 
buffer_size = 1000; - buffer = kalloc(buffer_size); + buffer = kheap_alloc(KHEAP_TEMP, buffer_size, Z_WAITOK); if (!buffer) { panic("Impossible to allocate memory for %s\n", __func__); } @@ -4671,7 +5274,7 @@ sysctl_get_test_mtx_stats SYSCTL_HANDLER_ARGS error = sysctl_io_string(req, buffer, size, 0, NULL); - kfree(buffer, buffer_size); + kheap_free(KHEAP_TEMP, buffer, buffer_size); return error; } @@ -4718,7 +5321,7 @@ sysctl_test_mtx_uncontended SYSCTL_HANDLER_ARGS buffer_size = 2000; offset = 0; - buffer = kalloc(buffer_size); + buffer = kheap_alloc(KHEAP_TEMP, buffer_size, Z_WAITOK); if (!buffer) { panic("Impossible to allocate memory for %s\n", __func__); } @@ -4734,7 +5337,7 @@ sysctl_test_mtx_uncontended SYSCTL_HANDLER_ARGS error = SYSCTL_OUT(req, buffer, offset); - kfree(buffer, buffer_size); + kheap_free(KHEAP_TEMP, buffer, buffer_size); return error; } @@ -4782,11 +5385,10 @@ sysctl_test_mtx_contended SYSCTL_HANDLER_ARGS buffer_size = 2000; offset = 0; - buffer = kalloc(buffer_size); + buffer = kheap_alloc(KHEAP_TEMP, buffer_size, Z_WAITOK | Z_ZERO); if (!buffer) { panic("Impossible to allocate memory for %s\n", __func__); } - memset(buffer, 0, buffer_size); printf("%s starting contended mutex test with %d iterations FULL_CONTENDED\n", __func__, iter); @@ -4811,21 +5413,25 @@ sysctl_test_mtx_contended SYSCTL_HANDLER_ARGS error = SYSCTL_OUT(req, buffer, offset); printf("\n%s\n", buffer); - kfree(buffer, buffer_size); + kheap_free(KHEAP_TEMP, buffer, buffer_size); return error; } -SYSCTL_PROC(_kern, OID_AUTO, erase_all_test_mtx_stats, CTLFLAG_WR | CTLFLAG_MASKED | CTLFLAG_ANYBODY | CTLFLAG_KERN | CTLFLAG_LOCKED, +SYSCTL_PROC(_kern, OID_AUTO, erase_all_test_mtx_stats, + CTLFLAG_WR | CTLFLAG_MASKED | CTLFLAG_ANYBODY | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, 0, sysctl_erase_all_test_mtx_stats, "I", "erase test_mtx statistics"); -SYSCTL_PROC(_kern, OID_AUTO, get_test_mtx_stats, CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_KERN | CTLFLAG_LOCKED, +SYSCTL_PROC(_kern, OID_AUTO, get_test_mtx_stats, + CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, 0, sysctl_get_test_mtx_stats, "A", "get test_mtx statistics"); -SYSCTL_PROC(_kern, OID_AUTO, test_mtx_contended, CTLTYPE_STRING | CTLFLAG_MASKED | CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED, +SYSCTL_PROC(_kern, OID_AUTO, test_mtx_contended, + CTLTYPE_STRING | CTLFLAG_MASKED | CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, 0, sysctl_test_mtx_contended, "A", "get statistics for contended mtx test"); -SYSCTL_PROC(_kern, OID_AUTO, test_mtx_uncontended, CTLTYPE_STRING | CTLFLAG_MASKED | CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED, +SYSCTL_PROC(_kern, OID_AUTO, test_mtx_uncontended, + CTLTYPE_STRING | CTLFLAG_MASKED | CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, 0, sysctl_test_mtx_uncontended, "A", "get statistics for uncontended mtx test"); extern uint64_t MutexSpin; @@ -4867,6 +5473,7 @@ sysctl_high_mutex_spin_ns SYSCTL_HANDLER_ARGS SYSCTL_PROC(_kern, OID_AUTO, high_mutex_spin_abs, CTLFLAG_RW | CTLTYPE_QUAD, 0, 0, sysctl_high_mutex_spin_ns, "I", "High spin threshold in abs for acquiring a kernel mutex"); + #if defined (__x86_64__) semaphore_t sysctl_test_panic_with_thread_sem; @@ -4940,7 +5547,8 @@ sysctl_test_panic_with_thread SYSCTL_HANDLER_ARGS return EINVAL; } -SYSCTL_PROC(_kern, OID_AUTO, test_panic_with_thread, CTLFLAG_MASKED | CTLFLAG_KERN | CTLFLAG_LOCKED | CTLFLAG_WR | CTLTYPE_STRING, +SYSCTL_PROC(_kern, OID_AUTO, test_panic_with_thread, + CTLFLAG_MASKED | CTLFLAG_KERN | CTLFLAG_LOCKED | CTLFLAG_WR | 
CTLTYPE_STRING, 0, 0, sysctl_test_panic_with_thread, "A", "test panic flow for backtracing a different thread"); #endif /* defined (__x86_64__) */ @@ -4963,15 +5571,14 @@ sysctl_get_owned_vmobjects SYSCTL_HANDLER_ARGS size_t buffer_size = (req->oldptr != USER_ADDR_NULL) ? req->oldlen : 0; vmobject_list_output_t buffer; size_t output_size; + size_t entries; if (buffer_size) { - const size_t min_size = sizeof(vm_object_query_data_t) + sizeof(int64_t); - - if (buffer_size < min_size || buffer_size > INT_MAX) { - return EINVAL; + if (buffer_size < sizeof(*buffer) + sizeof(vm_object_query_data_t)) { + return ENOMEM; } - buffer = kalloc(buffer_size); + buffer = kheap_alloc(KHEAP_TEMP, buffer_size, Z_WAITOK); if (!buffer) { error = ENOMEM; @@ -4996,12 +5603,12 @@ sysctl_get_owned_vmobjects SYSCTL_HANDLER_ARGS /* copy the vmobjects and vmobject data out of the task */ if (buffer_size == 0) { - int64_t __size; - task_copy_vmobjects(task, NULL, 0, &__size); - output_size = (__size > 0) ? __size * sizeof(vm_object_query_data_t) + sizeof(int64_t) : 0; + task_copy_vmobjects(task, NULL, 0, &entries); + output_size = (entries > 0) ? entries * sizeof(vm_object_query_data_t) + sizeof(*buffer) : 0; } else { - task_copy_vmobjects(task, &buffer->data[0], buffer_size - sizeof(int64_t), &buffer->entries); - output_size = buffer->entries * sizeof(vm_object_query_data_t) + sizeof(int64_t); + task_copy_vmobjects(task, &buffer->data[0], buffer_size - sizeof(*buffer), &entries); + buffer->entries = (uint64_t)entries; + output_size = entries * sizeof(vm_object_query_data_t) + sizeof(*buffer); } task_deallocate(task); @@ -5010,11 +5617,12 @@ sysctl_get_owned_vmobjects SYSCTL_HANDLER_ARGS sysctl_get_vmobject_list_exit: if (buffer) { - kfree(buffer, buffer_size); + kheap_free(KHEAP_TEMP, buffer, buffer_size); } return error; } -SYSCTL_PROC(_vm, OID_AUTO, get_owned_vmobjects, CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_WR | CTLFLAG_MASKED | CTLFLAG_KERN | CTLFLAG_LOCKED | CTLFLAG_ANYBODY, +SYSCTL_PROC(_vm, OID_AUTO, get_owned_vmobjects, + CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_WR | CTLFLAG_MASKED | CTLFLAG_KERN | CTLFLAG_LOCKED | CTLFLAG_ANYBODY, 0, 0, sysctl_get_owned_vmobjects, "A", "get owned vmobjects in task"); diff --git a/bsd/kern/kern_time.c b/bsd/kern/kern_time.c index d1d3c498e..eac338d17 100644 --- a/bsd/kern/kern_time.c +++ b/bsd/kern/kern_time.c @@ -186,7 +186,7 @@ settimeofday(__unused struct proc *p, struct settimeofday_args *uap, __unused i return error; } #endif -#ifndef CONFIG_EMBEDDED +#if defined(XNU_TARGET_OS_OSX) if ((error = suser(kauth_cred_get(), &p->p_acflag))) { return error; } @@ -198,7 +198,7 @@ settimeofday(__unused struct proc *p, struct settimeofday_args *uap, __unused i if (IS_64BIT_PROCESS(p)) { struct user64_timeval user_atv; error = copyin(uap->tv, &user_atv, sizeof(user_atv)); - atv.tv_sec = user_atv.tv_sec; + atv.tv_sec = (__darwin_time_t)user_atv.tv_sec; atv.tv_usec = user_atv.tv_usec; } else { struct user32_timeval user_atv; @@ -369,9 +369,9 @@ getitimer(struct proc *p, struct getitimer_args *uap, __unused int32_t *retval) } else { struct user32_itimerval user_itv; bzero(&user_itv, sizeof(user_itv)); - user_itv.it_interval.tv_sec = aitv.it_interval.tv_sec; + user_itv.it_interval.tv_sec = (user32_time_t)aitv.it_interval.tv_sec; user_itv.it_interval.tv_usec = aitv.it_interval.tv_usec; - user_itv.it_value.tv_sec = aitv.it_value.tv_sec; + user_itv.it_value.tv_sec = (user32_time_t)aitv.it_value.tv_sec; user_itv.it_value.tv_usec = aitv.it_value.tv_usec; return copyout((caddr_t)&user_itv, 
uap->itv, sizeof(user_itv)); } @@ -403,9 +403,9 @@ setitimer(struct proc *p, struct setitimer_args *uap, int32_t *retval) if ((error = copyin(itvp, (caddr_t)&user_itv, sizeof(user_itv)))) { return error; } - aitv.it_interval.tv_sec = user_itv.it_interval.tv_sec; + aitv.it_interval.tv_sec = (__darwin_time_t)user_itv.it_interval.tv_sec; aitv.it_interval.tv_usec = user_itv.it_interval.tv_usec; - aitv.it_value.tv_sec = user_itv.it_value.tv_sec; + aitv.it_value.tv_sec = (__darwin_time_t)user_itv.it_value.tv_sec; aitv.it_value.tv_usec = user_itv.it_value.tv_usec; } else { struct user32_itimerval user_itv; @@ -824,7 +824,7 @@ tvtoabstime( uint64_t result, usresult; clock_interval_to_absolutetime_interval( - tvp->tv_sec, NSEC_PER_SEC, &result); + (uint32_t)tvp->tv_sec, NSEC_PER_SEC, &result); clock_interval_to_absolutetime_interval( tvp->tv_usec, NSEC_PER_USEC, &usresult); @@ -835,8 +835,8 @@ uint64_t tstoabstime(struct timespec *ts) { uint64_t abstime_s, abstime_ns; - clock_interval_to_absolutetime_interval(ts->tv_sec, NSEC_PER_SEC, &abstime_s); - clock_interval_to_absolutetime_interval(ts->tv_nsec, 1, &abstime_ns); + clock_interval_to_absolutetime_interval((uint32_t)ts->tv_sec, NSEC_PER_SEC, &abstime_s); + clock_interval_to_absolutetime_interval((uint32_t)ts->tv_nsec, 1, &abstime_ns); return abstime_s + abstime_ns; } diff --git a/bsd/kern/kern_xxx.c b/bsd/kern/kern_xxx.c index a07457cd6..2e111de75 100644 --- a/bsd/kern/kern_xxx.c +++ b/bsd/kern/kern_xxx.c @@ -84,9 +84,14 @@ #if CONFIG_MACF #include #endif +#if CONFIG_ATM +#include +#endif -int pshm_cache_purge_all(proc_t p); -int psem_cache_purge_all(proc_t p); +extern int psem_cache_purge_all(proc_t p); +extern int pshm_cache_purge_all(proc_t p); +extern void reset_osvariant_status(void); +extern void reset_osreleasetype(void); int reboot(struct proc *p, struct reboot_args *uap, __unused int32_t *retval) @@ -148,6 +153,9 @@ skip_cred_check: return error; } +extern void OSKextResetAfterUserspaceReboot(void); +extern void zone_gc(boolean_t); + int usrctl(struct proc *p, __unused struct usrctl_args *uap, __unused int32_t *retval) { @@ -155,12 +163,28 @@ usrctl(struct proc *p, __unused struct usrctl_args *uap, __unused int32_t *retva return EPERM; } - int error = 0; - error = pshm_cache_purge_all(p); - if (error) { - return error; - } + reset_osvariant_status(); + reset_osreleasetype(); - error = psem_cache_purge_all(p); - return error; +#if CONFIG_ATM + atm_reset(); +#endif + +#if CONFIG_EXT_RESOLVER + /* + * We're doing a user space reboot. We are guaranteed that the + * external identity resolver is gone, so ensure that everything + * comes back up as with fresh-boot just in case it didn't go + * down cleanly. + */ + kauth_resolver_identity_reset(); +#endif /* CONFIG_EXT_RESOLVER */ + + OSKextResetAfterUserspaceReboot(); + int shm_error = pshm_cache_purge_all(p); + int sem_error = psem_cache_purge_all(p); + + zone_gc(FALSE); + + return shm_error != 0 ? shm_error : sem_error; } diff --git a/bsd/kern/kpi_mbuf.c b/bsd/kern/kpi_mbuf.c index 27d01a4a7..8cd16e220 100644 --- a/bsd/kern/kpi_mbuf.c +++ b/bsd/kern/kpi_mbuf.c @@ -34,7 +34,6 @@ #include #include #include -#include #include #include #include @@ -814,14 +813,12 @@ mbuf_outbound_finalize(struct mbuf *m, u_int32_t pf, size_t o) break; case PF_INET6: -#if INET6 /* * Checksum offload should not have been enabled when * extension headers exist; indicate that the callee * should skip such case by setting optlen to -1. 
*/ (void) in6_finalize_cksum(m, o, -1, -1, m->m_pkthdr.csum_flags); -#endif /* INET6 */ break; default: @@ -983,7 +980,6 @@ mbuf_inet_cksum(mbuf_t mbuf, int protocol, u_int32_t offset, u_int32_t length, return 0; } -#if INET6 errno_t mbuf_inet6_cksum(mbuf_t mbuf, int protocol, u_int32_t offset, u_int32_t length, u_int16_t *csum) @@ -996,45 +992,6 @@ mbuf_inet6_cksum(mbuf_t mbuf, int protocol, u_int32_t offset, u_int32_t length, *csum = inet6_cksum(mbuf, protocol, offset, length); return 0; } -#else /* INET6 */ -errno_t -mbuf_inet6_cksum(__unused mbuf_t mbuf, __unused int protocol, - __unused u_int32_t offset, __unused u_int32_t length, - __unused u_int16_t *csum) -{ - panic("mbuf_inet6_cksum() doesn't exist on this platform\n"); - return 0; -} - -u_int16_t -inet6_cksum(__unused struct mbuf *m, __unused unsigned int nxt, - __unused unsigned int off, __unused unsigned int len) -{ - panic("inet6_cksum() doesn't exist on this platform\n"); - return 0; -} - -void nd6_lookup_ipv6(void); -void -nd6_lookup_ipv6(void) -{ - panic("nd6_lookup_ipv6() doesn't exist on this platform\n"); -} - -int -in6addr_local(__unused struct in6_addr *a) -{ - panic("in6addr_local() doesn't exist on this platform\n"); - return 0; -} - -void nd6_storelladdr(void); -void -nd6_storelladdr(void) -{ - panic("nd6_storelladdr() doesn't exist on this platform\n"); -} -#endif /* INET6 */ /* * Mbuf tag KPIs diff --git a/bsd/kern/kpi_socket.c b/bsd/kern/kpi_socket.c index 5feba8769..30d0b513a 100644 --- a/bsd/kern/kpi_socket.c +++ b/bsd/kern/kpi_socket.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003-2017 Apple Inc. All rights reserved. + * Copyright (c) 2003-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -176,11 +176,11 @@ check_again: /* see comments in sock_setupcall() */ if (callback != NULL) { -#if CONFIG_EMBEDDED +#if (defined(__arm__) || defined(__arm64__)) sock_setupcalls_locked(new_so, callback, cookie, callback, cookie, 0); -#else +#else /* (defined(__arm__) || defined(__arm64__)) */ sock_setupcalls_locked(new_so, callback, cookie, NULL, NULL, 0); -#endif /* !CONFIG_EMBEDDED */ +#endif /* (defined(__arm__) || defined(__arm64__)) */ } if (sa != NULL && from != NULL) { @@ -525,7 +525,7 @@ sock_getsockopt(socket_t sock, int level, int optname, void *optval, sopt.sopt_p = kernproc; error = sogetoptlock(sock, &sopt, 1); /* will lock socket */ if (error == 0) { - *optlen = sopt.sopt_valsize; + *optlen = (uint32_t)sopt.sopt_valsize; } return error; } @@ -559,11 +559,10 @@ sock_setsockopt(socket_t sock, int level, int optname, const void *optval, * This follows the recommended mappings between DSCP code points * and WMM access classes. 
*/ -static u_int32_t so_tc_from_dscp(u_int8_t dscp); -static u_int32_t -so_tc_from_dscp(u_int8_t dscp) +static uint32_t +so_tc_from_dscp(uint8_t dscp) { - u_int32_t tc; + uint32_t tc; if (dscp >= 0x30 && dscp <= 0x3f) { tc = SO_TC_VO; @@ -610,7 +609,7 @@ sock_settclassopt(socket_t sock, const void *optval, size_t optlen) * Set the socket traffic class based on the passed DSCP code point * regardless of the scope of the destination */ - sotc = so_tc_from_dscp((*(const int *)optval) >> 2); + sotc = so_tc_from_dscp((uint8_t)((*(const int *)optval) >> 2)); sopt.sopt_dir = SOPT_SET; sopt.sopt_val = CAST_USER_ADDR_T(&sotc); @@ -722,7 +721,7 @@ sock_receive_internal(socket_t sock, struct msghdr *msg, mbuf_t *data, uio_t auio; struct mbuf *control = NULL; int error = 0; - int length = 0; + user_ssize_t length = 0; struct sockaddr *fromsa = NULL; char uio_buf[UIO_SIZEOF((msg != NULL) ? msg->msg_iovlen : 0)]; @@ -799,7 +798,7 @@ sock_receive_internal(socket_t sock, struct msghdr *msg, mbuf_t *data, m = m->m_next; } msg->msg_controllen = - (uintptr_t)ctlbuf - (uintptr_t)msg->msg_control; + (socklen_t)((uintptr_t)ctlbuf - (uintptr_t)msg->msg_control); } } @@ -844,7 +843,7 @@ sock_send_internal(socket_t sock, const struct msghdr *msg, mbuf_t data, uio_t auio = NULL; struct mbuf *control = NULL; int error = 0; - int datalen = 0; + user_ssize_t datalen = 0; char uio_buf[UIO_SIZEOF((msg != NULL ? msg->msg_iovlen : 1))]; if (sock == NULL) { @@ -1312,11 +1311,11 @@ sock_setupcall(socket_t sock, sock_upcall callback, void *context) * the read and write callbacks and their respective parameters. */ socket_lock(sock, 1); -#if CONFIG_EMBEDDED +#if (defined(__arm__) || defined(__arm64__)) sock_setupcalls_locked(sock, callback, context, callback, context, 0); -#else +#else /* (defined(__arm__) || defined(__arm64__)) */ sock_setupcalls_locked(sock, callback, context, NULL, NULL, 0); -#endif /* !CONFIG_EMBEDDED */ +#endif /* (defined(__arm__) || defined(__arm64__)) */ socket_unlock(sock, 1); return 0; @@ -1342,7 +1341,7 @@ sock_setupcalls(socket_t sock, sock_upcall rcallback, void *rcontext, void sock_catchevents_locked(socket_t sock, sock_evupcall ecallback, void *econtext, - u_int32_t emask) + long emask) { socket_lock_assert_owned(sock); @@ -1352,7 +1351,7 @@ sock_catchevents_locked(socket_t sock, sock_evupcall ecallback, void *econtext, if (ecallback != NULL) { sock->so_event = ecallback; sock->so_eventarg = econtext; - sock->so_eventmask = emask; + sock->so_eventmask = (uint32_t)emask; } else { sock->so_event = sonullevent; sock->so_eventarg = NULL; @@ -1362,7 +1361,7 @@ sock_catchevents_locked(socket_t sock, sock_evupcall ecallback, void *econtext, errno_t sock_catchevents(socket_t sock, sock_evupcall ecallback, void *econtext, - u_int32_t emask) + long emask) { if (sock == NULL) { return EINVAL; diff --git a/bsd/kern/mach_fat.c b/bsd/kern/mach_fat.c index ce9ab133d..a2aaaef24 100644 --- a/bsd/kern/mach_fat.c +++ b/bsd/kern/mach_fat.c @@ -63,6 +63,7 @@ fatfile_getarch( vm_size_t data_size, cpu_type_t req_cpu_type, cpu_type_t mask_bits, + cpu_subtype_t req_subcpu_type, struct image_params *imgp, struct fat_arch *archret) { @@ -71,9 +72,10 @@ fatfile_getarch( struct fat_arch *best_arch; int grade; int best_grade; - uint32_t nfat_arch, max_nfat_arch; + size_t nfat_arch, max_nfat_arch; cpu_type_t testtype; - cpu_type_t testsubtype; + cpu_subtype_t testsubtype; + cpu_subtype_t testfeatures; struct fat_header *header; if (sizeof(struct fat_header) > data_size) { @@ -97,18 +99,19 @@ fatfile_getarch( for (; 
nfat_arch-- > 0; arch++) { testtype = OSSwapBigToHostInt32(arch->cputype); testsubtype = OSSwapBigToHostInt32(arch->cpusubtype) & ~CPU_SUBTYPE_MASK; + testfeatures = OSSwapBigToHostInt32(arch->cpusubtype) & CPU_SUBTYPE_MASK; /* - * Check to see if right cpu type. + * Check to see if right cpu/subcpu type. */ - if ((testtype & ~mask_bits) != (req_cpu_type & ~mask_bits)) { + if (!binary_match(mask_bits, req_cpu_type, req_subcpu_type, testtype, testsubtype)) { continue; } /* - * Get the grade of the cpu subtype (without feature flags) + * Get the grade of the cpu subtype */ - grade = grade_binary(testtype, testsubtype, TRUE); + grade = grade_binary(testtype, testsubtype, testfeatures, TRUE); /* * Remember it if it's the best we've seen. @@ -162,18 +165,24 @@ fatfile_getbestarch( vm_offset_t data_ptr, vm_size_t data_size, struct image_params *imgp, - struct fat_arch *archret) + struct fat_arch *archret, + __unused bool affinity) { + int primary_type = cpu_type(); + + /* * Ignore all architectural bits when determining if an image * in a fat file should be skipped or graded. */ - return fatfile_getarch(data_ptr, data_size, cpu_type(), CPU_ARCH_MASK, imgp, archret); + load_return_t ret = fatfile_getarch(data_ptr, data_size, primary_type, CPU_ARCH_MASK, CPU_SUBTYPE_ANY, imgp, archret); + return ret; } load_return_t fatfile_getbestarch_for_cputype( cpu_type_t cputype, + cpu_subtype_t cpusubtype, vm_offset_t data_ptr, vm_size_t data_size, struct image_params *imgp, @@ -182,7 +191,7 @@ fatfile_getbestarch_for_cputype( /* * Scan the fat_arch array for exact matches for this cpu_type_t only */ - return fatfile_getarch(data_ptr, data_size, cputype, 0, imgp, archret); + return fatfile_getarch(data_ptr, data_size, cputype, 0, cpusubtype, imgp, archret); } /********************************************************************** @@ -211,7 +220,7 @@ fatfile_getarch_with_bits( * Scan the fat_arch array for matches with the requested * architectural bits set, and for the current hardware cpu CPU. 
*/ - return fatfile_getarch(data_ptr, data_size, (archbits & CPU_ARCH_MASK) | (cpu_type() & ~CPU_ARCH_MASK), 0, NULL, archret); + return fatfile_getarch(data_ptr, data_size, (archbits & CPU_ARCH_MASK) | (cpu_type() & ~CPU_ARCH_MASK), 0, CPU_SUBTYPE_ANY, NULL, archret); } /* @@ -226,9 +235,9 @@ fatfile_getarch_with_bits( load_return_t fatfile_validate_fatarches(vm_offset_t data_ptr, vm_size_t data_size) { - uint32_t magic, nfat_arch; - uint32_t max_nfat_arch, i, j; - uint32_t fat_header_size; + uint32_t magic; + size_t nfat_arch, max_nfat_arch, i, j; + size_t fat_header_size; struct fat_arch *arches; struct fat_header *header; diff --git a/bsd/kern/mach_fat.h b/bsd/kern/mach_fat.h index 885fb32ee..9a0cda939 100644 --- a/bsd/kern/mach_fat.h +++ b/bsd/kern/mach_fat.h @@ -36,8 +36,8 @@ load_return_t fatfile_validate_fatarches(vm_offset_t data_ptr, vm_size_t data_size); -load_return_t fatfile_getbestarch(vm_offset_t data_ptr, vm_size_t data_size, struct image_params *imgp, struct fat_arch *archret); -load_return_t fatfile_getbestarch_for_cputype(cpu_type_t cputype, +load_return_t fatfile_getbestarch(vm_offset_t data_ptr, vm_size_t data_size, struct image_params *imgp, struct fat_arch *archret, bool affinity); +load_return_t fatfile_getbestarch_for_cputype(cpu_type_t cputype, cpu_subtype_t cpusubtype, vm_offset_t data_ptr, vm_size_t data_size, struct image_params *imgp, struct fat_arch *archret); load_return_t fatfile_getarch_with_bits(integer_t archbits, vm_offset_t data_ptr, vm_size_t data_size, struct fat_arch *archret); diff --git a/bsd/kern/mach_loader.c b/bsd/kern/mach_loader.c index 0d91fdd48..d01293ca7 100644 --- a/bsd/kern/mach_loader.c +++ b/bsd/kern/mach_loader.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2010 Apple Inc. All rights reserved. + * Copyright (c) 2000-2020 Apple Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -47,12 +47,14 @@ #include #include #include +#include #include #include #include #include #include #include +#include #include #include /* vm_allocate() */ @@ -84,7 +86,9 @@ #include #include #include +#include #include /* for kIOReturnNotPrivileged */ +#include /* for IOVnodeHasEntitlement */ #include @@ -94,6 +98,9 @@ */ extern pmap_t pmap_create_options(ledger_t ledger, vm_map_size_t size, unsigned int flags); +#if __has_feature(ptrauth_calls) && XNU_TARGET_OS_OSX +extern void pmap_disable_user_jop(pmap_t pmap); +#endif /* __has_feature(ptrauth_calls) && XNU_TARGET_OS_OSX */ /* XXX should have prototypes in a shared header file */ extern int get_map_nentries(vm_map_t); @@ -127,7 +134,10 @@ static const load_result_t load_result_null = { .max_vm_addr = MACH_VM_MIN_ADDRESS, .cs_end_offset = 0, .threadstate = NULL, - .threadstate_sz = 0 + .threadstate_sz = 0, + .is_cambria = 0, + .dynlinker_mach_header = MACH_VM_MIN_ADDRESS, + .dynlinker_fd = -1, }; /* @@ -159,7 +169,8 @@ load_segment( struct vnode *vp, vm_map_t map, int64_t slide, - load_result_t *result + load_result_t *result, + struct image_params *imgp ); static load_return_t @@ -173,6 +184,7 @@ static load_return_t load_version( struct version_min_command *vmc, boolean_t *found_version_cmd, + int ip_flags, load_result_t *result ); @@ -183,6 +195,7 @@ load_code_signature( off_t macho_offset, off_t macho_size, cpu_type_t cputype, + cpu_subtype_t cpusubtype, load_result_t *result, struct image_params *imgp); @@ -221,6 +234,7 @@ load_unixthread( struct thread_command *tcp, thread_t thread, int64_t slide, + boolean_t is_x86_64_compat_binary, load_result_t *result ); @@ -239,6 +253,7 @@ load_threadstack( uint32_t total_size, mach_vm_offset_t *user_stack, int *customstack, + boolean_t is_x86_64_compat_binary, load_result_t *result ); @@ -262,6 +277,7 @@ load_dylinker( struct image_params *imgp ); + #if __x86_64__ extern int bootarg_no32exec; static boolean_t @@ -281,7 +297,8 @@ get_macho_vnode( off_t *file_offset, off_t *macho_size, struct macho_data *macho_data, - struct vnode **vpp + struct vnode **vpp, + struct image_params *imgp ); static inline void @@ -313,7 +330,8 @@ note_all_image_info_section(const struct segment_command_64 *scp, unsigned int i; - if (strncmp(scp->segname, "__DATA", sizeof(scp->segname)) != 0) { + if (strncmp(scp->segname, "__DATA_DIRTY", sizeof(scp->segname)) != 0 && + strncmp(scp->segname, "__DATA", sizeof(scp->segname)) != 0) { return; } for (i = 0; i < scp->nsects; ++i) { @@ -341,6 +359,68 @@ const int fourk_binary_compatibility_unsafe = TRUE; const int fourk_binary_compatibility_allow_wx = FALSE; #endif /* __arm64__ */ +#if __has_feature(ptrauth_calls) && XNU_TARGET_OS_OSX +/** + * Determines whether this is an arm64e process which may host in-process + * plugins. 
+ */ +static inline bool +arm64e_plugin_host(struct image_params *imgp, load_result_t *result) +{ + if (imgp->ip_flags & IMGPF_NOJOP) { + return false; + } + + if (!result->platform_binary) { + return false; + } + + struct cs_blob *csblob = csvnode_get_blob(imgp->ip_vp, imgp->ip_arch_offset); + const char *identity = csblob_get_identity(csblob); + if (!identity) { + return false; + } + + /* Check if override host plugin entitlement is present and posix spawn attribute to disable A keys is passed */ + if (IOVnodeHasEntitlement(imgp->ip_vp, (int64_t)imgp->ip_arch_offset, OVERRIDE_PLUGIN_HOST_ENTITLEMENT)) { + return imgp->ip_flags & IMGPF_PLUGIN_HOST_DISABLE_A_KEYS; + } + + /* Disabling library validation is a good signal that this process plans to host plugins */ + const char *const disable_lv_entitlements[] = { + "com.apple.security.cs.disable-library-validation", + "com.apple.private.cs.automator-plugins", + CLEAR_LV_ENTITLEMENT, + }; + for (size_t i = 0; i < ARRAY_COUNT(disable_lv_entitlements); i++) { + if (IOVnodeHasEntitlement(imgp->ip_vp, (int64_t)imgp->ip_arch_offset, disable_lv_entitlements[i])) { + return true; + } + } + + /* From /System/Library/Security/HardeningExceptions.plist */ + const char *const hardening_exceptions[] = { + "com.apple.perl5", /* Scripting engines may load third party code and jit*/ + "com.apple.perl", /* Scripting engines may load third party code and jit*/ + "org.python.python", /* Scripting engines may load third party code and jit*/ + "com.apple.expect", /* Scripting engines may load third party code and jit*/ + "com.tcltk.wish", /* Scripting engines may load third party code and jit*/ + "com.tcltk.tclsh", /* Scripting engines may load third party code and jit*/ + "com.apple.ruby", /* Scripting engines may load third party code and jit*/ + "com.apple.bash", /* Required for the 'enable' command */ + "com.apple.zsh", /* Required for the 'zmodload' command */ + "com.apple.ksh", /* Required for 'builtin' command */ + }; + for (size_t i = 0; i < ARRAY_COUNT(hardening_exceptions); i++) { + if (strncmp(hardening_exceptions[i], identity, strlen(hardening_exceptions[i])) == 0) { + return true; + } + } + + return false; +} +#endif /* __has_feature(ptrauth_calls) && XNU_TARGET_OS_OSX */ + load_return_t load_machfile( struct image_params *imgp, @@ -387,6 +467,16 @@ load_machfile( } else { ledger_task = task; } + +#if defined(PMAP_CREATE_FORCE_4K_PAGES) && (DEBUG || DEVELOPMENT) + if (imgp->ip_px_sa != NULL) { + struct _posix_spawnattr* psa = (struct _posix_spawnattr *) imgp->ip_px_sa; + if (psa->psa_flags & _POSIX_SPAWN_FORCE_4K_PAGES) { + pmap_flags |= PMAP_CREATE_FORCE_4K_PAGES; + } + } +#endif /* defined(PMAP_CREATE_FORCE_4K_PAGES) && (DEBUG || DEVELOPMENT) */ + pmap = pmap_create_options(get_task_ledger(ledger_task), (vm_map_size_t) 0, pmap_flags); @@ -410,6 +500,13 @@ load_machfile( vm_map_set_page_shift(map, SIXTEENK_PAGE_SHIFT); #endif /* __arm64__ */ +#if PMAP_CREATE_FORCE_4K_PAGES + if (pmap_flags & PMAP_CREATE_FORCE_4K_PAGES) { + DEBUG4K_LIFE("***** launching '%s' as 4k *****\n", vp->v_name); + vm_map_set_page_shift(map, FOURK_PAGE_SHIFT); + } +#endif /* PMAP_CREATE_FORCE_4K_PAGES */ + #ifndef CONFIG_ENFORCE_SIGNED_CODE /* This turns off faulting for executable pages, which allows * to circumvent Code Signing Enforcement. 
The per process @@ -445,6 +542,9 @@ load_machfile( aslr_page_offset += aslr_section_offset; } + if (vm_map_page_shift(map) < (int)PAGE_SHIFT) { + DEBUG4K_LOAD("slide=0x%llx dyld_slide=0x%llx\n", aslr_page_offset, dyld_aslr_page_offset); + } if (!result) { result = &myresult; @@ -500,12 +600,13 @@ load_machfile( if (enforce_hard_pagezero && (vm_map_has_hard_pagezero(map, 0x1000) == FALSE)) { #if __arm64__ - if (!result->is_64bit_addr && /* not 64-bit address space */ - !(header->flags & MH_PIE) && /* not PIE */ - (vm_map_page_shift(map) != FOURK_PAGE_SHIFT || - PAGE_SHIFT != FOURK_PAGE_SHIFT) && /* page size != 4KB */ - result->has_pagezero && /* has a "soft" page zero */ - fourk_binary_compatibility_unsafe) { + if ( + !result->is_64bit_addr && /* not 64-bit address space */ + !(header->flags & MH_PIE) && /* not PIE */ + (vm_map_page_shift(map) != FOURK_PAGE_SHIFT || + PAGE_SHIFT != FOURK_PAGE_SHIFT) && /* page size != 4KB */ + result->has_pagezero && /* has a "soft" page zero */ + fourk_binary_compatibility_unsafe) { /* * For backwards compatibility of "4K" apps on * a 16K system, do not enforce a hard page zero... @@ -518,6 +619,16 @@ load_machfile( } } +#if __arm64__ + if (enforce_hard_pagezero && result->is_64bit_addr && (header->cputype == CPU_TYPE_ARM64)) { + /* 64 bit ARM binary must have "hard page zero" of 4GB to cover the lower 32 bit address space */ + if (vm_map_has_hard_pagezero(map, 0x100000000) == FALSE) { + vm_map_deallocate(map); /* will lose pmap reference too */ + return LOAD_BADMACHO; + } + } +#endif + vm_commit_pagezero_status(map); /* @@ -565,6 +676,17 @@ load_machfile( } *mapp = map; +#if __has_feature(ptrauth_calls) && defined(XNU_TARGET_OS_OSX) + /* + * arm64e plugin hosts currently run with JOP keys disabled, since they + * may need to run arm64 plugins. 
+ */ + if (arm64e_plugin_host(imgp, result)) { + imgp->ip_flags |= IMGPF_NOJOP; + pmap_disable_user_jop(pmap); + } +#endif /* __has_feature(ptrauth_calls) && defined(XNU_TARGET_OS_OSX) */ + #ifdef CONFIG_32BIT_TELEMETRY if (!result->is_64bit_data) { /* @@ -588,6 +710,27 @@ int macho_printf = 0; } \ } while (0) + +static boolean_t +pie_required( + cpu_type_t exectype, + cpu_subtype_t execsubtype) +{ + switch (exectype) { + case CPU_TYPE_X86_64: + return FALSE; + case CPU_TYPE_ARM64: + return TRUE; + case CPU_TYPE_ARM: + switch (execsubtype) { + case CPU_SUBTYPE_ARM_V7K: + return TRUE; + } + break; + } + return FALSE; +} + /* * The file size of a mach-o file is limited to 32 bits; this is because * this is the limit on the kalloc() of enough bytes for a mach_header and @@ -619,7 +762,6 @@ parse_machfile( uint32_t ncmds; struct load_command *lcp; struct dylinker_command *dlp = 0; - integer_t dlarchbits = 0; void * control; load_return_t ret = LOAD_SUCCESS; void * addr; @@ -641,14 +783,15 @@ parse_machfile( int64_t slide = 0; boolean_t dyld_no_load_addr = FALSE; boolean_t is_dyld = FALSE; - vm_map_offset_t effective_page_mask = MAX(PAGE_MASK, vm_map_page_mask(map)); + vm_map_offset_t effective_page_mask = PAGE_MASK; #if __arm64__ - uint32_t pagezero_end = 0; - uint32_t executable_end = 0; - uint32_t writable_start = 0; + uint64_t pagezero_end = 0; + uint64_t executable_end = 0; + uint64_t writable_start = 0; vm_map_size_t effective_page_size; - effective_page_size = MAX(PAGE_SIZE, vm_map_page_size(map)); + effective_page_mask = vm_map_page_mask(map); + effective_page_size = vm_map_page_size(map); #endif /* __arm64__ */ if (header->magic == MH_MAGIC_64 || @@ -659,7 +802,7 @@ parse_machfile( /* * Break infinite recursion */ - if (depth > 1) { + if (depth > 2) { return LOAD_FAILURE; } @@ -668,9 +811,14 @@ parse_machfile( /* * Check to see if right machine type. */ - if (((cpu_type_t)(header->cputype & ~CPU_ARCH_MASK) != (cpu_type() & ~CPU_ARCH_MASK)) || - !grade_binary(header->cputype, - header->cpusubtype & ~CPU_SUBTYPE_MASK, TRUE)) { + if (((cpu_type_t)(header->cputype & ~CPU_ARCH_MASK) != (cpu_type() & ~CPU_ARCH_MASK)) + ) { + return LOAD_BADARCH; + } + + if (!grade_binary(header->cputype, + header->cpusubtype & ~CPU_SUBTYPE_MASK, + header->cpusubtype & CPU_SUBTYPE_MASK, TRUE)) { return LOAD_BADARCH; } @@ -678,24 +826,23 @@ parse_machfile( switch (header->filetype) { case MH_EXECUTE: - if (depth != 1) { + if (depth != 1 && depth != 3) { return LOAD_FAILURE; } -#if CONFIG_EMBEDDED if (header->flags & MH_DYLDLINK) { /* Check properties of dynamic executables */ if (!(header->flags & MH_PIE) && pie_required(header->cputype, header->cpusubtype & ~CPU_SUBTYPE_MASK)) { return LOAD_FAILURE; } result->needs_dynlinker = TRUE; + } else if (header->cputype == CPU_TYPE_X86_64) { + /* x86_64 static binaries allowed */ } else { /* Check properties of static executables (disallowed except for development) */ #if !(DEVELOPMENT || DEBUG) return LOAD_FAILURE; #endif } -#endif /* CONFIG_EMBEDDED */ - break; case MH_DYLINKER: if (depth != 2) { @@ -708,6 +855,13 @@ parse_machfile( return LOAD_FAILURE; } + /* + * For PIE and dyld, slide everything by the ASLR offset. + */ + if ((header->flags & MH_PIE) || is_dyld) { + slide = aslr_offset; + } + /* * Get the pager for the file. 
*/ @@ -716,7 +870,8 @@ parse_machfile( /* ensure header + sizeofcmds falls within the file */ if (os_add_overflow(mach_header_sz, header->sizeofcmds, &cmds_size) || (off_t)cmds_size > macho_size || - round_page_overflow(cmds_size, &alloc_size)) { + round_page_overflow(cmds_size, &alloc_size) || + alloc_size > INT_MAX) { return LOAD_BADMACHO; } @@ -728,7 +883,7 @@ parse_machfile( return LOAD_NOSPACE; } - error = vn_rdwr(UIO_READ, vp, addr, alloc_size, file_offset, + error = vn_rdwr(UIO_READ, vp, addr, (int)alloc_size, file_offset, UIO_SYSSPACE, 0, vfs_context_ucred(imgp->ip_vfs_context), &resid, p); if (error) { kfree(addr, alloc_size); @@ -736,22 +891,17 @@ parse_machfile( } if (resid) { - /* We must be able to read in as much as the mach_header indicated */ - kfree(addr, alloc_size); - return LOAD_BADMACHO; - } - - /* - * For PIE and dyld, slide everything by the ASLR offset. - */ - if ((header->flags & MH_PIE) || is_dyld) { - slide = aslr_offset; + { + /* We must be able to read in as much as the mach_header indicated */ + kfree(addr, alloc_size); + return LOAD_BADMACHO; + } } /* * Scan through the commands, processing each one as necessary. * We parse in three passes through the headers: - * 0: determine if TEXT and DATA boundary can be page-aligned + * 0: determine if TEXT and DATA boundary can be page-aligned, load platform version * 1: thread state, uuid, code signature * 2: segments * 3: dyld, encryption, check entry point @@ -765,11 +915,7 @@ parse_machfile( #endif for (pass = 0; pass <= 3; pass++) { - if (pass == 0 && !slide_realign && !is_dyld) { - /* if we dont need to realign the slide or determine dyld's load - * address, pass 0 can be skipped */ - continue; - } else if (pass == 1) { + if (pass == 1) { #if __arm64__ boolean_t is_pie; int64_t adjust; @@ -831,14 +977,15 @@ parse_machfile( * it right after the main binary. If binresult == NULL, load * directly to the given slide. 
*/ - slide = vm_map_round_page(slide + binresult->max_vm_addr, effective_page_mask); + mach_vm_address_t max_vm_addr = binresult->max_vm_addr; + slide = vm_map_round_page(slide + max_vm_addr, effective_page_mask); } } /* - * Check that the entry point is contained in an executable segments + * Check that the entry point is contained in an executable segment */ - if (pass == 3) { + if ((pass == 3) && (thread != THREAD_NULL)) { if (depth == 1 && imgp && (imgp->ip_flags & IMGPF_DRIVER)) { /* Driver binaries must have driverkit platform */ if (result->ip_platform == PLATFORM_DRIVERKIT) { @@ -930,21 +1077,21 @@ parse_machfile( if (scp->initprot == 0 && scp->maxprot == 0 && scp->vmaddr == 0) { /* PAGEZERO */ - if (os_add3_overflow(scp->vmaddr, scp->vmsize, slide, &pagezero_end)) { + if (os_add3_overflow(scp->vmaddr, scp->vmsize, slide, &pagezero_end) || pagezero_end > UINT32_MAX) { ret = LOAD_BADMACHO; break; } } if (scp->initprot & VM_PROT_EXECUTE) { /* TEXT */ - if (os_add3_overflow(scp->vmaddr, scp->vmsize, slide, &executable_end)) { + if (os_add3_overflow(scp->vmaddr, scp->vmsize, slide, &executable_end) || executable_end > UINT32_MAX) { ret = LOAD_BADMACHO; break; } } if (scp->initprot & VM_PROT_WRITE) { /* DATA */ - if (os_add_overflow(scp->vmaddr, slide, &writable_start)) { + if (os_add_overflow(scp->vmaddr, slide, &writable_start) || writable_start > UINT32_MAX) { ret = LOAD_BADMACHO; break; } @@ -978,7 +1125,8 @@ parse_machfile( vp, map, slide, - result); + result, + imgp); if (ret == LOAD_SUCCESS && scp->fileoff == 0 && scp->filesize > 0) { /* Enforce a single segment mapping offset zero, with R+X * protection. */ @@ -1001,11 +1149,9 @@ parse_machfile( if (pass == 0) { if (is_dyld && scp64->vmaddr == 0 && scp64->fileoff == 0) { dyld_no_load_addr = TRUE; - if (!slide_realign) { - /* got what we need, bail early on pass 0 */ - continue; - } } + /* got what we need, bail early on pass 0 */ + continue; } if (pass == 1 && !strncmp(scp64->segname, "__XHDR", sizeof(scp64->segname))) { @@ -1033,7 +1179,8 @@ parse_machfile( vp, map, slide, - result); + result, + imgp); if (ret == LOAD_SUCCESS && scp64->fileoff == 0 && scp64->filesize > 0) { /* Enforce a single segment mapping offset zero, with R+X @@ -1048,7 +1195,8 @@ parse_machfile( break; } - case LC_UNIXTHREAD: + case LC_UNIXTHREAD: { + boolean_t is_x86_64_compat_binary = FALSE; if (pass != 1) { break; } @@ -1056,8 +1204,10 @@ parse_machfile( (struct thread_command *) lcp, thread, slide, + is_x86_64_compat_binary, result); break; + } case LC_MAIN: if (pass != 1) { break; @@ -1077,7 +1227,6 @@ parse_machfile( } if ((depth == 1) && (dlp == 0)) { dlp = (struct dylinker_command *)lcp; - dlarchbits = (header->cputype & CPU_ARCH_MASK); } else { ret = LOAD_FAILURE; } @@ -1094,6 +1243,7 @@ parse_machfile( if (pass != 1) { break; } + /* pager -> uip -> * load signatures & store in uip * set VM object "signed_pages" @@ -1104,6 +1254,7 @@ parse_machfile( file_offset, macho_size, header->cputype, + header->cpusubtype, result, imgp); if (ret != LOAD_SUCCESS) { @@ -1137,7 +1288,7 @@ parse_machfile( NULL, file_offset + off, addr + off, - PAGE_SIZE, + MIN(PAGE_SIZE, cmds_size), &tainted); if (!valid || (tainted & CS_VALIDATE_TAINTED)) { if (cs_debug) { @@ -1217,15 +1368,15 @@ parse_machfile( case LC_VERSION_MIN_TVOS: { struct version_min_command *vmc; - if (depth != 1 || pass != 1) { + if (depth != 1 || pass != 0) { break; } vmc = (struct version_min_command *) lcp; - ret = load_version(vmc, &found_version_cmd, result); + ret = load_version(vmc, 
&found_version_cmd, imgp->ip_flags, result); break; } case LC_BUILD_VERSION: { - if (depth != 1 || pass != 1) { + if (depth != 1 || pass != 0) { break; } struct build_version_command* bvc = (struct build_version_command*)lcp; @@ -1238,6 +1389,7 @@ parse_machfile( break; } result->ip_platform = bvc->platform; + result->lr_sdk = bvc->sdk; found_version_cmd = TRUE; break; } @@ -1270,10 +1422,11 @@ parse_machfile( * load the dylinker, and slide it by the independent DYLD ASLR * offset regardless of the PIE-ness of the main binary. */ - ret = load_dylinker(dlp, dlarchbits, map, thread, depth, + ret = load_dylinker(dlp, header->cputype, map, thread, depth, dyld_aslr_offset, result, imgp); } + if ((ret == LOAD_SUCCESS) && (depth == 1)) { if (result->thread_count == 0) { ret = LOAD_FAILURE; @@ -1344,6 +1497,7 @@ check_if_simulator_binary( /* Allocate page to copyin mach header */ ip_vdata = kalloc(PAGE_SIZE); + bzero(ip_vdata, PAGE_SIZE); if (ip_vdata == NULL) { goto bad; } @@ -1367,7 +1521,8 @@ check_if_simulator_binary( /* ensure header + sizeofcmds falls within the file */ if (os_add_overflow(mach_header_sz, header->sizeofcmds, &cmds_size) || (off_t)cmds_size > macho_size || - round_page_overflow(cmds_size, &alloc_size)) { + round_page_overflow(cmds_size, &alloc_size) || + alloc_size > INT_MAX) { goto bad; } @@ -1379,7 +1534,7 @@ check_if_simulator_binary( goto bad; } - error = vn_rdwr(UIO_READ, imgp->ip_vp, addr, alloc_size, file_offset, + error = vn_rdwr(UIO_READ, imgp->ip_vp, addr, (int)alloc_size, file_offset, UIO_SYSSPACE, IO_NODELOCKED, cred, &resid, p); if (error) { goto bad; @@ -1494,28 +1649,38 @@ unprotect_dsmos_segment( vm_map_size_t map_size) { kern_return_t kr; + uint64_t slice_off; /* * The first APPLE_UNPROTECTED_HEADER_SIZE bytes (from offset 0 of * this part of a Universal binary) are not protected... * The rest needs to be "transformed". */ - if (file_off <= APPLE_UNPROTECTED_HEADER_SIZE && - file_off + file_size <= APPLE_UNPROTECTED_HEADER_SIZE) { + slice_off = file_off - macho_offset; + if (slice_off <= APPLE_UNPROTECTED_HEADER_SIZE && + slice_off + file_size <= APPLE_UNPROTECTED_HEADER_SIZE) { /* it's all unprotected, nothing to do... */ kr = KERN_SUCCESS; } else { - if (file_off <= APPLE_UNPROTECTED_HEADER_SIZE) { + if (slice_off <= APPLE_UNPROTECTED_HEADER_SIZE) { /* * We start mapping in the unprotected area. * Skip the unprotected part... */ - vm_map_offset_t delta; + uint64_t delta_file; + vm_map_offset_t delta_map; - delta = APPLE_UNPROTECTED_HEADER_SIZE; - delta -= file_off; - map_addr += delta; - map_size -= delta; + delta_file = (uint64_t)APPLE_UNPROTECTED_HEADER_SIZE; + delta_file -= slice_off; + if (os_convert_overflow(delta_file, &delta_map)) { + return LOAD_BADMACHO; + } + if (os_add_overflow(map_addr, delta_map, &map_addr)) { + return LOAD_BADMACHO; + } + if (os_sub_overflow(map_size, delta_map, &map_size)) { + return LOAD_BADMACHO; + } } /* ... transform the rest of the mapping. 
*/ struct pager_crypt_info crypt_info; @@ -1549,7 +1714,8 @@ unprotect_dsmos_segment( map_addr, map_addr + map_size, crypto_backing_offset, - &crypt_info); + &crypt_info, + CRYPTID_APP_ENCRYPTION); } if (kr != KERN_SUCCESS) { @@ -1619,7 +1785,7 @@ map_segment( return LOAD_SUCCESS; } - effective_page_mask = MAX(PAGE_MASK, vm_map_page_mask(map)); + effective_page_mask = vm_map_page_mask(map); vmk_flags = VM_MAP_KERNEL_FLAGS_NONE; if (vm_map_page_aligned(vm_start, effective_page_mask) && @@ -1707,9 +1873,9 @@ map_segment( cur_vmk_flags = VM_MAP_KERNEL_FLAGS_NONE; } -#if CONFIG_EMBEDDED +#if !defined(XNU_TARGET_OS_OSX) (void) result; -#else /* CONFIG_EMBEDDED */ +#else /* !defined(XNU_TARGET_OS_OSX) */ /* * This process doesn't have its new csflags (from * the image being loaded) yet, so tell VM to override the @@ -1721,7 +1887,11 @@ map_segment( cur_vmk_flags.vmkf_cs_enforcement = FALSE; } cur_vmk_flags.vmkf_cs_enforcement_override = TRUE; -#endif /* CONFIG_EMBEDDED */ +#endif /* !defined(XNU_TARGET_OS_OSX) */ + + if (result->is_cambria && (initprot & VM_PROT_EXECUTE) == VM_PROT_EXECUTE) { + cur_vmk_flags.vmkf_translated_allow_execute = TRUE; + } cur_end = vm_map_trunc_page(vm_start + (file_end - file_start), @@ -1825,7 +1995,8 @@ load_segment( struct vnode *vp, vm_map_t map, int64_t slide, - load_result_t *result) + load_result_t *result, + struct image_params *imgp) { struct segment_command_64 segment_command, *scp; kern_return_t ret; @@ -1834,8 +2005,9 @@ load_segment( vm_prot_t maxprot; size_t segment_command_size, total_section_size, single_section_size; - vm_map_offset_t file_offset, file_size; - vm_map_offset_t vm_offset, vm_size; + uint64_t file_offset, file_size; + vm_map_offset_t vm_offset; + size_t vm_size; vm_map_offset_t vm_start, vm_end, vm_end_aligned; vm_map_offset_t file_start, file_end; kern_return_t kr; @@ -1847,8 +2019,10 @@ load_segment( boolean_t fourk_align; #endif /* __arm64__ */ - effective_page_size = MAX(PAGE_SIZE, vm_map_page_size(map)); - effective_page_mask = MAX(PAGE_MASK, vm_map_page_mask(map)); + (void)imgp; + + effective_page_size = vm_map_page_size(map); + effective_page_mask = vm_map_page_mask(map); verbose = FALSE; if (LC_SEGMENT_64 == lcp->cmd) { @@ -1857,6 +2031,12 @@ load_segment( #if __arm64__ /* 64-bit binary: should already be 16K-aligned */ fourk_align = FALSE; + + if (vm_map_page_shift(map) == FOURK_PAGE_SHIFT && + PAGE_SHIFT != FOURK_PAGE_SHIFT) { + fourk_align = TRUE; + verbose = TRUE; + } #endif /* __arm64__ */ } else { segment_command_size = sizeof(struct segment_command); @@ -1874,6 +2054,7 @@ load_segment( #endif /* __arm64__ */ } if (lcp->cmdsize < segment_command_size) { + DEBUG4K_ERROR("LOAD_BADMACHO cmdsize %d < %zu\n", lcp->cmdsize, segment_command_size); return LOAD_BADMACHO; } total_section_size = lcp->cmdsize - segment_command_size; @@ -1905,6 +2086,7 @@ load_segment( */ if (scp->fileoff + scp->filesize < scp->fileoff || scp->fileoff + scp->filesize > (uint64_t)macho_size) { + DEBUG4K_ERROR("LOAD_BADMACHO fileoff 0x%llx filesize 0x%llx macho_size 0x%llx\n", scp->fileoff, scp->filesize, (uint64_t)macho_size); return LOAD_BADMACHO; } /* @@ -1912,12 +2094,16 @@ load_segment( * within the load command size. */ if (total_section_size / single_section_size < scp->nsects) { + DEBUG4K_ERROR("LOAD_BADMACHO 0x%zx 0x%zx %d\n", total_section_size, single_section_size, scp->nsects); return LOAD_BADMACHO; } /* * Make sure the segment is page-aligned in the file. 
*/ - file_offset = pager_offset + scp->fileoff; /* limited to 32 bits */ + if (os_add_overflow(pager_offset, scp->fileoff, &file_offset)) { + DEBUG4K_ERROR("LOAD_BADMACHO file_offset: 0x%llx + 0x%llx\n", pager_offset, scp->fileoff); + return LOAD_BADMACHO; + } file_size = scp->filesize; #if __arm64__ if (fourk_align) { @@ -1926,6 +2112,7 @@ load_segment( * we can't mmap() it if it's not at least 4KB-aligned * in the file */ + DEBUG4K_ERROR("LOAD_BADMACHO file_offset 0x%llx\n", file_offset); return LOAD_BADMACHO; } } else @@ -1938,6 +2125,7 @@ load_segment( * was what this process believe is the page size, so let's * fail here too for the sake of consistency. */ + DEBUG4K_ERROR("LOAD_BADMACHO file_offset 0x%llx\n", file_offset); return LOAD_BADMACHO; } @@ -1952,6 +2140,7 @@ load_segment( if (cs_debug) { printf("section outside code signature\n"); } + DEBUG4K_ERROR("LOAD_BADMACHO end_offset 0x%llx fileoff 0x%llx filesize 0x%llx\n", result->cs_end_offset, scp->fileoff, scp->filesize); return LOAD_BADMACHO; } @@ -1959,10 +2148,16 @@ load_segment( if (cs_debug) { printf("vmaddr too large\n"); } + DEBUG4K_ERROR("LOAD_BADMACHO vmaddr 0x%llx slide 0x%llx vm_offset 0x%llx\n", scp->vmaddr, slide, (uint64_t)vm_offset); return LOAD_BADMACHO; } - vm_size = scp->vmsize; + if (scp->vmsize > SIZE_MAX) { + DEBUG4K_ERROR("LOAD_BADMACHO vmsize 0x%llx\n", scp->vmsize); + return LOAD_BADMACHO; + } + + vm_size = (size_t)scp->vmsize; if (vm_size == 0) { return LOAD_SUCCESS; @@ -1972,6 +2167,10 @@ load_segment( vm_size != 0 && (scp->initprot & VM_PROT_ALL) == VM_PROT_NONE && (scp->maxprot & VM_PROT_ALL) == VM_PROT_NONE) { + if (map == VM_MAP_NULL) { + return LOAD_SUCCESS; + } + /* * For PIE, extend page zero rather than moving it. Extending * page zero keeps early allocations from falling predictably @@ -1985,10 +2184,12 @@ load_segment( * make it completely off limits by raising the VM map's * minimum offset. 
*/ - vm_end = vm_offset + vm_size; + vm_end = (vm_map_offset_t)(vm_offset + vm_size); if (vm_end < vm_offset) { + DEBUG4K_ERROR("LOAD_BADMACHO vm_end 0x%llx vm_offset 0x%llx vm_size 0x%llx\n", (uint64_t)vm_end, (uint64_t)vm_offset, (uint64_t)vm_size); return LOAD_BADMACHO; } + if (verbose) { MACHO_PRINTF(("++++++ load_segment: " "page_zero up to 0x%llx\n", @@ -2033,16 +2234,18 @@ load_segment( #endif /* __arm64__ */ if (ret != KERN_SUCCESS) { + DEBUG4K_ERROR("LOAD_FAILURE ret 0x%x\n", ret); return LOAD_FAILURE; } return LOAD_SUCCESS; } else { -#if CONFIG_EMBEDDED +#if !defined(XNU_TARGET_OS_OSX) /* not PAGEZERO: should not be mapped at address 0 */ if (filetype != MH_DYLINKER && scp->vmaddr == 0) { + DEBUG4K_ERROR("LOAD_BADMACHO filetype %d vmaddr 0x%llx\n", filetype, scp->vmaddr); return LOAD_BADMACHO; } -#endif /* CONFIG_EMBEDDED */ +#endif /* !defined(XNU_TARGET_OS_OSX) */ } #if __arm64__ @@ -2056,6 +2259,18 @@ load_segment( FOURK_PAGE_MASK); vm_end = vm_map_round_page(vm_offset + vm_size, FOURK_PAGE_MASK); + + if (file_offset - file_start > FOURK_PAGE_MASK || + file_end - file_offset - file_size > FOURK_PAGE_MASK) { + DEBUG4K_ERROR("LOAD_BADMACHO file_start / file_size wrap " + "[0x%llx:0x%llx] -> [0x%llx:0x%llx]\n", + file_offset, + file_offset + file_size, + (uint64_t) file_start, + (uint64_t) file_end); + return LOAD_BADMACHO; + } + if (!strncmp(scp->segname, "__LINKEDIT", 11) && page_aligned(file_start) && vm_map_page_aligned(file_start, vm_map_page_mask(map)) && @@ -2078,6 +2293,17 @@ load_segment( effective_page_mask); vm_end = vm_map_round_page(vm_offset + vm_size, effective_page_mask); + + if (file_offset - file_start > effective_page_mask || + file_end - file_offset - file_size > effective_page_mask) { + DEBUG4K_ERROR("LOAD_BADMACHO file_start / file_size wrap " + "[0x%llx:0x%llx] -> [0x%llx:0x%llx]\n", + file_offset, + file_offset + file_size, + (uint64_t) file_start, + (uint64_t) file_end); + return LOAD_BADMACHO; + } } if (vm_start < result->min_vm_addr) { @@ -2116,6 +2342,7 @@ load_segment( maxprot, result); if (ret) { + DEBUG4K_ERROR("LOAD_NOSPACE start 0x%llx end 0x%llx ret 0x%x\n", (uint64_t)vm_start, (uint64_t)vm_end, ret); return LOAD_NOSPACE; } @@ -2130,6 +2357,7 @@ load_segment( ret = mach_vm_allocate_kernel(kernel_map, &tmp, delta_size, VM_FLAGS_ANYWHERE, VM_KERN_MEMORY_BSD); if (ret != KERN_SUCCESS) { + DEBUG4K_ERROR("LOAD_RESOURCE delta_size 0x%llx ret 0x%x\n", delta_size, ret); return LOAD_RESOURCE; } @@ -2137,6 +2365,7 @@ load_segment( delta_size)) { (void) mach_vm_deallocate( kernel_map, tmp, delta_size); + DEBUG4K_ERROR("LOAD_FAILURE copyout 0x%llx 0x%llx\n", map_addr + scp->filesize, delta_size); return LOAD_FAILURE; } @@ -2156,18 +2385,28 @@ load_segment( delta_size = 0; } if (delta_size > 0) { - mach_vm_offset_t tmp; + vm_map_offset_t tmp_start; + vm_map_offset_t tmp_end; + + if (os_add_overflow(vm_start, file_end - file_start, &tmp_start)) { + DEBUG4K_ERROR("LOAD_NOSPACE tmp_start: 0x%llx + 0x%llx\n", (uint64_t)vm_start, (uint64_t)(file_end - file_start)); + return LOAD_NOSPACE; + } + + if (os_add_overflow(tmp_start, delta_size, &tmp_end)) { + DEBUG4K_ERROR("LOAD_NOSPACE tmp_end: 0x%llx + 0x%llx\n", (uint64_t)tmp_start, (uint64_t)delta_size); + return LOAD_NOSPACE; + } - tmp = vm_start + (file_end - file_start); if (verbose) { MACHO_PRINTF(("++++++ load_segment: " "delta mapping vm [0x%llx:0x%llx]\n", - (uint64_t) tmp, - (uint64_t) (tmp + delta_size))); + (uint64_t) tmp_start, + (uint64_t) tmp_end)); } kr = map_segment(map, - tmp, - tmp + delta_size, + 
tmp_start, + tmp_end, MEMORY_OBJECT_CONTROL_NULL, 0, delta_size, @@ -2175,6 +2414,7 @@ load_segment( scp->maxprot, result); if (kr != KERN_SUCCESS) { + DEBUG4K_ERROR("LOAD_NOSPACE 0x%llx 0x%llx kr 0x%x\n", (unsigned long long)tmp_start, (uint64_t)delta_size, kr); return LOAD_NOSPACE; } } @@ -2192,6 +2432,7 @@ load_segment( vm_start, vm_end - vm_start); if (ret != LOAD_SUCCESS) { + DEBUG4K_ERROR("unprotect 0x%llx 0x%llx ret %d \n", (uint64_t)vm_start, (uint64_t)vm_end, ret); return ret; } } else { @@ -2221,6 +2462,9 @@ load_segment( } } + if (ret != LOAD_SUCCESS && verbose) { + DEBUG4K_ERROR("ret %d\n", ret); + } return ret; } @@ -2252,6 +2496,7 @@ load_return_t load_version( struct version_min_command *vmc, boolean_t *found_version_cmd, + int ip_flags __unused, load_result_t *result ) { @@ -2305,7 +2550,7 @@ load_version( __builtin_unreachable(); } result->ip_platform = platform; - result->lr_sdk = sdk; + result->lr_min_sdk = sdk; return LOAD_SUCCESS; } @@ -2362,8 +2607,7 @@ load_main( } /* The stack slides down from the default location */ - result->user_stack = addr; - result->user_stack -= slide; + result->user_stack = (user_addr_t)mach_vm_trunc_page((user_addr_t)addr - slide); if (result->using_lcmain || result->entry_point != MACH_VM_MIN_ADDRESS) { /* Already processed LC_MAIN or LC_UNIXTHREAD */ @@ -2411,7 +2655,7 @@ setup_driver_main( } /* The stack slides down from the default location */ - result->user_stack = addr; + result->user_stack = (user_addr_t)addr; result->user_stack -= slide; if (result->using_lcmain || result->entry_point != MACH_VM_MIN_ADDRESS) { @@ -2438,6 +2682,7 @@ load_unixthread( struct thread_command *tcp, thread_t thread, int64_t slide, + boolean_t is_x86_64_compat_binary, load_result_t *result ) { @@ -2459,7 +2704,7 @@ load_unixthread( (uint32_t *)(((vm_offset_t)tcp) + sizeof(struct thread_command)), tcp->cmdsize - sizeof(struct thread_command), - &addr, &customstack, result); + &addr, &customstack, is_x86_64_compat_binary, result); if (ret != LOAD_SUCCESS) { return ret; } @@ -2473,32 +2718,33 @@ load_unixthread( } /* The stack slides down from the default location */ - result->user_stack = addr; - result->user_stack -= slide; + result->user_stack = (user_addr_t)mach_vm_trunc_page((user_addr_t)addr - slide); - ret = load_threadentry(thread, - (uint32_t *)(((vm_offset_t)tcp) + - sizeof(struct thread_command)), - tcp->cmdsize - sizeof(struct thread_command), - &addr); - if (ret != LOAD_SUCCESS) { - return ret; - } + { + ret = load_threadentry(thread, + (uint32_t *)(((vm_offset_t)tcp) + + sizeof(struct thread_command)), + tcp->cmdsize - sizeof(struct thread_command), + &addr); + if (ret != LOAD_SUCCESS) { + return ret; + } - if (result->using_lcmain || result->entry_point != MACH_VM_MIN_ADDRESS) { - /* Already processed LC_MAIN or LC_UNIXTHREAD */ - return LOAD_FAILURE; - } + if (result->using_lcmain || result->entry_point != MACH_VM_MIN_ADDRESS) { + /* Already processed LC_MAIN or LC_UNIXTHREAD */ + return LOAD_FAILURE; + } - result->entry_point = addr; - result->entry_point += slide; + result->entry_point = (user_addr_t)addr; + result->entry_point += slide; - ret = load_threadstate(thread, - (uint32_t *)(((vm_offset_t)tcp) + sizeof(struct thread_command)), - tcp->cmdsize - sizeof(struct thread_command), - result); - if (ret != LOAD_SUCCESS) { - return ret; + ret = load_threadstate(thread, + (uint32_t *)(((vm_offset_t)tcp) + sizeof(struct thread_command)), + tcp->cmdsize - sizeof(struct thread_command), + result); + if (ret != LOAD_SUCCESS) { + return ret; + } } 
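The load_segment() and unprotect_dsmos_segment() hunks above replace bare offset arithmetic with the checked helpers from libkern's <os/overflow.h> (os_add_overflow, os_sub_overflow, os_convert_overflow), each of which returns true when the result would wrap. A minimal sketch of the pattern, assuming the loader's load_return_t codes and treating the bounds as illustrative rather than the exact checks performed above:

    #include <os/overflow.h>

    static load_return_t
    checked_segment_extent(uint64_t pager_offset, uint64_t fileoff,
        uint64_t filesize, uint64_t macho_size, uint64_t *file_offset)
    {
    	uint64_t file_end;

    	/* file_offset = pager_offset + fileoff, rejecting wraparound */
    	if (os_add_overflow(pager_offset, fileoff, file_offset)) {
    		return LOAD_BADMACHO;
    	}
    	/* the segment's file extent must also stay inside the Mach-O slice */
    	if (os_add_overflow(fileoff, filesize, &file_end) ||
    	    file_end > macho_size) {
    		return LOAD_BADMACHO;
    	}
    	return LOAD_SUCCESS;
    }

The same helpers guard the vm_offset/vm_size and tmp_start/tmp_end computations above, so a malformed segment command cannot wrap a mapping back over already-reserved address space.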
result->unixproc = TRUE; @@ -2542,6 +2788,10 @@ load_threadstate( * activation time where we can't bail out cleanly. */ while (total_size > 0) { + if (total_size < 2 * sizeof(uint32_t)) { + return LOAD_BADMACHO; + } + flavor = *ts++; size = *ts++; @@ -2565,6 +2815,7 @@ bad: return ret; } + static load_return_t load_threadstack( @@ -2573,6 +2824,7 @@ load_threadstack( uint32_t total_size, mach_vm_offset_t *user_stack, int *customstack, + __unused boolean_t is_x86_64_compat_binary, load_result_t *result ) { @@ -2586,6 +2838,10 @@ load_threadstack( } while (total_size > 0) { + if (total_size < 2 * sizeof(uint32_t)) { + return LOAD_BADMACHO; + } + flavor = *ts++; size = *ts++; if (UINT32_MAX - 2 < size || @@ -2603,10 +2859,13 @@ load_threadstack( * to the appropriate type in thread_userstack() based on * the value of flavor. */ - ret = thread_userstack(thread, flavor, (thread_state_t)ts, size, user_stack, customstack, result->is_64bit_data); - if (ret != KERN_SUCCESS) { - return LOAD_FAILURE; + { + ret = thread_userstack(thread, flavor, (thread_state_t)ts, size, user_stack, customstack, result->is_64bit_data); + if (ret != KERN_SUCCESS) { + return LOAD_FAILURE; + } } + ts += size; /* ts is a (uint32_t *) */ } return LOAD_SUCCESS; @@ -2631,6 +2890,10 @@ load_threadentry( */ *entry_point = MACH_VM_MIN_ADDRESS; while (total_size > 0) { + if (total_size < 2 * sizeof(uint32_t)) { + return LOAD_BADMACHO; + } + flavor = *ts++; size = *ts++; if (UINT32_MAX - 2 < size || @@ -2675,7 +2938,7 @@ extern int use_alt_dyld; static load_return_t load_dylinker( struct dylinker_command *lcp, - integer_t archbits, + cpu_type_t cputype, vm_map_t map, thread_t thread, int depth, @@ -2747,8 +3010,12 @@ load_dylinker( myresult = &dyld_data->__myresult; macho_data = &dyld_data->__macho_data; - ret = get_macho_vnode(name, archbits, header, - &file_offset, &macho_size, macho_data, &vp); + { + cputype = (cputype & CPU_ARCH_MASK) | (cpu_type() & ~CPU_ARCH_MASK); + } + + ret = get_macho_vnode(name, cputype, header, + &file_offset, &macho_size, macho_data, &vp, imgp); if (ret) { goto novp_out; } @@ -2776,25 +3043,29 @@ load_dylinker( if (myresult->platform_binary) { result->csflags |= CS_DYLD_PLATFORM; } + } - struct vnode_attr va; - VATTR_INIT(&va); - VATTR_WANTED(&va, va_fsid64); - VATTR_WANTED(&va, va_fsid); - VATTR_WANTED(&va, va_fileid); - int error = vnode_getattr(vp, &va, imgp->ip_vfs_context); + struct vnode_attr *va; + va = kheap_alloc(KHEAP_TEMP, sizeof(*va), Z_WAITOK | Z_ZERO); + VATTR_INIT(va); + VATTR_WANTED(va, va_fsid64); + VATTR_WANTED(va, va_fsid); + VATTR_WANTED(va, va_fileid); + int error = vnode_getattr(vp, va, imgp->ip_vfs_context); if (error == 0) { - imgp->ip_dyld_fsid = vnode_get_va_fsid(&va); - imgp->ip_dyld_fsobjid = va.va_fileid; + imgp->ip_dyld_fsid = vnode_get_va_fsid(va); + imgp->ip_dyld_fsobjid = va->va_fileid; } vnode_put(vp); + kheap_free(KHEAP_TEMP, va, sizeof(*va)); novp_out: FREE(dyld_data, M_TEMP); return ret; } + static load_return_t load_code_signature( struct linkedit_data_command *lcp, @@ -2802,6 +3073,7 @@ load_code_signature( off_t macho_offset, off_t macho_size, cpu_type_t cputype, + cpu_subtype_t cpusubtype, load_result_t *result, struct image_params *imgp) { @@ -2813,10 +3085,13 @@ load_code_signature( int error; vm_size_t blob_size; uint32_t sum; + boolean_t anyCPU; addr = 0; blob = NULL; + cpusubtype &= ~CPU_SUBTYPE_MASK; + if (lcp->cmdsize != sizeof(struct linkedit_data_command)) { ret = LOAD_BADMACHO; goto out; @@ -2828,11 +3103,13 @@ load_code_signature( goto out; } - blob 
= ubc_cs_blob_get(vp, cputype, macho_offset); + blob = ubc_cs_blob_get(vp, cputype, cpusubtype, macho_offset); if (blob != NULL) { - /* we already have a blob for this vnode and cputype */ - if (blob->csb_cpu_type != cputype || + /* we already have a blob for this vnode and cpu(sub)type */ + anyCPU = blob->csb_cpu_type == -1; + if ((blob->csb_cpu_type != cputype && + blob->csb_cpu_subtype != cpusubtype && !anyCPU) || blob->csb_base_offset != macho_offset) { /* the blob has changed for this vnode: fail ! */ ret = LOAD_BADMACHO; @@ -2840,16 +3117,23 @@ load_code_signature( } /* It matches the blob we want here, let's verify the version */ - if (ubc_cs_generation_check(vp) == 0) { + if (!anyCPU && ubc_cs_generation_check(vp) == 0) { /* No need to revalidate, we're good! */ ret = LOAD_SUCCESS; goto out; } /* That blob may be stale, let's revalidate. */ - error = ubc_cs_blob_revalidate(vp, blob, imgp, 0); + error = ubc_cs_blob_revalidate(vp, blob, imgp, 0, result->ip_platform); if (error == 0) { /* Revalidation succeeded, we're good! */ + /* If we were revaliding a CS blob with any CPU arch we adjust it */ + if (anyCPU) { + vnode_lock_spin(vp); + blob->csb_cpu_type = cputype; + blob->csb_cpu_subtype = cpusubtype; + vnode_unlock(vp); + } ret = LOAD_SUCCESS; goto out; } @@ -2893,7 +3177,9 @@ load_code_signature( } if (ubc_cs_blob_add(vp, + result->ip_platform, cputype, + cpusubtype, macho_offset, &addr, lcp->datasize, @@ -2987,15 +3273,12 @@ set_code_unprotect( return LOAD_FAILURE; } - MALLOC_ZONE(vpath, char *, MAXPATHLEN, M_NAMEI, M_WAITOK); - if (vpath == NULL) { - return LOAD_FAILURE; - } + vpath = zalloc(ZV_NAMEI); len = MAXPATHLEN; error = vn_getpath(vp, vpath, &len); if (error) { - FREE_ZONE(vpath, MAXPATHLEN, M_NAMEI); + zfree(ZV_NAMEI, vpath); return LOAD_FAILURE; } @@ -3014,7 +3297,7 @@ set_code_unprotect( p->p_pid, p->p_comm, map, __FUNCTION__, vpath, kr); } #endif /* VM_MAP_DEBUG_APPLE_PROTECT */ - FREE_ZONE(vpath, MAXPATHLEN, M_NAMEI); + zfree(ZV_NAMEI, vpath); if (kr) { printf("set_code_unprotect: unable to create decrypter %s, kr=%d\n", @@ -3051,21 +3334,23 @@ set_code_unprotect( if ((seg64->fileoff <= eip->cryptoff) && (seg64->fileoff + seg64->filesize >= eip->cryptoff + eip->cryptsize)) { - map_offset = seg64->vmaddr + eip->cryptoff - seg64->fileoff + slide; + map_offset = (vm_map_offset_t)(seg64->vmaddr + eip->cryptoff - seg64->fileoff + slide); map_size = eip->cryptsize; crypto_backing_offset = macho_offset + eip->cryptoff; goto remap_now; } + break; case LC_SEGMENT: seg32 = (struct segment_command *)lcp; if ((seg32->fileoff <= eip->cryptoff) && (seg32->fileoff + seg32->filesize >= eip->cryptoff + eip->cryptsize)) { - map_offset = seg32->vmaddr + eip->cryptoff - seg32->fileoff + slide; + map_offset = (vm_map_offset_t)(seg32->vmaddr + eip->cryptoff - seg32->fileoff + slide); map_size = eip->cryptsize; crypto_backing_offset = macho_offset + eip->cryptoff; goto remap_now; } + break; } } @@ -3081,7 +3366,8 @@ remap_now: map_offset, map_offset + map_size, crypto_backing_offset, - &crypt_info); + &crypt_info, + CRYPTID_APP_ENCRYPTION); if (kr) { printf("set_code_unprotect(): mapping failed with %x\n", kr); return LOAD_PROTECT; @@ -3102,12 +3388,13 @@ static load_return_t get_macho_vnode( const char *path, - integer_t archbits, + cpu_type_t cputype, struct mach_header *mach_header, off_t *file_offset, off_t *macho_size, struct macho_data *data, - struct vnode **vpp + struct vnode **vpp, + struct image_params *imgp ) { struct vnode *vp; @@ -3205,8 +3492,8 @@ get_macho_vnode( } /* Look 
up our architecture in the fat file. */ - error = fatfile_getarch_with_bits(archbits, - (vm_offset_t)(&header->fat_header), sizeof(*header), &fat_arch); + error = fatfile_getbestarch_for_cputype(cputype, CPU_SUBTYPE_ANY, + (vm_offset_t)(&header->fat_header), sizeof(*header), imgp, &fat_arch); if (error != LOAD_SUCCESS) { goto bad2; } @@ -3245,7 +3532,7 @@ get_macho_vnode( * required, since the dynamic linker might work, but we will * refuse to load it because of this check. */ - if ((cpu_type_t)(header->mach_header.cputype & CPU_ARCH_MASK) != archbits) { + if ((cpu_type_t)header->mach_header.cputype != cputype) { error = LOAD_BADARCH; goto bad2; } diff --git a/bsd/kern/mach_loader.h b/bsd/kern/mach_loader.h index 606d733fb..dde6d6f51 100644 --- a/bsd/kern/mach_loader.h +++ b/bsd/kern/mach_loader.h @@ -77,7 +77,8 @@ typedef struct _load_result { #endif /* __arm64__ */ is_64bit_addr : 1, is_64bit_data : 1, - custom_stack : 1; + custom_stack : 1, + is_cambria : 1; unsigned int csflags; unsigned char uuid[16]; mach_vm_address_t min_vm_addr; @@ -87,7 +88,11 @@ typedef struct _load_result { void *threadstate; size_t threadstate_sz; uint32_t ip_platform; + uint32_t lr_min_sdk; uint32_t lr_sdk; + user_addr_t dynlinker_mach_header; + user_addr_t dynlinker_max_vm_addr; + int dynlinker_fd; } load_result_t; struct image_params; diff --git a/bsd/kern/mach_process.c b/bsd/kern/mach_process.c index 915a8cb45..08d8e1e88 100644 --- a/bsd/kern/mach_process.c +++ b/bsd/kern/mach_process.c @@ -117,7 +117,7 @@ extern thread_t get_firstthread(task_t); int ptrace(struct proc *p, struct ptrace_args *uap, int32_t *retval) { - struct proc *t = current_proc(); /* target process */ + struct proc *t; /* target process */ task_t task; thread_t th_act; struct uthread *ut; @@ -131,7 +131,7 @@ ptrace(struct proc *p, struct ptrace_args *uap, int32_t *retval) AUDIT_ARG(value32, uap->data); if (uap->req == PT_DENY_ATTACH) { -#if (DEVELOPMENT || DEBUG) && CONFIG_EMBEDDED +#if (DEVELOPMENT || DEBUG) && !defined(XNU_TARGET_OS_OSX) if (PE_i_can_has_debugger(NULL)) { return 0; } @@ -154,6 +154,7 @@ ptrace(struct proc *p, struct ptrace_args *uap, int32_t *retval) if (uap->req == PT_FORCEQUOTA) { if (kauth_cred_issuser(kauth_cred_get())) { + t = current_proc(); OSBitOrAtomic(P_FORCEQUOTA, &t->p_flag); return 0; } else { @@ -177,7 +178,20 @@ retry_trace_me: ; * when, in this case, it is the current process's parent. * Most of the other checks in cantrace() don't apply either. */ - if ((error = mac_proc_check_debug(pproc, p)) == 0) { + struct proc_ident p_ident = proc_ident(p); + struct proc_ident pproc_ident = proc_ident(pproc); + kauth_cred_t pproc_cred = kauth_cred_proc_ref(pproc); + + proc_rele(pproc); + error = mac_proc_check_debug(&pproc_ident, pproc_cred, &p_ident); + kauth_cred_unref(&pproc_cred); + + if (error != 0) { + return error; + } + if (proc_find_ident(&pproc_ident) == PROC_NULL) { + return ESRCH; + } #endif proc_lock(p); /* Make sure the process wasn't re-parented. */ @@ -193,10 +207,8 @@ retry_trace_me: ; /* Child and parent will have to be able to run modified code. 
*/ cs_allow_invalid(p); cs_allow_invalid(pproc); -#if CONFIG_MACF - } -#endif proc_rele(pproc); + return error; } if (uap->req == PT_SIGEXC) { @@ -238,7 +250,7 @@ retry_trace_me: ; #pragma clang diagnostic pop int err; -#if CONFIG_EMBEDDED +#if !defined(XNU_TARGET_OS_OSX) if (tr_sigexc == 0) { error = ENOTSUP; goto out; @@ -282,12 +294,18 @@ retry_trace_me: ; error = 0; goto out; } else { + error = err; + if (error == ESRCH) { + /* + * The target 't' is not valid anymore as it + * could not be found after the MAC check. + */ + return error; + } /* not allowed to attach, proper error code returned by kauth_authorize_process */ if (ISSET(t->p_lflag, P_LNOATTACH)) { psignal(p, SIGSEGV); } - - error = err; goto out; } } @@ -528,11 +546,26 @@ cantrace(proc_t cur_procp, kauth_cred_t creds, proc_t traced_procp, int *errp) } #if CONFIG_MACF - if ((my_err = mac_proc_check_debug(cur_procp, traced_procp)) != 0) { + struct proc_ident cur_ident = proc_ident(cur_procp); + struct proc_ident traced_ident = proc_ident(traced_procp); + kauth_cred_t cur_cred = kauth_cred_proc_ref(cur_procp); + + /* + * Drop the proc reference to avoid a deadlock during an upcall and find + * (reference) the proc again so our caller can keep using it. + */ + proc_rele(traced_procp); + my_err = mac_proc_check_debug(&cur_ident, cur_cred, &traced_ident); + kauth_cred_unref(&cur_cred); + + if (proc_find_ident(&traced_ident) == PROC_NULL) { + *errp = ESRCH; + return 0; + } + if (my_err != 0) { *errp = my_err; return 0; } #endif - return 1; } diff --git a/bsd/kern/makekdebugevents.py b/bsd/kern/makekdebugevents.py deleted file mode 100755 index 73b2db49e..000000000 --- a/bsd/kern/makekdebugevents.py +++ /dev/null @@ -1,38 +0,0 @@ -#!/usr/bin/python -# -# This script scans the trace.codes file, containing a mapping of event id to -# event name for all events, and writes to stdout a C declaration for a table -# named kd_events[] or these mappings. -# Required to generate a header file used by DEVELOPMENT and DEBUG kernels. -# - -import sys -import re - -# we expect one arg specifying the path to the trace.codes file -if (len(sys.argv) < 2): - exit(1) -trace_code_file = sys.argv[1] - -# regular expression pattern to match -id_name_pattern = re.compile('0x([0-9a-fA-F]+)\s+([^\s]*)') -code_table = [] - -# scan file to generate internal table -with open(trace_code_file, 'rt') as codes: - for line in codes: - m = id_name_pattern.match(line) - if m: - code_table += [(int(m.group(1),base=16), m.group(2))] - -# emit typedef: -print "typedef struct {" -print " uint32_t id;" -print " const char *name;" -print "} kd_event_t;" -# emit structure declaration and sorted initialization: -print "kd_event_t kd_events[] = {" -for mapping in sorted(code_table, key=lambda x: x[0]): - print " {0x%x, \"%s\"}," % mapping -print "};" - diff --git a/bsd/kern/makesyscalls.sh b/bsd/kern/makesyscalls.sh index ffdacf957..a8d67728a 100755 --- a/bsd/kern/makesyscalls.sh +++ b/bsd/kern/makesyscalls.sh @@ -157,6 +157,7 @@ s/\$//g switchname = \"$switchname\" namesname = \"$namesname\" infile = \"$input_file\" + infilepretty = \"${input_file#"$TARGET"}\" "' printf "/*\n" > syslegal @@ -189,7 +190,7 @@ s/\$//g printf " * \n" > syslegal printf " * System call switch table.\n *\n" > syslegal printf " * DO NOT EDIT-- this file is automatically generated.\n" > syslegal - printf " * created from %s\n */\n\n", infile > syslegal + printf " * created from %s\n */\n\n", infilepretty > syslegal } NR == 1 { printf "\n/* The casts are bogus but will do for now. 
*/\n" > sysent @@ -516,15 +517,15 @@ s/\$//g else printf("\t\tcase %d:\n\t\t\tp = \"%s\";\n\t\t\tbreak;\n", i - 1, arg) > systraceargdesctempfile if (index(arg, "*") > 0 || arg == "caddr_t") - printf("\t\tuarg[%d] = (intptr_t) p->%s; /* %s */\n", \ + printf("\t\tuarg[%d] = (uint64_t) p->%s; /* %s */\n", \ i - 1, \ argname[i], arg) > systraceargstempfile else if (substr(arg, 1, 1) == "u" || arg == "size_t") - printf("\t\tuarg[%d] = p->%s; /* %s */\n", \ + printf("\t\tuarg[%d] = (uint64_t) p->%s; /* %s */\n", \ i - 1, \ argname[i], arg) > systraceargstempfile else - printf("\t\tiarg[%d] = p->%s; /* %s */\n", \ + printf("\t\tiarg[%d] = (int64_t) p->%s; /* %s */\n", \ i - 1, \ argname[i], arg) > systraceargstempfile } @@ -771,7 +772,7 @@ s/\$//g printf("\n#endif /* !%s */\n", sysproto_h) > sysprotoend printf("};\n") > sysent - printf("const unsigned int nsysent = sizeof(sysent) / sizeof(sysent[0]);\n") > sysent + printf("const unsigned int nsysent = sizeof(sysent) / sizeof(sysent[0]);\n") > sysent printf("};\n") > syscallnamestempfile printf("#define\t%sMAXSYSCALL\t%d\n", syscallprefix, syscall_num) \ @@ -788,11 +789,8 @@ s/\$//g printf "\tdefault:\n\t\tbreak;\n\t};\n\tif (p != NULL)\n\t\tstrlcpy(desc, p, descsz);\n}\n" > systracerettempfile } ' -# define value in syscall table file to permit redifintion because of the way -# __private_extern__ (doesn't) work. if [ $output_syscalltablefile -eq 1 ]; then cat $syslegal > $syscalltablefile - printf "#define __INIT_SYSENT_C__ 1\n" >> $syscalltablefile cat $sysinc $sysent >> $syscalltablefile fi diff --git a/bsd/kern/mcache.c b/bsd/kern/mcache.c index 03326e009..76b4c601e 100644 --- a/bsd/kern/mcache.c +++ b/bsd/kern/mcache.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2006-2019 Apple Inc. All rights reserved. + * Copyright (c) 2006-2020 Apple Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -99,7 +99,7 @@ #define MCACHE_UNLOCK(l) lck_mtx_unlock(l) #define MCACHE_LOCK_TRY(l) lck_mtx_try_lock(l) -static int ncpu; +static unsigned int ncpu; static unsigned int cache_line_size; static lck_mtx_t *mcache_llock; static struct thread *mcache_llock_owner; @@ -186,7 +186,7 @@ mcache_init(void) VERIFY(mca_trn_max >= 2); - ncpu = ml_get_max_cpus(); + ncpu = ml_wait_max_cpus(); (void) mcache_cache_line_size(); /* prime it */ mcache_llock_grp_attr = lck_grp_attr_alloc_init(); @@ -203,14 +203,7 @@ mcache_init(void) __builtin_unreachable(); } - mcache_zone = zinit(MCACHE_ALLOC_SIZE, 256 * MCACHE_ALLOC_SIZE, - PAGE_SIZE, "mcache"); - if (mcache_zone == NULL) { - panic("mcache_init: failed to allocate mcache zone\n"); - /* NOTREACHED */ - __builtin_unreachable(); - } - zone_change(mcache_zone, Z_CALLERACCT, FALSE); + mcache_zone = zone_create("mcache", MCACHE_ALLOC_SIZE, ZC_DESTRUCTIBLE); LIST_INIT(&mcache_head); @@ -253,7 +246,7 @@ mcache_cache_line_size(void) if (cache_line_size == 0) { ml_cpu_info_t cpu_info; ml_cpu_get_info(&cpu_info); - cache_line_size = cpu_info.cache_line_size; + cache_line_size = (unsigned int)cpu_info.cache_line_size; } return cache_line_size; } @@ -300,7 +293,7 @@ mcache_create_common(const char *name, size_t bufsize, size_t align, mcache_t *cp = NULL; size_t chunksize; void *buf, **pbuf; - int c; + unsigned int c; char lck_name[64]; /* If auditing is on and print buffer is NULL, allocate it now */ @@ -382,11 +375,7 @@ mcache_create_common(const char *name, size_t bufsize, size_t align, VERIFY(align != 0 && (align % MCACHE_ALIGN) == 0); chunksize += sizeof(uint64_t) + align; chunksize = P2ROUNDUP(chunksize, align); - if ((cp->mc_slab_zone = zinit(chunksize, 64 * 1024 * ncpu, - PAGE_SIZE, cp->mc_name)) == NULL) { - goto fail; - } - zone_change(cp->mc_slab_zone, Z_EXPAND, TRUE); + cp->mc_slab_zone = zone_create(cp->mc_name, chunksize, ZC_DESTRUCTIBLE); } cp->mc_chunksize = chunksize; @@ -1146,7 +1135,7 @@ static void mcache_cache_bkt_enable(mcache_t *cp) { mcache_cpu_t *ccp; - int cpu; + unsigned int cpu; if (cp->mc_flags & MCF_NOCPUCACHE) { return; @@ -1168,7 +1157,8 @@ mcache_bkt_purge(mcache_t *cp) { mcache_cpu_t *ccp; mcache_bkt_t *bp, *pbp; - int cpu, objs, pobjs; + int objs, pobjs; + unsigned int cpu; for (cpu = 0; cpu < ncpu; cpu++) { ccp = &cp->mc_cpu[cpu]; @@ -1508,7 +1498,7 @@ mcache_buffer_log(mcache_audit_t *mca, void *addr, mcache_t *cp, transaction->mca_thread = current_thread(); bzero(stack, sizeof(stack)); - transaction->mca_depth = OSBacktrace(stack, MCACHE_STACK_DEPTH + 1) - 1; + transaction->mca_depth = (uint16_t)OSBacktrace(stack, MCACHE_STACK_DEPTH + 1) - 1; bcopy(&stack[1], transaction->mca_stack, sizeof(transaction->mca_stack)); @@ -1526,7 +1516,13 @@ mcache_buffer_log(mcache_audit_t *mca, void *addr, mcache_t *cp, (mca->mca_next_trn + 1) % mca_trn_max; } -__private_extern__ void +/* + * N.B.: mcache_set_pattern(), mcache_verify_pattern() and + * mcache_verify_set_pattern() are marked as noinline to prevent the + * compiler from aliasing pointers when they are inlined inside the callers + * (e.g. mcache_audit_free_verify_set()) which would be undefined behavior. 
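The comment above motivates marking mcache_set_pattern(), mcache_verify_pattern() and mcache_verify_set_pattern() OS_NOINLINE: if they were inlined into a caller that also touches the same buffer through differently-typed pointers, the u_int64_t stores could fall foul of strict-aliasing assumptions. A minimal out-of-line fill helper illustrating the shape of that discipline (the names here are hypothetical, not the mcache implementation):

    #include <os/base.h>    /* OS_NOINLINE: __attribute__((noinline)) */
    #include <stddef.h>
    #include <stdint.h>

    OS_NOINLINE static void
    fill_pattern(uint64_t pattern, void *buf_arg, size_t size)
    {
    	uint64_t *buf = buf_arg;
    	uint64_t *buf_end = (uint64_t *)((char *)buf_arg + size);

    	/* Kept out of line so the compiler cannot fold these stores into a
    	 * caller's differently-typed accesses to the same buffer. */
    	while (buf < buf_end) {
    		*buf++ = pattern;
    	}
    }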
+ */ +__private_extern__ OS_NOINLINE void mcache_set_pattern(u_int64_t pattern, void *buf_arg, size_t size) { u_int64_t *buf_end = (u_int64_t *)((void *)((char *)buf_arg + size)); @@ -1540,7 +1536,7 @@ mcache_set_pattern(u_int64_t pattern, void *buf_arg, size_t size) } } -__private_extern__ void * +__private_extern__ OS_NOINLINE void * mcache_verify_pattern(u_int64_t pattern, void *buf_arg, size_t size) { u_int64_t *buf_end = (u_int64_t *)((void *)((char *)buf_arg + size)); @@ -1557,7 +1553,7 @@ mcache_verify_pattern(u_int64_t pattern, void *buf_arg, size_t size) return NULL; } -__private_extern__ void * +OS_NOINLINE static void * mcache_verify_set_pattern(u_int64_t old, u_int64_t new, void *buf_arg, size_t size) { diff --git a/bsd/kern/netboot.c b/bsd/kern/netboot.c index f0cfb1037..63307089f 100644 --- a/bsd/kern/netboot.c +++ b/bsd/kern/netboot.c @@ -60,9 +60,6 @@ #include #include -//#include -extern struct filedesc filedesc0; - extern int nfs_mountroot(void); /* nfs_vfsops.c */ extern int (*mountroot)(void); @@ -102,14 +99,14 @@ struct netboot_info { struct in_addr client_ip; struct in_addr server_ip; char * server_name; - int server_name_length; + size_t server_name_length; char * mount_point; - int mount_point_length; + size_t mount_point_length; char * image_path; - int image_path_length; + size_t image_path_length; NetBootImageType image_type; char * second_image_path; - int second_image_path_length; + size_t second_image_path_length; }; /* @@ -308,10 +305,10 @@ get_root_path(char * root_path) } static void -save_path(char * * str_p, int * length_p, char * path) +save_path(char * * str_p, size_t * length_p, char * path) { *length_p = strlen(path) + 1; - *str_p = (char *)kalloc(*length_p); + *str_p = (char *)kheap_alloc(KHEAP_DATA_BUFFERS, *length_p, Z_WAITOK); strlcpy(*str_p, path, *length_p); return; } @@ -329,10 +326,8 @@ netboot_info_init(struct in_addr iaddr) info->image_type = kNetBootImageTypeUnknown; /* check for a booter-specified path then a NetBoot path */ - MALLOC_ZONE(root_path, caddr_t, MAXPATHLEN, M_NAMEI, M_WAITOK); - if (root_path == NULL) { - panic("netboot_info_init: M_NAMEI zone exhausted"); - } + root_path = zalloc(ZV_NAMEI); + if (PE_parse_boot_argn("rp0", root_path, MAXPATHLEN) == TRUE || PE_parse_boot_argn("rp", root_path, MAXPATHLEN) == TRUE || PE_parse_boot_argn("rootpath", root_path, MAXPATHLEN) == TRUE) { @@ -357,9 +352,11 @@ netboot_info_init(struct in_addr iaddr) info->image_type = kNetBootImageTypeNFS; info->server_ip = server_ip; info->server_name_length = strlen(server_name) + 1; - info->server_name = (char *)kalloc(info->server_name_length); + info->server_name = kheap_alloc(KHEAP_DATA_BUFFERS, + info->server_name_length, Z_WAITOK); info->mount_point_length = strlen(mount_point) + 1; - info->mount_point = (char *)kalloc(info->mount_point_length); + info->mount_point = kheap_alloc(KHEAP_DATA_BUFFERS, + info->mount_point_length, Z_WAITOK); strlcpy(info->server_name, server_name, info->server_name_length); strlcpy(info->mount_point, mount_point, info->mount_point_length); @@ -373,7 +370,8 @@ netboot_info_init(struct in_addr iaddr) needs_slash = TRUE; info->image_path_length++; } - info->image_path = (char *)kalloc(info->image_path_length); + info->image_path = kheap_alloc(KHEAP_DATA_BUFFERS, + info->image_path_length, Z_WAITOK); if (needs_slash) { info->image_path[0] = '/'; strlcpy(info->image_path + 1, image_path, @@ -408,7 +406,7 @@ netboot_info_init(struct in_addr iaddr) printf("netboot: nested image %s\n", info->second_image_path); } } - 
FREE_ZONE(root_path, MAXPATHLEN, M_NAMEI); + zfree(ZV_NAMEI, root_path); return info; } @@ -419,21 +417,24 @@ netboot_info_free(struct netboot_info * * info_p) if (info) { if (info->mount_point) { - kfree(info->mount_point, info->mount_point_length); + kheap_free(KHEAP_DATA_BUFFERS, info->mount_point, + info->mount_point_length); } if (info->server_name) { - kfree(info->server_name, info->server_name_length); + kheap_free(KHEAP_DATA_BUFFERS, info->server_name, + info->server_name_length); } if (info->image_path) { - kfree(info->image_path, info->image_path_length); + kheap_free(KHEAP_DATA_BUFFERS, info->image_path, + info->image_path_length); } if (info->second_image_path) { - kfree(info->second_image_path, info->second_image_path_length); + kheap_free(KHEAP_DATA_BUFFERS, info->second_image_path, + info->second_image_path_length); } kfree(info, sizeof(*info)); } *info_p = NULL; - return; } boolean_t @@ -449,8 +450,8 @@ netboot_iaddr(struct in_addr * iaddr_p) boolean_t netboot_rootpath(struct in_addr * server_ip, - char * name, int name_len, - char * path, int path_len) + char * name, size_t name_len, + char * path, size_t path_len) { if (S_netboot_info_p == NULL) { return FALSE; @@ -463,7 +464,7 @@ netboot_rootpath(struct in_addr * server_ip, return FALSE; } if (path_len < S_netboot_info_p->mount_point_length) { - printf("netboot: path too small %d < %d\n", + printf("netboot: path too small %zu < %zu\n", path_len, S_netboot_info_p->mount_point_length); return FALSE; } @@ -761,7 +762,7 @@ failed: } int -netboot_setup() +netboot_setup(void) { int error = 0; diff --git a/bsd/kern/policy_check.c b/bsd/kern/policy_check.c index 83581807f..7621bdec3 100644 --- a/bsd/kern/policy_check.c +++ b/bsd/kern/policy_check.c @@ -121,7 +121,7 @@ common_hook(void) return rv; } -#if (MAC_POLICY_OPS_VERSION != 62) +#if (MAC_POLICY_OPS_VERSION != 69) # error "struct mac_policy_ops doesn't match definition in mac_policy.h" #endif /* @@ -134,10 +134,10 @@ const static struct mac_policy_ops policy_ops = { CHECK_SET_HOOK(audit_check_postselect) CHECK_SET_HOOK(audit_check_preselect) - CHECK_SET_HOOK(bpfdesc_label_associate) - CHECK_SET_HOOK(bpfdesc_label_destroy) - CHECK_SET_HOOK(bpfdesc_label_init) - CHECK_SET_HOOK(bpfdesc_check_receive) + .mpo_reserved01 = (mpo_reserved_hook_t *)common_hook, + .mpo_reserved02 = (mpo_reserved_hook_t *)common_hook, + .mpo_reserved03 = (mpo_reserved_hook_t *)common_hook, + .mpo_reserved04 = (mpo_reserved_hook_t *)common_hook, CHECK_SET_HOOK(cred_check_label_update_execve) CHECK_SET_HOOK(cred_check_label_update) @@ -177,32 +177,29 @@ const static struct mac_policy_ops policy_ops = { CHECK_SET_HOOK(file_label_init) CHECK_SET_HOOK(file_label_destroy) CHECK_SET_HOOK(file_label_associate) - - CHECK_SET_HOOK(ifnet_check_label_update) - CHECK_SET_HOOK(ifnet_check_transmit) - CHECK_SET_HOOK(ifnet_label_associate) - CHECK_SET_HOOK(ifnet_label_copy) - CHECK_SET_HOOK(ifnet_label_destroy) - CHECK_SET_HOOK(ifnet_label_externalize) - CHECK_SET_HOOK(ifnet_label_init) - CHECK_SET_HOOK(ifnet_label_internalize) - CHECK_SET_HOOK(ifnet_label_update) - CHECK_SET_HOOK(ifnet_label_recycle) - - CHECK_SET_HOOK(inpcb_check_deliver) - CHECK_SET_HOOK(inpcb_label_associate) - CHECK_SET_HOOK(inpcb_label_destroy) - CHECK_SET_HOOK(inpcb_label_init) - CHECK_SET_HOOK(inpcb_label_recycle) - CHECK_SET_HOOK(inpcb_label_update) - - CHECK_SET_HOOK(iokit_check_device) - - CHECK_SET_HOOK(ipq_label_associate) - CHECK_SET_HOOK(ipq_label_compare) - CHECK_SET_HOOK(ipq_label_destroy) - CHECK_SET_HOOK(ipq_label_init) - 
CHECK_SET_HOOK(ipq_label_update) + CHECK_SET_HOOK(file_notify_close) + + .mpo_reserved06 = (mpo_reserved_hook_t *)common_hook, + .mpo_reserved07 = (mpo_reserved_hook_t *)common_hook, + .mpo_reserved08 = (mpo_reserved_hook_t *)common_hook, + .mpo_reserved09 = (mpo_reserved_hook_t *)common_hook, + .mpo_reserved10 = (mpo_reserved_hook_t *)common_hook, + .mpo_reserved11 = (mpo_reserved_hook_t *)common_hook, + .mpo_reserved12 = (mpo_reserved_hook_t *)common_hook, + .mpo_reserved13 = (mpo_reserved_hook_t *)common_hook, + .mpo_reserved14 = (mpo_reserved_hook_t *)common_hook, + .mpo_reserved15 = (mpo_reserved_hook_t *)common_hook, + .mpo_reserved16 = (mpo_reserved_hook_t *)common_hook, + .mpo_reserved17 = (mpo_reserved_hook_t *)common_hook, + .mpo_reserved18 = (mpo_reserved_hook_t *)common_hook, + .mpo_reserved19 = (mpo_reserved_hook_t *)common_hook, + .mpo_reserved20 = (mpo_reserved_hook_t *)common_hook, + .mpo_reserved21 = (mpo_reserved_hook_t *)common_hook, + .mpo_reserved22 = (mpo_reserved_hook_t *)common_hook, + .mpo_reserved23 = (mpo_reserved_hook_t *)common_hook, + .mpo_reserved24 = (mpo_reserved_hook_t *)common_hook, + .mpo_reserved25 = (mpo_reserved_hook_t *)common_hook, + .mpo_reserved26 = (mpo_reserved_hook_t *)common_hook, CHECK_SET_HOOK(file_check_library_validation) @@ -215,17 +212,17 @@ const static struct mac_policy_ops policy_ops = { CHECK_SET_HOOK(vnode_notify_setutimes) CHECK_SET_HOOK(vnode_notify_truncate) - CHECK_SET_HOOK(mbuf_label_associate_bpfdesc) - CHECK_SET_HOOK(mbuf_label_associate_ifnet) - CHECK_SET_HOOK(mbuf_label_associate_inpcb) - CHECK_SET_HOOK(mbuf_label_associate_ipq) - CHECK_SET_HOOK(mbuf_label_associate_linklayer) - CHECK_SET_HOOK(mbuf_label_associate_multicast_encap) - CHECK_SET_HOOK(mbuf_label_associate_netlayer) - CHECK_SET_HOOK(mbuf_label_associate_socket) - CHECK_SET_HOOK(mbuf_label_copy) - CHECK_SET_HOOK(mbuf_label_destroy) - CHECK_SET_HOOK(mbuf_label_init) + .mpo_reserved27 = (mpo_reserved_hook_t *)common_hook, + .mpo_reserved28 = (mpo_reserved_hook_t *)common_hook, + .mpo_reserved29 = (mpo_reserved_hook_t *)common_hook, + .mpo_reserved30 = (mpo_reserved_hook_t *)common_hook, + .mpo_reserved31 = (mpo_reserved_hook_t *)common_hook, + .mpo_reserved32 = (mpo_reserved_hook_t *)common_hook, + .mpo_reserved33 = (mpo_reserved_hook_t *)common_hook, + .mpo_reserved34 = (mpo_reserved_hook_t *)common_hook, + .mpo_reserved35 = (mpo_reserved_hook_t *)common_hook, + .mpo_reserved36 = (mpo_reserved_hook_t *)common_hook, + .mpo_reserved37 = (mpo_reserved_hook_t *)common_hook, CHECK_SET_HOOK(mount_check_fsctl) CHECK_SET_HOOK(mount_check_getattr) @@ -241,24 +238,24 @@ const static struct mac_policy_ops policy_ops = { CHECK_SET_HOOK(mount_label_init) CHECK_SET_HOOK(mount_label_internalize) - CHECK_SET_HOOK(netinet_fragment) - CHECK_SET_HOOK(netinet_icmp_reply) - CHECK_SET_HOOK(netinet_tcp_reply) + .mpo_reserved38 = (mpo_reserved_hook_t *)common_hook, + .mpo_reserved39 = (mpo_reserved_hook_t *)common_hook, + .mpo_reserved40 = (mpo_reserved_hook_t *)common_hook, CHECK_SET_HOOK(pipe_check_ioctl) CHECK_SET_HOOK(pipe_check_kqfilter) - CHECK_SET_HOOK(pipe_check_label_update) + .mpo_reserved41 = (mpo_reserved_hook_t *)common_hook, CHECK_SET_HOOK(pipe_check_read) CHECK_SET_HOOK(pipe_check_select) CHECK_SET_HOOK(pipe_check_stat) CHECK_SET_HOOK(pipe_check_write) CHECK_SET_HOOK(pipe_label_associate) - CHECK_SET_HOOK(pipe_label_copy) + .mpo_reserved42 = (mpo_reserved_hook_t *)common_hook, CHECK_SET_HOOK(pipe_label_destroy) - CHECK_SET_HOOK(pipe_label_externalize) + .mpo_reserved43 = 
(mpo_reserved_hook_t *)common_hook, CHECK_SET_HOOK(pipe_label_init) - CHECK_SET_HOOK(pipe_label_internalize) - CHECK_SET_HOOK(pipe_label_update) + .mpo_reserved44 = (mpo_reserved_hook_t *)common_hook, + .mpo_reserved45 = (mpo_reserved_hook_t *)common_hook, CHECK_SET_HOOK(policy_destroy) /* special hooks for policy init's */ @@ -271,7 +268,7 @@ const static struct mac_policy_ops policy_ops = { CHECK_SET_HOOK(vnode_check_rename) CHECK_SET_HOOK(kext_check_query) CHECK_SET_HOOK(proc_notify_exec_complete) - .mpo_reserved4 = (mpo_reserved_hook_t *)common_hook, + CHECK_SET_HOOK(proc_notify_cs_invalidated) CHECK_SET_HOOK(proc_check_syscall_unix) CHECK_SET_HOOK(proc_check_expose_task) CHECK_SET_HOOK(proc_check_set_host_special_port) @@ -286,7 +283,7 @@ const static struct mac_policy_ops policy_ops = { CHECK_SET_HOOK(vnode_check_trigger_resolve) CHECK_SET_HOOK(mount_check_mount_late) CHECK_SET_HOOK(mount_check_snapshot_mount) - .mpo_reserved2 = (mpo_reserved_hook_t *)common_hook, + CHECK_SET_HOOK(vnode_notify_reclaim) CHECK_SET_HOOK(skywalk_flow_check_connect) CHECK_SET_HOOK(skywalk_flow_check_listen) @@ -323,39 +320,38 @@ const static struct mac_policy_ops policy_ops = { CHECK_SET_HOOK(proc_check_signal) CHECK_SET_HOOK(proc_check_wait) CHECK_SET_HOOK(proc_check_dump_core) - - .mpo_reserved5 = (mpo_reserved_hook_t *)common_hook, + CHECK_SET_HOOK(proc_check_remote_thread_create) CHECK_SET_HOOK(socket_check_accept) CHECK_SET_HOOK(socket_check_accepted) CHECK_SET_HOOK(socket_check_bind) CHECK_SET_HOOK(socket_check_connect) CHECK_SET_HOOK(socket_check_create) - CHECK_SET_HOOK(socket_check_deliver) - CHECK_SET_HOOK(socket_check_kqfilter) - CHECK_SET_HOOK(socket_check_label_update) + .mpo_reserved46 = (mpo_reserved_hook_t *)common_hook, + .mpo_reserved47 = (mpo_reserved_hook_t *)common_hook, + .mpo_reserved48 = (mpo_reserved_hook_t *)common_hook, CHECK_SET_HOOK(socket_check_listen) CHECK_SET_HOOK(socket_check_receive) CHECK_SET_HOOK(socket_check_received) - CHECK_SET_HOOK(socket_check_select) + .mpo_reserved49 = (mpo_reserved_hook_t *)common_hook, CHECK_SET_HOOK(socket_check_send) CHECK_SET_HOOK(socket_check_stat) CHECK_SET_HOOK(socket_check_setsockopt) CHECK_SET_HOOK(socket_check_getsockopt) - CHECK_SET_HOOK(socket_label_associate_accept) - CHECK_SET_HOOK(socket_label_associate) - CHECK_SET_HOOK(socket_label_copy) - CHECK_SET_HOOK(socket_label_destroy) - CHECK_SET_HOOK(socket_label_externalize) - CHECK_SET_HOOK(socket_label_init) - CHECK_SET_HOOK(socket_label_internalize) - CHECK_SET_HOOK(socket_label_update) - - CHECK_SET_HOOK(socketpeer_label_associate_mbuf) - CHECK_SET_HOOK(socketpeer_label_associate_socket) - CHECK_SET_HOOK(socketpeer_label_destroy) - CHECK_SET_HOOK(socketpeer_label_externalize) - CHECK_SET_HOOK(socketpeer_label_init) + + .mpo_reserved50 = (mpo_reserved_hook_t *)common_hook, + .mpo_reserved51 = (mpo_reserved_hook_t *)common_hook, + .mpo_reserved52 = (mpo_reserved_hook_t *)common_hook, + .mpo_reserved53 = (mpo_reserved_hook_t *)common_hook, + .mpo_reserved54 = (mpo_reserved_hook_t *)common_hook, + .mpo_reserved55 = (mpo_reserved_hook_t *)common_hook, + .mpo_reserved56 = (mpo_reserved_hook_t *)common_hook, + .mpo_reserved57 = (mpo_reserved_hook_t *)common_hook, + .mpo_reserved58 = (mpo_reserved_hook_t *)common_hook, + .mpo_reserved59 = (mpo_reserved_hook_t *)common_hook, + .mpo_reserved60 = (mpo_reserved_hook_t *)common_hook, + .mpo_reserved61 = (mpo_reserved_hook_t *)common_hook, + .mpo_reserved62 = (mpo_reserved_hook_t *)common_hook, CHECK_SET_HOOK(system_check_acct) 
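In the policy_ops table above, retired MAC hooks are not deleted: their slots are pointed at reserved placeholders (.mpo_reservedNN = common_hook), so the structure's size and field offsets stay in lockstep with mac_policy.h and the MAC_POLICY_OPS_VERSION check still catches genuine layout drift. A minimal sketch of that reserved-slot idiom, with hypothetical names:

    typedef void (reserved_hook_t)(void);

    struct example_ops {
    	int (*check_open)(int fd);
    	reserved_hook_t *reserved01;    /* formerly check_receive */
    	int (*check_close)(int fd);
    };

    static int  example_check_open(int fd)  { return fd >= 0 ? 0 : -1; }
    static int  example_check_close(int fd) { return fd >= 0 ? 0 : -1; }
    static void example_stub(void)          { }

    /* The retired hook keeps its offset and a defined target, so code built
     * against the old layout still lands on a harmless function. */
    static const struct example_ops example_ops = {
    	.check_open  = example_check_open,
    	.reserved01  = example_stub,
    	.check_close = example_check_close,
    };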
CHECK_SET_HOOK(system_check_audit) @@ -474,7 +470,7 @@ const static struct mac_policy_ops policy_ops = { CHECK_SET_HOOK(iokit_check_set_properties) - .mpo_reserved3 = (mpo_reserved_hook_t *)common_hook, + CHECK_SET_HOOK(vnode_check_supplemental_signature) CHECK_SET_HOOK(vnode_check_searchfs) @@ -508,7 +504,6 @@ const static struct mac_policy_ops policy_ops = { CHECK_SET_HOOK(vnode_find_sigs) - CHECK_SET_HOOK(kext_check_load) CHECK_SET_HOOK(kext_check_unload) diff --git a/bsd/kern/posix_sem.c b/bsd/kern/posix_sem.c index b1119d153..38106d043 100644 --- a/bsd/kern/posix_sem.c +++ b/bsd/kern/posix_sem.c @@ -83,14 +83,10 @@ #include #include +#define f_flag fp_glob->fg_flag +#define f_ops fp_glob->fg_ops +#define f_data fp_glob->fg_data -#define f_flag f_fglob->fg_flag -#define f_type f_fglob->fg_ops->fo_type -#define f_msgcount f_fglob->fg_msgcount -#define f_cred f_fglob->fg_cred -#define f_ops f_fglob->fg_ops -#define f_offset f_fglob->fg_offset -#define f_data f_fglob->fg_data #define PSEMNAMLEN 31 /* maximum name segment length we bother with */ struct pseminfo { @@ -119,7 +115,7 @@ struct pseminfo { struct psemcache { LIST_ENTRY(psemcache) psem_hash; /* hash chain */ struct pseminfo *pseminfo; /* vnode the name refers to */ - int psem_nlen; /* length of name */ + size_t psem_nlen; /* length of name */ char psem_name[PSEMNAMLEN + 1]; /* segment name */ }; #define PSEMCACHE_NULL (struct psemcache *)0 @@ -139,7 +135,7 @@ struct psemstats { struct psemname { char *psem_nameptr; /* pointer to looked up name */ - long psem_namelen; /* length of looked up component */ + size_t psem_namelen; /* length of looked up component */ u_int32_t psem_hash; /* hash value of looked up name */ }; @@ -166,7 +162,7 @@ SYSCTL_LONG(_kern_posix_sem, OID_AUTO, max, CTLFLAG_RW | CTLFLAG_LOCKED, &posix_ struct psemstats psemstats; /* cache effectiveness statistics */ -static int psem_access(struct pseminfo *pinfo, int mode, kauth_cred_t cred); +static int psem_access(struct pseminfo *pinfo, mode_t mode, kauth_cred_t cred); static int psem_cache_search(struct pseminfo **, struct psemname *, struct psemcache **); static int psem_delete(struct pseminfo * pinfo); @@ -238,7 +234,7 @@ psem_cache_search(struct pseminfo **psemp, struct psemname *pnp, for (pcp = pcpp->lh_first; pcp != 0; pcp = nnp) { nnp = pcp->psem_hash.le_next; if (pcp->psem_nlen == pnp->psem_namelen && - !bcmp(pcp->psem_name, pnp->psem_nameptr, (u_int)pcp->psem_nlen)) { + !bcmp(pcp->psem_name, pnp->psem_nameptr, pcp->psem_nlen)) { break; } } @@ -298,7 +294,7 @@ psem_cache_add(struct pseminfo *psemp, struct psemname *pnp, struct psemcache *p */ pcp->pseminfo = psemp; pcp->psem_nlen = pnp->psem_namelen; - bcopy(pnp->psem_nameptr, pcp->psem_name, (unsigned)pcp->psem_nlen); + bcopy(pnp->psem_nameptr, pcp->psem_name, pcp->psem_nlen); pcpp = PSEMHASH(pnp); #if DIAGNOSTIC { @@ -321,7 +317,7 @@ psem_cache_add(struct pseminfo *psemp, struct psemname *pnp, struct psemcache *p void psem_cache_init(void) { - psemhashtbl = hashinit(posix_sem_max / 2, M_SHM, &psemhash); + psemhashtbl = hashinit((int)(posix_sem_max / 2), M_SHM, &psemhash); } static void @@ -381,6 +377,16 @@ out: return error; } +/* + * In order to support unnamed POSIX semaphores, the named + * POSIX semaphores will have to move out of the per-process + * open filetable, and into a global table that is shared with + * unnamed POSIX semaphores, since unnamed POSIX semaphores + * are typically used by declaring instances in shared memory, + * and there's no other way to do this without changing the + * 
underlying type, which would introduce binary compatibility + * issues. + */ int sem_open(proc_t p, struct sem_open_args *uap, user_addr_t *retval) { @@ -396,15 +402,15 @@ sem_open(proc_t p, struct sem_open_args *uap, user_addr_t *retval) char * nameptr; char * cp; size_t pathlen, plen; - int fmode; - int cmode = uap->mode; + mode_t fmode; + mode_t cmode = (mode_t)uap->mode; int value = uap->value; int incache = 0; struct psemcache *pcp = PSEMCACHE_NULL; kern_return_t kret = KERN_INVALID_ADDRESS; /* default fail */ AUDIT_ARG(fflags, uap->oflag); - AUDIT_ARG(mode, uap->mode); + AUDIT_ARG(mode, (mode_t)uap->mode); AUDIT_ARG(value32, uap->value); pinfo = PSEMINFO_NULL; @@ -413,11 +419,7 @@ sem_open(proc_t p, struct sem_open_args *uap, user_addr_t *retval) * Preallocate everything we might need up front to avoid taking * and dropping the lock, opening us up to race conditions. */ - MALLOC_ZONE(pnbuf, caddr_t, MAXPATHLEN, M_NAMEI, M_WAITOK | M_ZERO); - if (pnbuf == NULL) { - error = ENOSPC; - goto bad; - } + pnbuf = zalloc_flags(ZV_NAMEI, Z_WAITOK | Z_ZERO); pathlen = MAXPATHLEN; error = copyinstr(uap->name, pnbuf, MAXPATHLEN, &pathlen); @@ -490,7 +492,7 @@ sem_open(proc_t p, struct sem_open_args *uap, user_addr_t *retval) * to KERN_INVALID_ADDRESS, above. */ - fmode = FFLAGS(uap->oflag); + fmode = (mode_t)FFLAGS(uap->oflag); if ((fmode & O_CREAT)) { if ((value < 0) || (value > SEM_VALUE_MAX)) { @@ -636,7 +638,7 @@ sem_open(proc_t p, struct sem_open_args *uap, user_addr_t *retval) proc_fdunlock(p); *retval = CAST_USER_ADDR_T(indx); - FREE_ZONE(pnbuf, MAXPATHLEN, M_NAMEI); + zfree(ZV_NAMEI, pnbuf); return 0; bad_locked: @@ -671,7 +673,7 @@ bad: } if (pnbuf != NULL) { - FREE_ZONE(pnbuf, MAXPATHLEN, M_NAMEI); + zfree(ZV_NAMEI, pnbuf); } return error; } @@ -680,9 +682,9 @@ bad: * XXX This code is repeated in several places */ static int -psem_access(struct pseminfo *pinfo, int mode, kauth_cred_t cred) +psem_access(struct pseminfo *pinfo, mode_t mode, kauth_cred_t cred) { - int mode_req = ((mode & FREAD) ? S_IRUSR : 0) | + mode_t mode_req = ((mode & FREAD) ? S_IRUSR : 0) | ((mode & FWRITE) ? S_IWUSR : 0); /* Otherwise, user id 0 always gets access. 
*/ @@ -744,10 +746,8 @@ sem_unlink(__unused proc_t p, struct sem_unlink_args *uap, __unused int32_t *ret pinfo = PSEMINFO_NULL; - MALLOC_ZONE(pnbuf, caddr_t, MAXPATHLEN, M_NAMEI, M_WAITOK); - if (pnbuf == NULL) { - return ENOSPC; /* XXX non-standard */ - } + pnbuf = zalloc(ZV_NAMEI); + pathlen = MAXPATHLEN; error = copyinstr(uap->name, pnbuf, MAXPATHLEN, &pathlen); if (error) { @@ -807,7 +807,7 @@ sem_unlink(__unused proc_t p, struct sem_unlink_args *uap, __unused int32_t *ret PSEM_SUBSYS_UNLOCK(); bad: - FREE_ZONE(pnbuf, MAXPATHLEN, M_NAMEI); + zfree(ZV_NAMEI, pnbuf); return error; } @@ -816,30 +816,19 @@ sem_close(proc_t p, struct sem_close_args *uap, __unused int32_t *retval) { int fd = CAST_DOWN_EXPLICIT(int, uap->sem); struct fileproc *fp; - int error = 0; AUDIT_ARG(fd, fd); /* XXX This seems wrong; uap->sem is a pointer */ proc_fdlock(p); - error = fp_lookup(p, fd, &fp, 1); - if (error) { + if ((fp = fp_get_noref_locked(p, fd)) == NULL) { proc_fdunlock(p); - return error; + return EBADF; } - if (fp->f_type != DTYPE_PSXSEM) { - fp_drop(p, fd, fp, 1); + if (FILEGLOB_DTYPE(fp->fp_glob) != DTYPE_PSXSEM) { proc_fdunlock(p); return EBADF; } - procfdtbl_markclosefd(p, fd); - /* release the ref returned from fp_lookup before calling drain */ - (void) os_ref_release_locked(&fp->f_iocount); - fileproc_drain(p, fp); - fdrelse(p, fd); - error = closef_locked(fp, fp->f_fglob, p); - fileproc_free(fp); - proc_fdunlock(p); - return error; + return fp_close_and_unlock(p, fd, fp, 0); } int @@ -859,14 +848,12 @@ sem_wait_nocancel(proc_t p, struct sem_wait_nocancel_args *uap, __unused int32_t kern_return_t kret; int error; - error = fp_getfpsem(p, fd, &fp, &pnode); + error = fp_get_ftype(p, fd, DTYPE_PSXSEM, EBADF, &fp); if (error) { return error; } - if (((pnode = (struct psemnode *)fp->f_data)) == PSEMNODE_NULL) { - error = EINVAL; - goto out; - } + pnode = (struct psemnode *)fp->f_data; + PSEM_SUBSYS_LOCK(); if ((pinfo = pnode->pinfo) == PSEMINFO_NULL) { PSEM_SUBSYS_UNLOCK(); @@ -920,14 +907,12 @@ sem_trywait(proc_t p, struct sem_trywait_args *uap, __unused int32_t *retval) mach_timespec_t wait_time; int error; - error = fp_getfpsem(p, fd, &fp, &pnode); + error = fp_get_ftype(p, fd, DTYPE_PSXSEM, EBADF, &fp); if (error) { return error; } - if (((pnode = (struct psemnode *)fp->f_data)) == PSEMNODE_NULL) { - error = EINVAL; - goto out; - } + pnode = (struct psemnode *)fp->f_data; + PSEM_SUBSYS_LOCK(); if ((pinfo = pnode->pinfo) == PSEMINFO_NULL) { PSEM_SUBSYS_UNLOCK(); @@ -985,14 +970,12 @@ sem_post(proc_t p, struct sem_post_args *uap, __unused int32_t *retval) kern_return_t kret; int error; - error = fp_getfpsem(p, fd, &fp, &pnode); + error = fp_get_ftype(p, fd, DTYPE_PSXSEM, EBADF, &fp); if (error) { return error; } - if (((pnode = (struct psemnode *)fp->f_data)) == PSEMNODE_NULL) { - error = EINVAL; - goto out; - } + pnode = (struct psemnode *)fp->f_data; + PSEM_SUBSYS_LOCK(); if ((pinfo = pnode->pinfo) == PSEMINFO_NULL) { PSEM_SUBSYS_UNLOCK(); @@ -1036,7 +1019,7 @@ out: } static int -psem_close(struct psemnode *pnode, __unused int flags) +psem_close(struct psemnode *pnode) { int error = 0; struct pseminfo *pinfo; @@ -1074,15 +1057,11 @@ psem_close(struct psemnode *pnode, __unused int flags) static int psem_closefile(struct fileglob *fg, __unused vfs_context_t ctx) { - int error; - /* * Not locked as psem_close is called only from here and is locked * properly */ - error = psem_close(((struct psemnode *)fg->fg_data), fg->fg_flag); - - return error; + return psem_close((struct psemnode 
*)fg->fg_data); } static int @@ -1149,7 +1128,7 @@ psem_label_associate(struct fileproc *fp, struct vnode *vp, vfs_context_t ctx) struct pseminfo *psem; PSEM_SUBSYS_LOCK(); - pnode = (struct psemnode *)fp->f_fglob->fg_data; + pnode = (struct psemnode *)fp->fp_glob->fg_data; if (pnode != NULL) { psem = pnode->pinfo; if (psem != NULL) { diff --git a/bsd/kern/posix_shm.c b/bsd/kern/posix_shm.c index 29c89efb9..bffc46999 100644 --- a/bsd/kern/posix_shm.c +++ b/bsd/kern/posix_shm.c @@ -86,13 +86,9 @@ #include #include -#define f_flag f_fglob->fg_flag -#define f_type f_fglob->fg_ops->fo_type -#define f_msgcount f_fglob->fg_msgcount -#define f_cred f_fglob->fg_cred -#define f_ops f_fglob->fg_ops -#define f_offset f_fglob->fg_offset -#define f_data f_fglob->fg_data +#define f_flag fp_glob->fg_flag +#define f_ops fp_glob->fg_ops +#define f_data fp_glob->fg_data /* * Used to construct the list of memory objects @@ -351,12 +347,12 @@ shm_open(proc_t p, struct shm_open_args *uap, int32_t *retval) pshmnode_t *new_pnode = NULL; struct fileproc *fp = NULL; int fmode; - int cmode = uap->mode; + mode_t cmode = (mode_t)uap->mode; bool incache = false; bool have_label = false; AUDIT_ARG(fflags, uap->oflag); - AUDIT_ARG(mode, uap->mode); + AUDIT_ARG(mode, cmode); /* * Allocate data structures we need. We parse the userspace name into @@ -565,7 +561,7 @@ pshm_truncate( user_map = current_map(); - if (fp->f_type != DTYPE_PSXSHM) { + if (FILEGLOB_DTYPE(fp->fp_glob) != DTYPE_PSXSHM) { return EINVAL; } @@ -738,7 +734,7 @@ pshm_stat(pshmnode_t *pnode, void *ub, int isstat64) static int pshm_access(pshm_info_t *pinfo, int mode, kauth_cred_t cred, __unused proc_t p) { - int mode_req = ((mode & FREAD) ? S_IRUSR : 0) | + mode_t mode_req = ((mode & FREAD) ? S_IRUSR : 0) | ((mode & FWRITE) ? S_IWUSR : 0); /* Otherwise, user id 0 always gets access. */ @@ -901,7 +897,7 @@ pshm_mmap( if (file_pos >= map_pos + pshmobj->pshmo_size) { continue; } - map_size = pshmobj->pshmo_size - (file_pos - map_pos); + map_size = (vm_map_size_t)(pshmobj->pshmo_size - (file_pos - map_pos)); if (map_size > user_size) { map_size = user_size; } @@ -948,7 +944,7 @@ out_deref: switch (kret) { case KERN_SUCCESS: - *retval = (user_start_addr + pageoff); + *retval = (user_addr_t)(user_start_addr + pageoff); return 0; case KERN_INVALID_ADDRESS: case KERN_NO_SPACE: diff --git a/bsd/kern/proc_info.c b/bsd/kern/proc_info.c index d4bc5e794..bcdc1c18d 100644 --- a/bsd/kern/proc_info.c +++ b/bsd/kern/proc_info.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005-2016 Apple Inc. All rights reserved. + * Copyright (c) 2005-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -108,7 +108,7 @@ uint64_t get_dispatchqueue_offset_from_proc(void *); uint64_t get_dispatchqueue_serialno_offset_from_proc(void *); uint64_t get_dispatchqueue_label_offset_from_proc(void *p); uint64_t get_return_to_kernel_offset_from_proc(void *p); -int proc_info_internal(int callnum, int pid, int flavor, uint64_t arg, user_addr_t buffer, uint32_t buffersize, int32_t * retval); +int proc_info_internal(int callnum, int pid, uint32_t flags, uint64_t ext_id, int flavor, uint64_t arg, user_addr_t buffer, uint32_t buffersize, int32_t * retval); /* * TODO: Replace the noinline attribute below. 
Currently, it serves @@ -138,70 +138,69 @@ int proc_info_internal(int callnum, int pid, int flavor, uint64_t arg, user_addr */ /* protos for proc_info calls */ -int __attribute__ ((noinline)) proc_listpids(uint32_t type, uint32_t tyoneinfo, user_addr_t buffer, uint32_t buffersize, int32_t * retval); -int __attribute__ ((noinline)) proc_pidinfo(int pid, int flavor, uint64_t arg, user_addr_t buffer, uint32_t buffersize, int32_t * retval); -int __attribute__ ((noinline)) proc_pidfdinfo(int pid, int flavor, int fd, user_addr_t buffer, uint32_t buffersize, int32_t * retval); -int __attribute__ ((noinline)) proc_kernmsgbuf(user_addr_t buffer, uint32_t buffersize, int32_t * retval); -int __attribute__ ((noinline)) proc_setcontrol(int pid, int flavor, uint64_t arg, user_addr_t buffer, uint32_t buffersize, int32_t * retval); -int __attribute__ ((noinline)) proc_pidfileportinfo(int pid, int flavor, mach_port_name_t name, user_addr_t buffer, uint32_t buffersize, int32_t *retval); -int __attribute__ ((noinline)) proc_dirtycontrol(int pid, int flavor, uint64_t arg, int32_t * retval); -int __attribute__ ((noinline)) proc_terminate(int pid, int32_t * retval); -int __attribute__ ((noinline)) proc_pid_rusage(int pid, int flavor, user_addr_t buffer, int32_t * retval); -int __attribute__ ((noinline)) proc_pidoriginatorinfo(int pid, int flavor, user_addr_t buffer, uint32_t buffersize, int32_t * retval); -int __attribute__ ((noinline)) proc_listcoalitions(int flavor, int coaltype, user_addr_t buffer, uint32_t buffersize, int32_t *retval); -int __attribute__ ((noinline)) proc_can_use_foreground_hw(int pid, user_addr_t reason, uint32_t resonsize, int32_t *retval); +static int __attribute__ ((noinline)) proc_listpids(uint32_t type, uint32_t tyoneinfo, user_addr_t buffer, uint32_t buffersize, int32_t * retval); +static int __attribute__ ((noinline)) proc_pidinfo(int pid, uint32_t flags, uint64_t ext_id, int flavor, uint64_t arg, user_addr_t buffer, uint32_t buffersize, int32_t * retval); +static int __attribute__ ((noinline)) proc_pidfdinfo(int pid, int flavor, int fd, user_addr_t buffer, uint32_t buffersize, int32_t * retval); +static int __attribute__ ((noinline)) proc_kernmsgbuf(user_addr_t buffer, uint32_t buffersize, int32_t * retval); +static int __attribute__ ((noinline)) proc_setcontrol(int pid, int flavor, uint64_t arg, user_addr_t buffer, uint32_t buffersize, int32_t * retval); +static int __attribute__ ((noinline)) proc_pidfileportinfo(int pid, int flavor, mach_port_name_t name, user_addr_t buffer, uint32_t buffersize, int32_t *retval); +static int __attribute__ ((noinline)) proc_dirtycontrol(int pid, int flavor, uint64_t arg, int32_t * retval); +static int __attribute__ ((noinline)) proc_terminate(int pid, int32_t * retval); +static int __attribute__ ((noinline)) proc_pid_rusage(int pid, int flavor, user_addr_t buffer, int32_t * retval); +static int __attribute__ ((noinline)) proc_pidoriginatorinfo(int pid, int flavor, user_addr_t buffer, uint32_t buffersize, int32_t * retval); +static int __attribute__ ((noinline)) proc_listcoalitions(int flavor, int coaltype, user_addr_t buffer, uint32_t buffersize, int32_t *retval); +static int __attribute__ ((noinline)) proc_can_use_foreground_hw(int pid, user_addr_t reason, uint32_t resonsize, int32_t *retval); /* protos for procpidinfo calls */ -int __attribute__ ((noinline)) proc_pidfdlist(proc_t p, user_addr_t buffer, uint32_t buffersize, int32_t *retval); -int __attribute__ ((noinline)) proc_pidbsdinfo(proc_t p, struct proc_bsdinfo *pbsd, int zombie); -int 
__attribute__ ((noinline)) proc_pidshortbsdinfo(proc_t p, struct proc_bsdshortinfo *pbsd_shortp, int zombie); -int __attribute__ ((noinline)) proc_pidtaskinfo(proc_t p, struct proc_taskinfo *ptinfo); -int __attribute__ ((noinline)) proc_pidthreadinfo(proc_t p, uint64_t arg, bool thuniqueid, struct proc_threadinfo *pthinfo); -int __attribute__ ((noinline)) proc_pidthreadpathinfo(proc_t p, uint64_t arg, struct proc_threadwithpathinfo *pinfo); -int __attribute__ ((noinline)) proc_pidlistthreads(proc_t p, bool thuniqueid, user_addr_t buffer, uint32_t buffersize, int32_t *retval); -int __attribute__ ((noinline)) proc_pidregioninfo(proc_t p, uint64_t arg, user_addr_t buffer, uint32_t buffersize, int32_t *retval); -int __attribute__ ((noinline)) proc_pidregionpathinfo(proc_t p, uint64_t arg, user_addr_t buffer, uint32_t buffersize, int32_t *retval); -int __attribute__ ((noinline)) proc_pidregionpathinfo2(proc_t p, uint64_t arg, user_addr_t buffer, uint32_t buffersize, int32_t *retval); -int __attribute__ ((noinline)) proc_pidregionpathinfo3(proc_t p, uint64_t arg, user_addr_t buffer, uint32_t buffersize, int32_t *retval); -int __attribute__ ((noinline)) proc_pidvnodepathinfo(proc_t p, uint64_t arg, user_addr_t buffer, uint32_t buffersize, int32_t *retval); -int __attribute__ ((noinline)) proc_pidpathinfo(proc_t p, uint64_t arg, user_addr_t buffer, uint32_t buffersize, int32_t *retval); -int __attribute__ ((noinline)) proc_pidworkqueueinfo(proc_t p, struct proc_workqueueinfo *pwqinfo); -int __attribute__ ((noinline)) proc_pidfileportlist(proc_t p, user_addr_t buffer, uint32_t buffersize, int32_t *retval); -void __attribute__ ((noinline)) proc_piduniqidentifierinfo(proc_t p, struct proc_uniqidentifierinfo *p_uniqidinfo); -void __attribute__ ((noinline)) proc_archinfo(proc_t p, struct proc_archinfo *pai); -void __attribute__ ((noinline)) proc_pidcoalitioninfo(proc_t p, struct proc_pidcoalitioninfo *pci); -int __attribute__ ((noinline)) proc_pidnoteexit(proc_t p, uint64_t arg, uint32_t *data); -int __attribute__ ((noinline)) proc_pidexitreasoninfo(proc_t p, struct proc_exitreasoninfo *peri, struct proc_exitreasonbasicinfo *pberi); -int __attribute__ ((noinline)) proc_pidoriginatorpid_uuid(uuid_t uuid, uint32_t buffersize, pid_t *pid); -int __attribute__ ((noinline)) proc_pidlistuptrs(proc_t p, user_addr_t buffer, uint32_t buffersize, int32_t *retval); -int __attribute__ ((noinline)) proc_piddynkqueueinfo(pid_t pid, int flavor, kqueue_id_t id, user_addr_t buffer, uint32_t buffersize, int32_t *retval); -int __attribute__ ((noinline)) proc_pidregionpath(proc_t p, uint64_t arg, user_addr_t buffer, __unused uint32_t buffersize, int32_t *retval); -int __attribute__ ((noinline)) proc_pidipctableinfo(proc_t p, struct proc_ipctableinfo *table_info); - -#if !CONFIG_EMBEDDED +static int __attribute__ ((noinline)) proc_pidfdlist(proc_t p, user_addr_t buffer, uint32_t buffersize, int32_t *retval); +static int __attribute__ ((noinline)) proc_pidbsdinfo(proc_t p, struct proc_bsdinfo *pbsd, int zombie); +static int __attribute__ ((noinline)) proc_pidshortbsdinfo(proc_t p, struct proc_bsdshortinfo *pbsd_shortp, int zombie); +static int __attribute__ ((noinline)) proc_pidtaskinfo(proc_t p, struct proc_taskinfo *ptinfo); +static int __attribute__ ((noinline)) proc_pidthreadinfo(proc_t p, uint64_t arg, bool thuniqueid, struct proc_threadinfo *pthinfo); +static int __attribute__ ((noinline)) proc_pidthreadpathinfo(proc_t p, uint64_t arg, struct proc_threadwithpathinfo *pinfo); +static int __attribute__ ((noinline)) 
proc_pidlistthreads(proc_t p, bool thuniqueid, user_addr_t buffer, uint32_t buffersize, int32_t *retval); +static int __attribute__ ((noinline)) proc_pidregioninfo(proc_t p, uint64_t arg, user_addr_t buffer, uint32_t buffersize, int32_t *retval); +static int __attribute__ ((noinline)) proc_pidregionpathinfo(proc_t p, uint64_t arg, user_addr_t buffer, uint32_t buffersize, int32_t *retval); +static int __attribute__ ((noinline)) proc_pidregionpathinfo2(proc_t p, uint64_t arg, user_addr_t buffer, uint32_t buffersize, int32_t *retval); +static int __attribute__ ((noinline)) proc_pidregionpathinfo3(proc_t p, uint64_t arg, user_addr_t buffer, uint32_t buffersize, int32_t *retval); +static int __attribute__ ((noinline)) proc_pidvnodepathinfo(proc_t p, uint64_t arg, user_addr_t buffer, uint32_t buffersize, int32_t *retval); +static int __attribute__ ((noinline)) proc_pidpathinfo(proc_t p, uint64_t arg, user_addr_t buffer, uint32_t buffersize, int32_t *retval); +static int __attribute__ ((noinline)) proc_pidworkqueueinfo(proc_t p, struct proc_workqueueinfo *pwqinfo); +static int __attribute__ ((noinline)) proc_pidfileportlist(proc_t p, user_addr_t buffer, size_t buffersize, int32_t *retval); +extern void __attribute__ ((noinline)) proc_piduniqidentifierinfo(proc_t p, struct proc_uniqidentifierinfo *p_uniqidinfo); +static void __attribute__ ((noinline)) proc_archinfo(proc_t p, struct proc_archinfo *pai); +static void __attribute__ ((noinline)) proc_pidcoalitioninfo(proc_t p, struct proc_pidcoalitioninfo *pci); +static int __attribute__ ((noinline)) proc_pidnoteexit(proc_t p, uint64_t arg, uint32_t *data); +static int __attribute__ ((noinline)) proc_pidexitreasoninfo(proc_t p, struct proc_exitreasoninfo *peri, struct proc_exitreasonbasicinfo *pberi); +static int __attribute__ ((noinline)) proc_pidoriginatorpid_uuid(uuid_t uuid, uint32_t buffersize, pid_t *pid); +static int __attribute__ ((noinline)) proc_pidlistuptrs(proc_t p, user_addr_t buffer, uint32_t buffersize, int32_t *retval); +static int __attribute__ ((noinline)) proc_piddynkqueueinfo(pid_t pid, int flavor, kqueue_id_t id, user_addr_t buffer, uint32_t buffersize, int32_t *retval); +static int __attribute__ ((noinline)) proc_pidregionpath(proc_t p, uint64_t arg, user_addr_t buffer, __unused uint32_t buffersize, int32_t *retval); +static int __attribute__ ((noinline)) proc_pidipctableinfo(proc_t p, struct proc_ipctableinfo *table_info); + +#if CONFIG_PROC_UDATA_STORAGE int __attribute__ ((noinline)) proc_udata_info(pid_t pid, int flavor, user_addr_t buffer, uint32_t buffersize, int32_t *retval); #endif /* protos for proc_pidfdinfo calls */ -int __attribute__ ((noinline)) pid_vnodeinfo(vnode_t vp, uint32_t vid, struct fileproc * fp, proc_t proc, int fd, user_addr_t buffer, uint32_t buffersize, int32_t * retval); -int __attribute__ ((noinline)) pid_vnodeinfopath(vnode_t vp, uint32_t vid, struct fileproc * fp, proc_t proc, int fd, user_addr_t buffer, uint32_t buffersize, int32_t * retval); -int __attribute__ ((noinline)) pid_socketinfo(socket_t so, struct fileproc *fp, proc_t proc, int fd, user_addr_t buffer, uint32_t buffersize, int32_t * retval); -int __attribute__ ((noinline)) pid_pseminfo(struct psemnode * psem, struct fileproc * fp, proc_t proc, int fd, user_addr_t buffer, uint32_t buffersize, int32_t * retval); -int __attribute__ ((noinline)) pid_pshminfo(struct pshmnode * pshm, struct fileproc * fp, proc_t proc, int fd, user_addr_t buffer, uint32_t buffersize, int32_t * retval); -int __attribute__ ((noinline)) pid_pipeinfo(struct pipe * 
p, struct fileproc * fp, proc_t proc, int fd, user_addr_t buffer, uint32_t buffersize, int32_t * retval); -int __attribute__ ((noinline)) pid_kqueueinfo(struct kqueue * kq, struct fileproc * fp, proc_t proc, int fd, user_addr_t buffer, uint32_t buffersize, int32_t * retval); -int __attribute__ ((noinline)) pid_atalkinfo(struct atalk * at, struct fileproc * fp, proc_t proc, int fd, user_addr_t buffer, uint32_t buffersize, int32_t * retval); +static int __attribute__ ((noinline)) pid_vnodeinfo(vnode_t vp, struct fileproc * fp, proc_t proc, int fd, user_addr_t buffer, uint32_t buffersize, int32_t * retval); +static int __attribute__ ((noinline)) pid_vnodeinfopath(vnode_t vp, struct fileproc * fp, proc_t proc, int fd, user_addr_t buffer, uint32_t buffersize, int32_t * retval); +static int __attribute__ ((noinline)) pid_socketinfo(socket_t so, struct fileproc *fp, proc_t proc, int fd, user_addr_t buffer, uint32_t buffersize, int32_t * retval); +static int __attribute__ ((noinline)) pid_pseminfo(struct psemnode * psem, struct fileproc * fp, proc_t proc, int fd, user_addr_t buffer, uint32_t buffersize, int32_t * retval); +static int __attribute__ ((noinline)) pid_pshminfo(struct pshmnode * pshm, struct fileproc * fp, proc_t proc, int fd, user_addr_t buffer, uint32_t buffersize, int32_t * retval); +static int __attribute__ ((noinline)) pid_pipeinfo(struct pipe * p, struct fileproc * fp, proc_t proc, int fd, user_addr_t buffer, uint32_t buffersize, int32_t * retval); +static int __attribute__ ((noinline)) pid_kqueueinfo(struct kqueue * kq, struct fileproc * fp, proc_t proc, int fd, user_addr_t buffer, uint32_t buffersize, int32_t * retval); /* protos for misc */ -int fill_vnodeinfo(vnode_t vp, struct vnode_info *vinfo, boolean_t check_fsgetpath); -void fill_fileinfo(struct fileproc * fp, proc_t proc, int fd, struct proc_fileinfo * finfo); -int proc_security_policy(proc_t targetp, int callnum, int flavor, boolean_t check_same_user); +static int fill_vnodeinfo(vnode_t vp, struct vnode_info *vinfo, boolean_t check_fsgetpath); +static void fill_fileinfo(struct fileproc *fp, proc_t proc, int fd, struct proc_fileinfo * finfo); +static int proc_security_policy(proc_t targetp, int callnum, int flavor, boolean_t check_same_user); static void munge_vinfo_stat(struct stat64 *sbp, struct vinfo_stat *vsbp); static int proc_piduuidinfo(pid_t pid, uuid_t uuid_buf, uint32_t buffersize); -int proc_pidpathinfo_internal(proc_t p, __unused uint64_t arg, char *buf, uint32_t buffersize, __unused int32_t *retval); +extern int proc_pidpathinfo_internal(proc_t p, __unused uint64_t arg, char *buf, uint32_t buffersize, __unused int32_t *retval); extern int cansignal(struct proc *, kauth_cred_t, struct proc *, int); extern int proc_get_rusage(proc_t proc, int flavor, user_addr_t buffer, int is_zombie); @@ -257,19 +256,30 @@ get_return_to_kernel_offset_from_proc(void *p) int proc_info(__unused struct proc *p, struct proc_info_args * uap, int32_t *retval) { - return proc_info_internal(uap->callnum, uap->pid, uap->flavor, uap->arg, uap->buffer, uap->buffersize, retval); + return proc_info_internal(uap->callnum, uap->pid, 0, 0, uap->flavor, uap->arg, uap->buffer, uap->buffersize, retval); } +int +proc_info_extended_id(__unused struct proc *p, struct proc_info_extended_id_args *uap, int32_t *retval) +{ + uint32_t flags = uap->flags; + + if ((flags & (PIF_COMPARE_IDVERSION | PIF_COMPARE_UNIQUEID)) == (PIF_COMPARE_IDVERSION | PIF_COMPARE_UNIQUEID)) { + return EINVAL; + } + + return proc_info_internal(uap->callnum, uap->pid, flags, 
uap->ext_id, uap->flavor, uap->arg, uap->buffer, uap->buffersize, retval); +} int -proc_info_internal(int callnum, int pid, int flavor, uint64_t arg, user_addr_t buffer, uint32_t buffersize, int32_t * retval) +proc_info_internal(int callnum, int pid, uint32_t flags, uint64_t ext_id, int flavor, uint64_t arg, user_addr_t buffer, uint32_t buffersize, int32_t * retval) { switch (callnum) { case PROC_INFO_CALL_LISTPIDS: /* pid contains type and flavor contains typeinfo */ return proc_listpids(pid, flavor, buffer, buffersize, retval); case PROC_INFO_CALL_PIDINFO: - return proc_pidinfo(pid, flavor, arg, buffer, buffersize, retval); + return proc_pidinfo(pid, flags, ext_id, flavor, arg, buffer, buffersize, retval); case PROC_INFO_CALL_PIDFDINFO: return proc_pidfdinfo(pid, flavor, (int)arg, buffer, buffersize, retval); case PROC_INFO_CALL_KERNMSGBUF: @@ -293,10 +303,10 @@ proc_info_internal(int callnum, int pid, int flavor, uint64_t arg, user_addr_t b return proc_can_use_foreground_hw(pid, buffer, buffersize, retval); case PROC_INFO_CALL_PIDDYNKQUEUEINFO: return proc_piddynkqueueinfo(pid, flavor, (kqueue_id_t)arg, buffer, buffersize, retval); -#if !CONFIG_EMBEDDED +#if CONFIG_PROC_UDATA_STORAGE case PROC_INFO_CALL_UDATA_INFO: return proc_udata_info(pid, flavor, buffer, buffersize, retval); -#endif /* !CONFIG_EMBEDDED */ +#endif /* CONFIG_PROC_UDATA_STORAGE */ default: return EINVAL; } @@ -310,8 +320,8 @@ proc_listpids(uint32_t type, uint32_t typeinfo, user_addr_t buffer, uint32_t bu { uint32_t numprocs = 0; uint32_t wantpids; - char * kbuf; - int * ptr; + int *kbuf; + int *ptr; uint32_t n; int skip; struct proc * p; @@ -341,16 +351,16 @@ proc_listpids(uint32_t type, uint32_t typeinfo, user_addr_t buffer, uint32_t bu numprocs = wantpids; } - kbuf = (char *)kalloc((vm_size_t)(numprocs * sizeof(int))); + kbuf = kheap_alloc(KHEAP_TEMP, numprocs * sizeof(int), + Z_WAITOK | Z_ZERO); if (kbuf == NULL) { return ENOMEM; } - bzero(kbuf, numprocs * sizeof(int)); proc_list_lock(); n = 0; - ptr = (int *)kbuf; + ptr = kbuf; current_list = &allproc; proc_loop: LIST_FOREACH(p, current_list, p_list) { @@ -436,12 +446,12 @@ proc_loop: proc_list_unlock(); - ptr = (int *)kbuf; + ptr = kbuf; error = copyout((caddr_t)ptr, buffer, n * sizeof(int)); if (error == 0) { *retval = (n * sizeof(int)); } - kfree(kbuf, (vm_size_t)(numprocs * sizeof(int))); + kheap_free(KHEAP_TEMP, kbuf, numprocs * sizeof(int)); return error; } @@ -449,16 +459,36 @@ proc_loop: /********************************** proc_pidfdlist routines ********************************/ +static size_t +proc_fdlist_internal(proc_t p, struct proc_fdinfo *pfd, size_t numfds) +{ + struct fileproc *fp; + size_t count = 0; + + proc_fdlock(p); + + fdt_foreach(fp, p) { + if (count >= numfds) { + break; + } + file_type_t fdtype = FILEGLOB_DTYPE(fp->fp_glob); + pfd[count].proc_fd = fdt_foreach_fd(); + pfd[count].proc_fdtype = (fdtype != DTYPE_ATALK) ? 
+ fdtype : PROX_FDTYPE_ATALK; + count++; + } + + proc_fdunlock(p); + return count; +} + int proc_pidfdlist(proc_t p, user_addr_t buffer, uint32_t buffersize, int32_t *retval) { uint32_t numfds = 0; uint32_t needfds; char * kbuf; - struct proc_fdinfo * pfd; - struct fileproc * fp; - int n; - int count = 0; + uint32_t count = 0; int error = 0; if (p->p_fd->fd_nfiles > 0) { @@ -478,37 +508,48 @@ proc_pidfdlist(proc_t p, user_addr_t buffer, uint32_t buffersize, int32_t *retv numfds = needfds; } - kbuf = (char *)kalloc((vm_size_t)(numfds * sizeof(struct proc_fdinfo))); + kbuf = kheap_alloc(KHEAP_TEMP, numfds * sizeof(struct proc_fdinfo), + Z_WAITOK | Z_ZERO); if (kbuf == NULL) { return ENOMEM; } - bzero(kbuf, numfds * sizeof(struct proc_fdinfo)); - - proc_fdlock(p); - pfd = (struct proc_fdinfo *)kbuf; - - for (n = 0; ((n < (int)numfds) && (n < p->p_fd->fd_nfiles)); n++) { - if (((fp = p->p_fd->fd_ofiles[n]) != 0) - && ((p->p_fd->fd_ofileflags[n] & UF_RESERVED) == 0)) { - file_type_t fdtype = FILEGLOB_DTYPE(fp->f_fglob); - pfd->proc_fd = n; - pfd->proc_fdtype = (fdtype != DTYPE_ATALK) ? - fdtype : PROX_FDTYPE_ATALK; - count++; - pfd++; - } - } - proc_fdunlock(p); + /* cannot overflow due to count <= numfds */ + count = (uint32_t)proc_fdlist_internal(p, (struct proc_fdinfo *)kbuf, (size_t)numfds); error = copyout(kbuf, buffer, count * sizeof(struct proc_fdinfo)); - kfree(kbuf, (vm_size_t)(numfds * sizeof(struct proc_fdinfo))); + kheap_free(KHEAP_TEMP, kbuf, numfds * sizeof(struct proc_fdinfo)); if (error == 0) { - *retval = (count * sizeof(struct proc_fdinfo)); + *retval = count * sizeof(struct proc_fdinfo); } return error; } +/* + * KPI variant of proc_pidfdlist. + * + * Caller is responsible for adding margin to *count when calling this in + * circumstances where file descriptors can appear/disappear between the + * two calls to this function. + */ +int +proc_fdlist(proc_t p, struct proc_fdinfo *buf, size_t *count) +{ + if (p == NULL || count == NULL) { + return EINVAL; + } + + if (buf == NULL) { + proc_fdlock(p); + *count = (size_t)max(min(p->p_fd->fd_lastfile + 1, p->p_fd->fd_nfiles), 0); + proc_fdunlock(p); + return 0; + } + + *count = proc_fdlist_internal(p, buf, *count); + return 0; +} + /* * Helper functions for proc_pidfileportlist. 
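[Editor's note] The proc_fdlist() KPI added above uses a two-call convention: with buf == NULL it reports the current descriptor count, and a second call fills the buffer and rewrites *count with the number of entries actually copied. A minimal in-kernel usage sketch (illustrative only; the margin of 20 and the use of KHEAP_TEMP are assumptions of this example, not requirements of the KPI):

	size_t nfds = 0, nalloc;
	struct proc_fdinfo *buf;

	proc_fdlist(p, NULL, &nfds);            /* first call: how many fds right now */
	nalloc = nfds + 20;                     /* margin for fds opened before the next call */
	buf = kheap_alloc(KHEAP_TEMP, nalloc * sizeof(*buf), Z_WAITOK | Z_ZERO);
	if (buf != NULL) {
		nfds = nalloc;
		proc_fdlist(p, buf, &nfds);     /* second call: nfds now holds entries filled */
		/* ... consume buf[0 .. nfds) ... */
		kheap_free(KHEAP_TEMP, buf, nalloc * sizeof(*buf));
	}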
*/ @@ -516,7 +557,7 @@ static int proc_fileport_count(__unused mach_port_name_t name, __unused struct fileglob *fg, void *arg) { - uint32_t *counter = arg; + size_t *counter = arg; *counter += 1; return 0; @@ -547,17 +588,17 @@ proc_fileport_fdtype(mach_port_name_t name, struct fileglob *fg, void *arg) int proc_pidfileportlist(proc_t p, - user_addr_t buffer, uint32_t buffersize, int32_t *retval) + user_addr_t buffer, size_t buffersize, int32_t *retval) { void *kbuf; - vm_size_t kbufsize; + size_t kbufsize; struct proc_fileportinfo *pfi; - uint32_t needfileports, numfileports; + size_t needfileports, numfileports; struct fileport_fdtype_args ffa; int error; needfileports = buffersize / sizeof(*pfi); - if ((user_addr_t)0 == buffer || needfileports > (uint32_t)maxfiles) { + if ((user_addr_t)0 == buffer || needfileports > (size_t)maxfilesperproc) { /* * Either (i) the user is asking for a fileport count, * or (ii) the number of fileports they're asking for is @@ -583,7 +624,7 @@ proc_pidfileportlist(proc_t p, } if ((user_addr_t)0 == buffer) { numfileports += 20; /* accelerate convergence */ - *retval = numfileports * sizeof(*pfi); + *retval = (int32_t)MIN(numfileports * sizeof(*pfi), INT32_MAX); return 0; } if (needfileports > numfileports) { @@ -593,12 +634,11 @@ proc_pidfileportlist(proc_t p, assert(buffersize >= PROC_PIDLISTFILEPORTS_SIZE); - kbufsize = (vm_size_t)needfileports * sizeof(*pfi); - pfi = kbuf = kalloc(kbufsize); + kbufsize = needfileports * sizeof(*pfi); + pfi = kbuf = kheap_alloc(KHEAP_TEMP, kbufsize, Z_WAITOK | Z_ZERO); if (kbuf == NULL) { return ENOMEM; } - bzero(kbuf, kbufsize); ffa.ffa_pfi = pfi; ffa.ffa_pfi_end = pfi + needfileports; @@ -607,7 +647,7 @@ proc_pidfileportlist(proc_t p, case KERN_SUCCESS: error = 0; pfi = ffa.ffa_pfi; - if ((numfileports = pfi - (typeof(pfi))kbuf) == 0) { + if ((numfileports = (size_t)(pfi - (typeof(pfi))kbuf)) == 0) { break; } if (numfileports > needfileports) { @@ -625,9 +665,9 @@ proc_pidfileportlist(proc_t p, error = EINVAL; break; } - kfree(kbuf, kbufsize); + kheap_free(KHEAP_TEMP, kbuf, kbufsize); if (error == 0) { - *retval = numfileports * sizeof(*pfi); + *retval = (int32_t)MIN(numfileports * sizeof(*pfi), INT32_MAX); } return error; } @@ -703,11 +743,11 @@ proc_pidbsdinfo(proc_t p, struct proc_bsdinfo * pbsd, int zombie) } } -#if !CONFIG_EMBEDDED +#if CONFIG_DELAY_IDLE_SLEEP if ((p->p_flag & P_DELAYIDLESLEEP) == P_DELAYIDLESLEEP) { pbsd->pbi_flags |= PROC_FLAG_DELAYIDLESLEEP; } -#endif /* !CONFIG_EMBEDDED */ +#endif /* CONFIG_DELAY_IDLE_SLEEP */ switch (PROC_CONTROL_STATE(p)) { case P_PCTHROTTLE: @@ -800,11 +840,11 @@ proc_pidshortbsdinfo(proc_t p, struct proc_bsdshortinfo * pbsd_shortp, int zombi if ((p->p_flag & P_EXEC) == P_EXEC) { pbsd_shortp->pbsi_flags |= PROC_FLAG_EXEC; } -#if !CONFIG_EMBEDDED +#if CONFIG_DELAY_IDLE_SLEEP if ((p->p_flag & P_DELAYIDLESLEEP) == P_DELAYIDLESLEEP) { pbsd_shortp->pbsi_flags |= PROC_FLAG_DELAYIDLESLEEP; } -#endif /* !CONFIG_EMBEDDED */ +#endif /* CONFIG_DELAY_IDLE_SLEEP */ switch (PROC_CONTROL_STATE(p)) { case P_PCTHROTTLE: @@ -894,6 +934,8 @@ bsd_getthreadname(void *uth, char *buffer) struct uthread *ut = (struct uthread *)uth; if (ut->pth_name) { bcopy(ut->pth_name, buffer, MAXTHREADNAMESIZE); + } else { + *buffer = '\0'; } } @@ -1015,16 +1057,16 @@ proc_pidlistthreads(proc_t p, bool thuniqueid, user_addr_t buffer, uint32_t buf numthreads = count; } - kbuf = (void *)kalloc(numthreads * sizeof(uint64_t)); + kbuf = kheap_alloc(KHEAP_TEMP, + numthreads * sizeof(uint64_t), Z_WAITOK | Z_ZERO); if 
(kbuf == NULL) { return ENOMEM; } - bzero(kbuf, numthreads * sizeof(uint64_t)); ret = fill_taskthreadlist(p->task, kbuf, numthreads, thuniqueid); error = copyout(kbuf, buffer, ret); - kfree(kbuf, numthreads * sizeof(uint64_t)); + kheap_free(KHEAP_TEMP, kbuf, numthreads * sizeof(uint64_t)); if (error == 0) { *retval = ret; } @@ -1129,14 +1171,12 @@ proc_pidregionpathinfo2(proc_t p, uint64_t arg, user_addr_t buffer, __unused uin int proc_pidregionpath(proc_t p, uint64_t arg, user_addr_t buffer, __unused uint32_t buffersize, int32_t *retval) { - struct proc_regionpath path; + struct proc_regionpath path = {}; int ret, error = 0; uintptr_t vnodeaddr = 0; uint32_t vnodeid = 0; vnode_t vp; - bzero(&path, sizeof(struct proc_regionpath)); - ret = find_region_details(p->task, (vm_map_offset_t) arg, (uintptr_t *)&vnodeaddr, (uint32_t *)&vnodeid, &path.prpo_addr, &path.prpo_regionlength); @@ -1312,18 +1352,16 @@ proc_pidpathinfo(proc_t p, __unused uint64_t arg, user_addr_t buffer, uint32_t b return ESRCH; } - buf = (char *)kalloc(buffersize); + buf = kheap_alloc(KHEAP_TEMP, buffersize, Z_WAITOK | Z_ZERO); if (buf == NULL) { return ENOMEM; } - bzero(buf, buffersize); - error = proc_pidpathinfo_internal(p, arg, buf, buffersize, retval); if (error == 0) { error = copyout(buf, buffer, len); } - kfree(buf, buffersize); + kheap_free(KHEAP_TEMP, buf, buffersize); return error; } @@ -1379,6 +1417,7 @@ proc_piduniqidentifierinfo(proc_t p, struct proc_uniqidentifierinfo *p_uniqidinf p_uniqidinfo->p_uniqueid = proc_uniqueid(p); proc_getexecutableuuid(p, (unsigned char *)&p_uniqidinfo->p_uuid, sizeof(p_uniqidinfo->p_uuid)); p_uniqidinfo->p_puniqueid = proc_puniqueid(p); + p_uniqidinfo->p_idversion = proc_pidversion(p); p_uniqidinfo->p_reserve2 = 0; p_uniqidinfo->p_reserve3 = 0; p_uniqidinfo->p_reserve4 = 0; @@ -1603,12 +1642,11 @@ proc_listcoalitions(int flavor, int type, user_addr_t buffer, } k_buffersize = ncoals * elem_size; - coalinfo = kalloc((vm_size_t)k_buffersize); + coalinfo = kheap_alloc(KHEAP_TEMP, k_buffersize, Z_WAITOK | Z_ZERO); if (!coalinfo) { error = ENOMEM; goto out; } - bzero(coalinfo, k_buffersize); switch (flavor) { case LISTCOALITIONS_ALL_COALS: @@ -1649,7 +1687,7 @@ proc_listcoalitions(int flavor, int type, user_addr_t buffer, out: if (coalinfo) { - kfree(coalinfo, k_buffersize); + kheap_free(KHEAP_TEMP, coalinfo, k_buffersize); } return error; @@ -1841,7 +1879,7 @@ out: int -proc_pidinfo(int pid, int flavor, uint64_t arg, user_addr_t buffer, uint32_t buffersize, int32_t * retval) +proc_pidinfo(int pid, uint32_t flags, uint64_t ext_id, int flavor, uint64_t arg, user_addr_t buffer, uint32_t buffersize, int32_t * retval) { struct proc * p = PROC_NULL; int error = ENOTSUP; @@ -1852,7 +1890,7 @@ proc_pidinfo(int pid, int flavor, uint64_t arg, user_addr_t buffer, uint32_t bu int zombie = 0; bool thuniqueid = false; int uniqidversion = 0; - boolean_t check_same_user; + bool check_same_user; switch (flavor) { case PROC_PIDLISTFDS: @@ -2009,6 +2047,15 @@ proc_pidinfo(int pid, int flavor, uint64_t arg, user_addr_t buffer, uint32_t bu gotref = 1; } + if ((flags & PIF_COMPARE_IDVERSION) && (ext_id != p->p_idversion)) { + error = ESRCH; + goto out; + } + if ((flags & PIF_COMPARE_UNIQUEID) && (ext_id != p->p_uniqueid)) { + error = ESRCH; + goto out; + } + /* Certain operations don't require privileges */ switch (flavor) { case PROC_PIDT_SHORTBSDINFO: @@ -2047,6 +2094,7 @@ proc_pidinfo(int pid, int flavor, uint64_t arg, user_addr_t buffer, uint32_t bu case PROC_PIDT_SHORTBSDINFO: shortversion = 1; + 
OS_FALLTHROUGH; case PROC_PIDT_BSDINFOWITHUNIQID: case PROC_PIDTBSDINFO: { struct proc_bsdinfo pbsd; @@ -2118,6 +2166,7 @@ proc_pidinfo(int pid, int flavor, uint64_t arg, user_addr_t buffer, uint32_t bu case PROC_PIDTHREADID64INFO: thuniqueid = true; + OS_FALLTHROUGH; case PROC_PIDTHREADINFO:{ struct proc_threadinfo pthinfo; @@ -2133,6 +2182,7 @@ proc_pidinfo(int pid, int flavor, uint64_t arg, user_addr_t buffer, uint32_t bu case PROC_PIDLISTTHREADIDS: thuniqueid = true; + OS_FALLTHROUGH; case PROC_PIDLISTTHREADS:{ error = proc_pidlistthreads(p, thuniqueid, buffer, buffersize, retval); } @@ -2287,22 +2337,23 @@ proc_pidinfo(int pid, int flavor, uint64_t arg, user_addr_t buffer, uint32_t bu } size_t kbufsz = MIN(buffersize, vmrtfaultinfo_bufsz()); - void *vmrtfbuf = kalloc(kbufsz); + void *vmrtfbuf = kheap_alloc(KHEAP_TEMP, kbufsz, Z_WAITOK | Z_ZERO); if (vmrtfbuf == NULL) { error = ENOMEM; break; } - bzero(vmrtfbuf, kbufsz); - uint64_t effpid = get_current_unique_pid(); /* The VM may choose to provide more comprehensive records * for root-privileged users on internal configurations. */ boolean_t isroot = (suser(kauth_cred_get(), (u_short *)0) == 0); - int vmf_residue = vmrtf_extract(effpid, isroot, kbufsz, vmrtfbuf, retval); - int vmfsz = *retval * sizeof(vm_rtfault_record_t); + size_t num_extracted = 0; + int vmf_residue = vmrtf_extract(effpid, isroot, kbufsz, vmrtfbuf, &num_extracted); + size_t vmfsz = num_extracted * sizeof(vm_rtfault_record_t); + + *retval = (int32_t)MIN(num_extracted, INT32_MAX); error = 0; if (vmfsz) { @@ -2314,7 +2365,7 @@ proc_pidinfo(int pid, int flavor, uint64_t arg, user_addr_t buffer, uint32_t bu error = ENOMEM; } } - kfree(vmrtfbuf, kbufsz); + kheap_free(KHEAP_TEMP, vmrtfbuf, kbufsz); } break; case PROC_PIDPLATFORMINFO: { @@ -2358,9 +2409,10 @@ out: int -pid_vnodeinfo(vnode_t vp, uint32_t vid, struct fileproc * fp, proc_t proc, int fd, user_addr_t buffer, __unused uint32_t buffersize, int32_t * retval) +pid_vnodeinfo(vnode_t vp, struct fileproc * fp, proc_t proc, int fd, user_addr_t buffer, __unused uint32_t buffersize, int32_t * retval) { struct vnode_fdinfo vfi; + uint32_t vid = vnode_vid(vp); int error = 0; if ((error = vnode_getwithvid(vp, vid)) != 0) { @@ -2380,9 +2432,10 @@ pid_vnodeinfo(vnode_t vp, uint32_t vid, struct fileproc * fp, proc_t proc, int f } int -pid_vnodeinfopath(vnode_t vp, uint32_t vid, struct fileproc * fp, proc_t proc, int fd, user_addr_t buffer, __unused uint32_t buffersize, int32_t * retval) +pid_vnodeinfopath(vnode_t vp, struct fileproc * fp, proc_t proc, int fd, user_addr_t buffer, __unused uint32_t buffersize, int32_t * retval) { struct vnode_fdinfowithpath vfip; + uint32_t vid = vnode_vid(vp); int count, error = 0; if ((error = vnode_getwithvid(vp, vid)) != 0) { @@ -2409,11 +2462,11 @@ pid_vnodeinfopath(vnode_t vp, uint32_t vid, struct fileproc * fp, proc_t proc, i void fill_fileinfo(struct fileproc * fp, proc_t proc, int fd, struct proc_fileinfo * fproc) { - fproc->fi_openflags = fp->f_fglob->fg_flag; + fproc->fi_openflags = fp->fp_glob->fg_flag; fproc->fi_status = 0; - fproc->fi_offset = fp->f_fglob->fg_offset; - fproc->fi_type = FILEGLOB_DTYPE(fp->f_fglob); - if (fp->f_fglob->fg_count > 1) { + fproc->fi_offset = fp->fp_glob->fg_offset; + fproc->fi_type = FILEGLOB_DTYPE(fp->fp_glob); + if (os_ref_get_count_raw(&fp->fp_glob->fg_count) > 1) { fproc->fi_status |= PROC_FP_SHARED; } if (proc != PROC_NULL) { @@ -2577,12 +2630,6 @@ pid_kqueueinfo(struct kqueue * kq, struct fileproc *fp, proc_t proc, int fd, use return error; } -int 
-pid_atalkinfo(__unused struct atalk * at, __unused struct fileproc *fp, __unused proc_t proc, __unused int fd, __unused user_addr_t buffer, __unused uint32_t buffersize, __unused int32_t * retval) -{ - return ENOTSUP; -} - /************************** proc_pidfdinfo routine ***************************/ int @@ -2590,7 +2637,7 @@ proc_pidfdinfo(int pid, int flavor, int fd, user_addr_t buffer, uint32_t buffers { proc_t p; int error = ENOTSUP; - struct fileproc * fp = NULL; + struct fileproc *fp = NULL; uint32_t size; switch (flavor) { @@ -2645,71 +2692,50 @@ proc_pidfdinfo(int pid, int flavor, int fd, user_addr_t buffer, uint32_t buffers switch (flavor) { case PROC_PIDFDVNODEINFO: { - vnode_t vp; - uint32_t vid = 0; - - if ((error = fp_getfvpandvid(p, fd, &fp, &vp, &vid)) != 0) { + if ((error = fp_get_ftype(p, fd, DTYPE_VNODE, EBADF, &fp)) != 0) { goto out1; } - /* no need to be under the fdlock */ - error = pid_vnodeinfo(vp, vid, fp, p, fd, buffer, buffersize, retval); + error = pid_vnodeinfo(fp->fp_glob->fg_data, fp, p, fd, buffer, buffersize, retval); } break; case PROC_PIDFDVNODEPATHINFO: { - vnode_t vp; - uint32_t vid = 0; - - if ((error = fp_getfvpandvid(p, fd, &fp, &vp, &vid)) != 0) { + if ((error = fp_get_ftype(p, fd, DTYPE_VNODE, EBADF, &fp)) != 0) { goto out1; } - - /* no need to be under the fdlock */ - error = pid_vnodeinfopath(vp, vid, fp, p, fd, buffer, buffersize, retval); + error = pid_vnodeinfopath(fp->fp_glob->fg_data, fp, p, fd, buffer, buffersize, retval); } break; case PROC_PIDFDSOCKETINFO: { - socket_t so; - - if ((error = fp_getfsock(p, fd, &fp, &so)) != 0) { + if ((error = fp_get_ftype(p, fd, DTYPE_SOCKET, ENOTSOCK, &fp)) != 0) { goto out1; } - /* no need to be under the fdlock */ - error = pid_socketinfo(so, fp, p, fd, buffer, buffersize, retval); + error = pid_socketinfo(fp->fp_glob->fg_data, fp, p, fd, buffer, buffersize, retval); } break; case PROC_PIDFDPSEMINFO: { - struct psemnode * psem; - - if ((error = fp_getfpsem(p, fd, &fp, &psem)) != 0) { + if ((error = fp_get_ftype(p, fd, DTYPE_PSXSHM, EBADF, &fp)) != 0) { goto out1; } - /* no need to be under the fdlock */ - error = pid_pseminfo(psem, fp, p, fd, buffer, buffersize, retval); + error = pid_pseminfo(fp->fp_glob->fg_data, fp, p, fd, buffer, buffersize, retval); } break; case PROC_PIDFDPSHMINFO: { - struct pshmnode * pshm; - - if ((error = fp_getfpshm(p, fd, &fp, &pshm)) != 0) { + if ((error = fp_get_ftype(p, fd, DTYPE_PSXSHM, EBADF, &fp)) != 0) { goto out1; } - /* no need to be under the fdlock */ - error = pid_pshminfo(pshm, fp, p, fd, buffer, buffersize, retval); + error = pid_pshminfo(fp->fp_glob->fg_data, fp, p, fd, buffer, buffersize, retval); } break; case PROC_PIDFDPIPEINFO: { - struct pipe * cpipe; - - if ((error = fp_getfpipe(p, fd, &fp, &cpipe)) != 0) { + if ((error = fp_get_ftype(p, fd, DTYPE_PIPE, EBADF, &fp)) != 0) { goto out1; } - /* no need to be under the fdlock */ - error = pid_pipeinfo(cpipe, fp, p, fd, buffer, buffersize, retval); + error = pid_pipeinfo(fp->fp_glob->fg_data, fp, p, fd, buffer, buffersize, retval); } break; @@ -2722,11 +2748,12 @@ proc_pidfdinfo(int pid, int flavor, int fd, user_addr_t buffer, uint32_t buffers error = 0; break; } - } else if ((error = fp_getfkq(p, fd, &fp, &kqu.kq)) != 0) { + } else if ((error = fp_get_ftype(p, fd, DTYPE_KQUEUE, EBADF, &fp)) != 0) { goto out1; + } else { + kqu.kq = fp->fp_glob->fg_data; } - /* no need to be under the fdlock */ error = pid_kqueueinfo(kqu.kq, fp, p, fd, buffer, buffersize, retval); } break; @@ -2740,8 +2767,10 @@ 
proc_pidfdinfo(int pid, int flavor, int fd, user_addr_t buffer, uint32_t buffers error = 0; break; } - } else if ((error = fp_getfkq(p, fd, &fp, &kqu.kq)) != 0) { + } else if ((error = fp_get_ftype(p, fd, DTYPE_KQUEUE, EBADF, &fp)) != 0) { goto out1; + } else { + kqu.kq = fp->fp_glob->fg_data; } error = pid_kqueue_extinfo(p, kqu.kq, buffer, buffersize, retval); } @@ -2772,21 +2801,12 @@ proc_pidlistuptrs(proc_t p, user_addr_t buffer, uint32_t buffersize, int32_t *re void *kbuf = NULL; int32_t nuptrs = 0; - if (buffer != USER_ADDR_NULL) { - count = buffersize / sizeof(uint64_t); - if (count > MAX_UPTRS) { - count = MAX_UPTRS; - } - if (count > 0) { - buffersize = count * sizeof(uint64_t); - kbuf = kalloc(buffersize); - bzero(kbuf, buffersize); - assert(kbuf != NULL); - } else { - buffersize = 0; - } - } else { + if (buffer == USER_ADDR_NULL || buffersize < sizeof(uint64_t)) { buffersize = 0; + } else { + count = MIN(buffersize / sizeof(uint64_t), MAX_UPTRS); + buffersize = count * sizeof(uint64_t); + kbuf = kheap_alloc(KHEAP_TEMP, buffersize, Z_WAITOK); } nuptrs = kevent_proc_copy_uptrs(p, kbuf, buffersize); @@ -2807,7 +2827,7 @@ out: *retval = nuptrs; if (kbuf) { - kfree(kbuf, buffersize); + kheap_free(KHEAP_TEMP, kbuf, buffersize); kbuf = NULL; } @@ -2834,7 +2854,7 @@ proc_fileport_info(__unused mach_port_name_t name, int error; bzero(fp, sizeof(*fp)); - fp->f_fglob = fg; + fp->fp_glob = fg; switch (fia->fia_flavor) { case PROC_PIDFILEPORTVNODEPATHINFO: { @@ -2845,7 +2865,7 @@ proc_fileport_info(__unused mach_port_name_t name, break; } vp = (struct vnode *)fg->fg_data; - error = pid_vnodeinfopath(vp, vnode_vid(vp), fp, PROC_NULL, 0, + error = pid_vnodeinfopath(vp, fp, PROC_NULL, 0, fia->fia_buffer, fia->fia_buffersize, fia->fia_retval); } break; @@ -2994,6 +3014,14 @@ proc_security_policy(proc_t targetp, __unused int callnum, __unused int flavor, int proc_kernmsgbuf(user_addr_t buffer, uint32_t buffersize, int32_t * retval) { +#if CONFIG_MACF + int error = 0; + + if ((error = mac_system_check_info(kauth_cred_get(), "kern.msgbuf"))) { + return error; + } +#endif + if (suser(kauth_cred_get(), (u_short *)0) == 0) { return log_dmesg(buffer, buffersize, retval); } else { @@ -3072,6 +3100,7 @@ proc_setcontrol(int pid, int flavor, uint64_t arg, user_addr_t buffer, uint32_t break; case PROC_SELFSET_DELAYIDLESLEEP: { +#if CONFIG_DELAY_IDLE_SLEEP /* mark or clear the process property to delay idle sleep disk IO */ if (pcontrol != 0) { OSBitOrAtomic(P_DELAYIDLESLEEP, &pself->p_flag); @@ -3080,6 +3109,11 @@ proc_setcontrol(int pid, int flavor, uint64_t arg, user_addr_t buffer, uint32_t } } break; +#else + error = ENOTSUP; + goto out; + } +#endif default: error = ENOTSUP; @@ -3328,8 +3362,10 @@ void proc_archinfo(proc_t p, struct proc_archinfo *pai) { proc_lock(p); - pai->p_cputype = p->p_cputype; - pai->p_cpusubtype = p->p_cpusubtype; + { + pai->p_cputype = p->p_cputype; + pai->p_cpusubtype = p->p_cpusubtype; + } proc_unlock(p); } @@ -3377,7 +3413,7 @@ proc_pidexitreasoninfo(proc_t p, struct proc_exitreasoninfo *peri, struct proc_e } if (p->p_exit_reason->osr_kcd_buf != NULL) { - reason_data_size = kcdata_memory_get_used_bytes(&p->p_exit_reason->osr_kcd_descriptor); + reason_data_size = (uint32_t)kcdata_memory_get_used_bytes(&p->p_exit_reason->osr_kcd_descriptor); } if (peri != NULL) { @@ -3392,7 +3428,7 @@ proc_pidexitreasoninfo(proc_t p, struct proc_exitreasoninfo *peri, struct proc_e peri->eri_reason_buf_size = reason_data_size; if (reason_data_size != 0) { - error = 
copyout(p->p_exit_reason->osr_kcd_buf, peri->eri_kcd_buf, reason_data_size); + error = copyout(p->p_exit_reason->osr_kcd_buf, (user_addr_t)peri->eri_kcd_buf, reason_data_size); } } else { pberi->beri_namespace = p->p_exit_reason->osr_namespace; @@ -3528,7 +3564,7 @@ out: return err; } -#if !CONFIG_EMBEDDED +#if CONFIG_PROC_UDATA_STORAGE int proc_udata_info(int pid, int flavor, user_addr_t buffer, uint32_t bufsize, int32_t *retval) { @@ -3574,4 +3610,4 @@ out: return err; } -#endif /* !CONFIG_EMBEDDED */ +#endif /* CONFIG_PROC_UDATA_STORAGE */ diff --git a/bsd/kern/process_policy.c b/bsd/kern/process_policy.c index f8f8f1f8b..bc18d89a5 100644 --- a/bsd/kern/process_policy.c +++ b/bsd/kern/process_policy.c @@ -48,7 +48,6 @@ #include #include #include -#include #include #include @@ -71,9 +70,9 @@ #include #include -#if CONFIG_EMBEDDED +#if !defined(XNU_TARGET_OS_OSX) #include -#endif /* CONFIG_EMBEDDED */ +#endif /* !defined(XNU_TARGET_OS_OSX) */ #if CONFIG_MACF #include @@ -84,13 +83,15 @@ static int handle_lowresource(int scope, int action, int policy, int policy_subt static int handle_cpuuse(int action, user_addr_t attrp, proc_t proc, uint64_t target_threadid); static int handle_apptype(int scope, int action, int policy, int policy_subtype, user_addr_t attrp, proc_t proc, uint64_t target_threadid); static int handle_boost(int scope, int action, int policy, int policy_subtype, user_addr_t attrp, proc_t proc, uint64_t target_threadid); +static int handle_no_smt(int scope, int action, proc_t target_proc, uint64_t target_threadid); +static int handle_tecs(int scope, int action, proc_t target_proc, uint64_t target_threadid); extern kern_return_t task_suspend(task_t); extern kern_return_t task_resume(task_t); -#if CONFIG_EMBEDDED +#if !defined(XNU_TARGET_OS_OSX) static int handle_applifecycle(int scope, int action, int policy, int policy_subtype, user_addr_t attrp, proc_t proc, uint64_t target_threadid); -#endif /* CONFIG_EMBEDDED */ +#endif /* !defined(XNU_TARGET_OS_OSX) */ /***************************** process_policy ********************/ @@ -115,11 +116,11 @@ process_policy(__unused struct proc *p, struct process_policy_args * uap, __unus pid_t target_pid = uap->target_pid; uint64_t target_threadid = uap->target_threadid; proc_t target_proc = PROC_NULL; -#if CONFIG_MACF || !CONFIG_EMBEDDED +#if CONFIG_MACF || defined(XNU_TARGET_OS_OSX) proc_t curp = current_proc(); #endif kauth_cred_t my_cred; -#if CONFIG_EMBEDDED +#if !defined(XNU_TARGET_OS_OSX) kauth_cred_t target_cred; #endif @@ -139,7 +140,7 @@ process_policy(__unused struct proc *p, struct process_policy_args * uap, __unus my_cred = kauth_cred_get(); -#if CONFIG_EMBEDDED +#if !defined(XNU_TARGET_OS_OSX) target_cred = kauth_cred_proc_ref(target_proc); if (!kauth_cred_issuser(my_cred) && kauth_cred_getruid(my_cred) && @@ -163,7 +164,7 @@ process_policy(__unused struct proc *p, struct process_policy_args * uap, __unus switch (policy) { case PROC_POLICY_BOOST: case PROC_POLICY_RESOURCE_USAGE: -#if CONFIG_EMBEDDED +#if !defined(XNU_TARGET_OS_OSX) case PROC_POLICY_APPTYPE: case PROC_POLICY_APP_LIFECYCLE: #endif @@ -207,17 +208,23 @@ process_policy(__unused struct proc *p, struct process_policy_args * uap, __unus error = handle_cpuuse(action, attrp, target_proc, target_threadid); break; -#if CONFIG_EMBEDDED +#if !defined(XNU_TARGET_OS_OSX) case PROC_POLICY_APP_LIFECYCLE: error = handle_applifecycle(scope, action, policy, policy_subtype, attrp, target_proc, target_threadid); break; -#endif /* CONFIG_EMBEDDED */ +#endif /* 
!defined(XNU_TARGET_OS_OSX) */ case PROC_POLICY_APPTYPE: error = handle_apptype(scope, action, policy, policy_subtype, attrp, target_proc, target_threadid); break; case PROC_POLICY_BOOST: error = handle_boost(scope, action, policy, policy_subtype, attrp, target_proc, target_threadid); break; + case PROC_POLICY_NO_SMT: + error = handle_no_smt(scope, action, target_proc, target_threadid); + break; + case PROC_POLICY_TECS: + error = handle_tecs(scope, action, target_proc, target_threadid); + break; default: error = EINVAL; break; @@ -225,7 +232,7 @@ process_policy(__unused struct proc *p, struct process_policy_args * uap, __unus out: proc_rele(target_proc); -#if CONFIG_EMBEDDED +#if !defined(XNU_TARGET_OS_OSX) kauth_cred_unref(&target_cred); #endif return error; @@ -258,7 +265,7 @@ static int handle_cpuuse(int action, user_addr_t attrp, proc_t proc, __unused uint64_t target_threadid) { proc_policy_cpuusage_attr_t cpuattr = { }; -#if CONFIG_MACF || !CONFIG_EMBEDDED +#if CONFIG_MACF || defined(XNU_TARGET_OS_OSX) proc_t curp = current_proc(); #endif Boolean privileged = FALSE; @@ -267,7 +274,7 @@ handle_cpuuse(int action, user_addr_t attrp, proc_t proc, __unused uint64_t targ int error = 0; uint8_t percentage; -#if !CONFIG_EMBEDDED +#if defined(XNU_TARGET_OS_OSX) /* On macOS, tasks can only set and clear their own CPU limits. */ if ((action == PROC_POLICY_ACTION_APPLY || action == PROC_POLICY_ACTION_RESTORE) && curp != proc) { @@ -327,8 +334,8 @@ handle_cpuuse(int action, user_addr_t attrp, proc_t proc, __unused uint64_t targ interval = -1ULL; } - error = proc_set_task_ruse_cpu(proc->task, cpuattr.ppattr_cpu_attr, - cpuattr.ppattr_cpu_percentage, + error = proc_set_task_ruse_cpu(proc->task, (uint16_t)cpuattr.ppattr_cpu_attr, + (uint8_t)MIN(cpuattr.ppattr_cpu_percentage, UINT8_MAX), interval, cpuattr.ppattr_cpu_attr_deadline, privileged); @@ -354,7 +361,7 @@ handle_cpuuse(int action, user_addr_t attrp, proc_t proc, __unused uint64_t targ return error; } -#if CONFIG_EMBEDDED +#if !defined(XNU_TARGET_OS_OSX) static int handle_applifecycle(__unused int scope, int action, @@ -362,7 +369,7 @@ handle_applifecycle(__unused int scope, int policy_subtype, user_addr_t attrp, proc_t proc, - uint64_t target_threadid) + __unused uint64_t target_threadid) { int error = 0; int state = 0; @@ -380,7 +387,7 @@ handle_applifecycle(__unused int scope, case PROC_POLICY_APPLIFE_DEVSTATUS: #if CONFIG_MACF /* ToDo - this should be a generic check, since we could potentially hang other behaviours here. 
*/ - error = mac_proc_check_suspend_resume(current_proc(), MAC_PROC_CHECK_HIBERNATE); + error = mac_proc_check_suspend_resume(proc, MAC_PROC_CHECK_HIBERNATE); if (error) { error = EPERM; goto out; @@ -402,7 +409,7 @@ handle_applifecycle(__unused int scope, case PROC_POLICY_APPLIFE_PIDBIND: #if CONFIG_MACF - error = mac_proc_check_suspend_resume(current_proc(), MAC_PROC_CHECK_PIDBIND); + error = mac_proc_check_suspend_resume(proc, MAC_PROC_CHECK_PIDBIND); if (error) { error = EPERM; goto out; @@ -412,10 +419,13 @@ handle_applifecycle(__unused int scope, if (error != 0) { goto out; } +#if CONFIG_TASKWATCH if (action == PROC_POLICY_ACTION_APPLY) { /* bind the thread in target_thread in current process to target_proc */ error = proc_lf_pidbind(current_task(), target_threadid, proc->task, state); - } else { + } else +#endif /* CONFIG_TASKWATCH */ + { error = EINVAL; } break; @@ -427,7 +437,7 @@ handle_applifecycle(__unused int scope, out: return error; } -#endif /* CONFIG_EMBEDDED */ +#endif /* !defined(XNU_TARGET_OS_OSX) */ static int handle_apptype( int scope, @@ -485,42 +495,9 @@ handle_apptype( int scope, return error; - default: - /* continue to TAL handling */ - break; - } - - if (policy_subtype != PROC_POLICY_OSX_APPTYPE_TAL) { - return EINVAL; - } - - /* need to be super user to do this */ - if (kauth_cred_issuser(kauth_cred_get()) == 0) { - return EPERM; - } - - if (proc_task_is_tal(target_proc->task) == FALSE) { - return EINVAL; - } - - switch (action) { - case PROC_POLICY_ACTION_ENABLE: - /* PROCESS ENABLE APPTYPE TAL */ - proc_set_task_policy(target_proc->task, - TASK_POLICY_ATTRIBUTE, TASK_POLICY_TAL, - TASK_POLICY_ENABLE); - break; - case PROC_POLICY_ACTION_DISABLE: - /* PROCESS DISABLE APPTYPE TAL */ - proc_set_task_policy(target_proc->task, - TASK_POLICY_ATTRIBUTE, TASK_POLICY_TAL, - TASK_POLICY_DISABLE); - break; default: return EINVAL; } - - return 0; } static int @@ -591,6 +568,57 @@ handle_boost(int scope, return error; } +static int +handle_no_smt(int scope, int action, proc_t target_proc, uint64_t target_threadid) +{ + extern void task_set_no_smt(task_t); + + if (action != PROC_POLICY_ACTION_APPLY) { + return EINVAL; + } + + if (scope == PROC_POLICY_SCOPE_PROCESS) { + if (target_proc != current_proc()) { + return EINVAL; + } + task_set_no_smt(TASK_NULL); + } else if (scope == PROC_POLICY_SCOPE_THREAD) { + if (target_threadid != thread_tid(current_thread())) { + return EINVAL; + } + thread_set_no_smt(true); + } else { + return EINVAL; + } + + return 0; +} + +static int +handle_tecs(int scope, int action, proc_t target_proc, uint64_t target_threadid) +{ + if (action != PROC_POLICY_ACTION_APPLY) { + return EINVAL; + } + + if (scope == PROC_POLICY_SCOPE_PROCESS) { + if (target_proc != current_proc()) { + return EINVAL; + } + task_set_tecs(TASK_NULL); + } else if (scope == PROC_POLICY_SCOPE_THREAD) { + if (target_threadid != thread_tid(current_thread())) { + return EINVAL; + } + if (machine_csv(CPUVN_CI)) { + machine_tecs(current_thread()); + } + } else { + return EINVAL; + } + + return 0; +} /* * KPI to determine if a pid is currently backgrounded. diff --git a/bsd/kern/qsort.c b/bsd/kern/qsort.c index cfa58910a..74c506b20 100644 --- a/bsd/kern/qsort.c +++ b/bsd/kern/qsort.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved. 
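[Editor's note] For reference, the argument combinations accepted by the handle_no_smt()/handle_tecs() handlers added to process_policy.c above; both are static, so this is an illustrative sketch of their contract rather than callable code. Only PROC_POLICY_ACTION_APPLY is accepted, and the target must be the caller itself:

	/* mark the whole calling task as no-SMT */
	error = handle_no_smt(PROC_POLICY_SCOPE_PROCESS, PROC_POLICY_ACTION_APPLY,
	    current_proc(), 0);

	/* apply the TECS policy to just the calling thread */
	error = handle_tecs(PROC_POLICY_SCOPE_THREAD, PROC_POLICY_ACTION_APPLY,
	    current_proc(), thread_tid(current_thread()));

	/* any other scope, action, or target returns EINVAL */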
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -71,53 +71,53 @@ __private_extern__ void qsort(void *a, size_t n, size_t es, int (*cmp)(const void *, const void *)); -static inline char *med3(char *, char *, char *, int (*)(const void *, const void *)); -static inline void swapfunc(char *, char *, int, int); +static inline char *med3(char *, char *, char *, int (*)(const void *, const void *)); +static inline void swapfunc(char *, char *, long, int); -#define min(a, b) (a) < (b) ? a : b +#define min(a, b) ((a) < (b) ? (a) : (b)) /* * Qsort routine from Bentley & McIlroy's "Engineering a Sort Function". */ -#define swapcode(TYPE, parmi, parmj, n) { \ - long i = (n) / sizeof (TYPE); \ - TYPE *pi = (TYPE *) (parmi); \ - TYPE *pj = (TYPE *) (parmj); \ - do { \ - TYPE t = *pi; \ - *pi++ = *pj; \ - *pj++ = t; \ - } while (--i > 0); \ -} +#define swapcode(TYPE, parmi, parmj, n) \ + long i = (n) / sizeof (TYPE); \ + TYPE *pi = (TYPE *) (parmi); \ + TYPE *pj = (TYPE *) (parmj); \ + do { \ + TYPE t = *pi; \ + *pi++ = *pj; \ + *pj++ = t; \ + } while (--i > 0); #define SWAPINIT(a, es) swaptype = ((char *)a - (char *)0) % sizeof(long) || \ es % sizeof(long) ? 2 : es == sizeof(long)? 0 : 1; static inline void -swapfunc(char *a, char *b, int n, int swaptype) +swapfunc(char *a, char *b, long n, int swaptype) { - if(swaptype <= 1) - swapcode(long, a, b, n) - else - swapcode(char, a, b, n) + if (swaptype <= 1) { + swapcode(long, a, b, n); + } else { + swapcode(char, a, b, n); + } } -#define swap(a, b) \ - if (swaptype == 0) { \ - long t = *(long *)(a); \ - *(long *)(a) = *(long *)(b); \ - *(long *)(b) = t; \ - } else \ - swapfunc(a, b, es, swaptype) +#define swap(a, b) \ + if (swaptype == 0) { \ + long t = *(long *)(a); \ + *(long *)(a) = *(long *)(b); \ + *(long *)(b) = t; \ + } else \ + swapfunc(a, b, es, swaptype) -#define vecswap(a, b, n) if ((n) > 0) swapfunc(a, b, n, swaptype) +#define vecswap(a, b, n) if ((n) > 0) swapfunc(a, b, n, swaptype) static inline char * med3(char *a, char *b, char *c, int (*cmp)(const void *, const void *)) { return cmp(a, b) < 0 ? - (cmp(b, c) < 0 ? b : (cmp(a, c) < 0 ? c : a )) - :(cmp(b, c) > 0 ? b : (cmp(a, c) < 0 ? a : c )); + (cmp(b, c) < 0 ? b : (cmp(a, c) < 0 ? c : a)) + :(cmp(b, c) > 0 ? b : (cmp(a, c) < 0 ? 
a : c)); } __private_extern__ @@ -125,16 +125,18 @@ void qsort(void *a, size_t n, size_t es, int (*cmp)(const void *, const void *)) { char *pa, *pb, *pc, *pd, *pl, *pm, *pn; - int d, swaptype, swap_cnt; - int r; + int swaptype, swap_cnt; + long d, r; -loop: SWAPINIT(a, es); +loop: SWAPINIT(a, es); swap_cnt = 0; if (n < 7) { - for (pm = (char *)a + es; pm < (char *) a + n * es; pm += es) + for (pm = (char *)a + es; pm < (char *) a + n * es; pm += es) { for (pl = pm; pl > (char *) a && cmp(pl - es, pl) > 0; - pl -= es) - swap(pl, pl - es); + pl -= es) { + swap(pl, (char *)(pl - es)); + } + } return; } pm = (char *)a + (n / 2) * es; @@ -170,29 +172,33 @@ loop: SWAPINIT(a, es); } pc -= es; } - if (pb > pc) + if (pb > pc) { break; + } swap(pb, pc); swap_cnt = 1; pb += es; pc -= es; } if (swap_cnt == 0) { /* Switch to insertion sort */ - for (pm = (char *)a + es; pm < (char *) a + n * es; pm += es) - for (pl = pm; pl > (char *) a && cmp(pl - es, pl) > 0; - pl -= es) + for (pm = (char *)a + es; pm < (char *) a + n * es; pm += es) { + for (pl = pm; pl > (char *) a && cmp(pl - es, pl) > 0; + pl -= es) { swap(pl, pl - es); + } + } return; } pn = (char *)a + n * es; r = min(pa - (char *)a, pb - pa); vecswap(a, pb - r, r); - r = min((size_t)(pd - pc), pn - pd - es); + r = min(pd - pc, pn - pd - es); vecswap(pb, pn - r, r); - if ((size_t)(r = pb - pa) > es) + if ((size_t)(r = pb - pa) > es) { qsort(a, r / es, es, cmp); - if ((size_t)(r = pd - pc) > es) { + } + if ((size_t)(r = pd - pc) > es) { /* Iterate rather than recurse to save stack space */ a = pn - r; n = r / es; @@ -202,7 +208,8 @@ loop: SWAPINIT(a, es); } /* private KPI */ -void -kx_qsort (void *array, size_t nm, size_t member_size, int (*cmpf)(const void *, const void *)) { - qsort (array, nm, member_size, cmpf); +void +kx_qsort(void *array, size_t nm, size_t member_size, int (*cmpf)(const void *, const void *)) +{ + qsort(array, nm, member_size, cmpf); } diff --git a/bsd/kern/socket_info.c b/bsd/kern/socket_info.c index 44ea0477a..1e4c009bb 100644 --- a/bsd/kern/socket_info.c +++ b/bsd/kern/socket_info.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005-2015 Apple Inc. All rights reserved. + * Copyright (c) 2005-2020 Apple Inc. All rights reserved. 
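[Editor's note] The qsort.c hunk above fully parenthesises the min() macro. Inside qsort() itself the macro only ever forms a complete right-hand side, but the old expansion binds incorrectly as soon as it is embedded in a larger expression; a small self-contained illustration (not from the patch):

	#define min_old(a, b) (a) < (b) ? a : b        /* old definition */
	#define min_new(a, b) ((a) < (b) ? (a) : (b))  /* new definition */

	static int
	min_macro_demo(void)
	{
		int x = 3, y = 5;
		int r1 = 2 * min_old(x, y); /* parses as ((2 * x) < y) ? x : y == 5, not the intended 6 */
		int r2 = 2 * min_new(x, y); /* parses as 2 * ((x < y) ? (x) : (y)) == 6 */
		return r2 - r1;
	}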
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -42,6 +42,7 @@ #include #include #include +#include #include #include #include @@ -59,9 +60,9 @@ fill_sockbuf_info(struct sockbuf *sb, struct sockbuf_info *sbi) sbi->sbi_mbcnt = sb->sb_mbcnt; sbi->sbi_mbmax = sb->sb_mbmax; sbi->sbi_lowat = sb->sb_lowat; - sbi->sbi_flags = sb->sb_flags; - sbi->sbi_timeo = (u_int32_t)(sb->sb_timeo.tv_sec * hz) + - sb->sb_timeo.tv_usec / tick; + sbi->sbi_flags = (short)sb->sb_flags; + sbi->sbi_timeo = (short)((sb->sb_timeo.tv_sec * hz) + + sb->sb_timeo.tv_usec / tick); if (sbi->sbi_timeo == 0 && sb->sb_timeo.tv_usec != 0) { sbi->sbi_timeo = 1; } @@ -215,6 +216,19 @@ fill_socketinfo(struct socket *so, struct socket_info *si) } break; } + case PF_VSOCK: { + const struct vsockpcb *pcb = (struct vsockpcb *)(so)->so_pcb; + struct vsock_sockinfo *vsocksi = &si->soi_proto.pri_vsock; + + si->soi_kind = SOCKINFO_VSOCK; + + vsocksi->local_cid = pcb->local_address.cid; + vsocksi->local_port = pcb->local_address.port; + vsocksi->remote_cid = pcb->remote_address.cid; + vsocksi->remote_port = pcb->remote_address.port; + + break; + } case PF_SYSTEM: if (SOCK_PROTO(so) == SYSPROTO_EVENT) { struct kern_event_pcb *ev_pcb = diff --git a/bsd/kern/subr_eventhandler.c b/bsd/kern/subr_eventhandler.c index 0fd805173..d7f3f5cbd 100644 --- a/bsd/kern/subr_eventhandler.c +++ b/bsd/kern/subr_eventhandler.c @@ -92,7 +92,7 @@ lck_attr_t *el_lock_attr; struct eventhandler_entry_generic { struct eventhandler_entry ee; - void (* func)(void); + void *func; }; static struct eventhandler_list *_eventhandler_find_list( @@ -203,8 +203,8 @@ eventhandler_register_internal( ("%s: handler for %s registered with dead priority", __func__, name)); /* sort it into the list */ - evhlog((LOG_DEBUG, "%s: adding item %p (function %p to \"%s\"", __func__, VM_KERNEL_ADDRPERM(epn), - VM_KERNEL_UNSLIDE(((struct eventhandler_entry_generic *)epn)->func), name)); + evhlog((LOG_DEBUG, "%s: adding item %p (function %p to \"%s\"", __func__, (void *)VM_KERNEL_ADDRPERM(epn), + (void *)VM_KERNEL_UNSLIDE(((struct eventhandler_entry_generic *)epn)->func), name)); EHL_LOCK(list); TAILQ_FOREACH(ep, &list->el_entries, ee_link) { if (ep->ee_priority != EHE_DEAD_PRIORITY && @@ -251,7 +251,7 @@ eventhandler_deregister(struct eventhandler_list *list, eventhandler_tag tag) if (ep != NULL) { /* remove just this entry */ if (list->el_runcount == 0) { - evhlog((LOG_DEBUG, "%s: removing item %p from \"%s\"", __func__, VM_KERNEL_ADDRPERM(ep), + evhlog((LOG_DEBUG, "%s: removing item %p from \"%s\"", __func__, (void *)VM_KERNEL_ADDRPERM(ep), list->el_name)); /* * We may have purged the list because of certain events. 
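[Editor's note] A worked example for the sbi_timeo conversion added to fill_sockbuf_info() above, assuming the usual BSD scheduler values hz = 100 and tick = 1000000 / hz = 10000 microseconds (these values are an assumption of the example, not stated in the patch):

	/*
	 * sb_timeo = { .tv_sec = 2, .tv_usec = 500000 }
	 *   sbi_timeo = 2 * 100 + 500000 / 10000 = 200 + 50 = 250 ticks
	 *
	 * sb_timeo = { .tv_sec = 0, .tv_usec = 40 }
	 *   sbi_timeo = 0 * 100 + 40 / 10000 = 0, then raised to 1 by the
	 *   "sbi_timeo == 0 && tv_usec != 0" clamp so a tiny timeout is not
	 *   reported back as "no timeout at all".
	 */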
@@ -265,7 +265,7 @@ eventhandler_deregister(struct eventhandler_list *list, eventhandler_tag tag) mcache_free(eg_cache, ep); } else { evhlog((LOG_DEBUG, "%s: marking item %p from \"%s\" as dead", __func__, - VM_KERNEL_ADDRPERM(ep), list->el_name)); + (void *)VM_KERNEL_ADDRPERM(ep), list->el_name)); ep->ee_priority = EHE_DEAD_PRIORITY; } } else { diff --git a/bsd/kern/subr_log.c b/bsd/kern/subr_log.c index 4e1a1c4fb..b3352a4e6 100644 --- a/bsd/kern/subr_log.c +++ b/bsd/kern/subr_log.c @@ -105,20 +105,27 @@ extern void logwakeup(struct msgbuf *); extern void oslogwakeup(void); extern void oslog_streamwakeup(void); static void oslog_streamwakeup_locked(void); -vm_offset_t kernel_firehose_addr = 0; + +SECURITY_READ_ONLY_LATE(vm_offset_t) kernel_firehose_addr = 0; +SECURITY_READ_ONLY_LATE(uint8_t) __firehose_buffer_kernel_chunk_count = + FIREHOSE_BUFFER_KERNEL_DEFAULT_CHUNK_COUNT; +SECURITY_READ_ONLY_LATE(uint8_t) __firehose_num_kernel_io_pages = + FIREHOSE_BUFFER_KERNEL_DEFAULT_IO_PAGES; /* log message counters for streaming mode */ uint32_t oslog_s_streamed_msgcount = 0; uint32_t oslog_s_dropped_msgcount = 0; extern uint32_t oslog_s_error_count; +uint32_t oslog_msgbuf_dropped_charcount = 0; + #define LOG_RDPRI (PZERO + 1) #define LOG_NBIO 0x02 #define LOG_ASYNC 0x04 #define LOG_RDWAIT 0x08 -/* All globals should be accessed under LOG_LOCK() */ +/* All globals should be accessed under bsd_log_lock() or bsd_log_lock_safe() */ static char amsg_bufc[1024]; static struct msgbuf aslbuf = {.msg_magic = MSG_MAGIC, .msg_size = sizeof(amsg_bufc), .msg_bufx = 0, .msg_bufr = 0, .msg_bufc = amsg_bufc}; @@ -163,9 +170,6 @@ int oslog_stream_buf_bytesavail = 0; int oslog_stream_buf_size = OSLOG_STREAM_BUF_SIZE; int oslog_stream_num_entries = OSLOG_NUM_STREAM_ENTRIES; -uint8_t __firehose_buffer_kernel_chunk_count = FIREHOSE_BUFFER_KERNEL_DEFAULT_CHUNK_COUNT; -uint8_t __firehose_num_kernel_io_pages = FIREHOSE_BUFFER_KERNEL_DEFAULT_IO_PAGES; - /* oslogsoftc only valid while oslog_open=1 */ struct oslogsoftc { int sc_state; /* see above for possibilities */ @@ -185,18 +189,12 @@ STAILQ_HEAD(, oslog_stream_buf_entry_s) oslog_stream_buf_head = STAILQ_HEAD_INITIALIZER(oslog_stream_buf_head); /* defined in osfmk/kern/printf.c */ -extern void oslog_lock_init(void); -extern void bsd_log_lock(void); +extern bool bsd_log_lock(bool); +extern void bsd_log_lock_safe(void); extern void bsd_log_unlock(void); -/* defined for osfmk/kern/printf.c */ -void bsd_log_init(void); - -/* - * Ideally this file would define this lock, but bsd doesn't have the definition - * for lock groups. - */ -decl_lck_spin_data(extern, oslog_stream_lock); +LCK_GRP_DECLARE(oslog_stream_lock_grp, "oslog streaming"); +LCK_SPIN_DECLARE(oslog_stream_lock, &oslog_stream_lock_grp); #define stream_lock() lck_spin_lock(&oslog_stream_lock) #define stream_unlock() lck_spin_unlock(&oslog_stream_lock) @@ -220,7 +218,6 @@ extern d_read_t oslog_streamread; extern d_ioctl_t oslog_streamioctl; extern d_select_t oslog_streamselect; -void oslog_init(void); void oslog_setsize(int size); void oslog_streamwrite_locked(firehose_tracepoint_id_u ftid, uint64_t stamp, const void *pubdata, size_t publen); @@ -234,9 +231,6 @@ static void oslog_streamwrite_append_bytes(const char *buffer, int buflen); * at interrupt level must be guarded with a spin lock. */ -#define LOG_LOCK() bsd_log_lock() -#define LOG_UNLOCK() bsd_log_unlock() - #if DEBUG #define LOG_SETSIZE_DEBUG(x...) 
kprintf(x) #else @@ -250,9 +244,9 @@ static int sysctl_kern_msgbuf(struct sysctl_oid *oidp, int logopen(__unused dev_t dev, __unused int flags, __unused int mode, struct proc *p) { - LOG_LOCK(); + bsd_log_lock_safe(); if (log_open) { - LOG_UNLOCK(); + bsd_log_unlock(); return EBUSY; } if (atm_get_diagnostic_config() & ATM_ENABLE_LEGACY_LOGGING) { @@ -268,7 +262,7 @@ logopen(__unused dev_t dev, __unused int flags, __unused int mode, struct proc * logsoftc.sc_pgid = p->p_pid; /* signal process only */ log_open = 1; - LOG_UNLOCK(); + bsd_log_unlock(); return 0; } @@ -277,12 +271,12 @@ logopen(__unused dev_t dev, __unused int flags, __unused int mode, struct proc * int logclose(__unused dev_t dev, __unused int flag, __unused int devtype, __unused struct proc *p) { - LOG_LOCK(); + bsd_log_lock_safe(); logsoftc.sc_state &= ~(LOG_NBIO | LOG_ASYNC); selwakeup(&logsoftc.sc_selp); selthreadclear(&logsoftc.sc_selp); log_open = 0; - LOG_UNLOCK(); + bsd_log_unlock(); return 0; } @@ -290,27 +284,27 @@ logclose(__unused dev_t dev, __unused int flag, __unused int devtype, __unused s int oslogopen(__unused dev_t dev, __unused int flags, __unused int mode, struct proc *p) { - LOG_LOCK(); + bsd_log_lock_safe(); if (oslog_open) { - LOG_UNLOCK(); + bsd_log_unlock(); return EBUSY; } oslogsoftc.sc_pgid = p->p_pid; /* signal process only */ oslog_open = 1; - LOG_UNLOCK(); + bsd_log_unlock(); return 0; } int oslogclose(__unused dev_t dev, __unused int flag, __unused int devtype, __unused struct proc *p) { - LOG_LOCK(); + bsd_log_lock_safe(); oslogsoftc.sc_state &= ~(LOG_NBIO | LOG_ASYNC); selwakeup(&oslogsoftc.sc_selp); selthreadclear(&oslogsoftc.sc_selp); oslog_open = 0; - LOG_UNLOCK(); + bsd_log_unlock(); return 0; } @@ -328,18 +322,18 @@ oslog_streamopen(__unused dev_t dev, __unused int flags, __unused int mode, stru stream_unlock(); // Allocate the stream buffer - oslog_stream_msg_bufc = kalloc(oslog_stream_buf_size); + oslog_stream_msg_bufc = kheap_alloc(KHEAP_DATA_BUFFERS, + oslog_stream_buf_size, Z_WAITOK | Z_ZERO); if (!oslog_stream_msg_bufc) { return ENOMEM; } - /* Zeroing to avoid copying uninitialized struct padding to userspace. */ - bzero(oslog_stream_msg_bufc, oslog_stream_buf_size); /* entries to support kernel logging in stream mode */ size_t entries_size = oslog_stream_num_entries * sizeof(struct oslog_stream_buf_entry_s); entries = kalloc(entries_size); if (!entries) { - kfree(oslog_stream_msg_bufc, oslog_stream_buf_size); + kheap_free(KHEAP_DATA_BUFFERS, + oslog_stream_msg_bufc, oslog_stream_buf_size); return ENOMEM; } /* Zeroing to avoid copying uninitialized struct padding to userspace. 
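[Editor's note] The stream-buffer allocation above shows the allocation pattern this patch converts to throughout: kheap_alloc()/kheap_free() against a named heap replace kalloc()/bzero()/kfree(). A minimal sketch of the idiom (illustrative only; bufsize stands for whatever size the caller computed):

	void *buf = kheap_alloc(KHEAP_DATA_BUFFERS, bufsize, Z_WAITOK | Z_ZERO);
	if (buf == NULL) {
		return ENOMEM;
	}
	/* ... fill buf, copy it out ... */
	kheap_free(KHEAP_DATA_BUFFERS, buf, bufsize); /* same heap and size as the allocation */

	/* Z_ZERO returns zeroed memory, so the explicit bzero() in the old code goes away. */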
*/ @@ -348,7 +342,8 @@ oslog_streamopen(__unused dev_t dev, __unused int flags, __unused int mode, stru stream_lock(); if (oslog_stream_open) { stream_unlock(); - kfree(oslog_stream_msg_bufc, oslog_stream_buf_size); + kheap_free(KHEAP_DATA_BUFFERS, + oslog_stream_msg_bufc, oslog_stream_buf_size); kfree(entries, entries_size); return EBUSY; } @@ -417,7 +412,8 @@ oslog_streamclose(__unused dev_t dev, __unused int flag, __unused int devtype, _ stream_unlock(); // Free the stream buffer - kfree(oslog_stream_msg_bufc, oslog_stream_buf_size); + kheap_free(KHEAP_DATA_BUFFERS, oslog_stream_msg_bufc, + oslog_stream_buf_size); // Free the list entries kfree(entries, oslog_stream_num_entries * sizeof(struct oslog_stream_buf_entry_s)); @@ -428,11 +424,11 @@ oslog_streamclose(__unused dev_t dev, __unused int flag, __unused int devtype, _ int logread(__unused dev_t dev, struct uio *uio, int flag) { - int l; int error = 0; struct msgbuf *mbp = logsoftc.sc_mbp; + ssize_t resid; - LOG_LOCK(); + bsd_log_lock_safe(); while (mbp->msg_bufr == mbp->msg_bufx) { if (flag & IO_NDELAY) { error = EWOULDBLOCK; @@ -443,7 +439,7 @@ logread(__unused dev_t dev, struct uio *uio, int flag) goto out; } logsoftc.sc_state |= LOG_RDWAIT; - LOG_UNLOCK(); + bsd_log_unlock(); /* * If the wakeup is missed * then wait for 5 sec and reevaluate @@ -455,36 +451,38 @@ logread(__unused dev_t dev, struct uio *uio, int flag) return error; } } - LOG_LOCK(); + bsd_log_lock_safe(); } logsoftc.sc_state &= ~LOG_RDWAIT; - while (uio_resid(uio) > 0) { - int readpos; + while ((resid = uio_resid(uio)) > 0) { + size_t l; - l = mbp->msg_bufx - mbp->msg_bufr; - if (l < 0) { + if (mbp->msg_bufx >= mbp->msg_bufr) { + l = mbp->msg_bufx - mbp->msg_bufr; + } else { l = mbp->msg_size - mbp->msg_bufr; } - l = min(l, uio_resid(uio)); - if (l == 0) { + if ((l = MIN(l, (size_t)resid)) == 0) { break; } - readpos = mbp->msg_bufr; - LOG_UNLOCK(); - error = uiomove((caddr_t)&mbp->msg_bufc[readpos], l, uio); - LOG_LOCK(); + const size_t readpos = mbp->msg_bufr; + + bsd_log_unlock(); + error = uiomove((caddr_t)&mbp->msg_bufc[readpos], (int)l, uio); + bsd_log_lock_safe(); if (error) { break; } - mbp->msg_bufr = readpos + l; + + mbp->msg_bufr = (int)(readpos + l); if (mbp->msg_bufr >= mbp->msg_size) { mbp->msg_bufr = 0; } } out: - LOG_UNLOCK(); + bsd_log_unlock(); return error; } @@ -558,7 +556,7 @@ oslog_streamread(__unused dev_t dev, struct uio *uio, int flag) stream_unlock(); // Free the list entry - kfree(read_entry, (sizeof(struct oslog_stream_buf_entry_s) + read_entry->size)); + kfree(read_entry, sizeof(struct oslog_stream_buf_entry_s) + read_entry->size); break; } /* Handle log messages */ @@ -608,8 +606,8 @@ oslog_streamread(__unused dev_t dev, struct uio *uio, int flag) } } - copy_size = min(logpos, uio_resid(uio)); - if (copy_size != 0) { + copy_size = min(logpos, (int) MIN(uio_resid(uio), INT_MAX)); + if (copy_size > 0) { error = uiomove((caddr_t)logline, copy_size, uio); } os_atomic_inc(&oslog_s_streamed_msgcount, relaxed); @@ -625,13 +623,13 @@ logselect(__unused dev_t dev, int rw, void * wql, struct proc *p) switch (rw) { case FREAD: - LOG_LOCK(); + bsd_log_lock_safe(); if (mbp->msg_bufr != mbp->msg_bufx) { - LOG_UNLOCK(); + bsd_log_unlock(); return 1; } selrecord(p, &logsoftc.sc_selp, wql); - LOG_UNLOCK(); + bsd_log_unlock(); break; } return 0; @@ -642,13 +640,13 @@ oslogselect(__unused dev_t dev, int rw, void * wql, struct proc *p) { switch (rw) { case FREAD: - LOG_LOCK(); + bsd_log_lock_safe(); if (os_log_wakeup) { - LOG_UNLOCK(); + bsd_log_unlock(); 
return 1; } selrecord(p, &oslogsoftc.sc_selp, wql); - LOG_UNLOCK(); + bsd_log_unlock(); break; } return 0; @@ -683,9 +681,9 @@ logwakeup(struct msgbuf *mbp) return; } - LOG_LOCK(); + bsd_log_lock_safe(); if (!log_open) { - LOG_UNLOCK(); + bsd_log_unlock(); return; } if (NULL == mbp) { @@ -697,33 +695,37 @@ logwakeup(struct msgbuf *mbp) selwakeup(&logsoftc.sc_selp); if (logsoftc.sc_state & LOG_ASYNC) { int pgid = logsoftc.sc_pgid; - LOG_UNLOCK(); + bsd_log_unlock(); if (pgid < 0) { gsignal(-pgid, SIGIO); } else { proc_signal(pgid, SIGIO); } - LOG_LOCK(); + bsd_log_lock_safe(); } if (logsoftc.sc_state & LOG_RDWAIT) { wakeup((caddr_t)mbp); logsoftc.sc_state &= ~LOG_RDWAIT; } out: - LOG_UNLOCK(); + bsd_log_unlock(); } void oslogwakeup(void) { - LOG_LOCK(); + if (!oslog_is_safe()) { + return; + } + + bsd_log_lock_safe(); if (!oslog_open) { - LOG_UNLOCK(); + bsd_log_unlock(); return; } selwakeup(&oslogsoftc.sc_selp); os_log_wakeup = 1; - LOG_UNLOCK(); + bsd_log_unlock(); } static void @@ -760,7 +762,7 @@ logioctl(__unused dev_t dev, u_long com, caddr_t data, __unused int flag, __unus int l; const struct msgbuf *mbp = logsoftc.sc_mbp; - LOG_LOCK(); + bsd_log_lock_safe(); switch (com) { /* return number of characters immediately available */ case FIONREAD: @@ -796,10 +798,10 @@ logioctl(__unused dev_t dev, u_long com, caddr_t data, __unused int flag, __unus break; default: - LOG_UNLOCK(); + bsd_log_unlock(); return -1; } - LOG_UNLOCK(); + bsd_log_unlock(); return 0; } @@ -849,9 +851,9 @@ oslogioctl(__unused dev_t dev, u_long com, caddr_t data, __unused int flag, __un } break; case LOGFLUSHED: - LOG_LOCK(); + bsd_log_lock_safe(); os_log_wakeup = 0; - LOG_UNLOCK(); + bsd_log_unlock(); __firehose_merge_updates(*(firehose_push_reply_t *)(data)); break; default: @@ -892,13 +894,8 @@ oslog_streamioctl(__unused dev_t dev, u_long com, caddr_t data, __unused int fla return err; } -void -bsd_log_init(void) -{ - /* After this point, we must be ready to accept characters */ -} - -void +__startup_func +static void oslog_init(void) { kern_return_t kr; @@ -915,27 +912,26 @@ oslog_init(void) } vm_size_t size = __firehose_buffer_kernel_chunk_count * FIREHOSE_CHUNK_SIZE; - oslog_lock_init(); - kr = kmem_alloc_flags(kernel_map, &kernel_firehose_addr, size + (2 * PAGE_SIZE), VM_KERN_MEMORY_LOG, - KMA_GUARD_FIRST | KMA_GUARD_LAST); + KMA_GUARD_FIRST | KMA_GUARD_LAST | KMA_ZERO); if (kr != KERN_SUCCESS) { panic("Failed to allocate memory for firehose logging buffer"); } kernel_firehose_addr += PAGE_SIZE; - bzero((void *)kernel_firehose_addr, size); /* register buffer with firehose */ kernel_firehose_addr = (vm_offset_t)__firehose_buffer_create((size_t *) &size); - printf("oslog_init completed, %u chunks, %u io pages\n", __firehose_buffer_kernel_chunk_count, __firehose_num_kernel_io_pages); + printf("oslog_init completed, %u chunks, %u io pages\n", + __firehose_buffer_kernel_chunk_count, __firehose_num_kernel_io_pages); } +STARTUP(OSLOG, STARTUP_RANK_FIRST, oslog_init); /* * log_putc_locked * - * Decription: Output a character to the log; assumes the LOG_LOCK() is held - * by the caller. + * Decription: Output a character to the log; assumes the bsd_log_lock() or + * bsd_log_lock_safe() is held by the caller. 
* * Parameters: c Character to output * @@ -1053,11 +1049,11 @@ oslog_streamwrite_locked(firehose_tracepoint_id_u ftid, oslog_stream_buf_entry_t buf_entry = NULL; oslog_stream_buf_entry_t next_entry = NULL; - uint16_t ft_size = offsetof(struct firehose_tracepoint_s, ft_data); - int ft_length = ft_size + publen; - LCK_SPIN_ASSERT(&oslog_stream_lock, LCK_ASSERT_OWNED); + assert(publen <= UINT16_MAX); + const ssize_t ft_length = offsetof(struct firehose_tracepoint_s, ft_data) + publen; + mbp = oslog_streambufp; if (ft_length > mbp->msg_size) { os_atomic_inc(&oslog_s_error_count, relaxed); @@ -1101,7 +1097,7 @@ oslog_streamwrite_locked(firehose_tracepoint_id_u ftid, // Write the log line and update the list entry for this record buf_entry->offset = mbp->msg_bufx; - buf_entry->size = ft_length; + buf_entry->size = (uint16_t)ft_length; buf_entry->timestamp = stamp; buf_entry->type = oslog_stream_link_type_log; @@ -1113,7 +1109,7 @@ oslog_streamwrite_locked(firehose_tracepoint_id_u ftid, }; oslog_streamwrite_append_bytes((char *)&fs, sizeof(fs)); - oslog_streamwrite_append_bytes(pubdata, publen); + oslog_streamwrite_append_bytes(pubdata, (int)publen); assert(mbp->msg_bufr < mbp->msg_size); // Insert the element to the buffer data list @@ -1122,13 +1118,11 @@ oslog_streamwrite_locked(firehose_tracepoint_id_u ftid, return; } - - /* * log_putc * - * Decription: Output a character to the log; assumes the LOG_LOCK() is NOT - * held by the caller. + * Decription: Output a character to the log; assumes the bsd_log_lock() or + * bsd_log_lock_safe() is NOT held by the caller. * * Parameters: c Character to output * @@ -1140,11 +1134,15 @@ oslog_streamwrite_locked(firehose_tracepoint_id_u ftid, void log_putc(char c) { - int unread_count = 0; - LOG_LOCK(); + if (!bsd_log_lock(oslog_is_safe())) { + os_atomic_inc(&oslog_msgbuf_dropped_charcount, relaxed); + return; + } + log_putc_locked(msgbufp, c); - unread_count = msgbufp->msg_bufx - msgbufp->msg_bufr; - LOG_UNLOCK(); + int unread_count = msgbufp->msg_bufx - msgbufp->msg_bufr; + + bsd_log_unlock(); if (unread_count < 0) { unread_count = 0 - unread_count; @@ -1183,13 +1181,13 @@ log_setsize(int size) } new_logsize = size; - if (!(new_logdata = (char*)kalloc(size))) { + new_logdata = kheap_alloc(KHEAP_DATA_BUFFERS, size, Z_WAITOK | Z_ZERO); + if (!new_logdata) { printf("log_setsize: unable to allocate memory\n"); return ENOMEM; } - bzero(new_logdata, new_logsize); - LOG_LOCK(); + bsd_log_lock_safe(); old_logsize = msgbufp->msg_size; old_logdata = msgbufp->msg_bufc; @@ -1242,14 +1240,14 @@ log_setsize(int size) LOG_SETSIZE_DEBUG("log_setsize(%d): new_logdata %p new_logsize %d new_bufr %d new_bufx %d\n", size, new_logdata, new_logsize, new_bufr, new_bufx); - LOG_UNLOCK(); + bsd_log_unlock(); /* this memory is now dead - clear it so that it compresses better * in case of suspend to disk etc. 
*/ bzero(old_logdata, old_logsize); if (old_logdata != smsg_bufc) { /* dynamic memory that must be freed */ - kfree(old_logdata, old_logsize); + kheap_free(KHEAP_DATA_BUFFERS, old_logdata, old_logsize); } printf("set system log size to %d bytes\n", new_logsize); @@ -1285,9 +1283,9 @@ sysctl_kern_msgbuf(struct sysctl_oid *oidp __unused, int old_bufsize, bufsize; int error; - LOG_LOCK(); + bsd_log_lock_safe(); old_bufsize = bufsize = msgbufp->msg_size; - LOG_UNLOCK(); + bsd_log_unlock(); error = sysctl_io_number(req, bufsize, sizeof(bufsize), &bufsize, NULL); if (error) { @@ -1307,7 +1305,7 @@ sysctl_kern_msgbuf(struct sysctl_oid *oidp __unused, * It returns as much data still in the buffer as possible. */ int -log_dmesg(user_addr_t buffer, uint32_t buffersize, int32_t * retval) +log_dmesg(user_addr_t buffer, uint32_t buffersize, int32_t *retval) { uint32_t i; uint32_t localbuff_size; @@ -1315,18 +1313,19 @@ log_dmesg(user_addr_t buffer, uint32_t buffersize, int32_t * retval) char *localbuff, *p, *copystart, ch; size_t copysize; - LOG_LOCK(); + bsd_log_lock_safe(); localbuff_size = (msgbufp->msg_size + 2); /* + '\n' + '\0' */ - LOG_UNLOCK(); + bsd_log_unlock(); /* Allocate a temporary non-circular buffer for copyout */ - if (!(localbuff = (char *)kalloc(localbuff_size))) { + localbuff = kheap_alloc(KHEAP_DATA_BUFFERS, localbuff_size, Z_WAITOK); + if (!localbuff) { printf("log_dmesg: unable to allocate memory\n"); return ENOMEM; } /* in between here, the log could become bigger, but that's fine */ - LOG_LOCK(); + bsd_log_lock_safe(); /* * The message buffer is circular; start at the write pointer, and @@ -1376,56 +1375,50 @@ log_dmesg(user_addr_t buffer, uint32_t buffersize, int32_t * retval) copysize = buffersize; } - LOG_UNLOCK(); + bsd_log_unlock(); error = copyout(copystart, buffer, copysize); if (!error) { - *retval = copysize; + *retval = (int32_t)copysize; } - kfree(localbuff, localbuff_size); + kheap_free(KHEAP_DATA_BUFFERS, localbuff, localbuff_size); return error; } #ifdef CONFIG_XNUPOST -uint32_t find_pattern_in_buffer(char * pattern, uint32_t len, int expected_count); +size_t find_pattern_in_buffer(const char *, size_t, size_t); /* * returns count of pattern found in systemlog buffer. * stops searching further if count reaches expected_count. 
*/ -uint32_t -find_pattern_in_buffer(char * pattern, uint32_t len, int expected_count) +size_t +find_pattern_in_buffer(const char *pattern, size_t len, size_t expected_count) { - int match_count = 0; - int i = 0; - int j = 0; - int no_match = 0; - int pos = 0; - char ch = 0; - if (pattern == NULL || len == 0 || expected_count == 0) { return 0; } - for (i = 0; i < msgbufp->msg_size; i++) { - no_match = 0; - for (j = 0; j < (int)len; j++) { - pos = (msgbufp->msg_bufx + i + j) % msgbufp->msg_size; - ch = msgbufp->msg_bufc[pos]; - if (ch != pattern[j]) { - no_match = 1; + size_t msg_bufx = msgbufp->msg_bufx; + size_t msg_size = msgbufp->msg_size; + size_t match_count = 0; + + for (size_t i = 0; i < msg_size; i++) { + boolean_t match = TRUE; + for (size_t j = 0; j < len; j++) { + size_t pos = (msg_bufx + i + j) % msg_size; + if (msgbufp->msg_bufc[pos] != pattern[j]) { + match = FALSE; break; } } - if (no_match == 0) { - match_count++; - if (match_count >= expected_count) { - break; - } + if (match && ++match_count >= expected_count) { + break; } } + return match_count; } diff --git a/bsd/kern/subr_prf.c b/bsd/kern/subr_prf.c index 0d7973826..d5d5c7de7 100644 --- a/bsd/kern/subr_prf.c +++ b/bsd/kern/subr_prf.c @@ -102,9 +102,12 @@ #include /* for vaddlog(): the following are implemented in osfmk/kern/printf.c */ -extern void bsd_log_lock(void); +extern bool bsd_log_lock(bool); extern void bsd_log_unlock(void); +uint32_t vaddlog_msgcount = 0; +uint32_t vaddlog_msgcount_dropped = 0; + /* Keep this around only because it's exported */ void _printf(int, struct tty *, const char *, ...); @@ -145,6 +148,7 @@ snprintf_func(int ch, void *arg); struct putchar_args { int flags; struct tty *tty; + bool last_char_was_cr; }; static void putchar(int c, void *arg); @@ -235,14 +239,6 @@ tprintf(tpr_t tpr, const char *fmt, ...) tty_unlock(tp); } - pca.flags = TOLOG; - pca.tty = TTY_NULL; - va_start(ap, fmt); - __doprnt(fmt, ap, putchar, &pca, 10, TRUE); - va_end(ap); - - logwakeup(msgbufp); - va_start(ap, fmt); os_log_with_args(OS_LOG_DEFAULT, OS_LOG_TYPE_DEFAULT, fmt, ap, __builtin_return_address(0)); va_end(ap); @@ -276,7 +272,7 @@ ttyprintf(struct tty *tp, const char *fmt, ...) 
void logtime(time_t secs) { - printf("Time %ld Message ", secs); + printf("Time 0x%lx Message ", secs); } static void @@ -285,7 +281,7 @@ putchar_asl(int c, void *arg) struct putchar_args *pca = arg; if ((pca->flags & TOLOGLOCKED) && c != '\0' && c != '\r' && c != 0177) { - log_putc_locked(aslbufp, c); + log_putc_locked(aslbufp, (char)c); } putchar(c, arg); } @@ -296,16 +292,21 @@ putchar_asl(int c, void *arg) int vaddlog(const char *fmt, va_list ap) { + if (!bsd_log_lock(oslog_is_safe())) { + os_atomic_inc(&vaddlog_msgcount_dropped, relaxed); + return 1; + } + struct putchar_args pca = { .flags = TOLOGLOCKED, .tty = NULL, }; - bsd_log_lock(); __doprnt(fmt, ap, putchar_asl, &pca, 10, TRUE); bsd_log_unlock(); logwakeup(NULL); + os_atomic_inc(&vaddlog_msgcount, relaxed); return 0; } @@ -378,36 +379,51 @@ putchar(int c, void *arg) constty = 0; } if ((pca->flags & TOLOG) && c != '\0' && c != '\r' && c != 0177) { - log_putc(c); + log_putc((char)c); } if ((pca->flags & TOLOGLOCKED) && c != '\0' && c != '\r' && c != 0177) { - log_putc_locked(msgbufp, c); + log_putc_locked(msgbufp, (char)c); } if ((pca->flags & TOCONS) && constty == 0 && c != '\0') { - cnputc(c); + cnputc((char)c); } if (pca->flags & TOSTR) { - **sp = c; + **sp = (char)c; (*sp)++; } + + pca->last_char_was_cr = ('\n' == c); } -int +bool +printf_log_locked(bool addcr, const char *fmt, ...) +{ + bool retval; + va_list args; + + va_start(args, fmt); + retval = vprintf_log_locked(fmt, args, addcr); + va_end(args); + + return retval; +} + +bool vprintf_log_locked(const char *fmt, va_list ap, bool addcr) { struct putchar_args pca; pca.flags = TOLOGLOCKED; pca.tty = NULL; + pca.last_char_was_cr = false; __doprnt(fmt, ap, putchar, &pca, 10, TRUE); if (addcr) { putchar('\n', &pca); } - return 0; + return pca.last_char_was_cr; } -#if !CONFIG_EMBEDDED - +#if CONFIG_VSPRINTF /* * Scaled down version of vsprintf(3). * @@ -429,7 +445,7 @@ vsprintf(char *buf, const char *cfmt, va_list ap) } return 0; } -#endif /* !CONFIG_EMBEDDED */ +#endif /* CONFIG_VSPRINTF */ /* * Scaled down version of snprintf(3). @@ -472,7 +488,7 @@ vscnprintf(char *buf, size_t size, const char *fmt, va_list args) i = vsnprintf(buf, size, fmt, args); - return (i >= ssize) ? (ssize - 1) : i; + return (i >= ssize) ? (int)(ssize - 1) : i; } int @@ -494,7 +510,7 @@ snprintf_func(int ch, void *arg) struct snprintf_arg *const info = arg; if (info->remain >= 2) { - *info->str++ = ch; + *info->str++ = (char)ch; info->remain--; } } diff --git a/bsd/kern/subr_sbuf.c b/bsd/kern/subr_sbuf.c index e9d175fb4..190485145 100644 --- a/bsd/kern/subr_sbuf.c +++ b/bsd/kern/subr_sbuf.c @@ -1,472 +1,534 @@ -/*- - * Copyright (c) 2000 Poul-Henning Kamp and Dag-Erling Co•dan Sm¿rgrav - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer - * in this position and unchanged. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. The name of the author may not be used to endorse or promote products - * derived from this software without specific prior written permission. 
- * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* + * Copyright (c) 2020 Apple Inc. All rights reserved. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. + * + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ - -#include - +#include +#include +#include +#include #include - -#ifdef KERNEL -/* #include */ -#include -#include -#include -#include -#include -#include -#include -#else /* KERNEL */ -#include -#include -#include -#include -#include -#endif /* KERNEL */ - #include +#include -#ifdef KERNEL -/* MALLOC_DEFINE(M_SBUF, "sbuf", "string buffers"); */ -#define SBMALLOC(size) _MALLOC(size, M_SBUF, M_WAITOK) -#define SBFREE(buf) FREE(buf, M_SBUF) -#else /* KERNEL */ -#define KASSERT(e, m) -#define SBMALLOC(size) malloc(size) -#define SBFREE(buf) free(buf) -#define min(x, y) MIN(x,y) - -#endif /* KERNEL */ - -/* - * Predicates - */ -#define SBUF_ISDYNAMIC(s) ((s)->s_flags & SBUF_DYNAMIC) -#define SBUF_ISDYNSTRUCT(s) ((s)->s_flags & SBUF_DYNSTRUCT) -#define SBUF_ISFINISHED(s) ((s)->s_flags & SBUF_FINISHED) -#define SBUF_HASOVERFLOWED(s) ((s)->s_flags & SBUF_OVERFLOWED) -#define SBUF_HASROOM(s) ((s)->s_len < (s)->s_size - 1) -#define SBUF_FREESPACE(s) ((s)->s_size - (s)->s_len - 1) -#define SBUF_CANEXTEND(s) ((s)->s_flags & SBUF_AUTOEXTEND) +#if DEBUG || DEVELOPMENT +#include +#include +#include +#endif /* DEBUG || DEVELOPMENT */ -/* - * Set / clear flags - */ +#define SBUF_ISSET(s, f) ((s)->s_flags & (f)) #define SBUF_SETFLAG(s, f) do { (s)->s_flags |= (f); } while (0) #define SBUF_CLEARFLAG(s, f) do { (s)->s_flags &= ~(f); } while (0) -#define SBUF_MINEXTENDSIZE 16 /* Should be power of 2. 
*/ +#define SBUF_CANEXTEND(s) SBUF_ISSET(s, SBUF_AUTOEXTEND) +#define SBUF_HASOVERFLOWED(s) SBUF_ISSET(s, SBUF_OVERFLOWED) +#define SBUF_ISDYNAMIC(s) SBUF_ISSET(s, SBUF_DYNAMIC) +#define SBUF_ISDYNSTRUCT(s) SBUF_ISSET(s, SBUF_DYNSTRUCT) +#define SBUF_ISFINISHED(s) SBUF_ISSET(s, SBUF_FINISHED) + +#define SBUF_MINEXTENDSIZE 16 #define SBUF_MAXEXTENDSIZE PAGE_SIZE #define SBUF_MAXEXTENDINCR PAGE_SIZE -/* - * Debugging support +/*! + * @function sbuf_delete + * + * @brief + * Destroys an sbuf. Frees the underlying buffer if it's allocated on the heap + * (indicated by SBUF_ISDYNAMIC) and frees the sbuf if it itself is allocated + * on the heap (SBUF_ISDYNSTRUCT). + * + * @param s + * The sbuf to destroy. */ -#if defined(KERNEL) && defined(INVARIANTS) -static void -_assert_sbuf_integrity(const char *fun, struct sbuf *s) -{ - KASSERT(s != NULL, - ("%s called with a NULL sbuf pointer", fun)); - KASSERT(s->s_buf != NULL, - ("%s called with uninitialized or corrupt sbuf", fun)); - KASSERT(s->s_len < s->s_size, - ("wrote past end of sbuf (%d >= %d)", s->s_len, s->s_size)); -} - -static void -_assert_sbuf_state(const char *fun, struct sbuf *s, int state) -{ - KASSERT((s->s_flags & SBUF_FINISHED) == state, - ("%s called with %sfinished or corrupt sbuf", fun, - (state ? "un" : ""))); -} -#define assert_sbuf_integrity(s) _assert_sbuf_integrity(__func__, (s)) -#define assert_sbuf_state(s, i) _assert_sbuf_state(__func__, (s), (i)) -#else /* KERNEL && INVARIANTS */ -#define assert_sbuf_integrity(s) do { } while (0) -#define assert_sbuf_state(s, i) do { } while (0) -#endif /* KERNEL && INVARIANTS */ - -static int -sbuf_extendsize(int size) +void +sbuf_delete(struct sbuf *s) { - int newsize; - - newsize = SBUF_MINEXTENDSIZE; - while (newsize < size) { - if (newsize < (int)SBUF_MAXEXTENDSIZE) { - newsize *= 2; - } else { - newsize += SBUF_MAXEXTENDINCR; - } + if (SBUF_ISDYNAMIC(s) && s->s_buf) { + kheap_free(KHEAP_DATA_BUFFERS, s->s_buf, s->s_size); + s->s_buf = NULL; } - return newsize; + if (SBUF_ISDYNSTRUCT(s)) { + kheap_free(KHEAP_DEFAULT, s, sizeof(*s)); + } } - -/* - * Extend an sbuf. +/*! + * @function sbuf_extendsize + * + * @brief + * Attempts to extend the size of an sbuf to the value pointed to by size. + * + * @param size + * Points to a size_t containing the desired size for input and receives the + * actual new size on success (which will be greater than or equal to the + * requested size). + * + * @returns + * 0 on success, -1 on failure. */ static int -sbuf_extend(struct sbuf *s, int addlen) +sbuf_extendsize(size_t *size) { - char *newbuf; - int newsize; + size_t target_size = *size; + size_t new_size; - if (!SBUF_CANEXTEND(s)) { + if (target_size > INT_MAX) { return -1; } - newsize = sbuf_extendsize(s->s_size + addlen); - newbuf = (char *)SBMALLOC(newsize); - if (newbuf == NULL) { - return -1; - } - bcopy(s->s_buf, newbuf, s->s_size); - if (SBUF_ISDYNAMIC(s)) { - SBFREE(s->s_buf); + if (target_size < SBUF_MAXEXTENDSIZE) { + new_size = SBUF_MINEXTENDSIZE; + while (new_size < target_size) { + new_size *= 2; + } } else { - SBUF_SETFLAG(s, SBUF_DYNAMIC); + /* round up to nearest page: */ + new_size = (target_size + PAGE_SIZE - 1) & ~PAGE_MASK; + } + + if (new_size > INT_MAX) { + return -1; } - s->s_buf = newbuf; - s->s_size = newsize; + + *size = new_size; return 0; } -/* - * Initialize an sbuf. - * If buf is non-NULL, it points to a static or already-allocated string - * big enough to hold at least length characters. +/*! 
+ * @function sbuf_new + * + * @brief + * Allocates and/or initializes an sbuf. + * + * @param s + * An optional existing sbuf to initialize. If NULL, a new one is allocated on + * the heap. + * + * @param buf + * An optional existing backing buffer to assign to the sbuf. If NULL, a new + * one is allocated on the heap. + * + * @param length_ + * The initial size of the sbuf. The actual size may be greater than this + * value. + * + * @param flags + * The flags to set on the sbuf. Accepted values are: + * + * - SBUF_FIXEDLEN: Do not allow the backing buffer to dynamically expand + * to accommodate appended data. + * - SBUF_AUTOEXPAND: Automatically reallocate the backing buffer using the + * heap if required. + * + * @returns + * The new and/or initialized sbuf on success, or NULL on failure. */ struct sbuf * -sbuf_new(struct sbuf *s, char *buf, int length, int flags) +sbuf_new(struct sbuf *s, char *buf, int length_, int flags) { - KASSERT(length >= 0, - ("attempt to create an sbuf of negative length (%d)", length)); - KASSERT((flags & ~SBUF_USRFLAGMSK) == 0, - ("%s called with invalid flags", __func__)); + size_t length = (size_t)length_; + + if (length > INT_MAX || flags & ~SBUF_USRFLAGMSK) { + return NULL; + } - flags &= SBUF_USRFLAGMSK; if (s == NULL) { - s = (struct sbuf *)SBMALLOC(sizeof *s); - if (s == NULL) { + s = (struct sbuf *)kheap_alloc(KHEAP_DEFAULT, sizeof(*s), Z_WAITOK); + if (NULL == s) { return NULL; } - bzero(s, sizeof *s); + + bzero(s, sizeof(*s)); s->s_flags = flags; SBUF_SETFLAG(s, SBUF_DYNSTRUCT); } else { - bzero(s, sizeof *s); + bzero(s, sizeof(*s)); s->s_flags = flags; } - s->s_size = length; + if (buf) { + s->s_size = (int)length; s->s_buf = buf; return s; } - if (flags & SBUF_AUTOEXTEND) { - s->s_size = sbuf_extendsize(s->s_size); + + if (SBUF_CANEXTEND(s) && (-1 == sbuf_extendsize(&length))) { + goto fail; } - s->s_buf = (char *)SBMALLOC(s->s_size); - if (s->s_buf == NULL) { - if (SBUF_ISDYNSTRUCT(s)) { - SBFREE(s); - } - return NULL; + + /* + * we always need at least 1 byte for \0, so s_size of 0 will cause an + * underflow in sbuf_capacity. + */ + if (length == 0) { + goto fail; } + + s->s_buf = (char *)kheap_alloc(KHEAP_DATA_BUFFERS, length, Z_WAITOK); + if (NULL == s->s_buf) { + goto fail; + } + bzero(s->s_buf, length); + s->s_size = (int)length; + SBUF_SETFLAG(s, SBUF_DYNAMIC); return s; + +fail: + sbuf_delete(s); + return NULL; } -#ifdef KERNEL -/* - * Create an sbuf with uio data +/*! + * @function sbuf_setpos + * + * @brief + * Set the current position of the sbuf. + * + * @param s + * The sbuf to modify. + * + * @param pos + * The new position to set. Must be less than or equal to the current position. + * + * @returns + * 0 on success, -1 on failure. */ -struct sbuf * -sbuf_uionew(struct sbuf *s, struct uio *uio, int *error) +int +sbuf_setpos(struct sbuf *s, int pos) { - KASSERT(uio != NULL, - ("%s called with NULL uio pointer", __func__)); - KASSERT(error != NULL, - ("%s called with NULL error pointer", __func__)); - - s = sbuf_new(s, NULL, uio_resid(uio) + 1, 0); - if (s == NULL) { - *error = ENOMEM; - return NULL; - } - *error = uiomove(s->s_buf, uio_resid(uio), uio); - if (*error != 0) { - sbuf_delete(s); - return NULL; + if (pos < 0 || pos > s->s_len) { + return -1; } - s->s_len = s->s_size - 1; - *error = 0; - return s; + + s->s_len = pos; + return 0; } -#endif -/* - * Clear an sbuf and reset its position. +/*! 
+ * @function sbuf_clear + * + * @brief + * Resets the position/length of the sbuf data to zero and clears the finished + * and overflow flags. + * + * @param s + * The sbuf to clear. */ void sbuf_clear(struct sbuf *s) { - assert_sbuf_integrity(s); - /* don't care if it's finished or not */ - SBUF_CLEARFLAG(s, SBUF_FINISHED); SBUF_CLEARFLAG(s, SBUF_OVERFLOWED); - s->s_len = 0; + sbuf_setpos(s, 0); } -/* - * Set the sbuf's end position to an arbitrary value. - * Effectively truncates the sbuf at the new position. +/*! + * @function sbuf_extend + * + * @brief + * Attempt to extend the size of an sbuf's backing buffer by @a addlen bytes. + * + * @param s + * The sbuf to extend. + * + * @param addlen + * How many bytes to increase the size by. + * + * @returns + * 0 on success, -1 on failure. */ -int -sbuf_setpos(struct sbuf *s, int pos) +static int OS_WARN_RESULT +sbuf_extend(struct sbuf *s, size_t addlen) { - assert_sbuf_integrity(s); - assert_sbuf_state(s, 0); + char *new_buf; + size_t new_size; - KASSERT(pos >= 0, - ("attempt to seek to a negative position (%d)", pos)); - KASSERT(pos < s->s_size, - ("attempt to seek past end of sbuf (%d >= %d)", pos, s->s_size)); + if (addlen == 0) { + return 0; + } - if (pos < 0 || pos > s->s_len) { + if (!SBUF_CANEXTEND(s)) { return -1; } - s->s_len = pos; - return 0; -} - -/* - * Append a byte string to an sbuf. - */ -int -sbuf_bcat(struct sbuf *s, const void *buf, size_t len) -{ - const char *str = buf; - - assert_sbuf_integrity(s); - assert_sbuf_state(s, 0); - if (SBUF_HASOVERFLOWED(s)) { + if (os_add_overflow((size_t)s->s_size, addlen, &new_size)) { return -1; } - for (; len; len--) { - if (!SBUF_HASROOM(s) && sbuf_extend(s, len) < 0) { - break; - } - s->s_buf[s->s_len++] = *str++; - } - if (len) { - SBUF_SETFLAG(s, SBUF_OVERFLOWED); + if (-1 == sbuf_extendsize(&new_size)) { return -1; } - return 0; -} - -#ifdef KERNEL -/* - * Copy a byte string from userland into an sbuf. - */ -int -sbuf_bcopyin(struct sbuf *s, const void *uaddr, size_t len) -{ - assert_sbuf_integrity(s); - assert_sbuf_state(s, 0); - if (SBUF_HASOVERFLOWED(s)) { + new_buf = (char *)kheap_alloc(KHEAP_DATA_BUFFERS, new_size, Z_WAITOK); + if (NULL == new_buf) { return -1; } - if (len == 0) { - return 0; - } - if (len > (unsigned) SBUF_FREESPACE(s)) { - sbuf_extend(s, len - SBUF_FREESPACE(s)); - len = min(len, SBUF_FREESPACE(s)); - } - if (copyin(CAST_USER_ADDR_T(uaddr), s->s_buf + s->s_len, len) != 0) { - return -1; + bcopy(s->s_buf, new_buf, (size_t)s->s_size); + if (SBUF_ISDYNAMIC(s)) { + kheap_free(KHEAP_DATA_BUFFERS, s->s_buf, (size_t)s->s_size); + } else { + SBUF_SETFLAG(s, SBUF_DYNAMIC); } - s->s_len += len; + s->s_buf = new_buf; + s->s_size = (int)new_size; return 0; } -#endif -/* - * Copy a byte string into an sbuf. +/*! + * @function sbuf_capacity + * + * @brief + * Get the current capacity of an sbuf: how many more bytes we can append given + * the current size and position. + * + * @param s + * The sbuf to get the capacity of. + * + * @returns + * The current sbuf capacity. */ -int -sbuf_bcpy(struct sbuf *s, const void *buf, size_t len) +static size_t +sbuf_capacity(const struct sbuf *s) { - assert_sbuf_integrity(s); - assert_sbuf_state(s, 0); + /* 1 byte reserved for \0: */ + return (size_t)(s->s_size - s->s_len - 1); +} - sbuf_clear(s); - return sbuf_bcat(s, buf, len); +/*! + * @function sbuf_ensure_capacity + * + * @brief + * Ensure that an sbuf can accommodate @a add_len bytes, reallocating the + * backing buffer if necessary. + * + * @param s + * The sbuf. 
+ * + * @param wanted + * The minimum capacity to ensure @a s has. + * + * @returns + * 0 if the minimum capacity is met by @a s, or -1 on error. + */ +static int +sbuf_ensure_capacity(struct sbuf *s, size_t wanted) +{ + size_t size; + + size = sbuf_capacity(s); + if (size >= wanted) { + return 0; + } + + return sbuf_extend(s, wanted - size); } -/* - * Append a string to an sbuf. +/*! + * @function sbuf_bcat + * + * @brief + * Append data to an sbuf. + * + * @param s + * The sbuf. + * + * @param data + * The data to append. + * + * @param len + * The length of the data. + * + * @returns + * 0 on success, -1 on failure. Will always fail if the sbuf is marked as + * overflowed. */ int -sbuf_cat(struct sbuf *s, const char *str) +sbuf_bcat(struct sbuf *s, const void *data, size_t len) { - assert_sbuf_integrity(s); - assert_sbuf_state(s, 0); - if (SBUF_HASOVERFLOWED(s)) { return -1; } - while (*str) { - if (!SBUF_HASROOM(s) && sbuf_extend(s, strlen(str)) < 0) { - break; - } - s->s_buf[s->s_len++] = *str++; - } - if (*str) { + if (-1 == sbuf_ensure_capacity(s, len)) { SBUF_SETFLAG(s, SBUF_OVERFLOWED); return -1; } + + bcopy(data, s->s_buf + s->s_len, len); + s->s_len += (int)len; /* safe */ + return 0; } -#ifdef KERNEL -/* - * Append a string from userland to an sbuf. +/*! + * @function sbuf_bcpy + * + * @brief + * Set the entire sbuf data, possibly reallocating the backing buffer to + * accommodate. + * + * @param s + * The sbuf. + * + * @param data + * The data to set. + * + * @param len + * The length of the data to set. + * + * @returns + * 0 on success or -1 on failure. Will clear the finished/overflowed flags. */ int -sbuf_copyin(struct sbuf *s, const void *uaddr, size_t len) +sbuf_bcpy(struct sbuf *s, const void *data, size_t len) { - size_t done; - - assert_sbuf_integrity(s); - assert_sbuf_state(s, 0); - - if (SBUF_HASOVERFLOWED(s)) { - return -1; - } - - if (len == 0) { - len = SBUF_FREESPACE(s); /* XXX return 0? */ - } - if (len > (unsigned) SBUF_FREESPACE(s)) { - sbuf_extend(s, len); - len = min(len, SBUF_FREESPACE(s)); - } - switch (copyinstr(CAST_USER_ADDR_T(uaddr), s->s_buf + s->s_len, len + 1, &done)) { - case ENAMETOOLONG: - SBUF_SETFLAG(s, SBUF_OVERFLOWED); - /* fall through */ - case 0: - s->s_len += done - 1; - break; - default: - return -1; /* XXX */ - } + sbuf_clear(s); + return sbuf_bcat(s, data, len); +} - return done; +/*! + * @function sbuf_cat + * + * @brief + * Append a string to an sbuf, possibly expanding the backing buffer to + * accommodate. + * + * @param s + * The sbuf. + * + * @param str + * The string to append. + * + * @returns + * 0 on success, -1 on failure. Always fails if the sbuf is marked as + * overflowed. + */ +int +sbuf_cat(struct sbuf *s, const char *str) +{ + return sbuf_bcat(s, str, strlen(str)); } -#endif -/* - * Copy a string into an sbuf. +/*! + * @function sbuf_cpy + * + * @brief + * Set the entire sbuf data to the given nul-terminated string, possibly + * expanding the backing buffer to accommodate it if necessary. + * + * @param s + * The sbuf. + * + * @param str + * The string to set the sbuf data to. + * + * @returns + * 0 on success, -1 on failure. Clears and resets the sbuf first. */ int sbuf_cpy(struct sbuf *s, const char *str) { - assert_sbuf_integrity(s); - assert_sbuf_state(s, 0); - sbuf_clear(s); return sbuf_cat(s, str); } -/* - * Format the given argument list and append the resulting string to an sbuf. +/*! + * @function sbuf_vprintf + * + * @brief + * Formatted-print into an sbuf using a va_list. + * + * @param s + * The sbuf. 
+ * + * @param fmt + * The format string. + * + * @param ap + * The format string argument data. + * + * @returns + * 0 on success, -1 on failure. Always fails if the sbuf is marked as + * overflowed. */ int sbuf_vprintf(struct sbuf *s, const char *fmt, va_list ap) { - __builtin_va_list ap_copy; /* XXX tduffy - blame on him */ - int len; - - assert_sbuf_integrity(s); - assert_sbuf_state(s, 0); - - KASSERT(fmt != NULL, - ("%s called with a NULL format string", __func__)); + va_list ap_copy; + int result; + size_t capacity; + size_t len; if (SBUF_HASOVERFLOWED(s)) { return -1; } do { + capacity = sbuf_capacity(s); + va_copy(ap_copy, ap); - len = vsnprintf(&s->s_buf[s->s_len], SBUF_FREESPACE(s) + 1, - fmt, ap_copy); + /* +1 for \0. safe because we already accommodate this. */ + result = vsnprintf(&s->s_buf[s->s_len], capacity + 1, fmt, ap_copy); va_end(ap_copy); - } while (len > SBUF_FREESPACE(s) && - sbuf_extend(s, len - SBUF_FREESPACE(s)) == 0); - /* - * s->s_len is the length of the string, without the terminating nul. - * When updating s->s_len, we must subtract 1 from the length that - * we passed into vsnprintf() because that length includes the - * terminating nul. - * - * vsnprintf() returns the amount that would have been copied, - * given sufficient space, hence the min() calculation below. - */ - s->s_len += min(len, SBUF_FREESPACE(s)); - if (!SBUF_HASROOM(s) && !SBUF_CANEXTEND(s)) { - SBUF_SETFLAG(s, SBUF_OVERFLOWED); - } + if (result < 0) { + return -1; + } - KASSERT(s->s_len < s->s_size, - ("wrote past end of sbuf (%d >= %d)", s->s_len, s->s_size)); + len = (size_t)result; + if (len <= capacity) { + s->s_len += (int)len; + return 0; + } + } while (-1 != sbuf_ensure_capacity(s, len)); - if (SBUF_HASOVERFLOWED(s)) { - return -1; - } - return 0; + SBUF_SETFLAG(s, SBUF_OVERFLOWED); + return -1; } -/* - * Format the given arguments and append the resulting string to an sbuf. +/*! + * @function sbuf_printf + * + * @brief + * Formatted-print into an sbuf using variadic arguments. + * + * @param s + * The sbuf. + * + * @param fmt + * The format string. + * + * @returns + * 0 on success, -1 on failure. Always fails if the sbuf is marked as + * overflowed. */ int sbuf_printf(struct sbuf *s, const char *fmt, ...) @@ -480,26 +542,40 @@ sbuf_printf(struct sbuf *s, const char *fmt, ...) return result; } -/* - * Append a character to an sbuf. +/*! + * @function sbuf_putc + * + * @brief + * Append a single character to an sbuf. Ignores '\0'. + * + * @param s + * The sbuf. + * + * @param c_ + * The character to append. + * + * @returns + * 0 on success, -1 on failure. This function will always fail if the sbuf is + * marked as overflowed. */ int -sbuf_putc(struct sbuf *s, int c) +sbuf_putc(struct sbuf *s, int c_) { - assert_sbuf_integrity(s); - assert_sbuf_state(s, 0); + char c = (char)c_; if (SBUF_HASOVERFLOWED(s)) { return -1; } - if (!SBUF_HASROOM(s) && sbuf_extend(s, 1) < 0) { + if (-1 == sbuf_ensure_capacity(s, 1)) { SBUF_SETFLAG(s, SBUF_OVERFLOWED); return -1; } + if (c != '\0') { s->s_buf[s->s_len++] = c; } + return 0; } @@ -509,102 +585,1608 @@ isspace(char ch) return ch == ' ' || ch == '\n' || ch == '\t'; } -/* - * Trim whitespace characters from end of an sbuf. +/*! + * @function sbuf_trim + * + * @brief + * Removes whitespace from the end of an sbuf. + * + * @param s + * The sbuf. + * + * @returns + * 0 on success or -1 if the sbuf is marked as overflowed. 
*/ int sbuf_trim(struct sbuf *s) { - assert_sbuf_integrity(s); - assert_sbuf_state(s, 0); - if (SBUF_HASOVERFLOWED(s)) { return -1; } - while (s->s_len && isspace(s->s_buf[s->s_len - 1])) { + while (s->s_len > 0 && isspace(s->s_buf[s->s_len - 1])) { --s->s_len; } return 0; } -/* - * Check if an sbuf overflowed +/*! + * @function sbuf_overflowed + * + * @brief + * Indicates whether the sbuf is marked as overflowed. + * + * @param s + * The sbuf. + * + * @returns + * 1 if the sbuf has overflowed or 0 otherwise. */ int sbuf_overflowed(struct sbuf *s) { - return SBUF_HASOVERFLOWED(s); + return !!SBUF_HASOVERFLOWED(s); } -/* - * Finish off an sbuf. +/*! + * @function sbuf_finish + * + * @brief + * Puts a trailing nul byte onto the sbuf data. + * + * @param s + * The sbuf. */ void sbuf_finish(struct sbuf *s) { - assert_sbuf_integrity(s); - assert_sbuf_state(s, 0); - + /* safe because we always reserve a byte at the end for \0: */ s->s_buf[s->s_len] = '\0'; SBUF_CLEARFLAG(s, SBUF_OVERFLOWED); SBUF_SETFLAG(s, SBUF_FINISHED); } -/* - * Return a pointer to the sbuf data. +/*! + * @function sbuf_data + * + * @brief + * Gets a pointer to the sbuf backing data. + * + * @param s + * The sbuf. + * + * @returns + * A pointer to the sbuf data. */ char * sbuf_data(struct sbuf *s) { - assert_sbuf_integrity(s); - assert_sbuf_state(s, SBUF_FINISHED); - return s->s_buf; } -/* - * Return the length of the sbuf data. +/*! + * @function sbuf_len + * + * @brief + * Retrieves the current length of the sbuf data. + * + * @param s + * The sbuf + * + * @returns + * The length of the sbuf data or -1 if the sbuf is marked as overflowed. */ int sbuf_len(struct sbuf *s) { - assert_sbuf_integrity(s); - /* don't care if it's finished or not */ - if (SBUF_HASOVERFLOWED(s)) { return -1; } + return s->s_len; } -/* - * Clear an sbuf, free its buffer if necessary. +/*! + * @function sbuf_done + * + * @brief + * Tests if the sbuf is marked as finished. + * + * @param s + * The sbuf. + * + * @returns + * 1 if the sbuf is marked as finished or 0 if not. */ -void -sbuf_delete(struct sbuf *s) +int +sbuf_done(struct sbuf *s) { - int isdyn; + return !!SBUF_ISFINISHED(s); +} - assert_sbuf_integrity(s); - /* don't care if it's finished or not */ +/*! + * @function sbuf_uionew + * + * @brief + * Create a new sbuf and initialize its buffer with data from the given uio. + * + * @param s + * An optional existing sbuf to initialize, or NULL to allocate a new one. + * + * @param uio + * The uio describing the data to populate the sbuf with. + * + * @param error + * An output parameter to report any error to. + * + * @returns + * The new and/or initialized sbuf, or NULL on error. The error code is + * reported back via @a error. + */ +struct sbuf * +sbuf_uionew(struct sbuf *s, struct uio *uio, int *error) +{ + int size; - if (SBUF_ISDYNAMIC(s)) { - SBFREE(s->s_buf); + if ((user_size_t)uio_resid(uio) > INT_MAX - 1) { + *error = EINVAL; + return NULL; } - isdyn = SBUF_ISDYNSTRUCT(s); - bzero(s, sizeof *s); - if (isdyn) { - SBFREE(s); + + size = (int)uio_resid(uio); + s = sbuf_new(s, NULL, size + 1, 0); + if (s == NULL) { + *error = ENOMEM; + return NULL; } + + *error = uiomove(s->s_buf, size, uio); + if (*error != 0) { + sbuf_delete(s); + return NULL; + } + + s->s_len = size; + *error = 0; + + return s; } -/* - * Check if an sbuf has been finished. +/*! + * @function sbuf_bcopyin + * + * @brief + * Append userland data to an sbuf. + * + * @param s + * The sbuf. + * + * @param uaddr + * The userland address of data to append to the sbuf. 
+ * + * @param len + * The length of the data to copy from userland. + * + * @returns + * 0 on success or -1 on error. Always returns -1 if the sbuf is marked as + * overflowed. */ int -sbuf_done(struct sbuf *s) +sbuf_bcopyin(struct sbuf *s, const void *uaddr, size_t len) +{ + if (SBUF_HASOVERFLOWED(s)) { + return -1; + } + + if (len == 0) { + return 0; + } + + if (-1 == sbuf_ensure_capacity(s, len)) { + SBUF_SETFLAG(s, SBUF_OVERFLOWED); + return -1; + } + + if (copyin(CAST_USER_ADDR_T(uaddr), &s->s_buf[s->s_len], len) != 0) { + return -1; + } + + s->s_len += (int)len; + return 0; +} + +/*! + * @function sbuf_copyin + * + * @brief + * Append a userland string to an sbuf. + * + * @param s + * The sbuf. + * + * @param uaddr + * The userland address of the string to append to the sbuf. + * + * @param len + * The maximum length of the string to copy. If zero, the current capacity of + * the sbuf is used. + * + * @returns + * The number of bytes copied or -1 if an error occurred. Always returns -1 if + * the sbuf is marked as overflowed. + */ +int +sbuf_copyin(struct sbuf *s, const void *uaddr, size_t len) { - return SBUF_ISFINISHED(s); + size_t done; + + if (SBUF_HASOVERFLOWED(s)) { + return -1; + } + + if (len == 0) { + len = sbuf_capacity(s); + } else if (-1 == sbuf_ensure_capacity(s, len)) { + return -1; + } + + switch (copyinstr(CAST_USER_ADDR_T(uaddr), &s->s_buf[s->s_len], len + 1, &done)) { + case ENAMETOOLONG: + SBUF_SETFLAG(s, SBUF_OVERFLOWED); + s->s_len += done; + return -1; + case 0: + s->s_len += done - 1; + break; + default: + return -1; + } + + return (int)done; +} + +#if DEBUG || DEVELOPMENT + +/* + * a = assertion string + */ +#define SBUF_FAIL(a) \ + MACRO_BEGIN \ + printf("sbuf_tests: failed assertion: %s\n", a); \ + if (what != NULL && should != NULL) { \ + printf("sbuf_tests: while testing: %s should %s\n", what, should); \ + } \ + goto fail; \ + MACRO_END + +#define SBUF_PASS \ + ++passed + +/* + * x = expression + */ +#define SBUF_ASSERT(x) \ + MACRO_BEGIN \ + if (x) { \ + SBUF_PASS; \ + } else { \ + SBUF_FAIL(#x); \ + } \ + MACRO_END + +#define SBUF_ASSERT_NOT(x) \ + SBUF_ASSERT(!(x)) + +/* + * e = expected + * a = actual + * c = comparator + */ +#define SBUF_ASSERT_CMP(e, a, c) \ + MACRO_BEGIN \ + if ((a) c (e)) { \ + SBUF_PASS; \ + } else { \ + SBUF_FAIL(#a " " #c " " #e); \ + } \ + MACRO_END + +#define SBUF_ASSERT_EQ(e, a) SBUF_ASSERT_CMP(e, a, ==) +#define SBUF_ASSERT_NE(e, a) SBUF_ASSERT_CMP(e, a, !=) +#define SBUF_ASSERT_GT(e, a) SBUF_ASSERT_CMP(e, a, >) +#define SBUF_ASSERT_GTE(e, a) SBUF_ASSERT_CMP(e, a, >=) +#define SBUF_ASSERT_LT(e, a) SBUF_ASSERT_CMP(e, a, <) +#define SBUF_ASSERT_LTE(e, a) SBUF_ASSERT_CMP(e, a, <=) + +#define SBUF_TEST_BEGIN \ + size_t passed = 0; \ + const char *what = NULL; \ + const char *should = NULL; + +/* + * include the trailing semi-colons here intentionally to allow for block-like + * appearance: + */ +#define SBUF_TESTING(f) \ + MACRO_BEGIN \ + what = (f); \ + MACRO_END; + +#define SBUF_SHOULD(s) \ + MACRO_BEGIN \ + should = (s); \ + MACRO_END; + +#define SBUF_TEST_END \ + printf("sbuf_tests: %zu assertions passed\n", passed); \ + return 0; \ +fail: \ + return ENOTRECOVERABLE; + +static int +sysctl_sbuf_tests SYSCTL_HANDLER_ARGS +{ +#pragma unused(arg1, arg2) + int rval = 0; + char str[32] = { 'o', 'k', 0 }; + + rval = sysctl_handle_string(oidp, str, sizeof(str), req); + if (rval != 0 || req->newptr == 0 || req->newlen < 1) { + return rval; + } + + SBUF_TEST_BEGIN; + + SBUF_TESTING("sbuf_new") + { + SBUF_SHOULD("fail to allocate 
>INT_MAX") + { + struct sbuf *s = NULL; + + s = sbuf_new(NULL, NULL, INT_MAX + 1, 0); + SBUF_ASSERT_EQ(NULL, s); + } + + SBUF_SHOULD("fail when claiming a backing buffer >INT_MAX") + { + struct sbuf *s = NULL; + char buf[4] = { 0 }; + + s = sbuf_new(NULL, buf, INT_MAX + 1, 0); + SBUF_ASSERT_EQ(NULL, s); + } + + SBUF_SHOULD("fail to allocate a zero-length sbuf") + { + struct sbuf *s = NULL; + + s = sbuf_new(NULL, NULL, 0, 0); + SBUF_ASSERT_EQ(NULL, s); + } + + SBUF_SHOULD("not accept invalid flags") + { + struct sbuf *s = NULL; + + s = sbuf_new(NULL, NULL, 16, 0x10000); + SBUF_ASSERT_EQ(NULL, s); + } + + SBUF_SHOULD("succeed when passed an existing sbuf") + { + struct sbuf *s = NULL; + struct sbuf existing; + + memset(&existing, 0x41, sizeof(existing)); + s = sbuf_new(&existing, NULL, 16, SBUF_AUTOEXTEND); + SBUF_ASSERT_EQ(&existing, s); + SBUF_ASSERT(SBUF_ISSET(s, SBUF_AUTOEXTEND)); + SBUF_ASSERT(SBUF_ISSET(s, SBUF_DYNAMIC)); + SBUF_ASSERT_NE(NULL, s->s_buf); + SBUF_ASSERT_NE(0, s->s_size); + SBUF_ASSERT_EQ(0, s->s_len); + + sbuf_delete(s); + } + + SBUF_SHOULD("succeed when passed an existing sbuf and buffer") + { + struct sbuf *s = NULL; + struct sbuf existing; + char buf[4] = { 0 }; + + memset(&existing, 0x41, sizeof(existing)); + s = sbuf_new(&existing, buf, sizeof(buf), 0); + SBUF_ASSERT_EQ(&existing, s); + SBUF_ASSERT_EQ(buf, s->s_buf); + SBUF_ASSERT_EQ(4, s->s_size); + SBUF_ASSERT_EQ(0, s->s_len); + + sbuf_delete(s); + } + + SBUF_SHOULD("succeed without an existing sbuf or buffer") + { + struct sbuf *s = NULL; + + s = sbuf_new(NULL, NULL, 16, 0); + SBUF_ASSERT_NE(NULL, s); + SBUF_ASSERT(SBUF_ISSET(s, SBUF_DYNAMIC)); + SBUF_ASSERT(SBUF_ISSET(s, SBUF_DYNSTRUCT)); + SBUF_ASSERT_NE(NULL, s->s_buf); + SBUF_ASSERT_NE(0, s->s_size); + SBUF_ASSERT_EQ(0, s->s_len); + + sbuf_delete(s); + } + + SBUF_SHOULD("succeed without an existing sbuf, but with a buffer") + { + struct sbuf *s = NULL; + char buf[4] = { 0 }; + + s = sbuf_new(NULL, buf, sizeof(buf), 0); + SBUF_ASSERT_NE(NULL, s); + SBUF_ASSERT(SBUF_ISSET(s, SBUF_DYNSTRUCT)); + SBUF_ASSERT_EQ(buf, s->s_buf); + SBUF_ASSERT_EQ(4, s->s_size); + SBUF_ASSERT_EQ(0, s->s_len); + + sbuf_delete(s); + } + + SBUF_SHOULD("round up the requested size if SBUF_AUTOEXTEND") + { + struct sbuf *s = NULL; + + s = sbuf_new(NULL, NULL, 1, SBUF_AUTOEXTEND); + SBUF_ASSERT_GT(1, s->s_size); + + sbuf_delete(s); + } + } + + SBUF_TESTING("sbuf_clear") + { + SBUF_SHOULD("clear the overflowed and finished flags") + { + struct sbuf *s = NULL; + + s = sbuf_new(NULL, NULL, 16, 0); + + SBUF_SETFLAG(s, SBUF_OVERFLOWED); + SBUF_ASSERT(SBUF_ISSET(s, SBUF_OVERFLOWED)); + SBUF_SETFLAG(s, SBUF_FINISHED); + SBUF_ASSERT(SBUF_ISSET(s, SBUF_FINISHED)); + sbuf_clear(s); + SBUF_ASSERT_NOT(SBUF_ISSET(s, SBUF_OVERFLOWED)); + SBUF_ASSERT_NOT(SBUF_ISSET(s, SBUF_FINISHED)); + + sbuf_delete(s); + } + + SBUF_SHOULD("reset the position to zero") + { + struct sbuf *s = NULL; + + s = sbuf_new(NULL, NULL, 16, 0); + + s->s_len = 1; + sbuf_clear(s); + SBUF_ASSERT_EQ(0, s->s_len); + + sbuf_delete(s); + } + } + + SBUF_TESTING("sbuf_extend") + { + SBUF_SHOULD("allow zero") + { + struct sbuf *s = NULL; + int size_before; + + s = sbuf_new(NULL, NULL, 16, SBUF_AUTOEXTEND); + size_before = s->s_size; + SBUF_ASSERT_EQ(0, sbuf_extend(s, 0)); + SBUF_ASSERT_EQ(size_before, s->s_size); + + sbuf_delete(s); + } + + SBUF_SHOULD("fail for sbuf not marked as SBUF_AUTOEXTEND") + { + struct sbuf *s = NULL; + + s = sbuf_new(NULL, NULL, 16, SBUF_FIXEDLEN); + SBUF_ASSERT_EQ(-1, sbuf_extend(s, 10)); + + sbuf_delete(s); + } 
+ + SBUF_SHOULD("accommodate reasonable requests") + { + struct sbuf *s = NULL; + int size_before; + + s = sbuf_new(NULL, NULL, 16, SBUF_AUTOEXTEND); + size_before = s->s_size; + + SBUF_ASSERT_EQ(0, sbuf_extend(s, 10)); + SBUF_ASSERT_GTE(10, s->s_size - size_before); + + sbuf_delete(s); + } + + SBUF_SHOULD("reject requests that cause overflows") + { + struct sbuf *s = NULL; + + s = sbuf_new(NULL, NULL, 16, SBUF_AUTOEXTEND); + SBUF_ASSERT_EQ(-1, sbuf_extend(s, SIZE_MAX)); + SBUF_ASSERT_EQ(-1, sbuf_extend(s, INT_MAX)); + + sbuf_delete(s); + } + + SBUF_SHOULD("transform the sbuf into an SBUF_DYNAMIC one") + { + struct sbuf *s = NULL; + char buf[4] = { 0 }; + + s = sbuf_new(NULL, buf, sizeof(buf), SBUF_AUTOEXTEND); + SBUF_ASSERT_NOT(SBUF_ISSET(s, SBUF_DYNAMIC)); + SBUF_ASSERT_EQ(0, sbuf_extend(s, 10)); + SBUF_ASSERT(SBUF_ISSET(s, SBUF_DYNAMIC)); + + sbuf_delete(s); + } + } + + SBUF_TESTING("sbuf_capacity") + { + SBUF_SHOULD("account for the trailing nul byte") + { + struct sbuf *s = NULL; + + s = sbuf_new(NULL, NULL, 16, 0); + SBUF_ASSERT_EQ(s->s_size - s->s_len - 1, sbuf_capacity(s)); + + sbuf_delete(s); + } + } + + SBUF_TESTING("sbuf_ensure_capacity") + { + SBUF_SHOULD("return 0 if the sbuf already has enough capacity") + { + struct sbuf *s = NULL; + int size_before; + + s = sbuf_new(NULL, NULL, 16, SBUF_AUTOEXTEND); + size_before = s->s_size; + SBUF_ASSERT_EQ(0, sbuf_ensure_capacity(s, 5)); + SBUF_ASSERT_EQ(size_before, s->s_size); + + sbuf_delete(s); + } + + SBUF_SHOULD("extend the buffer as needed") + { + struct sbuf *s = NULL; + int size_before; + + s = sbuf_new(NULL, NULL, 16, SBUF_AUTOEXTEND); + size_before = s->s_size; + SBUF_ASSERT_EQ(0, sbuf_ensure_capacity(s, 30)); + SBUF_ASSERT_GT(size_before, s->s_size); + + sbuf_delete(s); + } + } + + SBUF_TESTING("sbuf_bcat") + { + SBUF_SHOULD("fail if the sbuf is marked as overflowed") + { + struct sbuf *s = NULL; + + s = sbuf_new(NULL, NULL, 16, SBUF_AUTOEXTEND); + SBUF_SETFLAG(s, SBUF_OVERFLOWED); + SBUF_ASSERT_EQ(-1, sbuf_bcat(s, "A", 1)); + + sbuf_delete(s); + } + + SBUF_SHOULD("fail if len is too big") + { + struct sbuf *s = NULL; + + s = sbuf_new(NULL, NULL, 16, SBUF_AUTOEXTEND); + SBUF_ASSERT_EQ(-1, sbuf_bcat(s, "A", INT_MAX)); + SBUF_ASSERT(SBUF_ISSET(s, SBUF_OVERFLOWED)); + + sbuf_delete(s); + } + + SBUF_SHOULD("succeed for a fixed buf within limits") + { + struct sbuf *s = NULL; + + s = sbuf_new(NULL, NULL, 16, SBUF_FIXEDLEN); + SBUF_ASSERT_EQ(0, sbuf_bcat(s, "ABC", 3)); + SBUF_ASSERT_EQ(3, s->s_len); + SBUF_ASSERT_EQ('A', s->s_buf[0]); + SBUF_ASSERT_EQ('B', s->s_buf[1]); + SBUF_ASSERT_EQ('C', s->s_buf[2]); + + sbuf_delete(s); + } + + SBUF_SHOULD("succeed for binary data, even with nul bytes") + { + struct sbuf *s = NULL; + + s = sbuf_new(NULL, NULL, 16, SBUF_FIXEDLEN); + SBUF_ASSERT_EQ(0, sbuf_bcat(s, "A\0C", 3)); + SBUF_ASSERT_EQ(3, s->s_len); + SBUF_ASSERT_EQ('A', s->s_buf[0]); + SBUF_ASSERT_EQ('\0', s->s_buf[1]); + SBUF_ASSERT_EQ('C', s->s_buf[2]); + + sbuf_delete(s); + } + + SBUF_SHOULD("append to existing data") + { + struct sbuf *s = NULL; + + s = sbuf_new(NULL, NULL, 16, SBUF_FIXEDLEN); + SBUF_ASSERT_EQ(0, sbuf_bcat(s, "ABC", 3)); + SBUF_ASSERT_EQ(3, s->s_len); + SBUF_ASSERT_EQ('A', s->s_buf[0]); + SBUF_ASSERT_EQ('B', s->s_buf[1]); + SBUF_ASSERT_EQ('C', s->s_buf[2]); + + SBUF_ASSERT_EQ(0, sbuf_bcat(s, "DEF", 3)); + SBUF_ASSERT_EQ(6, s->s_len); + SBUF_ASSERT_EQ('D', s->s_buf[3]); + SBUF_ASSERT_EQ('E', s->s_buf[4]); + SBUF_ASSERT_EQ('F', s->s_buf[5]); + + sbuf_delete(s); + } + + SBUF_SHOULD("succeed for a fixed buf right up to 
the limit") + { + struct sbuf *s = NULL; + + s = sbuf_new(NULL, NULL, 16, SBUF_FIXEDLEN); + SBUF_ASSERT_EQ(0, sbuf_bcat(s, "0123456789abcde", 15)); + SBUF_ASSERT_EQ(15, s->s_len); + + sbuf_delete(s); + } + + SBUF_SHOULD("fail for a fixed buf if too big") + { + struct sbuf *s = NULL; + + s = sbuf_new(NULL, NULL, 16, SBUF_FIXEDLEN); + SBUF_ASSERT_EQ(-1, sbuf_bcat(s, "0123456789abcdef", 16)); + SBUF_ASSERT(SBUF_ISSET(s, SBUF_OVERFLOWED)); + + sbuf_delete(s); + } + + SBUF_SHOULD("expand the backing buffer as needed") + { + struct sbuf *s = NULL; + int size_before; + + s = sbuf_new(NULL, NULL, 16, SBUF_AUTOEXTEND); + size_before = s->s_size; + SBUF_ASSERT_EQ(0, sbuf_bcat(s, "0123456789abcdef", 16)); + SBUF_ASSERT_GT(size_before, s->s_size); + SBUF_ASSERT_EQ(16, s->s_len); + + sbuf_delete(s); + } + } + + SBUF_TESTING("sbuf_bcpy") + { + SBUF_SHOULD("overwrite any existing data") + { + struct sbuf *s = NULL; + + s = sbuf_new(NULL, NULL, 16, 0); + SBUF_ASSERT_EQ(0, sbuf_bcpy(s, "ABC", 3)); + SBUF_ASSERT_EQ(3, s->s_len); + SBUF_ASSERT_EQ('A', s->s_buf[0]); + SBUF_ASSERT_EQ('B', s->s_buf[1]); + SBUF_ASSERT_EQ('C', s->s_buf[2]); + + SBUF_ASSERT_EQ(0, sbuf_bcpy(s, "XYZ123", 6)); + SBUF_ASSERT_EQ(6, s->s_len); + SBUF_ASSERT_EQ('X', s->s_buf[0]); + SBUF_ASSERT_EQ('Y', s->s_buf[1]); + SBUF_ASSERT_EQ('Z', s->s_buf[2]); + SBUF_ASSERT_EQ('1', s->s_buf[3]); + SBUF_ASSERT_EQ('2', s->s_buf[4]); + SBUF_ASSERT_EQ('3', s->s_buf[5]); + + sbuf_delete(s); + } + + SBUF_SHOULD("succeed if the sbuf is marked as overflowed, but there is space") + { + struct sbuf *s = NULL; + + s = sbuf_new(NULL, NULL, 16, SBUF_AUTOEXTEND); + SBUF_SETFLAG(s, SBUF_OVERFLOWED); + SBUF_ASSERT_EQ(0, sbuf_bcpy(s, "A", 1)); + + sbuf_delete(s); + } + + SBUF_SHOULD("fail if len is too big") + { + struct sbuf *s = NULL; + + s = sbuf_new(NULL, NULL, 16, SBUF_AUTOEXTEND); + SBUF_ASSERT_EQ(-1, sbuf_bcpy(s, "A", INT_MAX)); + SBUF_ASSERT(SBUF_ISSET(s, SBUF_OVERFLOWED)); + + sbuf_delete(s); + } + + SBUF_SHOULD("succeed for a fixed buf within limits") + { + struct sbuf *s = NULL; + + s = sbuf_new(NULL, NULL, 16, SBUF_FIXEDLEN); + SBUF_ASSERT_EQ(0, sbuf_bcpy(s, "ABC", 3)); + SBUF_ASSERT_EQ(3, s->s_len); + SBUF_ASSERT_EQ('A', s->s_buf[0]); + SBUF_ASSERT_EQ('B', s->s_buf[1]); + SBUF_ASSERT_EQ('C', s->s_buf[2]); + + sbuf_delete(s); + } + + SBUF_SHOULD("succeed for a fixed buf right up to the limit") + { + struct sbuf *s = NULL; + + s = sbuf_new(NULL, NULL, 16, SBUF_FIXEDLEN); + SBUF_ASSERT_EQ(0, sbuf_bcpy(s, "0123456789abcde", 15)); + SBUF_ASSERT_EQ(15, s->s_len); + + sbuf_delete(s); + } + + SBUF_SHOULD("fail for a fixed buf if too big") + { + struct sbuf *s = NULL; + + s = sbuf_new(NULL, NULL, 16, SBUF_FIXEDLEN); + SBUF_ASSERT_EQ(-1, sbuf_bcpy(s, "0123456789abcdef", 16)); + SBUF_ASSERT(SBUF_ISSET(s, SBUF_OVERFLOWED)); + + sbuf_delete(s); + } + + SBUF_SHOULD("expand the backing buffer as needed") + { + struct sbuf *s = NULL; + int size_before; + + s = sbuf_new(NULL, NULL, 16, SBUF_AUTOEXTEND); + size_before = s->s_size; + SBUF_ASSERT_EQ(0, sbuf_bcpy(s, "0123456789abcdef", 16)); + SBUF_ASSERT_GT(size_before, s->s_size); + SBUF_ASSERT_EQ(16, s->s_len); + + sbuf_delete(s); + } + } + + SBUF_TESTING("sbuf_cat") + { + SBUF_SHOULD("fail if the sbuf is marked as overflowed") + { + struct sbuf *s = NULL; + + s = sbuf_new(NULL, NULL, 16, SBUF_AUTOEXTEND); + SBUF_SETFLAG(s, SBUF_OVERFLOWED); + SBUF_ASSERT_EQ(-1, sbuf_cat(s, "A")); + + sbuf_delete(s); + } + + SBUF_SHOULD("succeed for a fixed buf within limits") + { + struct sbuf *s = NULL; + + s = sbuf_new(NULL, NULL, 
16, SBUF_FIXEDLEN); + SBUF_ASSERT_EQ(0, sbuf_cat(s, "ABC")); + SBUF_ASSERT_EQ(3, s->s_len); + SBUF_ASSERT_EQ('A', s->s_buf[0]); + SBUF_ASSERT_EQ('B', s->s_buf[1]); + SBUF_ASSERT_EQ('C', s->s_buf[2]); + + sbuf_delete(s); + } + + SBUF_SHOULD("only copy up to a nul byte") + { + struct sbuf *s = NULL; + + s = sbuf_new(NULL, NULL, 16, SBUF_FIXEDLEN); + SBUF_ASSERT_EQ(0, sbuf_cat(s, "A\0C")); + SBUF_ASSERT_EQ(1, s->s_len); + SBUF_ASSERT_EQ('A', s->s_buf[0]); + + sbuf_delete(s); + } + + SBUF_SHOULD("append to existing data") + { + struct sbuf *s = NULL; + + s = sbuf_new(NULL, NULL, 16, SBUF_FIXEDLEN); + SBUF_ASSERT_EQ(0, sbuf_cat(s, "ABC")); + SBUF_ASSERT_EQ(3, s->s_len); + SBUF_ASSERT_EQ('A', s->s_buf[0]); + SBUF_ASSERT_EQ('B', s->s_buf[1]); + SBUF_ASSERT_EQ('C', s->s_buf[2]); + + SBUF_ASSERT_EQ(0, sbuf_cat(s, "DEF")); + SBUF_ASSERT_EQ(6, s->s_len); + SBUF_ASSERT_EQ('D', s->s_buf[3]); + SBUF_ASSERT_EQ('E', s->s_buf[4]); + SBUF_ASSERT_EQ('F', s->s_buf[5]); + + sbuf_delete(s); + } + + SBUF_SHOULD("succeed for a fixed buf right up to the limit") + { + struct sbuf *s = NULL; + + s = sbuf_new(NULL, NULL, 16, SBUF_FIXEDLEN); + SBUF_ASSERT_EQ(0, sbuf_cat(s, "0123456789abcde")); + SBUF_ASSERT_EQ(15, s->s_len); + + sbuf_delete(s); + } + + SBUF_SHOULD("fail for a fixed buf if too big") + { + struct sbuf *s = NULL; + + s = sbuf_new(NULL, NULL, 16, SBUF_FIXEDLEN); + SBUF_ASSERT_EQ(-1, sbuf_cat(s, "0123456789abcdef")); + SBUF_ASSERT(SBUF_ISSET(s, SBUF_OVERFLOWED)); + + sbuf_delete(s); + } + + SBUF_SHOULD("expand the backing buffer as needed") + { + struct sbuf *s = NULL; + int size_before; + + s = sbuf_new(NULL, NULL, 16, SBUF_AUTOEXTEND); + size_before = s->s_size; + SBUF_ASSERT_EQ(0, sbuf_cat(s, "0123456789abcdef")); + SBUF_ASSERT_GT(size_before, s->s_size); + SBUF_ASSERT_EQ(16, s->s_len); + + sbuf_delete(s); + } + } + + SBUF_TESTING("sbuf_cpy") + { + SBUF_SHOULD("overwrite any existing data") + { + struct sbuf *s = NULL; + + s = sbuf_new(NULL, NULL, 16, 0); + SBUF_ASSERT_EQ(0, sbuf_cpy(s, "ABC")); + SBUF_ASSERT_EQ(3, s->s_len); + SBUF_ASSERT_EQ('A', s->s_buf[0]); + SBUF_ASSERT_EQ('B', s->s_buf[1]); + SBUF_ASSERT_EQ('C', s->s_buf[2]); + + SBUF_ASSERT_EQ(0, sbuf_cpy(s, "XYZ123")); + SBUF_ASSERT_EQ(6, s->s_len); + SBUF_ASSERT_EQ('X', s->s_buf[0]); + SBUF_ASSERT_EQ('Y', s->s_buf[1]); + SBUF_ASSERT_EQ('Z', s->s_buf[2]); + SBUF_ASSERT_EQ('1', s->s_buf[3]); + SBUF_ASSERT_EQ('2', s->s_buf[4]); + SBUF_ASSERT_EQ('3', s->s_buf[5]); + + sbuf_delete(s); + } + + SBUF_SHOULD("succeed if the sbuf is marked as overflowed, but there is space") + { + struct sbuf *s = NULL; + + s = sbuf_new(NULL, NULL, 16, SBUF_AUTOEXTEND); + SBUF_SETFLAG(s, SBUF_OVERFLOWED); + SBUF_ASSERT_EQ(0, sbuf_bcpy(s, "A", 1)); + + sbuf_delete(s); + } + + SBUF_SHOULD("succeed for a fixed buf within limits") + { + struct sbuf *s = NULL; + + s = sbuf_new(NULL, NULL, 16, SBUF_FIXEDLEN); + SBUF_ASSERT_EQ(0, sbuf_cpy(s, "ABC")); + SBUF_ASSERT_EQ(3, s->s_len); + SBUF_ASSERT_EQ('A', s->s_buf[0]); + SBUF_ASSERT_EQ('B', s->s_buf[1]); + SBUF_ASSERT_EQ('C', s->s_buf[2]); + + sbuf_delete(s); + } + + SBUF_SHOULD("succeed for a fixed buf right up to the limit") + { + struct sbuf *s = NULL; + + s = sbuf_new(NULL, NULL, 16, SBUF_FIXEDLEN); + SBUF_ASSERT_EQ(0, sbuf_cpy(s, "0123456789abcde")); + SBUF_ASSERT_EQ(15, s->s_len); + + sbuf_delete(s); + } + + SBUF_SHOULD("fail for a fixed buf if too big") + { + struct sbuf *s = NULL; + + s = sbuf_new(NULL, NULL, 16, SBUF_FIXEDLEN); + SBUF_ASSERT_EQ(-1, sbuf_cpy(s, "0123456789abcdef")); + SBUF_ASSERT(SBUF_ISSET(s, 
SBUF_OVERFLOWED)); + + sbuf_delete(s); + } + + SBUF_SHOULD("expand the backing buffer as needed") + { + struct sbuf *s = NULL; + int size_before; + + s = sbuf_new(NULL, NULL, 16, SBUF_AUTOEXTEND); + size_before = s->s_size; + SBUF_ASSERT_EQ(0, sbuf_cpy(s, "0123456789abcdef")); + SBUF_ASSERT_GT(size_before, s->s_size); + SBUF_ASSERT_EQ(16, s->s_len); + + sbuf_delete(s); + } + } + + /* also tests sbuf_vprintf: */ + SBUF_TESTING("sbuf_printf") + { + SBUF_SHOULD("support simple printing") + { + struct sbuf *s = NULL; + + s = sbuf_new(NULL, NULL, 16, SBUF_AUTOEXTEND); + SBUF_ASSERT_EQ(0, sbuf_printf(s, "hello")); + SBUF_ASSERT_EQ(5, s->s_len); + SBUF_ASSERT_EQ('h', s->s_buf[0]); + SBUF_ASSERT_EQ('e', s->s_buf[1]); + SBUF_ASSERT_EQ('l', s->s_buf[2]); + SBUF_ASSERT_EQ('l', s->s_buf[3]); + SBUF_ASSERT_EQ('o', s->s_buf[4]); + + sbuf_delete(s); + } + + SBUF_SHOULD("support format strings") + { + struct sbuf *s = NULL; + char data1 = 'A'; + int data2 = 123; + const char *data3 = "foo"; + + s = sbuf_new(NULL, NULL, 16, SBUF_AUTOEXTEND); + SBUF_ASSERT_EQ(0, sbuf_printf(s, "%c %d %s", data1, data2, data3)); + SBUF_ASSERT_EQ(9, s->s_len); + SBUF_ASSERT_EQ('A', s->s_buf[0]); + SBUF_ASSERT_EQ(' ', s->s_buf[1]); + SBUF_ASSERT_EQ('1', s->s_buf[2]); + SBUF_ASSERT_EQ('2', s->s_buf[3]); + SBUF_ASSERT_EQ('3', s->s_buf[4]); + SBUF_ASSERT_EQ(' ', s->s_buf[5]); + SBUF_ASSERT_EQ('f', s->s_buf[6]); + SBUF_ASSERT_EQ('o', s->s_buf[7]); + SBUF_ASSERT_EQ('o', s->s_buf[8]); + + sbuf_delete(s); + } + + SBUF_SHOULD("work with the fact we reserve a nul byte at the end") + { + struct sbuf *s = NULL; + + s = sbuf_new(NULL, NULL, 16, SBUF_FIXEDLEN); + SBUF_ASSERT_EQ(0, sbuf_printf(s, "0123456789abcde")); + SBUF_ASSERT_NOT(SBUF_ISSET(s, SBUF_OVERFLOWED)); + + sbuf_delete(s); + } + + SBUF_SHOULD("mark the sbuf as overflowed if we try to write too much") + { + struct sbuf *s = NULL; + + s = sbuf_new(NULL, NULL, 16, SBUF_FIXEDLEN); + SBUF_ASSERT_EQ(-1, sbuf_printf(s, "0123456789abcdef")); + SBUF_ASSERT(SBUF_ISSET(s, SBUF_OVERFLOWED)); + + sbuf_delete(s); + } + + SBUF_SHOULD("auto-extend as necessary") + { + struct sbuf *s = NULL; + const char *data = "0123456789abcdef"; + int size_before; + size_t n; + + s = sbuf_new(NULL, NULL, 16, SBUF_AUTOEXTEND); + size_before = s->s_size; + SBUF_ASSERT_EQ(0, sbuf_printf(s, "%s", data)); + SBUF_ASSERT_GT(size_before, s->s_size); + + for (n = 0; n < strlen(data); ++n) { + SBUF_ASSERT_EQ(data[n], s->s_buf[n]); + } + + sbuf_delete(s); + } + + SBUF_SHOULD("fail if the sbuf is marked as overflowed") + { + struct sbuf *s = NULL; + + s = sbuf_new(NULL, NULL, 16, SBUF_FIXEDLEN); + SBUF_SETFLAG(s, SBUF_OVERFLOWED); + SBUF_ASSERT_EQ(-1, sbuf_printf(s, "A")); + + sbuf_delete(s); + } + } + + SBUF_TESTING("sbuf_putc") + { + SBUF_SHOULD("work where we have capacity") + { + struct sbuf *s = NULL; + + s = sbuf_new(NULL, NULL, 16, 0); + SBUF_ASSERT_EQ(0, sbuf_putc(s, 'a')); + SBUF_ASSERT_EQ(1, s->s_len); + SBUF_ASSERT_EQ('a', s->s_buf[0]); + + sbuf_delete(s); + } + + SBUF_SHOULD("fail if we have a full, fixedlen sbuf") + { + struct sbuf *s = NULL; + + s = sbuf_new(NULL, NULL, 16, SBUF_FIXEDLEN); + SBUF_ASSERT_EQ(0, sbuf_cpy(s, "0123456789abcd")); + SBUF_ASSERT_EQ(0, sbuf_putc(s, 'e')); + SBUF_ASSERT_EQ(-1, sbuf_putc(s, 'f')); + SBUF_ASSERT(SBUF_ISSET(s, SBUF_OVERFLOWED)); + + sbuf_delete(s); + } + + SBUF_SHOULD("ignore nul") + { + struct sbuf *s = NULL; + + s = sbuf_new(NULL, NULL, 16, SBUF_AUTOEXTEND); + SBUF_ASSERT_EQ(0, sbuf_putc(s, '\0')); + SBUF_ASSERT_EQ(0, s->s_len); + + sbuf_delete(s); + } + + 
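	/*
	 * A minimal usage sketch of the sbuf API exercised by these tests,
	 * assuming only the calls asserted elsewhere in this file (sbuf_new,
	 * sbuf_printf, sbuf_finish, sbuf_data, sbuf_len, sbuf_delete);
	 * "report_buf" is a hypothetical consumer, not part of the patch:
	 *
	 *     struct sbuf *s = sbuf_new(NULL, NULL, 64, SBUF_AUTOEXTEND);
	 *     if (s != NULL) {
	 *         sbuf_printf(s, "pid %d exited", 123);  // auto-extends as needed
	 *         sbuf_finish(s);                        // nul-terminates the data
	 *         report_buf(sbuf_data(s), sbuf_len(s)); // hypothetical consumer
	 *         sbuf_delete(s);                        // frees the backing buffer
	 *     }
	 */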
SBUF_SHOULD("auto-extend if necessary") + { + struct sbuf *s = NULL; + int len_before; + int size_before; + + s = sbuf_new(NULL, NULL, 16, SBUF_AUTOEXTEND); + SBUF_ASSERT_EQ(0, sbuf_cpy(s, "0123456789abcde")); + len_before = s->s_len; + size_before = s->s_size; + SBUF_ASSERT_EQ(0, sbuf_putc(s, 'f')); + SBUF_ASSERT_EQ(len_before + 1, s->s_len); + SBUF_ASSERT_GT(size_before, s->s_size); + SBUF_ASSERT_EQ('f', s->s_buf[s->s_len - 1]); + + sbuf_delete(s); + } + + SBUF_SHOULD("fail if the sbuf is overflowed") + { + struct sbuf *s = NULL; + + s = sbuf_new(NULL, NULL, 16, SBUF_AUTOEXTEND); + SBUF_SETFLAG(s, SBUF_OVERFLOWED); + SBUF_ASSERT_EQ(-1, sbuf_putc(s, 'a')); + + sbuf_delete(s); + } + } + + SBUF_TESTING("sbuf_trim") + { + SBUF_SHOULD("remove trailing spaces, tabs and newlines") + { + struct sbuf *s = NULL; + const char *test = "foo \t\t\n\t"; + + s = sbuf_new(NULL, NULL, 16, 0); + SBUF_ASSERT_EQ(0, sbuf_cpy(s, test)); + SBUF_ASSERT_EQ(strlen(test), s->s_len); + SBUF_ASSERT_EQ(0, sbuf_trim(s)); + SBUF_ASSERT_EQ(3, s->s_len); + + sbuf_delete(s); + } + + SBUF_SHOULD("do nothing if there is no trailing whitespace") + { + struct sbuf *s = NULL; + const char *test = "foo"; + + s = sbuf_new(NULL, NULL, 16, 0); + SBUF_ASSERT_EQ(0, sbuf_cpy(s, test)); + SBUF_ASSERT_EQ(strlen(test), s->s_len); + SBUF_ASSERT_EQ(0, sbuf_trim(s)); + SBUF_ASSERT_EQ(strlen(test), s->s_len); + + sbuf_delete(s); + } + + SBUF_SHOULD("fail if the sbuf is overflowed") + { + struct sbuf *s = NULL; + const char *test = "foo "; + + s = sbuf_new(NULL, NULL, 16, 0); + SBUF_ASSERT_EQ(0, sbuf_cpy(s, test)); + SBUF_SETFLAG(s, SBUF_OVERFLOWED); + SBUF_ASSERT_EQ(-1, sbuf_trim(s)); + SBUF_ASSERT_EQ(strlen(test), s->s_len); + + sbuf_delete(s); + } + + SBUF_SHOULD("work on empty strings") + { + struct sbuf *s = NULL; + + s = sbuf_new(NULL, NULL, 16, 0); + SBUF_ASSERT_EQ(0, sbuf_trim(s)); + SBUF_ASSERT_EQ(0, s->s_len); + + sbuf_delete(s); + } + } + + SBUF_TESTING("sbuf_overflowed") + { + SBUF_SHOULD("return false if it hasn't overflowed") + { + struct sbuf *s = NULL; + + s = sbuf_new(NULL, NULL, 16, 0); + SBUF_ASSERT_NOT(sbuf_overflowed(s)); + + sbuf_delete(s); + } + + SBUF_SHOULD("return true if it has overflowed") + { + struct sbuf *s = NULL; + + s = sbuf_new(NULL, NULL, 16, 0); + SBUF_SETFLAG(s, SBUF_OVERFLOWED); + SBUF_ASSERT(sbuf_overflowed(s)); + + sbuf_delete(s); + } + } + + SBUF_TESTING("sbuf_finish") + { + SBUF_SHOULD("insert a nul byte, clear the overflowed flag and set the finished flag") + { + struct sbuf *s = NULL; + + s = sbuf_new(NULL, NULL, 16, 0); + SBUF_ASSERT_EQ(0, sbuf_putc(s, 'A')); + s->s_buf[s->s_len] = 'x'; + SBUF_SETFLAG(s, SBUF_OVERFLOWED); + SBUF_ASSERT_NOT(SBUF_ISSET(s, SBUF_FINISHED)); + + sbuf_finish(s); + + SBUF_ASSERT_EQ(0, s->s_buf[s->s_len]); + SBUF_ASSERT_NOT(SBUF_ISSET(s, SBUF_OVERFLOWED)); + SBUF_ASSERT(SBUF_ISSET(s, SBUF_FINISHED)); + + sbuf_delete(s); + } + } + + SBUF_TESTING("sbuf_data") + { + SBUF_SHOULD("return the s_buf pointer") + { + struct sbuf *s = NULL; + + s = sbuf_new(NULL, NULL, 16, 0); + SBUF_ASSERT_EQ(s->s_buf, sbuf_data(s)); + + sbuf_delete(s); + } + + SBUF_SHOULD("return the buffer we gave it") + { + struct sbuf *s = NULL; + char buf[4] = { 0 }; + + s = sbuf_new(NULL, buf, sizeof(buf), 0); + SBUF_ASSERT_EQ(buf, sbuf_data(s)); + + sbuf_delete(s); + } + } + + SBUF_TESTING("sbuf_len") + { + SBUF_SHOULD("return the length of the sbuf data") + { + struct sbuf *s = NULL; + + s = sbuf_new(NULL, NULL, 16, 0); + SBUF_ASSERT_EQ(0, sbuf_cpy(s, "hello")); + SBUF_ASSERT_EQ(5, sbuf_len(s)); + + 
sbuf_delete(s); + } + + SBUF_SHOULD("return -1 if the sbuf is overflowed") + { + struct sbuf *s = NULL; + + s = sbuf_new(NULL, NULL, 16, 0); + SBUF_ASSERT_EQ(0, sbuf_cpy(s, "hello")); + SBUF_ASSERT_EQ(5, sbuf_len(s)); + SBUF_SETFLAG(s, SBUF_OVERFLOWED); + SBUF_ASSERT_EQ(-1, sbuf_len(s)); + + sbuf_delete(s); + } + } + + SBUF_TESTING("sbuf_done") + { + SBUF_SHOULD("return false if the sbuf isn't finished") + { + struct sbuf *s = NULL; + + s = sbuf_new(NULL, NULL, 16, 0); + SBUF_ASSERT_NOT(sbuf_done(s)); + + sbuf_delete(s); + } + + SBUF_SHOULD("return true if the sbuf has finished") + { + struct sbuf *s = NULL; + + s = sbuf_new(NULL, NULL, 16, 0); + SBUF_ASSERT_NOT(sbuf_done(s)); + SBUF_SETFLAG(s, SBUF_FINISHED); + SBUF_ASSERT(sbuf_done(s)); + + sbuf_delete(s); + } + } + + SBUF_TESTING("sbuf_delete") + { + SBUF_SHOULD("just free the backing buffer if we supplied an sbuf") + { + struct sbuf *s = NULL; + struct sbuf existing = {}; + + s = sbuf_new(&existing, NULL, 16, 0); + SBUF_ASSERT_NE(NULL, s->s_buf); + + sbuf_delete(s); + SBUF_ASSERT_EQ(NULL, s->s_buf); + } + } + + SBUF_TESTING("sbuf_uionew") + { + SBUF_SHOULD("reject residuals that are too large") + { + struct sbuf *s = NULL; + uio_t auio = NULL; + char buf[4]; + int error = 0; + + buf[0] = 'A'; + buf[1] = 'B'; + buf[2] = 'C'; + buf[3] = 'D'; + + auio = uio_create(1, 0, UIO_SYSSPACE, UIO_READ); + uio_addiov(auio, (user_addr_t)buf, INT_MAX); + + s = sbuf_uionew(NULL, auio, &error); + SBUF_ASSERT_EQ(NULL, s); + SBUF_ASSERT_EQ(EINVAL, error); + + uio_free(auio); + } + + SBUF_SHOULD("initialize using data described by the uio") + { + struct sbuf *s = NULL; + uio_t auio = NULL; + char buf[4]; + int error = 0; + + buf[0] = 'A'; + buf[1] = 'B'; + buf[2] = 'C'; + buf[3] = 'D'; + + auio = uio_create(1, 0, UIO_SYSSPACE, UIO_WRITE); + uio_addiov(auio, (user_addr_t)buf, sizeof(buf)); + + s = sbuf_uionew(NULL, auio, &error); + SBUF_ASSERT_NE(NULL, s); + SBUF_ASSERT_EQ(0, error); + SBUF_ASSERT_EQ(4, s->s_len); + SBUF_ASSERT_EQ('A', s->s_buf[0]); + SBUF_ASSERT_EQ('B', s->s_buf[1]); + SBUF_ASSERT_EQ('C', s->s_buf[2]); + SBUF_ASSERT_EQ('D', s->s_buf[3]); + + sbuf_delete(s); + uio_free(auio); + } + + SBUF_SHOULD("fail gracefully for bad addresses") + { + struct sbuf *s = NULL; + uio_t auio = NULL; + int error = 0; + + auio = uio_create(1, 0, UIO_USERSPACE, UIO_WRITE); + uio_addiov(auio, (user_addr_t)0xdeadUL, 123); + + s = sbuf_uionew(NULL, auio, &error); + SBUF_ASSERT_EQ(NULL, s); + SBUF_ASSERT_NE(0, error); + + uio_free(auio); + } + } + + SBUF_TESTING("sbuf_bcopyin") + { + SBUF_SHOULD("succeed when len is zero") + { + struct sbuf *s = NULL; + const void *uptr = (const void *)req->newptr; + + s = sbuf_new(NULL, NULL, 16, 0); + SBUF_ASSERT_EQ(0, sbuf_bcopyin(s, uptr, 0)); + SBUF_ASSERT_EQ(0, s->s_len); + + sbuf_delete(s); + } + + SBUF_SHOULD("succeed in the simple case") + { + struct sbuf *s = NULL; + const void *uptr = (const void *)req->newptr; + size_t ulen = req->newlen; + + s = sbuf_new(NULL, NULL, 16, 0); + SBUF_ASSERT_EQ(0, sbuf_bcopyin(s, uptr, ulen)); + SBUF_ASSERT_EQ(ulen, (size_t)s->s_len); + + sbuf_delete(s); + } + + SBUF_SHOULD("fail for invalid userland addresses") + { + struct sbuf *s = NULL; + const void *uptr = (const void *)0xdeadUL; + size_t ulen = req->newlen; + + s = sbuf_new(NULL, NULL, 16, 0); + SBUF_ASSERT_EQ(-1, sbuf_bcopyin(s, uptr, ulen)); + SBUF_ASSERT_EQ(0, s->s_len); + + sbuf_delete(s); + } + + SBUF_SHOULD("fail for kernel addresses") + { + struct sbuf *s = NULL; + const void *uptr = "abcd"; + size_t ulen = 4; + + s = 
sbuf_new(NULL, NULL, 16, 0); + SBUF_ASSERT_EQ(-1, sbuf_bcopyin(s, uptr, ulen)); + SBUF_ASSERT_EQ(0, s->s_len); + + sbuf_delete(s); + } + + SBUF_SHOULD("fail if we don't have capacity for a fixed-len sbuf") + { + struct sbuf *s = NULL; + const void *uptr = (const void *)req->newptr; + size_t ulen = req->newlen; + int len_before; + + s = sbuf_new(NULL, NULL, 16, SBUF_FIXEDLEN); + SBUF_ASSERT_EQ(0, sbuf_cpy(s, "0123456789abcde")); + len_before = s->s_len; + SBUF_ASSERT_EQ(-1, sbuf_bcopyin(s, uptr, ulen)); + SBUF_ASSERT_EQ(len_before, s->s_len); + SBUF_ASSERT(SBUF_ISSET(s, SBUF_OVERFLOWED)); + + sbuf_delete(s); + } + + SBUF_SHOULD("auto-extend if we don't have capacity for an auto-extend sbuf") + { + struct sbuf *s = NULL; + const void *uptr = (const void *)req->newptr; + size_t ulen = req->newlen; + int len_before; + + s = sbuf_new(NULL, NULL, 16, SBUF_AUTOEXTEND); + SBUF_ASSERT_EQ(0, sbuf_cpy(s, "0123456789abcde")); + len_before = s->s_len; + SBUF_ASSERT_EQ(0, sbuf_bcopyin(s, uptr, ulen)); + SBUF_ASSERT_EQ(len_before + (int)ulen, s->s_len); + SBUF_ASSERT_NOT(SBUF_ISSET(s, SBUF_OVERFLOWED)); + + sbuf_delete(s); + } + + SBUF_SHOULD("fail if overflowed") + { + struct sbuf *s = NULL; + const void *uptr = (const void *)req->newptr; + size_t ulen = req->newlen; + + s = sbuf_new(NULL, NULL, 16, 0); + SBUF_SETFLAG(s, SBUF_OVERFLOWED); + SBUF_ASSERT_EQ(-1, sbuf_bcopyin(s, uptr, ulen)); + + sbuf_delete(s); + } + } + + SBUF_TESTING("sbuf_copyin") + { + SBUF_SHOULD("succeed in the simple case") + { + struct sbuf *s = NULL; + + s = sbuf_new(NULL, NULL, 16, SBUF_AUTOEXTEND); + SBUF_ASSERT_EQ(req->newlen + 1, sbuf_copyin(s, (const void *)req->newptr, req->newlen)); + SBUF_ASSERT_EQ(req->newlen, s->s_len); + + sbuf_delete(s); + } + + SBUF_SHOULD("use the sbuf capacity if len is zero") + { + struct sbuf *s = NULL; + + s = sbuf_new(NULL, NULL, 16, SBUF_AUTOEXTEND); + SBUF_ASSERT_EQ(req->newlen + 1, sbuf_copyin(s, (const void *)req->newptr, 0)); + SBUF_ASSERT_EQ(req->newlen, s->s_len); + + sbuf_delete(s); + } + + SBUF_SHOULD("fail if we can't extend the sbuf to accommodate") + { + struct sbuf *s = NULL; + + s = sbuf_new(NULL, NULL, 16, SBUF_FIXEDLEN); + SBUF_ASSERT_EQ(0, sbuf_cpy(s, "0123456789abcde")); + SBUF_ASSERT_EQ(-1, sbuf_copyin(s, (const void *)req->newptr, req->newlen)); + + sbuf_delete(s); + } + + SBUF_SHOULD("auto-extend the buffer if necessary") + { + struct sbuf *s = NULL; + int len_before; + + s = sbuf_new(NULL, NULL, 16, SBUF_AUTOEXTEND); + SBUF_ASSERT_EQ(0, sbuf_cpy(s, "0123456789abcde")); + len_before = s->s_len; + SBUF_ASSERT_NE(-1, sbuf_copyin(s, (const void *)req->newptr, req->newlen)); + SBUF_ASSERT_GT(len_before, s->s_len); + + sbuf_delete(s); + } + + SBUF_SHOULD("fail if the sbuf is overflowed") + { + struct sbuf *s = NULL; + + s = sbuf_new(NULL, NULL, 16, SBUF_AUTOEXTEND); + SBUF_SETFLAG(s, SBUF_OVERFLOWED); + SBUF_ASSERT_EQ(-1, sbuf_copyin(s, (const void *)req->newptr, req->newlen)); + + sbuf_delete(s); + } + + SBUF_SHOULD("fail gracefully for an invalid address") + { + struct sbuf *s = NULL; + + s = sbuf_new(NULL, NULL, 16, SBUF_AUTOEXTEND); + SBUF_ASSERT_EQ(-1, sbuf_copyin(s, (void *)0xdeadUL, req->newlen)); + + sbuf_delete(s); + } + + SBUF_SHOULD("fail gracefully for a kernel address") + { + struct sbuf *s = NULL; + const char *ptr = "abcd"; + + s = sbuf_new(NULL, NULL, 16, SBUF_AUTOEXTEND); + SBUF_ASSERT_EQ(-1, sbuf_copyin(s, ptr, strlen(ptr))); + + sbuf_delete(s); + } + } + + SBUF_TEST_END; } + +SYSCTL_PROC(_kern, OID_AUTO, sbuf_test, CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_KERN | 
CTLFLAG_MASKED, 0, 0, sysctl_sbuf_tests, "A", "sbuf tests"); + +#endif /* DEBUG || DEVELOPMENT */ diff --git a/bsd/kern/subr_xxx.c b/bsd/kern/subr_xxx.c index c3f69f22f..d0297adc0 100644 --- a/bsd/kern/subr_xxx.c +++ b/bsd/kern/subr_xxx.c @@ -187,22 +187,3 @@ nosys(__unused struct proc *p, __unused struct nosys_args *args, __unused int32_ } return ENOSYS; } - -#if !CRYPTO -#include - -/* Stubs must be present in all configs for Unsupported KPI exports */ - -void -rc4_init(struct rc4_state *state __unused, const u_char *key __unused, int keylen __unused) -{ - panic("rc4_init: unsupported kernel configuration"); -} - -void -rc4_crypt(struct rc4_state *state __unused, - const u_char *inbuf __unused, u_char *outbuf __unused, int buflen __unused) -{ - panic("rc4_crypt: unsupported kernel configuration"); -} -#endif /* !CRYPTO */ diff --git a/bsd/kern/sys_coalition.c b/bsd/kern/sys_coalition.c index 5b1d7d7ac..28e3d3f40 100644 --- a/bsd/kern/sys_coalition.c +++ b/bsd/kern/sys_coalition.c @@ -43,15 +43,13 @@ coalition_create_syscall(user_addr_t cidp, uint32_t flags) return EINVAL; } - kr = coalition_create_internal(type, role, privileged, &coal); + kr = coalition_create_internal(type, role, privileged, &coal, &cid); if (kr != KERN_SUCCESS) { /* for now, the only kr is KERN_RESOURCE_SHORTAGE */ error = ENOMEM; goto out; } - cid = coalition_id(coal); - coal_dbg("(addr, %u) -> %llu", flags, cid); error = copyout(&cid, cidp, sizeof(cid)); out: @@ -238,7 +236,30 @@ coalition_info_resource_usage(coalition_t coal, user_addr_t buffer, user_size_t return copyout(&cru, buffer, MIN(bufsize, sizeof(cru))); } +#if CONFIG_THREAD_GROUPS +static int +coalition_info_set_name_internal(coalition_t coal, user_addr_t buffer, user_size_t bufsize) +{ + int error; + char name[THREAD_GROUP_MAXNAME]; + + if (coalition_type(coal) != COALITION_TYPE_JETSAM) { + return EINVAL; + } + bzero(name, sizeof(name)); + error = copyin(buffer, name, MIN(bufsize, sizeof(name) - 1)); + if (error) { + return error; + } + struct thread_group *tg = coalition_get_thread_group(coal); + thread_group_set_name(tg, name); + thread_group_release(tg); + return error; +} + +#else /* CONFIG_THREAD_GROUPS */ #define coalition_info_set_name_internal(...) 0 +#endif /* CONFIG_THREAD_GROUPS */ static int coalition_info_efficiency(coalition_t coal, user_addr_t buffer, user_size_t bufsize) @@ -257,6 +278,11 @@ coalition_info_efficiency(coalition_t coal, user_addr_t buffer, user_size_t bufs } if (flags & COALITION_FLAGS_EFFICIENT) { coalition_set_efficient(coal); +#if CONFIG_THREAD_GROUPS + struct thread_group *tg = coalition_get_thread_group(coal); + thread_group_set_flags(tg, THREAD_GROUP_FLAGS_EFFICIENT); + thread_group_release(tg); +#endif /* CONFIG_THREAD_GROUPS */ } return error; } diff --git a/bsd/kern/sys_eventlink.c b/bsd/kern/sys_eventlink.c new file mode 100644 index 000000000..7a326d44e --- /dev/null +++ b/bsd/kern/sys_eventlink.c @@ -0,0 +1,88 @@ +/* + * Copyright (c) 2000-2019 Apple Inc. All rights reserved. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. 
The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. + * + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ + */ + +#include +#include +#include +#include +#include +#include + +extern uint64_t +mach_eventlink_signal_trap( + mach_port_name_t port, + uint64_t signal_count __unused); + +extern uint64_t +mach_eventlink_wait_until_trap( + mach_port_name_t eventlink_port, + uint64_t wait_count, + mach_eventlink_signal_wait_option_t option, + kern_clock_id_t clock_id, + uint64_t deadline); + +extern uint64_t +mach_eventlink_signal_wait_until_trap( + mach_port_name_t eventlink_port, + uint64_t wait_count, + uint64_t signal_count __unused, + mach_eventlink_signal_wait_option_t option, + kern_clock_id_t clock_id, + uint64_t deadline); + +int +mach_eventlink_signal( + __unused proc_t p, + struct mach_eventlink_signal_args *uap, + uint64_t *retval) +{ + *retval = mach_eventlink_signal_trap(uap->eventlink_port, uap->signal_count); + return 0; +} + +int +mach_eventlink_wait_until( + __unused proc_t p, + struct mach_eventlink_wait_until_args *uap, + uint64_t *retval) +{ + *retval = mach_eventlink_wait_until_trap(uap->eventlink_port, uap->wait_count, + uap->option, uap->clock_id, uap->deadline); + return 0; +} + +int +mach_eventlink_signal_wait_until( + __unused proc_t p, + struct mach_eventlink_signal_wait_until_args *uap, + uint64_t *retval) +{ + *retval = mach_eventlink_signal_wait_until_trap(uap->eventlink_port, uap->wait_count, + uap->signal_count, uap->option, uap->clock_id, uap->deadline); + return 0; +} diff --git a/bsd/kern/sys_generic.c b/bsd/kern/sys_generic.c index bd2d1ad52..25b46a6e1 100644 --- a/bsd/kern/sys_generic.c +++ b/bsd/kern/sys_generic.c @@ -110,6 +110,7 @@ #include #include #include +#include #include #include @@ -139,7 +140,6 @@ #include /* for wait queue based select */ #include -#include #include /* for remote time api*/ #include @@ -154,14 +154,11 @@ #include /* XXX should be in a header file somewhere */ -void evsofree(struct socket *); -void evpipefree(struct pipe *); -void postpipeevent(struct pipe *, int); -void postevent(struct socket *, struct sockbuf *, int); extern kern_return_t IOBSDGetPlatformUUID(__darwin_uuid_t uuid, mach_timespec_t timeoutp); -int rd_uio(struct proc *p, int fdes, uio_t uio, user_ssize_t *retval); -int wr_uio(struct proc *p, struct fileproc *fp, uio_t uio, user_ssize_t *retval); +int rd_uio(struct proc *p, int fdes, uio_t uio, int is_preadv, user_ssize_t *retval); +int wr_uio(struct proc *p, int fdes, uio_t uio, int is_pwritev, user_ssize_t *retval); +int do_uiowrite(struct proc *p, struct fileproc *fp, uio_t uio, int flags, user_ssize_t *retval); __private_extern__ int 
dofileread(vfs_context_t ctx, struct fileproc *fp, user_addr_t bufp, user_size_t nbyte, @@ -169,8 +166,7 @@ __private_extern__ int dofileread(vfs_context_t ctx, struct fileproc *fp, __private_extern__ int dofilewrite(vfs_context_t ctx, struct fileproc *fp, user_addr_t bufp, user_size_t nbyte, off_t offset, int flags, user_ssize_t *retval); -__private_extern__ int preparefileread(struct proc *p, struct fileproc **fp_ret, int fd, int check_for_vnode); -__private_extern__ void donefileread(struct proc *p, struct fileproc *fp_ret, int fd); +static int preparefileread(struct proc *p, struct fileproc **fp_ret, int fd, int check_for_vnode); /* Conflict wait queue for when selects collide (opaque type) */ struct waitq select_conflict_queue; @@ -185,13 +181,11 @@ select_waitq_init(void) waitq_init(&select_conflict_queue, SYNC_POLICY_FIFO); } -#define f_flag f_fglob->fg_flag -#define f_type f_fglob->fg_ops->fo_type -#define f_msgcount f_fglob->fg_msgcount -#define f_cred f_fglob->fg_cred -#define f_ops f_fglob->fg_ops -#define f_offset f_fglob->fg_offset -#define f_data f_fglob->fg_data +#define f_flag fp_glob->fg_flag +#define f_type fp_glob->fg_ops->fo_type +#define f_cred fp_glob->fg_cred +#define f_ops fp_glob->fg_ops +#define f_data fp_glob->fg_data /* * Read system call. @@ -223,12 +217,12 @@ read_nocancel(struct proc *p, struct read_nocancel_args *uap, user_ssize_t *retv } context = *(vfs_context_current()); - context.vc_ucred = fp->f_fglob->fg_cred; + context.vc_ucred = fp->fp_glob->fg_cred; error = dofileread(&context, fp, uap->cbuf, uap->nbyte, (off_t)-1, 0, retval); - donefileread(p, fp, fd); + fp_drop(p, fd, fp, 0); return error; } @@ -263,12 +257,12 @@ pread_nocancel(struct proc *p, struct pread_nocancel_args *uap, user_ssize_t *re } context = *(vfs_context_current()); - context.vc_ucred = fp->f_fglob->fg_cred; + context.vc_ucred = fp->fp_glob->fg_cred; error = dofileread(&context, fp, uap->buf, uap->nbyte, uap->offset, FOF_OFFSET, retval); - donefileread(p, fp, fd); + fp_drop(p, fd, fp, 0); KERNEL_DEBUG_CONSTANT((BSDDBG_CODE(DBG_BSD_SC_EXTENDED_INFO, SYS_pread) | DBG_FUNC_NONE), uap->fd, uap->nbyte, (unsigned int)((uap->offset >> 32)), (unsigned int)(uap->offset), 0); @@ -281,23 +275,14 @@ out: * Code common for read and pread */ -void -donefileread(struct proc *p, struct fileproc *fp, int fd) -{ - proc_fdlock_spin(p); - fp_drop(p, fd, fp, 1); - proc_fdunlock(p); -} - /* * Returns: 0 Success * EBADF * ESPIPE * ENXIO * fp_lookup:EBADF - * fo_read:??? */ -int +static int preparefileread(struct proc *p, struct fileproc **fp_ret, int fd, int check_for_pread) { vnode_t vp; @@ -323,7 +308,7 @@ preparefileread(struct proc *p, struct fileproc **fp_ret, int fd, int check_for_ goto out; } if (fp->f_type == DTYPE_VNODE) { - vp = (struct vnode *)fp->f_fglob->fg_data; + vp = (struct vnode *)fp->fp_glob->fg_data; if (check_for_pread && (vnode_isfifo(vp))) { error = ESPIPE; @@ -394,35 +379,34 @@ dofileread(vfs_context_t ctx, struct fileproc *fp, } /* - * Scatter read system call. + * Vector read. * - * Returns: 0 Success - * EINVAL - * ENOMEM - * copyin:EFAULT - * rd_uio:??? + * Returns: 0 Success + * EINVAL + * ENOMEM + * preparefileread:EBADF + * preparefileread:ESPIPE + * preparefileread:ENXIO + * preparefileread:EBADF + * copyin:EFAULT + * rd_uio:??? 
*/ -int -readv(struct proc *p, struct readv_args *uap, user_ssize_t *retval) -{ - __pthread_testcancel(1); - return readv_nocancel(p, (struct readv_nocancel_args *)uap, retval); -} - -int -readv_nocancel(struct proc *p, struct readv_nocancel_args *uap, user_ssize_t *retval) +static int +readv_preadv_uio(struct proc *p, int fdes, + user_addr_t user_iovp, int iovcnt, off_t offset, int is_preadv, + user_ssize_t *retval) { uio_t auio = NULL; int error; struct user_iovec *iovp; - /* Verify range bedfore calling uio_create() */ - if (uap->iovcnt <= 0 || uap->iovcnt > UIO_MAXIOV) { + /* Verify range before calling uio_create() */ + if (iovcnt <= 0 || iovcnt > UIO_MAXIOV) { return EINVAL; } /* allocate a uio large enough to hold the number of iovecs passed */ - auio = uio_create(uap->iovcnt, 0, + auio = uio_create(iovcnt, offset, (IS_64BIT_PROCESS(p) ? UIO_USERSPACE64 : UIO_USERSPACE32), UIO_READ); @@ -434,9 +418,9 @@ readv_nocancel(struct proc *p, struct readv_nocancel_args *uap, user_ssize_t *re error = ENOMEM; goto ExitThisRoutine; } - error = copyin_user_iovec_array(uap->iovp, + error = copyin_user_iovec_array(user_iovp, IS_64BIT_PROCESS(p) ? UIO_USERSPACE64 : UIO_USERSPACE32, - uap->iovcnt, iovp); + iovcnt, iovp); if (error) { goto ExitThisRoutine; } @@ -447,7 +431,7 @@ readv_nocancel(struct proc *p, struct readv_nocancel_args *uap, user_ssize_t *re if (error) { goto ExitThisRoutine; } - error = rd_uio(p, uap->fd, auio, retval); + error = rd_uio(p, fdes, auio, is_preadv, retval); ExitThisRoutine: if (auio != NULL) { @@ -456,6 +440,38 @@ ExitThisRoutine: return error; } +/* + * Scatter read system call. + */ +int +readv(struct proc *p, struct readv_args *uap, user_ssize_t *retval) +{ + __pthread_testcancel(1); + return readv_nocancel(p, (struct readv_nocancel_args *)uap, retval); +} + +int +readv_nocancel(struct proc *p, struct readv_nocancel_args *uap, user_ssize_t *retval) +{ + return readv_preadv_uio(p, uap->fd, uap->iovp, uap->iovcnt, 0, 0, retval); +} + +/* + * Preadv system call + */ +int +sys_preadv(struct proc *p, struct preadv_args *uap, user_ssize_t *retval) +{ + __pthread_testcancel(1); + return sys_preadv_nocancel(p, (struct preadv_nocancel_args *)uap, retval); +} + +int +sys_preadv_nocancel(struct proc *p, struct preadv_nocancel_args *uap, user_ssize_t *retval) +{ + return readv_preadv_uio(p, uap->fd, uap->iovp, uap->iovcnt, uap->offset, 1, retval); +} + /* * Write system call * @@ -477,7 +493,6 @@ write_nocancel(struct proc *p, struct write_nocancel_args *uap, user_ssize_t *re struct fileproc *fp; int error; int fd = uap->fd; - bool wrote_some = false; AUDIT_ARG(fd, fd); @@ -493,18 +508,12 @@ write_nocancel(struct proc *p, struct write_nocancel_args *uap, user_ssize_t *re proc_fdunlock(p); } else { struct vfs_context context = *(vfs_context_current()); - context.vc_ucred = fp->f_fglob->fg_cred; + context.vc_ucred = fp->fp_glob->fg_cred; error = dofilewrite(&context, fp, uap->cbuf, uap->nbyte, (off_t)-1, 0, retval); - - wrote_some = *retval > 0; - } - if (wrote_some) { - fp_drop_written(p, fd, fp); - } else { - fp_drop(p, fd, fp, 0); } + fp_drop(p, fd, fp, 0); return error; } @@ -533,11 +542,10 @@ pwrite_nocancel(struct proc *p, struct pwrite_nocancel_args *uap, user_ssize_t * int error; int fd = uap->fd; vnode_t vp = (vnode_t)0; - bool wrote_some = false; AUDIT_ARG(fd, fd); - error = fp_lookup(p, fd, &fp, 0); + error = fp_get_ftype(p, fd, DTYPE_VNODE, ESPIPE, &fp); if (error) { return error; } @@ -550,13 +558,9 @@ pwrite_nocancel(struct proc *p, struct pwrite_nocancel_args *uap, 
user_ssize_t * proc_fdunlock(p); } else { struct vfs_context context = *vfs_context_current(); - context.vc_ucred = fp->f_fglob->fg_cred; + context.vc_ucred = fp->fp_glob->fg_cred; - if (fp->f_type != DTYPE_VNODE) { - error = ESPIPE; - goto errout; - } - vp = (vnode_t)fp->f_fglob->fg_data; + vp = (vnode_t)fp->fp_glob->fg_data; if (vnode_isfifo(vp)) { error = ESPIPE; goto errout; @@ -572,14 +576,9 @@ pwrite_nocancel(struct proc *p, struct pwrite_nocancel_args *uap, user_ssize_t * error = dofilewrite(&context, fp, uap->buf, uap->nbyte, uap->offset, FOF_OFFSET, retval); - wrote_some = *retval > 0; } errout: - if (wrote_some) { - fp_drop_written(p, fd, fp); - } else { - fp_drop(p, fd, fp, 0); - } + fp_drop(p, fd, fp, 0); KERNEL_DEBUG_CONSTANT((BSDDBG_CODE(DBG_BSD_SC_EXTENDED_INFO, SYS_pwrite) | DBG_FUNC_NONE), uap->fd, uap->nbyte, (unsigned int)((uap->offset >> 32)), (unsigned int)(uap->offset), 0); @@ -628,45 +627,97 @@ dofilewrite(vfs_context_t ctx, struct fileproc *fp, } /* The socket layer handles SIGPIPE */ if (error == EPIPE && fp->f_type != DTYPE_SOCKET && - (fp->f_fglob->fg_lflags & FG_NOSIGPIPE) == 0) { + (fp->fp_glob->fg_lflags & FG_NOSIGPIPE) == 0) { /* XXX Raise the signal on the thread? */ psignal(vfs_context_proc(ctx), SIGPIPE); } } bytecnt -= uio_resid(auio); + if (bytecnt) { + os_atomic_or(&fp->fp_glob->fg_flag, FWASWRITTEN, relaxed); + } *retval = bytecnt; return error; } /* - * Gather write system call + * Returns: 0 Success + * EBADF + * ESPIPE + * ENXIO + * fp_lookup:EBADF + * fp_guard_exception:??? */ -int -writev(struct proc *p, struct writev_args *uap, user_ssize_t *retval) +static int +preparefilewrite(struct proc *p, struct fileproc **fp_ret, int fd, int check_for_pwrite) { - __pthread_testcancel(1); - return writev_nocancel(p, (struct writev_nocancel_args *)uap, retval); + vnode_t vp; + int error; + struct fileproc *fp; + + AUDIT_ARG(fd, fd); + + proc_fdlock_spin(p); + + error = fp_lookup(p, fd, &fp, 1); + + if (error) { + proc_fdunlock(p); + return error; + } + if ((fp->f_flag & FWRITE) == 0) { + error = EBADF; + goto ExitThisRoutine; + } + if (FP_ISGUARDED(fp, GUARD_WRITE)) { + error = fp_guard_exception(p, fd, fp, kGUARD_EXC_WRITE); + goto ExitThisRoutine; + } + if (check_for_pwrite) { + if (fp->f_type != DTYPE_VNODE) { + error = ESPIPE; + goto ExitThisRoutine; + } + + vp = (vnode_t)fp->fp_glob->fg_data; + if (vnode_isfifo(vp)) { + error = ESPIPE; + goto ExitThisRoutine; + } + if ((vp->v_flag & VISTTY)) { + error = ENXIO; + goto ExitThisRoutine; + } + } + + *fp_ret = fp; + + proc_fdunlock(p); + return 0; + +ExitThisRoutine: + fp_drop(p, fd, fp, 1); + proc_fdunlock(p); + return error; } -int -writev_nocancel(struct proc *p, struct writev_nocancel_args *uap, user_ssize_t *retval) +static int +writev_prwritev_uio(struct proc *p, int fd, + user_addr_t user_iovp, int iovcnt, off_t offset, int is_pwritev, + user_ssize_t *retval) { uio_t auio = NULL; int error; - struct fileproc *fp; struct user_iovec *iovp; - bool wrote_some = false; - - AUDIT_ARG(fd, uap->fd); - /* Verify range bedfore calling uio_create() */ - if (uap->iovcnt <= 0 || uap->iovcnt > UIO_MAXIOV) { + /* Verify range before calling uio_create() */ + if (iovcnt <= 0 || iovcnt > UIO_MAXIOV || offset < 0) { return EINVAL; } /* allocate a uio large enough to hold the number of iovecs passed */ - auio = uio_create(uap->iovcnt, 0, + auio = uio_create(iovcnt, offset, (IS_64BIT_PROCESS(p) ? 
UIO_USERSPACE64 : UIO_USERSPACE32), UIO_WRITE); @@ -678,9 +729,9 @@ writev_nocancel(struct proc *p, struct writev_nocancel_args *uap, user_ssize_t * error = ENOMEM; goto ExitThisRoutine; } - error = copyin_user_iovec_array(uap->iovp, + error = copyin_user_iovec_array(user_iovp, IS_64BIT_PROCESS(p) ? UIO_USERSPACE64 : UIO_USERSPACE32, - uap->iovcnt, iovp); + iovcnt, iovp); if (error) { goto ExitThisRoutine; } @@ -692,27 +743,7 @@ writev_nocancel(struct proc *p, struct writev_nocancel_args *uap, user_ssize_t * goto ExitThisRoutine; } - error = fp_lookup(p, uap->fd, &fp, 0); - if (error) { - goto ExitThisRoutine; - } - - if ((fp->f_flag & FWRITE) == 0) { - error = EBADF; - } else if (FP_ISGUARDED(fp, GUARD_WRITE)) { - proc_fdlock(p); - error = fp_guard_exception(p, uap->fd, fp, kGUARD_EXC_WRITE); - proc_fdunlock(p); - } else { - error = wr_uio(p, fp, auio, retval); - wrote_some = *retval > 0; - } - - if (wrote_some) { - fp_drop_written(p, uap->fd, fp); - } else { - fp_drop(p, uap->fd, fp, 0); - } + error = wr_uio(p, fd, auio, is_pwritev, retval); ExitThisRoutine: if (auio != NULL) { @@ -721,9 +752,67 @@ ExitThisRoutine: return error; } +/* + * Gather write system call + */ +int +writev(struct proc *p, struct writev_args *uap, user_ssize_t *retval) +{ + __pthread_testcancel(1); + return writev_nocancel(p, (struct writev_nocancel_args *)uap, retval); +} + +int +writev_nocancel(struct proc *p, struct writev_nocancel_args *uap, user_ssize_t *retval) +{ + return writev_prwritev_uio(p, uap->fd, uap->iovp, uap->iovcnt, 0, 0, retval); +} + +/* + * Pwritev system call + */ +int +sys_pwritev(struct proc *p, struct pwritev_args *uap, user_ssize_t *retval) +{ + __pthread_testcancel(1); + return sys_pwritev_nocancel(p, (struct pwritev_nocancel_args *)uap, retval); +} + +int +sys_pwritev_nocancel(struct proc *p, struct pwritev_nocancel_args *uap, user_ssize_t *retval) +{ + return writev_prwritev_uio(p, uap->fd, uap->iovp, uap->iovcnt, uap->offset, 1, retval); +} + +/* + * Returns: 0 Success + * preparefileread:EBADF + * preparefileread:ESPIPE + * preparefileread:ENXIO + * preparefileread:??? + * fo_write:??? + */ +int +wr_uio(struct proc *p, int fd, uio_t uio, int is_pwritev, user_ssize_t *retval) +{ + struct fileproc *fp; + int error; + int flags; + + if ((error = preparefilewrite(p, &fp, fd, is_pwritev))) { + return error; + } + + flags = is_pwritev ? 
FOF_OFFSET : 0; + error = do_uiowrite(p, fp, uio, flags, retval); + + fp_drop(p, fd, fp, 0); + + return error; +} int -wr_uio(struct proc *p, struct fileproc *fp, uio_t uio, user_ssize_t *retval) +do_uiowrite(struct proc *p, struct fileproc *fp, uio_t uio, int flags, user_ssize_t *retval) { int error; user_ssize_t count; @@ -732,7 +821,7 @@ wr_uio(struct proc *p, struct fileproc *fp, uio_t uio, user_ssize_t *retval) count = uio_resid(uio); context.vc_ucred = fp->f_cred; - error = fo_write(fp, uio, 0, &context); + error = fo_write(fp, uio, flags, &context); if (error) { if (uio_resid(uio) != count && (error == ERESTART || error == EINTR || error == EWOULDBLOCK)) { @@ -740,25 +829,35 @@ wr_uio(struct proc *p, struct fileproc *fp, uio_t uio, user_ssize_t *retval) } /* The socket layer handles SIGPIPE */ if (error == EPIPE && fp->f_type != DTYPE_SOCKET && - (fp->f_fglob->fg_lflags & FG_NOSIGPIPE) == 0) { + (fp->fp_glob->fg_lflags & FG_NOSIGPIPE) == 0) { psignal(p, SIGPIPE); } } - *retval = count - uio_resid(uio); + count -= uio_resid(uio); + if (count) { + os_atomic_or(&fp->fp_glob->fg_flag, FWASWRITTEN, relaxed); + } + *retval = count; return error; } - +/* + * Returns: 0 Success + * preparefileread:EBADF + * preparefileread:ESPIPE + * preparefileread:ENXIO + * fo_read:??? + */ int -rd_uio(struct proc *p, int fdes, uio_t uio, user_ssize_t *retval) +rd_uio(struct proc *p, int fdes, uio_t uio, int is_preadv, user_ssize_t *retval) { struct fileproc *fp; int error; user_ssize_t count; struct vfs_context context = *vfs_context_current(); - if ((error = preparefileread(p, &fp, fdes, 0))) { + if ((error = preparefileread(p, &fp, fdes, is_preadv))) { return error; } @@ -766,7 +865,8 @@ rd_uio(struct proc *p, int fdes, uio_t uio, user_ssize_t *retval) context.vc_ucred = fp->f_cred; - error = fo_read(fp, uio, 0, &context); + int flags = is_preadv ? FOF_OFFSET : 0; + error = fo_read(fp, uio, flags, &context); if (error) { if (uio_resid(uio) != count && (error == ERESTART || @@ -776,7 +876,7 @@ rd_uio(struct proc *p, int fdes, uio_t uio, user_ssize_t *retval) } *retval = count - uio_resid(uio); - donefileread(p, fp, fdes); + fp_drop(p, fdes, fp, 0); return error; } @@ -830,7 +930,8 @@ ioctl(struct proc *p, struct ioctl_args *uap, __unused int32_t *retval) return ENOTTY; } if (size > sizeof(stkbuf)) { - if ((memp = (caddr_t)kalloc(size)) == 0) { + memp = (caddr_t)kheap_alloc(KHEAP_TEMP, size, Z_WAITOK); + if (memp == 0) { return ENOMEM; } datap = memp; @@ -880,10 +981,10 @@ ioctl(struct proc *p, struct ioctl_args *uap, __unused int32_t *retval) goto out; } - context.vc_ucred = fp->f_fglob->fg_cred; + context.vc_ucred = fp->fp_glob->fg_cred; #if CONFIG_MACF - error = mac_file_check_ioctl(context.vc_ucred, fp->f_fglob, com); + error = mac_file_check_ioctl(context.vc_ucred, fp->fp_glob, com); if (error) { goto out; } @@ -899,19 +1000,27 @@ ioctl(struct proc *p, struct ioctl_args *uap, __unused int32_t *retval) break; case FIONBIO: + // FIXME (rdar://54898652) + // + // this code is broken if fnctl(F_SETFL), ioctl() are + // called concurrently for the same fileglob. if ((tmp = *(int *)datap)) { - fp->f_flag |= FNONBLOCK; + os_atomic_or(&fp->f_flag, FNONBLOCK, relaxed); } else { - fp->f_flag &= ~FNONBLOCK; + os_atomic_andnot(&fp->f_flag, FNONBLOCK, relaxed); } error = fo_ioctl(fp, FIONBIO, (caddr_t)&tmp, &context); break; case FIOASYNC: + // FIXME (rdar://54898652) + // + // this code is broken if fnctl(F_SETFL), ioctl() are + // called concurrently for the same fileglob. 
if ((tmp = *(int *)datap)) { - fp->f_flag |= FASYNC; + os_atomic_or(&fp->f_flag, FASYNC, relaxed); } else { - fp->f_flag &= ~FASYNC; + os_atomic_andnot(&fp->f_flag, FASYNC, relaxed); } error = fo_ioctl(fp, FIOASYNC, (caddr_t)&tmp, &context); break; @@ -966,7 +1075,7 @@ out: out_nofp: if (memp) { - kfree(memp, size); + kheap_free(KHEAP_TEMP, memp, size); } return error; } @@ -979,8 +1088,8 @@ extern int selprocess(int error, int sel_pass); static int selscan(struct proc *p, struct _select * sel, struct _select_data * seldata, int nfd, int32_t *retval, int sel_pass, struct waitq_set *wqset); static int selcount(struct proc *p, u_int32_t *ibits, int nfd, int *count); -static int seldrop_locked(struct proc *p, u_int32_t *ibits, int nfd, int lim, int *need_wakeup, int fromselcount); -static int seldrop(struct proc *p, u_int32_t *ibits, int nfd); +static int seldrop_locked(struct proc *p, u_int32_t *ibits, int nfd, int lim, int *need_wakeup); +static int seldrop(struct proc *p, u_int32_t *ibits, int nfd, int lim); static int select_internal(struct proc *p, struct select_nocancel_args *uap, uint64_t timeout, int32_t *retval); /* @@ -1009,7 +1118,7 @@ select_nocancel(struct proc *p, struct select_nocancel_args *uap, int32_t *retva struct user64_timeval atv64; err = copyin(uap->tv, (caddr_t)&atv64, sizeof(atv64)); /* Loses resolution - assume timeout < 68 years */ - atv.tv_sec = atv64.tv_sec; + atv.tv_sec = (__darwin_time_t)atv64.tv_sec; atv.tv_usec = atv64.tv_usec; } else { struct user32_timeval atv32; @@ -1052,8 +1161,8 @@ pselect_nocancel(struct proc *p, struct pselect_nocancel_args *uap, int32_t *ret if (IS_64BIT_PROCESS(p)) { struct user64_timespec ts64; err = copyin(uap->ts, (caddr_t)&ts64, sizeof(ts64)); - ts.tv_sec = ts64.tv_sec; - ts.tv_nsec = ts64.tv_nsec; + ts.tv_sec = (__darwin_time_t)ts64.tv_sec; + ts.tv_nsec = (long)ts64.tv_nsec; } else { struct user32_timespec ts32; err = copyin(uap->ts, (caddr_t)&ts32, sizeof(ts32)); @@ -1412,7 +1521,7 @@ retry: } done: if (unwind) { - seldrop(p, sel->ibits, uap->nd); + seldrop(p, sel->ibits, uap->nd, seldata->count); waitq_set_deinit(uth->uu_wqset); /* * zero out the waitq pointer array to avoid use-after free @@ -1487,8 +1596,8 @@ selunlinkfp(struct fileproc *fp, uint64_t wqp_id, struct waitq_set *wqset) waitq_unlink_by_prepost_id(wqp_id, wqset); } - /* allow passing a NULL/invalid fp for seldrop unwind */ - if (!fp || !(fp->f_flags & (FP_INSELECT | FP_SELCONFLICT))) { + /* allow passing a invalid fp for seldrop unwind */ + if (!(fp->fp_flags & (FP_INSELECT | FP_SELCONFLICT))) { return; } @@ -1499,7 +1608,7 @@ selunlinkfp(struct fileproc *fp, uint64_t wqp_id, struct waitq_set *wqset) * be linked with the global conflict queue, and the last waiter * on the fp clears the CONFLICT marker. */ - if (valid_set && (fp->f_flags & FP_SELCONFLICT)) { + if (valid_set && (fp->fp_flags & FP_SELCONFLICT)) { waitq_unlink(&select_conflict_queue, wqset); } @@ -1509,9 +1618,9 @@ selunlinkfp(struct fileproc *fp, uint64_t wqp_id, struct waitq_set *wqset) * that if we were the first thread to select on the FD, then * we'll be the one to clear this flag... 
*/ - if (valid_set && fp->f_wset == (void *)wqset) { - fp->f_flags &= ~FP_INSELECT; - fp->f_wset = NULL; + if (valid_set && fp->fp_wset == (void *)wqset) { + fp->fp_flags &= ~FP_INSELECT; + fp->fp_wset = NULL; } } @@ -1533,7 +1642,7 @@ sellinkfp(struct fileproc *fp, void **wq_data, struct waitq_set *wqset) { struct waitq *f_wq = NULL; - if ((fp->f_flags & FP_INSELECT) != FP_INSELECT) { + if ((fp->fp_flags & FP_INSELECT) != FP_INSELECT) { if (wq_data) { panic("non-null data:%p on fp:%p not in select?!" "(wqset:%p)", wq_data, fp, wqset); @@ -1541,7 +1650,7 @@ sellinkfp(struct fileproc *fp, void **wq_data, struct waitq_set *wqset) return 0; } - if ((fp->f_flags & FP_SELCONFLICT) == FP_SELCONFLICT) { + if ((fp->fp_flags & FP_SELCONFLICT) == FP_SELCONFLICT) { waitq_link(&select_conflict_queue, wqset, WAITQ_SHOULD_LOCK, NULL); } @@ -1562,8 +1671,8 @@ sellinkfp(struct fileproc *fp, void **wq_data, struct waitq_set *wqset) } /* record the first thread's wqset in the fileproc structure */ - if (!fp->f_wset) { - fp->f_wset = (void *)wqset; + if (!fp->fp_wset) { + fp->fp_wset = (void *)wqset; } /* handles NULL f_wq */ @@ -1637,13 +1746,8 @@ selscan(struct proc *p, struct _select *sel, struct _select_data * seldata, while ((j = ffs(bits)) && (fd = i + --j) < nfd) { bits &= ~(1U << j); - if (fd < fdp->fd_nfiles) { - fp = fdp->fd_ofiles[fd]; - } else { - fp = NULL; - } - - if (fp == NULL || (fdp->fd_ofileflags[fd] & UF_RESERVED)) { + fp = fp_get_noref_locked(p, fd); + if (fp == NULL) { /* * If we abort because of a bad * fd, let the caller unwind... @@ -1658,11 +1762,11 @@ selscan(struct proc *p, struct _select *sel, struct _select_data * seldata, } else { reserved_link = waitq_link_reserve((struct waitq *)wqset); rl_ptr = &reserved_link; - if (fp->f_flags & FP_INSELECT) { + if (fp->fp_flags & FP_INSELECT) { /* someone is already in select on this fp */ - fp->f_flags |= FP_SELCONFLICT; + fp->fp_flags |= FP_SELCONFLICT; } else { - fp->f_flags |= FP_INSELECT; + fp->fp_flags |= FP_INSELECT; } waitq_set_lazy_init_link(wqset); @@ -1729,6 +1833,7 @@ poll_nocancel(struct proc *p, struct poll_nocancel_args *uap, int32_t *retval) int ncoll, error = 0; u_int nfds = uap->nfds; u_int rfds = 0; + rlim_t nofile = proc_limitgetcur(p, RLIMIT_NOFILE, TRUE); /* * This is kinda bogus. We have fd limits, but that is not @@ -1738,7 +1843,7 @@ poll_nocancel(struct proc *p, struct poll_nocancel_args *uap, int32_t *retval) * safe, but not overly restrictive. */ if (nfds > OPEN_MAX || - (nfds > p->p_rlimit[RLIMIT_NOFILE].rlim_cur && (proc_suser(p) || nfds > FD_SETSIZE))) { + (nfds > nofile && (proc_suser(p) || nfds > FD_SETSIZE))) { return EINVAL; } @@ -1949,7 +2054,7 @@ seltrue(__unused dev_t dev, __unused int flag, __unused struct proc *p) * selcount * * Count the number of bits set in the input bit vector, and establish an - * outstanding fp->f_iocount for each of the descriptors which will be in + * outstanding fp->fp_iocount for each of the descriptors which will be in * use in the select operation. 
* * Parameters: p The process doing the select @@ -1980,7 +2085,6 @@ selcount(struct proc *p, u_int32_t *ibits, int nfd, int *countp) u_int32_t *iptr; u_int nw; int error = 0; - int dropcount; int need_wakeup = 0; /* @@ -2001,19 +2105,13 @@ selcount(struct proc *p, u_int32_t *ibits, int nfd, int *countp) while ((j = ffs(bits)) && (fd = i + --j) < nfd) { bits &= ~(1U << j); - if (fd < fdp->fd_nfiles) { - fp = fdp->fd_ofiles[fd]; - } else { - fp = NULL; - } - - if (fp == NULL || - (fdp->fd_ofileflags[fd] & UF_RESERVED)) { + fp = fp_get_noref_locked(p, fd); + if (fp == NULL) { *countp = 0; error = EBADF; goto bad; } - os_ref_retain_locked(&fp->f_iocount); + os_ref_retain_locked(&fp->fp_iocount); n++; } } @@ -2024,13 +2122,11 @@ selcount(struct proc *p, u_int32_t *ibits, int nfd, int *countp) return 0; bad: - dropcount = 0; - if (n == 0) { goto out; } /* Ignore error return; it's already EBADF */ - (void)seldrop_locked(p, ibits, nfd, n, &need_wakeup, 1); + (void)seldrop_locked(p, ibits, nfd, n, &need_wakeup); out: proc_fdunlock(p); @@ -2045,7 +2141,7 @@ out: * seldrop_locked * * Drop outstanding wait queue references set up during selscan(); drop the - * outstanding per fileproc f_iocount() picked up during the selcount(). + * outstanding per fileproc fp_iocount picked up during the selcount(). * * Parameters: p Process performing the select * ibits Input bit bector of fd's @@ -2067,7 +2163,7 @@ out: * clean up after the set up on the remaining fds. */ static int -seldrop_locked(struct proc *p, u_int32_t *ibits, int nfd, int lim, int *need_wakeup, int fromselcount) +seldrop_locked(struct proc *p, u_int32_t *ibits, int nfd, int lim, int *need_wakeup) { struct filedesc *fdp = p->p_fd; int msk, i, j, nc, fd; @@ -2076,7 +2172,6 @@ seldrop_locked(struct proc *p, u_int32_t *ibits, int nfd, int lim, int *need_wak u_int32_t *iptr; u_int nw; int error = 0; - int dropcount = 0; uthread_t uth = get_bsdthread_info(current_thread()); struct _select_data *seldata; @@ -2100,35 +2195,28 @@ seldrop_locked(struct proc *p, u_int32_t *ibits, int nfd, int lim, int *need_wak bits = iptr[i / NFDBITS]; while ((j = ffs(bits)) && (fd = i + --j) < nfd) { bits &= ~(1U << j); - fp = fdp->fd_ofiles[fd]; /* * If we've already dropped as many as were * counted/scanned, then we are done. */ - if ((fromselcount != 0) && (++dropcount > lim)) { + if (nc >= lim) { goto done; } /* - * unlink even potentially NULL fileprocs. - * If the FD was closed from under us, we - * still need to cleanup the waitq links! + * We took an I/O reference in selcount, + * so the fp can't possibly be NULL. */ + fp = fp_get_noref_locked_with_iocount(p, fd); selunlinkfp(fp, seldata->wqp ? seldata->wqp[nc] : 0, uth->uu_wqset); nc++; - if (fp == NULL) { - /* skip (now) bad fds */ - error = EBADF; - continue; - } - - const os_ref_count_t refc = os_ref_release_locked(&fp->f_iocount); + const os_ref_count_t refc = os_ref_release_locked(&fp->fp_iocount); if (0 == refc) { - panic("f_iocount overdecrement!"); + panic("fp_iocount overdecrement!"); } if (1 == refc) { @@ -2138,8 +2226,8 @@ seldrop_locked(struct proc *p, u_int32_t *ibits, int nfd, int lim, int *need_wak * and is also responsible for waking up anyone * waiting on iocounts to drain. 
*/ - if (fp->f_flags & FP_SELCONFLICT) { - fp->f_flags &= ~FP_SELCONFLICT; + if (fp->fp_flags & FP_SELCONFLICT) { + fp->fp_flags &= ~FP_SELCONFLICT; } if (p->p_fpdrainwait) { p->p_fpdrainwait = 0; @@ -2155,13 +2243,13 @@ done: static int -seldrop(struct proc *p, u_int32_t *ibits, int nfd) +seldrop(struct proc *p, u_int32_t *ibits, int nfd, int lim) { int error; int need_wakeup = 0; proc_fdlock(p); - error = seldrop_locked(p, ibits, nfd, nfd, &need_wakeup, 0); + error = seldrop_locked(p, ibits, nfd, lim, &need_wakeup); proc_fdunlock(p); if (need_wakeup) { wakeup(&p->p_fpdrainwait); @@ -2283,922 +2371,40 @@ selthreadclear(struct selinfo *sip) } - - -#define DBG_POST 0x10 -#define DBG_WATCH 0x11 -#define DBG_WAIT 0x12 -#define DBG_MOD 0x13 -#define DBG_EWAKEUP 0x14 -#define DBG_ENQUEUE 0x15 -#define DBG_DEQUEUE 0x16 - -#define DBG_MISC_POST MISCDBG_CODE(DBG_EVENT,DBG_POST) -#define DBG_MISC_WATCH MISCDBG_CODE(DBG_EVENT,DBG_WATCH) -#define DBG_MISC_WAIT MISCDBG_CODE(DBG_EVENT,DBG_WAIT) -#define DBG_MISC_MOD MISCDBG_CODE(DBG_EVENT,DBG_MOD) -#define DBG_MISC_EWAKEUP MISCDBG_CODE(DBG_EVENT,DBG_EWAKEUP) -#define DBG_MISC_ENQUEUE MISCDBG_CODE(DBG_EVENT,DBG_ENQUEUE) -#define DBG_MISC_DEQUEUE MISCDBG_CODE(DBG_EVENT,DBG_DEQUEUE) - - -#define EVPROCDEQUE(p, evq) do { \ - proc_lock(p); \ - if (evq->ee_flags & EV_QUEUED) { \ - TAILQ_REMOVE(&p->p_evlist, evq, ee_plist); \ - evq->ee_flags &= ~EV_QUEUED; \ - } \ - proc_unlock(p); \ -} while (0); - - /* - * called upon socket close. deque and free all events for - * the socket... socket must be locked by caller. + * gethostuuid + * + * Description: Get the host UUID from IOKit and return it to user space. + * + * Parameters: uuid_buf Pointer to buffer to receive UUID + * timeout Timespec for timout + * + * Returns: 0 Success + * EWOULDBLOCK Timeout is too short + * copyout:EFAULT Bad user buffer + * mac_system_check_info:EPERM Client not allowed to perform this operation + * + * Notes: A timeout seems redundant, since if it's tolerable to not + * have a system UUID in hand, then why ask for one? */ -void -evsofree(struct socket *sp) +int +gethostuuid(struct proc *p, struct gethostuuid_args *uap, __unused int32_t *retval) { - struct eventqelt *evq, *next; - proc_t p; - - if (sp == NULL) { - return; - } - - for (evq = sp->so_evlist.tqh_first; evq != NULL; evq = next) { - next = evq->ee_slist.tqe_next; - p = evq->ee_proc; + kern_return_t kret; + int error; + mach_timespec_t mach_ts; /* for IOKit call */ + __darwin_uuid_t uuid_kern = {}; /* for IOKit call */ - if (evq->ee_flags & EV_QUEUED) { - EVPROCDEQUE(p, evq); + /* Check entitlement */ + if (!IOTaskHasEntitlement(current_task(), "com.apple.private.getprivatesysid")) { +#if !defined(XNU_TARGET_OS_OSX) +#if CONFIG_MACF + if ((error = mac_system_check_info(kauth_cred_get(), "hw.uuid")) != 0) { + /* EPERM invokes userspace upcall if present */ + return error; } - TAILQ_REMOVE(&sp->so_evlist, evq, ee_slist); // remove from socket q - FREE(evq, M_TEMP); - } -} - - -/* - * called upon pipe close. deque and free all events for - * the pipe... pipe must be locked by caller - */ -void -evpipefree(struct pipe *cpipe) -{ - struct eventqelt *evq, *next; - proc_t p; - - for (evq = cpipe->pipe_evlist.tqh_first; evq != NULL; evq = next) { - next = evq->ee_slist.tqe_next; - p = evq->ee_proc; - - EVPROCDEQUE(p, evq); - - TAILQ_REMOVE(&cpipe->pipe_evlist, evq, ee_slist); // remove from pipe q - FREE(evq, M_TEMP); - } -} - - -/* - * enqueue this event if it's not already queued. 
wakeup - * the proc if we do queue this event to it... - * entered with proc lock held... we drop it before - * doing the wakeup and return in that state - */ -static void -evprocenque(struct eventqelt *evq) -{ - proc_t p; - - assert(evq); - p = evq->ee_proc; - - KERNEL_DEBUG(DBG_MISC_ENQUEUE | DBG_FUNC_START, (uint32_t)evq, evq->ee_flags, evq->ee_eventmask, 0, 0); - - proc_lock(p); - - if (evq->ee_flags & EV_QUEUED) { - proc_unlock(p); - - KERNEL_DEBUG(DBG_MISC_ENQUEUE | DBG_FUNC_END, 0, 0, 0, 0, 0); - return; - } - evq->ee_flags |= EV_QUEUED; - - TAILQ_INSERT_TAIL(&p->p_evlist, evq, ee_plist); - - proc_unlock(p); - - wakeup(&p->p_evlist); - - KERNEL_DEBUG(DBG_MISC_ENQUEUE | DBG_FUNC_END, 0, 0, 0, 0, 0); -} - - -/* - * pipe lock must be taken by the caller - */ -void -postpipeevent(struct pipe *pipep, int event) -{ - int mask; - struct eventqelt *evq; - - if (pipep == NULL) { - return; - } - KERNEL_DEBUG(DBG_MISC_POST | DBG_FUNC_START, event, 0, 0, 1, 0); - - for (evq = pipep->pipe_evlist.tqh_first; - evq != NULL; evq = evq->ee_slist.tqe_next) { - if (evq->ee_eventmask == 0) { - continue; - } - mask = 0; - - switch (event & (EV_RWBYTES | EV_RCLOSED | EV_WCLOSED)) { - case EV_RWBYTES: - if ((evq->ee_eventmask & EV_RE) && pipep->pipe_buffer.cnt) { - mask |= EV_RE; - evq->ee_req.er_rcnt = pipep->pipe_buffer.cnt; - } - if ((evq->ee_eventmask & EV_WR) && - (MAX(pipep->pipe_buffer.size, PIPE_SIZE) - pipep->pipe_buffer.cnt) >= PIPE_BUF) { - if (pipep->pipe_state & PIPE_EOF) { - mask |= EV_WR | EV_RESET; - break; - } - mask |= EV_WR; - evq->ee_req.er_wcnt = MAX(pipep->pipe_buffer.size, PIPE_SIZE) - pipep->pipe_buffer.cnt; - } - break; - - case EV_WCLOSED: - case EV_RCLOSED: - if ((evq->ee_eventmask & EV_RE)) { - mask |= EV_RE | EV_RCLOSED; - } - if ((evq->ee_eventmask & EV_WR)) { - mask |= EV_WR | EV_WCLOSED; - } - break; - - default: - return; - } - if (mask) { - /* - * disarm... postevents are nops until this event is 'read' via - * waitevent and then re-armed via modwatch - */ - evq->ee_eventmask = 0; - - /* - * since events are disarmed until after the waitevent - * the ee_req.er_xxxx fields can't change once we've - * inserted this event into the proc queue... - * therefore, the waitevent will see a 'consistent' - * snapshot of the event, even though it won't hold - * the pipe lock, and we're updating the event outside - * of the proc lock, which it will hold - */ - evq->ee_req.er_eventbits |= mask; - - KERNEL_DEBUG(DBG_MISC_POST, (uint32_t)evq, evq->ee_req.er_eventbits, mask, 1, 0); - - evprocenque(evq); - } - } - KERNEL_DEBUG(DBG_MISC_POST | DBG_FUNC_END, 0, 0, 0, 1, 0); -} - -#if SOCKETS -/* - * given either a sockbuf or a socket run down the - * event list and queue ready events found... 
- * the socket must be locked by the caller - */ -void -postevent(struct socket *sp, struct sockbuf *sb, int event) -{ - int mask; - struct eventqelt *evq; - struct tcpcb *tp; - - if (sb) { - sp = sb->sb_so; - } - if (sp == NULL) { - return; - } - - KERNEL_DEBUG(DBG_MISC_POST | DBG_FUNC_START, (int)sp, event, 0, 0, 0); - - for (evq = sp->so_evlist.tqh_first; - evq != NULL; evq = evq->ee_slist.tqe_next) { - if (evq->ee_eventmask == 0) { - continue; - } - mask = 0; - - /* ready for reading: - * - byte cnt >= receive low water mark - * - read-half of conn closed - * - conn pending for listening sock - * - socket error pending - * - * ready for writing - * - byte cnt avail >= send low water mark - * - write half of conn closed - * - socket error pending - * - non-blocking conn completed successfully - * - * exception pending - * - out of band data - * - sock at out of band mark - */ - - switch (event & EV_DMASK) { - case EV_OOB: - if ((evq->ee_eventmask & EV_EX)) { - if (sp->so_oobmark || ((sp->so_state & SS_RCVATMARK))) { - mask |= EV_EX | EV_OOB; - } - } - break; - - case EV_RWBYTES | EV_OOB: - if ((evq->ee_eventmask & EV_EX)) { - if (sp->so_oobmark || ((sp->so_state & SS_RCVATMARK))) { - mask |= EV_EX | EV_OOB; - } - } - /* - * fall into the next case - */ - case EV_RWBYTES: - if ((evq->ee_eventmask & EV_RE) && soreadable(sp)) { - /* for AFP/OT purposes; may go away in future */ - if ((SOCK_DOM(sp) == PF_INET || - SOCK_DOM(sp) == PF_INET6) && - SOCK_PROTO(sp) == IPPROTO_TCP && - (sp->so_error == ECONNREFUSED || - sp->so_error == ECONNRESET)) { - if (sp->so_pcb == NULL || - sotoinpcb(sp)->inp_state == - INPCB_STATE_DEAD || - (tp = sototcpcb(sp)) == NULL || - tp->t_state == TCPS_CLOSED) { - mask |= EV_RE | EV_RESET; - break; - } - } - mask |= EV_RE; - evq->ee_req.er_rcnt = sp->so_rcv.sb_cc; - - if (sp->so_state & SS_CANTRCVMORE) { - mask |= EV_FIN; - break; - } - } - if ((evq->ee_eventmask & EV_WR) && sowriteable(sp)) { - /* for AFP/OT purposes; may go away in future */ - if ((SOCK_DOM(sp) == PF_INET || - SOCK_DOM(sp) == PF_INET6) && - SOCK_PROTO(sp) == IPPROTO_TCP && - (sp->so_error == ECONNREFUSED || - sp->so_error == ECONNRESET)) { - if (sp->so_pcb == NULL || - sotoinpcb(sp)->inp_state == - INPCB_STATE_DEAD || - (tp = sototcpcb(sp)) == NULL || - tp->t_state == TCPS_CLOSED) { - mask |= EV_WR | EV_RESET; - break; - } - } - mask |= EV_WR; - evq->ee_req.er_wcnt = sbspace(&sp->so_snd); - } - break; - - case EV_RCONN: - if ((evq->ee_eventmask & EV_RE)) { - mask |= EV_RE | EV_RCONN; - evq->ee_req.er_rcnt = sp->so_qlen + 1; // incl this one - } - break; - - case EV_WCONN: - if ((evq->ee_eventmask & EV_WR)) { - mask |= EV_WR | EV_WCONN; - } - break; - - case EV_RCLOSED: - if ((evq->ee_eventmask & EV_RE)) { - mask |= EV_RE | EV_RCLOSED; - } - break; - - case EV_WCLOSED: - if ((evq->ee_eventmask & EV_WR)) { - mask |= EV_WR | EV_WCLOSED; - } - break; - - case EV_FIN: - if (evq->ee_eventmask & EV_RE) { - mask |= EV_RE | EV_FIN; - } - break; - - case EV_RESET: - case EV_TIMEOUT: - if (evq->ee_eventmask & EV_RE) { - mask |= EV_RE | event; - } - if (evq->ee_eventmask & EV_WR) { - mask |= EV_WR | event; - } - break; - - default: - KERNEL_DEBUG(DBG_MISC_POST | DBG_FUNC_END, (int)sp, -1, 0, 0, 0); - return; - } /* switch */ - - KERNEL_DEBUG(DBG_MISC_POST, (int)evq, evq->ee_eventmask, evq->ee_req.er_eventbits, mask, 0); - - if (mask) { - /* - * disarm... 
postevents are nops until this event is 'read' via - * waitevent and then re-armed via modwatch - */ - evq->ee_eventmask = 0; - - /* - * since events are disarmed until after the waitevent - * the ee_req.er_xxxx fields can't change once we've - * inserted this event into the proc queue... - * since waitevent can't see this event until we - * enqueue it, waitevent will see a 'consistent' - * snapshot of the event, even though it won't hold - * the socket lock, and we're updating the event outside - * of the proc lock, which it will hold - */ - evq->ee_req.er_eventbits |= mask; - - evprocenque(evq); - } - } - KERNEL_DEBUG(DBG_MISC_POST | DBG_FUNC_END, (int)sp, 0, 0, 0, 0); -} -#endif /* SOCKETS */ - - -/* - * watchevent system call. user passes us an event to watch - * for. we malloc an event object, initialize it, and queue - * it to the open socket. when the event occurs, postevent() - * will enque it back to our proc where we can retrieve it - * via waitevent(). - * - * should this prevent duplicate events on same socket? - * - * Returns: - * ENOMEM No memory for operation - * copyin:EFAULT - */ -int -watchevent(proc_t p, struct watchevent_args *uap, __unused int *retval) -{ - struct eventqelt *evq = (struct eventqelt *)0; - struct eventqelt *np = NULL; - struct eventreq64 *erp; - struct fileproc *fp = NULL; - int error; - - KERNEL_DEBUG(DBG_MISC_WATCH | DBG_FUNC_START, 0, 0, 0, 0, 0); - - // get a qelt and fill with users req - MALLOC(evq, struct eventqelt *, sizeof(struct eventqelt), M_TEMP, M_WAITOK); - - if (evq == NULL) { - return ENOMEM; - } - erp = &evq->ee_req; - - // get users request pkt - - if (IS_64BIT_PROCESS(p)) { - error = copyin(uap->u_req, (caddr_t)erp, sizeof(struct eventreq64)); - } else { - struct eventreq32 er32; - - error = copyin(uap->u_req, (caddr_t)&er32, sizeof(struct eventreq32)); - if (error == 0) { - /* - * the user only passes in the - * er_type, er_handle and er_data... 
- * the other fields are initialized - * below, so don't bother to copy - */ - erp->er_type = er32.er_type; - erp->er_handle = er32.er_handle; - erp->er_data = (user_addr_t)er32.er_data; - } - } - if (error) { - FREE(evq, M_TEMP); - KERNEL_DEBUG(DBG_MISC_WATCH | DBG_FUNC_END, error, 0, 0, 0, 0); - - return error; - } - KERNEL_DEBUG(DBG_MISC_WATCH, erp->er_handle, uap->u_eventmask, (uint32_t)evq, 0, 0); - - // validate, freeing qelt if errors - error = 0; - proc_fdlock(p); - - if (erp->er_type != EV_FD) { - error = EINVAL; - } else if ((error = fp_lookup(p, erp->er_handle, &fp, 1)) != 0) { - error = EBADF; -#if SOCKETS - } else if (fp->f_type == DTYPE_SOCKET) { - socket_lock((struct socket *)fp->f_data, 1); - np = ((struct socket *)fp->f_data)->so_evlist.tqh_first; -#endif /* SOCKETS */ - } else if (fp->f_type == DTYPE_PIPE) { - PIPE_LOCK((struct pipe *)fp->f_data); - np = ((struct pipe *)fp->f_data)->pipe_evlist.tqh_first; - } else { - fp_drop(p, erp->er_handle, fp, 1); - error = EINVAL; - } - proc_fdunlock(p); - - if (error) { - FREE(evq, M_TEMP); - - KERNEL_DEBUG(DBG_MISC_WATCH | DBG_FUNC_END, error, 0, 0, 0, 0); - return error; - } - - /* - * only allow one watch per file per proc - */ - for (; np != NULL; np = np->ee_slist.tqe_next) { - if (np->ee_proc == p) { -#if SOCKETS - if (fp->f_type == DTYPE_SOCKET) { - socket_unlock((struct socket *)fp->f_data, 1); - } else -#endif /* SOCKETS */ - PIPE_UNLOCK((struct pipe *)fp->f_data); - fp_drop(p, erp->er_handle, fp, 0); - FREE(evq, M_TEMP); - - KERNEL_DEBUG(DBG_MISC_WATCH | DBG_FUNC_END, EINVAL, 0, 0, 0, 0); - return EINVAL; - } - } - erp->er_ecnt = erp->er_rcnt = erp->er_wcnt = erp->er_eventbits = 0; - evq->ee_proc = p; - evq->ee_eventmask = uap->u_eventmask & EV_MASK; - evq->ee_flags = 0; - -#if SOCKETS - if (fp->f_type == DTYPE_SOCKET) { - TAILQ_INSERT_TAIL(&((struct socket *)fp->f_data)->so_evlist, evq, ee_slist); - postevent((struct socket *)fp->f_data, 0, EV_RWBYTES); // catch existing events - - socket_unlock((struct socket *)fp->f_data, 1); - } else -#endif /* SOCKETS */ - { - TAILQ_INSERT_TAIL(&((struct pipe *)fp->f_data)->pipe_evlist, evq, ee_slist); - postpipeevent((struct pipe *)fp->f_data, EV_RWBYTES); - - PIPE_UNLOCK((struct pipe *)fp->f_data); - } - fp_drop_event(p, erp->er_handle, fp); - - KERNEL_DEBUG(DBG_MISC_WATCH | DBG_FUNC_END, 0, 0, 0, 0, 0); - return 0; -} - - - -/* - * waitevent system call. - * grabs the next waiting event for this proc and returns - * it. 
if no events, user can request to sleep with timeout - * or without or poll mode - * ((tv != NULL && interval == 0) || tv == -1) - */ -int -waitevent(proc_t p, struct waitevent_args *uap, int *retval) -{ - int error = 0; - struct eventqelt *evq; - struct eventreq64 *erp; - uint64_t abstime, interval; - boolean_t fast_poll = FALSE; - union { - struct eventreq64 er64; - struct eventreq32 er32; - } uer = {}; - - interval = 0; - - if (uap->tv) { - struct timeval atv; - /* - * check for fast poll method - */ - if (IS_64BIT_PROCESS(p)) { - if (uap->tv == (user_addr_t)-1) { - fast_poll = TRUE; - } - } else if (uap->tv == (user_addr_t)((uint32_t)-1)) { - fast_poll = TRUE; - } - - if (fast_poll == TRUE) { - if (p->p_evlist.tqh_first == NULL) { - KERNEL_DEBUG(DBG_MISC_WAIT | DBG_FUNC_NONE, -1, 0, 0, 0, 0); - /* - * poll failed - */ - *retval = 1; - return 0; - } - proc_lock(p); - goto retry; - } - if (IS_64BIT_PROCESS(p)) { - struct user64_timeval atv64; - error = copyin(uap->tv, (caddr_t)&atv64, sizeof(atv64)); - /* Loses resolution - assume timeout < 68 years */ - atv.tv_sec = atv64.tv_sec; - atv.tv_usec = atv64.tv_usec; - } else { - struct user32_timeval atv32; - error = copyin(uap->tv, (caddr_t)&atv32, sizeof(atv32)); - atv.tv_sec = atv32.tv_sec; - atv.tv_usec = atv32.tv_usec; - } - - if (error) { - return error; - } - if (itimerfix(&atv)) { - error = EINVAL; - return error; - } - interval = tvtoabstime(&atv); - } - KERNEL_DEBUG(DBG_MISC_WAIT | DBG_FUNC_START, 0, 0, 0, 0, 0); - - proc_lock(p); -retry: - if ((evq = p->p_evlist.tqh_first) != NULL) { - /* - * found one... make a local copy while it's still on the queue - * to prevent it from changing while in the midst of copying - * don't want to hold the proc lock across a copyout because - * it might block on a page fault at the target in user space - */ - erp = &evq->ee_req; - - if (IS_64BIT_PROCESS(p)) { - bcopy((caddr_t)erp, (caddr_t)&uer.er64, sizeof(struct eventreq64)); - } else { - uer.er32.er_type = erp->er_type; - uer.er32.er_handle = erp->er_handle; - uer.er32.er_data = (uint32_t)erp->er_data; - uer.er32.er_ecnt = erp->er_ecnt; - uer.er32.er_rcnt = erp->er_rcnt; - uer.er32.er_wcnt = erp->er_wcnt; - uer.er32.er_eventbits = erp->er_eventbits; - } - TAILQ_REMOVE(&p->p_evlist, evq, ee_plist); - - evq->ee_flags &= ~EV_QUEUED; - - proc_unlock(p); - - if (IS_64BIT_PROCESS(p)) { - error = copyout((caddr_t)&uer.er64, uap->u_req, sizeof(struct eventreq64)); - } else { - error = copyout((caddr_t)&uer.er32, uap->u_req, sizeof(struct eventreq32)); - } - - KERNEL_DEBUG(DBG_MISC_WAIT | DBG_FUNC_END, error, - evq->ee_req.er_handle, evq->ee_req.er_eventbits, (uint32_t)evq, 0); - return error; - } else { - if (uap->tv && interval == 0) { - proc_unlock(p); - *retval = 1; // poll failed - - KERNEL_DEBUG(DBG_MISC_WAIT | DBG_FUNC_END, error, 0, 0, 0, 0); - return error; - } - if (interval != 0) { - clock_absolutetime_interval_to_deadline(interval, &abstime); - } else { - abstime = 0; - } - - KERNEL_DEBUG(DBG_MISC_WAIT, 1, (uint32_t)&p->p_evlist, 0, 0, 0); - - error = msleep1(&p->p_evlist, &p->p_mlock, (PSOCK | PCATCH), "waitevent", abstime); - - KERNEL_DEBUG(DBG_MISC_WAIT, 2, (uint32_t)&p->p_evlist, 0, 0, 0); - - if (error == 0) { - goto retry; - } - if (error == ERESTART) { - error = EINTR; - } - if (error == EWOULDBLOCK) { - *retval = 1; - error = 0; - } - } - proc_unlock(p); - - KERNEL_DEBUG(DBG_MISC_WAIT | DBG_FUNC_END, 0, 0, 0, 0, 0); - return error; -} - - -/* - * modwatch system call. user passes in event to modify. 
- * if we find it we reset the event bits and que/deque event - * it needed. - */ -int -modwatch(proc_t p, struct modwatch_args *uap, __unused int *retval) -{ - struct eventreq64 er; - struct eventreq64 *erp = &er; - struct eventqelt *evq = NULL; /* protected by error return */ - int error; - struct fileproc *fp; - int flag; - - KERNEL_DEBUG(DBG_MISC_MOD | DBG_FUNC_START, 0, 0, 0, 0, 0); - - /* - * get user's request pkt - * just need the er_type and er_handle which sit above the - * problematic er_data (32/64 issue)... so only copy in - * those 2 fields - */ - if ((error = copyin(uap->u_req, (caddr_t)erp, sizeof(er.er_type) + sizeof(er.er_handle)))) { - KERNEL_DEBUG(DBG_MISC_MOD | DBG_FUNC_END, error, 0, 0, 0, 0); - return error; - } - proc_fdlock(p); - - if (erp->er_type != EV_FD) { - error = EINVAL; - } else if ((error = fp_lookup(p, erp->er_handle, &fp, 1)) != 0) { - error = EBADF; -#if SOCKETS - } else if (fp->f_type == DTYPE_SOCKET) { - socket_lock((struct socket *)fp->f_data, 1); - evq = ((struct socket *)fp->f_data)->so_evlist.tqh_first; -#endif /* SOCKETS */ - } else if (fp->f_type == DTYPE_PIPE) { - PIPE_LOCK((struct pipe *)fp->f_data); - evq = ((struct pipe *)fp->f_data)->pipe_evlist.tqh_first; - } else { - fp_drop(p, erp->er_handle, fp, 1); - error = EINVAL; - } - - if (error) { - proc_fdunlock(p); - KERNEL_DEBUG(DBG_MISC_MOD | DBG_FUNC_END, error, 0, 0, 0, 0); - return error; - } - - if ((uap->u_eventmask == EV_RM) && (fp->f_flags & FP_WAITEVENT)) { - fp->f_flags &= ~FP_WAITEVENT; - } - proc_fdunlock(p); - - // locate event if possible - for (; evq != NULL; evq = evq->ee_slist.tqe_next) { - if (evq->ee_proc == p) { - break; - } - } - if (evq == NULL) { -#if SOCKETS - if (fp->f_type == DTYPE_SOCKET) { - socket_unlock((struct socket *)fp->f_data, 1); - } else -#endif /* SOCKETS */ - PIPE_UNLOCK((struct pipe *)fp->f_data); - fp_drop(p, erp->er_handle, fp, 0); - KERNEL_DEBUG(DBG_MISC_MOD | DBG_FUNC_END, EINVAL, 0, 0, 0, 0); - return EINVAL; - } - KERNEL_DEBUG(DBG_MISC_MOD, erp->er_handle, uap->u_eventmask, (uint32_t)evq, 0, 0); - - if (uap->u_eventmask == EV_RM) { - EVPROCDEQUE(p, evq); - -#if SOCKETS - if (fp->f_type == DTYPE_SOCKET) { - TAILQ_REMOVE(&((struct socket *)fp->f_data)->so_evlist, evq, ee_slist); - socket_unlock((struct socket *)fp->f_data, 1); - } else -#endif /* SOCKETS */ - { - TAILQ_REMOVE(&((struct pipe *)fp->f_data)->pipe_evlist, evq, ee_slist); - PIPE_UNLOCK((struct pipe *)fp->f_data); - } - fp_drop(p, erp->er_handle, fp, 0); - FREE(evq, M_TEMP); - KERNEL_DEBUG(DBG_MISC_MOD | DBG_FUNC_END, 0, 0, 0, 0, 0); - return 0; - } - switch (uap->u_eventmask & EV_MASK) { - case 0: - flag = 0; - break; - - case EV_RE: - case EV_WR: - case EV_RE | EV_WR: - flag = EV_RWBYTES; - break; - - case EV_EX: - flag = EV_OOB; - break; - - case EV_EX | EV_RE: - case EV_EX | EV_WR: - case EV_EX | EV_RE | EV_WR: - flag = EV_OOB | EV_RWBYTES; - break; - - default: -#if SOCKETS - if (fp->f_type == DTYPE_SOCKET) { - socket_unlock((struct socket *)fp->f_data, 1); - } else -#endif /* SOCKETS */ - PIPE_UNLOCK((struct pipe *)fp->f_data); - fp_drop(p, erp->er_handle, fp, 0); - KERNEL_DEBUG(DBG_MISC_WATCH | DBG_FUNC_END, EINVAL, 0, 0, 0, 0); - return EINVAL; - } - /* - * since we're holding the socket/pipe lock, the event - * cannot go from the unqueued state to the queued state - * however, it can go from the queued state to the unqueued state - * since that direction is protected by the proc_lock... 
- * so do a quick check for EV_QUEUED w/o holding the proc lock - * since by far the common case will be NOT EV_QUEUED, this saves - * us taking the proc_lock the majority of the time - */ - if (evq->ee_flags & EV_QUEUED) { - /* - * EVPROCDEQUE will recheck the state after it grabs the proc_lock - */ - EVPROCDEQUE(p, evq); - } - /* - * while the event is off the proc queue and - * we're holding the socket/pipe lock - * it's safe to update these fields... - */ - evq->ee_req.er_eventbits = 0; - evq->ee_eventmask = uap->u_eventmask & EV_MASK; - -#if SOCKETS - if (fp->f_type == DTYPE_SOCKET) { - postevent((struct socket *)fp->f_data, 0, flag); - socket_unlock((struct socket *)fp->f_data, 1); - } else -#endif /* SOCKETS */ - { - postpipeevent((struct pipe *)fp->f_data, flag); - PIPE_UNLOCK((struct pipe *)fp->f_data); - } - fp_drop(p, erp->er_handle, fp, 0); - KERNEL_DEBUG(DBG_MISC_MOD | DBG_FUNC_END, evq->ee_req.er_handle, evq->ee_eventmask, (uint32_t)fp->f_data, flag, 0); - return 0; -} - -/* this routine is called from the close of fd with proc_fdlock held */ -int -waitevent_close(struct proc *p, struct fileproc *fp) -{ - struct eventqelt *evq; - - - fp->f_flags &= ~FP_WAITEVENT; - -#if SOCKETS - if (fp->f_type == DTYPE_SOCKET) { - socket_lock((struct socket *)fp->f_data, 1); - evq = ((struct socket *)fp->f_data)->so_evlist.tqh_first; - } else -#endif /* SOCKETS */ - if (fp->f_type == DTYPE_PIPE) { - PIPE_LOCK((struct pipe *)fp->f_data); - evq = ((struct pipe *)fp->f_data)->pipe_evlist.tqh_first; - } else { - return EINVAL; - } - proc_fdunlock(p); - - - // locate event if possible - for (; evq != NULL; evq = evq->ee_slist.tqe_next) { - if (evq->ee_proc == p) { - break; - } - } - if (evq == NULL) { -#if SOCKETS - if (fp->f_type == DTYPE_SOCKET) { - socket_unlock((struct socket *)fp->f_data, 1); - } else -#endif /* SOCKETS */ - PIPE_UNLOCK((struct pipe *)fp->f_data); - - proc_fdlock(p); - - return EINVAL; - } - EVPROCDEQUE(p, evq); - -#if SOCKETS - if (fp->f_type == DTYPE_SOCKET) { - TAILQ_REMOVE(&((struct socket *)fp->f_data)->so_evlist, evq, ee_slist); - socket_unlock((struct socket *)fp->f_data, 1); - } else -#endif /* SOCKETS */ - { - TAILQ_REMOVE(&((struct pipe *)fp->f_data)->pipe_evlist, evq, ee_slist); - PIPE_UNLOCK((struct pipe *)fp->f_data); - } - FREE(evq, M_TEMP); - - proc_fdlock(p); - - return 0; -} - - -/* - * gethostuuid - * - * Description: Get the host UUID from IOKit and return it to user space. - * - * Parameters: uuid_buf Pointer to buffer to receive UUID - * timeout Timespec for timout - * - * Returns: 0 Success - * EWOULDBLOCK Timeout is too short - * copyout:EFAULT Bad user buffer - * mac_system_check_info:EPERM Client not allowed to perform this operation - * - * Notes: A timeout seems redundant, since if it's tolerable to not - * have a system UUID in hand, then why ask for one? 
- */ -int -gethostuuid(struct proc *p, struct gethostuuid_args *uap, __unused int32_t *retval) -{ - kern_return_t kret; - int error; - mach_timespec_t mach_ts; /* for IOKit call */ - __darwin_uuid_t uuid_kern = {}; /* for IOKit call */ - - /* Check entitlement */ - if (!IOTaskHasEntitlement(current_task(), "com.apple.private.getprivatesysid")) { -#if CONFIG_EMBEDDED -#if CONFIG_MACF - if ((error = mac_system_check_info(kauth_cred_get(), "hw.uuid")) != 0) { - /* EPERM invokes userspace upcall if present */ - return error; - } -#endif -#endif +#endif +#endif } /* Convert the 32/64 bit timespec into a mach_timespec_t */ @@ -3208,8 +2414,8 @@ gethostuuid(struct proc *p, struct gethostuuid_args *uap, __unused int32_t *retv if (error) { return error; } - mach_ts.tv_sec = ts.tv_sec; - mach_ts.tv_nsec = ts.tv_nsec; + mach_ts.tv_sec = (unsigned int)ts.tv_sec; + mach_ts.tv_nsec = (clock_res_t)ts.tv_nsec; } else { struct user32_timespec ts; error = copyin(uap->timeoutp, &ts, sizeof(ts)); @@ -3281,7 +2487,7 @@ ledger(struct proc *p, struct ledger_args *args, __unused int32_t *retval) rval = 0; if (args->cmd != LEDGER_TEMPLATE_INFO) { - pid = args->arg1; + pid = (int)args->arg1; proc = proc_find(pid); if (proc == NULL) { return ESRCH; @@ -3330,7 +2536,7 @@ ledger(struct proc *p, struct ledger_args *args, __unused int32_t *retval) if ((rval == 0) && (len >= 0)) { sz = len * sizeof(struct ledger_entry_info); rval = copyout(buf, args->arg2, sz); - kfree(buf, sz); + kheap_free(KHEAP_DATA_BUFFERS, buf, sz); } if (rval == 0) { rval = copyout(&len, args->arg3, sizeof(len)); @@ -3346,7 +2552,7 @@ ledger(struct proc *p, struct ledger_args *args, __unused int32_t *retval) if ((rval == 0) && (len >= 0)) { sz = len * sizeof(struct ledger_template_info); rval = copyout(buf, args->arg1, sz); - kfree(buf, sz); + kheap_free(KHEAP_DATA_BUFFERS, buf, sz); } if (rval == 0) { rval = copyout(&len, args->arg2, sizeof(len)); @@ -3440,11 +2646,12 @@ log_data(__unused struct proc *p, struct log_data_args *args, int *retval) /* truncate to OS_LOG_DATA_MAX_SIZE */ if (size > OS_LOG_DATA_MAX_SIZE) { - printf("%s: WARNING msg is going to be truncated from %u to %u\n", __func__, size, OS_LOG_DATA_MAX_SIZE); + printf("%s: WARNING msg is going to be truncated from %u to %u\n", + __func__, size, OS_LOG_DATA_MAX_SIZE); size = OS_LOG_DATA_MAX_SIZE; } - log_msg = kalloc(size); + log_msg = kheap_alloc(KHEAP_TEMP, size, Z_WAITOK); if (!log_msg) { return ENOMEM; } @@ -3461,11 +2668,11 @@ log_data(__unused struct proc *p, struct log_data_args *args, int *retval) * The call will fail if the current * process is not a driverKit process. */ - os_log_driverKit(&ret, OS_LOG_DEFAULT, flags, "%s", log_msg); + os_log_driverKit(&ret, OS_LOG_DEFAULT, (os_log_type_t)flags, "%s", log_msg); out: if (log_msg != NULL) { - kfree(log_msg, size); + kheap_free(KHEAP_TEMP, log_msg, size); } return ret; @@ -4121,6 +3328,81 @@ out: SYSCTL_PROC(_kern, OID_AUTO, sched_task_set_cluster_type, CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_LOCKED, 0, 0, sysctl_kern_sched_task_set_cluster_type, "A", ""); + +#if CONFIG_SCHED_EDGE + +/* + * Edge Scheduler Sysctls + * + * The Edge scheduler uses edge configurations to decide feasability of + * migrating threads across clusters. The sysctls allow dynamic configuration + * of the edge properties and edge weights. This configuration is typically + * updated via callouts from CLPC. 
+ * + * + */ +extern sched_clutch_edge sched_edge_config_e_to_p; +extern sched_clutch_edge sched_edge_config_p_to_e; +extern kern_return_t sched_edge_sysctl_configure_e_to_p(uint64_t); +extern kern_return_t sched_edge_sysctl_configure_p_to_e(uint64_t); +extern sched_clutch_edge sched_edge_e_to_p(void); +extern sched_clutch_edge sched_edge_p_to_e(void); + +static int sysctl_sched_edge_config_e_to_p SYSCTL_HANDLER_ARGS +{ +#pragma unused(oidp, arg1, arg2) + int error; + kern_return_t kr; + int64_t edge_config = 0; + + error = SYSCTL_IN(req, &edge_config, sizeof(edge_config)); + if (error) { + return error; + } + + if (!req->newptr) { + edge_config = sched_edge_e_to_p().sce_edge_packed; + return SYSCTL_OUT(req, &edge_config, sizeof(edge_config)); + } + + kr = sched_edge_sysctl_configure_e_to_p(edge_config); + return SYSCTL_OUT(req, &kr, sizeof(kr)); +} +SYSCTL_PROC(_kern, OID_AUTO, sched_edge_config_e_to_p, CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED, + 0, 0, sysctl_sched_edge_config_e_to_p, "Q", "Edge Scheduler Config for E-to-P cluster"); + +static int sysctl_sched_edge_config_p_to_e SYSCTL_HANDLER_ARGS +{ +#pragma unused(oidp, arg1, arg2) + int error; + kern_return_t kr; + int64_t edge_config = 0; + + error = SYSCTL_IN(req, &edge_config, sizeof(edge_config)); + if (error) { + return error; + } + + if (!req->newptr) { + edge_config = sched_edge_p_to_e().sce_edge_packed; + return SYSCTL_OUT(req, &edge_config, sizeof(edge_config)); + } + + kr = sched_edge_sysctl_configure_p_to_e(edge_config); + return SYSCTL_OUT(req, &kr, sizeof(kr)); +} +SYSCTL_PROC(_kern, OID_AUTO, sched_edge_config_p_to_e, CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED, + 0, 0, sysctl_sched_edge_config_p_to_e, "Q", "Edge Scheduler Config for P-to-E cluster"); + +extern int sched_edge_restrict_ut; +SYSCTL_INT(_kern, OID_AUTO, sched_edge_restrict_ut, CTLFLAG_RW | CTLFLAG_LOCKED, &sched_edge_restrict_ut, 0, "Edge Scheduler Restrict UT Threads"); +extern int sched_edge_restrict_bg; +SYSCTL_INT(_kern, OID_AUTO, sched_edge_restrict_bg, CTLFLAG_RW | CTLFLAG_LOCKED, &sched_edge_restrict_ut, 0, "Edge Scheduler Restrict BG Threads"); +extern int sched_edge_migrate_ipi_immediate; +SYSCTL_INT(_kern, OID_AUTO, sched_edge_migrate_ipi_immediate, CTLFLAG_RW | CTLFLAG_LOCKED, &sched_edge_migrate_ipi_immediate, 0, "Edge Scheduler uses immediate IPIs for migration event based on execution latency"); + +#endif /* CONFIG_SCHED_EDGE */ + #endif /* __AMP__ */ #endif /* DEVELOPMENT || DEBUG */ @@ -4219,4 +3501,62 @@ sysctl_kern_sched_thread_set_no_smt(__unused struct sysctl_oid *oidp, __unused v SYSCTL_PROC(_kern, OID_AUTO, sched_thread_set_no_smt, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED | CTLFLAG_ANYBODY, 0, 0, sysctl_kern_sched_thread_set_no_smt, "I", ""); + +static int +sysctl_kern_debug_get_preoslog SYSCTL_HANDLER_ARGS +{ +#pragma unused(oidp, arg1, arg2) + static bool oneshot_executed = false; + size_t preoslog_size = 0; + const char *preoslog = NULL; + + // DumpPanic pases a non-zero write value when it needs oneshot behaviour + if (req->newptr) { + uint8_t oneshot = 0; + int error = SYSCTL_IN(req, &oneshot, sizeof(oneshot)); + if (error) { + return error; + } + + if (oneshot) { + if (!OSCompareAndSwap8(false, true, &oneshot_executed)) { + return EPERM; + } + } + } + + preoslog = sysctl_debug_get_preoslog(&preoslog_size); + if (preoslog == NULL || preoslog_size == 0) { + return 0; + } + + if (req->oldptr == USER_ADDR_NULL) { + req->oldidx = preoslog_size; + return 0; + } + + return SYSCTL_OUT(req, preoslog, preoslog_size); +} + 
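The handler above follows the usual two-call sysctl read protocol: a probe with a NULL old pointer reports the preoslog size through req->oldidx, a second call copies the buffer out, and writing a non-zero byte as the new value arms the oneshot latch (later oneshot reads fail with EPERM via OSCompareAndSwap8). As a minimal userspace sketch only — not part of this patch, and assuming a DEVELOPMENT/DEBUG kernel where the kern.preoslog node registered below exists — a consumer such as DumpPanic could read it roughly like this:

#include <sys/sysctl.h>
#include <stdint.h>
#include <stdlib.h>

static void *
read_preoslog(size_t *lenp)
{
	uint8_t oneshot = 1;    /* non-zero write value requests the oneshot behaviour */
	size_t len = 0;
	void *buf;

	/* First call: oldp == NULL, so the handler only reports the size. */
	if (sysctlbyname("kern.preoslog", NULL, &len, NULL, 0) != 0 || len == 0) {
		return NULL;
	}
	if ((buf = malloc(len)) == NULL) {
		return NULL;
	}
	/* Second call: copy the log out and latch the oneshot flag. */
	if (sysctlbyname("kern.preoslog", buf, &len, &oneshot, sizeof(oneshot)) != 0) {
		free(buf);
		return NULL;
	}
	*lenp = len;
	return buf;
}
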
+SYSCTL_PROC(_kern, OID_AUTO, preoslog, CTLTYPE_OPAQUE | CTLFLAG_RW | CTLFLAG_LOCKED, + 0, 0, sysctl_kern_debug_get_preoslog, "-", ""); + +static int +sysctl_kern_task_set_filter_msg_flag SYSCTL_HANDLER_ARGS +{ +#pragma unused(oidp, arg1, arg2) + int new_value, changed; + int old_value = task_get_filter_msg_flag(current_task()) ? 1 : 0; + int error = sysctl_io_number(req, old_value, sizeof(int), &new_value, &changed); + + if (changed) { + task_set_filter_msg_flag(current_task(), !!new_value); + } + + return error; +} + +SYSCTL_PROC(_kern, OID_AUTO, task_set_filter_msg_flag, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, + 0, 0, sysctl_kern_task_set_filter_msg_flag, "I", ""); + #endif /* DEVELOPMENT || DEBUG */ diff --git a/bsd/kern/sys_persona.c b/bsd/kern/sys_persona.c index 186f82993..d44ad74bf 100644 --- a/bsd/kern/sys_persona.c +++ b/bsd/kern/sys_persona.c @@ -117,14 +117,12 @@ kpersona_alloc_syscall(user_addr_t infop, user_addr_t idp, user_addr_t path) } if (path) { - MALLOC_ZONE(pna_path, caddr_t, MAXPATHLEN, M_NAMEI, M_WAITOK | M_ZERO); - if (pna_path == NULL) { - return ENOMEM; - } + pna_path = zalloc_flags(ZV_NAMEI, Z_WAITOK | Z_ZERO); + size_t pathlen; error = copyinstr(path, (void *)pna_path, MAXPATHLEN, &pathlen); if (error) { - FREE_ZONE(pna_path, MAXPATHLEN, M_NAMEI); + zfree(ZV_NAMEI, pna_path); return error; } } @@ -133,7 +131,7 @@ kpersona_alloc_syscall(user_addr_t infop, user_addr_t idp, user_addr_t path) persona = persona_alloc(id, login, kinfo.persona_type, pna_path, &error); if (!persona) { if (pna_path != NULL) { - FREE_ZONE(pna_path, MAXPATHLEN, M_NAMEI); + zfree(ZV_NAMEI, pna_path); } return error; } @@ -349,9 +347,9 @@ kpersona_info_syscall(user_addr_t idp, user_addr_t infop) kinfo.persona_id = persona->pna_id; kinfo.persona_type = persona->pna_type; kinfo.persona_gid = persona_get_gid(persona); - unsigned ngroups = 0; + size_t ngroups = 0; persona_get_groups(persona, &ngroups, kinfo.persona_groups, NGROUPS); - kinfo.persona_ngroups = ngroups; + kinfo.persona_ngroups = (uint32_t)ngroups; kinfo.persona_gmuid = persona_get_gmuid(persona); /* @@ -395,9 +393,9 @@ kpersona_pidinfo_syscall(user_addr_t idp, user_addr_t infop) kinfo.persona_id = persona->pna_id; kinfo.persona_type = persona->pna_type; kinfo.persona_gid = persona_get_gid(persona); - unsigned ngroups = 0; + size_t ngroups = 0; persona_get_groups(persona, &ngroups, kinfo.persona_groups, NGROUPS); - kinfo.persona_ngroups = ngroups; + kinfo.persona_ngroups = (uint32_t)ngroups; kinfo.persona_gmuid = persona_get_gmuid(persona); strncpy(kinfo.persona_name, persona->pna_login, MAXLOGNAME); @@ -444,7 +442,7 @@ kpersona_find_syscall(user_addr_t infop, user_addr_t idp, user_addr_t idlenp) } k_idlen = u_idlen; - error = persona_find_all(login, kinfo.persona_id, kinfo.persona_type, persona, &k_idlen); + error = persona_find_all(login, kinfo.persona_id, (persona_type_t)kinfo.persona_type, persona, &k_idlen); if (error) { goto out; } diff --git a/bsd/kern/sys_pipe.c b/bsd/kern/sys_pipe.c index ef7dcbab1..508a92c87 100644 --- a/bsd/kern/sys_pipe.c +++ b/bsd/kern/sys_pipe.c @@ -17,7 +17,7 @@ * are met. */ /* - * Copyright (c) 2003-2014 Apple Inc. All rights reserved. + * Copyright (c) 2003-2020 Apple Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -152,17 +152,15 @@ #include #endif -#define f_flag f_fglob->fg_flag -#define f_msgcount f_fglob->fg_msgcount -#define f_cred f_fglob->fg_cred -#define f_ops f_fglob->fg_ops -#define f_offset f_fglob->fg_offset -#define f_data f_fglob->fg_data +#define f_flag fp_glob->fg_flag +#define f_ops fp_glob->fg_ops +#define f_data fp_glob->fg_data struct pipepair { lck_mtx_t pp_mtx; struct pipe pp_rpipe; struct pipe pp_wpipe; + uint64_t pp_pipe_id; /* unique ID shared by both pipe ends */ }; #define PIPE_PAIR(pipe) \ @@ -233,10 +231,14 @@ SECURITY_READ_ONLY_EARLY(struct filterops) pipe_wfiltops = { .f_process = filt_pipewriteprocess, }; +#if PIPE_SYSCTLS static int nbigpipe; /* for compatibility sake. no longer used */ +#endif static int amountpipes; /* total number of pipes in system */ static int amountpipekva; /* total memory used by pipes */ +static _Atomic uint64_t pipe_unique_id = 1; + int maxpipekva __attribute__((used)) = PIPE_KVAMAX; /* allowing 16MB max. */ #if PIPE_SYSCTLS @@ -266,39 +268,14 @@ static void pipeselwakeup(struct pipe *cpipe, struct pipe *spipe); static __inline int pipeio_lock(struct pipe *cpipe, int catch); static __inline void pipeio_unlock(struct pipe *cpipe); -extern int postpipeevent(struct pipe *, int); -extern void evpipefree(struct pipe *cpipe); - -static lck_grp_t *pipe_mtx_grp; -static lck_attr_t *pipe_mtx_attr; -static lck_grp_attr_t *pipe_mtx_grp_attr; - -static zone_t pipe_zone; +static LCK_GRP_DECLARE(pipe_mtx_grp, "pipe"); +static ZONE_DECLARE(pipe_zone, "pipe zone", sizeof(struct pipepair), ZC_NONE); #define MAX_PIPESIZE(pipe) ( MAX(PIPE_SIZE, (pipe)->pipe_buffer.size) ) SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_ANY, pipeinit, NULL); -/* initial setup done at time of sysinit */ -void -pipeinit(void) -{ - nbigpipe = 0; - vm_size_t zone_size; - - zone_size = 8192 * sizeof(struct pipepair); - pipe_zone = zinit(sizeof(struct pipepair), zone_size, 4096, "pipe zone"); - - - /* allocate lock group attribute and group for pipe mutexes */ - pipe_mtx_grp_attr = lck_grp_attr_alloc_init(); - pipe_mtx_grp = lck_grp_alloc_init("pipe", pipe_mtx_grp_attr); - - /* allocate the lock attribute for pipe mutexes */ - pipe_mtx_attr = lck_attr_alloc_init(); -} - -#ifndef CONFIG_EMBEDDED +#if defined(XNU_TARGET_OS_OSX) /* Bitmap for things to touch in pipe_touch() */ #define PIPE_ATIME 0x00000001 /* time of last access */ #define PIPE_MTIME 0x00000002 /* time of last modification */ @@ -588,6 +565,11 @@ pipe_stat(struct pipe *cpipe, void *ub, int isstat64) return 0; } +uint64_t +pipe_id(struct pipe *p) +{ + return PIPE_PAIR(p)->pp_pipe_id; +} /* * Allocate kva for pipe circular buffer, the space is pageable @@ -604,7 +586,8 @@ pipespace(struct pipe *cpipe, int size) return EINVAL; } - if ((buffer = (vm_offset_t)kalloc(size)) == 0) { + buffer = (vm_offset_t)kheap_alloc(KHEAP_DATA_BUFFERS, size, Z_WAITOK); + if (!buffer) { return ENOMEM; } @@ -641,15 +624,13 @@ pipepair_alloc(struct pipe **rp_out, struct pipe **wp_out) * if pipespace() fails. 
*/ bzero(pp, sizeof(struct pipepair)); - lck_mtx_init(&pp->pp_mtx, pipe_mtx_grp, pipe_mtx_attr); + pp->pp_pipe_id = os_atomic_inc_orig(&pipe_unique_id, relaxed); + lck_mtx_init(&pp->pp_mtx, &pipe_mtx_grp, LCK_ATTR_NULL); rpipe->pipe_mtxp = &pp->pp_mtx; wpipe->pipe_mtxp = &pp->pp_mtx; - TAILQ_INIT(&rpipe->pipe_evlist); - TAILQ_INIT(&wpipe->pipe_evlist); - -#ifndef CONFIG_EMBEDDED +#if defined(XNU_TARGET_OS_OSX) /* Initial times are all the time of creation of the pipe */ pipe_touch(rpipe, PIPE_ATIME | PIPE_MTIME | PIPE_CTIME); pipe_touch(wpipe, PIPE_ATIME | PIPE_MTIME | PIPE_CTIME); @@ -662,7 +643,7 @@ pipepair_alloc(struct pipe **rp_out, struct pipe **wp_out) */ int error = pipespace(rpipe, choose_pipespace(rpipe->pipe_buffer.size, 0)); if (__improbable(error)) { - lck_mtx_destroy(&pp->pp_mtx, pipe_mtx_grp); + lck_mtx_destroy(&pp->pp_mtx, &pipe_mtx_grp); zfree(pipe_zone, pp); return error; } @@ -691,7 +672,7 @@ pipepair_destroy_pipe(struct pipepair *pp, struct pipe *cpipe) lck_mtx_unlock(&pp->pp_mtx); if (can_free) { - lck_mtx_destroy(&pp->pp_mtx, pipe_mtx_grp); + lck_mtx_destroy(&pp->pp_mtx, &pipe_mtx_grp); zfree(pipe_zone, pp); } } @@ -741,8 +722,6 @@ pipeselwakeup(struct pipe *cpipe, struct pipe *spipe) KNOTE(&cpipe->pipe_sel.si_note, 1); - postpipeevent(cpipe, EV_RWBYTES); - if (spipe && (spipe->pipe_state & PIPE_ASYNC) && spipe->pipe_pgid) { if (spipe->pipe_pgid < 0) { gsignal(-spipe->pipe_pgid, SIGIO); @@ -796,10 +775,9 @@ pipe_read(struct fileproc *fp, struct uio *uio, __unused int flags, if (size > rpipe->pipe_buffer.cnt) { size = rpipe->pipe_buffer.cnt; } - // LP64todo - fix this! - if (size > (u_int) uio_resid(uio)) { - size = (u_int) uio_resid(uio); - } + + size = (u_int) MIN(INT_MAX, MIN((user_size_t)size, + (user_size_t)uio_resid(uio))); PIPE_UNLOCK(rpipe); /* we still hold io lock.*/ error = uiomove( @@ -905,7 +883,7 @@ unlocked_error: pipeselwakeup(rpipe, rpipe->pipe_peer); } -#ifndef CONFIG_EMBEDDED +#if defined(XNU_TARGET_OS_OSX) /* update last read time */ pipe_touch(rpipe, PIPE_ATIME); #endif @@ -924,11 +902,14 @@ pipe_write(struct fileproc *fp, struct uio *uio, __unused int flags, __unused vfs_context_t ctx) { int error = 0; - int orig_resid; + size_t orig_resid; int pipe_size; struct pipe *wpipe, *rpipe; // LP64todo - fix this! - orig_resid = uio_resid(uio); + orig_resid = (size_t)uio_resid(uio); + if (orig_resid > LONG_MAX) { + return EINVAL; + } int space; rpipe = (struct pipe *)fp->f_data; @@ -1013,8 +994,8 @@ retrywrite: if (space > 0) { if ((error = pipeio_lock(wpipe, 1)) == 0) { - int size; /* Transfer size */ - int segsize; /* first segment to transfer */ + size_t size; /* Transfer size */ + size_t segsize; /* first segment to transfer */ if ((wpipe->pipe_state & (PIPE_DRAIN | PIPE_EOF)) || (fileproc_get_vflags(fp) & FPV_DRAIN)) { @@ -1039,7 +1020,10 @@ retrywrite: */ // LP64todo - fix this! 
if (space > uio_resid(uio)) { - size = uio_resid(uio); + size = (size_t)uio_resid(uio); + if (size > LONG_MAX) { + panic("size greater than LONG_MAX"); + } } else { size = space; } @@ -1060,7 +1044,7 @@ retrywrite: PIPE_UNLOCK(rpipe); error = uiomove(&wpipe->pipe_buffer.buffer[wpipe->pipe_buffer.in], - segsize, uio); + (int)segsize, uio); PIPE_LOCK(rpipe); if (error == 0 && segsize < size) { @@ -1078,7 +1062,7 @@ retrywrite: PIPE_UNLOCK(rpipe); error = uiomove( &wpipe->pipe_buffer.buffer[0], - size - segsize, uio); + (int)(size - segsize), uio); PIPE_LOCK(rpipe); } /* @@ -1094,8 +1078,8 @@ retrywrite: panic("Expected " "wraparound bad"); } - wpipe->pipe_buffer.in = size - - segsize; + wpipe->pipe_buffer.in = (unsigned int)(size - + segsize); } wpipe->pipe_buffer.cnt += size; @@ -1173,7 +1157,7 @@ retrywrite: pipeselwakeup(wpipe, wpipe); } -#ifndef CONFIG_EMBEDDED +#if defined(XNU_TARGET_OS_OSX) /* Update modification, status change (# of bytes in pipe) times */ pipe_touch(rpipe, PIPE_MTIME | PIPE_CTIME); pipe_touch(wpipe, PIPE_MTIME | PIPE_CTIME); @@ -1331,7 +1315,7 @@ pipe_free_kmem(struct pipe *cpipe) if (cpipe->pipe_buffer.buffer != NULL) { OSAddAtomic(-(cpipe->pipe_buffer.size), &amountpipekva); OSAddAtomic(-1, &amountpipes); - kfree(cpipe->pipe_buffer.buffer, + kheap_free(KHEAP_DATA_BUFFERS, cpipe->pipe_buffer.buffer, cpipe->pipe_buffer.size); cpipe->pipe_buffer.buffer = NULL; cpipe->pipe_buffer.size = 0; @@ -1384,11 +1368,8 @@ pipeclose(struct pipe *cpipe) KNOTE(&ppipe->pipe_sel.si_note, 1); - postpipeevent(ppipe, EV_RCLOSED); - ppipe->pipe_peer = NULL; } - evpipefree(cpipe); /* * free resources @@ -1766,15 +1747,15 @@ static int pipe_drain(struct fileproc *fp, __unused vfs_context_t ctx) { /* Note: fdlock already held */ - struct pipe *ppipe, *cpipe = (struct pipe *)(fp->f_fglob->fg_data); + struct pipe *ppipe, *cpipe = (struct pipe *)(fp->fp_glob->fg_data); boolean_t drain_pipe = FALSE; /* Check if the pipe is going away */ - lck_mtx_lock_spin(&fp->f_fglob->fg_lock); - if (fp->f_fglob->fg_count == 1) { + lck_mtx_lock_spin(&fp->fp_glob->fg_lock); + if (os_ref_get_count_raw(&fp->fp_glob->fg_count) == 1) { drain_pipe = TRUE; } - lck_mtx_unlock(&fp->f_fglob->fg_lock); + lck_mtx_unlock(&fp->fp_glob->fg_lock); if (cpipe) { PIPE_LOCK(cpipe); diff --git a/bsd/kern/sys_reason.c b/bsd/kern/sys_reason.c index 70493f974..6a773e495 100644 --- a/bsd/kern/sys_reason.c +++ b/bsd/kern/sys_reason.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015 Apple Inc. All rights reserved. + * Copyright (c) 2015-2020 Apple Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -46,48 +46,29 @@ extern int maxproc; /* * Lock group attributes for os_reason subsystem */ -lck_grp_attr_t *os_reason_lock_grp_attr; -lck_grp_t *os_reason_lock_grp; -lck_attr_t *os_reason_lock_attr; +static LCK_GRP_DECLARE(os_reason_lock_grp, "os_reason_lock"); +static ZONE_DECLARE(os_reason_zone, "os reasons", + sizeof(struct os_reason), ZC_ZFREE_CLEARMEM); os_refgrp_decl(static, os_reason_refgrp, "os_reason", NULL); #define OS_REASON_RESERVE_COUNT 100 -#define OS_REASON_MAX_COUNT (maxproc + 100) -static struct zone *os_reason_zone; static int os_reason_alloc_buffer_internal(os_reason_t cur_reason, uint32_t osr_bufsize, - boolean_t can_block); + zalloc_flags_t flags); void -os_reason_init() +os_reason_init(void) { int reasons_allocated = 0; - /* - * Initialize OS reason group and lock attributes - */ - os_reason_lock_grp_attr = lck_grp_attr_alloc_init(); - os_reason_lock_grp = lck_grp_alloc_init("os_reason_lock", os_reason_lock_grp_attr); - os_reason_lock_attr = lck_attr_alloc_init(); - - /* - * Create OS reason zone. - */ - os_reason_zone = zinit(sizeof(struct os_reason), OS_REASON_MAX_COUNT * sizeof(struct os_reason), - OS_REASON_MAX_COUNT, "os reasons"); - if (os_reason_zone == NULL) { - panic("failed to initialize os_reason_zone"); - } - /* * We pre-fill the OS reason zone to reduce the likelihood that * the jetsam thread and others block when they create an exit - * reason. This pre-filled memory is not-collectable since it's - * foreign memory crammed in as part of zfill(). + * reason. */ reasons_allocated = zfill(os_reason_zone, OS_REASON_RESERVE_COUNT); - assert(reasons_allocated > 0); + assert(reasons_allocated >= OS_REASON_RESERVE_COUNT); } /* @@ -103,36 +84,12 @@ os_reason_init() os_reason_t os_reason_create(uint32_t osr_namespace, uint64_t osr_code) { - os_reason_t new_reason = OS_REASON_NULL; - - new_reason = (os_reason_t) zalloc(os_reason_zone); - if (new_reason == OS_REASON_NULL) { -#if OS_REASON_DEBUG - /* - * We rely on OS reasons to communicate important things such - * as process exit reason information, we should be aware - * when issues prevent us from allocating them. 
- */ - if (os_reason_debug_disabled) { - kprintf("os_reason_create: failed to allocate reason with namespace: %u, code : %llu\n", - osr_namespace, osr_code); - } else { - panic("os_reason_create: failed to allocate reason with namespace: %u, code: %llu\n", - osr_namespace, osr_code); - } -#endif - return new_reason; - } - - bzero(new_reason, sizeof(*new_reason)); + os_reason_t new_reason; + new_reason = zalloc_flags(os_reason_zone, Z_WAITOK | Z_ZERO); new_reason->osr_namespace = osr_namespace; new_reason->osr_code = osr_code; - new_reason->osr_flags = 0; - new_reason->osr_bufsize = 0; - new_reason->osr_kcd_buf = NULL; - - lck_mtx_init(&new_reason->osr_lock, os_reason_lock_grp, os_reason_lock_attr); + lck_mtx_init(&new_reason->osr_lock, &os_reason_lock_grp, LCK_ATTR_NULL); os_ref_init(&new_reason->osr_refcount, &os_reason_refgrp); return new_reason; @@ -145,14 +102,13 @@ os_reason_dealloc_buffer(os_reason_t cur_reason) LCK_MTX_ASSERT(&cur_reason->osr_lock, LCK_MTX_ASSERT_OWNED); if (cur_reason->osr_kcd_buf != NULL && cur_reason->osr_bufsize != 0) { - kfree(cur_reason->osr_kcd_buf, cur_reason->osr_bufsize); + kheap_free(KHEAP_DATA_BUFFERS, cur_reason->osr_kcd_buf, + cur_reason->osr_bufsize); } cur_reason->osr_bufsize = 0; cur_reason->osr_kcd_buf = NULL; bzero(&cur_reason->osr_kcd_descriptor, sizeof(cur_reason->osr_kcd_descriptor)); - - return; } /* @@ -172,7 +128,7 @@ os_reason_dealloc_buffer(os_reason_t cur_reason) int os_reason_alloc_buffer(os_reason_t cur_reason, uint32_t osr_bufsize) { - return os_reason_alloc_buffer_internal(cur_reason, osr_bufsize, TRUE); + return os_reason_alloc_buffer_internal(cur_reason, osr_bufsize, Z_WAITOK); } /* @@ -192,12 +148,12 @@ os_reason_alloc_buffer(os_reason_t cur_reason, uint32_t osr_bufsize) int os_reason_alloc_buffer_noblock(os_reason_t cur_reason, uint32_t osr_bufsize) { - return os_reason_alloc_buffer_internal(cur_reason, osr_bufsize, FALSE); + return os_reason_alloc_buffer_internal(cur_reason, osr_bufsize, Z_NOWAIT); } static int os_reason_alloc_buffer_internal(os_reason_t cur_reason, uint32_t osr_bufsize, - boolean_t can_block) + zalloc_flags_t flags) { if (cur_reason == OS_REASON_NULL) { return EINVAL; @@ -216,23 +172,20 @@ os_reason_alloc_buffer_internal(os_reason_t cur_reason, uint32_t osr_bufsize, return 0; } - if (can_block) { - cur_reason->osr_kcd_buf = kalloc_tag(osr_bufsize, VM_KERN_MEMORY_REASON); - assert(cur_reason->osr_kcd_buf != NULL); - } else { - cur_reason->osr_kcd_buf = kalloc_noblock_tag(osr_bufsize, VM_KERN_MEMORY_REASON); - if (cur_reason->osr_kcd_buf == NULL) { - lck_mtx_unlock(&cur_reason->osr_lock); - return ENOMEM; - } - } + cur_reason->osr_kcd_buf = kheap_alloc_tag(KHEAP_DATA_BUFFERS, osr_bufsize, + flags | Z_ZERO, VM_KERN_MEMORY_REASON); - bzero(cur_reason->osr_kcd_buf, osr_bufsize); + if (cur_reason->osr_kcd_buf == NULL) { + lck_mtx_unlock(&cur_reason->osr_lock); + return ENOMEM; + } cur_reason->osr_bufsize = osr_bufsize; - if (kcdata_memory_static_init(&cur_reason->osr_kcd_descriptor, (mach_vm_address_t) cur_reason->osr_kcd_buf, - KCDATA_BUFFER_BEGIN_OS_REASON, osr_bufsize, KCFLAG_USE_MEMCOPY) != KERN_SUCCESS) { + if (kcdata_memory_static_init(&cur_reason->osr_kcd_descriptor, + (mach_vm_address_t)cur_reason->osr_kcd_buf, + KCDATA_BUFFER_BEGIN_OS_REASON, osr_bufsize, KCFLAG_USE_MEMCOPY) != + KERN_SUCCESS) { os_reason_dealloc_buffer(cur_reason); lck_mtx_unlock(&cur_reason->osr_lock); @@ -259,8 +212,10 @@ os_reason_get_kcdata_descriptor(os_reason_t cur_reason) return NULL; } - 
assert(cur_reason->osr_kcd_descriptor.kcd_addr_begin == (mach_vm_address_t) cur_reason->osr_kcd_buf); - if (cur_reason->osr_kcd_descriptor.kcd_addr_begin != (mach_vm_address_t) cur_reason->osr_kcd_buf) { + assert(cur_reason->osr_kcd_descriptor.kcd_addr_begin == + (mach_vm_address_t)cur_reason->osr_kcd_buf); + if (cur_reason->osr_kcd_descriptor.kcd_addr_begin != + (mach_vm_address_t)cur_reason->osr_kcd_buf) { return NULL; } @@ -304,7 +259,7 @@ os_reason_free(os_reason_t cur_reason) os_reason_dealloc_buffer(cur_reason); lck_mtx_unlock(&cur_reason->osr_lock); - lck_mtx_destroy(&cur_reason->osr_lock, os_reason_lock_grp); + lck_mtx_destroy(&cur_reason->osr_lock, &os_reason_lock_grp); zfree(os_reason_zone, cur_reason); } diff --git a/bsd/kern/sys_socket.c b/bsd/kern/sys_socket.c index 53e8f07b5..d3ea4fe65 100644 --- a/bsd/kern/sys_socket.c +++ b/bsd/kern/sys_socket.c @@ -111,33 +111,20 @@ const struct fileops socketops = { /* ARGSUSED */ static int soo_read(struct fileproc *fp, struct uio *uio, __unused int flags, -#if !CONFIG_MACF_SOCKET - __unused -#endif - vfs_context_t ctx) + __unused vfs_context_t ctx) { struct socket *so; int stat; -#if CONFIG_MACF_SOCKET - int error; -#endif int (*fsoreceive)(struct socket *so2, struct sockaddr **paddr, struct uio *uio2, struct mbuf **mp0, struct mbuf **controlp, int *flagsp); - if ((so = (struct socket *)fp->f_fglob->fg_data) == NULL) { + if ((so = (struct socket *)fp->fp_glob->fg_data) == NULL) { /* This is not a valid open file descriptor */ return EBADF; } -#if CONFIG_MACF_SOCKET - error = mac_socket_check_receive(vfs_context_ucred(ctx), so); - if (error) { - return error; - } -#endif /* CONFIG_MACF_SOCKET */ - fsoreceive = so->so_proto->pr_usrreqs->pru_soreceive; stat = (*fsoreceive)(so, 0, uio, 0, 0, 0); @@ -156,23 +143,11 @@ soo_write(struct fileproc *fp, struct uio *uio, __unused int flags, int flags2); proc_t procp; -#if CONFIG_MACF_SOCKET - int error; -#endif - - if ((so = (struct socket *)fp->f_fglob->fg_data) == NULL) { + if ((so = (struct socket *)fp->fp_glob->fg_data) == NULL) { /* This is not a valid open file descriptor */ return EBADF; } -#if CONFIG_MACF_SOCKET - /* JMM - have to fetch the socket's remote addr */ - error = mac_socket_check_send(vfs_context_ucred(ctx), so, NULL); - if (error) { - return error; - } -#endif /* CONFIG_MACF_SOCKET */ - fsosend = so->so_proto->pr_usrreqs->pru_sosend; stat = (*fsosend)(so, 0, uio, 0, 0, 0); @@ -314,7 +289,7 @@ soo_ioctl(struct fileproc *fp, u_long cmd, caddr_t data, vfs_context_t ctx) struct socket *so; proc_t procp = vfs_context_proc(ctx); - if ((so = (struct socket *)fp->f_fglob->fg_data) == NULL) { + if ((so = (struct socket *)fp->fp_glob->fg_data) == NULL) { /* This is not a valid open file descriptor */ return EBADF; } @@ -325,7 +300,7 @@ soo_ioctl(struct fileproc *fp, u_long cmd, caddr_t data, vfs_context_t ctx) int soo_select(struct fileproc *fp, int which, void *wql, vfs_context_t ctx) { - struct socket *so = (struct socket *)fp->f_fglob->fg_data; + struct socket *so = (struct socket *)fp->fp_glob->fg_data; int retnum = 0; proc_t procp; @@ -335,13 +310,6 @@ soo_select(struct fileproc *fp, int which, void *wql, vfs_context_t ctx) procp = vfs_context_proc(ctx); -#if CONFIG_MACF_SOCKET - if (mac_socket_check_select(vfs_context_ucred(ctx), so, which) != 0) { - return 0; - } -#endif /* CONFIG_MACF_SOCKET */ - - socket_lock(so, 1); switch (which) { case FREAD: @@ -457,7 +425,7 @@ static int soo_drain(struct fileproc *fp, __unused vfs_context_t ctx) { int error = 0; - struct socket *so = 
(struct socket *)fp->f_fglob->fg_data; + struct socket *so = (struct socket *)fp->fp_glob->fg_data; if (so) { socket_lock(so, 1); diff --git a/bsd/kern/sys_ulock.c b/bsd/kern/sys_ulock.c index dce4c3aec..4b36b45fd 100644 --- a/bsd/kern/sys_ulock.c +++ b/bsd/kern/sys_ulock.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015 Apple Inc. All rights reserved. + * Copyright (c) 2015-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -46,7 +46,7 @@ #include #include #include -#include +#include #include #include #include @@ -90,12 +90,12 @@ * relies on that thread to carry the torch for the other waiting threads. */ -static lck_grp_t *ull_lck_grp; +static LCK_GRP_DECLARE(ull_lck_grp, "ulocks"); typedef lck_spin_t ull_lock_t; -#define ull_lock_init(ull) lck_spin_init(&ull->ull_lock, ull_lck_grp, NULL) -#define ull_lock_destroy(ull) lck_spin_destroy(&ull->ull_lock, ull_lck_grp) -#define ull_lock(ull) lck_spin_lock_grp(&ull->ull_lock, ull_lck_grp) +#define ull_lock_init(ull) lck_spin_init(&ull->ull_lock, &ull_lck_grp, NULL) +#define ull_lock_destroy(ull) lck_spin_destroy(&ull->ull_lock, &ull_lck_grp) +#define ull_lock(ull) lck_spin_lock_grp(&ull->ull_lock, &ull_lck_grp) #define ull_unlock(ull) lck_spin_unlock(&ull->ull_lock) #define ull_assert_owned(ull) LCK_SPIN_ASSERT(&ull->ull_lock, LCK_ASSERT_OWNED) #define ull_assert_notwned(ull) LCK_SPIN_ASSERT(&ull->ull_lock, LCK_ASSERT_NOTOWNED) @@ -207,9 +207,9 @@ typedef struct ull_bucket { static int ull_hash_buckets; static ull_bucket_t *ull_bucket; static uint32_t ull_nzalloc = 0; -static zone_t ull_zone; +static ZONE_DECLARE(ull_zone, "ulocks", sizeof(ull_t), ZC_NOENCRYPT | ZC_CACHING); -#define ull_bucket_lock(i) lck_spin_lock_grp(&ull_bucket[i].ulb_lock, ull_lck_grp) +#define ull_bucket_lock(i) lck_spin_lock_grp(&ull_bucket[i].ulb_lock, &ull_lck_grp) #define ull_bucket_unlock(i) lck_spin_unlock(&ull_bucket[i].ulb_lock) static __inline__ uint32_t @@ -227,8 +227,6 @@ ull_hash_index(const void *key, size_t length) void ulock_initialize(void) { - ull_lck_grp = lck_grp_alloc_init("ulocks", NULL); - assert(thread_max > 16); /* Size ull_hash_buckets based on thread_max. 
* Round up to nearest power of 2, then divide by 4 @@ -238,20 +236,14 @@ ulock_initialize(void) kprintf("%s>thread_max=%d, ull_hash_buckets=%d\n", __FUNCTION__, thread_max, ull_hash_buckets); assert(ull_hash_buckets >= thread_max / 4); - ull_bucket = (ull_bucket_t *)kalloc(sizeof(ull_bucket_t) * ull_hash_buckets); + ull_bucket = zalloc_permanent(sizeof(ull_bucket_t) * ull_hash_buckets, + ZALIGN_PTR); assert(ull_bucket != NULL); for (int i = 0; i < ull_hash_buckets; i++) { queue_init(&ull_bucket[i].ulb_head); - lck_spin_init(&ull_bucket[i].ulb_lock, ull_lck_grp, NULL); + lck_spin_init(&ull_bucket[i].ulb_lock, &ull_lck_grp, NULL); } - - ull_zone = zinit(sizeof(ull_t), - thread_max * sizeof(ull_t), - 0, "ulocks"); - - zone_change(ull_zone, Z_NOENCRYPT, TRUE); - zone_change(ull_zone, Z_CACHING_ENABLED, TRUE); } #if DEVELOPMENT || DEBUG @@ -470,7 +462,21 @@ ulock_resolve_owner(uint32_t value, thread_t *owner) int ulock_wait(struct proc *p, struct ulock_wait_args *args, int32_t *retval) { - uint opcode = args->operation & UL_OPCODE_MASK; + struct ulock_wait2_args args2; + + args2.operation = args->operation; + args2.addr = args->addr; + args2.value = args->value; + args2.timeout = (uint64_t)(args->timeout) * NSEC_PER_USEC; + args2.value2 = 0; + + return ulock_wait2(p, &args2, retval); +} + +int +ulock_wait2(struct proc *p, struct ulock_wait2_args *args, int32_t *retval) +{ + uint8_t opcode = (uint8_t)(args->operation & UL_OPCODE_MASK); uint flags = args->operation & UL_FLAGS_MASK; if (flags & ULF_WAIT_CANCEL_POINT) { @@ -642,7 +648,7 @@ ulock_wait(struct proc *p, struct ulock_wait_args *args, int32_t *retval) if (set_owner) { if (owner_thread == THREAD_NULL) { - ret = ulock_resolve_owner(args->value, &owner_thread); + ret = ulock_resolve_owner((uint32_t)args->value, &owner_thread); if (ret == EOWNERDEAD) { /* * Translation failed - even though the lock value is up to date, @@ -680,7 +686,7 @@ ulock_wait(struct proc *p, struct ulock_wait_args *args, int32_t *retval) } wait_result_t wr; - uint32_t timeout = args->timeout; + uint64_t timeout = args->timeout; /* nanoseconds */ uint64_t deadline = TIMEOUT_WAIT_FOREVER; wait_interrupt_t interruptible = THREAD_ABORTSAFE; struct turnstile *ts; @@ -694,7 +700,7 @@ ulock_wait(struct proc *p, struct ulock_wait_args *args, int32_t *retval) } if (timeout) { - clock_interval_to_deadline(timeout, NSEC_PER_USEC, &deadline); + nanoseconds_to_deadline(timeout, &deadline); } turnstile_update_inheritor(ts, owner_thread, @@ -703,6 +709,15 @@ ulock_wait(struct proc *p, struct ulock_wait_args *args, int32_t *retval) wr = waitq_assert_wait64(&ts->ts_waitq, CAST_EVENT64_T(ULOCK_TO_EVENT(ull)), interruptible, deadline); + if (wr == THREAD_WAITING) { + uthread_t uthread = (uthread_t)get_bsdthread_info(self); + uthread->uu_save.uus_ulock_wait_data.ull = ull; + uthread->uu_save.uus_ulock_wait_data.retval = retval; + uthread->uu_save.uus_ulock_wait_data.flags = flags; + uthread->uu_save.uus_ulock_wait_data.owner_thread = owner_thread; + uthread->uu_save.uus_ulock_wait_data.old_owner = old_owner; + } + ull_unlock(ull); if (unused_ull) { @@ -713,13 +728,8 @@ ulock_wait(struct proc *p, struct ulock_wait_args *args, int32_t *retval) turnstile_update_inheritor_complete(ts, TURNSTILE_INTERLOCK_NOT_HELD); if (wr == THREAD_WAITING) { - uthread_t uthread = (uthread_t)get_bsdthread_info(self); - uthread->uu_save.uus_ulock_wait_data.retval = retval; - uthread->uu_save.uus_ulock_wait_data.flags = flags; - uthread->uu_save.uus_ulock_wait_data.owner_thread = owner_thread; - 
uthread->uu_save.uus_ulock_wait_data.old_owner = old_owner; if (set_owner && owner_thread != THREAD_NULL) { - thread_handoff_parameter(owner_thread, ulock_wait_continue, ull); + thread_handoff_parameter(owner_thread, ulock_wait_continue, ull, THREAD_HANDOFF_NONE); } else { assert(owner_thread == THREAD_NULL); thread_block_parameter(ulock_wait_continue, ull); @@ -803,13 +813,13 @@ ulock_wait_cleanup(ull_t *ull, thread_t owner_thread, thread_t old_owner, int32_ __attribute__((noreturn)) static void -ulock_wait_continue(void * parameter, wait_result_t wr) +ulock_wait_continue(__unused void * parameter, wait_result_t wr) { thread_t self = current_thread(); uthread_t uthread = (uthread_t)get_bsdthread_info(self); int ret = 0; - ull_t *ull = (ull_t *)parameter; + ull_t *ull = uthread->uu_save.uus_ulock_wait_data.ull; int32_t *retval = uthread->uu_save.uus_ulock_wait_data.retval; uint flags = uthread->uu_save.uus_ulock_wait_data.flags; thread_t owner_thread = uthread->uu_save.uus_ulock_wait_data.owner_thread; @@ -833,7 +843,7 @@ ulock_wait_continue(void * parameter, wait_result_t wr) int ulock_wake(struct proc *p, struct ulock_wake_args *args, __unused int32_t *retval) { - uint opcode = args->operation & UL_OPCODE_MASK; + uint8_t opcode = (uint8_t)(args->operation & UL_OPCODE_MASK); uint flags = args->operation & UL_FLAGS_MASK; int ret = 0; ulk_t key; @@ -855,6 +865,7 @@ ulock_wake(struct proc *p, struct ulock_wake_args *args, __unused int32_t *retva #endif bool set_owner = false; + bool allow_non_owner = false; bool xproc = false; switch (opcode) { @@ -883,6 +894,15 @@ ulock_wake(struct proc *p, struct ulock_wake_args *args, __unused int32_t *retva goto munge_retval; } + if (flags & ULF_WAKE_ALLOW_NON_OWNER) { + if (!set_owner) { + ret = EINVAL; + goto munge_retval; + } + + allow_non_owner = true; + } + if (args->addr == 0) { ret = EINVAL; goto munge_retval; @@ -934,7 +954,7 @@ ulock_wake(struct proc *p, struct ulock_wake_args *args, __unused int32_t *retva } if (set_owner) { - if (ull->ull_owner != current_thread()) { + if ((ull->ull_owner != current_thread()) && !allow_non_owner) { /* * If the current thread isn't the known owner, * then this wake call was late to the party, @@ -1018,7 +1038,8 @@ void kdp_ulock_find_owner(__unused struct waitq * waitq, event64_t event, thread_waitinfo_t * waitinfo) { ull_t *ull = EVENT_TO_ULOCK(event); - assert(kdp_is_in_zone(ull, "ulocks")); + + zone_require(ull_zone, ull); switch (ull->ull_opcode) { case UL_UNFAIR_LOCK: diff --git a/bsd/kern/sys_work_interval.c b/bsd/kern/sys_work_interval.c index 203086a1b..c68f6ccfd 100644 --- a/bsd/kern/sys_work_interval.c +++ b/bsd/kern/sys_work_interval.c @@ -49,6 +49,7 @@ work_interval_ctl(__unused proc_t p, struct work_interval_ctl_args *uap, struct work_interval_create_params create_params; struct kern_work_interval_create_args create_args; + mach_port_name_t port_name; switch (operation) { case WORK_INTERVAL_OPERATION_CREATE: @@ -103,6 +104,29 @@ work_interval_ctl(__unused proc_t p, struct work_interval_ctl_args *uap, return error; } break; + case WORK_INTERVAL_OPERATION_GET_FLAGS: + if (uap->arg == USER_ADDR_NULL || uap->len < sizeof(create_params)) { + return EINVAL; + } + + port_name = (mach_port_name_t) uap->work_interval_id; + if (!MACH_PORT_VALID(port_name)) { + return EINVAL; + } + + create_params = (struct work_interval_create_params) { + .wicp_port = port_name + }; + + kret = kern_work_interval_get_flags_from_port(port_name, &create_params.wicp_create_flags); + if (kret != KERN_SUCCESS) { + return EINVAL; + 
} + + if ((error = copyout(&create_params, uap->arg, sizeof(create_params)))) { + return error; + } + break; case WORK_INTERVAL_OPERATION_DESTROY: if (uap->arg != USER_ADDR_NULL || uap->work_interval_id == 0) { return EINVAL; diff --git a/bsd/kern/syscalls.master b/bsd/kern/syscalls.master index 240bae020..c2802385f 100644 --- a/bsd/kern/syscalls.master +++ b/bsd/kern/syscalls.master @@ -1,11 +1,12 @@ +;/* ; derived from: FreeBSD @(#)syscalls.master 8.2 (Berkeley) 1/13/94 ; ; System call name/number master file. ; This is file processed by .../xnu/bsd/kern/makesyscalls.sh and creates: -; .../xnu/bsd/kern/init_sysent.c -; .../xnu/bsd/kern/syscalls.c -; .../xnu/bsd/sys/syscall.h -; .../xnu/bsd/sys/sysproto.h +; .../xnu/bsd/kern/init_sysent.c +; .../xnu/bsd/kern/syscalls.c +; .../xnu/bsd/sys/syscall.h +; .../xnu/bsd/sys/sysproto.h ; .../xnu/bsd/security/audit_syscalls.c ; Columns -> | Number Audit Files | { Name and Args } | { Comments } @@ -30,6 +31,7 @@ ; #ifdef's, #include's, #if's etc. are copied to all output files. ; N.B.: makesyscalls.sh and createsyscalls.pl must be updated to account ; for any new argument types. +;*/ #include #include @@ -40,39 +42,39 @@ #include 0 AUE_NULL ALL { int nosys(void); } { indirect syscall } -1 AUE_EXIT ALL { void exit(int rval) NO_SYSCALL_STUB; } -2 AUE_FORK ALL { int fork(void) NO_SYSCALL_STUB; } -3 AUE_NULL ALL { user_ssize_t read(int fd, user_addr_t cbuf, user_size_t nbyte); } -4 AUE_NULL ALL { user_ssize_t write(int fd, user_addr_t cbuf, user_size_t nbyte); } -5 AUE_OPEN_RWTC ALL { int open(user_addr_t path, int flags, int mode) NO_SYSCALL_STUB; } -6 AUE_CLOSE ALL { int close(int fd); } -7 AUE_WAIT4 ALL { int wait4(int pid, user_addr_t status, int options, user_addr_t rusage) NO_SYSCALL_STUB; } +1 AUE_EXIT ALL { void exit(int rval) NO_SYSCALL_STUB; } +2 AUE_FORK ALL { int fork(void) NO_SYSCALL_STUB; } +3 AUE_NULL ALL { user_ssize_t read(int fd, user_addr_t cbuf, user_size_t nbyte); } +4 AUE_NULL ALL { user_ssize_t write(int fd, user_addr_t cbuf, user_size_t nbyte); } +5 AUE_OPEN_RWTC ALL { int open(user_addr_t path, int flags, int mode) NO_SYSCALL_STUB; } +6 AUE_CLOSE ALL { int sys_close(int fd); } +7 AUE_WAIT4 ALL { int wait4(int pid, user_addr_t status, int options, user_addr_t rusage) NO_SYSCALL_STUB; } 8 AUE_NULL ALL { int enosys(void); } { old creat } -9 AUE_LINK ALL { int link(user_addr_t path, user_addr_t link); } -10 AUE_UNLINK ALL { int unlink(user_addr_t path) NO_SYSCALL_STUB; } +9 AUE_LINK ALL { int link(user_addr_t path, user_addr_t link); } +10 AUE_UNLINK ALL { int unlink(user_addr_t path) NO_SYSCALL_STUB; } 11 AUE_NULL ALL { int enosys(void); } { old execv } -12 AUE_CHDIR ALL { int chdir(user_addr_t path); } -13 AUE_FCHDIR ALL { int fchdir(int fd); } -14 AUE_MKNOD ALL { int mknod(user_addr_t path, int mode, int dev); } -15 AUE_CHMOD ALL { int chmod(user_addr_t path, int mode) NO_SYSCALL_STUB; } -16 AUE_CHOWN ALL { int chown(user_addr_t path, int uid, int gid); } +12 AUE_CHDIR ALL { int chdir(user_addr_t path); } +13 AUE_FCHDIR ALL { int fchdir(int fd); } +14 AUE_MKNOD ALL { int mknod(user_addr_t path, int mode, int dev); } +15 AUE_CHMOD ALL { int chmod(user_addr_t path, int mode) NO_SYSCALL_STUB; } +16 AUE_CHOWN ALL { int chown(user_addr_t path, int uid, int gid); } 17 AUE_NULL ALL { int enosys(void); } { old break } -18 AUE_GETFSSTAT ALL { int getfsstat(user_addr_t buf, int bufsize, int flags); } +18 AUE_GETFSSTAT ALL { int getfsstat(user_addr_t buf, int bufsize, int flags); } 19 AUE_NULL ALL { int enosys(void); } { old lseek } -20 
AUE_GETPID ALL { int getpid(void); } +20 AUE_GETPID ALL { int getpid(void); } 21 AUE_NULL ALL { int enosys(void); } { old mount } 22 AUE_NULL ALL { int enosys(void); } { old umount } -23 AUE_SETUID ALL { int setuid(uid_t uid); } -24 AUE_GETUID ALL { int getuid(void); } -25 AUE_GETEUID ALL { int geteuid(void); } -26 AUE_PTRACE ALL { int ptrace(int req, pid_t pid, caddr_t addr, int data); } +23 AUE_SETUID ALL { int setuid(uid_t uid); } +24 AUE_GETUID ALL { int getuid(void); } +25 AUE_GETEUID ALL { int geteuid(void); } +26 AUE_PTRACE ALL { int ptrace(int req, pid_t pid, caddr_t addr, int data); } #if SOCKETS -27 AUE_RECVMSG ALL { int recvmsg(int s, struct msghdr *msg, int flags) NO_SYSCALL_STUB; } -28 AUE_SENDMSG ALL { int sendmsg(int s, caddr_t msg, int flags) NO_SYSCALL_STUB; } -29 AUE_RECVFROM ALL { int recvfrom(int s, void *buf, size_t len, int flags, struct sockaddr *from, int *fromlenaddr) NO_SYSCALL_STUB; } -30 AUE_ACCEPT ALL { int accept(int s, caddr_t name, socklen_t *anamelen) NO_SYSCALL_STUB; } -31 AUE_GETPEERNAME ALL { int getpeername(int fdes, caddr_t asa, socklen_t *alen) NO_SYSCALL_STUB; } -32 AUE_GETSOCKNAME ALL { int getsockname(int fdes, caddr_t asa, socklen_t *alen) NO_SYSCALL_STUB; } +27 AUE_RECVMSG ALL { int recvmsg(int s, struct msghdr *msg, int flags) NO_SYSCALL_STUB; } +28 AUE_SENDMSG ALL { int sendmsg(int s, caddr_t msg, int flags) NO_SYSCALL_STUB; } +29 AUE_RECVFROM ALL { int recvfrom(int s, void *buf, size_t len, int flags, struct sockaddr *from, int *fromlenaddr) NO_SYSCALL_STUB; } +30 AUE_ACCEPT ALL { int accept(int s, caddr_t name, socklen_t *anamelen) NO_SYSCALL_STUB; } +31 AUE_GETPEERNAME ALL { int getpeername(int fdes, caddr_t asa, socklen_t *alen) NO_SYSCALL_STUB; } +32 AUE_GETSOCKNAME ALL { int getsockname(int fdes, caddr_t asa, socklen_t *alen) NO_SYSCALL_STUB; } #else 27 AUE_NULL ALL { int nosys(void); } 28 AUE_NULL ALL { int nosys(void); } @@ -81,86 +83,86 @@ 31 AUE_NULL ALL { int nosys(void); } 32 AUE_NULL ALL { int nosys(void); } #endif /* SOCKETS */ -33 AUE_ACCESS ALL { int access(user_addr_t path, int flags); } -34 AUE_CHFLAGS ALL { int chflags(char *path, int flags); } -35 AUE_FCHFLAGS ALL { int fchflags(int fd, int flags); } -36 AUE_SYNC ALL { int sync(void); } -37 AUE_KILL ALL { int kill(int pid, int signum, int posix) NO_SYSCALL_STUB; } +33 AUE_ACCESS ALL { int access(user_addr_t path, int flags); } +34 AUE_CHFLAGS ALL { int chflags(char *path, int flags); } +35 AUE_FCHFLAGS ALL { int fchflags(int fd, int flags); } +36 AUE_SYNC ALL { int sync(void); } +37 AUE_KILL ALL { int kill(int pid, int signum, int posix) NO_SYSCALL_STUB; } 38 AUE_NULL ALL { int nosys(void); } { old stat } -39 AUE_GETPPID ALL { int getppid(void); } +39 AUE_GETPPID ALL { int getppid(void); } 40 AUE_NULL ALL { int nosys(void); } { old lstat } -41 AUE_DUP ALL { int dup(u_int fd); } -42 AUE_PIPE ALL { int pipe(void); } -43 AUE_GETEGID ALL { int getegid(void); } +41 AUE_DUP ALL { int sys_dup(u_int fd); } +42 AUE_PIPE ALL { int pipe(void); } +43 AUE_GETEGID ALL { int getegid(void); } 44 AUE_NULL ALL { int nosys(void); } { old profil } 45 AUE_NULL ALL { int nosys(void); } { old ktrace } -46 AUE_SIGACTION ALL { int sigaction(int signum, struct __sigaction *nsa, struct sigaction *osa) NO_SYSCALL_STUB; } -47 AUE_GETGID ALL { int getgid(void); } -48 AUE_SIGPROCMASK ALL { int sigprocmask(int how, user_addr_t mask, user_addr_t omask); } -49 AUE_GETLOGIN ALL { int getlogin(char *namebuf, u_int namelen) NO_SYSCALL_STUB; } -50 AUE_SETLOGIN ALL { int setlogin(char *namebuf) NO_SYSCALL_STUB; } 
-51 AUE_ACCT ALL { int acct(char *path); } -52 AUE_SIGPENDING ALL { int sigpending(struct sigvec *osv); } -53 AUE_SIGALTSTACK ALL { int sigaltstack(struct sigaltstack *nss, struct sigaltstack *oss) NO_SYSCALL_STUB ; } -54 AUE_IOCTL ALL { int ioctl(int fd, u_long com, caddr_t data) NO_SYSCALL_STUB; } +46 AUE_SIGACTION ALL { int sigaction(int signum, struct __sigaction *nsa, struct sigaction *osa) NO_SYSCALL_STUB; } +47 AUE_GETGID ALL { int getgid(void); } +48 AUE_SIGPROCMASK ALL { int sigprocmask(int how, user_addr_t mask, user_addr_t omask); } +49 AUE_GETLOGIN ALL { int getlogin(char *namebuf, u_int namelen) NO_SYSCALL_STUB; } +50 AUE_SETLOGIN ALL { int setlogin(char *namebuf) NO_SYSCALL_STUB; } +51 AUE_ACCT ALL { int acct(char *path); } +52 AUE_SIGPENDING ALL { int sigpending(struct sigvec *osv); } +53 AUE_SIGALTSTACK ALL { int sigaltstack(struct sigaltstack *nss, struct sigaltstack *oss) NO_SYSCALL_STUB ; } +54 AUE_IOCTL ALL { int ioctl(int fd, u_long com, caddr_t data) NO_SYSCALL_STUB; } 55 AUE_REBOOT ALL { int reboot(int opt, char *msg) NO_SYSCALL_STUB; } -56 AUE_REVOKE ALL { int revoke(char *path); } -57 AUE_SYMLINK ALL { int symlink(char *path, char *link); } -58 AUE_READLINK ALL { int readlink(char *path, char *buf, int count); } -59 AUE_EXECVE ALL { int execve(char *fname, char **argp, char **envp); } -60 AUE_UMASK ALL { int umask(int newmask); } -61 AUE_CHROOT ALL { int chroot(user_addr_t path); } +56 AUE_REVOKE ALL { int revoke(char *path); } +57 AUE_SYMLINK ALL { int symlink(char *path, char *link); } +58 AUE_READLINK ALL { int readlink(char *path, char *buf, int count); } +59 AUE_EXECVE ALL { int execve(char *fname, char **argp, char **envp); } +60 AUE_UMASK ALL { int umask(int newmask); } +61 AUE_CHROOT ALL { int chroot(user_addr_t path); } 62 AUE_NULL ALL { int nosys(void); } { old fstat } 63 AUE_NULL ALL { int nosys(void); } { used internally and reserved } 64 AUE_NULL ALL { int nosys(void); } { old getpagesize } -65 AUE_MSYNC ALL { int msync(caddr_t addr, size_t len, int flags) NO_SYSCALL_STUB; } -66 AUE_VFORK ALL { int vfork(void); } +65 AUE_MSYNC ALL { int msync(caddr_t addr, size_t len, int flags) NO_SYSCALL_STUB; } +66 AUE_VFORK ALL { int vfork(void); } 67 AUE_NULL ALL { int nosys(void); } { old vread } 68 AUE_NULL ALL { int nosys(void); } { old vwrite } 69 AUE_NULL ALL { int nosys(void); } { old sbrk } -70 AUE_NULL ALL { int nosys(void); } { old sstk } +70 AUE_NULL ALL { int nosys(void); } { old sstk } 71 AUE_NULL ALL { int nosys(void); } { old mmap } 72 AUE_NULL ALL { int nosys(void); } { old vadvise } -73 AUE_MUNMAP ALL { int munmap(caddr_t addr, size_t len) NO_SYSCALL_STUB; } -74 AUE_MPROTECT ALL { int mprotect(caddr_t addr, size_t len, int prot) NO_SYSCALL_STUB; } -75 AUE_MADVISE ALL { int madvise(caddr_t addr, size_t len, int behav); } +73 AUE_MUNMAP ALL { int munmap(caddr_t addr, size_t len) NO_SYSCALL_STUB; } +74 AUE_MPROTECT ALL { int mprotect(caddr_t addr, size_t len, int prot) NO_SYSCALL_STUB; } +75 AUE_MADVISE ALL { int madvise(caddr_t addr, size_t len, int behav); } 76 AUE_NULL ALL { int nosys(void); } { old vhangup } 77 AUE_NULL ALL { int nosys(void); } { old vlimit } -78 AUE_MINCORE ALL { int mincore(user_addr_t addr, user_size_t len, user_addr_t vec); } -79 AUE_GETGROUPS ALL { int getgroups(u_int gidsetsize, gid_t *gidset); } -80 AUE_SETGROUPS ALL { int setgroups(u_int gidsetsize, gid_t *gidset); } -81 AUE_GETPGRP ALL { int getpgrp(void); } -82 AUE_SETPGRP ALL { int setpgid(int pid, int pgid); } -83 AUE_SETITIMER ALL { int setitimer(u_int which, struct 
itimerval *itv, struct itimerval *oitv); } +78 AUE_MINCORE ALL { int mincore(user_addr_t addr, user_size_t len, user_addr_t vec); } +79 AUE_GETGROUPS ALL { int getgroups(u_int gidsetsize, gid_t *gidset); } +80 AUE_SETGROUPS ALL { int setgroups(u_int gidsetsize, gid_t *gidset); } +81 AUE_GETPGRP ALL { int getpgrp(void); } +82 AUE_SETPGRP ALL { int setpgid(int pid, int pgid); } +83 AUE_SETITIMER ALL { int setitimer(u_int which, struct itimerval *itv, struct itimerval *oitv); } 84 AUE_NULL ALL { int nosys(void); } { old wait } -85 AUE_SWAPON ALL { int swapon(void); } -86 AUE_GETITIMER ALL { int getitimer(u_int which, struct itimerval *itv); } +85 AUE_SWAPON ALL { int swapon(void); } +86 AUE_GETITIMER ALL { int getitimer(u_int which, struct itimerval *itv); } 87 AUE_NULL ALL { int nosys(void); } { old gethostname } 88 AUE_NULL ALL { int nosys(void); } { old sethostname } -89 AUE_GETDTABLESIZE ALL { int getdtablesize(void); } -90 AUE_DUP2 ALL { int dup2(u_int from, u_int to); } +89 AUE_GETDTABLESIZE ALL { int sys_getdtablesize(void); } +90 AUE_DUP2 ALL { int sys_dup2(u_int from, u_int to); } 91 AUE_NULL ALL { int nosys(void); } { old getdopt } -92 AUE_FCNTL ALL { int fcntl(int fd, int cmd, long arg) NO_SYSCALL_STUB; } -93 AUE_SELECT ALL { int select(int nd, u_int32_t *in, u_int32_t *ou, u_int32_t *ex, struct timeval *tv) NO_SYSCALL_STUB; } +92 AUE_FCNTL ALL { int sys_fcntl(int fd, int cmd, long arg) NO_SYSCALL_STUB; } +93 AUE_SELECT ALL { int select(int nd, u_int32_t *in, u_int32_t *ou, u_int32_t *ex, struct timeval *tv) NO_SYSCALL_STUB; } 94 AUE_NULL ALL { int nosys(void); } { old setdopt } -95 AUE_FSYNC ALL { int fsync(int fd); } +95 AUE_FSYNC ALL { int fsync(int fd); } 96 AUE_SETPRIORITY ALL { int setpriority(int which, id_t who, int prio) NO_SYSCALL_STUB; } #if SOCKETS -97 AUE_SOCKET ALL { int socket(int domain, int type, int protocol); } -98 AUE_CONNECT ALL { int connect(int s, caddr_t name, socklen_t namelen) NO_SYSCALL_STUB; } +97 AUE_SOCKET ALL { int socket(int domain, int type, int protocol); } +98 AUE_CONNECT ALL { int connect(int s, caddr_t name, socklen_t namelen) NO_SYSCALL_STUB; } #else 97 AUE_NULL ALL { int nosys(void); } 98 AUE_NULL ALL { int nosys(void); } #endif /* SOCKETS */ 99 AUE_NULL ALL { int nosys(void); } { old accept } -100 AUE_GETPRIORITY ALL { int getpriority(int which, id_t who); } +100 AUE_GETPRIORITY ALL { int getpriority(int which, id_t who); } 101 AUE_NULL ALL { int nosys(void); } { old send } 102 AUE_NULL ALL { int nosys(void); } { old recv } 103 AUE_NULL ALL { int nosys(void); } { old sigreturn } #if SOCKETS -104 AUE_BIND ALL { int bind(int s, caddr_t name, socklen_t namelen) NO_SYSCALL_STUB; } -105 AUE_SETSOCKOPT ALL { int setsockopt(int s, int level, int name, caddr_t val, socklen_t valsize); } -106 AUE_LISTEN ALL { int listen(int s, int backlog) NO_SYSCALL_STUB; } +104 AUE_BIND ALL { int bind(int s, caddr_t name, socklen_t namelen) NO_SYSCALL_STUB; } +105 AUE_SETSOCKOPT ALL { int setsockopt(int s, int level, int name, caddr_t val, socklen_t valsize); } +106 AUE_LISTEN ALL { int listen(int s, int backlog) NO_SYSCALL_STUB; } #else 104 AUE_NULL ALL { int nosys(void); } 105 AUE_NULL ALL { int nosys(void); } @@ -170,7 +172,7 @@ 108 AUE_NULL ALL { int nosys(void); } { old sigvec } 109 AUE_NULL ALL { int nosys(void); } { old sigblock } 110 AUE_NULL ALL { int nosys(void); } { old sigsetmask } -111 AUE_NULL ALL { int sigsuspend(sigset_t mask) NO_SYSCALL_STUB; } +111 AUE_NULL ALL { int sigsuspend(sigset_t mask) NO_SYSCALL_STUB; } 112 AUE_NULL ALL { int nosys(void); } 
{ old sigstack } #if SOCKETS 113 AUE_NULL ALL { int nosys(void); } { old recvmsg } @@ -181,54 +183,54 @@ #endif /* SOCKETS */ 115 AUE_NULL ALL { int nosys(void); } { old vtrace } 116 AUE_GETTIMEOFDAY ALL { int gettimeofday(struct timeval *tp, struct timezone *tzp, uint64_t *mach_absolute_time) NO_SYSCALL_STUB; } -117 AUE_GETRUSAGE ALL { int getrusage(int who, struct rusage *rusage); } +117 AUE_GETRUSAGE ALL { int getrusage(int who, struct rusage *rusage); } #if SOCKETS -118 AUE_GETSOCKOPT ALL { int getsockopt(int s, int level, int name, caddr_t val, socklen_t *avalsize); } +118 AUE_GETSOCKOPT ALL { int getsockopt(int s, int level, int name, caddr_t val, socklen_t *avalsize); } #else 118 AUE_NULL ALL { int nosys(void); } #endif /* SOCKETS */ 119 AUE_NULL ALL { int nosys(void); } { old resuba } -120 AUE_READV ALL { user_ssize_t readv(int fd, struct iovec *iovp, u_int iovcnt); } -121 AUE_WRITEV ALL { user_ssize_t writev(int fd, struct iovec *iovp, u_int iovcnt); } -122 AUE_SETTIMEOFDAY ALL { int settimeofday(struct timeval *tv, struct timezone *tzp) NO_SYSCALL_STUB; } -123 AUE_FCHOWN ALL { int fchown(int fd, int uid, int gid); } -124 AUE_FCHMOD ALL { int fchmod(int fd, int mode) NO_SYSCALL_STUB; } +120 AUE_READV ALL { user_ssize_t readv(int fd, struct iovec *iovp, u_int iovcnt); } +121 AUE_WRITEV ALL { user_ssize_t writev(int fd, struct iovec *iovp, u_int iovcnt); } +122 AUE_SETTIMEOFDAY ALL { int settimeofday(struct timeval *tv, struct timezone *tzp) NO_SYSCALL_STUB; } +123 AUE_FCHOWN ALL { int fchown(int fd, int uid, int gid); } +124 AUE_FCHMOD ALL { int fchmod(int fd, int mode) NO_SYSCALL_STUB; } 125 AUE_NULL ALL { int nosys(void); } { old recvfrom } 126 AUE_SETREUID ALL { int setreuid(uid_t ruid, uid_t euid) NO_SYSCALL_STUB; } 127 AUE_SETREGID ALL { int setregid(gid_t rgid, gid_t egid) NO_SYSCALL_STUB; } -128 AUE_RENAME ALL { int rename(char *from, char *to) NO_SYSCALL_STUB; } +128 AUE_RENAME ALL { int rename(char *from, char *to) NO_SYSCALL_STUB; } 129 AUE_NULL ALL { int nosys(void); } { old truncate } 130 AUE_NULL ALL { int nosys(void); } { old ftruncate } -131 AUE_FLOCK ALL { int flock(int fd, int how); } -132 AUE_MKFIFO ALL { int mkfifo(user_addr_t path, int mode); } +131 AUE_FLOCK ALL { int sys_flock(int fd, int how); } +132 AUE_MKFIFO ALL { int mkfifo(user_addr_t path, int mode); } #if SOCKETS -133 AUE_SENDTO ALL { int sendto(int s, caddr_t buf, size_t len, int flags, caddr_t to, socklen_t tolen) NO_SYSCALL_STUB; } -134 AUE_SHUTDOWN ALL { int shutdown(int s, int how); } -135 AUE_SOCKETPAIR ALL { int socketpair(int domain, int type, int protocol, int *rsv) NO_SYSCALL_STUB; } +133 AUE_SENDTO ALL { int sendto(int s, caddr_t buf, size_t len, int flags, caddr_t to, socklen_t tolen) NO_SYSCALL_STUB; } +134 AUE_SHUTDOWN ALL { int shutdown(int s, int how); } +135 AUE_SOCKETPAIR ALL { int socketpair(int domain, int type, int protocol, int *rsv) NO_SYSCALL_STUB; } #else 133 AUE_NULL ALL { int nosys(void); } 134 AUE_NULL ALL { int nosys(void); } 135 AUE_NULL ALL { int nosys(void); } #endif /* SOCKETS */ -136 AUE_MKDIR ALL { int mkdir(user_addr_t path, int mode); } -137 AUE_RMDIR ALL { int rmdir(char *path) NO_SYSCALL_STUB; } -138 AUE_UTIMES ALL { int utimes(char *path, struct timeval *tptr); } -139 AUE_FUTIMES ALL { int futimes(int fd, struct timeval *tptr); } -140 AUE_ADJTIME ALL { int adjtime(struct timeval *delta, struct timeval *olddelta); } +136 AUE_MKDIR ALL { int mkdir(user_addr_t path, int mode); } +137 AUE_RMDIR ALL { int rmdir(char *path) NO_SYSCALL_STUB; } +138 AUE_UTIMES ALL { int 
utimes(char *path, struct timeval *tptr); } +139 AUE_FUTIMES ALL { int futimes(int fd, struct timeval *tptr); } +140 AUE_ADJTIME ALL { int adjtime(struct timeval *delta, struct timeval *olddelta); } 141 AUE_NULL ALL { int nosys(void); } { old getpeername } 142 AUE_SYSCTL ALL { int gethostuuid(unsigned char *uuid_buf, const struct timespec *timeoutp) NO_SYSCALL_STUB; } 143 AUE_NULL ALL { int nosys(void); } { old sethostid } 144 AUE_NULL ALL { int nosys(void); } { old getrlimit } 145 AUE_NULL ALL { int nosys(void); } { old setrlimit } 146 AUE_NULL ALL { int nosys(void); } { old killpg } -147 AUE_SETSID ALL { int setsid(void); } +147 AUE_SETSID ALL { int setsid(void); } 148 AUE_NULL ALL { int nosys(void); } { old setquota } 149 AUE_NULL ALL { int nosys(void); } { old qquota } 150 AUE_NULL ALL { int nosys(void); } { old getsockname } -151 AUE_GETPGID ALL { int getpgid(pid_t pid); } -152 AUE_SETPRIVEXEC ALL { int setprivexec(int flag); } -153 AUE_PREAD ALL { user_ssize_t pread(int fd, user_addr_t buf, user_size_t nbyte, off_t offset); } -154 AUE_PWRITE ALL { user_ssize_t pwrite(int fd, user_addr_t buf, user_size_t nbyte, off_t offset); } +151 AUE_GETPGID ALL { int getpgid(pid_t pid); } +152 AUE_SETPRIVEXEC ALL { int setprivexec(int flag); } +153 AUE_PREAD ALL { user_ssize_t pread(int fd, user_addr_t buf, user_size_t nbyte, off_t offset); } +154 AUE_PWRITE ALL { user_ssize_t pwrite(int fd, user_addr_t buf, user_size_t nbyte, off_t offset); } #if NFSSERVER /* XXX */ 155 AUE_NFS_SVC ALL { int nfssvc(int flag, caddr_t argp); } @@ -237,9 +239,9 @@ #endif 156 AUE_NULL ALL { int nosys(void); } { old getdirentries } -157 AUE_STATFS ALL { int statfs(char *path, struct statfs *buf); } -158 AUE_FSTATFS ALL { int fstatfs(int fd, struct statfs *buf); } -159 AUE_UNMOUNT ALL { int unmount(user_addr_t path, int flags); } +157 AUE_STATFS ALL { int statfs(char *path, struct statfs *buf); } +158 AUE_FSTATFS ALL { int fstatfs(int fd, struct statfs *buf); } +159 AUE_UNMOUNT ALL { int unmount(user_addr_t path, int flags); } 160 AUE_NULL ALL { int nosys(void); } { old async_daemon } #if NFSSERVER /* XXX */ @@ -250,48 +252,48 @@ 162 AUE_NULL ALL { int nosys(void); } { old getdomainname } 163 AUE_NULL ALL { int nosys(void); } { old setdomainname } -164 AUE_NULL ALL { int nosys(void); } -165 AUE_QUOTACTL ALL { int quotactl(const char *path, int cmd, int uid, caddr_t arg); } +164 AUE_NULL ALL { int nosys(void); } +165 AUE_QUOTACTL ALL { int quotactl(const char *path, int cmd, int uid, caddr_t arg); } 166 AUE_NULL ALL { int nosys(void); } { old exportfs } -167 AUE_MOUNT ALL { int mount(char *type, char *path, int flags, caddr_t data); } +167 AUE_MOUNT ALL { int mount(char *type, char *path, int flags, caddr_t data); } 168 AUE_NULL ALL { int nosys(void); } { old ustat } -169 AUE_CSOPS ALL { int csops(pid_t pid, uint32_t ops, user_addr_t useraddr, user_size_t usersize); } -170 AUE_CSOPS ALL { int csops_audittoken(pid_t pid, uint32_t ops, user_addr_t useraddr, user_size_t usersize, user_addr_t uaudittoken); } +169 AUE_CSOPS ALL { int csops(pid_t pid, uint32_t ops, user_addr_t useraddr, user_size_t usersize); } +170 AUE_CSOPS ALL { int csops_audittoken(pid_t pid, uint32_t ops, user_addr_t useraddr, user_size_t usersize, user_addr_t uaudittoken); } 171 AUE_NULL ALL { int nosys(void); } { old wait3 } 172 AUE_NULL ALL { int nosys(void); } { old rpause } -173 AUE_WAITID ALL { int waitid(idtype_t idtype, id_t id, siginfo_t *infop, int options); } +173 AUE_WAITID ALL { int waitid(idtype_t idtype, id_t id, siginfo_t *infop, int 
options); } 174 AUE_NULL ALL { int nosys(void); } { old getdents } 175 AUE_NULL ALL { int nosys(void); } { old gc_control } 176 AUE_NULL ALL { int nosys(void); } { old add_profil } -177 AUE_NULL ALL { int kdebug_typefilter(void** addr, size_t* size) NO_SYSCALL_STUB; } +177 AUE_NULL ALL { int kdebug_typefilter(void** addr, size_t* size) NO_SYSCALL_STUB; } 178 AUE_NULL ALL { uint64_t kdebug_trace_string(uint32_t debugid, uint64_t str_id, const char *str) NO_SYSCALL_STUB; } -179 AUE_NULL ALL { int kdebug_trace64(uint32_t code, uint64_t arg1, uint64_t arg2, uint64_t arg3, uint64_t arg4) NO_SYSCALL_STUB; } -180 AUE_NULL ALL { int kdebug_trace(uint32_t code, u_long arg1, u_long arg2, u_long arg3, u_long arg4) NO_SYSCALL_STUB; } -181 AUE_SETGID ALL { int setgid(gid_t gid); } -182 AUE_SETEGID ALL { int setegid(gid_t egid); } -183 AUE_SETEUID ALL { int seteuid(uid_t euid); } +179 AUE_NULL ALL { int kdebug_trace64(uint32_t code, uint64_t arg1, uint64_t arg2, uint64_t arg3, uint64_t arg4) NO_SYSCALL_STUB; } +180 AUE_NULL ALL { int kdebug_trace(uint32_t code, u_long arg1, u_long arg2, u_long arg3, u_long arg4) NO_SYSCALL_STUB; } +181 AUE_SETGID ALL { int setgid(gid_t gid); } +182 AUE_SETEGID ALL { int setegid(gid_t egid); } +183 AUE_SETEUID ALL { int seteuid(uid_t euid); } 184 AUE_SIGRETURN ALL { int sigreturn(struct ucontext *uctx, int infostyle, user_addr_t token) NO_SYSCALL_STUB; } 185 AUE_NULL ALL { int enosys(void); } { old chud } 186 AUE_NULL ALL { int thread_selfcounts(int type, user_addr_t buf, user_size_t nbytes); } -187 AUE_FDATASYNC ALL { int fdatasync(int fd); } -188 AUE_STAT ALL { int stat(user_addr_t path, user_addr_t ub); } -189 AUE_FSTAT ALL { int fstat(int fd, user_addr_t ub); } -190 AUE_LSTAT ALL { int lstat(user_addr_t path, user_addr_t ub); } -191 AUE_PATHCONF ALL { int pathconf(char *path, int name); } -192 AUE_FPATHCONF ALL { int fpathconf(int fd, int name); } -193 AUE_NULL ALL { int nosys(void); } { old getfsstat } -194 AUE_GETRLIMIT ALL { int getrlimit(u_int which, struct rlimit *rlp) NO_SYSCALL_STUB; } -195 AUE_SETRLIMIT ALL { int setrlimit(u_int which, struct rlimit *rlp) NO_SYSCALL_STUB; } -196 AUE_GETDIRENTRIES ALL { int getdirentries(int fd, char *buf, u_int count, long *basep); } -197 AUE_MMAP ALL { user_addr_t mmap(caddr_t addr, size_t len, int prot, int flags, int fd, off_t pos) NO_SYSCALL_STUB; } +187 AUE_FDATASYNC ALL { int fdatasync(int fd); } +188 AUE_STAT ALL { int stat(user_addr_t path, user_addr_t ub); } +189 AUE_FSTAT ALL { int sys_fstat(int fd, user_addr_t ub); } +190 AUE_LSTAT ALL { int lstat(user_addr_t path, user_addr_t ub); } +191 AUE_PATHCONF ALL { int pathconf(char *path, int name); } +192 AUE_FPATHCONF ALL { int sys_fpathconf(int fd, int name); } +193 AUE_NULL ALL { int nosys(void); } { old getfsstat } +194 AUE_GETRLIMIT ALL { int getrlimit(u_int which, struct rlimit *rlp) NO_SYSCALL_STUB; } +195 AUE_SETRLIMIT ALL { int setrlimit(u_int which, struct rlimit *rlp) NO_SYSCALL_STUB; } +196 AUE_GETDIRENTRIES ALL { int getdirentries(int fd, char *buf, u_int count, long *basep); } +197 AUE_MMAP ALL { user_addr_t mmap(caddr_t addr, size_t len, int prot, int flags, int fd, off_t pos) NO_SYSCALL_STUB; } 198 AUE_NULL ALL { int nosys(void); } { old __syscall } -199 AUE_LSEEK ALL { off_t lseek(int fd, off_t offset, int whence); } -200 AUE_TRUNCATE ALL { int truncate(char *path, off_t length); } -201 AUE_FTRUNCATE ALL { int ftruncate(int fd, off_t length); } -202 AUE_SYSCTL ALL { int sysctl(int *name, u_int namelen, void *old, size_t *oldlenp, void *new, size_t 
newlen) NO_SYSCALL_STUB; } -203 AUE_MLOCK ALL { int mlock(caddr_t addr, size_t len); } -204 AUE_MUNLOCK ALL { int munlock(caddr_t addr, size_t len); } -205 AUE_UNDELETE ALL { int undelete(user_addr_t path); } +199 AUE_LSEEK ALL { off_t lseek(int fd, off_t offset, int whence); } +200 AUE_TRUNCATE ALL { int truncate(char *path, off_t length); } +201 AUE_FTRUNCATE ALL { int ftruncate(int fd, off_t length); } +202 AUE_SYSCTL ALL { int sysctl(int *name, u_int namelen, void *old, size_t *oldlenp, void *new, size_t newlen) NO_SYSCALL_STUB; } +203 AUE_MLOCK ALL { int mlock(caddr_t addr, size_t len); } +204 AUE_MUNLOCK ALL { int munlock(caddr_t addr, size_t len); } +205 AUE_UNDELETE ALL { int undelete(user_addr_t path); } 206 AUE_NULL ALL { int nosys(void); } { old ATsocket } 207 AUE_NULL ALL { int nosys(void); } { old ATgetmsg } @@ -306,7 +308,7 @@ 215 AUE_NULL ALL { int nosys(void); } ; System Calls 216 - 230 are reserved for calls to support HFS/HFS Plus -; file system semantics. Currently, we only use 215-227. The rest is +; file system semantics. Currently, we only use 215-227. The rest is ; for future expansion in anticipation of new MacOS APIs for HFS Plus. ; These calls are not conditionalized because while they are specific ; to HFS semantics, they are not specific to the HFS filesystem. @@ -318,33 +320,33 @@ 217 AUE_FSGETPATH_EXTENDED ALL { user_ssize_t fsgetpath_ext(user_addr_t buf, size_t bufsize, user_addr_t fsid, uint64_t objid, uint32_t options); } 218 AUE_NULL ALL { int nosys(void); } { old lstatv } 219 AUE_NULL ALL { int nosys(void); } { old fstatv } -220 AUE_GETATTRLIST ALL { int getattrlist(const char *path, struct attrlist *alist, void *attributeBuffer, size_t bufferSize, u_long options) NO_SYSCALL_STUB; } -221 AUE_SETATTRLIST ALL { int setattrlist(const char *path, struct attrlist *alist, void *attributeBuffer, size_t bufferSize, u_long options) NO_SYSCALL_STUB; } -222 AUE_GETDIRENTRIESATTR ALL { int getdirentriesattr(int fd, struct attrlist *alist, void *buffer, size_t buffersize, u_long *count, u_long *basep, u_long *newstate, u_long options); } -223 AUE_EXCHANGEDATA ALL { int exchangedata(const char *path1, const char *path2, u_long options); } +220 AUE_GETATTRLIST ALL { int getattrlist(const char *path, struct attrlist *alist, void *attributeBuffer, size_t bufferSize, u_long options) NO_SYSCALL_STUB; } +221 AUE_SETATTRLIST ALL { int setattrlist(const char *path, struct attrlist *alist, void *attributeBuffer, size_t bufferSize, u_long options) NO_SYSCALL_STUB; } +222 AUE_GETDIRENTRIESATTR ALL { int getdirentriesattr(int fd, struct attrlist *alist, void *buffer, size_t buffersize, u_long *count, u_long *basep, u_long *newstate, u_long options); } +223 AUE_EXCHANGEDATA ALL { int exchangedata(const char *path1, const char *path2, u_long options); } 224 AUE_NULL ALL { int nosys(void); } { old checkuseraccess or fsgetpath } -225 AUE_SEARCHFS ALL { int searchfs(const char *path, struct fssearchblock *searchblock, uint32_t *nummatches, uint32_t scriptcode, uint32_t options, struct searchstate *state); } +225 AUE_SEARCHFS ALL { int searchfs(const char *path, struct fssearchblock *searchblock, uint32_t *nummatches, uint32_t scriptcode, uint32_t options, struct searchstate *state); } 226 AUE_DELETE ALL { int delete(user_addr_t path) NO_SYSCALL_STUB; } { private delete (Carbon semantics) } -227 AUE_COPYFILE ALL { int copyfile(char *from, char *to, int mode, int flags) NO_SYSCALL_STUB; } -228 AUE_FGETATTRLIST ALL { int fgetattrlist(int fd, struct attrlist *alist, void *attributeBuffer, 
size_t bufferSize, u_long options); } -229 AUE_FSETATTRLIST ALL { int fsetattrlist(int fd, struct attrlist *alist, void *attributeBuffer, size_t bufferSize, u_long options); } -230 AUE_POLL ALL { int poll(struct pollfd *fds, u_int nfds, int timeout); } -231 AUE_WATCHEVENT ALL { int watchevent(struct eventreq *u_req, int u_eventmask); } -232 AUE_WAITEVENT ALL { int waitevent(struct eventreq *u_req, struct timeval *tv); } -233 AUE_MODWATCH ALL { int modwatch(struct eventreq *u_req, int u_eventmask); } -234 AUE_GETXATTR ALL { user_ssize_t getxattr(user_addr_t path, user_addr_t attrname, user_addr_t value, size_t size, uint32_t position, int options); } -235 AUE_FGETXATTR ALL { user_ssize_t fgetxattr(int fd, user_addr_t attrname, user_addr_t value, size_t size, uint32_t position, int options); } -236 AUE_SETXATTR ALL { int setxattr(user_addr_t path, user_addr_t attrname, user_addr_t value, size_t size, uint32_t position, int options); } -237 AUE_FSETXATTR ALL { int fsetxattr(int fd, user_addr_t attrname, user_addr_t value, size_t size, uint32_t position, int options); } -238 AUE_REMOVEXATTR ALL { int removexattr(user_addr_t path, user_addr_t attrname, int options); } -239 AUE_FREMOVEXATTR ALL { int fremovexattr(int fd, user_addr_t attrname, int options); } -240 AUE_LISTXATTR ALL { user_ssize_t listxattr(user_addr_t path, user_addr_t namebuf, size_t bufsize, int options); } -241 AUE_FLISTXATTR ALL { user_ssize_t flistxattr(int fd, user_addr_t namebuf, size_t bufsize, int options); } -242 AUE_FSCTL ALL { int fsctl(const char *path, u_long cmd, caddr_t data, u_int options); } -243 AUE_INITGROUPS ALL { int initgroups(u_int gidsetsize, gid_t *gidset, int gmuid) NO_SYSCALL_STUB; } -244 AUE_POSIX_SPAWN ALL { int posix_spawn(pid_t *pid, const char *path, const struct _posix_spawn_args_desc *adesc, char **argv, char **envp) NO_SYSCALL_STUB; } -245 AUE_FFSCTL ALL { int ffsctl(int fd, u_long cmd, caddr_t data, u_int options); } -246 AUE_NULL ALL { int nosys(void); } +227 AUE_COPYFILE ALL { int copyfile(char *from, char *to, int mode, int flags) NO_SYSCALL_STUB; } +228 AUE_FGETATTRLIST ALL { int fgetattrlist(int fd, struct attrlist *alist, void *attributeBuffer, size_t bufferSize, u_long options); } +229 AUE_FSETATTRLIST ALL { int fsetattrlist(int fd, struct attrlist *alist, void *attributeBuffer, size_t bufferSize, u_long options); } +230 AUE_POLL ALL { int poll(struct pollfd *fds, u_int nfds, int timeout); } +231 AUE_NULL ALL { int nosys(void); } { old watchevent } +232 AUE_NULL ALL { int nosys(void); } { old waitevent } +233 AUE_NULL ALL { int nosys(void); } { old modwatch } +234 AUE_GETXATTR ALL { user_ssize_t getxattr(user_addr_t path, user_addr_t attrname, user_addr_t value, size_t size, uint32_t position, int options); } +235 AUE_FGETXATTR ALL { user_ssize_t fgetxattr(int fd, user_addr_t attrname, user_addr_t value, size_t size, uint32_t position, int options); } +236 AUE_SETXATTR ALL { int setxattr(user_addr_t path, user_addr_t attrname, user_addr_t value, size_t size, uint32_t position, int options); } +237 AUE_FSETXATTR ALL { int fsetxattr(int fd, user_addr_t attrname, user_addr_t value, size_t size, uint32_t position, int options); } +238 AUE_REMOVEXATTR ALL { int removexattr(user_addr_t path, user_addr_t attrname, int options); } +239 AUE_FREMOVEXATTR ALL { int fremovexattr(int fd, user_addr_t attrname, int options); } +240 AUE_LISTXATTR ALL { user_ssize_t listxattr(user_addr_t path, user_addr_t namebuf, size_t bufsize, int options); } +241 AUE_FLISTXATTR ALL { user_ssize_t flistxattr(int fd, 
user_addr_t namebuf, size_t bufsize, int options); } +242 AUE_FSCTL ALL { int fsctl(const char *path, u_long cmd, caddr_t data, u_int options); } +243 AUE_INITGROUPS ALL { int initgroups(u_int gidsetsize, gid_t *gidset, int gmuid) NO_SYSCALL_STUB; } +244 AUE_POSIX_SPAWN ALL { int posix_spawn(pid_t *pid, const char *path, const struct _posix_spawn_args_desc *adesc, char **argv, char **envp) NO_SYSCALL_STUB; } +245 AUE_FFSCTL ALL { int ffsctl(int fd, u_long cmd, caddr_t data, u_int options); } +246 AUE_NULL ALL { int nosys(void); } #if NFSCLIENT /* XXX */ 247 AUE_NULL ALL { int nfsclnt(int flag, caddr_t argp); } @@ -357,87 +359,87 @@ 248 AUE_NULL ALL { int nosys(void); } #endif -249 AUE_NULL ALL { int nosys(void); } -250 AUE_MINHERIT ALL { int minherit(void *addr, size_t len, int inherit); } +249 AUE_NULL ALL { int nosys(void); } +250 AUE_MINHERIT ALL { int minherit(void *addr, size_t len, int inherit); } #if SYSV_SEM -251 AUE_SEMSYS ALL { int semsys(u_int which, int a2, int a3, int a4, int a5) NO_SYSCALL_STUB; } +251 AUE_SEMSYS ALL { int semsys(u_int which, int a2, int a3, int a4, int a5) NO_SYSCALL_STUB; } #else -251 AUE_NULL ALL { int nosys(void); } +251 AUE_NULL ALL { int nosys(void); } #endif #if SYSV_MSG 252 AUE_MSGSYS ALL { int msgsys(u_int which, int a2, int a3, int a4, int a5) NO_SYSCALL_STUB; } #else -252 AUE_NULL ALL { int nosys(void); } +252 AUE_NULL ALL { int nosys(void); } #endif #if SYSV_SHM -253 AUE_SHMSYS ALL { int shmsys(u_int which, int a2, int a3, int a4) NO_SYSCALL_STUB; } +253 AUE_SHMSYS ALL { int shmsys(u_int which, int a2, int a3, int a4) NO_SYSCALL_STUB; } #else -253 AUE_NULL ALL { int nosys(void); } +253 AUE_NULL ALL { int nosys(void); } #endif #if SYSV_SEM -254 AUE_SEMCTL ALL { int semctl(int semid, int semnum, int cmd, semun_t arg) NO_SYSCALL_STUB; } -255 AUE_SEMGET ALL { int semget(key_t key, int nsems, int semflg); } -256 AUE_SEMOP ALL { int semop(int semid, struct sembuf *sops, int nsops); } -257 AUE_NULL ALL { int nosys(void); } { old semconfig } -#else -254 AUE_NULL ALL { int nosys(void); } -255 AUE_NULL ALL { int nosys(void); } -256 AUE_NULL ALL { int nosys(void); } -257 AUE_NULL ALL { int nosys(void); } +254 AUE_SEMCTL ALL { int semctl(int semid, int semnum, int cmd, semun_t arg) NO_SYSCALL_STUB; } +255 AUE_SEMGET ALL { int semget(key_t key, int nsems, int semflg); } +256 AUE_SEMOP ALL { int semop(int semid, struct sembuf *sops, int nsops); } +257 AUE_NULL ALL { int nosys(void); } { old semconfig } +#else +254 AUE_NULL ALL { int nosys(void); } +255 AUE_NULL ALL { int nosys(void); } +256 AUE_NULL ALL { int nosys(void); } +257 AUE_NULL ALL { int nosys(void); } #endif #if SYSV_MSG -258 AUE_MSGCTL ALL { int msgctl(int msqid, int cmd, struct msqid_ds *buf) NO_SYSCALL_STUB; } -259 AUE_MSGGET ALL { int msgget(key_t key, int msgflg); } -260 AUE_MSGSND ALL { int msgsnd(int msqid, void *msgp, size_t msgsz, int msgflg); } -261 AUE_MSGRCV ALL { user_ssize_t msgrcv(int msqid, void *msgp, size_t msgsz, long msgtyp, int msgflg); } -#else -258 AUE_NULL ALL { int nosys(void); } -259 AUE_NULL ALL { int nosys(void); } -260 AUE_NULL ALL { int nosys(void); } -261 AUE_NULL ALL { int nosys(void); } +258 AUE_MSGCTL ALL { int msgctl(int msqid, int cmd, struct msqid_ds *buf) NO_SYSCALL_STUB; } +259 AUE_MSGGET ALL { int msgget(key_t key, int msgflg); } +260 AUE_MSGSND ALL { int msgsnd(int msqid, void *msgp, size_t msgsz, int msgflg); } +261 AUE_MSGRCV ALL { user_ssize_t msgrcv(int msqid, void *msgp, size_t msgsz, long msgtyp, int msgflg); } +#else +258 AUE_NULL ALL { int nosys(void); 
} +259 AUE_NULL ALL { int nosys(void); } +260 AUE_NULL ALL { int nosys(void); } +261 AUE_NULL ALL { int nosys(void); } #endif #if SYSV_SHM -262 AUE_SHMAT ALL { user_addr_t shmat(int shmid, void *shmaddr, int shmflg); } -263 AUE_SHMCTL ALL { int shmctl(int shmid, int cmd, struct shmid_ds *buf) NO_SYSCALL_STUB; } -264 AUE_SHMDT ALL { int shmdt(void *shmaddr); } -265 AUE_SHMGET ALL { int shmget(key_t key, size_t size, int shmflg); } -#else -262 AUE_NULL ALL { int nosys(void); } -263 AUE_NULL ALL { int nosys(void); } -264 AUE_NULL ALL { int nosys(void); } -265 AUE_NULL ALL { int nosys(void); } +262 AUE_SHMAT ALL { user_addr_t shmat(int shmid, void *shmaddr, int shmflg); } +263 AUE_SHMCTL ALL { int shmctl(int shmid, int cmd, struct shmid_ds *buf) NO_SYSCALL_STUB; } +264 AUE_SHMDT ALL { int shmdt(void *shmaddr); } +265 AUE_SHMGET ALL { int shmget(key_t key, size_t size, int shmflg); } +#else +262 AUE_NULL ALL { int nosys(void); } +263 AUE_NULL ALL { int nosys(void); } +264 AUE_NULL ALL { int nosys(void); } +265 AUE_NULL ALL { int nosys(void); } #endif -266 AUE_SHMOPEN ALL { int shm_open(const char *name, int oflag, int mode) NO_SYSCALL_STUB; } -267 AUE_SHMUNLINK ALL { int shm_unlink(const char *name); } -268 AUE_SEMOPEN ALL { user_addr_t sem_open(const char *name, int oflag, int mode, int value) NO_SYSCALL_STUB; } -269 AUE_SEMCLOSE ALL { int sem_close(sem_t *sem); } -270 AUE_SEMUNLINK ALL { int sem_unlink(const char *name); } -271 AUE_SEMWAIT ALL { int sem_wait(sem_t *sem); } -272 AUE_SEMTRYWAIT ALL { int sem_trywait(sem_t *sem); } -273 AUE_SEMPOST ALL { int sem_post(sem_t *sem); } +266 AUE_SHMOPEN ALL { int shm_open(const char *name, int oflag, int mode) NO_SYSCALL_STUB; } +267 AUE_SHMUNLINK ALL { int shm_unlink(const char *name); } +268 AUE_SEMOPEN ALL { user_addr_t sem_open(const char *name, int oflag, int mode, int value) NO_SYSCALL_STUB; } +269 AUE_SEMCLOSE ALL { int sem_close(sem_t *sem); } +270 AUE_SEMUNLINK ALL { int sem_unlink(const char *name); } +271 AUE_SEMWAIT ALL { int sem_wait(sem_t *sem); } +272 AUE_SEMTRYWAIT ALL { int sem_trywait(sem_t *sem); } +273 AUE_SEMPOST ALL { int sem_post(sem_t *sem); } 274 AUE_SYSCTL ALL { int sys_sysctlbyname(const char *name, size_t namelen, void *old, size_t *oldlenp, void *new, size_t newlen) NO_SYSCALL_STUB; } 275 AUE_NULL ALL { int enosys(void); } { old sem_init } 276 AUE_NULL ALL { int enosys(void); } { old sem_destroy } -277 AUE_OPEN_EXTENDED_RWTC ALL { int open_extended(user_addr_t path, int flags, uid_t uid, gid_t gid, int mode, user_addr_t xsecurity) NO_SYSCALL_STUB; } -278 AUE_UMASK_EXTENDED ALL { int umask_extended(int newmask, user_addr_t xsecurity) NO_SYSCALL_STUB; } -279 AUE_STAT_EXTENDED ALL { int stat_extended(user_addr_t path, user_addr_t ub, user_addr_t xsecurity, user_addr_t xsecurity_size) NO_SYSCALL_STUB; } -280 AUE_LSTAT_EXTENDED ALL { int lstat_extended(user_addr_t path, user_addr_t ub, user_addr_t xsecurity, user_addr_t xsecurity_size) NO_SYSCALL_STUB; } -281 AUE_FSTAT_EXTENDED ALL { int fstat_extended(int fd, user_addr_t ub, user_addr_t xsecurity, user_addr_t xsecurity_size) NO_SYSCALL_STUB; } -282 AUE_CHMOD_EXTENDED ALL { int chmod_extended(user_addr_t path, uid_t uid, gid_t gid, int mode, user_addr_t xsecurity) NO_SYSCALL_STUB; } -283 AUE_FCHMOD_EXTENDED ALL { int fchmod_extended(int fd, uid_t uid, gid_t gid, int mode, user_addr_t xsecurity) NO_SYSCALL_STUB; } -284 AUE_ACCESS_EXTENDED ALL { int access_extended(user_addr_t entries, size_t size, user_addr_t results, uid_t uid) NO_SYSCALL_STUB; } -285 AUE_SETTID ALL { int 
settid(uid_t uid, gid_t gid) NO_SYSCALL_STUB; } -286 AUE_GETTID ALL { int gettid(uid_t *uidp, gid_t *gidp) NO_SYSCALL_STUB; } -287 AUE_SETSGROUPS ALL { int setsgroups(int setlen, user_addr_t guidset) NO_SYSCALL_STUB; } -288 AUE_GETSGROUPS ALL { int getsgroups(user_addr_t setlen, user_addr_t guidset) NO_SYSCALL_STUB; } -289 AUE_SETWGROUPS ALL { int setwgroups(int setlen, user_addr_t guidset) NO_SYSCALL_STUB; } +277 AUE_OPEN_EXTENDED_RWTC ALL { int open_extended(user_addr_t path, int flags, uid_t uid, gid_t gid, int mode, user_addr_t xsecurity) NO_SYSCALL_STUB; } +278 AUE_UMASK_EXTENDED ALL { int umask_extended(int newmask, user_addr_t xsecurity) NO_SYSCALL_STUB; } +279 AUE_STAT_EXTENDED ALL { int stat_extended(user_addr_t path, user_addr_t ub, user_addr_t xsecurity, user_addr_t xsecurity_size) NO_SYSCALL_STUB; } +280 AUE_LSTAT_EXTENDED ALL { int lstat_extended(user_addr_t path, user_addr_t ub, user_addr_t xsecurity, user_addr_t xsecurity_size) NO_SYSCALL_STUB; } +281 AUE_FSTAT_EXTENDED ALL { int sys_fstat_extended(int fd, user_addr_t ub, user_addr_t xsecurity, user_addr_t xsecurity_size) NO_SYSCALL_STUB; } +282 AUE_CHMOD_EXTENDED ALL { int chmod_extended(user_addr_t path, uid_t uid, gid_t gid, int mode, user_addr_t xsecurity) NO_SYSCALL_STUB; } +283 AUE_FCHMOD_EXTENDED ALL { int fchmod_extended(int fd, uid_t uid, gid_t gid, int mode, user_addr_t xsecurity) NO_SYSCALL_STUB; } +284 AUE_ACCESS_EXTENDED ALL { int access_extended(user_addr_t entries, size_t size, user_addr_t results, uid_t uid) NO_SYSCALL_STUB; } +285 AUE_SETTID ALL { int settid(uid_t uid, gid_t gid) NO_SYSCALL_STUB; } +286 AUE_GETTID ALL { int gettid(uid_t *uidp, gid_t *gidp) NO_SYSCALL_STUB; } +287 AUE_SETSGROUPS ALL { int setsgroups(int setlen, user_addr_t guidset) NO_SYSCALL_STUB; } +288 AUE_GETSGROUPS ALL { int getsgroups(user_addr_t setlen, user_addr_t guidset) NO_SYSCALL_STUB; } +289 AUE_SETWGROUPS ALL { int setwgroups(int setlen, user_addr_t guidset) NO_SYSCALL_STUB; } 290 AUE_GETWGROUPS ALL { int getwgroups(user_addr_t setlen, user_addr_t guidset) NO_SYSCALL_STUB; } -291 AUE_MKFIFO_EXTENDED ALL { int mkfifo_extended(user_addr_t path, uid_t uid, gid_t gid, int mode, user_addr_t xsecurity) NO_SYSCALL_STUB; } -292 AUE_MKDIR_EXTENDED ALL { int mkdir_extended(user_addr_t path, uid_t uid, gid_t gid, int mode, user_addr_t xsecurity) NO_SYSCALL_STUB; } +291 AUE_MKFIFO_EXTENDED ALL { int mkfifo_extended(user_addr_t path, uid_t uid, gid_t gid, int mode, user_addr_t xsecurity) NO_SYSCALL_STUB; } +292 AUE_MKDIR_EXTENDED ALL { int mkdir_extended(user_addr_t path, uid_t uid, gid_t gid, int mode, user_addr_t xsecurity) NO_SYSCALL_STUB; } #if CONFIG_EXT_RESOLVER -293 AUE_IDENTITYSVC ALL { int identitysvc(int opcode, user_addr_t message) NO_SYSCALL_STUB; } +293 AUE_IDENTITYSVC ALL { int identitysvc(int opcode, user_addr_t message) NO_SYSCALL_STUB; } #else -293 AUE_NULL ALL { int nosys(void); } +293 AUE_NULL ALL { int nosys(void); } #endif 294 AUE_NULL ALL { int shared_region_check_np(uint64_t *start_address) NO_SYSCALL_STUB; } 295 AUE_NULL ALL { int nosys(void); } { old shared_region_map_np } @@ -461,7 +463,7 @@ 298 AUE_NULL ALL { int nosys(void); } { old new_system_shared_regions } 299 AUE_NULL ALL { int enosys(void); } { old shared_region_map_file_np } 300 AUE_NULL ALL { int enosys(void); } { old shared_region_make_private_np } -301 AUE_NULL ALL { int nosys(void); } +301 AUE_NULL ALL { int nosys(void); } 302 AUE_NULL ALL { int nosys(void); } 303 AUE_NULL ALL { int nosys(void); } 304 AUE_NULL ALL { int nosys(void); } @@ -471,34 +473,34 
@@ 308 AUE_NULL ALL { int nosys(void); } 309 AUE_NULL ALL { int nosys(void); } #endif -310 AUE_GETSID ALL { int getsid(pid_t pid); } -311 AUE_SETTIDWITHPID ALL { int settid_with_pid(pid_t pid, int assume) NO_SYSCALL_STUB; } +310 AUE_GETSID ALL { int getsid(pid_t pid); } +311 AUE_SETTIDWITHPID ALL { int settid_with_pid(pid_t pid, int assume) NO_SYSCALL_STUB; } #if PSYNCH 312 AUE_NULL ALL { int psynch_cvclrprepost(user_addr_t cv, uint32_t cvgen, uint32_t cvugen, uint32_t cvsgen, uint32_t prepocnt, uint32_t preposeq, uint32_t flags) NO_SYSCALL_STUB; } #else 312 AUE_NULL ALL { int nosys(void); } { old __pthread_cond_timedwait } #endif -313 AUE_NULL ALL { int aio_fsync(int op, user_addr_t aiocbp); } -314 AUE_NULL ALL { user_ssize_t aio_return(user_addr_t aiocbp); } -315 AUE_NULL ALL { int aio_suspend(user_addr_t aiocblist, int nent, user_addr_t timeoutp); } -316 AUE_NULL ALL { int aio_cancel(int fd, user_addr_t aiocbp); } -317 AUE_NULL ALL { int aio_error(user_addr_t aiocbp); } -318 AUE_NULL ALL { int aio_read(user_addr_t aiocbp); } -319 AUE_NULL ALL { int aio_write(user_addr_t aiocbp); } -320 AUE_LIOLISTIO ALL { int lio_listio(int mode, user_addr_t aiocblist, int nent, user_addr_t sigp); } +313 AUE_NULL ALL { int aio_fsync(int op, user_addr_t aiocbp); } +314 AUE_NULL ALL { user_ssize_t aio_return(user_addr_t aiocbp); } +315 AUE_NULL ALL { int aio_suspend(user_addr_t aiocblist, int nent, user_addr_t timeoutp); } +316 AUE_NULL ALL { int aio_cancel(int fd, user_addr_t aiocbp); } +317 AUE_NULL ALL { int aio_error(user_addr_t aiocbp); } +318 AUE_NULL ALL { int aio_read(user_addr_t aiocbp); } +319 AUE_NULL ALL { int aio_write(user_addr_t aiocbp); } +320 AUE_LIOLISTIO ALL { int lio_listio(int mode, user_addr_t aiocblist, int nent, user_addr_t sigp); } 321 AUE_NULL ALL { int nosys(void); } { old __pthread_cond_wait } -322 AUE_IOPOLICYSYS ALL { int iopolicysys(int cmd, void *arg) NO_SYSCALL_STUB; } -323 AUE_NULL ALL { int process_policy(int scope, int action, int policy, int policy_subtype, user_addr_t attrp, pid_t target_pid, uint64_t target_threadid) NO_SYSCALL_STUB; } -324 AUE_MLOCKALL ALL { int mlockall(int how); } -325 AUE_MUNLOCKALL ALL { int munlockall(int how); } -326 AUE_NULL ALL { int nosys(void); } -327 AUE_ISSETUGID ALL { int issetugid(void); } -328 AUE_PTHREADKILL ALL { int __pthread_kill(int thread_port, int sig); } -329 AUE_PTHREADSIGMASK ALL { int __pthread_sigmask(int how, user_addr_t set, user_addr_t oset); } -330 AUE_SIGWAIT ALL { int __sigwait(user_addr_t set, user_addr_t sig); } -331 AUE_NULL ALL { int __disable_threadsignal(int value); } -332 AUE_NULL ALL { int __pthread_markcancel(int thread_port); } -333 AUE_NULL ALL { int __pthread_canceled(int action); } +322 AUE_IOPOLICYSYS ALL { int iopolicysys(int cmd, void *arg) NO_SYSCALL_STUB; } +323 AUE_NULL ALL { int process_policy(int scope, int action, int policy, int policy_subtype, user_addr_t attrp, pid_t target_pid, uint64_t target_threadid) NO_SYSCALL_STUB; } +324 AUE_MLOCKALL ALL { int mlockall(int how); } +325 AUE_MUNLOCKALL ALL { int munlockall(int how); } +326 AUE_NULL ALL { int nosys(void); } +327 AUE_ISSETUGID ALL { int issetugid(void); } +328 AUE_PTHREADKILL ALL { int __pthread_kill(int thread_port, int sig); } +329 AUE_PTHREADSIGMASK ALL { int __pthread_sigmask(int how, user_addr_t set, user_addr_t oset); } +330 AUE_SIGWAIT ALL { int __sigwait(user_addr_t set, user_addr_t sig); } +331 AUE_NULL ALL { int __disable_threadsignal(int value); } +332 AUE_NULL ALL { int __pthread_markcancel(int thread_port); } +333 AUE_NULL ALL 
{ int __pthread_canceled(int action); } ;#if OLD_SEMWAIT_SIGNAL ;334 AUE_NULL ALL { int nosys(void); } { old __semwait_signal } @@ -507,55 +509,55 @@ ;#endif 335 AUE_NULL ALL { int nosys(void); } { old utrace } -336 AUE_PROCINFO ALL { int proc_info(int32_t callnum,int32_t pid,uint32_t flavor, uint64_t arg,user_addr_t buffer,int32_t buffersize) NO_SYSCALL_STUB; } +336 AUE_PROCINFO ALL { int proc_info(int32_t callnum,int32_t pid,uint32_t flavor, uint64_t arg,user_addr_t buffer,int32_t buffersize) NO_SYSCALL_STUB; } #if SENDFILE -337 AUE_SENDFILE ALL { int sendfile(int fd, int s, off_t offset, off_t *nbytes, struct sf_hdtr *hdtr, int flags); } +337 AUE_SENDFILE ALL { int sendfile(int fd, int s, off_t offset, off_t *nbytes, struct sf_hdtr *hdtr, int flags); } #else /* !SENDFILE */ 337 AUE_NULL ALL { int nosys(void); } #endif /* SENDFILE */ -338 AUE_STAT64 ALL { int stat64(user_addr_t path, user_addr_t ub); } -339 AUE_FSTAT64 ALL { int fstat64(int fd, user_addr_t ub); } -340 AUE_LSTAT64 ALL { int lstat64(user_addr_t path, user_addr_t ub); } -341 AUE_STAT64_EXTENDED ALL { int stat64_extended(user_addr_t path, user_addr_t ub, user_addr_t xsecurity, user_addr_t xsecurity_size) NO_SYSCALL_STUB; } -342 AUE_LSTAT64_EXTENDED ALL { int lstat64_extended(user_addr_t path, user_addr_t ub, user_addr_t xsecurity, user_addr_t xsecurity_size) NO_SYSCALL_STUB; } -343 AUE_FSTAT64_EXTENDED ALL { int fstat64_extended(int fd, user_addr_t ub, user_addr_t xsecurity, user_addr_t xsecurity_size) NO_SYSCALL_STUB; } -344 AUE_GETDIRENTRIES64 ALL { user_ssize_t getdirentries64(int fd, void *buf, user_size_t bufsize, off_t *position) NO_SYSCALL_STUB; } -345 AUE_STATFS64 ALL { int statfs64(char *path, struct statfs64 *buf); } -346 AUE_FSTATFS64 ALL { int fstatfs64(int fd, struct statfs64 *buf); } -347 AUE_GETFSSTAT64 ALL { int getfsstat64(user_addr_t buf, int bufsize, int flags); } -348 AUE_NULL ALL { int __pthread_chdir(user_addr_t path); } -349 AUE_NULL ALL { int __pthread_fchdir(int fd); } -350 AUE_AUDIT ALL { int audit(void *record, int length); } -351 AUE_AUDITON ALL { int auditon(int cmd, void *data, int length); } -352 AUE_NULL ALL { int nosys(void); } -353 AUE_GETAUID ALL { int getauid(au_id_t *auid); } -354 AUE_SETAUID ALL { int setauid(au_id_t *auid); } +338 AUE_STAT64 ALL { int stat64(user_addr_t path, user_addr_t ub); } +339 AUE_FSTAT64 ALL { int sys_fstat64(int fd, user_addr_t ub); } +340 AUE_LSTAT64 ALL { int lstat64(user_addr_t path, user_addr_t ub); } +341 AUE_STAT64_EXTENDED ALL { int stat64_extended(user_addr_t path, user_addr_t ub, user_addr_t xsecurity, user_addr_t xsecurity_size) NO_SYSCALL_STUB; } +342 AUE_LSTAT64_EXTENDED ALL { int lstat64_extended(user_addr_t path, user_addr_t ub, user_addr_t xsecurity, user_addr_t xsecurity_size) NO_SYSCALL_STUB; } +343 AUE_FSTAT64_EXTENDED ALL { int sys_fstat64_extended(int fd, user_addr_t ub, user_addr_t xsecurity, user_addr_t xsecurity_size) NO_SYSCALL_STUB; } +344 AUE_GETDIRENTRIES64 ALL { user_ssize_t getdirentries64(int fd, void *buf, user_size_t bufsize, off_t *position) NO_SYSCALL_STUB; } +345 AUE_STATFS64 ALL { int statfs64(char *path, struct statfs64 *buf); } +346 AUE_FSTATFS64 ALL { int fstatfs64(int fd, struct statfs64 *buf); } +347 AUE_GETFSSTAT64 ALL { int getfsstat64(user_addr_t buf, int bufsize, int flags); } +348 AUE_NULL ALL { int __pthread_chdir(user_addr_t path); } +349 AUE_NULL ALL { int __pthread_fchdir(int fd); } +350 AUE_AUDIT ALL { int audit(void *record, int length); } +351 AUE_AUDITON ALL { int auditon(int cmd, void *data, int length); } 
+352 AUE_NULL ALL { int nosys(void); } +353 AUE_GETAUID ALL { int getauid(au_id_t *auid); } +354 AUE_SETAUID ALL { int setauid(au_id_t *auid); } 355 AUE_NULL ALL { int nosys(void); } { old getaudit } 356 AUE_NULL ALL { int nosys(void); } { old setaudit } -357 AUE_GETAUDIT_ADDR ALL { int getaudit_addr(struct auditinfo_addr *auditinfo_addr, int length); } -358 AUE_SETAUDIT_ADDR ALL { int setaudit_addr(struct auditinfo_addr *auditinfo_addr, int length); } -359 AUE_AUDITCTL ALL { int auditctl(char *path); } +357 AUE_GETAUDIT_ADDR ALL { int getaudit_addr(struct auditinfo_addr *auditinfo_addr, int length); } +358 AUE_SETAUDIT_ADDR ALL { int setaudit_addr(struct auditinfo_addr *auditinfo_addr, int length); } +359 AUE_AUDITCTL ALL { int auditctl(char *path); } #if CONFIG_WORKQUEUE -360 AUE_NULL ALL { user_addr_t bsdthread_create(user_addr_t func, user_addr_t func_arg, user_addr_t stack, user_addr_t pthread, uint32_t flags) NO_SYSCALL_STUB; } -361 AUE_NULL ALL { int bsdthread_terminate(user_addr_t stackaddr, size_t freesize, uint32_t port, uint32_t sem) NO_SYSCALL_STUB; } +360 AUE_NULL ALL { user_addr_t bsdthread_create(user_addr_t func, user_addr_t func_arg, user_addr_t stack, user_addr_t pthread, uint32_t flags) NO_SYSCALL_STUB; } +361 AUE_NULL ALL { int bsdthread_terminate(user_addr_t stackaddr, size_t freesize, uint32_t port, uint32_t sem) NO_SYSCALL_STUB; } #else -360 AUE_NULL ALL { int nosys(void); } -361 AUE_NULL ALL { int nosys(void); } +360 AUE_NULL ALL { int nosys(void); } +361 AUE_NULL ALL { int nosys(void); } #endif /* CONFIG_WORKQUEUE */ -362 AUE_KQUEUE ALL { int kqueue(void); } -363 AUE_NULL ALL { int kevent(int fd, const struct kevent *changelist, int nchanges, struct kevent *eventlist, int nevents, const struct timespec *timeout); } +362 AUE_KQUEUE ALL { int kqueue(void); } +363 AUE_NULL ALL { int kevent(int fd, const struct kevent *changelist, int nchanges, struct kevent *eventlist, int nevents, const struct timespec *timeout); } 364 AUE_LCHOWN ALL { int lchown(user_addr_t path, uid_t owner, gid_t group) NO_SYSCALL_STUB; } 365 AUE_NULL ALL { int nosys(void); } { old stack_snapshot } #if CONFIG_WORKQUEUE -366 AUE_NULL ALL { int bsdthread_register(user_addr_t threadstart, user_addr_t wqthread, uint32_t flags, user_addr_t stack_addr_hint, user_addr_t targetconc_ptr, uint32_t dispatchqueue_offset, uint32_t tsd_offset) NO_SYSCALL_STUB; } +366 AUE_NULL ALL { int bsdthread_register(user_addr_t threadstart, user_addr_t wqthread, uint32_t flags, user_addr_t stack_addr_hint, user_addr_t targetconc_ptr, uint32_t dispatchqueue_offset, uint32_t tsd_offset) NO_SYSCALL_STUB; } 367 AUE_WORKQOPEN ALL { int workq_open(void) NO_SYSCALL_STUB; } 368 AUE_WORKQOPS ALL { int workq_kernreturn(int options, user_addr_t item, int affinity, int prio) NO_SYSCALL_STUB; } #else -366 AUE_NULL ALL { int nosys(void); } -367 AUE_NULL ALL { int nosys(void); } -368 AUE_NULL ALL { int nosys(void); } +366 AUE_NULL ALL { int nosys(void); } +367 AUE_NULL ALL { int nosys(void); } +368 AUE_NULL ALL { int nosys(void); } #endif /* CONFIG_WORKQUEUE */ -369 AUE_NULL ALL { int kevent64(int fd, const struct kevent64_s *changelist, int nchanges, struct kevent64_s *eventlist, int nevents, unsigned int flags, const struct timespec *timeout); } +369 AUE_NULL ALL { int kevent64(int fd, const struct kevent64_s *changelist, int nchanges, struct kevent64_s *eventlist, int nevents, unsigned int flags, const struct timespec *timeout); } #if OLD_SEMWAIT_SIGNAL 370 AUE_SEMWAITSIGNAL ALL { int __old_semwait_signal(int cond_sem, int mutex_sem, 
int timeout, int relative, const struct timespec *ts); } 371 AUE_SEMWAITSIGNAL ALL { int __old_semwait_signal_nocancel(int cond_sem, int mutex_sem, int timeout, int relative, const struct timespec *ts) NO_SYSCALL_STUB; } @@ -563,26 +565,26 @@ 370 AUE_NULL ALL { int nosys(void); } { old __semwait_signal } 371 AUE_NULL ALL { int nosys(void); } { old __semwait_signal } #endif -372 AUE_NULL ALL { uint64_t thread_selfid (void) NO_SYSCALL_STUB; } -373 AUE_LEDGER ALL { int ledger(int cmd, caddr_t arg1, caddr_t arg2, caddr_t arg3); } -374 AUE_NULL ALL { int kevent_qos(int fd, const struct kevent_qos_s *changelist, int nchanges, struct kevent_qos_s *eventlist, int nevents, void *data_out, size_t *data_available, unsigned int flags); } -375 AUE_NULL ALL { int kevent_id(uint64_t id, const struct kevent_qos_s *changelist, int nchanges, struct kevent_qos_s *eventlist, int nevents, void *data_out, size_t *data_available, unsigned int flags); } -376 AUE_NULL ALL { int nosys(void); } -377 AUE_NULL ALL { int nosys(void); } -378 AUE_NULL ALL { int nosys(void); } -379 AUE_NULL ALL { int nosys(void); } -380 AUE_MAC_EXECVE ALL { int __mac_execve(char *fname, char **argp, char **envp, struct mac *mac_p); } +372 AUE_NULL ALL { uint64_t thread_selfid (void) NO_SYSCALL_STUB; } +373 AUE_LEDGER ALL { int ledger(int cmd, caddr_t arg1, caddr_t arg2, caddr_t arg3); } +374 AUE_NULL ALL { int kevent_qos(int fd, const struct kevent_qos_s *changelist, int nchanges, struct kevent_qos_s *eventlist, int nevents, void *data_out, size_t *data_available, unsigned int flags); } +375 AUE_NULL ALL { int kevent_id(uint64_t id, const struct kevent_qos_s *changelist, int nchanges, struct kevent_qos_s *eventlist, int nevents, void *data_out, size_t *data_available, unsigned int flags); } +376 AUE_NULL ALL { int nosys(void); } +377 AUE_NULL ALL { int nosys(void); } +378 AUE_NULL ALL { int nosys(void); } +379 AUE_NULL ALL { int nosys(void); } +380 AUE_MAC_EXECVE ALL { int __mac_execve(char *fname, char **argp, char **envp, struct mac *mac_p); } #if CONFIG_MACF -381 AUE_MAC_SYSCALL ALL { int __mac_syscall(char *policy, int call, user_addr_t arg); } -382 AUE_MAC_GET_FILE ALL { int __mac_get_file(char *path_p, struct mac *mac_p); } -383 AUE_MAC_SET_FILE ALL { int __mac_set_file(char *path_p, struct mac *mac_p); } -384 AUE_MAC_GET_LINK ALL { int __mac_get_link(char *path_p, struct mac *mac_p); } -385 AUE_MAC_SET_LINK ALL { int __mac_set_link(char *path_p, struct mac *mac_p); } -386 AUE_MAC_GET_PROC ALL { int __mac_get_proc(struct mac *mac_p); } -387 AUE_MAC_SET_PROC ALL { int __mac_set_proc(struct mac *mac_p); } -388 AUE_MAC_GET_FD ALL { int __mac_get_fd(int fd, struct mac *mac_p); } -389 AUE_MAC_SET_FD ALL { int __mac_set_fd(int fd, struct mac *mac_p); } -390 AUE_MAC_GET_PID ALL { int __mac_get_pid(pid_t pid, struct mac *mac_p); } +381 AUE_MAC_SYSCALL ALL { int __mac_syscall(char *policy, int call, user_addr_t arg); } +382 AUE_MAC_GET_FILE ALL { int __mac_get_file(char *path_p, struct mac *mac_p); } +383 AUE_MAC_SET_FILE ALL { int __mac_set_file(char *path_p, struct mac *mac_p); } +384 AUE_MAC_GET_LINK ALL { int __mac_get_link(char *path_p, struct mac *mac_p); } +385 AUE_MAC_SET_LINK ALL { int __mac_set_link(char *path_p, struct mac *mac_p); } +386 AUE_MAC_GET_PROC ALL { int __mac_get_proc(struct mac *mac_p); } +387 AUE_MAC_SET_PROC ALL { int __mac_set_proc(struct mac *mac_p); } +388 AUE_MAC_GET_FD ALL { int __mac_get_fd(int fd, struct mac *mac_p); } +389 AUE_MAC_SET_FD ALL { int __mac_set_fd(int fd, struct mac *mac_p); } +390 
AUE_MAC_GET_PID ALL { int __mac_get_pid(pid_t pid, struct mac *mac_p); } #else 381 AUE_MAC_SYSCALL ALL { int enosys(void); } 382 AUE_MAC_GET_FILE ALL { int nosys(void); } @@ -600,70 +602,70 @@ 393 AUE_NULL ALL { int enosys(void); } 394 AUE_SELECT ALL { int pselect(int nd, u_int32_t *in, u_int32_t *ou, u_int32_t *ex, const struct timespec *ts, const struct sigset_t *mask) NO_SYSCALL_STUB; } 395 AUE_SELECT ALL { int pselect_nocancel(int nd, u_int32_t *in, u_int32_t *ou, u_int32_t *ex, const struct timespec *ts, const struct sigset_t *mask) NO_SYSCALL_STUB; } -396 AUE_NULL ALL { user_ssize_t read_nocancel(int fd, user_addr_t cbuf, user_size_t nbyte) NO_SYSCALL_STUB; } -397 AUE_NULL ALL { user_ssize_t write_nocancel(int fd, user_addr_t cbuf, user_size_t nbyte) NO_SYSCALL_STUB; } -398 AUE_OPEN_RWTC ALL { int open_nocancel(user_addr_t path, int flags, int mode) NO_SYSCALL_STUB; } -399 AUE_CLOSE ALL { int close_nocancel(int fd) NO_SYSCALL_STUB; } -400 AUE_WAIT4 ALL { int wait4_nocancel(int pid, user_addr_t status, int options, user_addr_t rusage) NO_SYSCALL_STUB; } +396 AUE_NULL ALL { user_ssize_t read_nocancel(int fd, user_addr_t cbuf, user_size_t nbyte) NO_SYSCALL_STUB; } +397 AUE_NULL ALL { user_ssize_t write_nocancel(int fd, user_addr_t cbuf, user_size_t nbyte) NO_SYSCALL_STUB; } +398 AUE_OPEN_RWTC ALL { int open_nocancel(user_addr_t path, int flags, int mode) NO_SYSCALL_STUB; } +399 AUE_CLOSE ALL { int sys_close_nocancel(int fd) NO_SYSCALL_STUB; } +400 AUE_WAIT4 ALL { int wait4_nocancel(int pid, user_addr_t status, int options, user_addr_t rusage) NO_SYSCALL_STUB; } #if SOCKETS -401 AUE_RECVMSG ALL { int recvmsg_nocancel(int s, struct msghdr *msg, int flags) NO_SYSCALL_STUB; } -402 AUE_SENDMSG ALL { int sendmsg_nocancel(int s, caddr_t msg, int flags) NO_SYSCALL_STUB; } -403 AUE_RECVFROM ALL { int recvfrom_nocancel(int s, void *buf, size_t len, int flags, struct sockaddr *from, int *fromlenaddr) NO_SYSCALL_STUB; } -404 AUE_ACCEPT ALL { int accept_nocancel(int s, caddr_t name, socklen_t *anamelen) NO_SYSCALL_STUB; } +401 AUE_RECVMSG ALL { int recvmsg_nocancel(int s, struct msghdr *msg, int flags) NO_SYSCALL_STUB; } +402 AUE_SENDMSG ALL { int sendmsg_nocancel(int s, caddr_t msg, int flags) NO_SYSCALL_STUB; } +403 AUE_RECVFROM ALL { int recvfrom_nocancel(int s, void *buf, size_t len, int flags, struct sockaddr *from, int *fromlenaddr) NO_SYSCALL_STUB; } +404 AUE_ACCEPT ALL { int accept_nocancel(int s, caddr_t name, socklen_t *anamelen) NO_SYSCALL_STUB; } #else 401 AUE_NULL ALL { int nosys(void); } 402 AUE_NULL ALL { int nosys(void); } 403 AUE_NULL ALL { int nosys(void); } 404 AUE_NULL ALL { int nosys(void); } #endif /* SOCKETS */ -405 AUE_MSYNC ALL { int msync_nocancel(caddr_t addr, size_t len, int flags) NO_SYSCALL_STUB; } -406 AUE_FCNTL ALL { int fcntl_nocancel(int fd, int cmd, long arg) NO_SYSCALL_STUB; } -407 AUE_SELECT ALL { int select_nocancel(int nd, u_int32_t *in, u_int32_t *ou, u_int32_t *ex, struct timeval *tv) NO_SYSCALL_STUB; } -408 AUE_FSYNC ALL { int fsync_nocancel(int fd) NO_SYSCALL_STUB; } +405 AUE_MSYNC ALL { int msync_nocancel(caddr_t addr, size_t len, int flags) NO_SYSCALL_STUB; } +406 AUE_FCNTL ALL { int sys_fcntl_nocancel(int fd, int cmd, long arg) NO_SYSCALL_STUB; } +407 AUE_SELECT ALL { int select_nocancel(int nd, u_int32_t *in, u_int32_t *ou, u_int32_t *ex, struct timeval *tv) NO_SYSCALL_STUB; } +408 AUE_FSYNC ALL { int fsync_nocancel(int fd) NO_SYSCALL_STUB; } #if SOCKETS -409 AUE_CONNECT ALL { int connect_nocancel(int s, caddr_t name, socklen_t namelen) NO_SYSCALL_STUB; 
} +409 AUE_CONNECT ALL { int connect_nocancel(int s, caddr_t name, socklen_t namelen) NO_SYSCALL_STUB; } #else 409 AUE_NULL ALL { int nosys(void); } #endif /* SOCKETS */ -410 AUE_NULL ALL { int sigsuspend_nocancel(sigset_t mask) NO_SYSCALL_STUB; } -411 AUE_READV ALL { user_ssize_t readv_nocancel(int fd, struct iovec *iovp, u_int iovcnt) NO_SYSCALL_STUB; } -412 AUE_WRITEV ALL { user_ssize_t writev_nocancel(int fd, struct iovec *iovp, u_int iovcnt) NO_SYSCALL_STUB; } +410 AUE_NULL ALL { int sigsuspend_nocancel(sigset_t mask) NO_SYSCALL_STUB; } +411 AUE_READV ALL { user_ssize_t readv_nocancel(int fd, struct iovec *iovp, u_int iovcnt) NO_SYSCALL_STUB; } +412 AUE_WRITEV ALL { user_ssize_t writev_nocancel(int fd, struct iovec *iovp, u_int iovcnt) NO_SYSCALL_STUB; } #if SOCKETS -413 AUE_SENDTO ALL { int sendto_nocancel(int s, caddr_t buf, size_t len, int flags, caddr_t to, socklen_t tolen) NO_SYSCALL_STUB; } +413 AUE_SENDTO ALL { int sendto_nocancel(int s, caddr_t buf, size_t len, int flags, caddr_t to, socklen_t tolen) NO_SYSCALL_STUB; } #else 413 AUE_NULL ALL { int nosys(void); } #endif /* SOCKETS */ -414 AUE_PREAD ALL { user_ssize_t pread_nocancel(int fd, user_addr_t buf, user_size_t nbyte, off_t offset) NO_SYSCALL_STUB; } -415 AUE_PWRITE ALL { user_ssize_t pwrite_nocancel(int fd, user_addr_t buf, user_size_t nbyte, off_t offset) NO_SYSCALL_STUB; } -416 AUE_WAITID ALL { int waitid_nocancel(idtype_t idtype, id_t id, siginfo_t *infop, int options) NO_SYSCALL_STUB; } -417 AUE_POLL ALL { int poll_nocancel(struct pollfd *fds, u_int nfds, int timeout) NO_SYSCALL_STUB; } +414 AUE_PREAD ALL { user_ssize_t pread_nocancel(int fd, user_addr_t buf, user_size_t nbyte, off_t offset) NO_SYSCALL_STUB; } +415 AUE_PWRITE ALL { user_ssize_t pwrite_nocancel(int fd, user_addr_t buf, user_size_t nbyte, off_t offset) NO_SYSCALL_STUB; } +416 AUE_WAITID ALL { int waitid_nocancel(idtype_t idtype, id_t id, siginfo_t *infop, int options) NO_SYSCALL_STUB; } +417 AUE_POLL ALL { int poll_nocancel(struct pollfd *fds, u_int nfds, int timeout) NO_SYSCALL_STUB; } #if SYSV_MSG -418 AUE_MSGSND ALL { int msgsnd_nocancel(int msqid, void *msgp, size_t msgsz, int msgflg) NO_SYSCALL_STUB; } -419 AUE_MSGRCV ALL { user_ssize_t msgrcv_nocancel(int msqid, void *msgp, size_t msgsz, long msgtyp, int msgflg) NO_SYSCALL_STUB; } +418 AUE_MSGSND ALL { int msgsnd_nocancel(int msqid, void *msgp, size_t msgsz, int msgflg) NO_SYSCALL_STUB; } +419 AUE_MSGRCV ALL { user_ssize_t msgrcv_nocancel(int msqid, void *msgp, size_t msgsz, long msgtyp, int msgflg) NO_SYSCALL_STUB; } #else -418 AUE_NULL ALL { int nosys(void); } -419 AUE_NULL ALL { int nosys(void); } +418 AUE_NULL ALL { int nosys(void); } +419 AUE_NULL ALL { int nosys(void); } #endif -420 AUE_SEMWAIT ALL { int sem_wait_nocancel(sem_t *sem) NO_SYSCALL_STUB; } -421 AUE_NULL ALL { int aio_suspend_nocancel(user_addr_t aiocblist, int nent, user_addr_t timeoutp) NO_SYSCALL_STUB; } -422 AUE_SIGWAIT ALL { int __sigwait_nocancel(user_addr_t set, user_addr_t sig) NO_SYSCALL_STUB; } -;#if OLD_SEMWAIT_SIGNAL +420 AUE_SEMWAIT ALL { int sem_wait_nocancel(sem_t *sem) NO_SYSCALL_STUB; } +421 AUE_NULL ALL { int aio_suspend_nocancel(user_addr_t aiocblist, int nent, user_addr_t timeoutp) NO_SYSCALL_STUB; } +422 AUE_SIGWAIT ALL { int __sigwait_nocancel(user_addr_t set, user_addr_t sig) NO_SYSCALL_STUB; } +;#if OLD_SEMWAIT_SIGNAL ;423 AUE_NULL ALL { int nosys(void); } { old __semwait_signal_nocancel } ;#else 423 AUE_SEMWAITSIGNAL ALL { int __semwait_signal_nocancel(int cond_sem, int mutex_sem, int timeout, int 
relative, int64_t tv_sec, int32_t tv_nsec); } ;#endif -424 AUE_MAC_MOUNT ALL { int __mac_mount(char *type, char *path, int flags, caddr_t data, struct mac *mac_p); } +424 AUE_MAC_MOUNT ALL { int __mac_mount(char *type, char *path, int flags, caddr_t data, struct mac *mac_p); } #if CONFIG_MACF -425 AUE_MAC_GET_MOUNT ALL { int __mac_get_mount(char *path, struct mac *mac_p); } +425 AUE_MAC_GET_MOUNT ALL { int __mac_get_mount(char *path, struct mac *mac_p); } #else 425 AUE_MAC_GET_MOUNT ALL { int nosys(void); } #endif -426 AUE_MAC_GETFSSTAT ALL { int __mac_getfsstat(user_addr_t buf, int bufsize, user_addr_t mac, int macsize, int flags); } +426 AUE_MAC_GETFSSTAT ALL { int __mac_getfsstat(user_addr_t buf, int bufsize, user_addr_t mac, int macsize, int flags); } 427 AUE_FSGETPATH ALL { user_ssize_t fsgetpath(user_addr_t buf, size_t bufsize, user_addr_t fsid, uint64_t objid); } { private fsgetpath (File Manager SPI) } 428 AUE_NULL ALL { mach_port_name_t audit_session_self(void); } 429 AUE_NULL ALL { int audit_session_join(mach_port_name_t port); } -430 AUE_NULL ALL { int fileport_makeport(int fd, user_addr_t portnamep); } -431 AUE_NULL ALL { int fileport_makefd(mach_port_name_t port); } +430 AUE_NULL ALL { int sys_fileport_makeport(int fd, user_addr_t portnamep); } +431 AUE_NULL ALL { int sys_fileport_makefd(mach_port_name_t port); } 432 AUE_NULL ALL { int audit_session_port(au_asid_t asid, user_addr_t portnamep); } 433 AUE_NULL ALL { int pid_suspend(int pid); } 434 AUE_NULL ALL { int pid_resume(int pid); } @@ -681,7 +683,7 @@ 438 AUE_NULL ALL { int shared_region_map_and_slide_np(int fd, uint32_t count, const struct shared_file_mapping_np *mappings, uint32_t slide, uint64_t* slide_start, uint32_t slide_size) NO_SYSCALL_STUB; } 439 AUE_NULL ALL { int kas_info(int selector, void *value, size_t *size); } #if CONFIG_MEMORYSTATUS -440 AUE_NULL ALL { int memorystatus_control(uint32_t command, int32_t pid, uint32_t flags, user_addr_t buffer, size_t buffersize); } +440 AUE_NULL ALL { int memorystatus_control(uint32_t command, int32_t pid, uint32_t flags, user_addr_t buffer, size_t buffersize); } #else 440 AUE_NULL ALL { int nosys(void); } #endif @@ -695,14 +697,14 @@ 447 AUE_CONNECT ALL { int connectx(int socket, const sa_endpoints_t *endpoints, sae_associd_t associd, unsigned int flags, const struct iovec *iov, unsigned int iovcnt, size_t *len, sae_connid_t *connid); } 448 AUE_NULL ALL { int disconnectx(int s, sae_associd_t aid, sae_connid_t cid); } 449 AUE_NULL ALL { int peeloff(int s, sae_associd_t aid); } -450 AUE_SOCKET ALL { int socket_delegate(int domain, int type, int protocol, pid_t epid); } +450 AUE_SOCKET ALL { int socket_delegate(int domain, int type, int protocol, pid_t epid); } #else 447 AUE_NULL ALL { int nosys(void); } 448 AUE_NULL ALL { int nosys(void); } 449 AUE_NULL ALL { int nosys(void); } 450 AUE_NULL ALL { int nosys(void); } #endif /* SOCKETS */ -451 AUE_NULL ALL { int telemetry(uint64_t cmd, uint64_t deadline, uint64_t interval, uint64_t leeway, uint64_t arg4, uint64_t arg5) NO_SYSCALL_STUB; } +451 AUE_NULL ALL { int telemetry(uint64_t cmd, uint64_t deadline, uint64_t interval, uint64_t leeway, uint64_t arg4, uint64_t arg5) NO_SYSCALL_STUB; } #if CONFIG_PROC_UUID_POLICY 452 AUE_NULL ALL { int proc_uuid_policy(uint32_t operation, uuid_t uuid, size_t uuidlen, uint32_t flags); } #else @@ -744,8 +746,8 @@ 473 AUE_READLINKAT ALL { int readlinkat(int fd, user_addr_t path, user_addr_t buf, size_t bufsize); } 474 AUE_SYMLINKAT ALL { int symlinkat(user_addr_t *path1, int fd, user_addr_t 
path2); } 475 AUE_MKDIRAT ALL { int mkdirat(int fd, user_addr_t path, int mode); } -476 AUE_GETATTRLISTAT ALL { int getattrlistat(int fd, const char *path, struct attrlist *alist, void *attributeBuffer, size_t bufferSize, u_long options); } -477 AUE_NULL ALL { int proc_trace_log(pid_t pid, uint64_t uniqueid); } +476 AUE_GETATTRLISTAT ALL { int getattrlistat(int fd, const char *path, struct attrlist *alist, void *attributeBuffer, size_t bufferSize, u_long options); } +477 AUE_NULL ALL { int proc_trace_log(pid_t pid, uint64_t uniqueid); } 478 AUE_NULL ALL { int bsdthread_ctl(user_addr_t cmd, user_addr_t arg1, user_addr_t arg2, user_addr_t arg3) NO_SYSCALL_STUB; } 479 AUE_OPENBYID_RWT ALL { int openbyid_np(user_addr_t fsid, user_addr_t objid, int oflags); } #if SOCKETS @@ -767,7 +769,7 @@ 487 AUE_WRITEV ALL { user_ssize_t guarded_writev_np(int fd, const guardid_t *guard, struct iovec *iovp, int iovcnt); } 488 AUE_RENAMEAT ALL { int renameatx_np(int fromfd, char *from, int tofd, char *to, u_int flags) NO_SYSCALL_STUB; } #if CONFIG_CODE_DECRYPTION -489 AUE_MPROTECT ALL { int mremap_encrypted(caddr_t addr, size_t len, uint32_t cryptid, uint32_t cputype, uint32_t cpusubtype); } +489 AUE_MPROTECT ALL { int mremap_encrypted(caddr_t addr, size_t len, uint32_t cryptid, uint32_t cputype, uint32_t cpusubtype); } #else 489 AUE_NULL ALL { int enosys(void); } #endif @@ -793,9 +795,9 @@ 494 AUE_NULL ALL { int enosys(void); } #endif 495 AUE_NULL ALL { int enosys(void); } -496 AUE_NULL ALL { int enosys(void); } -497 AUE_NULL ALL { int enosys(void); } -498 AUE_NULL ALL { int enosys(void); } +496 AUE_NULL ALL { uint64_t mach_eventlink_signal(mach_port_name_t eventlink_port, uint64_t signal_count) NO_SYSCALL_STUB; } +497 AUE_NULL ALL { uint64_t mach_eventlink_wait_until(mach_port_name_t eventlink_port, uint64_t wait_count, uint64_t deadline, uint32_t clock_id, uint32_t option) NO_SYSCALL_STUB; } +498 AUE_NULL ALL { uint64_t mach_eventlink_signal_wait_until(mach_port_name_t eventlink_port, uint64_t wait_count, uint64_t signal_count, uint64_t deadline, uint32_t clock_id, uint32_t option) NO_SYSCALL_STUB; } 499 AUE_NULL ALL { int work_interval_ctl(uint32_t operation, uint64_t work_interval_id, void *arg, size_t len) NO_SYSCALL_STUB; } 500 AUE_NULL ALL { int getentropy(void *buffer, size_t size); } #if NECP @@ -850,3 +852,14 @@ #endif // CONFIG_COALITIONS 533 AUE_NULL ALL { int log_data(unsigned int tag, unsigned int flags, void *buffer, unsigned int size) NO_SYSCALL_STUB; } 534 AUE_NULL ALL { uint64_t memorystatus_available_memory(void) NO_SYSCALL_STUB; } +535 AUE_NULL ALL { int enosys(void); } +536 AUE_NULL ALL { int shared_region_map_and_slide_2_np(uint32_t files_count, const struct shared_file_np *files, uint32_t mappings_count, const struct shared_file_mapping_slide_np *mappings) NO_SYSCALL_STUB; } +537 AUE_NULL ALL { int pivot_root(const char *new_rootfs_path_before, const char *old_rootfs_path_after); } +538 AUE_TASKINSPECTFORPID ALL { int task_inspect_for_pid(mach_port_name_t target_tport, int pid, mach_port_name_t *t); } +539 AUE_TASKINSPECTFORPID ALL { int task_read_for_pid(mach_port_name_t target_tport, int pid, mach_port_name_t *t); } +540 AUE_PREADV ALL { user_ssize_t sys_preadv(int fd, struct iovec *iovp, int iovcnt, off_t offset); } +541 AUE_PWRITEV ALL { user_ssize_t sys_pwritev(int fd, struct iovec *iovp, int iovcnt, off_t offset); } +542 AUE_PREADV ALL { user_ssize_t sys_preadv_nocancel(int fd, struct iovec *iovp, int iovcnt, off_t offset) NO_SYSCALL_STUB; } +543 AUE_PWRITEV ALL { user_ssize_t 
sys_pwritev_nocancel(int fd, struct iovec *iovp, int iovcnt, off_t offset) NO_SYSCALL_STUB; } +544 AUE_NULL ALL { int ulock_wait2(uint32_t operation, void *addr, uint64_t value, uint64_t timeout, uint64_t value2) NO_SYSCALL_STUB; } +545 AUE_PROCINFO ALL { int proc_info_extended_id(int32_t callnum, int32_t pid, uint32_t flavor, uint32_t flags, uint64_t ext_id, uint64_t arg, user_addr_t buffer, int32_t buffersize) NO_SYSCALL_STUB; } diff --git a/bsd/kern/sysv_ipc.c b/bsd/kern/sysv_ipc.c index 926ce9f7e..ca8167a49 100644 --- a/bsd/kern/sysv_ipc.c +++ b/bsd/kern/sysv_ipc.c @@ -104,8 +104,8 @@ ipcperm(kauth_cred_t cred, struct ipc_perm *perm, int mode_req) int want_mod_controlinfo = (mode_req & IPC_M); int is_member; mode_t mode_owner = (perm->mode & S_IRWXU); - mode_t mode_group = (perm->mode & S_IRWXG) << 3; - mode_t mode_world = (perm->mode & S_IRWXO) << 6; + mode_t mode_group = (mode_t)((perm->mode & S_IRWXG) << 3); + mode_t mode_world = (mode_t)((perm->mode & S_IRWXO) << 6); /* Grant all rights to super user */ if (!suser(cred, (u_short *)NULL)) { diff --git a/bsd/kern/sysv_msg.c b/bsd/kern/sysv_msg.c index c7352a6ce..5769cf276 100644 --- a/bsd/kern/sysv_msg.c +++ b/bsd/kern/sysv_msg.c @@ -1518,7 +1518,7 @@ IPCS_msg_sysctl(__unused struct sysctl_oid *oidp, __unused void *arg1, union { struct user32_IPCS_command u32; struct user_IPCS_command u64; - } ipcs; + } ipcs = { }; struct user32_msqid_ds msqid_ds32 = {}; /* post conversion, 32 bit version */ struct user64_msqid_ds msqid_ds64 = {}; /* post conversion, 64 bit version */ void *msqid_dsp; diff --git a/bsd/kern/sysv_sem.c b/bsd/kern/sysv_sem.c index b6cad0b1b..cf284cd82 100644 --- a/bsd/kern/sysv_sem.c +++ b/bsd/kern/sysv_sem.c @@ -1629,9 +1629,9 @@ IPCS_sem_sysctl(__unused struct sysctl_oid *oidp, __unused void *arg1, union { struct user32_IPCS_command u32; struct user_IPCS_command u64; - } ipcs; - struct user32_semid_ds semid_ds32; /* post conversion, 32 bit version */ - struct user64_semid_ds semid_ds64; /* post conversion, 64 bit version */ + } ipcs = { }; + struct user32_semid_ds semid_ds32 = { }; /* post conversion, 32 bit version */ + struct user64_semid_ds semid_ds64 = { }; /* post conversion, 64 bit version */ void *semid_dsp; size_t ipcs_sz; size_t semid_ds_sz; diff --git a/bsd/kern/sysv_shm.c b/bsd/kern/sysv_shm.c index d31d1f57b..7e778e60d 100644 --- a/bsd/kern/sysv_shm.c +++ b/bsd/kern/sysv_shm.c @@ -280,7 +280,8 @@ shm_deallocate_segment(struct shmid_kernel *shmseg) FREE(shm_handle, M_SHM); } shmseg->u.shm_internal = USER_ADDR_NULL; /* tunnel */ - size = mach_vm_round_page(shmseg->u.shm_segsz); + size = vm_map_round_page(shmseg->u.shm_segsz, + vm_map_page_mask(current_map())); shm_committed -= btoc(size); shm_nused--; shmseg->u.shm_perm.mode = SHMSEG_FREE; @@ -300,7 +301,8 @@ shm_delete_mapping(__unused struct proc *p, struct shmmap_state *shmmap_s, segnum = IPCID_TO_IX(shmmap_s->shmid); shmseg = &shmsegs[segnum]; - size = mach_vm_round_page(shmseg->u.shm_segsz); /* XXX done for us? */ + size = vm_map_round_page(shmseg->u.shm_segsz, + vm_map_page_mask(current_map())); /* XXX done for us? 
*/ if (deallocate) { result = mach_vm_deallocate(current_map(), shmmap_s->va, size); if (result != KERN_SUCCESS) { @@ -383,6 +385,7 @@ shmat(struct proc *p, struct shmat_args *uap, user_addr_t *retval) struct shmmap_state *shmmap_s = NULL; struct shm_handle *shm_handle; mach_vm_address_t attach_va; /* attach address in/out */ + mach_vm_address_t shmlba; mach_vm_size_t map_size; /* size of map entry */ mach_vm_size_t mapped_size; vm_prot_t prot; @@ -465,7 +468,8 @@ shmat(struct proc *p, struct shmat_args *uap, user_addr_t *retval) goto shmat_out; } - map_size = mach_vm_round_page(shmseg->u.shm_segsz); + map_size = vm_map_round_page(shmseg->u.shm_segsz, + vm_map_page_mask(current_map())); prot = VM_PROT_READ; if ((uap->shmflg & SHM_RDONLY) == 0) { prot |= VM_PROT_WRITE; @@ -476,9 +480,10 @@ shmat(struct proc *p, struct shmat_args *uap, user_addr_t *retval) } attach_va = (mach_vm_address_t)uap->shmaddr; + shmlba = vm_map_page_size(current_map()); /* XXX instead of SHMLBA */ if (uap->shmflg & SHM_RND) { - attach_va &= ~(SHMLBA - 1); - } else if ((attach_va & (SHMLBA - 1)) != 0) { + attach_va &= ~(shmlba - 1); + } else if ((attach_va & (shmlba - 1)) != 0) { shmat_ret = EINVAL; goto shmat_out; } @@ -515,10 +520,23 @@ shmat(struct proc *p, struct shmat_args *uap, user_addr_t *retval) for (shm_handle = CAST_DOWN(void *, shmseg->u.shm_internal);/* tunnel */ shm_handle != NULL; shm_handle = shm_handle->shm_handle_next) { + vm_map_size_t chunk_size; + + assert(mapped_size < map_size); + chunk_size = shm_handle->shm_handle_size; + if (chunk_size > map_size - mapped_size) { + /* + * Partial mapping of last chunk due to + * page size mismatch. + */ + assert(vm_map_page_shift(current_map()) < PAGE_SHIFT); + assert(shm_handle->shm_handle_next == NULL); + chunk_size = map_size - mapped_size; + } rv = vm_map_enter_mem_object( current_map(), /* process map */ &attach_va, /* attach address */ - shm_handle->shm_handle_size, /* segment size */ + chunk_size, /* size to map */ (mach_vm_offset_t)0, /* alignment mask */ VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, VM_MAP_KERNEL_FLAGS_NONE, @@ -533,8 +551,8 @@ shmat(struct proc *p, struct shmat_args *uap, user_addr_t *retval) goto out; } - mapped_size += shm_handle->shm_handle_size; - attach_va = attach_va + shm_handle->shm_handle_size; + mapped_size += chunk_size; + attach_va = attach_va + chunk_size; } shmmap_s->shmid = uap->shmid; @@ -1185,9 +1203,9 @@ IPCS_shm_sysctl(__unused struct sysctl_oid *oidp, __unused void *arg1, union { struct user32_IPCS_command u32; struct user_IPCS_command u64; - } ipcs; - struct user32_shmid_ds shmid_ds32 = {}; /* post conversion, 32 bit version */ - struct user_shmid_ds shmid_ds; /* 64 bit version */ + } ipcs = { }; + struct user32_shmid_ds shmid_ds32 = { }; /* post conversion, 32 bit version */ + struct user_shmid_ds shmid_ds = { }; /* 64 bit version */ void *shmid_dsp; size_t ipcs_sz = sizeof(struct user_IPCS_command); size_t shmid_ds_sz = sizeof(struct user_shmid_ds); diff --git a/bsd/kern/trace_codes b/bsd/kern/trace_codes index 929b0b88e..14858edc5 100644 --- a/bsd/kern/trace_codes +++ b/bsd/kern/trace_codes @@ -19,6 +19,127 @@ 0x1020048 KTrap_MachineCheck 0x102004c KTrap_SIMD_FP 0x10203fc KTrap_Preempt +0x1030000 Kernel_Uncategorized_Exc_ARM +0x1030004 Kernel_WF_Exc_ARM +0x103000c Kernel_MCR_MRC_CP15_Exc_ARM +0x1030010 Kernel_MCRR_MRRC_CP15_Exc_ARM +0x1030014 Kernel_MCR_MRC_CP14_Exc_ARM +0x1030018 Kernel_LDC_STC_CP14_Exc_ARM +0x1030020 Kernel_VMRS_Exc_ARM +0x103001c Kernel_SIMD_FP_Exc_ARM +0x1030024 Kernel_Ptrauth_Exc_ARM +0x1030030 
Kernel_MCRR_MRRC_CP14_Exc_ARM +0x1030034 Kernel_BranchTarget_Exc_ARM +0x1030038 Kernel_Illegal_State_Exc_ARM +0x1030044 Kernel_SVC32_Exc_ARM +0x1030048 Kernel_HVC32_Exc_ARM +0x103004c Kernel_SMC32_Exc_ARM +0x1030054 Kernel_SVC64_Exc_ARM +0x1030058 Kernel_HVC64_Exc_ARM +0x103005c Kernel_SMC64_Exc_ARM +0x1030060 Kernel_MSR_Exc_ARM +0x1030064 Kernel_SVE_Exc_ARM +0x1030068 Kernel_ERET_Exc_ARM +0x1030070 Kernel_Ptrauth_Fail_Exc_ARM +0x1030080 Kernel_Instr_Abort_Lower_EL_Exc_ARM +0x1030084 Kernel_Instr_Abort_Same_EL_Exc_ARM +0x1030088 Kernel_PC_Align_Exc_ARM +0x1030090 Kernel_Data_Abort_Lower_EL_Exc_ARM +0x1030094 Kernel_Data_Abort_Same_EL_Exc_ARM +0x1030098 Kernel_SP_Align_Exc_ARM +0x10300a0 Kernel_FP32_Exc_ARM +0x10300b0 Kernel_FP64_Exc_ARM +0x10300c0 Kernel_Breakpoint_Lower_EL_Exc_ARM +0x10300c4 Kernel_Breakpoint_Same_EL_Exc_ARM +0x10300c8 Kernel_Step_Lower_EL_Exc_ARM +0x10300cc Kernel_Step_Same_EL_Exc_ARM +0x10300d0 Kernel_Watchpoint_Lower_EL_Exc_ARM +0x10300d4 Kernel_Watchpoint_Same_EL_Exc_ARM +0x10300e0 Kernel_Bkpt32_Exc_ARM +0x10300e8 Kernel_Vector_Catch_Exc_ARM +0x10300f0 Kernel_Brk64_Exc_ARM +0x10300fc Kernel_Private_Exc_ARM +0x1030400 User_Uncategorized_Exc_ARM +0x1030404 User_WF_Exc_ARM +0x103040c User_MCR_MRC_CP15_Exc_ARM +0x1030410 User_MCRR_MRRC_CP15_Exc_ARM +0x1030414 User_MCR_MRC_CP14_Exc_ARM +0x1030418 User_LDC_STC_CP14_Exc_ARM +0x1030420 User_VMRS_Exc_ARM +0x103041c User_SIMD_FP_Exc_ARM +0x1030424 User_Ptrauth_Exc_ARM +0x1030430 User_MCRR_MRRC_CP14_Exc_ARM +0x1030434 User_BranchTarget_Exc_ARM +0x1030438 User_Illegal_State_Exc_ARM +0x1030444 User_SVC32_Exc_ARM +0x1030448 User_HVC32_Exc_ARM +0x103044c User_SMC32_Exc_ARM +0x1030454 User_SVC64_Exc_ARM +0x1030458 User_HVC64_Exc_ARM +0x103045c User_SMC64_Exc_ARM +0x1030460 User_MSR_Exc_ARM +0x1030464 User_SVE_Exc_ARM +0x1030468 User_ERET_Exc_ARM +0x1030470 User_Ptrauth_Fail_Exc_ARM +0x1030480 User_Instr_Abort_Lower_EL_Exc_ARM +0x1030484 User_Instr_Abort_Same_EL_Exc_ARM +0x1030488 User_PC_Align_Exc_ARM +0x1030490 User_Data_Abort_Lower_EL_Exc_ARM +0x1030494 User_Data_Abort_Same_EL_Exc_ARM +0x1030498 User_SP_Align_Exc_ARM +0x10304a0 User_FP32_Exc_ARM +0x10304b0 User_FP64_Exc_ARM +0x10304c0 User_Breakpoint_Lower_EL_Exc_ARM +0x10304c4 User_Breakpoint_Same_EL_Exc_ARM +0x10304c8 User_Step_Lower_EL_Exc_ARM +0x10304cc User_Step_Same_EL_Exc_ARM +0x10304d0 User_Watchpoint_Lower_EL_Exc_ARM +0x10304d4 User_Watchpoint_Same_EL_Exc_ARM +0x10304e0 User_Bkpt32_Exc_ARM +0x10304e8 User_Vector_Catch_Exc_ARM +0x10304f0 User_Brk64_Exc_ARM +0x10304fc User_Private_Exc_ARM +0x1030800 Guest_Uncategorized_Exc_ARM +0x1030804 Guest_WF_Exc_ARM +0x103080c Guest_MCR_MRC_CP15_Exc_ARM +0x1030810 Guest_MCRR_MRRC_CP15_Exc_ARM +0x1030814 Guest_MCR_MRC_CP14_Exc_ARM +0x1030818 Guest_LDC_STC_CP14_Exc_ARM +0x1030820 Guest_VMRS_Exc_ARM +0x103081c Guest_SIMD_FP_Exc_ARM +0x1030824 Guest_Ptrauth_Exc_ARM +0x1030830 Guest_MCRR_MRRC_CP14_Exc_ARM +0x1030834 Guest_BranchTarget_Exc_ARM +0x1030838 Guest_Illegal_State_Exc_ARM +0x1030844 Guest_SVC32_Exc_ARM +0x1030848 Guest_HVC32_Exc_ARM +0x103084c Guest_SMC32_Exc_ARM +0x1030854 Guest_SVC64_Exc_ARM +0x1030858 Guest_HVC64_Exc_ARM +0x103085c Guest_SMC64_Exc_ARM +0x1030860 Guest_MSR_Exc_ARM +0x1030864 Guest_SVE_Exc_ARM +0x1030868 Guest_ERET_Exc_ARM +0x1030870 Guest_Ptrauth_Fail_Exc_ARM +0x1030880 Guest_Instr_Abort_Lower_EL_Exc_ARM +0x1030884 Guest_Instr_Abort_Same_EL_Exc_ARM +0x1030888 Guest_PC_Align_Exc_ARM +0x1030890 Guest_Data_Abort_Lower_EL_Exc_ARM +0x1030894 Guest_Data_Abort_Same_EL_Exc_ARM +0x1030898 Guest_SP_Align_Exc_ARM +0x10308a0 
Guest_FP32_Exc_ARM +0x10308b0 Guest_FP64_Exc_ARM +0x10308c0 Guest_Breakpoint_Lower_EL_Exc_ARM +0x10308c4 Guest_Breakpoint_Same_EL_Exc_ARM +0x10308c8 Guest_Step_Lower_EL_Exc_ARM +0x10308cc Guest_Step_Same_EL_Exc_ARM +0x10308d0 Guest_Watchpoint_Lower_EL_Exc_ARM +0x10308d4 Guest_Watchpoint_Same_EL_Exc_ARM +0x10308e0 Guest_Bkpt32_Exc_ARM +0x10308e8 Guest_Vector_Catch_Exc_ARM +0x10308f0 Guest_Brk64_Exc_ARM +0x10308fc Guest_Private_Exc_ARM +0x1040000 SError_ARM 0x1050000 INTERRUPT 0x1070000 UTrap_DivideError 0x1070004 UTrap_Debug @@ -58,19 +179,19 @@ 0x109003C TMR_Rescan 0x1090040 TMR_set_apic_deadline 0x10c0000 MACH_SysCall -0x10c0004 MSC_kern_invalid_#1 -0x10c0008 MSC_kern_invalid_#2 -0x10c000c MSC_kern_invalid_#3 -0x10c0010 MSC_kern_invalid_#4 -0x10c0014 MSC_kern_invalid_#5 -0x10c0018 MSC_kern_invalid_#6 -0x10c001c MSC_kern_invalid_#7 -0x10c0020 MSC_kern_invalid_#8 -0x10c0024 MSC_kern_invalid_#9 +0x10c0004 MSC_kern_invalid_1 +0x10c0008 MSC_kern_invalid_2 +0x10c000c MSC_kern_invalid_3 +0x10c0010 MSC_kern_invalid_4 +0x10c0014 MSC_kern_invalid_5 +0x10c0018 MSC_kern_invalid_6 +0x10c001c MSC_kern_invalid_7 +0x10c0020 MSC_kern_invalid_8 +0x10c0024 MSC_kern_invalid_9 0x10c0028 MSC_mach_vm_allocate_trap -0x10c002c MSC_kern_invalid_#11 +0x10c002c MSC_kern_mach_vm_purgable_control_trap 0x10c0030 MSC_mach_vm_deallocate_trap -0x10c0034 MSC_kern_invalid_#13 +0x10c0034 MSC_kern_invalid_13 0x10c0038 MSC_mach_vm_protect_trap 0x10c003c MSC_mach_vm_map_trap 0x10c0040 MSC_mach_port_allocate_trap @@ -87,7 +208,7 @@ 0x10c006c MSC_thread_self_trap 0x10c0070 MSC_task_self_trap 0x10c0074 MSC_host_self_trap -0x10c0078 MSC_kern_invalid_#30 +0x10c0078 MSC_kern_invalid_30 0x10c007c MSC_mach_msg_trap 0x10c0080 MSC_mach_msg_overwrite_trap 0x10c0084 MSC_semaphore_signal_trap @@ -104,48 +225,48 @@ 0x10c00b0 MSC_task_name_for_pid 0x10c00b4 MSC_task_for_pid 0x10c00b8 MSC_pid_for_task -0x10c00bc MSC_kern_invalid_#47 +0x10c00bc MSC_kern_invalid_47 0x10c00c0 MSC_macx_swapon 0x10c00c4 MSC_macx_swapoff 0x10c00c8 MSC_thread_get_special_reply_port 0x10c00cc MSC_macx_triggers 0x10c00d0 MSC_macx_backing_store_suspend 0x10c00d4 MSC_macx_backing_store_recovery -0x10c00d8 MSC_kern_invalid_#54 -0x10c00dc MSC_kern_invalid_#55 -0x10c00e0 MSC_kern_invalid_#56 -0x10c00e4 MSC_kern_invalid_#57 +0x10c00d8 MSC_kern_invalid_54 +0x10c00dc MSC_kern_invalid_55 +0x10c00e0 MSC_kern_invalid_56 +0x10c00e4 MSC_kern_invalid_57 0x10c00e8 MSC_pfz_exit 0x10c00ec MSC_swtch_pri 0x10c00f0 MSC_swtch 0x10c00f4 MSC_thread_switch 0x10c00f8 MSC_clock_sleep_trap -0x10c00fc MSC_kern_invalid_#63 -0x10c0100 MSC_kern_invalid_#64 -0x10c0104 MSC_kern_invalid_#65 -0x10c0108 MSC_kern_invalid_#66 -0x10c010c MSC_kern_invalid_#67 -0x10c0110 MSC_kern_invalid_#68 -0x10c0114 MSC_kern_invalid_#69 +0x10c00fc MSC_kern_invalid_63 +0x10c0100 MSC_kern_invalid_64 +0x10c0104 MSC_kern_invalid_65 +0x10c0108 MSC_kern_invalid_66 +0x10c010c MSC_kern_invalid_67 +0x10c0110 MSC_kern_invalid_68 +0x10c0114 MSC_kern_invalid_69 0x10c0118 MSC_host_create_mach_voucher_trap -0x10c011c MSC_kern_invalid_#71 +0x10c011c MSC_kern_invalid_71 0x10c0120 MSC_mach_voucher_extract_attr_recipe_trap -0x10c0124 MSC_kern_invalid_#73 -0x10c0128 MSC_kern_invalid_#74 -0x10c012c MSC_kern_invalid_#75 +0x10c0124 MSC_kern_invalid_73 +0x10c0128 MSC_kern_invalid_74 +0x10c012c MSC_kern_invalid_75 0x10c0130 MSC_mach_port_type_trap 0x10c0134 MSC_mach_port_request_notification_trap -0x10c0138 MSC_kern_invalid_#78 -0x10c013c MSC_kern_invalid_#79 -0x10c0140 MSC_kern_invalid_#80 -0x10c0144 MSC_kern_invalid_#81 -0x10c0148 
MSC_kern_invalid_#82 -0x10c014c MSC_kern_invalid_#83 -0x10c0150 MSC_kern_invalid_#84 -0x10c0154 MSC_kern_invalid_#85 -0x10c0158 MSC_kern_invalid_#86 -0x10c015c MSC_kern_invalid_#87 -0x10c0160 MSC_kern_invalid_#88 +0x10c0138 MSC_kern_invalid_78 +0x10c013c MSC_kern_invalid_79 +0x10c0140 MSC_kern_invalid_80 +0x10c0144 MSC_kern_invalid_81 +0x10c0148 MSC_kern_invalid_82 +0x10c014c MSC_kern_invalid_83 +0x10c0150 MSC_kern_invalid_84 +0x10c0154 MSC_kern_invalid_85 +0x10c0158 MSC_kern_invalid_86 +0x10c015c MSC_kern_invalid_87 +0x10c0160 MSC_kern_invalid_88 0x10c0164 MSC_mach_timebase_info 0x10c0168 MSC_mach_wait_until 0x10c016c MSC_mk_timer_create @@ -153,38 +274,38 @@ 0x10c0174 MSC_mk_timer_arm 0x10c0178 MSC_mk_timer_cancel 0x10c017c MSC_mk_timer_arm_leeway -0x10c0180 MSC_kern_invalid_#96 -0x10c0184 MSC_kern_invalid_#97 -0x10c0188 MSC_kern_invalid_#98 -0x10c018c MSC_kern_invalid_#99 +0x10c0180 MSC_debug_control_port_for_pid +0x10c0184 MSC_kern_invalid_97 +0x10c0188 MSC_kern_invalid_98 +0x10c018c MSC_kern_invalid_99 0x10c0190 MSC_iokit_user_client -0x10c0194 MSC_kern_invalid_#101 -0x10c0198 MSC_kern_invalid_#102 -0x10c019c MSC_kern_invalid_#103 -0x10c01a0 MSC_kern_invalid_#104 -0x10c01a4 MSC_kern_invalid_#105 -0x10c01a8 MSC_kern_invalid_#106 -0x10c01ac MSC_kern_invalid_#107 -0x10c01b0 MSC_kern_invalid_#108 -0x10c01b4 MSC_kern_invalid_#109 -0x10c01b8 MSC_kern_invalid_#110 -0x10c01bc MSC_kern_invalid_#111 -0x10c01c0 MSC_kern_invalid_#112 -0x10c01c4 MSC_kern_invalid_#113 -0x10c01c8 MSC_kern_invalid_#114 -0x10c01cc MSC_kern_invalid_#115 -0x10c01d0 MSC_kern_invalid_#116 -0x10c01d4 MSC_kern_invalid_#117 -0x10c01d8 MSC_kern_invalid_#118 -0x10c01dc MSC_kern_invalid_#119 -0x10c01e0 MSC_kern_invalid_#120 -0x10c01e4 MSC_kern_invalid_#121 -0x10c01e8 MSC_kern_invalid_#122 -0x10c01ec MSC_kern_invalid_#123 -0x10c01f0 MSC_kern_invalid_#124 -0x10c01f4 MSC_kern_invalid_#125 -0x10c01f8 MSC_kern_invalid_#126 -0x10c01fc MSC_kern_invalid_#127 +0x10c0194 MSC_kern_invalid_101 +0x10c0198 MSC_kern_invalid_102 +0x10c019c MSC_kern_invalid_103 +0x10c01a0 MSC_kern_invalid_104 +0x10c01a4 MSC_kern_invalid_105 +0x10c01a8 MSC_kern_invalid_106 +0x10c01ac MSC_kern_invalid_107 +0x10c01b0 MSC_kern_invalid_108 +0x10c01b4 MSC_kern_invalid_109 +0x10c01b8 MSC_kern_invalid_110 +0x10c01bc MSC_kern_invalid_111 +0x10c01c0 MSC_kern_invalid_112 +0x10c01c4 MSC_kern_invalid_113 +0x10c01c8 MSC_kern_invalid_114 +0x10c01cc MSC_kern_invalid_115 +0x10c01d0 MSC_kern_invalid_116 +0x10c01d4 MSC_kern_invalid_117 +0x10c01d8 MSC_kern_invalid_118 +0x10c01dc MSC_kern_invalid_119 +0x10c01e0 MSC_kern_invalid_120 +0x10c01e4 MSC_kern_invalid_121 +0x10c01e8 MSC_kern_invalid_122 +0x10c01ec MSC_kern_invalid_123 +0x10c01f0 MSC_kern_invalid_124 +0x10c01f4 MSC_kern_invalid_125 +0x10c01f8 MSC_kern_invalid_126 +0x10c01fc MSC_kern_invalid_127 0x1200000 MACH_task_suspend 0x1200004 MACH_task_resume 0x1200008 MACH_thread_set_voucher @@ -256,6 +377,7 @@ 0x13004d4 MACH_vm_kern_request 0x1300500 MACH_vm_data_write 0x1300504 vm_pressure_level_change +0x1300508 MACH_vm_phys_write_acct 0x1320000 vm_disconnect_all_page_mappings 0x1320004 vm_disconnect_task_page_mappings 0x1320008 RealFaultAddressInternal @@ -321,7 +443,9 @@ 0x14000E0 MACH_QUIESCENT_COUNTER 0x14000E4 MACH_TURNSTILE_USER_CHANGE 0x14000E8 MACH_AMP_RECOMMENDATION_CHANGE +0x14000EC MACH_AMP_PERFCTL_POLICY_CHANGE 0x1400100 MACH_TURNSTILE_KERNEL_CHANGE +0x1400140 MACH_PSET_AVG_EXEC_TIME 0x1500000 MACH_MSGID_INVALID 0x1600000 MTX_SLEEP 0x1600004 MTX_SLEEP_DEADLINE @@ -370,6 +494,11 @@ 0x1700040 PMAP_flush_EPT 0x1700044 
PMAP_fast_fault 0x1700048 PMAP_switch +0x170004c PMAP_tte +0x1700050 PMAP_switch_user_ttb +0x1700054 PMAP_update_caching +0x1700058 PMAP_attribute_clear_range +0x170005c PMAP_clear_user_ttb 0x1800000 MACH_CLOCK_EPOCH_CHANGE 0x1800004 MACH_CLOCK_BRIDGE_RCV_TS 0x1800008 MACH_CLOCK_BRIDGE_REMOTE_TIME @@ -409,6 +538,7 @@ 0x1a6000c THREAD_GROUP_NAME 0x1a60010 THREAD_GROUP_NAME_FREE 0x1a60014 THREAD_GROUP_FLAGS +0x1a60018 THREAD_GROUP_BLOCK 0x1a70000 COALITION_NEW 0x1a70004 COALITION_FREE 0x1a70008 COALITION_ADOPT @@ -893,6 +1023,28 @@ 0x30A00F8 SMB_smbfs_get_max_access 0x30A00FC SMB_smbfs_lookup 0x30A0100 SMB_smbfs_notify +0x30A0104 SMB_get_attrlist_bulk +0x30A0108 SMB_smbfs_fetch_new_entries +0x30A010C SMB_smbfs_handle_lease_break +0x30A0110 SMB_smb_rq_sign +0x30A0114 SMB_smb_rq_verify +0x30A0118 SMB_smb_rq_encrypt +0x30A011C SMB_smb_rq_decrypt +0x30A0120 SMB_smb_dir_cache_check +0x30A0124 SMB_smb_dir_cache_remove +0x30A0128 SMB_global_dir_cache_cnt +0x30A012C SMB_global_dir_low_memory +0x30A0130 SMB_iod_muxcnt +0x30A0134 SMB_iod_reconnect +0x30A0138 SMB_smb_echo +0x30A013C SMB_rq_reply_time +0x30A0140 SMB_curr_credits +0x30A0144 SMB_max_credits +0x30A0148 SMB_smb_rw_thread +0x30A014C SMB_read_quantum_size +0x30A0150 SMB_write_quantum_size +0x30A0154 SMB_read_bytes_per_sec +0x30A0158 SMB_write_bytes_per_sec 0x30B0000 VFS_MountRoot 0x3110004 OpenThrottleWindow 0x3110008 CauseIOThrottle @@ -995,6 +1147,7 @@ 0x3130168 VFS_devfs_label_associate_directory 0x313016C VFS_label_associate_fdesc 0x3130170 VFS_mount_check_snapshot_mount +0x3130174 VFS_check_supplemental_signature 0x3CF0000 CP_OFFSET_IO 0x4010004 proc_exit 0x4010008 force_exit @@ -1125,6 +1278,12 @@ 0x50700f4 PM_PCIDevChangeDone 0x50700f8 PM_SleepWakeMessage 0x50700fc PM_DriverPSChangeDelay +0x5060004 IOMDESC_WIRE +0x5060008 IOMDESC_PREPARE +0x506000c IOMDESC_MAP +0x5060010 IOMDESC_UNMAP +0x5060014 IOMDESC_DMA_MAP +0x5060018 IOMDESC_DMAP_UNMAP 0x5080004 IOSERVICE_BUSY 0x5080008 IOSERVICE_NONBUSY 0x508000c IOSERVICE_MODULESTALL @@ -1317,6 +1476,7 @@ 0x53102A0 CPUPM_PST_LOAD_CAPTURE 0x53102A4 CPUPM_PSTATE_HWP_MODE 0x53102A8 CPUPM_PST_WR_REASON +0x5310300 CPUPM_IDLE_WFE 0x5330000 HIBERNATE 0x5330004 HIBERNATE_WRITE_IMAGE 0x5330008 HIBERNATE_MACHINE_INIT @@ -1453,6 +1613,24 @@ 0x11004100 NC_lock_shared 0x11004104 NC_lock_exclusive 0x11004108 NC_unlock +0x01a90000 SCHED_CLUTCH_ROOT_BUCKET_STATE +0x01a90004 SCHED_CLUTCH_TG_BUCKET_STATE +0x01a90008 SCHED_CLUTCH_CPU_THREAD_SELECT +0x01a9000c SCHED_CLUTCH_THREAD_STATE +0x01a90010 SCHED_CLUTCH_TG_BUCKET_PRI +0x01a90014 MACH_SCHED_EDGE_CLUSTER_OVERLOAD +0x01a90018 MACH_SCHED_EDGE_STEAL +0x01a9001c MACH_SCHED_EDGE_REBAL_RUNNABLE +0x01a90020 MACH_SCHED_EDGE_REBAL_RUNNING +0x01a90024 MACH_SCHED_EDGE_SHOULD_YIELD +0x01a90028 MACH_SCHED_CLUTCH_THR_COUNT +0x01a9002c MACH_SCHED_EDGE_LOAD_AVG +0x01ab0000 WORKGROUP_INTERVAL_CREATE +0x01ab0004 WORKGROUP_INTERVAL_DESTROY +0x01ab0008 WORKGROUP_INTERVAL_CHANGE +0x01ab000c WORKGROUP_INTERVAL_START +0x01ab0010 WORKGROUP_INTERVAL_UPDATE +0x01ab0014 WORKGROUP_INTERVAL_FINISH 0x1e000000 SEC_ENTROPY_READ0 0x1e000004 SEC_ENTROPY_READ1 0x1e000008 SEC_ENTROPY_READ2 @@ -1719,7 +1897,6 @@ 0x26240020 imp_thread_int_passive_io 0x26240024 imp_thread_ext_passive_io 0x26270018 imp_task_dbg_iopol -0x26280018 imp_task_tal 0x26290018 imp_task_boost 0x262a0018 imp_task_role 0x262b0018 imp_task_suppressed_cpu diff --git a/bsd/kern/tty.c b/bsd/kern/tty.c index 814898803..fbb861b00 100644 --- a/bsd/kern/tty.c +++ b/bsd/kern/tty.c @@ -138,6 +138,9 @@ static void ttydeallocate(struct 
tty *tp); static int isctty(proc_t p, struct tty *tp); static int isctty_sp(proc_t p, struct tty *tp, struct session *sessp); +__private_extern__ void termios32to64(struct termios32 *in, struct user_termios *out); +__private_extern__ void termios64to32(struct user_termios *in, struct termios32 *out); + /* * Table with character classes and parity. The 8th bit indicates parity, * the 7th bit indicates the character is an alphameric or underscore (for @@ -226,7 +229,7 @@ static u_char const char_type[] = { #define I_HIGH_WATER (TTYHOG - 2 * 256) /* XXX */ #define I_LOW_WATER ((TTYHOG - 2 * 256) * 7 / 8) /* XXX */ -static void +__private_extern__ void termios32to64(struct termios32 *in, struct user_termios *out) { out->c_iflag = (user_tcflag_t)in->c_iflag; @@ -241,19 +244,19 @@ termios32to64(struct termios32 *in, struct user_termios *out) out->c_ospeed = (user_speed_t)in->c_ospeed; } -static void +__private_extern__ void termios64to32(struct user_termios *in, struct termios32 *out) { - out->c_iflag = (tcflag_t)in->c_iflag; - out->c_oflag = (tcflag_t)in->c_oflag; - out->c_cflag = (tcflag_t)in->c_cflag; - out->c_lflag = (tcflag_t)in->c_lflag; + out->c_iflag = (uint32_t)in->c_iflag; + out->c_oflag = (uint32_t)in->c_oflag; + out->c_cflag = (uint32_t)in->c_cflag; + out->c_lflag = (uint32_t)in->c_lflag; /* bcopy is OK, since this type is ILP32/LP64 size invariant */ bcopy(in->c_cc, out->c_cc, sizeof(in->c_cc)); - out->c_ispeed = (speed_t)in->c_ispeed; - out->c_ospeed = (speed_t)in->c_ospeed; + out->c_ispeed = (uint32_t)MIN(in->c_ispeed, UINT32_MAX); + out->c_ospeed = (uint32_t)MIN(in->c_ospeed, UINT32_MAX); } @@ -934,44 +937,6 @@ ttyoutput(int c, struct tty *tp) return -1; } -/* - * Sets the tty state to not allow any more changes of foreground process - * group. This is required to be done so that a subsequent revoke on a vnode - * is able to always successfully complete. - * - * Locks : Assumes tty_lock held on entry - */ -void -ttysetpgrphup(struct tty *tp) -{ - TTY_LOCK_OWNED(tp); /* debug assert */ - SET(tp->t_state, TS_PGRPHUP); - /* - * Also wake up sleeping readers which may or may not belong to the - * current foreground process group. - * - * This forces any non-fg readers (which entered read when - * that process group was in the fg) to return with EIO (if they're - * catching SIGTTIN or with SIGTTIN). The ones which do belong to the fg - * process group will promptly go back to sleep and get a SIGHUP shortly - * This would normally happen as part of the close in revoke but if - * there is a sleeping reader from a non-fg process group we never get - * to the close because the sleeping reader holds an iocount on the - * vnode of the terminal which is going to get revoked->reclaimed. 
- */ - wakeup(TSA_HUP_OR_INPUT(tp)); -} - -/* - * Locks : Assumes tty lock held on entry - */ -void -ttyclrpgrphup(struct tty *tp) -{ - TTY_LOCK_OWNED(tp); /* debug assert */ - CLR(tp->t_state, TS_PGRPHUP); -} - /* * ttioctl * @@ -1163,9 +1128,8 @@ ttioctl_locked(struct tty *tp, u_long cmd, caddr_t data, int flag, proc_t p) case TIOCSCONS: { /* Set current console device to this line */ data = (caddr_t) &bogusData; - - /* No break - Fall through to BSD code */ } + OS_FALLTHROUGH; case TIOCCONS: { /* become virtual console */ if (*(int *)data) { if (constty && constty != tp && @@ -1510,19 +1474,7 @@ ttioctl_locked(struct tty *tp, u_long cmd, caddr_t data, int flag, proc_t p) error = EPERM; goto out; } - /* - * The session leader is going away and is possibly going to revoke - * the terminal, we can't change the process group when that is the - * case. - */ - if (ISSET(tp->t_state, TS_PGRPHUP)) { - if (sessp != SESSION_NULL) { - session_rele(sessp); - } - pg_rele(pgrp); - error = EPERM; - goto out; - } + proc_list_lock(); oldpg = tp->t_pgrp; tp->t_pgrp = pgrp; @@ -1534,7 +1486,7 @@ ttioctl_locked(struct tty *tp, u_long cmd, caddr_t data, int flag, proc_t p) * process group. * * ttwakeup() isn't called because the readers aren't getting - * woken up becuse there is something to read but to force + * woken up because there is something to read but to force * the re-evaluation of their foreground process group status. * * Ordinarily leaving these readers waiting wouldn't be an issue @@ -1582,10 +1534,17 @@ ttioctl_locked(struct tty *tp, u_long cmd, caddr_t data, int flag, proc_t p) *(int *)data = tp->t_timeout / hz; break; case TIOCREVOKE: - if (ISSET(tp->t_state, TS_PGRPHUP)) { - tp->t_gen++; - wakeup(TSA_HUP_OR_INPUT(tp)); - } + SET(tp->t_state, TS_REVOKE); + tp->t_gen++; + /* + * At this time, only this wait channel is woken up as only + * ttread has been problematic. It is possible we may need + * to add wake up other tty wait addresses as well. + */ + wakeup(TSA_HUP_OR_INPUT(tp)); + break; + case TIOCREVOKECLEAR: + CLR(tp->t_state, TS_REVOKE); break; default: error = ttcompat(tp, cmd, data, flag, p); @@ -2088,9 +2047,10 @@ loop: } /* - * Signal the process if it's in the background. + * Signal the process if it's in the background. If the terminal is + * getting revoked, everybody is in the background. 
*/ - if (isbackground(p, tp)) { + if (isbackground(p, tp) || ISSET(tp->t_state, TS_REVOKE)) { if ((p->p_sigignore & sigmask(SIGTTIN)) || (ut->uu_sigmask & sigmask(SIGTTIN)) || p->p_lflag & P_LPPWAIT) { @@ -2185,20 +2145,13 @@ loop: goto read; } microuptime(&timecopy); - if (!has_etime) { - /* first character, start timer */ + if (!has_etime || qp->c_cc > last_cc) { + /* first character or got a character, start timer */ has_etime = 1; etime.tv_sec = t / 1000000; - etime.tv_usec = (t - (etime.tv_sec * 1000000)); - timeradd(&etime, &timecopy, &etime); - - slp = t; - } else if (qp->c_cc > last_cc) { - /* got a character, restart timer */ - - etime.tv_sec = t / 1000000; - etime.tv_usec = (t - (etime.tv_sec * 1000000)); + etime.tv_usec = + (__darwin_suseconds_t)(t - (etime.tv_sec * 1000000)); timeradd(&etime, &timecopy, &etime); slp = t; @@ -2220,7 +2173,8 @@ loop: has_etime = 1; etime.tv_sec = t / 1000000; - etime.tv_usec = (t - (etime.tv_sec * 1000000)); + etime.tv_usec = + (__darwin_suseconds_t)(t - (etime.tv_sec * 1000000)); timeradd(&etime, &timecopy, &etime); slp = t; @@ -2279,8 +2233,13 @@ read: for (;;) { char ibuf[IBUFSIZ]; int icc; + ssize_t size = uio_resid(uio); + if (size < 0) { + error = ERANGE; + break; + } - icc = MIN(uio_resid(uio), IBUFSIZ); + icc = (int)MIN(size, IBUFSIZ); icc = q_to_b(qp, (u_char *)ibuf, icc); if (icc <= 0) { if (first) { @@ -2515,7 +2474,12 @@ loop: * leftover from last time. */ if (cc == 0) { - cc = MIN(uio_resid(uio), OBUFSIZ); + ssize_t size = uio_resid(uio); + if (size < 0) { + error = ERANGE; + break; + } + cc = (int)MIN((size_t)size, OBUFSIZ); cp = obuf; error = uiomove(cp, cc, uio); if (error) { @@ -2536,8 +2500,8 @@ loop: if (!ISSET(tp->t_oflag, OPOST)) { ce = cc; } else { - ce = cc - scanc((u_int)cc, (u_char *)cp, - char_type, CCLASSMASK); + ce = (int)((size_t)cc - scanc((size_t)cc, + (u_char *)cp, char_type, CCLASSMASK)); /* * If ce is zero, then we're processing * a special character through ttyoutput. @@ -2911,7 +2875,7 @@ ttspeedtab(int speed, struct speedtab *table) void ttsetwater(struct tty *tp) { - int cps; + speed_t cps; unsigned int x; TTY_LOCK_OWNED(tp); /* debug assert */ @@ -2919,7 +2883,9 @@ ttsetwater(struct tty *tp) #define CLAMP(x, h, l) ((x) > h ? h : ((x) < l) ? 
l : (x)) cps = tp->t_ospeed / 10; - tp->t_lowat = x = CLAMP(cps / 2, TTMAXLOWAT, TTMINLOWAT); + static_assert(TTMAXLOWAT <= UINT_MAX, "max low water fits in unsigned int"); + static_assert(TTMINLOWAT <= UINT_MAX, "min low water fits in unsigned int"); + tp->t_lowat = x = (unsigned int)CLAMP(cps / 2, TTMAXLOWAT, TTMINLOWAT); x += cps; x = CLAMP(x, TTMAXHIWAT, TTMINHIWAT); tp->t_hiwat = roundup(x, CBSIZE); @@ -3202,6 +3168,10 @@ ttysleep(struct tty *tp, void *chan, int pri, const char *wmesg, int timo) TTY_LOCK_OWNED(tp); + if (tp->t_state & TS_REVOKE) { + return ERESTART; + } + gen = tp->t_gen; /* Use of msleep0() avoids conversion timo/timespec/timo */ error = msleep0(chan, &tp->t_lock, pri, wmesg, timo, (int (*)(int))0); @@ -3473,7 +3443,7 @@ tty_set_knote_hook(struct knote *kn) uth = get_bsdthread_info(current_thread()); ctx = vfs_context_current(); - vp = (vnode_t)kn->kn_fp->f_fglob->fg_data; + vp = (vnode_t)kn->kn_fp->fp_glob->fg_data; /* * Reserve a link element to avoid potential allocation under diff --git a/bsd/kern/tty_compat.c b/bsd/kern/tty_compat.c index ac452a3d9..89ab2f7d5 100644 --- a/bsd/kern/tty_compat.c +++ b/bsd/kern/tty_compat.c @@ -88,7 +88,7 @@ static int ttcompatgetflags(struct tty *tp); static void ttcompatsetflags(struct tty *tp, struct termios *t); static void ttcompatsetlflags(struct tty *tp, struct termios *t); -static int ttcompatspeedtab(int speed, struct speedtab *table); +static unsigned int ttcompatspeedtab(speed_t speed, struct speedtab *table); /* * These two tables encode baud rate to speed code and speed code to @@ -98,7 +98,7 @@ static int ttcompatspeedtab(int speed, struct speedtab *table); * name space. */ static struct speedtab compatspeeds[] = { -#define MAX_SPEED 17 +#define MAX_SPEED 17 { .sp_speed = 115200, .sp_code = 17 }, { .sp_speed = 57600, .sp_code = 16 }, { .sp_speed = 38400, .sp_code = 15 }, @@ -127,34 +127,34 @@ static int compatspcodes[] = { /* * ttcompatspeedtab * - * Description: Given a baud rate value as an integer, and a speed table, - * convert the baud rate to a speed code, according to the + * Description: Given a baud rate value as a speed_t, and a speed table, + * convert the baud rate to a speed code integer, according to the * contents of the table. This effectively changes termios.h * baud rate values into ttydev.h baud rate codes. * - * Parameters: int speed Baud rate, as an integer - * struct speedtab *table Baud rate table to speed code table + * Parameters: speed_t speed Baud rate + * struct speedtab *table Baud rate table to speed code table * - * Returns: 1 B50 speed code; returned if we can - * not find an answer in the table. - * 0 If a 0 was requested in order to - * trigger a hangup (250ms of line - * silence, per Bell 103C standard). - * * A speed code matching the requested - * baud rate (potentially rounded down, - * if there is no exact match). + * Returns: 1 B50 speed code; returned if we can + * not find an answer in the table. + * 0 If a 0 was requested in order to + * trigger a hangup (250ms of line + * silence, per Bell 103C standard). + * [2, MAX_SPEED] A speed code matching the requested + * baud rate (potentially rounded down, + * if there is no exact match). * * Notes: This function is used for TIOCGETP, TIOCSETP, and TIOCSETN. 
*/ -static int -ttcompatspeedtab(int speed, struct speedtab *table) +static unsigned int +ttcompatspeedtab(speed_t speed, struct speedtab *table) { if (speed == 0) { return 0; /* hangup */ } for (; table->sp_speed > 0; table++) { if (table->sp_speed <= speed) { /* nearest one, rounded down */ - return table->sp_code; + return (unsigned int)table->sp_code; } } return 1; /* 50, min and not hangup */ @@ -205,7 +205,7 @@ ttcompatspeedtab(int speed, struct speedtab *table) * real thing. A subsequent call to ttioctl_locked() in * ttcompat(), however, may result in subsequent changes. * - * WARNING: This compatibility code is not 6/432 clean; it will only + * WARNING: This compatibility code is not 64/32 clean; it will only * work for 32 bit processes on 32 bit kernels or 64 bit * processes on 64 bit kernels. We are not addressing this * due to . @@ -227,23 +227,42 @@ ttsetcompat(struct tty *tp, u_long *com, caddr_t data, struct termios *term) * pending input is not discarded. */ { - struct sgttyb *sg = (struct sgttyb *)data; - int speed; - - if ((speed = sg->sg_ispeed) > MAX_SPEED || speed < 0) { + __IGNORE_WCASTALIGN(struct sgttyb *sg = (struct sgttyb *)data); + if (sg->sg_ispeed < 0) { + return EINVAL; + } + unsigned int ispeed = (unsigned int)sg->sg_ispeed; + if (ispeed > MAX_SPEED) { return EINVAL; - } else if (speed != ttcompatspeedtab(tp->t_ispeed, compatspeeds)) { - term->c_ispeed = compatspcodes[speed]; + } + if (ispeed != ttcompatspeedtab(tp->t_ispeed, compatspeeds)) { + term->c_ispeed = compatspcodes[ispeed]; } else { term->c_ispeed = tp->t_ispeed; } - if ((speed = sg->sg_ospeed) > MAX_SPEED || speed < 0) { + + /* + * Can't error out at the beginning due to potential for + * backwards-incompatibility. For instance: + * + * struct sgttyb sg; // uninitialized + * sg.sg_ispeed = SOME_VALID_VALUE; + * + * Should still set the input speed. + */ + if (sg->sg_ospeed < 0) { + return EINVAL; + } + unsigned int ospeed = (unsigned int)sg->sg_ospeed; + if (ospeed > MAX_SPEED) { return EINVAL; - } else if (speed != ttcompatspeedtab(tp->t_ospeed, compatspeeds)) { - term->c_ospeed = compatspcodes[speed]; + } + if (ospeed != ttcompatspeedtab(tp->t_ospeed, compatspeeds)) { + term->c_ospeed = compatspcodes[ospeed]; } else { term->c_ospeed = tp->t_ospeed; } + term->c_cc[VERASE] = sg->sg_erase; term->c_cc[VKILL] = sg->sg_kill; tp->t_flags = (tp->t_flags & 0xffff0000) | (sg->sg_flags & 0xffff); @@ -257,7 +276,7 @@ ttsetcompat(struct tty *tp, u_long *com, caddr_t data, struct termios *term) * the struct tchars that 'data' points to. */ { - struct tchars *tc = (struct tchars *)data; + __IGNORE_WCASTALIGN(struct tchars *tc = (struct tchars *)data); cc_t *cc; cc = term->c_cc; @@ -279,7 +298,7 @@ ttsetcompat(struct tty *tp, u_long *com, caddr_t data, struct termios *term) * the struct ltchars that 'data' points to. */ { - struct ltchars *ltc = (struct ltchars *)data; + __IGNORE_WCASTALIGN(struct ltchars *ltc = (struct ltchars *)data); cc_t *cc; cc = term->c_cc; @@ -310,21 +329,24 @@ ttsetcompat(struct tty *tp, u_long *com, caddr_t data, struct termios *term) * bits that correspond to the 16 bit value pointed to by * 'data'. 
*/ + { + __IGNORE_WCASTALIGN(int set = *(int *)data); if (*com == TIOCLSET) { - tp->t_flags = (tp->t_flags & 0xffff) | *(int *)data << 16; + tp->t_flags = (tp->t_flags & 0xffff) | set << 16; } else { tp->t_flags = (ttcompatgetflags(tp) & 0xffff0000) | (tp->t_flags & 0xffff); if (*com == TIOCLBIS) { - tp->t_flags |= *(int *)data << 16; + tp->t_flags |= set << 16; } else { - tp->t_flags &= ~(*(int *)data << 16); + tp->t_flags &= ~(set << 16); } } ttcompatsetlflags(tp, term); *com = TIOCSETA; break; } + } return 0; } @@ -395,18 +417,20 @@ ttcompat(struct tty *tp, u_long com, caddr_t data, int flag, struct proc *p) * flags, into the structure pointed to by 'data'. */ { - struct sgttyb *sg = (struct sgttyb *)data; + __IGNORE_WCASTALIGN(struct sgttyb *sg = (struct sgttyb *)data); cc_t *cc = tp->t_cc; - sg->sg_ospeed = ttcompatspeedtab(tp->t_ospeed, compatspeeds); + static_assert(MAX_SPEED <= CHAR_MAX, "maximum speed fits in a char"); + sg->sg_ospeed = (char)ttcompatspeedtab(tp->t_ospeed, compatspeeds); if (tp->t_ispeed == 0) { sg->sg_ispeed = sg->sg_ospeed; } else { - sg->sg_ispeed = ttcompatspeedtab(tp->t_ispeed, compatspeeds); + sg->sg_ispeed = (char)ttcompatspeedtab(tp->t_ispeed, compatspeeds); } sg->sg_erase = cc[VERASE]; sg->sg_kill = cc[VKILL]; - sg->sg_flags = tp->t_flags = ttcompatgetflags(tp); + tp->t_flags = ttcompatgetflags(tp); + sg->sg_flags = (short)tp->t_flags; break; } case TIOCGETC: diff --git a/bsd/kern/tty_dev.c b/bsd/kern/tty_dev.c index 302b76aa5..28fa8508d 100644 --- a/bsd/kern/tty_dev.c +++ b/bsd/kern/tty_dev.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997-2013 Apple Inc. All rights reserved. + * Copyright (c) 1997-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -82,6 +82,7 @@ #include #include #include /* DEVFS_LOCK()/DEVFS_UNLOCK() */ +#include #if CONFIG_MACF #include @@ -671,10 +672,25 @@ ptcread(dev_t dev, struct uio *uio, int flag) goto out; } if (pti->pt_send & TIOCPKT_IOCTL) { - cc = MIN((int)uio_resid(uio), - (int)sizeof(tp->t_termios)); - uiomove((caddr_t)&tp->t_termios, cc, - uio); +#ifdef __LP64__ + if (uio->uio_segflg == UIO_USERSPACE32) { + static struct termios32 tio32; + cc = MIN((int)uio_resid(uio), (int)sizeof(tio32)); + termios64to32((struct user_termios *)&tp->t_termios, + (struct termios32 *)&tio32); + uiomove((caddr_t)&tio32, cc, uio); +#else + if (uio->uio_segflg == UIO_USERSPACE64) { + static struct user_termios tio64; + cc = MIN((int)uio_resid(uio), (int)sizeof(tio64)); + termios32to64((struct termios32 *)&tp->t_termios, + (struct user_termios *)&tio64); + uiomove((caddr_t)&tio64, cc, uio); +#endif + } else { + cc = MIN((int)uio_resid(uio), (int)sizeof(tp->t_termios)); + uiomove((caddr_t)&tp->t_termios, cc, uio); + } } pti->pt_send = 0; goto out; @@ -841,7 +857,7 @@ ptcselect(dev_t dev, int rw, void *wql, proc_t p) retval = (driver->fix_7828447) ? 
tp->t_outq.c_cc : 1; break; } - /* FALLTHROUGH */ + OS_FALLTHROUGH; case 0: /* exceptional */ if ((tp->t_state & TS_ISOPEN) && @@ -1037,6 +1053,10 @@ ptyioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p) return ENXIO; } + if (cmd == KMIOCDISABLCONS) { + return 0; + } + tp = pti->pt_tty; tty_lock(tp); @@ -1258,6 +1278,7 @@ ptyioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p) case TIOCLSET: pti->pt_send |= TIOCPKT_IOCTL; ptcwakeup(tp, FREAD); + break; default: break; } diff --git a/bsd/kern/tty_dev.h b/bsd/kern/tty_dev.h index 19a1edd12..c9b49d658 100644 --- a/bsd/kern/tty_dev.h +++ b/bsd/kern/tty_dev.h @@ -77,4 +77,7 @@ extern void tty_dev_register(struct tty_dev_t *dev); extern int ttnread(struct tty *tp); +extern void termios32to64(struct termios32 *in, struct user_termios *out); +extern void termios64to32(struct user_termios *in, struct termios32 *out); + #endif // __TTY_DEV_H__ diff --git a/bsd/kern/tty_ptmx.c b/bsd/kern/tty_ptmx.c index 8f0ba28e4..d4efb5c12 100644 --- a/bsd/kern/tty_ptmx.c +++ b/bsd/kern/tty_ptmx.c @@ -117,7 +117,7 @@ extern d_reset_t ptcreset; extern d_select_t ptcselect; static int ptmx_major; /* dynamically assigned major number */ -static struct cdevsw ptmx_cdev = { +static const struct cdevsw ptmx_cdev = { .d_open = ptcopen, .d_close = ptcclose, .d_read = ptcread, @@ -135,7 +135,7 @@ static struct cdevsw ptmx_cdev = { }; static int ptsd_major; /* dynamically assigned major number */ -static struct cdevsw ptsd_cdev = { +static const struct cdevsw ptsd_cdev = { .d_open = ptsopen, .d_close = ptsclose, .d_read = ptsread, @@ -269,9 +269,12 @@ static struct _ptmx_ioctl_state { static struct ptmx_ioctl * ptmx_get_ioctl(int minor, int open_flag) { - struct ptmx_ioctl *new_ptmx_ioctl; + struct ptmx_ioctl *ptmx_ioctl = NULL; if (open_flag & PF_OPEN_M) { + struct ptmx_ioctl *new_ptmx_ioctl; + + DEVFS_LOCK(); /* * If we are about to allocate more memory, but we have * already hit the administrative limit, then fail the @@ -282,8 +285,10 @@ ptmx_get_ioctl(int minor, int open_flag) * snapping to the nearest PTMX_GROW_VECTOR... */ if ((_state.pis_total - _state.pis_free) >= ptmx_max) { + DEVFS_UNLOCK(); return NULL; } + DEVFS_UNLOCK(); MALLOC(new_ptmx_ioctl, struct ptmx_ioctl *, sizeof(struct ptmx_ioctl), M_TTYS, M_WAITOK | M_ZERO); if (new_ptmx_ioctl == NULL) { @@ -302,6 +307,18 @@ ptmx_get_ioctl(int minor, int open_flag) * doing so avoids a reallocation race on the minor number. */ DEVFS_LOCK(); + + /* + * Check again to ensure the limit is not reached after initial check + * when the lock was dropped momentarily for malloc. + */ + if ((_state.pis_total - _state.pis_free) >= ptmx_max) { + ttyfree(new_ptmx_ioctl->pt_tty); + DEVFS_UNLOCK(); + FREE(new_ptmx_ioctl, M_TTYS); + return NULL; + } + /* Need to allocate a larger vector? */ if (_state.pis_free == 0) { struct ptmx_ioctl **new_pis_ioctl_list; @@ -365,11 +382,17 @@ ptmx_get_ioctl(int minor, int open_flag) } } - if (minor < 0 || minor >= _state.pis_total) { - return NULL; + /* + * Lock is held here to protect race when the 'pis_ioctl_list' array is + * being reallocated to increase its slots. 
+ */ + DEVFS_LOCK(); + if (minor >= 0 && minor < _state.pis_total) { + ptmx_ioctl = _state.pis_ioctl_list[minor]; } + DEVFS_UNLOCK(); - return _state.pis_ioctl_list[minor]; + return ptmx_ioctl; } /* diff --git a/bsd/kern/tty_subr.c b/bsd/kern/tty_subr.c index c00ccdbe8..0a30028c6 100644 --- a/bsd/kern/tty_subr.c +++ b/bsd/kern/tty_subr.c @@ -103,19 +103,18 @@ cinit(void) int clalloc(struct clist *clp, int size, int quot) { - MALLOC_ZONE(clp->c_cs, u_char *, size, M_TTYS, M_WAITOK); + clp->c_cs = kheap_alloc(KHEAP_DATA_BUFFERS, size, Z_WAITOK | Z_ZERO); if (!clp->c_cs) { return -1; } - bzero(clp->c_cs, size); if (quot) { - MALLOC_ZONE(clp->c_cq, u_char *, QMEM(size), M_TTYS, M_WAITOK); + clp->c_cq = kheap_alloc(KHEAP_DATA_BUFFERS, + QMEM(size), Z_WAITOK | Z_ZERO); if (!clp->c_cq) { - FREE_ZONE(clp->c_cs, size, M_TTYS); + kheap_free(KHEAP_DATA_BUFFERS, clp->c_cs, size); return -1; } - bzero(clp->c_cs, QMEM(size)); } else { clp->c_cq = (u_char *)0; } @@ -131,10 +130,10 @@ void clfree(struct clist *clp) { if (clp->c_cs) { - FREE_ZONE(clp->c_cs, clp->c_cn, M_TTYS); + kheap_free(KHEAP_DATA_BUFFERS, clp->c_cs, clp->c_cn); } if (clp->c_cq) { - FREE_ZONE(clp->c_cq, QMEM(clp->c_cn), M_TTYS); + kheap_free(KHEAP_DATA_BUFFERS, clp->c_cq, QMEM(clp->c_cn)); } clp->c_cs = clp->c_cq = (u_char *)0; } @@ -181,7 +180,7 @@ out: int q_to_b(struct clist *clp, u_char *cp, int count) { - int cc; + size_t cc; u_char *p = cp; /* optimize this while loop */ @@ -190,7 +189,7 @@ q_to_b(struct clist *clp, u_char *cp, int count) if (clp->c_cf >= clp->c_cl) { cc = clp->c_ce - clp->c_cf; } - if (cc > count) { + if (cc > INT_MAX || (int)cc > count) { cc = count; } bcopy(clp->c_cf, p, cc); @@ -205,7 +204,7 @@ q_to_b(struct clist *clp, u_char *cp, int count) if (clp->c_cc == 0) { clp->c_cf = clp->c_cl = (u_char *)0; } - return p - cp; + return (int)MIN(INT32_MAX, p - cp); } /* @@ -215,8 +214,8 @@ q_to_b(struct clist *clp, u_char *cp, int count) int ndqb(struct clist *clp, int flag) { - int count = 0; - int i; + size_t count = 0; + size_t i; int cc; if ((cc = clp->c_cc) == 0) { @@ -232,24 +231,30 @@ ndqb(struct clist *clp, int flag) } i = clp->c_cf - clp->c_cs; + if (i > INT_MAX) { + return 0; + } if (flag & TTY_QUOTE) { while (cc-- > 0 && !(clp->c_cs[i++] & (flag & ~TTY_QUOTE) || isset(clp->c_cq, i))) { count++; - if (i == clp->c_cn) { + if ((int)i == clp->c_cn) { break; } } } else { while (cc-- > 0 && !(clp->c_cs[i++] & flag)) { count++; - if (i == clp->c_cn) { + if ((int)i == clp->c_cn) { break; } } } out: - return count; + if (count > INT_MAX) { + return 0; + } + return (int)count; } /* @@ -258,7 +263,7 @@ out: void ndflush(struct clist *clp, int count) { - int cc; + size_t cc; if (count == clp->c_cc) { clp->c_cc = 0; @@ -271,7 +276,7 @@ ndflush(struct clist *clp, int count) if (clp->c_cf >= clp->c_cl) { cc = clp->c_ce - clp->c_cf; } - if (cc > count) { + if (cc > INT_MAX || (int)cc > count) { cc = count; } count -= cc; @@ -292,7 +297,7 @@ ndflush(struct clist *clp, int count) int putc(int c, struct clist *clp) { - int i; + size_t i; if (clp->c_cc == 0) { if (!clp->c_cs) { @@ -300,7 +305,6 @@ putc(int c, struct clist *clp) //printf("putc: required clalloc\n"); #endif if (clalloc(clp, 1024, 1)) { -out: return -1; } } @@ -308,11 +312,14 @@ out: } if (clp->c_cc == clp->c_cn) { - goto out; + return -1; } *clp->c_cl = c & 0xff; i = clp->c_cl - clp->c_cs; + if (i > INT_MAX) { + return -1; + } if (clp->c_cq) { #ifdef QBITS if (c & TTY_QUOTE) { @@ -357,13 +364,13 @@ clrbits(u_char *cp, int off, int len) eby = (off + len) / NBBY; 
ebi = (off + len) % NBBY; if (sby == eby) { - mask = ((1 << (ebi - sbi)) - 1) << sbi; + mask = (u_char)(((1 << (ebi - sbi)) - 1) << sbi); cp[sby] &= ~mask; } else { - mask = (1 << sbi) - 1; + mask = (u_char)((1 << sbi) - 1); cp[sby++] &= mask; - mask = (1 << ebi) - 1; + mask = (u_char)((1 << ebi) - 1); /* handle remainder bits, if any, for a non-0 ebi value */ if (mask) { cp[eby] &= ~mask; @@ -383,7 +390,7 @@ clrbits(u_char *cp, int off, int len) int b_to_q(const u_char *cp, int count, struct clist *clp) { - int cc; + size_t cc; const u_char *p = cp; if (count <= 0) { @@ -413,13 +420,17 @@ b_to_q(const u_char *cp, int count, struct clist *clp) if (clp->c_cf > clp->c_cl) { cc = clp->c_cf - clp->c_cl; } - if (cc > count) { + if (cc > INT_MAX || (int)cc > count) { cc = count; } bcopy(p, clp->c_cl, cc); if (clp->c_cq) { #ifdef QBITS - clrbits(clp->c_cq, clp->c_cl - clp->c_cs, cc); + if (clp->c_cl - clp->c_cs > INT_MAX || cc > INT_MAX) { + count = 0; + goto out; + } + clrbits(clp->c_cq, (int)(clp->c_cl - clp->c_cs), (int)cc); #else bzero(clp->c_cl - clp->c_cs + clp->c_cq, cc); #endif diff --git a/bsd/kern/ubc_subr.c b/bsd/kern/ubc_subr.c index cc16291df..7968e0491 100644 --- a/bsd/kern/ubc_subr.c +++ b/bsd/kern/ubc_subr.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999-2014 Apple Inc. All rights reserved. + * Copyright (c) 1999-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -53,6 +53,7 @@ #include #include #include +#include #include #include @@ -72,9 +73,11 @@ #include #include #include +#include #include #include +#include /* XXX These should be in a BSD accessible Mach header, but aren't. */ extern kern_return_t memory_object_pages_resident(memory_object_control_t, @@ -117,7 +120,8 @@ static void ubc_cs_free(struct ubc_info *uip); static boolean_t ubc_cs_supports_multilevel_hash(struct cs_blob *blob); static kern_return_t ubc_cs_convert_to_multilevel_hash(struct cs_blob *blob); -struct zone *ubc_info_zone; +ZONE_DECLARE(ubc_info_zone, "ubc_info zone", sizeof(struct ubc_info), + ZC_NOENCRYPT | ZC_ZFREE_CLEARMEM); static uint32_t cs_blob_generation_count = 1; /* @@ -464,6 +468,17 @@ cs_validate_codedirectory(const CS_CodeDirectory *cd, size_t length) } } + /* linkage is variable length binary data */ + if (ntohl(cd->version) >= CS_SUPPORTSLINKAGE && cd->linkageHashType != 0) { + const uintptr_t ptr = (uintptr_t)cd + ntohl(cd->linkageOffset); + const uintptr_t ptr_end = ptr + ntohl(cd->linkageSize); + + if (ptr_end < ptr || ptr < (uintptr_t)cd || ptr_end > (uintptr_t)cd + length) { + return EBADEXEC; + } + } + + return 0; } @@ -782,31 +797,6 @@ csblob_get_entitlements(struct cs_blob *csblob, void **out_start, size_t *out_le -/* - * ubc_init - * - * Initialization of the zone for Unified Buffer Cache. 
- * - * Parameters: (void) - * - * Returns: (void) - * - * Implicit returns: - * ubc_info_zone(global) initialized for subsequent allocations - */ -__private_extern__ void -ubc_init(void) -{ - int i; - - i = (vm_size_t) sizeof(struct ubc_info); - - ubc_info_zone = zinit(i, 10000 * i, 8192, "ubc_info zone"); - - zone_change(ubc_info_zone, Z_NOENCRYPT, TRUE); -} - - /* * ubc_info_init * @@ -2894,17 +2884,17 @@ ubc_is_mapped_writable(const struct vnode *vp) /* * CODE SIGNING */ -static volatile SInt32 cs_blob_size = 0; -static volatile SInt32 cs_blob_count = 0; -static SInt32 cs_blob_size_peak = 0; -static UInt32 cs_blob_size_max = 0; -static SInt32 cs_blob_count_peak = 0; - -SYSCTL_INT(_vm, OID_AUTO, cs_blob_count, CTLFLAG_RD | CTLFLAG_LOCKED, (int *)(uintptr_t)&cs_blob_count, 0, "Current number of code signature blobs"); -SYSCTL_INT(_vm, OID_AUTO, cs_blob_size, CTLFLAG_RD | CTLFLAG_LOCKED, (int *)(uintptr_t)&cs_blob_size, 0, "Current size of all code signature blobs"); -SYSCTL_INT(_vm, OID_AUTO, cs_blob_count_peak, CTLFLAG_RD | CTLFLAG_LOCKED, &cs_blob_count_peak, 0, "Peak number of code signature blobs"); -SYSCTL_INT(_vm, OID_AUTO, cs_blob_size_peak, CTLFLAG_RD | CTLFLAG_LOCKED, &cs_blob_size_peak, 0, "Peak size of code signature blobs"); -SYSCTL_INT(_vm, OID_AUTO, cs_blob_size_max, CTLFLAG_RD | CTLFLAG_LOCKED, &cs_blob_size_max, 0, "Size of biggest code signature blob"); +static atomic_size_t cs_blob_size = 0; +static atomic_uint_fast32_t cs_blob_count = 0; +static atomic_size_t cs_blob_size_peak = 0; +static atomic_size_t cs_blob_size_max = 0; +static atomic_uint_fast32_t cs_blob_count_peak = 0; + +SYSCTL_UINT(_vm, OID_AUTO, cs_blob_count, CTLFLAG_RD | CTLFLAG_LOCKED, &cs_blob_count, 0, "Current number of code signature blobs"); +SYSCTL_ULONG(_vm, OID_AUTO, cs_blob_size, CTLFLAG_RD | CTLFLAG_LOCKED, &cs_blob_size, "Current size of all code signature blobs"); +SYSCTL_UINT(_vm, OID_AUTO, cs_blob_count_peak, CTLFLAG_RD | CTLFLAG_LOCKED, &cs_blob_count_peak, 0, "Peak number of code signature blobs"); +SYSCTL_ULONG(_vm, OID_AUTO, cs_blob_size_peak, CTLFLAG_RD | CTLFLAG_LOCKED, &cs_blob_size_peak, "Peak size of code signature blobs"); +SYSCTL_ULONG(_vm, OID_AUTO, cs_blob_size_max, CTLFLAG_RD | CTLFLAG_LOCKED, &cs_blob_size_max, "Size of biggest code signature blob"); /* * Function: csblob_parse_teamid @@ -2990,7 +2980,7 @@ ubc_cs_blob_deallocate( * non-16KiB multiples for compatibility with 3rd party binaries. 
*/ static boolean_t -ubc_cs_supports_multilevel_hash(struct cs_blob *blob) +ubc_cs_supports_multilevel_hash(struct cs_blob *blob __unused) { const CS_CodeDirectory *cd; @@ -3205,7 +3195,7 @@ ubc_cs_convert_to_multilevel_hash(struct cs_blob *blob) nCodeSlots >>= hashes_per_new_hash_shift; new_cd->nCodeSlots = htonl(nCodeSlots); - new_cd->pageSize = PAGE_SHIFT; /* Not byte-swapped */ + new_cd->pageSize = (uint8_t)PAGE_SHIFT; /* Not byte-swapped */ if ((ntohl(new_cd->version) >= CS_SUPPORTSSCATTER) && (ntohl(new_cd->scatterOffset))) { SC_Scatter *scatter = (SC_Scatter*) @@ -3266,10 +3256,8 @@ ubc_cs_convert_to_multilevel_hash(struct cs_blob *blob) /* The blob has some cached attributes of the Code Directory, so update those */ - blob->csb_hash_firstlevel_pagesize = blob->csb_hash_pagesize; /* Save the original page size */ + blob->csb_hash_firstlevel_pageshift = blob->csb_hash_pageshift; /* Save the original page size */ - blob->csb_hash_pagesize = PAGE_SIZE; - blob->csb_hash_pagemask = PAGE_MASK; blob->csb_hash_pageshift = PAGE_SHIFT; blob->csb_end_offset = ntohl(cd->codeLimit); if ((ntohl(cd->version) >= CS_SUPPORTSSCATTER) && (ntohl(cd->scatterOffset))) { @@ -3324,6 +3312,9 @@ cs_blob_create_validated( blob->csb_platform_binary = 0; blob->csb_platform_path = 0; blob->csb_teamid = NULL; +#if CONFIG_SUPPLEMENTAL_SIGNATURES + blob->csb_supplement_teamid = NULL; +#endif blob->csb_entitlements_blob = NULL; blob->csb_entitlements = NULL; blob->csb_reconstituted = false; @@ -3349,6 +3340,7 @@ cs_blob_create_validated( const unsigned char *md_base; uint8_t hash[CS_HASH_MAX_SIZE]; int md_size; + vm_offset_t hash_pagemask; blob->csb_cd = cd; blob->csb_entitlements_blob = entitlements; /* may be NULL, not yet validated */ @@ -3358,15 +3350,14 @@ cs_blob_create_validated( } blob->csb_hash_pageshift = cd->pageSize; - blob->csb_hash_pagesize = (1U << cd->pageSize); - blob->csb_hash_pagemask = blob->csb_hash_pagesize - 1; - blob->csb_hash_firstlevel_pagesize = 0; + hash_pagemask = (1U << cd->pageSize) - 1; + blob->csb_hash_firstlevel_pageshift = 0; blob->csb_flags = (ntohl(cd->flags) & CS_ALLOWED_MACHO) | CS_VALID; - blob->csb_end_offset = (((vm_offset_t)ntohl(cd->codeLimit) + blob->csb_hash_pagemask) & ~((vm_offset_t)blob->csb_hash_pagemask)); + blob->csb_end_offset = (((vm_offset_t)ntohl(cd->codeLimit) + hash_pagemask) & ~hash_pagemask); if ((ntohl(cd->version) >= CS_SUPPORTSSCATTER) && (ntohl(cd->scatterOffset))) { const SC_Scatter *scatter = (const SC_Scatter*) ((const char*)cd + ntohl(cd->scatterOffset)); - blob->csb_start_offset = ((off_t)ntohl(scatter->base)) * blob->csb_hash_pagesize; + blob->csb_start_offset = ((off_t)ntohl(scatter->base)) * (1U << blob->csb_hash_pageshift); } else { blob->csb_start_offset = 0; } @@ -3379,6 +3370,23 @@ cs_blob_create_validated( blob->csb_hashtype->cs_final(hash, &mdctx); memcpy(blob->csb_cdhash, hash, CS_CDHASH_LEN); + blob->csb_cdhash_signature = ptrauth_utils_sign_blob_generic(blob->csb_cdhash, + sizeof(blob->csb_cdhash), + OS_PTRAUTH_DISCRIMINATOR("cs_blob.csb_cd_signature"), + PTRAUTH_ADDR_DIVERSIFY); + +#if CONFIG_SUPPLEMENTAL_SIGNATURES + blob->csb_linkage_hashtype = NULL; + if (ntohl(cd->version) >= CS_SUPPORTSLINKAGE && cd->linkageHashType != 0 && + ntohl(cd->linkageSize) >= CS_CDHASH_LEN) { + blob->csb_linkage_hashtype = cs_find_md(cd->linkageHashType); + + if (blob->csb_linkage_hashtype != NULL) { + memcpy(blob->csb_linkage, (uint8_t const*)cd + ntohl(cd->linkageOffset), + CS_CDHASH_LEN); + } + } +#endif } error = 0; @@ -3419,11 +3427,53 @@ 
cs_blob_free( (kfree)(blob, sizeof(*blob)); } } +#if CONFIG_SUPPLEMENTAL_SIGNATURES +static void +cs_blob_supplement_free(struct cs_blob * const blob) +{ + if (blob != NULL) { + if (blob->csb_supplement_teamid != NULL) { + vm_size_t teamid_size = strlen(blob->csb_supplement_teamid) + 1; + kfree(blob->csb_supplement_teamid, teamid_size); + blob->csb_supplement_teamid = NULL; + } + cs_blob_free(blob); + } +} +#endif + +static void +ubc_cs_blob_adjust_statistics(struct cs_blob const *blob) +{ + /* Note that the atomic ops are not enough to guarantee + * correctness: If a blob with an intermediate size is inserted + * concurrently, we can lose a peak value assignment. But these + * statistics are only advisory anyway, so we're not going to + * employ full locking here. (Consequently, we are also okay with + * relaxed ordering of those accesses.) + */ + + unsigned int new_cs_blob_count = os_atomic_add(&cs_blob_count, 1, relaxed); + if (new_cs_blob_count > os_atomic_load(&cs_blob_count_peak, relaxed)) { + os_atomic_store(&cs_blob_count_peak, new_cs_blob_count, relaxed); + } + + size_t new_cs_blob_size = os_atomic_add(&cs_blob_size, blob->csb_mem_size, relaxed); + + if (new_cs_blob_size > os_atomic_load(&cs_blob_size_peak, relaxed)) { + os_atomic_store(&cs_blob_size_peak, new_cs_blob_size, relaxed); + } + if (blob->csb_mem_size > os_atomic_load(&cs_blob_size_max, relaxed)) { + os_atomic_store(&cs_blob_size_max, blob->csb_mem_size, relaxed); + } +} int ubc_cs_blob_add( struct vnode *vp, + uint32_t platform, cpu_type_t cputype, + cpu_subtype_t cpusubtype, off_t base_offset, vm_address_t *addr, vm_size_t size, @@ -3433,7 +3483,7 @@ ubc_cs_blob_add( { kern_return_t kr; struct ubc_info *uip; - struct cs_blob *blob, *oblob; + struct cs_blob *blob = NULL, *oblob = NULL; int error; CS_CodeDirectory const *cd; off_t blob_start_offset, blob_end_offset; @@ -3454,6 +3504,7 @@ ubc_cs_blob_add( } blob->csb_cpu_type = cputype; + blob->csb_cpu_subtype = cpusubtype & ~CPU_SUBTYPE_MASK; blob->csb_base_offset = base_offset; /* @@ -3462,7 +3513,7 @@ ubc_cs_blob_add( #if CONFIG_MACF unsigned int cs_flags = blob->csb_flags; unsigned int signer_type = blob->csb_signer_type; - error = mac_vnode_check_signature(vp, blob, imgp, &cs_flags, &signer_type, flags); + error = mac_vnode_check_signature(vp, blob, imgp, &cs_flags, &signer_type, flags, platform); blob->csb_flags = cs_flags; blob->csb_signer_type = signer_type; @@ -3509,7 +3560,40 @@ ubc_cs_blob_add( blob->csb_entitlements_blob = new_entitlements; blob->csb_reconstituted = true; } +#elif PMAP_CS + /* + * When pmap_cs is enabled, there's an expectation that large blobs are + * relocated to their own page. Above, this happens under + * ubc_cs_reconstitute_code_signature() but that discards parts of the + * signatures that are necessary on some platforms (eg, requirements). + * So in this case, just copy everything. 
+ */ + if (pmap_cs && (blob->csb_mem_size > pmap_cs_blob_limit)) { + vm_offset_t cd_offset, ent_offset; + vm_size_t new_mem_size = round_page(blob->csb_mem_size); + vm_address_t new_mem_kaddr = 0; + + kr = kmem_alloc_kobject(kernel_map, &new_mem_kaddr, new_mem_size, VM_KERN_MEMORY_SECURITY); + if (kr != KERN_SUCCESS) { + printf("failed to allocate %lu bytes to relocate blob: %d\n", new_mem_size, kr); + error = ENOMEM; + goto out; + } + cd_offset = (vm_address_t) blob->csb_cd - blob->csb_mem_kaddr; + ent_offset = (vm_address_t) blob->csb_entitlements_blob - blob->csb_mem_kaddr; + + memcpy((void *) new_mem_kaddr, (const void *) blob->csb_mem_kaddr, blob->csb_mem_size); + ubc_cs_blob_deallocate(blob->csb_mem_kaddr, blob->csb_mem_size); + blob->csb_cd = (const CS_CodeDirectory *) (new_mem_kaddr + cd_offset); + /* Only update the entitlements blob pointer if it is non-NULL. If it is NULL, then + * the blob has no entitlements and ent_offset is garbage. */ + if (blob->csb_entitlements_blob != NULL) { + blob->csb_entitlements_blob = (const CS_GenericBlob *) (new_mem_kaddr + ent_offset); + } + blob->csb_mem_kaddr = new_mem_kaddr; + blob->csb_mem_size = new_mem_size; + } #endif @@ -3676,17 +3760,7 @@ ubc_cs_blob_add( blob->csb_next = uip->cs_blobs; uip->cs_blobs = blob; - OSAddAtomic(+1, &cs_blob_count); - if (cs_blob_count > cs_blob_count_peak) { - cs_blob_count_peak = cs_blob_count; /* XXX atomic ? */ - } - OSAddAtomic((SInt32) + blob->csb_mem_size, &cs_blob_size); - if ((SInt32) cs_blob_size > cs_blob_size_peak) { - cs_blob_size_peak = (SInt32) cs_blob_size; /* XXX atomic ? */ - } - if ((UInt32) blob->csb_mem_size > cs_blob_size_max) { - cs_blob_size_max = (UInt32) blob->csb_mem_size; - } + ubc_cs_blob_adjust_statistics(blob); if (cs_debug > 1) { proc_t p; @@ -3737,6 +3811,226 @@ out: return error; } +#if CONFIG_SUPPLEMENTAL_SIGNATURES +int +ubc_cs_blob_add_supplement( + struct vnode *vp, + struct vnode *orig_vp, + off_t base_offset, + vm_address_t *addr, + vm_size_t size, + struct cs_blob **ret_blob) +{ + kern_return_t kr; + struct ubc_info *uip, *orig_uip; + int error; + struct cs_blob *blob, *orig_blob; + CS_CodeDirectory const *cd; + off_t blob_start_offset, blob_end_offset; + + if (ret_blob) { + *ret_blob = NULL; + } + + /* Create the struct cs_blob wrapper that will be attached to the vnode. + * Validates the passed in blob in the process. */ + error = cs_blob_create_validated(addr, size, &blob, &cd); + + if (error != 0) { + printf("malformed code signature supplement blob: %d\n", error); + return error; + } + + blob->csb_cpu_type = -1; + blob->csb_base_offset = base_offset; + + blob->csb_reconstituted = false; + + vnode_lock(orig_vp); + if (!UBCINFOEXISTS(orig_vp)) { + vnode_unlock(orig_vp); + error = ENOENT; + goto out; + } + + orig_uip = orig_vp->v_ubcinfo; + + /* check that the supplement's linked cdhash matches a cdhash of + * the target image. 
+ */ + + if (blob->csb_linkage_hashtype == NULL) { + proc_t p; + const char *iname = vnode_getname_printable(vp); + p = current_proc(); + + printf("CODE SIGNING: proc %d(%s) supplemental signature for file (%s) " + "is not a supplemental.\n", + p->p_pid, p->p_comm, iname); + + error = EINVAL; + + vnode_putname_printable(iname); + vnode_unlock(orig_vp); + goto out; + } + + for (orig_blob = orig_uip->cs_blobs; orig_blob != NULL; + orig_blob = orig_blob->csb_next) { + ptrauth_utils_auth_blob_generic(orig_blob->csb_cdhash, + sizeof(orig_blob->csb_cdhash), + OS_PTRAUTH_DISCRIMINATOR("cs_blob.csb_cd_signature"), + PTRAUTH_ADDR_DIVERSIFY, + orig_blob->csb_cdhash_signature); + if (orig_blob->csb_hashtype == blob->csb_linkage_hashtype && + memcmp(orig_blob->csb_cdhash, blob->csb_linkage, CS_CDHASH_LEN) == 0) { + // Found match! + break; + } + } + + if (orig_blob == NULL) { + // Not found. + + proc_t p; + const char *iname = vnode_getname_printable(vp); + p = current_proc(); + + printf("CODE SIGNING: proc %d(%s) supplemental signature for file (%s) " + "does not match any attached cdhash.\n", + p->p_pid, p->p_comm, iname); + + error = ESRCH; + + vnode_putname_printable(iname); + vnode_unlock(orig_vp); + goto out; + } + + vnode_unlock(orig_vp); + + // validate the signature against policy! +#if CONFIG_MACF + unsigned int signer_type = blob->csb_signer_type; + error = mac_vnode_check_supplemental_signature(vp, blob, orig_vp, orig_blob, &signer_type); + blob->csb_signer_type = signer_type; + + + if (error) { + if (cs_debug) { + printf("check_supplemental_signature[pid: %d], error = %d\n", current_proc()->p_pid, error); + } + goto out; + } +#endif + + // We allowed the supplemental signature blob so + // copy the platform bit or team-id from the linked signature and whether or not the original is developer code + blob->csb_platform_binary = 0; + blob->csb_platform_path = 0; + if (orig_blob->csb_platform_binary == 1) { + blob->csb_platform_binary = orig_blob->csb_platform_binary; + blob->csb_platform_path = orig_blob->csb_platform_path; + } else if (orig_blob->csb_teamid != NULL) { + vm_size_t teamid_size = strlen(orig_blob->csb_teamid) + 1; + blob->csb_supplement_teamid = kalloc(teamid_size); + if (blob->csb_supplement_teamid == NULL) { + error = ENOMEM; + goto out; + } + strlcpy(blob->csb_supplement_teamid, orig_blob->csb_teamid, teamid_size); + } + blob->csb_flags = (orig_blob->csb_flags & CS_DEV_CODE); + + // Validate the blob's coverage + blob_start_offset = blob->csb_base_offset + blob->csb_start_offset; + blob_end_offset = blob->csb_base_offset + blob->csb_end_offset; + + if (blob_start_offset >= blob_end_offset || blob_start_offset < 0 || blob_end_offset <= 0) { + /* reject empty or backwards blob */ + error = EINVAL; + goto out; + } + + vnode_lock(vp); + if (!UBCINFOEXISTS(vp)) { + vnode_unlock(vp); + error = ENOENT; + goto out; + } + uip = vp->v_ubcinfo; + + struct cs_blob *existing = uip->cs_blob_supplement; + if (existing != NULL) { + if (blob->csb_hashtype == existing->csb_hashtype && + memcmp(blob->csb_cdhash, existing->csb_cdhash, CS_CDHASH_LEN) == 0) { + error = EAGAIN; // non-fatal + } else { + error = EALREADY; // fatal + } + + vnode_unlock(vp); + goto out; + } + + /* Unlike regular cs_blobs, we only ever support one supplement. 
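Only one supplement is kept per vnode, so a second add has to be classified: re-adding the identical signature is a harmless retry, while a different one is rejected outright. A sketch of that decision, assuming the same simplified blob shape as above:

#include <errno.h>
#include <stddef.h>
#include <string.h>

#define CDHASH_LEN 20

struct supplement_sketch {
	const void    *hashtype;
	unsigned char  cdhash[CDHASH_LEN];
};

/* 0 means the slot is free; EAGAIN is later squashed to success by the
 * caller; EALREADY stays fatal. */
static int
classify_supplement_add(const struct supplement_sketch *existing,
    const struct supplement_sketch *incoming)
{
	if (existing == NULL) {
		return 0;
	}
	if (existing->hashtype == incoming->hashtype &&
	    memcmp(existing->cdhash, incoming->cdhash, CDHASH_LEN) == 0) {
		return EAGAIN;
	}
	return EALREADY;
}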
*/ + blob->csb_next = NULL; + uip->cs_blob_supplement = blob; + + /* mark this vnode's VM object as having "signed pages" */ + kr = memory_object_signed(uip->ui_control, TRUE); + if (kr != KERN_SUCCESS) { + vnode_unlock(vp); + error = ENOENT; + goto out; + } + + vnode_unlock(vp); + + /* We still adjust statistics even for supplemental blobs, as they + * consume memory just the same. */ + ubc_cs_blob_adjust_statistics(blob); + + if (cs_debug > 1) { + proc_t p; + const char *name = vnode_getname_printable(vp); + p = current_proc(); + printf("CODE SIGNING: proc %d(%s) " + "loaded supplemental signature for file (%s) " + "range 0x%llx:0x%llx\n", + p->p_pid, p->p_comm, + name, + blob->csb_base_offset + blob->csb_start_offset, + blob->csb_base_offset + blob->csb_end_offset); + vnode_putname_printable(name); + } + + if (ret_blob) { + *ret_blob = blob; + } + + error = 0; // Success! +out: + if (error) { + if (cs_debug) { + printf("ubc_cs_blob_add_supplement[pid: %d]: error = %d\n", current_proc()->p_pid, error); + } + + cs_blob_supplement_free(blob); + } + + if (error == EAGAIN) { + /* We were asked to add an existing blob. + * We cleaned up and ignore the attempt. */ + error = 0; + } + + return error; +} +#endif + + + void csvnode_print_debug(struct vnode *vp) { @@ -3772,10 +4066,50 @@ out: vnode_unlock(vp); } +#if CONFIG_SUPPLEMENTAL_SIGNATURES +struct cs_blob * +ubc_cs_blob_get_supplement( + struct vnode *vp, + off_t offset) +{ + struct cs_blob *blob; + off_t offset_in_blob; + + vnode_lock_spin(vp); + + if (!UBCINFOEXISTS(vp)) { + blob = NULL; + goto out; + } + + blob = vp->v_ubcinfo->cs_blob_supplement; + + if (blob == NULL) { + // no supplemental blob + goto out; + } + + + if (offset != -1) { + offset_in_blob = offset - blob->csb_base_offset; + if (offset_in_blob < blob->csb_start_offset || offset_in_blob >= blob->csb_end_offset) { + // not actually covered by this blob + blob = NULL; + } + } + +out: + vnode_unlock(vp); + + return blob; +} +#endif + struct cs_blob * ubc_cs_blob_get( struct vnode *vp, cpu_type_t cputype, + cpu_subtype_t cpusubtype, off_t offset) { struct ubc_info *uip; @@ -3793,7 +4127,7 @@ ubc_cs_blob_get( for (blob = uip->cs_blobs; blob != NULL; blob = blob->csb_next) { - if (cputype != -1 && blob->csb_cpu_type == cputype) { + if (cputype != -1 && blob->csb_cpu_type == cputype && (cpusubtype == -1 || blob->csb_cpu_subtype == (cpusubtype & ~CPU_SUBTYPE_MASK))) { break; } if (offset != -1) { @@ -3822,14 +4156,23 @@ ubc_cs_free( blob != NULL; blob = next_blob) { next_blob = blob->csb_next; - OSAddAtomic(-1, &cs_blob_count); - OSAddAtomic((SInt32) - blob->csb_mem_size, &cs_blob_size); + os_atomic_add(&cs_blob_count, -1, relaxed); + os_atomic_add(&cs_blob_size, -blob->csb_mem_size, relaxed); cs_blob_free(blob); } #if CHECK_CS_VALIDATION_BITMAP ubc_cs_validation_bitmap_deallocate( uip->ui_vnode ); #endif uip->cs_blobs = NULL; +#if CONFIG_SUPPLEMENTAL_SIGNATURES + if (uip->cs_blob_supplement != NULL) { + blob = uip->cs_blob_supplement; + os_atomic_add(&cs_blob_count, -1, relaxed); + os_atomic_add(&cs_blob_size, -blob->csb_mem_size, relaxed); + cs_blob_supplement_free(uip->cs_blob_supplement); + uip->cs_blob_supplement = NULL; + } +#endif } /* check cs blob generation on vnode @@ -3858,7 +4201,8 @@ ubc_cs_blob_revalidate( struct vnode *vp, struct cs_blob *blob, struct image_params *imgp, - int flags + int flags, + uint32_t platform ) { int error = 0; @@ -3911,7 +4255,7 @@ ubc_cs_blob_revalidate( /* callout to mac_vnode_check_signature */ #if CONFIG_MACF - error = 
mac_vnode_check_signature(vp, blob, imgp, &cs_flags, &signer_type, flags); + error = mac_vnode_check_signature(vp, blob, imgp, &cs_flags, &signer_type, flags, platform); if (cs_debug && error) { printf("revalidate: check_signature[pid: %d], error = %d\n", current_proc()->p_pid, error); } @@ -3980,6 +4324,42 @@ out: return blobs; } +#if CONFIG_SUPPLEMENTAL_SIGNATURES +struct cs_blob * +ubc_get_cs_supplement( + struct vnode *vp) +{ + struct ubc_info *uip; + struct cs_blob *blob; + + /* + * No need to take the vnode lock here. The caller must be holding + * a reference on the vnode (via a VM mapping or open file descriptor), + * so the vnode will not go away. The ubc_info stays until the vnode + * goes away. + * The ubc_info could go away entirely if the vnode gets reclaimed as + * part of a forced unmount. In the case of a code-signature validation + * during a page fault, the "paging_in_progress" reference on the VM + * object guarantess that the vnode pager (and the ubc_info) won't go + * away during the fault. + * Other callers need to protect against vnode reclaim by holding the + * vnode lock, for example. + */ + + if (!UBCINFOEXISTS(vp)) { + blob = NULL; + goto out; + } + + uip = vp->v_ubcinfo; + blob = uip->cs_blob_supplement; + +out: + return blob; +} +#endif + + void ubc_get_cs_mtime( struct vnode *vp, @@ -4058,7 +4438,7 @@ cs_validate_hash( if (hashtype->cs_digest_size > sizeof(actual_hash)) { panic("hash size too large"); } - if (offset & blob->csb_hash_pagemask) { + if (offset & ((1U << blob->csb_hash_pageshift) - 1)) { panic("offset not aligned to cshash boundary"); } @@ -4096,26 +4476,26 @@ cs_validate_hash( } else { *tainted = 0; - size = blob->csb_hash_pagesize; + size = (1U << blob->csb_hash_pageshift); *bytes_processed = size; const uint32_t *asha1, *esha1; if ((off_t)(offset + size) > codeLimit) { /* partial page at end of segment */ assert(offset < codeLimit); - size = (size_t) (codeLimit & blob->csb_hash_pagemask); + size = (size_t) (codeLimit & (size - 1)); *tainted |= CS_VALIDATE_NX; } hashtype->cs_init(&mdctx); - if (blob->csb_hash_firstlevel_pagesize) { + if (blob->csb_hash_firstlevel_pageshift) { const unsigned char *partial_data = (const unsigned char *)data; size_t i; for (i = 0; i < size;) { union cs_hash_union partialctx; unsigned char partial_digest[CS_HASH_MAX_SIZE]; - size_t partial_size = MIN(size - i, blob->csb_hash_firstlevel_pagesize); + size_t partial_size = MIN(size - i, (1U << blob->csb_hash_firstlevel_pageshift)); hashtype->cs_init(&partialctx); hashtype->cs_update(&partialctx, partial_data, partial_size); @@ -4176,6 +4556,20 @@ cs_validate_range( struct cs_blob *blobs = ubc_get_cs_blobs(vp); +#if CONFIG_SUPPLEMENTAL_SIGNATURES + if (blobs == NULL && proc_is_translated(current_proc())) { + struct cs_blob *supp = ubc_get_cs_supplement(vp); + + if (supp != NULL) { + blobs = supp; + } else { + return FALSE; + } + } +#endif + + + *tainted = 0; for (offset_in_range = 0; @@ -4209,6 +4603,91 @@ cs_validate_range( return all_subranges_validated; } +void +cs_validate_page( + struct vnode *vp, + memory_object_t pager, + memory_object_offset_t page_offset, + const void *data, + int *validated_p, + int *tainted_p, + int *nx_p) +{ + vm_size_t offset_in_page; + struct cs_blob *blobs; + + blobs = ubc_get_cs_blobs(vp); + +#if CONFIG_SUPPLEMENTAL_SIGNATURES + if (blobs == NULL && proc_is_translated(current_proc())) { + struct cs_blob *supp = ubc_get_cs_supplement(vp); + + if (supp != NULL) { + blobs = supp; + } + } +#endif + + *validated_p = VMP_CS_ALL_FALSE; + 
*tainted_p = VMP_CS_ALL_FALSE; + *nx_p = VMP_CS_ALL_FALSE; + + for (offset_in_page = 0; + offset_in_page < PAGE_SIZE; + /* offset_in_page updated based on bytes processed */) { + unsigned subrange_tainted = 0; + boolean_t subrange_validated; + vm_size_t bytes_processed = 0; + int sub_bit; + + subrange_validated = cs_validate_hash(blobs, + pager, + page_offset + offset_in_page, + (const void *)((const char *)data + offset_in_page), + &bytes_processed, + &subrange_tainted); + + if (bytes_processed == 0) { + /* 4k chunk not code-signed: try next one */ + offset_in_page += FOURK_PAGE_SIZE; + continue; + } + if (offset_in_page == 0 && + bytes_processed > PAGE_SIZE - FOURK_PAGE_SIZE) { + /* all processed: no 4k granularity */ + if (subrange_validated) { + *validated_p = VMP_CS_ALL_TRUE; + } + if (subrange_tainted & CS_VALIDATE_TAINTED) { + *tainted_p = VMP_CS_ALL_TRUE; + } + if (subrange_tainted & CS_VALIDATE_NX) { + *nx_p = VMP_CS_ALL_TRUE; + } + break; + } + /* we only handle 4k or 16k code-signing granularity... */ + assertf(bytes_processed <= FOURK_PAGE_SIZE, + "vp %p blobs %p offset 0x%llx + 0x%llx bytes_processed 0x%llx\n", + vp, blobs, (uint64_t)page_offset, + (uint64_t)offset_in_page, (uint64_t)bytes_processed); + sub_bit = 1 << (offset_in_page >> FOURK_PAGE_SHIFT); + if (subrange_validated) { + *validated_p |= sub_bit; + } + if (subrange_tainted & CS_VALIDATE_TAINTED) { + *tainted_p |= sub_bit; + } + if (subrange_tainted & CS_VALIDATE_NX) { + *nx_p |= sub_bit; + } + /* go to next 4k chunk */ + offset_in_page += FOURK_PAGE_SIZE; + } + + return; +} + int ubc_cs_getcdhash( vnode_t vp, @@ -4239,6 +4718,11 @@ ubc_cs_getcdhash( ret = EBADEXEC; /* XXX any better error ? */ } else { /* get the SHA1 hash of that blob */ + ptrauth_utils_auth_blob_generic(blob->csb_cdhash, + sizeof(blob->csb_cdhash), + OS_PTRAUTH_DISCRIMINATOR("cs_blob.csb_cd_signature"), + PTRAUTH_ADDR_DIVERSIFY, + blob->csb_cdhash_signature); bcopy(blob->csb_cdhash, cdhash, sizeof(blob->csb_cdhash)); ret = 0; } @@ -4271,7 +4755,7 @@ ubc_cs_is_range_codesigned( return FALSE; } - csblob = ubc_cs_blob_get(vp, -1, start); + csblob = ubc_cs_blob_get(vp, -1, -1, start); if (csblob == NULL) { return FALSE; } @@ -4473,7 +4957,8 @@ cs_associate_blob_with_mapping( kr = pmap_cs_associate(pmap, cd_entry, start, - size); + size, + offset - blob_start_offset); } else { kr = KERN_CODESIGN_ERROR; } diff --git a/bsd/kern/uipc_domain.c b/bsd/kern/uipc_domain.c index 9321399dc..c1f6a3efb 100644 --- a/bsd/kern/uipc_domain.c +++ b/bsd/kern/uipc_domain.c @@ -684,15 +684,14 @@ net_drain_domains(void) lck_mtx_unlock(&domain_timeout_mtx); } -#if INET6 extern struct domain inet6domain_s; -#endif #if IPSEC extern struct domain keydomain_s; #endif extern struct domain routedomain_s, ndrvdomain_s, inetdomain_s; extern struct domain systemdomain_s, localdomain_s; +extern struct domain vsockdomain_s; #if MULTIPATH extern struct domain mpdomain_s; @@ -764,9 +763,7 @@ domaininit(void) * dom_rtattach() called on rt_tables[]. 
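cs_validate_page() above reports per-4K results through bit masks when the signing granularity is finer than the 16K VM page: bit i of each output covers bytes [i*4K, (i+1)*4K). A self-contained sketch of the bitmap construction, with the hashing replaced by caller-supplied per-chunk verdicts (struct and function names are illustrative):

#include <stdbool.h>
#include <stdint.h>

#define PAGE_16K     16384u
#define FOURK_SIZE   4096u
#define FOURK_SHIFT  12

/* Hypothetical per-chunk verdicts standing in for cs_validate_hash(). */
struct chunk_verdict {
	bool validated;
	bool tainted;
};

static void
build_subpage_bitmaps(const struct chunk_verdict verdicts[PAGE_16K / FOURK_SIZE],
    unsigned *validated_bits, unsigned *tainted_bits)
{
	*validated_bits = 0;
	*tainted_bits = 0;
	for (uint32_t off = 0; off < PAGE_16K; off += FOURK_SIZE) {
		unsigned bit = 1u << (off >> FOURK_SHIFT);
		const struct chunk_verdict *v = &verdicts[off >> FOURK_SHIFT];
		if (v->validated) {
			*validated_bits |= bit;
		}
		if (v->tainted) {
			*tainted_bits |= bit;
		}
	}
}

When a single signing hash already covers the whole 16K page, the real function short-circuits to the VMP_CS_ALL_TRUE/VMP_CS_ALL_FALSE values instead of setting individual bits.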
*/ attach_domain(&inetdomain_s); -#if INET6 attach_domain(&inet6domain_s); -#endif /* INET6 */ #if MULTIPATH attach_domain(&mpdomain_s); #endif /* MULTIPATH */ @@ -776,6 +773,7 @@ domaininit(void) attach_domain(&keydomain_s); #endif /* IPSEC */ attach_domain(&ndrvdomain_s); + attach_domain(&vsockdomain_s); attach_domain(&routedomain_s); /* must be last domain */ /* @@ -1031,7 +1029,7 @@ net_uptime2timeval(struct timeval *tv) } tv->tv_usec = 0; - tv->tv_sec = net_uptime(); + tv->tv_sec = (time_t)net_uptime(); } /* diff --git a/bsd/kern/uipc_mbuf.c b/bsd/kern/uipc_mbuf.c index 947f38ad6..1a0e04ac9 100644 --- a/bsd/kern/uipc_mbuf.c +++ b/bsd/kern/uipc_mbuf.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998-2019 Apple Inc. All rights reserved. + * Copyright (c) 1998-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -67,6 +67,8 @@ * Version 2.0. */ +#include + #include #include #include @@ -100,10 +102,6 @@ #include #include -#if CONFIG_MACF_NET -#include -#endif /* MAC_NET */ - #include #include @@ -346,7 +344,7 @@ static uint32_t mbuf_worker_run_cnt; static uint64_t mbuf_worker_last_runtime; static uint64_t mbuf_drain_last_runtime; static int mbuf_worker_ready; /* worker thread is runnable */ -static int ncpu; /* number of CPUs */ +static unsigned int ncpu; /* number of CPUs */ static ppnum_t *mcl_paddr; /* Array of cluster physical addresses */ static ppnum_t mcl_pages; /* Size of array (# physical pages) */ static ppnum_t mcl_paddr_base; /* Handle returned by IOMapper::iovmAlloc() */ @@ -711,16 +709,16 @@ static char *mbuf_dump_buf; * mb_drain_maxint controls the amount of time to wait (in seconds) before * consecutive calls to mbuf_drain(). */ -#if CONFIG_EMBEDDED || DEVELOPMENT || DEBUG +#if !XNU_TARGET_OS_OSX || DEVELOPMENT || DEBUG static unsigned int mb_watchdog = 1; -#else +#else /* XNU_TARGET_OS_OSX && !DEVELOPMENT && !DEBUG */ static unsigned int mb_watchdog = 0; -#endif -#if CONFIG_EMBEDDED +#endif /* XNU_TARGET_OS_OSX && !DEVELOPMENT && !DEBUG */ +#if !XNU_TARGET_OS_OSX static unsigned int mb_drain_maxint = 60; -#else +#else /* XNU_TARGET_OS_OSX */ static unsigned int mb_drain_maxint = 0; -#endif /* CONFIG_EMBEDDED */ +#endif /* XNU_TARGET_OS_OSX */ uintptr_t mb_obscure_extfree __attribute__((visibility("hidden"))); uintptr_t mb_obscure_extref __attribute__((visibility("hidden"))); @@ -934,6 +932,7 @@ static void mbuf_drain_locked(boolean_t); (m)->m_pkthdr.csum_flags = 0; \ (m)->m_pkthdr.csum_data = 0; \ (m)->m_pkthdr.vlan_tag = 0; \ + (m)->m_pkthdr.comp_gencnt = 0; \ m_classifier_init(m, 0); \ m_tag_init(m, 1); \ m_scratch_init(m); \ @@ -1438,31 +1437,20 @@ typedef struct ncl_tbl { uint32_t nt_mbpool; /* mbuf pool size */ } ncl_tbl_t; -/* Non-server */ -static ncl_tbl_t ncl_table[] = { +static const ncl_tbl_t ncl_table[] = { { (1ULL << GBSHIFT) /* 1 GB */, (64 << MBSHIFT) /* 64 MB */ }, - { (1ULL << (GBSHIFT + 3)) /* 8 GB */, (96 << MBSHIFT) /* 96 MB */ }, - { (1ULL << (GBSHIFT + 4)) /* 16 GB */, (128 << MBSHIFT) /* 128 MB */ }, - { 0, 0 } -}; - -/* Server */ -static ncl_tbl_t ncl_table_srv[] = { - { (1ULL << GBSHIFT) /* 1 GB */, (96 << MBSHIFT) /* 96 MB */ }, - { (1ULL << (GBSHIFT + 2)) /* 4 GB */, (128 << MBSHIFT) /* 128 MB */ }, - { (1ULL << (GBSHIFT + 3)) /* 8 GB */, (160 << MBSHIFT) /* 160 MB */ }, - { (1ULL << (GBSHIFT + 4)) /* 16 GB */, (192 << MBSHIFT) /* 192 MB */ }, - { (1ULL << (GBSHIFT + 5)) /* 32 GB */, (256 << MBSHIFT) /* 256 MB */ }, - { (1ULL << (GBSHIFT + 6)) /* 64 GB */, (384 << MBSHIFT) /* 384 MB */ }, + { (1ULL << (GBSHIFT + 2)) 
/* 4 GB */, (96 << MBSHIFT) /* 96 MB */ }, + { (1ULL << (GBSHIFT + 3)) /* 8 GB */, (128 << MBSHIFT) /* 128 MB */ }, + { (1ULL << (GBSHIFT + 4)) /* 16 GB */, (256 << MBSHIFT) /* 256 MB */ }, + { (1ULL << (GBSHIFT + 5)) /* 32 GB */, (512 << MBSHIFT) /* 512 MB */ }, { 0, 0 } }; #endif /* __LP64__ */ __private_extern__ unsigned int -mbuf_default_ncl(int server, uint64_t mem) +mbuf_default_ncl(uint64_t mem) { #if !defined(__LP64__) -#pragma unused(server) unsigned int n; /* * 32-bit kernel (default to 64MB of mbuf pool for >= 1GB RAM). @@ -1472,16 +1460,15 @@ mbuf_default_ncl(int server, uint64_t mem) } #else unsigned int n, i; - ncl_tbl_t *tbl = (server ? ncl_table_srv : ncl_table); /* * 64-bit kernel (mbuf pool size based on table). */ - n = tbl[0].nt_mbpool; - for (i = 0; tbl[i].nt_mbpool != 0; i++) { - if (mem < tbl[i].nt_maxmem) { + n = ncl_table[0].nt_mbpool; + for (i = 0; ncl_table[i].nt_mbpool != 0; i++) { + if (mem < ncl_table[i].nt_maxmem) { break; } - n = tbl[i].nt_mbpool; + n = ncl_table[i].nt_mbpool; } n >>= MCLSHIFT; #endif /* !__LP64__ */ @@ -1569,6 +1556,9 @@ mbinit(void) _CASSERT(!(offsetof(struct mbuf, m_pkthdr.pkt_mpriv) % sizeof(uint32_t))); + /* pktdata needs to start at 128-bit offset! */ + _CASSERT((offsetof(struct mbuf, m_pktdat) % 16) == 0); + /* Initialize random red zone cookie value */ _CASSERT(sizeof(mb_redzone_cookie) == sizeof(((struct pkthdr *)0)->redzone)); @@ -1658,7 +1648,7 @@ mbinit(void) * uninitialize this framework, since the original address * before alignment is not saved. */ - ncpu = ml_get_max_cpus(); + ncpu = ml_wait_max_cpus(); MALLOC(buf, void *, MBUF_MTYPES_SIZE(ncpu) + CPU_CACHE_LINE_SIZE, M_TEMP, M_WAITOK); VERIFY(buf != NULL); @@ -3638,12 +3628,6 @@ m_get_common(int wait, short type, int hdr) MBUF_INIT(m, hdr, type); mtype_stat_inc(type); mtype_stat_dec(MT_FREE); -#if CONFIG_MACF_NET - if (hdr && mac_init_mbuf(m, wait) != 0) { - m_free(m); - return NULL; - } -#endif /* MAC_NET */ } return m; } @@ -3980,12 +3964,6 @@ m_getcl(int wait, int type, int flags) mtype_stat_inc(type); mtype_stat_dec(MT_FREE); -#if CONFIG_MACF_NET - if (hdr && mac_init_mbuf(m, wait) != 0) { - m_freem(m); - return NULL; - } -#endif /* MAC_NET */ } return m; } @@ -4308,12 +4286,6 @@ m_getpackets_internal(unsigned int *num_needed, int num_with_pkthdrs, if (num_with_pkthdrs > 0) { --num_with_pkthdrs; -#if CONFIG_MACF_NET - if (mac_mbuf_label_init(m, wait) != 0) { - m_freem(m); - break; - } -#endif /* MAC_NET */ } *np = m; @@ -4400,6 +4372,7 @@ m_allocpacket_internal(unsigned int *numlist, size_t packetlen, (wantsize == m_maxsize(MC_16KCL) && njcl > 0)) { bufsize = wantsize; } else { + *numlist = 0; return NULL; } @@ -4423,6 +4396,7 @@ m_allocpacket_internal(unsigned int *numlist, size_t packetlen, if (maxsegments != NULL) { if (*maxsegments && nsegs > *maxsegments) { *maxsegments = nsegs; + *numlist = 0; return NULL; } *maxsegments = nsegs; @@ -4471,12 +4445,6 @@ m_allocpacket_internal(unsigned int *numlist, size_t packetlen, ASSERT(m != NULL); MBUF_INIT(m, 1, MT_DATA); -#if CONFIG_MACF_NET - if (mac_init_mbuf(m, wait) != 0) { - m_free(m); - break; - } -#endif /* MAC_NET */ num++; if (bufsize > MHLEN) { /* A second mbuf for this segment chain */ @@ -4644,13 +4612,6 @@ m_allocpacket_internal(unsigned int *numlist, size_t packetlen, } else { MBUF_CL_INIT(m, cl, rfa, 1, flag); } -#if CONFIG_MACF_NET - if (pkthdr && mac_init_mbuf(m, wait) != 0) { - --num; - m_freem(m); - break; - } -#endif /* MAC_NET */ *np = m; if ((num % nsegs) == 0) { @@ -4687,6 +4648,7 @@ fail: } if (wantall 
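The consolidated table above sizes the mbuf pool from installed memory, and mbuf_default_ncl() now takes the last entry whose threshold the memory size has reached. A sketch of the lookup with the sizes written out in plain bytes (the real function then converts bytes to clusters with >> MCLSHIFT):

#include <stdint.h>

struct pool_entry {
	uint64_t maxmem;	/* this row's pool size applies once mem >= maxmem */
	uint64_t poolsize;
};

/* Same shape as the diff's ncl_table: 1 GB -> 64 MB ... 32 GB -> 512 MB. */
static const struct pool_entry pool_table[] = {
	{  1ULL << 30,  64ULL << 20 },
	{  4ULL << 30,  96ULL << 20 },
	{  8ULL << 30, 128ULL << 20 },
	{ 16ULL << 30, 256ULL << 20 },
	{ 32ULL << 30, 512ULL << 20 },
	{ 0, 0 }
};

static uint64_t
default_pool_bytes(uint64_t mem)
{
	uint64_t n = pool_table[0].poolsize;
	for (unsigned i = 0; pool_table[i].poolsize != 0; i++) {
		if (mem < pool_table[i].maxmem) {
			break;
		}
		n = pool_table[i].poolsize;
	}
	return n;
}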
&& top != NULL) { m_freem_list(top); + *numlist = 0; return NULL; } *numlist = num; @@ -5238,14 +5200,6 @@ m_copym_with_hdrs(struct mbuf *m0, int off0, int len0, int wait, type = (top == NULL) ? MT_HEADER : m->m_type; MBUF_INIT(n, (top == NULL), type); -#if CONFIG_MACF_NET - if (top == NULL && mac_mbuf_label_init(n, wait) != 0) { - mtype_stat_inc(MT_HEADER); - mtype_stat_dec(MT_FREE); - m_free(n); - goto nospace; - } -#endif /* MAC_NET */ if (top == NULL) { top = n; @@ -8257,8 +8211,8 @@ m_set_ext(struct mbuf *m, struct ext_ref *rfa, m_ext_free_func_t ext_free, if (ext_free != NULL) { rfa->ext_token = ((uintptr_t)&rfa->ext_token) ^ mb_obscure_extfree; - m->m_ext.ext_free = (m_ext_free_func_t) - (((uintptr_t)ext_free) ^ rfa->ext_token); + uintptr_t ext_free_val = ptrauth_nop_cast(uintptr_t, ext_free) ^ rfa->ext_token; + m->m_ext.ext_free = ptrauth_nop_cast(m_ext_free_func_t, ext_free_val); if (ext_arg != NULL) { m->m_ext.ext_arg = (caddr_t)(((uintptr_t)ext_arg) ^ rfa->ext_token); @@ -8277,9 +8231,8 @@ m_set_ext(struct mbuf *m, struct ext_ref *rfa, m_ext_free_func_t ext_free, * to obscure the ext_free and ext_arg pointers. */ if (ext_free != NULL) { - m->m_ext.ext_free = - (m_ext_free_func_t)((uintptr_t)ext_free ^ - mb_obscure_extfree); + uintptr_t ext_free_val = ptrauth_nop_cast(uintptr_t, ext_free) ^ mb_obscure_extfree; + m->m_ext.ext_free = ptrauth_nop_cast(m_ext_free_func_t, ext_free_val); if (ext_arg != NULL) { m->m_ext.ext_arg = (caddr_t)((uintptr_t)ext_arg ^ @@ -8315,10 +8268,11 @@ m_get_ext_free(struct mbuf *m) rfa = m_get_rfa(m); if (rfa == NULL) { - return (m_ext_free_func_t)((uintptr_t)m->m_ext.ext_free ^ mb_obscure_extfree); + uintptr_t ext_free_val = ptrauth_nop_cast(uintptr_t, m->m_ext.ext_free) ^ mb_obscure_extfree; + return ptrauth_nop_cast(m_ext_free_func_t, ext_free_val); } else { - return (m_ext_free_func_t)(((uintptr_t)m->m_ext.ext_free) - ^ rfa->ext_token); + uintptr_t ext_free_val = ptrauth_nop_cast(uintptr_t, m->m_ext.ext_free) ^ rfa->ext_token; + return ptrauth_nop_cast(m_ext_free_func_t, ext_free_val); } } diff --git a/bsd/kern/uipc_mbuf2.c b/bsd/kern/uipc_mbuf2.c index 2efefbc33..91e5a58d0 100644 --- a/bsd/kern/uipc_mbuf2.c +++ b/bsd/kern/uipc_mbuf2.c @@ -108,14 +108,8 @@ #include #include #include -#if INET6 #include #include -#endif /* INET6 */ - -#if CONFIG_MACF_NET -#include -#endif /* * ensure that [off, off + len) is contiguous on the mbuf chain "m". @@ -399,7 +393,7 @@ m_tag_create(u_int32_t id, u_int16_t type, int len, int wait, struct mbuf *buf) t->m_tag_cookie = M_TAG_VALID_PATTERN; t->m_tag_type = type; - t->m_tag_len = len; + t->m_tag_len = (uint16_t)len; t->m_tag_id = id; if (len > 0) { bzero(t + 1, len); @@ -448,7 +442,7 @@ m_tag_alloc(u_int32_t id, u_int16_t type, int len, int wait) VERIFY(IS_P2ALIGNED(t, sizeof(u_int64_t))); t->m_tag_cookie = M_TAG_VALID_PATTERN; t->m_tag_type = type; - t->m_tag_len = len; + t->m_tag_len = (uint16_t)len; t->m_tag_id = id; if (len > 0) { bzero(t + 1, len); @@ -461,13 +455,6 @@ m_tag_alloc(u_int32_t id, u_int16_t type, int len, int wait) void m_tag_free(struct m_tag *t) { -#if CONFIG_MACF_NET - if (t != NULL && - t->m_tag_id == KERNEL_MODULE_TAG_ID && - t->m_tag_type == KERNEL_TAG_TYPE_MACLABEL) { - mac_mbuf_tag_destroy(t); - } -#endif if (t == NULL) { return; } @@ -587,22 +574,6 @@ m_tag_copy(struct m_tag *t, int how) if (p == NULL) { return NULL; } -#if CONFIG_MACF_NET - /* - * XXXMAC: we should probably pass off the initialization, and - * copying here? 
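The m_set_ext()/m_get_ext_free() hunks above switch the casts to ptrauth_nop_cast so that, on arm64e, the XOR obscuring of the external-free callback works on the raw pointer bits rather than a re-signed pointer. A userspace sketch of the underlying XOR round-trip with plain casts; the token value is a stand-in for mb_obscure_extfree, which the kernel derives from random data at boot:

#include <stddef.h>
#include <stdint.h>

typedef void (*ext_free_fn)(void *buf, size_t size, void *arg);

static uintptr_t obscure_token = 0x5a5a5a5aUL;	/* stand-in secret */

/* Store the callback XORed with the secret so a stray or attacker
 * controlled write to the mbuf is unlikely to yield a callable pointer. */
static uintptr_t
obscure_ext_free(ext_free_fn fn)
{
	return (uintptr_t)fn ^ obscure_token;
}

static ext_free_fn
recover_ext_free(uintptr_t stored)
{
	return (ext_free_fn)(stored ^ obscure_token);
}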
can we hid that KERNEL_TAG_TYPE_MACLABEL is - * special from the mbuf code? - */ - if (t != NULL && - t->m_tag_id == KERNEL_MODULE_TAG_ID && - t->m_tag_type == KERNEL_TAG_TYPE_MACLABEL) { - if (mac_mbuf_tag_init(p, how) != 0) { - m_tag_free(p); - return NULL; - } - mac_mbuf_tag_copy(t, p); - } else -#endif bcopy(t + 1, p + 1, t->m_tag_len); /* Copy the data */ return p; } @@ -903,5 +874,5 @@ m_sum16(struct mbuf *m, uint32_t off, uint32_t len) /* NOTREACHED */ } - return os_cpu_in_cksum_mbuf(m, len, off, 0); + return (uint16_t)os_cpu_in_cksum_mbuf(m, len, off, 0); } diff --git a/bsd/kern/uipc_socket.c b/bsd/kern/uipc_socket.c index 2a754f050..607af6d3c 100644 --- a/bsd/kern/uipc_socket.c +++ b/bsd/kern/uipc_socket.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998-2019 Apple Inc. All rights reserved. + * Copyright (c) 1998-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -229,7 +229,7 @@ static unsigned long sodefunct_calls = 0; SYSCTL_LONG(_kern_ipc, OID_AUTO, sodefunct_calls, CTLFLAG_LOCKED, &sodefunct_calls, ""); -static int socket_zone = M_SOCKET; +ZONE_DECLARE(socket_zone, "socket", sizeof(struct socket), ZC_ZFREE_CLEARMEM); so_gen_t so_gencnt; /* generation count for sockets */ MALLOC_DEFINE(M_SONAME, "soname", "socket name"); @@ -338,7 +338,7 @@ vm_size_t so_cache_zone_element_size; static int sodelayed_copy(struct socket *, struct uio *, struct mbuf **, user_ssize_t *); -static void cached_sock_alloc(struct socket **, int); +static void cached_sock_alloc(struct socket **, zalloc_flags_t); static void cached_sock_free(struct socket *); /* @@ -433,10 +433,8 @@ socketinit(void) so_cache_zone_element_size = (vm_size_t)(sizeof(struct socket) + 4 + get_inpcb_str_size() + 4 + get_tcp_str_size()); - so_cache_zone = zinit(so_cache_zone_element_size, - (120000 * so_cache_zone_element_size), 8192, "socache zone"); - zone_change(so_cache_zone, Z_CALLERACCT, FALSE); - zone_change(so_cache_zone, Z_NOENCRYPT, TRUE); + so_cache_zone = zone_create("socache zone", so_cache_zone_element_size, + ZC_ZFREE_CLEARMEM | ZC_NOENCRYPT); bzero(&soextbkidlestat, sizeof(struct soextbkidlestat)); soextbkidlestat.so_xbkidle_maxperproc = SO_IDLE_BK_IDLE_MAX_PER_PROC; @@ -452,7 +450,7 @@ socketinit(void) } static void -cached_sock_alloc(struct socket **so, int waitok) +cached_sock_alloc(struct socket **so, zalloc_flags_t how) { caddr_t temp; uintptr_t offset; @@ -476,17 +474,7 @@ cached_sock_alloc(struct socket **so, int waitok) } else { lck_mtx_unlock(so_cache_mtx); - if (waitok) { - *so = (struct socket *)zalloc(so_cache_zone); - } else { - *so = (struct socket *)zalloc_noblock(so_cache_zone); - } - - if (*so == NULL) { - return; - } - - bzero((caddr_t)*so, sizeof(struct socket)); + *so = zalloc_flags(so_cache_zone, how | Z_ZERO); /* * Define offsets for extra structures into our @@ -628,33 +616,21 @@ so_cache_timer(void) struct socket * soalloc(int waitok, int dom, int type) { + zalloc_flags_t how = waitok ? 
Z_WAITOK : Z_NOWAIT; struct socket *so; if ((dom == PF_INET) && (type == SOCK_STREAM)) { - cached_sock_alloc(&so, waitok); + cached_sock_alloc(&so, how); } else { - MALLOC_ZONE(so, struct socket *, sizeof(*so), socket_zone, - M_WAITOK); - if (so != NULL) { - bzero(so, sizeof(*so)); - } + so = zalloc_flags(socket_zone, how | Z_ZERO); } if (so != NULL) { so->so_gencnt = OSIncrementAtomic64((SInt64 *)&so_gencnt); - so->so_zone = socket_zone; /* * Increment the socket allocation statistics */ INC_ATOMIC_INT64_LIM(net_api_stats.nas_socket_alloc_total); - -#if CONFIG_MACF_SOCKET - /* Convert waitok to M_WAITOK/M_NOWAIT for MAC Framework. */ - if (mac_socket_label_init(so, !waitok) != 0) { - sodealloc(so); - return NULL; - } -#endif /* MAC_SOCKET */ } return so; @@ -795,10 +771,6 @@ socreate_internal(int dom, struct socket **aso, int type, int proto, so->next_lock_lr = 0; so->next_unlock_lr = 0; -#if CONFIG_MACF_SOCKET - mac_socket_label_associate(kauth_cred_get(), so); -#endif /* MAC_SOCKET */ - /* * Attachment will create the per pcb lock if necessary and * increase refcount for creation, make sure it's done before @@ -828,7 +800,6 @@ socreate_internal(int dom, struct socket **aso, int type, int proto, } atomic_add_32(&prp->pr_domain->dom_refs, 1); - TAILQ_INIT(&so->so_evlist); /* Attach socket filters for this protocol */ sflt_initsock(so); @@ -1003,23 +974,12 @@ sodealloc(struct socket *so) cfil_sock_detach(so); #endif /* CONTENT_FILTER */ - /* Delete the state allocated for msg queues on a socket */ - if (so->so_flags & SOF_ENABLE_MSGS) { - FREE(so->so_msg_state, M_TEMP); - so->so_msg_state = NULL; - } - VERIFY(so->so_msg_state == NULL); - so->so_gencnt = OSIncrementAtomic64((SInt64 *)&so_gencnt); -#if CONFIG_MACF_SOCKET - mac_socket_label_destroy(so); -#endif /* MAC_SOCKET */ - if (so->so_flags1 & SOF1_CACHED_IN_SOCK_LAYER) { cached_sock_free(so); } else { - FREE_ZONE(so, sizeof(*so), so->so_zone); + zfree(socket_zone, so); } } @@ -1522,7 +1482,6 @@ discard: } atomic_add_32(&so->so_proto->pr_domain->dom_refs, -1); - evsofree(so); VERIFY(so->so_usecount > 0); so->so_usecount--; @@ -1983,8 +1942,7 @@ sodisconnectx(struct socket *so, sae_associd_t aid, sae_connid_t cid) */ int sosendcheck(struct socket *so, struct sockaddr *addr, user_ssize_t resid, - int32_t clen, int32_t atomic, int flags, int *sblocked, - struct mbuf *control) + int32_t clen, int32_t atomic, int flags, int *sblocked) { int error = 0; int32_t space; @@ -2062,11 +2020,7 @@ defunct: } } - if (so->so_flags & SOF_ENABLE_MSGS) { - space = msgq_sbspace(so, control); - } else { - space = sbspace(&so->so_snd); - } + space = sbspace(&so->so_snd); if (flags & MSG_OOB) { space += 1024; @@ -2177,7 +2131,6 @@ sosend(struct socket *so, struct sockaddr *addr, struct uio *uio, int atomic = sosendallatonce(so) || top; int sblocked = 0; struct proc *p = current_proc(); - struct mbuf *control_copy = NULL; uint16_t headroom = 0; boolean_t en_tracing = FALSE; @@ -2235,13 +2188,12 @@ sosend(struct socket *so, struct sockaddr *addr, struct uio *uio, * causes us to loop sending 0-length segments to the protocol. * * Usually, MSG_EOR isn't used on SOCK_STREAM type sockets. - * But it will be used by sockets doing message delivery. 
* * Note: We limit resid to be a positive int value as we use * imin() to set bytes_to_copy -- radr://14558484 */ - if (resid < 0 || resid > INT_MAX || (so->so_type == SOCK_STREAM && - !(so->so_flags & SOF_ENABLE_MSGS) && (flags & MSG_EOR))) { + if (resid < 0 || resid > INT_MAX || + (so->so_type == SOCK_STREAM && (flags & MSG_EOR))) { error = EINVAL; goto out_locked; } @@ -2261,17 +2213,13 @@ sosend(struct socket *so, struct sockaddr *addr, struct uio *uio, do { error = sosendcheck(so, addr, resid, clen, atomic, flags, - &sblocked, control); + &sblocked); if (error) { goto out_locked; } mp = ⊤ - if (so->so_flags & SOF_ENABLE_MSGS) { - space = msgq_sbspace(so, control); - } else { - space = sbspace(&so->so_snd) - clen; - } + space = sbspace(&so->so_snd) - clen; space += ((flags & MSG_OOB) ? 1024 : 0); do { @@ -2446,12 +2394,14 @@ sosend(struct socket *so, struct sockaddr *addr, struct uio *uio, * reserving the socket headroom */ if (freelist == NULL) { - if (top == NULL) { - MGETHDR(freelist, - M_WAIT, MT_DATA); - } else { - MGET(freelist, - M_WAIT, MT_DATA); + if (SOCK_TYPE(so) != SOCK_STREAM || bytes_to_alloc <= MINCLSIZE) { + if (top == NULL) { + MGETHDR(freelist, + M_WAIT, MT_DATA); + } else { + MGET(freelist, + M_WAIT, MT_DATA); + } } if (freelist == NULL) { @@ -2570,14 +2520,6 @@ sosend(struct socket *so, struct sockaddr *addr, struct uio *uio, } #endif /* CONTENT_FILTER */ } - if (so->so_flags & SOF_ENABLE_MSGS) { - /* - * Make a copy of control mbuf, - * so that msg priority can be - * passed to subsequent mbufs. - */ - control_copy = m_dup(control, M_NOWAIT); - } error = (*so->so_proto->pr_usrreqs->pru_send) (so, sendflags, top, addr, control, p); @@ -2586,8 +2528,7 @@ sosend(struct socket *so, struct sockaddr *addr, struct uio *uio, } clen = 0; - control = control_copy; - control_copy = NULL; + control = NULL; top = NULL; mp = ⊤ if (error) { @@ -2611,9 +2552,6 @@ out_locked: if (freelist != NULL) { m_freem_list(freelist); } - if (control_copy != NULL) { - m_freem(control_copy); - } soclearfastopen(so); @@ -2744,8 +2682,7 @@ sosend_list(struct socket *so, struct uio **uioarray, u_int uiocnt, int flags) (so->so_proto->pr_flags & PR_ATOMIC); OSIncrementAtomicLong(&p->p_stats->p_ru.ru_msgsnd); - error = sosendcheck(so, NULL, resid, 0, atomic, flags, - &sblocked, NULL); + error = sosendcheck(so, NULL, resid, 0, atomic, flags, &sblocked); if (error) { goto release; } @@ -3252,6 +3189,51 @@ done: return error; } +/* + * If we have less data than requested, block awaiting more + * (subject to any timeout) if: + * 1. the current count is less than the low water mark, or + * 2. MSG_WAITALL is set, and it is possible to do the entire + * receive operation at once if we block (resid <= hiwat). + * 3. MSG_DONTWAIT is not set + * If MSG_WAITALL is set but resid is larger than the receive buffer, + * we have to do the receive in sections, and thus risk returning + * a short count if a timeout or signal occurs after we start. + */ +static boolean_t +so_should_wait(struct socket *so, struct uio *uio, struct mbuf *m, int flags) +{ + struct protosw *pr = so->so_proto; + + /* No mbufs in the receive-queue? Wait! */ + if (m == NULL) { + return true; + } + + /* Not enough data in the receive socket-buffer - we may have to wait */ + if ((flags & MSG_DONTWAIT) == 0 && so->so_rcv.sb_cc < uio_resid(uio) && + m->m_nextpkt == NULL && (pr->pr_flags & PR_ATOMIC) == 0) { + /* + * Application did set the lowater-mark, so we should wait for + * this data to be present. 
+ */ + if (so->so_rcv.sb_cc < so->so_rcv.sb_lowat) { + return true; + } + + /* + * Application wants all the data - so let's try to do the + * receive-operation at once by waiting for everything to + * be there. + */ + if ((flags & MSG_WAITALL) && uio_resid(uio) <= so->so_rcv.sb_hiwat) { + return true; + } + } + + return false; +} + /* * Implement receive operations on a socket. * We depend on the way that records are added to the sockbuf @@ -3302,7 +3284,6 @@ soreceive(struct socket *so, struct sockaddr **psa, struct uio *uio, user_ssize_t orig_resid = uio_resid(uio); user_ssize_t delayed_copy_len; int can_delay; - int need_event; struct proc *p = current_proc(); boolean_t en_tracing = FALSE; @@ -3511,22 +3492,7 @@ restart: } m = so->so_rcv.sb_mb; - /* - * If we have less data than requested, block awaiting more - * (subject to any timeout) if: - * 1. the current count is less than the low water mark, or - * 2. MSG_WAITALL is set, and it is possible to do the entire - * receive operation at once if we block (resid <= hiwat). - * 3. MSG_DONTWAIT is not set - * If MSG_WAITALL is set but resid is larger than the receive buffer, - * we have to do the receive in sections, and thus risk returning - * a short count if a timeout or signal occurs after we start. - */ - if (m == NULL || (((flags & MSG_DONTWAIT) == 0 && - so->so_rcv.sb_cc < uio_resid(uio)) && - (so->so_rcv.sb_cc < so->so_rcv.sb_lowat || - ((flags & MSG_WAITALL) && uio_resid(uio) <= so->so_rcv.sb_hiwat)) && - m->m_nextpkt == NULL && (pr->pr_flags & PR_ATOMIC) == 0)) { + if (so_should_wait(so, uio, m, flags)) { /* * Panic if we notice inconsistencies in the socket's * receive list; both sb_mb and sb_cc should correctly @@ -3593,7 +3559,24 @@ restart: } #endif - error = sbwait(&so->so_rcv); + /* + * Depending on the protocol (e.g. TCP), the following + * might cause the socket lock to be dropped and later + * be reacquired, and more data could have arrived and + * have been appended to the receive socket buffer by + * the time it returns. Therefore, we only sleep in + * sbwait() below if and only if the wait-condition is still + * true. + */ + if ((pr->pr_flags & PR_WANTRCVD) && so->so_pcb != NULL) { + (*pr->pr_usrreqs->pru_rcvd)(so, flags); + } + + error = 0; + if (so_should_wait(so, uio, so->so_rcv.sb_mb, flags)) { + error = sbwait(&so->so_rcv); + } + #if EVEN_MORE_LOCKING_DEBUG if (socket_debug) { printf("SORECEIVE - sbwait returned %d\n", error); @@ -3648,28 +3631,6 @@ dontblock: orig_resid = 0; } - /* - * If the socket is a TCP socket with message delivery - * enabled, then create a control msg to deliver the - * relative TCP sequence number for this data. Waiting - * until this point will protect against failures to - * allocate an mbuf for control msgs. 
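so_should_wait() above factors the historical "should soreceive block?" test out of one large condition so it can be re-evaluated after pru_rcvd() may have dropped and re-taken the socket lock. A stand-alone restatement of the predicate with stub fields; the packet-boundary (m_nextpkt) and PR_ATOMIC checks of the real code are omitted, and the flag bits are stand-ins:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define SK_MSG_DONTWAIT  0x0080
#define SK_MSG_WAITALL   0x0040

struct rcvbuf_sketch {
	uint32_t cc;	/* bytes queued (sb_cc) */
	uint32_t lowat;	/* low-water mark (sb_lowat) */
	uint32_t hiwat;	/* high-water mark (sb_hiwat) */
};

static bool
should_wait_sketch(const struct rcvbuf_sketch *rb, size_t resid,
    bool have_data, int flags)
{
	if (!have_data) {
		return true;	/* nothing queued at all: always wait */
	}
	if ((flags & SK_MSG_DONTWAIT) == 0 && rb->cc < resid) {
		if (rb->cc < rb->lowat) {
			return true;	/* below the caller's low-water mark */
		}
		if ((flags & SK_MSG_WAITALL) && resid <= rb->hiwat) {
			return true;	/* the whole request could be satisfied by waiting */
		}
	}
	return false;
}

The second evaluation in soreceive() re-reads so_rcv.sb_mb after the pru_rcvd() callout, so data appended while the lock was dropped avoids a needless sleep in sbwait().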
- */ - if (so->so_type == SOCK_STREAM && SOCK_PROTO(so) == IPPROTO_TCP && - (so->so_flags & SOF_ENABLE_MSGS) && controlp != NULL) { - struct mbuf *seq_cm; - - seq_cm = sbcreatecontrol((caddr_t)&m->m_pkthdr.msg_seq, - sizeof(uint32_t), SCM_SEQNUM, SOL_SOCKET); - if (seq_cm == NULL) { - /* unable to allocate a control mbuf */ - error = ENOBUFS; - goto release; - } - *controlp = seq_cm; - controlp = &seq_cm->m_next; - } - if (m != NULL) { if (!(flags & MSG_PEEK)) { /* @@ -3712,8 +3673,6 @@ dontblock: can_delay = 0; } - need_event = 0; - while (m != NULL && (uio_resid(uio) - delayed_copy_len) > 0 && error == 0) { if (m->m_type == MT_OOBDATA) { @@ -3811,28 +3770,6 @@ dontblock: sbfree(&so->so_rcv, m); m->m_nextpkt = NULL; - /* - * If this packet is an unordered packet - * (indicated by M_UNORDERED_DATA flag), remove - * the additional bytes added to the - * receive socket buffer size. - */ - if ((so->so_flags & SOF_ENABLE_MSGS) && - m->m_len && - (m->m_flags & M_UNORDERED_DATA) && - sbreserve(&so->so_rcv, - so->so_rcv.sb_hiwat - m->m_len)) { - if (so->so_msg_state->msg_uno_bytes > - m->m_len) { - so->so_msg_state-> - msg_uno_bytes -= m->m_len; - } else { - so->so_msg_state-> - msg_uno_bytes = 0; - } - m->m_flags &= ~M_UNORDERED_DATA; - } - if (mp != NULL) { *mp = m; mp = &m->m_next; @@ -3895,12 +3832,6 @@ dontblock: so->so_oobmark -= len; if (so->so_oobmark == 0) { so->so_state |= SS_RCVATMARK; - /* - * delay posting the actual event until - * after any delayed copy processing - * has finished - */ - need_event = 1; break; } } else { @@ -3941,9 +3872,7 @@ dontblock: * sbwait() below if and only if the socket buffer is * empty, in order to avoid a false sleep. */ - if (pr->pr_flags & PR_WANTRCVD && so->so_pcb && - (((struct inpcb *)so->so_pcb)->inp_state != - INPCB_STATE_DEAD)) { + if ((pr->pr_flags & PR_WANTRCVD) && so->so_pcb != NULL) { (*pr->pr_usrreqs->pru_rcvd)(so, flags); } @@ -4052,9 +3981,6 @@ dontblock: m_freem_list(free_list); free_list = NULL; } - if (need_event) { - postevent(so, 0, EV_OOB); - } if (orig_resid == uio_resid(uio) && orig_resid && (flags & MSG_EOR) == 0 && (so->so_state & SS_CANTRCVMORE) == 0) { @@ -4702,7 +4628,6 @@ soshutdownlock_final(struct socket *so, int how) goto done; } sorflush(so); - postevent(so, 0, EV_RCLOSED); } if (how != SHUT_RD) { if ((so->so_state & SS_CANTSENDMORE) != 0) { @@ -4711,7 +4636,6 @@ soshutdownlock_final(struct socket *so, int how) goto done; } error = (*pr->pr_usrreqs->pru_shutdown)(so); - postevent(so, 0, EV_WCLOSED); } done: KERNEL_DEBUG(DBG_FNC_SOSHUTDOWN, how, 1, 0, 0, 0); @@ -5037,9 +4961,6 @@ sosetoptlock(struct socket *so, struct sockopt *sopt, int dolock) int64_t long_optval; struct linger l; struct timeval tv; -#if CONFIG_MACF_SOCKET - struct mac extmac; -#endif /* MAC_SOCKET */ if (sopt->sopt_dir != SOPT_SET) { sopt->sopt_dir = SOPT_SET; @@ -5348,17 +5269,7 @@ sosetoptlock(struct socket *so, struct sockopt *sopt, int dolock) break; case SO_LABEL: -#if CONFIG_MACF_SOCKET - if ((error = sooptcopyin(sopt, &extmac, sizeof(extmac), - sizeof(extmac))) != 0) { - goto out; - } - - error = mac_setsockopt_label(proc_ucred(sopt->sopt_p), - so, &extmac); -#else error = EOPNOTSUPP; -#endif /* MAC_SOCKET */ break; case SO_UPCALLCLOSEWAIT: @@ -5786,6 +5697,19 @@ sosetoptlock(struct socket *so, struct sockopt *sopt, int dolock) } break; } + case SO_WANT_KEV_SOCKET_CLOSED: { + error = sooptcopyin(sopt, &optval, sizeof(optval), + sizeof(optval)); + if (error != 0) { + goto out; + } + if (optval == 0) { + so->so_flags1 &= ~SOF1_WANT_KEV_SOCK_CLOSED; 
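The new SO_WANT_KEV_SOCKET_CLOSED option shown here makes the socket-closed kernel event opt-in: the option handler toggles a flag, and socket_post_kev_msg_closed() (later in this diff) returns early when it is clear. A sketch of both sides with an illustrative flag value; the real SOF1_WANT_KEV_SOCK_CLOSED bit is defined elsewhere in the socket headers:

#include <stdbool.h>
#include <stdint.h>

#define WANT_CLOSED_EVENT 0x00000800u	/* illustrative bit, not the real value */

struct sockflags_sketch {
	uint32_t flags1;
};

/* setsockopt side: any non-zero optval opts the socket in. */
static void
set_want_closed_event(struct sockflags_sketch *so, int optval)
{
	if (optval == 0) {
		so->flags1 &= ~WANT_CLOSED_EVENT;
	} else {
		so->flags1 |= WANT_CLOSED_EVENT;
	}
}

/* event side: the closed kevent is only posted for sockets that opted in. */
static bool
should_post_closed_event(const struct sockflags_sketch *so)
{
	return (so->flags1 & WANT_CLOSED_EVENT) != 0;
}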
+ } else { + so->so_flags1 |= SOF1_WANT_KEV_SOCK_CLOSED; + } + break; + } default: error = ENOPROTOOPT; break; @@ -5879,9 +5803,6 @@ sogetoptlock(struct socket *so, struct sockopt *sopt, int dolock) int error, optval; struct linger l; struct timeval tv; -#if CONFIG_MACF_SOCKET - struct mac extmac; -#endif /* MAC_SOCKET */ if (sopt->sopt_dir != SOPT_GET) { sopt->sopt_dir = SOPT_GET; @@ -6078,33 +5999,11 @@ integer: break; case SO_LABEL: -#if CONFIG_MACF_SOCKET - if ((error = sooptcopyin(sopt, &extmac, sizeof(extmac), - sizeof(extmac))) != 0 || - (error = mac_socket_label_get(proc_ucred( - sopt->sopt_p), so, &extmac)) != 0) { - break; - } - - error = sooptcopyout(sopt, &extmac, sizeof(extmac)); -#else error = EOPNOTSUPP; -#endif /* MAC_SOCKET */ break; case SO_PEERLABEL: -#if CONFIG_MACF_SOCKET - if ((error = sooptcopyin(sopt, &extmac, sizeof(extmac), - sizeof(extmac))) != 0 || - (error = mac_socketpeer_label_get(proc_ucred( - sopt->sopt_p), so, &extmac)) != 0) { - break; - } - - error = sooptcopyout(sopt, &extmac, sizeof(extmac)); -#else error = EOPNOTSUPP; -#endif /* MAC_SOCKET */ break; #ifdef __APPLE_API_PRIVATE @@ -6136,11 +6035,6 @@ integer: optval = (so->so_flags & SOF_RECV_TRAFFIC_CLASS); goto integer; - case SO_TRAFFIC_CLASS_STATS: - error = sooptcopyout(sopt, &so->so_tc_stats, - sizeof(so->so_tc_stats)); - break; - #if (DEVELOPMENT || DEBUG) case SO_TRAFFIC_CLASS_DBG: error = sogetopt_tcdbg(so, sopt); @@ -6482,22 +6376,13 @@ sopoll(struct socket *so, int events, kauth_cred_t cred, void * wql) int soo_kqfilter(struct fileproc *fp, struct knote *kn, struct kevent_qos_s *kev) { - struct socket *so = (struct socket *)fp->f_fglob->fg_data; + struct socket *so = (struct socket *)fp->fp_glob->fg_data; int result; socket_lock(so, 1); so_update_last_owner_locked(so, PROC_NULL); so_update_policy(so); -#if CONFIG_MACF_SOCKET - proc_t p = knote_get_kq(kn)->kq_p; - if (mac_socket_check_kqfilter(proc_ucred(p), kn, so) != 0) { - socket_unlock(so, 1); - knote_set_error(kn, EPERM); - return 0; - } -#endif /* MAC_SOCKET */ - switch (kn->kn_filter) { case EVFILT_READ: kn->kn_filtid = EVFILTID_SOREAD; @@ -6610,7 +6495,7 @@ out: static int filt_sorattach(struct knote *kn, __unused struct kevent_qos_s *kev) { - struct socket *so = (struct socket *)kn->kn_fp->f_fglob->fg_data; + struct socket *so = (struct socket *)kn->kn_fp->fp_glob->fg_data; /* socket locked */ @@ -6637,7 +6522,7 @@ filt_sorattach(struct knote *kn, __unused struct kevent_qos_s *kev) static void filt_sordetach(struct knote *kn) { - struct socket *so = (struct socket *)kn->kn_fp->f_fglob->fg_data; + struct socket *so = (struct socket *)kn->kn_fp->fp_glob->fg_data; socket_lock(so, 1); if (so->so_rcv.sb_flags & SB_KNOTE) { @@ -6652,7 +6537,7 @@ filt_sordetach(struct knote *kn) static int filt_soread(struct knote *kn, long hint) { - struct socket *so = (struct socket *)kn->kn_fp->f_fglob->fg_data; + struct socket *so = (struct socket *)kn->kn_fp->fp_glob->fg_data; int retval; if ((hint & SO_FILT_HINT_LOCKED) == 0) { @@ -6671,7 +6556,7 @@ filt_soread(struct knote *kn, long hint) static int filt_sortouch(struct knote *kn, struct kevent_qos_s *kev) { - struct socket *so = (struct socket *)kn->kn_fp->f_fglob->fg_data; + struct socket *so = (struct socket *)kn->kn_fp->fp_glob->fg_data; int retval; socket_lock(so, 1); @@ -6691,7 +6576,7 @@ filt_sortouch(struct knote *kn, struct kevent_qos_s *kev) static int filt_sorprocess(struct knote *kn, struct kevent_qos_s *kev) { - struct socket *so = (struct socket *)kn->kn_fp->f_fglob->fg_data; + struct 
socket *so = (struct socket *)kn->kn_fp->fp_glob->fg_data; int retval; socket_lock(so, 1); @@ -6791,7 +6676,7 @@ out: static int filt_sowattach(struct knote *kn, __unused struct kevent_qos_s *kev) { - struct socket *so = (struct socket *)kn->kn_fp->f_fglob->fg_data; + struct socket *so = (struct socket *)kn->kn_fp->fp_glob->fg_data; /* socket locked */ if (KNOTE_ATTACH(&so->so_snd.sb_sel.si_note, kn)) { @@ -6805,7 +6690,7 @@ filt_sowattach(struct knote *kn, __unused struct kevent_qos_s *kev) static void filt_sowdetach(struct knote *kn) { - struct socket *so = (struct socket *)kn->kn_fp->f_fglob->fg_data; + struct socket *so = (struct socket *)kn->kn_fp->fp_glob->fg_data; socket_lock(so, 1); if (so->so_snd.sb_flags & SB_KNOTE) { @@ -6820,7 +6705,7 @@ filt_sowdetach(struct knote *kn) static int filt_sowrite(struct knote *kn, long hint) { - struct socket *so = (struct socket *)kn->kn_fp->f_fglob->fg_data; + struct socket *so = (struct socket *)kn->kn_fp->fp_glob->fg_data; int ret; if ((hint & SO_FILT_HINT_LOCKED) == 0) { @@ -6839,7 +6724,7 @@ filt_sowrite(struct knote *kn, long hint) static int filt_sowtouch(struct knote *kn, struct kevent_qos_s *kev) { - struct socket *so = (struct socket *)kn->kn_fp->f_fglob->fg_data; + struct socket *so = (struct socket *)kn->kn_fp->fp_glob->fg_data; int ret; socket_lock(so, 1); @@ -6859,7 +6744,7 @@ filt_sowtouch(struct knote *kn, struct kevent_qos_s *kev) static int filt_sowprocess(struct knote *kn, struct kevent_qos_s *kev) { - struct socket *so = (struct socket *)kn->kn_fp->f_fglob->fg_data; + struct socket *so = (struct socket *)kn->kn_fp->fp_glob->fg_data; int ret; socket_lock(so, 1); @@ -6961,7 +6846,7 @@ filt_sockev_common(struct knote *kn, struct kevent_qos_s *kev, data = so->so_error; kn->kn_flags |= EV_EOF; } else { - u_int32_t data32; + u_int32_t data32 = 0; get_sockev_state(so, &data32); data = data32; } @@ -7011,7 +6896,7 @@ filt_sockev_common(struct knote *kn, struct kevent_qos_s *kev, static int filt_sockattach(struct knote *kn, __unused struct kevent_qos_s *kev) { - struct socket *so = (struct socket *)kn->kn_fp->f_fglob->fg_data; + struct socket *so = (struct socket *)kn->kn_fp->fp_glob->fg_data; /* socket locked */ kn->kn_hook32 = 0; @@ -7026,7 +6911,7 @@ filt_sockattach(struct knote *kn, __unused struct kevent_qos_s *kev) static void filt_sockdetach(struct knote *kn) { - struct socket *so = (struct socket *)kn->kn_fp->f_fglob->fg_data; + struct socket *so = (struct socket *)kn->kn_fp->fp_glob->fg_data; socket_lock(so, 1); if ((so->so_flags & SOF_KNOTE) != 0) { @@ -7041,7 +6926,7 @@ static int filt_sockev(struct knote *kn, long hint) { int ret = 0, locked = 0; - struct socket *so = (struct socket *)kn->kn_fp->f_fglob->fg_data; + struct socket *so = (struct socket *)kn->kn_fp->fp_glob->fg_data; long ev_hint = (hint & SO_FILT_HINT_EV); if ((hint & SO_FILT_HINT_LOCKED) == 0) { @@ -7068,7 +6953,7 @@ filt_socktouch( struct knote *kn, struct kevent_qos_s *kev) { - struct socket *so = (struct socket *)kn->kn_fp->f_fglob->fg_data; + struct socket *so = (struct socket *)kn->kn_fp->fp_glob->fg_data; uint32_t changed_flags; int ret; @@ -7109,7 +6994,7 @@ filt_socktouch( static int filt_sockprocess(struct knote *kn, struct kevent_qos_s *kev) { - struct socket *so = (struct socket *)kn->kn_fp->f_fglob->fg_data; + struct socket *so = (struct socket *)kn->kn_fp->fp_glob->fg_data; int ret = 0; socket_lock(so, 1); @@ -7601,8 +7486,7 @@ so_set_extended_bk_idle(struct socket *so, int optval) soresume(current_proc(), so, 1); } else { struct proc *p = 
current_proc(); - int i; - struct filedesc *fdp; + struct fileproc *fp; int count = 0; /* @@ -7612,19 +7496,14 @@ so_set_extended_bk_idle(struct socket *so, int optval) socket_unlock(so, 0); proc_fdlock(p); - - fdp = p->p_fd; - for (i = 0; i < fdp->fd_nfiles; i++) { - struct fileproc *fp = fdp->fd_ofiles[i]; + fdt_foreach(fp, p) { struct socket *so2; - if (fp == NULL || - (fdp->fd_ofileflags[i] & UF_RESERVED) != 0 || - FILEGLOB_DTYPE(fp->f_fglob) != DTYPE_SOCKET) { + if (FILEGLOB_DTYPE(fp->fp_glob) != DTYPE_SOCKET) { continue; } - so2 = (struct socket *)fp->f_fglob->fg_data; + so2 = (struct socket *)fp->fp_glob->fg_data; if (so != so2 && so2->so_flags1 & SOF1_EXTEND_BK_IDLE_WANTED) { count++; @@ -7728,23 +7607,16 @@ void resume_proc_sockets(proc_t p) { if (p->p_ladvflag & P_LXBKIDLEINPROG) { - struct filedesc *fdp; - int i; + struct fileproc *fp; + struct socket *so; proc_fdlock(p); - fdp = p->p_fd; - for (i = 0; i < fdp->fd_nfiles; i++) { - struct fileproc *fp; - struct socket *so; - - fp = fdp->fd_ofiles[i]; - if (fp == NULL || - (fdp->fd_ofileflags[i] & UF_RESERVED) != 0 || - FILEGLOB_DTYPE(fp->f_fglob) != DTYPE_SOCKET) { + fdt_foreach(fp, p) { + if (FILEGLOB_DTYPE(fp->fp_glob) != DTYPE_SOCKET) { continue; } - so = (struct socket *)fp->f_fglob->fg_data; + so = (struct socket *)fp->fp_glob->fg_data; (void) soresume(p, so, 0); } proc_fdunlock(p); @@ -7758,11 +7630,7 @@ so_set_recv_anyif(struct socket *so, int optval) { int ret = 0; -#if INET6 if (SOCK_DOM(so) == PF_INET || SOCK_DOM(so) == PF_INET6) { -#else - if (SOCK_DOM(so) == PF_INET) { -#endif /* !INET6 */ if (optval) { sotoinpcb(so)->inp_flags |= INP_RECV_ANYIF; } else { @@ -7779,11 +7647,7 @@ so_get_recv_anyif(struct socket *so) { int ret = 0; -#if INET6 if (SOCK_DOM(so) == PF_INET || SOCK_DOM(so) == PF_INET6) { -#else - if (SOCK_DOM(so) == PF_INET) { -#endif /* !INET6 */ ret = (sotoinpcb(so)->inp_flags & INP_RECV_ANYIF) ? 
1 : 0; } @@ -7826,11 +7690,7 @@ so_set_restrictions(struct socket *so, uint32_t vals) (noconstrained_new - noconstrained_old) == 0) { return 0; } -#if INET6 if (SOCK_DOM(so) == PF_INET || SOCK_DOM(so) == PF_INET6) { -#else - if (SOCK_DOM(so) == PF_INET) { -#endif /* !INET6 */ if (nocell_new - nocell_old != 0) { /* * if deny cellular is now set, do what's needed @@ -8128,10 +7988,13 @@ socket_post_kev_msg(uint32_t ev_code, void socket_post_kev_msg_closed(struct socket *so) { - struct kev_socket_closed ev; + struct kev_socket_closed ev = {}; struct sockaddr *socksa = NULL, *peersa = NULL; int err; - bzero(&ev, sizeof(ev)); + + if ((so->so_flags1 & SOF1_WANT_KEV_SOCK_CLOSED) == 0) { + return; + } err = (*so->so_proto->pr_usrreqs->pru_sockaddr)(so, &socksa); if (err == 0) { err = (*so->so_proto->pr_usrreqs->pru_peeraddr)(so, diff --git a/bsd/kern/uipc_socket2.c b/bsd/kern/uipc_socket2.c index cbac73e06..c3b8d739f 100644 --- a/bsd/kern/uipc_socket2.c +++ b/bsd/kern/uipc_socket2.c @@ -232,7 +232,6 @@ soisconnected(struct socket *so) so_release_accept_list(head); socket_unlock(so, 0); } - postevent(head, 0, EV_RCONN); sorwakeup(head); wakeup_one((caddr_t)&head->so_timeo); @@ -245,7 +244,6 @@ soisconnected(struct socket *so) socket_unlock(head, 1); } } else { - postevent(so, 0, EV_WCONN); wakeup((caddr_t)&so->so_timeo); sorwakeup(so); sowwakeup(so); @@ -393,7 +391,7 @@ sonewconn_internal(struct socket *head, int connstatus) (SOF_NOSIGPIPE | SOF_NOADDRAVAIL | SOF_REUSESHAREUID | SOF_NOTIFYCONFLICT | SOF_BINDRANDOMPORT | SOF_NPX_SETOPTSHUT | SOF_NODEFUNCT | SOF_PRIVILEGED_TRAFFIC_CLASS | SOF_NOTSENT_LOWAT | - SOF_USELRO | SOF_DELEGATED); + SOF_DELEGATED); so->so_flags1 |= SOF1_INBOUND; so->so_usecount = 1; so->next_lock_lr = 0; @@ -401,11 +399,6 @@ sonewconn_internal(struct socket *head, int connstatus) so->so_rcv.sb_flags |= SB_RECV; /* XXX */ so->so_rcv.sb_so = so->so_snd.sb_so = so; - TAILQ_INIT(&so->so_evlist); - -#if CONFIG_MACF_SOCKET - mac_socket_label_associate_accept(head, so); -#endif /* inherit traffic management properties of listener */ so->so_flags1 |= @@ -759,7 +752,7 @@ sowakeup(struct socket *so, struct sockbuf *sb, struct socket *so2) * ENOBUFS */ int -soreserve(struct socket *so, u_int32_t sndcc, u_int32_t rcvcc) +soreserve(struct socket *so, uint32_t sndcc, uint32_t rcvcc) { /* * We do not want to fail the creation of a socket @@ -770,10 +763,10 @@ soreserve(struct socket *so, u_int32_t sndcc, u_int32_t rcvcc) */ uint64_t maxcc = (uint64_t)sb_max * MCLBYTES / (MSIZE + MCLBYTES); if (sndcc > maxcc) { - sndcc = maxcc; + sndcc = (uint32_t)maxcc; } if (rcvcc > maxcc) { - rcvcc = maxcc; + rcvcc = (uint32_t)maxcc; } if (sbreserve(&so->so_snd, sndcc) == 0) { goto bad; @@ -821,7 +814,8 @@ soreserve_preconnect(struct socket *so, unsigned int pre_cc) int sbreserve(struct sockbuf *sb, u_int32_t cc) { - if ((u_quad_t)cc > (u_quad_t)sb_max * MCLBYTES / (MSIZE + MCLBYTES)) { + if ((u_quad_t)cc > (u_quad_t)sb_max * MCLBYTES / (MSIZE + MCLBYTES) || + (cc > sb->sb_hiwat && (sb->sb_flags & SB_LIMITED))) { return 0; } sb->sb_hiwat = cc; @@ -1271,8 +1265,6 @@ sbappendchain(struct sockbuf *sb, struct mbuf *m, int space) SBLASTMBUFCHK(sb, __func__); SBLASTRECORDCHK(sb, "sbappendadddr 2"); - - postevent(0, sb, EV_RWBYTES); return 1; } @@ -1430,8 +1422,6 @@ sbappendcontrol_internal(struct sockbuf *sb, struct mbuf *m0, SBLASTMBUFCHK(sb, __func__); SBLASTRECORDCHK(sb, "sbappendcontrol 2"); - - postevent(0, sb, EV_RWBYTES); return 1; } @@ -1509,82 +1499,10 @@ sbappendcontrol(struct sockbuf *sb, struct mbuf 
*m0, struct mbuf *control, } /* - * Append a contiguous TCP data blob with TCP sequence number as control data - * as a new msg to the receive socket buffer. - */ -int -sbappendmsgstream_rcv(struct sockbuf *sb, struct mbuf *m, uint32_t seqnum, - int unordered) -{ - struct mbuf *m_eor = NULL; - u_int32_t data_len = 0; - int ret = 0; - struct socket *so = sb->sb_so; - - if (m == NULL) { - return 0; - } - - VERIFY((m->m_flags & M_PKTHDR) && m_pktlen(m) > 0); - VERIFY(so->so_msg_state != NULL); - VERIFY(sb->sb_flags & SB_RECV); - - /* Keep the TCP sequence number in the mbuf pkthdr */ - m->m_pkthdr.msg_seq = seqnum; - - /* find last mbuf and set M_EOR */ - for (m_eor = m;; m_eor = m_eor->m_next) { - /* - * If the msg is unordered, we need to account for - * these bytes in receive socket buffer size. Otherwise, - * the receive window advertised will shrink because - * of the additional unordered bytes added to the - * receive buffer. - */ - if (unordered) { - m_eor->m_flags |= M_UNORDERED_DATA; - data_len += m_eor->m_len; - so->so_msg_state->msg_uno_bytes += m_eor->m_len; - } else { - m_eor->m_flags &= ~M_UNORDERED_DATA; - } - if (m_eor->m_next == NULL) { - break; - } - } - - /* set EOR flag at end of byte blob */ - m_eor->m_flags |= M_EOR; - - /* expand the receive socket buffer to allow unordered data */ - if (unordered && !sbreserve(sb, sb->sb_hiwat + data_len)) { - /* - * Could not allocate memory for unordered data, it - * means this packet will have to be delivered in order - */ - printf("%s: could not reserve space for unordered data\n", - __func__); - } - - if (!unordered && (sb->sb_mbtail != NULL) && - !(sb->sb_mbtail->m_flags & M_UNORDERED_DATA)) { - sb->sb_mbtail->m_flags &= ~M_EOR; - sbcompress(sb, m, sb->sb_mbtail); - ret = 1; - } else { - ret = sbappendrecord(sb, m); - } - VERIFY(sb->sb_mbtail->m_flags & M_EOR); - return ret; -} - -/* - * TCP streams have message based out of order delivery support, or have - * Multipath TCP support, or are regular TCP sockets + * TCP streams have Multipath TCP support or are regular TCP sockets. */ int -sbappendstream_rcvdemux(struct socket *so, struct mbuf *m, uint32_t seqnum, - int unordered) +sbappendstream_rcvdemux(struct socket *so, struct mbuf *m) { int ret = 0; @@ -1597,18 +1515,14 @@ sbappendstream_rcvdemux(struct socket *so, struct mbuf *m, uint32_t seqnum, return ret; } - if (so->so_flags & SOF_ENABLE_MSGS) { - ret = sbappendmsgstream_rcv(&so->so_rcv, m, seqnum, unordered); - } #if MPTCP - else if (so->so_flags & SOF_MP_SUBFLOW) { - ret = sbappendmptcpstream_rcv(&so->so_rcv, m); - } + if (so->so_flags & SOF_MP_SUBFLOW) { + return sbappendmptcpstream_rcv(&so->so_rcv, m); + } else #endif /* MPTCP */ - else { - ret = sbappendstream(&so->so_rcv, m); + { + return sbappendstream(&so->so_rcv, m); } - return ret; } #if MPTCP @@ -1654,214 +1568,6 @@ sbappendmptcpstream_rcv(struct sockbuf *sb, struct mbuf *m) } #endif /* MPTCP */ -/* - * Append message to send socket buffer based on priority. 
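With the message-delivery (SOF_ENABLE_MSGS) path deleted, sbappendstream_rcvdemux() above reduces to a two-way dispatch: MPTCP subflows go through the MPTCP append, everything else through the plain stream append. A sketch of the reduced shape with stub types and appenders (the SB_DROP early-free case of the real function is left out):

#include <stdbool.h>
#include <stddef.h>

struct sock_sketch { bool mptcp_subflow; };
struct mbuf_sketch  { size_t len; };

static int
append_mptcp_sketch(struct sock_sketch *so, struct mbuf_sketch *m)
{
	(void)so; (void)m;
	return 1;	/* stand-in for sbappendmptcpstream_rcv() */
}

static int
append_stream_sketch(struct sock_sketch *so, struct mbuf_sketch *m)
{
	(void)so; (void)m;
	return 1;	/* stand-in for sbappendstream() */
}

static int
rcv_demux_sketch(struct sock_sketch *so, struct mbuf_sketch *m)
{
	if (so->mptcp_subflow) {
		return append_mptcp_sketch(so, m);
	}
	return append_stream_sketch(so, m);
}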
- */ -int -sbappendmsg_snd(struct sockbuf *sb, struct mbuf *m) -{ - struct socket *so = sb->sb_so; - struct msg_priq *priq; - int set_eor = 0; - - VERIFY(so->so_msg_state != NULL); - - if (m->m_nextpkt != NULL || (sb->sb_mb != sb->sb_lastrecord)) { - panic("sbappendstream: nexpkt %p || mb %p != lastrecord %p\n", - m->m_nextpkt, sb->sb_mb, sb->sb_lastrecord); - } - - SBLASTMBUFCHK(sb, __func__); - - if (m == NULL || (sb->sb_flags & SB_DROP) || so->so_msg_state == NULL) { - if (m != NULL) { - m_freem(m); - } - return 0; - } - - priq = &so->so_msg_state->msg_priq[m->m_pkthdr.msg_pri]; - - /* note if we need to propogate M_EOR to the last mbuf */ - if (m->m_flags & M_EOR) { - set_eor = 1; - - /* Reset M_EOR from the first mbuf */ - m->m_flags &= ~(M_EOR); - } - - if (priq->msgq_head == NULL) { - VERIFY(priq->msgq_tail == NULL && priq->msgq_lastmsg == NULL); - priq->msgq_head = priq->msgq_lastmsg = m; - } else { - VERIFY(priq->msgq_tail->m_next == NULL); - - /* Check if the last message has M_EOR flag set */ - if (priq->msgq_tail->m_flags & M_EOR) { - /* Insert as a new message */ - priq->msgq_lastmsg->m_nextpkt = m; - - /* move the lastmsg pointer */ - priq->msgq_lastmsg = m; - } else { - /* Append to the existing message */ - priq->msgq_tail->m_next = m; - } - } - - /* Update accounting and the queue tail pointer */ - - while (m->m_next != NULL) { - sballoc(sb, m); - priq->msgq_bytes += m->m_len; - m = m->m_next; - } - sballoc(sb, m); - priq->msgq_bytes += m->m_len; - - if (set_eor) { - m->m_flags |= M_EOR; - - /* - * Since the user space can not write a new msg - * without completing the previous one, we can - * reset this flag to start sending again. - */ - priq->msgq_flags &= ~(MSGQ_MSG_NOTDONE); - } - - priq->msgq_tail = m; - - SBLASTRECORDCHK(sb, "sbappendstream 2"); - postevent(0, sb, EV_RWBYTES); - return 1; -} - -/* - * Pull data from priority queues to the serial snd queue - * right before sending. - */ -void -sbpull_unordered_data(struct socket *so, int32_t off, int32_t len) -{ - int32_t topull, i; - struct msg_priq *priq = NULL; - - VERIFY(so->so_msg_state != NULL); - - topull = (off + len) - so->so_msg_state->msg_serial_bytes; - - i = MSG_PRI_MAX; - while (i >= MSG_PRI_MIN && topull > 0) { - struct mbuf *m = NULL, *mqhead = NULL, *mend = NULL; - priq = &so->so_msg_state->msg_priq[i]; - if ((priq->msgq_flags & MSGQ_MSG_NOTDONE) && - priq->msgq_head == NULL) { - /* - * We were in the middle of sending - * a message and we have not seen the - * end of it. - */ - VERIFY(priq->msgq_lastmsg == NULL && - priq->msgq_tail == NULL); - return; - } - if (priq->msgq_head != NULL) { - int32_t bytes = 0, topull_tmp = topull; - /* - * We found a msg while scanning the priority - * queue from high to low priority. - */ - m = priq->msgq_head; - mqhead = m; - mend = m; - - /* - * Move bytes from the priority queue to the - * serial queue. Compute the number of bytes - * being added. - */ - while (mqhead->m_next != NULL && topull_tmp > 0) { - bytes += mqhead->m_len; - topull_tmp -= mqhead->m_len; - mend = mqhead; - mqhead = mqhead->m_next; - } - - if (mqhead->m_next == NULL) { - /* - * If we have only one more mbuf left, - * move the last mbuf of this message to - * serial queue and set the head of the - * queue to be the next message. - */ - bytes += mqhead->m_len; - mend = mqhead; - mqhead = m->m_nextpkt; - if (!(mend->m_flags & M_EOR)) { - /* - * We have not seen the end of - * this message, so we can not - * pull anymore. 
- */ - priq->msgq_flags |= MSGQ_MSG_NOTDONE; - } else { - /* Reset M_EOR */ - mend->m_flags &= ~(M_EOR); - } - } else { - /* propogate the next msg pointer */ - mqhead->m_nextpkt = m->m_nextpkt; - } - priq->msgq_head = mqhead; - - /* - * if the lastmsg pointer points to - * the mbuf that is being dequeued, update - * it to point to the new head. - */ - if (priq->msgq_lastmsg == m) { - priq->msgq_lastmsg = priq->msgq_head; - } - - m->m_nextpkt = NULL; - mend->m_next = NULL; - - if (priq->msgq_head == NULL) { - /* Moved all messages, update tail */ - priq->msgq_tail = NULL; - VERIFY(priq->msgq_lastmsg == NULL); - } - - /* Move it to serial sb_mb queue */ - if (so->so_snd.sb_mb == NULL) { - so->so_snd.sb_mb = m; - } else { - so->so_snd.sb_mbtail->m_next = m; - } - - priq->msgq_bytes -= bytes; - VERIFY(priq->msgq_bytes >= 0); - sbwakeup(&so->so_snd); - - so->so_msg_state->msg_serial_bytes += bytes; - so->so_snd.sb_mbtail = mend; - so->so_snd.sb_lastrecord = so->so_snd.sb_mb; - - topull = - (off + len) - so->so_msg_state->msg_serial_bytes; - - if (priq->msgq_flags & MSGQ_MSG_NOTDONE) { - break; - } - } else { - --i; - } - } - sblastrecordchk(&so->so_snd, "sbpull_unordered_data"); - sblastmbufchk(&so->so_snd, "sbpull_unordered_data"); -} - /* * Compress mbuf chain m into the socket * buffer sb following mbuf n. If n @@ -1940,7 +1646,6 @@ sbcompress(struct sockbuf *sb, struct mbuf *m, struct mbuf *n) } done: SBLASTMBUFCHK(sb, __func__); - postevent(0, sb, EV_RWBYTES); } void @@ -1956,18 +1661,6 @@ sb_empty_assert(struct sockbuf *sb, const char *where) } } -static void -sbflush_priq(struct msg_priq *priq) -{ - struct mbuf *m; - m = priq->msgq_head; - if (m != NULL) { - m_freem_list(m); - } - priq->msgq_head = priq->msgq_tail = priq->msgq_lastmsg = NULL; - priq->msgq_bytes = priq->msgq_flags = 0; -} - /* * Free all mbufs in a sockbuf. * Check that all resources are reclaimed. @@ -1977,7 +1670,6 @@ sbflush(struct sockbuf *sb) { void *lr_saved = __builtin_return_address(0); struct socket *so = sb->sb_so; - u_int32_t i; /* so_usecount may be 0 if we get here from sofreelastref() */ if (so == NULL) { @@ -2012,18 +1704,7 @@ sbflush(struct sockbuf *sb) sbdrop(sb, (int)sb->sb_cc); } - if (!(sb->sb_flags & SB_RECV) && (so->so_flags & SOF_ENABLE_MSGS)) { - VERIFY(so->so_msg_state != NULL); - for (i = MSG_PRI_MIN; i <= MSG_PRI_MAX; ++i) { - sbflush_priq(&so->so_msg_state->msg_priq[i]); - } - so->so_msg_state->msg_serial_bytes = 0; - so->so_msg_state->msg_uno_bytes = 0; - } - sb_empty_assert(sb, __func__); - postevent(0, sb, EV_RWBYTES); - sbunlock(sb, TRUE); /* keep socket locked */ } @@ -2083,11 +1764,6 @@ sbdrop(struct sockbuf *sb, int len) */ sb->sb_cc = 0; sb->sb_mbcnt = 0; - if (!(sb->sb_flags & SB_RECV) && - (sb->sb_so->so_flags & SOF_ENABLE_MSGS)) { - sb->sb_so->so_msg_state-> - msg_serial_bytes = 0; - } break; } m = last = next; @@ -2149,8 +1825,6 @@ sbdrop(struct sockbuf *sb, int len) cfil_sock_buf_update(sb); #endif /* CONTENT_FILTER */ - postevent(0, sb, EV_RWBYTES); - KERNEL_DEBUG((DBG_FNC_SBDROP | DBG_FUNC_END), sb, 0, 0, 0, 0); } @@ -2173,7 +1847,6 @@ sbdroprecord(struct sockbuf *sb) } while (m); } SB_EMPTY_FIXUP(sb); - postevent(0, sb, EV_RWBYTES); } /* @@ -2196,7 +1869,7 @@ sbcreatecontrol(caddr_t p, int size, int type, int level) VERIFY(IS_P2ALIGNED(cp, sizeof(u_int32_t))); /* XXX check size? 
*/ (void) memcpy(CMSG_DATA(cp), p, size); - m->m_len = CMSG_SPACE(size); + m->m_len = (int32_t)CMSG_SPACE(size); cp->cmsg_len = CMSG_LEN(size); cp->cmsg_level = level; cp->cmsg_type = type; @@ -2225,7 +1898,7 @@ sbcreatecontrol_mbuf(caddr_t p, int size, int type, int level, struct mbuf **mp) cp = (struct cmsghdr *)(void *)(mtod(m, char *) + m->m_len); /* CMSG_SPACE ensures 32-bit alignment */ VERIFY(IS_P2ALIGNED(cp, sizeof(u_int32_t))); - m->m_len += CMSG_SPACE(size); + m->m_len += (int32_t)CMSG_SPACE(size); /* XXX check size? */ (void) memcpy(CMSG_DATA(cp), p, size); @@ -2542,33 +2215,6 @@ sbspace(struct sockbuf *sb) return space; } -/* - * If this socket has priority queues, check if there is enough - * space in the priority queue for this msg. - */ -int -msgq_sbspace(struct socket *so, struct mbuf *control) -{ - int space = 0, error; - u_int32_t msgpri = 0; - VERIFY(so->so_type == SOCK_STREAM && - SOCK_PROTO(so) == IPPROTO_TCP); - if (control != NULL) { - error = tcp_get_msg_priority(control, &msgpri); - if (error) { - return 0; - } - } else { - msgpri = MSG_PRI_0; - } - space = (so->so_snd.sb_idealsize / MSG_PRI_COUNT) - - so->so_msg_state->msg_priq[msgpri].msgq_bytes; - if (space < 0) { - space = 0; - } - return space; -} - /* do we have to send all at once on a socket? */ int sosendallatonce(struct socket *so) @@ -2918,7 +2564,7 @@ soevent(struct socket *so, long hint) } void -soevupcall(struct socket *so, u_int32_t hint) +soevupcall(struct socket *so, long hint) { if (so->so_event != NULL) { caddr_t so_eventarg = so->so_eventarg; @@ -2962,7 +2608,7 @@ soevent_ifdenied(struct socket *so) uuid_string_t buf; uuid_unparse(ev_ifdenied.ev_data.euuid, buf); - log(LOG_DEBUG, "%s[%d]: so 0x%llx [%d,%d] epid %d " + log(LOG_DEBUG, "%s[%d]: so 0x%llx [%d,%d] epid %llu " "euuid %s%s has %d redundant events supressed\n", __func__, so->last_pid, (uint64_t)VM_KERNEL_ADDRPERM(so), SOCK_DOM(so), @@ -2975,7 +2621,7 @@ soevent_ifdenied(struct socket *so) uuid_string_t buf; uuid_unparse(ev_ifdenied.ev_data.euuid, buf); - log(LOG_DEBUG, "%s[%d]: so 0x%llx [%d,%d] epid %d " + log(LOG_DEBUG, "%s[%d]: so 0x%llx [%d,%d] epid %llu " "euuid %s%s event posted\n", __func__, so->last_pid, (uint64_t)VM_KERNEL_ADDRPERM(so), SOCK_DOM(so), SOCK_TYPE(so), @@ -3041,7 +2687,7 @@ sotoxsocket(struct socket *so, struct xsocket *xso) } -#if !CONFIG_EMBEDDED +#if XNU_TARGET_OS_OSX void sotoxsocket64(struct socket *so, struct xsocket64 *xso) @@ -3071,7 +2717,7 @@ sotoxsocket64(struct socket *so, struct xsocket64 *xso) xso->so_uid = kauth_cred_getuid(so->so_cred); } -#endif /* !CONFIG_EMBEDDED */ +#endif /* XNU_TARGET_OS_OSX */ /* * This does the same for sockbufs. 
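/*
 * [Editor's illustrative sketch -- not part of the patch.]  sbcreatecontrol()
 * above is the kernel-side constructor for control mbufs such as
 * SCM_TIMESTAMP (note how m_len now comes from CMSG_SPACE() while cmsg_len
 * comes from CMSG_LEN()), and copyout_control() later in this diff is what
 * delivers them to userspace.  A minimal userspace consumer of such a
 * message, using only standard BSD socket APIs, shows the CMSG_* walk those
 * sizes must line up with:
 */
#include <string.h>
#include <sys/socket.h>
#include <sys/time.h>
#include <sys/uio.h>

static int
recv_with_timestamp(int fd, void *buf, size_t len, struct timeval *ts)
{
	int on = 1;
	union {
		char buf[CMSG_SPACE(sizeof(struct timeval))];
		struct cmsghdr align;              /* force cmsghdr alignment */
	} u;
	struct iovec iov = { .iov_base = buf, .iov_len = len };
	struct msghdr msg = {
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = u.buf, .msg_controllen = sizeof(u.buf),
	};
	struct cmsghdr *cp;

	/* Ask the kernel to attach SCM_TIMESTAMP to each received datagram. */
	setsockopt(fd, SOL_SOCKET, SO_TIMESTAMP, &on, sizeof(on));
	if (recvmsg(fd, &msg, 0) < 0) {
		return -1;
	}
	for (cp = CMSG_FIRSTHDR(&msg); cp != NULL; cp = CMSG_NXTHDR(&msg, cp)) {
		if (cp->cmsg_level == SOL_SOCKET && cp->cmsg_type == SCM_TIMESTAMP) {
			memcpy(ts, CMSG_DATA(cp), sizeof(*ts));
			return 0;
		}
	}
	return -1;      /* no timestamp attached */
}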
Note that the xsockbuf structure, @@ -3087,9 +2733,9 @@ sbtoxsockbuf(struct sockbuf *sb, struct xsockbuf *xsb) xsb->sb_mbcnt = sb->sb_mbcnt; xsb->sb_mbmax = sb->sb_mbmax; xsb->sb_lowat = sb->sb_lowat; - xsb->sb_flags = sb->sb_flags; + xsb->sb_flags = (short)sb->sb_flags; xsb->sb_timeo = (short) - (sb->sb_timeo.tv_sec * hz) + sb->sb_timeo.tv_usec / tick; + ((sb->sb_timeo.tv_sec * hz) + sb->sb_timeo.tv_usec / tick); if (xsb->sb_timeo == 0 && sb->sb_timeo.tv_usec != 0) { xsb->sb_timeo = 1; } @@ -3146,7 +2792,7 @@ soclearfastopen(struct socket *so) } void -sonullevent(struct socket *so, void *arg, uint32_t hint) +sonullevent(struct socket *so, void *arg, long hint) { #pragma unused(so, arg, hint) } diff --git a/bsd/kern/uipc_syscalls.c b/bsd/kern/uipc_syscalls.c index e2455a1f3..e82721676 100644 --- a/bsd/kern/uipc_syscalls.c +++ b/bsd/kern/uipc_syscalls.c @@ -106,13 +106,9 @@ #include #endif /* MAC_SOCKET_SUBSET */ -#define f_flag f_fglob->fg_flag -#define f_type f_fglob->fg_ops->fo_type -#define f_msgcount f_fglob->fg_msgcount -#define f_cred f_fglob->fg_cred -#define f_ops f_fglob->fg_ops -#define f_offset f_fglob->fg_offset -#define f_data f_fglob->fg_data +#define f_flag fp_glob->fg_flag +#define f_ops fp_glob->fg_ops +#define f_data fp_glob->fg_data #define DBG_LAYER_IN_BEG NETDBG_CODE(DBG_NETSOCK, 0) #define DBG_LAYER_IN_END NETDBG_CODE(DBG_NETSOCK, 2) @@ -139,9 +135,6 @@ #define DBG_PRINTF(...) do { } while (0) #endif -/* TODO: should be in header file */ -int falloc_locked(proc_t, struct fileproc **, int *, vfs_context_t, int); - static int sendit(struct proc *, struct socket *, struct user_msghdr *, uio_t, int, int32_t *); static int recvit(struct proc *, int, struct user_msghdr *, uio_t, user_addr_t, @@ -169,7 +162,7 @@ static u_int externalize_user_msghdr_array(void *, int, int, u_int, const struct user_msghdr_x *, struct uio **); static void free_uio_array(struct uio **, u_int); -static int uio_array_is_valid(struct uio **, u_int); +static boolean_t uio_array_is_valid(struct uio **, u_int); static int recv_msg_array_is_valid(struct recv_msg_elem *, u_int); static int internalize_recv_msghdr_array(const void *, int, int, u_int, struct user_msghdr_x *, struct recv_msg_elem *); @@ -393,9 +386,8 @@ listen(__unused struct proc *p, struct listen_args *uap, } /* - * Returns: fp_getfsock:EBADF Bad file descriptor - * fp_getfsock:EOPNOTSUPP ... 
- * xlate => :ENOTSOCK Socket operation on non-socket + * Returns: fp_get_ftype:EBADF Bad file descriptor + * fp_get_ftype:ENOTSOCK Socket operation on non-socket * :EFAULT Bad address on copyin/copyout * :EBADF Bad file descriptor * :EOPNOTSUPP Operation not supported on socket @@ -404,9 +396,9 @@ listen(__unused struct proc *p, struct listen_args *uap, * :ECONNABORTED Connection aborted * :EINTR Interrupted function * :EACCES Mandatory Access Control failure - * falloc_locked:ENFILE Too many files open in system - * falloc_locked::EMFILE Too many open files - * falloc_locked::ENOMEM Not enough space + * falloc:ENFILE Too many files open in system + * falloc:EMFILE Too many open files + * falloc:ENOMEM Not enough space * 0 Success */ int @@ -421,7 +413,7 @@ accept_nocancel(struct proc *p, struct accept_nocancel_args *uap, lck_mtx_t *mutex_held; int fd = uap->s; int newfd; - short fflag; /* type must match fp->f_flag */ + unsigned int fflag; int dosocklock = 0; *retval = -1; @@ -435,17 +427,12 @@ accept_nocancel(struct proc *p, struct accept_nocancel_args *uap, return error; } } - error = fp_getfsock(p, fd, &fp, &head); + error = fp_get_ftype(p, fd, DTYPE_SOCKET, ENOTSOCK, &fp); if (error) { - if (error == EOPNOTSUPP) { - error = ENOTSOCK; - } return error; } - if (head == NULL) { - error = EBADF; - goto out; - } + head = fp->f_data; + #if CONFIG_MACF_SOCKET_SUBSET if ((error = mac_socket_check_accept(kauth_cred_get(), head)) != 0) { goto out; @@ -795,9 +782,9 @@ connectx_nocancel(struct proc *p, struct connectx_args *uap, int *retval) } ep.sae_srcif = ep64.sae_srcif; - ep.sae_srcaddr = ep64.sae_srcaddr; + ep.sae_srcaddr = (user_addr_t)ep64.sae_srcaddr; ep.sae_srcaddrlen = ep64.sae_srcaddrlen; - ep.sae_dstaddr = ep64.sae_dstaddr; + ep.sae_dstaddr = (user_addr_t)ep64.sae_dstaddr; ep.sae_dstaddrlen = ep64.sae_dstaddrlen; } else { error = copyin(uap->endpoints, (caddr_t)&ep32, sizeof(ep32)); @@ -1311,7 +1298,8 @@ sendit(struct proc *p, struct socket *so, struct user_msghdr *mp, uio_t uiop, error = 0; } /* Generation of SIGPIPE can be controlled per socket */ - if (error == EPIPE && !(so->so_flags & SOF_NOSIGPIPE)) { + if (error == EPIPE && !(so->so_flags & SOF_NOSIGPIPE) && + !(flags & MSG_NOSIGNAL)) { psignal(p, SIGPIPE); } } @@ -1359,6 +1347,11 @@ sendto_nocancel(struct proc *p, goto done; } + if (uap->len > LONG_MAX) { + error = EINVAL; + goto done; + } + auio = uio_create(1, 0, (IS_64BIT_PROCESS(p) ? 
UIO_USERSPACE64 : UIO_USERSPACE32), UIO_WRITE); @@ -1450,11 +1443,11 @@ sendmsg_nocancel(struct proc *p, struct sendmsg_nocancel_args *uap, if (IS_64BIT_PROCESS(p)) { user_msg.msg_flags = msg64.msg_flags; user_msg.msg_controllen = msg64.msg_controllen; - user_msg.msg_control = msg64.msg_control; + user_msg.msg_control = (user_addr_t)msg64.msg_control; user_msg.msg_iovlen = msg64.msg_iovlen; - user_msg.msg_iov = msg64.msg_iov; + user_msg.msg_iov = (user_addr_t)msg64.msg_iov; user_msg.msg_namelen = msg64.msg_namelen; - user_msg.msg_name = msg64.msg_name; + user_msg.msg_name = (user_addr_t)msg64.msg_name; } else { user_msg.msg_flags = msg32.msg_flags; user_msg.msg_controllen = msg32.msg_controllen; @@ -1617,7 +1610,7 @@ sendmsg_x(struct proc *p, struct sendmsg_x_args *uap, user_ssize_t *retval) * Make sure the size of each message iovec and * the aggregate size of all the iovec is valid */ - if (uio_array_is_valid(uiop, uap->cnt) == 0) { + if (uio_array_is_valid(uiop, uap->cnt) == false) { error = EINVAL; goto out; } @@ -1711,7 +1704,8 @@ sendmsg_x(struct proc *p, struct sendmsg_x_args *uap, user_ssize_t *retval) error = 0; } /* Generation of SIGPIPE can be controlled per socket */ - if (error == EPIPE && !(so->so_flags & SOF_NOSIGPIPE)) { + if (error == EPIPE && !(so->so_flags & SOF_NOSIGPIPE) && + !(uap->flags & MSG_NOSIGNAL)) { psignal(p, SIGPIPE); } } @@ -1771,21 +1765,22 @@ out: static int copyout_control(struct proc *p, struct mbuf *m, user_addr_t control, - socklen_t *controllen, int *flags) + socklen_t *controllen, int *flags, struct socket *so) { int error = 0; - ssize_t len; + socklen_t len; user_addr_t ctlbuf; + struct inpcb *inp = so ? sotoinpcb(so) : NULL; len = *controllen; *controllen = 0; ctlbuf = control; while (m && len > 0) { - unsigned int tocopy; + socklen_t tocopy; struct cmsghdr *cp = mtod(m, struct cmsghdr *); - int cp_size = CMSG_ALIGN(cp->cmsg_len); - int buflen = m->m_len; + socklen_t cp_size = CMSG_ALIGN(cp->cmsg_len); + socklen_t buflen = m->m_len; while (buflen > 0 && len > 0) { /* @@ -1795,7 +1790,7 @@ copyout_control(struct proc *p, struct mbuf *m, user_addr_t control, if (cp->cmsg_level == SOL_SOCKET && cp->cmsg_type == SCM_TIMESTAMP) { unsigned char tmp_buffer[CMSG_SPACE(sizeof(struct user64_timeval))] = {}; struct cmsghdr *tmp_cp = (struct cmsghdr *)(void *)tmp_buffer; - int tmp_space; + socklen_t tmp_space; struct timeval *tv = (struct timeval *)(void *)CMSG_DATA(cp); tmp_cp->cmsg_level = SOL_SOCKET; @@ -1812,7 +1807,7 @@ copyout_control(struct proc *p, struct mbuf *m, user_addr_t control, } else { struct user32_timeval *tv32 = (struct user32_timeval *)(void *)CMSG_DATA(tmp_cp); - tv32->tv_sec = tv->tv_sec; + tv32->tv_sec = (user32_time_t)tv->tv_sec; tv32->tv_usec = tv->tv_usec; tmp_cp->cmsg_len = CMSG_LEN(sizeof(struct user32_timeval)); @@ -1829,19 +1824,31 @@ copyout_control(struct proc *p, struct mbuf *m, user_addr_t control, goto out; } } else { - if (cp_size > buflen) { - panic("cp_size > buflen, something" - "wrong with alignment!"); - } - if (len >= cp_size) { - tocopy = cp_size; - } else { - *flags |= MSG_CTRUNC; - tocopy = len; - } - error = copyout((caddr_t) cp, ctlbuf, tocopy); - if (error) { - goto out; +#if CONTENT_FILTER + /* If socket is attached to Content Filter and socket did not request address, ignore it */ + if ((so != NULL) && (so->so_cfil_db != NULL) && + ((cp->cmsg_level == IPPROTO_IP && cp->cmsg_type == IP_RECVDSTADDR && inp && + !(inp->inp_flags & INP_RECVDSTADDR)) || + (cp->cmsg_level == IPPROTO_IPV6 && (cp->cmsg_type == 
IPV6_PKTINFO || cp->cmsg_type == IPV6_2292PKTINFO) && inp && + !(inp->inp_flags & IN6P_PKTINFO)))) { + tocopy = 0; + } else +#endif + { + if (cp_size > buflen) { + panic("cp_size > buflen, something" + "wrong with alignment!"); + } + if (len >= cp_size) { + tocopy = cp_size; + } else { + *flags |= MSG_CTRUNC; + tocopy = len; + } + error = copyout((caddr_t) cp, ctlbuf, tocopy); + if (error) { + goto out; + } } } @@ -1856,7 +1863,7 @@ copyout_control(struct proc *p, struct mbuf *m, user_addr_t control, m = m->m_next; } - *controllen = ctlbuf - control; + *controllen = (socklen_t)(ctlbuf - control); out: return error; } @@ -1895,26 +1902,11 @@ recvit(struct proc *p, int s, struct user_msghdr *mp, uio_t uiop, struct fileproc *fp; KERNEL_DEBUG(DBG_FNC_RECVIT | DBG_FUNC_START, 0, 0, 0, 0, 0); - proc_fdlock(p); - if ((error = fp_lookup(p, s, &fp, 1))) { + if ((error = fp_get_ftype(p, s, DTYPE_SOCKET, ENOTSOCK, &fp))) { KERNEL_DEBUG(DBG_FNC_RECVIT | DBG_FUNC_END, error, 0, 0, 0, 0); - proc_fdunlock(p); return error; } - if (fp->f_type != DTYPE_SOCKET) { - fp_drop(p, s, fp, 1); - proc_fdunlock(p); - return ENOTSOCK; - } - - so = (struct socket *)fp->f_data; - if (so == NULL) { - fp_drop(p, s, fp, 1); - proc_fdunlock(p); - return EBADF; - } - - proc_fdunlock(p); + so = fp->f_data; #if CONFIG_MACF_SOCKET_SUBSET /* @@ -1929,7 +1921,7 @@ recvit(struct proc *p, int s, struct user_msghdr *mp, uio_t uiop, goto out1; } #endif /* MAC_SOCKET_SUBSET */ - if (uio_resid(uiop) < 0) { + if (uio_resid(uiop) < 0 || uio_resid(uiop) > INT_MAX) { KERNEL_DEBUG(DBG_FNC_RECVIT | DBG_FUNC_END, EINVAL, 0, 0, 0, 0); error = EINVAL; goto out1; @@ -1953,7 +1945,7 @@ recvit(struct proc *p, int s, struct user_msghdr *mp, uio_t uiop, goto out; } - *retval = len - uio_resid(uiop); + *retval = (int32_t)(len - uio_resid(uiop)); if (mp->msg_name) { error = copyout_sa(fromsa, mp->msg_name, &mp->msg_namelen); @@ -1970,7 +1962,7 @@ recvit(struct proc *p, int s, struct user_msghdr *mp, uio_t uiop, if (mp->msg_control) { error = copyout_control(p, control, mp->msg_control, - &mp->msg_controllen, &mp->msg_flags); + &mp->msg_controllen, &mp->msg_flags, so); } out: if (fromsa) { @@ -2105,11 +2097,11 @@ recvmsg_nocancel(struct proc *p, struct recvmsg_nocancel_args *uap, if (IS_64BIT_PROCESS(p)) { user_msg.msg_flags = msg64.msg_flags; user_msg.msg_controllen = msg64.msg_controllen; - user_msg.msg_control = msg64.msg_control; + user_msg.msg_control = (user_addr_t)msg64.msg_control; user_msg.msg_iovlen = msg64.msg_iovlen; - user_msg.msg_iov = msg64.msg_iov; + user_msg.msg_iov = (user_addr_t)msg64.msg_iov; user_msg.msg_namelen = msg64.msg_namelen; - user_msg.msg_name = msg64.msg_name; + user_msg.msg_name = (user_addr_t)msg64.msg_name; } else { user_msg.msg_flags = msg32.msg_flags; user_msg.msg_controllen = msg32.msg_controllen; @@ -2175,11 +2167,11 @@ recvmsg_nocancel(struct proc *p, struct recvmsg_nocancel_args *uap, } else { msg32.msg_flags = user_msg.msg_flags; msg32.msg_controllen = user_msg.msg_controllen; - msg32.msg_control = user_msg.msg_control; + msg32.msg_control = (user32_addr_t)user_msg.msg_control; msg32.msg_iovlen = user_msg.msg_iovlen; - msg32.msg_iov = user_msg.msg_iov; + msg32.msg_iov = (user32_addr_t)user_msg.msg_iov; msg32.msg_namelen = user_msg.msg_namelen; - msg32.msg_name = user_msg.msg_name; + msg32.msg_name = (user32_addr_t)user_msg.msg_name; } error = copyout(msghdrp, uap->msg, size_of_msghdr); } @@ -2384,7 +2376,7 @@ recvmsg_x(struct proc *p, struct recvmsg_x_args *uap, user_ssize_t *retval) if (mp->msg_control) { 
error = copyout_control(p, recv_msg_elem->controlp, mp->msg_control, &mp->msg_controllen, - &mp->msg_flags); + &mp->msg_flags, so); if (error) { goto out; } @@ -2565,7 +2557,7 @@ getsockopt(struct proc *p, struct getsockopt_args *uap, #endif /* MAC_SOCKET_SUBSET */ error = sogetoptlock((struct socket *)so, &sopt, 1); /* will lock */ if (error == 0) { - valsize = sopt.sopt_valsize; + valsize = (socklen_t)sopt.sopt_valsize; error = copyout((caddr_t)&valsize, uap->avalsize, sizeof(valsize)); } @@ -2739,17 +2731,20 @@ out: } int -sockargs(struct mbuf **mp, user_addr_t data, int buflen, int type) +sockargs(struct mbuf **mp, user_addr_t data, socklen_t buflen, int type) { struct sockaddr *sa; struct mbuf *m; int error; + socklen_t alloc_buflen = buflen; - size_t alloc_buflen = (size_t)buflen; - - if (alloc_buflen > INT_MAX / 2) { + if (buflen > INT_MAX / 2) { return EINVAL; } + if (type == MT_SONAME && buflen > SOCK_MAXADDRLEN) { + return EINVAL; + } + #ifdef __LP64__ /* * The fd's in the buffer must expand to be pointers, thus we need twice @@ -2791,7 +2786,8 @@ sockargs(struct mbuf **mp, user_addr_t data, int buflen, int type) *mp = m; if (type == MT_SONAME) { sa = mtod(m, struct sockaddr *); - sa->sa_len = buflen; + VERIFY(buflen <= SOCK_MAXADDRLEN); + sa->sa_len = (__uint8_t)buflen; } } return error; @@ -2840,8 +2836,8 @@ getsockaddr(struct socket *so, struct sockaddr **namp, user_addr_t uaddr, len == sizeof(struct sockaddr_in)) { sa->sa_family = AF_INET; } - - sa->sa_len = len; + VERIFY(len <= SOCK_MAXADDRLEN); + sa->sa_len = (__uint8_t)len; *namp = sa; } return error; @@ -2881,7 +2877,7 @@ getsockaddr_s(struct socket *so, struct sockaddr_storage *ss, ss->ss_family = AF_INET; } - ss->ss_len = len; + ss->ss_len = (__uint8_t)len; } return error; } @@ -2905,14 +2901,14 @@ internalize_user_msghdr_array(const void *src, int spacetype, int direction, msghdr64 = ((const struct user64_msghdr_x *)src) + i; - user_msg->msg_name = msghdr64->msg_name; + user_msg->msg_name = (user_addr_t)msghdr64->msg_name; user_msg->msg_namelen = msghdr64->msg_namelen; - user_msg->msg_iov = msghdr64->msg_iov; + user_msg->msg_iov = (user_addr_t)msghdr64->msg_iov; user_msg->msg_iovlen = msghdr64->msg_iovlen; - user_msg->msg_control = msghdr64->msg_control; + user_msg->msg_control = (user_addr_t)msghdr64->msg_control; user_msg->msg_controllen = msghdr64->msg_controllen; user_msg->msg_flags = msghdr64->msg_flags; - user_msg->msg_datalen = msghdr64->msg_datalen; + user_msg->msg_datalen = (size_t)msghdr64->msg_datalen; } else { const struct user32_msghdr_x *msghdr32; @@ -2989,14 +2985,14 @@ internalize_recv_msghdr_array(const void *src, int spacetype, int direction, msghdr64 = ((const struct user64_msghdr_x *)src) + i; - user_msg->msg_name = msghdr64->msg_name; + user_msg->msg_name = (user_addr_t)msghdr64->msg_name; user_msg->msg_namelen = msghdr64->msg_namelen; - user_msg->msg_iov = msghdr64->msg_iov; + user_msg->msg_iov = (user_addr_t)msghdr64->msg_iov; user_msg->msg_iovlen = msghdr64->msg_iovlen; - user_msg->msg_control = msghdr64->msg_control; + user_msg->msg_control = (user_addr_t)msghdr64->msg_control; user_msg->msg_controllen = msghdr64->msg_controllen; user_msg->msg_flags = msghdr64->msg_flags; - user_msg->msg_datalen = msghdr64->msg_datalen; + user_msg->msg_datalen = (size_t)msghdr64->msg_datalen; } else { const struct user32_msghdr_x *msghdr32; @@ -3089,7 +3085,7 @@ externalize_user_msghdr_array(void *dst, int spacetype, int direction, msghdr32 = ((struct user32_msghdr_x *)dst) + i; msghdr32->msg_flags = 
user_msg->msg_flags; - msghdr32->msg_datalen = len; + msghdr32->msg_datalen = (user32_size_t)len; } } return retcnt; @@ -3138,7 +3134,7 @@ externalize_recv_msghdr_array(void *dst, int spacetype, int direction, msghdr32 = ((struct user32_msghdr_x *)dst) + i; msghdr32->msg_flags = user_msg->msg_flags; - msghdr32->msg_datalen = len; + msghdr32->msg_datalen = (user32_size_t)len; } } return retcnt; @@ -3172,7 +3168,7 @@ uio_array_resid(struct uio **uiop, u_int count) return len; } -int +static boolean_t uio_array_is_valid(struct uio **uiop, u_int count) { user_ssize_t len = 0; @@ -3188,17 +3184,17 @@ uio_array_is_valid(struct uio **uiop, u_int count) * Sanity check on the validity of the iovec: * no point of going over sb_max */ - if (resid < 0 || (u_int32_t)resid > sb_max) { - return 0; + if (resid < 0 || resid > (user_ssize_t)sb_max) { + return false; } len += resid; - if (len < 0 || (u_int32_t)len > sb_max) { - return 0; + if (len < 0 || len > (user_ssize_t)sb_max) { + return false; } } } - return 1; + return true; } @@ -3415,7 +3411,7 @@ sendfile(struct proc *p, struct sendfile_args *uap, __unused int *retval) goto done2; } - context.vc_ucred = fp->f_fglob->fg_cred; + context.vc_ucred = fp->fp_glob->fg_cred; #if CONFIG_MACF_SOCKET_SUBSET /* JMM - fetch connected sockaddr? */ diff --git a/bsd/kern/uipc_usrreq.c b/bsd/kern/uipc_usrreq.c index b08bbbee7..91eb43b4a 100644 --- a/bsd/kern/uipc_usrreq.c +++ b/bsd/kern/uipc_usrreq.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2019 Apple Inc. All rights reserved. + * Copyright (c) 2000-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -93,6 +93,7 @@ #include #include +#include #if CONFIG_MACF #include @@ -105,26 +106,25 @@ */ #define UIPC_MAX_CMSG_FD 512 -#define f_msgcount f_fglob->fg_msgcount -#define f_cred f_fglob->fg_cred -#define f_ops f_fglob->fg_ops -#define f_offset f_fglob->fg_offset -#define f_data f_fglob->fg_data -struct zone *unp_zone; +ZONE_DECLARE(unp_zone, "unpzone", sizeof(struct unpcb), ZC_NONE); static unp_gen_t unp_gencnt; static u_int unp_count; -static lck_attr_t *unp_mtx_attr; -static lck_grp_t *unp_mtx_grp; -static lck_grp_attr_t *unp_mtx_grp_attr; -static lck_rw_t *unp_list_mtx; +static lck_attr_t *unp_mtx_attr; +static lck_grp_t *unp_mtx_grp; +static lck_grp_attr_t *unp_mtx_grp_attr; +static lck_rw_t unp_list_mtx; -static lck_mtx_t *unp_disconnect_lock; -static lck_mtx_t *unp_connect_lock; +static lck_mtx_t unp_disconnect_lock; +static lck_mtx_t unp_connect_lock; +static lck_mtx_t uipc_lock; static u_int disconnect_in_progress; -extern lck_mtx_t *uipc_lock; -static struct unp_head unp_shead, unp_dhead; +static struct unp_head unp_shead, unp_dhead; +static int unp_defer, unp_gcing, unp_gcwait; +static thread_t unp_gcthread = NULL; +static LIST_HEAD(, fileglob) unp_msghead = LIST_HEAD_INITIALIZER(unp_msghead); + /* * mDNSResponder tracing. 
When enabled, endpoints connected to @@ -744,6 +744,8 @@ uipc_ctloutput(struct socket *so, struct sockopt *sopt) struct unpcb *unp = sotounpcb(so); int error = 0; pid_t peerpid; + proc_t p; + task_t t; struct socket *peerso; switch (sopt->sopt_dir) { @@ -802,6 +804,37 @@ uipc_ctloutput(struct socket *so, struct sockopt *sopt) } socket_unlock(peerso, 1); break; + case LOCAL_PEERTOKEN: + if (unp->unp_conn == NULL) { + error = ENOTCONN; + break; + } + peerso = unp->unp_conn->unp_socket; + if (peerso == NULL) { + panic("peer is connected but has no socket?"); + } + unp_get_locks_in_order(so, peerso); + peerpid = peerso->last_pid; + p = proc_find(peerpid); + if (p != PROC_NULL) { + t = proc_task(p); + if (t != TASK_NULL) { + audit_token_t peertoken; + mach_msg_type_number_t count = TASK_AUDIT_TOKEN_COUNT; + if (task_info(t, TASK_AUDIT_TOKEN, (task_info_t)&peertoken, &count) == KERN_SUCCESS) { + error = sooptcopyout(sopt, &peertoken, sizeof(peertoken)); + } else { + error = EINVAL; + } + } else { + error = EINVAL; + } + proc_rele(p); + } else { + error = EINVAL; + } + socket_unlock(peerso, 1); + break; default: error = EOPNOTSUPP; break; @@ -887,14 +920,14 @@ unp_attach(struct socket *so) lck_mtx_init(&unp->unp_mtx, unp_mtx_grp, unp_mtx_attr); - lck_rw_lock_exclusive(unp_list_mtx); + lck_rw_lock_exclusive(&unp_list_mtx); LIST_INIT(&unp->unp_refs); unp->unp_socket = so; unp->unp_gencnt = ++unp_gencnt; unp_count++; LIST_INSERT_HEAD(so->so_type == SOCK_DGRAM ? &unp_dhead : &unp_shead, unp, unp_link); - lck_rw_done(unp_list_mtx); + lck_rw_done(&unp_list_mtx); so->so_pcb = (caddr_t)unp; /* * Mark AF_UNIX socket buffers accordingly so that: @@ -924,11 +957,11 @@ unp_detach(struct unpcb *unp) { int so_locked = 1; - lck_rw_lock_exclusive(unp_list_mtx); + lck_rw_lock_exclusive(&unp_list_mtx); LIST_REMOVE(unp, unp_link); --unp_count; ++unp_gencnt; - lck_rw_done(unp_list_mtx); + lck_rw_done(&unp_list_mtx); if (unp->unp_vnode) { struct vnode *tvp = NULL; socket_unlock(unp->unp_socket, 0); @@ -937,14 +970,14 @@ unp_detach(struct unpcb *unp) * a thread closing the listening socket and a thread * connecting to it. 
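/*
 * [Editor's illustrative sketch -- not part of the patch.]  The LOCAL_PEERTOKEN
 * case added to uipc_ctloutput() above lets the holder of a connected AF_UNIX
 * socket fetch the peer process's audit token, alongside the existing
 * LOCAL_PEERCRED/LOCAL_PEERPID options.  A possible userspace call site
 * follows; it assumes the LOCAL_PEERTOKEN constant is exported through
 * <sys/un.h> at level SOL_LOCAL like its siblings, and that audit_token_t is
 * visible via <bsm/audit.h> -- neither of which this kernel diff itself shows.
 */
#include <sys/socket.h>
#include <sys/un.h>
#include <bsm/audit.h>

static int
get_peer_audit_token(int fd, audit_token_t *token)
{
	socklen_t len = sizeof(*token);

	/* Fails with ENOTCONN if the socket has no connected peer. */
	return getsockopt(fd, SOL_LOCAL, LOCAL_PEERTOKEN, token, &len);
}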
*/ - lck_mtx_lock(unp_connect_lock); + lck_mtx_lock(&unp_connect_lock); socket_lock(unp->unp_socket, 0); if (unp->unp_vnode) { tvp = unp->unp_vnode; unp->unp_vnode->v_socket = NULL; unp->unp_vnode = NULL; } - lck_mtx_unlock(unp_connect_lock); + lck_mtx_unlock(&unp_connect_lock); if (tvp != NULL) { vnode_rele(tvp); /* drop the usecount */ } @@ -964,13 +997,13 @@ unp_detach(struct unpcb *unp) socket_unlock(unp->unp_socket, 0); so_locked = 0; } - lck_mtx_lock(unp_disconnect_lock); + lck_mtx_lock(&unp_disconnect_lock); while (disconnect_in_progress != 0) { - (void)msleep((caddr_t)&disconnect_in_progress, unp_disconnect_lock, + (void)msleep((caddr_t)&disconnect_in_progress, &unp_disconnect_lock, PSOCK, "disconnect", NULL); } disconnect_in_progress = 1; - lck_mtx_unlock(unp_disconnect_lock); + lck_mtx_unlock(&unp_disconnect_lock); /* Now we are sure that any unpcb socket disconnect is not happening */ if (unp->unp_refs.lh_first != NULL) { @@ -978,10 +1011,10 @@ unp_detach(struct unpcb *unp) socket_lock(unp2->unp_socket, 1); } - lck_mtx_lock(unp_disconnect_lock); + lck_mtx_lock(&unp_disconnect_lock); disconnect_in_progress = 0; wakeup(&disconnect_in_progress); - lck_mtx_unlock(unp_disconnect_lock); + lck_mtx_unlock(&unp_disconnect_lock); if (unp2 != NULL) { /* We already locked this socket and have a reference on it */ @@ -1219,10 +1252,10 @@ unp_connect(struct socket *so, struct sockaddr *nam, __unused proc_t p) goto out; } - lck_mtx_lock(unp_connect_lock); + lck_mtx_lock(&unp_connect_lock); if (vp->v_socket == 0) { - lck_mtx_unlock(unp_connect_lock); + lck_mtx_unlock(&unp_connect_lock); error = ECONNREFUSED; socket_lock(so, 0); goto out; @@ -1230,7 +1263,7 @@ unp_connect(struct socket *so, struct sockaddr *nam, __unused proc_t p) socket_lock(vp->v_socket, 1); /* Get a reference on the listening socket */ so2 = vp->v_socket; - lck_mtx_unlock(unp_connect_lock); + lck_mtx_unlock(&unp_connect_lock); if (so2->so_pcb == NULL) { @@ -1337,13 +1370,6 @@ unp_connect(struct socket *so, struct sockaddr *nam, __unused proc_t p) sizeof(unp->unp_peercred)); unp->unp_flags |= UNP_HAVEPC; -#if CONFIG_MACF_SOCKET - /* XXXMAC: recursive lock: SOCK_LOCK(so); */ - mac_socketpeer_label_associate_socket(so, so3); - mac_socketpeer_label_associate_socket(so3, so); - /* XXXMAC: SOCK_UNLOCK(so); */ -#endif /* MAC_SOCKET */ - /* Hold the reference on listening socket until the end */ socket_unlock(so2, 0); list_so = so2; @@ -1491,17 +1517,17 @@ unp_disconnect(struct unpcb *unp) if (unp->unp_conn == NULL) { return; } - lck_mtx_lock(unp_disconnect_lock); + lck_mtx_lock(&unp_disconnect_lock); while (disconnect_in_progress != 0) { if (so_locked == 1) { socket_unlock(so, 0); so_locked = 0; } - (void)msleep((caddr_t)&disconnect_in_progress, unp_disconnect_lock, + (void)msleep((caddr_t)&disconnect_in_progress, &unp_disconnect_lock, PSOCK, "disconnect", NULL); } disconnect_in_progress = 1; - lck_mtx_unlock(unp_disconnect_lock); + lck_mtx_unlock(&unp_disconnect_lock); if (so_locked == 0) { socket_lock(so, 0); @@ -1601,10 +1627,10 @@ try_again: panic("unknown socket type %d", so->so_type); } out: - lck_mtx_lock(unp_disconnect_lock); + lck_mtx_lock(&unp_disconnect_lock); disconnect_in_progress = 0; wakeup(&disconnect_in_progress); - lck_mtx_unlock(unp_disconnect_lock); + lck_mtx_unlock(&unp_disconnect_lock); if (strdisconn) { socket_unlock(so, 0); @@ -1672,7 +1698,7 @@ unp_pcblist SYSCTL_HANDLER_ARGS struct xunpgen xug; struct unp_head *head; - lck_rw_lock_shared(unp_list_mtx); + lck_rw_lock_shared(&unp_list_mtx); head = 
((intptr_t)arg1 == SOCK_DGRAM ? &unp_dhead : &unp_shead); /* @@ -1683,12 +1709,12 @@ unp_pcblist SYSCTL_HANDLER_ARGS n = unp_count; req->oldidx = 2 * sizeof(xug) + (n + n / 8) * sizeof(struct xunpcb); - lck_rw_done(unp_list_mtx); + lck_rw_done(&unp_list_mtx); return 0; } if (req->newptr != USER_ADDR_NULL) { - lck_rw_done(unp_list_mtx); + lck_rw_done(&unp_list_mtx); return EPERM; } @@ -1705,7 +1731,7 @@ unp_pcblist SYSCTL_HANDLER_ARGS xug.xug_sogen = so_gencnt; error = SYSCTL_OUT(req, &xug, sizeof(xug)); if (error) { - lck_rw_done(unp_list_mtx); + lck_rw_done(&unp_list_mtx); return error; } @@ -1713,14 +1739,14 @@ unp_pcblist SYSCTL_HANDLER_ARGS * We are done if there is no pcb */ if (n == 0) { - lck_rw_done(unp_list_mtx); + lck_rw_done(&unp_list_mtx); return 0; } MALLOC(unp_list, struct unpcb **, n * sizeof(*unp_list), M_TEMP, M_WAITOK); if (unp_list == 0) { - lck_rw_done(unp_list_mtx); + lck_rw_done(&unp_list_mtx); return ENOMEM; } @@ -1776,7 +1802,7 @@ unp_pcblist SYSCTL_HANDLER_ARGS error = SYSCTL_OUT(req, &xug, sizeof(xug)); } FREE(unp_list, M_TEMP); - lck_rw_done(unp_list_mtx); + lck_rw_done(&unp_list_mtx); return error; } @@ -1789,7 +1815,7 @@ SYSCTL_PROC(_net_local_stream, OID_AUTO, pcblist, (caddr_t)(long)SOCK_STREAM, 0, unp_pcblist, "S,xunpcb", "List of active local stream sockets"); -#if !CONFIG_EMBEDDED +#if XNU_TARGET_OS_OSX static int unp_pcblist64 SYSCTL_HANDLER_ARGS @@ -1801,7 +1827,7 @@ unp_pcblist64 SYSCTL_HANDLER_ARGS struct xunpgen xug; struct unp_head *head; - lck_rw_lock_shared(unp_list_mtx); + lck_rw_lock_shared(&unp_list_mtx); head = ((intptr_t)arg1 == SOCK_DGRAM ? &unp_dhead : &unp_shead); /* @@ -1812,12 +1838,12 @@ unp_pcblist64 SYSCTL_HANDLER_ARGS n = unp_count; req->oldidx = 2 * sizeof(xug) + (n + n / 8) * (sizeof(struct xunpcb64)); - lck_rw_done(unp_list_mtx); + lck_rw_done(&unp_list_mtx); return 0; } if (req->newptr != USER_ADDR_NULL) { - lck_rw_done(unp_list_mtx); + lck_rw_done(&unp_list_mtx); return EPERM; } @@ -1834,7 +1860,7 @@ unp_pcblist64 SYSCTL_HANDLER_ARGS xug.xug_sogen = so_gencnt; error = SYSCTL_OUT(req, &xug, sizeof(xug)); if (error) { - lck_rw_done(unp_list_mtx); + lck_rw_done(&unp_list_mtx); return error; } @@ -1842,14 +1868,14 @@ unp_pcblist64 SYSCTL_HANDLER_ARGS * We are done if there is no pcb */ if (n == 0) { - lck_rw_done(unp_list_mtx); + lck_rw_done(&unp_list_mtx); return 0; } MALLOC(unp_list, struct unpcb **, n * sizeof(*unp_list), M_TEMP, M_WAITOK); if (unp_list == 0) { - lck_rw_done(unp_list_mtx); + lck_rw_done(&unp_list_mtx); return ENOMEM; } @@ -1869,7 +1895,7 @@ unp_pcblist64 SYSCTL_HANDLER_ARGS size_t xu_len = sizeof(struct xunpcb64); bzero(&xu, xu_len); - xu.xu_len = xu_len; + xu.xu_len = (u_int32_t)xu_len; xu.xu_unpp = (u_int64_t)VM_KERNEL_ADDRPERM(unp); xu.xunp_link.le_next = (u_int64_t) VM_KERNEL_ADDRPERM(unp->unp_link.le_next); @@ -1929,7 +1955,7 @@ unp_pcblist64 SYSCTL_HANDLER_ARGS error = SYSCTL_OUT(req, &xug, sizeof(xug)); } FREE(unp_list, M_TEMP); - lck_rw_done(unp_list_mtx); + lck_rw_done(&unp_list_mtx); return error; } @@ -1942,7 +1968,7 @@ SYSCTL_PROC(_net_local_stream, OID_AUTO, pcblist64, (caddr_t)(long)SOCK_STREAM, 0, unp_pcblist64, "S,xunpcb64", "List of active local stream sockets 64 bit"); -#endif /* !CONFIG_EMBEDDED */ +#endif /* XNU_TARGET_OS_OSX */ static void unp_shutdown(struct unpcb *unp) @@ -1962,10 +1988,156 @@ unp_drop(struct unpcb *unp, int errno) { struct socket *so = unp->unp_socket; - so->so_error = errno; + so->so_error = (u_short)errno; unp_disconnect(unp); } +/* always called under uipc_lock */ 
+static void +unp_gc_wait(void) +{ + if (unp_gcthread == current_thread()) { + return; + } + + while (unp_gcing != 0) { + unp_gcwait = 1; + msleep(&unp_gcing, &uipc_lock, 0, "unp_gc_wait", NULL); + } +} + +/* + * fg_insertuipc_mark + * + * Description: Mark fileglob for insertion onto message queue if needed + * Also takes fileglob reference + * + * Parameters: fg Fileglob pointer to insert + * + * Returns: true, if the fileglob needs to be inserted onto msg queue + * + * Locks: Takes and drops fg_lock, potentially many times + */ +static boolean_t +fg_insertuipc_mark(struct fileglob * fg) +{ + boolean_t insert = FALSE; + + lck_mtx_lock_spin(&fg->fg_lock); + while (fg->fg_lflags & FG_RMMSGQ) { + lck_mtx_convert_spin(&fg->fg_lock); + + fg->fg_lflags |= FG_WRMMSGQ; + msleep(&fg->fg_lflags, &fg->fg_lock, 0, "fg_insertuipc", NULL); + } + + os_ref_retain_locked_raw(&fg->fg_count, &f_refgrp); + fg->fg_msgcount++; + if (fg->fg_msgcount == 1) { + fg->fg_lflags |= FG_INSMSGQ; + insert = TRUE; + } + lck_mtx_unlock(&fg->fg_lock); + return insert; +} + +/* + * fg_insertuipc + * + * Description: Insert marked fileglob onto message queue + * + * Parameters: fg Fileglob pointer to insert + * + * Returns: void + * + * Locks: Takes and drops fg_lock & uipc_lock + * DO NOT call this function with proc_fdlock held as unp_gc() + * can potentially try to acquire proc_fdlock, which can result + * in a deadlock if this function is in unp_gc_wait(). + */ +static void +fg_insertuipc(struct fileglob * fg) +{ + if (fg->fg_lflags & FG_INSMSGQ) { + lck_mtx_lock_spin(&uipc_lock); + unp_gc_wait(); + LIST_INSERT_HEAD(&unp_msghead, fg, f_msglist); + lck_mtx_unlock(&uipc_lock); + lck_mtx_lock(&fg->fg_lock); + fg->fg_lflags &= ~FG_INSMSGQ; + if (fg->fg_lflags & FG_WINSMSGQ) { + fg->fg_lflags &= ~FG_WINSMSGQ; + wakeup(&fg->fg_lflags); + } + lck_mtx_unlock(&fg->fg_lock); + } +} + +/* + * fg_removeuipc_mark + * + * Description: Mark the fileglob for removal from message queue if needed + * Also releases fileglob message queue reference + * + * Parameters: fg Fileglob pointer to remove + * + * Returns: true, if the fileglob needs to be removed from msg queue + * + * Locks: Takes and drops fg_lock, potentially many times + */ +static boolean_t +fg_removeuipc_mark(struct fileglob * fg) +{ + boolean_t remove = FALSE; + + lck_mtx_lock_spin(&fg->fg_lock); + while (fg->fg_lflags & FG_INSMSGQ) { + lck_mtx_convert_spin(&fg->fg_lock); + + fg->fg_lflags |= FG_WINSMSGQ; + msleep(&fg->fg_lflags, &fg->fg_lock, 0, "fg_removeuipc", NULL); + } + fg->fg_msgcount--; + if (fg->fg_msgcount == 0) { + fg->fg_lflags |= FG_RMMSGQ; + remove = TRUE; + } + lck_mtx_unlock(&fg->fg_lock); + return remove; +} + +/* + * fg_removeuipc + * + * Description: Remove marked fileglob from message queue + * + * Parameters: fg Fileglob pointer to remove + * + * Returns: void + * + * Locks: Takes and drops fg_lock & uipc_lock + * DO NOT call this function with proc_fdlock held as unp_gc() + * can potentially try to acquire proc_fdlock, which can result + * in a deadlock if this function is in unp_gc_wait(). 
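/*
 * [Editor's illustrative sketch -- not part of the patch.]  The
 * fg_insertuipc_mark()/fg_removeuipc() helpers above maintain fg_msgcount and
 * the unp_msghead list for fileglobs that are "in flight" inside a control
 * message.  The userspace operation that creates such in-flight references is
 * SCM_RIGHTS descriptor passing over an AF_UNIX socket: unp_internalize()
 * (later in this file) accounts the send side, unp_externalize() the receive
 * side, and unp_gc() reclaims descriptors that only survive inside unread
 * messages.  A minimal sender:
 */
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>

static ssize_t
send_fd(int sock, int fd_to_pass)
{
	char dummy = 0;
	struct iovec iov = { .iov_base = &dummy, .iov_len = 1 };
	union {
		char buf[CMSG_SPACE(sizeof(int))];
		struct cmsghdr align;
	} u;
	struct msghdr msg = {
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = u.buf, .msg_controllen = sizeof(u.buf),
	};
	struct cmsghdr *cp = CMSG_FIRSTHDR(&msg);

	memset(u.buf, 0, sizeof(u.buf));
	cp->cmsg_len = CMSG_LEN(sizeof(int));
	cp->cmsg_level = SOL_SOCKET;
	cp->cmsg_type = SCM_RIGHTS;        /* the payload is a file descriptor */
	memcpy(CMSG_DATA(cp), &fd_to_pass, sizeof(int));

	return sendmsg(sock, &msg, 0);     /* fd stays "in flight" until received */
}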
+ */ +static void +fg_removeuipc(struct fileglob * fg) +{ + if (fg->fg_lflags & FG_RMMSGQ) { + lck_mtx_lock_spin(&uipc_lock); + unp_gc_wait(); + LIST_REMOVE(fg, f_msglist); + lck_mtx_unlock(&uipc_lock); + lck_mtx_lock(&fg->fg_lock); + fg->fg_lflags &= ~FG_RMMSGQ; + if (fg->fg_lflags & FG_WRMMSGQ) { + fg->fg_lflags &= ~FG_WRMMSGQ; + wakeup(&fg->fg_lflags); + } + lck_mtx_unlock(&fg->fg_lock); + } +} + /* * Returns: 0 Success * EMSGSIZE The new fd's will not fit @@ -2010,33 +2182,20 @@ unp_externalize(struct mbuf *rights) * XXX (2) allocation failures should be non-fatal */ for (i = 0; i < newfds; i++) { -#if CONFIG_MACF_SOCKET - /* - * If receive access is denied, don't pass along - * and error message, just discard the descriptor. - */ - if (mac_file_check_receive(kauth_cred_get(), rp[i])) { - proc_fdunlock(p); - unp_discard(rp[i], p); - fds[i] = 0; - proc_fdlock(p); - continue; - } -#endif if (fdalloc(p, 0, &f)) { panic("unp_externalize:fdalloc"); } fp = fileproc_alloc_init(NULL); if (fp == NULL) { - panic("unp_externalize: MALLOC_ZONE"); + panic("unp_externalize:fileproc_alloc_init"); } - fp->f_fglob = rp[i]; + fp->fp_glob = rp[i]; if (fg_removeuipc_mark(rp[i])) { /* * Take an iocount on the fp for completing the * removal from the global msg queue */ - os_ref_retain_locked(&fp->f_iocount); + os_ref_retain_locked(&fp->fp_iocount); fileproc_l[i] = fp; } else { fileproc_l[i] = NULL; @@ -2048,10 +2207,10 @@ unp_externalize(struct mbuf *rights) for (i = 0; i < newfds; i++) { if (fileproc_l[i] != NULL) { - VERIFY(fileproc_l[i]->f_fglob != NULL && - (fileproc_l[i]->f_fglob->fg_lflags & FG_RMMSGQ)); + VERIFY(fileproc_l[i]->fp_glob != NULL && + (fileproc_l[i]->fp_glob->fg_lflags & FG_RMMSGQ)); VERIFY(fds[i] >= 0); - fg_removeuipc(fileproc_l[i]->f_fglob); + fg_removeuipc(fileproc_l[i]->fp_glob); /* Drop the iocount */ fp_drop(p, fds[i], fileproc_l[i], 0); @@ -2079,12 +2238,6 @@ void unp_init(void) { _CASSERT(UIPC_MAX_CMSG_FD >= (MCLBYTES / sizeof(int))); - unp_zone = zinit(sizeof(struct unpcb), - (nmbclusters * sizeof(struct unpcb)), 4096, "unpzone"); - - if (unp_zone == 0) { - panic("unp_init"); - } LIST_INIT(&unp_dhead); LIST_INIT(&unp_shead); @@ -2097,19 +2250,10 @@ unp_init(void) unp_mtx_attr = lck_attr_alloc_init(); - if ((unp_list_mtx = lck_rw_alloc_init(unp_mtx_grp, - unp_mtx_attr)) == NULL) { - return; /* pretty much dead if this fails... 
*/ - } - if ((unp_disconnect_lock = lck_mtx_alloc_init(unp_mtx_grp, - unp_mtx_attr)) == NULL) { - return; - } - - if ((unp_connect_lock = lck_mtx_alloc_init(unp_mtx_grp, - unp_mtx_attr)) == NULL) { - return; - } + lck_mtx_init(&uipc_lock, unp_mtx_grp, unp_mtx_attr); + lck_rw_init(&unp_list_mtx, unp_mtx_grp, unp_mtx_attr); + lck_mtx_init(&unp_disconnect_lock, unp_mtx_grp, unp_mtx_attr); + lck_mtx_init(&unp_connect_lock, unp_mtx_grp, unp_mtx_attr); } #ifndef MIN @@ -2119,7 +2263,7 @@ unp_init(void) /* * Returns: 0 Success * EINVAL - * fdgetf_noref:EBADF + * EBADF */ static int unp_internalize(struct mbuf *control, proc_t p) @@ -2145,10 +2289,10 @@ unp_internalize(struct mbuf *control, proc_t p) for (i = 0; i < oldfds; i++) { struct fileproc *tmpfp; - if (((error = fdgetf_noref(p, fds[i], &tmpfp)) != 0)) { + if ((tmpfp = fp_get_noref_locked(p, fds[i])) == NULL) { proc_fdunlock(p); - return error; - } else if (!file_issendable(p, tmpfp)) { + return EBADF; + } else if (!fg_sendable(tmpfp->fp_glob)) { proc_fdunlock(p); return EINVAL; } else if (FP_ISGUARDED(tmpfp, GUARD_SOCKET_IPC)) { @@ -2164,11 +2308,11 @@ unp_internalize(struct mbuf *control, proc_t p) * and doing them in-order would result in stomping over unprocessed fd's */ for (i = (oldfds - 1); i >= 0; i--) { - (void) fdgetf_noref(p, fds[i], &fp); - if (fg_insertuipc_mark(fp->f_fglob)) { + fp = fp_get_noref_locked(p, fds[i]); + if (fg_insertuipc_mark(fp->fp_glob)) { fg_ins[i / 8] |= 0x80 >> (i % 8); } - rp[i] = fp->f_fglob; + rp[i] = fp->fp_glob; } proc_fdunlock(p); @@ -2183,24 +2327,6 @@ unp_internalize(struct mbuf *control, proc_t p) return 0; } -static int unp_defer, unp_gcing, unp_gcwait; -static thread_t unp_gcthread = NULL; - -/* always called under uipc_lock */ -void -unp_gc_wait(void) -{ - if (unp_gcthread == current_thread()) { - return; - } - - while (unp_gcing != 0) { - unp_gcwait = 1; - msleep(&unp_gcing, uipc_lock, 0, "unp_gc_wait", NULL); - } -} - - __private_extern__ void unp_gc(void) { @@ -2211,32 +2337,30 @@ unp_gc(void) int nunref, i; int need_gcwakeup = 0; - lck_mtx_lock(uipc_lock); + lck_mtx_lock(&uipc_lock); if (unp_gcing) { - lck_mtx_unlock(uipc_lock); + lck_mtx_unlock(&uipc_lock); return; } unp_gcing = 1; unp_defer = 0; unp_gcthread = current_thread(); - lck_mtx_unlock(uipc_lock); + lck_mtx_unlock(&uipc_lock); /* * before going through all this, set all FDs to * be NOT defered and NOT externally accessible */ - for (fg = fmsghead.lh_first; fg != 0; fg = fg->f_msglist.le_next) { - lck_mtx_lock(&fg->fg_lock); - fg->fg_flag &= ~(FMARK | FDEFER); - lck_mtx_unlock(&fg->fg_lock); + for (fg = unp_msghead.lh_first; fg != 0; fg = fg->f_msglist.le_next) { + os_atomic_andnot(&fg->fg_flag, FMARK | FDEFER, relaxed); } do { - for (fg = fmsghead.lh_first; fg != 0; + for (fg = unp_msghead.lh_first; fg != 0; fg = fg->f_msglist.le_next) { lck_mtx_lock(&fg->fg_lock); /* * If the file is not open, skip it */ - if (fg->fg_count == 0) { + if (os_ref_get_count_raw(&fg->fg_count) == 0) { lck_mtx_unlock(&fg->fg_lock); continue; } @@ -2246,7 +2370,7 @@ unp_gc(void) * and un-mark it */ if (fg->fg_flag & FDEFER) { - fg->fg_flag &= ~FDEFER; + os_atomic_andnot(&fg->fg_flag, FDEFER, relaxed); unp_defer--; } else { /* @@ -2262,7 +2386,8 @@ unp_gc(void) * in transit, then skip it. it's not * externally accessible. 
*/ - if (fg->fg_count == fg->fg_msgcount) { + if (os_ref_get_count_raw(&fg->fg_count) == + fg->fg_msgcount) { lck_mtx_unlock(&fg->fg_lock); continue; } @@ -2270,7 +2395,7 @@ unp_gc(void) * If it got this far then it must be * externally accessible. */ - fg->fg_flag |= FMARK; + os_atomic_or(&fg->fg_flag, FMARK, relaxed); } /* * either it was defered, or it is externally @@ -2326,7 +2451,7 @@ unp_gc(void) * The bug in the orginal code is a little tricky, so I'll describe * what's wrong with it here. * - * It is incorrect to simply unp_discard each entry for f_msgcount + * It is incorrect to simply unp_discard each entry for fg_msgcount * times -- consider the case of sockets A and B that contain * references to each other. On a last close of some other socket, * we trigger a gc since the number of outstanding rights (unp_rights) @@ -2357,12 +2482,12 @@ unp_gc(void) * * 91/09/19, bsy@cs.cmu.edu */ - extra_ref = _MALLOC(nfiles * sizeof(struct fileglob *), - M_FILEGLOB, M_WAITOK); + MALLOC(extra_ref, struct fileglob **, nfiles * sizeof(struct fileglob *), + M_TEMP, M_WAITOK); if (extra_ref == NULL) { goto bail; } - for (nunref = 0, fg = fmsghead.lh_first, fpp = extra_ref; fg != 0; + for (nunref = 0, fg = unp_msghead.lh_first, fpp = extra_ref; fg != 0; fg = nextfg) { lck_mtx_lock(&fg->fg_lock); @@ -2370,7 +2495,7 @@ unp_gc(void) /* * If it's not open, skip it */ - if (fg->fg_count == 0) { + if (os_ref_get_count_raw(&fg->fg_count) == 0) { lck_mtx_unlock(&fg->fg_lock); continue; } @@ -2380,8 +2505,12 @@ unp_gc(void) * of (shut-down) FDs, so include it in our * list of FDs to remove */ - if (fg->fg_count == fg->fg_msgcount && !(fg->fg_flag & FMARK)) { - fg->fg_count++; + if (fg->fg_flag & FMARK) { + lck_mtx_unlock(&fg->fg_lock); + continue; + } + if (os_ref_get_count_raw(&fg->fg_count) == fg->fg_msgcount) { + os_ref_retain_raw(&fg->fg_count, &f_refgrp); *fpp++ = fg; nunref++; } @@ -2407,12 +2536,12 @@ unp_gc(void) } } for (i = nunref, fpp = extra_ref; --i >= 0; ++fpp) { - closef_locked((struct fileproc *)0, *fpp, (proc_t)NULL); + fg_drop(PROC_NULL, *fpp); } - FREE(extra_ref, M_FILEGLOB); + FREE(extra_ref, M_TEMP); bail: - lck_mtx_lock(uipc_lock); + lck_mtx_lock(&uipc_lock); unp_gcing = 0; unp_gcthread = NULL; @@ -2420,7 +2549,7 @@ bail: unp_gcwait = 0; need_gcwakeup = 1; } - lck_mtx_unlock(uipc_lock); + lck_mtx_unlock(&uipc_lock); if (need_gcwakeup != 0) { wakeup(&unp_gcing); @@ -2482,15 +2611,14 @@ unp_scan(struct mbuf *m0, void (*op)(struct fileglob *, void *arg), void *arg) static void unp_mark(struct fileglob *fg, __unused void *arg) { - lck_mtx_lock(&fg->fg_lock); + uint32_t oflags, nflags; - if (fg->fg_flag & FMARK) { - lck_mtx_unlock(&fg->fg_lock); - return; - } - fg->fg_flag |= (FMARK | FDEFER); - - lck_mtx_unlock(&fg->fg_lock); + os_atomic_rmw_loop(&fg->fg_flag, oflags, nflags, relaxed, { + if (oflags & FMARK) { + os_atomic_rmw_loop_give_up(return ); + } + nflags = oflags | FMARK | FDEFER; + }); unp_defer++; } @@ -2508,9 +2636,7 @@ unp_discard(struct fileglob *fg, void *p) } (void) OSAddAtomic(-1, &unp_rights); - proc_fdlock(p); - (void) closef_locked((struct fileproc *)0, fg, p); - proc_fdunlock(p); + (void) fg_drop(p, fg); } int diff --git a/bsd/kern/vsock_domain.c b/bsd/kern/vsock_domain.c new file mode 100644 index 000000000..ae118349d --- /dev/null +++ b/bsd/kern/vsock_domain.c @@ -0,0 +1,1433 @@ +/* + * Copyright (c) 2020 Apple Inc. All rights reserved. 
+ * + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. + * + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define sotovsockpcb(so) ((struct vsockpcb *)(so)->so_pcb) + +#define VSOCK_PORT_RESERVED 1024 + +/* VSock Protocol Globals */ + +static struct vsock_transport * _Atomic the_vsock_transport = NULL; +static ZONE_DECLARE(vsockpcb_zone, "vsockpcbzone", + sizeof(struct vsockpcb), ZC_NONE); +static struct vsockpcbinfo vsockinfo; + +static uint32_t vsock_sendspace = VSOCK_MAX_PACKET_SIZE * 8; +static uint32_t vsock_recvspace = VSOCK_MAX_PACKET_SIZE * 8; + +/* VSock PCB Helpers */ + +static uint32_t +vsock_get_peer_space(struct vsockpcb *pcb) +{ + return pcb->peer_buf_alloc - (pcb->tx_cnt - pcb->peer_fwd_cnt); +} + +static struct vsockpcb * +vsock_get_matching_pcb(struct vsock_address src, struct vsock_address dst) +{ + struct vsockpcb *preferred = NULL; + struct vsockpcb *match = NULL; + struct vsockpcb *pcb = NULL; + + lck_rw_lock_shared(vsockinfo.bound_lock); + LIST_FOREACH(pcb, &vsockinfo.bound, bound) { + // Source cid and port must match. Only destination port must match. (Allows for a changing CID during migration) + socket_lock(pcb->so, 1); + if ((pcb->so->so_state & SS_ISCONNECTED || pcb->so->so_state & SS_ISCONNECTING) && + pcb->local_address.cid == src.cid && pcb->local_address.port == src.port && + pcb->remote_address.port == dst.port) { + preferred = pcb; + break; + } else if ((pcb->local_address.cid == src.cid || pcb->local_address.cid == VMADDR_CID_ANY) && + pcb->local_address.port == src.port) { + match = pcb; + } + socket_unlock(pcb->so, 1); + } + if (!preferred && match) { + socket_lock(match->so, 1); + preferred = match; + } + lck_rw_done(vsockinfo.bound_lock); + + return preferred; +} + +static errno_t +vsock_bind_address_if_free(struct vsockpcb *pcb, uint32_t local_cid, uint32_t local_port, uint32_t remote_cid, uint32_t remote_port) +{ + socket_lock_assert_owned(pcb->so); + + // Privileged ports. 
+ if (local_port != VMADDR_PORT_ANY && local_port < VSOCK_PORT_RESERVED && + current_task() != kernel_task && proc_suser(current_proc()) != 0) { + return EACCES; + } + + bool taken = false; + const bool check_remote = (remote_cid != VMADDR_CID_ANY && remote_port != VMADDR_PORT_ANY); + + struct vsockpcb *pcb_match = NULL; + + socket_unlock(pcb->so, 0); + lck_rw_lock_exclusive(vsockinfo.bound_lock); + LIST_FOREACH(pcb_match, &vsockinfo.bound, bound) { + socket_lock(pcb_match->so, 1); + if (pcb == pcb_match || + (!check_remote && pcb_match->local_address.port == local_port) || + (check_remote && pcb_match->local_address.port == local_port && + pcb_match->remote_address.cid == remote_cid && pcb_match->remote_address.port == remote_port)) { + socket_unlock(pcb_match->so, 1); + taken = true; + break; + } + socket_unlock(pcb_match->so, 1); + } + socket_lock(pcb->so, 0); + if (!taken) { + pcb->local_address = (struct vsock_address) { .cid = local_cid, .port = local_port }; + pcb->remote_address = (struct vsock_address) { .cid = remote_cid, .port = remote_port }; + LIST_INSERT_HEAD(&vsockinfo.bound, pcb, bound); + } + lck_rw_done(vsockinfo.bound_lock); + + return taken ? EADDRINUSE : 0; +} + +static errno_t +vsock_bind_address(struct vsockpcb *pcb, struct vsock_address laddr, struct vsock_address raddr) +{ + if (!pcb) { + return EINVAL; + } + + socket_lock_assert_owned(pcb->so); + + // Certain CIDs are reserved. + if (laddr.cid == VMADDR_CID_HYPERVISOR || laddr.cid == VMADDR_CID_RESERVED || laddr.cid == VMADDR_CID_HOST) { + return EADDRNOTAVAIL; + } + + // Remote address must be fully specified or not specified at all. + if ((raddr.cid == VMADDR_CID_ANY) ^ (raddr.port == VMADDR_PORT_ANY)) { + return EINVAL; + } + + // Cannot bind if already bound. + if (pcb->local_address.port != VMADDR_PORT_ANY) { + return EINVAL; + } + + uint32_t transport_cid; + struct vsock_transport *transport = pcb->transport; + errno_t error = transport->get_cid(transport->provider, &transport_cid); + if (error) { + return error; + } + + // Local CID must be this transport's CID or any. 
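/*
 * [Editor's illustrative sketch -- not part of the patch.]  What
 * vsock_bind_address()/vsock_bind_address_if_free() above arbitrate, seen
 * from userspace: an AF_VSOCK listener bound to a fixed port on any local
 * CID.  The sockaddr_vm fields mirror the ones this file populates
 * (svm_len/svm_family/svm_cid/svm_port); the <sys/vsock.h> header name is an
 * assumption about the userspace SDK.  Ports below VSOCK_PORT_RESERVED (1024)
 * require privilege, per the proc_suser() check above.
 */
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <sys/vsock.h>

static int
vsock_listen_on(uint32_t port)
{
	int fd = socket(AF_VSOCK, SOCK_STREAM, 0);
	struct sockaddr_vm addr;

	if (fd < 0) {
		return -1;
	}
	memset(&addr, 0, sizeof(addr));
	addr.svm_len = sizeof(addr);
	addr.svm_family = AF_VSOCK;
	addr.svm_cid = VMADDR_CID_ANY;     /* bind on whatever CID the transport reports */
	addr.svm_port = port;              /* VMADDR_PORT_ANY would pick an ephemeral port */

	if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) != 0 ||
	    listen(fd, 5) != 0) {
		close(fd);
		return -1;
	}
	return fd;
}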
+ if (laddr.cid != transport_cid && laddr.cid != VMADDR_CID_ANY) { + return EINVAL; + } + + if (laddr.port != VMADDR_PORT_ANY) { + error = vsock_bind_address_if_free(pcb, laddr.cid, laddr.port, raddr.cid, raddr.port); + } else { + lck_mtx_lock(&vsockinfo.port_lock); + + const uint32_t first = VSOCK_PORT_RESERVED; + const uint32_t last = VMADDR_PORT_ANY - 1; + uint32_t count = last - first + 1; + uint32_t *last_port = &vsockinfo.last_port; + + if (pcb->so->so_flags & SOF_BINDRANDOMPORT) { + uint32_t random = 0; + read_frandom(&random, sizeof(random)); + *last_port = first + (random % count); + } + + do { + if (count == 0) { + lck_mtx_unlock(&vsockinfo.port_lock); + return EADDRNOTAVAIL; + } + count--; + + ++*last_port; + if (*last_port < first || *last_port > last) { + *last_port = first; + } + + error = vsock_bind_address_if_free(pcb, laddr.cid, *last_port, raddr.cid, raddr.port); + } while (error); + + lck_mtx_unlock(&vsockinfo.port_lock); + } + + return error; +} + +static void +vsock_unbind_pcb(struct vsockpcb *pcb, bool is_locked) +{ + if (!pcb) { + return; + } + + socket_lock_assert_owned(pcb->so); + + soisdisconnected(pcb->so); + + if (!pcb->bound.le_prev) { + return; + } + + if (!is_locked) { + socket_unlock(pcb->so, 0); + lck_rw_lock_exclusive(vsockinfo.bound_lock); + socket_lock(pcb->so, 0); + if (!pcb->bound.le_prev) { + lck_rw_done(vsockinfo.bound_lock); + return; + } + } + + LIST_REMOVE(pcb, bound); + pcb->bound.le_next = NULL; + pcb->bound.le_prev = NULL; + + if (!is_locked) { + lck_rw_done(vsockinfo.bound_lock); + } +} + +static struct sockaddr * +vsock_new_sockaddr(struct vsock_address *address) +{ + if (!address) { + return NULL; + } + + struct sockaddr_vm *addr; + MALLOC(addr, struct sockaddr_vm *, sizeof(*addr), M_SONAME, M_WAITOK); + if (!addr) { + return NULL; + } + + bzero(addr, sizeof(*addr)); + addr->svm_len = sizeof(*addr); + addr->svm_family = AF_VSOCK; + addr->svm_port = address->port; + addr->svm_cid = address->cid; + + return (struct sockaddr *)addr; +} + +static errno_t +vsock_pcb_send_message(struct vsockpcb *pcb, enum vsock_operation operation, mbuf_t m) +{ + if (!pcb) { + if (m != NULL) { + mbuf_freem_list(m); + } + return EINVAL; + } + + socket_lock_assert_owned(pcb->so); + + errno_t error; + + struct vsock_address dst = pcb->remote_address; + if (dst.cid == VMADDR_CID_ANY || dst.port == VMADDR_PORT_ANY) { + if (m != NULL) { + mbuf_freem_list(m); + } + return EINVAL; + } + + struct vsock_address src = pcb->local_address; + if (src.cid == VMADDR_CID_ANY) { + uint32_t transport_cid; + struct vsock_transport *transport = pcb->transport; + error = transport->get_cid(transport->provider, &transport_cid); + if (error) { + if (m != NULL) { + mbuf_freem_list(m); + } + return error; + } + src.cid = transport_cid; + } + + uint32_t buf_alloc = pcb->so->so_rcv.sb_hiwat; + uint32_t fwd_cnt = pcb->fwd_cnt; + + if (src.cid == dst.cid) { + pcb->last_buf_alloc = buf_alloc; + pcb->last_fwd_cnt = fwd_cnt; + + socket_unlock(pcb->so, 0); + error = vsock_put_message(src, dst, operation, buf_alloc, fwd_cnt, m); + socket_lock(pcb->so, 0); + } else { + struct vsock_transport *transport = pcb->transport; + error = transport->put_message(transport->provider, src, dst, operation, buf_alloc, fwd_cnt, m); + + if (!error) { + pcb->last_buf_alloc = buf_alloc; + pcb->last_fwd_cnt = fwd_cnt; + } + } + + return error; +} + +static errno_t +vsock_pcb_reset_address(struct vsock_address src, struct vsock_address dst) +{ + if (dst.cid == VMADDR_CID_ANY || dst.port == VMADDR_PORT_ANY) { + return 
EINVAL; + } + + errno_t error; + struct vsock_transport *transport = NULL; + + if (src.cid == VMADDR_CID_ANY) { + transport = os_atomic_load(&the_vsock_transport, relaxed); + if (transport == NULL) { + return ENODEV; + } + + uint32_t transport_cid; + error = transport->get_cid(transport->provider, &transport_cid); + if (error) { + return error; + } + src.cid = transport_cid; + } + + if (src.cid == dst.cid) { + error = vsock_put_message(src, dst, VSOCK_RESET, 0, 0, NULL); + } else { + if (!transport) { + transport = os_atomic_load(&the_vsock_transport, relaxed); + if (transport == NULL) { + return ENODEV; + } + } + error = transport->put_message(transport->provider, src, dst, VSOCK_RESET, 0, 0, NULL); + } + + return error; +} + +static errno_t +vsock_pcb_safe_reset_address(struct vsockpcb *pcb, struct vsock_address src, struct vsock_address dst) +{ + if (pcb) { + socket_lock_assert_owned(pcb->so); + socket_unlock(pcb->so, 0); + } + errno_t error = vsock_pcb_reset_address(src, dst); + if (pcb) { + socket_lock(pcb->so, 0); + } + return error; +} + +static errno_t +vsock_pcb_connect(struct vsockpcb *pcb) +{ + return vsock_pcb_send_message(pcb, VSOCK_REQUEST, NULL); +} + +static errno_t +vsock_pcb_respond(struct vsockpcb *pcb) +{ + return vsock_pcb_send_message(pcb, VSOCK_RESPONSE, NULL); +} + +static errno_t +vsock_pcb_send(struct vsockpcb *pcb, mbuf_t m) +{ + return vsock_pcb_send_message(pcb, VSOCK_PAYLOAD, m); +} + +static errno_t +vsock_pcb_shutdown_send(struct vsockpcb *pcb) +{ + return vsock_pcb_send_message(pcb, VSOCK_SHUTDOWN_SEND, NULL); +} + +static errno_t +vsock_pcb_reset(struct vsockpcb *pcb) +{ + return vsock_pcb_send_message(pcb, VSOCK_RESET, NULL); +} + +static errno_t +vsock_pcb_credit_update(struct vsockpcb *pcb) +{ + return vsock_pcb_send_message(pcb, VSOCK_CREDIT_UPDATE, NULL); +} + +static errno_t +vsock_pcb_credit_request(struct vsockpcb *pcb) +{ + return vsock_pcb_send_message(pcb, VSOCK_CREDIT_REQUEST, NULL); +} + +static errno_t +vsock_disconnect_pcb_common(struct vsockpcb *pcb, bool is_locked) +{ + socket_lock_assert_owned(pcb->so); + vsock_unbind_pcb(pcb, is_locked); + return vsock_pcb_reset(pcb); +} + +static errno_t +vsock_disconnect_pcb_locked(struct vsockpcb *pcb) +{ + return vsock_disconnect_pcb_common(pcb, true); +} + +static errno_t +vsock_disconnect_pcb(struct vsockpcb *pcb) +{ + return vsock_disconnect_pcb_common(pcb, false); +} + +static errno_t +vsock_sockaddr_vm_validate(struct vsockpcb *pcb, struct sockaddr_vm *addr) +{ + if (!pcb || !pcb->so || !addr) { + return EINVAL; + } + + // Validate address length. + if (addr->svm_len < sizeof(struct sockaddr_vm)) { + return EINVAL; + } + + // Validate address family. + if (addr->svm_family != AF_UNSPEC && addr->svm_family != AF_VSOCK) { + return EAFNOSUPPORT; + } + + // Only stream is supported currently. + if (pcb->so->so_type != SOCK_STREAM) { + return EAFNOSUPPORT; + } + + return 0; +} +/* VSock Receive Handlers */ + +static errno_t +vsock_put_message_connected(struct vsockpcb *pcb, enum vsock_operation op, mbuf_t m) +{ + socket_lock_assert_owned(pcb->so); + + errno_t error = 0; + + switch (op) { + case VSOCK_SHUTDOWN: + error = vsock_disconnect_pcb(pcb); + break; + case VSOCK_SHUTDOWN_RECEIVE: + socantsendmore(pcb->so); + break; + case VSOCK_SHUTDOWN_SEND: + socantrcvmore(pcb->so); + break; + case VSOCK_PAYLOAD: + // Add data to the receive queue then wakeup any reading threads. 
+ error = !sbappendstream(&pcb->so->so_rcv, m); + if (!error) { + sorwakeup(pcb->so); + } + break; + case VSOCK_RESET: + vsock_unbind_pcb(pcb, false); + break; + default: + error = ENOTSUP; + break; + } + + return error; +} + +static errno_t +vsock_put_message_connecting(struct vsockpcb *pcb, enum vsock_operation op) +{ + socket_lock_assert_owned(pcb->so); + + errno_t error = 0; + + switch (op) { + case VSOCK_RESPONSE: + soisconnected(pcb->so); + break; + case VSOCK_RESET: + pcb->so->so_error = EAGAIN; + error = vsock_disconnect_pcb(pcb); + break; + default: + vsock_disconnect_pcb(pcb); + error = ENOTSUP; + break; + } + + return error; +} + +static errno_t +vsock_put_message_listening(struct vsockpcb *pcb, enum vsock_operation op, struct vsock_address src, struct vsock_address dst) +{ + socket_lock_assert_owned(pcb->so); + + struct sockaddr_vm addr; + struct socket *so2 = NULL; + struct vsockpcb *pcb2 = NULL; + + errno_t error = 0; + + switch (op) { + case VSOCK_REQUEST: + addr = (struct sockaddr_vm) { + .svm_len = sizeof(addr), + .svm_family = AF_VSOCK, + .svm_reserved1 = 0, + .svm_port = pcb->local_address.port, + .svm_cid = pcb->local_address.cid + }; + so2 = sonewconn(pcb->so, 0, (struct sockaddr *)&addr); + if (!so2) { + // It is likely that the backlog is full. Deny this request. + vsock_pcb_safe_reset_address(pcb, dst, src); + error = ECONNREFUSED; + break; + } + + pcb2 = sotovsockpcb(so2); + if (!pcb2) { + error = EINVAL; + goto done; + } + + error = vsock_bind_address(pcb2, dst, src); + if (error) { + goto done; + } + + error = vsock_pcb_respond(pcb2); + if (error) { + goto done; + } + + soisconnected(so2); + +done: + if (error) { + soisdisconnected(so2); + if (pcb2) { + vsock_unbind_pcb(pcb2, false); + } + socket_unlock(so2, 1); + vsock_pcb_reset_address(dst, src); + } else { + socket_unlock(so2, 0); + } + socket_lock(pcb->so, 0); + + break; + case VSOCK_RESET: + error = vsock_pcb_safe_reset_address(pcb, dst, src); + break; + default: + vsock_pcb_safe_reset_address(pcb, dst, src); + error = ENOTSUP; + break; + } + + return error; +} + +/* VSock Transport */ + +errno_t +vsock_add_transport(struct vsock_transport *transport) +{ + if (transport == NULL || transport->provider == NULL) { + return EINVAL; + } + if (!os_atomic_cmpxchg((void * volatile *)&the_vsock_transport, NULL, transport, acq_rel)) { + return EEXIST; + } + return 0; +} + +errno_t +vsock_remove_transport(struct vsock_transport *transport) +{ + if (!os_atomic_cmpxchg((void * volatile *)&the_vsock_transport, transport, NULL, acq_rel)) { + return ENODEV; + } + return 0; +} + +errno_t +vsock_reset_transport(struct vsock_transport *transport) +{ + if (transport == NULL) { + return EINVAL; + } + + errno_t error = 0; + struct vsockpcb *pcb = NULL; + struct vsockpcb *tmp_pcb = NULL; + + lck_rw_lock_exclusive(vsockinfo.bound_lock); + LIST_FOREACH_SAFE(pcb, &vsockinfo.bound, bound, tmp_pcb) { + // Disconnect this transport's sockets. Listen and bind sockets must stay alive. 
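+		// bound_lock is already held exclusively here, which is why the _locked disconnect variant is used below.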
+ socket_lock(pcb->so, 1); + if (pcb->transport == transport && pcb->so->so_state & (SS_ISCONNECTED | SS_ISCONNECTING | SS_ISDISCONNECTING)) { + errno_t dc_error = vsock_disconnect_pcb_locked(pcb); + if (dc_error && !error) { + error = dc_error; + } + } + socket_unlock(pcb->so, 1); + } + lck_rw_done(vsockinfo.bound_lock); + + return error; +} + +errno_t +vsock_put_message(struct vsock_address src, struct vsock_address dst, enum vsock_operation op, uint32_t buf_alloc, uint32_t fwd_cnt, mbuf_t m) +{ + struct vsockpcb *pcb = vsock_get_matching_pcb(dst, src); + if (!pcb) { + if (op != VSOCK_RESET) { + vsock_pcb_reset_address(dst, src); + } + if (m != NULL) { + mbuf_freem_list(m); + } + return EINVAL; + } + + socket_lock_assert_owned(pcb->so); + + struct socket *so = pcb->so; + errno_t error = 0; + + // Check if the peer's buffer has changed. Update our view of the peer's forwarded bytes. + int buffers_changed = (pcb->peer_buf_alloc != buf_alloc) || (pcb->peer_fwd_cnt) != fwd_cnt; + pcb->peer_buf_alloc = buf_alloc; + pcb->peer_fwd_cnt = fwd_cnt; + + // Peer's buffer has enough space for the next packet. Notify any threads waiting for space. + if (buffers_changed && vsock_get_peer_space(pcb) >= pcb->waiting_send_size) { + sowwakeup(so); + } + + switch (op) { + case VSOCK_CREDIT_REQUEST: + error = vsock_pcb_credit_update(pcb); + break; + case VSOCK_CREDIT_UPDATE: + break; + default: + if (so->so_state & SS_ISCONNECTED) { + error = vsock_put_message_connected(pcb, op, m); + m = NULL; + } else if (so->so_state & SS_ISCONNECTING) { + error = vsock_put_message_connecting(pcb, op); + } else if (so->so_options & SO_ACCEPTCONN) { + error = vsock_put_message_listening(pcb, op, src, dst); + } else { + // Reset the connection for other states such as 'disconnecting'. + error = vsock_disconnect_pcb(pcb); + if (!error) { + error = ENODEV; + } + } + break; + } + socket_unlock(so, 1); + + if (m != NULL) { + mbuf_freem_list(m); + } + + return error; +} + +/* VSock Sysctl */ + +static int +vsock_pcblist SYSCTL_HANDLER_ARGS +{ +#pragma unused(oidp,arg2) + + int error; + + // Only stream is supported. + if ((intptr_t)arg1 != SOCK_STREAM) { + return EINVAL; + } + + // Get the generation count and the count of all vsock sockets. + lck_rw_lock_shared(vsockinfo.all_lock); + uint64_t n = vsockinfo.all_pcb_count; + vsock_gen_t gen_count = vsockinfo.vsock_gencnt; + lck_rw_done(vsockinfo.all_lock); + + const size_t xpcb_len = sizeof(struct xvsockpcb); + struct xvsockpgen xvg; + + /* + * The process of preparing the PCB list is too time-consuming and + * resource-intensive to repeat twice on every request. + */ + if (req->oldptr == USER_ADDR_NULL) { + req->oldidx = (size_t)(2 * sizeof(xvg) + (n + n / 8) * xpcb_len); + return 0; + } + + if (req->newptr != USER_ADDR_NULL) { + return EPERM; + } + + bzero(&xvg, sizeof(xvg)); + xvg.xvg_len = sizeof(xvg); + xvg.xvg_count = n; + xvg.xvg_gen = gen_count; + xvg.xvg_sogen = so_gencnt; + error = SYSCTL_OUT(req, &xvg, sizeof(xvg)); + if (error) { + return error; + } + + // Return if no sockets exist. + if (n == 0) { + return 0; + } + + lck_rw_lock_shared(vsockinfo.all_lock); + + n = 0; + struct vsockpcb *pcb = NULL; + TAILQ_FOREACH(pcb, &vsockinfo.all, all) { + // Bail if there is not enough user buffer for this next socket. + if (req->oldlen - req->oldidx - sizeof(xvg) < xpcb_len) { + break; + } + + // Populate the socket structure. 
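+		// Only report sockets whose generation is at or below the snapshot taken above; newer sockets are left for a later request.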
+ socket_lock(pcb->so, 1); + if (pcb->vsock_gencnt <= gen_count) { + struct xvsockpcb xpcb; + bzero(&xpcb, xpcb_len); + xpcb.xv_len = xpcb_len; + xpcb.xv_vsockpp = (uint64_t)VM_KERNEL_ADDRHASH(pcb); + xpcb.xvp_local_cid = pcb->local_address.cid; + xpcb.xvp_local_port = pcb->local_address.port; + xpcb.xvp_remote_cid = pcb->remote_address.cid; + xpcb.xvp_remote_port = pcb->remote_address.port; + xpcb.xvp_rxcnt = pcb->fwd_cnt; + xpcb.xvp_txcnt = pcb->tx_cnt; + xpcb.xvp_peer_rxhiwat = pcb->peer_buf_alloc; + xpcb.xvp_peer_rxcnt = pcb->peer_fwd_cnt; + xpcb.xvp_last_pid = pcb->so->last_pid; + xpcb.xvp_gencnt = pcb->vsock_gencnt; + if (pcb->so) { + sotoxsocket(pcb->so, &xpcb.xv_socket); + } + socket_unlock(pcb->so, 1); + + error = SYSCTL_OUT(req, &xpcb, xpcb_len); + if (error != 0) { + break; + } + n++; + } else { + socket_unlock(pcb->so, 1); + } + } + + // Update the generation count to match the sockets being returned. + gen_count = vsockinfo.vsock_gencnt; + + lck_rw_done(vsockinfo.all_lock); + + if (!error) { + /* + * Give the user an updated idea of our state. + * If the generation differs from what we told + * her before, she knows that something happened + * while we were processing this request, and it + * might be necessary to retry. + */ + bzero(&xvg, sizeof(xvg)); + xvg.xvg_len = sizeof(xvg); + xvg.xvg_count = n; + xvg.xvg_gen = gen_count; + xvg.xvg_sogen = so_gencnt; + error = SYSCTL_OUT(req, &xvg, sizeof(xvg)); + } + + return error; +} + +#ifdef SYSCTL_DECL +SYSCTL_NODE(_net, OID_AUTO, vsock, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "vsock"); +SYSCTL_UINT(_net_vsock, OID_AUTO, sendspace, CTLFLAG_RW | CTLFLAG_LOCKED, + &vsock_sendspace, 0, "Maximum outgoing vsock datagram size"); +SYSCTL_UINT(_net_vsock, OID_AUTO, recvspace, CTLFLAG_RW | CTLFLAG_LOCKED, + &vsock_recvspace, 0, "Maximum incoming vsock datagram size"); +SYSCTL_PROC(_net_vsock, OID_AUTO, pcblist, + CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, + (caddr_t)(long)SOCK_STREAM, 0, vsock_pcblist, "S,xvsockpcb", + "List of active vsock sockets"); +#endif + +/* VSock Protocol */ + +static int +vsock_attach(struct socket *so, int proto, struct proc *p) +{ + #pragma unused(proto, p) + + // Attach should only be run once per socket. + struct vsockpcb *pcb = sotovsockpcb(so); + if (pcb) { + return EINVAL; + } + + // Get the transport for this socket. + struct vsock_transport *transport = os_atomic_load(&the_vsock_transport, relaxed); + if (transport == NULL) { + return ENODEV; + } + + // Reserve send and receive buffers. + errno_t error = soreserve(so, vsock_sendspace, vsock_recvspace); + if (error) { + return error; + } + + // Initialize the vsock protocol control block. + pcb = zalloc(vsockpcb_zone); + if (pcb == NULL) { + return ENOBUFS; + } + bzero(pcb, sizeof(*pcb)); + pcb->so = so; + pcb->transport = transport; + pcb->local_address = (struct vsock_address) { + .cid = VMADDR_CID_ANY, + .port = VMADDR_PORT_ANY + }; + pcb->remote_address = (struct vsock_address) { + .cid = VMADDR_CID_ANY, + .port = VMADDR_PORT_ANY + }; + so->so_pcb = pcb; + + // Tell the transport that this socket has attached. + error = transport->attach_socket(transport->provider); + if (error) { + return error; + } + + // Add to the list of all vsock sockets. 
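+	// Stamp the socket with a fresh generation count so an in-flight pcblist sysctl can tell it was added after its snapshot.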
+ lck_rw_lock_exclusive(vsockinfo.all_lock); + TAILQ_INSERT_TAIL(&vsockinfo.all, pcb, all); + vsockinfo.all_pcb_count++; + pcb->vsock_gencnt = ++vsockinfo.vsock_gencnt; + lck_rw_done(vsockinfo.all_lock); + + return 0; +} + +static int +vsock_control(struct socket *so, u_long cmd, caddr_t data, struct ifnet *ifp, struct proc *p) +{ + #pragma unused(ifp) + + VERIFY(so != NULL || p == kernproc); + + if (cmd != IOCTL_VM_SOCKETS_GET_LOCAL_CID) { + return EINVAL; + } + + struct vsock_transport *transport; + if (so) { + struct vsockpcb *pcb = sotovsockpcb(so); + if (pcb == NULL) { + return EINVAL; + } + transport = pcb->transport; + } else { + transport = os_atomic_load(&the_vsock_transport, relaxed); + } + + if (transport == NULL) { + return ENODEV; + } + + uint32_t transport_cid; + errno_t error = transport->get_cid(transport->provider, &transport_cid); + if (error) { + return error; + } + + memcpy(data, &transport_cid, sizeof(transport_cid)); + + return 0; +} + +static int +vsock_detach(struct socket *so) +{ + struct vsockpcb *pcb = sotovsockpcb(so); + if (pcb == NULL) { + return EINVAL; + } + + vsock_unbind_pcb(pcb, false); + + // Tell the transport that this socket has detached. + struct vsock_transport *transport = pcb->transport; + errno_t error = transport->detach_socket(transport->provider); + if (error) { + return error; + } + + // Remove from the list of all vsock sockets. + lck_rw_lock_exclusive(vsockinfo.all_lock); + TAILQ_REMOVE(&vsockinfo.all, pcb, all); + pcb->all.tqe_next = NULL; + pcb->all.tqe_prev = NULL; + vsockinfo.all_pcb_count--; + vsockinfo.vsock_gencnt++; + lck_rw_done(vsockinfo.all_lock); + + // Deallocate any resources. + zfree(vsockpcb_zone, pcb); + so->so_pcb = 0; + so->so_flags |= SOF_PCBCLEARING; + sofree(so); + + return 0; +} + +static int +vsock_abort(struct socket *so) +{ + soisdisconnected(so); + return vsock_detach(so); +} + +static int +vsock_bind(struct socket *so, struct sockaddr *nam, struct proc *p) +{ + #pragma unused(p) + + struct vsockpcb *pcb = sotovsockpcb(so); + if (pcb == NULL) { + return EINVAL; + } + + struct sockaddr_vm *addr = (struct sockaddr_vm *)nam; + + errno_t error = vsock_sockaddr_vm_validate(pcb, addr); + if (error) { + return error; + } + + struct vsock_address laddr = (struct vsock_address) { + .cid = addr->svm_cid, + .port = addr->svm_port, + }; + + struct vsock_address raddr = (struct vsock_address) { + .cid = VMADDR_CID_ANY, + .port = VMADDR_PORT_ANY, + }; + + error = vsock_bind_address(pcb, laddr, raddr); + if (error) { + return error; + } + + return 0; +} + +static int +vsock_listen(struct socket *so, struct proc *p) +{ + #pragma unused(p) + + struct vsockpcb *pcb = sotovsockpcb(so); + if (pcb == NULL) { + return EINVAL; + } + + // Only stream is supported currently. + if (so->so_type != SOCK_STREAM) { + return EAFNOSUPPORT; + } + + struct vsock_address *addr = &pcb->local_address; + + if (addr->port == VMADDR_CID_ANY) { + return EFAULT; + } + + struct vsock_transport *transport = pcb->transport; + uint32_t transport_cid; + errno_t error = transport->get_cid(transport->provider, &transport_cid); + if (error) { + return error; + } + + // Can listen on the transport's cid or any. + if (addr->cid != transport_cid && addr->cid != VMADDR_CID_ANY) { + return EFAULT; + } + + return 0; +} + +static int +vsock_accept(struct socket *so, struct sockaddr **nam) +{ + struct vsockpcb *pcb = sotovsockpcb(so); + if (pcb == NULL) { + return EINVAL; + } + + // Do not accept disconnected sockets. 
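+	// A queued connection may already have been reset by the peer while waiting to be accepted.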
+ if (so->so_state & SS_ISDISCONNECTED) { + return ECONNABORTED; + } + + *nam = vsock_new_sockaddr(&pcb->remote_address); + + return 0; +} + +static int +vsock_connect(struct socket *so, struct sockaddr *nam, struct proc *p) +{ + #pragma unused(p) + + struct vsockpcb *pcb = sotovsockpcb(so); + if (pcb == NULL) { + return EINVAL; + } + + struct sockaddr_vm *addr = (struct sockaddr_vm *)nam; + + errno_t error = vsock_sockaddr_vm_validate(pcb, addr); + if (error) { + return error; + } + + uint32_t transport_cid; + struct vsock_transport *transport = pcb->transport; + error = transport->get_cid(transport->provider, &transport_cid); + if (error) { + return error; + } + + // Only supporting connections to the host, hypervisor, or self for now. + if (addr->svm_cid != VMADDR_CID_HOST && + addr->svm_cid != VMADDR_CID_HYPERVISOR && + addr->svm_cid != transport_cid) { + return EFAULT; + } + + soisconnecting(so); + + // Set the remote and local address. + struct vsock_address remote_addr = (struct vsock_address) { + .cid = addr->svm_cid, + .port = addr->svm_port, + }; + + struct vsock_address local_addr = (struct vsock_address) { + .cid = transport_cid, + .port = VMADDR_PORT_ANY, + }; + + // Bind to the address. + error = vsock_bind_address(pcb, local_addr, remote_addr); + if (error) { + goto cleanup; + } + + // Attempt a connection using the socket's transport. + error = vsock_pcb_connect(pcb); + if (error) { + goto cleanup; + } + + if ((so->so_state & SS_ISCONNECTED) == 0) { + // Don't wait for peer's response if non-blocking. + if (so->so_state & SS_NBIO) { + error = EINPROGRESS; + goto done; + } + + struct timespec ts = (struct timespec) { + .tv_sec = so->so_snd.sb_timeo.tv_sec, + .tv_nsec = so->so_snd.sb_timeo.tv_usec * 1000, + }; + + lck_mtx_t *mutex_held; + if (so->so_proto->pr_getlock != NULL) { + mutex_held = (*so->so_proto->pr_getlock)(so, PR_F_WILLUNLOCK); + } else { + mutex_held = so->so_proto->pr_domain->dom_mtx; + } + + // Wait until we receive a response to the connect request. + error = msleep((caddr_t)&so->so_timeo, mutex_held, PSOCK | PCATCH, "vsock_connect", &ts); + if (error) { + if (error == EAGAIN) { + error = ETIMEDOUT; + } + goto cleanup; + } + } + +cleanup: + if (so->so_error && !error) { + error = so->so_error; + so->so_error = 0; + } + if (!error) { + error = !(so->so_state & SS_ISCONNECTED); + } + if (error) { + vsock_unbind_pcb(pcb, false); + } + +done: + return error; +} + +static int +vsock_disconnect(struct socket *so) +{ + struct vsockpcb *pcb = sotovsockpcb(so); + if (pcb == NULL) { + return EINVAL; + } + + return vsock_disconnect_pcb(pcb); +} + +static int +vsock_sockaddr(struct socket *so, struct sockaddr **nam) +{ + struct vsockpcb *pcb = sotovsockpcb(so); + if (pcb == NULL) { + return EINVAL; + } + + *nam = vsock_new_sockaddr(&pcb->local_address); + + return 0; +} + +static int +vsock_peeraddr(struct socket *so, struct sockaddr **nam) +{ + struct vsockpcb *pcb = sotovsockpcb(so); + if (pcb == NULL) { + return EINVAL; + } + + *nam = vsock_new_sockaddr(&pcb->remote_address); + + return 0; +} + +static int +vsock_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *nam, struct mbuf *control, proc_t p) +{ + #pragma unused(flags, nam, p) + + struct vsockpcb *pcb = sotovsockpcb(so); + if (pcb == NULL || m == NULL) { + return EINVAL; + } + + if (control != NULL) { + m_freem(control); + return EOPNOTSUPP; + } + + // Ensure this socket is connected. 
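+	// Every early return below frees the pending mbuf chain, since it is not handed to the transport on failure.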
+ if ((so->so_state & SS_ISCONNECTED) == 0) { + if (m != NULL) { + mbuf_freem_list(m); + } + return EPERM; + } + + errno_t error; + + const size_t len = mbuf_pkthdr_len(m); + uint32_t free_space = vsock_get_peer_space(pcb); + + // Ensure the peer has enough space in their receive buffer. + while (len > free_space) { + // Record the number of free peer bytes necessary before we can send. + if (len > pcb->waiting_send_size) { + pcb->waiting_send_size = len; + } + + // Send a credit request. + error = vsock_pcb_credit_request(pcb); + if (error) { + if (m != NULL) { + mbuf_freem_list(m); + } + return error; + } + + // Check again in case free space was automatically updated in loopback case. + free_space = vsock_get_peer_space(pcb); + if (len <= free_space) { + pcb->waiting_send_size = 0; + break; + } + + // Bail if this is a non-blocking socket. + if (so->so_state & SS_NBIO) { + if (m != NULL) { + mbuf_freem_list(m); + } + return EWOULDBLOCK; + } + + // Wait until our peer has enough free space in their receive buffer. + error = sbwait(&so->so_snd); + pcb->waiting_send_size = 0; + if (error) { + if (m != NULL) { + mbuf_freem_list(m); + } + return error; + } + + // Bail if an error occured or we can't send more. + if (so->so_state & SS_CANTSENDMORE) { + if (m != NULL) { + mbuf_freem_list(m); + } + return EPIPE; + } else if (so->so_error) { + error = so->so_error; + so->so_error = 0; + if (m != NULL) { + mbuf_freem_list(m); + } + return error; + } + + free_space = vsock_get_peer_space(pcb); + } + + // Send a payload over the transport. + error = vsock_pcb_send(pcb, m); + if (error) { + return error; + } + + pcb->tx_cnt += len; + + return 0; +} + +static int +vsock_shutdown(struct socket *so) +{ + struct vsockpcb *pcb = sotovsockpcb(so); + if (pcb == NULL) { + return EINVAL; + } + + socantsendmore(so); + + // Tell peer we will no longer send. + errno_t error = vsock_pcb_shutdown_send(pcb); + if (error) { + return error; + } + + return 0; +} + +static int +vsock_soreceive(struct socket *so, struct sockaddr **psa, struct uio *uio, + struct mbuf **mp0, struct mbuf **controlp, int *flagsp) +{ + struct vsockpcb *pcb = sotovsockpcb(so); + if (pcb == NULL) { + return EINVAL; + } + + user_ssize_t length = uio_resid(uio); + int result = soreceive(so, psa, uio, mp0, controlp, flagsp); + length -= uio_resid(uio); + + socket_lock(so, 1); + + pcb->fwd_cnt += length; + + const uint32_t threshold = VSOCK_MAX_PACKET_SIZE; + + // Send a credit update if is possible that the peer will no longer send. 
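+	// last_buf_alloc and last_fwd_cnt reflect what was last advertised to the peer; once close to a full advertised buffer has been consumed since then, refresh the peer's view so it does not stall.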
+ if ((pcb->fwd_cnt - pcb->last_fwd_cnt + threshold) >= pcb->last_buf_alloc) { + errno_t error = vsock_pcb_credit_update(pcb); + if (!result && error) { + result = error; + } + } + + socket_unlock(so, 1); + + return result; +} + +static struct pr_usrreqs vsock_usrreqs = { + .pru_abort = vsock_abort, + .pru_attach = vsock_attach, + .pru_control = vsock_control, + .pru_detach = vsock_detach, + .pru_bind = vsock_bind, + .pru_listen = vsock_listen, + .pru_accept = vsock_accept, + .pru_connect = vsock_connect, + .pru_disconnect = vsock_disconnect, + .pru_send = vsock_send, + .pru_shutdown = vsock_shutdown, + .pru_sockaddr = vsock_sockaddr, + .pru_peeraddr = vsock_peeraddr, + .pru_sosend = sosend, + .pru_soreceive = vsock_soreceive, +}; + +static void +vsock_init(struct protosw *pp, struct domain *dp) +{ + #pragma unused(dp) + + static int vsock_initialized = 0; + VERIFY((pp->pr_flags & (PR_INITIALIZED | PR_ATTACHED)) == PR_ATTACHED); + if (!os_atomic_cmpxchg((volatile int *)&vsock_initialized, 0, 1, acq_rel)) { + return; + } + + // Setup VSock protocol info struct. + vsockinfo.vsock_lock_grp_attr = lck_grp_attr_alloc_init(); + vsockinfo.vsock_lock_grp = lck_grp_alloc_init("vsock", vsockinfo.vsock_lock_grp_attr); + vsockinfo.vsock_lock_attr = lck_attr_alloc_init(); + if ((vsockinfo.all_lock = lck_rw_alloc_init(vsockinfo.vsock_lock_grp, vsockinfo.vsock_lock_attr)) == NULL || + (vsockinfo.bound_lock = lck_rw_alloc_init(vsockinfo.vsock_lock_grp, vsockinfo.vsock_lock_attr)) == NULL) { + panic("%s: unable to allocate PCB lock\n", __func__); + /* NOTREACHED */ + } + lck_mtx_init(&vsockinfo.port_lock, vsockinfo.vsock_lock_grp, vsockinfo.vsock_lock_attr); + TAILQ_INIT(&vsockinfo.all); + LIST_INIT(&vsockinfo.bound); + vsockinfo.last_port = VMADDR_PORT_ANY; +} + +static struct protosw vsocksw[] = { + { + .pr_type = SOCK_STREAM, + .pr_protocol = 0, + .pr_flags = PR_CONNREQUIRED | PR_WANTRCVD, + .pr_init = vsock_init, + .pr_usrreqs = &vsock_usrreqs, + } +}; + +static const int vsock_proto_count = (sizeof(vsocksw) / sizeof(struct protosw)); + +/* VSock Domain */ + +static struct domain *vsock_domain = NULL; + +static void +vsock_dinit(struct domain *dp) +{ + // The VSock domain is initialized with a singleton pattern. + VERIFY(!(dp->dom_flags & DOM_INITIALIZED)); + VERIFY(vsock_domain == NULL); + vsock_domain = dp; + + // Add protocols and initialize. 
+ for (int i = 0; i < vsock_proto_count; i++) { + net_add_proto((struct protosw *)&vsocksw[i], dp, 1); + } +} + +struct domain vsockdomain_s = { + .dom_family = PF_VSOCK, + .dom_name = "vsock", + .dom_init = vsock_dinit, + .dom_maxrtkey = sizeof(struct sockaddr_vm), + .dom_protohdrlen = sizeof(struct sockaddr_vm), +}; diff --git a/bsd/libkern/libkern.h b/bsd/libkern/libkern.h index b95c70e3c..d27764e59 100644 --- a/bsd/libkern/libkern.h +++ b/bsd/libkern/libkern.h @@ -149,8 +149,7 @@ extern int ffsll(unsigned long long); extern int fls(unsigned int); extern int flsll(unsigned long long); extern u_int32_t random(void); -extern int scanc(u_int, u_char *, const u_char *, int); -extern int skpc(int, int, char *); +extern size_t scanc(size_t, u_char *, const u_char *, u_char); extern long strtol(const char*, char **, int); extern u_long strtoul(const char *, char **, int); extern quad_t strtoq(const char *, char **, int); @@ -218,7 +217,8 @@ extern int vsnprintf(char *, size_t, const char *, va_list) __printflike(3, extern int vscnprintf(char *, size_t, const char *, va_list) __printflike(3, 0); #if XNU_KERNEL_PRIVATE -extern int vprintf_log_locked(const char *, va_list, bool addcr) __printflike(1, 0); +extern bool printf_log_locked(bool addcr, const char*, ...) __printflike(2, 3); +extern bool vprintf_log_locked(const char *, va_list, bool addcr) __printflike(1, 0); extern void osobject_retain(void * object); extern void osobject_release(void * object); #endif diff --git a/bsd/libkern/scanc.c b/bsd/libkern/scanc.c index d8d1c51cd..59f3d4778 100644 --- a/bsd/libkern/scanc.c +++ b/bsd/libkern/scanc.c @@ -62,8 +62,8 @@ #include -int -scanc(u_int size, u_char *cp, const u_char table[], int mask0) +size_t +scanc(size_t size, u_char *cp, const u_char table[], u_char mask0) { u_char *end; u_char mask; @@ -72,5 +72,5 @@ scanc(u_int size, u_char *cp, const u_char table[], int mask0) for (end = &cp[size]; cp != end && (table[*cp] & mask) == 0; ++cp) { ; } - return end - cp; + return (size_t)(end - cp); } diff --git a/bsd/libkern/skpc.c b/bsd/libkern/skpc.c deleted file mode 100644 index 16cba0997..000000000 --- a/bsd/libkern/skpc.c +++ /dev/null @@ -1,76 +0,0 @@ -/* - * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved. - * - * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * - * This file contains Original Code and/or Modifications of Original Code - * as defined in and that are subject to the Apple Public Source License - * Version 2.0 (the 'License'). You may not use this file except in - * compliance with the License. The rights granted to you under the License - * may not be used to create, or enable the creation or redistribution of, - * unlawful or unlicensed copies of an Apple operating system, or to - * circumvent, violate, or enable the circumvention or violation of, any - * terms of an Apple operating system software license agreement. - * - * Please obtain a copy of the License at - * http://www.opensource.apple.com/apsl/ and read it before using this file. - * - * The Original Code and all software distributed under the License are - * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER - * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, - * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. - * Please see the License for the specific language governing rights and - * limitations under the License. 
- * - * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ - */ -/*- - * Copyright (c) 1992, 1993 - * The Regents of the University of California. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. All advertising materials mentioning features or use of this software - * must display the following acknowledgement: - * This product includes software developed by the University of - * California, Berkeley and its contributors. - * 4. Neither the name of the University nor the names of its contributors - * may be used to endorse or promote products derived from this software - * without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. 
- * - * @(#)skpc.c 8.1 (Berkeley) 6/10/93 - */ - -#include - -int -skpc(int mask0, int size, char *cp0) -{ - u_char *cp, *end, mask; - - mask = mask0; - cp = (u_char *)cp0; - for (end = &cp[size]; cp < end && *cp == mask; ++cp) { - ; - } - return end - cp; -} diff --git a/bsd/libkern/url_encode.c b/bsd/libkern/url_encode.c index 353dbbdce..0ca3d5eb0 100644 --- a/bsd/libkern/url_encode.c +++ b/bsd/libkern/url_encode.c @@ -66,7 +66,7 @@ url_decode(char *str) c += hex2int(*str++); } if (*str) { - c = (c << 4) + hex2int(*str++); + c = (char)((c << 4) + hex2int(*str++)); } if (isprint(c)) { diff --git a/bsd/machine/Makefile b/bsd/machine/Makefile index cd91a9858..d8a50edcc 100644 --- a/bsd/machine/Makefile +++ b/bsd/machine/Makefile @@ -14,7 +14,7 @@ DATAFILES = \ _mcontext.h DRIVERKIT_DATAFILES = \ - limits.h types.h _types.h + limits.h types.h _types.h endian.h PRIVATE_DATAFILES = \ disklabel.h @@ -27,6 +27,11 @@ KERNELFILES = \ vmparam.h _types.h _limits.h _param.h \ _mcontext.h +MODULEMAP_INCDIR_FILES = \ + machine_types.modulemap + +INSTALL_MODULEMAP_INCDIR_MI_LIST = ${MODULEMAP_INCDIR_FILES} + INSTALL_MI_LIST = ${DATAFILES} INSTALL_DRIVERKIT_MI_LIST = ${DRIVERKIT_DATAFILES} INSTALL_MI_LCL_LIST = ${PRIVATE_DATAFILES} diff --git a/bsd/machine/_param.h b/bsd/machine/_param.h index 1a6ffc066..c6a6028b1 100644 --- a/bsd/machine/_param.h +++ b/bsd/machine/_param.h @@ -26,9 +26,9 @@ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #if defined (__i386__) || defined (__x86_64__) -#include "i386/_param.h" +#include #elif defined (__arm__) || defined (__arm64__) -#include "arm/_param.h" +#include #else #error architecture not supported #endif diff --git a/bsd/machine/exec.h b/bsd/machine/exec.h index cdbf3a2e3..cb2616336 100644 --- a/bsd/machine/exec.h +++ b/bsd/machine/exec.h @@ -42,7 +42,9 @@ struct exec_info { char **ev; }; -int grade_binary(cpu_type_t, cpu_subtype_t, bool allow_simulator_binary); -boolean_t pie_required(cpu_type_t, cpu_subtype_t); +int grade_binary(cpu_type_t, cpu_subtype_t, cpu_subtype_t, bool allow_simulator_binary); +boolean_t binary_match(cpu_type_t mask_bits, cpu_type_t req_cpu, + cpu_subtype_t req_subcpu, cpu_type_t test_cpu, + cpu_subtype_t test_subcpu); #endif /* _BSD_MACHINE_EXEC_H_ */ diff --git a/bsd/machine/machine_types.modulemap b/bsd/machine/machine_types.modulemap new file mode 100644 index 000000000..21260dd72 --- /dev/null +++ b/bsd/machine/machine_types.modulemap @@ -0,0 +1,12 @@ +// Module maps for machine/types.h and machine/_types.h. 
+// +// See also: stdint.modulemap + +module Darwin_C_stdint._machine_types { + export * + header "machine/types.h" +} +module Darwin_C_stdint._machine__types { + export * + header "machine/_types.h" +} diff --git a/bsd/machine/param.h b/bsd/machine/param.h index 6f9f03e70..5b69eeb33 100644 --- a/bsd/machine/param.h +++ b/bsd/machine/param.h @@ -32,9 +32,9 @@ #define _BSD_MACHINE_PARAM_H_ #if defined (__i386__) || defined(__x86_64__) -#include "i386/param.h" +#include #elif defined (__arm__) || defined (__arm64__) -#include "arm/param.h" +#include #else #error architecture not supported #endif diff --git a/bsd/man/man2/Makefile b/bsd/man/man2/Makefile index 0e724cc86..36387149b 100644 --- a/bsd/man/man2/Makefile +++ b/bsd/man/man2/Makefile @@ -150,10 +150,12 @@ DATAFILES = \ poll.2 \ posix_madvise.2 \ pread.2 \ + preadv.2 \ pselect.2 \ pthread_setugid_np.2 \ ptrace.2 \ pwrite.2 \ + pwritev.2 \ quotactl.2 \ read.2 \ readlink.2 \ diff --git a/bsd/man/man2/chflags.2 b/bsd/man/man2/chflags.2 index 6bea184c4..b1e15ed2e 100644 --- a/bsd/man/man2/chflags.2 +++ b/bsd/man/man2/chflags.2 @@ -77,7 +77,7 @@ The file has been archived. The file may not be changed. .It SF_APPEND The file may only be appended to. -.It SF_DATALESSFAULT +.It SF_DATALESS The file is a dataless placeholder. The system will attempt to materialize it when accessed according to the dataless file materialization policy of the accessing thread or process. See @@ -100,7 +100,7 @@ and flags may only be set or unset by the super-user. .Pp The -.Dq SF_DATALESSFAULT +.Dq SF_DATALESS flag is an internal flag and may not be set from user space. .Sh RETURN VALUES Upon successful completion, a value of 0 is returned. diff --git a/bsd/man/man2/fcntl.2 b/bsd/man/man2/fcntl.2 index 7390b18e3..519bb1e0b 100644 --- a/bsd/man/man2/fcntl.2 +++ b/bsd/man/man2/fcntl.2 @@ -166,8 +166,12 @@ physical blocks. This will not change the actual file size. Holes must be aligned to file system block boundaries. This will fail on file systems that do not support this interface. .It Dv F_SETSIZE -Truncate a file without zeroing space. -The calling process must have root privileges. +Deprecated. +In previous releases, this would allow a process with root privileges to +truncate a file without zeroing space. +For security reasons, this operation is no longer supported and will +instead truncate the file in the same manner as +.Xr truncate 2 . .It Dv F_RDADVISE Issue an advisory read async with no copy to user. .It Dv F_RDAHEAD diff --git a/bsd/man/man2/getattrlist.2 b/bsd/man/man2/getattrlist.2 index d0d8daec9..38f4bc35e 100644 --- a/bsd/man/man2/getattrlist.2 +++ b/bsd/man/man2/getattrlist.2 @@ -16,7 +16,7 @@ .\" .\" @(#)getattrlist.2 . -.Dd February 25, 2014 +.Dd February 11, 2020 .Dt GETATTRLIST 2 .Os Darwin .Sh NAME @@ -166,8 +166,13 @@ See below for a description of these attributes. If you request volume attributes, .Fa path must reference the root of a volume. +You must set ATTR_VOL_INFO in the volattr field if you request any +other volume attributes. In addition, you can't request volume attributes if you also request -file or directory attributes. +file, directory, fork or extended common attributes. +In addition, you can't request volume attributes if you also request the common +attributes ATTR_CMN_EXTENDED_SECURITY, ATTR_CMN_UUID, ATTR_CMN_GRPUUID, +ATTR_CMN_FILEID, or ATTR_CMN_PARENTID. . .It dirattr A bit set that specifies the directory attributes that you require. @@ -732,12 +737,11 @@ The following volume attributes are defined. 
.Bl -tag -width ATTR_VOL_ALLOCATIONCLUMP . .It ATTR_VOL_INFO -For reasons that are not at all obvious, you must set +For historical reasons you must set .Dv ATTR_VOL_INFO in the .Fa volattr field if you request any other volume attributes. -This does not result in any attribute data being added to the attribute buffer. . .It ATTR_VOL_FSTYPE A @@ -1257,7 +1261,20 @@ by the file system when asked to free space. .It EF_IS_SPARSE If this bit is set the item has sparse regions. . +.It EF_IS_SYNTHETIC +If this bit is set the item is a synthetic directory/symlink. +. .El +. +.It ATTR_CMNEXT_RECURSIVE_GENCOUNT +A +.Vt u_int64_t +that represents the recursive generation count of a directory that has +been marked as maintain-dir-stats in an apfs file system. This +gencount is updated any time any child is modified (as part of the +contract that a maintain-dir-stats directory manages). If the +directory is not marked maintain-dir-stats, a zero is returned. +. .El .Pp . @@ -1509,6 +1526,9 @@ in a single "partition" which share space. If this bit is set, the volume format supports having multiple logical filesystems which may be mounted and unmounted together and may present common filesystem identifier information. +.It VOL_CAP_FMT_SEALED +If this bit is set, the volume is cryptographically sealed and any modifications +may render the volume unusable. . . .El diff --git a/bsd/man/man2/i386_get_ldt.2 b/bsd/man/man2/i386_get_ldt.2 index 40ae07566..d19b557a1 100644 --- a/bsd/man/man2/i386_get_ldt.2 +++ b/bsd/man/man2/i386_get_ldt.2 @@ -32,7 +32,7 @@ .\" from: @(#)fork.2 6.5 (Berkeley) 3/10/91 .\" $FreeBSD: /repoman/r/ncvs/src/lib/libc/i386/sys/i386_get_ldt.2,v 1.21 2004/07/02 19:07:30 ru Exp $ .\" -.Dd September 20, 1993 +.Dd February 14, 2020 .Dt I386_GET_LDT 2 .Os .Sh NAME @@ -106,6 +106,16 @@ is 0 and is NULL then, as a special case, .Fn i386_set_ldt will free all descriptors. +.Sh NOTES +.Pp +.Fn i386_set_ldt +and +.Fn i386_get_ldt +may be used by 64-bit processes to create 32-bit (compatibility mode) code segments +(in addition to the set of other segments already specified), that, together with +additional infrastructure not provided by macOS, enables 32-bit code execution. +Some platforms may reject segments with non-zero base addresses by returning -1 and +setting errno to EINVAL. .Sh RETURN VALUES Upon successful completion, .Fn i386_get_ldt @@ -132,7 +142,10 @@ will fail if: An inappropriate value was used for .Fa start_sel or -.Fa num_sels . +.Fa num_sels , +or the platform does not support non-zero base addresses in custom descriptors and the descriptor base address passed to +.Fn i386_set_ldt +is non-zero. .It Bq Er EACCES The caller attempted to use a descriptor that would circumvent protection or cause a failure. diff --git a/bsd/man/man2/mmap.2 b/bsd/man/man2/mmap.2 index 9cbe1aa2a..a6b90722d 100644 --- a/bsd/man/man2/mmap.2 +++ b/bsd/man/man2/mmap.2 @@ -28,7 +28,7 @@ .\" @(#)mmap.2 8.4 (Berkeley) 5/11/95 .\" $FreeBSD: src/lib/libc/sys/mmap.2,v 1.56 2007/01/09 00:28:15 imp Exp $ .\" -.Dd April 21, 2006 +.Dd February 14, 2020 .Dt MMAP 2 .Os .Sh NAME @@ -65,22 +65,26 @@ argument is used by the system to determine the starting address of the mapping, and its interpretation is dependent on the setting of the MAP_FIXED flag. If MAP_FIXED is specified in .Fa flags , -the system will try to place the mapping at the specified address, +the system will try to place the mapping at the specified address, possibly removing a mapping that already exists at that location. 
If MAP_FIXED is not specified, then the system will attempt to use the range of addresses starting at .Fa addr if they do not overlap any existing mappings, -including memory allocated by malloc(3) and other such allocators. +including memory allocated by +.Xr malloc 3 +and other such allocators. Otherwise, the system will choose an alternate address for the mapping (using an implementation dependent algorithm) that does not overlap any existing mappings. In other words, -without MAP_FIXED the system will attempt to find an empty location in the address space if the specified address -range has already been mapped by something else. +without +.Dv MAP_FIXED +the system will attempt to find an empty location in the address space if the +specified address range has already been mapped by something else. If .Fa addr is zero and MAP_FIXED is not specified, @@ -113,7 +117,23 @@ Pages may be executed. .Pp Note that, due to hardware limitations, on some platforms PROT_WRITE may imply PROT_READ, and PROT_READ may imply PROT_EXEC. Portable programs -should not rely on these flags being separately enforcable. +should not rely on these flags being separately enforceable. +.Pp +When the hardened runtime is enabled +.Po +See the links in the +.Sx SEE ALSO +section +.Pc , +the protections cannot be both +.Dv PROT_WRITE +and +.Dv PROT_EXEC +without also having the flag +.Dv MAP_JIT +and the process possessing the +.Dv com.apple.security.cs.allow-jit +entitlement .Pp The .Fa flags @@ -136,11 +156,12 @@ The argument is ignored. Mac OS X specific: the file descriptor used for creating .Dv MAP_ANON -regions can be used to pass some Mach VM flags, and can -be specified as \-1 if no such flags are associated with -the region. Mach VM flags are defined in - and the ones that currently apply -to +regions can be used to pass some Mach VM flags, and can +be specified as \-1 if no such flags are associated with +the region. Mach VM flags are defined in +.In mach/vm_statistics.h +and the ones that currently apply +to .Nm mmap are: .Pp @@ -148,10 +169,12 @@ VM_FLAGS_PURGABLE to create Mach purgable (i.e. volatile) memory. .Pp VM_MAKE_TAG(tag) to associate an 8-bit tag with the region. .br - defines some preset tags (with a VM_MEMORY_ prefix). +.In mach/vm_statistics.h +defines some preset tags (with a VM_MEMORY_ prefix). Users are encouraged to use tags between 240 and 255. -Tags are used by tools such as vmmap(1) to help identify specific memory regions. -.Pp +Tags are used by tools such as +.Xr vmmap 1 +to help identify specific memory regions. .It Dv MAP_FILE Mapped from a regular file. (This is the default mapping type, and need not be specified.) @@ -188,9 +211,39 @@ Modifications are shared. Pages in this mapping are not retained in the kernel's memory cache. If the system runs low on memory, pages in MAP_NOCACHE mappings will be among the first to be reclaimed. -This flag is intended for mappings that have little locality and +This flag is intended for mappings that have little locality and provides a hint to the kernel that pages in this mapping are unlikely to be needed again in the near future. +.It Dv MAP_JIT +Allow mapping pages both +.Dv PROT_WRITE +and +.Dv PROT_EXEC +when the hardened is runtime enabled. Without this flag an attempt to create a +mapping with both +.Dv PROT_WRITE +and +.Dv PROT_EXEC +set will fail with +.Dv MAP_FAILED +on macOS. A writable, but not executable mapping +is returned on iOS, watchOS and tvOS. 
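As an illustrative aside, a minimal user-space sketch of requesting such a region (not taken from the manual text; the 16 KiB length is arbitrary) might look like:

#include <sys/mman.h>
#include <stdio.h>

int
main(void)
{
	/* Ask for an anonymous region that may be both written and executed.
	 * Under the hardened runtime this succeeds only when MAP_JIT is set
	 * and the process holds the com.apple.security.cs.allow-jit entitlement. */
	void *region = mmap(NULL, 0x4000, PROT_READ | PROT_WRITE | PROT_EXEC,
	    MAP_PRIVATE | MAP_ANON | MAP_JIT, -1, 0);
	if (region == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	munmap(region, 0x4000);
	return 0;
}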
+.Pp +Usage of this flag requires the caller to have the +.Dv com.apple.security.cs.allow-jit +entitlement on macOS. +.It Dv MAP_32BIT +Directs +.Fn mmap +to place the mapping into the first 4 Gigabytes of the process's address space. If +there is no free virtual address space in this range, +.Fn mmap +will return +.Dv MAP_FAILED. +.Pp +Note that in order for this flag to yield addresses below 4GiB, the program's +PAGEZERO must be reduced in size, since the default PAGEZERO size for 64-bit +programs is at least 4GiB. .El .Pp Conforming applications must specify either MAP_PRIVATE or MAP_SHARED. @@ -257,12 +310,16 @@ resides out of the valid address space for a user process. .Fa flags does not include either MAP_PRIVATE or MAP_SHARED. .It Bq Er EINVAL +.Fa flags +includes bits that are not part of any valid flags value. +.It Bq Er EINVAL The .Fa len argument -was negative or zero. Historically, the system call would not return an error if the argument was zero. -See other potential additional restrictions in the -COMPATIBILITY section below. +was negative or zero. Historically, the system call would not return an error +if the argument was zero. +See other potential additional restrictions in the +COMPATIBILITY section below. .It Bq Er EINVAL The .Fa offset @@ -284,13 +341,31 @@ limit for the process. .Dv MAP_ANON was specified and insufficient memory was available. .It Bq Er ENXIO -Addresses in the specified range are invalid for +Addresses in the specified range are invalid for .Fa fd . .It Bq Er EOVERFLOW Addresses in the specified range exceed the maximum offset -set for +set for .Fa fd . .El +.Sh ENTITLEMENTS +The following entitlements only have an effect when the hardened runtime is +enabled. +.Bl -tag -width Er +.It Dv com.apple.security.cs.allow-jit +A Boolean value that indicates whether the app may create writable and +executable memory using the +.Dv MAP_JIT +.Fa flag . +.It Dv com.apple.security.cs.allow-unsigned-executable-memory +A Boolean value that indicates whether the app may create writable and +executable memory without the restrictions imposed by using the +.Dv MAP_JIT +.Fa flag . +.It Dv com.apple.security.cs.disable-executable-page-protection +A Boolean value that indicates whether to disable all code signing +protections while launching an application, and during its execution. +.El .Sh LEGACY SYNOPSIS .Fd #include .Fd #include @@ -320,6 +395,18 @@ parameter must be a multiple of pagesize, as returned by .Fn sysconf . .El +.Pp +On macOS 10.14 Mojave the hardened runtime restricts pages from having both +the +.Dv PROT_WRITE +and +.Dv PROT_EXEC +protections without the caller also setting the +.Dv MAP_JIT +.Fa flag +and possessing the +.Dv com.apple.security.cs.allow-jit +entitlement. .Sh SEE ALSO .Xr madvise 2 , .Xr mincore 2 , @@ -331,3 +418,5 @@ as returned by .Xr munmap 2 , .Xr shmat 2 , .Xr getpagesize 3 +.Ss Apple Developer Documentation +https://developer.apple.com/documentation/security/hardened_runtime_entitlements diff --git a/bsd/man/man2/open.2 b/bsd/man/man2/open.2 index 73eb670f7..35078abc5 100644 --- a/bsd/man/man2/open.2 +++ b/bsd/man/man2/open.2 @@ -153,6 +153,7 @@ O_NOFOLLOW do not follow symlinks O_SYMLINK allow open of symlinks O_EVTONLY descriptor requested for event notifications only O_CLOEXEC mark as close-on-exec +O_NOFOLLOW_ANY do not follow symlinks in the entire path. .Ed .Pp Opening a file with @@ -234,6 +235,14 @@ flag. The state of the file descriptor flags can be inspected using the F_GETFD fcntl. See .Xr fcntl 2 . 
.Pp +If +.Dv O_NOFOLLOW_ANY +is used in the mask and any component of the path passed to +.Fn open +is a symbolic link then the +.Fn open +will fail. +.Pp If successful, .Fn open returns a non-negative integer, termed a file descriptor. @@ -253,6 +262,8 @@ and .Pp The system imposes a limit on the number of file descriptors that can be held open simultaneously by one process. +.Pp +A file's metadata can be updated even if the file was opened in read-only mode. .Xr Getdtablesize 2 returns the current system limit. .Sh RETURN VALUES @@ -354,6 +365,11 @@ The system file table is full. .Dv O_NOFOLLOW was specified and the target is a symbolic link. .\" =========== +.\" =========== +.It Bq Er ELOOP +.Dv O_NOFOLLOW_ANY +was specified and and a component of the path is a symbolic link. +.\" =========== .It Bq Er ENOENT .Dv O_CREAT is not set and the named file does not exist. diff --git a/bsd/man/man2/posix_spawn.2 b/bsd/man/man2/posix_spawn.2 index 83dea9536..a2d521bcd 100644 --- a/bsd/man/man2/posix_spawn.2 +++ b/bsd/man/man2/posix_spawn.2 @@ -327,6 +327,10 @@ file that is currently open for writing or reading by some process. .\" ========== .It Bq Er EBADARCH The new process file has no architectures appropriate for the current system. +.\" ========== +.It Bq Er EBADF +Bad file descriptor for one or more +.Fa file_actions . .El .Pp Additionally, they may fail for any of the reasons listed in diff --git a/bsd/man/man2/preadv.2 b/bsd/man/man2/preadv.2 new file mode 100644 index 000000000..d5e1c8ac7 --- /dev/null +++ b/bsd/man/man2/preadv.2 @@ -0,0 +1 @@ +.so man2/read.2 diff --git a/bsd/man/man2/pwritev.2 b/bsd/man/man2/pwritev.2 new file mode 100644 index 000000000..5a7182844 --- /dev/null +++ b/bsd/man/man2/pwritev.2 @@ -0,0 +1 @@ +.so man2/write.2 diff --git a/bsd/man/man2/read.2 b/bsd/man/man2/read.2 index cdf753cac..319a7c388 100644 --- a/bsd/man/man2/read.2 +++ b/bsd/man/man2/read.2 @@ -38,6 +38,7 @@ .Sh NAME .Nm pread , .Nm read , +.Nm preadv , .Nm readv .Nd read input .Sh LIBRARY @@ -60,6 +61,13 @@ .Fa "size_t nbyte" .Fc .Ft ssize_t +.Fo preadv +.Fa "int d" +.Fa "const struct iovec *iov" +.Fa "int iovcnt" +.Fa "off_t offset" +.Fc +.Ft ssize_t .Fo readv .Fa "int d" .Fa "const struct iovec *iov" @@ -81,12 +89,16 @@ buffers specified by the members of the .Fa iov array: iov[0], iov[1], ..., iov[iovcnt\|\-\|1]. .Fn pread -performs the same function, -but reads from the specified position in the file +and +.Fn preadv +perform the same functions, +but read from the specified position in the file without modifying the file pointer. .Pp For -.Fn readv , +.Fn readv +and +.Fn preadv , the .Fa iovec structure is defined as: @@ -103,6 +115,8 @@ Each entry specifies the base address and length of an area in memory where data should be placed. .Fn readv +and +.Fn preadv will always fill an area completely before proceeding to the next. .Pp @@ -124,8 +138,9 @@ object is undefined. Upon successful completion, .Fn read , .Fn readv , +.Fn pread , and -.Fn pread +.Fn preadv return the number of bytes actually read and placed in the buffer. The system guarantees to read the number of bytes requested if the descriptor references a normal file that has that many bytes left @@ -140,10 +155,11 @@ Otherwise, a -1 is returned and the global variable is set to indicate the error. .Sh ERRORS The -.Fn pread , .Fn read , +.Fn readv , +.Fn pread , and -.Fn readv +.Fn preadv calls will succeed unless: .Bl -tag -width Er @@ -201,7 +217,7 @@ An attempt to allocate a memory buffer fails. Insufficient memory is available. 
.\" =========== .It Bq Er ENXIO -An action is requested of a device that does not exist.. +An action is requested of a device that does not exist. .\" =========== .It Bq Er ENXIO A requested action cannot be performed by the device. @@ -216,9 +232,10 @@ volume (see .Xr mount_nfs 8 ) . .El .Pp -The .Fn pread -call may also return the following errors: +and +.Fn preadv +calls may also return the following errors: .Bl -tag -width Er .\" =========== .It Bq Er EINVAL @@ -247,7 +264,9 @@ during a read attempt on a socket. .Pp The .Fn readv -call may also return one of the following errors: +and +.Fn preadv +calls may also return one of the following errors: .Bl -tag -width Er .\" =========== .It Bq Er EFAULT @@ -303,6 +322,8 @@ and .Fn pread functions are expected to conform to .St -xpg4.2 . +.Fn preadv +is nonstandard. .Sh HISTORY The .Fn pread diff --git a/bsd/man/man2/select.2 b/bsd/man/man2/select.2 index fd0833dc1..b6b56c034 100644 --- a/bsd/man/man2/select.2 +++ b/bsd/man/man2/select.2 @@ -140,13 +140,19 @@ to the maximum number of descriptors supported by the system. .Pp If .Fa timeout -is a non-nil pointer, it specifies a maximum interval to wait for the -selection to complete. If +is not a null pointer, it specifies a maximum interval to wait for the +selection to complete. +.Pp +If .Fa timeout -is a nil pointer, the select blocks indefinitely. To effect a poll, the +is a null pointer, the select blocks indefinitely. +.Pp +To effect a poll, the +.Fa timeout +argument should be not be a null pointer, +but it should point to a zero-valued timeval structure. +.Pp .Fa timeout -argument should be non-nil, pointing to a zero-valued timeval structure. -.Fa Timeout is not changed by .Fn select , and may be reused on subsequent calls, however it is good style to re-initialize @@ -158,7 +164,7 @@ Any of .Fa writefds , and .Fa errorfds -may be given as nil pointers if no descriptors are of interest. +may be given as null pointers if no descriptors are of interest. .Sh RETURN VALUES .Fn select returns the number of ready descriptors that are contained in diff --git a/bsd/man/man2/socket.2 b/bsd/man/man2/socket.2 index 4815d2e94..e4f5056f3 100644 --- a/bsd/man/man2/socket.2 +++ b/bsd/man/man2/socket.2 @@ -68,7 +68,8 @@ PF_ROUTE Internal Routing protocol, PF_KEY Internal key-management function, PF_INET6 Internet version 6 protocols, PF_SYSTEM System domain, -PF_NDRV Raw access to network device +PF_NDRV Raw access to network device, +PF_VSOCK VM Sockets protocols .Ed .Pp The socket has the indicated diff --git a/bsd/man/man2/write.2 b/bsd/man/man2/write.2 index 5d5ebcd8b..c79a07a74 100644 --- a/bsd/man/man2/write.2 +++ b/bsd/man/man2/write.2 @@ -38,6 +38,7 @@ .Sh NAME .Nm pwrite , .Nm write , +.Nm pwritev , .Nm writev .Nd write output .Sh LIBRARY @@ -64,6 +65,13 @@ .Fa "const struct iovec *iov" .Fa "int iovcnt" .Fc +.Ft ssize_t +.Fo pwritev +.Fa "int fildes" +.Fa "const struct iovec *iov" +.Fa "int iovcnt" +.Fa "off_t offset" +.Fc .Sh DESCRIPTION .Fn write attempts to write @@ -80,11 +88,15 @@ buffers specified by the members of the .Fa iov array: iov[0], iov[1], ..., iov[iovcnt\|-\|1]. .Fn pwrite -performs the same function, but writes to the specified position in +and +.Fn pwritev +perform the same functions, but write to the specified position in the file without modifying the file pointer. 
.Pp For -.Fn writev , +.Fn writev +and +.Fn pwritev , the .Fa iovec structure is defined as: @@ -101,6 +113,8 @@ Each entry specifies the base address and length of an area in memory from which data should be written. .Fn writev +and +.Fn pwritev will always write a complete area before proceeding to the next. .Pp @@ -146,8 +160,9 @@ is set to indicate the error. The .Fn write , .Fn writev , +.Fn pwrite , and -.Fn pwrite +.Fn pwritev system calls will fail and the file pointer will remain unchanged if: .Bl -tag -width Er .\" =========== @@ -259,6 +274,17 @@ or .Xr connectx 2 had been used to set a destination address. .\" =========== +.It Bq Er ENOBUFS +The mbuf pool has been completely exhausted when writing to a socket. +.El +.Pp +The +.Fn writev +and +.Fn pwritev +calls may also return the following errors: +.Bl -tag -width Er +.\" =========== .It Bq Er EINVAL .Fa Iovcnt is less than or equal to 0, or greater than @@ -277,14 +303,13 @@ The sum of the values in the .Fa iov array overflows a 32-bit integer. -.\" =========== -.It Bq Er ENOBUFS -The mbuf pool has been completely exhausted when writing to a socket. .El .Pp The .Fn pwrite -call may also return the following errors: +and +.Fn pwritev +calls may also return the following errors: .Bl -tag -width Er .\" =========== .It Bq Er EINVAL @@ -317,6 +342,8 @@ and .Fn pwrite functions are expected to conform to .St -xpg4.2 . +.Fn pwritev +is nonstandard. .Sh HISTORY The .Fn pwrite diff --git a/bsd/man/man3/Makefile b/bsd/man/man3/Makefile index d9eeb14aa..21c65aeaa 100644 --- a/bsd/man/man3/Makefile +++ b/bsd/man/man3/Makefile @@ -11,6 +11,7 @@ DATAFILES = \ posix_spawn_file_actions_addclose.3 \ posix_spawn_file_actions_init.3 \ posix_spawnattr_init.3 \ + posix_spawnattr_setarchpref_np.3 \ posix_spawnattr_setbinpref_np.3 \ posix_spawnattr_setflags.3 \ posix_spawnattr_setpgroup.3 \ @@ -52,6 +53,7 @@ INSTALL_MAN_LINKS = \ posix_spawn_file_actions_addclose.3 posix_spawn_file_actions_addfchdir_np.3 \ posix_spawn_file_actions_init.3 posix_spawn_file_actions_destroy.3 \ posix_spawnattr_init.3 posix_spawnattr_destroy.3 \ + posix_spawnattr_setarchpref_np.3 posix_spawnattr_getarchpref_np.3 \ posix_spawnattr_setbinpref_np.3 posix_spawnattr_getbinpref_np.3 \ posix_spawnattr_setflags.3 posix_spawnattr_getflags.3 \ posix_spawnattr_setpgroup.3 posix_spawnattr_getpgroup.3 \ diff --git a/bsd/man/man3/posix_spawnattr_init.3 b/bsd/man/man3/posix_spawnattr_init.3 index 27759a591..324195597 100644 --- a/bsd/man/man3/posix_spawnattr_init.3 +++ b/bsd/man/man3/posix_spawnattr_init.3 @@ -62,6 +62,7 @@ specified by calls to .Xr posix_spawnattr_setpgroup 3 , .Xr posix_spawnattr_setsigdefault 3 , .Xr posix_spawnattr_setsigmask 3 , +.Xr posix_spawnattr_setarchpref_np 3 , .Xr posix_spawnattr_setbinpref_np 3 , .Xr posix_spawnattr_setspecialport_np 3 , or diff --git a/bsd/man/man3/posix_spawnattr_setarchpref_np.3 b/bsd/man/man3/posix_spawnattr_setarchpref_np.3 new file mode 100644 index 000000000..e9039a3e9 --- /dev/null +++ b/bsd/man/man3/posix_spawnattr_setarchpref_np.3 @@ -0,0 +1,185 @@ +.\" +.\" Copyright (c) 2020 Apple Inc. All rights reserved. +.\" +.\" @APPLE_OSREFERENCE_LICENSE_HEADER_START@ +.\" +.\" This file contains Original Code and/or Modifications of Original Code +.\" as defined in and that are subject to the Apple Public Source License +.\" Version 2.0 (the 'License'). You may not use this file except in +.\" compliance with the License. 
The rights granted to you under the License +.\" may not be used to create, or enable the creation or redistribution of, +.\" unlawful or unlicensed copies of an Apple operating system, or to +.\" circumvent, violate, or enable the circumvention or violation of, any +.\" terms of an Apple operating system software license agreement. +.\" +.\" Please obtain a copy of the License at +.\" http://www.opensource.apple.com/apsl/ and read it before using this file. +.\" +.\" The Original Code and all software distributed under the License are +.\" distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER +.\" EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, +.\" INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, +.\" FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. +.\" Please see the License for the specific language governing rights and +.\" limitations under the License. +.\" +.\" @APPLE_OSREFERENCE_LICENSE_HEADER_END@ +.\" +.\" @(#)posix_spawnattr_setarchpref_np.3 +. +.Dd May 8, 2020 +.Dt POSIX_SPAWNATTR_SETARCHPREF_NP 3 +.Os "Mac OS X" +.Sh NAME +.Nm posix_spawnattr_setarchpref_np +.Nm posix_spawnattr_getarchpref_np +.Nd set or get the +.Em cpu/subcpu preference +attribute on a +.Em posix_spawnattr_t +.Sh SYNOPSIS +.Fd #include +.Ft int +.Fo posix_spawnattr_setarchpref_np +.Fa "posix_spawnattr_t *restrict attr" +.Fa "size_t count" +.Fa "cpu_type_t *pref" +.Fa "cpu_subtype_t *subpref" +.Fa "size_t *restrict ocount" +.Fc +.Ft int +.Fo posix_spawnattr_getarchpref_np +.Fa "const posix_spawnattr_t *restrict attr" +.Fa "size_t count" +.Fa "cpu_type_t *pref" +.Fa "cpu_subtype_t *subpref" +.Fa "size_t * restrict ocount" +.Fc +.Sh IMPORTANT +These functions represent an Apple extension to +.Xr posix_spawn 2 +and +.Xr posix_spawnp 2 , +and as such should not be used by programs intending their code to be +portable to other platforms. +.Sh DESCRIPTION +The +.Fn posix_spawnattr_setarchpref_np +function sets the precise universal binary preferences for the spawn attribute +value referenced by +.Fa attr +from the memory containing the +.Em cpu_type_t +referenced by +.Fa pref +and the +.Em cpu_subtype_t +referenced by +.Fa subpref +with a size of +.Fa count +elements; the actual number of elements that are set in the attribute +is returned in +.Fa ocount . +.Pp +When +.Xr spawn 2 +or +.Xr spawnp 2 +is subsequently invoked on a Universal binary with the +.Em posix_spawnattr_t , +the elements which were set will be used, in the order they were set, +to select the first element in the list which matches any +.Em cpu_type_t +and +.Em cpu_subtype_t +of those available in the Universal binary. If there is no match, then +the attempt to create the child process will fail with the error +EBADARCH. +If the +.Em cpu_type_t +.Em CPU_TYPE_ANY +and +.Em cpu_subtype_t +.Em CPU_SUBTYPE_ANY +are the last pair in the list, then rather than returning +EBADARCH +on no match, the system will instead fall back to the standard Universal +binary grading preference order. Using +.Em CPU_SUBTYPE_ANY +as a +.Em cpu_subtype_t +for any +.Em cpu_type_t +value will select the best slice for that specific +.Em cpu_type_t , +similar to using +.Fn posix_spawnattr_setbinpref_np . +If called multiple times on the same +.Em attr , +the previous preferences will be overwritten. 
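To make the selection order concrete, here is a minimal sketch (illustrative only; it assumes the declarations are visible through <spawn.h> and <mach/machine.h>) that prefers an arm64 slice and otherwise falls back to the standard grading order:

#include <spawn.h>
#include <mach/machine.h>
#include <stddef.h>

int
prefer_arm64(posix_spawnattr_t *attr)
{
	/* Try the arm64 slice first; the trailing CPU_TYPE_ANY/CPU_SUBTYPE_ANY
	 * pair falls back to the standard grading order instead of EBADARCH. */
	cpu_type_t types[] = { CPU_TYPE_ARM64, CPU_TYPE_ANY };
	cpu_subtype_t subtypes[] = { CPU_SUBTYPE_ARM64_ALL, CPU_SUBTYPE_ANY };
	size_t ocount = 0;

	return posix_spawnattr_setarchpref_np(attr, 2, types, subtypes, &ocount);
}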
+.Pp +The +.Fn posix_spawnattr_getarchpref_np +function gets the precise universal binary preferences for the spawn attribute +value referenced by +.Fa attr +(set by a prior call to +.Fn posix_spawnattr_setbinpref_np +or +.Fn posix_spawnattr_setarchpref_np ) +into the memory +containing the +.Em cpu_type_t +referenced by +.Fa pref +and the +.Em cpu_subtype_t +referenced by +.Fa subpref +with a prereserved size of +.Fa count +elements; the actual number of elements that are copied from the attribute +is returned in +.Fa ocount . +.Pp +.Sh RETURN VALUES +On success, these functions return 0; on failure they return an error +number from +.In errno.h +and modify the value of +.Fa ocount . +Additionally, if successful, +.Fn posix_spawnattr_getarchpref_np +will modify the contents of the +.Fa pref +array with the current attribute values. +.Sh ERRORS +These functions may fail if: +.Bl -tag -width Er +.\" ========== +.It Bq Er EINVAL +The value specified by +.Fa attr +is invalid. +.\" ========== +.It Bq Er EINVAL +The value of +.Fa attr +is invalid. +.El +.Sh SEE ALSO +.Xr posix_spawn 2 , +.Xr posix_spawnp 2 , +.Xr posix_spawnattr_init 3 , +.Xr posix_spawnattr_destroy 3 , +.Xr posix_spawnattr_setbinpref_np 3 , +.Xr posix_spawnattr_setflags 3 +.Sh STANDARDS +Nonstandard +.Sh HISTORY +The +.Fn posix_spawnattr_setarchpref_np +and +.Fn posix_spawnattr_getarchpref_np +function calls appeared in macOS 10.16 diff --git a/bsd/man/man3/posix_spawnattr_setbinpref_np.3 b/bsd/man/man3/posix_spawnattr_setbinpref_np.3 index 5ddbd6582..9a6811518 100644 --- a/bsd/man/man3/posix_spawnattr_setbinpref_np.3 +++ b/bsd/man/man3/posix_spawnattr_setbinpref_np.3 @@ -27,7 +27,7 @@ .\" .\" @(#)posix_spawnattr_setbinpref_np.3 . -.Dd August 22, 2007 +.Dd May 8, 2020 .Dt POSIX_SPAWNATTR_SETBINPREF_NP 3 .Os "Mac OS X" .Sh NAME @@ -85,7 +85,10 @@ is subsequently invoked on a Universal binary with the the elements which were set will be used, in the order they were set, to select the first element in the list which matches any .Em cpu_type_t -of those available in the Universal binary. If there is no match, then +of those available in the Universal binary, using the best slice matching +that +.Em cpu_type_t . +If there is no match, then the attempt to create the child process will fail with the error EBADARCH. If the @@ -95,15 +98,22 @@ is the last element in the list, then rather than returning EBADARCH on no match, the system will instead fall back to the standard Universal binary grading preference order. +If called multiple times on the same +.Em attr , +the previous preferences will be overwritten. .Pp The .Fn posix_spawnattr_getbinpref_np function gets the universal binary preferences for the spawn attribute value referenced by .Fa attr +(set by a prior call to +.Fn posix_spawnattr_setbinpref_np +or +.Fn posix_spawnattr_setarchpref_np ) into the memory containing the .Em cpu_type_t -referenced by +referenced by .Fa pref with a prereserved size of .Fa count @@ -141,6 +151,7 @@ is invalid. 
.Xr posix_spawnp 2 , .Xr posix_spawnattr_init 3 , .Xr posix_spawnattr_destroy 3 , +.Xr posix_spawnattr_setarchpref_np 3 .Xr posix_spawnattr_setflags 3 .Sh STANDARDS Nonstandard diff --git a/bsd/man/man4/Makefile b/bsd/man/man4/Makefile index 18dca587c..53e23cc46 100644 --- a/bsd/man/man4/Makefile +++ b/bsd/man/man4/Makefile @@ -12,8 +12,7 @@ DATAFILES = \ audit.4 \ auditpipe.4 \ bpf.4 \ - divert.4 \ - dummynet.4 \ + dummynet.4 \ fd.4 \ gif.4 \ icmp.4 \ @@ -42,6 +41,7 @@ DATAFILES = \ udp.4 \ unix.4 \ urandom.4 \ + vsock.4 \ INSTALL_MAN_LIST = ${DATAFILES} diff --git a/bsd/man/man4/divert.4 b/bsd/man/man4/divert.4 deleted file mode 100644 index 33922c4c3..000000000 --- a/bsd/man/man4/divert.4 +++ /dev/null @@ -1,168 +0,0 @@ -.\" $FreeBSD: src/share/man/man4/divert.4,v 1.15.2.5 2001/08/17 13:08:37 ru Exp $ -.\" -.Dd June 18, 1996 -.Dt DIVERT 4 -.Os -.Sh NAME -.Nm divert -.Nd kernel packet diversion mechanism -.Sh SYNOPSIS -.Fd #include -.Fd #include -.Fd #include -.Ft int -.Fn socket PF_INET SOCK_RAW IPPROTO_DIVERT -.Sh DESCRIPTION -Divert sockets are similar to raw IP sockets, except that they -can be bound to a specific -.Nm -port via the -.Xr bind 2 -system call. -The IP address in the bind is ignored; only the port -number is significant. -A divert socket bound to a divert port will receive all packets diverted -to that port by some (here unspecified) kernel mechanism(s). -Packets may also be written to a divert port, in which case they -re-enter kernel IP packet processing. -.Pp -Divert sockets are normally used in conjunction with -.Fx Ns 's -packet filtering implementation and the -.Xr ipfw 8 -program. -By reading from and writing to a divert socket, matching packets -can be passed through an arbitrary ``filter'' as they travel through -the host machine, special routing tricks can be done, etc. -.Sh READING PACKETS -Packets are diverted either as they are ``incoming'' or ``outgoing.'' -Incoming packets are diverted after reception on an IP interface, -whereas outgoing packets are diverted before next hop forwarding. -.Pp -Diverted packets may be read unaltered via -.Xr read 2 , -.Xr recv 2 , -or -.Xr recvfrom 2 . -In the latter case, the address returned will have its port set to -the some tag supplied by the packet diverter, (usually the ipfw rule number) -and the IP address set to the (first) address of -the interface on which the packet was received (if the packet -was incoming) or -.Dv INADDR_ANY -(if the packet was outgoing). In the case of an incoming packet the interface -name will also be placed in the 8 bytes following the address, -(assuming it fits). -.Sh WRITING PACKETS -Writing to a divert socket is similar to writing to a raw IP socket; -the packet is injected ``as is'' into the normal kernel IP packet -processing and minimal error checking is done. -Packets are written as either incoming or outgoing: -if -.Xr write 2 -or -.Xr send 2 -is used to deliver the packet, or if -.Xr sendto 2 -is used with a destination IP address of -.Dv INADDR_ANY , -then the packet is treated as if it were outgoing, i.e., destined -for a non-local address. Otherwise, the packet is assumed to be -incoming and full packet routing is done. -.Pp -In the latter case, the -IP address specified must match the address of some local interface, -or an interface name -must be found after the IP address. -If an interface name is found, -that interface will be used and the value of the IP address will be -ignored (other than the fact that it is not -.Dv INADDR_ANY ) . 
-This is to indicate on which interface the packet ``arrived.'' -.Pp -Normally, packets read as incoming should be written as incoming; -similarly for outgoing packets. When reading and then writing back -packets, passing the same socket address supplied by -.Xr recvfrom 2 -unmodified to -.Xr sendto 2 -simplifies things (see below). -.Pp -The port part of the socket address passed to the -.Xr sendto 2 -contains a tag that should be meaningful to the diversion module. -In the -case of -.Xr ipfw 8 -the tag is interpreted as the rule number -.Em after which -rule processing should restart. -.Sh LOOP AVOIDANCE -Packets written into a divert socket -(using -.Xr sendto 2 ) -re-enter the packet filter at the rule number -following the tag given in the port part of the socket address, which -is usually already set at the rule number that caused the diversion -(not the next rule if there are several at the same number). If the 'tag' -is altered to indicate an alternative re-entry point, care should be taken -to avoid loops, where the same packet is diverted more than once at the -same rule. -.Sh DETAILS -To enable divert sockets, your kernel must be compiled with the option -.Dv IPDIVERT . -.Pp -If a packet is diverted but no socket is bound to the -port, or if -.Dv IPDIVERT -is not enabled in the kernel, the packet is dropped. -.Pp -Incoming packet fragments which get diverted are fully reassembled -before delivery; the diversion of any one fragment causes the entire -packet to get diverted. -If different fragments divert to different ports, -then which port ultimately gets chosen is unpredictable. -.Pp -Packets are received and sent unchanged, except that -packets read as outgoing have invalid IP header checksums, and -packets written as outgoing have their IP header checksums overwritten -with the correct value. -Packets written as incoming and having incorrect checksums will be dropped. -Otherwise, all header fields are unchanged (and therefore in network order). -.Pp -Binding to port numbers less than 1024 requires super-user access, as does -creating a socket of type SOCK_RAW. -.Sh ERRORS -Writing to a divert socket can return these errors, along with -the usual errors possible when writing raw packets: -.Bl -tag -width Er -.It Bq Er EINVAL -The packet had an invalid header, or the IP options in the packet -and the socket options set were incompatible. -.It Bq Er EADDRNOTAVAIL -The destination address contained an IP address not equal to -.Dv INADDR_ANY -that was not associated with any interface. -.El -.Sh SEE ALSO -.Xr bind 2 , -.Xr recvfrom 2 , -.Xr sendto 2 , -.Xr socket 2 , -.Xr ipfw 8 -.Sh BUGS -This is an attempt to provide a clean way for user mode processes -to implement various IP tricks like address translation, but it -could be cleaner, and it's too dependent on -.Xr ipfw 8 . -.Pp -It's questionable whether incoming fragments should be reassembled -before being diverted. -For example, if only some fragments of a -packet destined for another machine don't get routed through the -local machine, the packet is lost. -This should probably be -a settable socket option in any case. -.Sh AUTHORS -.An Archie Cobbs Aq archie@FreeBSD.org , -Whistle Communications Corp. diff --git a/bsd/man/man4/inet6.4 b/bsd/man/man4/inet6.4 index 74fe8baf8..37853700a 100644 --- a/bsd/man/man4/inet6.4 +++ b/bsd/man/man4/inet6.4 @@ -500,7 +500,6 @@ sockets. 
.Xr intro 4 , .Xr ip6 4 , .Xr tcp 4 , -.Xr ttcp 4 , .Xr udp 4 .Sh STANDARDS .Rs diff --git a/bsd/man/man4/netintro.4 b/bsd/man/man4/netintro.4 index 050aea324..c9a227fdd 100644 --- a/bsd/man/man4/netintro.4 +++ b/bsd/man/man4/netintro.4 @@ -151,6 +151,7 @@ are known to the system #define AF_CCITT 10 /* CCITT protocols, X.25 etc */ #define AF_HYLINK 15 /* NSC Hyperchannel */ #define AF_ISO 18 /* ISO protocols */ +#define AF_VSOCK 40 /* VM Sockets */ .Ed .Sh ROUTING Mac OS X provides some packet routing facilities. @@ -248,7 +249,7 @@ The metric is used only by user-level routers. Get interface metric. .El .Pp -There are two requests that make use of a new structure: +There are requests that make use of a different structure: .Bl -tag -width SIOCGIFBRDADDR .It Dv SIOCAIFADDR An interface may have more than one address associated with it @@ -270,13 +271,20 @@ identifier itself to include the total size, as described in .Xr ioctl . .It Dv SIOCDIFADDR This requests deletes the specified address from the list -associated with an interface. It also uses the -.Ar if_aliasreq -structure to allow for the possibility of protocols allowing -multiple masks or destination addresses, and also adopts the -convention that specification of the default address means -to delete the first address for the interface belonging to -the address family in which the original socket was opened. +associated with an interface. It uses the +.Ar ifreq +structure for protocols like +.Dv PF_INET +whose +address fit in the +.Ar sockaddr +structure. +Protocols like +.Dv PF_INET6 +with addresses that do not fit in the +.Ar sockaddr +structure +must have a variant of that request. .It Dv SIOCGIFCONF Get interface configuration list. This request takes an .Ar ifconf diff --git a/bsd/man/man4/vsock.4 b/bsd/man/man4/vsock.4 new file mode 100644 index 000000000..8a996b369 --- /dev/null +++ b/bsd/man/man4/vsock.4 @@ -0,0 +1,205 @@ +.\" +.\" Copyright (c) 2020 Apple Inc. All rights reserved. +.\" +.\" @APPLE_OSREFERENCE_LICENSE_HEADER_START@ +.\" +.\" This file contains Original Code and/or Modifications of Original Code +.\" as defined in and that are subject to the Apple Public Source License +.\" Version 2.0 (the 'License'). You may not use this file except in +.\" compliance with the License. The rights granted to you under the License +.\" may not be used to create, or enable the creation or redistribution of, +.\" unlawful or unlicensed copies of an Apple operating system, or to +.\" circumvent, violate, or enable the circumvention or violation of, any +.\" terms of an Apple operating system software license agreement. +.\" +.\" Please obtain a copy of the License at +.\" http://www.opensource.apple.com/apsl/ and read it before using this file. +.\" +.\" The Original Code and all software distributed under the License are +.\" distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER +.\" EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, +.\" INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, +.\" FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. +.\" Please see the License for the specific language governing rights and +.\" limitations under the License. 
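To make the revised SIOCDIFADDR wording above concrete, here is a minimal sketch (not part of the patch) of deleting an IPv4 address through the ifreq structure, which the netintro.4 change describes for protocols such as PF_INET whose addresses fit in a sockaddr. The interface name and address are placeholders, and the call normally requires root privileges.

/*
 * Sketch: delete an IPv4 address from an interface with SIOCDIFADDR.
 */
#include <sys/ioctl.h>
#include <sys/sockio.h>
#include <sys/socket.h>
#include <net/if.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
	int s = socket(PF_INET, SOCK_DGRAM, 0);
	if (s < 0) {
		perror("socket");
		return 1;
	}

	struct ifreq ifr;
	memset(&ifr, 0, sizeof(ifr));
	strlcpy(ifr.ifr_name, "en0", sizeof(ifr.ifr_name));

	/* The address to delete, carried in the ifreq's sockaddr. */
	struct sockaddr_in *sin = (struct sockaddr_in *)&ifr.ifr_addr;
	sin->sin_len = sizeof(*sin);
	sin->sin_family = AF_INET;
	inet_pton(AF_INET, "192.0.2.10", &sin->sin_addr);

	if (ioctl(s, SIOCDIFADDR, &ifr) < 0)
		perror("SIOCDIFADDR");

	close(s);
	return 0;
}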
+.\" +.\" @APPLE_OSREFERENCE_LICENSE_HEADER_END@ +.\" +.\" @(#)vsock.4 7/9/2020 +.\" +.Dd July 9, 2020 +.Dt VSOCK 4 +.Os macOS +.Sh NAME +.Nm vsock +.Nd VM Sockets +.Sh SYNOPSIS +.In sys/socket.h +.In sys/vsock.h +.Ft int +.Fn socket AF_VSOCK SOCK_STREAM 0 +.Sh DESCRIPTION +The +.Tn vsock +protocol allows for socket communication between a virtual machine and its host. Socket connections may be established using standard socket interfaces. Currently, only stream connections from a guest are supported using this protocol. +.Pp +.Ss "Non-blocking connect" +When a +.Tn vsock +socket is set non-blocking, and the connection cannot be established immediately, +.Xr connect 2 +returns with the error +.Dv EINPROGRESS , +and the connection is established asynchronously. +.Pp +When the asynchronous connection completes successfully, +.Xr select 2 +or +.Xr poll 2 +or +.Xr kqueue 2 +will indicate the file descriptor is ready for writing. +If the connection encounters an error, the file descriptor +is marked ready for both reading and writing, and the pending error +can be retrieved via the socket option +.Dv SO_ERROR . +.Pp +Note that even if the socket is non-blocking, it is possible for the connection +to be established immediately. In that case +.Xr connect 2 +does not return with +.Dv EINPROGRESS . +.Sh ADDRESSING +Sockets bound to the vsock protocol family utilize +the following addressing structure which can be found in the header +.Aq Pa sys/vsock.h . +.Bd -literal -offset indent +struct sockaddr_vm { + uint8_t svm_len; + sa_family_t svm_family; + uint16_t svm_reserved1; + uint32_t svm_port; + uint32_t svm_cid; +}; +.Ed +.Pp +Addresses consist of a cid and a port. +The field +.Ar svm_len +contains the total length of the structure, while the field +.Ar svm_family +contains the value +.Fa AF_VSOCK . +The field +.Fa svm_reserved1 +is reserved and should be set to zero. +.Pp +Sockets may be created with the local address +.Dv VMADDR_CID_ANY +to effect +.Dq wildcard +matching on incoming messages. +The address in a +.Xr connect 2 +call may be given as +.Dv VMADDR_CID_ANY +to mean +.Dq this host . +The cid addresses +.Dv VMADDR_CID_HYPERVISOR +or +.Dv VMADDR_CID_HOST +may be used to +.Xr connect 2 +or +.Xr bind 2 +to the hypervisor or host respectively. +.Dv VMADDR_PORT_ANY +may be used to obtain the next available free port when calling +.Xr bind 2 . +.Ss CID Constants +.Bl -tag -width ".Dv VMADDR_CID_HYPERVISOR" +.It Dv VMADDR_CID_ANY +Wildcard matches any address. +.It Dv VMADDR_CID_HYPERVISOR +The address of the hypervisor. +.It Dv VMADDR_CID_HOST +The address of the host. +.El +.Ss Port Constants +.Bl -tag -width ".Dv VMADDR_CID_HYPERVISOR" +.It Dv VMADDR_PORT_ANY +Wildcard matches any port. 
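A minimal guest-side sketch of the stream connection and local-CID ioctl described above (not part of the patch); the port number is a placeholder and error handling is abbreviated.

/*
 * Sketch: connect a stream vsock socket to the host and query the local CID.
 */
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/vsock.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
	int s = socket(AF_VSOCK, SOCK_STREAM, 0);
	if (s < 0) {
		perror("socket");
		return 1;
	}

	/* Local cid of this socket's transport. */
	uint32_t cid = 0;
	if (ioctl(s, IOCTL_VM_SOCKETS_GET_LOCAL_CID, &cid) == 0)
		printf("local cid: %u\n", cid);

	struct sockaddr_vm addr;
	memset(&addr, 0, sizeof(addr));        /* zeroes svm_reserved1 */
	addr.svm_len = sizeof(addr);
	addr.svm_family = AF_VSOCK;
	addr.svm_cid = VMADDR_CID_HOST;
	addr.svm_port = 1234;

	/* On a non-blocking socket this may instead fail with EINPROGRESS and
	 * complete asynchronously, as described above. */
	if (connect(s, (struct sockaddr *)&addr, sizeof(addr)) < 0)
		perror("connect");

	close(s);
	return 0;
}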
+.El +.Sh ERRORS +A +.Tn vsock +socket operation may fail with a general socket error or one of the following +.Tn vsock +specific errors: +.Bl -tag -width ".It Bq Er EADDRNOTAVAIL" +.It Bq Er EACCES +returned by +.Xr bind 2 +when attempting to bind to a privileged port; +.It Bq Er EADDRINUSE +returned by +.Xr bind 2 +when attempting to bind to a cid and port that is already in use; +.It Bq Er EADDRNOTAVAIL +returned by +.Xr bind 2 +when attempting to bind to an invalid cid or port; +.It Bq Er EFAULT +returned by +.Xr connect 2 +when attempting to connect to an invalid cid; +.It Bq Er EINPROGRESS +returned by +.Xr connect 2 +when attempting to connect using a non-blocking socket; +.It Bq Er EINVAL +when passing an invalid parameter; +.It Bq Er ENODEV +when a vsock transport does not exist; +.It Bq Er ENOTCONN +when performing an operation on a non-connected socket; +.It Bq Er ETIMEDOUT +returned by +.Xr connect 2 +when a connection attempt has timed out; +.It Bq Er EWOULDBLOCK +returned by +.Xr send 2 +or +.Xr recv 2 +when sending or receiving using a non-blocking socket. +.El +.Sh IOCTLS +The +.Xr ioctl 2 +command codes below are defined in +.Aq Pa sys/vsock.h . +All commands require +these includes: +.Bd -literal + #include + #include +.Ed +.Pp +The third argument to +.Xr ioctl 2 +should be a pointer to the type indicated in parenthesis. +.Bl -tag -width IOCTL_VM_SOCKETS_GET_LOCAL_CID +.It Dv IOCTL_VM_SOCKETS_GET_LOCAL_CID +.Pq Li uint32_t +Returns the local cid of this socket's transport. +.El +.Sh SEE ALSO +.Xr bind 2 , +.Xr connect 2 , +.Xr ioctl 2 , +.Xr kqueue 2 , +.Xr poll 2 , +.Xr select 2 , +.Xr socket 2 diff --git a/bsd/miscfs/Makefile b/bsd/miscfs/Makefile index fccfd3a14..a7534e2c8 100644 --- a/bsd/miscfs/Makefile +++ b/bsd/miscfs/Makefile @@ -12,7 +12,8 @@ INSTINC_SUBDIRS = \ routefs \ specfs \ union \ - nullfs + nullfs \ + bindfs EXPINC_SUBDIRS = \ devfs \ @@ -20,7 +21,8 @@ EXPINC_SUBDIRS = \ routefs \ specfs \ union \ - nullfs + nullfs \ + bindfs include $(MakeInc_rule) include $(MakeInc_dir) diff --git a/bsd/miscfs/bindfs/Makefile b/bsd/miscfs/bindfs/Makefile new file mode 100644 index 000000000..e365abd48 --- /dev/null +++ b/bsd/miscfs/bindfs/Makefile @@ -0,0 +1,22 @@ +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + +include $(MakeInc_cmd) +include $(MakeInc_def) + +DATAFILES = + +PRIVATE_KERNELFILES = bindfs.h + +INSTALL_MI_LIST = + +INSTALL_MI_DIR = miscfs/bindfs + +EXPORT_MI_LIST = + +EXPORT_MI_DIR = miscfs/bindfs + +include $(MakeInc_rule) +include $(MakeInc_dir) diff --git a/bsd/miscfs/bindfs/bind_subr.c b/bsd/miscfs/bindfs/bind_subr.c new file mode 100644 index 000000000..2c82cbcc8 --- /dev/null +++ b/bsd/miscfs/bindfs/bind_subr.c @@ -0,0 +1,411 @@ +/* + * Copyright (c) 2019 Apple Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. 
+ * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +/*- + * Portions Copyright (c) 1992, 1993 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software donated to Berkeley by + * Jan-Simon Pendry. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)null_subr.c 8.7 (Berkeley) 5/14/95 + * + * $FreeBSD$ + */ +#include +#include +#include +#include +#include +#include +#include +#include + +#include "bindfs.h" + +/* + * Null layer cache: + * Each cache entry holds a reference to the lower vnode + * along with a pointer to the alias vnode. When an + * entry is added the lower vnode is VREF'd. When the + * alias is removed the lower vnode is vrele'd. + */ + +#define BIND_HASH_SIZE (desiredvnodes / 10) + +/* xnu doesn't really have the functionality freebsd uses here..gonna try this + * hacked hash...*/ +#define BIND_NHASH(vp) (&bind_node_hashtbl[((((uintptr_t)vp) >> vnsz2log) + (uintptr_t)vnode_mount(vp)) & bind_hash_mask]) + +static LIST_HEAD(bind_node_hashhead, bind_node) * bind_node_hashtbl; +static lck_mtx_t bind_hashmtx; +static lck_attr_t * bind_hashlck_attr; +static lck_grp_t * bind_hashlck_grp; +static lck_grp_attr_t * bind_hashlck_grp_attr; +static u_long bind_hash_mask; + +/* xnu doesn't have hashes built into vnodes. 
This mimics what freebsd does + * 9 is an eyeball of the log 2 size of vnode */ +static int vnsz2log = 9; + +static int bind_hashins(struct mount *, struct bind_node *, struct vnode **); + +int +bindfs_init_lck(lck_mtx_t * lck) +{ + int error = 1; + if (lck && bind_hashlck_grp && bind_hashlck_attr) { + lck_mtx_init(lck, bind_hashlck_grp, bind_hashlck_attr); + error = 0; + } + return error; +} + +int +bindfs_destroy_lck(lck_mtx_t * lck) +{ + int error = 1; + if (lck && bind_hashlck_grp) { + lck_mtx_destroy(lck, bind_hashlck_grp); + error = 0; + } + return error; +} + +/* + * Initialise cache headers + */ +int +bindfs_init(__unused struct vfsconf * vfsp) +{ + BINDFSDEBUG("%s\n", __FUNCTION__); + + /* assuming for now that this happens immediately and by default after fs + * installation */ + bind_hashlck_grp_attr = lck_grp_attr_alloc_init(); + if (bind_hashlck_grp_attr == NULL) { + goto error; + } + bind_hashlck_grp = lck_grp_alloc_init("com.apple.filesystems.bindfs", bind_hashlck_grp_attr); + if (bind_hashlck_grp == NULL) { + goto error; + } + bind_hashlck_attr = lck_attr_alloc_init(); + if (bind_hashlck_attr == NULL) { + goto error; + } + + bind_node_hashtbl = hashinit(BIND_HASH_SIZE, M_TEMP, &bind_hash_mask); + if (bind_node_hashtbl == NULL) { + goto error; + } + lck_mtx_init(&bind_hashmtx, bind_hashlck_grp, bind_hashlck_attr); + + BINDFSDEBUG("%s finished\n", __FUNCTION__); + return 0; +error: + printf("BINDFS: failed to initialize globals\n"); + if (bind_hashlck_grp_attr) { + lck_grp_attr_free(bind_hashlck_grp_attr); + bind_hashlck_grp_attr = NULL; + } + if (bind_hashlck_grp) { + lck_grp_free(bind_hashlck_grp); + bind_hashlck_grp = NULL; + } + if (bind_hashlck_attr) { + lck_attr_free(bind_hashlck_attr); + bind_hashlck_attr = NULL; + } + return KERN_FAILURE; +} + +int +bindfs_destroy(void) +{ + /* This gets called when the fs is uninstalled, there wasn't an exact + * equivalent in vfsops */ + lck_mtx_destroy(&bind_hashmtx, bind_hashlck_grp); + hashdestroy(bind_node_hashtbl, M_TEMP, bind_hash_mask); + if (bind_hashlck_grp_attr) { + lck_grp_attr_free(bind_hashlck_grp_attr); + bind_hashlck_grp_attr = NULL; + } + if (bind_hashlck_grp) { + lck_grp_free(bind_hashlck_grp); + bind_hashlck_grp = NULL; + } + if (bind_hashlck_attr) { + lck_attr_free(bind_hashlck_attr); + bind_hashlck_attr = NULL; + } + return 0; +} + +/* + * Find the bindfs vnode mapped to lowervp. Return it in *vpp with an iocount if found. + * Return 0 on success. On failure *vpp will be NULL and a non-zero error code will be returned. + */ +int +bind_hashget(struct mount * mp, struct vnode * lowervp, struct vnode ** vpp) +{ + struct bind_node_hashhead * hd; + struct bind_node * a; + struct vnode * vp = NULL; + int error = ENOENT; + + /* + * Find hash base, and then search the (two-way) linked + * list looking for a bind_node structure which is referencing + * the lower vnode. 
We only give up our reference at reclaim so + * just check whether the lowervp has gotten pulled from under us + */ + hd = BIND_NHASH(lowervp); + lck_mtx_lock(&bind_hashmtx); + LIST_FOREACH(a, hd, bind_hash) + { + if (a->bind_lowervp == lowervp && vnode_mount(BINDTOV(a)) == mp) { + vp = BINDTOV(a); + if (a->bind_lowervid != vnode_vid(lowervp)) { + /*lowervp has reved */ + error = EIO; + vp = NULL; + } + break; + } + } + lck_mtx_unlock(&bind_hashmtx); + + if (vp != NULL) { + error = vnode_getwithvid(vp, a->bind_myvid); + if (error == 0) { + *vpp = vp; + } + } + return error; +} + +/* + * Act like bind_hashget, but add passed bind_node to hash if no existing + * node found. + * If we find a vnode in the hash table it is returned via vpp. If we don't + * find a hit in the table, then vpp is NULL on return and xp is added to the table. + * 0 is returned if a hash table hit occurs or if we insert the bind_node. + * EIO is returned if we found a hash table hit but the lower vnode was recycled. + */ +static int +bind_hashins(struct mount * mp, struct bind_node * xp, struct vnode ** vpp) +{ + struct bind_node_hashhead * hd; + struct bind_node * oxp; + struct vnode * ovp = NULL; + int error = 0; + + hd = BIND_NHASH(xp->bind_lowervp); + lck_mtx_lock(&bind_hashmtx); + LIST_FOREACH(oxp, hd, bind_hash) + { + if (oxp->bind_lowervp == xp->bind_lowervp && vnode_mount(BINDTOV(oxp)) == mp) { + ovp = BINDTOV(oxp); + if (oxp->bind_lowervid != vnode_vid(oxp->bind_lowervp)) { + /* vp doesn't exist so return null (not sure we are actually gonna catch + * recycle right now + * This is an exceptional case right now, it suggests the vnode we are + * trying to add has been recycled + * don't add it.*/ + error = EIO; + ovp = NULL; + } + goto end; + } + } + /* if it wasn't in the hash map then the vnode pointed to by xp already has a + * iocount so don't get another. */ + LIST_INSERT_HEAD(hd, xp, bind_hash); + xp->bind_flags |= BIND_FLAG_HASHED; +end: + lck_mtx_unlock(&bind_hashmtx); + if (ovp != NULL) { + /* if we found something in the hash map then grab an iocount */ + error = vnode_getwithvid(ovp, oxp->bind_myvid); + if (error == 0) { + *vpp = ovp; + } + } + return error; +} + +/* + * Remove node from hash. 
+ */ +void +bind_hashrem(struct bind_node * xp) +{ + if (xp->bind_flags & BIND_FLAG_HASHED) { + lck_mtx_lock(&bind_hashmtx); + LIST_REMOVE(xp, bind_hash); + lck_mtx_unlock(&bind_hashmtx); + } +} + +static struct bind_node * +bind_nodecreate(struct vnode * lowervp) +{ + struct bind_node * xp; + + MALLOC(xp, struct bind_node *, sizeof(struct bind_node), M_TEMP, M_WAITOK | M_ZERO); + if (xp != NULL) { + if (lowervp) { + xp->bind_lowervp = lowervp; + xp->bind_lowervid = vnode_vid(lowervp); + } + } + return xp; +} + +/* assumption is that vnode has iocount on it after vnode create */ +int +bind_getnewvnode( + struct mount * mp, struct vnode * lowervp, struct vnode * dvp, struct vnode ** vpp, struct componentname * cnp, int root) +{ + struct vnode_fsparam vnfs_param; + int error = 0; + enum vtype type = VDIR; + struct bind_node * xp = bind_nodecreate(lowervp); + + if (xp == NULL) { + return ENOMEM; + } + + if (lowervp) { + type = vnode_vtype(lowervp); + } + + vnfs_param.vnfs_mp = mp; + vnfs_param.vnfs_vtype = type; + vnfs_param.vnfs_str = "bindfs"; + vnfs_param.vnfs_dvp = dvp; + vnfs_param.vnfs_fsnode = (void *)xp; + vnfs_param.vnfs_vops = bindfs_vnodeop_p; + vnfs_param.vnfs_markroot = root; + vnfs_param.vnfs_marksystem = 0; + vnfs_param.vnfs_rdev = 0; + vnfs_param.vnfs_filesize = 0; // set this to 0 since we should only be shadowing non-regular files + vnfs_param.vnfs_cnp = cnp; + vnfs_param.vnfs_flags = VNFS_ADDFSREF; + + error = vnode_create(VNCREATE_FLAVOR, VCREATESIZE, &vnfs_param, vpp); + if (error == 0) { + xp->bind_vnode = *vpp; + xp->bind_myvid = vnode_vid(*vpp); + vnode_settag(*vpp, VT_BINDFS); + } else { + FREE(xp, M_TEMP); + } + return error; +} + +/* + * Make a new or get existing bindfs node. + * Vp is the alias vnode, lowervp is the lower vnode. + * + * lowervp is assumed to have an iocount on it from the caller + */ +int +bind_nodeget( + struct mount * mp, struct vnode * lowervp, struct vnode * dvp, struct vnode ** vpp, struct componentname * cnp, int root) +{ + struct vnode * vp; + int error; + + /* Lookup the hash firstly. */ + error = bind_hashget(mp, lowervp, vpp); + /* ENOENT means it wasn't found, EIO is a failure we should bail from, 0 is it + * was found */ + if (error != ENOENT) { + /* bind_hashget checked the vid, so if we got something here its legit to + * the best of our knowledge*/ + /* if we found something then there is an iocount on vpp, + * if we didn't find something then vpp shouldn't be used by the caller */ + return error; + } + + /* + * We do not serialize vnode creation, instead we will check for + * duplicates later, when adding new vnode to hash. + */ + error = vnode_ref(lowervp); // take a ref on lowervp so we let the system know we care about it + if (error) { + // Failed to get a reference on the lower vp so bail. Lowervp may be gone already. + return error; + } + + error = bind_getnewvnode(mp, lowervp, dvp, &vp, cnp, root); + + if (error) { + vnode_rele(lowervp); + return error; + } + + /* + * Atomically insert our new node into the hash or vget existing + * if someone else has beaten us to it. 
+ */ + error = bind_hashins(mp, VTOBIND(vp), vpp); + if (error || *vpp != NULL) { + /* recycle will call reclaim which will get rid of the internals */ + vnode_recycle(vp); + vnode_put(vp); + /* if we found vpp, then bind_hashins put an iocount on it */ + return error; + } + + /* vp has an iocount from bind_getnewvnode */ + *vpp = vp; + + return 0; +} diff --git a/bsd/miscfs/bindfs/bind_vfsops.c b/bsd/miscfs/bindfs/bind_vfsops.c new file mode 100644 index 000000000..248469a11 --- /dev/null +++ b/bsd/miscfs/bindfs/bind_vfsops.c @@ -0,0 +1,559 @@ +/* + * Copyright (c) 2019 Apple Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +/*- + * Portions Copyright (c) 1992, 1993, 1995 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software donated to Berkeley by + * Jan-Simon Pendry. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * @(#)null_vfsops.c 8.2 (Berkeley) 1/21/94 + * + * @(#)lofs_vfsops.c 1.2 (Berkeley) 6/18/92 + * $FreeBSD$ + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include + +#include "bindfs.h" + +#define BINDFS_ENTITLEMENT "com.apple.private.bindfs-allow" + +#define SIZEOF_MEMBER(type, member) (sizeof(((type *)0)->member)) +#define MAX_MNT_FROM_LENGTH (SIZEOF_MEMBER(struct vfsstatfs, f_mntfromname)) + +static int +bindfs_vfs_getlowerattr(mount_t mp, struct vfs_attr * vfap, vfs_context_t ctx) +{ + memset(vfap, 0, sizeof(*vfap)); + VFSATTR_INIT(vfap); + VFSATTR_WANTED(vfap, f_bsize); + VFSATTR_WANTED(vfap, f_iosize); + VFSATTR_WANTED(vfap, f_blocks); + VFSATTR_WANTED(vfap, f_bfree); + VFSATTR_WANTED(vfap, f_bavail); + VFSATTR_WANTED(vfap, f_bused); + VFSATTR_WANTED(vfap, f_files); + VFSATTR_WANTED(vfap, f_ffree); + VFSATTR_WANTED(vfap, f_capabilities); + + return vfs_getattr(mp, vfap, ctx); +} + +/* + * Mount bind layer + */ +static int +bindfs_mount(struct mount * mp, __unused vnode_t devvp, user_addr_t user_data, vfs_context_t ctx) +{ + int error = 0; + struct vnode *lowerrootvp = NULL, *vp = NULL; + struct vfsstatfs * sp = NULL; + struct bind_mount * xmp = NULL; + char data[MAXPATHLEN]; + size_t count; + struct vfs_attr vfa; + /* set defaults (arbitrary since this file system is readonly) */ + uint32_t bsize = BLKDEV_IOSIZE; + size_t iosize = BLKDEV_IOSIZE; + uint64_t blocks = 4711 * 4711; + uint64_t bfree = 0; + uint64_t bavail = 0; + uint64_t bused = 4711; + uint64_t files = 4711; + uint64_t ffree = 0; + + kauth_cred_t cred = vfs_context_ucred(ctx); + + BINDFSDEBUG("mp = %p %llx\n", (void *)mp, vfs_flags(mp)); + + if (vfs_flags(mp) & MNT_ROOTFS) { + return EOPNOTSUPP; + } + + /* + * Update is a no-op + */ + if (vfs_isupdate(mp)) { + return ENOTSUP; + } + + /* check entitlement */ + if (!IOTaskHasEntitlement(current_task(), BINDFS_ENTITLEMENT)) { + return EPERM; + } + + /* + * Get argument + */ + error = copyinstr(user_data, data, MAXPATHLEN - 1, &count); + if (error) { + BINDFSERROR("error copying data from user %d\n", error); + goto error; + } + + /* This could happen if the system is configured for 32 bit inodes instead of + * 64 bit */ + if (count > MAX_MNT_FROM_LENGTH) { + error = EINVAL; + BINDFSERROR("path to mount too large for this system %zu vs %lu\n", count, MAX_MNT_FROM_LENGTH); + goto error; + } + + error = vnode_lookup(data, 0, &lowerrootvp, ctx); + if (error) { + BINDFSERROR("lookup of %s failed error: %d\n", data, error); + goto error; + } + + /* lowervrootvp has an iocount after vnode_lookup, drop that for a usecount. + * Keep this to signal what we want to keep around the thing we are mirroring. + * Drop it in unmount.*/ + error = vnode_ref(lowerrootvp); + vnode_put(lowerrootvp); + if (error) { + // If vnode_ref failed, then bind it out so it can't be used anymore in cleanup. 
+ lowerrootvp = NULL; + goto error; + } + + BINDFSDEBUG("mount %s\n", data); + + MALLOC(xmp, struct bind_mount *, sizeof(*xmp), M_TEMP, M_WAITOK | M_ZERO); + if (xmp == NULL) { + error = ENOMEM; + goto error; + } + + /* + * Save reference to underlying FS + */ + xmp->bindm_lowerrootvp = lowerrootvp; + xmp->bindm_lowerrootvid = vnode_vid(lowerrootvp); + + error = bind_nodeget(mp, lowerrootvp, NULL, &vp, NULL, 1); + if (error) { + goto error; + } + /* After bind_nodeget our root vnode is in the hash table and we have to usecounts on lowerrootvp + * One use count will get dropped when we reclaim the root during unmount. + * The other will get dropped in unmount */ + + + /* vp has an iocount on it from vnode_create. drop that for a usecount. This + * is our root vnode so we drop the ref in unmount + * + * Assuming for now that because we created this vnode and we aren't finished mounting we can get a ref*/ + vnode_ref(vp); + vnode_put(vp); + + xmp->bindm_rootvp = vp; + + /* read the flags the user set, but then ignore some of them, we will only + * allow them if they are set on the lower file system */ + uint64_t flags = vfs_flags(mp) & (~(MNT_IGNORE_OWNERSHIP | MNT_LOCAL)); + uint64_t lowerflags = vfs_flags(vnode_mount(lowerrootvp)) & (MNT_LOCAL | MNT_QUARANTINE | MNT_IGNORE_OWNERSHIP | MNT_NOEXEC); + + if (lowerflags) { + flags |= lowerflags; + } + + /* force these flags */ + flags |= (MNT_DONTBROWSE | MNT_MULTILABEL | MNT_NOSUID | MNT_RDONLY); + vfs_setflags(mp, flags); + + vfs_setfsprivate(mp, xmp); + vfs_getnewfsid(mp); + vfs_setlocklocal(mp); + + /* fill in the stat block */ + sp = vfs_statfs(mp); + strlcpy(sp->f_mntfromname, data, MAX_MNT_FROM_LENGTH); + + sp->f_flags = flags; + + xmp->bindm_flags = BINDM_CASEINSENSITIVE; /* default to case insensitive */ + + error = bindfs_vfs_getlowerattr(vnode_mount(lowerrootvp), &vfa, ctx); + if (error == 0) { + if (VFSATTR_IS_SUPPORTED(&vfa, f_bsize)) { + bsize = vfa.f_bsize; + } + if (VFSATTR_IS_SUPPORTED(&vfa, f_iosize)) { + iosize = vfa.f_iosize; + } + if (VFSATTR_IS_SUPPORTED(&vfa, f_blocks)) { + blocks = vfa.f_blocks; + } + if (VFSATTR_IS_SUPPORTED(&vfa, f_bfree)) { + bfree = vfa.f_bfree; + } + if (VFSATTR_IS_SUPPORTED(&vfa, f_bavail)) { + bavail = vfa.f_bavail; + } + if (VFSATTR_IS_SUPPORTED(&vfa, f_bused)) { + bused = vfa.f_bused; + } + if (VFSATTR_IS_SUPPORTED(&vfa, f_files)) { + files = vfa.f_files; + } + if (VFSATTR_IS_SUPPORTED(&vfa, f_ffree)) { + ffree = vfa.f_ffree; + } + if (VFSATTR_IS_SUPPORTED(&vfa, f_capabilities)) { + if ((vfa.f_capabilities.capabilities[VOL_CAPABILITIES_FORMAT] & (VOL_CAP_FMT_CASE_SENSITIVE)) && + (vfa.f_capabilities.valid[VOL_CAPABILITIES_FORMAT] & (VOL_CAP_FMT_CASE_SENSITIVE))) { + xmp->bindm_flags &= ~BINDM_CASEINSENSITIVE; + } + } + } else { + goto error; + } + + sp->f_bsize = bsize; + sp->f_iosize = iosize; + sp->f_blocks = blocks; + sp->f_bfree = bfree; + sp->f_bavail = bavail; + sp->f_bused = bused; + sp->f_files = files; + sp->f_ffree = ffree; + + /* Associate the mac label information from the mirrored filesystem with the + * mirror */ + MAC_PERFORM(mount_label_associate, cred, vnode_mount(lowerrootvp), vfs_mntlabel(mp)); + + BINDFSDEBUG("lower %s, alias at %s\n", sp->f_mntfromname, sp->f_mntonname); + return 0; + +error: + if (xmp) { + FREE(xmp, M_TEMP); + } + if (lowerrootvp) { + vnode_getwithref(lowerrootvp); + vnode_rele(lowerrootvp); + vnode_put(lowerrootvp); + } + if (vp) { + /* we made the root vnode but the mount is failed, so clean it up */ + vnode_getwithref(vp); + vnode_rele(vp); + /* give vp 
back */ + vnode_recycle(vp); + vnode_put(vp); + } + return error; +} + +/* + * Free reference to bind layer + */ +static int +bindfs_unmount(struct mount * mp, int mntflags, __unused vfs_context_t ctx) +{ + struct bind_mount * mntdata; + struct vnode * vp; + int error, flags; + + BINDFSDEBUG("mp = %p\n", (void *)mp); + + /* check entitlement or superuser*/ + if (!IOTaskHasEntitlement(current_task(), BINDFS_ENTITLEMENT) && + vfs_context_suser(ctx) != 0) { + return EPERM; + } + + if (mntflags & MNT_FORCE) { + flags = FORCECLOSE; + } else { + flags = 0; + } + + mntdata = MOUNTTOBINDMOUNT(mp); + vp = mntdata->bindm_rootvp; + + // release our reference on the root before flushing. + // it will get pulled out of the mount structure by reclaim + vnode_getalways(vp); + + error = vflush(mp, vp, flags); + if (error) { + vnode_put(vp); + return error; + } + + if (vnode_isinuse(vp, 1) && flags == 0) { + vnode_put(vp); + return EBUSY; + } + + vnode_rele(vp); // Drop reference taken by bindfs_mount + vnode_put(vp); // Drop ref taken above + + //Force close to get rid of the last vnode + (void)vflush(mp, NULL, FORCECLOSE); + + /* no more vnodes, so tear down the mountpoint */ + + vfs_setfsprivate(mp, NULL); + + vnode_getalways(mntdata->bindm_lowerrootvp); + vnode_rele(mntdata->bindm_lowerrootvp); + vnode_put(mntdata->bindm_lowerrootvp); + + FREE(mntdata, M_TEMP); + + uint64_t vflags = vfs_flags(mp); + vfs_setflags(mp, vflags & ~MNT_LOCAL); + + return 0; +} + +static int +bindfs_root(struct mount * mp, struct vnode ** vpp, __unused vfs_context_t ctx) +{ + struct vnode * vp; + int error; + + BINDFSDEBUG("mp = %p, vp = %p\n", (void *)mp, (void *)MOUNTTOBINDMOUNT(mp)->bindm_rootvp); + + /* + * Return locked reference to root. + */ + vp = MOUNTTOBINDMOUNT(mp)->bindm_rootvp; + + error = vnode_get(vp); + if (error) { + return error; + } + + *vpp = vp; + return 0; +} + +static int +bindfs_vfs_getattr(struct mount * mp, struct vfs_attr * vfap, vfs_context_t ctx) +{ + struct vnode * coveredvp = NULL; + struct vfs_attr vfa; + struct bind_mount * bind_mp = MOUNTTOBINDMOUNT(mp); + vol_capabilities_attr_t capabilities; + struct vfsstatfs * sp = vfs_statfs(mp); + + struct timespec tzero = {.tv_sec = 0, .tv_nsec = 0}; + + BINDFSDEBUG("\n"); + + /* Set default capabilities in case the lower file system is gone */ + memset(&capabilities, 0, sizeof(capabilities)); + capabilities.capabilities[VOL_CAPABILITIES_FORMAT] = VOL_CAP_FMT_FAST_STATFS | VOL_CAP_FMT_HIDDEN_FILES; + capabilities.valid[VOL_CAPABILITIES_FORMAT] = VOL_CAP_FMT_FAST_STATFS | VOL_CAP_FMT_HIDDEN_FILES; + + if (bindfs_vfs_getlowerattr(vnode_mount(bind_mp->bindm_lowerrootvp), &vfa, ctx) == 0) { + if (VFSATTR_IS_SUPPORTED(&vfa, f_capabilities)) { + memcpy(&capabilities, &vfa.f_capabilities, sizeof(capabilities)); + /* don't support vget */ + capabilities.capabilities[VOL_CAPABILITIES_FORMAT] &= ~(VOL_CAP_FMT_PERSISTENTOBJECTIDS | VOL_CAP_FMT_PATH_FROM_ID); + + capabilities.capabilities[VOL_CAPABILITIES_FORMAT] |= VOL_CAP_FMT_HIDDEN_FILES; /* Always support UF_HIDDEN */ + + capabilities.valid[VOL_CAPABILITIES_FORMAT] &= ~(VOL_CAP_FMT_PERSISTENTOBJECTIDS | VOL_CAP_FMT_PATH_FROM_ID); + + capabilities.valid[VOL_CAPABILITIES_FORMAT] |= VOL_CAP_FMT_HIDDEN_FILES; /* Always support UF_HIDDEN */ + + /* dont' support interfaces that only make sense on a writable file system + * or one with specific vnops implemented */ + capabilities.capabilities[VOL_CAPABILITIES_INTERFACES] = 0; + + capabilities.valid[VOL_CAPABILITIES_INTERFACES] &= + ~(VOL_CAP_INT_SEARCHFS | 
VOL_CAP_INT_ATTRLIST | VOL_CAP_INT_READDIRATTR | VOL_CAP_INT_EXCHANGEDATA | + VOL_CAP_INT_COPYFILE | VOL_CAP_INT_ALLOCATE | VOL_CAP_INT_VOL_RENAME | VOL_CAP_INT_ADVLOCK | VOL_CAP_INT_FLOCK); + } + } + + if (VFSATTR_IS_ACTIVE(vfap, f_create_time)) { + VFSATTR_RETURN(vfap, f_create_time, tzero); + } + + if (VFSATTR_IS_ACTIVE(vfap, f_modify_time)) { + VFSATTR_RETURN(vfap, f_modify_time, tzero); + } + + if (VFSATTR_IS_ACTIVE(vfap, f_access_time)) { + VFSATTR_RETURN(vfap, f_access_time, tzero); + } + + if (VFSATTR_IS_ACTIVE(vfap, f_bsize)) { + VFSATTR_RETURN(vfap, f_bsize, sp->f_bsize); + } + + if (VFSATTR_IS_ACTIVE(vfap, f_iosize)) { + VFSATTR_RETURN(vfap, f_iosize, sp->f_iosize); + } + + if (VFSATTR_IS_ACTIVE(vfap, f_owner)) { + VFSATTR_RETURN(vfap, f_owner, 0); + } + + if (VFSATTR_IS_ACTIVE(vfap, f_blocks)) { + VFSATTR_RETURN(vfap, f_blocks, sp->f_blocks); + } + + if (VFSATTR_IS_ACTIVE(vfap, f_bfree)) { + VFSATTR_RETURN(vfap, f_bfree, sp->f_bfree); + } + + if (VFSATTR_IS_ACTIVE(vfap, f_bavail)) { + VFSATTR_RETURN(vfap, f_bavail, sp->f_bavail); + } + + if (VFSATTR_IS_ACTIVE(vfap, f_bused)) { + VFSATTR_RETURN(vfap, f_bused, sp->f_bused); + } + + if (VFSATTR_IS_ACTIVE(vfap, f_files)) { + VFSATTR_RETURN(vfap, f_files, sp->f_files); + } + + if (VFSATTR_IS_ACTIVE(vfap, f_ffree)) { + VFSATTR_RETURN(vfap, f_ffree, sp->f_ffree); + } + + if (VFSATTR_IS_ACTIVE(vfap, f_fssubtype)) { + VFSATTR_RETURN(vfap, f_fssubtype, 0); + } + + if (VFSATTR_IS_ACTIVE(vfap, f_capabilities)) { + memcpy(&vfap->f_capabilities, &capabilities, sizeof(vol_capabilities_attr_t)); + + VFSATTR_SET_SUPPORTED(vfap, f_capabilities); + } + + if (VFSATTR_IS_ACTIVE(vfap, f_attributes)) { + vol_attributes_attr_t * volattr = &vfap->f_attributes; + + volattr->validattr.commonattr = 0; + volattr->validattr.volattr = ATTR_VOL_NAME | ATTR_VOL_CAPABILITIES | ATTR_VOL_ATTRIBUTES; + volattr->validattr.dirattr = 0; + volattr->validattr.fileattr = 0; + volattr->validattr.forkattr = 0; + + volattr->nativeattr.commonattr = 0; + volattr->nativeattr.volattr = ATTR_VOL_NAME | ATTR_VOL_CAPABILITIES | ATTR_VOL_ATTRIBUTES; + volattr->nativeattr.dirattr = 0; + volattr->nativeattr.fileattr = 0; + volattr->nativeattr.forkattr = 0; + + VFSATTR_SET_SUPPORTED(vfap, f_attributes); + } + + if (VFSATTR_IS_ACTIVE(vfap, f_vol_name)) { + /* The name of the volume is the same as the directory we mounted on */ + coveredvp = vfs_vnodecovered(mp); + if (coveredvp) { + const char * name = vnode_getname_printable(coveredvp); + strlcpy(vfap->f_vol_name, name, MAXPATHLEN); + vnode_putname_printable(name); + + VFSATTR_SET_SUPPORTED(vfap, f_vol_name); + vnode_put(coveredvp); + } + } + + return 0; +} + +static int +bindfs_sync(__unused struct mount * mp, __unused int waitfor, __unused vfs_context_t ctx) +{ + return 0; +} + + + +static int +bindfs_vfs_start(__unused struct mount * mp, __unused int flags, __unused vfs_context_t ctx) +{ + BINDFSDEBUG("\n"); + return 0; +} + +extern const struct vnodeopv_desc bindfs_vnodeop_opv_desc; + +const struct vnodeopv_desc * bindfs_vnodeopv_descs[] = { + &bindfs_vnodeop_opv_desc, +}; + +struct vfsops bindfs_vfsops = { + .vfs_mount = bindfs_mount, + .vfs_unmount = bindfs_unmount, + .vfs_start = bindfs_vfs_start, + .vfs_root = bindfs_root, + .vfs_getattr = bindfs_vfs_getattr, + .vfs_sync = bindfs_sync, + .vfs_init = bindfs_init, + .vfs_sysctl = NULL, + .vfs_setattr = NULL, +}; diff --git a/bsd/miscfs/bindfs/bind_vnops.c b/bsd/miscfs/bindfs/bind_vnops.c new file mode 100644 index 000000000..2290bd890 --- /dev/null +++ 
b/bsd/miscfs/bindfs/bind_vnops.c @@ -0,0 +1,701 @@ +/* + * Copyright (c) 2019 Apple Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +/*- + * Portions Copyright (c) 1992, 1993 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * John Heidemann of the UCLA Ficus project. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)null_vnops.c 8.6 (Berkeley) 5/27/95 + * + * Ancestors: + * @(#)lofs_vnops.c 1.2 (Berkeley) 6/18/92 + * ...and... 
+ * @(#)null_vnodeops.c 1.20 92/07/07 UCLA Ficus project + * + * $FreeBSD$ + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "bindfs.h" + +#define BIND_ROOT_INO 2 + +vop_t * bindfs_vnodeop_p = NULL; + +static int +bindfs_default(__unused struct vnop_generic_args * args) +{ + return ENOTSUP; +} + +static int +bindfs_getattr(struct vnop_getattr_args * args) +{ + int error; + BINDFSDEBUG("%s %p\n", __FUNCTION__, args->a_vp); + + struct vnode * lowervp = BINDVPTOLOWERVP(args->a_vp); + + error = vnode_getwithref(lowervp); + if (error == 0) { + error = VNOP_GETATTR(lowervp, args->a_vap, args->a_context); + vnode_put(lowervp); + + if (error == 0) { + if (VATTR_IS_ACTIVE(args->a_vap, va_fsid)) { + /* fix up fsid so it doesn't say the underlying fs*/ + VATTR_RETURN(args->a_vap, va_fsid, vfs_statfs(vnode_mount(args->a_vp))->f_fsid.val[0]); + } + if (VATTR_IS_ACTIVE(args->a_vap, va_fsid64)) { + /* fix up fsid so it doesn't say the underlying fs*/ + VATTR_RETURN(args->a_vap, va_fsid64, vfs_statfs(vnode_mount(args->a_vp))->f_fsid); + } + struct vnode * parent = vnode_parent(args->a_vp); + if (vnode_isvroot(args->a_vp)) { + // We can use the lower answers for most questions about the root vnode but need to fix up a few things + if (VATTR_IS_ACTIVE(args->a_vap, va_fileid)) { + VATTR_RETURN(args->a_vap, va_fileid, BIND_ROOT_INO); + } + if (VATTR_IS_ACTIVE(args->a_vap, va_linkid)) { + VATTR_RETURN(args->a_vap, va_linkid, BIND_ROOT_INO); + } + if (VATTR_IS_ACTIVE(args->a_vap, va_parentid)) { + // The parent of the root is itself + VATTR_RETURN(args->a_vap, va_parentid, BIND_ROOT_INO); + } + } else if (parent != NULL && vnode_isvroot(parent)) { + if (VATTR_IS_ACTIVE(args->a_vap, va_parentid)) { + // This vnode's parent is the root. + VATTR_RETURN(args->a_vap, va_parentid, BIND_ROOT_INO); + } + } + } + } + + return error; +} + +static int +bindfs_open(struct vnop_open_args * args) +{ + int error; + struct vnode *vp, *lvp; + + BINDFSDEBUG("%s %p\n", __FUNCTION__, args->a_vp); + + vp = args->a_vp; + lvp = BINDVPTOLOWERVP(vp); + error = vnode_getwithref(lvp); + if (error == 0) { + error = VNOP_OPEN(lvp, args->a_mode, args->a_context); + vnode_put(lvp); + } + + return error; +} + +static int +bindfs_close(struct vnop_close_args * args) +{ + int error; + struct vnode *vp, *lvp; + + BINDFSDEBUG("%s %p\n", __FUNCTION__, args->a_vp); + + vp = args->a_vp; + lvp = BINDVPTOLOWERVP(vp); + + error = vnode_getwithref(lvp); + if (error == 0) { + error = VNOP_CLOSE(lvp, args->a_fflag, args->a_context); + vnode_put(lvp); + } + return error; +} + +/* + * We have to carry on the locking protocol on the bind layer vnodes + * as we progress through the tree. We also have to enforce read-only + * if this layer is mounted read-only. + */ +static int +bind_lookup(struct vnop_lookup_args * ap) +{ + struct componentname * cnp = ap->a_cnp; + struct vnode * dvp = ap->a_dvp; + struct vnode *vp, *ldvp, *lvp; + struct mount * mp; + struct bind_mount * bind_mp; + int error; + + BINDFSDEBUG("%s parent: %p component: %.*s\n", __FUNCTION__, ap->a_dvp, cnp->cn_namelen, cnp->cn_nameptr); + + mp = vnode_mount(dvp); + /* rename and delete are not allowed. this is a read only file system */ + if (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME || cnp->cn_nameiop == CREATE) { + return EROFS; + } + bind_mp = MOUNTTOBINDMOUNT(mp); + + // . and .. 
handling + if (cnp->cn_nameptr[0] == '.') { + if (cnp->cn_namelen == 1) { + vp = dvp; + } else if (cnp->cn_namelen == 2 && cnp->cn_nameptr[1] == '.') { + vp = (vnode_isvroot(dvp)) ? dvp : vnode_parent(dvp); + } else { + goto notdot; + } + + error = vp ? vnode_get(vp) : ENOENT; + + if (error == 0) { + *ap->a_vpp = vp; + } + + return error; + } + +notdot: + ldvp = BINDVPTOLOWERVP(dvp); + vp = lvp = NULL; + + /* + * Hold ldvp. The reference on it, owned by dvp, is lost in + * case of dvp reclamation. + */ + error = vnode_getwithref(ldvp); + if (error) { + return error; + } + + error = VNOP_LOOKUP(ldvp, &lvp, cnp, ap->a_context); + + vnode_put(ldvp); + + if ((error == 0 || error == EJUSTRETURN) && lvp != NULL) { + if (ldvp == lvp) { + vp = dvp; + error = vnode_get(vp); + } else { + error = bind_nodeget(mp, lvp, dvp, &vp, cnp, 0); + } + if (error == 0) { + *ap->a_vpp = vp; + } + } + + /* if we got lvp, drop the iocount from VNOP_LOOKUP */ + if (lvp != NULL) { + vnode_put(lvp); + } + + return error; +} + +/* + * Don't think this needs to do anything + */ +static int +bind_inactive(__unused struct vnop_inactive_args * ap) +{ + BINDFSDEBUG("%s %p\n", __FUNCTION__, ap->a_vp); + + return 0; +} + +static int +bind_reclaim(struct vnop_reclaim_args * ap) +{ + struct vnode * vp; + struct bind_node * xp; + struct vnode * lowervp; + + BINDFSDEBUG("%s %p\n", __FUNCTION__, ap->a_vp); + + vp = ap->a_vp; + + xp = VTOBIND(vp); + lowervp = xp->bind_lowervp; + + vnode_removefsref(vp); + + bind_hashrem(xp); + vnode_getwithref(lowervp); + vnode_rele(lowervp); + vnode_put(lowervp); + + cache_purge(vp); + vnode_clearfsnode(vp); + + FREE(xp, M_TEMP); + + return 0; +} + +/* Get dirent length padded to 4 byte alignment */ +#define DIRENT_LEN(namelen) \ + ((sizeof(struct dirent) + (namelen + 1) - (__DARWIN_MAXNAMLEN + 1) + 3) & ~3) + +/* Get the end of this dirent */ +#define DIRENT_END(dep) \ + (((char *)(dep)) + (dep)->d_reclen - 1) + +static int +bindfs_readdir(struct vnop_readdir_args * ap) +{ + struct vnode *vp, *lvp, *dvp; + int error; + uio_t uio = ap->a_uio; + + BINDFSDEBUG("%s %p\n", __FUNCTION__, ap->a_vp); + /* assumption is that any vp that comes through here had to go through lookup + */ + + if (ap->a_flags & (VNODE_READDIR_EXTENDED | VNODE_READDIR_REQSEEKOFF)) { + return EINVAL; + } + + vp = ap->a_vp; + dvp = vnode_parent(vp); + lvp = BINDVPTOLOWERVP(vp); + error = vnode_getwithref(lvp); + if (error != 0) { + goto lb_end; + } + + if (vnode_isvroot(vp) || (dvp != NULL && vnode_isvroot(dvp))) { + size_t bufsize; + void * bufptr; + uio_t auio; + struct dirent *dep; + size_t bytesread; + bufsize = 3 * MIN((user_size_t)uio_resid(uio), 87371u) / 8; + MALLOC(bufptr, void *, bufsize, M_TEMP, M_WAITOK); + if (bufptr == NULL) { + return ENOMEM; + } + auio = uio_create(1, 0, UIO_SYSSPACE, UIO_READ); + uio_addiov(auio, (uintptr_t)bufptr, bufsize); + uio_setoffset(auio, uio_offset(uio)); + error = VNOP_READDIR(lvp, auio, ap->a_flags, ap->a_eofflag, ap->a_numdirent, ap->a_context); + vnode_put(lvp); + if (error != 0) { + goto lb_end; + } + + dep = (struct dirent *)bufptr; + bytesread = bufsize - uio_resid(auio); + while (error == 0 && (char *)dep < ((char *)bufptr + bytesread)) { + if (DIRENT_END(dep) > ((char *)bufptr + bytesread) || + DIRENT_LEN(dep->d_namlen) > dep->d_reclen) { + printf("%s: %s: Bad dirent received from directory %s\n", __func__, + vfs_statfs(vnode_mount(vp))->f_mntonname, + vp->v_name ? 
vp->v_name : ""); + error = EIO; + break; + } + if (dep->d_name[0] == '.') { + /* re-write the inode number for the mount root */ + /* if vp is the mount root then . = 2 and .. = 2 */ + /* if the parent of vp is the mount root then .. = 2 */ + if ((vnode_isvroot(vp) && dep->d_namlen == 1) || + (dep->d_namlen == 2 && dep->d_name[1] == '.')) { + dep->d_ino = BIND_ROOT_INO; + } + } + /* Copy entry64 to user's buffer. */ + error = uiomove((caddr_t)dep, dep->d_reclen, uio); + /* Move to next entry. */ + dep = (struct dirent *)((char *)dep + dep->d_reclen); + } + /* Update the real offset using the offset we got from VNOP_READDIR. */ + if (error == 0) { + uio_setoffset(uio, uio_offset(auio)); + } + uio_free(auio); + FREE(bufptr, M_TEMP); + } else { + error = VNOP_READDIR(lvp, ap->a_uio, ap->a_flags, ap->a_eofflag, ap->a_numdirent, ap->a_context); + vnode_put(lvp); + } + +lb_end: + return error; +} + +static int +bindfs_readlink(struct vnop_readlink_args * ap) +{ + BINDFSDEBUG("%s %p\n", __FUNCTION__, ap->a_vp); + int error; + struct vnode *vp, *lvp; + + vp = ap->a_vp; + lvp = BINDVPTOLOWERVP(vp); + + error = vnode_getwithref(lvp); + if (error == 0) { + error = VNOP_READLINK(lvp, ap->a_uio, ap->a_context); + vnode_put(lvp); + + if (error) { + printf("bindfs: readlink failed: %d\n", error); + } + } + + return error; +} + +static int +bindfs_pathconf(__unused struct vnop_pathconf_args * args) +{ + BINDFSDEBUG("%s %p\n", __FUNCTION__, args->a_vp); + return EINVAL; +} + +static int +bindfs_fsync(__unused struct vnop_fsync_args * args) +{ + BINDFSDEBUG("%s %p\n", __FUNCTION__, args->a_vp); + return 0; +} + +static int +bindfs_mmap(struct vnop_mmap_args * args) +{ + int error; + struct vnode *vp, *lvp; + + BINDFSDEBUG("%s %p\n", __FUNCTION__, args->a_vp); + + vp = args->a_vp; + lvp = BINDVPTOLOWERVP(vp); + error = vnode_getwithref(lvp); + if (error == 0) { + error = VNOP_MMAP(lvp, args->a_fflags, args->a_context); + vnode_put(lvp); + } + + return error; +} + +static int +bindfs_mnomap(struct vnop_mnomap_args * args) +{ + int error; + struct vnode *vp, *lvp; + + BINDFSDEBUG("%s %p\n", __FUNCTION__, args->a_vp); + + vp = args->a_vp; + lvp = BINDVPTOLOWERVP(vp); + error = vnode_getwithref(lvp); + if (error == 0) { + error = VNOP_MNOMAP(lvp, args->a_context); + vnode_put(lvp); + } + + return error; +} + +static int +bindfs_getxattr(struct vnop_getxattr_args * args) +{ + int error; + struct vnode *vp, *lvp; + + BINDFSDEBUG("%s %p\n", __FUNCTION__, args->a_vp); + + vp = args->a_vp; + lvp = BINDVPTOLOWERVP(vp); + error = vnode_getwithref(lvp); + if (error == 0) { + error = VNOP_GETXATTR(lvp, args->a_name, args->a_uio, args->a_size, args->a_options, args->a_context); + vnode_put(lvp); + } + + return error; +} + +static int +bindfs_listxattr(struct vnop_listxattr_args * args) +{ + int error; + struct vnode *vp, *lvp; + + BINDFSDEBUG("%s %p\n", __FUNCTION__, args->a_vp); + + vp = args->a_vp; + lvp = BINDVPTOLOWERVP(vp); + error = vnode_getwithref(lvp); + if (error == 0) { + error = VNOP_LISTXATTR(lvp, args->a_uio, args->a_size, args->a_options, args->a_context); + vnode_put(lvp); + } + + return error; +} + +/* relies on v1 paging */ +static int +bindfs_pagein(struct vnop_pagein_args * ap) +{ + int error = EIO; + struct vnode *vp, *lvp; + + BINDFSDEBUG("%s %p\n", __FUNCTION__, ap->a_vp); + + vp = ap->a_vp; + lvp = BINDVPTOLOWERVP(vp); + + if (vnode_vtype(vp) != VREG) { + return ENOTSUP; + } + + /* + * Ask VM/UBC/VFS to do our bidding + */ + if (vnode_getwithvid(lvp, BINDVPTOLOWERVID(vp)) == 0) { + vm_offset_t 
ioaddr; + uio_t auio; + kern_return_t kret; + off_t bytes_to_commit; + off_t lowersize; + upl_t upl = ap->a_pl; + user_ssize_t bytes_remaining = 0; + + auio = uio_create(1, ap->a_f_offset, UIO_SYSSPACE, UIO_READ); + if (auio == NULL) { + error = EIO; + goto exit_no_unmap; + } + + kret = ubc_upl_map(upl, &ioaddr); + if (KERN_SUCCESS != kret) { + panic("bindfs_pagein: ubc_upl_map() failed with (%d)", kret); + } + + ioaddr += ap->a_pl_offset; + + error = uio_addiov(auio, (user_addr_t)ioaddr, ap->a_size); + if (error) { + goto exit; + } + + lowersize = ubc_getsize(lvp); + if (lowersize != ubc_getsize(vp)) { + (void)ubc_setsize(vp, lowersize); /* ignore failures, nothing can be done */ + } + + error = VNOP_READ(lvp, auio, ((ap->a_flags & UPL_IOSYNC) ? IO_SYNC : 0), ap->a_context); + + bytes_remaining = uio_resid(auio); + if (bytes_remaining > 0 && bytes_remaining <= (user_ssize_t)ap->a_size) { + /* zero bytes that weren't read in to the upl */ + bzero((void*)((uintptr_t)(ioaddr + ap->a_size - bytes_remaining)), (size_t) bytes_remaining); + } + +exit: + kret = ubc_upl_unmap(upl); + if (KERN_SUCCESS != kret) { + panic("bindfs_pagein: ubc_upl_unmap() failed with (%d)", kret); + } + + if (auio != NULL) { + uio_free(auio); + } + +exit_no_unmap: + if ((ap->a_flags & UPL_NOCOMMIT) == 0) { + if (!error && (bytes_remaining >= 0) && (bytes_remaining <= (user_ssize_t)ap->a_size)) { + /* only commit what was read in (page aligned)*/ + bytes_to_commit = ap->a_size - bytes_remaining; + if (bytes_to_commit) { + /* need to make sure bytes_to_commit and byte_remaining are page aligned before calling ubc_upl_commit_range*/ + if (bytes_to_commit & PAGE_MASK) { + bytes_to_commit = (bytes_to_commit & (~PAGE_MASK)) + (PAGE_MASK + 1); + assert(bytes_to_commit <= (off_t)ap->a_size); + + bytes_remaining = ap->a_size - bytes_to_commit; + } + ubc_upl_commit_range(upl, ap->a_pl_offset, (upl_size_t)bytes_to_commit, UPL_COMMIT_FREE_ON_EMPTY); + } + + /* abort anything thats left */ + if (bytes_remaining) { + ubc_upl_abort_range(upl, ap->a_pl_offset + (upl_offset_t)bytes_to_commit, (upl_size_t)bytes_remaining, UPL_ABORT_ERROR | UPL_ABORT_FREE_ON_EMPTY); + } + } else { + ubc_upl_abort_range(upl, ap->a_pl_offset, (upl_size_t)ap->a_size, UPL_ABORT_ERROR | UPL_ABORT_FREE_ON_EMPTY); + } + } + vnode_put(lvp); + } else if ((ap->a_flags & UPL_NOCOMMIT) == 0) { + ubc_upl_abort_range(ap->a_pl, ap->a_pl_offset, (upl_size_t)ap->a_size, UPL_ABORT_ERROR | UPL_ABORT_FREE_ON_EMPTY); + } + return error; +} + +static int +bindfs_read(struct vnop_read_args * ap) +{ + int error = EIO; + + struct vnode *vp, *lvp; + + BINDFSDEBUG("%s %p\n", __FUNCTION__, ap->a_vp); + + vp = ap->a_vp; + lvp = BINDVPTOLOWERVP(vp); + + /* + * First some house keeping + */ + if (vnode_getwithvid(lvp, BINDVPTOLOWERVID(vp)) == 0) { + if (!vnode_isreg(lvp) && !vnode_islnk(lvp)) { + error = EPERM; + goto end; + } + + if (uio_resid(ap->a_uio) == 0) { + error = 0; + goto end; + } + + /* + * Now ask VM/UBC/VFS to do our bidding + */ + + error = VNOP_READ(lvp, ap->a_uio, ap->a_ioflag, ap->a_context); + if (error) { + printf("bindfs: VNOP_READ failed: %d\n", error); + } +end: + vnode_put(lvp); + } + return error; +} + +/* + * Global vfs data structures + */ + +static const struct vnodeopv_entry_desc bindfs_vnodeop_entries[] = { + {.opve_op = &vnop_default_desc, .opve_impl = (vop_t)bindfs_default}, /* default */ + {.opve_op = &vnop_getattr_desc, .opve_impl = (vop_t)bindfs_getattr}, /* getattr */ + {.opve_op = &vnop_open_desc, .opve_impl = (vop_t)bindfs_open}, /* open */ + 
{.opve_op = &vnop_close_desc, .opve_impl = (vop_t)bindfs_close}, /* close */ + {.opve_op = &vnop_inactive_desc, .opve_impl = (vop_t)bind_inactive}, /* inactive */ + {.opve_op = &vnop_reclaim_desc, .opve_impl = (vop_t)bind_reclaim}, /* reclaim */ + {.opve_op = &vnop_lookup_desc, .opve_impl = (vop_t)bind_lookup}, /* lookup */ + {.opve_op = &vnop_readdir_desc, .opve_impl = (vop_t)bindfs_readdir}, /* readdir */ + {.opve_op = &vnop_readlink_desc, .opve_impl = (vop_t)bindfs_readlink}, /* readlink */ + {.opve_op = &vnop_pathconf_desc, .opve_impl = (vop_t)bindfs_pathconf}, /* pathconf */ + {.opve_op = &vnop_fsync_desc, .opve_impl = (vop_t)bindfs_fsync}, /* fsync */ + {.opve_op = &vnop_mmap_desc, .opve_impl = (vop_t)bindfs_mmap}, /* mmap */ + {.opve_op = &vnop_mnomap_desc, .opve_impl = (vop_t)bindfs_mnomap}, /* mnomap */ + {.opve_op = &vnop_getxattr_desc, .opve_impl = (vop_t)bindfs_getxattr}, /* getxattr */ + {.opve_op = &vnop_pagein_desc, .opve_impl = (vop_t)bindfs_pagein}, /* pagein */ + {.opve_op = &vnop_read_desc, .opve_impl = (vop_t)bindfs_read}, /* read */ + {.opve_op = &vnop_listxattr_desc, .opve_impl = (vop_t)bindfs_listxattr}, /* listxattr */ + {.opve_op = NULL, .opve_impl = NULL}, +}; + +const struct vnodeopv_desc bindfs_vnodeop_opv_desc = {.opv_desc_vector_p = &bindfs_vnodeop_p, .opv_desc_ops = bindfs_vnodeop_entries}; + +//BINDFS Specific helper function + +int +bindfs_getbackingvnode(vnode_t in_vp, vnode_t* out_vpp) +{ + int result = EINVAL; + + if (out_vpp == NULL || in_vp == NULL) { + goto end; + } + + struct vfsstatfs * sp = NULL; + mount_t mp = vnode_mount(in_vp); + + sp = vfs_statfs(mp); + //If this isn't a bindfs vnode or it is but it's a special vnode + if (strcmp(sp->f_fstypename, "bindfs") != 0) { + *out_vpp = NULLVP; + result = ENOENT; + goto end; + } + + vnode_t lvp = BINDVPTOLOWERVP(in_vp); + if ((result = vnode_getwithvid(lvp, BINDVPTOLOWERVID(in_vp)))) { + goto end; + } + + *out_vpp = lvp; + +end: + return result; +} diff --git a/bsd/miscfs/bindfs/bindfs.h b/bsd/miscfs/bindfs/bindfs.h new file mode 100644 index 000000000..c50a91f4d --- /dev/null +++ b/bsd/miscfs/bindfs/bindfs.h @@ -0,0 +1,162 @@ +/* + * Copyright (c) 2019 Apple Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +/*- + * Portions Copyright (c) 1992, 1993 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software donated to Berkeley by + * Jan-Simon Pendry. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)null.h 8.3 (Berkeley) 8/20/94 + * + * $FreeBSD$ + */ + +#ifndef FS_BIND_H +#define FS_BIND_H + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#if KERNEL +#include +#else +#include +#endif + +//#define BINDFS_DEBUG 0 + +#define BINDM_CACHE 0x0001 +#define BINDM_CASEINSENSITIVE 0x0000000000000002 + +typedef int (*vop_t)(void *); + +struct bind_mount { + struct vnode * bindm_rootvp; /* Reference to root bind_node (inode 1) */ + struct vnode * bindm_lowerrootvp; /* reference to the root of the tree we are + * relocating (in the other file system) */ + uint32_t bindm_lowerrootvid; /* store the lower root vid so we can check + * before we build the shadow vnode lazily */ + uint64_t bindm_flags; +}; + +#ifdef KERNEL + +#define BIND_FLAG_HASHED 0x000000001 + +/* + * A cache of vnode references + */ +struct bind_node { + LIST_ENTRY(bind_node) bind_hash; /* Hash list */ + struct vnode * bind_lowervp; /* VREFed once */ + struct vnode * bind_vnode; /* Back pointer */ + uint32_t bind_lowervid; /* vid for lowervp to detect lowervp getting recycled out + * from under us */ + uint32_t bind_myvid; + uint32_t bind_flags; +}; + +struct vnodeop_desc_fake { + int vdesc_offset; + const char * vdesc_name; + /* other stuff */ +}; + +#define BINDV_NOUNLOCK 0x0001 +#define BINDV_DROP 0x0002 + +#define MOUNTTOBINDMOUNT(mp) ((struct bind_mount *)(vfs_fsprivate(mp))) +#define VTOBIND(vp) ((struct bind_node *)vnode_fsnode(vp)) +#define BINDTOV(xp) ((xp)->bind_vnode) + +__BEGIN_DECLS + +int bindfs_init(struct vfsconf * vfsp); +int bindfs_init_lck(lck_mtx_t * lck); +int bindfs_destroy_lck(lck_mtx_t * lck); +int bindfs_destroy(void); +int bind_nodeget( + struct mount * mp, struct vnode * lowervp, struct vnode * dvp, struct vnode ** vpp, struct componentname * cnp, int root); +int bind_hashget(struct mount * mp, struct vnode * lowervp, struct vnode ** vpp); +int bind_getnewvnode( + struct mount * mp, struct vnode * lowervp, struct vnode * dvp, struct vnode ** vpp, struct componentname * cnp, int root); +void bind_hashrem(struct bind_node * xp); + +int bindfs_getbackingvnode(vnode_t in_vp, vnode_t* out_vpp); + +#define BINDVPTOLOWERVP(vp) 
(VTOBIND(vp)->bind_lowervp) +#define BINDVPTOLOWERVID(vp) (VTOBIND(vp)->bind_lowervid) +#define BINDVPTOMYVID(vp) (VTOBIND(vp)->bind_myvid) + +extern const struct vnodeopv_desc bindfs_vnodeop_opv_desc; + +extern vop_t * bindfs_vnodeop_p; + +__END_DECLS + +#ifdef BINDFS_DEBUG +#define BINDFSDEBUG(format, args...) printf("DEBUG: BindFS %s: " format, __FUNCTION__, ##args) +#else +#define BINDFSDEBUG(format, args...) +#endif /* BINDFS_DEBUG */ + +#define BINDFSERROR(format, args...) printf("ERROR: BindFS %s: " format, __FUNCTION__, ##args) + +#endif /* KERNEL */ + +#endif diff --git a/bsd/miscfs/devfs/devfs.h b/bsd/miscfs/devfs/devfs.h index 966926d4f..ea9e80edf 100644 --- a/bsd/miscfs/devfs/devfs.h +++ b/bsd/miscfs/devfs/devfs.h @@ -140,6 +140,7 @@ __END_DECLS #define UID_ROOT 0 #define UID_BIN 3 #define UID_UUCP 66 +#define UID_LOGD 272 /* XXX */ #define GID_WHEEL 0 @@ -150,6 +151,7 @@ __END_DECLS #define GID_GAMES 13 #define GID_DIALER 68 #define GID_WINDOWSERVER 88 +#define GID_LOGD 272 #endif /* __APPLE_API_PRIVATE */ #endif /* !_MISCFS_DEVFS_DEVFS_H_ */ diff --git a/bsd/miscfs/devfs/devfs_fdesc_support.c b/bsd/miscfs/devfs/devfs_fdesc_support.c index 95a5be5f2..9c861796f 100644 --- a/bsd/miscfs/devfs/devfs_fdesc_support.c +++ b/bsd/miscfs/devfs/devfs_fdesc_support.c @@ -225,7 +225,7 @@ loop: } *vpp = fd->fd_vnode; - (*vpp)->v_type = vtype; + (*vpp)->v_type = (uint16_t)vtype; return error; } @@ -394,16 +394,16 @@ fdesc_attr(int fd, struct vnode_attr *vap, vfs_context_t a_context) if ((error = fp_lookup(p, fd, &fp, 0))) { return error; } - switch (FILEGLOB_DTYPE(fp->f_fglob)) { + switch (FILEGLOB_DTYPE(fp->fp_glob)) { case DTYPE_VNODE: - if ((error = vnode_getwithref((struct vnode *) fp->f_fglob->fg_data)) != 0) { + if ((error = vnode_getwithref((struct vnode *) fp->fp_glob->fg_data)) != 0) { break; } - if ((error = vnode_authorize((struct vnode *)fp->f_fglob->fg_data, + if ((error = vnode_authorize((struct vnode *)fp->fp_glob->fg_data, NULL, KAUTH_VNODE_READ_ATTRIBUTES | KAUTH_VNODE_READ_SECURITY, a_context)) == 0) { - error = vnode_getattr((struct vnode *)fp->f_fglob->fg_data, vap, a_context); + error = vnode_getattr((struct vnode *)fp->fp_glob->fg_data, vap, a_context); } if (error == 0 && vap->va_type == VDIR) { /* @@ -414,20 +414,20 @@ fdesc_attr(int fd, struct vnode_attr *vap, vfs_context_t a_context) */ vap->va_mode &= ~((VEXEC) | (VEXEC >> 3) | (VEXEC >> 6)); } - (void)vnode_put((struct vnode *) fp->f_fglob->fg_data); + (void)vnode_put((struct vnode *) fp->fp_glob->fg_data); break; case DTYPE_SOCKET: case DTYPE_PIPE: #if SOCKETS - if (FILEGLOB_DTYPE(fp->f_fglob) == DTYPE_SOCKET) { - error = soo_stat((struct socket *)fp->f_fglob->fg_data, (void *)&stb, 0); + if (FILEGLOB_DTYPE(fp->fp_glob) == DTYPE_SOCKET) { + error = soo_stat((struct socket *)fp->fp_glob->fg_data, (void *)&stb, 0); } else #endif /* SOCKETS */ - error = pipe_stat((struct pipe *)fp->f_fglob->fg_data, (void *)&stb, 0); + error = pipe_stat((struct pipe *)fp->fp_glob->fg_data, (void *)&stb, 0); if (error == 0) { - if (FILEGLOB_DTYPE(fp->f_fglob) == DTYPE_SOCKET) { + if (FILEGLOB_DTYPE(fp->fp_glob) == DTYPE_SOCKET) { VATTR_RETURN(vap, va_type, VSOCK); } else { VATTR_RETURN(vap, va_type, VFIFO); @@ -483,7 +483,7 @@ fdesc_getattr(struct vnop_getattr_args *ap) * a snapshot. 
*/ if (error == 0) { - vp->v_type = vap->va_type; + vp->v_type = (uint16_t)vap->va_type; /* We need an inactive to reset type to VNON */ vnode_setneedinactive(vp); @@ -519,14 +519,14 @@ fdesc_setattr(struct vnop_setattr_args *ap) /* * Can setattr the underlying vnode, but not sockets! */ - switch (FILEGLOB_DTYPE(fp->f_fglob)) { + switch (FILEGLOB_DTYPE(fp->fp_glob)) { case DTYPE_VNODE: { - if ((error = vnode_getwithref((struct vnode *) fp->f_fglob->fg_data)) != 0) { + if ((error = vnode_getwithref((struct vnode *) fp->fp_glob->fg_data)) != 0) { break; } - error = vnode_setattr((struct vnode *) fp->f_fglob->fg_data, ap->a_vap, ap->a_context); - (void)vnode_put((struct vnode *) fp->f_fglob->fg_data); + error = vnode_setattr((struct vnode *) fp->fp_glob->fg_data, ap->a_vap, ap->a_context); + (void)vnode_put((struct vnode *) fp->fp_glob->fg_data); break; } @@ -567,7 +567,8 @@ devfs_devfd_readdir(struct vnop_readdir_args *ap) { struct uio *uio = ap->a_uio; struct proc *p = current_proc(); - int i, error; + off_t i; + int error; /* * We don't allow exporting fdesc mounts, and currently local @@ -587,7 +588,7 @@ devfs_devfd_readdir(struct vnop_readdir_args *ap) i = uio->uio_offset / UIO_MX; error = 0; while (uio_resid(uio) >= UIO_MX) { - if (i >= p->p_fd->fd_nfiles) { + if (i >= p->p_fd->fd_nfiles || i < 0) { break; } @@ -597,11 +598,11 @@ devfs_devfd_readdir(struct vnop_readdir_args *ap) bzero((caddr_t) dp, UIO_MX); - dp->d_namlen = scnprintf(dp->d_name, sizeof(dp->d_name), - "%d", i); + dp->d_namlen = (__uint8_t)scnprintf(dp->d_name, sizeof(dp->d_name), + "%lld", i); dp->d_reclen = UIO_MX; dp->d_type = DT_UNKNOWN; - dp->d_fileno = i + FD_STDIN; + dp->d_fileno = (ino_t)i + FD_STDIN; /* * And ship to userland */ diff --git a/bsd/miscfs/devfs/devfs_tree.c b/bsd/miscfs/devfs/devfs_tree.c index 589472d57..21a0ac5c0 100644 --- a/bsd/miscfs/devfs/devfs_tree.c +++ b/bsd/miscfs/devfs/devfs_tree.c @@ -200,9 +200,7 @@ devfs_sinit(void) return ENOTSUP; } #ifdef HIDDEN_MOUNTPOINT - MALLOC(devfs_hidden_mount, struct mount *, sizeof(struct mount), - M_MOUNT, M_WAITOK); - bzero(devfs_hidden_mount, sizeof(struct mount)); + devfs_hidden_mount = zalloc_flags(mount_zone, Z_WAITOK | Z_ZERO); mount_lock_init(devfs_hidden_mount); TAILQ_INIT(&devfs_hidden_mount->mnt_vnodelist); TAILQ_INIT(&devfs_hidden_mount->mnt_workerqueue); @@ -224,7 +222,7 @@ devfs_sinit(void) = (struct devfsmount *)devfs_hidden_mount->mnt_data; #endif /* HIDDEN_MOUNTPOINT */ #if CONFIG_MACF - mac_devfs_label_associate_directory("/", strlen("/"), + mac_devfs_label_associate_directory("/", (int) strlen("/"), dev_root->de_dnp, "/"); #endif devfs_ready = 1; @@ -360,7 +358,7 @@ dev_finddir(const char * path, #if CONFIG_MACF mac_devfs_label_associate_directory( dirnode->dn_typeinfo.Dir.myname->de_name, - strlen(dirnode->dn_typeinfo.Dir.myname->de_name), + (int) strlen(dirnode->dn_typeinfo.Dir.myname->de_name), dnp, fullpath); #endif devfs_propogate(dirnode->dn_typeinfo.Dir.myname, dirent_p, delp); @@ -1581,7 +1579,7 @@ devfs_make_node_internal(dev_t dev, devfstype_t type, uid_t uid, #if CONFIG_MACF char buff[sizeof(buf)]; #endif - int i; + size_t i; uint32_t log_count; struct devfs_event_log event_log; struct devfs_vnode_event stackbuf[NUM_STACK_ENTRIES]; @@ -1682,7 +1680,7 @@ devfs_make_link(void *original, char *fmt, ...) 
va_list ap; char *p, buf[256]; /* XXX */ - int i; + size_t i; DEVFS_LOCK(); diff --git a/bsd/miscfs/devfs/devfs_vfsops.c b/bsd/miscfs/devfs/devfs_vfsops.c index 3498ffc04..f01b26873 100644 --- a/bsd/miscfs/devfs/devfs_vfsops.c +++ b/bsd/miscfs/devfs/devfs_vfsops.c @@ -134,7 +134,7 @@ devfs_init(__unused struct vfsconf *vfsp) if (!(logging_config & ATM_TRACE_DISABLE)) { devfs_make_node(makedev(7, 0), DEVFS_CHAR, - UID_ROOT, GID_WHEEL, 0600, "oslog"); + UID_LOGD, GID_LOGD, 0600, "oslog"); if (cdevsw_setkqueueok(7, (&(cdevsw[7])), 0) == -1) { return ENOTSUP; } diff --git a/bsd/miscfs/devfs/devfs_vnops.c b/bsd/miscfs/devfs/devfs_vnops.c index 322f40823..b9b9ef5db 100644 --- a/bsd/miscfs/devfs/devfs_vnops.c +++ b/bsd/miscfs/devfs/devfs_vnops.c @@ -493,7 +493,7 @@ devfs_getattr(struct vnop_getattr_args *ap) } else if (vp->v_type == VCHR) { VATTR_RETURN(vap, va_iosize, MAXPHYSIO); } else { - VATTR_RETURN(vap, va_iosize, vp->v_mount->mnt_vfsstat.f_iosize); + VATTR_RETURN(vap, va_iosize, (uint32_t)vp->v_mount->mnt_vfsstat.f_iosize); } @@ -564,9 +564,9 @@ devfs_setattr(struct vnop_setattr_args *ap) file_node->dn_update = 1; } atimeval.tv_sec = vap->va_access_time.tv_sec; - atimeval.tv_usec = vap->va_access_time.tv_nsec / 1000; + atimeval.tv_usec = (suseconds_t)(vap->va_access_time.tv_nsec / 1000); mtimeval.tv_sec = vap->va_modify_time.tv_sec; - mtimeval.tv_usec = vap->va_modify_time.tv_nsec / 1000; + mtimeval.tv_usec = (suseconds_t)(vap->va_modify_time.tv_nsec / 1000); if ((error = devfs_update(vp, &atimeval, &mtimeval))) { goto exit; @@ -1226,7 +1226,7 @@ devfs_symlink(struct vnop_symlink_args *ap) /* Called with devfs locked */ int -devfs_make_symlink(devnode_t *dir_p, char *name, int mode, char *target, devdirent_t **newent) +devfs_make_symlink(devnode_t *dir_p, char *name, mode_t mode, char *target, devdirent_t **newent) { int error = 0; devnode_type_t typeinfo; @@ -1337,7 +1337,7 @@ devfs_readdir(struct vnop_readdir_args *ap) int error = 0; int reclen; int nodenumber; - int startpos, pos; + off_t startpos, pos; if (ap->a_flags & (VNODE_READDIR_EXTENDED | VNODE_READDIR_REQSEEKOFF)) { return EINVAL; @@ -1376,7 +1376,7 @@ devfs_readdir(struct vnop_readdir_args *ap) break; default: dirent.d_fileno = name_node->de_dnp->dn_ino; - dirent.d_namlen = strlen(name_node->de_name); + dirent.d_namlen = (__uint8_t) strlen(name_node->de_name); name = name_node->de_name; switch (name_node->de_dnp->dn_type) { case DEV_BDEV: @@ -1448,7 +1448,7 @@ devfs_readlink(struct vnop_readlink_args *ap) goto out; } error = uiomove(lnk_node->dn_typeinfo.Slnk.name, - lnk_node->dn_typeinfo.Slnk.namelen, uio); + (int)lnk_node->dn_typeinfo.Slnk.namelen, uio); out: return error; } diff --git a/bsd/miscfs/devfs/devfsdefs.h b/bsd/miscfs/devfs/devfsdefs.h index e2dee3842..6533fbe3b 100644 --- a/bsd/miscfs/devfs/devfsdefs.h +++ b/bsd/miscfs/devfs/devfsdefs.h @@ -113,7 +113,7 @@ union devnode_type { }Dir; struct { char * name;/* must be allocated separately */ - int namelen; + size_t namelen; }Slnk; }; @@ -298,7 +298,7 @@ dn_copy_times(devnode_t * target, devnode_t * source) } #ifdef BSD_KERNEL_PRIVATE -int devfs_make_symlink(devnode_t *dir_p, char *name, int mode, char *target, devdirent_t **newent); +int devfs_make_symlink(devnode_t *dir_p, char *name, mode_t mode, char *target, devdirent_t **newent); #endif /* BSD_KERNEL_PRIVATE */ #endif /* __APPLE_API_PRIVATE */ diff --git a/bsd/miscfs/devfs/fdesc.h b/bsd/miscfs/devfs/fdesc.h index f024c55e3..7d77ff310 100644 --- a/bsd/miscfs/devfs/fdesc.h +++ b/bsd/miscfs/devfs/fdesc.h @@ -89,7 
+89,7 @@ struct fdescnode { LIST_ENTRY(fdescnode) fd_hash; /* Hash list */ struct vnode *fd_vnode; /* Back ptr to vnode */ fdntype fd_type; /* Type of this node */ - long fd_fd; /* Fd to be dup'ed */ + int fd_fd; /* Fd to be dup'ed */ const char *fd_link; /* Link to fd/n */ int fd_ix; /* filesystem index */ }; diff --git a/bsd/miscfs/fifofs/fifo_vnops.c b/bsd/miscfs/fifofs/fifo_vnops.c index df13d0d96..6fb2efbdd 100644 --- a/bsd/miscfs/fifofs/fifo_vnops.c +++ b/bsd/miscfs/fifofs/fifo_vnops.c @@ -390,16 +390,16 @@ fifo_ioctl(struct vnop_ioctl_args *ap) return 0; } bzero(&filetmp, sizeof(struct fileproc)); - filetmp.f_fglob = &filefg; + filetmp.fp_glob = &filefg; if (ap->a_fflag & FREAD) { - filetmp.f_fglob->fg_data = (caddr_t)ap->a_vp->v_fifoinfo->fi_readsock; + filetmp.fp_glob->fg_data = (caddr_t)ap->a_vp->v_fifoinfo->fi_readsock; error = soo_ioctl(&filetmp, ap->a_command, ap->a_data, ap->a_context); if (error) { return error; } } if (ap->a_fflag & FWRITE) { - filetmp.f_fglob->fg_data = (caddr_t)ap->a_vp->v_fifoinfo->fi_writesock; + filetmp.fp_glob->fg_data = (caddr_t)ap->a_vp->v_fifoinfo->fi_writesock; error = soo_ioctl(&filetmp, ap->a_command, ap->a_data, ap->a_context); if (error) { return error; @@ -416,16 +416,16 @@ fifo_select(struct vnop_select_args *ap) int ready; bzero(&filetmp, sizeof(struct fileproc)); - filetmp.f_fglob = &filefg; + filetmp.fp_glob = &filefg; if (ap->a_which & FREAD) { - filetmp.f_fglob->fg_data = (caddr_t)ap->a_vp->v_fifoinfo->fi_readsock; + filetmp.fp_glob->fg_data = (caddr_t)ap->a_vp->v_fifoinfo->fi_readsock; ready = soo_select(&filetmp, ap->a_which, ap->a_wql, ap->a_context); if (ready) { return ready; } } if (ap->a_which & FWRITE) { - filetmp.f_fglob->fg_data = (caddr_t)ap->a_vp->v_fifoinfo->fi_writesock; + filetmp.fp_glob->fg_data = (caddr_t)ap->a_vp->v_fifoinfo->fi_writesock; ready = soo_select(&filetmp, ap->a_which, ap->a_wql, ap->a_context); if (ready) { return ready; diff --git a/bsd/miscfs/nullfs/null_subr.c b/bsd/miscfs/nullfs/null_subr.c index 3743d52b2..746f09e6a 100644 --- a/bsd/miscfs/nullfs/null_subr.c +++ b/bsd/miscfs/nullfs/null_subr.c @@ -169,7 +169,7 @@ nullfs_uninit() /* This gets called when the fs is uninstalled, there wasn't an exact * equivalent in vfsops */ lck_mtx_destroy(&null_hashmtx, null_hashlck_grp); - FREE(null_node_hashtbl, M_TEMP); + hashdestroy(null_node_hashtbl, M_TEMP, null_hash_mask); if (null_hashlck_grp_attr) { lck_grp_attr_free(null_hashlck_grp_attr); null_hashlck_grp_attr = NULL; @@ -192,9 +192,9 @@ nullfs_uninit() int null_hashget(struct mount * mp, struct vnode * lowervp, struct vnode ** vpp) { - struct null_node_hashhead * hd; - struct null_node * a; - struct vnode * vp; + struct null_node_hashhead * hd = NULL; + struct null_node * a = NULL; + struct vnode * vp = NULL; int error = ENOENT; /* @@ -204,6 +204,7 @@ null_hashget(struct mount * mp, struct vnode * lowervp, struct vnode ** vpp) * just check whether the lowervp has gotten pulled from under us */ hd = NULL_NHASH(lowervp); + // In the future we should consider using a per bucket lock lck_mtx_lock(&null_hashmtx); LIST_FOREACH(a, hd, null_hash) { @@ -212,17 +213,19 @@ null_hashget(struct mount * mp, struct vnode * lowervp, struct vnode ** vpp) if (a->null_lowervid != vnode_vid(lowervp)) { /*lowervp has reved */ error = EIO; - } else { - /* if we found something then get an iocount on it */ - error = vnode_getwithvid(vp, a->null_myvid); - if (error == 0) { - *vpp = vp; - } + vp = NULL; } + // In the case of a succesful look-up we should consider moving the object 
to the top of the head break; } } lck_mtx_unlock(&null_hashmtx); + if (vp != NULL) { + error = vnode_getwithvid(vp, a->null_myvid); + if (error == 0) { + *vpp = vp; + } + } return error; } @@ -233,9 +236,9 @@ null_hashget(struct mount * mp, struct vnode * lowervp, struct vnode ** vpp) static int null_hashins(struct mount * mp, struct null_node * xp, struct vnode ** vpp) { - struct null_node_hashhead * hd; - struct null_node * oxp; - struct vnode * ovp; + struct null_node_hashhead * hd = NULL; + struct null_node * oxp = NULL; + struct vnode * ovp = NULL; int error = 0; hd = NULL_NHASH(xp->null_lowervp); @@ -255,12 +258,7 @@ null_hashins(struct mount * mp, struct null_node * xp, struct vnode ** vpp) * trying to add has been recycled * don't add it.*/ error = EIO; - goto end; - } - /* if we found something in the hash map then grab an iocount */ - error = vnode_getwithvid(ovp, oxp->null_myvid); - if (error == 0) { - *vpp = ovp; + ovp = NULL; } goto end; } @@ -271,6 +269,13 @@ null_hashins(struct mount * mp, struct null_node * xp, struct vnode ** vpp) xp->null_flags |= NULL_FLAG_HASHED; end: lck_mtx_unlock(&null_hashmtx); + if (ovp != NULL) { + /* if we found something in the hash map then grab an iocount */ + error = vnode_getwithvid(ovp, oxp->null_myvid); + if (error == 0) { + *vpp = ovp; + } + } return error; } diff --git a/bsd/miscfs/nullfs/null_vfsops.c b/bsd/miscfs/nullfs/null_vfsops.c index 8305be1c6..b09395429 100644 --- a/bsd/miscfs/nullfs/null_vfsops.c +++ b/bsd/miscfs/nullfs/null_vfsops.c @@ -70,6 +70,7 @@ #include #include #include +#include #include @@ -110,7 +111,9 @@ nullfs_mount(struct mount * mp, __unused vnode_t devvp, user_addr_t user_data, v struct vnode *lowerrootvp = NULL, *vp = NULL; struct vfsstatfs * sp = NULL; struct null_mount * xmp = NULL; - char data[MAXPATHLEN]; + struct null_mount_conf conf = {0}; + char path[MAXPATHLEN]; + size_t count; struct vfs_attr vfa; /* set defaults (arbitrary since this file system is readonly) */ @@ -143,10 +146,19 @@ nullfs_mount(struct mount * mp, __unused vnode_t devvp, user_addr_t user_data, v return EPERM; } + /* + * Get configuration + */ + error = copyin(user_data, &conf, sizeof(conf)); + if (error) { + NULLFSDEBUG("nullfs: error copying configuration form user %d\n", error); + goto error; + } + /* * Get argument */ - error = copyinstr(user_data, data, MAXPATHLEN - 1, &count); + error = copyinstr(user_data + sizeof(conf), path, MAXPATHLEN - 1, &count); if (error) { NULLFSDEBUG("nullfs: error copying data form user %d\n", error); goto error; @@ -156,13 +168,13 @@ nullfs_mount(struct mount * mp, __unused vnode_t devvp, user_addr_t user_data, v * 64 bit */ if (count > MAX_MNT_FROM_LENGTH) { error = EINVAL; - NULLFSDEBUG("nullfs: path to translocate too large for this system %d vs %d\n", count, MAX_MNT_FROM_LENGTH); + NULLFSDEBUG("nullfs: path to translocate too large for this system %ld vs %ld\n", count, MAX_MNT_FROM_LENGTH); goto error; } - error = vnode_lookup(data, 0, &lowerrootvp, ctx); + error = vnode_lookup(path, 0, &lowerrootvp, ctx); if (error) { - NULLFSDEBUG("lookup %s -> %d\n", data, error); + NULLFSDEBUG("lookup %s -> %d\n", path, error); goto error; } @@ -177,7 +189,7 @@ nullfs_mount(struct mount * mp, __unused vnode_t devvp, user_addr_t user_data, v goto error; } - NULLFSDEBUG("mount %s\n", data); + NULLFSDEBUG("mount %s\n", path); MALLOC(xmp, struct null_mount *, sizeof(*xmp), M_TEMP, M_WAITOK | M_ZERO); if (xmp == NULL) { @@ -185,6 +197,12 @@ nullfs_mount(struct mount * mp, __unused vnode_t devvp, user_addr_t 
user_data, v goto error; } + /* + * Grab the uid/gid of the caller, which may be used for unveil later + */ + xmp->uid = kauth_cred_getuid(cred); + xmp->gid = kauth_cred_getgid(cred); + /* * Save reference to underlying FS */ @@ -229,12 +247,15 @@ nullfs_mount(struct mount * mp, __unused vnode_t devvp, user_addr_t user_data, v /* fill in the stat block */ sp = vfs_statfs(mp); - strlcpy(sp->f_mntfromname, data, MAX_MNT_FROM_LENGTH); + strlcpy(sp->f_mntfromname, path, MAX_MNT_FROM_LENGTH); sp->f_flags = flags; xmp->nullm_flags = NULLM_CASEINSENSITIVE; /* default to case insensitive */ + // Set the flags that are requested + xmp->nullm_flags |= conf.flags & NULLM_UNVEIL; + error = nullfs_vfs_getlowerattr(vnode_mount(lowerrootvp), &vfa, ctx); if (error == 0) { if (VFSATTR_IS_SUPPORTED(&vfa, f_bsize)) { @@ -407,6 +428,7 @@ nullfs_vfs_getattr(struct mount * mp, struct vfs_attr * vfap, vfs_context_t ctx) struct null_mount * null_mp = MOUNTTONULLMOUNT(mp); vol_capabilities_attr_t capabilities; struct vfsstatfs * sp = vfs_statfs(mp); + vfs_context_t ectx = nullfs_get_patched_context(null_mp, ctx); struct timespec tzero = {.tv_sec = 0, .tv_nsec = 0}; @@ -417,7 +439,7 @@ nullfs_vfs_getattr(struct mount * mp, struct vfs_attr * vfap, vfs_context_t ctx) capabilities.capabilities[VOL_CAPABILITIES_FORMAT] = VOL_CAP_FMT_FAST_STATFS | VOL_CAP_FMT_HIDDEN_FILES; capabilities.valid[VOL_CAPABILITIES_FORMAT] = VOL_CAP_FMT_FAST_STATFS | VOL_CAP_FMT_HIDDEN_FILES; - if (nullfs_vfs_getlowerattr(vnode_mount(null_mp->nullm_lowerrootvp), &vfa, ctx) == 0) { + if (nullfs_vfs_getlowerattr(vnode_mount(null_mp->nullm_lowerrootvp), &vfa, ectx) == 0) { if (VFSATTR_IS_SUPPORTED(&vfa, f_capabilities)) { memcpy(&capabilities, &vfa.f_capabilities, sizeof(capabilities)); /* don't support vget */ @@ -528,6 +550,8 @@ nullfs_vfs_getattr(struct mount * mp, struct vfs_attr * vfap, vfs_context_t ctx) } } + nullfs_cleanup_patched_context(null_mp, ectx); + return 0; } diff --git a/bsd/miscfs/nullfs/null_vnops.c b/bsd/miscfs/nullfs/null_vnops.c index 6afadbfab..a351309c3 100644 --- a/bsd/miscfs/nullfs/null_vnops.c +++ b/bsd/miscfs/nullfs/null_vnops.c @@ -77,6 +77,7 @@ #include #include #include +#include #include "nullfs.h" @@ -118,6 +119,25 @@ nullfs_checkspecialvp(struct vnode* vp) return result; } +vfs_context_t +nullfs_get_patched_context(struct null_mount * null_mp, vfs_context_t ctx) +{ + struct vfs_context* ectx = ctx; + if ((null_mp->nullm_flags & NULLM_UNVEIL) == NULLM_UNVEIL) { + ectx = vfs_context_create(ctx); + ectx->vc_ucred = kauth_cred_setuidgid(ectx->vc_ucred, null_mp->uid, null_mp->gid); + } + return ectx; +} + +void +nullfs_cleanup_patched_context(struct null_mount * null_mp, vfs_context_t ctx) +{ + if ((null_mp->nullm_flags & NULLM_UNVEIL) == NULLM_UNVEIL) { + vfs_context_rele(ctx); + } +} + static int nullfs_default(__unused struct vnop_generic_args * args) { @@ -134,6 +154,7 @@ nullfs_special_getattr(struct vnop_getattr_args * args) ino_t ino = NULL_ROOT_INO; struct vnode_attr covered_rootattr; vnode_t checkvp = null_mp->nullm_lowerrootvp; + vfs_context_t ectx = nullfs_get_patched_context(null_mp, args->a_context); VATTR_INIT(&covered_rootattr); VATTR_WANTED(&covered_rootattr, va_uid); @@ -147,16 +168,18 @@ nullfs_special_getattr(struct vnop_getattr_args * args) if (vnode_getwithvid(checkvp, null_mp->nullm_lowerrootvid)) { checkvp = vfs_vnodecovered(mp); if (checkvp == NULL) { + nullfs_cleanup_patched_context(null_mp, ectx); return EIO; } } - int error = vnode_getattr(checkvp, &covered_rootattr, args->a_context); + int 
error = vnode_getattr(checkvp, &covered_rootattr, ectx); vnode_put(checkvp); if (error) { /* we should have been able to get attributes fore one of the two choices so * fail if we didn't */ + nullfs_cleanup_patched_context(null_mp, ectx); return error; } @@ -175,7 +198,12 @@ nullfs_special_getattr(struct vnop_getattr_args * args) VATTR_RETURN(args->a_vap, va_iosize, vfs_statfs(mp)->f_iosize); VATTR_RETURN(args->a_vap, va_fileid, ino); VATTR_RETURN(args->a_vap, va_linkid, ino); - VATTR_RETURN(args->a_vap, va_fsid, vfs_statfs(mp)->f_fsid.val[0]); // return the fsid of the mount point + if (VATTR_IS_ACTIVE(args->a_vap, va_fsid)) { + VATTR_RETURN(args->a_vap, va_fsid, vfs_statfs(mp)->f_fsid.val[0]); // return the fsid of the mount point + } + if (VATTR_IS_ACTIVE(args->a_vap, va_fsid64)) { + VATTR_RETURN(args->a_vap, va_fsid64, vfs_statfs(mp)->f_fsid); + } VATTR_RETURN(args->a_vap, va_filerev, 0); VATTR_RETURN(args->a_vap, va_gen, 0); VATTR_RETURN(args->a_vap, va_flags, UF_HIDDEN); /* mark our fake directories as hidden. People @@ -216,6 +244,7 @@ nullfs_special_getattr(struct vnop_getattr_args * args) args->a_vap->va_modify_time.tv_nsec = covered_rootattr.va_access_time.tv_nsec; } + nullfs_cleanup_patched_context(null_mp, ectx); return 0; } @@ -224,6 +253,7 @@ nullfs_getattr(struct vnop_getattr_args * args) { int error; struct null_mount * null_mp = MOUNTTONULLMOUNT(vnode_mount(args->a_vp)); + kauth_cred_t cred = vfs_context_ucred(args->a_context); NULLFSDEBUG("%s %p\n", __FUNCTION__, args->a_vp); lck_mtx_lock(&null_mp->nullm_lock); @@ -236,18 +266,57 @@ nullfs_getattr(struct vnop_getattr_args * args) /* this will return a different inode for third than read dir will */ struct vnode * lowervp = NULLVPTOLOWERVP(args->a_vp); - + vfs_context_t ectx = nullfs_get_patched_context(null_mp, args->a_context); error = vnode_getwithref(lowervp); + if (error == 0) { - error = VNOP_GETATTR(lowervp, args->a_vap, args->a_context); + error = VNOP_GETATTR(lowervp, args->a_vap, ectx); vnode_put(lowervp); if (error == 0) { /* fix up fsid so it doesn't say the underlying fs*/ - VATTR_RETURN(args->a_vap, va_fsid, vfs_statfs(vnode_mount(args->a_vp))->f_fsid.val[0]); + if (VATTR_IS_ACTIVE(args->a_vap, va_fsid)) { + VATTR_RETURN(args->a_vap, va_fsid, vfs_statfs(vnode_mount(args->a_vp))->f_fsid.val[0]); + } + if (VATTR_IS_ACTIVE(args->a_vap, va_fsid64)) { + VATTR_RETURN(args->a_vap, va_fsid64, vfs_statfs(vnode_mount(args->a_vp))->f_fsid); + } + + /* Conjure up permissions */ + if ((null_mp->nullm_flags & NULLM_UNVEIL) == NULLM_UNVEIL) { + if (VATTR_IS_ACTIVE(args->a_vap, va_mode)) { + mode_t mode = args->a_vap->va_mode; // We will take away permisions if we don't have them + + // Check for authorizations + // If we can read: + if (vnode_authorize(lowervp, NULL, KAUTH_VNODE_GENERIC_READ_BITS, ectx) == 0) { + mode |= S_IRUSR; + } else { + mode &= ~S_IRUSR; + } + + // Or execute + // Directories need an execute bit... 
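// (Editorial note, not part of the original patch.) Under NULLM_UNVEIL the
// lower filesystem's owner bits are deliberately not trusted: nullfs asks
// vnode_authorize() whether the patched credential (the uid/gid recorded at
// mount time) can actually read and, just below, execute/search the lower
// vnode, and sets or clears S_IRUSR/S_IXUSR from those answers; va_uid and
// va_gid are then reported as the calling credential further down. As a
// sketch, a vnode the mount owner may read but not search comes back as:
//
//     mode = (mode | S_IRUSR) & ~S_IXUSR;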
+ if (vnode_authorize(lowervp, NULL, KAUTH_VNODE_GENERIC_EXECUTE_BITS, ectx) == 0) { + mode |= S_IXUSR; + } else { + mode &= ~S_IXUSR; + } + + NULLFSDEBUG("Settings bits to %d\n", mode); + VATTR_RETURN(args->a_vap, va_mode, mode); + } + if (VATTR_IS_ACTIVE(args->a_vap, va_uid)) { + VATTR_RETURN(args->a_vap, va_uid, kauth_cred_getuid(cred)); + } + if (VATTR_IS_ACTIVE(args->a_vap, va_gid)) { + VATTR_RETURN(args->a_vap, va_gid, kauth_cred_getgid(cred)); + } + } } } + nullfs_cleanup_patched_context(null_mp, ectx); return error; } @@ -256,21 +325,24 @@ nullfs_open(struct vnop_open_args * args) { int error; struct vnode *vp, *lvp; - + mount_t mp = vnode_mount(args->a_vp); + struct null_mount * null_mp = MOUNTTONULLMOUNT(mp); NULLFSDEBUG("%s %p\n", __FUNCTION__, args->a_vp); if (nullfs_checkspecialvp(args->a_vp)) { return 0; /* nothing extra needed */ } + vfs_context_t ectx = nullfs_get_patched_context(null_mp, args->a_context); vp = args->a_vp; lvp = NULLVPTOLOWERVP(vp); error = vnode_getwithref(lvp); if (error == 0) { - error = VNOP_OPEN(lvp, args->a_mode, args->a_context); + error = VNOP_OPEN(lvp, args->a_mode, ectx); vnode_put(lvp); } + nullfs_cleanup_patched_context(null_mp, ectx); return error; } @@ -279,6 +351,8 @@ nullfs_close(struct vnop_close_args * args) { int error; struct vnode *vp, *lvp; + mount_t mp = vnode_mount(args->a_vp); + struct null_mount * null_mp = MOUNTTONULLMOUNT(mp); NULLFSDEBUG("%s %p\n", __FUNCTION__, args->a_vp); @@ -286,14 +360,17 @@ nullfs_close(struct vnop_close_args * args) return 0; /* nothing extra needed */ } + vfs_context_t ectx = nullfs_get_patched_context(null_mp, args->a_context); vp = args->a_vp; lvp = NULLVPTOLOWERVP(vp); error = vnode_getwithref(lvp); if (error == 0) { - error = VNOP_CLOSE(lvp, args->a_fflag, args->a_context); + error = VNOP_CLOSE(lvp, args->a_fflag, ectx); vnode_put(lvp); } + + nullfs_cleanup_patched_context(null_mp, ectx); return error; } @@ -350,6 +427,7 @@ null_special_lookup(struct vnop_lookup_args * ap) struct mount * mp = vnode_mount(dvp); struct null_mount * null_mp = MOUNTTONULLMOUNT(mp); int error = ENOENT; + vfs_context_t ectx = nullfs_get_patched_context(null_mp, ap->a_context); if (dvp == null_mp->nullm_rootvp) { /* handle . and .. */ @@ -411,10 +489,10 @@ null_special_lookup(struct vnop_lookup_args * ap) * so we got a match * 4. Anything else results in ENOENT. 
*/ - error = null_get_lowerparent(null_mp->nullm_lowerrootvp, &ldvp, ap->a_context); + error = null_get_lowerparent(null_mp->nullm_lowerrootvp, &ldvp, ectx); if (error == 0) { - error = VNOP_LOOKUP(ldvp, &lvp, cnp, ap->a_context); + error = VNOP_LOOKUP(ldvp, &lvp, cnp, ectx); vnode_put(ldvp); if (error == 0) { @@ -437,6 +515,7 @@ null_special_lookup(struct vnop_lookup_args * ap) } end: + nullfs_cleanup_patched_context(null_mp, ectx); if (error == 0) { *ap->a_vpp = vp; } @@ -457,6 +536,7 @@ null_lookup(struct vnop_lookup_args * ap) struct mount * mp; struct null_mount * null_mp; int error; + vfs_context_t ectx; NULLFSDEBUG("%s parent: %p component: %.*s\n", __FUNCTION__, ap->a_dvp, cnp->cn_namelen, cnp->cn_nameptr); @@ -467,6 +547,7 @@ null_lookup(struct vnop_lookup_args * ap) } null_mp = MOUNTTONULLMOUNT(mp); + lck_mtx_lock(&null_mp->nullm_lock); if (nullfs_isspecialvp(dvp)) { error = null_special_lookup(ap); @@ -496,6 +577,7 @@ null_lookup(struct vnop_lookup_args * ap) } notdot: + ectx = nullfs_get_patched_context(null_mp, ap->a_context); ldvp = NULLVPTOLOWERVP(dvp); vp = lvp = NULL; @@ -505,10 +587,11 @@ notdot: */ error = vnode_getwithref(ldvp); if (error) { + nullfs_cleanup_patched_context(null_mp, ectx); return error; } - error = VNOP_LOOKUP(ldvp, &lvp, cnp, ap->a_context); + error = VNOP_LOOKUP(ldvp, &lvp, cnp, ectx); vnode_put(ldvp); @@ -529,6 +612,7 @@ notdot: vnode_put(lvp); } + nullfs_cleanup_patched_context(null_mp, ectx); return error; } @@ -725,14 +809,16 @@ nullfs_readdir(struct vnop_readdir_args * ap) } lck_mtx_unlock(&null_mp->nullm_lock); + vfs_context_t ectx = nullfs_get_patched_context(null_mp, ap->a_context); vp = ap->a_vp; lvp = NULLVPTOLOWERVP(vp); error = vnode_getwithref(lvp); if (error == 0) { - error = VNOP_READDIR(lvp, ap->a_uio, ap->a_flags, ap->a_eofflag, ap->a_numdirent, ap->a_context); + error = VNOP_READDIR(lvp, ap->a_uio, ap->a_flags, ap->a_eofflag, ap->a_numdirent, ectx); vnode_put(lvp); } + nullfs_cleanup_patched_context(null_mp, ectx); return error; } @@ -742,17 +828,19 @@ nullfs_readlink(struct vnop_readlink_args * ap) NULLFSDEBUG("%s %p\n", __FUNCTION__, ap->a_vp); int error; struct vnode *vp, *lvp; + struct null_mount * null_mp = MOUNTTONULLMOUNT(vnode_mount(ap->a_vp)); if (nullfs_checkspecialvp(ap->a_vp)) { return ENOTSUP; /* the special vnodes aren't links */ } + vfs_context_t ectx = nullfs_get_patched_context(null_mp, ap->a_context); vp = ap->a_vp; lvp = NULLVPTOLOWERVP(vp); error = vnode_getwithref(lvp); if (error == 0) { - error = VNOP_READLINK(lvp, ap->a_uio, ap->a_context); + error = VNOP_READLINK(lvp, ap->a_uio, ectx); vnode_put(lvp); if (error) { @@ -760,6 +848,7 @@ nullfs_readlink(struct vnop_readlink_args * ap) } } + nullfs_cleanup_patched_context(null_mp, ectx); return error; } @@ -782,6 +871,7 @@ nullfs_mmap(struct vnop_mmap_args * args) { int error; struct vnode *vp, *lvp; + struct null_mount * null_mp = MOUNTTONULLMOUNT(vnode_mount(args->a_vp)); NULLFSDEBUG("%s %p\n", __FUNCTION__, args->a_vp); @@ -789,14 +879,16 @@ nullfs_mmap(struct vnop_mmap_args * args) return 0; /* nothing extra needed */ } + vfs_context_t ectx = nullfs_get_patched_context(null_mp, args->a_context); vp = args->a_vp; lvp = NULLVPTOLOWERVP(vp); error = vnode_getwithref(lvp); if (error == 0) { - error = VNOP_MMAP(lvp, args->a_fflags, args->a_context); + error = VNOP_MMAP(lvp, args->a_fflags, ectx); vnode_put(lvp); } + nullfs_cleanup_patched_context(null_mp, ectx); return error; } @@ -805,6 +897,7 @@ nullfs_mnomap(struct vnop_mnomap_args * args) { int error; struct 
vnode *vp, *lvp; + struct null_mount * null_mp = MOUNTTONULLMOUNT(vnode_mount(args->a_vp)); NULLFSDEBUG("%s %p\n", __FUNCTION__, args->a_vp); @@ -812,14 +905,16 @@ nullfs_mnomap(struct vnop_mnomap_args * args) return 0; /* nothing extra needed */ } + vfs_context_t ectx = nullfs_get_patched_context(null_mp, args->a_context); vp = args->a_vp; lvp = NULLVPTOLOWERVP(vp); error = vnode_getwithref(lvp); if (error == 0) { - error = VNOP_MNOMAP(lvp, args->a_context); + error = VNOP_MNOMAP(lvp, ectx); vnode_put(lvp); } + nullfs_cleanup_patched_context(null_mp, ectx); return error; } @@ -828,6 +923,7 @@ nullfs_getxattr(struct vnop_getxattr_args * args) { int error; struct vnode *vp, *lvp; + struct null_mount * null_mp = MOUNTTONULLMOUNT(vnode_mount(args->a_vp)); NULLFSDEBUG("%s %p\n", __FUNCTION__, args->a_vp); @@ -835,14 +931,16 @@ nullfs_getxattr(struct vnop_getxattr_args * args) return ENOATTR; /* no xattrs on the special vnodes */ } + vfs_context_t ectx = nullfs_get_patched_context(null_mp, args->a_context); vp = args->a_vp; lvp = NULLVPTOLOWERVP(vp); error = vnode_getwithref(lvp); if (error == 0) { - error = VNOP_GETXATTR(lvp, args->a_name, args->a_uio, args->a_size, args->a_options, args->a_context); + error = VNOP_GETXATTR(lvp, args->a_name, args->a_uio, args->a_size, args->a_options, ectx); vnode_put(lvp); } + nullfs_cleanup_patched_context(null_mp, ectx); return error; } @@ -851,6 +949,7 @@ nullfs_listxattr(struct vnop_listxattr_args * args) { int error; struct vnode *vp, *lvp; + struct null_mount * null_mp = MOUNTTONULLMOUNT(vnode_mount(args->a_vp)); NULLFSDEBUG("%s %p\n", __FUNCTION__, args->a_vp); @@ -858,14 +957,16 @@ nullfs_listxattr(struct vnop_listxattr_args * args) return 0; /* no xattrs on the special vnodes */ } + vfs_context_t ectx = nullfs_get_patched_context(null_mp, args->a_context); vp = args->a_vp; lvp = NULLVPTOLOWERVP(vp); error = vnode_getwithref(lvp); if (error == 0) { - error = VNOP_LISTXATTR(lvp, args->a_uio, args->a_size, args->a_options, args->a_context); + error = VNOP_LISTXATTR(lvp, args->a_uio, args->a_size, args->a_options, ectx); vnode_put(lvp); } + nullfs_cleanup_patched_context(null_mp, ectx); return error; } @@ -875,7 +976,7 @@ nullfs_pagein(struct vnop_pagein_args * ap) { int error = EIO; struct vnode *vp, *lvp; - + struct null_mount * null_mp = MOUNTTONULLMOUNT(vnode_mount(ap->a_vp)); NULLFSDEBUG("%s %p\n", __FUNCTION__, ap->a_vp); vp = ap->a_vp; @@ -885,6 +986,7 @@ nullfs_pagein(struct vnop_pagein_args * ap) return ENOTSUP; } + vfs_context_t ectx = nullfs_get_patched_context(null_mp, ap->a_context); /* * Ask VM/UBC/VFS to do our bidding */ @@ -920,7 +1022,7 @@ nullfs_pagein(struct vnop_pagein_args * ap) (void)ubc_setsize(vp, lowersize); /* ignore failures, nothing can be done */ } - error = VNOP_READ(lvp, auio, ((ap->a_flags & UPL_IOSYNC) ? IO_SYNC : 0), ap->a_context); + error = VNOP_READ(lvp, auio, ((ap->a_flags & UPL_IOSYNC) ? 
IO_SYNC : 0), ectx); bytes_remaining = uio_resid(auio); if (bytes_remaining > 0 && bytes_remaining <= (user_ssize_t)ap->a_size) { @@ -966,6 +1068,8 @@ exit_no_unmap: } else if ((ap->a_flags & UPL_NOCOMMIT) == 0) { ubc_upl_abort_range(ap->a_pl, ap->a_pl_offset, (upl_size_t)ap->a_size, UPL_ABORT_ERROR | UPL_ABORT_FREE_ON_EMPTY); } + + nullfs_cleanup_patched_context(null_mp, ectx); return error; } @@ -975,13 +1079,14 @@ nullfs_read(struct vnop_read_args * ap) int error = EIO; struct vnode *vp, *lvp; - + struct null_mount * null_mp = MOUNTTONULLMOUNT(vnode_mount(ap->a_vp)); NULLFSDEBUG("%s %p\n", __FUNCTION__, ap->a_vp); if (nullfs_checkspecialvp(ap->a_vp)) { return ENOTSUP; /* the special vnodes can't be read */ } + vfs_context_t ectx = nullfs_get_patched_context(null_mp, ap->a_context); vp = ap->a_vp; lvp = NULLVPTOLOWERVP(vp); @@ -1003,13 +1108,15 @@ nullfs_read(struct vnop_read_args * ap) * Now ask VM/UBC/VFS to do our bidding */ - error = VNOP_READ(lvp, ap->a_uio, ap->a_ioflag, ap->a_context); + error = VNOP_READ(lvp, ap->a_uio, ap->a_ioflag, ectx); if (error) { NULLFSDEBUG("VNOP_READ failed: %d\n", error); } end: vnode_put(lvp); } + + nullfs_cleanup_patched_context(null_mp, ectx); return error; } diff --git a/bsd/miscfs/nullfs/nullfs.h b/bsd/miscfs/nullfs/nullfs.h index 38b55fd53..4dd8d50f6 100644 --- a/bsd/miscfs/nullfs/nullfs.h +++ b/bsd/miscfs/nullfs/nullfs.h @@ -78,10 +78,11 @@ #include #endif -//#define NULLFS_DEBUG 0 +// #define NULLFS_DEBUG 0 #define NULLM_CACHE 0x0001 #define NULLM_CASEINSENSITIVE 0x0000000000000002 +#define NULLM_UNVEIL 0x1ULL << 2 typedef int (*vop_t)(void *); @@ -97,6 +98,12 @@ struct null_mount { * before we build the shadow vnode lazily*/ lck_mtx_t nullm_lock; /* lock to protect vps above */ uint64_t nullm_flags; + uid_t uid; + gid_t gid; +}; + +struct null_mount_conf { + uint64_t flags; }; #ifdef KERNEL @@ -144,6 +151,9 @@ void null_hashrem(struct null_node * xp); int nullfs_getbackingvnode(vnode_t in_vp, vnode_t* out_vpp); +vfs_context_t nullfs_get_patched_context(struct null_mount * null_mp, vfs_context_t ctx); +void nullfs_cleanup_patched_context(struct null_mount * null_mp, vfs_context_t ctx); + #define NULLVPTOLOWERVP(vp) (VTONULL(vp)->null_lowervp) #define NULLVPTOLOWERVID(vp) (VTONULL(vp)->null_lowervid) #define NULLVPTOMYVID(vp) (VTONULL(vp)->null_myvid) diff --git a/bsd/miscfs/specfs/spec_vnops.c b/bsd/miscfs/specfs/spec_vnops.c index 300894634..eaa194215 100644 --- a/bsd/miscfs/specfs/spec_vnops.c +++ b/bsd/miscfs/specfs/spec_vnops.c @@ -95,6 +95,7 @@ #include #include +#include #include #include @@ -105,6 +106,10 @@ extern boolean_t iskmemdev(dev_t dev); extern int bpfkqfilter(dev_t dev, struct knote *kn); extern int ptsd_kqfilter(dev_t, struct knote *); extern int ptmx_kqfilter(dev_t, struct knote *); +#if CONFIG_PHYS_WRITE_ACCT +uint64_t kernel_pm_writes; // to track the sync writes occuring during power management transitions +#endif /* CONFIG_PHYS_WRITE_ACCT */ + struct vnode *speclisth[SPECHSZ]; @@ -295,6 +300,8 @@ set_fsblocksize(struct vnode *vp) int spec_open(struct vnop_open_args *ap) { + static const char *OPEN_MOUNTED_ENTITLEMENT = "com.apple.private.vfs.open-mounted"; + struct proc *p = vfs_context_proc(ap->a_context); kauth_cred_t cred = vfs_context_ucred(ap->a_context); struct vnode *vp = ap->a_vp; @@ -366,7 +373,7 @@ spec_open(struct vnop_open_args *ap) vnode_lock(vp); - vp->v_un.vu_specinfo->si_isssd = isssd; + vp->v_un.vu_specinfo->si_isssd = isssd ? 
1 : 0; vp->v_un.vu_specinfo->si_devbsdunit = devbsdunit; vp->v_un.vu_specinfo->si_throttle_mask = throttle_mask; vp->v_un.vu_specinfo->si_throttleable = 1; @@ -399,8 +406,10 @@ spec_open(struct vnop_open_args *ap) * Do not allow opens of block devices that are * currently mounted. */ - if ((error = vfs_mountedon(vp))) { - return error; + if (!IOTaskHasEntitlement(current_task(), OPEN_MOUNTED_ENTITLEMENT)) { + if ((error = vfs_mountedon(vp))) { + return error; + } } devsw_lock(dev, S_IFBLK); @@ -464,9 +473,9 @@ spec_read(struct vnop_read_args *ap) struct uio *uio = ap->a_uio; struct buf *bp; daddr64_t bn, nextbn; - long bsize, bscale; + long bscale; int devBlockSize = 0; - int n, on; + size_t bsize, n, on; int error = 0; dev_t dev; @@ -578,9 +587,9 @@ spec_read(struct vnop_read_args *ap) buf_brelse(bp); return error; } - n = min((unsigned)(n - on), uio_resid(uio)); + n = MIN((n - on), (size_t)uio_resid(uio)); - error = uiomove((char *)buf_dataptr(bp) + on, n, uio); + error = uiomove((char *)buf_dataptr(bp) + on, (int)n, uio); if (n + on == bsize) { buf_markaged(bp); } @@ -606,10 +615,10 @@ spec_write(struct vnop_write_args *ap) struct uio *uio = ap->a_uio; struct buf *bp; daddr64_t bn; - int bsize, blkmask, bscale; + int blkmask, bscale; int io_sync; int devBlockSize = 0; - int n, on; + size_t bsize, n, on; int error = 0; dev_t dev; @@ -705,7 +714,7 @@ spec_write(struct vnop_write_args *ap) bn = (daddr64_t)((uio->uio_offset / devBlockSize) & ~blkmask); on = uio->uio_offset % bsize; - n = min((unsigned)(bsize - on), uio_resid(uio)); + n = MIN((bsize - on), (size_t)uio_resid(uio)); /* * Use buf_getblk() as an optimization IFF: @@ -725,9 +734,9 @@ spec_write(struct vnop_write_args *ap) } if (n == bsize) { - bp = buf_getblk(vp, bn, bsize, 0, 0, BLK_WRITE); + bp = buf_getblk(vp, bn, (int)bsize, 0, 0, BLK_WRITE); } else { - error = (int)buf_bread(vp, bn, bsize, NOCRED, &bp); + error = (int)buf_bread(vp, bn, (int)bsize, NOCRED, &bp); } /* Translate downstream error for upstream, if needed */ @@ -738,9 +747,9 @@ spec_write(struct vnop_write_args *ap) buf_brelse(bp); return error; } - n = min(n, bsize - buf_resid(bp)); + n = MIN(n, bsize - buf_resid(bp)); - error = uiomove((char *)buf_dataptr(bp) + on, n, uio); + error = uiomove((char *)buf_dataptr(bp) + on, (int)n, uio); if (error) { buf_brelse(bp); return error; @@ -1151,7 +1160,7 @@ throttle_timer_start(struct _throttle_io_info_t *info, boolean_t update_io_count } elapsed = min_target; timevalsub(&elapsed, &now); - target_msecs = elapsed.tv_sec * 1000 + elapsed.tv_usec / 1000; + target_msecs = (int)(elapsed.tv_sec * 1000 + elapsed.tv_usec / 1000); if (target_msecs <= 0) { /* @@ -1285,7 +1294,7 @@ throttle_add_to_list(struct _throttle_io_info_t *info, uthread_t ut, int mylevel TAILQ_INSERT_HEAD(&info->throttle_uthlist[mylevel], ut, uu_throttlelist); } - ut->uu_on_throttlelist = mylevel; + ut->uu_on_throttlelist = (int8_t)mylevel; if (start_timer == TRUE) { /* we may need to start or rearm the timer */ @@ -1605,7 +1614,13 @@ throttle_info_ref_by_mask(uint64_t throttle_mask, throttle_info_handle_t *thrott int dev_index; struct _throttle_io_info_t *info; - if (throttle_info_handle == NULL) { + /* + * The 'throttle_mask' is not expected to be 0 otherwise num_trailing_0() + * would return value of 64 and this will cause '_throttle_io_info' to + * go out of bounds as '_throttle_io_info' is only LOWPRI_MAX_NUM_DEV (64) + * elements long. 
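// (Editorial worked example, not part of the original patch.) The mask is turned
// into an index into _throttle_io_info[] by counting its trailing zero bits
// (num_trailing_0()). For a single-device mask such as 0x8, num_trailing_0(0x8) == 3
// and the lookup stays in bounds; for a mask of 0, num_trailing_0(0) == 64, one past
// the last of the LOWPRI_MAX_NUM_DEV (64) entries, which is why a zero mask is now
// rejected with EINVAL alongside the NULL-handle check.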
+ */ + if (throttle_info_handle == NULL || throttle_mask == 0) { return EINVAL; } @@ -2418,6 +2433,10 @@ throttle_lowpri_window(void) int upl_get_cached_tier(void *); #endif +#if CONFIG_PHYS_WRITE_ACCT +extern thread_t pm_sync_thread; +#endif /* CONFIG_PHYS_WRITE_ACCT */ + int spec_strategy(struct vnop_strategy_args *ap) { @@ -2436,15 +2455,21 @@ spec_strategy(struct vnop_strategy_args *ap) boolean_t upgrade = FALSE; int code = 0; -#if !CONFIG_EMBEDDED +#if CONFIG_DELAY_IDLE_SLEEP proc_t curproc = current_proc(); -#endif /* !CONFIG_EMBEDDED */ +#endif /* CONFIG_DELAY_IDLE_SLEEP */ bp = ap->a_bp; bdev = buf_device(bp); mp = buf_vnode(bp)->v_mount; bap = &bp->b_attr; +#if CONFIG_PHYS_WRITE_ACCT + if (current_thread() == pm_sync_thread) { + OSAddAtomic64(buf_count(bp), (SInt64 *)&(kernel_pm_writes)); + } +#endif /* CONFIG_PHYS_WRITE_ACCT */ + #if CONFIG_IOSCHED if (bp->b_flags & B_CLUSTER) { io_tier = upl_get_cached_tier(bp->b_upl); @@ -2517,11 +2542,11 @@ spec_strategy(struct vnop_strategy_args *ap) bap->ba_flags |= BA_PASSIVE; } -#if !CONFIG_EMBEDDED +#if CONFIG_DELAY_IDLE_SLEEP if ((curproc != NULL) && ((curproc->p_flag & P_DELAYIDLESLEEP) == P_DELAYIDLESLEEP)) { bap->ba_flags |= BA_DELAYIDLESLEEP; } -#endif /* !CONFIG_EMBEDDED */ +#endif /* CONFIG_DELAY_IDLE_SLEEP */ bflags = bp->b_flags; @@ -2697,15 +2722,6 @@ spec_close(struct vnop_close_args *ap) session_unlock(sessp); if (tp != TTY_NULL) { - /* - * We may have won a race with a proc_exit - * of the session leader, the winner - * clears the flag (even if not set) - */ - tty_lock(tp); - ttyclrpgrphup(tp); - tty_unlock(tp); - ttyfree(tp); } devsw_lock(dev, S_IFCHR); @@ -2904,7 +2920,7 @@ spec_knote_select_and_link(struct knote *kn) uth = get_bsdthread_info(current_thread()); ctx = vfs_context_current(); - vp = (vnode_t)kn->kn_fp->f_fglob->fg_data; + vp = (vnode_t)kn->kn_fp->fp_glob->fg_data; int error = vnode_getwithvid(vp, vnode_vid(vp)); if (error != 0) { @@ -3016,10 +3032,10 @@ filt_spec_common(struct knote *kn, struct kevent_qos_s *kev, int selres) int ret; if (kn->kn_vnode_use_ofst) { - if (kn->kn_fp->f_fglob->fg_offset >= (uint32_t)selres) { + if (kn->kn_fp->fp_glob->fg_offset >= (uint32_t)selres) { data = 0; } else { - data = ((uint32_t)selres) - kn->kn_fp->f_fglob->fg_offset; + data = ((uint32_t)selres) - kn->kn_fp->fp_glob->fg_offset; } } else { data = selres; @@ -3040,7 +3056,7 @@ filt_specattach(struct knote *kn, __unused struct kevent_qos_s *kev) vnode_t vp; dev_t dev; - vp = (vnode_t)kn->kn_fp->f_fglob->fg_data; /* Already have iocount, and vnode is alive */ + vp = (vnode_t)kn->kn_fp->fp_glob->fg_data; /* Already have iocount, and vnode is alive */ assert(vnode_ischr(vp)); @@ -3135,7 +3151,7 @@ filt_specprocess(struct knote *kn, struct kevent_qos_s *kev) uth = get_bsdthread_info(current_thread()); ctx = vfs_context_current(); - vp = (vnode_t)kn->kn_fp->f_fglob->fg_data; + vp = (vnode_t)kn->kn_fp->fp_glob->fg_data; error = vnode_getwithvid(vp, vnode_vid(vp)); if (error != 0) { diff --git a/bsd/net/Makefile b/bsd/net/Makefile index fb8e28cd1..50839156f 100644 --- a/bsd/net/Makefile +++ b/bsd/net/Makefile @@ -77,6 +77,10 @@ PRIVATE_DATAFILES = \ frame802154.h \ nat464_utils.h +DRIVERKIT_DATAFILES = \ + if_media.h \ + ethernet.h + PRIVATE_KERNELFILES = $(filter-out radix.h,${KERNELFILES}) \ bpfdesc.h ppp_comp.h \ zlib.h bpf_compat.h net_osdep.h \ @@ -84,6 +88,8 @@ PRIVATE_KERNELFILES = $(filter-out radix.h,${KERNELFILES}) \ INSTALL_MI_LIST = ${DATAFILES} +INSTALL_DRIVERKIT_MI_LIST = ${DRIVERKIT_DATAFILES} + INSTALL_MI_DIR = net 
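# (Editorial sketch, not part of the patch.) Exporting a further header to the
# DriverKit SDK would follow the same pattern as if_media.h and ethernet.h above:
# append it to DRIVERKIT_DATAFILES (the extra name here is purely hypothetical) and
# INSTALL_DRIVERKIT_MI_LIST picks it up through ${DRIVERKIT_DATAFILES}:
#
#   DRIVERKIT_DATAFILES = \
#       if_media.h \
#       ethernet.h \
#       if_example.h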
EXPORT_MI_LIST = ${INSTALL_MI_LIST} ${KERNELFILES} diff --git a/bsd/net/altq/altq_qfq.h b/bsd/net/altq/altq_qfq.h index 9bc5de99a..c553a3e81 100644 --- a/bsd/net/altq/altq_qfq.h +++ b/bsd/net/altq/altq_qfq.h @@ -31,6 +31,5 @@ #include #include -#include #endif /* _NET_ALTQ_ALTQ_QFQ_H_ */ diff --git a/bsd/net/bpf.c b/bsd/net/bpf.c index b855f3a48..37d4c7d6a 100644 --- a/bsd/net/bpf.c +++ b/bsd/net/bpf.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2019 Apple Inc. All rights reserved. + * Copyright (c) 2000-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -132,10 +132,6 @@ #include #include -#if CONFIG_MACF_NET -#include -#endif /* MAC_NET */ - #include extern int tvtohz(struct timeval *); @@ -173,11 +169,11 @@ SYSCTL_UINT(_debug, OID_AUTO, bpf_maxdevices, CTLFLAG_RW | CTLFLAG_LOCKED, * For OS X is off by default so process need to use the ioctl BPF_WANT_PKTAP * explicitly to be able to use DLT_PKTAP. */ -#if CONFIG_EMBEDDED +#if !XNU_TARGET_OS_OSX static unsigned int bpf_wantpktap = 1; -#else +#else /* XNU_TARGET_OS_OSX */ static unsigned int bpf_wantpktap = 0; -#endif +#endif /* XNU_TARGET_OS_OSX */ SYSCTL_UINT(_debug, OID_AUTO, bpf_wantpktap, CTLFLAG_RW | CTLFLAG_LOCKED, &bpf_wantpktap, 0, ""); @@ -253,7 +249,7 @@ select_fcn_t bpfselect; /* Darwin's cdevsw struct differs slightly from BSDs */ #define CDEV_MAJOR 23 -static struct cdevsw bpf_cdevsw = { +static const struct cdevsw bpf_cdevsw = { .d_open = bpfopen, .d_close = bpfclose, .d_read = bpfread, @@ -851,10 +847,6 @@ bpfopen(dev_t dev, int flags, __unused int fmt, d->bd_opened_by = p; uuid_generate(d->bd_uuid); -#if CONFIG_MACF_NET - mac_bpfdesc_label_init(d); - mac_bpfdesc_label_associate(kauth_cred_get(), d); -#endif bpf_dtab[minor(dev)] = d; /* Mark opened */ lck_mtx_unlock(bpf_mlock); @@ -952,9 +944,6 @@ bpfclose(dev_t dev, __unused int flags, __unused int fmt, bpf_detachd(d, 1); } selthreadclear(&d->bd_sel); -#if CONFIG_MACF_NET - mac_bpfdesc_label_destroy(d); -#endif thread_call_free(d->bd_thread_call); while (d->bd_hbuf_read != 0) { @@ -1428,10 +1417,6 @@ bpfwrite(dev_t dev, struct uio *uio, __unused int ioflag) return EMSGSIZE; } -#if CONFIG_MACF_NET - mac_mbuf_label_associate_bpfdesc(d, m); -#endif - bpf_set_packet_service_class(m, d->bd_traffic_class); lck_mtx_unlock(bpf_mlock); @@ -2799,11 +2784,6 @@ bpf_tap_imp( } } if (slen != 0) { -#if CONFIG_MACF_NET - if (mac_bpfdesc_check_receive(d, bp->bif_ifp) != 0) { - continue; - } -#endif catchpacket(d, bpf_pkt, slen, outbound); } bpf_pkt = bpf_pkt_saved; @@ -3718,20 +3698,6 @@ bpf_init(__unused void *unused) SYSINIT(bpfdev, SI_SUB_DRIVERS, SI_ORDER_MIDDLE + CDEV_MAJOR, bpf_drvinit, NULL); #endif -#if CONFIG_MACF_NET -struct label * -mac_bpfdesc_label_get(struct bpf_d *d) -{ - return d->bd_label; -} - -void -mac_bpfdesc_label_set(struct bpf_d *d, struct label *label) -{ - d->bd_label = label; -} -#endif - static int sysctl_bpf_maxbufsize SYSCTL_HANDLER_ARGS { diff --git a/bsd/net/bpf_filter.c b/bsd/net/bpf_filter.c index 6d5bcc587..984279474 100644 --- a/bsd/net/bpf_filter.c +++ b/bsd/net/bpf_filter.c @@ -83,7 +83,7 @@ extern unsigned int bpf_maxbufsize; static inline u_int32_t -get_word_from_buffers(u_char * cp, u_char * np, int num_from_cp) +get_word_from_buffers(u_char * cp, u_char * np, size_t num_from_cp) { u_int32_t val; @@ -174,7 +174,7 @@ bad: return 0; } -static u_int16_t +static uint16_t m_xhalf(struct mbuf *m, void * hdr, size_t hdrlen, bpf_u_int32 k, int *err) { size_t len; @@ -192,7 +192,7 @@ m_xhalf(struct mbuf *m, void * hdr, 
size_t hdrlen, bpf_u_int32 k, int *err) goto bad; } *err = 0; - return (cp[0] << 8) | mtod(m, u_char *)[0]; + return (uint16_t)((cp[0] << 8) | mtod(m, u_char *)[0]); bad: *err = 1; return 0; diff --git a/bsd/net/bpfdesc.h b/bsd/net/bpfdesc.h index ce9899f0a..531d9217b 100644 --- a/bsd/net/bpfdesc.h +++ b/bsd/net/bpfdesc.h @@ -135,9 +135,6 @@ struct bpf_d { int bd_seesent; /* true if bpf should see sent packets */ int bd_oflags; /* device open flags */ thread_call_t bd_thread_call; /* for BPF timeouts with select */ -#if CONFIG_MACF_NET - struct label * bd_label; /* MAC label for descriptor */ -#endif int bd_traffic_class; /* traffic service class */ int bd_flags; /* flags */ diff --git a/bsd/net/bridgestp.c b/bsd/net/bridgestp.c index fc64a1624..f16868afe 100644 --- a/bsd/net/bridgestp.c +++ b/bsd/net/bridgestp.c @@ -71,7 +71,6 @@ #include #include #include -//#include //#include #include #include diff --git a/bsd/net/classq/classq.c b/bsd/net/classq/classq.c index dad2552c3..dddaadcf6 100644 --- a/bsd/net/classq/classq.c +++ b/bsd/net/classq/classq.c @@ -134,7 +134,7 @@ _addq(class_queue_t *q, classq_pkt_t *pkt) /* add one or more packets at the tail of the queue */ void _addq_multi(class_queue_t *q, classq_pkt_t *pkt_head, classq_pkt_t *pkt_tail, - u_int32_t cnt, u_int32_t size) + u_int32_t cnt, u_int64_t size) { ASSERT(pkt_head->cp_ptype == qptype(q)); ASSERT(pkt_tail->cp_ptype == qptype(q)); diff --git a/bsd/net/classq/classq.h b/bsd/net/classq/classq.h index 33d64b75d..1ec52efea 100644 --- a/bsd/net/classq/classq.h +++ b/bsd/net/classq/classq.h @@ -189,7 +189,7 @@ SYSCTL_DECL(_net_classq); extern void _qinit(class_queue_t *, int, int, classq_pkt_type_t); extern void _addq(class_queue_t *, classq_pkt_t *); extern void _addq_multi(class_queue_t *, classq_pkt_t *, classq_pkt_t *, - u_int32_t, u_int32_t); + u_int32_t, u_int64_t); extern void _getq(class_queue_t *, classq_pkt_t *); extern void _getq_all(class_queue_t *, classq_pkt_t *, classq_pkt_t *, u_int32_t *, u_int64_t *); diff --git a/bsd/net/classq/classq_fq_codel.c b/bsd/net/classq/classq_fq_codel.c index 912302beb..f587e7689 100644 --- a/bsd/net/classq/classq_fq_codel.c +++ b/bsd/net/classq/classq_fq_codel.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-2018 Apple Inc. All rights reserved. + * Copyright (c) 2016-2020 Apple Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -38,6 +38,7 @@ #include #include #include +#include #include #include @@ -47,6 +48,8 @@ #include #include +#include + static uint32_t flowq_size; /* size of flowq */ static struct mcache *flowq_cache = NULL; /* mcache for flowq */ @@ -85,7 +88,7 @@ fq_alloc(classq_pkt_type_t ptype) fq_t *fq = NULL; fq = mcache_alloc(flowq_cache, MCR_SLEEP); if (fq == NULL) { - log(LOG_ERR, "%s: unable to allocate from flowq_cache\n"); + log(LOG_ERR, "%s: unable to allocate from flowq_cache\n", __func__); return NULL; } @@ -106,7 +109,7 @@ fq_destroy(fq_t *fq) mcache_free(flowq_cache, fq); } -static void +static inline void fq_detect_dequeue_stall(fq_if_t *fqs, fq_t *flowq, fq_if_classq_t *fq_cl, u_int64_t *now) { @@ -160,6 +163,62 @@ fq_head_drop(fq_if_t *fqs, fq_t *fq) pktsched_free_pkt(&pkt); } + +static int +fq_compressor(fq_if_t *fqs, fq_t *fq, fq_if_classq_t *fq_cl, + pktsched_pkt_t *pkt) +{ + classq_pkt_type_t ptype = fq->fq_ptype; + uint32_t comp_gencnt = 0; + uint64_t *pkt_timestamp; + uint64_t old_timestamp = 0; + uint32_t old_pktlen = 0; + struct ifclassq *ifq = fqs->fqs_ifq; + + if (__improbable(!tcp_do_ack_compression)) { + return 0; + } + + pktsched_get_pkt_vars(pkt, NULL, &pkt_timestamp, NULL, NULL, NULL, + &comp_gencnt); + + if (comp_gencnt == 0) { + return 0; + } + + fq_cl->fcl_stat.fcl_pkts_compressible++; + + if (fq_empty(fq)) { + return 0; + } + + if (ptype == QP_MBUF) { + struct mbuf *m = MBUFQ_LAST(&fq->fq_mbufq); + + if (comp_gencnt != m->m_pkthdr.comp_gencnt) { + return 0; + } + + /* If we got until here, we should merge/replace the segment */ + MBUFQ_REMOVE(&fq->fq_mbufq, m); + old_pktlen = m_pktlen(m); + old_timestamp = m->m_pkthdr.pkt_timestamp; + + IFCQ_CONVERT_LOCK(fqs->fqs_ifq); + m_freem(m); + } + + fq->fq_bytes -= old_pktlen; + fq_cl->fcl_stat.fcl_byte_cnt -= old_pktlen; + fq_cl->fcl_stat.fcl_pkt_cnt--; + IFCQ_DEC_LEN(ifq); + IFCQ_DEC_BYTES(ifq, old_pktlen); + + *pkt_timestamp = old_timestamp; + + return CLASSQEQ_COMPRESSED; +} + int fq_addq(fq_if_t *fqs, pktsched_pkt_t *pkt, fq_if_classq_t *fq_cl) { @@ -168,12 +227,18 @@ fq_addq(fq_if_t *fqs, pktsched_pkt_t *pkt, fq_if_classq_t *fq_cl) fq_t *fq = NULL; uint64_t *pkt_timestamp; volatile uint32_t *pkt_flags; - uint32_t pkt_flowid, pkt_tx_start_seq; + uint32_t pkt_flowid, cnt; uint8_t pkt_proto, pkt_flowsrc; + cnt = pkt->pktsched_pcnt; pktsched_get_pkt_vars(pkt, &pkt_flags, &pkt_timestamp, &pkt_flowid, - &pkt_flowsrc, &pkt_proto, &pkt_tx_start_seq); + &pkt_flowsrc, &pkt_proto, NULL); + /* + * XXX Not walking the chain to set this flag on every packet. + * This flag is only used for debugging. Nothing is affected if it's + * not set. + */ switch (pkt->pktsched_ptype) { case QP_MBUF: /* See comments in */ @@ -186,58 +251,69 @@ fq_addq(fq_if_t *fqs, pktsched_pkt_t *pkt, fq_if_classq_t *fq_cl) __builtin_unreachable(); } - if (*pkt_timestamp > 0) { - now = *pkt_timestamp; - } else { - struct timespec now_ts; - nanouptime(&now_ts); - now = (now_ts.tv_sec * NSEC_PER_SEC) + now_ts.tv_nsec; - *pkt_timestamp = now; - } + /* + * Timestamps for every packet must be set prior to entering this path. 
+ */ + now = *pkt_timestamp; + ASSERT(now > 0); /* find the flowq for this packet */ fq = fq_if_hash_pkt(fqs, pkt_flowid, pktsched_get_pkt_svc(pkt), now, TRUE, pkt->pktsched_ptype); - if (fq == NULL) { + if (__improbable(fq == NULL)) { + DTRACE_IP1(memfail__drop, fq_if_t *, fqs); /* drop the packet if we could not allocate a flow queue */ - fq_cl->fcl_stat.fcl_drop_memfailure++; - IFCQ_CONVERT_LOCK(fqs->fqs_ifq); + fq_cl->fcl_stat.fcl_drop_memfailure += cnt; return CLASSQEQ_DROP; } VERIFY(fq->fq_ptype == pkt->pktsched_ptype); fq_detect_dequeue_stall(fqs, fq, fq_cl, &now); - if (FQ_IS_DELAYHIGH(fq)) { + if (__improbable(FQ_IS_DELAYHIGH(fq))) { if ((fq->fq_flags & FQF_FLOWCTL_CAPABLE) && (*pkt_flags & PKTF_FLOW_ADV)) { fc_adv = 1; /* * If the flow is suspended or it is not - * TCP/QUIC, drop the packet + * TCP/QUIC, drop the chain. */ if ((pkt_proto != IPPROTO_TCP) && (pkt_proto != IPPROTO_QUIC)) { droptype = DTYPE_EARLY; - fq_cl->fcl_stat.fcl_drop_early++; + fq_cl->fcl_stat.fcl_drop_early += cnt; } + DTRACE_IP6(flow__adv, fq_if_t *, fqs, + fq_if_classq_t *, fq_cl, fq_t *, fq, + int, droptype, pktsched_pkt_t *, pkt, + uint32_t, cnt); } else { /* - * Need to drop a packet, instead of dropping this - * one, try to drop from the head of the queue + * Need to drop packets to make room for the new + * ones. Try to drop from the head of the queue + * instead of the latest packets. */ if (!fq_empty(fq)) { - fq_head_drop(fqs, fq); + uint32_t i; + + for (i = 0; i < cnt; i++) { + fq_head_drop(fqs, fq); + } droptype = DTYPE_NODROP; } else { droptype = DTYPE_EARLY; } - fq_cl->fcl_stat.fcl_drop_early++; + fq_cl->fcl_stat.fcl_drop_early += cnt; + + DTRACE_IP6(no__flow__adv, fq_if_t *, fqs, + fq_if_classq_t *, fq_cl, fq_t *, fq, + int, droptype, pktsched_pkt_t *, pkt, + uint32_t, cnt); } } /* Set the return code correctly */ - if (fc_adv == 1 && droptype != DTYPE_FORCED) { + if (__improbable(fc_adv == 1 && droptype != DTYPE_FORCED)) { if (fq_if_add_fcentry(fqs, pkt, pkt_flowid, pkt_flowsrc, fq_cl)) { fq->fq_flags |= FQF_FLOWCTL_ON; @@ -257,28 +333,42 @@ fq_addq(fq_if_t *fqs, pktsched_pkt_t *pkt, fq_if_classq_t *fq_cl) ret = CLASSQEQ_DROP_FC; fq_cl->fcl_stat.fcl_flow_control_fail++; } + DTRACE_IP3(fc__ret, fq_if_t *, fqs, int, droptype, int, ret); } /* - * If the queue length hits the queue limit, drop a packet from the - * front of the queue for a flow with maximum number of bytes. This - * will penalize heavy and unresponsive flows. It will also avoid a - * tail drop. + * If the queue length hits the queue limit, drop a chain with the + * same number of packets from the front of the queue for a flow with + * maximum number of bytes. This will penalize heavy and unresponsive + * flows. It will also avoid a tail drop. */ - if (droptype == DTYPE_NODROP && fq_if_at_drop_limit(fqs)) { + if (__improbable(droptype == DTYPE_NODROP && + fq_if_at_drop_limit(fqs))) { + uint32_t i; + if (fqs->fqs_large_flow == fq) { /* * Drop from the head of the current fq. Since a * new packet will be added to the tail, it is ok * to leave fq in place. 
*/ - fq_head_drop(fqs, fq); + DTRACE_IP5(large__flow, fq_if_t *, fqs, + fq_if_classq_t *, fq_cl, fq_t *, fq, + pktsched_pkt_t *, pkt, uint32_t, cnt); + + for (i = 0; i < cnt; i++) { + fq_head_drop(fqs, fq); + } } else { if (fqs->fqs_large_flow == NULL) { droptype = DTYPE_FORCED; - fq_cl->fcl_stat.fcl_drop_overflow++; + fq_cl->fcl_stat.fcl_drop_overflow += cnt; ret = CLASSQEQ_DROP; + DTRACE_IP5(no__large__flow, fq_if_t *, fqs, + fq_if_classq_t *, fq_cl, fq_t *, fq, + pktsched_pkt_t *, pkt, uint32_t, cnt); + /* * if this fq was freshly created and there * is nothing to enqueue, free it @@ -289,17 +379,41 @@ fq_addq(fq_if_t *fqs, pktsched_pkt_t *pkt, fq_if_classq_t *fq_cl) fq = NULL; } } else { - fq_if_drop_packet(fqs); + DTRACE_IP5(different__large__flow, + fq_if_t *, fqs, fq_if_classq_t *, fq_cl, + fq_t *, fq, pktsched_pkt_t *, pkt, + uint32_t, cnt); + + for (i = 0; i < cnt; i++) { + fq_if_drop_packet(fqs); + } } } } - if (droptype == DTYPE_NODROP) { - uint32_t pkt_len = pktsched_get_pkt_len(pkt); - fq_enqueue(fq, pkt->pktsched_pkt); - fq->fq_bytes += pkt_len; - fq_cl->fcl_stat.fcl_byte_cnt += pkt_len; - fq_cl->fcl_stat.fcl_pkt_cnt++; + if (__probable(droptype == DTYPE_NODROP)) { + uint32_t chain_len = pktsched_get_pkt_len(pkt); + + /* + * We do not compress if we are enqueuing a chain. + * Traversing the chain to look for acks would defeat the + * purpose of batch enqueueing. + */ + if (cnt == 1) { + ret = fq_compressor(fqs, fq, fq_cl, pkt); + if (ret != CLASSQEQ_COMPRESSED) { + ret = CLASSQEQ_SUCCESS; + } else { + fq_cl->fcl_stat.fcl_pkts_compressed++; + } + } + DTRACE_IP5(fq_enqueue, fq_if_t *, fqs, fq_if_classq_t *, fq_cl, + fq_t *, fq, pktsched_pkt_t *, pkt, uint32_t, cnt); + fq_enqueue(fq, pkt->pktsched_pkt, pkt->pktsched_tail, cnt); + + fq->fq_bytes += chain_len; + fq_cl->fcl_stat.fcl_byte_cnt += chain_len; + fq_cl->fcl_stat.fcl_pkt_cnt += cnt; /* * check if this queue will qualify to be the next @@ -307,7 +421,7 @@ fq_addq(fq_if_t *fqs, pktsched_pkt_t *pkt, fq_if_classq_t *fq_cl) */ fq_if_is_flow_heavy(fqs, fq); } else { - IFCQ_CONVERT_LOCK(fqs->fqs_ifq); + DTRACE_IP3(fq_drop, fq_if_t *, fqs, int, droptype, int, ret); return (ret != CLASSQEQ_SUCCESS) ? 
ret : CLASSQEQ_DROP; } @@ -367,7 +481,6 @@ fq_getq_flow(fq_if_t *fqs, fq_t *fq, pktsched_pkt_t *pkt) int64_t qdelay = 0; struct timespec now_ts; volatile uint32_t *pkt_flags; - uint32_t pkt_tx_start_seq; uint64_t *pkt_timestamp; fq_getq_flow_internal(fqs, fq, pkt); @@ -377,7 +490,7 @@ fq_getq_flow(fq_if_t *fqs, fq_t *fq, pktsched_pkt_t *pkt) } pktsched_get_pkt_vars(pkt, &pkt_flags, &pkt_timestamp, NULL, NULL, - NULL, &pkt_tx_start_seq); + NULL, NULL); nanouptime(&now_ts); now = (now_ts.tv_sec * NSEC_PER_SEC) + now_ts.tv_nsec; diff --git a/bsd/net/classq/classq_fq_codel.h b/bsd/net/classq/classq_fq_codel.h index 582e4a899..b8c4d10be 100644 --- a/bsd/net/classq/classq_fq_codel.h +++ b/bsd/net/classq/classq_fq_codel.h @@ -57,16 +57,16 @@ typedef struct flowq { #define FQF_NEW_FLOW 0x04 /* Currently on new flows queue */ #define FQF_OLD_FLOW 0x08 /* Currently on old flows queue */ #define FQF_FLOWCTL_ON 0x10 /* Currently flow controlled */ - u_int8_t fq_flags; /* flags */ - u_int8_t fq_sc_index; /* service_class index */ + uint8_t fq_flags; /* flags */ + uint8_t fq_sc_index; /* service_class index */ int16_t fq_deficit; /* Deficit for scheduling */ - u_int32_t fq_bytes; /* Number of bytes in the queue */ - u_int64_t fq_min_qdelay; /* min queue delay for Codel */ - u_int64_t fq_updatetime; /* next update interval */ - u_int64_t fq_getqtime; /* last dequeue time */ + uint32_t fq_bytes; /* Number of bytes in the queue */ + uint64_t fq_min_qdelay; /* min queue delay for Codel */ + uint64_t fq_updatetime; /* next update interval */ + uint64_t fq_getqtime; /* last dequeue time */ SLIST_ENTRY(flowq) fq_hashlink; /* for flow queue hash table */ STAILQ_ENTRY(flowq) fq_actlink; /* for new/old flow queues */ - u_int32_t fq_flowhash; /* Flow hash */ + uint32_t fq_flowhash; /* Flow hash */ classq_pkt_type_t fq_ptype; /* Packet type */ } fq_t; @@ -74,7 +74,8 @@ typedef struct flowq { #define fq_empty(_q) MBUFQ_EMPTY(&(_q)->fq_mbufq) -#define fq_enqueue(_q, _p) MBUFQ_ENQUEUE(&(_q)->fq_mbufq, _p.cp_mbuf) +#define fq_enqueue(_q, _h, _t, _c) \ + MBUFQ_ENQUEUE_MULTI(&(_q)->fq_mbufq, (_h).cp_mbuf, (_t).cp_mbuf) #define fq_dequeue(_q, _p) do { \ MBUFQ_DEQUEUE(&(_q)->fq_mbufq, (_p)->cp_mbuf); \ diff --git a/bsd/net/classq/classq_sfb.c b/bsd/net/classq/classq_sfb.c index 1e5963e5a..ffdc20922 100644 --- a/bsd/net/classq/classq_sfb.c +++ b/bsd/net/classq/classq_sfb.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011-2019 Apple Inc. All rights reserved. + * Copyright (c) 2011-2020 Apple Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -50,9 +50,7 @@ #include #include #include -#if INET6 #include -#endif #include #include @@ -190,8 +188,8 @@ #define SFB_SET_DELAY_HIGH(_sp_, _q_) do { \ (_sp_)->sfb_flags |= SFBF_DELAYHIGH; \ - (_sp_)->sfb_fc_threshold = max(SFB_MIN_FC_THRESHOLD_BYTES, \ - (qsize((_q_)) >> 3)); \ + (_sp_)->sfb_fc_threshold = ulmax(SFB_MIN_FC_THRESHOLD_BYTES, \ + (uint32_t)(qsize((_q_)) >> 3)); \ } while (0) #define SFB_QUEUE_DELAYBASED(_sp_) ((_sp_)->sfb_flags & SFBF_DELAYBASED) @@ -206,26 +204,17 @@ #define DEQUEUE_SPIKE(_new, _old) \ ((u_int64_t)ABS((int64_t)(_new) - (int64_t)(_old)) > ((_old) << 11)) -#define SFB_ZONE_MAX 32 /* maximum elements in zone */ -#define SFB_ZONE_NAME "classq_sfb" /* zone name */ - -#define SFB_BINS_ZONE_MAX 32 /* maximum elements in zone */ -#define SFB_BINS_ZONE_NAME "classq_sfb_bins" /* zone name */ - -#define SFB_FCL_ZONE_MAX 32 /* maximum elements in zone */ -#define SFB_FCL_ZONE_NAME "classq_sfb_fcl" /* zone name */ - /* Place the flow control entries in current bin on level 0 */ #define SFB_FC_LEVEL 0 -static unsigned int sfb_size; /* size of zone element */ -static struct zone *sfb_zone; /* zone for sfb */ +static ZONE_DECLARE(sfb_zone, "classq_sfb", + sizeof(struct sfb), ZC_ZFREE_CLEARMEM); -static unsigned int sfb_bins_size; /* size of zone element */ -static struct zone *sfb_bins_zone; /* zone for sfb_bins */ +static ZONE_DECLARE(sfb_bins_zone, "classq_sfb_bins", + sizeof(struct sfb_bins), ZC_ZFREE_CLEARMEM); -static unsigned int sfb_fcl_size; /* size of zone element */ -static struct zone *sfb_fcl_zone; /* zone for sfb_fc_lists */ +static ZONE_DECLARE(sfb_fcl_zone, "classq_sfb_fcl", + sizeof(struct sfb_fcl), ZC_ZFREE_CLEARMEM); /* internal function prototypes */ static u_int32_t sfb_random(struct sfb *); @@ -246,7 +235,7 @@ static void sfb_decrement_bin(struct sfb *, struct sfbbinstats *, static void sfb_increment_bin(struct sfb *, struct sfbbinstats *, struct timespec *, struct timespec *); static inline void sfb_dq_update_bins(struct sfb *, uint32_t, uint32_t, - struct timespec *, u_int32_t qsize); + struct timespec *, u_int64_t qsize); static inline void sfb_eq_update_bins(struct sfb *, uint32_t, uint32_t); static int sfb_drop_early(struct sfb *, uint32_t, u_int16_t *, struct timespec *); @@ -308,42 +297,8 @@ static struct sfb_time_tbl sfb_ttbl[] = { { .speed = 0, .holdtime = 0, .pboxtime = 0 } }; -void -sfb_init(void) -{ - _CASSERT(SFBF_ECN4 == CLASSQF_ECN4); - _CASSERT(SFBF_ECN6 == CLASSQF_ECN6); - - sfb_size = sizeof(struct sfb); - sfb_zone = zinit(sfb_size, SFB_ZONE_MAX * sfb_size, - 0, SFB_ZONE_NAME); - if (sfb_zone == NULL) { - panic("%s: failed allocating %s", __func__, SFB_ZONE_NAME); - /* NOTREACHED */ - } - zone_change(sfb_zone, Z_EXPAND, TRUE); - zone_change(sfb_zone, Z_CALLERACCT, TRUE); - - sfb_bins_size = sizeof(struct sfb_bins); - sfb_bins_zone = zinit(sfb_bins_size, SFB_BINS_ZONE_MAX * sfb_bins_size, - 0, SFB_BINS_ZONE_NAME); - if (sfb_bins_zone == NULL) { - panic("%s: failed allocating %s", __func__, SFB_BINS_ZONE_NAME); - /* NOTREACHED */ - } - zone_change(sfb_bins_zone, Z_EXPAND, TRUE); - zone_change(sfb_bins_zone, Z_CALLERACCT, TRUE); - - sfb_fcl_size = sizeof(struct sfb_fcl); - sfb_fcl_zone = zinit(sfb_fcl_size, SFB_FCL_ZONE_MAX * sfb_fcl_size, - 0, SFB_FCL_ZONE_NAME); - if (sfb_fcl_zone == NULL) { - panic("%s: failed allocating %s", __func__, SFB_FCL_ZONE_NAME); - /* NOTREACHED */ - } - zone_change(sfb_fcl_zone, Z_EXPAND, TRUE); - zone_change(sfb_fcl_zone, Z_CALLERACCT, TRUE); -} 
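/*
 * A minimal sketch (illustrative only, not part of this change) of the zone
 * pattern the SFB code moves to here: the three zones are now declared at
 * compile time with ZONE_DECLARE() instead of being created at runtime via
 * zinit()/zone_change(), and callers obtain pre-zeroed elements through
 * zalloc_flags().  "example_zone" and "struct example" are assumed names.
 *
 *     static ZONE_DECLARE(example_zone, "example.zone.name",
 *         sizeof(struct example), ZC_ZFREE_CLEARMEM);
 *
 *     struct example *ep;
 *
 *     ep = zalloc_flags(example_zone, Z_WAITOK | Z_ZERO); // may block; element returned zero-filled
 *     ...
 *     zfree(example_zone, ep);
 */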
+static_assert(SFBF_ECN4 == CLASSQF_ECN4); +static_assert(SFBF_ECN6 == CLASSQF_ECN6); static u_int32_t sfb_random(struct sfb *sp) @@ -362,7 +317,7 @@ sfb_calc_holdtime(struct sfb *sp, u_int64_t outbw) } else if (outbw == 0) { holdtime = SFB_RANDOM(sp, HOLDTIME_MIN, HOLDTIME_MAX); } else { - unsigned int n, i; + uint64_t n, i; n = sfb_ttbl[0].holdtime; for (i = 0; sfb_ttbl[i].speed != 0; i++) { @@ -386,7 +341,7 @@ sfb_calc_pboxtime(struct sfb *sp, u_int64_t outbw) } else if (outbw == 0) { pboxtime = SFB_RANDOM(sp, PBOXTIME_MIN, PBOXTIME_MAX); } else { - unsigned int n, i; + uint64_t n, i; n = sfb_ttbl[0].pboxtime; for (i = 0; sfb_ttbl[i].speed != 0; i++) { @@ -446,27 +401,9 @@ sfb_alloc(struct ifnet *ifp, u_int32_t qid, u_int32_t qlim, u_int32_t flags) VERIFY(ifp != NULL && qlim > 0); - sp = zalloc(sfb_zone); - if (sp == NULL) { - log(LOG_ERR, "%s: SFB unable to allocate\n", if_name(ifp)); - return NULL; - } - bzero(sp, sfb_size); - - if ((sp->sfb_bins = zalloc(sfb_bins_zone)) == NULL) { - log(LOG_ERR, "%s: SFB unable to allocate bins\n", if_name(ifp)); - sfb_destroy(sp); - return NULL; - } - bzero(sp->sfb_bins, sfb_bins_size); - - if ((sp->sfb_fc_lists = zalloc(sfb_fcl_zone)) == NULL) { - log(LOG_ERR, "%s: SFB unable to allocate flow control lists\n", - if_name(ifp)); - sfb_destroy(sp); - return NULL; - } - bzero(sp->sfb_fc_lists, sfb_fcl_size); + sp = zalloc_flags(sfb_zone, Z_WAITOK | Z_ZERO); + sp->sfb_bins = zalloc_flags(sfb_bins_zone, Z_WAITOK | Z_ZERO); + sp->sfb_fc_lists = zalloc_flags(sfb_fcl_zone, Z_WAITOK | Z_ZERO); for (i = 0; i < SFB_BINS; ++i) { STAILQ_INIT(&SFB_FC_LIST(sp, i)->fclist); @@ -541,8 +478,9 @@ sfb_resetq(struct sfb *sp, cqev_t ev) if (ev != CLASSQ_EV_LINK_DOWN) { (*sp->sfb_bins)[0].fudge = sfb_random(sp); (*sp->sfb_bins)[1].fudge = sfb_random(sp); - sp->sfb_allocation = ((sfb_allocation == 0) ? - (sp->sfb_qlim / 3) : sfb_allocation); + sp->sfb_allocation = sfb_allocation == 0 ? + (uint16_t)(sp->sfb_qlim / 3) : + (uint16_t)sfb_allocation; sp->sfb_drop_thresh = sp->sfb_allocation + (sp->sfb_allocation >> 1); } @@ -594,7 +532,7 @@ sfb_getstats(struct sfb *sp, struct sfb_stats *sps) sps->current = sp->sfb_current; sps->target_qdelay = sp->sfb_target_qdelay; sps->min_estdelay = sp->sfb_min_qdelay; - sps->delay_fcthreshold = sp->sfb_fc_threshold; + sps->delay_fcthreshold = (uint32_t)sp->sfb_fc_threshold; sps->flags = sp->sfb_flags; net_timernsec(&sp->sfb_holdtime, &sp->sfb_stats.hold_time); @@ -823,7 +761,7 @@ sfb_increment_bin(struct sfb *sp, struct sfbbinstats *bin, struct timespec *ft, static inline void sfb_dq_update_bins(struct sfb *sp, uint32_t pkt_sfb_hash, uint32_t pkt_len, - struct timespec *now, u_int32_t qsize) + struct timespec *now, u_int64_t qsize) { #if SFB_LEVELS != 2 || SFB_FC_LEVEL != 0 int i; diff --git a/bsd/net/classq/classq_sfb.h b/bsd/net/classq/classq_sfb.h index 487a29f2e..76a95429d 100644 --- a/bsd/net/classq/classq_sfb.h +++ b/bsd/net/classq/classq_sfb.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011-2012 Apple Inc. All rights reserved. + * Copyright (c) 2011-2020 Apple Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -151,7 +151,6 @@ typedef struct sfb { struct sfbstats sfb_stats __attribute__((aligned(8))); } sfb_t; -extern void sfb_init(void); extern struct sfb *sfb_alloc(struct ifnet *, u_int32_t, u_int32_t, u_int32_t); extern void sfb_destroy(struct sfb *); extern int sfb_addq(struct sfb *, class_queue_t *, pktsched_pkt_t *, diff --git a/bsd/net/classq/classq_subr.c b/bsd/net/classq/classq_subr.c index d5af79b4e..d18e9c767 100644 --- a/bsd/net/classq/classq_subr.c +++ b/bsd/net/classq/classq_subr.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011-2019 Apple Inc. All rights reserved. + * Copyright (c) 2011-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -44,6 +44,7 @@ #include #include #include +#include #include @@ -63,8 +64,6 @@ SYSCTL_QUAD(_net_classq, OID_AUTO, update_interval, CTLFLAG_RW | CTLFLAG_LOCKED, &ifclassq_update_interval, "update interval in nanoseconds"); -static int32_t ifclassq_sched_fq_codel; - void classq_init(void) { @@ -72,13 +71,7 @@ classq_init(void) _CASSERT(MBUF_SC_BE == 0); _CASSERT(IFCQ_SC_MAX == MBUF_SC_MAX_CLASSES); - sfb_init(); - fq_codel_scheduler_init(); - - if (!PE_parse_boot_argn("fq_codel", &ifclassq_sched_fq_codel, - sizeof(ifclassq_sched_fq_codel))) { - ifclassq_sched_fq_codel = 1; - } + fq_codel_init(); } int @@ -101,10 +94,6 @@ ifclassq_setup(struct ifnet *ifp, u_int32_t sflags, boolean_t reuse) VERIFY(ifq->ifcq_flags == 0); VERIFY(ifq->ifcq_sflags == 0); VERIFY(ifq->ifcq_disc == NULL); - VERIFY(ifq->ifcq_enqueue == NULL); - VERIFY(ifq->ifcq_dequeue == NULL); - VERIFY(ifq->ifcq_dequeue_sc == NULL); - VERIFY(ifq->ifcq_request == NULL); if (ifp->if_eflags & IFEF_TXSTART) { u_int32_t maxlen = 0; @@ -144,7 +133,7 @@ ifclassq_teardown(struct ifnet *ifp) struct tb_profile tb = { .rate = 0, .percent = 0, .depth = 0 }; (void) ifclassq_tbr_set(ifq, &tb, FALSE); } - (void) pktsched_teardown(ifq); + pktsched_teardown(ifq); ifq->ifcq_flags = 0; } ifq->ifcq_sflags = 0; @@ -155,10 +144,6 @@ ifclassq_teardown(struct ifnet *ifp) VERIFY(ifq->ifcq_flags == 0); VERIFY(ifq->ifcq_sflags == 0); VERIFY(ifq->ifcq_disc == NULL); - VERIFY(ifq->ifcq_enqueue == NULL); - VERIFY(ifq->ifcq_dequeue == NULL); - VERIFY(ifq->ifcq_dequeue_sc == NULL); - VERIFY(ifq->ifcq_request == NULL); IFCQ_LEN(ifq) = 0; IFCQ_BYTES(ifq) = 0; IFCQ_MAXLEN(ifq) = 0; @@ -178,34 +163,7 @@ ifclassq_pktsched_setup(struct ifclassq *ifq) IFCQ_LOCK_ASSERT_HELD(ifq); VERIFY(ifp->if_eflags & IFEF_TXSTART); - switch (ifp->if_output_sched_model) { - case IFNET_SCHED_MODEL_DRIVER_MANAGED: - if (ifclassq_sched_fq_codel != 0) { - err = pktsched_setup(ifq, PKTSCHEDT_FQ_CODEL, - ifq->ifcq_sflags, ptype); - } else { - err = pktsched_setup(ifq, PKTSCHEDT_TCQ, - ifq->ifcq_sflags, ptype); - } - break; - - case IFNET_SCHED_MODEL_NORMAL: - if (ifclassq_sched_fq_codel != 0) { - err = pktsched_setup(ifq, PKTSCHEDT_FQ_CODEL, - ifq->ifcq_sflags, ptype); - } else { - err = pktsched_setup(ifq, PKTSCHEDT_QFQ, - ifq->ifcq_sflags, ptype); - } - break; - case IFNET_SCHED_MODEL_FQ_CODEL: - err = pktsched_setup(ifq, PKTSCHEDT_FQ_CODEL, - ifq->ifcq_sflags, ptype); - break; - default: - VERIFY(0); - /* NOTREACHED */ - } + err = pktsched_setup(ifq, PKTSCHEDT_FQ_CODEL, ifq->ifcq_sflags, ptype); return err; } @@ -238,9 +196,18 @@ ifclassq_get_len(struct ifclassq *ifq, mbuf_svc_class_t sc, u_int32_t *packets, VERIFY(packets != NULL); *packets = IFCQ_LEN(ifq); } else { + cqrq_stat_sc_t req = { sc, 0, 0 }; + VERIFY(MBUF_VALID_SC(sc)); VERIFY(packets != NULL && bytes != 
NULL); - IFCQ_LEN_SC(ifq, sc, packets, bytes, err); + + err = fq_if_request_classq(ifq, CLASSQRQ_STAT_SC, &req); + if (packets != NULL) { + *packets = req.packets; + } + if (bytes != NULL) { + *bytes = req.bytes; + } } IFCQ_UNLOCK(ifq); @@ -261,7 +228,7 @@ ifclassq_set_packet_metadata(struct ifclassq *ifq, struct ifnet *ifp, struct mbuf *m = p->cp_mbuf; m->m_pkthdr.pkt_flags |= PKTF_VALID_UNSENT_DATA; m->m_pkthdr.bufstatus_if = IFCQ_BYTES(ifq); - m->m_pkthdr.bufstatus_sndbuf = ifp->if_sndbyte_unsent; + m->m_pkthdr.bufstatus_sndbuf = (uint32_t)ifp->if_sndbyte_unsent; break; } @@ -274,23 +241,10 @@ ifclassq_set_packet_metadata(struct ifclassq *ifq, struct ifnet *ifp, } errno_t -ifclassq_enqueue(struct ifclassq *ifq, classq_pkt_t *p, boolean_t *pdrop) +ifclassq_enqueue(struct ifclassq *ifq, classq_pkt_t *head, classq_pkt_t *tail, + u_int32_t cnt, u_int32_t bytes, boolean_t *pdrop) { - errno_t err; - - switch (p->cp_ptype) { - case QP_MBUF: - IFCQ_LOCK_SPIN(ifq); - break; - - default: - IFCQ_LOCK(ifq); - break; - } - - IFCQ_ENQUEUE(ifq, p, err, pdrop); - IFCQ_UNLOCK(ifq); - return err; + return fq_if_enqueue_classq(ifq, head, tail, cnt, bytes, pdrop); } errno_t @@ -317,13 +271,12 @@ ifclassq_dequeue_common_default(struct ifclassq *ifq, mbuf_svc_class_t sc, classq_pkt_t *tail, u_int32_t *cnt, u_int32_t *len, boolean_t drvmgt) { struct ifnet *ifp = ifq->ifcq_ifp; - u_int32_t i = 0, l = 0, lock_spin = 1; + u_int32_t i = 0, l = 0; classq_pkt_t first = CLASSQ_PKT_INITIALIZER(first); classq_pkt_t last = CLASSQ_PKT_INITIALIZER(last); VERIFY(!drvmgt || MBUF_VALID_SC(sc)); - if (IFCQ_TBR_IS_ENABLED(ifq)) { goto dequeue_loop; } @@ -332,15 +285,11 @@ ifclassq_dequeue_common_default(struct ifclassq *ifq, mbuf_svc_class_t sc, * If the scheduler support dequeueing multiple packets at the * same time, call that one instead. 
*/ - if (drvmgt && ifq->ifcq_dequeue_sc_multi != NULL) { + if (drvmgt) { int err; - if (lock_spin) { - IFCQ_LOCK_SPIN(ifq); - } else { - IFCQ_LOCK(ifq); - } - err = ifq->ifcq_dequeue_sc_multi(ifq, sc, pkt_limit, + IFCQ_LOCK_SPIN(ifq); + err = fq_if_dequeue_sc_classq_multi(ifq, sc, pkt_limit, byte_limit, head, tail, cnt, len); IFCQ_UNLOCK(ifq); @@ -348,16 +297,11 @@ ifclassq_dequeue_common_default(struct ifclassq *ifq, mbuf_svc_class_t sc, err = EAGAIN; } return err; - } else if (ifq->ifcq_dequeue_multi != NULL) { + } else { int err; - if (lock_spin) { - IFCQ_LOCK_SPIN(ifq); - } else { - IFCQ_LOCK(ifq); - } - - err = ifq->ifcq_dequeue_multi(ifq, pkt_limit, byte_limit, + IFCQ_LOCK_SPIN(ifq); + err = fq_if_dequeue_classq_multi(ifq, pkt_limit, byte_limit, head, tail, cnt, len); IFCQ_UNLOCK(ifq); @@ -369,24 +313,20 @@ ifclassq_dequeue_common_default(struct ifclassq *ifq, mbuf_svc_class_t sc, dequeue_loop: - if (lock_spin) { - IFCQ_LOCK_SPIN(ifq); - } else { - IFCQ_LOCK(ifq); - } + IFCQ_LOCK_SPIN(ifq); while (i < pkt_limit && l < byte_limit) { if (drvmgt) { if (IFCQ_TBR_IS_ENABLED(ifq)) { IFCQ_TBR_DEQUEUE_SC(ifq, sc, head); } else { - IFCQ_DEQUEUE_SC(ifq, sc, head); + fq_if_dequeue_sc_classq(ifq, sc, head); } } else { if (IFCQ_TBR_IS_ENABLED(ifq)) { IFCQ_TBR_DEQUEUE(ifq, head); } else { - IFCQ_DEQUEUE(ifq, head); + fq_if_dequeue_classq(ifq, head); } } @@ -449,34 +389,23 @@ ifclassq_update(struct ifclassq *ifq, cqev_t ev) { IFCQ_LOCK_ASSERT_HELD(ifq); VERIFY(IFCQ_IS_READY(ifq)); - IFCQ_UPDATE(ifq, ev); + fq_if_request_classq(ifq, CLASSQRQ_EVENT, (void *)ev); } int -ifclassq_attach(struct ifclassq *ifq, u_int32_t type, void *discipline, - ifclassq_enq_func enqueue, ifclassq_deq_func dequeue, - ifclassq_deq_sc_func dequeue_sc, ifclassq_deq_multi_func dequeue_multi, - ifclassq_deq_sc_multi_func dequeue_sc_multi, ifclassq_req_func request) +ifclassq_attach(struct ifclassq *ifq, u_int32_t type, void *discipline) { IFCQ_LOCK_ASSERT_HELD(ifq); VERIFY(ifq->ifcq_disc == NULL); - VERIFY(enqueue != NULL); - VERIFY(request != NULL); ifq->ifcq_type = type; ifq->ifcq_disc = discipline; - ifq->ifcq_enqueue = enqueue; - ifq->ifcq_dequeue = dequeue; - ifq->ifcq_dequeue_sc = dequeue_sc; - ifq->ifcq_dequeue_multi = dequeue_multi; - ifq->ifcq_dequeue_sc_multi = dequeue_sc_multi; - ifq->ifcq_request = request; return 0; } -int +void ifclassq_detach(struct ifclassq *ifq) { IFCQ_LOCK_ASSERT_HELD(ifq); @@ -484,13 +413,6 @@ ifclassq_detach(struct ifclassq *ifq) VERIFY(ifq->ifcq_disc == NULL); ifq->ifcq_type = PKTSCHEDT_NONE; - ifq->ifcq_disc = NULL; - ifq->ifcq_enqueue = NULL; - ifq->ifcq_dequeue = NULL; - ifq->ifcq_dequeue_sc = NULL; - ifq->ifcq_request = NULL; - - return 0; } int @@ -632,9 +554,9 @@ ifclassq_tbr_dequeue_common(struct ifclassq *ifq, mbuf_svc_class_t sc, * ifcq_drain count is adjusted by the caller. 
*/ if (drvmgt) { - IFCQ_DEQUEUE_SC(ifq, sc, pkt); + fq_if_dequeue_sc_classq(ifq, sc, pkt); } else { - IFCQ_DEQUEUE(ifq, pkt); + fq_if_dequeue_classq(ifq, pkt); } if (pkt->cp_mbuf != NULL) { @@ -762,7 +684,7 @@ ifclassq_tbr_set(struct ifclassq *ifq, struct tb_profile *profile, if (tbr->tbr_rate > 0 && (ifp->if_flags & IFF_UP)) { struct timespec ts = - { 0, pktsched_abs_to_nsecs(tbr->tbr_filluptime) }; + { 0, (long)pktsched_abs_to_nsecs(tbr->tbr_filluptime) }; if (pktsched_verbose) { printf("%s: TBR calculated tokens %lld " "filluptime %llu ns\n", if_name(ifp), @@ -843,4 +765,5 @@ void ifclassq_reap_caches(boolean_t purge) { fq_codel_reap_caches(purge); + flowadv_reap_caches(purge); } diff --git a/bsd/net/classq/classq_util.c b/bsd/net/classq/classq_util.c index f009b4ee0..100943f63 100644 --- a/bsd/net/classq/classq_util.c +++ b/bsd/net/classq/classq_util.c @@ -74,9 +74,7 @@ #include #include #include -#if INET6 #include -#endif #include #include @@ -121,9 +119,7 @@ read_dsfield(struct mbuf *m, struct pf_mtag *t) return (u_int8_t)0; /* version mismatch! */ } ds_field = ip->ip_tos; - } -#if INET6 - else if (t->pftag_flags & PF_TAG_HDR_INET6) { + } else if (t->pftag_flags & PF_TAG_HDR_INET6) { struct ip6_hdr *ip6 = (struct ip6_hdr *)(void *)t->pftag_hdr; u_int32_t flowlabel; @@ -137,7 +133,6 @@ read_dsfield(struct mbuf *m, struct pf_mtag *t) } ds_field = (flowlabel >> 20) & 0xff; } -#endif return ds_field; } @@ -192,9 +187,7 @@ write_dsfield(struct mbuf *m, struct pf_mtag *t, u_int8_t dsfield) sum += (sum >> 16); /* add carry */ ip->ip_sum = htons(~sum & 0xffff); - } -#if INET6 - else if (t->pftag_flags & PF_TAG_HDR_INET6) { + } else if (t->pftag_flags & PF_TAG_HDR_INET6) { struct ip6_hdr *ip6 = (struct ip6_hdr *)t->pftag_hdr; u_int32_t flowlabel; @@ -209,7 +202,6 @@ write_dsfield(struct mbuf *m, struct pf_mtag *t, u_int8_t dsfield) flowlabel = (flowlabel & 0xf03fffff) | (dsfield << 20); ip6->ip6_flow = htonl(flowlabel); } -#endif } /* @@ -290,7 +282,6 @@ mark_ecn(struct mbuf *m, struct pf_mtag *t, int flags) return 1; } break; -#if INET6 case AF_INET6: if (flags & CLASSQF_ECN6) { /* REDF_ECN6 == BLUEF_ECN6 */ struct ip6_hdr *ip6 = hdr; @@ -320,7 +311,6 @@ mark_ecn(struct mbuf *m, struct pf_mtag *t, int flags) return 1; } break; -#endif /* INET6 */ } /* not marked */ diff --git a/bsd/net/classq/if_classq.h b/bsd/net/classq/if_classq.h index 2de9ac9b1..15e6b6bb4 100644 --- a/bsd/net/classq/if_classq.h +++ b/bsd/net/classq/if_classq.h @@ -155,13 +155,6 @@ struct ifclassq { void *cl; } ifcq_disc_slots[IFCQ_SC_MAX]; /* for discipline use */ - ifclassq_enq_func ifcq_enqueue; - ifclassq_deq_func ifcq_dequeue; - ifclassq_deq_sc_func ifcq_dequeue_sc; - ifclassq_deq_multi_func ifcq_dequeue_multi; - ifclassq_deq_sc_multi_func ifcq_dequeue_sc_multi; - ifclassq_req_func ifcq_request; - /* token bucket regulator */ struct tb_regulator ifcq_tbr; /* TBR */ }; @@ -186,6 +179,8 @@ struct ifclassq { #define CLASSQEQ_DROP_FC 2 /* packet needs to be dropped due to suspension; give flow control feedback */ #define CLASSQEQ_DROP_SP 3 +/* packet has been compressed with another one */ +#define CLASSQEQ_COMPRESSED 4 /* interface event argument for CLASSQRQ_EVENT */ typedef enum cqev { @@ -198,8 +193,7 @@ typedef enum cqev { } cqev_t; #endif /* BSD_KERNEL_PRIVATE */ -#include -#include +#include #include #ifdef __cplusplus @@ -211,11 +205,7 @@ struct if_ifclassq_stats { struct pktcntr ifqs_xmitcnt; struct pktcntr ifqs_dropcnt; u_int32_t ifqs_scheduler; - union { - struct tcq_classstats ifqs_tcq_stats; - struct 
qfq_classstats ifqs_qfq_stats; - struct fq_codel_classstats ifqs_fq_codel_stats; - }; + struct fq_codel_classstats ifqs_fq_codel_stats; } __attribute__((aligned(8))); #ifdef __cplusplus @@ -249,18 +239,6 @@ struct if_ifclassq_stats { /* * For ifclassq operations */ -#define IFCQ_ENQUEUE(_ifq, _p, _err, _drop) do { \ - (_err) = (*(_ifq)->ifcq_enqueue)(_ifq, _p, _drop); \ -} while (0) - -#define IFCQ_DEQUEUE(_ifq, _p) do { \ - (*(_ifq)->ifcq_dequeue)(_ifq, _p); \ -} while (0) - -#define IFCQ_DEQUEUE_SC(_ifq, _sc, _p) do { \ - (*(_ifq)->ifcq_dequeue_sc)(_ifq, _sc, _p); \ -} while (0) - #define IFCQ_TBR_DEQUEUE(_ifcq, _p) do { \ ifclassq_tbr_dequeue(_ifcq, _p); \ } while (0) @@ -269,49 +247,13 @@ struct if_ifclassq_stats { ifclassq_tbr_dequeue_sc(_ifcq, _sc, _p); \ } while (0) -#define IFCQ_PURGE(_ifq) do { \ - (void) (*(_ifq)->ifcq_request)(_ifq, CLASSQRQ_PURGE, NULL); \ -} while (0) - -#define IFCQ_PURGE_SC(_ifq, _sc, _flow, _packets, _bytes) do { \ - cqrq_purge_sc_t _req = { _sc, _flow, 0, 0 }; \ - (void) (*(_ifq)->ifcq_request)(_ifq, CLASSQRQ_PURGE_SC, &_req); \ - (_packets) = _req.packets; \ - (_bytes) = _req.bytes; \ -} while (0) - -#define IFCQ_UPDATE(_ifq, _ev) do { \ - (void) (*(_ifq)->ifcq_request)(_ifq, CLASSQRQ_EVENT, \ - (void *)(_ev)); \ -} while (0) - -#define IFCQ_SET_THROTTLE(_ifq, _level, _err) do { \ - cqrq_throttle_t _req = { 1, _level }; \ - (_err) = (*(_ifq)->ifcq_request) \ - (_ifq, CLASSQRQ_THROTTLE, &_req); \ -} while (0) - -#define IFCQ_GET_THROTTLE(_ifq, _level, _err) do { \ - cqrq_throttle_t _req = { 0, IFNET_THROTTLE_OFF }; \ - (_err) = (*(_ifq)->ifcq_request) \ - (_ifq, CLASSQRQ_THROTTLE, &_req); \ - (_level) = _req.level; \ -} while (0) - -#define IFCQ_LEN_SC(_ifq, _sc, _packets, _bytes, _err) do { \ - cqrq_stat_sc_t _req = { _sc, 0, 0 }; \ - (_err) = (*(ifq)->ifcq_request)(_ifq, CLASSQRQ_STAT_SC, &_req); \ - if ((_packets) != NULL) \ - (*(_packets)) = _req.packets; \ - if ((_bytes) != NULL) \ - (*(_bytes)) = _req.bytes; \ -} while (0) - #define IFCQ_LEN(_ifcq) ((_ifcq)->ifcq_len) #define IFCQ_QFULL(_ifcq) (IFCQ_LEN(_ifcq) >= (_ifcq)->ifcq_maxlen) #define IFCQ_IS_EMPTY(_ifcq) (IFCQ_LEN(_ifcq) == 0) #define IFCQ_INC_LEN(_ifcq) (IFCQ_LEN(_ifcq)++) #define IFCQ_DEC_LEN(_ifcq) (IFCQ_LEN(_ifcq)--) +#define IFCQ_ADD_LEN(_ifcq, _len) (IFCQ_LEN(_ifcq) += (_len)) +#define IFCQ_SUB_LEN(_ifcq, _len) (IFCQ_LEN(_ifcq) -= (_len)) #define IFCQ_MAXLEN(_ifcq) ((_ifcq)->ifcq_maxlen) #define IFCQ_SET_MAXLEN(_ifcq, _len) ((_ifcq)->ifcq_maxlen = (_len)) #define IFCQ_TARGET_QDELAY(_ifcq) ((_ifcq)->ifcq_target_qdelay) @@ -338,7 +280,8 @@ extern void ifclassq_set_maxlen(struct ifclassq *, u_int32_t); extern u_int32_t ifclassq_get_maxlen(struct ifclassq *); extern int ifclassq_get_len(struct ifclassq *, mbuf_svc_class_t, u_int32_t *, u_int32_t *); -extern errno_t ifclassq_enqueue(struct ifclassq *, classq_pkt_t *, boolean_t *); +extern errno_t ifclassq_enqueue(struct ifclassq *, classq_pkt_t *, + classq_pkt_t *, u_int32_t, u_int32_t, boolean_t *); extern errno_t ifclassq_dequeue(struct ifclassq *, u_int32_t, u_int32_t, classq_pkt_t *, classq_pkt_t *, u_int32_t *, u_int32_t *); extern errno_t ifclassq_dequeue_sc(struct ifclassq *, mbuf_svc_class_t, @@ -348,10 +291,8 @@ extern void *ifclassq_poll(struct ifclassq *, classq_pkt_type_t *); extern void *ifclassq_poll_sc(struct ifclassq *, mbuf_svc_class_t, classq_pkt_type_t *); extern void ifclassq_update(struct ifclassq *, cqev_t); -extern int ifclassq_attach(struct ifclassq *, u_int32_t, void *, - ifclassq_enq_func, ifclassq_deq_func, 
ifclassq_deq_sc_func, - ifclassq_deq_multi_func, ifclassq_deq_sc_multi_func, ifclassq_req_func); -extern int ifclassq_detach(struct ifclassq *); +extern int ifclassq_attach(struct ifclassq *, u_int32_t, void *); +extern void ifclassq_detach(struct ifclassq *); extern int ifclassq_getqstats(struct ifclassq *, u_int32_t, void *, u_int32_t *); extern const char *ifclassq_ev2str(cqev_t); diff --git a/bsd/net/content_filter.c b/bsd/net/content_filter.c index 68422f880..d0f3b06b3 100644 --- a/bsd/net/content_filter.c +++ b/bsd/net/content_filter.c @@ -405,14 +405,8 @@ void* cfil_rw_lock_history[CFIL_RW_LCK_MAX]; int cfil_rw_nxt_unlck = 0; void* cfil_rw_unlock_history[CFIL_RW_LCK_MAX]; -#define CONTENT_FILTER_ZONE_NAME "content_filter" -#define CONTENT_FILTER_ZONE_MAX 10 -static struct zone *content_filter_zone = NULL; /* zone for content_filter */ - - -#define CFIL_INFO_ZONE_NAME "cfil_info" -#define CFIL_INFO_ZONE_MAX 1024 -static struct zone *cfil_info_zone = NULL; /* zone for cfil_info */ +static ZONE_DECLARE(content_filter_zone, "content_filter", + sizeof(struct content_filter), ZC_NONE); MBUFQ_HEAD(cfil_mqhead); @@ -472,7 +466,7 @@ struct cfil_entry { #define CFI_ADD_TIME_LOG(cfil, t1, t0, op) \ - struct timeval _tdiff; \ + struct timeval64 _tdiff; \ if ((cfil)->cfi_op_list_ctr < CFI_MAX_TIME_LOG_ENTRY) { \ timersub(t1, t0, &_tdiff); \ (cfil)->cfi_op_time[(cfil)->cfi_op_list_ctr] = (uint32_t)(_tdiff.tv_sec * 1000 + _tdiff.tv_usec / 1000);\ @@ -505,6 +499,7 @@ struct cfil_info { uint64_t cfi_byte_outbound_count; boolean_t cfi_isSignatureLatest; /* Indicates if signature covers latest flow attributes */ + u_int32_t cfi_filter_control_unit; u_int32_t cfi_debug; struct cfi_buf { /* @@ -533,6 +528,7 @@ struct cfil_info { struct cfil_entry cfi_entries[MAX_CONTENT_FILTER]; struct cfil_hash_entry *cfi_hash_entry; SLIST_HEAD(, cfil_entry) cfi_ordered_entries; + os_refcnt_t cfi_ref_count; } __attribute__((aligned(8))); #define CFIF_DROP 0x0001 /* drop action applied */ @@ -550,7 +546,10 @@ struct cfil_info { #define CFI_MASK_FLOWHASH 0x00000000FFFFFFFF /* lower 32 bits */ #define CFI_SHIFT_FLOWHASH 0 -#define CFI_ENTRY_KCUNIT(i, e) (((e) - &((i)->cfi_entries[0])) + 1) +#define CFI_ENTRY_KCUNIT(i, e) ((uint32_t)(((e) - &((i)->cfi_entries[0])) + 1)) + +static ZONE_DECLARE(cfil_info_zone, "cfil_info", + sizeof(struct cfil_info), ZC_NONE); TAILQ_HEAD(cfil_sock_head, cfil_info) cfil_sock_head; TAILQ_HEAD(cfil_sock_head_stats, cfil_info) cfil_sock_head_stats; @@ -584,6 +583,7 @@ LIST_HEAD(cfilhashhead, cfil_hash_entry); #define UNCONNECTED(inp) (inp && (((inp->inp_vflag & INP_IPV4) && (inp->inp_faddr.s_addr == INADDR_ANY)) || \ ((inp->inp_vflag & INP_IPV6) && IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_faddr)))) +#define IS_INP_V6(inp) (inp && (inp->inp_vflag & INP_IPV6)) #define IS_ENTRY_ATTACHED(cfil_info, kcunit) (cfil_info != NULL && (kcunit <= MAX_CONTENT_FILTER) && \ cfil_info->cfi_entries[kcunit - 1].cfe_filter != NULL) #define IS_DNS(local, remote) (check_port(local, 53) || check_port(remote, 53) || check_port(local, 5353) || check_port(remote, 5353)) @@ -591,6 +591,25 @@ LIST_HEAD(cfilhashhead, cfil_hash_entry); #define NULLADDRESS(addr) ((addr.sa.sa_len == 0) || \ (addr.sa.sa_family == AF_INET && addr.sin.sin_addr.s_addr == 0) || \ (addr.sa.sa_family == AF_INET6 && IN6_IS_ADDR_UNSPECIFIED(&addr.sin6.sin6_addr))) +#define LOCAL_ADDRESS_NEEDS_UPDATE(entry) \ + ((entry->cfentry_family == AF_INET && entry->cfentry_laddr.addr46.ia46_addr4.s_addr == 0) || \ + entry->cfentry_family == AF_INET6 && 
IN6_IS_ADDR_UNSPECIFIED(&entry->cfentry_laddr.addr6)) +#define LOCAL_PORT_NEEDS_UPDATE(entry, so) (entry->cfentry_lport == 0 && IS_UDP(so)) + +#define SKIP_FILTER_FOR_TCP_SOCKET(so) \ + (so == NULL || so->so_proto == NULL || so->so_proto->pr_domain == NULL || \ + (so->so_proto->pr_domain->dom_family != PF_INET && so->so_proto->pr_domain->dom_family != PF_INET6) || \ + so->so_proto->pr_type != SOCK_STREAM || \ + so->so_proto->pr_protocol != IPPROTO_TCP || \ + (so->so_flags & SOF_MP_SUBFLOW) != 0 || \ + (so->so_flags1 & SOF1_CONTENT_FILTER_SKIP) != 0) + +os_refgrp_decl(static, cfil_refgrp, "CFILRefGroup", NULL); + +#define CFIL_INFO_FREE(cfil_info) \ + if (cfil_info && (os_ref_release(&cfil_info->cfi_ref_count) == 0)) { \ + cfil_info_free(cfil_info); \ + } /* * Periodic Statistics Report: @@ -653,6 +672,9 @@ struct cfil_hash_entry { struct in_addr_4in6 addr46; struct in6_addr addr6; } cfentry_laddr; + uint8_t cfentry_laddr_updated: 1; + uint8_t cfentry_lport_updated: 1; + uint8_t cfentry_reserved: 6; }; /* @@ -677,17 +699,15 @@ struct cfil_db { struct cfil_tag { union sockaddr_in_4_6 cfil_faddr; uint32_t cfil_so_state_change_cnt; - short cfil_so_options; + uint32_t cfil_so_options; int cfil_inp_flags; }; -#define CFIL_HASH_ENTRY_ZONE_NAME "cfil_entry_hash" -#define CFIL_HASH_ENTRY_ZONE_MAX 1024 -static struct zone *cfil_hash_entry_zone = NULL; +static ZONE_DECLARE(cfil_hash_entry_zone, "cfil_entry_hash", + sizeof(struct cfil_hash_entry), ZC_NONE); -#define CFIL_DB_ZONE_NAME "cfil_db" -#define CFIL_DB_ZONE_MAX 1024 -static struct zone *cfil_db_zone = NULL; +static ZONE_DECLARE(cfil_db_zone, "cfil_db", + sizeof(struct cfil_db), ZC_NONE); /* * Statistics @@ -761,7 +781,7 @@ static int cfil_dispatch_closed_event(struct socket *, struct cfil_info *, int); static int cfil_data_common(struct socket *, struct cfil_info *, int, struct sockaddr *, struct mbuf *, struct mbuf *, uint32_t); static int cfil_data_filter(struct socket *, struct cfil_info *, uint32_t, int, - struct mbuf *, uint64_t); + struct mbuf *, uint32_t); static void fill_ip_sockaddr_4_6(union sockaddr_in_4_6 *, struct in_addr, u_int16_t); static void fill_ip6_sockaddr_4_6(union sockaddr_in_4_6 *, @@ -793,14 +813,16 @@ static unsigned int cfil_data_length(struct mbuf *, int *, int *); static errno_t cfil_db_init(struct socket *); static void cfil_db_free(struct socket *so); struct cfil_hash_entry *cfil_db_lookup_entry(struct cfil_db *, struct sockaddr *, struct sockaddr *, boolean_t); +struct cfil_hash_entry *cfil_db_lookup_entry_internal(struct cfil_db *, struct sockaddr *, struct sockaddr *, boolean_t, boolean_t); struct cfil_hash_entry *cfil_db_lookup_entry_with_sockid(struct cfil_db *, u_int64_t); struct cfil_hash_entry *cfil_db_add_entry(struct cfil_db *, struct sockaddr *, struct sockaddr *); -void cfil_db_update_entry_local(struct cfil_db *, struct cfil_hash_entry *, struct sockaddr *); +void cfil_db_update_entry_local(struct cfil_db *, struct cfil_hash_entry *, struct sockaddr *, struct mbuf *); void cfil_db_delete_entry(struct cfil_db *, struct cfil_hash_entry *); -struct cfil_hash_entry *cfil_sock_udp_get_flow(struct socket *, uint32_t, bool, struct sockaddr *, struct sockaddr *, int); +struct cfil_hash_entry *cfil_sock_udp_get_flow(struct socket *, uint32_t, bool, struct sockaddr *, struct sockaddr *, struct mbuf *, int); struct cfil_info *cfil_db_get_cfil_info(struct cfil_db *, cfil_sock_id_t); static errno_t cfil_sock_udp_handle_data(bool, struct socket *, struct sockaddr *, struct sockaddr *, struct mbuf *, struct mbuf *, 
uint32_t); +static int cfil_sock_udp_get_address_from_control(sa_family_t, struct mbuf *, uint8_t **); static int32_t cfil_sock_udp_data_pending(struct sockbuf *, bool); static void cfil_sock_udp_is_closed(struct socket *); static int cfil_sock_udp_notify_shutdown(struct socket *, int, int, int); @@ -817,14 +839,14 @@ static void cfil_get_flow_address(struct cfil_hash_entry *, struct inpcb *, static void cfil_info_log(int, struct cfil_info *, const char *); void cfil_filter_show(u_int32_t); void cfil_info_show(void); -bool cfil_info_idle_timed_out(struct cfil_info *, int, u_int32_t); +bool cfil_info_idle_timed_out(struct cfil_info *, int, u_int64_t); bool cfil_info_action_timed_out(struct cfil_info *, int); bool cfil_info_buffer_threshold_exceeded(struct cfil_info *); struct m_tag *cfil_dgram_save_socket_state(struct cfil_info *, struct mbuf *); boolean_t cfil_dgram_peek_socket_state(struct mbuf *m, int *inp_flags); static void cfil_udp_gc_thread_func(void *, wait_result_t); static void cfil_info_udp_expire(void *, wait_result_t); -static bool fill_cfil_hash_entry_from_address(struct cfil_hash_entry *, bool, struct sockaddr *); +static bool fill_cfil_hash_entry_from_address(struct cfil_hash_entry *, bool, struct sockaddr *, bool); static void cfil_sock_received_verdict(struct socket *so); static void cfil_fill_event_msg_addresses(struct cfil_hash_entry *, struct inpcb *, union sockaddr_in_4_6 *, union sockaddr_in_4_6 *, @@ -1925,6 +1947,14 @@ cfil_ctl_send(kern_ctl_ref kctlref, u_int32_t kcunit, void *unitinfo, mbuf_t m, cfil_info = so->so_cfil_db != NULL ? cfil_db_get_cfil_info(so->so_cfil_db, msghdr->cfm_sock_id) : so->so_cfil; + // We should not obtain global lock here in order to avoid deadlock down the path. + // But we attempt to retain a valid cfil_info to prevent any deallocation until + // we are done. Abort retain if cfil_info has already entered the free code path. 
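/*
 * A minimal sketch (illustrative only; "struct example", "example_refgrp"
 * and example_free() are assumed names) of the os_refcnt retain/release
 * pattern this change introduces for cfil_info, using only the primitives
 * referenced by the change itself:
 *
 *     os_refgrp_decl(static, example_refgrp, "ExampleRefGroup", NULL);
 *     struct example { os_refcnt_t ref; };
 *
 *     os_ref_init(&ex->ref, &example_refgrp);      // count starts at 1
 *     if (os_ref_retain_try(&ex->ref)) {           // fails once count is 0
 *         // ... safe to use ex without holding the global lock ...
 *         if (os_ref_release(&ex->ref) == 0) {
 *             example_free(ex);                    // last reference dropped
 *         }
 *     }
 */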
+ if (cfil_info && os_ref_retain_try(&cfil_info->cfi_ref_count) == false) { + socket_unlock(so, 1); + goto done; + } + if (cfil_info == NULL) { CFIL_LOG(LOG_NOTICE, "so %llx not attached", (uint64_t)VM_KERNEL_ADDRPERM(so), msghdr->cfm_sock_id); @@ -2045,6 +2075,7 @@ cfil_ctl_send(kern_ctl_ref kctlref, u_int32_t kcunit, void *unitinfo, mbuf_t m, break; } unlock: + CFIL_INFO_FREE(cfil_info) socket_unlock(so, 1); done: mbuf_freem(m); @@ -2364,10 +2395,6 @@ cfil_init(void) { struct kern_ctl_reg kern_ctl; errno_t error = 0; - vm_size_t content_filter_size = 0; /* size of content_filter */ - vm_size_t cfil_info_size = 0; /* size of cfil_info */ - vm_size_t cfil_hash_entry_size = 0; /* size of cfil_hash_entry */ - vm_size_t cfil_db_size = 0; /* size of cfil_db */ unsigned int mbuf_limit = 0; CFIL_LOG(LOG_NOTICE, ""); @@ -2406,64 +2433,6 @@ cfil_init(void) VERIFY(IS_P2ALIGNED(&cfil_stats.cfs_inject_q_out_passed, sizeof(uint32_t))); - /* - * Zone for content filters kernel control sockets - */ - content_filter_size = sizeof(struct content_filter); - content_filter_zone = zinit(content_filter_size, - CONTENT_FILTER_ZONE_MAX * content_filter_size, - 0, - CONTENT_FILTER_ZONE_NAME); - if (content_filter_zone == NULL) { - panic("%s: zinit(%s) failed", __func__, - CONTENT_FILTER_ZONE_NAME); - /* NOTREACHED */ - } - zone_change(content_filter_zone, Z_CALLERACCT, FALSE); - zone_change(content_filter_zone, Z_EXPAND, TRUE); - - /* - * Zone for per socket content filters - */ - cfil_info_size = sizeof(struct cfil_info); - cfil_info_zone = zinit(cfil_info_size, - CFIL_INFO_ZONE_MAX * cfil_info_size, - 0, - CFIL_INFO_ZONE_NAME); - if (cfil_info_zone == NULL) { - panic("%s: zinit(%s) failed", __func__, CFIL_INFO_ZONE_NAME); - /* NOTREACHED */ - } - zone_change(cfil_info_zone, Z_CALLERACCT, FALSE); - zone_change(cfil_info_zone, Z_EXPAND, TRUE); - - /* - * Zone for content filters cfil hash entries and db - */ - cfil_hash_entry_size = sizeof(struct cfil_hash_entry); - cfil_hash_entry_zone = zinit(cfil_hash_entry_size, - CFIL_HASH_ENTRY_ZONE_MAX * cfil_hash_entry_size, - 0, - CFIL_HASH_ENTRY_ZONE_NAME); - if (cfil_hash_entry_zone == NULL) { - panic("%s: zinit(%s) failed", __func__, CFIL_HASH_ENTRY_ZONE_NAME); - /* NOTREACHED */ - } - zone_change(cfil_hash_entry_zone, Z_CALLERACCT, FALSE); - zone_change(cfil_hash_entry_zone, Z_EXPAND, TRUE); - - cfil_db_size = sizeof(struct cfil_db); - cfil_db_zone = zinit(cfil_db_size, - CFIL_DB_ZONE_MAX * cfil_db_size, - 0, - CFIL_DB_ZONE_NAME); - if (cfil_db_zone == NULL) { - panic("%s: zinit(%s) failed", __func__, CFIL_DB_ZONE_NAME); - /* NOTREACHED */ - } - zone_change(cfil_db_zone, Z_CALLERACCT, FALSE); - zone_change(cfil_db_zone, Z_EXPAND, TRUE); - /* * Allocate locks */ @@ -2551,6 +2520,7 @@ cfil_info_alloc(struct socket *so, struct cfil_hash_entry *hash_entry) goto done; } bzero(cfil_info, sizeof(struct cfil_info)); + os_ref_init(&cfil_info->cfi_ref_count, &cfil_refgrp); cfil_queue_init(&cfil_info->cfi_snd.cfi_inject_q); cfil_queue_init(&cfil_info->cfi_rcv.cfi_inject_q); @@ -2833,13 +2803,17 @@ cfil_sock_attach(struct socket *so, struct sockaddr *local, struct sockaddr *rem socket_lock_assert_owned(so); + if (so->so_flags1 & SOF1_FLOW_DIVERT_SKIP) { + /* + * This socket has already been evaluated (and ultimately skipped) by + * flow divert, so it has also already been through content filter if there + * is one. 
+ */ + goto done; + } + /* Limit ourselves to TCP that are not MPTCP subflows */ - if ((so->so_proto->pr_domain->dom_family != PF_INET && - so->so_proto->pr_domain->dom_family != PF_INET6) || - so->so_proto->pr_type != SOCK_STREAM || - so->so_proto->pr_protocol != IPPROTO_TCP || - (so->so_flags & SOF_MP_SUBFLOW) != 0 || - (so->so_flags1 & SOF1_CONTENT_FILTER_SKIP) != 0) { + if (SKIP_FILTER_FOR_TCP_SOCKET(so)) { goto done; } @@ -2870,6 +2844,7 @@ cfil_sock_attach(struct socket *so, struct sockaddr *local, struct sockaddr *rem goto done; } so->so_cfil->cfi_dir = dir; + so->so_cfil->cfi_filter_control_unit = filter_control_unit; } if (cfil_info_attach_unit(so, filter_control_unit, so->so_cfil) == 0) { CFIL_LOG(LOG_ERR, "cfil_info_attach_unit(%u) failed", @@ -2929,7 +2904,7 @@ cfil_sock_detach(struct socket *so) VERIFY(so->so_usecount > 0); so->so_usecount--; } - cfil_info_free(so->so_cfil); + CFIL_INFO_FREE(so->so_cfil); so->so_cfil = NULL; OSIncrementAtomic(&cfil_stats.cfs_sock_detached); } @@ -3105,8 +3080,8 @@ cfil_dispatch_closed_event_sign(cfil_crypto_state_t crypto_state, hash_entry_ptr = cfil_info->cfi_hash_entry; } else if (cfil_info->cfi_so_attach_faddr.sa.sa_len > 0 || cfil_info->cfi_so_attach_laddr.sa.sa_len > 0) { - fill_cfil_hash_entry_from_address(&hash_entry, TRUE, &cfil_info->cfi_so_attach_laddr.sa); - fill_cfil_hash_entry_from_address(&hash_entry, FALSE, &cfil_info->cfi_so_attach_faddr.sa); + fill_cfil_hash_entry_from_address(&hash_entry, TRUE, &cfil_info->cfi_so_attach_laddr.sa, FALSE); + fill_cfil_hash_entry_from_address(&hash_entry, FALSE, &cfil_info->cfi_so_attach_faddr.sa, FALSE); hash_entry_ptr = &hash_entry; } if (hash_entry_ptr != NULL) { @@ -3216,8 +3191,8 @@ cfil_dispatch_attach_event(struct socket *so, struct cfil_info *cfil_info, hash_entry_ptr = cfil_info->cfi_hash_entry; } else if (cfil_info->cfi_so_attach_faddr.sa.sa_len > 0 || cfil_info->cfi_so_attach_laddr.sa.sa_len > 0) { - fill_cfil_hash_entry_from_address(&hash_entry, TRUE, &cfil_info->cfi_so_attach_laddr.sa); - fill_cfil_hash_entry_from_address(&hash_entry, FALSE, &cfil_info->cfi_so_attach_faddr.sa); + fill_cfil_hash_entry_from_address(&hash_entry, TRUE, &cfil_info->cfi_so_attach_laddr.sa, FALSE); + fill_cfil_hash_entry_from_address(&hash_entry, FALSE, &cfil_info->cfi_so_attach_faddr.sa, FALSE); hash_entry_ptr = &hash_entry; } if (hash_entry_ptr != NULL) { @@ -3666,7 +3641,7 @@ cfil_dispatch_data_event(struct socket *so, struct cfil_info *cfil_info, uint32_ msg->m_next = copy; data_req = (struct cfil_msg_data_event *)mbuf_data(msg); bzero(data_req, hdrsize); - data_req->cfd_msghdr.cfm_len = hdrsize + copylen; + data_req->cfd_msghdr.cfm_len = (uint32_t)hdrsize + copylen; data_req->cfd_msghdr.cfm_version = 1; data_req->cfd_msghdr.cfm_type = CFM_TYPE_EVENT; data_req->cfd_msghdr.cfm_op = @@ -3828,8 +3803,7 @@ cfil_data_service_ctl_q(struct socket *so, struct cfil_info *cfil_info, uint32_t /* * The first mbuf can partially pass */ - copylen = entrybuf->cfe_pass_offset - - entrybuf->cfe_ctl_q.q_start; + copylen = (unsigned int)(entrybuf->cfe_pass_offset - entrybuf->cfe_ctl_q.q_start); } VERIFY(copylen <= datalen); @@ -3904,7 +3878,7 @@ cfil_data_service_ctl_q(struct socket *so, struct cfil_info *cfil_info, uint32_t * The data in the first mbuf may have been * partially peeked at */ - copyoffset = entrybuf->cfe_peeked - currentoffset; + copyoffset = (unsigned int)(entrybuf->cfe_peeked - currentoffset); VERIFY(copyoffset < datalen); copylen = datalen - copyoffset; VERIFY(copylen <= datalen); @@ -3913,8 +3887,8 @@ 
cfil_data_service_ctl_q(struct socket *so, struct cfil_info *cfil_info, uint32_t */ if (currentoffset + copyoffset + copylen > entrybuf->cfe_peek_offset) { - copylen = entrybuf->cfe_peek_offset - - (currentoffset + copyoffset); + copylen = (unsigned int)(entrybuf->cfe_peek_offset - + (currentoffset + copyoffset)); } #if DATA_DEBUG @@ -4017,7 +3991,7 @@ done: */ int cfil_data_filter(struct socket *so, struct cfil_info *cfil_info, uint32_t kcunit, int outgoing, - struct mbuf *data, uint64_t datalen) + struct mbuf *data, uint32_t datalen) { errno_t error = 0; struct cfil_entry *entry; @@ -4438,6 +4412,7 @@ cfil_set_socket_pass_offset(struct socket *so, struct cfil_info *cfil_info, int struct cfe_buf *entrybuf; uint32_t kcunit; uint64_t pass_offset = 0; + boolean_t first = true; if (cfil_info == NULL) { return 0; @@ -4473,9 +4448,11 @@ cfil_set_socket_pass_offset(struct socket *so, struct cfil_info *cfil_info, int entrybuf = &entry->cfe_rcv; } - if (pass_offset == 0 || + // Keep track of the smallest pass_offset among filters. + if (first == true || entrybuf->cfe_pass_offset < pass_offset) { pass_offset = entrybuf->cfe_pass_offset; + first = false; } } cfi_buf->cfi_pass_offset = pass_offset; @@ -4893,15 +4870,29 @@ cfil_sock_data_out(struct socket *so, struct sockaddr *to, struct mbuf *data, struct mbuf *control, uint32_t flags) { int error = 0; + int new_filter_control_unit = 0; if (IS_IP_DGRAM(so)) { return cfil_sock_udp_handle_data(TRUE, so, NULL, to, data, control, flags); } if ((so->so_flags & SOF_CONTENT_FILTER) == 0 || so->so_cfil == NULL) { + /* Drop pre-existing TCP sockets if filter is enabled now */ + if (cfil_active_count > 0 && !SKIP_FILTER_FOR_TCP_SOCKET(so)) { + new_filter_control_unit = necp_socket_get_content_filter_control_unit(so); + if (new_filter_control_unit > 0) { + return EPIPE; + } + } return 0; } + /* Drop pre-existing TCP sockets when filter state changed */ + new_filter_control_unit = necp_socket_get_content_filter_control_unit(so); + if (new_filter_control_unit > 0 && new_filter_control_unit != so->so_cfil->cfi_filter_control_unit && !SKIP_FILTER_FOR_TCP_SOCKET(so)) { + return EPIPE; + } + /* * Pass initial data for TFO. */ @@ -4948,15 +4939,29 @@ cfil_sock_data_in(struct socket *so, struct sockaddr *from, struct mbuf *data, struct mbuf *control, uint32_t flags) { int error = 0; + int new_filter_control_unit = 0; if (IS_IP_DGRAM(so)) { return cfil_sock_udp_handle_data(FALSE, so, NULL, from, data, control, flags); } if ((so->so_flags & SOF_CONTENT_FILTER) == 0 || so->so_cfil == NULL) { + /* Drop pre-existing TCP sockets if filter is enabled now */ + if (cfil_active_count > 0 && !SKIP_FILTER_FOR_TCP_SOCKET(so)) { + new_filter_control_unit = necp_socket_get_content_filter_control_unit(so); + if (new_filter_control_unit > 0) { + return EPIPE; + } + } return 0; } + /* Drop pre-existing TCP sockets when filter state changed */ + new_filter_control_unit = necp_socket_get_content_filter_control_unit(so); + if (new_filter_control_unit > 0 && new_filter_control_unit != so->so_cfil->cfi_filter_control_unit && !SKIP_FILTER_FOR_TCP_SOCKET(so)) { + return EPIPE; + } + /* * Pass initial data for TFO. */ @@ -5638,11 +5643,12 @@ cfil_hash_entry_log(int level, struct socket *so, struct cfil_hash_entry *entry, return; } - CFIL_LOG(level, "<%s>: <%s(%d) so %llx, entry %p, sockID %llu> lport %d fport %d laddr %s faddr %s", + CFIL_LOG(level, "<%s>: <%s(%d) so %llx, entry %p, sockID %llu> lport %d fport %d laddr %s faddr %s hash %X", msg, IS_UDP(so) ? 
"UDP" : "proto", GET_SO_PROTO(so), (uint64_t)VM_KERNEL_ADDRPERM(so), entry, sockId, - ntohs(entry->cfentry_lport), ntohs(entry->cfentry_fport), local, remote); + ntohs(entry->cfentry_lport), ntohs(entry->cfentry_fport), local, remote, + entry->cfentry_flowhash); } static void @@ -5664,15 +5670,12 @@ cfil_inp_log(int level, struct socket *so, const char* msg) local[0] = remote[0] = 0x0; -#if INET6 if (inp->inp_vflag & INP_IPV6) { addr = &inp->in6p_laddr.s6_addr32; inet_ntop(AF_INET6, addr, local, sizeof(local)); addr = &inp->in6p_faddr.s6_addr32; inet_ntop(AF_INET6, addr, remote, sizeof(local)); - } else -#endif /* INET6 */ - { + } else { addr = &inp->inp_laddr.s_addr; inet_ntop(AF_INET, addr, local, sizeof(local)); addr = &inp->inp_faddr.s_addr; @@ -5762,7 +5765,7 @@ cfil_db_free(struct socket *so) #if LIFECYCLE_DEBUG cfil_info_log(LOG_ERR, entry->cfentry_cfil, "CFIL: LIFECYCLE: DB FREE CLEAN UP"); #endif - cfil_info_free(entry->cfentry_cfil); + CFIL_INFO_FREE(entry->cfentry_cfil); OSIncrementAtomic(&cfil_stats.cfs_sock_detached); entry->cfentry_cfil = NULL; } @@ -5784,13 +5787,13 @@ cfil_db_free(struct socket *so) CFIL_LOG(LOG_ERR, "CFIL: LIFECYCLE: so usecount %d", so->so_usecount); #endif - FREE(db->cfdb_hashbase, M_CFIL); + hashdestroy(db->cfdb_hashbase, M_CFIL, db->cfdb_hashmask); zfree(cfil_db_zone, db); so->so_cfil_db = NULL; } static bool -fill_cfil_hash_entry_from_address(struct cfil_hash_entry *entry, bool isLocal, struct sockaddr *addr) +fill_cfil_hash_entry_from_address(struct cfil_hash_entry *entry, bool isLocal, struct sockaddr *addr, bool islocalUpdate) { struct sockaddr_in *sin = NULL; struct sockaddr_in6 *sin6 = NULL; @@ -5806,11 +5809,25 @@ fill_cfil_hash_entry_from_address(struct cfil_hash_entry *entry, bool isLocal, s return FALSE; } if (isLocal == TRUE) { - entry->cfentry_lport = sin->sin_port; - entry->cfentry_laddr.addr46.ia46_addr4.s_addr = sin->sin_addr.s_addr; + if (sin->sin_port) { + entry->cfentry_lport = sin->sin_port; + if (islocalUpdate) { + entry->cfentry_lport_updated = TRUE; + } + } + if (sin->sin_addr.s_addr) { + entry->cfentry_laddr.addr46.ia46_addr4.s_addr = sin->sin_addr.s_addr; + if (islocalUpdate) { + entry->cfentry_laddr_updated = TRUE; + } + } } else { - entry->cfentry_fport = sin->sin_port; - entry->cfentry_faddr.addr46.ia46_addr4.s_addr = sin->sin_addr.s_addr; + if (sin->sin_port) { + entry->cfentry_fport = sin->sin_port; + } + if (sin->sin_addr.s_addr) { + entry->cfentry_faddr.addr46.ia46_addr4.s_addr = sin->sin_addr.s_addr; + } } entry->cfentry_family = AF_INET; return TRUE; @@ -5820,11 +5837,25 @@ fill_cfil_hash_entry_from_address(struct cfil_hash_entry *entry, bool isLocal, s return FALSE; } if (isLocal == TRUE) { - entry->cfentry_lport = sin6->sin6_port; - entry->cfentry_laddr.addr6 = sin6->sin6_addr; + if (sin6->sin6_port) { + entry->cfentry_lport = sin6->sin6_port; + if (islocalUpdate) { + entry->cfentry_lport_updated = TRUE; + } + } + if (!IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) { + entry->cfentry_laddr.addr6 = sin6->sin6_addr; + if (islocalUpdate) { + entry->cfentry_laddr_updated = TRUE; + } + } } else { - entry->cfentry_fport = sin6->sin6_port; - entry->cfentry_faddr.addr6 = sin6->sin6_addr; + if (sin6->sin6_port) { + entry->cfentry_fport = sin6->sin6_port; + } + if (!IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) { + entry->cfentry_faddr.addr6 = sin6->sin6_addr; + } } entry->cfentry_family = AF_INET6; return TRUE; @@ -5834,7 +5865,7 @@ fill_cfil_hash_entry_from_address(struct cfil_hash_entry *entry, bool isLocal, s } static bool 
-fill_cfil_hash_entry_from_inp(struct cfil_hash_entry *entry, bool isLocal, struct inpcb *inp) +fill_cfil_hash_entry_from_inp(struct cfil_hash_entry *entry, bool isLocal, struct inpcb *inp, bool islocalUpdate) { if (entry == NULL || inp == NULL) { return FALSE; @@ -5842,21 +5873,49 @@ fill_cfil_hash_entry_from_inp(struct cfil_hash_entry *entry, bool isLocal, struc if (inp->inp_vflag & INP_IPV6) { if (isLocal == TRUE) { - entry->cfentry_lport = inp->inp_lport; - entry->cfentry_laddr.addr6 = inp->in6p_laddr; + if (inp->inp_lport) { + entry->cfentry_lport = inp->inp_lport; + if (islocalUpdate) { + entry->cfentry_lport_updated = TRUE; + } + } + if (!IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr)) { + entry->cfentry_laddr.addr6 = inp->in6p_laddr; + if (islocalUpdate) { + entry->cfentry_laddr_updated = TRUE; + } + } } else { - entry->cfentry_fport = inp->inp_fport; - entry->cfentry_faddr.addr6 = inp->in6p_faddr; + if (inp->inp_fport) { + entry->cfentry_fport = inp->inp_fport; + } + if (!IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_faddr)) { + entry->cfentry_faddr.addr6 = inp->in6p_faddr; + } } entry->cfentry_family = AF_INET6; return TRUE; } else if (inp->inp_vflag & INP_IPV4) { if (isLocal == TRUE) { - entry->cfentry_lport = inp->inp_lport; - entry->cfentry_laddr.addr46.ia46_addr4.s_addr = inp->inp_laddr.s_addr; + if (inp->inp_lport) { + entry->cfentry_lport = inp->inp_lport; + if (islocalUpdate) { + entry->cfentry_lport_updated = TRUE; + } + } + if (inp->inp_laddr.s_addr) { + entry->cfentry_laddr.addr46.ia46_addr4.s_addr = inp->inp_laddr.s_addr; + if (islocalUpdate) { + entry->cfentry_laddr_updated = TRUE; + } + } } else { - entry->cfentry_fport = inp->inp_fport; - entry->cfentry_faddr.addr46.ia46_addr4.s_addr = inp->inp_faddr.s_addr; + if (inp->inp_fport) { + entry->cfentry_fport = inp->inp_fport; + } + if (inp->inp_faddr.s_addr) { + entry->cfentry_faddr.addr46.ia46_addr4.s_addr = inp->inp_faddr.s_addr; + } } entry->cfentry_family = AF_INET; return TRUE; @@ -5929,7 +5988,7 @@ cfil_db_lookup_entry_with_sockid(struct cfil_db *db, u_int64_t sock_id) } struct cfil_hash_entry * -cfil_db_lookup_entry(struct cfil_db *db, struct sockaddr *local, struct sockaddr *remote, boolean_t remoteOnly) +cfil_db_lookup_entry_internal(struct cfil_db *db, struct sockaddr *local, struct sockaddr *remote, boolean_t remoteOnly, boolean_t withLocalPort) { struct cfil_hash_entry matchentry = { }; struct cfil_hash_entry *nextentry = NULL; @@ -5945,54 +6004,45 @@ cfil_db_lookup_entry(struct cfil_db *db, struct sockaddr *local, struct sockaddr goto done; } - if (remoteOnly == false) { - if (local != NULL) { - fill_cfil_hash_entry_from_address(&matchentry, TRUE, local); - } else { - fill_cfil_hash_entry_from_inp(&matchentry, TRUE, inp); - } + if (local != NULL) { + fill_cfil_hash_entry_from_address(&matchentry, TRUE, local, FALSE); + } else { + fill_cfil_hash_entry_from_inp(&matchentry, TRUE, inp, FALSE); } if (remote != NULL) { - fill_cfil_hash_entry_from_address(&matchentry, FALSE, remote); + fill_cfil_hash_entry_from_address(&matchentry, FALSE, remote, FALSE); } else { - fill_cfil_hash_entry_from_inp(&matchentry, FALSE, inp); + fill_cfil_hash_entry_from_inp(&matchentry, FALSE, inp, FALSE); } -#if INET6 if (inp->inp_vflag & INP_IPV6) { hashkey_faddr = matchentry.cfentry_faddr.addr6.s6_addr32[3]; hashkey_laddr = (remoteOnly == false) ? matchentry.cfentry_laddr.addr6.s6_addr32[3] : 0; - } else -#endif /* INET6 */ - { + } else { hashkey_faddr = matchentry.cfentry_faddr.addr46.ia46_addr4.s_addr; hashkey_laddr = (remoteOnly == false) ? 
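Both fill helpers above now copy a port or address only when the caller actually supplied one, and set cfentry_laddr_updated / cfentry_lport_updated when the fill is a late local-side update rather than a lookup key. A standalone sketch of that pattern (the struct and names below are hypothetical, not from the diff):

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical flow key used only for illustration. */
struct flow_key {
        uint16_t lport;
        bool     lport_updated;
};

/* Only overwrite when a real value is supplied (0 means "unspecified"),
 * and remember that the local side was filled in after flow creation. */
static void
fill_local_port(struct flow_key *key, uint16_t port, bool is_local_update)
{
        if (port != 0) {
                key->lport = port;
                if (is_local_update) {
                        key->lport_updated = true;
                }
        }
}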
matchentry.cfentry_laddr.addr46.ia46_addr4.s_addr : 0; } hashkey_fport = matchentry.cfentry_fport; - hashkey_lport = (remoteOnly == false) ? matchentry.cfentry_lport : 0; + hashkey_lport = (remoteOnly == false || withLocalPort == true) ? matchentry.cfentry_lport : 0; inp_hash_element = CFIL_HASH(hashkey_laddr, hashkey_faddr, hashkey_lport, hashkey_fport); inp_hash_element &= db->cfdb_hashmask; - cfilhash = &db->cfdb_hashbase[inp_hash_element]; LIST_FOREACH(nextentry, cfilhash, cfentry_link) { -#if INET6 if ((inp->inp_vflag & INP_IPV6) && - (remoteOnly || nextentry->cfentry_lport == matchentry.cfentry_lport) && + (remoteOnly || nextentry->cfentry_lport_updated || nextentry->cfentry_lport == matchentry.cfentry_lport) && nextentry->cfentry_fport == matchentry.cfentry_fport && - (remoteOnly || IN6_ARE_ADDR_EQUAL(&nextentry->cfentry_laddr.addr6, &matchentry.cfentry_laddr.addr6)) && + (remoteOnly || nextentry->cfentry_laddr_updated || IN6_ARE_ADDR_EQUAL(&nextentry->cfentry_laddr.addr6, &matchentry.cfentry_laddr.addr6)) && IN6_ARE_ADDR_EQUAL(&nextentry->cfentry_faddr.addr6, &matchentry.cfentry_faddr.addr6)) { #if DATA_DEBUG cfil_hash_entry_log(LOG_DEBUG, db->cfdb_so, &matchentry, 0, "CFIL LOOKUP ENTRY: UDP V6 found entry"); #endif return nextentry; - } else -#endif /* INET6 */ - if ((remoteOnly || nextentry->cfentry_lport == matchentry.cfentry_lport) && + } else if ((remoteOnly || nextentry->cfentry_lport_updated || nextentry->cfentry_lport == matchentry.cfentry_lport) && nextentry->cfentry_fport == matchentry.cfentry_fport && - (remoteOnly || nextentry->cfentry_laddr.addr46.ia46_addr4.s_addr == matchentry.cfentry_laddr.addr46.ia46_addr4.s_addr) && + (remoteOnly || nextentry->cfentry_laddr_updated || nextentry->cfentry_laddr.addr46.ia46_addr4.s_addr == matchentry.cfentry_laddr.addr46.ia46_addr4.s_addr) && nextentry->cfentry_faddr.addr46.ia46_addr4.s_addr == matchentry.cfentry_faddr.addr46.ia46_addr4.s_addr) { #if DATA_DEBUG cfil_hash_entry_log(LOG_DEBUG, db->cfdb_so, &matchentry, 0, "CFIL LOOKUP ENTRY: UDP V4 found entry"); @@ -6008,6 +6058,39 @@ done: return NULL; } +struct cfil_hash_entry * +cfil_db_lookup_entry(struct cfil_db *db, struct sockaddr *local, struct sockaddr *remote, boolean_t remoteOnly) +{ + struct cfil_hash_entry *entry = cfil_db_lookup_entry_internal(db, local, remote, remoteOnly, false); + if (entry == NULL && remoteOnly == true) { + entry = cfil_db_lookup_entry_internal(db, local, remote, remoteOnly, true); + } + return entry; +} + +cfil_sock_id_t +cfil_sock_id_from_datagram_socket(struct socket *so, struct sockaddr *local, struct sockaddr *remote) +{ + struct cfil_hash_entry *hash_entry = NULL; + + socket_lock_assert_owned(so); + + if (so->so_cfil_db == NULL) { + return CFIL_SOCK_ID_NONE; + } + + hash_entry = cfil_db_lookup_entry(so->so_cfil_db, local, remote, false); + if (hash_entry == NULL) { + // No match with both local and remote, try match with remote only + hash_entry = cfil_db_lookup_entry(so->so_cfil_db, local, remote, true); + } + if (hash_entry == NULL || hash_entry->cfentry_cfil == NULL) { + return CFIL_SOCK_ID_NONE; + } + + return hash_entry->cfentry_cfil->cfi_sock_id; +} + void cfil_db_delete_entry(struct cfil_db *db, struct cfil_hash_entry *hash_entry) { @@ -6047,24 +6130,21 @@ cfil_db_add_entry(struct cfil_db *db, struct sockaddr *local, struct sockaddr *r bzero(entry, sizeof(struct cfil_hash_entry)); if (local != NULL) { - fill_cfil_hash_entry_from_address(entry, TRUE, local); + fill_cfil_hash_entry_from_address(entry, TRUE, local, FALSE); } else { - 
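cfil_db_lookup_entry above becomes a wrapper that retries the internal lookup with the local port included, and cfil_sock_id_from_datagram_socket shows the caller-level fallback from a full local+remote match to a remote-only match. A condensed sketch of that fallback order, using the functions exactly as declared in this hunk:

/* Illustrative only: same fallback order as cfil_sock_id_from_datagram_socket. */
static struct cfil_hash_entry *
lookup_with_fallback(struct cfil_db *db, struct sockaddr *local,
    struct sockaddr *remote)
{
        /* 1. try to match on both local and remote */
        struct cfil_hash_entry *entry =
            cfil_db_lookup_entry(db, local, remote, false);

        if (entry == NULL) {
                /* 2. fall back to remote-only, for flows whose local side
                 *    has not been bound (or observed) yet */
                entry = cfil_db_lookup_entry(db, local, remote, true);
        }
        return entry;
}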
fill_cfil_hash_entry_from_inp(entry, TRUE, inp); + fill_cfil_hash_entry_from_inp(entry, TRUE, inp, FALSE); } if (remote != NULL) { - fill_cfil_hash_entry_from_address(entry, FALSE, remote); + fill_cfil_hash_entry_from_address(entry, FALSE, remote, FALSE); } else { - fill_cfil_hash_entry_from_inp(entry, FALSE, inp); + fill_cfil_hash_entry_from_inp(entry, FALSE, inp, FALSE); } entry->cfentry_lastused = net_uptime(); -#if INET6 if (inp->inp_vflag & INP_IPV6) { hashkey_faddr = entry->cfentry_faddr.addr6.s6_addr32[3]; hashkey_laddr = entry->cfentry_laddr.addr6.s6_addr32[3]; - } else -#endif /* INET6 */ - { + } else { hashkey_faddr = entry->cfentry_faddr.addr46.ia46_addr4.s_addr; hashkey_laddr = entry->cfentry_laddr.addr46.ia46_addr4.s_addr; } @@ -6085,9 +6165,10 @@ done: } void -cfil_db_update_entry_local(struct cfil_db *db, struct cfil_hash_entry *entry, struct sockaddr *local) +cfil_db_update_entry_local(struct cfil_db *db, struct cfil_hash_entry *entry, struct sockaddr *local, struct mbuf *control) { struct inpcb *inp = sotoinpcb(db->cfdb_so); + union sockaddr_in_4_6 address_buf = { }; CFIL_LOG(LOG_INFO, ""); @@ -6095,12 +6176,48 @@ cfil_db_update_entry_local(struct cfil_db *db, struct cfil_hash_entry *entry, st return; } - if (local != NULL) { - fill_cfil_hash_entry_from_address(entry, TRUE, local); - } else { - fill_cfil_hash_entry_from_inp(entry, TRUE, inp); + if (LOCAL_ADDRESS_NEEDS_UPDATE(entry)) { + // Flow does not have a local address yet. Retrieve local address + // from control mbufs if present. + if (local == NULL && control != NULL) { + uint8_t *addr_ptr = NULL; + int size = cfil_sock_udp_get_address_from_control(entry->cfentry_family, control, &addr_ptr); + + if (size && addr_ptr) { + switch (entry->cfentry_family) { + case AF_INET: + if (size == sizeof(struct in_addr)) { + address_buf.sin.sin_port = 0; + address_buf.sin.sin_family = AF_INET; + address_buf.sin.sin_len = sizeof(struct sockaddr_in); + (void) memcpy(&address_buf.sin.sin_addr, addr_ptr, sizeof(struct in_addr)); + local = sintosa(&address_buf.sin); + } + break; + case AF_INET6: + if (size == sizeof(struct in6_addr)) { + address_buf.sin6.sin6_port = 0; + address_buf.sin6.sin6_family = AF_INET6; + address_buf.sin6.sin6_len = sizeof(struct sockaddr_in6); + (void) memcpy(&address_buf.sin6.sin6_addr, addr_ptr, sizeof(struct in6_addr)); + local = sin6tosa(&address_buf.sin6); + } + break; + default: + break; + } + } + } + if (local != NULL) { + fill_cfil_hash_entry_from_address(entry, TRUE, local, TRUE); + } else { + fill_cfil_hash_entry_from_inp(entry, TRUE, inp, TRUE); + } + } + + if (LOCAL_PORT_NEEDS_UPDATE(entry, db->cfdb_so)) { + fill_cfil_hash_entry_from_inp(entry, TRUE, inp, TRUE); } - cfil_hash_entry_log(LOG_DEBUG, db->cfdb_so, entry, 0, "CFIL: cfil_db_add_entry: local updated"); return; } @@ -6132,9 +6249,10 @@ cfil_db_get_cfil_info(struct cfil_db *db, cfil_sock_id_t id) } struct cfil_hash_entry * -cfil_sock_udp_get_flow(struct socket *so, uint32_t filter_control_unit, bool outgoing, struct sockaddr *local, struct sockaddr *remote, int debug) +cfil_sock_udp_get_flow(struct socket *so, uint32_t filter_control_unit, bool outgoing, struct sockaddr *local, struct sockaddr *remote, struct mbuf *control, int debug) { struct cfil_hash_entry *hash_entry = NULL; + int new_filter_control_unit = 0; errno_t error = 0; socket_lock_assert_owned(so); @@ -6151,13 +6269,19 @@ cfil_sock_udp_get_flow(struct socket *so, uint32_t filter_control_unit, bool out if (hash_entry == NULL) { // No match with both local and remote, try match 
with remote only hash_entry = cfil_db_lookup_entry(so->so_cfil_db, local, remote, true); - if (hash_entry != NULL) { - // Simply update the local address into the original flow, keeping - // its sockId and flow_hash unchanged. - cfil_db_update_entry_local(so->so_cfil_db, hash_entry, local); - } } if (hash_entry != NULL) { + /* Drop pre-existing UDP flow if filter state changed */ + new_filter_control_unit = necp_socket_get_content_filter_control_unit(so); + if (new_filter_control_unit > 0 && + new_filter_control_unit != hash_entry->cfentry_cfil->cfi_filter_control_unit) { + return NULL; + } + + // Try to update flow info from socket and/or control mbufs if necessary + if (LOCAL_ADDRESS_NEEDS_UPDATE(hash_entry) || LOCAL_PORT_NEEDS_UPDATE(hash_entry, so)) { + cfil_db_update_entry_local(so->so_cfil_db, hash_entry, local, control); + } return hash_entry; } @@ -6175,6 +6299,7 @@ cfil_sock_udp_get_flow(struct socket *so, uint32_t filter_control_unit, bool out OSIncrementAtomic(&cfil_stats.cfs_sock_attach_no_mem); return NULL; } + hash_entry->cfentry_cfil->cfi_filter_control_unit = filter_control_unit; hash_entry->cfentry_cfil->cfi_dir = outgoing ? CFS_CONNECTION_DIR_OUT : CFS_CONNECTION_DIR_IN; hash_entry->cfentry_cfil->cfi_debug = debug; @@ -6182,8 +6307,13 @@ cfil_sock_udp_get_flow(struct socket *so, uint32_t filter_control_unit, bool out cfil_info_log(LOG_ERR, hash_entry->cfentry_cfil, "CFIL: LIFECYCLE: ADDED"); #endif + // Check if we can update the new flow's local address from control mbufs + if (control != NULL) { + cfil_db_update_entry_local(so->so_cfil_db, hash_entry, local, control); + } + if (cfil_info_attach_unit(so, filter_control_unit, hash_entry->cfentry_cfil) == 0) { - cfil_info_free(hash_entry->cfentry_cfil); + CFIL_INFO_FREE(hash_entry->cfentry_cfil); cfil_db_delete_entry(so->so_cfil_db, hash_entry); CFIL_LOG(LOG_ERR, "CFIL: UDP cfil_info_attach_unit(%u) failed", filter_control_unit); @@ -6215,6 +6345,54 @@ cfil_sock_udp_get_flow(struct socket *so, uint32_t filter_control_unit, bool out return hash_entry; } +int +cfil_sock_udp_get_address_from_control(sa_family_t family, struct mbuf *control, uint8_t **address_ptr) +{ + struct cmsghdr *cm; + struct in6_pktinfo *pi6; + + if (control == NULL || address_ptr == NULL) { + return 0; + } + + while (control) { + if (control->m_type != MT_CONTROL) { + control = control->m_next; + continue; + } + + for (cm = M_FIRST_CMSGHDR(control); + is_cmsg_valid(control, cm); + cm = M_NXT_CMSGHDR(control, cm)) { + switch (cm->cmsg_type) { + case IP_RECVDSTADDR: + if (family == AF_INET && + cm->cmsg_level == IPPROTO_IP && + cm->cmsg_len == CMSG_LEN(sizeof(struct in_addr))) { + *address_ptr = CMSG_DATA(cm); + return sizeof(struct in_addr); + } + break; + case IPV6_PKTINFO: + case IPV6_2292PKTINFO: + if (family == AF_INET6 && + cm->cmsg_level == IPPROTO_IPV6 && + cm->cmsg_len == CMSG_LEN(sizeof(struct in6_pktinfo))) { + pi6 = (struct in6_pktinfo *)(void *)CMSG_DATA(cm); + *address_ptr = (uint8_t *)&pi6->ipi6_addr; + return sizeof(struct in6_addr); + } + break; + default: + break; + } + } + + control = control->m_next; + } + return 0; +} + errno_t cfil_sock_udp_handle_data(bool outgoing, struct socket *so, struct sockaddr *local, struct sockaddr *remote, @@ -6256,7 +6434,7 @@ cfil_sock_udp_handle_data(bool outgoing, struct socket *so, return error; } - hash_entry = cfil_sock_udp_get_flow(so, filter_control_unit, outgoing, local, remote, debug); + hash_entry = cfil_sock_udp_get_flow(so, filter_control_unit, outgoing, local, remote, control, debug); if 
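cfil_sock_udp_get_address_from_control above walks MT_CONTROL mbufs looking for IP_RECVDSTADDR or IPV6_PKTINFO ancillary data. For reference, the same walk expressed against the familiar userspace cmsg macros (a sketch, not part of the patch; on macOS the in6_pktinfo definitions may additionally require __APPLE_USE_RFC_3542):

#include <sys/socket.h>
#include <netinet/in.h>
#include <string.h>

/* Sketch: pull the destination/local address out of ancillary data. */
static int
dst_addr_from_cmsgs(struct msghdr *msg, sa_family_t family,
    void *out, size_t outlen)
{
        struct cmsghdr *cm;

        for (cm = CMSG_FIRSTHDR(msg); cm != NULL; cm = CMSG_NXTHDR(msg, cm)) {
                if (family == AF_INET &&
                    cm->cmsg_level == IPPROTO_IP &&
                    cm->cmsg_type == IP_RECVDSTADDR &&
                    outlen >= sizeof(struct in_addr)) {
                        memcpy(out, CMSG_DATA(cm), sizeof(struct in_addr));
                        return (int)sizeof(struct in_addr);
                }
                if (family == AF_INET6 &&
                    cm->cmsg_level == IPPROTO_IPV6 &&
                    cm->cmsg_type == IPV6_PKTINFO &&
                    outlen >= sizeof(struct in6_addr)) {
                        struct in6_pktinfo pi6;

                        memcpy(&pi6, CMSG_DATA(cm), sizeof(pi6));
                        memcpy(out, &pi6.ipi6_addr, sizeof(struct in6_addr));
                        return (int)sizeof(struct in6_addr);
                }
        }
        return 0;
}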
(hash_entry == NULL || hash_entry->cfentry_cfil == NULL) { CFIL_LOG(LOG_ERR, "CFIL: Falied to create UDP flow"); return EPIPE; @@ -6794,10 +6972,10 @@ cfil_info_show(void) } bool -cfil_info_idle_timed_out(struct cfil_info *cfil_info, int timeout, u_int32_t current_time) +cfil_info_idle_timed_out(struct cfil_info *cfil_info, int timeout, u_int64_t current_time) { if (cfil_info && cfil_info->cfi_hash_entry && - (current_time - cfil_info->cfi_hash_entry->cfentry_lastused >= (u_int32_t)timeout)) { + (current_time - cfil_info->cfi_hash_entry->cfentry_lastused >= (u_int64_t)timeout)) { #if GC_DEBUG cfil_info_log(LOG_ERR, cfil_info, "CFIL: flow IDLE timeout expired"); #endif @@ -6973,7 +7151,7 @@ cfil_info_udp_expire(void *v, wait_result_t w) #endif cfil_db_delete_entry(db, hash_entry); - cfil_info_free(cfil_info); + CFIL_INFO_FREE(cfil_info); OSIncrementAtomic(&cfil_stats.cfs_sock_detached); if (so->so_flags & SOF_CONTENT_FILTER) { @@ -7044,7 +7222,7 @@ cfil_dgram_save_socket_state(struct cfil_info *cfil_info, struct mbuf *m) } struct m_tag * -cfil_dgram_get_socket_state(struct mbuf *m, uint32_t *state_change_cnt, short *options, +cfil_dgram_get_socket_state(struct mbuf *m, uint32_t *state_change_cnt, uint32_t *options, struct sockaddr **faddr, int *inp_flags) { struct m_tag *tag = NULL; @@ -7120,7 +7298,7 @@ cfil_dispatch_stats_event_locked(int kcunit, struct cfil_stats_report_buffer *bu } msgsize = sizeof(struct cfil_msg_stats_report) + (sizeof(struct cfil_msg_sock_stats) * stats_count); - buffer->msghdr.cfm_len = msgsize; + buffer->msghdr.cfm_len = (uint32_t)msgsize; buffer->msghdr.cfm_version = 1; buffer->msghdr.cfm_type = CFM_TYPE_EVENT; buffer->msghdr.cfm_op = CFM_OP_STATS; diff --git a/bsd/net/content_filter.h b/bsd/net/content_filter.h index 2944eba1e..08ad5280d 100644 --- a/bsd/net/content_filter.h +++ b/bsd/net/content_filter.h @@ -530,9 +530,10 @@ extern int cfil_sock_data_space(struct sockbuf *sb); extern void cfil_sock_buf_update(struct sockbuf *sb); extern cfil_sock_id_t cfil_sock_id_from_socket(struct socket *so); +extern cfil_sock_id_t cfil_sock_id_from_datagram_socket(struct socket *so, struct sockaddr *local, struct sockaddr *remote); extern struct m_tag *cfil_dgram_get_socket_state(struct mbuf *m, uint32_t *state_change_cnt, - short *options, struct sockaddr **faddr, int *inp_flags); + uint32_t *options, struct sockaddr **faddr, int *inp_flags); extern boolean_t cfil_dgram_peek_socket_state(struct mbuf *m, int *inp_flags); #endif /* BSD_KERNEL_PRIVATE */ diff --git a/bsd/net/dlil.c b/bsd/net/dlil.c index 4a703aee2..b58785c17 100644 --- a/bsd/net/dlil.c +++ b/bsd/net/dlil.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999-2019 Apple Inc. All rights reserved. + * Copyright (c) 1999-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -32,6 +32,7 @@ * Version 2.0. 
*/ #include +#include #include #include @@ -96,7 +97,6 @@ #include #endif /* INET */ -#if INET6 #include #include #include @@ -104,7 +104,6 @@ #include #include #include -#endif /* INET6 */ #include #include #include @@ -222,6 +221,9 @@ struct dlil_ifnet { u_int8_t msdl[DLIL_SDLMAXLEN]; /* mask storage */ } dl_if_lladdr; u_int8_t dl_if_descstorage[IF_DESCSIZE]; /* desc storage */ + u_int8_t dl_if_permanent_ether[ETHER_ADDR_LEN]; /* permanent address */ + u_int8_t dl_if_permanent_ether_is_set; + u_int8_t dl_if_unused; struct dlil_threading_info dl_if_inpstorage; /* input thread storage */ ctrace_t dl_if_attach; /* attach PC stacktrace */ ctrace_t dl_if_detach; /* detach PC stacktrace */ @@ -287,40 +289,25 @@ static unsigned int ifnet_debug; /* debugging (disabled) */ static unsigned int dlif_size; /* size of dlil_ifnet to allocate */ static unsigned int dlif_bufsize; /* size of dlif_size + headroom */ static struct zone *dlif_zone; /* zone for dlil_ifnet */ - -#define DLIF_ZONE_MAX IFNETS_MAX /* maximum elements in zone */ #define DLIF_ZONE_NAME "ifnet" /* zone name */ -static unsigned int dlif_filt_size; /* size of ifnet_filter */ -static struct zone *dlif_filt_zone; /* zone for ifnet_filter */ - -#define DLIF_FILT_ZONE_MAX 8 /* maximum elements in zone */ -#define DLIF_FILT_ZONE_NAME "ifnet_filter" /* zone name */ - -static unsigned int dlif_phash_size; /* size of ifnet proto hash table */ -static struct zone *dlif_phash_zone; /* zone for ifnet proto hash table */ +static ZONE_DECLARE(dlif_filt_zone, "ifnet_filter", + sizeof(struct ifnet_filter), ZC_ZFREE_CLEARMEM); -#define DLIF_PHASH_ZONE_MAX DLIF_ZONE_MAX /* maximum elements in zone */ -#define DLIF_PHASH_ZONE_NAME "ifnet_proto_hash" /* zone name */ +static ZONE_DECLARE(dlif_phash_zone, "ifnet_proto_hash", + sizeof(struct proto_hash_entry) * PROTO_HASH_SLOTS, ZC_ZFREE_CLEARMEM); -static unsigned int dlif_proto_size; /* size of if_proto */ -static struct zone *dlif_proto_zone; /* zone for if_proto */ - -#define DLIF_PROTO_ZONE_MAX (DLIF_ZONE_MAX*2) /* maximum elements in zone */ -#define DLIF_PROTO_ZONE_NAME "ifnet_proto" /* zone name */ +static ZONE_DECLARE(dlif_proto_zone, "ifnet_proto", + sizeof(struct if_proto), ZC_ZFREE_CLEARMEM); static unsigned int dlif_tcpstat_size; /* size of tcpstat_local to allocate */ static unsigned int dlif_tcpstat_bufsize; /* size of dlif_tcpstat_size + headroom */ static struct zone *dlif_tcpstat_zone; /* zone for tcpstat_local */ - -#define DLIF_TCPSTAT_ZONE_MAX 1 /* maximum elements in zone */ #define DLIF_TCPSTAT_ZONE_NAME "ifnet_tcpstat" /* zone name */ static unsigned int dlif_udpstat_size; /* size of udpstat_local to allocate */ static unsigned int dlif_udpstat_bufsize; /* size of dlif_udpstat_size + headroom */ static struct zone *dlif_udpstat_zone; /* zone for udpstat_local */ - -#define DLIF_UDPSTAT_ZONE_MAX 1 /* maximum elements in zone */ #define DLIF_UDPSTAT_ZONE_NAME "ifnet_udpstat" /* zone name */ static u_int32_t net_rtref; @@ -378,14 +365,14 @@ static errno_t ifp_if_add_proto(struct ifnet *, protocol_family_t, const struct ifnet_demux_desc *, u_int32_t); static errno_t ifp_if_del_proto(struct ifnet *, protocol_family_t); static errno_t ifp_if_check_multi(struct ifnet *, const struct sockaddr *); -#if CONFIG_EMBEDDED +#if !XNU_TARGET_OS_OSX static errno_t ifp_if_framer(struct ifnet *, struct mbuf **, const struct sockaddr *, const char *, const char *, u_int32_t *, u_int32_t *); -#else +#else /* XNU_TARGET_OS_OSX */ static errno_t ifp_if_framer(struct ifnet *, struct mbuf **, const struct 
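The dlil.c hunks in this patch retire the zinit()/zone_change()/zalloc()+bzero() sequences in favor of ZONE_DECLARE / zone_create with ZC_ZFREE_CLEARMEM and zalloc_flags(..., Z_WAITOK | Z_ZERO). A minimal sketch of the new idiom as used here (the struct and zone names below are placeholders, not from the diff):

/* Placeholder type for illustration only. */
struct example_entry {
        int ee_field;
};

/* Zone is declared up front; ZC_ZFREE_CLEARMEM has the allocator scrub
 * elements, so callers no longer bzero() by hand. */
static ZONE_DECLARE(example_zone, "example_zone",
    sizeof(struct example_entry), ZC_ZFREE_CLEARMEM);

static struct example_entry *
example_entry_alloc(void)
{
        /* Z_ZERO replaces the old explicit bzero() after zalloc(). */
        return zalloc_flags(example_zone, Z_WAITOK | Z_ZERO);
}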
sockaddr *, const char *, const char *); -#endif /* CONFIG_EMBEDDED */ +#endif /* XNU_TARGET_OS_OSX */ static errno_t ifp_if_framer_extended(struct ifnet *, struct mbuf **, const struct sockaddr *, const char *, const char *, u_int32_t *, u_int32_t *); @@ -395,6 +382,13 @@ static void ifp_if_event(struct ifnet *, const struct kev_msg *); static __inline void ifp_inc_traffic_class_in(struct ifnet *, struct mbuf *); static __inline void ifp_inc_traffic_class_out(struct ifnet *, struct mbuf *); +static errno_t dlil_input_async(struct dlil_threading_info *, struct ifnet *, + struct mbuf *, struct mbuf *, const struct ifnet_stat_increment_param *, + boolean_t, struct thread *); +static errno_t dlil_input_sync(struct dlil_threading_info *, struct ifnet *, + struct mbuf *, struct mbuf *, const struct ifnet_stat_increment_param *, + boolean_t, struct thread *); + static void dlil_main_input_thread_func(void *, wait_result_t); static void dlil_main_input_thread_cont(void *, wait_result_t); @@ -404,7 +398,8 @@ static void dlil_input_thread_cont(void *, wait_result_t); static void dlil_rxpoll_input_thread_func(void *, wait_result_t); static void dlil_rxpoll_input_thread_cont(void *, wait_result_t); -static int dlil_create_input_thread(ifnet_t, struct dlil_threading_info *); +static int dlil_create_input_thread(ifnet_t, struct dlil_threading_info *, + thread_continue_t *); static void dlil_terminate_input_thread(struct dlil_threading_info *); static void dlil_input_stats_add(const struct ifnet_stat_increment_param *, struct dlil_threading_info *, struct ifnet *, boolean_t); @@ -429,7 +424,7 @@ static void dlil_incr_pending_thread_count(void); static void dlil_decr_pending_thread_count(void); static void ifnet_detacher_thread_func(void *, wait_result_t); -static int ifnet_detacher_thread_cont(int); +static void ifnet_detacher_thread_cont(void *, wait_result_t); static void ifnet_detach_final(struct ifnet *); static void ifnet_detaching_enqueue(struct ifnet *); static struct ifnet *ifnet_detaching_dequeue(void); @@ -445,10 +440,8 @@ static errno_t ifnet_enqueue_common(struct ifnet *, classq_pkt_t *, static void ifp_src_route_copyout(struct ifnet *, struct route *); static void ifp_src_route_copyin(struct ifnet *, struct route *); -#if INET6 static void ifp_src_route6_copyout(struct ifnet *, struct route_in6 *); static void ifp_src_route6_copyin(struct ifnet *, struct route_in6 *); -#endif /* INET6 */ static int sysctl_rxpoll SYSCTL_HANDLER_ARGS; static int sysctl_rxpoll_mode_holdtime SYSCTL_HANDLER_ARGS; @@ -472,6 +465,7 @@ static int sysctl_input_thread_termination_spin SYSCTL_HANDLER_ARGS; /* The following are protected by dlil_ifnet_lock */ static TAILQ_HEAD(, ifnet) ifnet_detaching_head; static u_int32_t ifnet_detaching_cnt; +static boolean_t ifnet_detaching_embryonic; static void *ifnet_delayed_run; /* wait channel for detaching thread */ decl_lck_mtx_data(static, ifnet_fc_lock); @@ -509,11 +503,8 @@ RB_HEAD(ifnet_fc_tree, ifnet_fc_entry) ifnet_fc_tree; RB_PROTOTYPE(ifnet_fc_tree, ifnet_fc_entry, ifce_entry, ifce_cmp); RB_GENERATE(ifnet_fc_tree, ifnet_fc_entry, ifce_entry, ifce_cmp); -static unsigned int ifnet_fc_zone_size; /* sizeof ifnet_fc_entry */ -static struct zone *ifnet_fc_zone; /* ifnet_fc_entry zone */ - -#define IFNET_FC_ZONE_NAME "ifnet_fc_zone" -#define IFNET_FC_ZONE_MAX 32 +static ZONE_DECLARE(ifnet_fc_zone, "ifnet_fc_zone", + sizeof(struct ifnet_fc_entry), ZC_ZFREE_CLEARMEM); extern void bpfdetach(struct ifnet *); extern void proto_input_run(void); @@ -526,12 +517,12 @@ extern 
uint32_t tcp_count_opportunistic(unsigned int ifindex, __private_extern__ void link_rtrequest(int, struct rtentry *, struct sockaddr *); #if CONFIG_MACF -#ifdef CONFIG_EMBEDDED +#if !XNU_TARGET_OS_OSX int dlil_lladdr_ckreq = 1; -#else +#else /* XNU_TARGET_OS_OSX */ int dlil_lladdr_ckreq = 0; -#endif -#endif +#endif /* XNU_TARGET_OS_OSX */ +#endif /* CONFIG_MACF */ #if DEBUG int dlil_verbose = 1; @@ -671,6 +662,12 @@ SYSCTL_UINT(_net_link_generic_system, OID_AUTO, start_delay_disabled, CTLFLAG_RW | CTLFLAG_LOCKED, &ifnet_delay_start_disabled, 0, "number of times start was delayed"); +static inline void +ifnet_delay_start_disabled_increment(void) +{ + OSIncrementAtomic(&ifnet_delay_start_disabled); +} + #define HWCKSUM_DBG_PARTIAL_FORCED 0x1 /* forced partial checksum */ #define HWCKSUM_DBG_PARTIAL_RXOFF_ADJ 0x2 /* adjust start offset */ #define HWCKSUM_DBG_FINALIZE_FORCED 0x10 /* forced finalize */ @@ -772,9 +769,10 @@ struct net_api_stats net_api_stats; SYSCTL_STRUCT(_net, OID_AUTO, api_stats, CTLFLAG_RD | CTLFLAG_LOCKED, &net_api_stats, net_api_stats, ""); - unsigned int net_rxpoll = 1; unsigned int net_affinity = 1; +unsigned int net_async = 1; /* 0: synchronous, 1: asynchronous */ + static kern_return_t dlil_affinity_set(struct thread *, u_int32_t); extern u_int32_t inject_buckets; @@ -837,6 +835,7 @@ static struct rxpoll_time_tbl rxpoll_tbl[] = { decl_lck_mtx_data(static, dlil_thread_sync_lock); static uint32_t dlil_pending_thread_cnt = 0; + static void dlil_incr_pending_thread_count(void) { @@ -929,9 +928,6 @@ if_proto_free(struct if_proto *proto) return; } - /* No more reference on this, protocol must have been detached */ - VERIFY(proto->detached); - if (proto->proto_kpi == kProtoKPI_v1) { if (proto->kpi.v1.detached) { proto->kpi.v1.detached(ifp, proto->protocol_family); @@ -949,13 +945,18 @@ if_proto_free(struct if_proto *proto) */ if_rtproto_del(ifp, proto_family); + ifnet_lock_shared(ifp); + + /* No more reference on this, protocol must have been detached */ + VERIFY(proto->detached); + /* * The reserved field carries the number of protocol still attached * (subject to change) */ - ifnet_lock_shared(ifp); ev_pr_data.proto_family = proto_family; ev_pr_data.proto_remaining_count = dlil_ifp_protolist(ifp, NULL, 0); + ifnet_lock_done(ifp); dlil_post_msg(ifp, KEV_DL_SUBCLASS, KEV_DL_PROTO_DETACHED, @@ -1050,7 +1051,6 @@ if_inetdata_lock_done(struct ifnet *ifp) } #endif -#if INET6 __private_extern__ void if_inet6data_lock_shared(struct ifnet *ifp) { @@ -1068,7 +1068,6 @@ if_inet6data_lock_done(struct ifnet *ifp) { lck_rw_done(&ifp->if_inet6data_lock); } -#endif __private_extern__ void ifnet_head_lock_shared(void) @@ -1208,12 +1207,11 @@ dlil_alloc_local_stats(struct ifnet *ifp) if (ifp->if_tcp_stat == NULL && ifp->if_udp_stat == NULL) { /* allocate tcpstat_local structure */ - buf = zalloc(dlif_tcpstat_zone); + buf = zalloc_flags(dlif_tcpstat_zone, Z_WAITOK | Z_ZERO); if (buf == NULL) { ret = ENOMEM; goto end; } - bzero(buf, dlif_tcpstat_bufsize); /* Get the 64-bit aligned base address for this object */ base = (void *)P2ROUNDUP((intptr_t)buf + sizeof(u_int64_t), @@ -1230,12 +1228,11 @@ dlil_alloc_local_stats(struct ifnet *ifp) ifp->if_tcp_stat = base; /* allocate udpstat_local structure */ - buf = zalloc(dlif_udpstat_zone); + buf = zalloc_flags(dlif_udpstat_zone, Z_WAITOK | Z_ZERO); if (buf == NULL) { ret = ENOMEM; goto end; } - bzero(buf, dlif_udpstat_bufsize); /* Get the 64-bit aligned base address for this object */ base = (void *)P2ROUNDUP((intptr_t)buf + sizeof(u_int64_t), @@ 
-1321,39 +1318,68 @@ dlil_reset_rxpoll_params(ifnet_t ifp) } static int -dlil_create_input_thread(ifnet_t ifp, struct dlil_threading_info *inp) +dlil_create_input_thread(ifnet_t ifp, struct dlil_threading_info *inp, + thread_continue_t *thfunc) { boolean_t dlil_rxpoll_input; - thread_continue_t func; + thread_continue_t func = NULL; u_int32_t limit; - int error; + int error = 0; dlil_rxpoll_input = (ifp != NULL && net_rxpoll && (ifp->if_eflags & IFEF_RXPOLL) && (ifp->if_xflags & IFXF_LEGACY)); + /* default strategy utilizes the DLIL worker thread */ + inp->dlth_strategy = dlil_input_async; + /* NULL ifp indicates the main input thread, called at dlil_init time */ if (ifp == NULL) { + /* + * Main input thread only. + */ func = dlil_main_input_thread_func; VERIFY(inp == dlil_main_input_thread); - (void) strlcat(inp->input_name, + (void) strlcat(inp->dlth_name, "main_input", DLIL_THREADNAME_LEN); } else if (dlil_rxpoll_input) { + /* + * Legacy (non-netif) hybrid polling. + */ func = dlil_rxpoll_input_thread_func; VERIFY(inp != dlil_main_input_thread); - (void) snprintf(inp->input_name, DLIL_THREADNAME_LEN, + (void) snprintf(inp->dlth_name, DLIL_THREADNAME_LEN, "%s_input_poll", if_name(ifp)); - } else { + } else if (net_async || (ifp->if_xflags & IFXF_LEGACY)) { + /* + * Asynchronous strategy. + */ func = dlil_input_thread_func; VERIFY(inp != dlil_main_input_thread); - (void) snprintf(inp->input_name, DLIL_THREADNAME_LEN, + (void) snprintf(inp->dlth_name, DLIL_THREADNAME_LEN, "%s_input", if_name(ifp)); + } else { + /* + * Synchronous strategy if there's a netif below and + * the device isn't capable of hybrid polling. + */ + ASSERT(func == NULL); + ASSERT(!(ifp->if_xflags & IFXF_LEGACY)); + VERIFY(inp != dlil_main_input_thread); + ASSERT(!inp->dlth_affinity); + inp->dlth_strategy = dlil_input_sync; } - VERIFY(inp->input_thr == THREAD_NULL); + VERIFY(inp->dlth_thread == THREAD_NULL); - inp->lck_grp = lck_grp_alloc_init(inp->input_name, dlil_grp_attributes); - lck_mtx_init(&inp->input_lck, inp->lck_grp, dlil_lck_attributes); + /* let caller know */ + if (thfunc != NULL) { + *thfunc = func; + } - inp->ifp = ifp; /* NULL for main input thread */ + inp->dlth_lock_grp = lck_grp_alloc_init(inp->dlth_name, + dlil_grp_attributes); + lck_mtx_init(&inp->dlth_lock, inp->dlth_lock_grp, dlil_lck_attributes); + + inp->dlth_ifp = ifp; /* NULL for main input thread */ /* * For interfaces that support opportunistic polling, set the * low and high watermarks for outstanding inbound packets/bytes. 
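The input-thread strategy selection added to dlil_create_input_thread above can be hard to follow inside the diff; condensed, the decision looks like this (an illustrative restatement of the hunk, not new logic):

/* Condensed restatement of the selection in dlil_create_input_thread. */
inp->dlth_strategy = dlil_input_async;               /* default strategy */
if (ifp == NULL) {
        func = dlil_main_input_thread_func;          /* shared main input thread */
} else if (net_rxpoll && (ifp->if_eflags & IFEF_RXPOLL) &&
    (ifp->if_xflags & IFXF_LEGACY)) {
        func = dlil_rxpoll_input_thread_func;        /* legacy hybrid polling */
} else if (net_async || (ifp->if_xflags & IFXF_LEGACY)) {
        func = dlil_input_thread_func;               /* asynchronous worker thread */
} else {
        func = NULL;                                 /* synchronous: no thread created */
        inp->dlth_strategy = dlil_input_sync;        /* caller runs input inline */
}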
@@ -1369,24 +1395,38 @@ dlil_create_input_thread(ifnet_t ifp, struct dlil_threading_info *inp) limit = (u_int32_t)-1; } - _qinit(&inp->rcvq_pkts, Q_DROPTAIL, limit, QP_MBUF); + _qinit(&inp->dlth_pkts, Q_DROPTAIL, limit, QP_MBUF); if (inp == dlil_main_input_thread) { struct dlil_main_threading_info *inpm = (struct dlil_main_threading_info *)inp; _qinit(&inpm->lo_rcvq_pkts, Q_DROPTAIL, limit, QP_MBUF); } - error = kernel_thread_start(func, inp, &inp->input_thr); + if (func == NULL) { + ASSERT(!(ifp->if_xflags & IFXF_LEGACY)); + ASSERT(error == 0); + error = ENODEV; + goto done; + } + + error = kernel_thread_start(func, inp, &inp->dlth_thread); if (error == KERN_SUCCESS) { - ml_thread_policy(inp->input_thr, MACHINE_GROUP, - (MACHINE_NETWORK_GROUP | MACHINE_NETWORK_NETISR)); + thread_precedence_policy_data_t info; + __unused kern_return_t kret; + + bzero(&info, sizeof(info)); + info.importance = 0; + kret = thread_policy_set(inp->dlth_thread, + THREAD_PRECEDENCE_POLICY, (thread_policy_t)&info, + THREAD_PRECEDENCE_POLICY_COUNT); + ASSERT(kret == KERN_SUCCESS); /* * We create an affinity set so that the matching workloop * thread or the starter thread (for loopback) can be * scheduled on the same processor set as the input thread. */ if (net_affinity) { - struct thread *tp = inp->input_thr; + struct thread *tp = inp->dlth_thread; u_int32_t tag; /* * Randomize to reduce the probability @@ -1395,8 +1435,8 @@ dlil_create_input_thread(ifnet_t ifp, struct dlil_threading_info *inp) read_frandom(&tag, sizeof(tag)); if (dlil_affinity_set(tp, tag) == KERN_SUCCESS) { thread_reference(tp); - inp->tag = tag; - inp->net_affinity = TRUE; + inp->dlth_affinity_tag = tag; + inp->dlth_affinity = TRUE; } } } else if (inp == dlil_main_input_thread) { @@ -1409,6 +1449,7 @@ dlil_create_input_thread(ifnet_t ifp, struct dlil_threading_info *inp) } OSAddAtomic(1, &cur_dlil_input_threads); +done: return error; } @@ -1439,34 +1480,36 @@ sysctl_input_thread_termination_spin SYSCTL_HANDLER_ARGS static void dlil_clean_threading_info(struct dlil_threading_info *inp) { - lck_mtx_destroy(&inp->input_lck, inp->lck_grp); - lck_grp_free(inp->lck_grp); - - inp->input_waiting = 0; - inp->wtot = 0; - bzero(inp->input_name, sizeof(inp->input_name)); - inp->ifp = NULL; - VERIFY(qhead(&inp->rcvq_pkts) == NULL && qempty(&inp->rcvq_pkts)); - qlimit(&inp->rcvq_pkts) = 0; - bzero(&inp->stats, sizeof(inp->stats)); - - VERIFY(!inp->net_affinity); - inp->input_thr = THREAD_NULL; - VERIFY(inp->wloop_thr == THREAD_NULL); - VERIFY(inp->poll_thr == THREAD_NULL); - VERIFY(inp->tag == 0); + lck_mtx_destroy(&inp->dlth_lock, inp->dlth_lock_grp); + lck_grp_free(inp->dlth_lock_grp); + inp->dlth_lock_grp = NULL; + + inp->dlth_flags = 0; + inp->dlth_wtot = 0; + bzero(inp->dlth_name, sizeof(inp->dlth_name)); + inp->dlth_ifp = NULL; + VERIFY(qhead(&inp->dlth_pkts) == NULL && qempty(&inp->dlth_pkts)); + qlimit(&inp->dlth_pkts) = 0; + bzero(&inp->dlth_stats, sizeof(inp->dlth_stats)); + + VERIFY(!inp->dlth_affinity); + inp->dlth_thread = THREAD_NULL; + inp->dlth_strategy = NULL; + VERIFY(inp->dlth_driver_thread == THREAD_NULL); + VERIFY(inp->dlth_poller_thread == THREAD_NULL); + VERIFY(inp->dlth_affinity_tag == 0); #if IFNET_INPUT_SANITY_CHK - inp->input_mbuf_cnt = 0; + inp->dlth_pkts_cnt = 0; #endif /* IFNET_INPUT_SANITY_CHK */ } static void dlil_terminate_input_thread(struct dlil_threading_info *inp) { - struct ifnet *ifp = inp->ifp; + struct ifnet *ifp = inp->dlth_ifp; classq_pkt_t pkt = CLASSQ_PKT_INITIALIZER(pkt); - VERIFY(current_thread() == 
inp->input_thr); + VERIFY(current_thread() == inp->dlth_thread); VERIFY(inp != dlil_main_input_thread); OSAddAtomic(-1, &cur_dlil_input_threads); @@ -1483,12 +1526,12 @@ dlil_terminate_input_thread(struct dlil_threading_info *inp) } #endif /* TEST_INPUT_THREAD_TERMINATION */ - lck_mtx_lock_spin(&inp->input_lck); - _getq_all(&inp->rcvq_pkts, &pkt, NULL, NULL, NULL); - VERIFY((inp->input_waiting & DLIL_INPUT_TERMINATE) != 0); - inp->input_waiting |= DLIL_INPUT_TERMINATE_COMPLETE; - wakeup_one((caddr_t)&inp->input_waiting); - lck_mtx_unlock(&inp->input_lck); + lck_mtx_lock_spin(&inp->dlth_lock); + _getq_all(&inp->dlth_pkts, &pkt, NULL, NULL, NULL); + VERIFY((inp->dlth_flags & DLIL_INPUT_TERMINATE) != 0); + inp->dlth_flags |= DLIL_INPUT_TERMINATE_COMPLETE; + wakeup_one((caddr_t)&inp->dlth_flags); + lck_mtx_unlock(&inp->dlth_lock); /* free up pending packets */ if (pkt.cp_mbuf != NULL) { @@ -1647,6 +1690,8 @@ dlil_init(void) PE_parse_boot_argn("net_rtref", &net_rtref, sizeof(net_rtref)); + PE_parse_boot_argn("net_async", &net_async, sizeof(net_async)); + PE_parse_boot_argn("ifnet_debug", &ifnet_debug, sizeof(ifnet_debug)); VERIFY(dlil_pending_thread_cnt == 0); @@ -1654,85 +1699,27 @@ dlil_init(void) sizeof(struct dlil_ifnet_dbg); /* Enforce 64-bit alignment for dlil_ifnet structure */ dlif_bufsize = dlif_size + sizeof(void *) + sizeof(u_int64_t); - dlif_bufsize = P2ROUNDUP(dlif_bufsize, sizeof(u_int64_t)); - dlif_zone = zinit(dlif_bufsize, DLIF_ZONE_MAX * dlif_bufsize, - 0, DLIF_ZONE_NAME); - if (dlif_zone == NULL) { - panic_plain("%s: failed allocating %s", __func__, - DLIF_ZONE_NAME); - /* NOTREACHED */ - } - zone_change(dlif_zone, Z_EXPAND, TRUE); - zone_change(dlif_zone, Z_CALLERACCT, FALSE); - - dlif_filt_size = sizeof(struct ifnet_filter); - dlif_filt_zone = zinit(dlif_filt_size, - DLIF_FILT_ZONE_MAX * dlif_filt_size, 0, DLIF_FILT_ZONE_NAME); - if (dlif_filt_zone == NULL) { - panic_plain("%s: failed allocating %s", __func__, - DLIF_FILT_ZONE_NAME); - /* NOTREACHED */ - } - zone_change(dlif_filt_zone, Z_EXPAND, TRUE); - zone_change(dlif_filt_zone, Z_CALLERACCT, FALSE); - - dlif_phash_size = sizeof(struct proto_hash_entry) * PROTO_HASH_SLOTS; - dlif_phash_zone = zinit(dlif_phash_size, - DLIF_PHASH_ZONE_MAX * dlif_phash_size, 0, DLIF_PHASH_ZONE_NAME); - if (dlif_phash_zone == NULL) { - panic_plain("%s: failed allocating %s", __func__, - DLIF_PHASH_ZONE_NAME); - /* NOTREACHED */ - } - zone_change(dlif_phash_zone, Z_EXPAND, TRUE); - zone_change(dlif_phash_zone, Z_CALLERACCT, FALSE); - - dlif_proto_size = sizeof(struct if_proto); - dlif_proto_zone = zinit(dlif_proto_size, - DLIF_PROTO_ZONE_MAX * dlif_proto_size, 0, DLIF_PROTO_ZONE_NAME); - if (dlif_proto_zone == NULL) { - panic_plain("%s: failed allocating %s", __func__, - DLIF_PROTO_ZONE_NAME); - /* NOTREACHED */ - } - zone_change(dlif_proto_zone, Z_EXPAND, TRUE); - zone_change(dlif_proto_zone, Z_CALLERACCT, FALSE); + dlif_bufsize = (uint32_t)P2ROUNDUP(dlif_bufsize, sizeof(u_int64_t)); + dlif_zone = zone_create(DLIF_ZONE_NAME, dlif_bufsize, ZC_ZFREE_CLEARMEM); dlif_tcpstat_size = sizeof(struct tcpstat_local); /* Enforce 64-bit alignment for tcpstat_local structure */ dlif_tcpstat_bufsize = dlif_tcpstat_size + sizeof(void *) + sizeof(u_int64_t); - dlif_tcpstat_bufsize = + dlif_tcpstat_bufsize = (uint32_t) P2ROUNDUP(dlif_tcpstat_bufsize, sizeof(u_int64_t)); - dlif_tcpstat_zone = zinit(dlif_tcpstat_bufsize, - DLIF_TCPSTAT_ZONE_MAX * dlif_tcpstat_bufsize, 0, - DLIF_TCPSTAT_ZONE_NAME); - if (dlif_tcpstat_zone == NULL) { - panic_plain("%s: failed 
allocating %s", __func__, - DLIF_TCPSTAT_ZONE_NAME); - /* NOTREACHED */ - } - zone_change(dlif_tcpstat_zone, Z_EXPAND, TRUE); - zone_change(dlif_tcpstat_zone, Z_CALLERACCT, FALSE); + dlif_tcpstat_zone = zone_create(DLIF_TCPSTAT_ZONE_NAME, + dlif_tcpstat_bufsize, ZC_ZFREE_CLEARMEM); dlif_udpstat_size = sizeof(struct udpstat_local); /* Enforce 64-bit alignment for udpstat_local structure */ dlif_udpstat_bufsize = dlif_udpstat_size + sizeof(void *) + sizeof(u_int64_t); - dlif_udpstat_bufsize = + dlif_udpstat_bufsize = (uint32_t) P2ROUNDUP(dlif_udpstat_bufsize, sizeof(u_int64_t)); - dlif_udpstat_zone = zinit(dlif_udpstat_bufsize, - DLIF_TCPSTAT_ZONE_MAX * dlif_udpstat_bufsize, 0, - DLIF_UDPSTAT_ZONE_NAME); - if (dlif_udpstat_zone == NULL) { - panic_plain("%s: failed allocating %s", __func__, - DLIF_UDPSTAT_ZONE_NAME); - /* NOTREACHED */ - } - zone_change(dlif_udpstat_zone, Z_EXPAND, TRUE); - zone_change(dlif_udpstat_zone, Z_CALLERACCT, FALSE); + dlif_udpstat_zone = zone_create(DLIF_UDPSTAT_ZONE_NAME, + dlif_udpstat_bufsize, ZC_ZFREE_CLEARMEM); - ifnet_llreach_init(); eventhandler_lists_ctxt_init(&ifnet_evhdlr_ctxt); TAILQ_INIT(&dlil_ifnet_head); @@ -1767,17 +1754,6 @@ dlil_init(void) /* Setup interface flow control related items */ lck_mtx_init(&ifnet_fc_lock, dlil_lock_group, dlil_lck_attributes); - ifnet_fc_zone_size = sizeof(struct ifnet_fc_entry); - ifnet_fc_zone = zinit(ifnet_fc_zone_size, - IFNET_FC_ZONE_MAX * ifnet_fc_zone_size, 0, IFNET_FC_ZONE_NAME); - if (ifnet_fc_zone == NULL) { - panic_plain("%s: failed allocating %s", __func__, - IFNET_FC_ZONE_NAME); - /* NOTREACHED */ - } - zone_change(ifnet_fc_zone, Z_EXPAND, TRUE); - zone_change(ifnet_fc_zone, Z_CALLERACCT, FALSE); - /* Initialize interface address subsystem */ ifa_init(); @@ -1820,7 +1796,7 @@ dlil_init(void) * detacher threads once everything is initialized. */ dlil_incr_pending_thread_count(); - dlil_create_input_thread(NULL, dlil_main_input_thread); + (void) dlil_create_input_thread(NULL, dlil_main_input_thread, NULL); /* * Create ifnet detacher thread. @@ -1843,15 +1819,15 @@ dlil_init(void) */ lck_mtx_lock(&dlil_thread_sync_lock); while (dlil_pending_thread_cnt != 0) { - DLIL_PRINTF("%s: Waiting for all the create dlil kernel threads " - "to get scheduled at least once.\n", __func__); - (void) msleep(&dlil_pending_thread_cnt, &dlil_thread_sync_lock, (PZERO - 1), - __func__, NULL); + DLIL_PRINTF("%s: Waiting for all the create dlil kernel " + "threads to get scheduled at least once.\n", __func__); + (void) msleep(&dlil_pending_thread_cnt, &dlil_thread_sync_lock, + (PZERO - 1), __func__, NULL); LCK_MTX_ASSERT(&dlil_thread_sync_lock, LCK_ASSERT_OWNED); } lck_mtx_unlock(&dlil_thread_sync_lock); - DLIL_PRINTF("%s: All the created dlil kernel threads have been scheduled " - "at least once. Proceeding.\n", __func__); + DLIL_PRINTF("%s: All the created dlil kernel threads have been " + "scheduled at least once. 
Proceeding.\n", __func__); } static void @@ -1910,12 +1886,11 @@ dlil_attach_filter(struct ifnet *ifp, const struct iff_filter *if_filter, goto done; } - filter = zalloc(dlif_filt_zone); + filter = zalloc_flags(dlif_filt_zone, Z_WAITOK | Z_ZERO); if (filter == NULL) { retval = ENOMEM; goto done; } - bzero(filter, dlif_filt_size); /* refcnt held above during lookup */ filter->filt_flags = flags; @@ -2070,6 +2045,34 @@ dlil_detach_filter(interface_filter_t filter) dlil_detach_filter_internal(filter, 0); } +__private_extern__ boolean_t +dlil_has_ip_filter(void) +{ + boolean_t has_filter = (net_api_stats.nas_ipf_add_count > 0); + DTRACE_IP1(dlil_has_ip_filter, boolean_t, has_filter); + return has_filter; +} + +__private_extern__ boolean_t +dlil_has_if_filter(struct ifnet *ifp) +{ + boolean_t has_filter = !TAILQ_EMPTY(&ifp->if_flt_head); + DTRACE_IP1(dlil_has_if_filter, boolean_t, has_filter); + return has_filter; +} + +static inline void +dlil_input_wakeup(struct dlil_threading_info *inp) +{ + LCK_MTX_ASSERT(&inp->dlth_lock, LCK_MTX_ASSERT_OWNED); + + inp->dlth_flags |= DLIL_INPUT_WAITING; + if (!(inp->dlth_flags & DLIL_INPUT_RUNNING)) { + inp->dlth_wtot++; + wakeup_one((caddr_t)&inp->dlth_flags); + } +} + __attribute__((noreturn)) static void dlil_main_input_thread_func(void *v, wait_result_t w) @@ -2078,14 +2081,16 @@ dlil_main_input_thread_func(void *v, wait_result_t w) struct dlil_threading_info *inp = v; VERIFY(inp == dlil_main_input_thread); - VERIFY(inp->ifp == NULL); - VERIFY(current_thread() == inp->input_thr); - - dlil_decr_pending_thread_count(); - lck_mtx_lock(&inp->input_lck); - VERIFY(!(inp->input_waiting & DLIL_INPUT_RUNNING)); - (void) assert_wait(&inp->input_waiting, THREAD_UNINT); - lck_mtx_unlock(&inp->input_lck); + VERIFY(inp->dlth_ifp == NULL); + VERIFY(current_thread() == inp->dlth_thread); + + lck_mtx_lock(&inp->dlth_lock); + VERIFY(!(inp->dlth_flags & (DLIL_INPUT_EMBRYONIC | DLIL_INPUT_RUNNING))); + (void) assert_wait(&inp->dlth_flags, THREAD_UNINT); + inp->dlth_flags |= DLIL_INPUT_EMBRYONIC; + /* wake up once to get out of embryonic state */ + dlil_input_wakeup(inp); + lck_mtx_unlock(&inp->dlth_lock); (void) thread_block_parameter(dlil_main_input_thread_cont, inp); /* NOTREACHED */ __builtin_unreachable(); @@ -2110,25 +2115,31 @@ dlil_main_input_thread_cont(void *v, wait_result_t wres) /* main input thread is uninterruptible */ VERIFY(wres != THREAD_INTERRUPTED); - lck_mtx_lock_spin(&inp->input_lck); - VERIFY(!(inp->input_waiting & (DLIL_INPUT_TERMINATE | + lck_mtx_lock_spin(&inp->dlth_lock); + VERIFY(!(inp->dlth_flags & (DLIL_INPUT_TERMINATE | DLIL_INPUT_RUNNING))); - inp->input_waiting |= DLIL_INPUT_RUNNING; + inp->dlth_flags |= DLIL_INPUT_RUNNING; while (1) { struct mbuf *m = NULL, *m_loop = NULL; u_int32_t m_cnt, m_cnt_loop; classq_pkt_t pkt = CLASSQ_PKT_INITIALIZER(pkt); boolean_t proto_req; + boolean_t embryonic; - inp->input_waiting &= ~DLIL_INPUT_WAITING; + inp->dlth_flags &= ~DLIL_INPUT_WAITING; - proto_req = (inp->input_waiting & + if (__improbable(embryonic = + (inp->dlth_flags & DLIL_INPUT_EMBRYONIC))) { + inp->dlth_flags &= ~DLIL_INPUT_EMBRYONIC; + } + + proto_req = (inp->dlth_flags & (DLIL_PROTO_WAITING | DLIL_PROTO_REGISTER)); /* Packets for non-dedicated interfaces other than lo0 */ - m_cnt = qlen(&inp->rcvq_pkts); - _getq_all(&inp->rcvq_pkts, &pkt, NULL, NULL, NULL); + m_cnt = qlen(&inp->dlth_pkts); + _getq_all(&inp->dlth_pkts, &pkt, NULL, NULL, NULL); m = pkt.cp_mbuf; /* Packets exclusive to lo0 */ @@ -2136,41 +2147,45 @@ 
dlil_main_input_thread_cont(void *v, wait_result_t wres) _getq_all(&inpm->lo_rcvq_pkts, &pkt, NULL, NULL, NULL); m_loop = pkt.cp_mbuf; - inp->wtot = 0; + inp->dlth_wtot = 0; - lck_mtx_unlock(&inp->input_lck); + lck_mtx_unlock(&inp->dlth_lock); + + if (__improbable(embryonic)) { + dlil_decr_pending_thread_count(); + } /* * NOTE warning %%% attention !!!! * We should think about putting some thread starvation * safeguards if we deal with long chains of packets. */ - if (m_loop != NULL) { + if (__probable(m_loop != NULL)) { dlil_input_packet_list_extended(lo_ifp, m_loop, m_cnt_loop, IFNET_MODEL_INPUT_POLL_OFF); } - if (m != NULL) { + if (__probable(m != NULL)) { dlil_input_packet_list_extended(NULL, m, m_cnt, IFNET_MODEL_INPUT_POLL_OFF); } - if (proto_req) { + if (__improbable(proto_req)) { proto_input_run(); } - lck_mtx_lock_spin(&inp->input_lck); - VERIFY(inp->input_waiting & DLIL_INPUT_RUNNING); + lck_mtx_lock_spin(&inp->dlth_lock); + VERIFY(inp->dlth_flags & DLIL_INPUT_RUNNING); /* main input thread cannot be terminated */ - VERIFY(!(inp->input_waiting & DLIL_INPUT_TERMINATE)); - if (!(inp->input_waiting & ~DLIL_INPUT_RUNNING)) { + VERIFY(!(inp->dlth_flags & DLIL_INPUT_TERMINATE)); + if (!(inp->dlth_flags & ~DLIL_INPUT_RUNNING)) { break; } } - inp->input_waiting &= ~DLIL_INPUT_RUNNING; - (void) assert_wait(&inp->input_waiting, THREAD_UNINT); - lck_mtx_unlock(&inp->input_lck); + inp->dlth_flags &= ~DLIL_INPUT_RUNNING; + (void) assert_wait(&inp->dlth_flags, THREAD_UNINT); + lck_mtx_unlock(&inp->dlth_lock); (void) thread_block_parameter(dlil_main_input_thread_cont, inp); VERIFY(0); /* we should never get here */ @@ -2188,7 +2203,7 @@ dlil_input_thread_func(void *v, wait_result_t w) #pragma unused(w) char thread_name[MAXTHREADNAMESIZE]; struct dlil_threading_info *inp = v; - struct ifnet *ifp = inp->ifp; + struct ifnet *ifp = inp->dlth_ifp; VERIFY(inp != dlil_main_input_thread); VERIFY(ifp != NULL); @@ -2196,19 +2211,21 @@ dlil_input_thread_func(void *v, wait_result_t w) !(ifp->if_xflags & IFXF_LEGACY)); VERIFY(ifp->if_poll_mode == IFNET_MODEL_INPUT_POLL_OFF || !(ifp->if_xflags & IFXF_LEGACY)); - VERIFY(current_thread() == inp->input_thr); + VERIFY(current_thread() == inp->dlth_thread); /* construct the name for this thread, and then apply it */ bzero(thread_name, sizeof(thread_name)); (void) snprintf(thread_name, sizeof(thread_name), "dlil_input_%s", ifp->if_xname); - thread_set_thread_name(inp->input_thr, thread_name); - ifnet_decr_pending_thread_count(ifp); - - lck_mtx_lock(&inp->input_lck); - VERIFY(!(inp->input_waiting & DLIL_INPUT_RUNNING)); - (void) assert_wait(&inp->input_waiting, THREAD_UNINT); - lck_mtx_unlock(&inp->input_lck); + thread_set_thread_name(inp->dlth_thread, thread_name); + + lck_mtx_lock(&inp->dlth_lock); + VERIFY(!(inp->dlth_flags & (DLIL_INPUT_EMBRYONIC | DLIL_INPUT_RUNNING))); + (void) assert_wait(&inp->dlth_flags, THREAD_UNINT); + inp->dlth_flags |= DLIL_INPUT_EMBRYONIC; + /* wake up once to get out of embryonic state */ + dlil_input_wakeup(inp); + lck_mtx_unlock(&inp->dlth_lock); (void) thread_block_parameter(dlil_input_thread_cont, inp); /* NOTREACHED */ __builtin_unreachable(); @@ -2219,24 +2236,30 @@ static void dlil_input_thread_cont(void *v, wait_result_t wres) { struct dlil_threading_info *inp = v; - struct ifnet *ifp = inp->ifp; + struct ifnet *ifp = inp->dlth_ifp; - lck_mtx_lock_spin(&inp->input_lck); + lck_mtx_lock_spin(&inp->dlth_lock); if (__improbable(wres == THREAD_INTERRUPTED || - (inp->input_waiting & DLIL_INPUT_TERMINATE))) { + (inp->dlth_flags & 
DLIL_INPUT_TERMINATE))) { goto terminate; } - VERIFY(!(inp->input_waiting & DLIL_INPUT_RUNNING)); - inp->input_waiting |= DLIL_INPUT_RUNNING; + VERIFY(!(inp->dlth_flags & DLIL_INPUT_RUNNING)); + inp->dlth_flags |= DLIL_INPUT_RUNNING; while (1) { struct mbuf *m = NULL; classq_pkt_t pkt = CLASSQ_PKT_INITIALIZER(pkt); boolean_t notify = FALSE; + boolean_t embryonic; u_int32_t m_cnt; - inp->input_waiting &= ~DLIL_INPUT_WAITING; + inp->dlth_flags &= ~DLIL_INPUT_WAITING; + + if (__improbable(embryonic = + (inp->dlth_flags & DLIL_INPUT_EMBRYONIC))) { + inp->dlth_flags &= ~DLIL_INPUT_EMBRYONIC; + } /* * Protocol registration and injection must always use @@ -2245,21 +2268,25 @@ dlil_input_thread_cont(void *v, wait_result_t wres) * on, but that requires our knowing the interface in advance * (and the benefits might not worth the trouble.) */ - VERIFY(!(inp->input_waiting & + VERIFY(!(inp->dlth_flags & (DLIL_PROTO_WAITING | DLIL_PROTO_REGISTER))); /* Packets for this interface */ - m_cnt = qlen(&inp->rcvq_pkts); - _getq_all(&inp->rcvq_pkts, &pkt, NULL, NULL, NULL); + m_cnt = qlen(&inp->dlth_pkts); + _getq_all(&inp->dlth_pkts, &pkt, NULL, NULL, NULL); m = pkt.cp_mbuf; - inp->wtot = 0; + inp->dlth_wtot = 0; notify = dlil_input_stats_sync(ifp, inp); - lck_mtx_unlock(&inp->input_lck); + lck_mtx_unlock(&inp->dlth_lock); - if (notify) { + if (__improbable(embryonic)) { + ifnet_decr_pending_thread_count(ifp); + } + + if (__improbable(notify)) { ifnet_notify_data_threshold(ifp); } @@ -2268,29 +2295,29 @@ dlil_input_thread_cont(void *v, wait_result_t wres) * We should think about putting some thread starvation * safeguards if we deal with long chains of packets. */ - if (m != NULL) { + if (__probable(m != NULL)) { dlil_input_packet_list_extended(NULL, m, m_cnt, ifp->if_poll_mode); } - lck_mtx_lock_spin(&inp->input_lck); - VERIFY(inp->input_waiting & DLIL_INPUT_RUNNING); - if (!(inp->input_waiting & ~(DLIL_INPUT_RUNNING | + lck_mtx_lock_spin(&inp->dlth_lock); + VERIFY(inp->dlth_flags & DLIL_INPUT_RUNNING); + if (!(inp->dlth_flags & ~(DLIL_INPUT_RUNNING | DLIL_INPUT_TERMINATE))) { break; } } - inp->input_waiting &= ~DLIL_INPUT_RUNNING; + inp->dlth_flags &= ~DLIL_INPUT_RUNNING; - if (__improbable(inp->input_waiting & DLIL_INPUT_TERMINATE)) { + if (__improbable(inp->dlth_flags & DLIL_INPUT_TERMINATE)) { terminate: - lck_mtx_unlock(&inp->input_lck); + lck_mtx_unlock(&inp->dlth_lock); dlil_terminate_input_thread(inp); /* NOTREACHED */ } else { - (void) assert_wait(&inp->input_waiting, THREAD_UNINT); - lck_mtx_unlock(&inp->input_lck); + (void) assert_wait(&inp->dlth_flags, THREAD_UNINT); + lck_mtx_unlock(&inp->dlth_lock); (void) thread_block_parameter(dlil_input_thread_cont, inp); /* NOTREACHED */ } @@ -2310,24 +2337,26 @@ dlil_rxpoll_input_thread_func(void *v, wait_result_t w) #pragma unused(w) char thread_name[MAXTHREADNAMESIZE]; struct dlil_threading_info *inp = v; - struct ifnet *ifp = inp->ifp; + struct ifnet *ifp = inp->dlth_ifp; VERIFY(inp != dlil_main_input_thread); VERIFY(ifp != NULL && (ifp->if_eflags & IFEF_RXPOLL) && (ifp->if_xflags & IFXF_LEGACY)); - VERIFY(current_thread() == inp->input_thr); + VERIFY(current_thread() == inp->dlth_thread); /* construct the name for this thread, and then apply it */ bzero(thread_name, sizeof(thread_name)); (void) snprintf(thread_name, sizeof(thread_name), "dlil_input_poll_%s", ifp->if_xname); - thread_set_thread_name(inp->input_thr, thread_name); - ifnet_decr_pending_thread_count(ifp); - - lck_mtx_lock(&inp->input_lck); - VERIFY(!(inp->input_waiting & 
DLIL_INPUT_RUNNING)); - (void) assert_wait(&inp->input_waiting, THREAD_UNINT); - lck_mtx_unlock(&inp->input_lck); + thread_set_thread_name(inp->dlth_thread, thread_name); + + lck_mtx_lock(&inp->dlth_lock); + VERIFY(!(inp->dlth_flags & (DLIL_INPUT_EMBRYONIC | DLIL_INPUT_RUNNING))); + (void) assert_wait(&inp->dlth_flags, THREAD_UNINT); + inp->dlth_flags |= DLIL_INPUT_EMBRYONIC; + /* wake up once to get out of embryonic state */ + dlil_input_wakeup(inp); + lck_mtx_unlock(&inp->dlth_lock); (void) thread_block_parameter(dlil_rxpoll_input_thread_cont, inp); /* NOTREACHED */ __builtin_unreachable(); @@ -2338,28 +2367,36 @@ static void dlil_rxpoll_input_thread_cont(void *v, wait_result_t wres) { struct dlil_threading_info *inp = v; - struct ifnet *ifp = inp->ifp; + struct ifnet *ifp = inp->dlth_ifp; struct timespec ts; - lck_mtx_lock_spin(&inp->input_lck); + lck_mtx_lock_spin(&inp->dlth_lock); if (__improbable(wres == THREAD_INTERRUPTED || - (inp->input_waiting & DLIL_INPUT_TERMINATE))) { + (inp->dlth_flags & DLIL_INPUT_TERMINATE))) { goto terminate; } - VERIFY(!(inp->input_waiting & DLIL_INPUT_RUNNING)); - inp->input_waiting |= DLIL_INPUT_RUNNING; + VERIFY(!(inp->dlth_flags & DLIL_INPUT_RUNNING)); + inp->dlth_flags |= DLIL_INPUT_RUNNING; while (1) { struct mbuf *m = NULL; - u_int32_t m_cnt, m_size, poll_req = 0; + uint32_t m_cnt, poll_req = 0; + uint64_t m_size = 0; ifnet_model_t mode; struct timespec now, delta; classq_pkt_t pkt = CLASSQ_PKT_INITIALIZER(pkt); boolean_t notify; - u_int64_t ival; + boolean_t embryonic; + uint64_t ival; - inp->input_waiting &= ~DLIL_INPUT_WAITING; + inp->dlth_flags &= ~DLIL_INPUT_WAITING; + + if (__improbable(embryonic = + (inp->dlth_flags & DLIL_INPUT_EMBRYONIC))) { + inp->dlth_flags &= ~DLIL_INPUT_EMBRYONIC; + goto skip; + } if ((ival = ifp->if_rxpoll_ival) < IF_RXPOLL_INTERVALTIME_MIN) { ival = IF_RXPOLL_INTERVALTIME_MIN; @@ -2381,17 +2418,17 @@ dlil_rxpoll_input_thread_cont(void *v, wait_result_t wres) * on, but that requires our knowing the interface in advance * (and the benefits might not worth the trouble.) */ - VERIFY(!(inp->input_waiting & + VERIFY(!(inp->dlth_flags & (DLIL_PROTO_WAITING | DLIL_PROTO_REGISTER))); /* Total count of all packets */ - m_cnt = qlen(&inp->rcvq_pkts); + m_cnt = qlen(&inp->dlth_pkts); /* Total bytes of all packets */ - m_size = qsize(&inp->rcvq_pkts); + m_size = qsize(&inp->dlth_pkts); /* Packets for this interface */ - _getq_all(&inp->rcvq_pkts, &pkt, NULL, NULL, NULL); + _getq_all(&inp->dlth_pkts, &pkt, NULL, NULL, NULL); m = pkt.cp_mbuf; VERIFY(m != NULL || m_cnt == 0); @@ -2441,8 +2478,9 @@ dlil_rxpoll_input_thread_cont(void *v, wait_result_t wres) PKTCNTR_CLEAR(&ifp->if_poll_sstats); /* Calculate EWMA of wakeup requests */ - DLIL_EWMA(ifp->if_rxpoll_wavg, inp->wtot, if_rxpoll_decay); - inp->wtot = 0; + DLIL_EWMA(ifp->if_rxpoll_wavg, inp->dlth_wtot, + if_rxpoll_decay); + inp->dlth_wtot = 0; if (dlil_verbose) { if (!net_timerisset(&ifp->if_poll_dbg_lasttime)) { @@ -2500,9 +2538,13 @@ dlil_rxpoll_input_thread_cont(void *v, wait_result_t wres) skip: notify = dlil_input_stats_sync(ifp, inp); - lck_mtx_unlock(&inp->input_lck); + lck_mtx_unlock(&inp->dlth_lock); + + if (__improbable(embryonic)) { + ifnet_decr_pending_thread_count(ifp); + } - if (notify) { + if (__improbable(notify)) { ifnet_notify_data_threshold(ifp); } @@ -2574,28 +2616,28 @@ skip: * We should think about putting some thread starvation * safeguards if we deal with long chains of packets. 
*/ - if (m != NULL) { + if (__probable(m != NULL)) { dlil_input_packet_list_extended(NULL, m, m_cnt, mode); } - lck_mtx_lock_spin(&inp->input_lck); - VERIFY(inp->input_waiting & DLIL_INPUT_RUNNING); - if (!(inp->input_waiting & ~(DLIL_INPUT_RUNNING | + lck_mtx_lock_spin(&inp->dlth_lock); + VERIFY(inp->dlth_flags & DLIL_INPUT_RUNNING); + if (!(inp->dlth_flags & ~(DLIL_INPUT_RUNNING | DLIL_INPUT_TERMINATE))) { break; } } - inp->input_waiting &= ~DLIL_INPUT_RUNNING; + inp->dlth_flags &= ~DLIL_INPUT_RUNNING; - if (__improbable(inp->input_waiting & DLIL_INPUT_TERMINATE)) { + if (__improbable(inp->dlth_flags & DLIL_INPUT_TERMINATE)) { terminate: - lck_mtx_unlock(&inp->input_lck); + lck_mtx_unlock(&inp->dlth_lock); dlil_terminate_input_thread(inp); /* NOTREACHED */ } else { - (void) assert_wait(&inp->input_waiting, THREAD_UNINT); - lck_mtx_unlock(&inp->input_lck); + (void) assert_wait(&inp->dlth_flags, THREAD_UNINT); + lck_mtx_unlock(&inp->dlth_lock); (void) thread_block_parameter(dlil_rxpoll_input_thread_cont, inp); /* NOTREACHED */ @@ -2724,9 +2766,9 @@ dlil_rxpoll_set_params(struct ifnet *ifp, struct ifnet_poll_params *p, } if (!locked) { - lck_mtx_lock(&inp->input_lck); + lck_mtx_lock(&inp->dlth_lock); } - LCK_MTX_ASSERT(&inp->input_lck, LCK_MTX_ASSERT_OWNED); + LCK_MTX_ASSERT(&inp->dlth_lock, LCK_MTX_ASSERT_OWNED); /* * Normally, we'd reset the parameters to the auto-tuned values * if the the input thread detects a change in link rate. If the @@ -2740,7 +2782,7 @@ dlil_rxpoll_set_params(struct ifnet *ifp, struct ifnet_poll_params *p, } dlil_rxpoll_update_params(ifp, p); if (!locked) { - lck_mtx_unlock(&inp->input_lck); + lck_mtx_unlock(&inp->dlth_lock); } return 0; } @@ -2760,14 +2802,14 @@ dlil_rxpoll_get_params(struct ifnet *ifp, struct ifnet_poll_params *p) bzero(p, sizeof(*p)); - lck_mtx_lock(&inp->input_lck); + lck_mtx_lock(&inp->dlth_lock); p->packets_limit = ifp->if_rxpoll_plim; p->packets_lowat = ifp->if_rxpoll_plowat; p->packets_hiwat = ifp->if_rxpoll_phiwat; p->bytes_lowat = ifp->if_rxpoll_blowat; p->bytes_hiwat = ifp->if_rxpoll_bhiwat; p->interval_time = ifp->if_rxpoll_ival; - lck_mtx_unlock(&inp->input_lck); + lck_mtx_unlock(&inp->dlth_lock); return 0; } @@ -2834,7 +2876,7 @@ ifnet_input_common(struct ifnet *ifp, struct mbuf *m_head, struct mbuf *m_tail, last = m_head; while (m_head != NULL) { #if IFNET_INPUT_SANITY_CHK - if (dlil_input_sanity_check != 0) { + if (__improbable(dlil_input_sanity_check != 0)) { DLIL_INPUT_CHECK(last, ifp); } #endif /* IFNET_INPUT_SANITY_CHK */ @@ -2848,7 +2890,7 @@ ifnet_input_common(struct ifnet *ifp, struct mbuf *m_head, struct mbuf *m_tail, m_tail = last; } else { #if IFNET_INPUT_SANITY_CHK - if (dlil_input_sanity_check != 0) { + if (__improbable(dlil_input_sanity_check != 0)) { last = m_head; while (1) { DLIL_INPUT_CHECK(last, ifp); @@ -2921,34 +2963,44 @@ dlil_input_handler(struct ifnet *ifp, struct mbuf *m_head, struct mbuf *m_tail, const struct ifnet_stat_increment_param *s, boolean_t poll, struct thread *tp) { - struct dlil_threading_info *inp; - u_int32_t m_cnt = s->packets_in; - u_int32_t m_size = s->bytes_in; - boolean_t notify = FALSE; + struct dlil_threading_info *inp = ifp->if_inp; - if ((inp = ifp->if_inp) == NULL) { + if (__improbable(inp == NULL)) { inp = dlil_main_input_thread; } + return inp->dlth_strategy(inp, ifp, m_head, m_tail, s, poll, tp); +} + +static errno_t +dlil_input_async(struct dlil_threading_info *inp, + struct ifnet *ifp, struct mbuf *m_head, struct mbuf *m_tail, + const struct ifnet_stat_increment_param *s, 
boolean_t poll, + struct thread *tp) +{ + u_int32_t m_cnt = s->packets_in; + u_int32_t m_size = s->bytes_in; + boolean_t notify = FALSE; + /* * If there is a matching DLIL input thread associated with an * affinity set, associate this thread with the same set. We * will only do this once. */ - lck_mtx_lock_spin(&inp->input_lck); - if (inp != dlil_main_input_thread && inp->net_affinity && tp != NULL && - ((!poll && inp->wloop_thr == THREAD_NULL) || - (poll && inp->poll_thr == THREAD_NULL))) { - u_int32_t tag = inp->tag; + lck_mtx_lock_spin(&inp->dlth_lock); + if (inp != dlil_main_input_thread && inp->dlth_affinity && tp != NULL && + ((!poll && inp->dlth_driver_thread == THREAD_NULL) || + (poll && inp->dlth_poller_thread == THREAD_NULL))) { + u_int32_t tag = inp->dlth_affinity_tag; if (poll) { - VERIFY(inp->poll_thr == THREAD_NULL); - inp->poll_thr = tp; + VERIFY(inp->dlth_poller_thread == THREAD_NULL); + inp->dlth_poller_thread = tp; } else { - VERIFY(inp->wloop_thr == THREAD_NULL); - inp->wloop_thr = tp; + VERIFY(inp->dlth_driver_thread == THREAD_NULL); + inp->dlth_driver_thread = tp; } - lck_mtx_unlock(&inp->input_lck); + lck_mtx_unlock(&inp->dlth_lock); /* Associate the current thread with the new affinity tag */ (void) dlil_affinity_set(tp, tag); @@ -2959,7 +3011,7 @@ dlil_input_handler(struct ifnet *ifp, struct mbuf *m_head, * affinity. */ thread_reference(tp); - lck_mtx_lock_spin(&inp->input_lck); + lck_mtx_lock_spin(&inp->dlth_lock); } VERIFY(m_head != NULL || (m_tail == NULL && m_cnt == 0)); @@ -2981,28 +3033,34 @@ dlil_input_handler(struct ifnet *ifp, struct mbuf *m_head, _addq_multi(&inpm->lo_rcvq_pkts, &head, &tail, m_cnt, m_size); } else { - _addq_multi(&inp->rcvq_pkts, &head, &tail, + _addq_multi(&inp->dlth_pkts, &head, &tail, m_cnt, m_size); } } #if IFNET_INPUT_SANITY_CHK - if (dlil_input_sanity_check != 0) { - u_int32_t count; + if (__improbable(dlil_input_sanity_check != 0)) { + u_int32_t count = 0, size = 0; struct mbuf *m0; - for (m0 = m_head, count = 0; m0; m0 = mbuf_nextpkt(m0)) { + for (m0 = m_head; m0; m0 = mbuf_nextpkt(m0)) { + size += m_length(m0); count++; } if (count != m_cnt) { - panic_plain("%s: invalid packet count %d " - "(expected %d)\n", if_name(ifp), - count, m_cnt); + panic_plain("%s: invalid total packet count %u " + "(expected %u)\n", if_name(ifp), count, m_cnt); /* NOTREACHED */ + __builtin_unreachable(); + } else if (size != m_size) { + panic_plain("%s: invalid total packet size %u " + "(expected %u)\n", if_name(ifp), size, m_size); + /* NOTREACHED */ + __builtin_unreachable(); } - inp->input_mbuf_cnt += m_cnt; + inp->dlth_pkts_cnt += m_cnt; } #endif /* IFNET_INPUT_SANITY_CHK */ @@ -3017,17 +3075,90 @@ dlil_input_handler(struct ifnet *ifp, struct mbuf *m_head, notify = dlil_input_stats_sync(ifp, inp); } - inp->input_waiting |= DLIL_INPUT_WAITING; - if (!(inp->input_waiting & DLIL_INPUT_RUNNING)) { - inp->wtot++; - wakeup_one((caddr_t)&inp->input_waiting); + dlil_input_wakeup(inp); + lck_mtx_unlock(&inp->dlth_lock); + + if (notify) { + ifnet_notify_data_threshold(ifp); + } + + return 0; +} + +static errno_t +dlil_input_sync(struct dlil_threading_info *inp, + struct ifnet *ifp, struct mbuf *m_head, struct mbuf *m_tail, + const struct ifnet_stat_increment_param *s, boolean_t poll, + struct thread *tp) +{ +#pragma unused(tp) + u_int32_t m_cnt = s->packets_in; + u_int32_t m_size = s->bytes_in; + boolean_t notify = FALSE; + classq_pkt_t head, tail; + + ASSERT(inp != dlil_main_input_thread); + + /* XXX: should we just assert instead? 
*/ + if (__improbable(m_head == NULL)) { + return 0; + } + + CLASSQ_PKT_INIT_MBUF(&head, m_head); + CLASSQ_PKT_INIT_MBUF(&tail, m_tail); + + lck_mtx_lock_spin(&inp->dlth_lock); + _addq_multi(&inp->dlth_pkts, &head, &tail, m_cnt, m_size); + +#if IFNET_INPUT_SANITY_CHK + if (__improbable(dlil_input_sanity_check != 0)) { + u_int32_t count = 0, size = 0; + struct mbuf *m0; + + for (m0 = m_head; m0; m0 = mbuf_nextpkt(m0)) { + size += m_length(m0); + count++; + } + + if (count != m_cnt) { + panic_plain("%s: invalid total packet count %u " + "(expected %u)\n", if_name(ifp), count, m_cnt); + /* NOTREACHED */ + __builtin_unreachable(); + } else if (size != m_size) { + panic_plain("%s: invalid total packet size %u " + "(expected %u)\n", if_name(ifp), size, m_size); + /* NOTREACHED */ + __builtin_unreachable(); + } + + inp->dlth_pkts_cnt += m_cnt; } - lck_mtx_unlock(&inp->input_lck); +#endif /* IFNET_INPUT_SANITY_CHK */ + + dlil_input_stats_add(s, inp, ifp, poll); + + m_cnt = qlen(&inp->dlth_pkts); + _getq_all(&inp->dlth_pkts, &head, NULL, NULL, NULL); + + notify = dlil_input_stats_sync(ifp, inp); + + lck_mtx_unlock(&inp->dlth_lock); if (notify) { ifnet_notify_data_threshold(ifp); } + /* + * NOTE warning %%% attention !!!! + * We should think about putting some thread starvation + * safeguards if we deal with long chains of packets. + */ + if (head.cp_mbuf != NULL) { + dlil_input_packet_list_extended(NULL, head.cp_mbuf, + m_cnt, ifp->if_poll_mode); + } + return 0; } @@ -3057,8 +3188,7 @@ ifnet_start_common(struct ifnet *ifp, boolean_t resetfc) (resetfc || !(ifp->if_eflags & IFEF_ENQUEUE_MULTI) || IFCQ_LEN(&ifp->if_snd) >= ifp->if_start_delay_qlen || ifp->if_start_delayed == 0)) { - (void) thread_wakeup_thread((caddr_t)&ifp->if_start_thread, - ifp->if_start_thread); + (void) wakeup_one((caddr_t)&ifp->if_start_thread); } lck_mtx_unlock(&ifp->if_start_lock); } @@ -3095,26 +3225,29 @@ ifnet_start_thread_func(void *v, wait_result_t w) struct dlil_threading_info *inp = dlil_main_input_thread; struct thread *tp = current_thread(); - lck_mtx_lock(&inp->input_lck); - if (inp->net_affinity) { - u_int32_t tag = inp->tag; + lck_mtx_lock(&inp->dlth_lock); + if (inp->dlth_affinity) { + u_int32_t tag = inp->dlth_affinity_tag; - VERIFY(inp->wloop_thr == THREAD_NULL); - VERIFY(inp->poll_thr == THREAD_NULL); - inp->wloop_thr = tp; - lck_mtx_unlock(&inp->input_lck); + VERIFY(inp->dlth_driver_thread == THREAD_NULL); + VERIFY(inp->dlth_poller_thread == THREAD_NULL); + inp->dlth_driver_thread = tp; + lck_mtx_unlock(&inp->dlth_lock); /* Associate this thread with the affinity tag */ (void) dlil_affinity_set(tp, tag); } else { - lck_mtx_unlock(&inp->input_lck); + lck_mtx_unlock(&inp->dlth_lock); } } - ifnet_decr_pending_thread_count(ifp); lck_mtx_lock(&ifp->if_start_lock); - VERIFY(!ifp->if_start_active); + VERIFY(!ifp->if_start_embryonic && !ifp->if_start_active); (void) assert_wait(&ifp->if_start_thread, THREAD_UNINT); + ifp->if_start_embryonic = 1; + /* wake up once to get out of embryonic state */ + ifp->if_start_req++; + (void) wakeup_one((caddr_t)&ifp->if_start_thread); lck_mtx_unlock(&ifp->if_start_lock); (void) thread_block_parameter(ifnet_start_thread_cont, ifp); /* NOTREACHED */ @@ -3128,12 +3261,20 @@ ifnet_start_thread_cont(void *v, wait_result_t wres) struct ifnet *ifp = v; struct ifclassq *ifq = &ifp->if_snd; - lck_mtx_lock(&ifp->if_start_lock); + lck_mtx_lock_spin(&ifp->if_start_lock); if (__improbable(wres == THREAD_INTERRUPTED || ifp->if_start_thread == THREAD_NULL)) { goto terminate; } + if 
(__improbable(ifp->if_start_embryonic)) { + ifp->if_start_embryonic = 0; + lck_mtx_unlock(&ifp->if_start_lock); + ifnet_decr_pending_thread_count(ifp); + lck_mtx_lock_spin(&ifp->if_start_lock); + goto skip; + } + ifp->if_start_active = 1; /* @@ -3184,7 +3325,7 @@ ifnet_start_thread_cont(void *v, wait_result_t wres) break; } } - +skip: ifp->if_start_req = 0; ifp->if_start_active = 0; @@ -3214,7 +3355,7 @@ ifnet_start_thread_cont(void *v, wait_result_t wres) } if (__improbable(ts != NULL)) { - clock_interval_to_deadline((ts->tv_nsec + + clock_interval_to_deadline((uint32_t)(ts->tv_nsec + (ts->tv_sec * NSEC_PER_SEC)), 1, &deadline); } @@ -3263,6 +3404,18 @@ ifnet_set_start_cycle(struct ifnet *ifp, struct timespec *ts) } } +static inline void +ifnet_poll_wakeup(struct ifnet *ifp) +{ + LCK_MTX_ASSERT(&ifp->if_poll_lock, LCK_MTX_ASSERT_OWNED); + + ifp->if_poll_req++; + if (!(ifp->if_poll_flags & IF_POLLF_RUNNING) && + ifp->if_poll_thread != THREAD_NULL) { + wakeup_one((caddr_t)&ifp->if_poll_thread); + } +} + void ifnet_poll(struct ifnet *ifp) { @@ -3270,11 +3423,7 @@ ifnet_poll(struct ifnet *ifp) * If the poller thread is inactive, signal it to do work. */ lck_mtx_lock_spin(&ifp->if_poll_lock); - ifp->if_poll_req++; - if (!(ifp->if_poll_flags & IF_POLLF_RUNNING) && - ifp->if_poll_thread != THREAD_NULL) { - wakeup_one((caddr_t)&ifp->if_poll_thread); - } + ifnet_poll_wakeup(ifp); lck_mtx_unlock(&ifp->if_poll_lock); } @@ -3294,10 +3443,13 @@ ifnet_poll_thread_func(void *v, wait_result_t w) (void) snprintf(thread_name, sizeof(thread_name), "ifnet_poller_%s", ifp->if_xname); thread_set_thread_name(ifp->if_poll_thread, thread_name); - ifnet_decr_pending_thread_count(ifp); lck_mtx_lock(&ifp->if_poll_lock); + VERIFY(!(ifp->if_poll_flags & (IF_POLLF_EMBRYONIC | IF_POLLF_RUNNING))); (void) assert_wait(&ifp->if_poll_thread, THREAD_UNINT); + ifp->if_poll_flags |= IF_POLLF_EMBRYONIC; + /* wake up once to get out of embryonic state */ + ifnet_poll_wakeup(ifp); lck_mtx_unlock(&ifp->if_poll_lock); (void) thread_block_parameter(ifnet_poll_thread_cont, ifp); /* NOTREACHED */ @@ -3327,6 +3479,14 @@ ifnet_poll_thread_cont(void *v, wait_result_t wres) inp = ifp->if_inp; VERIFY(inp != NULL); + if (__improbable(ifp->if_poll_flags & IF_POLLF_EMBRYONIC)) { + ifp->if_poll_flags &= ~IF_POLLF_EMBRYONIC; + lck_mtx_unlock(&ifp->if_poll_lock); + ifnet_decr_pending_thread_count(ifp); + lck_mtx_lock_spin(&ifp->if_poll_lock); + goto skip; + } + ifp->if_poll_flags |= IF_POLLF_RUNNING; /* @@ -3338,7 +3498,7 @@ ifnet_poll_thread_cont(void *v, wait_result_t wres) u_int16_t req = ifp->if_poll_req; m_lim = (ifp->if_rxpoll_plim != 0) ? 
ifp->if_rxpoll_plim : - MAX((qlimit(&inp->rcvq_pkts)), (ifp->if_rxpoll_phiwat << 2)); + MAX((qlimit(&inp->dlth_pkts)), (ifp->if_rxpoll_phiwat << 2)); lck_mtx_unlock(&ifp->if_poll_lock); /* @@ -3407,7 +3567,7 @@ ifnet_poll_thread_cont(void *v, wait_result_t wres) break; } } - +skip: ifp->if_poll_req = 0; ifp->if_poll_flags &= ~IF_POLLF_RUNNING; @@ -3425,7 +3585,7 @@ ifnet_poll_thread_cont(void *v, wait_result_t wres) } if (ts != NULL) { - clock_interval_to_deadline((ts->tv_nsec + + clock_interval_to_deadline((uint32_t)(ts->tv_nsec + (ts->tv_sec * NSEC_PER_SEC)), 1, &deadline); } @@ -3622,9 +3782,9 @@ ifnet_set_rcvq_maxlen(struct ifnet *ifp, u_int32_t maxqlen) } inp = ifp->if_inp; - lck_mtx_lock(&inp->input_lck); - qlimit(&inp->rcvq_pkts) = maxqlen; - lck_mtx_unlock(&inp->input_lck); + lck_mtx_lock(&inp->dlth_lock); + qlimit(&inp->dlth_pkts) = maxqlen; + lck_mtx_unlock(&inp->dlth_lock); return 0; } @@ -3641,9 +3801,9 @@ ifnet_get_rcvq_maxlen(struct ifnet *ifp, u_int32_t *maxqlen) } inp = ifp->if_inp; - lck_mtx_lock(&inp->input_lck); - *maxqlen = qlimit(&inp->rcvq_pkts); - lck_mtx_unlock(&inp->input_lck); + lck_mtx_lock(&inp->dlth_lock); + *maxqlen = qlimit(&inp->dlth_pkts); + lck_mtx_unlock(&inp->dlth_lock); return 0; } @@ -3652,8 +3812,8 @@ ifnet_enqueue_multi_setup(struct ifnet *ifp, uint16_t delay_qlen, uint16_t delay_timeout) { if (delay_qlen > 0 && delay_timeout > 0) { - ifp->if_eflags |= IFEF_ENQUEUE_MULTI; - ifp->if_start_delay_qlen = min(100, delay_qlen); + if_set_eflags(ifp, IFEF_ENQUEUE_MULTI); + ifp->if_start_delay_qlen = MIN(100, delay_qlen); ifp->if_start_delay_timeout = min(20000, delay_timeout); /* convert timeout to nanoseconds */ ifp->if_start_delay_timeout *= 1000; @@ -3661,7 +3821,7 @@ ifnet_enqueue_multi_setup(struct ifnet *ifp, uint16_t delay_qlen, ifp->if_xname, (uint32_t)delay_qlen, (uint32_t)delay_timeout); } else { - ifp->if_eflags &= ~IFEF_ENQUEUE_MULTI; + if_clear_eflags(ifp, IFEF_ENQUEUE_MULTI); } } @@ -3737,6 +3897,7 @@ ifnet_enqueue_ifclassq(struct ifnet *ifp, classq_pkt_t *p, boolean_t flush, int error = 0; uint8_t *mcast_buf = NULL; uint8_t ip_ver; + uint32_t pktlen; ASSERT(ifp->if_eflags & IFEF_TXSTART); @@ -3767,24 +3928,25 @@ ifnet_enqueue_ifclassq(struct ifnet *ifp, classq_pkt_t *p, boolean_t flush, p->cp_mbuf->m_pkthdr.pkt_flowsrc == FLOWSRC_INPCB) { if (!(p->cp_mbuf->m_pkthdr.pkt_flags & PKTF_SO_BACKGROUND)) { - ifp->if_fg_sendts = _net_uptime; + ifp->if_fg_sendts = (uint32_t)_net_uptime; if (fg_ts != NULL) { - *fg_ts = _net_uptime; + *fg_ts = (uint32_t)_net_uptime; } } if (p->cp_mbuf->m_pkthdr.pkt_flags & PKTF_SO_REALTIME) { - ifp->if_rt_sendts = _net_uptime; + ifp->if_rt_sendts = (uint32_t)_net_uptime; if (rt_ts != NULL) { - *rt_ts = _net_uptime; + *rt_ts = (uint32_t)_net_uptime; } } } + pktlen = m_pktlen(p->cp_mbuf); /* * Some Wi-Fi AP implementations do not correctly handle * multicast IP packets with DSCP bits set (radr://9331522). - * As a workaround we clear the DSCP bits and set the service - * class to BE. + * As a workaround we clear the DSCP bits but keep service + * class (rdar://51507725). 
*/ if ((p->cp_mbuf->m_flags & M_MCAST) != 0 && IFNET_IS_WIFI_INFRA(ifp)) { @@ -3823,7 +3985,7 @@ ifnet_enqueue_ifclassq(struct ifnet *ifp, classq_pkt_t *p, boolean_t flush, break; } if (pullup) { - if ((p->cp_mbuf = m_pullup(p->cp_mbuf, hlen)) == + if ((p->cp_mbuf = m_pullup(p->cp_mbuf, (int)hlen)) == NULL) { return ENOMEM; } @@ -3831,7 +3993,6 @@ ifnet_enqueue_ifclassq(struct ifnet *ifp, classq_pkt_t *p, boolean_t flush, eh = (struct ether_header *)mbuf_data( p->cp_mbuf); } - mbuf_set_service_class(p->cp_mbuf, MBUF_SC_BE); mcast_buf = (uint8_t *)(eh + 1); /* * ifnet_mcast_clear_dscp() will finish the work below. @@ -3881,20 +4042,19 @@ ifnet_enqueue_ifclassq(struct ifnet *ifp, classq_pkt_t *p, boolean_t flush, ifp->if_start_delay_cnt = 1; ifp->if_start_delay_idle = 0; if (ifp->if_eflags & IFEF_DELAY_START) { - ifp->if_eflags &= - ~(IFEF_DELAY_START); - ifnet_delay_start_disabled++; + if_clear_eflags(ifp, IFEF_DELAY_START); + ifnet_delay_start_disabled_increment(); } } else { if (ifp->if_start_delay_cnt >= ifp->if_start_delay_qlen) { - ifp->if_eflags |= IFEF_DELAY_START; + if_set_eflags(ifp, IFEF_DELAY_START); ifp->if_start_delay_idle = 0; } else { if (ifp->if_start_delay_idle >= 10) { - ifp->if_eflags &= - ~(IFEF_DELAY_START); - ifnet_delay_start_disabled++; + if_clear_eflags(ifp, + IFEF_DELAY_START); + ifnet_delay_start_disabled_increment(); } else { ifp->if_start_delay_idle++; } @@ -3906,14 +4066,14 @@ ifnet_enqueue_ifclassq(struct ifnet *ifp, classq_pkt_t *p, boolean_t flush, ifp->if_start_delay_swin = now_nsec; ifp->if_start_delay_cnt = 1; ifp->if_start_delay_idle = 0; - ifp->if_eflags &= ~(IFEF_DELAY_START); + if_clear_eflags(ifp, IFEF_DELAY_START); } } else { - ifp->if_eflags &= ~(IFEF_DELAY_START); + if_clear_eflags(ifp, IFEF_DELAY_START); } /* enqueue the packet (caller consumes object) */ - error = ifclassq_enqueue(&ifp->if_snd, p, pdrop); + error = ifclassq_enqueue(&ifp->if_snd, p, p, 1, pktlen, pdrop); /* * Tell the driver to start dequeueing; do this even when the queue @@ -3928,6 +4088,27 @@ ifnet_enqueue_ifclassq(struct ifnet *ifp, classq_pkt_t *p, boolean_t flush, return error; } +static inline errno_t +ifnet_enqueue_ifclassq_chain(struct ifnet *ifp, classq_pkt_t *head, + classq_pkt_t *tail, uint32_t cnt, uint32_t bytes, boolean_t flush, + boolean_t *pdrop) +{ + int error; + + /* enqueue the packet (caller consumes object) */ + error = ifclassq_enqueue(&ifp->if_snd, head, tail, cnt, bytes, pdrop); + + /* + * Tell the driver to start dequeueing; do this even when the queue + * for the packet is suspended (EQSUSPENDED), as the driver could still + * be dequeueing from other unsuspended queues. 
+ */ + if ((error == 0 && flush) || error == EQFULL || error == EQSUSPENDED) { + ifnet_start(ifp); + } + return error; +} + int ifnet_enqueue_netem(void *handle, pktsched_pkt_t *pkts, uint32_t n_pkts) { @@ -3993,6 +4174,37 @@ ifnet_enqueue_mbuf(struct ifnet *ifp, struct mbuf *m, boolean_t flush, return ifnet_enqueue_common(ifp, &pkt, flush, pdrop); } +errno_t +ifnet_enqueue_mbuf_chain(struct ifnet *ifp, struct mbuf *m_head, + struct mbuf *m_tail, uint32_t cnt, uint32_t bytes, boolean_t flush, + boolean_t *pdrop) +{ + classq_pkt_t head, tail; + + ASSERT(m_head != NULL); + ASSERT((m_head->m_flags & M_PKTHDR) != 0); + ASSERT(m_tail != NULL); + ASSERT((m_tail->m_flags & M_PKTHDR) != 0); + ASSERT(ifp != NULL); + ASSERT((ifp->if_eflags & IFEF_TXSTART) != 0); + + if (!IF_FULLY_ATTACHED(ifp)) { + /* flag tested without lock for performance */ + m_freem_list(m_head); + *pdrop = TRUE; + return ENXIO; + } else if (!(ifp->if_flags & IFF_UP)) { + m_freem_list(m_head); + *pdrop = TRUE; + return ENETDOWN; + } + + CLASSQ_PKT_INIT_MBUF(&head, m_head); + CLASSQ_PKT_INIT_MBUF(&tail, m_tail); + return ifnet_enqueue_ifclassq_chain(ifp, &head, &tail, cnt, bytes, + flush, pdrop); +} + errno_t ifnet_dequeue(struct ifnet *ifp, struct mbuf **mp) @@ -4133,7 +4345,7 @@ ifnet_dequeue_service_class_multi(struct ifnet *ifp, mbuf_svc_class_t sc, return rc; } -#if !CONFIG_EMBEDDED +#if XNU_TARGET_OS_OSX errno_t ifnet_framer_stub(struct ifnet *ifp, struct mbuf **m, const struct sockaddr *dest, const char *dest_linkaddr, @@ -4148,7 +4360,7 @@ ifnet_framer_stub(struct ifnet *ifp, struct mbuf **m, return ifp->if_framer_legacy(ifp, m, dest, dest_linkaddr, frame_type); } -#endif /* !CONFIG_EMBEDDED */ +#endif /* XNU_TARGET_OS_OSX */ static boolean_t packet_has_vlan_tag(struct mbuf * m) @@ -4175,6 +4387,10 @@ dlil_interface_filters_input(struct ifnet *ifp, struct mbuf **m_p, is_vlan_packet = packet_has_vlan_tag(m); + if (TAILQ_EMPTY(&ifp->if_flt_head)) { + return 0; + } + /* * Pass the inbound packet to the interface filters */ @@ -4307,7 +4523,7 @@ static void dlil_input_stats_add(const struct ifnet_stat_increment_param *s, struct dlil_threading_info *inp, struct ifnet *ifp, boolean_t poll) { - struct ifnet_stat_increment_param *d = &inp->stats; + struct ifnet_stat_increment_param *d = &inp->dlth_stats; if (s->packets_in != 0) { d->packets_in += s->packets_in; @@ -4344,7 +4560,7 @@ dlil_input_stats_add(const struct ifnet_stat_increment_param *s, static boolean_t dlil_input_stats_sync(struct ifnet *ifp, struct dlil_threading_info *inp) { - struct ifnet_stat_increment_param *s = &inp->stats; + struct ifnet_stat_increment_param *s = &inp->dlth_stats; /* * Use of atomic operations is unavoidable here because @@ -4428,6 +4644,7 @@ dlil_input_packet_list_common(struct ifnet *ifp_param, struct mbuf *m, mbuf_t pkt_first = NULL; mbuf_t *pkt_next = NULL; u_int32_t poll_thresh = 0, poll_ival = 0; + int iorefcnt = 0; KERNEL_DEBUG(DBG_FNC_DLIL_INPUT | DBG_FUNC_START, 0, 0, 0, 0, 0); @@ -4438,7 +4655,6 @@ dlil_input_packet_list_common(struct ifnet *ifp_param, struct mbuf *m, while (m != NULL) { struct if_proto *ifproto = NULL; - int iorefcnt = 0; uint32_t pktf_mask; /* pkt flags to preserve */ if (ifp_param == NULL) { @@ -4465,15 +4681,18 @@ dlil_input_packet_list_common(struct ifnet *ifp_param, struct mbuf *m, * away, so optimize for that. 
*/ if (ifp != lo_ifp) { - if (!ifnet_datamov_begin(ifp)) { - m_freem(m); - goto next; + /* iorefcnt is 0 if it hasn't been taken yet */ + if (iorefcnt == 0) { + if (!ifnet_datamov_begin(ifp)) { + m_freem(m); + goto next; + } } iorefcnt = 1; /* - * Preserve the time stamp if it was set. + * Preserve the time stamp and skip pktap flags. */ - pktf_mask = PKTF_TS_VALID; + pktf_mask = PKTF_TS_VALID | PKTF_SKIP_PKTAP; } else { /* * If this arrived on lo0, preserve interface addr @@ -4591,7 +4810,7 @@ skip_clat: if (frame_header == NULL || frame_header < (char *)mbuf_datastart(m) || frame_header > (char *)m->m_data || - (adj = (m->m_data - frame_header)) > + (adj = (int)(m->m_data - frame_header)) > m->m_pkthdr.csum_rx_start) { m->m_pkthdr.csum_data = 0; m->m_pkthdr.csum_flags &= ~CSUM_DATA_VALID; @@ -4682,7 +4901,11 @@ next: ifp->if_updatemcasts = 0; } if (iorefcnt == 1) { - ifnet_datamov_end(ifp); + /* If the next mbuf is on a different interface, unlock data-mov */ + if (!m || (ifp != ifp_param && ifp != m->m_pkthdr.rcvif)) { + ifnet_datamov_end(ifp); + iorefcnt = 0; + } } } @@ -4871,52 +5094,6 @@ ifnet_event(ifnet_t ifp, struct kern_event_msg *event) return result; } -#if CONFIG_MACF_NET -#include -#include -static int -dlil_get_socket_type(struct mbuf **mp, int family, int raw) -{ - struct mbuf *m; - struct ip *ip; - struct ip6_hdr *ip6; - int type = SOCK_RAW; - - if (!raw) { - switch (family) { - case PF_INET: - m = m_pullup(*mp, sizeof(struct ip)); - if (m == NULL) { - break; - } - *mp = m; - ip = mtod(m, struct ip *); - if (ip->ip_p == IPPROTO_TCP) { - type = SOCK_STREAM; - } else if (ip->ip_p == IPPROTO_UDP) { - type = SOCK_DGRAM; - } - break; - case PF_INET6: - m = m_pullup(*mp, sizeof(struct ip6_hdr)); - if (m == NULL) { - break; - } - *mp = m; - ip6 = mtod(m, struct ip6_hdr *); - if (ip6->ip6_nxt == IPPROTO_TCP) { - type = SOCK_STREAM; - } else if (ip6->ip6_nxt == IPPROTO_UDP) { - type = SOCK_DGRAM; - } - break; - } - } - - return type; -} -#endif - static void dlil_count_chain_len(mbuf_t m, struct chain_len_stats *cls) { @@ -5132,16 +5309,25 @@ preout_again: } } -#if CONFIG_MACF_NET - retval = mac_ifnet_check_transmit(ifp, m, proto_family, - dlil_get_socket_type(&m, proto_family, raw)); - if (retval != 0) { - m_freem(m); - goto cleanup; - } -#endif - do { + /* + * pkt_hdr is set here to point to m_data prior to + * calling into the framer. This value of pkt_hdr is + * used by the netif gso logic to retrieve the ip header + * for the TCP packets, offloaded for TSO processing. + */ + if ((raw != 0) && (ifp->if_family == IFNET_FAMILY_ETHERNET)) { + uint8_t vlan_encap_len = 0; + + if ((old_proto_family == PF_VLAN) && + ((m->m_pkthdr.csum_flags & CSUM_VLAN_TAG_VALID) == 0)) { + vlan_encap_len = ETHER_VLAN_ENCAP_LEN; + } + m->m_pkthdr.pkt_hdr = mtod(m, char *) + ETHER_HDR_LEN + vlan_encap_len; + } else { + m->m_pkthdr.pkt_hdr = mtod(m, void *); + } + /* * Perform address family translation if needed. 
* For now we only support stateless 4 to 6 translation @@ -5277,6 +5463,7 @@ preout_again: } ifp_inc_traffic_class_out(ifp, m); + pktap_output(ifp, proto_family, m, pre, post); /* @@ -5506,8 +5693,8 @@ dlil_clat46(ifnet_t ifp, protocol_family_t *proto_family, mbuf_t *m) struct in6_addr *src = NULL; struct in6_addr dst; int error = 0; - uint32_t off = 0; - uint64_t tot_len = 0; + uint16_t off = 0; + uint16_t tot_len = 0; uint16_t ip_id_val = 0; uint16_t ip_frag_off = 0; @@ -5522,7 +5709,7 @@ dlil_clat46(ifnet_t ifp, protocol_family_t *proto_family, mbuf_t *m) osrc = iph->ip_src; odst = iph->ip_dst; proto = iph->ip_p; - off = iph->ip_hl << 2; + off = (uint16_t)(iph->ip_hl << 2); ip_id_val = iph->ip_id; ip_frag_off = ntohs(iph->ip_off) & IP_OFFMASK; @@ -6561,12 +6748,11 @@ ifnet_attach_protocol(ifnet_t ifp, protocol_family_t protocol, goto end; } - ifproto = zalloc(dlif_proto_zone); + ifproto = zalloc_flags(dlif_proto_zone, Z_WAITOK | Z_ZERO); if (ifproto == NULL) { retval = ENOMEM; goto end; } - bzero(ifproto, dlif_proto_size); /* refcnt held above during lookup */ ifproto->ifp = ifp; @@ -6815,6 +7001,7 @@ ifnet_attach(ifnet_t ifp, const struct sockaddr_dl *ll_addr) struct if_data_internal if_data_saved; struct dlil_ifnet *dl_if = (struct dlil_ifnet *)ifp; struct dlil_threading_info *dl_inp; + thread_continue_t thfunc = NULL; u_int32_t sflags = 0; int err; @@ -6880,14 +7067,13 @@ ifnet_attach(ifnet_t ifp, const struct sockaddr_dl *ll_addr) /* Allocate protocol hash table */ VERIFY(ifp->if_proto_hash == NULL); - ifp->if_proto_hash = zalloc(dlif_phash_zone); + ifp->if_proto_hash = zalloc_flags(dlif_phash_zone, Z_WAITOK | Z_ZERO); if (ifp->if_proto_hash == NULL) { ifnet_lock_done(ifp); ifnet_head_done(); dlil_if_unlock(); return ENOBUFS; } - bzero(ifp->if_proto_hash, dlif_phash_size); lck_mtx_lock_spin(&ifp->if_flt_lock); VERIFY(TAILQ_EMPTY(&ifp->if_flt_head)); @@ -6915,7 +7101,16 @@ ifnet_attach(ifnet_t ifp, const struct sockaddr_dl *ll_addr) dlil_if_unlock(); return ENOBUFS; } - ifp->if_index = idx; + ifp->if_index = (uint16_t)idx; + + /* the lladdr passed at attach time is the permanent address */ + if (ll_addr != NULL && ifp->if_type == IFT_ETHER && + ll_addr->sdl_alen == ETHER_ADDR_LEN) { + bcopy(CONST_LLADDR(ll_addr), + dl_if->dl_if_permanent_ether, + ETHER_ADDR_LEN); + dl_if->dl_if_permanent_ether_is_set = 1; + } } /* There should not be anything occupying this slot */ VERIFY(ifindex2ifnet[ifp->if_index] == NULL); @@ -6940,10 +7135,6 @@ ifnet_attach(ifnet_t ifp, const struct sockaddr_dl *ll_addr) if_attach_link_ifa(ifp, ifa); IFA_UNLOCK(ifa); -#if CONFIG_MACF_NET - mac_ifnet_label_associate(ifp); -#endif - TAILQ_INSERT_TAIL(&ifnet_head, ifp, if_link); ifindex2ifnet[ifp->if_index] = ifp; @@ -6994,21 +7185,22 @@ ifnet_attach(ifnet_t ifp, const struct sockaddr_dl *ll_addr) /* Sanity checks on the input thread storage */ dl_inp = &dl_if->dl_if_inpstorage; - bzero(&dl_inp->stats, sizeof(dl_inp->stats)); - VERIFY(dl_inp->input_waiting == 0); - VERIFY(dl_inp->wtot == 0); - VERIFY(dl_inp->ifp == NULL); - VERIFY(qhead(&dl_inp->rcvq_pkts) == NULL && qempty(&dl_inp->rcvq_pkts)); - VERIFY(qlimit(&dl_inp->rcvq_pkts) == 0); - VERIFY(!dl_inp->net_affinity); + bzero(&dl_inp->dlth_stats, sizeof(dl_inp->dlth_stats)); + VERIFY(dl_inp->dlth_flags == 0); + VERIFY(dl_inp->dlth_wtot == 0); + VERIFY(dl_inp->dlth_ifp == NULL); + VERIFY(qhead(&dl_inp->dlth_pkts) == NULL && qempty(&dl_inp->dlth_pkts)); + VERIFY(qlimit(&dl_inp->dlth_pkts) == 0); + VERIFY(!dl_inp->dlth_affinity); VERIFY(ifp->if_inp == NULL); - 
VERIFY(dl_inp->input_thr == THREAD_NULL); - VERIFY(dl_inp->wloop_thr == THREAD_NULL); - VERIFY(dl_inp->poll_thr == THREAD_NULL); - VERIFY(dl_inp->tag == 0); + VERIFY(dl_inp->dlth_thread == THREAD_NULL); + VERIFY(dl_inp->dlth_strategy == NULL); + VERIFY(dl_inp->dlth_driver_thread == THREAD_NULL); + VERIFY(dl_inp->dlth_poller_thread == THREAD_NULL); + VERIFY(dl_inp->dlth_affinity_tag == 0); #if IFNET_INPUT_SANITY_CHK - VERIFY(dl_inp->input_mbuf_cnt == 0); + VERIFY(dl_inp->dlth_pkts_cnt == 0); #endif /* IFNET_INPUT_SANITY_CHK */ VERIFY(ifp->if_poll_thread == THREAD_NULL); @@ -7019,8 +7211,11 @@ ifnet_attach(ifnet_t ifp, const struct sockaddr_dl *ll_addr) if (ifp->if_family != IFNET_FAMILY_LOOPBACK) { ifp->if_inp = dl_inp; ifnet_incr_pending_thread_count(ifp); - err = dlil_create_input_thread(ifp, ifp->if_inp); - if (err != 0) { + err = dlil_create_input_thread(ifp, ifp->if_inp, &thfunc); + if (err == ENODEV) { + VERIFY(thfunc == NULL); + ifnet_decr_pending_thread_count(ifp); + } else if (err != 0) { panic_plain("%s: ifp=%p couldn't get an input thread; " "err=%d", __func__, ifp, err); /* NOTREACHED */ @@ -7032,6 +7227,9 @@ ifnet_attach(ifnet_t ifp, const struct sockaddr_dl *ll_addr) * where the packets may be dequeued and transmitted. */ if (ifp->if_eflags & IFEF_TXSTART) { + thread_precedence_policy_data_t info; + __unused kern_return_t kret; + ifp->if_flowhash = ifnet_calc_flowhash(ifp); VERIFY(ifp->if_flowhash != 0); VERIFY(ifp->if_start_thread == THREAD_NULL); @@ -7049,8 +7247,12 @@ ifnet_attach(ifnet_t ifp, const struct sockaddr_dl *ll_addr) "err=%d", __func__, ifp, err); /* NOTREACHED */ } - ml_thread_policy(ifp->if_start_thread, MACHINE_GROUP, - (MACHINE_NETWORK_GROUP | MACHINE_NETWORK_WORKLOOP)); + bzero(&info, sizeof(info)); + info.importance = 1; + kret = thread_policy_set(ifp->if_start_thread, + THREAD_PRECEDENCE_POLICY, (thread_policy_t)&info, + THREAD_PRECEDENCE_POLICY_COUNT); + ASSERT(kret == KERN_SUCCESS); } else { ifp->if_flowhash = 0; } @@ -7066,10 +7268,12 @@ ifnet_attach(ifnet_t ifp, const struct sockaddr_dl *ll_addr) * If the driver supports the new receive model, create a poller * thread to invoke if_input_poll callback where the packets may * be dequeued from the driver and processed for reception. - * if the interface is netif compat then the poller thread is managed by netif. + * if the interface is netif compat then the poller thread is + * managed by netif. 
*/ - if (net_rxpoll && (ifp->if_eflags & IFEF_RXPOLL) && - (ifp->if_xflags & IFXF_LEGACY)) { + if (thfunc == dlil_rxpoll_input_thread_func) { + thread_precedence_policy_data_t info; + __unused kern_return_t kret; VERIFY(ifp->if_input_poll != NULL); VERIFY(ifp->if_input_ctl != NULL); ifnet_incr_pending_thread_count(ifp); @@ -7079,8 +7283,12 @@ ifnet_attach(ifnet_t ifp, const struct sockaddr_dl *ll_addr) "err=%d", __func__, ifp, err); /* NOTREACHED */ } - ml_thread_policy(ifp->if_poll_thread, MACHINE_GROUP, - (MACHINE_NETWORK_GROUP | MACHINE_NETWORK_WORKLOOP)); + bzero(&info, sizeof(info)); + info.importance = 1; + kret = thread_policy_set(ifp->if_poll_thread, + THREAD_PRECEDENCE_POLICY, (thread_policy_t)&info, + THREAD_PRECEDENCE_POLICY_COUNT); + ASSERT(kret == KERN_SUCCESS); } VERIFY(ifp->if_desc.ifd_maxlen == IF_DESCSIZE); @@ -7145,8 +7353,8 @@ ifnet_attach(ifnet_t ifp, const struct sockaddr_dl *ll_addr) * value of ECN global setting */ if (tcp_ecn_outbound == 2 && !IFNET_IS_CELLULAR(ifp)) { - ifp->if_eflags |= IFEF_ECN_ENABLE; - ifp->if_eflags &= ~IFEF_ECN_DISABLE; + if_set_eflags(ifp, IFEF_ECN_ENABLE); + if_clear_eflags(ifp, IFEF_ECN_DISABLE); } /* @@ -7161,7 +7369,7 @@ ifnet_attach(ifnet_t ifp, const struct sockaddr_dl *ll_addr) DLIL_PRINTF("%s if_set_qosmarking_mode(%s) error %d\n", __func__, ifp->if_xname, error); } else { - ifp->if_eflags |= IFEF_QOSMARKING_ENABLED; + if_set_eflags(ifp, IFEF_QOSMARKING_ENABLED); #if (DEVELOPMENT || DEBUG) DLIL_PRINTF("%s fastlane enabled on %s\n", __func__, ifp->if_xname); @@ -7198,22 +7406,20 @@ ifnet_attach(ifnet_t ifp, const struct sockaddr_dl *ll_addr) */ #if INET if (IGMP_IFINFO(ifp) == NULL) { - IGMP_IFINFO(ifp) = igmp_domifattach(ifp, M_WAITOK); + IGMP_IFINFO(ifp) = igmp_domifattach(ifp, Z_WAITOK); VERIFY(IGMP_IFINFO(ifp) != NULL); } else { VERIFY(IGMP_IFINFO(ifp)->igi_ifp == ifp); igmp_domifreattach(IGMP_IFINFO(ifp)); } #endif /* INET */ -#if INET6 if (MLD_IFINFO(ifp) == NULL) { - MLD_IFINFO(ifp) = mld_domifattach(ifp, M_WAITOK); + MLD_IFINFO(ifp) = mld_domifattach(ifp, Z_WAITOK); VERIFY(MLD_IFINFO(ifp) != NULL); } else { VERIFY(MLD_IFINFO(ifp)->mli_ifp == ifp); mld_domifreattach(MLD_IFINFO(ifp)); } -#endif /* INET6 */ VERIFY(ifp->if_data_threshold == 0); VERIFY(ifp->if_dt_tcall != NULL); @@ -7362,12 +7568,12 @@ dlil_alloc_lladdr(struct ifnet *ifp, const struct sockaddr_dl *ll_addr) ifa->ifa_ifp = ifp; ifa->ifa_rtrequest = link_rtrequest; ifa->ifa_addr = (struct sockaddr *)asdl; - asdl->sdl_len = socksize; + asdl->sdl_len = (u_char)socksize; asdl->sdl_family = AF_LINK; if (namelen > 0) { bcopy(workbuf, asdl->sdl_data, min(namelen, sizeof(asdl->sdl_data))); - asdl->sdl_nlen = namelen; + asdl->sdl_nlen = (u_char)namelen; } else { asdl->sdl_nlen = 0; } @@ -7380,7 +7586,7 @@ dlil_alloc_lladdr(struct ifnet *ifp, const struct sockaddr_dl *ll_addr) asdl->sdl_alen = 0; } ifa->ifa_netmask = (struct sockaddr *)msdl; - msdl->sdl_len = masklen; + msdl->sdl_len = (u_char)masklen; while (namelen > 0) { msdl->sdl_data[--namelen] = 0xff; } @@ -7399,9 +7605,7 @@ if_purgeaddrs(struct ifnet *ifp) #if INET in_purgeaddrs(ifp); #endif /* INET */ -#if INET6 in6_purgeaddrs(ifp); -#endif /* INET6 */ } errno_t @@ -7467,11 +7671,8 @@ ifnet_detach(ifnet_t ifp) } /* Reset ECN enable/disable flags */ - ifp->if_eflags &= ~IFEF_ECN_DISABLE; - ifp->if_eflags &= ~IFEF_ECN_ENABLE; - /* Reset CLAT46 flag */ - ifp->if_eflags &= ~IFEF_CLAT46; + if_clear_eflags(ifp, IFEF_ECN_ENABLE | IFEF_ECN_DISABLE | IFEF_CLAT46); /* * We do not reset the TCP keep alive counters in case @@ 
-7498,8 +7699,9 @@ ifnet_detach(ifnet_t ifp) } ifindex2ifnet[ifp->if_index] = NULL; - /* 18717626 - reset IFEF_IPV4_ROUTER and IFEF_IPV6_ROUTER */ - ifp->if_eflags &= ~(IFEF_IPV4_ROUTER | IFEF_IPV6_ROUTER); + /* 18717626 - reset router mode */ + if_clear_eflags(ifp, IFEF_IPV4_ROUTER); + ifp->if_ipv6_router_mode = IPV6_ROUTER_MODE_DISABLED; /* Record detach PC stacktrace */ ctrace_record(&((struct dlil_ifnet *)ifp)->dl_if_detach); @@ -7587,9 +7789,7 @@ ifnet_detach(ifnet_t ifp) #if INET igmp_domifdetach(ifp); #endif /* INET */ -#if INET6 mld_domifdetach(ifp); -#endif /* INET6 */ dlil_post_msg(ifp, KEV_DL_SUBCLASS, KEV_DL_IF_DETACHING, NULL, 0); @@ -7631,19 +7831,25 @@ ifnet_detaching_dequeue(void) return ifp; } -static int -ifnet_detacher_thread_cont(int err) +__attribute__((noreturn)) +static void +ifnet_detacher_thread_cont(void *v, wait_result_t wres) { -#pragma unused(err) +#pragma unused(v, wres) struct ifnet *ifp; + dlil_if_lock(); + if (__improbable(ifnet_detaching_embryonic)) { + ifnet_detaching_embryonic = FALSE; + /* there's no lock ordering constrain so OK to do this here */ + dlil_decr_pending_thread_count(); + } + for (;;) { dlil_if_lock_assert(); - while (ifnet_detaching_cnt == 0) { - (void) msleep0(&ifnet_delayed_run, &dlil_ifnet_lock, - (PZERO - 1), "ifnet_detacher_cont", 0, - ifnet_detacher_thread_cont); - /* NOTREACHED */ + + if (ifnet_detaching_cnt == 0) { + break; } net_update_uptime(); @@ -7658,6 +7864,14 @@ ifnet_detacher_thread_cont(int err) dlil_if_lock(); } } + + (void) assert_wait(&ifnet_delayed_run, THREAD_UNINT); + dlil_if_unlock(); + (void) thread_block(ifnet_detacher_thread_cont); + + VERIFY(0); /* we should never get here */ + /* NOTREACHED */ + __builtin_unreachable(); } __dead2 @@ -7665,16 +7879,16 @@ static void ifnet_detacher_thread_func(void *v, wait_result_t w) { #pragma unused(v, w) - dlil_decr_pending_thread_count(); dlil_if_lock(); - (void) msleep0(&ifnet_delayed_run, &dlil_ifnet_lock, - (PZERO - 1), "ifnet_detacher", 0, ifnet_detacher_thread_cont); - /* - * msleep0() shouldn't have returned as PCATCH was not set; - * therefore assert in this case. 
- */ + (void) assert_wait(&ifnet_delayed_run, THREAD_UNINT); + ifnet_detaching_embryonic = TRUE; + /* wake up once to get out of embryonic state */ + wakeup((caddr_t)&ifnet_delayed_run); dlil_if_unlock(); + (void) thread_block(ifnet_detacher_thread_cont); VERIFY(0); + /* NOTREACHED */ + __builtin_unreachable(); } static void @@ -7738,7 +7952,7 @@ ifnet_detach_final(struct ifnet *ifp) ifnet_lock_exclusive(ifp); - /* Uplumb all protocols */ + /* Unplumb all protocols */ for (i = 0; i < PROTO_HASH_SLOTS; i++) { struct if_proto *proto; @@ -7811,18 +8025,19 @@ ifnet_detach_final(struct ifnet *ifp) if ((inp = ifp->if_inp) != NULL) { VERIFY(inp != dlil_main_input_thread); - if (inp->net_affinity) { + if (inp->dlth_affinity) { struct thread *tp, *wtp, *ptp; - lck_mtx_lock_spin(&inp->input_lck); - wtp = inp->wloop_thr; - inp->wloop_thr = THREAD_NULL; - ptp = inp->poll_thr; - inp->poll_thr = THREAD_NULL; - tp = inp->input_thr; /* don't nullify now */ - inp->tag = 0; - inp->net_affinity = FALSE; - lck_mtx_unlock(&inp->input_lck); + lck_mtx_lock_spin(&inp->dlth_lock); + wtp = inp->dlth_driver_thread; + inp->dlth_driver_thread = THREAD_NULL; + ptp = inp->dlth_poller_thread; + inp->dlth_poller_thread = THREAD_NULL; + ASSERT(inp->dlth_thread != THREAD_NULL); + tp = inp->dlth_thread; /* don't nullify now */ + inp->dlth_affinity_tag = 0; + inp->dlth_affinity = FALSE; + lck_mtx_unlock(&inp->dlth_lock); /* Tear down poll thread affinity */ if (ptp != NULL) { @@ -7848,24 +8063,26 @@ ifnet_detach_final(struct ifnet *ifp) /* disassociate ifp DLIL input thread */ ifp->if_inp = NULL; - /* tell the input thread to terminate */ - lck_mtx_lock_spin(&inp->input_lck); - inp->input_waiting |= DLIL_INPUT_TERMINATE; - if (!(inp->input_waiting & DLIL_INPUT_RUNNING)) { - wakeup_one((caddr_t)&inp->input_waiting); - } - lck_mtx_unlock(&inp->input_lck); - ifnet_lock_done(ifp); + /* if the worker thread was created, tell it to terminate */ + if (inp->dlth_thread != THREAD_NULL) { + lck_mtx_lock_spin(&inp->dlth_lock); + inp->dlth_flags |= DLIL_INPUT_TERMINATE; + if (!(inp->dlth_flags & DLIL_INPUT_RUNNING)) { + wakeup_one((caddr_t)&inp->dlth_flags); + } + lck_mtx_unlock(&inp->dlth_lock); + ifnet_lock_done(ifp); - /* wait for the input thread to terminate */ - lck_mtx_lock_spin(&inp->input_lck); - while ((inp->input_waiting & DLIL_INPUT_TERMINATE_COMPLETE) - == 0) { - (void) msleep(&inp->input_waiting, &inp->input_lck, - (PZERO - 1) | PSPIN, inp->input_name, NULL); + /* wait for the input thread to terminate */ + lck_mtx_lock_spin(&inp->dlth_lock); + while ((inp->dlth_flags & DLIL_INPUT_TERMINATE_COMPLETE) + == 0) { + (void) msleep(&inp->dlth_flags, &inp->dlth_lock, + (PZERO - 1) | PSPIN, inp->dlth_name, NULL); + } + lck_mtx_unlock(&inp->dlth_lock); + ifnet_lock_exclusive(ifp); } - lck_mtx_unlock(&inp->input_lck); - ifnet_lock_exclusive(ifp); /* clean-up input thread state */ dlil_clean_threading_info(inp); @@ -7910,7 +8127,7 @@ ifnet_detach_final(struct ifnet *ifp) VERIFY(ifp->if_delegated.constrained == 0); /* QoS marking get cleared */ - ifp->if_eflags &= ~IFEF_QOSMARKING_ENABLED; + if_clear_eflags(ifp, IFEF_QOSMARKING_ENABLED); if_set_qosmarking_mode(ifp, IFRTYPE_QOSMARKING_MODE_NONE); @@ -8055,23 +8272,23 @@ ifp_if_check_multi(struct ifnet *ifp, const struct sockaddr *sa) return EOPNOTSUPP; } -#if CONFIG_EMBEDDED +#if !XNU_TARGET_OS_OSX static errno_t ifp_if_framer(struct ifnet *ifp, struct mbuf **m, const struct sockaddr *sa, const char *ll, const char *t, u_int32_t *pre, u_int32_t *post) -#else +#else /* XNU_TARGET_OS_OSX 
*/ static errno_t ifp_if_framer(struct ifnet *ifp, struct mbuf **m, const struct sockaddr *sa, const char *ll, const char *t) -#endif /* !CONFIG_EMBEDDED */ +#endif /* XNU_TARGET_OS_OSX */ { #pragma unused(ifp, m, sa, ll, t) -#if CONFIG_EMBEDDED +#if !XNU_TARGET_OS_OSX return ifp_if_framer_extended(ifp, m, sa, ll, t, pre, post); -#else +#else /* XNU_TARGET_OS_OSX */ return ifp_if_framer_extended(ifp, m, sa, ll, t, NULL, NULL); -#endif /* !CONFIG_EMBEDDED */ +#endif /* XNU_TARGET_OS_OSX */ } static errno_t @@ -8192,12 +8409,11 @@ dlil_if_acquire(u_int32_t family, const void *uniqueid, } /* no interface found, allocate a new one */ - buf = zalloc(dlif_zone); + buf = zalloc_flags(dlif_zone, Z_WAITOK | Z_ZERO); if (buf == NULL) { ret = ENOMEM; goto end; } - bzero(buf, dlif_bufsize); /* Get the 64-bit aligned base address for this object */ base = (void *)P2ROUNDUP((intptr_t)buf + sizeof(u_int64_t), @@ -8239,10 +8455,6 @@ dlil_if_acquire(u_int32_t family, const void *uniqueid, ifp1->if_desc.ifd_desc = dlifp1->dl_if_descstorage; -#if CONFIG_MACF_NET - mac_ifnet_label_init(ifp1); -#endif - if ((ret = dlil_alloc_local_stats(ifp1)) != 0) { DLIL_PRINTF("%s: failed to allocate if local stats, " "error: %d\n", __func__, ret); @@ -8262,11 +8474,9 @@ dlil_if_acquire(u_int32_t family, const void *uniqueid, ifnet_lock_attr); ifp1->if_inetdata = NULL; #endif -#if INET6 lck_rw_init(&ifp1->if_inet6data_lock, ifnet_lock_group, ifnet_lock_attr); ifp1->if_inet6data = NULL; -#endif lck_rw_init(&ifp1->if_link_status_lock, ifnet_lock_group, ifnet_lock_attr); ifp1->if_link_status = NULL; @@ -8324,15 +8534,6 @@ dlil_if_release(ifnet_t ifp) snprintf(__DECONST(char *, ifp->if_xname), IFXNAMSIZ, "%s?", ifp->if_name); lck_mtx_unlock(&dlifp->dl_if_lock); -#if CONFIG_MACF_NET - /* - * We can either recycle the MAC label here or in dlil_if_acquire(). - * It seems logical to do it here but this means that anything that - * still has a handle on ifp will now see it as unlabeled. - * Since the interface is "dead" that may be OK. Revisit later. - */ - mac_ifnet_label_recycle(ifp); -#endif ifnet_lock_done(ifp); } @@ -8367,9 +8568,7 @@ dlil_proto_unplumb_all(struct ifnet *ifp) * have happened by now) and do the unplumb then. 
*/ (void) proto_unplumb(PF_INET, ifp); -#if INET6 (void) proto_unplumb(PF_INET6, ifp); -#endif /* INET6 */ } static void @@ -8397,7 +8596,6 @@ ifp_src_route_copyin(struct ifnet *ifp, struct route *src) lck_mtx_unlock(&ifp->if_cached_route_lock); } -#if INET6 static void ifp_src_route6_copyout(struct ifnet *ifp, struct route_in6 *dst) { @@ -8424,7 +8622,6 @@ ifp_src_route6_copyin(struct ifnet *ifp, struct route_in6 *src) } lck_mtx_unlock(&ifp->if_cached_route_lock); } -#endif /* INET6 */ struct rtentry * ifnet_cached_rtlookup_inet(struct ifnet *ifp, struct in_addr src_ip) @@ -8461,7 +8658,6 @@ ifnet_cached_rtlookup_inet(struct ifnet *ifp, struct in_addr src_ip) return src_rt.ro_rt; } -#if INET6 struct rtentry * ifnet_cached_rtlookup_inet6(struct ifnet *ifp, struct in6_addr *src_ip6) { @@ -8498,7 +8694,6 @@ ifnet_cached_rtlookup_inet6(struct ifnet *ifp, struct in6_addr *src_ip6) return src_rt.ro_rt; } -#endif /* INET6 */ void if_lqm_update(struct ifnet *ifp, int lqm, int locked) @@ -8544,7 +8739,7 @@ if_lqm_update(struct ifnet *ifp, int lqm, int locked) } ifp->if_interface_state.valid_bitmask |= IF_INTERFACE_STATE_LQM_STATE_VALID; - ifp->if_interface_state.lqm_state = lqm; + ifp->if_interface_state.lqm_state = (int8_t)lqm; /* * Don't want to hold the lock when issuing kernel events @@ -8579,7 +8774,7 @@ if_rrc_state_update(struct ifnet *ifp, unsigned int rrc_state) ifp->if_interface_state.valid_bitmask |= IF_INTERFACE_STATE_RRC_STATE_VALID; - ifp->if_interface_state.rrc_state = rrc_state; + ifp->if_interface_state.rrc_state = (uint8_t)rrc_state; /* * Don't want to hold the lock when issuing kernel events @@ -8702,17 +8897,14 @@ if_get_state(struct ifnet *ifp, errno_t if_probe_connectivity(struct ifnet *ifp, u_int32_t conn_probe) { - ifnet_lock_exclusive(ifp); if (conn_probe > 1) { - ifnet_lock_done(ifp); return EINVAL; } if (conn_probe == 0) { - ifp->if_eflags &= ~IFEF_PROBE_CONNECTIVITY; + if_clear_eflags(ifp, IFEF_PROBE_CONNECTIVITY); } else { - ifp->if_eflags |= IFEF_PROBE_CONNECTIVITY; + if_set_eflags(ifp, IFEF_PROBE_CONNECTIVITY); } - ifnet_lock_done(ifp); #if NECP necp_update_all_clients(); @@ -8784,9 +8976,21 @@ uuid_get_ethernet(u_int8_t *node) the_index = other_index; } if (the_index != 0) { + struct dlil_ifnet *dl_if; + ifp = ifindex2ifnet[the_index]; VERIFY(ifp != NULL); - memcpy(node, IF_LLADDR(ifp), ETHER_ADDR_LEN); + dl_if = (struct dlil_ifnet *)ifp; + if (dl_if->dl_if_permanent_ether_is_set != 0) { + /* + * Use the permanent ethernet address if it is + * available because it will never change. 
+ */ + memcpy(node, dl_if->dl_if_permanent_ether, + ETHER_ADDR_LEN); + } else { + memcpy(node, IF_LLADDR(ifp), ETHER_ADDR_LEN); + } ret = 0; } else { ret = -1; @@ -9078,8 +9282,7 @@ dlil_node_present_v2(struct ifnet *ifp, struct sockaddr *sa, struct sockaddr_dl int err = dlil_post_msg(ifp, KEV_DL_SUBCLASS, KEV_DL_NODE_PRESENCE, &kev.link_data, sizeof(kev)); if (err != 0) { - log(LOG_ERR, "%s: Post DL_NODE_PRESENCE failed with", - "error %d\n", __func__, err); + log(LOG_ERR, "%s: Post DL_NODE_PRESENCE failed with error %d\n", __func__, err); } } return ret; @@ -9225,7 +9428,10 @@ ifnet_get_throttle(struct ifnet *ifp, u_int32_t *level) IFCQ_LOCK(ifq); /* Throttling works only for IFCQ, not ALTQ instances */ if (IFCQ_IS_ENABLED(ifq)) { - IFCQ_GET_THROTTLE(ifq, *level, err); + cqrq_throttle_t req = { 0, IFNET_THROTTLE_OFF }; + + err = fq_if_request_classq(ifq, CLASSQRQ_THROTTLE, &req); + *level = req.level; } IFCQ_UNLOCK(ifq); @@ -9254,7 +9460,9 @@ ifnet_set_throttle(struct ifnet *ifp, u_int32_t level) IFCQ_LOCK(ifq); if (IFCQ_IS_ENABLED(ifq)) { - IFCQ_SET_THROTTLE(ifq, level, err); + cqrq_throttle_t req = { 1, level }; + + err = fq_if_request_classq(ifq, CLASSQRQ_THROTTLE, &req); } IFCQ_UNLOCK(ifq); @@ -9413,7 +9621,7 @@ ifnet_notify_address(struct ifnet *ifp, int af) } bzero(&na, sizeof(na)); - na.address_family = af; + na.address_family = (sa_family_t)af; return ifp->if_output_ctl(ifp, IFNET_CTL_NOTIFY_ADDRESS, sizeof(na), &na); @@ -9536,14 +9744,7 @@ ifnet_fc_add(struct ifnet *ifp) /* become regular mutex */ lck_mtx_convert_spin(&ifnet_fc_lock); - ifce = zalloc(ifnet_fc_zone); - if (ifce == NULL) { - /* memory allocation failed */ - lck_mtx_unlock(&ifnet_fc_lock); - return ENOMEM; - } - bzero(ifce, ifnet_fc_zone_size); - + ifce = zalloc_flags(ifnet_fc_zone, Z_WAITOK | Z_ZERO); ifce->ifce_flowhash = flowhash; ifce->ifce_ifp = ifp; @@ -9710,7 +9911,7 @@ ifnet_get_netsignature(struct ifnet *ifp, uint8_t family, uint8_t *len, if_inetdata_lock_done(ifp); break; } - if ((*len = IN_IFEXTRA(ifp)->netsig_len) > 0) { + if ((*len = (uint8_t)IN_IFEXTRA(ifp)->netsig_len) > 0) { bcopy(IN_IFEXTRA(ifp)->netsig, data, *len); } else { error = ENOENT; @@ -9729,7 +9930,7 @@ ifnet_get_netsignature(struct ifnet *ifp, uint8_t family, uint8_t *len, if_inet6data_lock_done(ifp); break; } - if ((*len = IN6_IFEXTRA(ifp)->netsig_len) > 0) { + if ((*len = (uint8_t)IN6_IFEXTRA(ifp)->netsig_len) > 0) { bcopy(IN6_IFEXTRA(ifp)->netsig, data, *len); } else { error = ENOENT; @@ -9752,7 +9953,6 @@ ifnet_get_netsignature(struct ifnet *ifp, uint8_t family, uint8_t *len, return error; } -#if INET6 int ifnet_set_nat64prefix(struct ifnet *ifp, struct ipv6_prefix *prefixes) { @@ -9856,7 +10056,6 @@ out: return error; } -#endif static void dlil_output_cksum_dbg(struct ifnet *ifp, struct mbuf *m, uint32_t hoff, @@ -9880,7 +10079,6 @@ dlil_output_cksum_dbg(struct ifnet *ifp, struct mbuf *m, uint32_t hoff, hwcksum_dbg_finalized_data++; } break; -#if INET6 case PF_INET6: /* * Checksum offload should not have been enabled when @@ -9895,7 +10093,6 @@ dlil_output_cksum_dbg(struct ifnet *ifp, struct mbuf *m, uint32_t hoff, hwcksum_dbg_finalized_data++; } break; -#endif /* INET6 */ default: return; } @@ -9919,13 +10116,11 @@ dlil_input_cksum_dbg(struct ifnet *ifp, struct mbuf *m, char *frame_header, (uint64_t)VM_KERNEL_ADDRPERM(m)); return; } - hlen = (m->m_data - frame_header); + hlen = (uint32_t)(m->m_data - frame_header); switch (pf) { case PF_INET: -#if INET6 case PF_INET6: -#endif /* INET6 */ break; default: return; @@ -9950,7 +10145,7 
@@ dlil_input_cksum_dbg(struct ifnet *ifp, struct mbuf *m, char *frame_header, m->m_pkthdr.csum_flags |= (CSUM_DATA_VALID | CSUM_PARTIAL); m->m_pkthdr.csum_rx_val = sum; - m->m_pkthdr.csum_rx_start = (foff + hlen); + m->m_pkthdr.csum_rx_start = (uint16_t)(foff + hlen); hwcksum_dbg_partial_forced++; hwcksum_dbg_partial_forced_bytes += m->m_pkthdr.len; @@ -10021,7 +10216,7 @@ dlil_input_cksum_dbg(struct ifnet *ifp, struct mbuf *m, char *frame_header, m_pktlen(m) - aoff, sum); m->m_pkthdr.csum_rx_val = sum; - m->m_pkthdr.csum_rx_start = (aoff + hlen); + m->m_pkthdr.csum_rx_start = (uint16_t)(aoff + hlen); hwcksum_dbg_adjusted++; } @@ -10219,7 +10414,7 @@ dlil_verify_sum16(void) sum = m_sum16(m, 0, len); if (!sumtbl[n].init) { - sumr = in_cksum_mbuf_ref(m, len, 0, 0); + sumr = (uint16_t)in_cksum_mbuf_ref(m, len, 0, 0); sumtbl[n].sumr = sumr; sumtbl[n].init = TRUE; } else { @@ -10457,3 +10652,39 @@ ifnet_update_stats_per_flow(struct ifnet_stats_per_flow *ifs, { tcp_update_stats_per_flow(ifs, ifp); } + +static inline u_int32_t +_set_flags(u_int32_t *flags_p, u_int32_t set_flags) +{ + return (u_int32_t)OSBitOrAtomic(set_flags, flags_p); +} + +static inline void +_clear_flags(u_int32_t *flags_p, u_int32_t clear_flags) +{ + OSBitAndAtomic(~clear_flags, flags_p); +} + +__private_extern__ u_int32_t +if_set_eflags(ifnet_t interface, u_int32_t set_flags) +{ + return _set_flags(&interface->if_eflags, set_flags); +} + +__private_extern__ void +if_clear_eflags(ifnet_t interface, u_int32_t clear_flags) +{ + _clear_flags(&interface->if_eflags, clear_flags); +} + +__private_extern__ u_int32_t +if_set_xflags(ifnet_t interface, u_int32_t set_flags) +{ + return _set_flags(&interface->if_xflags, set_flags); +} + +__private_extern__ void +if_clear_xflags(ifnet_t interface, u_int32_t clear_flags) +{ + _clear_flags(&interface->if_xflags, clear_flags); +} diff --git a/bsd/net/dlil.h b/bsd/net/dlil.h index 7f2753cba..a86b87b86 100644 --- a/bsd/net/dlil.h +++ b/bsd/net/dlil.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999-2019 Apple Inc. All rights reserved. + * Copyright (c) 1999-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -152,30 +152,39 @@ struct iff_filter; #define DLIL_THREADNAME_LEN 32 /* - * DLIL input thread info + * DLIL threading info */ struct dlil_threading_info { - decl_lck_mtx_data(, input_lck); - lck_grp_t *lck_grp; /* lock group (for lock stats) */ - u_int32_t input_waiting; /* DLIL condition of thread */ - u_int32_t wtot; /* # of wakeup requests */ - char input_name[DLIL_THREADNAME_LEN]; /* name storage */ - struct ifnet *ifp; /* pointer to interface */ - class_queue_t rcvq_pkts; /* queue of pkts */ - struct ifnet_stat_increment_param stats; /* incremental statistics */ + decl_lck_mtx_data(, dlth_lock); + class_queue_t dlth_pkts; /* queue of pkts */ + struct ifnet *dlth_ifp; /* pointer to interface */ + struct ifnet_stat_increment_param dlth_stats; /* incremental stats */ + uint32_t dlth_flags; /* thread flags (see below) */ + uint32_t dlth_wtot; /* # of wakeup requests */ + + /* strategy (sync or async) */ + errno_t (*dlth_strategy)(struct dlil_threading_info *, + struct ifnet *, struct mbuf *, struct mbuf *, + const struct ifnet_stat_increment_param *, boolean_t, + struct thread *); + /* * Thread affinity (workloop and DLIL threads). 
*/ - boolean_t net_affinity; /* affinity set is available */ - struct thread *input_thr; /* input thread */ - struct thread *wloop_thr; /* workloop thread */ - struct thread *poll_thr; /* poll thread */ - u_int32_t tag; /* affinity tag */ + boolean_t dlth_affinity; /* affinity set is available */ + uint32_t dlth_affinity_tag; /* affinity tag */ + struct thread *dlth_thread; /* DLIL worker thread */ + struct thread *dlth_driver_thread; /* driver/workloop thread */ + struct thread *dlth_poller_thread; /* poll thread */ + + lck_grp_t *dlth_lock_grp; /* lock group (for lock stats) */ + char dlth_name[DLIL_THREADNAME_LEN]; /* name storage */ + #if IFNET_INPUT_SANITY_CHK /* * For debugging. */ - u_int64_t input_mbuf_cnt; /* total # of packets processed */ + uint64_t dlth_pkts_cnt; /* total # of packets */ #endif }; @@ -188,15 +197,18 @@ struct dlil_main_threading_info { }; /* + * Valid values for dlth_flags. + * * The following are shared with kpi_protocol.c so that it may wakeup * the input thread to run through packets queued for protocol input. */ -#define DLIL_INPUT_RUNNING 0x80000000 -#define DLIL_INPUT_WAITING 0x40000000 -#define DLIL_PROTO_REGISTER 0x20000000 -#define DLIL_PROTO_WAITING 0x10000000 -#define DLIL_INPUT_TERMINATE 0x08000000 +#define DLIL_INPUT_RUNNING 0x80000000 +#define DLIL_INPUT_WAITING 0x40000000 +#define DLIL_PROTO_REGISTER 0x20000000 +#define DLIL_PROTO_WAITING 0x10000000 +#define DLIL_INPUT_TERMINATE 0x08000000 #define DLIL_INPUT_TERMINATE_COMPLETE 0x04000000 +#define DLIL_INPUT_EMBRYONIC 0x00000001 /* * Flags for dlil_attach_filter() @@ -303,6 +315,8 @@ extern errno_t dlil_send_arp(ifnet_t, u_int16_t, const struct sockaddr_dl *, extern int dlil_attach_filter(ifnet_t, const struct iff_filter *, interface_filter_t *, u_int32_t); extern void dlil_detach_filter(interface_filter_t); +extern boolean_t dlil_has_ip_filter(void); +extern boolean_t dlil_has_if_filter(struct ifnet *); extern void dlil_proto_unplumb_all(ifnet_t); @@ -381,19 +395,19 @@ ifp_inc_traffic_class_in(struct ifnet *ifp, struct mbuf *m) switch (m_get_traffic_class(m)) { case MBUF_TC_BE: ifp->if_tc.ifi_ibepackets++; - ifp->if_tc.ifi_ibebytes += m->m_pkthdr.len; + ifp->if_tc.ifi_ibebytes += (u_int64_t)m->m_pkthdr.len; break; case MBUF_TC_BK: ifp->if_tc.ifi_ibkpackets++; - ifp->if_tc.ifi_ibkbytes += m->m_pkthdr.len; + ifp->if_tc.ifi_ibkbytes += (u_int64_t)m->m_pkthdr.len; break; case MBUF_TC_VI: ifp->if_tc.ifi_ivipackets++; - ifp->if_tc.ifi_ivibytes += m->m_pkthdr.len; + ifp->if_tc.ifi_ivibytes += (u_int64_t)m->m_pkthdr.len; break; case MBUF_TC_VO: ifp->if_tc.ifi_ivopackets++; - ifp->if_tc.ifi_ivobytes += m->m_pkthdr.len; + ifp->if_tc.ifi_ivobytes += (u_int64_t)m->m_pkthdr.len; break; default: break; @@ -401,7 +415,7 @@ ifp_inc_traffic_class_in(struct ifnet *ifp, struct mbuf *m) if (mbuf_is_traffic_class_privileged(m)) { ifp->if_tc.ifi_ipvpackets++; - ifp->if_tc.ifi_ipvbytes += m->m_pkthdr.len; + ifp->if_tc.ifi_ipvbytes += (u_int64_t)m->m_pkthdr.len; } } @@ -421,19 +435,19 @@ ifp_inc_traffic_class_out(struct ifnet *ifp, struct mbuf *m) switch (m_get_traffic_class(m)) { case MBUF_TC_BE: ifp->if_tc.ifi_obepackets++; - ifp->if_tc.ifi_obebytes += m->m_pkthdr.len; + ifp->if_tc.ifi_obebytes += (u_int64_t)m->m_pkthdr.len; break; case MBUF_TC_BK: ifp->if_tc.ifi_obkpackets++; - ifp->if_tc.ifi_obkbytes += m->m_pkthdr.len; + ifp->if_tc.ifi_obkbytes += (u_int64_t)m->m_pkthdr.len; break; case MBUF_TC_VI: ifp->if_tc.ifi_ovipackets++; - ifp->if_tc.ifi_ovibytes += m->m_pkthdr.len; + ifp->if_tc.ifi_ovibytes += 
(u_int64_t)m->m_pkthdr.len; break; case MBUF_TC_VO: ifp->if_tc.ifi_ovopackets++; - ifp->if_tc.ifi_ovobytes += m->m_pkthdr.len; + ifp->if_tc.ifi_ovobytes += (u_int64_t)m->m_pkthdr.len; break; default: break; @@ -441,7 +455,7 @@ ifp_inc_traffic_class_out(struct ifnet *ifp, struct mbuf *m) if (mbuf_is_traffic_class_privileged(m)) { ifp->if_tc.ifi_opvpackets++; - ifp->if_tc.ifi_opvbytes += m->m_pkthdr.len; + ifp->if_tc.ifi_opvbytes += (u_int64_t)m->m_pkthdr.len; } } #endif /* BSD_KERNEL_PRIVATE */ diff --git a/bsd/net/ether_if_module.c b/bsd/net/ether_if_module.c index f26aec76c..1c051ff91 100644 --- a/bsd/net/ether_if_module.c +++ b/bsd/net/ether_if_module.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2017 Apple Inc. All rights reserved. + * Copyright (c) 2000-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -123,11 +123,11 @@ struct en_desc { }; /* descriptors are allocated in blocks of ETHER_DESC_BLK_SIZE */ -#if CONFIG_EMBEDDED +#if !XNU_TARGET_OS_OSX #define ETHER_DESC_BLK_SIZE (2) /* IP, ARP */ -#else +#else /* XNU_TARGET_OS_OSX */ #define ETHER_DESC_BLK_SIZE (10) -#endif +#endif /* XNU_TARGET_OS_OSX */ /* * Header for the demux list, hangs off of IFP at if_family_cookie @@ -639,14 +639,12 @@ ether_family_init(void) error); goto done; } -#if INET6 if ((error = proto_register_plumber(PF_INET6, APPLE_IF_FAM_ETHERNET, ether_attach_inet6, ether_detach_inet6)) != 0) { printf("proto_register_plumber failed for PF_INET6 error=%d\n", error); goto done; } -#endif /* INET6 */ #if VLAN vlan_family_init(); #endif /* VLAN */ diff --git a/bsd/net/ether_inet6_pr_module.c b/bsd/net/ether_inet6_pr_module.c index 816995437..2b077473a 100644 --- a/bsd/net/ether_inet6_pr_module.c +++ b/bsd/net/ether_inet6_pr_module.c @@ -85,11 +85,9 @@ #include #include -#if INET6 #include #include #include -#endif /* #include "vlan.h" */ #if NVLAN > 0 diff --git a/bsd/net/ether_inet_pr_module.c b/bsd/net/ether_inet_pr_module.c index 436c8fc7b..9830039be 100644 --- a/bsd/net/ether_inet_pr_module.c +++ b/bsd/net/ether_inet_pr_module.c @@ -454,10 +454,6 @@ ether_inet_arp(ifnet_t ifp, u_short arpop, const struct sockaddr_dl *sender_hw, eh = mbuf_data(m); eh->ether_type = htons(ETHERTYPE_ARP); -#if CONFIG_MACF_NET - mac_mbuf_label_associate_linklayer(ifp, m); -#endif - /* Fill out the arp header */ ea->arp_pro = htons(ETHERTYPE_IP); ea->arp_hln = sizeof(ea->arp_sha); diff --git a/bsd/net/ethernet.h b/bsd/net/ethernet.h index c4df50720..354a563a4 100644 --- a/bsd/net/ethernet.h +++ b/bsd/net/ethernet.h @@ -32,8 +32,14 @@ #ifndef _NET_ETHERNET_H_ #define _NET_ETHERNET_H_ +#ifndef DRIVERKIT #include #include /* u_ types */ +#else +#include +#include +#include +#endif /* DRIVERKIT */ /* * The number of bytes in an ethernet (MAC) address. @@ -121,6 +127,7 @@ typedef struct ether_addr { #define ETHERMTU (ETHER_MAX_LEN-ETHER_HDR_LEN-ETHER_CRC_LEN) #define ETHERMIN (ETHER_MIN_LEN-ETHER_HDR_LEN-ETHER_CRC_LEN) +#ifndef DRIVERKIT #ifdef KERNEL_PRIVATE /* * The following are used by ethernet interfaces. @@ -179,5 +186,6 @@ struct ether_addr *ether_aton(const char *); int ether_ntohost(char *, const struct ether_addr *); __END_DECLS #endif /* !KERNEL */ +#endif /* DRIVERKIT */ #endif /* !_NET_ETHERNET_H_ */ diff --git a/bsd/net/flowadv.c b/bsd/net/flowadv.c index 2d6a41ecb..bf9419748 100644 --- a/bsd/net/flowadv.c +++ b/bsd/net/flowadv.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012-2017 Apple Inc. All rights reserved. + * Copyright (c) 2012-2020 Apple Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -105,11 +105,10 @@ static STAILQ_HEAD(fadv_head, flowadv_fcentry) fadv_list; static thread_t fadv_thread = THREAD_NULL; static uint32_t fadv_active; -static unsigned int fadv_zone_size; /* size of flowadv_fcentry */ -static struct zone *fadv_zone; /* zone for flowadv_fcentry */ +static unsigned int fadv_size; /* size of flowadv_fcentry */ +static struct mcache *fadv_cache; /* mcache for flowadv_fcentry */ -#define FADV_ZONE_MAX 32 /* maximum elements in zone */ -#define FADV_ZONE_NAME "fadv_zone" /* zone name */ +#define FADV_CACHE_NAME "flowadv" /* cache name */ static int flowadv_thread_cont(int); static void flowadv_thread_func(void *, wait_result_t); @@ -124,16 +123,9 @@ flowadv_init(void) fadv_lock_grp = lck_grp_alloc_init("fadv_lock", fadv_lock_grp_attr); lck_mtx_init(&fadv_lock, fadv_lock_grp, NULL); - fadv_zone_size = P2ROUNDUP(sizeof(struct flowadv_fcentry), - sizeof(u_int64_t)); - fadv_zone = zinit(fadv_zone_size, - FADV_ZONE_MAX * fadv_zone_size, 0, FADV_ZONE_NAME); - if (fadv_zone == NULL) { - panic("%s: failed allocating %s", __func__, FADV_ZONE_NAME); - /* NOTREACHED */ - } - zone_change(fadv_zone, Z_EXPAND, TRUE); - zone_change(fadv_zone, Z_CALLERACCT, FALSE); + fadv_size = sizeof(struct flowadv_fcentry); + fadv_cache = mcache_create(FADV_CACHE_NAME, fadv_size, + sizeof(uint64_t), 0, MCR_SLEEP); if (kernel_thread_start(flowadv_thread_func, NULL, &fadv_thread) != KERN_SUCCESS) { @@ -149,9 +141,9 @@ flowadv_alloc_entry(int how) { struct flowadv_fcentry *fce; - fce = (how == M_WAITOK) ? zalloc(fadv_zone) : zalloc_noblock(fadv_zone); - if (fce != NULL) { - bzero(fce, fadv_zone_size); + if ((fce = mcache_alloc(fadv_cache, (how == M_WAITOK) ? + MCR_SLEEP : MCR_NOSLEEP)) != NULL) { + bzero(fce, fadv_size); } return fce; @@ -160,7 +152,7 @@ flowadv_alloc_entry(int how) void flowadv_free_entry(struct flowadv_fcentry *fce) { - zfree(fadv_zone, fce); + mcache_free(fadv_cache, fce); } void @@ -261,3 +253,9 @@ flowadv_thread_func(void *v, wait_result_t w) lck_mtx_unlock(&fadv_lock); VERIFY(0); } + +void +flowadv_reap_caches(boolean_t purge) +{ + mcache_reap_now(fadv_cache, purge); +} diff --git a/bsd/net/flowadv.h b/bsd/net/flowadv.h index c3872a425..cc5e6c978 100644 --- a/bsd/net/flowadv.h +++ b/bsd/net/flowadv.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012-2017 Apple Inc. All rights reserved. + * Copyright (c) 2012-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -58,6 +58,7 @@ extern struct flowadv_fcentry *flowadv_alloc_entry(int); extern void flowadv_free_entry(struct flowadv_fcentry *); extern void flowadv_add(struct flowadv_fclist *); extern void flowadv_add_entry(struct flowadv_fcentry *); +extern void flowadv_reap_caches(boolean_t); __END_DECLS diff --git a/bsd/net/flowhash.c b/bsd/net/flowhash.c index 45d01169c..2d654422d 100644 --- a/bsd/net/flowhash.c +++ b/bsd/net/flowhash.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011-2012 Apple Inc. All rights reserved. + * Copyright (c) 2011-2020 Apple Inc. All rights reserved. 
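The flowadv.c hunks above drop the fixed-size zinit() zone in favour of an mcache created with mcache_create(), plus a new flowadv_reap_caches() hook so the cache can be trimmed under memory pressure. As a rough userspace analogue only (this is not the xnu mcache API), a per-type cache with alloc/free/reap entry points might look like:

#include <stdbool.h>
#include <stdlib.h>
#include <string.h>

/* Toy object cache: a free list threaded through recycled objects.
 * alloc/free/reap mirror the roles of mcache_alloc(), mcache_free()
 * and mcache_reap_now() in spirit only. */
struct obj_cache {
    size_t objsize;                       /* must be >= sizeof(void *) */
    void  *freelist;
};

static void *
cache_alloc(struct obj_cache *c)
{
    void *p = c->freelist;
    if (p != NULL) {
        c->freelist = *(void **)p;        /* pop a recycled object */
    } else {
        p = malloc(c->objsize);
    }
    if (p != NULL) {
        memset(p, 0, c->objsize);         /* callers expect zeroed entries */
    }
    return p;
}

static void
cache_free(struct obj_cache *c, void *p)
{
    *(void **)p = c->freelist;            /* push back onto the free list */
    c->freelist = p;
}

static void
cache_reap(struct obj_cache *c, bool purge)
{
    /* Under memory pressure, hand cached objects back to the allocator. */
    while (purge && c->freelist != NULL) {
        void *p = c->freelist;
        c->freelist = *(void **)p;
        free(p);
    }
}

int
main(void)
{
    struct obj_cache c = { 64, NULL };
    void *entry = cache_alloc(&c);
    cache_free(&c, entry);
    cache_reap(&c, true);
    return 0;
}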
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -48,6 +48,7 @@ #include #include #include +#include static inline u_int32_t getblock32(const u_int32_t *, int); static inline u_int64_t getblock64(const u_int64_t *, int); @@ -209,10 +210,10 @@ net_flowhash_mh3_x86_32(const void *key, u_int32_t len, const u_int32_t seed) switch (len & 3) { case 3: k1 ^= tail[2] << 16; - /* FALLTHRU */ + OS_FALLTHROUGH; case 2: k1 ^= tail[1] << 8; - /* FALLTHRU */ + OS_FALLTHROUGH; case 1: k1 ^= tail[0]; k1 *= MH3_X86_32_C1; @@ -305,22 +306,22 @@ net_flowhash_mh3_x64_128(const void *key, u_int32_t len, const u_int32_t seed) switch (len & 15) { case 15: k2 ^= ((u_int64_t)tail[14]) << 48; - /* FALLTHRU */ + OS_FALLTHROUGH; case 14: k2 ^= ((u_int64_t)tail[13]) << 40; - /* FALLTHRU */ + OS_FALLTHROUGH; case 13: k2 ^= ((u_int64_t)tail[12]) << 32; - /* FALLTHRU */ + OS_FALLTHROUGH; case 12: k2 ^= ((u_int64_t)tail[11]) << 24; - /* FALLTHRU */ + OS_FALLTHROUGH; case 11: k2 ^= ((u_int64_t)tail[10]) << 16; - /* FALLTHRU */ + OS_FALLTHROUGH; case 10: k2 ^= ((u_int64_t)tail[9]) << 8; - /* FALLTHRU */ + OS_FALLTHROUGH; case 9: k2 ^= ((u_int64_t)tail[8]) << 0; k2 *= MH3_X64_128_C2; @@ -333,28 +334,28 @@ net_flowhash_mh3_x64_128(const void *key, u_int32_t len, const u_int32_t seed) #endif /* !__x86_64__ && !__arm64__ */ k2 *= MH3_X64_128_C1; h2 ^= k2; - /* FALLTHRU */ + OS_FALLTHROUGH; case 8: k1 ^= ((u_int64_t)tail[7]) << 56; - /* FALLTHRU */ + OS_FALLTHROUGH; case 7: k1 ^= ((u_int64_t)tail[6]) << 48; - /* FALLTHRU */ + OS_FALLTHROUGH; case 6: k1 ^= ((u_int64_t)tail[5]) << 40; - /* FALLTHRU */ + OS_FALLTHROUGH; case 5: k1 ^= ((u_int64_t)tail[4]) << 32; - /* FALLTHRU */ + OS_FALLTHROUGH; case 4: k1 ^= ((u_int64_t)tail[3]) << 24; - /* FALLTHRU */ + OS_FALLTHROUGH; case 3: k1 ^= ((u_int64_t)tail[2]) << 16; - /* FALLTHRU */ + OS_FALLTHROUGH; case 2: k1 ^= ((u_int64_t)tail[1]) << 8; - /* FALLTHRU */ + OS_FALLTHROUGH; case 1: k1 ^= ((u_int64_t)tail[0]) << 0; k1 *= MH3_X64_128_C1; @@ -544,37 +545,37 @@ net_flowhash_jhash(const void *key, u_int32_t len, const u_int32_t seed) switch (len) { case 12: c += k[11]; - /* FALLTHRU */ + OS_FALLTHROUGH; case 11: c += ((u_int32_t)k[10]) << 8; - /* FALLTHRU */ + OS_FALLTHROUGH; case 10: c += ((u_int32_t)k[9]) << 16; - /* FALLTHRU */ + OS_FALLTHROUGH; case 9: c += ((u_int32_t)k[8]) << 24; - /* FALLTHRU */ + OS_FALLTHROUGH; case 8: b += k[7]; - /* FALLTHRU */ + OS_FALLTHROUGH; case 7: b += ((u_int32_t)k[6]) << 8; - /* FALLTHRU */ + OS_FALLTHROUGH; case 6: b += ((u_int32_t)k[5]) << 16; - /* FALLTHRU */ + OS_FALLTHROUGH; case 5: b += ((u_int32_t)k[4]) << 24; - /* FALLTHRU */ + OS_FALLTHROUGH; case 4: a += k[3]; - /* FALLTHRU */ + OS_FALLTHROUGH; case 3: a += ((u_int32_t)k[2]) << 8; - /* FALLTHRU */ + OS_FALLTHROUGH; case 2: a += ((u_int32_t)k[1]) << 16; - /* FALLTHRU */ + OS_FALLTHROUGH; case 1: a += ((u_int32_t)k[0]) << 24; break; @@ -734,7 +735,7 @@ net_flowhash_jhash(const void *key, u_int32_t len, const u_int32_t seed) case 11: c += ((u_int32_t)k8[10]) << 16; - /* FALLTHRU */ + OS_FALLTHROUGH; case 10: c += k[4]; b += k[2] + (((u_int32_t)k[3]) << 16); @@ -743,7 +744,7 @@ net_flowhash_jhash(const void *key, u_int32_t len, const u_int32_t seed) case 9: c += k8[8]; - /* FALLTHRU */ + OS_FALLTHROUGH; case 8: b += k[2] + (((u_int32_t)k[3]) << 16); a += k[0] + (((u_int32_t)k[1]) << 16); @@ -751,7 +752,7 @@ net_flowhash_jhash(const void *key, u_int32_t len, const u_int32_t seed) case 7: b += ((u_int32_t)k8[6]) << 16; - /* FALLTHRU */ + OS_FALLTHROUGH; case 6: b += k[2]; a += k[0] + 
(((u_int32_t)k[1]) << 16); @@ -759,14 +760,14 @@ net_flowhash_jhash(const void *key, u_int32_t len, const u_int32_t seed) case 5: b += k8[4]; - /* FALLTHRU */ + OS_FALLTHROUGH; case 4: a += k[0] + (((u_int32_t)k[1]) << 16); break; case 3: a += ((u_int32_t)k8[2]) << 16; - /* FALLTHRU */ + OS_FALLTHROUGH; case 2: a += k[0]; break; @@ -812,37 +813,37 @@ while (len > 12) { switch (len) { case 12: c += ((u_int32_t)k[11]) << 24; -/* FALLTHRU */ + OS_FALLTHROUGH; case 11: c += ((u_int32_t)k[10]) << 16; -/* FALLTHRU */ + OS_FALLTHROUGH; case 10: c += ((u_int32_t)k[9]) << 8; -/* FALLTHRU */ + OS_FALLTHROUGH; case 9: c += k[8]; -/* FALLTHRU */ + OS_FALLTHROUGH; case 8: b += ((u_int32_t)k[7]) << 24; -/* FALLTHRU */ + OS_FALLTHROUGH; case 7: b += ((u_int32_t)k[6]) << 16; -/* FALLTHRU */ + OS_FALLTHROUGH; case 6: b += ((u_int32_t)k[5]) << 8; -/* FALLTHRU */ + OS_FALLTHROUGH; case 5: b += k[4]; -/* FALLTHRU */ + OS_FALLTHROUGH; case 4: a += ((u_int32_t)k[3]) << 24; -/* FALLTHRU */ + OS_FALLTHROUGH; case 3: a += ((u_int32_t)k[2]) << 16; -/* FALLTHRU */ + OS_FALLTHROUGH; case 2: a += ((u_int32_t)k[1]) << 8; -/* FALLTHRU */ + OS_FALLTHROUGH; case 1: a += k[0]; break; diff --git a/bsd/net/frame802154.c b/bsd/net/frame802154.c index e5f2e9355..c5d0d2c99 100644 --- a/bsd/net/frame802154.c +++ b/bsd/net/frame802154.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017 Apple Inc. All rights reserved. + * Copyright (c) 2017-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -223,14 +223,14 @@ frame802154_create(frame802154_t *p, uint8_t *buf) /* OK, now we have field lengths. Time to actually construct */ /* the outgoing frame, and store it in buf */ - buf[0] = (p->fcf.frame_type & 7) | + buf[0] = (uint8_t)((p->fcf.frame_type & 7) | ((p->fcf.security_enabled & 1) << 3) | ((p->fcf.frame_pending & 1) << 4) | ((p->fcf.ack_required & 1) << 5) | - ((p->fcf.panid_compression & 1) << 6); - buf[1] = ((p->fcf.dest_addr_mode & 3) << 2) | + ((p->fcf.panid_compression & 1) << 6)); + buf[1] = (uint8_t)(((p->fcf.dest_addr_mode & 3) << 2) | ((p->fcf.frame_version & 3) << 4) | - ((p->fcf.src_addr_mode & 3) << 6); + ((p->fcf.src_addr_mode & 3) << 6)); /* sequence number */ buf[2] = p->seq; @@ -293,12 +293,12 @@ frame802154_create(frame802154_t *p, uint8_t *buf) * \param len The size of the input data * \param pf The frame802154_t struct to store the parsed frame information. 
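The /* FALLTHRU */ to OS_FALLTHROUGH conversions running through flowhash.c above trade a comment convention for a compiler-visible annotation, so -Wimplicit-fallthrough can still flag genuinely missing breaks. A self-contained sketch using a portable stand-in macro (OS_FALLTHROUGH itself comes from the os/base headers):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#if defined(__has_attribute)
#if __has_attribute(fallthrough)
#define FALLTHROUGH __attribute__((fallthrough))
#endif
#endif
#ifndef FALLTHROUGH
#define FALLTHROUGH do { } while (0)   /* no-op fallback */
#endif

/* Mix the 1..3 trailing bytes of a buffer into k1, tail-first, the same
 * shape as the MurmurHash3 tail handling above (hash constants omitted). */
static uint32_t
mix_tail(const uint8_t *tail, size_t len, uint32_t k1)
{
    switch (len & 3) {
    case 3:
        k1 ^= (uint32_t)tail[2] << 16;
        FALLTHROUGH;
    case 2:
        k1 ^= (uint32_t)tail[1] << 8;
        FALLTHROUGH;
    case 1:
        k1 ^= (uint32_t)tail[0];
        break;
    }
    return k1;
}

int
main(void)
{
    const uint8_t buf[3] = { 0xde, 0xad, 0xbe };
    printf("k1 = 0x%08x\n", mix_tail(buf, sizeof(buf), 0));
    return 0;
}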
*/ -int -frame802154_parse(uint8_t *data, int len, frame802154_t *pf, uint8_t **payload) +size_t +frame802154_parse(uint8_t *data, size_t len, frame802154_t *pf, uint8_t **payload) { uint8_t *p; frame802154_fcf_t fcf; - int c; + size_t c; #if LLSEC802154_USES_EXPLICIT_KEYS uint8_t key_id_mode; #endif /* LLSEC802154_USES_EXPLICIT_KEYS */ @@ -328,7 +328,7 @@ frame802154_parse(uint8_t *data, int len, frame802154_t *pf, uint8_t **payload) /* Destination address, if any */ if (fcf.dest_addr_mode) { /* Destination PAN */ - pf->dest_pid = p[0] + (p[1] << 8); + pf->dest_pid = (uint16_t)(p[0] + (p[1] << 8)); p += 2; /* Destination address */ @@ -357,7 +357,7 @@ frame802154_parse(uint8_t *data, int len, frame802154_t *pf, uint8_t **payload) if (fcf.src_addr_mode) { /* Source PAN */ if (!fcf.panid_compression) { - pf->src_pid = p[0] + (p[1] << 8); + pf->src_pid = (uint16_t)(p[0] + (p[1] << 8)); p += 2; } else { pf->src_pid = pf->dest_pid; @@ -412,7 +412,7 @@ frame802154_parse(uint8_t *data, int len, frame802154_t *pf, uint8_t **payload) /* header length */ c = p - data; /* payload length */ - pf->payload_len = (len - c); + pf->payload_len = (int)(len - c); /* payload */ *payload = p; diff --git a/bsd/net/frame802154.h b/bsd/net/frame802154.h index fbdb29cab..b069ef58f 100644 --- a/bsd/net/frame802154.h +++ b/bsd/net/frame802154.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017 Apple Inc. All rights reserved. + * Copyright (c) 2017-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -95,7 +95,7 @@ #include "contiki-conf.h" #include - +#include #ifdef IEEE802154_CONF_PANID #define IEEE802154_PANID IEEE802154_CONF_PANID #else /* IEEE802154_CONF_PANID */ @@ -220,7 +220,7 @@ typedef struct frame802154 frame802154_t; int frame802154_hdrlen(frame802154_t *p); int frame802154_create(frame802154_t *p, uint8_t *buf); -int frame802154_parse(uint8_t *data, int length, frame802154_t *pf, uint8_t **payload); +size_t frame802154_parse(uint8_t *data, size_t length, frame802154_t *pf, uint8_t **payload); /** @} */ #endif /* FRAME_802154_H */ diff --git a/bsd/net/if.c b/bsd/net/if.c index 308c04b54..6cc232bc6 100644 --- a/bsd/net/if.c +++ b/bsd/net/if.c @@ -109,7 +109,7 @@ #include #include -#if INET || INET6 +#if INET #include #include #include @@ -121,17 +121,11 @@ #include #include #include -#if INET6 #include #include #include #include -#endif /* INET6 */ -#endif /* INET || INET6 */ - -#if CONFIG_MACF_NET -#include -#endif +#endif /* INET */ #include @@ -230,14 +224,12 @@ static decl_lck_mtx_data(, ifma_trash_lock); #define IFMA_ZONE_MAX 64 /* maximum elements in zone */ #define IFMA_ZONE_NAME "ifmultiaddr" /* zone name */ -#if INET6 /* * XXX: declare here to avoid to include many inet6 related files.. * should be more generalized? 
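frame802154_parse() above now takes and returns size_t and narrows the two assembled PAN-id bytes through an explicit (uint16_t) cast. The core of that parse step in stand-alone form (simplified; the real function also walks the addressing, security and sequence fields):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Parse a little-endian 16-bit PAN id and treat the rest as payload,
 * returning the header length consumed, as frame802154_parse() does for
 * the full 802.15.4 header. */
static size_t
parse_pan_and_payload(const uint8_t *data, size_t len,
    uint16_t *pan, const uint8_t **payload, size_t *payload_len)
{
    const uint8_t *p = data;

    if (len < 2) {
        return 0;
    }
    *pan = (uint16_t)(p[0] + (p[1] << 8));   /* widened by the +, narrowed back */
    p += 2;

    *payload = p;
    *payload_len = len - (size_t)(p - data); /* bytes left after the header */
    return (size_t)(p - data);               /* header length */
}

int
main(void)
{
    const uint8_t frame[] = { 0x34, 0x12, 0xaa, 0xbb, 0xcc };
    uint16_t pan = 0;
    const uint8_t *payload = NULL;
    size_t payload_len = 0;

    size_t hdrlen = parse_pan_and_payload(frame, sizeof(frame),
        &pan, &payload, &payload_len);
    printf("header %zu bytes, pan 0x%04x, payload %zu bytes\n",
        hdrlen, (unsigned)pan, payload_len);
    return 0;
}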
*/ extern void nd6_setmtu(struct ifnet *); extern lck_mtx_t *nd6_mutex; -#endif SYSCTL_NODE(_net, PF_LINK, link, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "Link layers"); SYSCTL_NODE(_net_link, 0, generic, CTLFLAG_RW | CTLFLAG_LOCKED, 0, @@ -257,6 +249,44 @@ SYSCTL_INT(_net_link_generic_system, OID_AUTO, default_tcp_kao_max, static const uint32_t default_tcp_kao_max = 0; #endif /* (DEBUG || DEVELOPMENT) */ +u_int32_t companion_link_sock_buffer_limit = 0; + +static int +sysctl_set_companion_link_sock_buf_limit SYSCTL_HANDLER_ARGS +{ +#pragma unused(arg1, arg2) + int error, tmp = companion_link_sock_buffer_limit; + error = sysctl_handle_int(oidp, &tmp, 0, req); + if (tmp < 0) { + return EINVAL; + } + if ((error = priv_check_cred(kauth_cred_get(), + PRIV_NET_INTERFACE_CONTROL, 0)) != 0) { + return error; + } + + u_int32_t new_limit = tmp; + if (new_limit == companion_link_sock_buffer_limit) { + return 0; + } + + bool recover = new_limit == 0 ? true : false; + if (recover) { + error = inp_recover_companion_link(&tcbinfo); + } else { + error = inp_limit_companion_link(&tcbinfo, new_limit); + } + if (!error) { + companion_link_sock_buffer_limit = new_limit; + } + return error; +} + +SYSCTL_PROC(_net_link_generic_system, OID_AUTO, companion_sndbuf_limit, + CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED | CTLFLAG_ANYBODY, + &companion_link_sock_buffer_limit, 0, sysctl_set_companion_link_sock_buf_limit, + "I", "set sock send buffer limit of connections using companion links"); + boolean_t intcoproc_unrestricted; /* Eventhandler context for interface events */ @@ -275,15 +305,7 @@ ifa_init(void) ifma_size = (ifma_debug == 0) ? sizeof(struct ifmultiaddr) : sizeof(struct ifmultiaddr_dbg); - ifma_zone = zinit(ifma_size, IFMA_ZONE_MAX * ifma_size, 0, - IFMA_ZONE_NAME); - if (ifma_zone == NULL) { - panic("%s: failed allocating %s", __func__, IFMA_ZONE_NAME); - /* NOTREACHED */ - } - zone_change(ifma_zone, Z_EXPAND, TRUE); - zone_change(ifma_zone, Z_CALLERACCT, FALSE); - + ifma_zone = zone_create(IFMA_ZONE_NAME, ifma_size, ZC_NONE); lck_mtx_init(&ifma_trash_lock, ifa_mtx_grp, ifa_mtx_attr); TAILQ_INIT(&ifma_trash_head); @@ -722,14 +744,8 @@ if_clone_attach(struct if_clone *ifc) lck_mtx_init(&ifc->ifc_mutex, ifnet_lock_group, ifnet_lock_attr); if (ifc->ifc_softc_size != 0) { - ifc->ifc_zone = zinit(ifc->ifc_softc_size, - ifc->ifc_zone_max_elem * ifc->ifc_softc_size, 0, ifc->ifc_name); - if (ifc->ifc_zone == NULL) { - FREE(ifc->ifc_units, M_CLONE); - return ENOBUFS; - } - zone_change(ifc->ifc_zone, Z_EXPAND, TRUE); - zone_change(ifc->ifc_zone, Z_CALLERACCT, FALSE); + ifc->ifc_zone = zone_create(ifc->ifc_name, ifc->ifc_softc_size, + ZC_DESTRUCTIBLE); } LIST_INSERT_HEAD(&if_cloners, ifc, ifc_list); @@ -874,7 +890,6 @@ ifa_foraddr_scoped(unsigned int addr, unsigned int scope) return ia; } -#if INET6 /* * Similar to ifa_foraddr, except that this for IPv6. */ @@ -904,7 +919,6 @@ ifa_foraddr6_scoped(struct in6_addr *addr6, unsigned int scope) return ia; } -#endif /* INET6 */ /* * Return the first (primary) address of a given family on an interface. 
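The new companion_sndbuf_limit sysctl above follows a common handler shape: validate the value, check the caller's privilege, do nothing if the value is unchanged, treat zero as "recover the old buffers", and only commit on success. A userspace control-flow sketch (the privilege check and the inpcb walk are reduced to stand-in functions):

#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t sock_buffer_limit = 0;     /* current limit, 0 = none */

/* Stand-ins for inp_recover_companion_link() / inp_limit_companion_link(). */
static int recover_buffers(void)           { return 0; }
static int apply_limit(uint32_t limit)     { (void)limit; return 0; }

static int
set_sock_buffer_limit(int requested)
{
    if (requested < 0) {
        return EINVAL;                     /* reject nonsense input first */
    }
    /* (the real handler also checks PRIV_NET_INTERFACE_CONTROL here) */

    uint32_t new_limit = (uint32_t)requested;
    if (new_limit == sock_buffer_limit) {
        return 0;                          /* nothing to do */
    }

    bool recover = (new_limit == 0);
    int error = recover ? recover_buffers() : apply_limit(new_limit);
    if (error == 0) {
        sock_buffer_limit = new_limit;     /* commit only once the walk worked */
    }
    return error;
}

int
main(void)
{
    printf("set 4096 -> %d (limit %u)\n", set_sock_buffer_limit(4096),
        sock_buffer_limit);
    printf("set 0    -> %d (limit %u)\n", set_sock_buffer_limit(0),
        sock_buffer_limit);
    return 0;
}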
@@ -1135,11 +1149,7 @@ ifa_ifwithnet_common(const struct sockaddr *addr, unsigned int ifscope) u_int af = addr->sa_family; const char *addr_data = addr->sa_data, *cplim; -#if INET6 if (af != AF_INET && af != AF_INET6) { -#else - if (af != AF_INET) { -#endif /* !INET6 */ ifscope = IFSCOPE_NONE; } @@ -1418,6 +1428,7 @@ link_rtrequest(int cmd, struct rtentry *rt, struct sockaddr *sa) __private_extern__ void if_updown( struct ifnet *ifp, int up) { + u_int32_t eflags; int i; struct ifaddr **ifa; struct timespec tv; @@ -1439,7 +1450,8 @@ if_updown( struct ifnet *ifp, int up) } /* Indicate that the up/down state is changing */ - ifp->if_eflags |= IFEF_UPDOWNCHANGE; + eflags = if_set_eflags(ifp, IFEF_UPDOWNCHANGE); + ASSERT((eflags & IFEF_UPDOWNCHANGE) == 0); /* Mark interface up or down */ if (up) { @@ -1471,7 +1483,7 @@ if_updown( struct ifnet *ifp, int up) /* Aquire the lock to clear the changing flag */ ifnet_lock_exclusive(ifp); - ifp->if_eflags &= ~IFEF_UPDOWNCHANGE; + if_clear_eflags(ifp, IFEF_UPDOWNCHANGE); wakeup(&ifp->if_eflags); } @@ -1514,7 +1526,7 @@ if_qflush(struct ifnet *ifp, int ifq_locked) } if (IFCQ_IS_ENABLED(ifq)) { - IFCQ_PURGE(ifq); + fq_if_request_classq(ifq, CLASSQRQ_PURGE, NULL); } VERIFY(IFCQ_IS_EMPTY(ifq)); @@ -1530,7 +1542,6 @@ if_qflush_sc(struct ifnet *ifp, mbuf_svc_class_t sc, u_int32_t flow, { struct ifclassq *ifq = &ifp->if_snd; u_int32_t cnt = 0, len = 0; - u_int32_t a_cnt = 0, a_len = 0; VERIFY(sc == MBUF_SC_UNSPEC || MBUF_VALID_SC(sc)); VERIFY(flow != 0); @@ -1540,7 +1551,11 @@ if_qflush_sc(struct ifnet *ifp, mbuf_svc_class_t sc, u_int32_t flow, } if (IFCQ_IS_ENABLED(ifq)) { - IFCQ_PURGE_SC(ifq, sc, flow, cnt, len); + cqrq_purge_sc_t req = { sc, flow, 0, 0 }; + + fq_if_request_classq(ifq, CLASSQRQ_PURGE_SC, &req); + cnt = req.packets; + len = req.bytes; } if (!ifq_locked) { @@ -1548,10 +1563,10 @@ if_qflush_sc(struct ifnet *ifp, mbuf_svc_class_t sc, u_int32_t flow, } if (packets != NULL) { - *packets = cnt + a_cnt; + *packets = cnt; } if (bytes != NULL) { - *bytes = len + a_len; + *bytes = len; } } @@ -2410,6 +2425,33 @@ ifioctl_iforder(u_long cmd, caddr_t data) return error; } +static __attribute__((noinline)) int +ifioctl_networkid(struct ifnet *ifp, caddr_t data) +{ + struct if_netidreq *ifnetidr = (struct if_netidreq *)(void *)data; + int error = 0; + int len = ifnetidr->ifnetid_len; + + VERIFY(ifp != NULL); + + if (len > sizeof(ifnetidr->ifnetid)) { + error = EINVAL; + goto end; + } + + if (len == 0) { + bzero(&ifp->network_id, sizeof(ifp->network_id)); + } else if (len > sizeof(ifp->network_id)) { + error = EINVAL; + goto end; + } + + ifp->network_id_len = len; + bcopy(data, ifp->network_id, len); +end: + return error; +} + static __attribute__((noinline)) int ifioctl_netsignature(struct ifnet *ifp, u_long cmd, caddr_t data) { @@ -2449,7 +2491,6 @@ ifioctl_netsignature(struct ifnet *ifp, u_long cmd, caddr_t data) return error; } -#if INET6 static __attribute__((noinline)) int ifioctl_nat64prefix(struct ifnet *ifp, u_long cmd, caddr_t data) { @@ -2510,7 +2551,6 @@ ifioctl_clat46addr(struct ifnet *ifp, u_long cmd, caddr_t data) return error; } -#endif static int @@ -2625,7 +2665,6 @@ ifioctl_restrict_intcoproc(unsigned long cmd, const char *ifname, case SIOCGIFFLAGS: case SIOCGIFEFLAGS: case SIOCGIFCAP: - case SIOCGIFMAC: case SIOCGIFMETRIC: case SIOCGIFMTU: case SIOCGIFPHYS: @@ -2861,17 +2900,13 @@ ifioctl(struct socket *so, u_long cmd, caddr_t data, struct proc *p) error = EOPNOTSUPP; goto done; } - /* FALLTHRU */ + OS_FALLTHROUGH; case SIOCIFCREATE: /* 
struct ifreq */ case SIOCIFCREATE2: /* struct ifreq */ case SIOCIFDESTROY: /* struct ifreq */ case SIOCGIFFLAGS: /* struct ifreq */ case SIOCGIFEFLAGS: /* struct ifreq */ case SIOCGIFCAP: /* struct ifreq */ -#if CONFIG_MACF_NET - case SIOCGIFMAC: /* struct ifreq */ - case SIOCSIFMAC: /* struct ifreq */ -#endif /* CONFIG_MACF_NET */ case SIOCGIFMETRIC: /* struct ifreq */ case SIOCGIFMTU: /* struct ifreq */ case SIOCGIFPHYS: /* struct ifreq */ @@ -2967,7 +3002,6 @@ ifioctl(struct socket *so, u_long cmd, caddr_t data, struct proc *p) ifp = ifunit_ref(ifname); break; -#if INET6 case SIOCSIFPHYADDR_IN6_32: /* struct in6_aliasreq_32 */ bcopy(((struct in6_aliasreq_32 *)(void *)data)->ifra_name, ifname, IFNAMSIZ); @@ -2979,7 +3013,6 @@ ifioctl(struct socket *so, u_long cmd, caddr_t data, struct proc *p) ifname, IFNAMSIZ); ifp = ifunit_ref(ifname); break; -#endif /* INET6 */ case SIOCGIFSTATUS: /* struct ifstat */ ifs = _MALLOC(sizeof(*ifs), M_DEVBUF, M_WAITOK); @@ -3051,6 +3084,11 @@ ifioctl(struct socket *so, u_long cmd, caddr_t data, struct proc *p) ifp = ifunit_ref(ifname); break; + case SIOCSIFNETWORKID: /* struct if_netidreq */ + bcopy(((struct if_netidreq *)(void *)data)->ifnetid_name, + ifname, IFNAMSIZ); + ifp = ifunit_ref(ifname); + break; case SIOCGIFPROTOLIST32: /* struct if_protolistreq32 */ case SIOCGIFPROTOLIST64: /* struct if_protolistreq64 */ bcopy(((struct if_protolistreq *)(void *)data)->ifpl_name, @@ -3080,10 +3118,8 @@ ifioctl(struct socket *so, u_long cmd, caddr_t data, struct proc *p) } switch (cmd) { case SIOCSIFPHYADDR: /* struct {if,in_}aliasreq */ -#if INET6 case SIOCSIFPHYADDR_IN6_32: /* struct in6_aliasreq_32 */ case SIOCSIFPHYADDR_IN6_64: /* struct in6_aliasreq_64 */ -#endif /* INET6 */ error = proc_suser(p); if (error != 0) { break; @@ -3144,7 +3180,9 @@ ifioctl(struct socket *so, u_long cmd, caddr_t data, struct proc *p) error = ifioctl_netsignature(ifp, cmd, data); break; -#if INET6 + case SIOCSIFNETWORKID: /* struct if_netidreq */ + error = ifioctl_networkid(ifp, data); + break; case SIOCSIFNAT64PREFIX: /* struct if_nat64req */ case SIOCGIFNAT64PREFIX: /* struct if_nat64req */ error = ifioctl_nat64prefix(ifp, cmd, data); @@ -3153,7 +3191,6 @@ ifioctl(struct socket *so, u_long cmd, caddr_t data, struct proc *p) case SIOCGIFCLAT46ADDR: /* struct if_clat46req */ error = ifioctl_clat46addr(ifp, cmd, data); break; -#endif case SIOCGIFPROTOLIST32: /* struct if_protolistreq32 */ case SIOCGIFPROTOLIST64: /* struct if_protolistreq64 */ @@ -3284,16 +3321,6 @@ ifioctl_ifreq(struct socket *so, u_long cmd, struct ifreq *ifr, struct proc *p) ifnet_lock_done(ifp); break; -#if CONFIG_MACF_NET - case SIOCGIFMAC: - error = mac_ifnet_label_get(kauth_cred_get(), ifr, ifp); - break; - - case SIOCSIFMAC: - error = mac_ifnet_label_set(kauth_cred_get(), ifr, ifp); - break; -#endif /* CONFIG_MACF_NET */ - case SIOCGIFMETRIC: ifnet_lock_shared(ifp); ifr->ifr_metric = ifp->if_metric; @@ -3448,9 +3475,7 @@ ifioctl_ifreq(struct socket *so, u_long cmd, struct ifreq *ifr, struct proc *p) */ if (ifp->if_mtu != oldmtu) { if_rtmtu_update(ifp); -#if INET6 nd6_setmtu(ifp); -#endif /* INET6 */ /* Inform all transmit queues about the new MTU */ IFCQ_LOCK(ifq); ifnet_update_sndq(ifq, CLASSQ_EV_LINK_MTU); @@ -3478,6 +3503,9 @@ ifioctl_ifreq(struct socket *so, u_long cmd, struct ifreq *ifr, struct proc *p) error = EINVAL; break; } + if (ifr->ifr_addr.sa_len > sizeof(struct sockaddr)) { + ifr->ifr_addr.sa_len = sizeof(struct sockaddr); + } /* * User is permitted to anonymously join a particular link @@ 
-3662,14 +3690,13 @@ ifioctl_ifreq(struct socket *so, u_long cmd, struct ifreq *ifr, struct proc *p) PRIV_NET_INTERFACE_CONTROL, 0)) != 0) { return error; } - ifnet_lock_exclusive(ifp); if (ifr->ifr_expensive) { - ifp->if_eflags |= IFEF_EXPENSIVE; + if_set_eflags(ifp, IFEF_EXPENSIVE); } else { - ifp->if_eflags &= ~IFEF_EXPENSIVE; + if_clear_eflags(ifp, IFEF_EXPENSIVE); } ifnet_increment_generation(ifp); - ifnet_lock_done(ifp); + /* * Update the expensive bit in the delegated interface * structure. @@ -3690,13 +3717,11 @@ ifioctl_ifreq(struct socket *so, u_long cmd, struct ifreq *ifr, struct proc *p) } case SIOCGIFCONSTRAINED: - ifnet_lock_shared(ifp); - if (ifp->if_xflags & IFXF_CONSTRAINED) { + if ((ifp->if_xflags & IFXF_CONSTRAINED) != 0) { ifr->ifr_constrained = 1; } else { ifr->ifr_constrained = 0; } - ifnet_lock_done(ifp); break; case SIOCSIFCONSTRAINED: @@ -3707,14 +3732,12 @@ ifioctl_ifreq(struct socket *so, u_long cmd, struct ifreq *ifr, struct proc *p) PRIV_NET_INTERFACE_CONTROL, 0)) != 0) { return error; } - ifnet_lock_exclusive(ifp); if (ifr->ifr_constrained) { - ifp->if_xflags |= IFXF_CONSTRAINED; + if_set_xflags(ifp, IFXF_CONSTRAINED); } else { - ifp->if_xflags &= ~IFXF_CONSTRAINED; + if_clear_xflags(ifp, IFXF_CONSTRAINED); } ifnet_increment_generation(ifp); - ifnet_lock_done(ifp); /* * Update the constrained bit in the delegated interface * structure. @@ -3724,7 +3747,7 @@ ifioctl_ifreq(struct socket *so, u_long cmd, struct ifreq *ifr, struct proc *p) ifnet_lock_exclusive(difp); if (difp->if_delegated.ifp == ifp) { difp->if_delegated.constrained = - ifp->if_xflags & IFXF_CONSTRAINED ? 1 : 0; + ((ifp->if_xflags & IFXF_CONSTRAINED) != 0) ? 1 : 0; ifnet_increment_generation(difp); } ifnet_lock_done(difp); @@ -3749,13 +3772,11 @@ ifioctl_ifreq(struct socket *so, u_long cmd, struct ifreq *ifr, struct proc *p) PRIV_NET_INTERFACE_CONTROL, 0)) != 0) { return error; } - ifnet_lock_exclusive(ifp); if (ifr->ifr_2kcl) { - ifp->if_eflags |= IFEF_2KCL; + if_set_eflags(ifp, IFEF_2KCL); } else { - ifp->if_eflags &= ~IFEF_2KCL; + if_clear_eflags(ifp, IFEF_2KCL); } - ifnet_lock_done(ifp); break; case SIOCGSTARTDELAY: ifnet_lock_shared(ifp); @@ -3835,8 +3856,8 @@ ifioctl_ifreq(struct socket *so, u_long cmd, struct ifreq *ifr, struct proc *p) case SIOCGIFINTERFACESTATE: if_get_state(ifp, &ifr->ifr_interface_state); - break; + case SIOCSIFINTERFACESTATE: if ((error = priv_check_cred(kauth_cred_get(), PRIV_NET_INTERFACE_CONTROL, 0)) != 0) { @@ -3882,13 +3903,13 @@ ifioctl_ifreq(struct socket *so, u_long cmd, struct ifreq *ifr, struct proc *p) return error; } if (ifr->ifr_ecn_mode == IFRTYPE_ECN_DEFAULT) { - ifp->if_eflags &= ~(IFEF_ECN_ENABLE | IFEF_ECN_DISABLE); + if_clear_eflags(ifp, IFEF_ECN_ENABLE | IFEF_ECN_DISABLE); } else if (ifr->ifr_ecn_mode == IFRTYPE_ECN_ENABLE) { - ifp->if_eflags |= IFEF_ECN_ENABLE; - ifp->if_eflags &= ~IFEF_ECN_DISABLE; + if_set_eflags(ifp, IFEF_ECN_ENABLE); + if_clear_eflags(ifp, IFEF_ECN_DISABLE); } else if (ifr->ifr_ecn_mode == IFRTYPE_ECN_DISABLE) { - ifp->if_eflags |= IFEF_ECN_DISABLE; - ifp->if_eflags &= ~IFEF_ECN_ENABLE; + if_set_eflags(ifp, IFEF_ECN_DISABLE); + if_clear_eflags(ifp, IFEF_ECN_ENABLE); } else { error = EINVAL; } @@ -3901,20 +3922,17 @@ ifioctl_ifreq(struct socket *so, u_long cmd, struct ifreq *ifr, struct proc *p) break; } - ifnet_lock_exclusive(ifp); if ((cmd == SIOCSIFTIMESTAMPENABLE && (ifp->if_xflags & IFXF_TIMESTAMP_ENABLED) != 0) || (cmd == SIOCSIFTIMESTAMPDISABLE && (ifp->if_xflags & IFXF_TIMESTAMP_ENABLED) == 0)) { - ifnet_lock_done(ifp); 
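A recurring change in the ioctl hunks above and below is dropping the ifnet_lock_exclusive()/ifnet_lock_done() pairs and flipping interface flag bits through if_set_eflags()/if_clear_eflags() (and the xflags equivalents). The essential idea, sketched here with C11 atomics rather than the kernel's own helpers, is that the setter returns the previous flag word, so callers such as if_updown() can assert that the "changing" bit was not already set:

#include <assert.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define IFEF_UPDOWNCHANGE 0x80000000u      /* same bit value as in if.h */

static _Atomic uint32_t if_eflags;

static uint32_t
if_set_flags(uint32_t bits)
{
    return atomic_fetch_or(&if_eflags, bits);    /* returns the old word */
}

static void
if_clear_flags(uint32_t bits)
{
    atomic_fetch_and(&if_eflags, ~bits);
}

int
main(void)
{
    uint32_t old = if_set_flags(IFEF_UPDOWNCHANGE);
    assert((old & IFEF_UPDOWNCHANGE) == 0);      /* nobody else mid-change */

    /* ... mark the interface up or down here ... */

    if_clear_flags(IFEF_UPDOWNCHANGE);
    printf("eflags now 0x%08x\n", (unsigned)atomic_load(&if_eflags));
    return 0;
}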
break; } if (cmd == SIOCSIFTIMESTAMPENABLE) { - ifp->if_xflags |= IFXF_TIMESTAMP_ENABLED; + if_set_xflags(ifp, IFXF_TIMESTAMP_ENABLED); } else { - ifp->if_xflags &= ~IFXF_TIMESTAMP_ENABLED; + if_clear_xflags(ifp, IFXF_TIMESTAMP_ENABLED); } - ifnet_lock_done(ifp); /* * Pass the setting to the interface if it supports either * software or hardware time stamping @@ -3951,15 +3969,15 @@ ifioctl_ifreq(struct socket *so, u_long cmd, struct ifreq *ifr, struct proc *p) return error; } if (ifr->ifr_qosmarking_enabled != 0) { - ifp->if_eflags |= IFEF_QOSMARKING_ENABLED; + if_set_eflags(ifp, IFEF_QOSMARKING_ENABLED); } else { - ifp->if_eflags &= ~IFEF_QOSMARKING_ENABLED; + if_clear_eflags(ifp, IFEF_QOSMARKING_ENABLED); } break; case SIOCGQOSMARKINGENABLED: ifr->ifr_qosmarking_enabled = - (ifp->if_eflags & IFEF_QOSMARKING_ENABLED) ? 1 : 0; + ((ifp->if_eflags & IFEF_QOSMARKING_ENABLED) != 0) ? 1 : 0; break; case SIOCSIFDISABLEOUTPUT: @@ -3990,27 +4008,25 @@ ifioctl_ifreq(struct socket *so, u_long cmd, struct ifreq *ifr, struct proc *p) return error; } - ifnet_lock_exclusive(ifp); if (ifr->ifr_low_internet & IFRTYPE_LOW_INTERNET_ENABLE_UL) { - ifp->if_xflags |= IFXF_LOW_INTERNET_UL; + if_set_xflags(ifp, IFXF_LOW_INTERNET_UL); } else { - ifp->if_xflags &= ~(IFXF_LOW_INTERNET_UL); + if_clear_xflags(ifp, IFXF_LOW_INTERNET_UL); } if (ifr->ifr_low_internet & IFRTYPE_LOW_INTERNET_ENABLE_DL) { - ifp->if_xflags |= IFXF_LOW_INTERNET_DL; + if_set_xflags(ifp, IFXF_LOW_INTERNET_DL); } else { - ifp->if_xflags &= ~(IFXF_LOW_INTERNET_DL); + if_clear_xflags(ifp, IFXF_LOW_INTERNET_DL); } - ifnet_lock_done(ifp); break; case SIOCGIFLOWINTERNET: ifnet_lock_shared(ifp); ifr->ifr_low_internet = 0; - if (ifp->if_xflags & IFXF_LOW_INTERNET_UL) { + if ((ifp->if_xflags & IFXF_LOW_INTERNET_UL) != 0) { ifr->ifr_low_internet |= IFRTYPE_LOW_INTERNET_ENABLE_UL; } - if (ifp->if_xflags & IFXF_LOW_INTERNET_DL) { + if ((ifp->if_xflags & IFXF_LOW_INTERNET_DL) != 0) { ifr->ifr_low_internet |= IFRTYPE_LOW_INTERNET_ENABLE_DL; } @@ -4018,34 +4034,32 @@ ifioctl_ifreq(struct socket *so, u_long cmd, struct ifreq *ifr, struct proc *p) break; case SIOCGIFLOWPOWER: ifr->ifr_low_power_mode = - !!(ifp->if_xflags & IFXF_LOW_POWER); + ((ifp->if_xflags & IFXF_LOW_POWER) != 0); break; case SIOCSIFLOWPOWER: #if (DEVELOPMENT || DEBUG) - error = if_set_low_power(ifp, !!(ifr->ifr_low_power_mode)); + error = if_set_low_power(ifp, (ifr->ifr_low_power_mode != 0)); #else /* DEVELOPMENT || DEBUG */ error = EOPNOTSUPP; #endif /* DEVELOPMENT || DEBUG */ break; case SIOCGIFMPKLOG: - ifr->ifr_mpk_log = !!(ifp->if_xflags & IFXF_MPK_LOG); + ifr->ifr_mpk_log = ((ifp->if_xflags & IFXF_MPK_LOG) != 0); break; case SIOCSIFMPKLOG: if (ifr->ifr_mpk_log) { - ifp->if_xflags |= IFXF_MPK_LOG; + if_set_xflags(ifp, IFXF_MPK_LOG); } else { - ifp->if_xflags &= ~IFXF_MPK_LOG; + if_clear_xflags(ifp, IFXF_MPK_LOG); } break; case SIOCGIFNOACKPRIO: - ifnet_lock_shared(ifp); - if (ifp->if_eflags & IFEF_NOACKPRI) { + if ((ifp->if_eflags & IFEF_NOACKPRI) != 0) { ifr->ifr_noack_prio = 1; } else { ifr->ifr_noack_prio = 0; } - ifnet_lock_done(ifp); break; case SIOCSIFNOACKPRIO: @@ -4053,13 +4067,11 @@ ifioctl_ifreq(struct socket *so, u_long cmd, struct ifreq *ifr, struct proc *p) PRIV_NET_INTERFACE_CONTROL, 0)) != 0) { return error; } - ifnet_lock_exclusive(ifp); if (ifr->ifr_noack_prio) { - ifp->if_eflags |= IFEF_NOACKPRI; + if_set_eflags(ifp, IFEF_NOACKPRI); } else { - ifp->if_eflags &= ~IFEF_NOACKPRI; + if_clear_eflags(ifp, IFEF_NOACKPRI); } - ifnet_lock_done(ifp); break; default: @@ -5477,7 
+5489,6 @@ ifioctl_cassert(void) case SIOCGPPPSTATS: case SIOCGPPPCSTATS: -#if INET6 /* bsd/netinet6/in6_var.h */ case SIOCSIFADDR_IN6: case SIOCGIFADDR_IN6: @@ -5531,11 +5542,11 @@ ifioctl_cassert(void) case SIOCAUTOCONF_START: case SIOCAUTOCONF_STOP: case SIOCSETROUTERMODE_IN6: + case SIOCGETROUTERMODE_IN6: case SIOCLL_CGASTART_32: case SIOCLL_CGASTART_64: case SIOCGIFCGAPREP_IN6: case SIOCSIFCGAPREP_IN6: -#endif /* INET6 */ /* bsd/sys/sockio.h */ case SIOCSIFADDR: @@ -5616,10 +5627,6 @@ ifioctl_cassert(void) case SIOCGIFASYNCMAP: case SIOCSIFASYNCMAP: -#if CONFIG_MACF_NET - case SIOCGIFMAC: - case SIOCSIFMAC: -#endif /* CONFIG_MACF_NET */ case SIOCSIFKPI: case SIOCGIFKPI: @@ -5629,6 +5636,7 @@ ifioctl_cassert(void) case SIOCGIFLINKQUALITYMETRIC: case SIOCSIFOPPORTUNISTIC: case SIOCGIFOPPORTUNISTIC: + case SIOCGETROUTERMODE: case SIOCSETROUTERMODE: case SIOCGIFEFLAGS: case SIOCSIFDESC: @@ -5675,6 +5683,7 @@ ifioctl_cassert(void) case SIOCSIFNETSIGNATURE: case SIOCGIFNETSIGNATURE: + case SIOCSIFNETWORKID: case SIOCGECNMODE: case SIOCSECNMODE: @@ -5699,12 +5708,10 @@ ifioctl_cassert(void) case SIOCSIFLOWINTERNET: case SIOCGIFLOWINTERNET: -#if INET6 case SIOCGIFNAT64PREFIX: case SIOCSIFNAT64PREFIX: case SIOCGIFCLAT46ADDR: -#endif /* INET6 */ case SIOCGIFPROTOLIST32: case SIOCGIFPROTOLIST64: diff --git a/bsd/net/if.h b/bsd/net/if.h index 0e516a42e..18cabd3d4 100644 --- a/bsd/net/if.h +++ b/bsd/net/if.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2019 Apple Inc. All rights reserved. + * Copyright (c) 2000-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -153,7 +153,6 @@ struct if_clonereq32 { #define IFEF_NOAUTOIPV6LL 0x00002000 /* Need explicit IPv6 LL address */ #define IFEF_EXPENSIVE 0x00004000 /* Data access has a cost */ #define IFEF_IPV4_ROUTER 0x00008000 /* interior when in IPv4 router mode */ -#define IFEF_IPV6_ROUTER 0x00010000 /* interior when in IPv6 router mode */ #define IFEF_LOCALNET_PRIVATE 0x00020000 /* local private network */ #define IFEF_SERVICE_TRIGGERED IFEF_LOCALNET_PRIVATE #define IFEF_IPV6_ND6ALT 0x00040000 /* alternative. 
KPI for ND6 */ @@ -168,7 +167,7 @@ struct if_clonereq32 { #define IFEF_3CA 0x08000000 /* Capable of 3CA */ #define IFEF_SENDLIST 0x10000000 /* Supports tx packet lists */ #define IFEF_DIRECTLINK 0x20000000 /* point-to-point topology */ -#define IFEF_QOSMARKING_ENABLED 0x40000000 /* OoS marking is enabled */ +#define IFEF_QOSMARKING_ENABLED 0x40000000 /* QoS marking is enabled */ #define IFEF_UPDOWNCHANGE 0x80000000 /* up/down state is changing */ #ifdef XNU_KERNEL_PRIVATE @@ -982,6 +981,12 @@ struct if_nsreq { u_int8_t ifnsr_data[IFNET_SIGNATURELEN]; }; +/* Structure for SIOCSIFNETWORKID */ +struct if_netidreq { + char ifnetid_name[IFNAMSIZ]; + u_int8_t ifnetid_len; + u_int8_t ifnetid[IFNET_NETWORK_ID_LEN]; +}; #define NAT64_PREFIX_LEN_32 4 #define NAT64_PREFIX_LEN_40 5 diff --git a/bsd/net/if_6lowpan.c b/bsd/net/if_6lowpan.c index 92eebb0fa..2d6246d31 100644 --- a/bsd/net/if_6lowpan.c +++ b/bsd/net/if_6lowpan.c @@ -403,7 +403,7 @@ sixlowpan_clone_create(struct if_clone *ifc, u_int32_t unit, __unused void *para if_epraram.len = sizeof(if_epraram); if_epraram.flags = IFNET_INIT_LEGACY; if_epraram.uniqueid = ifl->if6lpan_name; - if_epraram.uniqueid_len = strlen(ifl->if6lpan_name); + if_epraram.uniqueid_len = (uint32_t)strlen(ifl->if6lpan_name); if_epraram.name = ifc->ifc_name; if_epraram.unit = unit; if_epraram.family = IFNET_FAMILY_6LOWPAN; @@ -633,6 +633,10 @@ sixlowpan_input(ifnet_t p, __unused protocol_family_t protocol, p = p_6lowpan_ifnet; mc->m_pkthdr.rcvif = p; + if (len > mc->m_pkthdr.len) { + err = -1; + goto err_out; + } sixlowpan_lock(); ifl = ifnet_get_if6lpan_retained(p); @@ -661,9 +665,16 @@ sixlowpan_input(ifnet_t p, __unused protocol_family_t protocol, /* Parse the 802.15.4 frame header */ bzero(&ieee02154hdr, sizeof(ieee02154hdr)); frame802154_parse(mtod(mc, uint8_t *), len, &ieee02154hdr, &payload); + if (payload == NULL) { + err = -1; + goto err_out; + } /* XXX Add check for your link layer address being dest */ - sixxlowpan_input(&ieee02154hdr, payload); + if (sixxlowpan_input(&ieee02154hdr, payload) != 0) { + err = -1; + goto err_out; + } if (mbuf_setdata(mc, payload, ieee02154hdr.payload_len)) { err = -1; @@ -764,8 +775,9 @@ sixlowpan_ioctl(ifnet_t ifp, u_long cmd, void * data) break; case SIOCSIF6LOWPAN: - user_addr = proc_is64bit(current_proc()) - ? ifr->ifr_data64 : CAST_USER_ADDR_T(ifr->ifr_data); + user_addr = proc_is64bit(current_proc()) ? + CAST_USER_ADDR_T(ifr->ifr_data64) : + CAST_USER_ADDR_T(ifr->ifr_data); error = copyin(user_addr, &req, sizeof(req)); req.parent[IFNAMSIZ - 1] = '\0'; if (error) { @@ -803,8 +815,9 @@ sixlowpan_ioctl(ifnet_t ifp, u_long cmd, void * data) snprintf(req.parent, sizeof(req.parent), "%s%d", ifnet_name(p), ifnet_unit(p)); } - user_addr = proc_is64bit(current_proc()) - ? ifr->ifr_data64 : CAST_USER_ADDR_T(ifr->ifr_data); + user_addr = proc_is64bit(current_proc()) ? 
+ CAST_USER_ADDR_T(ifr->ifr_data64) : + CAST_USER_ADDR_T(ifr->ifr_data); error = copyout(&req, user_addr, sizeof(req)); break; @@ -941,9 +954,9 @@ sixlowpan_framer_extended(struct ifnet *ifp, struct mbuf **m, int buflen = 0, err = 0; frame802154_t ieee02154hdr; if6lpan_ref ifl = NULL; - u_int8_t *payload = NULL; + uint8_t *payload = NULL; struct mbuf *mc = NULL; - u_int16_t len; + uint16_t len; struct sockaddr_in6 *dest6 = (struct sockaddr_in6 *)(uintptr_t)(size_t)ndest; /* Initialize 802.15.4 frame header */ @@ -1020,7 +1033,7 @@ sixlowpan_framer_extended(struct ifnet *ifp, struct mbuf **m, * Add 2 bytes at the front of the frame indicating the total payload * length */ - len = htons(buflen + ieee02154hdr.payload_len); + len = htons((uint16_t)(buflen + ieee02154hdr.payload_len)); m_copyback(mc, 0, sizeof(len), &len); /* Copy back the 802.15.4 Data frame header into mbuf */ m_copyback(mc, sizeof(len), buflen, buf); @@ -1067,7 +1080,6 @@ sixlowpan_detach_inet6(struct ifnet *ifp, protocol_family_t protocol_family) (void) ifnet_detach_protocol(ifp, protocol_family); } -#if INET6 __private_extern__ int sixlowpan_family_init(void) { @@ -1092,4 +1104,3 @@ sixlowpan_family_init(void) done: return error; } -#endif diff --git a/bsd/net/if_bond.c b/bsd/net/if_bond.c index 754923927..ac2115b13 100644 --- a/bsd/net/if_bond.c +++ b/bsd/net/if_bond.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2004-2019 Apple Inc. All rights reserved. + * Copyright (c) 2004-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -2056,6 +2056,7 @@ bond_device_mtu(struct ifnet * ifp, ifbond_ref ifb) static int bond_add_interface(struct ifnet * ifp, struct ifnet * port_ifp) { + u_int32_t eflags; uint32_t control_flags = 0; int devmtu; int error = 0; @@ -2111,18 +2112,20 @@ bond_add_interface(struct ifnet * ifp, struct ifnet * port_ifp) error = EBUSY; goto signal_done; } - ifnet_lock_exclusive(port_ifp); if ((ifnet_eflags(port_ifp) & (IFEF_VLAN | IFEF_BOND)) != 0) { /* interface already has VLAN's, or is part of bond */ - ifnet_lock_done(port_ifp); error = EBUSY; goto signal_done; } /* mark the interface busy */ - /* can't use ifnet_set_eflags because that takes the lock */ - port_ifp->if_eflags |= IFEF_BOND; - ifnet_lock_done(port_ifp); + eflags = if_set_eflags(port_ifp, IFEF_BOND); + if ((eflags & IFEF_VLAN) != 0) { + /* vlan got in ahead of us */ + if_clear_eflags(port_ifp, IFEF_BOND); + error = EBUSY; + goto signal_done; + } if (TAILQ_EMPTY(&ifb->ifb_port_list)) { ifnet_set_offload(ifp, ifnet_offload(port_ifp)); @@ -2356,7 +2359,7 @@ failed: TAILQ_REMOVE(&ifb->ifb_port_list, p, po_port_list); ifb->ifb_port_count--; } - ifnet_set_eflags(ifp, 0, IFEF_BOND); + if_clear_eflags(ifp, IFEF_BOND); if (TAILQ_EMPTY(&ifb->ifb_port_list)) { ifb->ifb_altmtu = 0; ifnet_set_mtu(ifp, ETHERMTU); @@ -2532,7 +2535,7 @@ bond_remove_interface(ifbond_ref ifb, struct ifnet * port_ifp) bond_lock(); bondport_free(p); - ifnet_set_eflags(port_ifp, 0, IFEF_BOND); + if_clear_eflags(port_ifp, IFEF_BOND); /* release this bondport's reference to the ifbond */ ifbond_release(ifb); @@ -3193,23 +3196,24 @@ bond_iff_detached(__unused void *cookie, ifnet_t port_ifp) static void interface_link_event(struct ifnet * ifp, u_int32_t event_code) { - struct { - struct kern_event_msg header; - u_int32_t unit; - char if_name[IFNAMSIZ]; - } event; - - bzero(&event, sizeof(event)); - event.header.total_size = sizeof(event); - event.header.vendor_code = KEV_VENDOR_APPLE; - event.header.kev_class = KEV_NETWORK_CLASS; - event.header.kev_subclass 
= KEV_DL_SUBCLASS; - event.header.event_code = event_code; - event.header.event_data[0] = ifnet_family(ifp); - event.unit = (u_int32_t) ifnet_unit(ifp); - strlcpy(event.if_name, ifnet_name(ifp), IFNAMSIZ); - ifnet_event(ifp, &event.header); - return; + struct event { + u_int32_t ifnet_family; + u_int32_t unit; + char if_name[IFNAMSIZ]; + }; + _Alignas(struct kern_event_msg) char message[sizeof(struct kern_event_msg) + sizeof(struct event)] = { 0 }; + struct kern_event_msg *header = (struct kern_event_msg*)message; + struct event *data = (struct event *)(header + 1); + + header->total_size = sizeof(message); + header->vendor_code = KEV_VENDOR_APPLE; + header->kev_class = KEV_NETWORK_CLASS; + header->kev_subclass = KEV_DL_SUBCLASS; + header->event_code = event_code; + data->ifnet_family = ifnet_family(ifp); + data->unit = (u_int32_t)ifnet_unit(ifp); + strlcpy(data->if_name, ifnet_name(ifp), IFNAMSIZ); + ifnet_event(ifp, header); } static errno_t @@ -3320,7 +3324,6 @@ bond_family_init(void) error); goto done; } -#if INET6 error = proto_register_plumber(PF_INET6, APPLE_IF_FAM_BOND, ether_attach_inet6, ether_detach_inet6); @@ -3329,7 +3332,6 @@ bond_family_init(void) error); goto done; } -#endif error = bond_clone_attach(); if (error != 0) { printf("bond: proto_register_plumber failed bond_clone_attach error=%d\n", @@ -4267,7 +4269,7 @@ bondport_receive_machine_port_disabled(bondport_ref p, LAEvent event, p->po_receive_state = ReceiveState_PORT_DISABLED; ps = &p->po_partner_state; ps->ps_state = lacp_actor_partner_state_set_out_of_sync(ps->ps_state); - /* FALL THROUGH */ + OS_FALLTHROUGH; case LAEventMediaChange: if (media_active(&p->po_media_info)) { if (media_ok(&p->po_media_info)) { @@ -4461,7 +4463,7 @@ bondport_periodic_transmit_machine(bondport_ref p, LAEvent event, timestamp_printf("[%s] periodic_transmit Start\n", bondport_get_name(p)); } - /* FALL THROUGH */ + OS_FALLTHROUGH; case LAEventMediaChange: devtimer_cancel(p->po_periodic_timer); p->po_periodic_interval = 0; @@ -4469,6 +4471,7 @@ bondport_periodic_transmit_machine(bondport_ref p, LAEvent event, || media_ok(&p->po_media_info) == 0) { break; } + OS_FALLTHROUGH; case LAEventPacket: /* Neither Partner nor Actor are LACP Active, no periodic tx */ ps = &p->po_partner_state; @@ -4743,7 +4746,7 @@ bondport_mux_machine_waiting(bondport_ref p, LAEvent event, bondport_get_name(p)); } p->po_mux_state = MuxState_WAITING; - /* FALL THROUGH */ + OS_FALLTHROUGH; default: case LAEventSelectedChange: if (p->po_selected == SelectedState_UNSELECTED) { @@ -4837,7 +4840,7 @@ bondport_mux_machine_attached(bondport_ref p, LAEvent event, bondport_disable_distributing(p); p->po_actor_state = s; bondport_flags_set_ntt(p); - /* FALL THROUGH */ + OS_FALLTHROUGH; default: switch (p->po_selected) { case SelectedState_SELECTED: @@ -4877,7 +4880,7 @@ bondport_mux_machine_collecting_distributing(bondport_ref p, s = lacp_actor_partner_state_set_distributing(s); p->po_actor_state = s; bondport_flags_set_ntt(p); - /* FALL THROUGH */ + OS_FALLTHROUGH; default: s = p->po_partner_state.ps_state; if (lacp_actor_partner_state_in_sync(s) == 0) { diff --git a/bsd/net/if_bridge.c b/bsd/net/if_bridge.c index e72c0d57c..bb031fc3b 100644 --- a/bsd/net/if_bridge.c +++ b/bsd/net/if_bridge.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2004-2019 Apple Inc. All rights reserved. + * Copyright (c) 2004-2020 Apple Inc. All rights reserved. 
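interface_link_event() in the if_bond.c hunk above is rebuilt around one flat, suitably aligned byte buffer: a kern_event_msg header followed immediately by the event payload, instead of a nested struct. The same assembly pattern in miniature (both struct layouts below are simplified stand-ins, not the real kernel definitions):

#include <stdalign.h>
#include <stdint.h>
#include <stdio.h>

struct msg_header {                    /* stand-in for kern_event_msg */
    uint32_t total_size;
    uint32_t event_code;
};

struct link_event {                    /* stand-in for the event payload */
    uint32_t unit;
    char     if_name[16];
};

int
main(void)
{
    alignas(struct msg_header) char message[sizeof(struct msg_header) +
        sizeof(struct link_event)] = { 0 };
    struct msg_header *header = (struct msg_header *)message;
    struct link_event *data = (struct link_event *)(header + 1);

    header->total_size = sizeof(message);
    header->event_code = 1;                       /* e.g. link up */
    data->unit = 0;
    snprintf(data->if_name, sizeof(data->if_name), "bond0");

    printf("event %u for %s, %u bytes total\n",
        header->event_code, data->if_name, header->total_size);
    return 0;
}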
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -143,15 +143,14 @@ #include #include /* for struct arpcom */ +#include /* for struct tcphdr */ #include #include #define _IP_VHL #include #include -#if INET6 #include #include -#endif #ifdef DEV_CARP #include #endif @@ -168,10 +167,6 @@ #include #include -#ifdef PFIL_HOOKS -#include -#include -#endif /* PFIL_HOOKS */ #include #include @@ -190,6 +185,7 @@ #define BR_DBGF_HOSTFILTER 0x0100 #define BR_DBGF_CHECKSUM 0x0200 #define BR_DBGF_MAC_NAT 0x0400 +#define BR_DBGF_SEGMENTATION 0x0800 #endif /* BRIDGE_DEBUG */ #define _BRIDGE_LOCK(_sc) lck_mtx_lock(&(_sc)->sc_mtx) @@ -304,7 +300,7 @@ /* * List of capabilities to possibly mask on the member interface. */ -#define BRIDGE_IFCAPS_MASK (IFCAP_TOE|IFCAP_TSO|IFCAP_TXCSUM) +#define BRIDGE_IFCAPS_MASK (IFCAP_TSO | IFCAP_TXCSUM) /* * List of capabilities to disable on the member interface. */ @@ -327,6 +323,7 @@ struct bridge_iflist { struct bridge_softc *bif_sc; uint32_t bif_flags; + /* host filter */ struct in_addr bif_hf_ipsrc; uint8_t bif_hf_hwsrc[ETHER_ADDR_LEN]; }; @@ -498,8 +495,10 @@ decl_lck_mtx_data(static, bridge_list_mtx); static int bridge_rtable_prune_period = BRIDGE_RTABLE_PRUNE_PERIOD; -static zone_t bridge_rtnode_pool = NULL; -static zone_t bridge_mne_pool = NULL; +static ZONE_DECLARE(bridge_rtnode_pool, "bridge_rtnode", + sizeof(struct bridge_rtnode), ZC_NONE); +static ZONE_DECLARE(bridge_mne_pool, "bridge_mac_nat_entry", + sizeof(struct mac_nat_entry), ZC_NONE); static int bridge_clone_create(struct if_clone *, uint32_t, void *); static int bridge_clone_destroy(struct ifnet *); @@ -624,18 +623,10 @@ static int bridge_ioctl_ghostfilter(struct bridge_softc *, void *); static int bridge_ioctl_shostfilter(struct bridge_softc *, void *); static int bridge_ioctl_gmnelist32(struct bridge_softc *, void *); static int bridge_ioctl_gmnelist64(struct bridge_softc *, void *); -#ifdef PFIL_HOOKS -static int bridge_pfil(struct mbuf **, struct ifnet *, struct ifnet *, - int); -static int bridge_fragment(struct ifnet *, struct mbuf *, - struct ether_header *, int, struct llc *); -#endif /* PFIL_HOOKS */ -static int bridge_ip_checkbasic(struct mbuf **); -#ifdef INET6 -static int bridge_ip6_checkbasic(struct mbuf **); -#endif /* INET6 */ static int bridge_pf(struct mbuf **, struct ifnet *, uint32_t sc_filter_flags, int input); +static int bridge_ip_checkbasic(struct mbuf **); +static int bridge_ip6_checkbasic(struct mbuf **); static errno_t bridge_set_bpf_tap(ifnet_t, bpf_tap_mode, bpf_packet_func); static errno_t bridge_bpf_input(ifnet_t, struct mbuf *, const char *, int); @@ -664,9 +655,18 @@ static boolean_t bridge_mac_nat_output(struct bridge_softc *, struct bridge_iflist *, mbuf_t *, struct mac_nat_record *); static void bridge_mac_nat_translate(mbuf_t *, struct mac_nat_record *, const caddr_t); +static boolean_t is_broadcast_ip_packet(mbuf_t *); #define m_copypacket(m, how) m_copym(m, 0, M_COPYALL, how) +static int +gso_ipv4_tcp(struct ifnet *ifp, struct mbuf **mp, u_int mac_hlen, + boolean_t is_tx); + +static int +gso_ipv6_tcp(struct ifnet *ifp, struct mbuf **mp, u_int mac_hlen, + boolean_t is_tx); + /* The default bridge vlan is 1 (IEEE 802.1Q-2003 Table 9-2) */ #define VLANTAGOF(_m) 0 @@ -716,26 +716,6 @@ SYSCTL_STRUCT(_net_link_bridge, OID_AUTO, hostfilterstats, CTLFLAG_RD | CTLFLAG_LOCKED, &bridge_hostfilter_stats, bridge_hostfilter_stats, ""); -#if defined(PFIL_HOOKS) -static int pfil_onlyip = 1; /* only pass IP[46] packets when pfil is enabled */ -static int pfil_bridge = 1; /* run 
pfil hooks on the bridge interface */ -static int pfil_member = 1; /* run pfil hooks on the member interface */ -static int pfil_ipfw = 0; /* layer2 filter with ipfw */ -static int pfil_ipfw_arp = 0; /* layer2 filter with ipfw */ -static int pfil_local_phys = 0; /* run pfil hooks on the physical interface */ - /* for locally destined packets */ -SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_onlyip, CTLFLAG_RW | CTLFLAG_LOCKED, - &pfil_onlyip, 0, "Only pass IP packets when pfil is enabled"); -SYSCTL_INT(_net_link_bridge, OID_AUTO, ipfw_arp, CTLFLAG_RW | CTLFLAG_LOCKED, - &pfil_ipfw_arp, 0, "Filter ARP packets through IPFW layer2"); -SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_bridge, CTLFLAG_RW | CTLFLAG_LOCKED, - &pfil_bridge, 0, "Packet filter on the bridge interface"); -SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_member, CTLFLAG_RW | CTLFLAG_LOCKED, - &pfil_member, 0, "Packet filter on the member interface"); -SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_local_phys, - CTLFLAG_RW | CTLFLAG_LOCKED, &pfil_local_phys, 0, - "Packet filter on the physical interface for locally destined packets"); -#endif /* PFIL_HOOKS */ #if BRIDGESTP static int log_stp = 0; /* log STP state changes */ @@ -981,6 +961,11 @@ static int if_bridge_debug = 0; SYSCTL_INT(_net_link_bridge, OID_AUTO, debug, CTLFLAG_RW | CTLFLAG_LOCKED, &if_bridge_debug, 0, "Bridge debug"); +static int if_bridge_segmentation = 1; +SYSCTL_INT(_net_link_bridge, OID_AUTO, segmentation, + CTLFLAG_RW | CTLFLAG_LOCKED, + &if_bridge_segmentation, 0, "Bridge interface enable segmentation"); + static void printf_ether_header(struct ether_header *); static void printf_mbuf_data(mbuf_t, size_t, size_t); static void printf_mbuf_pkthdr(mbuf_t, const char *, const char *); @@ -1223,14 +1208,6 @@ bridgeattach(int n) int error; lck_grp_attr_t *lck_grp_attr = NULL; - bridge_rtnode_pool = zinit(sizeof(struct bridge_rtnode), - 1024 * sizeof(struct bridge_rtnode), 0, "bridge_rtnode"); - zone_change(bridge_rtnode_pool, Z_CALLERACCT, FALSE); - - bridge_mne_pool = zinit(sizeof(struct mac_nat_entry), - 256 * sizeof(struct mac_nat_entry), 0, "bridge_mac_nat_entry"); - zone_change(bridge_mne_pool, Z_CALLERACCT, FALSE); - lck_grp_attr = lck_grp_attr_alloc_init(); bridge_lock_grp = lck_grp_alloc_init("if_bridge", lck_grp_attr); @@ -1260,42 +1237,6 @@ bridgeattach(int n) return error; } -#if defined(PFIL_HOOKS) -/* - * handler for net.link.bridge.pfil_ipfw - */ -static int -sysctl_pfil_ipfw SYSCTL_HANDLER_ARGS -{ -#pragma unused(arg1, arg2) - int enable = pfil_ipfw; - int error; - - error = sysctl_handle_int(oidp, &enable, 0, req); - enable = (enable) ? 1 : 0; - - if (enable != pfil_ipfw) { - pfil_ipfw = enable; - - /* - * Disable pfil so that ipfw doesnt run twice, if the user - * really wants both then they can re-enable pfil_bridge and/or - * pfil_member. Also allow non-ip packets as ipfw can filter by - * layer2 type. - */ - if (pfil_ipfw) { - pfil_onlyip = 0; - pfil_bridge = 0; - pfil_member = 0; - } - } - - return error; -} - -SYSCTL_PROC(_net_link_bridge, OID_AUTO, ipfw, CTLTYPE_INT | CTLFLAG_RW, - &pfil_ipfw, 0, &sysctl_pfil_ipfw, "I", "Layer2 filter with IPFW"); -#endif /* PFIL_HOOKS */ static errno_t bridge_ifnet_set_attrs(struct ifnet * ifp) @@ -2481,8 +2422,8 @@ bridge_ioctl_add(struct bridge_softc *sc, void *arg) /* XXX is there a better way to identify Wi-Fi STA? 
*/ mac_nat = TRUE; } + break; case IFT_L2VLAN: - /* permitted interface types */ break; case IFT_GIF: /* currently not supported */ @@ -3787,6 +3728,7 @@ bridge_iflinkevent(struct ifnet *ifp) struct bridge_softc *sc = ifp->if_bridge; struct bridge_iflist *bif; u_int32_t event_code = 0; + int media_active; #if BRIDGE_DEBUG if (IF_BRIDGE_DEBUG(BR_DBGF_LIFECYCLE)) { @@ -3799,10 +3741,11 @@ bridge_iflinkevent(struct ifnet *ifp) return; } + media_active = interface_media_active(ifp); BRIDGE_LOCK(sc); bif = bridge_lookup_member_if(sc, ifp); if (bif != NULL) { - if (interface_media_active(ifp)) { + if (media_active) { bif->bif_flags |= BIFF_MEDIA_ACTIVE; } else { bif->bif_flags &= ~BIFF_MEDIA_ACTIVE; @@ -4100,11 +4043,9 @@ bridge_compute_cksum(struct ifnet *src_if, struct ifnet *dst_if, struct mbuf *m) case ETHERTYPE_IP: did_sw = in_finalize_cksum(m, sizeof(*eh), csum_flags); break; -#if INET6 case ETHERTYPE_IPV6: did_sw = in6_finalize_cksum(m, sizeof(*eh), -1, -1, csum_flags); break; -#endif /* INET6 */ } #if BRIDGE_DEBUG if (IF_BRIDGE_DEBUG(BR_DBGF_CHECKSUM)) { @@ -4116,6 +4057,118 @@ bridge_compute_cksum(struct ifnet *src_if, struct ifnet *dst_if, struct mbuf *m) #endif /* BRIDGE_DEBUG */ } +static int +bridge_transmit(struct ifnet * ifp, struct mbuf *m) +{ + struct flowadv adv = { .code = FADV_SUCCESS }; + errno_t error; + + error = dlil_output(ifp, 0, m, NULL, NULL, 1, &adv); + if (error == 0) { + if (adv.code == FADV_FLOW_CONTROLLED) { + error = EQFULL; + } else if (adv.code == FADV_SUSPENDED) { + error = EQSUSPENDED; + } + } + return error; +} + +static int +bridge_send(struct ifnet *src_ifp, + struct ifnet *dst_ifp, struct mbuf *m, ChecksumOperation cksum_op) +{ + switch (cksum_op) { + case kChecksumOperationClear: + m->m_pkthdr.csum_flags = 0; + break; + case kChecksumOperationFinalize: + /* the checksum might not be correct, finalize now */ + bridge_finalize_cksum(dst_ifp, m); + break; + case kChecksumOperationCompute: + bridge_compute_cksum(src_ifp, dst_ifp, m); + break; + default: + break; + } +#if HAS_IF_CAP + /* + * If underlying interface can not do VLAN tag insertion itself + * then attach a packet tag that holds it. + */ + if ((m->m_flags & M_VLANTAG) && + (dst_ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0) { + m = ether_vlanencap(m, m->m_pkthdr.ether_vtag); + if (m == NULL) { + printf("%s: %s: unable to prepend VLAN " + "header\n", __func__, dst_ifp->if_xname); + (void) ifnet_stat_increment_out(dst_ifp, + 0, 0, 1); + return 0; + } + m->m_flags &= ~M_VLANTAG; + } +#endif /* HAS_IF_CAP */ + return bridge_transmit(dst_ifp, m); +} + +static int +bridge_send_tso(struct ifnet *dst_ifp, struct mbuf *m) +{ + struct ether_header *eh; + uint16_t ether_type; + errno_t error; + boolean_t is_ipv4; + u_int mac_hlen; + + eh = mtod(m, struct ether_header *); + ether_type = ntohs(eh->ether_type); + switch (ether_type) { + case ETHERTYPE_IP: + is_ipv4 = TRUE; + break; + case ETHERTYPE_IPV6: + is_ipv4 = FALSE; + break; + default: + printf("%s: large non IPv4/IPv6 packet\n", __func__); + m_freem(m); + error = EINVAL; + goto done; + } + mac_hlen = sizeof(*eh); + +#if HAS_IF_CAP + /* + * If underlying interface can not do VLAN tag insertion itself + * then attach a packet tag that holds it. 
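bridge_transmit() above folds the flow-advisory outcome of dlil_output() into its return value, so callers see back-pressure as an error code. A compact sketch of just that mapping (the advisory enum, error numbers and the fake output routine are stand-ins, not the xnu definitions):

#include <stdio.h>

enum fadv_code { FADV_SUCCESS, FADV_FLOW_CONTROLLED, FADV_SUSPENDED };
#define EQFULL      1001               /* stand-in values */
#define EQSUSPENDED 1002

/* Pretend output path: the send succeeds but reports flow control. */
static int
output_packet(enum fadv_code *adv)
{
    *adv = FADV_FLOW_CONTROLLED;
    return 0;
}

static int
transmit(void)
{
    enum fadv_code adv = FADV_SUCCESS;
    int error = output_packet(&adv);

    /* Only translate the advisory when the send itself succeeded. */
    if (error == 0) {
        if (adv == FADV_FLOW_CONTROLLED) {
            error = EQFULL;
        } else if (adv == FADV_SUSPENDED) {
            error = EQSUSPENDED;
        }
    }
    return error;
}

int
main(void)
{
    printf("transmit -> %d\n", transmit());
    return 0;
}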
+ */ + if ((m->m_flags & M_VLANTAG) && + (dst_ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0) { + m = ether_vlanencap(m, m->m_pkthdr.ether_vtag); + if (m == NULL) { + printf("%s: %s: unable to prepend VLAN " + "header\n", __func__, dst_ifp->if_xname); + (void) ifnet_stat_increment_out(dst_ifp, + 0, 0, 1); + error = ENOBUFS; + goto done; + } + m->m_flags &= ~M_VLANTAG; + mac_hlen += ETHER_VLAN_ENCAP_LEN; + } +#endif /* HAS_IF_CAP */ + if (is_ipv4) { + error = gso_ipv4_tcp(dst_ifp, &m, mac_hlen, TRUE); + } else { + error = gso_ipv6_tcp(dst_ifp, &m, mac_hlen, TRUE); + } + +done: + return error; +} + /* * bridge_enqueue: * @@ -4126,8 +4179,8 @@ static int bridge_enqueue(ifnet_t bridge_ifp, struct ifnet *src_ifp, struct ifnet *dst_ifp, struct mbuf *m, ChecksumOperation cksum_op) { - int len, error = 0; - struct mbuf *next_m; + errno_t error = 0; + int len; VERIFY(dst_ifp != NULL); @@ -4136,62 +4189,28 @@ bridge_enqueue(ifnet_t bridge_ifp, struct ifnet *src_ifp, * * NOTE: bridge_fragment() is called only when PFIL_HOOKS is enabled. */ - for (; m; m = next_m) { + for (struct mbuf *next_m = NULL; m != NULL; m = next_m) { errno_t _error; - struct flowadv adv = { .code = FADV_SUCCESS }; - - next_m = m->m_nextpkt; - m->m_nextpkt = NULL; len = m->m_pkthdr.len; m->m_flags |= M_PROTO1; /* set to avoid loops */ - - switch (cksum_op) { - case kChecksumOperationClear: - m->m_pkthdr.csum_flags = 0; - break; - case kChecksumOperationFinalize: - /* the checksum might not be correct, finalize now */ - bridge_finalize_cksum(dst_ifp, m); - break; - case kChecksumOperationCompute: - bridge_compute_cksum(src_ifp, dst_ifp, m); - break; - default: - break; - } -#if HAS_IF_CAP + next_m = m->m_nextpkt; + m->m_nextpkt = NULL; /* - * If underlying interface can not do VLAN tag insertion itself - * then attach a packet tag that holds it. 
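The rewritten bridge_enqueue() loop above detaches each packet from the m_nextpkt chain, chooses the software-segmentation path when the frame is larger than the bridge MTU and the destination lacks TSO, and keeps only the first error it sees. That control flow, reduced to a stand-alone list walk (the packet struct and send helpers are illustrative stand-ins):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Illustrative packet chain; only the fields the walk needs. */
struct pkt {
    struct pkt *nextpkt;
    int         len;
};

static int send_plain(struct pkt *p)     { (void)p; return 0; }
static int send_segmented(struct pkt *p) { (void)p; return 0; }

static int
enqueue_chain(struct pkt *head, int mtu, bool dst_has_tso)
{
    int error = 0;

    for (struct pkt *next = NULL; head != NULL; head = next) {
        next = head->nextpkt;
        head->nextpkt = NULL;                 /* detach before sending */

        int _error;
        if (!dst_has_tso && head->len > mtu) {
            _error = send_segmented(head);    /* software GSO path */
        } else {
            _error = send_plain(head);
        }
        if (error == 0 && _error != 0) {
            error = _error;                   /* keep the first failure only */
        }
    }
    return error;
}

int
main(void)
{
    struct pkt b = { NULL, 9000 };
    struct pkt a = { &b, 1500 };
    printf("enqueue -> %d\n", enqueue_chain(&a, 1500, false));
    return 0;
}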
+ * need to segment the packet if it is a large frame + * and the destination interface does not support TSO */ - if ((m->m_flags & M_VLANTAG) && - (dst_ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0) { - m = ether_vlanencap(m, m->m_pkthdr.ether_vtag); - if (m == NULL) { - printf("%s: %s: unable to prepend VLAN " - "header\n", __func__, dst_ifp->if_xname); - (void) ifnet_stat_increment_out(dst_ifp, - 0, 0, 1); - continue; - } - m->m_flags &= ~M_VLANTAG; + if (if_bridge_segmentation != 0 && + len > (bridge_ifp->if_mtu + ETHER_HDR_LEN) && + (dst_ifp->if_capabilities & IFCAP_TSO) != IFCAP_TSO) { + _error = bridge_send_tso(dst_ifp, m); + } else { + _error = bridge_send(src_ifp, dst_ifp, m, cksum_op); } -#endif /* HAS_IF_CAP */ - - _error = dlil_output(dst_ifp, 0, m, NULL, NULL, 1, &adv); - - /* Preserve existing error value */ - if (error == 0) { - if (_error != 0) { - error = _error; - } else if (adv.code == FADV_FLOW_CONTROLLED) { - error = EQFULL; - } else if (adv.code == FADV_SUSPENDED) { - error = EQSUSPENDED; - } + /* Preserve first error value */ + if (error == 0 && _error != 0) { + error = _error; } - if (_error == 0) { (void) ifnet_stat_increment_out(bridge_ifp, 1, len, 0); } else { @@ -4238,6 +4257,7 @@ bridge_dummynet(struct mbuf *m, struct ifnet *ifp) } (void) bridge_enqueue(sc->sc_ifp, NULL, ifp, m, kChecksumOperationNone); } + #endif /* HAS_BRIDGE_DUMMYNET */ /* @@ -4343,9 +4363,11 @@ bridge_member_output(struct bridge_softc *sc, ifnet_t ifp, mbuf_t *data) } dst_if = bif->bif_ifp; +#if 0 if (dst_if->if_type == IFT_GIF) { continue; } +#endif if ((dst_if->if_flags & IFF_RUNNING) == 0) { continue; } @@ -4506,7 +4528,6 @@ bridge_finalize_cksum(struct ifnet *ifp, struct mbuf *m) (void) in_finalize_cksum(m, sizeof(*eh), sw_csum); break; -#if INET6 case ETHERTYPE_IPV6: if ((hwcap & CSUM_PARTIAL) && !(sw_csum & CSUM_DELAY_IPV6_DATA) && @@ -4527,7 +4548,6 @@ bridge_finalize_cksum(struct ifnet *ifp, struct mbuf *m) } (void) in6_finalize_cksum(m, sizeof(*eh), -1, -1, sw_csum); break; -#endif /* INET6 */ } } @@ -4674,20 +4694,6 @@ bridge_forward(struct bridge_softc *sc, struct bridge_iflist *sbif, } #endif /* NBPFILTER */ -#if defined(PFIL_HOOKS) - /* run the packet filter */ - if (PFIL_HOOKED(&inet_pfil_hook) || PFIL_HOOKED_INET6) { - BRIDGE_UNLOCK(sc); - if (bridge_pfil(&m, bridge_ifp, src_if, PFIL_IN) != 0) { - return; - } - if (m == NULL) { - return; - } - BRIDGE_LOCK(sc); - } -#endif /* PFIL_HOOKS */ - if (dst_if == NULL) { /* bridge_broadcast will unlock */ bridge_broadcast(sc, src_if, m, 1); @@ -4744,17 +4750,6 @@ bridge_forward(struct bridge_softc *sc, struct bridge_iflist *sbif, } } -#if defined(PFIL_HOOKS) - if (PFIL_HOOKED(&inet_pfil_hook) || PFIL_HOOKED_INET6) { - if (bridge_pfil(&m, bridge_ifp, dst_if, PFIL_OUT) != 0) { - return; - } - if (m == NULL) { - return; - } - } -#endif /* PFIL_HOOKS */ - sc_filter_flags = sc->sc_filter_flags; BRIDGE_UNLOCK(sc); if (PF_IS_ENABLED && (sc_filter_flags & IFBF_FILT_MEMBER)) { @@ -4814,6 +4809,124 @@ inject_input_packet(ifnet_t ifp, mbuf_t m) return; } +static boolean_t +in_addr_is_ours(struct in_addr ip) +{ + struct in_ifaddr *ia; + boolean_t ours = FALSE; + + lck_rw_lock_shared(in_ifaddr_rwlock); + TAILQ_FOREACH(ia, INADDR_HASH(ip.s_addr), ia_hash) { + if (IA_SIN(ia)->sin_addr.s_addr == ip.s_addr) { + ours = TRUE; + break; + } + } + lck_rw_done(in_ifaddr_rwlock); + return ours; +} + +static boolean_t +in6_addr_is_ours(const struct in6_addr * ip6_p) +{ + struct in6_ifaddr *ia6; + boolean_t ours = FALSE; + + 
lck_rw_lock_shared(&in6_ifaddr_rwlock); + TAILQ_FOREACH(ia6, IN6ADDR_HASH(ip6_p), ia6_hash) { + if (IN6_ARE_ADDR_EQUAL(&ia6->ia_addr.sin6_addr, ip6_p)) { + ours = TRUE; + break; + } + } + lck_rw_done(&in6_ifaddr_rwlock); + return ours; +} + +static void +bridge_interface_input(ifnet_t bridge_ifp, mbuf_t m, + bpf_packet_func bpf_input_func) +{ + size_t byte_count; + struct ether_header *eh; + uint16_t ether_type; + errno_t error; + boolean_t is_ipv4; + int len; + u_int mac_hlen; + int pkt_count; + + /* segment large packets before sending them up */ + if (if_bridge_segmentation == 0) { + goto done; + } + len = m->m_pkthdr.len; + if (len <= (bridge_ifp->if_mtu + ETHER_HDR_LEN)) { + goto done; + } + eh = mtod(m, struct ether_header *); + ether_type = ntohs(eh->ether_type); + switch (ether_type) { + case ETHERTYPE_IP: + is_ipv4 = TRUE; + break; + case ETHERTYPE_IPV6: + is_ipv4 = FALSE; + break; + default: + printf("%s: large non IPv4/IPv6 packet\n", __func__); + m_freem(m); + return; + } + + /* + * We have a large IPv4/IPv6 TCP packet. Segment it if required. + * + * If gso_ipv[46]_tcp() returns success (0), the packet(s) are + * ready to be passed up. If the destination is a local IP address, + * the packet will be passed up as a large, single packet. + * + * If gso_ipv[46]_tcp() returns an error, the packet has already + * been freed. + */ + mac_hlen = sizeof(*eh); + if (is_ipv4) { + error = gso_ipv4_tcp(bridge_ifp, &m, mac_hlen, FALSE); + } else { + error = gso_ipv6_tcp(bridge_ifp, &m, mac_hlen, FALSE); + } + if (error != 0) { + return; + } + +done: + pkt_count = 0; + byte_count = 0; + for (mbuf_t scan = m; scan != NULL; scan = scan->m_nextpkt) { + /* Mark the packet as arriving on the bridge interface */ + mbuf_pkthdr_setrcvif(scan, bridge_ifp); + mbuf_pkthdr_setheader(scan, mbuf_data(scan)); + if (bpf_input_func != NULL) { + (*bpf_input_func)(bridge_ifp, scan); + } + mbuf_setdata(scan, (char *)mbuf_data(scan) + ETHER_HDR_LEN, + mbuf_len(scan) - ETHER_HDR_LEN); + mbuf_pkthdr_adjustlen(scan, -ETHER_HDR_LEN); + byte_count += mbuf_pkthdr_len(scan); + pkt_count++; + } + (void)ifnet_stat_increment_in(bridge_ifp, pkt_count, byte_count, 0); +#if BRIDGE_DEBUG + if (IF_BRIDGE_DEBUG(BR_DBGF_INPUT)) { + printf("%s: %s %d packet(s) %ld bytes\n", __func__, + bridge_ifp->if_xname, pkt_count, byte_count); + } +#endif /* BRIDGE_DEBUG */ + + dlil_input_packet_list(bridge_ifp, m); + return; +} + /* * bridge_input: * @@ -4830,6 +4943,8 @@ bridge_input(struct ifnet *ifp, mbuf_t *data) struct mbuf *mc, *mc2; uint16_t vlan; errno_t error; + boolean_t is_broadcast; + boolean_t is_ip_broadcast = FALSE; boolean_t is_ifp_mac = FALSE; mbuf_t m = *data; uint32_t sc_filter_flags = 0; @@ -4923,13 +5038,29 @@ bridge_input(struct ifnet *ifp, mbuf_t *data) m = *data; } + is_broadcast = (m->m_flags & (M_BCAST | M_MCAST)) != 0; eh = mtod(m, struct ether_header *); + if (!is_broadcast && + memcmp(eh->ether_dhost, IF_LLADDR(ifp), ETHER_ADDR_LEN) == 0) { + if (sc->sc_mac_nat_bif == bif) { + /* doing MAC-NAT, check if destination is broadcast */ + is_ip_broadcast = is_broadcast_ip_packet(data); + if (*data == NULL) { + BRIDGE_UNLOCK(sc); + return EJUSTRETURN; + } + m = *data; + } + if (!is_ip_broadcast) { + is_ifp_mac = TRUE; + } + } bridge_span(sc, m); - if (m->m_flags & (M_BCAST | M_MCAST)) { + if (is_broadcast || is_ip_broadcast) { #if BRIDGE_DEBUG - if (IF_BRIDGE_DEBUG(BR_DBGF_MCAST)) { + if (is_broadcast && IF_BRIDGE_DEBUG(BR_DBGF_MCAST)) { if ((m->m_flags & M_MCAST)) { printf("%s: multicast: " 
"%02x:%02x:%02x:%02x:%02x:%02x\n", @@ -4942,7 +5073,7 @@ bridge_input(struct ifnet *ifp, mbuf_t *data) #endif /* BRIDGE_DEBUG */ /* Tap off 802.1D packets; they do not get forwarded. */ - if (memcmp(eh->ether_dhost, bstp_etheraddr, + if (is_broadcast && memcmp(eh->ether_dhost, bstp_etheraddr, ETHER_ADDR_LEN) == 0) { #if BRIDGESTP m = bstp_input(&bif->bif_stp, ifp, m); @@ -4978,6 +5109,13 @@ bridge_input(struct ifnet *ifp, mbuf_t *data) * * Note that bridge_forward calls BRIDGE_UNLOCK */ + if (is_ip_broadcast) { + /* make the copy look like it is actually broadcast */ + mc->m_flags |= M_BCAST; + eh = mtod(mc, struct ether_header *); + bcopy(etherbroadcastaddr, eh->ether_dhost, + ETHER_ADDR_LEN); + } bridge_forward(sc, bif, mc); /* @@ -5040,26 +5178,9 @@ bridge_input(struct ifnet *ifp, mbuf_t *data) #define CARP_CHECK_WE_ARE_SRC(iface) 0 #endif -#ifdef INET6 #define PFIL_HOOKED_INET6 PFIL_HOOKED(&inet6_pfil_hook) -#else -#define PFIL_HOOKED_INET6 0 -#endif -#if defined(PFIL_HOOKS) -#define PFIL_PHYS(sc, ifp, m) do { \ - if (pfil_local_phys && \ - (PFIL_HOOKED(&inet_pfil_hook) || PFIL_HOOKED_INET6)) { \ - if (bridge_pfil(&m, NULL, ifp, \ - PFIL_IN) != 0 || m == NULL) { \ - BRIDGE_UNLOCK(sc); \ - return (NULL); \ - } \ - } \ -} while (0) -#else /* PFIL_HOOKS */ #define PFIL_PHYS(sc, ifp, m) -#endif /* PFIL_HOOKS */ #define GRAB_OUR_PACKETS(iface) \ if ((iface)->if_type == IFT_GIF) \ @@ -5099,9 +5220,6 @@ bridge_input(struct ifnet *ifp, mbuf_t *data) /* * Unicast. */ - if (memcmp(eh->ether_dhost, IF_LLADDR(ifp), ETHER_ADDR_LEN) == 0) { - is_ifp_mac = TRUE; - } /* handle MAC-NAT if enabled */ if (is_ifp_mac && sc->sc_mac_nat_bif == bif) { @@ -5129,14 +5247,11 @@ bridge_input(struct ifnet *ifp, mbuf_t *data) } /* - * If the packet is for the bridge, set the packet's source interface - * and return the packet back to ether_input for local processing. + * If the packet is for the bridge, pass it up for local processing. 
*/ if (memcmp(eh->ether_dhost, IF_LLADDR(bridge_ifp), ETHER_ADDR_LEN) == 0 || CARP_CHECK_WE_ARE_DST(bridge_ifp)) { - /* Mark the packet as arriving on the bridge interface */ - (void) mbuf_pkthdr_setrcvif(m, bridge_ifp); - mbuf_pkthdr_setheader(m, mbuf_data(m)); + bpf_packet_func bpf_input_func = sc->sc_bpf_input; /* * If the interface is learning, and the source @@ -5147,26 +5262,9 @@ bridge_input(struct ifnet *ifp, mbuf_t *data) (void) bridge_rtupdate(sc, eh->ether_shost, vlan, bif, 0, IFBAF_DYNAMIC); } - - BRIDGE_BPF_MTAP_INPUT(sc, m); - - (void) mbuf_setdata(m, (char *)mbuf_data(m) + ETHER_HDR_LEN, - mbuf_len(m) - ETHER_HDR_LEN); - (void) mbuf_pkthdr_adjustlen(m, -ETHER_HDR_LEN); - - (void) ifnet_stat_increment_in(bridge_ifp, 1, mbuf_pkthdr_len(m), 0); - BRIDGE_UNLOCK(sc); -#if BRIDGE_DEBUG - if (IF_BRIDGE_DEBUG(BR_DBGF_INPUT)) { - printf("%s: %s packet for bridge\n", __func__, - bridge_ifp->if_xname); - } -#endif /* BRIDGE_DEBUG */ - - dlil_input_packet_list(bridge_ifp, m); - + bridge_interface_input(bridge_ifp, m, bpf_input_func); return EJUSTRETURN; } @@ -5272,17 +5370,6 @@ bridge_broadcast(struct bridge_softc *sc, struct ifnet *src_if, return; } -#ifdef PFIL_HOOKS - /* Filter on the bridge interface before broadcasting */ - if (runfilt && (PFIL_HOOKED(&inet_pfil_hook) || PFIL_HOOKED_INET6)) { - if (bridge_pfil(&m, bridge_ifp, NULL, PFIL_OUT) != 0) { - goto out; - } - if (m == NULL) { - goto out; - } - } -#endif /* PFIL_HOOKS */ TAILQ_FOREACH(dbif, &sc->sc_iflist, bif_next) { dst_if = dbif->bif_ifp; if (dst_if == src_if) { @@ -5338,84 +5425,48 @@ bridge_broadcast(struct bridge_softc *sc, struct ifnet *src_if, mc_in = NULL; } -#ifdef PFIL_HOOKS - /* - * Filter on the output interface. Pass a NULL bridge interface - * pointer so we do not redundantly filter on the bridge for - * each interface we broadcast on. 
- */ + /* out */ + if (translate_mac && mac_nat_bif == dbif) { + /* translate the packet without holding the lock */ + bridge_mac_nat_translate(&mc, &mnr, IF_LLADDR(dst_if)); + } + + sc_filter_flags = sc->sc_filter_flags; if (runfilt && - (PFIL_HOOKED(&inet_pfil_hook) || PFIL_HOOKED_INET6)) { + PF_IS_ENABLED && (sc_filter_flags & IFBF_FILT_MEMBER)) { if (used == 0) { /* Keep the layer3 header aligned */ int i = min(mc->m_pkthdr.len, max_protohdr); mc = m_copyup(mc, i, ETHER_ALIGN); if (mc == NULL) { (void) ifnet_stat_increment_out( - bridge_ifp, 0, 0, 1); + sc->sc_ifp, 0, 0, 1); if (mc_in != NULL) { m_freem(mc_in); + mc_in = NULL; } continue; } } - if (bridge_pfil(&mc, NULL, dst_if, PFIL_OUT) != 0) { + if (bridge_pf(&mc, dst_if, sc_filter_flags, FALSE) != 0) { if (mc_in != NULL) { m_freem(mc_in); + mc_in = NULL; } continue; } if (mc == NULL) { if (mc_in != NULL) { m_freem(mc_in); + mc_in = NULL; } continue; } } -#endif /* PFIL_HOOKS */ - /* out */ - if (translate_mac && mac_nat_bif == dbif) { - /* translate the packet without holding the lock */ - bridge_mac_nat_translate(&mc, &mnr, IF_LLADDR(dst_if)); - } - - sc_filter_flags = sc->sc_filter_flags; - if (runfilt && - PF_IS_ENABLED && (sc_filter_flags & IFBF_FILT_MEMBER)) { - if (used == 0) { - /* Keep the layer3 header aligned */ - int i = min(mc->m_pkthdr.len, max_protohdr); - mc = m_copyup(mc, i, ETHER_ALIGN); - if (mc == NULL) { - (void) ifnet_stat_increment_out( - sc->sc_ifp, 0, 0, 1); - if (mc_in != NULL) { - m_freem(mc_in); - mc_in = NULL; - } - continue; - } - } - if (bridge_pf(&mc, dst_if, sc_filter_flags, FALSE) != 0) { - if (mc_in != NULL) { - m_freem(mc_in); - mc_in = NULL; - } - continue; - } - if (mc == NULL) { - if (mc_in != NULL) { - m_freem(mc_in); - mc_in = NULL; - } - continue; - } - } - - if (mc != NULL) { - (void) bridge_enqueue(bridge_ifp, - NULL, dst_if, mc, cksum_op); + if (mc != NULL) { + (void) bridge_enqueue(bridge_ifp, + NULL, dst_if, mc, cksum_op); } /* in */ @@ -5435,9 +5486,6 @@ bridge_broadcast(struct bridge_softc *sc, struct ifnet *src_if, m_freem(m); } -#ifdef PFIL_HOOKS -out: -#endif /* PFIL_HOOKS */ BRIDGE_UNREF(sc); } @@ -6160,686 +6208,152 @@ bridge_state_change(struct ifnet *ifp, int state) } #endif /* BRIDGESTP */ -#ifdef PFIL_HOOKS /* - * Send bridge packets through pfil if they are one of the types pfil can deal - * with, or if they are ARP or REVARP. (pfil will pass ARP and REVARP without - * question.) If *bifp or *ifp are NULL then packet filtering is skipped for - * that interface. + * bridge_set_bpf_tap: + * + * Sets ups the BPF callbacks. 
*/ -static int -bridge_pfil(struct mbuf **mp, struct ifnet *bifp, struct ifnet *ifp, int dir) +static errno_t +bridge_set_bpf_tap(ifnet_t ifp, bpf_tap_mode mode, bpf_packet_func bpf_callback) { - int snap, error, i, hlen; - struct ether_header *eh1, eh2; - struct ip_fw_args args; - struct ip *ip; - struct llc llc1; - u_int16_t ether_type; - - snap = 0; - error = -1; /* Default error if not error == 0 */ - -#if 0 - /* we may return with the IP fields swapped, ensure its not shared */ - KASSERT(M_WRITABLE(*mp), ("%s: modifying a shared mbuf", __func__)); -#endif + struct bridge_softc *sc = (struct bridge_softc *)ifnet_softc(ifp); - if (pfil_bridge == 0 && pfil_member == 0 && pfil_ipfw == 0) { - return 0; /* filtering is disabled */ - } - i = min((*mp)->m_pkthdr.len, max_protohdr); - if ((*mp)->m_len < i) { - *mp = m_pullup(*mp, i); - if (*mp == NULL) { - printf("%s: m_pullup failed\n", __func__); - return -1; - } + /* TBD locking */ + if (sc == NULL || (sc->sc_flags & SCF_DETACHING)) { + return ENODEV; } + switch (mode) { + case BPF_TAP_DISABLE: + sc->sc_bpf_input = sc->sc_bpf_output = NULL; + break; - eh1 = mtod(*mp, struct ether_header *); - ether_type = ntohs(eh1->ether_type); - - /* - * Check for SNAP/LLC. - */ - if (ether_type < ETHERMTU) { - struct llc *llc2 = (struct llc *)(eh1 + 1); - - if ((*mp)->m_len >= ETHER_HDR_LEN + 8 && - llc2->llc_dsap == LLC_SNAP_LSAP && - llc2->llc_ssap == LLC_SNAP_LSAP && - llc2->llc_control == LLC_UI) { - ether_type = htons(llc2->llc_un.type_snap.ether_type); - snap = 1; - } - } + case BPF_TAP_INPUT: + sc->sc_bpf_input = bpf_callback; + break; - /* - * If we're trying to filter bridge traffic, don't look at anything - * other than IP and ARP traffic. If the filter doesn't understand - * IPv6, don't allow IPv6 through the bridge either. This is lame - * since if we really wanted, say, an AppleTalk filter, we are hosed, - * but of course we don't have an AppleTalk filter to begin with. - * (Note that since pfil doesn't understand ARP it will pass *ALL* - * ARP traffic.) - */ - switch (ether_type) { - case ETHERTYPE_ARP: - case ETHERTYPE_REVARP: - if (pfil_ipfw_arp == 0) { - return 0; /* Automatically pass */ - } + case BPF_TAP_OUTPUT: + sc->sc_bpf_output = bpf_callback; break; - case ETHERTYPE_IP: -#if INET6 - case ETHERTYPE_IPV6: -#endif /* INET6 */ + case BPF_TAP_INPUT_OUTPUT: + sc->sc_bpf_input = sc->sc_bpf_output = bpf_callback; break; + default: - /* - * Check to see if the user wants to pass non-ip - * packets, these will not be checked by pfil(9) and - * passed unconditionally so the default is to drop. - */ - if (pfil_onlyip) { - goto bad; - } + break; } - /* Strip off the Ethernet header and keep a copy. */ - m_copydata(*mp, 0, ETHER_HDR_LEN, (caddr_t)&eh2); - m_adj(*mp, ETHER_HDR_LEN); + return 0; +} - /* Strip off snap header, if present */ - if (snap) { - m_copydata(*mp, 0, sizeof(struct llc), (caddr_t)&llc1); - m_adj(*mp, sizeof(struct llc)); - } +/* + * bridge_detach: + * + * Callback when interface has been detached. 
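bridge_set_bpf_tap() above just records the supplied callback in the softc for whichever direction the caller asked to tap; bridge_bpf_input() and bridge_bpf_output() later invoke it when set. A small user-space sketch of the same registration pattern (the type and function names here are illustrative, not the kernel KPI):

#include <stdio.h>

typedef void (*packet_func)(const char *pkt);

enum tap_mode { TAP_DISABLE, TAP_INPUT, TAP_OUTPUT, TAP_INPUT_OUTPUT };

struct softc {
	packet_func bpf_input;
	packet_func bpf_output;
};

/* store the callback for whichever direction(s) are being tapped */
static void
set_bpf_tap(struct softc *sc, enum tap_mode mode, packet_func cb)
{
	switch (mode) {
	case TAP_DISABLE:
		sc->bpf_input = sc->bpf_output = NULL;
		break;
	case TAP_INPUT:
		sc->bpf_input = cb;
		break;
	case TAP_OUTPUT:
		sc->bpf_output = cb;
		break;
	case TAP_INPUT_OUTPUT:
		sc->bpf_input = sc->bpf_output = cb;
		break;
	}
}

static void
print_pkt(const char *pkt)
{
	printf("tapped: %s\n", pkt);
}

int
main(void)
{
	struct softc sc = { NULL, NULL };

	set_bpf_tap(&sc, TAP_INPUT_OUTPUT, print_pkt);
	if (sc.bpf_input != NULL) {	/* same guard bridge_bpf_input() uses */
		sc.bpf_input("inbound frame");
	}
	return 0;
}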
+ */ +static void +bridge_detach(ifnet_t ifp) +{ + struct bridge_softc *sc = (struct bridge_softc *)ifnet_softc(ifp); - /* - * Check the IP header for alignment and errors - */ - if (dir == PFIL_IN) { - switch (ether_type) { - case ETHERTYPE_IP: - error = bridge_ip_checkbasic(mp); - break; -#if INET6 - case ETHERTYPE_IPV6: - error = bridge_ip6_checkbasic(mp); - break; -#endif /* INET6 */ - default: - error = 0; - } - if (error) { - goto bad; - } - } +#if BRIDGESTP + bstp_detach(&sc->sc_stp); +#endif /* BRIDGESTP */ - if (IPFW_LOADED && pfil_ipfw != 0 && dir == PFIL_OUT && ifp != NULL) { - error = -1; - args.rule = ip_dn_claim_rule(*mp); - if (args.rule != NULL && fw_one_pass) { - goto ipfwpass; /* packet already partially processed */ - } - args.m = *mp; - args.oif = ifp; - args.next_hop = NULL; - args.eh = &eh2; - args.inp = NULL; /* used by ipfw uid/gid/jail rules */ - i = ip_fw_chk_ptr(&args); - *mp = args.m; + /* Tear down the routing table. */ + bridge_rtable_fini(sc); - if (*mp == NULL) { - return error; - } + lck_mtx_lock(&bridge_list_mtx); + LIST_REMOVE(sc, sc_list); + lck_mtx_unlock(&bridge_list_mtx); - if (DUMMYNET_LOADED && (i == IP_FW_DUMMYNET)) { - /* put the Ethernet header back on */ - M_PREPEND(*mp, ETHER_HDR_LEN, M_DONTWAIT, 0); - if (*mp == NULL) { - return error; - } - bcopy(&eh2, mtod(*mp, caddr_t), ETHER_HDR_LEN); + ifnet_release(ifp); - /* - * Pass the pkt to dummynet, which consumes it. The - * packet will return to us via bridge_dummynet(). - */ - args.oif = ifp; - ip_dn_io_ptr(mp, DN_TO_IFB_FWD, &args, DN_CLIENT_IPFW); - return error; - } + lck_mtx_destroy(&sc->sc_mtx, bridge_lock_grp); + if_clone_softc_deallocate(&bridge_cloner, sc); +} - if (i != IP_FW_PASS) { /* drop */ - goto bad; +/* + * bridge_bpf_input: + * + * Invoke the input BPF callback if enabled + */ +static errno_t +bridge_bpf_input(ifnet_t ifp, struct mbuf *m, const char * func, int line) +{ + struct bridge_softc *sc = (struct bridge_softc *)ifnet_softc(ifp); + bpf_packet_func input_func = sc->sc_bpf_input; + + if (input_func != NULL) { + if (mbuf_pkthdr_rcvif(m) != ifp) { + printf("%s.%d: rcvif: 0x%llx != ifp 0x%llx\n", func, line, + (uint64_t)VM_KERNEL_ADDRPERM(mbuf_pkthdr_rcvif(m)), + (uint64_t)VM_KERNEL_ADDRPERM(ifp)); } + (*input_func)(ifp, m); } + return 0; +} -ipfwpass: - error = 0; - - /* - * Run the packet through pfil - */ - switch (ether_type) { - case ETHERTYPE_IP: - /* - * before calling the firewall, swap fields the same as - * IP does. here we assume the header is contiguous - */ - ip = mtod(*mp, struct ip *); - - ip->ip_len = ntohs(ip->ip_len); - ip->ip_off = ntohs(ip->ip_off); +/* + * bridge_bpf_output: + * + * Invoke the output BPF callback if enabled + */ +static errno_t +bridge_bpf_output(ifnet_t ifp, struct mbuf *m) +{ + struct bridge_softc *sc = (struct bridge_softc *)ifnet_softc(ifp); + bpf_packet_func output_func = sc->sc_bpf_output; - /* - * Run pfil on the member interface and the bridge, both can - * be skipped by clearing pfil_member or pfil_bridge. 
- * - * Keep the order: - * in_if -> bridge_if -> out_if - */ - if (pfil_bridge && dir == PFIL_OUT && bifp != NULL) { - error = pfil_run_hooks(&inet_pfil_hook, mp, bifp, - dir, NULL); - } + if (output_func != NULL) { + (*output_func)(ifp, m); + } + return 0; +} - if (*mp == NULL || error != 0) { /* filter may consume */ - break; - } +/* + * bridge_link_event: + * + * Report a data link event on an interface + */ +static void +bridge_link_event(struct ifnet *ifp, u_int32_t event_code) +{ + struct event { + u_int32_t ifnet_family; + u_int32_t unit; + char if_name[IFNAMSIZ]; + }; + _Alignas(struct kern_event_msg) char message[sizeof(struct kern_event_msg) + sizeof(struct event)] = { 0 }; + struct kern_event_msg *header = (struct kern_event_msg*)message; + struct event *data = (struct event *)(header + 1); - if (pfil_member && ifp != NULL) { - error = pfil_run_hooks(&inet_pfil_hook, mp, ifp, - dir, NULL); - } +#if BRIDGE_DEBUG + if (IF_BRIDGE_DEBUG(BR_DBGF_LIFECYCLE)) { + printf("%s: %s event_code %u - %s\n", __func__, ifp->if_xname, + event_code, dlil_kev_dl_code_str(event_code)); + } +#endif /* BRIDGE_DEBUG */ - if (*mp == NULL || error != 0) { /* filter may consume */ - break; - } + header->total_size = sizeof(message); + header->vendor_code = KEV_VENDOR_APPLE; + header->kev_class = KEV_NETWORK_CLASS; + header->kev_subclass = KEV_DL_SUBCLASS; + header->event_code = event_code; + data->ifnet_family = ifnet_family(ifp); + data->unit = (u_int32_t)ifnet_unit(ifp); + strlcpy(data->if_name, ifnet_name(ifp), IFNAMSIZ); + ifnet_event(ifp, header); +} - if (pfil_bridge && dir == PFIL_IN && bifp != NULL) { - error = pfil_run_hooks(&inet_pfil_hook, mp, bifp, - dir, NULL); - } - - if (*mp == NULL || error != 0) { /* filter may consume */ - break; - } - - /* check if we need to fragment the packet */ - if (pfil_member && ifp != NULL && dir == PFIL_OUT) { - i = (*mp)->m_pkthdr.len; - if (i > ifp->if_mtu) { - error = bridge_fragment(ifp, *mp, &eh2, snap, - &llc1); - return error; - } - } - - /* Recalculate the ip checksum and restore byte ordering */ - ip = mtod(*mp, struct ip *); - hlen = ip->ip_hl << 2; - if (hlen < sizeof(struct ip)) { - goto bad; - } - if (hlen > (*mp)->m_len) { - if ((*mp = m_pullup(*mp, hlen)) == 0) { - goto bad; - } - ip = mtod(*mp, struct ip *); - if (ip == NULL) { - goto bad; - } - } - ip->ip_len = htons(ip->ip_len); - ip->ip_off = htons(ip->ip_off); - ip->ip_sum = 0; - if (hlen == sizeof(struct ip)) { - ip->ip_sum = in_cksum_hdr(ip); - } else { - ip->ip_sum = in_cksum(*mp, hlen); - } - - break; -#if INET6 - case ETHERTYPE_IPV6: - if (pfil_bridge && dir == PFIL_OUT && bifp != NULL) { - error = pfil_run_hooks(&inet6_pfil_hook, mp, bifp, - dir, NULL); - } - - if (*mp == NULL || error != 0) { /* filter may consume */ - break; - } - - if (pfil_member && ifp != NULL) { - error = pfil_run_hooks(&inet6_pfil_hook, mp, ifp, - dir, NULL); - } - - if (*mp == NULL || error != 0) { /* filter may consume */ - break; - } - - if (pfil_bridge && dir == PFIL_IN && bifp != NULL) { - error = pfil_run_hooks(&inet6_pfil_hook, mp, bifp, - dir, NULL); - } - break; -#endif - default: - error = 0; - break; - } - - if (*mp == NULL) { - return error; - } - if (error != 0) { - goto bad; - } - - error = -1; - - /* - * Finally, put everything back the way it was and return - */ - if (snap) { - M_PREPEND(*mp, sizeof(struct llc), M_DONTWAIT, 0); - if (*mp == NULL) { - return error; - } - bcopy(&llc1, mtod(*mp, caddr_t), sizeof(struct llc)); - } - - M_PREPEND(*mp, ETHER_HDR_LEN, M_DONTWAIT, 0); - if (*mp == NULL) 
{ - return error; - } - bcopy(&eh2, mtod(*mp, caddr_t), ETHER_HDR_LEN); - - return 0; - -bad: - m_freem(*mp); - *mp = NULL; - return error; -} -#endif /* PFIL_HOOKS */ - -/* - * Perform basic checks on header size since - * pfil assumes ip_input has already processed - * it for it. Cut-and-pasted from ip_input.c. - * Given how simple the IPv6 version is, - * does the IPv4 version really need to be - * this complicated? - * - * XXX Should we update ipstat here, or not? - * XXX Right now we update ipstat but not - * XXX csum_counter. - */ -static int -bridge_ip_checkbasic(struct mbuf **mp) -{ - struct mbuf *m = *mp; - struct ip *ip; - int len, hlen; - u_short sum; - - if (*mp == NULL) { - return -1; - } - - if (IP_HDR_ALIGNED_P(mtod(m, caddr_t)) == 0) { - /* max_linkhdr is already rounded up to nearest 4-byte */ - if ((m = m_copyup(m, sizeof(struct ip), - max_linkhdr)) == NULL) { - /* XXXJRT new stat, please */ - ipstat.ips_toosmall++; - goto bad; - } - } else if (OS_EXPECT((size_t)m->m_len < sizeof(struct ip), 0)) { - if ((m = m_pullup(m, sizeof(struct ip))) == NULL) { - ipstat.ips_toosmall++; - goto bad; - } - } - ip = mtod(m, struct ip *); - if (ip == NULL) { - goto bad; - } - - if (IP_VHL_V(ip->ip_vhl) != IPVERSION) { - ipstat.ips_badvers++; - goto bad; - } - hlen = IP_VHL_HL(ip->ip_vhl) << 2; - if (hlen < (int)sizeof(struct ip)) { /* minimum header length */ - ipstat.ips_badhlen++; - goto bad; - } - if (hlen > m->m_len) { - if ((m = m_pullup(m, hlen)) == 0) { - ipstat.ips_badhlen++; - goto bad; - } - ip = mtod(m, struct ip *); - if (ip == NULL) { - goto bad; - } - } - - if (m->m_pkthdr.csum_flags & CSUM_IP_CHECKED) { - sum = !(m->m_pkthdr.csum_flags & CSUM_IP_VALID); - } else { - if (hlen == sizeof(struct ip)) { - sum = in_cksum_hdr(ip); - } else { - sum = in_cksum(m, hlen); - } - } - if (sum) { - ipstat.ips_badsum++; - goto bad; - } - - /* Retrieve the packet length. */ - len = ntohs(ip->ip_len); - - /* - * Check for additional length bogosity - */ - if (len < hlen) { - ipstat.ips_badlen++; - goto bad; - } - - /* - * Check that the amount of data in the buffers - * is as at least much as the IP header would have us expect. - * Drop packet if shorter than we expect. - */ - if (m->m_pkthdr.len < len) { - ipstat.ips_tooshort++; - goto bad; - } - - /* Checks out, proceed */ - *mp = m; - return 0; - -bad: - *mp = m; - return -1; -} - -#if INET6 -/* - * Same as above, but for IPv6. - * Cut-and-pasted from ip6_input.c. - * XXX Should we update ip6stat, or not? - */ -static int -bridge_ip6_checkbasic(struct mbuf **mp) -{ - struct mbuf *m = *mp; - struct ip6_hdr *ip6; - - /* - * If the IPv6 header is not aligned, slurp it up into a new - * mbuf with space for link headers, in the event we forward - * it. Otherwise, if it is aligned, make sure the entire base - * IPv6 header is in the first mbuf of the chain. 
- */ - if (IP6_HDR_ALIGNED_P(mtod(m, caddr_t)) == 0) { - struct ifnet *inifp = m->m_pkthdr.rcvif; - /* max_linkhdr is already rounded up to nearest 4-byte */ - if ((m = m_copyup(m, sizeof(struct ip6_hdr), - max_linkhdr)) == NULL) { - /* XXXJRT new stat, please */ - ip6stat.ip6s_toosmall++; - in6_ifstat_inc(inifp, ifs6_in_hdrerr); - goto bad; - } - } else if (OS_EXPECT((size_t)m->m_len < sizeof(struct ip6_hdr), 0)) { - struct ifnet *inifp = m->m_pkthdr.rcvif; - if ((m = m_pullup(m, sizeof(struct ip6_hdr))) == NULL) { - ip6stat.ip6s_toosmall++; - in6_ifstat_inc(inifp, ifs6_in_hdrerr); - goto bad; - } - } - - ip6 = mtod(m, struct ip6_hdr *); - - if ((ip6->ip6_vfc & IPV6_VERSION_MASK) != IPV6_VERSION) { - ip6stat.ip6s_badvers++; - in6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_hdrerr); - goto bad; - } - - /* Checks out, proceed */ - *mp = m; - return 0; - -bad: - *mp = m; - return -1; -} -#endif /* INET6 */ - -#ifdef PFIL_HOOKS -/* - * bridge_fragment: - * - * Return a fragmented mbuf chain. - */ -static int -bridge_fragment(struct ifnet *ifp, struct mbuf *m, struct ether_header *eh, - int snap, struct llc *llc) -{ - struct mbuf *m0; - struct ip *ip; - int error = -1; - - if (m->m_len < sizeof(struct ip) && - (m = m_pullup(m, sizeof(struct ip))) == NULL) { - goto out; - } - ip = mtod(m, struct ip *); - - error = ip_fragment(ip, &m, ifp->if_mtu, ifp->if_hwassist, - CSUM_DELAY_IP); - if (error) { - goto out; - } - - /* walk the chain and re-add the Ethernet header */ - for (m0 = m; m0; m0 = m0->m_nextpkt) { - if (error == 0) { - if (snap) { - M_PREPEND(m0, sizeof(struct llc), M_DONTWAIT, 0); - if (m0 == NULL) { - error = ENOBUFS; - continue; - } - bcopy(llc, mtod(m0, caddr_t), - sizeof(struct llc)); - } - M_PREPEND(m0, ETHER_HDR_LEN, M_DONTWAIT, 0); - if (m0 == NULL) { - error = ENOBUFS; - continue; - } - bcopy(eh, mtod(m0, caddr_t), ETHER_HDR_LEN); - } else { - m_freem(m); - } - } - - if (error == 0) { - ipstat.ips_fragmented++; - } - - return error; - -out: - if (m != NULL) { - m_freem(m); - } - return error; -} -#endif /* PFIL_HOOKS */ - -/* - * bridge_set_bpf_tap: - * - * Sets ups the BPF callbacks. - */ -static errno_t -bridge_set_bpf_tap(ifnet_t ifp, bpf_tap_mode mode, bpf_packet_func bpf_callback) -{ - struct bridge_softc *sc = (struct bridge_softc *)ifnet_softc(ifp); - - /* TBD locking */ - if (sc == NULL || (sc->sc_flags & SCF_DETACHING)) { - return ENODEV; - } - switch (mode) { - case BPF_TAP_DISABLE: - sc->sc_bpf_input = sc->sc_bpf_output = NULL; - break; - - case BPF_TAP_INPUT: - sc->sc_bpf_input = bpf_callback; - break; - - case BPF_TAP_OUTPUT: - sc->sc_bpf_output = bpf_callback; - break; - - case BPF_TAP_INPUT_OUTPUT: - sc->sc_bpf_input = sc->sc_bpf_output = bpf_callback; - break; - - default: - break; - } - - return 0; -} - -/* - * bridge_detach: - * - * Callback when interface has been detached. - */ -static void -bridge_detach(ifnet_t ifp) -{ - struct bridge_softc *sc = (struct bridge_softc *)ifnet_softc(ifp); - -#if BRIDGESTP - bstp_detach(&sc->sc_stp); -#endif /* BRIDGESTP */ - - /* Tear down the routing table. 
*/ - bridge_rtable_fini(sc); - - lck_mtx_lock(&bridge_list_mtx); - LIST_REMOVE(sc, sc_list); - lck_mtx_unlock(&bridge_list_mtx); - - ifnet_release(ifp); - - lck_mtx_destroy(&sc->sc_mtx, bridge_lock_grp); - if_clone_softc_deallocate(&bridge_cloner, sc); -} - -/* - * bridge_bpf_input: - * - * Invoke the input BPF callback if enabled - */ -static errno_t -bridge_bpf_input(ifnet_t ifp, struct mbuf *m, const char * func, int line) -{ - struct bridge_softc *sc = (struct bridge_softc *)ifnet_softc(ifp); - bpf_packet_func input_func = sc->sc_bpf_input; - - if (input_func != NULL) { - if (mbuf_pkthdr_rcvif(m) != ifp) { - printf("%s.%d: rcvif: 0x%llx != ifp 0x%llx\n", func, line, - (uint64_t)VM_KERNEL_ADDRPERM(mbuf_pkthdr_rcvif(m)), - (uint64_t)VM_KERNEL_ADDRPERM(ifp)); - } - (*input_func)(ifp, m); - } - return 0; -} - -/* - * bridge_bpf_output: - * - * Invoke the output BPF callback if enabled - */ -static errno_t -bridge_bpf_output(ifnet_t ifp, struct mbuf *m) -{ - struct bridge_softc *sc = (struct bridge_softc *)ifnet_softc(ifp); - bpf_packet_func output_func = sc->sc_bpf_output; - - if (output_func != NULL) { - (*output_func)(ifp, m); - } - return 0; -} - -/* - * bridge_link_event: - * - * Report a data link event on an interface - */ -static void -bridge_link_event(struct ifnet *ifp, u_int32_t event_code) -{ - struct { - struct kern_event_msg header; - u_int32_t unit; - char if_name[IFNAMSIZ]; - } event; - -#if BRIDGE_DEBUG - if (IF_BRIDGE_DEBUG(BR_DBGF_LIFECYCLE)) { - printf("%s: %s event_code %u - %s\n", __func__, ifp->if_xname, - event_code, dlil_kev_dl_code_str(event_code)); - } -#endif /* BRIDGE_DEBUG */ - - bzero(&event, sizeof(event)); - event.header.total_size = sizeof(event); - event.header.vendor_code = KEV_VENDOR_APPLE; - event.header.kev_class = KEV_NETWORK_CLASS; - event.header.kev_subclass = KEV_DL_SUBCLASS; - event.header.event_code = event_code; - event.header.event_data[0] = ifnet_family(ifp); - event.unit = (u_int32_t)ifnet_unit(ifp); - strlcpy(event.if_name, ifnet_name(ifp), IFNAMSIZ); - ifnet_event(ifp, &event.header); -} - -#define BRIDGE_HF_DROP(reason, func, line) { \ - bridge_hostfilter_stats.reason++; \ - if (IF_BRIDGE_DEBUG(BR_DBGF_HOSTFILTER)) { \ - printf("%s.%d" #reason, func, line); \ - error = EINVAL; \ - } \ - } +#define BRIDGE_HF_DROP(reason, func, line) { \ + bridge_hostfilter_stats.reason++; \ + if (IF_BRIDGE_DEBUG(BR_DBGF_HOSTFILTER)) { \ + printf("%s.%d" #reason, func, line); \ + error = EINVAL; \ + } \ + } /* * Make sure this is a DHCP or Bootp request that match the host filter @@ -7620,6 +7134,33 @@ done: return eh; } +static boolean_t +is_broadcast_ip_packet(mbuf_t *data) +{ + struct ether_header *eh; + uint16_t ether_type; + boolean_t is_broadcast = FALSE; + + eh = mtod(*data, struct ether_header *); + ether_type = ntohs(eh->ether_type); + switch (ether_type) { + case ETHERTYPE_IP: + eh = get_ether_ip_header(data, FALSE); + if (eh != NULL) { + struct in_addr dst; + struct ip *iphdr; + + iphdr = (struct ip *)(void *)(eh + 1); + bcopy(&iphdr->ip_dst, &dst, sizeof(dst)); + is_broadcast = (dst.s_addr == INADDR_BROADCAST); + } + break; + default: + break; + } + return is_broadcast; +} + static struct mac_nat_entry * bridge_mac_nat_ip_input(struct bridge_softc *sc, mbuf_t *data) { @@ -7775,19 +7316,6 @@ done: return eh; } -#if 0 -static void -bridge_mac_nat_icmpv6_input(struct bridge_softc *sc, mbuf_t *data, - struct ether_header *eh, struct ip6_hdr *hdr) -{ -#pragma unused(sc) -#pragma unused(data) -#pragma unused(eh) -#pragma unused(hdr) - return; -} 
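The reworked bridge_link_event() above (and the matching interface_link_event() change in the if_fake.c hunk later in this patch) builds the kernel event as one flat, suitably aligned character buffer holding the kern_event_msg header immediately followed by the payload, replacing the nested struct that relied on event_data[0]. A user-space sketch of that header-plus-payload layout, using stand-in struct names rather than the real kern_event_msg:

#include <stdalign.h>
#include <stdint.h>
#include <stdio.h>

#define IFNAMSIZ 16

/* stand-in for struct kern_event_msg */
struct msg_header {
	uint32_t total_size;
	uint32_t event_code;
};

struct link_event {
	uint32_t ifnet_family;
	uint32_t unit;
	char     if_name[IFNAMSIZ];
};

int
main(void)
{
	/*
	 * One contiguous, zero-initialized buffer: header first, payload
	 * right behind it, aligned for the header type.
	 */
	alignas(struct msg_header) char message[sizeof(struct msg_header) +
	    sizeof(struct link_event)] = { 0 };
	struct msg_header *header = (struct msg_header *)message;
	struct link_event *data = (struct link_event *)(header + 1);

	header->total_size = sizeof(message);
	header->event_code = 42;	/* arbitrary example code */
	data->ifnet_family = 1;		/* arbitrary example family */
	data->unit = 0;
	snprintf(data->if_name, IFNAMSIZ, "bridge0");

	printf("event %u for %s, %u bytes total\n",
	    (unsigned int)header->event_code, data->if_name,
	    (unsigned int)header->total_size);
	return 0;
}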
-#endif - #include #include @@ -8008,11 +7536,6 @@ bridge_mac_nat_ipv6_input(struct bridge_softc *sc, mbuf_t *data) goto done; } ip6h = (struct ip6_hdr *)(void *)(eh + 1); -#if 0 - if (ip6h->ip6_nxt == IPPROTO_ICMPV6) { - bridge_mac_nat_icmpv6_input(sc, data, eh, ip6h); - } -#endif bcopy(&ip6h->ip6_dst, &dst, sizeof(dst)); /* XXX validate IPv6 address */ if (IN6_IS_ADDR_UNSPECIFIED(&dst)) { @@ -8332,19 +7855,179 @@ bridge_mac_nat_translate(mbuf_t *data, struct mac_nat_record *mnr, bridge_mac_nat_ip_translate(data, mnr); break; - case ETHERTYPE_IPV6: - bridge_mac_nat_ipv6_translate(data, mnr, eaddr); - break; + case ETHERTYPE_IPV6: + bridge_mac_nat_ipv6_translate(data, mnr, eaddr); + break; + + default: + break; + } + return; +} + +/* + * bridge packet filtering + */ + +/* + * Perform basic checks on header size since + * pfil assumes ip_input has already processed + * it for it. Cut-and-pasted from ip_input.c. + * Given how simple the IPv6 version is, + * does the IPv4 version really need to be + * this complicated? + * + * XXX Should we update ipstat here, or not? + * XXX Right now we update ipstat but not + * XXX csum_counter. + */ +static int +bridge_ip_checkbasic(struct mbuf **mp) +{ + struct mbuf *m = *mp; + struct ip *ip; + int len, hlen; + u_short sum; + + if (*mp == NULL) { + return -1; + } + + if (IP_HDR_ALIGNED_P(mtod(m, caddr_t)) == 0) { + /* max_linkhdr is already rounded up to nearest 4-byte */ + if ((m = m_copyup(m, sizeof(struct ip), + max_linkhdr)) == NULL) { + /* XXXJRT new stat, please */ + ipstat.ips_toosmall++; + goto bad; + } + } else if (OS_EXPECT((size_t)m->m_len < sizeof(struct ip), 0)) { + if ((m = m_pullup(m, sizeof(struct ip))) == NULL) { + ipstat.ips_toosmall++; + goto bad; + } + } + ip = mtod(m, struct ip *); + if (ip == NULL) { + goto bad; + } + + if (IP_VHL_V(ip->ip_vhl) != IPVERSION) { + ipstat.ips_badvers++; + goto bad; + } + hlen = IP_VHL_HL(ip->ip_vhl) << 2; + if (hlen < (int)sizeof(struct ip)) { /* minimum header length */ + ipstat.ips_badhlen++; + goto bad; + } + if (hlen > m->m_len) { + if ((m = m_pullup(m, hlen)) == 0) { + ipstat.ips_badhlen++; + goto bad; + } + ip = mtod(m, struct ip *); + if (ip == NULL) { + goto bad; + } + } + + if (m->m_pkthdr.csum_flags & CSUM_IP_CHECKED) { + sum = !(m->m_pkthdr.csum_flags & CSUM_IP_VALID); + } else { + if (hlen == sizeof(struct ip)) { + sum = in_cksum_hdr(ip); + } else { + sum = in_cksum(m, hlen); + } + } + if (sum) { + ipstat.ips_badsum++; + goto bad; + } + + /* Retrieve the packet length. */ + len = ntohs(ip->ip_len); + + /* + * Check for additional length bogosity + */ + if (len < hlen) { + ipstat.ips_badlen++; + goto bad; + } + + /* + * Check that the amount of data in the buffers + * is as at least much as the IP header would have us expect. + * Drop packet if shorter than we expect. + */ + if (m->m_pkthdr.len < len) { + ipstat.ips_tooshort++; + goto bad; + } + + /* Checks out, proceed */ + *mp = m; + return 0; + +bad: + *mp = m; + return -1; +} + +/* + * Same as above, but for IPv6. + * Cut-and-pasted from ip6_input.c. + * XXX Should we update ip6stat, or not? + */ +static int +bridge_ip6_checkbasic(struct mbuf **mp) +{ + struct mbuf *m = *mp; + struct ip6_hdr *ip6; + + /* + * If the IPv6 header is not aligned, slurp it up into a new + * mbuf with space for link headers, in the event we forward + * it. Otherwise, if it is aligned, make sure the entire base + * IPv6 header is in the first mbuf of the chain. 
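bridge_ip_checkbasic() above (re-homed in this patch along with its IPv6 counterpart below) performs the sanity checks ip_input() would normally have done before a filter sees the packet: IP version, header length, checksum, and total length. A simplified user-space sketch of just the version and length checks on a flat buffer, without the mbuf pullup and checksum handling:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* returns 0 if the first 'avail' bytes hold a plausible IPv4 header */
static int
ip_checkbasic(const uint8_t *pkt, size_t avail)
{
	uint8_t version, hlen;
	uint16_t total_len;

	if (avail < 20) {
		return -1;			/* too short for a base header */
	}
	version = pkt[0] >> 4;
	hlen = (uint8_t)((pkt[0] & 0x0f) << 2);
	if (version != 4) {
		return -1;			/* ips_badvers */
	}
	if (hlen < 20 || hlen > avail) {
		return -1;			/* ips_badhlen */
	}
	total_len = (uint16_t)((pkt[2] << 8) | pkt[3]);
	if (total_len < hlen || total_len > avail) {
		return -1;			/* ips_badlen / ips_tooshort */
	}
	return 0;
}

int
main(void)
{
	uint8_t pkt[60] = { 0x45, 0x00, 0x00, 0x3c };	/* v4, hlen 20, len 60 */

	printf("basic check: %s\n",
	    ip_checkbasic(pkt, sizeof(pkt)) == 0 ? "ok" : "bad");
	return 0;
}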
+ */ + if (IP6_HDR_ALIGNED_P(mtod(m, caddr_t)) == 0) { + struct ifnet *inifp = m->m_pkthdr.rcvif; + /* max_linkhdr is already rounded up to nearest 4-byte */ + if ((m = m_copyup(m, sizeof(struct ip6_hdr), + max_linkhdr)) == NULL) { + /* XXXJRT new stat, please */ + ip6stat.ip6s_toosmall++; + in6_ifstat_inc(inifp, ifs6_in_hdrerr); + goto bad; + } + } else if (OS_EXPECT((size_t)m->m_len < sizeof(struct ip6_hdr), 0)) { + struct ifnet *inifp = m->m_pkthdr.rcvif; + if ((m = m_pullup(m, sizeof(struct ip6_hdr))) == NULL) { + ip6stat.ip6s_toosmall++; + in6_ifstat_inc(inifp, ifs6_in_hdrerr); + goto bad; + } + } + + ip6 = mtod(m, struct ip6_hdr *); - default: - break; + if ((ip6->ip6_vfc & IPV6_VERSION_MASK) != IPV6_VERSION) { + ip6stat.ip6s_badvers++; + in6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_hdrerr); + goto bad; } - return; -} -/* - * bridge packet filtering - */ + /* Checks out, proceed */ + *mp = m; + return 0; + +bad: + *mp = m; + return -1; +} /* * the PF routines expect to be called from ip_input, so we @@ -8352,9 +8035,9 @@ bridge_mac_nat_translate(mbuf_t *data, struct mac_nat_record *mnr, * * XXX : this is heavily inspired on bridge_pfil() */ -static -int -bridge_pf(struct mbuf **mp, struct ifnet *ifp, uint32_t sc_filter_flags, int input) +static int +bridge_pf(struct mbuf **mp, struct ifnet *ifp, uint32_t sc_filter_flags, + int input) { /* * XXX : mpetit : heavily inspired by bridge_pfil() @@ -8552,3 +8235,603 @@ bad: *mp = NULL; return error; } + +/* + * Copyright (C) 2014, Stefano Garzarella - Universita` di Pisa. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +/* + * XXX-ste: Maybe this function must be moved into kern/uipc_mbuf.c + * + * Create a queue of packets/segments which fit the given mss + hdr_len. + * m0 points to mbuf chain to be segmented. + * This function splits the payload (m0-> m_pkthdr.len - hdr_len) + * into segments of length MSS bytes and then copy the first hdr_len bytes + * from m0 at the top of each segment. + * If hdr2_buf is not NULL (hdr2_len is the buf length), it is copied + * in each segment after the first hdr_len bytes + * + * Return the new queue with the segments on success, NULL on failure. + * (the mbuf queue is freed in this case). + * nsegs contains the number of segments generated. 
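The comment above spells out m_seg()'s contract: the payload (m_pkthdr.len minus hdr_len) is cut into mss-sized pieces and every piece gets its own copy of the first hdr_len bytes. A standalone sketch of just that arithmetic, with made-up example sizes:

#include <stdio.h>

/*
 * Report how a packet of total_len bytes (hdr_len of which are headers)
 * would be carved into segments carrying at most mss payload bytes each,
 * mirroring the splitting loop in m_seg().
 */
static int
segment_plan(int total_len, int hdr_len, int mss)
{
	int payload = total_len - hdr_len;
	int nsegs = 0;

	if (payload <= mss) {
		printf("no segmentation needed (%d payload bytes)\n", payload);
		return 1;
	}
	for (int off = 0; off < payload; off += mss) {
		int seg = (payload - off < mss) ? payload - off : mss;

		printf("segment %d: %d header + %d payload bytes\n",
		    ++nsegs, hdr_len, seg);
	}
	return nsegs;
}

int
main(void)
{
	/*
	 * e.g. a 9014-byte frame with 54 bytes of Ethernet+IPv4+TCP headers
	 * and an MSS of 1460: six full segments plus a 200-byte tail.
	 */
	printf("%d segment(s)\n", segment_plan(9014, 54, 1460));
	return 0;
}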
+ */ + +static struct mbuf * +m_seg(struct mbuf *m0, int hdr_len, int mss, int *nsegs, + char * hdr2_buf, int hdr2_len) +{ + int off = 0, n, firstlen; + struct mbuf **mnext, *mseg; + int total_len = m0->m_pkthdr.len; + + /* + * Segmentation useless + */ + if (total_len <= hdr_len + mss) { + return m0; + } + + if (hdr2_buf == NULL || hdr2_len <= 0) { + hdr2_buf = NULL; + hdr2_len = 0; + } + + off = hdr_len + mss; + firstlen = mss; /* first segment stored in the original mbuf */ + + mnext = &(m0->m_nextpkt); /* pointer to next packet */ + + for (n = 1; off < total_len; off += mss, n++) { + struct mbuf *m; + /* + * Copy the header from the original packet + * and create a new mbuf chain + */ + if (MHLEN < hdr_len) { + m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); + } else { + m = m_gethdr(M_NOWAIT, MT_DATA); + } + + if (m == NULL) { +#ifdef GSO_DEBUG + D("MGETHDR error\n"); +#endif + goto err; + } + + m_copydata(m0, 0, hdr_len, mtod(m, caddr_t)); + + m->m_len = hdr_len; + /* + * if the optional header is present, copy it + */ + if (hdr2_buf != NULL) { + m_copyback(m, hdr_len, hdr2_len, hdr2_buf); + } + + m->m_flags |= (m0->m_flags & M_COPYFLAGS); + if (off + mss >= total_len) { /* last segment */ + mss = total_len - off; + } + /* + * Copy the payload from original packet + */ + mseg = m_copym(m0, off, mss, M_NOWAIT); + if (mseg == NULL) { + m_freem(m); +#ifdef GSO_DEBUG + D("m_copym error\n"); +#endif + goto err; + } + m_cat(m, mseg); + + m->m_pkthdr.len = hdr_len + hdr2_len + mss; + m->m_pkthdr.rcvif = m0->m_pkthdr.rcvif; + /* + * Copy the checksum flags and data (in_cksum() need this) + */ + m->m_pkthdr.csum_flags = m0->m_pkthdr.csum_flags; + m->m_pkthdr.csum_data = m0->m_pkthdr.csum_data; + m->m_pkthdr.tso_segsz = m0->m_pkthdr.tso_segsz; + + *mnext = m; + mnext = &(m->m_nextpkt); + } + + /* + * Update first segment. + * If the optional header is present, is necessary + * to insert it into the first segment. 
+ */ + if (hdr2_buf == NULL) { + m_adj(m0, hdr_len + firstlen - total_len); + m0->m_pkthdr.len = hdr_len + firstlen; + } else { + mseg = m_copym(m0, hdr_len, firstlen, M_NOWAIT); + if (mseg == NULL) { +#ifdef GSO_DEBUG + D("m_copym error\n"); +#endif + goto err; + } + m_adj(m0, hdr_len - total_len); + m_copyback(m0, hdr_len, hdr2_len, hdr2_buf); + m_cat(m0, mseg); + m0->m_pkthdr.len = hdr_len + hdr2_len + firstlen; + } + + if (nsegs != NULL) { + *nsegs = n; + } + return m0; +err: + while (m0 != NULL) { + mseg = m0->m_nextpkt; + m0->m_nextpkt = NULL; + m_freem(m0); + m0 = mseg; + } + return NULL; +} + +/* + * Wrappers of IPv4 checksum functions + */ +static inline void +gso_ipv4_data_cksum(struct mbuf *m, struct ip *ip, int mac_hlen) +{ + m->m_data += mac_hlen; + m->m_len -= mac_hlen; + m->m_pkthdr.len -= mac_hlen; +#if __FreeBSD_version < 1000000 + ip->ip_len = ntohs(ip->ip_len); /* needed for in_delayed_cksum() */ +#endif + + in_delayed_cksum(m); + +#if __FreeBSD_version < 1000000 + ip->ip_len = htons(ip->ip_len); +#endif + m->m_pkthdr.csum_flags &= ~CSUM_DELAY_DATA; + m->m_len += mac_hlen; + m->m_pkthdr.len += mac_hlen; + m->m_data -= mac_hlen; +} + +static inline void +gso_ipv4_hdr_cksum(struct mbuf *m, struct ip *ip, int mac_hlen, int ip_hlen) +{ + m->m_data += mac_hlen; + + ip->ip_sum = in_cksum(m, ip_hlen); + + m->m_pkthdr.csum_flags &= ~CSUM_IP; + m->m_data -= mac_hlen; +} + +/* + * Structure that contains the state during the TCP segmentation + */ +struct gso_ip_tcp_state { + void (*update) + (struct gso_ip_tcp_state*, struct mbuf*); + void (*internal) + (struct gso_ip_tcp_state*, struct mbuf*); + union { + struct ip *ip; + struct ip6_hdr *ip6; + } hdr; + struct tcphdr *tcp; + int mac_hlen; + int ip_hlen; + int tcp_hlen; + int hlen; + int pay_len; + int sw_csum; + uint32_t tcp_seq; + uint16_t ip_id; + boolean_t is_tx; +}; + +/* + * Update the pointers to TCP and IPv4 headers + */ +static inline void +gso_ipv4_tcp_update(struct gso_ip_tcp_state *state, struct mbuf *m) +{ + state->hdr.ip = (struct ip *)(void *)(mtod(m, uint8_t *) + state->mac_hlen); + state->tcp = (struct tcphdr *)(void *)((caddr_t)(state->hdr.ip) + state->ip_hlen); + state->pay_len = m->m_pkthdr.len - state->hlen; +} + +/* + * Set properly the TCP and IPv4 headers + */ +static inline void +gso_ipv4_tcp_internal(struct gso_ip_tcp_state *state, struct mbuf *m) +{ + /* + * Update IP header + */ + state->hdr.ip->ip_id = htons((state->ip_id)++); + state->hdr.ip->ip_len = htons(m->m_pkthdr.len - state->mac_hlen); + /* + * TCP Checksum + */ + state->tcp->th_sum = 0; + state->tcp->th_sum = in_pseudo(state->hdr.ip->ip_src.s_addr, + state->hdr.ip->ip_dst.s_addr, + htons(state->tcp_hlen + IPPROTO_TCP + state->pay_len)); + /* + * Checksum HW not supported (TCP) + */ + if (state->sw_csum & CSUM_DELAY_DATA) { + gso_ipv4_data_cksum(m, state->hdr.ip, state->mac_hlen); + } + + state->tcp_seq += state->pay_len; + /* + * IP Checksum + */ + state->hdr.ip->ip_sum = 0; + /* + * Checksum HW not supported (IP) + */ + if (state->sw_csum & CSUM_IP) { + gso_ipv4_hdr_cksum(m, state->hdr.ip, state->mac_hlen, state->ip_hlen); + } +} + + +/* + * Updates the pointers to TCP and IPv6 headers + */ +static inline void +gso_ipv6_tcp_update(struct gso_ip_tcp_state *state, struct mbuf *m) +{ + state->hdr.ip6 = (struct ip6_hdr *)(mtod(m, uint8_t *) + state->mac_hlen); + state->tcp = (struct tcphdr *)(void *)((caddr_t)(state->hdr.ip6) + state->ip_hlen); + state->pay_len = m->m_pkthdr.len - state->hlen; +} + +/* + * Sets properly the TCP and IPv6 headers + 
*/ +static inline void +gso_ipv6_tcp_internal(struct gso_ip_tcp_state *state, struct mbuf *m) +{ + state->hdr.ip6->ip6_plen = htons(m->m_pkthdr.len - + state->mac_hlen - state->ip_hlen); + /* + * TCP Checksum + */ + state->tcp->th_sum = 0; + state->tcp->th_sum = in6_pseudo(&state->hdr.ip6->ip6_src, + &state->hdr.ip6->ip6_dst, + htonl(state->tcp_hlen + state->pay_len + IPPROTO_TCP)); + /* + * Checksum HW not supported (TCP) + */ + if (state->sw_csum & CSUM_DELAY_IPV6_DATA) { + (void)in6_finalize_cksum(m, state->mac_hlen, -1, -1, state->sw_csum); + m->m_pkthdr.csum_flags &= ~CSUM_DELAY_IPV6_DATA; + } + state->tcp_seq += state->pay_len; +} + +/* + * Init the state during the TCP segmentation + */ +static inline boolean_t +gso_ip_tcp_init_state(struct gso_ip_tcp_state *state, struct ifnet *ifp, struct mbuf *m, int mac_hlen, int ip_hlen, boolean_t isipv6) +{ +#pragma unused(ifp) + + if (isipv6) { + state->hdr.ip6 = (struct ip6_hdr *)(mtod(m, uint8_t *) + mac_hlen); + if (state->hdr.ip6->ip6_nxt != IPPROTO_TCP) { + printf("%s: Non-TCP (%d) IPv6 frame", __func__, state->hdr.ip6->ip6_nxt); + return FALSE; + } + state->tcp = (struct tcphdr *)(void *)((caddr_t)(state->hdr.ip6) + ip_hlen); + state->update = gso_ipv6_tcp_update; + state->internal = gso_ipv6_tcp_internal; + state->sw_csum = CSUM_DELAY_IPV6_DATA; + } else { + state->hdr.ip = (struct ip *)(void *)(mtod(m, uint8_t *) + mac_hlen); + if (state->hdr.ip->ip_p != IPPROTO_TCP) { + printf("%s: Non-TCP (%d) IPv4 frame", __func__, state->hdr.ip->ip_p); + return FALSE; + } + state->ip_id = ntohs(state->hdr.ip->ip_id); + state->tcp = (struct tcphdr *)(void *)((caddr_t)(state->hdr.ip) + ip_hlen); + state->update = gso_ipv4_tcp_update; + state->internal = gso_ipv4_tcp_internal; + state->sw_csum = CSUM_DELAY_DATA | CSUM_IP; + } + state->mac_hlen = mac_hlen; + state->ip_hlen = ip_hlen; + state->tcp_hlen = state->tcp->th_off << 2; + state->hlen = mac_hlen + ip_hlen + state->tcp_hlen; + state->tcp_seq = ntohl(state->tcp->th_seq); + //state->sw_csum = m->m_pkthdr.csum_flags & ~ifp->if_hwassist; + return TRUE; +} + +/* + * GSO on TCP/IP (v4 or v6) + * + * If is_tx is TRUE, segmented packets are transmitted after they are + * segmented. + * + * If is_tx is FALSE, the segmented packets are returned as a chain in *mp. + */ +static int +gso_ip_tcp(struct ifnet *ifp, struct mbuf **mp, struct gso_ip_tcp_state *state, + boolean_t is_tx) +{ + struct mbuf *m, *m_tx; + int error = 0; + int mss = 0; + int nsegs = 0; + struct mbuf *m0 = *mp; +#ifdef GSO_STATS + int total_len = m0->m_pkthdr.len; +#endif /* GSO_STATS */ + +#if 1 + mss = ifp->if_mtu - state->ip_hlen - state->tcp_hlen; +#else + if (m0->m_pkthdr.csum_flags & ifp->if_hwassist & CSUM_TSO) {/* TSO with GSO */ + mss = ifp->if_hw_tsomax - state->ip_hlen - state->tcp_hlen; + } else { + mss = m0->m_pkthdr.tso_segsz; + } +#endif + + *mp = m0 = m_seg(m0, state->hlen, mss, &nsegs, 0, 0); + if (m0 == NULL) { + return ENOBUFS; /* XXX ok? */ + } +#if BRIDGE_DEBUG + if (IF_BRIDGE_DEBUG(BR_DBGF_SEGMENTATION)) { + printf("%s: %s %s mss %d nsegs %d\n", __func__, + ifp->if_xname, + is_tx ? "TX" : "RX", + mss, nsegs); + } +#endif /* BRIDGE_DEBUG */ + + + /* + * XXX-ste: can this happen? 
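Each emitted segment has its headers patched per address family: gso_ipv4_tcp_internal() above rewrites ip_len and ip_id and reseeds the TCP pseudo-header sum, and when the hardware cannot finish the job the checksums are completed in software through in_cksum()-style routines. For reference, a self-contained user-space version of that ones'-complement Internet checksum, applied to an arbitrary sample IPv4 header with its checksum field zeroed:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* RFC 1071 ones'-complement checksum over len bytes (len may be odd) */
static uint16_t
internet_cksum(const void *buf, size_t len)
{
	const uint8_t *p = buf;
	uint32_t sum = 0;

	while (len > 1) {
		sum += ((uint32_t)p[0] << 8) | p[1];
		p += 2;
		len -= 2;
	}
	if (len == 1) {
		sum += (uint32_t)p[0] << 8;
	}
	while (sum >> 16) {
		sum = (sum & 0xffff) + (sum >> 16);
	}
	return (uint16_t)~sum;
}

int
main(void)
{
	uint8_t hdr[20] = {
		0x45, 0x00, 0x00, 0x3c, 0x1c, 0x46, 0x40, 0x00,
		0x40, 0x06, 0x00, 0x00, 0xac, 0x10, 0x0a, 0x63,
		0xac, 0x10, 0x0a, 0x0c
	};
	uint16_t sum = internet_cksum(hdr, sizeof(hdr));

	printf("ip_sum = 0x%04x\n", sum);
	/* with the checksum in place, summing the header again yields 0 */
	hdr[10] = (uint8_t)(sum >> 8);
	hdr[11] = (uint8_t)(sum & 0xff);
	printf("recheck = 0x%04x\n", internet_cksum(hdr, sizeof(hdr)));
	return 0;
}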
+ */ + if (m0->m_nextpkt == NULL) { +#ifdef GSO_DEBUG + D("only 1 segment"); +#endif + if (is_tx) { + error = bridge_transmit(ifp, m0); + } + return error; + } +#ifdef GSO_STATS + GSOSTAT_SET_MAX(tcp.gsos_max_mss, mss); + GSOSTAT_SET_MIN(tcp.gsos_min_mss, mss); + GSOSTAT_ADD(tcp.gsos_osegments, nsegs); +#endif /* GSO_STATS */ + + /* first pkt */ + m = m0; + + state->update(state, m); + + do { + state->tcp->th_flags &= ~(TH_FIN | TH_PUSH); + + state->internal(state, m); + m_tx = m; + m = m->m_nextpkt; + if (is_tx) { + m_tx->m_nextpkt = NULL; + if ((error = bridge_transmit(ifp, m_tx)) != 0) { + /* + * XXX: If a segment can not be sent, discard the following + * segments and propagate the error to the upper levels. + * In this way the TCP retransmits all the initial packet. + */ +#ifdef GSO_DEBUG + D("if_transmit error\n"); +#endif + goto err; + } + } + state->update(state, m); + + state->tcp->th_flags &= ~TH_CWR; + state->tcp->th_seq = htonl(state->tcp_seq); + } while (m->m_nextpkt); + + /* last pkt */ + state->internal(state, m); + + if (is_tx) { + error = bridge_transmit(ifp, m); +#ifdef GSO_DEBUG + if (error) { + D("last if_transmit error\n"); + D("error - type = %d \n", error); + } +#endif + } +#ifdef GSO_STATS + if (!error) { + GSOSTAT_INC(tcp.gsos_segmented); + GSOSTAT_SET_MAX(tcp.gsos_maxsegmented, total_len); + GSOSTAT_SET_MIN(tcp.gsos_minsegmented, total_len); + GSOSTAT_ADD(tcp.gsos_totalbyteseg, total_len); + } +#endif /* GSO_STATS */ + return error; + +err: +#ifdef GSO_DEBUG + D("error - type = %d \n", error); +#endif + while (m != NULL) { + m_tx = m->m_nextpkt; + m->m_nextpkt = NULL; + m_freem(m); + m = m_tx; + } + return error; +} + +/* + * GSO on TCP/IPv4 + */ +static int +gso_ipv4_tcp(struct ifnet *ifp, struct mbuf **mp, u_int mac_hlen, + boolean_t is_tx) +{ + struct ip *ip; + struct gso_ip_tcp_state state; + int hlen; + int ip_hlen; + struct mbuf *m0 = *mp; + + if (!is_tx && ipforwarding == 0) { + /* no need to segment if the packet will not be forwarded */ + return 0; + } + hlen = mac_hlen + sizeof(struct ip); + if (m0->m_len < hlen) { +#ifdef GSO_DEBUG + D("m_len < hlen - m_len: %d hlen: %d", m0->m_len, hlen); +#endif + *mp = m0 = m_pullup(m0, hlen); + if (m0 == NULL) { + return ENOBUFS; + } + } + ip = (struct ip *)(void *)(mtod(m0, uint8_t *) + mac_hlen); + ip_hlen = IP_VHL_HL(ip->ip_vhl) << 2; + hlen = mac_hlen + ip_hlen + sizeof(struct tcphdr); + if (m0->m_len < hlen) { +#ifdef GSO_DEBUG + D("m_len < hlen - m_len: %d hlen: %d", m0->m_len, hlen); +#endif + *mp = m0 = m_pullup(m0, hlen); + if (m0 == NULL) { + return ENOBUFS; + } + } + if (!is_tx) { + /* if the destination is a local IP address, don't segment */ + struct in_addr dst_ip; + + bcopy(&ip->ip_dst, &dst_ip, sizeof(dst_ip)); + if (in_addr_is_ours(dst_ip)) { + return 0; + } + } + + m0->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); + m0->m_pkthdr.csum_flags = CSUM_DELAY_DATA; + + if (!gso_ip_tcp_init_state(&state, ifp, m0, mac_hlen, ip_hlen, FALSE)) { + m_freem(m0); + *mp = NULL; + return EINVAL; + } + + return gso_ip_tcp(ifp, mp, &state, is_tx); +} + +/* + * GSO on TCP/IPv6 + */ +static int +gso_ipv6_tcp(struct ifnet *ifp, struct mbuf **mp, u_int mac_hlen, + boolean_t is_tx) +{ + struct ip6_hdr *ip6; + struct gso_ip_tcp_state state; + int hlen; + int ip_hlen; + struct mbuf *m0 = *mp; + + if (!is_tx && ip6_forwarding == 0) { + /* no need to segment if the packet will not be forwarded */ + return 0; + } + + hlen = mac_hlen + sizeof(struct ip6_hdr); + if (m0->m_len < hlen) { +#ifdef GSO_DEBUG + D("m_len < hlen 
- m_len: %d hlen: %d", m0->m_len, hlen); +#endif + *mp = m0 = m_pullup(m0, hlen); + if (m0 == NULL) { + return ENOBUFS; + } + } + ip6 = (struct ip6_hdr *)(mtod(m0, uint8_t *) + mac_hlen); + ip_hlen = ip6_lasthdr(m0, mac_hlen, IPPROTO_IPV6, NULL) - mac_hlen; + hlen = mac_hlen + ip_hlen + sizeof(struct tcphdr); + if (m0->m_len < hlen) { +#ifdef GSO_DEBUG + D("m_len < hlen - m_len: %d hlen: %d", m0->m_len, hlen); +#endif + *mp = m0 = m_pullup(m0, hlen); + if (m0 == NULL) { + return ENOBUFS; + } + } + if (!is_tx) { + struct in6_addr dst_ip6; + + bcopy(&ip6->ip6_dst, &dst_ip6, sizeof(dst_ip6)); + if (IN6_IS_ADDR_LINKLOCAL(&dst_ip6)) { + dst_ip6.s6_addr16[1] = htons(ifp->if_index); + } + if (in6_addr_is_ours(&dst_ip6)) { + /* local IP address, no need to segment */ + return 0; + } + } + m0->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); + m0->m_pkthdr.csum_flags = CSUM_DELAY_IPV6_DATA; + + if (!gso_ip_tcp_init_state(&state, ifp, m0, mac_hlen, ip_hlen, TRUE)) { + m_freem(m0); + *mp = NULL; + return EINVAL; + } + + return gso_ip_tcp(ifp, mp, &state, is_tx); +} diff --git a/bsd/net/if_fake.c b/bsd/net/if_fake.c index 06baffe5e..a224f7e7e 100644 --- a/bsd/net/if_fake.c +++ b/bsd/net/if_fake.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015-2019 Apple Inc. All rights reserved. + * Copyright (c) 2015-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -121,14 +121,47 @@ static int if_fake_multibuflet = 0; SYSCTL_INT(_net_link_fake, OID_AUTO, multibuflet, CTLFLAG_RW | CTLFLAG_LOCKED, &if_fake_multibuflet, 0, "Fake interface using multi-buflet packets"); -static int if_fake_copypkt_mode = 0; -SYSCTL_INT(_net_link_fake, OID_AUTO, copypkt_mode, CTLFLAG_RW | CTLFLAG_LOCKED, - &if_fake_copypkt_mode, 0, "Fake interface copying packet to peer"); +typedef enum { + IFF_PP_MODE_GLOBAL = 0, /* share a global pool */ + IFF_PP_MODE_PRIVATE = 1, /* creates its own rx/tx pool */ + IFF_PP_MODE_PRIVATE_SPLIT = 2, /* creates its own split rx & tx pool */ +} iff_pktpool_mode_t; +static iff_pktpool_mode_t if_fake_pktpool_mode = 0; +SYSCTL_INT(_net_link_fake, OID_AUTO, pktpool_mode, CTLFLAG_RW | CTLFLAG_LOCKED, + &if_fake_pktpool_mode, 0, + "Fake interface packet pool mode (0 global, 1 private, 2 private split"); + +#define FETH_LINK_LAYER_AGGRETATION_FACTOR_MAX 32 +static int if_fake_link_layer_aggregation_factor = + FETH_LINK_LAYER_AGGRETATION_FACTOR_MAX; +static int +feth_link_layer_aggregation_factor_sysctl SYSCTL_HANDLER_ARGS +{ +#pragma unused(oidp, arg1, arg2) + unsigned int new_value; + int changed; + int error; -/* sysctl net.link.fake.tx_headroom */ -#define FETH_TX_HEADROOM_MAX 32 -static unsigned int if_fake_tx_headroom = 0; + error = sysctl_io_number(req, if_fake_link_layer_aggregation_factor, + sizeof(if_fake_link_layer_aggregation_factor), &new_value, + &changed); + if (error == 0 && changed != 0) { + if (new_value <= 0 || + new_value > FETH_LINK_LAYER_AGGRETATION_FACTOR_MAX) { + return EINVAL; + } + if_fake_link_layer_aggregation_factor = new_value; + } + return error; +} +SYSCTL_PROC(_net_link_fake, OID_AUTO, link_layer_aggregation_factor, + CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, + 0, 0, feth_link_layer_aggregation_factor_sysctl, "IU", + "Fake interface link layer aggregation factor"); + +#define FETH_TX_HEADROOM_MAX 32 +static unsigned int if_fake_tx_headroom = FETH_TX_HEADROOM_MAX; static int feth_tx_headroom_sysctl SYSCTL_HANDLER_ARGS { @@ -153,6 +186,35 @@ SYSCTL_PROC(_net_link_fake, OID_AUTO, tx_headroom, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, 0, 0, 
feth_tx_headroom_sysctl, "IU", "Fake ethernet Tx headroom"); +static int if_fake_fcs = 0; +SYSCTL_INT(_net_link_fake, OID_AUTO, fcs, CTLFLAG_RW | CTLFLAG_LOCKED, + &if_fake_fcs, 0, "Fake interface using frame check sequence"); + +#define FETH_TRAILER_LENGTH_MAX 28 +char feth_trailer[FETH_TRAILER_LENGTH_MAX + 1] = "trailertrailertrailertrailer"; +static unsigned int if_fake_trailer_length = 0; +static int +feth_trailer_length_sysctl SYSCTL_HANDLER_ARGS +{ +#pragma unused(oidp, arg1, arg2) + unsigned int new_value; + int changed; + int error; + + error = sysctl_io_number(req, if_fake_trailer_length, + sizeof(if_fake_trailer_length), &new_value, &changed); + if (error == 0 && changed != 0) { + if (new_value > FETH_TRAILER_LENGTH_MAX) { + return EINVAL; + } + if_fake_trailer_length = new_value; + } + return 0; +} + +SYSCTL_PROC(_net_link_fake, OID_AUTO, trailer_length, + CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, 0, 0, + feth_trailer_length_sysctl, "IU", "Fake interface frame trailer length"); /* sysctl net.link.fake.max_mtu */ #define FETH_MAX_MTU_DEFAULT 2048 @@ -235,13 +297,6 @@ feth_user_access_sysctl SYSCTL_HANDLER_ARGS if (new_value != 1) { return EINVAL; } - /* - * copypkt mode requires a kernel only buffer pool so - * it is incompatible with user access mode. - */ - if (if_fake_copypkt_mode != 0) { - return ENOTSUP; - } } if_fake_user_access = new_value; } @@ -324,13 +379,14 @@ SYSCTL_PROC(_net_link_fake, OID_AUTO, tx_drops, #define IFF_MAX_TX_RINGS IFF_NUM_TX_RINGS_WMM_MODE #define IFF_MAX_RX_RINGS IFF_NUM_RX_RINGS_WMM_MODE +#define IFF_MAX_BATCH_SIZE 32 + typedef uint16_t iff_flags_t; #define IFF_FLAGS_HWCSUM 0x0001 #define IFF_FLAGS_BSD_MODE 0x0002 #define IFF_FLAGS_DETACHING 0x0004 #define IFF_FLAGS_WMM_MODE 0x0008 #define IFF_FLAGS_MULTIBUFLETS 0x0010 -#define IFF_FLAGS_COPYPKT_MODE 0x0020 struct if_fake { @@ -346,6 +402,8 @@ struct if_fake { struct mbuf * iff_pending_tx_packet; boolean_t iff_start_busy; unsigned int iff_max_mtu; + uint32_t iff_fcs; + uint32_t iff_trailer_length; }; typedef struct if_fake * if_fake_ref; @@ -472,6 +530,13 @@ feth_lock_init(void) } #if 0 +static inline void +feth_assert_lock_held(void) +{ + LCK_MTX_ASSERT(feth_lck_mtx, LCK_MTX_ASSERT_OWNED); + return; +} + static inline void feth_assert_lock_not_held(void) { @@ -529,7 +594,7 @@ feth_max_mtu(ifnet_t ifp) static void feth_free(if_fake_ref fakeif) { - assert(fakeif->iff_retain_count == 0); + VERIFY(fakeif->iff_retain_count == 0); if (feth_in_bsd_mode(fakeif)) { if (fakeif->iff_pending_tx_packet) { m_freem(fakeif->iff_pending_tx_packet); @@ -548,7 +613,7 @@ feth_release(if_fake_ref fakeif) old_retain_count = OSDecrementAtomic(&fakeif->iff_retain_count); switch (old_retain_count) { case 0: - assert(old_retain_count != 0); + VERIFY(old_retain_count != 0); break; case 1: feth_free(fakeif); @@ -586,23 +651,24 @@ feth_ifnet_set_attrs(if_fake_ref fakeif, ifnet_t ifp) static void interface_link_event(ifnet_t ifp, u_int32_t event_code) { - struct { - struct kern_event_msg header; - u_int32_t unit; - char if_name[IFNAMSIZ]; - } event; - - bzero(&event, sizeof(event)); - event.header.total_size = sizeof(event); - event.header.vendor_code = KEV_VENDOR_APPLE; - event.header.kev_class = KEV_NETWORK_CLASS; - event.header.kev_subclass = KEV_DL_SUBCLASS; - event.header.event_code = event_code; - event.header.event_data[0] = ifnet_family(ifp); - event.unit = (u_int32_t) ifnet_unit(ifp); - strlcpy(event.if_name, ifnet_name(ifp), IFNAMSIZ); - ifnet_event(ifp, &event.header); - return; + struct event { + u_int32_t 
ifnet_family; + u_int32_t unit; + char if_name[IFNAMSIZ]; + }; + _Alignas(struct kern_event_msg) char message[sizeof(struct kern_event_msg) + sizeof(struct event)] = { 0 }; + struct kern_event_msg *header = (struct kern_event_msg*)message; + struct event *data = (struct event *)(header + 1); + + header->total_size = sizeof(message); + header->vendor_code = KEV_VENDOR_APPLE; + header->kev_class = KEV_NETWORK_CLASS; + header->kev_subclass = KEV_DL_SUBCLASS; + header->event_code = event_code; + data->ifnet_family = ifnet_family(ifp); + data->unit = (u_int32_t)ifnet_unit(ifp); + strlcpy(data->if_name, ifnet_name(ifp), IFNAMSIZ); + ifnet_event(ifp, header); } static if_fake_ref @@ -637,6 +703,8 @@ feth_clone_create(struct if_clone *ifc, u_int32_t unit, __unused void *params) fakeif->iff_flags |= IFF_FLAGS_HWCSUM; } fakeif->iff_max_mtu = get_max_mtu(if_fake_bsd_mode, if_fake_max_mtu); + fakeif->iff_fcs = if_fake_fcs; + fakeif->iff_trailer_length = if_fake_trailer_length; /* use the interface name as the unique id for ifp recycle */ if ((unsigned int) @@ -790,9 +858,53 @@ failed: return NULL; } +static int +feth_add_mbuf_trailer(struct mbuf *m, void *trailer, size_t trailer_len) +{ + int ret; + ASSERT(trailer_len <= FETH_TRAILER_LENGTH_MAX); + + ret = m_append(m, trailer_len, (caddr_t)trailer); + if (ret == 1) { + FETH_DPRINTF("%s %zuB trailer added\n", __func__, trailer_len); + return 0; + } + printf("%s m_append failed\n", __func__); + return ENOTSUP; +} + +static int +feth_add_mbuf_fcs(struct mbuf *m) +{ + uint32_t pkt_len, offset = 0; + uint32_t crc = 0; + int err = 0; + + ASSERT(sizeof(crc) == ETHER_CRC_LEN); + + pkt_len = m->m_pkthdr.len; + struct mbuf *iter = m; + while (iter != NULL && offset < pkt_len) { + uint32_t frag_len = iter->m_len; + ASSERT(frag_len <= (pkt_len - offset)); + crc = crc32(crc, mtod(iter, void *), frag_len); + offset += frag_len; + iter = m->m_next; + } + + err = feth_add_mbuf_trailer(m, &crc, ETHER_CRC_LEN); + if (err != 0) { + return err; + } + + m->m_flags |= M_HASFCS; + + return 0; +} + static void feth_output_common(ifnet_t ifp, struct mbuf * m, ifnet_t peer, - iff_flags_t flags) + iff_flags_t flags, bool fcs, void *trailer, size_t trailer_len) { void * frame_header; @@ -807,6 +919,13 @@ feth_output_common(ifnet_t ifp, struct mbuf * m, ifnet_t peer, (void)ifnet_stat_increment_out(ifp, 1, m->m_pkthdr.len, 0); bpf_tap_out(ifp, DLT_EN10MB, m, NULL, 0); + if (trailer != 0) { + feth_add_mbuf_trailer(m, trailer, trailer_len); + } + if (fcs) { + feth_add_mbuf_fcs(m); + } + (void)mbuf_pkthdr_setrcvif(m, peer); mbuf_pkthdr_setheader(m, frame_header); mbuf_pkthdr_adjustlen(m, -ETHER_HDR_LEN); @@ -823,7 +942,9 @@ feth_start(ifnet_t ifp) struct mbuf * copy_m = NULL; if_fake_ref fakeif; iff_flags_t flags = 0; - ifnet_t peer = NULL; + bool fcs; + size_t trailer_len; + ifnet_t peer = NULL; struct mbuf * m; struct mbuf * save_m; @@ -842,6 +963,8 @@ feth_start(ifnet_t ifp) peer = fakeif->iff_peer; flags = fakeif->iff_flags; + fcs = fakeif->iff_fcs; + trailer_len = fakeif->iff_trailer_length; /* check for pending TX */ m = fakeif->iff_pending_tx_packet; @@ -862,8 +985,9 @@ feth_start(ifnet_t ifp) save_m = NULL; for (;;) { if (copy_m != NULL) { - assert(peer != NULL); - feth_output_common(ifp, copy_m, peer, flags); + VERIFY(peer != NULL); + feth_output_common(ifp, copy_m, peer, flags, fcs, + feth_trailer, trailer_len); copy_m = NULL; } if (ifnet_dequeue(ifp, &m) != 0) { @@ -904,7 +1028,9 @@ feth_output(ifnet_t ifp, struct mbuf * m) struct mbuf * copy_m; if_fake_ref fakeif; 
iff_flags_t flags; - ifnet_t peer = NULL; + bool fcs; + size_t trailer_len; + ifnet_t peer = NULL; if (m == NULL) { return 0; @@ -922,6 +1048,8 @@ feth_output(ifnet_t ifp, struct mbuf * m) if (fakeif != NULL) { peer = fakeif->iff_peer; flags = fakeif->iff_flags; + fcs = fakeif->iff_fcs; + trailer_len = fakeif->iff_trailer_length; } feth_unlock(); if (peer == NULL) { @@ -929,7 +1057,8 @@ feth_output(ifnet_t ifp, struct mbuf * m) ifnet_stat_increment_out(ifp, 0, 0, 1); return 0; } - feth_output_common(ifp, copy_m, peer, flags); + feth_output_common(ifp, copy_m, peer, flags, fcs, feth_trailer, + trailer_len); return 0; } @@ -1228,7 +1357,7 @@ feth_ioctl(ifnet_t ifp, u_long cmd, void * data) break; } drv_set_command = TRUE; - /* FALL THROUGH */ + OS_FALLTHROUGH; case SIOCGDRVSPEC32: case SIOCGDRVSPEC64: drv.ifdrvu_p = data; diff --git a/bsd/net/if_gif.c b/bsd/net/if_gif.c index 9f9e6c574..0a0fe86aa 100644 --- a/bsd/net/if_gif.c +++ b/bsd/net/if_gif.c @@ -94,13 +94,11 @@ #include #endif /* INET */ -#if INET6 #include #include #include #include #include -#endif /* INET6 */ #include #include @@ -108,10 +106,6 @@ #include -#if CONFIG_MACF_NET -#include -#endif - #define GIFNAME "gif" #define GIFDEV "if_gif" @@ -146,7 +140,6 @@ static struct protosw in_gif_protosw = .pr_unlock = rip_unlock, }; #endif -#if INET6 static struct ip6protosw in6_gif_protosw = { .pr_type = SOCK_RAW, @@ -156,7 +149,6 @@ static struct ip6protosw in6_gif_protosw = .pr_usrreqs = &rip6_usrreqs, .pr_unlock = rip_unlock, }; -#endif static int gif_remove(struct ifnet *); static int gif_clone_create(struct if_clone *, uint32_t, void *); @@ -386,7 +378,6 @@ gif_clone_create(struct if_clone *ifc, uint32_t unit, __unused void *params) goto done; } #endif -#if INET6 sc->encap_cookie6 = encap_attach_func(AF_INET6, -1, gif_encapcheck, (struct protosw *)&in6_gif_protosw, sc); if (sc->encap_cookie6 == NULL) { @@ -400,14 +391,9 @@ gif_clone_create(struct if_clone *ifc, uint32_t unit, __unused void *params) error = ENOBUFS; goto done; } -#endif sc->gif_called = 0; ifnet_set_mtu(sc->gif_if, GIF_MTU); ifnet_set_flags(sc->gif_if, IFF_POINTOPOINT | IFF_MULTICAST, 0xffff); -#if 0 - /* turn off ingress filter */ - sc->gif_if.if_flags |= IFF_LINK2; -#endif sc->gif_flags |= IFGIF_DETACHING; error = ifnet_attach(sc->gif_if, NULL); if (error != 0) { @@ -424,9 +410,6 @@ gif_clone_create(struct if_clone *ifc, uint32_t unit, __unused void *params) if_clone_softc_deallocate(&gif_cloner, sc); goto done; } -#if CONFIG_MACF_NET - mac_ifnet_label_init(&sc->gif_if); -#endif bpfattach(sc->gif_if, DLT_NULL, sizeof(u_int)); sc->gif_flags &= ~IFGIF_DETACHING; TAILQ_INSERT_TAIL(&gifs, sc, gif_link); @@ -464,9 +447,7 @@ gif_remove(struct ifnet *ifp) ngif--; gif_delete_tunnel(sc); -#ifdef INET6 encap_cookie6 = sc->encap_cookie6; -#endif #ifdef INET encap_cookie4 = sc->encap_cookie4; #endif @@ -546,10 +527,8 @@ gif_encapcheck( case IPPROTO_IPV4: break; #endif -#if INET6 case IPPROTO_IPV6: break; -#endif default: goto done; } @@ -565,14 +544,14 @@ gif_encapcheck( } error = gif_encapcheck4(m, off, proto, arg); #endif -#if INET6 + OS_FALLTHROUGH; case 6: if (sc->gif_psrc->sa_family != AF_INET6 || sc->gif_pdst->sa_family != AF_INET6) { goto done; } error = gif_encapcheck6(m, off, proto, arg); -#endif + OS_FALLTHROUGH; default: goto done; } @@ -631,11 +610,9 @@ gif_output( error = in_gif_output(ifp, sc->gif_proto, m, NULL); break; #endif -#if INET6 case AF_INET6: error = in6_gif_output(ifp, sc->gif_proto, m, NULL); break; -#endif default: error = ENETDOWN; break; @@ 
-735,10 +712,8 @@ gif_ioctl( #endif /* SIOCSIFMTU */ case SIOCSIFPHYADDR: -#if INET6 case SIOCSIFPHYADDR_IN6_32: case SIOCSIFPHYADDR_IN6_64: -#endif /* INET6 */ switch (cmd) { #if INET case SIOCSIFPHYADDR: @@ -748,7 +723,6 @@ gif_ioctl( &(((struct in_aliasreq *)data)->ifra_dstaddr); break; #endif -#if INET6 case SIOCSIFPHYADDR_IN6_32: { struct in6_aliasreq_32 *ifra_32 = (struct in6_aliasreq_32 *)data; @@ -766,7 +740,6 @@ gif_ioctl( dst = (struct sockaddr *)&ifra_64->ifra_dstaddr; break; } -#endif } /* sa_family must be equal */ @@ -783,13 +756,11 @@ gif_ioctl( } break; #endif -#if INET6 case AF_INET6: if (src->sa_len != sizeof(struct sockaddr_in6)) { return EINVAL; } break; -#endif default: return EAFNOSUPPORT; } @@ -801,13 +772,11 @@ gif_ioctl( } break; #endif -#if INET6 case AF_INET6: if (dst->sa_len != sizeof(struct sockaddr_in6)) { return EINVAL; } break; -#endif default: return EAFNOSUPPORT; } @@ -819,14 +788,12 @@ gif_ioctl( break; } return EAFNOSUPPORT; -#if INET6 case SIOCSIFPHYADDR_IN6_32: case SIOCSIFPHYADDR_IN6_64: if (src->sa_family == AF_INET6) { break; } return EAFNOSUPPORT; -#endif /* INET6 */ } #define GIF_ORDERED_LOCK(sc, sc2) \ @@ -883,11 +850,9 @@ gif_ioctl( /* can't configure multiple multi-dest interfaces */ #define multidest(x) \ (((struct sockaddr_in *)(void *)(x))->sin_addr.s_addr == INADDR_ANY) -#if INET6 #define multidest6(x) \ (IN6_IS_ADDR_UNSPECIFIED(&((struct sockaddr_in6 *) \ (void *)(x))->sin6_addr)) -#endif if (dst->sa_family == AF_INET && multidest(dst) && multidest(sc2->gif_pdst)) { GIF_ORDERED_UNLOCK(sc, sc2); @@ -895,7 +860,6 @@ gif_ioctl( ifnet_head_done(); goto bad; } -#if INET6 if (dst->sa_family == AF_INET6 && multidest6(dst) && multidest6(sc2->gif_pdst)) { GIF_ORDERED_UNLOCK(sc, sc2); @@ -903,7 +867,6 @@ gif_ioctl( ifnet_head_done(); goto bad; } -#endif GIF_ORDERED_UNLOCK(sc, sc2); } ifnet_head_done(); @@ -957,9 +920,7 @@ gif_ioctl( #endif case SIOCGIFPSRCADDR: -#if INET6 case SIOCGIFPSRCADDR_IN6: -#endif /* INET6 */ GIF_LOCK(sc); if (sc->gif_psrc == NULL) { GIF_UNLOCK(sc); @@ -974,13 +935,11 @@ gif_ioctl( size = sizeof(ifr->ifr_addr); break; #endif /* INET */ -#if INET6 case SIOCGIFPSRCADDR_IN6: dst = (struct sockaddr *) &(((struct in6_ifreq *)data)->ifr_addr); size = sizeof(((struct in6_ifreq *)data)->ifr_addr); break; -#endif /* INET6 */ default: GIF_UNLOCK(sc); error = EADDRNOTAVAIL; @@ -995,9 +954,7 @@ gif_ioctl( break; case SIOCGIFPDSTADDR: -#if INET6 case SIOCGIFPDSTADDR_IN6: -#endif /* INET6 */ GIF_LOCK(sc); if (sc->gif_pdst == NULL) { GIF_UNLOCK(sc); @@ -1012,13 +969,11 @@ gif_ioctl( size = sizeof(ifr->ifr_addr); break; #endif /* INET */ -#if INET6 case SIOCGIFPDSTADDR_IN6: dst = (struct sockaddr *) &(((struct in6_ifreq *)data)->ifr_addr); size = sizeof(((struct in6_ifreq *)data)->ifr_addr); break; -#endif /* INET6 */ default: error = EADDRNOTAVAIL; GIF_UNLOCK(sc); diff --git a/bsd/net/if_gif.h b/bsd/net/if_gif.h index 19460fcc3..8534c7ac9 100644 --- a/bsd/net/if_gif.h +++ b/bsd/net/if_gif.h @@ -82,9 +82,7 @@ struct gif_softc { #endif union { struct route gifscr_ro; /* xxx */ -#if INET6 struct route_in6 gifscr_ro6; /* xxx */ -#endif } gifsc_gifscr; int gif_flags; #define IFGIF_DETACHING 0x1 @@ -104,9 +102,7 @@ struct gif_softc { LCK_MTX_ASSERT_OWNED) #define gif_ro gifsc_gifscr.gifscr_ro -#if INET6 #define gif_ro6 gifsc_gifscr.gifscr_ro6 -#endif #endif /* BSD_KERNEL_PRIVATE */ diff --git a/bsd/net/if_headless.c b/bsd/net/if_headless.c index 02c935096..212f6208d 100644 --- a/bsd/net/if_headless.c +++ b/bsd/net/if_headless.c @@ -1,5 +1,5 @@ 
/* - * Copyright (c) 2019 Apple Inc. All rights reserved. + * Copyright (c) 2019-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * diff --git a/bsd/net/if_ipsec.c b/bsd/net/if_ipsec.c index 0d1af9f9d..eb5499819 100644 --- a/bsd/net/if_ipsec.c +++ b/bsd/net/if_ipsec.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012-2019 Apple Inc. All rights reserved. + * Copyright (c) 2012-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -62,6 +62,7 @@ extern int net_qos_policy_restricted; extern int net_qos_policy_restrict_avapps; /* Kernel Control functions */ +static errno_t ipsec_ctl_setup(u_int32_t *unit, void **unitinfo); static errno_t ipsec_ctl_bind(kern_ctl_ref kctlref, struct sockaddr_ctl *sac, void **unitinfo); static errno_t ipsec_ctl_connect(kern_ctl_ref kctlref, struct sockaddr_ctl *sac, @@ -266,11 +267,8 @@ ipsec_flag_isset(struct ipsec_pcb *pcb, uint32_t flag) TAILQ_HEAD(ipsec_list, ipsec_pcb) ipsec_head; -#define IPSEC_PCB_ZONE_MAX 32 -#define IPSEC_PCB_ZONE_NAME "net.if_ipsec" - -static unsigned int ipsec_pcb_size; /* size of zone element */ -static struct zone *ipsec_pcb_zone; /* zone for ipsec_pcb */ +static ZONE_DECLARE(ipsec_pcb_zone, "net.if_ipsec", + sizeof(struct ipsec_pcb), ZC_ZFREE_CLEARMEM); #define IPSECQ_MAXLEN 256 @@ -351,16 +349,7 @@ errno_t ipsec_register_control(void) { struct kern_ctl_reg kern_ctl; - errno_t result = 0; - - ipsec_pcb_size = sizeof(struct ipsec_pcb); - ipsec_pcb_zone = zinit(ipsec_pcb_size, - IPSEC_PCB_ZONE_MAX * ipsec_pcb_size, - 0, IPSEC_PCB_ZONE_NAME); - if (ipsec_pcb_zone == NULL) { - os_log_error(OS_LOG_DEFAULT, "ipsec_register_control - zinit(ipsec_pcb) failed"); - return ENOMEM; - } + errno_t result = 0; #if IPSEC_NEXUS ipsec_register_nexus(); @@ -371,9 +360,10 @@ ipsec_register_control(void) bzero(&kern_ctl, sizeof(kern_ctl)); strlcpy(kern_ctl.ctl_name, IPSEC_CONTROL_NAME, sizeof(kern_ctl.ctl_name)); kern_ctl.ctl_name[sizeof(kern_ctl.ctl_name) - 1] = 0; - kern_ctl.ctl_flags = CTL_FLAG_PRIVILEGED; /* Require root */ + kern_ctl.ctl_flags = CTL_FLAG_PRIVILEGED | CTL_FLAG_REG_SETUP; /* Require root */ kern_ctl.ctl_sendsize = 64 * 1024; kern_ctl.ctl_recvsize = 64 * 1024; + kern_ctl.ctl_setup = ipsec_ctl_setup; kern_ctl.ctl_bind = ipsec_ctl_bind; kern_ctl.ctl_connect = ipsec_ctl_connect; kern_ctl.ctl_disconnect = ipsec_ctl_disconnect; @@ -896,7 +886,7 @@ ipsec_kpipe_sync_rx(kern_nexus_provider_t nxprov, kern_nexus_t nexus, kern_buflet_t tx_buf = kern_packet_get_next_buflet(tx_ph, NULL); VERIFY(tx_buf != NULL); - uint8_t *tx_baddr = kern_buflet_get_object_address(tx_buf); + uint8_t *tx_baddr = kern_buflet_get_data_address(tx_buf); VERIFY(tx_baddr != NULL); tx_baddr += kern_buflet_get_data_offset(tx_buf); @@ -955,7 +945,7 @@ ipsec_kpipe_sync_rx(kern_nexus_provider_t nxprov, kern_nexus_t nexus, // Fillout rx packet kern_buflet_t rx_buf = kern_packet_get_next_buflet(rx_ph, NULL); VERIFY(rx_buf != NULL); - void *rx_baddr = kern_buflet_get_object_address(rx_buf); + void *rx_baddr = kern_buflet_get_data_address(rx_buf); VERIFY(rx_baddr != NULL); // Copy-in data from mbuf to buflet @@ -1097,11 +1087,13 @@ ipsec_netif_ring_fini(kern_nexus_provider_t nxprov, kern_nexus_t nexus, } static bool -ipsec_netif_check_policy(mbuf_t data) +ipsec_netif_check_policy(ifnet_t interface, mbuf_t data) { necp_kernel_policy_result necp_result = 0; necp_kernel_policy_result_parameter necp_result_parameter = {}; uint32_t necp_matched_policy_id = 0; + struct ip_out_args args4 = { }; + struct ip6_out_args args6 = { }; 
// This packet has been marked with IP level policy, do not mark again. if (data && data->m_pkthdr.necp_mtag.necp_policy_id >= NECP_KERNEL_POLICY_ID_FIRST_VALID_IP) { @@ -1117,12 +1109,20 @@ ipsec_netif_check_policy(mbuf_t data) u_int ip_version = ip->ip_v; switch (ip_version) { case 4: { - necp_matched_policy_id = necp_ip_output_find_policy_match(data, 0, NULL, NULL, + if (interface != NULL) { + args4.ipoa_flags |= IPOAF_BOUND_IF; + args4.ipoa_boundif = interface->if_index; + } + necp_matched_policy_id = necp_ip_output_find_policy_match(data, IP_OUTARGS, &args4, NULL, &necp_result, &necp_result_parameter); break; } case 6: { - necp_matched_policy_id = necp_ip6_output_find_policy_match(data, 0, NULL, NULL, + if (interface != NULL) { + args6.ip6oa_flags |= IP6OAF_BOUND_IF; + args6.ip6oa_boundif = interface->if_index; + } + necp_matched_policy_id = necp_ip6_output_find_policy_match(data, IPV6_OUTARGS, &args6, NULL, &necp_result, &necp_result_parameter); break; } @@ -1208,7 +1208,7 @@ ipsec_netif_sync_tx(kern_nexus_provider_t nxprov, kern_nexus_t nexus, kern_buflet_t tx_buf = kern_packet_get_next_buflet(tx_ph, NULL); VERIFY(tx_buf != NULL); - uint8_t *tx_baddr = kern_buflet_get_object_address(tx_buf); + uint8_t *tx_baddr = kern_buflet_get_data_address(tx_buf); VERIFY(tx_baddr != 0); tx_baddr += kern_buflet_get_data_offset(tx_buf); @@ -1227,7 +1227,7 @@ ipsec_netif_sync_tx(kern_nexus_provider_t nxprov, kern_nexus_t nexus, necp_mark_packet_from_ip(data, policy_id); // Check policy with NECP - if (!ipsec_netif_check_policy(data)) { + if (!ipsec_netif_check_policy(pcb->ipsec_ifp, data)) { os_log_error(OS_LOG_DEFAULT, "ipsec_netif_sync_tx %s - failed policy check\n", pcb->ipsec_ifp->if_xname); STATS_INC(nifs, NETIF_STATS_DROP); mbuf_freem(data); @@ -1563,7 +1563,7 @@ ipsec_netif_sync_rx(kern_nexus_provider_t nxprov, kern_nexus_t nexus, // Fillout rx packet kern_buflet_t rx_buf = kern_packet_get_next_buflet(rx_ph, NULL); VERIFY(rx_buf != NULL); - void *rx_baddr = kern_buflet_get_object_address(rx_buf); + void *rx_baddr = kern_buflet_get_data_address(rx_buf); VERIFY(rx_baddr != NULL); // Copy-in data from mbuf to buflet @@ -1658,7 +1658,7 @@ ipsec_netif_sync_rx(kern_nexus_provider_t nxprov, kern_nexus_t nexus, kern_buflet_t tx_buf = kern_packet_get_next_buflet(tx_ph, NULL); VERIFY(tx_buf != NULL); - uint8_t *tx_baddr = kern_buflet_get_object_address(tx_buf); + uint8_t *tx_baddr = kern_buflet_get_data_address(tx_buf); VERIFY(tx_baddr != 0); tx_baddr += kern_buflet_get_data_offset(tx_buf); @@ -1772,7 +1772,7 @@ ipsec_netif_sync_rx(kern_nexus_provider_t nxprov, kern_nexus_t nexus, // Fillout rx packet kern_buflet_t rx_buf = kern_packet_get_next_buflet(rx_ph, NULL); VERIFY(rx_buf != NULL); - void *rx_baddr = kern_buflet_get_object_address(rx_buf); + void *rx_baddr = kern_buflet_get_data_address(rx_buf); VERIFY(rx_baddr != NULL); // Copy-in data from mbuf to buflet @@ -1953,6 +1953,7 @@ ipsec_nexus_ifattach(struct ipsec_pcb *pcb, net_init.nxneti_eparams = init_params; net_init.nxneti_lladdr = NULL; net_init.nxneti_prepare = ipsec_netif_prepare; + net_init.nxneti_rx_pbufpool = pcb->ipsec_netif_pp; net_init.nxneti_tx_pbufpool = pcb->ipsec_netif_pp; err = kern_nexus_controller_alloc_net_provider_instance(controller, pcb->ipsec_nx.if_provider, @@ -2087,6 +2088,13 @@ ipsec_create_fs_provider_and_instance(struct ipsec_pcb *pcb, uint64_t rx_ring_size = pcb->ipsec_rx_fsw_ring_size; err = kern_nexus_attr_set(attr, NEXUS_ATTR_RX_SLOTS, rx_ring_size); VERIFY(err == 0); + /* + * Configure flowswitch to use 
super-packet (multi-buflet). + * This allows flowswitch to perform intra-stack packet aggregation. + */ + err = kern_nexus_attr_set(attr, NEXUS_ATTR_MAX_FRAGS, + sk_fsw_rx_agg_tcp ? NX_PBUF_FRAGS_MAX : 1); + VERIFY(err == 0); snprintf((char *)provider_name, sizeof(provider_name), "com.apple.%s.%s", type_name, ifname); @@ -2485,8 +2493,24 @@ done: /* Kernel control functions */ +static inline int +ipsec_find_by_unit(u_int32_t unit) +{ + struct ipsec_pcb *next_pcb = NULL; + int found = 0; + + TAILQ_FOREACH(next_pcb, &ipsec_head, ipsec_chain) { + if (next_pcb->ipsec_unit == unit) { + found = 1; + break; + } + } + + return found; +} + static inline void -ipsec_free_pcb(struct ipsec_pcb *pcb, bool in_list) +ipsec_free_pcb(struct ipsec_pcb *pcb, bool locked) { #if IPSEC_NEXUS mbuf_freem_list(pcb->ipsec_input_chain); @@ -2497,24 +2521,103 @@ ipsec_free_pcb(struct ipsec_pcb *pcb, bool in_list) #endif // IPSEC_NEXUS lck_mtx_destroy(&pcb->ipsec_pcb_data_move_lock, ipsec_lck_grp); lck_rw_destroy(&pcb->ipsec_pcb_lock, ipsec_lck_grp); - if (in_list) { + if (!locked) { lck_mtx_lock(&ipsec_lock); - TAILQ_REMOVE(&ipsec_head, pcb, ipsec_chain); + } + TAILQ_REMOVE(&ipsec_head, pcb, ipsec_chain); + if (!locked) { lck_mtx_unlock(&ipsec_lock); } zfree(ipsec_pcb_zone, pcb); } +static errno_t +ipsec_ctl_setup(u_int32_t *unit, void **unitinfo) +{ + if (unit == NULL || unitinfo == NULL) { + return EINVAL; + } + + lck_mtx_lock(&ipsec_lock); + + /* Find next available unit */ + if (*unit == 0) { + *unit = 1; + while (*unit != ctl_maxunit) { + if (ipsec_find_by_unit(*unit)) { + (*unit)++; + } else { + break; + } + } + if (*unit == ctl_maxunit) { + lck_mtx_unlock(&ipsec_lock); + return EBUSY; + } + } else if (ipsec_find_by_unit(*unit)) { + lck_mtx_unlock(&ipsec_lock); + return EBUSY; + } + + /* Find some open interface id */ + u_int32_t chosen_unique_id = 1; + struct ipsec_pcb *next_pcb = TAILQ_LAST(&ipsec_head, ipsec_list); + if (next_pcb != NULL) { + /* List was not empty, add one to the last item */ + chosen_unique_id = next_pcb->ipsec_unique_id + 1; + next_pcb = NULL; + + /* + * If this wrapped the id number, start looking at + * the front of the list for an unused id. 
+ */ + if (chosen_unique_id == 0) { + /* Find the next unused ID */ + chosen_unique_id = 1; + TAILQ_FOREACH(next_pcb, &ipsec_head, ipsec_chain) { + if (next_pcb->ipsec_unique_id > chosen_unique_id) { + /* We found a gap */ + break; + } + + chosen_unique_id = next_pcb->ipsec_unique_id + 1; + } + } + } + + struct ipsec_pcb *pcb = zalloc_flags(ipsec_pcb_zone, Z_WAITOK | Z_ZERO); + + *unitinfo = pcb; + pcb->ipsec_unit = *unit; + pcb->ipsec_unique_id = chosen_unique_id; + + if (next_pcb != NULL) { + TAILQ_INSERT_BEFORE(next_pcb, pcb, ipsec_chain); + } else { + TAILQ_INSERT_TAIL(&ipsec_head, pcb, ipsec_chain); + } + + lck_mtx_unlock(&ipsec_lock); + + return 0; +} + static errno_t ipsec_ctl_bind(kern_ctl_ref kctlref, struct sockaddr_ctl *sac, void **unitinfo) { - struct ipsec_pcb *pcb = zalloc(ipsec_pcb_zone); - memset(pcb, 0, sizeof(*pcb)); + if (*unitinfo == NULL) { + u_int32_t unit = 0; + (void)ipsec_ctl_setup(&unit, unitinfo); + } + + struct ipsec_pcb *pcb = (struct ipsec_pcb *)*unitinfo; + if (pcb == NULL) { + return EINVAL; + } /* Setup the protocol control block */ - *unitinfo = pcb; pcb->ipsec_ctlref = kctlref; pcb->ipsec_unit = sac->sc_unit; pcb->ipsec_output_service_class = MBUF_SC_OAM; @@ -2556,42 +2659,10 @@ ipsec_ctl_connect(kern_ctl_ref kctlref, return EINVAL; } - lck_mtx_lock(&ipsec_lock); - - /* Find some open interface id */ - u_int32_t chosen_unique_id = 1; - struct ipsec_pcb *next_pcb = TAILQ_LAST(&ipsec_head, ipsec_list); - if (next_pcb != NULL) { - /* List was not empty, add one to the last item */ - chosen_unique_id = next_pcb->ipsec_unique_id + 1; - next_pcb = NULL; - - /* - * If this wrapped the id number, start looking at - * the front of the list for an unused id. - */ - if (chosen_unique_id == 0) { - /* Find the next unused ID */ - chosen_unique_id = 1; - TAILQ_FOREACH(next_pcb, &ipsec_head, ipsec_chain) { - if (next_pcb->ipsec_unique_id > chosen_unique_id) { - /* We found a gap */ - break; - } - - chosen_unique_id = next_pcb->ipsec_unique_id + 1; - } - } - } - - pcb->ipsec_unique_id = chosen_unique_id; - - if (next_pcb != NULL) { - TAILQ_INSERT_BEFORE(next_pcb, pcb, ipsec_chain); - } else { - TAILQ_INSERT_TAIL(&ipsec_head, pcb, ipsec_chain); + /* Handle case where ipsec_ctl_setup() was called, but ipsec_ctl_bind() was not */ + if (pcb->ipsec_ctlref == NULL) { + (void)ipsec_ctl_bind(kctlref, sac, unitinfo); } - lck_mtx_unlock(&ipsec_lock); snprintf(pcb->ipsec_if_xname, sizeof(pcb->ipsec_if_xname), "ipsec%d", pcb->ipsec_unit - 1); snprintf(pcb->ipsec_unique_name, sizeof(pcb->ipsec_unique_name), "ipsecid%d", pcb->ipsec_unique_id - 1); @@ -2622,14 +2693,14 @@ ipsec_ctl_connect(kern_ctl_ref kctlref, ipsec_init.del_proto = ipsec_del_proto; ipsec_init.softc = pcb; ipsec_init.ioctl = ipsec_ioctl; - ipsec_init.detach = ipsec_detached; + ipsec_init.free = ipsec_detached; #if IPSEC_NEXUS /* We don't support kpipes without a netif */ if (pcb->ipsec_kpipe_count && !pcb->ipsec_use_netif) { result = ENOTSUP; os_log_error(OS_LOG_DEFAULT, "ipsec_ctl_connect - kpipe requires netif: failed %d\n", result); - ipsec_free_pcb(pcb, true); + ipsec_free_pcb(pcb, false); *unitinfo = NULL; return result; } @@ -2652,7 +2723,7 @@ ipsec_ctl_connect(kern_ctl_ref kctlref, if (result) { os_log_error(OS_LOG_DEFAULT, "%s: %s failed to enable channels\n", __func__, pcb->ipsec_if_xname); - ipsec_free_pcb(pcb, true); + ipsec_free_pcb(pcb, false); *unitinfo = NULL; return result; } @@ -2661,7 +2732,7 @@ ipsec_ctl_connect(kern_ctl_ref kctlref, result = ipsec_nexus_ifattach(pcb, &ipsec_init, &pcb->ipsec_ifp); 
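The ipsec_ctl_setup() hook added above (and its utun counterpart later in this patch) chooses both numbers before the pcb is published: the control unit is the first value not already present in ipsec_head, and the interface unique id is one past the current list tail, with a front-to-back gap scan only if that increment would wrap to zero. A rough user-space model of that selection logic follows; the list type, the MAXUNIT constant, and the function names are invented for illustration, and only the shape of the search mirrors the kernel code.

#include <stdint.h>
#include <stdio.h>

#define MAXUNIT 65536u /* stand-in for ctl_maxunit */

struct pcb {
	uint32_t unit;
	uint32_t unique_id; /* list is kept sorted; the tail holds the largest id */
	struct pcb *next;
};

static int
unit_in_use(const struct pcb *head, uint32_t unit)
{
	for (const struct pcb *p = head; p != NULL; p = p->next) {
		if (p->unit == unit) {
			return 1;
		}
	}
	return 0;
}

/* 0 on success with *unit filled in; -1 if the requested or next unit is unavailable. */
static int
pick_unit(const struct pcb *head, uint32_t *unit)
{
	if (*unit == 0) { /* caller wants "any": take the first free number */
		for (*unit = 1; *unit != MAXUNIT; (*unit)++) {
			if (!unit_in_use(head, *unit)) {
				return 0;
			}
		}
		return -1;
	}
	return unit_in_use(head, *unit) ? -1 : 0; /* caller asked for a specific unit */
}

static uint32_t
pick_unique_id(const struct pcb *head)
{
	uint32_t id = 1;
	const struct pcb *tail = head;

	while (tail != NULL && tail->next != NULL) {
		tail = tail->next; /* TAILQ_LAST() stand-in */
	}
	if (tail != NULL) {
		id = tail->unique_id + 1;
		if (id == 0) { /* wrapped around: scan from the front for the first gap */
			id = 1;
			for (const struct pcb *p = head; p != NULL; p = p->next) {
				if (p->unique_id > id) {
					break; /* found a hole */
				}
				id = p->unique_id + 1;
			}
		}
	}
	return id;
}

int
main(void)
{
	struct pcb b = { 2, 2, NULL };
	struct pcb a = { 1, 1, &b };
	uint32_t unit = 0;

	if (pick_unit(&a, &unit) == 0) {
		printf("unit %u, unique id %u\n", unit, pick_unique_id(&a));
	}
	return 0;
}

Because both choices happen under the same lock that links the pcb into the list, ipsec_ctl_bind() and ipsec_ctl_connect() can drop the id-picking code they used to carry, as the rest of this hunk shows.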
if (result != 0) { os_log_error(OS_LOG_DEFAULT, "ipsec_ctl_connect - ipsec_nexus_ifattach failed: %d\n", result); - ipsec_free_pcb(pcb, true); + ipsec_free_pcb(pcb, false); *unitinfo = NULL; return result; } @@ -2683,7 +2754,7 @@ ipsec_ctl_connect(kern_ctl_ref kctlref, result = ifnet_allocate_extended(&ipsec_init, &pcb->ipsec_ifp); if (result != 0) { os_log_error(OS_LOG_DEFAULT, "ipsec_ctl_connect - ifnet_allocate failed: %d\n", result); - ipsec_free_pcb(pcb, true); + ipsec_free_pcb(pcb, false); *unitinfo = NULL; return result; } @@ -2694,7 +2765,7 @@ ipsec_ctl_connect(kern_ctl_ref kctlref, if (result != 0) { os_log_error(OS_LOG_DEFAULT, "ipsec_ctl_connect - ifnet_attach failed: %d\n", result); ifnet_release(pcb->ipsec_ifp); - ipsec_free_pcb(pcb, true); + ipsec_free_pcb(pcb, false); *unitinfo = NULL; return result; } @@ -3555,8 +3626,8 @@ ipsec_output(ifnet_t interface, struct route ro; struct route_in6 ro6; int length; - struct ip *ip; - struct ip6_hdr *ip6; + struct ip *ip = NULL; + struct ip6_hdr *ip6 = NULL; struct ip_out_args ipoa; struct ip6_out_args ip6oa; int error = 0; @@ -3573,11 +3644,30 @@ ipsec_output(ifnet_t interface, // Mark the interface so NECP can evaluate tunnel policy necp_mark_packet_from_interface(data, interface); + if (data->m_len < sizeof(*ip)) { + os_log_error(OS_LOG_DEFAULT, "ipsec_output: first mbuf length shorter than IP header length: %d.\n", data->m_len); + IPSEC_STAT_INCREMENT(ipsecstat.out_inval); + error = EINVAL; + goto ipsec_output_err; + } + ip = mtod(data, struct ip *); ip_version = ip->ip_v; switch (ip_version) { case 4: { + u_int8_t ip_hlen = 0; +#ifdef _IP_VHL + ip_hlen = _IP_VHL_HL(ip->ip_vhl) << 2; +#else + ip_hlen = ip->ip_hl << 2; +#endif + if (ip_hlen < sizeof(*ip)) { + os_log_error(OS_LOG_DEFAULT, "ipsec_output: Bad ip header length %d.\n", ip_hlen); + IPSEC_STAT_INCREMENT(ipsecstat.out_inval); + error = EINVAL; + goto ipsec_output_err; + } #if IPSEC_NEXUS if (!pcb->ipsec_use_netif) #endif // IPSEC_NEXUS @@ -3650,6 +3740,12 @@ ipsec_output(ifnet_t interface, goto done; } case 6: { + if (data->m_len < sizeof(*ip6)) { + os_log_error(OS_LOG_DEFAULT, "ipsec_output: first mbuf length shorter than IPv6 header length: %d.\n", data->m_len); + IPSEC_STAT_INCREMENT(ipsec6stat.out_inval); + error = EINVAL; + goto ipsec_output_err; + } #if IPSEC_NEXUS if (!pcb->ipsec_use_netif) #endif // IPSEC_NEXUS @@ -3883,7 +3979,10 @@ ipsec_detached(ifnet_t interface) struct ipsec_pcb *pcb = ifnet_softc(interface); (void)ifnet_release(interface); + lck_mtx_lock(&ipsec_lock); ipsec_free_pcb(pcb, true); + (void)ifnet_dispose(interface); + lck_mtx_unlock(&ipsec_lock); } /* Protocol Handlers */ diff --git a/bsd/net/if_llatbl.c b/bsd/net/if_llatbl.c index 3a4174113..517629702 100644 --- a/bsd/net/if_llatbl.c +++ b/bsd/net/if_llatbl.c @@ -228,7 +228,7 @@ struct prefix_match_data { const struct sockaddr *addr; const struct sockaddr *mask; struct llentries dchain; - u_int flags; + uint16_t flags; }; static int @@ -248,7 +248,7 @@ htable_prefix_free_cb(struct lltable *llt, struct llentry *lle, void *farg) static void htable_prefix_free(struct lltable *llt, const struct sockaddr *addr, - const struct sockaddr *mask, u_int flags) + const struct sockaddr *mask, uint16_t flags) { struct llentry *lle, *next; struct prefix_match_data pmd; @@ -639,7 +639,7 @@ lltable_delete_addr(struct lltable *llt, u_int flags, void lltable_prefix_free(int af, struct sockaddr *addr, struct sockaddr *mask, - u_int flags) + uint16_t flags) { struct lltable *llt; @@ -711,7 +711,7 @@ 
lltable_foreach_lle(struct lltable *llt, llt_foreach_cb_t *f, void *farg) } struct llentry * -lltable_alloc_entry(struct lltable *llt, u_int flags, +lltable_alloc_entry(struct lltable *llt, uint16_t flags, const struct sockaddr *l3addr) { return llt->llt_alloc_entry(llt, flags, l3addr); @@ -770,7 +770,7 @@ lla_rt_output(struct rt_msghdr *rtm, struct rt_addrinfo *info) struct ifnet *ifp; struct lltable *llt; struct llentry *lle, *lle_tmp; - u_int laflags = 0; + uint16_t laflags = 0; int error; KASSERT(dl != NULL && dl->sdl_family == AF_LINK, diff --git a/bsd/net/if_llatbl.h b/bsd/net/if_llatbl.h index 67755dc29..4968ca76b 100644 --- a/bsd/net/if_llatbl.h +++ b/bsd/net/if_llatbl.h @@ -171,18 +171,18 @@ extern lck_attr_t *lle_lock_attr; LLE_FREE_LOCKED(lle); \ } while (0) -typedef struct llentry *(llt_lookup_t)(struct lltable *, u_int flags, +typedef struct llentry *(llt_lookup_t)(struct lltable *, uint16_t flags, const struct sockaddr *l3addr); -typedef struct llentry *(llt_alloc_t)(struct lltable *, u_int flags, +typedef struct llentry *(llt_alloc_t)(struct lltable *, uint16_t flags, const struct sockaddr *l3addr); typedef void (llt_delete_t)(struct lltable *, struct llentry *); typedef void (llt_prefix_free_t)(struct lltable *, - const struct sockaddr *addr, const struct sockaddr *mask, u_int flags); + const struct sockaddr *addr, const struct sockaddr *mask, uint16_t flags); typedef int (llt_dump_entry_t)(struct lltable *, struct llentry *, struct sysctl_req *); typedef uint32_t (llt_hash_t)(const struct llentry *, uint32_t); typedef int (llt_match_prefix_t)(const struct sockaddr *, - const struct sockaddr *, u_int, struct llentry *); + const struct sockaddr *, uint16_t, struct llentry *); typedef void (llt_free_entry_t)(struct lltable *, struct llentry *); typedef void (llt_fill_sa_entry_t)(const struct llentry *, struct sockaddr *); typedef void (llt_free_tbl_t)(struct lltable *); @@ -246,7 +246,7 @@ struct lltable *lltable_allocate_htbl(uint32_t hsize); void lltable_free(struct lltable *); void lltable_link(struct lltable *llt); void lltable_prefix_free(int, struct sockaddr *, - struct sockaddr *, u_int); + struct sockaddr *, uint16_t); #if 0 void lltable_drain(int); #endif @@ -266,7 +266,7 @@ int lltable_try_set_entry_addr(struct ifnet *ifp, struct llentry *lle, int lltable_calc_llheader(struct ifnet *ifp, int family, char *lladdr, char *buf, size_t *bufsize, int *lladdr_off); void lltable_update_ifaddr(struct lltable *llt); -struct llentry *lltable_alloc_entry(struct lltable *llt, u_int flags, +struct llentry *lltable_alloc_entry(struct lltable *llt, uint16_t flags, const struct sockaddr *l4addr); void lltable_free_entry(struct lltable *llt, struct llentry *lle); int lltable_delete_addr(struct lltable *llt, u_int flags, @@ -283,7 +283,7 @@ int lltable_foreach_lle(struct lltable *llt, llt_foreach_cb_t *f, * Generic link layer address lookup function. */ static __inline struct llentry * -lla_lookup(struct lltable *llt, u_int flags, const struct sockaddr *l3addr) +lla_lookup(struct lltable *llt, uint16_t flags, const struct sockaddr *l3addr) { return llt->llt_lookup(llt, flags, l3addr); } diff --git a/bsd/net/if_llreach.c b/bsd/net/if_llreach.c index 91f6435d2..16314876d 100644 --- a/bsd/net/if_llreach.c +++ b/bsd/net/if_llreach.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011-2012 Apple Inc. All rights reserved. + * Copyright (c) 2011-2020 Apple Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -131,18 +131,13 @@ #include #include -#if INET6 #include #include -#endif /* INET6 */ -static unsigned int iflr_size; /* size of if_llreach */ -static struct zone *iflr_zone; /* zone for if_llreach */ +static ZONE_DECLARE(iflr_zone, "if_llreach", sizeof(struct if_llreach), + ZC_ZFREE_CLEARMEM); -#define IFLR_ZONE_MAX 128 /* maximum elements in zone */ -#define IFLR_ZONE_NAME "if_llreach" /* zone name */ - -static struct if_llreach *iflr_alloc(int); +static struct if_llreach *iflr_alloc(zalloc_flags_t); static void iflr_free(struct if_llreach *); static __inline int iflr_cmp(const struct if_llreach *, const struct if_llreach *); @@ -161,29 +156,7 @@ SYSCTL_NODE(_net_link_generic_system, OID_AUTO, llreach_info, /* * Link-layer reachability is based off node constants in RFC4861. */ -#if INET6 #define LL_COMPUTE_RTIME(x) ND_COMPUTE_RTIME(x) -#else -#define LL_MIN_RANDOM_FACTOR 512 /* 1024 * 0.5 */ -#define LL_MAX_RANDOM_FACTOR 1536 /* 1024 * 1.5 */ -#define LL_COMPUTE_RTIME(x) \ - (((LL_MIN_RANDOM_FACTOR * (x >> 10)) + (RandomULong() & \ - ((LL_MAX_RANDOM_FACTOR - LL_MIN_RANDOM_FACTOR) * (x >> 10)))) / 1000) -#endif /* !INET6 */ - -void -ifnet_llreach_init(void) -{ - iflr_size = sizeof(struct if_llreach); - iflr_zone = zinit(iflr_size, - IFLR_ZONE_MAX * iflr_size, 0, IFLR_ZONE_NAME); - if (iflr_zone == NULL) { - panic("%s: failed allocating %s", __func__, IFLR_ZONE_NAME); - /* NOTREACHED */ - } - zone_change(iflr_zone, Z_EXPAND, TRUE); - zone_change(iflr_zone, Z_CALLERACCT, FALSE); -} void ifnet_llreach_ifattach(struct ifnet *ifp, boolean_t reuse) @@ -291,7 +264,7 @@ ifnet_llreach_set_reachable(struct ifnet *ifp, u_int16_t llproto, void *addr, struct if_llreach * ifnet_llreach_alloc(struct ifnet *ifp, u_int16_t llproto, void *addr, - unsigned int alen, u_int64_t llreach_base) + unsigned int alen, u_int32_t llreach_base) { struct if_llreach find, *lr; struct timeval cnow; @@ -332,11 +305,8 @@ found: goto found; } - lr = iflr_alloc(M_WAITOK); - if (lr == NULL) { - lck_rw_done(&ifp->if_llreach_lock); - return NULL; - } + lr = iflr_alloc(Z_WAITOK); + IFLR_LOCK(lr); lr->lr_reqcnt++; VERIFY(lr->lr_reqcnt == 1); @@ -430,7 +400,7 @@ ifnet_llreach_up2upexp(struct if_llreach *lr, u_int64_t uptime) } int -ifnet_llreach_get_defrouter(struct ifnet *ifp, int af, +ifnet_llreach_get_defrouter(struct ifnet *ifp, sa_family_t af, struct ifnet_llreach_info *iflri) { struct radix_node_head *rnh; @@ -481,13 +451,11 @@ ifnet_llreach_get_defrouter(struct ifnet *ifp, int af, } static struct if_llreach * -iflr_alloc(int how) +iflr_alloc(zalloc_flags_t how) { - struct if_llreach *lr; + struct if_llreach *lr = zalloc_flags(iflr_zone, how | Z_ZERO); - lr = (how == M_WAITOK) ? zalloc(iflr_zone) : zalloc_noblock(iflr_zone); - if (lr != NULL) { - bzero(lr, iflr_size); + if (lr) { lck_mtx_init(&lr->lr_lock, ifnet_lock_group, ifnet_lock_attr); lr->lr_debug |= IFD_ALLOC; } diff --git a/bsd/net/if_llreach.h b/bsd/net/if_llreach.h index 42c9f6ddd..61d4acd28 100644 --- a/bsd/net/if_llreach.h +++ b/bsd/net/if_llreach.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011-2014 Apple Inc. All rights reserved. + * Copyright (c) 2011-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -61,10 +61,8 @@ struct if_llreach_info { #include #include #include -#if INET6 #include #include -#endif /* INET6 */ /* * Per-interface link-layer reachability. (Currently only for ARP/NDP/Ethernet.) 
@@ -127,11 +125,10 @@ RB_PROTOTYPE_SC_PREV(__private_extern__, ll_reach_tree, if_llreach, struct ifnet_llreach_info; /* forward declaration */ -extern void ifnet_llreach_init(void); extern void ifnet_llreach_ifattach(struct ifnet *, boolean_t); extern void ifnet_llreach_ifdetach(struct ifnet *); extern struct if_llreach *ifnet_llreach_alloc(struct ifnet *, u_int16_t, void *, - unsigned int, u_int64_t); + unsigned int, u_int32_t); extern void ifnet_llreach_free(struct if_llreach *); extern int ifnet_llreach_reachable(struct if_llreach *); extern int ifnet_llreach_reachable_delta(struct if_llreach *, u_int64_t); @@ -139,7 +136,7 @@ extern void ifnet_llreach_set_reachable(struct ifnet *, u_int16_t, void *, unsigned int); extern u_int64_t ifnet_llreach_up2calexp(struct if_llreach *, u_int64_t); extern u_int64_t ifnet_llreach_up2upexp(struct if_llreach *, u_int64_t); -extern int ifnet_llreach_get_defrouter(struct ifnet *, int, +extern int ifnet_llreach_get_defrouter(struct ifnet *, sa_family_t, struct ifnet_llreach_info *); extern void ifnet_lr2ri(struct if_llreach *, struct rt_reach_info *); extern void ifnet_lr2iflri(struct if_llreach *, struct ifnet_llreach_info *); diff --git a/bsd/net/if_loop.c b/bsd/net/if_loop.c index b76714440..ac60da58e 100644 --- a/bsd/net/if_loop.c +++ b/bsd/net/if_loop.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2013 Apple Inc. All rights reserved. + * Copyright (c) 2000-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -97,21 +97,15 @@ #include #endif -#if INET6 #if !INET #include #endif #include #include -#endif #include #include -#if CONFIG_MACF_NET -#include -#endif - #include #define LOMTU 16384 @@ -296,7 +290,6 @@ lo_output(struct ifnet *ifp, struct mbuf *m_list) for (m = m_list; m; m = m->m_nextpkt) { VERIFY(m->m_flags & M_PKTHDR); cnt++; - len += m->m_pkthdr.len; /* * Don't overwrite the rcvif field if it is in use. @@ -317,6 +310,7 @@ lo_output(struct ifnet *ifp, struct mbuf *m_list) CSUM_IP_CHECKED | CSUM_IP_VALID; m_adj(m, sizeof(struct loopback_header)); + len += m->m_pkthdr.len; LO_BPF_TAP_OUT(m); if (m->m_nextpkt == NULL) { @@ -545,10 +539,8 @@ lo_ioctl(struct ifnet *ifp, u_long cmd, void *data) case AF_INET: break; #endif -#if INET6 case AF_INET6: break; -#endif default: error = EAFNOSUPPORT; @@ -698,10 +690,6 @@ loopattach(void) ifnet_set_hdrlen(lo_ifp, sizeof(struct loopback_header)); ifnet_set_eflags(lo_ifp, IFEF_SENDLIST, IFEF_SENDLIST); -#if CONFIG_MACF_NET - mac_ifnet_label_init(ifp); -#endif - result = ifnet_attach(lo_ifp, NULL); if (result != 0) { panic("%s: couldn't attach loopback ifnet (%d)\n", @@ -712,8 +700,8 @@ loopattach(void) * Disable ECN on loopback as ECN serves no purpose and otherwise * TCP connections are subject to heuristics like SYN retransmits on RST */ - lo_ifp->if_eflags &= ~IFEF_ECN_ENABLE; - lo_ifp->if_eflags |= IFEF_ECN_DISABLE; + if_clear_eflags(lo_ifp, IFEF_ECN_ENABLE); + if_set_eflags(lo_ifp, IFEF_ECN_DISABLE); bpfattach(lo_ifp, DLT_NULL, sizeof(u_int32_t)); } diff --git a/bsd/net/if_low_power_mode.c b/bsd/net/if_low_power_mode.c index b93387b15..ff371c5ce 100644 --- a/bsd/net/if_low_power_mode.c +++ b/bsd/net/if_low_power_mode.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2019 Apple Inc. All rights reserved. + * Copyright (c) 2018-2020 Apple Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -103,13 +103,11 @@ if_low_power_evhdlr_callback(__unused struct eventhandler_entry_arg arg, if_name(ifp), event_code); } - ifnet_lock_exclusive(ifp); if (event_code == IF_LOW_POWER_EVENT_OFF) { - ifp->if_xflags &= ~IFXF_LOW_POWER; + if_clear_xflags(ifp, IFXF_LOW_POWER); } else { - ifp->if_xflags |= IFXF_LOW_POWER; + if_set_xflags(ifp, IFXF_LOW_POWER); } - ifnet_lock_done(ifp); if (event_code == IF_LOW_POWER_EVENT_ON) { atomic_add_32(&ifp->if_low_power_gencnt, 1); @@ -188,10 +186,10 @@ if_set_low_power(ifnet_t ifp, bool on) os_log(OS_LOG_DEFAULT, "%s: ifp %s low_power mode %d", __func__, if_name(ifp), on); - ifnet_lock_exclusive(ifp); - ifp->if_xflags = on ? (ifp->if_xflags | IFXF_LOW_POWER) : - (ifp->if_xflags & ~IFXF_LOW_POWER); - ifnet_lock_done(ifp); - + if (on) { + if_set_xflags(ifp, IFXF_LOW_POWER); + } else { + if_clear_xflags(ifp, IFXF_LOW_POWER); + } return error; } diff --git a/bsd/net/if_media.h b/bsd/net/if_media.h index f1ee7273a..3a11ad373 100644 --- a/bsd/net/if_media.h +++ b/bsd/net/if_media.h @@ -64,7 +64,9 @@ #ifndef _NET_IF_MEDIA_H_ #define _NET_IF_MEDIA_H_ +#ifndef DRIVERKIT #include +#endif /* DRIVERKIT */ /* * Prototypes and definitions for BSD/OS-compatible network interface diff --git a/bsd/net/if_mib.c b/bsd/net/if_mib.c index 93bd59ce7..a3ca1b89e 100644 --- a/bsd/net/if_mib.c +++ b/bsd/net/if_mib.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2019 Apple Inc. All rights reserved. + * Copyright (c) 2000-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -135,7 +135,7 @@ make_ifmibdata(struct ifnet *ifp, int *name, struct sysctl_req *req) #undef COPY ifmd.ifmd_snd_len = IFCQ_LEN(&ifp->if_snd); ifmd.ifmd_snd_maxlen = IFCQ_MAXLEN(&ifp->if_snd); - ifmd.ifmd_snd_drops = ifp->if_snd.ifcq_dropcnt.packets; + ifmd.ifmd_snd_drops = (unsigned int)ifp->if_snd.ifcq_dropcnt.packets; } error = SYSCTL_OUT(req, &ifmd, sizeof ifmd); if (error || !req->newptr) { diff --git a/bsd/net/if_pflog.c b/bsd/net/if_pflog.c index 9747bb37a..e1c0c84ca 100644 --- a/bsd/net/if_pflog.c +++ b/bsd/net/if_pflog.c @@ -85,12 +85,10 @@ #include #endif -#if INET6 #if !INET #include #endif #include -#endif /* INET6 */ #include #include diff --git a/bsd/net/if_ports_used.c b/bsd/net/if_ports_used.c index 29b05770f..586b2ed10 100644 --- a/bsd/net/if_ports_used.c +++ b/bsd/net/if_ports_used.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2018 Apple Inc. All rights reserved. + * Copyright (c) 2017-2020 Apple Inc. All rights reserved. 
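In the if_low_power_mode.c hunk above, the ifnet_lock_exclusive()/modify/ifnet_lock_done() sequences around if_xflags are replaced by if_set_xflags()/if_clear_xflags(), and the if_vlan.c changes later in this patch rely on if_set_eflags() returning the previous flag word so a racing IFEF_BOND claim can be detected. The sketch below shows that style of helper built on C11 atomics; the real helpers are only declared in if_var.h in this patch and their implementation is not shown here, so the bodies, the struct, and the flag values are assumptions used purely to illustrate the technique.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct fake_ifnet {
	_Atomic uint32_t if_eflags;
};

#define IFEF_VLAN 0x1u /* illustrative bit values, not the real IFEF_* definitions */
#define IFEF_BOND 0x2u

/* Set bits and hand back the flag word as it was before the update. */
static uint32_t
if_set_eflags(struct fake_ifnet *ifp, uint32_t bits)
{
	return atomic_fetch_or_explicit(&ifp->if_eflags, bits, memory_order_relaxed);
}

static void
if_clear_eflags(struct fake_ifnet *ifp, uint32_t bits)
{
	atomic_fetch_and_explicit(&ifp->if_eflags, ~bits, memory_order_relaxed);
}

int
main(void)
{
	struct fake_ifnet ifp = { 0 };

	/* vlan_config()-style usage: claim the parent, then back out if bond won the race. */
	uint32_t old = if_set_eflags(&ifp, IFEF_VLAN);
	if (old & IFEF_BOND) {
		if_clear_eflags(&ifp, IFEF_VLAN); /* bond got in ahead of us */
		printf("parent busy\n");
	} else {
		printf("parent claimed for VLAN\n");
	}
	return 0;
}

Returning the pre-update word is the piece vlan_config() depends on: set IFEF_VLAN first, and if the returned value already carries IFEF_BOND, undo the set and fail with EBUSY.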
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -149,10 +149,8 @@ struct net_port_entry { struct net_port_info npe_npi; }; -static struct zone *net_port_entry_zone = NULL; - -#define NET_PORT_ENTRY_ZONE_MAX 128 -#define NET_PORT_ENTRY_ZONE_NAME "net_port_entry" +static ZONE_DECLARE(net_port_entry_zone, "net_port_entry", + sizeof(struct net_port_entry), ZC_NONE); static SLIST_HEAD(net_port_entry_list, net_port_entry) net_port_entry_list = SLIST_HEAD_INITIALIZER(&net_port_entry_list); @@ -183,16 +181,6 @@ if_ports_used_init(void) lck_attributes); net_port_entry_count = 0; - net_port_entry_zone = zinit(sizeof(struct net_port_entry), - NET_PORT_ENTRY_ZONE_MAX * sizeof(struct net_port_entry), - 0, NET_PORT_ENTRY_ZONE_NAME); - if (net_port_entry_zone == NULL) { - panic("%s: zinit(%s) failed", __func__, - NET_PORT_ENTRY_ZONE_NAME); - } - zone_change(net_port_entry_zone, Z_EXPAND, TRUE); - zone_change(net_port_entry_zone, Z_CALLERACCT, FALSE); - if_ports_used_inited = 1; lck_attr_free(lck_attributes); @@ -361,7 +349,7 @@ net_port_info_add_entry(const struct net_port_info *npi) if (__improbable(is_wakeuuid_set() == false)) { if (if_ports_used_verbose > 0) { - log(LOG_ERR, "%s: wakeuuid not set %u not adding " + log(LOG_ERR, "%s: wakeuuid not set not adding " "port: %u flags: 0x%xif: %u pid: %u epid %u\n", __func__, ntohs(npi->npi_local_port), @@ -509,7 +497,7 @@ sysctl_wakeuuid_not_set_last_time SYSCTL_HANDLER_ARGS } else { struct user32_timeval tv = {}; - tv.tv_sec = wakeuuid_not_set_last_time.tv_sec; + tv.tv_sec = (user32_time_t)wakeuuid_not_set_last_time.tv_sec; tv.tv_usec = wakeuuid_not_set_last_time.tv_usec; return SYSCTL_OUT(req, &tv, sizeof(tv)); } @@ -656,11 +644,16 @@ if_ports_used_add_inpcb(const uint32_t ifindex, const struct inpcb *inp) bzero(&npi, sizeof(struct net_port_info)); - npi.npi_if_index = ifindex; + /* This is unlikely to happen but better be safe than sorry */ + if (ifindex > UINT16_MAX) { + os_log(OS_LOG_DEFAULT, "%s: ifindex %u too big\n", __func__, ifindex); + return; + } + npi.npi_if_index = (uint16_t)ifindex; npi.npi_flags |= NPIF_SOCKET; - npi.npi_timestamp.tv_sec = wakeuiid_last_check.tv_sec; + npi.npi_timestamp.tv_sec = (int32_t)wakeuiid_last_check.tv_sec; npi.npi_timestamp.tv_usec = wakeuiid_last_check.tv_usec; if (SOCK_PROTO(so) == IPPROTO_TCP) { @@ -700,6 +693,14 @@ if_ports_used_add_inpcb(const uint32_t ifindex, const struct inpcb *inp) &inp->in6p_laddr, sizeof(struct in6_addr)); memcpy(&npi.npi_foreign_addr_in6, &inp->in6p_faddr, sizeof(struct in6_addr)); + + /* Clear the embedded scope ID */ + if (IN6_IS_ADDR_LINKLOCAL(&npi.npi_local_addr_in6)) { + npi.npi_local_addr_in6.s6_addr16[1] = 0; + } + if (IN6_IS_ADDR_LINKLOCAL(&npi.npi_foreign_addr_in6)) { + npi.npi_foreign_addr_in6.s6_addr16[1] = 0; + } } npi.npi_owner_pid = so->last_pid; diff --git a/bsd/net/if_stf.c b/bsd/net/if_stf.c index 5ceab5438..8695b9ae7 100644 --- a/bsd/net/if_stf.c +++ b/bsd/net/if_stf.c @@ -146,10 +146,6 @@ #include -#if CONFIG_MACF_NET -#include -#endif - #define GET_V4(x) ((const struct in_addr *)(const void *)(&(x)->s6_addr16[1])) static lck_grp_t *stf_mtx_grp; @@ -372,10 +368,6 @@ stfattach(void) ifnet_set_flags(sc->sc_if, IFF_LINK2, IFF_LINK2); #endif -#if CONFIG_MACF_NET - mac_ifnet_label_init(&sc->sc_if); -#endif - error = ifnet_attach(sc->sc_if, NULL); if (error != 0) { printf("stfattach: ifnet_attach returned error=%d\n", error); @@ -792,10 +784,6 @@ in_stf_input( ifp = sc->sc_if; -#if MAC_LABEL - mac_mbuf_label_associate_ifnet(ifp, m); -#endif - /* * perform sanity check 
against outer src/dst. * for source, perform ingress filter as well. diff --git a/bsd/net/if_utun.c b/bsd/net/if_utun.c index dd076948f..e98d9ec87 100644 --- a/bsd/net/if_utun.c +++ b/bsd/net/if_utun.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2019 Apple Inc. All rights reserved. + * Copyright (c) 2008-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -127,6 +127,7 @@ struct utun_pcb { }; /* Kernel Control functions */ +static errno_t utun_ctl_setup(u_int32_t *unit, void **unitinfo); static errno_t utun_ctl_bind(kern_ctl_ref kctlref, struct sockaddr_ctl *sac, void **unitinfo); static errno_t utun_ctl_connect(kern_ctl_ref kctlref, struct sockaddr_ctl *sac, @@ -251,11 +252,8 @@ static lck_mtx_t utun_lock; TAILQ_HEAD(utun_list, utun_pcb) utun_head; -#define UTUN_PCB_ZONE_MAX 32 -#define UTUN_PCB_ZONE_NAME "net.if_utun" - -static unsigned int utun_pcb_size; /* size of zone element */ -static struct zone *utun_pcb_zone; /* zone for utun_pcb */ +static ZONE_DECLARE(utun_pcb_zone, "net.if_utun", + sizeof(struct utun_pcb), ZC_ZFREE_CLEARMEM); #if UTUN_NEXUS @@ -413,7 +411,7 @@ utun_netif_sync_tx(kern_nexus_provider_t nxprov, kern_nexus_t nexus, VERIFY(tx_buf != NULL); /* tx_baddr is the absolute buffer address */ - uint8_t *tx_baddr = kern_buflet_get_object_address(tx_buf); + uint8_t *tx_baddr = kern_buflet_get_data_address(tx_buf); VERIFY(tx_baddr != 0); bpf_tap_packet_out(pcb->utun_ifp, DLT_RAW, tx_ph, NULL, 0); @@ -651,7 +649,7 @@ utun_netif_sync_rx(kern_nexus_provider_t nxprov, kern_nexus_t nexus, // Fillout rx packet kern_buflet_t rx_buf = kern_packet_get_next_buflet(rx_ph, NULL); VERIFY(rx_buf != NULL); - void *rx_baddr = kern_buflet_get_object_address(rx_buf); + void *rx_baddr = kern_buflet_get_data_address(rx_buf); VERIFY(rx_baddr != NULL); // Copy-in data from mbuf to buflet @@ -737,7 +735,7 @@ utun_netif_sync_rx(kern_nexus_provider_t nxprov, kern_nexus_t nexus, kern_buflet_t tx_buf = kern_packet_get_next_buflet(tx_ph, NULL); VERIFY(tx_buf != NULL); - uint8_t *tx_baddr = kern_buflet_get_object_address(tx_buf); + uint8_t *tx_baddr = kern_buflet_get_data_address(tx_buf); VERIFY(tx_baddr != 0); tx_baddr += kern_buflet_get_data_offset(tx_buf); @@ -763,7 +761,7 @@ utun_netif_sync_rx(kern_nexus_provider_t nxprov, kern_nexus_t nexus, // Fillout rx packet kern_buflet_t rx_buf = kern_packet_get_next_buflet(rx_ph, NULL); VERIFY(rx_buf != NULL); - void *rx_baddr = kern_buflet_get_object_address(rx_buf); + void *rx_baddr = kern_buflet_get_data_address(rx_buf); VERIFY(rx_baddr != NULL); // Copy-in data from tx to rx @@ -906,6 +904,7 @@ utun_nexus_ifattach(struct utun_pcb *pcb, net_init.nxneti_eparams = init_params; net_init.nxneti_lladdr = NULL; net_init.nxneti_prepare = utun_netif_prepare; + net_init.nxneti_rx_pbufpool = pcb->utun_netif_pp; net_init.nxneti_tx_pbufpool = pcb->utun_netif_pp; err = kern_nexus_controller_alloc_net_provider_instance(controller, pcb->utun_nx.if_provider, @@ -1037,6 +1036,13 @@ utun_create_fs_provider_and_instance(struct utun_pcb *pcb, uint64_t rx_ring_size = pcb->utun_rx_fsw_ring_size; err = kern_nexus_attr_set(attr, NEXUS_ATTR_RX_SLOTS, rx_ring_size); VERIFY(err == 0); + /* + * Configure flowswitch to use super-packet (multi-buflet). + * This allows flowswitch to perform intra-stack packet aggregation. + */ + err = kern_nexus_attr_set(attr, NEXUS_ATTR_MAX_FRAGS, + sk_fsw_rx_agg_tcp ? 
NX_PBUF_FRAGS_MAX : 1); + VERIFY(err == 0); snprintf((char *)provider_name, sizeof(provider_name), "com.apple.%s.%s", type_name, ifname); @@ -1391,15 +1397,6 @@ utun_register_control(void) struct kern_ctl_reg kern_ctl; errno_t result = 0; - utun_pcb_size = sizeof(struct utun_pcb); - utun_pcb_zone = zinit(utun_pcb_size, - UTUN_PCB_ZONE_MAX * utun_pcb_size, - 0, UTUN_PCB_ZONE_NAME); - if (utun_pcb_zone == NULL) { - os_log_error(OS_LOG_DEFAULT, "utun_register_control - zinit(utun_pcb) failed"); - return ENOMEM; - } - #if UTUN_NEXUS utun_register_nexus(); #endif // UTUN_NEXUS @@ -1409,9 +1406,10 @@ utun_register_control(void) bzero(&kern_ctl, sizeof(kern_ctl)); strlcpy(kern_ctl.ctl_name, UTUN_CONTROL_NAME, sizeof(kern_ctl.ctl_name)); kern_ctl.ctl_name[sizeof(kern_ctl.ctl_name) - 1] = 0; - kern_ctl.ctl_flags = CTL_FLAG_PRIVILEGED | CTL_FLAG_REG_EXTENDED; /* Require root */ + kern_ctl.ctl_flags = CTL_FLAG_PRIVILEGED | CTL_FLAG_REG_SETUP | CTL_FLAG_REG_EXTENDED; /* Require root */ kern_ctl.ctl_sendsize = 512 * 1024; kern_ctl.ctl_recvsize = 512 * 1024; + kern_ctl.ctl_setup = utun_ctl_setup; kern_ctl.ctl_bind = utun_ctl_bind; kern_ctl.ctl_connect = utun_ctl_connect; kern_ctl.ctl_disconnect = utun_ctl_disconnect; @@ -1456,32 +1454,127 @@ utun_register_control(void) /* Kernel control functions */ +static inline int +utun_find_by_unit(u_int32_t unit) +{ + struct utun_pcb *next_pcb = NULL; + int found = 0; + + TAILQ_FOREACH(next_pcb, &utun_head, utun_chain) { + if (next_pcb->utun_unit == unit) { + found = 1; + break; + } + } + + return found; +} + static inline void -utun_free_pcb(struct utun_pcb *pcb, bool in_list) +utun_free_pcb(struct utun_pcb *pcb, bool locked) { -#ifdef UTUN_NEXUS +#if UTUN_NEXUS mbuf_freem_list(pcb->utun_input_chain); pcb->utun_input_chain_count = 0; lck_mtx_destroy(&pcb->utun_input_chain_lock, utun_lck_grp); #endif // UTUN_NEXUS lck_rw_destroy(&pcb->utun_pcb_lock, utun_lck_grp); - if (in_list) { + if (!locked) { lck_mtx_lock(&utun_lock); - TAILQ_REMOVE(&utun_head, pcb, utun_chain); + } + TAILQ_REMOVE(&utun_head, pcb, utun_chain); + if (!locked) { lck_mtx_unlock(&utun_lock); } zfree(utun_pcb_zone, pcb); } +static errno_t +utun_ctl_setup(u_int32_t *unit, void **unitinfo) +{ + if (unit == NULL || unitinfo == NULL) { + return EINVAL; + } + + lck_mtx_lock(&utun_lock); + + /* Find next available unit */ + if (*unit == 0) { + *unit = 1; + while (*unit != ctl_maxunit) { + if (utun_find_by_unit(*unit)) { + (*unit)++; + } else { + break; + } + } + if (*unit == ctl_maxunit) { + lck_mtx_unlock(&utun_lock); + return EBUSY; + } + } else if (utun_find_by_unit(*unit)) { + lck_mtx_unlock(&utun_lock); + return EBUSY; + } + + /* Find some open interface id */ + u_int32_t chosen_unique_id = 1; + struct utun_pcb *next_pcb = TAILQ_LAST(&utun_head, utun_list); + if (next_pcb != NULL) { + /* List was not empty, add one to the last item */ + chosen_unique_id = next_pcb->utun_unique_id + 1; + next_pcb = NULL; + + /* + * If this wrapped the id number, start looking at + * the front of the list for an unused id. 
+ */ + if (chosen_unique_id == 0) { + /* Find the next unused ID */ + chosen_unique_id = 1; + TAILQ_FOREACH(next_pcb, &utun_head, utun_chain) { + if (next_pcb->utun_unique_id > chosen_unique_id) { + /* We found a gap */ + break; + } + + chosen_unique_id = next_pcb->utun_unique_id + 1; + } + } + } + + struct utun_pcb *pcb = zalloc_flags(utun_pcb_zone, Z_WAITOK | Z_ZERO); + + *unitinfo = pcb; + pcb->utun_unit = *unit; + pcb->utun_unique_id = chosen_unique_id; + + if (next_pcb != NULL) { + TAILQ_INSERT_BEFORE(next_pcb, pcb, utun_chain); + } else { + TAILQ_INSERT_TAIL(&utun_head, pcb, utun_chain); + } + + lck_mtx_unlock(&utun_lock); + + return 0; +} + static errno_t utun_ctl_bind(kern_ctl_ref kctlref, struct sockaddr_ctl *sac, void **unitinfo) { - struct utun_pcb *pcb = zalloc(utun_pcb_zone); - memset(pcb, 0, sizeof(*pcb)); + if (*unitinfo == NULL) { + u_int32_t unit = 0; + (void)utun_ctl_setup(&unit, unitinfo); + } + + struct utun_pcb *pcb = (struct utun_pcb *)*unitinfo; + if (pcb == NULL) { + return EINVAL; + } - *unitinfo = pcb; pcb->utun_ctlref = kctlref; pcb->utun_unit = sac->sc_unit; pcb->utun_max_pending_packets = 1; @@ -1516,47 +1609,17 @@ utun_ctl_connect(kern_ctl_ref kctlref, } struct utun_pcb *pcb = *unitinfo; - - lck_mtx_lock(&utun_lock); - - /* Find some open interface id */ - u_int32_t chosen_unique_id = 1; - struct utun_pcb *next_pcb = TAILQ_LAST(&utun_head, utun_list); - if (next_pcb != NULL) { - /* List was not empty, add one to the last item */ - chosen_unique_id = next_pcb->utun_unique_id + 1; - next_pcb = NULL; - - /* - * If this wrapped the id number, start looking at - * the front of the list for an unused id. - */ - if (chosen_unique_id == 0) { - /* Find the next unused ID */ - chosen_unique_id = 1; - TAILQ_FOREACH(next_pcb, &utun_head, utun_chain) { - if (next_pcb->utun_unique_id > chosen_unique_id) { - /* We found a gap */ - break; - } - - chosen_unique_id = next_pcb->utun_unique_id + 1; - } - } + if (pcb == NULL) { + return EINVAL; } - pcb->utun_unique_id = chosen_unique_id; - - if (next_pcb != NULL) { - TAILQ_INSERT_BEFORE(next_pcb, pcb, utun_chain); - } else { - TAILQ_INSERT_TAIL(&utun_head, pcb, utun_chain); + /* Handle case where utun_ctl_setup() was called, but ipsec_ctl_bind() was not */ + if (pcb->utun_ctlref == NULL) { + (void)utun_ctl_bind(kctlref, sac, unitinfo); } - lck_mtx_unlock(&utun_lock); snprintf(pcb->utun_if_xname, sizeof(pcb->utun_if_xname), "utun%d", pcb->utun_unit - 1); snprintf(pcb->utun_unique_name, sizeof(pcb->utun_unique_name), "utunid%d", pcb->utun_unique_id - 1); - os_log(OS_LOG_DEFAULT, "utun_ctl_connect: creating interface %s (id %s)\n", pcb->utun_if_xname, pcb->utun_unique_name); /* Create the interface */ bzero(&utun_init, sizeof(utun_init)); @@ -1585,14 +1648,14 @@ utun_ctl_connect(kern_ctl_ref kctlref, utun_init.del_proto = utun_del_proto; utun_init.softc = pcb; utun_init.ioctl = utun_ioctl; - utun_init.detach = utun_detached; + utun_init.free = utun_detached; #if UTUN_NEXUS if (pcb->utun_use_netif) { result = utun_nexus_ifattach(pcb, &utun_init, &pcb->utun_ifp); if (result != 0) { os_log_error(OS_LOG_DEFAULT, "utun_ctl_connect - utun_nexus_ifattach failed: %d\n", result); - utun_free_pcb(pcb, true); + utun_free_pcb(pcb, false); *unitinfo = NULL; return result; } @@ -1601,6 +1664,8 @@ utun_ctl_connect(kern_ctl_ref kctlref, result = utun_flowswitch_attach(pcb); if (result != 0) { os_log_error(OS_LOG_DEFAULT, "utun_ctl_connect - utun_flowswitch_attach failed: %d\n", result); + // Do not call utun_free_pcb(). 
We will be attached already, and will be freed later + // in utun_detached(). *unitinfo = NULL; return result; } @@ -1618,7 +1683,7 @@ utun_ctl_connect(kern_ctl_ref kctlref, result = ifnet_allocate_extended(&utun_init, &pcb->utun_ifp); if (result != 0) { os_log_error(OS_LOG_DEFAULT, "utun_ctl_connect - ifnet_allocate failed: %d\n", result); - utun_free_pcb(pcb, true); + utun_free_pcb(pcb, false); *unitinfo = NULL; return result; } @@ -1643,7 +1708,7 @@ utun_ctl_connect(kern_ctl_ref kctlref, os_log_error(OS_LOG_DEFAULT, "utun_ctl_connect - ifnet_attach failed: %d\n", result); /* Release reference now since attach failed */ ifnet_release(pcb->utun_ifp); - utun_free_pcb(pcb, true); + utun_free_pcb(pcb, false); *unitinfo = NULL; return result; } @@ -1965,27 +2030,14 @@ utun_ctl_setopt(__unused kern_ctl_ref kctlref, case UTUN_OPT_FLAGS: if (len != sizeof(u_int32_t)) { result = EMSGSIZE; - } else { - if (pcb->utun_ifp == NULL) { - // Only can set after connecting - result = EINVAL; - break; - } -#if UTUN_NEXUS - if (pcb->utun_use_netif) { - pcb->utun_flags = *(u_int32_t *)data; - } else -#endif // UTUN_NEXUS - { - u_int32_t old_flags = pcb->utun_flags; - pcb->utun_flags = *(u_int32_t *)data; - if (((old_flags ^ pcb->utun_flags) & UTUN_FLAGS_ENABLE_PROC_UUID)) { - // If UTUN_FLAGS_ENABLE_PROC_UUID flag changed, update bpf - bpfdetach(pcb->utun_ifp); - bpfattach(pcb->utun_ifp, DLT_NULL, UTUN_HEADER_SIZE(pcb)); - } - } + break; + } + if (pcb->utun_ifp != NULL) { + // Only can set before connecting + result = EINVAL; + break; } + pcb->utun_flags = *(u_int32_t *)data; break; case UTUN_OPT_EXT_IFDATA_STATS: @@ -2727,7 +2779,10 @@ utun_detached(ifnet_t interface) { struct utun_pcb *pcb = ifnet_softc(interface); (void)ifnet_release(interface); + lck_mtx_lock(&utun_lock); utun_free_pcb(pcb, true); + (void)ifnet_dispose(interface); + lck_mtx_unlock(&utun_lock); } /* Protocol Handlers */ @@ -3082,7 +3137,7 @@ utun_kpipe_sync_tx(kern_nexus_provider_t nxprov, kern_nexus_t nexus, kern_buflet_t tx_buf = kern_packet_get_next_buflet(tx_ph, NULL); VERIFY(tx_buf != NULL); - uint8_t *tx_baddr = kern_buflet_get_object_address(tx_buf); + uint8_t *tx_baddr = kern_buflet_get_data_address(tx_buf); VERIFY(tx_baddr != 0); tx_baddr += kern_buflet_get_data_offset(tx_buf); @@ -3221,7 +3276,7 @@ utun_kpipe_sync_rx(kern_nexus_provider_t nxprov, kern_nexus_t nexus, kern_buflet_t tx_buf = kern_packet_get_next_buflet(tx_ph, NULL); VERIFY(tx_buf != NULL); - uint8_t *tx_baddr = kern_buflet_get_object_address(tx_buf); + uint8_t *tx_baddr = kern_buflet_get_data_address(tx_buf); VERIFY(tx_baddr != NULL); tx_baddr += kern_buflet_get_data_offset(tx_buf); @@ -3249,7 +3304,7 @@ utun_kpipe_sync_rx(kern_nexus_provider_t nxprov, kern_nexus_t nexus, /* fillout packet */ rx_buf = kern_packet_get_next_buflet(rx_ph, NULL); VERIFY(rx_buf != NULL); - rx_baddr = kern_buflet_get_object_address(rx_buf); + rx_baddr = kern_buflet_get_data_address(rx_buf); VERIFY(rx_baddr != NULL); // Find family @@ -3383,7 +3438,7 @@ utun_kpipe_sync_rx(kern_nexus_provider_t nxprov, kern_nexus_t nexus, // Fillout rx packet kern_buflet_t rx_buf = kern_packet_get_next_buflet(rx_ph, NULL); VERIFY(rx_buf != NULL); - void *rx_baddr = kern_buflet_get_object_address(rx_buf); + void *rx_baddr = kern_buflet_get_data_address(rx_buf); VERIFY(rx_baddr != NULL); // Copy-in data from mbuf to buflet diff --git a/bsd/net/if_var.h b/bsd/net/if_var.h index cd7010f73..848ef43d8 100644 --- a/bsd/net/if_var.h +++ b/bsd/net/if_var.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2019 Apple Inc. 
All rights reserved. + * Copyright (c) 2000-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -76,6 +76,7 @@ #include #endif #ifdef BSD_KERN_PRIVATE +#include #include #endif @@ -297,8 +298,8 @@ struct if_description { }; struct if_bandwidths { - u_int64_t eff_bw; /* effective bandwidth */ - u_int64_t max_bw; /* maximum theoretical bandwidth */ + uint64_t eff_bw; /* effective bandwidth */ + uint64_t max_bw; /* maximum theoretical bandwidth */ }; struct if_latencies { @@ -920,6 +921,7 @@ extern boolean_t intcoproc_unrestricted; #ifdef PRIVATE #define IFXNAMSIZ (IFNAMSIZ + 8) /* external name (name + unit) */ +#define IFNET_NETWORK_ID_LEN 32 #endif #ifdef BSD_KERNEL_PRIVATE @@ -941,6 +943,8 @@ typedef errno_t (*dlil_input_func)(ifnet_t ifp, mbuf_t m_head, boolean_t poll, struct thread *tp); typedef errno_t (*dlil_output_func)(ifnet_t interface, mbuf_t data); +typedef u_int8_t ipv6_router_mode_t; + #define if_name(ifp) ifp->if_xname /* * Structure defining a network interface. @@ -988,7 +992,7 @@ struct ifnet { int if_capenable; /* enabled features & capabilities */ void *if_linkmib; /* link-type-specific MIB data */ - size_t if_linkmiblen; /* length of above data */ + uint32_t if_linkmiblen; /* length of above data */ struct if_data_internal if_data __attribute__((aligned(8))); @@ -1021,7 +1025,8 @@ struct ifnet { decl_lck_mtx_data(, if_start_lock); u_int32_t if_start_flags; /* see IFSF flags below */ u_int32_t if_start_req; - u_int16_t if_start_active; /* output is active */ + u_int8_t if_start_embryonic; + u_int8_t if_start_active; /* output is active */ u_int16_t if_start_delayed; u_int16_t if_start_delay_qlen; u_int16_t if_start_delay_idle; @@ -1061,6 +1066,7 @@ struct ifnet { u_int32_t poll_flags; #define IF_POLLF_READY 0x1 /* poll thread is ready */ #define IF_POLLF_RUNNING 0x2 /* poll thread is running/active */ +#define IF_POLLF_EMBRYONIC 0x8000 /* poll thread is being setup */ struct timespec poll_cycle; /* poll interval */ struct thread *poll_thread; @@ -1122,9 +1128,6 @@ struct ifnet { u_char *ptr; } u; } if_broadcast; -#if CONFIG_MACF_NET - struct label *if_label; /* interface MAC label */ -#endif #if PF struct pfi_kif *if_pf_kif; @@ -1152,9 +1155,7 @@ struct ifnet { #if INET struct igmp_ifinfo *if_igi; /* for IGMPv3 */ #endif /* INET */ -#if INET6 struct mld_ifinfo *if_mli; /* for MLDv2 */ -#endif /* INET6 */ struct tcpstat_local *if_tcp_stat; /* TCP specific stats */ struct udpstat_local *if_udp_stat; /* UDP specific stats */ @@ -1196,10 +1197,8 @@ struct ifnet { decl_lck_rw_data(, if_inetdata_lock); void *if_inetdata; #endif /* INET */ -#if INET6 decl_lck_rw_data(, if_inet6data_lock); void *if_inet6data; -#endif decl_lck_rw_data(, if_link_status_lock); struct if_link_status *if_link_status; struct if_interface_state if_interface_state; @@ -1212,6 +1211,11 @@ struct ifnet { uint32_t if_tcp_kao_cnt; struct netem *if_output_netem; + + ipv6_router_mode_t if_ipv6_router_mode; /* see */ + + uint8_t network_id[IFNET_NETWORK_ID_LEN]; + uint8_t network_id_len; }; /* Interface event handling declarations */ @@ -1694,11 +1698,9 @@ __private_extern__ void if_inetdata_lock_exclusive(struct ifnet *ifp); __private_extern__ void if_inetdata_lock_done(struct ifnet *ifp); #endif -#if INET6 __private_extern__ void if_inet6data_lock_shared(struct ifnet *ifp); __private_extern__ void if_inet6data_lock_exclusive(struct ifnet *ifp); __private_extern__ void if_inet6data_lock_done(struct ifnet *ifp); -#endif __private_extern__ void ifnet_head_lock_shared(void); 
__private_extern__ void ifnet_head_lock_exclusive(void); @@ -1793,11 +1795,9 @@ if_afdata_rlock(struct ifnet *ifp, int af) lck_rw_lock_shared(&ifp->if_inetdata_lock); break; #endif -#if INET6 case AF_INET6: lck_rw_lock_shared(&ifp->if_inet6data_lock); break; -#endif default: VERIFY(0); /* NOTREACHED */ @@ -1814,11 +1814,9 @@ if_afdata_runlock(struct ifnet *ifp, int af) lck_rw_done(&ifp->if_inetdata_lock); break; #endif -#if INET6 case AF_INET6: lck_rw_done(&ifp->if_inet6data_lock); break; -#endif default: VERIFY(0); /* NOTREACHED */ @@ -1835,11 +1833,9 @@ if_afdata_wlock(struct ifnet *ifp, int af) lck_rw_lock_exclusive(&ifp->if_inetdata_lock); break; #endif -#if INET6 case AF_INET6: lck_rw_lock_exclusive(&ifp->if_inet6data_lock); break; -#endif default: VERIFY(0); /* NOTREACHED */ @@ -1856,11 +1852,9 @@ if_afdata_unlock(struct ifnet *ifp, int af) lck_rw_done(&ifp->if_inetdata_lock); break; #endif -#if INET6 case AF_INET6: lck_rw_done(&ifp->if_inet6data_lock); break; -#endif default: VERIFY(0); /* NOTREACHED */ @@ -1880,11 +1874,9 @@ if_afdata_wlock_assert(struct ifnet *ifp, int af) LCK_RW_ASSERT(&ifp->if_inetdata_lock, LCK_RW_ASSERT_EXCLUSIVE); break; #endif -#if INET6 case AF_INET6: LCK_RW_ASSERT(&ifp->if_inet6data_lock, LCK_RW_ASSERT_EXCLUSIVE); break; -#endif default: VERIFY(0); /* NOTREACHED */ @@ -1904,11 +1896,9 @@ if_afdata_unlock_assert(struct ifnet *ifp, int af) LCK_RW_ASSERT(&ifp->if_inetdata_lock, LCK_RW_ASSERT_NOTHELD); break; #endif -#if INET6 case AF_INET6: LCK_RW_ASSERT(&ifp->if_inet6data_lock, LCK_RW_ASSERT_NOTHELD); break; -#endif default: VERIFY(0); /* NOTREACHED */ @@ -1928,11 +1918,9 @@ if_afdata_lock_assert(struct ifnet *ifp, int af) LCK_RW_ASSERT(&ifp->if_inetdata_lock, LCK_RW_ASSERT_HELD); break; #endif -#if INET6 case AF_INET6: LCK_RW_ASSERT(&ifp->if_inet6data_lock, LCK_RW_ASSERT_HELD); break; -#endif default: VERIFY(0); /* NOTREACHED */ @@ -1940,12 +1928,10 @@ if_afdata_lock_assert(struct ifnet *ifp, int af) return; } -#if INET6 struct in6_addr; __private_extern__ struct in6_ifaddr *ifa_foraddr6(struct in6_addr *); __private_extern__ struct in6_ifaddr *ifa_foraddr6_scoped(struct in6_addr *, unsigned int); -#endif /* INET6 */ __private_extern__ void if_data_internal_to_if_data(struct ifnet *ifp, const struct if_data_internal *if_data_int, struct if_data *if_data); @@ -1964,10 +1950,8 @@ __private_extern__ void if_copy_netif_stats(struct ifnet *ifp, __private_extern__ struct rtentry *ifnet_cached_rtlookup_inet(struct ifnet *, struct in_addr); -#if INET6 __private_extern__ struct rtentry *ifnet_cached_rtlookup_inet6(struct ifnet *, struct in6_addr *); -#endif /* INET6 */ __private_extern__ u_int32_t if_get_protolist(struct ifnet * ifp, u_int32_t *protolist, u_int32_t count); @@ -2003,13 +1987,11 @@ __private_extern__ int ifnet_set_netsignature(struct ifnet *, uint8_t, __private_extern__ int ifnet_get_netsignature(struct ifnet *, uint8_t, uint8_t *, uint16_t *, uint8_t *); -#if INET6 struct ipv6_prefix; __private_extern__ int ifnet_set_nat64prefix(struct ifnet *, struct ipv6_prefix *); __private_extern__ int ifnet_get_nat64prefix(struct ifnet *, struct ipv6_prefix *); -#endif /* Required exclusive ifnet_head lock */ __private_extern__ void ifnet_remove_from_ordered_list(struct ifnet *); @@ -2031,15 +2013,17 @@ __private_extern__ void intf_event_enqueue_nwk_wq_entry(struct ifnet *ifp, __private_extern__ void ifnet_update_stats_per_flow(struct ifnet_stats_per_flow *, struct ifnet *); __private_extern__ int if_get_tcp_kao_max(struct ifnet *); -#if !CONFIG_EMBEDDED +#if 
XNU_TARGET_OS_OSX __private_extern__ errno_t ifnet_framer_stub(struct ifnet *, struct mbuf **, const struct sockaddr *, const char *, const char *, u_int32_t *, u_int32_t *); -#endif /* !CONFIG_EMBEDDED */ +#endif /* XNU_TARGET_OS_OSX */ __private_extern__ void ifnet_enqueue_multi_setup(struct ifnet *, uint16_t, uint16_t); __private_extern__ errno_t ifnet_enqueue_mbuf(struct ifnet *, struct mbuf *, boolean_t, boolean_t *); +__private_extern__ errno_t ifnet_enqueue_mbuf_chain(struct ifnet *, + struct mbuf *, struct mbuf *, uint32_t, uint32_t, boolean_t, boolean_t *); __private_extern__ int ifnet_enqueue_netem(void *handle, pktsched_pkt_t *pkts, uint32_t n_pkts); @@ -2047,6 +2031,10 @@ extern int if_low_power_verbose; extern int if_low_power_restricted; extern void if_low_power_evhdlr_init(void); extern int if_set_low_power(struct ifnet *, bool); +extern u_int32_t if_set_eflags(ifnet_t, u_int32_t); +extern void if_clear_eflags(ifnet_t, u_int32_t); +extern u_int32_t if_set_xflags(ifnet_t, u_int32_t); +extern void if_clear_xflags(ifnet_t, u_int32_t); #endif /* BSD_KERNEL_PRIVATE */ #ifdef XNU_KERNEL_PRIVATE diff --git a/bsd/net/if_vlan.c b/bsd/net/if_vlan.c index 8509cca99..7cba0957a 100644 --- a/bsd/net/if_vlan.c +++ b/bsd/net/if_vlan.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003-2019 Apple Inc. All rights reserved. + * Copyright (c) 2003-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -879,7 +879,7 @@ vlan_parent_remove_all_vlans(struct ifnet * p) } /* the vlan parent has no more VLAN's */ - ifnet_set_eflags(p, 0, IFEF_VLAN); /* clear IFEF_VLAN */ + if_clear_eflags(p, IFEF_VLAN); /* clear IFEF_VLAN */ LIST_REMOVE(vlp, vlp_parent_list); need_vlp_release++; /* one for being in the list */ @@ -992,6 +992,7 @@ vlan_clone_create(struct if_clone *ifc, u_int32_t unit, __unused void *params) ifnet_set_addrlen(ifp, ETHER_ADDR_LEN); /* XXX ethernet specific */ ifnet_set_baudrate(ifp, 0); ifnet_set_hdrlen(ifp, ETHER_VLAN_ENCAP_LEN); + ifnet_set_mtu(ifp, ETHERMTU); error = ifnet_attach(ifp, NULL); if (error) { @@ -1262,6 +1263,7 @@ vlan_input(ifnet_t p, __unused protocol_family_t protocol, static int vlan_config(struct ifnet * ifp, struct ifnet * p, int tag) { + u_int32_t eflags; int error; int first_vlan = FALSE; ifvlan_ref ifv = NULL; @@ -1343,18 +1345,19 @@ vlan_config(struct ifnet * ifp, struct ifnet * p, int tag) ifvlan_retain(ifv); /* parent references ifv */ ifv_added = TRUE; - /* check whether bond interface is using parent interface */ - ifnet_lock_exclusive(p); + /* don't allow VLAN on interface that's part of a bond */ if ((ifnet_eflags(p) & IFEF_BOND) != 0) { - ifnet_lock_done(p); - /* don't allow VLAN over interface that's already part of a bond */ error = EBUSY; goto signal_done; } - /* prevent BOND interface from using it */ - /* Can't use ifnet_set_eflags because that would take the lock */ - p->if_eflags |= IFEF_VLAN; - ifnet_lock_done(p); + /* mark it as in use by VLAN */ + eflags = if_set_eflags(p, IFEF_VLAN); + if ((eflags & IFEF_BOND) != 0) { + /* bond got in ahead of us */ + if_clear_eflags(p, IFEF_VLAN); + error = EBUSY; + goto signal_done; + } vlan_unlock(); if (first_vlan) { @@ -1436,7 +1439,7 @@ signal_done: vlan_parent_remove_vlan(vlp, ifv); if (!vlan_parent_flags_detaching(vlp) && vlan_parent_no_vlans(vlp)) { /* the vlan parent has no more VLAN's */ - ifnet_set_eflags(p, 0, IFEF_VLAN); + if_clear_eflags(p, IFEF_VLAN); LIST_REMOVE(vlp, vlp_parent_list); /* release outside of the lock below */ need_vlp_release++; @@ -1547,7 +1550,7 @@ 
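
The new if_set_eflags/if_clear_eflags helpers declared above let vlan_config mark a parent interface with IFEF_VLAN and detect a concurrently set IFEF_BOND without holding the exclusive ifnet lock. A user-space sketch of that claim-and-back-out pattern with C11 atomics is shown below; the assumption that the setter returns the prior flag word is inferred from its use in vlan_config, and the single global flag word is purely illustrative.

#include <errno.h>
#include <stdatomic.h>
#include <stdint.h>

#define IFEF_VLAN 0x1u
#define IFEF_BOND 0x2u

static _Atomic uint32_t if_eflags;

/* Set bits and return the flag word as it was before the update. */
static uint32_t
if_set_eflags(uint32_t bits)
{
    return atomic_fetch_or(&if_eflags, bits);
}

static void
if_clear_eflags(uint32_t bits)
{
    atomic_fetch_and(&if_eflags, ~bits);
}

/* Claim the parent for VLAN use unless bonding already owns it. */
static int
vlan_claim_parent(void)
{
    uint32_t old = if_set_eflags(IFEF_VLAN);
    if (old & IFEF_BOND) {
        /* bond got in ahead of us: undo our claim */
        if_clear_eflags(IFEF_VLAN);
        return EBUSY;
    }
    return 0;
}
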
vlan_unconfig(ifvlan_ref ifv, int need_to_wait) vlan_lock(); /* return to the state we were in before SIFVLAN */ - ifnet_set_mtu(ifp, 0); + ifnet_set_mtu(ifp, ETHERMTU); ifnet_set_flags(ifp, 0, IFF_BROADCAST | IFF_MULTICAST | IFF_SIMPLEX | IFF_RUNNING); ifnet_set_offload(ifp, 0); @@ -1563,7 +1566,7 @@ vlan_unconfig(ifvlan_ref ifv, int need_to_wait) /* from this point on, no more referencing ifv */ if (last_vlan && !vlan_parent_flags_detaching(vlp)) { /* the vlan parent has no more VLAN's */ - ifnet_set_eflags(p, 0, IFEF_VLAN); + if_clear_eflags(p, IFEF_VLAN); LIST_REMOVE(vlp, vlp_parent_list); /* one for being in the list */ @@ -1877,7 +1880,8 @@ vlan_ioctl(ifnet_t ifp, u_long cmd, void * data) error = (ifv == NULL ? EOPNOTSUPP : EBUSY); break; } - need_link_event = vlan_remove(ifv, TRUE); + need_link_event = (ifv->ifv_vlp != NULL); + vlan_unconfig(ifv, TRUE); vlan_unlock(); if (need_link_event) { interface_link_event(ifp, KEV_DL_LINK_OFF); @@ -1978,23 +1982,24 @@ vlan_detached(ifnet_t p, __unused protocol_family_t protocol) static void interface_link_event(struct ifnet * ifp, u_int32_t event_code) { - struct { - struct kern_event_msg header; - u_int32_t unit; - char if_name[IFNAMSIZ]; - } event; - - bzero(&event, sizeof(event)); - event.header.total_size = sizeof(event); - event.header.vendor_code = KEV_VENDOR_APPLE; - event.header.kev_class = KEV_NETWORK_CLASS; - event.header.kev_subclass = KEV_DL_SUBCLASS; - event.header.event_code = event_code; - event.header.event_data[0] = ifnet_family(ifp); - event.unit = (u_int32_t) ifnet_unit(ifp); - strlcpy(event.if_name, ifnet_name(ifp), IFNAMSIZ); - ifnet_event(ifp, &event.header); - return; + struct event { + u_int32_t ifnet_family; + u_int32_t unit; + char if_name[IFNAMSIZ]; + }; + _Alignas(struct kern_event_msg) char message[sizeof(struct kern_event_msg) + sizeof(struct event)] = { 0 }; + struct kern_event_msg *header = (struct kern_event_msg*)message; + struct event *data = (struct event *)(header + 1); + + header->total_size = sizeof(message); + header->vendor_code = KEV_VENDOR_APPLE; + header->kev_class = KEV_NETWORK_CLASS; + header->kev_subclass = KEV_DL_SUBCLASS; + header->event_code = event_code; + data->ifnet_family = ifnet_family(ifp); + data->unit = (u_int32_t)ifnet_unit(ifp); + strlcpy(data->if_name, ifnet_name(ifp), IFNAMSIZ); + ifnet_event(ifp, header); } static void @@ -2095,7 +2100,6 @@ vlan_detach_inet(struct ifnet *ifp, protocol_family_t protocol_family) ether_detach_inet(ifp, protocol_family); } -#if INET6 static errno_t vlan_attach_inet6(struct ifnet *ifp, protocol_family_t protocol_family) { @@ -2107,7 +2111,6 @@ vlan_detach_inet6(struct ifnet *ifp, protocol_family_t protocol_family) { ether_detach_inet6(ifp, protocol_family); } -#endif /* INET6 */ __private_extern__ int vlan_family_init(void) @@ -2121,7 +2124,6 @@ vlan_family_init(void) error); goto done; } -#if INET6 error = proto_register_plumber(PF_INET6, IFNET_FAMILY_VLAN, vlan_attach_inet6, vlan_detach_inet6); if (error != 0) { @@ -2129,7 +2131,6 @@ vlan_family_init(void) error); goto done; } -#endif error = vlan_clone_attach(); if (error != 0) { printf("proto_register_plumber failed vlan_clone_attach error=%d\n", diff --git a/bsd/net/kpi_interface.c b/bsd/net/kpi_interface.c index 41fb91f69..fce85dadf 100644 --- a/bsd/net/kpi_interface.c +++ b/bsd/net/kpi_interface.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2004-2019 Apple Inc. All rights reserved. + * Copyright (c) 2004-2020 Apple Inc. All rights reserved. 
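
The rewritten interface_link_event above replaces the ad-hoc on-stack struct with a flat, suitably aligned char buffer: the kern_event_msg header sits at the front and the event payload is placed immediately after it. A compilable sketch of that layout technique follows; the two structs are reduced stand-ins for the real kernel types, and the field values are placeholders.

#include <stdalign.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Stand-ins for the kernel types; layout is illustrative only. */
struct kern_event_msg {
    uint32_t total_size;
    uint32_t event_code;
};

struct link_event_data {
    uint32_t ifnet_family;
    uint32_t unit;
    char     if_name[16];
};

int
main(void)
{
    /* One flat buffer, aligned for the header, payload placed right after it. */
    alignas(struct kern_event_msg) char message[sizeof(struct kern_event_msg) +
        sizeof(struct link_event_data)] = { 0 };
    struct kern_event_msg *header = (struct kern_event_msg *)message;
    struct link_event_data *data = (struct link_event_data *)(header + 1);

    header->total_size = sizeof(message);
    header->event_code = 1;             /* stands in for KEV_DL_LINK_ON */
    data->ifnet_family = 6;             /* placeholder family value */
    data->unit = 0;
    strncpy(data->if_name, "en0", sizeof(data->if_name) - 1);

    printf("event %u, %u bytes for %s\n", header->event_code,
        header->total_size, data->if_name);
    return 0;
}
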
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -68,9 +68,7 @@ #ifdef INET #include #endif -#ifdef INET6 #include -#endif #include #include @@ -91,11 +89,11 @@ static errno_t ifnet_allocate_common(const struct ifnet_init_params *init, #define TOUCHLASTCHANGE(__if_lastchange) { \ - (__if_lastchange)->tv_sec = net_uptime(); \ + (__if_lastchange)->tv_sec = (time_t)net_uptime(); \ (__if_lastchange)->tv_usec = 0; \ } -static errno_t ifnet_defrouter_llreachinfo(ifnet_t, int, +static errno_t ifnet_defrouter_llreachinfo(ifnet_t, sa_family_t, struct ifnet_llreach_info *); static void ifnet_kpi_free(ifnet_t); static errno_t ifnet_list_get_common(ifnet_family_t, boolean_t, ifnet_t **, @@ -121,12 +119,7 @@ ifnet_kpi_free(ifnet_t ifp) detach_func(ifp); } - if (ifp->if_broadcast.length > sizeof(ifp->if_broadcast.u.buffer)) { - FREE(ifp->if_broadcast.u.ptr, M_IFADDR); - ifp->if_broadcast.u.ptr = NULL; - } - - dlil_if_release(ifp); + ifnet_dispose(ifp); } errno_t @@ -232,20 +225,28 @@ ifnet_allocate_extended(const struct ifnet_init_eparams *einit0, } } + if (einit.type > UCHAR_MAX) { + return EINVAL; + } + + if (einit.unit > SHRT_MAX) { + return EINVAL; + } + /* Initialize external name (name + unit) */ (void) snprintf(if_xname, sizeof(if_xname), "%s%d", einit.name, einit.unit); if (einit.uniqueid == NULL) { einit.uniqueid = if_xname; - einit.uniqueid_len = strlen(if_xname); + einit.uniqueid_len = (uint32_t)strlen(if_xname); } error = dlil_if_acquire(einit.family, einit.uniqueid, einit.uniqueid_len, if_xname, &ifp); if (error == 0) { - u_int64_t br; + uint64_t br; /* * Cast ifp->if_name as non const. dlil_if_acquire sets it up @@ -253,10 +254,10 @@ ifnet_allocate_extended(const struct ifnet_init_eparams *einit0, * to write to this. */ strlcpy(__DECONST(char *, ifp->if_name), einit.name, IFNAMSIZ); - ifp->if_type = einit.type; + ifp->if_type = (u_char)einit.type; ifp->if_family = einit.family; ifp->if_subfamily = einit.subfamily; - ifp->if_unit = einit.unit; + ifp->if_unit = (short)einit.unit; ifp->if_output = einit.output; ifp->if_pre_enqueue = einit.pre_enqueue; ifp->if_start = einit.start; @@ -281,10 +282,14 @@ ifnet_allocate_extended(const struct ifnet_init_eparams *einit0, ifp->if_softc = einit.softc; ifp->if_ioctl = einit.ioctl; ifp->if_set_bpf_tap = einit.set_bpf_tap; - ifp->if_free = ifnet_kpi_free; + ifp->if_free = (einit.free != NULL) ? einit.free : ifnet_kpi_free; ifp->if_event = einit.event; ifp->if_kpi_storage = einit.detach; + /* Initialize Network ID */ + ifp->network_id_len = 0; + bzero(&ifp->network_id, sizeof(ifp->network_id)); + /* Initialize external name (name + unit) */ snprintf(__DECONST(char *, ifp->if_xname), IFXNAMSIZ, "%s", if_xname); @@ -305,11 +310,11 @@ ifnet_allocate_extended(const struct ifnet_init_eparams *einit0, * Internally, DLIL will only use the extended callback * variant which is represented by if_framer. 
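
ifnet_allocate_extended now rejects einit.type values above UCHAR_MAX and einit.unit values above SHRT_MAX before the subsequent narrowing casts to u_char and short. The sketch below shows that validate-then-narrow idiom in isolation; the struct names are invented for the example.

#include <errno.h>
#include <limits.h>
#include <stdint.h>

struct params { uint32_t type; uint32_t unit; };
struct ifdesc { unsigned char if_type; short if_unit; };

/* Validate the wide inputs before narrowing, as the patch now does. */
static int
apply_params(struct ifdesc *ifp, const struct params *p)
{
    if (p->type > UCHAR_MAX) {
        return EINVAL;
    }
    if (p->unit > SHRT_MAX) {
        return EINVAL;
    }
    ifp->if_type = (unsigned char)p->type;
    ifp->if_unit = (short)p->unit;
    return 0;
}
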
*/ -#if CONFIG_EMBEDDED +#if !XNU_TARGET_OS_OSX if (ifp->if_framer == NULL && ifp->if_framer_legacy != NULL) { ifp->if_framer = ifp->if_framer_legacy; } -#else /* !CONFIG_EMBEDDED */ +#else /* XNU_TARGET_OS_OSX */ if (ifp->if_framer == NULL && ifp->if_framer_legacy != NULL) { if (ifp->if_framer_legacy == ether_frameout) { ifp->if_framer = ether_frameout_extended; @@ -317,7 +322,7 @@ ifnet_allocate_extended(const struct ifnet_init_eparams *einit0, ifp->if_framer = ifnet_framer_stub; } } -#endif /* !CONFIG_EMBEDDED */ +#endif /* XNU_TARGET_OS_OSX */ if (ifp->if_output_bw.eff_bw > ifp->if_output_bw.max_bw) { ifp->if_output_bw.max_bw = ifp->if_output_bw.eff_bw; @@ -340,7 +345,7 @@ ifnet_allocate_extended(const struct ifnet_init_eparams *einit0, /* Pin if_baudrate to 32 bits */ br = MAX(ifp->if_output_bw.max_bw, ifp->if_input_bw.max_bw); if (br != 0) { - ifp->if_baudrate = (br > 0xFFFFFFFF) ? 0xFFFFFFFF : br; + ifp->if_baudrate = (br > UINT32_MAX) ? UINT32_MAX : (uint32_t)br; } if (ifp->if_output_lt.eff_lt > ifp->if_output_lt.max_lt) { @@ -365,21 +370,17 @@ ifnet_allocate_extended(const struct ifnet_init_eparams *einit0, ifp->if_ioctl = ifp_if_ioctl; } - ifp->if_eflags = 0; + if_clear_eflags(ifp, -1); if (ifp->if_start != NULL) { - ifp->if_eflags |= IFEF_TXSTART; + if_set_eflags(ifp, IFEF_TXSTART); if (ifp->if_pre_enqueue == NULL) { ifp->if_pre_enqueue = ifnet_enqueue; } ifp->if_output = ifp->if_pre_enqueue; - } else { - ifp->if_eflags &= ~IFEF_TXSTART; } if (ifp->if_input_poll != NULL) { - ifp->if_eflags |= IFEF_RXPOLL; - } else { - ifp->if_eflags &= ~IFEF_RXPOLL; + if_set_eflags(ifp, IFEF_RXPOLL); } ifp->if_output_dlil = dlil_output_handler; @@ -414,9 +415,9 @@ ifnet_allocate_extended(const struct ifnet_init_eparams *einit0, bzero(&ifp->if_broadcast, sizeof(ifp->if_broadcast)); } - ifp->if_xflags = 0; + if_clear_xflags(ifp, -1); /* legacy interface */ - ifp->if_xflags |= IFXF_LEGACY; + if_set_xflags(ifp, IFXF_LEGACY); /* * output target queue delay is specified in millisecond @@ -442,8 +443,8 @@ ifnet_allocate_extended(const struct ifnet_init_eparams *einit0, */ OSIncrementAtomic64(&net_api_stats.nas_ifnet_alloc_count); INC_ATOMIC_INT64_LIM(net_api_stats.nas_ifnet_alloc_total); - if (einit.flags & IFNET_INIT_ALLOC_KPI) { - ifp->if_xflags |= IFXF_ALLOC_KPI; + if ((einit.flags & IFNET_INIT_ALLOC_KPI) != 0) { + if_set_xflags(ifp, IFXF_ALLOC_KPI); } else { OSIncrementAtomic64( &net_api_stats.nas_ifnet_alloc_os_count); @@ -469,6 +470,17 @@ ifnet_reference(ifnet_t ifp) return dlil_if_ref(ifp); } +void +ifnet_dispose(ifnet_t ifp) +{ + if (ifp->if_broadcast.length > sizeof(ifp->if_broadcast.u.buffer)) { + FREE(ifp->if_broadcast.u.ptr, M_IFADDR); + ifp->if_broadcast.u.ptr = NULL; + } + + dlil_if_release(ifp); +} + errno_t ifnet_release(ifnet_t ifp) { @@ -551,11 +563,9 @@ ifnet_set_flags(ifnet_t interface, u_int16_t new_flags, u_int16_t mask) igmp_initsilent(interface, IGMP_IFINFO(interface)); } #endif /* INET */ -#if INET6 if (MLD_IFINFO(interface) != NULL) { mld6_initsilent(interface, MLD_IFINFO(interface)); } -#endif /* INET6 */ } ifnet_lock_done(interface); @@ -645,8 +655,10 @@ ifnet_set_eflags(ifnet_t interface, u_int32_t new_flags, u_int32_t mask) return EINVAL; } oeflags = interface->if_eflags; - interface->if_eflags = - (new_flags & mask) | (interface->if_eflags & ~mask); + if_clear_eflags(interface, mask); + if (new_flags != 0) { + if_set_eflags(interface, (new_flags & mask)); + } ifnet_lock_done(interface); if (interface->if_eflags & IFEF_AWDL_RESTRICTED && !(oeflags & IFEF_AWDL_RESTRICTED)) 
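
Several hunks here replace the bare "br > 0xFFFFFFFF" comparison with an explicit UINT32_MAX clamp and a cast when pinning the 64-bit bandwidth figures into the 32-bit if_baudrate field. A small self-contained illustration of that saturating conversion, with placeholder bandwidth values:

#include <stdint.h>
#include <stdio.h>

/* Saturating 64-to-32-bit conversion, as used when pinning if_baudrate. */
static uint32_t
pin_to_u32(uint64_t value)
{
    return (value > UINT32_MAX) ? UINT32_MAX : (uint32_t)value;
}

int
main(void)
{
    uint64_t out_max_bw = 10000000000ULL;   /* 10 Gbit/s, example value */
    uint64_t in_max_bw  = 1000000000ULL;    /*  1 Gbit/s, example value */
    uint64_t br = out_max_bw > in_max_bw ? out_max_bw : in_max_bw;

    /* if_baudrate is still 32 bits wide, so a 10 Gbit/s link saturates it. */
    printf("baudrate pinned to %u\n", pin_to_u32(br));
    return 0;
}
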
{ @@ -830,7 +842,7 @@ done: static errno_t -ifnet_defrouter_llreachinfo(ifnet_t ifp, int af, +ifnet_defrouter_llreachinfo(ifnet_t ifp, sa_family_t af, struct ifnet_llreach_info *iflri) { if (ifp == NULL || iflri == NULL) { @@ -1103,18 +1115,14 @@ ifnet_set_wake_flags(ifnet_t interface, u_int32_t properties, u_int32_t mask) return EINVAL; } - ifnet_lock_exclusive(interface); - - if (mask & IF_WAKE_ON_MAGIC_PACKET) { - if (properties & IF_WAKE_ON_MAGIC_PACKET) { - interface->if_xflags |= IFXF_WAKE_ON_MAGIC_PACKET; + if ((mask & IF_WAKE_ON_MAGIC_PACKET) != 0) { + if ((properties & IF_WAKE_ON_MAGIC_PACKET) != 0) { + if_set_xflags(interface, IFXF_WAKE_ON_MAGIC_PACKET); } else { - interface->if_xflags &= ~IFXF_WAKE_ON_MAGIC_PACKET; + if_clear_xflags(interface, IFXF_WAKE_ON_MAGIC_PACKET); } } - ifnet_lock_done(interface); - (void) ifnet_touch_lastchange(interface); /* Notify application of the change */ @@ -1143,7 +1151,7 @@ ifnet_get_wake_flags(ifnet_t interface) return 0; } - if (interface->if_xflags & IFXF_WAKE_ON_MAGIC_PACKET) { + if ((interface->if_xflags & IFXF_WAKE_ON_MAGIC_PACKET) != 0) { flags |= IF_WAKE_ON_MAGIC_PACKET; } @@ -1154,7 +1162,7 @@ ifnet_get_wake_flags(ifnet_t interface) * Should MIB data store a copy? */ errno_t -ifnet_set_link_mib_data(ifnet_t interface, void *mibData, u_int32_t mibLen) +ifnet_set_link_mib_data(ifnet_t interface, void *mibData, uint32_t mibLen) { if (interface == NULL) { return EINVAL; @@ -1168,7 +1176,7 @@ ifnet_set_link_mib_data(ifnet_t interface, void *mibData, u_int32_t mibLen) } errno_t -ifnet_get_link_mib_data(ifnet_t interface, void *mibData, u_int32_t *mibLen) +ifnet_get_link_mib_data(ifnet_t interface, void *mibData, uint32_t *mibLen) { errno_t result = 0; @@ -1193,7 +1201,7 @@ ifnet_get_link_mib_data(ifnet_t interface, void *mibData, u_int32_t *mibLen) return result; } -u_int32_t +uint32_t ifnet_get_link_mib_data_length(ifnet_t interface) { return (interface == NULL) ? 0 : interface->if_linkmiblen; @@ -1299,7 +1307,7 @@ ifnet_metric(ifnet_t interface) } errno_t -ifnet_set_baudrate(struct ifnet *ifp, u_int64_t baudrate) +ifnet_set_baudrate(struct ifnet *ifp, uint64_t baudrate) { if (ifp == NULL) { return EINVAL; @@ -1309,7 +1317,7 @@ ifnet_set_baudrate(struct ifnet *ifp, u_int64_t baudrate) ifp->if_output_bw.eff_bw = ifp->if_input_bw.eff_bw = baudrate; /* Pin if_baudrate to 32 bits until we can change the storage size */ - ifp->if_baudrate = (baudrate > 0xFFFFFFFF) ? 0xFFFFFFFF : baudrate; + ifp->if_baudrate = (baudrate > UINT32_MAX) ? UINT32_MAX : (uint32_t)baudrate; return 0; } @@ -1349,13 +1357,17 @@ ifnet_set_link_status_outbw(struct ifnet *ifp) sr->valid_bitmask |= IF_WIFI_UL_EFFECTIVE_BANDWIDTH_VALID; sr->ul_effective_bandwidth = - ifp->if_output_bw.eff_bw; + ifp->if_output_bw.eff_bw > UINT32_MAX ? + UINT32_MAX : + (uint32_t)ifp->if_output_bw.eff_bw; } if (ifp->if_output_bw.max_bw != 0) { sr->valid_bitmask |= IF_WIFI_UL_MAX_BANDWIDTH_VALID; sr->ul_max_bandwidth = - ifp->if_output_bw.max_bw; + ifp->if_output_bw.max_bw > UINT32_MAX ? + UINT32_MAX : + (uint32_t)ifp->if_output_bw.max_bw; } } @@ -1391,7 +1403,7 @@ ifnet_set_output_bandwidths(struct ifnet *ifp, struct if_bandwidths *bw, /* Pin if_baudrate to 32 bits */ br = MAX(ifp->if_output_bw.max_bw, ifp->if_input_bw.max_bw); if (br != 0) { - ifp->if_baudrate = (br > 0xFFFFFFFF) ? 0xFFFFFFFF : br; + ifp->if_baudrate = (br > UINT32_MAX) ? 
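
The new ifnet_dispose routine (introduced just above) takes over the cleanup that ifnet_kpi_free used to do: the broadcast address is freed only when it was too large for the inline buffer inside the union. The following sketch reproduces that inline-versus-heap storage pattern with an invented struct; it is an analogue of the if_broadcast handling, not the kernel code itself.

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/* Broadcast address stored inline when small, heap-allocated otherwise. */
struct broadcast_addr {
    uint32_t length;
    union {
        char  buffer[8];
        char *ptr;
    } u;
};

static int
broadcast_set(struct broadcast_addr *b, const void *addr, uint32_t len)
{
    b->length = len;
    if (len <= sizeof(b->u.buffer)) {
        memcpy(b->u.buffer, addr, len);
    } else {
        b->u.ptr = malloc(len);
        if (b->u.ptr == NULL) {
            return -1;
        }
        memcpy(b->u.ptr, addr, len);
    }
    return 0;
}

/* Dispose: only the spilled case owns heap memory, as in ifnet_dispose(). */
static void
broadcast_dispose(struct broadcast_addr *b)
{
    if (b->length > sizeof(b->u.buffer)) {
        free(b->u.ptr);
        b->u.ptr = NULL;
    }
}
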
UINT32_MAX : (uint32_t)br; } /* Adjust queue parameters if needed */ @@ -1427,12 +1439,16 @@ ifnet_set_link_status_inbw(struct ifnet *ifp) sr->valid_bitmask |= IF_WIFI_DL_EFFECTIVE_BANDWIDTH_VALID; sr->dl_effective_bandwidth = - ifp->if_input_bw.eff_bw; + ifp->if_input_bw.eff_bw > UINT32_MAX ? + UINT32_MAX : + (uint32_t)ifp->if_input_bw.eff_bw; } if (ifp->if_input_bw.max_bw != 0) { sr->valid_bitmask |= IF_WIFI_DL_MAX_BANDWIDTH_VALID; - sr->dl_max_bandwidth = ifp->if_input_bw.max_bw; + sr->dl_max_bandwidth = ifp->if_input_bw.max_bw > UINT32_MAX ? + UINT32_MAX : + (uint32_t)ifp->if_input_bw.max_bw; } } @@ -1860,7 +1876,7 @@ ifnet_updown_delta(ifnet_t interface, struct timeval *updown_delta) } /* Calculate the delta */ - updown_delta->tv_sec = net_uptime(); + updown_delta->tv_sec = (time_t)net_uptime(); if (updown_delta->tv_sec > interface->if_data.ifi_lastupdown.tv_sec) { updown_delta->tv_sec -= interface->if_data.ifi_lastupdown.tv_sec; } @@ -2192,7 +2208,8 @@ ifnet_set_lladdr_internal(ifnet_t interface, const void *lladdr, } else { bzero(LLADDR(sdl), interface->if_addrlen); } - sdl->sdl_alen = lladdr_len; + /* lladdr_len-check with if_addrlen makes sure it fits in u_char */ + sdl->sdl_alen = (u_char)lladdr_len; if (apply_type) { sdl->sdl_type = new_type; @@ -2297,7 +2314,7 @@ ifnet_get_multicast_list(ifnet_t ifp, ifmultiaddr_t **addresses) } MALLOC(*addresses, ifmultiaddr_t *, sizeof(ifmultiaddr_t) * (cmax + 1), - M_TEMP, M_NOWAIT); + M_TEMP, M_WAITOK); if (*addresses == NULL) { ifnet_lock_done(ifp); return ENOMEM; @@ -2337,7 +2354,7 @@ errno_t ifnet_find_by_name(const char *ifname, ifnet_t *ifpp) { struct ifnet *ifp; - int namelen; + size_t namelen; if (ifname == NULL) { return EINVAL; @@ -3003,6 +3020,8 @@ ifnet_set_delegate(ifnet_t ifp, ifnet_t delegated_ifp) } bzero(&ifp->if_delegated, sizeof(ifp->if_delegated)); if (delegated_ifp != NULL && ifp != delegated_ifp) { + uint32_t set_eflags; + ifp->if_delegated.ifp = delegated_ifp; ifnet_reference(delegated_ifp); ifp->if_delegated.type = delegated_ifp->if_type; @@ -3016,10 +3035,10 @@ ifnet_set_delegate(ifnet_t ifp, ifnet_t delegated_ifp) /* * Propogate flags related to ECN from delegated interface */ - ifp->if_eflags &= ~(IFEF_ECN_ENABLE | IFEF_ECN_DISABLE); - ifp->if_eflags |= (delegated_ifp->if_eflags & + if_clear_eflags(ifp, IFEF_ECN_ENABLE | IFEF_ECN_DISABLE); + set_eflags = (delegated_ifp->if_eflags & (IFEF_ECN_ENABLE | IFEF_ECN_DISABLE)); - + if_set_eflags(ifp, set_eflags); printf("%s: is now delegating %s (type 0x%x, family %u, " "sub-family %u)\n", ifp->if_xname, delegated_ifp->if_xname, delegated_ifp->if_type, delegated_ifp->if_family, @@ -3263,7 +3282,9 @@ ifnet_link_status_report(ifnet_t ifp, const void *buffer, if_wifi_sr->valid_bitmask |= IF_WIFI_UL_MAX_BANDWIDTH_VALID; if_wifi_sr->ul_max_bandwidth = - ifp->if_output_bw.max_bw; + ifp->if_output_bw.max_bw > UINT32_MAX ? + UINT32_MAX : + (uint32_t)ifp->if_output_bw.max_bw; } if (!(new_wifi_sr->valid_bitmask & IF_WIFI_UL_EFFECTIVE_BANDWIDTH_VALID) && @@ -3271,7 +3292,9 @@ ifnet_link_status_report(ifnet_t ifp, const void *buffer, if_wifi_sr->valid_bitmask |= IF_WIFI_UL_EFFECTIVE_BANDWIDTH_VALID; if_wifi_sr->ul_effective_bandwidth = - ifp->if_output_bw.eff_bw; + ifp->if_output_bw.eff_bw > UINT32_MAX ? 
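
The ifnet_get_link_mib_data hunks above change the length parameter to uint32_t while keeping the copy-out contract: the caller supplies a buffer and its size, and an undersized buffer is reported back. The sketch below is one plausible reading of that contract with invented names; the exact partial-copy and error behavior of the kernel routine is not shown in the diff and is assumed here.

#include <errno.h>
#include <stdint.h>
#include <string.h>

struct linkmib {
    void    *data;
    uint32_t len;
};

/* Copy out at most *buflen bytes, report the true length, and flag a short
 * caller buffer, in the spirit of ifnet_get_link_mib_data(). */
static int
linkmib_copy(const struct linkmib *mib, void *buf, uint32_t *buflen)
{
    int error = 0;
    uint32_t outlen = *buflen;

    if (mib->data == NULL) {
        return ENOTSUP;
    }
    if (outlen < mib->len) {
        error = EMSGSIZE;
    }
    if (outlen > mib->len) {
        outlen = mib->len;
    }
    memcpy(buf, mib->data, outlen);
    *buflen = mib->len;
    return error;
}
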
+ UINT32_MAX : + (uint32_t)ifp->if_output_bw.eff_bw; } if (!(new_wifi_sr->valid_bitmask & IF_WIFI_DL_MAX_BANDWIDTH_VALID) && @@ -3279,7 +3302,9 @@ ifnet_link_status_report(ifnet_t ifp, const void *buffer, if_wifi_sr->valid_bitmask |= IF_WIFI_DL_MAX_BANDWIDTH_VALID; if_wifi_sr->dl_max_bandwidth = - ifp->if_input_bw.max_bw; + ifp->if_input_bw.max_bw > UINT32_MAX ? + UINT32_MAX : + (uint32_t)ifp->if_input_bw.max_bw; } if (!(new_wifi_sr->valid_bitmask & IF_WIFI_DL_EFFECTIVE_BANDWIDTH_VALID) && @@ -3287,7 +3312,9 @@ ifnet_link_status_report(ifnet_t ifp, const void *buffer, if_wifi_sr->valid_bitmask |= IF_WIFI_DL_EFFECTIVE_BANDWIDTH_VALID; if_wifi_sr->dl_effective_bandwidth = - ifp->if_input_bw.eff_bw; + ifp->if_input_bw.eff_bw > UINT32_MAX ? + UINT32_MAX : + (uint32_t)ifp->if_input_bw.eff_bw; } } @@ -3420,8 +3447,7 @@ ifnet_get_low_power_mode(ifnet_t ifp, boolean_t *on) return EINVAL; } - *on = !!(ifp->if_xflags & IFXF_LOW_POWER); - + *on = ((ifp->if_xflags & IFXF_LOW_POWER) != 0); return 0; } diff --git a/bsd/net/kpi_interface.h b/bsd/net/kpi_interface.h index 291aea2d7..bbcf46712 100644 --- a/bsd/net/kpi_interface.h +++ b/bsd/net/kpi_interface.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2004-2019 Apple Inc. All rights reserved. + * Copyright (c) 2004-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -57,11 +57,11 @@ struct ifnet_interface_advisory; #endif /* PRIVATE */ #ifdef XNU_KERNEL_PRIVATE -#if CONFIG_EMBEDDED +#if !XNU_TARGET_OS_OSX || (TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR) #define KPI_INTERFACE_EMBEDDED 1 -#else +#else /* XNU_TARGET_OS_OSX && !(TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR) */ #define KPI_INTERFACE_EMBEDDED 0 -#endif +#endif /* XNU_TARGET_OS_OSX && !(TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR) */ #else #if (TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR) #define KPI_INTERFACE_EMBEDDED 1 @@ -773,6 +773,15 @@ typedef void (*ifnet_input_poll_func)(ifnet_t interface, u_int32_t flags, u_int32_t max_count, mbuf_t *first_packet, mbuf_t *last_packet, u_int32_t *cnt, u_int32_t *len); +/*! + * @typedef ifnet_free_func + * @discussion ifnet_free_func is called as an alternative to ifnet_detach_func + * on a specific interface. Implementors of this callback are responsible + * for fully tearing down the interface. + * @param interface The interface that should be freed + */ +typedef void (*ifnet_free_func)(ifnet_t interface); + /* * @enum Interface control commands * @abstract Constants defining control commands. @@ -1127,10 +1136,12 @@ struct ifnet_init_eparams { u_int16_t tx_trailer; /* optional */ u_int32_t rx_mit_ival; /* optional */ #if !defined(__LP64__) - u_int64_t ____reserved[2]; /* for future use */ + ifnet_free_func free; /* optional */ + u_int32_t _____reserved; /* for future use */ + u_int64_t ____reserved[1]; /* for future use */ #else u_int32_t ____reserved; /* for future use */ - u_int64_t _____reserved[1]; /* for future use */ + ifnet_free_func free; /* optional */ #endif /* __LP64__ */ }; #endif /* KERNEL_PRIVATE */ @@ -1285,6 +1296,14 @@ __NKE_API_DEPRECATED; extern errno_t ifnet_allocate_extended(const struct ifnet_init_eparams *init, ifnet_t *interface); +/* + * @function ifnet_dispose + * @discusion Dispose the interface. 
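
The ifnet_init_eparams change above carves the new ifnet_free_func callback out of the previously reserved padding, with different member orderings for 32-bit and 64-bit builds so the structure size, and therefore the KPI ABI, stays the same. The sketch below demonstrates that size-preserving trick with invented struct names and a generic callback type; a _Static_assert is the cheap way to prove the invariant at compile time.

#include <stdint.h>

typedef void (*free_func_t)(void *);   /* stand-in for ifnet_free_func */

/* Original tail: space reserved for future use. */
struct tail_before {
#if UINTPTR_MAX == 0xffffffffu           /* 32-bit build */
    uint64_t reserved[2];
#else                                    /* 64-bit build */
    uint32_t reserved32;
    uint64_t reserved64[1];
#endif
};

/* New tail: one callback carved out of the reserved space. */
struct tail_after {
#if UINTPTR_MAX == 0xffffffffu
    free_func_t free;        /* 4-byte pointer */
    uint32_t    reserved32;
    uint64_t    reserved64[1];
#else
    uint32_t    reserved32;
    free_func_t free;        /* 8-byte pointer */
#endif
};

/* The whole point of the exercise: the ABI-visible size must not change. */
_Static_assert(sizeof(struct tail_before) == sizeof(struct tail_after),
    "callback must fit inside the previously reserved space");
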
This is meant to only be called + * by clients that implement ifnet_free_func + * @param interface The interface to dispose + */ +extern void ifnet_dispose(ifnet_t interface); + /* * @function ifnet_purge * @discussion Purge the output queue of an interface which implements @@ -3765,4 +3784,5 @@ extern errno_t ifnet_interface_advisory_report(ifnet_t ifp, __END_DECLS +#undef __NKE_API_DEPRECATED #endif /* __KPI_INTERFACE__ */ diff --git a/bsd/net/kpi_interfacefilter.h b/bsd/net/kpi_interfacefilter.h index 819112d7a..b8586b4aa 100644 --- a/bsd/net/kpi_interfacefilter.h +++ b/bsd/net/kpi_interfacefilter.h @@ -232,4 +232,5 @@ extern void iflt_detach(interface_filter_t filter_ref) __NKE_API_DEPRECATED; __END_DECLS +#undef __NKE_API_DEPRECATED #endif /* __KPI_INTERFACEFILTER__ */ diff --git a/bsd/net/kpi_protocol.c b/bsd/net/kpi_protocol.c index 3b3c50f5f..f1d7856a3 100644 --- a/bsd/net/kpi_protocol.c +++ b/bsd/net/kpi_protocol.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2004-2013 Apple Inc. All rights reserved. + * Copyright (c) 2004-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -131,15 +131,15 @@ proto_register_input(protocol_family_t protocol, proto_input_handler input, entry->domain = dp; - lck_mtx_lock(&inp->input_lck); + lck_mtx_lock(&inp->dlth_lock); entry->next = proto_input_add_list; proto_input_add_list = entry; - inp->input_waiting |= DLIL_PROTO_REGISTER; - if ((inp->input_waiting & DLIL_INPUT_RUNNING) == 0) { - wakeup((caddr_t)&inp->input_waiting); + inp->dlth_flags |= DLIL_PROTO_REGISTER; + if ((inp->dlth_flags & DLIL_INPUT_RUNNING) == 0) { + wakeup((caddr_t)&inp->dlth_flags); } - lck_mtx_unlock(&inp->input_lck); + lck_mtx_unlock(&inp->dlth_lock); return 0; } @@ -201,14 +201,14 @@ proto_input_run(void) mbuf_t packet_list; int i, locked = 0; - LCK_MTX_ASSERT(&inp->input_lck, LCK_MTX_ASSERT_NOTOWNED); + LCK_MTX_ASSERT(&inp->dlth_lock, LCK_MTX_ASSERT_NOTOWNED); - if (inp->input_waiting & DLIL_PROTO_REGISTER) { - lck_mtx_lock_spin(&inp->input_lck); + if (inp->dlth_flags & DLIL_PROTO_REGISTER) { + lck_mtx_lock_spin(&inp->dlth_lock); entry = proto_input_add_list; proto_input_add_list = NULL; - inp->input_waiting &= ~DLIL_PROTO_REGISTER; - lck_mtx_unlock(&inp->input_lck); + inp->dlth_flags &= ~DLIL_PROTO_REGISTER; + lck_mtx_unlock(&inp->dlth_lock); proto_delayed_attach(entry); } @@ -220,8 +220,8 @@ proto_input_run(void) for (entry = proto_hash[i]; entry != NULL && proto_total_waiting; entry = entry->next) { if (entry->inject_first != NULL) { - lck_mtx_lock_spin(&inp->input_lck); - inp->input_waiting &= ~DLIL_PROTO_WAITING; + lck_mtx_lock_spin(&inp->dlth_lock); + inp->dlth_flags &= ~DLIL_PROTO_WAITING; packet_list = entry->inject_first; @@ -229,7 +229,7 @@ proto_input_run(void) entry->inject_last = NULL; proto_total_waiting--; - lck_mtx_unlock(&inp->input_lck); + lck_mtx_unlock(&inp->dlth_lock); if (entry->domain != NULL && !(entry->domain-> dom_flags & DOM_REENTRANT)) { @@ -324,19 +324,19 @@ proto_inject(protocol_family_t protocol, mbuf_t packet_list) } if (entry != NULL) { - lck_mtx_lock(&inp->input_lck); + lck_mtx_lock(&inp->dlth_lock); if (entry->inject_first == NULL) { proto_total_waiting++; - inp->input_waiting |= DLIL_PROTO_WAITING; + inp->dlth_flags |= DLIL_PROTO_WAITING; entry->inject_first = packet_list; } else { mbuf_setnextpkt(entry->inject_last, packet_list); } entry->inject_last = last_packet; - if ((inp->input_waiting & DLIL_INPUT_RUNNING) == 0) { - wakeup((caddr_t)&inp->input_waiting); + if ((inp->dlth_flags & DLIL_INPUT_RUNNING) == 0) { + 
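
The kpi_protocol.c hunks rename the input thread's lock and flag word (dlth_lock, dlth_flags) but keep the same pattern: set a request bit under the mutex and wake the input thread only if it is not already running. Below is a user-space analogue with a pthread mutex and condition variable standing in for the kernel's lck_mtx and wakeup(); the flag names and the omission of the running-state bookkeeping are simplifications.

#include <pthread.h>
#include <stdint.h>

#define PROTO_REGISTER  0x1u
#define INPUT_RUNNING   0x2u

static pthread_mutex_t dlth_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  dlth_cv   = PTHREAD_COND_INITIALIZER;
static uint32_t        dlth_flags;

/* Producer: queue a registration request and poke the input thread only if
 * it is idle, mirroring the wakeup() call in proto_register_input(). */
static void
post_register_request(void)
{
    pthread_mutex_lock(&dlth_lock);
    dlth_flags |= PROTO_REGISTER;
    if ((dlth_flags & INPUT_RUNNING) == 0) {
        pthread_cond_signal(&dlth_cv);      /* analogue of wakeup() */
    }
    pthread_mutex_unlock(&dlth_lock);
}

/* Consumer: sleep until a request is pending, then clear and handle it. */
static uint32_t
wait_for_work(void)
{
    uint32_t pending;

    pthread_mutex_lock(&dlth_lock);
    while ((dlth_flags & PROTO_REGISTER) == 0) {
        pthread_cond_wait(&dlth_cv, &dlth_lock);
    }
    pending = dlth_flags;
    dlth_flags &= ~PROTO_REGISTER;
    pthread_mutex_unlock(&dlth_lock);
    return pending;
}
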
wakeup((caddr_t)&inp->dlth_flags); } - lck_mtx_unlock(&inp->input_lck); + lck_mtx_unlock(&inp->dlth_lock); } else { return ENOENT; } diff --git a/bsd/net/kpi_protocol.h b/bsd/net/kpi_protocol.h index f7ba31c2d..ccccc91f1 100644 --- a/bsd/net/kpi_protocol.h +++ b/bsd/net/kpi_protocol.h @@ -223,4 +223,5 @@ proto_kpi_init(void); #endif /* BSD_KERNEL_PRIVATE */ __END_DECLS +#undef __NKE_API_DEPRECATED #endif /* __KPI_PROTOCOL__ */ diff --git a/bsd/net/lacp.h b/bsd/net/lacp.h index 0b0cb02a3..623d11095 100644 --- a/bsd/net/lacp.h +++ b/bsd/net/lacp.h @@ -114,7 +114,7 @@ lacp_actor_partner_state_set_active_lacp(lacp_actor_partner_state state) static __inline__ lacp_actor_partner_state lacp_actor_partner_state_set_passive_lacp(lacp_actor_partner_state state) { - return state &= ~LACP_ACTOR_PARTNER_STATE_LACP_ACTIVITY; + return state & ~LACP_ACTOR_PARTNER_STATE_LACP_ACTIVITY; } static __inline__ int @@ -132,7 +132,7 @@ lacp_actor_partner_state_set_short_timeout(lacp_actor_partner_state state) static __inline__ lacp_actor_partner_state lacp_actor_partner_state_set_long_timeout(lacp_actor_partner_state state) { - return state &= ~LACP_ACTOR_PARTNER_STATE_LACP_TIMEOUT; + return state & ~LACP_ACTOR_PARTNER_STATE_LACP_TIMEOUT; } static __inline__ int @@ -150,7 +150,7 @@ lacp_actor_partner_state_set_aggregatable(lacp_actor_partner_state state) static __inline__ lacp_actor_partner_state lacp_actor_partner_state_set_individual(lacp_actor_partner_state state) { - return state &= ~LACP_ACTOR_PARTNER_STATE_AGGREGATION; + return state & ~LACP_ACTOR_PARTNER_STATE_AGGREGATION; } static __inline__ lacp_actor_partner_state @@ -168,7 +168,7 @@ lacp_actor_partner_state_set_in_sync(lacp_actor_partner_state state) static __inline__ lacp_actor_partner_state lacp_actor_partner_state_set_out_of_sync(lacp_actor_partner_state state) { - return state &= ~LACP_ACTOR_PARTNER_STATE_SYNCHRONIZATION; + return state & ~LACP_ACTOR_PARTNER_STATE_SYNCHRONIZATION; } static __inline__ int @@ -186,7 +186,7 @@ lacp_actor_partner_state_set_collecting(lacp_actor_partner_state state) static __inline__ lacp_actor_partner_state lacp_actor_partner_state_set_not_collecting(lacp_actor_partner_state state) { - return state &= ~LACP_ACTOR_PARTNER_STATE_COLLECTING; + return state & ~LACP_ACTOR_PARTNER_STATE_COLLECTING; } static __inline__ lacp_actor_partner_state @@ -204,7 +204,7 @@ lacp_actor_partner_state_set_distributing(lacp_actor_partner_state state) static __inline__ lacp_actor_partner_state lacp_actor_partner_state_set_not_distributing(lacp_actor_partner_state state) { - return state &= ~LACP_ACTOR_PARTNER_STATE_DISTRIBUTING; + return state & ~LACP_ACTOR_PARTNER_STATE_DISTRIBUTING; } static __inline__ lacp_actor_partner_state @@ -222,7 +222,7 @@ lacp_actor_partner_state_set_defaulted(lacp_actor_partner_state state) static __inline__ lacp_actor_partner_state lacp_actor_partner_state_set_not_defaulted(lacp_actor_partner_state state) { - return state &= ~LACP_ACTOR_PARTNER_STATE_DEFAULTED; + return state & ~LACP_ACTOR_PARTNER_STATE_DEFAULTED; } static __inline__ lacp_actor_partner_state @@ -240,7 +240,7 @@ lacp_actor_partner_state_set_expired(lacp_actor_partner_state state) static __inline__ lacp_actor_partner_state lacp_actor_partner_state_set_not_expired(lacp_actor_partner_state state) { - return state &= ~LACP_ACTOR_PARTNER_STATE_EXPIRED; + return state & ~LACP_ACTOR_PARTNER_STATE_EXPIRED; } static __inline__ lacp_actor_partner_state diff --git a/bsd/net/multi_layer_pkt_log.c b/bsd/net/multi_layer_pkt_log.c index b6af63d42..8b941319f 
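
The lacp.h hunks change "return state &= ~FLAG;" to "return state & ~FLAG;": assigning to the by-value parameter never affected the caller, so the compound assignment only obscured that these are pure helpers that take a state value and return the updated value. A tiny standalone illustration of that style:

#include <stdint.h>
#include <stdio.h>

typedef uint8_t lacp_state;

#define LACP_STATE_ACTIVITY 0x01

/* Pure helpers: take a value, return the updated value.  Writing back into
 * the by-value parameter (state &= ...) had no effect on the caller, which
 * is exactly the confusion the patch removes. */
static lacp_state
state_set_active(lacp_state state)
{
    return state | LACP_STATE_ACTIVITY;
}

static lacp_state
state_set_passive(lacp_state state)
{
    return state & ~LACP_STATE_ACTIVITY;
}

int
main(void)
{
    lacp_state s = 0;
    s = state_set_active(s);
    s = state_set_passive(s);
    printf("state = 0x%02x\n", s);
    return 0;
}
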
100644 --- a/bsd/net/multi_layer_pkt_log.c +++ b/bsd/net/multi_layer_pkt_log.c @@ -43,7 +43,7 @@ SYSCTL_PROC(_net_mpklog, OID_AUTO, enabled, CTLTYPE_INT | CTLFLAG_LOCKED | CTLFL 0, 0, &sysctl_net_mpklog_enabled, "I", "Multi-layer packet logging enabled"); static int sysctl_net_mpklog_type SYSCTL_HANDLER_ARGS; -int net_mpklog_type = OS_LOG_TYPE_DEFAULT; +uint8_t net_mpklog_type = OS_LOG_TYPE_DEFAULT; SYSCTL_PROC(_net_mpklog, OID_AUTO, type, CTLTYPE_INT | CTLFLAG_LOCKED | CTLFLAG_RW, 0, 0, &sysctl_net_mpklog_type, "I", "Multi-layer packet logging type"); @@ -84,10 +84,9 @@ sysctl_net_mpklog_type SYSCTL_HANDLER_ARGS value != OS_LOG_TYPE_INFO) { return EINVAL; } + net_mpklog_type = (uint8_t)value; - net_mpklog_type = value; - - os_log(OS_LOG_DEFAULT, "%s:%d set net_mpklog_type to %d (%s)", + os_log(OS_LOG_DEFAULT, "%s:%d set net_mpklog_type to %u (%s)", proc_best_name(current_proc()), proc_selfpid(), net_mpklog_type, net_mpklog_type == OS_LOG_TYPE_DEFAULT ? "default" : "info"); diff --git a/bsd/net/multi_layer_pkt_log.h b/bsd/net/multi_layer_pkt_log.h index ef3a3eac1..556c16473 100644 --- a/bsd/net/multi_layer_pkt_log.h +++ b/bsd/net/multi_layer_pkt_log.h @@ -399,7 +399,7 @@ os_log(LOGOBJECT, "15 {Receive Incomplete. curProtocol: %hhu, nextProtocol: %hhu * @param TCP_FLAGS uint8_t Flags of the TCP header of the segment */ #define MPKL_ESP_OUTPUT_TCP(LOGOBJECT, SPI, ESP_SEQ, LOCAL_PORT, REMOTE_PORT, TCP_SEQ, TCP_ACK, TCP_LEN, TCP_FLAGS) \ - os_log_with_type(LOGOBJECT, net_mpklog_type, \ + os_log_with_type(LOGOBJECT, (os_log_type_t)net_mpklog_type, \ "18 {curProtocol: 80, spi: 0x%X, espSeq: %u, PayloadProtocol: 100, " \ "localPort: %hu, remotePort: %hu, tcpSeq: %u, tcpAck: %u, tcpLen: %hu, tcpFlags: 0x%02x}", \ SPI, ESP_SEQ, \ @@ -418,7 +418,7 @@ os_log(LOGOBJECT, "15 {Receive Incomplete. curProtocol: %hhu, nextProtocol: %hhu * @param TCP_LEN uint16_t Length in the TCP header of the segment */ #define MPKL_ESP_INPUT_TCP(LOGOBJECT, SPI, ESP_SEQ, LOCAL_PORT, REMOTE_PORT, TCP_SEQ, TCP_LEN) \ - os_log_with_type(LOGOBJECT, net_mpklog_type, \ + os_log_with_type(LOGOBJECT, (os_log_type_t)net_mpklog_type, \ "19 {curProtocol: 80 spi: 0x%X, espSeq: %u, PayloadProtocol: 100, " \ "localPort: %hu, remotePort: %hu, tcpSeq: %u, tcpLen: %hu}", \ SPI, ESP_SEQ, \ @@ -457,7 +457,7 @@ os_log(LOGOBJECT, "33 {curProtocol: %hhu, nextProtocol: %hhu, curUUID: %{public} #ifdef KERNEL_PRIVATE extern int net_mpklog_enabled; -extern int net_mpklog_type; +extern uint8_t net_mpklog_type; #endif /* KERNEL_PRIVATE */ #endif /* _NET_MULTI_LAYER_PKT_LOG_H_ */ diff --git a/bsd/net/multicast_list.c b/bsd/net/multicast_list.c index 15c87fd40..5169de4b9 100644 --- a/bsd/net/multicast_list.c +++ b/bsd/net/multicast_list.c @@ -96,7 +96,7 @@ multicast_list_program(struct multicast_list * mc_list, struct ifnet * source_ifp, struct ifnet * target_ifp) { - int alen; + u_char alen; int error = 0; int i; struct multicast_entry * mc = NULL; diff --git a/bsd/net/nat464_utils.c b/bsd/net/nat464_utils.c index 30370a84e..8df7d737c 100644 --- a/bsd/net/nat464_utils.c +++ b/bsd/net/nat464_utils.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018 Apple Inc. All rights reserved. + * Copyright (c) 2018-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -393,7 +393,14 @@ nat464_translate_icmp(int naf, void *arg) case ICMP_UNREACH_NEEDFRAG: type = ICMP6_PACKET_TOO_BIG; code = 0; - mtu += 20; + /* + * Make sure we don't overflow adjusting for + * translation overhead. + * If we do, just work with a lower mtu as is. 
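
The nat464_translate_icmp hunk replaces the unconditional "mtu += 20" with an overflow guard: the CLAT46 header expansion overhead is only added when the result still fits the 16-bit MTU field of an ICMPv6 Packet Too Big message. A sketch of that guard follows; the overhead value of 20 (IPv6 header minus IPv4 header) is assumed here, since the patch only shows the macro name CLAT46_HDR_EXPANSION_OVERHD.

#include <stdint.h>

#define CLAT46_HDR_EXPANSION_OVERHD 20  /* assumed: 40-byte IPv6 hdr vs 20-byte IPv4 hdr */

/* Add the translation overhead only when the result still fits in the
 * 16-bit MTU field; otherwise keep the lower MTU as is. */
static uint16_t
clat46_adjust_mtu(uint16_t mtu)
{
    if (mtu <= UINT16_MAX - CLAT46_HDR_EXPANSION_OVERHD) {
        mtu += CLAT46_HDR_EXPANSION_OVERHD;
    }
    return mtu;
}
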
+ */ + if (mtu <= (UINT16_MAX - CLAT46_HDR_EXPANSION_OVERHD)) { + mtu += CLAT46_HDR_EXPANSION_OVERHD; + } break; default: return -1; @@ -440,7 +447,8 @@ nat464_translate_icmp(int naf, void *arg) } icmp4->icmp_type = type; icmp4->icmp_code = code; - icmp4->icmp_nextmtu = htons(mtu); + icmp4->icmp_nextmtu = htons((uint16_t)mtu); + if (ptr >= 0) { icmp4->icmp_void = htonl(ptr); } @@ -470,8 +478,8 @@ nat464_translate_icmp(int naf, void *arg) * @return -1 on error and 0 on success */ int -nat464_translate_icmp_ip(pbuf_t *pbuf, uint32_t off, uint64_t *tot_len, uint32_t *off2, - uint8_t proto2, uint8_t ttl2, uint64_t tot_len2, struct nat464_addr *src, +nat464_translate_icmp_ip(pbuf_t *pbuf, uint16_t off, uint16_t *tot_len, uint16_t *off2, + uint8_t proto2, uint8_t ttl2, uint16_t tot_len2, struct nat464_addr *src, struct nat464_addr *dst, protocol_family_t af, protocol_family_t naf) { struct ip *ip4 = NULL; @@ -502,7 +510,7 @@ nat464_translate_icmp_ip(pbuf_t *pbuf, uint32_t off, uint64_t *tot_len, uint32_t bzero(ip4, sizeof(*ip4)); ip4->ip_v = IPVERSION; ip4->ip_hl = sizeof(*ip4) >> 2; - ip4->ip_len = htons(sizeof(*ip4) + tot_len2 - olen); + ip4->ip_len = htons((uint16_t)(sizeof(*ip4) + tot_len2 - olen)); ip4->ip_id = rfc6864 ? 0 : htons(ip_randomid()); ip4->ip_off = htons(IP_DF); ip4->ip_ttl = ttl2; @@ -528,7 +536,7 @@ nat464_translate_icmp_ip(pbuf_t *pbuf, uint32_t off, uint64_t *tot_len, uint32_t ip6 = hdr; bzero(ip6, sizeof(*ip6)); ip6->ip6_vfc = IPV6_VERSION; - ip6->ip6_plen = htons(tot_len2 - olen); + ip6->ip6_plen = htons((uint16_t)(tot_len2 - olen)); if (proto2 == IPPROTO_ICMP) { ip6->ip6_nxt = IPPROTO_ICMPV6; } else { @@ -595,7 +603,7 @@ nat464_insert_frag46(pbuf_t *pbuf, uint16_t ip_id_val, uint16_t frag_offset, /* Populate IPv6 fragmentation header */ p_ip6_frag->ip6f_nxt = p_ip6h->ip6_nxt; p_ip6_frag->ip6f_reserved = 0; - p_ip6_frag->ip6f_offlg = (frag_offset) << 3; + p_ip6_frag->ip6f_offlg = (uint16_t)(frag_offset << 3); if (!is_last_frag) { p_ip6_frag->ip6f_offlg |= 0x1; } @@ -659,7 +667,7 @@ nat464_translate_64(pbuf_t *pbuf, int off, uint8_t tos, ip4->ip_v = 4; ip4->ip_hl = 5; ip4->ip_tos = tos; - ip4->ip_len = htons(sizeof(*ip4) + (tot_len - off)); + ip4->ip_len = htons((uint16_t)(sizeof(*ip4) + (tot_len - off))); ip4->ip_id = 0; ip4->ip_off = 0; ip4->ip_ttl = ttl; @@ -716,9 +724,9 @@ nat464_translate_64(pbuf_t *pbuf, int off, uint8_t tos, * @return NT_NAT64 if IP header translation is successful, else error */ int -nat464_translate_46(pbuf_t *pbuf, int off, uint8_t tos, +nat464_translate_46(pbuf_t *pbuf, uint16_t off, uint8_t tos, uint8_t proto, uint8_t ttl, struct in6_addr src_v6, - struct in6_addr dst_v6, uint64_t tot_len) + struct in6_addr dst_v6, uint16_t tot_len) { struct ip6_hdr *ip6; @@ -772,8 +780,8 @@ nat464_translate_proto(pbuf_t *pbuf, struct nat464_addr *osrc, { struct ip *iph = NULL; struct ip6_hdr *ip6h = NULL; - uint32_t hlen = 0, plen = 0; - uint64_t tot_len = 0; + uint16_t hlen = 0, plen = 0; + uint16_t tot_len = 0; void *nsrc = NULL, *ndst = NULL; uint8_t *proto = 0; uint16_t *psum = NULL; @@ -790,7 +798,7 @@ nat464_translate_proto(pbuf_t *pbuf, struct nat464_addr *osrc, switch (naf) { case PF_INET: { iph = pbuf->pb_data; - hlen = iph->ip_hl << 2; + hlen = (uint16_t)(iph->ip_hl << 2); plen = ntohs(iph->ip_len) - hlen; tot_len = ntohs(iph->ip_len); nsrc = &iph->ip_src; @@ -800,7 +808,7 @@ nat464_translate_proto(pbuf_t *pbuf, struct nat464_addr *osrc, } case PF_INET6: { ip6h = pbuf->pb_data; - hlen = sizeof(*ip6h); + hlen = (uint16_t)sizeof(*ip6h); plen = 
ntohs(ip6h->ip6_plen); tot_len = hlen + plen; nsrc = &ip6h->ip6_src; @@ -915,7 +923,7 @@ nat464_translate_proto(pbuf_t *pbuf, struct nat464_addr *osrc, struct icmp *icmph = NULL; struct icmp6_hdr *icmp6h = NULL; - uint32_t ip2off = 0, hlen2 = 0, tot_len2 = 0; + uint16_t ip2off = 0, hlen2 = 0, tot_len2 = 0; icmph = (struct icmp*) pbuf_contig_segment(pbuf, hlen, ICMP_MINLEN); @@ -935,15 +943,15 @@ nat464_translate_proto(pbuf_t *pbuf, struct nat464_addr *osrc, /*Translate the inner IP header only for error messages */ if (ICMP6_ERRORTYPE(icmp6h->icmp6_type)) { - ip2off = hlen + sizeof(*icmp6h); - struct ip *iph2; + ip2off = (uint16_t)(hlen + sizeof(*icmp6h)); + struct ip *iph2 = NULL; iph2 = (struct ip*) pbuf_contig_segment(pbuf, ip2off, sizeof(*iph2)); if (iph2 == NULL) { return NT_DROP; } - hlen2 = ip2off + (iph2->ip_hl << 2); + hlen2 = (uint16_t)(ip2off + (iph2->ip_hl << 2)); tot_len2 = ntohs(iph2->ip_len); /* Destination in outer IP should be Source in inner IP */ @@ -985,7 +993,7 @@ nat464_translate_proto(pbuf_t *pbuf, struct nat464_addr *osrc, struct icmp6_hdr *icmp6h = NULL; struct icmp *icmph = NULL; - uint32_t ip2off = 0, hlen2 = 0, tot_len2 = 0; + uint16_t ip2off = 0, hlen2 = 0, tot_len2 = 0; icmp6h = (struct icmp6_hdr*) pbuf_contig_segment(pbuf, hlen, sizeof(*icmp6h)); @@ -1006,7 +1014,7 @@ nat464_translate_proto(pbuf_t *pbuf, struct nat464_addr *osrc, /*Translate the inner IP header only for error messages */ if (ICMP_ERRORTYPE(icmph->icmp_type)) { ip2off = hlen + ICMP_MINLEN; - struct ip6_hdr *iph2; + struct ip6_hdr *iph2 = NULL; iph2 = (struct ip6_hdr*) pbuf_contig_segment(pbuf, ip2off, sizeof(*iph2)); if (iph2 == NULL) { @@ -1096,6 +1104,15 @@ done: /* Clear IPv4 checksum flags */ *pbuf->pb_csum_flags &= ~(CSUM_IP | CSUM_IP_FRAGS | CSUM_DELAY_DATA | CSUM_FRAGMENT); + /* + * If the packet requires TCP segmentation due to TSO offload, + * then change the checksum flag to indicate that an IPv6 + * TCP segmentation is needed now. 
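
Throughout nat464_translate_proto the header and payload lengths are narrowed from uint32_t/uint64_t to uint16_t, which matches the 16-bit wire fields they come from; the inner (quoted) IPv4 header of an ICMP error is parsed the same way, with ip_hl counted in 32-bit words. A compilable fragment of that length arithmetic, with an invented helper name:

#include <netinet/in.h>
#include <netinet/ip.h>   /* struct ip */
#include <arpa/inet.h>    /* ntohs */
#include <stdint.h>

/* Given the inner IPv4 header quoted inside an ICMP error message, recover
 * its header length (ip_hl is in 32-bit words) and total length.  Both fit
 * in 16 bits, which is why the patch narrows these locals to uint16_t. */
static void
inner_ip4_lengths(const struct ip *iph2, uint16_t ip2off,
    uint16_t *hlen2, uint16_t *tot_len2)
{
    *hlen2 = (uint16_t)(ip2off + (iph2->ip_hl << 2));
    *tot_len2 = ntohs(iph2->ip_len);
}
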
+ */ + if (*pbuf->pb_csum_flags & CSUM_TSO_IPV4) { + *pbuf->pb_csum_flags &= ~CSUM_TSO_IPV4; + *pbuf->pb_csum_flags |= CSUM_TSO_IPV6; + } } else if (direction == NT_IN) { /* XXX On input just reset csum flags */ *pbuf->pb_csum_flags = 0; /* Reset all flags for now */ @@ -1188,7 +1205,7 @@ nat464_cksum_fixup(uint16_t cksum, uint16_t old, uint16_t new, uint8_t udp) if (udp && !l) { return 0xffff; } - return l; + return (uint16_t)l; } /* CLAT46 event handlers */ diff --git a/bsd/net/nat464_utils.h b/bsd/net/nat464_utils.h index 23675ac50..5e7e22528 100644 --- a/bsd/net/nat464_utils.h +++ b/bsd/net/nat464_utils.h @@ -101,8 +101,8 @@ int nat464_translate_icmp(int, void *); int - nat464_translate_icmp_ip(pbuf_t *, uint32_t, uint64_t *, uint32_t *, - uint8_t, uint8_t, uint64_t, struct nat464_addr *, + nat464_translate_icmp_ip(pbuf_t *, uint16_t, uint16_t *, uint16_t *, + uint8_t, uint8_t, uint16_t, struct nat464_addr *, struct nat464_addr *, protocol_family_t, protocol_family_t ); int @@ -116,8 +116,8 @@ int struct in_addr, uint64_t, boolean_t *); int - nat464_translate_46(pbuf_t *, int, uint8_t, uint8_t, uint8_t, struct in6_addr, - struct in6_addr, uint64_t); + nat464_translate_46(pbuf_t *, uint16_t, uint8_t, uint8_t, uint8_t, struct in6_addr, + struct in6_addr, uint16_t); int nat464_translate_proto(pbuf_t *, struct nat464_addr *, struct nat464_addr *, diff --git a/bsd/net/ndrv.c b/bsd/net/ndrv.c index aeec3bb46..2075bbec1 100644 --- a/bsd/net/ndrv.c +++ b/bsd/net/ndrv.c @@ -250,8 +250,8 @@ ndrv_attach(struct socket *so, int proto, __unused struct proc *p) TAILQ_INIT(&np->nd_dlist); np->nd_signature = NDRV_SIGNATURE; np->nd_socket = so; - np->nd_proto.sp_family = SOCK_DOM(so); - np->nd_proto.sp_protocol = proto; + np->nd_proto.sp_family = (uint16_t)SOCK_DOM(so); + np->nd_proto.sp_protocol = (uint16_t)proto; np->nd_if = NULL; np->nd_proto_family = 0; np->nd_family = 0; @@ -331,13 +331,11 @@ ndrv_event(struct ifnet *ifp, __unused protocol_family_t protocol, event->event_code == KEV_DL_IF_DETACHING) { LCK_MTX_ASSERT(ndrvdomain->dom_mtx, LCK_MTX_ASSERT_NOTOWNED); lck_mtx_lock(ndrvdomain->dom_mtx); - ndrv_handle_ifp_detach(ifnet_family(ifp), ifnet_unit(ifp)); + ndrv_handle_ifp_detach(ifnet_family(ifp), ifp->if_unit); lck_mtx_unlock(ndrvdomain->dom_mtx); } } -static int name_cmp(struct ifnet *, char *); - /* * This is the "driver open" hook - we 'bind' to the * named driver. 
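
nat464_cksum_fixup, whose tail is visible above (the UDP zero-checksum special case and the uint16_t cast), exists to patch an Internet checksum incrementally when a 16-bit word of the packet changes during translation. The sketch below is the textbook RFC 1624 (equation 3) form of that operation, not a copy of Apple's implementation.

#include <stdint.h>

/* Incremental Internet-checksum update: when one 16-bit word changes from
 * `old` to `new`, patch the checksum without re-summing the whole packet. */
static uint16_t
cksum_fixup(uint16_t cksum, uint16_t old, uint16_t new, int udp)
{
    uint32_t sum = (uint16_t)~cksum + (uint16_t)~old + new;  /* RFC 1624 eqn. 3 */
    uint16_t fixed;

    sum = (sum >> 16) + (sum & 0xffff);   /* fold the carries back in */
    sum += sum >> 16;
    fixed = (uint16_t)~sum;

    if (udp && fixed == 0) {
        return 0xffff;                    /* UDP reserves 0 for "no checksum" */
    }
    return fixed;
}
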
@@ -383,7 +381,7 @@ ndrv_bind(struct socket *so, struct sockaddr *nam, __unused struct proc *p) */ ifnet_head_lock_shared(); TAILQ_FOREACH(ifp, &ifnet_head, if_link) { - if (name_cmp(ifp, dname) == 0) { + if (strncmp(ifp->if_xname, dname, IFNAMSIZ) == 0) { break; } } @@ -415,7 +413,7 @@ ndrv_bind(struct socket *so, struct sockaddr *nam, __unused struct proc *p) np->nd_if = ifp; np->nd_family = ifnet_family(ifp); - np->nd_unit = ifnet_unit(ifp); + np->nd_unit = ifp->if_unit; return 0; } @@ -460,7 +458,8 @@ ndrv_send(struct socket *so, __unused int flags, struct mbuf *m, { int error; - if (control) { + if (control != NULL) { + m_freem(control); return EOPNOTSUPP; } @@ -649,46 +648,6 @@ ndrv_do_disconnect(struct ndrv_cb *np) return 0; } -/* Hackery - return a string version of a decimal number */ -static void -sprint_d(u_int n, char *buf, int buflen) -{ - char dbuf[IFNAMSIZ]; - char *cp = dbuf + IFNAMSIZ - 1; - - *cp = 0; - do { - buflen--; - cp--; - *cp = "0123456789"[n % 10]; - n /= 10; - } while (n != 0 && buflen > 0); - strlcpy(buf, cp, IFNAMSIZ - buflen); - return; -} - -/* - * Try to compare a device name (q) with one of the funky ifnet - * device names (ifp). - */ -static int -name_cmp(struct ifnet *ifp, char *q) -{ - char *r; - int len; - char buf[IFNAMSIZ]; - - r = buf; - len = strlen(ifnet_name(ifp)); - strlcpy(r, ifnet_name(ifp), IFNAMSIZ); - r += len; - sprint_d(ifnet_unit(ifp), r, IFNAMSIZ - (r - buf)); -#if NDRV_DEBUG - printf("Comparing %s, %s\n", buf, q); -#endif - return strncmp(buf, q, IFNAMSIZ); -} - #if 0 //### Not used /* @@ -746,7 +705,7 @@ ndrv_setspec(struct ndrv_cb *np, struct sockopt *sopt) ndrvSpec.protocol_family = ndrvSpec64.protocol_family; ndrvSpec.demux_count = ndrvSpec64.demux_count; - user_addr = ndrvSpec64.demux_list; + user_addr = CAST_USER_ADDR_T(ndrvSpec64.demux_list); } else { struct ndrv_protocol_desc32 ndrvSpec32; diff --git a/bsd/net/necp.c b/bsd/net/necp.c index 0c014f0be..ac3b6fbb3 100644 --- a/bsd/net/necp.c +++ b/bsd/net/necp.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013-2019 Apple Inc. All rights reserved. + * Copyright (c) 2013-2020 Apple Inc. All rights reserved. 
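
ndrv_bind now matches the requested device against if_xname with a bounded strncmp, which lets the patch delete the old name_cmp()/sprint_d() helpers that rebuilt "name + unit" by hand; the extended name already carries the unit suffix. A small standalone illustration, with placeholder names:

#include <stdio.h>
#include <string.h>

#define IFNAMSIZ 16

/* The extended name already encodes "name + unit" (e.g. "en0"), so matching
 * reduces to a bounded string compare. */
static int
ifname_matches(const char *if_xname, const char *wanted)
{
    return strncmp(if_xname, wanted, IFNAMSIZ) == 0;
}

int
main(void)
{
    char if_xname[IFNAMSIZ];

    snprintf(if_xname, sizeof(if_xname), "%s%d", "en", 0);   /* "en0" */
    printf("%s\n", ifname_matches(if_xname, "en0") ? "match" : "no match");
    return 0;
}
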
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -31,7 +31,6 @@ #include #include #include -#include #include #include #include @@ -70,6 +69,7 @@ #include #include #include +#include /* * NECP - Network Extension Control Policy database @@ -144,6 +144,8 @@ u_int32_t necp_drop_all_level = 0; u_int32_t necp_pass_loopback = 1; // 0=Off, 1=On u_int32_t necp_pass_keepalives = 1; // 0=Off, 1=On u_int32_t necp_pass_interpose = 1; // 0=Off, 1=On +u_int32_t necp_restrict_multicast = 1; // 0=Off, 1=On +u_int32_t necp_dedup_policies = 0; // 0=Off, 1=On u_int32_t necp_drop_unentitled_order = 0; #ifdef XNU_TARGET_OS_WATCH @@ -154,8 +156,17 @@ u_int32_t necp_drop_unentitled_level = 0; u_int32_t necp_debug = 0; // 0=None, 1=Basic, 2=EveryMatch +os_log_t necp_log_handle = NULL; + u_int32_t necp_session_count = 0; +ZONE_DECLARE(necp_session_policy_zone, "necp_session_policy", + sizeof(struct necp_session_policy), ZC_ZFREE_CLEARMEM | ZC_NOENCRYPT); +ZONE_DECLARE(necp_socket_policy_zone, "necp_socket_policy", + sizeof(struct necp_kernel_socket_policy), ZC_ZFREE_CLEARMEM | ZC_NOENCRYPT); +ZONE_DECLARE(necp_ip_policy_zone, "necp_ip_policy", + sizeof(struct necp_kernel_ip_output_policy), ZC_ZFREE_CLEARMEM | ZC_NOENCRYPT); + #define LIST_INSERT_SORTED_ASCENDING(head, elm, field, sortfield, tmpelm) do { \ if (LIST_EMPTY((head)) || (LIST_FIRST(head)->sortfield >= (elm)->sortfield)) { \ LIST_INSERT_HEAD((head), elm, field); \ @@ -227,7 +238,9 @@ u_int32_t necp_session_count = 0; #define NECP_KERNEL_CONDITION_LOCAL_EMPTY 0x1000000 #define NECP_KERNEL_CONDITION_REMOTE_EMPTY 0x2000000 #define NECP_KERNEL_CONDITION_PLATFORM_BINARY 0x4000000 -#define NECP_KERNEL_CONDITION_SIGNING_IDENTIFIER 0x8000000 +#define NECP_KERNEL_CONDITION_SDK_VERSION 0x8000000 +#define NECP_KERNEL_CONDITION_SIGNING_IDENTIFIER 0x10000000 +#define NECP_KERNEL_CONDITION_PACKET_FILTER_TAGS 0x20000000 #define NECP_MAX_POLICY_RESULT_SIZE 512 #define NECP_MAX_ROUTE_RULES_ARRAY_SIZE 1024 @@ -379,13 +392,13 @@ static bool necp_policy_mark_all_for_deletion(struct necp_session *session); static bool necp_policy_delete(struct necp_session *session, struct necp_session_policy *policy); static void necp_policy_apply_all(struct necp_session *session); -static necp_kernel_policy_id necp_kernel_socket_policy_add(necp_policy_order order, u_int32_t session_order, int session_pid, u_int32_t condition_mask, u_int32_t condition_negated_mask, necp_app_id cond_app_id, necp_app_id cond_real_app_id, char *cond_custom_entitlement, u_int32_t cond_account_id, char *domain, pid_t cond_pid, uid_t cond_uid, ifnet_t cond_bound_interface, struct necp_policy_condition_tc_range cond_traffic_class, u_int16_t cond_protocol, union necp_sockaddr_union *cond_local_start, union necp_sockaddr_union *cond_local_end, u_int8_t cond_local_prefix, union necp_sockaddr_union *cond_remote_start, union necp_sockaddr_union *cond_remote_end, u_int8_t cond_remote_prefix, struct necp_policy_condition_agent_type *cond_agent_type, u_int32_t cond_client_flags, char *cond_signing_identifier, necp_kernel_policy_result result, necp_kernel_policy_result_parameter result_parameter); +static necp_kernel_policy_id necp_kernel_socket_policy_add(necp_policy_order order, u_int32_t session_order, int session_pid, u_int32_t condition_mask, u_int32_t condition_negated_mask, necp_app_id cond_app_id, necp_app_id cond_real_app_id, char *cond_custom_entitlement, u_int32_t cond_account_id, char *domain, pid_t cond_pid, uid_t cond_uid, ifnet_t cond_bound_interface, struct necp_policy_condition_tc_range 
cond_traffic_class, u_int16_t cond_protocol, union necp_sockaddr_union *cond_local_start, union necp_sockaddr_union *cond_local_end, u_int8_t cond_local_prefix, union necp_sockaddr_union *cond_remote_start, union necp_sockaddr_union *cond_remote_end, u_int8_t cond_remote_prefix, struct necp_policy_condition_agent_type *cond_agent_type, struct necp_policy_condition_sdk_version *cond_sdk_version, u_int32_t cond_client_flags, char *cond_signing_identifier, u_int16_t cond_packet_filter_tags, necp_kernel_policy_result result, necp_kernel_policy_result_parameter result_parameter); static bool necp_kernel_socket_policy_delete(necp_kernel_policy_id policy_id); static bool necp_kernel_socket_policies_reprocess(void); static bool necp_kernel_socket_policies_update_uuid_table(void); -static inline struct necp_kernel_socket_policy *necp_socket_find_policy_match_with_info_locked(struct necp_kernel_socket_policy **policy_search_array, struct necp_socket_info *info, necp_kernel_policy_filter *return_filter, u_int32_t *return_route_rule_id_array, size_t *return_route_rule_id_array_count, size_t route_rule_id_array_count, necp_kernel_policy_result *return_service_action, necp_kernel_policy_service *return_service, u_int32_t *return_netagent_array, u_int32_t *return_netagent_use_flags_array, size_t netagent_array_count, struct necp_client_parameter_netagent_type *required_agent_types, u_int32_t num_required_agent_types, proc_t proc, necp_kernel_policy_id *skip_policy_id, struct rtentry *rt, necp_kernel_policy_result *return_drop_dest_policy_result, necp_drop_all_bypass_check_result_t *return_drop_all_bypass); +static inline struct necp_kernel_socket_policy *necp_socket_find_policy_match_with_info_locked(struct necp_kernel_socket_policy **policy_search_array, struct necp_socket_info *info, necp_kernel_policy_filter *return_filter, u_int32_t *return_route_rule_id_array, size_t *return_route_rule_id_array_count, size_t route_rule_id_array_count, necp_kernel_policy_result *return_service_action, necp_kernel_policy_service *return_service, u_int32_t *return_netagent_array, u_int32_t *return_netagent_use_flags_array, size_t netagent_array_count, struct necp_client_parameter_netagent_type *required_agent_types, u_int32_t num_required_agent_types, proc_t proc, u_int16_t pf_tag, necp_kernel_policy_id *skip_policy_id, struct rtentry *rt, necp_kernel_policy_result *return_drop_dest_policy_result, necp_drop_all_bypass_check_result_t *return_drop_all_bypass, u_int32_t *return_flow_divert_aggregate_unit); -static necp_kernel_policy_id necp_kernel_ip_output_policy_add(necp_policy_order order, necp_policy_order suborder, u_int32_t session_order, int session_pid, u_int32_t condition_mask, u_int32_t condition_negated_mask, necp_kernel_policy_id cond_policy_id, ifnet_t cond_bound_interface, u_int32_t cond_last_interface_index, u_int16_t cond_protocol, union necp_sockaddr_union *cond_local_start, union necp_sockaddr_union *cond_local_end, u_int8_t cond_local_prefix, union necp_sockaddr_union *cond_remote_start, union necp_sockaddr_union *cond_remote_end, u_int8_t cond_remote_prefix, necp_kernel_policy_result result, necp_kernel_policy_result_parameter result_parameter); +static necp_kernel_policy_id necp_kernel_ip_output_policy_add(necp_policy_order order, necp_policy_order suborder, u_int32_t session_order, int session_pid, u_int32_t condition_mask, u_int32_t condition_negated_mask, necp_kernel_policy_id cond_policy_id, ifnet_t cond_bound_interface, u_int32_t cond_last_interface_index, u_int16_t cond_protocol, union 
necp_sockaddr_union *cond_local_start, union necp_sockaddr_union *cond_local_end, u_int8_t cond_local_prefix, union necp_sockaddr_union *cond_remote_start, union necp_sockaddr_union *cond_remote_end, u_int8_t cond_remote_prefix, u_int16_t cond_packet_filter_tags, necp_kernel_policy_result result, necp_kernel_policy_result_parameter result_parameter); static bool necp_kernel_ip_output_policy_delete(necp_kernel_policy_id policy_id); static bool necp_kernel_ip_output_policies_reprocess(void); @@ -480,6 +493,8 @@ static int sysctl_handle_necp_level SYSCTL_HANDLER_ARGS; static int sysctl_handle_necp_unentitled_level SYSCTL_HANDLER_ARGS; SYSCTL_NODE(_net, OID_AUTO, necp, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "NECP"); +SYSCTL_INT(_net_necp, NECPCTL_DEDUP_POLICIES, dedup_policies, CTLFLAG_LOCKED | CTLFLAG_RW, &necp_dedup_policies, 0, ""); +SYSCTL_INT(_net_necp, NECPCTL_RESTRICT_MULTICAST, restrict_multicast, CTLFLAG_LOCKED | CTLFLAG_RW, &necp_restrict_multicast, 0, ""); SYSCTL_INT(_net_necp, NECPCTL_PASS_LOOPBACK, pass_loopback, CTLFLAG_LOCKED | CTLFLAG_RW, &necp_pass_loopback, 0, ""); SYSCTL_INT(_net_necp, NECPCTL_PASS_KEEPALIVES, pass_keepalives, CTLFLAG_LOCKED | CTLFLAG_RW, &necp_pass_keepalives, 0, ""); SYSCTL_INT(_net_necp, NECPCTL_PASS_INTERPOSE, pass_interpose, CTLFLAG_LOCKED | CTLFLAG_RW, &necp_pass_interpose, 0, ""); @@ -576,6 +591,12 @@ static const struct fileops necp_session_fd_ops = { .fo_kqfilter = fo_no_kqfilter, }; +static inline int +necp_is_platform_binary(proc_t proc) +{ + return (proc != NULL) ? (csproc_get_platform_binary(proc) && cs_valid(proc)) : 0; +} + static inline necp_drop_all_bypass_check_result_t necp_check_drop_all_bypass_result(proc_t proc) { @@ -588,7 +609,7 @@ necp_check_drop_all_bypass_result(proc_t proc) #if defined(XNU_TARGET_OS_OSX) const char *signing_id = NULL; - const bool isConfigd = (csproc_get_platform_binary(proc) && + const bool isConfigd = (necp_is_platform_binary(proc) && (signing_id = cs_identity_get(proc)) && (strlen(signing_id) == SIGNING_ID_CONFIGD_LEN) && (memcmp(signing_id, SIGNING_ID_CONFIGD, SIGNING_ID_CONFIGD_LEN) == 0)); @@ -632,9 +653,9 @@ necp_session_open(struct proc *p, struct necp_session_open_args *uap, int *retva goto done; } - fp->f_fglob->fg_flag = 0; - fp->f_fglob->fg_ops = &necp_session_fd_ops; - fp->f_fglob->fg_data = session; + fp->fp_glob->fg_flag = 0; + fp->fp_glob->fg_ops = &necp_session_fd_ops; + fp->fp_glob->fg_data = session; proc_fdlock(p); FDFLAGS_SET(p, fd, (UF_EXCLOSE | UF_FORKCLOSE)); @@ -672,32 +693,22 @@ necp_session_op_close(struct fileglob *fg, vfs_context_t ctx) } static int -necp_session_find_from_fd(int fd, struct necp_session **session) +necp_session_find_from_fd(struct proc *p, int fd, + struct fileproc **fpp, struct necp_session **session) { - proc_t p = current_proc(); struct fileproc *fp = NULL; - int error = 0; - - proc_fdlock_spin(p); - if ((error = fp_lookup(p, fd, &fp, 1)) != 0) { - goto done; - } - if (fp->f_fglob->fg_ops->fo_type != DTYPE_NETPOLICY) { - fp_drop(p, fd, fp, 1); - error = ENODEV; - goto done; - } - *session = (struct necp_session *)fp->f_fglob->fg_data; + int error = fp_get_ftype(p, fd, DTYPE_NETPOLICY, ENODEV, &fp); - if ((*session)->necp_fd_type != necp_fd_type_session) { - // Not a client fd, ignore - fp_drop(p, fd, fp, 1); - error = EINVAL; - goto done; + if (error == 0) { + *fpp = fp; + *session = (struct necp_session *)fp->fp_glob->fg_data; + if ((*session)->necp_fd_type != necp_fd_type_session) { + // Not a client fd, ignore + fp_drop(p, fd, fp, 0); + error = EINVAL; + } } -done: - 
proc_fdunlock(p); return error; } @@ -708,13 +719,13 @@ necp_session_add_policy(struct necp_session *session, struct necp_session_action u_int8_t *tlv_buffer = NULL; if (uap->in_buffer_length == 0 || uap->in_buffer_length > NECP_MAX_POLICY_SIZE || uap->in_buffer == 0) { - NECPLOG(LOG_ERR, "necp_session_add_policy invalid input (%zu)", uap->in_buffer_length); + NECPLOG(LOG_ERR, "necp_session_add_policy invalid input (%zu)", (size_t)uap->in_buffer_length); error = EINVAL; goto done; } if (uap->out_buffer_length < sizeof(necp_policy_id) || uap->out_buffer == 0) { - NECPLOG(LOG_ERR, "necp_session_add_policy invalid output buffer (%zu)", uap->out_buffer_length); + NECPLOG(LOG_ERR, "necp_session_add_policy invalid output buffer (%zu)", (size_t)uap->out_buffer_length); error = EINVAL; goto done; } @@ -759,7 +770,7 @@ necp_session_get_policy(struct necp_session *session, struct necp_session_action u_int8_t *response = NULL; if (uap->in_buffer_length < sizeof(necp_policy_id) || uap->in_buffer == 0) { - NECPLOG(LOG_ERR, "necp_session_get_policy invalid input (%zu)", uap->in_buffer_length); + NECPLOG(LOG_ERR, "necp_session_get_policy invalid input (%zu)", (size_t)uap->in_buffer_length); error = EINVAL; goto done; } @@ -783,7 +794,7 @@ necp_session_get_policy(struct necp_session *session, struct necp_session_action u_int32_t response_size = order_tlv_size + result_tlv_size + policy->conditions_size; if (uap->out_buffer_length < response_size || uap->out_buffer == 0) { - NECPLOG(LOG_ERR, "necp_session_get_policy buffer not large enough (%u < %u)", uap->out_buffer_length, response_size); + NECPLOG(LOG_ERR, "necp_session_get_policy buffer not large enough (%zu < %u)", (size_t)uap->out_buffer_length, response_size); error = EINVAL; goto done; } @@ -831,7 +842,7 @@ necp_session_delete_policy(struct necp_session *session, struct necp_session_act int error = 0; if (uap->in_buffer_length < sizeof(necp_policy_id) || uap->in_buffer == 0) { - NECPLOG(LOG_ERR, "necp_session_delete_policy invalid input (%zu)", uap->in_buffer_length); + NECPLOG(LOG_ERR, "necp_session_delete_policy invalid input (%zu)", (size_t)uap->in_buffer_length); error = EINVAL; goto done; } @@ -890,7 +901,7 @@ necp_session_list_all(struct necp_session *session, struct necp_session_action_a response_size = num_policies * tlv_size; if (uap->out_buffer_length < response_size || uap->out_buffer == 0) { - NECPLOG(LOG_ERR, "necp_session_list_all buffer not large enough (%u < %u)", uap->out_buffer_length, response_size); + NECPLOG(LOG_ERR, "necp_session_list_all buffer not large enough (%zu < %u)", (size_t)uap->out_buffer_length, response_size); error = EINVAL; goto done; } @@ -944,7 +955,7 @@ necp_session_set_session_priority(struct necp_session *session, struct necp_sess struct necp_session_policy *temp_policy = NULL; if (uap->in_buffer_length < sizeof(necp_session_priority) || uap->in_buffer == 0) { - NECPLOG(LOG_ERR, "necp_session_set_session_priority invalid input (%zu)", uap->in_buffer_length); + NECPLOG(LOG_ERR, "necp_session_set_session_priority invalid input (%zu)", (size_t)uap->in_buffer_length); error = EINVAL; goto done; } @@ -1000,7 +1011,7 @@ necp_session_register_service(struct necp_session *session, struct necp_session_ struct necp_service_registration *new_service = NULL; if (uap->in_buffer_length < sizeof(uuid_t) || uap->in_buffer == 0) { - NECPLOG(LOG_ERR, "necp_session_register_service invalid input (%zu)", uap->in_buffer_length); + NECPLOG(LOG_ERR, "necp_session_register_service invalid input (%zu)", 
(size_t)uap->in_buffer_length); error = EINVAL; goto done; } @@ -1039,7 +1050,7 @@ necp_session_unregister_service(struct necp_session *session, struct necp_sessio struct necp_uuid_id_mapping *mapping = NULL; if (uap->in_buffer_length < sizeof(uuid_t) || uap->in_buffer == 0) { - NECPLOG(LOG_ERR, "necp_session_unregister_service invalid input (%zu)", uap->in_buffer_length); + NECPLOG(LOG_ERR, "necp_session_unregister_service invalid input (%zu)", (size_t)uap->in_buffer_length); error = EINVAL; goto done; } @@ -1078,7 +1089,7 @@ necp_session_dump_all(struct necp_session *session, struct necp_session_action_a int error = 0; if (uap->out_buffer_length == 0 || uap->out_buffer == 0) { - NECPLOG(LOG_ERR, "necp_session_dump_all invalid output buffer (%zu)", uap->out_buffer_length); + NECPLOG(LOG_ERR, "necp_session_dump_all invalid output buffer (%zu)", (size_t)uap->out_buffer_length); error = EINVAL; goto done; } @@ -1092,11 +1103,12 @@ done: int necp_session_action(struct proc *p, struct necp_session_action_args *uap, int *retval) { -#pragma unused(p) + struct fileproc *fp; int error = 0; int return_value = 0; struct necp_session *session = NULL; - error = necp_session_find_from_fd(uap->necp_fd, &session); + + error = necp_session_find_from_fd(p, uap->necp_fd, &fp, &session); if (error != 0) { NECPLOG(LOG_ERR, "necp_session_action find fd error (%d)", error); return error; @@ -1173,8 +1185,7 @@ necp_session_action(struct proc *p, struct necp_session_action_args *uap, int *r done: NECP_SESSION_UNLOCK(session); - file_drop(uap->necp_fd); - + fp_drop(p, uap->necp_fd, fp, 0); return return_value; } @@ -1294,6 +1305,8 @@ necp_init(void) { errno_t result = 0; + necp_log_handle = os_log_create("com.apple.xnu.net.necp", "necp"); + necp_kernel_policy_grp_attr = lck_grp_attr_alloc_init(); if (necp_kernel_policy_grp_attr == NULL) { NECPLOG0(LOG_ERR, "lck_grp_attr_alloc_init failed"); @@ -1606,7 +1619,7 @@ necp_get_tlv_at_offset(u_int8_t *buffer, u_int32_t buffer_length, // Validate that buffer has enough room for any TLV if (tlv_offset + sizeof(u_int8_t) + sizeof(u_int32_t) > buffer_length) { - NECPLOG(LOG_ERR, "necp_get_tlv_at_offset buffer_length is too small for TLV (%u < %u)", + NECPLOG(LOG_ERR, "necp_get_tlv_at_offset buffer_length is too small for TLV (%u < %lu)", buffer_length, tlv_offset + sizeof(u_int8_t) + sizeof(u_int32_t)); return EINVAL; } @@ -1614,7 +1627,7 @@ necp_get_tlv_at_offset(u_int8_t *buffer, u_int32_t buffer_length, // Validate that buffer has enough room for this TLV u_int32_t tlv_length = necp_buffer_get_tlv_length(buffer, tlv_offset); if (tlv_length > buffer_length - (tlv_offset + sizeof(u_int8_t) + sizeof(u_int32_t))) { - NECPLOG(LOG_ERR, "necp_get_tlv_at_offset buffer_length is too small for TLV of length %u (%u < %u)", + NECPLOG(LOG_ERR, "necp_get_tlv_at_offset buffer_length is too small for TLV of length %u (%u < %lu)", tlv_length, buffer_length, tlv_offset + sizeof(u_int8_t) + sizeof(u_int32_t) + tlv_length); return EINVAL; } @@ -1790,12 +1803,18 @@ necp_policy_result_is_valid(u_int8_t *buffer, u_int32_t length) u_int8_t type = necp_policy_result_get_type_from_buffer(buffer, length); u_int32_t parameter_length = necp_policy_result_get_parameter_length_from_buffer(buffer, length); switch (type) { - case NECP_POLICY_RESULT_PASS: + case NECP_POLICY_RESULT_PASS: { + if (parameter_length == 0 || parameter_length == sizeof(u_int32_t)) { + validated = TRUE; + } + break; + } + case NECP_POLICY_RESULT_DROP: { if (parameter_length == 0 || parameter_length == sizeof(u_int32_t)) { 
validated = TRUE; } break; - case NECP_POLICY_RESULT_DROP: + } case NECP_POLICY_RESULT_ROUTE_RULES: case NECP_POLICY_RESULT_SCOPED_DIRECT: case NECP_POLICY_RESULT_ALLOW_UNENTITLED: { @@ -1899,7 +1918,8 @@ static inline bool necp_policy_condition_requires_real_application(u_int8_t *buffer, u_int32_t length) { u_int8_t type = necp_policy_condition_get_type_from_buffer(buffer, length); - return type == NECP_POLICY_CONDITION_ENTITLEMENT; + u_int32_t condition_length = necp_policy_condition_get_value_length_from_buffer(buffer, length); + return type == NECP_POLICY_CONDITION_ENTITLEMENT && condition_length > 0; } static bool @@ -1959,6 +1979,13 @@ necp_policy_condition_is_valid(u_int8_t *buffer, u_int32_t length, u_int8_t poli } break; } + case NECP_POLICY_CONDITION_SDK_VERSION: { + if (!(flags & NECP_POLICY_CONDITION_FLAGS_NEGATIVE) && + condition_length >= sizeof(struct necp_policy_condition_sdk_version)) { + validated = TRUE; + } + break; + } case NECP_POLICY_CONDITION_IP_PROTOCOL: { if (condition_length >= sizeof(u_int16_t)) { validated = TRUE; @@ -2040,6 +2067,15 @@ necp_policy_condition_is_valid(u_int8_t *buffer, u_int32_t length, u_int8_t poli validated = TRUE; break; } + case NECP_POLICY_CONDITION_PACKET_FILTER_TAGS: { + if (condition_length >= sizeof(u_int16_t)) { + u_int16_t packet_filter_tags = *(u_int16_t *)(void *)condition_value; + if (packet_filter_tags > 0 && packet_filter_tags <= NECP_POLICY_CONDITION_PACKET_FILTER_TAG_MAX) { + validated = TRUE; + } + } + break; + } default: { validated = FALSE; break; @@ -2547,13 +2583,13 @@ necp_handle_policy_dump_all(user_addr_t out_buffer, size_t out_buffer_length) MALLOC(tlv_buffer_pointers, u_int8_t * *, sizeof(u_int8_t *) * policy_count, M_NECP, M_NOWAIT | M_ZERO); if (tlv_buffer_pointers == NULL) { - NECPLOG(LOG_DEBUG, "Failed to allocate tlv_buffer_pointers (%u bytes)", sizeof(u_int8_t *) * policy_count); + NECPLOG(LOG_DEBUG, "Failed to allocate tlv_buffer_pointers (%lu bytes)", sizeof(u_int8_t *) * policy_count); UNLOCK_AND_REPORT_ERROR(&necp_kernel_policy_lock, NECP_ERROR_INTERNAL); } MALLOC(tlv_buffer_lengths, u_int32_t *, sizeof(u_int32_t) * policy_count, M_NECP, M_NOWAIT | M_ZERO); if (tlv_buffer_lengths == NULL) { - NECPLOG(LOG_DEBUG, "Failed to allocate tlv_buffer_lengths (%u bytes)", sizeof(u_int32_t) * policy_count); + NECPLOG(LOG_DEBUG, "Failed to allocate tlv_buffer_lengths (%lu bytes)", sizeof(u_int32_t) * policy_count); UNLOCK_AND_REPORT_ERROR(&necp_kernel_policy_lock, NECP_ERROR_INTERNAL); } @@ -2651,6 +2687,10 @@ necp_handle_policy_dump_all(user_addr_t out_buffer, size_t out_buffer_length) if (condition_mask & NECP_KERNEL_CONDITION_PLATFORM_BINARY) { num_conditions++; } + if (condition_mask & NECP_KERNEL_CONDITION_SDK_VERSION) { + condition_tlv_length += sizeof(struct necp_policy_condition_sdk_version); + num_conditions++; + } if (condition_mask & NECP_KERNEL_CONDITION_LOCAL_NETWORKS) { num_conditions++; } @@ -2689,6 +2729,10 @@ necp_handle_policy_dump_all(user_addr_t out_buffer, size_t out_buffer_length) condition_tlv_length += identifier_len; num_conditions++; } + if (condition_mask & NECP_KERNEL_CONDITION_PACKET_FILTER_TAGS) { + condition_tlv_length += sizeof(u_int16_t); + num_conditions++; + } } condition_tlv_length += num_conditions * (sizeof(u_int8_t) + sizeof(u_int32_t)); // These are for the condition TLVs. The space for "value" is already accounted for above. 
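/*
 * [Editor's note, not part of the patch] The dump path above sizes every condition
 * as a TLV: a 1-byte type, a 4-byte length, then the value bytes. That is why the
 * new SDK-version and packet-filter-tags conditions add
 * sizeof(struct necp_policy_condition_sdk_version) and sizeof(u_int16_t)
 * respectively, and why num_conditions * (sizeof(u_int8_t) + sizeof(u_int32_t))
 * is added at the end for the per-TLV headers. A minimal sketch of that
 * accounting (hypothetical helper, for illustration only):
 *
 *	static size_t
 *	necp_condition_tlv_size(size_t value_length)
 *	{
 *		// 1-byte type + 4-byte length header, followed by the value bytes
 *		return sizeof(u_int8_t) + sizeof(u_int32_t) + value_length;
 *	}
 */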
@@ -2792,6 +2836,11 @@ necp_handle_policy_dump_all(user_addr_t out_buffer, size_t out_buffer_length) if (condition_mask & NECP_KERNEL_CONDITION_PLATFORM_BINARY) { cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_PLATFORM_BINARY, 0, "", cond_buf, condition_tlv_length); } + if (condition_mask & NECP_KERNEL_CONDITION_SDK_VERSION) { + cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_SDK_VERSION, + sizeof(policy->cond_sdk_version), &policy->cond_sdk_version, + cond_buf, condition_tlv_length); + } if (condition_mask & NECP_KERNEL_CONDITION_LOCAL_START) { if (condition_mask & NECP_KERNEL_CONDITION_LOCAL_END) { struct necp_policy_condition_addr_range range; @@ -2840,6 +2889,9 @@ necp_handle_policy_dump_all(user_addr_t out_buffer, size_t out_buffer_length) cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_SIGNING_IDENTIFIER, strlen(policy->cond_signing_identifier) + 1, policy->cond_signing_identifier, cond_buf, condition_tlv_length); } + if (condition_mask & NECP_KERNEL_CONDITION_PACKET_FILTER_TAGS) { + cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_PACKET_FILTER_TAGS, sizeof(policy->cond_packet_filter_tags), &policy->cond_packet_filter_tags, cond_buf, condition_tlv_length); + } } cursor = necp_buffer_write_tlv(cursor, NECP_TLV_POLICY_CONDITION, cond_buf_cursor - cond_buf, cond_buf, tlv_buffer, total_allocated_bytes); @@ -2860,14 +2912,14 @@ necp_handle_policy_dump_all(user_addr_t out_buffer, size_t out_buffer_length) // Copy out if (out_buffer != 0) { if (out_buffer_length < total_tlv_len + sizeof(u_int32_t)) { - NECPLOG(LOG_DEBUG, "out_buffer_length too small (%u < %u)", out_buffer_length, total_tlv_len + sizeof(u_int32_t)); + NECPLOG(LOG_DEBUG, "out_buffer_length too small (%lu < %lu)", out_buffer_length, total_tlv_len + sizeof(u_int32_t)); REPORT_ERROR(NECP_ERROR_INVALID_TLV); } // Allow malloc to wait, since the total buffer may be large and we are not holding any locks MALLOC(result_buf, u_int8_t *, total_tlv_len + sizeof(u_int32_t), M_NECP, M_WAITOK | M_ZERO); if (result_buf == NULL) { - NECPLOG(LOG_DEBUG, "Failed to allocate result_buffer (%u bytes)", total_tlv_len + sizeof(u_int32_t)); + NECPLOG(LOG_DEBUG, "Failed to allocate result_buffer (%lu bytes)", total_tlv_len + sizeof(u_int32_t)); REPORT_ERROR(NECP_ERROR_INTERNAL); } @@ -2885,7 +2937,7 @@ necp_handle_policy_dump_all(user_addr_t out_buffer, size_t out_buffer_length) int copy_error = copyout(result_buf, out_buffer, total_tlv_len + sizeof(u_int32_t)); if (copy_error) { - NECPLOG(LOG_DEBUG, "Failed to copy out result_buffer (%u bytes)", total_tlv_len + sizeof(u_int32_t)); + NECPLOG(LOG_DEBUG, "Failed to copy out result_buffer (%lu bytes)", total_tlv_len + sizeof(u_int32_t)); REPORT_ERROR(NECP_ERROR_INTERNAL); } } @@ -2931,12 +2983,7 @@ necp_policy_create(struct necp_session *session, necp_policy_order order, u_int8 goto done; } - MALLOC_ZONE(new_policy, struct necp_session_policy *, sizeof(*new_policy), M_NECP_SESSION_POLICY, M_WAITOK); - if (new_policy == NULL) { - goto done; - } - - memset(new_policy, 0, sizeof(*new_policy)); // M_ZERO is not supported for MALLOC_ZONE + new_policy = zalloc_flags(necp_session_policy_zone, Z_WAITOK | Z_ZERO); new_policy->applied = FALSE; new_policy->pending_deletion = FALSE; new_policy->pending_update = FALSE; @@ -3059,7 +3106,7 @@ necp_policy_delete(struct necp_session *session, struct necp_session_policy *pol policy->route_rules = NULL; } - FREE_ZONE(policy, sizeof(*policy), 
M_NECP_SESSION_POLICY); + zfree(necp_session_policy_zone, policy); if (necp_debug) { NECPLOG0(LOG_DEBUG, "Removed NECP policy"); @@ -3183,6 +3230,8 @@ necp_policy_apply(struct necp_session *session, struct necp_session_policy *poli u_int8_t ultimate_result = 0; u_int32_t secondary_result = 0; struct necp_policy_condition_agent_type cond_agent_type = {}; + struct necp_policy_condition_sdk_version cond_sdk_version = {}; + u_int16_t cond_packet_filter_tags = 0; necp_kernel_policy_result_parameter secondary_result_parameter; memset(&secondary_result_parameter, 0, sizeof(secondary_result_parameter)); u_int32_t cond_last_interface_index = 0; @@ -3240,6 +3289,14 @@ necp_policy_apply(struct necp_session *session, struct necp_session_policy *poli socket_only_conditions = TRUE; break; } + case NECP_POLICY_CONDITION_SDK_VERSION: { + if (condition_length >= sizeof(cond_sdk_version)) { + master_condition_mask |= NECP_KERNEL_CONDITION_SDK_VERSION; + memcpy(&cond_sdk_version, condition_value, sizeof(cond_sdk_version)); + socket_only_conditions = TRUE; + } + break; + } case NECP_POLICY_CONDITION_DOMAIN: { // Make sure there is only one such rule if (condition_length > 0 && cond_domain == NULL) { @@ -3528,6 +3585,17 @@ necp_policy_apply(struct necp_session *session, struct necp_session_policy *poli } break; } + case NECP_POLICY_CONDITION_PACKET_FILTER_TAGS: { + if (condition_length >= sizeof(u_int16_t)) { + master_condition_mask |= NECP_KERNEL_CONDITION_PACKET_FILTER_TAGS; + if (condition_is_negative) { + master_condition_negated_mask |= NECP_KERNEL_CONDITION_PACKET_FILTER_TAGS; + } + memcpy(&cond_packet_filter_tags, condition_value, sizeof(cond_packet_filter_tags)); + socket_ip_conditions = TRUE; + } + break; + } default: { break; } @@ -3557,6 +3625,12 @@ necp_policy_apply(struct necp_session *session, struct necp_session_policy *poli break; } case NECP_POLICY_RESULT_DROP: { + u_int32_t drop_flags = 0; + if (necp_policy_result_get_parameter_length_from_buffer(policy->result, policy->result_size) > 0) { + if (necp_policy_get_result_parameter(policy, (u_int8_t *)&drop_flags, sizeof(drop_flags))) { + ultimate_result_parameter.drop_flags = drop_flags; + } + } if (socket_only_conditions) { // socket_ip_conditions can be TRUE or FALSE socket_layer_non_id_conditions = TRUE; } else if (socket_ip_conditions) { @@ -3708,7 +3782,7 @@ necp_policy_apply(struct necp_session *session, struct necp_session_policy *poli } if (socket_layer_non_id_conditions) { - necp_kernel_policy_id policy_id = necp_kernel_socket_policy_add(policy->order, session->session_order, session->proc_pid, master_condition_mask, master_condition_negated_mask, cond_app_id, cond_real_app_id, cond_custom_entitlement, cond_account_id, cond_domain, cond_pid, cond_uid, cond_bound_interface, cond_traffic_class, cond_protocol, &cond_local_start, &cond_local_end, cond_local_prefix, &cond_remote_start, &cond_remote_end, cond_remote_prefix, &cond_agent_type, cond_client_flags, cond_signing_identifier, ultimate_result, ultimate_result_parameter); + necp_kernel_policy_id policy_id = necp_kernel_socket_policy_add(policy->order, session->session_order, session->proc_pid, master_condition_mask, master_condition_negated_mask, cond_app_id, cond_real_app_id, cond_custom_entitlement, cond_account_id, cond_domain, cond_pid, cond_uid, cond_bound_interface, cond_traffic_class, cond_protocol, &cond_local_start, &cond_local_end, cond_local_prefix, &cond_remote_start, &cond_remote_end, cond_remote_prefix, &cond_agent_type, &cond_sdk_version, cond_client_flags, 
cond_signing_identifier, cond_packet_filter_tags, ultimate_result, ultimate_result_parameter); if (policy_id == 0) { NECPLOG0(LOG_DEBUG, "Error applying socket kernel policy"); @@ -3725,7 +3799,7 @@ necp_policy_apply(struct necp_session *session, struct necp_session_policy *poli condition_mask |= NECP_KERNEL_CONDITION_POLICY_ID; } - necp_kernel_policy_id policy_id = necp_kernel_ip_output_policy_add(policy->order, NECP_KERNEL_POLICY_SUBORDER_NON_ID_CONDITIONS, session->session_order, session->proc_pid, condition_mask, master_condition_negated_mask, NECP_KERNEL_POLICY_ID_NONE, cond_bound_interface, 0, cond_protocol, &cond_local_start, &cond_local_end, cond_local_prefix, &cond_remote_start, &cond_remote_end, cond_remote_prefix, ultimate_result, ultimate_result_parameter); + necp_kernel_policy_id policy_id = necp_kernel_ip_output_policy_add(policy->order, NECP_KERNEL_POLICY_SUBORDER_NON_ID_CONDITIONS, session->session_order, session->proc_pid, condition_mask, master_condition_negated_mask, NECP_KERNEL_POLICY_ID_NONE, cond_bound_interface, 0, cond_protocol, &cond_local_start, &cond_local_end, cond_local_prefix, &cond_remote_start, &cond_remote_end, cond_remote_prefix, cond_packet_filter_tags, ultimate_result, ultimate_result_parameter); if (policy_id == 0) { NECPLOG0(LOG_DEBUG, "Error applying IP output kernel policy"); @@ -3736,7 +3810,7 @@ necp_policy_apply(struct necp_session *session, struct necp_session_policy *poli } if (ip_output_layer_id_condition) { - necp_kernel_policy_id policy_id = necp_kernel_ip_output_policy_add(policy->order, NECP_KERNEL_POLICY_SUBORDER_ID_CONDITION, session->session_order, session->proc_pid, NECP_KERNEL_CONDITION_POLICY_ID | NECP_KERNEL_CONDITION_ALL_INTERFACES, 0, cond_ip_output_layer_id, NULL, 0, 0, NULL, NULL, 0, NULL, NULL, 0, ultimate_result, ultimate_result_parameter); + necp_kernel_policy_id policy_id = necp_kernel_ip_output_policy_add(policy->order, NECP_KERNEL_POLICY_SUBORDER_ID_CONDITION, session->session_order, session->proc_pid, NECP_KERNEL_CONDITION_POLICY_ID | NECP_KERNEL_CONDITION_ALL_INTERFACES, 0, cond_ip_output_layer_id, NULL, 0, 0, NULL, NULL, 0, NULL, NULL, 0, 0, ultimate_result, ultimate_result_parameter); if (policy_id == 0) { NECPLOG0(LOG_DEBUG, "Error applying IP output kernel policy"); @@ -3748,7 +3822,7 @@ necp_policy_apply(struct necp_session *session, struct necp_session_policy *poli // Extra policies for IP Output tunnels for when packets loop back if (ip_output_layer_tunnel_condition_from_id) { - necp_kernel_policy_id policy_id = necp_kernel_ip_output_policy_add(policy->order, NECP_KERNEL_POLICY_SUBORDER_NON_ID_TUNNEL_CONDITION, session->session_order, session->proc_pid, NECP_KERNEL_CONDITION_POLICY_ID | NECP_KERNEL_CONDITION_LAST_INTERFACE | NECP_KERNEL_CONDITION_ALL_INTERFACES, 0, policy->kernel_ip_output_policies[NECP_KERNEL_POLICY_SUBORDER_NON_ID_CONDITIONS], NULL, cond_last_interface_index, 0, NULL, NULL, 0, NULL, NULL, 0, secondary_result, secondary_result_parameter); + necp_kernel_policy_id policy_id = necp_kernel_ip_output_policy_add(policy->order, NECP_KERNEL_POLICY_SUBORDER_NON_ID_TUNNEL_CONDITION, session->session_order, session->proc_pid, NECP_KERNEL_CONDITION_POLICY_ID | NECP_KERNEL_CONDITION_LAST_INTERFACE | NECP_KERNEL_CONDITION_ALL_INTERFACES, 0, policy->kernel_ip_output_policies[NECP_KERNEL_POLICY_SUBORDER_NON_ID_CONDITIONS], NULL, cond_last_interface_index, 0, NULL, NULL, 0, NULL, NULL, 0, 0, secondary_result, secondary_result_parameter); if (policy_id == 0) { NECPLOG0(LOG_DEBUG, "Error applying IP output kernel 
policy"); @@ -3759,7 +3833,7 @@ necp_policy_apply(struct necp_session *session, struct necp_session_policy *poli } if (ip_output_layer_tunnel_condition_from_id) { - necp_kernel_policy_id policy_id = necp_kernel_ip_output_policy_add(policy->order, NECP_KERNEL_POLICY_SUBORDER_ID_TUNNEL_CONDITION, session->session_order, session->proc_pid, NECP_KERNEL_CONDITION_POLICY_ID | NECP_KERNEL_CONDITION_LAST_INTERFACE | NECP_KERNEL_CONDITION_ALL_INTERFACES, 0, policy->kernel_ip_output_policies[NECP_KERNEL_POLICY_SUBORDER_ID_CONDITION], NULL, cond_last_interface_index, 0, NULL, NULL, 0, NULL, NULL, 0, secondary_result, secondary_result_parameter); + necp_kernel_policy_id policy_id = necp_kernel_ip_output_policy_add(policy->order, NECP_KERNEL_POLICY_SUBORDER_ID_TUNNEL_CONDITION, session->session_order, session->proc_pid, NECP_KERNEL_CONDITION_POLICY_ID | NECP_KERNEL_CONDITION_LAST_INTERFACE | NECP_KERNEL_CONDITION_ALL_INTERFACES, 0, policy->kernel_ip_output_policies[NECP_KERNEL_POLICY_SUBORDER_ID_CONDITION], NULL, cond_last_interface_index, 0, NULL, NULL, 0, NULL, NULL, 0, 0, secondary_result, secondary_result_parameter); if (policy_id == 0) { NECPLOG0(LOG_DEBUG, "Error applying IP output kernel policy"); @@ -3877,20 +3951,16 @@ necp_kernel_policy_get_new_id(bool socket_level) return newid; } -#define NECP_KERNEL_VALID_SOCKET_CONDITIONS (NECP_KERNEL_CONDITION_APP_ID | NECP_KERNEL_CONDITION_REAL_APP_ID | NECP_KERNEL_CONDITION_DOMAIN | NECP_KERNEL_CONDITION_ACCOUNT_ID | NECP_KERNEL_CONDITION_PID | NECP_KERNEL_CONDITION_UID | NECP_KERNEL_CONDITION_ALL_INTERFACES | NECP_KERNEL_CONDITION_BOUND_INTERFACE | NECP_KERNEL_CONDITION_TRAFFIC_CLASS | NECP_KERNEL_CONDITION_PROTOCOL | NECP_KERNEL_CONDITION_LOCAL_START | NECP_KERNEL_CONDITION_LOCAL_END | NECP_KERNEL_CONDITION_LOCAL_PREFIX | NECP_KERNEL_CONDITION_REMOTE_START | NECP_KERNEL_CONDITION_REMOTE_END | NECP_KERNEL_CONDITION_REMOTE_PREFIX | NECP_KERNEL_CONDITION_ENTITLEMENT | NECP_KERNEL_CONDITION_CUSTOM_ENTITLEMENT | NECP_KERNEL_CONDITION_AGENT_TYPE | NECP_KERNEL_CONDITION_HAS_CLIENT | NECP_KERNEL_CONDITION_LOCAL_NETWORKS | NECP_KERNEL_CONDITION_CLIENT_FLAGS | NECP_KERNEL_CONDITION_LOCAL_EMPTY | NECP_KERNEL_CONDITION_REMOTE_EMPTY | NECP_KERNEL_CONDITION_PLATFORM_BINARY | NECP_KERNEL_CONDITION_SIGNING_IDENTIFIER) +#define NECP_KERNEL_VALID_SOCKET_CONDITIONS (NECP_KERNEL_CONDITION_APP_ID | NECP_KERNEL_CONDITION_REAL_APP_ID | NECP_KERNEL_CONDITION_DOMAIN | NECP_KERNEL_CONDITION_ACCOUNT_ID | NECP_KERNEL_CONDITION_PID | NECP_KERNEL_CONDITION_UID | NECP_KERNEL_CONDITION_ALL_INTERFACES | NECP_KERNEL_CONDITION_BOUND_INTERFACE | NECP_KERNEL_CONDITION_TRAFFIC_CLASS | NECP_KERNEL_CONDITION_PROTOCOL | NECP_KERNEL_CONDITION_LOCAL_START | NECP_KERNEL_CONDITION_LOCAL_END | NECP_KERNEL_CONDITION_LOCAL_PREFIX | NECP_KERNEL_CONDITION_REMOTE_START | NECP_KERNEL_CONDITION_REMOTE_END | NECP_KERNEL_CONDITION_REMOTE_PREFIX | NECP_KERNEL_CONDITION_ENTITLEMENT | NECP_KERNEL_CONDITION_CUSTOM_ENTITLEMENT | NECP_KERNEL_CONDITION_AGENT_TYPE | NECP_KERNEL_CONDITION_HAS_CLIENT | NECP_KERNEL_CONDITION_LOCAL_NETWORKS | NECP_KERNEL_CONDITION_CLIENT_FLAGS | NECP_KERNEL_CONDITION_LOCAL_EMPTY | NECP_KERNEL_CONDITION_REMOTE_EMPTY | NECP_KERNEL_CONDITION_PLATFORM_BINARY | NECP_KERNEL_CONDITION_SDK_VERSION | NECP_KERNEL_CONDITION_SIGNING_IDENTIFIER | NECP_KERNEL_CONDITION_PACKET_FILTER_TAGS) static necp_kernel_policy_id -necp_kernel_socket_policy_add(necp_policy_order order, u_int32_t session_order, int session_pid, u_int32_t condition_mask, u_int32_t condition_negated_mask, necp_app_id cond_app_id, 
necp_app_id cond_real_app_id, char *cond_custom_entitlement, u_int32_t cond_account_id, char *cond_domain, pid_t cond_pid, uid_t cond_uid, ifnet_t cond_bound_interface, struct necp_policy_condition_tc_range cond_traffic_class, u_int16_t cond_protocol, union necp_sockaddr_union *cond_local_start, union necp_sockaddr_union *cond_local_end, u_int8_t cond_local_prefix, union necp_sockaddr_union *cond_remote_start, union necp_sockaddr_union *cond_remote_end, u_int8_t cond_remote_prefix, struct necp_policy_condition_agent_type *cond_agent_type, u_int32_t cond_client_flags, char *cond_signing_identifier, necp_kernel_policy_result result, necp_kernel_policy_result_parameter result_parameter) +necp_kernel_socket_policy_add(necp_policy_order order, u_int32_t session_order, int session_pid, u_int32_t condition_mask, u_int32_t condition_negated_mask, necp_app_id cond_app_id, necp_app_id cond_real_app_id, char *cond_custom_entitlement, u_int32_t cond_account_id, char *cond_domain, pid_t cond_pid, uid_t cond_uid, ifnet_t cond_bound_interface, struct necp_policy_condition_tc_range cond_traffic_class, u_int16_t cond_protocol, union necp_sockaddr_union *cond_local_start, union necp_sockaddr_union *cond_local_end, u_int8_t cond_local_prefix, union necp_sockaddr_union *cond_remote_start, union necp_sockaddr_union *cond_remote_end, u_int8_t cond_remote_prefix, struct necp_policy_condition_agent_type *cond_agent_type, struct necp_policy_condition_sdk_version *cond_sdk_version, u_int32_t cond_client_flags, char *cond_signing_identifier, u_int16_t cond_packet_filter_tags, necp_kernel_policy_result result, necp_kernel_policy_result_parameter result_parameter) { struct necp_kernel_socket_policy *new_kernel_policy = NULL; struct necp_kernel_socket_policy *tmp_kernel_policy = NULL; - MALLOC_ZONE(new_kernel_policy, struct necp_kernel_socket_policy *, sizeof(*new_kernel_policy), M_NECP_SOCKET_POLICY, M_WAITOK); - if (new_kernel_policy == NULL) { - goto done; - } + new_kernel_policy = zalloc_flags(necp_socket_policy_zone, Z_WAITOK | Z_ZERO); - memset(new_kernel_policy, 0, sizeof(*new_kernel_policy)); // M_ZERO is not supported for MALLOC_ZONE new_kernel_policy->id = necp_kernel_policy_get_new_id(true); new_kernel_policy->order = order; new_kernel_policy->session_order = session_order; @@ -3904,8 +3974,8 @@ necp_kernel_socket_policy_add(necp_policy_order order, u_int32_t session_order, if ((new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REAL_APP_ID) && !(new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_APP_ID)) { new_kernel_policy->condition_mask &= ~NECP_KERNEL_CONDITION_REAL_APP_ID; } - if ((new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_ENTITLEMENT) && !(new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_APP_ID)) { - new_kernel_policy->condition_mask &= ~NECP_KERNEL_CONDITION_ENTITLEMENT; + if ((new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_CUSTOM_ENTITLEMENT) && !(new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_APP_ID)) { + new_kernel_policy->condition_mask &= ~NECP_KERNEL_CONDITION_CUSTOM_ENTITLEMENT; } if ((new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_END) && (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_PREFIX)) { new_kernel_policy->condition_mask &= ~NECP_KERNEL_CONDITION_LOCAL_PREFIX; @@ -3978,12 +4048,18 @@ necp_kernel_socket_policy_add(necp_policy_order order, u_int32_t session_order, if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_AGENT_TYPE) { memcpy(&new_kernel_policy->cond_agent_type, 
cond_agent_type, sizeof(*cond_agent_type)); } + if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_SDK_VERSION) { + memcpy(&new_kernel_policy->cond_sdk_version, cond_sdk_version, sizeof(*cond_sdk_version)); + } if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_CLIENT_FLAGS) { new_kernel_policy->cond_client_flags = cond_client_flags; } if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_SIGNING_IDENTIFIER) { new_kernel_policy->cond_signing_identifier = cond_signing_identifier; } + if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_PACKET_FILTER_TAGS) { + new_kernel_policy->cond_packet_filter_tags = cond_packet_filter_tags; + } new_kernel_policy->result = result; memcpy(&new_kernel_policy->result_parameter, &result_parameter, sizeof(result_parameter)); @@ -3992,7 +4068,7 @@ necp_kernel_socket_policy_add(necp_policy_order order, u_int32_t session_order, NECPLOG(LOG_DEBUG, "Added kernel policy: socket, id=%d, mask=%x\n", new_kernel_policy->id, new_kernel_policy->condition_mask); } LIST_INSERT_SORTED_TWICE_ASCENDING(&necp_kernel_socket_policies, new_kernel_policy, chain, session_order, order, tmp_kernel_policy); -done: + return new_kernel_policy ? new_kernel_policy->id : 0; } @@ -4046,7 +4122,7 @@ necp_kernel_socket_policy_delete(necp_kernel_policy_id policy_id) policy->cond_signing_identifier = NULL; } - FREE_ZONE(policy, sizeof(*policy), M_NECP_SOCKET_POLICY); + zfree(necp_socket_policy_zone, policy); return TRUE; } @@ -4071,7 +4147,7 @@ necp_get_result_description(char *result_string, necp_kernel_policy_result resul break; } case NECP_KERNEL_POLICY_RESULT_DROP: { - snprintf(result_string, MAX_RESULT_STRING_LEN, "Drop"); + snprintf(result_string, MAX_RESULT_STRING_LEN, "Drop (%X)", result_parameter.drop_flags); break; } case NECP_KERNEL_POLICY_RESULT_SOCKET_DIVERT: { @@ -4512,6 +4588,16 @@ necp_kernel_socket_policy_is_unnecessary(struct necp_kernel_socket_policy *polic continue; } + if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_SDK_VERSION && + memcmp(&compared_policy->cond_sdk_version, &policy->cond_sdk_version, sizeof(policy->cond_sdk_version)) == 0) { + continue; + } + + if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_PACKET_FILTER_TAGS && + memcmp(&compared_policy->cond_packet_filter_tags, &policy->cond_packet_filter_tags, sizeof(policy->cond_packet_filter_tags)) == 0) { + continue; + } + return TRUE; } @@ -4602,7 +4688,7 @@ necp_kernel_socket_policies_reprocess(void) // Fill out maps LIST_FOREACH(kernel_policy, &necp_kernel_socket_policies, chain) { // Add app layer policies - if (!necp_kernel_socket_policy_is_unnecessary(kernel_policy, necp_kernel_socket_policies_app_layer_map, app_layer_current_free_index)) { + if (!necp_dedup_policies || !necp_kernel_socket_policy_is_unnecessary(kernel_policy, necp_kernel_socket_policies_app_layer_map, app_layer_current_free_index)) { necp_kernel_socket_policies_app_layer_map[app_layer_current_free_index] = kernel_policy; app_layer_current_free_index++; necp_kernel_socket_policies_app_layer_map[app_layer_current_free_index] = NULL; @@ -4617,7 +4703,7 @@ necp_kernel_socket_policies_reprocess(void) if (!(kernel_policy->condition_mask & NECP_KERNEL_CONDITION_APP_ID) || kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_APP_ID) { for (app_i = 0; app_i < NECP_KERNEL_SOCKET_POLICIES_MAP_NUM_APP_ID_BUCKETS; app_i++) { - if (!necp_kernel_socket_policy_is_unnecessary(kernel_policy, necp_kernel_socket_policies_map[app_i], bucket_current_free_index[app_i])) { + if 
(!necp_dedup_policies || !necp_kernel_socket_policy_is_unnecessary(kernel_policy, necp_kernel_socket_policies_map[app_i], bucket_current_free_index[app_i])) { (necp_kernel_socket_policies_map[app_i])[(bucket_current_free_index[app_i])] = kernel_policy; bucket_current_free_index[app_i]++; (necp_kernel_socket_policies_map[app_i])[(bucket_current_free_index[app_i])] = NULL; @@ -4625,7 +4711,7 @@ necp_kernel_socket_policies_reprocess(void) } } else { app_i = NECP_SOCKET_MAP_APP_ID_TO_BUCKET(kernel_policy->cond_app_id); - if (!necp_kernel_socket_policy_is_unnecessary(kernel_policy, necp_kernel_socket_policies_map[app_i], bucket_current_free_index[app_i])) { + if (!necp_dedup_policies || !necp_kernel_socket_policy_is_unnecessary(kernel_policy, necp_kernel_socket_policies_map[app_i], bucket_current_free_index[app_i])) { (necp_kernel_socket_policies_map[app_i])[(bucket_current_free_index[app_i])] = kernel_policy; bucket_current_free_index[app_i]++; (necp_kernel_socket_policies_map[app_i])[(bucket_current_free_index[app_i])] = NULL; @@ -5401,19 +5487,14 @@ necp_kernel_socket_policies_update_uuid_table(void) return TRUE; } -#define NECP_KERNEL_VALID_IP_OUTPUT_CONDITIONS (NECP_KERNEL_CONDITION_ALL_INTERFACES | NECP_KERNEL_CONDITION_BOUND_INTERFACE | NECP_KERNEL_CONDITION_PROTOCOL | NECP_KERNEL_CONDITION_LOCAL_START | NECP_KERNEL_CONDITION_LOCAL_END | NECP_KERNEL_CONDITION_LOCAL_PREFIX | NECP_KERNEL_CONDITION_REMOTE_START | NECP_KERNEL_CONDITION_REMOTE_END | NECP_KERNEL_CONDITION_REMOTE_PREFIX | NECP_KERNEL_CONDITION_POLICY_ID | NECP_KERNEL_CONDITION_LAST_INTERFACE | NECP_KERNEL_CONDITION_LOCAL_NETWORKS) +#define NECP_KERNEL_VALID_IP_OUTPUT_CONDITIONS (NECP_KERNEL_CONDITION_ALL_INTERFACES | NECP_KERNEL_CONDITION_BOUND_INTERFACE | NECP_KERNEL_CONDITION_PROTOCOL | NECP_KERNEL_CONDITION_LOCAL_START | NECP_KERNEL_CONDITION_LOCAL_END | NECP_KERNEL_CONDITION_LOCAL_PREFIX | NECP_KERNEL_CONDITION_REMOTE_START | NECP_KERNEL_CONDITION_REMOTE_END | NECP_KERNEL_CONDITION_REMOTE_PREFIX | NECP_KERNEL_CONDITION_POLICY_ID | NECP_KERNEL_CONDITION_LAST_INTERFACE | NECP_KERNEL_CONDITION_LOCAL_NETWORKS | NECP_KERNEL_CONDITION_PACKET_FILTER_TAGS) static necp_kernel_policy_id -necp_kernel_ip_output_policy_add(necp_policy_order order, necp_policy_order suborder, u_int32_t session_order, int session_pid, u_int32_t condition_mask, u_int32_t condition_negated_mask, necp_kernel_policy_id cond_policy_id, ifnet_t cond_bound_interface, u_int32_t cond_last_interface_index, u_int16_t cond_protocol, union necp_sockaddr_union *cond_local_start, union necp_sockaddr_union *cond_local_end, u_int8_t cond_local_prefix, union necp_sockaddr_union *cond_remote_start, union necp_sockaddr_union *cond_remote_end, u_int8_t cond_remote_prefix, necp_kernel_policy_result result, necp_kernel_policy_result_parameter result_parameter) +necp_kernel_ip_output_policy_add(necp_policy_order order, necp_policy_order suborder, u_int32_t session_order, int session_pid, u_int32_t condition_mask, u_int32_t condition_negated_mask, necp_kernel_policy_id cond_policy_id, ifnet_t cond_bound_interface, u_int32_t cond_last_interface_index, u_int16_t cond_protocol, union necp_sockaddr_union *cond_local_start, union necp_sockaddr_union *cond_local_end, u_int8_t cond_local_prefix, union necp_sockaddr_union *cond_remote_start, union necp_sockaddr_union *cond_remote_end, u_int8_t cond_remote_prefix, u_int16_t cond_packet_filter_tags, necp_kernel_policy_result result, necp_kernel_policy_result_parameter result_parameter) { struct necp_kernel_ip_output_policy 
*new_kernel_policy = NULL; struct necp_kernel_ip_output_policy *tmp_kernel_policy = NULL; - MALLOC_ZONE(new_kernel_policy, struct necp_kernel_ip_output_policy *, sizeof(*new_kernel_policy), M_NECP_IP_POLICY, M_WAITOK); - if (new_kernel_policy == NULL) { - goto done; - } - - memset(new_kernel_policy, 0, sizeof(*new_kernel_policy)); // M_ZERO is not supported for MALLOC_ZONE + new_kernel_policy = zalloc_flags(necp_ip_policy_zone, Z_WAITOK | Z_ZERO); new_kernel_policy->id = necp_kernel_policy_get_new_id(false); new_kernel_policy->suborder = suborder; new_kernel_policy->order = order; @@ -5467,6 +5548,9 @@ necp_kernel_ip_output_policy_add(necp_policy_order order, necp_policy_order subo if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_PREFIX) { new_kernel_policy->cond_remote_prefix = cond_remote_prefix; } + if (new_kernel_policy->condition_mask & NECP_KERNEL_CONDITION_PACKET_FILTER_TAGS) { + new_kernel_policy->cond_packet_filter_tags = cond_packet_filter_tags; + } new_kernel_policy->result = result; memcpy(&new_kernel_policy->result_parameter, &result_parameter, sizeof(result_parameter)); @@ -5475,7 +5559,7 @@ necp_kernel_ip_output_policy_add(necp_policy_order order, necp_policy_order subo NECPLOG(LOG_DEBUG, "Added kernel policy: ip output, id=%d, mask=%x\n", new_kernel_policy->id, new_kernel_policy->condition_mask); } LIST_INSERT_SORTED_THRICE_ASCENDING(&necp_kernel_ip_output_policies, new_kernel_policy, chain, session_order, order, suborder, tmp_kernel_policy); -done: + return new_kernel_policy ? new_kernel_policy->id : 0; } @@ -5514,7 +5598,7 @@ necp_kernel_ip_output_policy_delete(necp_kernel_policy_id policy_id) policy->cond_bound_interface = NULL; } - FREE_ZONE(policy, sizeof(*policy), M_NECP_IP_POLICY); + zfree(necp_ip_policy_zone, policy); return TRUE; } @@ -5751,7 +5835,7 @@ necp_kernel_ip_output_policies_reprocess(void) (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_NETWORKS) || kernel_policy->result == NECP_KERNEL_POLICY_RESULT_SKIP) { for (i = 0; i < NECP_KERNEL_IP_OUTPUT_POLICIES_MAP_NUM_ID_BUCKETS; i++) { - if (!necp_kernel_ip_output_policy_is_unnecessary(kernel_policy, necp_kernel_ip_output_policies_map[i], bucket_current_free_index[i])) { + if (!necp_dedup_policies || !necp_kernel_ip_output_policy_is_unnecessary(kernel_policy, necp_kernel_ip_output_policies_map[i], bucket_current_free_index[i])) { (necp_kernel_ip_output_policies_map[i])[(bucket_current_free_index[i])] = kernel_policy; bucket_current_free_index[i]++; (necp_kernel_ip_output_policies_map[i])[(bucket_current_free_index[i])] = NULL; @@ -5759,7 +5843,7 @@ necp_kernel_ip_output_policies_reprocess(void) } } else { i = NECP_IP_OUTPUT_MAP_ID_TO_BUCKET(kernel_policy->cond_policy_id); - if (!necp_kernel_ip_output_policy_is_unnecessary(kernel_policy, necp_kernel_ip_output_policies_map[i], bucket_current_free_index[i])) { + if (!necp_dedup_policies || !necp_kernel_ip_output_policy_is_unnecessary(kernel_policy, necp_kernel_ip_output_policies_map[i], bucket_current_free_index[i])) { (necp_kernel_ip_output_policies_map[i])[(bucket_current_free_index[i])] = kernel_policy; bucket_current_free_index[i]++; (necp_kernel_ip_output_policies_map[i])[(bucket_current_free_index[i])] = NULL; @@ -5973,6 +6057,81 @@ necp_get_parent_cred_result(proc_t proc, struct necp_socket_info *info) } } +// Some processes, due to particular entitlements, require using an NECP client to +// access networking. Returns true if the result should be a Drop. 
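/*
 * [Editor's note, not part of the patch] Both policy-match paths consult the helper
 * defined below, together with the multicast helper that follows it; when either
 * returns true the caller overrides whatever policy matched with a DROP verdict,
 * roughly:
 *
 *	if (necp_check_missing_client_drop(proc, &info) ||
 *	    necp_check_restricted_multicast_drop(proc, &info, false)) {
 *		returned_result->policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
 *		returned_result->routing_result = NECP_KERNEL_POLICY_RESULT_DROP;
 *	}
 *
 * This mirrors the call sites added later in this patch.
 */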
+static inline bool +necp_check_missing_client_drop(proc_t proc, struct necp_socket_info *info) +{ + task_t task = proc_task(proc ? proc : current_proc()); + + if (!info->has_client && + task != NULL && + IOTaskHasEntitlement(task, "com.apple.developer.on-demand-install-capable")) { + // Drop connections that don't use NECP clients and have the + // com.apple.developer.on-demand-install-capable entitlement. + // This effectively restricts those processes to only using + // an NECP-aware path for networking. + return true; + } else { + return false; + } +} + +static inline bool +necp_check_restricted_multicast_drop(proc_t proc, struct necp_socket_info *info, bool check_minor_version) +{ + if (!necp_restrict_multicast || proc == NULL) { + return false; + } + + // Check for multicast/broadcast here + if (info->remote_addr.sa.sa_family == AF_INET) { + if (!IN_MULTICAST(ntohl(info->remote_addr.sin.sin_addr.s_addr)) && + info->remote_addr.sin.sin_addr.s_addr != INADDR_BROADCAST) { + return false; + } + } else if (info->remote_addr.sa.sa_family == AF_INET6) { + if (!IN6_IS_ADDR_MULTICAST(&info->remote_addr.sin6.sin6_addr)) { + return false; + } + } else { + // Not IPv4/IPv6 + return false; + } + + if (necp_is_platform_binary(proc)) { + return false; + } + + const uint32_t platform = proc_platform(proc); + const uint32_t sdk = proc_sdk(proc); + + // Enforce for iOS, linked on or after version 14 + // If the caller set `check_minor_version`, only enforce starting at 14.TBD + if (platform != PLATFORM_IOS || + sdk == 0 || + (sdk >> 16) < 14 || +#if 0 + (check_minor_version && (sdk >> 16) == 14 && ((sdk >> 8) & 0xff) < TBD)) { +#else + (check_minor_version)) { +#endif + return false; + } + + // Allow entitled processes to use multicast + task_t task = proc_task(proc); + if (task != NULL && + IOTaskHasEntitlement(task, "com.apple.developer.networking.multicast")) { + return false; + } + + const uint32_t min_sdk = proc_min_sdk(proc); + NECPLOG(LOG_INFO, "Dropping unentitled multicast (SDK 0x%x, min 0x%x)", sdk, min_sdk); + + return true; +} + #define NECP_KERNEL_ADDRESS_TYPE_CONDITIONS (NECP_KERNEL_CONDITION_LOCAL_START | NECP_KERNEL_CONDITION_LOCAL_END | NECP_KERNEL_CONDITION_LOCAL_PREFIX | NECP_KERNEL_CONDITION_REMOTE_START | NECP_KERNEL_CONDITION_REMOTE_END | NECP_KERNEL_CONDITION_REMOTE_PREFIX | NECP_KERNEL_CONDITION_LOCAL_EMPTY | NECP_KERNEL_CONDITION_REMOTE_EMPTY | NECP_KERNEL_CONDITION_LOCAL_NETWORKS) static void necp_application_fillout_info_locked(uuid_t application_uuid, uuid_t real_application_uuid, uuid_t responsible_application_uuid, char *account, char *domain, pid_t pid, uid_t uid, u_int16_t protocol, u_int32_t bound_interface_index, u_int32_t traffic_class, union necp_sockaddr_union *local_addr, union necp_sockaddr_union *remote_addr, u_int16_t local_port, u_int16_t remote_port, bool has_client, proc_t proc, proc_t responsible_proc, u_int32_t drop_order, u_int32_t client_flags, struct necp_socket_info *info) @@ -6028,7 +6187,7 @@ necp_application_fillout_info_locked(uuid_t application_uuid, uuid_t real_applic } if (necp_kernel_application_policies_condition_mask & NECP_KERNEL_CONDITION_PLATFORM_BINARY && proc != NULL) { - info->is_platform_binary = csproc_get_platform_binary(proc) ? true : false; + info->is_platform_binary = necp_is_platform_binary(proc) ? 
true : false; } if (necp_kernel_application_policies_condition_mask & NECP_KERNEL_CONDITION_ACCOUNT_ID && account != NULL) { @@ -6042,7 +6201,8 @@ necp_application_fillout_info_locked(uuid_t application_uuid, uuid_t real_applic info->domain = domain; } - if (necp_kernel_application_policies_condition_mask & NECP_KERNEL_ADDRESS_TYPE_CONDITIONS) { + if (necp_restrict_multicast || + (necp_kernel_application_policies_condition_mask & NECP_KERNEL_ADDRESS_TYPE_CONDITIONS)) { if (local_addr && local_addr->sa.sa_len > 0) { memcpy(&info->local_addr, local_addr, local_addr->sa.sa_len); if (local_port != 0) { @@ -6080,6 +6240,20 @@ necp_send_application_interface_denied_event(pid_t pid, uuid_t proc_uuid, u_int3 netpolicy_post_msg(KEV_NETPOLICY_IFDENIED, &ev_ifdenied.ev_data, sizeof(ev_ifdenied)); } +static void +necp_send_network_denied_event(pid_t pid, uuid_t proc_uuid, u_int32_t network_type) +{ + struct kev_netpolicy_netdenied ev_netdenied = {}; + + bzero(&ev_netdenied, sizeof(ev_netdenied)); + + ev_netdenied.ev_data.epid = pid; + uuid_copy(ev_netdenied.ev_data.euuid, proc_uuid); + ev_netdenied.ev_network_type = network_type; + + netpolicy_post_msg(KEV_NETPOLICY_NETDENIED, &ev_netdenied.ev_data, sizeof(ev_netdenied)); +} + extern char *proc_name_address(void *p); #define NECP_VERIFY_DELEGATION_ENTITLEMENT(_p, _d) \ @@ -6175,6 +6349,8 @@ necp_application_find_policy_match_internal(proc_t proc, proc_t effective_proc = proc; bool release_eproc = false; + u_int32_t flow_divert_aggregate_unit = 0; + if (returned_result == NULL) { return EINVAL; } @@ -6424,7 +6600,7 @@ necp_application_find_policy_match_internal(proc_t proc, u_int32_t route_rule_id_array[MAX_AGGREGATE_ROUTE_RULES]; size_t route_rule_id_array_count = 0; necp_application_fillout_info_locked(application_uuid, real_application_uuid, responsible_application_uuid, account, domain, pid, uid, protocol, bound_interface_index, traffic_class, &local_addr, &remote_addr, local_port, remote_port, has_client, effective_proc, responsible_proc, drop_order, client_flags, &info); - matched_policy = necp_socket_find_policy_match_with_info_locked(necp_kernel_socket_policies_app_layer_map, &info, &filter_control_unit, route_rule_id_array, &route_rule_id_array_count, MAX_AGGREGATE_ROUTE_RULES, &service_action, &service, netagent_ids, netagent_use_flags, NECP_MAX_NETAGENTS, required_agent_types, num_required_agent_types, info.used_responsible_pid ? responsible_proc : effective_proc, NULL, NULL, &drop_dest_policy_result, &drop_all_bypass); + matched_policy = necp_socket_find_policy_match_with_info_locked(necp_kernel_socket_policies_app_layer_map, &info, &filter_control_unit, route_rule_id_array, &route_rule_id_array_count, MAX_AGGREGATE_ROUTE_RULES, &service_action, &service, netagent_ids, netagent_use_flags, NECP_MAX_NETAGENTS, required_agent_types, num_required_agent_types, info.used_responsible_pid ? 
responsible_proc : effective_proc, 0, NULL, NULL, &drop_dest_policy_result, &drop_all_bypass, &flow_divert_aggregate_unit); if (matched_policy) { returned_result->policy_id = matched_policy->id; returned_result->routing_result = matched_policy->result; @@ -6449,11 +6625,22 @@ necp_application_find_policy_match_internal(proc_t proc, returned_result->routing_result = NECP_KERNEL_POLICY_RESULT_NONE; } } + if (necp_check_missing_client_drop(proc, &info) || + necp_check_restricted_multicast_drop(proc, &info, false)) { + // Mark as drop + returned_result->policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH; + returned_result->routing_result = NECP_KERNEL_POLICY_RESULT_DROP; + } if (filter_control_unit == NECP_FILTER_UNIT_NO_FILTER) { returned_result->filter_control_unit = 0; } else { returned_result->filter_control_unit = filter_control_unit; } + + if (flow_divert_aggregate_unit > 0) { + returned_result->flow_divert_aggregate_unit = flow_divert_aggregate_unit; + } + returned_result->service_action = service_action; // Handle trigger service @@ -6507,6 +6694,15 @@ necp_application_find_policy_match_internal(proc_t proc, } } + if (returned_result->routing_result == NECP_KERNEL_POLICY_RESULT_DROP && + returned_result->routing_result_parameter.drop_flags & NECP_KERNEL_POLICY_DROP_FLAG_LOCAL_NETWORK) { + // Trigger the event that we dropped due to a local network policy + necp_send_network_denied_event(pid, application_uuid, NETPOLICY_NETWORKTYPE_LOCAL); + if (reason != NULL) { + *reason = NECP_CLIENT_RESULT_REASON_LOCAL_NETWORK_PROHIBITED; + } + } + if (local_addr.sa.sa_len == 0 || (local_addr.sa.sa_family == AF_INET && local_addr.sin.sin_addr.s_addr == 0) || (local_addr.sa.sa_family == AF_INET6 && IN6_IS_ADDR_UNSPECIFIED(&local_addr.sin6.sin6_addr))) { @@ -6907,7 +7103,7 @@ done: } static bool -necp_socket_check_policy(struct necp_kernel_socket_policy *kernel_policy, necp_app_id app_id, necp_app_id real_app_id, errno_t cred_result, u_int32_t account_id, struct substring domain, u_int8_t domain_dot_count, pid_t pid, uid_t uid, u_int32_t bound_interface_index, u_int32_t traffic_class, u_int16_t protocol, union necp_sockaddr_union *local, union necp_sockaddr_union *remote, struct necp_client_parameter_netagent_type *required_agent_types, u_int32_t num_required_agent_types, bool has_client, uint32_t client_flags, int is_platform_binary, proc_t proc, struct rtentry *rt) +necp_socket_check_policy(struct necp_kernel_socket_policy *kernel_policy, necp_app_id app_id, necp_app_id real_app_id, errno_t cred_result, u_int32_t account_id, struct substring domain, u_int8_t domain_dot_count, pid_t pid, uid_t uid, u_int32_t bound_interface_index, u_int32_t traffic_class, u_int16_t protocol, union necp_sockaddr_union *local, union necp_sockaddr_union *remote, struct necp_client_parameter_netagent_type *required_agent_types, u_int32_t num_required_agent_types, bool has_client, uint32_t client_flags, int is_platform_binary, proc_t proc, u_int16_t pf_tag, struct rtentry *rt) { if (!(kernel_policy->condition_mask & NECP_KERNEL_CONDITION_ALL_INTERFACES)) { if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_BOUND_INTERFACE) { @@ -7007,6 +7203,31 @@ necp_socket_check_policy(struct necp_kernel_socket_policy *kernel_policy, necp_a } } + if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_SDK_VERSION) { + if (proc != NULL) { + if (kernel_policy->cond_sdk_version.platform != 0) { + if (kernel_policy->cond_sdk_version.platform != proc_platform(proc)) { + // Process does not match platform + return FALSE; + } + } + + if 
(kernel_policy->cond_sdk_version.min_version != 0) { + if (kernel_policy->cond_sdk_version.min_version > proc_min_sdk(proc)) { + // Process min version is older than required min version + return FALSE; + } + } + + if (kernel_policy->cond_sdk_version.version != 0) { + if (kernel_policy->cond_sdk_version.version > proc_sdk(proc)) { + // Process SDK version is older than required version + return FALSE; + } + } + } + } + if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_CUSTOM_ENTITLEMENT) { if (kernel_policy->cond_custom_entitlement_matched == necp_boolean_state_false) { // Process is missing entitlement based on previous check @@ -7242,6 +7463,25 @@ necp_socket_check_policy(struct necp_kernel_socket_policy *kernel_policy, necp_a } } + if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_PACKET_FILTER_TAGS) { + bool tags_matched = false; + if (kernel_policy->cond_packet_filter_tags & NECP_POLICY_CONDITION_PACKET_FILTER_TAG_STACK_DROP) { + if (pf_tag == PF_TAG_ID_STACK_DROP) { + tags_matched = true; + } + } + + if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_PACKET_FILTER_TAGS) { + if (tags_matched) { + return FALSE; + } + } else { + if (!tags_matched) { + return FALSE; + } + } + } + return TRUE; } @@ -7360,7 +7600,7 @@ necp_socket_fillout_info_locked(struct inpcb *inp, struct sockaddr *override_loc } if (necp_kernel_socket_policies_condition_mask & NECP_KERNEL_CONDITION_PLATFORM_BINARY) { - info->is_platform_binary = csproc_get_platform_binary(sock_proc ? sock_proc : curr_proc) ? true : false; + info->is_platform_binary = necp_is_platform_binary(sock_proc ? sock_proc : curr_proc) ? true : false; } if (necp_kernel_socket_policies_condition_mask & NECP_KERNEL_CONDITION_ACCOUNT_ID && inp->inp_necp_attributes.inp_account != NULL) { @@ -7382,7 +7622,8 @@ necp_socket_fillout_info_locked(struct inpcb *inp, struct sockaddr *override_loc } } - if (necp_kernel_socket_policies_condition_mask & NECP_KERNEL_ADDRESS_TYPE_CONDITIONS) { + if (necp_restrict_multicast || + (necp_kernel_socket_policies_condition_mask & NECP_KERNEL_ADDRESS_TYPE_CONDITIONS)) { if (override_local_addr != NULL) { if (override_local_addr->sa_family == AF_INET6 && override_local_addr->sa_len <= sizeof(struct sockaddr_in6)) { memcpy(&info->local_addr, override_local_addr, override_local_addr->sa_len); @@ -7444,8 +7685,9 @@ necp_socket_find_policy_match_with_info_locked(struct necp_kernel_socket_policy necp_kernel_policy_result *return_service_action, necp_kernel_policy_service *return_service, u_int32_t *return_netagent_array, u_int32_t *return_netagent_use_flags_array, size_t netagent_array_count, struct necp_client_parameter_netagent_type *required_agent_types, - u_int32_t num_required_agent_types, proc_t proc, necp_kernel_policy_id *skip_policy_id, struct rtentry *rt, - necp_kernel_policy_result *return_drop_dest_policy_result, necp_drop_all_bypass_check_result_t *return_drop_all_bypass) + u_int32_t num_required_agent_types, proc_t proc, u_int16_t pf_tag, necp_kernel_policy_id *skip_policy_id, struct rtentry *rt, + necp_kernel_policy_result *return_drop_dest_policy_result, necp_drop_all_bypass_check_result_t *return_drop_all_bypass, + u_int32_t *return_flow_divert_aggregate_unit) { struct necp_kernel_socket_policy *matched_policy = NULL; u_int32_t skip_order = 0; @@ -7529,7 +7771,7 @@ necp_socket_find_policy_match_with_info_locked(struct necp_kernel_socket_policy continue; } - if (necp_socket_check_policy(policy_search_array[i], info->application_id, info->real_application_id, info->cred_result, 
info->account_id, domain_substring, domain_dot_count, info->pid, info->uid, info->bound_interface_index, info->traffic_class, info->protocol, &info->local_addr, &info->remote_addr, required_agent_types, num_required_agent_types, info->has_client, info->client_flags, info->is_platform_binary, proc, rt)) { + if (necp_socket_check_policy(policy_search_array[i], info->application_id, info->real_application_id, info->cred_result, info->account_id, domain_substring, domain_dot_count, info->pid, info->uid, info->bound_interface_index, info->traffic_class, info->protocol, &info->local_addr, &info->remote_addr, required_agent_types, num_required_agent_types, info->has_client, info->client_flags, info->is_platform_binary, proc, pf_tag, rt)) { if (policy_search_array[i]->result == NECP_KERNEL_POLICY_RESULT_SOCKET_FILTER) { if (return_filter && *return_filter != NECP_FILTER_UNIT_NO_FILTER) { necp_kernel_policy_filter control_unit = policy_search_array[i]->result_parameter.filter_control_unit; @@ -7584,6 +7826,18 @@ necp_socket_find_policy_match_with_info_locked(struct necp_kernel_socket_policy } } continue; + } else if (policy_search_array[i]->result == NECP_KERNEL_POLICY_RESULT_SOCKET_DIVERT) { + u_int32_t control_unit = policy_search_array[i]->result_parameter.flow_divert_control_unit; + if (control_unit & FLOW_DIVERT_IS_TRANSPARENT) { + /* For transparent proxies, accumulate the control unit and continue to the next policy */ + if (return_flow_divert_aggregate_unit != NULL) { + *return_flow_divert_aggregate_unit |= (control_unit & ~FLOW_DIVERT_IS_TRANSPARENT); + if (necp_debug > 1) { + NECPLOG(LOG_DEBUG, "Socket Policy: (Application %d Real Application %d BoundInterface %d Proto %d) flow divert %u", info->application_id, info->real_application_id, info->bound_interface_index, info->protocol, control_unit); + } + } + continue; + } } // Matched policy is a skip. Do skip and continue. 
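/*
 * [Editor's note, not part of the patch] Transparent flow-divert results do not end
 * the policy walk: each matching policy's control unit is OR-ed into a single
 * aggregate with the FLOW_DIVERT_IS_TRANSPARENT bit masked off, and the walk
 * continues to the next policy. The aggregate is later stored on the socket,
 * roughly:
 *
 *	u_int32_t flow_divert_aggregate_unit = 0;
 *	// for each transparent flow-divert match during the walk:
 *	flow_divert_aggregate_unit |= (control_unit & ~FLOW_DIVERT_IS_TRANSPARENT);
 *	// after the walk:
 *	inp->inp_policyresult.results.flow_divert_aggregate_unit = flow_divert_aggregate_unit;
 */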
@@ -7682,6 +7936,21 @@ necp_socket_bypass(struct sockaddr *override_local_addr, struct sockaddr *overri return false; } +static inline void +necp_socket_ip_tunnel_tso(struct inpcb *inp) +{ + u_int tunnel_interface_index = inp->inp_policyresult.results.result_parameter.tunnel_interface_index; + ifnet_t tunnel_interface = NULL; + + ifnet_head_lock_shared(); + tunnel_interface = ifindex2ifnet[tunnel_interface_index]; + ifnet_head_done(); + + if (tunnel_interface != NULL) { + tcp_set_tso(intotcpcb(inp), tunnel_interface); + } +} + necp_kernel_policy_id necp_socket_find_policy_match(struct inpcb *inp, struct sockaddr *override_local_addr, struct sockaddr *override_remote_addr, u_int32_t override_bound_interface) { @@ -7701,6 +7970,8 @@ necp_socket_find_policy_match(struct inpcb *inp, struct sockaddr *override_local struct necp_socket_info info; + u_int32_t flow_divert_aggregate_unit = 0; + if (inp == NULL) { return NECP_KERNEL_POLICY_ID_NONE; } @@ -7729,6 +8000,7 @@ necp_socket_find_policy_match(struct inpcb *inp, struct sockaddr *override_local inp->inp_policyresult.app_id = 0; inp->inp_policyresult.flowhash = 0; inp->inp_policyresult.results.filter_control_unit = 0; + inp->inp_policyresult.results.flow_divert_aggregate_unit = 0; inp->inp_policyresult.results.route_rule_id = 0; if (necp_socket_bypass(override_local_addr, override_remote_addr, inp)) { inp->inp_policyresult.results.result = NECP_KERNEL_POLICY_RESULT_PASS; @@ -7753,6 +8025,7 @@ necp_socket_find_policy_match(struct inpcb *inp, struct sockaddr *override_local inp->inp_policyresult.app_id = 0; inp->inp_policyresult.flowhash = 0; inp->inp_policyresult.results.filter_control_unit = 0; + inp->inp_policyresult.results.flow_divert_aggregate_unit = 0; inp->inp_policyresult.results.route_rule_id = 0; inp->inp_policyresult.results.result = NECP_KERNEL_POLICY_RESULT_PASS; return NECP_KERNEL_POLICY_ID_NONE; @@ -7760,7 +8033,6 @@ necp_socket_find_policy_match(struct inpcb *inp, struct sockaddr *override_local // Lock lck_rw_lock_shared(&necp_kernel_policy_lock); - necp_socket_fillout_info_locked(inp, override_local_addr, override_remote_addr, override_bound_interface, drop_order, &socket_proc, &info); // Check info @@ -7786,7 +8058,7 @@ necp_socket_find_policy_match(struct inpcb *inp, struct sockaddr *override_local necp_kernel_policy_id skip_policy_id = NECP_KERNEL_POLICY_ID_NONE; u_int32_t route_rule_id_array[MAX_AGGREGATE_ROUTE_RULES]; size_t route_rule_id_array_count = 0; - matched_policy = necp_socket_find_policy_match_with_info_locked(necp_kernel_socket_policies_map[NECP_SOCKET_MAP_APP_ID_TO_BUCKET(info.application_id)], &info, &filter_control_unit, route_rule_id_array, &route_rule_id_array_count, MAX_AGGREGATE_ROUTE_RULES, &service_action, &service, netagent_ids, NULL, NECP_MAX_NETAGENTS, NULL, 0, socket_proc ? socket_proc : current_proc(), &skip_policy_id, inp->inp_route.ro_rt, &drop_dest_policy_result, &drop_all_bypass); + matched_policy = necp_socket_find_policy_match_with_info_locked(necp_kernel_socket_policies_map[NECP_SOCKET_MAP_APP_ID_TO_BUCKET(info.application_id)], &info, &filter_control_unit, route_rule_id_array, &route_rule_id_array_count, MAX_AGGREGATE_ROUTE_RULES, &service_action, &service, netagent_ids, NULL, NECP_MAX_NETAGENTS, NULL, 0, socket_proc ? socket_proc : current_proc(), 0, &skip_policy_id, inp->inp_route.ro_rt, &drop_dest_policy_result, &drop_all_bypass, &flow_divert_aggregate_unit); // If the socket matched a scoped service policy, mark as Drop if not registered. 
// This covers the cases in which a service is required (on demand) but hasn't started yet. @@ -7809,6 +8081,7 @@ necp_socket_find_policy_match(struct inpcb *inp, struct sockaddr *override_local inp->inp_policyresult.policy_gencount = necp_kernel_socket_policies_gencount; inp->inp_policyresult.flowhash = flowhash; inp->inp_policyresult.results.filter_control_unit = 0; + inp->inp_policyresult.results.flow_divert_aggregate_unit = 0; inp->inp_policyresult.results.route_rule_id = 0; inp->inp_policyresult.results.result = NECP_KERNEL_POLICY_RESULT_DROP; @@ -7855,6 +8128,7 @@ necp_socket_find_policy_match(struct inpcb *inp, struct sockaddr *override_local inp->inp_policyresult.policy_gencount = necp_kernel_socket_policies_gencount; inp->inp_policyresult.flowhash = flowhash; inp->inp_policyresult.results.filter_control_unit = 0; + inp->inp_policyresult.results.flow_divert_aggregate_unit = 0; inp->inp_policyresult.results.route_rule_id = 0; inp->inp_policyresult.results.result = NECP_KERNEL_POLICY_RESULT_DROP; @@ -7882,7 +8156,8 @@ necp_socket_find_policy_match(struct inpcb *inp, struct sockaddr *override_local route_rule_id = necp_create_aggregate_route_rule(route_rule_id_array); } - bool reset_tcp_mss = false; + bool reset_tcp_tunnel_interface = false; + bool send_local_network_denied_event = false; if (matched_policy) { matched_policy_id = matched_policy->id; inp->inp_policyresult.policy_id = matched_policy->id; @@ -7890,6 +8165,7 @@ necp_socket_find_policy_match(struct inpcb *inp, struct sockaddr *override_local inp->inp_policyresult.policy_gencount = necp_kernel_socket_policies_gencount; inp->inp_policyresult.flowhash = flowhash; inp->inp_policyresult.results.filter_control_unit = filter_control_unit; + inp->inp_policyresult.results.flow_divert_aggregate_unit = flow_divert_aggregate_unit; inp->inp_policyresult.results.route_rule_id = route_rule_id; inp->inp_policyresult.results.result = matched_policy->result; memcpy(&inp->inp_policyresult.results.result_parameter, &matched_policy->result_parameter, sizeof(matched_policy->result_parameter)); @@ -7908,13 +8184,19 @@ necp_socket_find_policy_match(struct inpcb *inp, struct sockaddr *override_local } else if (necp_socket_is_connected(inp) && matched_policy->result == NECP_KERNEL_POLICY_RESULT_IP_TUNNEL && info.protocol == IPPROTO_TCP) { - // Reset MSS on TCP socket if tunnel policy changes - reset_tcp_mss = true; + // Reset TCP socket interface based parameters if tunnel policy changes + reset_tcp_tunnel_interface = true; } if (necp_debug > 1) { NECPLOG(LOG_DEBUG, "Socket Policy: %p (BoundInterface %d Proto %d) Policy %d Result %d Parameter %d", inp->inp_socket, info.bound_interface_index, info.protocol, matched_policy->id, matched_policy->result, matched_policy->result_parameter.tunnel_interface_index); } + + if (matched_policy->result == NECP_KERNEL_POLICY_RESULT_DROP && + matched_policy->result_parameter.drop_flags & NECP_KERNEL_POLICY_DROP_FLAG_LOCAL_NETWORK) { + // Trigger the event that we dropped due to a local network policy + send_local_network_denied_event = true; + } } else { bool drop_all = false; if (necp_drop_all_order > 0 || info.drop_order > 0 || drop_dest_policy_result == NECP_KERNEL_POLICY_RESULT_DROP) { @@ -7930,6 +8212,7 @@ necp_socket_find_policy_match(struct inpcb *inp, struct sockaddr *override_local inp->inp_policyresult.policy_gencount = necp_kernel_socket_policies_gencount; inp->inp_policyresult.flowhash = flowhash; inp->inp_policyresult.results.filter_control_unit = 0; + 
inp->inp_policyresult.results.flow_divert_aggregate_unit = 0; inp->inp_policyresult.results.route_rule_id = 0; inp->inp_policyresult.results.result = NECP_KERNEL_POLICY_RESULT_DROP; } else { @@ -7939,17 +8222,40 @@ necp_socket_find_policy_match(struct inpcb *inp, struct sockaddr *override_local inp->inp_policyresult.policy_gencount = necp_kernel_socket_policies_gencount; inp->inp_policyresult.flowhash = flowhash; inp->inp_policyresult.results.filter_control_unit = filter_control_unit; // We may have matched a filter, so mark it! + inp->inp_policyresult.results.flow_divert_aggregate_unit = flow_divert_aggregate_unit; inp->inp_policyresult.results.route_rule_id = route_rule_id; // We may have matched a route rule, so mark it! inp->inp_policyresult.results.result = NECP_KERNEL_POLICY_RESULT_NONE; } } + if (necp_check_missing_client_drop(socket_proc ? socket_proc : current_proc(), &info) || + necp_check_restricted_multicast_drop(socket_proc ? socket_proc : current_proc(), &info, false)) { + // Mark as drop + inp->inp_policyresult.policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH; + inp->inp_policyresult.skip_policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH; + inp->inp_policyresult.policy_gencount = necp_kernel_socket_policies_gencount; + inp->inp_policyresult.flowhash = flowhash; + inp->inp_policyresult.results.filter_control_unit = 0; + inp->inp_policyresult.results.flow_divert_aggregate_unit = 0; + inp->inp_policyresult.results.route_rule_id = 0; + inp->inp_policyresult.results.result = NECP_KERNEL_POLICY_RESULT_DROP; + } + // Unlock lck_rw_done(&necp_kernel_policy_lock); - if (reset_tcp_mss) { + if (reset_tcp_tunnel_interface) { // Update MSS when not holding the policy lock to avoid recursive locking tcp_mtudisc(inp, 0); + + // Update TSO flag based on the tunnel interface + necp_socket_ip_tunnel_tso(inp); + } + + if (send_local_network_denied_event) { + necp_send_network_denied_event(((so->so_flags & SOF_DELEGATED) ? so->e_pid : so->last_pid), + ((so->so_flags & SOF_DELEGATED) ? 
so->e_uuid : so->last_uuid), + NETPOLICY_NETWORKTYPE_LOCAL); } if (socket_proc) { @@ -7960,7 +8266,7 @@ necp_socket_find_policy_match(struct inpcb *inp, struct sockaddr *override_local } static bool -necp_ip_output_check_policy(struct necp_kernel_ip_output_policy *kernel_policy, necp_kernel_policy_id socket_policy_id, necp_kernel_policy_id socket_skip_policy_id, u_int32_t bound_interface_index, u_int32_t last_interface_index, u_int16_t protocol, union necp_sockaddr_union *local, union necp_sockaddr_union *remote, struct rtentry *rt) +necp_ip_output_check_policy(struct necp_kernel_ip_output_policy *kernel_policy, necp_kernel_policy_id socket_policy_id, necp_kernel_policy_id socket_skip_policy_id, u_int32_t bound_interface_index, u_int32_t last_interface_index, u_int16_t protocol, union necp_sockaddr_union *local, union necp_sockaddr_union *remote, struct rtentry *rt, u_int16_t pf_tag) { if (!(kernel_policy->condition_mask & NECP_KERNEL_CONDITION_ALL_INTERFACES)) { if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_BOUND_INTERFACE) { @@ -8084,11 +8390,31 @@ necp_ip_output_check_policy(struct necp_kernel_ip_output_policy *kernel_policy, } } + if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_PACKET_FILTER_TAGS) { + bool tags_matched = false; + + if (kernel_policy->cond_packet_filter_tags & NECP_POLICY_CONDITION_PACKET_FILTER_TAG_STACK_DROP) { + if ((pf_tag & PF_TAG_ID_STACK_DROP) == PF_TAG_ID_STACK_DROP) { + tags_matched = true; + } + + if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_PACKET_FILTER_TAGS) { + if (tags_matched) { + return FALSE; + } + } else { + if (!tags_matched) { + return FALSE; + } + } + } + } + return TRUE; } static inline struct necp_kernel_ip_output_policy * -necp_ip_output_find_policy_match_locked(necp_kernel_policy_id socket_policy_id, necp_kernel_policy_id socket_skip_policy_id, u_int32_t bound_interface_index, u_int32_t last_interface_index, u_int16_t protocol, union necp_sockaddr_union *local_addr, union necp_sockaddr_union *remote_addr, struct rtentry *rt, u_int32_t *return_route_rule_id, necp_kernel_policy_result *return_drop_dest_policy_result, necp_drop_all_bypass_check_result_t *return_drop_all_bypass) +necp_ip_output_find_policy_match_locked(necp_kernel_policy_id socket_policy_id, necp_kernel_policy_id socket_skip_policy_id, u_int32_t bound_interface_index, u_int32_t last_interface_index, u_int16_t protocol, union necp_sockaddr_union *local_addr, union necp_sockaddr_union *remote_addr, struct rtentry *rt, u_int16_t pf_tag, u_int32_t *return_route_rule_id, necp_kernel_policy_result *return_drop_dest_policy_result, necp_drop_all_bypass_check_result_t *return_drop_all_bypass) { u_int32_t skip_order = 0; u_int32_t skip_session_order = 0; @@ -8146,7 +8472,7 @@ necp_ip_output_find_policy_match_locked(necp_kernel_policy_id socket_policy_id, continue; } - if (necp_ip_output_check_policy(policy_search_array[i], socket_policy_id, socket_skip_policy_id, bound_interface_index, last_interface_index, protocol, local_addr, remote_addr, rt)) { + if (necp_ip_output_check_policy(policy_search_array[i], socket_policy_id, socket_skip_policy_id, bound_interface_index, last_interface_index, protocol, local_addr, remote_addr, rt, pf_tag)) { if (policy_search_array[i]->result == NECP_KERNEL_POLICY_RESULT_ROUTE_RULES) { if (return_route_rule_id != NULL && route_rule_id_count < MAX_AGGREGATE_ROUTE_RULES) { route_rule_id_array[route_rule_id_count++] = policy_search_array[i]->result_parameter.route_rule_id; @@ -8206,6 +8532,7 @@ 
necp_ip_output_find_policy_match(struct mbuf *packet, int flags, struct ip_out_a union necp_sockaddr_union remote_addr; u_int32_t drop_dest_policy_result = NECP_KERNEL_POLICY_RESULT_NONE; necp_drop_all_bypass_check_result_t drop_all_bypass = NECP_DROP_ALL_BYPASS_CHECK_RESULT_NONE; + u_int16_t pf_tag = 0; if (result) { *result = 0; @@ -8221,6 +8548,7 @@ necp_ip_output_find_policy_match(struct mbuf *packet, int flags, struct ip_out_a socket_policy_id = necp_get_policy_id_from_packet(packet); socket_skip_policy_id = necp_get_skip_policy_id_from_packet(packet); + pf_tag = necp_get_packet_filter_tags_from_packet(packet); // Exit early for an empty list // Don't lock. Possible race condition, but we don't want the performance hit. @@ -8304,7 +8632,7 @@ necp_ip_output_find_policy_match(struct mbuf *packet, int flags, struct ip_out_a // Match packet to policy lck_rw_lock_shared(&necp_kernel_policy_lock); u_int32_t route_rule_id = 0; - matched_policy = necp_ip_output_find_policy_match_locked(socket_policy_id, socket_skip_policy_id, bound_interface_index, last_interface_index, protocol, &local_addr, &remote_addr, rt, &route_rule_id, &drop_dest_policy_result, &drop_all_bypass); + matched_policy = necp_ip_output_find_policy_match_locked(socket_policy_id, socket_skip_policy_id, bound_interface_index, last_interface_index, protocol, &local_addr, &remote_addr, rt, pf_tag, &route_rule_id, &drop_dest_policy_result, &drop_all_bypass); if (matched_policy) { matched_policy_id = matched_policy->id; if (result) { @@ -8371,6 +8699,7 @@ necp_ip6_output_find_policy_match(struct mbuf *packet, int flags, struct ip6_out union necp_sockaddr_union remote_addr; u_int32_t drop_dest_policy_result = NECP_KERNEL_POLICY_RESULT_NONE; necp_drop_all_bypass_check_result_t drop_all_bypass = NECP_DROP_ALL_BYPASS_CHECK_RESULT_NONE; + u_int16_t pf_tag = 0; if (result) { *result = 0; @@ -8386,6 +8715,7 @@ necp_ip6_output_find_policy_match(struct mbuf *packet, int flags, struct ip6_out socket_policy_id = necp_get_policy_id_from_packet(packet); socket_skip_policy_id = necp_get_skip_policy_id_from_packet(packet); + pf_tag = necp_get_packet_filter_tags_from_packet(packet); // Exit early for an empty list // Don't lock. Possible race condition, but we don't want the performance hit. 
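For illustration, the NECP_KERNEL_CONDITION_PACKET_FILTER_TAGS check added to necp_ip_output_check_policy() above, which consumes the pf_tag value now read from each outbound packet, reduces to roughly the following standalone C sketch. The names used here (policy_sketch, COND_PACKET_FILTER_TAGS, COND_TAG_STACK_DROP, PF_TAG_STACK_DROP) and their values are simplified stand-ins assumed for this sketch only, not the kernel definitions.

    #include <stdbool.h>
    #include <stdint.h>

    /* Stand-in flag values, assumed for illustration only. */
    #define COND_PACKET_FILTER_TAGS 0x1  /* role of NECP_KERNEL_CONDITION_PACKET_FILTER_TAGS */
    #define COND_TAG_STACK_DROP     0x1  /* role of NECP_POLICY_CONDITION_PACKET_FILTER_TAG_STACK_DROP */
    #define PF_TAG_STACK_DROP       0x2  /* role of PF_TAG_ID_STACK_DROP carried in the mbuf's pf tag */

    struct policy_sketch {
        uint32_t condition_mask;          /* conditions this policy uses */
        uint32_t condition_negated_mask;  /* which of those conditions are negated */
        uint16_t cond_packet_filter_tags; /* tag bits the policy wants to match */
    };

    /* Returns true when the policy's packet-filter-tag condition (if any) accepts pf_tag. */
    static bool
    packet_filter_tags_allow(const struct policy_sketch *policy, uint16_t pf_tag)
    {
        if (!(policy->condition_mask & COND_PACKET_FILTER_TAGS)) {
            return true; /* condition not present: no constraint on the tag */
        }

        if (policy->cond_packet_filter_tags & COND_TAG_STACK_DROP) {
            bool tags_matched = (pf_tag & PF_TAG_STACK_DROP) == PF_TAG_STACK_DROP;

            if (policy->condition_negated_mask & COND_PACKET_FILTER_TAGS) {
                return !tags_matched; /* negated: match only when the tag is absent */
            }
            return tags_matched;
        }

        return true;
    }

In this simplified form, a policy that sets the condition without negation only applies to packets the packet filter has already tagged for stack drop, mirroring how the pf_tag threaded through the IPv4 and IPv6 find-policy-match paths above is evaluated.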
@@ -8466,7 +8796,7 @@ necp_ip6_output_find_policy_match(struct mbuf *packet, int flags, struct ip6_out // Match packet to policy lck_rw_lock_shared(&necp_kernel_policy_lock); u_int32_t route_rule_id = 0; - matched_policy = necp_ip_output_find_policy_match_locked(socket_policy_id, socket_skip_policy_id, bound_interface_index, last_interface_index, protocol, &local_addr, &remote_addr, rt, &route_rule_id, &drop_dest_policy_result, &drop_all_bypass); + matched_policy = necp_ip_output_find_policy_match_locked(socket_policy_id, socket_skip_policy_id, bound_interface_index, last_interface_index, protocol, &local_addr, &remote_addr, rt, pf_tag, &route_rule_id, &drop_dest_policy_result, &drop_all_bypass); if (matched_policy) { matched_policy_id = matched_policy->id; if (result) { @@ -8821,26 +9151,21 @@ done: return qos_marking; } -void -necp_socket_update_qos_marking(struct inpcb *inp, struct rtentry *route, struct ifnet *interface, u_int32_t route_rule_id) +bool +necp_lookup_current_qos_marking(int32_t *qos_marking_gencount, struct rtentry *route, struct ifnet *interface, u_int32_t route_rule_id, bool old_qos_marking) { - bool qos_marking = FALSE; - struct ifnet *ifp = interface = NULL; + bool new_qos_marking = old_qos_marking; + struct ifnet *ifp = interface; if (net_qos_policy_restricted == 0) { - return; - } - if (inp->inp_socket == NULL) { - return; - } - if ((inp->inp_socket->so_flags1 & SOF1_QOSMARKING_POLICY_OVERRIDE)) { - return; + return new_qos_marking; } + /* * This is racy but we do not need the performance hit of taking necp_kernel_policy_lock */ - if (inp->inp_policyresult.results.qos_marking_gencount == necp_kernel_socket_policies_gencount) { - return; + if (*qos_marking_gencount == necp_kernel_socket_policies_gencount) { + return new_qos_marking; } lck_rw_lock_shared(&necp_kernel_policy_lock); @@ -8852,7 +9177,7 @@ necp_socket_update_qos_marking(struct inpcb *inp, struct rtentry *route, struct * By default, until we have a interface, do not mark and reevaluate the Qos marking policy */ if (ifp == NULL || route_rule_id == 0) { - qos_marking = FALSE; + new_qos_marking = FALSE; goto done; } @@ -8865,22 +9190,41 @@ necp_socket_update_qos_marking(struct inpcb *inp, struct rtentry *route, struct if (sub_route_rule_id == 0) { break; } - qos_marking = necp_update_qos_marking(ifp, sub_route_rule_id); - if (qos_marking == TRUE) { + new_qos_marking = necp_update_qos_marking(ifp, sub_route_rule_id); + if (new_qos_marking == TRUE) { break; } } } } else { - qos_marking = necp_update_qos_marking(ifp, route_rule_id); + new_qos_marking = necp_update_qos_marking(ifp, route_rule_id); } /* * Now that we have an interface we remember the gencount */ - inp->inp_policyresult.results.qos_marking_gencount = necp_kernel_socket_policies_gencount; + *qos_marking_gencount = necp_kernel_socket_policies_gencount; done: lck_rw_done(&necp_kernel_policy_lock); + return new_qos_marking; +} + +void +necp_socket_update_qos_marking(struct inpcb *inp, struct rtentry *route, u_int32_t route_rule_id) +{ + bool qos_marking = inp->inp_socket->so_flags1 & SOF1_QOSMARKING_ALLOWED ? 
TRUE : FALSE; + + if (net_qos_policy_restricted == 0) { + return; + } + if (inp->inp_socket == NULL) { + return; + } + if ((inp->inp_socket->so_flags1 & SOF1_QOSMARKING_POLICY_OVERRIDE)) { + return; + } + + qos_marking = necp_lookup_current_qos_marking(&(inp->inp_policyresult.results.qos_marking_gencount), route, NULL, route_rule_id, qos_marking); if (qos_marking == TRUE) { inp->inp_socket->so_flags1 |= SOF1_QOSMARKING_ALLOWED; @@ -9136,7 +9480,20 @@ necp_netagents_allow_traffic(u_int32_t *netagent_ids, size_t netagent_id_count) } static bool -necp_socket_is_allowed_to_send_recv_internal(struct inpcb *inp, struct sockaddr *override_local_addr, struct sockaddr *override_remote_addr, ifnet_t interface, necp_kernel_policy_id *return_policy_id, u_int32_t *return_route_rule_id, necp_kernel_policy_id *return_skip_policy_id) +necp_packet_filter_tags_receive(u_int16_t pf_tag, u_int32_t pass_flags) +{ + bool allowed_to_receive = TRUE; + + if (pf_tag == PF_TAG_ID_STACK_DROP && + (pass_flags & NECP_KERNEL_POLICY_PASS_PF_TAG) != NECP_KERNEL_POLICY_PASS_PF_TAG) { + allowed_to_receive = FALSE; + } + + return allowed_to_receive; +} + +static bool +necp_socket_is_allowed_to_send_recv_internal(struct inpcb *inp, struct sockaddr *override_local_addr, struct sockaddr *override_remote_addr, ifnet_t interface, u_int16_t pf_tag, necp_kernel_policy_id *return_policy_id, u_int32_t *return_route_rule_id, necp_kernel_policy_id *return_skip_policy_id, u_int32_t *return_pass_flags) { u_int32_t verifyifindex = interface ? interface->if_index : 0; bool allowed_to_receive = TRUE; @@ -9150,8 +9507,12 @@ necp_socket_is_allowed_to_send_recv_internal(struct inpcb *inp, struct sockaddr necp_kernel_policy_result drop_dest_policy_result = NECP_KERNEL_POLICY_RESULT_NONE; necp_drop_all_bypass_check_result_t drop_all_bypass = NECP_DROP_ALL_BYPASS_CHECK_RESULT_NONE; u_int32_t netagent_ids[NECP_MAX_NETAGENTS]; - memset(&netagent_ids, 0, sizeof(netagent_ids)); proc_t socket_proc = NULL; + necp_kernel_policy_filter filter_control_unit = 0; + u_int32_t pass_flags = 0; + u_int32_t flow_divert_aggregate_unit = 0; + + memset(&netagent_ids, 0, sizeof(netagent_ids)); if (return_policy_id) { *return_policy_id = NECP_KERNEL_POLICY_ID_NONE; @@ -9162,6 +9523,9 @@ necp_socket_is_allowed_to_send_recv_internal(struct inpcb *inp, struct sockaddr if (return_route_rule_id) { *return_route_rule_id = 0; } + if (return_pass_flags) { + *return_pass_flags = 0; + } if (inp == NULL) { goto done; @@ -9220,6 +9584,9 @@ necp_socket_is_allowed_to_send_recv_internal(struct inpcb *inp, struct sockaddr if (return_route_rule_id) { *return_route_rule_id = inp->inp_policyresult.results.route_rule_id; } + if (inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_PASS) { + pass_flags = inp->inp_policyresult.results.result_parameter.pass_flags; + } } goto done; } @@ -9256,6 +9623,9 @@ necp_socket_is_allowed_to_send_recv_internal(struct inpcb *inp, struct sockaddr if (return_skip_policy_id) { *return_skip_policy_id = inp->inp_policyresult.skip_policy_id; } + if (inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_PASS) { + pass_flags = inp->inp_policyresult.results.result_parameter.pass_flags; + } } lck_rw_done(&necp_kernel_policy_lock); goto done; @@ -9263,7 +9633,7 @@ necp_socket_is_allowed_to_send_recv_internal(struct inpcb *inp, struct sockaddr u_int32_t route_rule_id_array[MAX_AGGREGATE_ROUTE_RULES]; size_t route_rule_id_array_count = 0; - struct necp_kernel_socket_policy *matched_policy = 
necp_socket_find_policy_match_with_info_locked(necp_kernel_socket_policies_map[NECP_SOCKET_MAP_APP_ID_TO_BUCKET(info.application_id)], &info, NULL, route_rule_id_array, &route_rule_id_array_count, MAX_AGGREGATE_ROUTE_RULES, &service_action, &service, netagent_ids, NULL, NECP_MAX_NETAGENTS, NULL, 0, socket_proc ? socket_proc : current_proc(), return_skip_policy_id, inp->inp_route.ro_rt, &drop_dest_policy_result, &drop_all_bypass); + struct necp_kernel_socket_policy *matched_policy = necp_socket_find_policy_match_with_info_locked(necp_kernel_socket_policies_map[NECP_SOCKET_MAP_APP_ID_TO_BUCKET(info.application_id)], &info, &filter_control_unit, route_rule_id_array, &route_rule_id_array_count, MAX_AGGREGATE_ROUTE_RULES, &service_action, &service, netagent_ids, NULL, NECP_MAX_NETAGENTS, NULL, 0, socket_proc ? socket_proc : current_proc(), pf_tag, return_skip_policy_id, inp->inp_route.ro_rt, &drop_dest_policy_result, &drop_all_bypass, &flow_divert_aggregate_unit); if (route_rule_id_array_count == 1) { route_rule_id = route_rule_id_array[0]; @@ -9271,7 +9641,14 @@ necp_socket_is_allowed_to_send_recv_internal(struct inpcb *inp, struct sockaddr route_rule_id = necp_create_aggregate_route_rule(route_rule_id_array); } + bool send_local_network_denied_event = false; if (matched_policy != NULL) { + if (matched_policy->result == NECP_KERNEL_POLICY_RESULT_DROP && + matched_policy->result_parameter.drop_flags & NECP_KERNEL_POLICY_DROP_FLAG_LOCAL_NETWORK) { + // Trigger the event that we dropped due to a local network policy + send_local_network_denied_event = true; + } + if (matched_policy->result == NECP_KERNEL_POLICY_RESULT_DROP || matched_policy->result == NECP_KERNEL_POLICY_RESULT_SOCKET_DIVERT || (matched_policy->result == NECP_KERNEL_POLICY_RESULT_IP_TUNNEL && interface && @@ -9290,13 +9667,21 @@ necp_socket_is_allowed_to_send_recv_internal(struct inpcb *inp, struct sockaddr if (return_route_rule_id) { *return_route_rule_id = route_rule_id; } + if (matched_policy->result == NECP_KERNEL_POLICY_RESULT_PASS) { + pass_flags = matched_policy->result_parameter.pass_flags; + } + // Policies have changed since last evaluation, update inp result with new filter state + if (inp->inp_policyresult.results.filter_control_unit != filter_control_unit) { + inp->inp_policyresult.results.filter_control_unit = filter_control_unit; + } + if (inp->inp_policyresult.results.flow_divert_aggregate_unit != flow_divert_aggregate_unit) { + inp->inp_policyresult.results.flow_divert_aggregate_unit = flow_divert_aggregate_unit; + } } - lck_rw_done(&necp_kernel_policy_lock); if (necp_debug > 1 && matched_policy->id != inp->inp_policyresult.policy_id) { NECPLOG(LOG_DEBUG, "Socket Send/Recv Policy: Policy %d Allowed %d", return_policy_id ?
*return_policy_id : 0, allowed_to_receive); } - goto done; } else { bool drop_all = false; if (necp_drop_all_order > 0 || info.drop_order > 0 || drop_dest_policy_result == NECP_KERNEL_POLICY_RESULT_DROP) { @@ -9314,12 +9699,38 @@ necp_socket_is_allowed_to_send_recv_internal(struct inpcb *inp, struct sockaddr if (return_route_rule_id) { *return_route_rule_id = route_rule_id; } + + // Policies have changed since last evaluation, update inp result with new filter state + if (inp->inp_policyresult.results.filter_control_unit != filter_control_unit) { + inp->inp_policyresult.results.filter_control_unit = filter_control_unit; + } + if (inp->inp_policyresult.results.flow_divert_aggregate_unit != flow_divert_aggregate_unit) { + inp->inp_policyresult.results.flow_divert_aggregate_unit = flow_divert_aggregate_unit; + } } } + if (necp_check_restricted_multicast_drop(socket_proc ? socket_proc : current_proc(), &info, true)) { + allowed_to_receive = FALSE; + } + lck_rw_done(&necp_kernel_policy_lock); + if (send_local_network_denied_event) { + necp_send_network_denied_event(((so->so_flags & SOF_DELEGATED) ? so->e_pid : so->last_pid), + ((so->so_flags & SOF_DELEGATED) ? so->e_uuid : so->last_uuid), + NETPOLICY_NETWORKTYPE_LOCAL); + } + done: + if (return_pass_flags != NULL) { + *return_pass_flags = pass_flags; + } + + if (pf_tag != 0 && allowed_to_receive) { + allowed_to_receive = necp_packet_filter_tags_receive(pf_tag, pass_flags); + } + if (!allowed_to_receive && interface_type_denied != IFRTYPE_FUNCTIONAL_UNKNOWN) { soevent(inp->inp_socket, (SO_FILT_HINT_LOCKED | SO_FILT_HINT_IFDENIED)); } @@ -9332,7 +9743,7 @@ done: } bool -necp_socket_is_allowed_to_send_recv_v4(struct inpcb *inp, u_int16_t local_port, u_int16_t remote_port, struct in_addr *local_addr, struct in_addr *remote_addr, ifnet_t interface, necp_kernel_policy_id *return_policy_id, u_int32_t *return_route_rule_id, necp_kernel_policy_id *return_skip_policy_id) +necp_socket_is_allowed_to_send_recv_v4(struct inpcb *inp, u_int16_t local_port, u_int16_t remote_port, struct in_addr *local_addr, struct in_addr *remote_addr, ifnet_t interface, u_int16_t pf_tag, necp_kernel_policy_id *return_policy_id, u_int32_t *return_route_rule_id, necp_kernel_policy_id *return_skip_policy_id, u_int32_t *return_pass_flags) { struct sockaddr_in local = {}; struct sockaddr_in remote = {}; @@ -9344,11 +9755,11 @@ necp_socket_is_allowed_to_send_recv_v4(struct inpcb *inp, u_int16_t local_port, memcpy(&remote.sin_addr, remote_addr, sizeof(remote.sin_addr)); return necp_socket_is_allowed_to_send_recv_internal(inp, (struct sockaddr *)&local, (struct sockaddr *)&remote, interface, - return_policy_id, return_route_rule_id, return_skip_policy_id); + pf_tag, return_policy_id, return_route_rule_id, return_skip_policy_id, return_pass_flags); } bool -necp_socket_is_allowed_to_send_recv_v6(struct inpcb *inp, u_int16_t local_port, u_int16_t remote_port, struct in6_addr *local_addr, struct in6_addr *remote_addr, ifnet_t interface, necp_kernel_policy_id *return_policy_id, u_int32_t *return_route_rule_id, necp_kernel_policy_id *return_skip_policy_id) +necp_socket_is_allowed_to_send_recv_v6(struct inpcb *inp, u_int16_t local_port, u_int16_t remote_port, struct in6_addr *local_addr, struct in6_addr *remote_addr, ifnet_t interface, u_int16_t pf_tag, necp_kernel_policy_id *return_policy_id, u_int32_t *return_route_rule_id, necp_kernel_policy_id *return_skip_policy_id, u_int32_t *return_pass_flags) { struct sockaddr_in6 local = {}; struct sockaddr_in6 remote = {}; @@ -9360,22 +9771,21 @@
necp_socket_is_allowed_to_send_recv_v6(struct inpcb *inp, u_int16_t local_port, memcpy(&remote.sin6_addr, remote_addr, sizeof(remote.sin6_addr)); return necp_socket_is_allowed_to_send_recv_internal(inp, (struct sockaddr *)&local, (struct sockaddr *)&remote, interface, - return_policy_id, return_route_rule_id, return_skip_policy_id); + pf_tag, return_policy_id, return_route_rule_id, return_skip_policy_id, return_pass_flags); } bool -necp_socket_is_allowed_to_send_recv(struct inpcb *inp, ifnet_t interface, necp_kernel_policy_id *return_policy_id, - u_int32_t *return_route_rule_id, - necp_kernel_policy_id *return_skip_policy_id) +necp_socket_is_allowed_to_send_recv(struct inpcb *inp, ifnet_t interface, u_int16_t pf_tag, necp_kernel_policy_id *return_policy_id, + u_int32_t *return_route_rule_id, necp_kernel_policy_id *return_skip_policy_id, u_int32_t *return_pass_flags) { - return necp_socket_is_allowed_to_send_recv_internal(inp, NULL, NULL, interface, + return necp_socket_is_allowed_to_send_recv_internal(inp, NULL, NULL, interface, pf_tag, return_policy_id, return_route_rule_id, - return_skip_policy_id); + return_skip_policy_id, return_pass_flags); } int necp_mark_packet_from_socket(struct mbuf *packet, struct inpcb *inp, necp_kernel_policy_id policy_id, u_int32_t route_rule_id, - necp_kernel_policy_id skip_policy_id) + necp_kernel_policy_id skip_policy_id, u_int32_t pass_flags) { if (packet == NULL || inp == NULL || !(packet->m_flags & M_PKTHDR)) { return EINVAL; @@ -9412,6 +9822,11 @@ necp_mark_packet_from_socket(struct mbuf *packet, struct inpcb *inp, necp_kernel packet->m_pkthdr.necp_mtag.necp_skip_policy_id = NECP_KERNEL_POLICY_ID_NONE; } + if (((pass_flags & NECP_KERNEL_POLICY_PASS_PF_TAG) == NECP_KERNEL_POLICY_PASS_PF_TAG) || + ((inp->inp_policyresult.results.result_parameter.pass_flags & NECP_KERNEL_POLICY_PASS_PF_TAG) == NECP_KERNEL_POLICY_PASS_PF_TAG)) { + m_pftag(packet)->pftag_tag = PF_TAG_ID_SYSTEM_SERVICE; + } + return 0; } @@ -9488,6 +9903,16 @@ necp_get_skip_policy_id_from_packet(struct mbuf *packet) return packet->m_pkthdr.necp_mtag.necp_skip_policy_id; } +u_int16_t +necp_get_packet_filter_tags_from_packet(struct mbuf *packet) +{ + if (packet == NULL || !(packet->m_flags & M_PKTHDR)) { + return 0; + } + + return m_pftag(packet)->pftag_tag; +} + bool necp_packet_should_skip_filters(struct mbuf *packet) { @@ -9572,16 +9997,27 @@ necp_socket_should_use_flow_divert(struct inpcb *inp) return FALSE; } - return inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_SOCKET_DIVERT; + return !(inp->inp_socket->so_flags1 & SOF1_FLOW_DIVERT_SKIP) && + (inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_SOCKET_DIVERT || + (inp->inp_policyresult.results.flow_divert_aggregate_unit != 0)); } u_int32_t -necp_socket_get_flow_divert_control_unit(struct inpcb *inp) +necp_socket_get_flow_divert_control_unit(struct inpcb *inp, uint32_t *aggregate_unit) { if (inp == NULL) { return 0; } + if (inp->inp_socket->so_flags1 & SOF1_FLOW_DIVERT_SKIP) { + return 0; + } + + if (aggregate_unit != NULL && + inp->inp_policyresult.results.flow_divert_aggregate_unit != 0) { + *aggregate_unit = inp->inp_policyresult.results.flow_divert_aggregate_unit; + } + if (inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_SOCKET_DIVERT) { return inp->inp_policyresult.results.result_parameter.flow_divert_control_unit; } diff --git a/bsd/net/necp.h b/bsd/net/necp.h index 1da29404a..041682c5b 100644 --- a/bsd/net/necp.h +++ b/bsd/net/necp.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013-2019 
Apple Inc. All rights reserved. + * Copyright (c) 2013-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -149,7 +149,15 @@ struct necp_packet_header { #define NECP_POLICY_CONDITION_FLOW_LOCAL_ADDR_EMPTY 25 // N/A #define NECP_POLICY_CONDITION_FLOW_REMOTE_ADDR_EMPTY 26 // N/A #define NECP_POLICY_CONDITION_PLATFORM_BINARY 27 // N/A -#define NECP_POLICY_CONDITION_SIGNING_IDENTIFIER 28 // String +#define NECP_POLICY_CONDITION_SDK_VERSION 28 // struct necp_policy_condition_sdk_version +#define NECP_POLICY_CONDITION_SIGNING_IDENTIFIER 29 // String +#define NECP_POLICY_CONDITION_PACKET_FILTER_TAGS 30 // u_int16_t + +/* + * Policy Packet tags + */ +#define NECP_POLICY_CONDITION_PACKET_FILTER_TAG_STACK_DROP 0x01 +#define NECP_POLICY_CONDITION_PACKET_FILTER_TAG_MAX NECP_POLICY_CONDITION_PACKET_FILTER_TAG_STACK_DROP /* * Results @@ -178,6 +186,12 @@ struct necp_packet_header { * PASS Result Flags */ #define NECP_POLICY_PASS_NO_SKIP_IPSEC 0x01 +#define NECP_POLICY_PASS_PF_TAG 0x02 + +/* + * DROP Result Flags + */ +#define NECP_POLICY_DROP_FLAG_LOCAL_NETWORK 0x01 /* * Route Rules @@ -242,6 +256,12 @@ struct necp_policy_condition_agent_type { char agent_type[32]; } __attribute__((__packed__)); +struct necp_policy_condition_sdk_version { + uint32_t platform; // e.g., PLATFORM_IOS + uint32_t min_version; // Encoded as XXXX.YY.ZZ + uint32_t version; // Encoded as XXXX.YY.ZZ +} __attribute__((__packed__)); + #define NECP_SESSION_PRIORITY_UNKNOWN 0 #define NECP_SESSION_PRIORITY_CONTROL 1 #define NECP_SESSION_PRIORITY_PRIVILEGED_TUNNEL 2 @@ -267,6 +287,8 @@ typedef union { u_int scoped_interface_index; u_int32_t flow_divert_control_unit; u_int32_t filter_control_unit; + u_int32_t pass_flags; + u_int32_t drop_flags; } necp_kernel_policy_routing_result_parameter; #define NECP_SERVICE_FLAGS_REGISTERED 0x01 @@ -279,6 +301,7 @@ struct necp_aggregate_result { necp_kernel_policy_result routing_result; necp_kernel_policy_routing_result_parameter routing_result_parameter; necp_kernel_policy_filter filter_control_unit; + u_int32_t flow_divert_aggregate_unit; necp_kernel_policy_result service_action; uuid_t service_uuid; u_int32_t service_flags; @@ -510,6 +533,7 @@ typedef struct necp_cache_buffer { #define NECP_CLIENT_ACTION_REMOVE_FLOW 18 // Remove a flow. Input: flow_id, optional struct ifnet_stats_per_flow #define NECP_CLIENT_ACTION_CLAIM 19 // Claim a client that has been added for this unique PID. Input: client_id #define NECP_CLIENT_ACTION_SIGN 20 // Sign a resolver answer. Input: struct necp_client_resolver_answer; Output: signed tag, expected to be 32 bytes +#define NECP_CLIENT_ACTION_GET_INTERFACE_ADDRESS 21 // Get the best interface local address for given remote address. 
Input: ifindex, remote sockaddr; Output: matching local sockaddr #define NECP_CLIENT_PARAMETER_APPLICATION NECP_POLICY_CONDITION_APPLICATION // Requires entitlement #define NECP_CLIENT_PARAMETER_REAL_APPLICATION NECP_POLICY_CONDITION_REAL_APPLICATION // Requires entitlement @@ -603,6 +627,7 @@ typedef struct necp_cache_buffer { #define NECP_CLIENT_RESULT_FLOW_ID 16 // uuid_t #define NECP_CLIENT_RESULT_INTERFACE_TIME_DELTA 17 // u_int32_t, seconds since interface up/down #define NECP_CLIENT_RESULT_REASON 18 // u_int32_t, see NECP_CLIENT_RESULT_REASON_* values +#define NECP_CLIENT_RESULT_FLOW_DIVERT_AGGREGATE_UNIT 19 // u_int32_t #define NECP_CLIENT_RESULT_NEXUS_INSTANCE 100 // uuid_t #define NECP_CLIENT_RESULT_NEXUS_PORT 101 // u_int16_t @@ -650,6 +675,7 @@ typedef struct necp_cache_buffer { #define NECP_CLIENT_RESULT_REASON_CONSTRAINED_PROHIBITED 2 // Constrained networks were prohibited #define NECP_CLIENT_RESULT_REASON_CELLULAR_DENIED 3 // Denied by a cellular route rule #define NECP_CLIENT_RESULT_REASON_WIFI_DENIED 4 // Denied by a wifi route rule +#define NECP_CLIENT_RESULT_REASON_LOCAL_NETWORK_PROHIBITED 5 // Local network access prohibited struct necp_interface_signature { u_int8_t signature[IFNET_SIGNATURELEN]; @@ -679,6 +705,11 @@ struct necp_interface_details { #define NECP_INTERFACE_FLAG_CONSTRAINED 0x0040 #define NECP_INTERFACE_FLAG_HAS_NETMASK 0x0080 #define NECP_INTERFACE_FLAG_HAS_BROADCAST 0x0100 +#define NECP_INTERFACE_FLAG_SUPPORTS_MULTICAST 0x0200 +#define NECP_INTERFACE_FLAG_HAS_DNS 0x0400 +#define NECP_INTERFACE_FLAG_HAS_NAT64 0x0800 +#define NECP_INTERFACE_FLAG_IPV4_ROUTABLE 0x1000 +#define NECP_INTERFACE_FLAG_IPV6_ROUTABLE 0x2000 struct necp_client_parameter_netagent_type { char netagent_domain[32]; @@ -731,6 +762,7 @@ struct kev_necp_policies_changed_data { #define NECP_CLIENT_FLOW_FLAGS_USE_CLIENT_ID 0x02 // Register the client ID rather than the flow registration ID with network agents #define NECP_CLIENT_FLOW_FLAGS_BROWSE 0x04 // Create request with a browse agent #define NECP_CLIENT_FLOW_FLAGS_RESOLVE 0x08 // Create request with a resolution agent +#define NECP_CLIENT_FLOW_FLAGS_OVERRIDE_ADDRESS 0x10 // Flow has a different remote address than the parent flow struct necp_client_flow_stats { u_int32_t stats_type; // NECP_CLIENT_STATISTICS_TYPE_* @@ -745,6 +777,7 @@ struct necp_client_add_flow { u_int16_t flags; // NECP_CLIENT_FLOW_FLAGS_* u_int16_t stats_request_count; struct necp_client_flow_stats stats_requests[0]; + // sockaddr for override endpoint } __attribute__((__packed__)); struct necp_agent_use_parameters { @@ -813,16 +846,27 @@ struct necp_drop_dest_policy { #include #include #include +#include SYSCTL_DECL(_net_necp); +extern os_log_t necp_log_handle; + #define NECPLOG(level, format, ...) do { \ - log((level > LOG_NOTICE ? LOG_NOTICE : level), "%s: " format "\n", __FUNCTION__, __VA_ARGS__); \ + if (level == LOG_ERR) { \ + os_log_error(necp_log_handle, "%s: " format "\n", __FUNCTION__, __VA_ARGS__); \ + } else { \ + os_log(necp_log_handle, "%s: " format "\n", __FUNCTION__, __VA_ARGS__); \ + } \ } while (0) #define NECPLOG0(level, msg) do { \ - log((level > LOG_NOTICE ? 
LOG_NOTICE : level), "%s: %s\n", __FUNCTION__, msg); \ + if (level == LOG_ERR) { \ + os_log_error(necp_log_handle, "%s: %s\n", __FUNCTION__, msg); \ + } else { \ + os_log(necp_log_handle, "%s: %s\n", __FUNCTION__, msg); \ + } \ } while (0) enum necp_fd_type_t { @@ -900,6 +944,9 @@ extern int necp_buffer_find_tlv(u_int8_t *buffer, u_int32_t buffer_length, int o #define NECPCTL_SYSCTL_ARENA_COUNT 17 /* Count of sysctl arenas */ #define NECPCTL_DROP_UNENTITLED_LEVEL 18 /* Drop unentitled process traffic above this level */ #define NECPCTL_PASS_INTERPOSE 19 /* Pass interpose */ +#define NECPCTL_RESTRICT_MULTICAST 20 /* Restrict multicast access */ +#define NECPCTL_DEDUP_POLICIES 21 /* Dedup overlapping policies */ + #define NECPCTL_NAMES { \ { 0, 0 }, \ @@ -938,6 +985,9 @@ typedef u_int32_t necp_app_id; #define NECP_KERNEL_POLICY_RESULT_ALLOW_UNENTITLED NECP_POLICY_RESULT_ALLOW_UNENTITLED #define NECP_KERNEL_POLICY_PASS_NO_SKIP_IPSEC NECP_POLICY_PASS_NO_SKIP_IPSEC +#define NECP_KERNEL_POLICY_PASS_PF_TAG NECP_POLICY_PASS_PF_TAG + +#define NECP_KERNEL_POLICY_DROP_FLAG_LOCAL_NETWORK NECP_POLICY_DROP_FLAG_LOCAL_NETWORK typedef struct { u_int32_t identifier; @@ -953,6 +1003,7 @@ typedef union { u_int32_t route_rule_id; u_int32_t netagent_id; u_int32_t pass_flags; + u_int32_t drop_flags; necp_kernel_policy_service service; } necp_kernel_policy_result_parameter; @@ -992,7 +1043,9 @@ struct necp_kernel_socket_policy { union necp_sockaddr_union cond_remote_end; // Matches IP address range u_int8_t cond_remote_prefix; // Defines subnet struct necp_policy_condition_agent_type cond_agent_type; + struct necp_policy_condition_sdk_version cond_sdk_version; char *cond_signing_identifier; // String + u_int16_t cond_packet_filter_tags; necp_kernel_policy_result result; necp_kernel_policy_result_parameter result_parameter; @@ -1018,6 +1071,7 @@ struct necp_kernel_ip_output_policy { union necp_sockaddr_union cond_remote_end; // Matches IP address range u_int8_t cond_remote_prefix; // Defines subnet u_int32_t cond_last_interface_index; + u_int16_t cond_packet_filter_tags; necp_kernel_policy_result result; necp_kernel_policy_result_parameter result_parameter; @@ -1055,6 +1109,7 @@ struct necp_aggregate_socket_result { necp_kernel_policy_result result; necp_kernel_policy_result_parameter result_parameter; necp_kernel_policy_filter filter_control_unit; + u_int32_t flow_divert_aggregate_unit; u_int32_t route_rule_id; int32_t qos_marking_gencount; }; @@ -1078,7 +1133,7 @@ extern void necp_inpcb_dispose(struct inpcb *inp); extern u_int32_t necp_socket_get_content_filter_control_unit(struct socket *so); extern bool necp_socket_should_use_flow_divert(struct inpcb *inp); -extern u_int32_t necp_socket_get_flow_divert_control_unit(struct inpcb *inp); +extern u_int32_t necp_socket_get_flow_divert_control_unit(struct inpcb *inp, uint32_t *aggregate_unit); extern bool necp_socket_should_rescope(struct inpcb *inp); extern u_int necp_socket_get_rescope_if_index(struct inpcb *inp); @@ -1086,25 +1141,27 @@ extern u_int32_t necp_socket_get_effective_mtu(struct inpcb *inp, u_int32_t curr extern bool necp_socket_is_allowed_to_recv_on_interface(struct inpcb *inp, ifnet_t interface); -extern bool necp_socket_is_allowed_to_send_recv(struct inpcb *inp, ifnet_t interface, +extern bool necp_socket_is_allowed_to_send_recv(struct inpcb *inp, ifnet_t interface, u_int16_t pf_tag, necp_kernel_policy_id *return_policy_id, u_int32_t *return_route_rule_id, - necp_kernel_policy_id *return_skip_policy_id); + necp_kernel_policy_id 
*return_skip_policy_id, u_int32_t *return_pass_flags); extern bool necp_socket_is_allowed_to_send_recv_v4(struct inpcb *inp, u_int16_t local_port, u_int16_t remote_port, struct in_addr *local_addr, - struct in_addr *remote_addr, ifnet_t interface, + struct in_addr *remote_addr, ifnet_t interface, u_int16_t pf_tag, necp_kernel_policy_id *return_policy_id, u_int32_t *return_route_rule_id, - necp_kernel_policy_id *return_skip_policy_id); + necp_kernel_policy_id *return_skip_policy_id, u_int32_t *return_pass_flags); extern bool necp_socket_is_allowed_to_send_recv_v6(struct inpcb *inp, u_int16_t local_port, u_int16_t remote_port, struct in6_addr *local_addr, - struct in6_addr *remote_addr, ifnet_t interface, + struct in6_addr *remote_addr, ifnet_t interface, u_int16_t pf_tag, necp_kernel_policy_id *return_policy_id, u_int32_t *return_route_rule_id, - necp_kernel_policy_id *return_skip_policy_id); -extern void necp_socket_update_qos_marking(struct inpcb *inp, struct rtentry *route, struct ifnet *interface, u_int32_t route_rule_id); + necp_kernel_policy_id *return_skip_policy_id, u_int32_t *return_pass_flags); +extern void necp_socket_update_qos_marking(struct inpcb *inp, struct rtentry *route, u_int32_t route_rule_id); +extern bool necp_lookup_current_qos_marking(int32_t *qos_marking_gencount, struct rtentry *route, struct ifnet *interface, u_int32_t route_rule_id, bool old_qos_marking); extern int necp_mark_packet_from_socket(struct mbuf *packet, struct inpcb *inp, necp_kernel_policy_id policy_id, - u_int32_t route_rule_id, necp_kernel_policy_id skip_policy_id); + u_int32_t route_rule_id, necp_kernel_policy_id skip_policy_id, u_int32_t pass_flags); extern necp_kernel_policy_id necp_get_policy_id_from_packet(struct mbuf *packet); extern necp_kernel_policy_id necp_get_skip_policy_id_from_packet(struct mbuf *packet); +extern u_int16_t necp_get_packet_filter_tags_from_packet(struct mbuf *packet); extern bool necp_packet_should_skip_filters(struct mbuf *packet); extern u_int32_t necp_get_last_interface_index_from_packet(struct mbuf *packet); extern u_int32_t necp_get_route_rule_id_from_packet(struct mbuf *packet); diff --git a/bsd/net/necp_client.c b/bsd/net/necp_client.c index 0b3ef6782..0ef2776e3 100644 --- a/bsd/net/necp_client.c +++ b/bsd/net/necp_client.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015-2019 Apple Inc. All rights reserved. + * Copyright (c) 2015-2020 Apple Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -31,8 +31,6 @@ #include #include -#include - #include #include #include @@ -307,6 +305,7 @@ struct necp_client_flow { }; } u; uint32_t interface_index; + u_short delegated_interface_index; uint16_t interface_flags; uint32_t necp_flow_flags; struct necp_client_flow_protoctl_event protoctl_event; @@ -398,6 +397,7 @@ struct necp_client { #define NECP_CLIENT_ROUTE_UNLOCK(_c) lck_mtx_unlock(&_c->route_lock) static void necp_client_retain_locked(struct necp_client *client); +static void necp_client_retain(struct necp_client *client); static bool necp_client_release_locked(struct necp_client *client); static bool necp_client_release(struct necp_client *client); @@ -484,13 +484,10 @@ struct necp_fd_data { static LIST_HEAD(_necp_fd_list, necp_fd_data) necp_fd_list; static LIST_HEAD(_necp_fd_observer_list, necp_fd_data) necp_fd_observer_list; -#define NECP_CLIENT_FD_ZONE_MAX 128 -#define NECP_CLIENT_FD_ZONE_NAME "necp.clientfd" - -static unsigned int necp_client_fd_size; /* size of zone element */ -static struct zone *necp_client_fd_zone; /* zone for necp_fd_data */ +static ZONE_DECLARE(necp_client_fd_zone, "necp.clientfd", + sizeof(struct necp_fd_data), ZC_NONE); -#define NECP_FLOW_ZONE_NAME "necp.flow" +#define NECP_FLOW_ZONE_NAME "necp.flow" #define NECP_FLOW_REGISTRATION_ZONE_NAME "necp.flowregistration" static unsigned int necp_flow_size; /* size of necp_client_flow */ @@ -499,13 +496,10 @@ static struct mcache *necp_flow_cache; /* cache for necp_client_flow */ static unsigned int necp_flow_registration_size; /* size of necp_client_flow_registration */ static struct mcache *necp_flow_registration_cache; /* cache for necp_client_flow_registration */ -#define NECP_ARENA_INFO_ZONE_MAX 128 -#define NECP_ARENA_INFO_ZONE_NAME "necp.arenainfo" - static lck_grp_attr_t *necp_fd_grp_attr = NULL; -static lck_attr_t *necp_fd_mtx_attr = NULL; -static lck_grp_t *necp_fd_mtx_grp = NULL; +static lck_attr_t *necp_fd_mtx_attr = NULL; +static lck_grp_t *necp_fd_mtx_grp = NULL; decl_lck_rw_data(static, necp_fd_lock); decl_lck_rw_data(static, necp_observer_lock); @@ -748,7 +742,7 @@ necpop_select(struct fileproc *fp, int which, void *wql, vfs_context_t ctx) int events = 0; proc_t procp; - fd_data = (struct necp_fd_data *)fp->f_fglob->fg_data; + fd_data = (struct necp_fd_data *)fp->fp_glob->fg_data; if (fd_data == NULL) { return 0; } @@ -847,7 +841,7 @@ necpop_kqfilter(struct fileproc *fp, struct knote *kn, return 0; } - fd_data = (struct necp_fd_data *)fp->f_fglob->fg_data; + fd_data = (struct necp_fd_data *)fp->fp_glob->fg_data; if (fd_data == NULL) { NECPLOG0(LOG_ERR, "No channel for kqfilter"); knote_set_error(kn, ENOENT); @@ -956,6 +950,13 @@ necp_client_retain_locked(struct necp_client *client) os_ref_retain_locked(&client->reference_count); } +static void +necp_client_retain(struct necp_client *client) +{ + NECP_CLIENT_LOCK(client); + necp_client_retain_locked(client); + NECP_CLIENT_UNLOCK(client); +} static bool necp_client_release_locked(struct necp_client *client) @@ -1133,24 +1134,26 @@ necp_destroy_client_flow_registration(struct necp_client *client, LIST_FOREACH_SAFE(search_flow, &flow_registration->flow_list, flow_chain, temp_flow) { if (search_flow->nexus && !uuid_is_null(search_flow->u.nexus_agent)) { - // Note that if we had defuncted the client earlier, this would result in a harmless ENOENT - u_int8_t message_type = (abort ? 
NETAGENT_MESSAGE_TYPE_ABORT_NEXUS : - NETAGENT_MESSAGE_TYPE_CLOSE_NEXUS); - if (((flow_registration->flags & NECP_CLIENT_FLOW_FLAGS_BROWSE) || - (flow_registration->flags & NECP_CLIENT_FLOW_FLAGS_RESOLVE)) && - !(flow_registration->flags & NECP_CLIENT_FLOW_FLAGS_ALLOW_NEXUS)) { - message_type = NETAGENT_MESSAGE_TYPE_CLIENT_UNASSERT; - } - int netagent_error = netagent_client_message_with_params(search_flow->u.nexus_agent, - ((flow_registration->flags & NECP_CLIENT_FLOW_FLAGS_USE_CLIENT_ID) ? - client->client_id : - flow_registration->registration_id), - pid, client->agent_handle, - message_type, - has_close_parameters ? &close_parameters : NULL, - NULL, 0); - if (netagent_error != 0 && netagent_error != ENOENT) { - NECPLOG(LOG_ERR, "necp_client_remove close nexus error (%d) MESSAGE TYPE %u", netagent_error, message_type); + // Don't unregister for defunct flows + if (!flow_registration->defunct) { + u_int8_t message_type = (abort ? NETAGENT_MESSAGE_TYPE_ABORT_NEXUS : + NETAGENT_MESSAGE_TYPE_CLOSE_NEXUS); + if (((flow_registration->flags & NECP_CLIENT_FLOW_FLAGS_BROWSE) || + (flow_registration->flags & NECP_CLIENT_FLOW_FLAGS_RESOLVE)) && + !(flow_registration->flags & NECP_CLIENT_FLOW_FLAGS_ALLOW_NEXUS)) { + message_type = NETAGENT_MESSAGE_TYPE_CLIENT_UNASSERT; + } + int netagent_error = netagent_client_message_with_params(search_flow->u.nexus_agent, + ((flow_registration->flags & NECP_CLIENT_FLOW_FLAGS_USE_CLIENT_ID) ? + client->client_id : + flow_registration->registration_id), + pid, client->agent_handle, + message_type, + has_close_parameters ? &close_parameters : NULL, + NULL, 0); + if (netagent_error != 0 && netagent_error != ENOENT) { + NECPLOG(LOG_ERR, "necp_client_remove close nexus error (%d) MESSAGE TYPE %u", netagent_error, message_type); + } } uuid_clear(search_flow->u.nexus_agent); } @@ -1219,6 +1222,46 @@ necp_destroy_client(struct necp_client *client, pid_t pid, bool abort) OSDecrementAtomic(&necp_client_count); } +static bool +necp_defunct_client_fd_locked_inner(struct necp_fd_data *client_fd, struct _necp_flow_defunct_list *defunct_list, bool destroy_stats); + +static void +necp_process_defunct_list(struct _necp_flow_defunct_list *defunct_list) +{ + if (!LIST_EMPTY(defunct_list)) { + struct necp_flow_defunct *flow_defunct = NULL; + struct necp_flow_defunct *temp_flow_defunct = NULL; + + // For each newly defunct client, send a message to the nexus to remove the flow + LIST_FOREACH_SAFE(flow_defunct, defunct_list, chain, temp_flow_defunct) { + if (!uuid_is_null(flow_defunct->nexus_agent)) { + u_int8_t message_type = NETAGENT_MESSAGE_TYPE_ABORT_NEXUS; + if (((flow_defunct->flags & NECP_CLIENT_FLOW_FLAGS_BROWSE) || + (flow_defunct->flags & NECP_CLIENT_FLOW_FLAGS_RESOLVE)) && + !(flow_defunct->flags & NECP_CLIENT_FLOW_FLAGS_ALLOW_NEXUS)) { + message_type = NETAGENT_MESSAGE_TYPE_CLIENT_UNASSERT; + } + int netagent_error = netagent_client_message_with_params(flow_defunct->nexus_agent, + flow_defunct->flow_id, + flow_defunct->proc_pid, + flow_defunct->agent_handle, + message_type, + flow_defunct->has_close_parameters ? &flow_defunct->close_parameters : NULL, + NULL, 0); + if (netagent_error != 0) { + char namebuf[MAXCOMLEN + 1]; + (void) strlcpy(namebuf, "unknown", sizeof(namebuf)); + proc_name(flow_defunct->proc_pid, namebuf, sizeof(namebuf)); + NECPLOG((netagent_error == ENOENT ? 
LOG_DEBUG : LOG_ERR), "necp_update_client abort nexus error (%d) for pid %d %s", netagent_error, flow_defunct->proc_pid, namebuf); + } + } + LIST_REMOVE(flow_defunct, chain); + FREE(flow_defunct, M_NECP); + } + } + ASSERT(LIST_EMPTY(defunct_list)); +} + static int necpop_close(struct fileglob *fg, vfs_context_t ctx) { @@ -1247,6 +1290,11 @@ necpop_close(struct fileglob *fg, vfs_context_t ctx) NECP_FD_LOCK(fd_data); pid_t pid = fd_data->proc_pid; + struct _necp_flow_defunct_list defunct_list; + LIST_INIT(&defunct_list); + + (void)necp_defunct_client_fd_locked_inner(fd_data, &defunct_list, false); + struct necp_client_flow_registration *flow_registration = NULL; struct necp_client_flow_registration *temp_flow_registration = NULL; RB_FOREACH_SAFE(flow_registration, _necp_fd_flow_tree, &fd_data->flows, temp_flow_registration) { @@ -1295,6 +1343,8 @@ necpop_close(struct fileglob *fg, vfs_context_t ctx) RB_REMOVE(_necp_client_tree, &clients_to_close, client); necp_destroy_client(client, pid, true); } + + necp_process_defunct_list(&defunct_list); } return error; @@ -1310,35 +1360,77 @@ necp_address_is_wildcard(const union necp_sockaddr_union * const addr) } static int -necp_find_fd_data(int fd, struct necp_fd_data **fd_data) +necp_find_fd_data(struct proc *p, int fd, + struct fileproc **fpp, struct necp_fd_data **fd_data) { - proc_t p = current_proc(); - struct fileproc *fp = NULL; - int error = 0; + struct fileproc *fp; + int error = fp_get_ftype(p, fd, DTYPE_NETPOLICY, ENODEV, &fp); - proc_fdlock_spin(p); - if ((error = fp_lookup(p, fd, &fp, 1)) != 0) { - goto done; - } - if (fp->f_fglob->fg_ops->fo_type != DTYPE_NETPOLICY) { - fp_drop(p, fd, fp, 1); - error = ENODEV; - goto done; + if (error == 0) { + *fd_data = (struct necp_fd_data *)fp->fp_glob->fg_data; + *fpp = fp; + + if ((*fd_data)->necp_fd_type != necp_fd_type_client) { + // Not a client fd, ignore + fp_drop(p, fd, fp, 0); + error = EINVAL; + } } - *fd_data = (struct necp_fd_data *)fp->f_fglob->fg_data; + return error; +} - if ((*fd_data)->necp_fd_type != necp_fd_type_client) { - // Not a client fd, ignore - fp_drop(p, fd, fp, 1); - error = EINVAL; - goto done; +static void +necp_client_add_nexus_flow(struct necp_client_flow_registration *flow_registration, + uuid_t nexus_agent, + uint32_t interface_index, + uint16_t interface_flags) +{ + struct necp_client_flow *new_flow = mcache_alloc(necp_flow_cache, MCR_SLEEP); + if (new_flow == NULL) { + NECPLOG0(LOG_ERR, "Failed to allocate nexus flow"); + return; } -done: - proc_fdunlock(p); - return error; + memset(new_flow, 0, sizeof(*new_flow)); + + new_flow->nexus = TRUE; + uuid_copy(new_flow->u.nexus_agent, nexus_agent); + new_flow->interface_index = interface_index; + new_flow->interface_flags = interface_flags; + new_flow->check_tcp_heuristics = TRUE; + + + LIST_INSERT_HEAD(&flow_registration->flow_list, new_flow, flow_chain); + } +static void +necp_client_add_nexus_flow_if_needed(struct necp_client_flow_registration *flow_registration, + uuid_t nexus_agent, + uint32_t interface_index) +{ + struct necp_client_flow *flow = NULL; + LIST_FOREACH(flow, &flow_registration->flow_list, flow_chain) { + if (flow->nexus && + uuid_compare(flow->u.nexus_agent, nexus_agent) == 0) { + return; + } + } + + uint16_t interface_flags = 0; + ifnet_t ifp = NULL; + ifnet_head_lock_shared(); + if (interface_index != IFSCOPE_NONE && interface_index <= (u_int32_t)if_index) { + ifp = ifindex2ifnet[interface_index]; + if (ifp != NULL) { + ifnet_lock_shared(ifp); + interface_flags = nstat_ifnet_to_flags(ifp); + 
ifnet_lock_done(ifp); + } + } + ifnet_head_done(); + necp_client_add_nexus_flow(flow_registration, nexus_agent, interface_index, interface_flags); +} static struct necp_client_flow * necp_client_add_interface_flow(struct necp_client_flow_registration *flow_registration, @@ -1503,6 +1595,17 @@ necp_client_flow_is_viable(proc_t proc, struct necp_client *client, } } + if (flow->interface_index != IFSCOPE_NONE) { + ifnet_head_lock_shared(); + + struct ifnet *ifp = ifindex2ifnet[flow->interface_index]; + if (ifp && ifp->if_delegated.ifp != IFSCOPE_NONE) { + flow->delegated_interface_index = ifp->if_delegated.ifp->if_index; + } + + ifnet_head_done(); + } + return error == 0 && result.routed_interface_index != IFSCOPE_NONE && result.routing_result != NECP_KERNEL_POLICY_RESULT_DROP; @@ -1549,7 +1652,7 @@ necp_client_update_flows(proc_t proc, { NECP_CLIENT_ASSERT_LOCKED(client); - bool client_updated = FALSE; + bool any_client_updated = FALSE; struct necp_client_flow *flow = NULL; struct necp_client_flow *temp_flow = NULL; struct necp_client_flow_registration *flow_registration = NULL; @@ -1560,7 +1663,11 @@ necp_client_update_flows(proc_t proc, } LIST_FOREACH_SAFE(flow, &flow_registration->flow_list, flow_chain, temp_flow) { + bool client_updated = FALSE; + // Check policy result for flow + u_short old_delegated_ifindex = flow->delegated_interface_index; + int old_flags = flow->necp_flow_flags; bool viable = necp_client_flow_is_viable(proc, client, flow); @@ -1576,6 +1683,10 @@ necp_client_update_flows(proc_t proc, client_updated = TRUE; } + if (flow->delegated_interface_index != old_delegated_ifindex) { + client_updated = TRUE; + } + if (flow->viable && client_updated && (flow->socket || (!flow->socket && !flow->nexus)) && flow->u.cb) { bool flow_viable = flow->viable; flow->u.cb(flow->u.socket_handle, NECP_CLIENT_CBACTION_VIABLE, flow->interface_index, flow->necp_flow_flags, &flow_viable); @@ -1613,10 +1724,12 @@ necp_client_update_flows(proc_t proc, mcache_free(necp_flow_cache, flow); } } + + any_client_updated |= client_updated; } } - return client_updated; + return any_client_updated; } static void @@ -2849,9 +2962,8 @@ necp_update_flow_protoctl_event(uuid_t netagent_uuid, uuid_t client_id, struct necp_client_flow *flow = NULL; LIST_FOREACH(flow, &flow_registration->flow_list, flow_chain) { // Verify that the client nexus agent matches - if (flow->nexus && - uuid_compare(flow->u.nexus_agent, - netagent_uuid) == 0) { + if ((flow->nexus && uuid_compare(flow->u.nexus_agent, netagent_uuid) == 0) || + flow->socket) { flow->has_protoctl_event = TRUE; flow->protoctl_event.protoctl_event_code = protoctl_event_code; flow->protoctl_event.protoctl_event_val = protoctl_event_val; @@ -3339,6 +3451,11 @@ necp_update_client_result(proc_t proc, sizeof(result.filter_control_unit), &result.filter_control_unit, &updated, client->result, sizeof(client->result)); } + if (result.flow_divert_aggregate_unit != 0) { + cursor = necp_buffer_write_tlv_if_different(cursor, NECP_CLIENT_RESULT_FLOW_DIVERT_AGGREGATE_UNIT, + sizeof(result.flow_divert_aggregate_unit), &result.flow_divert_aggregate_unit, &updated, + client->result, sizeof(client->result)); + } if (result.routed_interface_index != 0) { u_int routed_interface_index = result.routed_interface_index; if (result.routing_result == NECP_KERNEL_POLICY_RESULT_IP_TUNNEL && @@ -3634,10 +3751,9 @@ necp_update_client_result(proc_t proc, return updated; } -static inline void -necp_defunct_client_fd_locked(struct necp_fd_data *client_fd, struct _necp_flow_defunct_list 
*defunct_list, struct proc *proc) +static bool +necp_defunct_client_fd_locked_inner(struct necp_fd_data *client_fd, struct _necp_flow_defunct_list *defunct_list, bool destroy_stats) { -#pragma unused(proc) bool updated_result = FALSE; struct necp_client *client = NULL; @@ -3672,9 +3788,24 @@ necp_defunct_client_fd_locked(struct necp_fd_data *client_fd, struct _necp_flow_ } } } + if (destroy_stats) { + } NECP_CLIENT_UNLOCK(client); } + return updated_result; +} + +static inline void +necp_defunct_client_fd_locked(struct necp_fd_data *client_fd, struct _necp_flow_defunct_list *defunct_list, struct proc *proc) +{ +#pragma unused(proc) + bool updated_result = FALSE; + + NECP_FD_ASSERT_LOCKED(client_fd); + + updated_result = necp_defunct_client_fd_locked_inner(client_fd, defunct_list, true); + if (updated_result) { necp_fd_notify(client_fd, true); @@ -3731,38 +3862,7 @@ necp_update_all_clients_callout(__unused thread_call_param_t dummy, NECP_FD_LIST_UNLOCK(); // Handle the case in which some clients became newly defunct - if (!LIST_EMPTY(&defunct_list)) { - struct necp_flow_defunct *flow_defunct = NULL; - struct necp_flow_defunct *temp_flow_defunct = NULL; - - // For each newly defunct client, send a message to the nexus to remove the flow - LIST_FOREACH_SAFE(flow_defunct, &defunct_list, chain, temp_flow_defunct) { - if (!uuid_is_null(flow_defunct->nexus_agent)) { - u_int8_t message_type = NETAGENT_MESSAGE_TYPE_ABORT_NEXUS; - if (((flow_defunct->flags & NECP_CLIENT_FLOW_FLAGS_BROWSE) || - (flow_defunct->flags & NECP_CLIENT_FLOW_FLAGS_RESOLVE)) && - !(flow_defunct->flags & NECP_CLIENT_FLOW_FLAGS_ALLOW_NEXUS)) { - message_type = NETAGENT_MESSAGE_TYPE_CLIENT_UNASSERT; - } - int netagent_error = netagent_client_message_with_params(flow_defunct->nexus_agent, - flow_defunct->flow_id, - flow_defunct->proc_pid, - flow_defunct->agent_handle, - message_type, - flow_defunct->has_close_parameters ? &flow_defunct->close_parameters : NULL, - NULL, 0); - if (netagent_error != 0) { - char namebuf[MAXCOMLEN + 1]; - (void) strlcpy(namebuf, "unknown", sizeof(namebuf)); - proc_name(flow_defunct->proc_pid, namebuf, sizeof(namebuf)); - NECPLOG((netagent_error == ENOENT ? 
LOG_DEBUG : LOG_ERR), "necp_update_client abort nexus error (%d) for pid %d %s", netagent_error, flow_defunct->proc_pid, namebuf); - } - } - LIST_REMOVE(flow_defunct, chain); - FREE(flow_defunct, M_NECP); - } - } - ASSERT(LIST_EMPTY(&defunct_list)); + necp_process_defunct_list(&defunct_list); } void @@ -3811,7 +3911,7 @@ necp_set_client_as_background(proc_t proc, return FALSE; } - struct necp_fd_data *client_fd = (struct necp_fd_data *)fp->f_fglob->fg_data; + struct necp_fd_data *client_fd = (struct necp_fd_data *)fp->fp_glob->fg_data; if (client_fd == NULL) { NECPLOG0(LOG_ERR, "Could not find client structure for backgrounded client"); return FALSE; @@ -3861,35 +3961,7 @@ necp_fd_defunct(proc_t proc, struct necp_fd_data *client_fd) necp_defunct_client_fd_locked(client_fd, &defunct_list, proc); NECP_FD_UNLOCK(client_fd); - if (!LIST_EMPTY(&defunct_list)) { - struct necp_flow_defunct *flow_defunct = NULL; - struct necp_flow_defunct *temp_flow_defunct = NULL; - - // For each defunct client, remove flow from the nexus - LIST_FOREACH_SAFE(flow_defunct, &defunct_list, chain, temp_flow_defunct) { - if (!uuid_is_null(flow_defunct->nexus_agent)) { - u_int8_t message_type = NETAGENT_MESSAGE_TYPE_ABORT_NEXUS; - if (((flow_defunct->flags & NECP_CLIENT_FLOW_FLAGS_BROWSE) || - (flow_defunct->flags & NECP_CLIENT_FLOW_FLAGS_RESOLVE)) && - !(flow_defunct->flags & NECP_CLIENT_FLOW_FLAGS_ALLOW_NEXUS)) { - message_type = NETAGENT_MESSAGE_TYPE_CLIENT_UNASSERT; - } - int netagent_error = netagent_client_message_with_params(flow_defunct->nexus_agent, - flow_defunct->flow_id, - flow_defunct->proc_pid, - flow_defunct->agent_handle, - message_type, - flow_defunct->has_close_parameters ? &flow_defunct->close_parameters : NULL, - NULL, 0); - if (netagent_error != 0) { - NECPLOG((netagent_error == ENOENT ? 
LOG_DEBUG : LOG_ERR), "necp_defunct_client abort nexus error (%d)", netagent_error); - } - } - LIST_REMOVE(flow_defunct, chain); - FREE(flow_defunct, M_NECP); - } - } - ASSERT(LIST_EMPTY(&defunct_list)); + necp_process_defunct_list(&defunct_list); } static void @@ -4447,9 +4519,10 @@ necp_find_matching_interface_index(struct necp_client_parsed_parameters *parsed_ ifnet_head_done(); - if ((parsed_parameters->valid_fields == (parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_PREFERRED_FIELDS)) && - best_preferred_count == 0) { - // If only has preferred fields, and nothing was found, clear the interface index and return TRUE + if (has_preferred_fields && best_preferred_count == 0 && + ((parsed_parameters->valid_fields & (NECP_PARSED_PARAMETERS_SCOPED_FIELDS | NECP_PARSED_PARAMETERS_PREFERRED_FIELDS)) == + (parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_PREFERRED_FIELDS))) { + // If only has preferred ifnet fields, and nothing was found, clear the interface index and return TRUE *return_ifindex = 0; return TRUE; } @@ -4515,9 +4588,9 @@ necp_open(struct proc *p, struct necp_open_args *uap, int *retval) klist_init(&fd_data->si.si_note); fd_data->proc_pid = proc_pid(p); - fp->f_fglob->fg_flag = FREAD; - fp->f_fglob->fg_ops = &necp_fd_ops; - fp->f_fglob->fg_data = fd_data; + fp->fp_glob->fg_flag = FREAD; + fp->fp_glob->fg_ops = &necp_fd_ops; + fp->fp_glob->fg_data = fd_data; proc_fdlock(p); @@ -4577,6 +4650,7 @@ necp_client_add(struct proc *p, struct necp_fd_data *fd_data, struct necp_client { int error = 0; struct necp_client *client = NULL; + const size_t buffer_size = uap->buffer_size; if (fd_data->flags & NECP_OPEN_FLAG_PUSH_OBSERVER) { NECPLOG0(LOG_ERR, "NECP client observers with push enabled may not add their own clients"); @@ -4584,17 +4658,17 @@ necp_client_add(struct proc *p, struct necp_fd_data *fd_data, struct necp_client } if (uap->client_id == 0 || uap->client_id_len != sizeof(uuid_t) || - uap->buffer_size == 0 || uap->buffer_size > NECP_MAX_CLIENT_PARAMETERS_SIZE || uap->buffer == 0) { + buffer_size == 0 || buffer_size > NECP_MAX_CLIENT_PARAMETERS_SIZE || uap->buffer == 0) { return EINVAL; } - if ((client = _MALLOC(sizeof(struct necp_client) + uap->buffer_size, M_NECP, + if ((client = _MALLOC(sizeof(struct necp_client) + buffer_size, M_NECP, M_WAITOK | M_ZERO)) == NULL) { error = ENOMEM; goto done; } - error = copyin(uap->buffer, client->parameters, uap->buffer_size); + error = copyin(uap->buffer, client->parameters, buffer_size); if (error) { NECPLOG(LOG_ERR, "necp_client_add parameters copyin error (%d)", error); goto done; @@ -4605,7 +4679,7 @@ necp_client_add(struct proc *p, struct necp_fd_data *fd_data, struct necp_client os_ref_init(&client->reference_count, &necp_client_refgrp); // Hold our reference until close - client->parameters_length = uap->buffer_size; + client->parameters_length = buffer_size; client->proc_pid = fd_data->proc_pid; // Save off proc pid in case the client will persist past fd client->agent_handle = (void *)fd_data; client->platform_binary = ((csproc_get_platform_binary(p) == 0) ? 0 : 1); @@ -4697,6 +4771,8 @@ necp_client_claim(struct proc *p, struct necp_fd_data *fd_data, struct necp_clie } client->proc_pid = fd_data->proc_pid; // Transfer client to claiming pid + client->agent_handle = (void *)fd_data; + client->platform_binary = ((csproc_get_platform_binary(p) == 0) ? 
0 : 1); // Add matched client to our fd and re-run result NECP_FD_LOCK(fd_data); @@ -4720,6 +4796,7 @@ necp_client_remove(struct necp_fd_data *fd_data, struct necp_client_action_args int error = 0; uuid_t client_id = {}; struct ifnet_stats_per_flow flow_ifnet_stats = {}; + const size_t buffer_size = uap->buffer_size; if (uap->client_id == 0 || uap->client_id_len != sizeof(uuid_t)) { error = EINVAL; @@ -4732,8 +4809,8 @@ necp_client_remove(struct necp_fd_data *fd_data, struct necp_client_action_args goto done; } - if (uap->buffer != 0 && uap->buffer_size == sizeof(flow_ifnet_stats)) { - error = copyin(uap->buffer, &flow_ifnet_stats, uap->buffer_size); + if (uap->buffer != 0 && buffer_size == sizeof(flow_ifnet_stats)) { + error = copyin(uap->buffer, &flow_ifnet_stats, buffer_size); if (error) { NECPLOG(LOG_ERR, "necp_client_remove flow_ifnet_stats copyin error (%d)", error); // Not fatal; make sure to zero-out stats in case of partial copy @@ -4741,7 +4818,7 @@ necp_client_remove(struct necp_fd_data *fd_data, struct necp_client_action_args error = 0; } } else if (uap->buffer != 0) { - NECPLOG(LOG_ERR, "necp_client_remove unexpected parameters length (%zu)", uap->buffer_size); + NECPLOG(LOG_ERR, "necp_client_remove unexpected parameters length (%zu)", buffer_size); } NECP_FD_LOCK(fd_data); @@ -4783,6 +4860,84 @@ done: return error; } +static struct necp_client_flow_registration * +necp_client_fd_find_flow(struct necp_fd_data *client_fd, uuid_t flow_id) +{ + NECP_FD_ASSERT_LOCKED(client_fd); + struct necp_client_flow_registration *flow = NULL; + + if (necp_client_id_is_flow(flow_id)) { + struct necp_client_flow_registration find; + uuid_copy(find.registration_id, flow_id); + flow = RB_FIND(_necp_fd_flow_tree, &client_fd->flows, &find); + } + + return flow; +} + +static NECP_CLIENT_ACTION_FUNCTION int +necp_client_remove_flow(struct necp_fd_data *fd_data, struct necp_client_action_args *uap, int *retval) +{ + int error = 0; + uuid_t flow_id = {}; + struct ifnet_stats_per_flow flow_ifnet_stats = {}; + const size_t buffer_size = uap->buffer_size; + + if (uap->client_id == 0 || uap->client_id_len != sizeof(uuid_t)) { + error = EINVAL; + NECPLOG(LOG_ERR, "necp_client_remove_flow invalid client_id (length %zu)", (size_t)uap->client_id_len); + goto done; + } + + error = copyin(uap->client_id, flow_id, sizeof(uuid_t)); + if (error) { + NECPLOG(LOG_ERR, "necp_client_remove_flow copyin client_id error (%d)", error); + goto done; + } + + if (uap->buffer != 0 && buffer_size == sizeof(flow_ifnet_stats)) { + error = copyin(uap->buffer, &flow_ifnet_stats, buffer_size); + if (error) { + NECPLOG(LOG_ERR, "necp_client_remove flow_ifnet_stats copyin error (%d)", error); + // Not fatal + } + } else if (uap->buffer != 0) { + NECPLOG(LOG_ERR, "necp_client_remove unexpected parameters length (%zu)", buffer_size); + } + + NECP_FD_LOCK(fd_data); + struct necp_client *client = NULL; + struct necp_client_flow_registration *flow_registration = necp_client_fd_find_flow(fd_data, flow_id); + if (flow_registration != NULL) { + NECP_FLOW_TREE_LOCK_EXCLUSIVE(); + RB_REMOVE(_necp_client_flow_global_tree, &necp_client_flow_global_tree, flow_registration); + NECP_FLOW_TREE_UNLOCK(); + RB_REMOVE(_necp_fd_flow_tree, &fd_data->flows, flow_registration); + + client = flow_registration->client; + if (client != NULL) { + necp_client_retain(client); + } + } + NECP_FD_UNLOCK(fd_data); + + if (flow_registration != NULL && client != NULL) { + NECP_CLIENT_LOCK(client); + if (flow_registration->client == client) { + 
necp_destroy_client_flow_registration(client, flow_registration, fd_data->proc_pid, false); + } + necp_client_release_locked(client); + NECP_CLIENT_UNLOCK(client); + } + +done: + *retval = error; + if (error != 0) { + NECPLOG(LOG_ERR, "Remove flow error (%d)", error); + } + + return error; +} // Don't inline the function since it includes necp_client_parsed_parameters on the stack static __attribute__((noinline)) int @@ -5177,7 +5332,7 @@ necp_client_copy(struct necp_fd_data *fd_data, struct necp_client_action_args *u if (uap->client_id) { if (uap->client_id_len != sizeof(uuid_t)) { - NECPLOG(LOG_ERR, "Incorrect length (got %d, expected %d)", uap->client_id_len, sizeof(uuid_t)); + NECPLOG(LOG_ERR, "Incorrect length (got %zu, expected %zu)", (size_t)uap->client_id_len, sizeof(uuid_t)); return ERANGE; } @@ -5288,7 +5443,7 @@ necp_client_copy_client_update(struct necp_fd_data *fd_data, struct necp_client_ NECPLOG(LOG_ERR, "Copy client update copyout client id error (%d)", error); } else { if (uap->buffer_size < client_update->update_length) { - NECPLOG(LOG_ERR, "Buffer size cannot hold update (%zu < %zu)", uap->buffer_size, client_update->update_length); + NECPLOG(LOG_ERR, "Buffer size cannot hold update (%zu < %zu)", (size_t)uap->buffer_size, client_update->update_length); error = EINVAL; } else { error = copyout(&client_update->update, uap->buffer, client_update->update_length); @@ -5461,6 +5616,192 @@ done: return error; } +static NECP_CLIENT_ACTION_FUNCTION int +necp_client_add_flow(struct necp_fd_data *fd_data, struct necp_client_action_args *uap, int *retval) +{ + int error = 0; + struct necp_client *client = NULL; + uuid_t client_id; + struct necp_client_nexus_parameters parameters = {}; + struct proc *proc = PROC_NULL; + struct necp_client_add_flow *add_request = NULL; + struct necp_client_add_flow *allocated_add_request = NULL; + struct necp_client_add_flow_default default_add_request = {}; + const size_t buffer_size = uap->buffer_size; + + if (uap->client_id == 0 || uap->client_id_len != sizeof(uuid_t)) { + error = EINVAL; + NECPLOG(LOG_ERR, "necp_client_add_flow invalid client_id (length %zu)", (size_t)uap->client_id_len); + goto done; + } + + if (uap->buffer == 0 || buffer_size < sizeof(struct necp_client_add_flow)) { + error = EINVAL; + NECPLOG(LOG_ERR, "necp_client_add_flow invalid buffer (length %zu)", buffer_size); + goto done; + } + + error = copyin(uap->client_id, client_id, sizeof(uuid_t)); + if (error) { + NECPLOG(LOG_ERR, "necp_client_add_flow copyin client_id error (%d)", error); + goto done; + } + + if (buffer_size <= sizeof(struct necp_client_add_flow_default)) { + // Fits in default size + error = copyin(uap->buffer, &default_add_request, buffer_size); + if (error) { + NECPLOG(LOG_ERR, "necp_client_add_flow copyin default_add_request error (%d)", error); + goto done; + } + + add_request = (struct necp_client_add_flow *)&default_add_request; + } else { + allocated_add_request = _MALLOC(buffer_size, M_NECP, M_WAITOK | M_ZERO); + if (allocated_add_request == NULL) { + error = ENOMEM; + goto done; + } + + error = copyin(uap->buffer, allocated_add_request, buffer_size); + if (error) { + NECPLOG(LOG_ERR, "necp_client_add_flow copyin default_add_request error (%d)", error); + goto done; + } + + add_request = (struct necp_client_add_flow *)allocated_add_request; + } + + NECP_FD_LOCK(fd_data); + pid_t pid = fd_data->proc_pid; + proc = proc_find(pid); + if (proc == PROC_NULL) { + NECP_FD_UNLOCK(fd_data); + NECPLOG(LOG_ERR, "necp_client_add_flow process not found for pid %d 
error (%d)", pid, error); + error = ESRCH; + goto done; + } + + client = necp_client_fd_find_client_and_lock(fd_data, client_id); + if (client == NULL) { + error = ENOENT; + NECP_FD_UNLOCK(fd_data); + goto done; + } + + // Using ADD_FLOW indicates that the client supports multiple flows per client + client->legacy_client_is_flow = false; + + necp_client_retain_locked(client); + necp_client_copy_parameters_locked(client, ¶meters); + + struct necp_client_flow_registration *new_registration = necp_client_create_flow_registration(fd_data, client); + if (new_registration == NULL) { + error = ENOMEM; + NECP_CLIENT_UNLOCK(client); + NECP_FD_UNLOCK(fd_data); + NECPLOG0(LOG_ERR, "Failed to allocate flow registration"); + goto done; + } + + new_registration->flags = add_request->flags; + + // Copy new ID out to caller + uuid_copy(add_request->registration_id, new_registration->registration_id); + + // Copy override address + if (add_request->flags & NECP_CLIENT_FLOW_FLAGS_OVERRIDE_ADDRESS) { + size_t offset_of_address = (sizeof(struct necp_client_add_flow) + + add_request->stats_request_count * sizeof(struct necp_client_flow_stats)); + if (buffer_size >= offset_of_address + sizeof(struct sockaddr_in)) { + struct sockaddr *override_address = (struct sockaddr *)(((uint8_t *)add_request) + offset_of_address); + if (buffer_size >= offset_of_address + override_address->sa_len && + override_address->sa_len <= sizeof(parameters.remote_addr)) { + memcpy(¶meters.remote_addr, override_address, override_address->sa_len); + } + } + } + + + if (error == 0 && + (add_request->flags & NECP_CLIENT_FLOW_FLAGS_BROWSE || + add_request->flags & NECP_CLIENT_FLOW_FLAGS_RESOLVE)) { + uint32_t interface_index = IFSCOPE_NONE; + ifnet_head_lock_shared(); + struct ifnet *interface = NULL; + TAILQ_FOREACH(interface, &ifnet_head, if_link) { + ifnet_lock_shared(interface); + if (interface->if_agentids != NULL) { + for (u_int32_t i = 0; i < interface->if_agentcount; i++) { + if (uuid_compare(interface->if_agentids[i], add_request->agent_uuid) == 0) { + interface_index = interface->if_index; + break; + } + } + } + ifnet_lock_done(interface); + if (interface_index != IFSCOPE_NONE) { + break; + } + } + ifnet_head_done(); + + necp_client_add_nexus_flow_if_needed(new_registration, add_request->agent_uuid, interface_index); + + error = netagent_client_message_with_params(add_request->agent_uuid, + ((new_registration->flags & NECP_CLIENT_FLOW_FLAGS_USE_CLIENT_ID) ? 
+ client->client_id : + new_registration->registration_id), + pid, client->agent_handle, + NETAGENT_MESSAGE_TYPE_CLIENT_ASSERT, + (struct necp_client_agent_parameters *)&parameters, + NULL, NULL); + if (error != 0) { + NECPLOG(LOG_ERR, "netagent_client_message error (%d)", error); + } + } + + if (error != 0) { + // Encountered an error in adding the flow, destroy the flow registration + NECP_FLOW_TREE_LOCK_EXCLUSIVE(); + RB_REMOVE(_necp_client_flow_global_tree, &necp_client_flow_global_tree, new_registration); + NECP_FLOW_TREE_UNLOCK(); + RB_REMOVE(_necp_fd_flow_tree, &fd_data->flows, new_registration); + necp_destroy_client_flow_registration(client, new_registration, fd_data->proc_pid, true); + new_registration = NULL; + } + + NECP_CLIENT_UNLOCK(client); + NECP_FD_UNLOCK(fd_data); + + necp_client_release(client); + + if (error != 0) { + goto done; + } + + // Copy the request back out to the caller with assigned fields + error = copyout(add_request, uap->buffer, buffer_size); + if (error != 0) { + NECPLOG(LOG_ERR, "necp_client_add_flow copyout add_request error (%d)", error); + } + +done: + *retval = error; + if (error != 0) { + NECPLOG(LOG_ERR, "Add flow error (%d)", error); + } + + if (allocated_add_request != NULL) { + FREE(allocated_add_request, M_NECP); + } + + if (proc != PROC_NULL) { + proc_rele(proc); + } + return error; +} + static void necp_client_add_assertion(struct necp_client *client, uuid_t netagent_uuid) @@ -5508,10 +5849,10 @@ necp_client_agent_action(struct necp_fd_data *fd_data, struct necp_client_action uuid_t client_id; bool acted_on_agent = FALSE; u_int8_t *parameters = NULL; - size_t parameters_size = uap->buffer_size; + const size_t buffer_size = uap->buffer_size; if (uap->client_id == 0 || uap->client_id_len != sizeof(uuid_t) || - uap->buffer_size == 0 || uap->buffer == 0) { + buffer_size == 0 || uap->buffer == 0) { NECPLOG0(LOG_ERR, "necp_client_agent_action invalid parameters"); error = EINVAL; goto done; @@ -5523,19 +5864,19 @@ necp_client_agent_action(struct necp_fd_data *fd_data, struct necp_client_action goto done; } - if (uap->buffer_size > NECP_MAX_AGENT_ACTION_SIZE) { + if (buffer_size > NECP_MAX_AGENT_ACTION_SIZE) { NECPLOG(LOG_ERR, "necp_client_agent_action invalid buffer size (>%u)", NECP_MAX_AGENT_ACTION_SIZE); error = EINVAL; goto done; } - if ((parameters = _MALLOC(uap->buffer_size, M_NECP, M_WAITOK | M_ZERO)) == NULL) { + if ((parameters = _MALLOC(buffer_size, M_NECP, M_WAITOK | M_ZERO)) == NULL) { NECPLOG0(LOG_ERR, "necp_client_agent_action malloc failed"); error = ENOMEM; goto done; } - error = copyin(uap->buffer, parameters, uap->buffer_size); + error = copyin(uap->buffer, parameters, buffer_size); if (error) { NECPLOG(LOG_ERR, "necp_client_agent_action parameters copyin error (%d)", error); goto done; @@ -5545,11 +5886,11 @@ necp_client_agent_action(struct necp_fd_data *fd_data, struct necp_client_action client = necp_client_fd_find_client_and_lock(fd_data, client_id); if (client != NULL) { size_t offset = 0; - while ((offset + sizeof(struct necp_tlv_header)) <= parameters_size) { + while ((offset + sizeof(struct necp_tlv_header)) <= buffer_size) { u_int8_t type = necp_buffer_get_tlv_type(parameters, offset); u_int32_t length = necp_buffer_get_tlv_length(parameters, offset); - if (length > (parameters_size - (offset + sizeof(struct necp_tlv_header)))) { + if (length > (buffer_size - (offset + sizeof(struct necp_tlv_header)))) { // If the length is larger than what can fit in the remaining parameters size, bail NECPLOG(LOG_ERR, "Invalid TLV length
(%u)", length); break; @@ -5630,9 +5971,10 @@ necp_client_copy_agent(__unused struct necp_fd_data *fd_data, struct necp_client { int error = 0; uuid_t agent_uuid; + const size_t buffer_size = uap->buffer_size; if (uap->client_id == 0 || uap->client_id_len != sizeof(uuid_t) || - uap->buffer_size == 0 || uap->buffer == 0) { + buffer_size == 0 || uap->buffer == 0) { NECPLOG0(LOG_ERR, "necp_client_copy_agent bad input"); error = EINVAL; goto done; @@ -5644,7 +5986,7 @@ necp_client_copy_agent(__unused struct necp_fd_data *fd_data, struct necp_client goto done; } - error = netagent_copyout(agent_uuid, uap->buffer, uap->buffer_size); + error = netagent_copyout(agent_uuid, uap->buffer, buffer_size); if (error) { // netagent_copyout already logs appropriate errors goto done; @@ -5661,10 +6003,11 @@ necp_client_agent_use(struct necp_fd_data *fd_data, struct necp_client_action_ar int error = 0; struct necp_client *client = NULL; uuid_t client_id; - struct necp_agent_use_parameters parameters; + struct necp_agent_use_parameters parameters = {}; + const size_t buffer_size = uap->buffer_size; if (uap->client_id == 0 || uap->client_id_len != sizeof(uuid_t) || - uap->buffer_size != sizeof(parameters) || uap->buffer == 0) { + buffer_size != sizeof(parameters) || uap->buffer == 0) { error = EINVAL; goto done; } @@ -5675,7 +6018,7 @@ necp_client_agent_use(struct necp_fd_data *fd_data, struct necp_client_action_ar goto done; } - error = copyin(uap->buffer, ¶meters, uap->buffer_size); + error = copyin(uap->buffer, ¶meters, buffer_size); if (error) { NECPLOG(LOG_ERR, "Parameters copyin error (%d)", error); goto done; @@ -5693,7 +6036,7 @@ necp_client_agent_use(struct necp_fd_data *fd_data, struct necp_client_action_ar NECP_FD_UNLOCK(fd_data); if (error == 0) { - error = copyout(¶meters, uap->buffer, uap->buffer_size); + error = copyout(¶meters, uap->buffer, buffer_size); if (error) { NECPLOG(LOG_ERR, "Parameters copyout error (%d)", error); goto done; @@ -5706,18 +6049,6 @@ done: return error; } -struct necp_interface_details_legacy { - char name[IFXNAMSIZ]; - u_int32_t index; - u_int32_t generation; - u_int32_t functional_type; - u_int32_t delegate_index; - u_int32_t flags; // see NECP_INTERFACE_FLAG_* - u_int32_t mtu; - struct necp_interface_signature ipv4_signature; - struct necp_interface_signature ipv6_signature; -}; - static NECP_CLIENT_ACTION_FUNCTION int necp_client_copy_interface(__unused struct necp_fd_data *fd_data, struct necp_client_action_args *uap, int *retval) { @@ -5726,7 +6057,7 @@ necp_client_copy_interface(__unused struct necp_fd_data *fd_data, struct necp_cl struct necp_interface_details interface_details = {}; if (uap->client_id == 0 || uap->client_id_len != sizeof(u_int32_t) || - uap->buffer_size < sizeof(struct necp_interface_details_legacy) || + uap->buffer_size < sizeof(interface_details) || uap->buffer == 0) { NECPLOG0(LOG_ERR, "necp_client_copy_interface bad input"); error = EINVAL; @@ -5745,6 +6076,7 @@ necp_client_copy_interface(__unused struct necp_fd_data *fd_data, struct necp_cl goto done; } + lck_mtx_lock(rnh_lock); ifnet_head_lock_shared(); ifnet_t interface = NULL; if (interface_index != IFSCOPE_NONE && interface_index <= (u_int32_t)if_index) { @@ -5782,6 +6114,12 @@ necp_client_copy_interface(__unused struct necp_fd_data *fd_data, struct necp_cl if (interface->if_xflags & IFXF_MPK_LOG) { interface_details.flags |= NECP_INTERFACE_FLAG_MPK_LOG; } + if (interface->if_flags & IFF_MULTICAST) { + interface_details.flags |= NECP_INTERFACE_FLAG_SUPPORTS_MULTICAST; + } + if 
(IS_INTF_CLAT46(interface)) { + interface_details.flags |= NECP_INTERFACE_FLAG_HAS_NAT64; + } interface_details.mtu = interface->if_mtu; u_int8_t ipv4_signature_len = sizeof(interface_details.ipv4_signature.signature); @@ -5792,6 +6130,36 @@ necp_client_copy_interface(__unused struct necp_fd_data *fd_data, struct necp_cl } interface_details.ipv4_signature.signature_len = ipv4_signature_len; + // Check for default scoped routes for IPv4 and IPv6 + union necp_sockaddr_union default_address; + struct rtentry *v4Route = NULL; + memset(&default_address, 0, sizeof(default_address)); + default_address.sa.sa_family = AF_INET; + default_address.sa.sa_len = sizeof(struct sockaddr_in); + v4Route = rtalloc1_scoped_locked((struct sockaddr *)&default_address, 0, 0, + interface->if_index); + if (v4Route != NULL) { + if (v4Route->rt_ifp != NULL && !IS_INTF_CLAT46(v4Route->rt_ifp)) { + interface_details.flags |= NECP_INTERFACE_FLAG_IPV4_ROUTABLE; + } + rtfree_locked(v4Route); + v4Route = NULL; + } + + struct rtentry *v6Route = NULL; + memset(&default_address, 0, sizeof(default_address)); + default_address.sa.sa_family = AF_INET6; + default_address.sa.sa_len = sizeof(struct sockaddr_in6); + v6Route = rtalloc1_scoped_locked((struct sockaddr *)&default_address, 0, 0, + interface->if_index); + if (v6Route != NULL) { + if (v6Route->rt_ifp != NULL) { + interface_details.flags |= NECP_INTERFACE_FLAG_IPV6_ROUTABLE; + } + rtfree_locked(v6Route); + v6Route = NULL; + } + u_int8_t ipv6_signature_len = sizeof(interface_details.ipv6_signature.signature); u_int16_t ipv6_signature_flags; if (ifnet_get_netsignature(interface, AF_INET6, &ipv6_signature_len, &ipv6_signature_flags, @@ -5818,13 +6186,10 @@ necp_client_copy_interface(__unused struct necp_fd_data *fd_data, struct necp_cl } ifnet_head_done(); + lck_mtx_unlock(rnh_lock); // If the client is using an older version of the struct, copy that length - size_t copy_length = sizeof(interface_details); - if (uap->buffer_size < sizeof(struct necp_interface_details_legacy)) { - copy_length = sizeof(struct necp_interface_details_legacy); - } - error = copyout(&interface_details, uap->buffer, copy_length); + error = copyout(&interface_details, uap->buffer, sizeof(interface_details)); if (error) { NECPLOG(LOG_ERR, "necp_client_copy_interface copyout error (%d)", error); goto done; @@ -6139,11 +6504,12 @@ done: int necp_client_action(struct proc *p, struct necp_client_action_args *uap, int *retval) { -#pragma unused(p) + struct fileproc *fp; int error = 0; int return_value = 0; struct necp_fd_data *fd_data = NULL; - error = necp_find_fd_data(uap->necp_fd, &fd_data); + + error = necp_find_fd_data(p, uap->necp_fd, &fp, &fd_data); if (error != 0) { NECPLOG(LOG_ERR, "necp_client_action find fd error (%d)", error); return error; @@ -6173,6 +6539,14 @@ necp_client_action(struct proc *p, struct necp_client_action_args *uap, int *ret return_value = necp_client_list(fd_data, uap, retval); break; } + case NECP_CLIENT_ACTION_ADD_FLOW: { + return_value = necp_client_add_flow(fd_data, uap, retval); + break; + } + case NECP_CLIENT_ACTION_REMOVE_FLOW: { + return_value = necp_client_remove_flow(fd_data, uap, retval); + break; + } case NECP_CLIENT_ACTION_AGENT: { return_value = necp_client_agent_action(fd_data, uap, retval); break; @@ -6212,8 +6586,7 @@ necp_client_action(struct proc *p, struct necp_client_action_args *uap, int *ret } } - file_drop(uap->necp_fd); - + fp_drop(p, uap->necp_fd, fp, 0); return return_value; } @@ -6292,7 +6665,7 @@ necp_set_socket_attribute(u_int8_t *buffer, size_t 
buffer_length, u_int8_t type, MALLOC(local_string, char *, string_size + 1, M_NECP, M_WAITOK | M_ZERO); if (local_string == NULL) { - NECPLOG(LOG_ERR, "Failed to allocate a socket attribute buffer (size %d)", string_size); + NECPLOG(LOG_ERR, "Failed to allocate a socket attribute buffer (size %zu)", string_size); goto fail; } @@ -6327,11 +6700,7 @@ necp_set_socket_attributes(struct socket *so, struct sockopt *sopt) u_int8_t *buffer = NULL; struct inpcb *inp = NULL; - if ((SOCK_DOM(so) != PF_INET -#if INET6 - && SOCK_DOM(so) != PF_INET6 -#endif - )) { + if (SOCK_DOM(so) != PF_INET && SOCK_DOM(so) != PF_INET6) { error = EINVAL; goto done; } @@ -6386,11 +6755,7 @@ necp_get_socket_attributes(struct socket *so, struct sockopt *sopt) size_t valsize = 0; struct inpcb *inp = NULL; - if ((SOCK_DOM(so) != PF_INET -#if INET6 - && SOCK_DOM(so) != PF_INET6 -#endif - )) { + if (SOCK_DOM(so) != PF_INET && SOCK_DOM(so) != PF_INET6) { error = EINVAL; goto done; } @@ -6560,15 +6925,6 @@ necp_client_init(void) /* NOTREACHED */ } - necp_client_fd_size = sizeof(struct necp_fd_data); - necp_client_fd_zone = zinit(necp_client_fd_size, - NECP_CLIENT_FD_ZONE_MAX * necp_client_fd_size, - 0, NECP_CLIENT_FD_ZONE_NAME); - if (necp_client_fd_zone == NULL) { - panic("zinit(necp_client_fd) failed\n"); - /* NOTREACHED */ - } - necp_flow_size = sizeof(struct necp_client_flow); necp_flow_cache = mcache_create(NECP_FLOW_ZONE_NAME, necp_flow_size, sizeof(uint64_t), 0, MCR_SLEEP); if (necp_flow_cache == NULL) { diff --git a/bsd/net/net_kev.h b/bsd/net/net_kev.h index a32a2ab04..d5de03495 100644 --- a/bsd/net/net_kev.h +++ b/bsd/net/net_kev.h @@ -88,6 +88,10 @@ /* KEV_NETPOLICY_SUBCLASS event codes */ #define KEV_NETPOLICY_IFDENIED 1 /* denied access to interface */ #define KEV_NETPOLICY_IFFAILED 2 /* failed to bring up interface */ +#define KEV_NETPOLICY_NETDENIED 3 /* denied access to some network */ + +#define NETPOLICY_NETWORKTYPE_LOCAL 1 /* local network */ + #define KEV_SOCKET_SUBCLASS 4 /* Socket subclass */ /* KEV_SOCKET_SUBCLASS event codes */ diff --git a/bsd/net/net_perf.c b/bsd/net/net_perf.c index 476b111eb..67f2791a8 100644 --- a/bsd/net/net_perf.c +++ b/bsd/net/net_perf.c @@ -65,7 +65,7 @@ update_bins(net_perf_t *npp, uint64_t bins) { bzero(&npp->np_hist_bars, sizeof(npp->np_hist_bars)); - for (int i = 1, j = 0; i <= 64 && j < NET_PERF_BARS; i++) { + for (uint8_t i = 1, j = 0; i <= 64 && j < NET_PERF_BARS; i++) { if (bins & 0x1) { npp->np_hist_bars[j] = i; j++; diff --git a/bsd/net/net_str_id.c b/bsd/net/net_str_id.c index 637006974..a9688c076 100644 --- a/bsd/net/net_str_id.c +++ b/bsd/net/net_str_id.c @@ -28,7 +28,7 @@ #include #include -#include +#include #include #include #include @@ -41,6 +41,7 @@ #include "net/net_str_id.h" +#define NET_ID_STR_MAX_LEN 2048 #define NET_ID_STR_ENTRY_SIZE(__str) \ (__builtin_offsetof(struct net_str_id_entry, nsi_string[0]) + \ strlen(__str) + 1) @@ -96,8 +97,8 @@ net_str_id_first_last(u_int32_t *first, u_int32_t *last, u_int32_t kind) } __private_extern__ errno_t -net_str_id_find_internal(const char *string, u_int32_t *out_id, - u_int32_t kind, int create) +net_str_id_find_internal(const char *string, u_int32_t *out_id, + u_int32_t kind, int create) { struct net_str_id_entry *entry = NULL; @@ -105,6 +106,9 @@ net_str_id_find_internal(const char *string, u_int32_t *out_id, if (string == NULL || out_id == NULL || kind >= NSI_MAX_KIND) { return EINVAL; } + if (strlen(string) > NET_ID_STR_MAX_LEN) { + return EINVAL; + } *out_id = 0; @@ -122,7 +126,8 @@ 
net_str_id_find_internal(const char *string, u_int32_t *out_id, return ENOENT; } - entry = kalloc(NET_ID_STR_ENTRY_SIZE(string)); + entry = zalloc_permanent(NET_ID_STR_ENTRY_SIZE(string), + ZALIGN_PTR); if (entry == NULL) { lck_mtx_unlock(net_str_id_lock); return ENOMEM; @@ -174,7 +179,11 @@ sysctl_if_family_ids SYSCTL_HANDLER_ARGS /* XXX bad syntax! */ continue; } - str_size = strlen(entry->nsi_string) + 1; + str_size = strlen(entry->nsi_string); + if (str_size > NET_ID_STR_MAX_LEN) { + str_size = NET_ID_STR_MAX_LEN; + } + str_size += 1; // make room for end-of-string iffmid_size = ROUNDUP32(offsetof(struct net_str_id_entry, nsi_string) + str_size); if (iffmid_size > max_size) { @@ -191,7 +200,7 @@ sysctl_if_family_ids SYSCTL_HANDLER_ARGS /* XXX bad syntax! */ } bzero(iffmid, iffmid_size); - iffmid->iffmid_len = iffmid_size; + iffmid->iffmid_len = (uint32_t)iffmid_size; iffmid->iffmid_id = entry->nsi_id; strlcpy(iffmid->iffmid_str, entry->nsi_string, str_size); error = SYSCTL_OUT(req, iffmid, iffmid_size); diff --git a/bsd/net/net_stubs.c b/bsd/net/net_stubs.c index 169575c24..c3d24d9da 100644 --- a/bsd/net/net_stubs.c +++ b/bsd/net/net_stubs.c @@ -42,6 +42,10 @@ STUB(bpf_attach); STUB(bpf_tap_in); STUB(bpf_tap_out); STUB(bpfattach); +#if !SKYWALK +STUB(bpf_tap_packet_in); +STUB(bpf_tap_packet_out); +#endif /* SKYWALK */ STUB(ctl_deregister); STUB(ctl_enqueuedata); STUB(ctl_enqueuembuf); @@ -462,6 +466,10 @@ STUB(ipf_addv6_internal); STUB(sflt_register_internal); STUB(sock_accept_internal); STUB(sock_socket_internal); +STUB(vsock_add_transport); +STUB(vsock_remove_transport); +STUB(vsock_reset_transport); +STUB(vsock_put_message); #undef STUB /* diff --git a/bsd/net/netsrc.c b/bsd/net/netsrc.c index 4a10ea965..7554e2668 100644 --- a/bsd/net/netsrc.c +++ b/bsd/net/netsrc.c @@ -76,7 +76,7 @@ netsrc_ctlconnect(kern_ctl_ref kctl, struct sockaddr_ctl *sac, void **uinfo) } static errno_t -netsrc_reply(kern_ctl_ref kctl, uint32_t unit, uint16_t version, +netsrc_reply(kern_ctl_ref kctl, uint32_t unit, unsigned int version, struct netsrc_rep *reply) { switch (version) { @@ -175,15 +175,21 @@ netsrc_policy_common(struct netsrc_req *request, struct netsrc_rep *reply) // Destination policy struct in6_addrpolicy *policy = lookup_policy(&request->nrq_dst.sa); if (policy != NULL && policy->label != -1) { - reply->nrp_dstlabel = policy->label; - reply->nrp_dstprecedence = policy->preced; + /* Explicit cast because both policy and netsrc are public APIs + * and apps might rely on it. + */ + reply->nrp_dstlabel = (uint16_t)policy->label; + reply->nrp_dstprecedence = (uint16_t)policy->preced; } // Source policy policy = lookup_policy(&reply->nrp_src.sa); if (policy != NULL && policy->label != -1) { - reply->nrp_label = policy->label; - reply->nrp_precedence = policy->preced; + /* Explicit cast because both policy and netsrc are public APIs + * and apps might rely on it. 
+ */ + reply->nrp_label = (uint16_t)policy->label; + reply->nrp_precedence = (uint16_t)policy->preced; } } diff --git a/bsd/net/network_agent.c b/bsd/net/network_agent.c index 0072bfaca..e0489c5d8 100644 --- a/bsd/net/network_agent.c +++ b/bsd/net/network_agent.c @@ -32,7 +32,6 @@ #include #include #include -#include #include #include #include @@ -118,10 +117,9 @@ static u_int32_t g_next_generation = 1; static kern_ctl_ref netagent_kctlref; static u_int32_t netagent_family; -static OSMallocTag netagent_malloc_tag; -static lck_grp_attr_t *netagent_grp_attr = NULL; -static lck_attr_t *netagent_mtx_attr = NULL; -static lck_grp_t *netagent_mtx_grp = NULL; +static lck_grp_attr_t *netagent_grp_attr = NULL; +static lck_attr_t *netagent_mtx_attr = NULL; +static lck_grp_t *netagent_mtx_grp = NULL; decl_lck_rw_data(static, netagent_lock); static errno_t netagent_register_control(void); @@ -143,34 +141,34 @@ static void netagent_delete_session(struct netagent_session *session); // Register static void netagent_handle_register_message(struct netagent_session *session, u_int32_t message_id, - u_int32_t payload_length, mbuf_t packet, int offset); + size_t payload_length, mbuf_t packet, size_t offset); static errno_t netagent_handle_register_setopt(struct netagent_session *session, u_int8_t *payload, - u_int32_t payload_length); + size_t payload_length); // Unregister static void netagent_handle_unregister_message(struct netagent_session *session, u_int32_t message_id, - u_int32_t payload_length, mbuf_t packet, int offset); + size_t payload_length, mbuf_t packet, size_t offset); static errno_t netagent_handle_unregister_setopt(struct netagent_session *session, u_int8_t *payload, - u_int32_t payload_length); + size_t payload_length); // Update static void netagent_handle_update_message(struct netagent_session *session, u_int32_t message_id, - u_int32_t payload_length, mbuf_t packet, int offset); + size_t payload_length, mbuf_t packet, size_t offset); static errno_t netagent_handle_update_setopt(struct netagent_session *session, u_int8_t *payload, - u_int32_t payload_length); + size_t payload_length); // Assign nexus static void netagent_handle_assign_nexus_message(struct netagent_session *session, u_int32_t message_id, - u_int32_t payload_length, mbuf_t packet, int offset); + size_t payload_length, mbuf_t packet, size_t offset); static errno_t netagent_handle_assign_nexus_setopt(struct netagent_session *session, u_int8_t *payload, - u_int32_t payload_length); + size_t payload_length); // Set/get assert count static errno_t netagent_handle_use_count_setopt(struct netagent_session *session, u_int8_t *payload, size_t payload_length); static errno_t netagent_handle_use_count_getopt(struct netagent_session *session, u_int8_t *buffer, size_t *buffer_length); static void netagent_handle_get(struct netagent_session *session, u_int32_t message_id, - u_int32_t payload_length, mbuf_t packet, int offset); + size_t payload_length, mbuf_t packet, size_t offset); static struct netagent_wrapper *netagent_find_agent_with_uuid(uuid_t uuid); @@ -235,10 +233,7 @@ static errno_t netagent_register_control(void) { struct kern_ctl_reg kern_ctl; - errno_t result = 0; - - // Create a tag to allocate memory - netagent_malloc_tag = OSMalloc_Tagalloc(NETAGENT_CONTROL_NAME, OSMT_DEFAULT); + errno_t result = 0; // Find a unique value for our interface family result = mbuf_tag_id_find(NETAGENT_CONTROL_NAME, &netagent_family); @@ -322,13 +317,14 @@ netagent_post_event(uuid_t agent_uuid, u_int32_t event_code, bool update_necp, b // 
Message handling static u_int8_t * netagent_buffer_write_message_header(u_int8_t *buffer, u_int8_t message_type, u_int8_t flags, - u_int32_t message_id, u_int32_t error, u_int32_t payload_length) + u_int32_t message_id, u_int32_t error, size_t payload_length) { + memset(buffer, 0, sizeof(struct netagent_message_header)); ((struct netagent_message_header *)(void *)buffer)->message_type = message_type; ((struct netagent_message_header *)(void *)buffer)->message_flags = flags; ((struct netagent_message_header *)(void *)buffer)->message_id = message_id; ((struct netagent_message_header *)(void *)buffer)->message_error = error; - ((struct netagent_message_header *)(void *)buffer)->message_payload_length = payload_length; + ((struct netagent_message_header *)(void *)buffer)->message_payload_length = (u_int32_t)payload_length; return buffer + sizeof(struct netagent_message_header); } @@ -343,7 +339,7 @@ netagent_send_ctl_data(u_int32_t control_unit, u_int8_t *buffer, size_t buffer_s } static int -netagent_send_trigger(struct netagent_wrapper *wrapper, struct proc *p, u_int32_t flags, u_int32_t trigger_type) +netagent_send_trigger(struct netagent_wrapper *wrapper, struct proc *p, u_int32_t flags, u_int8_t trigger_type) { int error = 0; struct netagent_trigger_message *trigger_message = NULL; @@ -421,7 +417,7 @@ netagent_send_success_response(struct netagent_session *session, u_int8_t messag return error; } -static int +static errno_t netagent_send_error_response(struct netagent_session *session, u_int8_t message_type, u_int32_t message_id, u_int32_t error_code) { @@ -708,8 +704,8 @@ netagent_destroy(netagent_session_t session) return netagent_delete_session((struct netagent_session *)session); } -static int -netagent_packet_get_netagent_data_size(mbuf_t packet, int offset, int *err) +static size_t +netagent_packet_get_netagent_data_size(mbuf_t packet, size_t offset, int *err) { int error = 0; @@ -721,7 +717,7 @@ netagent_packet_get_netagent_data_size(mbuf_t packet, int offset, int *err) error = mbuf_copydata(packet, offset, sizeof(netagent_peek), &netagent_peek); if (error) { *err = ENOENT; - return -1; + return 0; } return netagent_peek.netagent_data_size; @@ -760,7 +756,6 @@ netagent_handle_register_inner(struct netagent_session *session, struct netagent errno_t netagent_register(netagent_session_t _session, struct netagent *agent) { - int data_size = 0; struct netagent_wrapper *new_wrapper = NULL; uuid_t registered_uuid; @@ -780,9 +775,9 @@ netagent_register(netagent_session_t _session, struct netagent *agent) return EINVAL; } - data_size = agent->netagent_data_size; - if (data_size < 0 || data_size > NETAGENT_MAX_DATA_SIZE) { - NETAGENTLOG(LOG_ERR, "Register message size could not be read, data_size %d", + size_t data_size = agent->netagent_data_size; + if (data_size > NETAGENT_MAX_DATA_SIZE) { + NETAGENTLOG(LOG_ERR, "Register message size could not be read, data_size %zu", data_size); return EINVAL; } @@ -798,7 +793,7 @@ netagent_register(netagent_session_t _session, struct netagent *agent) uuid_copy(registered_uuid, new_wrapper->netagent.netagent_uuid); - int error = netagent_handle_register_inner(session, new_wrapper); + errno_t error = netagent_handle_register_inner(session, new_wrapper); if (error != 0) { FREE(new_wrapper, M_NETAGENT); return error; @@ -812,11 +807,10 @@ netagent_register(netagent_session_t _session, struct netagent *agent) static errno_t netagent_handle_register_setopt(struct netagent_session *session, u_int8_t *payload, - u_int32_t payload_length) + size_t 
payload_length) { - int data_size = 0; struct netagent_wrapper *new_wrapper = NULL; - u_int32_t response_error = 0; + errno_t response_error = 0; struct netagent *register_netagent = (struct netagent *)(void *)payload; uuid_t registered_uuid; @@ -839,21 +833,21 @@ netagent_handle_register_setopt(struct netagent_session *session, u_int8_t *payl } if (payload_length < sizeof(struct netagent)) { - NETAGENTLOG(LOG_ERR, "Register message size too small for agent: (%u < %lu)", + NETAGENTLOG(LOG_ERR, "Register message size too small for agent: (%zu < %zu)", payload_length, sizeof(struct netagent)); response_error = EINVAL; goto done; } - data_size = register_netagent->netagent_data_size; - if (data_size < 0 || data_size > NETAGENT_MAX_DATA_SIZE) { - NETAGENTLOG(LOG_ERR, "Register message size could not be read, data_size %d", data_size); + size_t data_size = register_netagent->netagent_data_size; + if (data_size > NETAGENT_MAX_DATA_SIZE) { + NETAGENTLOG(LOG_ERR, "Register message size could not be read, data_size %zu", data_size); response_error = EINVAL; goto done; } if (payload_length != (sizeof(struct netagent) + data_size)) { - NETAGENTLOG(LOG_ERR, "Mismatch between data size and payload length (%lu != %u)", (sizeof(struct netagent) + data_size), payload_length); + NETAGENTLOG(LOG_ERR, "Mismatch between data size and payload length (%lu != %zu)", (sizeof(struct netagent) + data_size), payload_length); response_error = EINVAL; goto done; } @@ -885,10 +879,9 @@ done: static void netagent_handle_register_message(struct netagent_session *session, u_int32_t message_id, - u_int32_t payload_length, mbuf_t packet, int offset) + size_t payload_length, mbuf_t packet, size_t offset) { - int error; - int data_size = 0; + errno_t error; struct netagent_wrapper *new_wrapper = NULL; u_int32_t response_error = NETAGENT_MESSAGE_ERROR_INTERNAL; uuid_t registered_uuid; @@ -906,15 +899,15 @@ netagent_handle_register_message(struct netagent_session *session, u_int32_t mes } if (payload_length < sizeof(struct netagent)) { - NETAGENTLOG(LOG_ERR, "Register message size too small for agent: (%u < %lu)", + NETAGENTLOG(LOG_ERR, "Register message size too small for agent: (%zu < %zu)", payload_length, sizeof(struct netagent)); response_error = NETAGENT_MESSAGE_ERROR_INVALID_DATA; goto fail; } - data_size = netagent_packet_get_netagent_data_size(packet, offset, &error); - if (error || data_size < 0 || data_size > NETAGENT_MAX_DATA_SIZE) { - NETAGENTLOG(LOG_ERR, "Register message size could not be read, error %d data_size %d", + size_t data_size = netagent_packet_get_netagent_data_size(packet, offset, &error); + if (error || data_size > NETAGENT_MAX_DATA_SIZE) { + NETAGENTLOG(LOG_ERR, "Register message size could not be read, error %d data_size %zu", error, data_size); response_error = NETAGENT_MESSAGE_ERROR_INVALID_DATA; goto fail; @@ -971,10 +964,10 @@ netagent_unregister(netagent_session_t _session) static errno_t netagent_handle_unregister_setopt(struct netagent_session *session, u_int8_t *payload, - u_int32_t payload_length) + size_t payload_length) { #pragma unused(payload, payload_length) - u_int32_t response_error = 0; + errno_t response_error = 0; if (session == NULL) { NETAGENTLOG0(LOG_ERR, "Failed to find session"); @@ -990,7 +983,7 @@ done: static void netagent_handle_unregister_message(struct netagent_session *session, u_int32_t message_id, - u_int32_t payload_length, mbuf_t packet, int offset) + size_t payload_length, mbuf_t packet, size_t offset) { #pragma unused(payload_length, packet, offset) u_int32_t 
response_error = NETAGENT_MESSAGE_ERROR_INTERNAL; @@ -1021,7 +1014,7 @@ netagent_send_cellular_failed_event(struct netagent_wrapper *wrapper, bzero(&ev_ifdenied, sizeof(ev_ifdenied)); - ev_ifdenied.ev_data.epid = pid; + ev_ifdenied.ev_data.epid = (u_int64_t)pid; uuid_copy(ev_ifdenied.ev_data.euuid, proc_uuid); ev_ifdenied.ev_if_functional_type = IFRTYPE_FUNCTIONAL_CELLULAR; @@ -1029,9 +1022,9 @@ netagent_send_cellular_failed_event(struct netagent_wrapper *wrapper, } static errno_t -netagent_handle_update_inner(struct netagent_session *session, struct netagent_wrapper *new_wrapper, u_int32_t data_size, u_int8_t *agent_changed, netagent_error_domain_t error_domain) +netagent_handle_update_inner(struct netagent_session *session, struct netagent_wrapper *new_wrapper, size_t data_size, u_int8_t *agent_changed, netagent_error_domain_t error_domain) { - u_int32_t response_error = 0; + errno_t response_error = 0; if (agent_changed == NULL) { NETAGENTLOG0(LOG_ERR, "Invalid argument: agent_changed"); @@ -1040,6 +1033,12 @@ netagent_handle_update_inner(struct netagent_session *session, struct netagent_w lck_rw_lock_exclusive(&netagent_lock); + if (session->wrapper == NULL) { + lck_rw_done(&netagent_lock); + response_error = ENOENT; + return response_error; + } + if (uuid_compare(session->wrapper->netagent.netagent_uuid, new_wrapper->netagent.netagent_uuid) != 0 || memcmp(&session->wrapper->netagent.netagent_domain, &new_wrapper->netagent.netagent_domain, sizeof(new_wrapper->netagent.netagent_domain)) != 0 || @@ -1118,7 +1117,6 @@ errno_t netagent_update(netagent_session_t _session, struct netagent *agent) { u_int8_t agent_changed; - int data_size = 0; struct netagent_wrapper *new_wrapper = NULL; bool should_update_immediately; uuid_t updated_uuid; @@ -1139,9 +1137,9 @@ netagent_update(netagent_session_t _session, struct netagent *agent) return EINVAL; } - data_size = agent->netagent_data_size; + size_t data_size = agent->netagent_data_size; if (data_size > NETAGENT_MAX_DATA_SIZE) { - NETAGENTLOG(LOG_ERR, "Update message size (%u > %u) too large", data_size, NETAGENT_MAX_DATA_SIZE); + NETAGENTLOG(LOG_ERR, "Update message size (%zu > %u) too large", data_size, NETAGENT_MAX_DATA_SIZE); return EINVAL; } @@ -1157,7 +1155,7 @@ netagent_update(netagent_session_t _session, struct netagent *agent) uuid_copy(updated_uuid, new_wrapper->netagent.netagent_uuid); should_update_immediately = (NETAGENT_FLAG_UPDATE_IMMEDIATELY == (new_wrapper->netagent.netagent_flags & NETAGENT_FLAG_UPDATE_IMMEDIATELY)); - int error = netagent_handle_update_inner(session, new_wrapper, data_size, &agent_changed, kNetagentErrorDomainPOSIX); + errno_t error = netagent_handle_update_inner(session, new_wrapper, data_size, &agent_changed, kNetagentErrorDomainPOSIX); if (error == 0) { netagent_post_event(updated_uuid, KEV_NETAGENT_UPDATED, agent_changed, should_update_immediately); if (agent_changed == FALSE) { @@ -1173,9 +1171,8 @@ netagent_update(netagent_session_t _session, struct netagent *agent) } static errno_t -netagent_handle_update_setopt(struct netagent_session *session, u_int8_t *payload, u_int32_t payload_length) +netagent_handle_update_setopt(struct netagent_session *session, u_int8_t *payload, size_t payload_length) { - u_int32_t data_size = 0; struct netagent_wrapper *new_wrapper = NULL; errno_t response_error = 0; struct netagent *update_netagent = (struct netagent *)(void *)payload; @@ -1202,21 +1199,21 @@ netagent_handle_update_setopt(struct netagent_session *session, u_int8_t *payloa } if (payload_length < sizeof(struct 
netagent)) { - NETAGENTLOG(LOG_ERR, "Update message size too small for agent: (%u < %lu)", + NETAGENTLOG(LOG_ERR, "Update message size too small for agent: (%zu < %zu)", payload_length, sizeof(struct netagent)); response_error = EINVAL; goto done; } - data_size = update_netagent->netagent_data_size; + size_t data_size = update_netagent->netagent_data_size; if (data_size > NETAGENT_MAX_DATA_SIZE) { - NETAGENTLOG(LOG_ERR, "Update message size (%u > %u) too large", data_size, NETAGENT_MAX_DATA_SIZE); + NETAGENTLOG(LOG_ERR, "Update message size (%zu > %u) too large", data_size, NETAGENT_MAX_DATA_SIZE); response_error = EINVAL; goto done; } if (payload_length != (sizeof(struct netagent) + data_size)) { - NETAGENTLOG(LOG_ERR, "Mismatch between data size and payload length (%lu != %u)", (sizeof(struct netagent) + data_size), payload_length); + NETAGENTLOG(LOG_ERR, "Mismatch between data size and payload length (%lu != %zu)", (sizeof(struct netagent) + data_size), payload_length); response_error = EINVAL; goto done; } @@ -1251,10 +1248,9 @@ done: static void netagent_handle_update_message(struct netagent_session *session, u_int32_t message_id, - u_int32_t payload_length, mbuf_t packet, int offset) + size_t payload_length, mbuf_t packet, size_t offset) { int error; - int data_size = 0; struct netagent_wrapper *new_wrapper = NULL; u_int32_t response_error = NETAGENT_MESSAGE_ERROR_INTERNAL; u_int8_t agent_changed; @@ -1274,15 +1270,15 @@ netagent_handle_update_message(struct netagent_session *session, u_int32_t messa } if (payload_length < sizeof(struct netagent)) { - NETAGENTLOG(LOG_ERR, "Update message size too small for agent: (%u < %lu)", + NETAGENTLOG(LOG_ERR, "Update message size too small for agent: (%zu < %zu)", payload_length, sizeof(struct netagent)); response_error = NETAGENT_MESSAGE_ERROR_INVALID_DATA; goto fail; } - data_size = netagent_packet_get_netagent_data_size(packet, offset, &error); - if (error || data_size < 0 || data_size > NETAGENT_MAX_DATA_SIZE) { - NETAGENTLOG(LOG_ERR, "Update message size could not be read, error %d data_size %d", + size_t data_size = netagent_packet_get_netagent_data_size(packet, offset, &error); + if (error || data_size > NETAGENT_MAX_DATA_SIZE) { + NETAGENTLOG(LOG_ERR, "Update message size could not be read, error %d data_size %zu", error, data_size); response_error = NETAGENT_MESSAGE_ERROR_INVALID_DATA; goto fail; @@ -1308,7 +1304,7 @@ netagent_handle_update_message(struct netagent_session *session, u_int32_t messa uuid_copy(updated_uuid, new_wrapper->netagent.netagent_uuid); should_update_immediately = (NETAGENT_FLAG_UPDATE_IMMEDIATELY == (new_wrapper->netagent.netagent_flags & NETAGENT_FLAG_UPDATE_IMMEDIATELY)); - response_error = netagent_handle_update_inner(session, new_wrapper, data_size, &agent_changed, kNetagentErrorDomainUserDefined); + response_error = (u_int32_t)netagent_handle_update_inner(session, new_wrapper, data_size, &agent_changed, kNetagentErrorDomainUserDefined); if (response_error != 0) { FREE(new_wrapper, M_NETAGENT); goto fail; @@ -1329,7 +1325,7 @@ fail: static void netagent_handle_get(struct netagent_session *session, u_int32_t message_id, - u_int32_t payload_length, mbuf_t packet, int offset) + size_t payload_length, mbuf_t packet, size_t offset) { #pragma unused(payload_length, packet, offset) u_int8_t *response = NULL; @@ -1342,14 +1338,15 @@ netagent_handle_get(struct netagent_session *session, u_int32_t message_id, goto fail; } + lck_rw_lock_shared(&netagent_lock); + if (session->wrapper == NULL) { + 
lck_rw_done(&netagent_lock); NETAGENTLOG0(LOG_ERR, "Session has no agent to get"); response_error = NETAGENT_MESSAGE_ERROR_NOT_REGISTERED; goto fail; } - lck_rw_lock_shared(&netagent_lock); - size_t response_size = sizeof(struct netagent_message_header) + sizeof(session->wrapper->netagent) + session->wrapper->netagent.netagent_data_size; MALLOC(response, u_int8_t *, response_size, M_NETAGENT, M_WAITOK); @@ -1380,18 +1377,23 @@ netagent_assign_nexus(netagent_session_t _session, uuid_t necp_client_uuid, void *assign_message, size_t assigned_results_length) { struct netagent_session *session = (struct netagent_session *)_session; + uuid_t netagent_uuid; if (session == NULL) { NETAGENTLOG0(LOG_ERR, "Cannot assign nexus from NULL session"); return EINVAL; } + lck_rw_lock_shared(&netagent_lock); if (session->wrapper == NULL) { + lck_rw_done(&netagent_lock); NETAGENTLOG0(LOG_ERR, "Session has no agent"); return ENOENT; } + uuid_copy(netagent_uuid, session->wrapper->netagent.netagent_uuid); + lck_rw_done(&netagent_lock); // Note that if the error is 0, NECP has taken over our malloc'ed buffer - int error = necp_assign_client_result(session->wrapper->netagent.netagent_uuid, necp_client_uuid, assign_message, assigned_results_length); + int error = necp_assign_client_result(netagent_uuid, necp_client_uuid, assign_message, assigned_results_length); if (error) { // necp_assign_client_result returns POSIX errors; don't error for ENOENT NETAGENTLOG((error == ENOENT ? LOG_DEBUG : LOG_ERR), "Client assignment failed: %d", error); @@ -1408,6 +1410,7 @@ netagent_update_flow_protoctl_event(netagent_session_t _session, uint32_t protoctl_event_val, uint32_t protoctl_event_tcp_seq_number) { struct netagent_session *session = (struct netagent_session *)_session; + uuid_t netagent_uuid; int error = 0; if (session == NULL) { @@ -1415,12 +1418,16 @@ netagent_update_flow_protoctl_event(netagent_session_t _session, return EINVAL; } + lck_rw_lock_shared(&netagent_lock); if (session->wrapper == NULL) { + lck_rw_done(&netagent_lock); NETAGENTLOG0(LOG_ERR, "Session has no agent"); return ENOENT; } + uuid_copy(netagent_uuid, session->wrapper->netagent.netagent_uuid); + lck_rw_done(&netagent_lock); - error = necp_update_flow_protoctl_event(session->wrapper->netagent.netagent_uuid, + error = necp_update_flow_protoctl_event(netagent_uuid, client_id, protoctl_event_code, protoctl_event_val, protoctl_event_tcp_seq_number); return error; @@ -1428,11 +1435,12 @@ netagent_update_flow_protoctl_event(netagent_session_t _session, static errno_t netagent_handle_assign_nexus_setopt(struct netagent_session *session, u_int8_t *payload, - u_int32_t payload_length) + size_t payload_length) { errno_t response_error = 0; struct netagent_assign_nexus_message *assign_nexus_netagent = (struct netagent_assign_nexus_message *)(void *)payload; uuid_t client_id; + uuid_t netagent_uuid; u_int8_t *assigned_results = NULL; if (session == NULL) { @@ -1447,12 +1455,17 @@ netagent_handle_assign_nexus_setopt(struct netagent_session *session, u_int8_t * goto done; } + lck_rw_lock_shared(&netagent_lock); if (session->wrapper == NULL) { + lck_rw_done(&netagent_lock); NETAGENTLOG0(LOG_ERR, "Session has no agent to get"); response_error = ENOENT; goto done; } + uuid_copy(netagent_uuid, session->wrapper->netagent.netagent_uuid); + lck_rw_done(&netagent_lock); + if (payload_length < sizeof(uuid_t)) { NETAGENTLOG0(LOG_ERR, "Assign message is too short"); response_error = EINVAL; @@ -1473,7 +1486,7 @@ netagent_handle_assign_nexus_setopt(struct netagent_session 
*session, u_int8_t * } // Note that if the error is 0, NECP has taken over our malloc'ed buffer - response_error = necp_assign_client_result(session->wrapper->netagent.netagent_uuid, client_id, assigned_results, assigned_results_length); + response_error = necp_assign_client_result(netagent_uuid, client_id, assigned_results, assigned_results_length); if (response_error) { // necp_assign_client_result returns POSIX errors if (assigned_results) { @@ -1491,11 +1504,12 @@ done: static void netagent_handle_assign_nexus_message(struct netagent_session *session, u_int32_t message_id, - u_int32_t payload_length, mbuf_t packet, int offset) + size_t payload_length, mbuf_t packet, size_t offset) { int error = 0; u_int32_t response_error = NETAGENT_MESSAGE_ERROR_INTERNAL; uuid_t client_id; + uuid_t netagent_uuid; u_int8_t *assigned_results = NULL; if (session == NULL) { @@ -1504,11 +1518,15 @@ netagent_handle_assign_nexus_message(struct netagent_session *session, u_int32_t goto fail; } + lck_rw_lock_shared(&netagent_lock); if (session->wrapper == NULL) { + lck_rw_done(&netagent_lock); NETAGENTLOG0(LOG_ERR, "Session has no agent to get"); response_error = NETAGENT_MESSAGE_ERROR_NOT_REGISTERED; goto fail; } + uuid_copy(netagent_uuid, session->wrapper->netagent.netagent_uuid); + lck_rw_done(&netagent_lock); if (payload_length < sizeof(uuid_t)) { NETAGENTLOG0(LOG_ERR, "Assign message is too short"); @@ -1542,7 +1560,7 @@ netagent_handle_assign_nexus_message(struct netagent_session *session, u_int32_t } // Note that if the error is 0, NECP has taken over our malloc'ed buffer - error = necp_assign_client_result(session->wrapper->netagent.netagent_uuid, client_id, assigned_results, assigned_results_length); + error = necp_assign_client_result(netagent_uuid, client_id, assigned_results, assigned_results_length); if (error) { if (assigned_results) { FREE(assigned_results, M_NETAGENT); @@ -2140,7 +2158,7 @@ int netagent_trigger(struct proc *p, struct netagent_trigger_args *uap, int32_t *retval) { #pragma unused(p, retval) - uuid_t agent_uuid; + uuid_t agent_uuid = {}; int error = 0; if (uap == NULL) { diff --git a/bsd/net/ntstat.c b/bsd/net/ntstat.c index cd6c3dacb..77dd06cc6 100644 --- a/bsd/net/ntstat.c +++ b/bsd/net/ntstat.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2010-2019 Apple Inc. All rights reserved. + * Copyright (c) 2010-2020 Apple Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -42,7 +42,6 @@ #include #include -#include #include #include @@ -84,11 +83,11 @@ SYSCTL_INT(_net, OID_AUTO, statistics, CTLFLAG_RW | CTLFLAG_LOCKED, &nstat_collect, 0, "Collect detailed statistics"); #endif /* (DEBUG || DEVELOPMENT) */ -#if CONFIG_EMBEDDED +#if !XNU_TARGET_OS_OSX static int nstat_privcheck = 1; -#else +#else /* XNU_TARGET_OS_OSX */ static int nstat_privcheck = 0; -#endif +#endif /* XNU_TARGET_OS_OSX */ SYSCTL_INT(_net, OID_AUTO, statistics_privcheck, CTLFLAG_RW | CTLFLAG_LOCKED, &nstat_privcheck, 0, "Entitlement check"); @@ -145,11 +144,11 @@ enum{ NSTAT_FLAG_SYSINFO_SUBSCRIBED = (1 << 3), }; -#if CONFIG_EMBEDDED +#if !XNU_TARGET_OS_OSX #define QUERY_CONTINUATION_SRC_COUNT 50 -#else +#else /* XNU_TARGET_OS_OSX */ #define QUERY_CONTINUATION_SRC_COUNT 100 -#endif +#endif /* XNU_TARGET_OS_OSX */ typedef TAILQ_HEAD(, nstat_src) tailq_head_nstat_src; typedef TAILQ_ENTRY(nstat_src) tailq_entry_nstat_src; @@ -187,7 +186,7 @@ typedef struct nstat_provider { errno_t (*nstat_counts)(nstat_provider_cookie_t cookie, struct nstat_counts *out_counts, int *out_gone); errno_t (*nstat_watcher_add)(nstat_control_state *state, nstat_msg_add_all_srcs *req); void (*nstat_watcher_remove)(nstat_control_state *state); - errno_t (*nstat_copy_descriptor)(nstat_provider_cookie_t cookie, void *data, u_int32_t len); + errno_t (*nstat_copy_descriptor)(nstat_provider_cookie_t cookie, void *data, size_t len); void (*nstat_release)(nstat_provider_cookie_t cookie, boolean_t locked); bool (*nstat_reporting_allowed)(nstat_provider_cookie_t cookie, nstat_provider_filter *filter); } nstat_provider; @@ -238,9 +237,10 @@ static void nstat_control_register(void); * nstat_mtx * state->ncs_mtx */ -static volatile OSMallocTag nstat_malloc_tag = NULL; +static KALLOC_HEAP_DEFINE(KHEAP_NET_STAT, NET_STAT_CONTROL_NAME, + KHEAP_ID_DEFAULT); static nstat_control_state *nstat_controls = NULL; -static uint64_t nstat_idle_time = 0; +static uint64_t nstat_idle_time = 0; static decl_lck_mtx_data(, nstat_mtx); /* some extern definitions */ @@ -407,22 +407,11 @@ static void nstat_init_ifnet_provider(void); __private_extern__ void nstat_init(void) { - if (nstat_malloc_tag != NULL) { - return; - } - - OSMallocTag tag = OSMalloc_Tagalloc(NET_STAT_CONTROL_NAME, OSMT_DEFAULT); - if (!OSCompareAndSwapPtr(NULL, tag, &nstat_malloc_tag)) { - OSMalloc_Tagfree(tag); - tag = nstat_malloc_tag; - } else { - // we need to initialize other things, we do it here as this code path will only be hit once; - nstat_init_route_provider(); - nstat_init_tcp_provider(); - nstat_init_udp_provider(); - nstat_init_ifnet_provider(); - nstat_control_register(); - } + nstat_init_route_provider(); + nstat_init_tcp_provider(); + nstat_init_udp_provider(); + nstat_init_ifnet_provider(); + nstat_control_register(); } #pragma mark -- Aligned Buffer Allocation -- @@ -434,14 +423,18 @@ struct align_header { static void* nstat_malloc_aligned( - u_int32_t length, + size_t length, u_int8_t alignment, - OSMallocTag tag) + zalloc_flags_t flags) { struct align_header *hdr = NULL; - u_int32_t size = length + sizeof(*hdr) + alignment - 1; + size_t size = length + sizeof(*hdr) + alignment - 1; - u_int8_t *buffer = OSMalloc(size, tag); + // Arbitrary limit to prevent abuse + if (length > (64 * 1024)) { + return NULL; + } + u_int8_t *buffer = kheap_alloc(KHEAP_NET_STAT, size, flags); if (buffer == NULL) { return NULL; } @@ -458,11 +451,10 @@ nstat_malloc_aligned( static void nstat_free_aligned( - void *buffer, - OSMallocTag tag) 
+ void *buffer) { struct align_header *hdr = (struct align_header*)(void *)((u_int8_t*)buffer - sizeof(*hdr)); - OSFree(((char*)buffer) - hdr->offset, hdr->length, tag); + (kheap_free)(KHEAP_NET_STAT, (char *)buffer - hdr->offset, hdr->length); } #pragma mark -- Route Provider -- @@ -692,7 +684,7 @@ static errno_t nstat_route_copy_descriptor( nstat_provider_cookie_t cookie, void *data, - u_int32_t len) + size_t len) { nstat_route_descriptor *desc = (nstat_route_descriptor*)data; if (len < sizeof(*desc)) { @@ -780,19 +772,14 @@ nstat_route_attach( return result; } - if (nstat_malloc_tag == NULL) { - nstat_init(); - } - - result = nstat_malloc_aligned(sizeof(*result), sizeof(u_int64_t), nstat_malloc_tag); + result = nstat_malloc_aligned(sizeof(*result), sizeof(u_int64_t), + Z_WAITOK | Z_ZERO); if (!result) { return result; } - bzero(result, sizeof(*result)); - if (!OSCompareAndSwapPtr(NULL, result, &rte->rt_stats)) { - nstat_free_aligned(result, nstat_malloc_tag); + nstat_free_aligned(result); result = rte->rt_stats; } @@ -804,7 +791,7 @@ nstat_route_detach( struct rtentry *rte) { if (rte->rt_stats) { - nstat_free_aligned(rte->rt_stats, nstat_malloc_tag); + nstat_free_aligned(rte->rt_stats); rte->rt_stats = NULL; } } @@ -1013,7 +1000,7 @@ nstat_tucookie_alloc_internal( { struct nstat_tucookie *cookie; - cookie = OSMalloc(sizeof(*cookie), nstat_malloc_tag); + cookie = kheap_alloc(KHEAP_NET_STAT, sizeof(*cookie), Z_WAITOK); if (cookie == NULL) { return NULL; } @@ -1021,7 +1008,7 @@ nstat_tucookie_alloc_internal( LCK_MTX_ASSERT(&nstat_mtx, LCK_MTX_ASSERT_NOTOWNED); } if (ref && in_pcb_checkstate(inp, WNT_ACQUIRE, locked) == WNT_STOPUSING) { - OSFree(cookie, sizeof(*cookie), nstat_malloc_tag); + kheap_free(KHEAP_NET_STAT, cookie, sizeof(*cookie)); return NULL; } bzero(cookie, sizeof(*cookie)); @@ -1069,7 +1056,7 @@ nstat_tucookie_release_internal( OSDecrementAtomic(&cookie->inp->inp_nstat_refcnt); } in_pcb_checkstate(cookie->inp, WNT_RELEASE, inplock); - OSFree(cookie, sizeof(*cookie), nstat_malloc_tag); + kheap_free(KHEAP_NET_STAT, cookie, sizeof(*cookie)); } static void @@ -1125,7 +1112,6 @@ nstat_tcpudp_lookup( } break; -#if INET6 case AF_INET6: { union{ @@ -1146,7 +1132,6 @@ nstat_tcpudp_lookup( local.in6, param->local.v6.sin6_port, 1, NULL); } break; -#endif default: return EINVAL; @@ -1507,7 +1492,7 @@ static errno_t nstat_tcp_copy_descriptor( nstat_provider_cookie_t cookie, void *data, - u_int32_t len) + size_t len) { if (len < sizeof(nstat_tcp_descriptor)) { return EINVAL; @@ -1583,6 +1568,7 @@ nstat_tcp_copy_descriptor( desc->epid = desc->pid; memcpy(desc->euuid, desc->uuid, sizeof(desc->uuid)); } + uuid_copy(desc->fuuid, inp->necp_client_uuid); desc->sndbufsize = so->so_snd.sb_hiwat; desc->sndbufused = so->so_snd.sb_cc; desc->rcvbufsize = so->so_rcv.sb_hiwat; @@ -1852,7 +1838,7 @@ static errno_t nstat_udp_copy_descriptor( nstat_provider_cookie_t cookie, void *data, - u_int32_t len) + size_t len) { if (len < sizeof(nstat_udp_descriptor)) { return EINVAL; @@ -1929,6 +1915,7 @@ nstat_udp_copy_descriptor( desc->epid = desc->pid; memcpy(desc->euuid, desc->uuid, sizeof(desc->uuid)); } + uuid_copy(desc->fuuid, inp->necp_client_uuid); desc->rcvbufsize = so->so_rcv.sb_hiwat; desc->rcvbufused = so->so_rcv.sb_cc; desc->traffic_class = so->so_traffic_class; @@ -2003,11 +1990,10 @@ nstat_ifnet_lookup( return result; } } - cookie = OSMalloc(sizeof(*cookie), nstat_malloc_tag); + cookie = kheap_alloc(KHEAP_NET_STAT, sizeof(*cookie), Z_WAITOK | Z_ZERO); if (cookie == NULL) { return ENOMEM; } - 
bzero(cookie, sizeof(*cookie)); ifnet_head_lock_shared(); TAILQ_FOREACH(ifp, &ifnet_head, if_link) @@ -2052,7 +2038,7 @@ nstat_ifnet_lookup( lck_mtx_unlock(&nstat_mtx); } if (cookie->ifp == NULL) { - OSFree(cookie, sizeof(*cookie), nstat_malloc_tag); + kheap_free(KHEAP_NET_STAT, cookie, sizeof(*cookie)); } return ifp ? 0 : EINVAL; @@ -2158,7 +2144,7 @@ nstat_ifnet_release( ifnet_decr_iorefcnt(ifp); } ifnet_release(ifp); - OSFree(ifcookie, sizeof(*ifcookie), nstat_malloc_tag); + kheap_free(KHEAP_NET_STAT, ifcookie, sizeof(*ifcookie)); } static void @@ -2675,7 +2661,7 @@ static errno_t nstat_ifnet_copy_descriptor( nstat_provider_cookie_t cookie, void *data, - u_int32_t len) + size_t len) { nstat_ifnet_descriptor *desc = (nstat_ifnet_descriptor *)data; struct nstat_ifnet_cookie *ifcookie = @@ -2760,6 +2746,15 @@ nstat_set_keyval_scalar(nstat_sysinfo_keyval *kv, int key, u_int32_t val) kv->nstat_sysinfo_valsize = sizeof(kv->u.nstat_sysinfo_scalar); } +static void +nstat_set_keyval_u64_scalar(nstat_sysinfo_keyval *kv, int key, u_int64_t val) +{ + kv->nstat_sysinfo_key = key; + kv->nstat_sysinfo_flags = NSTAT_SYSINFO_FLAG_SCALAR; + kv->u.nstat_sysinfo_scalar = val; + kv->nstat_sysinfo_valsize = sizeof(kv->u.nstat_sysinfo_scalar); +} + static void nstat_set_keyval_string(nstat_sysinfo_keyval *kv, int key, u_int8_t *buf, u_int32_t len) @@ -2817,11 +2812,10 @@ nstat_sysinfo_send_data_internal( countsize += sizeof(nstat_sysinfo_keyval) * nkeyvals; allocsize += countsize; - syscnt = OSMalloc(allocsize, nstat_malloc_tag); + syscnt = kheap_alloc(KHEAP_TEMP, allocsize, Z_WAITOK | Z_ZERO); if (syscnt == NULL) { return; } - bzero(syscnt, allocsize); kv = (nstat_sysinfo_keyval *) &syscnt->counts.nstat_sysinfo_keyvals; switch (data->flags) { @@ -3042,22 +3036,22 @@ nstat_sysinfo_send_data_internal( nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_MPTCP_INTERACTIVE_CELL_FROM_WIFI, data->u.tcp_stats.mptcp_interactive_cell_from_wifi); - nstat_set_keyval_scalar(&kv[i++], + nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_MPTCP_HANDOVER_CELL_BYTES, data->u.tcp_stats.mptcp_handover_cell_bytes); - nstat_set_keyval_scalar(&kv[i++], + nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_MPTCP_INTERACTIVE_CELL_BYTES, data->u.tcp_stats.mptcp_interactive_cell_bytes); - nstat_set_keyval_scalar(&kv[i++], + nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_MPTCP_AGGREGATE_CELL_BYTES, data->u.tcp_stats.mptcp_aggregate_cell_bytes); - nstat_set_keyval_scalar(&kv[i++], + nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_MPTCP_HANDOVER_ALL_BYTES, data->u.tcp_stats.mptcp_handover_all_bytes); - nstat_set_keyval_scalar(&kv[i++], + nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_MPTCP_INTERACTIVE_ALL_BYTES, data->u.tcp_stats.mptcp_interactive_all_bytes); - nstat_set_keyval_scalar(&kv[i++], + nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_MPTCP_AGGREGATE_ALL_BYTES, data->u.tcp_stats.mptcp_aggregate_all_bytes); nstat_set_keyval_scalar(&kv[i++], @@ -3083,142 +3077,142 @@ nstat_sysinfo_send_data_internal( nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_PROTO, data->u.ifnet_ecn_stats.ifnet_proto); - nstat_set_keyval_scalar(&kv[i++], + nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_CLIENT_SETUP, data->u.ifnet_ecn_stats.ecn_stat.ecn_client_setup); - nstat_set_keyval_scalar(&kv[i++], + nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_SERVER_SETUP, data->u.ifnet_ecn_stats.ecn_stat.ecn_server_setup); - nstat_set_keyval_scalar(&kv[i++], + nstat_set_keyval_u64_scalar(&kv[i++], 
NSTAT_SYSINFO_ECN_IFNET_CLIENT_SUCCESS, data->u.ifnet_ecn_stats.ecn_stat.ecn_client_success); - nstat_set_keyval_scalar(&kv[i++], + nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_SERVER_SUCCESS, data->u.ifnet_ecn_stats.ecn_stat.ecn_server_success); - nstat_set_keyval_scalar(&kv[i++], + nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_PEER_NOSUPPORT, data->u.ifnet_ecn_stats.ecn_stat.ecn_peer_nosupport); - nstat_set_keyval_scalar(&kv[i++], + nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_SYN_LOST, data->u.ifnet_ecn_stats.ecn_stat.ecn_syn_lost); - nstat_set_keyval_scalar(&kv[i++], + nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_SYNACK_LOST, data->u.ifnet_ecn_stats.ecn_stat.ecn_synack_lost); - nstat_set_keyval_scalar(&kv[i++], + nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_RECV_CE, data->u.ifnet_ecn_stats.ecn_stat.ecn_recv_ce); - nstat_set_keyval_scalar(&kv[i++], + nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_RECV_ECE, data->u.ifnet_ecn_stats.ecn_stat.ecn_recv_ece); - nstat_set_keyval_scalar(&kv[i++], + nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_CONN_RECV_CE, data->u.ifnet_ecn_stats.ecn_stat.ecn_conn_recv_ce); - nstat_set_keyval_scalar(&kv[i++], + nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_CONN_RECV_ECE, data->u.ifnet_ecn_stats.ecn_stat.ecn_conn_recv_ece); - nstat_set_keyval_scalar(&kv[i++], + nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_CONN_PLNOCE, data->u.ifnet_ecn_stats.ecn_stat.ecn_conn_plnoce); - nstat_set_keyval_scalar(&kv[i++], + nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_CONN_PLCE, data->u.ifnet_ecn_stats.ecn_stat.ecn_conn_plce); - nstat_set_keyval_scalar(&kv[i++], + nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_CONN_NOPLCE, data->u.ifnet_ecn_stats.ecn_stat.ecn_conn_noplce); - nstat_set_keyval_scalar(&kv[i++], + nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_FALLBACK_SYNLOSS, data->u.ifnet_ecn_stats.ecn_stat.ecn_fallback_synloss); - nstat_set_keyval_scalar(&kv[i++], + nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_FALLBACK_REORDER, data->u.ifnet_ecn_stats.ecn_stat.ecn_fallback_reorder); - nstat_set_keyval_scalar(&kv[i++], + nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_FALLBACK_CE, data->u.ifnet_ecn_stats.ecn_stat.ecn_fallback_ce); - nstat_set_keyval_scalar(&kv[i++], + nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_ON_RTT_AVG, data->u.ifnet_ecn_stats.ecn_stat.ecn_on.rtt_avg); - nstat_set_keyval_scalar(&kv[i++], + nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_ON_RTT_VAR, data->u.ifnet_ecn_stats.ecn_stat.ecn_on.rtt_var); - nstat_set_keyval_scalar(&kv[i++], + nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_ON_OOPERCENT, data->u.ifnet_ecn_stats.ecn_stat.ecn_on.oo_percent); - nstat_set_keyval_scalar(&kv[i++], + nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_ON_SACK_EPISODE, data->u.ifnet_ecn_stats.ecn_stat.ecn_on.sack_episodes); - nstat_set_keyval_scalar(&kv[i++], + nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_ON_REORDER_PERCENT, data->u.ifnet_ecn_stats.ecn_stat.ecn_on.reorder_percent); - nstat_set_keyval_scalar(&kv[i++], + nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_ON_RXMIT_PERCENT, data->u.ifnet_ecn_stats.ecn_stat.ecn_on.rxmit_percent); - nstat_set_keyval_scalar(&kv[i++], + nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_ON_RXMIT_DROP, 
data->u.ifnet_ecn_stats.ecn_stat.ecn_on.rxmit_drop); - nstat_set_keyval_scalar(&kv[i++], + nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_OFF_RTT_AVG, data->u.ifnet_ecn_stats.ecn_stat.ecn_off.rtt_avg); - nstat_set_keyval_scalar(&kv[i++], + nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_OFF_RTT_VAR, data->u.ifnet_ecn_stats.ecn_stat.ecn_off.rtt_var); - nstat_set_keyval_scalar(&kv[i++], + nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_OFF_OOPERCENT, data->u.ifnet_ecn_stats.ecn_stat.ecn_off.oo_percent); - nstat_set_keyval_scalar(&kv[i++], + nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_OFF_SACK_EPISODE, data->u.ifnet_ecn_stats.ecn_stat.ecn_off.sack_episodes); - nstat_set_keyval_scalar(&kv[i++], + nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_OFF_REORDER_PERCENT, data->u.ifnet_ecn_stats.ecn_stat.ecn_off.reorder_percent); - nstat_set_keyval_scalar(&kv[i++], + nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_OFF_RXMIT_PERCENT, data->u.ifnet_ecn_stats.ecn_stat.ecn_off.rxmit_percent); - nstat_set_keyval_scalar(&kv[i++], + nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_OFF_RXMIT_DROP, data->u.ifnet_ecn_stats.ecn_stat.ecn_off.rxmit_drop); - nstat_set_keyval_scalar(&kv[i++], + nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_ON_TOTAL_TXPKTS, data->u.ifnet_ecn_stats.ecn_stat.ecn_on.total_txpkts); - nstat_set_keyval_scalar(&kv[i++], + nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_ON_TOTAL_RXMTPKTS, data->u.ifnet_ecn_stats.ecn_stat.ecn_on.total_rxmitpkts); - nstat_set_keyval_scalar(&kv[i++], + nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_ON_TOTAL_RXPKTS, data->u.ifnet_ecn_stats.ecn_stat.ecn_on.total_rxpkts); - nstat_set_keyval_scalar(&kv[i++], + nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_ON_TOTAL_OOPKTS, data->u.ifnet_ecn_stats.ecn_stat.ecn_on.total_oopkts); - nstat_set_keyval_scalar(&kv[i++], + nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_ON_DROP_RST, data->u.ifnet_ecn_stats.ecn_stat.ecn_on.rst_drop); - nstat_set_keyval_scalar(&kv[i++], + nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_OFF_TOTAL_TXPKTS, data->u.ifnet_ecn_stats.ecn_stat.ecn_off.total_txpkts); - nstat_set_keyval_scalar(&kv[i++], + nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_OFF_TOTAL_RXMTPKTS, data->u.ifnet_ecn_stats.ecn_stat.ecn_off.total_rxmitpkts); - nstat_set_keyval_scalar(&kv[i++], + nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_OFF_TOTAL_RXPKTS, data->u.ifnet_ecn_stats.ecn_stat.ecn_off.total_rxpkts); - nstat_set_keyval_scalar(&kv[i++], + nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_OFF_TOTAL_OOPKTS, data->u.ifnet_ecn_stats.ecn_stat.ecn_off.total_oopkts); - nstat_set_keyval_scalar(&kv[i++], + nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_OFF_DROP_RST, data->u.ifnet_ecn_stats.ecn_stat.ecn_off.rst_drop); - nstat_set_keyval_scalar(&kv[i++], + nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_TOTAL_CONN, data->u.ifnet_ecn_stats.ecn_stat.ecn_total_conn); nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_IFNET_UNSENT_DATA, data->unsent_data_cnt); - nstat_set_keyval_scalar(&kv[i++], + nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_FALLBACK_DROPRST, data->u.ifnet_ecn_stats.ecn_stat.ecn_fallback_droprst); - nstat_set_keyval_scalar(&kv[i++], + nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_FALLBACK_DROPRXMT, 
data->u.ifnet_ecn_stats.ecn_stat.ecn_fallback_droprxmt); - nstat_set_keyval_scalar(&kv[i++], + nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_FALLBACK_SYNRST, data->u.ifnet_ecn_stats.ecn_stat.ecn_fallback_synrst); break; @@ -3229,28 +3223,28 @@ nstat_sysinfo_send_data_internal( NSTAT_SYSINFO_LIM_IFNET_SIGNATURE, data->u.lim_stats.ifnet_signature, data->u.lim_stats.ifnet_siglen); - nstat_set_keyval_scalar(&kv[i++], + nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_LIM_IFNET_DL_MAX_BANDWIDTH, data->u.lim_stats.lim_stat.lim_dl_max_bandwidth); - nstat_set_keyval_scalar(&kv[i++], + nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_LIM_IFNET_UL_MAX_BANDWIDTH, data->u.lim_stats.lim_stat.lim_ul_max_bandwidth); - nstat_set_keyval_scalar(&kv[i++], + nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_LIM_IFNET_PACKET_LOSS_PERCENT, data->u.lim_stats.lim_stat.lim_packet_loss_percent); - nstat_set_keyval_scalar(&kv[i++], + nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_LIM_IFNET_PACKET_OOO_PERCENT, data->u.lim_stats.lim_stat.lim_packet_ooo_percent); - nstat_set_keyval_scalar(&kv[i++], + nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_LIM_IFNET_RTT_VARIANCE, data->u.lim_stats.lim_stat.lim_rtt_variance); - nstat_set_keyval_scalar(&kv[i++], + nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_LIM_IFNET_RTT_MIN, data->u.lim_stats.lim_stat.lim_rtt_min); - nstat_set_keyval_scalar(&kv[i++], + nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_LIM_IFNET_RTT_AVG, data->u.lim_stats.lim_stat.lim_rtt_average); - nstat_set_keyval_scalar(&kv[i++], + nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_LIM_IFNET_CONN_TIMEOUT_PERCENT, data->u.lim_stats.lim_stat.lim_conn_timeout_percent); nstat_set_keyval_scalar(&kv[i++], @@ -3266,135 +3260,135 @@ nstat_sysinfo_send_data_internal( } case NSTAT_SYSINFO_NET_API_STATS: { - nstat_set_keyval_scalar(&kv[i++], + nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_API_IF_FLTR_ATTACH, data->u.net_api_stats.net_api_stats.nas_iflt_attach_total); - nstat_set_keyval_scalar(&kv[i++], + nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_API_IF_FLTR_ATTACH_OS, data->u.net_api_stats.net_api_stats.nas_iflt_attach_os_total); - nstat_set_keyval_scalar(&kv[i++], + nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_API_IP_FLTR_ADD, data->u.net_api_stats.net_api_stats.nas_ipf_add_total); - nstat_set_keyval_scalar(&kv[i++], + nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_API_IP_FLTR_ADD_OS, data->u.net_api_stats.net_api_stats.nas_ipf_add_os_total); - nstat_set_keyval_scalar(&kv[i++], + nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_API_SOCK_FLTR_ATTACH, data->u.net_api_stats.net_api_stats.nas_sfltr_register_total); - nstat_set_keyval_scalar(&kv[i++], + nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_API_SOCK_FLTR_ATTACH_OS, data->u.net_api_stats.net_api_stats.nas_sfltr_register_os_total); - nstat_set_keyval_scalar(&kv[i++], + nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_API_SOCK_ALLOC_TOTAL, data->u.net_api_stats.net_api_stats.nas_socket_alloc_total); - nstat_set_keyval_scalar(&kv[i++], + nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_API_SOCK_ALLOC_KERNEL, data->u.net_api_stats.net_api_stats.nas_socket_in_kernel_total); - nstat_set_keyval_scalar(&kv[i++], + nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_API_SOCK_ALLOC_KERNEL_OS, data->u.net_api_stats.net_api_stats.nas_socket_in_kernel_os_total); - nstat_set_keyval_scalar(&kv[i++], + nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_API_SOCK_NECP_CLIENTUUID, 
data->u.net_api_stats.net_api_stats.nas_socket_necp_clientuuid_total); - nstat_set_keyval_scalar(&kv[i++], + nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_API_SOCK_DOMAIN_LOCAL, data->u.net_api_stats.net_api_stats.nas_socket_domain_local_total); - nstat_set_keyval_scalar(&kv[i++], + nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_API_SOCK_DOMAIN_ROUTE, data->u.net_api_stats.net_api_stats.nas_socket_domain_route_total); - nstat_set_keyval_scalar(&kv[i++], + nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_API_SOCK_DOMAIN_INET, data->u.net_api_stats.net_api_stats.nas_socket_domain_inet_total); - nstat_set_keyval_scalar(&kv[i++], + nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_API_SOCK_DOMAIN_INET6, data->u.net_api_stats.net_api_stats.nas_socket_domain_inet6_total); - nstat_set_keyval_scalar(&kv[i++], + nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_API_SOCK_DOMAIN_SYSTEM, data->u.net_api_stats.net_api_stats.nas_socket_domain_system_total); - nstat_set_keyval_scalar(&kv[i++], + nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_API_SOCK_DOMAIN_MULTIPATH, data->u.net_api_stats.net_api_stats.nas_socket_domain_multipath_total); - nstat_set_keyval_scalar(&kv[i++], + nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_API_SOCK_DOMAIN_KEY, data->u.net_api_stats.net_api_stats.nas_socket_domain_key_total); - nstat_set_keyval_scalar(&kv[i++], + nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_API_SOCK_DOMAIN_NDRV, data->u.net_api_stats.net_api_stats.nas_socket_domain_ndrv_total); - nstat_set_keyval_scalar(&kv[i++], + nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_API_SOCK_DOMAIN_OTHER, data->u.net_api_stats.net_api_stats.nas_socket_domain_other_total); - nstat_set_keyval_scalar(&kv[i++], + nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_API_SOCK_INET_STREAM, data->u.net_api_stats.net_api_stats.nas_socket_inet_stream_total); - nstat_set_keyval_scalar(&kv[i++], + nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_API_SOCK_INET_DGRAM, data->u.net_api_stats.net_api_stats.nas_socket_inet_dgram_total); - nstat_set_keyval_scalar(&kv[i++], + nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_API_SOCK_INET_DGRAM_CONNECTED, data->u.net_api_stats.net_api_stats.nas_socket_inet_dgram_connected); - nstat_set_keyval_scalar(&kv[i++], + nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_API_SOCK_INET_DGRAM_DNS, data->u.net_api_stats.net_api_stats.nas_socket_inet_dgram_dns); - nstat_set_keyval_scalar(&kv[i++], + nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_API_SOCK_INET_DGRAM_NO_DATA, data->u.net_api_stats.net_api_stats.nas_socket_inet_dgram_no_data); - nstat_set_keyval_scalar(&kv[i++], + nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_API_SOCK_INET6_STREAM, data->u.net_api_stats.net_api_stats.nas_socket_inet6_stream_total); - nstat_set_keyval_scalar(&kv[i++], + nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_API_SOCK_INET6_DGRAM, data->u.net_api_stats.net_api_stats.nas_socket_inet6_dgram_total); - nstat_set_keyval_scalar(&kv[i++], + nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_API_SOCK_INET6_DGRAM_CONNECTED, data->u.net_api_stats.net_api_stats.nas_socket_inet6_dgram_connected); - nstat_set_keyval_scalar(&kv[i++], + nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_API_SOCK_INET6_DGRAM_DNS, data->u.net_api_stats.net_api_stats.nas_socket_inet6_dgram_dns); - nstat_set_keyval_scalar(&kv[i++], + nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_API_SOCK_INET6_DGRAM_NO_DATA, data->u.net_api_stats.net_api_stats.nas_socket_inet6_dgram_no_data); - 
nstat_set_keyval_scalar(&kv[i++], + nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_API_SOCK_INET_MCAST_JOIN, data->u.net_api_stats.net_api_stats.nas_socket_mcast_join_total); - nstat_set_keyval_scalar(&kv[i++], + nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_API_SOCK_INET_MCAST_JOIN_OS, data->u.net_api_stats.net_api_stats.nas_socket_mcast_join_os_total); - nstat_set_keyval_scalar(&kv[i++], + nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_API_NEXUS_FLOW_INET_STREAM, data->u.net_api_stats.net_api_stats.nas_nx_flow_inet_stream_total); - nstat_set_keyval_scalar(&kv[i++], + nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_API_NEXUS_FLOW_INET_DATAGRAM, data->u.net_api_stats.net_api_stats.nas_nx_flow_inet_dgram_total); - nstat_set_keyval_scalar(&kv[i++], + nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_API_NEXUS_FLOW_INET6_STREAM, data->u.net_api_stats.net_api_stats.nas_nx_flow_inet6_stream_total); - nstat_set_keyval_scalar(&kv[i++], + nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_API_NEXUS_FLOW_INET6_DATAGRAM, data->u.net_api_stats.net_api_stats.nas_nx_flow_inet6_dgram_total); - nstat_set_keyval_scalar(&kv[i++], + nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_API_IFNET_ALLOC, data->u.net_api_stats.net_api_stats.nas_ifnet_alloc_total); - nstat_set_keyval_scalar(&kv[i++], + nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_API_IFNET_ALLOC_OS, data->u.net_api_stats.net_api_stats.nas_ifnet_alloc_os_total); - nstat_set_keyval_scalar(&kv[i++], + nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_API_PF_ADDRULE, data->u.net_api_stats.net_api_stats.nas_pf_addrule_total); - nstat_set_keyval_scalar(&kv[i++], + nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_API_PF_ADDRULE_OS, data->u.net_api_stats.net_api_stats.nas_pf_addrule_os); - nstat_set_keyval_scalar(&kv[i++], + nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_API_VMNET_START, data->u.net_api_stats.net_api_stats.nas_vmnet_total); @@ -3413,15 +3407,16 @@ nstat_sysinfo_send_data_internal( sizeof(nstat_sysinfo_keyval) * i; finalsize += countsize; syscnt->hdr.type = NSTAT_MSG_TYPE_SYSINFO_COUNTS; - syscnt->hdr.length = finalsize; - syscnt->counts.nstat_sysinfo_len = countsize; + assert(finalsize <= MAX_NSTAT_MSG_HDR_LENGTH); + syscnt->hdr.length = (u_int16_t)finalsize; + syscnt->counts.nstat_sysinfo_len = (u_int32_t)countsize; result = ctl_enqueuedata(control->ncs_kctl, control->ncs_unit, syscnt, finalsize, CTL_DATA_EOR); if (result != 0) { nstat_stats.nstat_sysinfofailures += 1; } - OSFree(syscnt, allocsize, nstat_malloc_tag); + kheap_free(KHEAP_TEMP, syscnt, allocsize); } return; } @@ -3472,7 +3467,7 @@ nstat_net_api_report_stats(void) return; } - st->report_interval = uptime - net_api_stats_last_report_time; + st->report_interval = (u_int32_t)(uptime - net_api_stats_last_report_time); net_api_stats_last_report_time = uptime; data.flags = NSTAT_SYSINFO_NET_API_STATS; @@ -3709,8 +3704,10 @@ static errno_t nstat_accumulate_msg( nstat_control_state *state, nstat_msg_hdr *hdr, - size_t length) + size_t length) { + assert(length <= MAX_NSTAT_MSG_HDR_LENGTH); + if (state->ncs_accumulated && mbuf_trailingspace(state->ncs_accumulated) < length) { // Will send the current mbuf nstat_flush_accumulated_msgs(state); @@ -3731,7 +3728,7 @@ nstat_accumulate_msg( } if (result == 0) { - hdr->length = length; + hdr->length = (u_int16_t)length; result = mbuf_copyback(state->ncs_accumulated, mbuf_len(state->ncs_accumulated), length, hdr, MBUF_DONTWAIT); } @@ -3852,7 +3849,7 @@ nstat_control_cleanup_source( } // Cleanup the 
source if we found it. src->provider->nstat_release(src->cookie, locked); - OSFree(src, sizeof(*src), nstat_malloc_tag); + kheap_free(KHEAP_NET_STAT, src, sizeof(*src)); } @@ -3876,12 +3873,12 @@ nstat_control_connect( struct sockaddr_ctl *sac, void **uinfo) { - nstat_control_state *state = OSMalloc(sizeof(*state), nstat_malloc_tag); + nstat_control_state *state = kheap_alloc(KHEAP_NET_STAT, + sizeof(*state), Z_WAITOK | Z_ZERO); if (state == NULL) { return ENOMEM; } - bzero(state, sizeof(*state)); lck_mtx_init(&state->ncs_mtx, nstat_lck_grp, NULL); state->ncs_kctl = kctl; state->ncs_unit = sac->sc_unit; @@ -3956,7 +3953,7 @@ nstat_control_disconnect( } lck_mtx_destroy(&state->ncs_mtx, nstat_lck_grp); - OSFree(state, sizeof(*state), nstat_malloc_tag); + kheap_free(KHEAP_NET_STAT, state, sizeof(*state)); return 0; } @@ -4057,7 +4054,9 @@ nstat_control_send_description( // Allocate storage for the descriptor message mbuf_t msg; unsigned int one = 1; - u_int32_t size = offsetof(nstat_msg_src_description, data) + src->provider->nstat_descriptor_length; + size_t size = offsetof(nstat_msg_src_description, data) + src->provider->nstat_descriptor_length; + assert(size <= MAX_NSTAT_MSG_HDR_LENGTH); + if (mbuf_allocpacket(MBUF_DONTWAIT, size, &one, &msg) != 0) { return ENOMEM; } @@ -4077,7 +4076,7 @@ nstat_control_send_description( desc->hdr.context = context; desc->hdr.type = NSTAT_MSG_TYPE_SRC_DESC; - desc->hdr.length = size; + desc->hdr.length = (u_int16_t)size; desc->hdr.flags = hdr_flags; desc->srcref = src->srcref; desc->event_flags = 0; @@ -4109,7 +4108,7 @@ nstat_control_append_description( nstat_msg_src_description *desc = (nstat_msg_src_description*)buffer; desc->hdr.type = NSTAT_MSG_TYPE_SRC_DESC; - desc->hdr.length = size; + desc->hdr.length = (u_int16_t)size; desc->srcref = src->srcref; desc->event_flags = 0; desc->provider = src->provider->nstat_provider_id; @@ -4145,8 +4144,10 @@ nstat_control_send_update( // Allocate storage for the descriptor message mbuf_t msg; unsigned int one = 1; - u_int32_t size = offsetof(nstat_msg_src_update, data) + + size_t size = offsetof(nstat_msg_src_update, data) + src->provider->nstat_descriptor_length; + assert(size <= MAX_NSTAT_MSG_HDR_LENGTH); + if (mbuf_allocpacket(MBUF_DONTWAIT, size, &one, &msg) != 0) { return ENOMEM; } @@ -4155,7 +4156,7 @@ nstat_control_send_update( bzero(desc, size); desc->hdr.context = context; desc->hdr.type = NSTAT_MSG_TYPE_SRC_UPDATE; - desc->hdr.length = size; + desc->hdr.length = (u_int16_t)size; desc->hdr.flags = hdr_flags; desc->srcref = src->srcref; desc->event_flags = event; @@ -4214,7 +4215,7 @@ nstat_control_append_update( nstat_msg_src_update *desc = (nstat_msg_src_update*)buffer; desc->hdr.type = NSTAT_MSG_TYPE_SRC_UPDATE; - desc->hdr.length = size; + desc->hdr.length = (u_int16_t)size; desc->srcref = src->srcref; desc->event_flags = 0; desc->provider = src->provider->nstat_provider_id; @@ -4288,7 +4289,7 @@ nstat_control_handle_add_request( } // Calculate the length of the parameter field - int32_t paramlength = mbuf_pkthdr_len(m) - offsetof(nstat_msg_add_src_req, param); + ssize_t paramlength = mbuf_pkthdr_len(m) - offsetof(nstat_msg_add_src_req, param); if (paramlength < 0 || paramlength > 2 * 1024) { return EINVAL; } @@ -4298,7 +4299,7 @@ nstat_control_handle_add_request( nstat_msg_add_src_req *req = mbuf_data(m); if (mbuf_pkthdr_len(m) > mbuf_len(m)) { // parameter is too large, we need to make a contiguous copy - void *data = OSMalloc(paramlength, nstat_malloc_tag); + void *data = kheap_alloc(KHEAP_TEMP, 
paramlength, Z_WAITOK); if (!data) { return ENOMEM; @@ -4307,7 +4308,7 @@ nstat_control_handle_add_request( if (result == 0) { result = nstat_lookup_entry(req->provider, data, paramlength, &provider, &cookie); } - OSFree(data, paramlength, nstat_malloc_tag); + kheap_free(KHEAP_TEMP, data, paramlength); } else { result = nstat_lookup_entry(req->provider, (void*)&req->param, paramlength, &provider, &cookie); } @@ -4436,14 +4437,15 @@ nstat_control_source_add( nstat_msg_src_added *add = mbuf_data(msg); bzero(add, sizeof(*add)); add->hdr.type = NSTAT_MSG_TYPE_SRC_ADDED; - add->hdr.length = mbuf_len(msg); + assert(mbuf_len(msg) <= MAX_NSTAT_MSG_HDR_LENGTH); + add->hdr.length = (u_int16_t)mbuf_len(msg); add->hdr.context = context; add->provider = provider->nstat_provider_id; srcrefp = &add->srcref; } // Allocate storage for the source - nstat_src *src = OSMalloc(sizeof(*src), nstat_malloc_tag); + nstat_src *src = kheap_alloc(KHEAP_NET_STAT, sizeof(*src), Z_WAITOK); if (src == NULL) { if (msg) { mbuf_freem(msg); @@ -4461,7 +4463,7 @@ nstat_control_source_add( if (state->ncs_flags & NSTAT_FLAG_CLEANUP || src->srcref == NSTAT_SRC_REF_INVALID) { lck_mtx_unlock(&state->ncs_mtx); - OSFree(src, sizeof(*src), nstat_malloc_tag); + kheap_free(KHEAP_NET_STAT, src, sizeof(*src)); if (msg) { mbuf_freem(msg); } @@ -4479,7 +4481,7 @@ nstat_control_source_add( if (result != 0) { nstat_stats.nstat_srcaddedfailures += 1; lck_mtx_unlock(&state->ncs_mtx); - OSFree(src, sizeof(*src), nstat_malloc_tag); + kheap_free(KHEAP_NET_STAT, src, sizeof(*src)); mbuf_freem(msg); return result; } @@ -4997,7 +4999,8 @@ nstat_control_send( // Fix everything up so old clients continue to work if (hdr->length != mbuf_pkthdr_len(m)) { hdr->flags = 0; - hdr->length = mbuf_pkthdr_len(m); + assert(mbuf_pkthdr_len(m) <= MAX_NSTAT_MSG_HDR_LENGTH); + hdr->length = (u_int16_t)mbuf_pkthdr_len(m); if (hdr == &storage) { mbuf_copyback(m, 0, sizeof(*hdr), hdr, MBUF_DONTWAIT); } @@ -5046,7 +5049,7 @@ nstat_control_send( bzero(&err, sizeof(err)); err.hdr.type = NSTAT_MSG_TYPE_ERROR; - err.hdr.length = sizeof(err) + mbuf_pkthdr_len(m); + err.hdr.length = (u_int16_t)(sizeof(err) + mbuf_pkthdr_len(m)); err.hdr.context = hdr->context; err.error = result; @@ -5158,7 +5161,7 @@ ntstat_tcp_progress_indicators(struct sysctl_req *req) if (error != 0) { return error; } - error = tcp_progress_indicators_for_interface(requested.ifindex, requested.recentflow_maxduration, (uint16_t)requested.filter_flags, &indicators); + error = tcp_progress_indicators_for_interface((unsigned int)requested.ifindex, requested.recentflow_maxduration, (uint16_t)requested.filter_flags, &indicators); if (error != 0) { return error; } diff --git a/bsd/net/ntstat.h b/bsd/net/ntstat.h index a5b976061..0891fe2d0 100644 --- a/bsd/net/ntstat.h +++ b/bsd/net/ntstat.h @@ -394,6 +394,9 @@ typedef struct nstat_tcp_descriptor { u_int64_t start_timestamp __attribute__((aligned(sizeof(u_int64_t)))); u_int64_t timestamp __attribute__((aligned(sizeof(u_int64_t)))); + u_int64_t rx_transfer_size __attribute__((aligned(sizeof(u_int64_t)))); + u_int64_t tx_transfer_size __attribute__((aligned(sizeof(u_int64_t)))); + activity_bitmap_t activity_bitmap; u_int32_t ifindex; @@ -428,6 +431,7 @@ typedef struct nstat_tcp_descriptor { uuid_t uuid; uuid_t euuid; uuid_t vuuid; + uuid_t fuuid; union { struct tcp_conn_status connstatus; // On armv7k, tcp_conn_status is 1 byte instead of 4 @@ -471,6 +475,7 @@ typedef struct nstat_udp_descriptor { uuid_t uuid; uuid_t euuid; uuid_t vuuid; + uuid_t fuuid; uint16_t 
ifnet_properties; u_int8_t reserved[6]; @@ -697,7 +702,7 @@ enum{ , NSTAT_MSG_TYPE_REM_SRC = 1003 , NSTAT_MSG_TYPE_QUERY_SRC = 1004 , NSTAT_MSG_TYPE_GET_SRC_DESC = 1005 - , NSTAT_MSG_TYPE_SET_FILTER = 1006 + , NSTAT_MSG_TYPE_SET_FILTER = 1006 // Obsolete , NSTAT_MSG_TYPE_GET_UPDATE = 1007 , NSTAT_MSG_TYPE_SUBSCRIBE_SYSINFO = 1008 @@ -734,14 +739,16 @@ enum{ , NSTAT_FILTER_ACCEPT_IS_CONSTRAINED = 0x00000400 , NSTAT_FILTER_ACCEPT_IS_LOCAL = 0x00000800 , NSTAT_FILTER_ACCEPT_IS_NON_LOCAL = 0x00001000 - , NSTAT_FILTER_IFNET_FLAGS = 0x00001FFF - - , NSTAT_FILTER_TCP_INTERFACE_ATTACH = 0x00004000 - , NSTAT_FILTER_TCP_NO_EARLY_CLOSE = 0x00008000 - , NSTAT_FILTER_TCP_FLAGS = 0x0000C000 + , NSTAT_FILTER_ACCEPT_ROUTE_VAL_ERR = 0x00002000 + , NSTAT_FILTER_ACCEPT_FLOWSWITCH_ERR = 0x00004000 + , NSTAT_FILTER_IFNET_FLAGS = 0x0000FFFF , NSTAT_FILTER_UDP_INTERFACE_ATTACH = 0x00010000 - , NSTAT_FILTER_UDP_FLAGS = 0x000F0000 + , NSTAT_FILTER_UDP_FLAGS = 0x00010000 + + , NSTAT_FILTER_TCP_INTERFACE_ATTACH = 0x00040000 + , NSTAT_FILTER_TCP_NO_EARLY_CLOSE = 0x00080000 + , NSTAT_FILTER_TCP_FLAGS = 0x000C0000 , NSTAT_FILTER_SUPPRESS_SRC_ADDED = 0x00100000 , NSTAT_FILTER_REQUIRE_SRC_ADDED = 0x00200000 @@ -767,6 +774,8 @@ typedef struct nstat_msg_hdr { u_int16_t flags; } nstat_msg_hdr; +#define MAX_NSTAT_MSG_HDR_LENGTH 65532 + typedef struct nstat_msg_error { nstat_msg_hdr hdr; u_int32_t error; // errno error diff --git a/bsd/net/packet_mangler.c b/bsd/net/packet_mangler.c index 8e7f41be0..d55219469 100644 --- a/bsd/net/packet_mangler.c +++ b/bsd/net/packet_mangler.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015-2016 Apple Inc. All rights reserved. + * Copyright (c) 2015-2020 Apple Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ * @@ -112,10 +112,8 @@ void* pkt_mnglr_rw_lock_history[PKT_MNGLR_RW_LCK_MAX]; int pkt_mnglr_rw_nxt_unlck = 0; void* pkt_mnglr_rw_unlock_history[PKT_MNGLR_RW_LCK_MAX]; - -#define PACKET_MANGLER_ZONE_NAME "packet_mangler" -#define PACKET_MANGLER_ZONE_MAX 10 -static struct zone *packet_mangler_zone = NULL; /* zone for packet_mangler */ +static ZONE_DECLARE(packet_mangler_zone, "packet_mangler", + sizeof(struct packet_mangler), ZC_NONE); /* * For troubleshooting @@ -701,7 +699,6 @@ pkt_mnglr_init(void) { struct kern_ctl_reg kern_ctl; errno_t error = 0; - vm_size_t pkt_mnglr_size = 0; PKT_MNGLR_LOG(LOG_NOTICE, ""); @@ -710,23 +707,6 @@ pkt_mnglr_init(void) */ _CASSERT(PKT_MNGLR_MAX_FILTER_COUNT == MAX_PACKET_MANGLER); - /* - * Zone for packet mangler kernel control sockets - */ - pkt_mnglr_size = sizeof(struct packet_mangler); - packet_mangler_zone = zinit(pkt_mnglr_size, - PACKET_MANGLER_ZONE_MAX * pkt_mnglr_size, - 0, - PACKET_MANGLER_ZONE_NAME); - - if (packet_mangler_zone == NULL) { - panic("%s: zinit(%s) failed", __func__, - PACKET_MANGLER_ZONE_NAME); - /* NOTREACHED */ - } - zone_change(packet_mangler_zone, Z_CALLERACCT, FALSE); - zone_change(packet_mangler_zone, Z_EXPAND, TRUE); - /* * Allocate locks */ diff --git a/bsd/net/pf.c b/bsd/net/pf.c index 87a392533..2a6309271 100644 --- a/bsd/net/pf.c +++ b/bsd/net/pf.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2007-2019 Apple Inc. All rights reserved. + * Copyright (c) 2007-2020 Apple Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -115,13 +115,11 @@ #include #endif /* NPFSYNC */ -#if INET6 #include #include #include #include #include -#endif /* INET6 */ #if DUMMYNET #include @@ -213,13 +211,11 @@ static void pf_change_ap(int, pbuf_t *, struct pf_addr *, sa_family_t, int); static int pf_modulate_sack(pbuf_t *, int, struct pf_pdesc *, struct tcphdr *, struct pf_state_peer *); -#if INET6 static void pf_change_a6(struct pf_addr *, u_int16_t *, struct pf_addr *, u_int8_t); void pf_change_addr(struct pf_addr *a, u_int16_t *c, struct pf_addr *an, u_int8_t u, sa_family_t af, sa_family_t afn); -#endif /* INET6 */ static void pf_change_icmp(struct pf_addr *, u_int16_t *, struct pf_addr *, struct pf_addr *, u_int16_t, u_int16_t *, u_int16_t *, u_int16_t *, @@ -285,11 +281,9 @@ static int pf_get_sport(struct pf_pdesc *, struct pfi_kif *, static void pf_route(pbuf_t **, struct pf_rule *, int, struct ifnet *, struct pf_state *, struct pf_pdesc *); -#if INET6 static void pf_route6(pbuf_t **, struct pf_rule *, int, struct ifnet *, struct pf_state *, struct pf_pdesc *); -#endif /* INET6 */ static u_int8_t pf_get_wscale(pbuf_t *, int, u_int16_t, sa_family_t); static u_int16_t pf_get_mss(pbuf_t *, int, u_int16_t, @@ -364,14 +358,12 @@ pf_lazy_makewritable(struct pf_pdesc *pd, pbuf_t *pbuf, int len) pd->ip_sum = &h->ip_sum; break; } -#if INET6 case AF_INET6: { struct ip6_hdr *h = p; pd->src = (struct pf_addr *)(uintptr_t)&h->ip6_src; pd->dst = (struct pf_addr *)(uintptr_t)&h->ip6_dst; break; } -#endif /* INET6 */ } } } @@ -655,31 +647,6 @@ struct pf_pptp_ctrl_set_linkinfo { u_int32_t rx_accm; }; -#if 0 -static const char * -pf_pptp_ctrl_type_name(u_int16_t code) -{ - code = ntohs(code); - - if (code < PF_PPTP_CTRL_TYPE_START_REQ || - code > PF_PPTP_CTRL_TYPE_SET_LINKINFO) { - static char reserved[] = "reserved-00"; - - sprintf(&reserved[9], "%02x", code); - return reserved; - } else { - static const char *name[] = { - "start_req", "start_rpy", "stop_req", "stop_rpy", - "echo_req", "echo_rpy", "call_out_req", "call_out_rpy", - "call_in_1st", "call_in_2nd", "call_in_3rd", - "call_clr", "call_disc", "error", "set_linkinfo" - }; - - return name[code - 1]; - } -}; -#endif - static const size_t PF_PPTP_CTRL_MSG_MINSIZE = sizeof(struct pf_pptp_hdr) + sizeof(struct pf_pptp_ctrl_hdr); @@ -770,7 +737,6 @@ pf_addr_compare(struct pf_addr *a, struct pf_addr *b, sa_family_t af) } break; #endif /* INET */ -#ifdef INET6 case AF_INET6: if (a->addr32[3] > b->addr32[3]) { return 1; @@ -797,7 +763,6 @@ pf_addr_compare(struct pf_addr *a, struct pf_addr *b, sa_family_t af) return -1; } break; -#endif /* INET6 */ } return 0; } @@ -905,7 +870,6 @@ pf_state_compare_lan_ext(struct pf_state_key *a, struct pf_state_key *b) } break; #endif /* INET */ -#if INET6 case AF_INET6: if ((diff = pf_addr_compare(&a->lan.addr, &b->lan.addr, a->af_lan)) != 0) { @@ -921,7 +885,6 @@ pf_state_compare_lan_ext(struct pf_state_key *a, struct pf_state_key *b) } } break; -#endif /* INET6 */ } if (a->app_state && b->app_state) { @@ -1026,7 +989,6 @@ pf_state_compare_ext_gwy(struct pf_state_key *a, struct pf_state_key *b) } break; #endif /* INET */ -#if INET6 case AF_INET6: if ((diff = pf_addr_compare(&a->gwy.addr, &b->gwy.addr, a->af_gwy)) != 0) { @@ -1041,7 +1003,6 @@ pf_state_compare_ext_gwy(struct pf_state_key *a, struct pf_state_key *b) } } break; -#endif /* INET6 */ } if (a->app_state && b->app_state) { @@ -1082,7 +1043,6 @@ pf_state_compare_id(struct pf_state *a, struct pf_state *b) return 0; } -#if INET6 void pf_addrcpy(struct 
pf_addr *dst, struct pf_addr *src, sa_family_t af) { @@ -1100,7 +1060,6 @@ pf_addrcpy(struct pf_addr *dst, struct pf_addr *src, sa_family_t af) break; } } -#endif /* INET6 */ struct pf_state * pf_find_state_byid(struct pf_state_cmp *key) @@ -1278,12 +1237,10 @@ pf_src_connlimit(struct pf_state **state) p.pfra_ip4addr = (*state)->src_node->addr.v4addr; break; #endif /* INET */ -#if INET6 case AF_INET6: p.pfra_net = 128; p.pfra_ip6addr = (*state)->src_node->addr.v6addr; break; -#endif /* INET6 */ } pfr_insert_kentry((*state)->rule.ptr->overload_tbl, @@ -1844,7 +1801,6 @@ pf_print_addr(struct pf_addr *addr, sa_family_t af) break; } #endif /* INET */ -#if INET6 case AF_INET6: { u_int16_t b; u_int8_t i, curstart = 255, curend = 0, @@ -1888,7 +1844,6 @@ pf_print_addr(struct pf_addr *addr, sa_family_t af) } break; } -#endif /* INET6 */ } } @@ -2250,7 +2205,6 @@ pf_change_ap(int dir, pbuf_t *pbuf, struct pf_addr *a, u_int16_t *p, po, pn, u); } break; -#ifdef INET6 case AF_INET6: *p = pn; *pc = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup( @@ -2267,11 +2221,9 @@ pf_change_ap(int dir, pbuf_t *pbuf, struct pf_addr *a, u_int16_t *p, 0, an->addr16[7], u), po, pn, u); break; -#endif /* INET6 */ } break; #endif /* INET */ -#if INET6 case AF_INET6: switch (afn) { case AF_INET6: @@ -2336,7 +2288,6 @@ pf_change_ap(int dir, pbuf_t *pbuf, struct pf_addr *a, u_int16_t *p, #endif /* INET */ } break; -#endif /* INET6 */ } } @@ -2353,7 +2304,6 @@ pf_change_a(void *a, u_int16_t *c, u_int32_t an, u_int8_t u) ao % 65536, an % 65536, u); } -#if INET6 static void pf_change_a6(struct pf_addr *a, u_int16_t *c, struct pf_addr *an, u_int8_t u) { @@ -2430,8 +2380,6 @@ pf_change_addr(struct pf_addr *a, u_int16_t *c, struct pf_addr *an, u_int8_t u, } } -#endif /* INET6 */ - static void pf_change_icmp(struct pf_addr *ia, u_int16_t *ip, struct pf_addr *oa, struct pf_addr *na, u_int16_t np, u_int16_t *pc, u_int16_t *h2c, @@ -2476,7 +2424,6 @@ pf_change_icmp(struct pf_addr *ia, u_int16_t *ip, struct pf_addr *oa, break; } #endif /* INET */ -#if INET6 case AF_INET6: *ic = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup( pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup( @@ -2490,7 +2437,6 @@ pf_change_icmp(struct pf_addr *ia, u_int16_t *ip, struct pf_addr *oa, oia.addr16[6], ia->addr16[6], u), oia.addr16[7], ia->addr16[7], u); break; -#endif /* INET6 */ } /* Change outer ip address, fix outer ip or icmpv6 checksum. 
*/ PF_ACPY(oa, na, af); @@ -2502,7 +2448,6 @@ pf_change_icmp(struct pf_addr *ia, u_int16_t *ip, struct pf_addr *oa, ooa.addr16[1], oa->addr16[1], 0); break; #endif /* INET */ -#if INET6 case AF_INET6: *ic = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup( pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup( @@ -2516,7 +2461,6 @@ pf_change_icmp(struct pf_addr *ia, u_int16_t *ip, struct pf_addr *oa, ooa.addr16[6], oa->addr16[6], u), ooa.addr16[7], oa->addr16[7], u); break; -#endif /* INET6 */ } } @@ -2566,7 +2510,7 @@ pf_modulate_sack(pbuf_t *pbuf, int off, struct pf_pdesc *pd, } copyback = off + sizeof(*th) + thoptlen; } - /* FALLTHROUGH */ + OS_FALLTHROUGH; default: if (olen < 2) { olen = 2; @@ -2610,9 +2554,7 @@ pf_send_tcp(const struct pf_rule *r, sa_family_t af, #if INET struct ip *h = NULL; #endif /* INET */ -#if INET6 struct ip6_hdr *h6 = NULL; -#endif /* INET6 */ struct tcphdr *th = NULL; char *opt; struct pf_mtag *pf_mtag; @@ -2629,11 +2571,9 @@ pf_send_tcp(const struct pf_rule *r, sa_family_t af, len = sizeof(struct ip) + tlen; break; #endif /* INET */ -#if INET6 case AF_INET6: len = sizeof(struct ip6_hdr) + tlen; break; -#endif /* INET6 */ default: panic("pf_send_tcp: not AF_INET or AF_INET6!"); return; @@ -2669,11 +2609,9 @@ pf_send_tcp(const struct pf_rule *r, sa_family_t af, pf_mtag->pftag_flags |= PF_TAG_HDR_INET; break; #endif /* INET */ -#if INET6 case AF_INET6: pf_mtag->pftag_flags |= PF_TAG_HDR_INET6; break; -#endif /* INET6 */ } #endif /* PF_ECN */ @@ -2699,7 +2637,6 @@ pf_send_tcp(const struct pf_rule *r, sa_family_t af, th = (struct tcphdr *)(void *)((caddr_t)h + sizeof(struct ip)); break; #endif /* INET */ -#if INET6 case AF_INET6: h6 = mtod(m, struct ip6_hdr *); @@ -2712,7 +2649,6 @@ pf_send_tcp(const struct pf_rule *r, sa_family_t af, th = (struct tcphdr *)(void *) ((caddr_t)h6 + sizeof(struct ip6_hdr)); break; -#endif /* INET6 */ } /* TCP header */ @@ -2760,7 +2696,6 @@ pf_send_tcp(const struct pf_rule *r, sa_family_t af, break; } #endif /* INET */ -#if INET6 case AF_INET6: { struct route_in6 ro6; @@ -2776,7 +2711,6 @@ pf_send_tcp(const struct pf_rule *r, sa_family_t af, ROUTE_RELEASE(&ro6); break; } -#endif /* INET6 */ } } @@ -2814,12 +2748,10 @@ pf_send_icmp(pbuf_t *pbuf, u_int8_t type, u_int8_t code, sa_family_t af, m0->m_pkthdr.pkt_proto = IPPROTO_ICMP; break; #endif /* INET */ -#if INET6 case AF_INET6: pf_mtag->pftag_flags |= PF_TAG_HDR_INET6; m0->m_pkthdr.pkt_proto = IPPROTO_ICMPV6; break; -#endif /* INET6 */ } #endif /* PF_ECN */ @@ -2829,11 +2761,9 @@ pf_send_icmp(pbuf_t *pbuf, u_int8_t type, u_int8_t code, sa_family_t af, icmp_error(m0, type, code, 0, 0); break; #endif /* INET */ -#if INET6 case AF_INET6: icmp6_error(m0, type, code, 0); break; -#endif /* INET6 */ } } @@ -2857,7 +2787,6 @@ pf_match_addr(u_int8_t n, struct pf_addr *a, struct pf_addr *m, } break; #endif /* INET */ -#if INET6 case AF_INET6: if (((a->addr32[0] & m->addr32[0]) == (b->addr32[0] & m->addr32[0])) && @@ -2870,7 +2799,6 @@ pf_match_addr(u_int8_t n, struct pf_addr *a, struct pf_addr *m, match++; } break; -#endif /* INET6 */ } if (match) { if (n) { @@ -2903,7 +2831,6 @@ pf_match_addr_range(struct pf_addr *b, struct pf_addr *e, } break; #endif /* INET */ -#if INET6 case AF_INET6: { int i; @@ -2925,7 +2852,6 @@ pf_match_addr_range(struct pf_addr *b, struct pf_addr *e, } break; } -#endif /* INET6 */ } return 1; } @@ -3145,7 +3071,6 @@ pf_step_out_of_anchor(int *depth, struct pf_ruleset **rs, int n, return quick; } -#if INET6 void pf_poolmask(struct pf_addr *naddr, struct pf_addr *raddr, struct 
pf_addr *rmask, struct pf_addr *saddr, sa_family_t af) @@ -3203,7 +3128,6 @@ pf_addr_inc(struct pf_addr *addr, sa_family_t af) break; } } -#endif /* INET6 */ #define mix(a, b, c) \ do { \ @@ -3236,7 +3160,6 @@ pf_hash(struct pf_addr *inaddr, struct pf_addr *hash, hash->addr32[0] = c + key->key32[2]; break; #endif /* INET */ -#if INET6 case AF_INET6: a += inaddr->addr32[0]; b += inaddr->addr32[2]; @@ -3258,7 +3181,6 @@ pf_hash(struct pf_addr *inaddr, struct pf_addr *hash, mix(a, b, c); hash->addr32[3] = c; break; -#endif /* INET6 */ } } @@ -3317,7 +3239,6 @@ pf_map_addr(sa_family_t af, struct pf_rule *r, struct pf_addr *saddr, rmask = &rpool->cur->addr.p.dyn->pfid_mask4; break; #endif /* INET */ -#if INET6 case AF_INET6: if (rpool->cur->addr.p.dyn->pfid_acnt6 < 1 && (rpool->opts & PF_POOL_TYPEMASK) != @@ -3327,7 +3248,6 @@ pf_map_addr(sa_family_t af, struct pf_rule *r, struct pf_addr *saddr, raddr = &rpool->cur->addr.p.dyn->pfid_addr6; rmask = &rpool->cur->addr.p.dyn->pfid_mask6; break; -#endif /* INET6 */ } } else if (rpool->cur->addr.type == PF_ADDR_TABLE) { if ((rpool->opts & PF_POOL_TYPEMASK) != PF_POOL_ROUNDROBIN) { @@ -3354,7 +3274,6 @@ pf_map_addr(sa_family_t af, struct pf_rule *r, struct pf_addr *saddr, rpool->counter.addr32[0] = htonl(random()); break; #endif /* INET */ -#if INET6 case AF_INET6: if (rmask->addr32[3] != 0xffffffff) { rpool->counter.addr32[3] = @@ -3379,7 +3298,6 @@ pf_map_addr(sa_family_t af, struct pf_rule *r, struct pf_addr *saddr, RandomULong(); } break; -#endif /* INET6 */ } PF_POOLMASK(naddr, raddr, rmask, &rpool->counter, rpool->af); @@ -3885,7 +3803,6 @@ pf_get_translation_aux(struct pf_pdesc *pd, pbuf_t *pbuf, int off, saddr, AF_INET); break; #endif /* INET */ -#if INET6 case AF_INET6: if (r->rpool.cur->addr.p.dyn-> pfid_acnt6 < 1) { @@ -3898,7 +3815,6 @@ pf_get_translation_aux(struct pf_pdesc *pd, pbuf_t *pbuf, int off, pfid_mask6, saddr, AF_INET6); break; -#endif /* INET6 */ } } else { PF_POOLMASK(nsaddr, @@ -3927,7 +3843,6 @@ pf_get_translation_aux(struct pf_pdesc *pd, pbuf_t *pbuf, int off, daddr, AF_INET); break; #endif /* INET */ -#if INET6 case AF_INET6: if (r->src.addr.p.dyn-> pfid_acnt6 < 1) { @@ -3940,7 +3855,6 @@ pf_get_translation_aux(struct pf_pdesc *pd, pbuf_t *pbuf, int off, pfid_mask6, daddr, AF_INET6); break; -#endif /* INET6 */ } } else { PF_POOLMASK(ndaddr, @@ -3973,7 +3887,6 @@ pf_get_translation_aux(struct pf_pdesc *pd, pbuf_t *pbuf, int off, daddr, AF_INET); break; #endif /* INET */ -#if INET6 case AF_INET6: if (r->dst.addr.p.dyn-> pfid_acnt6 < 1) { @@ -3986,7 +3899,6 @@ pf_get_translation_aux(struct pf_pdesc *pd, pbuf_t *pbuf, int off, pfid_mask6, daddr, AF_INET6); break; -#endif /* INET6 */ } } else { PF_POOLMASK(nsaddr, @@ -4098,7 +4010,6 @@ pf_socket_lookup(int direction, struct pf_pdesc *pd) case AF_INET: inp = in_pcblookup_hash_exists(pi, saddr->v4addr, sport, daddr->v4addr, dport, 0, &pd->lookup.uid, &pd->lookup.gid, NULL); -#if INET6 if (inp == 0) { struct in6_addr s6, d6; @@ -4127,19 +4038,8 @@ pf_socket_lookup(int direction, struct pf_pdesc *pd) } } } -#else - if (inp == 0) { - inp = in_pcblookup_hash_exists(pi, saddr->v4addr, sport, - daddr->v4addr, dport, INPLOOKUP_WILDCARD, - &pd->lookup.uid, &pd->lookup.gid, NULL); - if (inp == 0) { - return -1; - } - } -#endif /* !INET6 */ break; #endif /* INET */ -#if INET6 case AF_INET6: inp = in6_pcblookup_hash_exists(pi, &saddr->v6addr, sport, &daddr->v6addr, dport, 0, &pd->lookup.uid, &pd->lookup.gid, NULL); @@ -4152,7 +4052,6 @@ pf_socket_lookup(int direction, struct pf_pdesc *pd) } 
} break; -#endif /* INET6 */ default: return -1; @@ -4191,7 +4090,7 @@ pf_get_wscale(pbuf_t *pbuf, int off, u_int16_t th_off, sa_family_t af) wscale = TCP_MAX_WINSHIFT; } wscale |= PF_WSCALE_FLAG; - /* FALLTHROUGH */ + OS_FALLTHROUGH; default: optlen = opt[1]; if (optlen < 2) { @@ -4234,7 +4133,7 @@ pf_get_mss(pbuf_t *pbuf, int off, u_int16_t th_off, sa_family_t af) #if BYTE_ORDER != BIG_ENDIAN NTOHS(mss); #endif - /* FALLTHROUGH */ + OS_FALLTHROUGH; default: optlen = opt[1]; if (optlen < 2) { @@ -4255,10 +4154,8 @@ pf_calc_mss(struct pf_addr *addr, sa_family_t af, u_int16_t offer) struct sockaddr_in *dst; struct route ro; #endif /* INET */ -#if INET6 struct sockaddr_in6 *dst6; struct route_in6 ro6; -#endif /* INET6 */ struct rtentry *rt = NULL; int hlen; u_int16_t mss = tcp_mssdflt; @@ -4276,7 +4173,6 @@ pf_calc_mss(struct pf_addr *addr, sa_family_t af, u_int16_t offer) rt = ro.ro_rt; break; #endif /* INET */ -#if INET6 case AF_INET6: hlen = sizeof(struct ip6_hdr); bzero(&ro6, sizeof(ro6)); @@ -4287,7 +4183,6 @@ pf_calc_mss(struct pf_addr *addr, sa_family_t af, u_int16_t offer) rtalloc((struct route *)&ro); rt = ro6.ro_rt; break; -#endif /* INET6 */ default: panic("pf_calc_mss: not AF_INET or AF_INET6!"); return 0; @@ -4899,7 +4794,6 @@ pf_test_rule(struct pf_rule **rm, struct pf_state **sm, int direction, } break; #endif /* INET */ -#if INET6 case IPPROTO_ICMPV6: if (pd->af != AF_INET6) { break; @@ -4913,7 +4807,6 @@ pf_test_rule(struct pf_rule **rm, struct pf_state **sm, int direction, state_icmp++; } break; -#endif /* INET6 */ case IPPROTO_GRE: if (pd->proto_variant == PF_GRE_PPTP_VARIANT) { sxport.call_id = dxport.call_id = @@ -5038,7 +4931,6 @@ pf_test_rule(struct pf_rule **rm, struct pf_state **sm, int direction, ++rewrite; break; #endif /* INET */ -#if INET6 case IPPROTO_ICMPV6: if (pd->af != AF_INET6) { break; @@ -5067,7 +4959,6 @@ pf_test_rule(struct pf_rule **rm, struct pf_state **sm, int direction, } rewrite++; break; -#endif /* INET */ case IPPROTO_GRE: if ((direction == PF_IN) && (pd->proto_variant == PF_GRE_PPTP_VARIANT)) { @@ -5089,7 +4980,6 @@ pf_test_rule(struct pf_rule **rm, struct pf_state **sm, int direction, } break; #endif /* INET */ -#if INET6 case AF_INET6: if (PF_ANEQ(saddr, &pd->naddr, pd->af)) { PF_ACPY(saddr, &pd->naddr, AF_INET6); @@ -5098,7 +4988,6 @@ pf_test_rule(struct pf_rule **rm, struct pf_state **sm, int direction, PF_ACPY(daddr, &pd->ndaddr, AF_INET6); } break; -#endif /* INET6 */ } ++rewrite; break; @@ -5121,7 +5010,6 @@ pf_test_rule(struct pf_rule **rm, struct pf_state **sm, int direction, } break; #endif /* INET */ -#if INET6 case AF_INET6: if (PF_ANEQ(saddr, &pd->naddr, pd->af)) { PF_ACPY(saddr, &pd->naddr, AF_INET6); @@ -5130,7 +5018,6 @@ pf_test_rule(struct pf_rule **rm, struct pf_state **sm, int direction, PF_ACPY(daddr, &pd->ndaddr, AF_INET6); } break; -#endif /* INET6 */ } break; default: @@ -5150,7 +5037,6 @@ pf_test_rule(struct pf_rule **rm, struct pf_state **sm, int direction, } break; #endif /* INET */ -#if INET6 case AF_INET6: if (PF_ANEQ(saddr, &pd->naddr, pd->af)) { PF_ACPY(saddr, &pd->naddr, af); @@ -5159,7 +5045,6 @@ pf_test_rule(struct pf_rule **rm, struct pf_state **sm, int direction, PF_ACPY(daddr, &pd->ndaddr, af); } break; -#endif /* INET */ } break; } @@ -5331,9 +5216,7 @@ pf_test_rule(struct pf_rule **rm, struct pf_state **sm, int direction, rewrite++; break; case IPPROTO_ICMP: -#if INET6 case IPPROTO_ICMPV6: -#endif /* nothing! 
*/ break; case IPPROTO_GRE: @@ -5347,12 +5230,10 @@ pf_test_rule(struct pf_rule **rm, struct pf_state **sm, int direction, pd->baddr.v4addr.s_addr, 0); break; #endif /* INET */ -#if INET6 case AF_INET6: PF_ACPY(saddr, &pd->baddr, AF_INET6); break; -#endif /* INET6 */ } break; case IPPROTO_ESP: @@ -5365,12 +5246,10 @@ pf_test_rule(struct pf_rule **rm, struct pf_state **sm, int direction, pd->baddr.v4addr.s_addr, 0); break; #endif /* INET */ -#if INET6 case AF_INET6: PF_ACPY(saddr, &pd->baddr, AF_INET6); break; -#endif /* INET6 */ } break; default: @@ -5404,9 +5283,7 @@ pf_test_rule(struct pf_rule **rm, struct pf_state **sm, int direction, rewrite++; break; case IPPROTO_ICMP: -#if INET6 case IPPROTO_ICMPV6: -#endif /* nothing! */ break; case IPPROTO_GRE: @@ -5424,12 +5301,10 @@ pf_test_rule(struct pf_rule **rm, struct pf_state **sm, int direction, pd->bdaddr.v4addr.s_addr, 0); break; #endif /* INET */ -#if INET6 case AF_INET6: PF_ACPY(daddr, &pd->bdaddr, AF_INET6); break; -#endif /* INET6 */ } break; case IPPROTO_ESP: @@ -5441,12 +5316,10 @@ pf_test_rule(struct pf_rule **rm, struct pf_state **sm, int direction, pd->bdaddr.v4addr.s_addr, 0); break; #endif /* INET */ -#if INET6 case AF_INET6: PF_ACPY(daddr, &pd->bdaddr, AF_INET6); break; -#endif /* INET6 */ } break; default: @@ -5456,11 +5329,9 @@ pf_test_rule(struct pf_rule **rm, struct pf_state **sm, int direction, pd->ip_sum, pd->bdaddr.v4addr.s_addr, 0); break; -#if INET6 case AF_INET6: PF_ACPY(daddr, &pd->bdaddr, af); break; -#endif /* INET6 */ } } } @@ -5472,22 +5343,18 @@ pf_test_rule(struct pf_rule **rm, struct pf_state **sm, int direction, u_int32_t ack = ntohl(th->th_seq) + pd->p_len; int len = 0; struct ip *h4; -#if INET6 struct ip6_hdr *h6; -#endif /* INET6 */ switch (pd->af) { case AF_INET: h4 = pbuf->pb_data; len = ntohs(h4->ip_len) - off; break; -#if INET6 case AF_INET6: h6 = pbuf->pb_data; len = ntohs(h6->ip6_plen) - (off - sizeof(*h6)); break; -#endif /* INET6 */ } if (pf_check_proto_cksum(pbuf, off, len, IPPROTO_TCP, @@ -5549,9 +5416,7 @@ pf_test_rule(struct pf_rule **rm, struct pf_state **sm, int direction, psk.ext_gwy.xport.spi = pd->hdr.esp->spi; break; case IPPROTO_ICMP: -#if INET6 case IPPROTO_ICMPV6: -#endif /* * NAT64 requires protocol translation between ICMPv4 * and ICMPv6. TCP and UDP do not require protocol @@ -5591,9 +5456,7 @@ pf_test_rule(struct pf_rule **rm, struct pf_state **sm, int direction, } switch (pd->proto) { case IPPROTO_ICMP: -#if INET6 case IPPROTO_ICMPV6: -#endif /* * NAT64 requires protocol translation between ICMPv4 * and ICMPv6. TCP and UDP do not require protocol @@ -5827,9 +5690,7 @@ cleanup: s->timeout = PFTM_UDP_FIRST_PACKET; break; case IPPROTO_ICMP: -#if INET6 case IPPROTO_ICMPV6: -#endif s->timeout = PFTM_ICMP_FIRST_PACKET; break; case IPPROTO_GRE: @@ -6148,7 +6009,6 @@ pf_test_dummynet(struct pf_rule **rm, int direction, struct pfi_kif *kif, icmpcode = pd->hdr.icmp->icmp_code; break; #endif /* INET */ -#if INET6 case IPPROTO_ICMPV6: if (af != AF_INET6) { break; @@ -6157,7 +6017,6 @@ pf_test_dummynet(struct pf_rule **rm, int direction, struct pfi_kif *kif, icmptype = pd->hdr.icmp6->icmp6_type; icmpcode = pd->hdr.icmp6->icmp6_code; break; -#endif /* INET6 */ case IPPROTO_GRE: if (pd->proto_variant == PF_GRE_PPTP_VARIANT) { hdrlen = sizeof(*pd->hdr.grev1); @@ -6350,7 +6209,7 @@ pf_test_dummynet(struct pf_rule **rm, int direction, struct pfi_kif *kif, dnflow.fwa_cookie, (af == AF_INET) ? ((direction == PF_IN) ? DN_TO_IP_IN : DN_TO_IP_OUT) : ((direction == PF_IN) ? 
DN_TO_IP6_IN : DN_TO_IP6_OUT), - &dnflow, DN_CLIENT_PF); + &dnflow); } /* @@ -7735,7 +7594,6 @@ pf_test_state_icmp(struct pf_state **state, int direction, struct pfi_kif *kif, } break; #endif /* INET */ -#if INET6 case IPPROTO_ICMPV6: icmptype = pd->hdr.icmp6->icmp6_type; icmpid = pd->hdr.icmp6->icmp6_id; @@ -7745,7 +7603,6 @@ pf_test_state_icmp(struct pf_state **state, int direction, struct pfi_kif *kif, state_icmp++; } break; -#endif /* INET6 */ } if (!state_icmp) { @@ -7804,7 +7661,6 @@ pf_test_state_icmp(struct pf_state **state, int direction, struct pfi_kif *kif, pd->hdr.icmp); break; #endif /* INET */ -#if INET6 case AF_INET6: pf_change_a6(saddr, &pd->hdr.icmp6->icmp6_cksum, @@ -7818,7 +7674,6 @@ pf_test_state_icmp(struct pf_state **state, int direction, struct pfi_kif *kif, sizeof(struct icmp6_hdr), pd->hdr.icmp6); break; -#endif /* INET6 */ } } else { switch (pd->af) { @@ -7857,7 +7712,6 @@ pf_test_state_icmp(struct pf_state **state, int direction, struct pfi_kif *kif, } break; #endif /* INET */ -#if INET6 case AF_INET6: if (pd->naf != AF_INET6) { if (pf_translate_icmp_af( @@ -7884,7 +7738,6 @@ pf_test_state_icmp(struct pf_state **state, int direction, struct pfi_kif *kif, pbuf, off); } break; -#endif /* INET6 */ } } } @@ -7899,10 +7752,8 @@ pf_test_state_icmp(struct pf_state **state, int direction, struct pfi_kif *kif, #if INET struct ip h2; #endif /* INET */ -#if INET6 struct ip6_hdr h2_6; int terminal = 0; -#endif /* INET6 */ int ipoff2 = 0; int off2 = 0; @@ -7942,7 +7793,6 @@ pf_test_state_icmp(struct pf_state **state, int direction, struct pfi_kif *kif, pd2.ip_sum = &h2.ip_sum; break; #endif /* INET */ -#if INET6 case AF_INET6: ipoff2 = off + sizeof(struct icmp6_hdr); @@ -7998,7 +7848,6 @@ pf_test_state_icmp(struct pf_state **state, int direction, struct pfi_kif *kif, /* TODO */ pd2.off = ipoff2; break; -#endif /* INET6 */ } switch (pd2.proto) { @@ -8190,7 +8039,6 @@ pf_test_state_icmp(struct pf_state **state, int direction, struct pfi_kif *kif, &h2); break; #endif /* INET */ -#if INET6 case AF_INET6: pbuf_copy_back(pbuf, off, sizeof(struct icmp6_hdr), @@ -8198,7 +8046,6 @@ pf_test_state_icmp(struct pf_state **state, int direction, struct pfi_kif *kif, pbuf_copy_back(pbuf, ipoff2, sizeof(h2_6), &h2_6); break; -#endif /* INET6 */ } pbuf_copy_back(pbuf, off2, 8, &th); } @@ -8397,7 +8244,6 @@ pf_test_state_icmp(struct pf_state **state, int direction, struct pfi_kif *kif, sizeof(h2), &h2); break; #endif /* INET */ -#if INET6 case AF_INET6: pbuf_copy_back(pbuf, off, sizeof(struct icmp6_hdr), @@ -8405,7 +8251,6 @@ pf_test_state_icmp(struct pf_state **state, int direction, struct pfi_kif *kif, pbuf_copy_back(pbuf, ipoff2, sizeof(h2_6), &h2_6); break; -#endif /* INET6 */ } pbuf_copy_back(pbuf, off2, sizeof(uh), &uh); } @@ -8469,7 +8314,6 @@ pf_test_state_icmp(struct pf_state **state, int direction, struct pfi_kif *kif, return PF_PASS; } #endif /* INET */ -#if INET6 case IPPROTO_ICMPV6: { struct icmp6_hdr iih; @@ -8527,7 +8371,6 @@ pf_test_state_icmp(struct pf_state **state, int direction, struct pfi_kif *kif, return PF_PASS; } -#endif /* INET6 */ default: { key.proto = pd2.proto; if (direction == PF_IN) { @@ -8579,7 +8422,6 @@ pf_test_state_icmp(struct pf_state **state, int direction, struct pfi_kif *kif, * */ #endif /* INET */ -#if INET6 case AF_INET6: if (pf_lazy_makewritable(pd, pbuf, ipoff2 + sizeof(h2_6)) == NULL) { @@ -8591,7 +8433,6 @@ pf_test_state_icmp(struct pf_state **state, int direction, struct pfi_kif *kif, pbuf_copy_back(pbuf, ipoff2, sizeof(h2_6), &h2_6); break; 
-#endif /* INET6 */ } } @@ -8669,12 +8510,10 @@ pf_test_state_grev1(struct pf_state **state, int direction, (*state)->state_key->gwy.addr.v4addr.s_addr, 0); break; #endif /* INET */ -#if INET6 case AF_INET6: PF_ACPY(pd->src, &(*state)->state_key->gwy.addr, pd->af); break; -#endif /* INET6 */ } } else { grev1->call_id = (*state)->state_key->lan.xport.call_id; @@ -8687,12 +8526,10 @@ pf_test_state_grev1(struct pf_state **state, int direction, (*state)->state_key->lan.addr.v4addr.s_addr, 0); break; #endif /* INET */ -#if INET6 case AF_INET6: PF_ACPY(pd->dst, &(*state)->state_key->lan.addr, pd->af); break; -#endif /* INET6 */ } } @@ -8846,12 +8683,10 @@ pf_test_state_esp(struct pf_state **state, int direction, struct pfi_kif *kif, (*state)->state_key->gwy.addr.v4addr.s_addr, 0); break; #endif /* INET */ -#if INET6 case AF_INET6: PF_ACPY(pd->src, &(*state)->state_key->gwy.addr, pd->af); break; -#endif /* INET6 */ } } else { switch (pd->af) { @@ -8862,12 +8697,10 @@ pf_test_state_esp(struct pf_state **state, int direction, struct pfi_kif *kif, (*state)->state_key->lan.addr.v4addr.s_addr, 0); break; #endif /* INET */ -#if INET6 case AF_INET6: PF_ACPY(pd->dst, &(*state)->state_key->lan.addr, pd->af); break; -#endif /* INET6 */ } } } @@ -8936,12 +8769,10 @@ pf_test_state_other(struct pf_state **state, int direction, struct pfi_kif *kif, 0); break; #endif /* INET */ -#if INET6 case AF_INET6: PF_ACPY(pd->src, &(*state)->state_key->gwy.addr, pd->af); break; -#endif /* INET6 */ } } else { switch (pd->af) { @@ -8953,12 +8784,10 @@ pf_test_state_other(struct pf_state **state, int direction, struct pfi_kif *kif, 0); break; #endif /* INET */ -#if INET6 case AF_INET6: PF_ACPY(pd->dst, &(*state)->state_key->lan.addr, pd->af); break; -#endif /* INET6 */ } } } @@ -8998,7 +8827,6 @@ pf_pull_hdr(pbuf_t *pbuf, int off, void *p, int len, break; } #endif /* INET */ -#if INET6 case AF_INET6: { struct ip6_hdr *h = pbuf->pb_data; @@ -9011,7 +8839,6 @@ pf_pull_hdr(pbuf_t *pbuf, int off, void *p, int len, } break; } -#endif /* INET6 */ } pbuf_copy_data(pbuf, off, len, p); return p; @@ -9023,12 +8850,8 @@ pf_routable(struct pf_addr *addr, sa_family_t af, struct pfi_kif *kif) #pragma unused(kif) struct sockaddr_in *dst; int ret = 1; -#if INET6 struct sockaddr_in6 *dst6; struct route_in6 ro; -#else - struct route ro; -#endif bzero(&ro, sizeof(ro)); switch (af) { @@ -9038,14 +8861,12 @@ pf_routable(struct pf_addr *addr, sa_family_t af, struct pfi_kif *kif) dst->sin_len = sizeof(*dst); dst->sin_addr = addr->v4addr; break; -#if INET6 case AF_INET6: dst6 = (struct sockaddr_in6 *)&ro.ro_dst; dst6->sin6_family = AF_INET6; dst6->sin6_len = sizeof(*dst6); dst6->sin6_addr = addr->v6addr; break; -#endif /* INET6 */ default: return 0; } @@ -9069,12 +8890,8 @@ pf_rtlabel_match(struct pf_addr *addr, sa_family_t af, struct pf_addr_wrap *aw) { #pragma unused(aw) struct sockaddr_in *dst; -#if INET6 struct sockaddr_in6 *dst6; struct route_in6 ro; -#else - struct route ro; -#endif int ret = 0; bzero(&ro, sizeof(ro)); @@ -9085,14 +8902,12 @@ pf_rtlabel_match(struct pf_addr *addr, sa_family_t af, struct pf_addr_wrap *aw) dst->sin_len = sizeof(*dst); dst->sin_addr = addr->v4addr; break; -#if INET6 case AF_INET6: dst6 = (struct sockaddr_in6 *)&ro.ro_dst; dst6->sin6_family = AF_INET6; dst6->sin6_len = sizeof(*dst6); dst6->sin6_addr = addr->v6addr; break; -#endif /* INET6 */ default: return 0; } @@ -9308,7 +9123,6 @@ bad: } #endif /* INET */ -#if INET6 static void pf_route6(pbuf_t **pbufp, struct pf_rule *r, int dir, struct ifnet *oifp, struct 
pf_state *s, struct pf_pdesc *pd) @@ -9463,7 +9277,6 @@ bad: } goto done; } -#endif /* INET6 */ /* @@ -9495,9 +9308,7 @@ pf_check_proto_cksum(pbuf_t *pbuf, int off, int len, u_int8_t p, } break; case IPPROTO_ICMP: -#if INET6 case IPPROTO_ICMPV6: -#endif /* INET6 */ break; default: return 1; @@ -9512,21 +9323,10 @@ pf_check_proto_cksum(pbuf_t *pbuf, int off, int len, u_int8_t p, #if INET case AF_INET: if (p == IPPROTO_ICMP) { -#if 0 - if (m->m_len < off) { - return 1; - } - m->m_data += off; - m->m_len -= off; - sum = in_cksum(m, len); - m->m_data -= off; - m->m_len += off; -#else if (pbuf->pb_contig_len < (unsigned)off) { return 1; } sum = pbuf_inet_cksum(pbuf, 0, off, len); -#endif } else { if (pbuf->pb_contig_len < (int)sizeof(struct ip)) { return 1; @@ -9535,14 +9335,12 @@ pf_check_proto_cksum(pbuf_t *pbuf, int off, int len, u_int8_t p, } break; #endif /* INET */ -#if INET6 case AF_INET6: if (pbuf->pb_contig_len < (int)sizeof(struct ip6_hdr)) { return 1; } sum = pbuf_inet6_cksum(pbuf, p, off, len); break; -#endif /* INET6 */ default: return 1; } @@ -9557,11 +9355,9 @@ pf_check_proto_cksum(pbuf_t *pbuf, int off, int len, u_int8_t p, case IPPROTO_ICMP: icmpstat.icps_checksum++; break; -#if INET6 case IPPROTO_ICMPV6: icmp6stat.icp6s_checksum++; break; -#endif /* INET6 */ } return 1; } @@ -9960,6 +9756,7 @@ nonormalize: } /* not GREv1/PPTP, so treat as ordinary GRE... */ + OS_FALLTHROUGH; } default: @@ -10157,7 +9954,6 @@ done: } #endif /* INET */ -#if INET6 #define PF_APPLE_UPDATE_PDESC_IPv6() \ do { \ if (pbuf && pd.mp && pbuf != pd.mp) { \ @@ -10292,7 +10088,6 @@ nonormalize: #endif /* DUMMYNET */ h = pbuf->pb_data; -#if 1 /* * we do not support jumbogram yet. if we keep going, zero ip6_plen * will do something bad, so drop the packet for now. @@ -10302,7 +10097,6 @@ nonormalize: REASON_SET(&reason, PFRES_NORM); /*XXX*/ goto done; } -#endif pd.src = (struct pf_addr *)(uintptr_t)&h->ip6_src; pd.dst = (struct pf_addr *)(uintptr_t)&h->ip6_dst; PF_ACPY(&pd.baddr, pd.src, AF_INET6); @@ -10356,7 +10150,7 @@ nonormalize: } case IPPROTO_ROUTING: ++rh_cnt; - /* FALL THROUGH */ + OS_FALLTHROUGH; case IPPROTO_AH: case IPPROTO_HOPOPTS: @@ -10609,6 +10403,7 @@ nonormalize: } /* not GREv1/PPTP, so treat as ordinary GRE... */ + OS_FALLTHROUGH; /* XXX is this correct? 
*/ } default: @@ -10768,16 +10563,6 @@ done: } } -#if 0 - if (action == PF_SYNPROXY_DROP) { - m_freem(*m0); - *m0 = NULL; - action = PF_PASS; - } else if (r->rt) { - /* pf_route6 can free the mbuf causing *m0 to become NULL */ - pf_route6(m0, r, dir, kif->pfik_ifp, s, &pd); - } -#else VERIFY(pbuf == NULL || pd.mp == NULL || pd.mp == pbuf); if (*pbufp) { @@ -10803,7 +10588,6 @@ done: /* pf_route6 can free the mbuf causing *pbufp to become NULL */ pf_route6(pbufp, r, dir, kif->pfik_ifp, s, &pd); } -#endif /* 0 */ /* if reassembled packet passed, create new fragments */ struct pf_fragment_tag *ftag = NULL; @@ -10813,7 +10597,6 @@ done: } return action; } -#endif /* INET6 */ static int pf_check_congestion(struct ifqueue *ifq) @@ -10828,13 +10611,9 @@ pool_init(struct pool *pp, size_t size, unsigned int align, unsigned int ioff, { #pragma unused(align, ioff, flags, palloc) bzero(pp, sizeof(*pp)); - pp->pool_zone = zinit(size, 1024 * size, PAGE_SIZE, wchan); - if (pp->pool_zone != NULL) { - zone_change(pp->pool_zone, Z_EXPAND, TRUE); - zone_change(pp->pool_zone, Z_CALLERACCT, FALSE); - pp->pool_hiwat = pp->pool_limit = (unsigned int)-1; - pp->pool_name = wchan; - } + pp->pool_zone = zone_create(wchan, size, ZC_DESTRUCTIBLE); + pp->pool_hiwat = pp->pool_limit = (unsigned int)-1; + pp->pool_name = wchan; } /* Zones cannot be currently destroyed */ @@ -10873,7 +10652,8 @@ pool_get(struct pool *pp, int flags) return NULL; } - buf = zalloc_canblock(pp->pool_zone, (flags & (PR_NOWAIT | PR_WAITOK))); + buf = zalloc_flags(pp->pool_zone, + (flags & PR_WAITOK) ? Z_WAITOK : Z_NOWAIT); if (buf != NULL) { pp->pool_count++; VERIFY(pp->pool_count != 0); diff --git a/bsd/net/pf_if.c b/bsd/net/pf_if.c index 4c03c1c35..932429855 100644 --- a/bsd/net/pf_if.c +++ b/bsd/net/pf_if.c @@ -81,9 +81,7 @@ #include #include -#if INET6 #include -#endif /* INET6 */ #include @@ -100,12 +98,12 @@ __private_extern__ void pfi_kifaddr_update(void *); static void pfi_kif_update(struct pfi_kif *); static void pfi_dynaddr_update(struct pfi_dynaddr *dyn); -static void pfi_table_update(struct pfr_ktable *, struct pfi_kif *, int, int); -static void pfi_instance_add(struct ifnet *, int, int); -static void pfi_address_add(struct sockaddr *, int, int); +static void pfi_table_update(struct pfr_ktable *, struct pfi_kif *, uint8_t, int); +static void pfi_instance_add(struct ifnet *, uint8_t, int); +static void pfi_address_add(struct sockaddr *, uint8_t, uint8_t); static int pfi_if_compare(struct pfi_kif *, struct pfi_kif *); static int pfi_skip_if(const char *, struct pfi_kif *); -static int pfi_unmask(void *); +static uint8_t pfi_unmask(void *); RB_PROTOTYPE_SC(static, pfi_ifhead, pfi_kif, pfik_tree, pfi_if_compare); RB_GENERATE(pfi_ifhead, pfi_kif, pfik_tree, pfi_if_compare); @@ -146,12 +144,12 @@ struct pfi_kif * pfi_kif_get(const char *kif_name) { struct pfi_kif *kif; - struct pfi_kif_cmp s; + struct pfi_kif s; - bzero(&s, sizeof(s)); + bzero(&s.pfik_name, sizeof(s.pfik_name)); strlcpy(s.pfik_name, kif_name, sizeof(s.pfik_name)); - if ((kif = RB_FIND(pfi_ifhead, &pfi_ifs, - (struct pfi_kif *)(void *)&s)) != NULL) { + kif = RB_FIND(pfi_ifhead, &pfi_ifs, &s); + if (kif != NULL) { return kif; } @@ -294,7 +292,6 @@ pfi_match_addr(struct pfi_dynaddr *dyn, struct pf_addr *a, sa_family_t af) return pfr_match_addr(dyn->pfid_kt, a, AF_INET); } #endif /* INET */ -#if INET6 case AF_INET6: switch (dyn->pfid_acnt6) { case 0: @@ -305,7 +302,6 @@ pfi_match_addr(struct pfi_dynaddr *dyn, struct pf_addr *a, sa_family_t af) default: return 
pfr_match_addr(dyn->pfid_kt, a, AF_INET6); } -#endif /* INET6 */ default: return 0; } @@ -428,7 +424,7 @@ pfi_dynaddr_update(struct pfi_dynaddr *dyn) } void -pfi_table_update(struct pfr_ktable *kt, struct pfi_kif *kif, int net, int flags) +pfi_table_update(struct pfr_ktable *kt, struct pfi_kif *kif, uint8_t net, int flags) { int e, size2 = 0; @@ -446,11 +442,11 @@ pfi_table_update(struct pfr_ktable *kt, struct pfi_kif *kif, int net, int flags) } void -pfi_instance_add(struct ifnet *ifp, int net, int flags) +pfi_instance_add(struct ifnet *ifp, uint8_t net, int flags) { struct ifaddr *ia; int got4 = 0, got6 = 0; - int net2, af; + uint8_t net2, af; if (ifp == NULL) { return; @@ -535,7 +531,7 @@ pfi_instance_add(struct ifnet *ifp, int net, int flags) } void -pfi_address_add(struct sockaddr *sa, int af, int net) +pfi_address_add(struct sockaddr *sa, uint8_t af, uint8_t net) { struct pfr_addr *p; int i; @@ -627,20 +623,21 @@ pfi_kifaddr_update(void *v) int pfi_if_compare(struct pfi_kif *p, struct pfi_kif *q) { - return strncmp(p->pfik_name, q->pfik_name, IFNAMSIZ); + return strncmp(p->pfik_name, q->pfik_name, IFNAMSIZ - 1); } void pfi_update_status(const char *name, struct pf_status *pfs) { struct pfi_kif *p; - struct pfi_kif_cmp key; + struct pfi_kif key; int i, j, k; LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED); + bzero(&key.pfik_name, sizeof(key.pfik_name)); strlcpy(key.pfik_name, name, sizeof(key.pfik_name)); - p = RB_FIND(pfi_ifhead, &pfi_ifs, (struct pfi_kif *)(void *)&key); + p = RB_FIND(pfi_ifhead, &pfi_ifs, &key); if (p == NULL) { return; } @@ -715,7 +712,7 @@ pfi_get_ifaces(const char *name, user_addr_t buf, int *size) int pfi_skip_if(const char *filter, struct pfi_kif *p) { - int n; + size_t n; if (filter == NULL || !*filter) { return 0; @@ -769,7 +766,7 @@ pfi_clear_flags(const char *name, int flags) } /* from pf_print_state.c */ -int +uint8_t pfi_unmask(void *addr) { struct pf_addr *m = addr; @@ -786,5 +783,6 @@ pfi_unmask(void *addr) b++; } } - return b; + VERIFY(b >= 0 && b <= UINT8_MAX); + return (uint8_t)b; } diff --git a/bsd/net/pf_ioctl.c b/bsd/net/pf_ioctl.c index 9ba6cc70c..b40fe80ac 100644 --- a/bsd/net/pf_ioctl.c +++ b/bsd/net/pf_ioctl.c @@ -120,10 +120,8 @@ struct ip_fw_args; #include #endif /* PFLOG */ -#if INET6 #include #include -#endif /* INET6 */ #include @@ -189,7 +187,7 @@ static void pf_deleterule_anchor_step_out(struct pf_ruleset **, #define PF_CDEV_MAJOR (-1) -static struct cdevsw pf_cdevsw = { +static const struct cdevsw pf_cdevsw = { .d_open = pfopen, .d_close = pfclose, .d_read = eno_rdwrt, @@ -229,7 +227,7 @@ int16_t pf_nat64_configured = 0; static u_int64_t pf_enabled_ref_count; static u_int32_t nr_tokens = 0; -static u_int64_t pffwrules; +static u_int32_t pffwrules; static u_int32_t pfdevcnt; SLIST_HEAD(list_head, pfioc_kernel_token); @@ -237,7 +235,19 @@ static struct list_head token_list_head; struct pf_rule pf_default_rule; -#define TAGID_MAX 50000 +typedef struct { + char tag_name[PF_TAG_NAME_SIZE]; + uint16_t tag_id; +} pf_reserved_tag_table_t; + +#define NUM_RESERVED_TAGS 2 +static pf_reserved_tag_table_t pf_reserved_tag_table[NUM_RESERVED_TAGS] = { + { PF_TAG_NAME_SYSTEM_SERVICE, PF_TAG_ID_SYSTEM_SERVICE}, + { PF_TAG_NAME_STACK_DROP, PF_TAG_ID_STACK_DROP}, +}; +#define RESERVED_TAG_ID_MIN PF_TAG_ID_SYSTEM_SERVICE + +#define DYNAMIC_TAG_ID_MAX 50000 static TAILQ_HEAD(pf_tags, pf_tagname) pf_tags = TAILQ_HEAD_INITIALIZER(pf_tags); @@ -255,10 +265,8 @@ static void pf_rtlabel_copyout(struct pf_addr_wrap *); static int pf_inet_hook(struct ifnet *, 
struct mbuf **, int, struct ip_fw_args *); #endif /* INET */ -#if INET6 static int pf_inet6_hook(struct ifnet *, struct mbuf **, int, struct ip_fw_args *); -#endif /* INET6 */ #define DPFPRINTF(n, x) if (pf_status.debug >= (n)) printf x @@ -777,7 +785,8 @@ static u_int16_t tagname2tag(struct pf_tags *head, char *tagname) { struct pf_tagname *tag, *p = NULL; - u_int16_t new_tagid = 1; + uint16_t new_tagid = 1; + bool reserved_tag = false; TAILQ_FOREACH(tag, head, entries) if (strcmp(tagname, tag->name) == 0) { @@ -785,6 +794,19 @@ tagname2tag(struct pf_tags *head, char *tagname) return tag->tag; } + /* + * check if it is a reserved tag. + */ + _CASSERT(RESERVED_TAG_ID_MIN > DYNAMIC_TAG_ID_MAX); + for (int i = 0; i < NUM_RESERVED_TAGS; i++) { + if (strncmp(tagname, pf_reserved_tag_table[i].tag_name, + PF_TAG_NAME_SIZE) == 0) { + new_tagid = pf_reserved_tag_table[i].tag_id; + reserved_tag = true; + goto skip_dynamic_tag_alloc; + } + } + /* * to avoid fragmentation, we do a linear search from the beginning * and take the first free slot we find. if there is none or the list @@ -793,16 +815,24 @@ tagname2tag(struct pf_tags *head, char *tagname) /* new entry */ if (!TAILQ_EMPTY(head)) { + /* skip reserved tags */ for (p = TAILQ_FIRST(head); p != NULL && - p->tag == new_tagid; p = TAILQ_NEXT(p, entries)) { + p->tag >= RESERVED_TAG_ID_MIN; + p = TAILQ_NEXT(p, entries)) { + ; + } + + for (; p != NULL && p->tag == new_tagid; + p = TAILQ_NEXT(p, entries)) { new_tagid = p->tag + 1; } } - if (new_tagid > TAGID_MAX) { + if (new_tagid > DYNAMIC_TAG_ID_MAX) { return 0; } +skip_dynamic_tag_alloc: /* allocate and fill new struct pf_tagname */ tag = _MALLOC(sizeof(*tag), M_TEMP, M_WAITOK | M_ZERO); if (tag == NULL) { @@ -812,7 +842,9 @@ tagname2tag(struct pf_tags *head, char *tagname) tag->tag = new_tagid; tag->ref++; - if (p != NULL) { /* insert new entry before p */ + if (reserved_tag) { /* insert reserved tag at the head */ + TAILQ_INSERT_HEAD(head, tag, entries); + } else if (p != NULL) { /* insert new entry before p */ TAILQ_INSERT_BEFORE(p, tag, entries); } else { /* either list empty or no free slot in between */ TAILQ_INSERT_TAIL(head, tag, entries); @@ -953,7 +985,7 @@ pf_rollback_rules(u_int32_t ticket, int rs_num, char *anchor) MD5Update(ctx, (u_int8_t *)&(st)->elm, sizeof ((st)->elm)) #define PF_MD5_UPD_STR(st, elm) \ - MD5Update(ctx, (u_int8_t *)(st)->elm, strlen((st)->elm)) + MD5Update(ctx, (u_int8_t *)(st)->elm, (unsigned int)strlen((st)->elm)) #define PF_MD5_UPD_HTONL(st, elm, stor) do { \ (stor) = htonl((st)->elm); \ @@ -1928,6 +1960,7 @@ pfioctl_ioc_table(u_long cmd, struct pfioc_table_32 *io32, goto struct32; } +#ifdef __LP64__ /* * 64-bit structure processing */ @@ -2102,6 +2135,9 @@ pfioctl_ioc_table(u_long cmd, struct pfioc_table_32 *io32, /* NOTREACHED */ } goto done; +#else +#pragma unused(io64) +#endif /* __LP64__ */ struct32: /* @@ -2277,8 +2313,9 @@ struct32: VERIFY(0); /* NOTREACHED */ } - +#ifdef __LP64__ done: +#endif return error; } @@ -2317,7 +2354,11 @@ pfioctl_ioc_tokens(u_long cmd, struct pfioc_tokens_32 *tok32, break; } +#ifdef __LP64__ token_buf = (p64 ? 
tok64->pgt_buf : tok32->pgt_buf); +#else + token_buf = tok32->pgt_buf; +#endif tokens = _MALLOC(size, M_TEMP, M_WAITOK | M_ZERO); if (tokens == NULL) { error = ENOMEM; @@ -2792,13 +2833,6 @@ pfioctl_ioc_rule(u_long cmd, int minordev, struct pfioc_rule *pr, struct proc *p break; } #endif /* INET */ -#if !INET6 - if (rule->af == AF_INET6) { - pool_put(&pf_rule_pl, rule); - error = EAFNOSUPPORT; - break; - } -#endif /* INET6 */ tail = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr, pf_rulequeue); if (tail) { @@ -3004,13 +3038,6 @@ pfioctl_ioc_rule(u_long cmd, int minordev, struct pfioc_rule *pr, struct proc *p break; } #endif /* INET */ -#if !INET6 - if (newrule->af == AF_INET6) { - pool_put(&pf_rule_pl, newrule); - error = EAFNOSUPPORT; - break; - } -#endif /* INET6 */ if (newrule->ifname[0]) { newrule->kif = pfi_kif_get(newrule->ifname); if (newrule->kif == NULL) { @@ -3218,14 +3245,6 @@ pfioctl_ioc_rule(u_long cmd, int minordev, struct pfioc_rule *pr, struct proc *p break; } #endif /* INET */ -#if !INET6 - if (rule->af == AF_INET6) { - pool_put(&pf_rule_pl, rule); - error = EAFNOSUPPORT; - break; - } - -#endif /* INET6 */ r = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr); while ((r != NULL) && (rule->priority >= (unsigned)r->priority)) { r = TAILQ_NEXT(r, entries); @@ -3374,7 +3393,7 @@ pfioctl_ioc_state_kill(u_long cmd, struct pfioc_state_kill *psk, struct proc *p) killed++; } } - psk->psk_af = killed; + psk->psk_af = (sa_family_t)killed; #if NPFSYNC pfsync_clear_states(pf_status.hostid, psk->psk_ifname); #endif @@ -3439,7 +3458,7 @@ pfioctl_ioc_state_kill(u_long cmd, struct pfioc_state_kill *psk, struct proc *p) killed++; } } - psk->psk_af = killed; + psk->psk_af = (sa_family_t)killed; break; } @@ -3556,7 +3575,11 @@ pfioctl_ioc_states(u_long cmd, struct pfioc_states_32 *ps32, error = ENOMEM; break; } +#ifdef __LP64__ buf = (p64 ? ps64->ps_buf : ps32->ps_buf); +#else + buf = ps32->ps_buf; +#endif state = TAILQ_FIRST(&state_list); while (state) { @@ -3797,12 +3820,6 @@ pfioctl_ioc_pooladdr(u_long cmd, struct pfioc_pooladdr *pp, struct proc *p) break; } #endif /* INET */ -#if !INET6 - if (pp->af == AF_INET6) { - error = EAFNOSUPPORT; - break; - } -#endif /* INET6 */ if (pp->addr.addr.type != PF_ADDR_ADDRMASK && pp->addr.addr.type != PF_ADDR_DYNIFTL && pp->addr.addr.type != PF_ADDR_TABLE) { @@ -3919,13 +3936,6 @@ pfioctl_ioc_pooladdr(u_long cmd, struct pfioc_pooladdr *pp, struct proc *p) break; } #endif /* INET */ -#if !INET6 - if (pca->af == AF_INET6) { - pool_put(&pf_pooladdr_pl, newpa); - error = EAFNOSUPPORT; - break; - } -#endif /* INET6 */ if (newpa->ifname[0]) { newpa->kif = pfi_kif_get(newpa->ifname); if (newpa->kif == NULL) { @@ -4076,13 +4086,21 @@ static int pfioctl_ioc_trans(u_long cmd, struct pfioc_trans_32 *io32, struct pfioc_trans_64 *io64, struct proc *p) { - int p64 = proc_is64bit(p); int error = 0, esize, size; user_addr_t buf; +#ifdef __LP64__ + int p64 = proc_is64bit(p); + esize = (p64 ? io64->esize : io32->esize); size = (p64 ? io64->size : io32->size); buf = (p64 ? io64->array : io32->array); +#else +#pragma unused(io64, p) + esize = io32->esize; + size = io32->size; + buf = io32->array; +#endif switch (cmd) { case DIOCXBEGIN: { @@ -4323,7 +4341,11 @@ pfioctl_ioc_src_nodes(u_long cmd, struct pfioc_src_nodes_32 *psn32, error = ENOMEM; break; } +#ifdef __LP64__ buf = (p64 ? 
psn64->psn_buf : psn32->psn_buf); +#else + buf = psn32->psn_buf; +#endif RB_FOREACH(n, pf_src_tree, &tree_src_tracking) { uint64_t secs = pf_time_second(), diff; @@ -4429,7 +4451,7 @@ pfioctl_ioc_src_node_kill(u_long cmd, struct pfioc_src_node_kill *psnk, pf_purge_expired_src_nodes(); } - psnk->psnk_af = killed; + psnk->psnk_af = (sa_family_t)killed; break; } @@ -4453,8 +4475,13 @@ pfioctl_ioc_iface(u_long cmd, struct pfioc_iface_32 *io32, user_addr_t buf; int esize; +#ifdef __LP64__ buf = (p64 ? io64->pfiio_buffer : io32->pfiio_buffer); esize = (p64 ? io64->pfiio_esize : io32->pfiio_esize); +#else + buf = io32->pfiio_buffer; + esize = io32->pfiio_esize; +#endif /* esize must be that of the user space version of pfi_kif */ if (esize != sizeof(struct pfi_uif)) { @@ -4560,11 +4587,9 @@ pf_af_hook(struct ifnet *ifp, struct mbuf **mppn, struct mbuf **mp, break; } #endif /* INET */ -#if INET6 case AF_INET6: error = pf_inet6_hook(pf_ifp, mp, input, fwa); break; -#endif /* INET6 */ default: break; } @@ -4655,7 +4680,6 @@ pf_inet_hook(struct ifnet *ifp, struct mbuf **mp, int input, } #endif /* INET */ -#if INET6 int pf_inet6_hook(struct ifnet *ifp, struct mbuf **mp, int input, struct ip_fw_args *fwa) @@ -4695,7 +4719,6 @@ pf_inet6_hook(struct ifnet *ifp, struct mbuf **mp, int input, } return error; } -#endif /* INET6 */ int pf_ifaddr_hook(struct ifnet *ifp) diff --git a/bsd/net/pf_norm.c b/bsd/net/pf_norm.c index 96b85f462..324c296ca 100644 --- a/bsd/net/pf_norm.c +++ b/bsd/net/pf_norm.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2007-2018 Apple Inc. All rights reserved. + * Copyright (c) 2007-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -82,10 +82,8 @@ #include #include -#if INET6 #include #include -#endif /* INET6 */ #include @@ -168,7 +166,6 @@ static struct mbuf *pf_fragcache(struct mbuf **, struct ip *, struct pf_fragment **, int, int, int *); static int pf_normalize_tcpopt(struct pf_rule *, int, struct pfi_kif *, struct pf_pdesc *, pbuf_t *, struct tcphdr *, int, int *); -#if INET6 static __inline struct pf_fragment * pf_find_fragment_by_ipv6_header(struct ip6_hdr *, struct ip6_frag *, struct pf_frag_tree *); @@ -176,7 +173,6 @@ static struct mbuf *pf_reassemble6(struct mbuf **, struct pf_fragment **, struct pf_frent *, int); static struct mbuf *pf_frag6cache(struct mbuf **, struct ip6_hdr*, struct ip6_frag *, struct pf_fragment **, int, int, int, int *); -#endif /* INET6 */ #define DPFPRINTF(x) do { \ if (pf_status.debug >= PF_DEBUG_MISC) { \ @@ -264,7 +260,6 @@ pf_frag_compare(struct pf_fragment *a, struct pf_fragment *b) } break; #endif -#ifdef INET6 case AF_INET6: if ((diff = a->fr_id6 - b->fr_id6)) { return diff; @@ -302,7 +297,6 @@ pf_frag_compare(struct pf_fragment *a, struct pf_fragment *b) return 1; } break; -#endif default: VERIFY(!0 && "only IPv4 and IPv6 supported!"); break; @@ -1114,7 +1108,6 @@ drop_fragment: return NULL; } -#if INET6 #define FR_IP6_OFF(fr) \ (ntohs((fr)->fr_ip6f_opt.ip6f_offlg & IP6F_OFF_MASK)) #define FR_IP6_PLEN(fr) (ntohs((fr)->fr_ip6->ip6_plen)) @@ -1908,7 +1901,6 @@ pf_refragment6(struct ifnet *ifp, pbuf_t **pbufp, struct pf_fragment_tag *ftag) done: return action; } -#endif /* INET6 */ int pf_normalize_ip(pbuf_t *pbuf, int dir, struct pfi_kif *kif, u_short *reason, @@ -2231,7 +2223,6 @@ bad: return PF_DROP; } -#if INET6 static __inline struct pf_fragment * pf_find_fragment_by_ipv6_header(struct ip6_hdr *ip6, struct ip6_frag *fh, struct pf_frag_tree *tree) @@ -2599,7 +2590,6 @@ dropout: } return PF_DROP; } -#endif /* 
INET6 */ int pf_normalize_tcp(int dir, struct pfi_kif *kif, pbuf_t *pbuf, int ipoff, @@ -2783,13 +2773,11 @@ pf_normalize_tcp_init(pbuf_t *pbuf, int off, struct pf_pdesc *pd, break; } #endif /* INET */ -#if INET6 case AF_INET6: { struct ip6_hdr *h = pbuf->pb_data; src->scrub->pfss_ttl = h->ip6_hlim; break; } -#endif /* INET6 */ } @@ -2832,7 +2820,7 @@ pf_normalize_tcp_init(pbuf_t *pbuf, int off, struct pf_pdesc *pd, src->scrub->pfss_tsecr = ntohl(tsecr); getmicrouptime(&src->scrub->pfss_last); } - /* FALLTHROUGH */ + OS_FALLTHROUGH; default: hlen -= MAX(opt[1], 2); opt += MAX(opt[1], 2); @@ -2890,7 +2878,6 @@ pf_normalize_tcp_stateful(pbuf_t *pbuf, int off, struct pf_pdesc *pd, break; } #endif /* INET */ -#if INET6 case AF_INET6: { if (src->scrub) { struct ip6_hdr *h = pbuf->pb_data; @@ -2901,7 +2888,6 @@ pf_normalize_tcp_stateful(pbuf_t *pbuf, int off, struct pf_pdesc *pd, } break; } -#endif /* INET6 */ } if (th->th_off > (sizeof(struct tcphdr) >> 2) && @@ -2966,7 +2952,7 @@ pf_normalize_tcp_stateful(pbuf_t *pbuf, int off, struct pf_pdesc *pd, } got_ts = 1; } - /* FALLTHROUGH */ + OS_FALLTHROUGH; default: hlen -= MAX(opt[1], 2); opt += MAX(opt[1], 2); diff --git a/bsd/net/pf_osfp.c b/bsd/net/pf_osfp.c index 20b523d1a..c14ecdf17 100644 --- a/bsd/net/pf_osfp.c +++ b/bsd/net/pf_osfp.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2007-2011 Apple Inc. All rights reserved. + * Copyright (c) 2007-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -61,10 +61,8 @@ #include #include -#if INET6 #include #include -#endif /* INET6 */ #define DPFPRINTF(format, x...) \ if (pf_status.debug >= PF_DEBUG_NOISY) \ @@ -118,9 +116,6 @@ struct pf_osfp_enlist * pf_osfp_fingerprint_hdr(const struct ip *ip, const struct ip6_hdr *ip6, const struct tcphdr *tcp) { -#if !INET6 -#pragma unused(ip6) -#endif /* !INET6 */ struct pf_os_fingerprint fp, *fpresult; int cnt, optlen = 0; const u_int8_t *optp; @@ -145,9 +140,7 @@ pf_osfp_fingerprint_hdr(const struct ip *ip, const struct ip6_hdr *ip6, } (void) inet_ntop(AF_INET, &ip->ip_src, srcname, (socklen_t)sizeof(srcname)); - } -#if INET6 - else if (ip6) { + } else if (ip6) { /* jumbo payload? */ fp.fp_psize = sizeof(struct ip6_hdr) + ntohs(ip6->ip6_plen); fp.fp_ttl = ip6->ip6_hlim; @@ -155,9 +148,7 @@ pf_osfp_fingerprint_hdr(const struct ip *ip, const struct ip6_hdr *ip6, fp.fp_flags |= PF_OSFP_INET6; (void) inet_ntop(AF_INET6, &ip6->ip6_src, srcname, (socklen_t)sizeof(srcname)); - } -#endif - else { + } else { return NULL; } fp.fp_wsize = ntohs(tcp->th_win); @@ -200,9 +191,6 @@ pf_osfp_fingerprint_hdr(const struct ip *ip, const struct ip6_hdr *ip6, memcpy(&fp.fp_wscale, &optp[2], sizeof(fp.fp_wscale)); } -#if BYTE_ORDER != BIG_ENDIAN - NTOHS(fp.fp_wscale); -#endif fp.fp_tcpopts = (fp.fp_tcpopts << PF_OSFP_TCPOPT_BITS) | PF_OSFP_TCPOPT_WSCALE; diff --git a/bsd/net/pf_ruleset.c b/bsd/net/pf_ruleset.c index 4b3be609a..b802c11bb 100644 --- a/bsd/net/pf_ruleset.c +++ b/bsd/net/pf_ruleset.c @@ -83,9 +83,7 @@ #include #include -#if INET6 #include -#endif /* INET6 */ #ifdef KERNEL @@ -94,23 +92,6 @@ printf(format, ##x) #define rs_malloc(x) _MALLOC(x, M_TEMP, M_WAITOK) #define rs_free(x) _FREE(x, M_TEMP) -#define strrchr _strrchr - -static char * -_strrchr(const char *c, int ch) -{ - char *p = (char *)(size_t)c, *save; - - for (save = NULL;; ++p) { - if (*p == ch) { - save = (char *)p; - } - if (*p == '\0') { - return save; - } - } - /* NOTREACHED */ -} #else /* Userland equivalents so we can lend code to pfctl et al. 
*/ diff --git a/bsd/net/pf_table.c b/bsd/net/pf_table.c index 2d172aacc..098f96856 100644 --- a/bsd/net/pf_table.c +++ b/bsd/net/pf_table.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2007-2010 Apple Inc. All rights reserved. + * Copyright (c) 2007-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -169,7 +169,7 @@ static void pfr_enqueue_addrs(struct pfr_ktable *, struct pfr_kentryworkq *, static void pfr_mark_addrs(struct pfr_ktable *); static struct pfr_kentry *pfr_lookup_addr(struct pfr_ktable *, struct pfr_addr *, int); -static struct pfr_kentry *pfr_create_kentry(struct pfr_addr *, int); +static struct pfr_kentry *pfr_create_kentry(struct pfr_addr *, boolean_t); static void pfr_destroy_kentries(struct pfr_kentryworkq *); static void pfr_destroy_kentry(struct pfr_kentry *); static void pfr_insert_kentries(struct pfr_ktable *, @@ -799,13 +799,11 @@ pfr_validate_addr(struct pfr_addr *ad) } break; #endif /* INET */ -#if INET6 case AF_INET6: if (ad->pfra_net > 128) { return -1; } break; -#endif /* INET6 */ default: return -1; } @@ -908,7 +906,7 @@ pfr_lookup_addr(struct pfr_ktable *kt, struct pfr_addr *ad, int exact) } static struct pfr_kentry * -pfr_create_kentry(struct pfr_addr *ad, int intr) +pfr_create_kentry(struct pfr_addr *ad, boolean_t intr) { struct pfr_kentry *ke; @@ -930,7 +928,7 @@ pfr_create_kentry(struct pfr_addr *ad, int intr) ke->pfrke_af = ad->pfra_af; ke->pfrke_net = ad->pfra_net; ke->pfrke_not = ad->pfra_not; - ke->pfrke_intrpool = intr; + ke->pfrke_intrpool = (u_int8_t)intr; return ke; } @@ -985,7 +983,7 @@ pfr_insert_kentry(struct pfr_ktable *kt, struct pfr_addr *ad, u_int64_t tzero) if (p != NULL) { return 0; } - p = pfr_create_kentry(ad, 1); + p = pfr_create_kentry(ad, TRUE); if (p == NULL) { return EINVAL; } @@ -1179,7 +1177,7 @@ pfr_walktree(struct radix_node *rn, void *arg) if (ke->pfrke_mark) { break; } - /* FALLTHROUGH */ + OS_FALLTHROUGH; case PFRW_ENQUEUE: SLIST_INSERT_HEAD(w->pfrw_workq, ke, pfrke_workq); w->pfrw_cnt++; @@ -1322,6 +1320,7 @@ pfr_add_tables(user_addr_t tbl, int size, int *nadd, int flags) } SLIST_FOREACH(q, &addq, pfrkt_workq) { if (!pfr_ktable_compare(p, q)) { + pfr_destroy_ktable(p, 0); goto _skip; } } @@ -1724,7 +1723,7 @@ _skip: if (pfr_lookup_addr(shadow, &ad, 1) != NULL) { continue; } - p = pfr_create_kentry(&ad, 0); + p = pfr_create_kentry(&ad, FALSE); if (p == NULL) { senderr(ENOMEM); } @@ -1923,7 +1922,7 @@ pfr_table_copyin_cleanup(struct pfr_table *tbl) static int pfr_validate_table(struct pfr_table *tbl, int allowedflags, int no_reserved) { - int i; + size_t i; if (!tbl->pfrt_name[0]) { return -1; @@ -1956,7 +1955,7 @@ static int pfr_fix_anchor(char *anchor) { size_t siz = MAXPATHLEN; - int i; + size_t i; if (anchor[0] == '/') { char *path; @@ -1973,7 +1972,7 @@ pfr_fix_anchor(char *anchor) if (anchor[siz - 1]) { return -1; } - for (i = strlen(anchor); i < (int)siz; i++) { + for (i = strlen(anchor); i < siz; i++) { if (anchor[i]) { return -1; } @@ -2055,6 +2054,7 @@ pfr_setflags_ktable(struct pfr_ktable *kt, int newf) LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED); if (!(newf & PFR_TFLAG_REFERENCED) && + !(newf & PFR_TFLAG_REFDANCHOR) && !(newf & PFR_TFLAG_PERSIST)) { newf &= ~PFR_TFLAG_ACTIVE; } @@ -2237,7 +2237,6 @@ pfr_match_addr(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af) } break; #endif /* INET */ -#if INET6 case AF_INET6: bcopy(a, &pfr_sin6.sin6_addr, sizeof(pfr_sin6.sin6_addr)); ke = (struct pfr_kentry *)rn_match(&pfr_sin6, kt->pfrkt_ip6); @@ -2245,7 +2244,6 @@ pfr_match_addr(struct 
pfr_ktable *kt, struct pf_addr *a, sa_family_t af) ke = NULL; } break; -#endif /* INET6 */ } match = (ke && !ke->pfrke_not); if (match) { @@ -2281,7 +2279,6 @@ pfr_update_stats(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af, } break; #endif /* INET */ -#if INET6 case AF_INET6: bcopy(a, &pfr_sin6.sin6_addr, sizeof(pfr_sin6.sin6_addr)); ke = (struct pfr_kentry *)rn_match(&pfr_sin6, kt->pfrkt_ip6); @@ -2289,7 +2286,6 @@ pfr_update_stats(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af, ke = NULL; } break; -#endif /* INET6 */ default: ; } @@ -2480,12 +2476,10 @@ pfr_kentry_byidx(struct pfr_ktable *kt, int idx, int af) pfr_walktree, &w); return w.pfrw_kentry; #endif /* INET */ -#if INET6 case AF_INET6: (void) kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, &w); return w.pfrw_kentry; -#endif /* INET6 */ default: return NULL; } diff --git a/bsd/net/pfvar.h b/bsd/net/pfvar.h index c82e61d02..6e0eb0ef5 100644 --- a/bsd/net/pfvar.h +++ b/bsd/net/pfvar.h @@ -305,7 +305,7 @@ struct pfi_dynaddr { struct pfr_ktable *pfid_kt; struct pfi_kif *pfid_kif; void *pfid_hook_cookie; - int pfid_net; /* mask or 128 */ + uint8_t pfid_net; /* mask or 128 */ int pfid_acnt4; /* address count IPv4 */ int pfid_acnt6; /* address count IPv6 */ sa_family_t pfid_af; /* rule af */ @@ -317,21 +317,14 @@ struct pfi_dynaddr { */ #if INET -#if !INET6 -#define PF_INET_ONLY -#endif /* ! INET6 */ #endif /* INET */ -#if INET6 #if !INET #define PF_INET6_ONLY #endif /* ! INET */ -#endif /* INET6 */ #if INET -#if INET6 #define PF_INET_INET6 -#endif /* INET6 */ #endif /* INET */ #else /* !KERNEL */ @@ -854,6 +847,10 @@ struct pf_rule { #define PFAPPSTATE_HIWAT 10000 /* default same as state table */ +/* PF reserved special purpose tags */ +#define PF_TAG_NAME_SYSTEM_SERVICE "com.apple.pf.system_service_tag" +#define PF_TAG_NAME_STACK_DROP "com.apple.pf.stack_drop_tag" + enum pf_extmap { PF_EXTMAP_APD = 1, /* Address-port-dependent mapping */ PF_EXTMAP_AD, /* Address-dependent mapping */ @@ -1250,8 +1247,8 @@ RB_PROTOTYPE(pf_anchor_node, pf_anchor, entry_node, pf_anchor_compare); struct pfr_table { char pfrt_anchor[MAXPATHLEN]; char pfrt_name[PF_TABLE_NAME_SIZE]; - u_int32_t pfrt_flags; - u_int8_t pfrt_fback; + uint32_t pfrt_flags; + uint8_t pfrt_fback; }; enum { PFR_FB_NONE, PFR_FB_MATCH, PFR_FB_ADDED, PFR_FB_DELETED, @@ -1263,10 +1260,10 @@ struct pfr_addr { struct in_addr _pfra_ip4addr; struct in6_addr _pfra_ip6addr; } pfra_u; - u_int8_t pfra_af; - u_int8_t pfra_net; - u_int8_t pfra_not; - u_int8_t pfra_fback; + uint8_t pfra_af; + uint8_t pfra_net; + uint8_t pfra_not; + uint8_t pfra_fback; }; #define pfra_ip4addr pfra_u._pfra_ip4addr #define pfra_ip6addr pfra_u._pfra_ip6addr @@ -1278,11 +1275,11 @@ enum { PFR_OP_BLOCK, PFR_OP_PASS, PFR_OP_ADDR_MAX, PFR_OP_TABLE_MAX }; struct pfr_astats { struct pfr_addr pfras_a; #if !defined(__LP64__) - u_int32_t _pad; + uint32_t _pad; #endif /* !__LP64__ */ - u_int64_t pfras_packets[PFR_DIR_MAX][PFR_OP_ADDR_MAX]; - u_int64_t pfras_bytes[PFR_DIR_MAX][PFR_OP_ADDR_MAX]; - u_int64_t pfras_tzero; + uint64_t pfras_packets[PFR_DIR_MAX][PFR_OP_ADDR_MAX]; + uint64_t pfras_bytes[PFR_DIR_MAX][PFR_OP_ADDR_MAX]; + uint64_t pfras_tzero; }; enum { PFR_REFCNT_RULE, PFR_REFCNT_ANCHOR, PFR_REFCNT_MAX }; @@ -1360,11 +1357,6 @@ RB_HEAD(pfi_ifhead, pfi_kif); extern struct pf_state_tree_lan_ext pf_statetbl_lan_ext; extern struct pf_state_tree_ext_gwy pf_statetbl_ext_gwy; -/* keep synced with pfi_kif, used in RB_FIND */ -struct pfi_kif_cmp { - char pfik_name[IFNAMSIZ]; -}; - struct pfi_kif { char 
pfik_name[IFNAMSIZ]; RB_ENTRY(pfi_kif) pfik_tree; @@ -1416,9 +1408,7 @@ struct pf_pdesc { struct tcphdr *tcp; struct udphdr *udp; struct icmp *icmp; -#if INET6 struct icmp6_hdr *icmp6; -#endif /* INET6 */ struct pf_grev1_hdr *grev1; struct pf_esp_hdr *esp; void *any; @@ -1632,11 +1622,6 @@ struct priq_opts { u_int32_t flags; }; -struct qfq_opts { - u_int32_t flags; - u_int32_t lmax; -}; - struct hfsc_opts { /* real-time service curve */ u_int64_t rtsc_m1; /* slope of the 1st segment in bps */ @@ -1721,7 +1706,6 @@ struct pf_altq { struct priq_opts priq_opts; struct hfsc_opts hfsc_opts; struct fairq_opts fairq_opts; - struct qfq_opts qfq_opts; } pq_u; u_int32_t qid; /* return value */ @@ -2219,7 +2203,6 @@ __private_extern__ int pf_test_mbuf(int, struct ifnet *, struct mbuf **, struct ether_header *, struct ip_fw_args *); #endif /* INET */ -#if INET6 __private_extern__ int pf_test6(int, struct ifnet *, pbuf_t **, struct ether_header *, struct ip_fw_args *); __private_extern__ int pf_test6_mbuf(int, struct ifnet *, struct mbuf **, @@ -2231,7 +2214,6 @@ __private_extern__ int pf_normalize_ip6(pbuf_t *, int, struct pfi_kif *, u_short *, struct pf_pdesc *); __private_extern__ int pf_refragment6(struct ifnet *, pbuf_t **, struct pf_fragment_tag *); -#endif /* INET6 */ __private_extern__ void *pf_lazy_makewritable(struct pf_pdesc *, pbuf_t *, int); diff --git a/bsd/net/pktap.c b/bsd/net/pktap.c index 02340a977..c5d8635b3 100644 --- a/bsd/net/pktap.c +++ b/bsd/net/pktap.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012-2019 Apple Inc. All rights reserved. + * Copyright (c) 2012-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -242,15 +242,15 @@ pktap_clone_create(struct if_clone *ifc, u_int32_t unit, __unused void *params) pktap->pktp_filters[0].filter_param = PKTAP_FILTER_PARAM_IF_TYPE; pktap->pktp_filters[0].filter_param_if_type = IFT_ETHER; -#if CONFIG_EMBEDDED +#if !XNU_TARGET_OS_OSX pktap->pktp_filters[1].filter_op = PKTAP_FILTER_OP_PASS; pktap->pktp_filters[1].filter_param = PKTAP_FILTER_PARAM_IF_TYPE; pktap->pktp_filters[1].filter_param_if_type = IFT_CELLULAR; -#else /* CONFIG_EMBEDDED */ +#else /* XNU_TARGET_OS_OSX */ pktap->pktp_filters[1].filter_op = PKTAP_FILTER_OP_PASS; pktap->pktp_filters[1].filter_param = PKTAP_FILTER_PARAM_IF_TYPE; pktap->pktp_filters[1].filter_param_if_type = IFT_IEEE1394; -#endif /* CONFIG_EMBEDDED */ +#endif /* XNU_TARGET_OS_OSX */ pktap->pktp_filters[2].filter_op = PKTAP_FILTER_OP_PASS; pktap->pktp_filters[2].filter_param = PKTAP_FILTER_PARAM_IF_TYPE; @@ -447,7 +447,7 @@ pktap_getdrvspec(ifnet_t ifp, struct ifdrv64 *ifd) sizeof(x_filter->filter_param_if_name)); } } - error = copyout(x_filters, ifd->ifd_data, + error = copyout(x_filters, CAST_USER_ADDR_T(ifd->ifd_data), PKTAP_MAX_FILTERS * sizeof(struct x_pktap_filter)); if (error) { printf("%s: PKTP_CMD_FILTER_GET copyout - error %d\n", __func__, error); @@ -464,7 +464,7 @@ pktap_getdrvspec(ifnet_t ifp, struct ifdrv64 *ifd) error = EINVAL; break; } - error = copyout(&tap_count, ifd->ifd_data, sizeof(tap_count)); + error = copyout(&tap_count, CAST_USER_ADDR_T(ifd->ifd_data), sizeof(tap_count)); if (error) { printf("%s: PKTP_CMD_TAP_COUNT copyout - error %d\n", __func__, error); goto done; @@ -507,7 +507,7 @@ pktap_setdrvspec(ifnet_t ifp, struct ifdrv64 *ifd) error = EINVAL; break; } - error = copyin(ifd->ifd_data, &user_filters, ifd->ifd_len); + error = copyin(CAST_USER_ADDR_T(ifd->ifd_data), &user_filters, (size_t)ifd->ifd_len); if (error) { printf("%s: copyin - error %d\n", 
__func__, error); goto done; @@ -1311,7 +1311,8 @@ pktap_input(struct ifnet *ifp, protocol_family_t proto, struct mbuf *m, char *start; /* Fast path */ - if (pktap_total_tap_count == 0) { + if (pktap_total_tap_count == 0 || + (m->m_pkthdr.pkt_flags & PKTF_SKIP_PKTAP) != 0) { return; } @@ -1320,7 +1321,7 @@ pktap_input(struct ifnet *ifp, protocol_family_t proto, struct mbuf *m, /* Make sure the frame header is fully contained in the mbuf */ if (frame_header != NULL && frame_header >= start && frame_header <= hdr) { size_t o_len = m->m_len; - u_int32_t pre = hdr - frame_header; + u_int32_t pre = (u_int32_t)(hdr - frame_header); if (mbuf_setdata(m, frame_header, o_len + pre) == 0) { PKTAP_LOG(PKTP_LOG_INPUT, "ifp %s proto %u pre %u post %u\n", @@ -1342,7 +1343,8 @@ pktap_output(struct ifnet *ifp, protocol_family_t proto, struct mbuf *m, u_int32_t pre, u_int32_t post) { /* Fast path */ - if (pktap_total_tap_count == 0) { + if (pktap_total_tap_count == 0 || + (m->m_pkthdr.pkt_flags & PKTF_SKIP_PKTAP) != 0) { return; } diff --git a/bsd/net/pktap.h b/bsd/net/pktap.h index 6305b2131..5e07f99d6 100644 --- a/bsd/net/pktap.h +++ b/bsd/net/pktap.h @@ -190,11 +190,11 @@ struct pktap_buffer_v2_hdr_extra { (pktap_v2_hdr_dst)->pth_ifname_offset = 0; \ (pktap_v2_hdr_dst)->pth_comm_offset = 0; \ (pktap_v2_hdr_dst)->pth_e_comm_offset = 0; \ - (pktap_v2_hdr_dst)->pth_dlt = (pktap_header_src)->pth_dlt; \ - (pktap_v2_hdr_dst)->pth_frame_pre_length = (pktap_header_src)->pth_frame_pre_length; \ - (pktap_v2_hdr_dst)->pth_frame_post_length = (pktap_header_src)->pth_frame_post_length; \ + (pktap_v2_hdr_dst)->pth_dlt = (uint16_t)(pktap_header_src)->pth_dlt; \ + (pktap_v2_hdr_dst)->pth_frame_pre_length = (uint16_t)(pktap_header_src)->pth_frame_pre_length; \ + (pktap_v2_hdr_dst)->pth_frame_post_length = (uint16_t)(pktap_header_src)->pth_frame_post_length; \ (pktap_v2_hdr_dst)->pth_iftype = (pktap_header_src)->pth_iftype; \ - (pktap_v2_hdr_dst)->pth_ipproto = (pktap_header_src)->pth_ipproto; \ + (pktap_v2_hdr_dst)->pth_ipproto = (uint16_t)(pktap_header_src)->pth_ipproto; \ (pktap_v2_hdr_dst)->pth_protocol_family = (pktap_header_src)->pth_protocol_family; \ (pktap_v2_hdr_dst)->pth_svc = (pktap_header_src)->pth_svc; \ (pktap_v2_hdr_dst)->pth_flowid = (pktap_header_src)->pth_flowid; \ diff --git a/bsd/net/pktsched/Makefile b/bsd/net/pktsched/Makefile index 884775f00..f44397fcc 100644 --- a/bsd/net/pktsched/Makefile +++ b/bsd/net/pktsched/Makefile @@ -12,8 +12,7 @@ KERNELFILES= \ PRIVATE_DATAFILES = \ pktsched.h pktsched_cbq.h pktsched_fairq.h pktsched_hfsc.h \ - pktsched_priq.h pktsched_tcq.h pktsched_rmclass.h pktsched_qfq.h \ - pktsched_fq_codel.h + pktsched_priq.h pktsched_rmclass.h pktsched_fq_codel.h PRIVATE_KERNELFILES = ${KERNELFILES} diff --git a/bsd/net/pktsched/pktsched.c b/bsd/net/pktsched/pktsched.c index f08febca3..e569f9d3b 100644 --- a/bsd/net/pktsched/pktsched.c +++ b/bsd/net/pktsched/pktsched.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011-2019 Apple Inc. All rights reserved. + * Copyright (c) 2011-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -44,8 +44,6 @@ #include #include #include -#include -#include #include #include @@ -72,8 +70,6 @@ pktsched_init(void) /* NOTREACHED */ } - tcq_init(); - qfq_init(); netem_init(); } @@ -84,7 +80,7 @@ init_machclk(void) * Initialize machclk_freq using the timerbase frequency * value from device specific info. 
*/ - machclk_freq = gPEClockFrequencyInfo.timebase_frequency_hz; + machclk_freq = (uint32_t)gPEClockFrequencyInfo.timebase_frequency_hz; clock_interval_to_absolutetime_interval(1, NSEC_PER_SEC, &machclk_per_sec); @@ -131,33 +127,14 @@ pktsched_setup(struct ifclassq *ifq, u_int32_t scheduler, u_int32_t sflags, rflags = (ifq->ifcq_flags & IFCQF_ENABLED); if (ifq->ifcq_type != PKTSCHEDT_NONE) { - (void) pktsched_teardown(ifq); + pktsched_teardown(ifq); /* Teardown should have succeeded */ VERIFY(ifq->ifcq_type == PKTSCHEDT_NONE); VERIFY(ifq->ifcq_disc == NULL); - VERIFY(ifq->ifcq_enqueue == NULL); - VERIFY(ifq->ifcq_dequeue == NULL); - VERIFY(ifq->ifcq_dequeue_sc == NULL); - VERIFY(ifq->ifcq_request == NULL); - } - - switch (scheduler) { - case PKTSCHEDT_TCQ: - error = tcq_setup_ifclassq(ifq, sflags, ptype); - break; - - case PKTSCHEDT_QFQ: - error = qfq_setup_ifclassq(ifq, sflags, ptype); - break; - case PKTSCHEDT_FQ_CODEL: - error = fq_if_setup_ifclassq(ifq, sflags, ptype); - break; - default: - error = ENXIO; - break; } + error = fq_if_setup_ifclassq(ifq, sflags, ptype); if (error == 0) { ifq->ifcq_flags |= rflags; } @@ -165,11 +142,9 @@ pktsched_setup(struct ifclassq *ifq, u_int32_t scheduler, u_int32_t sflags, return error; } -int +void pktsched_teardown(struct ifclassq *ifq) { - int error = 0; - IFCQ_LOCK_ASSERT_HELD(ifq); if_qflush(ifq->ifcq_ifp, 1); @@ -177,51 +152,25 @@ pktsched_teardown(struct ifclassq *ifq) ifq->ifcq_flags &= ~IFCQF_ENABLED; - switch (ifq->ifcq_type) { - case PKTSCHEDT_NONE: - break; - - case PKTSCHEDT_TCQ: - error = tcq_teardown_ifclassq(ifq); - break; - - case PKTSCHEDT_QFQ: - error = qfq_teardown_ifclassq(ifq); - break; - - case PKTSCHEDT_FQ_CODEL: - error = fq_if_teardown_ifclassq(ifq); - break; - default: - error = ENXIO; - break; + if (ifq->ifcq_type == PKTSCHEDT_FQ_CODEL) { + /* Could be PKTSCHEDT_NONE */ + fq_if_teardown_ifclassq(ifq); } - return error; + + return; } int pktsched_getqstats(struct ifclassq *ifq, u_int32_t qid, struct if_ifclassq_stats *ifqs) { - int error; + int error = 0; IFCQ_LOCK_ASSERT_HELD(ifq); - switch (ifq->ifcq_type) { - case PKTSCHEDT_TCQ: - error = tcq_getqstats_ifclassq(ifq, qid, ifqs); - break; - - case PKTSCHEDT_QFQ: - error = qfq_getqstats_ifclassq(ifq, qid, ifqs); - break; - - case PKTSCHEDT_FQ_CODEL: + if (ifq->ifcq_type == PKTSCHEDT_FQ_CODEL) { + /* Could be PKTSCHEDT_NONE */ error = fq_if_getqstats_ifclassq(ifq, qid, ifqs); - break; - default: - error = ENXIO; - break; } return error; @@ -231,6 +180,8 @@ void pktsched_pkt_encap(pktsched_pkt_t *pkt, classq_pkt_t *cpkt) { pkt->pktsched_pkt = *cpkt; + pkt->pktsched_tail = *cpkt; + pkt->pktsched_pcnt = 1; switch (cpkt->cp_ptype) { case QP_MBUF: @@ -246,6 +197,27 @@ pktsched_pkt_encap(pktsched_pkt_t *pkt, classq_pkt_t *cpkt) } } +void +pktsched_pkt_encap_chain(pktsched_pkt_t *pkt, classq_pkt_t *cpkt, + classq_pkt_t *tail, uint32_t cnt, uint32_t bytes) +{ + pkt->pktsched_pkt = *cpkt; + pkt->pktsched_tail = *tail; + pkt->pktsched_pcnt = cnt; + pkt->pktsched_plen = bytes; + + switch (cpkt->cp_ptype) { + case QP_MBUF: + break; + + + default: + VERIFY(0); + /* NOTREACHED */ + __builtin_unreachable(); + } +} + int pktsched_clone_pkt(pktsched_pkt_t *pkt1, pktsched_pkt_t *pkt2) { @@ -253,6 +225,8 @@ pktsched_clone_pkt(pktsched_pkt_t *pkt1, pktsched_pkt_t *pkt2) ASSERT(pkt1 != NULL); ASSERT(pkt1->pktsched_pkt_mbuf != NULL); + ASSERT(pkt1->pktsched_pcnt == 1); + /* allow in place clone, but make sure pkt2->pktsched_pkt won't leak */ ASSERT((pkt1 == pkt2 && pkt1->pktsched_pkt_mbuf == 
pkt2->pktsched_pkt_mbuf) || (pkt1 != pkt2 && @@ -277,6 +251,8 @@ pktsched_clone_pkt(pktsched_pkt_t *pkt1, pktsched_pkt_t *pkt2) pkt2->pktsched_plen = pkt1->pktsched_plen; pkt2->pktsched_ptype = pkt1->pktsched_ptype; + pkt2->pktsched_tail = pkt2->pktsched_pkt; + pkt2->pktsched_pcnt = 1; return 0; } @@ -310,20 +286,32 @@ pktsched_corrupt_packet(pktsched_pkt_t *pkt) void pktsched_free_pkt(pktsched_pkt_t *pkt) { + uint32_t cnt = pkt->pktsched_pcnt; + ASSERT(cnt != 0); + switch (pkt->pktsched_ptype) { - case QP_MBUF: - m_freem(pkt->pktsched_pkt_mbuf); - break; + case QP_MBUF: { + struct mbuf *m; + m = pkt->pktsched_pkt_mbuf; + if (cnt == 1) { + VERIFY(m->m_nextpkt == NULL); + } else { + VERIFY(m->m_nextpkt != NULL); + } + m_freem_list(m); + break; + } default: VERIFY(0); /* NOTREACHED */ __builtin_unreachable(); } - pkt->pktsched_pkt = CLASSQ_PKT_INITIALIZER(pkt->pktsched_pkt); + pkt->pktsched_tail = CLASSQ_PKT_INITIALIZER(pkt->pktsched_tail); pkt->pktsched_plen = 0; + pkt->pktsched_pcnt = 0; } mbuf_svc_class_t @@ -349,7 +337,7 @@ pktsched_get_pkt_svc(pktsched_pkt_t *pkt) void pktsched_get_pkt_vars(pktsched_pkt_t *pkt, volatile uint32_t **flags, uint64_t **timestamp, uint32_t *flowid, uint8_t *flowsrc, uint8_t *proto, - uint32_t *tcp_start_seq) + uint32_t *comp_gencnt) { switch (pkt->pktsched_ptype) { case QP_MBUF: { @@ -370,12 +358,8 @@ pktsched_get_pkt_vars(pktsched_pkt_t *pkt, volatile uint32_t **flags, if (proto != NULL) { *proto = pkth->pkt_proto; } - /* - * caller should use this value only if PKTF_START_SEQ - * is set in the mbuf packet flags - */ - if (tcp_start_seq != NULL) { - *tcp_start_seq = pkth->tx_start_seq; + if (comp_gencnt != NULL) { + *comp_gencnt = pkth->comp_gencnt; } break; diff --git a/bsd/net/pktsched/pktsched.h b/bsd/net/pktsched/pktsched.h index 624e2e58d..904f9ef70 100644 --- a/bsd/net/pktsched/pktsched.h +++ b/bsd/net/pktsched/pktsched.h @@ -59,17 +59,25 @@ extern "C" { typedef struct _pktsched_pkt_ { classq_pkt_t __pkt; + classq_pkt_t __tail; uint32_t __plen; + uint32_t __pcnt; #define pktsched_ptype __pkt.cp_ptype #define pktsched_plen __plen +#define pktsched_pcnt __pcnt #define pktsched_pkt __pkt #define pktsched_pkt_mbuf __pkt.cp_mbuf #define pktsched_pkt_kpkt __pkt.cp_kpkt +#define pktsched_tail __tail +#define pktsched_tail_mbuf __tail.cp_mbuf +#define pktsched_tail_kpkt __tail.cp_kpkt } pktsched_pkt_t; #define _PKTSCHED_PKT_INIT(_p) do { \ (_p)->pktsched_pkt = CLASSQ_PKT_INITIALIZER((_p)->pktsched_pkt);\ + (_p)->pktsched_tail = CLASSQ_PKT_INITIALIZER((_p)->pktsched_tail);\ (_p)->pktsched_plen = 0; \ + (_p)->pktsched_pcnt = 0; \ } while (0) /* macro for timeout/untimeout */ @@ -103,7 +111,7 @@ typedef u_int32_t pktsched_bitmap_t; static inline boolean_t pktsched_bit_tst(u_int32_t ix, pktsched_bitmap_t *pData) { - return *pData & (1 << ix); + return (boolean_t)(*pData & (1 << ix)); } static inline void @@ -121,13 +129,13 @@ pktsched_bit_clr(u_int32_t ix, pktsched_bitmap_t *pData) static inline pktsched_bitmap_t pktsched_ffs(pktsched_bitmap_t pData) { - return ffs(pData); + return (pktsched_bitmap_t)ffs(pData); } static inline pktsched_bitmap_t pktsched_fls(pktsched_bitmap_t pData) { - return (sizeof(pktsched_bitmap_t) << 3) - clz(pData); + return (pktsched_bitmap_t)((sizeof(pktsched_bitmap_t) << 3) - (unsigned long)clz(pData)); } static inline pktsched_bitmap_t @@ -153,9 +161,9 @@ pktsched_get_pkt_len(pktsched_pkt_t *pkt) * machine dependent clock * a 64bit high resolution time counter. 
*/ -extern u_int32_t machclk_freq; -extern u_int64_t machclk_per_sec; -extern u_int32_t pktsched_verbose; +extern uint32_t machclk_freq; +extern uint64_t machclk_per_sec; +extern uint32_t pktsched_verbose; SYSCTL_DECL(_net_pktsched); @@ -164,7 +172,7 @@ struct if_ifclassq_stats; extern void pktsched_init(void); extern int pktsched_setup(struct ifclassq *, u_int32_t, u_int32_t, classq_pkt_type_t); -extern int pktsched_teardown(struct ifclassq *); +extern void pktsched_teardown(struct ifclassq *); extern int pktsched_getqstats(struct ifclassq *, u_int32_t, struct if_ifclassq_stats *); extern u_int64_t pktsched_abs_to_nsecs(u_int64_t); @@ -176,6 +184,8 @@ extern void pktsched_get_pkt_vars(pktsched_pkt_t *, volatile uint32_t **, uint64_t **, uint32_t *, uint8_t *, uint8_t *, uint32_t *); extern uint32_t *pktsched_get_pkt_sfb_vars(pktsched_pkt_t *, uint32_t **); extern void pktsched_pkt_encap(pktsched_pkt_t *, classq_pkt_t *); +extern void pktsched_pkt_encap_chain(pktsched_pkt_t *, classq_pkt_t *, + classq_pkt_t *, uint32_t, uint32_t); extern mbuf_svc_class_t pktsched_get_pkt_svc(pktsched_pkt_t *); extern struct flowadv_fcentry *pktsched_alloc_fcentry(pktsched_pkt_t *, struct ifnet *, int); diff --git a/bsd/net/pktsched/pktsched_fq_codel.c b/bsd/net/pktsched/pktsched_fq_codel.c index e523e8096..b34473e8b 100644 --- a/bsd/net/pktsched/pktsched_fq_codel.c +++ b/bsd/net/pktsched/pktsched_fq_codel.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-2019 Apple Inc. All rights reserved. + * Copyright (c) 2016-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -35,26 +35,15 @@ #include #include -static size_t fq_if_size; -static struct zone *fq_if_zone; +static ZONE_DECLARE(fq_if_zone, "pktsched_fq_if", sizeof(fq_if_t), ZC_ZFREE_CLEARMEM); static fq_if_t *fq_if_alloc(struct ifnet *, classq_pkt_type_t); static void fq_if_destroy(fq_if_t *fqs); -static void fq_if_classq_init(fq_if_t *fqs, u_int32_t priority, - u_int32_t quantum, u_int32_t drr_max, u_int32_t svc_class); -static int fq_if_enqueue_classq(struct ifclassq *, classq_pkt_t *, boolean_t *); -static void fq_if_dequeue_classq(struct ifclassq *, classq_pkt_t *); -static int fq_if_dequeue_classq_multi(struct ifclassq *, u_int32_t, - u_int32_t, classq_pkt_t *, classq_pkt_t *, u_int32_t *, u_int32_t *); -static void fq_if_dequeue_sc_classq(struct ifclassq *, mbuf_svc_class_t, - classq_pkt_t *); -static int fq_if_dequeue_sc_classq_multi(struct ifclassq *, - mbuf_svc_class_t, u_int32_t, u_int32_t, classq_pkt_t *, - classq_pkt_t *, u_int32_t *, u_int32_t *); -static void fq_if_dequeue(fq_if_t *, fq_if_classq_t *, u_int32_t, - u_int32_t, classq_pkt_t *, classq_pkt_t *, u_int32_t *, - u_int32_t *, boolean_t drvmgmt); -static int fq_if_request_classq(struct ifclassq *ifq, cqrq_t op, void *arg); +static void fq_if_classq_init(fq_if_t *fqs, uint32_t priority, + uint16_t quantum, uint32_t drr_max, uint32_t svc_class); +static void fq_if_dequeue(fq_if_t *, fq_if_classq_t *, uint32_t, + int64_t, classq_pkt_t *, classq_pkt_t *, uint32_t *, + uint32_t *, boolean_t drvmgmt); void fq_if_stat_sc(fq_if_t *fqs, cqrq_stat_sc_t *stat); static void fq_if_purge(fq_if_t *); static void fq_if_purge_classq(fq_if_t *, fq_if_classq_t *); @@ -64,9 +53,6 @@ static void fq_if_empty_new_flow(fq_t *fq, fq_if_classq_t *fq_cl, static void fq_if_empty_old_flow(fq_if_t *fqs, fq_if_classq_t *fq_cl, fq_t *fq, bool remove_hash); -#define FQ_IF_ZONE_MAX 32 /* Maximum elements in zone */ -#define FQ_IF_ZONE_NAME "pktsched_fq_if" /* zone for fq_if class */ - 
#define FQ_IF_FLOW_HASH_ID(_flowid_) \ (((_flowid_) >> FQ_IF_HASH_TAG_SHIFT) & FQ_IF_HASH_TAG_MASK) @@ -76,7 +62,7 @@ static void fq_if_empty_old_flow(fq_if_t *fqs, fq_if_classq_t *fq_cl, typedef void (* fq_if_append_pkt_t)(classq_pkt_t *, classq_pkt_t *); typedef boolean_t (* fq_getq_flow_t)(fq_if_t *, fq_if_classq_t *, fq_t *, - u_int32_t, u_int32_t, classq_pkt_t *, classq_pkt_t *, u_int32_t *, + int64_t, u_int32_t, classq_pkt_t *, classq_pkt_t *, u_int32_t *, u_int32_t *, boolean_t *, u_int32_t); static void @@ -89,7 +75,7 @@ fq_if_append_mbuf(classq_pkt_t *pkt, classq_pkt_t *next_pkt) static boolean_t fq_getq_flow_mbuf(fq_if_t *fqs, fq_if_classq_t *fq_cl, fq_t *fq, - u_int32_t byte_limit, u_int32_t pkt_limit, classq_pkt_t *top, + int64_t byte_limit, u_int32_t pkt_limit, classq_pkt_t *top, classq_pkt_t *last, u_int32_t *byte_cnt, u_int32_t *pkt_cnt, boolean_t *qempty, u_int32_t pflags) { @@ -135,33 +121,12 @@ fq_getq_flow_mbuf(fq_if_t *fqs, fq_if_classq_t *fq_cl, fq_t *fq, return limit_reached; } -void -fq_codel_scheduler_init(void) -{ - /* Initialize the zone for flow queue structures */ - fq_codel_init(); - - fq_if_size = sizeof(fq_if_t); - fq_if_zone = zinit(fq_if_size, (FQ_IF_ZONE_MAX * fq_if_size), 0, - FQ_IF_ZONE_NAME); - if (fq_if_zone == NULL) { - panic("%s: failed allocating from %s", __func__, - (FQ_IF_ZONE_NAME)); - } - zone_change(fq_if_zone, Z_EXPAND, TRUE); - zone_change(fq_if_zone, Z_CALLERACCT, TRUE); -} - fq_if_t * fq_if_alloc(struct ifnet *ifp, classq_pkt_type_t ptype) { fq_if_t *fqs; - fqs = zalloc(fq_if_zone); - if (fqs == NULL) { - return NULL; - } - bzero(fqs, fq_if_size); + fqs = zalloc_flags(fq_if_zone, Z_WAITOK | Z_ZERO); fqs->fqs_ifq = &ifp->if_snd; fqs->fqs_ptype = ptype; @@ -185,10 +150,10 @@ fq_if_destroy(fq_if_t *fqs) zfree(fq_if_zone, fqs); } -static inline u_int32_t +static inline uint8_t fq_if_service_to_priority(fq_if_t *fqs, mbuf_svc_class_t svc) { - u_int32_t pri; + uint8_t pri; if (fqs->fqs_flags & FQS_DRIVER_MANAGED) { switch (svc) { @@ -260,9 +225,9 @@ fq_if_service_to_priority(fq_if_t *fqs, mbuf_svc_class_t svc) return pri; } -void -fq_if_classq_init(fq_if_t *fqs, u_int32_t pri, u_int32_t quantum, - u_int32_t drr_max, u_int32_t svc_class) +static void +fq_if_classq_init(fq_if_t *fqs, uint32_t pri, uint16_t quantum, + uint32_t drr_max, uint32_t svc_class) { fq_if_classq_t *fq_cl; VERIFY(pri < FQ_IF_MAX_CLASSES); @@ -278,24 +243,17 @@ fq_if_classq_init(fq_if_t *fqs, u_int32_t pri, u_int32_t quantum, } int -fq_if_enqueue_classq(struct ifclassq *ifq, classq_pkt_t *p, boolean_t *pdrop) +fq_if_enqueue_classq(struct ifclassq *ifq, classq_pkt_t *head, + classq_pkt_t *tail, uint32_t cnt, uint32_t bytes, boolean_t *pdrop) { - u_int32_t pri; + uint8_t pri; fq_if_t *fqs; fq_if_classq_t *fq_cl; - int ret, len; + int ret; mbuf_svc_class_t svc; pktsched_pkt_t pkt; - IFCQ_LOCK_ASSERT_HELD(ifq); - if ((p->cp_ptype == QP_MBUF) && !(p->cp_mbuf->m_flags & M_PKTHDR)) { - IFCQ_CONVERT_LOCK(ifq); - m_freem(p->cp_mbuf); - *p = CLASSQ_PKT_INITIALIZER(*p); - *pdrop = TRUE; - return ENOBUFS; - } - pktsched_pkt_encap(&pkt, p); + pktsched_pkt_encap_chain(&pkt, head, tail, cnt, bytes); fqs = (fq_if_t *)ifq->ifcq_disc; svc = pktsched_get_pkt_svc(&pkt); @@ -303,16 +261,16 @@ fq_if_enqueue_classq(struct ifclassq *ifq, classq_pkt_t *p, boolean_t *pdrop) VERIFY(pri < FQ_IF_MAX_CLASSES); fq_cl = &fqs->fqs_classq[pri]; - if (svc == MBUF_SC_BK_SYS && fqs->fqs_throttle == 1) { + if (__improbable(svc == MBUF_SC_BK_SYS && fqs->fqs_throttle == 1)) { /* BK_SYS is currently throttled */ - 
fq_cl->fcl_stat.fcl_throttle_drops++; - IFCQ_CONVERT_LOCK(ifq); + atomic_add_32(&fq_cl->fcl_stat.fcl_throttle_drops, 1); pktsched_free_pkt(&pkt); *pdrop = TRUE; - return EQSUSPENDED; + ret = EQSUSPENDED; + goto done; } - len = pktsched_get_pkt_len(&pkt); + IFCQ_LOCK_SPIN(ifq); ret = fq_addq(fqs, &pkt, fq_cl); if (!(fqs->fqs_flags & FQS_DRIVER_MANAGED) && !FQ_IF_CLASSQ_IDLE(fq_cl)) { @@ -326,54 +284,69 @@ fq_if_enqueue_classq(struct ifclassq *ifq, classq_pkt_t *p, boolean_t *pdrop) } } - if (ret != 0) { + if (__improbable(ret != 0)) { if (ret == CLASSQEQ_SUCCESS_FC) { /* packet enqueued, return advisory feedback */ ret = EQFULL; *pdrop = FALSE; + } else if (ret == CLASSQEQ_COMPRESSED) { + ret = 0; + *pdrop = FALSE; } else { + IFCQ_UNLOCK(ifq); *pdrop = TRUE; - VERIFY(ret == CLASSQEQ_DROP || - ret == CLASSQEQ_DROP_FC || - ret == CLASSQEQ_DROP_SP); pktsched_free_pkt(&pkt); switch (ret) { case CLASSQEQ_DROP: - return ENOBUFS; + ret = ENOBUFS; + goto done; case CLASSQEQ_DROP_FC: - return EQFULL; + ret = EQFULL; + goto done; case CLASSQEQ_DROP_SP: - return EQSUSPENDED; + ret = EQSUSPENDED; + goto done; + default: + VERIFY(0); + /* NOTREACHED */ + __builtin_unreachable(); } + /* NOTREACHED */ + __builtin_unreachable(); } } else { *pdrop = FALSE; } - IFCQ_INC_LEN(ifq); - IFCQ_INC_BYTES(ifq, len); + IFCQ_ADD_LEN(ifq, cnt); + IFCQ_INC_BYTES(ifq, bytes); + IFCQ_UNLOCK(ifq); +done: return ret; } -static void +void fq_if_dequeue_classq(struct ifclassq *ifq, classq_pkt_t *pkt) { (void) fq_if_dequeue_classq_multi(ifq, 1, CLASSQ_DEQUEUE_MAX_BYTE_LIMIT, pkt, NULL, NULL, NULL); } -static void +void fq_if_dequeue_sc_classq(struct ifclassq *ifq, mbuf_svc_class_t svc, classq_pkt_t *pkt) { fq_if_t *fqs = (fq_if_t *)ifq->ifcq_disc; + uint32_t total_pktcnt = 0, total_bytecnt = 0; fq_if_classq_t *fq_cl; - u_int32_t pri; + uint8_t pri; pri = fq_if_service_to_priority(fqs, svc); fq_cl = &fqs->fqs_classq[pri]; fq_if_dequeue(fqs, fq_cl, 1, CLASSQ_DEQUEUE_MAX_BYTE_LIMIT, - pkt, NULL, NULL, NULL, TRUE); + pkt, NULL, &total_pktcnt, &total_bytecnt, TRUE); + + IFCQ_XMIT_ADD(ifq, total_pktcnt, total_bytecnt); } int @@ -514,7 +487,7 @@ fq_if_dequeue_sc_classq_multi(struct ifclassq *ifq, mbuf_svc_class_t svc, classq_pkt_t *last_packet, u_int32_t *retpktcnt, u_int32_t *retbytecnt) { fq_if_t *fqs = (fq_if_t *)ifq->ifcq_disc; - u_int32_t pri; + uint8_t pri; u_int32_t total_pktcnt = 0, total_bytecnt = 0; fq_if_classq_t *fq_cl; classq_pkt_t first = CLASSQ_PKT_INITIALIZER(fisrt); @@ -577,6 +550,8 @@ fq_if_dequeue_sc_classq_multi(struct ifclassq *ifq, mbuf_svc_class_t svc, *retbytecnt = total_bytecnt; } + IFCQ_XMIT_ADD(ifq, total_pktcnt, total_bytecnt); + return 0; } @@ -724,7 +699,7 @@ static int fq_if_throttle(fq_if_t *fqs, cqrq_throttle_t *tr) { struct ifclassq *ifq = fqs->fqs_ifq; - int index; + uint8_t index; #if !MACH_ASSERT #pragma unused(ifq) #endif @@ -757,7 +732,7 @@ fq_if_throttle(fq_if_t *fqs, cqrq_throttle_t *tr) void fq_if_stat_sc(fq_if_t *fqs, cqrq_stat_sc_t *stat) { - u_int32_t pri; + uint8_t pri; fq_if_classq_t *fq_cl; if (stat == NULL) { @@ -766,8 +741,8 @@ fq_if_stat_sc(fq_if_t *fqs, cqrq_stat_sc_t *stat) pri = fq_if_service_to_priority(fqs, stat->sc); fq_cl = &fqs->fqs_classq[pri]; - stat->packets = fq_cl->fcl_stat.fcl_pkt_cnt; - stat->bytes = fq_cl->fcl_stat.fcl_byte_cnt; + stat->packets = (uint32_t)fq_cl->fcl_stat.fcl_pkt_cnt; + stat->bytes = (uint32_t)fq_cl->fcl_stat.fcl_byte_cnt; } int @@ -857,10 +832,7 @@ fq_if_setup_ifclassq(struct ifclassq *ifq, u_int32_t flags, 8, MBUF_SC_CTL); } - err = 
ifclassq_attach(ifq, PKTSCHEDT_FQ_CODEL, fqs, - fq_if_enqueue_classq, fq_if_dequeue_classq, - fq_if_dequeue_sc_classq, fq_if_dequeue_classq_multi, - fq_if_dequeue_sc_classq_multi, fq_if_request_classq); + err = ifclassq_attach(ifq, PKTSCHEDT_FQ_CODEL, fqs); if (err != 0) { printf("%s: error from ifclassq_attach, " @@ -1096,9 +1068,9 @@ fq_if_flow_feedback(fq_if_t *fqs, fq_t *fq, fq_if_classq_t *fq_cl) } void -fq_if_dequeue(fq_if_t *fqs, fq_if_classq_t *fq_cl, u_int32_t pktlimit, - u_int32_t bytelimit, classq_pkt_t *top, classq_pkt_t *tail, - u_int32_t *retpktcnt, u_int32_t *retbytecnt, boolean_t drvmgmt) +fq_if_dequeue(fq_if_t *fqs, fq_if_classq_t *fq_cl, uint32_t pktlimit, + int64_t bytelimit, classq_pkt_t *top, classq_pkt_t *tail, + uint32_t *retpktcnt, uint32_t *retbytecnt, boolean_t drvmgmt) { fq_t *fq = NULL, *tfq = NULL; flowq_stailq_t temp_stailq; @@ -1123,7 +1095,7 @@ fq_if_dequeue(fq_if_t *fqs, fq_if_classq_t *fq_cl, u_int32_t pktlimit, * maximum byte limit should not be greater than the budget for * this class */ - if ((int32_t)bytelimit > fq_cl->fcl_budget && !drvmgmt) { + if (bytelimit > fq_cl->fcl_budget && !drvmgmt) { bytelimit = fq_cl->fcl_budget; } @@ -1194,7 +1166,7 @@ done: } } -int +void fq_if_teardown_ifclassq(struct ifclassq *ifq) { fq_if_t *fqs = (fq_if_t *)ifq->ifcq_disc; @@ -1204,7 +1176,7 @@ fq_if_teardown_ifclassq(struct ifclassq *ifq) fq_if_destroy(fqs); ifq->ifcq_disc = NULL; - return ifclassq_detach(ifq); + ifclassq_detach(ifq); } static void @@ -1212,7 +1184,7 @@ fq_export_flowstats(fq_if_t *fqs, fq_t *fq, struct fq_codel_flowstats *flowstat) { bzero(flowstat, sizeof(*flowstat)); - flowstat->fqst_min_qdelay = fq->fq_min_qdelay; + flowstat->fqst_min_qdelay = (uint32_t)fq->fq_min_qdelay; flowstat->fqst_bytes = fq->fq_bytes; flowstat->fqst_flowhash = fq->fq_flowhash; if (fq->fq_flags & FQF_NEW_FLOW) { @@ -1277,6 +1249,8 @@ fq_if_getqstats_ifclassq(struct ifclassq *ifq, u_int32_t qid, fcls->fcls_throttle_off = fq_cl->fcl_stat.fcl_throttle_off; fcls->fcls_throttle_drops = fq_cl->fcl_stat.fcl_throttle_drops; fcls->fcls_dup_rexmts = fq_cl->fcl_stat.fcl_dup_rexmts; + fcls->fcls_pkts_compressible = fq_cl->fcl_stat.fcl_pkts_compressible; + fcls->fcls_pkts_compressed = fq_cl->fcl_stat.fcl_pkts_compressed; /* Gather per flow stats */ flowstat_cnt = min((fcls->fcls_newflows_cnt + diff --git a/bsd/net/pktsched/pktsched_fq_codel.h b/bsd/net/pktsched/pktsched_fq_codel.h index 7d55fa5bf..ce05193bc 100644 --- a/bsd/net/pktsched/pktsched_fq_codel.h +++ b/bsd/net/pktsched/pktsched_fq_codel.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016 Apple Inc. All rights reserved. + * Copyright (c) 2016-2020 Apple Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -35,6 +35,7 @@ #ifdef BSD_KERNEL_PRIVATE #include +#include #endif /* BSD_KERNEL_PRIVATE */ #ifdef __cplusplus @@ -61,6 +62,8 @@ struct fcl_stat { u_int32_t fcl_throttle_off; u_int32_t fcl_throttle_drops; u_int32_t fcl_dup_rexmts; + u_int32_t fcl_pkts_compressible; + u_int32_t fcl_pkts_compressed; }; /* @@ -78,6 +81,8 @@ struct fcl_stat { /* Max number of service classes currently supported */ #define FQ_IF_MAX_CLASSES 10 +_Static_assert(FQ_IF_MAX_CLASSES < 127, + "maximum number of classes needs to fit in a single byte"); #define FQ_IF_LARGE_FLOW_BYTE_LIMIT 15000 @@ -112,10 +117,10 @@ enum fq_if_state { typedef SLIST_HEAD(, flowq) flowq_list_t; typedef STAILQ_HEAD(, flowq) flowq_stailq_t; typedef struct fq_if_classq { - u_int32_t fcl_pri; /* class priority, lower the better */ - u_int32_t fcl_service_class; /* service class */ - u_int32_t fcl_quantum; /* quantum in bytes */ - u_int32_t fcl_drr_max; /* max flows per class for DRR */ + uint32_t fcl_pri; /* class priority, lower the better */ + uint32_t fcl_service_class; /* service class */ + uint16_t fcl_quantum; /* quantum in bytes */ + uint32_t fcl_drr_max; /* max flows per class for DRR */ int64_t fcl_budget; /* budget for this classq */ flowq_stailq_t fcl_new_flows; /* List of new flows */ flowq_stailq_t fcl_old_flows; /* List of old flows */ @@ -182,11 +187,26 @@ struct fq_codel_classstats { u_int32_t fcls_dup_rexmts; u_int32_t fcls_flowstats_cnt; struct fq_codel_flowstats fcls_flowstats[FQ_IF_MAX_FLOWSTATS]; + u_int32_t fcls_pkts_compressible; + u_int32_t fcls_pkts_compressed; }; #ifdef BSD_KERNEL_PRIVATE extern void fq_codel_scheduler_init(void); +extern int fq_if_enqueue_classq(struct ifclassq *ifq, classq_pkt_t *h, + classq_pkt_t *t, uint32_t cnt, uint32_t bytes, boolean_t *pdrop); +extern void fq_if_dequeue_classq(struct ifclassq *ifq, classq_pkt_t *pkt); +extern void fq_if_dequeue_sc_classq(struct ifclassq *ifq, mbuf_svc_class_t svc, + classq_pkt_t *pkt); +extern int fq_if_dequeue_classq_multi(struct ifclassq *ifq, u_int32_t maxpktcnt, + u_int32_t maxbytecnt, classq_pkt_t *first_packet, classq_pkt_t *last_packet, + u_int32_t *retpktcnt, u_int32_t *retbytecnt); +extern int fq_if_dequeue_sc_classq_multi(struct ifclassq *ifq, + mbuf_svc_class_t svc, u_int32_t maxpktcnt, u_int32_t maxbytecnt, + classq_pkt_t *first_packet, classq_pkt_t *last_packet, u_int32_t *retpktcnt, + u_int32_t *retbytecnt); +extern int fq_if_request_classq(struct ifclassq *ifq, cqrq_t rq, void *arg); extern struct flowq *fq_if_hash_pkt(fq_if_t *, u_int32_t, mbuf_svc_class_t, u_int64_t, boolean_t, classq_pkt_type_t); extern boolean_t fq_if_at_drop_limit(fq_if_t *); @@ -197,7 +217,7 @@ extern boolean_t fq_if_add_fcentry(fq_if_t *, pktsched_pkt_t *, uint32_t, extern void fq_if_flow_feedback(fq_if_t *, struct flowq *, fq_if_classq_t *); extern int fq_if_setup_ifclassq(struct ifclassq *ifq, u_int32_t flags, classq_pkt_type_t ptype); -extern int fq_if_teardown_ifclassq(struct ifclassq *ifq); +extern void fq_if_teardown_ifclassq(struct ifclassq *ifq); extern int fq_if_getqstats_ifclassq(struct ifclassq *ifq, u_int32_t qid, struct if_ifclassq_stats *ifqs); extern void fq_if_destroy_flow(fq_if_t *, fq_if_classq_t *, diff --git a/bsd/net/pktsched/pktsched_netem.c b/bsd/net/pktsched/pktsched_netem.c index 0cbc0fc37..a10e2f0d4 100644 --- a/bsd/net/pktsched/pktsched_netem.c +++ b/bsd/net/pktsched/pktsched_netem.c @@ -596,12 +596,12 @@ struct heap_elem { }; struct heap { - uint32_t limit; /* max size */ - uint32_t size; /* 
current size */ + uint64_t limit; /* max size */ + uint64_t size; /* current size */ struct heap_elem p[0]; }; -static struct heap *heap_create(uint32_t size); +static struct heap *heap_create(uint64_t size); static int heap_insert(struct heap *h, uint64_t k, pktsched_pkt_t *p); static int heap_peek(struct heap *h, uint64_t *k, pktsched_pkt_t *p); static int heap_extract(struct heap *h, uint64_t *k, pktsched_pkt_t *p); @@ -690,7 +690,7 @@ struct netem { lck_mtx_unlock(&(_sch)->netem_lock) static struct heap * -heap_create(uint32_t limit) +heap_create(uint64_t limit) { struct heap *h = NULL; @@ -726,10 +726,10 @@ heap_insert(struct heap *h, uint64_t key, pktsched_pkt_t *pkt) ASSERT(h != NULL); if (h->size == h->limit) { - return ENOMEM; + return ENOBUFS; } - uint32_t child, parent; + uint64_t child, parent; if (pkt == NULL) { child = key; ASSERT(child < h->size); @@ -768,7 +768,7 @@ heap_peek(struct heap *h, uint64_t *key, pktsched_pkt_t *pkt) static int heap_extract(struct heap *h, uint64_t *key, pktsched_pkt_t *pkt) { - uint32_t child, parent, max; + uint64_t child, parent, max; if (h->size == 0) { netem_log(NETEM_LOG_ERROR, "warning: extract from empty heap"); @@ -1117,20 +1117,6 @@ done: return ret; } -int -netem_dequeue(struct netem *ne, pktsched_pkt_t *p, - boolean_t *ppending) -{ - int ret; - - NETEM_MTX_LOCK(ne); - netem_update_locked(ne); - ret = netem_dequeue_internal_locked(ne, p, ppending); - NETEM_MTX_UNLOCK(ne); - - return ret; -} - __attribute__((noreturn)) static void netem_output_thread_cont(void *v, wait_result_t w) @@ -1427,17 +1413,17 @@ netem_set_params(struct netem *ne, const struct if_netem_params *p) struct reordering *r = &ne->netem_reordering_model; r->reordering_p = p->ifnetem_reordering_p; - netem_log(NETEM_LOG_INFO, "success: bandwidth %d bps", tb->rate); - netem_log(NETEM_LOG_INFO, "success: corruption %d\%", + netem_log(NETEM_LOG_INFO, "success: bandwidth %llu bps", tb->rate); + netem_log(NETEM_LOG_INFO, "success: corruption %d%% ", corr->corruption_p); - netem_log(NETEM_LOG_INFO, "success: duplication %d\%", + netem_log(NETEM_LOG_INFO, "success: duplication %d%%", dup->duplication_p); netem_log(NETEM_LOG_INFO, "success: latency_ms %d jitter_ms %d", late->latency_ms, late->jitter_ms); netem_log(NETEM_LOG_INFO, "changed loss p_gr_gl %d p_gr_bl %d " "p_bl_gr %d p_bl_br %d p_br_bl %d", loss->p_gr_gl, loss->p_gr_bl, loss->p_bl_gr, loss->p_bl_br, loss->p_br_bl); - netem_log(NETEM_LOG_DEBUG, "success: reordering %d\%", + netem_log(NETEM_LOG_DEBUG, "success: reordering %d%%", r->reordering_p); NETEM_MTX_UNLOCK(ne); @@ -1574,12 +1560,4 @@ netem_enqueue(struct netem *ne, classq_pkt_t *p, boolean_t *pdrop) panic("unexpected netem call"); return 0; } - -int -netem_dequeue(struct netem *ne, pktsched_pkt_t *p, boolean_t *ppending) -{ -#pragma unused(ne, p, ppending) - panic("unexpected netem call"); - return 0; -} #endif /* !CONFIG_NETEM */ diff --git a/bsd/net/pktsched/pktsched_netem.h b/bsd/net/pktsched/pktsched_netem.h index bcd8b5a89..b347e5ea7 100644 --- a/bsd/net/pktsched/pktsched_netem.h +++ b/bsd/net/pktsched/pktsched_netem.h @@ -40,7 +40,5 @@ extern int netem_config(struct netem **ne, const char *name, extern void netem_get_params(struct netem *ne, struct if_netem_params *p); extern void netem_destroy(struct netem *ne); extern int netem_enqueue(struct netem *ne, classq_pkt_t *p, boolean_t *pdrop); -extern int netem_dequeue(struct netem *ne, pktsched_pkt_t *p, - boolean_t *ppending); __END_DECLS diff --git a/bsd/net/pktsched/pktsched_qfq.c 
b/bsd/net/pktsched/pktsched_qfq.c deleted file mode 100644 index 7ed8559a2..000000000 --- a/bsd/net/pktsched/pktsched_qfq.c +++ /dev/null @@ -1,1968 +0,0 @@ -/* - * Copyright (c) 2011-2019 Apple Inc. All rights reserved. - * - * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * - * This file contains Original Code and/or Modifications of Original Code - * as defined in and that are subject to the Apple Public Source License - * Version 2.0 (the 'License'). You may not use this file except in - * compliance with the License. The rights granted to you under the License - * may not be used to create, or enable the creation or redistribution of, - * unlawful or unlicensed copies of an Apple operating system, or to - * circumvent, violate, or enable the circumvention or violation of, any - * terms of an Apple operating system software license agreement. - * - * Please obtain a copy of the License at - * http://www.opensource.apple.com/apsl/ and read it before using this file. - * - * The Original Code and all software distributed under the License are - * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER - * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, - * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. - * Please see the License for the specific language governing rights and - * limitations under the License. - * - * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ - */ - -/* - * Copyright (c) 2010 Fabio Checconi, Luigi Rizzo, Paolo Valente - * All rights reserved - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - */ - -/* - * Quick Fair Queueing is described in - * "QFQ: Efficient Packet Scheduling with Tight Bandwidth Distribution - * Guarantees" by Fabio Checconi, Paolo Valente, and Luigi Rizzo. - * - * This code is ported from the dummynet(4) QFQ implementation. 
- * See also http://info.iet.unipi.it/~luigi/qfq/ - */ - -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -#include -#include - -#include -#include - - -/* - * function prototypes - */ -static int qfq_enqueue_ifclassq(struct ifclassq *, classq_pkt_t *, boolean_t *); -static void qfq_dequeue_ifclassq(struct ifclassq *, classq_pkt_t *); -static int qfq_request_ifclassq(struct ifclassq *, cqrq_t, void *); -static int qfq_clear_interface(struct qfq_if *); -static struct qfq_class *qfq_class_create(struct qfq_if *, u_int32_t, - u_int32_t, u_int32_t, u_int32_t, u_int32_t, classq_pkt_type_t); -static int qfq_class_destroy(struct qfq_if *, struct qfq_class *); -static int qfq_destroy_locked(struct qfq_if *); -static inline int qfq_addq(struct qfq_class *, pktsched_pkt_t *, - struct pf_mtag *); -static inline void qfq_getq(struct qfq_class *, pktsched_pkt_t *); -static void qfq_purgeq(struct qfq_if *, struct qfq_class *, u_int32_t, - u_int32_t *, u_int32_t *); -static void qfq_purge_sc(struct qfq_if *, cqrq_purge_sc_t *); -static void qfq_updateq(struct qfq_if *, struct qfq_class *, cqev_t); -static int qfq_throttle(struct qfq_if *, cqrq_throttle_t *); -static int qfq_resumeq(struct qfq_if *, struct qfq_class *); -static int qfq_suspendq(struct qfq_if *, struct qfq_class *); -static int qfq_stat_sc(struct qfq_if *, cqrq_stat_sc_t *); -static inline struct qfq_class *qfq_clh_to_clp(struct qfq_if *, u_int32_t); -static const char *qfq_style(struct qfq_if *); - -static inline int qfq_gt(u_int64_t, u_int64_t); -static inline u_int64_t qfq_round_down(u_int64_t, u_int32_t); -static inline struct qfq_group *qfq_ffs(struct qfq_if *, pktsched_bitmap_t); -static int qfq_calc_index(struct qfq_class *, u_int32_t, u_int32_t); -static inline pktsched_bitmap_t mask_from(pktsched_bitmap_t, int); -static inline u_int32_t qfq_calc_state(struct qfq_if *, struct qfq_group *); -static inline void qfq_move_groups(struct qfq_if *, pktsched_bitmap_t, - int, int); -static inline void qfq_unblock_groups(struct qfq_if *, int, u_int64_t); -static inline void qfq_make_eligible(struct qfq_if *, u_int64_t); -static inline void qfq_slot_insert(struct qfq_if *, struct qfq_group *, - struct qfq_class *, u_int64_t); -static inline void qfq_front_slot_remove(struct qfq_group *); -static inline struct qfq_class *qfq_slot_scan(struct qfq_if *, - struct qfq_group *); -static inline void qfq_slot_rotate(struct qfq_if *, struct qfq_group *, - u_int64_t); -static inline void qfq_update_eligible(struct qfq_if *, u_int64_t); -static inline int qfq_update_class(struct qfq_if *, struct qfq_group *, - struct qfq_class *); -static inline void qfq_update_start(struct qfq_if *, struct qfq_class *); -static inline void qfq_slot_remove(struct qfq_if *, struct qfq_group *, - struct qfq_class *); -static void qfq_deactivate_class(struct qfq_if *, struct qfq_class *); -static const char *qfq_state2str(int); -#if QFQ_DEBUG -static void qfq_dump_groups(struct qfq_if *, u_int32_t); -static void qfq_dump_sched(struct qfq_if *, const char *); -#endif /* QFQ_DEBUG */ - -#define QFQ_ZONE_MAX 32 /* maximum elements in zone */ -#define QFQ_ZONE_NAME "pktsched_qfq" /* zone name */ - -static unsigned int qfq_size; /* size of zone element */ -static struct zone *qfq_zone; /* zone for qfq */ - -#define QFQ_CL_ZONE_MAX 32 /* maximum elements in zone */ -#define QFQ_CL_ZONE_NAME "pktsched_qfq_cl" /* zone name */ - -static unsigned int qfq_cl_size; /* size of zone element */ -static struct zone *qfq_cl_zone; /* zone for 
qfq_class */ - -/* - * Maximum number of consecutive slots occupied by backlogged classes - * inside a group. This is approx lmax/lmin + 5. Used when ALTQ is - * available. - * - * XXX check because it poses constraints on MAX_INDEX - */ -#define QFQ_MAX_SLOTS 32 /* default when ALTQ is available */ - -void -qfq_init(void) -{ - qfq_size = sizeof(struct qfq_if); - qfq_zone = zinit(qfq_size, QFQ_ZONE_MAX * qfq_size, - 0, QFQ_ZONE_NAME); - if (qfq_zone == NULL) { - panic("%s: failed allocating %s", __func__, QFQ_ZONE_NAME); - /* NOTREACHED */ - } - zone_change(qfq_zone, Z_EXPAND, TRUE); - zone_change(qfq_zone, Z_CALLERACCT, TRUE); - - qfq_cl_size = sizeof(struct qfq_class); - qfq_cl_zone = zinit(qfq_cl_size, QFQ_CL_ZONE_MAX * qfq_cl_size, - 0, QFQ_CL_ZONE_NAME); - if (qfq_cl_zone == NULL) { - panic("%s: failed allocating %s", __func__, QFQ_CL_ZONE_NAME); - /* NOTREACHED */ - } - zone_change(qfq_cl_zone, Z_EXPAND, TRUE); - zone_change(qfq_cl_zone, Z_CALLERACCT, TRUE); -} - -struct qfq_if * -qfq_alloc(struct ifnet *ifp, int how) -{ - struct qfq_if *qif; - - qif = (how == M_WAITOK) ? zalloc(qfq_zone) : zalloc_noblock(qfq_zone); - if (qif == NULL) { - return NULL; - } - - bzero(qif, qfq_size); - qif->qif_ifq = &ifp->if_snd; - - qif->qif_maxclasses = IFCQ_SC_MAX; - /* - * TODO: adi@apple.com - * - * Ideally I would like to have the following - * but QFQ needs further modifications. - * - * qif->qif_maxslots = IFCQ_SC_MAX; - */ - qif->qif_maxslots = QFQ_MAX_SLOTS; - - if ((qif->qif_class_tbl = _MALLOC(sizeof(struct qfq_class *) * - qif->qif_maxclasses, M_DEVBUF, M_WAITOK | M_ZERO)) == NULL) { - log(LOG_ERR, "%s: %s unable to allocate class table array\n", - if_name(ifp), qfq_style(qif)); - goto error; - } - - if ((qif->qif_groups = _MALLOC(sizeof(struct qfq_group *) * - (QFQ_MAX_INDEX + 1), M_DEVBUF, M_WAITOK | M_ZERO)) == NULL) { - log(LOG_ERR, "%s: %s unable to allocate group array\n", - if_name(ifp), qfq_style(qif)); - goto error; - } - - if (pktsched_verbose) { - log(LOG_DEBUG, "%s: %s scheduler allocated\n", - if_name(ifp), qfq_style(qif)); - } - - return qif; - -error: - if (qif->qif_class_tbl != NULL) { - _FREE(qif->qif_class_tbl, M_DEVBUF); - qif->qif_class_tbl = NULL; - } - if (qif->qif_groups != NULL) { - _FREE(qif->qif_groups, M_DEVBUF); - qif->qif_groups = NULL; - } - zfree(qfq_zone, qif); - - return NULL; -} - -int -qfq_destroy(struct qfq_if *qif) -{ - struct ifclassq *ifq = qif->qif_ifq; - int err; - - IFCQ_LOCK(ifq); - err = qfq_destroy_locked(qif); - IFCQ_UNLOCK(ifq); - - return err; -} - -static int -qfq_destroy_locked(struct qfq_if *qif) -{ - int i; - - IFCQ_LOCK_ASSERT_HELD(qif->qif_ifq); - - (void) qfq_clear_interface(qif); - - VERIFY(qif->qif_class_tbl != NULL); - _FREE(qif->qif_class_tbl, M_DEVBUF); - qif->qif_class_tbl = NULL; - - VERIFY(qif->qif_groups != NULL); - for (i = 0; i <= QFQ_MAX_INDEX; i++) { - struct qfq_group *grp = qif->qif_groups[i]; - - if (grp != NULL) { - VERIFY(grp->qfg_slots != NULL); - _FREE(grp->qfg_slots, M_DEVBUF); - grp->qfg_slots = NULL; - _FREE(grp, M_DEVBUF); - qif->qif_groups[i] = NULL; - } - } - _FREE(qif->qif_groups, M_DEVBUF); - qif->qif_groups = NULL; - - if (pktsched_verbose) { - log(LOG_DEBUG, "%s: %s scheduler destroyed\n", - if_name(QFQIF_IFP(qif)), qfq_style(qif)); - } - - zfree(qfq_zone, qif); - - return 0; -} - -/* - * bring the interface back to the initial state by discarding - * all the filters and classes. 
- */ -static int -qfq_clear_interface(struct qfq_if *qif) -{ - struct qfq_class *cl; - int i; - - IFCQ_LOCK_ASSERT_HELD(qif->qif_ifq); - - /* clear out the classes */ - for (i = 0; i < qif->qif_maxclasses; i++) { - if ((cl = qif->qif_class_tbl[i]) != NULL) { - qfq_class_destroy(qif, cl); - } - } - - return 0; -} - -/* discard all the queued packets on the interface */ -void -qfq_purge(struct qfq_if *qif) -{ - struct qfq_class *cl; - int i; - - IFCQ_LOCK_ASSERT_HELD(qif->qif_ifq); - - for (i = 0; i < qif->qif_maxclasses; i++) { - if ((cl = qif->qif_class_tbl[i]) != NULL) { - qfq_purgeq(qif, cl, 0, NULL, NULL); - } - } - VERIFY(IFCQ_LEN(qif->qif_ifq) == 0); -} - -static void -qfq_purge_sc(struct qfq_if *qif, cqrq_purge_sc_t *pr) -{ - struct ifclassq *ifq = qif->qif_ifq; - u_int32_t i; - - IFCQ_LOCK_ASSERT_HELD(ifq); - - VERIFY(pr->sc == MBUF_SC_UNSPEC || MBUF_VALID_SC(pr->sc)); - VERIFY(pr->flow != 0); - - if (pr->sc != MBUF_SC_UNSPEC) { - i = MBUF_SCIDX(pr->sc); - VERIFY(i < IFCQ_SC_MAX); - - qfq_purgeq(qif, ifq->ifcq_disc_slots[i].cl, - pr->flow, &pr->packets, &pr->bytes); - } else { - u_int32_t cnt, len; - - pr->packets = 0; - pr->bytes = 0; - - for (i = 0; i < IFCQ_SC_MAX; i++) { - qfq_purgeq(qif, ifq->ifcq_disc_slots[i].cl, - pr->flow, &cnt, &len); - pr->packets += cnt; - pr->bytes += len; - } - } -} - -void -qfq_event(struct qfq_if *qif, cqev_t ev) -{ - struct qfq_class *cl; - int i; - - IFCQ_LOCK_ASSERT_HELD(qif->qif_ifq); - - for (i = 0; i < qif->qif_maxclasses; i++) { - if ((cl = qif->qif_class_tbl[i]) != NULL) { - qfq_updateq(qif, cl, ev); - } - } -} - -int -qfq_add_queue(struct qfq_if *qif, u_int32_t qlimit, u_int32_t weight, - u_int32_t maxsz, u_int32_t flags, u_int32_t qid, struct qfq_class **clp, - classq_pkt_type_t ptype) -{ - struct qfq_class *cl; - u_int32_t w; - - IFCQ_LOCK_ASSERT_HELD(qif->qif_ifq); - - if (qfq_clh_to_clp(qif, qid) != NULL) { - return EBUSY; - } - - /* check parameters */ - if (weight == 0 || weight > QFQ_MAX_WEIGHT) { - return EINVAL; - } - - w = (QFQ_ONE_FP / (QFQ_ONE_FP / weight)); - if (qif->qif_wsum + w > QFQ_MAX_WSUM) { - return EINVAL; - } - - if (maxsz == 0 || maxsz > (1 << QFQ_MTU_SHIFT)) { - return EINVAL; - } - - cl = qfq_class_create(qif, weight, qlimit, flags, maxsz, qid, ptype); - if (cl == NULL) { - return ENOMEM; - } - - if (clp != NULL) { - *clp = cl; - } - - return 0; -} - -static struct qfq_class * -qfq_class_create(struct qfq_if *qif, u_int32_t weight, u_int32_t qlimit, - u_int32_t flags, u_int32_t maxsz, u_int32_t qid, classq_pkt_type_t ptype) -{ - struct ifnet *ifp; - struct ifclassq *ifq; - struct qfq_group *grp; - struct qfq_class *cl; - u_int32_t w; /* approximated weight */ - int i; - - IFCQ_LOCK_ASSERT_HELD(qif->qif_ifq); - - if (qif->qif_classes >= qif->qif_maxclasses) { - log(LOG_ERR, "%s: %s out of classes! (max %d)\n", - if_name(QFQIF_IFP(qif)), qfq_style(qif), - qif->qif_maxclasses); - return NULL; - } - - ifq = qif->qif_ifq; - ifp = QFQIF_IFP(qif); - - cl = zalloc(qfq_cl_zone); - if (cl == NULL) { - return NULL; - } - - bzero(cl, qfq_cl_size); - - if (qlimit == 0 || qlimit > IFCQ_MAXLEN(ifq)) { - qlimit = IFCQ_MAXLEN(ifq); - if (qlimit == 0) { - qlimit = DEFAULT_QLIMIT; /* use default */ - } - } - _qinit(&cl->cl_q, Q_DROPTAIL, qlimit, ptype); - cl->cl_qif = qif; - cl->cl_flags = flags; - cl->cl_handle = qid; - - /* - * Find a free slot in the class table. If the slot matching - * the lower bits of qid is free, use this slot. Otherwise, - * use the first free slot. 
- */ - i = qid % qif->qif_maxclasses; - if (qif->qif_class_tbl[i] == NULL) { - qif->qif_class_tbl[i] = cl; - } else { - for (i = 0; i < qif->qif_maxclasses; i++) { - if (qif->qif_class_tbl[i] == NULL) { - qif->qif_class_tbl[i] = cl; - break; - } - } - if (i == qif->qif_maxclasses) { - zfree(qfq_cl_zone, cl); - return NULL; - } - } - - w = weight; - VERIFY(w > 0 && w <= QFQ_MAX_WEIGHT); - cl->cl_lmax = maxsz; - cl->cl_inv_w = (QFQ_ONE_FP / w); - w = (QFQ_ONE_FP / cl->cl_inv_w); - VERIFY(qif->qif_wsum + w <= QFQ_MAX_WSUM); - - i = qfq_calc_index(cl, cl->cl_inv_w, cl->cl_lmax); - VERIFY(i <= QFQ_MAX_INDEX); - grp = qif->qif_groups[i]; - if (grp == NULL) { - grp = _MALLOC(sizeof(*grp), M_DEVBUF, M_WAITOK | M_ZERO); - if (grp != NULL) { - grp->qfg_index = i; - grp->qfg_slot_shift = - QFQ_MTU_SHIFT + QFQ_FRAC_BITS - (QFQ_MAX_INDEX - i); - grp->qfg_slots = _MALLOC(sizeof(struct qfq_class *) * - qif->qif_maxslots, M_DEVBUF, M_WAITOK | M_ZERO); - if (grp->qfg_slots == NULL) { - log(LOG_ERR, "%s: %s unable to allocate group " - "slots for index %d\n", if_name(ifp), - qfq_style(qif), i); - } - } else { - log(LOG_ERR, "%s: %s unable to allocate group for " - "qid=%d\n", if_name(ifp), qfq_style(qif), - cl->cl_handle); - } - if (grp == NULL || grp->qfg_slots == NULL) { - qif->qif_class_tbl[qid % qif->qif_maxclasses] = NULL; - if (grp != NULL) { - _FREE(grp, M_DEVBUF); - } - zfree(qfq_cl_zone, cl); - return NULL; - } else { - qif->qif_groups[i] = grp; - } - } - cl->cl_grp = grp; - qif->qif_wsum += w; - /* XXX cl->cl_S = qif->qif_V; ? */ - /* XXX compute qif->qif_i_wsum */ - - qif->qif_classes++; - - if (flags & QFCF_DEFAULTCLASS) { - qif->qif_default = cl; - } - - if (flags & QFCF_SFB) { - cl->cl_qflags = 0; - if (flags & QFCF_ECN) { - cl->cl_qflags |= SFBF_ECN; - } - if (flags & QFCF_FLOWCTL) { - cl->cl_qflags |= SFBF_FLOWCTL; - } - if (flags & QFCF_DELAYBASED) { - cl->cl_qflags |= SFBF_DELAYBASED; - } - if (!(cl->cl_flags & QFCF_LAZY)) { - cl->cl_sfb = sfb_alloc(ifp, cl->cl_handle, - qlimit(&cl->cl_q), cl->cl_qflags); - } - if (cl->cl_sfb != NULL || (cl->cl_flags & QFCF_LAZY)) { - qtype(&cl->cl_q) = Q_SFB; - } - } - - if (pktsched_verbose) { - log(LOG_DEBUG, "%s: %s created qid=%d grp=%d weight=%d " - "qlimit=%d flags=%b\n", if_name(ifp), qfq_style(qif), - cl->cl_handle, cl->cl_grp->qfg_index, weight, qlimit, - flags, QFCF_BITS); - } - - return cl; -} - -int -qfq_remove_queue(struct qfq_if *qif, u_int32_t qid) -{ - struct qfq_class *cl; - - IFCQ_LOCK_ASSERT_HELD(qif->qif_ifq); - - if ((cl = qfq_clh_to_clp(qif, qid)) == NULL) { - return EINVAL; - } - - return qfq_class_destroy(qif, cl); -} - -static int -qfq_class_destroy(struct qfq_if *qif, struct qfq_class *cl) -{ - struct ifclassq *ifq = qif->qif_ifq; - int i; -#if !MACH_ASSERT -#pragma unused(ifq) -#endif - - IFCQ_LOCK_ASSERT_HELD(ifq); - - qfq_purgeq(qif, cl, 0, NULL, NULL); - - if (cl->cl_inv_w != 0) { - qif->qif_wsum -= (QFQ_ONE_FP / cl->cl_inv_w); - cl->cl_inv_w = 0; /* reset weight to avoid run twice */ - } - - for (i = 0; i < qif->qif_maxclasses; i++) { - if (qif->qif_class_tbl[i] == cl) { - qif->qif_class_tbl[i] = NULL; - break; - } - } - qif->qif_classes--; - - if (cl->cl_qalg.ptr != NULL) { - if (q_is_sfb(&cl->cl_q) && cl->cl_sfb != NULL) { - sfb_destroy(cl->cl_sfb); - } - cl->cl_qalg.ptr = NULL; - qtype(&cl->cl_q) = Q_DROPTAIL; - qstate(&cl->cl_q) = QS_RUNNING; - } - - if (qif->qif_default == cl) { - qif->qif_default = NULL; - } - - if (pktsched_verbose) { - log(LOG_DEBUG, "%s: %s destroyed qid=%d\n", - if_name(QFQIF_IFP(qif)), 
qfq_style(qif), cl->cl_handle); - } - - zfree(qfq_cl_zone, cl); - - return 0; -} - -/* - * Calculate a mask to mimic what would be ffs_from() - */ -static inline pktsched_bitmap_t -mask_from(pktsched_bitmap_t bitmap, int from) -{ - return bitmap & ~((1UL << from) - 1); -} - -/* - * The state computation relies on ER=0, IR=1, EB=2, IB=3 - * First compute eligibility comparing grp->qfg_S, qif->qif_V, - * then check if someone is blocking us and possibly add EB - */ -static inline u_int32_t -qfq_calc_state(struct qfq_if *qif, struct qfq_group *grp) -{ - /* if S > V we are not eligible */ - u_int32_t state = qfq_gt(grp->qfg_S, qif->qif_V); - pktsched_bitmap_t mask = mask_from(qif->qif_bitmaps[ER], - grp->qfg_index); - struct qfq_group *next; - - if (mask) { - next = qfq_ffs(qif, mask); - if (qfq_gt(grp->qfg_F, next->qfg_F)) { - state |= EB; - } - } - - return state; -} - -/* - * In principle - * qif->qif_bitmaps[dst] |= qif->qif_bitmaps[src] & mask; - * qif->qif_bitmaps[src] &= ~mask; - * but we should make sure that src != dst - */ -static inline void -qfq_move_groups(struct qfq_if *qif, pktsched_bitmap_t mask, int src, int dst) -{ - qif->qif_bitmaps[dst] |= qif->qif_bitmaps[src] & mask; - qif->qif_bitmaps[src] &= ~mask; -} - -static inline void -qfq_unblock_groups(struct qfq_if *qif, int index, u_int64_t old_finish) -{ - pktsched_bitmap_t mask = mask_from(qif->qif_bitmaps[ER], index + 1); - struct qfq_group *next; - - if (mask) { - next = qfq_ffs(qif, mask); - if (!qfq_gt(next->qfg_F, old_finish)) { - return; - } - } - - mask = (1UL << index) - 1; - qfq_move_groups(qif, mask, EB, ER); - qfq_move_groups(qif, mask, IB, IR); -} - -/* - * perhaps - * - * old_V ^= qif->qif_V; - * old_V >>= QFQ_MIN_SLOT_SHIFT; - * if (old_V) { - * ... - * } - */ -static inline void -qfq_make_eligible(struct qfq_if *qif, u_int64_t old_V) -{ - pktsched_bitmap_t mask, vslot, old_vslot; - - vslot = qif->qif_V >> QFQ_MIN_SLOT_SHIFT; - old_vslot = old_V >> QFQ_MIN_SLOT_SHIFT; - - if (vslot != old_vslot) { - mask = (2UL << (__fls(vslot ^ old_vslot))) - 1; - qfq_move_groups(qif, mask, IR, ER); - qfq_move_groups(qif, mask, IB, EB); - } -} - -/* - * XXX we should make sure that slot becomes less than 32. - * This is guaranteed by the input values. - * roundedS is always cl->qfg_S rounded on grp->qfg_slot_shift bits. - */ -static inline void -qfq_slot_insert(struct qfq_if *qif, struct qfq_group *grp, - struct qfq_class *cl, u_int64_t roundedS) -{ - u_int64_t slot = (roundedS - grp->qfg_S) >> grp->qfg_slot_shift; - u_int32_t i = (grp->qfg_front + slot) % qif->qif_maxslots; - - cl->cl_next = grp->qfg_slots[i]; - grp->qfg_slots[i] = cl; - pktsched_bit_set(slot, &grp->qfg_full_slots); -} - -/* - * remove the entry from the slot - */ -static inline void -qfq_front_slot_remove(struct qfq_group *grp) -{ - struct qfq_class **h = &grp->qfg_slots[grp->qfg_front]; - - *h = (*h)->cl_next; - if (!*h) { - pktsched_bit_clr(0, &grp->qfg_full_slots); - } -} - -/* - * Returns the first full queue in a group. As a side effect, - * adjust the bucket list so the first non-empty bucket is at - * position 0 in qfg_full_slots. 
- */ -static inline struct qfq_class * -qfq_slot_scan(struct qfq_if *qif, struct qfq_group *grp) -{ - int i; - - if (pktsched_verbose > 2) { - log(LOG_DEBUG, "%s: %s grp=%d full_slots=0x%x\n", - if_name(QFQIF_IFP(qif)), qfq_style(qif), grp->qfg_index, - grp->qfg_full_slots); - } - - if (grp->qfg_full_slots == 0) { - return NULL; - } - - i = pktsched_ffs(grp->qfg_full_slots) - 1; /* zero-based */ - if (i > 0) { - grp->qfg_front = (grp->qfg_front + i) % qif->qif_maxslots; - grp->qfg_full_slots >>= i; - } - - return grp->qfg_slots[grp->qfg_front]; -} - -/* - * adjust the bucket list. When the start time of a group decreases, - * we move the index down (modulo qif->qif_maxslots) so we don't need to - * move the objects. The mask of occupied slots must be shifted - * because we use ffs() to find the first non-empty slot. - * This covers decreases in the group's start time, but what about - * increases of the start time ? - * Here too we should make sure that i is less than 32 - */ -static inline void -qfq_slot_rotate(struct qfq_if *qif, struct qfq_group *grp, u_int64_t roundedS) -{ -#pragma unused(qif) - u_int32_t i = (grp->qfg_S - roundedS) >> grp->qfg_slot_shift; - - grp->qfg_full_slots <<= i; - grp->qfg_front = (grp->qfg_front - i) % qif->qif_maxslots; -} - -static inline void -qfq_update_eligible(struct qfq_if *qif, u_int64_t old_V) -{ - pktsched_bitmap_t ineligible; - - ineligible = qif->qif_bitmaps[IR] | qif->qif_bitmaps[IB]; - if (ineligible) { - if (!qif->qif_bitmaps[ER]) { - struct qfq_group *grp; - grp = qfq_ffs(qif, ineligible); - if (qfq_gt(grp->qfg_S, qif->qif_V)) { - qif->qif_V = grp->qfg_S; - } - } - qfq_make_eligible(qif, old_V); - } -} - -/* - * Updates the class, returns true if also the group needs to be updated. - */ -static inline int -qfq_update_class(struct qfq_if *qif, struct qfq_group *grp, - struct qfq_class *cl) -{ -#pragma unused(qif) - cl->cl_S = cl->cl_F; - if (qempty(&cl->cl_q)) { - qfq_front_slot_remove(grp); - } else { - u_int32_t len; - u_int64_t roundedS; - - len = m_pktlen((struct mbuf *)qhead(&cl->cl_q)); - cl->cl_F = cl->cl_S + (u_int64_t)len * cl->cl_inv_w; - roundedS = qfq_round_down(cl->cl_S, grp->qfg_slot_shift); - if (roundedS == grp->qfg_S) { - return 0; - } - - qfq_front_slot_remove(grp); - qfq_slot_insert(qif, grp, cl, roundedS); - } - return 1; -} - -/* - * note: CLASSQDQ_POLL returns the next packet without removing the packet - * from the queue. CLASSQDQ_REMOVE is a normal dequeue operation. - * CLASSQDQ_REMOVE must return the same packet if called immediately - * after CLASSQDQ_POLL. 
- */ -void -qfq_dequeue(struct qfq_if *qif, pktsched_pkt_t *pkt) -{ - pktsched_bitmap_t er_bits = qif->qif_bitmaps[ER]; - struct ifclassq *ifq = qif->qif_ifq; - struct qfq_group *grp; - struct qfq_class *cl; - u_int64_t old_V; - u_int32_t len; - - IFCQ_LOCK_ASSERT_HELD(ifq); - - _PKTSCHED_PKT_INIT(pkt); - - for (;;) { - if (er_bits == 0) { -#if QFQ_DEBUG - if (qif->qif_queued && pktsched_verbose > 1) { - qfq_dump_sched(qif, "start dequeue"); - } -#endif /* QFQ_DEBUG */ - /* no eligible and ready packet */ - return; - } - grp = qfq_ffs(qif, er_bits); - /* if group is non-empty, use it */ - if (grp->qfg_full_slots != 0) { - break; - } - pktsched_bit_clr(grp->qfg_index, &er_bits); -#if QFQ_DEBUG - qif->qif_emptygrp++; -#endif /* QFQ_DEBUG */ - } - VERIFY(!IFCQ_IS_EMPTY(ifq)); - - cl = grp->qfg_slots[grp->qfg_front]; - VERIFY(cl != NULL && !qempty(&cl->cl_q)); - - qfq_getq(cl, pkt); - /* qalg must be work conserving */ - VERIFY(pkt->pktsched_ptype != QP_INVALID); - len = pktsched_get_pkt_len(pkt); - -#if QFQ_DEBUG - qif->qif_queued--; -#endif /* QFQ_DEBUG */ - - IFCQ_DEC_LEN(ifq); - IFCQ_DEC_BYTES(ifq, len); - if (qempty(&cl->cl_q)) { - cl->cl_period++; - } - PKTCNTR_ADD(&cl->cl_xmitcnt, 1, len); - IFCQ_XMIT_ADD(ifq, 1, len); - - old_V = qif->qif_V; - qif->qif_V += (u_int64_t)len * QFQ_IWSUM; - - if (pktsched_verbose > 2) { - log(LOG_DEBUG, "%s: %s qid=%d dequeue pkt=0x%llx F=0x%llx " - "V=0x%llx", if_name(QFQIF_IFP(qif)), qfq_style(qif), - cl->cl_handle, - (uint64_t)VM_KERNEL_ADDRPERM(pkt->pktsched_pkt_mbuf), - cl->cl_F, qif->qif_V); - } - - if (qfq_update_class(qif, grp, cl)) { - u_int64_t old_F = grp->qfg_F; - - cl = qfq_slot_scan(qif, grp); - if (!cl) { /* group gone, remove from ER */ - pktsched_bit_clr(grp->qfg_index, &qif->qif_bitmaps[ER]); - } else { - u_int32_t s; - u_int64_t roundedS = - qfq_round_down(cl->cl_S, grp->qfg_slot_shift); - - if (grp->qfg_S == roundedS) { - goto skip_unblock; - } - - grp->qfg_S = roundedS; - grp->qfg_F = roundedS + (2ULL << grp->qfg_slot_shift); - - /* remove from ER and put in the new set */ - pktsched_bit_clr(grp->qfg_index, &qif->qif_bitmaps[ER]); - s = qfq_calc_state(qif, grp); - pktsched_bit_set(grp->qfg_index, &qif->qif_bitmaps[s]); - } - /* we need to unblock even if the group has gone away */ - qfq_unblock_groups(qif, grp->qfg_index, old_F); - } - -skip_unblock: - qfq_update_eligible(qif, old_V); - -#if QFQ_DEBUG - if (!qif->qif_bitmaps[ER] && qif->qif_queued && pktsched_verbose > 1) { - qfq_dump_sched(qif, "end dequeue"); - } -#endif /* QFQ_DEBUG */ -} - -/* - * Assign a reasonable start time for a new flow k in group i. - * Admissible values for hat(F) are multiples of sigma_i - * no greater than V+sigma_i . Larger values mean that - * we had a wraparound so we consider the timestamp to be stale. - * - * If F is not stale and F >= V then we set S = F. - * Otherwise we should assign S = V, but this may violate - * the ordering in ER. So, if we have groups in ER, set S to - * the F_j of the first group j which would be blocking us. - * We are guaranteed not to move S backward because - * otherwise our group i would still be blocked. 
- */ -static inline void -qfq_update_start(struct qfq_if *qif, struct qfq_class *cl) -{ - pktsched_bitmap_t mask; - u_int64_t limit, roundedF; - int slot_shift = cl->cl_grp->qfg_slot_shift; - - roundedF = qfq_round_down(cl->cl_F, slot_shift); - limit = qfq_round_down(qif->qif_V, slot_shift) + (1UL << slot_shift); - - if (!qfq_gt(cl->cl_F, qif->qif_V) || qfq_gt(roundedF, limit)) { - /* timestamp was stale */ - mask = mask_from(qif->qif_bitmaps[ER], cl->cl_grp->qfg_index); - if (mask) { - struct qfq_group *next = qfq_ffs(qif, mask); - if (qfq_gt(roundedF, next->qfg_F)) { - cl->cl_S = next->qfg_F; - return; - } - } - cl->cl_S = qif->qif_V; - } else { /* timestamp is not stale */ - cl->cl_S = cl->cl_F; - } -} - -int -qfq_enqueue(struct qfq_if *qif, struct qfq_class *cl, pktsched_pkt_t *pkt, - struct pf_mtag *t) -{ - struct ifclassq *ifq = qif->qif_ifq; - struct qfq_group *grp; - u_int64_t roundedS; - int len, ret, s; - - IFCQ_LOCK_ASSERT_HELD(ifq); - VERIFY(cl == NULL || cl->cl_qif == qif); - - if (cl == NULL) { - cl = qfq_clh_to_clp(qif, 0); - if (cl == NULL) { - cl = qif->qif_default; - if (cl == NULL) { - IFCQ_CONVERT_LOCK(ifq); - return CLASSQEQ_DROP; - } - } - } - - VERIFY(pkt->pktsched_ptype == qptype(&cl->cl_q)); - len = pktsched_get_pkt_len(pkt); - - ret = qfq_addq(cl, pkt, t); - if ((ret != 0) && (ret != CLASSQEQ_SUCCESS_FC)) { - VERIFY(ret == CLASSQEQ_DROP || - ret == CLASSQEQ_DROP_FC || - ret == CLASSQEQ_DROP_SP); - PKTCNTR_ADD(&cl->cl_dropcnt, 1, len); - IFCQ_DROP_ADD(ifq, 1, len); - return ret; - } - IFCQ_INC_LEN(ifq); - IFCQ_INC_BYTES(ifq, len); - -#if QFQ_DEBUG - qif->qif_queued++; -#endif /* QFQ_DEBUG */ - - /* queue was not idle, we're done */ - if (qlen(&cl->cl_q) > 1) { - goto done; - } - - /* queue was idle */ - grp = cl->cl_grp; - qfq_update_start(qif, cl); /* adjust start time */ - - /* compute new finish time and rounded start */ - cl->cl_F = cl->cl_S + (u_int64_t)len * cl->cl_inv_w; - roundedS = qfq_round_down(cl->cl_S, grp->qfg_slot_shift); - - /* - * Insert cl in the correct bucket. - * - * If cl->cl_S >= grp->qfg_S we don't need to adjust the bucket list - * and simply go to the insertion phase. Otherwise grp->qfg_S is - * decreasing, we must make room in the bucket list, and also - * recompute the group state. Finally, if there were no flows - * in this group and nobody was in ER make sure to adjust V. - */ - if (grp->qfg_full_slots != 0) { - if (!qfq_gt(grp->qfg_S, cl->cl_S)) { - goto skip_update; - } - - /* create a slot for this cl->cl_S */ - qfq_slot_rotate(qif, grp, roundedS); - - /* group was surely ineligible, remove */ - pktsched_bit_clr(grp->qfg_index, &qif->qif_bitmaps[IR]); - pktsched_bit_clr(grp->qfg_index, &qif->qif_bitmaps[IB]); - } else if (!qif->qif_bitmaps[ER] && qfq_gt(roundedS, qif->qif_V)) { - qif->qif_V = roundedS; - } - - grp->qfg_S = roundedS; - grp->qfg_F = - roundedS + (2ULL << grp->qfg_slot_shift); /* i.e. 2 sigma_i */ - s = qfq_calc_state(qif, grp); - pktsched_bit_set(grp->qfg_index, &qif->qif_bitmaps[s]); - - if (pktsched_verbose > 2) { - log(LOG_DEBUG, "%s: %s qid=%d enqueue m=0x%llx state=%s 0x%x " - "S=0x%llx F=0x%llx V=0x%llx\n", if_name(QFQIF_IFP(qif)), - qfq_style(qif), cl->cl_handle, - (uint64_t)VM_KERNEL_ADDRPERM(pkt->pktsched_pkt_mbuf), - qfq_state2str(s), - qif->qif_bitmaps[s], cl->cl_S, cl->cl_F, qif->qif_V); - } - -skip_update: - qfq_slot_insert(qif, grp, cl, roundedS); - -done: - /* successfully queued. 
*/ - return ret; -} - -static inline void -qfq_slot_remove(struct qfq_if *qif, struct qfq_group *grp, - struct qfq_class *cl) -{ -#pragma unused(qif) - struct qfq_class **pprev; - u_int32_t i, offset; - u_int64_t roundedS; - - roundedS = qfq_round_down(cl->cl_S, grp->qfg_slot_shift); - offset = (roundedS - grp->qfg_S) >> grp->qfg_slot_shift; - i = (grp->qfg_front + offset) % qif->qif_maxslots; - - pprev = &grp->qfg_slots[i]; - while (*pprev && *pprev != cl) { - pprev = &(*pprev)->cl_next; - } - - *pprev = cl->cl_next; - if (!grp->qfg_slots[i]) { - pktsched_bit_clr(offset, &grp->qfg_full_slots); - } -} - -/* - * Called to forcibly destroy a queue. - * If the queue is not in the front bucket, or if it has - * other queues in the front bucket, we can simply remove - * the queue with no other side effects. - * Otherwise we must propagate the event up. - * XXX description to be completed. - */ -static void -qfq_deactivate_class(struct qfq_if *qif, struct qfq_class *cl) -{ - struct qfq_group *grp = cl->cl_grp; - pktsched_bitmap_t mask; - u_int64_t roundedS; - int s; - - if (pktsched_verbose) { - log(LOG_DEBUG, "%s: %s deactivate qid=%d grp=%d " - "full_slots=0x%x front=%d bitmaps={ER=0x%x,EB=0x%x," - "IR=0x%x,IB=0x%x}\n", - if_name(QFQIF_IFP(cl->cl_qif)), qfq_style(cl->cl_qif), - cl->cl_handle, grp->qfg_index, grp->qfg_full_slots, - grp->qfg_front, qif->qif_bitmaps[ER], qif->qif_bitmaps[EB], - qif->qif_bitmaps[IR], qif->qif_bitmaps[IB]); -#if QFQ_DEBUG - if (pktsched_verbose > 1) { - qfq_dump_sched(qif, "start deactivate"); - } -#endif /* QFQ_DEBUG */ - } - - cl->cl_F = cl->cl_S; /* not needed if the class goes away */ - qfq_slot_remove(qif, grp, cl); - - if (grp->qfg_full_slots == 0) { - /* - * Nothing left in the group, remove from all sets. - * Do ER last because if we were blocking other groups - * we must unblock them. 
- */ - pktsched_bit_clr(grp->qfg_index, &qif->qif_bitmaps[IR]); - pktsched_bit_clr(grp->qfg_index, &qif->qif_bitmaps[EB]); - pktsched_bit_clr(grp->qfg_index, &qif->qif_bitmaps[IB]); - - if (pktsched_bit_tst(grp->qfg_index, &qif->qif_bitmaps[ER]) && - !(qif->qif_bitmaps[ER] & ~((1UL << grp->qfg_index) - 1))) { - mask = qif->qif_bitmaps[ER] & - ((1UL << grp->qfg_index) - 1); - if (mask) { - mask = ~((1UL << __fls(mask)) - 1); - } else { - mask = (pktsched_bitmap_t)~0UL; - } - qfq_move_groups(qif, mask, EB, ER); - qfq_move_groups(qif, mask, IB, IR); - } - pktsched_bit_clr(grp->qfg_index, &qif->qif_bitmaps[ER]); - } else if (!grp->qfg_slots[grp->qfg_front]) { - cl = qfq_slot_scan(qif, grp); - roundedS = qfq_round_down(cl->cl_S, grp->qfg_slot_shift); - if (grp->qfg_S != roundedS) { - pktsched_bit_clr(grp->qfg_index, &qif->qif_bitmaps[ER]); - pktsched_bit_clr(grp->qfg_index, &qif->qif_bitmaps[IR]); - pktsched_bit_clr(grp->qfg_index, &qif->qif_bitmaps[EB]); - pktsched_bit_clr(grp->qfg_index, &qif->qif_bitmaps[IB]); - grp->qfg_S = roundedS; - grp->qfg_F = roundedS + (2ULL << grp->qfg_slot_shift); - s = qfq_calc_state(qif, grp); - pktsched_bit_set(grp->qfg_index, &qif->qif_bitmaps[s]); - } - } - qfq_update_eligible(qif, qif->qif_V); - -#if QFQ_DEBUG - if (pktsched_verbose > 1) { - qfq_dump_sched(qif, "end deactivate"); - } -#endif /* QFQ_DEBUG */ -} - -static const char * -qfq_state2str(int s) -{ - const char *c; - - switch (s) { - case ER: - c = "ER"; - break; - case IR: - c = "IR"; - break; - case EB: - c = "EB"; - break; - case IB: - c = "IB"; - break; - default: - c = "?"; - break; - } - return c; -} - -static inline int -qfq_addq(struct qfq_class *cl, pktsched_pkt_t *pkt, struct pf_mtag *t) -{ - struct qfq_if *qif = cl->cl_qif; - struct ifclassq *ifq = qif->qif_ifq; - - IFCQ_LOCK_ASSERT_HELD(ifq); - - if (q_is_sfb(&cl->cl_q)) { - if (cl->cl_sfb == NULL) { - struct ifnet *ifp = QFQIF_IFP(qif); - - VERIFY(cl->cl_flags & QFCF_LAZY); - cl->cl_flags &= ~QFCF_LAZY; - - IFCQ_CONVERT_LOCK(ifq); - cl->cl_sfb = sfb_alloc(ifp, cl->cl_handle, - qlimit(&cl->cl_q), cl->cl_qflags); - if (cl->cl_sfb == NULL) { - /* fall back to droptail */ - qtype(&cl->cl_q) = Q_DROPTAIL; - cl->cl_flags &= ~QFCF_SFB; - cl->cl_qflags &= ~(SFBF_ECN | SFBF_FLOWCTL); - - log(LOG_ERR, "%s: %s SFB lazy allocation " - "failed for qid=%d grp=%d, falling back " - "to DROPTAIL\n", if_name(ifp), - qfq_style(qif), cl->cl_handle, - cl->cl_grp->qfg_index); - } else if (qif->qif_throttle != IFNET_THROTTLE_OFF) { - /* if there's pending throttling, set it */ - cqrq_throttle_t tr = { 1, qif->qif_throttle }; - int err = qfq_throttle(qif, &tr); - - if (err == EALREADY) { - err = 0; - } - if (err != 0) { - tr.level = IFNET_THROTTLE_OFF; - (void) qfq_throttle(qif, &tr); - } - } - } - if (cl->cl_sfb != NULL) { - return sfb_addq(cl->cl_sfb, &cl->cl_q, pkt, t); - } - } else if (qlen(&cl->cl_q) >= qlimit(&cl->cl_q)) { - IFCQ_CONVERT_LOCK(ifq); - return CLASSQEQ_DROP; - } - -#if PF_ECN - if (cl->cl_flags & QFCF_CLEARDSCP) { - /* not supported for non-mbuf type packets */ - VERIFY(pkt->pktsched_ptype == QP_MBUF); - write_dsfield(m, t, 0); - } -#endif /* PF_ECN */ - - VERIFY(pkt->pktsched_ptype == qptype(&cl->cl_q)); - _addq(&cl->cl_q, &pkt->pktsched_pkt); - return 0; -} - -static inline void -qfq_getq(struct qfq_class *cl, pktsched_pkt_t *pkt) -{ - classq_pkt_t p = CLASSQ_PKT_INITIALIZER(p); - - IFCQ_LOCK_ASSERT_HELD(cl->cl_qif->qif_ifq); - - if (q_is_sfb(&cl->cl_q) && cl->cl_sfb != NULL) { - return sfb_getq(cl->cl_sfb, &cl->cl_q, pkt); - } - - 
_getq(&cl->cl_q, &p); - return pktsched_pkt_encap(pkt, &p); -} - -static void -qfq_purgeq(struct qfq_if *qif, struct qfq_class *cl, u_int32_t flow, - u_int32_t *packets, u_int32_t *bytes) -{ - struct ifclassq *ifq = qif->qif_ifq; - u_int32_t cnt = 0, len = 0, qlen; - - IFCQ_LOCK_ASSERT_HELD(ifq); - - if ((qlen = qlen(&cl->cl_q)) == 0) { - goto done; - } - - IFCQ_CONVERT_LOCK(ifq); - if (q_is_sfb(&cl->cl_q) && cl->cl_sfb != NULL) { - sfb_purgeq(cl->cl_sfb, &cl->cl_q, flow, &cnt, &len); - } else { - _flushq_flow(&cl->cl_q, flow, &cnt, &len); - } - - if (cnt > 0) { - VERIFY(qlen(&cl->cl_q) == (qlen - cnt)); -#if QFQ_DEBUG - VERIFY(qif->qif_queued >= cnt); - qif->qif_queued -= cnt; -#endif /* QFQ_DEBUG */ - - PKTCNTR_ADD(&cl->cl_dropcnt, cnt, len); - IFCQ_DROP_ADD(ifq, cnt, len); - - VERIFY(((signed)IFCQ_LEN(ifq) - cnt) >= 0); - IFCQ_LEN(ifq) -= cnt; - - if (qempty(&cl->cl_q)) { - qfq_deactivate_class(qif, cl); - } - - if (pktsched_verbose) { - log(LOG_DEBUG, "%s: %s purge qid=%d weight=%d " - "qlen=[%d,%d] cnt=%d len=%d flow=0x%x\n", - if_name(QFQIF_IFP(qif)), - qfq_style(qif), cl->cl_handle, - (u_int32_t)(QFQ_ONE_FP / cl->cl_inv_w), qlen, - qlen(&cl->cl_q), cnt, len, flow); - } - } -done: - if (packets != NULL) { - *packets = cnt; - } - if (bytes != NULL) { - *bytes = len; - } -} - -static void -qfq_updateq(struct qfq_if *qif, struct qfq_class *cl, cqev_t ev) -{ - IFCQ_LOCK_ASSERT_HELD(qif->qif_ifq); - - if (pktsched_verbose) { - log(LOG_DEBUG, "%s: %s update qid=%d weight=%d event=%s\n", - if_name(QFQIF_IFP(qif)), qfq_style(qif), - cl->cl_handle, (u_int32_t)(QFQ_ONE_FP / cl->cl_inv_w), - ifclassq_ev2str(ev)); - } - - if (q_is_sfb(&cl->cl_q) && cl->cl_sfb != NULL) { - return sfb_updateq(cl->cl_sfb, ev); - } -} - -int -qfq_get_class_stats(struct qfq_if *qif, u_int32_t qid, - struct qfq_classstats *sp) -{ - struct qfq_class *cl; - - IFCQ_LOCK_ASSERT_HELD(qif->qif_ifq); - - if ((cl = qfq_clh_to_clp(qif, qid)) == NULL) { - return EINVAL; - } - - sp->class_handle = cl->cl_handle; - sp->index = cl->cl_grp->qfg_index; - sp->weight = (QFQ_ONE_FP / cl->cl_inv_w); - sp->lmax = cl->cl_lmax; - sp->qlength = qlen(&cl->cl_q); - sp->qlimit = qlimit(&cl->cl_q); - sp->period = cl->cl_period; - sp->xmitcnt = cl->cl_xmitcnt; - sp->dropcnt = cl->cl_dropcnt; - - sp->qtype = qtype(&cl->cl_q); - sp->qstate = qstate(&cl->cl_q); - - if (q_is_sfb(&cl->cl_q) && cl->cl_sfb != NULL) { - sfb_getstats(cl->cl_sfb, &sp->sfb); - } - - return 0; -} - -static int -qfq_stat_sc(struct qfq_if *qif, cqrq_stat_sc_t *sr) -{ - struct ifclassq *ifq = qif->qif_ifq; - struct qfq_class *cl; - u_int32_t i; - - IFCQ_LOCK_ASSERT_HELD(ifq); - - VERIFY(sr->sc == MBUF_SC_UNSPEC || MBUF_VALID_SC(sr->sc)); - - i = MBUF_SCIDX(sr->sc); - VERIFY(i < IFCQ_SC_MAX); - - cl = ifq->ifcq_disc_slots[i].cl; - sr->packets = qlen(&cl->cl_q); - sr->bytes = qsize(&cl->cl_q); - - return 0; -} - -/* convert a class handle to the corresponding class pointer */ -static inline struct qfq_class * -qfq_clh_to_clp(struct qfq_if *qif, u_int32_t chandle) -{ - struct qfq_class *cl; - int i; - - IFCQ_LOCK_ASSERT_HELD(qif->qif_ifq); - - /* - * First, try optimistically the slot matching the lower bits of - * the handle. If it fails, do the linear table search. 
- */ - i = chandle % qif->qif_maxclasses; - if ((cl = qif->qif_class_tbl[i]) != NULL && cl->cl_handle == chandle) { - return cl; - } - for (i = 0; i < qif->qif_maxclasses; i++) { - if ((cl = qif->qif_class_tbl[i]) != NULL && - cl->cl_handle == chandle) { - return cl; - } - } - - return NULL; -} - -static const char * -qfq_style(struct qfq_if *qif) -{ -#pragma unused(qif) - return "QFQ"; -} - -/* - * Generic comparison function, handling wraparound - */ -static inline int -qfq_gt(u_int64_t a, u_int64_t b) -{ - return (int64_t)(a - b) > 0; -} - -/* - * Round a precise timestamp to its slotted value - */ -static inline u_int64_t -qfq_round_down(u_int64_t ts, u_int32_t shift) -{ - return ts & ~((1ULL << shift) - 1); -} - -/* - * Return the pointer to the group with lowest index in the bitmap - */ -static inline struct qfq_group * -qfq_ffs(struct qfq_if *qif, pktsched_bitmap_t bitmap) -{ - int index = pktsched_ffs(bitmap) - 1; /* zero-based */ - VERIFY(index >= 0 && index <= QFQ_MAX_INDEX && - qif->qif_groups[index] != NULL); - return qif->qif_groups[index]; -} - -/* - * Calculate a flow index, given its weight and maximum packet length. - * index = log_2(maxlen/weight) but we need to apply the scaling. - * This is used only once at flow creation. - */ -static int -qfq_calc_index(struct qfq_class *cl, u_int32_t inv_w, u_int32_t maxlen) -{ - u_int64_t slot_size = (u_int64_t)maxlen * inv_w; - pktsched_bitmap_t size_map; - int index = 0; - - size_map = (pktsched_bitmap_t)(slot_size >> QFQ_MIN_SLOT_SHIFT); - if (!size_map) { - goto out; - } - - index = __fls(size_map) + 1; /* basically a log_2() */ - index -= !(slot_size - (1ULL << (index + QFQ_MIN_SLOT_SHIFT - 1))); - - if (index < 0) { - index = 0; - } -out: - if (pktsched_verbose) { - log(LOG_DEBUG, "%s: %s qid=%d grp=%d W=%u, L=%u, I=%d\n", - if_name(QFQIF_IFP(cl->cl_qif)), qfq_style(cl->cl_qif), - cl->cl_handle, index, (u_int32_t)(QFQ_ONE_FP / inv_w), - maxlen, index); - } - return index; -} - -#if QFQ_DEBUG -static void -qfq_dump_groups(struct qfq_if *qif, u_int32_t mask) -{ - int i, j; - - for (i = 0; i < QFQ_MAX_INDEX + 1; i++) { - struct qfq_group *g = qif->qif_groups[i]; - - if (0 == (mask & (1 << i))) { - continue; - } - if (g == NULL) { - continue; - } - - log(LOG_DEBUG, "%s: %s [%2d] full_slots 0x%x\n", - if_name(QFQIF_IFP(qif)), qfq_style(qif), i, - g->qfg_full_slots); - log(LOG_DEBUG, "%s: %s S 0x%20llx F 0x%llx %c\n", - if_name(QFQIF_IFP(qif)), qfq_style(qif), - g->qfg_S, g->qfg_F, mask & (1 << i) ? 
'1' : '0'); - - for (j = 0; j < qif->qif_maxslots; j++) { - if (g->qfg_slots[j]) { - log(LOG_DEBUG, "%s: %s bucket %d 0x%llx " - "qid %d\n", if_name(QFQIF_IFP(qif)), - qfq_style(qif), j, - (uint64_t)VM_KERNEL_ADDRPERM( - g->qfg_slots[j]), - g->qfg_slots[j]->cl_handle); - } - } - } -} - -static void -qfq_dump_sched(struct qfq_if *qif, const char *msg) -{ - log(LOG_DEBUG, "%s: %s --- in %s: ---\n", - if_name(QFQIF_IFP(qif)), qfq_style(qif), msg); - log(LOG_DEBUG, "%s: %s emptygrp %d queued %d V 0x%llx\n", - if_name(QFQIF_IFP(qif)), qfq_style(qif), qif->qif_emptygrp, - qif->qif_queued, qif->qif_V); - log(LOG_DEBUG, "%s: %s ER 0x%08x\n", - if_name(QFQIF_IFP(qif)), qfq_style(qif), qif->qif_bitmaps[ER]); - log(LOG_DEBUG, "%s: %s EB 0x%08x\n", - if_name(QFQIF_IFP(qif)), qfq_style(qif), qif->qif_bitmaps[EB]); - log(LOG_DEBUG, "%s: %s IR 0x%08x\n", - if_name(QFQIF_IFP(qif)), qfq_style(qif), qif->qif_bitmaps[IR]); - log(LOG_DEBUG, "%s: %s IB 0x%08x\n", - if_name(QFQIF_IFP(qif)), qfq_style(qif), qif->qif_bitmaps[IB]); - qfq_dump_groups(qif, 0xffffffff); -} -#endif /* QFQ_DEBUG */ - -/* - * qfq_enqueue_ifclassq is an enqueue function to be registered to - * (*ifcq_enqueue) in struct ifclassq. - */ -static int -qfq_enqueue_ifclassq(struct ifclassq *ifq, classq_pkt_t *p, boolean_t *pdrop) -{ - u_int32_t i = 0; - int ret; - pktsched_pkt_t pkt; - struct pf_mtag *t = NULL; - - IFCQ_LOCK_ASSERT_HELD(ifq); - - switch (p->cp_ptype) { - case QP_MBUF: { - struct mbuf *m = p->cp_mbuf; - if (!(m->m_flags & M_PKTHDR)) { - /* should not happen */ - log(LOG_ERR, "%s: packet does not have pkthdr\n", - if_name(ifq->ifcq_ifp)); - IFCQ_CONVERT_LOCK(ifq); - m_freem(m); - *p = CLASSQ_PKT_INITIALIZER(*p); - *pdrop = TRUE; - return ENOBUFS; - } - i = MBUF_SCIDX(mbuf_get_service_class(m)); - t = m_pftag(m); - break; - } - - - default: - VERIFY(0); - __builtin_unreachable(); - /* NOTREACHED */ - } - - VERIFY((u_int32_t)i < IFCQ_SC_MAX); - - pktsched_pkt_encap(&pkt, p); - - ret = qfq_enqueue(ifq->ifcq_disc, - ifq->ifcq_disc_slots[i].cl, &pkt, t); - - if ((ret != 0) && (ret != CLASSQEQ_SUCCESS_FC)) { - pktsched_free_pkt(&pkt); - *pdrop = TRUE; - } else { - *pdrop = FALSE; - } - - switch (ret) { - case CLASSQEQ_DROP: - ret = ENOBUFS; - break; - case CLASSQEQ_DROP_FC: - ret = EQFULL; - break; - case CLASSQEQ_DROP_SP: - ret = EQSUSPENDED; - break; - case CLASSQEQ_SUCCESS_FC: - ret = EQFULL; - break; - case CLASSQEQ_SUCCESS: - ret = 0; - break; - default: - VERIFY(0); - } - return ret; -} - -/* - * qfq_dequeue_ifclassq is a dequeue function to be registered to - * (*ifcq_dequeue) in struct ifclass. - * - * note: CLASSQDQ_POLL returns the next packet without removing the packet - * from the queue. CLASSQDQ_REMOVE is a normal dequeue operation. - * CLASSQDQ_REMOVE must return the same packet if called immediately - * after CLASSQDQ_POLL. 
- */ -static void -qfq_dequeue_ifclassq(struct ifclassq *ifq, classq_pkt_t *cpkt) -{ - pktsched_pkt_t pkt; - _PKTSCHED_PKT_INIT(&pkt); - qfq_dequeue(ifq->ifcq_disc, &pkt); - *cpkt = pkt.pktsched_pkt; -} - -static int -qfq_request_ifclassq(struct ifclassq *ifq, cqrq_t req, void *arg) -{ - struct qfq_if *qif = (struct qfq_if *)ifq->ifcq_disc; - int err = 0; - - IFCQ_LOCK_ASSERT_HELD(ifq); - - switch (req) { - case CLASSQRQ_PURGE: - qfq_purge(qif); - break; - - case CLASSQRQ_PURGE_SC: - qfq_purge_sc(qif, (cqrq_purge_sc_t *)arg); - break; - - case CLASSQRQ_EVENT: - qfq_event(qif, (cqev_t)arg); - break; - - case CLASSQRQ_THROTTLE: - err = qfq_throttle(qif, (cqrq_throttle_t *)arg); - break; - case CLASSQRQ_STAT_SC: - err = qfq_stat_sc(qif, (cqrq_stat_sc_t *)arg); - break; - } - return err; -} - -int -qfq_setup_ifclassq(struct ifclassq *ifq, u_int32_t flags, - classq_pkt_type_t ptype) -{ - struct ifnet *ifp = ifq->ifcq_ifp; - struct qfq_class *cl0, *cl1, *cl2, *cl3, *cl4; - struct qfq_class *cl5, *cl6, *cl7, *cl8, *cl9; - struct qfq_if *qif; - u_int32_t maxlen = 0, qflags = 0; - int err = 0; - - IFCQ_LOCK_ASSERT_HELD(ifq); - VERIFY(ifq->ifcq_disc == NULL); - VERIFY(ifq->ifcq_type == PKTSCHEDT_NONE); - - if (flags & PKTSCHEDF_QALG_SFB) { - qflags |= QFCF_SFB; - } - if (flags & PKTSCHEDF_QALG_ECN) { - qflags |= QFCF_ECN; - } - if (flags & PKTSCHEDF_QALG_FLOWCTL) { - qflags |= QFCF_FLOWCTL; - } - if (flags & PKTSCHEDF_QALG_DELAYBASED) { - qflags |= QFCF_DELAYBASED; - } - - qif = qfq_alloc(ifp, M_WAITOK); - if (qif == NULL) { - return ENOMEM; - } - - if ((maxlen = IFCQ_MAXLEN(ifq)) == 0) { - maxlen = if_sndq_maxlen; - } - - if ((err = qfq_add_queue(qif, maxlen, 300, 1200, - qflags | QFCF_LAZY, SCIDX_BK_SYS, &cl0, ptype)) != 0) { - goto cleanup; - } - - if ((err = qfq_add_queue(qif, maxlen, 600, 1400, - qflags | QFCF_LAZY, SCIDX_BK, &cl1, ptype)) != 0) { - goto cleanup; - } - - if ((err = qfq_add_queue(qif, maxlen, 2400, 600, - qflags | QFCF_DEFAULTCLASS, SCIDX_BE, &cl2, ptype)) != 0) { - goto cleanup; - } - - if ((err = qfq_add_queue(qif, maxlen, 2700, 600, - qflags | QFCF_LAZY, SCIDX_RD, &cl3, ptype)) != 0) { - goto cleanup; - } - - if ((err = qfq_add_queue(qif, maxlen, 3000, 400, - qflags | QFCF_LAZY, SCIDX_OAM, &cl4, ptype)) != 0) { - goto cleanup; - } - - if ((err = qfq_add_queue(qif, maxlen, 8000, 1000, - qflags | QFCF_LAZY, SCIDX_AV, &cl5, ptype)) != 0) { - goto cleanup; - } - - if ((err = qfq_add_queue(qif, maxlen, 15000, 1200, - qflags | QFCF_LAZY, SCIDX_RV, &cl6, ptype)) != 0) { - goto cleanup; - } - - if ((err = qfq_add_queue(qif, maxlen, 20000, 1400, - qflags | QFCF_LAZY, SCIDX_VI, &cl7, ptype)) != 0) { - goto cleanup; - } - - if ((err = qfq_add_queue(qif, maxlen, 23000, 200, - qflags | QFCF_LAZY, SCIDX_VO, &cl8, ptype)) != 0) { - goto cleanup; - } - - if ((err = qfq_add_queue(qif, maxlen, 25000, 200, - qflags, SCIDX_CTL, &cl9, ptype)) != 0) { - goto cleanup; - } - - err = ifclassq_attach(ifq, PKTSCHEDT_QFQ, qif, - qfq_enqueue_ifclassq, qfq_dequeue_ifclassq, NULL, - NULL, NULL, qfq_request_ifclassq); - - /* cache these for faster lookup */ - if (err == 0) { - ifq->ifcq_disc_slots[SCIDX_BK_SYS].qid = SCIDX_BK_SYS; - ifq->ifcq_disc_slots[SCIDX_BK_SYS].cl = cl0; - - ifq->ifcq_disc_slots[SCIDX_BK].qid = SCIDX_BK; - ifq->ifcq_disc_slots[SCIDX_BK].cl = cl1; - - ifq->ifcq_disc_slots[SCIDX_BE].qid = SCIDX_BE; - ifq->ifcq_disc_slots[SCIDX_BE].cl = cl2; - - ifq->ifcq_disc_slots[SCIDX_RD].qid = SCIDX_RD; - ifq->ifcq_disc_slots[SCIDX_RD].cl = cl3; - - ifq->ifcq_disc_slots[SCIDX_OAM].qid = SCIDX_OAM; - 
ifq->ifcq_disc_slots[SCIDX_OAM].cl = cl4; - - ifq->ifcq_disc_slots[SCIDX_AV].qid = SCIDX_AV; - ifq->ifcq_disc_slots[SCIDX_AV].cl = cl5; - - ifq->ifcq_disc_slots[SCIDX_RV].qid = SCIDX_RV; - ifq->ifcq_disc_slots[SCIDX_RV].cl = cl6; - - ifq->ifcq_disc_slots[SCIDX_VI].qid = SCIDX_VI; - ifq->ifcq_disc_slots[SCIDX_VI].cl = cl7; - - ifq->ifcq_disc_slots[SCIDX_VO].qid = SCIDX_VO; - ifq->ifcq_disc_slots[SCIDX_VO].cl = cl8; - - ifq->ifcq_disc_slots[SCIDX_CTL].qid = SCIDX_CTL; - ifq->ifcq_disc_slots[SCIDX_CTL].cl = cl9; - } - -cleanup: - if (err != 0) { - (void) qfq_destroy_locked(qif); - } - - return err; -} - -int -qfq_teardown_ifclassq(struct ifclassq *ifq) -{ - struct qfq_if *qif = ifq->ifcq_disc; - int i; - - IFCQ_LOCK_ASSERT_HELD(ifq); - VERIFY(qif != NULL && ifq->ifcq_type == PKTSCHEDT_QFQ); - - (void) qfq_destroy_locked(qif); - - ifq->ifcq_disc = NULL; - for (i = 0; i < IFCQ_SC_MAX; i++) { - ifq->ifcq_disc_slots[i].qid = 0; - ifq->ifcq_disc_slots[i].cl = NULL; - } - - return ifclassq_detach(ifq); -} - -int -qfq_getqstats_ifclassq(struct ifclassq *ifq, u_int32_t slot, - struct if_ifclassq_stats *ifqs) -{ - struct qfq_if *qif = ifq->ifcq_disc; - - IFCQ_LOCK_ASSERT_HELD(ifq); - VERIFY(ifq->ifcq_type == PKTSCHEDT_QFQ); - - if (slot >= IFCQ_SC_MAX) { - return EINVAL; - } - - return qfq_get_class_stats(qif, ifq->ifcq_disc_slots[slot].qid, - &ifqs->ifqs_qfq_stats); -} - -static int -qfq_throttle(struct qfq_if *qif, cqrq_throttle_t *tr) -{ - struct ifclassq *ifq = qif->qif_ifq; - struct qfq_class *cl; - int err = 0; - - IFCQ_LOCK_ASSERT_HELD(ifq); - - if (!tr->set) { - tr->level = qif->qif_throttle; - return 0; - } - - if (tr->level == qif->qif_throttle) { - return EALREADY; - } - - /* Current throttling levels only involve BK_SYS class */ - cl = ifq->ifcq_disc_slots[SCIDX_BK_SYS].cl; - - switch (tr->level) { - case IFNET_THROTTLE_OFF: - err = qfq_resumeq(qif, cl); - break; - - case IFNET_THROTTLE_OPPORTUNISTIC: - err = qfq_suspendq(qif, cl); - break; - - default: - VERIFY(0); - /* NOTREACHED */ - } - - if (err == 0 || err == ENXIO) { - if (pktsched_verbose) { - log(LOG_DEBUG, "%s: %s throttling level %sset %d->%d\n", - if_name(QFQIF_IFP(qif)), qfq_style(qif), - (err == 0) ? 
"" : "lazy ", qif->qif_throttle, - tr->level); - } - qif->qif_throttle = tr->level; - if (err != 0) { - err = 0; - } else { - qfq_purgeq(qif, cl, 0, NULL, NULL); - } - } else { - log(LOG_ERR, "%s: %s unable to set throttling level " - "%d->%d [error=%d]\n", if_name(QFQIF_IFP(qif)), - qfq_style(qif), qif->qif_throttle, tr->level, err); - } - - return err; -} - -static int -qfq_resumeq(struct qfq_if *qif, struct qfq_class *cl) -{ - struct ifclassq *ifq = qif->qif_ifq; - int err = 0; -#if !MACH_ASSERT -#pragma unused(ifq) -#endif - IFCQ_LOCK_ASSERT_HELD(ifq); - - if (q_is_sfb(&cl->cl_q) && cl->cl_sfb != NULL) { - err = sfb_suspendq(cl->cl_sfb, &cl->cl_q, FALSE); - } - - if (err == 0) { - qstate(&cl->cl_q) = QS_RUNNING; - } - - return err; -} - -static int -qfq_suspendq(struct qfq_if *qif, struct qfq_class *cl) -{ - struct ifclassq *ifq = qif->qif_ifq; - int err = 0; -#if !MACH_ASSERT -#pragma unused(ifq) -#endif - IFCQ_LOCK_ASSERT_HELD(ifq); - - if (q_is_sfb(&cl->cl_q)) { - if (cl->cl_sfb != NULL) { - err = sfb_suspendq(cl->cl_sfb, &cl->cl_q, TRUE); - } else { - VERIFY(cl->cl_flags & QFCF_LAZY); - err = ENXIO; /* delayed throttling */ - } - } - - if (err == 0 || err == ENXIO) { - qstate(&cl->cl_q) = QS_SUSPENDED; - } - - return err; -} diff --git a/bsd/net/pktsched/pktsched_qfq.h b/bsd/net/pktsched/pktsched_qfq.h deleted file mode 100644 index 475e16fc1..000000000 --- a/bsd/net/pktsched/pktsched_qfq.h +++ /dev/null @@ -1,295 +0,0 @@ -/* - * Copyright (c) 2011-2016 Apple Inc. All rights reserved. - * - * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * - * This file contains Original Code and/or Modifications of Original Code - * as defined in and that are subject to the Apple Public Source License - * Version 2.0 (the 'License'). You may not use this file except in - * compliance with the License. The rights granted to you under the License - * may not be used to create, or enable the creation or redistribution of, - * unlawful or unlicensed copies of an Apple operating system, or to - * circumvent, violate, or enable the circumvention or violation of, any - * terms of an Apple operating system software license agreement. - * - * Please obtain a copy of the License at - * http://www.opensource.apple.com/apsl/ and read it before using this file. - * - * The Original Code and all software distributed under the License are - * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER - * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, - * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. - * Please see the License for the specific language governing rights and - * limitations under the License. - * - * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ - */ - -/* - * Copyright (c) 2010 Fabio Checconi, Luigi Rizzo, Paolo Valente - * All rights reserved - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. 
- * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - */ - -#ifndef _NET_PKTSCHED_PKTSCHED_QFQ_H_ -#define _NET_PKTSCHED_PKTSCHED_QFQ_H_ - -#ifdef PRIVATE -#include -#include -#include -#include -#include -#include - -#ifdef __cplusplus -extern "C" { -#endif - -/* qfq class flags */ -#define QFCF_RED 0x0001 /* use RED */ -#define QFCF_ECN 0x0002 /* use ECN with RED/BLUE/SFB */ -#define QFCF_RIO 0x0004 /* use RIO */ -#define QFCF_CLEARDSCP 0x0010 /* clear diffserv codepoint */ -#define QFCF_BLUE 0x0100 /* use BLUE */ -#define QFCF_SFB 0x0200 /* use SFB */ -#define QFCF_FLOWCTL 0x0400 /* enable flow control advisories */ -#define QFCF_DEFAULTCLASS 0x1000 /* default class */ -#define QFCF_DELAYBASED 0x2000 /* queue sizing is delay based */ -#ifdef BSD_KERNEL_PRIVATE -#define QFCF_LAZY 0x10000000 /* on-demand resource allocation */ -#endif /* BSD_KERNEL_PRIVATE */ - -#define QFCF_USERFLAGS \ - (QFCF_RED | QFCF_ECN | QFCF_RIO | QFCF_CLEARDSCP | QFCF_BLUE | \ - QFCF_SFB | QFCF_FLOWCTL | QFCF_DEFAULTCLASS) - -#ifdef BSD_KERNEL_PRIVATE -#define QFCF_BITS \ - "\020\1RED\2ECN\3RIO\5CLEARDSCP\11BLUE\12SFB\13FLOWCTL\15DEFAULT" \ - "\35LAZY" -#else -#define QFCF_BITS \ - "\020\1RED\2ECN\3RIO\5CLEARDSCP\11BLUE\12SFB\13FLOWCTL\15DEFAULT" -#endif /* !BSD_KERNEL_PRIVATE */ - -#define QFQ_MAX_CLASSES 32 -#define QFQ_MAX_WSHIFT 16 /* log2(max_weight) */ -#define QFQ_MAX_WEIGHT (1 << QFQ_MAX_WSHIFT) - -struct qfq_classstats { - u_int32_t class_handle; - u_int32_t index; - u_int32_t weight; - u_int32_t lmax; - - u_int32_t qlength; - u_int32_t qlimit; - u_int32_t period; - struct pktcntr xmitcnt; /* transmitted packet counter */ - struct pktcntr dropcnt; /* dropped packet counter */ - - /* RED, RIO, BLUE, SFB related info */ - classq_type_t qtype; - union { - /* RIO has 3 red stats */ - struct red_stats red[RIO_NDROPPREC]; - struct blue_stats blue; - struct sfb_stats sfb; - }; - classq_state_t qstate; -}; - -#ifdef BSD_KERNEL_PRIVATE -#define QFQ_DEBUG 1 /* enable extra debugging */ - -/* - * Virtual time computations. - * - * S, F and V are all computed in fixed point arithmetic with - * FRAC_BITS decimal bits. - * - * QFQ_MAX_INDEX is the maximum index allowed for a group. We need - * one bit per index. - * - * QFQ_MAX_WSHIFT is the maximum power of two supported as a weight. - * The layout of the bits is as below: - * - * [ MTU_SHIFT ][ FRAC_BITS ] - * [ MAX_INDEX ][ MIN_SLOT_SHIFT ] - * ^.__grp->index = 0 - * *.__grp->slot_shift - * - * where MIN_SLOT_SHIFT is derived by difference from the others. - * - * The max group index corresponds to Lmax/w_min, where - * Lmax=1<group mapping. Class weights are in the - * range [1, QFQ_MAX_WEIGHT], we need to map each class i to the - * group with the smallest index that can support the L_i / r_i - * configured for the class. 
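A minimal illustration of the fixed-point timestamp arithmetic described in the virtual-time comment above, assuming the usual QFQ relations F = S + len/w_i with the inverse weight precomputed as (1 << FRAC_BITS)/w_i; the DEMO_FRAC_BITS value, struct and function names below are hypothetical stand-ins and are not part of this header.

#include <stdint.h>

#define DEMO_FRAC_BITS  19                      /* fixed-point fraction bits (illustrative) */
#define DEMO_ONE_FP     (1ULL << DEMO_FRAC_BITS)

struct qfq_demo_class {
	uint64_t S;       /* start timestamp, fixed point */
	uint64_t F;       /* finish timestamp, fixed point */
	uint64_t inv_w;   /* DEMO_ONE_FP / weight, precomputed */
};

/*
 * On enqueue of a packet of `len` bytes: serve no earlier than the
 * system virtual time `v`, then push the finish time out by len/w_i,
 * computed as len * (ONE_FP / w_i) so the per-packet update is a
 * multiply rather than a division.
 */
static void
qfq_demo_update_timestamps(struct qfq_demo_class *cl, uint64_t v, uint32_t len)
{
	cl->S = (cl->F > v) ? cl->F : v;
	cl->F = cl->S + (uint64_t)len * cl->inv_w;
}

In the usual QFQ formulation this precomputed inverse weight is exactly why FRAC_BITS fixed-point arithmetic is used: it keeps the per-packet timestamp update to an add and a multiply.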
- * - * grp->qfg_index is the index of the group; and grp->qfg_slot_shift - * is the shift for the corresponding (scaled) sigma_i. - * - * When computing the group index, we do (len<qif_ifq->ifcq_ifp) - -struct if_ifclassq_stats; - -extern void qfq_init(void); -extern struct qfq_if *qfq_alloc(struct ifnet *, int); -extern int qfq_destroy(struct qfq_if *); -extern void qfq_purge(struct qfq_if *); -extern void qfq_event(struct qfq_if *, cqev_t); -extern int qfq_add_queue(struct qfq_if *, u_int32_t, u_int32_t, u_int32_t, - u_int32_t, u_int32_t, struct qfq_class **, classq_pkt_type_t); -extern int qfq_remove_queue(struct qfq_if *, u_int32_t); -extern int qfq_get_class_stats(struct qfq_if *, u_int32_t, - struct qfq_classstats *); -extern int qfq_enqueue(struct qfq_if *, struct qfq_class *, pktsched_pkt_t *, - struct pf_mtag *); -extern void qfq_dequeue(struct qfq_if *, pktsched_pkt_t *); -extern int qfq_setup_ifclassq(struct ifclassq *, u_int32_t, classq_pkt_type_t); -extern int qfq_teardown_ifclassq(struct ifclassq *ifq); -extern int qfq_getqstats_ifclassq(struct ifclassq *, u_int32_t, - struct if_ifclassq_stats *); -#endif /* BSD_KERNEL_PRIVATE */ -#ifdef __cplusplus -} -#endif -#endif /* PRIVATE */ -#endif /* _NET_PKTSCHED_PKTSCHED_QFQ_H_ */ diff --git a/bsd/net/pktsched/pktsched_tcq.c b/bsd/net/pktsched/pktsched_tcq.c deleted file mode 100644 index aa4ccec53..000000000 --- a/bsd/net/pktsched/pktsched_tcq.c +++ /dev/null @@ -1,1103 +0,0 @@ -/* - * Copyright (c) 2011-2018 Apple Inc. All rights reserved. - * - * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * - * This file contains Original Code and/or Modifications of Original Code - * as defined in and that are subject to the Apple Public Source License - * Version 2.0 (the 'License'). You may not use this file except in - * compliance with the License. The rights granted to you under the License - * may not be used to create, or enable the creation or redistribution of, - * unlawful or unlicensed copies of an Apple operating system, or to - * circumvent, violate, or enable the circumvention or violation of, any - * terms of an Apple operating system software license agreement. - * - * Please obtain a copy of the License at - * http://www.opensource.apple.com/apsl/ and read it before using this file. - * - * The Original Code and all software distributed under the License are - * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER - * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, - * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. - * Please see the License for the specific language governing rights and - * limitations under the License. 
- * - * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ - */ - -/* - * traffic class queue - */ - -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -#include -#include - -#include -#include - - -/* - * function prototypes - */ -static int tcq_enqueue_ifclassq(struct ifclassq *, classq_pkt_t *, boolean_t *); -static void tcq_dequeue_tc_ifclassq(struct ifclassq *, mbuf_svc_class_t, - classq_pkt_t *); -static int tcq_request_ifclassq(struct ifclassq *, cqrq_t, void *); -static int tcq_clear_interface(struct tcq_if *); -static struct tcq_class *tcq_class_create(struct tcq_if *, int, u_int32_t, - int, u_int32_t, classq_pkt_type_t); -static int tcq_class_destroy(struct tcq_if *, struct tcq_class *); -static int tcq_destroy_locked(struct tcq_if *); -static inline int tcq_addq(struct tcq_class *, pktsched_pkt_t *, - struct pf_mtag *); -static inline void tcq_getq(struct tcq_class *, pktsched_pkt_t *); -static void tcq_purgeq(struct tcq_if *, struct tcq_class *, u_int32_t, - u_int32_t *, u_int32_t *); -static void tcq_purge_sc(struct tcq_if *, cqrq_purge_sc_t *); -static void tcq_updateq(struct tcq_if *, struct tcq_class *, cqev_t); -static int tcq_throttle(struct tcq_if *, cqrq_throttle_t *); -static int tcq_resumeq(struct tcq_if *, struct tcq_class *); -static int tcq_suspendq(struct tcq_if *, struct tcq_class *); -static int tcq_stat_sc(struct tcq_if *, cqrq_stat_sc_t *); -static void tcq_dequeue_cl(struct tcq_if *, struct tcq_class *, - mbuf_svc_class_t, pktsched_pkt_t *); -static inline struct tcq_class *tcq_clh_to_clp(struct tcq_if *, u_int32_t); -static const char *tcq_style(struct tcq_if *); - -#define TCQ_ZONE_MAX 32 /* maximum elements in zone */ -#define TCQ_ZONE_NAME "pktsched_tcq" /* zone name */ - -static unsigned int tcq_size; /* size of zone element */ -static struct zone *tcq_zone; /* zone for tcq */ - -#define TCQ_CL_ZONE_MAX 32 /* maximum elements in zone */ -#define TCQ_CL_ZONE_NAME "pktsched_tcq_cl" /* zone name */ - -static unsigned int tcq_cl_size; /* size of zone element */ -static struct zone *tcq_cl_zone; /* zone for tcq_class */ - -void -tcq_init(void) -{ - tcq_size = sizeof(struct tcq_if); - tcq_zone = zinit(tcq_size, TCQ_ZONE_MAX * tcq_size, - 0, TCQ_ZONE_NAME); - if (tcq_zone == NULL) { - panic("%s: failed allocating %s", __func__, TCQ_ZONE_NAME); - /* NOTREACHED */ - } - zone_change(tcq_zone, Z_EXPAND, TRUE); - zone_change(tcq_zone, Z_CALLERACCT, TRUE); - - tcq_cl_size = sizeof(struct tcq_class); - tcq_cl_zone = zinit(tcq_cl_size, TCQ_CL_ZONE_MAX * tcq_cl_size, - 0, TCQ_CL_ZONE_NAME); - if (tcq_cl_zone == NULL) { - panic("%s: failed allocating %s", __func__, TCQ_CL_ZONE_NAME); - /* NOTREACHED */ - } - zone_change(tcq_cl_zone, Z_EXPAND, TRUE); - zone_change(tcq_cl_zone, Z_CALLERACCT, TRUE); -} - -struct tcq_if * -tcq_alloc(struct ifnet *ifp, int how) -{ - struct tcq_if *tif; - - tif = (how == M_WAITOK) ? 
zalloc(tcq_zone) : zalloc_noblock(tcq_zone); - if (tif == NULL) { - return NULL; - } - - bzero(tif, tcq_size); - tif->tif_maxpri = -1; - tif->tif_ifq = &ifp->if_snd; - - if (pktsched_verbose) { - log(LOG_DEBUG, "%s: %s scheduler allocated\n", - if_name(ifp), tcq_style(tif)); - } - - return tif; -} - -int -tcq_destroy(struct tcq_if *tif) -{ - struct ifclassq *ifq = tif->tif_ifq; - int err; - - IFCQ_LOCK(ifq); - err = tcq_destroy_locked(tif); - IFCQ_UNLOCK(ifq); - - return err; -} - -static int -tcq_destroy_locked(struct tcq_if *tif) -{ - IFCQ_LOCK_ASSERT_HELD(tif->tif_ifq); - - (void) tcq_clear_interface(tif); - - if (pktsched_verbose) { - log(LOG_DEBUG, "%s: %s scheduler destroyed\n", - if_name(TCQIF_IFP(tif)), tcq_style(tif)); - } - - zfree(tcq_zone, tif); - - return 0; -} - -/* - * bring the interface back to the initial state by discarding - * all the filters and classes. - */ -static int -tcq_clear_interface(struct tcq_if *tif) -{ - struct tcq_class *cl; - int pri; - - IFCQ_LOCK_ASSERT_HELD(tif->tif_ifq); - - /* clear out the classes */ - for (pri = 0; pri <= tif->tif_maxpri; pri++) { - if ((cl = tif->tif_classes[pri]) != NULL) { - tcq_class_destroy(tif, cl); - } - } - - return 0; -} - -/* discard all the queued packets on the interface */ -void -tcq_purge(struct tcq_if *tif) -{ - struct tcq_class *cl; - int pri; - - IFCQ_LOCK_ASSERT_HELD(tif->tif_ifq); - - for (pri = 0; pri <= tif->tif_maxpri; pri++) { - if ((cl = tif->tif_classes[pri]) != NULL && !qempty(&cl->cl_q)) { - tcq_purgeq(tif, cl, 0, NULL, NULL); - } - } - VERIFY(IFCQ_LEN(tif->tif_ifq) == 0); -} - -static void -tcq_purge_sc(struct tcq_if *tif, cqrq_purge_sc_t *pr) -{ - struct ifclassq *ifq = tif->tif_ifq; - u_int32_t i; - - IFCQ_LOCK_ASSERT_HELD(ifq); - - VERIFY(pr->sc == MBUF_SC_UNSPEC || MBUF_VALID_SC(pr->sc)); - VERIFY(pr->flow != 0); - - if (pr->sc != MBUF_SC_UNSPEC) { - i = MBUF_SCIDX(pr->sc); - VERIFY(i < IFCQ_SC_MAX); - - tcq_purgeq(tif, ifq->ifcq_disc_slots[i].cl, - pr->flow, &pr->packets, &pr->bytes); - } else { - u_int32_t cnt, len; - - pr->packets = 0; - pr->bytes = 0; - - for (i = 0; i < IFCQ_SC_MAX; i++) { - tcq_purgeq(tif, ifq->ifcq_disc_slots[i].cl, - pr->flow, &cnt, &len); - pr->packets += cnt; - pr->bytes += len; - } - } -} - -void -tcq_event(struct tcq_if *tif, cqev_t ev) -{ - struct tcq_class *cl; - int pri; - - IFCQ_LOCK_ASSERT_HELD(tif->tif_ifq); - - for (pri = 0; pri <= tif->tif_maxpri; pri++) { - if ((cl = tif->tif_classes[pri]) != NULL) { - tcq_updateq(tif, cl, ev); - } - } -} - -int -tcq_add_queue(struct tcq_if *tif, int priority, u_int32_t qlimit, - int flags, u_int32_t qid, struct tcq_class **clp, classq_pkt_type_t ptype) -{ - struct tcq_class *cl; - - IFCQ_LOCK_ASSERT_HELD(tif->tif_ifq); - - /* check parameters */ - if (priority >= TCQ_MAXPRI) { - return EINVAL; - } - if (tif->tif_classes[priority] != NULL) { - return EBUSY; - } - if (tcq_clh_to_clp(tif, qid) != NULL) { - return EBUSY; - } - - cl = tcq_class_create(tif, priority, qlimit, flags, qid, ptype); - if (cl == NULL) { - return ENOMEM; - } - - if (clp != NULL) { - *clp = cl; - } - - return 0; -} - -static struct tcq_class * -tcq_class_create(struct tcq_if *tif, int pri, u_int32_t qlimit, - int flags, u_int32_t qid, classq_pkt_type_t ptype) -{ - struct ifnet *ifp; - struct ifclassq *ifq; - struct tcq_class *cl; - - IFCQ_LOCK_ASSERT_HELD(tif->tif_ifq); - - ifq = tif->tif_ifq; - ifp = TCQIF_IFP(tif); - - if ((cl = tif->tif_classes[pri]) != NULL) { - /* modify the class instead of creating a new one */ - if (!qempty(&cl->cl_q)) { - 
tcq_purgeq(tif, cl, 0, NULL, NULL); - } - - if (q_is_sfb(&cl->cl_q) && cl->cl_sfb != NULL) { - sfb_destroy(cl->cl_sfb); - } - cl->cl_qalg.ptr = NULL; - qtype(&cl->cl_q) = Q_DROPTAIL; - qstate(&cl->cl_q) = QS_RUNNING; - VERIFY(qptype(&cl->cl_q) == ptype); - } else { - cl = zalloc(tcq_cl_zone); - if (cl == NULL) { - return NULL; - } - - bzero(cl, tcq_cl_size); - } - - tif->tif_classes[pri] = cl; - if (flags & TQCF_DEFAULTCLASS) { - tif->tif_default = cl; - } - if (qlimit == 0 || qlimit > IFCQ_MAXLEN(ifq)) { - qlimit = IFCQ_MAXLEN(ifq); - if (qlimit == 0) { - qlimit = DEFAULT_QLIMIT; /* use default */ - } - } - _qinit(&cl->cl_q, Q_DROPTAIL, qlimit, ptype); - cl->cl_flags = flags; - cl->cl_pri = pri; - if (pri > tif->tif_maxpri) { - tif->tif_maxpri = pri; - } - cl->cl_tif = tif; - cl->cl_handle = qid; - - if (flags & TQCF_SFB) { - cl->cl_qflags = 0; - if (flags & TQCF_ECN) { - cl->cl_qflags |= SFBF_ECN; - } - if (flags & TQCF_FLOWCTL) { - cl->cl_qflags |= SFBF_FLOWCTL; - } - if (flags & TQCF_DELAYBASED) { - cl->cl_qflags |= SFBF_DELAYBASED; - } - if (!(cl->cl_flags & TQCF_LAZY)) { - cl->cl_sfb = sfb_alloc(ifp, cl->cl_handle, - qlimit(&cl->cl_q), cl->cl_qflags); - } - if (cl->cl_sfb != NULL || (cl->cl_flags & TQCF_LAZY)) { - qtype(&cl->cl_q) = Q_SFB; - } - } - - if (pktsched_verbose) { - log(LOG_DEBUG, "%s: %s created qid=%d pri=%d qlimit=%d " - "flags=%b\n", if_name(ifp), tcq_style(tif), - cl->cl_handle, cl->cl_pri, qlimit, flags, TQCF_BITS); - } - - return cl; -} - -int -tcq_remove_queue(struct tcq_if *tif, u_int32_t qid) -{ - struct tcq_class *cl; - - IFCQ_LOCK_ASSERT_HELD(tif->tif_ifq); - - if ((cl = tcq_clh_to_clp(tif, qid)) == NULL) { - return EINVAL; - } - - return tcq_class_destroy(tif, cl); -} - -static int -tcq_class_destroy(struct tcq_if *tif, struct tcq_class *cl) -{ - struct ifclassq *ifq = tif->tif_ifq; - int pri; -#if !MACH_ASSERT -#pragma unused(ifq) -#endif - IFCQ_LOCK_ASSERT_HELD(ifq); - - if (!qempty(&cl->cl_q)) { - tcq_purgeq(tif, cl, 0, NULL, NULL); - } - - tif->tif_classes[cl->cl_pri] = NULL; - if (tif->tif_maxpri == cl->cl_pri) { - for (pri = cl->cl_pri; pri >= 0; pri--) { - if (tif->tif_classes[pri] != NULL) { - tif->tif_maxpri = pri; - break; - } - } - if (pri < 0) { - tif->tif_maxpri = -1; - } - } - - if (tif->tif_default == cl) { - tif->tif_default = NULL; - } - - if (cl->cl_qalg.ptr != NULL) { - if (q_is_sfb(&cl->cl_q) && cl->cl_sfb != NULL) { - sfb_destroy(cl->cl_sfb); - } - cl->cl_qalg.ptr = NULL; - qtype(&cl->cl_q) = Q_DROPTAIL; - qstate(&cl->cl_q) = QS_RUNNING; - } - - if (pktsched_verbose) { - log(LOG_DEBUG, "%s: %s destroyed qid=%d pri=%d\n", - if_name(TCQIF_IFP(tif)), tcq_style(tif), - cl->cl_handle, cl->cl_pri); - } - - zfree(tcq_cl_zone, cl); - return 0; -} - -int -tcq_enqueue(struct tcq_if *tif, struct tcq_class *cl, pktsched_pkt_t *pkt, - struct pf_mtag *t) -{ - struct ifclassq *ifq = tif->tif_ifq; - int len, ret; - - IFCQ_LOCK_ASSERT_HELD(ifq); - VERIFY(cl == NULL || cl->cl_tif == tif); - - if (cl == NULL) { - cl = tcq_clh_to_clp(tif, 0); - if (cl == NULL) { - cl = tif->tif_default; - if (cl == NULL) { - IFCQ_CONVERT_LOCK(ifq); - return CLASSQEQ_DROP; - } - } - } - - VERIFY(pkt->pktsched_ptype == qptype(&cl->cl_q)); - len = pktsched_get_pkt_len(pkt); - - ret = tcq_addq(cl, pkt, t); - if ((ret != 0) && (ret != CLASSQEQ_SUCCESS_FC)) { - VERIFY(ret == CLASSQEQ_DROP || - ret == CLASSQEQ_DROP_FC || - ret == CLASSQEQ_DROP_SP); - PKTCNTR_ADD(&cl->cl_dropcnt, 1, len); - IFCQ_DROP_ADD(ifq, 1, len); - return ret; - } - IFCQ_INC_LEN(ifq); - IFCQ_INC_BYTES(ifq, len); 
- - /* successfully queued. */ - return ret; -} - -/* - * note: CLASSQDQ_POLL returns the next packet without removing the packet - * from the queue. CLASSQDQ_REMOVE is a normal dequeue operation. - * CLASSQDQ_REMOVE must return the same packet if called immediately - * after CLASSQDQ_POLL. - */ -void -tcq_dequeue_tc(struct tcq_if *tif, mbuf_svc_class_t sc, pktsched_pkt_t *pkt) -{ - tcq_dequeue_cl(tif, NULL, sc, pkt); -} - -static void -tcq_dequeue_cl(struct tcq_if *tif, struct tcq_class *cl, mbuf_svc_class_t sc, - pktsched_pkt_t *pkt) -{ - struct ifclassq *ifq = tif->tif_ifq; - uint32_t len; - - IFCQ_LOCK_ASSERT_HELD(ifq); - pkt->pktsched_pkt_mbuf = NULL; - - if (cl == NULL) { - cl = tcq_clh_to_clp(tif, MBUF_SCIDX(sc)); - if (cl == NULL) { - return; - } - } - - if (qempty(&cl->cl_q)) { - return; - } - - VERIFY(!IFCQ_IS_EMPTY(ifq)); - - tcq_getq(cl, pkt); - if (pkt->pktsched_pkt_mbuf != NULL) { - len = pktsched_get_pkt_len(pkt); - IFCQ_DEC_LEN(ifq); - IFCQ_DEC_BYTES(ifq, len); - if (qempty(&cl->cl_q)) { - cl->cl_period++; - } - PKTCNTR_ADD(&cl->cl_xmitcnt, 1, len); - IFCQ_XMIT_ADD(ifq, 1, len); - } -} - -static inline int -tcq_addq(struct tcq_class *cl, pktsched_pkt_t *pkt, struct pf_mtag *t) -{ - struct tcq_if *tif = cl->cl_tif; - struct ifclassq *ifq = tif->tif_ifq; - - IFCQ_LOCK_ASSERT_HELD(ifq); - - if (q_is_sfb(&cl->cl_q)) { - if (cl->cl_sfb == NULL) { - struct ifnet *ifp = TCQIF_IFP(tif); - - VERIFY(cl->cl_flags & TQCF_LAZY); - cl->cl_flags &= ~TQCF_LAZY; - IFCQ_CONVERT_LOCK(ifq); - - cl->cl_sfb = sfb_alloc(ifp, cl->cl_handle, - qlimit(&cl->cl_q), cl->cl_qflags); - if (cl->cl_sfb == NULL) { - /* fall back to droptail */ - qtype(&cl->cl_q) = Q_DROPTAIL; - cl->cl_flags &= ~TQCF_SFB; - cl->cl_qflags &= ~(SFBF_ECN | SFBF_FLOWCTL); - - log(LOG_ERR, "%s: %s SFB lazy allocation " - "failed for qid=%d pri=%d, falling back " - "to DROPTAIL\n", if_name(ifp), - tcq_style(tif), cl->cl_handle, - cl->cl_pri); - } else if (tif->tif_throttle != IFNET_THROTTLE_OFF) { - /* if there's pending throttling, set it */ - cqrq_throttle_t tr = { 1, tif->tif_throttle }; - int err = tcq_throttle(tif, &tr); - - if (err == EALREADY) { - err = 0; - } - if (err != 0) { - tr.level = IFNET_THROTTLE_OFF; - (void) tcq_throttle(tif, &tr); - } - } - } - if (cl->cl_sfb != NULL) { - return sfb_addq(cl->cl_sfb, &cl->cl_q, pkt, t); - } - } else if (qlen(&cl->cl_q) >= qlimit(&cl->cl_q)) { - IFCQ_CONVERT_LOCK(ifq); - return CLASSQEQ_DROP; - } - -#if PF_ECN - if (cl->cl_flags & TQCF_CLEARDSCP) { - /* not supported for non-BSD stack packets */ - VERIFY(pkt->pktsched_ptype == QP_MBUF); - } - write_dsfield(m, t, 0); -#endif /* PF_ECN */ - - VERIFY(pkt->pktsched_ptype == qptype(&cl->cl_q)); - _addq(&cl->cl_q, &pkt->pktsched_pkt); - - return 0; -} - -static inline void -tcq_getq(struct tcq_class *cl, pktsched_pkt_t *pkt) -{ - classq_pkt_t p = CLASSQ_PKT_INITIALIZER(p); - - IFCQ_LOCK_ASSERT_HELD(cl->cl_tif->tif_ifq); - - if (q_is_sfb(&cl->cl_q) && cl->cl_sfb != NULL) { - return sfb_getq(cl->cl_sfb, &cl->cl_q, pkt); - } - - _getq(&cl->cl_q, &p); - return pktsched_pkt_encap(pkt, &p); -} - -static void -tcq_purgeq(struct tcq_if *tif, struct tcq_class *cl, u_int32_t flow, - u_int32_t *packets, u_int32_t *bytes) -{ - struct ifclassq *ifq = tif->tif_ifq; - u_int32_t cnt = 0, len = 0, qlen; - - IFCQ_LOCK_ASSERT_HELD(ifq); - - if ((qlen = qlen(&cl->cl_q)) == 0) { - goto done; - } - - IFCQ_CONVERT_LOCK(ifq); - if (q_is_sfb(&cl->cl_q) && cl->cl_sfb != NULL) { - sfb_purgeq(cl->cl_sfb, &cl->cl_q, flow, &cnt, &len); - } else { - 
_flushq_flow(&cl->cl_q, flow, &cnt, &len); - } - - if (cnt > 0) { - VERIFY(qlen(&cl->cl_q) == (qlen - cnt)); - - PKTCNTR_ADD(&cl->cl_dropcnt, cnt, len); - IFCQ_DROP_ADD(ifq, cnt, len); - - VERIFY(((signed)IFCQ_LEN(ifq) - cnt) >= 0); - IFCQ_LEN(ifq) -= cnt; - - if (pktsched_verbose) { - log(LOG_DEBUG, "%s: %s purge qid=%d pri=%d " - "qlen=[%d,%d] cnt=%d len=%d flow=0x%x\n", - if_name(TCQIF_IFP(tif)), tcq_style(tif), - cl->cl_handle, cl->cl_pri, qlen, qlen(&cl->cl_q), - cnt, len, flow); - } - } -done: - if (packets != NULL) { - *packets = cnt; - } - if (bytes != NULL) { - *bytes = len; - } -} - -static void -tcq_updateq(struct tcq_if *tif, struct tcq_class *cl, cqev_t ev) -{ - IFCQ_LOCK_ASSERT_HELD(tif->tif_ifq); - - if (pktsched_verbose) { - log(LOG_DEBUG, "%s: %s update qid=%d pri=%d event=%s\n", - if_name(TCQIF_IFP(tif)), tcq_style(tif), - cl->cl_handle, cl->cl_pri, ifclassq_ev2str(ev)); - } - - if (q_is_sfb(&cl->cl_q) && cl->cl_sfb != NULL) { - return sfb_updateq(cl->cl_sfb, ev); - } -} - -int -tcq_get_class_stats(struct tcq_if *tif, u_int32_t qid, - struct tcq_classstats *sp) -{ - struct tcq_class *cl; - - IFCQ_LOCK_ASSERT_HELD(tif->tif_ifq); - - if ((cl = tcq_clh_to_clp(tif, qid)) == NULL) { - return EINVAL; - } - - sp->class_handle = cl->cl_handle; - sp->priority = cl->cl_pri; - sp->qlength = qlen(&cl->cl_q); - sp->qlimit = qlimit(&cl->cl_q); - sp->period = cl->cl_period; - sp->xmitcnt = cl->cl_xmitcnt; - sp->dropcnt = cl->cl_dropcnt; - - sp->qtype = qtype(&cl->cl_q); - sp->qstate = qstate(&cl->cl_q); - - if (q_is_sfb(&cl->cl_q) && cl->cl_sfb != NULL) { - sfb_getstats(cl->cl_sfb, &sp->sfb); - } - - return 0; -} - -static int -tcq_stat_sc(struct tcq_if *tif, cqrq_stat_sc_t *sr) -{ - struct ifclassq *ifq = tif->tif_ifq; - struct tcq_class *cl; - u_int32_t i; - - IFCQ_LOCK_ASSERT_HELD(ifq); - - VERIFY(sr->sc == MBUF_SC_UNSPEC || MBUF_VALID_SC(sr->sc)); - - i = MBUF_SCIDX(sr->sc); - VERIFY(i < IFCQ_SC_MAX); - - cl = ifq->ifcq_disc_slots[i].cl; - sr->packets = qlen(&cl->cl_q); - sr->bytes = qsize(&cl->cl_q); - - return 0; -} - -/* convert a class handle to the corresponding class pointer */ -static inline struct tcq_class * -tcq_clh_to_clp(struct tcq_if *tif, u_int32_t chandle) -{ - struct tcq_class *cl; - int idx; - - IFCQ_LOCK_ASSERT_HELD(tif->tif_ifq); - - for (idx = tif->tif_maxpri; idx >= 0; idx--) { - if ((cl = tif->tif_classes[idx]) != NULL && - cl->cl_handle == chandle) { - return cl; - } - } - - return NULL; -} - -static const char * -tcq_style(struct tcq_if *tif) -{ -#pragma unused(tif) - return "TCQ"; -} - -/* - * tcq_enqueue_ifclassq is an enqueue function to be registered to - * (*ifcq_enqueue) in struct ifclassq. 
- */ -static int -tcq_enqueue_ifclassq(struct ifclassq *ifq, classq_pkt_t *p, boolean_t *pdrop) -{ - u_int32_t i = 0; - int ret; - pktsched_pkt_t pkt; - struct pf_mtag *t = NULL; - - IFCQ_LOCK_ASSERT_HELD(ifq); - - if (p->cp_ptype == QP_MBUF) { - struct mbuf *m = p->cp_mbuf; - if (!(m->m_flags & M_PKTHDR)) { - /* should not happen */ - log(LOG_ERR, "%s: packet does not have pkthdr\n", - if_name(ifq->ifcq_ifp)); - IFCQ_CONVERT_LOCK(ifq); - m_freem(m); - *p = CLASSQ_PKT_INITIALIZER(*p); - *pdrop = TRUE; - return ENOBUFS; - } - t = m_pftag(m); - i = MBUF_SCIDX(mbuf_get_service_class(m)); - } - VERIFY((u_int32_t)i < IFCQ_SC_MAX); - - pktsched_pkt_encap(&pkt, p); - - ret = tcq_enqueue(ifq->ifcq_disc, - ifq->ifcq_disc_slots[i].cl, &pkt, t); - - if ((ret != 0) && (ret != CLASSQEQ_SUCCESS_FC)) { - pktsched_free_pkt(&pkt); - *pdrop = TRUE; - } else { - *pdrop = FALSE; - } - - switch (ret) { - case CLASSQEQ_DROP: - ret = ENOBUFS; - break; - case CLASSQEQ_DROP_FC: - ret = EQFULL; - break; - case CLASSQEQ_DROP_SP: - ret = EQSUSPENDED; - break; - case CLASSQEQ_SUCCESS_FC: - ret = EQFULL; - break; - case CLASSQEQ_SUCCESS: - ret = 0; - break; - default: - VERIFY(0); - __builtin_unreachable(); - } - return ret; -} - -/* - * tcq_dequeue_tc_ifclassq is a dequeue function to be registered to - * (*ifcq_dequeue) in struct ifclass. - * - * note: CLASSQDQ_POLL returns the next packet without removing the packet - * from the queue. CLASSQDQ_REMOVE is a normal dequeue operation. - * CLASSQDQ_REMOVE must return the same packet if called immediately - * after CLASSQDQ_POLL. - */ -static void -tcq_dequeue_tc_ifclassq(struct ifclassq *ifq, mbuf_svc_class_t sc, - classq_pkt_t *cpkt) -{ - pktsched_pkt_t pkt; - u_int32_t i = MBUF_SCIDX(sc); - - VERIFY((u_int32_t)i < IFCQ_SC_MAX); - - _PKTSCHED_PKT_INIT(&pkt); - (tcq_dequeue_cl(ifq->ifcq_disc, ifq->ifcq_disc_slots[i].cl, sc, &pkt)); - *cpkt = pkt.pktsched_pkt; -} - -static int -tcq_request_ifclassq(struct ifclassq *ifq, cqrq_t req, void *arg) -{ - struct tcq_if *tif = (struct tcq_if *)ifq->ifcq_disc; - int err = 0; - - IFCQ_LOCK_ASSERT_HELD(ifq); - - switch (req) { - case CLASSQRQ_PURGE: - tcq_purge(tif); - break; - - case CLASSQRQ_PURGE_SC: - tcq_purge_sc(tif, (cqrq_purge_sc_t *)arg); - break; - - case CLASSQRQ_EVENT: - tcq_event(tif, (cqev_t)arg); - break; - - case CLASSQRQ_THROTTLE: - err = tcq_throttle(tif, (cqrq_throttle_t *)arg); - break; - - case CLASSQRQ_STAT_SC: - err = tcq_stat_sc(tif, (cqrq_stat_sc_t *)arg); - break; - } - return err; -} - -int -tcq_setup_ifclassq(struct ifclassq *ifq, u_int32_t flags, - classq_pkt_type_t ptype) -{ - struct ifnet *ifp = ifq->ifcq_ifp; - struct tcq_class *cl0, *cl1, *cl2, *cl3; - struct tcq_if *tif; - u_int32_t maxlen = 0, qflags = 0; - int err = 0; - - IFCQ_LOCK_ASSERT_HELD(ifq); - VERIFY(ifq->ifcq_disc == NULL); - VERIFY(ifq->ifcq_type == PKTSCHEDT_NONE); - - if (flags & PKTSCHEDF_QALG_SFB) { - qflags |= TQCF_SFB; - } - if (flags & PKTSCHEDF_QALG_ECN) { - qflags |= TQCF_ECN; - } - if (flags & PKTSCHEDF_QALG_FLOWCTL) { - qflags |= TQCF_FLOWCTL; - } - if (flags & PKTSCHEDF_QALG_DELAYBASED) { - qflags |= TQCF_DELAYBASED; - } - - tif = tcq_alloc(ifp, M_WAITOK); - if (tif == NULL) { - return ENOMEM; - } - - if ((maxlen = IFCQ_MAXLEN(ifq)) == 0) { - maxlen = if_sndq_maxlen; - } - - if ((err = tcq_add_queue(tif, 0, maxlen, - qflags | TQCF_LAZY, SCIDX_BK, &cl0, ptype)) != 0) { - goto cleanup; - } - - if ((err = tcq_add_queue(tif, 1, maxlen, - qflags | TQCF_DEFAULTCLASS, SCIDX_BE, &cl1, ptype)) != 0) { - goto cleanup; - } - - if ((err 
= tcq_add_queue(tif, 2, maxlen, - qflags | TQCF_LAZY, SCIDX_VI, &cl2, ptype)) != 0) { - goto cleanup; - } - - if ((err = tcq_add_queue(tif, 3, maxlen, - qflags, SCIDX_VO, &cl3, ptype)) != 0) { - goto cleanup; - } - - err = ifclassq_attach(ifq, PKTSCHEDT_TCQ, tif, - tcq_enqueue_ifclassq, NULL, tcq_dequeue_tc_ifclassq, - NULL, NULL, tcq_request_ifclassq); - - /* cache these for faster lookup */ - if (err == 0) { - /* Map {BK_SYS,BK} to TC_BK */ - ifq->ifcq_disc_slots[SCIDX_BK_SYS].qid = SCIDX_BK; - ifq->ifcq_disc_slots[SCIDX_BK_SYS].cl = cl0; - - ifq->ifcq_disc_slots[SCIDX_BK].qid = SCIDX_BK; - ifq->ifcq_disc_slots[SCIDX_BK].cl = cl0; - - /* Map {BE,RD,OAM} to TC_BE */ - ifq->ifcq_disc_slots[SCIDX_BE].qid = SCIDX_BE; - ifq->ifcq_disc_slots[SCIDX_BE].cl = cl1; - - ifq->ifcq_disc_slots[SCIDX_RD].qid = SCIDX_BE; - ifq->ifcq_disc_slots[SCIDX_RD].cl = cl1; - - ifq->ifcq_disc_slots[SCIDX_OAM].qid = SCIDX_BE; - ifq->ifcq_disc_slots[SCIDX_OAM].cl = cl1; - - /* Map {AV,RV,VI} to TC_VI */ - ifq->ifcq_disc_slots[SCIDX_AV].qid = SCIDX_VI; - ifq->ifcq_disc_slots[SCIDX_AV].cl = cl2; - - ifq->ifcq_disc_slots[SCIDX_RV].qid = SCIDX_VI; - ifq->ifcq_disc_slots[SCIDX_RV].cl = cl2; - - ifq->ifcq_disc_slots[SCIDX_VI].qid = SCIDX_VI; - ifq->ifcq_disc_slots[SCIDX_VI].cl = cl2; - - /* Map {VO,CTL} to TC_VO */ - ifq->ifcq_disc_slots[SCIDX_VO].qid = SCIDX_VO; - ifq->ifcq_disc_slots[SCIDX_VO].cl = cl3; - - ifq->ifcq_disc_slots[SCIDX_CTL].qid = SCIDX_VO; - ifq->ifcq_disc_slots[SCIDX_CTL].cl = cl3; - } - -cleanup: - if (err != 0) { - (void) tcq_destroy_locked(tif); - } - - return err; -} - -int -tcq_teardown_ifclassq(struct ifclassq *ifq) -{ - struct tcq_if *tif = ifq->ifcq_disc; - int i; - - IFCQ_LOCK_ASSERT_HELD(ifq); - VERIFY(tif != NULL && ifq->ifcq_type == PKTSCHEDT_TCQ); - - (void) tcq_destroy_locked(tif); - - ifq->ifcq_disc = NULL; - for (i = 0; i < IFCQ_SC_MAX; i++) { - ifq->ifcq_disc_slots[i].qid = 0; - ifq->ifcq_disc_slots[i].cl = NULL; - } - - return ifclassq_detach(ifq); -} - -int -tcq_getqstats_ifclassq(struct ifclassq *ifq, u_int32_t slot, - struct if_ifclassq_stats *ifqs) -{ - struct tcq_if *tif = ifq->ifcq_disc; - - IFCQ_LOCK_ASSERT_HELD(ifq); - VERIFY(ifq->ifcq_type == PKTSCHEDT_TCQ); - - if (slot >= IFCQ_SC_MAX) { - return EINVAL; - } - - return tcq_get_class_stats(tif, ifq->ifcq_disc_slots[slot].qid, - &ifqs->ifqs_tcq_stats); -} - -static int -tcq_throttle(struct tcq_if *tif, cqrq_throttle_t *tr) -{ - struct ifclassq *ifq = tif->tif_ifq; - struct tcq_class *cl; - int err = 0; - - IFCQ_LOCK_ASSERT_HELD(ifq); - - if (!tr->set) { - tr->level = tif->tif_throttle; - return 0; - } - - if (tr->level == tif->tif_throttle) { - return EALREADY; - } - - /* Current throttling levels only involve BK_SYS class */ - cl = ifq->ifcq_disc_slots[SCIDX_BK_SYS].cl; - - switch (tr->level) { - case IFNET_THROTTLE_OFF: - err = tcq_resumeq(tif, cl); - break; - - case IFNET_THROTTLE_OPPORTUNISTIC: - err = tcq_suspendq(tif, cl); - break; - - default: - VERIFY(0); - /* NOTREACHED */ - } - - if (err == 0 || err == ENXIO) { - if (pktsched_verbose) { - log(LOG_DEBUG, "%s: %s throttling %slevel set %d->%d\n", - if_name(TCQIF_IFP(tif)), tcq_style(tif), - (err == 0) ? 
"" : "lazy ", tif->tif_throttle, - tr->level); - } - tif->tif_throttle = tr->level; - if (err != 0) { - err = 0; - } else { - tcq_purgeq(tif, cl, 0, NULL, NULL); - } - } else { - log(LOG_ERR, "%s: %s unable to set throttling level " - "%d->%d [error=%d]\n", if_name(TCQIF_IFP(tif)), - tcq_style(tif), tif->tif_throttle, tr->level, err); - } - - return err; -} - -static int -tcq_resumeq(struct tcq_if *tif, struct tcq_class *cl) -{ - struct ifclassq *ifq = tif->tif_ifq; - int err = 0; -#if !MACH_ASSERT -#pragma unused(ifq) -#endif - IFCQ_LOCK_ASSERT_HELD(ifq); - - if (q_is_sfb(&cl->cl_q) && cl->cl_sfb != NULL) { - err = sfb_suspendq(cl->cl_sfb, &cl->cl_q, FALSE); - } - - if (err == 0) { - qstate(&cl->cl_q) = QS_RUNNING; - } - - return err; -} - -static int -tcq_suspendq(struct tcq_if *tif, struct tcq_class *cl) -{ - struct ifclassq *ifq = tif->tif_ifq; - int err = 0; -#if !MACH_ASSERT -#pragma unused(ifq) -#endif - IFCQ_LOCK_ASSERT_HELD(ifq); - - if (q_is_sfb(&cl->cl_q)) { - if (cl->cl_sfb != NULL) { - err = sfb_suspendq(cl->cl_sfb, &cl->cl_q, TRUE); - } else { - VERIFY(cl->cl_flags & TQCF_LAZY); - err = ENXIO; /* delayed throttling */ - } - } - - if (err == 0 || err == ENXIO) { - qstate(&cl->cl_q) = QS_SUSPENDED; - } - - return err; -} diff --git a/bsd/net/pktsched/pktsched_tcq.h b/bsd/net/pktsched/pktsched_tcq.h deleted file mode 100644 index 1939b72a4..000000000 --- a/bsd/net/pktsched/pktsched_tcq.h +++ /dev/null @@ -1,152 +0,0 @@ -/* - * Copyright (c) 2011-2016 Apple Inc. All rights reserved. - * - * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * - * This file contains Original Code and/or Modifications of Original Code - * as defined in and that are subject to the Apple Public Source License - * Version 2.0 (the 'License'). You may not use this file except in - * compliance with the License. The rights granted to you under the License - * may not be used to create, or enable the creation or redistribution of, - * unlawful or unlicensed copies of an Apple operating system, or to - * circumvent, violate, or enable the circumvention or violation of, any - * terms of an Apple operating system software license agreement. - * - * Please obtain a copy of the License at - * http://www.opensource.apple.com/apsl/ and read it before using this file. - * - * The Original Code and all software distributed under the License are - * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER - * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, - * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. - * Please see the License for the specific language governing rights and - * limitations under the License. 
- * - * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ - */ - -#ifndef _NET_PKTSCHED_PKTSCHED_TCQ_H_ -#define _NET_PKTSCHED_PKTSCHED_TCQ_H_ - -#ifdef PRIVATE -#include -#include -#include -#include -#include -#include - -#ifdef __cplusplus -extern "C" { -#endif - -#define TCQ_MAXPRI 4 /* upper limit of the number of priorities */ - -/* tcq class flags */ -#define TQCF_RED 0x0001 /* use RED */ -#define TQCF_ECN 0x0002 /* use ECN with RED/BLUE/SFB */ -#define TQCF_RIO 0x0004 /* use RIO */ -#define TQCF_CLEARDSCP 0x0010 /* clear diffserv codepoint */ -#define TQCF_BLUE 0x0100 /* use BLUE */ -#define TQCF_SFB 0x0200 /* use SFB */ -#define TQCF_FLOWCTL 0x0400 /* enable flow control advisories */ -#define TQCF_DEFAULTCLASS 0x1000 /* default class */ -#define TQCF_DELAYBASED 0x2000 /* queue sizing is delay based */ -#ifdef BSD_KERNEL_PRIVATE -#define TQCF_LAZY 0x10000000 /* on-demand resource allocation */ -#endif /* BSD_KERNEL_PRIVATE */ - -#define TQCF_USERFLAGS \ - (TQCF_RED | TQCF_ECN | TQCF_RIO | TQCF_CLEARDSCP | TQCF_BLUE | \ - TQCF_SFB | TQCF_FLOWCTL | TQCF_DEFAULTCLASS) - -#ifdef BSD_KERNEL_PRIVATE -#define TQCF_BITS \ - "\020\1RED\2ECN\3RIO\5CLEARDSCP\11BLUE\12SFB\13FLOWCTL\15DEFAULT" \ - "\35LAZY" -#else -#define TQCF_BITS \ - "\020\1RED\2ECN\3RIO\5CLEARDSCP\11BLUE\12SFB\13FLOWCTL" -#endif /* !BSD_KERNEL_PRIVATE */ - -struct tcq_classstats { - u_int32_t class_handle; - u_int32_t priority; - - u_int32_t qlength; - u_int32_t qlimit; - u_int32_t period; - struct pktcntr xmitcnt; /* transmitted packet counter */ - struct pktcntr dropcnt; /* dropped packet counter */ - - /* RED, RIO, BLUE, SFB related info */ - classq_type_t qtype; - union { - /* RIO has 3 red stats */ - struct red_stats red[RIO_NDROPPREC]; - struct blue_stats blue; - struct sfb_stats sfb; - }; - classq_state_t qstate; -}; - -#ifdef BSD_KERNEL_PRIVATE -struct tcq_class { - u_int32_t cl_handle; /* class handle */ - class_queue_t cl_q; /* class queue structure */ - u_int32_t cl_qflags; /* class queue flags */ - union { - void *ptr; - struct sfb *sfb; /* SFB state */ - } cl_qalg; - int32_t cl_pri; /* priority */ - u_int32_t cl_flags; /* class flags */ - struct tcq_if *cl_tif; /* back pointer to tif */ - - /* statistics */ - u_int32_t cl_period; /* backlog period */ - struct pktcntr cl_xmitcnt; /* transmitted packet counter */ - struct pktcntr cl_dropcnt; /* dropped packet counter */ -}; - -#define cl_sfb cl_qalg.sfb - -/* - * tcq interface state - */ -struct tcq_if { - struct ifclassq *tif_ifq; /* backpointer to ifclassq */ - int tif_maxpri; /* max priority in use */ - u_int32_t tif_throttle; /* throttling level */ - struct tcq_class *tif_default; /* default class */ - struct tcq_class *tif_classes[TCQ_MAXPRI]; /* classes */ -}; - -#define TCQIF_IFP(_tif) ((_tif)->tif_ifq->ifcq_ifp) - -struct if_ifclassq_stats; - -extern void tcq_init(void); -extern struct tcq_if *tcq_alloc(struct ifnet *, int); -extern int tcq_destroy(struct tcq_if *); -extern void tcq_purge(struct tcq_if *); -extern void tcq_event(struct tcq_if *, cqev_t); -extern int tcq_add_queue(struct tcq_if *, int, u_int32_t, int, u_int32_t, - struct tcq_class **, classq_pkt_type_t); -extern int tcq_remove_queue(struct tcq_if *, u_int32_t); -extern int tcq_get_class_stats(struct tcq_if *, u_int32_t, - struct tcq_classstats *); -extern int tcq_enqueue(struct tcq_if *, struct tcq_class *, pktsched_pkt_t *, - struct pf_mtag *); -extern void tcq_dequeue_tc(struct tcq_if *, mbuf_svc_class_t, pktsched_pkt_t *); -extern int tcq_setup_ifclassq(struct ifclassq *, u_int32_t, 
classq_pkt_type_t); -extern int tcq_teardown_ifclassq(struct ifclassq *ifq); -extern int tcq_getqstats_ifclassq(struct ifclassq *, u_int32_t qid, - struct if_ifclassq_stats *); -#endif /* BSD_KERNEL_PRIVATE */ -#ifdef __cplusplus -} -#endif -#endif /* PRIVATE */ -#endif /* _NET_PKTSCHED_PKTSCHED_TCQ_H_ */ diff --git a/bsd/net/raw_cb.c b/bsd/net/raw_cb.c index 9fbbd01ce..7de32115b 100644 --- a/bsd/net/raw_cb.c +++ b/bsd/net/raw_cb.c @@ -108,8 +108,8 @@ raw_attach(struct socket *so, int proto) return error; } rp->rcb_socket = so; - rp->rcb_proto.sp_family = SOCK_DOM(so); - rp->rcb_proto.sp_protocol = proto; + rp->rcb_proto.sp_family = (uint16_t)SOCK_DOM(so); + rp->rcb_proto.sp_protocol = (uint16_t)proto; lck_mtx_lock(raw_mtx); LIST_INSERT_HEAD(&rawcb_list, rp, list); lck_mtx_unlock(raw_mtx); diff --git a/bsd/net/raw_usrreq.c b/bsd/net/raw_usrreq.c index eb8521ed7..80d9b0564 100644 --- a/bsd/net/raw_usrreq.c +++ b/bsd/net/raw_usrreq.c @@ -328,7 +328,8 @@ raw_usend(struct socket *so, int flags, struct mbuf *m, goto release; } - if (control && control->m_len) { + if (control != NULL) { + m_freem(control); error = EOPNOTSUPP; goto release; } diff --git a/bsd/net/restricted_in_port.c b/bsd/net/restricted_in_port.c index 7e39a0d12..a93d4f31e 100644 --- a/bsd/net/restricted_in_port.c +++ b/bsd/net/restricted_in_port.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019 Apple Inc. All rights reserved. + * Copyright (c) 2019-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -68,7 +68,7 @@ struct restricted_port_entry { #define RPE_FLAG_TEST 0x10 // entry for testing static struct restricted_port_entry restricted_port_list[] = { -#if CONFIG_EMBEDDED +#if !XNU_TARGET_OS_OSX /* * Network relay proxy */ @@ -100,7 +100,16 @@ static struct restricted_port_entry restricted_port_list[] = { .rpe_flags = RPE_FLAG_ENTITLEMENT | RPE_FLAG_TCP | RPE_FLAG_UDP, .rpe_entitlement = "com.apple.private.network.restricted.port.ids_cloud_service_connector", }, -#endif /* CONFIG_EMBEDDED */ +#endif /* !XNU_TARGET_OS_OSX */ + + /* + * For RDC + */ + { + .rpe_port = 55555, + .rpe_flags = RPE_FLAG_ENTITLEMENT | RPE_FLAG_TCP, + .rpe_entitlement = "com.apple.private.network.restricted.port.lights_out_management", + }, #if (DEBUG || DEVELOPMENT) /* @@ -298,7 +307,7 @@ sysctl_restricted_port_test_common(struct sysctl_oid *oidp, if (!(rpe->rpe_flags & RPE_FLAG_TEST)) { continue; } - rpe->rpe_port = value; + rpe->rpe_port = (in_port_t)value; if (test_superuser) { rpe->rpe_flags |= RPE_FLAG_SUPERUSER; rpe->rpe_flags &= ~RPE_FLAG_ENTITLEMENT; diff --git a/bsd/net/route.c b/bsd/net/route.c index 156c375b6..a31e25bb6 100644 --- a/bsd/net/route.c +++ b/bsd/net/route.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2019 Apple Inc. All rights reserved. + * Copyright (c) 2000-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -97,11 +97,9 @@ #include #include -#if INET6 #include #include #include -#endif /* INET6 */ #include @@ -212,7 +210,7 @@ * in order to prevent the entry from being freed by the callee. 
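The restricted_port_list entries added in the restricted_in_port.c hunk above pair a port number with RPE_FLAG_* bits and an optional entitlement string. As a rough sketch of how such a table can be consulted (the helper below and its task_has_entitlement callback are hypothetical; only the field names and flag semantics come from the diff, and other restriction kinds such as RPE_FLAG_SUPERUSER are not modeled here):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define DEMO_FLAG_ENTITLEMENT  0x01    /* binding requires an entitlement */
#define DEMO_FLAG_TCP          0x04    /* restriction applies to TCP */

struct rpe_demo_entry {
	uint16_t    rpe_port;            /* restricted port number */
	uint32_t    rpe_flags;           /* DEMO_FLAG_* bits */
	const char *rpe_entitlement;     /* required when ENTITLEMENT is set */
};

/*
 * Return true if `port` may be bound for TCP, given a callback that
 * answers whether the requesting task holds a named entitlement.
 */
static bool
demo_tcp_port_is_allowed(const struct rpe_demo_entry *tbl, size_t n,
    uint16_t port, bool (*task_has_entitlement)(const char *))
{
	for (size_t i = 0; i < n; i++) {
		if (tbl[i].rpe_port != port ||
		    !(tbl[i].rpe_flags & DEMO_FLAG_TCP)) {
			continue;
		}
		/* matched: in this sketch, allow only with the entitlement */
		return (tbl[i].rpe_flags & DEMO_FLAG_ENTITLEMENT) &&
		    task_has_entitlement(tbl[i].rpe_entitlement);
	}
	return true;    /* port not in the restricted list */
}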
*/ -#define equal(a1, a2) (bcmp((caddr_t)(a1), (caddr_t)(a2), (a1)->sa_len) == 0) +#define equal(a1, a2) (bcmp((caddr_t)(a1), (caddr_t)(a2), (a1)->sa_len) == 0) extern void kdp_set_gateway_mac(void *gatewaymac); @@ -224,39 +222,39 @@ __private_extern__ struct rtstat rtstat = { .rts_wildcard = 0, .rts_badrtgwroute = 0 }; -struct radix_node_head *rt_tables[AF_MAX+1]; +struct radix_node_head *rt_tables[AF_MAX + 1]; -decl_lck_mtx_data(, rnh_lock_data); /* global routing tables mutex */ -lck_mtx_t *rnh_lock = &rnh_lock_data; -static lck_attr_t *rnh_lock_attr; -static lck_grp_t *rnh_lock_grp; -static lck_grp_attr_t *rnh_lock_grp_attr; +decl_lck_mtx_data(, rnh_lock_data); /* global routing tables mutex */ +lck_mtx_t *rnh_lock = &rnh_lock_data; +static lck_attr_t *rnh_lock_attr; +static lck_grp_t *rnh_lock_grp; +static lck_grp_attr_t *rnh_lock_grp_attr; /* Lock group and attribute for routing entry locks */ -static lck_attr_t *rte_mtx_attr; -static lck_grp_t *rte_mtx_grp; -static lck_grp_attr_t *rte_mtx_grp_attr; +static lck_attr_t *rte_mtx_attr; +static lck_grp_t *rte_mtx_grp; +static lck_grp_attr_t *rte_mtx_grp_attr; -int rttrash = 0; /* routes not in table but not freed */ +int rttrash = 0; /* routes not in table but not freed */ boolean_t trigger_v6_defrtr_select = FALSE; unsigned int rte_debug = 0; /* Possible flags for rte_debug */ -#define RTD_DEBUG 0x1 /* enable or disable rtentry debug facility */ -#define RTD_TRACE 0x2 /* trace alloc, free, refcnt and lock */ -#define RTD_NO_FREE 0x4 /* don't free (good to catch corruptions) */ +#define RTD_DEBUG 0x1 /* enable or disable rtentry debug facility */ +#define RTD_TRACE 0x2 /* trace alloc, free, refcnt and lock */ +#define RTD_NO_FREE 0x4 /* don't free (good to catch corruptions) */ -#define RTE_NAME "rtentry" /* name for zone and rt_lock */ +#define RTE_NAME "rtentry" /* name for zone and rt_lock */ -static struct zone *rte_zone; /* special zone for rtentry */ -#define RTE_ZONE_MAX 65536 /* maximum elements in zone */ -#define RTE_ZONE_NAME RTE_NAME /* name of rtentry zone */ +static struct zone *rte_zone; /* special zone for rtentry */ +#define RTE_ZONE_MAX 65536 /* maximum elements in zone */ +#define RTE_ZONE_NAME RTE_NAME /* name of rtentry zone */ -#define RTD_INUSE 0xFEEDFACE /* entry is in use */ -#define RTD_FREED 0xDEADBEEF /* entry is freed */ +#define RTD_INUSE 0xFEEDFACE /* entry is in use */ +#define RTD_FREED 0xDEADBEEF /* entry is freed */ -#define MAX_SCOPE_ADDR_STR_LEN (MAX_IPv6_STR_LEN + 6) +#define MAX_SCOPE_ADDR_STR_LEN (MAX_IPv6_STR_LEN + 6) /* For gdb */ __private_extern__ unsigned int ctrace_stack_size = CTRACE_STACK_SIZE; @@ -266,28 +264,28 @@ __private_extern__ unsigned int ctrace_hist_size = CTRACE_HIST_SIZE; * Debug variant of rtentry structure. */ struct rtentry_dbg { - struct rtentry rtd_entry; /* rtentry */ - struct rtentry rtd_entry_saved; /* saved rtentry */ - uint32_t rtd_inuse; /* in use pattern */ - uint16_t rtd_refhold_cnt; /* # of rtref */ - uint16_t rtd_refrele_cnt; /* # of rtunref */ - uint32_t rtd_lock_cnt; /* # of locks */ - uint32_t rtd_unlock_cnt; /* # of unlocks */ + struct rtentry rtd_entry; /* rtentry */ + struct rtentry rtd_entry_saved; /* saved rtentry */ + uint32_t rtd_inuse; /* in use pattern */ + uint16_t rtd_refhold_cnt; /* # of rtref */ + uint16_t rtd_refrele_cnt; /* # of rtunref */ + uint32_t rtd_lock_cnt; /* # of locks */ + uint32_t rtd_unlock_cnt; /* # of unlocks */ /* * Alloc and free callers. 
*/ - ctrace_t rtd_alloc; - ctrace_t rtd_free; + ctrace_t rtd_alloc; + ctrace_t rtd_free; /* * Circular lists of rtref and rtunref callers. */ - ctrace_t rtd_refhold[CTRACE_HIST_SIZE]; - ctrace_t rtd_refrele[CTRACE_HIST_SIZE]; + ctrace_t rtd_refhold[CTRACE_HIST_SIZE]; + ctrace_t rtd_refrele[CTRACE_HIST_SIZE]; /* * Circular lists of locks and unlocks. */ - ctrace_t rtd_lock[CTRACE_HIST_SIZE]; - ctrace_t rtd_unlock[CTRACE_HIST_SIZE]; + ctrace_t rtd_lock[CTRACE_HIST_SIZE]; + ctrace_t rtd_unlock[CTRACE_HIST_SIZE]; /* * Trash list linkage */ @@ -304,7 +302,7 @@ static inline void rte_free_debug(struct rtentry *); static inline void rte_lock_debug(struct rtentry_dbg *); static inline void rte_unlock_debug(struct rtentry_dbg *); static void rt_maskedcopy(const struct sockaddr *, - struct sockaddr *, const struct sockaddr *); + struct sockaddr *, const struct sockaddr *); static void rtable_init(void **); static inline void rtref_audit(struct rtentry_dbg *); static inline void rtunref_audit(struct rtentry_dbg *); @@ -337,25 +335,22 @@ static void rt_set_idleref(struct rtentry *); static void rt_clear_idleref(struct rtentry *); static void route_event_callback(void *); static void rt_str4(struct rtentry *, char *, uint32_t, char *, uint32_t); -#if INET6 static void rt_str6(struct rtentry *, char *, uint32_t, char *, uint32_t); -#endif /* INET6 */ +static boolean_t route_ignore_protocol_cloning_for_dst(struct rtentry *, struct sockaddr *); uint32_t route_genid_inet = 0; -#if INET6 uint32_t route_genid_inet6 = 0; -#endif /* INET6 */ -#define ASSERT_SINIFSCOPE(sa) { \ - if ((sa)->sa_family != AF_INET || \ - (sa)->sa_len < sizeof (struct sockaddr_in)) \ - panic("%s: bad sockaddr_in %p\n", __func__, sa); \ +#define ASSERT_SINIFSCOPE(sa) { \ + if ((sa)->sa_family != AF_INET || \ + (sa)->sa_len < sizeof (struct sockaddr_in)) \ + panic("%s: bad sockaddr_in %p\n", __func__, sa); \ } -#define ASSERT_SIN6IFSCOPE(sa) { \ - if ((sa)->sa_family != AF_INET6 || \ - (sa)->sa_len < sizeof (struct sockaddr_in6)) \ - panic("%s: bad sockaddr_in6 %p\n", __func__, sa); \ +#define ASSERT_SIN6IFSCOPE(sa) { \ + if ((sa)->sa_family != AF_INET6 || \ + (sa)->sa_len < sizeof (struct sockaddr_in6)) \ + panic("%s: bad sockaddr_in6 %p\n", __func__, sa); \ } /* @@ -363,7 +358,7 @@ uint32_t route_genid_inet6 = 0; * specific but can be expanded in future to include other search filters. */ struct matchleaf_arg { - unsigned int ifscope; /* interface scope */ + unsigned int ifscope; /* interface scope */ }; /* @@ -371,13 +366,13 @@ struct matchleaf_arg { * of sockaddr_in for convenience). 
*/ static struct sockaddr sin_def = { - .sa_len = sizeof (struct sockaddr_in), + .sa_len = sizeof(struct sockaddr_in), .sa_family = AF_INET, .sa_data = { 0, } }; static struct sockaddr_in6 sin6_def = { - .sin6_len = sizeof (struct sockaddr_in6), + .sin6_len = sizeof(struct sockaddr_in6), .sin6_family = AF_INET6, .sin6_port = 0, .sin6_flowinfo = 0, @@ -393,23 +388,23 @@ static struct sockaddr_in6 sin6_def = { static unsigned int primary_ifscope = IFSCOPE_NONE; static unsigned int primary6_ifscope = IFSCOPE_NONE; -#define INET_DEFAULT(sa) \ +#define INET_DEFAULT(sa) \ ((sa)->sa_family == AF_INET && SIN(sa)->sin_addr.s_addr == 0) -#define INET6_DEFAULT(sa) \ - ((sa)->sa_family == AF_INET6 && \ +#define INET6_DEFAULT(sa) \ + ((sa)->sa_family == AF_INET6 && \ IN6_IS_ADDR_UNSPECIFIED(&SIN6(sa)->sin6_addr)) -#define SA_DEFAULT(sa) (INET_DEFAULT(sa) || INET6_DEFAULT(sa)) -#define RT(r) ((struct rtentry *)r) -#define RN(r) ((struct radix_node *)r) -#define RT_HOST(r) (RT(r)->rt_flags & RTF_HOST) +#define SA_DEFAULT(sa) (INET_DEFAULT(sa) || INET6_DEFAULT(sa)) +#define RT(r) ((struct rtentry *)r) +#define RN(r) ((struct radix_node *)r) +#define RT_HOST(r) (RT(r)->rt_flags & RTF_HOST) unsigned int rt_verbose = 0; #if (DEVELOPMENT || DEBUG) SYSCTL_DECL(_net_route); SYSCTL_UINT(_net_route, OID_AUTO, verbose, CTLFLAG_RW | CTLFLAG_LOCKED, - &rt_verbose, 0, ""); + &rt_verbose, 0, ""); #endif /* (DEVELOPMENT || DEBUG) */ static void @@ -420,9 +415,10 @@ rtable_init(void **table) domain_proto_mtx_lock_assert_held(); TAILQ_FOREACH(dom, &domains, dom_entry) { - if (dom->dom_rtattach != NULL) + if (dom->dom_rtattach != NULL) { dom->dom_rtattach(&table[dom->dom_family], dom->dom_rtoffset); + } } } @@ -434,7 +430,6 @@ route_init(void) { int size; -#if INET6 _CASSERT(offsetof(struct route, ro_rt) == offsetof(struct route_in6, ro_rt)); _CASSERT(offsetof(struct route, ro_lle) == @@ -445,11 +440,11 @@ route_init(void) offsetof(struct route_in6, ro_flags)); _CASSERT(offsetof(struct route, ro_dst) == offsetof(struct route_in6, ro_dst)); -#endif /* INET6 */ - PE_parse_boot_argn("rte_debug", &rte_debug, sizeof (rte_debug)); - if (rte_debug != 0) + PE_parse_boot_argn("rte_debug", &rte_debug, sizeof(rte_debug)); + if (rte_debug != 0) { rte_debug |= RTD_DEBUG; + } rnh_lock_grp_attr = lck_grp_attr_alloc_init(); rnh_lock_grp = lck_grp_alloc_init("route", rnh_lock_grp_attr); @@ -461,23 +456,17 @@ route_init(void) rte_mtx_attr = lck_attr_alloc_init(); lck_mtx_lock(rnh_lock); - rn_init(); /* initialize all zeroes, all ones, mask table */ + rn_init(); /* initialize all zeroes, all ones, mask table */ lck_mtx_unlock(rnh_lock); rtable_init((void **)rt_tables); - if (rte_debug & RTD_DEBUG) - size = sizeof (struct rtentry_dbg); - else - size = sizeof (struct rtentry); - - rte_zone = zinit(size, RTE_ZONE_MAX * size, 0, RTE_ZONE_NAME); - if (rte_zone == NULL) { - panic("%s: failed allocating rte_zone", __func__); - /* NOTREACHED */ + if (rte_debug & RTD_DEBUG) { + size = sizeof(struct rtentry_dbg); + } else { + size = sizeof(struct rtentry); } - zone_change(rte_zone, Z_EXPAND, TRUE); - zone_change(rte_zone, Z_CALLERACCT, FALSE); - zone_change(rte_zone, Z_NOENCRYPT, TRUE); + + rte_zone = zone_create(RTE_ZONE_NAME, size, ZC_NOENCRYPT); TAILQ_INIT(&rttrash_head); } @@ -490,7 +479,7 @@ route_init(void) boolean_t rt_primary_default(struct rtentry *rt, struct sockaddr *dst) { - return (SA_DEFAULT(dst) && !(rt->rt_flags & RTF_IFSCOPE)); + return SA_DEFAULT(dst) && !(rt->rt_flags & RTF_IFSCOPE); } /* @@ -499,10 +488,11 @@ 
rt_primary_default(struct rtentry *rt, struct sockaddr *dst) void set_primary_ifscope(int af, unsigned int ifscope) { - if (af == AF_INET) + if (af == AF_INET) { primary_ifscope = ifscope; - else + } else { primary6_ifscope = ifscope; + } } /* @@ -511,7 +501,7 @@ set_primary_ifscope(int af, unsigned int ifscope) unsigned int get_primary_ifscope(int af) { - return (af == AF_INET ? primary_ifscope : primary6_ifscope); + return af == AF_INET ? primary_ifscope : primary6_ifscope; } /* @@ -547,7 +537,7 @@ sin_get_ifscope(struct sockaddr *sa) /* Caller must pass in sockaddr_in */ ASSERT_SINIFSCOPE(sa); - return (SINIFSCOPE(sa)->sin_scope_id); + return SINIFSCOPE(sa)->sin_scope_id; } /* @@ -559,7 +549,7 @@ sin6_get_ifscope(struct sockaddr *sa) /* Caller must pass in sockaddr_in6 */ ASSERT_SIN6IFSCOPE(sa); - return (SIN6IFSCOPE(sa)->sin6_scope_id); + return SIN6IFSCOPE(sa)->sin6_scope_id; } static inline void @@ -578,7 +568,7 @@ sin6_get_embedded_ifscope(struct sockaddr *sa) /* Caller must pass in sockaddr_in6 */ ASSERT_SIN6IFSCOPE(sa); - return (ntohs(SIN6(sa)->sin6_addr.s6_addr16[1])); + return ntohs(SIN6(sa)->sin6_addr.s6_addr16[1]); } /* @@ -599,15 +589,16 @@ sa_copy(struct sockaddr *src, struct sockaddr_storage *dst, VERIFY(af == AF_INET || af == AF_INET6); - bzero(dst, sizeof (*dst)); + bzero(dst, sizeof(*dst)); if (af == AF_INET) { - bcopy(src, dst, sizeof (struct sockaddr_in)); + bcopy(src, dst, sizeof(struct sockaddr_in)); dst->ss_len = sizeof(struct sockaddr_in); - if (pifscope == NULL || ifscope != IFSCOPE_NONE) + if (pifscope == NULL || ifscope != IFSCOPE_NONE) { sin_set_ifscope(SA(dst), ifscope); + } } else { - bcopy(src, dst, sizeof (struct sockaddr_in6)); + bcopy(src, dst, sizeof(struct sockaddr_in6)); dst->ss_len = sizeof(struct sockaddr_in6); if (pifscope != NULL && IN6_IS_SCOPE_EMBED(&SIN6(dst)->sin6_addr)) { @@ -619,8 +610,9 @@ sa_copy(struct sockaddr *src, struct sockaddr_storage *dst, * passing NULL) or setting it. */ eifscope = sin6_get_embedded_ifscope(SA(dst)); - if (eifscope != IFSCOPE_NONE && ifscope == IFSCOPE_NONE) + if (eifscope != IFSCOPE_NONE && ifscope == IFSCOPE_NONE) { ifscope = eifscope; + } if (ifscope != IFSCOPE_NONE) { /* Set ifscope from pifscope or eifscope */ sin6_set_ifscope(SA(dst), ifscope); @@ -632,8 +624,9 @@ sa_copy(struct sockaddr *src, struct sockaddr_storage *dst, * If sin6_scope_id is set but the address doesn't * contain the equivalent embedded value, set it. 
*/ - if (ifscope != IFSCOPE_NONE && eifscope != ifscope) + if (ifscope != IFSCOPE_NONE && eifscope != ifscope) { sin6_set_embedded_ifscope(SA(dst), ifscope); + } } else if (pifscope == NULL || ifscope != IFSCOPE_NONE) { sin6_set_ifscope(SA(dst), ifscope); } @@ -644,7 +637,7 @@ sa_copy(struct sockaddr *src, struct sockaddr_storage *dst, sin6_get_ifscope(SA(dst)); } - return (SA(dst)); + return SA(dst); } /* @@ -656,7 +649,7 @@ ma_copy(int af, struct sockaddr *src, struct sockaddr_storage *dst, { VERIFY(af == AF_INET || af == AF_INET6); - bzero(dst, sizeof (*dst)); + bzero(dst, sizeof(*dst)); rt_maskedcopy(src, SA(dst), src); /* @@ -672,15 +665,15 @@ ma_copy(int af, struct sockaddr *src, struct sockaddr_storage *dst, SINIFSCOPE(dst)->sin_scope_id = ifscope; SINIFSCOPE(dst)->sin_len = offsetof(struct sockaddr_inifscope, sin_scope_id) + - sizeof (SINIFSCOPE(dst)->sin_scope_id); + sizeof(SINIFSCOPE(dst)->sin_scope_id); } else { SIN6IFSCOPE(dst)->sin6_scope_id = ifscope; SIN6IFSCOPE(dst)->sin6_len = offsetof(struct sockaddr_in6, sin6_scope_id) + - sizeof (SIN6IFSCOPE(dst)->sin6_scope_id); + sizeof(SIN6IFSCOPE(dst)->sin6_scope_id); } - return (SA(dst)); + return SA(dst); } /* @@ -691,11 +684,13 @@ sa_trim(struct sockaddr *sa, int skip) { caddr_t cp, base = (caddr_t)sa + skip; - if (sa->sa_len <= skip) - return (sa); + if (sa->sa_len <= skip) { + return sa; + } - for (cp = base + (sa->sa_len - skip); cp > base && cp[-1] == 0; ) + for (cp = base + (sa->sa_len - skip); cp > base && cp[-1] == 0;) { cp--; + } sa->sa_len = (cp - base) + skip; if (sa->sa_len < skip) { @@ -708,7 +703,7 @@ sa_trim(struct sockaddr *sa, int skip) sa->sa_len = 0; } - return (sa); + return sa; } /* @@ -732,7 +727,7 @@ rtm_scrub(int type, int idx, struct sockaddr *hint, struct sockaddr *sa, struct sockaddr_storage *ss = (struct sockaddr_storage *)buf; struct sockaddr *ret = sa; - VERIFY(buf != NULL && buflen >= sizeof (*ss)); + VERIFY(buf != NULL && buflen >= sizeof(*ss)); bzero(buf, buflen); switch (idx) { @@ -767,24 +762,25 @@ rtm_scrub(int type, int idx, struct sockaddr *hint, struct sockaddr *sa, * what was done earlier by ma_copy() on the source sockaddr. */ if (hint == NULL || - ((af = hint->sa_family) != AF_INET && af != AF_INET6)) - break; /* nothing to do */ - + ((af = hint->sa_family) != AF_INET && af != AF_INET6)) { + break; /* nothing to do */ + } skip = (af == AF_INET) ? offsetof(struct sockaddr_in, sin_addr) : offsetof(struct sockaddr_in6, sin6_addr); - if (sa->sa_len > skip && sa->sa_len <= sizeof (*ss)) { + if (sa->sa_len > skip && sa->sa_len <= sizeof(*ss)) { bcopy(sa, ss, sa->sa_len); /* * Don't use {sin,sin6}_set_ifscope() as sa_family * and sa_len for the netmask might not be set to * the corresponding expected values of the hint. */ - if (hint->sa_family == AF_INET) + if (hint->sa_family == AF_INET) { SINIFSCOPE(ss)->sin_scope_id = IFSCOPE_NONE; - else + } else { SIN6IFSCOPE(ss)->sin6_scope_id = IFSCOPE_NONE; + } ret = sa_trim(SA(ss), skip); /* @@ -793,8 +789,9 @@ rtm_scrub(int type, int idx, struct sockaddr *hint, struct sockaddr *sa, * case we return the raw value. */ if (hint->sa_family == AF_INET6 && - type != RTM_GET && type != RTM_GET2) - SA(ret)->sa_len = sizeof (struct sockaddr_in6); + type != RTM_GET && type != RTM_GET2) { + SA(ret)->sa_len = sizeof(struct sockaddr_in6); + } } break; } @@ -805,10 +802,12 @@ rtm_scrub(int type, int idx, struct sockaddr *hint, struct sockaddr *sa, * Else, if is, check if it is resolved. If not yet resolved * simply break else scrub the link layer address. 
*/ - if ((sa->sa_family != AF_LINK) || (SDL(sa)->sdl_alen == 0)) + if ((sa->sa_family != AF_LINK) || (SDL(sa)->sdl_alen == 0)) { break; - /* fallthrough */ + } + OS_FALLTHROUGH; } + case RTAX_IFP: { if (sa->sa_family == AF_LINK && credp) { struct sockaddr_dl *sdl = SDL(buf); @@ -832,7 +831,7 @@ rtm_scrub(int type, int idx, struct sockaddr *hint, struct sockaddr *sa, break; } - return (ret); + return ret; } /* @@ -846,12 +845,13 @@ rn_match_ifscope(struct radix_node *rn, void *arg) struct matchleaf_arg *ma = arg; int af = rt_key(rt)->sa_family; - if (!(rt->rt_flags & RTF_IFSCOPE) || (af != AF_INET && af != AF_INET6)) - return (0); + if (!(rt->rt_flags & RTF_IFSCOPE) || (af != AF_INET && af != AF_INET6)) { + return 0; + } - return (af == AF_INET ? - (SINIFSCOPE(rt_key(rt))->sin_scope_id == ma->ifscope) : - (SIN6IFSCOPE(rt_key(rt))->sin6_scope_id == ma->ifscope)); + return af == AF_INET ? + (SINIFSCOPE(rt_key(rt))->sin_scope_id == ma->ifscope) : + (SIN6IFSCOPE(rt_key(rt))->sin6_scope_id == ma->ifscope); } /* @@ -861,9 +861,7 @@ void routegenid_update(void) { routegenid_inet_update(); -#if INET6 routegenid_inet6_update(); -#endif /* INET6 */ } void @@ -872,13 +870,11 @@ routegenid_inet_update(void) atomic_add_32(&route_genid_inet, 1); } -#if INET6 void routegenid_inet6_update(void) { atomic_add_32(&route_genid_inet6, 1); } -#endif /* INET6 */ /* * Packet routing routines. @@ -908,7 +904,7 @@ rtalloc_ign_common_locked(struct route *ro, uint32_t ignore, return; } RT_UNLOCK(rt); - ROUTE_RELEASE_LOCKED(ro); /* rnh_lock already held */ + ROUTE_RELEASE_LOCKED(ro); /* rnh_lock already held */ } ro->ro_rt = rtalloc1_common_locked(&ro->ro_dst, 1, ignore, ifscope); if (ro->ro_rt != NULL) { @@ -938,14 +934,42 @@ rtalloc_scoped_ign(struct route *ro, uint32_t ignore, unsigned int ifscope) static struct rtentry * rtalloc1_locked(struct sockaddr *dst, int report, uint32_t ignflags) { - return (rtalloc1_common_locked(dst, report, ignflags, IFSCOPE_NONE)); + return rtalloc1_common_locked(dst, report, ignflags, IFSCOPE_NONE); } struct rtentry * rtalloc1_scoped_locked(struct sockaddr *dst, int report, uint32_t ignflags, unsigned int ifscope) { - return (rtalloc1_common_locked(dst, report, ignflags, ifscope)); + return rtalloc1_common_locked(dst, report, ignflags, ifscope); +} + +static boolean_t +route_ignore_protocol_cloning_for_dst(struct rtentry *rt, struct sockaddr *dst) +{ + /* + * For now keep protocol cloning for any type of IPv4 + * destination. + */ + if (dst->sa_family != AF_INET6) { + return FALSE; + } + + /* + * Limit protocol route creation of IPv6 ULA destinations + * from default route, + * Just to be safe, even though it doesn't affect routability, + * still allow protocol cloned routes if we happen to hit + * default route over companion link for ULA destination. 
+ */ + if (!IFNET_IS_COMPANION_LINK(rt->rt_ifp) && + (rt->rt_flags & RTF_GATEWAY) && + (rt->rt_flags & RTF_PRCLONING) && + SA_DEFAULT(rt_key(rt)) && + IN6_IS_ADDR_UNIQUE_LOCAL(&SIN6(dst)->sin6_addr)) { + return TRUE; + } + return FALSE; } struct rtentry * @@ -958,21 +982,42 @@ rtalloc1_common_locked(struct sockaddr *dst, int report, uint32_t ignflags, uint32_t nflags; int err = 0, msgtype = RTM_MISS; - if (rnh == NULL) + if (rnh == NULL) { goto unreachable; + } /* * Find the longest prefix or exact (in the scoped case) address match; * callee adds a reference to entry and checks for root node as well */ rt = rt_lookup(FALSE, dst, NULL, rnh, ifscope); - if (rt == NULL) + if (rt == NULL) { goto unreachable; + } + + /* + * Explicitly ignore protocol cloning for certain destinations. + * Some checks below are kind of redundant, as for now, RTF_PRCLONING + * is only set on indirect (RTF_GATEWAY) routes. + * Also, we do this only when the route lookup above, resulted in default + * route. + * This is done to ensure, the resulting indirect host route doesn't + * interfere when routing table gets configured with a indirect subnet + * route/direct subnet route that is more specific than the current + * parent route of the resulting protocol cloned route. + * + * At the crux of it all, it is a problem that we maintain host cache + * in the routing table. We should revisit this for a generic solution. + */ + if (route_ignore_protocol_cloning_for_dst(rt, dst)) { + ignflags |= RTF_PRCLONING; + } RT_LOCK_SPIN(rt); newrt = rt; nflags = rt->rt_flags & ~ignflags; RT_UNLOCK(rt); + if (report && (nflags & (RTF_CLONING | RTF_PRCLONING))) { /* * We are apparently adding (report = 0 in delete). @@ -1019,7 +1064,7 @@ rtalloc1_common_locked(struct sockaddr *dst, int report, uint32_t ignflags, def_key.ss_family = rt_key(newrt)->sa_family; defrt = rtalloc1_scoped_locked((struct sockaddr *)&def_key, - 0, 0, newrt->rt_ifp->if_index); + 0, 0, newrt->rt_ifp->if_index); if (defrt) { if (equal(rt_key(newrt), defrt->rt_gateway)) { @@ -1059,7 +1104,7 @@ miss: rt_missmsg(msgtype, &info, 0, err); } done: - return (newrt); + return newrt; } struct rtentry * @@ -1070,7 +1115,7 @@ rtalloc1(struct sockaddr *dst, int report, uint32_t ignflags) lck_mtx_lock(rnh_lock); entry = rtalloc1_locked(dst, report, ignflags); lck_mtx_unlock(rnh_lock); - return (entry); + return entry; } struct rtentry * @@ -1082,7 +1127,7 @@ rtalloc1_scoped(struct sockaddr *dst, int report, uint32_t ignflags, lck_mtx_lock(rnh_lock); entry = rtalloc1_scoped_locked(dst, report, ignflags, ifscope); lck_mtx_unlock(rnh_lock); - return (entry); + return entry; } /* @@ -1170,8 +1215,9 @@ rtfree_common(struct rtentry *rt, boolean_t locked) * close routine typically issues RTM_DELETE which clears the RTF_UP * flag on the entry so that the code below reclaims the storage. */ - if (rnh != NULL && rnh->rnh_close != NULL) + if (rnh != NULL && rnh->rnh_close != NULL) { rnh->rnh_close((struct radix_node *)rt, rnh); + } /* * If we are no longer "up" (and ref == 0) then we can free the @@ -1200,20 +1246,23 @@ rtfree_common(struct rtentry *rt, boolean_t locked) * release references on items we hold them on.. * e.g other routes and ifaddrs. */ - if ((rt_parent = rt->rt_parent) != NULL) + if ((rt_parent = rt->rt_parent) != NULL) { rt->rt_parent = NULL; + } - if ((rt_ifa = rt->rt_ifa) != NULL) + if ((rt_ifa = rt->rt_ifa) != NULL) { rt->rt_ifa = NULL; + } /* * Now free any attached link-layer info. 
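/*
 * The new route_ignore_protocol_cloning_for_dst() above keys off
 * IN6_IS_ADDR_UNIQUE_LOCAL(); a stand-alone equivalent of that test
 * (unique-local IPv6 addresses are fc00::/7) using only <netinet/in.h>.
 * The function name is an assumption for the example.
 */
#include <netinet/in.h>
#include <stdbool.h>

static bool
addr_is_unique_local(const struct in6_addr *a)
{
	/* fc00::/7 covers both fc00::/8 and fd00::/8 */
	return (a->s6_addr[0] & 0xfe) == 0xfc;
}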
*/ if (rt->rt_llinfo != NULL) { - if (rt->rt_llinfo_free != NULL) + if (rt->rt_llinfo_free != NULL) { (*rt->rt_llinfo_free)(rt->rt_llinfo); - else + } else { R_Free(rt->rt_llinfo); + } rt->rt_llinfo = NULL; } @@ -1227,11 +1276,13 @@ rtfree_common(struct rtentry *rt, boolean_t locked) RT_UNLOCK(rt); rte_lock_destroy(rt); - if (rt_parent != NULL) + if (rt_parent != NULL) { rtfree_locked(rt_parent); + } - if (rt_ifa != NULL) + if (rt_ifa != NULL) { IFA_REMREF(rt_ifa); + } /* * The key is separately alloc'd so free it (see rt_setgate()). @@ -1258,8 +1309,9 @@ rtfree_common(struct rtentry *rt, boolean_t locked) RT_UNLOCK(rt); } done: - if (!locked) + if (!locked) { lck_mtx_unlock(rnh_lock); + } } void @@ -1291,11 +1343,12 @@ rtunref(struct rtentry *p) rt_clear_idleref(p); } - if (rte_debug & RTD_DEBUG) + if (rte_debug & RTD_DEBUG) { rtunref_audit((struct rtentry_dbg *)p); + } /* Return new value */ - return (p->rt_refcnt); + return p->rt_refcnt; } static inline void @@ -1308,8 +1361,9 @@ rtunref_audit(struct rtentry_dbg *rte) /* NOTREACHED */ } idx = atomic_add_16_ov(&rte->rtd_refrele_cnt, 1) % CTRACE_HIST_SIZE; - if (rte_debug & RTD_TRACE) + if (rte_debug & RTD_TRACE) { ctrace_record(&rte->rtd_refrele[idx]); + } } /* @@ -1332,8 +1386,9 @@ rtref(struct rtentry *p) rt_set_idleref(p); } - if (rte_debug & RTD_DEBUG) + if (rte_debug & RTD_DEBUG) { rtref_audit((struct rtentry_dbg *)p); + } } static inline void @@ -1346,8 +1401,9 @@ rtref_audit(struct rtentry_dbg *rte) /* NOTREACHED */ } idx = atomic_add_16_ov(&rte->rtd_refhold_cnt, 1) % CTRACE_HIST_SIZE; - if (rte_debug & RTD_TRACE) + if (rte_debug & RTD_TRACE) { ctrace_record(&rte->rtd_refhold[idx]); + } } void @@ -1357,22 +1413,25 @@ rtsetifa(struct rtentry *rt, struct ifaddr *ifa) RT_LOCK_ASSERT_HELD(rt); - if (rt->rt_ifa == ifa) + if (rt->rt_ifa == ifa) { return; + } /* Become a regular mutex, just in case */ RT_CONVERT_LOCK(rt); /* Release the old ifa */ - if (rt->rt_ifa) + if (rt->rt_ifa) { IFA_REMREF(rt->rt_ifa); + } /* Set rt_ifa */ rt->rt_ifa = ifa; /* Take a reference to the ifa */ - if (rt->rt_ifa) + if (rt->rt_ifa) { IFA_ADDREF(rt->rt_ifa); + } } /* @@ -1402,11 +1461,7 @@ rtredirect(struct ifnet *ifp, struct sockaddr *dst, struct sockaddr *gateway, * Transform src into the internal routing table form for * comparison against rt_gateway below. */ -#if INET6 if ((af == AF_INET) || (af == AF_INET6)) { -#else - if (af == AF_INET) { -#endif /* !INET6 */ src = sa_copy(src, &ss, &ifscope); } @@ -1421,9 +1476,10 @@ rtredirect(struct ifnet *ifp, struct sockaddr *dst, struct sockaddr *gateway, } /* Lookup route to the destination (from the original IP header) */ - rt = rtalloc1_scoped_locked(dst, 0, RTF_CLONING|RTF_PRCLONING, ifscope); - if (rt != NULL) + rt = rtalloc1_scoped_locked(dst, 0, RTF_CLONING | RTF_PRCLONING, ifscope); + if (rt != NULL) { RT_LOCK(rt); + } /* * If the redirect isn't from our current router for this dst, @@ -1452,8 +1508,9 @@ rtredirect(struct ifnet *ifp, struct sockaddr *dst, struct sockaddr *gateway, } if (error) { - if (rt != NULL) + if (rt != NULL) { RT_UNLOCK(rt); + } goto done; } @@ -1463,8 +1520,9 @@ rtredirect(struct ifnet *ifp, struct sockaddr *dst, struct sockaddr *gateway, * which use routing redirects generated by smart gateways * to dynamically build the routing tables. */ - if ((rt == NULL) || (rt_mask(rt) != NULL && rt_mask(rt)->sa_len < 2)) + if ((rt == NULL) || (rt_mask(rt) != NULL && rt_mask(rt)->sa_len < 2)) { goto create; + } /* * Don't listen to the redirect if it's * for a route to an interface. 
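/*
 * rtref()/rtunref() above pair a plain reference count with an optional
 * audit ring that records the last few hold sites when RTD_DEBUG is set.
 * A compact user-space sketch of that pattern; type and constant names are
 * invented here.
 */
#include <stdatomic.h>
#include <stdint.h>

#define HOLD_HIST_SIZE 4

struct ref_debug {
	_Atomic uint32_t refcnt;
	_Atomic uint16_t hold_cnt;                  /* total holds, indexes the ring */
	void            *hold_site[HOLD_HIST_SIZE]; /* last few callers */
};

static uint32_t
ref_hold(struct ref_debug *r, void *caller)
{
	uint16_t idx = atomic_fetch_add(&r->hold_cnt, 1) % HOLD_HIST_SIZE;

	r->hold_site[idx] = caller;     /* record who took this reference */
	return atomic_fetch_add(&r->refcnt, 1) + 1;
}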
@@ -1479,8 +1537,9 @@ rtredirect(struct ifnet *ifp, struct sockaddr *dst, struct sockaddr *gateway, * created host route is scoped as well. */ create: - if (rt != NULL) + if (rt != NULL) { RT_UNLOCK(rt); + } flags |= RTF_GATEWAY | RTF_DYNAMIC; error = rtrequest_scoped_locked(RTM_ADD, dst, gateway, netmask, flags, NULL, ifscope); @@ -1509,27 +1568,28 @@ done: if (!error) { /* Enqueue event to refresh flow route entries */ route_event_enqueue_nwk_wq_entry(rt, NULL, ROUTE_ENTRY_REFRESH, NULL, FALSE); - if (rtp) + if (rtp) { *rtp = rt; - else + } else { rtfree_locked(rt); - } - else + } + } else { rtfree_locked(rt); + } } out: if (error) { rtstat.rts_badredirect++; } else { - if (stat != NULL) + if (stat != NULL) { (*stat)++; + } - if (af == AF_INET) + if (af == AF_INET) { routegenid_inet_update(); -#if INET6 - else if (af == AF_INET6) + } else if (af == AF_INET6) { routegenid_inet6_update(); -#endif /* INET6 */ + } } lck_mtx_unlock(rnh_lock); bzero((caddr_t)&info, sizeof(info)); @@ -1541,19 +1601,19 @@ out: } /* -* Routing table ioctl interface. -*/ + * Routing table ioctl interface. + */ int rtioctl(unsigned long req, caddr_t data, struct proc *p) { #pragma unused(p, req, data) - return (ENXIO); + return ENXIO; } struct ifaddr * ifa_ifwithroute( int flags, - const struct sockaddr *dst, + const struct sockaddr *dst, const struct sockaddr *gateway) { struct ifaddr *ifa; @@ -1562,27 +1622,28 @@ ifa_ifwithroute( ifa = ifa_ifwithroute_locked(flags, dst, gateway); lck_mtx_unlock(rnh_lock); - return (ifa); + return ifa; } struct ifaddr * ifa_ifwithroute_locked(int flags, const struct sockaddr *dst, const struct sockaddr *gateway) { - return (ifa_ifwithroute_common_locked((flags & ~RTF_IFSCOPE), dst, - gateway, IFSCOPE_NONE)); + return ifa_ifwithroute_common_locked((flags & ~RTF_IFSCOPE), dst, + gateway, IFSCOPE_NONE); } struct ifaddr * ifa_ifwithroute_scoped_locked(int flags, const struct sockaddr *dst, const struct sockaddr *gateway, unsigned int ifscope) { - if (ifscope != IFSCOPE_NONE) + if (ifscope != IFSCOPE_NONE) { flags |= RTF_IFSCOPE; - else + } else { flags &= ~RTF_IFSCOPE; + } - return (ifa_ifwithroute_common_locked(flags, dst, gateway, ifscope)); + return ifa_ifwithroute_common_locked(flags, dst, gateway, ifscope); } static struct ifaddr * @@ -1600,23 +1661,17 @@ ifa_ifwithroute_common_locked(int flags, const struct sockaddr *dst, * contains a scope ID, make sure to clear it since * interface addresses aren't scoped. 
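/*
 * Condensed restatement of the acceptance test rtredirect() applies before
 * taking the "create" path above: the ICMP report must come from the router
 * currently in use, must name a different gateway, and that gateway must be
 * directly reachable.  Fixed-size address buffers stand in for sockaddrs;
 * the helper name is hypothetical.
 */
#include <stdbool.h>
#include <string.h>

struct gw_addr {
	unsigned char bytes[16];
};

static bool
redirect_acceptable(const struct gw_addr *current_gw,
    const struct gw_addr *reporting_router, const struct gw_addr *new_gw,
    bool new_gw_on_link)
{
	if (memcmp(current_gw->bytes, reporting_router->bytes,
	    sizeof(current_gw->bytes)) != 0) {
		return false;   /* not from the router we actually use */
	}
	if (memcmp(current_gw->bytes, new_gw->bytes,
	    sizeof(current_gw->bytes)) == 0) {
		return false;   /* redirect back to the same gateway */
	}
	return new_gw_on_link;  /* new gateway must be directly reachable */
}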
*/ -#if INET6 if (dst != NULL && ((dst->sa_family == AF_INET) || - (dst->sa_family == AF_INET6))) -#else - if (dst != NULL && dst->sa_family == AF_INET) -#endif /* !INET6 */ + (dst->sa_family == AF_INET6))) { dst = sa_copy(SA((uintptr_t)dst), &dst_ss, NULL); + } -#if INET6 if (gw != NULL && ((gw->sa_family == AF_INET) || - (gw->sa_family == AF_INET6))) -#else - if (gw != NULL && gw->sa_family == AF_INET) -#endif /* !INET6 */ + (gw->sa_family == AF_INET6))) { gw = sa_copy(SA((uintptr_t)gw), &gw_ss, NULL); + } if (!(flags & RTF_GATEWAY)) { /* @@ -1629,8 +1684,9 @@ ifa_ifwithroute_common_locked(int flags, const struct sockaddr *dst, if (flags & RTF_HOST) { ifa = ifa_ifwithdstaddr(dst); } - if (ifa == NULL) + if (ifa == NULL) { ifa = ifa_ifwithaddr_scoped(gw, ifscope); + } } else { /* * If we are adding a route to a remote net @@ -1639,8 +1695,9 @@ ifa_ifwithroute_common_locked(int flags, const struct sockaddr *dst, */ ifa = ifa_ifwithdstaddr(gw); } - if (ifa == NULL) + if (ifa == NULL) { ifa = ifa_ifwithnet_scoped(gw, ifscope); + } if (ifa == NULL) { /* Workaround to avoid gcc warning regarding const variable */ rt = rtalloc1_scoped_locked((struct sockaddr *)(size_t)dst, @@ -1684,8 +1741,9 @@ ifa_ifwithroute_common_locked(int flags, const struct sockaddr *dst, !equal(ifa->ifa_addr, (struct sockaddr *)(size_t)gw)) && (rt = rtalloc1_scoped_locked((struct sockaddr *)(size_t)gw, 0, 0, ifscope)) != NULL) { - if (ifa != NULL) + if (ifa != NULL) { IFA_REMREF(ifa); + } RT_LOCK_SPIN(rt); ifa = rt->rt_ifa; if (ifa != NULL) { @@ -1717,7 +1775,7 @@ ifa_ifwithroute_common_locked(int flags, const struct sockaddr *dst, ifa = NULL; } - return (ifa); + return ifa; } static int rt_fixdelete(struct radix_node *, void *); @@ -1732,8 +1790,8 @@ int rtrequest_locked(int req, struct sockaddr *dst, struct sockaddr *gateway, struct sockaddr *netmask, int flags, struct rtentry **ret_nrt) { - return (rtrequest_common_locked(req, dst, gateway, netmask, - (flags & ~RTF_IFSCOPE), ret_nrt, IFSCOPE_NONE)); + return rtrequest_common_locked(req, dst, gateway, netmask, + (flags & ~RTF_IFSCOPE), ret_nrt, IFSCOPE_NONE); } int @@ -1741,13 +1799,14 @@ rtrequest_scoped_locked(int req, struct sockaddr *dst, struct sockaddr *gateway, struct sockaddr *netmask, int flags, struct rtentry **ret_nrt, unsigned int ifscope) { - if (ifscope != IFSCOPE_NONE) + if (ifscope != IFSCOPE_NONE) { flags |= RTF_IFSCOPE; - else + } else { flags &= ~RTF_IFSCOPE; + } - return (rtrequest_common_locked(req, dst, gateway, netmask, - flags, ret_nrt, ifscope)); + return rtrequest_common_locked(req, dst, gateway, netmask, + flags, ret_nrt, ifscope); } /* @@ -1781,7 +1840,7 @@ rtrequest_common_locked(int req, struct sockaddr *dst0, int af = dst->sa_family; void (*ifa_rtrequest)(int, struct rtentry *, struct sockaddr *); -#define senderr(x) { error = x; goto bad; } +#define senderr(x) { error = x; goto bad; } DTRACE_ROUTE6(rtrequest, int, req, struct sockaddr *, dst0, struct sockaddr *, gateway, struct sockaddr *, netmask, @@ -1791,14 +1850,16 @@ rtrequest_common_locked(int req, struct sockaddr *dst0, /* * Find the correct routing tree to use for this Address Family */ - if ((rnh = rt_tables[af]) == NULL) + if ((rnh = rt_tables[af]) == NULL) { senderr(ESRCH); + } /* * If we are adding a host route then we don't want to put * a netmask in the tree */ - if (flags & RTF_HOST) + if (flags & RTF_HOST) { netmask = NULL; + } /* * If Scoped Routing is enabled, use a local copy of the destination @@ -1813,27 +1874,26 @@ rtrequest_common_locked(int req, struct sockaddr 
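/*
 * The *_scoped_locked() wrappers above all follow one convention: a nonzero
 * interface scope sets RTF_IFSCOPE, IFSCOPE_NONE clears it, so the common
 * code only ever consults the flag.  Sketch with stand-in constants (the
 * real values live in route.h).
 */
#include <stdint.h>

#define EX_IFSCOPE_NONE 0u
#define EX_RTF_IFSCOPE  0x01000000u     /* placeholder for the real bit */

static uint32_t
apply_scope(uint32_t flags, unsigned int ifscope)
{
	if (ifscope != EX_IFSCOPE_NONE) {
		return flags | EX_RTF_IFSCOPE;
	}
	return flags & ~EX_RTF_IFSCOPE;
}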
*dst0, * explicitly set is inside route_output() as part of handling a * routing socket request. */ -#if INET6 if (req != RTM_RESOLVE && ((af == AF_INET) || (af == AF_INET6))) { -#else - if (req != RTM_RESOLVE && af == AF_INET) { -#endif /* !INET6 */ /* Transform dst into the internal routing table form */ dst = sa_copy(dst, &ss, &ifscope); /* Transform netmask into the internal routing table form */ - if (netmask != NULL) + if (netmask != NULL) { netmask = ma_copy(af, netmask, &mask, ifscope); + } - if (ifscope != IFSCOPE_NONE) + if (ifscope != IFSCOPE_NONE) { flags |= RTF_IFSCOPE; + } } else if ((flags & RTF_IFSCOPE) && (af != AF_INET && af != AF_INET6)) { senderr(EINVAL); } - if (ifscope == IFSCOPE_NONE) + if (ifscope == IFSCOPE_NONE) { flags &= ~RTF_IFSCOPE; + } switch (req) { case RTM_DELETE: { @@ -1844,8 +1904,9 @@ rtrequest_common_locked(int req, struct sockaddr *dst0, * Remove the item from the tree and return it. * Complain if it is not there and do no more processing. */ - if ((rn = rnh->rnh_deladdr(dst, netmask, rnh)) == NULL) + if ((rn = rnh->rnh_deladdr(dst, netmask, rnh)) == NULL) { senderr(ESRCH); + } if (rn->rn_flags & (RNF_ACTIVE | RNF_ROOT)) { panic("rtrequest delete"); /* NOTREACHED */ @@ -1891,9 +1952,10 @@ rtrequest_common_locked(int req, struct sockaddr *dst0, * If the old count is 0, it implies that last reference is being * removed and there's no one listening for this route event. */ - if (old_rt_refcnt != 0) + if (old_rt_refcnt != 0) { route_event_enqueue_nwk_wq_entry(rt, NULL, ROUTE_ENTRY_DELETED, NULL, TRUE); + } /* * Now search what's left of the subtree for any cloned @@ -1919,8 +1981,9 @@ rtrequest_common_locked(int req, struct sockaddr *dst0, /* * Remove any external references we may have. */ - if ((gwrt = rt->rt_gwroute) != NULL) + if ((gwrt = rt->rt_gwroute) != NULL) { rt->rt_gwroute = NULL; + } /* * give the protocol a chance to keep things in sync. @@ -1929,8 +1992,9 @@ rtrequest_common_locked(int req, struct sockaddr *dst0, IFA_LOCK_SPIN(ifa); ifa_rtrequest = ifa->ifa_rtrequest; IFA_UNLOCK(ifa); - if (ifa_rtrequest != NULL) + if (ifa_rtrequest != NULL) { ifa_rtrequest(RTM_DELETE, rt, NULL); + } /* keep reference on rt_ifa */ ifa = NULL; } @@ -1979,8 +2043,9 @@ rtrequest_common_locked(int req, struct sockaddr *dst0, * lock is dropped above, as it could lead to the same * lock being acquired if gwrt is a clone of rt. */ - if (gwrt != NULL) + if (gwrt != NULL) { rtfree_locked(gwrt); + } /* * If the caller wants it, then it can have it, @@ -1994,17 +2059,17 @@ rtrequest_common_locked(int req, struct sockaddr *dst0, /* Dereference or deallocate the route */ rtfree_locked(rt); } - if (af == AF_INET) + if (af == AF_INET) { routegenid_inet_update(); -#if INET6 - else if (af == AF_INET6) + } else if (af == AF_INET6) { routegenid_inet6_update(); -#endif /* INET6 */ + } break; } case RTM_RESOLVE: - if (ret_nrt == NULL || (rt = *ret_nrt) == NULL) + if (ret_nrt == NULL || (rt = *ret_nrt) == NULL) { senderr(EINVAL); + } /* * According to the UNIX conformance tests, we need to return * ENETUNREACH when the parent route is RTF_REJECT. 
@@ -2034,15 +2099,13 @@ rtrequest_common_locked(int req, struct sockaddr *dst0, ~(RTF_CLONING | RTF_PRCLONING | RTF_STATIC); flags |= RTF_WASCLONED; gateway = rt->rt_gateway; - if ((netmask = rt->rt_genmask) == NULL) + if ((netmask = rt->rt_genmask) == NULL) { flags |= RTF_HOST; + } -#if INET6 - if (af != AF_INET && af != AF_INET6) -#else - if (af != AF_INET) -#endif /* !INET6 */ + if (af != AF_INET && af != AF_INET6) { goto makeroute; + } /* * When scoped routing is enabled, cloned entries are @@ -2082,8 +2145,9 @@ rtrequest_common_locked(int req, struct sockaddr *dst0, NULL : &ifscope); /* Transform netmask into the internal routing table form */ - if (netmask != NULL) + if (netmask != NULL) { netmask = ma_copy(af, netmask, &mask, ifscope); + } goto makeroute; @@ -2098,15 +2162,17 @@ rtrequest_common_locked(int req, struct sockaddr *dst0, } else { ifa = ifa_ifwithroute_locked(flags, dst0, gateway); } - if (ifa == NULL) + if (ifa == NULL) { senderr(ENETUNREACH); + } makeroute: /* * We land up here for both RTM_RESOLVE and RTM_ADD * when we decide to create a route. */ - if ((rt = rte_alloc()) == NULL) + if ((rt = rte_alloc()) == NULL) { senderr(ENOBUFS); + } Bzero(rt, sizeof(*rt)); rte_lock_init(rt); eventhandler_lists_ctxt_init(&rt->rt_evhdlr_ctxt); @@ -2123,11 +2189,9 @@ makeroute: case AF_INET: rt->rt_tree_genid = &route_genid_inet; break; -#if INET6 case AF_INET6: rt->rt_tree_genid = &route_genid_inet6; break; -#endif /* INET6 */ default: break; } @@ -2153,10 +2217,11 @@ makeroute: /* * make sure it contains the value we want (masked if needed). */ - if (netmask) + if (netmask) { rt_maskedcopy(dst, ndst, netmask); - else + } else { Bcopy(dst, ndst, dst->sa_len); + } /* * Note that we now have a reference to the ifa. @@ -2256,8 +2321,9 @@ makeroute: IFA_LOCK_SPIN(ifa); ifa_rtrequest = ifa->ifa_rtrequest; IFA_UNLOCK(ifa); - if (ifa_rtrequest != NULL) + if (ifa_rtrequest != NULL) { ifa_rtrequest(req, rt, SA(ret_nrt ? *ret_nrt : NULL)); + } IFA_REMREF(ifa); ifa = NULL; @@ -2292,12 +2358,11 @@ makeroute: RT_ADDREF_LOCKED(rt); } - if (af == AF_INET) + if (af == AF_INET) { routegenid_inet_update(); -#if INET6 - else if (af == AF_INET6) + } else if (af == AF_INET6) { routegenid_inet6_update(); -#endif /* INET6 */ + } RT_GENID_SYNC(rt); @@ -2306,8 +2371,9 @@ makeroute: * because they weren't completed when we called it earlier, * since the node was embryonic. */ - if ((rt->rt_flags & RTF_GATEWAY) && rt->rt_gwroute != NULL) + if ((rt->rt_flags & RTF_GATEWAY) && rt->rt_gwroute != NULL) { rt_set_gwroute(rt, rt_key(rt), rt->rt_gwroute); + } if (req == RTM_ADD && !(rt->rt_flags & RTF_HOST) && rt_mask(rt) != NULL) { @@ -2325,9 +2391,10 @@ makeroute: break; } bad: - if (ifa) + if (ifa) { IFA_REMREF(ifa); - return (error); + } + return error; } #undef senderr @@ -2340,7 +2407,7 @@ rtrequest(int req, struct sockaddr *dst, struct sockaddr *gateway, lck_mtx_lock(rnh_lock); error = rtrequest_locked(req, dst, gateway, netmask, flags, ret_nrt); lck_mtx_unlock(rnh_lock); - return (error); + return error; } int @@ -2354,7 +2421,7 @@ rtrequest_scoped(int req, struct sockaddr *dst, struct sockaddr *gateway, error = rtrequest_scoped_locked(req, dst, gateway, netmask, flags, ret_nrt, ifscope); lck_mtx_unlock(rnh_lock); - return (error); + return error; } /* @@ -2381,11 +2448,11 @@ rt_fixdelete(struct radix_node *rn, void *vp) * rt_setgate() on this route. 
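/*
 * Flag bookkeeping on the RTM_RESOLVE clone path above: the child drops the
 * cloning/static bits of its parent, is marked RTF_WASCLONED, and becomes a
 * host route when the parent has no genmask.  The EX_* constants below are
 * placeholders, not the real RTF_* values.
 */
#include <stdint.h>
#include <stdbool.h>

#define EX_RTF_HOST       0x0004u
#define EX_RTF_STATIC     0x0800u
#define EX_RTF_CLONING    0x0100u
#define EX_RTF_PRCLONING  0x10000u
#define EX_RTF_WASCLONED  0x20000u

static uint32_t
clone_flags(uint32_t parent_flags, bool parent_has_genmask)
{
	uint32_t f = parent_flags &
	    ~(EX_RTF_CLONING | EX_RTF_PRCLONING | EX_RTF_STATIC);

	f |= EX_RTF_WASCLONED;
	if (!parent_has_genmask) {
		f |= EX_RTF_HOST;       /* no genmask => exact-host clone */
	}
	return f;
}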
*/ RT_UNLOCK(rt); - return (rtrequest_locked(RTM_DELETE, rt_key(rt), NULL, - rt_mask(rt), rt->rt_flags, NULL)); + return rtrequest_locked(RTM_DELETE, rt_key(rt), NULL, + rt_mask(rt), rt->rt_flags, NULL); } RT_UNLOCK(rt); - return (0); + return 0; } /* @@ -2418,11 +2485,12 @@ rt_fixchange(struct radix_node *rn, void *vp) if (!rt->rt_parent || (rt->rt_flags & (RTF_CLONING | RTF_PRCLONING))) { RT_UNLOCK(rt); - return (0); + return 0; } - if (rt->rt_parent == rt0) + if (rt->rt_parent == rt0) { goto delete_rt; + } /* * There probably is a function somewhere which does this... @@ -2443,13 +2511,13 @@ rt_fixchange(struct radix_node *rn, void *vp) int mlen = rt_mask(rt->rt_parent)->sa_len; if (mlen > rt_mask(rt0)->sa_len) { RT_UNLOCK(rt); - return (0); + return 0; } for (i = rnh->rnh_treetop->rn_offset; i < mlen; i++) { if ((xmp[i] & ~(xmp[i] ^ xm1[i])) != xmp[i]) { RT_UNLOCK(rt); - return (0); + return 0; } } } @@ -2457,7 +2525,7 @@ rt_fixchange(struct radix_node *rn, void *vp) for (i = rnh->rnh_treetop->rn_offset; i < len; i++) { if ((xk2[i] & xm1[i]) != xk1[i]) { RT_UNLOCK(rt); - return (0); + return 0; } } @@ -2471,8 +2539,8 @@ delete_rt: * prevents another thread from calling rt_setgate() on this route. */ RT_UNLOCK(rt); - return (rtrequest_locked(RTM_DELETE, rt_key(rt), NULL, - rt_mask(rt), rt->rt_flags, NULL)); + return rtrequest_locked(RTM_DELETE, rt_key(rt), NULL, + rt_mask(rt), rt->rt_flags, NULL); } /* @@ -2484,7 +2552,7 @@ delete_rt: * portion never gets deallocated (though it may change contents) and * thus greatly simplifies things. */ -#define SA_SIZE(x) (-(-((uintptr_t)(x)) & -(32))) +#define SA_SIZE(x) (-(-((uintptr_t)(x)) & -(32))) /* * Sets the gateway and/or gateway route portion of a route; may be @@ -2501,7 +2569,7 @@ rt_setgate(struct rtentry *rt, struct sockaddr *dst, struct sockaddr *gate) boolean_t loop = FALSE; if (dst->sa_family != AF_INET && dst->sa_family != AF_INET6) { - return (EINVAL); + return EINVAL; } rnh = rt_tables[dst->sa_family]; @@ -2513,7 +2581,7 @@ rt_setgate(struct rtentry *rt, struct sockaddr *dst, struct sockaddr *gate) * or is temporarily frozen, reject the modification request. */ if (rt->rt_flags & RTF_CONDEMNED) { - return (EBUSY); + return EBUSY; } /* Add an extra ref for ourselves */ @@ -2538,11 +2606,11 @@ rt_setgate(struct rtentry *rt, struct sockaddr *dst, struct sockaddr *gate) * A (cloning) network route with the destination equal to the gateway * will create an endless loop (see notes below), so disallow it. */ - if (((rt->rt_flags & (RTF_HOST|RTF_GATEWAY|RTF_LLINFO)) == + if (((rt->rt_flags & (RTF_HOST | RTF_GATEWAY | RTF_LLINFO)) == RTF_GATEWAY) && loop) { /* Release extra ref */ RT_REMREF_LOCKED(rt); - return (EADDRNOTAVAIL); + return EADDRNOTAVAIL; } /* @@ -2550,8 +2618,8 @@ rt_setgate(struct rtentry *rt, struct sockaddr *dst, struct sockaddr *gate) * will interfere with keeping LLINFO in the routing * table, so disallow it. */ - if (((rt->rt_flags & (RTF_HOST|RTF_GATEWAY|RTF_LLINFO)) == - (RTF_HOST|RTF_GATEWAY)) && loop) { + if (((rt->rt_flags & (RTF_HOST | RTF_GATEWAY | RTF_LLINFO)) == + (RTF_HOST | RTF_GATEWAY)) && loop) { /* * The route might already exist if this is an RTM_CHANGE * or a routing redirect, so try to delete it. 
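/*
 * SA_SIZE() above is the usual round-up-to-a-multiple-of-32 trick:
 * -(-x & -32) equals x rounded up to the next multiple of 32.  A readable
 * stand-alone equivalent plus a few sanity checks:
 */
#include <stdint.h>
#include <assert.h>

static uintptr_t
round_up_32(uintptr_t x)
{
	return (x + 31) & ~(uintptr_t)31;
}

static void
round_up_32_example(void)
{
	assert(round_up_32(28) == 32);  /* e.g. sizeof(struct sockaddr_in6) */
	assert(round_up_32(32) == 32);
	assert(round_up_32(33) == 64);
}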
@@ -2569,7 +2637,7 @@ rt_setgate(struct rtentry *rt, struct sockaddr *dst, struct sockaddr *gate) } /* Release extra ref */ RT_REMREF_LOCKED(rt); - return (EADDRNOTAVAIL); + return EADDRNOTAVAIL; } /* @@ -2580,12 +2648,13 @@ rt_setgate(struct rtentry *rt, struct sockaddr *dst, struct sockaddr *gate) struct rtentry *gwrt; unsigned int ifscope; - if (dst->sa_family == AF_INET) + if (dst->sa_family == AF_INET) { ifscope = sin_get_ifscope(dst); - else if (dst->sa_family == AF_INET6) + } else if (dst->sa_family == AF_INET6) { ifscope = sin6_get_ifscope(dst); - else + } else { ifscope = IFSCOPE_NONE; + } RT_UNLOCK(rt); /* @@ -2594,8 +2663,9 @@ rt_setgate(struct rtentry *rt, struct sockaddr *dst, struct sockaddr *gate) * check for cloning loop avoidance (dst == gate). */ gwrt = rtalloc1_scoped_locked(gate, 1, RTF_PRCLONING, ifscope); - if (gwrt != NULL) + if (gwrt != NULL) { RT_LOCK_ASSERT_NOTHELD(gwrt); + } RT_LOCK(rt); /* @@ -2616,7 +2686,7 @@ rt_setgate(struct rtentry *rt, struct sockaddr *dst, struct sockaddr *gate) RT_REMREF_LOCKED(gwrt); /* Release extra ref */ RT_REMREF_LOCKED(rt); - return (EADDRINUSE); /* failure */ + return EADDRINUSE; /* failure */ } /* @@ -2627,20 +2697,21 @@ rt_setgate(struct rtentry *rt, struct sockaddr *dst, struct sockaddr *gate) if (ifscope != IFSCOPE_NONE && (rt->rt_flags & RTF_IFSCOPE) && gwrt != NULL && gwrt->rt_ifp != NULL && gwrt->rt_ifp->if_index != ifscope) { - rtfree_locked(gwrt); /* rt != gwrt, no deadlock */ + rtfree_locked(gwrt); /* rt != gwrt, no deadlock */ /* Release extra ref */ RT_REMREF_LOCKED(rt); - return ((rt->rt_flags & RTF_HOST) ? - EHOSTUNREACH : ENETUNREACH); + return (rt->rt_flags & RTF_HOST) ? + EHOSTUNREACH : ENETUNREACH; } /* Check again since we dropped the lock above */ if (rt->rt_flags & RTF_CONDEMNED) { - if (gwrt != NULL) + if (gwrt != NULL) { rtfree_locked(gwrt); + } /* Release extra ref */ RT_REMREF_LOCKED(rt); - return (EBUSY); + return EBUSY; } /* Set gateway route; callee adds ref to gwrt if non-NULL */ @@ -2684,8 +2755,9 @@ rt_setgate(struct rtentry *rt, struct sockaddr *dst, struct sockaddr *gate) } /* Release extra ref from rtalloc1() */ - if (gwrt != NULL) + if (gwrt != NULL) { RT_REMREF(gwrt); + } } /* @@ -2705,7 +2777,7 @@ rt_setgate(struct rtentry *rt, struct sockaddr *dst, struct sockaddr *gate) rt_set_gwroute(rt, dst, NULL); /* Release extra ref */ RT_REMREF_LOCKED(rt); - return (ENOBUFS); + return ENOBUFS; } /* @@ -2715,7 +2787,7 @@ rt_setgate(struct rtentry *rt, struct sockaddr *dst, struct sockaddr *gate) */ bzero(new, dlen + glen); Bcopy(dst, new, dst->sa_len); - R_Free(rt_key(rt)); /* free old block; NULL is okay */ + R_Free(rt_key(rt)); /* free old block; NULL is okay */ rt->rt_nodes->rn_key = new; rt->rt_gateway = (struct sockaddr *)(new + dlen); } @@ -2758,7 +2830,7 @@ rt_setgate(struct rtentry *rt, struct sockaddr *dst, struct sockaddr *gate) /* Release extra ref */ RT_REMREF_LOCKED(rt); - return (0); + return 0; } #undef SA_SIZE @@ -2771,9 +2843,9 @@ rt_set_gwroute(struct rtentry *rt, struct sockaddr *dst, struct rtentry *gwrt) LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_OWNED); RT_LOCK_ASSERT_HELD(rt); - if (gwrt != NULL) - RT_ADDREF(gwrt); /* for this routine */ - + if (gwrt != NULL) { + RT_ADDREF(gwrt); /* for this routine */ + } /* * Get rid of existing gateway route; if rt_gwroute is already * set to gwrt, this is slightly redundant (though safe since @@ -2782,7 +2854,7 @@ rt_set_gwroute(struct rtentry *rt, struct sockaddr *dst, struct rtentry *gwrt) if (rt->rt_gwroute != NULL) { struct rtentry *ogwrt = 
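/*
 * rt_setgate() above keeps the route key and the gateway in one allocation,
 * with rt_gateway pointing just past the (rounded-up) key.  A user-space
 * sketch of that layout; calloc() stands in for the kernel allocator and
 * the helper name is invented.
 */
#include <stdlib.h>
#include <string.h>
#include <stdint.h>

static int
set_key_and_gateway(const void *dst, size_t dlen, const void *gate,
    size_t glen, void **key_out, void **gw_out)
{
	/* round both parts up, as SA_SIZE() does, so the gateway stays aligned */
	size_t dspace = (dlen + 31) & ~(size_t)31;
	size_t gspace = (glen + 31) & ~(size_t)31;
	uint8_t *blk = calloc(1, dspace + gspace);

	if (blk == NULL) {
		return -1;                      /* ENOBUFS on the kernel path */
	}
	memcpy(blk, dst, dlen);                 /* key first ...             */
	memcpy(blk + dspace, gate, glen);       /* ... gateway right after   */
	*key_out = blk;
	*gw_out = blk + dspace;
	return 0;
}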
rt->rt_gwroute; - VERIFY(rt != ogwrt); /* sanity check */ + VERIFY(rt != ogwrt); /* sanity check */ rt->rt_gwroute = NULL; RT_UNLOCK(rt); rtfree_locked(ogwrt); @@ -2794,7 +2866,7 @@ rt_set_gwroute(struct rtentry *rt, struct sockaddr *dst, struct rtentry *gwrt) * And associate the new gateway route. */ if ((rt->rt_gwroute = gwrt) != NULL) { - RT_ADDREF(gwrt); /* for rt */ + RT_ADDREF(gwrt); /* for rt */ if (rt->rt_flags & RTF_WASCLONED) { /* rt_parent might be NULL if rt is embryonic */ @@ -2813,7 +2885,7 @@ rt_set_gwroute(struct rtentry *rt, struct sockaddr *dst, struct rtentry *gwrt) RT_UNLOCK(gwrt); } - RT_REMREF(gwrt); /* for this routine */ + RT_REMREF(gwrt); /* for this routine */ } } @@ -2825,16 +2897,18 @@ rt_maskedcopy(const struct sockaddr *src, struct sockaddr *dst, const char *srcp = &src->sa_data[0]; char *dstp = &dst->sa_data[0]; const char *maskend = (char *)dst - + MIN(netmask->sa_len, src->sa_len); + + MIN(netmask->sa_len, src->sa_len); const char *srcend = (char *)dst + src->sa_len; dst->sa_len = src->sa_len; dst->sa_family = src->sa_family; - while (dstp < maskend) + while (dstp < maskend) { *dstp++ = *srcp++ & *netmaskp++; - if (dstp < srcend) + } + if (dstp < srcend) { memset(dstp, 0, (size_t)(srcend - dstp)); + } } /* @@ -2853,8 +2927,9 @@ node_lookup(struct sockaddr *dst, struct sockaddr *netmask, rn_matchf_t *f = rn_match_ifscope; void *w = &ma; - if (af != AF_INET && af != AF_INET6) - return (NULL); + if (af != AF_INET && af != AF_INET6) { + return NULL; + } rnh = rt_tables[af]; @@ -2865,17 +2940,20 @@ node_lookup(struct sockaddr *dst, struct sockaddr *netmask, dst = sa_copy(dst, &ss, (ifscope == IFSCOPE_NONE) ? NULL : &ifscope); /* Transform netmask into the internal routing table form */ - if (netmask != NULL) + if (netmask != NULL) { netmask = ma_copy(af, netmask, &mask, ifscope); + } - if (ifscope == IFSCOPE_NONE) + if (ifscope == IFSCOPE_NONE) { f = w = NULL; + } rn = rnh->rnh_lookup_args(dst, netmask, rnh, f, w); - if (rn != NULL && (rn->rn_flags & RNF_ROOT)) + if (rn != NULL && (rn->rn_flags & RNF_ROOT)) { rn = NULL; + } - return (rn); + return rn; } /* @@ -2889,8 +2967,8 @@ node_lookup_default(int af) VERIFY(af == AF_INET || af == AF_INET6); rnh = rt_tables[af]; - return (af == AF_INET ? rnh->rnh_lookup(&sin_def, NULL, rnh) : - rnh->rnh_lookup(&sin6_def, NULL, rnh)); + return af == AF_INET ? rnh->rnh_lookup(&sin_def, NULL, rnh) : + rnh->rnh_lookup(&sin6_def, NULL, rnh); } boolean_t @@ -2898,8 +2976,9 @@ rt_ifa_is_dst(struct sockaddr *dst, struct ifaddr *ifa) { boolean_t result = FALSE; - if (ifa == NULL || ifa->ifa_addr == NULL) - return (result); + if (ifa == NULL || ifa->ifa_addr == NULL) { + return result; + } IFA_LOCK_SPIN(ifa); @@ -2908,12 +2987,13 @@ rt_ifa_is_dst(struct sockaddr *dst, struct ifaddr *ifa) SIN(dst)->sin_addr.s_addr == SIN(ifa->ifa_addr)->sin_addr.s_addr) || (dst->sa_family == AF_INET6 && - SA6_ARE_ADDR_EQUAL(SIN6(dst), SIN6(ifa->ifa_addr))))) + SA6_ARE_ADDR_EQUAL(SIN6(dst), SIN6(ifa->ifa_addr))))) { result = TRUE; + } IFA_UNLOCK(ifa); - return (result); + return result; } /* @@ -2955,25 +3035,21 @@ rt_lookup_common(boolean_t lookup_only, boolean_t coarse, struct sockaddr *dst, VERIFY(!coarse || ifscope == IFSCOPE_NONE); LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_OWNED); -#if INET6 /* * While we have rnh_lock held, see if we need to schedule the timer. 
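/*
 * Stand-alone version of the byte loop in rt_maskedcopy() above: AND the
 * source with the netmask for the masked prefix, then zero the remainder so
 * masked keys always compare equal beyond the mask.  Plain buffers are used
 * instead of sockaddrs.
 */
#include <stddef.h>
#include <string.h>
#include <stdint.h>

static void
masked_copy(uint8_t *dst, const uint8_t *src, size_t srclen,
    const uint8_t *mask, size_t masklen)
{
	size_t n = masklen < srclen ? masklen : srclen;
	size_t i;

	for (i = 0; i < n; i++) {
		dst[i] = src[i] & mask[i];      /* keep only the masked prefix */
	}
	if (n < srclen) {
		memset(dst + n, 0, srclen - n); /* tail is defined and zero */
	}
}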
*/ - if (nd6_sched_timeout_want) + if (nd6_sched_timeout_want) { nd6_sched_timeout(NULL, NULL); -#endif /* INET6 */ + } - if (!lookup_only) + if (!lookup_only) { netmask = NULL; + } /* * Non-scoped route lookup. */ -#if INET6 if (af != AF_INET && af != AF_INET6) { -#else - if (af != AF_INET) { -#endif /* !INET6 */ rn = rnh->rnh_matchaddr(dst, rnh); /* @@ -2981,8 +3057,9 @@ rt_lookup_common(boolean_t lookup_only, boolean_t coarse, struct sockaddr *dst, * would have done the necessary work to clear RTPRF_OURS * for certain protocol families. */ - if (rn != NULL && (rn->rn_flags & RNF_ROOT)) + if (rn != NULL && (rn->rn_flags & RNF_ROOT)) { rn = NULL; + } if (rn != NULL) { RT_LOCK_SPIN(RT(rn)); if (!(RT(rn)->rt_flags & RTF_CONDEMNED)) { @@ -2993,32 +3070,36 @@ rt_lookup_common(boolean_t lookup_only, boolean_t coarse, struct sockaddr *dst, rn = NULL; } } - return (RT(rn)); + return RT(rn); } /* Transform dst/netmask into the internal routing table form */ dst = sa_copy(dst, &dst_ss, &ifscope); - if (netmask != NULL) + if (netmask != NULL) { netmask = ma_copy(af, netmask, &mask_ss, ifscope); + } dontcare = (ifscope == IFSCOPE_NONE); #if (DEVELOPMENT || DEBUG) if (rt_verbose) { - if (af == AF_INET) + if (af == AF_INET) { (void) inet_ntop(af, &SIN(dst)->sin_addr.s_addr, - s_dst, sizeof (s_dst)); - else + s_dst, sizeof(s_dst)); + } else { (void) inet_ntop(af, &SIN6(dst)->sin6_addr, - s_dst, sizeof (s_dst)); + s_dst, sizeof(s_dst)); + } - if (netmask != NULL && af == AF_INET) + if (netmask != NULL && af == AF_INET) { (void) inet_ntop(af, &SIN(netmask)->sin_addr.s_addr, - s_netmask, sizeof (s_netmask)); - if (netmask != NULL && af == AF_INET6) + s_netmask, sizeof(s_netmask)); + } + if (netmask != NULL && af == AF_INET6) { (void) inet_ntop(af, &SIN6(netmask)->sin6_addr, - s_netmask, sizeof (s_netmask)); - else + s_netmask, sizeof(s_netmask)); + } else { *s_netmask = '\0'; + } printf("%s (%d, %d, %s, %s, %u)\n", __func__, lookup_only, coarse, s_dst, s_netmask, ifscope); } @@ -3042,8 +3123,9 @@ rt_lookup_common(boolean_t lookup_only, boolean_t coarse, struct sockaddr *dst, * we'll do a more-specific search below, scoped to the interface * of that route. 
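/*
 * The DEVELOPMENT/DEBUG path above formats addresses with inet_ntop(); the
 * same call works unchanged in user space, as in this minimal example:
 */
#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>

static void
print_dst(int af, const void *addr)
{
	char buf[INET6_ADDRSTRLEN];

	if (inet_ntop(af, addr, buf, sizeof(buf)) != NULL) {
		printf("dst %s\n", buf);
	}
}

/* e.g.: struct in6_addr a = IN6ADDR_LOOPBACK_INIT; print_dst(AF_INET6, &a); */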
*/ - if (dontcare) + if (dontcare) { ifscope = get_primary_ifscope(af); + } /* * Keep the original result if either of the following is true: @@ -3060,7 +3142,7 @@ rt_lookup_common(boolean_t lookup_only, boolean_t coarse, struct sockaddr *dst, struct rtentry *rt = RT(rn); #if (DEVELOPMENT || DEBUG) if (rt_verbose) { - rt_str(rt, dbuf, sizeof (dbuf), gbuf, sizeof (gbuf)); + rt_str(rt, dbuf, sizeof(dbuf), gbuf, sizeof(gbuf)); printf("%s unscoped search %p to %s->%s->%s ifa_ifp %s\n", __func__, rt, dbuf, gbuf, @@ -3083,11 +3165,12 @@ rt_lookup_common(boolean_t lookup_only, boolean_t coarse, struct sockaddr *dst, * route for local addresses */ rn = NULL; - if (dontcare) + if (dontcare) { ifscope = rt->rt_ifp->if_index; - else if (ifscope != lo_ifp->if_index || - rt_ifa_is_dst(dst, rt->rt_ifa) == FALSE) + } else if (ifscope != lo_ifp->if_index || + rt_ifa_is_dst(dst, rt->rt_ifa) == FALSE) { rn0 = NULL; + } } else if (!(rt->rt_flags & RTF_IFSCOPE)) { /* * Right interface, except that this route @@ -3112,7 +3195,7 @@ rt_lookup_common(boolean_t lookup_only, boolean_t coarse, struct sockaddr *dst, if (rt_verbose && rn != NULL) { struct rtentry *rt = RT(rn); - rt_str(rt, dbuf, sizeof (dbuf), gbuf, sizeof (gbuf)); + rt_str(rt, dbuf, sizeof(dbuf), gbuf, sizeof(gbuf)); printf("%s scoped search %p to %s->%s->%s ifa %s\n", __func__, rt, dbuf, gbuf, @@ -3136,8 +3219,9 @@ rt_lookup_common(boolean_t lookup_only, boolean_t coarse, struct sockaddr *dst, */ if (rn == NULL || coarse || (rn0 != NULL && ((SA_DEFAULT(rt_key(RT(rn))) && !SA_DEFAULT(rt_key(RT(rn0)))) || - (!RT_HOST(rn) && RT_HOST(rn0))))) + (!RT_HOST(rn) && RT_HOST(rn0))))) { rn = rn0; + } /* * If we still don't have a route, use the non-scoped default @@ -3167,12 +3251,12 @@ rt_lookup_common(boolean_t lookup_only, boolean_t coarse, struct sockaddr *dst, } #if (DEVELOPMENT || DEBUG) if (rt_verbose) { - if (rn == NULL) + if (rn == NULL) { printf("%s %u return NULL\n", __func__, ifscope); - else { + } else { struct rtentry *rt = RT(rn); - rt_str(rt, dbuf, sizeof (dbuf), gbuf, sizeof (gbuf)); + rt_str(rt, dbuf, sizeof(dbuf), gbuf, sizeof(gbuf)); printf("%s %u return %p to %s->%s->%s ifa_ifp %s\n", __func__, ifscope, rt, @@ -3183,23 +3267,23 @@ rt_lookup_common(boolean_t lookup_only, boolean_t coarse, struct sockaddr *dst, } } #endif - return (RT(rn)); + return RT(rn); } struct rtentry * rt_lookup(boolean_t lookup_only, struct sockaddr *dst, struct sockaddr *netmask, struct radix_node_head *rnh, unsigned int ifscope) { - return (rt_lookup_common(lookup_only, FALSE, dst, netmask, - rnh, ifscope)); + return rt_lookup_common(lookup_only, FALSE, dst, netmask, + rnh, ifscope); } struct rtentry * rt_lookup_coarse(boolean_t lookup_only, struct sockaddr *dst, struct sockaddr *netmask, struct radix_node_head *rnh) { - return (rt_lookup_common(lookup_only, TRUE, dst, netmask, - rnh, IFSCOPE_NONE)); + return rt_lookup_common(lookup_only, TRUE, dst, netmask, + rnh, IFSCOPE_NONE); } boolean_t @@ -3210,15 +3294,16 @@ rt_validate(struct rtentry *rt) if ((rt->rt_flags & (RTF_UP | RTF_CONDEMNED)) == RTF_UP) { int af = rt_key(rt)->sa_family; - if (af == AF_INET) + if (af == AF_INET) { (void) in_validate(RN(rt)); - else if (af == AF_INET6) + } else if (af == AF_INET6) { (void) in6_validate(RN(rt)); + } } else { rt = NULL; } - return (rt != NULL); + return rt != NULL; } /* @@ -3236,14 +3321,14 @@ rtinit(struct ifaddr *ifa, int cmd, int flags) error = rtinit_locked(ifa, cmd, flags); lck_mtx_unlock(rnh_lock); - return (error); + return error; } int rtinit_locked(struct 
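/*
 * The rn/rn0 comparison above reduces to a simple preference rule once both
 * a scoped and an unscoped lookup produced something: keep the scoped result
 * unless it is weaker, i.e. it is a default route while the unscoped one is
 * more specific, or it is a network route while the unscoped one is an exact
 * host match.  Booleans stand in for the SA_DEFAULT()/RT_HOST() tests.
 */
#include <stdbool.h>

static bool
prefer_unscoped(bool scoped_is_default, bool unscoped_is_default,
    bool scoped_is_host, bool unscoped_is_host)
{
	if (scoped_is_default && !unscoped_is_default) {
		return true;    /* unscoped route is more specific */
	}
	if (!scoped_is_host && unscoped_is_host) {
		return true;    /* unscoped route is an exact host route */
	}
	return false;
}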
ifaddr *ifa, int cmd, int flags) { struct radix_node_head *rnh; - uint8_t nbuf[128]; /* long enough for IPv6 */ + uint8_t nbuf[128]; /* long enough for IPv6 */ #if (DEVELOPMENT || DEBUG) char dbuf[MAX_IPv6_STR_LEN], gbuf[MAX_IPv6_STR_LEN]; char abuf[MAX_IPv6_STR_LEN]; @@ -3274,7 +3359,7 @@ rtinit_locked(struct ifaddr *ifa, int cmd, int flags) error = EINVAL; goto done; } - if (netmask != NULL && netmask->sa_len > sizeof (nbuf)) { + if (netmask != NULL && netmask->sa_len > sizeof(nbuf)) { log(LOG_ERR, "%s: %s failed, mask sa_len %d too large\n", __func__, rtm2str(cmd), dst->sa_len); error = EINVAL; @@ -3284,15 +3369,12 @@ rtinit_locked(struct ifaddr *ifa, int cmd, int flags) #if (DEVELOPMENT || DEBUG) if (dst->sa_family == AF_INET) { (void) inet_ntop(AF_INET, &SIN(dst)->sin_addr.s_addr, - abuf, sizeof (abuf)); - } -#if INET6 - else if (dst->sa_family == AF_INET6) { + abuf, sizeof(abuf)); + } else if (dst->sa_family == AF_INET6) { (void) inet_ntop(AF_INET6, &SIN6(dst)->sin6_addr, - abuf, sizeof (abuf)); + abuf, sizeof(abuf)); } -#endif /* INET6 */ -#endif /* (DEVELOPMENT || DEBUG) */ +#endif /* (DEVELOPMENT || DEBUG) */ if ((rnh = rt_tables[dst->sa_family]) == NULL) { error = EINVAL; @@ -3325,8 +3407,8 @@ rtinit_locked(struct ifaddr *ifa, int cmd, int flags) rt = rt_lookup_coarse(TRUE, dst, NULL, rnh); if (rt != NULL) { #if (DEVELOPMENT || DEBUG) - rt_str(rt, dbuf, sizeof (dbuf), gbuf, sizeof (gbuf)); -#endif + rt_str(rt, dbuf, sizeof(dbuf), gbuf, sizeof(gbuf)); +#endif /* * Ok so we found the rtentry. it has an extra reference * for us at this stage. we won't need that so @@ -3351,7 +3433,7 @@ rtinit_locked(struct ifaddr *ifa, int cmd, int flags) rt->rt_ifp->if_xname : ""), rt->rt_flags, RTF_BITS, abuf, (uint64_t)VM_KERNEL_ADDRPERM( - rt->rt_ifa), + rt->rt_ifa), (uint64_t)VM_KERNEL_ADDRPERM(ifa)); } #endif /* (DEVELOPMENT || DEBUG) */ @@ -3401,12 +3483,13 @@ rtinit_locked(struct ifaddr *ifa, int cmd, int flags) * Do the actual request */ if ((error = rtrequest_locked(cmd, dst, ifa->ifa_addr, netmask, - flags | ifa->ifa_flags, &rt)) != 0) + flags | ifa->ifa_flags, &rt)) != 0) { goto done; + } VERIFY(rt != NULL); #if (DEVELOPMENT || DEBUG) - rt_str(rt, dbuf, sizeof (dbuf), gbuf, sizeof (gbuf)); + rt_str(rt, dbuf, sizeof(dbuf), gbuf, sizeof(gbuf)); #endif /* (DEVELOPMENT || DEBUG) */ switch (cmd) { case RTM_DELETE: @@ -3439,16 +3522,16 @@ rtinit_locked(struct ifaddr *ifa, int cmd, int flags) RT_LOCK(rt); if (rt->rt_ifa != ifa) { void (*ifa_rtrequest) - (int, struct rtentry *, struct sockaddr *); + (int, struct rtentry *, struct sockaddr *); #if (DEVELOPMENT || DEBUG) if (rt_verbose) { if (!(rt->rt_ifa->ifa_ifp->if_flags & - (IFF_POINTOPOINT|IFF_LOOPBACK))) { + (IFF_POINTOPOINT | IFF_LOOPBACK))) { log(LOG_ERR, "%s: %s route to %s->%s->%s, " "flags %b, ifaddr %s, rt_ifa 0x%llx != " "ifa 0x%llx\n", __func__, rtm2str(cmd), dbuf, gbuf, ((rt->rt_ifp != NULL) ? - rt->rt_ifp->if_xname : ""), rt->rt_flags, + rt->rt_ifp->if_xname : ""), rt->rt_flags, RTF_BITS, abuf, (uint64_t)VM_KERNEL_ADDRPERM(rt->rt_ifa), (uint64_t)VM_KERNEL_ADDRPERM(ifa)); @@ -3471,8 +3554,9 @@ rtinit_locked(struct ifaddr *ifa, int cmd, int flags) * this route and ifaddr. */ ifa_rtrequest = rt->rt_ifa->ifa_rtrequest; - if (ifa_rtrequest != NULL) + if (ifa_rtrequest != NULL) { ifa_rtrequest(RTM_DELETE, rt, NULL); + } /* * Set the route's ifa. */ @@ -3482,8 +3566,9 @@ rtinit_locked(struct ifaddr *ifa, int cmd, int flags) /* * Purge any link-layer info caching. 
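/*
 * The sa_len checks above protect a fixed 128-byte scratch buffer before any
 * copy is made.  Generic form of the same guard, with EINVAL signalling an
 * oversize caller-supplied sockaddr; the helper name is invented.
 */
#include <errno.h>
#include <string.h>
#include <stddef.h>
#include <stdint.h>

static int
copy_bounded(void *dst, size_t dstlen, const void *src, uint8_t srclen)
{
	if (srclen > dstlen) {
		return EINVAL;          /* reject an oversize sockaddr */
	}
	memcpy(dst, src, srclen);
	return 0;
}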
*/ - if (rt->rt_llinfo_purge != NULL) + if (rt->rt_llinfo_purge != NULL) { rt->rt_llinfo_purge(rt); + } /* * Adjust route ref count for the interfaces. */ @@ -3517,8 +3602,9 @@ rtinit_locked(struct ifaddr *ifa, int cmd, int flags) * any special processing in its new form. */ ifa_rtrequest = ifa->ifa_rtrequest; - if (ifa_rtrequest != NULL) + if (ifa_rtrequest != NULL) { ifa_rtrequest(RTM_ADD, rt, NULL); + } } else { #if (DEVELOPMENT || DEBUG) if (rt_verbose) { @@ -3550,7 +3636,7 @@ rtinit_locked(struct ifaddr *ifa, int cmd, int flags) /* NOTREACHED */ } done: - return (error); + return error; } static void @@ -3563,10 +3649,10 @@ rt_set_idleref(struct rtentry *rt) * that aren't marked with RTF_NOIFREF. */ if (rt->rt_parent != NULL && !(rt->rt_flags & - (RTF_NOIFREF|RTF_BROADCAST | RTF_MULTICAST)) && - (rt->rt_flags & (RTF_UP|RTF_WASCLONED|RTF_IFREF)) == - (RTF_UP|RTF_WASCLONED)) { - rt_clear_idleref(rt); /* drop existing refcnt if any */ + (RTF_NOIFREF | RTF_BROADCAST | RTF_MULTICAST)) && + (rt->rt_flags & (RTF_UP | RTF_WASCLONED | RTF_IFREF)) == + (RTF_UP | RTF_WASCLONED)) { + rt_clear_idleref(rt); /* drop existing refcnt if any */ rt->rt_if_ref_fn = rte_if_ref; /* Become a regular mutex, just in case */ RT_CONVERT_LOCK(rt); @@ -3602,10 +3688,11 @@ rt_set_proxy(struct rtentry *rt, boolean_t set) if (rt->rt_flags & (RTF_CLONING | RTF_PRCLONING)) { struct radix_node_head *rnh = rt_tables[rt_key(rt)->sa_family]; - if (set) + if (set) { rt->rt_flags |= RTF_PROXY; - else + } else { rt->rt_flags &= ~RTF_PROXY; + } RT_UNLOCK(rt); if (rnh != NULL && rt_mask(rt)) { @@ -3635,21 +3722,23 @@ void rt_lock(struct rtentry *rt, boolean_t spin) { RT_LOCK_ASSERT_NOTHELD(rt); - if (spin) + if (spin) { lck_mtx_lock_spin(&rt->rt_lock); - else + } else { lck_mtx_lock(&rt->rt_lock); - if (rte_debug & RTD_DEBUG) + } + if (rte_debug & RTD_DEBUG) { rte_lock_debug((struct rtentry_dbg *)rt); + } } void rt_unlock(struct rtentry *rt) { - if (rte_debug & RTD_DEBUG) + if (rte_debug & RTD_DEBUG) { rte_unlock_debug((struct rtentry_dbg *)rt); + } lck_mtx_unlock(&rt->rt_lock); - } static inline void @@ -3659,8 +3748,9 @@ rte_lock_debug(struct rtentry_dbg *rte) RT_LOCK_ASSERT_HELD((struct rtentry *)rte); idx = atomic_add_32_ov(&rte->rtd_lock_cnt, 1) % CTRACE_HIST_SIZE; - if (rte_debug & RTD_TRACE) + if (rte_debug & RTD_TRACE) { ctrace_record(&rte->rtd_lock[idx]); + } } static inline void @@ -3670,17 +3760,19 @@ rte_unlock_debug(struct rtentry_dbg *rte) RT_LOCK_ASSERT_HELD((struct rtentry *)rte); idx = atomic_add_32_ov(&rte->rtd_unlock_cnt, 1) % CTRACE_HIST_SIZE; - if (rte_debug & RTD_TRACE) + if (rte_debug & RTD_TRACE) { ctrace_record(&rte->rtd_unlock[idx]); + } } static struct rtentry * rte_alloc(void) { - if (rte_debug & RTD_DEBUG) - return (rte_alloc_debug()); + if (rte_debug & RTD_DEBUG) { + return rte_alloc_debug(); + } - return ((struct rtentry *)zalloc(rte_zone)); + return (struct rtentry *)zalloc(rte_zone); } static void @@ -3728,20 +3820,20 @@ rte_if_ref(struct ifnet *ifp, int cnt) * it is expected to check via SIOCGIFGETRTREFCNT again anyway. 
*/ if ((ifp->if_idle_flags & IFRF_IDLE_NOTIFY) && cnt < 0 && old == 1) { - bzero(&ev_msg, sizeof (ev_msg)); - bzero(&ev_data, sizeof (ev_data)); + bzero(&ev_msg, sizeof(ev_msg)); + bzero(&ev_data, sizeof(ev_data)); - ev_msg.vendor_code = KEV_VENDOR_APPLE; - ev_msg.kev_class = KEV_NETWORK_CLASS; - ev_msg.kev_subclass = KEV_DL_SUBCLASS; - ev_msg.event_code = KEV_DL_IF_IDLE_ROUTE_REFCNT; + ev_msg.vendor_code = KEV_VENDOR_APPLE; + ev_msg.kev_class = KEV_NETWORK_CLASS; + ev_msg.kev_subclass = KEV_DL_SUBCLASS; + ev_msg.event_code = KEV_DL_IF_IDLE_ROUTE_REFCNT; strlcpy(&ev_data.if_name[0], ifp->if_name, IFNAMSIZ); - ev_data.if_family = ifp->if_family; - ev_data.if_unit = ifp->if_unit; - ev_msg.dv[0].data_length = sizeof (struct net_event_data); - ev_msg.dv[0].data_ptr = &ev_data; + ev_data.if_family = ifp->if_family; + ev_data.if_unit = ifp->if_unit; + ev_msg.dv[0].data_length = sizeof(struct net_event_data); + ev_msg.dv[0].data_ptr = &ev_data; dlil_post_complete_msg(NULL, &ev_msg); } @@ -3754,12 +3846,13 @@ rte_alloc_debug(void) rte = ((struct rtentry_dbg *)zalloc(rte_zone)); if (rte != NULL) { - bzero(rte, sizeof (*rte)); - if (rte_debug & RTD_TRACE) + bzero(rte, sizeof(*rte)); + if (rte_debug & RTD_TRACE) { ctrace_record(&rte->rtd_alloc); + } rte->rtd_inuse = RTD_INUSE; } - return ((struct rtentry *)rte); + return (struct rtentry *)rte; } static inline void @@ -3778,24 +3871,26 @@ rte_free_debug(struct rtentry *p) panic("rte_free: corrupted rte=%p\n", rte); /* NOTREACHED */ } - bcopy((caddr_t)p, (caddr_t)&rte->rtd_entry_saved, sizeof (*p)); + bcopy((caddr_t)p, (caddr_t)&rte->rtd_entry_saved, sizeof(*p)); /* Preserve rt_lock to help catch use-after-free cases */ bzero((caddr_t)p, offsetof(struct rtentry, rt_lock)); rte->rtd_inuse = RTD_FREED; - if (rte_debug & RTD_TRACE) + if (rte_debug & RTD_TRACE) { ctrace_record(&rte->rtd_free); + } - if (!(rte_debug & RTD_NO_FREE)) + if (!(rte_debug & RTD_NO_FREE)) { zfree(rte_zone, p); + } } void ctrace_record(ctrace_t *tr) { tr->th = current_thread(); - bzero(tr->pc, sizeof (tr->pc)); + bzero(tr->pc, sizeof(tr->pc)); (void) OSBacktrace(tr->pc, CTRACE_STACK_SIZE); } @@ -3806,16 +3901,19 @@ route_copyout(struct route *dst, const struct route *src, size_t length) bcopy(src, dst, length); /* Hold one reference for the local copy of struct route */ - if (dst->ro_rt != NULL) + if (dst->ro_rt != NULL) { RT_ADDREF(dst->ro_rt); + } /* Hold one reference for the local copy of struct lle */ - if (dst->ro_lle != NULL) + if (dst->ro_lle != NULL) { LLE_ADDREF(dst->ro_lle); + } /* Hold one reference for the local copy of struct ifaddr */ - if (dst->ro_srcia != NULL) + if (dst->ro_srcia != NULL) { IFA_ADDREF(dst->ro_srcia); + } } void @@ -3831,14 +3929,16 @@ route_copyin(struct route *src, struct route *dst, size_t length) * Ditch the cached link layer reference (dst) * since we're about to take everything there is in src */ - if (dst->ro_lle != NULL) + if (dst->ro_lle != NULL) { LLE_REMREF(dst->ro_lle); + } /* * Ditch the address in the cached copy (dst) since * we're about to take everything there is in src. 
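/*
 * route_copyout()/route_copyin() above keep one invariant: a copied struct
 * route is only valid while every pointer inside it holds its own reference.
 * Minimal illustration of "copy, then take a reference for the copy", with
 * invented types:
 */
#include <stdatomic.h>
#include <string.h>

struct refobj {
	_Atomic int refcnt;
};

struct cache {
	struct refobj *o;
};

static void
cache_copyout(struct cache *dst, const struct cache *src)
{
	memcpy(dst, src, sizeof(*dst));
	if (dst->o != NULL) {
		atomic_fetch_add(&dst->o->refcnt, 1);   /* ref owned by the copy */
	}
}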
*/ - if (dst->ro_srcia != NULL) + if (dst->ro_srcia != NULL) { IFA_REMREF(dst->ro_srcia); + } /* * Copy everything (rt, ro_lle, srcia, flags, dst) from src; the * references to rt and/or srcia were held at the time @@ -3857,16 +3957,18 @@ route_copyin(struct route *src, struct route *dst, size_t length) dst->ro_flags = src->ro_flags; if (dst->ro_lle != src->ro_lle) { - if (dst->ro_lle != NULL) + if (dst->ro_lle != NULL) { LLE_REMREF(dst->ro_lle); + } dst->ro_lle = src->ro_lle; } else if (src->ro_lle != NULL) { LLE_REMREF(src->ro_lle); } if (dst->ro_srcia != src->ro_srcia) { - if (dst->ro_srcia != NULL) + if (dst->ro_srcia != NULL) { IFA_REMREF(dst->ro_srcia); + } dst->ro_srcia = src->ro_srcia; } else if (src->ro_srcia != NULL) { IFA_REMREF(src->ro_srcia); @@ -3883,10 +3985,12 @@ route_copyin(struct route *src, struct route *dst, size_t length) if (src->ro_rt != NULL) { rtfree(dst->ro_rt); - if (dst->ro_lle != NULL) + if (dst->ro_lle != NULL) { LLE_REMREF(dst->ro_lle); - if (dst->ro_srcia != NULL) + } + if (dst->ro_srcia != NULL) { IFA_REMREF(dst->ro_srcia); + } bcopy(src, dst, length); goto done; } @@ -3927,7 +4031,7 @@ done: * If the returned route is non-NULL, the caller is responsible for * releasing the reference and unlocking the route. */ -#define senderr(e) { error = (e); goto bad; } +#define senderr(e) { error = (e); goto bad; } errno_t route_to_gwroute(const struct sockaddr *net_dest, struct rtentry *hint0, struct rtentry **out_route) @@ -3940,8 +4044,9 @@ route_to_gwroute(const struct sockaddr *net_dest, struct rtentry *hint0, *out_route = NULL; - if (rt == NULL) - return (0); + if (rt == NULL) { + return 0; + } /* * Next hop determination. Because we may involve the gateway route @@ -3987,8 +4092,9 @@ route_to_gwroute(const struct sockaddr *net_dest, struct rtentry *hint0, /* If there's no gateway rt, look it up */ if (gwrt == NULL) { - bcopy(rt->rt_gateway, gw, MIN(sizeof (ss), + bcopy(rt->rt_gateway, gw, MIN(sizeof(ss), rt->rt_gateway->sa_len)); + gw->sa_len = MIN(sizeof(ss), rt->rt_gateway->sa_len); RT_UNLOCK(rt); goto lookup; } @@ -4004,8 +4110,9 @@ route_to_gwroute(const struct sockaddr *net_dest, struct rtentry *hint0, if (!(gwrt->rt_flags & RTF_UP)) { rt->rt_gwroute = NULL; RT_UNLOCK(gwrt); - bcopy(rt->rt_gateway, gw, MIN(sizeof (ss), + bcopy(rt->rt_gateway, gw, MIN(sizeof(ss), rt->rt_gateway->sa_len)); + gw->sa_len = MIN(sizeof(ss), rt->rt_gateway->sa_len); RT_UNLOCK(rt); rtfree(gwrt); lookup: @@ -4028,8 +4135,9 @@ lookup: RT_REMREF_LOCKED(hint); hint = NULL; RT_UNLOCK(rt); - if (gwrt != NULL) + if (gwrt != NULL) { rtfree_locked(gwrt); + } lck_mtx_unlock(rnh_lock); senderr(EHOSTUNREACH); } @@ -4041,7 +4149,7 @@ lookup: */ rt_set_gwroute(rt, rt_key(rt), gwrt); VERIFY(rt == hint); - RT_REMREF_LOCKED(rt); /* hint still holds a refcnt */ + RT_REMREF_LOCKED(rt); /* hint still holds a refcnt */ RT_UNLOCK(rt); lck_mtx_unlock(rnh_lock); rt = gwrt; @@ -4049,7 +4157,7 @@ lookup: RT_ADDREF_LOCKED(gwrt); RT_UNLOCK(gwrt); VERIFY(rt == hint); - RT_REMREF_LOCKED(rt); /* hint still holds a refcnt */ + RT_REMREF_LOCKED(rt); /* hint still holds a refcnt */ RT_UNLOCK(rt); rt = gwrt; } @@ -4077,10 +4185,11 @@ lookup: } /* Clean up "hint" now; see notes above regarding hint0 */ - if (hint == hint0) + if (hint == hint0) { RT_REMREF(hint); - else + } else { rtfree(hint); + } hint = NULL; /* rt == gwrt; if it is now down, give up */ @@ -4106,7 +4215,7 @@ lookup: /* Caller is responsible for cleaning up "rt" */ *out_route = rt; - return (0); + return 0; bad: /* Clean up route (either it is "rt" 
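/*
 * The two new sa_len assignments above make sure the gateway copied into the
 * local sockaddr_storage never advertises more bytes than were actually
 * copied.  User-space sketch of the same defensive copy:
 */
#include <sys/socket.h>
#include <string.h>
#include <stdint.h>

static void
copy_gateway(struct sockaddr_storage *ss, const struct sockaddr *gw)
{
	size_t n = gw->sa_len < sizeof(*ss) ? gw->sa_len : sizeof(*ss);

	memcpy(ss, gw, n);
	((struct sockaddr *)ss)->sa_len = (uint8_t)n;   /* length matches copy */
}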
or "gwrt") */ @@ -4120,7 +4229,7 @@ bad: rtfree(rt); } } - return (error); + return error; } #undef senderr @@ -4199,7 +4308,6 @@ rt_str4(struct rtentry *rt, char *ds, uint32_t dslen, char *gs, uint32_t gslen) } } -#if INET6 static void rt_str6(struct rtentry *rt, char *ds, uint32_t dslen, char *gs, uint32_t gslen) { @@ -4230,8 +4338,6 @@ rt_str6(struct rtentry *rt, char *ds, uint32_t dslen, char *gs, uint32_t gslen) } } } -#endif /* INET6 */ - void rt_str(struct rtentry *rt, char *ds, uint32_t dslen, char *gs, uint32_t gslen) @@ -4240,21 +4346,22 @@ rt_str(struct rtentry *rt, char *ds, uint32_t dslen, char *gs, uint32_t gslen) case AF_INET: rt_str4(rt, ds, dslen, gs, gslen); break; -#if INET6 case AF_INET6: rt_str6(rt, ds, dslen, gs, gslen); break; -#endif /* INET6 */ default: - if (ds != NULL) + if (ds != NULL) { bzero(ds, dslen); - if (gs != NULL) + } + if (gs != NULL) { bzero(gs, gslen); + } break; } } -void route_event_init(struct route_event *p_route_ev, struct rtentry *rt, +void +route_event_init(struct route_event *p_route_ev, struct rtentry *rt, struct rtentry *gwrt, int route_ev_code) { VERIFY(p_route_ev != NULL); @@ -4304,29 +4411,28 @@ route_event_walktree(struct radix_node *rn, void *arg) /* Return if the entry is pending cleanup */ if (rt->rt_flags & RTPRF_OURS) { RT_UNLOCK(rt); - return (0); + return 0; } /* Return if it is not an indirect route */ if (!(rt->rt_flags & RTF_GATEWAY)) { RT_UNLOCK(rt); - return (0); + return 0; } if (rt->rt_gwroute != gwrt) { RT_UNLOCK(rt); - return (0); + return 0; } route_event_enqueue_nwk_wq_entry(rt, gwrt, p_route_ev->route_event_code, NULL, TRUE); RT_UNLOCK(rt); - return (0); + return 0; } -struct route_event_nwk_wq_entry -{ +struct route_event_nwk_wq_entry { struct nwk_wq_entry nwk_wqe; struct route_event rt_ev_arg; }; @@ -4349,20 +4455,22 @@ route_event_enqueue_nwk_wq_entry(struct rtentry *rt, struct rtentry *gwrt, */ if (route_event_code != ROUTE_EVHDLR_DEREGISTER) { /* The reference is released by route_event_callback */ - if (rt_locked) + if (rt_locked) { RT_ADDREF_LOCKED(rt); - else + } else { RT_ADDREF(rt); + } } p_rt_ev->rt_ev_arg.rt = rt; p_rt_ev->rt_ev_arg.gwrt = gwrt; p_rt_ev->rt_ev_arg.evtag = evtag; - if (gwrt != NULL) + if (gwrt != NULL) { p_gw_saddr = gwrt->rt_gateway; - else + } else { p_gw_saddr = rt->rt_gateway; + } VERIFY(p_gw_saddr->sa_len <= sizeof(p_rt_ev->rt_ev_arg.rt_addr)); bcopy(p_gw_saddr, &(p_rt_ev->rt_ev_arg.rt_addr), p_gw_saddr->sa_len); @@ -4379,47 +4487,47 @@ route_event2str(int route_event) { const char *route_event_str = "ROUTE_EVENT_UNKNOWN"; switch (route_event) { - case ROUTE_STATUS_UPDATE: - route_event_str = "ROUTE_STATUS_UPDATE"; - break; - case ROUTE_ENTRY_REFRESH: - route_event_str = "ROUTE_ENTRY_REFRESH"; - break; - case ROUTE_ENTRY_DELETED: - route_event_str = "ROUTE_ENTRY_DELETED"; - break; - case ROUTE_LLENTRY_RESOLVED: - route_event_str = "ROUTE_LLENTRY_RESOLVED"; - break; - case ROUTE_LLENTRY_UNREACH: - route_event_str = "ROUTE_LLENTRY_UNREACH"; - break; - case ROUTE_LLENTRY_CHANGED: - route_event_str = "ROUTE_LLENTRY_CHANGED"; - break; - case ROUTE_LLENTRY_STALE: - route_event_str = "ROUTE_LLENTRY_STALE"; - break; - case ROUTE_LLENTRY_TIMEDOUT: - route_event_str = "ROUTE_LLENTRY_TIMEDOUT"; - break; - case ROUTE_LLENTRY_DELETED: - route_event_str = "ROUTE_LLENTRY_DELETED"; - break; - case ROUTE_LLENTRY_EXPIRED: - route_event_str = "ROUTE_LLENTRY_EXPIRED"; - break; - case ROUTE_LLENTRY_PROBED: - route_event_str = "ROUTE_LLENTRY_PROBED"; - break; - case ROUTE_EVHDLR_DEREGISTER: - 
route_event_str = "ROUTE_EVHDLR_DEREGISTER"; - break; - default: - /* Init'd to ROUTE_EVENT_UNKNOWN */ - break; + case ROUTE_STATUS_UPDATE: + route_event_str = "ROUTE_STATUS_UPDATE"; + break; + case ROUTE_ENTRY_REFRESH: + route_event_str = "ROUTE_ENTRY_REFRESH"; + break; + case ROUTE_ENTRY_DELETED: + route_event_str = "ROUTE_ENTRY_DELETED"; + break; + case ROUTE_LLENTRY_RESOLVED: + route_event_str = "ROUTE_LLENTRY_RESOLVED"; + break; + case ROUTE_LLENTRY_UNREACH: + route_event_str = "ROUTE_LLENTRY_UNREACH"; + break; + case ROUTE_LLENTRY_CHANGED: + route_event_str = "ROUTE_LLENTRY_CHANGED"; + break; + case ROUTE_LLENTRY_STALE: + route_event_str = "ROUTE_LLENTRY_STALE"; + break; + case ROUTE_LLENTRY_TIMEDOUT: + route_event_str = "ROUTE_LLENTRY_TIMEDOUT"; + break; + case ROUTE_LLENTRY_DELETED: + route_event_str = "ROUTE_LLENTRY_DELETED"; + break; + case ROUTE_LLENTRY_EXPIRED: + route_event_str = "ROUTE_LLENTRY_EXPIRED"; + break; + case ROUTE_LLENTRY_PROBED: + route_event_str = "ROUTE_LLENTRY_PROBED"; + break; + case ROUTE_EVHDLR_DEREGISTER: + route_event_str = "ROUTE_EVHDLR_DEREGISTER"; + break; + default: + /* Init'd to ROUTE_EVENT_UNKNOWN */ + break; } - return route_event_str; + return route_event_str; } int @@ -4436,10 +4544,11 @@ route_op_entitlement_check(struct socket *so, * allowed accesses. */ if (soopt_cred_check(so, PRIV_NET_RESTRICTED_ROUTE_NC_READ, - allow_root, false) == 0) - return (0); - else - return (-1); + allow_root, false) == 0) { + return 0; + } else { + return -1; + } } } else if (cred != NULL) { uid_t uid = kauth_cred_getuid(cred); @@ -4448,12 +4557,13 @@ route_op_entitlement_check(struct socket *so, if (uid != 0 || !allow_root) { if (route_op_type == ROUTE_OP_READ) { if (priv_check_cred(cred, - PRIV_NET_RESTRICTED_ROUTE_NC_READ, 0) == 0) - return (0); - else - return (-1); + PRIV_NET_RESTRICTED_ROUTE_NC_READ, 0) == 0) { + return 0; + } else { + return -1; + } } } } - return (-1); + return -1; } diff --git a/bsd/net/route.h b/bsd/net/route.h index d1406262f..613d61709 100644 --- a/bsd/net/route.h +++ b/bsd/net/route.h @@ -84,7 +84,7 @@ struct rt_metrics { u_int32_t rmx_rttvar; /* estimated rtt variance */ u_int32_t rmx_pksent; /* packets sent using this route */ u_int32_t rmx_state; /* route state */ - u_int32_t rmx_filler[3]; /* will be used for T/TCP later */ + u_int32_t rmx_filler[3]; /* will be used for TCP's peer-MSS cache */ }; /* @@ -573,9 +573,7 @@ extern unsigned int rt_verbose; extern struct radix_node_head *rt_tables[AF_MAX + 1]; extern lck_mtx_t *rnh_lock; extern uint32_t route_genid_inet; /* INET route generation count */ -#if INET6 extern uint32_t route_genid_inet6; /* INET6 route generation count */ -#endif /* INET6 */ extern int rttrash; extern unsigned int rte_debug; @@ -587,9 +585,9 @@ extern void routegenid_update(void); extern void routegenid_inet_update(void); extern void routegenid_inet6_update(void); extern void rt_ifmsg(struct ifnet *); -extern void rt_missmsg(int, struct rt_addrinfo *, int, int); -extern void rt_newaddrmsg(int, struct ifaddr *, int, struct rtentry *); -extern void rt_newmaddrmsg(int, struct ifmultiaddr *); +extern void rt_missmsg(u_char, struct rt_addrinfo *, int, int); +extern void rt_newaddrmsg(u_char, struct ifaddr *, int, struct rtentry *); +extern void rt_newmaddrmsg(u_char, struct ifmultiaddr *); extern int rt_setgate(struct rtentry *, struct sockaddr *, struct sockaddr *); extern void set_primary_ifscope(int, unsigned int); extern unsigned int get_primary_ifscope(int); diff --git a/bsd/net/rtsock.c b/bsd/net/rtsock.c 
index 9633e6b4b..b676b9a13 100644 --- a/bsd/net/rtsock.c +++ b/bsd/net/rtsock.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2019 Apple Inc. All rights reserved. + * Copyright (c) 2000-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -132,8 +132,8 @@ static void rt_getmetrics(struct rtentry *, struct rt_metrics *); static void rt_setif(struct rtentry *, struct sockaddr *, struct sockaddr *, struct sockaddr *, unsigned int); static int rt_xaddrs(caddr_t, caddr_t, struct rt_addrinfo *); -static struct mbuf *rt_msg1(int, struct rt_addrinfo *); -static int rt_msg2(int, struct rt_addrinfo *, caddr_t, struct walkarg *, +static struct mbuf *rt_msg1(u_char, struct rt_addrinfo *); +static int rt_msg2(u_char, struct rt_addrinfo *, caddr_t, struct walkarg *, kauth_cred_t *); static int sysctl_dumpentry(struct radix_node *rn, void *vw); static int sysctl_dumpentry_ext(struct radix_node *rn, void *vw); @@ -150,7 +150,7 @@ SYSCTL_NODE(_net, OID_AUTO, route, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "routing"); /* Align x to 1024 (only power of 2) assuming x is positive */ #define ALIGN_BYTES(x) do { \ - x = P2ALIGN(x, 1024); \ + x = (uint32_t)P2ALIGN(x, 1024); \ } while(0) #define ROUNDUP32(a) \ @@ -703,7 +703,7 @@ report: (void) rnh->rnh_walktree(rnh, route_event_walktree, (void *)&rt_ev); RT_LOCK(rt); } - /* FALLTHRU */ + OS_FALLTHROUGH; case RTM_LOCK: rt->rt_rmx.rmx_locks &= ~(rtm->rtm_inits); rt->rt_rmx.rmx_locks |= @@ -790,8 +790,9 @@ rt_setexpire(struct rtentry *rt, uint64_t expiry) /* set both rt_expire and rmx_expire */ rt->rt_expire = expiry; if (expiry) { - rt->rt_rmx.rmx_expire = expiry + rt->base_calendartime - - rt->base_uptime; + rt->rt_rmx.rmx_expire = + (int32_t)(expiry + rt->base_calendartime - + rt->base_uptime); } else { rt->rt_rmx.rmx_expire = 0; } @@ -865,8 +866,8 @@ rt_getmetrics(struct rtentry *in, struct rt_metrics *out) NET_CALCULATE_CLOCKSKEW(caltime, in->base_calendartime, net_uptime(), in->base_uptime); - out->rmx_expire = in->base_calendartime + - in->rt_expire - in->base_uptime; + out->rmx_expire = (int32_t)(in->base_calendartime + + in->rt_expire - in->base_uptime); } else { out->rmx_expire = 0; } @@ -956,12 +957,9 @@ rt_setif(struct rtentry *rt, struct sockaddr *Ifpaddr, struct sockaddr *Ifaaddr, /* trigger route cache reevaluation */ if (rt_key(rt)->sa_family == AF_INET) { routegenid_inet_update(); - } -#if INET6 - else if (rt_key(rt)->sa_family == AF_INET6) { + } else if (rt_key(rt)->sa_family == AF_INET6) { routegenid_inet6_update(); } -#endif /* INET6 */ if (ifa != NULL) { struct ifaddr *oifa = rt->rt_ifa; @@ -1106,7 +1104,7 @@ rt_xaddrs(caddr_t cp, caddr_t cplim, struct rt_addrinfo *rtinfo) } static struct mbuf * -rt_msg1(int type, struct rt_addrinfo *rtinfo) +rt_msg1(u_char type, struct rt_addrinfo *rtinfo) { struct rt_msghdr *rtm; struct mbuf *m; @@ -1186,14 +1184,14 @@ rt_msg1(int type, struct rt_addrinfo *rtinfo) m_freem(m); return NULL; } - rtm->rtm_msglen = len; + rtm->rtm_msglen = (u_short)len; rtm->rtm_version = RTM_VERSION; rtm->rtm_type = type; return m; } static int -rt_msg2(int type, struct rt_addrinfo *rtinfo, caddr_t cp, struct walkarg *w, +rt_msg2(u_char type, struct rt_addrinfo *rtinfo, caddr_t cp, struct walkarg *w, kauth_cred_t* credp) { int i; @@ -1311,7 +1309,7 @@ again: rtm->rtm_version = RTM_VERSION; rtm->rtm_type = type; - rtm->rtm_msglen = len; + rtm->rtm_msglen = (u_short)len; } return len; } @@ -1323,7 +1321,7 @@ again: * destination. 
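/*
 * rt_setexpire() and rt_getmetrics() above convert between the uptime-based
 * rt_expire and the wall-clock rmx_expire by applying the calendar/uptime
 * offset captured when the route was created; the new casts make the final
 * narrowing to int32_t explicit.  Sketch of that conversion, with an
 * invented helper name:
 */
#include <stdint.h>

static int32_t
uptime_to_calendar(uint64_t expire_uptime, uint64_t base_calendartime,
    uint64_t base_uptime)
{
	if (expire_uptime == 0) {
		return 0;               /* zero means "never expires" */
	}
	return (int32_t)(expire_uptime + base_calendartime - base_uptime);
}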
*/ void -rt_missmsg(int type, struct rt_addrinfo *rtinfo, int flags, int error) +rt_missmsg(u_char type, struct rt_addrinfo *rtinfo, int flags, int error) { struct rt_msghdr *rtm; struct mbuf *m; @@ -1385,7 +1383,7 @@ rt_ifmsg(struct ifnet *ifp) * interface will be locked. Caller must hold rnh_lock and rt_lock. */ void -rt_newaddrmsg(int cmd, struct ifaddr *ifa, int error, struct rtentry *rt) +rt_newaddrmsg(u_char cmd, struct ifaddr *ifa, int error, struct rtentry *rt) { struct rt_addrinfo info; struct sockaddr *sa = 0; @@ -1408,7 +1406,7 @@ rt_newaddrmsg(int cmd, struct ifaddr *ifa, int error, struct rtentry *rt) if ((cmd == RTM_ADD && pass == 1) || (cmd == RTM_DELETE && pass == 2)) { struct ifa_msghdr *ifam; - int ncmd = cmd == RTM_ADD ? RTM_NEWADDR : RTM_DELADDR; + u_char ncmd = cmd == RTM_ADD ? RTM_NEWADDR : RTM_DELADDR; /* Lock ifp for if_lladdr */ ifnet_lock_shared(ifp); @@ -1468,7 +1466,7 @@ rt_newaddrmsg(int cmd, struct ifaddr *ifa, int error, struct rtentry *rt) * there is no route state to worry about. */ void -rt_newmaddrmsg(int cmd, struct ifmultiaddr *ifma) +rt_newmaddrmsg(u_char cmd, struct ifmultiaddr *ifma) { struct rt_addrinfo info; struct mbuf *m = 0; @@ -1958,7 +1956,7 @@ sysctl_iflist2(int af, struct walkarg *w) ifm->ifm_snd_len = IFCQ_LEN(&ifp->if_snd); ifm->ifm_snd_maxlen = IFCQ_MAXLEN(&ifp->if_snd); ifm->ifm_snd_drops = - ifp->if_snd.ifcq_dropcnt.packets; + (int)ifp->if_snd.ifcq_dropcnt.packets; ifm->ifm_timer = ifp->if_timer; if_data_internal_to_if_data64(ifp, &ifp->if_data, &ifm->ifm_data); @@ -2158,7 +2156,7 @@ sysctl_rtsock SYSCTL_HANDLER_ARGS if (namelen != 3) { return EINVAL; } - af = name[0]; + af = (u_char)name[0]; Bzero(&w, sizeof(w)); w.w_op = name[1]; w.w_arg = name[2]; diff --git a/bsd/net/sixxlowpan.c b/bsd/net/sixxlowpan.c index 8ccaab009..c4386985a 100644 --- a/bsd/net/sixxlowpan.c +++ b/bsd/net/sixxlowpan.c @@ -838,6 +838,9 @@ sixxlowpan_uncompress(struct frame802154 *ieee02154hdr, u_int8_t *payload) * hdroffset negative means that we have to remove * hdrlen of extra stuff */ + if (ieee02154hdr->payload_len < hdrlen) { + return EINVAL; + } memmove(&payload[0], &payload[hdrlen], ieee02154hdr->payload_len - hdrlen); @@ -850,6 +853,9 @@ sixxlowpan_uncompress(struct frame802154 *ieee02154hdr, u_int8_t *payload) * hdrlen is the size of the decompressed header * that takes the place of compressed header of size hdroffset */ + if (ieee02154hdr->payload_len < hdroffset) { + return EINVAL; + } memmove(payload + hdrlen, payload + hdroffset, ieee02154hdr->payload_len - hdroffset); diff --git a/bsd/net/skywalk_stubs.c b/bsd/net/skywalk_stubs.c index f8af8c657..f984a3e88 100644 --- a/bsd/net/skywalk_stubs.c +++ b/bsd/net/skywalk_stubs.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015-2019 Apple Inc. All rights reserved. + * Copyright (c) 2015-2020 Apple Inc. All rights reserved. 
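The two EINVAL guards added to sixxlowpan_uncompress() above follow one pattern: confirm that the frame's stated payload length covers the header bytes before computing payload_len - hdrlen (or - hdroffset) and handing it to memmove(), so the length arithmetic cannot wrap for a short frame. A minimal user-space sketch of the same guard, with a hypothetical frame struct and names rather than the kernel's types:

#include <errno.h>
#include <stdint.h>
#include <string.h>

/* Hypothetical stand-in for the 802.15.4 frame bookkeeping. */
struct frame_hdr {
    uint16_t payload_len;   /* bytes of payload following the header */
};

/*
 * Strip hdrlen bytes of compressed header from the front of payload.
 * Returns 0 on success, EINVAL if the frame is too short; checking
 * payload_len < hdrlen first keeps the subtraction below from wrapping.
 */
static int
strip_header(struct frame_hdr *hdr, uint8_t *payload, size_t hdrlen)
{
    if (hdr->payload_len < hdrlen) {
        return EINVAL;
    }
    memmove(&payload[0], &payload[hdrlen], hdr->payload_len - hdrlen);
    hdr->payload_len -= (uint16_t)hdrlen;
    return 0;
}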
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -41,16 +41,17 @@ _name(void) \ __builtin_unreachable(); \ } +STUB(kern_buflet_get_data_address); STUB(kern_buflet_get_data_offset); STUB(kern_buflet_get_data_length); +STUB(kern_buflet_get_data_limit); STUB(kern_buflet_get_object_address); -STUB(kern_buflet_get_object_offset); +STUB(kern_buflet_get_object_limit); STUB(kern_buflet_get_object_segment); +STUB(kern_buflet_set_data_address); STUB(kern_buflet_set_data_offset); STUB(kern_buflet_set_data_length); -STUB(kern_buflet_get_data_limit); -STUB(kern_buflet_attach_buffer); -STUB(kern_buflet_attach_buffer_with_segment_info); +STUB(kern_buflet_set_data_limit); STUB(kern_channel_advance_slot); STUB(kern_channel_available_slot_count); STUB(kern_channel_get_context); @@ -90,10 +91,11 @@ STUB(kern_nexus_get_context); STUB(kern_nexus_get_pbufpool); STUB(kern_nexus_register_domain_provider); STUB(kern_packet_clear_flow_uuid); +STUB(kern_packet_clone); +STUB(kern_packet_clone_nosleep); STUB(kern_packet_get_euuid); STUB(kern_packet_finalize); STUB(kern_packet_get_buflet_count); -STUB(kern_packet_set_buflet_count); STUB(kern_packet_get_data_length); STUB(kern_packet_get_flow_uuid); STUB(kern_packet_get_inet_checksum); @@ -109,6 +111,7 @@ STUB(kern_packet_get_object_index); STUB(kern_packet_get_policy_id); STUB(kern_packet_get_service_class); STUB(kern_packet_get_service_class_index); +STUB(kern_packet_is_high_priority); STUB(kern_packet_get_traffic_class); STUB(kern_packet_get_timestamp); STUB(kern_packet_get_transport_header_offset); @@ -148,6 +151,11 @@ STUB(kern_packet_set_vlan_tag); STUB(kern_packet_get_vlan_tag); STUB(kern_packet_get_vlan_id); STUB(kern_packet_get_vlan_priority); +STUB(kern_packet_add_buflet); +STUB(kern_packet_append); +STUB(kern_packet_get_next); +STUB(kern_packet_set_chain_counts); +STUB(kern_packet_get_chain_counts); STUB(kern_pbufpool_alloc); STUB(kern_pbufpool_alloc_batch); STUB(kern_pbufpool_alloc_batch_callback); @@ -158,11 +166,18 @@ STUB(kern_pbufpool_create); STUB(kern_pbufpool_destroy); STUB(kern_pbufpool_free); STUB(kern_pbufpool_free_batch); +STUB(kern_pbufpool_free_chain); STUB(kern_pbufpool_get_context); STUB(kern_pbufpool_get_memory_info); STUB(kern_pbufpool_alloc_buffer); STUB(kern_pbufpool_alloc_buffer_nosleep); STUB(kern_pbufpool_free_buffer); +STUB(kern_pbufpool_alloc_buflet); +STUB(kern_pbufpool_alloc_buflet_nosleep); STUB(kern_segment_get_index); +#if NETWORKING +STUB(bpf_tap_packet_in); +STUB(bpf_tap_packet_out); +#endif #undef STUB #endif /* !SKYWALK */ diff --git a/bsd/netinet/Makefile b/bsd/netinet/Makefile index dc4f2c43b..7cea9f2fd 100644 --- a/bsd/netinet/Makefile +++ b/bsd/netinet/Makefile @@ -28,8 +28,6 @@ PRIVATE_DATAFILES = \ ip_compat.h \ ip_dummynet.h \ ip_flowid.h \ - ip_fw.h \ - ip_fw2.h \ mptcp_var.h \ tcp.h \ tcp_cc.h \ @@ -39,11 +37,22 @@ PRIVATE_DATAFILES = \ udp.h \ in_stat.h +DRIVERKIT_DATAFILES = \ + in.h \ + ip.h \ + ip6.h \ + tcp.h \ + ip_compat.h \ + in_systm.h \ + udp.h + PRIVATE_KERNELFILES = ${KERNELFILES} \ ip_ecn.h ip_encap.h tcp_log.h INSTALL_MI_LIST = ${DATAFILES} +INSTALL_DRIVERKIT_MI_LIST = ${DRIVERKIT_DATAFILES} + INSTALL_MI_DIR = netinet EXPORT_MI_LIST = ${DATAFILES} ${KERNELFILES} diff --git a/bsd/netinet/flow_divert.c b/bsd/netinet/flow_divert.c index a35ed9ab2..8a2b4c4f8 100644 --- a/bsd/netinet/flow_divert.c +++ b/bsd/netinet/flow_divert.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012-2017 Apple Inc. All rights reserved. + * Copyright (c) 2012-2017, 2020 Apple Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -52,6 +52,7 @@ #include #include #include +#include #include #include #include @@ -59,10 +60,8 @@ #include #include #include -#if INET6 #include #include -#endif /* INET6 */ #include #include #include @@ -77,8 +76,10 @@ #define FLOW_DIVERT_WRITE_CLOSED 0x00000004 #define FLOW_DIVERT_TUNNEL_RD_CLOSED 0x00000008 #define FLOW_DIVERT_TUNNEL_WR_CLOSED 0x00000010 -#define FLOW_DIVERT_TRANSFERRED 0x00000020 #define FLOW_DIVERT_HAS_HMAC 0x00000040 +#define FLOW_DIVERT_NOTIFY_ON_RECEIVED 0x00000080 +#define FLOW_DIVERT_IMPLICIT_CONNECT 0x00000100 +#define FLOW_DIVERT_DID_SET_LOCAL_ADDR 0x00000200 #define FDLOG(level, pcb, format, ...) \ os_log_with_type(OS_LOG_DEFAULT, flow_divert_syslog_type_to_oslog_type(level), "(%u): " format "\n", (pcb)->hash, __VA_ARGS__) @@ -102,7 +103,7 @@ #define GROUP_BIT_CTL_ENQUEUE_BLOCKED 0 -#define GROUP_COUNT_MAX 32 +#define GROUP_COUNT_MAX 31 #define FLOW_DIVERT_MAX_NAME_SIZE 4096 #define FLOW_DIVERT_MAX_KEY_SIZE 1024 #define FLOW_DIVERT_MAX_TRIE_MEMORY (1024 * 1024) @@ -136,23 +137,23 @@ static struct protosw g_flow_divert_in_protosw; static struct pr_usrreqs g_flow_divert_in_usrreqs; static struct protosw g_flow_divert_in_udp_protosw; static struct pr_usrreqs g_flow_divert_in_udp_usrreqs; -#if INET6 static struct ip6protosw g_flow_divert_in6_protosw; static struct pr_usrreqs g_flow_divert_in6_usrreqs; static struct ip6protosw g_flow_divert_in6_udp_protosw; static struct pr_usrreqs g_flow_divert_in6_udp_usrreqs; -#endif /* INET6 */ static struct protosw *g_tcp_protosw = NULL; static struct ip6protosw *g_tcp6_protosw = NULL; static struct protosw *g_udp_protosw = NULL; static struct ip6protosw *g_udp6_protosw = NULL; -static errno_t -flow_divert_dup_addr(sa_family_t family, struct sockaddr *addr, struct sockaddr **dup); +ZONE_DECLARE(flow_divert_group_zone, "flow_divert_group", + sizeof(struct flow_divert_group), ZC_ZFREE_CLEARMEM | ZC_NOENCRYPT); +ZONE_DECLARE(flow_divert_pcb_zone, "flow_divert_pcb", + sizeof(struct flow_divert_pcb), ZC_ZFREE_CLEARMEM | ZC_NOENCRYPT); static errno_t -flow_divert_inp_to_sockaddr(const struct inpcb *inp, struct sockaddr **local_socket); +flow_divert_dup_addr(sa_family_t family, struct sockaddr *addr, struct sockaddr **dup); static boolean_t flow_divert_is_sockaddr_valid(struct sockaddr *addr); @@ -163,9 +164,6 @@ flow_divert_append_target_endpoint_tlv(mbuf_t connect_packet, struct sockaddr *t struct sockaddr * flow_divert_get_buffered_target_address(mbuf_t buffer); -static boolean_t -flow_divert_has_pcb_local_address(const struct inpcb *inp); - static void flow_divert_disconnect_socket(struct socket *so); @@ -312,16 +310,9 @@ done: static struct flow_divert_pcb * flow_divert_pcb_create(socket_t so) { - struct flow_divert_pcb *new_pcb = NULL; - - MALLOC_ZONE(new_pcb, struct flow_divert_pcb *, sizeof(*new_pcb), M_FLOW_DIVERT_PCB, M_WAITOK); - if (new_pcb == NULL) { - FDLOG0(LOG_ERR, &nil_pcb, "failed to allocate a pcb"); - return NULL; - } - - memset(new_pcb, 0, sizeof(*new_pcb)); + struct flow_divert_pcb *new_pcb = NULL; + new_pcb = zalloc_flags(flow_divert_pcb_zone, Z_WAITOK | Z_ZERO); lck_mtx_init(&new_pcb->mtx, flow_divert_mtx_grp, flow_divert_mtx_attr); new_pcb->so = so; new_pcb->log_level = nil_pcb.log_level; @@ -334,15 +325,9 @@ flow_divert_pcb_create(socket_t so) static void flow_divert_pcb_destroy(struct flow_divert_pcb *fd_cb) { - FDLOG(LOG_INFO, fd_cb, "Destroying, app tx %u, app rx %u, tunnel tx %u, tunnel rx %u", - fd_cb->bytes_written_by_app, fd_cb->bytes_read_by_app, 
fd_cb->bytes_sent, fd_cb->bytes_received); + FDLOG(LOG_INFO, fd_cb, "Destroying, app tx %u, tunnel tx %u, tunnel rx %u", + fd_cb->bytes_written_by_app, fd_cb->bytes_sent, fd_cb->bytes_received); - if (fd_cb->local_address != NULL) { - FREE(fd_cb->local_address, M_SONAME); - } - if (fd_cb->remote_address != NULL) { - FREE(fd_cb->remote_address, M_SONAME); - } if (fd_cb->connect_token != NULL) { mbuf_freem(fd_cb->connect_token); } @@ -352,7 +337,10 @@ flow_divert_pcb_destroy(struct flow_divert_pcb *fd_cb) if (fd_cb->app_data != NULL) { FREE(fd_cb->app_data, M_TEMP); } - FREE_ZONE(fd_cb, sizeof(*fd_cb), M_FLOW_DIVERT_PCB); + if (fd_cb->original_remote_endpoint != NULL) { + FREE(fd_cb->original_remote_endpoint, M_SONAME); + } + zfree(flow_divert_pcb_zone, fd_cb); } static void @@ -457,7 +445,7 @@ flow_divert_packet_find_tlv(mbuf_t packet, int offset, uint8_t type, int *err, i } } while (curr_type != type); - return cursor; + return (int)cursor; } static int @@ -588,7 +576,7 @@ done: } static void -flow_divert_add_data_statistics(struct flow_divert_pcb *fd_cb, int data_len, Boolean send) +flow_divert_add_data_statistics(struct flow_divert_pcb *fd_cb, size_t data_len, Boolean send) { struct inpcb *inp = NULL; struct ifnet *ifp = NULL; @@ -601,7 +589,11 @@ flow_divert_add_data_statistics(struct flow_divert_pcb *fd_cb, int data_len, Boo return; } - ifp = inp->inp_last_outifp; + if (inp->inp_vflag & INP_IPV4) { + ifp = inp->inp_last_outifp; + } else if (inp->inp_vflag & INP_IPV6) { + ifp = inp->in6p_last_outifp; + } if (ifp != NULL) { cell = IFNET_IS_CELLULAR(ifp); wifi = (!cell && IFNET_IS_WIFI(ifp)); @@ -621,42 +613,57 @@ flow_divert_add_data_statistics(struct flow_divert_pcb *fd_cb, int data_len, Boo static errno_t flow_divert_check_no_cellular(struct flow_divert_pcb *fd_cb) { - struct inpcb *inp = NULL; - - inp = sotoinpcb(fd_cb->so); - if (inp && INP_NO_CELLULAR(inp) && inp->inp_last_outifp && - IFNET_IS_CELLULAR(inp->inp_last_outifp)) { - return EHOSTUNREACH; + struct inpcb *inp = sotoinpcb(fd_cb->so); + if (INP_NO_CELLULAR(inp)) { + struct ifnet *ifp = NULL; + if (inp->inp_vflag & INP_IPV4) { + ifp = inp->inp_last_outifp; + } else if (inp->inp_vflag & INP_IPV6) { + ifp = inp->in6p_last_outifp; + } + if (ifp != NULL && IFNET_IS_CELLULAR(ifp)) { + FDLOG0(LOG_ERR, fd_cb, "Cellular is denied"); + return EHOSTUNREACH; + } } - return 0; } static errno_t flow_divert_check_no_expensive(struct flow_divert_pcb *fd_cb) { - struct inpcb *inp = NULL; - - inp = sotoinpcb(fd_cb->so); - if (inp && INP_NO_EXPENSIVE(inp) && inp->inp_last_outifp && - IFNET_IS_EXPENSIVE(inp->inp_last_outifp)) { - return EHOSTUNREACH; + struct inpcb *inp = sotoinpcb(fd_cb->so); + if (INP_NO_EXPENSIVE(inp)) { + struct ifnet *ifp = NULL; + if (inp->inp_vflag & INP_IPV4) { + ifp = inp->inp_last_outifp; + } else if (inp->inp_vflag & INP_IPV6) { + ifp = inp->in6p_last_outifp; + } + if (ifp != NULL && IFNET_IS_EXPENSIVE(ifp)) { + FDLOG0(LOG_ERR, fd_cb, "Expensive is denied"); + return EHOSTUNREACH; + } } - return 0; } static errno_t flow_divert_check_no_constrained(struct flow_divert_pcb *fd_cb) { - struct inpcb *inp = NULL; - - inp = sotoinpcb(fd_cb->so); - if (inp && INP_NO_CONSTRAINED(inp) && inp->inp_last_outifp && - IFNET_IS_CONSTRAINED(inp->inp_last_outifp)) { - return EHOSTUNREACH; + struct inpcb *inp = sotoinpcb(fd_cb->so); + if (INP_NO_CONSTRAINED(inp)) { + struct ifnet *ifp = NULL; + if (inp->inp_vflag & INP_IPV4) { + ifp = inp->inp_last_outifp; + } else if (inp->inp_vflag & INP_IPV6) { + ifp = inp->in6p_last_outifp; + } + 
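The reworked flow_divert_check_no_cellular/expensive/constrained helpers above all take the same shape: if the socket carries the restriction flag, pick the flow's last outbound interface by address family (inp_last_outifp vs. in6p_last_outifp) and fail with EHOSTUNREACH when that interface is of the restricted class. A compact sketch of that shape with placeholder types; the field and flag names below only mimic the kernel's INP_*/IFNET_IS_* macros:

#include <errno.h>
#include <stdbool.h>
#include <stddef.h>

struct iface { bool is_cellular; };

struct conn {
    bool no_cellular;          /* restriction requested by policy */
    bool is_ipv4;              /* which family the flow is using */
    struct iface *last_out_v4; /* last outbound interface, per family */
    struct iface *last_out_v6;
};

/* Returns 0 if the flow may proceed, EHOSTUNREACH if it would use a
 * cellular interface while the connection forbids cellular. */
static int
check_no_cellular(const struct conn *c)
{
    if (!c->no_cellular) {
        return 0;
    }
    const struct iface *ifp = c->is_ipv4 ? c->last_out_v4 : c->last_out_v6;
    if (ifp != NULL && ifp->is_cellular) {
        return EHOSTUNREACH;
    }
    return 0;
}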
if (ifp != NULL && IFNET_IS_CONSTRAINED(ifp)) { + FDLOG0(LOG_ERR, fd_cb, "Constrained is denied"); + return EHOSTUNREACH; + } } - return 0; } @@ -721,9 +728,9 @@ flow_divert_trie_insert(struct flow_divert_trie *trie, uint16_t string_start, si { uint16_t current = trie->root; uint16_t child = trie->root; - uint16_t string_end = string_start + string_len; + uint16_t string_end = string_start + (uint16_t)string_len; uint16_t string_idx = string_start; - uint16_t string_remainder = string_len; + uint16_t string_remainder = (uint16_t)string_len; while (child != NULL_TRIE_IDX) { uint16_t parent = current; @@ -956,8 +963,7 @@ static int flow_divert_add_proc_info(struct flow_divert_pcb *fd_cb, proc_t proc, const char *signing_id, mbuf_t connect_packet, bool is_effective) { int error = 0; - int cdhash_error = 0; - unsigned char cdhash[SHA1_RESULTLEN] = { 0 }; + uint8_t *cdhash = NULL; audit_token_t audit_token = {}; const char *proc_cs_id = signing_id; @@ -1000,7 +1006,7 @@ flow_divert_add_proc_info(struct flow_divert_pcb *fd_cb, proc_t proc, const char if (signing_id == NULL && proc_cs_id != NULL) { error = flow_divert_packet_append_tlv(connect_packet, (is_effective ? FLOW_DIVERT_TLV_SIGNING_ID : FLOW_DIVERT_TLV_APP_REAL_SIGNING_ID), - strlen(proc_cs_id), + (uint32_t)strlen(proc_cs_id), proc_cs_id); if (error != 0) { FDLOG(LOG_ERR, fd_cb, "failed to append the signing ID: %d", error); @@ -1008,18 +1014,18 @@ flow_divert_add_proc_info(struct flow_divert_pcb *fd_cb, proc_t proc, const char } } - cdhash_error = proc_getcdhash(proc, cdhash); - if (cdhash_error == 0) { + cdhash = cs_get_cdhash(proc); + if (cdhash != NULL) { error = flow_divert_packet_append_tlv(connect_packet, (is_effective ? FLOW_DIVERT_TLV_CDHASH : FLOW_DIVERT_TLV_APP_REAL_CDHASH), - sizeof(cdhash), + SHA1_RESULTLEN, cdhash); if (error) { FDLOG(LOG_ERR, fd_cb, "failed to append the cdhash: %d", error); goto done; } } else { - FDLOG(LOG_ERR, fd_cb, "failed to get the cdhash: %d", cdhash_error); + FDLOG0(LOG_ERR, fd_cb, "failed to get the cdhash"); } task_t task = proc_task(proc); @@ -1169,14 +1175,13 @@ flow_divert_create_connect_packet(struct flow_divert_pcb *fd_cb, struct sockaddr const void *cfil_id = NULL; size_t cfil_id_size = 0; struct inpcb *inp = sotoinpcb(so); - struct ifnet *ifp = NULL; + struct ifnet *ifp = NULL; error = flow_divert_packet_init(fd_cb, FLOW_DIVERT_PKT_CONNECT, &connect_packet); if (error) { goto done; } - if (fd_cb->connect_token != NULL && (fd_cb->flags & FLOW_DIVERT_HAS_HMAC)) { uint32_t sid_size = 0; int find_error = flow_divert_packet_get_tlv(fd_cb->connect_token, 0, FLOW_DIVERT_TLV_SIGNING_ID, 0, NULL, &sid_size); @@ -1235,47 +1240,24 @@ flow_divert_create_connect_packet(struct flow_divert_pcb *fd_cb, struct sockaddr mbuf_pkthdr_adjustlen(connect_packet, token_len); fd_cb->connect_token = NULL; } else { - uint32_t ctl_unit = htonl(fd_cb->control_group_unit); - - error = flow_divert_packet_append_tlv(connect_packet, FLOW_DIVERT_TLV_CTL_UNIT, sizeof(ctl_unit), &ctl_unit); - if (error) { - goto done; - } - error = flow_divert_append_target_endpoint_tlv(connect_packet, to); if (error) { goto done; } } - if (fd_cb->local_address != NULL) { - error = EALREADY; - goto done; - } else { - if (flow_divert_has_pcb_local_address(inp)) { - error = flow_divert_inp_to_sockaddr(inp, &fd_cb->local_address); - if (error) { - FDLOG0(LOG_ERR, fd_cb, "failed to get the local socket address."); - goto done; - } - } - } - - if (fd_cb->local_address != NULL) { - /* socket is bound. 
*/ - error = flow_divert_packet_append_tlv(connect_packet, FLOW_DIVERT_TLV_LOCAL_ADDR, - fd_cb->local_address->sa_len, fd_cb->local_address); + if (fd_cb->local_endpoint.sa.sa_family == AF_INET || fd_cb->local_endpoint.sa.sa_family == AF_INET6) { + error = flow_divert_packet_append_tlv(connect_packet, FLOW_DIVERT_TLV_LOCAL_ADDR, fd_cb->local_endpoint.sa.sa_len, &(fd_cb->local_endpoint.sa)); if (error) { goto done; } } - if ((inp->inp_flags | INP_BOUND_IF) && inp->inp_boundifp != NULL) { - ifp = inp->inp_boundifp; - } else if (inp->inp_last_outifp != NULL) { + if (inp->inp_vflag & INP_IPV4) { ifp = inp->inp_last_outifp; + } else if (inp->inp_vflag & INP_IPV6) { + ifp = inp->in6p_last_outifp; } - if (ifp != NULL) { uint32_t flow_if_index = ifp->if_index; error = flow_divert_packet_append_tlv(connect_packet, FLOW_DIVERT_TLV_OUT_IF_INDEX, @@ -1293,7 +1275,12 @@ flow_divert_create_connect_packet(struct flow_divert_pcb *fd_cb, struct sockaddr } } - cfil_sock_id = cfil_sock_id_from_socket(so); + if (SOCK_TYPE(so) == SOCK_DGRAM) { + cfil_sock_id = cfil_sock_id_from_datagram_socket(so, NULL, to); + } else { + cfil_sock_id = cfil_sock_id_from_socket(so); + } + if (cfil_sock_id != CFIL_SOCK_ID_NONE) { cfil_id = &cfil_sock_id; cfil_id_size = sizeof(cfil_sock_id); @@ -1303,7 +1290,7 @@ flow_divert_create_connect_packet(struct flow_divert_pcb *fd_cb, struct sockaddr } if (cfil_id != NULL && cfil_id_size > 0 && cfil_id_size <= sizeof(uuid_t)) { - error = flow_divert_packet_append_tlv(connect_packet, FLOW_DIVERT_TLV_CFIL_ID, cfil_id_size, cfil_id); + error = flow_divert_packet_append_tlv(connect_packet, FLOW_DIVERT_TLV_CFIL_ID, (uint32_t)cfil_id_size, cfil_id); if (error) { goto done; } @@ -1319,6 +1306,38 @@ done: return error; } +static int +flow_divert_send_connect_packet(struct flow_divert_pcb *fd_cb) +{ + int error = 0; + mbuf_t connect_packet = fd_cb->connect_packet; + mbuf_t saved_connect_packet = NULL; + + if (connect_packet != NULL) { + error = mbuf_copym(connect_packet, 0, mbuf_pkthdr_len(connect_packet), MBUF_DONTWAIT, &saved_connect_packet); + if (error) { + FDLOG0(LOG_ERR, fd_cb, "Failed to copy the connect packet"); + goto done; + } + + error = flow_divert_send_packet(fd_cb, connect_packet, TRUE); + if (error) { + goto done; + } + + fd_cb->connect_packet = saved_connect_packet; + saved_connect_packet = NULL; + } else { + error = ENOENT; + } +done: + if (saved_connect_packet != NULL) { + mbuf_freem(saved_connect_packet); + } + + return error; +} + static int flow_divert_send_connect_result(struct flow_divert_pcb *fd_cb) { @@ -1476,10 +1495,12 @@ flow_divert_send_data_packet(struct flow_divert_pcb *fd_cb, mbuf_t data, size_t } } - if (data_len > 0 && data != NULL) { + if (data_len > 0 && data_len <= INT_MAX && data != NULL) { last = m_last(packet); mbuf_setnext(last, data); - mbuf_pkthdr_adjustlen(packet, data_len); + mbuf_pkthdr_adjustlen(packet, (int)data_len); + } else { + data_len = 0; } error = flow_divert_send_packet(fd_cb, packet, force); if (error == 0 && data_len > 0) { @@ -1539,13 +1560,15 @@ flow_divert_send_buffered_data(struct flow_divert_pcb *fd_cb, Boolean force) error = flow_divert_send_data_packet(fd_cb, data, data_len, NULL, force); if (error) { - mbuf_freem(data); + if (data != NULL) { + mbuf_freem(data); + } break; } sent += data_len; } - sbdrop(&fd_cb->so->so_snd, sent); + sbdrop(&fd_cb->so->so_snd, (int)sent); sowwakeup(fd_cb->so); } else if (SOCK_TYPE(fd_cb->so) == SOCK_DGRAM) { mbuf_t data; @@ -1583,7 +1606,9 @@ flow_divert_send_buffered_data(struct flow_divert_pcb 
*fd_cb, Boolean force) } error = flow_divert_send_data_packet(fd_cb, data, data_len, toaddr, force); if (error) { - mbuf_freem(data); + if (data != NULL) { + mbuf_freem(data); + } break; } sent += data_len; @@ -1664,6 +1689,7 @@ flow_divert_send_app_data(struct flow_divert_pcb *fd_cb, mbuf_t data, struct soc fd_cb->so->so_snd.sb_cc, fd_cb->send_window); } } else { + mbuf_freem(pkt_data); error = ENOBUFS; } } @@ -1675,6 +1701,7 @@ flow_divert_send_app_data(struct flow_divert_pcb *fd_cb, mbuf_t data, struct soc fd_cb->so->so_snd.sb_cc, fd_cb->send_window); } } else { + mbuf_freem(remaining_data); error = ENOBUFS; } } @@ -1683,6 +1710,9 @@ flow_divert_send_app_data(struct flow_divert_pcb *fd_cb, mbuf_t data, struct soc error = flow_divert_send_data_packet(fd_cb, data, to_send, toaddr, FALSE); if (error) { FDLOG(LOG_ERR, fd_cb, "flow_divert_send_data_packet failed. send data size = %lu", to_send); + if (data != NULL) { + mbuf_freem(data); + } } else { fd_cb->send_window -= to_send; } @@ -1704,6 +1734,9 @@ flow_divert_send_app_data(struct flow_divert_pcb *fd_cb, mbuf_t data, struct soc } } } else { + if (data != NULL) { + mbuf_freem(data); + } error = ENOBUFS; } } @@ -1713,11 +1746,10 @@ flow_divert_send_app_data(struct flow_divert_pcb *fd_cb, mbuf_t data, struct soc } static int -flow_divert_send_read_notification(struct flow_divert_pcb *fd_cb, uint32_t read_count) +flow_divert_send_read_notification(struct flow_divert_pcb *fd_cb) { - int error = 0; - mbuf_t packet = NULL; - uint32_t net_read_count = htonl(read_count); + int error = 0; + mbuf_t packet = NULL; error = flow_divert_packet_init(fd_cb, FLOW_DIVERT_PKT_READ_NOTIFY, &packet); if (error) { @@ -1725,12 +1757,6 @@ flow_divert_send_read_notification(struct flow_divert_pcb *fd_cb, uint32_t read_ goto done; } - error = flow_divert_packet_append_tlv(packet, FLOW_DIVERT_TLV_READ_COUNT, sizeof(net_read_count), &net_read_count); - if (error) { - FDLOG(LOG_ERR, fd_cb, "failed to add the read count: %d", error); - goto done; - } - error = flow_divert_send_packet(fd_cb, packet, TRUE); if (error) { goto done; @@ -1775,21 +1801,344 @@ done: return error; } +static void +flow_divert_set_local_endpoint(struct flow_divert_pcb *fd_cb, struct sockaddr *local_endpoint, bool port_only) +{ + struct inpcb *inp = sotoinpcb(fd_cb->so); + + if (local_endpoint->sa_family == AF_INET6) { + if (IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr) && !port_only) { + fd_cb->flags |= FLOW_DIVERT_DID_SET_LOCAL_ADDR; + inp->in6p_laddr = (satosin6(local_endpoint))->sin6_addr; + } + if (inp->inp_lport == 0) { + inp->inp_lport = (satosin6(local_endpoint))->sin6_port; + } + } else if (local_endpoint->sa_family == AF_INET) { + if (inp->inp_laddr.s_addr == INADDR_ANY && !port_only) { + fd_cb->flags |= FLOW_DIVERT_DID_SET_LOCAL_ADDR; + inp->inp_laddr = (satosin(local_endpoint))->sin_addr; + } + if (inp->inp_lport == 0) { + inp->inp_lport = (satosin(local_endpoint))->sin_port; + } + } +} + +static void +flow_divert_set_remote_endpoint(struct flow_divert_pcb *fd_cb, struct sockaddr *remote_endpoint) +{ + struct inpcb *inp = sotoinpcb(fd_cb->so); + + if (remote_endpoint->sa_family == AF_INET6) { + if (IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_faddr)) { + inp->in6p_faddr = (satosin6(remote_endpoint))->sin6_addr; + } + if (inp->inp_fport == 0) { + inp->inp_fport = (satosin6(remote_endpoint))->sin6_port; + } + } else if (remote_endpoint->sa_family == AF_INET) { + if (inp->inp_laddr.s_addr == INADDR_ANY) { + inp->inp_faddr = (satosin(remote_endpoint))->sin_addr; + } + if (inp->inp_fport == 0) { + 
inp->inp_fport = (satosin(remote_endpoint))->sin_port; + } + } +} + +static uint32_t +flow_divert_derive_kernel_control_unit(uint32_t ctl_unit, uint32_t *aggregate_unit) +{ + if (aggregate_unit != NULL && *aggregate_unit != 0) { + uint32_t counter; + for (counter = 0; counter < (GROUP_COUNT_MAX - 1); counter++) { + if ((*aggregate_unit) & (1 << counter)) { + break; + } + } + if (counter < (GROUP_COUNT_MAX - 1)) { + *aggregate_unit &= ~(1 << counter); + return counter + 1; + } else { + return ctl_unit; + } + } else { + return ctl_unit; + } +} + +static int +flow_divert_try_next(struct flow_divert_pcb *fd_cb) +{ + uint32_t current_ctl_unit = 0; + uint32_t next_ctl_unit = 0; + struct flow_divert_group *current_group = NULL; + struct flow_divert_group *next_group = NULL; + int error = 0; + + next_ctl_unit = flow_divert_derive_kernel_control_unit(fd_cb->policy_control_unit, &(fd_cb->aggregate_unit)); + current_ctl_unit = fd_cb->control_group_unit; + + if (current_ctl_unit == next_ctl_unit) { + FDLOG0(LOG_NOTICE, fd_cb, "Next control unit is the same as the current control unit, disabling flow divert"); + error = EALREADY; + goto done; + } + + if (next_ctl_unit == 0 || next_ctl_unit >= GROUP_COUNT_MAX) { + FDLOG0(LOG_NOTICE, fd_cb, "No more valid control units, disabling flow divert"); + error = ENOENT; + goto done; + } + + if (g_flow_divert_groups == NULL || g_active_group_count == 0) { + FDLOG0(LOG_NOTICE, fd_cb, "No active groups, disabling flow divert"); + error = ENOENT; + goto done; + } + + next_group = g_flow_divert_groups[next_ctl_unit]; + if (next_group == NULL) { + FDLOG(LOG_NOTICE, fd_cb, "Group for control unit %u does not exist", next_ctl_unit); + error = ENOENT; + goto done; + } + + current_group = fd_cb->group; + + lck_rw_lock_exclusive(&(current_group->lck)); + lck_rw_lock_exclusive(&(next_group->lck)); + + FDLOG(LOG_NOTICE, fd_cb, "Moving from %u to %u", current_ctl_unit, next_ctl_unit); + + RB_REMOVE(fd_pcb_tree, &(current_group->pcb_tree), fd_cb); + if (RB_INSERT(fd_pcb_tree, &(next_group->pcb_tree), fd_cb) != NULL) { + panic("group with unit %u already contains a connection with hash %u", next_ctl_unit, fd_cb->hash); + } + + fd_cb->group = next_group; + fd_cb->control_group_unit = next_ctl_unit; + + lck_rw_done(&(next_group->lck)); + lck_rw_done(&(current_group->lck)); + + error = flow_divert_send_connect_packet(fd_cb); + if (error) { + FDLOG(LOG_NOTICE, fd_cb, "Failed to send the connect packet to %u, disabling flow divert", next_ctl_unit); + error = ENOENT; + goto done; + } + +done: + return error; +} + +static void +flow_divert_disable(struct flow_divert_pcb *fd_cb) +{ + struct socket *so = NULL; + mbuf_t buffer; + int error = 0; + proc_t last_proc = NULL; + struct sockaddr *remote_endpoint = fd_cb->original_remote_endpoint; + bool do_connect = !(fd_cb->flags & FLOW_DIVERT_IMPLICIT_CONNECT); + struct inpcb *inp = NULL; + + so = fd_cb->so; + if (so == NULL) { + goto done; + } + + FDLOG0(LOG_NOTICE, fd_cb, "Skipped all flow divert services, disabling flow divert"); + + /* Restore the IP state */ + inp = sotoinpcb(so); + inp->inp_vflag = fd_cb->original_vflag; + inp->inp_faddr.s_addr = INADDR_ANY; + inp->inp_fport = 0; + memset(&(inp->in6p_faddr), 0, sizeof(inp->in6p_faddr)); + inp->in6p_fport = 0; + /* If flow divert set the local address, clear it out */ + if (fd_cb->flags & FLOW_DIVERT_DID_SET_LOCAL_ADDR) { + inp->inp_laddr.s_addr = INADDR_ANY; + memset(&(inp->in6p_laddr), 0, sizeof(inp->in6p_laddr)); + } + inp->inp_last_outifp = fd_cb->original_last_outifp; + 
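flow_divert_derive_kernel_control_unit() above treats *aggregate_unit as a bitmask of agents still to try: it finds the lowest set bit below GROUP_COUNT_MAX - 1, clears it, and returns that bit index + 1 as the next control unit, falling back to the policy's unit when the mask is exhausted. The same walk in a self-contained form; the 31-group limit is taken from the patch, everything else is illustrative only:

#include <stdint.h>
#include <stdio.h>

#define GROUP_COUNT_MAX 31

/* Pop the lowest set bit of *aggregate and return its 1-based index as
 * the next control unit; return fallback_unit when no usable bit remains. */
static uint32_t
next_control_unit(uint32_t fallback_unit, uint32_t *aggregate)
{
    if (aggregate == NULL || *aggregate == 0) {
        return fallback_unit;
    }
    for (uint32_t bit = 0; bit < GROUP_COUNT_MAX - 1; bit++) {
        if (*aggregate & (1u << bit)) {
            *aggregate &= ~(1u << bit);
            return bit + 1;
        }
    }
    return fallback_unit;
}

int
main(void)
{
    uint32_t mask = 0x14;                         /* units 3 and 5 still to try */
    printf("%u\n", next_control_unit(7, &mask));  /* prints 3 */
    printf("%u\n", next_control_unit(7, &mask));  /* prints 5 */
    printf("%u\n", next_control_unit(7, &mask));  /* mask empty: prints 7 */
    return 0;
}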
inp->in6p_last_outifp = fd_cb->original_last_outifp6; + + /* Dis-associate the socket */ + so->so_flags &= ~SOF_FLOW_DIVERT; + so->so_flags1 |= SOF1_FLOW_DIVERT_SKIP; + so->so_fd_pcb = NULL; + fd_cb->so = NULL; + + /* Remove from the group */ + flow_divert_pcb_remove(fd_cb); + + FDRELEASE(fd_cb); /* Release the socket's reference */ + + /* Revert back to the original protocol */ + so->so_proto = pffindproto(SOCK_DOM(so), SOCK_PROTO(so), SOCK_TYPE(so)); + + last_proc = proc_find(so->last_pid); + + if (do_connect) { + /* Connect using the original protocol */ + error = (*so->so_proto->pr_usrreqs->pru_connect)(so, remote_endpoint, (last_proc != NULL ? last_proc : current_proc())); + if (error) { + FDLOG(LOG_ERR, fd_cb, "Failed to connect using the socket's original protocol: %d", error); + goto done; + } + } + + buffer = so->so_snd.sb_mb; + if (buffer == NULL) { + /* No buffered data, done */ + goto done; + } + + /* Send any buffered data using the original protocol */ + if (SOCK_TYPE(so) == SOCK_STREAM) { + mbuf_t data_to_send = NULL; + size_t data_len = so->so_snd.sb_cc; + + error = mbuf_copym(buffer, 0, data_len, MBUF_DONTWAIT, &data_to_send); + if (error) { + FDLOG0(LOG_ERR, fd_cb, "Failed to copy the mbuf chain in the socket's send buffer"); + goto done; + } + + sbflush(&so->so_snd); + + if (data_to_send->m_flags & M_PKTHDR) { + mbuf_pkthdr_setlen(data_to_send, data_len); + } + + error = (*so->so_proto->pr_usrreqs->pru_send)(so, + 0, + data_to_send, + NULL, + NULL, + (last_proc != NULL ? last_proc : current_proc())); + + if (error) { + FDLOG(LOG_ERR, fd_cb, "Failed to send queued data using the socket's original protocol: %d", error); + } + } else if (SOCK_TYPE(so) == SOCK_DGRAM) { + struct sockbuf *sb = &so->so_snd; + MBUFQ_HEAD(send_queue_head) send_queue; + MBUFQ_INIT(&send_queue); + + /* Flush the send buffer, moving all records to a temporary queue */ + while (sb->sb_mb != NULL) { + mbuf_t record = sb->sb_mb; + mbuf_t m = record; + sb->sb_mb = sb->sb_mb->m_nextpkt; + while (m != NULL) { + sbfree(sb, m); + m = m->m_next; + } + record->m_nextpkt = NULL; + MBUFQ_ENQUEUE(&send_queue, record); + } + SB_EMPTY_FIXUP(sb); + + while (!MBUFQ_EMPTY(&send_queue)) { + mbuf_t next_record = MBUFQ_FIRST(&send_queue); + mbuf_t addr = NULL; + mbuf_t control = NULL; + mbuf_t last_control = NULL; + mbuf_t data = NULL; + mbuf_t m = next_record; + struct sockaddr *to_endpoint = NULL; + + MBUFQ_DEQUEUE(&send_queue, next_record); + + while (m != NULL) { + if (m->m_type == MT_SONAME) { + addr = m; + } else if (m->m_type == MT_CONTROL) { + if (control == NULL) { + control = m; + } + last_control = m; + } else if (m->m_type == MT_DATA) { + data = m; + break; + } + m = m->m_next; + } + + if (addr != NULL) { + to_endpoint = flow_divert_get_buffered_target_address(addr); + if (to_endpoint == NULL) { + FDLOG0(LOG_NOTICE, fd_cb, "Failed to get the remote address from the buffer"); + } + } + + if (data == NULL) { + FDLOG0(LOG_ERR, fd_cb, "Buffered record does not contain any data"); + mbuf_freem(next_record); + continue; + } + + if (!(data->m_flags & M_PKTHDR)) { + FDLOG0(LOG_ERR, fd_cb, "Buffered data does not have a packet header"); + mbuf_freem(next_record); + continue; + } + + if (addr != NULL) { + addr->m_next = NULL; + } + + if (last_control != NULL) { + last_control->m_next = NULL; + } + + error = (*so->so_proto->pr_usrreqs->pru_send)(so, + 0, + data, + to_endpoint, + control, + (last_proc != NULL ? 
last_proc : current_proc())); + + if (addr != NULL) { + mbuf_freem(addr); + } + + if (error) { + FDLOG(LOG_ERR, fd_cb, "Failed to send queued data using the socket's original protocol: %d", error); + } + } + } +done: + if (last_proc != NULL) { + proc_rele(last_proc); + } + + if (error) { + so->so_error = (uint16_t)error; + flow_divert_disconnect_socket(so); + } +} + static void flow_divert_handle_connect_result(struct flow_divert_pcb *fd_cb, mbuf_t packet, int offset) { - uint32_t connect_error; + uint32_t connect_error = 0; uint32_t ctl_unit = 0; int error = 0; struct flow_divert_group *grp = NULL; - struct sockaddr_storage local_address; + union sockaddr_in_4_6 local_endpoint = {}; + union sockaddr_in_4_6 remote_endpoint = {}; int out_if_index = 0; - struct sockaddr_storage remote_address; uint32_t send_window; uint32_t app_data_length = 0; - memset(&local_address, 0, sizeof(local_address)); - memset(&remote_address, 0, sizeof(remote_address)); + memset(&local_endpoint, 0, sizeof(local_endpoint)); + memset(&remote_endpoint, 0, sizeof(remote_endpoint)); error = flow_divert_packet_get_tlv(packet, offset, FLOW_DIVERT_TLV_ERROR_CODE, sizeof(connect_error), &connect_error, NULL); if (error) { @@ -1797,6 +2146,7 @@ flow_divert_handle_connect_result(struct flow_divert_pcb *fd_cb, mbuf_t packet, return; } + connect_error = ntohl(connect_error); FDLOG(LOG_INFO, fd_cb, "received connect result %u", connect_error); error = flow_divert_packet_get_tlv(packet, offset, FLOW_DIVERT_TLV_SPACE_AVAILABLE, sizeof(send_window), &send_window, NULL); @@ -1810,12 +2160,12 @@ flow_divert_handle_connect_result(struct flow_divert_pcb *fd_cb, mbuf_t packet, FDLOG0(LOG_INFO, fd_cb, "No control unit provided in the connect result"); } - error = flow_divert_packet_get_tlv(packet, offset, FLOW_DIVERT_TLV_LOCAL_ADDR, sizeof(local_address), &local_address, NULL); + error = flow_divert_packet_get_tlv(packet, offset, FLOW_DIVERT_TLV_LOCAL_ADDR, sizeof(local_endpoint), &(local_endpoint.sa), NULL); if (error) { FDLOG0(LOG_INFO, fd_cb, "No local address provided"); } - error = flow_divert_packet_get_tlv(packet, offset, FLOW_DIVERT_TLV_REMOTE_ADDR, sizeof(remote_address), &remote_address, NULL); + error = flow_divert_packet_get_tlv(packet, offset, FLOW_DIVERT_TLV_REMOTE_ADDR, sizeof(remote_endpoint), &(remote_endpoint.sa), NULL); if (error) { FDLOG0(LOG_INFO, fd_cb, "No remote address provided"); } @@ -1831,7 +2181,6 @@ flow_divert_handle_connect_result(struct flow_divert_pcb *fd_cb, mbuf_t packet, } error = 0; - connect_error = ntohl(connect_error); ctl_unit = ntohl(ctl_unit); lck_rw_lock_shared(&g_flow_divert_group_lck); @@ -1856,64 +2205,38 @@ flow_divert_handle_connect_result(struct flow_divert_pcb *fd_cb, mbuf_t packet, struct inpcb *inp = NULL; struct ifnet *ifp = NULL; struct flow_divert_group *old_group; + struct socket *so = fd_cb->so; - socket_lock(fd_cb->so, 0); + socket_lock(so, 0); - if (!(fd_cb->so->so_state & SS_ISCONNECTING)) { + if (SOCK_TYPE(so) == SOCK_STREAM && !(so->so_state & SS_ISCONNECTING)) { + FDLOG0(LOG_ERR, fd_cb, "TCP socket is not in the connecting state, ignoring connect result"); goto done; } - inp = sotoinpcb(fd_cb->so); + inp = sotoinpcb(so); if (connect_error || error) { goto set_socket_state; } - if (local_address.ss_family == 0 && fd_cb->local_address == NULL) { - error = EINVAL; - goto set_socket_state; - } - if (local_address.ss_family != 0 && fd_cb->local_address == NULL) { - if (local_address.ss_len > sizeof(local_address)) { - local_address.ss_len = sizeof(local_address); - } - 
fd_cb->local_address = dup_sockaddr((struct sockaddr *)&local_address, 1); - } - if (flow_divert_is_sockaddr_valid((struct sockaddr *)&local_address)) { - if (inp->inp_vflag & INP_IPV4 && local_address.ss_family == AF_INET) { - struct sockaddr_in *local_in_address = (struct sockaddr_in *)&local_address; - inp->inp_lport = local_in_address->sin_port; - memcpy(&inp->inp_laddr, &local_in_address->sin_addr, sizeof(struct in_addr)); - } else if (inp->inp_vflag & INP_IPV6 && local_address.ss_family == AF_INET6) { - struct sockaddr_in6 *local_in6_address = (struct sockaddr_in6 *)&local_address; - inp->inp_lport = local_in6_address->sin6_port; - memcpy(&inp->in6p_laddr, &local_in6_address->sin6_addr, sizeof(struct in6_addr)); + if (flow_divert_is_sockaddr_valid(&(local_endpoint.sa))) { + if (local_endpoint.sa.sa_family == AF_INET) { + local_endpoint.sa.sa_len = sizeof(struct sockaddr_in); + } else if (local_endpoint.sa.sa_family == AF_INET6) { + local_endpoint.sa.sa_len = sizeof(struct sockaddr_in6); } + fd_cb->local_endpoint = local_endpoint; + flow_divert_set_local_endpoint(fd_cb, &(local_endpoint.sa), (SOCK_TYPE(so) == SOCK_DGRAM)); } - if (remote_address.ss_family != 0) { - if (fd_cb->remote_address != NULL) { - FREE(fd_cb->remote_address, M_SONAME); - fd_cb->remote_address = NULL; - } - if (remote_address.ss_len > sizeof(remote_address)) { - remote_address.ss_len = sizeof(remote_address); + if (flow_divert_is_sockaddr_valid(&(remote_endpoint.sa)) && SOCK_TYPE(so) == SOCK_STREAM) { + if (remote_endpoint.sa.sa_family == AF_INET) { + remote_endpoint.sa.sa_len = sizeof(struct sockaddr_in); + } else if (remote_endpoint.sa.sa_family == AF_INET6) { + remote_endpoint.sa.sa_len = sizeof(struct sockaddr_in6); } - fd_cb->remote_address = dup_sockaddr((struct sockaddr *)&remote_address, 1); - if (flow_divert_is_sockaddr_valid((struct sockaddr *)&remote_address)) { - if (inp->inp_vflag & INP_IPV4 && remote_address.ss_family == AF_INET) { - struct sockaddr_in *remote_in_address = (struct sockaddr_in *)&remote_address; - inp->inp_fport = remote_in_address->sin_port; - memcpy(&inp->inp_faddr, &remote_in_address->sin_addr, sizeof(struct in_addr)); - } else if (inp->inp_vflag & INP_IPV6 && remote_address.ss_family == AF_INET6) { - struct sockaddr_in6 *remote_in6_address = (struct sockaddr_in6 *)&remote_address; - inp->inp_fport = remote_in6_address->sin6_port; - memcpy(&inp->in6p_faddr, &remote_in6_address->sin6_addr, sizeof(struct in6_addr)); - } - } - } else { - error = EINVAL; - goto set_socket_state; + flow_divert_set_remote_endpoint(fd_cb, &(remote_endpoint.sa)); } if (app_data_length > 0) { @@ -1943,7 +2266,11 @@ flow_divert_handle_connect_result(struct flow_divert_pcb *fd_cb, mbuf_t packet, } if (ifp != NULL) { - inp->inp_last_outifp = ifp; + if (inp->inp_vflag & INP_IPV4) { + inp->inp_last_outifp = ifp; + } else if (inp->inp_vflag & INP_IPV6) { + inp->in6p_last_outifp = ifp; + } } else { error = EINVAL; } @@ -1984,31 +2311,50 @@ set_socket_state: } if (connect_error || error) { + if (connect_error && fd_cb->control_group_unit != fd_cb->policy_control_unit) { + error = flow_divert_try_next(fd_cb); + if (error) { + flow_divert_disable(fd_cb); + } + goto done; + } + if (!connect_error) { flow_divert_update_closed_state(fd_cb, SHUT_RDWR, FALSE); - fd_cb->so->so_error = error; + so->so_error = (uint16_t)error; flow_divert_send_close_if_needed(fd_cb); } else { flow_divert_update_closed_state(fd_cb, SHUT_RDWR, TRUE); - fd_cb->so->so_error = connect_error; + so->so_error = (uint16_t)connect_error; } - 
flow_divert_disconnect_socket(fd_cb->so); + flow_divert_disconnect_socket(so); } else { #if NECP /* Update NECP client with connected five-tuple */ if (!uuid_is_null(inp->necp_client_uuid)) { - socket_unlock(fd_cb->so, 0); - necp_client_assign_from_socket(fd_cb->so->last_pid, inp->necp_client_uuid, inp); - socket_lock(fd_cb->so, 0); + socket_unlock(so, 0); + necp_client_assign_from_socket(so->last_pid, inp->necp_client_uuid, inp); + socket_lock(so, 0); } #endif /* NECP */ flow_divert_send_buffered_data(fd_cb, FALSE); - soisconnected(fd_cb->so); + soisconnected(so); } + /* We don't need the connect packet any more */ + if (fd_cb->connect_packet != NULL) { + mbuf_freem(fd_cb->connect_packet); + fd_cb->connect_packet = NULL; + } + + /* We don't need the original remote endpoint any more */ + if (fd_cb->original_remote_endpoint != NULL) { + FREE(fd_cb->original_remote_endpoint, M_SONAME); + fd_cb->original_remote_endpoint = NULL; + } done: - socket_unlock(fd_cb->so, 0); + socket_unlock(so, 0); } FDUNLOCK(fd_cb); @@ -2042,7 +2388,7 @@ flow_divert_handle_close(struct flow_divert_pcb *fd_cb, mbuf_t packet, int offse if (fd_cb->so != NULL) { socket_lock(fd_cb->so, 0); - fd_cb->so->so_error = ntohl(close_error); + fd_cb->so->so_error = (uint16_t)ntohl(close_error); flow_divert_update_closed_state(fd_cb, how, TRUE); @@ -2061,52 +2407,59 @@ flow_divert_handle_close(struct flow_divert_pcb *fd_cb, mbuf_t packet, int offse } static mbuf_t -flow_divert_get_control_mbuf(struct flow_divert_pcb *fd_cb) +flow_divert_create_control_mbuf(struct flow_divert_pcb *fd_cb) { struct inpcb *inp = sotoinpcb(fd_cb->so); - if ((inp->inp_vflag & INP_IPV4) && (inp->inp_flags & INP_RECVDSTADDR)) { - struct in_addr ia = { }; - - if (fd_cb->local_address != NULL && fd_cb->local_address->sa_family == AF_INET && fd_cb->local_address->sa_len >= sizeof(struct sockaddr_in)) { - struct sockaddr_in *sin = (struct sockaddr_in *)(void *)fd_cb->local_address; - bcopy(&sin->sin_addr, &ia, sizeof(struct in_addr)); - } - - return sbcreatecontrol((caddr_t)&ia, sizeof(ia), IP_RECVDSTADDR, IPPROTO_IP); - } else if ((inp->inp_vflag & INP_IPV6) && (inp->inp_flags & IN6P_PKTINFO)) { + bool is_cfil_enabled = false; +#if CONTENT_FILTER + /* Content Filter needs to see the local address */ + is_cfil_enabled = (inp->inp_socket && inp->inp_socket->so_cfil_db != NULL); +#endif + if ((inp->inp_vflag & INP_IPV4) && + fd_cb->local_endpoint.sa.sa_family == AF_INET && + ((inp->inp_flags & INP_RECVDSTADDR) || is_cfil_enabled)) { + return sbcreatecontrol((caddr_t)&(fd_cb->local_endpoint.sin.sin_addr), sizeof(struct in_addr), IP_RECVDSTADDR, IPPROTO_IP); + } else if ((inp->inp_vflag & INP_IPV6) && + fd_cb->local_endpoint.sa.sa_family == AF_INET6 && + ((inp->inp_flags & IN6P_PKTINFO) || is_cfil_enabled)) { struct in6_pktinfo pi6; memset(&pi6, 0, sizeof(pi6)); - - if (fd_cb->local_address != NULL && fd_cb->local_address->sa_family == AF_INET6 && fd_cb->local_address->sa_len >= sizeof(struct sockaddr_in6)) { - struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)(void *)fd_cb->local_address; - bcopy(&sin6->sin6_addr, &pi6.ipi6_addr, sizeof(struct in6_addr)); - pi6.ipi6_ifindex = 0; - } + pi6.ipi6_addr = fd_cb->local_endpoint.sin6.sin6_addr; return sbcreatecontrol((caddr_t)&pi6, sizeof(pi6), IPV6_PKTINFO, IPPROTO_IPV6); } return NULL; } -static void +static int flow_divert_handle_data(struct flow_divert_pcb *fd_cb, mbuf_t packet, size_t offset) { + int error = 0; + FDLOCK(fd_cb); if (fd_cb->so != NULL) { - int error = 0; mbuf_t data = NULL; size_t data_size; 
struct sockaddr_storage remote_address; boolean_t got_remote_sa = FALSE; + boolean_t appended = FALSE; + boolean_t append_success = FALSE; socket_lock(fd_cb->so, 0); + if (sbspace(&fd_cb->so->so_rcv) == 0) { + error = ENOBUFS; + fd_cb->flags |= FLOW_DIVERT_NOTIFY_ON_RECEIVED; + FDLOG0(LOG_INFO, fd_cb, "Receive buffer is full, will send read notification when app reads some data"); + goto done; + } + if (SOCK_TYPE(fd_cb->so) == SOCK_DGRAM) { uint32_t val_size = 0; /* check if we got remote address with data */ memset(&remote_address, 0, sizeof(remote_address)); - error = flow_divert_packet_get_tlv(packet, offset, FLOW_DIVERT_TLV_REMOTE_ADDR, sizeof(remote_address), &remote_address, &val_size); + error = flow_divert_packet_get_tlv(packet, (int)offset, FLOW_DIVERT_TLV_REMOTE_ADDR, sizeof(remote_address), &remote_address, &val_size); if (error || val_size > sizeof(remote_address)) { FDLOG0(LOG_INFO, fd_cb, "No remote address provided"); error = 0; @@ -2117,6 +2470,8 @@ flow_divert_handle_data(struct flow_divert_pcb *fd_cb, mbuf_t packet, size_t off /* validate the address */ if (flow_divert_is_sockaddr_valid((struct sockaddr *)&remote_address)) { got_remote_sa = TRUE; + } else { + FDLOG0(LOG_INFO, fd_cb, "Remote address is invalid"); } offset += (sizeof(uint8_t) + sizeof(uint32_t) + val_size); } @@ -2124,63 +2479,72 @@ flow_divert_handle_data(struct flow_divert_pcb *fd_cb, mbuf_t packet, size_t off data_size = (mbuf_pkthdr_len(packet) - offset); + if (fd_cb->so->so_state & SS_CANTRCVMORE) { + FDLOG(LOG_NOTICE, fd_cb, "app cannot receive any more data, dropping %lu bytes of data", data_size); + goto done; + } + + if (SOCK_TYPE(fd_cb->so) != SOCK_STREAM && SOCK_TYPE(fd_cb->so) != SOCK_DGRAM) { + FDLOG(LOG_ERR, fd_cb, "socket has an unsupported type: %d", SOCK_TYPE(fd_cb->so)); + goto done; + } + FDLOG(LOG_DEBUG, fd_cb, "received %lu bytes of data", data_size); error = mbuf_split(packet, offset, MBUF_DONTWAIT, &data); if (error || data == NULL) { FDLOG(LOG_ERR, fd_cb, "mbuf_split failed: %d", error); + goto done; + } + + if (SOCK_TYPE(fd_cb->so) == SOCK_STREAM) { + appended = (sbappendstream(&fd_cb->so->so_rcv, data) != 0); + append_success = TRUE; } else { - if (flow_divert_check_no_cellular(fd_cb) || - flow_divert_check_no_expensive(fd_cb) || - flow_divert_check_no_constrained(fd_cb)) { - flow_divert_update_closed_state(fd_cb, SHUT_RDWR, TRUE); - flow_divert_send_close(fd_cb, SHUT_RDWR); - flow_divert_disconnect_socket(fd_cb->so); - } else if (!(fd_cb->so->so_state & SS_CANTRCVMORE)) { - if (SOCK_TYPE(fd_cb->so) == SOCK_STREAM) { - int appended = sbappendstream(&fd_cb->so->so_rcv, data); - fd_cb->bytes_received += data_size; - flow_divert_add_data_statistics(fd_cb, data_size, FALSE); - fd_cb->sb_size += data_size; - if (appended) { - sorwakeup(fd_cb->so); - } - data = NULL; - } else if (SOCK_TYPE(fd_cb->so) == SOCK_DGRAM) { - struct sockaddr *append_sa; - mbuf_t mctl; - - if (got_remote_sa == TRUE) { - error = flow_divert_dup_addr(fd_cb->so->so_proto->pr_domain->dom_family, - (struct sockaddr *)&remote_address, &append_sa); - } else { - error = flow_divert_dup_addr(fd_cb->so->so_proto->pr_domain->dom_family, - fd_cb->remote_address, &append_sa); - } - if (error) { - FDLOG0(LOG_ERR, fd_cb, "failed to dup the socket address."); - } + struct sockaddr *append_sa = NULL; + mbuf_t mctl; - mctl = flow_divert_get_control_mbuf(fd_cb); - int append_error = 0; - if (sbappendaddr(&fd_cb->so->so_rcv, append_sa, data, mctl, &append_error) || append_error == EJUSTRETURN) { - fd_cb->bytes_received += 
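The reworked data path above replaces the old byte-count bookkeeping (sb_size and bytes_read_by_app) with a single flag: flow_divert_handle_data() sets FLOW_DIVERT_NOTIFY_ON_RECEIVED and returns ENOBUFS when the receive buffer has no space, and flow_divert_rcvd() later sends one read notification and clears the flag once the application has drained some data. A toy model of that handshake in plain C, with invented names and no real socket buffers:

#include <stdbool.h>
#include <stdio.h>

struct flow {
    int  rcv_space;          /* free bytes in the receive buffer */
    bool notify_on_received; /* send a read notification on next drain */
};

/* Provider-to-app direction: returns false (would block) when full. */
static bool
handle_data(struct flow *f, int len)
{
    if (f->rcv_space == 0) {
        f->notify_on_received = true;   /* remember to ask for more later */
        return false;
    }
    if (len > f->rcv_space) {
        len = f->rcv_space;
    }
    f->rcv_space -= len;
    return true;
}

/* Called when the app reads: if we had stalled, tell the provider once. */
static void
app_read(struct flow *f, int len)
{
    f->rcv_space += len;
    if (f->notify_on_received && f->rcv_space > 0) {
        printf("send read notification\n");
        f->notify_on_received = false;
    }
}

int
main(void)
{
    struct flow f = { .rcv_space = 4, .notify_on_received = false };
    handle_data(&f, 4);     /* buffer now full */
    handle_data(&f, 1);     /* stalls, arms the notification */
    app_read(&f, 2);        /* drains space, emits the notification once */
    return 0;
}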
data_size; - flow_divert_add_data_statistics(fd_cb, data_size, FALSE); - fd_cb->sb_size += data_size; - if (append_error == 0) { - sorwakeup(fd_cb->so); - } - data = NULL; - } - if (!error) { - FREE(append_sa, M_TEMP); - } + if (got_remote_sa == TRUE) { + error = flow_divert_dup_addr(remote_address.ss_family, (struct sockaddr *)&remote_address, &append_sa); + } else { + if (fd_cb->so->so_proto->pr_domain->dom_family == AF_INET6) { + error = in6_mapped_peeraddr(fd_cb->so, &append_sa); + } else { + error = in_getpeeraddr(fd_cb->so, &append_sa); } } + if (error) { + FDLOG0(LOG_ERR, fd_cb, "failed to dup the socket address."); + } + + mctl = flow_divert_create_control_mbuf(fd_cb); + int append_error = 0; + if (sbappendaddr(&fd_cb->so->so_rcv, append_sa, data, mctl, &append_error) || append_error == EJUSTRETURN) { + append_success = TRUE; + appended = (append_error == 0); + } else { + FDLOG(LOG_ERR, fd_cb, "failed to append %lu bytes of data: %d", data_size, append_error); + } + + if (append_sa != NULL) { + FREE(append_sa, M_SONAME); + } + } + + if (append_success) { + fd_cb->bytes_received += data_size; + flow_divert_add_data_statistics(fd_cb, data_size, FALSE); } + + if (appended) { + sorwakeup(fd_cb->so); + } +done: socket_unlock(fd_cb->so, 0); } FDUNLOCK(fd_cb); + + return error; } static void @@ -2228,7 +2592,7 @@ flow_divert_handle_group_init(struct flow_divert_group *group, mbuf_t packet, in error = flow_divert_packet_get_tlv(packet, offset, FLOW_DIVERT_TLV_LOG_LEVEL, sizeof(log_level), &log_level, NULL); if (!error) { - nil_pcb.log_level = log_level; + nil_pcb.log_level = (uint8_t)log_level; } lck_rw_lock_exclusive(&group->lck); @@ -2262,26 +2626,11 @@ static void flow_divert_handle_properties_update(struct flow_divert_pcb *fd_cb, mbuf_t packet, int offset) { int error = 0; - struct sockaddr_storage local_address; int out_if_index = 0; - struct sockaddr_storage remote_address; uint32_t app_data_length = 0; FDLOG0(LOG_INFO, fd_cb, "received a properties update"); - memset(&local_address, 0, sizeof(local_address)); - memset(&remote_address, 0, sizeof(remote_address)); - - error = flow_divert_packet_get_tlv(packet, offset, FLOW_DIVERT_TLV_LOCAL_ADDR, sizeof(local_address), &local_address, NULL); - if (error) { - FDLOG0(LOG_INFO, fd_cb, "No local address provided in properties update"); - } - - error = flow_divert_packet_get_tlv(packet, offset, FLOW_DIVERT_TLV_REMOTE_ADDR, sizeof(remote_address), &remote_address, NULL); - if (error) { - FDLOG0(LOG_INFO, fd_cb, "No remote address provided in properties update"); - } - error = flow_divert_packet_get_tlv(packet, offset, FLOW_DIVERT_TLV_OUT_IF_INDEX, sizeof(out_if_index), &out_if_index, NULL); if (error) { FDLOG0(LOG_INFO, fd_cb, "No output if index provided in properties update"); @@ -2296,28 +2645,6 @@ flow_divert_handle_properties_update(struct flow_divert_pcb *fd_cb, mbuf_t packe if (fd_cb->so != NULL) { socket_lock(fd_cb->so, 0); - if (local_address.ss_family != 0) { - if (local_address.ss_len > sizeof(local_address)) { - local_address.ss_len = sizeof(local_address); - } - if (fd_cb->local_address != NULL) { - FREE(fd_cb->local_address, M_SONAME); - fd_cb->local_address = NULL; - } - fd_cb->local_address = dup_sockaddr((struct sockaddr *)&local_address, 1); - } - - if (remote_address.ss_family != 0) { - if (remote_address.ss_len > sizeof(remote_address)) { - remote_address.ss_len = sizeof(remote_address); - } - if (fd_cb->remote_address != NULL) { - FREE(fd_cb->remote_address, M_SONAME); - fd_cb->remote_address = NULL; - } - 
fd_cb->remote_address = dup_sockaddr((struct sockaddr *)&remote_address, 1); - } - if (out_if_index > 0) { struct inpcb *inp = NULL; struct ifnet *ifp = NULL; @@ -2330,7 +2657,11 @@ flow_divert_handle_properties_update(struct flow_divert_pcb *fd_cb, mbuf_t packe } if (ifp != NULL) { - inp->inp_last_outifp = ifp; + if (inp->inp_vflag & INP_IPV4) { + inp->inp_last_outifp = ifp; + } else if (inp->inp_vflag & INP_IPV6) { + inp->in6p_last_outifp = ifp; + } } ifnet_head_done(); } @@ -2365,14 +2696,17 @@ flow_divert_handle_app_map_create(struct flow_divert_group *group, mbuf_t packet { size_t bytes_mem_size; size_t child_maps_mem_size; + size_t nodes_mem_size; + size_t trie_memory_size = 0; int cursor; int error = 0; struct flow_divert_trie new_trie; int insert_error = 0; - size_t nodes_mem_size; int prefix_count = -1; int signing_id_count = 0; - size_t trie_memory_size = 0; + size_t bytes_count = 0; + size_t nodes_count = 0; + size_t maps_count = 0; lck_rw_lock_exclusive(&group->lck); @@ -2405,24 +2739,44 @@ flow_divert_handle_app_map_create(struct flow_divert_group *group, mbuf_t packet signing_id_count = 0; break; } - new_trie.bytes_count += sid_size; + if (os_add_overflow(bytes_count, sid_size, &bytes_count)) { + FDLOG0(LOG_ERR, &nil_pcb, "Overflow while incrementing number of bytes"); + signing_id_count = 0; + break; + } signing_id_count++; } if (signing_id_count == 0) { lck_rw_done(&group->lck); + FDLOG0(LOG_NOTICE, &nil_pcb, "No signing identifiers"); return; } - new_trie.nodes_count = (prefix_count + signing_id_count + 1); /* + 1 for the root node */ - new_trie.child_maps_count = (prefix_count + 1); /* + 1 for the root node */ + if (os_add3_overflow(prefix_count, signing_id_count, 1, &nodes_count)) { /* + 1 for the root node */ + lck_rw_done(&group->lck); + FDLOG0(LOG_ERR, &nil_pcb, "Overflow while computing the number of nodes"); + return; + } + + if (os_add_overflow(prefix_count, 1, &maps_count)) { /* + 1 for the root node */ + lck_rw_done(&group->lck); + FDLOG0(LOG_ERR, &nil_pcb, "Overflow while computing the number of maps"); + return; + } + + if (bytes_count > UINT16_MAX || nodes_count > UINT16_MAX || maps_count > UINT16_MAX) { + lck_rw_done(&group->lck); + FDLOG(LOG_NOTICE, &nil_pcb, "Invalid bytes count (%lu), nodes count (%lu) or maps count (%lu)", bytes_count, nodes_count, maps_count); + return; + } FDLOG(LOG_INFO, &nil_pcb, "Nodes count = %lu, child maps count = %lu, bytes_count = %lu", - new_trie.nodes_count, new_trie.child_maps_count, new_trie.bytes_count); + nodes_count, maps_count, bytes_count); - if (os_mul_overflow(sizeof(*new_trie.nodes), new_trie.nodes_count, &nodes_mem_size) || - os_mul3_overflow(sizeof(*new_trie.child_maps), CHILD_MAP_SIZE, new_trie.child_maps_count, &child_maps_mem_size) || - os_mul_overflow(sizeof(*new_trie.bytes), new_trie.bytes_count, &bytes_mem_size) || + if (os_mul_overflow(sizeof(*new_trie.nodes), (size_t)nodes_count, &nodes_mem_size) || + os_mul3_overflow(sizeof(*new_trie.child_maps), CHILD_MAP_SIZE, (size_t)maps_count, &child_maps_mem_size) || + os_mul_overflow(sizeof(*new_trie.bytes), (size_t)bytes_count, &bytes_mem_size) || os_add3_overflow(nodes_mem_size, child_maps_mem_size, bytes_mem_size, &trie_memory_size)) { FDLOG0(LOG_ERR, &nil_pcb, "Overflow while computing trie memory sizes"); lck_rw_done(&group->lck); @@ -2443,6 +2797,10 @@ flow_divert_handle_app_map_create(struct flow_divert_group *group, mbuf_t packet return; } + new_trie.bytes_count = (uint16_t)bytes_count; + new_trie.nodes_count = (uint16_t)nodes_count; + 
new_trie.child_maps_count = (uint16_t)maps_count; + /* Initialize the free lists */ new_trie.nodes = (struct flow_divert_trie_node *)new_trie.memory; new_trie.nodes_free_next = 0; @@ -2470,7 +2828,7 @@ flow_divert_handle_app_map_create(struct flow_divert_group *group, mbuf_t packet insert_error = EINVAL; break; } - if (new_trie.bytes_free_next + sid_size <= new_trie.bytes_count) { + if (sid_size <= UINT16_MAX && new_trie.bytes_free_next + (uint16_t)sid_size <= new_trie.bytes_count) { uint16_t new_node_idx; error = flow_divert_packet_get_tlv(packet, cursor, FLOW_DIVERT_TLV_SIGNING_ID, sid_size, &TRIE_BYTE(&new_trie, new_trie.bytes_free_next), NULL); if (error) { @@ -2558,7 +2916,7 @@ flow_divert_input(mbuf_t packet, struct flow_divert_group *group) flow_divert_handle_close(fd_cb, packet, sizeof(hdr)); break; case FLOW_DIVERT_PKT_DATA: - flow_divert_handle_data(fd_cb, packet, sizeof(hdr)); + error = flow_divert_handle_data(fd_cb, packet, sizeof(hdr)); break; case FLOW_DIVERT_PKT_READ_NOTIFY: flow_divert_handle_read_notification(fd_cb, packet, sizeof(hdr)); @@ -2700,26 +3058,17 @@ flow_divert_shutdown(struct socket *so) static int flow_divert_rcvd(struct socket *so, int flags __unused) { - struct flow_divert_pcb *fd_cb = so->so_fd_pcb; - uint32_t latest_sb_size; - uint32_t read_count; + struct flow_divert_pcb *fd_cb = so->so_fd_pcb; + int space = sbspace(&so->so_rcv); VERIFY((so->so_flags & SOF_FLOW_DIVERT) && so->so_fd_pcb != NULL); - latest_sb_size = fd_cb->so->so_rcv.sb_cc; - - if (fd_cb->sb_size < latest_sb_size) { - panic("flow divert rcvd event handler (%u): saved rcv buffer size (%u) is less than latest rcv buffer size (%u)", - fd_cb->hash, fd_cb->sb_size, latest_sb_size); - } - - read_count = fd_cb->sb_size - latest_sb_size; - - FDLOG(LOG_DEBUG, fd_cb, "app read %u bytes", read_count); - - if (read_count > 0 && flow_divert_send_read_notification(fd_cb, read_count) == 0) { - fd_cb->bytes_read_by_app += read_count; - fd_cb->sb_size = latest_sb_size; + FDLOG(LOG_DEBUG, fd_cb, "app read bytes, space = %d", space); + if ((fd_cb->flags & FLOW_DIVERT_NOTIFY_ON_RECEIVED) && + (space > 0) && + flow_divert_send_read_notification(fd_cb) == 0) { + FDLOG0(LOG_INFO, fd_cb, "Sent a read notification"); + fd_cb->flags &= ~FLOW_DIVERT_NOTIFY_ON_RECEIVED; } return 0; @@ -2744,12 +3093,9 @@ flow_divert_append_target_endpoint_tlv(mbuf_t connect_packet, struct sockaddr *t if (toaddr->sa_family == AF_INET) { port = ntohs((satosin(toaddr))->sin_port); - } -#if INET6 - else { + } else { port = ntohs((satosin6(toaddr))->sin6_port); } -#endif error = flow_divert_packet_append_tlv(connect_packet, FLOW_DIVERT_TLV_TARGET_PORT, sizeof(port), &port); if (error) { @@ -2781,54 +3127,17 @@ flow_divert_is_sockaddr_valid(struct sockaddr *addr) return FALSE; } break; -#if INET6 case AF_INET6: if (addr->sa_len < sizeof(struct sockaddr_in6)) { return FALSE; } break; -#endif /* INET6 */ default: return FALSE; } return TRUE; } -static errno_t -flow_divert_inp_to_sockaddr(const struct inpcb *inp, struct sockaddr **local_socket) -{ - int error = 0; - union sockaddr_in_4_6 sin46; - - bzero(&sin46, sizeof(sin46)); - if (inp->inp_vflag & INP_IPV4) { - struct sockaddr_in *sin = &sin46.sin; - - sin->sin_family = AF_INET; - sin->sin_len = sizeof(*sin); - sin->sin_port = inp->inp_lport; - sin->sin_addr = inp->inp_laddr; - } else if (inp->inp_vflag & INP_IPV6) { - struct sockaddr_in6 *sin6 = &sin46.sin6; - - sin6->sin6_len = sizeof(*sin6); - sin6->sin6_family = AF_INET6; - sin6->sin6_port = inp->inp_lport; - sin6->sin6_addr = 
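flow_divert_handle_app_map_create() above now accumulates the byte, node, and child-map counts through os_add_overflow()/os_mul_overflow() style checks and rejects anything above UINT16_MAX before sizing the trie allocation. The same guard pattern in portable C, using the compiler's checked-arithmetic builtins, which the kernel's os_*_overflow helpers appear to wrap; the function and parameter names here are illustrative only:

#include <stdint.h>
#include <stdio.h>

/* Compute total = nodes*node_sz + maps*map_sz + bytes, refusing to
 * proceed if any step overflows size_t or if the counts do not fit in
 * the uint16_t fields the trie uses for indexing. */
static int
trie_memory_size(size_t nodes, size_t maps, size_t bytes,
    size_t node_sz, size_t map_sz, size_t *total)
{
    size_t nodes_mem, maps_mem, sum;

    if (nodes > UINT16_MAX || maps > UINT16_MAX || bytes > UINT16_MAX) {
        return -1;
    }
    if (__builtin_mul_overflow(nodes, node_sz, &nodes_mem) ||
        __builtin_mul_overflow(maps, map_sz, &maps_mem) ||
        __builtin_add_overflow(nodes_mem, maps_mem, &sum) ||
        __builtin_add_overflow(sum, bytes, total)) {
        return -1;
    }
    return 0;
}

int
main(void)
{
    size_t total = 0;
    if (trie_memory_size(100, 10, 4096, 16, 1024, &total) == 0) {
        printf("allocate %zu bytes\n", total);
    }
    return 0;
}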
inp->in6p_laddr; - } - *local_socket = dup_sockaddr((struct sockaddr *)&sin46, 1); - if (*local_socket == NULL) { - error = ENOBUFS; - } - return error; -} - -static boolean_t -flow_divert_has_pcb_local_address(const struct inpcb *inp) -{ - return inp->inp_lport != 0; -} - static errno_t flow_divert_dup_addr(sa_family_t family, struct sockaddr *addr, struct sockaddr **dup) @@ -2844,13 +3153,9 @@ flow_divert_dup_addr(sa_family_t family, struct sockaddr *addr, ss.ss_family = family; if (ss.ss_family == AF_INET) { ss.ss_len = sizeof(struct sockaddr_in); - } -#if INET6 - else if (ss.ss_family == AF_INET6) { + } else if (ss.ss_family == AF_INET6) { ss.ss_len = sizeof(struct sockaddr_in6); - } -#endif /* INET6 */ - else { + } else { error = EINVAL; } result = (struct sockaddr *)&ss; @@ -2875,40 +3180,15 @@ flow_divert_disconnect_socket(struct socket *so) inp = sotoinpcb(so); if (inp != NULL) { -#if INET6 if (SOCK_CHECK_DOM(so, PF_INET6)) { in6_pcbdetach(inp); - } else -#endif /* INET6 */ - in_pcbdetach(inp); + } else { + in_pcbdetach(inp); + } } } } -static errno_t -flow_divert_getpeername(struct socket *so, struct sockaddr **sa) -{ - struct flow_divert_pcb *fd_cb = so->so_fd_pcb; - - VERIFY((so->so_flags & SOF_FLOW_DIVERT) && so->so_fd_pcb != NULL); - - return flow_divert_dup_addr(so->so_proto->pr_domain->dom_family, - fd_cb->remote_address, - sa); -} - -static errno_t -flow_divert_getsockaddr(struct socket *so, struct sockaddr **sa) -{ - struct flow_divert_pcb *fd_cb = so->so_fd_pcb; - - VERIFY((so->so_flags & SOF_FLOW_DIVERT) && so->so_fd_pcb != NULL); - - return flow_divert_dup_addr(so->so_proto->pr_domain->dom_family, - fd_cb->local_address, - sa); -} - static errno_t flow_divert_ctloutput(struct socket *so, struct sockopt *sopt) { @@ -2924,17 +3204,14 @@ flow_divert_ctloutput(struct socket *so, struct sockopt *sopt) if (SOCK_DOM(so) == PF_INET) { return g_tcp_protosw->pr_ctloutput(so, sopt); - } -#if INET6 - else if (SOCK_DOM(so) == PF_INET6) { + } else if (SOCK_DOM(so) == PF_INET6) { return g_tcp6_protosw->pr_ctloutput(so, sopt); } -#endif return 0; } -errno_t -flow_divert_connect_out(struct socket *so, struct sockaddr *to, proc_t p) +static errno_t +flow_divert_connect_out_internal(struct socket *so, struct sockaddr *to, proc_t p, bool implicit) { struct flow_divert_pcb *fd_cb = so->so_fd_pcb; int error = 0; @@ -2963,71 +3240,142 @@ flow_divert_connect_out(struct socket *so, struct sockaddr *to, proc_t p) goto done; } - if ((fd_cb->flags & FLOW_DIVERT_CONNECT_STARTED) && !(fd_cb->flags & FLOW_DIVERT_TRANSFERRED)) { + if (fd_cb->flags & FLOW_DIVERT_CONNECT_STARTED) { error = EALREADY; goto done; } - if (fd_cb->flags & FLOW_DIVERT_TRANSFERRED) { - FDLOG0(LOG_INFO, fd_cb, "fully transferred"); - fd_cb->flags &= ~FLOW_DIVERT_TRANSFERRED; - if (fd_cb->remote_address != NULL) { - soisconnected(fd_cb->so); - goto done; - } - } - FDLOG0(LOG_INFO, fd_cb, "Connecting"); if (fd_cb->connect_packet == NULL) { + struct sockaddr_in sin = {}; + struct ifnet *ifp = NULL; + if (to == NULL) { FDLOG0(LOG_ERR, fd_cb, "No destination address available when creating connect packet"); error = EINVAL; goto done; } + fd_cb->original_remote_endpoint = dup_sockaddr(to, 0); + if (fd_cb->original_remote_endpoint == NULL) { + FDLOG0(LOG_ERR, fd_cb, "Failed to dup the remote endpoint"); + error = ENOMEM; + goto done; + } + fd_cb->original_vflag = inp->inp_vflag; + fd_cb->original_last_outifp = inp->inp_last_outifp; + fd_cb->original_last_outifp6 = inp->in6p_last_outifp; + sinp = (struct sockaddr_in *)(void *)to; if 
(sinp->sin_family == AF_INET && IN_MULTICAST(ntohl(sinp->sin_addr.s_addr))) { error = EAFNOSUPPORT; goto done; } + if (to->sa_family == AF_INET6 && !(inp->inp_flags & IN6P_IPV6_V6ONLY)) { + struct sockaddr_in6 sin6 = {}; + sin6.sin6_family = AF_INET6; + sin6.sin6_len = sizeof(struct sockaddr_in6); + sin6.sin6_port = satosin6(to)->sin6_port; + sin6.sin6_addr = satosin6(to)->sin6_addr; + if (IN6_IS_ADDR_V4MAPPED(&(sin6.sin6_addr))) { + in6_sin6_2_sin(&sin, &sin6); + to = (struct sockaddr *)&sin; + } + } + + if (to->sa_family == AF_INET6) { + inp->inp_vflag &= ~INP_IPV4; + inp->inp_vflag |= INP_IPV6; + fd_cb->local_endpoint.sin6.sin6_len = sizeof(struct sockaddr_in6); + fd_cb->local_endpoint.sin6.sin6_family = AF_INET6; + fd_cb->local_endpoint.sin6.sin6_port = inp->inp_lport; + error = in6_pcbladdr(inp, to, &(fd_cb->local_endpoint.sin6.sin6_addr), &ifp); + if (error) { + FDLOG(LOG_WARNING, fd_cb, "failed to get a local IPv6 address: %d", error); + error = 0; + } + if (ifp != NULL) { + inp->in6p_last_outifp = ifp; + ifnet_release(ifp); + } + } else if (to->sa_family == AF_INET) { + inp->inp_vflag |= INP_IPV4; + inp->inp_vflag &= ~INP_IPV6; + fd_cb->local_endpoint.sin.sin_len = sizeof(struct sockaddr_in); + fd_cb->local_endpoint.sin.sin_family = AF_INET; + fd_cb->local_endpoint.sin.sin_port = inp->inp_lport; + error = in_pcbladdr(inp, to, &(fd_cb->local_endpoint.sin.sin_addr), IFSCOPE_NONE, &ifp, 0); + if (error) { + FDLOG(LOG_WARNING, fd_cb, "failed to get a local IPv4 address: %d", error); + error = 0; + } + if (ifp != NULL) { + inp->inp_last_outifp = ifp; + ifnet_release(ifp); + } + } else { + FDLOG(LOG_WARNING, fd_cb, "target address has an unsupported family: %d", to->sa_family); + } + + error = flow_divert_check_no_cellular(fd_cb) || + flow_divert_check_no_expensive(fd_cb) || + flow_divert_check_no_constrained(fd_cb); + if (error) { + goto done; + } + error = flow_divert_create_connect_packet(fd_cb, to, so, p, &connect_packet); if (error) { goto done; } + if (!implicit || SOCK_TYPE(so) == SOCK_STREAM) { + flow_divert_set_remote_endpoint(fd_cb, to); + flow_divert_set_local_endpoint(fd_cb, &(fd_cb->local_endpoint.sa), false); + } + + if (implicit) { + fd_cb->flags |= FLOW_DIVERT_IMPLICIT_CONNECT; + } + if (so->so_flags1 & SOF1_PRECONNECT_DATA) { FDLOG0(LOG_INFO, fd_cb, "Delaying sending the connect packet until send or receive"); do_send = 0; } + + fd_cb->connect_packet = connect_packet; + connect_packet = NULL; } else { FDLOG0(LOG_INFO, fd_cb, "Sending saved connect packet"); - connect_packet = fd_cb->connect_packet; - fd_cb->connect_packet = NULL; } if (do_send) { - error = flow_divert_send_packet(fd_cb, connect_packet, TRUE); + error = flow_divert_send_connect_packet(fd_cb); if (error) { goto done; } fd_cb->flags |= FLOW_DIVERT_CONNECT_STARTED; - } else { - fd_cb->connect_packet = connect_packet; - connect_packet = NULL; } - soisconnecting(so); + if (SOCK_TYPE(so) == SOCK_DGRAM) { + soisconnected(so); + } else { + soisconnecting(so); + } done: - if (error && connect_packet != NULL) { - mbuf_freem(connect_packet); - } return error; } +errno_t +flow_divert_connect_out(struct socket *so, struct sockaddr *to, proc_t p) +{ + return flow_divert_connect_out_internal(so, to, p, false); +} + static int flow_divert_connectx_out_common(struct socket *so, struct sockaddr *dst, struct proc *p, sae_connid_t *pcid, struct uio *auio, user_ssize_t *bytes_written) @@ -3092,7 +3440,6 @@ flow_divert_connectx_out(struct socket *so, struct sockaddr *src __unused, return flow_divert_connectx_out_common(so, 
dst, p, pcid, uio, bytes_written); } -#if INET6 static int flow_divert_connectx6_out(struct socket *so, struct sockaddr *src __unused, struct sockaddr *dst, struct proc *p, uint32_t ifscope __unused, @@ -3101,155 +3448,6 @@ flow_divert_connectx6_out(struct socket *so, struct sockaddr *src __unused, { return flow_divert_connectx_out_common(so, dst, p, pcid, uio, bytes_written); } -#endif /* INET6 */ - -static int -flow_divert_getconninfo(struct socket *so, sae_connid_t cid, uint32_t *flags, - uint32_t *ifindex, int32_t *soerror, user_addr_t src, socklen_t *src_len, - user_addr_t dst, socklen_t *dst_len, uint32_t *aux_type, - user_addr_t aux_data __unused, uint32_t *aux_len) -{ - int error = 0; - struct flow_divert_pcb *fd_cb = so->so_fd_pcb; - struct ifnet *ifp = NULL; - struct inpcb *inp = sotoinpcb(so); - - VERIFY((so->so_flags & SOF_FLOW_DIVERT)); - - if (so->so_fd_pcb == NULL || inp == NULL) { - error = EINVAL; - goto out; - } - - if (cid != SAE_CONNID_ANY && cid != SAE_CONNID_ALL && cid != 1) { - error = EINVAL; - goto out; - } - - ifp = inp->inp_last_outifp; - *ifindex = ((ifp != NULL) ? ifp->if_index : 0); - *soerror = so->so_error; - *flags = 0; - - if (so->so_state & SS_ISCONNECTED) { - *flags |= (CIF_CONNECTED | CIF_PREFERRED); - } - - if (fd_cb->local_address == NULL) { - struct sockaddr_in sin; - bzero(&sin, sizeof(sin)); - sin.sin_len = sizeof(sin); - sin.sin_family = AF_INET; - *src_len = sin.sin_len; - if (src != USER_ADDR_NULL) { - error = copyout(&sin, src, sin.sin_len); - if (error != 0) { - goto out; - } - } - } else { - *src_len = fd_cb->local_address->sa_len; - if (src != USER_ADDR_NULL) { - error = copyout(fd_cb->local_address, src, fd_cb->local_address->sa_len); - if (error != 0) { - goto out; - } - } - } - - if (fd_cb->remote_address == NULL) { - struct sockaddr_in sin; - bzero(&sin, sizeof(sin)); - sin.sin_len = sizeof(sin); - sin.sin_family = AF_INET; - *dst_len = sin.sin_len; - if (dst != USER_ADDR_NULL) { - error = copyout(&sin, dst, sin.sin_len); - if (error != 0) { - goto out; - } - } - } else { - *dst_len = fd_cb->remote_address->sa_len; - if (dst != USER_ADDR_NULL) { - error = copyout(fd_cb->remote_address, dst, fd_cb->remote_address->sa_len); - if (error != 0) { - goto out; - } - } - } - - *aux_type = 0; - *aux_len = 0; - -out: - return error; -} - -static int -flow_divert_control(struct socket *so, u_long cmd, caddr_t data, struct ifnet *ifp __unused, struct proc *p __unused) -{ - int error = 0; - - switch (cmd) { - case SIOCGCONNINFO32: { - struct so_cinforeq32 cifr; - bcopy(data, &cifr, sizeof(cifr)); - error = flow_divert_getconninfo(so, cifr.scir_cid, &cifr.scir_flags, - &cifr.scir_ifindex, &cifr.scir_error, cifr.scir_src, - &cifr.scir_src_len, cifr.scir_dst, &cifr.scir_dst_len, - &cifr.scir_aux_type, cifr.scir_aux_data, - &cifr.scir_aux_len); - if (error == 0) { - bcopy(&cifr, data, sizeof(cifr)); - } - break; - } - - case SIOCGCONNINFO64: { - struct so_cinforeq64 cifr; - bcopy(data, &cifr, sizeof(cifr)); - error = flow_divert_getconninfo(so, cifr.scir_cid, &cifr.scir_flags, - &cifr.scir_ifindex, &cifr.scir_error, cifr.scir_src, - &cifr.scir_src_len, cifr.scir_dst, &cifr.scir_dst_len, - &cifr.scir_aux_type, cifr.scir_aux_data, - &cifr.scir_aux_len); - if (error == 0) { - bcopy(&cifr, data, sizeof(cifr)); - } - break; - } - - default: - error = EOPNOTSUPP; - } - - return error; -} - -static int -flow_divert_in_control(struct socket *so, u_long cmd, caddr_t data, struct ifnet *ifp, struct proc *p) -{ - int error = flow_divert_control(so, cmd, data, ifp, 
p); - - if (error == EOPNOTSUPP) { - error = in_control(so, cmd, data, ifp, p); - } - - return error; -} - -static int -flow_divert_in6_control(struct socket *so, u_long cmd, caddr_t data, struct ifnet *ifp, struct proc *p) -{ - int error = flow_divert_control(so, cmd, data, ifp, p); - - if (error == EOPNOTSUPP) { - error = in6_control(so, cmd, data, ifp, p); - } - - return error; -} static errno_t flow_divert_data_out(struct socket *so, int flags, mbuf_t data, struct sockaddr *to, mbuf_t control, struct proc *p) @@ -3279,39 +3477,40 @@ flow_divert_data_out(struct socket *so, int flags, mbuf_t data, struct sockaddr goto done; /* We don't support OOB data */ } - error = flow_divert_check_no_cellular(fd_cb) || - flow_divert_check_no_expensive(fd_cb) || - flow_divert_check_no_constrained(fd_cb); - if (error) { - goto done; +#if CONTENT_FILTER + /* + * If the socket is subject to a UDP Content Filter and no remote address is passed in, + * retrieve the CFIL saved remote address from the mbuf and use it. + */ + if (to == NULL && so->so_cfil_db) { + struct sockaddr *cfil_faddr = NULL; + cfil_tag = cfil_dgram_get_socket_state(data, NULL, NULL, &cfil_faddr, NULL); + if (cfil_tag) { + to = (struct sockaddr *)(void *)cfil_faddr; + } + FDLOG(LOG_INFO, fd_cb, "Using remote address from CFIL saved state: %p", to); } +#endif /* Implicit connect */ if (!(fd_cb->flags & FLOW_DIVERT_CONNECT_STARTED)) { FDLOG0(LOG_INFO, fd_cb, "implicit connect"); -#if CONTENT_FILTER - /* - * If the socket is subject to a UDP Content Filter and no remote address is passed in, - * retrieve the CFIL saved remote address from the mbuf and use it. - */ - if (to == NULL && so->so_cfil_db) { - struct sockaddr *cfil_faddr = NULL; - cfil_tag = cfil_dgram_get_socket_state(data, NULL, NULL, &cfil_faddr, NULL); - if (cfil_tag) { - to = (struct sockaddr *)(void *)cfil_faddr; - } - FDLOG(LOG_INFO, fd_cb, "Using remote address from CFIL saved state: %p", to); - } -#endif - error = flow_divert_connect_out(so, to, p); + error = flow_divert_connect_out_internal(so, to, p, true); if (error) { goto done; } if (so->so_flags1 & SOF1_DATA_IDEMPOTENT) { /* Open up the send window so that the data will get sent right away */ - fd_cb->send_window = mbuf_pkthdr_len(data); + fd_cb->send_window = (uint32_t)mbuf_pkthdr_len(data); + } + } else { + error = flow_divert_check_no_cellular(fd_cb) || + flow_divert_check_no_expensive(fd_cb) || + flow_divert_check_no_constrained(fd_cb); + if (error) { + goto done; } } @@ -3319,12 +3518,13 @@ flow_divert_data_out(struct socket *so, int flags, mbuf_t data, struct sockaddr fd_cb->bytes_written_by_app += mbuf_pkthdr_len(data); error = flow_divert_send_app_data(fd_cb, data, to); + + data = NULL; + if (error) { goto done; } - data = NULL; - if (flags & PRUS_EOF) { flow_divert_shutdown(so); } @@ -3348,17 +3548,16 @@ done: static int flow_divert_preconnect(struct socket *so) { - struct flow_divert_pcb *fd_cb = so->so_fd_pcb; int error = 0; + struct flow_divert_pcb *fd_cb = so->so_fd_pcb; - if (!(fd_cb->flags & FLOW_DIVERT_CONNECT_STARTED) && fd_cb->connect_packet != NULL) { - FDLOG0(LOG_INFO, fd_cb, "Pre-connect read: sending saved connect packet"); - mbuf_t connect_packet = fd_cb->connect_packet; - fd_cb->connect_packet = NULL; + VERIFY((so->so_flags & SOF_FLOW_DIVERT) && so->so_fd_pcb != NULL); - error = flow_divert_send_packet(fd_cb, connect_packet, TRUE); + if (!(fd_cb->flags & FLOW_DIVERT_CONNECT_STARTED)) { + FDLOG0(LOG_INFO, fd_cb, "Pre-connect read: sending saved connect packet"); + error = 
flow_divert_send_connect_packet(so->so_fd_pcb); if (error) { - mbuf_freem(connect_packet); + return error; } fd_cb->flags |= FLOW_DIVERT_CONNECT_STARTED; @@ -3372,118 +3571,21 @@ flow_divert_preconnect(struct socket *so) static void flow_divert_set_protosw(struct socket *so) { - so->so_flags |= SOF_FLOW_DIVERT; if (SOCK_DOM(so) == PF_INET) { so->so_proto = &g_flow_divert_in_protosw; - } -#if INET6 - else { + } else { so->so_proto = (struct protosw *)&g_flow_divert_in6_protosw; } -#endif /* INET6 */ } static void flow_divert_set_udp_protosw(struct socket *so) { - so->so_flags |= SOF_FLOW_DIVERT; if (SOCK_DOM(so) == PF_INET) { so->so_proto = &g_flow_divert_in_udp_protosw; - } -#if INET6 - else { + } else { so->so_proto = (struct protosw *)&g_flow_divert_in6_udp_protosw; } -#endif /* INET6 */ -} - -static errno_t -flow_divert_attach(struct socket *so, uint32_t flow_id, uint32_t ctl_unit) -{ - int error = 0; - struct flow_divert_pcb *fd_cb = NULL; - struct ifnet *ifp = NULL; - struct inpcb *inp = NULL; - struct socket *old_so; - mbuf_t recv_data = NULL; - - socket_unlock(so, 0); - - FDLOG(LOG_INFO, &nil_pcb, "Attaching socket to flow %u", flow_id); - - /* Find the flow divert control block */ - lck_rw_lock_shared(&g_flow_divert_group_lck); - if (g_flow_divert_groups != NULL && g_active_group_count > 0) { - struct flow_divert_group *group = g_flow_divert_groups[ctl_unit]; - if (group != NULL) { - fd_cb = flow_divert_pcb_lookup(flow_id, group); - } - } - lck_rw_done(&g_flow_divert_group_lck); - - if (fd_cb == NULL) { - error = ENOENT; - goto done; - } - - FDLOCK(fd_cb); - - /* Dis-associate the flow divert control block from its current socket */ - old_so = fd_cb->so; - - inp = sotoinpcb(old_so); - - VERIFY(inp != NULL); - - socket_lock(old_so, 0); - flow_divert_disconnect_socket(old_so); - old_so->so_flags &= ~SOF_FLOW_DIVERT; - old_so->so_fd_pcb = NULL; - if (SOCK_TYPE(old_so) == SOCK_STREAM) { - old_so->so_proto = pffindproto(SOCK_DOM(old_so), IPPROTO_TCP, SOCK_STREAM); - } else if (SOCK_TYPE(old_so) == SOCK_DGRAM) { - old_so->so_proto = pffindproto(SOCK_DOM(old_so), IPPROTO_UDP, SOCK_DGRAM); - } - fd_cb->so = NULL; - /* Save the output interface */ - ifp = inp->inp_last_outifp; - if (old_so->so_rcv.sb_cc > 0) { - error = mbuf_dup(old_so->so_rcv.sb_mb, MBUF_DONTWAIT, &recv_data); - sbflush(&old_so->so_rcv); - } - socket_unlock(old_so, 0); - - /* Associate the new socket with the flow divert control block */ - socket_lock(so, 0); - so->so_fd_pcb = fd_cb; - inp = sotoinpcb(so); - inp->inp_last_outifp = ifp; - if (recv_data != NULL) { - if (sbappendstream(&so->so_rcv, recv_data)) { - sorwakeup(so); - } - } - if (SOCK_TYPE(so) == SOCK_STREAM) { - flow_divert_set_protosw(so); - } else if (SOCK_TYPE(so) == SOCK_DGRAM) { - flow_divert_set_udp_protosw(so); - } - - socket_unlock(so, 0); - - fd_cb->so = so; - fd_cb->flags |= FLOW_DIVERT_TRANSFERRED; - - FDUNLOCK(fd_cb); - -done: - socket_lock(so, 0); - - if (fd_cb != NULL) { - FDRELEASE(fd_cb); /* Release the reference obtained via flow_divert_pcb_lookup */ - } - - return error; } errno_t @@ -3499,15 +3601,9 @@ flow_divert_implicit_data_out(struct socket *so, int flags, mbuf_t data, struct } if (fd_cb == NULL) { - uint32_t fd_ctl_unit = necp_socket_get_flow_divert_control_unit(inp); - if (fd_ctl_unit > 0) { - error = flow_divert_pcb_init(so, fd_ctl_unit); - fd_cb = so->so_fd_pcb; - if (error != 0 || fd_cb == NULL) { - goto done; - } - } else { - error = ENETDOWN; + error = flow_divert_pcb_init(so); + fd_cb = so->so_fd_pcb; + if (error != 0 || fd_cb == 
NULL) { goto done; } } @@ -3524,11 +3620,17 @@ done: return error; } -errno_t -flow_divert_pcb_init(struct socket *so, uint32_t ctl_unit) +static errno_t +flow_divert_pcb_init_internal(struct socket *so, uint32_t ctl_unit, uint32_t aggregate_unit) { errno_t error = 0; struct flow_divert_pcb *fd_cb; + uint32_t agg_unit = aggregate_unit; + uint32_t group_unit = flow_divert_derive_kernel_control_unit(ctl_unit, &agg_unit); + + if (group_unit == 0) { + return EINVAL; + } if (so->so_flags & SOF_FLOW_DIVERT) { return EALREADY; @@ -3536,14 +3638,19 @@ flow_divert_pcb_init(struct socket *so, uint32_t ctl_unit) fd_cb = flow_divert_pcb_create(so); if (fd_cb != NULL) { - error = flow_divert_pcb_insert(fd_cb, ctl_unit); + so->so_fd_pcb = fd_cb; + so->so_flags |= SOF_FLOW_DIVERT; + fd_cb->control_group_unit = group_unit; + fd_cb->policy_control_unit = ctl_unit; + fd_cb->aggregate_unit = agg_unit; + + error = flow_divert_pcb_insert(fd_cb, group_unit); if (error) { FDLOG(LOG_ERR, fd_cb, "pcb insert failed: %d", error); + so->so_fd_pcb = NULL; + so->so_flags &= ~SOF_FLOW_DIVERT; FDRELEASE(fd_cb); } else { - fd_cb->control_group_unit = ctl_unit; - so->so_fd_pcb = fd_cb; - if (SOCK_TYPE(so) == SOCK_STREAM) { flow_divert_set_protosw(so); } else if (SOCK_TYPE(so) == SOCK_DGRAM) { @@ -3559,15 +3666,24 @@ flow_divert_pcb_init(struct socket *so, uint32_t ctl_unit) return error; } +errno_t +flow_divert_pcb_init(struct socket *so) +{ + struct inpcb *inp = sotoinpcb(so); + uint32_t aggregate_units = 0; + uint32_t ctl_unit = necp_socket_get_flow_divert_control_unit(inp, &aggregate_units); + return flow_divert_pcb_init_internal(so, ctl_unit, aggregate_units); +} + errno_t flow_divert_token_set(struct socket *so, struct sockopt *sopt) { - uint32_t ctl_unit = 0; - uint32_t key_unit = 0; - uint32_t flow_id = 0; - int error = 0; - int hmac_error = 0; - mbuf_t token = NULL; + uint32_t ctl_unit = 0; + uint32_t key_unit = 0; + uint32_t aggregate_unit = 0; + int error = 0; + int hmac_error = 0; + mbuf_t token = NULL; if (so->so_flags & SOF_FLOW_DIVERT) { error = EALREADY; @@ -3582,11 +3698,7 @@ flow_divert_token_set(struct socket *so, struct sockopt *sopt) if ((SOCK_TYPE(so) != SOCK_STREAM && SOCK_TYPE(so) != SOCK_DGRAM) || (SOCK_PROTO(so) != IPPROTO_TCP && SOCK_PROTO(so) != IPPROTO_UDP) || - (SOCK_DOM(so) != PF_INET -#if INET6 - && SOCK_DOM(so) != PF_INET6 -#endif - )) { + (SOCK_DOM(so) != PF_INET && SOCK_DOM(so) != PF_INET6)) { error = EINVAL; goto done; } else { @@ -3630,48 +3742,41 @@ flow_divert_token_set(struct socket *so, struct sockopt *sopt) goto done; } - /* A valid kernel control unit is required */ - ctl_unit = ntohl(ctl_unit); - if (ctl_unit == 0 || ctl_unit >= GROUP_COUNT_MAX) { - FDLOG(LOG_ERR, &nil_pcb, "Got an invalid control socket unit: %u", ctl_unit); - error = EINVAL; + error = flow_divert_packet_get_tlv(token, 0, FLOW_DIVERT_TLV_AGGREGATE_UNIT, sizeof(aggregate_unit), (void *)&aggregate_unit, NULL); + if (error && error != ENOENT) { + FDLOG(LOG_ERR, &nil_pcb, "Failed to get the aggregate unit from the token: %d", error); goto done; } - socket_unlock(so, 0); - hmac_error = flow_divert_packet_verify_hmac(token, (key_unit != 0 ? 
key_unit : ctl_unit)); - socket_lock(so, 0); + /* A valid kernel control unit is required */ + ctl_unit = ntohl(ctl_unit); + aggregate_unit = ntohl(aggregate_unit); - if (hmac_error && hmac_error != ENOENT) { - FDLOG(LOG_ERR, &nil_pcb, "HMAC verfication failed: %d", hmac_error); - error = hmac_error; - goto done; - } + if (ctl_unit > 0 && ctl_unit < GROUP_COUNT_MAX) { + socket_unlock(so, 0); + hmac_error = flow_divert_packet_verify_hmac(token, (key_unit != 0 ? key_unit : ctl_unit)); + socket_lock(so, 0); - error = flow_divert_packet_get_tlv(token, 0, FLOW_DIVERT_TLV_FLOW_ID, sizeof(flow_id), (void *)&flow_id, NULL); - if (error && error != ENOENT) { - FDLOG(LOG_ERR, &nil_pcb, "Failed to get the flow ID from the token: %d", error); - goto done; + if (hmac_error && hmac_error != ENOENT) { + FDLOG(LOG_ERR, &nil_pcb, "HMAC verfication failed: %d", hmac_error); + error = hmac_error; + goto done; + } } - if (flow_id == 0) { - error = flow_divert_pcb_init(so, ctl_unit); - if (error == 0) { - struct flow_divert_pcb *fd_cb = so->so_fd_pcb; - int log_level = LOG_NOTICE; - - error = flow_divert_packet_get_tlv(token, 0, FLOW_DIVERT_TLV_LOG_LEVEL, - sizeof(log_level), &log_level, NULL); - if (error == 0) { - fd_cb->log_level = log_level; - } - error = 0; + error = flow_divert_pcb_init_internal(so, ctl_unit, aggregate_unit); + if (error == 0) { + struct flow_divert_pcb *fd_cb = so->so_fd_pcb; + int log_level = LOG_NOTICE; - fd_cb->connect_token = token; - token = NULL; + error = flow_divert_packet_get_tlv(token, 0, FLOW_DIVERT_TLV_LOG_LEVEL, sizeof(log_level), &log_level, NULL); + if (error == 0) { + fd_cb->log_level = (uint8_t)log_level; } - } else { - error = flow_divert_attach(so, flow_id, ctl_unit); + error = 0; + + fd_cb->connect_token = token; + token = NULL; } if (hmac_error == 0) { @@ -3730,7 +3835,7 @@ flow_divert_token_get(struct socket *so, struct sockopt *sopt) } if (fd_cb->app_data != NULL) { - error = flow_divert_packet_append_tlv(token, FLOW_DIVERT_TLV_APP_DATA, fd_cb->app_data_length, fd_cb->app_data); + error = flow_divert_packet_append_tlv(token, FLOW_DIVERT_TLV_APP_DATA, (uint32_t)fd_cb->app_data_length, fd_cb->app_data); if (error) { goto done; } @@ -3801,14 +3906,7 @@ flow_divert_kctl_connect(kern_ctl_ref kctlref __unused, struct sockaddr_ctl *sac *unitinfo = NULL; - MALLOC_ZONE(new_group, struct flow_divert_group *, sizeof(*new_group), M_FLOW_DIVERT_GROUP, M_WAITOK); - if (new_group == NULL) { - error = ENOBUFS; - goto done; - } - - memset(new_group, 0, sizeof(*new_group)); - + new_group = zalloc_flags(flow_divert_group_zone, Z_WAITOK | Z_ZERO); lck_rw_init(&new_group->lck, flow_divert_mtx_grp, flow_divert_mtx_attr); RB_INIT(&new_group->pcb_tree); new_group->ctl_unit = sac->sc_unit; @@ -3836,11 +3934,11 @@ flow_divert_kctl_connect(kern_ctl_ref kctlref __unused, struct sockaddr_ctl *sac lck_rw_done(&g_flow_divert_group_lck); - *unitinfo = new_group; - done: - if (error != 0 && new_group != NULL) { - FREE_ZONE(new_group, sizeof(*new_group), M_FLOW_DIVERT_GROUP); + if (error == 0) { + *unitinfo = new_group; + } else if (new_group != NULL) { + zfree(flow_divert_group_zone, new_group); } return error; } @@ -3855,6 +3953,10 @@ flow_divert_kctl_disconnect(kern_ctl_ref kctlref __unused, uint32_t unit, void * return EINVAL; } + if (unitinfo == NULL) { + return 0; + } + FDLOG(LOG_INFO, &nil_pcb, "disconnecting group %d", unit); lck_rw_lock_exclusive(&g_flow_divert_group_lck); @@ -3901,7 +4003,7 @@ flow_divert_kctl_disconnect(kern_ctl_ref kctlref __unused, uint32_t unit, void * 
lck_rw_done(&group->lck); - FREE_ZONE(group, sizeof(*group), M_FLOW_DIVERT_GROUP); + zfree(flow_divert_group_zone, group); } else { error = EINVAL; } @@ -4012,14 +4114,11 @@ flow_divert_init(void) g_flow_divert_in_usrreqs.pru_connect = flow_divert_connect_out; g_flow_divert_in_usrreqs.pru_connectx = flow_divert_connectx_out; - g_flow_divert_in_usrreqs.pru_control = flow_divert_in_control; g_flow_divert_in_usrreqs.pru_disconnect = flow_divert_close; g_flow_divert_in_usrreqs.pru_disconnectx = flow_divert_disconnectx; - g_flow_divert_in_usrreqs.pru_peeraddr = flow_divert_getpeername; g_flow_divert_in_usrreqs.pru_rcvd = flow_divert_rcvd; g_flow_divert_in_usrreqs.pru_send = flow_divert_data_out; g_flow_divert_in_usrreqs.pru_shutdown = flow_divert_shutdown; - g_flow_divert_in_usrreqs.pru_sockaddr = flow_divert_getsockaddr; g_flow_divert_in_usrreqs.pru_preconnect = flow_divert_preconnect; g_flow_divert_in_protosw.pr_usrreqs = &g_flow_divert_in_usrreqs; @@ -4044,14 +4143,11 @@ flow_divert_init(void) g_flow_divert_in_udp_usrreqs.pru_connect = flow_divert_connect_out; g_flow_divert_in_udp_usrreqs.pru_connectx = flow_divert_connectx_out; - g_flow_divert_in_udp_usrreqs.pru_control = flow_divert_in_control; g_flow_divert_in_udp_usrreqs.pru_disconnect = flow_divert_close; g_flow_divert_in_udp_usrreqs.pru_disconnectx = flow_divert_disconnectx; - g_flow_divert_in_udp_usrreqs.pru_peeraddr = flow_divert_getpeername; g_flow_divert_in_udp_usrreqs.pru_rcvd = flow_divert_rcvd; g_flow_divert_in_udp_usrreqs.pru_send = flow_divert_data_out; g_flow_divert_in_udp_usrreqs.pru_shutdown = flow_divert_shutdown; - g_flow_divert_in_udp_usrreqs.pru_sockaddr = flow_divert_getsockaddr; g_flow_divert_in_udp_usrreqs.pru_sosend_list = pru_sosend_list_notsupp; g_flow_divert_in_udp_usrreqs.pru_soreceive_list = pru_soreceive_list_notsupp; g_flow_divert_in_udp_usrreqs.pru_preconnect = flow_divert_preconnect; @@ -4069,7 +4165,6 @@ flow_divert_init(void) g_flow_divert_in_udp_protosw.pr_filter_head.tqh_last = (struct socket_filter **)(uintptr_t)0xdeadbeefdeadbeef; -#if INET6 g_tcp6_protosw = (struct ip6protosw *)pffindproto(AF_INET6, IPPROTO_TCP, SOCK_STREAM); VERIFY(g_tcp6_protosw != NULL); @@ -4079,14 +4174,11 @@ flow_divert_init(void) g_flow_divert_in6_usrreqs.pru_connect = flow_divert_connect_out; g_flow_divert_in6_usrreqs.pru_connectx = flow_divert_connectx6_out; - g_flow_divert_in6_usrreqs.pru_control = flow_divert_in6_control; g_flow_divert_in6_usrreqs.pru_disconnect = flow_divert_close; g_flow_divert_in6_usrreqs.pru_disconnectx = flow_divert_disconnectx; - g_flow_divert_in6_usrreqs.pru_peeraddr = flow_divert_getpeername; g_flow_divert_in6_usrreqs.pru_rcvd = flow_divert_rcvd; g_flow_divert_in6_usrreqs.pru_send = flow_divert_data_out; g_flow_divert_in6_usrreqs.pru_shutdown = flow_divert_shutdown; - g_flow_divert_in6_usrreqs.pru_sockaddr = flow_divert_getsockaddr; g_flow_divert_in6_usrreqs.pru_preconnect = flow_divert_preconnect; g_flow_divert_in6_protosw.pr_usrreqs = &g_flow_divert_in6_usrreqs; @@ -4111,14 +4203,11 @@ flow_divert_init(void) g_flow_divert_in6_udp_usrreqs.pru_connect = flow_divert_connect_out; g_flow_divert_in6_udp_usrreqs.pru_connectx = flow_divert_connectx6_out; - g_flow_divert_in6_udp_usrreqs.pru_control = flow_divert_in6_control; g_flow_divert_in6_udp_usrreqs.pru_disconnect = flow_divert_close; g_flow_divert_in6_udp_usrreqs.pru_disconnectx = flow_divert_disconnectx; - g_flow_divert_in6_udp_usrreqs.pru_peeraddr = flow_divert_getpeername; g_flow_divert_in6_udp_usrreqs.pru_rcvd = flow_divert_rcvd; 
g_flow_divert_in6_udp_usrreqs.pru_send = flow_divert_data_out; g_flow_divert_in6_udp_usrreqs.pru_shutdown = flow_divert_shutdown; - g_flow_divert_in6_udp_usrreqs.pru_sockaddr = flow_divert_getsockaddr; g_flow_divert_in6_udp_usrreqs.pru_sosend_list = pru_sosend_list_notsupp; g_flow_divert_in6_udp_usrreqs.pru_soreceive_list = pru_soreceive_list_notsupp; g_flow_divert_in6_udp_usrreqs.pru_preconnect = flow_divert_preconnect; @@ -4134,7 +4223,6 @@ flow_divert_init(void) (struct socket_filter *)(uintptr_t)0xdeadbeefdeadbeef; g_flow_divert_in6_udp_protosw.pr_filter_head.tqh_last = (struct socket_filter **)(uintptr_t)0xdeadbeefdeadbeef; -#endif /* INET6 */ flow_divert_grp_attr = lck_grp_attr_alloc_init(); if (flow_divert_grp_attr == NULL) { diff --git a/bsd/netinet/flow_divert.h b/bsd/netinet/flow_divert.h index bc1b636a7..a75b4dc63 100644 --- a/bsd/netinet/flow_divert.h +++ b/bsd/netinet/flow_divert.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012-2017 Apple Inc. All rights reserved. + * Copyright (c) 2012-2017, 2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -36,27 +36,31 @@ struct flow_divert_trie_node; struct flow_divert_pcb { decl_lck_mtx_data(, mtx); - socket_t so; - RB_ENTRY(flow_divert_pcb) rb_link; - uint32_t hash; - mbuf_t connect_token; - struct sockaddr *local_address; - struct sockaddr *remote_address; - uint32_t flags; - uint32_t send_window; - uint32_t sb_size; - struct flow_divert_group *group; - uint32_t control_group_unit; - int32_t ref_count; - uint32_t bytes_written_by_app; - uint32_t bytes_read_by_app; - uint32_t bytes_sent; - uint32_t bytes_received; - uint8_t log_level; - SLIST_ENTRY(flow_divert_pcb) tmp_list_entry; - mbuf_t connect_packet; - uint8_t *app_data; - size_t app_data_length; + socket_t so; + RB_ENTRY(flow_divert_pcb) rb_link; + uint32_t hash; + mbuf_t connect_token; + uint32_t flags; + uint32_t send_window; + struct flow_divert_group *group; + uint32_t control_group_unit; + uint32_t aggregate_unit; + uint32_t policy_control_unit; + int32_t ref_count; + uint32_t bytes_written_by_app; + uint32_t bytes_read_by_app; + uint32_t bytes_sent; + uint32_t bytes_received; + uint8_t log_level; + SLIST_ENTRY(flow_divert_pcb) tmp_list_entry; + mbuf_t connect_packet; + uint8_t *app_data; + size_t app_data_length; + union sockaddr_in_4_6 local_endpoint; + struct sockaddr *original_remote_endpoint; + struct ifnet *original_last_outifp6; + struct ifnet *original_last_outifp; + uint8_t original_vflag; }; RB_HEAD(fd_pcb_tree, flow_divert_pcb); @@ -66,12 +70,12 @@ struct flow_divert_trie { uint16_t *child_maps; uint8_t *bytes; void *memory; - size_t nodes_count; - size_t child_maps_count; - size_t bytes_count; - size_t nodes_free_next; - size_t child_maps_free_next; - size_t bytes_free_next; + uint16_t nodes_count; + uint16_t child_maps_count; + uint16_t bytes_count; + uint16_t nodes_free_next; + uint16_t child_maps_free_next; + uint16_t bytes_free_next; uint16_t root; }; @@ -91,7 +95,7 @@ void flow_divert_init(void); void flow_divert_detach(struct socket *so); errno_t flow_divert_token_set(struct socket *so, struct sockopt *sopt); errno_t flow_divert_token_get(struct socket *so, struct sockopt *sopt); -errno_t flow_divert_pcb_init(struct socket *so, uint32_t ctl_unit); +errno_t flow_divert_pcb_init(struct socket *so); errno_t flow_divert_connect_out(struct socket *so, struct sockaddr *to, proc_t p); errno_t flow_divert_implicit_data_out(struct socket *so, int flags, mbuf_t data, struct sockaddr *to, mbuf_t control, struct proc *p); diff --git 
a/bsd/netinet/flow_divert_proto.h b/bsd/netinet/flow_divert_proto.h index 705fa6b7e..424a5bbde 100644 --- a/bsd/netinet/flow_divert_proto.h +++ b/bsd/netinet/flow_divert_proto.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012-2017 Apple Inc. All rights reserved. + * Copyright (c) 2012-2017, 2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -63,7 +63,7 @@ #define FLOW_DIVERT_TLV_TARGET_PORT 23 #define FLOW_DIVERT_TLV_CDHASH 24 #define FLOW_DIVERT_TLV_SIGNING_ID 25 - +#define FLOW_DIVERT_TLV_AGGREGATE_UNIT 26 #define FLOW_DIVERT_TLV_PREFIX_COUNT 28 #define FLOW_DIVERT_TLV_FLAGS 29 @@ -83,11 +83,13 @@ #define FLOW_DIVERT_TOKEN_GETOPT_MAX_SIZE 128 #define FLOW_DIVERT_TOKEN_FLAG_VALIDATED 0x0000001 -#define FLOW_DIVERT_TOKEN_FLAG_TFO 0x0000002 +#define FLOW_DIVERT_TOKEN_FLAG_TFO 0x0000002 #define FLOW_DIVERT_TOKEN_FLAG_MPTCP 0x0000004 #define FLOW_DIVERT_GROUP_FLAG_NO_APP_MAP 0x0000001 +#define FLOW_DIVERT_IS_TRANSPARENT 0x80000000 + struct flow_divert_packet_header { uint8_t packet_type; uint32_t conn_id; diff --git a/bsd/netinet/icmp6.h b/bsd/netinet/icmp6.h index 786869ff1..5c3207167 100644 --- a/bsd/netinet/icmp6.h +++ b/bsd/netinet/icmp6.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2018 Apple Inc. All rights reserved. + * Copyright (c) 2000-2019 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -93,7 +93,12 @@ #ifndef _NETINET_ICMP6_H_ #define _NETINET_ICMP6_H_ +#ifndef DRIVERKIT #include +#include +#else +#include +#endif /* DRIVERKIT */ #define ICMPV6_PLD_MAXLEN 1232 /* IPV6_MMTU - sizeof(struct ip6_hdr) * - sizeof(struct icmp6_hdr) */ @@ -179,9 +184,10 @@ struct icmp6_hdr { #define ICMP6_TIME_EXCEED_TRANSIT 0 /* ttl==0 in transit */ #define ICMP6_TIME_EXCEED_REASSEMBLY 1 /* ttl==0 in reass */ -#define ICMP6_PARAMPROB_HEADER 0 /* erroneous header field */ -#define ICMP6_PARAMPROB_NEXTHEADER 1 /* unrecognized next header */ -#define ICMP6_PARAMPROB_OPTION 2 /* unrecognized option */ +#define ICMP6_PARAMPROB_HEADER 0 /* erroneous header field */ +#define ICMP6_PARAMPROB_NEXTHEADER 1 /* unrecognized next header */ +#define ICMP6_PARAMPROB_OPTION 2 /* unrecognized option */ +#define ICMP6_PARAMPROB_FIRSTFRAG_INCOMP_HDR 3 /* first fragment has incomplete IPv6 Header Chain */ #define ICMP6_INFOMSG_MASK 0x80 /* all informational messages */ @@ -263,10 +269,7 @@ struct nd_router_advert { /* router advertisement */ #define ND_RA_FLAG_OTHER 0x40 #define ND_RA_FLAG_HA 0x20 -/* - * Router preference values based on draft-draves-ipngwg-router-selection-01. - * These are non-standard definitions. 
- */ +/* Router preference values based on RFC 4191 */ #define ND_RA_FLAG_RTPREF_MASK 0x18 /* 00011000 */ #define ND_RA_FLAG_RTPREF_HIGH 0x08 /* 00001000 */ @@ -333,10 +336,10 @@ struct nd_opt_hdr { /* Neighbor discovery option header */ #define ND_OPT_REDIRECTED_HEADER 4 #define ND_OPT_MTU 5 #define ND_OPT_NONCE 14 /* RFC 3971 */ +#define ND_OPT_ROUTE_INFO 24 /* RFC 4191 */ #define ND_OPT_RDNSS 25 /* RFC 6106 */ #define ND_OPT_DNSSL 31 /* RFC 6106 */ - -#define ND_OPT_ROUTE_INFO 200 /* draft-ietf-ipngwg-router-preference, not officially assigned yet */ +#define ND_OPT_CAPTIVE_PORTAL 37 /* RFC 7710 */ struct nd_opt_prefix_info { /* prefix information */ u_int8_t nd_opt_pi_type; @@ -659,6 +662,7 @@ struct icmp6stat { u_quad_t icp6s_badra; /* bad router advertisement */ u_quad_t icp6s_badredirect; /* bad redirect message */ u_quad_t icp6s_rfc6980_drop; /* NDP packet dropped based on RFC 6980 */ + u_quad_t icp6s_badpkttoobig; /* bad packet too big */ }; /* @@ -740,7 +744,7 @@ void icmp6_error2(struct mbuf *, int, int, int, struct ifnet *); int icmp6_input(struct mbuf **, int *, int); void icmp6_reflect(struct mbuf *, size_t); void icmp6_prepare(struct mbuf *); -void icmp6_redirect_input(struct mbuf *, int); +void icmp6_redirect_input(struct mbuf *, int, int); void icmp6_redirect_output(struct mbuf *, struct rtentry *); struct ip6ctlparam; @@ -807,6 +811,7 @@ extern lck_rw_t icmp6_ifs_rwlock; } \ } while (0) +#define ICMP6_REDIRACCEPT_DEFAULT 1 extern int icmp6_rediraccept; /* accept/process redirects */ extern int icmp6_redirtimeout; /* cache time for redirect routes */ diff --git a/bsd/netinet/icmp_var.h b/bsd/netinet/icmp_var.h index c1cb82595..42982241e 100644 --- a/bsd/netinet/icmp_var.h +++ b/bsd/netinet/icmp_var.h @@ -106,14 +106,11 @@ struct icmpstat { SYSCTL_DECL(_net_inet_icmp); #ifdef ICMP_BANDLIM -extern int badport_bandlim(int); +extern boolean_t badport_bandlim(int which); #endif -#define BANDLIM_UNLIMITED -1 #define BANDLIM_ICMP_UNREACH 0 #define BANDLIM_ICMP_ECHO 1 #define BANDLIM_ICMP_TSTAMP 2 -#define BANDLIM_RST_CLOSEDPORT 3 /* No connection, and no listeners */ -#define BANDLIM_RST_OPENPORT 4 /* No connection, listener */ #define BANDLIM_MAX 4 extern struct icmpstat icmpstat; diff --git a/bsd/netinet/igmp.c b/bsd/netinet/igmp.c index 4e7daeaf1..eec638a6c 100644 --- a/bsd/netinet/igmp.c +++ b/bsd/netinet/igmp.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2019 Apple Inc. All rights reserved. + * Copyright (c) 2000-2020 Apple Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -114,7 +114,7 @@ SLIST_HEAD(igmp_inm_relhead, in_multi); static void igi_initvar(struct igmp_ifinfo *, struct ifnet *, int); -static struct igmp_ifinfo *igi_alloc(int); +static struct igmp_ifinfo *igi_alloc(zalloc_flags_t); static void igi_free(struct igmp_ifinfo *); static void igi_delete(const struct ifnet *, struct igmp_inm_relhead *); static void igmp_dispatch_queue(struct igmp_ifinfo *, struct ifqueue *, @@ -160,7 +160,7 @@ static int igmp_v3_enqueue_filter_change(struct ifqueue *, struct in_multi *); static void igmp_v3_process_group_timers(struct igmp_ifinfo *, struct ifqueue *, struct ifqueue *, struct in_multi *, - const int); + const unsigned int); static int igmp_v3_merge_state_changes(struct in_multi *, struct ifqueue *); static void igmp_v3_suppress_group_record(struct in_multi *); @@ -302,11 +302,8 @@ static int igmp_timers_are_running; VERIFY(SLIST_EMPTY(_head)); \ } -#define IGI_ZONE_MAX 64 /* maximum elements in zone */ -#define IGI_ZONE_NAME "igmp_ifinfo" /* zone name */ - -static unsigned int igi_size; /* size of zone element */ -static struct zone *igi_zone; /* zone for igmp_ifinfo */ +static ZONE_DECLARE(igi_zone, "igmp_ifinfo", + sizeof(struct igmp_ifinfo), ZC_ZFREE_CLEARMEM); /* Store IGMPv3 record count in the module private scratch space */ #define vt_nrecs pkt_mpriv.__mpriv_u.__mpriv32[0].__mpriv32_u.__val16[0] @@ -396,7 +393,7 @@ sysctl_igmp_gsr SYSCTL_HANDLER_ARGS IGMP_LOCK(); - i = igmp_gsrdelay.tv_sec; + i = (int)igmp_gsrdelay.tv_sec; error = sysctl_handle_int(oidp, &i, 0, req); if (error || !req->newptr) { @@ -582,7 +579,7 @@ igmp_ra_alloc(void) * Attach IGMP when PF_INET is attached to an interface. */ struct igmp_ifinfo * -igmp_domifattach(struct ifnet *ifp, int how) +igmp_domifattach(struct ifnet *ifp, zalloc_flags_t how) { struct igmp_ifinfo *igi; @@ -744,13 +741,10 @@ igi_initvar(struct igmp_ifinfo *igi, struct ifnet *ifp, int reattach) } static struct igmp_ifinfo * -igi_alloc(int how) +igi_alloc(zalloc_flags_t how) { - struct igmp_ifinfo *igi; - - igi = (how == M_WAITOK) ? 
zalloc(igi_zone) : zalloc_noblock(igi_zone); + struct igmp_ifinfo *igi = zalloc_flags(igi_zone, how | Z_ZERO); if (igi != NULL) { - bzero(igi, igi_size); lck_mtx_init(&igi->igi_lock, igmp_mtx_grp, igmp_mtx_attr); igi->igi_debug |= IFD_ALLOC; } @@ -1067,7 +1061,7 @@ igmp_v2_update_group(struct in_multi *inm, const int timer) "skipping.\n", __func__)); break; } - /* FALLTHROUGH */ + OS_FALLTHROUGH; case IGMP_SG_QUERY_PENDING_MEMBER: case IGMP_G_QUERY_PENDING_MEMBER: case IGMP_IDLE_MEMBER: @@ -1102,7 +1096,7 @@ igmp_input_v3_query(struct ifnet *ifp, const struct ip *ip, struct in_multi *inm; int is_general_query; uint32_t maxresp, nsrc, qqi; - uint16_t timer; + uint32_t timer; uint8_t qrv; struct igmp_tparams itp = { .qpt = 0, .it = 0, .cst = 0, .sct = 0 }; @@ -1475,6 +1469,7 @@ igmp_input_v1_report(struct ifnet *ifp, struct mbuf *m, /*const*/ struct ip *ip, ("report suppressed for %s on ifp 0x%llx(%s)\n", _igmp_inet_buf, (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp))); + OS_FALLTHROUGH; case IGMP_SLEEPING_MEMBER: inm->inm_state = IGMP_SLEEPING_MEMBER; break; @@ -1622,6 +1617,7 @@ igmp_input_v2_report(struct ifnet *ifp, struct mbuf *m, /*const*/ struct ip *ip, ("report suppressed for %s on ifp 0x%llx(%s)\n", _igmp_inet_buf, (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp))); + OS_FALLTHROUGH; case IGMP_LAZY_MEMBER: inm->inm_state = IGMP_LAZY_MEMBER; break; @@ -1785,7 +1781,7 @@ igmp_input(struct mbuf *m, int off) m_freem(m); return; } - srclen = sizeof(struct in_addr) * nsrc; + srclen = sizeof(struct in_addr) * (uint16_t)nsrc; if (igmplen < (IGMP_V3_QUERY_MINLEN + srclen)) { IGMPSTAT_INC(igps_rcv_tooshort); OIGMPSTAT_INC(igps_rcv_tooshort); @@ -1903,7 +1899,7 @@ igmp_timeout(void *arg) struct ifnet *ifp; struct igmp_ifinfo *igi; struct in_multi *inm; - int loop = 0, uri_sec = 0; + unsigned int loop = 0, uri_sec = 0; SLIST_HEAD(, in_multi) inm_dthead; SLIST_INIT(&inm_dthead); @@ -2175,7 +2171,7 @@ igmp_v1v2_process_group_timer(struct in_multi *inm, const int igmp_version) static void igmp_v3_process_group_timers(struct igmp_ifinfo *igi, struct ifqueue *qrq, struct ifqueue *scq, - struct in_multi *inm, const int uri_sec) + struct in_multi *inm, const unsigned int uri_sec) { int query_response_timer_expired; int state_change_retransmit_timer_expired; @@ -2245,7 +2241,7 @@ igmp_v3_process_group_timers(struct igmp_ifinfo *igi, /* XXX Clear recorded sources for next time. */ inm_clear_recorded(inm); } - /* FALLTHROUGH */ + OS_FALLTHROUGH; case IGMP_REPORTING_MEMBER: case IGMP_LEAVING_MEMBER: if (state_change_retransmit_timer_expired) { @@ -2256,7 +2252,7 @@ igmp_v3_process_group_timers(struct igmp_ifinfo *igi, * reset the timer. 
*/ if (--inm->inm_scrv > 0) { - inm->inm_sctimer = uri_sec; + inm->inm_sctimer = (uint16_t)uri_sec; state_change_timers_running = 1; /* caller will schedule timer */ } @@ -2452,11 +2448,11 @@ igmp_v3_cancel_link_timers(struct igmp_ifinfo *igi) IGI_LOCK(igi); SLIST_INSERT_HEAD(&igi->igi_relinmhead, inm, inm_nrele); IGI_UNLOCK(igi); - /* FALLTHROUGH */ + OS_FALLTHROUGH; case IGMP_G_QUERY_PENDING_MEMBER: case IGMP_SG_QUERY_PENDING_MEMBER: inm_clear_recorded(inm); - /* FALLTHROUGH */ + OS_FALLTHROUGH; case IGMP_REPORTING_MEMBER: inm->inm_state = IGMP_REPORTING_MEMBER; break; @@ -2592,7 +2588,7 @@ igmp_v1v2_queue_report(struct in_multi *inm, const int type) m->m_len = sizeof(struct igmp); igmp = mtod(m, struct igmp *); - igmp->igmp_type = type; + igmp->igmp_type = (u_char)type; igmp->igmp_code = 0; igmp->igmp_group = inm->inm_addr; igmp->igmp_cksum = 0; @@ -2834,7 +2830,7 @@ igmp_initial_join(struct in_multi *inm, struct igmp_ifinfo *igi, inm->inm_scrv = 1; } else { VERIFY(igi->igi_rv > 1); - inm->inm_scrv = igi->igi_rv; + inm->inm_scrv = (uint16_t)igi->igi_rv; } inm->inm_sctimer = 1; itp->sct = 1; @@ -2915,7 +2911,7 @@ igmp_handle_state_change(struct in_multi *inm, struct igmp_ifinfo *igi, * If record(s) were enqueued, start the state-change * report timer for this group. */ - inm->inm_scrv = ((igi->igi_flags & IGIF_LOOPBACK) ? 1 : igi->igi_rv); + inm->inm_scrv = ((igi->igi_flags & IGIF_LOOPBACK) ? 1 : (uint16_t)igi->igi_rv); inm->inm_sctimer = 1; itp->sct = 1; IGI_UNLOCK(igi); @@ -2987,7 +2983,7 @@ igmp_final_leave(struct in_multi *inm, struct igmp_ifinfo *igi, if (igi->igi_flags & IGIF_LOOPBACK) { inm->inm_scrv = 1; } else { - inm->inm_scrv = igi->igi_rv; + inm->inm_scrv = (uint16_t)igi->igi_rv; } IGMP_INET_PRINTF(inm->inm_addr, ("%s: Leaving %s/%s with %d " @@ -3084,12 +3080,13 @@ igmp_v3_enqueue_group_record(struct ifqueue *ifq, struct in_multi *inm, struct ip_msource *ims, *nims; struct mbuf *m0, *m, *md; int error, is_filter_list_change; - int minrec0len, m0srcs, msrcs, nbytes, off; + int minrec0len, m0srcs, nbytes, off; + uint16_t msrcs; int record_has_sources; int now; int type; in_addr_t naddr; - uint8_t mode; + uint16_t mode; u_int16_t ig_numsrc; INM_LOCK_ASSERT_HELD(inm); @@ -3243,7 +3240,7 @@ igmp_v3_enqueue_group_record(struct ifqueue *ifq, struct in_multi *inm, * Append group record. * If we have sources, we don't know how many yet. */ - ig.ig_type = type; + ig.ig_type = (u_char)type; ig.ig_datalen = 0; ig.ig_numsrc = 0; ig.ig_group = inm->inm_addr; @@ -3478,9 +3475,11 @@ igmp_v3_enqueue_filter_change(struct ifqueue *ifq, struct in_multi *inm) struct ip_msource *ims, *nims; struct mbuf *m, *m0, *md; in_addr_t naddr; - int m0srcs, nbytes, npbytes, off, rsrcs, schanged; + int m0srcs, nbytes, npbytes, off, schanged; + uint16_t rsrcs; int nallow, nblock; - uint8_t mode, now, then; + uint16_t mode; + uint8_t now, then; rectype_t crt, drt, nrt; u_int16_t ig_numsrc; @@ -3907,7 +3906,7 @@ igmp_sendpkt(struct mbuf *m) ipopts = igmp_sendra ? 
m_raopt : NULL; - imo = ip_allocmoptions(M_WAITOK); + imo = ip_allocmoptions(Z_WAITOK); if (imo == NULL) { m_freem(m); return; @@ -3950,9 +3949,6 @@ igmp_sendpkt(struct mbuf *m) igmp_scrub_context(m0); m->m_flags &= ~(M_PROTOFLAGS | M_IGMP_LOOP); m0->m_pkthdr.rcvif = lo_ifp; -#ifdef MAC - mac_netinet_igmp_send(ifp, m0); -#endif if (ifp->if_eflags & IFEF_TXSTART) { /* @@ -3992,7 +3988,7 @@ igmp_v3_encap_report(struct ifnet *ifp, struct mbuf *m) { struct igmp_report *igmp; struct ip *ip; - int hdrlen, igmpreclen; + unsigned int hdrlen, igmpreclen; VERIFY((m->m_flags & M_PKTHDR)); @@ -4008,6 +4004,12 @@ igmp_v3_encap_report(struct ifnet *ifp, struct mbuf *m) } m->m_flags |= M_IGMPV3_HDR; } + if (hdrlen + igmpreclen > USHRT_MAX) { + IGMP_PRINTF(("%s: invalid length %d\n", __func__, hdrlen + igmpreclen)); + m_freem(m); + return NULL; + } + IGMP_PRINTF(("%s: igmpreclen is %d\n", __func__, igmpreclen)); @@ -4028,7 +4030,7 @@ igmp_v3_encap_report(struct ifnet *ifp, struct mbuf *m) ip = mtod(m, struct ip *); ip->ip_tos = IPTOS_PREC_INTERNETCONTROL; - ip->ip_len = hdrlen + igmpreclen; + ip->ip_len = (u_short)(hdrlen + igmpreclen); ip->ip_off = IP_DF; ip->ip_p = IPPROTO_IGMP; ip->ip_sum = 0; @@ -4101,14 +4103,4 @@ igmp_init(struct protosw *pp, struct domain *dp) LIST_INIT(&igi_head); m_raopt = igmp_ra_alloc(); - - igi_size = sizeof(struct igmp_ifinfo); - igi_zone = zinit(igi_size, IGI_ZONE_MAX * igi_size, - 0, IGI_ZONE_NAME); - if (igi_zone == NULL) { - panic("%s: failed allocating %s", __func__, IGI_ZONE_NAME); - /* NOTREACHED */ - } - zone_change(igi_zone, Z_EXPAND, TRUE); - zone_change(igi_zone, Z_CALLERACCT, FALSE); } diff --git a/bsd/netinet/igmp_var.h b/bsd/netinet/igmp_var.h index 5a592101d..a01e95afe 100644 --- a/bsd/netinet/igmp_var.h +++ b/bsd/netinet/igmp_var.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2013 Apple Inc. All rights reserved. + * Copyright (c) 2000-2020 Apple Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -318,7 +318,7 @@ struct igmp_tparams { extern void igmp_init(struct protosw *, struct domain *); extern int igmp_change_state(struct in_multi *, struct igmp_tparams *); -extern struct igmp_ifinfo *igmp_domifattach(struct ifnet *, int); +extern struct igmp_ifinfo *igmp_domifattach(struct ifnet *, zalloc_flags_t); extern void igmp_domifreattach(struct igmp_ifinfo *); extern void igmp_domifdetach(struct ifnet *); extern void igmp_input(struct mbuf *, int); diff --git a/bsd/netinet/in.c b/bsd/netinet/in.c index 1e8a63778..64b694bca 100644 --- a/bsd/netinet/in.c +++ b/bsd/netinet/in.c @@ -143,18 +143,18 @@ static int in_getconnids(struct socket *, sae_associd_t, uint32_t *, user_addr_t /* IPv4 Layer 2 neighbor cache management routines */ static void in_lltable_destroy_lle_unlocked(struct llentry *lle); static void in_lltable_destroy_lle(struct llentry *lle); -static struct llentry *in_lltable_new(struct in_addr addr4, u_int flags); +static struct llentry *in_lltable_new(struct in_addr addr4, uint16_t flags); static int in_lltable_match_prefix(const struct sockaddr *saddr, - const struct sockaddr *smask, u_int flags, struct llentry *lle); + const struct sockaddr *smask, uint16_t flags, struct llentry *lle); static void in_lltable_free_entry(struct lltable *llt, struct llentry *lle); -static int in_lltable_rtcheck(struct ifnet *ifp, u_int flags, const struct sockaddr *l3addr); +static int in_lltable_rtcheck(struct ifnet *ifp, uint16_t flags, const struct sockaddr *l3addr); static inline uint32_t in_lltable_hash_dst(const struct in_addr dst, uint32_t hsize); static uint32_t in_lltable_hash(const struct llentry *lle, uint32_t hsize); static void in_lltable_fill_sa_entry(const struct llentry *lle, struct sockaddr *sa); static inline struct llentry * in_lltable_find_dst(struct lltable *llt, struct in_addr dst); static void in_lltable_delete_entry(struct lltable *llt, struct llentry *lle); -static struct llentry * in_lltable_alloc(struct lltable *llt, u_int flags, const struct sockaddr *l3addr); -static struct llentry * in_lltable_lookup(struct lltable *llt, u_int flags, const struct sockaddr *l3addr); +static struct llentry * in_lltable_alloc(struct lltable *llt, uint16_t flags, const struct sockaddr *l3addr); +static struct llentry * in_lltable_lookup(struct lltable *llt, uint16_t flags, const struct sockaddr *l3addr); static int in_lltable_dump_entry(struct lltable *llt, struct llentry *lle, struct sysctl_req *wr); static struct lltable * in_lltattach(struct ifnet *ifp); @@ -203,7 +203,6 @@ static unsigned int inifa_debug; /* debugging (disabled) */ static unsigned int inifa_size; /* size of zone element */ static struct zone *inifa_zone; /* zone for in_ifaddr */ -#define INIFA_ZONE_MAX 64 /* maximum elements in zone */ #define INIFA_ZONE_NAME "in_ifaddr" /* zone name */ static const unsigned int in_extra_size = sizeof(struct in_ifextra); @@ -333,7 +332,7 @@ in_socktrim(struct sockaddr_in *ap) ap->sin_len = 0; while (--cp >= cplim) { if (*cp) { - (ap)->sin_len = cp - (char *)(ap) + 1; + (ap)->sin_len = (uint8_t)(cp - (char *)(ap) + 1); break; } } @@ -415,7 +414,7 @@ inctl_associd(struct socket *so, u_long cmd, caddr_t data) case SIOCGASSOCIDS64: /* struct so_aidreq64 */ bcopy(data, &u.a64, sizeof(u.a64)); - error = in_getassocids(so, &u.a64.sar_cnt, u.a64.sar_aidp); + error = in_getassocids(so, &u.a64.sar_cnt, (user_addr_t)u.a64.sar_aidp); if (error == 0) { bcopy(&u.a64, data, sizeof(u.a64)); } @@ -453,7 +452,7 @@ inctl_connid(struct socket *so, 
u_long cmd, caddr_t data) case SIOCGCONNIDS64: /* struct so_cidreq64 */ bcopy(data, &u.c64, sizeof(u.c64)); error = in_getconnids(so, u.c64.scr_aid, &u.c64.scr_cnt, - u.c64.scr_cidp); + (user_addr_t)u.c64.scr_cidp); if (error == 0) { bcopy(&u.c64, data, sizeof(u.c64)); } @@ -494,9 +493,9 @@ inctl_conninfo(struct socket *so, u_long cmd, caddr_t data) case SIOCGCONNINFO64: /* struct so_cinforeq64 */ bcopy(data, &u.ci64, sizeof(u.ci64)); error = in_getconninfo(so, u.ci64.scir_cid, &u.ci64.scir_flags, - &u.ci64.scir_ifindex, &u.ci64.scir_error, u.ci64.scir_src, - &u.ci64.scir_src_len, u.ci64.scir_dst, &u.ci64.scir_dst_len, - &u.ci64.scir_aux_type, u.ci64.scir_aux_data, + &u.ci64.scir_ifindex, &u.ci64.scir_error, (user_addr_t)u.ci64.scir_src, + &u.ci64.scir_src_len, (user_addr_t)u.ci64.scir_dst, &u.ci64.scir_dst_len, + &u.ci64.scir_aux_type, (user_addr_t)u.ci64.scir_aux_data, &u.ci64.scir_aux_len); if (error == 0) { bcopy(&u.ci64, data, sizeof(u.ci64)); @@ -537,11 +536,11 @@ inctl_autoaddr(struct ifnet *ifp, struct ifreq *ifr) intval = 0; /* be safe; clear flag if set */ error = EBUSY; } else { - ifp->if_eflags |= IFEF_AUTOCONFIGURING; + if_set_eflags(ifp, IFEF_AUTOCONFIGURING); } } if (!intval) { - ifp->if_eflags &= ~IFEF_AUTOCONFIGURING; + if_clear_eflags(ifp, IFEF_AUTOCONFIGURING); } ifnet_lock_done(ifp); @@ -575,11 +574,11 @@ inctl_arpipll(struct ifnet *ifp, struct ifreq *ifr) intval = 0; /* be safe; clear flag if set */ error = EBUSY; } else { - ifp->if_eflags |= IFEF_ARPLL; + if_set_eflags(ifp, IFEF_ARPLL); } } if (!intval) { - ifp->if_eflags &= ~IFEF_ARPLL; + if_clear_eflags(ifp, IFEF_ARPLL); } ifnet_lock_done(ifp); @@ -611,13 +610,19 @@ inctl_setrouter(struct ifnet *ifp, struct ifreq *ifr) } bcopy(&ifr->ifr_intval, &intval, sizeof(intval)); - + switch (intval) { + case 0: + case 1: + break; + default: + return EINVAL; + } ifnet_lock_exclusive(ifp); - if (intval) { - ifp->if_eflags |= IFEF_IPV4_ROUTER; - ifp->if_eflags &= ~(IFEF_ARPLL | IFEF_AUTOCONFIGURING); + if (intval != 0) { + if_set_eflags(ifp, IFEF_IPV4_ROUTER); + if_clear_eflags(ifp, (IFEF_ARPLL | IFEF_AUTOCONFIGURING)); } else { - ifp->if_eflags &= ~IFEF_IPV4_ROUTER; + if_clear_eflags(ifp, IFEF_IPV4_ROUTER); } ifnet_lock_done(ifp); @@ -699,7 +704,11 @@ inctl_ifaddr(struct ifnet *ifp, struct in_ifaddr *ia, u_long cmd, IFA_UNLOCK(&ia->ia_ifa); in_ifscrub(ifp, ia, 0); IFA_LOCK(&ia->ia_ifa); - ia->ia_sockmask = mask; + ia->ia_sockmask.sin_len = sizeof(struct sockaddr_in); + ia->ia_sockmask.sin_family = AF_INET; + ia->ia_sockmask.sin_port = 0; + ia->ia_sockmask.sin_addr = mask.sin_addr; + bzero(&ia->ia_sockmask.sin_zero, sizeof(ia->ia_dstaddr.sin_zero)); ia->ia_subnetmask = ntohl(ia->ia_sockmask.sin_addr.s_addr); maskIsNew = 1; @@ -709,10 +718,10 @@ inctl_ifaddr(struct ifnet *ifp, struct in_ifaddr *ia, u_long cmd, IFA_UNLOCK(&ia->ia_ifa); in_ifscrub(ifp, ia, 0); IFA_LOCK(&ia->ia_ifa); - ia->ia_dstaddr = broadaddr; ia->ia_dstaddr.sin_family = AF_INET; ia->ia_dstaddr.sin_len = sizeof(struct sockaddr_in); ia->ia_dstaddr.sin_port = 0; + ia->ia_dstaddr.sin_addr = broadaddr.sin_addr; bzero(&ia->ia_dstaddr.sin_zero, sizeof(ia->ia_dstaddr.sin_zero)); maskIsNew = 1; /* We lie; but the effect's the same */ } @@ -948,10 +957,11 @@ inctl_ifdstaddr(struct ifnet *ifp, struct in_ifaddr *ia, u_long cmd, IFA_LOCK(&ia->ia_ifa); dstaddr = ia->ia_dstaddr; - bcopy(&ifr->ifr_dstaddr, &ia->ia_dstaddr, sizeof(dstaddr)); ia->ia_dstaddr.sin_family = AF_INET; ia->ia_dstaddr.sin_len = sizeof(struct sockaddr_in); ia->ia_dstaddr.sin_port = 0; + 
bcopy(&(SIN(&ifr->ifr_dstaddr)->sin_addr), + &ia->ia_dstaddr.sin_addr, sizeof(ia->ia_dstaddr.sin_addr)); bzero(&ia->ia_dstaddr.sin_zero, sizeof(ia->ia_dstaddr.sin_zero)); IFA_UNLOCK(&ia->ia_ifa); @@ -1068,12 +1078,12 @@ inctl_ifbrdaddr(struct ifnet *ifp, struct in_ifaddr *ia, u_long cmd, case SIOCSIFBRDADDR: /* struct ifreq */ IFA_LOCK(&ia->ia_ifa); - bcopy(&ifr->ifr_broadaddr, &ia->ia_broadaddr, - sizeof(struct sockaddr_in)); ia->ia_broadaddr.sin_family = AF_INET; ia->ia_broadaddr.sin_len = sizeof(struct sockaddr_in); ia->ia_broadaddr.sin_port = 0; + bcopy(&(SIN(&ifr->ifr_broadaddr)->sin_addr), + &ia->ia_broadaddr.sin_addr, sizeof(ia->ia_broadaddr.sin_addr)); bzero(&ia->ia_broadaddr.sin_zero, sizeof(ia->ia_broadaddr.sin_zero)); ev_msg.vendor_code = KEV_VENDOR_APPLE; @@ -1219,6 +1229,7 @@ in_control(struct socket *so, u_long cmd, caddr_t data, struct ifnet *ifp, struct in_ifaddr *ia = NULL; struct ifaddr *ifa; int error = 0; + int intval; /* In case it's NULL, make sure it came from the kernel */ VERIFY(so != NULL || p == kernproc); @@ -1269,6 +1280,12 @@ in_control(struct socket *so, u_long cmd, caddr_t data, struct ifnet *ifp, return inctl_arpipll(ifp, ifr); /* NOTREACHED */ + case SIOCGETROUTERMODE: /* struct ifreq */ + intval = (ifp->if_eflags & IFEF_IPV4_ROUTER) != 0 ? 1 : 0; + bcopy(&intval, &ifr->ifr_intval, sizeof(intval)); + return 0; + /* NOTREACHED */ + case SIOCSETROUTERMODE: /* struct ifreq */ if (!privileged) { return EPERM; @@ -1326,7 +1343,7 @@ in_control(struct socket *so, u_long cmd, caddr_t data, struct ifnet *ifp, if (!privileged) { return EPERM; } - /* FALLTHRU */ + OS_FALLTHROUGH; case SIOCGIFADDR: /* struct ifreq */ case SIOCGIFDSTADDR: /* struct ifreq */ case SIOCGIFNETMASK: /* struct ifreq */ @@ -1472,7 +1489,7 @@ in_control(struct socket *so, u_long cmd, caddr_t data, struct ifnet *ifp, IFA_UNLOCK(&ia->ia_ifa); } - /* FALLTHROUGH */ + OS_FALLTHROUGH; case SIOCSIFADDR: /* struct ifreq */ case SIOCSIFDSTADDR: /* struct ifreq */ case SIOCSIFNETMASK: /* struct ifreq */ @@ -2046,14 +2063,7 @@ in_ifaddr_init(void) inifa_size = (inifa_debug == 0) ? 
sizeof(struct in_ifaddr) : sizeof(struct in_ifaddr_dbg); - inifa_zone = zinit(inifa_size, INIFA_ZONE_MAX * inifa_size, - 0, INIFA_ZONE_NAME); - if (inifa_zone == NULL) { - panic("%s: failed allocating %s", __func__, INIFA_ZONE_NAME); - /* NOTREACHED */ - } - zone_change(inifa_zone, Z_EXPAND, TRUE); - zone_change(inifa_zone, Z_CALLERACCT, FALSE); + inifa_zone = zone_create(INIFA_ZONE_NAME, inifa_size, ZC_NONE); lck_mtx_init(&inifa_trash_lock, ifa_mtx_grp, ifa_mtx_attr); TAILQ_INIT(&inifa_trash_head); @@ -2384,7 +2394,7 @@ in_lltable_destroy_lle(struct llentry *lle) } static struct llentry * -in_lltable_new(struct in_addr addr4, u_int flags) +in_lltable_new(struct in_addr addr4, uint16_t flags) { #pragma unused(flags) struct in_llentry *lle; @@ -2415,7 +2425,7 @@ in_lltable_new(struct in_addr addr4, u_int flags) static int in_lltable_match_prefix(const struct sockaddr *saddr, - const struct sockaddr *smask, u_int flags, struct llentry *lle) + const struct sockaddr *smask, uint16_t flags, struct llentry *lle) { struct in_addr addr, mask, lle_addr; @@ -2478,7 +2488,7 @@ in_lltable_free_entry(struct lltable *llt, struct llentry *lle) static int -in_lltable_rtcheck(struct ifnet *ifp, u_int flags, const struct sockaddr *l3addr) +in_lltable_rtcheck(struct ifnet *ifp, uint16_t flags, const struct sockaddr *l3addr) { #pragma unused(flags) struct rtentry *rt; @@ -2559,7 +2569,7 @@ in_lltable_delete_entry(struct lltable *llt, struct llentry *lle) } static struct llentry * -in_lltable_alloc(struct lltable *llt, u_int flags, const struct sockaddr *l3addr) +in_lltable_alloc(struct lltable *llt, uint16_t flags, const struct sockaddr *l3addr) { const struct sockaddr_in *sin = (const struct sockaddr_in *) (const void *)l3addr; struct ifnet *ifp = llt->llt_ifp; @@ -2600,7 +2610,7 @@ in_lltable_alloc(struct lltable *llt, u_int flags, const struct sockaddr *l3addr * If found return lle read locked. */ static struct llentry * -in_lltable_lookup(struct lltable *llt, u_int flags, const struct sockaddr *l3addr) +in_lltable_lookup(struct lltable *llt, uint16_t flags, const struct sockaddr *l3addr) { const struct sockaddr_in *sin = (const struct sockaddr_in *)(const void *)l3addr; struct llentry *lle; @@ -2684,7 +2694,7 @@ in_lltable_dump_entry(struct lltable *llt, struct llentry *lle, } arpc.rtm.rtm_rmx.rmx_expire = - lle->la_flags & LLE_STATIC ? 0 : lle->la_expire; + lle->la_flags & LLE_STATIC ? 0 : (int32_t)lle->la_expire; arpc.rtm.rtm_flags |= (RTF_HOST | RTF_LLDATA); if (lle->la_flags & LLE_STATIC) { arpc.rtm.rtm_flags |= RTF_STATIC; diff --git a/bsd/netinet/in.h b/bsd/netinet/in.h index d5b76845e..dc391afe5 100644 --- a/bsd/netinet/in.h +++ b/bsd/netinet/in.h @@ -63,24 +63,31 @@ #ifndef _NETINET_IN_H_ #define _NETINET_IN_H_ + +#ifndef DRIVERKIT #include -#include #include /* uint(8|16|32)_t */ #ifndef KERNEL #include #endif -#include +#else +#include +#include +#endif /* DRIVERKIT */ +#include #include +#ifndef DRIVERKIT /* * POSIX 1003.1-2003 * "Inclusion of the header may also make visible all * symbols from and ". 
*/ #include +#endif /* DRIVERKIT */ /* * The following two #includes insure htonl and family are defined @@ -478,6 +485,7 @@ struct ip_opts { #define IP_PKTINFO 26 /* get pktinfo on recv socket, set src on sent dgram */ #define IP_RECVPKTINFO IP_PKTINFO /* receive pktinfo w/dgram */ #define IP_RECVTOS 27 /* bool; receive IP TOS w/dgram */ +#define IP_DONTFRAG 28 /* don't fragment packet */ #define IP_FW_ADD 40 /* add a firewall rule to chain */ #define IP_FW_DEL 41 /* delete a firewall rule from chain */ @@ -521,7 +529,6 @@ struct ip_opts { #define MCAST_UNBLOCK_SOURCE 85 /* unblock a source */ #ifdef PRIVATE -#define IP_FORCE_OUT_IFP 69 /* not implemented; use IP_BOUND_IF instead */ #define IP_NO_IFT_CELLULAR 6969 /* for internal use only */ #define IP_NO_IFT_PDP IP_NO_IFT_CELLULAR /* deprecated */ #define IP_OUT_IF 9696 /* for internal use only */ @@ -549,6 +556,7 @@ struct ip_opts { #define IP_MAX_SOCK_SRC_FILTER 128 /* sources per socket/group */ #define IP_MAX_SOCK_MUTE_FILTER 128 /* XXX no longer used */ +#ifndef PLATFORM_DriverKit /* * Argument structure for IP_ADD_MEMBERSHIP and IP_DROP_MEMBERSHIP. */ @@ -648,6 +656,7 @@ int setsourcefilter(int, uint32_t, struct sockaddr *, socklen_t, int getsourcefilter(int, uint32_t, struct sockaddr *, socklen_t, uint32_t *, uint32_t *, struct sockaddr_storage *) __OSX_AVAILABLE_STARTING(__MAC_10_7, __IPHONE_4_3); #endif +#endif /* PLATFORM_DriverKit */ /* * Filter modes; also used to represent per-socket filter mode internally. @@ -856,6 +865,7 @@ union sockaddr_in_4_6 { #endif /* PRIVATE */ +#ifndef PLATFORM_DriverKit #ifdef KERNEL #ifdef BSD_KERNEL_PRIVATE #include @@ -950,4 +960,5 @@ int bindresvport_sa(int, struct sockaddr *); __END_DECLS #endif #endif /* !KERNEL */ +#endif /* PLATFORM_DriverKit */ #endif /* _NETINET_IN_H_ */ diff --git a/bsd/netinet/in_arp.c b/bsd/netinet/in_arp.c index d13e22e9a..01d60970a 100644 --- a/bsd/netinet/in_arp.c +++ b/bsd/netinet/in_arp.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2004-2017 Apple Inc. All rights reserved. + * Copyright (c) 2004-2020 Apple Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -157,7 +157,7 @@ static errno_t arp_lookup_route(const struct in_addr *, int, int, route_t *, unsigned int); static int arp_getstat SYSCTL_HANDLER_ARGS; -static struct llinfo_arp *arp_llinfo_alloc(int); +static struct llinfo_arp *arp_llinfo_alloc(zalloc_flags_t); static void arp_llinfo_free(void *); static uint32_t arp_llinfo_flushq(struct llinfo_arp *); static void arp_llinfo_purge(struct rtentry *); @@ -262,9 +262,8 @@ SYSCTL_PROC(_net_link_ether_inet, OID_AUTO, stats, 0, 0, arp_getstat, "S,arpstat", "ARP statistics (struct arpstat, net/if_arp.h)"); -static struct zone *llinfo_arp_zone; -#define LLINFO_ARP_ZONE_MAX 256 /* maximum elements in zone */ -#define LLINFO_ARP_ZONE_NAME "llinfo_arp" /* name for zone */ +static ZONE_DECLARE(llinfo_arp_zone, "llinfo_arp", + sizeof(struct llinfo_arp), ZC_ZFREE_CLEARMEM); void arp_init(void) @@ -273,28 +272,15 @@ arp_init(void) LIST_INIT(&llinfo_arp); - llinfo_arp_zone = zinit(sizeof(struct llinfo_arp), - LLINFO_ARP_ZONE_MAX * sizeof(struct llinfo_arp), 0, - LLINFO_ARP_ZONE_NAME); - if (llinfo_arp_zone == NULL) { - panic("%s: failed allocating llinfo_arp_zone", __func__); - } - - zone_change(llinfo_arp_zone, Z_EXPAND, TRUE); - zone_change(llinfo_arp_zone, Z_CALLERACCT, FALSE); - arpinit_done = 1; } static struct llinfo_arp * -arp_llinfo_alloc(int how) +arp_llinfo_alloc(zalloc_flags_t how) { - struct llinfo_arp *la; + struct llinfo_arp *la = zalloc_flags(llinfo_arp_zone, how | Z_ZERO); - la = (how == M_WAITOK) ? zalloc(llinfo_arp_zone) : - zalloc_noblock(llinfo_arp_zone); - if (la != NULL) { - bzero(la, sizeof(*la)); + if (la) { /* * The type of queue (Q_DROPHEAD) here is just a hint; * the actual logic that works on this queue performs @@ -303,7 +289,6 @@ arp_llinfo_alloc(int how) _qinit(&la->la_holdq, Q_DROPHEAD, (arp_maxhold == 0) ? (uint32_t)-1 : arp_maxhold, QP_MBUF); } - return la; } @@ -969,7 +954,7 @@ arp_rtrequest(int req, struct rtentry *rt, struct sockaddr *sa) RT_LOCK(rt); arpstat.txannounces++; } - /* FALLTHRU */ + OS_FALLTHROUGH; case RTM_RESOLVE: if (gate->sa_family != AF_LINK || gate->sa_len < sizeof(null_sdl)) { @@ -992,11 +977,8 @@ arp_rtrequest(int req, struct rtentry *rt, struct sockaddr *sa) * Case 2: This route may come from cloning, or a manual route * add with a LL address. */ - rt->rt_llinfo = la = arp_llinfo_alloc(M_WAITOK); - if (la == NULL) { - arpstat.reqnobufs++; - break; - } + rt->rt_llinfo = la = arp_llinfo_alloc(Z_WAITOK); + rt->rt_llinfo_get_ri = arp_llinfo_get_ri; rt->rt_llinfo_get_iflri = arp_llinfo_get_iflri; rt->rt_llinfo_purge = arp_llinfo_purge; @@ -1027,12 +1009,14 @@ arp_rtrequest(int req, struct rtentry *rt, struct sockaddr *sa) rt->rt_ifp)) { struct sockaddr_dl *gate_ll = SDL(gate); size_t broadcast_len; - ifnet_llbroadcast_copy_bytes(rt->rt_ifp, + int ret = ifnet_llbroadcast_copy_bytes(rt->rt_ifp, LLADDR(gate_ll), sizeof(gate_ll->sdl_data), &broadcast_len); - gate_ll->sdl_alen = broadcast_len; - gate_ll->sdl_family = AF_LINK; - gate_ll->sdl_len = sizeof(struct sockaddr_dl); + if (ret == 0 && broadcast_len <= UINT8_MAX) { + gate_ll->sdl_alen = (u_char)broadcast_len; + gate_ll->sdl_family = AF_LINK; + gate_ll->sdl_len = sizeof(struct sockaddr_dl); + } /* In case we're called before 1.0 sec. 
has elapsed */ rt_setexpire(rt, MAX(timenow, 1)); } else if (IN_LINKLOCAL(ntohl(SIN(rt_key(rt))-> @@ -1311,8 +1295,8 @@ arp_lookup_ip(ifnet_t ifp, const struct sockaddr_in *net_dest, result = ifnet_llbroadcast_copy_bytes(ifp, LLADDR(ll_dest), ll_dest_len - offsetof(struct sockaddr_dl, sdl_data), &broadcast_len); - if (result == 0) { - ll_dest->sdl_alen = broadcast_len; + if (result == 0 && broadcast_len <= UINT8_MAX) { + ll_dest->sdl_alen = (u_char)broadcast_len; ll_dest->sdl_family = AF_LINK; ll_dest->sdl_len = sizeof(struct sockaddr_dl); } diff --git a/bsd/netinet/in_cksum.c b/bsd/netinet/in_cksum.c index b4cd509ff..f563c431a 100644 --- a/bsd/netinet/in_cksum.c +++ b/bsd/netinet/in_cksum.c @@ -102,7 +102,7 @@ extern uint32_t os_cpu_in_cksum(const void *, uint32_t, uint32_t); uint16_t b_sum16(const void *buf, int len) { - return os_cpu_in_cksum(buf, len, 0); + return (uint16_t)os_cpu_in_cksum(buf, len, 0); } uint16_t inet_cksum_simple(struct mbuf *, int); @@ -121,7 +121,7 @@ in_addword(uint16_t a, uint16_t b) uint64_t sum = a + b; ADDCARRY(sum); - return sum; + return (uint16_t)sum; } uint16_t @@ -133,7 +133,7 @@ in_pseudo(uint32_t a, uint32_t b, uint32_t c) sum = (uint64_t)a + b + c; REDUCE16; - return sum; + return (uint16_t)sum; } uint16_t @@ -145,7 +145,7 @@ in_pseudo64(uint64_t a, uint64_t b, uint64_t c) sum = a + b + c; REDUCE16; - return sum; + return (uint16_t)sum; } /* diff --git a/bsd/netinet/in_gif.c b/bsd/netinet/in_gif.c index 390bdf4e2..deca2dd1b 100644 --- a/bsd/netinet/in_gif.c +++ b/bsd/netinet/in_gif.c @@ -81,9 +81,7 @@ #include #include -#if INET6 #include -#endif #include @@ -145,7 +143,6 @@ in_gif_output( break; } #endif /* INET */ -#if INET6 case AF_INET6: { struct ip6_hdr *ip6; @@ -160,7 +157,6 @@ in_gif_output( tos = (ntohl(ip6->ip6_flow) >> 20) & 0xff; break; } -#endif /* INET6 */ default: #if DEBUG printf("in_gif_output: warning: unknown family %d passed\n", @@ -293,7 +289,6 @@ in_gif_input(struct mbuf *m, int off) break; } #endif -#if INET6 case IPPROTO_IPV6: { struct ip6_hdr *ip6; @@ -316,7 +311,6 @@ in_gif_input(struct mbuf *m, int off) ip6->ip6_flow |= htonl((u_int32_t)itos << 20); break; } -#endif /* INET6 */ default: OSAddAtomic(1, &ipstat.ips_nogif); m_freem(m); diff --git a/bsd/netinet/in_mcast.c b/bsd/netinet/in_mcast.c index b4eed3bd8..38a042af9 100644 --- a/bsd/netinet/in_mcast.c +++ b/bsd/netinet/in_mcast.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2010-2017 Apple Inc. All rights reserved. + * Copyright (c) 2010-2020 Apple Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -116,7 +116,7 @@ imf_graft(struct in_mfilter *, const uint8_t, static int imf_prune(struct in_mfilter *, const struct sockaddr_in *); static void imf_rollback(struct in_mfilter *); static void imf_reap(struct in_mfilter *); -static int imo_grow(struct ip_moptions *, size_t); +static int imo_grow(struct ip_moptions *, uint16_t); static size_t imo_match_group(const struct ip_moptions *, const struct ifnet *, const struct sockaddr_in *); static struct in_msource * @@ -153,9 +153,9 @@ static u_long in_mcast_maxgrpsrc = IP_MAX_GROUP_SRC_FILTER; SYSCTL_LONG(_net_inet_ip_mcast, OID_AUTO, maxgrpsrc, CTLFLAG_RW | CTLFLAG_LOCKED, &in_mcast_maxgrpsrc, "Max source filters per group"); -static u_long in_mcast_maxsocksrc = IP_MAX_SOCK_SRC_FILTER; -SYSCTL_LONG(_net_inet_ip_mcast, OID_AUTO, maxsocksrc, - CTLFLAG_RW | CTLFLAG_LOCKED, &in_mcast_maxsocksrc, +static u_int in_mcast_maxsocksrc = IP_MAX_SOCK_SRC_FILTER; +SYSCTL_UINT(_net_inet_ip_mcast, OID_AUTO, maxsocksrc, + CTLFLAG_RW | CTLFLAG_LOCKED, &in_mcast_maxsocksrc, IP_MAX_SOCK_SRC_FILTER, "Max source filters per socket"); int in_mcast_loop = IP_DEFAULT_MULTICAST_LOOP; @@ -192,28 +192,19 @@ struct in_multi_dbg { static TAILQ_HEAD(, in_multi_dbg) inm_trash_head; static decl_lck_mtx_data(, inm_trash_lock); -#define INM_ZONE_MAX 64 /* maximum elements in zone */ -#define INM_ZONE_NAME "in_multi" /* zone name */ #if DEBUG static unsigned int inm_debug = 1; /* debugging (enabled) */ #else static unsigned int inm_debug; /* debugging (disabled) */ #endif /* !DEBUG */ -static unsigned int inm_size; /* size of zone element */ +#define INM_ZONE_NAME "in_multi" /* zone name */ static struct zone *inm_zone; /* zone for in_multi */ -#define IPMS_ZONE_MAX 64 /* maximum elements in zone */ -#define IPMS_ZONE_NAME "ip_msource" /* zone name */ - -static unsigned int ipms_size; /* size of zone element */ -static struct zone *ipms_zone; /* zone for ip_msource */ - -#define INMS_ZONE_MAX 64 /* maximum elements in zone */ -#define INMS_ZONE_NAME "in_msource" /* zone name */ - -static unsigned int inms_size; /* size of zone element */ -static struct zone *inms_zone; /* zone for in_msource */ +static ZONE_DECLARE(ipms_zone, "ip_msource", sizeof(struct ip_msource), + ZC_ZFREE_CLEARMEM); +static ZONE_DECLARE(inms_zone, "in_msource", sizeof(struct in_msource), + ZC_ZFREE_CLEARMEM); /* Lock group and attribute for in_multihead_lock lock */ static lck_attr_t *in_multihead_lock_attr; @@ -223,14 +214,14 @@ static lck_grp_attr_t *in_multihead_lock_grp_attr; static decl_lck_rw_data(, in_multihead_lock); struct in_multihead in_multihead; -static struct in_multi *in_multi_alloc(int); +static struct in_multi *in_multi_alloc(zalloc_flags_t); static void in_multi_free(struct in_multi *); static void in_multi_attach(struct in_multi *); static void inm_trace(struct in_multi *, int); -static struct ip_msource *ipms_alloc(int); +static struct ip_msource *ipms_alloc(zalloc_flags_t); static void ipms_free(struct ip_msource *); -static struct in_msource *inms_alloc(int); +static struct in_msource *inms_alloc(zalloc_flags_t); static void inms_free(struct in_msource *); static __inline int @@ -262,7 +253,7 @@ inm_is_ifp_detached(const struct in_multi *inm) * with an empty source filter list. 
*/ static __inline__ void -imf_init(struct in_mfilter *imf, const int st0, const int st1) +imf_init(struct in_mfilter *imf, const uint8_t st0, const uint8_t st1) { memset(imf, 0, sizeof(struct in_mfilter)); RB_INIT(&imf->imf_sources); @@ -274,14 +265,14 @@ imf_init(struct in_mfilter *imf, const int st0, const int st1) * Resize the ip_moptions vector to the next power-of-two minus 1. */ static int -imo_grow(struct ip_moptions *imo, size_t newmax) +imo_grow(struct ip_moptions *imo, uint16_t newmax) { struct in_multi **nmships; struct in_multi **omships; struct in_mfilter *nmfilters; struct in_mfilter *omfilters; - size_t idx; - size_t oldmax; + uint16_t idx; + uint16_t oldmax; IMO_LOCK_ASSERT_HELD(imo); @@ -629,12 +620,8 @@ in_getmulti(struct ifnet *ifp, const struct in_addr *group, * * The initial source filter state is INCLUDE, {} as per the RFC. */ - inm = in_multi_alloc(M_WAITOK); - if (inm == NULL) { - in_multihead_lock_done(); - IFMA_REMREF(ifma); - return ENOMEM; - } + inm = in_multi_alloc(Z_WAITOK); + INM_LOCK(inm); inm->inm_addr = *group; inm->inm_ifp = ifp; @@ -721,10 +708,7 @@ inm_record_source(struct in_multi *inm, const in_addr_t naddr) if (inm->inm_nsrc == in_mcast_maxgrpsrc) { return -ENOSPC; } - nims = ipms_alloc(M_WAITOK); - if (nims == NULL) { - return -ENOMEM; - } + nims = ipms_alloc(Z_WAITOK); nims->ims_haddr = find.ims_haddr; RB_INSERT(ip_msource_tree, &inm->inm_srcs, nims); ++inm->inm_nsrc; @@ -773,10 +757,7 @@ imf_get_source(struct in_mfilter *imf, const struct sockaddr_in *psin, if (imf->imf_nsrc == in_mcast_maxsocksrc) { return ENOSPC; } - lims = inms_alloc(M_WAITOK); - if (lims == NULL) { - return ENOMEM; - } + lims = inms_alloc(Z_WAITOK); lims->ims_haddr = find.ims_haddr; lims->imsl_st[0] = MCAST_UNDEFINED; RB_INSERT(ip_msource_tree, &imf->imf_sources, @@ -805,10 +786,7 @@ imf_graft(struct in_mfilter *imf, const uint8_t st1, { struct in_msource *lims; - lims = inms_alloc(M_WAITOK); - if (lims == NULL) { - return NULL; - } + lims = inms_alloc(Z_WAITOK); lims->ims_haddr = ntohl(psin->sin_addr.s_addr); lims->imsl_st[0] = MCAST_UNDEFINED; lims->imsl_st[1] = st1; @@ -989,10 +967,7 @@ inm_get_source(struct in_multi *inm, const in_addr_t haddr, if (inm->inm_nsrc == in_mcast_maxgrpsrc) { return ENOSPC; } - nims = ipms_alloc(M_WAITOK); - if (nims == NULL) { - return ENOMEM; - } + nims = ipms_alloc(Z_WAITOK); nims->ims_haddr = haddr; RB_INSERT(ip_msource_tree, &inm->inm_srcs, nims); ++inm->inm_nsrc; @@ -1485,7 +1460,7 @@ inp_block_unblock_source(struct inpcb *inp, struct sockopt *sopt) struct in_msource *ims; struct in_multi *inm; size_t idx; - uint16_t fmode; + uint8_t fmode; int error, doblock; unsigned int ifindex = 0; struct igmp_tparams itp; @@ -1703,7 +1678,7 @@ inp_findmoptions(struct inpcb *inp) return imo; } - imo = ip_allocmoptions(M_WAITOK); + imo = ip_allocmoptions(Z_WAITOK); if (imo == NULL) { return NULL; } @@ -1727,7 +1702,7 @@ inp_findmoptions(struct inpcb *inp) imo->imo_multicast_addr.s_addr = INADDR_ANY; imo->imo_multicast_vif = -1; imo->imo_multicast_ttl = IP_DEFAULT_MULTICAST_TTL; - imo->imo_multicast_loop = in_mcast_loop; + imo->imo_multicast_loop = !!in_mcast_loop; imo->imo_num_memberships = 0; imo->imo_max_memberships = IP_MIN_MEMBERSHIPS; imo->imo_membership = immp; @@ -1761,7 +1736,8 @@ inp_get_source_filters(struct inpcb *inp, struct sockopt *sopt) struct sockaddr_storage *ptss; struct sockaddr_storage *tss; int error; - size_t idx, nsrcs, ncsrcs; + size_t idx; + uint32_t nsrcs, ncsrcs; user_addr_t tmp_ptr; imo = inp->inp_moptions; @@ -1840,7 
+1816,7 @@ inp_get_source_filters(struct inpcb *inp, struct sockopt *sopt) */ if (IS_64BIT_PROCESS(current_proc())) { - tmp_ptr = msfr64.msfr_srcs; + tmp_ptr = CAST_USER_ADDR_T(msfr64.msfr_srcs); } else { tmp_ptr = CAST_USER_ADDR_T(msfr32.msfr_srcs); } @@ -1883,7 +1859,7 @@ inp_get_source_filters(struct inpcb *inp, struct sockopt *sopt) IMO_UNLOCK(imo); if (tss != NULL) { - error = copyout(tss, tmp_ptr, ncsrcs * sizeof(*tss)); + error = copyout(tss, CAST_USER_ADDR_T(tmp_ptr), ncsrcs * sizeof(*tss)); FREE(tss, M_TEMP); if (error) { return error; @@ -2812,7 +2788,7 @@ inp_set_source_filters(struct inpcb *inp, struct sockopt *sopt) struct in_multi *inm; size_t idx; int error; - user_addr_t tmp_ptr; + uint64_t tmp_ptr; struct igmp_tparams itp; bzero(&itp, sizeof(itp)); @@ -2896,7 +2872,7 @@ inp_set_source_filters(struct inpcb *inp, struct sockopt *sopt) * Begin state merge transaction at socket layer. */ - imf->imf_st[1] = msfr.msfr_fmode; + imf->imf_st[1] = (uint8_t)msfr.msfr_fmode; /* * Apply any new source filters, if present. @@ -2924,7 +2900,7 @@ inp_set_source_filters(struct inpcb *inp, struct sockopt *sopt) error = ENOMEM; goto out_imo_locked; } - error = copyin(tmp_ptr, kss, + error = copyin(CAST_USER_ADDR_T(tmp_ptr), kss, (size_t) msfr.msfr_nsrcs * sizeof(*kss)); if (error) { FREE(kss, M_TEMP); @@ -2937,7 +2913,7 @@ inp_set_source_filters(struct inpcb *inp, struct sockopt *sopt) * will set it to INCLUDE. */ imf_leave(imf); - imf->imf_st[1] = msfr.msfr_fmode; + imf->imf_st[1] = (uint8_t)msfr.msfr_fmode; /* * Update socket layer filters at t1, lazy-allocating @@ -3379,43 +3355,18 @@ in_multi_init(void) in_multihead_lock_attr); TAILQ_INIT(&inm_trash_head); - inm_size = (inm_debug == 0) ? sizeof(struct in_multi) : + vm_size_t inm_size = (inm_debug == 0) ? sizeof(struct in_multi) : sizeof(struct in_multi_dbg); - inm_zone = zinit(inm_size, INM_ZONE_MAX * inm_size, - 0, INM_ZONE_NAME); - if (inm_zone == NULL) { - panic("%s: failed allocating %s", __func__, INM_ZONE_NAME); - /* NOTREACHED */ - } - zone_change(inm_zone, Z_EXPAND, TRUE); - - ipms_size = sizeof(struct ip_msource); - ipms_zone = zinit(ipms_size, IPMS_ZONE_MAX * ipms_size, - 0, IPMS_ZONE_NAME); - if (ipms_zone == NULL) { - panic("%s: failed allocating %s", __func__, IPMS_ZONE_NAME); - /* NOTREACHED */ - } - zone_change(ipms_zone, Z_EXPAND, TRUE); - - inms_size = sizeof(struct in_msource); - inms_zone = zinit(inms_size, INMS_ZONE_MAX * inms_size, - 0, INMS_ZONE_NAME); - if (inms_zone == NULL) { - panic("%s: failed allocating %s", __func__, INMS_ZONE_NAME); - /* NOTREACHED */ - } - zone_change(inms_zone, Z_EXPAND, TRUE); + inm_zone = zone_create(INM_ZONE_NAME, inm_size, ZC_ZFREE_CLEARMEM); } static struct in_multi * -in_multi_alloc(int how) +in_multi_alloc(zalloc_flags_t how) { struct in_multi *inm; - inm = (how == M_WAITOK) ? zalloc(inm_zone) : zalloc_noblock(inm_zone); + inm = zalloc_flags(inm_zone, how | Z_ZERO); if (inm != NULL) { - bzero(inm, inm_size); lck_mtx_init(&inm->inm_lock, in_multihead_lock_grp, in_multihead_lock_attr); inm->inm_debug |= IFD_ALLOC; @@ -3695,16 +3646,9 @@ in_multihead_lock_done(void) } static struct ip_msource * -ipms_alloc(int how) +ipms_alloc(zalloc_flags_t how) { - struct ip_msource *ims; - - ims = (how == M_WAITOK) ? 
zalloc(ipms_zone) : zalloc_noblock(ipms_zone); - if (ims != NULL) { - bzero(ims, ipms_size); - } - - return ims; + return zalloc_flags(ipms_zone, how | Z_ZERO); } static void @@ -3714,17 +3658,9 @@ ipms_free(struct ip_msource *ims) } static struct in_msource * -inms_alloc(int how) +inms_alloc(zalloc_flags_t how) { - struct in_msource *inms; - - inms = (how == M_WAITOK) ? zalloc(inms_zone) : - zalloc_noblock(inms_zone); - if (inms != NULL) { - bzero(inms, inms_size); - } - - return inms; + return zalloc_flags(inms_zone, how | Z_ZERO); } static void diff --git a/bsd/netinet/in_pcb.c b/bsd/netinet/in_pcb.c index 4f3be1b1d..ef0731a82 100644 --- a/bsd/netinet/in_pcb.c +++ b/bsd/netinet/in_pcb.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2019 Apple Inc. All rights reserved. + * Copyright (c) 2000-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -101,10 +101,8 @@ #include #include -#if INET6 #include #include -#endif /* INET6 */ #include #include @@ -159,6 +157,8 @@ static void inp_update_necp_want_app_policy(struct inpcb *, boolean_t); #define DBG_FNC_PCB_LOOKUP NETDBG_CODE(DBG_NETTCP, (6 << 8)) #define DBG_FNC_PCB_HLOOKUP NETDBG_CODE(DBG_NETTCP, ((6 << 8) | 1)) +int allow_udp_port_exhaustion = 0; + /* * These configure the range of local port addresses assigned to * "unspecified" outgoing connections/packets/whatever. @@ -179,6 +179,7 @@ sysctl_net_ipport_check SYSCTL_HANDLER_ARGS { #pragma unused(arg1, arg2) int error; + int new_value = *(int *)oidp->oid_arg1; #if (DEBUG | DEVELOPMENT) int old_value = *(int *)oidp->oid_arg1; /* @@ -194,14 +195,14 @@ sysctl_net_ipport_check SYSCTL_HANDLER_ARGS } #endif /* (DEBUG | DEVELOPMENT) */ - error = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2, req); + error = sysctl_handle_int(oidp, &new_value, 0, req); if (!error) { - RANGECHK(ipport_lowfirstauto, 1, IPPORT_RESERVED - 1); - RANGECHK(ipport_lowlastauto, 1, IPPORT_RESERVED - 1); - RANGECHK(ipport_firstauto, IPPORT_RESERVED, USHRT_MAX); - RANGECHK(ipport_lastauto, IPPORT_RESERVED, USHRT_MAX); - RANGECHK(ipport_hifirstauto, IPPORT_RESERVED, USHRT_MAX); - RANGECHK(ipport_hilastauto, IPPORT_RESERVED, USHRT_MAX); + if (oidp->oid_arg1 == &ipport_lowfirstauto || oidp->oid_arg1 == &ipport_lowlastauto) { + RANGECHK(new_value, 1, IPPORT_RESERVED - 1); + } else { + RANGECHK(new_value, IPPORT_RESERVED, USHRT_MAX); + } + *(int *)oidp->oid_arg1 = new_value; } #if (DEBUG | DEVELOPMENT) @@ -243,11 +244,13 @@ SYSCTL_PROC(_net_inet_ip_portrange, OID_AUTO, hifirst, SYSCTL_PROC(_net_inet_ip_portrange, OID_AUTO, hilast, CTLFAGS_IP_PORTRANGE, &ipport_hilastauto, 0, &sysctl_net_ipport_check, "I", ""); +SYSCTL_INT(_net_inet_ip_portrange, OID_AUTO, ipport_allow_udp_port_exhaustion, + CTLFLAG_LOCKED | CTLFLAG_RW, &allow_udp_port_exhaustion, 0, ""); static uint32_t apn_fallbk_debug = 0; #define apn_fallbk_log(x) do { if (apn_fallbk_debug >= 1) log x; } while (0) -#if CONFIG_EMBEDDED +#if !XNU_TARGET_OS_OSX static boolean_t apn_fallbk_enabled = TRUE; SYSCTL_DECL(_net_inet); @@ -256,9 +259,9 @@ SYSCTL_UINT(_net_inet_apn_fallback, OID_AUTO, enable, CTLFLAG_RW | CTLFLAG_LOCKE &apn_fallbk_enabled, 0, "APN fallback enable"); SYSCTL_UINT(_net_inet_apn_fallback, OID_AUTO, debug, CTLFLAG_RW | CTLFLAG_LOCKED, &apn_fallbk_debug, 0, "APN fallback debug enable"); -#else +#else /* XNU_TARGET_OS_OSX */ static boolean_t apn_fallbk_enabled = FALSE; -#endif +#endif /* XNU_TARGET_OS_OSX */ extern int udp_use_randomport; extern int tcp_use_randomport; @@ -586,9 +589,6 @@ in_pcballoc(struct socket *so, 
struct inpcbinfo *pcbinfo, struct proc *p) #pragma unused(p) struct inpcb *inp; caddr_t temp; -#if CONFIG_MACF_NET - int mac_error; -#endif /* CONFIG_MACF_NET */ if ((so->so_flags1 & SOF1_CACHED_IN_SOCK_LAYER) == 0) { inp = (struct inpcb *)zalloc(pcbinfo->ipi_zone); @@ -606,16 +606,6 @@ in_pcballoc(struct socket *so, struct inpcbinfo *pcbinfo, struct proc *p) inp->inp_gencnt = ++pcbinfo->ipi_gencnt; inp->inp_pcbinfo = pcbinfo; inp->inp_socket = so; -#if CONFIG_MACF_NET - mac_error = mac_inpcb_label_init(inp, M_WAITOK); - if (mac_error != 0) { - if ((so->so_flags1 & SOF1_CACHED_IN_SOCK_LAYER) == 0) { - zfree(pcbinfo->ipi_zone, inp); - } - return mac_error; - } - mac_inpcb_label_associate(so, inp); -#endif /* CONFIG_MACF_NET */ /* make sure inp_stat is always 64-bit aligned */ inp->inp_stat = (struct inp_stat *)P2ROUNDUP(inp->inp_stat_store, sizeof(u_int64_t)); @@ -659,7 +649,6 @@ in_pcballoc(struct socket *so, struct inpcbinfo *pcbinfo, struct proc *p) pcbinfo->ipi_lock_attr); } -#if INET6 if (SOCK_DOM(so) == PF_INET6 && !ip6_mapped_addr_on) { inp->inp_flags |= IN6P_IPV6_V6ONLY; } @@ -667,7 +656,6 @@ in_pcballoc(struct socket *so, struct inpcbinfo *pcbinfo, struct proc *p) if (ip6_auto_flowlabel) { inp->inp_flags |= IN6P_AUTOFLOWLABEL; } -#endif /* INET6 */ if (intcoproc_unrestricted) { inp->inp_flags2 |= INP2_INTCOPROC_ALLOWED; } @@ -856,7 +844,7 @@ in_pcbbind(struct inpcb *inp, struct sockaddr *nam, struct proc *p) struct inpcb *t; uid_t u; -#if !CONFIG_EMBEDDED +#if XNU_TARGET_OS_OSX if (ntohs(lport) < IPPORT_RESERVED && SIN(nam)->sin_addr.s_addr != 0 && !(inp->inp_flags2 & INP2_EXTERNAL_PORT)) { @@ -870,12 +858,12 @@ in_pcbbind(struct inpcb *inp, struct sockaddr *nam, struct proc *p) return EACCES; } } -#endif /* !CONFIG_EMBEDDED */ +#endif /* XNU_TARGET_OS_OSX */ /* * Check wether the process is allowed to bind to a restricted port */ if (!current_task_can_use_restricted_in_port(lport, - so->so_proto->pr_protocol, PORT_FLAGS_BSD)) { + (uint8_t)so->so_proto->pr_protocol, PORT_FLAGS_BSD)) { lck_rw_done(pcbinfo->ipi_lock); socket_lock(so, 0); return EADDRINUSE; @@ -918,13 +906,10 @@ in_pcbbind(struct inpcb *inp, struct sockaddr *nam, struct proc *p) (!(t->inp_flags2 & INP2_EXTERNAL_PORT) || !(inp->inp_flags2 & INP2_EXTERNAL_PORT) || uuid_compare(t->necp_client_uuid, inp->necp_client_uuid) != 0)) { -#if INET6 if (SIN(nam)->sin_addr.s_addr != INADDR_ANY || t->inp_laddr.s_addr != INADDR_ANY || SOCK_DOM(so) != PF_INET6 || - SOCK_DOM(t->inp_socket) != PF_INET6) -#endif /* INET6 */ - { + SOCK_DOM(t->inp_socket) != PF_INET6) { if ((t->inp_socket->so_flags & SOF_NOTIFYCONFLICT) && !(so->so_flags & SOF_NOTIFYCONFLICT)) { @@ -965,8 +950,8 @@ in_pcbbind(struct inpcb *inp, struct sockaddr *nam, struct proc *p) */ anonport = TRUE; if (inp->inp_flags & INP_HIGHPORT) { - first = ipport_hifirstauto; /* sysctl */ - last = ipport_hilastauto; + first = (u_short)ipport_hifirstauto; /* sysctl */ + last = (u_short)ipport_hilastauto; lastport = &pcbinfo->ipi_lasthi; } else if (inp->inp_flags & INP_LOWPORT) { cred = kauth_cred_proc_ref(p); @@ -978,12 +963,12 @@ in_pcbbind(struct inpcb *inp, struct sockaddr *nam, struct proc *p) socket_lock(so, 0); return error; } - first = ipport_lowfirstauto; /* 1023 */ - last = ipport_lowlastauto; /* 600 */ + first = (u_short)ipport_lowfirstauto; /* 1023 */ + last = (u_short)ipport_lowlastauto; /* 600 */ lastport = &pcbinfo->ipi_lastlow; } else { - first = ipport_firstauto; /* sysctl */ - last = ipport_lastauto; + first = (u_short)ipport_firstauto; /* sysctl */ + last = 
(u_short)ipport_lastauto; lastport = &pcbinfo->ipi_lastport; } /* No point in randomizing if only one port is available */ @@ -1897,9 +1882,6 @@ in_pcbdispose(struct inpcb *inp) so->so_saved_pcb = (caddr_t)inp; so->so_pcb = NULL; inp->inp_socket = NULL; -#if CONFIG_MACF_NET - mac_inpcb_label_destroy(inp); -#endif /* CONFIG_MACF_NET */ #if NECP necp_inpcb_dispose(inp); #endif /* NECP */ @@ -2008,11 +1990,9 @@ in_pcbnotifyall(struct inpcbinfo *pcbinfo, struct in_addr faddr, lck_rw_lock_shared(pcbinfo->ipi_lock); LIST_FOREACH(inp, pcbinfo->ipi_listhead, inp_list) { -#if INET6 if (!(inp->inp_vflag & INP_IPV4)) { continue; } -#endif /* INET6 */ if (inp->inp_faddr.s_addr != faddr.s_addr || inp->inp_socket == NULL) { continue; @@ -2116,7 +2096,7 @@ in_pcblookup_local(struct inpcbinfo *pcbinfo, struct in_addr laddr, { struct inpcb *inp; int matchwild = 3, wildcard; - u_short lport = lport_arg; + u_short lport = (u_short)lport_arg; KERNEL_DEBUG(DBG_FNC_PCB_LOOKUP | DBG_FUNC_START, 0, 0, 0, 0, 0); @@ -2129,11 +2109,9 @@ in_pcblookup_local(struct inpcbinfo *pcbinfo, struct in_addr laddr, head = &pcbinfo->ipi_hashbase[INP_PCBHASH(INADDR_ANY, lport, 0, pcbinfo->ipi_hashmask)]; LIST_FOREACH(inp, head, inp_hash) { -#if INET6 if (!(inp->inp_vflag & INP_IPV4)) { continue; } -#endif /* INET6 */ if (inp->inp_faddr.s_addr == INADDR_ANY && inp->inp_laddr.s_addr == laddr.s_addr && inp->inp_lport == lport) { @@ -2172,11 +2150,9 @@ in_pcblookup_local(struct inpcbinfo *pcbinfo, struct in_addr laddr, */ LIST_FOREACH(inp, &phd->phd_pcblist, inp_portlist) { wildcard = 0; -#if INET6 if (!(inp->inp_vflag & INP_IPV4)) { continue; } -#endif /* INET6 */ if (inp->inp_faddr.s_addr != INADDR_ANY) { wildcard++; } @@ -2217,12 +2193,10 @@ in_pcblookup_hash_exists(struct inpcbinfo *pcbinfo, struct in_addr faddr, { struct inpcbhead *head; struct inpcb *inp; - u_short fport = fport_arg, lport = lport_arg; + u_short fport = (u_short)fport_arg, lport = (u_short)lport_arg; int found = 0; struct inpcb *local_wild = NULL; -#if INET6 struct inpcb *local_wild_mapped = NULL; -#endif /* INET6 */ *uid = UID_MAX; *gid = GID_MAX; @@ -2239,11 +2213,9 @@ in_pcblookup_hash_exists(struct inpcbinfo *pcbinfo, struct in_addr faddr, head = &pcbinfo->ipi_hashbase[INP_PCBHASH(faddr.s_addr, lport, fport, pcbinfo->ipi_hashmask)]; LIST_FOREACH(inp, head, inp_hash) { -#if INET6 if (!(inp->inp_vflag & INP_IPV4)) { continue; } -#endif /* INET6 */ if (inp_restricted_recv(inp, ifp)) { continue; } @@ -2283,11 +2255,9 @@ in_pcblookup_hash_exists(struct inpcbinfo *pcbinfo, struct in_addr faddr, head = &pcbinfo->ipi_hashbase[INP_PCBHASH(INADDR_ANY, lport, 0, pcbinfo->ipi_hashmask)]; LIST_FOREACH(inp, head, inp_hash) { -#if INET6 if (!(inp->inp_vflag & INP_IPV4)) { continue; } -#endif /* INET6 */ if (inp_restricted_recv(inp, ifp)) { continue; } @@ -2310,18 +2280,16 @@ in_pcblookup_hash_exists(struct inpcbinfo *pcbinfo, struct in_addr faddr, lck_rw_done(pcbinfo->ipi_lock); return found; } else if (inp->inp_laddr.s_addr == INADDR_ANY) { -#if INET6 if (inp->inp_socket && SOCK_CHECK_DOM(inp->inp_socket, PF_INET6)) { local_wild_mapped = inp; - } else -#endif /* INET6 */ - local_wild = inp; + } else { + local_wild = inp; + } } } } if (local_wild == NULL) { -#if INET6 if (local_wild_mapped != NULL) { if ((found = (local_wild_mapped->inp_socket != NULL))) { *uid = kauth_cred_getuid( @@ -2332,7 +2300,6 @@ in_pcblookup_hash_exists(struct inpcbinfo *pcbinfo, struct in_addr faddr, lck_rw_done(pcbinfo->ipi_lock); return found; } -#endif /* INET6 */ 
lck_rw_done(pcbinfo->ipi_lock); return 0; } @@ -2356,11 +2323,9 @@ in_pcblookup_hash(struct inpcbinfo *pcbinfo, struct in_addr faddr, { struct inpcbhead *head; struct inpcb *inp; - u_short fport = fport_arg, lport = lport_arg; + u_short fport = (u_short)fport_arg, lport = (u_short)lport_arg; struct inpcb *local_wild = NULL; -#if INET6 struct inpcb *local_wild_mapped = NULL; -#endif /* INET6 */ /* * We may have found the pcb in the last lookup - check this first. @@ -2374,11 +2339,9 @@ in_pcblookup_hash(struct inpcbinfo *pcbinfo, struct in_addr faddr, head = &pcbinfo->ipi_hashbase[INP_PCBHASH(faddr.s_addr, lport, fport, pcbinfo->ipi_hashmask)]; LIST_FOREACH(inp, head, inp_hash) { -#if INET6 if (!(inp->inp_vflag & INP_IPV4)) { continue; } -#endif /* INET6 */ if (inp_restricted_recv(inp, ifp)) { continue; } @@ -2419,11 +2382,9 @@ in_pcblookup_hash(struct inpcbinfo *pcbinfo, struct in_addr faddr, head = &pcbinfo->ipi_hashbase[INP_PCBHASH(INADDR_ANY, lport, 0, pcbinfo->ipi_hashmask)]; LIST_FOREACH(inp, head, inp_hash) { -#if INET6 if (!(inp->inp_vflag & INP_IPV4)) { continue; } -#endif /* INET6 */ if (inp_restricted_recv(inp, ifp)) { continue; } @@ -2447,17 +2408,15 @@ in_pcblookup_hash(struct inpcbinfo *pcbinfo, struct in_addr faddr, return NULL; } } else if (inp->inp_laddr.s_addr == INADDR_ANY) { -#if INET6 if (SOCK_CHECK_DOM(inp->inp_socket, PF_INET6)) { local_wild_mapped = inp; - } else -#endif /* INET6 */ - local_wild = inp; + } else { + local_wild = inp; + } } } } if (local_wild == NULL) { -#if INET6 if (local_wild_mapped != NULL) { if (in_pcb_checkstate(local_wild_mapped, WNT_ACQUIRE, 0) != WNT_STOPUSING) { @@ -2469,7 +2428,6 @@ in_pcblookup_hash(struct inpcbinfo *pcbinfo, struct in_addr faddr, return NULL; } } -#endif /* INET6 */ lck_rw_done(pcbinfo->ipi_lock); return NULL; } @@ -2528,12 +2486,11 @@ in_pcbinshash(struct inpcb *inp, int locked) } -#if INET6 if (inp->inp_vflag & INP_IPV6) { hashkey_faddr = inp->in6p_faddr.s6_addr32[3] /* XXX */; - } else -#endif /* INET6 */ - hashkey_faddr = inp->inp_faddr.s_addr; + } else { + hashkey_faddr = inp->inp_faddr.s_addr; + } inp->inp_hash_element = INP_PCBHASH(hashkey_faddr, inp->inp_lport, inp->inp_fport, pcbinfo->ipi_hashmask); @@ -2601,12 +2558,11 @@ in_pcbrehash(struct inpcb *inp) struct inpcbhead *head; u_int32_t hashkey_faddr; -#if INET6 if (inp->inp_vflag & INP_IPV6) { hashkey_faddr = inp->in6p_faddr.s6_addr32[3] /* XXX */; - } else -#endif /* INET6 */ - hashkey_faddr = inp->inp_faddr.s_addr; + } else { + hashkey_faddr = inp->inp_faddr.s_addr; + } inp->inp_hash_element = INP_PCBHASH(hashkey_faddr, inp->inp_lport, inp->inp_fport, inp->inp_pcbinfo->ipi_hashmask); @@ -2828,7 +2784,7 @@ inpcb_to_compat(struct inpcb *inp, struct inpcb_compat *inp_compat) inp_compat->inp_depend6.inp6_hops = inp->inp_depend6.inp6_hops; } -#if !CONFIG_EMBEDDED +#if XNU_TARGET_OS_OSX void inpcb_to_xinpcb64(struct inpcb *inp, struct xinpcb64 *xinp) { @@ -2848,7 +2804,7 @@ inpcb_to_xinpcb64(struct inpcb *inp, struct xinpcb64 *xinp) xinp->inp_depend6.inp6_ifindex = 0; xinp->inp_depend6.inp6_hops = inp->inp_depend6.inp6_hops; } -#endif /* !CONFIG_EMBEDDED */ +#endif /* XNU_TARGET_OS_OSX */ /* * The following routines implement this scheme: @@ -3249,6 +3205,7 @@ inp_reset_fc_state(struct inpcb *inp) int inp_set_fc_state(struct inpcb *inp, int advcode) { + boolean_t is_flow_controlled = INP_WAIT_FOR_IF_FEEDBACK(inp); struct inpcb *tmp_inp = NULL; /* * If there was a feedback from the interface when @@ -3281,6 +3238,10 @@ inp_set_fc_state(struct inpcb *inp, int advcode) 
inp->inp_socket->so_flags |= SOF_SUSPENDED; break; } + + if (!is_flow_controlled && SOCK_TYPE(inp->inp_socket) == SOCK_STREAM) { + inp_fc_throttle_tcp(inp); + } return 1; } return 0; diff --git a/bsd/netinet/in_pcb.h b/bsd/netinet/in_pcb.h index 2ba76a786..b7b32693a 100644 --- a/bsd/netinet/in_pcb.h +++ b/bsd/netinet/in_pcb.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2019 Apple Inc. All rights reserved. + * Copyright (c) 2000-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -122,9 +122,6 @@ struct in_addr_4in6 { * stable. */ struct icmp6_filter; -#if CONFIG_MACF_NET -struct label; -#endif struct ifnet; struct inp_stat { @@ -209,9 +206,6 @@ struct inpcb { } inp_depend6; caddr_t inp_saved_ppcb; /* place to save pointer while cached */ -#if CONFIG_MACF_NET - struct label *inp_label; /* MAC label */ -#endif #if IPSEC struct inpcbpolicy *inp_sp; /* for IPsec */ #endif /* IPSEC */ @@ -376,7 +370,7 @@ struct xinpcb { u_quad_t xi_alignment_hack; }; -#if !CONFIG_EMBEDDED +#if XNU_TARGET_OS_OSX || !(TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR) struct inpcb64_list_entry { u_int64_t le_next; u_int64_t le_prev; @@ -418,7 +412,7 @@ struct xinpcb64 { struct xsocket64 xi_socket; u_quad_t xi_alignment_hack; }; -#endif /* !CONFIG_EMBEDDED */ +#endif /* XNU_TARGET_OS_OSX || !(TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR) */ #ifdef PRIVATE struct xinpcb_list_entry { @@ -685,7 +679,7 @@ struct inpcbinfo { #ifdef BSD_KERNEL_PRIVATE #define IN6P_RFC2292 0x02000000 /* used RFC2292 API on the socket */ -#define IN6P_MTU 0x04000000 /* receive path MTU */ +#define IN6P_MTU 0x04000000 /* receive path MTU for IPv6 */ #define INP_PKTINFO 0x08000000 /* rcv and snd PKTINFO for IPv4 */ #define INP_FLOW_SUSPENDED 0x10000000 /* flow suspended */ #define INP_NO_IFT_CELLULAR 0x20000000 /* do not use cellular interface */ @@ -717,6 +711,7 @@ struct inpcbinfo { #define INP2_CLAT46_FLOW 0x00000200 /* The flow is going to use CLAT46 path */ #define INP2_EXTERNAL_PORT 0x00000400 /* The port is registered externally, for NECP listeners */ #define INP2_NO_IFF_CONSTRAINED 0x00000800 /* do not use constrained interface */ +#define INP2_DONTFRAG 0x00001000 /* mark the DF bit in the IP header to avoid fragmentation */ /* * Flags passed to in_pcblookup*() functions. 
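
Editor's note (not part of the patch): the pair of defines added above — IP_DONTFRAG (28) in netinet/in.h and INP2_DONTFRAG in netinet/in_pcb.h — introduce a per-socket "don't fragment" control for IPv4. The following is a minimal userspace sketch of how a UDP sender might opt in; it is illustrative only, error handling is reduced to perror(), and the fallback literal 28 simply mirrors the value defined in this diff in case the SDK headers in use do not yet export the constant.

    #include <netinet/in.h>
    #include <stdio.h>
    #include <sys/socket.h>
    #include <unistd.h>

    #ifndef IP_DONTFRAG
    #define IP_DONTFRAG 28          /* value introduced by this change */
    #endif

    int
    main(void)
    {
        int s = socket(AF_INET, SOCK_DGRAM, 0);
        int one = 1;

        if (s < 0) {
            perror("socket");
            return 1;
        }
        /* Ask the stack to set DF and never fragment datagrams sent on this socket. */
        if (setsockopt(s, IPPROTO_IP, IP_DONTFRAG, &one, sizeof(one)) < 0) {
            perror("setsockopt(IP_DONTFRAG)");
        }
        close(s);
        return 0;
    }
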
@@ -734,6 +729,8 @@ extern int ipport_firstauto; extern int ipport_lastauto; extern int ipport_hifirstauto; extern int ipport_hilastauto; +extern int allow_udp_port_exhaustion; +#define UDP_RANDOM_PORT_RESERVE 4096 /* freshly allocated PCB, it's in use */ #define INPCB_STATE_INUSE 0x1 @@ -791,9 +788,9 @@ extern int in_getsockaddr_s(struct socket *, struct sockaddr_in *); extern int in_pcb_checkstate(struct inpcb *, int, int); extern void in_pcbremlists(struct inpcb *); extern void inpcb_to_compat(struct inpcb *, struct inpcb_compat *); -#if !CONFIG_EMBEDDED +#if XNU_TARGET_OS_OSX extern void inpcb_to_xinpcb64(struct inpcb *, struct xinpcb64 *); -#endif +#endif /* XNU_TARGET_OS_OSX */ extern int get_pcblist_n(short, struct sysctl_req *, struct inpcbinfo *); @@ -826,6 +823,7 @@ extern u_int32_t inp_calc_flowhash(struct inpcb *); extern void inp_reset_fc_state(struct inpcb *); extern int inp_set_fc_state(struct inpcb *, int advcode); extern void inp_fc_unthrottle_tcp(struct inpcb *); +extern void inp_fc_throttle_tcp(struct inpcb *inp); extern void inp_flowadv(uint32_t); extern int inp_flush(struct inpcb *, int); extern int inp_findinpcb_procinfo(struct inpcbinfo *, uint32_t, struct so_procinfo *); @@ -848,5 +846,7 @@ extern void inp_copy_last_owner(struct socket *so, struct socket *head); #ifdef KERNEL_PRIVATE /* exported for PPP */ extern void inp_clear_INP_INADDR_ANY(struct socket *); +extern int inp_limit_companion_link(struct inpcbinfo *pcbinfo, u_int32_t limit); +extern int inp_recover_companion_link(struct inpcbinfo *pcbinfo); #endif /* KERNEL_PRIVATE */ #endif /* !_NETINET_IN_PCB_H_ */ diff --git a/bsd/netinet/in_pcblist.c b/bsd/netinet/in_pcblist.c index 69302e731..b87cf5d04 100644 --- a/bsd/netinet/in_pcblist.c +++ b/bsd/netinet/in_pcblist.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2010-2018 Apple Inc. All rights reserved. + * Copyright (c) 2010-2020 Apple Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -155,9 +155,9 @@ sbtoxsockbuf_n(struct sockbuf *sb, struct xsockbuf_n *xsb) xsb->sb_mbcnt = sb->sb_mbcnt; xsb->sb_mbmax = sb->sb_mbmax; xsb->sb_lowat = sb->sb_lowat; - xsb->sb_flags = sb->sb_flags; - xsb->sb_timeo = (short)(sb->sb_timeo.tv_sec * hz) + - sb->sb_timeo.tv_usec / tick; + xsb->sb_flags = (short)sb->sb_flags; + xsb->sb_timeo = (short)((sb->sb_timeo.tv_sec * hz) + + sb->sb_timeo.tv_usec / tick); if (xsb->sb_timeo == 0 && sb->sb_timeo.tv_usec != 0) { xsb->sb_timeo = 1; } @@ -716,34 +716,25 @@ inpcb_find_anypcb_byaddr(struct ifaddr *ifa, struct inpcbinfo *pcbinfo) static int shutdown_sockets_on_interface_proc_callout(proc_t p, void *arg) { - struct filedesc *fdp; - int i; + struct fileproc *fp; struct ifnet *ifp = (struct ifnet *)arg; if (ifp == NULL) { return PROC_RETURNED; } - proc_fdlock(p); - fdp = p->p_fd; - for (i = 0; i < fdp->fd_nfiles; i++) { - struct fileproc *fp = fdp->fd_ofiles[i]; - struct fileglob *fg; + fdt_foreach(fp, p) { + struct fileglob *fg = fp->fp_glob; struct socket *so; struct inpcb *inp; struct ifnet *inp_ifp; int error; - if (fp == NULL || (fdp->fd_ofileflags[i] & UF_RESERVED) != 0) { - continue; - } - - fg = fp->f_fglob; if (FILEGLOB_DTYPE(fg) != DTYPE_SOCKET) { continue; } - so = (struct socket *)fp->f_fglob->fg_data; + so = (struct socket *)fp->fp_glob->fg_data; if (SOCK_DOM(so) != PF_INET && SOCK_DOM(so) != PF_INET6) { continue; } @@ -800,3 +791,60 @@ shutdown_sockets_on_interface(struct ifnet *ifp) shutdown_sockets_on_interface_proc_callout, ifp, NULL, NULL); } + +__private_extern__ int +inp_limit_companion_link(struct inpcbinfo *pcbinfo, u_int32_t limit) +{ + struct inpcb *inp; + struct socket *so = NULL; + + lck_rw_lock_shared(pcbinfo->ipi_lock); + inp_gen_t gencnt = pcbinfo->ipi_gencnt; + for (inp = LIST_FIRST(pcbinfo->ipi_listhead); + inp != NULL; inp = LIST_NEXT(inp, inp_list)) { + if (inp->inp_gencnt <= gencnt && + inp->inp_state != INPCB_STATE_DEAD && + inp->inp_socket != NULL) { + so = inp->inp_socket; + + if ((so->so_state & SS_DEFUNCT) || so->so_state & SS_ISDISCONNECTED || + SOCK_PROTO(so) != IPPROTO_TCP || inp->inp_last_outifp == NULL || + !IFNET_IS_COMPANION_LINK(inp->inp_last_outifp)) { + continue; + } + so->so_snd.sb_flags &= ~SB_LIMITED; + u_int32_t new_size = MAX(MIN(limit, so->so_snd.sb_lowat), so->so_snd.sb_cc); + sbreserve(&so->so_snd, new_size); + so->so_snd.sb_flags |= SB_LIMITED; + } + } + lck_rw_done(pcbinfo->ipi_lock); + return 0; +} + +__private_extern__ int +inp_recover_companion_link(struct inpcbinfo *pcbinfo) +{ + struct inpcb *inp; + inp_gen_t gencnt = pcbinfo->ipi_gencnt; + struct socket *so = NULL; + + lck_rw_lock_shared(pcbinfo->ipi_lock); + for (inp = LIST_FIRST(pcbinfo->ipi_listhead); + inp != NULL; inp = LIST_NEXT(inp, inp_list)) { + if (inp->inp_gencnt <= gencnt && + inp->inp_state != INPCB_STATE_DEAD && + inp->inp_socket != NULL) { + so = inp->inp_socket; + + if (SOCK_PROTO(so) != IPPROTO_TCP || inp->inp_last_outifp == NULL || + !(so->so_snd.sb_flags & SB_LIMITED)) { + continue; + } + + so->so_snd.sb_flags &= ~SB_LIMITED; + } + } + lck_rw_done(pcbinfo->ipi_lock); + return 0; +} diff --git a/bsd/netinet/in_proto.c b/bsd/netinet/in_proto.c index 340f31f65..46d340359 100644 --- a/bsd/netinet/in_proto.c +++ b/bsd/netinet/in_proto.c @@ -89,8 +89,6 @@ #include #include #include -#include - /* * TCP/IP protocol family: IP, ICMP, UDP, TCP. 
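
Editor's note (not part of the patch): the descriptor-table walks in in_pcblist.c above and in_tclass.c below are converted from open-coded loops over p->p_fd->fd_ofiles[] (with manual UF_RESERVED checks) to the fdt_foreach() iterator, and from fp->f_fglob to fp->fp_glob. The sketch below summarizes that new pattern; it relies on xnu-private KPIs, builds only inside the xnu tree, the header list is approximate, and the helper name walk_process_sockets() is hypothetical — it loosely mirrors the converted loops such as flush_pid_tclass().

    #include <sys/proc_internal.h>   /* proc_fdlock/proc_fdunlock (approximate) */
    #include <sys/file_internal.h>   /* struct fileproc, fdt_foreach */
    #include <sys/socketvar.h>       /* struct socket, SOCK_PROTO */

    /* Hypothetical helper: log every socket owned by process p. */
    static void
    walk_process_sockets(proc_t p)
    {
        struct fileproc *fp;

        proc_fdlock(p);
        fdt_foreach(fp, p) {
            struct fileglob *fg = fp->fp_glob;      /* replaces fp->f_fglob */

            /* Skip descriptor-table entries that are not sockets. */
            if (FILEGLOB_DTYPE(fg) != DTYPE_SOCKET) {
                continue;
            }
            struct socket *so = (struct socket *)fg->fg_data;

            /* fdt_foreach_fd() yields the current descriptor number. */
            printf("pid %d fd %d proto %d\n",
                proc_pid(p), fdt_foreach_fd(), SOCK_PROTO(so));
        }
        proc_fdunlock(p);
    }
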
@@ -247,7 +245,6 @@ static struct protosw inetsw[] = { .pr_update_last_owner = inp_update_last_owner, .pr_copy_last_owner = inp_copy_last_owner, }, -#if INET6 { .pr_type = SOCK_RAW, .pr_protocol = IPPROTO_IPV6, @@ -260,21 +257,6 @@ static struct protosw inetsw[] = { .pr_update_last_owner = inp_update_last_owner, .pr_copy_last_owner = inp_copy_last_owner, }, -#endif /* INET6 */ -#if IPDIVERT - { - .pr_type = SOCK_RAW, - .pr_protocol = IPPROTO_DIVERT, - .pr_flags = PR_ATOMIC | PR_ADDR | PR_PCBLOCK, - .pr_input = div_input, - .pr_ctloutput = ip_ctloutput, - .pr_init = div_init, - .pr_usrreqs = &div_usrreqs, - .pr_lock = div_lock, - .pr_unlock = div_unlock, - .pr_getlock = div_getlock, - }, -#endif /* IPDIVERT */ /* raw wildcard */ { .pr_type = SOCK_RAW, @@ -379,7 +361,3 @@ SYSCTL_NODE(_net_inet, IPPROTO_AH, ipsec, #endif /* IPSEC */ SYSCTL_NODE(_net_inet, IPPROTO_RAW, raw, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "RAW"); -#if IPDIVERT -SYSCTL_NODE(_net_inet, IPPROTO_DIVERT, div, - CTLFLAG_RW | CTLFLAG_LOCKED, 0, "DIVERT"); -#endif /* IPDIVERT */ diff --git a/bsd/netinet/in_systm.h b/bsd/netinet/in_systm.h index c191fed8d..800ec7cfb 100644 --- a/bsd/netinet/in_systm.h +++ b/bsd/netinet/in_systm.h @@ -87,6 +87,8 @@ typedef __uint32_t n_time; /* ms since 00:00 GMT, byte rev */ #ifdef BSD_KERNEL_PRIVATE #define ABS(v) (((v) > 0) ? (v) : -(v)) +#ifndef DRIVERKIT u_int32_t iptime(void); +#endif /* DRIVERKIT */ #endif /* BSD_KERNEL_PRIVATE */ #endif /* _NETINET_IN_SYSTM_H_ */ diff --git a/bsd/netinet/in_tclass.c b/bsd/netinet/in_tclass.c index 2971f9191..d14323a4c 100644 --- a/bsd/netinet/in_tclass.c +++ b/bsd/netinet/in_tclass.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2009-2019 Apple Inc. All rights reserved. + * Copyright (c) 2009-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -58,7 +58,6 @@ #include #include #include -#include #include struct net_qos_dscp_map { @@ -349,7 +348,7 @@ set_tclass_for_curr_proc(struct socket *so) strncmp(pname, tfp->tfp_pname, sizeof(tfp->tfp_pname)) == 0)) { if (tfp->tfp_class != SO_TC_UNSPEC) { - so->so_traffic_class = tfp->tfp_class; + so->so_traffic_class = (uint16_t)tfp->tfp_class; } if (tfp->tfp_qos_mode == QOS_MODE_MARKING_POLICY_ENABLE) { @@ -473,10 +472,7 @@ set_pid_tclass(struct so_tcdbg *so_tcdbg) { int error = EINVAL; proc_t p = NULL; - struct filedesc *fdp; - struct fileproc *fp; struct tclass_for_proc *tfp; - int i; pid_t pid = so_tcdbg->so_tcdbg_pid; int tclass = so_tcdbg->so_tcdbg_tclass; int netsvctype = so_tcdbg->so_tcdbg_netsvctype; @@ -505,20 +501,16 @@ set_pid_tclass(struct so_tcdbg *so_tcdbg) lck_mtx_unlock(tclass_lock); if (tfp != NULL) { - proc_fdlock(p); + struct fileproc *fp; - fdp = p->p_fd; - for (i = 0; i < fdp->fd_nfiles; i++) { + fdt_foreach(fp, p) { struct socket *so; - fp = fdp->fd_ofiles[i]; - if (fp == NULL || - (fdp->fd_ofileflags[i] & UF_RESERVED) != 0 || - FILEGLOB_DTYPE(fp->f_fglob) != DTYPE_SOCKET) { + if (FILEGLOB_DTYPE(fp->fp_glob) != DTYPE_SOCKET) { continue; } - so = (struct socket *)fp->f_fglob->fg_data; + so = (struct socket *)fp->fp_glob->fg_data; if (SOCK_DOM(so) != PF_INET && SOCK_DOM(so) != PF_INET6) { continue; } @@ -586,50 +578,40 @@ flush_pid_tclass(struct so_tcdbg *so_tcdbg) { pid_t pid = so_tcdbg->so_tcdbg_pid; int tclass = so_tcdbg->so_tcdbg_tclass; - struct filedesc *fdp; - int error = EINVAL; + struct fileproc *fp; proc_t p; - int i; + int error; p = proc_find(pid); if (p == PROC_NULL) { printf("%s proc_find(%d) failed\n", __func__, pid); - goto done; + return EINVAL; } 
proc_fdlock(p); - fdp = p->p_fd; - for (i = 0; i < fdp->fd_nfiles; i++) { + + fdt_foreach(fp, p) { struct socket *so; - struct fileproc *fp; - fp = fdp->fd_ofiles[i]; - if (fp == NULL || - (fdp->fd_ofileflags[i] & UF_RESERVED) != 0 || - FILEGLOB_DTYPE(fp->f_fglob) != DTYPE_SOCKET) { + if (FILEGLOB_DTYPE(fp->fp_glob) != DTYPE_SOCKET) { continue; } - so = (struct socket *)fp->f_fglob->fg_data; + so = (struct socket *)fp->fp_glob->fg_data; error = sock_setsockopt(so, SOL_SOCKET, SO_FLUSH, &tclass, sizeof(tclass)); if (error != 0) { printf("%s: setsockopt(SO_FLUSH) (so=0x%llx, fd=%d, " "tclass=%d) failed %d\n", __func__, - (uint64_t)VM_KERNEL_ADDRPERM(so), i, tclass, + (uint64_t)VM_KERNEL_ADDRPERM(so), fdt_foreach_fd(), tclass, error); - error = 0; } } - proc_fdunlock(p); - error = 0; -done: - if (p != PROC_NULL) { - proc_rele(p); - } + proc_fdunlock(p); - return error; + proc_rele(p); + return 0; } int @@ -934,7 +916,7 @@ so_set_traffic_class(struct socket *so, int optval) int oldval = so->so_traffic_class; VERIFY(SO_VALID_TC(optval)); - so->so_traffic_class = optval; + so->so_traffic_class = (uint16_t)optval; if ((SOCK_DOM(so) == PF_INET || SOCK_DOM(so) == PF_INET6) && @@ -980,7 +962,7 @@ so_set_net_service_type(struct socket *so, int netsvctype) if (error != 0) { return error; } - so->so_netsvctype = netsvctype; + so->so_netsvctype = (int8_t)netsvctype; so->so_flags1 |= SOF1_TC_NET_SERV_TYPE; return 0; @@ -1051,7 +1033,7 @@ so_tc_from_control(struct mbuf *control, int *out_netsvctype) * passed using SO_TRAFFIC_CLASS */ val = val - SO_TC_NET_SERVICE_OFFSET; - /* FALLTHROUGH */ + OS_FALLTHROUGH; case SO_NET_SERVICE_TYPE: if (!IS_VALID_NET_SERVICE_TYPE(val)) { break; @@ -1123,7 +1105,7 @@ so_inc_recv_data_stat(struct socket *so, size_t pkts, size_t bytes, static inline int so_throttle_best_effort(struct socket *so, struct ifnet *ifp) { - uint32_t uptime = net_uptime(); + uint32_t uptime = (uint32_t)net_uptime(); return soissrcbesteffort(so) && net_io_policy_throttle_best_effort == 1 && ifp->if_rt_sendts > 0 && @@ -1152,7 +1134,7 @@ set_tcp_stream_priority(struct socket *so) } outifp = inp->inp_last_outifp; - uptime = net_uptime(); + uptime = (uint32_t)net_uptime(); /* * If the socket was marked as a background socket or if the @@ -1448,34 +1430,6 @@ so_svc2tc(mbuf_svc_class_t svc) } } -/* - * LRO is turned on for AV streaming class. 
- */ -void -so_set_lro(struct socket *so, int optval) -{ - if (optval == SO_TC_AV) { - so->so_flags |= SOF_USELRO; - } else { - if (so->so_flags & SOF_USELRO) { - /* transition to non LRO class */ - so->so_flags &= ~SOF_USELRO; - struct inpcb *inp = sotoinpcb(so); - struct tcpcb *tp = NULL; - if (inp) { - tp = intotcpcb(inp); - if (tp && (tp->t_flagsext & TF_LRO_OFFLOADED)) { - tcp_lro_remove_state(inp->inp_laddr, - inp->inp_faddr, - inp->inp_lport, - inp->inp_fport); - tp->t_flagsext &= ~TF_LRO_OFFLOADED; - } - } - } - } -} - static size_t sotc_index(int sotc) { @@ -1711,8 +1665,12 @@ set_netsvctype_dscp_map(struct net_qos_dscp_map *net_qos_dscp_map, ASSERT(0); } } - /* Network control socket traffic class is always best effort */ - net_qos_dscp_map->sotc_to_dscp[SOTCIX_CTL] = _DSCP_DF; + if (net_qos_dscp_map == &fastlane_net_qos_dscp_map) { + /* Network control socket traffic class is always best effort for fastlane*/ + net_qos_dscp_map->sotc_to_dscp[SOTCIX_CTL] = _DSCP_DF; + } else { + net_qos_dscp_map->sotc_to_dscp[SOTCIX_CTL] = _DSCP_CS6; + } /* Backround socket traffic class DSCP same as backround system */ net_qos_dscp_map->sotc_to_dscp[SOTCIX_BK] = @@ -1721,35 +1679,20 @@ set_netsvctype_dscp_map(struct net_qos_dscp_map *net_qos_dscp_map, return 0; } -/* - * out_count is an input/ouput parameter - */ -static errno_t -get_netsvctype_dscp_map(size_t *out_count, - struct netsvctype_dscp_map *netsvctype_dscp_map) +static size_t +get_netsvctype_dscp_map(struct netsvctype_dscp_map *netsvctype_dscp_map) { - size_t i; - struct net_qos_dscp_map *net_qos_dscp_map = NULL; - - /* - * Do not accept more that max number of distinct DSCPs - */ - if (out_count == NULL || netsvctype_dscp_map == NULL) { - return EINVAL; - } - if (*out_count > _MAX_DSCP) { - return EINVAL; - } + struct net_qos_dscp_map *net_qos_dscp_map; + int i; net_qos_dscp_map = &fastlane_net_qos_dscp_map; - for (i = 0; i < MIN(_NET_SERVICE_TYPE_COUNT, *out_count); i++) { + for (i = 0; i < _NET_SERVICE_TYPE_COUNT; i++) { netsvctype_dscp_map[i].netsvctype = i; netsvctype_dscp_map[i].dscp = net_qos_dscp_map->netsvctype_to_dscp[i]; } - *out_count = i; - return 0; + return i * sizeof(struct netsvctype_dscp_map); } void @@ -1773,20 +1716,16 @@ sysctl_default_netsvctype_to_dscp_map SYSCTL_HANDLER_ARGS { #pragma unused(oidp, arg1, arg2) int error = 0; - size_t len; - struct netsvctype_dscp_map netsvctype_dscp_map[_NET_SERVICE_TYPE_COUNT] = {}; - size_t count; if (req->oldptr == USER_ADDR_NULL) { req->oldidx = _NET_SERVICE_TYPE_COUNT * sizeof(struct netsvctype_dscp_map); } else if (req->oldlen > 0) { - count = _NET_SERVICE_TYPE_COUNT; - error = get_netsvctype_dscp_map(&count, netsvctype_dscp_map); - if (error != 0) { - goto done; - } - len = count * sizeof(struct netsvctype_dscp_map); + struct netsvctype_dscp_map netsvctype_dscp_map[_NET_SERVICE_TYPE_COUNT] = {}; + size_t len; + + len = get_netsvctype_dscp_map(netsvctype_dscp_map); + error = SYSCTL_OUT(req, netsvctype_dscp_map, MIN(len, req->oldlen)); if (error != 0) { @@ -1981,11 +1920,12 @@ sysctl_dscp_to_wifi_ac_map SYSCTL_HANDLER_ARGS struct netsvctype_dscp_map netsvctype_dscp_map[DSCP_ARRAY_SIZE] = {}; struct dcsp_msc_map dcsp_msc_map[DSCP_ARRAY_SIZE]; size_t count; - uint32_t i; if (req->oldptr == USER_ADDR_NULL) { req->oldidx = len; } else if (req->oldlen > 0) { + uint8_t i; + for (i = 0; i < DSCP_ARRAY_SIZE; i++) { netsvctype_dscp_map[i].dscp = i; netsvctype_dscp_map[i].netsvctype = diff --git a/bsd/netinet/ip.h b/bsd/netinet/ip.h index 823c337db..ab0d320c4 100644 --- 
a/bsd/netinet/ip.h +++ b/bsd/netinet/ip.h @@ -63,12 +63,21 @@ #ifndef _NETINET_IP_H_ #define _NETINET_IP_H_ +#ifndef DRIVERKIT #include #include /* XXX temporary hack to get u_ types */ +#else +#include +#include +#include +#include + +#include +#endif /* DRIVERKIT */ + #include #include - /* * Definitions for internet protocol version 4. * Per RFC 791, September 1981. @@ -106,7 +115,7 @@ struct ip { }; #ifdef _IP_VHL -#define IP_MAKE_VHL(v, hl) ((v) << 4 | (hl)) +#define IP_MAKE_VHL(v, hl) ((uint8_t)((v) << 4 | (hl))) #define IP_VHL_HL(vhl) ((vhl) & 0x0f) #define IP_VHL_V(vhl) ((vhl) >> 4) #define IP_VHL_BORING 0x45 diff --git a/bsd/netinet/ip6.h b/bsd/netinet/ip6.h index 3bee96451..7024165fb 100644 --- a/bsd/netinet/ip6.h +++ b/bsd/netinet/ip6.h @@ -93,7 +93,13 @@ #ifndef _NETINET_IP6_H_ #define _NETINET_IP6_H_ +#ifndef DRIVERKIT #include +#else +#include +#include +#include +#endif /* DRIVERKIT */ /* * Definition for internet protocol version 6. diff --git a/bsd/netinet/ip_compat.h b/bsd/netinet/ip_compat.h index d8de311b8..8ca7444b8 100644 --- a/bsd/netinet/ip_compat.h +++ b/bsd/netinet/ip_compat.h @@ -34,7 +34,9 @@ * * @(#)ip_compat.h 1.8 1/14/96 */ +#ifndef DRIVERKIT #include +#endif /* DRIVERKIT */ #if 0 diff --git a/bsd/netinet/ip_divert.c b/bsd/netinet/ip_divert.c deleted file mode 100644 index aef9c2c11..000000000 --- a/bsd/netinet/ip_divert.c +++ /dev/null @@ -1,829 +0,0 @@ -/* - * Copyright (c) 2000-2019 Apple Inc. All rights reserved. - * - * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * - * This file contains Original Code and/or Modifications of Original Code - * as defined in and that are subject to the Apple Public Source License - * Version 2.0 (the 'License'). You may not use this file except in - * compliance with the License. The rights granted to you under the License - * may not be used to create, or enable the creation or redistribution of, - * unlawful or unlicensed copies of an Apple operating system, or to - * circumvent, violate, or enable the circumvention or violation of, any - * terms of an Apple operating system software license agreement. - * - * Please obtain a copy of the License at - * http://www.opensource.apple.com/apsl/ and read it before using this file. - * - * The Original Code and all software distributed under the License are - * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER - * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, - * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. - * Please see the License for the specific language governing rights and - * limitations under the License. - * - * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ - */ -/* - * Copyright (c) 1982, 1986, 1988, 1993 - * The Regents of the University of California. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. 
All advertising materials mentioning features or use of this software - * must display the following acknowledgement: - * This product includes software developed by the University of - * California, Berkeley and its contributors. - * 4. Neither the name of the University nor the names of its contributors - * may be used to endorse or promote products derived from this software - * without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - * - * $FreeBSD: src/sys/netinet/ip_divert.c,v 1.98 2004/08/17 22:05:54 andre Exp $ - */ - -#if !INET -#error "IPDIVERT requires INET." -#endif - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include - -/* - * Divert sockets - */ - -/* - * Allocate enough space to hold a full IP packet - */ -#define DIVSNDQ (65536 + 100) -#define DIVRCVQ (65536 + 100) - -/* - * Divert sockets work in conjunction with ipfw, see the divert(4) - * manpage for features. - * Internally, packets selected by ipfw in ip_input() or ip_output(), - * and never diverted before, are passed to the input queue of the - * divert socket with a given 'divert_port' number (as specified in - * the matching ipfw rule), and they are tagged with a 16 bit cookie - * (representing the rule number of the matching ipfw rule), which - * is passed to process reading from the socket. - * - * Packets written to the divert socket are again tagged with a cookie - * (usually the same as above) and a destination address. - * If the destination address is INADDR_ANY then the packet is - * treated as outgoing and sent to ip_output(), otherwise it is - * treated as incoming and sent to ip_input(). - * In both cases, the packet is tagged with the cookie. - * - * On reinjection, processing in ip_input() and ip_output() - * will be exactly the same as for the original packet, except that - * ipfw processing will start at the rule number after the one - * written in the cookie (so, tagging a packet with a cookie of 0 - * will cause it to be effectively considered as a standard packet). - */ - -/* Internal variables */ -static struct inpcbhead divcb; -static struct inpcbinfo divcbinfo; - -static u_int32_t div_sendspace = DIVSNDQ; /* XXX sysctl ? */ -static u_int32_t div_recvspace = DIVRCVQ; /* XXX sysctl ? 
*/ - -/* Optimization: have this preinitialized */ -static struct sockaddr_in divsrc = { - .sin_len = sizeof(divsrc), - .sin_family = AF_INET, - .sin_port = 0, - .sin_addr = { .s_addr = 0 }, - .sin_zero = { 0, 0, 0, 0, 0, 0, 0, 0 } -}; - -/* Internal functions */ -static int div_output(struct socket *so, - struct mbuf *m, struct sockaddr_in *addr, struct mbuf *control); - -extern int load_ipfw(void); -/* - * Initialize divert connection block queue. - */ -void -div_init(struct protosw *pp, struct domain *dp) -{ -#pragma unused(dp) - static int div_initialized = 0; - struct inpcbinfo *pcbinfo; - - VERIFY((pp->pr_flags & (PR_INITIALIZED | PR_ATTACHED)) == PR_ATTACHED); - - if (div_initialized) { - return; - } - div_initialized = 1; - - LIST_INIT(&divcb); - divcbinfo.ipi_listhead = &divcb; - /* - * XXX We don't use the hash list for divert IP, but it's easier - * to allocate a one entry hash list than it is to check all - * over the place for ipi_hashbase == NULL. - */ - divcbinfo.ipi_hashbase = hashinit(1, M_PCB, &divcbinfo.ipi_hashmask); - divcbinfo.ipi_porthashbase = hashinit(1, M_PCB, &divcbinfo.ipi_porthashmask); - divcbinfo.ipi_zone = zinit(sizeof(struct inpcb), (512 * sizeof(struct inpcb)), - 4096, "divzone"); - pcbinfo = &divcbinfo; - /* - * allocate lock group attribute and group for udp pcb mutexes - */ - pcbinfo->ipi_lock_grp_attr = lck_grp_attr_alloc_init(); - - pcbinfo->ipi_lock_grp = lck_grp_alloc_init("divcb", pcbinfo->ipi_lock_grp_attr); - - /* - * allocate the lock attribute for divert pcb mutexes - */ - pcbinfo->ipi_lock_attr = lck_attr_alloc_init(); - - if ((pcbinfo->ipi_lock = lck_rw_alloc_init(pcbinfo->ipi_lock_grp, - pcbinfo->ipi_lock_attr)) == NULL) { - panic("%s: unable to allocate PCB lock\n", __func__); - /* NOTREACHED */ - } - - in_pcbinfo_attach(&divcbinfo); - -#if IPFIREWALL - if (!IPFW_LOADED) { - load_ipfw(); - } -#endif -} - -/* - * IPPROTO_DIVERT is not a real IP protocol; don't allow any packets - * with that protocol number to enter the system from the outside. - */ -void -div_input(struct mbuf *m, __unused int off) -{ - OSAddAtomic(1, &ipstat.ips_noproto); - m_freem(m); -} - -/* - * Divert a packet by passing it up to the divert socket at port 'port'. - * - * Setup generic address and protocol structures for div_input routine, - * then pass them along with mbuf chain. - */ -void -divert_packet(struct mbuf *m, int incoming, int port, int rule) -{ - struct ip *ip; - struct inpcb *inp; - struct socket *sa; - u_int16_t nport; - - /* Sanity check */ - KASSERT(port != 0, ("%s: port=0", __FUNCTION__)); - - divsrc.sin_port = rule; /* record matching rule */ - - /* Assure header */ - if (m->m_len < sizeof(struct ip) && - (m = m_pullup(m, sizeof(struct ip))) == 0) { - return; - } - ip = mtod(m, struct ip *); - - /* - * Record receive interface address, if any. - * But only for incoming packets. - */ - divsrc.sin_addr.s_addr = 0; - if (incoming) { - struct ifaddr *ifa; - - /* Sanity check */ - KASSERT((m->m_flags & M_PKTHDR), ("%s: !PKTHDR", __FUNCTION__)); - - /* Find IP address for receive interface */ - ifnet_lock_shared(m->m_pkthdr.rcvif); - TAILQ_FOREACH(ifa, &m->m_pkthdr.rcvif->if_addrhead, ifa_link) { - IFA_LOCK(ifa); - if (ifa->ifa_addr->sa_family != AF_INET) { - IFA_UNLOCK(ifa); - continue; - } - divsrc.sin_addr = - ((struct sockaddr_in *)(void *) ifa->ifa_addr)->sin_addr; - IFA_UNLOCK(ifa); - break; - } - ifnet_lock_done(m->m_pkthdr.rcvif); - } - /* - * Record the incoming interface name whenever we have one. 
- */ - bzero(&divsrc.sin_zero, sizeof(divsrc.sin_zero)); - if (m->m_pkthdr.rcvif) { - /* - * Hide the actual interface name in there in the - * sin_zero array. XXX This needs to be moved to a - * different sockaddr type for divert, e.g. - * sockaddr_div with multiple fields like - * sockaddr_dl. Presently we have only 7 bytes - * but that will do for now as most interfaces - * are 4 or less + 2 or less bytes for unit. - * There is probably a faster way of doing this, - * possibly taking it from the sockaddr_dl on the iface. - * This solves the problem of a P2P link and a LAN interface - * having the same address, which can result in the wrong - * interface being assigned to the packet when fed back - * into the divert socket. Theoretically if the daemon saves - * and re-uses the sockaddr_in as suggested in the man pages, - * this iface name will come along for the ride. - * (see div_output for the other half of this.) - */ - snprintf(divsrc.sin_zero, sizeof(divsrc.sin_zero), - "%s", if_name(m->m_pkthdr.rcvif)); - } - - /* Put packet on socket queue, if any */ - sa = NULL; - nport = htons((u_int16_t)port); - lck_rw_lock_shared(divcbinfo.ipi_lock); - LIST_FOREACH(inp, &divcb, inp_list) { - if (inp->inp_lport == nport) { - sa = inp->inp_socket; - } - } - if (sa) { - int error = 0; - - socket_lock(sa, 1); - if (sbappendaddr(&sa->so_rcv, (struct sockaddr *)&divsrc, - m, (struct mbuf *)0, &error) != 0) { - sorwakeup(sa); - } - socket_unlock(sa, 1); - } else { - m_freem(m); - OSAddAtomic(1, &ipstat.ips_noproto); - OSAddAtomic(-1, &ipstat.ips_delivered); - } - lck_rw_done(divcbinfo.ipi_lock); -} - -/* - * Deliver packet back into the IP processing machinery. - * - * If no address specified, or address is 0.0.0.0, send to ip_output(); - * otherwise, send to ip_input() and mark as having been received on - * the interface with that address. - * ###LOCK called in inet_proto mutex when from div_send. - */ -static int -div_output(struct socket *so, struct mbuf *m, struct sockaddr_in *sin, - struct mbuf *control) -{ - struct inpcb *const inp = sotoinpcb(so); - struct ip *const ip = mtod(m, struct ip *); - int error = 0; - int sotc = SO_TC_UNSPEC; - - if (control != NULL) { - int ignored; - - (void) so_tc_from_control(control, &sotc, &ignored); - - m_freem(control); /* XXX */ - control = NULL; - } - if (sotc == SO_TC_UNSPEC) { - sotc = so->so_traffic_class; - } - - /* Loopback avoidance and state recovery */ - if (sin) { - struct m_tag *mtag; - struct divert_tag *dt; - int len = 0; - char *c = sin->sin_zero; - - mtag = m_tag_create(KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_DIVERT, - sizeof(struct divert_tag), M_NOWAIT, m); - if (mtag == NULL) { - error = ENOBUFS; - goto cantsend; - } - dt = (struct divert_tag *)(mtag + 1); - dt->info = 0; - dt->cookie = sin->sin_port; - m_tag_prepend(m, mtag); - - /* - * Find receive interface with the given name or IP address. - * The name is user supplied data so don't trust its size or - * that it is zero terminated. The name has priority. - * We are presently assuming that the sockaddr_in - * has not been replaced by a sockaddr_div, so we limit it - * to 16 bytes in total. The name is stuffed (if it exists) - * in the sin_zero[] field.
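The comments in this deleted file spell out the whole userland contract: the matching rule number arrives in sin_port, the receive interface's address in sin_addr, its name in sin_zero, and writing back with sin_addr == INADDR_ANY reinjects the packet as outgoing rather than incoming. A minimal userspace sketch of that read/modify/reinject loop, assuming the classic divert(4) API (SOCK_RAW with IPPROTO_DIVERT from <netinet/in.h>; the divert port 8668 is only an example value) — an illustrative sketch, not code from the xnu tree:

#include <sys/socket.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
	char pkt[65535];
	struct sockaddr_in bindaddr, from;
	socklen_t fromlen;
	ssize_t n;
	int fd = socket(PF_INET, SOCK_RAW, IPPROTO_DIVERT);

	if (fd < 0) {
		perror("socket");
		return 1;
	}
	memset(&bindaddr, 0, sizeof(bindaddr));
	bindaddr.sin_family = AF_INET;
	bindaddr.sin_port = htons(8668);	/* divert port named in the ipfw rule (example value) */
	if (bind(fd, (struct sockaddr *)&bindaddr, sizeof(bindaddr)) < 0) {
		perror("bind");
		return 1;
	}
	for (;;) {
		fromlen = sizeof(from);
		n = recvfrom(fd, pkt, sizeof(pkt), 0,
		    (struct sockaddr *)&from, &fromlen);
		if (n < 0) {
			break;
		}
		/*
		 * from.sin_port carries the 16-bit cookie (matching rule number),
		 * from.sin_addr an address of the receive interface (0 for outgoing
		 * packets), and from.sin_zero the interface name, as divert_packet()
		 * fills them in above.
		 */
		/* ... inspect or modify the raw IP packet in pkt[0..n) here ... */

		/*
		 * Reinject: reusing 'from' unmodified keeps the direction, the cookie
		 * and the interface hint; forcing sin_addr to INADDR_ANY instead would
		 * push the packet out through ip_output().
		 */
		if (sendto(fd, pkt, (size_t)n, 0,
		    (struct sockaddr *)&from, sizeof(from)) < 0) {
			perror("sendto");
		}
	}
	close(fd);
	return 0;
}

Reusing the received sockaddr_in as-is, as the comment above suggests, is what lets the interface name stashed in sin_zero "come along for the ride" so div_output() can re-associate the packet with the right interface.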
- */ - while (*c++ && (len++ < sizeof(sin->sin_zero))) { - ; - } - if ((len > 0) && (len < sizeof(sin->sin_zero))) { - m->m_pkthdr.rcvif = ifunit(sin->sin_zero); - } - } - - /* Reinject packet into the system as incoming or outgoing */ - if (!sin || sin->sin_addr.s_addr == 0) { - struct ip_out_args ipoa; - struct route ro; - struct ip_moptions *imo; - - bzero(&ipoa, sizeof(ipoa)); - ipoa.ipoa_boundif = IFSCOPE_NONE; - ipoa.ipoa_flags = IPOAF_SELECT_SRCIF; - ipoa.ipoa_sotc = SO_TC_UNSPEC; - ipoa.ipoa_netsvctype = _NET_SERVICE_TYPE_UNSPEC; - - /* - * Don't allow both user specified and setsockopt options, - * and don't allow packet length sizes that will crash - */ - if (((ip->ip_hl != (sizeof(*ip) >> 2)) && inp->inp_options) || - ((u_short)ntohs(ip->ip_len) > m->m_pkthdr.len)) { - error = EINVAL; - goto cantsend; - } - - /* Convert fields to host order for ip_output() */ -#if BYTE_ORDER != BIG_ENDIAN - NTOHS(ip->ip_len); - NTOHS(ip->ip_off); -#endif - - OSAddAtomic(1, &ipstat.ips_rawout); - /* Copy the cached route and take an extra reference */ - inp_route_copyout(inp, &ro); - - if (sotc != SO_TC_UNSPEC) { - ipoa.ipoa_flags |= IPOAF_QOSMARKING_ALLOWED; - ipoa.ipoa_sotc = sotc; - } - set_packet_service_class(m, so, sotc, 0); - - imo = inp->inp_moptions; - if (imo != NULL) { - IMO_ADDREF(imo); - } - socket_unlock(so, 0); -#if CONFIG_MACF_NET - mac_mbuf_label_associate_inpcb(inp, m); -#endif - /* Send packet to output processing */ - error = ip_output(m, inp->inp_options, &ro, - (so->so_options & SO_DONTROUTE) | - IP_ALLOWBROADCAST | IP_RAWOUTPUT | IP_OUTARGS, - imo, &ipoa); - - socket_lock(so, 0); - if (imo != NULL) { - IMO_REMREF(imo); - } - /* Synchronize cached PCB route */ - inp_route_copyin(inp, &ro); - } else { - struct ifaddr *ifa; - - /* If no luck with the name above. check by IP address. */ - if (m->m_pkthdr.rcvif == NULL) { - struct sockaddr_in _sin; - /* - * Make sure there are no distractions for - * ifa_ifwithaddr; use sanitized version. 
- */ - bzero(&_sin, sizeof(_sin)); - _sin.sin_family = AF_INET; - _sin.sin_len = sizeof(struct sockaddr_in); - _sin.sin_addr.s_addr = sin->sin_addr.s_addr; - if (!(ifa = ifa_ifwithaddr(SA(&_sin)))) { - error = EADDRNOTAVAIL; - goto cantsend; - } - m->m_pkthdr.rcvif = ifa->ifa_ifp; - IFA_REMREF(ifa); - } -#if CONFIG_MACF_NET - mac_mbuf_label_associate_socket(so, m); -#endif - /* Send packet to input processing */ - proto_inject(PF_INET, m); - } - - return error; - -cantsend: - m_freem(m); - return error; -} - -static int -div_attach(struct socket *so, int proto, struct proc *p) -{ - struct inpcb *inp; - int error; - - - inp = sotoinpcb(so); - if (inp) { - panic("div_attach"); - } - if ((error = proc_suser(p)) != 0) { - return error; - } - - error = soreserve(so, div_sendspace, div_recvspace); - if (error) { - return error; - } - error = in_pcballoc(so, &divcbinfo, p); - if (error) { - return error; - } - inp = (struct inpcb *)so->so_pcb; - inp->inp_ip_p = proto; - inp->inp_vflag |= INP_IPV4; - inp->inp_flags |= INP_HDRINCL; - /* The socket is always "connected" because - * we always know "where" to send the packet */ - so->so_state |= SS_ISCONNECTED; - -#ifdef MORE_DICVLOCK_DEBUG - printf("div_attach: so=0x%llx sopcb=0x%llx lock=0x%llx ref=%x\n", - (uint64_t)VM_KERNEL_ADDRPERM(so), - (uint64_t)VM_KERNEL_ADDRPERM(so->so_pcb), - (uint64_t)VM_KERNEL_ADDRPERM(&(sotoinpcb(so)->inpcb_mtx)), - so->so_usecount); -#endif - return 0; -} - -static int -div_detach(struct socket *so) -{ - struct inpcb *inp; - -#ifdef MORE_DICVLOCK_DEBUG - printf("div_detach: so=0x%llx sopcb=0x%llx lock=0x%llx ref=%x\n", - (uint64_t)VM_KERNEL_ADDRPERM(so), - (uint64_t)VM_KERNEL_ADDRPERM(so->so_pcb), - (uint64_t)VM_KERNEL_ADDRPERM(&(sotoinpcb(so)->inpcb_mtx)), - so->so_usecount); -#endif - inp = sotoinpcb(so); - if (inp == 0) { - panic("div_detach: so=%p null inp\n", so); - } - in_pcbdetach(inp); - inp->inp_state = INPCB_STATE_DEAD; - return 0; -} - -static int -div_abort(struct socket *so) -{ - soisdisconnected(so); - return div_detach(so); -} - -static int -div_disconnect(struct socket *so) -{ - if ((so->so_state & SS_ISCONNECTED) == 0) { - return ENOTCONN; - } - return div_abort(so); -} - -static int -div_bind(struct socket *so, struct sockaddr *nam, struct proc *p) -{ - struct inpcb *inp; - int error; - - inp = sotoinpcb(so); - /* in_pcbbind assumes that the socket is a sockaddr_in - * and in_pcbbind requires a valid address. Since divert - * sockets don't we need to make sure the address is - * filled in properly. - * XXX -- divert should not be abusing in_pcbind - * and should probably have its own family. 
- */ - if (nam->sa_family != AF_INET) { - error = EAFNOSUPPORT; - } else { - ((struct sockaddr_in *)(void *)nam)->sin_addr.s_addr = INADDR_ANY; - error = in_pcbbind(inp, nam, p); - } - return error; -} - -static int -div_shutdown(struct socket *so) -{ - socantsendmore(so); - return 0; -} - -static int -div_send(struct socket *so, __unused int flags, struct mbuf *m, struct sockaddr *nam, - struct mbuf *control, __unused struct proc *p) -{ - /* Packet must have a header (but that's about it) */ - if (m->m_len < sizeof(struct ip) && - (m = m_pullup(m, sizeof(struct ip))) == 0) { - OSAddAtomic(1, &ipstat.ips_toosmall); - m_freem(m); - return EINVAL; - } - - /* Send packet */ - return div_output(so, m, SIN(nam), control); -} - -#if 0 -static int -div_pcblist SYSCTL_HANDLER_ARGS -{ -#pragma unused(oidp, arg1, arg2) - int error, i, n; - struct inpcb *inp, **inp_list; - inp_gen_t gencnt; - struct xinpgen xig; - - /* - * The process of preparing the TCB list is too time-consuming and - * resource-intensive to repeat twice on every request. - */ - lck_rw_lock_exclusive(divcbinfo.ipi_lock); - if (req->oldptr == USER_ADDR_NULL) { - n = divcbinfo.ipi_count; - req->oldidx = 2 * (sizeof xig) - + (n + n / 8) * sizeof(struct xinpcb); - lck_rw_done(divcbinfo.ipi_lock); - return 0; - } - - if (req->newptr != USER_ADDR_NULL) { - lck_rw_done(divcbinfo.ipi_lock); - return EPERM; - } - - /* - * OK, now we're committed to doing something. - */ - gencnt = divcbinfo.ipi_gencnt; - n = divcbinfo.ipi_count; - - bzero(&xig, sizeof(xig)); - xig.xig_len = sizeof xig; - xig.xig_count = n; - xig.xig_gen = gencnt; - xig.xig_sogen = so_gencnt; - error = SYSCTL_OUT(req, &xig, sizeof xig); - if (error) { - lck_rw_done(divcbinfo.ipi_lock); - return error; - } - - inp_list = _MALLOC(n * sizeof *inp_list, M_TEMP, M_WAITOK); - if (inp_list == 0) { - lck_rw_done(divcbinfo.ipi_lock); - return ENOMEM; - } - - for (inp = LIST_FIRST(divcbinfo.ipi_listhead), i = 0; inp && i < n; - inp = LIST_NEXT(inp, inp_list)) { -#ifdef __APPLE__ - if (inp->inp_gencnt <= gencnt && inp->inp_state != INPCB_STATE_DEAD) -#else - if (inp->inp_gencnt <= gencnt && !prison_xinpcb(req->p, inp)) -#endif - { inp_list[i++] = inp;} - } - n = i; - - error = 0; - for (i = 0; i < n; i++) { - inp = inp_list[i]; - if (inp->inp_gencnt <= gencnt && inp->inp_state != INPCB_STATE_DEAD) { - struct xinpcb xi; - - bzero(&xi, sizeof(xi)); - xi.xi_len = sizeof xi; - /* XXX should avoid extra copy */ - inpcb_to_compat(inp, &xi.xi_inp); - if (inp->inp_socket) { - sotoxsocket(inp->inp_socket, &xi.xi_socket); - } - error = SYSCTL_OUT(req, &xi, sizeof xi); - } - } - if (!error) { - /* - * Give the user an updated idea of our state. - * If the generation differs from what we told - * her before, she knows that something happened - * while we were processing this request, and it - * might be necessary to retry. 
- */ - bzero(&xig, sizeof(xig)); - xig.xig_len = sizeof xig; - xig.xig_gen = divcbinfo.ipi_gencnt; - xig.xig_sogen = so_gencnt; - xig.xig_count = divcbinfo.ipi_count; - error = SYSCTL_OUT(req, &xig, sizeof xig); - } - FREE(inp_list, M_TEMP); - lck_rw_done(divcbinfo.ipi_lock); - return error; -} -#endif - -__private_extern__ int -div_lock(struct socket *so, int refcount, void *lr) -{ - void *lr_saved; - - if (lr == NULL) { - lr_saved = __builtin_return_address(0); - } else { - lr_saved = lr; - } - -#ifdef MORE_DICVLOCK_DEBUG - printf("div_lock: so=0x%llx sopcb=0x%llx lock=0x%llx ref=%x " - "lr=0x%llx\n", (uint64_t)VM_KERNEL_ADDRPERM(so), - (uint64_t)VM_KERNEL_ADDRPERM(so->so_pcb), so->so_pcb ? - (uint64_t)VM_KERNEL_ADDRPERM(&(sotoinpcb(so)->inpcb_mtx)) : NULL, - so->so_usecount, (uint64_t)VM_KERNEL_ADDRPERM(lr_saved)); -#endif - if (so->so_pcb) { - lck_mtx_lock(&((struct inpcb *)so->so_pcb)->inpcb_mtx); - } else { - panic("div_lock: so=%p NO PCB! lr=%p lrh= lrh= %s\n", - so, lr_saved, solockhistory_nr(so)); - /* NOTREACHED */ - } - - if (so->so_usecount < 0) { - panic("div_lock: so=%p so_pcb=%p lr=%p ref=%x lrh= %s\n", - so, so->so_pcb, lr_saved, so->so_usecount, - solockhistory_nr(so)); - /* NOTREACHED */ - } - - if (refcount) { - so->so_usecount++; - } - so->lock_lr[so->next_lock_lr] = lr_saved; - so->next_lock_lr = (so->next_lock_lr + 1) % SO_LCKDBG_MAX; - - return 0; -} - -__private_extern__ int -div_unlock(struct socket *so, int refcount, void *lr) -{ - void *lr_saved; - lck_mtx_t * mutex_held; - struct inpcb *inp = sotoinpcb(so); - - if (lr == NULL) { - lr_saved = __builtin_return_address(0); - } else { - lr_saved = lr; - } - -#ifdef MORE_DICVLOCK_DEBUG - printf("div_unlock: so=0x%llx sopcb=0x%llx lock=0x%llx ref=%x " - "lr=0x%llx\n", (uint64_t)VM_KERNEL_ADDRPERM(so), - (uint64_t)VM_KERNEL_ADDRPERM(so->so_pcb), so->so_pcb ? 
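Besides taking the inpcb mutex, div_lock()/div_unlock() above keep a small panic-debugging trail: each call stores its caller's return address in a fixed-size ring (lock_lr/unlock_lr, SO_LCKDBG_MAX slots) that solockhistory_nr() can later format. A self-contained sketch of that ring-buffer pattern, with hypothetical names (lock_history, HISTORY_MAX) standing in for the socket fields:

#include <stddef.h>

#define HISTORY_MAX 4	/* hypothetical size; the socket layer uses SO_LCKDBG_MAX */

struct lock_history {
	void     *pc[HISTORY_MAX];	/* return addresses of recent lock/unlock callers */
	unsigned  next;			/* next slot to overwrite */
};

/* Mirrors the bookkeeping above: default to our own caller when lr is NULL. */
static inline void
history_record(struct lock_history *h, void *lr)
{
	if (lr == NULL) {
		lr = __builtin_return_address(0);
	}
	h->pc[h->next] = lr;
	h->next = (h->next + 1) % HISTORY_MAX;
}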
- (uint64_t)VM_KERNEL_ADDRPERM(&(sotoinpcb(so)->inpcb_mtx)) : NULL, - so->so_usecount, lr_saved); -#endif - if (refcount) { - so->so_usecount--; - } - - if (so->so_usecount < 0) { - panic("div_unlock: so=%p usecount=%x lrh= %s\n", - so, so->so_usecount, solockhistory_nr(so)); - /* NOTREACHED */ - } - if (so->so_pcb == NULL) { - panic("div_unlock: so=%p NO PCB usecount=%x lr=%p lrh= %s\n", - so, so->so_usecount, lr_saved, solockhistory_nr(so)); - /* NOTREACHED */ - } - mutex_held = &((struct inpcb *)so->so_pcb)->inpcb_mtx; - - if (so->so_usecount == 0 && (inp->inp_wantcnt == WNT_STOPUSING)) { - lck_rw_lock_exclusive(divcbinfo.ipi_lock); - if (inp->inp_state != INPCB_STATE_DEAD) { - in_pcbdetach(inp); - } - in_pcbdispose(inp); - lck_rw_done(divcbinfo.ipi_lock); - return 0; - } - LCK_MTX_ASSERT(mutex_held, LCK_MTX_ASSERT_OWNED); - so->unlock_lr[so->next_unlock_lr] = lr_saved; - so->next_unlock_lr = (so->next_unlock_lr + 1) % SO_LCKDBG_MAX; - lck_mtx_unlock(mutex_held); - return 0; -} - -__private_extern__ lck_mtx_t * -div_getlock(struct socket *so, __unused int flags) -{ - struct inpcb *inpcb = (struct inpcb *)so->so_pcb; - - if (so->so_pcb) { - if (so->so_usecount < 0) { - panic("div_getlock: so=%p usecount=%x lrh= %s\n", - so, so->so_usecount, solockhistory_nr(so)); - } - return &inpcb->inpcb_mtx; - } else { - panic("div_getlock: so=%p NULL NO PCB lrh= %s\n", - so, solockhistory_nr(so)); - return so->so_proto->pr_domain->dom_mtx; - } -} - -struct pr_usrreqs div_usrreqs = { - .pru_abort = div_abort, - .pru_attach = div_attach, - .pru_bind = div_bind, - .pru_control = in_control, - .pru_detach = div_detach, - .pru_disconnect = div_disconnect, - .pru_peeraddr = in_getpeeraddr, - .pru_send = div_send, - .pru_shutdown = div_shutdown, - .pru_sockaddr = in_getsockaddr, - .pru_sosend = sosend, - .pru_soreceive = soreceive, -}; diff --git a/bsd/netinet/ip_divert.h b/bsd/netinet/ip_divert.h deleted file mode 100644 index 1536f0416..000000000 --- a/bsd/netinet/ip_divert.h +++ /dev/null @@ -1,122 +0,0 @@ -/* - * Copyright (c) 2008-2013 Apple Inc. All rights reserved. - * - * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * - * This file contains Original Code and/or Modifications of Original Code - * as defined in and that are subject to the Apple Public Source License - * Version 2.0 (the 'License'). You may not use this file except in - * compliance with the License. The rights granted to you under the License - * may not be used to create, or enable the creation or redistribution of, - * unlawful or unlicensed copies of an Apple operating system, or to - * circumvent, violate, or enable the circumvention or violation of, any - * terms of an Apple operating system software license agreement. - * - * Please obtain a copy of the License at - * http://www.opensource.apple.com/apsl/ and read it before using this file. - * - * The Original Code and all software distributed under the License are - * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER - * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, - * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. - * Please see the License for the specific language governing rights and - * limitations under the License. - * - * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ - */ - -/*- - * Copyright (c) 2003 Sam Leffler, Errno Consulting - * All rights reserved. 
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer, - * without modification. - * 2. Redistributions in binary form must reproduce at minimum a disclaimer - * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any - * redistribution must be conditioned upon including a substantially - * similar Disclaimer requirement for further binary redistribution. - * 3. Neither the names of the above-listed copyright holders nor the names - * of any contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * NO WARRANTY - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY - * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL - * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, - * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER - * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF - * THE POSSIBILITY OF SUCH DAMAGES. - * - * $FreeBSD: src/sys/netinet/ip_divert.h,v 1.3 2004/02/25 19:55:28 mlaier Exp $ - */ - -#ifndef _NETINET_IP_DIVERT_H_ -#define _NETINET_IP_DIVERT_H_ - -#if IPDIVERT -#ifdef BSD_KERNEL_PRIVATE -/* - * Divert socket definitions. - */ - -/* 32-bit unique unsigned value used to identify a module */ - -struct divert_tag { - u_int32_t info; /* port & flags */ - u_int16_t cookie; /* ipfw rule number */ -}; - -/* - * Return the divert cookie associated with the mbuf; if any. - */ -static __inline u_int16_t -divert_cookie(struct m_tag *mtag) -{ - return ((struct divert_tag *)(mtag + 1))->cookie; -} -static __inline u_int16_t -divert_find_cookie(struct mbuf *m) -{ - struct m_tag *mtag = m_tag_locate(m, KERNEL_MODULE_TAG_ID, - KERNEL_TAG_TYPE_DIVERT, NULL); - return mtag ? divert_cookie(mtag) : 0; -} - -/* - * Return the divert info associated with the mbuf; if any. - */ -static __inline u_int32_t -divert_info(struct m_tag *mtag) -{ - return ((struct divert_tag *)(mtag + 1))->info; -} -static __inline u_int32_t -divert_find_info(struct mbuf *m) -{ - struct m_tag *mtag = m_tag_locate(m, KERNEL_MODULE_TAG_ID, - KERNEL_TAG_TYPE_DIVERT, NULL); - return mtag ? divert_info(mtag) : 0; -} - -extern void div_init(struct protosw *, struct domain *); -extern void div_input(struct mbuf *, int); -lck_mtx_t * -div_getlock(struct socket *, int ); -int div_unlock(struct socket *, int, void *); -int div_lock(struct socket *, int, void *); -extern void divert_packet(struct mbuf *m, int incoming, int port, int rule); -extern struct pr_usrreqs div_usrreqs; - -#endif /* BSD_KERNEL_PRIVATE */ -#endif /* IPDIVERT */ -#endif /* _NETINET_IP_DIVERT_H_ */ diff --git a/bsd/netinet/ip_dummynet.c b/bsd/netinet/ip_dummynet.c index 3a854db05..0b4cdd095 100644 --- a/bsd/netinet/ip_dummynet.c +++ b/bsd/netinet/ip_dummynet.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2017 Apple Inc. All rights reserved. 
+ * Copyright (c) 2000-2019 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -54,11 +54,10 @@ * $FreeBSD: src/sys/netinet/ip_dummynet.c,v 1.84 2004/08/25 09:31:30 pjd Exp $ */ -#define DUMMYNET_DEBUG +#define DUMMYNET_DEBUG /* * This module implements IP dummynet, a bandwidth limiter/delay emulator - * used in conjunction with the ipfw package. * Description of the data structures used is in ip_dummynet.h * Here you mainly find the following blocks of code: * + variable declarations; @@ -83,7 +82,7 @@ #include #include #include -#include /* XXX */ +#include /* XXX */ #include #include #include @@ -102,33 +101,30 @@ #include #include #include -#include #include #include #include /* for ip6_input, ip6_output prototypes */ #include -static struct ip_fw default_rule; - /* * We keep a private variable for the simulation time, but we could * probably use an existing one ("softticks" in sys/kern/kern_timer.c) */ -static dn_key curr_time = 0 ; /* current simulation time */ +static dn_key curr_time = 0; /* current simulation time */ /* this is for the timer that fires to call dummynet() - we only enable the timer when - there are packets to process, otherwise it's disabled */ + * there are packets to process, otherwise it's disabled */ static int timer_enabled = 0; -static int dn_hash_size = 64 ; /* default hash size */ +static int dn_hash_size = 64; /* default hash size */ /* statistics on number of queue searches and search steps */ -static int searches, search_steps ; -static int pipe_expire = 1 ; /* expire queue if empty */ -static int dn_max_ratio = 16 ; /* max queues/buckets ratio */ +static int searches, search_steps; +static int pipe_expire = 1; /* expire queue if empty */ +static int dn_max_ratio = 16; /* max queues/buckets ratio */ -static int red_lookup_depth = 256; /* RED - default lookup table depth */ +static int red_lookup_depth = 256; /* RED - default lookup table depth */ static int red_avg_pkt_size = 512; /* RED - default medium packet size */ static int red_max_pkt_size = 1500; /* RED - default max packet size */ @@ -144,19 +140,19 @@ static int serialize = 0; * extract_heap contains pipes associated with delay lines. 
* */ -static struct dn_heap ready_heap, extract_heap, wfq_ready_heap ; +static struct dn_heap ready_heap, extract_heap, wfq_ready_heap; -static int heap_init(struct dn_heap *h, int size) ; -static int heap_insert (struct dn_heap *h, dn_key key1, void *p); +static int heap_init(struct dn_heap *h, int size); +static int heap_insert(struct dn_heap *h, dn_key key1, void *p); static void heap_extract(struct dn_heap *h, void *obj); -static void transmit_event(struct dn_pipe *pipe, struct mbuf **head, - struct mbuf **tail); -static void ready_event(struct dn_flow_queue *q, struct mbuf **head, - struct mbuf **tail); -static void ready_event_wfq(struct dn_pipe *p, struct mbuf **head, - struct mbuf **tail); +static void transmit_event(struct dn_pipe *pipe, struct mbuf **head, + struct mbuf **tail); +static void ready_event(struct dn_flow_queue *q, struct mbuf **head, + struct mbuf **tail); +static void ready_event_wfq(struct dn_pipe *p, struct mbuf **head, + struct mbuf **tail); /* * Packets are retrieved from queues in Dummynet in chains instead of @@ -165,48 +161,48 @@ static void ready_event_wfq(struct dn_pipe *p, struct mbuf **head, */ static void dummynet_send(struct mbuf *m); -#define HASHSIZE 16 -#define HASH(num) ((((num) >> 8) ^ ((num) >> 4) ^ (num)) & 0x0f) -static struct dn_pipe_head pipehash[HASHSIZE]; /* all pipes */ -static struct dn_flow_set_head flowsethash[HASHSIZE]; /* all flowsets */ +#define HASHSIZE 16 +#define HASH(num) ((((num) >> 8) ^ ((num) >> 4) ^ (num)) & 0x0f) +static struct dn_pipe_head pipehash[HASHSIZE]; /* all pipes */ +static struct dn_flow_set_head flowsethash[HASHSIZE]; /* all flowsets */ #ifdef SYSCTL_NODE SYSCTL_NODE(_net_inet_ip, OID_AUTO, dummynet, - CTLFLAG_RW | CTLFLAG_LOCKED, 0, "Dummynet"); + CTLFLAG_RW | CTLFLAG_LOCKED, 0, "Dummynet"); SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, hash_size, - CTLFLAG_RW | CTLFLAG_LOCKED, &dn_hash_size, 0, "Default hash table size"); + CTLFLAG_RW | CTLFLAG_LOCKED, &dn_hash_size, 0, "Default hash table size"); SYSCTL_QUAD(_net_inet_ip_dummynet, OID_AUTO, curr_time, - CTLFLAG_RD | CTLFLAG_LOCKED, &curr_time, "Current tick"); + CTLFLAG_RD | CTLFLAG_LOCKED, &curr_time, "Current tick"); SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, ready_heap, - CTLFLAG_RD | CTLFLAG_LOCKED, &ready_heap.size, 0, "Size of ready heap"); + CTLFLAG_RD | CTLFLAG_LOCKED, &ready_heap.size, 0, "Size of ready heap"); SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, extract_heap, - CTLFLAG_RD | CTLFLAG_LOCKED, &extract_heap.size, 0, "Size of extract heap"); + CTLFLAG_RD | CTLFLAG_LOCKED, &extract_heap.size, 0, "Size of extract heap"); SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, searches, - CTLFLAG_RD | CTLFLAG_LOCKED, &searches, 0, "Number of queue searches"); + CTLFLAG_RD | CTLFLAG_LOCKED, &searches, 0, "Number of queue searches"); SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, search_steps, - CTLFLAG_RD | CTLFLAG_LOCKED, &search_steps, 0, "Number of queue search steps"); + CTLFLAG_RD | CTLFLAG_LOCKED, &search_steps, 0, "Number of queue search steps"); SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, expire, - CTLFLAG_RW | CTLFLAG_LOCKED, &pipe_expire, 0, "Expire queue if empty"); + CTLFLAG_RW | CTLFLAG_LOCKED, &pipe_expire, 0, "Expire queue if empty"); SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, max_chain_len, - CTLFLAG_RW | CTLFLAG_LOCKED, &dn_max_ratio, 0, - "Max ratio between dynamic queues and buckets"); + CTLFLAG_RW | CTLFLAG_LOCKED, &dn_max_ratio, 0, + "Max ratio between dynamic queues and buckets"); SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, red_lookup_depth, - 
CTLFLAG_RD | CTLFLAG_LOCKED, &red_lookup_depth, 0, "Depth of RED lookup table"); + CTLFLAG_RD | CTLFLAG_LOCKED, &red_lookup_depth, 0, "Depth of RED lookup table"); SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, red_avg_pkt_size, - CTLFLAG_RD | CTLFLAG_LOCKED, &red_avg_pkt_size, 0, "RED Medium packet size"); + CTLFLAG_RD | CTLFLAG_LOCKED, &red_avg_pkt_size, 0, "RED Medium packet size"); SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, red_max_pkt_size, - CTLFLAG_RD | CTLFLAG_LOCKED, &red_max_pkt_size, 0, "RED Max packet size"); + CTLFLAG_RD | CTLFLAG_LOCKED, &red_max_pkt_size, 0, "RED Max packet size"); #endif #ifdef DUMMYNET_DEBUG -int dummynet_debug = 0; +int dummynet_debug = 0; #ifdef SYSCTL_NODE SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, debug, CTLFLAG_RW | CTLFLAG_LOCKED, &dummynet_debug, - 0, "control debugging printfs"); + 0, "control debugging printfs"); #endif -#define DPRINTF(X) if (dummynet_debug) printf X +#define DPRINTF(X) if (dummynet_debug) printf X #else -#define DPRINTF(X) +#define DPRINTF(X) #endif /* dummynet lock */ @@ -238,13 +234,14 @@ static int cp_pipe_from_user_32( struct sockopt *sopt, struct dn_pipe *p ); struct eventhandler_lists_ctxt dummynet_evhdlr_ctxt; -uint32_t my_random(void) +uint32_t +my_random(void) { uint32_t val; read_frandom(&val, sizeof(val)); val &= 0x7FFFFFFF; - return (val); + return val; } /* @@ -263,17 +260,18 @@ uint32_t my_random(void) #define HEAP_LEFT(x) ( 2*(x) + 1 ) #define HEAP_IS_LEFT(x) ( (x) & 1 ) #define HEAP_RIGHT(x) ( 2*(x) + 2 ) -#define HEAP_SWAP(a, b, buffer) { buffer = a ; a = b ; b = buffer ; } -#define HEAP_INCREMENT 15 +#define HEAP_SWAP(a, b, buffer) { buffer = a ; a = b ; b = buffer ; } +#define HEAP_INCREMENT 15 -int cp_pipe_from_user_32( struct sockopt *sopt, struct dn_pipe *p ) +int +cp_pipe_from_user_32( struct sockopt *sopt, struct dn_pipe *p ) { struct dn_pipe_32 user_pipe_32; - int error=0; + int error = 0; error = sooptcopyin(sopt, &user_pipe_32, sizeof(struct dn_pipe_32), sizeof(struct dn_pipe_32)); - if ( !error ){ + if (!error) { p->pipe_nr = user_pipe_32.pipe_nr; p->bandwidth = user_pipe_32.bandwidth; p->delay = user_pipe_32.delay; @@ -313,13 +311,14 @@ int cp_pipe_from_user_32( struct sockopt *sopt, struct dn_pipe *p ) } -int cp_pipe_from_user_64( struct sockopt *sopt, struct dn_pipe *p ) +int +cp_pipe_from_user_64( struct sockopt *sopt, struct dn_pipe *p ) { struct dn_pipe_64 user_pipe_64; - int error=0; + int error = 0; error = sooptcopyin(sopt, &user_pipe_64, sizeof(struct dn_pipe_64), sizeof(struct dn_pipe_64)); - if ( !error ){ + if (!error) { p->pipe_nr = user_pipe_64.pipe_nr; p->bandwidth = user_pipe_64.bandwidth; p->delay = user_pipe_64.delay; @@ -362,64 +361,65 @@ static void cp_flow_set_to_32_user(struct dn_flow_set *set, struct dn_flow_set_32 *fs_bp) { fs_bp->fs_nr = set->fs_nr; - fs_bp->flags_fs = set->flags_fs ; - fs_bp->parent_nr = set->parent_nr ; - fs_bp->weight = set->weight ; - fs_bp->qsize = set->qsize ; - fs_bp->plr = set->plr ; - fs_bp->flow_mask = set->flow_mask ; - fs_bp->rq_size = set->rq_size ; - fs_bp->rq_elements = set->rq_elements ; - fs_bp->last_expired = set->last_expired ; - fs_bp->backlogged = set->backlogged ; - fs_bp->w_q = set->w_q ; - fs_bp->max_th = set->max_th ; - fs_bp->min_th = set->min_th ; - fs_bp->max_p = set->max_p ; - fs_bp->c_1 = set->c_1 ; - fs_bp->c_2 = set->c_2 ; - fs_bp->c_3 = set->c_3 ; - fs_bp->c_4 = set->c_4 ; - fs_bp->w_q_lookup = CAST_DOWN_EXPLICIT(user32_addr_t, set->w_q_lookup) ; - fs_bp->lookup_depth = set->lookup_depth ; - fs_bp->lookup_step = 
set->lookup_step ; - fs_bp->lookup_weight = set->lookup_weight ; - fs_bp->avg_pkt_size = set->avg_pkt_size ; - fs_bp->max_pkt_size = set->max_pkt_size ; + fs_bp->flags_fs = set->flags_fs; + fs_bp->parent_nr = set->parent_nr; + fs_bp->weight = set->weight; + fs_bp->qsize = set->qsize; + fs_bp->plr = set->plr; + fs_bp->flow_mask = set->flow_mask; + fs_bp->rq_size = set->rq_size; + fs_bp->rq_elements = set->rq_elements; + fs_bp->last_expired = set->last_expired; + fs_bp->backlogged = set->backlogged; + fs_bp->w_q = set->w_q; + fs_bp->max_th = set->max_th; + fs_bp->min_th = set->min_th; + fs_bp->max_p = set->max_p; + fs_bp->c_1 = set->c_1; + fs_bp->c_2 = set->c_2; + fs_bp->c_3 = set->c_3; + fs_bp->c_4 = set->c_4; + fs_bp->w_q_lookup = CAST_DOWN_EXPLICIT(user32_addr_t, set->w_q_lookup); + fs_bp->lookup_depth = set->lookup_depth; + fs_bp->lookup_step = set->lookup_step; + fs_bp->lookup_weight = set->lookup_weight; + fs_bp->avg_pkt_size = set->avg_pkt_size; + fs_bp->max_pkt_size = set->max_pkt_size; } static void cp_flow_set_to_64_user(struct dn_flow_set *set, struct dn_flow_set_64 *fs_bp) { fs_bp->fs_nr = set->fs_nr; - fs_bp->flags_fs = set->flags_fs ; - fs_bp->parent_nr = set->parent_nr ; - fs_bp->weight = set->weight ; - fs_bp->qsize = set->qsize ; - fs_bp->plr = set->plr ; - fs_bp->flow_mask = set->flow_mask ; - fs_bp->rq_size = set->rq_size ; - fs_bp->rq_elements = set->rq_elements ; - fs_bp->last_expired = set->last_expired ; - fs_bp->backlogged = set->backlogged ; - fs_bp->w_q = set->w_q ; - fs_bp->max_th = set->max_th ; - fs_bp->min_th = set->min_th ; - fs_bp->max_p = set->max_p ; - fs_bp->c_1 = set->c_1 ; - fs_bp->c_2 = set->c_2 ; - fs_bp->c_3 = set->c_3 ; - fs_bp->c_4 = set->c_4 ; - fs_bp->w_q_lookup = CAST_DOWN(user64_addr_t, set->w_q_lookup) ; - fs_bp->lookup_depth = set->lookup_depth ; - fs_bp->lookup_step = set->lookup_step ; - fs_bp->lookup_weight = set->lookup_weight ; - fs_bp->avg_pkt_size = set->avg_pkt_size ; - fs_bp->max_pkt_size = set->max_pkt_size ; + fs_bp->flags_fs = set->flags_fs; + fs_bp->parent_nr = set->parent_nr; + fs_bp->weight = set->weight; + fs_bp->qsize = set->qsize; + fs_bp->plr = set->plr; + fs_bp->flow_mask = set->flow_mask; + fs_bp->rq_size = set->rq_size; + fs_bp->rq_elements = set->rq_elements; + fs_bp->last_expired = set->last_expired; + fs_bp->backlogged = set->backlogged; + fs_bp->w_q = set->w_q; + fs_bp->max_th = set->max_th; + fs_bp->min_th = set->min_th; + fs_bp->max_p = set->max_p; + fs_bp->c_1 = set->c_1; + fs_bp->c_2 = set->c_2; + fs_bp->c_3 = set->c_3; + fs_bp->c_4 = set->c_4; + fs_bp->w_q_lookup = CAST_DOWN(user64_addr_t, set->w_q_lookup); + fs_bp->lookup_depth = set->lookup_depth; + fs_bp->lookup_step = set->lookup_step; + fs_bp->lookup_weight = set->lookup_weight; + fs_bp->avg_pkt_size = set->avg_pkt_size; + fs_bp->max_pkt_size = set->max_pkt_size; } static -void cp_queue_to_32_user( struct dn_flow_queue *q, struct dn_flow_queue_32 *qp) +void +cp_queue_to_32_user( struct dn_flow_queue *q, struct dn_flow_queue_32 *qp) { qp->id = q->id; qp->len = q->len; @@ -440,7 +440,8 @@ void cp_queue_to_32_user( struct dn_flow_queue *q, struct dn_flow_queue_32 *qp) } static -void cp_queue_to_64_user( struct dn_flow_queue *q, struct dn_flow_queue_64 *qp) +void +cp_queue_to_64_user( struct dn_flow_queue *q, struct dn_flow_queue_64 *qp) { qp->id = q->id; qp->len = q->len; @@ -461,9 +462,10 @@ void cp_queue_to_64_user( struct dn_flow_queue *q, struct dn_flow_queue_64 *qp) } static -char *cp_pipe_to_32_user(struct dn_pipe *p, struct dn_pipe_32 *pipe_bp) +char * 
+cp_pipe_to_32_user(struct dn_pipe *p, struct dn_pipe_32 *pipe_bp) { - char *bp; + char *bp; pipe_bp->pipe_nr = p->pipe_nr; pipe_bp->bandwidth = p->bandwidth; @@ -484,7 +486,7 @@ char *cp_pipe_to_32_user(struct dn_pipe *p, struct dn_pipe_32 *pipe_bp) cp_flow_set_to_32_user( &(p->fs), &(pipe_bp->fs)); - pipe_bp->delay = (pipe_bp->delay * 1000) / (hz*10) ; + pipe_bp->delay = (pipe_bp->delay * 1000) / (hz * 10); /* * XXX the following is a hack based on ->next being the * first field in dn_pipe and dn_flow_set. The correct @@ -493,18 +495,19 @@ char *cp_pipe_to_32_user(struct dn_pipe *p, struct dn_pipe_32 *pipe_bp) */ pipe_bp->next = CAST_DOWN_EXPLICIT( user32_addr_t, DN_IS_PIPE ); /* clean pointers */ - pipe_bp->head = pipe_bp->tail = (user32_addr_t) 0 ; - pipe_bp->fs.next = (user32_addr_t)0 ; - pipe_bp->fs.pipe = (user32_addr_t)0 ; - pipe_bp->fs.rq = (user32_addr_t)0 ; + pipe_bp->head = pipe_bp->tail = (user32_addr_t) 0; + pipe_bp->fs.next = (user32_addr_t)0; + pipe_bp->fs.pipe = (user32_addr_t)0; + pipe_bp->fs.rq = (user32_addr_t)0; bp = ((char *)pipe_bp) + sizeof(struct dn_pipe_32); - return( dn_copy_set_32( &(p->fs), bp) ); + return dn_copy_set_32( &(p->fs), bp); } static -char *cp_pipe_to_64_user(struct dn_pipe *p, struct dn_pipe_64 *pipe_bp) +char * +cp_pipe_to_64_user(struct dn_pipe *p, struct dn_pipe_64 *pipe_bp) { - char *bp; + char *bp; pipe_bp->pipe_nr = p->pipe_nr; pipe_bp->bandwidth = p->bandwidth; @@ -525,7 +528,7 @@ char *cp_pipe_to_64_user(struct dn_pipe *p, struct dn_pipe_64 *pipe_bp) cp_flow_set_to_64_user( &(p->fs), &(pipe_bp->fs)); - pipe_bp->delay = (pipe_bp->delay * 1000) / (hz*10) ; + pipe_bp->delay = (pipe_bp->delay * 1000) / (hz * 10); /* * XXX the following is a hack based on ->next being the * first field in dn_pipe and dn_flow_set. 
The correct @@ -534,37 +537,37 @@ char *cp_pipe_to_64_user(struct dn_pipe *p, struct dn_pipe_64 *pipe_bp) */ pipe_bp->next = CAST_DOWN( user64_addr_t, DN_IS_PIPE ); /* clean pointers */ - pipe_bp->head = pipe_bp->tail = USER_ADDR_NULL ; - pipe_bp->fs.next = USER_ADDR_NULL ; - pipe_bp->fs.pipe = USER_ADDR_NULL ; - pipe_bp->fs.rq = USER_ADDR_NULL ; + pipe_bp->head = pipe_bp->tail = USER_ADDR_NULL; + pipe_bp->fs.next = USER_ADDR_NULL; + pipe_bp->fs.pipe = USER_ADDR_NULL; + pipe_bp->fs.rq = USER_ADDR_NULL; bp = ((char *)pipe_bp) + sizeof(struct dn_pipe_64); - return( dn_copy_set_64( &(p->fs), bp) ); + return dn_copy_set_64( &(p->fs), bp); } static int heap_init(struct dn_heap *h, int new_size) { - struct dn_heap_entry *p; - - if (h->size >= new_size ) { - printf("dummynet: heap_init, Bogus call, have %d want %d\n", - h->size, new_size); - return 0 ; - } - new_size = (new_size + HEAP_INCREMENT ) & ~HEAP_INCREMENT ; - p = _MALLOC(new_size * sizeof(*p), M_DUMMYNET, M_DONTWAIT ); - if (p == NULL) { - printf("dummynet: heap_init, resize %d failed\n", new_size ); - return 1 ; /* error */ - } - if (h->size > 0) { - bcopy(h->p, p, h->size * sizeof(*p) ); - FREE(h->p, M_DUMMYNET); - } - h->p = p ; - h->size = new_size ; - return 0 ; + struct dn_heap_entry *p; + + if (h->size >= new_size) { + printf("dummynet: heap_init, Bogus call, have %d want %d\n", + h->size, new_size); + return 0; + } + new_size = (new_size + HEAP_INCREMENT) & ~HEAP_INCREMENT; + p = _MALLOC(new_size * sizeof(*p), M_DUMMYNET, M_DONTWAIT ); + if (p == NULL) { + printf("dummynet: heap_init, resize %d failed\n", new_size ); + return 1; /* error */ + } + if (h->size > 0) { + bcopy(h->p, p, h->size * sizeof(*p)); + FREE(h->p, M_DUMMYNET); + } + h->p = p; + h->size = new_size; + return 0; } /* @@ -589,32 +592,35 @@ heap_init(struct dn_heap *h, int new_size) static int heap_insert(struct dn_heap *h, dn_key key1, void *p) { - int son = h->elements ; - - if (p == NULL) /* data already there, set starting point */ - son = key1 ; - else { /* insert new element at the end, possibly resize */ - son = h->elements ; - if (son == h->size) /* need resize... */ - if (heap_init(h, h->elements+1) ) - return 1 ; /* failure... */ - h->p[son].object = p ; - h->p[son].key = key1 ; - h->elements++ ; - } - while (son > 0) { /* bubble up */ - int father = HEAP_FATHER(son) ; - struct dn_heap_entry tmp ; - - if (DN_KEY_LT( h->p[father].key, h->p[son].key ) ) - break ; /* found right position */ - /* son smaller than father, swap and repeat */ - HEAP_SWAP(h->p[son], h->p[father], tmp) ; + int son = h->elements; + + if (p == NULL) { /* data already there, set starting point */ + son = key1; + } else { /* insert new element at the end, possibly resize */ + son = h->elements; + if (son == h->size) { /* need resize... */ + if (heap_init(h, h->elements + 1)) { + return 1; /* failure... 
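heap_insert() above is the standard array-backed min-heap "bubble up": the new entry lands at index h->elements and is swapped with its parent while its key is smaller (the real code compares with DN_KEY_LT, resizes through heap_init(), and also supports re-inserting an existing slot when p == NULL). A self-contained sketch of the same index arithmetic; HEAP_FATHER is not defined in this hunk, so the conventional ((x) - 1) / 2 is assumed, and the SET_OFFSET() bookkeeping is left out:

#include <stddef.h>
#include <stdint.h>

typedef uint64_t dn_key;			/* same role as the dn_key used above */

#define HEAP_FATHER(x)  (((x) - 1) / 2)		/* assumed definition, not shown in this hunk */

struct entry {
	dn_key  key;
	void   *object;
};

/*
 * Insert (key, object) into a min-heap of n entries stored in p[] and return
 * the new element count.  The caller guarantees capacity.
 */
static size_t
minheap_insert(struct entry *p, size_t n, dn_key key, void *object)
{
	size_t son = n++;

	p[son].key = key;
	p[son].object = object;
	while (son > 0) {				/* bubble up */
		size_t father = HEAP_FATHER(son);
		struct entry tmp;

		if (p[father].key < p[son].key) {	/* DN_KEY_LT in the real code */
			break;				/* found the right position */
		}
		tmp = p[son]; p[son] = p[father]; p[father] = tmp;	/* HEAP_SWAP */
		son = father;
	}
	return n;
}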
*/ + } + } + h->p[son].object = p; + h->p[son].key = key1; + h->elements++; + } + while (son > 0) { /* bubble up */ + int father = HEAP_FATHER(son); + struct dn_heap_entry tmp; + + if (DN_KEY_LT( h->p[father].key, h->p[son].key )) { + break; /* found right position */ + } + /* son smaller than father, swap and repeat */ + HEAP_SWAP(h->p[son], h->p[father], tmp); + SET_OFFSET(h, son); + son = father; + } SET_OFFSET(h, son); - son = father ; - } - SET_OFFSET(h, son); - return 0 ; + return 0; } /* @@ -623,42 +629,44 @@ heap_insert(struct dn_heap *h, dn_key key1, void *p) static void heap_extract(struct dn_heap *h, void *obj) { - int child, father, maxelt = h->elements - 1 ; - - if (maxelt < 0) { - printf("dummynet: warning, extract from empty heap 0x%llx\n", - (uint64_t)VM_KERNEL_ADDRPERM(h)); - return ; - } - father = 0 ; /* default: move up smallest child */ - if (obj != NULL) { /* extract specific element, index is at offset */ - if (h->offset <= 0) - panic("dummynet: heap_extract from middle not supported on this heap!!!\n"); - father = *((int *)((char *)obj + h->offset)) ; - if (father < 0 || father >= h->elements) { - printf("dummynet: heap_extract, father %d out of bound 0..%d\n", - father, h->elements); - panic("dummynet: heap_extract"); - } - } - RESET_OFFSET(h, father); - child = HEAP_LEFT(father) ; /* left child */ - while (child <= maxelt) { /* valid entry */ - if (child != maxelt && DN_KEY_LT(h->p[child+1].key, h->p[child].key) ) - child = child+1 ; /* take right child, otherwise left */ - h->p[father] = h->p[child] ; - SET_OFFSET(h, father); - father = child ; - child = HEAP_LEFT(child) ; /* left child for next loop */ - } - h->elements-- ; - if (father != maxelt) { - /* - * Fill hole with last entry and bubble up, reusing the insert code - */ - h->p[father] = h->p[maxelt] ; - heap_insert(h, father, NULL); /* this one cannot fail */ - } + int child, father, maxelt = h->elements - 1; + + if (maxelt < 0) { + printf("dummynet: warning, extract from empty heap 0x%llx\n", + (uint64_t)VM_KERNEL_ADDRPERM(h)); + return; + } + father = 0; /* default: move up smallest child */ + if (obj != NULL) { /* extract specific element, index is at offset */ + if (h->offset <= 0) { + panic("dummynet: heap_extract from middle not supported on this heap!!!\n"); + } + father = *((int *)((char *)obj + h->offset)); + if (father < 0 || father >= h->elements) { + printf("dummynet: heap_extract, father %d out of bound 0..%d\n", + father, h->elements); + panic("dummynet: heap_extract"); + } + } + RESET_OFFSET(h, father); + child = HEAP_LEFT(father); /* left child */ + while (child <= maxelt) { /* valid entry */ + if (child != maxelt && DN_KEY_LT(h->p[child + 1].key, h->p[child].key)) { + child = child + 1; /* take right child, otherwise left */ + } + h->p[father] = h->p[child]; + SET_OFFSET(h, father); + father = child; + child = HEAP_LEFT(child); /* left child for next loop */ + } + h->elements--; + if (father != maxelt) { + /* + * Fill hole with last entry and bubble up, reusing the insert code + */ + h->p[father] = h->p[maxelt]; + heap_insert(h, father, NULL); /* this one cannot fail */ + } } /* @@ -668,10 +676,11 @@ heap_extract(struct dn_heap *h, void *obj) static void heapify(struct dn_heap *h) { - int i ; + int i; - for (i = 0 ; i < h->elements ; i++ ) - heap_insert(h, i , NULL) ; + for (i = 0; i < h->elements; i++) { + heap_insert(h, i, NULL); + } } /* @@ -680,9 +689,10 @@ heapify(struct dn_heap *h) static void heap_free(struct dn_heap *h) { - if (h->size >0 ) - FREE(h->p, M_DUMMYNET); - bzero(h, 
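The reason heap_extract() just above can remove an element from the middle in O(log n) is the offset trick: SET_OFFSET() writes each object's current slot index into the object itself at byte offset h->offset, so extraction starts from *(int *)((char *)obj + h->offset) instead of searching the array. A tiny sketch of the idea with an illustrative struct (heap_pos is a made-up field name, not xnu's):

#include <stddef.h>
#include <string.h>

struct flow {
	int heap_pos;		/* kept current by the heap on every move (cf. SET_OFFSET) */
	/* ... payload ... */
};

/*
 * Generic form of the lookup heap_extract() performs: read the slot index
 * stored inside the object at the byte offset chosen when the heap was set
 * up, e.g. offsetof(struct flow, heap_pos).
 */
static int
heap_slot_of(const void *obj, size_t offset)
{
	int slot;

	memcpy(&slot, (const char *)obj + offset, sizeof(slot));
	return slot;		/* O(1); the heap then refills the hole and re-heapifies */
}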
sizeof(*h)); + if (h->size > 0) { + FREE(h->p, M_DUMMYNET); + } + bzero(h, sizeof(*h)); } /* @@ -697,15 +707,16 @@ heap_free(struct dn_heap *h) static struct dn_pkt_tag * dn_tag_get(struct mbuf *m) { - struct m_tag *mtag = m_tag_first(m); + struct m_tag *mtag = m_tag_first(m); - if (!(mtag != NULL && - mtag->m_tag_id == KERNEL_MODULE_TAG_ID && - mtag->m_tag_type == KERNEL_TAG_TYPE_DUMMYNET)) - panic("packet on dummynet queue w/o dummynet tag: 0x%llx", - (uint64_t)VM_KERNEL_ADDRPERM(m)); + if (!(mtag != NULL && + mtag->m_tag_id == KERNEL_MODULE_TAG_ID && + mtag->m_tag_type == KERNEL_TAG_TYPE_DUMMYNET)) { + panic("packet on dummynet queue w/o dummynet tag: 0x%llx", + (uint64_t)VM_KERNEL_ADDRPERM(m)); + } - return (struct dn_pkt_tag *)(mtag+1); + return (struct dn_pkt_tag *)(mtag + 1); } /* @@ -729,32 +740,35 @@ dn_tag_get(struct mbuf *m) static void transmit_event(struct dn_pipe *pipe, struct mbuf **head, struct mbuf **tail) { - struct mbuf *m ; + struct mbuf *m; struct dn_pkt_tag *pkt = NULL; u_int64_t schedule_time; LCK_MTX_ASSERT(dn_mutex, LCK_MTX_ASSERT_OWNED); - ASSERT(serialize >= 0); + ASSERT(serialize >= 0); if (serialize == 0) { while ((m = pipe->head) != NULL) { pkt = dn_tag_get(m); - if (!DN_KEY_LEQ(pkt->dn_output_time, curr_time)) + if (!DN_KEY_LEQ(pkt->dn_output_time, curr_time)) { break; + } pipe->head = m->m_nextpkt; - if (*tail != NULL) + if (*tail != NULL) { (*tail)->m_nextpkt = m; - else + } else { *head = m; + } *tail = m; } - if (*tail != NULL) + if (*tail != NULL) { (*tail)->m_nextpkt = NULL; + } } schedule_time = pkt == NULL || DN_KEY_LEQ(pkt->dn_output_time, curr_time) ? - curr_time + 1 : pkt->dn_output_time; + curr_time + 1 : pkt->dn_output_time; /* if there are leftover packets, put the pipe into the heap for next ready event */ if ((m = pipe->head) != NULL) { @@ -778,7 +792,7 @@ transmit_event(struct dn_pipe *pipe, struct mbuf **head, struct mbuf **tail) * in place but adjusted by a factor of 10 so that hz is functionally * equal to 1000. 
*/ -#define SET_TICKS(_m, q, p) \ +#define SET_TICKS(_m, q, p) \ ((_m)->m_pkthdr.len*8*(hz*10) - (q)->numbytes + p->bandwidth - 1 ) / \ p->bandwidth ; @@ -788,22 +802,23 @@ transmit_event(struct dn_pipe *pipe, struct mbuf **head, struct mbuf **tail) */ static void move_pkt(struct mbuf *pkt, struct dn_flow_queue *q, - struct dn_pipe *p, int len) + struct dn_pipe *p, int len) { - struct dn_pkt_tag *dt = dn_tag_get(pkt); + struct dn_pkt_tag *dt = dn_tag_get(pkt); - q->head = pkt->m_nextpkt ; - q->len-- ; - q->len_bytes -= len ; + q->head = pkt->m_nextpkt; + q->len--; + q->len_bytes -= len; - dt->dn_output_time = curr_time + p->delay ; + dt->dn_output_time = curr_time + p->delay; - if (p->head == NULL) - p->head = pkt; - else - p->tail->m_nextpkt = pkt; - p->tail = pkt; - p->tail->m_nextpkt = NULL; + if (p->head == NULL) { + p->head = pkt; + } else { + p->tail->m_nextpkt = pkt; + } + p->tail = pkt; + p->tail->m_nextpkt = NULL; } /* @@ -816,59 +831,61 @@ move_pkt(struct mbuf *pkt, struct dn_flow_queue *q, static void ready_event(struct dn_flow_queue *q, struct mbuf **head, struct mbuf **tail) { - struct mbuf *pkt; - struct dn_pipe *p = q->fs->pipe ; - int p_was_empty ; + struct mbuf *pkt; + struct dn_pipe *p = q->fs->pipe; + int p_was_empty; LCK_MTX_ASSERT(dn_mutex, LCK_MTX_ASSERT_OWNED); - if (p == NULL) { + if (p == NULL) { printf("dummynet: ready_event pipe is gone\n"); - return ; - } - p_was_empty = (p->head == NULL) ; - - /* - * schedule fixed-rate queues linked to this pipe: - * Account for the bw accumulated since last scheduling, then - * drain as many pkts as allowed by q->numbytes and move to - * the delay line (in p) computing output time. - * bandwidth==0 (no limit) means we can drain the whole queue, - * setting len_scaled = 0 does the job. - */ - q->numbytes += ( curr_time - q->sched_time ) * p->bandwidth; - while ( (pkt = q->head) != NULL ) { - int len = pkt->m_pkthdr.len; - int len_scaled = p->bandwidth ? len*8*(hz*10) : 0 ; - if (len_scaled > q->numbytes ) - break ; - q->numbytes -= len_scaled ; - move_pkt(pkt, q, p, len); - } - /* - * If we have more packets queued, schedule next ready event - * (can only occur when bandwidth != 0, otherwise we would have - * flushed the whole queue in the previous loop). - * To this purpose we record the current time and compute how many - * ticks to go for the finish time of the packet. - */ - if ( (pkt = q->head) != NULL ) { /* this implies bandwidth != 0 */ - dn_key t = SET_TICKS(pkt, q, p); /* ticks i have to wait */ - q->sched_time = curr_time ; - heap_insert(&ready_heap, curr_time + t, (void *)q ); - /* XXX should check errors on heap_insert, and drain the whole - * queue on error hoping next time we are luckier. + return; + } + p_was_empty = (p->head == NULL); + + /* + * schedule fixed-rate queues linked to this pipe: + * Account for the bw accumulated since last scheduling, then + * drain as many pkts as allowed by q->numbytes and move to + * the delay line (in p) computing output time. + * bandwidth==0 (no limit) means we can drain the whole queue, + * setting len_scaled = 0 does the job. + */ + q->numbytes += (curr_time - q->sched_time) * p->bandwidth; + while ((pkt = q->head) != NULL) { + int len = pkt->m_pkthdr.len; + int len_scaled = p->bandwidth ? 
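SET_TICKS above is a ceiling division in the pipe's scaled units: the packet length in bits times (hz * 10) — which, per the preceding comment, makes a tick behave like 1 ms — minus the credit already accumulated in q->numbytes, divided by the bandwidth. A standalone sketch with one worked value (1500-byte packet, 1 Mbit/s pipe, no accumulated credit, hz == 100 — illustrative numbers only):

#include <assert.h>
#include <stdint.h>

/*
 * Scaled units as in SET_TICKS: lengths are bits * (hz * 10), and one tick of
 * credit at 'bandwidth' bits/s pays for 'bandwidth' scaled units.
 */
static int64_t
ticks_until_ready(int64_t len_bytes, int64_t credit, int64_t bandwidth, int hz)
{
	int64_t len_scaled = len_bytes * 8 * (hz * 10);

	/* ceil((len_scaled - credit) / bandwidth), exactly what the macro computes */
	return (len_scaled - credit + bandwidth - 1) / bandwidth;
}

int
main(void)
{
	/* 1500 bytes at 1 Mbit/s with no credit: 12000 bits -> 12 ticks, i.e. 12 ms. */
	assert(ticks_until_ready(1500, 0, 1000000, 100) == 12);
	return 0;
}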
len * 8 * (hz * 10) : 0; + if (len_scaled > q->numbytes) { + break; + } + q->numbytes -= len_scaled; + move_pkt(pkt, q, p, len); + } + /* + * If we have more packets queued, schedule next ready event + * (can only occur when bandwidth != 0, otherwise we would have + * flushed the whole queue in the previous loop). + * To this purpose we record the current time and compute how many + * ticks to go for the finish time of the packet. */ - } else { /* RED needs to know when the queue becomes empty */ - q->q_time = curr_time; - q->numbytes = 0; - } - /* - * If the delay line was empty call transmit_event(p) now. - * Otherwise, the scheduler will take care of it. - */ - if (p_was_empty) + if ((pkt = q->head) != NULL) { /* this implies bandwidth != 0 */ + dn_key t = SET_TICKS(pkt, q, p); /* ticks i have to wait */ + q->sched_time = curr_time; + heap_insert(&ready_heap, curr_time + t, (void *)q ); + /* XXX should check errors on heap_insert, and drain the whole + * queue on error hoping next time we are luckier. + */ + } else { /* RED needs to know when the queue becomes empty */ + q->q_time = curr_time; + q->numbytes = 0; + } + /* + * If the delay line was empty call transmit_event(p) now. + * Otherwise, the scheduler will take care of it. + */ + if (p_was_empty) { transmit_event(p, head, tail); + } } /* @@ -882,128 +899,132 @@ ready_event(struct dn_flow_queue *q, struct mbuf **head, struct mbuf **tail) static void ready_event_wfq(struct dn_pipe *p, struct mbuf **head, struct mbuf **tail) { - int p_was_empty = (p->head == NULL) ; - struct dn_heap *sch = &(p->scheduler_heap); - struct dn_heap *neh = &(p->not_eligible_heap) ; + int p_was_empty = (p->head == NULL); + struct dn_heap *sch = &(p->scheduler_heap); + struct dn_heap *neh = &(p->not_eligible_heap); int64_t p_numbytes = p->numbytes; LCK_MTX_ASSERT(dn_mutex, LCK_MTX_ASSERT_OWNED); - if (p->if_name[0] == 0) /* tx clock is simulated */ - p_numbytes += ( curr_time - p->sched_time ) * p->bandwidth; - else { /* tx clock is for real, the ifq must be empty or this is a NOP */ - if (p->ifp && !IFCQ_IS_EMPTY(&p->ifp->if_snd)) - return ; - else { - DPRINTF(("dummynet: pipe %d ready from %s --\n", - p->pipe_nr, p->if_name)); - } - } - - /* - * While we have backlogged traffic AND credit, we need to do - * something on the queue. - */ - while ( p_numbytes >=0 && (sch->elements>0 || neh->elements >0) ) { - if (sch->elements > 0) { /* have some eligible pkts to send out */ - struct dn_flow_queue *q = sch->p[0].object ; - struct mbuf *pkt = q->head; - struct dn_flow_set *fs = q->fs; - u_int64_t len = pkt->m_pkthdr.len; - int len_scaled = p->bandwidth ? len*8*(hz*10) : 0 ; - - heap_extract(sch, NULL); /* remove queue from heap */ - p_numbytes -= len_scaled ; - move_pkt(pkt, q, p, len); - - p->V += (len<sum ; /* update V */ - q->S = q->F ; /* update start time */ - if (q->len == 0) { /* Flow not backlogged any more */ - fs->backlogged-- ; - heap_insert(&(p->idle_heap), q->F, q); - } else { /* still backlogged */ - /* - * update F and position in backlogged queue, then - * put flow in not_eligible_heap (we will fix this later). 
- */ - len = (q->head)->m_pkthdr.len; - q->F += (len<weight ; - if (DN_KEY_LEQ(q->S, p->V)) - heap_insert(neh, q->S, q); - else - heap_insert(sch, q->F, q); - } + if (p->if_name[0] == 0) { /* tx clock is simulated */ + p_numbytes += (curr_time - p->sched_time) * p->bandwidth; + } else { /* tx clock is for real, the ifq must be empty or this is a NOP */ + if (p->ifp && !IFCQ_IS_EMPTY(&p->ifp->if_snd)) { + return; + } else { + DPRINTF(("dummynet: pipe %d ready from %s --\n", + p->pipe_nr, p->if_name)); + } } + /* - * now compute V = max(V, min(S_i)). Remember that all elements in sch - * have by definition S_i <= V so if sch is not empty, V is surely - * the max and we must not update it. Conversely, if sch is empty - * we only need to look at neh. + * While we have backlogged traffic AND credit, we need to do + * something on the queue. */ - if (sch->elements == 0 && neh->elements > 0) - p->V = MAX64 ( p->V, neh->p[0].key ); - /* move from neh to sch any packets that have become eligible */ - while (neh->elements > 0 && DN_KEY_LEQ(neh->p[0].key, p->V) ) { - struct dn_flow_queue *q = neh->p[0].object ; - heap_extract(neh, NULL); - heap_insert(sch, q->F, q); - } + while (p_numbytes >= 0 && (sch->elements > 0 || neh->elements > 0)) { + if (sch->elements > 0) { /* have some eligible pkts to send out */ + struct dn_flow_queue *q = sch->p[0].object; + struct mbuf *pkt = q->head; + struct dn_flow_set *fs = q->fs; + u_int64_t len = pkt->m_pkthdr.len; + int len_scaled = p->bandwidth ? len * 8 * (hz * 10) : 0; + + heap_extract(sch, NULL); /* remove queue from heap */ + p_numbytes -= len_scaled; + move_pkt(pkt, q, p, len); + + p->V += (len << MY_M) / p->sum; /* update V */ + q->S = q->F; /* update start time */ + if (q->len == 0) { /* Flow not backlogged any more */ + fs->backlogged--; + heap_insert(&(p->idle_heap), q->F, q); + } else { /* still backlogged */ + /* + * update F and position in backlogged queue, then + * put flow in not_eligible_heap (we will fix this later). + */ + len = (q->head)->m_pkthdr.len; + q->F += (len << MY_M) / (u_int64_t) fs->weight; + if (DN_KEY_LEQ(q->S, p->V)) { + heap_insert(neh, q->S, q); + } else { + heap_insert(sch, q->F, q); + } + } + } + /* + * now compute V = max(V, min(S_i)). Remember that all elements in sch + * have by definition S_i <= V so if sch is not empty, V is surely + * the max and we must not update it. Conversely, if sch is empty + * we only need to look at neh. + */ + if (sch->elements == 0 && neh->elements > 0) { + p->V = MAX64( p->V, neh->p[0].key ); + } + /* move from neh to sch any packets that have become eligible */ + while (neh->elements > 0 && DN_KEY_LEQ(neh->p[0].key, p->V)) { + struct dn_flow_queue *q = neh->p[0].object; + heap_extract(neh, NULL); + heap_insert(sch, q->F, q); + } - if (p->if_name[0] != '\0') {/* tx clock is from a real thing */ - p_numbytes = -1 ; /* mark not ready for I/O */ - break ; + if (p->if_name[0] != '\0') {/* tx clock is from a real thing */ + p_numbytes = -1; /* mark not ready for I/O */ + break; + } } - } - if (sch->elements == 0 && neh->elements == 0 && p_numbytes >= 0 + if (sch->elements == 0 && neh->elements == 0 && p_numbytes >= 0 && p->idle_heap.elements > 0) { + /* + * no traffic and no events scheduled. We can get rid of idle-heap. + */ + int i; + + for (i = 0; i < p->idle_heap.elements; i++) { + struct dn_flow_queue *q = p->idle_heap.p[i].object; + + q->F = 0; + q->S = q->F + 1; + } + p->sum = 0; + p->V = 0; + p->idle_heap.elements = 0; + } /* - * no traffic and no events scheduled. 
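ready_event_wfq() above is the WF2Q+ dequeue step: V is the pipe's virtual time, S and F a flow's virtual start/finish times, p->sum the total weight of the backlogged flows, and a flow waits in the not_eligible_heap (keyed by S) until V catches up, then competes in the scheduler_heap (keyed by F). A condensed restatement of just the timestamp bookkeeping, with the heaps reduced to comments; MY_M is a fixed-point shift defined in ip_dummynet.h, and the value used here (16) is an assumption:

#include <stdint.h>

#define MY_M 16		/* fixed-point shift from ip_dummynet.h; value assumed here */

struct wfq_flow {
	uint64_t S, F;		/* virtual start / finish time of the flow's head packet */
	uint64_t weight;
	int      backlogged;	/* still has packets queued after this dequeue */
};

struct wfq_pipe {
	uint64_t V;		/* pipe virtual time */
	uint64_t sum;		/* sum of the weights of all backlogged flows */
};

/* One WF2Q+ dequeue of a len_sent-byte packet from flow q on pipe p. */
static void
wfq_dequeue_update(struct wfq_pipe *p, struct wfq_flow *q,
    uint64_t len_sent, uint64_t len_new_head)
{
	p->V += (len_sent << MY_M) / p->sum;	/* advance virtual time by the service just given */
	q->S = q->F;				/* the next packet starts where this one finished */
	if (q->backlogged) {
		/* finish time of the packet now at the head of the flow's queue */
		q->F += (len_new_head << MY_M) / q->weight;
		/*
		 * The flow is re-queued keyed by F if S <= V (eligible), or parked in
		 * the not_eligible_heap keyed by S until V catches up.
		 */
	} else {
		/* Flow went idle: it moves to the idle heap and its weight leaves p->sum. */
	}
}

When the scheduler heap drains, the code above also advances V to the smallest S in the not-eligible heap, which is the "V = max(V, min(S_i))" step its comment describes.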
We can get rid of idle-heap. - */ - int i ; - - for (i = 0 ; i < p->idle_heap.elements ; i++) { - struct dn_flow_queue *q = p->idle_heap.p[i].object ; - - q->F = 0 ; - q->S = q->F + 1 ; - } - p->sum = 0 ; - p->V = 0 ; - p->idle_heap.elements = 0 ; - } - /* - * If we are getting clocks from dummynet (not a real interface) and - * If we are under credit, schedule the next ready event. - * Also fix the delivery time of the last packet. - */ - if (p->if_name[0]==0 && p_numbytes < 0) { /* this implies bandwidth >0 */ - dn_key t=0 ; /* number of ticks i have to wait */ - - if (p->bandwidth > 0) - t = ( p->bandwidth -1 - p_numbytes) / p->bandwidth ; - dn_tag_get(p->tail)->dn_output_time += t ; - p->sched_time = curr_time ; - heap_insert(&wfq_ready_heap, curr_time + t, (void *)p); - /* XXX should check errors on heap_insert, and drain the whole - * queue on error hoping next time we are luckier. + * If we are getting clocks from dummynet (not a real interface) and + * If we are under credit, schedule the next ready event. + * Also fix the delivery time of the last packet. */ - } + if (p->if_name[0] == 0 && p_numbytes < 0) { /* this implies bandwidth >0 */ + dn_key t = 0; /* number of ticks i have to wait */ + + if (p->bandwidth > 0) { + t = (p->bandwidth - 1 - p_numbytes) / p->bandwidth; + } + dn_tag_get(p->tail)->dn_output_time += t; + p->sched_time = curr_time; + heap_insert(&wfq_ready_heap, curr_time + t, (void *)p); + /* XXX should check errors on heap_insert, and drain the whole + * queue on error hoping next time we are luckier. + */ + } /* Fit (adjust if necessary) 64bit result into 32bit variable. */ - if (p_numbytes > INT_MAX) + if (p_numbytes > INT_MAX) { p->numbytes = INT_MAX; - else if (p_numbytes < INT_MIN) + } else if (p_numbytes < INT_MIN) { p->numbytes = INT_MIN; - else + } else { p->numbytes = p_numbytes; + } - /* - * If the delay line was empty call transmit_event(p) now. - * Otherwise, the scheduler will take care of it. - */ - if (p_was_empty) + /* + * If the delay line was empty call transmit_event(p) now. + * Otherwise, the scheduler will take care of it. + */ + if (p_was_empty) { transmit_event(p, head, tail); - + } } /* @@ -1013,79 +1034,84 @@ ready_event_wfq(struct dn_pipe *p, struct mbuf **head, struct mbuf **tail) static void dummynet(__unused void * unused) { - void *p ; /* generic parameter to handler */ - struct dn_heap *h ; - struct dn_heap *heaps[3]; - struct mbuf *head = NULL, *tail = NULL; - int i; - struct dn_pipe *pe ; - struct timespec ts; - struct timeval tv; - - heaps[0] = &ready_heap ; /* fixed-rate queues */ - heaps[1] = &wfq_ready_heap ; /* wfq queues */ - heaps[2] = &extract_heap ; /* delay line */ + void *p; /* generic parameter to handler */ + struct dn_heap *h; + struct dn_heap *heaps[3]; + struct mbuf *head = NULL, *tail = NULL; + int i; + struct dn_pipe *pe; + struct timespec ts; + struct timeval tv; + + heaps[0] = &ready_heap; /* fixed-rate queues */ + heaps[1] = &wfq_ready_heap; /* wfq queues */ + heaps[2] = &extract_heap; /* delay line */ lck_mtx_lock(dn_mutex); - /* make all time measurements in milliseconds (ms) - - * here we convert secs and usecs to msecs (just divide the + /* make all time measurements in milliseconds (ms) - + * here we convert secs and usecs to msecs (just divide the * usecs and take the closest whole number). 
- */ - microuptime(&tv); - curr_time = (tv.tv_sec * 1000) + (tv.tv_usec / 1000); - - for (i=0; i < 3 ; i++) { - h = heaps[i]; - while (h->elements > 0 && DN_KEY_LEQ(h->p[0].key, curr_time) ) { - if (h->p[0].key > curr_time) - printf("dummynet: warning, heap %d is %d ticks late\n", - i, (int)(curr_time - h->p[0].key)); - p = h->p[0].object ; /* store a copy before heap_extract */ - heap_extract(h, NULL); /* need to extract before processing */ - if (i == 0) - ready_event(p, &head, &tail) ; - else if (i == 1) { - struct dn_pipe *pipe = p; - if (pipe->if_name[0] != '\0') - printf("dummynet: bad ready_event_wfq for pipe %s\n", - pipe->if_name); - else - ready_event_wfq(p, &head, &tail) ; - } else { - transmit_event(p, &head, &tail); + */ + microuptime(&tv); + curr_time = (tv.tv_sec * 1000) + (tv.tv_usec / 1000); + + for (i = 0; i < 3; i++) { + h = heaps[i]; + while (h->elements > 0 && DN_KEY_LEQ(h->p[0].key, curr_time)) { + if (h->p[0].key > curr_time) { + printf("dummynet: warning, heap %d is %d ticks late\n", + i, (int)(curr_time - h->p[0].key)); + } + p = h->p[0].object; /* store a copy before heap_extract */ + heap_extract(h, NULL); /* need to extract before processing */ + if (i == 0) { + ready_event(p, &head, &tail); + } else if (i == 1) { + struct dn_pipe *pipe = p; + if (pipe->if_name[0] != '\0') { + printf("dummynet: bad ready_event_wfq for pipe %s\n", + pipe->if_name); + } else { + ready_event_wfq(p, &head, &tail); + } + } else { + transmit_event(p, &head, &tail); + } } } - } - /* sweep pipes trying to expire idle flow_queues */ - for (i = 0; i < HASHSIZE; i++) - SLIST_FOREACH(pe, &pipehash[i], next) - if (pe->idle_heap.elements > 0 && - DN_KEY_LT(pe->idle_heap.p[0].key, pe->V) ) { - struct dn_flow_queue *q = pe->idle_heap.p[0].object ; - - heap_extract(&(pe->idle_heap), NULL); - q->S = q->F + 1 ; /* mark timestamp as invalid */ - pe->sum -= q->fs->weight ; + /* sweep pipes trying to expire idle flow_queues */ + for (i = 0; i < HASHSIZE; i++) { + SLIST_FOREACH(pe, &pipehash[i], next) { + if (pe->idle_heap.elements > 0 && + DN_KEY_LT(pe->idle_heap.p[0].key, pe->V)) { + struct dn_flow_queue *q = pe->idle_heap.p[0].object; + + heap_extract(&(pe->idle_heap), NULL); + q->S = q->F + 1; /* mark timestamp as invalid */ + pe->sum -= q->fs->weight; + } + } } /* check the heaps to see if there's still stuff in there, and * only set the timer if there are packets to process */ timer_enabled = 0; - for (i=0; i < 3 ; i++) { + for (i = 0; i < 3; i++) { h = heaps[i]; if (h->elements > 0) { // set the timer ts.tv_sec = 0; - ts.tv_nsec = 1 * 1000000; // 1ms + ts.tv_nsec = 1 * 1000000; // 1ms timer_enabled = 1; bsd_timeout(dummynet, NULL, &ts); break; } } - if (head != NULL) + if (head != NULL) { serialize++; + } lck_mtx_unlock(dn_mutex); @@ -1114,23 +1140,22 @@ dummynet_send(struct mbuf *m) (uint64_t)VM_KERNEL_ADDRPERM(m), pkt->dn_dir, pkt->dn_flags)); - switch (pkt->dn_dir) { + switch (pkt->dn_dir) { case DN_TO_IP_OUT: { struct route tmp_rt; /* route is already in the packet's dn_ro */ - bzero(&tmp_rt, sizeof (tmp_rt)); + bzero(&tmp_rt, sizeof(tmp_rt)); /* Force IP_RAWOUTPUT as the IP header is fully formed */ pkt->dn_flags |= IP_RAWOUTPUT | IP_FORWARDING; (void)ip_output(m, NULL, &tmp_rt, pkt->dn_flags, NULL, NULL); ROUTE_RELEASE(&tmp_rt); - break ; + break; } - case DN_TO_IP_IN : + case DN_TO_IP_IN: proto_inject(PF_INET, m); - break ; -#ifdef INET6 + break; case DN_TO_IP6_OUT: { /* routes already in the packet's dn_{ro6,pmtu} */ ip6_output(m, NULL, NULL, IPV6_FORWARDING, NULL, NULL, NULL); @@ 
-1139,12 +1164,11 @@ dummynet_send(struct mbuf *m) case DN_TO_IP6_IN: proto_inject(PF_INET6, m); break; -#endif /* INET6 */ default: printf("dummynet: bad switch %d!\n", pkt->dn_dir); m_freem(m); - break ; - } + break; + } } } @@ -1155,32 +1179,36 @@ dummynet_send(struct mbuf *m) static int expire_queues(struct dn_flow_set *fs) { - struct dn_flow_queue *q, *prev ; - int i, initial_elements = fs->rq_elements ; + struct dn_flow_queue *q, *prev; + int i, initial_elements = fs->rq_elements; struct timeval timenow; /* reviewed for getmicrotime usage */ getmicrotime(&timenow); - if (fs->last_expired == timenow.tv_sec) - return 0 ; - fs->last_expired = timenow.tv_sec ; - for (i = 0 ; i <= fs->rq_size ; i++) /* last one is overflow */ - for (prev=NULL, q = fs->rq[i] ; q != NULL ; ) - if (q->head != NULL || q->S != q->F+1) { - prev = q ; - q = q->next ; - } else { /* entry is idle, expire it */ - struct dn_flow_queue *old_q = q ; - - if (prev != NULL) - prev->next = q = q->next ; - else - fs->rq[i] = q = q->next ; - fs->rq_elements-- ; - FREE(old_q, M_DUMMYNET); - } - return initial_elements - fs->rq_elements ; + if (fs->last_expired == timenow.tv_sec) { + return 0; + } + fs->last_expired = timenow.tv_sec; + for (i = 0; i <= fs->rq_size; i++) { /* last one is overflow */ + for (prev = NULL, q = fs->rq[i]; q != NULL;) { + if (q->head != NULL || q->S != q->F + 1) { + prev = q; + q = q->next; + } else { /* entry is idle, expire it */ + struct dn_flow_queue *old_q = q; + + if (prev != NULL) { + prev->next = q = q->next; + } else { + fs->rq[i] = q = q->next; + } + fs->rq_elements--; + FREE(old_q, M_DUMMYNET); + } + } + } + return initial_elements - fs->rq_elements; } /* @@ -1190,29 +1218,30 @@ expire_queues(struct dn_flow_set *fs) static struct dn_flow_queue * create_queue(struct dn_flow_set *fs, int i) { - struct dn_flow_queue *q ; + struct dn_flow_queue *q; - if (fs->rq_elements > fs->rq_size * dn_max_ratio && + if (fs->rq_elements > fs->rq_size * dn_max_ratio && expire_queues(fs) == 0) { - /* - * No way to get room, use or create overflow queue. - */ - i = fs->rq_size ; - if ( fs->rq[i] != NULL ) - return fs->rq[i] ; - } - q = _MALLOC(sizeof(*q), M_DUMMYNET, M_DONTWAIT | M_ZERO); - if (q == NULL) { - printf("dummynet: sorry, cannot allocate queue for new flow\n"); - return NULL ; - } - q->fs = fs ; - q->hash_slot = i ; - q->next = fs->rq[i] ; - q->S = q->F + 1; /* hack - mark timestamp as invalid */ - fs->rq[i] = q ; - fs->rq_elements++ ; - return q ; + /* + * No way to get room, use or create overflow queue. 
+ */ + i = fs->rq_size; + if (fs->rq[i] != NULL) { + return fs->rq[i]; + } + } + q = _MALLOC(sizeof(*q), M_DUMMYNET, M_DONTWAIT | M_ZERO); + if (q == NULL) { + printf("dummynet: sorry, cannot allocate queue for new flow\n"); + return NULL; + } + q->fs = fs; + q->hash_slot = i; + q->next = fs->rq[i]; + q->S = q->F + 1; /* hack - mark timestamp as invalid */ + fs->rq[i] = q; + fs->rq_elements++; + return q; } /* @@ -1223,222 +1252,227 @@ create_queue(struct dn_flow_set *fs, int i) static struct dn_flow_queue * find_queue(struct dn_flow_set *fs, struct ip_flow_id *id) { - int i = 0 ; /* we need i and q for new allocations */ - struct dn_flow_queue *q, *prev; - int is_v6 = IS_IP6_FLOW_ID(id); - - if ( !(fs->flags_fs & DN_HAVE_FLOW_MASK) ) - q = fs->rq[0] ; - else { - /* first, do the masking, then hash */ - id->dst_port &= fs->flow_mask.dst_port ; - id->src_port &= fs->flow_mask.src_port ; - id->proto &= fs->flow_mask.proto ; - id->flags = 0 ; /* we don't care about this one */ - if (is_v6) { - APPLY_MASK(&id->dst_ip6, &fs->flow_mask.dst_ip6); - APPLY_MASK(&id->src_ip6, &fs->flow_mask.src_ip6); - id->flow_id6 &= fs->flow_mask.flow_id6; - - i = ((id->dst_ip6.__u6_addr.__u6_addr32[0]) & 0xffff)^ - ((id->dst_ip6.__u6_addr.__u6_addr32[1]) & 0xffff)^ - ((id->dst_ip6.__u6_addr.__u6_addr32[2]) & 0xffff)^ - ((id->dst_ip6.__u6_addr.__u6_addr32[3]) & 0xffff)^ - - ((id->dst_ip6.__u6_addr.__u6_addr32[0] >> 15) & 0xffff)^ - ((id->dst_ip6.__u6_addr.__u6_addr32[1] >> 15) & 0xffff)^ - ((id->dst_ip6.__u6_addr.__u6_addr32[2] >> 15) & 0xffff)^ - ((id->dst_ip6.__u6_addr.__u6_addr32[3] >> 15) & 0xffff)^ - - ((id->src_ip6.__u6_addr.__u6_addr32[0] << 1) & 0xfffff)^ - ((id->src_ip6.__u6_addr.__u6_addr32[1] << 1) & 0xfffff)^ - ((id->src_ip6.__u6_addr.__u6_addr32[2] << 1) & 0xfffff)^ - ((id->src_ip6.__u6_addr.__u6_addr32[3] << 1) & 0xfffff)^ - - ((id->src_ip6.__u6_addr.__u6_addr32[0] >> 16) & 0xffff)^ - ((id->src_ip6.__u6_addr.__u6_addr32[1] >> 16) & 0xffff)^ - ((id->src_ip6.__u6_addr.__u6_addr32[2] >> 16) & 0xffff)^ - ((id->src_ip6.__u6_addr.__u6_addr32[3] >> 16) & 0xffff)^ - - (id->dst_port << 1) ^ (id->src_port) ^ - (id->proto ) ^ - (id->flow_id6); - } else { - id->dst_ip &= fs->flow_mask.dst_ip ; - id->src_ip &= fs->flow_mask.src_ip ; - - i = ( (id->dst_ip) & 0xffff ) ^ - ( (id->dst_ip >> 15) & 0xffff ) ^ - ( (id->src_ip << 1) & 0xffff ) ^ - ( (id->src_ip >> 16 ) & 0xffff ) ^ - (id->dst_port << 1) ^ (id->src_port) ^ - (id->proto ); - } - i = i % fs->rq_size ; - /* finally, scan the current list for a match */ - searches++ ; - for (prev=NULL, q = fs->rq[i] ; q ; ) { - search_steps++; - if (is_v6 && - IN6_ARE_ADDR_EQUAL(&id->dst_ip6,&q->id.dst_ip6) && - IN6_ARE_ADDR_EQUAL(&id->src_ip6,&q->id.src_ip6) && - id->dst_port == q->id.dst_port && - id->src_port == q->id.src_port && - id->proto == q->id.proto && - id->flags == q->id.flags && - id->flow_id6 == q->id.flow_id6) - break ; /* found */ - - if (!is_v6 && id->dst_ip == q->id.dst_ip && - id->src_ip == q->id.src_ip && - id->dst_port == q->id.dst_port && - id->src_port == q->id.src_port && - id->proto == q->id.proto && - id->flags == q->id.flags) - break ; /* found */ - - /* No match. 
Check if we can expire the entry */ - if (pipe_expire && q->head == NULL && q->S == q->F+1 ) { - /* entry is idle and not in any heap, expire it */ - struct dn_flow_queue *old_q = q ; - - if (prev != NULL) - prev->next = q = q->next ; - else - fs->rq[i] = q = q->next ; - fs->rq_elements-- ; - FREE(old_q, M_DUMMYNET); - continue ; - } - prev = q ; - q = q->next ; - } - if (q && prev != NULL) { /* found and not in front */ - prev->next = q->next ; - q->next = fs->rq[i] ; - fs->rq[i] = q ; - } - } - if (q == NULL) { /* no match, need to allocate a new entry */ - q = create_queue(fs, i); - if (q != NULL) - q->id = *id ; - } - return q ; + int i = 0; /* we need i and q for new allocations */ + struct dn_flow_queue *q, *prev; + int is_v6 = IS_IP6_FLOW_ID(id); + + if (!(fs->flags_fs & DN_HAVE_FLOW_MASK)) { + q = fs->rq[0]; + } else { + /* first, do the masking, then hash */ + id->dst_port &= fs->flow_mask.dst_port; + id->src_port &= fs->flow_mask.src_port; + id->proto &= fs->flow_mask.proto; + id->flags = 0; /* we don't care about this one */ + if (is_v6) { + APPLY_MASK(&id->dst_ip6, &fs->flow_mask.dst_ip6); + APPLY_MASK(&id->src_ip6, &fs->flow_mask.src_ip6); + id->flow_id6 &= fs->flow_mask.flow_id6; + + i = ((id->dst_ip6.__u6_addr.__u6_addr32[0]) & 0xffff) ^ + ((id->dst_ip6.__u6_addr.__u6_addr32[1]) & 0xffff) ^ + ((id->dst_ip6.__u6_addr.__u6_addr32[2]) & 0xffff) ^ + ((id->dst_ip6.__u6_addr.__u6_addr32[3]) & 0xffff) ^ + + ((id->dst_ip6.__u6_addr.__u6_addr32[0] >> 15) & 0xffff) ^ + ((id->dst_ip6.__u6_addr.__u6_addr32[1] >> 15) & 0xffff) ^ + ((id->dst_ip6.__u6_addr.__u6_addr32[2] >> 15) & 0xffff) ^ + ((id->dst_ip6.__u6_addr.__u6_addr32[3] >> 15) & 0xffff) ^ + + ((id->src_ip6.__u6_addr.__u6_addr32[0] << 1) & 0xfffff) ^ + ((id->src_ip6.__u6_addr.__u6_addr32[1] << 1) & 0xfffff) ^ + ((id->src_ip6.__u6_addr.__u6_addr32[2] << 1) & 0xfffff) ^ + ((id->src_ip6.__u6_addr.__u6_addr32[3] << 1) & 0xfffff) ^ + + ((id->src_ip6.__u6_addr.__u6_addr32[0] >> 16) & 0xffff) ^ + ((id->src_ip6.__u6_addr.__u6_addr32[1] >> 16) & 0xffff) ^ + ((id->src_ip6.__u6_addr.__u6_addr32[2] >> 16) & 0xffff) ^ + ((id->src_ip6.__u6_addr.__u6_addr32[3] >> 16) & 0xffff) ^ + + (id->dst_port << 1) ^ (id->src_port) ^ + (id->proto) ^ + (id->flow_id6); + } else { + id->dst_ip &= fs->flow_mask.dst_ip; + id->src_ip &= fs->flow_mask.src_ip; + + i = ((id->dst_ip) & 0xffff) ^ + ((id->dst_ip >> 15) & 0xffff) ^ + ((id->src_ip << 1) & 0xffff) ^ + ((id->src_ip >> 16) & 0xffff) ^ + (id->dst_port << 1) ^ (id->src_port) ^ + (id->proto); + } + i = i % fs->rq_size; + /* finally, scan the current list for a match */ + searches++; + for (prev = NULL, q = fs->rq[i]; q;) { + search_steps++; + if (is_v6 && + IN6_ARE_ADDR_EQUAL(&id->dst_ip6, &q->id.dst_ip6) && + IN6_ARE_ADDR_EQUAL(&id->src_ip6, &q->id.src_ip6) && + id->dst_port == q->id.dst_port && + id->src_port == q->id.src_port && + id->proto == q->id.proto && + id->flags == q->id.flags && + id->flow_id6 == q->id.flow_id6) { + break; /* found */ + } + if (!is_v6 && id->dst_ip == q->id.dst_ip && + id->src_ip == q->id.src_ip && + id->dst_port == q->id.dst_port && + id->src_port == q->id.src_port && + id->proto == q->id.proto && + id->flags == q->id.flags) { + break; /* found */ + } + /* No match. 
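find_queue() first masks the flow id with the flow_set's flow_mask and then folds it into a bucket index; for IPv4 the fold is an XOR of 16-bit slices of the two addresses with the ports and protocol, reduced modulo rq_size. A stand-alone sketch of that IPv4 fold (host-order fields assumed, purely illustrative):

    #include <stdint.h>

    /* IPv4 flow-hash fold used by find_queue(): XOR 16-bit slices of the
     * already-masked addresses with the ports and protocol, then reduce
     * modulo the bucket count (the real table keeps one extra overflow slot). */
    static unsigned int
    flow_slot_v4(uint32_t dst_ip, uint32_t src_ip,
                 uint16_t dst_port, uint16_t src_port,
                 uint8_t proto, unsigned int rq_size)
    {
        unsigned int i = (dst_ip & 0xffff) ^
                         ((dst_ip >> 15) & 0xffff) ^
                         ((src_ip << 1) & 0xffff) ^
                         ((src_ip >> 16) & 0xffff) ^
                         ((unsigned int)dst_port << 1) ^ src_port ^
                         proto;

        return i % rq_size;
    }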
Check if we can expire the entry */ + if (pipe_expire && q->head == NULL && q->S == q->F + 1) { + /* entry is idle and not in any heap, expire it */ + struct dn_flow_queue *old_q = q; + + if (prev != NULL) { + prev->next = q = q->next; + } else { + fs->rq[i] = q = q->next; + } + fs->rq_elements--; + FREE(old_q, M_DUMMYNET); + continue; + } + prev = q; + q = q->next; + } + if (q && prev != NULL) { /* found and not in front */ + prev->next = q->next; + q->next = fs->rq[i]; + fs->rq[i] = q; + } + } + if (q == NULL) { /* no match, need to allocate a new entry */ + q = create_queue(fs, i); + if (q != NULL) { + q->id = *id; + } + } + return q; } static int red_drops(struct dn_flow_set *fs, struct dn_flow_queue *q, int len) { - /* - * RED algorithm - * - * RED calculates the average queue size (avg) using a low-pass filter - * with an exponential weighted (w_q) moving average: - * avg <- (1-w_q) * avg + w_q * q_size - * where q_size is the queue length (measured in bytes or * packets). - * - * If q_size == 0, we compute the idle time for the link, and set - * avg = (1 - w_q)^(idle/s) - * where s is the time needed for transmitting a medium-sized packet. - * - * Now, if avg < min_th the packet is enqueued. - * If avg > max_th the packet is dropped. Otherwise, the packet is - * dropped with probability P function of avg. - * - */ - - int64_t p_b = 0; - /* queue in bytes or packets ? */ - u_int q_size = (fs->flags_fs & DN_QSIZE_IS_BYTES) ? q->len_bytes : q->len; - - DPRINTF(("\ndummynet: %d q: %2u ", (int) curr_time, q_size)); - - /* average queue size estimation */ - if (q_size != 0) { /* - * queue is not empty, avg <- avg + (q_size - avg) * w_q + * RED algorithm + * + * RED calculates the average queue size (avg) using a low-pass filter + * with an exponential weighted (w_q) moving average: + * avg <- (1-w_q) * avg + w_q * q_size + * where q_size is the queue length (measured in bytes or * packets). + * + * If q_size == 0, we compute the idle time for the link, and set + * avg = (1 - w_q)^(idle/s) + * where s is the time needed for transmitting a medium-sized packet. + * + * Now, if avg < min_th the packet is enqueued. + * If avg > max_th the packet is dropped. Otherwise, the packet is + * dropped with probability P function of avg. + * */ - int diff = SCALE(q_size) - q->avg; - int64_t v = SCALE_MUL((int64_t) diff, (int64_t) fs->w_q); - q->avg += (int) v; - } else { - /* - * queue is empty, find for how long the queue has been - * empty and use a lookup table for computing - * (1 - * w_q)^(idle_time/s) where s is the time to send a - * (small) packet. - * XXX check wraps... - */ - if (q->avg) { - u_int t = (curr_time - q->q_time) / fs->lookup_step; - - q->avg = (t < fs->lookup_depth) ? - SCALE_MUL(q->avg, fs->w_q_lookup[t]) : 0; - } - } - DPRINTF(("dummynet: avg: %u ", SCALE_VAL(q->avg))); - - /* should i drop ? */ - - if (q->avg < fs->min_th) { - q->count = -1; - return 0; /* accept packet ; */ - } - if (q->avg >= fs->max_th) { /* average queue >= max threshold */ - if (fs->flags_fs & DN_IS_GENTLE_RED) { - /* - * According to Gentle-RED, if avg is greater than max_th the - * packet is dropped with a probability - * p_b = c_3 * avg - c_4 - * where c_3 = (1 - max_p) / max_th, and c_4 = 1 - 2 * max_p - */ - p_b = SCALE_MUL((int64_t) fs->c_3, (int64_t) q->avg) - fs->c_4; + int64_t p_b = 0; + /* queue in bytes or packets ? */ + u_int q_size = (fs->flags_fs & DN_QSIZE_IS_BYTES) ? 
q->len_bytes : q->len; + + DPRINTF(("\ndummynet: %d q: %2u ", (int) curr_time, q_size)); + + /* average queue size estimation */ + if (q_size != 0) { + /* + * queue is not empty, avg <- avg + (q_size - avg) * w_q + */ + int diff = SCALE(q_size) - q->avg; + int64_t v = SCALE_MUL((int64_t) diff, (int64_t) fs->w_q); + + q->avg += (int) v; } else { - q->count = -1; - DPRINTF(("dummynet: - drop")); - return 1 ; + /* + * queue is empty, find for how long the queue has been + * empty and use a lookup table for computing + * (1 - * w_q)^(idle_time/s) where s is the time to send a + * (small) packet. + * XXX check wraps... + */ + if (q->avg) { + u_int t = (curr_time - q->q_time) / fs->lookup_step; + + q->avg = (t < fs->lookup_depth) ? + SCALE_MUL(q->avg, fs->w_q_lookup[t]) : 0; + } } - } else if (q->avg > fs->min_th) { - /* - * we compute p_b using the linear dropping function p_b = c_1 * - * avg - c_2, where c_1 = max_p / (max_th - min_th), and c_2 = - * max_p * min_th / (max_th - min_th) - */ - p_b = SCALE_MUL((int64_t) fs->c_1, (int64_t) q->avg) - fs->c_2; - } - if (fs->flags_fs & DN_QSIZE_IS_BYTES) - p_b = (p_b * len) / fs->max_pkt_size; - if (++q->count == 0) - q->random = (my_random() & 0xffff); - else { - /* - * q->count counts packets arrived since last drop, so a greater - * value of q->count means a greater packet drop probability. - */ - if (SCALE_MUL(p_b, SCALE((int64_t) q->count)) > q->random) { - q->count = 0; - DPRINTF(("dummynet: - red drop")); - /* after a drop we calculate a new random value */ - q->random = (my_random() & 0xffff); - return 1; /* drop */ - } - } - /* end of RED algorithm */ - return 0 ; /* accept */ + DPRINTF(("dummynet: avg: %u ", SCALE_VAL(q->avg))); + + /* should i drop ? */ + + if (q->avg < fs->min_th) { + q->count = -1; + return 0; /* accept packet ; */ + } + if (q->avg >= fs->max_th) { /* average queue >= max threshold */ + if (fs->flags_fs & DN_IS_GENTLE_RED) { + /* + * According to Gentle-RED, if avg is greater than max_th the + * packet is dropped with a probability + * p_b = c_3 * avg - c_4 + * where c_3 = (1 - max_p) / max_th, and c_4 = 1 - 2 * max_p + */ + p_b = SCALE_MUL((int64_t) fs->c_3, (int64_t) q->avg) - fs->c_4; + } else { + q->count = -1; + DPRINTF(("dummynet: - drop")); + return 1; + } + } else if (q->avg > fs->min_th) { + /* + * we compute p_b using the linear dropping function p_b = c_1 * + * avg - c_2, where c_1 = max_p / (max_th - min_th), and c_2 = + * max_p * min_th / (max_th - min_th) + */ + p_b = SCALE_MUL((int64_t) fs->c_1, (int64_t) q->avg) - fs->c_2; + } + if (fs->flags_fs & DN_QSIZE_IS_BYTES) { + p_b = (p_b * len) / fs->max_pkt_size; + } + if (++q->count == 0) { + q->random = (my_random() & 0xffff); + } else { + /* + * q->count counts packets arrived since last drop, so a greater + * value of q->count means a greater packet drop probability. 
+ */ + if (SCALE_MUL(p_b, SCALE((int64_t) q->count)) > q->random) { + q->count = 0; + DPRINTF(("dummynet: - red drop")); + /* after a drop we calculate a new random value */ + q->random = (my_random() & 0xffff); + return 1; /* drop */ + } + } + /* end of RED algorithm */ + return 0; /* accept */ } static __inline struct dn_flow_set * locate_flowset(int fs_nr) { - struct dn_flow_set *fs; - SLIST_FOREACH(fs, &flowsethash[HASH(fs_nr)], next) - if (fs->fs_nr == fs_nr) - return fs ; + struct dn_flow_set *fs; + SLIST_FOREACH(fs, &flowsethash[HASH(fs_nr)], next) { + if (fs->fs_nr == fs_nr) { + return fs; + } + } - return (NULL); + return NULL; } static __inline struct dn_pipe * @@ -1446,11 +1480,13 @@ locate_pipe(int pipe_nr) { struct dn_pipe *pipe; - SLIST_FOREACH(pipe, &pipehash[HASH(pipe_nr)], next) - if (pipe->pipe_nr == pipe_nr) - return (pipe); + SLIST_FOREACH(pipe, &pipehash[HASH(pipe_nr)], next) { + if (pipe->pipe_nr == pipe_nr) { + return pipe; + } + } - return (NULL); + return NULL; } @@ -1472,253 +1508,248 @@ locate_pipe(int pipe_nr) * */ static int -dummynet_io(struct mbuf *m, int pipe_nr, int dir, struct ip_fw_args *fwa, int client) +dummynet_io(struct mbuf *m, int pipe_nr, int dir, struct ip_fw_args *fwa) { - struct mbuf *head = NULL, *tail = NULL; - struct dn_pkt_tag *pkt; - struct m_tag *mtag; - struct dn_flow_set *fs = NULL; - struct dn_pipe *pipe ; - u_int64_t len = m->m_pkthdr.len ; - struct dn_flow_queue *q = NULL ; - int is_pipe = 0; - struct timespec ts; - struct timeval tv; - - DPRINTF(("dummynet_io m: 0x%llx pipe: %d dir: %d client: %d\n", - (uint64_t)VM_KERNEL_ADDRPERM(m), pipe_nr, dir, client)); - -#if IPFIREWALL -#if IPFW2 - if (client == DN_CLIENT_IPFW) { - ipfw_insn *cmd = fwa->fwa_ipfw_rule->cmd + fwa->fwa_ipfw_rule->act_ofs; - - if (cmd->opcode == O_LOG) - cmd += F_LEN(cmd); - is_pipe = (cmd->opcode == O_PIPE); - } -#else - if (client == DN_CLIENT_IPFW) - is_pipe = (fwa->fwa_ipfw_rule->fw_flg & IP_FW_F_COMMAND) == IP_FW_F_PIPE; -#endif -#endif /* IPFIREWALL */ + struct mbuf *head = NULL, *tail = NULL; + struct dn_pkt_tag *pkt; + struct m_tag *mtag; + struct dn_flow_set *fs = NULL; + struct dn_pipe *pipe; + u_int64_t len = m->m_pkthdr.len; + struct dn_flow_queue *q = NULL; + int is_pipe = 0; + struct timespec ts; + struct timeval tv; + + DPRINTF(("dummynet_io m: 0x%llx pipe: %d dir: %d\n", + (uint64_t)VM_KERNEL_ADDRPERM(m), pipe_nr, dir)); + #if DUMMYNET - if (client == DN_CLIENT_PF) - is_pipe = fwa->fwa_flags == DN_IS_PIPE ? 1 : 0; + is_pipe = fwa->fwa_flags == DN_IS_PIPE ? 1 : 0; #endif /* DUMMYNET */ - pipe_nr &= 0xffff ; + pipe_nr &= 0xffff; - lck_mtx_lock(dn_mutex); + lck_mtx_lock(dn_mutex); /* make all time measurements in milliseconds (ms) - - * here we convert secs and usecs to msecs (just divide the - * usecs and take the closest whole number). + * here we convert secs and usecs to msecs (just divide the + * usecs and take the closest whole number). */ - microuptime(&tv); + microuptime(&tv); curr_time = (tv.tv_sec * 1000) + (tv.tv_usec / 1000); - /* - * This is a dummynet rule, so we expect an O_PIPE or O_QUEUE rule. - */ - if (is_pipe) { + /* + * This is a dummynet rule, so we expect an O_PIPE or O_QUEUE rule. + */ + if (is_pipe) { pipe = locate_pipe(pipe_nr); - if (pipe != NULL) + if (pipe != NULL) { fs = &(pipe->fs); - } else + } + } else { fs = locate_flowset(pipe_nr); + } + + + if (fs == NULL) { + goto dropit; /* this queue/pipe does not exist! 
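red_drops() above implements RED in fixed point: the average queue size avg is an exponentially weighted moving average of the instantaneous length, and the drop probability grows linearly between min_th and max_th (and keeps growing past max_th when gentle RED is enabled). A compact sketch of the same arithmetic; the 16-bit SCALE()/SCALE_MUL() helpers below are assumptions standing in for the definitions in ip_dummynet.h:

    #include <stdint.h>

    #define SCALE_RED       16                      /* assumed fixed-point shift */
    #define SCALE(x)        ((int64_t)(x) << SCALE_RED)
    #define SCALE_MUL(x, y) (((x) * (y)) >> SCALE_RED)

    /* EWMA update while the queue is non-empty: avg += (q_size - avg) * w_q */
    static int64_t
    red_avg_update(int64_t avg, unsigned int q_size, int64_t w_q)
    {
        int64_t diff = SCALE(q_size) - avg;

        return avg + SCALE_MUL(diff, w_q);
    }

    /* Linear drop probability for min_th < avg < max_th:
     *   p_b = c_1 * avg - c_2
     * with c_1 = max_p / (max_th - min_th) and
     *      c_2 = max_p * min_th / (max_th - min_th), all values pre-scaled. */
    static int64_t
    red_drop_prob(int64_t avg, int64_t c_1, int64_t c_2)
    {
        return SCALE_MUL(c_1, avg) - c_2;
    }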
*/ + } + pipe = fs->pipe; + if (pipe == NULL) { /* must be a queue, try find a matching pipe */ + pipe = locate_pipe(fs->parent_nr); + + if (pipe != NULL) { + fs->pipe = pipe; + } else { + printf("dummynet: no pipe %d for queue %d, drop pkt\n", + fs->parent_nr, fs->fs_nr); + goto dropit; + } + } + q = find_queue(fs, &(fwa->fwa_id)); + if (q == NULL) { + goto dropit; /* cannot allocate queue */ + } + /* + * update statistics, then check reasons to drop pkt + */ + q->tot_bytes += len; + q->tot_pkts++; + if (fs->plr && (my_random() < fs->plr)) { + goto dropit; /* random pkt drop */ + } + if (fs->flags_fs & DN_QSIZE_IS_BYTES) { + if (q->len_bytes > fs->qsize) { + goto dropit; /* queue size overflow */ + } + } else { + if (q->len >= fs->qsize) { + goto dropit; /* queue count overflow */ + } + } + if (fs->flags_fs & DN_IS_RED && red_drops(fs, q, len)) { + goto dropit; + } + + /* XXX expensive to zero, see if we can remove it*/ + mtag = m_tag_create(KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_DUMMYNET, + sizeof(struct dn_pkt_tag), M_NOWAIT, m); + if (mtag == NULL) { + goto dropit; /* cannot allocate packet header */ + } + m_tag_prepend(m, mtag); /* attach to mbuf chain */ + pkt = (struct dn_pkt_tag *)(mtag + 1); + bzero(pkt, sizeof(struct dn_pkt_tag)); + /* ok, i can handle the pkt now... */ + /* build and enqueue packet + parameters */ + pkt->dn_pf_rule = fwa->fwa_pf_rule; + pkt->dn_dir = dir; - if (fs == NULL){ - goto dropit ; /* this queue/pipe does not exist! */ - } - pipe = fs->pipe ; - if (pipe == NULL) { /* must be a queue, try find a matching pipe */ - pipe = locate_pipe(fs->parent_nr); - - if (pipe != NULL) - fs->pipe = pipe ; - else { - printf("dummynet: no pipe %d for queue %d, drop pkt\n", - fs->parent_nr, fs->fs_nr); - goto dropit ; - } - } - q = find_queue(fs, &(fwa->fwa_id)); - if ( q == NULL ) - goto dropit ; /* cannot allocate queue */ - /* - * update statistics, then check reasons to drop pkt - */ - q->tot_bytes += len ; - q->tot_pkts++ ; - if ( fs->plr && (my_random() < fs->plr)) - goto dropit ; /* random pkt drop */ - if ( fs->flags_fs & DN_QSIZE_IS_BYTES) { - if (q->len_bytes > fs->qsize) - goto dropit ; /* queue size overflow */ - } else { - if (q->len >= fs->qsize) - goto dropit ; /* queue count overflow */ - } - if ( fs->flags_fs & DN_IS_RED && red_drops(fs, q, len) ) - goto dropit ; - - /* XXX expensive to zero, see if we can remove it*/ - mtag = m_tag_create(KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_DUMMYNET, - sizeof(struct dn_pkt_tag), M_NOWAIT, m); - if ( mtag == NULL ) - goto dropit ; /* cannot allocate packet header */ - m_tag_prepend(m, mtag); /* attach to mbuf chain */ - - pkt = (struct dn_pkt_tag *)(mtag+1); - bzero(pkt, sizeof(struct dn_pkt_tag)); - /* ok, i can handle the pkt now... */ - /* build and enqueue packet + parameters */ - /* - * PF is checked before ipfw so remember ipfw rule only when - * the caller is ipfw. When the caller is PF, fwa_ipfw_rule - * is a fake rule just used for convenience - */ - if (client == DN_CLIENT_IPFW) - pkt->dn_ipfw_rule = fwa->fwa_ipfw_rule; - pkt->dn_pf_rule = fwa->fwa_pf_rule; - pkt->dn_dir = dir ; - pkt->dn_client = client; - - pkt->dn_ifp = fwa->fwa_oif; - if (dir == DN_TO_IP_OUT) { + pkt->dn_ifp = fwa->fwa_oif; + if (dir == DN_TO_IP_OUT) { /* * We need to copy *ro because for ICMP pkts (and maybe others) * the caller passed a pointer into the stack; dst might also be * a pointer into *ro so it needs to be updated. 
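Before enqueueing, dummynet_io() walks the drop reasons in order: random loss (plr), queue limit in bytes or slots, then RED. The plr check compares a fresh random draw against the configured value, so the loss probability is plr divided by the range of my_random(); the 31-bit range and the 0x7fffffff scaling below are assumptions, shown only to make the comparison concrete:

    #include <stdint.h>
    #include <stdlib.h>

    /* Random-loss check in the spirit of "if (fs->plr && (my_random() < fs->plr))".
     * Assumes the draw is uniform in [0, 2^31), so plr = probability * 0x7fffffff. */
    static int
    plr_drop(uint32_t plr)
    {
        uint32_t r = (uint32_t)random() & 0x7fffffff;   /* stand-in for my_random() */

        return plr != 0 && r < plr;
    }

    /* Example: a 1% loss rate would be configured as plr ~= 0.01 * 0x7fffffff. */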
*/ if (fwa->fwa_ro) { - route_copyout(&pkt->dn_ro, fwa->fwa_ro, sizeof (pkt->dn_ro)); + route_copyout(&pkt->dn_ro, fwa->fwa_ro, sizeof(pkt->dn_ro)); } if (fwa->fwa_dst) { - if (fwa->fwa_dst == (struct sockaddr_in *)&fwa->fwa_ro->ro_dst) /* dst points into ro */ - fwa->fwa_dst = (struct sockaddr_in *)&(pkt->dn_ro.ro_dst) ; + if (fwa->fwa_dst == (struct sockaddr_in *)&fwa->fwa_ro->ro_dst) { /* dst points into ro */ + fwa->fwa_dst = (struct sockaddr_in *)&(pkt->dn_ro.ro_dst); + } - bcopy (fwa->fwa_dst, &pkt->dn_dst, sizeof(pkt->dn_dst)); + bcopy(fwa->fwa_dst, &pkt->dn_dst, sizeof(pkt->dn_dst)); } - } else if (dir == DN_TO_IP6_OUT) { + } else if (dir == DN_TO_IP6_OUT) { if (fwa->fwa_ro6) { route_copyout((struct route *)&pkt->dn_ro6, - (struct route *)fwa->fwa_ro6, sizeof (pkt->dn_ro6)); + (struct route *)fwa->fwa_ro6, sizeof(pkt->dn_ro6)); } if (fwa->fwa_ro6_pmtu) { route_copyout((struct route *)&pkt->dn_ro6_pmtu, - (struct route *)fwa->fwa_ro6_pmtu, sizeof (pkt->dn_ro6_pmtu)); + (struct route *)fwa->fwa_ro6_pmtu, sizeof(pkt->dn_ro6_pmtu)); } if (fwa->fwa_dst6) { - if (fwa->fwa_dst6 == (struct sockaddr_in6 *)&fwa->fwa_ro6->ro_dst) /* dst points into ro */ - fwa->fwa_dst6 = (struct sockaddr_in6 *)&(pkt->dn_ro6.ro_dst) ; + if (fwa->fwa_dst6 == (struct sockaddr_in6 *)&fwa->fwa_ro6->ro_dst) { /* dst points into ro */ + fwa->fwa_dst6 = (struct sockaddr_in6 *)&(pkt->dn_ro6.ro_dst); + } - bcopy (fwa->fwa_dst6, &pkt->dn_dst6, sizeof(pkt->dn_dst6)); + bcopy(fwa->fwa_dst6, &pkt->dn_dst6, sizeof(pkt->dn_dst6)); } pkt->dn_origifp = fwa->fwa_origifp; pkt->dn_mtu = fwa->fwa_mtu; pkt->dn_unfragpartlen = fwa->fwa_unfragpartlen; if (fwa->fwa_exthdrs) { - bcopy (fwa->fwa_exthdrs, &pkt->dn_exthdrs, sizeof(pkt->dn_exthdrs)); + bcopy(fwa->fwa_exthdrs, &pkt->dn_exthdrs, sizeof(pkt->dn_exthdrs)); /* * Need to zero out the source structure so the mbufs * won't be freed by ip6_output() */ bzero(fwa->fwa_exthdrs, sizeof(struct ip6_exthdrs)); } - } - if (dir == DN_TO_IP_OUT || dir == DN_TO_IP6_OUT) { + } + if (dir == DN_TO_IP_OUT || dir == DN_TO_IP6_OUT) { pkt->dn_flags = fwa->fwa_oflags; - if (fwa->fwa_ipoa != NULL) + if (fwa->fwa_ipoa != NULL) { pkt->dn_ipoa = *(fwa->fwa_ipoa); - } - if (q->head == NULL) - q->head = m; - else - q->tail->m_nextpkt = m; - q->tail = m; - q->len++; - q->len_bytes += len ; - - if ( q->head != m ) /* flow was not idle, we are done */ - goto done; - /* - * If we reach this point the flow was previously idle, so we need - * to schedule it. This involves different actions for fixed-rate or - * WF2Q queues. - */ - if (is_pipe) { - /* - * Fixed-rate queue: just insert into the ready_heap. - */ - dn_key t = 0 ; - if (pipe->bandwidth) - t = SET_TICKS(m, q, pipe); - q->sched_time = curr_time ; - if (t == 0) /* must process it now */ - ready_event( q , &head, &tail ); - else - heap_insert(&ready_heap, curr_time + t , q ); - } else { - /* - * WF2Q. First, compute start time S: if the flow was idle (S=F+1) - * set S to the virtual time V for the controlling pipe, and update - * the sum of weights for the pipe; otherwise, remove flow from - * idle_heap and set S to max(F,V). - * Second, compute finish time F = S + len/weight. - * Third, if pipe was idle, update V=max(S, V). - * Fourth, count one more backlogged flow. 
- */ - if (DN_KEY_GT(q->S, q->F)) { /* means timestamps are invalid */ - q->S = pipe->V ; - pipe->sum += fs->weight ; /* add weight of new queue */ + } + } + if (q->head == NULL) { + q->head = m; } else { - heap_extract(&(pipe->idle_heap), q); - q->S = MAX64(q->F, pipe->V ) ; + q->tail->m_nextpkt = m; } - q->F = q->S + ( len<weight; + q->tail = m; + q->len++; + q->len_bytes += len; - if (pipe->not_eligible_heap.elements == 0 && - pipe->scheduler_heap.elements == 0) - pipe->V = MAX64 ( q->S, pipe->V ); - fs->backlogged++ ; + if (q->head != m) { /* flow was not idle, we are done */ + goto done; + } /* - * Look at eligibility. A flow is not eligibile if S>V (when - * this happens, it means that there is some other flow already - * scheduled for the same pipe, so the scheduler_heap cannot be - * empty). If the flow is not eligible we just store it in the - * not_eligible_heap. Otherwise, we store in the scheduler_heap - * and possibly invoke ready_event_wfq() right now if there is - * leftover credit. - * Note that for all flows in scheduler_heap (SCH), S_i <= V, - * and for all flows in not_eligible_heap (NEH), S_i > V . - * So when we need to compute max( V, min(S_i) ) forall i in SCH+NEH, - * we only need to look into NEH. + * If we reach this point the flow was previously idle, so we need + * to schedule it. This involves different actions for fixed-rate or + * WF2Q queues. */ - if (DN_KEY_GT(q->S, pipe->V) ) { /* not eligible */ - if (pipe->scheduler_heap.elements == 0) - printf("dummynet: ++ ouch! not eligible but empty scheduler!\n"); - heap_insert(&(pipe->not_eligible_heap), q->S, q); + if (is_pipe) { + /* + * Fixed-rate queue: just insert into the ready_heap. + */ + dn_key t = 0; + if (pipe->bandwidth) { + t = SET_TICKS(m, q, pipe); + } + q->sched_time = curr_time; + if (t == 0) { /* must process it now */ + ready_event( q, &head, &tail ); + } else { + heap_insert(&ready_heap, curr_time + t, q ); + } } else { - heap_insert(&(pipe->scheduler_heap), q->F, q); - if (pipe->numbytes >= 0) { /* pipe is idle */ - if (pipe->scheduler_heap.elements != 1) - printf("dummynet: OUCH! pipe should have been idle!\n"); - DPRINTF(("dummynet: waking up pipe %d at %d\n", - pipe->pipe_nr, (int)(q->F >> MY_M))); - pipe->sched_time = curr_time ; - ready_event_wfq(pipe, &head, &tail); - } - } - } + /* + * WF2Q. First, compute start time S: if the flow was idle (S=F+1) + * set S to the virtual time V for the controlling pipe, and update + * the sum of weights for the pipe; otherwise, remove flow from + * idle_heap and set S to max(F,V). + * Second, compute finish time F = S + len/weight. + * Third, if pipe was idle, update V=max(S, V). + * Fourth, count one more backlogged flow. + */ + if (DN_KEY_GT(q->S, q->F)) { /* means timestamps are invalid */ + q->S = pipe->V; + pipe->sum += fs->weight; /* add weight of new queue */ + } else { + heap_extract(&(pipe->idle_heap), q); + q->S = MAX64(q->F, pipe->V ); + } + q->F = q->S + (len << MY_M) / (u_int64_t) fs->weight; + + if (pipe->not_eligible_heap.elements == 0 && + pipe->scheduler_heap.elements == 0) { + pipe->V = MAX64( q->S, pipe->V ); + } + fs->backlogged++; + /* + * Look at eligibility. A flow is not eligibile if S>V (when + * this happens, it means that there is some other flow already + * scheduled for the same pipe, so the scheduler_heap cannot be + * empty). If the flow is not eligible we just store it in the + * not_eligible_heap. 
Otherwise, we store in the scheduler_heap + * and possibly invoke ready_event_wfq() right now if there is + * leftover credit. + * Note that for all flows in scheduler_heap (SCH), S_i <= V, + * and for all flows in not_eligible_heap (NEH), S_i > V . + * So when we need to compute max( V, min(S_i) ) forall i in SCH+NEH, + * we only need to look into NEH. + */ + if (DN_KEY_GT(q->S, pipe->V)) { /* not eligible */ + if (pipe->scheduler_heap.elements == 0) { + printf("dummynet: ++ ouch! not eligible but empty scheduler!\n"); + } + heap_insert(&(pipe->not_eligible_heap), q->S, q); + } else { + heap_insert(&(pipe->scheduler_heap), q->F, q); + if (pipe->numbytes >= 0) { /* pipe is idle */ + if (pipe->scheduler_heap.elements != 1) { + printf("dummynet: OUCH! pipe should have been idle!\n"); + } + DPRINTF(("dummynet: waking up pipe %d at %d\n", + pipe->pipe_nr, (int)(q->F >> MY_M))); + pipe->sched_time = curr_time; + ready_event_wfq(pipe, &head, &tail); + } + } + } done: /* start the timer and set global if not already set */ if (!timer_enabled) { ts.tv_sec = 0; - ts.tv_nsec = 1 * 1000000; // 1ms + ts.tv_nsec = 1 * 1000000; // 1ms timer_enabled = 1; bsd_timeout(dummynet, NULL, &ts); } @@ -1729,28 +1760,29 @@ done: dummynet_send(head); } - return 0; + return 0; dropit: - if (q) - q->drops++ ; + if (q) { + q->drops++; + } lck_mtx_unlock(dn_mutex); - m_freem(m); - return ( (fs && (fs->flags_fs & DN_NOERROR)) ? 0 : ENOBUFS); + m_freem(m); + return (fs && (fs->flags_fs & DN_NOERROR)) ? 0 : ENOBUFS; } /* * Below, the ROUTE_RELEASE is only needed when (pkt->dn_dir == DN_TO_IP_OUT) * Doing this would probably save us the initial bzero of dn_pkt */ -#define DN_FREE_PKT(_m) do { \ +#define DN_FREE_PKT(_m) do { \ struct m_tag *tag = m_tag_locate(m, KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_DUMMYNET, NULL); \ - if (tag) { \ - struct dn_pkt_tag *n = (struct dn_pkt_tag *)(tag+1); \ - ROUTE_RELEASE(&n->dn_ro); \ - } \ - m_tag_delete(_m, tag); \ - m_freem(_m); \ + if (tag) { \ + struct dn_pkt_tag *n = (struct dn_pkt_tag *)(tag+1); \ + ROUTE_RELEASE(&n->dn_ro); \ + } \ + m_tag_delete(_m, tag); \ + m_freem(_m); \ } while (0) /* @@ -1762,36 +1794,39 @@ dropit: static void purge_flow_set(struct dn_flow_set *fs, int all) { - struct dn_flow_queue *q, *qn ; - int i ; + struct dn_flow_queue *q, *qn; + int i; LCK_MTX_ASSERT(dn_mutex, LCK_MTX_ASSERT_OWNED); - for (i = 0 ; i <= fs->rq_size ; i++ ) { - for (q = fs->rq[i] ; q ; q = qn ) { - struct mbuf *m, *mnext; + for (i = 0; i <= fs->rq_size; i++) { + for (q = fs->rq[i]; q; q = qn) { + struct mbuf *m, *mnext; - mnext = q->head; - while ((m = mnext) != NULL) { - mnext = m->m_nextpkt; - DN_FREE_PKT(m); - } - qn = q->next ; - FREE(q, M_DUMMYNET); - } - fs->rq[i] = NULL ; - } - fs->rq_elements = 0 ; - if (all) { - /* RED - free lookup table */ - if (fs->w_q_lookup) - FREE(fs->w_q_lookup, M_DUMMYNET); - if (fs->rq) - FREE(fs->rq, M_DUMMYNET); - /* if this fs is not part of a pipe, free it */ - if (fs->pipe && fs != &(fs->pipe->fs) ) - FREE(fs, M_DUMMYNET); - } + mnext = q->head; + while ((m = mnext) != NULL) { + mnext = m->m_nextpkt; + DN_FREE_PKT(m); + } + qn = q->next; + FREE(q, M_DUMMYNET); + } + fs->rq[i] = NULL; + } + fs->rq_elements = 0; + if (all) { + /* RED - free lookup table */ + if (fs->w_q_lookup) { + FREE(fs->w_q_lookup, M_DUMMYNET); + } + if (fs->rq) { + FREE(fs->rq, M_DUMMYNET); + } + /* if this fs is not part of a pipe, free it */ + if (fs->pipe && fs != &(fs->pipe->fs)) { + FREE(fs, M_DUMMYNET); + } + } } /* @@ -1802,24 +1837,23 @@ purge_flow_set(struct dn_flow_set 
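The WF2Q branch above keeps per-flow virtual start/finish timestamps: an idle flow restarts at the pipe's virtual time V, a backlogged flow continues from max(F, V), the finish time advances by the packet length divided by the flow's weight (scaled by MY_M bits), and V only jumps forward when the pipe had nothing scheduled. A small sketch of that bookkeeping with the heap insertions left out; MY_M = 16 is an assumption, and weight must be at least 1 (config_pipe clamps it to 1..100):

    #include <stdint.h>

    #define MY_M 16                              /* assumed length/weight scaling shift */

    typedef uint64_t dn_key;

    /* WF2Q+ timestamp update when a flow becomes backlogged:
     *   S = V            if the flow was idle (its stored S > F marks it invalid)
     *   S = max(F, V)    otherwise
     *   F = S + len * 2^MY_M / weight
     * V advances to S only when the pipe had no eligible or scheduled flows. */
    static void
    wf2q_enqueue(dn_key *S, dn_key *F, dn_key *V, int flow_was_idle,
                 uint64_t len, uint64_t weight, int pipe_backlogged)
    {
        if (flow_was_idle) {
            *S = *V;
        } else {
            *S = (*F > *V) ? *F : *V;
        }
        *F = *S + ((len << MY_M) / weight);

        if (!pipe_backlogged && *S > *V) {
            *V = *S;                             /* V = max(S, V) on an idle pipe */
        }
    }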
*fs, int all) static void purge_pipe(struct dn_pipe *pipe) { - struct mbuf *m, *mnext; + struct mbuf *m, *mnext; - purge_flow_set( &(pipe->fs), 1 ); + purge_flow_set( &(pipe->fs), 1 ); - mnext = pipe->head; - while ((m = mnext) != NULL) { - mnext = m->m_nextpkt; - DN_FREE_PKT(m); - } + mnext = pipe->head; + while ((m = mnext) != NULL) { + mnext = m->m_nextpkt; + DN_FREE_PKT(m); + } - heap_free( &(pipe->scheduler_heap) ); - heap_free( &(pipe->not_eligible_heap) ); - heap_free( &(pipe->idle_heap) ); + heap_free( &(pipe->scheduler_heap)); + heap_free( &(pipe->not_eligible_heap)); + heap_free( &(pipe->idle_heap)); } /* - * Delete all pipes and heaps returning memory. Must also - * remove references from all ipfw rules to all pipes. + * Delete all pipes and heaps returning memory. */ static void dummynet_flush(void) @@ -1830,10 +1864,6 @@ dummynet_flush(void) lck_mtx_lock(dn_mutex); -#if IPFW2 - /* remove all references to pipes ...*/ - flush_pipe_ptrs(NULL); -#endif /* IPFW2 */ /* Free heaps so we don't have unwanted events. */ heap_free(&ready_heap); @@ -1845,69 +1875,18 @@ dummynet_flush(void) * * XXXGL: can we merge the for(;;) cycles into one or not? */ - for (i = 0; i < HASHSIZE; i++) + for (i = 0; i < HASHSIZE; i++) { SLIST_FOREACH_SAFE(fs, &flowsethash[i], next, fs1) { SLIST_REMOVE(&flowsethash[i], fs, dn_flow_set, next); purge_flow_set(fs, 1); } - for (i = 0; i < HASHSIZE; i++) + } + for (i = 0; i < HASHSIZE; i++) { SLIST_FOREACH_SAFE(pipe, &pipehash[i], next, pipe1) { SLIST_REMOVE(&pipehash[i], pipe, dn_pipe, next); purge_pipe(pipe); FREE(pipe, M_DUMMYNET); } - lck_mtx_unlock(dn_mutex); -} - - -static void -dn_ipfw_rule_delete_fs(struct dn_flow_set *fs, void *r) -{ - int i ; - struct dn_flow_queue *q ; - struct mbuf *m ; - - for (i = 0 ; i <= fs->rq_size ; i++) /* last one is ovflow */ - for (q = fs->rq[i] ; q ; q = q->next ) - for (m = q->head ; m ; m = m->m_nextpkt ) { - struct dn_pkt_tag *pkt = dn_tag_get(m) ; - if (pkt->dn_ipfw_rule == r) - pkt->dn_ipfw_rule = &default_rule ; - } -} -/* - * when a firewall rule is deleted, scan all queues and remove the flow-id - * from packets matching this rule. - */ -void -dn_ipfw_rule_delete(void *r) -{ - struct dn_pipe *p ; - struct dn_flow_set *fs ; - struct dn_pkt_tag *pkt ; - struct mbuf *m ; - int i; - - lck_mtx_lock(dn_mutex); - - /* - * If the rule references a queue (dn_flow_set), then scan - * the flow set, otherwise scan pipes. Should do either, but doing - * both does not harm. 
- */ - for (i = 0; i < HASHSIZE; i++) - SLIST_FOREACH(fs, &flowsethash[i], next) - dn_ipfw_rule_delete_fs(fs, r); - - for (i = 0; i < HASHSIZE; i++) - SLIST_FOREACH(p, &pipehash[i], next) { - fs = &(p->fs); - dn_ipfw_rule_delete_fs(fs, r); - for (m = p->head ; m ; m = m->m_nextpkt ) { - pkt = dn_tag_get(m); - if (pkt->dn_ipfw_rule == r) - pkt->dn_ipfw_rule = &default_rule; - } } lck_mtx_unlock(dn_mutex); } @@ -1918,98 +1897,108 @@ dn_ipfw_rule_delete(void *r) static int config_red(struct dn_flow_set *p, struct dn_flow_set * x) { - int i; - - x->w_q = p->w_q; - x->min_th = SCALE(p->min_th); - x->max_th = SCALE(p->max_th); - x->max_p = p->max_p; - - x->c_1 = p->max_p / (p->max_th - p->min_th); - x->c_2 = SCALE_MUL(x->c_1, SCALE(p->min_th)); - if (x->flags_fs & DN_IS_GENTLE_RED) { - x->c_3 = (SCALE(1) - p->max_p) / p->max_th; - x->c_4 = (SCALE(1) - 2 * p->max_p); - } - - /* if the lookup table already exist, free and create it again */ - if (x->w_q_lookup) { - FREE(x->w_q_lookup, M_DUMMYNET); - x->w_q_lookup = NULL ; - } - if (red_lookup_depth == 0) { - printf("\ndummynet: net.inet.ip.dummynet.red_lookup_depth must be > 0\n"); - FREE(x, M_DUMMYNET); - return EINVAL; - } - x->lookup_depth = red_lookup_depth; - x->w_q_lookup = (u_int *) _MALLOC(x->lookup_depth * sizeof(int), + int i; + + x->w_q = p->w_q; + x->min_th = SCALE(p->min_th); + x->max_th = SCALE(p->max_th); + x->max_p = p->max_p; + + x->c_1 = p->max_p / (p->max_th - p->min_th); + x->c_2 = SCALE_MUL(x->c_1, SCALE(p->min_th)); + if (x->flags_fs & DN_IS_GENTLE_RED) { + x->c_3 = (SCALE(1) - p->max_p) / p->max_th; + x->c_4 = (SCALE(1) - 2 * p->max_p); + } + + /* if the lookup table already exist, free and create it again */ + if (x->w_q_lookup) { + FREE(x->w_q_lookup, M_DUMMYNET); + x->w_q_lookup = NULL; + } + if (red_lookup_depth == 0) { + printf("\ndummynet: net.inet.ip.dummynet.red_lookup_depth must be > 0\n"); + FREE(x, M_DUMMYNET); + return EINVAL; + } + x->lookup_depth = red_lookup_depth; + x->w_q_lookup = (u_int *) _MALLOC(x->lookup_depth * sizeof(int), M_DUMMYNET, M_DONTWAIT); - if (x->w_q_lookup == NULL) { - printf("dummynet: sorry, cannot allocate red lookup table\n"); - FREE(x, M_DUMMYNET); - return ENOSPC; - } - - /* fill the lookup table with (1 - w_q)^x */ - x->lookup_step = p->lookup_step ; - x->lookup_weight = p->lookup_weight ; - x->w_q_lookup[0] = SCALE(1) - x->w_q; - for (i = 1; i < x->lookup_depth; i++) - x->w_q_lookup[i] = SCALE_MUL(x->w_q_lookup[i - 1], x->lookup_weight); - if (red_avg_pkt_size < 1) - red_avg_pkt_size = 512 ; - x->avg_pkt_size = red_avg_pkt_size ; - if (red_max_pkt_size < 1) - red_max_pkt_size = 1500 ; - x->max_pkt_size = red_max_pkt_size ; - return 0 ; + if (x->w_q_lookup == NULL) { + printf("dummynet: sorry, cannot allocate red lookup table\n"); + FREE(x, M_DUMMYNET); + return ENOSPC; + } + + /* fill the lookup table with (1 - w_q)^x */ + x->lookup_step = p->lookup_step; + x->lookup_weight = p->lookup_weight; + x->w_q_lookup[0] = SCALE(1) - x->w_q; + for (i = 1; i < x->lookup_depth; i++) { + x->w_q_lookup[i] = SCALE_MUL(x->w_q_lookup[i - 1], x->lookup_weight); + } + if (red_avg_pkt_size < 1) { + red_avg_pkt_size = 512; + } + x->avg_pkt_size = red_avg_pkt_size; + if (red_max_pkt_size < 1) { + red_max_pkt_size = 1500; + } + x->max_pkt_size = red_max_pkt_size; + return 0; } static int alloc_hash(struct dn_flow_set *x, struct dn_flow_set *pfs) { - if (x->flags_fs & DN_HAVE_FLOW_MASK) { /* allocate some slots */ - int l = pfs->rq_size; - - if (l == 0) - l = dn_hash_size; - if (l < 4) - l = 4; - else if 
(l > DN_MAX_HASH_SIZE) - l = DN_MAX_HASH_SIZE; - x->rq_size = l; - } else /* one is enough for null mask */ - x->rq_size = 1; - x->rq = _MALLOC((1 + x->rq_size) * sizeof(struct dn_flow_queue *), + if (x->flags_fs & DN_HAVE_FLOW_MASK) { /* allocate some slots */ + int l = pfs->rq_size; + + if (l == 0) { + l = dn_hash_size; + } + if (l < 4) { + l = 4; + } else if (l > DN_MAX_HASH_SIZE) { + l = DN_MAX_HASH_SIZE; + } + x->rq_size = l; + } else { /* one is enough for null mask */ + x->rq_size = 1; + } + x->rq = _MALLOC((1 + x->rq_size) * sizeof(struct dn_flow_queue *), M_DUMMYNET, M_DONTWAIT | M_ZERO); - if (x->rq == NULL) { - printf("dummynet: sorry, cannot allocate queue\n"); - return ENOSPC; - } - x->rq_elements = 0; - return 0 ; + if (x->rq == NULL) { + printf("dummynet: sorry, cannot allocate queue\n"); + return ENOSPC; + } + x->rq_elements = 0; + return 0; } static void set_fs_parms(struct dn_flow_set *x, struct dn_flow_set *src) { - x->flags_fs = src->flags_fs; - x->qsize = src->qsize; - x->plr = src->plr; - x->flow_mask = src->flow_mask; - if (x->flags_fs & DN_QSIZE_IS_BYTES) { - if (x->qsize > 1024*1024) - x->qsize = 1024*1024 ; - } else { - if (x->qsize == 0) - x->qsize = 50 ; - if (x->qsize > 100) - x->qsize = 50 ; - } - /* configuring RED */ - if ( x->flags_fs & DN_IS_RED ) - config_red(src, x) ; /* XXX should check errors */ + x->flags_fs = src->flags_fs; + x->qsize = src->qsize; + x->plr = src->plr; + x->flow_mask = src->flow_mask; + if (x->flags_fs & DN_QSIZE_IS_BYTES) { + if (x->qsize > 1024 * 1024) { + x->qsize = 1024 * 1024; + } + } else { + if (x->qsize == 0) { + x->qsize = 50; + } + if (x->qsize > 100) { + x->qsize = 50; + } + } + /* configuring RED */ + if (x->flags_fs & DN_IS_RED) { + config_red(src, x); /* XXX should check errors */ + } } /* @@ -2018,126 +2007,131 @@ set_fs_parms(struct dn_flow_set *x, struct dn_flow_set *src) static int config_pipe(struct dn_pipe *p) { - int i, r; - struct dn_flow_set *pfs = &(p->fs); - struct dn_flow_queue *q; - - /* - * The config program passes parameters as follows: - * bw = bits/second (0 means no limits), - * delay = ms, must be translated into ticks. - * qsize = slots/bytes - */ - p->delay = ( p->delay * (hz*10) ) / 1000 ; - /* We need either a pipe number or a flow_set number */ - if (p->pipe_nr == 0 && pfs->fs_nr == 0) - return EINVAL ; - if (p->pipe_nr != 0 && pfs->fs_nr != 0) - return EINVAL ; - if (p->pipe_nr != 0) { /* this is a pipe */ - struct dn_pipe *x, *b; - struct dummynet_event dn_event; - lck_mtx_lock(dn_mutex); - - /* locate pipe */ - b = locate_pipe(p->pipe_nr); + int i, r; + struct dn_flow_set *pfs = &(p->fs); + struct dn_flow_queue *q; - if (b == NULL || b->pipe_nr != p->pipe_nr) { /* new pipe */ - x = _MALLOC(sizeof(struct dn_pipe), M_DUMMYNET, M_DONTWAIT | M_ZERO) ; - if (x == NULL) { - lck_mtx_unlock(dn_mutex); - printf("dummynet: no memory for new pipe\n"); - return ENOSPC; - } - x->pipe_nr = p->pipe_nr; - x->fs.pipe = x ; - /* idle_heap is the only one from which we extract from the middle. - */ - x->idle_heap.size = x->idle_heap.elements = 0 ; - x->idle_heap.offset=offsetof(struct dn_flow_queue, heap_pos); - } else { - x = b; - /* Flush accumulated credit for all queues */ - for (i = 0; i <= x->fs.rq_size; i++) - for (q = x->fs.rq[i]; q; q = q->next) - q->numbytes = 0; + /* + * The config program passes parameters as follows: + * bw = bits/second (0 means no limits), + * delay = ms, must be translated into ticks. 
+ * qsize = slots/bytes + */ + p->delay = (p->delay * (hz * 10)) / 1000; + /* We need either a pipe number or a flow_set number */ + if (p->pipe_nr == 0 && pfs->fs_nr == 0) { + return EINVAL; } + if (p->pipe_nr != 0 && pfs->fs_nr != 0) { + return EINVAL; + } + if (p->pipe_nr != 0) { /* this is a pipe */ + struct dn_pipe *x, *b; + struct dummynet_event dn_event; + lck_mtx_lock(dn_mutex); - x->bandwidth = p->bandwidth ; - x->numbytes = 0; /* just in case... */ - bcopy(p->if_name, x->if_name, sizeof(p->if_name) ); - x->ifp = NULL ; /* reset interface ptr */ - x->delay = p->delay ; - set_fs_parms(&(x->fs), pfs); + /* locate pipe */ + b = locate_pipe(p->pipe_nr); + if (b == NULL || b->pipe_nr != p->pipe_nr) { /* new pipe */ + x = _MALLOC(sizeof(struct dn_pipe), M_DUMMYNET, M_DONTWAIT | M_ZERO); + if (x == NULL) { + lck_mtx_unlock(dn_mutex); + printf("dummynet: no memory for new pipe\n"); + return ENOSPC; + } + x->pipe_nr = p->pipe_nr; + x->fs.pipe = x; + /* idle_heap is the only one from which we extract from the middle. + */ + x->idle_heap.size = x->idle_heap.elements = 0; + x->idle_heap.offset = offsetof(struct dn_flow_queue, heap_pos); + } else { + x = b; + /* Flush accumulated credit for all queues */ + for (i = 0; i <= x->fs.rq_size; i++) { + for (q = x->fs.rq[i]; q; q = q->next) { + q->numbytes = 0; + } + } + } - if ( x->fs.rq == NULL ) { /* a new pipe */ - r = alloc_hash(&(x->fs), pfs) ; - if (r) { - lck_mtx_unlock(dn_mutex); - FREE(x, M_DUMMYNET); - return r ; - } - SLIST_INSERT_HEAD(&pipehash[HASH(x->pipe_nr)], + x->bandwidth = p->bandwidth; + x->numbytes = 0; /* just in case... */ + bcopy(p->if_name, x->if_name, sizeof(p->if_name)); + x->ifp = NULL; /* reset interface ptr */ + x->delay = p->delay; + set_fs_parms(&(x->fs), pfs); + + + if (x->fs.rq == NULL) { /* a new pipe */ + r = alloc_hash(&(x->fs), pfs); + if (r) { + lck_mtx_unlock(dn_mutex); + FREE(x, M_DUMMYNET); + return r; + } + SLIST_INSERT_HEAD(&pipehash[HASH(x->pipe_nr)], x, next); - } - lck_mtx_unlock(dn_mutex); + } + lck_mtx_unlock(dn_mutex); - bzero(&dn_event, sizeof(dn_event)); - dn_event.dn_event_code = DUMMYNET_PIPE_CONFIG; - dn_event.dn_event_pipe_config.bandwidth = p->bandwidth; - dn_event.dn_event_pipe_config.delay = p->delay; - dn_event.dn_event_pipe_config.plr = pfs->plr; + bzero(&dn_event, sizeof(dn_event)); + dn_event.dn_event_code = DUMMYNET_PIPE_CONFIG; + dn_event.dn_event_pipe_config.bandwidth = p->bandwidth; + dn_event.dn_event_pipe_config.delay = p->delay; + dn_event.dn_event_pipe_config.plr = pfs->plr; - dummynet_event_enqueue_nwk_wq_entry(&dn_event); - } else { /* config queue */ - struct dn_flow_set *x, *b ; + dummynet_event_enqueue_nwk_wq_entry(&dn_event); + } else { /* config queue */ + struct dn_flow_set *x, *b; - lck_mtx_lock(dn_mutex); - /* locate flow_set */ - b = locate_flowset(pfs->fs_nr); - - if (b == NULL || b->fs_nr != pfs->fs_nr) { /* new */ - if (pfs->parent_nr == 0) { /* need link to a pipe */ - lck_mtx_unlock(dn_mutex); - return EINVAL ; - } - x = _MALLOC(sizeof(struct dn_flow_set), M_DUMMYNET, M_DONTWAIT | M_ZERO); - if (x == NULL) { - lck_mtx_unlock(dn_mutex); - printf("dummynet: no memory for new flow_set\n"); - return ENOSPC; - } - x->fs_nr = pfs->fs_nr; - x->parent_nr = pfs->parent_nr; - x->weight = pfs->weight ; - if (x->weight == 0) - x->weight = 1 ; - else if (x->weight > 100) - x->weight = 100 ; - } else { - /* Change parent pipe not allowed; must delete and recreate */ - if (pfs->parent_nr != 0 && b->parent_nr != pfs->parent_nr) { - lck_mtx_unlock(dn_mutex); - return EINVAL ; - } 
- x = b; - } - set_fs_parms(x, pfs); + lck_mtx_lock(dn_mutex); + /* locate flow_set */ + b = locate_flowset(pfs->fs_nr); - if ( x->rq == NULL ) { /* a new flow_set */ - r = alloc_hash(x, pfs) ; - if (r) { - lck_mtx_unlock(dn_mutex); - FREE(x, M_DUMMYNET); - return r ; - } - SLIST_INSERT_HEAD(&flowsethash[HASH(x->fs_nr)], + if (b == NULL || b->fs_nr != pfs->fs_nr) { /* new */ + if (pfs->parent_nr == 0) { /* need link to a pipe */ + lck_mtx_unlock(dn_mutex); + return EINVAL; + } + x = _MALLOC(sizeof(struct dn_flow_set), M_DUMMYNET, M_DONTWAIT | M_ZERO); + if (x == NULL) { + lck_mtx_unlock(dn_mutex); + printf("dummynet: no memory for new flow_set\n"); + return ENOSPC; + } + x->fs_nr = pfs->fs_nr; + x->parent_nr = pfs->parent_nr; + x->weight = pfs->weight; + if (x->weight == 0) { + x->weight = 1; + } else if (x->weight > 100) { + x->weight = 100; + } + } else { + /* Change parent pipe not allowed; must delete and recreate */ + if (pfs->parent_nr != 0 && b->parent_nr != pfs->parent_nr) { + lck_mtx_unlock(dn_mutex); + return EINVAL; + } + x = b; + } + set_fs_parms(x, pfs); + + if (x->rq == NULL) { /* a new flow_set */ + r = alloc_hash(x, pfs); + if (r) { + lck_mtx_unlock(dn_mutex); + FREE(x, M_DUMMYNET); + return r; + } + SLIST_INSERT_HEAD(&flowsethash[HASH(x->fs_nr)], x, next); + } + lck_mtx_unlock(dn_mutex); } - lck_mtx_unlock(dn_mutex); - } - return 0 ; + return 0; } /* @@ -2147,16 +2141,19 @@ config_pipe(struct dn_pipe *p) static void fs_remove_from_heap(struct dn_heap *h, struct dn_flow_set *fs) { - int i = 0, found = 0 ; - for (; i < h->elements ;) - if ( ((struct dn_flow_queue *)h->p[i].object)->fs == fs) { - h->elements-- ; - h->p[i] = h->p[h->elements] ; - found++ ; - } else - i++ ; - if (found) - heapify(h); + int i = 0, found = 0; + for (; i < h->elements;) { + if (((struct dn_flow_queue *)h->p[i].object)->fs == fs) { + h->elements--; + h->p[i] = h->p[h->elements]; + found++; + } else { + i++; + } + } + if (found) { + heapify(h); + } } /* @@ -2165,17 +2162,17 @@ fs_remove_from_heap(struct dn_heap *h, struct dn_flow_set *fs) static void pipe_remove_from_heap(struct dn_heap *h, struct dn_pipe *p) { - if (h->elements > 0) { - int i = 0 ; - for (i=0; i < h->elements ; i++ ) { - if (h->p[i].object == p) { /* found it */ - h->elements-- ; - h->p[i] = h->p[h->elements] ; - heapify(h); - break ; - } + if (h->elements > 0) { + int i = 0; + for (i = 0; i < h->elements; i++) { + if (h->p[i].object == p) { /* found it */ + h->elements--; + h->p[i] = h->p[h->elements]; + heapify(h); + break; + } + } } - } } /* @@ -2184,32 +2181,35 @@ pipe_remove_from_heap(struct dn_heap *h, struct dn_pipe *p) void dummynet_drain(void) { - struct dn_flow_set *fs; - struct dn_pipe *p; - struct mbuf *m, *mnext; + struct dn_flow_set *fs; + struct dn_pipe *p; + struct mbuf *m, *mnext; int i; LCK_MTX_ASSERT(dn_mutex, LCK_MTX_ASSERT_OWNED); - heap_free(&ready_heap); - heap_free(&wfq_ready_heap); - heap_free(&extract_heap); - /* remove all references to this pipe from flow_sets */ - for (i = 0; i < HASHSIZE; i++) - SLIST_FOREACH(fs, &flowsethash[i], next) - purge_flow_set(fs, 0); + heap_free(&ready_heap); + heap_free(&wfq_ready_heap); + heap_free(&extract_heap); + /* remove all references to this pipe from flow_sets */ + for (i = 0; i < HASHSIZE; i++) { + SLIST_FOREACH(fs, &flowsethash[i], next) { + purge_flow_set(fs, 0); + } + } - for (i = 0; i < HASHSIZE; i++) - SLIST_FOREACH(p, &pipehash[i], next) { - purge_flow_set(&(p->fs), 0); + for (i = 0; i < HASHSIZE; i++) { + SLIST_FOREACH(p, &pipehash[i], next) { + 
purge_flow_set(&(p->fs), 0); - mnext = p->head; - while ((m = mnext) != NULL) { - mnext = m->m_nextpkt; - DN_FREE_PKT(m); + mnext = p->head; + while ((m = mnext) != NULL) { + mnext = m->m_nextpkt; + DN_FREE_PKT(m); + } + p->head = p->tail = NULL; + } } - p->head = p->tail = NULL ; - } } /* @@ -2218,185 +2218,192 @@ dummynet_drain(void) static int delete_pipe(struct dn_pipe *p) { - if (p->pipe_nr == 0 && p->fs.fs_nr == 0) - return EINVAL ; - if (p->pipe_nr != 0 && p->fs.fs_nr != 0) - return EINVAL ; - if (p->pipe_nr != 0) { /* this is an old-style pipe */ - struct dn_pipe *b; - struct dn_flow_set *fs; - int i; - - lck_mtx_lock(dn_mutex); - /* locate pipe */ - b = locate_pipe(p->pipe_nr); - if(b == NULL){ - lck_mtx_unlock(dn_mutex); - return EINVAL ; /* not found */ + if (p->pipe_nr == 0 && p->fs.fs_nr == 0) { + return EINVAL; } + if (p->pipe_nr != 0 && p->fs.fs_nr != 0) { + return EINVAL; + } + if (p->pipe_nr != 0) { /* this is an old-style pipe */ + struct dn_pipe *b; + struct dn_flow_set *fs; + int i; - /* Unlink from list of pipes. */ - SLIST_REMOVE(&pipehash[HASH(b->pipe_nr)], b, dn_pipe, next); + lck_mtx_lock(dn_mutex); + /* locate pipe */ + b = locate_pipe(p->pipe_nr); + if (b == NULL) { + lck_mtx_unlock(dn_mutex); + return EINVAL; /* not found */ + } -#if IPFW2 - /* remove references to this pipe from the ip_fw rules. */ - flush_pipe_ptrs(&(b->fs)); -#endif /* IPFW2 */ + /* Unlink from list of pipes. */ + SLIST_REMOVE(&pipehash[HASH(b->pipe_nr)], b, dn_pipe, next); - /* Remove all references to this pipe from flow_sets. */ - for (i = 0; i < HASHSIZE; i++) - SLIST_FOREACH(fs, &flowsethash[i], next) - if (fs->pipe == b) { - printf("dummynet: ++ ref to pipe %d from fs %d\n", - p->pipe_nr, fs->fs_nr); - fs->pipe = NULL ; - purge_flow_set(fs, 0); + + /* Remove all references to this pipe from flow_sets. */ + for (i = 0; i < HASHSIZE; i++) { + SLIST_FOREACH(fs, &flowsethash[i], next) { + if (fs->pipe == b) { + printf("dummynet: ++ ref to pipe %d from fs %d\n", + p->pipe_nr, fs->fs_nr); + fs->pipe = NULL; + purge_flow_set(fs, 0); + } + } } - fs_remove_from_heap(&ready_heap, &(b->fs)); + fs_remove_from_heap(&ready_heap, &(b->fs)); - purge_pipe(b); /* remove all data associated to this pipe */ - /* remove reference to here from extract_heap and wfq_ready_heap */ - pipe_remove_from_heap(&extract_heap, b); - pipe_remove_from_heap(&wfq_ready_heap, b); - lck_mtx_unlock(dn_mutex); + purge_pipe(b); /* remove all data associated to this pipe */ + /* remove reference to here from extract_heap and wfq_ready_heap */ + pipe_remove_from_heap(&extract_heap, b); + pipe_remove_from_heap(&wfq_ready_heap, b); + lck_mtx_unlock(dn_mutex); - FREE(b, M_DUMMYNET); - } else { /* this is a WF2Q queue (dn_flow_set) */ - struct dn_flow_set *b; + FREE(b, M_DUMMYNET); + } else { /* this is a WF2Q queue (dn_flow_set) */ + struct dn_flow_set *b; - lck_mtx_lock(dn_mutex); - /* locate set */ - b = locate_flowset(p->fs.fs_nr); - if (b == NULL) { - lck_mtx_unlock(dn_mutex); - return EINVAL ; /* not found */ - } + lck_mtx_lock(dn_mutex); + /* locate set */ + b = locate_flowset(p->fs.fs_nr); + if (b == NULL) { + lck_mtx_unlock(dn_mutex); + return EINVAL; /* not found */ + } -#if IPFW2 - /* remove references to this flow_set from the ip_fw rules. */ - flush_pipe_ptrs(b); -#endif /* IPFW2 */ - /* Unlink from list of flowsets. */ - SLIST_REMOVE( &flowsethash[HASH(b->fs_nr)], b, dn_flow_set, next); + /* Unlink from list of flowsets. 
*/ + SLIST_REMOVE( &flowsethash[HASH(b->fs_nr)], b, dn_flow_set, next); - if (b->pipe != NULL) { - /* Update total weight on parent pipe and cleanup parent heaps */ - b->pipe->sum -= b->weight * b->backlogged ; - fs_remove_from_heap(&(b->pipe->not_eligible_heap), b); - fs_remove_from_heap(&(b->pipe->scheduler_heap), b); -#if 1 /* XXX should i remove from idle_heap as well ? */ - fs_remove_from_heap(&(b->pipe->idle_heap), b); + if (b->pipe != NULL) { + /* Update total weight on parent pipe and cleanup parent heaps */ + b->pipe->sum -= b->weight * b->backlogged; + fs_remove_from_heap(&(b->pipe->not_eligible_heap), b); + fs_remove_from_heap(&(b->pipe->scheduler_heap), b); +#if 1 /* XXX should i remove from idle_heap as well ? */ + fs_remove_from_heap(&(b->pipe->idle_heap), b); #endif + } + purge_flow_set(b, 1); + lck_mtx_unlock(dn_mutex); } - purge_flow_set(b, 1); - lck_mtx_unlock(dn_mutex); - } - return 0 ; + return 0; } /* * helper function used to copy data from kernel in DUMMYNET_GET */ static -char* dn_copy_set_32(struct dn_flow_set *set, char *bp) +char* +dn_copy_set_32(struct dn_flow_set *set, char *bp) { - int i, copied = 0 ; - struct dn_flow_queue *q; + int i, copied = 0; + struct dn_flow_queue *q; struct dn_flow_queue_32 *qp = (struct dn_flow_queue_32 *)bp; LCK_MTX_ASSERT(dn_mutex, LCK_MTX_ASSERT_OWNED); - for (i = 0 ; i <= set->rq_size ; i++) - for (q = set->rq[i] ; q ; q = q->next, qp++ ) { - if (q->hash_slot != i) + for (i = 0; i <= set->rq_size; i++) { + for (q = set->rq[i]; q; q = q->next, qp++) { + if (q->hash_slot != i) { printf("dummynet: ++ at %d: wrong slot (have %d, " - "should be %d)\n", copied, q->hash_slot, i); - if (q->fs != set) + "should be %d)\n", copied, q->hash_slot, i); + } + if (q->fs != set) { printf("dummynet: ++ at %d: wrong fs ptr " "(have 0x%llx, should be 0x%llx)\n", i, (uint64_t)VM_KERNEL_ADDRPERM(q->fs), (uint64_t)VM_KERNEL_ADDRPERM(set)); - copied++ ; + } + copied++; cp_queue_to_32_user( q, qp ); /* cleanup pointers */ - qp->next = (user32_addr_t)0 ; - qp->head = qp->tail = (user32_addr_t)0 ; - qp->fs = (user32_addr_t)0 ; + qp->next = (user32_addr_t)0; + qp->head = qp->tail = (user32_addr_t)0; + qp->fs = (user32_addr_t)0; } - if (copied != set->rq_elements) + } + if (copied != set->rq_elements) { printf("dummynet: ++ wrong count, have %d should be %d\n", - copied, set->rq_elements); - return (char *)qp ; + copied, set->rq_elements); + } + return (char *)qp; } static -char* dn_copy_set_64(struct dn_flow_set *set, char *bp) +char* +dn_copy_set_64(struct dn_flow_set *set, char *bp) { - int i, copied = 0 ; - struct dn_flow_queue *q; + int i, copied = 0; + struct dn_flow_queue *q; struct dn_flow_queue_64 *qp = (struct dn_flow_queue_64 *)bp; LCK_MTX_ASSERT(dn_mutex, LCK_MTX_ASSERT_OWNED); - for (i = 0 ; i <= set->rq_size ; i++) - for (q = set->rq[i] ; q ; q = q->next, qp++ ) { - if (q->hash_slot != i) + for (i = 0; i <= set->rq_size; i++) { + for (q = set->rq[i]; q; q = q->next, qp++) { + if (q->hash_slot != i) { printf("dummynet: ++ at %d: wrong slot (have %d, " - "should be %d)\n", copied, q->hash_slot, i); - if (q->fs != set) + "should be %d)\n", copied, q->hash_slot, i); + } + if (q->fs != set) { printf("dummynet: ++ at %d: wrong fs ptr " "(have 0x%llx, should be 0x%llx)\n", i, (uint64_t)VM_KERNEL_ADDRPERM(q->fs), (uint64_t)VM_KERNEL_ADDRPERM(set)); - copied++ ; + } + copied++; //bcopy(q, qp, sizeof(*q)); cp_queue_to_64_user( q, qp ); /* cleanup pointers */ - qp->next = USER_ADDR_NULL ; - qp->head = qp->tail = USER_ADDR_NULL ; - qp->fs = USER_ADDR_NULL ; 
+ qp->next = USER_ADDR_NULL; + qp->head = qp->tail = USER_ADDR_NULL; + qp->fs = USER_ADDR_NULL; } - if (copied != set->rq_elements) + } + if (copied != set->rq_elements) { printf("dummynet: ++ wrong count, have %d should be %d\n", - copied, set->rq_elements); - return (char *)qp ; + copied, set->rq_elements); + } + return (char *)qp; } static size_t dn_calc_size(int is64user) { - struct dn_flow_set *set ; - struct dn_pipe *p ; - size_t size = 0 ; + struct dn_flow_set *set; + struct dn_pipe *p; + size_t size = 0; size_t pipesize; size_t queuesize; size_t setsize; int i; LCK_MTX_ASSERT(dn_mutex, LCK_MTX_ASSERT_OWNED); - if ( is64user ){ + if (is64user) { pipesize = sizeof(struct dn_pipe_64); queuesize = sizeof(struct dn_flow_queue_64); setsize = sizeof(struct dn_flow_set_64); - } - else { + } else { pipesize = sizeof(struct dn_pipe_32); - queuesize = sizeof( struct dn_flow_queue_32 ); + queuesize = sizeof(struct dn_flow_queue_32); setsize = sizeof(struct dn_flow_set_32); } - /* - * compute size of data structures: list of pipes and flow_sets. - */ - for (i = 0; i < HASHSIZE; i++) { - SLIST_FOREACH(p, &pipehash[i], next) - size += sizeof(*p) + - p->fs.rq_elements * sizeof(struct dn_flow_queue); - SLIST_FOREACH(set, &flowsethash[i], next) - size += sizeof (*set) + - set->rq_elements * sizeof(struct dn_flow_queue); - } - return size; + /* + * compute size of data structures: list of pipes and flow_sets. + */ + for (i = 0; i < HASHSIZE; i++) { + SLIST_FOREACH(p, &pipehash[i], next) { + size += sizeof(*p) + + p->fs.rq_elements * sizeof(struct dn_flow_queue); + } + SLIST_FOREACH(set, &flowsethash[i], next) { + size += sizeof(*set) + + set->rq_elements * sizeof(struct dn_flow_queue); + } + } + return size; } static int @@ -2414,24 +2421,27 @@ dummynet_get(struct sockopt *sopt) /* * XXX: Ugly, but we need to allocate memory with M_WAITOK flag * and we cannot use this flag while holding a mutex. - */ - if (proc_is64bit(sopt->sopt_p)) + */ + if (proc_is64bit(sopt->sopt_p)) { is64user = 1; + } for (i = 0; i < 10; i++) { size = dn_calc_size(is64user); lck_mtx_unlock(dn_mutex); buf = _MALLOC(size, M_TEMP, M_WAITOK | M_ZERO); - if (buf == NULL) - return(ENOBUFS); + if (buf == NULL) { + return ENOBUFS; + } lck_mtx_lock(dn_mutex); - if (size == dn_calc_size(is64user)) + if (size == dn_calc_size(is64user)) { break; + } FREE(buf, M_TEMP); buf = NULL; } if (buf == NULL) { lck_mtx_unlock(dn_mutex); - return(ENOBUFS); + return ENOBUFS; } bp = buf; @@ -2443,7 +2453,7 @@ dummynet_get(struct sockopt *sopt) * one at a time. After each flow_set, copy the * queue descriptor it owns. 
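dummynet_get() cannot call a blocking allocator while holding dn_mutex, so it sizes the snapshot under the lock, drops the lock to allocate with M_WAITOK, and re-checks the size afterwards, retrying a bounded number of times if the pipe and flow_set population changed in between. The same drop-lock/allocate/re-check pattern in miniature, with a pthread mutex standing in for dn_mutex (illustrative only):

    #include <pthread.h>
    #include <stdlib.h>

    /* Size the data under the lock, allocate without it, and retry if the
     * size changed while the lock was dropped -- bounded to a few attempts. */
    static void *
    alloc_snapshot(pthread_mutex_t *mtx, size_t (*calc_size)(void), size_t *sizep)
    {
        void *buf = NULL;
        size_t size = 0;

        pthread_mutex_lock(mtx);
        for (int attempt = 0; attempt < 10; attempt++) {
            size = calc_size();              /* current size, lock held */
            pthread_mutex_unlock(mtx);
            buf = calloc(1, size);           /* may block; lock not held */
            pthread_mutex_lock(mtx);
            if (buf == NULL || size == calc_size()) {
                break;                       /* allocation failed, or size still valid */
            }
            free(buf);                       /* stale size: free and try again */
            buf = NULL;
        }
        pthread_mutex_unlock(mtx);
        *sizep = size;
        return buf;                          /* NULL means ENOBUFS-style failure */
    }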
*/ - if ( is64user ) { + if (is64user) { bp = cp_pipe_to_64_user(p, (struct dn_pipe_64 *)bp); } else { @@ -2455,13 +2465,13 @@ dummynet_get(struct sockopt *sopt) for (i = 0; i < HASHSIZE; i++) { SLIST_FOREACH(set, &flowsethash[i], next) { struct dn_flow_set_64 *fs_bp = - (struct dn_flow_set_64 *)bp ; + (struct dn_flow_set_64 *)bp; cp_flow_set_to_64_user(set, fs_bp); /* XXX same hack as above */ fs_bp->next = CAST_DOWN(user64_addr_t, DN_IS_QUEUE); fs_bp->pipe = USER_ADDR_NULL; - fs_bp->rq = USER_ADDR_NULL ; + fs_bp->rq = USER_ADDR_NULL; bp += sizeof(struct dn_flow_set_64); bp = dn_copy_set_64( set, bp ); } @@ -2469,7 +2479,7 @@ dummynet_get(struct sockopt *sopt) lck_mtx_unlock(dn_mutex); error = sooptcopyout(sopt, buf, size); FREE(buf, M_TEMP); - return(error); + return error; } /* @@ -2478,51 +2488,56 @@ dummynet_get(struct sockopt *sopt) static int ip_dn_ctl(struct sockopt *sopt) { - int error = 0 ; - struct dn_pipe *p, tmp_pipe; - - /* Disallow sets in really-really secure mode. */ - if (sopt->sopt_dir == SOPT_SET && securelevel >= 3) - return (EPERM); - - switch (sopt->sopt_name) { - default : - printf("dummynet: -- unknown option %d", sopt->sopt_name); - return EINVAL ; - - case IP_DUMMYNET_GET : - error = dummynet_get(sopt); - break ; - - case IP_DUMMYNET_FLUSH : - dummynet_flush() ; - break ; - - case IP_DUMMYNET_CONFIGURE : - p = &tmp_pipe ; - if (proc_is64bit(sopt->sopt_p)) - error = cp_pipe_from_user_64( sopt, p ); - else - error = cp_pipe_from_user_32( sopt, p ); - - if (error) - break ; - error = config_pipe(p); - break ; - - case IP_DUMMYNET_DEL : /* remove a pipe or queue */ - p = &tmp_pipe ; - if (proc_is64bit(sopt->sopt_p)) - error = cp_pipe_from_user_64( sopt, p ); - else - error = cp_pipe_from_user_32( sopt, p ); - if (error) - break ; - - error = delete_pipe(p); - break ; - } - return error ; + int error = 0; + struct dn_pipe *p, tmp_pipe; + + /* Disallow sets in really-really secure mode. 
*/ + if (sopt->sopt_dir == SOPT_SET && securelevel >= 3) { + return EPERM; + } + + switch (sopt->sopt_name) { + default: + printf("dummynet: -- unknown option %d", sopt->sopt_name); + return EINVAL; + + case IP_DUMMYNET_GET: + error = dummynet_get(sopt); + break; + + case IP_DUMMYNET_FLUSH: + dummynet_flush(); + break; + + case IP_DUMMYNET_CONFIGURE: + p = &tmp_pipe; + if (proc_is64bit(sopt->sopt_p)) { + error = cp_pipe_from_user_64( sopt, p ); + } else { + error = cp_pipe_from_user_32( sopt, p ); + } + + if (error) { + break; + } + error = config_pipe(p); + break; + + case IP_DUMMYNET_DEL: /* remove a pipe or queue */ + p = &tmp_pipe; + if (proc_is64bit(sopt->sopt_p)) { + error = cp_pipe_from_user_64( sopt, p ); + } else { + error = cp_pipe_from_user_32( sopt, p ); + } + if (error) { + break; + } + + error = delete_pipe(p); + break; + } + return error; } void @@ -2540,35 +2555,19 @@ ip_dn_init(void) dn_mutex_attr = lck_attr_alloc_init(); lck_mtx_init(dn_mutex, dn_mutex_grp, dn_mutex_attr); - ready_heap.size = ready_heap.elements = 0 ; - ready_heap.offset = 0 ; + ready_heap.size = ready_heap.elements = 0; + ready_heap.offset = 0; - wfq_ready_heap.size = wfq_ready_heap.elements = 0 ; - wfq_ready_heap.offset = 0 ; + wfq_ready_heap.size = wfq_ready_heap.elements = 0; + wfq_ready_heap.offset = 0; - extract_heap.size = extract_heap.elements = 0 ; - extract_heap.offset = 0 ; + extract_heap.size = extract_heap.elements = 0; + extract_heap.offset = 0; ip_dn_ctl_ptr = ip_dn_ctl; ip_dn_io_ptr = dummynet_io; - - bzero(&default_rule, sizeof default_rule); -#if IPFIREWALL - default_rule.act_ofs = 0; - default_rule.rulenum = IPFW_DEFAULT_RULE; - default_rule.cmd_len = 1; - default_rule.set = RESVD_SET; - - default_rule.cmd[0].len = 1; - default_rule.cmd[0].opcode = -#ifdef IPFIREWALL_DEFAULT_TO_ACCEPT - (1) ? O_ACCEPT : -#endif - O_DENY; -#endif } -struct dn_event_nwk_wq_entry -{ +struct dn_event_nwk_wq_entry { struct nwk_wq_entry nwk_wqe; struct dummynet_event dn_ev_arg; }; diff --git a/bsd/netinet/ip_dummynet.h b/bsd/netinet/ip_dummynet.h index fae71f8e0..387037bcb 100644 --- a/bsd/netinet/ip_dummynet.h +++ b/bsd/netinet/ip_dummynet.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2013 Apple Inc. All rights reserved. + * Copyright (c) 2000-2019 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -157,7 +157,6 @@ struct dn_heap { #include /* for ip6_out_args */ struct dn_pkt_tag { - struct ip_fw *dn_ipfw_rule; /* matching IPFW rule */ void *dn_pf_rule; /* matching PF rule */ int dn_dir; /* action when packet comes out. */ #define DN_TO_IP_OUT 1 @@ -185,9 +184,6 @@ struct dn_pkt_tag { u_int32_t dn_unfragpartlen; /* for ip6_output */ struct ip6_exthdrs dn_exthdrs; /* for ip6_output */ int dn_flags; /* flags, for ip[6]_output */ - int dn_client; -#define DN_CLIENT_IPFW 1 -#define DN_CLIENT_PF 2 union { struct ip_out_args _dn_ipoa;/* output args, for ip_output. MUST COPY */ struct ip6_out_args _dn_ip6oa;/* output args, for ip_output. 
MUST COPY */ @@ -414,14 +410,13 @@ SLIST_HEAD(dn_pipe_head, dn_pipe); #ifdef BSD_KERNEL_PRIVATE extern uint32_t my_random(void); -void ip_dn_init(void); /* called from raw_ip.c:load_ipfw() */ +void ip_dn_init(void); typedef int ip_dn_ctl_t(struct sockopt *); /* raw_ip.c */ typedef int ip_dn_io_t(struct mbuf *m, int pipe_nr, int dir, - struct ip_fw_args *fwa, int ); + struct ip_fw_args *fwa); extern ip_dn_ctl_t *ip_dn_ctl_ptr; extern ip_dn_io_t *ip_dn_io_ptr; -void dn_ipfw_rule_delete(void *); #define DUMMYNET_LOADED (ip_dn_io_ptr != NULL) #pragma pack(4) @@ -673,23 +668,6 @@ struct dn_pipe_64 { /* a pipe */ struct dn_flow_set_64 fs; /* used with fixed-rate flows */ }; -/* - * Return the IPFW rule associated with the dummynet tag; if any. - * Make sure that the dummynet tag is not reused by lower layers. - */ -static __inline struct ip_fw * -ip_dn_claim_rule(struct mbuf *m) -{ - struct m_tag *mtag = m_tag_locate(m, KERNEL_MODULE_TAG_ID, - KERNEL_TAG_TYPE_DUMMYNET, NULL); - if (mtag != NULL) { - mtag->m_tag_type = KERNEL_TAG_TYPE_NONE; - return ((struct dn_pkt_tag *)(mtag + 1))->dn_ipfw_rule; - } else { - return NULL; - } -} - #include /* Dummynet event handling declarations */ extern struct eventhandler_lists_ctxt dummynet_evhdlr_ctxt; diff --git a/bsd/netinet/ip_ecn.c b/bsd/netinet/ip_ecn.c index c7023fa4d..5cbc50b50 100644 --- a/bsd/netinet/ip_ecn.c +++ b/bsd/netinet/ip_ecn.c @@ -68,14 +68,10 @@ #include #include #include -#if INET6 #include -#endif #include -#if INET6 #include -#endif /* * modify outer ECN (TOS) field on ingress operation (tunnel encapsulation). @@ -127,7 +123,6 @@ ip_ecn_egress(int mode, const u_int8_t *outer, u_int8_t *inner) return 1; } -#if INET6 void ip6_ecn_ingress(int mode, u_int32_t *outer, const u_int32_t *inner) { @@ -236,5 +231,3 @@ ip64_ecn_egress(int mode, const u_int8_t *outer, u_int32_t *inner) *inner |= htonl((u_int32_t)inner8 << 20); return 1; } - -#endif diff --git a/bsd/netinet/ip_encap.c b/bsd/netinet/ip_encap.c index 4aaa6fa38..294b98752 100644 --- a/bsd/netinet/ip_encap.c +++ b/bsd/netinet/ip_encap.c @@ -103,12 +103,9 @@ #include #include -#if INET6 #include #include #include -#endif - #include @@ -288,7 +285,6 @@ encap4_input(struct mbuf *m, int off) } #endif -#if INET6 int encap6_input(struct mbuf **mp, int *offp, int proto) { @@ -362,7 +358,6 @@ encap6_input(struct mbuf **mp, int *offp, int proto) /* last resort: inject to raw socket */ return rip6_input(mp, offp, proto); } -#endif static void encap_add_locked(struct encaptab *ep) diff --git a/bsd/netinet/ip_flowid.h b/bsd/netinet/ip_flowid.h index 3c68a809f..47f6df617 100644 --- a/bsd/netinet/ip_flowid.h +++ b/bsd/netinet/ip_flowid.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011-2012 Apple Inc. All rights reserved. + * Copyright (c) 2011-2019 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -82,7 +82,6 @@ struct ip_flow_id { struct route_in6; struct sockaddr_in6; struct pf_rule; -struct ip_fw; /* * Arguments for calling ipfw_chk() and dummynet_io(). 
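
With the IPFW glue gone, the header keeps dummynet reachable only through the ip_dn_ctl_ptr and ip_dn_io_ptr function pointers that ip_dn_init() installs and that DUMMYNET_LOADED tests, so the IP paths never take a hard dependency on the subsystem. A small standalone sketch of that optional-hook idiom; pkt_hook_t, pkt_hook_ptr, subsys_init(), and input_path() are illustrative names only:

    #include <stdio.h>
    #include <stddef.h>

    /* Optional subsystem hook: NULL until the subsystem initializes. */
    typedef int pkt_hook_t(int pkt_len);
    static pkt_hook_t *pkt_hook_ptr = NULL;

    #define HOOK_LOADED (pkt_hook_ptr != NULL)

    /* The optional subsystem's handler and its init routine. */
    static int
    subsys_handle(int pkt_len)
    {
        printf("handled %d bytes\n", pkt_len);
        return 0;
    }

    static void
    subsys_init(void)
    {
        pkt_hook_ptr = subsys_handle;   /* like ip_dn_init() setting ip_dn_io_ptr */
    }

    /* Hot-path caller: a cheap NULL test instead of a link-time dependency. */
    static int
    input_path(int pkt_len)
    {
        if (HOOK_LOADED) {
            return pkt_hook_ptr(pkt_len);
        }
        return -1;                      /* subsystem not present: fall through */
    }

    int
    main(void)
    {
        input_path(64);                 /* skipped: hook not yet installed */
        subsys_init();
        return input_path(64);          /* now dispatched through the hook */
    }
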
We put them @@ -92,8 +91,6 @@ struct ip_fw; struct ip_fw_args { struct mbuf *fwa_m; /* the mbuf chain */ struct ifnet *fwa_oif; /* output interface */ - struct sockaddr_in *fwa_next_hop; /* forward address */ - struct ip_fw *fwa_ipfw_rule; /* matching IPFW rule */ struct pf_rule *fwa_pf_rule; /* matching PF rule */ struct ether_header *fwa_eh; /* for bridged packets */ int fwa_flags; /* for dummynet */ @@ -116,7 +113,6 @@ struct ip_fw_args { u_int32_t fwa_unfragpartlen; /* for IPv6 output */ struct ip6_exthdrs *fwa_exthdrs; /* for IPv6 output */ struct ip_flow_id fwa_id; /* grabbed from IP header */ - u_int16_t fwa_divert_rule;/* divert cookie */ u_int32_t fwa_cookie; }; #define fwa_ipoa fwa_ipoa_._fwa_ipoa @@ -128,10 +124,7 @@ struct ip_fw_args { /* Allocate a separate structure for inputs args to save space and bzero time */ struct ip_fw_in_args { - struct sockaddr_in *fwai_next_hop; /* forward address */ - struct ip_fw *fwai_ipfw_rule;/* matching IPFW rule */ struct pf_rule *fwai_pf_rule; /* matching PF rule */ - u_int16_t fwai_divert_rule;/* divert cookie */ }; #endif /* BSD_KERNEL_PRIVATE */ diff --git a/bsd/netinet/ip_fw.h b/bsd/netinet/ip_fw.h deleted file mode 100644 index 75f519064..000000000 --- a/bsd/netinet/ip_fw.h +++ /dev/null @@ -1,329 +0,0 @@ -/* - * Copyright (c) 2008-2013 Apple Inc. All rights reserved. - * - * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * - * This file contains Original Code and/or Modifications of Original Code - * as defined in and that are subject to the Apple Public Source License - * Version 2.0 (the 'License'). You may not use this file except in - * compliance with the License. The rights granted to you under the License - * may not be used to create, or enable the creation or redistribution of, - * unlawful or unlicensed copies of an Apple operating system, or to - * circumvent, violate, or enable the circumvention or violation of, any - * terms of an Apple operating system software license agreement. - * - * Please obtain a copy of the License at - * http://www.opensource.apple.com/apsl/ and read it before using this file. - * - * The Original Code and all software distributed under the License are - * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER - * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, - * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. - * Please see the License for the specific language governing rights and - * limitations under the License. - * - * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ - */ -/* - * Copyright (c) 1993 Daniel Boulet - * Copyright (c) 1994 Ugen J.S.Antsilevich - * - * Redistribution and use in source forms, with and without modification, - * are permitted provided that this entire comment appears intact. - * - * Redistribution in binary form may occur without any restrictions. - * Obviously, it would be nice if you gave credit where credit is due - * but requiring it would be too onerous. - * - * This software is provided ``AS IS'' without any warranties of any kind. - * - */ - -#ifndef _IP_FW_H -#define _IP_FW_H -#ifdef __APPLE_API_OBSOLETE - -#include - -#ifdef IPFW2 -#include -#else /* !IPFW2, good old ipfw */ - -#include -#include /* u_ types */ - -#define IP_FW_CURRENT_API_VERSION 20 /* Version of this API */ - - -/* - * This union structure identifies an interface, either explicitly - * by name or implicitly by IP address. 
The flags IP_FW_F_IIFNAME - * and IP_FW_F_OIFNAME say how to interpret this structure. An - * interface unit number of -1 matches any unit number, while an - * IP address of 0.0.0.0 indicates matches any interface. - * - * The receive and transmit interfaces are only compared against the - * the packet if the corresponding bit (IP_FW_F_IIFACE or IP_FW_F_OIFACE) - * is set. Note some packets lack a receive or transmit interface - * (in which case the missing "interface" never matches). - */ - -union ip_fw_if { - struct in_addr fu_via_ip; /* Specified by IP address */ - struct { /* Specified by interface name */ -#define FW_IFNLEN 10 /* need room ! was IFNAMSIZ */ - char name[FW_IFNLEN]; - short unit; /* -1 means match any unit */ - } fu_via_if; -}; - -/* - * Format of an IP firewall descriptor - * - * fw_src, fw_dst, fw_smsk, fw_dmsk are always stored in network byte order. - * fw_flg and fw_n*p are stored in host byte order (of course). - * Port numbers are stored in HOST byte order. - */ - -struct ip_fw { - u_int32_t version; /* Version of this structure. Should always be */ - /* set to IP_FW_CURRENT_API_VERSION by clients. */ - void *context; /* Context that is usable by user processes to */ - /* identify this rule. */ - u_int64_t fw_pcnt, fw_bcnt; /* Packet and byte counters */ - struct in_addr fw_src, fw_dst; /* Source and destination IP addr */ - struct in_addr fw_smsk, fw_dmsk; /* Mask for src and dest IP addr */ - u_short fw_number; /* Rule number */ - u_int fw_flg; /* Flags word */ -#define IP_FW_MAX_PORTS 10 /* A reasonable maximum */ - union { - u_short fw_pts[IP_FW_MAX_PORTS]; /* Array of port numbers to match */ -#define IP_FW_ICMPTYPES_MAX 128 -#define IP_FW_ICMPTYPES_DIM (IP_FW_ICMPTYPES_MAX / (sizeof(unsigned) * 8)) - unsigned fw_icmptypes[IP_FW_ICMPTYPES_DIM]; /* ICMP types bitmap */ - } fw_uar; - u_int fw_ipflg; /* IP flags word */ - u_char fw_ipopt, fw_ipnopt; /* IP options set/unset */ - u_char fw_tcpopt, fw_tcpnopt; /* TCP options set/unset */ - u_char fw_tcpf, fw_tcpnf; /* TCP flags set/unset */ - long timestamp; /* timestamp (tv_sec) of last match */ - union ip_fw_if fw_in_if, fw_out_if; /* Incoming and outgoing interfaces */ - union { - u_short fu_divert_port; /* Divert/tee port (options IPDIVERT) */ - u_short fu_pipe_nr; /* queue number (option DUMMYNET) */ - u_short fu_skipto_rule; /* SKIPTO command rule number */ - u_short fu_reject_code; /* REJECT response code */ - struct sockaddr_in fu_fwd_ip; - } fw_un; - u_char fw_prot; /* IP protocol */ - /* - * N'of src ports and # of dst ports in ports array (dst ports - * follow src ports; max of 10 ports in all; count of 0 means - * match all ports) - */ - u_char fw_nports; - void *pipe_ptr; /* flow_set ptr for dummynet pipe */ - void *next_rule_ptr; /* next rule in case of match */ - uid_t fw_uid; /* uid to match */ - int fw_logamount; /* amount to log */ - u_int64_t fw_loghighest; /* highest number packet to log */ -}; - -/* - * extended ipfw structure... some fields in the original struct - * can be used to pass parameters up/down, namely pointers - * void *pipe_ptr - * void *next_rule_ptr - * some others can be used to pass parameters down, namely counters etc. 
- * u_int64_t fw_pcnt,fw_bcnt; - * long timestamp; - */ - -struct ip_fw_ext { /* extended structure */ - struct ip_fw rule; /* must be at offset 0 */ - long dont_match_prob; /* 0x7fffffff means 1.0, always fail */ - u_int dyn_type;/* type for dynamic rule */ -}; - -#define IP_FW_GETNSRCP(rule) ((rule)->fw_nports & 0x0f) -#define IP_FW_SETNSRCP(rule, n) do { \ - (rule)->fw_nports &= ~0x0f; \ - (rule)->fw_nports |= (n); \ - } while (0) -#define IP_FW_GETNDSTP(rule) ((rule)->fw_nports >> 4) -#define IP_FW_SETNDSTP(rule, n) do { \ - (rule)->fw_nports &= ~0xf0; \ - (rule)->fw_nports |= (n) << 4;\ - } while (0) - -#define fw_divert_port fw_un.fu_divert_port -#define fw_skipto_rule fw_un.fu_skipto_rule -#define fw_reject_code fw_un.fu_reject_code -#define fw_pipe_nr fw_un.fu_pipe_nr -#define fw_fwd_ip fw_un.fu_fwd_ip - -struct ip_fw_chain { - LIST_ENTRY(ip_fw_chain) next; - struct ip_fw *rule; -}; - -/* - * Flow mask/flow id for each queue. - */ -struct ipfw_flow_id { - u_int32_t dst_ip, src_ip; - u_int16_t dst_port, src_port; - u_int8_t proto; - u_int8_t flags; /* protocol-specific flags */ -}; - -/* - * dynamic ipfw rule - */ -struct ipfw_dyn_rule { - struct ipfw_dyn_rule *next; - - struct ipfw_flow_id id; - struct ipfw_flow_id mask; - struct ip_fw_chain *chain; /* pointer to parent rule */ - u_int32_t type; /* rule type */ - u_int32_t expire; /* expire time */ - u_int64_t pcnt, bcnt; /* match counters */ - u_int32_t bucket; /* which bucket in hash table */ - u_int32_t state; /* state of this rule (typ. a */ - /* combination of TCP flags) */ -}; - -/* - * Values for "flags" field . - */ -#define IP_FW_F_COMMAND 0x000000ff /* Mask for type of chain entry: */ -#define IP_FW_F_DENY 0x00000000 /* This is a deny rule */ -#define IP_FW_F_REJECT 0x00000001 /* Deny and send a response packet */ -#define IP_FW_F_ACCEPT 0x00000002 /* This is an accept rule */ -#define IP_FW_F_COUNT 0x00000003 /* This is a count rule */ -#define IP_FW_F_DIVERT 0x00000004 /* This is a divert rule */ -#define IP_FW_F_TEE 0x00000005 /* This is a tee rule */ -#define IP_FW_F_SKIPTO 0x00000006 /* This is a skipto rule */ -#define IP_FW_F_FWD 0x00000007 /* This is a "change forwarding address" rule */ -#define IP_FW_F_PIPE 0x00000008 /* This is a dummynet rule */ -#define IP_FW_F_QUEUE 0x00000009 /* This is a dummynet queue */ - -#define IP_FW_F_IN 0x00000100 /* Check inbound packets */ -#define IP_FW_F_OUT 0x00000200 /* Check outbound packets */ -#define IP_FW_F_IIFACE 0x00000400 /* Apply inbound interface test */ -#define IP_FW_F_OIFACE 0x00000800 /* Apply outbound interface test */ - -#define IP_FW_F_PRN 0x00001000 /* Print if this rule matches */ - -#define IP_FW_F_SRNG 0x00002000 /* The first two src ports are a min * - * and max range (stored in host byte * - * order). */ - -#define IP_FW_F_DRNG 0x00004000 /* The first two dst ports are a min * - * and max range (stored in host byte * - * order). 
*/ - -#define IP_FW_F_FRAG 0x00008000 /* Fragment */ - -#define IP_FW_F_IIFNAME 0x00010000 /* In interface by name/unit (not IP) */ -#define IP_FW_F_OIFNAME 0x00020000 /* Out interface by name/unit (not IP) */ - -#define IP_FW_F_INVSRC 0x00040000 /* Invert sense of src check */ -#define IP_FW_F_INVDST 0x00080000 /* Invert sense of dst check */ - -#define IP_FW_F_ICMPBIT 0x00100000 /* ICMP type bitmap is valid */ - -#define IP_FW_F_UID 0x00200000 /* filter by uid */ - -#define IP_FW_F_RND_MATCH 0x00800000 /* probabilistic rule match */ -#define IP_FW_F_SMSK 0x01000000 /* src-port + mask */ -#define IP_FW_F_DMSK 0x02000000 /* dst-port + mask */ -#define IP_FW_BRIDGED 0x04000000 /* only match bridged packets */ -#define IP_FW_F_KEEP_S 0x08000000 /* keep state */ -#define IP_FW_F_CHECK_S 0x10000000 /* check state */ - -#define IP_FW_F_SME 0x20000000 /* source = me */ -#define IP_FW_F_DME 0x40000000 /* destination = me */ - -#define IP_FW_F_MASK 0x7FFFFFFF /* All possible flag bits mask */ - -/* - * Flags for the 'fw_ipflg' field, for comparing values of ip and its protocols. - */ -#define IP_FW_IF_TCPEST 0x00000020 /* established TCP connection */ -#define IP_FW_IF_TCPMSK 0x00000020 /* mask of all TCP values */ - -/* - * For backwards compatibility with rules specifying "via iface" but - * not restricted to only "in" or "out" packets, we define this combination - * of bits to represent this configuration. - */ - -#define IF_FW_F_VIAHACK (IP_FW_F_IN|IP_FW_F_OUT|IP_FW_F_IIFACE|IP_FW_F_OIFACE) - -/* - * Definitions for REJECT response codes. - * Values less than 256 correspond to ICMP unreachable codes. - */ -#define IP_FW_REJECT_RST 0x0100 /* TCP packets: send RST */ - -/* - * Definitions for IP option names. - */ -#define IP_FW_IPOPT_LSRR 0x01 -#define IP_FW_IPOPT_SSRR 0x02 -#define IP_FW_IPOPT_RR 0x04 -#define IP_FW_IPOPT_TS 0x08 - -/* - * Definitions for TCP option names. - */ -#define IP_FW_TCPOPT_MSS 0x01 -#define IP_FW_TCPOPT_WINDOW 0x02 -#define IP_FW_TCPOPT_SACK 0x04 -#define IP_FW_TCPOPT_TS 0x08 -#define IP_FW_TCPOPT_CC 0x10 - -/* - * Definitions for TCP flags. - */ -#define IP_FW_TCPF_FIN TH_FIN -#define IP_FW_TCPF_SYN TH_SYN -#define IP_FW_TCPF_RST TH_RST -#define IP_FW_TCPF_PSH TH_PUSH -#define IP_FW_TCPF_ACK TH_ACK -#define IP_FW_TCPF_URG TH_URG - -/* - * Main firewall chains definitions and global var's definitions. - */ -#ifdef BSD_KERNEL_PRIVATE - -#define IP_FW_PORT_DYNT_FLAG 0x10000 -#define IP_FW_PORT_TEE_FLAG 0x20000 -#define IP_FW_PORT_DENY_FLAG 0x40000 - -/* - * Function definitions. - */ -void ip_fw_init(void); - -/* Firewall hooks */ -struct ip; -struct sockopt; -typedef int ip_fw_chk_t(struct ip **, int, struct ifnet *, u_int16_t *, - struct mbuf **, struct ip_fw_chain **, struct sockaddr_in **); -typedef int ip_fw_ctl_t(struct sockopt *); -extern ip_fw_chk_t *ip_fw_chk_ptr; -extern ip_fw_ctl_t *ip_fw_ctl_ptr; -extern int fw_one_pass; -extern int fw_enable; -#define IPFW_LOADED (ip_fw_chk_ptr != NULL) -extern struct ipfw_flow_id last_pkt; -#endif /* BSD_KERNEL_PRIVATE */ - -#endif /* !IPFW2 */ -#endif /* __APPLE_API_OBSOLETE */ -#endif /* _IP_FW_H */ diff --git a/bsd/netinet/ip_fw2.c b/bsd/netinet/ip_fw2.c deleted file mode 100644 index 0ec625ec6..000000000 --- a/bsd/netinet/ip_fw2.c +++ /dev/null @@ -1,4227 +0,0 @@ -/* - * Copyright (c) 2004-2019 Apple Inc. All rights reserved. 
- * - * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * - * This file contains Original Code and/or Modifications of Original Code - * as defined in and that are subject to the Apple Public Source License - * Version 2.0 (the 'License'). You may not use this file except in - * compliance with the License. The rights granted to you under the License - * may not be used to create, or enable the creation or redistribution of, - * unlawful or unlicensed copies of an Apple operating system, or to - * circumvent, violate, or enable the circumvention or violation of, any - * terms of an Apple operating system software license agreement. - * - * Please obtain a copy of the License at - * http://www.opensource.apple.com/apsl/ and read it before using this file. - * - * The Original Code and all software distributed under the License are - * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER - * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, - * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. - * Please see the License for the specific language governing rights and - * limitations under the License. - * - * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ - */ - -/* - * Copyright (c) 2002 Luigi Rizzo, Universita` di Pisa - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - * - * $FreeBSD: src/sys/netinet/ip_fw2.c,v 1.6.2.18 2003/10/17 11:01:03 scottl Exp $ - */ - -#define DEB(x) -#define DDB(x) x - -/* - * Implement IP packet firewall (new version) - */ - -#ifndef INET -#error IPFIREWALL requires INET. -#endif /* INET */ - -#if IPFW2 - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#if DUMMYNET -#include -#endif /* DUMMYNET */ - -#include -#include -#include -#include -#include -#include - -#ifdef IPSEC -#include -#endif - -#include /* XXX for ETHERTYPE_IP */ - -#include "ip_fw2_compat.h" - -#include -#include - -/* - #include - */ /* XXX for in_cksum */ - -/* - * XXX This one should go in sys/mbuf.h. 
It is used to avoid that - * a firewall-generated packet loops forever through the firewall. - */ -#ifndef M_SKIP_FIREWALL -#define M_SKIP_FIREWALL 0x4000 -#endif - -/* - * set_disable contains one bit per set value (0..31). - * If the bit is set, all rules with the corresponding set - * are disabled. Set RESVD_SET(31) is reserved for the default rule - * and rules that are not deleted by the flush command, - * and CANNOT be disabled. - * Rules in set RESVD_SET can only be deleted explicitly. - */ -static u_int32_t set_disable; - -int fw_verbose; -static int verbose_limit; -extern int fw_bypass; - -#define IPFW_RULE_INACTIVE 1 - -/* - * list of rules for layer 3 - */ -static struct ip_fw *layer3_chain; - -MALLOC_DEFINE(M_IPFW, "IpFw/IpAcct", "IpFw/IpAcct chain's"); - -static int fw_debug = 0; -static int autoinc_step = 100; /* bounded to 1..1000 in add_rule() */ - -static void ipfw_kev_post_msg(u_int32_t ); - -static int Get32static_len(void); -static int Get64static_len(void); - -#ifdef SYSCTL_NODE - -static int ipfw_sysctl SYSCTL_HANDLER_ARGS; - -SYSCTL_NODE(_net_inet_ip, OID_AUTO, fw, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "Firewall"); -SYSCTL_PROC(_net_inet_ip_fw, OID_AUTO, enable, - CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, - &fw_enable, 0, ipfw_sysctl, "I", "Enable ipfw"); -SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, autoinc_step, CTLFLAG_RW | CTLFLAG_LOCKED, - &autoinc_step, 0, "Rule number autincrement step"); -SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, one_pass, - CTLFLAG_RW | CTLFLAG_LOCKED, - &fw_one_pass, 0, - "Only do a single pass through ipfw when using dummynet(4)"); -SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, debug, - CTLFLAG_RW | CTLFLAG_LOCKED, - &fw_debug, 0, "Enable printing of debug ip_fw statements"); -SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, verbose, - CTLFLAG_RW | CTLFLAG_LOCKED, - &fw_verbose, 0, "Log matches to ipfw rules"); -SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, verbose_limit, CTLFLAG_RW | CTLFLAG_LOCKED, - &verbose_limit, 0, "Set upper limit of matches of ipfw rules logged"); - -/* - * IP FW Stealth Logging: - */ -typedef enum ipfw_stealth_stats_type { - IPFW_STEALTH_STATS_UDP, - IPFW_STEALTH_STATS_TCP, - IPFW_STEALTH_STATS_UDPv6, - IPFW_STEALTH_STATS_TCPv6, - IPFW_STEALTH_STATS_MAX, -} ipfw_stealth_stats_type_t; - -#define IPFW_STEALTH_TIMEOUT_SEC 30 - -#define DYN_KEEPALIVE_LEEWAY 15 - -// Piggybagging Stealth stats with ipfw_tick(). -#define IPFW_STEALTH_TIMEOUT_FREQUENCY (30 / dyn_keepalive_period) - -static const char* ipfw_stealth_stats_str[IPFW_STEALTH_STATS_MAX] = { - "UDP", "TCP", "UDP v6", "TCP v6", -}; - -static uint32_t ipfw_stealth_stats_needs_flush = FALSE; -static uint32_t ipfw_stealth_stats[IPFW_STEALTH_STATS_MAX]; - -static void ipfw_stealth_flush_stats(void); -void ipfw_stealth_stats_incr_udp(void); -void ipfw_stealth_stats_incr_tcp(void); -void ipfw_stealth_stats_incr_udpv6(void); -void ipfw_stealth_stats_incr_tcpv6(void); - -/* - * Description of dynamic rules. - * - * Dynamic rules are stored in lists accessed through a hash table - * (ipfw_dyn_v) whose size is curr_dyn_buckets. This value can - * be modified through the sysctl variable dyn_buckets which is - * updated when the table becomes empty. - * - * XXX currently there is only one list, ipfw_dyn. - * - * When a packet is received, its address fields are first masked - * with the mask defined for the rule, then hashed, then matched - * against the entries in the corresponding list. 
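
The deleted comment above compresses the whole dynamic-rule lookup into one sentence: mask the packet's flow id with the rule's mask, hash the masked id into ipfw_dyn_v, and match only against the entries in that one bucket. A compact sketch of that mask-hash-walk sequence over deliberately simplified, hypothetical types (flow4, dyn_entry, NBUCKETS), not the kernel's own structures:

    #include <stdint.h>
    #include <stddef.h>

    #define NBUCKETS 256                  /* must stay a power of 2 */

    struct flow4 {
        uint32_t src_ip, dst_ip;
        uint16_t src_port, dst_port;
    };

    struct dyn_entry {
        struct dyn_entry *next;
        struct flow4      id;             /* stored already masked */
    };

    static struct dyn_entry *buckets[NBUCKETS];

    static unsigned
    hash_flow(const struct flow4 *f)
    {
        /* XOR keeps the hash cheap; the AND keeps it in range. */
        return (f->src_ip ^ f->dst_ip ^ f->src_port ^ f->dst_port)
               & (NBUCKETS - 1);
    }

    /* Mask the packet's flow id, hash it, then scan only that bucket. */
    struct dyn_entry *
    lookup_dyn(const struct flow4 *pkt, const struct flow4 *mask)
    {
        struct flow4 key = {
            .src_ip   = pkt->src_ip   & mask->src_ip,
            .dst_ip   = pkt->dst_ip   & mask->dst_ip,
            .src_port = pkt->src_port & mask->src_port,
            .dst_port = pkt->dst_port & mask->dst_port,
        };

        for (struct dyn_entry *e = buckets[hash_flow(&key)]; e; e = e->next) {
            if (e->id.src_ip == key.src_ip && e->id.dst_ip == key.dst_ip &&
                e->id.src_port == key.src_port && e->id.dst_port == key.dst_port) {
                return e;
            }
        }
        return NULL;
    }

Keeping NBUCKETS a power of two lets the mask replace a modulo, the same trick the deleted code relies on with curr_dyn_buckets.
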
- * Dynamic rules can be used for different purposes: - * + stateful rules; - * + enforcing limits on the number of sessions; - * + in-kernel NAT (not implemented yet) - * - * The lifetime of dynamic rules is regulated by dyn_*_lifetime, - * measured in seconds and depending on the flags. - * - * The total number of dynamic rules is stored in dyn_count. - * The max number of dynamic rules is dyn_max. When we reach - * the maximum number of rules we do not create anymore. This is - * done to avoid consuming too much memory, but also too much - * time when searching on each packet (ideally, we should try instead - * to put a limit on the length of the list on each bucket...). - * - * Each dynamic rule holds a pointer to the parent ipfw rule so - * we know what action to perform. Dynamic rules are removed when - * the parent rule is deleted. XXX we should make them survive. - * - * There are some limitations with dynamic rules -- we do not - * obey the 'randomized match', and we do not do multiple - * passes through the firewall. XXX check the latter!!! - */ -static ipfw_dyn_rule **ipfw_dyn_v = NULL; -static u_int32_t dyn_buckets = 256; /* must be power of 2 */ -static u_int32_t curr_dyn_buckets = 256; /* must be power of 2 */ - -/* - * Timeouts for various events in handing dynamic rules. - */ -static u_int32_t dyn_ack_lifetime = 300; -static u_int32_t dyn_syn_lifetime = 20; -static u_int32_t dyn_fin_lifetime = 1; -static u_int32_t dyn_rst_lifetime = 1; -static u_int32_t dyn_udp_lifetime = 10; -static u_int32_t dyn_short_lifetime = 5; - -/* - * Keepalives are sent if dyn_keepalive is set. They are sent every - * dyn_keepalive_period seconds, in the last dyn_keepalive_interval - * seconds of lifetime of a rule. - * dyn_rst_lifetime and dyn_fin_lifetime should be strictly lower - * than dyn_keepalive_period. - */ - -static u_int32_t dyn_keepalive_interval = 25; -static u_int32_t dyn_keepalive_period = 5; -static u_int32_t dyn_keepalive = 1; /* do send keepalives */ - -static u_int32_t static_count; /* # of static rules */ -static u_int32_t static_len; /* size in bytes of static rules */ -static u_int32_t static_len_32; /* size in bytes of static rules for 32 bit client */ -static u_int32_t static_len_64; /* size in bytes of static rules for 64 bit client */ -static u_int32_t dyn_count; /* # of dynamic rules */ -static u_int32_t dyn_max = 4096; /* max # of dynamic rules */ - -SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, dyn_buckets, CTLFLAG_RW | CTLFLAG_LOCKED, - &dyn_buckets, 0, "Number of dyn. buckets"); -SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, curr_dyn_buckets, CTLFLAG_RD | CTLFLAG_LOCKED, - &curr_dyn_buckets, 0, "Current Number of dyn. buckets"); -SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, dyn_count, CTLFLAG_RD | CTLFLAG_LOCKED, - &dyn_count, 0, "Number of dyn. rules"); -SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, dyn_max, CTLFLAG_RW | CTLFLAG_LOCKED, - &dyn_max, 0, "Max number of dyn. rules"); -SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, static_count, CTLFLAG_RD | CTLFLAG_LOCKED, - &static_count, 0, "Number of static rules"); -SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, dyn_ack_lifetime, CTLFLAG_RW | CTLFLAG_LOCKED, - &dyn_ack_lifetime, 0, "Lifetime of dyn. rules for acks"); -SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, dyn_syn_lifetime, CTLFLAG_RW | CTLFLAG_LOCKED, - &dyn_syn_lifetime, 0, "Lifetime of dyn. rules for syn"); -SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, dyn_fin_lifetime, CTLFLAG_RW | CTLFLAG_LOCKED, - &dyn_fin_lifetime, 0, "Lifetime of dyn. 
rules for fin"); -SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, dyn_rst_lifetime, CTLFLAG_RW | CTLFLAG_LOCKED, - &dyn_rst_lifetime, 0, "Lifetime of dyn. rules for rst"); -SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, dyn_udp_lifetime, CTLFLAG_RW | CTLFLAG_LOCKED, - &dyn_udp_lifetime, 0, "Lifetime of dyn. rules for UDP"); -SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, dyn_short_lifetime, CTLFLAG_RW | CTLFLAG_LOCKED, - &dyn_short_lifetime, 0, "Lifetime of dyn. rules for other situations"); -SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, dyn_keepalive, CTLFLAG_RW | CTLFLAG_LOCKED, - &dyn_keepalive, 0, "Enable keepalives for dyn. rules"); - - -static int -ipfw_sysctl SYSCTL_HANDLER_ARGS -{ -#pragma unused(arg1, arg2) - int error; - - error = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2, req); - if (error || !req->newptr) { - return error; - } - - ipfw_kev_post_msg(KEV_IPFW_ENABLE); - - return error; -} - -#endif /* SYSCTL_NODE */ - - -static ip_fw_chk_t ipfw_chk; - -/* firewall lock */ -lck_grp_t *ipfw_mutex_grp; -lck_grp_attr_t *ipfw_mutex_grp_attr; -lck_attr_t *ipfw_mutex_attr; -decl_lck_mtx_data(, ipfw_mutex_data); -lck_mtx_t *ipfw_mutex = &ipfw_mutex_data; - -extern void ipfwsyslog( int level, const char *format, ...); - -#define ipfwstring "ipfw:" -static size_t ipfwstringlen; - -#define dolog( a ) { \ - if ( fw_verbose == 2 ) /* Apple logging, log to ipfw.log */ \ - ipfwsyslog a ; \ - else log a ; \ -} - -#define RULESIZE64(rule) (sizeof(struct ip_fw_64) + \ - ((struct ip_fw *)(rule))->cmd_len * 4 - 4) - -#define RULESIZE32(rule) (sizeof(struct ip_fw_32) + \ - ((struct ip_fw *)(rule))->cmd_len * 4 - 4) - -void -ipfwsyslog( int level, const char *format, ...) -{ -#define msgsize 100 - - struct kev_msg ev_msg; - va_list ap; - char msgBuf[msgsize]; - char *dptr = msgBuf; - unsigned char pri; - int loglen; - - bzero(msgBuf, msgsize); - bzero(&ev_msg, sizeof(struct kev_msg)); - va_start( ap, format ); - loglen = vscnprintf(msgBuf, msgsize, format, ap); - va_end( ap ); - - ev_msg.vendor_code = KEV_VENDOR_APPLE; - ev_msg.kev_class = KEV_NETWORK_CLASS; - ev_msg.kev_subclass = KEV_LOG_SUBCLASS; - ev_msg.event_code = IPFWLOGEVENT; - - /* get rid of the trailing \n */ - if (loglen < msgsize) { - dptr[loglen - 1] = 0; - } else { - dptr[msgsize - 1] = 0; - } - - pri = LOG_PRI(level); - - /* remove "ipfw:" prefix if logging to ipfw log */ - if (!(strncmp( ipfwstring, msgBuf, ipfwstringlen))) { - dptr = msgBuf + ipfwstringlen; - } - - ev_msg.dv[0].data_ptr = &pri; - ev_msg.dv[0].data_length = 1; - ev_msg.dv[1].data_ptr = dptr; - ev_msg.dv[1].data_length = 100; /* bug in kern_post_msg, it can't handle size > 256-msghdr */ - ev_msg.dv[2].data_length = 0; - - kev_post_msg(&ev_msg); -} - -static inline void -ipfw_stealth_stats_incr(uint32_t type) -{ - if (type >= IPFW_STEALTH_STATS_MAX) { - return; - } - - ipfw_stealth_stats[type]++; - - if (!ipfw_stealth_stats_needs_flush) { - ipfw_stealth_stats_needs_flush = TRUE; - } -} - -void -ipfw_stealth_stats_incr_udp(void) -{ - ipfw_stealth_stats_incr(IPFW_STEALTH_STATS_UDP); -} - -void -ipfw_stealth_stats_incr_tcp(void) -{ - ipfw_stealth_stats_incr(IPFW_STEALTH_STATS_TCP); -} - -void -ipfw_stealth_stats_incr_udpv6(void) -{ - ipfw_stealth_stats_incr(IPFW_STEALTH_STATS_UDPv6); -} - -void -ipfw_stealth_stats_incr_tcpv6(void) -{ - ipfw_stealth_stats_incr(IPFW_STEALTH_STATS_TCPv6); -} - -static void -ipfw_stealth_flush_stats(void) -{ - int i; - - for (i = 0; i < IPFW_STEALTH_STATS_MAX; i++) { - if (ipfw_stealth_stats[i]) { - ipfwsyslog(LOG_INFO, "Stealth Mode connection attempt to %s %d 
times", - ipfw_stealth_stats_str[i], ipfw_stealth_stats[i]); - ipfw_stealth_stats[i] = 0; - } - } - ipfw_stealth_stats_needs_flush = FALSE; -} - -/* - * This macro maps an ip pointer into a layer3 header pointer of type T - */ -#define L3HDR(T, ip) ((T *)((u_int32_t *)(ip) + (ip)->ip_hl)) - -static __inline int -icmptype_match(struct ip *ip, ipfw_insn_u32 *cmd) -{ - int type = L3HDR(struct icmp, ip)->icmp_type; - - return type <= ICMP_MAXTYPE && (cmd->d[0] & (1 << type)); -} - -#define TT ( (1 << ICMP_ECHO) | (1 << ICMP_ROUTERSOLICIT) | \ - (1 << ICMP_TSTAMP) | (1 << ICMP_IREQ) | (1 << ICMP_MASKREQ) ) - -static int -is_icmp_query(struct ip *ip) -{ - int type = L3HDR(struct icmp, ip)->icmp_type; - return type <= ICMP_MAXTYPE && (TT & (1 << type)); -} -#undef TT - -static int -Get32static_len(void) -{ - int diff; - int len = static_len_32; - struct ip_fw *rule; - char *useraction; - - for (rule = layer3_chain; rule; rule = rule->next) { - if (rule->reserved_1 == IPFW_RULE_INACTIVE) { - continue; - } - if (rule->act_ofs) { - useraction = (char*)ACTION_PTR( rule ); - if (((ipfw_insn*)useraction)->opcode == O_QUEUE || ((ipfw_insn*)useraction)->opcode == O_PIPE) { - diff = sizeof(ipfw_insn_pipe) - sizeof(ipfw_insn_pipe_32); - if (diff) { - len -= diff; - } - } - } - } - return len; -} - -static int -Get64static_len(void) -{ - int diff; - int len = static_len_64; - struct ip_fw *rule; - char *useraction; - - for (rule = layer3_chain; rule; rule = rule->next) { - if (rule->reserved_1 == IPFW_RULE_INACTIVE) { - continue; - } - if (rule->act_ofs) { - useraction = (char *)ACTION_PTR( rule ); - if (((ipfw_insn*)useraction)->opcode == O_QUEUE || ((ipfw_insn*)useraction)->opcode == O_PIPE) { - diff = sizeof(ipfw_insn_pipe_64) - sizeof(ipfw_insn_pipe); - if (diff) { - len += diff; - } - } - } - } - return len; -} - -static void -copyto32fw_insn( struct ip_fw_32 *fw32, struct ip_fw *user_ip_fw, int cmdsize) -{ - char *end; - char *fw32action; - char *useraction; - int justcmdsize; - int diff = 0; - int actioncopysize; - - end = ((char*)user_ip_fw->cmd) + cmdsize; - useraction = (char*)ACTION_PTR( user_ip_fw ); - fw32action = (char*)fw32->cmd + (user_ip_fw->act_ofs * sizeof(uint32_t)); - if ((justcmdsize = (fw32action - (char*)fw32->cmd))) { - bcopy( user_ip_fw->cmd, fw32->cmd, justcmdsize); - } - while (useraction < end) { - if (((ipfw_insn*)useraction)->opcode == O_QUEUE || ((ipfw_insn*)useraction)->opcode == O_PIPE) { - actioncopysize = sizeof(ipfw_insn_pipe_32); - ((ipfw_insn*)fw32action)->opcode = ((ipfw_insn*)useraction)->opcode; - ((ipfw_insn*)fw32action)->arg1 = ((ipfw_insn*)useraction)->arg1; - ((ipfw_insn*)fw32action)->len = F_INSN_SIZE(ipfw_insn_pipe_32); - diff = ((ipfw_insn*)useraction)->len - ((ipfw_insn*)fw32action)->len; - if (diff) { - fw32->cmd_len -= diff; - } - } else { - actioncopysize = (F_LEN((ipfw_insn*)useraction) ? (F_LEN((ipfw_insn*)useraction)) : 1) * sizeof(uint32_t); - bcopy( useraction, fw32action, actioncopysize ); - } - useraction += (F_LEN((ipfw_insn*)useraction) ? 
(F_LEN((ipfw_insn*)useraction)) : 1) * sizeof(uint32_t); - fw32action += actioncopysize; - } -} - -static void -copyto64fw_insn( struct ip_fw_64 *fw64, struct ip_fw *user_ip_fw, int cmdsize) -{ - char *end; - char *fw64action; - char *useraction; - int justcmdsize; - int diff; - int actioncopysize; - - end = ((char *)user_ip_fw->cmd) + cmdsize; - useraction = (char*)ACTION_PTR( user_ip_fw ); - if ((justcmdsize = (useraction - (char*)user_ip_fw->cmd))) { - bcopy( user_ip_fw->cmd, fw64->cmd, justcmdsize); - } - fw64action = (char*)fw64->cmd + justcmdsize; - while (useraction < end) { - if (((ipfw_insn*)user_ip_fw)->opcode == O_QUEUE || ((ipfw_insn*)user_ip_fw)->opcode == O_PIPE) { - actioncopysize = sizeof(ipfw_insn_pipe_64); - ((ipfw_insn*)fw64action)->opcode = ((ipfw_insn*)useraction)->opcode; - ((ipfw_insn*)fw64action)->arg1 = ((ipfw_insn*)useraction)->arg1; - ((ipfw_insn*)fw64action)->len = F_INSN_SIZE(ipfw_insn_pipe_64); - diff = ((ipfw_insn*)fw64action)->len - ((ipfw_insn*)useraction)->len; - if (diff) { - fw64->cmd_len += diff; - } - } else { - actioncopysize = (F_LEN((ipfw_insn*)useraction) ? (F_LEN((ipfw_insn*)useraction)) : 1) * sizeof(uint32_t); - bcopy( useraction, fw64action, actioncopysize ); - } - useraction += (F_LEN((ipfw_insn*)useraction) ? (F_LEN((ipfw_insn*)useraction)) : 1) * sizeof(uint32_t); - fw64action += actioncopysize; - } -} - -static void -copyto32fw( struct ip_fw *user_ip_fw, struct ip_fw_32 *fw32, __unused size_t copysize) -{ - size_t rulesize, cmdsize; - - fw32->version = user_ip_fw->version; - fw32->context = CAST_DOWN_EXPLICIT( user32_addr_t, user_ip_fw->context); - fw32->next = CAST_DOWN_EXPLICIT(user32_addr_t, user_ip_fw->next); - fw32->next_rule = CAST_DOWN_EXPLICIT(user32_addr_t, user_ip_fw->next_rule); - fw32->act_ofs = user_ip_fw->act_ofs; - fw32->cmd_len = user_ip_fw->cmd_len; - fw32->rulenum = user_ip_fw->rulenum; - fw32->set = user_ip_fw->set; - fw32->set_masks[0] = user_ip_fw->set_masks[0]; - fw32->set_masks[1] = user_ip_fw->set_masks[1]; - fw32->pcnt = user_ip_fw->pcnt; - fw32->bcnt = user_ip_fw->bcnt; - fw32->timestamp = user_ip_fw->timestamp; - fw32->reserved_1 = user_ip_fw->reserved_1; - fw32->reserved_2 = user_ip_fw->reserved_2; - rulesize = sizeof(struct ip_fw_32) + (user_ip_fw->cmd_len * sizeof(ipfw_insn) - 4); - cmdsize = user_ip_fw->cmd_len * sizeof(u_int32_t); - copyto32fw_insn( fw32, user_ip_fw, cmdsize ); -} - -static void -copyto64fw( struct ip_fw *user_ip_fw, struct ip_fw_64 *fw64, size_t copysize) -{ - size_t rulesize, cmdsize; - - fw64->version = user_ip_fw->version; - fw64->context = CAST_DOWN_EXPLICIT(__uint64_t, user_ip_fw->context); - fw64->next = CAST_DOWN_EXPLICIT(user64_addr_t, user_ip_fw->next); - fw64->next_rule = CAST_DOWN_EXPLICIT(user64_addr_t, user_ip_fw->next_rule); - fw64->act_ofs = user_ip_fw->act_ofs; - fw64->cmd_len = user_ip_fw->cmd_len; - fw64->rulenum = user_ip_fw->rulenum; - fw64->set = user_ip_fw->set; - fw64->set_masks[0] = user_ip_fw->set_masks[0]; - fw64->set_masks[1] = user_ip_fw->set_masks[1]; - fw64->pcnt = user_ip_fw->pcnt; - fw64->bcnt = user_ip_fw->bcnt; - fw64->timestamp = user_ip_fw->timestamp; - fw64->reserved_1 = user_ip_fw->reserved_1; - fw64->reserved_2 = user_ip_fw->reserved_2; - rulesize = sizeof(struct ip_fw_64) + (user_ip_fw->cmd_len * sizeof(ipfw_insn) - 4); - if (rulesize > copysize) { - cmdsize = copysize - sizeof(struct ip_fw_64) + 4; - } else { - cmdsize = user_ip_fw->cmd_len * sizeof(u_int32_t); - } - copyto64fw_insn( fw64, user_ip_fw, cmdsize); -} - -static int -copyfrom32fw_insn( 
struct ip_fw_32 *fw32, struct ip_fw *user_ip_fw, int cmdsize) -{ - char *end; - char *fw32action; - char *useraction; - int justcmdsize; - int diff; - int actioncopysize; - - end = ((char*)fw32->cmd) + cmdsize; - fw32action = (char*)ACTION_PTR( fw32 ); - if ((justcmdsize = (fw32action - (char*)fw32->cmd))) { - bcopy( fw32->cmd, user_ip_fw->cmd, justcmdsize); - } - useraction = (char*)user_ip_fw->cmd + justcmdsize; - while (fw32action < end) { - if (((ipfw_insn*)fw32action)->opcode == O_QUEUE || ((ipfw_insn*)fw32action)->opcode == O_PIPE) { - actioncopysize = sizeof(ipfw_insn_pipe); - ((ipfw_insn*)useraction)->opcode = ((ipfw_insn*)fw32action)->opcode; - ((ipfw_insn*)useraction)->arg1 = ((ipfw_insn*)fw32action)->arg1; - ((ipfw_insn*)useraction)->len = F_INSN_SIZE(ipfw_insn_pipe); - diff = ((ipfw_insn*)useraction)->len - ((ipfw_insn*)fw32action)->len; - if (diff) { - /* readjust the cmd_len */ - user_ip_fw->cmd_len += diff; - } - } else { - actioncopysize = (F_LEN((ipfw_insn*)fw32action) ? (F_LEN((ipfw_insn*)fw32action)) : 1) * sizeof(uint32_t); - bcopy( fw32action, useraction, actioncopysize ); - } - fw32action += (F_LEN((ipfw_insn*)fw32action) ? (F_LEN((ipfw_insn*)fw32action)) : 1) * sizeof(uint32_t); - useraction += actioncopysize; - } - - return useraction - (char*)user_ip_fw->cmd; -} - -static int -copyfrom64fw_insn( struct ip_fw_64 *fw64, struct ip_fw *user_ip_fw, int cmdsize) -{ - char *end; - char *fw64action; - char *useraction; - int justcmdsize; - int diff; - int actioncopysize; - - end = ((char *)fw64->cmd) + cmdsize; - fw64action = (char*)ACTION_PTR( fw64 ); - if ((justcmdsize = (fw64action - (char*)fw64->cmd))) { - bcopy( fw64->cmd, user_ip_fw->cmd, justcmdsize); - } - useraction = (char*)user_ip_fw->cmd + justcmdsize; - while (fw64action < end) { - if (((ipfw_insn*)fw64action)->opcode == O_QUEUE || ((ipfw_insn*)fw64action)->opcode == O_PIPE) { - actioncopysize = sizeof(ipfw_insn_pipe); - ((ipfw_insn*)useraction)->opcode = ((ipfw_insn*)fw64action)->opcode; - ((ipfw_insn*)useraction)->arg1 = ((ipfw_insn*)fw64action)->arg1; - ((ipfw_insn*)useraction)->len = F_INSN_SIZE(ipfw_insn_pipe); - diff = ((ipfw_insn*)fw64action)->len - ((ipfw_insn*)useraction)->len; - if (diff) { - /* readjust the cmd_len */ - user_ip_fw->cmd_len -= diff; - } - } else { - actioncopysize = (F_LEN((ipfw_insn*)fw64action) ? (F_LEN((ipfw_insn*)fw64action)) : 1) * sizeof(uint32_t); - bcopy( fw64action, useraction, actioncopysize ); - } - fw64action += (F_LEN((ipfw_insn*)fw64action) ? 
(F_LEN((ipfw_insn*)fw64action)) : 1) * sizeof(uint32_t); - useraction += actioncopysize; - } - return useraction - (char*)user_ip_fw->cmd; -} - -static size_t -copyfrom32fw( struct ip_fw_32 *fw32, struct ip_fw *user_ip_fw, size_t copysize) -{ - size_t rulesize, cmdsize; - - user_ip_fw->version = fw32->version; - user_ip_fw->context = CAST_DOWN(void *, fw32->context); - user_ip_fw->next = CAST_DOWN(struct ip_fw*, fw32->next); - user_ip_fw->next_rule = CAST_DOWN_EXPLICIT(struct ip_fw*, fw32->next_rule); - user_ip_fw->act_ofs = fw32->act_ofs; - user_ip_fw->cmd_len = fw32->cmd_len; - user_ip_fw->rulenum = fw32->rulenum; - user_ip_fw->set = fw32->set; - user_ip_fw->set_masks[0] = fw32->set_masks[0]; - user_ip_fw->set_masks[1] = fw32->set_masks[1]; - user_ip_fw->pcnt = fw32->pcnt; - user_ip_fw->bcnt = fw32->bcnt; - user_ip_fw->timestamp = fw32->timestamp; - user_ip_fw->reserved_1 = fw32->reserved_1; - user_ip_fw->reserved_2 = fw32->reserved_2; - rulesize = sizeof(struct ip_fw_32) + (fw32->cmd_len * sizeof(ipfw_insn) - 4); - if (rulesize > copysize) { - cmdsize = copysize - sizeof(struct ip_fw_32) - 4; - } else { - cmdsize = fw32->cmd_len * sizeof(ipfw_insn); - } - cmdsize = copyfrom32fw_insn( fw32, user_ip_fw, cmdsize); - return sizeof(struct ip_fw) + cmdsize - 4; -} - -static size_t -copyfrom64fw( struct ip_fw_64 *fw64, struct ip_fw *user_ip_fw, size_t copysize) -{ - size_t rulesize, cmdsize; - - user_ip_fw->version = fw64->version; - user_ip_fw->context = CAST_DOWN_EXPLICIT( void *, fw64->context); - user_ip_fw->next = CAST_DOWN_EXPLICIT(struct ip_fw*, fw64->next); - user_ip_fw->next_rule = CAST_DOWN_EXPLICIT(struct ip_fw*, fw64->next_rule); - user_ip_fw->act_ofs = fw64->act_ofs; - user_ip_fw->cmd_len = fw64->cmd_len; - user_ip_fw->rulenum = fw64->rulenum; - user_ip_fw->set = fw64->set; - user_ip_fw->set_masks[0] = fw64->set_masks[0]; - user_ip_fw->set_masks[1] = fw64->set_masks[1]; - user_ip_fw->pcnt = fw64->pcnt; - user_ip_fw->bcnt = fw64->bcnt; - user_ip_fw->timestamp = fw64->timestamp; - user_ip_fw->reserved_1 = fw64->reserved_1; - user_ip_fw->reserved_2 = fw64->reserved_2; - //bcopy( fw64->cmd, user_ip_fw->cmd, fw64->cmd_len * sizeof(ipfw_insn)); - rulesize = sizeof(struct ip_fw_64) + (fw64->cmd_len * sizeof(ipfw_insn) - 4); - if (rulesize > copysize) { - cmdsize = copysize - sizeof(struct ip_fw_64) - 4; - } else { - cmdsize = fw64->cmd_len * sizeof(ipfw_insn); - } - cmdsize = copyfrom64fw_insn( fw64, user_ip_fw, cmdsize); - return sizeof(struct ip_fw) + cmdsize - 4; -} - -void -externalize_flow_id(struct ipfw_flow_id *dst, struct ip_flow_id *src); -void -externalize_flow_id(struct ipfw_flow_id *dst, struct ip_flow_id *src) -{ - dst->dst_ip = src->dst_ip; - dst->src_ip = src->src_ip; - dst->dst_port = src->dst_port; - dst->src_port = src->src_port; - dst->proto = src->proto; - dst->flags = src->flags; -} - -static -void -cp_dyn_to_comp_32( struct ipfw_dyn_rule_compat_32 *dyn_rule_vers1, int *len) -{ - struct ipfw_dyn_rule_compat_32 *dyn_last = NULL; - ipfw_dyn_rule *p; - int i; - - if (ipfw_dyn_v) { - for (i = 0; i < curr_dyn_buckets; i++) { - for (p = ipfw_dyn_v[i]; p != NULL; p = p->next) { - dyn_rule_vers1->chain = (user32_addr_t)(p->rule->rulenum); - externalize_flow_id(&dyn_rule_vers1->id, &p->id); - externalize_flow_id(&dyn_rule_vers1->mask, &p->id); - dyn_rule_vers1->type = p->dyn_type; - dyn_rule_vers1->expire = p->expire; - dyn_rule_vers1->pcnt = p->pcnt; - dyn_rule_vers1->bcnt = p->bcnt; - dyn_rule_vers1->bucket = p->bucket; - dyn_rule_vers1->state = p->state; - - 
dyn_rule_vers1->next = CAST_DOWN_EXPLICIT( user32_addr_t, p->next); - dyn_last = dyn_rule_vers1; - - *len += sizeof(*dyn_rule_vers1); - dyn_rule_vers1++; - } - } - - if (dyn_last != NULL) { - dyn_last->next = ((user32_addr_t)0); - } - } -} - - -static -void -cp_dyn_to_comp_64( struct ipfw_dyn_rule_compat_64 *dyn_rule_vers1, int *len) -{ - struct ipfw_dyn_rule_compat_64 *dyn_last = NULL; - ipfw_dyn_rule *p; - int i; - - if (ipfw_dyn_v) { - for (i = 0; i < curr_dyn_buckets; i++) { - for (p = ipfw_dyn_v[i]; p != NULL; p = p->next) { - dyn_rule_vers1->chain = (user64_addr_t) p->rule->rulenum; - externalize_flow_id(&dyn_rule_vers1->id, &p->id); - externalize_flow_id(&dyn_rule_vers1->mask, &p->id); - dyn_rule_vers1->type = p->dyn_type; - dyn_rule_vers1->expire = p->expire; - dyn_rule_vers1->pcnt = p->pcnt; - dyn_rule_vers1->bcnt = p->bcnt; - dyn_rule_vers1->bucket = p->bucket; - dyn_rule_vers1->state = p->state; - - dyn_rule_vers1->next = CAST_DOWN(user64_addr_t, p->next); - dyn_last = dyn_rule_vers1; - - *len += sizeof(*dyn_rule_vers1); - dyn_rule_vers1++; - } - } - - if (dyn_last != NULL) { - dyn_last->next = CAST_DOWN(user64_addr_t, NULL); - } - } -} - -static int -sooptcopyin_fw( struct sockopt *sopt, struct ip_fw *user_ip_fw, size_t *size ) -{ - size_t valsize, copyinsize = 0; - int error = 0; - - valsize = sopt->sopt_valsize; - if (size) { - copyinsize = *size; - } - if (proc_is64bit(sopt->sopt_p)) { - struct ip_fw_64 *fw64 = NULL; - - if (valsize < sizeof(struct ip_fw_64)) { - return EINVAL; - } - if (!copyinsize) { - copyinsize = sizeof(struct ip_fw_64); - } - if (valsize > copyinsize) { - sopt->sopt_valsize = valsize = copyinsize; - } - - if (sopt->sopt_p != 0) { - fw64 = _MALLOC(copyinsize, M_TEMP, M_WAITOK); - if (fw64 == NULL) { - return ENOBUFS; - } - if ((error = copyin(sopt->sopt_val, fw64, valsize)) != 0) { - _FREE(fw64, M_TEMP); - return error; - } - } else { - bcopy(CAST_DOWN(caddr_t, sopt->sopt_val), fw64, valsize); - } - valsize = copyfrom64fw( fw64, user_ip_fw, valsize ); - _FREE( fw64, M_TEMP); - } else { - struct ip_fw_32 *fw32 = NULL; - - if (valsize < sizeof(struct ip_fw_32)) { - return EINVAL; - } - if (!copyinsize) { - copyinsize = sizeof(struct ip_fw_32); - } - if (valsize > copyinsize) { - sopt->sopt_valsize = valsize = copyinsize; - } - - if (sopt->sopt_p != 0) { - fw32 = _MALLOC(copyinsize, M_TEMP, M_WAITOK); - if (fw32 == NULL) { - return ENOBUFS; - } - if ((error = copyin(sopt->sopt_val, fw32, valsize)) != 0) { - _FREE( fw32, M_TEMP); - return error; - } - } else { - bcopy(CAST_DOWN(caddr_t, sopt->sopt_val), fw32, valsize); - } - valsize = copyfrom32fw( fw32, user_ip_fw, valsize); - _FREE( fw32, M_TEMP); - } - if (size) { - *size = valsize; - } - return error; -} - -/* - * The following checks use two arrays of 8 or 16 bits to store the - * bits that we want set or clear, respectively. They are in the - * low and high half of cmd->arg1 or cmd->d[0]. - * - * We scan options and store the bits we find set. We succeed if - * - * (want_set & ~bits) == 0 && (want_clear & ~bits) == want_clear - * - * The code is sometimes optimized not to store additional variables. 
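
The predicate quoted in that comment is easy to misread, so here it is as a tiny standalone function with a worked case: the low byte of the rule word holds the bits that must be set, the high byte the bits that must be clear, exactly as described. match_bits() and the TCP flag values used in main() are illustrative only:

    #include <assert.h>
    #include <stdint.h>

    /* Low byte: bits that must be set.  High byte: bits that must be clear. */
    static int
    match_bits(uint16_t arg1, uint8_t bits)
    {
        uint8_t want_set   = arg1 & 0xff;
        uint8_t want_clear = (arg1 >> 8) & 0xff;

        return (want_set & ~bits) == 0 &&           /* everything required is present */
               (want_clear & ~bits) == want_clear;  /* nothing forbidden is present   */
    }

    int
    main(void)
    {
        /* Example rule: require SYN (0x02) set and ACK (0x10) clear. */
        uint16_t rule = (0x10 << 8) | 0x02;

        assert(match_bits(rule, 0x02));          /* bare SYN matches           */
        assert(!match_bits(rule, 0x02 | 0x10));  /* SYN+ACK fails: ACK is set  */
        assert(!match_bits(rule, 0x00));         /* no SYN at all also fails   */
        return 0;
    }
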
- */ - -static int -flags_match(ipfw_insn *cmd, u_int8_t bits) -{ - u_char want_clear; - bits = ~bits; - - if (((cmd->arg1 & 0xff) & bits) != 0) { - return 0; /* some bits we want set were clear */ - } - want_clear = (cmd->arg1 >> 8) & 0xff; - if ((want_clear & bits) != want_clear) { - return 0; /* some bits we want clear were set */ - } - return 1; -} - -static int -ipopts_match(struct ip *ip, ipfw_insn *cmd) -{ - int optlen, bits = 0; - u_char *cp = (u_char *)(ip + 1); - int x = (ip->ip_hl << 2) - sizeof(struct ip); - - for (; x > 0; x -= optlen, cp += optlen) { - int opt = cp[IPOPT_OPTVAL]; - - if (opt == IPOPT_EOL) { - break; - } - if (opt == IPOPT_NOP) { - optlen = 1; - } else { - optlen = cp[IPOPT_OLEN]; - if (optlen <= 0 || optlen > x) { - return 0; /* invalid or truncated */ - } - } - switch (opt) { - default: - break; - - case IPOPT_LSRR: - bits |= IP_FW_IPOPT_LSRR; - break; - - case IPOPT_SSRR: - bits |= IP_FW_IPOPT_SSRR; - break; - - case IPOPT_RR: - bits |= IP_FW_IPOPT_RR; - break; - - case IPOPT_TS: - bits |= IP_FW_IPOPT_TS; - break; - } - } - return flags_match(cmd, bits); -} - -static int -tcpopts_match(struct ip *ip, ipfw_insn *cmd) -{ - int optlen, bits = 0; - struct tcphdr *tcp = L3HDR(struct tcphdr, ip); - u_char *cp = (u_char *)(tcp + 1); - int x = (tcp->th_off << 2) - sizeof(struct tcphdr); - - for (; x > 0; x -= optlen, cp += optlen) { - int opt = cp[0]; - if (opt == TCPOPT_EOL) { - break; - } - if (opt == TCPOPT_NOP) { - optlen = 1; - } else { - optlen = cp[1]; - if (optlen <= 0) { - break; - } - } - - switch (opt) { - default: - break; - - case TCPOPT_MAXSEG: - bits |= IP_FW_TCPOPT_MSS; - break; - - case TCPOPT_WINDOW: - bits |= IP_FW_TCPOPT_WINDOW; - break; - - case TCPOPT_SACK_PERMITTED: - case TCPOPT_SACK: - bits |= IP_FW_TCPOPT_SACK; - break; - - case TCPOPT_TIMESTAMP: - bits |= IP_FW_TCPOPT_TS; - break; - - case TCPOPT_CC: - case TCPOPT_CCNEW: - case TCPOPT_CCECHO: - bits |= IP_FW_TCPOPT_CC; - break; - } - } - return flags_match(cmd, bits); -} - -static int -iface_match(struct ifnet *ifp, ipfw_insn_if *cmd) -{ - if (ifp == NULL) { /* no iface with this packet, match fails */ - return 0; - } - /* Check by name or by IP address */ - if (cmd->name[0] != '\0') { /* match by name */ - /* Check unit number (-1 is wildcard) */ - if (cmd->p.unit != -1 && cmd->p.unit != ifp->if_unit) { - return 0; - } - /* Check name */ - if (!strncmp(ifp->if_name, cmd->name, IFNAMSIZ)) { - return 1; - } - } else { - struct ifaddr *ia; - - ifnet_lock_shared(ifp); - TAILQ_FOREACH(ia, &ifp->if_addrhead, ifa_link) { - IFA_LOCK(ia); - if (ia->ifa_addr->sa_family != AF_INET) { - IFA_UNLOCK(ia); - continue; - } - if (cmd->p.ip.s_addr == ((struct sockaddr_in *) - (ia->ifa_addr))->sin_addr.s_addr) { - IFA_UNLOCK(ia); - ifnet_lock_done(ifp); - return 1; /* match */ - } - IFA_UNLOCK(ia); - } - ifnet_lock_done(ifp); - } - return 0; /* no match, fail ... */ -} - -/* - * The 'verrevpath' option checks that the interface that an IP packet - * arrives on is the same interface that traffic destined for the - * packet's source address would be routed out of. This is a measure - * to block forged packets. This is also commonly known as "anti-spoofing" - * or Unicast Reverse Path Forwarding (Unicast RFP) in Cisco-ese. The - * name of the knob is purposely reminisent of the Cisco IOS command, - * - * ip verify unicast reverse-path - * - * which implements the same functionality. But note that syntax is - * misleading. 
The check may be performed on all IP packets whether unicast, - * multicast, or broadcast. - */ -static int -verify_rev_path(struct in_addr src, struct ifnet *ifp) -{ - static struct route ro; - struct sockaddr_in *dst; - - bzero(&ro, sizeof(ro)); - dst = (struct sockaddr_in *)&(ro.ro_dst); - - /* Check if we've cached the route from the previous call. */ - if (src.s_addr != dst->sin_addr.s_addr) { - dst->sin_family = AF_INET; - dst->sin_len = sizeof(*dst); - dst->sin_addr = src; - - rtalloc_ign(&ro, RTF_CLONING | RTF_PRCLONING, false); - } - if (ro.ro_rt != NULL) { - RT_LOCK_SPIN(ro.ro_rt); - } else { - ROUTE_RELEASE(&ro); - return 0; /* No route */ - } - if ((ifp == NULL) || - (ro.ro_rt->rt_ifp->if_index != ifp->if_index)) { - RT_UNLOCK(ro.ro_rt); - ROUTE_RELEASE(&ro); - return 0; - } - RT_UNLOCK(ro.ro_rt); - ROUTE_RELEASE(&ro); - return 1; -} - - -static u_int64_t norule_counter; /* counter for ipfw_log(NULL...) */ - -#define SNPARGS(buf, len) buf + len, sizeof(buf) > len ? sizeof(buf) - len : 0 -#define SNP(buf) buf, sizeof(buf) - -/* - * We enter here when we have a rule with O_LOG. - * XXX this function alone takes about 2Kbytes of code! - */ -static void -ipfw_log(struct ip_fw *f, u_int hlen, struct ether_header *eh, - struct mbuf *m, struct ifnet *oif) -{ - const char *action; - int limit_reached = 0; - char ipv4str[MAX_IPv4_STR_LEN]; - char action2[40], proto[48], fragment[28]; - - fragment[0] = '\0'; - proto[0] = '\0'; - - if (f == NULL) { /* bogus pkt */ - if (verbose_limit != 0 && norule_counter >= verbose_limit) { - return; - } - norule_counter++; - if (norule_counter == verbose_limit) { - limit_reached = verbose_limit; - } - action = "Refuse"; - } else { /* O_LOG is the first action, find the real one */ - ipfw_insn *cmd = ACTION_PTR(f); - ipfw_insn_log *l = (ipfw_insn_log *)cmd; - - if (l->max_log != 0 && l->log_left == 0) { - return; - } - l->log_left--; - if (l->log_left == 0) { - limit_reached = l->max_log; - } - cmd += F_LEN(cmd); /* point to first action */ - if (cmd->opcode == O_PROB) { - cmd += F_LEN(cmd); - } - - action = action2; - switch (cmd->opcode) { - case O_DENY: - action = "Deny"; - break; - - case O_REJECT: - if (cmd->arg1 == ICMP_REJECT_RST) { - action = "Reset"; - } else if (cmd->arg1 == ICMP_UNREACH_HOST) { - action = "Reject"; - } else { - snprintf(SNPARGS(action2, 0), "Unreach %d", - cmd->arg1); - } - break; - - case O_ACCEPT: - action = "Accept"; - break; - case O_COUNT: - action = "Count"; - break; - case O_DIVERT: - snprintf(SNPARGS(action2, 0), "Divert %d", - cmd->arg1); - break; - case O_TEE: - snprintf(SNPARGS(action2, 0), "Tee %d", - cmd->arg1); - break; - case O_SKIPTO: - snprintf(SNPARGS(action2, 0), "SkipTo %d", - cmd->arg1); - break; - case O_PIPE: - snprintf(SNPARGS(action2, 0), "Pipe %d", - cmd->arg1); - break; - case O_QUEUE: - snprintf(SNPARGS(action2, 0), "Queue %d", - cmd->arg1); - break; - case O_FORWARD_IP: { - ipfw_insn_sa *sa = (ipfw_insn_sa *)cmd; - int len; - - if (f->reserved_1 == IPFW_RULE_INACTIVE) { - break; - } - len = scnprintf(SNPARGS(action2, 0), "Forward to %s", - inet_ntop(AF_INET, &sa->sa.sin_addr, ipv4str, sizeof(ipv4str))); - if (sa->sa.sin_port) { - snprintf(SNPARGS(action2, len), ":%d", - sa->sa.sin_port); - } - } - break; - default: - action = "UNKNOWN"; - break; - } - } - - if (hlen == 0) { /* non-ip */ - snprintf(SNPARGS(proto, 0), "MAC"); - } else { - struct ip *ip = mtod(m, struct ip *); - /* these three are all aliases to the same thing */ - struct icmp *const icmp = L3HDR(struct icmp, ip); - struct tcphdr 
*const tcp = (struct tcphdr *)icmp; - struct udphdr *const udp = (struct udphdr *)icmp; - - int ip_off, offset, ip_len; - - int len; - - if (eh != NULL) { /* layer 2 packets are as on the wire */ - ip_off = ntohs(ip->ip_off); - ip_len = ntohs(ip->ip_len); - } else { - ip_off = ip->ip_off; - ip_len = ip->ip_len; - } - offset = ip_off & IP_OFFMASK; - switch (ip->ip_p) { - case IPPROTO_TCP: - len = scnprintf(SNPARGS(proto, 0), "TCP %s", - inet_ntop(AF_INET, &ip->ip_src, ipv4str, sizeof(ipv4str))); - if (offset == 0) { - snprintf(SNPARGS(proto, len), ":%d %s:%d", - ntohs(tcp->th_sport), - inet_ntop(AF_INET, &ip->ip_dst, ipv4str, sizeof(ipv4str)), - ntohs(tcp->th_dport)); - } else { - snprintf(SNPARGS(proto, len), " %s", - inet_ntop(AF_INET, &ip->ip_dst, ipv4str, sizeof(ipv4str))); - } - break; - - case IPPROTO_UDP: - len = scnprintf(SNPARGS(proto, 0), "UDP %s", - inet_ntop(AF_INET, &ip->ip_src, ipv4str, sizeof(ipv4str))); - if (offset == 0) { - snprintf(SNPARGS(proto, len), ":%d %s:%d", - ntohs(udp->uh_sport), - inet_ntop(AF_INET, &ip->ip_dst, ipv4str, sizeof(ipv4str)), - ntohs(udp->uh_dport)); - } else { - snprintf(SNPARGS(proto, len), " %s", - inet_ntop(AF_INET, &ip->ip_dst, ipv4str, sizeof(ipv4str))); - } - break; - - case IPPROTO_ICMP: - if (offset == 0) { - len = scnprintf(SNPARGS(proto, 0), - "ICMP:%u.%u ", - icmp->icmp_type, icmp->icmp_code); - } else { - len = scnprintf(SNPARGS(proto, 0), "ICMP "); - } - len += scnprintf(SNPARGS(proto, len), "%s", - inet_ntop(AF_INET, &ip->ip_src, ipv4str, sizeof(ipv4str))); - snprintf(SNPARGS(proto, len), " %s", - inet_ntop(AF_INET, &ip->ip_dst, ipv4str, sizeof(ipv4str))); - break; - - default: - len = scnprintf(SNPARGS(proto, 0), "P:%d %s", ip->ip_p, - inet_ntop(AF_INET, &ip->ip_src, ipv4str, sizeof(ipv4str))); - snprintf(SNPARGS(proto, len), " %s", - inet_ntop(AF_INET, &ip->ip_dst, ipv4str, sizeof(ipv4str))); - break; - } - - if (ip_off & (IP_MF | IP_OFFMASK)) { - snprintf(SNPARGS(fragment, 0), " (frag %d:%d@%d%s)", - ntohs(ip->ip_id), ip_len - (ip->ip_hl << 2), - offset << 3, - (ip_off & IP_MF) ? "+" : ""); - } - } - if (oif || m->m_pkthdr.rcvif) { - dolog((LOG_AUTHPRIV | LOG_INFO, - "ipfw: %d %s %s %s via %s%d%s\n", - f ? f->rulenum : -1, - action, proto, oif ? "out" : "in", - oif ? oif->if_name : m->m_pkthdr.rcvif->if_name, - oif ? oif->if_unit : m->m_pkthdr.rcvif->if_unit, - fragment)); - } else { - dolog((LOG_AUTHPRIV | LOG_INFO, - "ipfw: %d %s %s [no if info]%s\n", - f ? f->rulenum : -1, - action, proto, fragment)); - } - if (limit_reached) { - dolog((LOG_AUTHPRIV | LOG_NOTICE, - "ipfw: limit %d reached on entry %d\n", - limit_reached, f ? f->rulenum : -1)); - } -} - -/* - * IMPORTANT: the hash function for dynamic rules must be commutative - * in source and destination (ip,port), because rules are bidirectional - * and we want to find both in the same bucket. - */ -static __inline int -hash_packet(struct ip_flow_id *id) -{ - u_int32_t i; - - i = (id->dst_ip) ^ (id->src_ip) ^ (id->dst_port) ^ (id->src_port); - i &= (curr_dyn_buckets - 1); - return i; -} - -/** - * unlink a dynamic rule from a chain. prev is a pointer to - * the previous one, q is a pointer to the rule to delete, - * head is a pointer to the head of the queue. - * Modifies q and potentially also head. 
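The commutativity note above is the whole point of hash_packet(): XOR is symmetric in its operands, so a flow and its reply hash to the same bucket and a single lookup can serve both directions. A small stand-alone demonstration, with an assumed bucket count (DEMO_BUCKETS stands in for curr_dyn_buckets and is not a value taken from this file):

    #include <stdio.h>
    #include <stdint.h>

    #define DEMO_BUCKETS 256u   /* must be a power of two, like curr_dyn_buckets */

    /* XOR of addresses and ports is symmetric, so A->B and B->A collide */
    static uint32_t
    hash_flow(uint32_t src_ip, uint32_t dst_ip, uint16_t sport, uint16_t dport)
    {
        return (src_ip ^ dst_ip ^ sport ^ dport) & (DEMO_BUCKETS - 1);
    }

    int
    main(void)
    {
        uint32_t fwd = hash_flow(0x0a000001, 0x0a000002, 12345, 80);
        uint32_t rev = hash_flow(0x0a000002, 0x0a000001, 80, 12345);

        printf("forward bucket %u, reverse bucket %u\n", fwd, rev); /* identical */
        return 0;
    }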
- */ -#define UNLINK_DYN_RULE(prev, head, q) { \ - ipfw_dyn_rule *old_q = q; \ - \ - /* remove a refcount to the parent */ \ - if (q->dyn_type == O_LIMIT) \ - q->parent->count--; \ - DEB(printf("ipfw: unlink entry 0x%08x %d -> 0x%08x %d, %d left\n",\ - (q->id.src_ip), (q->id.src_port), \ - (q->id.dst_ip), (q->id.dst_port), dyn_count-1 ); ) \ - if (prev != NULL) \ - prev->next = q = q->next; \ - else \ - head = q = q->next; \ - dyn_count--; \ - _FREE(old_q, M_IPFW); } - -#define TIME_LEQ(a, b) ((int)((a)-(b)) <= 0) - -/** - * Remove dynamic rules pointing to "rule", or all of them if rule == NULL. - * - * If keep_me == NULL, rules are deleted even if not expired, - * otherwise only expired rules are removed. - * - * The value of the second parameter is also used to point to identify - * a rule we absolutely do not want to remove (e.g. because we are - * holding a reference to it -- this is the case with O_LIMIT_PARENT - * rules). The pointer is only used for comparison, so any non-null - * value will do. - */ -static void -remove_dyn_rule(struct ip_fw *rule, ipfw_dyn_rule *keep_me) -{ - static u_int32_t last_remove = 0; - -#define FORCE (keep_me == NULL) - - ipfw_dyn_rule *prev, *q; - int i, pass = 0, max_pass = 0; - struct timeval timenow; - - getmicrotime(&timenow); - - if (ipfw_dyn_v == NULL || dyn_count == 0) { - return; - } - /* do not expire more than once per second, it is useless */ - if (!FORCE && last_remove == timenow.tv_sec) { - return; - } - last_remove = timenow.tv_sec; - - /* - * because O_LIMIT refer to parent rules, during the first pass only - * remove child and mark any pending LIMIT_PARENT, and remove - * them in a second pass. - */ -next_pass: - for (i = 0; i < curr_dyn_buckets; i++) { - for (prev = NULL, q = ipfw_dyn_v[i]; q;) { - /* - * Logic can become complex here, so we split tests. - */ - if (q == keep_me) { - goto next; - } - if (rule != NULL && rule != q->rule) { - goto next; /* not the one we are looking for */ - } - if (q->dyn_type == O_LIMIT_PARENT) { - /* - * handle parent in the second pass, - * record we need one. - */ - max_pass = 1; - if (pass == 0) { - goto next; - } - if (FORCE && q->count != 0) { - /* XXX should not happen! */ - printf("ipfw: OUCH! cannot remove rule," - " count %d\n", q->count); - } - } else { - if (!FORCE && - !TIME_LEQ( q->expire, timenow.tv_sec )) { - goto next; - } - } - if (q->dyn_type != O_LIMIT_PARENT || !q->count) { - UNLINK_DYN_RULE(prev, ipfw_dyn_v[i], q); - continue; - } -next: - prev = q; - q = q->next; - } - } - if (pass++ < max_pass) { - goto next_pass; - } -} - - -/** - * lookup a dynamic rule. - */ -static ipfw_dyn_rule * -lookup_dyn_rule(struct ip_flow_id *pkt, int *match_direction, - struct tcphdr *tcp) -{ - /* - * stateful ipfw extensions. 
- * Lookup into dynamic session queue - */ -#define MATCH_REVERSE 0 -#define MATCH_FORWARD 1 -#define MATCH_NONE 2 -#define MATCH_UNKNOWN 3 -#define BOTH_SYN (TH_SYN | (TH_SYN << 8)) -#define BOTH_FIN (TH_FIN | (TH_FIN << 8)) - - int i, dir = MATCH_NONE; - ipfw_dyn_rule *prev, *q = NULL; - struct timeval timenow; - - getmicrotime(&timenow); - - if (ipfw_dyn_v == NULL) { - goto done; /* not found */ - } - i = hash_packet( pkt ); - for (prev = NULL, q = ipfw_dyn_v[i]; q != NULL;) { - if (q->dyn_type == O_LIMIT_PARENT && q->count) { - goto next; - } - if (TIME_LEQ( q->expire, timenow.tv_sec)) { /* expire entry */ - int dounlink = 1; - - /* check if entry is TCP */ - if (q->id.proto == IPPROTO_TCP) { - /* do not delete an established TCP connection which hasn't been closed by both sides */ - if ((q->state & (BOTH_SYN | BOTH_FIN)) != (BOTH_SYN | BOTH_FIN)) { - dounlink = 0; - } - } - if (dounlink) { - UNLINK_DYN_RULE(prev, ipfw_dyn_v[i], q); - continue; - } - } - if (pkt->proto == q->id.proto && - q->dyn_type != O_LIMIT_PARENT) { - if (pkt->src_ip == q->id.src_ip && - pkt->dst_ip == q->id.dst_ip && - pkt->src_port == q->id.src_port && - pkt->dst_port == q->id.dst_port) { - dir = MATCH_FORWARD; - break; - } - if (pkt->src_ip == q->id.dst_ip && - pkt->dst_ip == q->id.src_ip && - pkt->src_port == q->id.dst_port && - pkt->dst_port == q->id.src_port) { - dir = MATCH_REVERSE; - break; - } - } -next: - prev = q; - q = q->next; - } - if (q == NULL) { - goto done; /* q = NULL, not found */ - } - if (prev != NULL) { /* found and not in front */ - prev->next = q->next; - q->next = ipfw_dyn_v[i]; - ipfw_dyn_v[i] = q; - } - if (pkt->proto == IPPROTO_TCP) { /* update state according to flags */ - u_char flags = pkt->flags & (TH_FIN | TH_SYN | TH_RST); - - q->state |= (dir == MATCH_FORWARD) ? flags : (flags << 8); - switch (q->state) { - case TH_SYN: /* opening */ - q->expire = timenow.tv_sec + dyn_syn_lifetime; - break; - - case BOTH_SYN: /* move to established */ - case BOTH_SYN | TH_FIN: /* one side tries to close */ - case BOTH_SYN | (TH_FIN << 8): - if (tcp) { -#define _SEQ_GE(a, b) ((int)(a) - (int)(b) >= 0) - u_int32_t ack = ntohl(tcp->th_ack); - if (dir == MATCH_FORWARD) { - if (q->ack_fwd == 0 || _SEQ_GE(ack, q->ack_fwd)) { - q->ack_fwd = ack; - } else { /* ignore out-of-sequence */ - break; - } - } else { - if (q->ack_rev == 0 || _SEQ_GE(ack, q->ack_rev)) { - q->ack_rev = ack; - } else { /* ignore out-of-sequence */ - break; - } - } - } - q->expire = timenow.tv_sec + dyn_ack_lifetime; - break; - - case BOTH_SYN | BOTH_FIN: /* both sides closed */ - if (dyn_fin_lifetime >= dyn_keepalive_period) { - dyn_fin_lifetime = dyn_keepalive_period - 1; - } - q->expire = timenow.tv_sec + dyn_fin_lifetime; - break; - - default: -#if 0 - /* - * reset or some invalid combination, but can also - * occur if we use keep-state the wrong way. - */ - if ((q->state & ((TH_RST << 8) | TH_RST)) == 0) { - printf("invalid state: 0x%x\n", q->state); - } -#endif - if (dyn_rst_lifetime >= dyn_keepalive_period) { - dyn_rst_lifetime = dyn_keepalive_period - 1; - } - q->expire = timenow.tv_sec + dyn_rst_lifetime; - break; - } - } else if (pkt->proto == IPPROTO_UDP) { - q->expire = timenow.tv_sec + dyn_udp_lifetime; - } else { - /* other protocols */ - q->expire = timenow.tv_sec + dyn_short_lifetime; - } -done: - if (match_direction) { - *match_direction = dir; - } - return q; -} - -static void -realloc_dynamic_table(void) -{ - /* - * Try reallocation, make sure we have a power of 2 and do - * not allow more than 64k entries. 
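lookup_dyn_rule() above folds the packet's TCP flags into q->state, using the low byte for the forward direction and the high byte for the reverse one, so the BOTH_SYN and BOTH_FIN composites only form once both endpoints have spoken. A stand-alone sketch of that bookkeeping; the flag constant is the usual TCP header value and the harness is illustrative:

    #include <stdio.h>
    #include <stdint.h>

    #define TH_SYN   0x02
    #define BOTH_SYN (TH_SYN | (TH_SYN << 8))   /* SYN seen in both directions */

    int
    main(void)
    {
        uint16_t state = 0;

        state |= TH_SYN;          /* forward SYN lands in the low byte  */
        printf("after fwd SYN: established=%d\n", state == BOTH_SYN);  /* 0 */

        state |= TH_SYN << 8;     /* reverse SYN lands in the high byte */
        printf("after rev SYN: established=%d\n", state == BOTH_SYN);  /* 1 */
        return 0;
    }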
In case of overflow, - * default to 1024. - */ - - if (dyn_buckets > 65536) { - dyn_buckets = 1024; - } - if ((dyn_buckets & (dyn_buckets - 1)) != 0) { /* not a power of 2 */ - dyn_buckets = curr_dyn_buckets; /* reset */ - return; - } - curr_dyn_buckets = dyn_buckets; - if (ipfw_dyn_v != NULL) { - _FREE(ipfw_dyn_v, M_IPFW); - } - for (;;) { - ipfw_dyn_v = _MALLOC(curr_dyn_buckets * sizeof(ipfw_dyn_rule *), - M_IPFW, M_NOWAIT | M_ZERO); - if (ipfw_dyn_v != NULL || curr_dyn_buckets <= 2) { - break; - } - curr_dyn_buckets /= 2; - } -} - -/** - * Install state of type 'type' for a dynamic session. - * The hash table contains two type of rules: - * - regular rules (O_KEEP_STATE) - * - rules for sessions with limited number of sess per user - * (O_LIMIT). When they are created, the parent is - * increased by 1, and decreased on delete. In this case, - * the third parameter is the parent rule and not the chain. - * - "parent" rules for the above (O_LIMIT_PARENT). - */ -static ipfw_dyn_rule * -add_dyn_rule(struct ip_flow_id *id, u_int8_t dyn_type, struct ip_fw *rule) -{ - ipfw_dyn_rule *r; - int i; - struct timeval timenow; - - getmicrotime(&timenow); - - if (ipfw_dyn_v == NULL || - (dyn_count == 0 && dyn_buckets != curr_dyn_buckets)) { - realloc_dynamic_table(); - if (ipfw_dyn_v == NULL) { - return NULL; /* failed ! */ - } - } - i = hash_packet(id); - - r = _MALLOC(sizeof *r, M_IPFW, M_NOWAIT | M_ZERO); - if (r == NULL) { -#if IPFW_DEBUG - printf("ipfw: sorry cannot allocate state\n"); -#endif - return NULL; - } - - /* increase refcount on parent, and set pointer */ - if (dyn_type == O_LIMIT) { - ipfw_dyn_rule *parent = (ipfw_dyn_rule *)rule; - if (parent->dyn_type != O_LIMIT_PARENT) { - panic("invalid parent"); - } - parent->count++; - r->parent = parent; - rule = parent->rule; - } - - r->id = *id; - r->expire = timenow.tv_sec + dyn_syn_lifetime; - r->rule = rule; - r->dyn_type = dyn_type; - r->pcnt = r->bcnt = 0; - r->count = 0; - - r->bucket = i; - r->next = ipfw_dyn_v[i]; - ipfw_dyn_v[i] = r; - dyn_count++; - DEB(printf("ipfw: add dyn entry ty %d 0x%08x %d -> 0x%08x %d, total %d\n", - dyn_type, - (r->id.src_ip), (r->id.src_port), - (r->id.dst_ip), (r->id.dst_port), - dyn_count ); ) - return r; -} - -/** - * lookup dynamic parent rule using pkt and rule as search keys. - * If the lookup fails, then install one. - */ -static ipfw_dyn_rule * -lookup_dyn_parent(struct ip_flow_id *pkt, struct ip_fw *rule) -{ - ipfw_dyn_rule *q; - int i; - struct timeval timenow; - - getmicrotime(&timenow); - - if (ipfw_dyn_v) { - i = hash_packet( pkt ); - for (q = ipfw_dyn_v[i]; q != NULL; q = q->next) { - if (q->dyn_type == O_LIMIT_PARENT && - rule == q->rule && - pkt->proto == q->id.proto && - pkt->src_ip == q->id.src_ip && - pkt->dst_ip == q->id.dst_ip && - pkt->src_port == q->id.src_port && - pkt->dst_port == q->id.dst_port) { - q->expire = timenow.tv_sec + dyn_short_lifetime; - DEB(printf("ipfw: lookup_dyn_parent found " - "0x%llx\n", (uint64_t)VM_KERNEL_ADDRPERM(q)); ) - return q; - } - } - } - return add_dyn_rule(pkt, O_LIMIT_PARENT, rule); -} - -/** - * Install dynamic state for rule type cmd->o.opcode - * - * Returns 1 (failure) if state is not installed because of errors or because - * session limitations are enforced. 
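realloc_dynamic_table() above insists on a power-of-two bucket count because hash_packet() reduces the hash with a mask of curr_dyn_buckets - 1 rather than a modulo. The single-expression test it applies is shown in isolation below; the candidate values and the harness are illustrative only:

    #include <stdio.h>

    /* same test as realloc_dynamic_table(): a power of two has one bit set */
    static int
    is_power_of_two(unsigned v)
    {
        return v != 0 && (v & (v - 1)) == 0;
    }

    int
    main(void)
    {
        unsigned candidates[] = { 256, 300, 1024, 65536 };

        for (unsigned i = 0; i < sizeof(candidates) / sizeof(candidates[0]); i++)
            printf("%u -> %s\n", candidates[i],
                is_power_of_two(candidates[i]) ? "accepted" : "rejected");
        return 0;
    }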
- */ -static int -install_state(struct ip_fw *rule, ipfw_insn_limit *cmd, - struct ip_fw_args *args) -{ - static int last_log; - struct timeval timenow; - - ipfw_dyn_rule *q; - getmicrotime(&timenow); - - DEB(printf("ipfw: install state type %d 0x%08x %u -> 0x%08x %u\n", - cmd->o.opcode, - (args->fwa_id.src_ip), (args->fwa_id.src_port), - (args->fwa_id.dst_ip), (args->fwa_id.dst_port)); ) - - q = lookup_dyn_rule(&args->fwa_id, NULL, NULL); - - if (q != NULL) { /* should never occur */ - if (last_log != timenow.tv_sec) { - last_log = timenow.tv_sec; - printf("ipfw: install_state: entry already present, done\n"); - } - return 0; - } - - if (dyn_count >= dyn_max) { - /* - * Run out of slots, try to remove any expired rule. - */ - remove_dyn_rule(NULL, (ipfw_dyn_rule *)1); - } - - if (dyn_count >= dyn_max) { - if (last_log != timenow.tv_sec) { - last_log = timenow.tv_sec; - printf("ipfw: install_state: Too many dynamic rules\n"); - } - return 1; /* cannot install, notify caller */ - } - - switch (cmd->o.opcode) { - case O_KEEP_STATE: /* bidir rule */ - add_dyn_rule(&args->fwa_id, O_KEEP_STATE, rule); - break; - - case O_LIMIT: /* limit number of sessions */ - { - u_int16_t limit_mask = cmd->limit_mask; - struct ip_flow_id id; - ipfw_dyn_rule *parent; - - DEB(printf("ipfw: installing dyn-limit rule %d\n", - cmd->conn_limit); ) - - id.dst_ip = id.src_ip = 0; - id.dst_port = id.src_port = 0; - id.proto = args->fwa_id.proto; - - if (limit_mask & DYN_SRC_ADDR) { - id.src_ip = args->fwa_id.src_ip; - } - if (limit_mask & DYN_DST_ADDR) { - id.dst_ip = args->fwa_id.dst_ip; - } - if (limit_mask & DYN_SRC_PORT) { - id.src_port = args->fwa_id.src_port; - } - if (limit_mask & DYN_DST_PORT) { - id.dst_port = args->fwa_id.dst_port; - } - parent = lookup_dyn_parent(&id, rule); - if (parent == NULL) { - printf("ipfw: add parent failed\n"); - return 1; - } - if (parent->count >= cmd->conn_limit) { - /* - * See if we can remove some expired rule. - */ - remove_dyn_rule(rule, parent); - if (parent->count >= cmd->conn_limit) { - if (fw_verbose && last_log != timenow.tv_sec) { - last_log = timenow.tv_sec; - dolog((LOG_AUTHPRIV | LOG_DEBUG, - "drop session, too many entries\n")); - } - return 1; - } - } - add_dyn_rule(&args->fwa_id, O_LIMIT, (struct ip_fw *)parent); - } - break; - default: - printf("ipfw: unknown dynamic rule type %u\n", cmd->o.opcode); - return 1; - } - lookup_dyn_rule(&args->fwa_id, NULL, NULL); /* XXX just set lifetime */ - return 0; -} - -/* - * Generate a TCP packet, containing either a RST or a keepalive. - * When flags & TH_RST, we are sending a RST packet, because of a - * "reset" action matched the packet. - * Otherwise we are sending a keepalive, and flags & TH_ - */ -static struct mbuf * -send_pkt(struct ip_flow_id *id, u_int32_t seq, u_int32_t ack, int flags) -{ - struct mbuf *m; - struct ip *ip; - struct tcphdr *tcp; - - MGETHDR(m, M_DONTWAIT, MT_HEADER); /* MAC-OK */ - if (m == 0) { - return NULL; - } - m->m_pkthdr.rcvif = (struct ifnet *)0; - m->m_pkthdr.len = m->m_len = sizeof(struct ip) + sizeof(struct tcphdr); - m->m_data += max_linkhdr; - - ip = mtod(m, struct ip *); - bzero(ip, m->m_len); - tcp = (struct tcphdr *)(ip + 1); /* no IP options */ - ip->ip_p = IPPROTO_TCP; - tcp->th_off = 5; - /* - * Assume we are sending a RST (or a keepalive in the reverse - * direction), swap src and destination addresses and ports. 
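For O_LIMIT, install_state() above derives the parent key by zeroing a flow id and copying back only the fields selected by limit_mask, so every session that agrees on those fields is charged against the same O_LIMIT_PARENT entry. A user-space sketch of that keying; the struct layout, the two mask bits shown and the helper name are made up for illustration:

    #include <stdio.h>
    #include <stdint.h>

    #define DEMO_SRC_ADDR 0x1            /* stand-ins for DYN_SRC_ADDR etc. */
    #define DEMO_DST_PORT 0x8

    struct demo_flow { uint32_t src_ip, dst_ip; uint16_t src_port, dst_port; };

    /* zero the id, then copy only the masked fields, as install_state() does */
    static struct demo_flow
    limit_key(const struct demo_flow *pkt, unsigned mask)
    {
        struct demo_flow id = { 0, 0, 0, 0 };

        if (mask & DEMO_SRC_ADDR) id.src_ip   = pkt->src_ip;
        if (mask & DEMO_DST_PORT) id.dst_port = pkt->dst_port;
        return id;
    }

    int
    main(void)
    {
        struct demo_flow a = { 0x0a000001, 0x0a000063, 1111, 80 };
        struct demo_flow b = { 0x0a000001, 0x0a000064, 2222, 80 };
        struct demo_flow ka = limit_key(&a, DEMO_SRC_ADDR);
        struct demo_flow kb = limit_key(&b, DEMO_SRC_ADDR);

        /* with "limit src-addr", both sessions share one parent key */
        printf("same parent key: %d\n",
            ka.src_ip == kb.src_ip && ka.dst_port == kb.dst_port);
        return 0;
    }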
- */ - ip->ip_src.s_addr = htonl(id->dst_ip); - ip->ip_dst.s_addr = htonl(id->src_ip); - tcp->th_sport = htons(id->dst_port); - tcp->th_dport = htons(id->src_port); - if (flags & TH_RST) { /* we are sending a RST */ - if (flags & TH_ACK) { - tcp->th_seq = htonl(ack); - tcp->th_ack = htonl(0); - tcp->th_flags = TH_RST; - } else { - if (flags & TH_SYN) { - seq++; - } - tcp->th_seq = htonl(0); - tcp->th_ack = htonl(seq); - tcp->th_flags = TH_RST | TH_ACK; - } - } else { - /* - * We are sending a keepalive. flags & TH_SYN determines - * the direction, forward if set, reverse if clear. - * NOTE: seq and ack are always assumed to be correct - * as set by the caller. This may be confusing... - */ - if (flags & TH_SYN) { - /* - * we have to rewrite the correct addresses! - */ - ip->ip_dst.s_addr = htonl(id->dst_ip); - ip->ip_src.s_addr = htonl(id->src_ip); - tcp->th_dport = htons(id->dst_port); - tcp->th_sport = htons(id->src_port); - } - tcp->th_seq = htonl(seq); - tcp->th_ack = htonl(ack); - tcp->th_flags = TH_ACK; - } - /* - * set ip_len to the payload size so we can compute - * the tcp checksum on the pseudoheader - * XXX check this, could save a couple of words ? - */ - ip->ip_len = htons(sizeof(struct tcphdr)); - tcp->th_sum = in_cksum(m, m->m_pkthdr.len); - /* - * now fill fields left out earlier - */ - ip->ip_ttl = ip_defttl; - ip->ip_len = m->m_pkthdr.len; - m->m_flags |= M_SKIP_FIREWALL; - - return m; -} - -/* - * sends a reject message, consuming the mbuf passed as an argument. - */ -static void -send_reject(struct ip_fw_args *args, int code, int offset, __unused int ip_len) -{ - if (code != ICMP_REJECT_RST) { /* Send an ICMP unreach */ - /* We need the IP header in host order for icmp_error(). */ - if (args->fwa_eh != NULL) { - struct ip *ip = mtod(args->fwa_m, struct ip *); - ip->ip_len = ntohs(ip->ip_len); - ip->ip_off = ntohs(ip->ip_off); - } - args->fwa_m->m_flags |= M_SKIP_FIREWALL; - icmp_error(args->fwa_m, ICMP_UNREACH, code, 0L, 0); - } else if (offset == 0 && args->fwa_id.proto == IPPROTO_TCP) { - struct tcphdr *const tcp = - L3HDR(struct tcphdr, mtod(args->fwa_m, struct ip *)); - if ((tcp->th_flags & TH_RST) == 0) { - struct mbuf *m; - - m = send_pkt(&(args->fwa_id), ntohl(tcp->th_seq), - ntohl(tcp->th_ack), - tcp->th_flags | TH_RST); - if (m != NULL) { - struct route sro; /* fake route */ - - bzero(&sro, sizeof(sro)); - ip_output(m, NULL, &sro, 0, NULL, NULL); - ROUTE_RELEASE(&sro); - } - } - m_freem(args->fwa_m); - } else { - m_freem(args->fwa_m); - } - args->fwa_m = NULL; -} - -/** - * - * Given an ip_fw *, lookup_next_rule will return a pointer - * to the next rule, which can be either the jump - * target (for skipto instructions) or the next one in the list (in - * all other cases including a missing jump target). - * The result is also written in the "next_rule" field of the rule. - * Backward jumps are not allowed, so start looking from the next - * rule... - * - * This never returns NULL -- in case we do not have an exact match, - * the next rule is returned. When the ruleset is changed, - * pointers are flushed so we are always correct. 
- */ - -static struct ip_fw * -lookup_next_rule(struct ip_fw *me) -{ - struct ip_fw *rule = NULL; - ipfw_insn *cmd; - - /* look for action, in case it is a skipto */ - cmd = ACTION_PTR(me); - if (cmd->opcode == O_LOG) { - cmd += F_LEN(cmd); - } - if (cmd->opcode == O_SKIPTO) { - for (rule = me->next; rule; rule = rule->next) { - if (rule->rulenum >= cmd->arg1) { - break; - } - } - } - if (rule == NULL) { /* failure or not a skipto */ - rule = me->next; - } - me->next_rule = rule; - return rule; -} - -/* - * The main check routine for the firewall. - * - * All arguments are in args so we can modify them and return them - * back to the caller. - * - * Parameters: - * - * args->fwa_m (in/out) The packet; we set to NULL when/if we nuke it. - * Starts with the IP header. - * args->fwa_eh (in) Mac header if present, or NULL for layer3 packet. - * args->fwa_oif Outgoing interface, or NULL if packet is incoming. - * The incoming interface is in the mbuf. (in) - * args->fwa_divert_rule (in/out) - * Skip up to the first rule past this rule number; - * upon return, non-zero port number for divert or tee. - * - * args->fwa_ipfw_rule Pointer to the last matching rule (in/out) - * args->fwa_next_hop Socket we are forwarding to (out). - * args->fwa_id Addresses grabbed from the packet (out) - * - * Return value: - * - * IP_FW_PORT_DENY_FLAG the packet must be dropped. - * 0 The packet is to be accepted and routed normally OR - * the packet was denied/rejected and has been dropped; - * in the latter case, *m is equal to NULL upon return. - * port Divert the packet to port, with these caveats: - * - * - If IP_FW_PORT_TEE_FLAG is set, tee the packet instead - * of diverting it (ie, 'ipfw tee'). - * - * - If IP_FW_PORT_DYNT_FLAG is set, interpret the lower - * 16 bits as a dummynet pipe number instead of diverting - */ - -static int -ipfw_chk(struct ip_fw_args *args) -{ - /* - * Local variables hold state during the processing of a packet. - * - * IMPORTANT NOTE: to speed up the processing of rules, there - * are some assumption on the values of the variables, which - * are documented here. Should you change them, please check - * the implementation of the various instructions to make sure - * that they still work. - * - * args->fwa_eh The MAC header. It is non-null for a layer2 - * packet, it is NULL for a layer-3 packet. - * - * m | args->fwa_m Pointer to the mbuf, as received from the caller. - * It may change if ipfw_chk() does an m_pullup, or if it - * consumes the packet because it calls send_reject(). - * XXX This has to change, so that ipfw_chk() never modifies - * or consumes the buffer. - * ip is simply an alias of the value of m, and it is kept - * in sync with it (the packet is supposed to start with - * the ip header). - */ - struct mbuf *m = args->fwa_m; - struct ip *ip = mtod(m, struct ip *); - - /* - * oif | args->fwa_oif If NULL, ipfw_chk has been called on the - * inbound path (ether_input, bdg_forward, ip_input). - * If non-NULL, ipfw_chk has been called on the outbound path - * (ether_output, ip_output). - */ - struct ifnet *oif = args->fwa_oif; - - struct ip_fw *f = NULL; /* matching rule */ - int retval = 0; - - /* - * hlen The length of the IPv4 header. - * hlen >0 means we have an IPv4 packet. - */ - u_int hlen = 0; /* hlen >0 means we have an IP pkt */ - - /* - * offset The offset of a fragment. offset != 0 means that - * we have a fragment at this offset of an IPv4 packet. - * offset == 0 means that (if this is an IPv4 packet) - * this is the first or only fragment. 
- */ - u_short offset = 0; - - /* - * Local copies of addresses. They are only valid if we have - * an IP packet. - * - * proto The protocol. Set to 0 for non-ip packets, - * or to the protocol read from the packet otherwise. - * proto != 0 means that we have an IPv4 packet. - * - * src_port, dst_port port numbers, in HOST format. Only - * valid for TCP and UDP packets. - * - * src_ip, dst_ip ip addresses, in NETWORK format. - * Only valid for IPv4 packets. - */ - u_int8_t proto; - u_int16_t src_port = 0, dst_port = 0; /* NOTE: host format */ - struct in_addr src_ip = { .s_addr = 0 }, dst_ip = { .s_addr = 0 }; /* NOTE: network format */ - u_int16_t ip_len = 0; - int pktlen; - int dyn_dir = MATCH_UNKNOWN; - ipfw_dyn_rule *q = NULL; - struct timeval timenow; - - if (m->m_flags & M_SKIP_FIREWALL || fw_bypass) { - return 0; /* accept */ - } - - /* - * Clear packet chain if we find one here. - */ - - if (m->m_nextpkt != NULL) { - m_freem_list(m->m_nextpkt); - m->m_nextpkt = NULL; - } - - lck_mtx_lock(ipfw_mutex); - - getmicrotime(&timenow); - /* - * dyn_dir = MATCH_UNKNOWN when rules unchecked, - * MATCH_NONE when checked and not matched (q = NULL), - * MATCH_FORWARD or MATCH_REVERSE otherwise (q != NULL) - */ - - pktlen = m->m_pkthdr.len; - if (args->fwa_eh == NULL || /* layer 3 packet */ - (m->m_pkthdr.len >= sizeof(struct ip) && - ntohs(args->fwa_eh->ether_type) == ETHERTYPE_IP)) { - hlen = ip->ip_hl << 2; - } - - /* - * Collect parameters into local variables for faster matching. - */ - if (hlen == 0) { /* do not grab addresses for non-ip pkts */ - proto = args->fwa_id.proto = 0; /* mark f_id invalid */ - goto after_ip_checks; - } - - proto = args->fwa_id.proto = ip->ip_p; - src_ip = ip->ip_src; - dst_ip = ip->ip_dst; - if (args->fwa_eh != NULL) { /* layer 2 packets are as on the wire */ - offset = ntohs(ip->ip_off) & IP_OFFMASK; - ip_len = ntohs(ip->ip_len); - } else { - offset = ip->ip_off & IP_OFFMASK; - ip_len = ip->ip_len; - } - pktlen = ip_len < pktlen ? ip_len : pktlen; - -#define PULLUP_TO(len) \ - do { \ - if ((m)->m_len < (len)) { \ - args->fwa_m = m = m_pullup(m, (len)); \ - if (m == 0) \ - goto pullup_failed; \ - ip = mtod(m, struct ip *); \ - } \ - } while (0) - - if (offset == 0) { - switch (proto) { - case IPPROTO_TCP: - { - struct tcphdr *tcp; - - PULLUP_TO(hlen + sizeof(struct tcphdr)); - tcp = L3HDR(struct tcphdr, ip); - dst_port = tcp->th_dport; - src_port = tcp->th_sport; - args->fwa_id.flags = tcp->th_flags; - } - break; - - case IPPROTO_UDP: - { - struct udphdr *udp; - - PULLUP_TO(hlen + sizeof(struct udphdr)); - udp = L3HDR(struct udphdr, ip); - dst_port = udp->uh_dport; - src_port = udp->uh_sport; - } - break; - - case IPPROTO_ICMP: - PULLUP_TO(hlen + 4); /* type, code and checksum. */ - args->fwa_id.flags = L3HDR(struct icmp, ip)->icmp_type; - break; - - default: - break; - } -#undef PULLUP_TO - } - - args->fwa_id.src_ip = ntohl(src_ip.s_addr); - args->fwa_id.dst_ip = ntohl(dst_ip.s_addr); - args->fwa_id.src_port = src_port = ntohs(src_port); - args->fwa_id.dst_port = dst_port = ntohs(dst_port); - -after_ip_checks: - if (args->fwa_ipfw_rule) { - /* - * Packet has already been tagged. Look for the next rule - * to restart processing. - * - * If fw_one_pass != 0 then just accept it. - * XXX should not happen here, but optimized out in - * the caller. 
- */ - if (fw_one_pass) { - lck_mtx_unlock(ipfw_mutex); - return 0; - } - - f = args->fwa_ipfw_rule->next_rule; - if (f == NULL) { - f = lookup_next_rule(args->fwa_ipfw_rule); - } - } else { - /* - * Find the starting rule. It can be either the first - * one, or the one after divert_rule if asked so. - */ - int skipto = args->fwa_divert_rule; - - f = layer3_chain; - if (args->fwa_eh == NULL && skipto != 0) { - if (skipto >= IPFW_DEFAULT_RULE) { - lck_mtx_unlock(ipfw_mutex); - return IP_FW_PORT_DENY_FLAG; /* invalid */ - } - while (f && f->rulenum <= skipto) { - f = f->next; - } - if (f == NULL) { /* drop packet */ - lck_mtx_unlock(ipfw_mutex); - return IP_FW_PORT_DENY_FLAG; - } - } - } - args->fwa_divert_rule = 0; /* reset to avoid confusion later */ - - /* - * Now scan the rules, and parse microinstructions for each rule. - */ - for (; f; f = f->next) { - int l, cmdlen; - ipfw_insn *cmd; - int skip_or; /* skip rest of OR block */ - -again: - if (f->reserved_1 == IPFW_RULE_INACTIVE) { - continue; - } - - if (set_disable & (1 << f->set)) { - continue; - } - - skip_or = 0; - for (l = f->cmd_len, cmd = f->cmd; l > 0; - l -= cmdlen, cmd += cmdlen) { - int match; - - /* - * check_body is a jump target used when we find a - * CHECK_STATE, and need to jump to the body of - * the target rule. - */ - -check_body: - cmdlen = F_LEN(cmd); - /* - * An OR block (insn_1 || .. || insn_n) has the - * F_OR bit set in all but the last instruction. - * The first match will set "skip_or", and cause - * the following instructions to be skipped until - * past the one with the F_OR bit clear. - */ - if (skip_or) { /* skip this instruction */ - if ((cmd->len & F_OR) == 0) { - skip_or = 0; /* next one is good */ - } - continue; - } - match = 0; /* set to 1 if we succeed */ - - switch (cmd->opcode) { - /* - * The first set of opcodes compares the packet's - * fields with some pattern, setting 'match' if a - * match is found. At the end of the loop there is - * logic to deal with F_NOT and F_OR flags associated - * with the opcode. - */ - case O_NOP: - match = 1; - break; - - case O_FORWARD_MAC: - printf("ipfw: opcode %d unimplemented\n", - cmd->opcode); - break; - -#ifndef __APPLE__ - case O_GID: -#endif - case O_UID: - /* - * We only check offset == 0 && proto != 0, - * as this ensures that we have an IPv4 - * packet with the ports info. - */ - if (offset != 0) { - break; - } - - { - struct inpcbinfo *pi; - int wildcard; - struct inpcb *pcb; - - if (proto == IPPROTO_TCP) { - wildcard = 0; - pi = &tcbinfo; - } else if (proto == IPPROTO_UDP) { - wildcard = 1; - pi = &udbinfo; - } else { - break; - } - - pcb = (oif) ? 
- in_pcblookup_hash(pi, - dst_ip, htons(dst_port), - src_ip, htons(src_port), - wildcard, oif) : - in_pcblookup_hash(pi, - src_ip, htons(src_port), - dst_ip, htons(dst_port), - wildcard, NULL); - - if (pcb == NULL || pcb->inp_socket == NULL) { - break; - } -#if __FreeBSD_version < 500034 -#define socheckuid(a, b) (kauth_cred_getuid((a)->so_cred) != (b)) -#endif - if (cmd->opcode == O_UID) { - match = -#ifdef __APPLE__ - (kauth_cred_getuid(pcb->inp_socket->so_cred) == (uid_t)((ipfw_insn_u32 *)cmd)->d[0]); -#else - !socheckuid(pcb->inp_socket, - (uid_t)((ipfw_insn_u32 *)cmd)->d[0]); -#endif - } -#ifndef __APPLE__ - else { - match = 0; - kauth_cred_ismember_gid(pcb->inp_socket->so_cred, - (gid_t)((ipfw_insn_u32 *)cmd)->d[0], &match); - } -#endif - /* release reference on pcb */ - in_pcb_checkstate(pcb, WNT_RELEASE, 0); - } - - break; - - case O_RECV: - match = iface_match(m->m_pkthdr.rcvif, - (ipfw_insn_if *)cmd); - break; - - case O_XMIT: - match = iface_match(oif, (ipfw_insn_if *)cmd); - break; - - case O_VIA: - match = iface_match(oif ? oif : - m->m_pkthdr.rcvif, (ipfw_insn_if *)cmd); - break; - - case O_MACADDR2: - if (args->fwa_eh != NULL) { /* have MAC header */ - u_int32_t *want = (u_int32_t *) - ((ipfw_insn_mac *)cmd)->addr; - u_int32_t *mask = (u_int32_t *) - ((ipfw_insn_mac *)cmd)->mask; - u_int32_t *hdr = (u_int32_t *)args->fwa_eh; - - match = - (want[0] == (hdr[0] & mask[0]) && - want[1] == (hdr[1] & mask[1]) && - want[2] == (hdr[2] & mask[2])); - } - break; - - case O_MAC_TYPE: - if (args->fwa_eh != NULL) { - u_int16_t t = - ntohs(args->fwa_eh->ether_type); - u_int16_t *p = - ((ipfw_insn_u16 *)cmd)->ports; - int i; - - for (i = cmdlen - 1; !match && i > 0; - i--, p += 2) { - match = (t >= p[0] && t <= p[1]); - } - } - break; - - case O_FRAG: - match = (hlen > 0 && offset != 0); - break; - - case O_IN: /* "out" is "not in" */ - match = (oif == NULL); - break; - - case O_LAYER2: - match = (args->fwa_eh != NULL); - break; - - case O_PROTO: - /* - * We do not allow an arg of 0 so the - * check of "proto" only suffices. - */ - match = (proto == cmd->arg1); - break; - - case O_IP_SRC: - match = (hlen > 0 && - ((ipfw_insn_ip *)cmd)->addr.s_addr == - src_ip.s_addr); - break; - - case O_IP_SRC_MASK: - case O_IP_DST_MASK: - if (hlen > 0) { - uint32_t a = - (cmd->opcode == O_IP_DST_MASK) ? - dst_ip.s_addr : src_ip.s_addr; - uint32_t *p = ((ipfw_insn_u32 *)cmd)->d; - int i = cmdlen - 1; - - for (; !match && i > 0; i -= 2, p += 2) { - match = (p[0] == (a & p[1])); - } - } - break; - - case O_IP_SRC_ME: - if (hlen > 0) { - struct ifnet *tif; - - INADDR_TO_IFP(src_ip, tif); - match = (tif != NULL); - } - break; - - case O_IP_DST_SET: - case O_IP_SRC_SET: - if (hlen > 0) { - u_int32_t *d = (u_int32_t *)(cmd + 1); - u_int32_t addr = - cmd->opcode == O_IP_DST_SET ? - args->fwa_id.dst_ip : - args->fwa_id.src_ip; - - if (addr < d[0]) { - break; - } - addr -= d[0]; /* subtract base */ - match = (addr < cmd->arg1) && - (d[1 + (addr >> 5)] & - (1 << (addr & 0x1f))); - } - break; - - case O_IP_DST: - match = (hlen > 0 && - ((ipfw_insn_ip *)cmd)->addr.s_addr == - dst_ip.s_addr); - break; - - case O_IP_DST_ME: - if (hlen > 0) { - struct ifnet *tif; - - INADDR_TO_IFP(dst_ip, tif); - match = (tif != NULL); - } - break; - - case O_IP_SRCPORT: - case O_IP_DSTPORT: - /* - * offset == 0 && proto != 0 is enough - * to guarantee that we have an IPv4 - * packet with port info. - */ - if ((proto == IPPROTO_UDP || proto == IPPROTO_TCP) - && offset == 0) { - u_int16_t x = - (cmd->opcode == O_IP_SRCPORT) ? 
- src_port : dst_port; - u_int16_t *p = - ((ipfw_insn_u16 *)cmd)->ports; - int i; - - for (i = cmdlen - 1; !match && i > 0; - i--, p += 2) { - match = (x >= p[0] && x <= p[1]); - } - } - break; - - case O_ICMPTYPE: - match = (offset == 0 && proto == IPPROTO_ICMP && - icmptype_match(ip, (ipfw_insn_u32 *)cmd)); - break; - - case O_IPOPT: - match = (hlen > 0 && ipopts_match(ip, cmd)); - break; - - case O_IPVER: - match = (hlen > 0 && cmd->arg1 == ip->ip_v); - break; - - case O_IPID: - case O_IPLEN: - case O_IPTTL: - if (hlen > 0) { /* only for IP packets */ - uint16_t x; - uint16_t *p; - int i; - - if (cmd->opcode == O_IPLEN) { - x = ip_len; - } else if (cmd->opcode == O_IPTTL) { - x = ip->ip_ttl; - } else { /* must be IPID */ - x = ntohs(ip->ip_id); - } - if (cmdlen == 1) { - match = (cmd->arg1 == x); - break; - } - /* otherwise we have ranges */ - p = ((ipfw_insn_u16 *)cmd)->ports; - i = cmdlen - 1; - for (; !match && i > 0; i--, p += 2) { - match = (x >= p[0] && x <= p[1]); - } - } - break; - - case O_IPPRECEDENCE: - match = (hlen > 0 && - (cmd->arg1 == (ip->ip_tos & 0xe0))); - break; - - case O_IPTOS: - match = (hlen > 0 && - flags_match(cmd, ip->ip_tos)); - break; - - case O_TCPFLAGS: - match = (proto == IPPROTO_TCP && offset == 0 && - flags_match(cmd, - L3HDR(struct tcphdr, ip)->th_flags)); - break; - - case O_TCPOPTS: - match = (proto == IPPROTO_TCP && offset == 0 && - tcpopts_match(ip, cmd)); - break; - - case O_TCPSEQ: - match = (proto == IPPROTO_TCP && offset == 0 && - ((ipfw_insn_u32 *)cmd)->d[0] == - L3HDR(struct tcphdr, ip)->th_seq); - break; - - case O_TCPACK: - match = (proto == IPPROTO_TCP && offset == 0 && - ((ipfw_insn_u32 *)cmd)->d[0] == - L3HDR(struct tcphdr, ip)->th_ack); - break; - - case O_TCPWIN: - match = (proto == IPPROTO_TCP && offset == 0 && - cmd->arg1 == - L3HDR(struct tcphdr, ip)->th_win); - break; - - case O_ESTAB: - /* reject packets which have SYN only */ - /* XXX should i also check for TH_ACK ? */ - match = (proto == IPPROTO_TCP && offset == 0 && - (L3HDR(struct tcphdr, ip)->th_flags & - (TH_RST | TH_ACK | TH_SYN)) != TH_SYN); - break; - - case O_LOG: - if (fw_verbose) { - ipfw_log(f, hlen, args->fwa_eh, m, oif); - } - match = 1; - break; - - case O_PROB: - match = (random() < ((ipfw_insn_u32 *)cmd)->d[0]); - break; - - case O_VERREVPATH: - /* Outgoing packets automatically pass/match */ - match = ((oif != NULL) || - (m->m_pkthdr.rcvif == NULL) || - verify_rev_path(src_ip, m->m_pkthdr.rcvif)); - break; - - case O_IPSEC: -#ifdef FAST_IPSEC - match = (m_tag_find(m, - PACKET_TAG_IPSEC_IN_DONE, NULL) != NULL); -#endif -#ifdef IPSEC - match = (ipsec_gethist(m, NULL) != NULL); -#endif - /* otherwise no match */ - break; - - /* - * The second set of opcodes represents 'actions', - * i.e. the terminal part of a rule once the packet - * matches all previous patterns. - * Typically there is only one action for each rule, - * and the opcode is stored at the end of the rule - * (but there are exceptions -- see below). - * - * In general, here we set retval and terminate the - * outer loop (would be a 'break 3' in some language, - * but we need to do a 'goto done'). - * - * Exceptions: - * O_COUNT and O_SKIPTO actions: - * instead of terminating, we jump to the next rule - * ('goto next_rule', equivalent to a 'break 2'), - * or to the SKIPTO target ('goto again' after - * having set f, cmd and l), respectively. - * - * O_LIMIT and O_KEEP_STATE: these opcodes are - * not real 'actions', and are stored right - * before the 'action' part of the rule. 
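The O_IP_SRCPORT / O_IP_DSTPORT case above walks the low/high pairs stored in the instruction and matches if the port falls inside any of them, which lets one instruction carry both single ports and ranges. A compact user-space sketch of the same loop; the helper name and the sample ranges are illustrative:

    #include <stdio.h>
    #include <stdint.h>

    /* mirror of the port-range loop: each pair is an inclusive low/high bound */
    static int
    ports_match(const uint16_t *pairs, int npairs, uint16_t port)
    {
        for (int i = 0; i < npairs; i++, pairs += 2)
            if (port >= pairs[0] && port <= pairs[1])
                return 1;
        return 0;
    }

    int
    main(void)
    {
        uint16_t ranges[] = { 80, 80, 8000, 8080 };   /* "80,8000-8080" */

        printf("port 8022: %d\n", ports_match(ranges, 2, 8022));  /* 1 */
        printf("port 443 : %d\n", ports_match(ranges, 2, 443));   /* 0 */
        return 0;
    }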
- * These opcodes try to install an entry in the - * state tables; if successful, we continue with - * the next opcode (match=1; break;), otherwise - * the packet * must be dropped - * ('goto done' after setting retval); - * - * O_PROBE_STATE and O_CHECK_STATE: these opcodes - * cause a lookup of the state table, and a jump - * to the 'action' part of the parent rule - * ('goto check_body') if an entry is found, or - * (CHECK_STATE only) a jump to the next rule if - * the entry is not found ('goto next_rule'). - * The result of the lookup is cached to make - * further instances of these opcodes are - * effectively NOPs. - */ - case O_LIMIT: - case O_KEEP_STATE: - if (install_state(f, - (ipfw_insn_limit *)cmd, args)) { - retval = IP_FW_PORT_DENY_FLAG; - goto done; /* error/limit violation */ - } - match = 1; - break; - - case O_PROBE_STATE: - case O_CHECK_STATE: - /* - * dynamic rules are checked at the first - * keep-state or check-state occurrence, - * with the result being stored in dyn_dir. - * The compiler introduces a PROBE_STATE - * instruction for us when we have a - * KEEP_STATE (because PROBE_STATE needs - * to be run first). - */ - if (dyn_dir == MATCH_UNKNOWN && - (q = lookup_dyn_rule(&args->fwa_id, - &dyn_dir, proto == IPPROTO_TCP ? - L3HDR(struct tcphdr, ip) : NULL)) - != NULL) { - /* - * Found dynamic entry, update stats - * and jump to the 'action' part of - * the parent rule. - */ - q->pcnt++; - q->bcnt += pktlen; - f = q->rule; - cmd = ACTION_PTR(f); - l = f->cmd_len - f->act_ofs; - goto check_body; - } - /* - * Dynamic entry not found. If CHECK_STATE, - * skip to next rule, if PROBE_STATE just - * ignore and continue with next opcode. - */ - if (cmd->opcode == O_CHECK_STATE) { - goto next_rule; - } - match = 1; - break; - - case O_ACCEPT: - retval = 0; /* accept */ - goto done; - - case O_PIPE: - case O_QUEUE: - args->fwa_ipfw_rule = f; /* report matching rule */ - retval = cmd->arg1 | IP_FW_PORT_DYNT_FLAG; - goto done; - - case O_DIVERT: - case O_TEE: - if (args->fwa_eh) { /* not on layer 2 */ - break; - } - args->fwa_divert_rule = f->rulenum; - retval = (cmd->opcode == O_DIVERT) ? - cmd->arg1 : - cmd->arg1 | IP_FW_PORT_TEE_FLAG; - goto done; - - case O_COUNT: - case O_SKIPTO: - f->pcnt++; /* update stats */ - f->bcnt += pktlen; - f->timestamp = timenow.tv_sec; - if (cmd->opcode == O_COUNT) { - goto next_rule; - } - /* handle skipto */ - if (f->next_rule == NULL) { - lookup_next_rule(f); - } - f = f->next_rule; - goto again; - - case O_REJECT: - /* - * Drop the packet and send a reject notice - * if the packet is not ICMP (or is an ICMP - * query), and it is not multicast/broadcast. 
- */ - if (hlen > 0 && offset == 0 && - (proto != IPPROTO_ICMP || - is_icmp_query(ip)) && - !(m->m_flags & (M_BCAST | M_MCAST)) && - !IN_MULTICAST(dst_ip.s_addr)) { - send_reject(args, cmd->arg1, - offset, ip_len); - m = args->fwa_m; - } - /* FALLTHROUGH */ - case O_DENY: - retval = IP_FW_PORT_DENY_FLAG; - goto done; - - case O_FORWARD_IP: - if (args->fwa_eh) { /* not valid on layer2 pkts */ - break; - } - if (!q || dyn_dir == MATCH_FORWARD) { - args->fwa_next_hop = - &((ipfw_insn_sa *)cmd)->sa; - } - retval = 0; - goto done; - - default: - panic("-- unknown opcode %d\n", cmd->opcode); - } /* end of switch() on opcodes */ - - if (cmd->len & F_NOT) { - match = !match; - } - - if (match) { - if (cmd->len & F_OR) { - skip_or = 1; - } - } else { - if (!(cmd->len & F_OR)) { /* not an OR block, */ - break; /* try next rule */ - } - } - } /* end of inner for, scan opcodes */ - -next_rule: ; /* try next rule */ - } /* end of outer for, scan rules */ - printf("ipfw: ouch!, skip past end of rules, denying packet\n"); - lck_mtx_unlock(ipfw_mutex); - return IP_FW_PORT_DENY_FLAG; - -done: - /* Update statistics */ - f->pcnt++; - f->bcnt += pktlen; - f->timestamp = timenow.tv_sec; - lck_mtx_unlock(ipfw_mutex); - return retval; - -pullup_failed: - if (fw_verbose) { - printf("ipfw: pullup failed\n"); - } - lck_mtx_unlock(ipfw_mutex); - return IP_FW_PORT_DENY_FLAG; -} - -/* - * When a rule is added/deleted, clear the next_rule pointers in all rules. - * These will be reconstructed on the fly as packets are matched. - * Must be called at splimp(). - */ -static void -flush_rule_ptrs(void) -{ - struct ip_fw *rule; - - for (rule = layer3_chain; rule; rule = rule->next) { - rule->next_rule = NULL; - } -} - -/* - * When pipes/queues are deleted, clear the "pipe_ptr" pointer to a given - * pipe/queue, or to all of them (match == NULL). - * Must be called at splimp(). - */ -void -flush_pipe_ptrs(struct dn_flow_set *match) -{ - struct ip_fw *rule; - - for (rule = layer3_chain; rule; rule = rule->next) { - ipfw_insn_pipe *cmd = (ipfw_insn_pipe *)ACTION_PTR(rule); - - if (cmd->o.opcode != O_PIPE && cmd->o.opcode != O_QUEUE) { - continue; - } - /* - * XXX Use bcmp/bzero to handle pipe_ptr to overcome - * possible alignment problems on 64-bit architectures. - * This code is seldom used so we do not worry too - * much about efficiency. - */ - if (match == NULL || - !bcmp(&cmd->pipe_ptr, &match, sizeof(match))) { - bzero(&cmd->pipe_ptr, sizeof(cmd->pipe_ptr)); - } - } -} - -/* - * Add a new rule to the list. Copy the rule into a malloc'ed area, then - * possibly create a rule number and add the rule to the list. - * Update the rule_number in the input struct so the caller knows it as well. 
- */ -static int -add_rule(struct ip_fw **head, struct ip_fw *input_rule) -{ - struct ip_fw *rule, *f, *prev; - int l = RULESIZE(input_rule); - - if (*head == NULL && input_rule->rulenum != IPFW_DEFAULT_RULE) { - return EINVAL; - } - - rule = _MALLOC(l, M_IPFW, M_WAIT | M_ZERO); - if (rule == NULL) { - printf("ipfw2: add_rule MALLOC failed\n"); - return ENOSPC; - } - - bcopy(input_rule, rule, l); - - rule->next = NULL; - rule->next_rule = NULL; - - rule->pcnt = 0; - rule->bcnt = 0; - rule->timestamp = 0; - - if (*head == NULL) { /* default rule */ - *head = rule; - goto done; - } - - /* - * If rulenum is 0, find highest numbered rule before the - * default rule, and add autoinc_step - */ - if (autoinc_step < 1) { - autoinc_step = 1; - } else if (autoinc_step > 1000) { - autoinc_step = 1000; - } - if (rule->rulenum == 0) { - /* - * locate the highest numbered rule before default - */ - for (f = *head; f; f = f->next) { - if (f->rulenum == IPFW_DEFAULT_RULE) { - break; - } - rule->rulenum = f->rulenum; - } - if (rule->rulenum < IPFW_DEFAULT_RULE - autoinc_step) { - rule->rulenum += autoinc_step; - } - input_rule->rulenum = rule->rulenum; - } - - /* - * Now insert the new rule in the right place in the sorted list. - */ - for (prev = NULL, f = *head; f; prev = f, f = f->next) { - if (f->rulenum > rule->rulenum) { /* found the location */ - if (prev) { - rule->next = f; - prev->next = rule; - } else { /* head insert */ - rule->next = *head; - *head = rule; - } - break; - } - } - flush_rule_ptrs(); -done: - static_count++; - static_len += l; - static_len_32 += RULESIZE32(input_rule); - static_len_64 += RULESIZE64(input_rule); - DEB(printf("ipfw: installed rule %d, static count now %d\n", - rule->rulenum, static_count); ) - return 0; -} - -/** - * Free storage associated with a static rule (including derived - * dynamic rules). - * The caller is in charge of clearing rule pointers to avoid - * dangling pointers. - * @return a pointer to the next entry. - * Arguments are not checked, so they better be correct. - * Must be called at splimp(). 
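When a rule arrives with rulenum == 0, add_rule() above numbers it automatically: it takes the highest existing number below the default rule and, if there is room, adds autoinc_step (clamped to the 1..1000 range). A compact illustration of that numbering with made-up rule numbers; the default-rule and step values are assumptions for the sketch, not taken from this file:

    #include <stdio.h>

    #define DEMO_DEFAULT_RULE 65535   /* stands in for IPFW_DEFAULT_RULE */
    #define DEMO_AUTOINC_STEP 100     /* within the clamped 1..1000 range */

    int
    main(void)
    {
        /* sorted chain, ending with the default rule as in layer3_chain */
        int existing[] = { 100, 200, 300, DEMO_DEFAULT_RULE };
        int highest = 0;

        for (unsigned i = 0; i < sizeof(existing) / sizeof(existing[0]); i++) {
            if (existing[i] == DEMO_DEFAULT_RULE)
                break;
            highest = existing[i];
        }

        int assigned = highest;
        if (assigned < DEMO_DEFAULT_RULE - DEMO_AUTOINC_STEP)
            assigned += DEMO_AUTOINC_STEP;
        printf("new rule gets number %d\n", assigned);   /* 400 */
        return 0;
    }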
- */ -static struct ip_fw * -delete_rule(struct ip_fw **head, struct ip_fw *prev, struct ip_fw *rule) -{ - struct ip_fw *n; - int l = RULESIZE(rule); - - n = rule->next; - remove_dyn_rule(rule, NULL /* force removal */); - if (prev == NULL) { - *head = n; - } else { - prev->next = n; - } - static_count--; - static_len -= l; - static_len_32 -= RULESIZE32(rule); - static_len_64 -= RULESIZE64(rule); - -#if DUMMYNET - if (DUMMYNET_LOADED) { - dn_ipfw_rule_delete(rule); - } -#endif /* DUMMYNET */ - _FREE(rule, M_IPFW); - return n; -} - -#if DEBUG_INACTIVE_RULES -static void -print_chain(struct ip_fw **chain) -{ - struct ip_fw *rule = *chain; - - for (; rule; rule = rule->next) { - ipfw_insn *cmd = ACTION_PTR(rule); - - printf("ipfw: rule->rulenum = %d\n", rule->rulenum); - - if (rule->reserved_1 == IPFW_RULE_INACTIVE) { - printf("ipfw: rule->reserved = IPFW_RULE_INACTIVE\n"); - } - - switch (cmd->opcode) { - case O_DENY: - printf("ipfw: ACTION: Deny\n"); - break; - - case O_REJECT: - if (cmd->arg1 == ICMP_REJECT_RST) { - printf("ipfw: ACTION: Reset\n"); - } else if (cmd->arg1 == ICMP_UNREACH_HOST) { - printf("ipfw: ACTION: Reject\n"); - } - break; - - case O_ACCEPT: - printf("ipfw: ACTION: Accept\n"); - break; - case O_COUNT: - printf("ipfw: ACTION: Count\n"); - break; - case O_DIVERT: - printf("ipfw: ACTION: Divert\n"); - break; - case O_TEE: - printf("ipfw: ACTION: Tee\n"); - break; - case O_SKIPTO: - printf("ipfw: ACTION: SkipTo\n"); - break; - case O_PIPE: - printf("ipfw: ACTION: Pipe\n"); - break; - case O_QUEUE: - printf("ipfw: ACTION: Queue\n"); - break; - case O_FORWARD_IP: - printf("ipfw: ACTION: Forward\n"); - break; - default: - printf("ipfw: invalid action! %d\n", cmd->opcode); - } - } -} -#endif /* DEBUG_INACTIVE_RULES */ - -static void -flush_inactive(void *param) -{ - struct ip_fw *inactive_rule = (struct ip_fw *)param; - struct ip_fw *rule, *prev; - - lck_mtx_lock(ipfw_mutex); - - for (rule = layer3_chain, prev = NULL; rule;) { - if (rule == inactive_rule && rule->reserved_1 == IPFW_RULE_INACTIVE) { - struct ip_fw *n = rule; - - if (prev == NULL) { - layer3_chain = rule->next; - } else { - prev->next = rule->next; - } - rule = rule->next; - _FREE(n, M_IPFW); - } else { - prev = rule; - rule = rule->next; - } - } - -#if DEBUG_INACTIVE_RULES - print_chain(&layer3_chain); -#endif - lck_mtx_unlock(ipfw_mutex); -} - -static void -mark_inactive(struct ip_fw **prev, struct ip_fw **rule) -{ - int l = RULESIZE(*rule); - - if ((*rule)->reserved_1 != IPFW_RULE_INACTIVE) { - (*rule)->reserved_1 = IPFW_RULE_INACTIVE; - static_count--; - static_len -= l; - static_len_32 -= RULESIZE32(*rule); - static_len_64 -= RULESIZE64(*rule); - - timeout(flush_inactive, *rule, 30 * hz); /* 30 sec. */ - } - - *prev = *rule; - *rule = (*rule)->next; -} - -/* - * Deletes all rules from a chain (except rules in set RESVD_SET - * unless kill_default = 1). - * Must be called at splimp(). 
- */ -static void -free_chain(struct ip_fw **chain, int kill_default) -{ - struct ip_fw *prev, *rule; - - flush_rule_ptrs(); /* more efficient to do outside the loop */ - for (prev = NULL, rule = *chain; rule;) { - if (kill_default || rule->set != RESVD_SET) { - ipfw_insn *cmd = ACTION_PTR(rule); - - /* skip over forwarding rules so struct isn't - * deleted while pointer is still in use elsewhere - */ - if (cmd->opcode == O_FORWARD_IP) { - mark_inactive(&prev, &rule); - } else { - rule = delete_rule(chain, prev, rule); - } - } else { - prev = rule; - rule = rule->next; - } - } -} - -/** - * Remove all rules with given number, and also do set manipulation. - * Assumes chain != NULL && *chain != NULL. - * - * The argument is an u_int32_t. The low 16 bit are the rule or set number, - * the next 8 bits are the new set, the top 8 bits are the command: - * - * 0 delete rules with given number - * 1 delete rules with given set number - * 2 move rules with given number to new set - * 3 move rules with given set number to new set - * 4 swap sets with given numbers - */ -static int -del_entry(struct ip_fw **chain, u_int32_t arg) -{ - struct ip_fw *prev = NULL, *rule = *chain; - u_int16_t rulenum; /* rule or old_set */ - u_int8_t cmd, new_set; - - rulenum = arg & 0xffff; - cmd = (arg >> 24) & 0xff; - new_set = (arg >> 16) & 0xff; - - if (cmd > 4) { - return EINVAL; - } - if (new_set > RESVD_SET) { - return EINVAL; - } - if (cmd == 0 || cmd == 2) { - if (rulenum >= IPFW_DEFAULT_RULE) { - return EINVAL; - } - } else { - if (rulenum > RESVD_SET) { /* old_set */ - return EINVAL; - } - } - - switch (cmd) { - case 0: /* delete rules with given number */ - /* - * locate first rule to delete - */ - for (; rule->rulenum < rulenum; prev = rule, rule = rule->next) { - ; - } - if (rule->rulenum != rulenum) { - return EINVAL; - } - - /* - * flush pointers outside the loop, then delete all matching - * rules. prev remains the same throughout the cycle. - */ - flush_rule_ptrs(); - while (rule->rulenum == rulenum) { - ipfw_insn *insn = ACTION_PTR(rule); - - /* keep forwarding rules around so struct isn't - * deleted while pointer is still in use elsewhere - */ - if (insn->opcode == O_FORWARD_IP) { - mark_inactive(&prev, &rule); - } else { - rule = delete_rule(chain, prev, rule); - } - } - break; - - case 1: /* delete all rules with given set number */ - flush_rule_ptrs(); - while (rule->rulenum < IPFW_DEFAULT_RULE) { - if (rule->set == rulenum) { - ipfw_insn *insn = ACTION_PTR(rule); - - /* keep forwarding rules around so struct isn't - * deleted while pointer is still in use elsewhere - */ - if (insn->opcode == O_FORWARD_IP) { - mark_inactive(&prev, &rule); - } else { - rule = delete_rule(chain, prev, rule); - } - } else { - prev = rule; - rule = rule->next; - } - } - break; - - case 2: /* move rules with given number to new set */ - for (; rule->rulenum < IPFW_DEFAULT_RULE; rule = rule->next) { - if (rule->rulenum == rulenum) { - rule->set = new_set; - } - } - break; - - case 3: /* move rules with given set number to new set */ - for (; rule->rulenum < IPFW_DEFAULT_RULE; rule = rule->next) { - if (rule->set == rulenum) { - rule->set = new_set; - } - } - break; - - case 4: /* swap two sets */ - for (; rule->rulenum < IPFW_DEFAULT_RULE; rule = rule->next) { - if (rule->set == rulenum) { - rule->set = new_set; - } else if (rule->set == new_set) { - rule->set = rulenum; - } - } - break; - } - return 0; -} - -/* - * Clear counters for a specific rule. 
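del_entry() above receives its entire request packed into one 32-bit word, exactly as the comment spells out: the low 16 bits carry the rule or set number, the next 8 bits the destination set, and the top 8 bits the command. Decoding it takes a couple of shifts; the example value below is arbitrary:

    #include <stdio.h>
    #include <stdint.h>

    int
    main(void)
    {
        /* command 2 = "move rules with given number to new set" */
        uint32_t arg = (2u << 24) | (5u << 16) | 1000u;  /* move rule 1000 to set 5 */

        unsigned rulenum = arg & 0xffff;
        unsigned new_set = (arg >> 16) & 0xff;
        unsigned cmd     = (arg >> 24) & 0xff;

        printf("cmd=%u new_set=%u rulenum=%u\n", cmd, new_set, rulenum);
        return 0;
    }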
- */ -static void -clear_counters(struct ip_fw *rule, int log_only) -{ - ipfw_insn_log *l = (ipfw_insn_log *)ACTION_PTR(rule); - - if (log_only == 0) { - rule->bcnt = rule->pcnt = 0; - rule->timestamp = 0; - } - if (l->o.opcode == O_LOG) { - l->log_left = l->max_log; - } -} - -/** - * Reset some or all counters on firewall rules. - * @arg frwl is null to clear all entries, or contains a specific - * rule number. - * @arg log_only is 1 if we only want to reset logs, zero otherwise. - */ -static int -zero_entry(int rulenum, int log_only) -{ - struct ip_fw *rule; - const char *msg; - - if (rulenum == 0) { - norule_counter = 0; - for (rule = layer3_chain; rule; rule = rule->next) { - clear_counters(rule, log_only); - } - msg = log_only ? "ipfw: All logging counts reset.\n" : - "ipfw: Accounting cleared.\n"; - } else { - int cleared = 0; - /* - * We can have multiple rules with the same number, so we - * need to clear them all. - */ - for (rule = layer3_chain; rule; rule = rule->next) { - if (rule->rulenum == rulenum) { - while (rule && rule->rulenum == rulenum) { - clear_counters(rule, log_only); - rule = rule->next; - } - cleared = 1; - break; - } - } - if (!cleared) { /* we did not find any matching rules */ - return EINVAL; - } - msg = log_only ? "ipfw: Entry %d logging count reset.\n" : - "ipfw: Entry %d cleared.\n"; - } - if (fw_verbose) { - dolog((LOG_AUTHPRIV | LOG_NOTICE, msg, rulenum)); - } - return 0; -} - -/* - * Check validity of the structure before insert. - * Fortunately rules are simple, so this mostly need to check rule sizes. - */ -static int -check_ipfw_struct(struct ip_fw *rule, int size) -{ - int l, cmdlen = 0; - int have_action = 0; - ipfw_insn *cmd; - - if (size < sizeof(*rule)) { - printf("ipfw: rule too short\n"); - return EINVAL; - } - /* first, check for valid size */ - l = RULESIZE(rule); - if (l != size) { - printf("ipfw: size mismatch (have %d want %d)\n", size, l); - return EINVAL; - } - /* - * Now go for the individual checks. Very simple ones, basically only - * instruction sizes. 
- */ - for (l = rule->cmd_len, cmd = rule->cmd; - l > 0; l -= cmdlen, cmd += cmdlen) { - cmdlen = F_LEN(cmd); - if (cmdlen > l) { - printf("ipfw: opcode %d size truncated\n", - cmd->opcode); - return EINVAL; - } - DEB(printf("ipfw: opcode %d\n", cmd->opcode); ) - switch (cmd->opcode) { - case O_PROBE_STATE: - case O_KEEP_STATE: - case O_PROTO: - case O_IP_SRC_ME: - case O_IP_DST_ME: - case O_LAYER2: - case O_IN: - case O_FRAG: - case O_IPOPT: - case O_IPTOS: - case O_IPPRECEDENCE: - case O_IPVER: - case O_TCPWIN: - case O_TCPFLAGS: - case O_TCPOPTS: - case O_ESTAB: - case O_VERREVPATH: - case O_IPSEC: - if (cmdlen != F_INSN_SIZE(ipfw_insn)) { - goto bad_size; - } - break; - case O_UID: -#ifndef __APPLE__ - case O_GID: -#endif /* __APPLE__ */ - case O_IP_SRC: - case O_IP_DST: - case O_TCPSEQ: - case O_TCPACK: - case O_PROB: - case O_ICMPTYPE: - if (cmdlen != F_INSN_SIZE(ipfw_insn_u32)) { - goto bad_size; - } - break; - - case O_LIMIT: - if (cmdlen != F_INSN_SIZE(ipfw_insn_limit)) { - goto bad_size; - } - break; - - case O_LOG: - if (cmdlen != F_INSN_SIZE(ipfw_insn_log)) { - goto bad_size; - } - - /* enforce logging limit */ - if (fw_verbose && - ((ipfw_insn_log *)cmd)->max_log == 0 && verbose_limit != 0) { - ((ipfw_insn_log *)cmd)->max_log = verbose_limit; - } - - ((ipfw_insn_log *)cmd)->log_left = - ((ipfw_insn_log *)cmd)->max_log; - - break; - - case O_IP_SRC_MASK: - case O_IP_DST_MASK: - /* only odd command lengths */ - if (!(cmdlen & 1) || cmdlen > 31) { - goto bad_size; - } - break; - - case O_IP_SRC_SET: - case O_IP_DST_SET: - if (cmd->arg1 == 0 || cmd->arg1 > 256) { - printf("ipfw: invalid set size %d\n", - cmd->arg1); - return EINVAL; - } - if (cmdlen != F_INSN_SIZE(ipfw_insn_u32) + - (cmd->arg1 + 31) / 32) { - goto bad_size; - } - break; - - case O_MACADDR2: - if (cmdlen != F_INSN_SIZE(ipfw_insn_mac)) { - goto bad_size; - } - break; - - case O_NOP: - case O_IPID: - case O_IPTTL: - case O_IPLEN: - if (cmdlen < 1 || cmdlen > 31) { - goto bad_size; - } - break; - - case O_MAC_TYPE: - case O_IP_SRCPORT: - case O_IP_DSTPORT: /* XXX artificial limit, 30 port pairs */ - if (cmdlen < 2 || cmdlen > 31) { - goto bad_size; - } - break; - - case O_RECV: - case O_XMIT: - case O_VIA: - if (cmdlen != F_INSN_SIZE(ipfw_insn_if)) { - goto bad_size; - } - break; - - case O_PIPE: - case O_QUEUE: - if (cmdlen != F_INSN_SIZE(ipfw_insn_pipe)) { - goto bad_size; - } - goto check_action; - - case O_FORWARD_IP: - if (cmdlen != F_INSN_SIZE(ipfw_insn_sa)) { - goto bad_size; - } - goto check_action; - - case O_FORWARD_MAC: /* XXX not implemented yet */ - case O_CHECK_STATE: - case O_COUNT: - case O_ACCEPT: - case O_DENY: - case O_REJECT: - case O_SKIPTO: - case O_DIVERT: - case O_TEE: - if (cmdlen != F_INSN_SIZE(ipfw_insn)) { - goto bad_size; - } -check_action: - if (have_action) { - printf("ipfw: opcode %d, multiple actions" - " not allowed\n", - cmd->opcode); - return EINVAL; - } - have_action = 1; - if (l != cmdlen) { - printf("ipfw: opcode %d, action must be" - " last opcode\n", - cmd->opcode); - return EINVAL; - } - break; - default: - printf("ipfw: opcode %d, unknown opcode\n", - cmd->opcode); - return EINVAL; - } - } - if (have_action == 0) { - printf("ipfw: missing action\n"); - return EINVAL; - } - return 0; - -bad_size: - printf("ipfw: opcode %d size %d wrong\n", - cmd->opcode, cmdlen); - return EINVAL; -} - - -static void -ipfw_kev_post_msg(u_int32_t event_code) -{ - struct kev_msg ev_msg; - - bzero(&ev_msg, sizeof(struct kev_msg)); - - ev_msg.vendor_code = KEV_VENDOR_APPLE; - ev_msg.kev_class = 
KEV_FIREWALL_CLASS; - ev_msg.kev_subclass = KEV_IPFW_SUBCLASS; - ev_msg.event_code = event_code; - - kev_post_msg(&ev_msg); -} - -/** - * {set|get}sockopt parser. - */ -static int -ipfw_ctl(struct sockopt *sopt) -{ -#define RULE_MAXSIZE (256*sizeof(u_int32_t)) - u_int32_t api_version; - int command; - int error; - size_t size; - size_t rulesize = RULE_MAXSIZE; - struct ip_fw *bp, *buf, *rule; - int is64user = 0; - - /* copy of orig sopt to send to ipfw_get_command_and_version() */ - struct sockopt tmp_sopt = *sopt; - struct timeval timenow; - - getmicrotime(&timenow); - - /* - * Disallow modifications in really-really secure mode, but still allow - * the logging counters to be reset. - */ - if (sopt->sopt_name == IP_FW_ADD || - (sopt->sopt_dir == SOPT_SET && sopt->sopt_name != IP_FW_RESETLOG)) { -#if __FreeBSD_version >= 500034 - error = securelevel_ge(sopt->sopt_td->td_ucred, 3); - if (error) { - return error; - } -#else /* FreeBSD 4.x */ - if (securelevel >= 3) { - return EPERM; - } -#endif - } - - /* first get the command and version, then do conversion as necessary */ - error = ipfw_get_command_and_version(&tmp_sopt, &command, &api_version); - if (error) { - /* error getting the version */ - return error; - } - - if (proc_is64bit(sopt->sopt_p)) { - is64user = 1; - } - - switch (command) { - case IP_FW_GET: - { - size_t dynrulesize; - /* - * pass up a copy of the current rules. Static rules - * come first (the last of which has number IPFW_DEFAULT_RULE), - * followed by a possibly empty list of dynamic rule. - * The last dynamic rule has NULL in the "next" field. - */ - lck_mtx_lock(ipfw_mutex); - - if (is64user) { - size = Get64static_len(); - dynrulesize = sizeof(ipfw_dyn_rule_64); - if (ipfw_dyn_v) { - size += (dyn_count * dynrulesize); - } - } else { - size = Get32static_len(); - dynrulesize = sizeof(ipfw_dyn_rule_32); - if (ipfw_dyn_v) { - size += (dyn_count * dynrulesize); - } - } - - /* - * XXX todo: if the user passes a short length just to know - * how much room is needed, do not bother filling up the - * buffer, just jump to the sooptcopyout. - */ - buf = _MALLOC(size, M_TEMP, M_WAITOK | M_ZERO); - if (buf == 0) { - lck_mtx_unlock(ipfw_mutex); - error = ENOBUFS; - break; - } - - bp = buf; - for (rule = layer3_chain; rule; rule = rule->next) { - if (rule->reserved_1 == IPFW_RULE_INACTIVE) { - continue; - } - - if (is64user) { - int rulesize_64; - - copyto64fw( rule, (struct ip_fw_64 *)bp, size); - bcopy(&set_disable, &(((struct ip_fw_64*)bp)->next_rule), sizeof(set_disable)); - /* do not use macro RULESIZE64 since we want RULESIZE for ip_fw_64 */ - rulesize_64 = sizeof(struct ip_fw_64) + ((struct ip_fw_64 *)(bp))->cmd_len * 4 - 4; - bp = (struct ip_fw *)((char *)bp + rulesize_64); - } else { - int rulesize_32; - - copyto32fw( rule, (struct ip_fw_32*)bp, size); - bcopy(&set_disable, &(((struct ip_fw_32*)bp)->next_rule), sizeof(set_disable)); - /* do not use macro RULESIZE32 since we want RULESIZE for ip_fw_32 */ - rulesize_32 = sizeof(struct ip_fw_32) + ((struct ip_fw_32 *)(bp))->cmd_len * 4 - 4; - bp = (struct ip_fw *)((char *)bp + rulesize_32); - } - } - if (ipfw_dyn_v) { - int i; - ipfw_dyn_rule *p; - char *dst, *last = NULL; - - dst = (char *)bp; - for (i = 0; i < curr_dyn_buckets; i++) { - for (p = ipfw_dyn_v[i]; p != NULL; - p = p->next, dst += dynrulesize) { - if (is64user) { - ipfw_dyn_rule_64 *ipfw_dyn_dst; - - ipfw_dyn_dst = (ipfw_dyn_rule_64 *)dst; - /* - * store a non-null value in "next". 
- * The userland code will interpret a - * NULL here as a marker - * for the last dynamic rule. - */ - ipfw_dyn_dst->next = CAST_DOWN_EXPLICIT(user64_addr_t, dst); - ipfw_dyn_dst->rule = p->rule->rulenum; - ipfw_dyn_dst->parent = CAST_DOWN(user64_addr_t, p->parent); - ipfw_dyn_dst->pcnt = p->pcnt; - ipfw_dyn_dst->bcnt = p->bcnt; - externalize_flow_id(&ipfw_dyn_dst->id, &p->id); - ipfw_dyn_dst->expire = - TIME_LEQ(p->expire, timenow.tv_sec) ? - 0 : p->expire - timenow.tv_sec; - ipfw_dyn_dst->bucket = p->bucket; - ipfw_dyn_dst->state = p->state; - ipfw_dyn_dst->ack_fwd = p->ack_fwd; - ipfw_dyn_dst->ack_rev = p->ack_rev; - ipfw_dyn_dst->dyn_type = p->dyn_type; - ipfw_dyn_dst->count = p->count; - last = (char*)ipfw_dyn_dst; - } else { - ipfw_dyn_rule_32 *ipfw_dyn_dst; - - ipfw_dyn_dst = (ipfw_dyn_rule_32 *)dst; - /* - * store a non-null value in "next". - * The userland code will interpret a - * NULL here as a marker - * for the last dynamic rule. - */ - ipfw_dyn_dst->next = CAST_DOWN_EXPLICIT(user32_addr_t, dst); - ipfw_dyn_dst->rule = p->rule->rulenum; - ipfw_dyn_dst->parent = CAST_DOWN_EXPLICIT(user32_addr_t, p->parent); - ipfw_dyn_dst->pcnt = p->pcnt; - ipfw_dyn_dst->bcnt = p->bcnt; - externalize_flow_id(&ipfw_dyn_dst->id, &p->id); - ipfw_dyn_dst->expire = - TIME_LEQ(p->expire, timenow.tv_sec) ? - 0 : p->expire - timenow.tv_sec; - ipfw_dyn_dst->bucket = p->bucket; - ipfw_dyn_dst->state = p->state; - ipfw_dyn_dst->ack_fwd = p->ack_fwd; - ipfw_dyn_dst->ack_rev = p->ack_rev; - ipfw_dyn_dst->dyn_type = p->dyn_type; - ipfw_dyn_dst->count = p->count; - last = (char*)ipfw_dyn_dst; - } - } - } - /* mark last dynamic rule */ - if (last != NULL) { - if (is64user) { - ((ipfw_dyn_rule_64 *)last)->next = 0; - } else { - ((ipfw_dyn_rule_32 *)last)->next = 0; - } - } - } - lck_mtx_unlock(ipfw_mutex); - - /* convert back if necessary and copyout */ - if (api_version == IP_FW_VERSION_0) { - int i, len = 0; - struct ip_old_fw *buf2, *rule_vers0; - - lck_mtx_lock(ipfw_mutex); - buf2 = _MALLOC(static_count * sizeof(struct ip_old_fw), M_TEMP, M_WAITOK | M_ZERO); - if (buf2 == 0) { - lck_mtx_unlock(ipfw_mutex); - error = ENOBUFS; - } - - if (!error) { - bp = buf; - rule_vers0 = buf2; - - for (i = 0; i < static_count; i++) { - /* static rules have different sizes */ - int j = RULESIZE(bp); - ipfw_convert_from_latest(bp, rule_vers0, api_version, is64user); - bp = (struct ip_fw *)((char *)bp + j); - len += sizeof(*rule_vers0); - rule_vers0++; - } - lck_mtx_unlock(ipfw_mutex); - error = sooptcopyout(sopt, buf2, len); - _FREE(buf2, M_TEMP); - } - } else if (api_version == IP_FW_VERSION_1) { - int i, len = 0, buf_size; - struct ip_fw_compat *buf2; - size_t ipfwcompsize; - size_t ipfwdyncompsize; - char *rule_vers1; - - lck_mtx_lock(ipfw_mutex); - if (is64user) { - ipfwcompsize = sizeof(struct ip_fw_compat_64); - ipfwdyncompsize = sizeof(struct ipfw_dyn_rule_compat_64); - } else { - ipfwcompsize = sizeof(struct ip_fw_compat_32); - ipfwdyncompsize = sizeof(struct ipfw_dyn_rule_compat_32); - } - - buf_size = static_count * ipfwcompsize + - dyn_count * ipfwdyncompsize; - - buf2 = _MALLOC(buf_size, M_TEMP, M_WAITOK | M_ZERO); - if (buf2 == 0) { - lck_mtx_unlock(ipfw_mutex); - error = ENOBUFS; - } - if (!error) { - bp = buf; - rule_vers1 = (char*)buf2; - - /* first do static rules */ - for (i = 0; i < static_count; i++) { - /* static rules have different sizes */ - if (is64user) { - int rulesize_64; - ipfw_convert_from_latest(bp, (void *)rule_vers1, api_version, is64user); - rulesize_64 = sizeof(struct ip_fw_64) + ((struct 
ip_fw_64 *)(bp))->cmd_len * 4 - 4; - bp = (struct ip_fw *)((char *)bp + rulesize_64); - } else { - int rulesize_32; - ipfw_convert_from_latest(bp, (void *)rule_vers1, api_version, is64user); - rulesize_32 = sizeof(struct ip_fw_32) + ((struct ip_fw_32 *)(bp))->cmd_len * 4 - 4; - bp = (struct ip_fw *)((char *)bp + rulesize_32); - } - len += ipfwcompsize; - rule_vers1 += ipfwcompsize; - } - /* now do dynamic rules */ - if (is64user) { - cp_dyn_to_comp_64((struct ipfw_dyn_rule_compat_64 *)rule_vers1, &len); - } else { - cp_dyn_to_comp_32((struct ipfw_dyn_rule_compat_32 *)rule_vers1, &len); - } - - lck_mtx_unlock(ipfw_mutex); - error = sooptcopyout(sopt, buf2, len); - _FREE(buf2, M_TEMP); - } - } else { - error = sooptcopyout(sopt, buf, size); - } - - _FREE(buf, M_TEMP); - break; - } - - case IP_FW_FLUSH: - /* - * Normally we cannot release the lock on each iteration. - * We could do it here only because we start from the head all - * the times so there is no risk of missing some entries. - * On the other hand, the risk is that we end up with - * a very inconsistent ruleset, so better keep the lock - * around the whole cycle. - * - * XXX this code can be improved by resetting the head of - * the list to point to the default rule, and then freeing - * the old list without the need for a lock. - */ - - lck_mtx_lock(ipfw_mutex); - free_chain(&layer3_chain, 0 /* keep default rule */); - fw_bypass = 1; -#if DEBUG_INACTIVE_RULES - print_chain(&layer3_chain); -#endif - lck_mtx_unlock(ipfw_mutex); - break; - - case IP_FW_ADD: - { - size_t savedsopt_valsize = 0; - rule = _MALLOC(RULE_MAXSIZE, M_TEMP, M_WAITOK | M_ZERO); - if (rule == 0) { - error = ENOBUFS; - break; - } - - if (api_version != IP_FW_CURRENT_API_VERSION) { - error = ipfw_convert_to_latest(sopt, rule, api_version, is64user); - } else { - savedsopt_valsize = sopt->sopt_valsize; /* it might get modified in sooptcopyin_fw */ - error = sooptcopyin_fw( sopt, rule, &rulesize); - } - - if (!error) { - if ((api_version == IP_FW_VERSION_0) || (api_version == IP_FW_VERSION_1)) { - /* the rule has already been checked so just - * adjust sopt_valsize to match what would be expected. 
- */ - sopt->sopt_valsize = RULESIZE(rule); - rulesize = RULESIZE(rule); - } - error = check_ipfw_struct(rule, rulesize); - if (!error) { - lck_mtx_lock(ipfw_mutex); - error = add_rule(&layer3_chain, rule); - if (!error && fw_bypass) { - fw_bypass = 0; - } - lck_mtx_unlock(ipfw_mutex); - - size = RULESIZE(rule); - if (!error && sopt->sopt_dir == SOPT_GET) { - /* convert back if necessary and copyout */ - if (api_version == IP_FW_VERSION_0) { - struct ip_old_fw rule_vers0 = {}; - - ipfw_convert_from_latest(rule, &rule_vers0, api_version, is64user); - sopt->sopt_valsize = sizeof(struct ip_old_fw); - - error = sooptcopyout(sopt, &rule_vers0, sizeof(struct ip_old_fw)); - } else if (api_version == IP_FW_VERSION_1) { - struct ip_fw_compat rule_vers1 = {}; - ipfw_convert_from_latest(rule, &rule_vers1, api_version, is64user); - sopt->sopt_valsize = sizeof(struct ip_fw_compat); - - error = sooptcopyout(sopt, &rule_vers1, sizeof(struct ip_fw_compat)); - } else { - char *userrule; - userrule = _MALLOC(savedsopt_valsize, M_TEMP, M_WAITOK | M_ZERO); - if (userrule == NULL) { - userrule = (char*)rule; - } - if (proc_is64bit(sopt->sopt_p)) { - copyto64fw( rule, (struct ip_fw_64*)userrule, savedsopt_valsize); - } else { - copyto32fw( rule, (struct ip_fw_32*)userrule, savedsopt_valsize); - } - error = sooptcopyout(sopt, userrule, savedsopt_valsize); - if (userrule) { - _FREE(userrule, M_TEMP); - } - } - } - } - } - - _FREE(rule, M_TEMP); - break; - } - case IP_FW_DEL: - { - /* - * IP_FW_DEL is used for deleting single rules or sets, - * and (ab)used to atomically manipulate sets. - * rule->rulenum != 0 indicates single rule delete - * rule->set_masks used to manipulate sets - * rule->set_masks[0] contains info on sets to be - * disabled, swapped, or moved - * rule->set_masks[1] contains sets to be enabled. 
- */ - - /* there is only a simple rule passed in - * (no cmds), so use a temp struct to copy - */ - struct ip_fw temp_rule; - u_int32_t arg; - u_int8_t cmd; - - bzero(&temp_rule, sizeof(struct ip_fw)); - if (api_version != IP_FW_CURRENT_API_VERSION) { - error = ipfw_convert_to_latest(sopt, &temp_rule, api_version, is64user); - } else { - error = sooptcopyin_fw(sopt, &temp_rule, 0 ); - } - - if (!error) { - /* set_masks is used to distinguish between deleting - * single rules or atomically manipulating sets - */ - lck_mtx_lock(ipfw_mutex); - - arg = temp_rule.set_masks[0]; - cmd = (arg >> 24) & 0xff; - - if (temp_rule.rulenum) { - /* single rule */ - error = del_entry(&layer3_chain, temp_rule.rulenum); -#if DEBUG_INACTIVE_RULES - print_chain(&layer3_chain); -#endif - } else if (cmd) { - /* set reassignment - see comment above del_entry() for details */ - error = del_entry(&layer3_chain, temp_rule.set_masks[0]); -#if DEBUG_INACTIVE_RULES - print_chain(&layer3_chain); -#endif - } else if (temp_rule.set_masks[0] != 0 || - temp_rule.set_masks[1] != 0) { - /* set enable/disable */ - set_disable = - (set_disable | temp_rule.set_masks[0]) & ~temp_rule.set_masks[1] & - ~(1 << RESVD_SET); /* set RESVD_SET always enabled */ - } - - if (!layer3_chain->next) { - fw_bypass = 1; - } - lck_mtx_unlock(ipfw_mutex); - } - break; - } - case IP_FW_ZERO: - case IP_FW_RESETLOG: /* using rule->rulenum */ - { - /* there is only a simple rule passed in - * (no cmds), so use a temp struct to copy - */ - struct ip_fw temp_rule; - - bzero(&temp_rule, sizeof(struct ip_fw)); - - if (api_version != IP_FW_CURRENT_API_VERSION) { - error = ipfw_convert_to_latest(sopt, &temp_rule, api_version, is64user); - } else { - if (sopt->sopt_val != 0) { - error = sooptcopyin_fw( sopt, &temp_rule, 0); - } - } - - if (!error) { - lck_mtx_lock(ipfw_mutex); - error = zero_entry(temp_rule.rulenum, sopt->sopt_name == IP_FW_RESETLOG); - lck_mtx_unlock(ipfw_mutex); - } - break; - } - default: - printf("ipfw: ipfw_ctl invalid option %d\n", sopt->sopt_name); - error = EINVAL; - } - - if (error != EINVAL) { - switch (command) { - case IP_FW_ADD: - case IP_OLD_FW_ADD: - ipfw_kev_post_msg(KEV_IPFW_ADD); - break; - case IP_OLD_FW_DEL: - case IP_FW_DEL: - ipfw_kev_post_msg(KEV_IPFW_DEL); - break; - case IP_FW_FLUSH: - case IP_OLD_FW_FLUSH: - ipfw_kev_post_msg(KEV_IPFW_FLUSH); - break; - - default: - break; - } - } - - return error; -} - -/** - * dummynet needs a reference to the default rule, because rules can be - * deleted while packets hold a reference to them. When this happens, - * dummynet changes the reference to the default rule (it could well be a - * NULL pointer, but this way we do not need to check for the special - * case, plus here he have info on the default behaviour). - */ -struct ip_fw *ip_fw_default_rule; - -/* - * This procedure is only used to handle keepalives. 
It is invoked - * every dyn_keepalive_period - */ -static void -ipfw_tick(__unused void * unused) -{ - struct mbuf *m0, *m, *mnext, **mtailp; - int i; - ipfw_dyn_rule *q; - struct timeval timenow; - static int stealth_cnt = 0; - - if (ipfw_stealth_stats_needs_flush) { - stealth_cnt++; - if (!(stealth_cnt % IPFW_STEALTH_TIMEOUT_FREQUENCY)) { - ipfw_stealth_flush_stats(); - } - } - - if (dyn_keepalive == 0 || ipfw_dyn_v == NULL || dyn_count == 0) { - goto done; - } - - getmicrotime(&timenow); - - /* - * We make a chain of packets to go out here -- not deferring - * until after we drop the ipfw lock would result - * in a lock order reversal with the normal packet input -> ipfw - * call stack. - */ - m0 = NULL; - mtailp = &m0; - - lck_mtx_lock(ipfw_mutex); - for (i = 0; i < curr_dyn_buckets; i++) { - for (q = ipfw_dyn_v[i]; q; q = q->next) { - if (q->dyn_type == O_LIMIT_PARENT) { - continue; - } - if (q->id.proto != IPPROTO_TCP) { - continue; - } - if ((q->state & BOTH_SYN) != BOTH_SYN) { - continue; - } - if (TIME_LEQ( timenow.tv_sec + dyn_keepalive_interval, - q->expire)) { - continue; /* too early */ - } - if (TIME_LEQ(q->expire, timenow.tv_sec)) { - continue; /* too late, rule expired */ - } - *mtailp = send_pkt(&(q->id), q->ack_rev - 1, q->ack_fwd, TH_SYN); - if (*mtailp != NULL) { - mtailp = &(*mtailp)->m_nextpkt; - } - - *mtailp = send_pkt(&(q->id), q->ack_fwd - 1, q->ack_rev, 0); - if (*mtailp != NULL) { - mtailp = &(*mtailp)->m_nextpkt; - } - } - } - lck_mtx_unlock(ipfw_mutex); - - for (m = mnext = m0; m != NULL; m = mnext) { - struct route sro; /* fake route */ - - mnext = m->m_nextpkt; - m->m_nextpkt = NULL; - bzero(&sro, sizeof(sro)); - ip_output(m, NULL, &sro, 0, NULL, NULL); - ROUTE_RELEASE(&sro); - } -done: - timeout_with_leeway(ipfw_tick, NULL, dyn_keepalive_period * hz, - DYN_KEEPALIVE_LEEWAY * hz); -} - -void -ipfw_init(void) -{ - struct ip_fw default_rule; - - /* setup locks */ - ipfw_mutex_grp_attr = lck_grp_attr_alloc_init(); - ipfw_mutex_grp = lck_grp_alloc_init("ipfw", ipfw_mutex_grp_attr); - ipfw_mutex_attr = lck_attr_alloc_init(); - lck_mtx_init(ipfw_mutex, ipfw_mutex_grp, ipfw_mutex_attr); - - layer3_chain = NULL; - - bzero(&default_rule, sizeof default_rule); - - default_rule.act_ofs = 0; - default_rule.rulenum = IPFW_DEFAULT_RULE; - default_rule.cmd_len = 1; - default_rule.set = RESVD_SET; - - default_rule.cmd[0].len = 1; - default_rule.cmd[0].opcode = -#ifdef IPFIREWALL_DEFAULT_TO_ACCEPT - (1) ? O_ACCEPT : -#endif - O_DENY; - - if (add_rule(&layer3_chain, &default_rule)) { - printf("ipfw2: add_rule failed adding default rule\n"); - printf("ipfw2 failed initialization!!\n"); - fw_enable = 0; - } else { - ip_fw_default_rule = layer3_chain; - - #ifdef IPFIREWALL_VERBOSE - fw_verbose = 1; - #endif - #ifdef IPFIREWALL_VERBOSE_LIMIT - verbose_limit = IPFIREWALL_VERBOSE_LIMIT; - #endif - if (fw_verbose) { - if (!verbose_limit) { - printf("ipfw2 verbose logging enabled: unlimited logging by default\n"); - } else { - printf("ipfw2 verbose logging enabled: limited to %d packets/entry by default\n", - verbose_limit); - } - } - } - - ip_fw_chk_ptr = ipfw_chk; - ip_fw_ctl_ptr = ipfw_ctl; - - ipfwstringlen = strlen( ipfwstring ); - - timeout(ipfw_tick, NULL, hz); -} - -#endif /* IPFW2 */ diff --git a/bsd/netinet/ip_fw2.h b/bsd/netinet/ip_fw2.h deleted file mode 100644 index 6137ac792..000000000 --- a/bsd/netinet/ip_fw2.h +++ /dev/null @@ -1,655 +0,0 @@ -/* - * Copyright (c) 2008-2013 Apple Inc. All rights reserved. 
- * - * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * - * This file contains Original Code and/or Modifications of Original Code - * as defined in and that are subject to the Apple Public Source License - * Version 2.0 (the 'License'). You may not use this file except in - * compliance with the License. The rights granted to you under the License - * may not be used to create, or enable the creation or redistribution of, - * unlawful or unlicensed copies of an Apple operating system, or to - * circumvent, violate, or enable the circumvention or violation of, any - * terms of an Apple operating system software license agreement. - * - * Please obtain a copy of the License at - * http://www.opensource.apple.com/apsl/ and read it before using this file. - * - * The Original Code and all software distributed under the License are - * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER - * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, - * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. - * Please see the License for the specific language governing rights and - * limitations under the License. - * - * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ - */ - -/* - * Copyright (c) 2002 Luigi Rizzo, Universita` di Pisa - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - * - * $FreeBSD: src/sys/netinet/ip_fw2.h,v 1.1.2.4 2003/07/17 06:03:39 luigi Exp $ - */ - -#ifndef _IPFW2_H -#define _IPFW2_H -#ifdef __APPLE_API_OBSOLETE - -/* - * Define IP Firewall event subclass, and associated events. - */ - -/*! - * @defined KEV_IPFW_SUBCLASS - * @discussion The kernel event subclass for IP Firewall. - */ -#define KEV_IPFW_SUBCLASS 1 - -/*! - * @defined KEV_IPFW_ADD - * @discussion The event code indicating a rule has been added. - */ -#define KEV_IPFW_ADD 1 - -/*! - * @defined KEV_IPFW_DEL - * @discussion The event code indicating a rule has been removed. - */ -#define KEV_IPFW_DEL 2 - -/*! - * @defined KEV_IPFW_FLUSH - * @discussion The event code indicating the rule set has been flushed. - */ -#define KEV_IPFW_FLUSH 3 - -/*! 
- * @defined KEV_IPFW_ENABLE - * @discussion The event code indicating the enable flag has been changed - */ -#define KEV_IPFW_ENABLE 4 - - - -/* - * The kernel representation of ipfw rules is made of a list of - * 'instructions' (for all practical purposes equivalent to BPF - * instructions), which specify which fields of the packet - * (or its metadata) should be analysed. - * - * Each instruction is stored in a structure which begins with - * "ipfw_insn", and can contain extra fields depending on the - * instruction type (listed below). - * Note that the code is written so that individual instructions - * have a size which is a multiple of 32 bits. This means that, if - * such structures contain pointers or other 64-bit entities, - * (there is just one instance now) they may end up unaligned on - * 64-bit architectures, so the must be handled with care. - * - * "enum ipfw_opcodes" are the opcodes supported. We can have up - * to 256 different opcodes. - */ - -enum ipfw_opcodes { /* arguments (4 byte each) */ - O_NOP, - - O_IP_SRC, /* u32 = IP */ - O_IP_SRC_MASK, /* ip = IP/mask */ - O_IP_SRC_ME, /* none */ - O_IP_SRC_SET, /* u32=base, arg1=len, bitmap */ - - O_IP_DST, /* u32 = IP */ - O_IP_DST_MASK, /* ip = IP/mask */ - O_IP_DST_ME, /* none */ - O_IP_DST_SET, /* u32=base, arg1=len, bitmap */ - - O_IP_SRCPORT, /* (n)port list:mask 4 byte ea */ - O_IP_DSTPORT, /* (n)port list:mask 4 byte ea */ - O_PROTO, /* arg1=protocol */ - - O_MACADDR2, /* 2 mac addr:mask */ - O_MAC_TYPE, /* same as srcport */ - - O_LAYER2, /* none */ - O_IN, /* none */ - O_FRAG, /* none */ - - O_RECV, /* none */ - O_XMIT, /* none */ - O_VIA, /* none */ - - O_IPOPT, /* arg1 = 2*u8 bitmap */ - O_IPLEN, /* arg1 = len */ - O_IPID, /* arg1 = id */ - - O_IPTOS, /* arg1 = id */ - O_IPPRECEDENCE, /* arg1 = precedence << 5 */ - O_IPTTL, /* arg1 = TTL */ - - O_IPVER, /* arg1 = version */ - O_UID, /* u32 = id */ - O_GID, /* u32 = id */ - O_ESTAB, /* none (tcp established) */ - O_TCPFLAGS, /* arg1 = 2*u8 bitmap */ - O_TCPWIN, /* arg1 = desired win */ - O_TCPSEQ, /* u32 = desired seq. */ - O_TCPACK, /* u32 = desired seq. */ - O_ICMPTYPE, /* u32 = icmp bitmap */ - O_TCPOPTS, /* arg1 = 2*u8 bitmap */ - - O_VERREVPATH, /* none */ - - O_PROBE_STATE, /* none */ - O_KEEP_STATE, /* none */ - O_LIMIT, /* ipfw_insn_limit */ - O_LIMIT_PARENT, /* dyn_type, not an opcode. */ - - /* - * These are really 'actions'. - */ - - O_LOG, /* ipfw_insn_log */ - O_PROB, /* u32 = match probability */ - - O_CHECK_STATE, /* none */ - O_ACCEPT, /* none */ - O_DENY, /* none */ - O_REJECT, /* arg1=icmp arg (same as deny) */ - O_COUNT, /* none */ - O_SKIPTO, /* arg1=next rule number */ - O_PIPE, /* arg1=pipe number */ - O_QUEUE, /* arg1=queue number */ - O_DIVERT, /* arg1=port number */ - O_TEE, /* arg1=port number */ - O_FORWARD_IP, /* fwd sockaddr */ - O_FORWARD_MAC, /* fwd mac */ - - /* - * More opcodes. - */ - O_IPSEC, /* has ipsec history */ - - O_LAST_OPCODE /* not an opcode! */ -}; - -/* - * Template for instructions. - * - * ipfw_insn is used for all instructions which require no operands, - * a single 16-bit value (arg1), or a couple of 8-bit values. - * - * For other instructions which require different/larger arguments - * we have derived structures, ipfw_insn_*. - * - * The size of the instruction (in 32-bit words) is in the low - * 6 bits of "len". The 2 remaining bits are used to implement - * NOT and OR on individual instructions. 
Given a type, you can - * compute the length to be put in "len" using F_INSN_SIZE(t) - * - * F_NOT negates the match result of the instruction. - * - * F_OR is used to build or blocks. By default, instructions - * are evaluated as part of a logical AND. An "or" block - * { X or Y or Z } contains F_OR set in all but the last - * instruction of the block. A match will cause the code - * to skip past the last instruction of the block. - * - * NOTA BENE: in a couple of places we assume that - * sizeof(ipfw_insn) == sizeof(u_int32_t) - * this needs to be fixed. - * - */ -typedef struct _ipfw_insn { /* template for instructions */ - enum ipfw_opcodes opcode:8; - u_int8_t len; /* numer of 32-byte words */ -#define F_NOT 0x80 -#define F_OR 0x40 -#define F_LEN_MASK 0x3f -#define F_LEN(cmd) ((cmd)->len & F_LEN_MASK) - - u_int16_t arg1; -} ipfw_insn; - -/* - * The F_INSN_SIZE(type) computes the size, in 4-byte words, of - * a given type. - */ -#define F_INSN_SIZE(t) ((sizeof (t))/sizeof(u_int32_t)) - -/* - * This is used to store an array of 16-bit entries (ports etc.) - */ -typedef struct _ipfw_insn_u16 { - ipfw_insn o; - u_int16_t ports[2]; /* there may be more */ -} ipfw_insn_u16; - -/* - * This is used to store an array of 32-bit entries - * (uid, single IPv4 addresses etc.) - */ -typedef struct _ipfw_insn_u32 { - ipfw_insn o; - u_int32_t d[1]; /* one or more */ -} ipfw_insn_u32; - -/* - * This is used to store IP addr-mask pairs. - */ -typedef struct _ipfw_insn_ip { - ipfw_insn o; - struct in_addr addr; - struct in_addr mask; -} ipfw_insn_ip; - -/* - * This is used to forward to a given address (ip). - */ -typedef struct _ipfw_insn_sa { - ipfw_insn o; - struct sockaddr_in sa; -} ipfw_insn_sa; - -/* - * This is used for MAC addr-mask pairs. - */ -typedef struct _ipfw_insn_mac { - ipfw_insn o; - u_char addr[12]; /* dst[6] + src[6] */ - u_char mask[12]; /* dst[6] + src[6] */ -} ipfw_insn_mac; - -/* - * This is used for interface match rules (recv xx, xmit xx). - */ -typedef struct _ipfw_insn_if { - ipfw_insn o; - union { - struct in_addr ip; - int32_t unit; - } p; - char name[IFNAMSIZ]; -} ipfw_insn_if; - -/* - * This is used for pipe and queue actions, which need to store - * a single pointer (which can have different size on different - * architectures. - * Note that, because of previous instructions, pipe_ptr might - * be unaligned in the overall structure, so it needs to be - * manipulated with care. - */ -typedef struct _ipfw_insn_pipe { - ipfw_insn o; - void *pipe_ptr; /* XXX */ -} ipfw_insn_pipe; - -/* - * This is used for limit rules. - */ -typedef struct _ipfw_insn_limit { - ipfw_insn o; - u_int8_t _pad; - u_int8_t limit_mask; /* combination of DYN_* below */ -#define DYN_SRC_ADDR 0x1 -#define DYN_SRC_PORT 0x2 -#define DYN_DST_ADDR 0x4 -#define DYN_DST_PORT 0x8 - - u_int16_t conn_limit; -} ipfw_insn_limit; - -/* - * This is used for log instructions. - */ -typedef struct _ipfw_insn_log { - ipfw_insn o; - u_int32_t max_log; /* how many do we log -- 0 = all */ - u_int32_t log_left; /* how many left to log */ -} ipfw_insn_log; - -/* Version of this API */ -#define IP_FW_VERSION_NONE 0 -#define IP_FW_VERSION_0 10 /* old ipfw */ -#define IP_FW_VERSION_1 20 /* ipfw in Jaguar/Panther */ -#define IP_FW_VERSION_2 30 /* ipfw2 */ -#define IP_FW_CURRENT_API_VERSION IP_FW_VERSION_2 - -/* - * Here we have the structure representing an ipfw rule. 
- * - * It starts with a general area (with link fields and counters) - * followed by an array of one or more instructions, which the code - * accesses as an array of 32-bit values. - * - * Given a rule pointer r: - * - * r->cmd is the start of the first instruction. - * ACTION_PTR(r) is the start of the first action (things to do - * once a rule matched). - * - * When assembling instruction, remember the following: - * - * + if a rule has a "keep-state" (or "limit") option, then the - * first instruction (at r->cmd) MUST BE an O_PROBE_STATE - * + if a rule has a "log" option, then the first action - * (at ACTION_PTR(r)) MUST be O_LOG - * - * NOTE: we use a simple linked list of rules because we never need - * to delete a rule without scanning the list. We do not use - * queue(3) macros for portability and readability. - */ - -struct ip_fw { - u_int32_t version; /* Version of this structure. MUST be set */ - /* by clients. Should always be */ - /* set to IP_FW_CURRENT_API_VERSION. */ - void *context; /* Context that is usable by user processes to */ - /* identify this rule. */ - struct ip_fw *next; /* linked list of rules */ - struct ip_fw *next_rule; /* ptr to next [skipto] rule */ - /* 'next_rule' is used to pass up 'set_disable' status */ - - u_int16_t act_ofs; /* offset of action in 32-bit units */ - u_int16_t cmd_len; /* # of 32-bit words in cmd */ - u_int16_t rulenum; /* rule number */ - u_int8_t set; /* rule set (0..31) */ - u_int32_t set_masks[2]; /* masks for manipulating sets atomically */ -#define RESVD_SET 31 /* set for default and persistent rules */ - u_int8_t _pad; /* padding */ - - /* These fields are present in all rules. */ - u_int64_t pcnt; /* Packet counter */ - u_int64_t bcnt; /* Byte counter */ - u_int32_t timestamp; /* tv_sec of last match */ - - u_int32_t reserved_1; /* reserved - set to 0 */ - u_int32_t reserved_2; /* reserved - set to 0 */ - - ipfw_insn cmd[1]; /* storage for commands */ -}; - -#define ACTION_PTR(rule) \ - (ipfw_insn *)( (u_int32_t *)((rule)->cmd) + ((rule)->act_ofs) ) - -#define RULESIZE(rule) (sizeof(struct ip_fw) + \ - ((struct ip_fw *)(rule))->cmd_len * 4 - 4) - -/* - * This structure is used as a flow mask and a flow id for various - * parts of the code. - */ -struct ipfw_flow_id { - u_int32_t dst_ip; - u_int32_t src_ip; - u_int16_t dst_port; - u_int16_t src_port; - u_int8_t proto; - u_int8_t flags; /* protocol-specific flags */ -}; - -/* - * Dynamic ipfw rule. - */ -typedef struct _ipfw_dyn_rule ipfw_dyn_rule; - -#ifdef XNU_KERNEL_PRIVATE - -#include - -/* - * Note: - * The internal version of "struct _ipfw_dyn_rule" differs from - * its external version because the field "id" is of type - * "struct ip_flow_id" in the internal version. The type of the - * field "id" for the external version is "ipfw_dyn_rule for - * backwards compatibility reasons. - */ - -struct _ipfw_dyn_rule { - ipfw_dyn_rule *next; /* linked list of rules. 
*/ - struct ip_fw *rule; /* pointer to rule */ - /* 'rule' is used to pass up the rule number (from the parent) */ - - ipfw_dyn_rule *parent; /* pointer to parent rule */ - u_int64_t pcnt; /* packet match counter */ - u_int64_t bcnt; /* byte match counter */ - struct ip_flow_id id; /* (masked) flow id */ - u_int32_t expire; /* expire time */ - u_int32_t bucket; /* which bucket in hash table */ - u_int32_t state; /* state of this rule (typically a - * combination of TCP flags) - */ - u_int32_t ack_fwd; /* most recent ACKs in forward */ - u_int32_t ack_rev; /* and reverse directions (used */ - /* to generate keepalives) */ - u_int16_t dyn_type; /* rule type */ - u_int16_t count; /* refcount */ -}; -#else /* XNU_KERNEL_PRIVATE */ -struct _ipfw_dyn_rule { - ipfw_dyn_rule *next; /* linked list of rules. */ - struct ip_fw *rule; /* pointer to rule */ - /* 'rule' is used to pass up the rule number (from the parent) */ - - ipfw_dyn_rule *parent; /* pointer to parent rule */ - u_int64_t pcnt; /* packet match counter */ - u_int64_t bcnt; /* byte match counter */ - struct ipfw_flow_id id; /* (masked) flow id */ - u_int32_t expire; /* expire time */ - u_int32_t bucket; /* which bucket in hash table */ - u_int32_t state; /* state of this rule (typically a - * combination of TCP flags) - */ - u_int32_t ack_fwd; /* most recent ACKs in forward */ - u_int32_t ack_rev; /* and reverse directions (used */ - /* to generate keepalives) */ - u_int16_t dyn_type; /* rule type */ - u_int16_t count; /* refcount */ -}; -#endif /* XNU_KERNEL_PRIVATE */ - -/* - * Definitions for IP option names. - */ -#define IP_FW_IPOPT_LSRR 0x01 -#define IP_FW_IPOPT_SSRR 0x02 -#define IP_FW_IPOPT_RR 0x04 -#define IP_FW_IPOPT_TS 0x08 - -/* - * Definitions for TCP option names. - */ -#define IP_FW_TCPOPT_MSS 0x01 -#define IP_FW_TCPOPT_WINDOW 0x02 -#define IP_FW_TCPOPT_SACK 0x04 -#define IP_FW_TCPOPT_TS 0x08 -#define IP_FW_TCPOPT_CC 0x10 - -#define ICMP_REJECT_RST 0x100 /* fake ICMP code (send a TCP RST) */ - -/* - * Main firewall chains definitions and global var's definitions. - */ -#ifdef BSD_KERNEL_PRIVATE - -#pragma pack(4) -struct ip_fw_32 { - u_int32_t version; /* Version of this structure. MUST be set */ - /* by clients. Should always be */ - /* set to IP_FW_CURRENT_API_VERSION. */ - user32_addr_t context; /* Context that is usable by user processes to */ - /* identify this rule. */ - user32_addr_t next; /* linked list of rules */ - user32_addr_t next_rule;/* ptr to next [skipto] rule */ - /* 'next_rule' is used to pass up 'set_disable' status */ - - u_int16_t act_ofs; /* offset of action in 32-bit units */ - u_int16_t cmd_len; /* # of 32-bit words in cmd */ - u_int16_t rulenum; /* rule number */ - u_int8_t set; /* rule set (0..31) */ - u_int32_t set_masks[2]; /* masks for manipulating sets atomically */ -#define RESVD_SET 31 /* set for default and persistent rules */ - u_int8_t _pad; /* padding */ - - /* These fields are present in all rules. */ - u_int64_t pcnt; /* Packet counter */ - u_int64_t bcnt; /* Byte counter */ - u_int32_t timestamp; /* tv_sec of last match */ - - u_int32_t reserved_1; /* reserved - set to 0 */ - u_int32_t reserved_2; /* reserved - set to 0 */ - - ipfw_insn cmd[1]; /* storage for commands */ -}; - -#pragma pack() - -struct ip_fw_64 { - u_int32_t version; /* Version of this structure. MUST be set */ - /* by clients. Should always be */ - /* set to IP_FW_CURRENT_API_VERSION. */ - __uint64_t context __attribute__((aligned(8))); /* Context that is usable by user processes to */ - /* identify this rule. 
*/ - user64_addr_t next; /* linked list of rules */ - user64_addr_t next_rule; /* ptr to next [skipto] rule */ - /* 'next_rule' is used to pass up 'set_disable' status */ - - u_int16_t act_ofs; /* offset of action in 32-bit units */ - u_int16_t cmd_len; /* # of 32-bit words in cmd */ - u_int16_t rulenum; /* rule number */ - u_int8_t set; /* rule set (0..31) */ - u_int32_t set_masks[2]; /* masks for manipulating sets atomically */ -#define RESVD_SET 31 /* set for default and persistent rules */ - u_int8_t _pad; /* padding */ - - /* These fields are present in all rules. */ - u_int64_t pcnt __attribute__((aligned(8))); /* Packet counter */ - u_int64_t bcnt __attribute__((aligned(8))); /* Byte counter */ - u_int32_t timestamp; /* tv_sec of last match */ - - u_int32_t reserved_1; /* reserved - set to 0 */ - u_int32_t reserved_2; /* reserved - set to 0 */ - - ipfw_insn cmd[1]; /* storage for commands */ -}; - - -typedef struct _ipfw_dyn_rule_64 ipfw_dyn_rule_64; -typedef struct _ipfw_dyn_rule_32 ipfw_dyn_rule_32; - -#pragma pack(4) -struct _ipfw_dyn_rule_32 { - user32_addr_t next; /* linked list of rules. */ - user32_addr_t rule; /* pointer to rule */ - /* 'rule' is used to pass up the rule number (from the parent) */ - - user32_addr_t parent; /* pointer to parent rule */ - u_int64_t pcnt; /* packet match counter */ - u_int64_t bcnt; /* byte match counter */ - struct ipfw_flow_id id; /* (masked) flow id */ - u_int32_t expire; /* expire time */ - u_int32_t bucket; /* which bucket in hash table */ - u_int32_t state; /* state of this rule (typically a - * combination of TCP flags) - */ - u_int32_t ack_fwd; /* most recent ACKs in forward */ - u_int32_t ack_rev; /* and reverse directions (used */ - /* to generate keepalives) */ - u_int16_t dyn_type; /* rule type */ - u_int16_t count; /* refcount */ -}; - -#pragma pack() - -struct _ipfw_dyn_rule_64 { - user64_addr_t next; /* linked list of rules. */ - user64_addr_t rule; /* pointer to rule */ - /* 'rule' is used to pass up the rule number (from the parent) */ - - user64_addr_t parent; /* pointer to parent rule */ - u_int64_t pcnt; /* packet match counter */ - u_int64_t bcnt; /* byte match counter */ - struct ipfw_flow_id id; /* (masked) flow id */ - u_int32_t expire; /* expire time */ - u_int32_t bucket; /* which bucket in hash table */ - u_int32_t state; /* state of this rule (typically a - * combination of TCP flags) - */ - u_int32_t ack_fwd; /* most recent ACKs in forward */ - u_int32_t ack_rev; /* and reverse directions (used */ - /* to generate keepalives) */ - u_int16_t dyn_type; /* rule type */ - u_int16_t count; /* refcount */ -}; - - -typedef struct _ipfw_insn_pipe_64 { - ipfw_insn o; - user64_addr_t pipe_ptr; /* XXX */ -} ipfw_insn_pipe_64; - -typedef struct _ipfw_insn_pipe_32 { - ipfw_insn o; - user32_addr_t pipe_ptr; /* XXX */ -} ipfw_insn_pipe_32; - - -#define IPFW_DEFAULT_RULE 65535 - -#if IPFIREWALL - -#define IP_FW_PORT_DYNT_FLAG 0x10000 -#define IP_FW_PORT_TEE_FLAG 0x20000 -#define IP_FW_PORT_DENY_FLAG 0x40000 - -#ifdef PRIVATE -#include -#else -struct ip_fw_args; -#endif -/* - * Function definitions. 
- */ - -/* Firewall hooks */ -struct sockopt; -struct dn_flow_set; - -void flush_pipe_ptrs(struct dn_flow_set *match); /* used by dummynet */ -void ipfw_init(void); /* called from raw_ip.c: load_ipfw() */ - -typedef int ip_fw_chk_t (struct ip_fw_args *args); -typedef int ip_fw_ctl_t (struct sockopt *); -extern ip_fw_chk_t *ip_fw_chk_ptr; -extern ip_fw_ctl_t *ip_fw_ctl_ptr; -extern int fw_one_pass; -extern int fw_enable; -#define IPFW_LOADED (ip_fw_chk_ptr != NULL) -#endif /* IPFIREWALL */ -#endif /* BSD_KERNEL_PRIVATE */ - -#endif /* __APPLE_API_OBSOLETE */ -#endif /* _IPFW2_H */ diff --git a/bsd/netinet/ip_fw2_compat.c b/bsd/netinet/ip_fw2_compat.c deleted file mode 100644 index 7360f9668..000000000 --- a/bsd/netinet/ip_fw2_compat.c +++ /dev/null @@ -1,3325 +0,0 @@ -/* - * Copyright (c) 2004-2019 Apple Inc. All rights reserved. - * - * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * - * This file contains Original Code and/or Modifications of Original Code - * as defined in and that are subject to the Apple Public Source License - * Version 2.0 (the 'License'). You may not use this file except in - * compliance with the License. The rights granted to you under the License - * may not be used to create, or enable the creation or redistribution of, - * unlawful or unlicensed copies of an Apple operating system, or to - * circumvent, violate, or enable the circumvention or violation of, any - * terms of an Apple operating system software license agreement. - * - * Please obtain a copy of the License at - * http://www.opensource.apple.com/apsl/ and read it before using this file. - * - * The Original Code and all software distributed under the License are - * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER - * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, - * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. - * Please see the License for the specific language governing rights and - * limitations under the License. - * - * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ - */ -/* IPFW2 Backward Compatibility */ - -/* Convert to and from IPFW2 structures. */ - -#include -#include -#include -#include -#include - -#include - -#include -#include -#include -#include -#include -#include -#include - -#include "ip_fw2_compat.h" - -#define FW2_DEBUG_VERBOSE 0 - -/* - * _s_x is a structure that stores a string <-> token pairs, used in - * various places in the parser. Entries are stored in arrays, - * with an entry with s=NULL as terminator. - * The search routines are match_token() and match_value(). - * Often, an element with x=0 contains an error string. 
- * - */ -struct _s_x { - char const *s; - int x; -}; - -#define NO_VERSION_STR "IP_FW_VERSION_NONE" -#define VERSION_ZERO_STR "IP_FW_VERSION_0" -#define VERSION_ONE_STR "IP_FW_VERSION_1" -#define CURRENT_API_VERSION_STR "IP_FW_CURRENT_API_VERSION" - -#if FW2_DEBUG_VERBOSE - -static struct _s_x f_tcpflags[] = { - { .s = "syn", .x = TH_SYN }, - { .s = "fin", .x = TH_FIN }, - { .s = "ack", .x = TH_ACK }, - { .s = "psh", .x = TH_PUSH }, - { .s = "rst", .x = TH_RST }, - { .s = "urg", .x = TH_URG }, - { .s = "tcp flag", .x = 0 }, - { .s = NULL, .x = 0 } -}; - -static struct _s_x f_tcpopts[] = { - { .s = "mss", .x = IP_FW_TCPOPT_MSS }, - { .s = "maxseg", .x = IP_FW_TCPOPT_MSS }, - { .s = "window", .x = IP_FW_TCPOPT_WINDOW }, - { .s = "sack", .x = IP_FW_TCPOPT_SACK }, - { .s = "ts", .x = IP_FW_TCPOPT_TS }, - { .s = "timestamp", .x = IP_FW_TCPOPT_TS }, - { .s = "cc", .x = IP_FW_TCPOPT_CC }, - { .s = "tcp option", .x = 0 }, - { .s = NULL, .x = 0 } -}; - - -/* - * IP options span the range 0 to 255 so we need to remap them - * (though in fact only the low 5 bits are significant). - */ -static struct _s_x f_ipopts[] = { - { .s = "ssrr", .x = IP_FW_IPOPT_SSRR}, - { .s = "lsrr", .x = IP_FW_IPOPT_LSRR}, - { .s = "rr", .x = IP_FW_IPOPT_RR}, - { .s = "ts", .x = IP_FW_IPOPT_TS}, - { .s = "ip option", .x = 0 }, - { .s = NULL, .x = 0 } -}; - -static struct _s_x f_iptos[] = { - { .s = "lowdelay", .x = IPTOS_LOWDELAY}, - { .s = "throughput", .x = IPTOS_THROUGHPUT}, - { .s = "reliability", .x = IPTOS_RELIABILITY}, - { .s = "mincost", .x = IPTOS_MINCOST}, - { .s = "congestion", .x = IPTOS_CE}, - { .s = "ecntransport", .x = IPTOS_ECT}, - { .s = "ip tos option", .x = 0}, - { .s = NULL, .x = 0 } -}; - -static struct _s_x limit_masks[] = { - { .s = "all", .x = DYN_SRC_ADDR | DYN_SRC_PORT | DYN_DST_ADDR | DYN_DST_PORT}, - { .s = "src-addr", .x = DYN_SRC_ADDR}, - { .s = "src-port", .x = DYN_SRC_PORT}, - { .s = "dst-addr", .x = DYN_DST_ADDR}, - { .s = "dst-port", .x = DYN_DST_PORT}, - { .s = NULL, .x = 0} -}; - -#endif /* !FW2_DEBUG_VERBOSE */ - -#if 0 /* version #1 */ - -static void -ipfw_print_fw_flags(u_int flags) -{ - /* print action */ - switch (flags & IP_FW_F_COMMAND_COMPAT) { - case IP_FW_F_ACCEPT_COMPAT: - printf("IP_FW_F_ACCEPT_COMPAT\n"); - break; - case IP_FW_F_COUNT_COMPAT: - printf("IP_FW_F_COUNT_COMPAT\n"); - break; - case IP_FW_F_PIPE_COMPAT: - printf("IP_FW_F_PIPE_COMPAT\n"); - break; - case IP_FW_F_QUEUE_COMPAT: - printf("IP_FW_F_QUEUE_COMPAT\n"); - break; - case IP_FW_F_SKIPTO_COMPAT: - printf("IP_FW_F_SKIPTO_COMPAT\n"); - break; - case IP_FW_F_DIVERT_COMPAT: - printf("IP_FW_F_DIVERT_COMPAT\n"); - break; - case IP_FW_F_TEE_COMPAT: - printf("IP_FW_F_TEE_COMPAT\n"); - break; - case IP_FW_F_FWD_COMPAT: - printf("IP_FW_F_FWD_COMPAT\n"); - break; - case IP_FW_F_DENY_COMPAT: - printf("IP_FW_F_DENY_COMPAT\n"); - break; - case IP_FW_F_REJECT_COMPAT: - printf("IP_FW_F_REJECT_COMPAT\n"); - break; - case IP_FW_F_CHECK_S_COMPAT: - printf("IP_FW_F_CHECK_S_COMPAT\n"); - break; - default: - printf("No action given\n"); - break; - } - - /* print commands */ - if (flags & IP_FW_F_IN_COMPAT) { - printf("IP_FW_F_IN_COMPAT\n"); - } - if (flags & IP_FW_F_OUT_COMPAT) { - printf("IP_FW_F_OUT_COMPAT\n"); - } - if (flags & IP_FW_F_IIFACE_COMPAT) { - printf("IP_FW_F_IIFACE_COMPAT\n"); - } - if (flags & IP_FW_F_OIFACE_COMPAT) { - printf("IP_FW_F_OIFACE_COMPAT\n"); - } - if (flags & IP_FW_F_PRN_COMPAT) { - printf("IP_FW_F_PRN_COMPAT\n"); - } - if (flags & IP_FW_F_SRNG_COMPAT) { - printf("IP_FW_F_SRNG_COMPAT\n"); - } - if (flags 
& IP_FW_F_DRNG_COMPAT) { - printf("IP_FW_F_DRNG_COMPAT\n"); - } - if (flags & IP_FW_F_FRAG_COMPAT) { - printf("IP_FW_F_FRAG_COMPAT\n"); - } - if (flags & IP_FW_F_IIFNAME_COMPAT) { - printf("IP_FW_F_IIFNAME_COMPAT\n"); - } - if (flags & IP_FW_F_OIFNAME_COMPAT) { - printf("IP_FW_F_OIFNAME_COMPAT\n"); - } - if (flags & IP_FW_F_INVSRC_COMPAT) { - printf("IP_FW_F_INVSRC_COMPAT\n"); - } - if (flags & IP_FW_F_INVDST_COMPAT) { - printf("IP_FW_F_INVDST_COMPAT\n"); - } - if (flags & IP_FW_F_ICMPBIT_COMPAT) { - printf("IP_FW_F_ICMPBIT_COMPAT\n"); - } - if (flags & IP_FW_F_UID_COMPAT) { - printf("IP_FW_F_UID_COMPAT\n"); - } - if (flags & IP_FW_F_RND_MATCH_COMPAT) { - printf("IP_FW_F_RND_MATCH_COMPAT\n"); - } - if (flags & IP_FW_F_SMSK_COMPAT) { - printf("IP_FW_F_SMSK_COMPAT\n"); - } - if (flags & IP_FW_F_DMSK_COMPAT) { - printf("IP_FW_F_DMSK_COMPAT\n"); - } - if (flags & IP_FW_BRIDGED_COMPAT) { - printf("IP_FW_BRIDGED_COMPAT\n"); - } - if (flags & IP_FW_F_KEEP_S_COMPAT) { - printf("IP_FW_F_KEEP_S_COMPAT\n"); - } - if (flags & IP_FW_F_CHECK_S_COMPAT) { - printf("IP_FW_F_CHECK_S_COMPAT\n"); - } - if (flags & IP_FW_F_SME_COMPAT) { - printf("IP_FW_F_SME_COMPAT\n"); - } - if (flags & IP_FW_F_DME_COMPAT) { - printf("IP_FW_F_DME_COMPAT\n"); - } -} - -static void -print_fw_version(u_int32_t api_version) -{ - switch (api_version) { - case IP_FW_VERSION_0: - printf("Version: %s\n", VERSION_ZERO_STR); - break; - case IP_FW_VERSION_1: - printf("Version: %s\n", VERSION_ONE_STR); - break; - case IP_FW_CURRENT_API_VERSION: - printf("Version: %s\n", CURRENT_API_VERSION_STR); - break; - case IP_FW_VERSION_NONE: - printf("Version: %s\n", NO_VERSION_STR); - break; - default: - printf("Unrecognized version\n"); - break; - } -} - -static void -print_icmptypes(ipfw_insn_u32 *cmd) -{ - int i; - char sep = ' '; - - printf(" icmptypes"); - for (i = 0; i < 32; i++) { - if ((cmd->d[0] & (1 << (i))) == 0) { - continue; - } - printf("%c%d", sep, i); - sep = ','; - } -} - -/* - * print flags set/clear in the two bitmasks passed as parameters. - * There is a specialized check for f_tcpflags. - */ -static void -print_flags(char const *name, ipfw_insn *cmd, struct _s_x *list) -{ - char const *comma = ""; - int i; - uint8_t set = cmd->arg1 & 0xff; - uint8_t clear = (cmd->arg1 >> 8) & 0xff; - - if (list == f_tcpflags && set == TH_SYN && clear == TH_ACK) { - printf(" setup"); - return; - } - - printf(" %s ", name); - for (i = 0; list[i].x != 0; i++) { - if (set & list[i].x) { - set &= ~list[i].x; - printf("%s%s", comma, list[i].s); - comma = ","; - } - if (clear & list[i].x) { - clear &= ~list[i].x; - printf("%s!%s", comma, list[i].s); - comma = ","; - } - } -} - -static int -contigmask(uint8_t *p, int len) -{ - int i, n; - - for (i = 0; i < len; i++) { - if ((p[i / 8] & (1 << (7 - (i % 8)))) == 0) { /* first bit unset */ - break; - } - } - for (n = i + 1; n < len; n++) { - if ((p[n / 8] & (1 << (7 - (n % 8)))) != 0) { - return -1; /* mask not contiguous */ - } - } - return i; -} - -/* - * Print the ip address contained in a command. - */ -static void -print_ip(ipfw_insn_ip *cmd) -{ - int len = F_LEN((ipfw_insn *)cmd); - uint32_t *a = ((ipfw_insn_u32 *)cmd)->d; - char ipv4str[MAX_IPv4_STR_LEN]; - - printf("%s ", cmd->o.len & F_NOT ? " not": ""); - - if (cmd->o.opcode == O_IP_SRC_ME || cmd->o.opcode == O_IP_DST_ME) { - printf("me"); - return; - } - - /* - * len == 2 indicates a single IP, whereas lists of 1 or more - * addr/mask pairs have len = (2n+1). We convert len to n so we - * use that to count the number of entries. 
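An illustrative sketch (not part of this diff): the mask handling just described is easy to sanity-check in isolation. contigmask() turns a contiguous netmask into the prefix length that print_ip() prints as "/NN", returns 0 for an all-zero mask (printed as "any"), and -1 for a non-contiguous mask (printed as addr:mask). A standalone copy for experimentation:

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>                  /* htonl */

static int
contigmask(uint8_t *p, int len)
{
	int i, n;

	for (i = 0; i < len; i++) {
		if ((p[i / 8] & (1 << (7 - (i % 8)))) == 0) {   /* first bit unset */
			break;
		}
	}
	for (n = i + 1; n < len; n++) {
		if ((p[n / 8] & (1 << (7 - (n % 8)))) != 0) {
			return -1;      /* mask not contiguous */
		}
	}
	return i;
}

int
main(void)
{
	uint32_t m24 = htonl(0xffffff00);       /* 255.255.255.0 -> 24 */
	uint32_t mbad = htonl(0xff00ff00);      /* 255.0.255.0   -> -1 (not contiguous) */
	uint32_t mzero = 0;                     /* 0.0.0.0       ->  0 ("any") */

	printf("%d %d %d\n",
	    contigmask((uint8_t *)&m24, 32),
	    contigmask((uint8_t *)&mbad, 32),
	    contigmask((uint8_t *)&mzero, 32));
	return 0;
}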
- */ - for (len = len / 2; len > 0; len--, a += 2) { - int mb = /* mask length */ - (cmd->o.opcode == O_IP_SRC || cmd->o.opcode == O_IP_DST) ? - 32 : contigmask((uint8_t *)&(a[1]), 32); - if (mb == 0) { /* any */ - printf("any"); - } else { /* numeric IP followed by some kind of mask */ - printf("%s", inet_ntop(AF_INET, &a[0], ipv4str, sizeof(ipv4str))); - if (mb < 0) { - printf(":%s", inet_ntop(AF_INET, &a[1], ipv4str, sizeof(ipv4str))); - } else if (mb < 32) { - printf("/%d", mb); - } - } - if (len > 1) { - printf(","); - } - } -} - -/* - * prints a MAC address/mask pair - */ -static void -print_mac(uint8_t *addr, uint8_t *mask) -{ - int l = contigmask(mask, 48); - - if (l == 0) { - printf(" any"); - } else { - printf(" %02x:%02x:%02x:%02x:%02x:%02x", - addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]); - if (l == -1) { - printf("&%02x:%02x:%02x:%02x:%02x:%02x", - mask[0], mask[1], mask[2], - mask[3], mask[4], mask[5]); - } else if (l < 48) { - printf("/%d", l); - } - } -} - -#endif /* !version #1 */ - -#if FW2_DEBUG_VERBOSE -static void -ipfw_print_vers2_struct(struct ip_fw *vers2_rule) -{ - int l; - ipfw_insn *cmd; - ipfw_insn_log *logptr = NULL; - char ipv4str[MAX_IPv4_STR_LEN]; - - print_fw_version(vers2_rule->version); - - printf("act_ofs: %d\n", vers2_rule->act_ofs); - printf("cmd_len: %d\n", vers2_rule->cmd_len); - printf("rulenum: %d\n", vers2_rule->rulenum); - printf("set: %d\n", vers2_rule->set); - printf("pcnt: %llu\n", vers2_rule->pcnt); - printf("bcnt: %llu\n", vers2_rule->bcnt); - printf("timestamp: %d\n", vers2_rule->timestamp); - - /* - * first print actions - */ - for (l = vers2_rule->cmd_len - vers2_rule->act_ofs, cmd = ACTION_PTR(vers2_rule); - l > 0; l -= F_LEN(cmd), cmd += F_LEN(cmd)) { - switch (cmd->opcode) { - case O_CHECK_STATE: - printf("check-state"); - break; - - case O_ACCEPT: - printf("allow"); - break; - - case O_COUNT: - printf("count"); - break; - - case O_DENY: - printf("deny"); - break; - - case O_REJECT: - if (cmd->arg1 == ICMP_REJECT_RST) { - printf("reset"); - } else if (cmd->arg1 == ICMP_UNREACH_HOST) { - printf("reject"); - } else { - printf("unreach %u", cmd->arg1); - } - break; - - case O_SKIPTO: - printf("skipto %u", cmd->arg1); - break; - - case O_PIPE: - printf("pipe %u", cmd->arg1); - break; - - case O_QUEUE: - printf("queue %u", cmd->arg1); - break; - - case O_DIVERT: - printf("divert %u", cmd->arg1); - break; - - case O_TEE: - printf("tee %u", cmd->arg1); - break; - - case O_FORWARD_IP: - { - ipfw_insn_sa *s = (ipfw_insn_sa *)cmd; - - printf("fwd %s", - inet_ntop(AF_INET, &s->sa.sin_addr, ipv4str, - sizeof(ipv4str))); - if (s->sa.sin_port) { - printf(",%d", s->sa.sin_port); - } - break; - } - - case O_LOG: /* O_LOG is printed last */ - logptr = (ipfw_insn_log *)cmd; - break; - - default: - printf("** unrecognized action %d len %d", - cmd->opcode, cmd->len); - } - } - if (logptr) { - if (logptr->max_log > 0) { - printf(" log logamount %d", logptr->max_log); - } else { - printf(" log"); - } - } - - /* - * then print the body. 
- */ - for (l = vers2_rule->act_ofs, cmd = vers2_rule->cmd; - l > 0; l -= F_LEN(cmd), cmd += F_LEN(cmd)) { - /* useful alias */ - ipfw_insn_u32 *cmd32 = (ipfw_insn_u32 *)cmd; - - switch (cmd->opcode) { - case O_PROB: - break; /* done already */ - - case O_PROBE_STATE: - break; /* no need to print anything here */ - - case O_MACADDR2: - { - ipfw_insn_mac *m = (ipfw_insn_mac *)cmd; - - if (cmd->len & F_NOT) { - printf(" not"); - } - printf(" MAC"); - print_mac(m->addr, m->mask); - print_mac(m->addr + 6, m->mask + 6); - printf("\n"); - break; - } - case O_MAC_TYPE: - { - uint16_t *p = ((ipfw_insn_u16 *)cmd)->ports; - int i; - - for (i = F_LEN((ipfw_insn *)cmd) - 1; i > 0; i--, p += 2) { - printf("0x%04x", p[0]); - if (p[0] != p[1]) { - printf("-"); - printf("0x%04x", p[1]); - } - printf(","); - } - break; - } - case O_IP_SRC: - case O_IP_SRC_MASK: - case O_IP_SRC_ME: - print_ip((ipfw_insn_ip *)cmd); - break; - - case O_IP_DST: - case O_IP_DST_MASK: - case O_IP_DST_ME: - print_ip((ipfw_insn_ip *)cmd); - break; - - case O_IP_DSTPORT: - case O_IP_SRCPORT: - { - uint16_t *p = ((ipfw_insn_u16 *)cmd)->ports; - int i; - - for (i = F_LEN((ipfw_insn *)cmd) - 1; i > 0; i--, p += 2) { - printf("0x%04x", p[0]); - if (p[0] != p[1]) { - printf("-"); - printf("0x%04x", p[1]); - } - printf(","); - } - break; - } - case O_PROTO: - { - printf("O_PROTO"); - - if (cmd->len & F_NOT) { - printf(" not"); - } - - printf(" %u", cmd->arg1); - - break; - } - - default: /*options ... */ - { - if (cmd->len & F_NOT && cmd->opcode != O_IN) { - printf(" not"); - } - switch (cmd->opcode) { - case O_FRAG: - printf("O_FRAG"); - break; - - case O_IN: - printf(cmd->len & F_NOT ? " out" : " O_IN"); - break; - - case O_LAYER2: - printf(" O_LAYER2"); - break; - case O_XMIT: - case O_RECV: - case O_VIA: - { - char const *s; - ipfw_insn_if *cmdif = (ipfw_insn_if *)cmd; - - if (cmd->opcode == O_XMIT) { - s = "O_XMIT"; - } else if (cmd->opcode == O_RECV) { - s = "O_RECV"; - } else { /* if (cmd->opcode == O_VIA) */ - s = "O_VIA"; - } - if (cmdif->name[0] == '\0') { - printf(" %s %s", s, - inet_ntop(AF_INET, &cmdif->p.ip, ipv4str, - sizeof(ipv4str))); - } else if (cmdif->p.unit == -1) { - printf(" %s %s*", s, cmdif->name); - } else { - printf(" %s %s%d", s, cmdif->name, - cmdif->p.unit); - } - } - break; - - case O_IPID: - if (F_LEN(cmd) == 1) { - printf(" ipid %u", cmd->arg1 ); - } else { - uint16_t *p = ((ipfw_insn_u16 *)cmd)->ports; - int i; - - for (i = F_LEN((ipfw_insn *)cmd) - 1; i > 0; i--, p += 2) { - printf("0x%04x", p[0]); - if (p[0] != p[1]) { - printf("-"); - printf("0x%04x", p[1]); - } - printf(","); - } - } - - break; - - case O_IPTTL: - if (F_LEN(cmd) == 1) { - printf(" ipttl %u", cmd->arg1 ); - } else { - uint16_t *p = ((ipfw_insn_u16 *)cmd)->ports; - int i; - - for (i = F_LEN((ipfw_insn *)cmd) - 1; i > 0; i--, p += 2) { - printf("0x%04x", p[0]); - if (p[0] != p[1]) { - printf("-"); - printf("0x%04x", p[1]); - } - printf(","); - } - } - - break; - - case O_IPVER: - printf(" ipver %u", cmd->arg1 ); - break; - - case O_IPPRECEDENCE: - printf(" ipprecedence %u", (cmd->arg1) >> 5 ); - break; - - case O_IPLEN: - if (F_LEN(cmd) == 1) { - printf(" iplen %u", cmd->arg1 ); - } else { - uint16_t *p = ((ipfw_insn_u16 *)cmd)->ports; - int i; - - for (i = F_LEN((ipfw_insn *)cmd) - 1; i > 0; i--, p += 2) { - printf("0x%04x", p[0]); - if (p[0] != p[1]) { - printf("-"); - printf("0x%04x", p[1]); - } - printf(","); - } - } - - break; - - case O_IPOPT: - print_flags("ipoptions", cmd, f_ipopts); - break; - - case O_IPTOS: - 
print_flags("iptos", cmd, f_iptos); - break; - - case O_ICMPTYPE: - print_icmptypes((ipfw_insn_u32 *)cmd); - break; - - case O_ESTAB: - printf(" established"); - break; - - case O_TCPFLAGS: - print_flags("tcpflags", cmd, f_tcpflags); - break; - - case O_TCPOPTS: - print_flags("tcpoptions", cmd, f_tcpopts); - break; - - case O_TCPWIN: - printf(" tcpwin %d", ntohs(cmd->arg1)); - break; - - case O_TCPACK: - printf(" tcpack %u", ntohl(cmd32->d[0])); - break; - - case O_TCPSEQ: - printf(" tcpseq %u", ntohl(cmd32->d[0])); - break; - - case O_UID: - printf(" uid %u", cmd32->d[0]); - break; - - case O_GID: - printf(" gid %u", cmd32->d[0]); - break; - - case O_VERREVPATH: - printf(" verrevpath"); - break; - - case O_IPSEC: - printf(" ipsec"); - break; - - case O_NOP: - break; - - case O_KEEP_STATE: - printf(" keep-state"); - break; - - case O_LIMIT: - { - struct _s_x *p = limit_masks; - ipfw_insn_limit *c = (ipfw_insn_limit *)cmd; - uint8_t x = c->limit_mask; - char const *comma = " "; - - printf(" limit"); - for (; p->x != 0; p++) { - if ((x & p->x) == p->x) { - x &= ~p->x; - printf("%s%s", comma, p->s); - comma = ","; - } - } - printf(" %d", c->conn_limit); - - break; - } - - default: - printf(" [opcode %d len %d]", - cmd->opcode, cmd->len); - } /* switch */ - } /* default */ - } /* switch */ - } /* for */ -} - -#endif /* !FW2_DEBUG_VERBOSE */ - - -/* - * helper function, updates the pointer to cmd with the length - * of the current command, and also cleans up the first word of - * the new command in case it has been clobbered before. - * from ipfw2.c - */ -static ipfw_insn * -next_cmd(ipfw_insn *cmd) -{ - cmd += F_LEN(cmd); - bzero(cmd, sizeof(*cmd)); - return cmd; -} - -/* - * A function to fill simple commands of size 1. - * Existing flags are preserved. 
- * from ipfw2.c - */ -static void -fill_cmd(ipfw_insn *cmd, enum ipfw_opcodes opcode, uint16_t arg) -{ - cmd->opcode = opcode; - cmd->len = ((cmd->len) & (F_NOT | F_OR)) | 1; - cmd->arg1 = arg; -} - - -static u_int32_t -fill_compat_tcpflags(u_int32_t flags) -{ - u_int32_t flags_compat = 0; - - if (flags & TH_FIN) { - flags_compat |= IP_FW_TCPF_FIN_COMPAT; - } - if (flags & TH_SYN) { - flags_compat |= IP_FW_TCPF_SYN_COMPAT; - } - if (flags & TH_RST) { - flags_compat |= IP_FW_TCPF_RST_COMPAT; - } - if (flags & TH_PUSH) { - flags_compat |= IP_FW_TCPF_PSH_COMPAT; - } - if (flags & TH_ACK) { - flags_compat |= IP_FW_TCPF_ACK_COMPAT; - } - if (flags & TH_URG) { - flags_compat |= IP_FW_TCPF_URG_COMPAT; - } - - return flags_compat; -} - - -/* ******************************************** -* *********** Convert from Latest ************ -* ********************************************/ - -/* - * Things we're actively ignoring: - * sets, sets of addresses, blocks (NOT, OR) - */ -static void -ipfw_map_from_cmds_32(struct ip_fw_32 *curr_rule, struct ip_fw_compat_32 *compat_rule) -{ - int l; - ipfw_insn *cmd; - - for (l = curr_rule->act_ofs, cmd = curr_rule->cmd; - l > 0; - l -= F_LEN(cmd), cmd += F_LEN(cmd)) { - /* useful alias */ - ipfw_insn_u32 *cmd32 = (ipfw_insn_u32 *)cmd; - - switch (cmd->opcode) { - case O_PROTO: - /* protocol */ - compat_rule->fw_prot = cmd->arg1; - break; - - case O_IP_SRC_ME: - compat_rule->fw_flg |= IP_FW_F_SME_COMPAT; - if (cmd->len & F_NOT) { - compat_rule->fw_flg |= IP_FW_F_INVSRC_COMPAT; - } - break; - - case O_IP_SRC_MASK: - { - /* addr/mask */ - ipfw_insn_ip *ip = (ipfw_insn_ip *)cmd; - - compat_rule->fw_src = ip->addr; - compat_rule->fw_smsk = ip->mask; - if (cmd->len & F_NOT) { - compat_rule->fw_flg |= IP_FW_F_INVSRC_COMPAT; - } - break; - } - - case O_IP_SRC: - /* one IP */ - /* source - - * for now we only deal with one address - * per rule and ignore sets of addresses - */ - compat_rule->fw_src.s_addr = cmd32->d[0]; - if (cmd->len & F_NOT) { - compat_rule->fw_flg |= IP_FW_F_INVSRC_COMPAT; - } - break; - - case O_IP_SRCPORT: - { - /* source ports */ - ipfw_insn_u16 *ports = (ipfw_insn_u16 *)cmd; - uint16_t *p = ports->ports; - int i, j; - - /* copy list of ports */ - for (i = F_LEN(cmd) - 1, j = 0; i > 0; i--, j++, p += 2) { - if (p[0] != p[1]) { - /* this is a range */ - compat_rule->fw_flg |= IP_FW_F_SRNG_COMPAT; - compat_rule->fw_uar_compat.fw_pts[j++] = p[0]; - compat_rule->fw_uar_compat.fw_pts[j] = p[1]; - } else { - compat_rule->fw_uar_compat.fw_pts[j] = p[0]; - } - } - IP_FW_SETNSRCP_COMPAT(compat_rule, j); - - break; - } - - case O_IP_DST_ME: - /* destination */ - compat_rule->fw_flg |= IP_FW_F_DME_COMPAT; - if (cmd->len & F_NOT) { - compat_rule->fw_flg |= IP_FW_F_INVDST_COMPAT; - } - break; - - case O_IP_DST_MASK: - { - /* addr/mask */ - ipfw_insn_ip *ip = (ipfw_insn_ip *)cmd; - - compat_rule->fw_dst = ip->addr; - compat_rule->fw_dmsk = ip->mask; - if (cmd->len & F_NOT) { - compat_rule->fw_flg |= IP_FW_F_INVDST_COMPAT; - } - break; - } - case O_IP_DST: - /* one IP */ - /* dest - - * for now we only deal with one address - * per rule, and ignore sets of addresses - */ - compat_rule->fw_dst.s_addr = cmd32->d[0]; - if (cmd->len & F_NOT) { - compat_rule->fw_flg |= IP_FW_F_INVDST_COMPAT; - } - break; - - case O_IP_DSTPORT: - { - /* dest. 
ports */ - ipfw_insn_u16 *ports = (ipfw_insn_u16 *)cmd; - uint16_t *p = ports->ports; - int i, - j = IP_FW_GETNSRCP_COMPAT(compat_rule); - - /* copy list of ports */ - for (i = F_LEN(cmd) - 1; i > 0; i--, j++, p += 2) { - if (p[0] != p[1]) { - /* this is a range */ - compat_rule->fw_flg |= IP_FW_F_DRNG_COMPAT; - compat_rule->fw_uar_compat.fw_pts[j++] = p[0]; - compat_rule->fw_uar_compat.fw_pts[j] = p[1]; - } else { - compat_rule->fw_uar_compat.fw_pts[j] = p[0]; - } - } - IP_FW_SETNDSTP_COMPAT(compat_rule, (j - IP_FW_GETNSRCP_COMPAT(compat_rule))); - - break; - } - - case O_LOG: - { - ipfw_insn_log *c = (ipfw_insn_log *)cmd; - - compat_rule->fw_flg |= IP_FW_F_PRN_COMPAT; - compat_rule->fw_logamount = c->max_log; - break; - } - case O_UID: - compat_rule->fw_flg |= IP_FW_F_UID_COMPAT; - compat_rule->fw_uid = cmd32->d[0]; - break; - - case O_IN: - if (cmd->len & F_NOT) { - compat_rule->fw_flg |= IP_FW_F_OUT_COMPAT; - } else { - compat_rule->fw_flg |= IP_FW_F_IN_COMPAT; - } - break; - - case O_KEEP_STATE: - compat_rule->fw_flg |= IP_FW_F_KEEP_S_COMPAT; - break; - - case O_LAYER2: - compat_rule->fw_flg |= IP_FW_BRIDGED_COMPAT; - break; - - case O_XMIT: - { - ipfw_insn_if *ifcmd = (ipfw_insn_if *)cmd; - union ip_fw_if_compat ifu; - - if ((ifcmd->o.len == 0) && (ifcmd->name[0] == '\0')) { - /* any */ - compat_rule->fw_flg |= IP_FW_F_OIFACE_COMPAT; - ifu.fu_via_ip.s_addr = 0; - } else if (ifcmd->p.ip.s_addr != 0) { - compat_rule->fw_flg |= IP_FW_F_OIFACE_COMPAT; - ifu.fu_via_ip = ifcmd->p.ip; - } else { - compat_rule->fw_flg |= IP_FW_F_OIFNAME_COMPAT; - strncpy(ifu.fu_via_if_compat.name, ifcmd->name, sizeof(ifu.fu_via_if_compat.name)); - ifu.fu_via_if_compat.unit = ifcmd->p.unit; - } - compat_rule->fw_out_if = ifu; - - break; - } - - case O_RECV: - { - ipfw_insn_if *ifcmd = (ipfw_insn_if *)cmd; - union ip_fw_if_compat ifu; - - if ((ifcmd->o.len == 0) && (ifcmd->name[0] == '\0')) { - /* any */ - compat_rule->fw_flg |= IP_FW_F_IIFACE_COMPAT; - ifu.fu_via_ip.s_addr = 0; - } else if (ifcmd->p.ip.s_addr != 0) { - compat_rule->fw_flg |= IP_FW_F_IIFACE_COMPAT; - ifu.fu_via_ip = ifcmd->p.ip; - } else { - compat_rule->fw_flg |= IP_FW_F_IIFNAME_COMPAT; - strncpy(ifu.fu_via_if_compat.name, ifcmd->name, sizeof(ifu.fu_via_if_compat.name)); - ifu.fu_via_if_compat.unit = ifcmd->p.unit; - } - compat_rule->fw_in_if = ifu; - - break; - } - - case O_VIA: - { - ipfw_insn_if *ifcmd = (ipfw_insn_if *)cmd; - union ip_fw_if_compat ifu; - - if ((ifcmd->o.len == 0) && (ifcmd->name[0] == '\0')) { - /* any */ - ifu.fu_via_ip.s_addr = 0; - } else if (ifcmd->name[0] != '\0') { - compat_rule->fw_flg |= IP_FW_F_IIFNAME_COMPAT; - strncpy(ifu.fu_via_if_compat.name, ifcmd->name, sizeof(ifu.fu_via_if_compat.name)); - ifu.fu_via_if_compat.unit = ifcmd->p.unit; - } else { - ifu.fu_via_ip = ifcmd->p.ip; - } - compat_rule->fw_flg |= IF_FW_F_VIAHACK_COMPAT; - compat_rule->fw_out_if = compat_rule->fw_in_if = ifu; - - break; - } - - case O_FRAG: - compat_rule->fw_flg |= IP_FW_F_FRAG_COMPAT; - break; - - case O_IPOPT: - /* IP options */ - compat_rule->fw_ipopt = (cmd->arg1 & 0xff); - compat_rule->fw_ipnopt = ((cmd->arg1 >> 8) & 0xff); - break; - - case O_TCPFLAGS: - /* check for "setup" */ - if ((cmd->arg1 & 0xff) == TH_SYN && - ((cmd->arg1 >> 8) & 0xff) == TH_ACK) { - compat_rule->fw_tcpf = IP_FW_TCPF_SYN_COMPAT; - compat_rule->fw_tcpnf = IP_FW_TCPF_ACK_COMPAT; - } else { - compat_rule->fw_tcpf = fill_compat_tcpflags(cmd->arg1 & 0xff); - compat_rule->fw_tcpnf = fill_compat_tcpflags((cmd->arg1 >> 8) & 0xff); - } - break; - - case O_TCPOPTS: 
- /* TCP options */ - compat_rule->fw_tcpopt = (cmd->arg1 & 0xff); - compat_rule->fw_tcpnopt = ((cmd->arg1 >> 8) & 0xff); - break; - - case O_ESTAB: - compat_rule->fw_ipflg |= IP_FW_IF_TCPEST_COMPAT; - break; - - case O_ICMPTYPE: - { - /* ICMP */ - /* XXX: check this */ - int i, type; - - compat_rule->fw_flg |= IP_FW_F_ICMPBIT_COMPAT; - for (i = 0; i < sizeof(uint32_t); i++) { - type = cmd32->d[0] & i; - - compat_rule->fw_uar_compat.fw_icmptypes[type / (sizeof(unsigned) * 8)] |= - 1 << (type % (sizeof(unsigned) * 8)); - } - break; - } - default: - break; - } /* switch */ - } /* for */ -} - -static void -ipfw_map_from_cmds_64(struct ip_fw_64 *curr_rule, struct ip_fw_compat_64 *compat_rule) -{ - int l; - ipfw_insn *cmd; - for (l = curr_rule->act_ofs, cmd = curr_rule->cmd; - l > 0; - l -= F_LEN(cmd), cmd += F_LEN(cmd)) { - /* useful alias */ - ipfw_insn_u32 *cmd32 = (ipfw_insn_u32 *)cmd; - - switch (cmd->opcode) { - case O_PROTO: - /* protocol */ - compat_rule->fw_prot = cmd->arg1; - break; - - case O_IP_SRC_ME: - compat_rule->fw_flg |= IP_FW_F_SME_COMPAT; - if (cmd->len & F_NOT) { - compat_rule->fw_flg |= IP_FW_F_INVSRC_COMPAT; - } - break; - - case O_IP_SRC_MASK: - { - /* addr/mask */ - ipfw_insn_ip *ip = (ipfw_insn_ip *)cmd; - - compat_rule->fw_src = ip->addr; - compat_rule->fw_smsk = ip->mask; - if (cmd->len & F_NOT) { - compat_rule->fw_flg |= IP_FW_F_INVSRC_COMPAT; - } - break; - } - - case O_IP_SRC: - /* one IP */ - /* source - - * for now we only deal with one address - * per rule and ignore sets of addresses - */ - compat_rule->fw_src.s_addr = cmd32->d[0]; - if (cmd->len & F_NOT) { - compat_rule->fw_flg |= IP_FW_F_INVSRC_COMPAT; - } - break; - - case O_IP_SRCPORT: - { - /* source ports */ - ipfw_insn_u16 *ports = (ipfw_insn_u16 *)cmd; - uint16_t *p = ports->ports; - int i, j; - - /* copy list of ports */ - for (i = F_LEN(cmd) - 1, j = 0; i > 0; i--, j++, p += 2) { - if (p[0] != p[1]) { - /* this is a range */ - compat_rule->fw_flg |= IP_FW_F_SRNG_COMPAT; - compat_rule->fw_uar_compat.fw_pts[j++] = p[0]; - compat_rule->fw_uar_compat.fw_pts[j] = p[1]; - } else { - compat_rule->fw_uar_compat.fw_pts[j] = p[0]; - } - } - IP_FW_SETNSRCP_COMPAT(compat_rule, j); - - break; - } - - case O_IP_DST_ME: - /* destination */ - compat_rule->fw_flg |= IP_FW_F_DME_COMPAT; - if (cmd->len & F_NOT) { - compat_rule->fw_flg |= IP_FW_F_INVDST_COMPAT; - } - break; - - case O_IP_DST_MASK: - { - /* addr/mask */ - ipfw_insn_ip *ip = (ipfw_insn_ip *)cmd; - - compat_rule->fw_dst = ip->addr; - compat_rule->fw_dmsk = ip->mask; - if (cmd->len & F_NOT) { - compat_rule->fw_flg |= IP_FW_F_INVDST_COMPAT; - } - break; - } - case O_IP_DST: - /* one IP */ - /* dest - - * for now we only deal with one address - * per rule, and ignore sets of addresses - */ - compat_rule->fw_dst.s_addr = cmd32->d[0]; - if (cmd->len & F_NOT) { - compat_rule->fw_flg |= IP_FW_F_INVDST_COMPAT; - } - break; - - case O_IP_DSTPORT: - { - /* dest. 
ports */ - ipfw_insn_u16 *ports = (ipfw_insn_u16 *)cmd; - uint16_t *p = ports->ports; - int i, - j = IP_FW_GETNSRCP_COMPAT(compat_rule); - - /* copy list of ports */ - for (i = F_LEN(cmd) - 1; i > 0; i--, j++, p += 2) { - if (p[0] != p[1]) { - /* this is a range */ - compat_rule->fw_flg |= IP_FW_F_DRNG_COMPAT; - compat_rule->fw_uar_compat.fw_pts[j++] = p[0]; - compat_rule->fw_uar_compat.fw_pts[j] = p[1]; - } else { - compat_rule->fw_uar_compat.fw_pts[j] = p[0]; - } - } - IP_FW_SETNDSTP_COMPAT(compat_rule, (j - IP_FW_GETNSRCP_COMPAT(compat_rule))); - - break; - } - - case O_LOG: - { - ipfw_insn_log *c = (ipfw_insn_log *)cmd; - - compat_rule->fw_flg |= IP_FW_F_PRN_COMPAT; - compat_rule->fw_logamount = c->max_log; - break; - } - case O_UID: - compat_rule->fw_flg |= IP_FW_F_UID_COMPAT; - compat_rule->fw_uid = cmd32->d[0]; - break; - - case O_IN: - if (cmd->len & F_NOT) { - compat_rule->fw_flg |= IP_FW_F_OUT_COMPAT; - } else { - compat_rule->fw_flg |= IP_FW_F_IN_COMPAT; - } - break; - - case O_KEEP_STATE: - compat_rule->fw_flg |= IP_FW_F_KEEP_S_COMPAT; - break; - - case O_LAYER2: - compat_rule->fw_flg |= IP_FW_BRIDGED_COMPAT; - break; - - case O_XMIT: - { - ipfw_insn_if *ifcmd = (ipfw_insn_if *)cmd; - union ip_fw_if_compat ifu; - - if ((ifcmd->o.len == 0) && (ifcmd->name[0] == '\0')) { - /* any */ - compat_rule->fw_flg |= IP_FW_F_OIFACE_COMPAT; - ifu.fu_via_ip.s_addr = 0; - } else if (ifcmd->p.ip.s_addr != 0) { - compat_rule->fw_flg |= IP_FW_F_OIFACE_COMPAT; - ifu.fu_via_ip = ifcmd->p.ip; - } else { - compat_rule->fw_flg |= IP_FW_F_OIFNAME_COMPAT; - strncpy(ifu.fu_via_if_compat.name, ifcmd->name, sizeof(ifu.fu_via_if_compat.name)); - ifu.fu_via_if_compat.unit = ifcmd->p.unit; - } - compat_rule->fw_out_if = ifu; - - break; - } - - case O_RECV: - { - ipfw_insn_if *ifcmd = (ipfw_insn_if *)cmd; - union ip_fw_if_compat ifu; - - if ((ifcmd->o.len == 0) && (ifcmd->name[0] == '\0')) { - /* any */ - compat_rule->fw_flg |= IP_FW_F_IIFACE_COMPAT; - ifu.fu_via_ip.s_addr = 0; - } else if (ifcmd->p.ip.s_addr != 0) { - compat_rule->fw_flg |= IP_FW_F_IIFACE_COMPAT; - ifu.fu_via_ip = ifcmd->p.ip; - } else { - compat_rule->fw_flg |= IP_FW_F_IIFNAME_COMPAT; - strncpy(ifu.fu_via_if_compat.name, ifcmd->name, sizeof(ifu.fu_via_if_compat.name)); - ifu.fu_via_if_compat.unit = ifcmd->p.unit; - } - compat_rule->fw_in_if = ifu; - - break; - } - - case O_VIA: - { - ipfw_insn_if *ifcmd = (ipfw_insn_if *)cmd; - union ip_fw_if_compat ifu; - - if ((ifcmd->o.len == 0) && (ifcmd->name[0] == '\0')) { - /* any */ - ifu.fu_via_ip.s_addr = 0; - } else if (ifcmd->name[0] != '\0') { - compat_rule->fw_flg |= IP_FW_F_IIFNAME_COMPAT; - strncpy(ifu.fu_via_if_compat.name, ifcmd->name, sizeof(ifu.fu_via_if_compat.name)); - ifu.fu_via_if_compat.unit = ifcmd->p.unit; - } else { - ifu.fu_via_ip = ifcmd->p.ip; - } - compat_rule->fw_flg |= IF_FW_F_VIAHACK_COMPAT; - compat_rule->fw_out_if = compat_rule->fw_in_if = ifu; - - break; - } - - case O_FRAG: - compat_rule->fw_flg |= IP_FW_F_FRAG_COMPAT; - break; - - case O_IPOPT: - /* IP options */ - compat_rule->fw_ipopt = (cmd->arg1 & 0xff); - compat_rule->fw_ipnopt = ((cmd->arg1 >> 8) & 0xff); - break; - - case O_TCPFLAGS: - /* check for "setup" */ - if ((cmd->arg1 & 0xff) == TH_SYN && - ((cmd->arg1 >> 8) & 0xff) == TH_ACK) { - compat_rule->fw_tcpf = IP_FW_TCPF_SYN_COMPAT; - compat_rule->fw_tcpnf = IP_FW_TCPF_ACK_COMPAT; - } else { - compat_rule->fw_tcpf = fill_compat_tcpflags(cmd->arg1 & 0xff); - compat_rule->fw_tcpnf = fill_compat_tcpflags((cmd->arg1 >> 8) & 0xff); - } - break; - - case O_TCPOPTS: 
- /* TCP options */ - compat_rule->fw_tcpopt = (cmd->arg1 & 0xff); - compat_rule->fw_tcpnopt = ((cmd->arg1 >> 8) & 0xff); - break; - - case O_ESTAB: - compat_rule->fw_ipflg |= IP_FW_IF_TCPEST_COMPAT; - break; - - case O_ICMPTYPE: - { - /* ICMP */ - /* XXX: check this */ - int i, type; - - compat_rule->fw_flg |= IP_FW_F_ICMPBIT_COMPAT; - for (i = 0; i < sizeof(uint32_t); i++) { - type = cmd32->d[0] & i; - - compat_rule->fw_uar_compat.fw_icmptypes[type / (sizeof(unsigned) * 8)] |= - 1 << (type % (sizeof(unsigned) * 8)); - } - break; - } - default: - break; - } /* switch */ - } /* for */ -} - -static void -ipfw_map_from_actions_32(struct ip_fw_32 *curr_rule, struct ip_fw_compat_32 *compat_rule) -{ - int l; - ipfw_insn *cmd; - - for (l = curr_rule->cmd_len - curr_rule->act_ofs, cmd = ACTION_PTR(curr_rule); - l > 0; - l -= F_LEN(cmd), cmd += F_LEN(cmd)) { - switch (cmd->opcode) { - case O_ACCEPT: - compat_rule->fw_flg |= IP_FW_F_ACCEPT_COMPAT; - break; - case O_COUNT: - compat_rule->fw_flg |= IP_FW_F_COUNT_COMPAT; - break; - case O_PIPE: - compat_rule->fw_flg |= IP_FW_F_PIPE_COMPAT; - compat_rule->fw_divert_port_compat = cmd->arg1; - break; - case O_QUEUE: - compat_rule->fw_flg |= IP_FW_F_QUEUE_COMPAT; - compat_rule->fw_divert_port_compat = cmd->arg1; - break; - case O_SKIPTO: - compat_rule->fw_flg |= IP_FW_F_SKIPTO_COMPAT; - compat_rule->fw_skipto_rule_compat = cmd->arg1; - break; - case O_DIVERT: - compat_rule->fw_flg |= IP_FW_F_DIVERT_COMPAT; - compat_rule->fw_divert_port_compat = cmd->arg1; - break; - case O_TEE: - compat_rule->fw_flg |= IP_FW_F_TEE_COMPAT; - compat_rule->fw_divert_port_compat = cmd->arg1; - break; - case O_FORWARD_IP: - { - ipfw_insn_sa *p = (ipfw_insn_sa *)cmd; - - compat_rule->fw_flg |= IP_FW_F_FWD_COMPAT; - compat_rule->fw_fwd_ip_compat.sin_len = p->sa.sin_len; - compat_rule->fw_fwd_ip_compat.sin_family = p->sa.sin_family; - compat_rule->fw_fwd_ip_compat.sin_port = p->sa.sin_port; - compat_rule->fw_fwd_ip_compat.sin_addr = p->sa.sin_addr; - - break; - } - case O_DENY: - compat_rule->fw_flg |= IP_FW_F_DENY_COMPAT; - break; - case O_REJECT: - compat_rule->fw_flg |= IP_FW_F_REJECT_COMPAT; - compat_rule->fw_reject_code_compat = cmd->arg1; - break; - case O_CHECK_STATE: - compat_rule->fw_flg |= IP_FW_F_CHECK_S_COMPAT; - break; - default: - break; - } - } -} - -static void -ipfw_map_from_actions_64(struct ip_fw_64 *curr_rule, struct ip_fw_compat_64 *compat_rule) -{ - int l; - ipfw_insn *cmd; - for (l = curr_rule->cmd_len - curr_rule->act_ofs, cmd = ACTION_PTR(curr_rule); - l > 0; - l -= F_LEN(cmd), cmd += F_LEN(cmd)) { - switch (cmd->opcode) { - case O_ACCEPT: - compat_rule->fw_flg |= IP_FW_F_ACCEPT_COMPAT; - break; - case O_COUNT: - compat_rule->fw_flg |= IP_FW_F_COUNT_COMPAT; - break; - case O_PIPE: - compat_rule->fw_flg |= IP_FW_F_PIPE_COMPAT; - compat_rule->fw_divert_port_compat = cmd->arg1; - break; - case O_QUEUE: - compat_rule->fw_flg |= IP_FW_F_QUEUE_COMPAT; - compat_rule->fw_divert_port_compat = cmd->arg1; - break; - case O_SKIPTO: - compat_rule->fw_flg |= IP_FW_F_SKIPTO_COMPAT; - compat_rule->fw_skipto_rule_compat = cmd->arg1; - break; - case O_DIVERT: - compat_rule->fw_flg |= IP_FW_F_DIVERT_COMPAT; - compat_rule->fw_divert_port_compat = cmd->arg1; - break; - case O_TEE: - compat_rule->fw_flg |= IP_FW_F_TEE_COMPAT; - compat_rule->fw_divert_port_compat = cmd->arg1; - break; - case O_FORWARD_IP: - { - ipfw_insn_sa *p = (ipfw_insn_sa *)cmd; - - compat_rule->fw_flg |= IP_FW_F_FWD_COMPAT; - compat_rule->fw_fwd_ip_compat.sin_len = p->sa.sin_len; - 
compat_rule->fw_fwd_ip_compat.sin_family = p->sa.sin_family; - compat_rule->fw_fwd_ip_compat.sin_port = p->sa.sin_port; - compat_rule->fw_fwd_ip_compat.sin_addr = p->sa.sin_addr; - - break; - } - case O_DENY: - compat_rule->fw_flg |= IP_FW_F_DENY_COMPAT; - break; - case O_REJECT: - compat_rule->fw_flg |= IP_FW_F_REJECT_COMPAT; - compat_rule->fw_reject_code_compat = cmd->arg1; - break; - case O_CHECK_STATE: - compat_rule->fw_flg |= IP_FW_F_CHECK_S_COMPAT; - break; - default: - break; - } - } -} - -static void -ipfw_version_latest_to_one_32(struct ip_fw_32 *curr_rule, struct ip_fw_compat_32 *rule_vers1) -{ - if (!rule_vers1) { - return; - } - - bzero(rule_vers1, sizeof(struct ip_fw_compat_32)); - - rule_vers1->version = IP_FW_VERSION_1; - rule_vers1->context = CAST_DOWN_EXPLICIT(user32_addr_t, curr_rule->context); - rule_vers1->fw_number = curr_rule->rulenum; - rule_vers1->fw_pcnt = curr_rule->pcnt; - rule_vers1->fw_bcnt = curr_rule->bcnt; - rule_vers1->timestamp = curr_rule->timestamp; - - /* convert actions */ - ipfw_map_from_actions_32(curr_rule, rule_vers1); - - /* convert commands */ - ipfw_map_from_cmds_32(curr_rule, rule_vers1); - -#if FW2_DEBUG_VERBOSE - ipfw_print_vers1_struct_32(rule_vers1); -#endif -} - -static void -ipfw_version_latest_to_one_64(struct ip_fw_64 *curr_rule, struct ip_fw_compat_64 *rule_vers1) -{ - if (!rule_vers1) { - return; - } - - bzero(rule_vers1, sizeof(struct ip_fw_compat_64)); - - rule_vers1->version = IP_FW_VERSION_1; - rule_vers1->context = CAST_DOWN_EXPLICIT(__uint64_t, curr_rule->context); - rule_vers1->fw_number = curr_rule->rulenum; - rule_vers1->fw_pcnt = curr_rule->pcnt; - rule_vers1->fw_bcnt = curr_rule->bcnt; - rule_vers1->timestamp = curr_rule->timestamp; - - /* convert actions */ - ipfw_map_from_actions_64(curr_rule, rule_vers1); - - /* convert commands */ - ipfw_map_from_cmds_64(curr_rule, rule_vers1); - -#if FW2_DEBUG_VERBOSE - ipfw_print_vers1_struct_64(rule_vers1); -#endif -} - -/* first convert to version one then to version zero */ -static void -ipfw_version_latest_to_zero(struct ip_fw *curr_rule, struct ip_old_fw *rule_vers0, int is64user) -{ - if (is64user) { - struct ip_fw_compat_64 rule_vers1; - ipfw_version_latest_to_one_64((struct ip_fw_64*)curr_rule, &rule_vers1); - bzero(rule_vers0, sizeof(struct ip_old_fw)); - bcopy(&rule_vers1.fw_uar_compat, &rule_vers0->fw_uar, sizeof(rule_vers1.fw_uar_compat)); - bcopy(&rule_vers1.fw_in_if, &rule_vers0->fw_in_if, sizeof(rule_vers1.fw_in_if)); - bcopy(&rule_vers1.fw_out_if, &rule_vers0->fw_out_if, sizeof(rule_vers1.fw_out_if)); - bcopy(&rule_vers1.fw_un_compat, &rule_vers0->fw_un, sizeof(rule_vers1.fw_un_compat)); - rule_vers0->fw_pcnt = rule_vers1.fw_pcnt; - rule_vers0->fw_bcnt = rule_vers1.fw_bcnt; - rule_vers0->fw_src = rule_vers1.fw_src; - rule_vers0->fw_dst = rule_vers1.fw_dst; - rule_vers0->fw_smsk = rule_vers1.fw_smsk; - rule_vers0->fw_dmsk = rule_vers1.fw_dmsk; - rule_vers0->fw_number = rule_vers1.fw_number; - rule_vers0->fw_flg = rule_vers1.fw_flg; - rule_vers0->fw_ipopt = rule_vers1.fw_ipopt; - rule_vers0->fw_ipnopt = rule_vers1.fw_ipnopt; - rule_vers0->fw_tcpf = rule_vers1.fw_tcpf; - rule_vers0->fw_tcpnf = rule_vers1.fw_tcpnf; - rule_vers0->timestamp = rule_vers1.timestamp; - rule_vers0->fw_prot = rule_vers1.fw_prot; - rule_vers0->fw_nports = rule_vers1.fw_nports; - rule_vers0->pipe_ptr = CAST_DOWN_EXPLICIT(void*, rule_vers1.pipe_ptr); - rule_vers0->next_rule_ptr = CAST_DOWN_EXPLICIT(void*, rule_vers1.next_rule_ptr); - - if (rule_vers1.fw_ipflg & IP_FW_IF_TCPEST_COMPAT) { - 
rule_vers0->fw_tcpf |= IP_OLD_FW_TCPF_ESTAB; - } - } else { - struct ip_fw_compat_32 rule_vers1; - ipfw_version_latest_to_one_32((struct ip_fw_32*)curr_rule, &rule_vers1); - bzero(rule_vers0, sizeof(struct ip_old_fw)); - bcopy(&rule_vers1.fw_uar_compat, &rule_vers0->fw_uar, sizeof(rule_vers1.fw_uar_compat)); - bcopy(&rule_vers1.fw_in_if, &rule_vers0->fw_in_if, sizeof(rule_vers1.fw_in_if)); - bcopy(&rule_vers1.fw_out_if, &rule_vers0->fw_out_if, sizeof(rule_vers1.fw_out_if)); - bcopy(&rule_vers1.fw_un_compat, &rule_vers0->fw_un, sizeof(rule_vers1.fw_un_compat)); - rule_vers0->fw_pcnt = rule_vers1.fw_pcnt; - rule_vers0->fw_bcnt = rule_vers1.fw_bcnt; - rule_vers0->fw_src = rule_vers1.fw_src; - rule_vers0->fw_dst = rule_vers1.fw_dst; - rule_vers0->fw_smsk = rule_vers1.fw_smsk; - rule_vers0->fw_dmsk = rule_vers1.fw_dmsk; - rule_vers0->fw_number = rule_vers1.fw_number; - rule_vers0->fw_flg = rule_vers1.fw_flg; - rule_vers0->fw_ipopt = rule_vers1.fw_ipopt; - rule_vers0->fw_ipnopt = rule_vers1.fw_ipnopt; - rule_vers0->fw_tcpf = rule_vers1.fw_tcpf; - rule_vers0->fw_tcpnf = rule_vers1.fw_tcpnf; - rule_vers0->timestamp = rule_vers1.timestamp; - rule_vers0->fw_prot = rule_vers1.fw_prot; - rule_vers0->fw_nports = rule_vers1.fw_nports; - rule_vers0->pipe_ptr = CAST_DOWN_EXPLICIT(void*, rule_vers1.pipe_ptr); - rule_vers0->next_rule_ptr = CAST_DOWN_EXPLICIT(void*, rule_vers1.next_rule_ptr); - - if (rule_vers1.fw_ipflg & IP_FW_IF_TCPEST_COMPAT) { - rule_vers0->fw_tcpf |= IP_OLD_FW_TCPF_ESTAB; - } - } -} - -void -ipfw_convert_from_latest(struct ip_fw *curr_rule, void *old_rule, u_int32_t api_version, int is64user) -{ - switch (api_version) { - case IP_FW_VERSION_0: - { - struct ip_old_fw *rule_vers0 = old_rule; - - ipfw_version_latest_to_zero(curr_rule, rule_vers0, is64user); - break; - } - case IP_FW_VERSION_1: - { - if (is64user) { - ipfw_version_latest_to_one_64((struct ip_fw_64*)curr_rule, (struct ip_fw_compat_64 *)old_rule); - } else { - ipfw_version_latest_to_one_32((struct ip_fw_32*)curr_rule, (struct ip_fw_compat_32 *)old_rule); - } - - break; - } - case IP_FW_CURRENT_API_VERSION: - /* ipfw2 for now, don't need to do anything */ - break; - - default: - /* unknown version */ - break; - } -} - - -/* ******************************************** -* *********** Convert to Latest ************** -* ********************************************/ - -/* from ip_fw.c */ -static int -ipfw_check_vers1_struct_32(struct ip_fw_compat_32 *frwl) -{ - /* Check for invalid flag bits */ - if ((frwl->fw_flg & ~IP_FW_F_MASK_COMPAT) != 0) { - /* - * printf(("%s undefined flag bits set (flags=%x)\n", - * err_prefix, frwl->fw_flg)); - */ - return EINVAL; - } - if (frwl->fw_flg == IP_FW_F_CHECK_S_COMPAT) { - /* check-state */ - return 0; - } - /* Must apply to incoming or outgoing (or both) */ - if (!(frwl->fw_flg & (IP_FW_F_IN_COMPAT | IP_FW_F_OUT_COMPAT))) { - /* - * printf(("%s neither in nor out\n", err_prefix)); - */ - return EINVAL; - } - /* Empty interface name is no good */ - if (((frwl->fw_flg & IP_FW_F_IIFNAME_COMPAT) - && !*frwl->fw_in_if.fu_via_if_compat.name) - || ((frwl->fw_flg & IP_FW_F_OIFNAME_COMPAT) - && !*frwl->fw_out_if.fu_via_if_compat.name)) { - /* - * printf(("%s empty interface name\n", err_prefix)); - */ - return EINVAL; - } - /* Sanity check interface matching */ - if ((frwl->fw_flg & IF_FW_F_VIAHACK_COMPAT) == IF_FW_F_VIAHACK_COMPAT) { - ; /* allow "via" backwards compatibility */ - } else if ((frwl->fw_flg & IP_FW_F_IN_COMPAT) - && (frwl->fw_flg & IP_FW_F_OIFACE_COMPAT)) { - /* - * printf(("%s 
outgoing interface check on incoming\n", - * err_prefix)); - */ - return EINVAL; - } - /* Sanity check port ranges */ - if ((frwl->fw_flg & IP_FW_F_SRNG_COMPAT) && IP_FW_GETNSRCP_COMPAT(frwl) < 2) { - /* - * printf(("%s src range set but n_src_p=%d\n", - * err_prefix, IP_FW_GETNSRCP_COMPAT(frwl))); - */ - return EINVAL; - } - if ((frwl->fw_flg & IP_FW_F_DRNG_COMPAT) && IP_FW_GETNDSTP_COMPAT(frwl) < 2) { - /* - * printf(("%s dst range set but n_dst_p=%d\n", - * err_prefix, IP_FW_GETNDSTP_COMPAT(frwl))); - */ - return EINVAL; - } - if (IP_FW_GETNSRCP_COMPAT(frwl) + IP_FW_GETNDSTP_COMPAT(frwl) > IP_FW_MAX_PORTS_COMPAT) { - /* - * printf(("%s too many ports (%d+%d)\n", - * err_prefix, IP_FW_GETNSRCP_COMPAT(frwl), IP_FW_GETNDSTP_COMPAT(frwl))); - */ - return EINVAL; - } - /* - * Protocols other than TCP/UDP don't use port range - */ - if ((frwl->fw_prot != IPPROTO_TCP) && - (frwl->fw_prot != IPPROTO_UDP) && - (IP_FW_GETNSRCP_COMPAT(frwl) || IP_FW_GETNDSTP_COMPAT(frwl))) { - /* - * printf(("%s port(s) specified for non TCP/UDP rule\n", - * err_prefix)); - */ - return EINVAL; - } - - /* - * Rather than modify the entry to make such entries work, - * we reject this rule and require user level utilities - * to enforce whatever policy they deem appropriate. - */ - if ((frwl->fw_src.s_addr & (~frwl->fw_smsk.s_addr)) || - (frwl->fw_dst.s_addr & (~frwl->fw_dmsk.s_addr))) { - /* - * printf(("%s rule never matches\n", err_prefix)); - */ - return EINVAL; - } - - if ((frwl->fw_flg & IP_FW_F_FRAG_COMPAT) && - (frwl->fw_prot == IPPROTO_UDP || frwl->fw_prot == IPPROTO_TCP)) { - if (frwl->fw_nports) { - /* - * printf(("%s cannot mix 'frag' and ports\n", err_prefix)); - */ - return EINVAL; - } - if (frwl->fw_prot == IPPROTO_TCP && - frwl->fw_tcpf != frwl->fw_tcpnf) { - /* - * printf(("%s cannot mix 'frag' and TCP flags\n", err_prefix)); - */ - return EINVAL; - } - } - - /* Check command specific stuff */ - switch (frwl->fw_flg & IP_FW_F_COMMAND_COMPAT) { - case IP_FW_F_REJECT_COMPAT: - if (frwl->fw_reject_code_compat >= 0x100 - && !(frwl->fw_prot == IPPROTO_TCP - && frwl->fw_reject_code_compat == IP_FW_REJECT_RST_COMPAT)) { - /* - * printf(("%s unknown reject code\n", err_prefix)); - */ - return EINVAL; - } - break; - case IP_FW_F_DIVERT_COMPAT: /* Diverting to port zero is invalid */ - case IP_FW_F_TEE_COMPAT: - case IP_FW_F_PIPE_COMPAT: /* piping through 0 is invalid */ - case IP_FW_F_QUEUE_COMPAT: /* piping through 0 is invalid */ - if (frwl->fw_divert_port_compat == 0) { - /* - * printf(("%s can't divert to port 0\n", err_prefix)); - */ - return EINVAL; - } - break; - case IP_FW_F_DENY_COMPAT: - case IP_FW_F_ACCEPT_COMPAT: - case IP_FW_F_COUNT_COMPAT: - case IP_FW_F_SKIPTO_COMPAT: - case IP_FW_F_FWD_COMPAT: - case IP_FW_F_UID_COMPAT: - break; - default: - /* - * printf(("%s invalid command\n", err_prefix)); - */ - return EINVAL; - } - - return 0; -} - -static int -ipfw_check_vers1_struct_64(struct ip_fw_compat_64 *frwl) -{ - /* Check for invalid flag bits */ - if ((frwl->fw_flg & ~IP_FW_F_MASK_COMPAT) != 0) { - /* - * printf(("%s undefined flag bits set (flags=%x)\n", - * err_prefix, frwl->fw_flg)); - */ - - return EINVAL; - } - if (frwl->fw_flg == IP_FW_F_CHECK_S_COMPAT) { - /* check-state */ - return 0; - } - /* Must apply to incoming or outgoing (or both) */ - if (!(frwl->fw_flg & (IP_FW_F_IN_COMPAT | IP_FW_F_OUT_COMPAT))) { - /* - * printf(("%s neither in nor out\n", err_prefix)); - */ - - return EINVAL; - } - /* Empty interface name is no good */ - if (((frwl->fw_flg & IP_FW_F_IIFNAME_COMPAT) - && 
!*frwl->fw_in_if.fu_via_if_compat.name) - || ((frwl->fw_flg & IP_FW_F_OIFNAME_COMPAT) - && !*frwl->fw_out_if.fu_via_if_compat.name)) { - /* - * printf(("%s empty interface name\n", err_prefix)); - */ - - return EINVAL; - } - /* Sanity check interface matching */ - if ((frwl->fw_flg & IF_FW_F_VIAHACK_COMPAT) == IF_FW_F_VIAHACK_COMPAT) { - ; /* allow "via" backwards compatibility */ - } else if ((frwl->fw_flg & IP_FW_F_IN_COMPAT) - && (frwl->fw_flg & IP_FW_F_OIFACE_COMPAT)) { - /* - * printf(("%s outgoing interface check on incoming\n", - * err_prefix)); - */ - - return EINVAL; - } - /* Sanity check port ranges */ - if ((frwl->fw_flg & IP_FW_F_SRNG_COMPAT) && IP_FW_GETNSRCP_COMPAT(frwl) < 2) { - /* - * printf(("%s src range set but n_src_p=%d\n", - * err_prefix, IP_FW_GETNSRCP_COMPAT(frwl))); - */ - - return EINVAL; - } - if ((frwl->fw_flg & IP_FW_F_DRNG_COMPAT) && IP_FW_GETNDSTP_COMPAT(frwl) < 2) { - /* - * printf(("%s dst range set but n_dst_p=%d\n", - * err_prefix, IP_FW_GETNDSTP_COMPAT(frwl))); - */ - - return EINVAL; - } - if (IP_FW_GETNSRCP_COMPAT(frwl) + IP_FW_GETNDSTP_COMPAT(frwl) > IP_FW_MAX_PORTS_COMPAT) { - /* - * printf(("%s too many ports (%d+%d)\n", - * err_prefix, IP_FW_GETNSRCP_COMPAT(frwl), IP_FW_GETNDSTP_COMPAT(frwl))); - */ - - return EINVAL; - } - /* - * Protocols other than TCP/UDP don't use port range - */ - if ((frwl->fw_prot != IPPROTO_TCP) && - (frwl->fw_prot != IPPROTO_UDP) && - (IP_FW_GETNSRCP_COMPAT(frwl) || IP_FW_GETNDSTP_COMPAT(frwl))) { - /* - * printf(("%s port(s) specified for non TCP/UDP rule\n", - * err_prefix)); - */ - - return EINVAL; - } - - /* - * Rather than modify the entry to make such entries work, - * we reject this rule and require user level utilities - * to enforce whatever policy they deem appropriate. - */ - if ((frwl->fw_src.s_addr & (~frwl->fw_smsk.s_addr)) || - (frwl->fw_dst.s_addr & (~frwl->fw_dmsk.s_addr))) { - /* - * printf(("%s rule never matches\n", err_prefix)); - */ - - return EINVAL; - } - - if ((frwl->fw_flg & IP_FW_F_FRAG_COMPAT) && - (frwl->fw_prot == IPPROTO_UDP || frwl->fw_prot == IPPROTO_TCP)) { - if (frwl->fw_nports) { - /* - * printf(("%s cannot mix 'frag' and ports\n", err_prefix)); - */ - - return EINVAL; - } - if (frwl->fw_prot == IPPROTO_TCP && - frwl->fw_tcpf != frwl->fw_tcpnf) { - /* - * printf(("%s cannot mix 'frag' and TCP flags\n", err_prefix)); - */ - - return EINVAL; - } - } - - /* Check command specific stuff */ - switch (frwl->fw_flg & IP_FW_F_COMMAND_COMPAT) { - case IP_FW_F_REJECT_COMPAT: - if (frwl->fw_reject_code_compat >= 0x100 - && !(frwl->fw_prot == IPPROTO_TCP - && frwl->fw_reject_code_compat == IP_FW_REJECT_RST_COMPAT)) { - /* - * printf(("%s unknown reject code\n", err_prefix)); - */ - - return EINVAL; - } - break; - case IP_FW_F_DIVERT_COMPAT: /* Diverting to port zero is invalid */ - case IP_FW_F_TEE_COMPAT: - case IP_FW_F_PIPE_COMPAT: /* piping through 0 is invalid */ - case IP_FW_F_QUEUE_COMPAT: /* piping through 0 is invalid */ - if (frwl->fw_divert_port_compat == 0) { - /* - * printf(("%s can't divert to port 0\n", err_prefix)); - */ - - return EINVAL; - } - break; - case IP_FW_F_DENY_COMPAT: - case IP_FW_F_ACCEPT_COMPAT: - case IP_FW_F_COUNT_COMPAT: - case IP_FW_F_SKIPTO_COMPAT: - case IP_FW_F_FWD_COMPAT: - case IP_FW_F_UID_COMPAT: - break; - default: - /* - * printf(("%s invalid command\n", err_prefix)); - */ - - return EINVAL; - } - - return 0; -} - -static void -ipfw_convert_to_cmds_32(struct ip_fw *curr_rule, struct ip_fw_compat_32 *compat_rule) -{ - int k; - uint32_t actbuf[255], 
cmdbuf[255]; - ipfw_insn *action, *cmd, *src, *dst; - ipfw_insn *have_state = NULL; /* track check-state or keep-state */ - - if (!compat_rule || !curr_rule) { - return; - } - - /* preemptively check the old ip_fw rule to - * make sure it's valid before starting to copy stuff - */ - if (ipfw_check_vers1_struct_32(compat_rule)) { - /* bad rule */ - return; - } - - bzero(actbuf, sizeof(actbuf)); /* actions go here */ - bzero(cmdbuf, sizeof(cmdbuf)); - - /* fill in action */ - action = (ipfw_insn *)actbuf; - { - u_int flag = compat_rule->fw_flg; - - action->len = 1; /* default */ - - if (flag & IP_FW_F_CHECK_S_COMPAT) { - have_state = action; - action->opcode = O_CHECK_STATE; - } else { - switch (flag & IP_FW_F_COMMAND_COMPAT) { - case IP_FW_F_ACCEPT_COMPAT: - action->opcode = O_ACCEPT; - break; - case IP_FW_F_COUNT_COMPAT: - action->opcode = O_COUNT; - break; - case IP_FW_F_PIPE_COMPAT: - action->opcode = O_PIPE; - action->len = F_INSN_SIZE(ipfw_insn_pipe); - action->arg1 = compat_rule->fw_divert_port_compat; - break; - case IP_FW_F_QUEUE_COMPAT: - action->opcode = O_QUEUE; - action->len = F_INSN_SIZE(ipfw_insn_pipe); - action->arg1 = compat_rule->fw_divert_port_compat; - break; - case IP_FW_F_SKIPTO_COMPAT: - action->opcode = O_SKIPTO; - action->arg1 = compat_rule->fw_skipto_rule_compat; - break; - case IP_FW_F_DIVERT_COMPAT: - action->opcode = O_DIVERT; - action->arg1 = compat_rule->fw_divert_port_compat; - break; - case IP_FW_F_TEE_COMPAT: - action->opcode = O_TEE; - action->arg1 = compat_rule->fw_divert_port_compat; - break; - case IP_FW_F_FWD_COMPAT: - { - ipfw_insn_sa *p = (ipfw_insn_sa *)action; - - action->opcode = O_FORWARD_IP; - action->len = F_INSN_SIZE(ipfw_insn_sa); - - p->sa.sin_len = compat_rule->fw_fwd_ip_compat.sin_len; - p->sa.sin_family = compat_rule->fw_fwd_ip_compat.sin_family; - p->sa.sin_port = compat_rule->fw_fwd_ip_compat.sin_port; - p->sa.sin_addr = compat_rule->fw_fwd_ip_compat.sin_addr; - - break; - } - case IP_FW_F_DENY_COMPAT: - action->opcode = O_DENY; - action->arg1 = 0; - break; - case IP_FW_F_REJECT_COMPAT: - action->opcode = O_REJECT; - action->arg1 = compat_rule->fw_reject_code_compat; - break; - default: - action->opcode = O_NOP; - break; - } - } - - /* action is mandatory */ - if (action->opcode == O_NOP) { - return; - } - - action = next_cmd(action); - } /* end actions */ - - cmd = (ipfw_insn *)cmdbuf; - - /* this is O_CHECK_STATE, we're done */ - if (have_state) { - goto done; - } - - { - ipfw_insn *prev = NULL; - u_int flag = compat_rule->fw_flg; - - /* logging */ - if (flag & IP_FW_F_PRN_COMPAT) { - ipfw_insn_log *c = (ipfw_insn_log *)cmd; - - cmd->opcode = O_LOG; - cmd->len |= F_INSN_SIZE(ipfw_insn_log); - c->max_log = compat_rule->fw_logamount; - - prev = cmd; - cmd = next_cmd(cmd); - } - - /* protocol */ - if (compat_rule->fw_prot != 0) { - fill_cmd(cmd, O_PROTO, compat_rule->fw_prot); - prev = cmd; - cmd = next_cmd(cmd); - } - - /* source */ - if (flag & IP_FW_F_SME_COMPAT) { - cmd->opcode = O_IP_SRC_ME; - cmd->len |= F_INSN_SIZE(ipfw_insn); - if (flag & IP_FW_F_INVSRC_COMPAT) { - cmd->len ^= F_NOT; /* toggle F_NOT */ - } - - prev = cmd; - cmd = next_cmd(cmd); - } else { - if (compat_rule->fw_smsk.s_addr != 0) { - /* addr/mask */ - ipfw_insn_ip *ip = (ipfw_insn_ip *)cmd; - - ip->addr = compat_rule->fw_src; - ip->mask = compat_rule->fw_smsk; - cmd->opcode = O_IP_SRC_MASK; - cmd->len |= F_INSN_SIZE(ipfw_insn_ip); /* double check this */ - } else { - /* one IP */ - ipfw_insn_u32 *cmd32 = (ipfw_insn_u32 *)cmd;/* alias for cmd */ - - if 
(compat_rule->fw_src.s_addr == 0) { - /* any */ - cmd32->o.len &= ~F_LEN_MASK; /* zero len */ - } else { - cmd32->d[0] = compat_rule->fw_src.s_addr; - cmd32->o.opcode = O_IP_SRC; - cmd32->o.len |= F_INSN_SIZE(ipfw_insn_u32); - } - } - - if (flag & IP_FW_F_INVSRC_COMPAT) { - cmd->len ^= F_NOT; /* toggle F_NOT */ - } - - if (F_LEN(cmd) != 0) { /* !any */ - prev = cmd; - cmd = next_cmd(cmd); - } - } - - /* source ports */ - { - ipfw_insn_u16 *ports = (ipfw_insn_u16 *)cmd; - uint16_t *p = ports->ports; - int i, j = 0, - nports = IP_FW_GETNSRCP_COMPAT(compat_rule), - have_range = 0; - - cmd->opcode = O_IP_SRCPORT; - for (i = 0; i < nports; i++) { - if (((flag & IP_FW_F_SRNG_COMPAT) || - (flag & IP_FW_F_SMSK_COMPAT)) && !have_range) { - p[0] = compat_rule->fw_uar_compat.fw_pts[i++]; - p[1] = compat_rule->fw_uar_compat.fw_pts[i]; - have_range = 1; - } else { - p[0] = p[1] = compat_rule->fw_uar_compat.fw_pts[i]; - } - p += 2; - j++; - } - - if (j > 0) { - ports->o.len |= j + 1; /* leave F_NOT and F_OR untouched */ - } - - prev = cmd; - cmd = next_cmd(cmd); - } - - /* destination */ - if (flag & IP_FW_F_DME_COMPAT) { - cmd->opcode = O_IP_DST_ME; - cmd->len |= F_INSN_SIZE(ipfw_insn); - if (flag & IP_FW_F_INVDST_COMPAT) { - cmd->len ^= F_NOT; /* toggle F_NOT */ - } - - prev = cmd; - cmd = next_cmd(cmd); - } else { - if (compat_rule->fw_dmsk.s_addr != 0) { - /* addr/mask */ - ipfw_insn_ip *ip = (ipfw_insn_ip *)cmd; - - ip->addr = compat_rule->fw_dst; - ip->mask = compat_rule->fw_dmsk; - cmd->opcode = O_IP_DST_MASK; - cmd->len |= F_INSN_SIZE(ipfw_insn_ip); /* double check this */ - } else { - /* one IP */ - ipfw_insn_u32 *cmd32 = (ipfw_insn_u32 *)cmd;/* alias for cmd */ - - if (compat_rule->fw_dst.s_addr == 0) { - /* any */ - cmd32->o.len &= ~F_LEN_MASK; /* zero len */ - } else { - cmd32->d[0] = compat_rule->fw_dst.s_addr; - cmd32->o.opcode = O_IP_DST; - cmd32->o.len |= F_INSN_SIZE(ipfw_insn_u32); - } - } - - if (flag & IP_FW_F_INVDST_COMPAT) { - cmd->len ^= F_NOT; /* toggle F_NOT */ - } - - if (F_LEN(cmd) != 0) { /* !any */ - prev = cmd; - cmd = next_cmd(cmd); - } - } - - /* dest. 
ports */ - { - ipfw_insn_u16 *ports = (ipfw_insn_u16 *)cmd; - uint16_t *p = ports->ports; - int i = IP_FW_GETNSRCP_COMPAT(compat_rule), - j = 0, - nports = (IP_FW_GETNDSTP_COMPAT(compat_rule) + i), - have_range = 0; - - cmd->opcode = O_IP_DSTPORT; - for (; i < nports; i++, p += 2) { - if (((flag & IP_FW_F_DRNG_COMPAT) || - (flag & IP_FW_F_DMSK_COMPAT)) && !have_range) { - /* range */ - p[0] = compat_rule->fw_uar_compat.fw_pts[i++]; - p[1] = compat_rule->fw_uar_compat.fw_pts[i]; - have_range = 1; - } else { - p[0] = p[1] = compat_rule->fw_uar_compat.fw_pts[i]; - } - j++; - } - - if (j > 0) { - ports->o.len |= j + 1; /* leave F_NOT and F_OR untouched */ - } - - prev = cmd; - cmd = next_cmd(cmd); - } - - if (flag & IP_FW_F_UID_COMPAT) { - ipfw_insn_u32 *cmd32 = (ipfw_insn_u32 *)cmd;/* alias for cmd */ - - cmd32->o.opcode = O_UID; - cmd32->o.len |= F_INSN_SIZE(ipfw_insn_u32); - cmd32->d[0] = compat_rule->fw_uid; - - prev = cmd; - cmd = next_cmd(cmd); - } - - if (flag & IP_FW_F_KEEP_S_COMPAT) { - have_state = cmd; - fill_cmd(cmd, O_KEEP_STATE, 0); - - prev = cmd; - cmd = next_cmd(cmd); - } - if (flag & IP_FW_BRIDGED_COMPAT) { - fill_cmd(cmd, O_LAYER2, 0); - - prev = cmd; - cmd = next_cmd(cmd); - } - - if ((flag & IF_FW_F_VIAHACK_COMPAT) == IF_FW_F_VIAHACK_COMPAT) { - /* via */ - ipfw_insn_if *ifcmd = (ipfw_insn_if *)cmd; - union ip_fw_if_compat ifu = compat_rule->fw_in_if; - - cmd->opcode = O_VIA; - ifcmd->o.len |= F_INSN_SIZE(ipfw_insn_if); - - if (ifu.fu_via_ip.s_addr == 0) { - /* "any" */ - ifcmd->name[0] = '\0'; - ifcmd->o.len = 0; - } else if (compat_rule->fw_flg & IP_FW_F_IIFNAME_COMPAT) { - /* by name */ - strncpy(ifcmd->name, ifu.fu_via_if_compat.name, sizeof(ifcmd->name)); - ifcmd->p.unit = ifu.fu_via_if_compat.unit; - } else { - /* by addr */ - ifcmd->p.ip = ifu.fu_via_ip; - } - - prev = cmd; - cmd = next_cmd(cmd); - } else { - if (flag & IP_FW_F_IN_COMPAT) { - fill_cmd(cmd, O_IN, 0); - - prev = cmd; - cmd = next_cmd(cmd); - } - if (flag & IP_FW_F_OUT_COMPAT) { - /* if the previous command was O_IN, and this - * is being set as well, it's equivalent to not - * having either command, so let's back up prev - * to the cmd before it and move cmd to prev. 
- */ - if (prev->opcode == O_IN) { - cmd = prev; - bzero(cmd, sizeof(*cmd)); - } else { - cmd->len ^= F_NOT; /* toggle F_NOT */ - fill_cmd(cmd, O_IN, 0); - - prev = cmd; - cmd = next_cmd(cmd); - } - } - if (flag & IP_FW_F_OIFACE_COMPAT) { - /* xmit */ - ipfw_insn_if *ifcmd = (ipfw_insn_if *)cmd; - union ip_fw_if_compat ifu = compat_rule->fw_out_if; - - cmd->opcode = O_XMIT; - ifcmd->o.len |= F_INSN_SIZE(ipfw_insn_if); - - if (ifu.fu_via_ip.s_addr == 0) { - /* "any" */ - ifcmd->name[0] = '\0'; - ifcmd->o.len = 0; - } else if (flag & IP_FW_F_OIFNAME_COMPAT) { - /* by name */ - strncpy(ifcmd->name, ifu.fu_via_if_compat.name, sizeof(ifcmd->name)); - ifcmd->p.unit = ifu.fu_via_if_compat.unit; - } else { - /* by addr */ - ifcmd->p.ip = ifu.fu_via_ip; - } - - prev = cmd; - cmd = next_cmd(cmd); - } else if (flag & IP_FW_F_IIFACE_COMPAT) { - /* recv */ - ipfw_insn_if *ifcmd = (ipfw_insn_if *)cmd; - union ip_fw_if_compat ifu = compat_rule->fw_in_if; - - cmd->opcode = O_RECV; - ifcmd->o.len |= F_INSN_SIZE(ipfw_insn_if); - - if (ifu.fu_via_ip.s_addr == 0) { - /* "any" */ - ifcmd->name[0] = '\0'; - ifcmd->o.len = 0; - } else if (flag & IP_FW_F_IIFNAME_COMPAT) { - /* by name */ - strncpy(ifcmd->name, ifu.fu_via_if_compat.name, sizeof(ifcmd->name)); - ifcmd->p.unit = ifu.fu_via_if_compat.unit; - } else { - /* by addr */ - ifcmd->p.ip = ifu.fu_via_ip; - } - - prev = cmd; - cmd = next_cmd(cmd); - } - } - - if (flag & IP_FW_F_FRAG_COMPAT) { - fill_cmd(cmd, O_FRAG, 0); - - prev = cmd; - cmd = next_cmd(cmd); - } - - /* IP options */ - if (compat_rule->fw_ipopt != 0 || compat_rule->fw_ipnopt != 0) { - fill_cmd(cmd, O_IPOPT, (compat_rule->fw_ipopt & 0xff) | - (compat_rule->fw_ipnopt & 0xff) << 8); - - prev = cmd; - cmd = next_cmd(cmd); - } - - if (compat_rule->fw_prot == IPPROTO_TCP) { - if (compat_rule->fw_ipflg & IP_FW_IF_TCPEST_COMPAT) { - fill_cmd(cmd, O_ESTAB, 0); - - prev = cmd; - cmd = next_cmd(cmd); - } - - /* TCP options and flags */ - if (compat_rule->fw_tcpf != 0 || compat_rule->fw_tcpnf != 0) { - if ((compat_rule->fw_tcpf & IP_FW_TCPF_SYN_COMPAT) && - compat_rule->fw_tcpnf & IP_FW_TCPF_ACK_COMPAT) { - fill_cmd(cmd, O_TCPFLAGS, (TH_SYN) | ((TH_ACK) & 0xff) << 8); - - prev = cmd; - cmd = next_cmd(cmd); - } else { - fill_cmd(cmd, O_TCPFLAGS, (compat_rule->fw_tcpf & 0xff) | - (compat_rule->fw_tcpnf & 0xff) << 8); - - prev = cmd; - cmd = next_cmd(cmd); - } - } - if (compat_rule->fw_tcpopt != 0 || compat_rule->fw_tcpnopt != 0) { - fill_cmd(cmd, O_TCPOPTS, (compat_rule->fw_tcpopt & 0xff) | - (compat_rule->fw_tcpnopt & 0xff) << 8); - - prev = cmd; - cmd = next_cmd(cmd); - } - } - - /* ICMP */ - /* XXX: check this */ - if (flag & IP_FW_F_ICMPBIT_COMPAT) { - int i; - ipfw_insn_u32 *cmd32 = (ipfw_insn_u32 *)cmd;/* alias for cmd */ - - cmd32->o.opcode = O_ICMPTYPE; - cmd32->o.len |= F_INSN_SIZE(ipfw_insn_u32); - - for (i = 0; i < IP_FW_ICMPTYPES_DIM_COMPAT; i++) { - cmd32->d[0] |= compat_rule->fw_uar_compat.fw_icmptypes[i]; - } - - prev = cmd; - cmd = next_cmd(cmd); - } - } /* end commands */ - -done: - /* finally, copy everything into the current - * rule buffer in the right order. 
- */ - dst = curr_rule->cmd; - - /* first, do match probability */ - if (compat_rule->fw_flg & IP_FW_F_RND_MATCH_COMPAT) { - dst->opcode = O_PROB; - dst->len = 2; - *((int32_t *)(dst + 1)) = compat_rule->pipe_ptr; - dst += dst->len; - } - - /* generate O_PROBE_STATE if necessary */ - if (have_state && have_state->opcode != O_CHECK_STATE) { - fill_cmd(dst, O_PROBE_STATE, 0); - dst = next_cmd(dst); - } - - /* - * copy all commands but O_LOG, O_KEEP_STATE - */ - for (src = (ipfw_insn *)cmdbuf; src != cmd; src += k) { - k = F_LEN(src); - - switch (src->opcode) { - case O_LOG: - case O_KEEP_STATE: - break; - default: - bcopy(src, dst, k * sizeof(uint32_t)); - dst += k; - } - } - - /* - * put back the have_state command as last opcode - */ - if (have_state && have_state->opcode != O_CHECK_STATE) { - k = F_LEN(have_state); - bcopy(have_state, dst, k * sizeof(uint32_t)); - dst += k; - } - - /* - * start action section - */ - curr_rule->act_ofs = dst - curr_rule->cmd; - - /* - * put back O_LOG if necessary - */ - src = (ipfw_insn *)cmdbuf; - if (src->opcode == O_LOG) { - k = F_LEN(src); - bcopy(src, dst, k * sizeof(uint32_t)); - dst += k; - } - - /* - * copy all other actions - */ - for (src = (ipfw_insn *)actbuf; src != action; src += k) { - k = F_LEN(src); - bcopy(src, dst, k * sizeof(uint32_t)); - dst += k; - } - - curr_rule->cmd_len = (uint32_t *)dst - (uint32_t *)(curr_rule->cmd); - - return; -} - -static void -ipfw_convert_to_cmds_64(struct ip_fw *curr_rule, struct ip_fw_compat_64 *compat_rule) -{ - int k; - uint32_t actbuf[255], cmdbuf[255]; - ipfw_insn *action, *cmd, *src, *dst; - ipfw_insn *have_state = NULL; /* track check-state or keep-state */ - - if (!compat_rule || !curr_rule) { - return; - } - - /* preemptively check the old ip_fw rule to - * make sure it's valid before starting to copy stuff - */ - if (ipfw_check_vers1_struct_64(compat_rule)) { - /* bad rule */ - return; - } - - bzero(actbuf, sizeof(actbuf)); /* actions go here */ - bzero(cmdbuf, sizeof(cmdbuf)); - /* fill in action */ - action = (ipfw_insn *)actbuf; - { - u_int flag = compat_rule->fw_flg; - - action->len = 1; /* default */ - - if (flag & IP_FW_F_CHECK_S_COMPAT) { - have_state = action; - action->opcode = O_CHECK_STATE; - } else { - switch (flag & IP_FW_F_COMMAND_COMPAT) { - case IP_FW_F_ACCEPT_COMPAT: - action->opcode = O_ACCEPT; - break; - case IP_FW_F_COUNT_COMPAT: - action->opcode = O_COUNT; - break; - case IP_FW_F_PIPE_COMPAT: - action->opcode = O_PIPE; - action->len = F_INSN_SIZE(ipfw_insn_pipe); - action->arg1 = compat_rule->fw_divert_port_compat; - break; - case IP_FW_F_QUEUE_COMPAT: - action->opcode = O_QUEUE; - action->len = F_INSN_SIZE(ipfw_insn_pipe); - action->arg1 = compat_rule->fw_divert_port_compat; - break; - case IP_FW_F_SKIPTO_COMPAT: - action->opcode = O_SKIPTO; - action->arg1 = compat_rule->fw_skipto_rule_compat; - break; - case IP_FW_F_DIVERT_COMPAT: - action->opcode = O_DIVERT; - action->arg1 = compat_rule->fw_divert_port_compat; - break; - case IP_FW_F_TEE_COMPAT: - action->opcode = O_TEE; - action->arg1 = compat_rule->fw_divert_port_compat; - break; - case IP_FW_F_FWD_COMPAT: - { - ipfw_insn_sa *p = (ipfw_insn_sa *)action; - - action->opcode = O_FORWARD_IP; - action->len = F_INSN_SIZE(ipfw_insn_sa); - - p->sa.sin_len = compat_rule->fw_fwd_ip_compat.sin_len; - p->sa.sin_family = compat_rule->fw_fwd_ip_compat.sin_family; - p->sa.sin_port = compat_rule->fw_fwd_ip_compat.sin_port; - p->sa.sin_addr = compat_rule->fw_fwd_ip_compat.sin_addr; - - break; - } - case IP_FW_F_DENY_COMPAT: - 
action->opcode = O_DENY; - action->arg1 = 0; - break; - case IP_FW_F_REJECT_COMPAT: - action->opcode = O_REJECT; - action->arg1 = compat_rule->fw_reject_code_compat; - break; - default: - action->opcode = O_NOP; - break; - } - } - - /* action is mandatory */ - if (action->opcode == O_NOP) { - return; - } - - action = next_cmd(action); - } /* end actions */ - - cmd = (ipfw_insn *)cmdbuf; - - /* this is O_CHECK_STATE, we're done */ - if (have_state) { - goto done; - } - - { - ipfw_insn *prev = NULL; - u_int flag = compat_rule->fw_flg; - - /* logging */ - if (flag & IP_FW_F_PRN_COMPAT) { - ipfw_insn_log *c = (ipfw_insn_log *)cmd; - - cmd->opcode = O_LOG; - cmd->len |= F_INSN_SIZE(ipfw_insn_log); - c->max_log = compat_rule->fw_logamount; - - prev = cmd; - cmd = next_cmd(cmd); - } - - /* protocol */ - if (compat_rule->fw_prot != 0) { - fill_cmd(cmd, O_PROTO, compat_rule->fw_prot); - prev = cmd; - cmd = next_cmd(cmd); - } - - /* source */ - if (flag & IP_FW_F_SME_COMPAT) { - cmd->opcode = O_IP_SRC_ME; - cmd->len |= F_INSN_SIZE(ipfw_insn); - if (flag & IP_FW_F_INVSRC_COMPAT) { - cmd->len ^= F_NOT; /* toggle F_NOT */ - } - - prev = cmd; - cmd = next_cmd(cmd); - } else { - if (compat_rule->fw_smsk.s_addr != 0) { - /* addr/mask */ - ipfw_insn_ip *ip = (ipfw_insn_ip *)cmd; - - ip->addr = compat_rule->fw_src; - ip->mask = compat_rule->fw_smsk; - cmd->opcode = O_IP_SRC_MASK; - cmd->len |= F_INSN_SIZE(ipfw_insn_ip); /* double check this */ - } else { - /* one IP */ - ipfw_insn_u32 *cmd32 = (ipfw_insn_u32 *)cmd;/* alias for cmd */ - - if (compat_rule->fw_src.s_addr == 0) { - /* any */ - cmd32->o.len &= ~F_LEN_MASK; /* zero len */ - } else { - cmd32->d[0] = compat_rule->fw_src.s_addr; - cmd32->o.opcode = O_IP_SRC; - cmd32->o.len |= F_INSN_SIZE(ipfw_insn_u32); - } - } - - if (flag & IP_FW_F_INVSRC_COMPAT) { - cmd->len ^= F_NOT; /* toggle F_NOT */ - } - - if (F_LEN(cmd) != 0) { /* !any */ - prev = cmd; - cmd = next_cmd(cmd); - } - } - - /* source ports */ - { - ipfw_insn_u16 *ports = (ipfw_insn_u16 *)cmd; - uint16_t *p = ports->ports; - int i, j = 0, - nports = IP_FW_GETNSRCP_COMPAT(compat_rule), - have_range = 0; - - cmd->opcode = O_IP_SRCPORT; - for (i = 0; i < nports; i++) { - if (((flag & IP_FW_F_SRNG_COMPAT) || - (flag & IP_FW_F_SMSK_COMPAT)) && !have_range) { - p[0] = compat_rule->fw_uar_compat.fw_pts[i++]; - p[1] = compat_rule->fw_uar_compat.fw_pts[i]; - have_range = 1; - } else { - p[0] = p[1] = compat_rule->fw_uar_compat.fw_pts[i]; - } - p += 2; - j++; - } - - if (j > 0) { - ports->o.len |= j + 1; /* leave F_NOT and F_OR untouched */ - } - - prev = cmd; - cmd = next_cmd(cmd); - } - - /* destination */ - if (flag & IP_FW_F_DME_COMPAT) { - cmd->opcode = O_IP_DST_ME; - cmd->len |= F_INSN_SIZE(ipfw_insn); - if (flag & IP_FW_F_INVDST_COMPAT) { - cmd->len ^= F_NOT; /* toggle F_NOT */ - } - - prev = cmd; - cmd = next_cmd(cmd); - } else { - if (compat_rule->fw_dmsk.s_addr != 0) { - /* addr/mask */ - ipfw_insn_ip *ip = (ipfw_insn_ip *)cmd; - - ip->addr = compat_rule->fw_dst; - ip->mask = compat_rule->fw_dmsk; - cmd->opcode = O_IP_DST_MASK; - cmd->len |= F_INSN_SIZE(ipfw_insn_ip); /* double check this */ - } else { - /* one IP */ - ipfw_insn_u32 *cmd32 = (ipfw_insn_u32 *)cmd;/* alias for cmd */ - - if (compat_rule->fw_dst.s_addr == 0) { - /* any */ - cmd32->o.len &= ~F_LEN_MASK; /* zero len */ - } else { - cmd32->d[0] = compat_rule->fw_dst.s_addr; - cmd32->o.opcode = O_IP_DST; - cmd32->o.len |= F_INSN_SIZE(ipfw_insn_u32); - } - } - - if (flag & IP_FW_F_INVDST_COMPAT) { - cmd->len ^= F_NOT; /* toggle F_NOT */ 
- } - - if (F_LEN(cmd) != 0) { /* !any */ - prev = cmd; - cmd = next_cmd(cmd); - } - } - - /* dest. ports */ - { - ipfw_insn_u16 *ports = (ipfw_insn_u16 *)cmd; - uint16_t *p = ports->ports; - int i = IP_FW_GETNSRCP_COMPAT(compat_rule), - j = 0, - nports = (IP_FW_GETNDSTP_COMPAT(compat_rule) + i), - have_range = 0; - - cmd->opcode = O_IP_DSTPORT; - for (; i < nports; i++, p += 2) { - if (((flag & IP_FW_F_DRNG_COMPAT) || - (flag & IP_FW_F_DMSK_COMPAT)) && !have_range) { - /* range */ - p[0] = compat_rule->fw_uar_compat.fw_pts[i++]; - p[1] = compat_rule->fw_uar_compat.fw_pts[i]; - have_range = 1; - } else { - p[0] = p[1] = compat_rule->fw_uar_compat.fw_pts[i]; - } - j++; - } - - if (j > 0) { - ports->o.len |= j + 1; /* leave F_NOT and F_OR untouched */ - } - - prev = cmd; - cmd = next_cmd(cmd); - } - - if (flag & IP_FW_F_UID_COMPAT) { - ipfw_insn_u32 *cmd32 = (ipfw_insn_u32 *)cmd;/* alias for cmd */ - - cmd32->o.opcode = O_UID; - cmd32->o.len |= F_INSN_SIZE(ipfw_insn_u32); - cmd32->d[0] = compat_rule->fw_uid; - - prev = cmd; - cmd = next_cmd(cmd); - } - - if (flag & IP_FW_F_KEEP_S_COMPAT) { - have_state = cmd; - fill_cmd(cmd, O_KEEP_STATE, 0); - - prev = cmd; - cmd = next_cmd(cmd); - } - if (flag & IP_FW_BRIDGED_COMPAT) { - fill_cmd(cmd, O_LAYER2, 0); - - prev = cmd; - cmd = next_cmd(cmd); - } - - if ((flag & IF_FW_F_VIAHACK_COMPAT) == IF_FW_F_VIAHACK_COMPAT) { - /* via */ - ipfw_insn_if *ifcmd = (ipfw_insn_if *)cmd; - union ip_fw_if_compat ifu = compat_rule->fw_in_if; - - cmd->opcode = O_VIA; - ifcmd->o.len |= F_INSN_SIZE(ipfw_insn_if); - - if (ifu.fu_via_ip.s_addr == 0) { - /* "any" */ - ifcmd->name[0] = '\0'; - ifcmd->o.len = 0; - } else if (compat_rule->fw_flg & IP_FW_F_IIFNAME_COMPAT) { - /* by name */ - strncpy(ifcmd->name, ifu.fu_via_if_compat.name, sizeof(ifcmd->name)); - ifcmd->p.unit = ifu.fu_via_if_compat.unit; - } else { - /* by addr */ - ifcmd->p.ip = ifu.fu_via_ip; - } - - prev = cmd; - cmd = next_cmd(cmd); - } else { - if (flag & IP_FW_F_IN_COMPAT) { - fill_cmd(cmd, O_IN, 0); - - prev = cmd; - cmd = next_cmd(cmd); - } - if (flag & IP_FW_F_OUT_COMPAT) { - /* if the previous command was O_IN, and this - * is being set as well, it's equivalent to not - * having either command, so let's back up prev - * to the cmd before it and move cmd to prev. 
- */ - if (prev->opcode == O_IN) { - cmd = prev; - bzero(cmd, sizeof(*cmd)); - } else { - cmd->len ^= F_NOT; /* toggle F_NOT */ - fill_cmd(cmd, O_IN, 0); - - prev = cmd; - cmd = next_cmd(cmd); - } - } - if (flag & IP_FW_F_OIFACE_COMPAT) { - /* xmit */ - ipfw_insn_if *ifcmd = (ipfw_insn_if *)cmd; - union ip_fw_if_compat ifu = compat_rule->fw_out_if; - - cmd->opcode = O_XMIT; - ifcmd->o.len |= F_INSN_SIZE(ipfw_insn_if); - - if (ifu.fu_via_ip.s_addr == 0) { - /* "any" */ - ifcmd->name[0] = '\0'; - ifcmd->o.len = 0; - } else if (flag & IP_FW_F_OIFNAME_COMPAT) { - /* by name */ - strncpy(ifcmd->name, ifu.fu_via_if_compat.name, sizeof(ifcmd->name)); - ifcmd->p.unit = ifu.fu_via_if_compat.unit; - } else { - /* by addr */ - ifcmd->p.ip = ifu.fu_via_ip; - } - - prev = cmd; - cmd = next_cmd(cmd); - } else if (flag & IP_FW_F_IIFACE_COMPAT) { - /* recv */ - ipfw_insn_if *ifcmd = (ipfw_insn_if *)cmd; - union ip_fw_if_compat ifu = compat_rule->fw_in_if; - - cmd->opcode = O_RECV; - ifcmd->o.len |= F_INSN_SIZE(ipfw_insn_if); - - if (ifu.fu_via_ip.s_addr == 0) { - /* "any" */ - ifcmd->name[0] = '\0'; - ifcmd->o.len = 0; - } else if (flag & IP_FW_F_IIFNAME_COMPAT) { - /* by name */ - strncpy(ifcmd->name, ifu.fu_via_if_compat.name, sizeof(ifcmd->name)); - ifcmd->p.unit = ifu.fu_via_if_compat.unit; - } else { - /* by addr */ - ifcmd->p.ip = ifu.fu_via_ip; - } - - prev = cmd; - cmd = next_cmd(cmd); - } - } - - if (flag & IP_FW_F_FRAG_COMPAT) { - fill_cmd(cmd, O_FRAG, 0); - - prev = cmd; - cmd = next_cmd(cmd); - } - - /* IP options */ - if (compat_rule->fw_ipopt != 0 || compat_rule->fw_ipnopt != 0) { - fill_cmd(cmd, O_IPOPT, (compat_rule->fw_ipopt & 0xff) | - (compat_rule->fw_ipnopt & 0xff) << 8); - - prev = cmd; - cmd = next_cmd(cmd); - } - - if (compat_rule->fw_prot == IPPROTO_TCP) { - if (compat_rule->fw_ipflg & IP_FW_IF_TCPEST_COMPAT) { - fill_cmd(cmd, O_ESTAB, 0); - - prev = cmd; - cmd = next_cmd(cmd); - } - - /* TCP options and flags */ - if (compat_rule->fw_tcpf != 0 || compat_rule->fw_tcpnf != 0) { - if ((compat_rule->fw_tcpf & IP_FW_TCPF_SYN_COMPAT) && - compat_rule->fw_tcpnf & IP_FW_TCPF_ACK_COMPAT) { - fill_cmd(cmd, O_TCPFLAGS, (TH_SYN) | ((TH_ACK) & 0xff) << 8); - - prev = cmd; - cmd = next_cmd(cmd); - } else { - fill_cmd(cmd, O_TCPFLAGS, (compat_rule->fw_tcpf & 0xff) | - (compat_rule->fw_tcpnf & 0xff) << 8); - - prev = cmd; - cmd = next_cmd(cmd); - } - } - if (compat_rule->fw_tcpopt != 0 || compat_rule->fw_tcpnopt != 0) { - fill_cmd(cmd, O_TCPOPTS, (compat_rule->fw_tcpopt & 0xff) | - (compat_rule->fw_tcpnopt & 0xff) << 8); - - prev = cmd; - cmd = next_cmd(cmd); - } - } - - /* ICMP */ - /* XXX: check this */ - if (flag & IP_FW_F_ICMPBIT_COMPAT) { - int i; - ipfw_insn_u32 *cmd32 = (ipfw_insn_u32 *)cmd;/* alias for cmd */ - cmd32->o.opcode = O_ICMPTYPE; - cmd32->o.len |= F_INSN_SIZE(ipfw_insn_u32); - - for (i = 0; i < IP_FW_ICMPTYPES_DIM_COMPAT; i++) { - cmd32->d[0] |= compat_rule->fw_uar_compat.fw_icmptypes[i]; - } - - prev = cmd; - cmd = next_cmd(cmd); - } - } /* end commands */ -done: - /* finally, copy everything into the current - * rule buffer in the right order. 
- */ - dst = curr_rule->cmd; - - /* first, do match probability */ - if (compat_rule->fw_flg & IP_FW_F_RND_MATCH_COMPAT) { - dst->opcode = O_PROB; - dst->len = 2; - *((int32_t *)(dst + 1)) = compat_rule->pipe_ptr; - dst += dst->len; - } - - /* generate O_PROBE_STATE if necessary */ - if (have_state && have_state->opcode != O_CHECK_STATE) { - fill_cmd(dst, O_PROBE_STATE, 0); - dst = next_cmd(dst); - } - - /* - * copy all commands but O_LOG, O_KEEP_STATE - */ - for (src = (ipfw_insn *)cmdbuf; src != cmd; src += k) { - k = F_LEN(src); - switch (src->opcode) { - case O_LOG: - case O_KEEP_STATE: - break; - default: - bcopy(src, dst, k * sizeof(uint32_t)); - dst += k; - } - } - - /* - * put back the have_state command as last opcode - */ - if (have_state && have_state->opcode != O_CHECK_STATE) { - k = F_LEN(have_state); - bcopy(have_state, dst, k * sizeof(uint32_t)); - dst += k; - } - - /* - * start action section - */ - curr_rule->act_ofs = dst - curr_rule->cmd; - - /* - * put back O_LOG if necessary - */ - src = (ipfw_insn *)cmdbuf; - if (src->opcode == O_LOG) { - k = F_LEN(src); - bcopy(src, dst, k * sizeof(uint32_t)); - dst += k; - } - - /* - * copy all other actions - */ - for (src = (ipfw_insn *)actbuf; src != action; src += k) { - k = F_LEN(src); - bcopy(src, dst, k * sizeof(uint32_t)); - dst += k; - } - - curr_rule->cmd_len = (uint32_t *)dst - (uint32_t *)(curr_rule->cmd); - return; -} - -static int -ipfw_version_one_to_version_two_32(struct sockopt *sopt, struct ip_fw *curr_rule, - struct ip_fw_compat_32 *rule_vers1) -{ - int err = EINVAL; - struct ip_fw_compat_32 *rule_ptr; - struct ip_fw_compat_32 rule; - - if (rule_vers1) { - rule_ptr = rule_vers1; - err = 0; - } else { - /* do some basic size checking here, more extensive checking later */ - if (!sopt->sopt_val || sopt->sopt_valsize < sizeof(struct ip_fw_compat_32)) { - return err; - } - - if ((err = sooptcopyin(sopt, &rule, sizeof(struct ip_fw_compat_32), - sizeof(struct ip_fw_compat_32)))) { - return err; - } - - rule_ptr = &rule; - } - - /* deal with commands */ - ipfw_convert_to_cmds_32(curr_rule, rule_ptr); - - curr_rule->version = IP_FW_CURRENT_API_VERSION; - curr_rule->context = CAST_DOWN_EXPLICIT(void*, rule_ptr->context); - curr_rule->rulenum = rule_ptr->fw_number; - curr_rule->pcnt = rule_ptr->fw_pcnt; - curr_rule->bcnt = rule_ptr->fw_bcnt; - curr_rule->timestamp = rule_ptr->timestamp; - - -#if FW2_DEBUG_VERBOSE - ipfw_print_vers2_struct(curr_rule); -#endif - - return err; -} - -static int -ipfw_version_one_to_version_two_64(struct sockopt *sopt, struct ip_fw *curr_rule, - struct ip_fw_compat_64 *rule_vers1) -{ - int err = EINVAL; - struct ip_fw_compat_64 *rule_ptr; - struct ip_fw_compat_64 rule; - - if (rule_vers1) { - rule_ptr = rule_vers1; - err = 0; - } else { - /* do some basic size checking here, more extensive checking later */ - if (!sopt->sopt_val || sopt->sopt_valsize < sizeof(struct ip_fw_compat_64)) { - return err; - } - - if ((err = sooptcopyin(sopt, &rule, sizeof(struct ip_fw_compat_64), - sizeof(struct ip_fw_compat_64)))) { - return err; - } - rule_ptr = &rule; - } - - /* deal with commands */ - ipfw_convert_to_cmds_64(curr_rule, rule_ptr); - - curr_rule->version = IP_FW_CURRENT_API_VERSION; - curr_rule->context = CAST_DOWN_EXPLICIT( void *, rule_ptr->context); - curr_rule->rulenum = rule_ptr->fw_number; - curr_rule->pcnt = rule_ptr->fw_pcnt; - curr_rule->bcnt = rule_ptr->fw_bcnt; - curr_rule->timestamp = rule_ptr->timestamp; - - -#if FW2_DEBUG_VERBOSE - ipfw_print_vers2_struct(curr_rule); -#endif - - return 
err; -} - -/* This converts to whatever the latest version is. Currently the - * latest version of the firewall is ipfw2. - */ -static int -ipfw_version_one_to_latest_32(struct sockopt *sopt, struct ip_fw *curr_rule, struct ip_fw_compat_32 *rule_vers1) -{ - int err; - - /* if rule_vers1 is not null then this is coming from - * ipfw_version_zero_to_latest(), so pass that along; - * otherwise let ipfw_version_one_to_version_two() - * get the rule from sopt. - */ - err = ipfw_version_one_to_version_two_32(sopt, curr_rule, rule_vers1); - - return err; -} - -static int -ipfw_version_one_to_latest_64(struct sockopt *sopt, struct ip_fw *curr_rule, struct ip_fw_compat_64 *rule_vers1) -{ - int err; - - /* if rule_vers1 is not null then this is coming from - * ipfw_version_zero_to_latest(), so pass that along; - * otherwise let ipfw_version_one_to_version_two() - * get the rule from sopt. - */ - err = ipfw_version_one_to_version_two_64(sopt, curr_rule, rule_vers1); - - return err; -} - - -#if 0 - -/* - * XXX - ipfw_version_zero_to_one - * - * This function is only used in version #1 of ipfw, which is now deprecated. - * - */ - -static void -ipfw_version_zero_to_one(struct ip_old_fw *rule_vers0, struct ip_fw_compat *rule_vers1) -{ - bzero(rule_vers1, sizeof(struct ip_fw_compat)); - bcopy(&rule_vers0->fw_uar, &rule_vers1->fw_uar_compat, sizeof(rule_vers0->fw_uar)); - bcopy(&rule_vers0->fw_in_if, &rule_vers1->fw_in_if, sizeof(rule_vers0->fw_in_if)); - bcopy(&rule_vers0->fw_out_if, &rule_vers1->fw_out_if, sizeof(rule_vers0->fw_out_if)); - bcopy(&rule_vers0->fw_un, &rule_vers1->fw_un_compat, sizeof(rule_vers0->fw_un)); - - rule_vers1->version = 10; - rule_vers1->fw_pcnt = rule_vers0->fw_pcnt; - rule_vers1->fw_bcnt = rule_vers0->fw_bcnt; - rule_vers1->fw_src = rule_vers0->fw_src; - rule_vers1->fw_dst = rule_vers0->fw_dst; - rule_vers1->fw_smsk = rule_vers0->fw_smsk; - rule_vers1->fw_dmsk = rule_vers0->fw_dmsk; - rule_vers1->fw_number = rule_vers0->fw_number; - rule_vers1->fw_flg = rule_vers0->fw_flg; - rule_vers1->fw_ipopt = rule_vers0->fw_ipopt; - rule_vers1->fw_ipnopt = rule_vers0->fw_ipnopt; - rule_vers1->fw_tcpf = rule_vers0->fw_tcpf & ~IP_OLD_FW_TCPF_ESTAB; - rule_vers1->fw_tcpnf = rule_vers0->fw_tcpnf; - rule_vers1->timestamp = rule_vers0->timestamp; - rule_vers1->fw_prot = rule_vers0->fw_prot; - rule_vers1->fw_nports = rule_vers0->fw_nports; - rule_vers1->pipe_ptr = rule_vers0->pipe_ptr; - rule_vers1->next_rule_ptr = rule_vers0->next_rule_ptr; - rule_vers1->fw_ipflg = (rule_vers0->fw_tcpf & IP_OLD_FW_TCPF_ESTAB) ? IP_FW_IF_TCPEST_COMPAT : 0; -} - -#endif /* !ipfw_version_zero_to_one */ - -/* rule is a u_int32_t buffer[255] into which the converted - * (if necessary) rules go. - */ -int -ipfw_convert_to_latest(struct sockopt *sopt, struct ip_fw *curr_rule, int api_version, int is64user) -{ - int err = 0; - - /* the following functions copy the rules passed in and - * convert to latest structures based on version - */ - switch (api_version) { - case IP_FW_VERSION_0: - /* we're not supporting VERSION 0 */ - err = EOPNOTSUPP; - break; - - case IP_FW_VERSION_1: - /* this is the version supported in Panther */ - if (is64user) { - err = ipfw_version_one_to_latest_64(sopt, curr_rule, NULL); - } else { - err = ipfw_version_one_to_latest_32(sopt, curr_rule, NULL); - } - break; - - case IP_FW_CURRENT_API_VERSION: - /* IPFW2 for now */ - /* do nothing here... 
*/ - break; - - default: - /* unrecognized/unsupported version */ - err = EINVAL; - break; - } - - return err; -} - -int -ipfw_get_command_and_version(struct sockopt *sopt, int *command, u_int32_t *api_version) -{ - int cmd; - int err = 0; - u_int32_t vers = IP_FW_VERSION_NONE; - - /* first deal with the oldest version */ - if (sopt->sopt_name == IP_OLD_FW_GET) { - vers = IP_FW_VERSION_0; - cmd = IP_FW_GET; - } else if (sopt->sopt_name == IP_OLD_FW_FLUSH) { - vers = IP_FW_VERSION_0; - cmd = IP_FW_FLUSH; - } else if (sopt->sopt_name == IP_OLD_FW_ZERO) { - vers = IP_FW_VERSION_0; - cmd = IP_FW_ZERO; - } else if (sopt->sopt_name == IP_OLD_FW_ADD) { - vers = IP_FW_VERSION_0; - cmd = IP_FW_ADD; - } else if (sopt->sopt_name == IP_OLD_FW_DEL) { - vers = IP_FW_VERSION_0; - cmd = IP_FW_DEL; - } else if (sopt->sopt_name == IP_OLD_FW_RESETLOG) { - vers = IP_FW_VERSION_0; - cmd = IP_FW_RESETLOG; - } else { - cmd = sopt->sopt_name; - } - - if (vers == IP_FW_VERSION_NONE) { - /* working off the fact that the offset - * is the same in both structs. - */ - struct ip_fw_64 rule; - size_t copyinsize; - - if (proc_is64bit(sopt->sopt_p)) { - copyinsize = sizeof(struct ip_fw_64); - } else { - copyinsize = sizeof(struct ip_fw_32); - } - - if (!sopt->sopt_val || sopt->sopt_valsize < copyinsize) { - return EINVAL; - } - if ((err = sooptcopyin(sopt, &rule, copyinsize, copyinsize))) { - return err; - } - - vers = rule.version; - } - - if (command) { - *command = cmd; - } - if (api_version) { - *api_version = vers; - } - - return err; -} diff --git a/bsd/netinet/ip_fw2_compat.h b/bsd/netinet/ip_fw2_compat.h deleted file mode 100644 index a26563d9e..000000000 --- a/bsd/netinet/ip_fw2_compat.h +++ /dev/null @@ -1,497 +0,0 @@ -/* IPFW backward compatibility */ - -#ifndef _IP_FW_COMPAT_H_ -#define _IP_FW_COMPAT_H_ - -/* prototypes */ -void ipfw_convert_from_latest(struct ip_fw *curr_rule, void *old_rule, u_int32_t api_version, int is64user); -int ipfw_convert_to_latest(struct sockopt *sopt, struct ip_fw *rule, int api_version, int is64user); -int ipfw_get_command_and_version(struct sockopt *sopt, int *command, u_int32_t *api_version); - - -/* - * ****************************** - * ****** IPFW version one ****** - * ****************************** - */ - -/* - * This union structure identifies an interface, either explicitly - * by name or implicitly by IP address. The flags IP_FW_F_IIFNAME - * and IP_FW_F_OIFNAME say how to interpret this structure. An - * interface unit number of -1 matches any unit number, while an - * IP address of 0.0.0.0 indicates matches any interface. - * - * The receive and transmit interfaces are only compared against the - * the packet if the corresponding bit (IP_FW_F_IIFACE or IP_FW_F_OIFACE) - * is set. Note some packets lack a receive or transmit interface - * (in which case the missing "interface" never matches). - */ - -union ip_fw_if_compat { - struct in_addr fu_via_ip; /* Specified by IP address */ - struct { /* Specified by interface name */ -#define FW_IFNLEN_COMPAT 10 /* need room ! was IFNAMSIZ */ - char name[FW_IFNLEN_COMPAT]; - short unit; /* -1 means match any unit */ - } fu_via_if_compat; -}; - -/* - * Format of an IP firewall descriptor - * - * fw_src, fw_dst, fw_smsk, fw_dmsk are always stored in network byte order. - * fw_flg and fw_n*p are stored in host byte order (of course). - * Port numbers are stored in HOST byte order. - */ - - -struct ip_fw_compat { - u_int32_t version; /* Version of this structure. Should always be */ - /* set to IP_FW_CURRENT_API_VERSION by clients. 
*/ - void *context; /* Context that is usable by user processes to */ - /* identify this rule. */ - u_int64_t fw_pcnt, fw_bcnt; /* Packet and byte counters */ - struct in_addr fw_src, fw_dst; /* Source and destination IP addr */ - struct in_addr fw_smsk, fw_dmsk; /* Mask for src and dest IP addr */ - u_short fw_number; /* Rule number */ - u_int fw_flg; /* Flags word */ -#define IP_FW_MAX_PORTS_COMPAT 10 /* A reasonable maximum */ - union { - u_short fw_pts[IP_FW_MAX_PORTS_COMPAT]; /* Array of port numbers to match */ -#define IP_FW_ICMPTYPES_MAX_COMPAT 128 -#define IP_FW_ICMPTYPES_DIM_COMPAT (IP_FW_ICMPTYPES_MAX_COMPAT / (sizeof(unsigned) * 8)) - unsigned fw_icmptypes[IP_FW_ICMPTYPES_DIM_COMPAT]; /* ICMP types bitmap */ - } fw_uar_compat; - u_int fw_ipflg; /* IP flags word */ - u_char fw_ipopt, fw_ipnopt; /* IP options set/unset */ - u_char fw_tcpopt, fw_tcpnopt; /* TCP options set/unset */ - u_char fw_tcpf, fw_tcpnf; /* TCP flags set/unset */ - long timestamp; /* timestamp (tv_sec) of last match */ - union ip_fw_if_compat fw_in_if, fw_out_if; /* Incoming and outgoing interfaces */ - union { - u_short fu_divert_port; /* Divert/tee port (options IPDIVERT) */ - u_short fu_pipe_nr; /* queue number (option DUMMYNET) */ - u_short fu_skipto_rule; /* SKIPTO command rule number */ - u_short fu_reject_code; /* REJECT response code */ - struct sockaddr_in fu_fwd_ip; - } fw_un_compat; - u_char fw_prot; /* IP protocol */ - /* - * N'of src ports and # of dst ports in ports array (dst ports - * follow src ports; max of 10 ports in all; count of 0 means - * match all ports) - */ - u_char fw_nports; - void *pipe_ptr; /* flow_set ptr for dummynet pipe */ - void *next_rule_ptr; /* next rule in case of match */ - uid_t fw_uid; /* uid to match */ - int fw_logamount; /* amount to log */ - u_int64_t fw_loghighest; /* highest number packet to log */ -}; - -/* - * extended ipfw structure... some fields in the original struct - * can be used to pass parameters up/down, namely pointers - * void *pipe_ptr - * void *next_rule_ptr - * some others can be used to pass parameters down, namely counters etc. - * u_int64_t fw_pcnt,fw_bcnt; - * long timestamp; - */ - -struct ip_fw_ext_compat { /* extended structure */ - struct ip_fw rule; /* must be at offset 0 */ - long dont_match_prob; /* 0x7fffffff means 1.0, always fail */ - u_int dyn_type;/* type for dynamic rule */ -}; - -struct ip_fw_chain_compat { - LIST_ENTRY(ip_fw_chain_compat) next; - struct ip_fw_compat *rule; -}; - -/* - * dynamic ipfw rule - */ - -struct ipfw_dyn_rule_compat { - struct ipfw_dyn_rule *next; - - struct ipfw_flow_id id; - struct ipfw_flow_id mask; - struct ip_fw_chain_compat *chain; /* pointer to parent rule */ - u_int32_t type; /* rule type */ - u_int32_t expire; /* expire time */ - u_int64_t pcnt, bcnt; /* match counters */ - u_int32_t bucket; /* which bucket in hash table */ - u_int32_t state; /* state of this rule (typ. a */ - /* combination of TCP flags) */ -}; - -#ifdef BSD_KERNEL_PRIVATE -#pragma pack(4) - -struct ip_fw_compat_32 { - u_int32_t version; /* Version of this structure. Should always be */ - /* set to IP_FW_CURRENT_API_VERSION by clients. */ - user32_addr_t context; /* Context that is usable by user processes to */ - /* identify this rule. 
*/ - u_int64_t fw_pcnt, fw_bcnt; /* Packet and byte counters */ - struct in_addr fw_src, fw_dst; /* Source and destination IP addr */ - struct in_addr fw_smsk, fw_dmsk;/* Mask for src and dest IP addr */ - u_short fw_number; /* Rule number */ - u_int fw_flg; /* Flags word */ -#define IP_FW_MAX_PORTS_COMPAT 10 /* A reasonable maximum */ - union { - u_short fw_pts[IP_FW_MAX_PORTS_COMPAT]; /* Array of port numbers to match */ -#define IP_FW_ICMPTYPES_MAX_COMPAT 128 -#define IP_FW_ICMPTYPES_DIM_COMPAT (IP_FW_ICMPTYPES_MAX_COMPAT / (sizeof(unsigned) * 8)) - unsigned fw_icmptypes[IP_FW_ICMPTYPES_DIM_COMPAT]; /* ICMP types bitmap */ - } fw_uar_compat; - u_int fw_ipflg; /* IP flags word */ - u_char fw_ipopt, fw_ipnopt; /* IP options set/unset */ - u_char fw_tcpopt, fw_tcpnopt; /* TCP options set/unset */ - u_char fw_tcpf, fw_tcpnf; /* TCP flags set/unset */ - u_int32_t timestamp; /* timestamp (tv_sec) of last match */ - union ip_fw_if_compat fw_in_if, fw_out_if; /* Incoming and outgoing interfaces */ - union { - u_short fu_divert_port; /* Divert/tee port (options IPDIVERT) */ - u_short fu_pipe_nr; /* queue number (option DUMMYNET) */ - u_short fu_skipto_rule; /* SKIPTO command rule number */ - u_short fu_reject_code; /* REJECT response code */ - struct sockaddr_in fu_fwd_ip; - } fw_un_compat; - u_char fw_prot; /* IP protocol */ - /* - * N'of src ports and # of dst ports in ports array (dst ports - * follow src ports; max of 10 ports in all; count of 0 means - * match all ports) - */ - u_char fw_nports; - user32_addr_t pipe_ptr; /* flow_set ptr for dummynet pipe */ - user32_addr_t next_rule_ptr; /* next rule in case of match */ - uid_t fw_uid; /* uid to match */ - int fw_logamount; /* amount to log */ - u_int64_t fw_loghighest; /* highest number packet to log */ -}; -#pragma pack() - -struct ip_fw_compat_64 { - u_int32_t version; /* Version of this structure. Should always be */ - /* set to IP_FW_CURRENT_API_VERSION by clients. */ - user64_addr_t context; /* Context that is usable by user processes to */ - /* identify this rule. 
*/ - u_int64_t fw_pcnt, fw_bcnt; /* Packet and byte counters */ - struct in_addr fw_src, fw_dst; /* Source and destination IP addr */ - struct in_addr fw_smsk, fw_dmsk;/* Mask for src and dest IP addr */ - u_short fw_number; /* Rule number */ - u_int fw_flg; /* Flags word */ -#define IP_FW_MAX_PORTS_COMPAT 10 /* A reasonable maximum */ - union { - u_short fw_pts[IP_FW_MAX_PORTS_COMPAT]; /* Array of port numbers to match */ -#define IP_FW_ICMPTYPES_MAX_COMPAT 128 -#define IP_FW_ICMPTYPES_DIM_COMPAT (IP_FW_ICMPTYPES_MAX_COMPAT / (sizeof(unsigned) * 8)) - unsigned fw_icmptypes[IP_FW_ICMPTYPES_DIM_COMPAT]; /* ICMP types bitmap */ - } fw_uar_compat; - u_int fw_ipflg; /* IP flags word */ - u_char fw_ipopt, fw_ipnopt; /* IP options set/unset */ - u_char fw_tcpopt, fw_tcpnopt; /* TCP options set/unset */ - u_char fw_tcpf, fw_tcpnf; /* TCP flags set/unset */ - u_int64_t timestamp; /* timestamp (tv_sec) of last match */ - union ip_fw_if_compat fw_in_if, fw_out_if; /* Incoming and outgoing interfaces */ - union { - u_short fu_divert_port; /* Divert/tee port (options IPDIVERT) */ - u_short fu_pipe_nr; /* queue number (option DUMMYNET) */ - u_short fu_skipto_rule; /* SKIPTO command rule number */ - u_short fu_reject_code; /* REJECT response code */ - struct sockaddr_in fu_fwd_ip; - } fw_un_compat; - u_char fw_prot; /* IP protocol */ - /* - * N'of src ports and # of dst ports in ports array (dst ports - * follow src ports; max of 10 ports in all; count of 0 means - * match all ports) - */ - u_char fw_nports; - user64_addr_t pipe_ptr; /* flow_set ptr for dummynet pipe */ - user64_addr_t next_rule_ptr; /* next rule in case of match */ - uid_t fw_uid; /* uid to match */ - int fw_logamount; /* amount to log */ - u_int64_t fw_loghighest; /* highest number packet to log */ -}; - -struct ipfw_dyn_rule_compat_32 { - user32_addr_t next; - - struct ipfw_flow_id id; - struct ipfw_flow_id mask; - user32_addr_t chain; /* pointer to parent rule */ - u_int32_t type; /* rule type */ - u_int32_t expire; /* expire time */ - u_int64_t pcnt, bcnt; /* match counters */ - u_int32_t bucket; /* which bucket in hash table */ - u_int32_t state; /* state of this rule (typ. a */ - /* combination of TCP flags) */ -}; - -struct ipfw_dyn_rule_compat_64 { - user64_addr_t next; - - struct ipfw_flow_id id; - struct ipfw_flow_id mask; - user64_addr_t chain; /* pointer to parent rule */ - u_int32_t type; /* rule type */ - u_int32_t expire; /* expire time */ - u_int64_t pcnt, bcnt; /* match counters */ - u_int32_t bucket; /* which bucket in hash table */ - u_int32_t state; /* state of this rule (typ. a */ - /* combination of TCP flags) */ -}; -#endif /* BSD_KERNEL_PRIVATE */ - - -#define IP_FW_GETNSRCP_COMPAT(rule) ((rule)->fw_nports & 0x0f) -#define IP_FW_SETNSRCP_COMPAT(rule, n) do { \ - (rule)->fw_nports &= ~0x0f; \ - (rule)->fw_nports |= (n); \ - } while (0) -#define IP_FW_GETNDSTP_COMPAT(rule) ((rule)->fw_nports >> 4) -#define IP_FW_SETNDSTP_COMPAT(rule, n) do { \ - (rule)->fw_nports &= ~0xf0; \ - (rule)->fw_nports |= (n) << 4;\ - } while (0) - -#define fw_divert_port_compat fw_un_compat.fu_divert_port -#define fw_skipto_rule_compat fw_un_compat.fu_skipto_rule -#define fw_reject_code_compat fw_un_compat.fu_reject_code -#define fw_pipe_nr_compat fw_un_compat.fu_pipe_nr -#define fw_fwd_ip_compat fw_un_compat.fu_fwd_ip - -/* - * Values for "flags" field . 
- */ -#define IP_FW_F_COMMAND_COMPAT 0x000000ff /* Mask for type of chain entry: */ -#define IP_FW_F_DENY_COMPAT 0x00000000 /* This is a deny rule */ -#define IP_FW_F_REJECT_COMPAT 0x00000001 /* Deny and send a response packet */ -#define IP_FW_F_ACCEPT_COMPAT 0x00000002 /* This is an accept rule */ -#define IP_FW_F_COUNT_COMPAT 0x00000003 /* This is a count rule */ -#define IP_FW_F_DIVERT_COMPAT 0x00000004 /* This is a divert rule */ -#define IP_FW_F_TEE_COMPAT 0x00000005 /* This is a tee rule */ -#define IP_FW_F_SKIPTO_COMPAT 0x00000006 /* This is a skipto rule */ -#define IP_FW_F_FWD_COMPAT 0x00000007 /* This is a "change forwarding address" rule */ -#define IP_FW_F_PIPE_COMPAT 0x00000008 /* This is a dummynet rule */ -#define IP_FW_F_QUEUE_COMPAT 0x00000009 /* This is a dummynet queue */ - -#define IP_FW_F_IN_COMPAT 0x00000100 /* Check inbound packets */ -#define IP_FW_F_OUT_COMPAT 0x00000200 /* Check outbound packets */ -#define IP_FW_F_IIFACE_COMPAT 0x00000400 /* Apply inbound interface test */ -#define IP_FW_F_OIFACE_COMPAT 0x00000800 /* Apply outbound interface test */ - -#define IP_FW_F_PRN_COMPAT 0x00001000 /* Print if this rule matches */ - -#define IP_FW_F_SRNG_COMPAT 0x00002000 /* The first two src ports are a min * - * and max range (stored in host byte * - * order). */ - -#define IP_FW_F_DRNG_COMPAT 0x00004000 /* The first two dst ports are a min * - * and max range (stored in host byte * - * order). */ - -#define IP_FW_F_FRAG_COMPAT 0x00008000 /* Fragment */ - -#define IP_FW_F_IIFNAME_COMPAT 0x00010000 /* In interface by name/unit (not IP) */ -#define IP_FW_F_OIFNAME_COMPAT 0x00020000 /* Out interface by name/unit (not IP) */ - -#define IP_FW_F_INVSRC_COMPAT 0x00040000 /* Invert sense of src check */ -#define IP_FW_F_INVDST_COMPAT 0x00080000 /* Invert sense of dst check */ - -#define IP_FW_F_ICMPBIT_COMPAT 0x00100000 /* ICMP type bitmap is valid */ - -#define IP_FW_F_UID_COMPAT 0x00200000 /* filter by uid */ - -#define IP_FW_F_RND_MATCH_COMPAT 0x00800000 /* probabilistic rule match */ -#define IP_FW_F_SMSK_COMPAT 0x01000000 /* src-port + mask */ -#define IP_FW_F_DMSK_COMPAT 0x02000000 /* dst-port + mask */ -#define IP_FW_BRIDGED_COMPAT 0x04000000 /* only match bridged packets */ -#define IP_FW_F_KEEP_S_COMPAT 0x08000000 /* keep state */ -#define IP_FW_F_CHECK_S_COMPAT 0x10000000 /* check state */ - -#define IP_FW_F_SME_COMPAT 0x20000000 /* source = me */ -#define IP_FW_F_DME_COMPAT 0x40000000 /* destination = me */ - -#define IP_FW_F_MASK_COMPAT 0x7FFFFFFF /* All possible flag bits mask */ - -/* - * Flags for the 'fw_ipflg' field, for comparing values of ip and its protocols. - */ -#define IP_FW_IF_TCPEST_COMPAT 0x00000020 /* established TCP connection */ -#define IP_FW_IF_TCPMSK_COMPAT 0x00000020 /* mask of all TCP values */ - -/* - * Definitions for TCP flags. - */ -#define IP_FW_TCPF_FIN_COMPAT TH_FIN -#define IP_FW_TCPF_SYN_COMPAT TH_SYN -#define IP_FW_TCPF_RST_COMPAT TH_RST -#define IP_FW_TCPF_PSH_COMPAT TH_PUSH -#define IP_FW_TCPF_ACK_COMPAT TH_ACK -#define IP_FW_TCPF_URG_COMPAT TH_URG - -/* - * For backwards compatibility with rules specifying "via iface" but - * not restricted to only "in" or "out" packets, we define this combination - * of bits to represent this configuration. - */ - -#define IF_FW_F_VIAHACK_COMPAT (IP_FW_F_IN_COMPAT|IP_FW_F_OUT_COMPAT|IP_FW_F_IIFACE_COMPAT|IP_FW_F_OIFACE_COMPAT) - -/* - * Definitions for REJECT response codes. - * Values less than 256 correspond to ICMP unreachable codes. 
- */ -#define IP_FW_REJECT_RST_COMPAT 0x0100 /* TCP packets: send RST */ - - -/* - * ****************************** - * ****** IPFW version zero ***** - * ****************************** - */ - -/* - * This union structure identifies an interface, either explicitly - * by name or implicitly by IP address. The flags IP_FW_F_IIFNAME - * and IP_FW_F_OIFNAME say how to interpret this structure. An - * interface unit number of -1 matches any unit number, while an - * IP address of 0.0.0.0 indicates matches any interface. - * - * The receive and transmit interfaces are only compared against the - * the packet if the corresponding bit (IP_FW_F_IIFACE or IP_FW_F_OIFACE) - * is set. Note some packets lack a receive or transmit interface - * (in which case the missing "interface" never matches). - */ - -union ip_old_fw_if { - struct in_addr fu_via_ip; /* Specified by IP address */ - struct { /* Specified by interface name */ -#define OLD_FW_IFNLEN 10 /* need room ! was IFNAMSIZ */ - char name[OLD_FW_IFNLEN]; - short unit; /* -1 means match any unit */ - } fu_via_if; -}; - -/* - * Format of an IP firewall descriptor - * - * fw_src, fw_dst, fw_smsk, fw_dmsk are always stored in network byte order. - * fw_flg and fw_n*p are stored in host byte order (of course). - * Port numbers are stored in HOST byte order. - * Warning: setsockopt() will fail if sizeof(struct ip_fw) > MLEN (108) - */ - -struct ip_old_fw { - u_int64_t fw_pcnt, fw_bcnt; /* Packet and byte counters */ - struct in_addr fw_src, fw_dst; /* Source and destination IP addr */ - struct in_addr fw_smsk, fw_dmsk; /* Mask for src and dest IP addr */ - u_short fw_number; /* Rule number */ - u_int fw_flg; /* Flags word */ -#define IP_OLD_FW_MAX_PORTS 10 /* A reasonable maximum */ - union { - u_short fw_pts[IP_OLD_FW_MAX_PORTS]; /* Array of port numbers to match */ -#define IP_OLD_FW_ICMPTYPES_MAX 128 -#define IP_OLD_FW_ICMPTYPES_DIM (IP_OLD_FW_ICMPTYPES_MAX / (sizeof(unsigned) * 8)) - unsigned fw_icmptypes[IP_OLD_FW_ICMPTYPES_DIM]; /* ICMP types bitmap */ - } fw_uar; - u_char fw_ipopt, fw_ipnopt; /* IP options set/unset */ - u_char fw_tcpf, fw_tcpnf; /* TCP flags set/unset */ - long timestamp; /* timestamp (tv_sec) of last match */ - union ip_old_fw_if fw_in_if, fw_out_if; /* Incoming and outgoing interfaces */ - union { - u_short fu_divert_port; /* Divert/tee port (options IPDIVERT) */ - u_short fu_pipe_nr; /* pipe number (option DUMMYNET) */ - u_short fu_skipto_rule; /* SKIPTO command rule number */ - u_short fu_reject_code; /* REJECT response code */ - struct sockaddr_in fu_fwd_ip; - } fw_un; - u_char fw_prot; /* IP protocol */ - u_char fw_nports; /* N'of src ports and # of dst ports */ - /* in ports array (dst ports follow */ - /* src ports; max of 10 ports in all; */ - /* count of 0 means match all ports) */ - void *pipe_ptr; /* Pipe ptr in case of dummynet pipe */ - void *next_rule_ptr; /* next rule in case of match */ -}; - -#define IP_OLD_FW_GETNSRCP(rule) ((rule)->fw_nports & 0x0f) -#define IP_OLD_FW_SETNSRCP(rule, n) do { \ - (rule)->fw_nports &= ~0x0f; \ - (rule)->fw_nports |= (n); \ - } while (0) -#define IP_OLD_FW_GETNDSTP(rule) ((rule)->fw_nports >> 4) -#define IP_OLD_FW_SETNDSTP(rule, n) do { \ - (rule)->fw_nports &= ~0xf0; \ - (rule)->fw_nports |= (n) << 4;\ - } while (0) - -#define old_fw_divert_port fw_un.fu_divert_port -#define old_fw_skipto_rule fw_un.fu_skipto_rule -#define old_fw_reject_code fw_un.fu_reject_code -#define old_fw_pipe_nr fw_un.fu_pipe_nr -#define old_fw_fwd_ip fw_un.fu_fwd_ip - -/* - * Values for "flags" field . 
- */ -#define IP_OLD_FW_F_COMMAND 0x000000ff /* Mask for type of chain entry: */ -#define IP_OLD_FW_F_DENY 0x00000000 /* This is a deny rule */ -#define IP_OLD_FW_F_REJECT 0x00000001 /* Deny and send a response packet */ -#define IP_OLD_FW_F_ACCEPT 0x00000002 /* This is an accept rule */ -#define IP_OLD_FW_F_COUNT 0x00000003 /* This is a count rule */ -#define IP_OLD_FW_F_DIVERT 0x00000004 /* This is a divert rule */ -#define IP_OLD_FW_F_TEE 0x00000005 /* This is a tee rule */ -#define IP_OLD_FW_F_SKIPTO 0x00000006 /* This is a skipto rule */ -#define IP_OLD_FW_F_FWD 0x00000007 /* This is a "change forwarding address" rule */ -#define IP_OLD_FW_F_PIPE 0x00000008 /* This is a dummynet rule */ - -#define IP_OLD_FW_F_IN 0x00000100 /* Check inbound packets */ -#define IP_OLD_FW_F_OUT 0x00000200 /* Check outbound packets */ -#define IP_OLD_FW_F_IIFACE 0x00000400 /* Apply inbound interface test */ -#define IP_OLD_FW_F_OIFACE 0x00000800 /* Apply outbound interface test */ - -#define IP_OLD_FW_F_PRN 0x00001000 /* Print if this rule matches */ - -#define IP_OLD_FW_F_SRNG 0x00002000 /* The first two src ports are a min * - * and max range (stored in host byte * - * order). */ - -#define IP_OLD_FW_F_DRNG 0x00004000 /* The first two dst ports are a min * - * and max range (stored in host byte * - * order). */ - -#define IP_OLD_FW_F_FRAG 0x00008000 /* Fragment */ - -#define IP_OLD_FW_F_IIFNAME 0x00010000 /* In interface by name/unit (not IP) */ -#define IP_OLD_FW_F_OIFNAME 0x00020000 /* Out interface by name/unit (not IP) */ - -#define IP_OLD_FW_F_INVSRC 0x00040000 /* Invert sense of src check */ -#define IP_OLD_FW_F_INVDST 0x00080000 /* Invert sense of dst check */ - -#define IP_OLD_FW_F_ICMPBIT 0x00100000 /* ICMP type bitmap is valid */ - -#define IP_OLD_FW_F_MASK 0x001FFFFF /* All possible flag bits mask */ - -/* - * For backwards compatibility with rules specifying "via iface" but - * not restricted to only "in" or "out" packets, we define this combination - * of bits to represent this configuration. - */ - -#define IF_OLD_FW_F_VIAHACK (IP_OLD_FW_F_IN|IP_OLD_FW_F_OUT|IP_OLD_FW_F_IIFACE|IP_OLD_FW_F_OIFACE) - -/* - * Definitions for TCP flags - abridged - */ -#define IP_OLD_FW_TCPF_ESTAB 0x40 - -#endif /* _IP_FW_COMPAT_H_ */ diff --git a/bsd/netinet/ip_icmp.c b/bsd/netinet/ip_icmp.c index 65adc858a..14aa9c960 100644 --- a/bsd/netinet/ip_icmp.c +++ b/bsd/netinet/ip_icmp.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2018 Apple Inc. All rights reserved. + * Copyright (c) 2000-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -106,17 +106,6 @@ #include #endif /* NECP */ -/* XXX This one should go in sys/mbuf.h. It is used to avoid that - * a firewall-generated packet loops forever through the firewall. 
- */ -#ifndef M_SKIP_FIREWALL -#define M_SKIP_FIREWALL 0x4000 -#endif - -#if CONFIG_MACF_NET -#include -#endif /* MAC_NET */ - /* * ICMP routines: error generation, receive packet processing, and @@ -155,11 +144,11 @@ const static int icmp_datalen = 8; /* Default values in case CONFIG_ICMP_BANDLIM is not defined in the MASTER file */ #ifndef CONFIG_ICMP_BANDLIM -#if !CONFIG_EMBEDDED +#if XNU_TARGET_OS_OSX #define CONFIG_ICMP_BANDLIM 250 -#else /* CONFIG_EMBEDDED */ +#else /* !XNU_TARGET_OS_OSX */ #define CONFIG_ICMP_BANDLIM 50 -#endif /* CONFIG_EMBEDDED */ +#endif /* !XNU_TARGET_OS_OSX */ #endif /* CONFIG_ICMP_BANDLIM */ /* @@ -218,6 +207,8 @@ icmp_error( u_int32_t nlen = 0; VERIFY((u_int)type <= ICMP_MAXTYPE); + VERIFY(code <= UINT8_MAX); + /* Expect 32-bit aligned data pointer on strict-align platforms */ MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(n); @@ -306,10 +297,11 @@ icmp_error( th = (struct tcphdr *)(void *)((caddr_t)oip + oiphlen); if (th != ((struct tcphdr *)P2ROUNDDOWN(th, - sizeof(u_int32_t)))) { + sizeof(u_int32_t))) || + ((th->th_off << 2) > UINT16_MAX)) { goto freeit; } - tcphlen = th->th_off << 2; + tcphlen = (uint16_t)(th->th_off << 2); /* Sanity checks */ if (tcphlen < sizeof(struct tcphdr)) { @@ -360,22 +352,19 @@ stdreply: icmpelen = max(ICMP_MINLEN, min(icmp_datalen, goto freeit; } -#if CONFIG_MACF_NET - mac_mbuf_label_associate_netlayer(n, m); -#endif /* * Further refine the payload length to the space * remaining in mbuf after including the IP header and ICMP * header. */ - icmplen = min(icmplen, M_TRAILINGSPACE(m) - - sizeof(struct ip) - ICMP_MINLEN); + icmplen = min(icmplen, (u_int)M_TRAILINGSPACE(m) - + (u_int)(sizeof(struct ip) - ICMP_MINLEN)); m_align(m, ICMP_MINLEN + icmplen); m->m_len = ICMP_MINLEN + icmplen; /* for ICMP header and data */ icp = mtod(m, struct icmp *); icmpstat.icps_outhist[type]++; - icp->icmp_type = type; + icp->icmp_type = (u_char)type; if (type == ICMP_REDIRECT) { icp->icmp_gwaddr.s_addr = dest; } else { @@ -385,15 +374,15 @@ stdreply: icmpelen = max(ICMP_MINLEN, min(icmp_datalen, * zeroed icmp_void field. */ if (type == ICMP_PARAMPROB) { - icp->icmp_pptr = code; + icp->icmp_pptr = (u_char)code; code = 0; } else if (type == ICMP_UNREACH && code == ICMP_UNREACH_NEEDFRAG && nextmtu != 0) { - icp->icmp_nextmtu = htons(nextmtu); + icp->icmp_nextmtu = htons((uint16_t)nextmtu); } } - icp->icmp_code = code; + icp->icmp_code = (u_char)code; /* * Copy icmplen worth of content from original @@ -412,17 +401,14 @@ stdreply: icmpelen = max(ICMP_MINLEN, min(icmp_datalen, /* * Set up ICMP message mbuf and copy old IP header (without options * in front of ICMP message. - * If the original mbuf was meant to bypass the firewall, the error - * reply should bypass as well. */ - m->m_flags |= n->m_flags & M_SKIP_FIREWALL; m->m_data -= sizeof(struct ip); m->m_len += sizeof(struct ip); m->m_pkthdr.len = m->m_len; m->m_pkthdr.rcvif = n->m_pkthdr.rcvif; nip = mtod(m, struct ip *); bcopy((caddr_t)oip, (caddr_t)nip, sizeof(struct ip)); - nip->ip_len = m->m_len; + nip->ip_len = (uint16_t)m->m_len; nip->ip_vhl = IP_VHL_BORING; nip->ip_p = IPPROTO_ICMP; nip->ip_tos = 0; @@ -660,7 +646,7 @@ badcode: icp->icmp_type = ICMP_ECHOREPLY; #if ICMP_BANDLIM - if (badport_bandlim(BANDLIM_ICMP_ECHO) < 0) { + if (badport_bandlim(BANDLIM_ICMP_ECHO)) { goto freeit; } else #endif @@ -684,7 +670,7 @@ badcode: icp->icmp_rtime = iptime(); icp->icmp_ttime = icp->icmp_rtime; /* bogus, do later! 
*/ #if ICMP_BANDLIM - if (badport_bandlim(BANDLIM_ICMP_TSTAMP) < 0) { + if (badport_bandlim(BANDLIM_ICMP_TSTAMP)) { goto freeit; } else #endif @@ -890,14 +876,11 @@ match: IFA_ADDREF(&ia->ia_ifa); lck_rw_done(in_ifaddr_rwlock); } -#if CONFIG_MACF_NET - mac_netinet_icmp_reply(m); -#endif IFA_LOCK_SPIN(&ia->ia_ifa); t = IA_SIN(ia)->sin_addr; IFA_UNLOCK(&ia->ia_ifa); ip->ip_src = t; - ip->ip_ttl = ip_defttl; + ip->ip_ttl = (u_char)ip_defttl; IFA_REMREF(&ia->ia_ifa); ia = NULL; @@ -1108,13 +1091,13 @@ ip_next_mtu(int mtu, int dir) * delay with more complex code. */ -int +boolean_t badport_bandlim(int which) { static uint64_t lticks[BANDLIM_MAX + 1]; static int lpackets[BANDLIM_MAX + 1]; - uint64_t time = net_uptime(); - int secs; + uint64_t time; + uint64_t secs; const char *bandlimittype[] = { "Limiting icmp unreach response", @@ -1124,15 +1107,13 @@ badport_bandlim(int which) "Limiting open port RST response" }; - /* - * Return ok status if feature disabled or argument out of - * ranage. - */ + /* Return ok status if feature disabled or argument out of range. */ if (icmplim <= 0 || which > BANDLIM_MAX || which < 0) { - return 0; + return false; } + time = net_uptime(); secs = time - lticks[which]; /* @@ -1156,9 +1137,9 @@ badport_bandlim(int which) */ if (++lpackets[which] > icmplim) { - return -1; + return true; } - return 0; + return false; } #endif @@ -1229,7 +1210,7 @@ icmp_dgram_attach(struct socket *so, __unused int proto, struct proc *p) inp = (struct inpcb *)so->so_pcb; inp->inp_vflag |= INP_IPV4; inp->inp_ip_p = IPPROTO_ICMP; - inp->inp_ip_ttl = ip_defttl; + inp->inp_ip_ttl = (u_char)ip_defttl; return 0; } @@ -1267,6 +1248,7 @@ icmp_dgram_ctloutput(struct socket *so, struct sockopt *sopt) case IP_STRIPHDR: case IP_RECVTTL: case IP_BOUND_IF: + case IP_DONTFRAG: case IP_NO_IFT_CELLULAR: error = rip_ctloutput(so, sopt); break; diff --git a/bsd/netinet/ip_input.c b/bsd/netinet/ip_input.c index 1c162bf8c..af5c216e3 100644 --- a/bsd/netinet/ip_input.c +++ b/bsd/netinet/ip_input.c @@ -116,22 +116,15 @@ #include #include #include -#include -#include #include #include #include #include -#include #if DUMMYNET #include #endif /* DUMMYNET */ -#if CONFIG_MACF_NET -#include -#endif /* CONFIG_MACF_NET */ - #if IPSEC #include #include @@ -164,7 +157,7 @@ static struct ipq *ipq_alloc(int); static void ipq_free(struct ipq *); static void ipq_updateparams(void); static void ip_input_second_pass(struct mbuf *, struct ifnet *, - u_int32_t, int, int, struct ip_fw_in_args *, int); + int, int, struct ip_fw_in_args *); decl_lck_mtx_data(static, ipqlock); static lck_attr_t *ipqlock_attr; @@ -365,14 +358,6 @@ SYSCTL_INT(_net_inet_ip, OID_AUTO, stealth, CTLFLAG_RW | CTLFLAG_LOCKED, &ipstealth, 0, ""); #endif /* IPSTEALTH */ -/* Firewall hooks */ -#if IPFIREWALL -ip_fw_chk_t *ip_fw_chk_ptr; -int fw_enable = 1; -int fw_bypass = 1; -int fw_one_pass = 0; -#endif /* IPFIREWALL */ - #if DUMMYNET ip_dn_io_t *ip_dn_io_ptr; #endif /* DUMMYNET */ @@ -414,15 +399,7 @@ static void save_rte(u_char *, struct in_addr); static int ip_dooptions(struct mbuf *, int, struct sockaddr_in *); static void ip_forward(struct mbuf *, int, struct sockaddr_in *); static void frag_freef(struct ipqhead *, struct ipq *); -#if IPDIVERT -#ifdef IPDIVERT_44 -static struct mbuf *ip_reass(struct mbuf *, u_int32_t *, u_int16_t *); -#else /* !IPDIVERT_44 */ -static struct mbuf *ip_reass(struct mbuf *, u_int16_t *, u_int16_t *); -#endif /* !IPDIVERT_44 */ -#else /* !IPDIVERT */ static struct mbuf *ip_reass(struct mbuf *); -#endif /* !IPDIVERT */ 
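/*
 * Editor's illustrative sketch -- not part of the patch itself.  The hunks
 * above convert badport_bandlim() to return boolean_t (and its callers from
 * "< 0" tests to plain truth tests); the underlying idea is a common
 * per-category rate-limiting pattern: count responses within a time window
 * and report true once a budget is exceeded, so the caller suppresses the
 * reply.  The window length, limit and category count below are hypothetical
 * stand-ins; the kernel's own counters, reset logic and sysctls differ.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define BANDLIM_CATEGORIES 5    /* hypothetical number of response classes */

static bool
rate_limited(int which, uint64_t now, int limit, uint64_t window_secs)
{
	static uint64_t window_start[BANDLIM_CATEGORIES];
	static int      packets[BANDLIM_CATEGORIES];

	/* Feature disabled or category out of range: never limit. */
	if (limit <= 0 || which < 0 || which >= BANDLIM_CATEGORIES) {
		return false;
	}

	/* Open a fresh window once the previous one has elapsed. */
	if (now - window_start[which] >= window_secs) {
		window_start[which] = now;
		packets[which] = 0;
	}

	/* true means "budget exceeded, suppress this response". */
	return ++packets[which] > limit;
}

int
main(void)
{
	/* Allow at most 5 responses of category 0 per one-second window. */
	for (int i = 0; i < 10; i++) {
		uint64_t now = (uint64_t)time(NULL);
		printf("response %d: %s\n", i,
		    rate_limited(0, now, 5, 1) ? "suppressed" : "sent");
	}
	return 0;
}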
static void ip_fwd_route_copyout(struct ifnet *, struct route *); static void ip_fwd_route_copyin(struct ifnet *, struct route *); static inline u_short ip_cksum(struct mbuf *, int); @@ -673,12 +650,6 @@ inaddr_hashval(u_int32_t key) } } -void -ip_proto_dispatch_in_wrapper(struct mbuf *m, int hlen, u_int8_t proto) -{ - ip_proto_dispatch_in(m, hlen, proto, 0); -} - __private_extern__ void ip_proto_dispatch_in(struct mbuf *m, int hlen, u_int8_t proto, ipfilter_t inject_ipfref) @@ -820,8 +791,8 @@ ip_input_second_pass_loop_tbl(pktchain_elm_t *tbl, struct ip_fw_in_args *args) for (i = 0; i < PKTTBL_SZ; i++) { if (tbl[i].pkte_head != NULL) { struct mbuf *m = tbl[i].pkte_head; - ip_input_second_pass(m, m->m_pkthdr.rcvif, 0, - tbl[i].pkte_npkts, tbl[i].pkte_nbytes, args, 0); + ip_input_second_pass(m, m->m_pkthdr.rcvif, + tbl[i].pkte_npkts, tbl[i].pkte_nbytes, args); if (tbl[i].pkte_npkts > 2) { ipstat.ips_rxc_chainsz_gt2++; @@ -850,19 +821,13 @@ ip_input_cpout_args(struct ip_fw_in_args *args, struct ip_fw_args *args1, bzero(args1, sizeof(struct ip_fw_args)); *done_init = TRUE; } - args1->fwa_next_hop = args->fwai_next_hop; - args1->fwa_ipfw_rule = args->fwai_ipfw_rule; args1->fwa_pf_rule = args->fwai_pf_rule; - args1->fwa_divert_rule = args->fwai_divert_rule; } static void ip_input_cpin_args(struct ip_fw_args *args1, struct ip_fw_in_args *args) { - args->fwai_next_hop = args1->fwa_next_hop; - args->fwai_ipfw_rule = args1->fwa_ipfw_rule; args->fwai_pf_rule = args1->fwa_pf_rule; - args->fwai_divert_rule = args1->fwa_divert_rule; } typedef enum { @@ -899,13 +864,7 @@ ip_input_dispatch_chain(struct mbuf *m) while (tmp_mbuf != NULL) { nxt_mbuf = mbuf_nextpkt(tmp_mbuf); mbuf_setnextpkt(tmp_mbuf, NULL); - - if ((sw_lro) && (ip->ip_p == IPPROTO_TCP)) { - tmp_mbuf = tcp_lro(tmp_mbuf, hlen); - } - if (tmp_mbuf) { - ip_proto_dispatch_in(tmp_mbuf, hlen, ip->ip_p, 0); - } + ip_proto_dispatch_in(tmp_mbuf, hlen, ip->ip_p, 0); tmp_mbuf = nxt_mbuf; if (tmp_mbuf) { ip = mtod(tmp_mbuf, struct ip *); @@ -1000,11 +959,9 @@ ip_input_adjust(struct mbuf *m, struct ip *ip, struct ifnet *inifp) /* * First pass does all essential packet validation and places on a per flow * queue for doing operations that have same outcome for all packets of a flow. - * div_info is packet divert/tee info */ static ipinput_chain_ret_t -ip_input_first_pass(struct mbuf *m, u_int32_t *div_info, - struct ip_fw_in_args *args, int *ours, struct mbuf **modm) +ip_input_first_pass(struct mbuf *m, struct ip_fw_in_args *args, struct mbuf **modm) { struct ip *ip; struct ifnet *inifp; @@ -1012,31 +969,15 @@ ip_input_first_pass(struct mbuf *m, u_int32_t *div_info, int retval = IPINPUT_DOCHAIN; int len = 0; struct in_addr src_ip; -#if IPFIREWALL - int i; -#endif -#if IPFIREWALL || DUMMYNET +#if DUMMYNET struct m_tag *copy; struct m_tag *p; boolean_t delete = FALSE; struct ip_fw_args args1; boolean_t init = FALSE; -#endif +#endif /* DUMMYNET */ ipfilter_t inject_filter_ref = NULL; -#if !IPFIREWALL -#pragma unused (args) -#endif - -#if !IPDIVERT -#pragma unused (div_info) -#pragma unused (ours) -#endif - -#if !IPFIREWALL_FORWARD -#pragma unused (ours) -#endif - /* Check if the mbuf is still valid after interface filter processing */ MBUF_INPUT_CHECK(m, m->m_pkthdr.rcvif); inifp = mbuf_pkthdr_rcvif(m); @@ -1047,8 +988,7 @@ ip_input_first_pass(struct mbuf *m, u_int32_t *div_info, m->m_pkthdr.pkt_flags &= ~PKTF_FORWARDED; -#if IPFIREWALL || DUMMYNET - +#if DUMMYNET /* * Don't bother searching for tag(s) if there's none. 
*/ @@ -1060,34 +1000,13 @@ ip_input_first_pass(struct mbuf *m, u_int32_t *div_info, p = m_tag_first(m); while (p) { if (p->m_tag_id == KERNEL_MODULE_TAG_ID) { -#if DUMMYNET if (p->m_tag_type == KERNEL_TAG_TYPE_DUMMYNET) { struct dn_pkt_tag *dn_tag; dn_tag = (struct dn_pkt_tag *)(p + 1); - args->fwai_ipfw_rule = dn_tag->dn_ipfw_rule; args->fwai_pf_rule = dn_tag->dn_pf_rule; delete = TRUE; } -#endif - -#if IPDIVERT - if (p->m_tag_type == KERNEL_TAG_TYPE_DIVERT) { - struct divert_tag *div_tag; - - div_tag = (struct divert_tag *)(p + 1); - args->fwai_divert_rule = div_tag->cookie; - delete = TRUE; - } -#endif - - if (p->m_tag_type == KERNEL_TAG_TYPE_IPFORWARD) { - struct ip_fwd_tag *ipfwd_tag; - - ipfwd_tag = (struct ip_fwd_tag *)(p + 1); - args->fwai_next_hop = ipfwd_tag->next_hop; - delete = TRUE; - } if (delete) { copy = p; @@ -1107,24 +1026,17 @@ ip_input_first_pass(struct mbuf *m, u_int32_t *div_info, } #endif -#if DUMMYNET - if (args->fwai_ipfw_rule || args->fwai_pf_rule) { + if (args->fwai_pf_rule) { /* dummynet already filtered us */ ip = mtod(m, struct ip *); hlen = IP_VHL_HL(ip->ip_vhl) << 2; inject_filter_ref = ipf_get_inject_filter(m); -#if IPFIREWALL - if (args->fwai_ipfw_rule) { - goto iphack; - } -#endif /* IPFIREWALL */ if (args->fwai_pf_rule) { goto check_with_pf; } } -#endif /* DUMMYNET */ ipfw_tags_done: -#endif /* IPFIREWALL || DUMMYNET */ +#endif /* DUMMYNET */ /* * No need to process packet twice if we've already seen it. @@ -1290,7 +1202,7 @@ ipfw_tags_done: #if DUMMYNET check_with_pf: -#endif +#endif /* DUMMYNET */ #if PF /* Invoke inbound packet filter */ if (PF_IS_ENABLED) { @@ -1330,83 +1242,7 @@ check_with_pf: } #endif -#if IPFIREWALL -#if DUMMYNET -iphack: -#endif /* DUMMYNET */ - /* - * Check if we want to allow this packet to be processed. - * Consider it to be bad if not. 
- */ - if (fw_enable && IPFW_LOADED) { -#if IPFIREWALL_FORWARD - /* - * If we've been forwarded from the output side, then - * skip the firewall a second time - */ - if (args->fwai_next_hop) { - *ours = 1; - return IPINPUT_DONTCHAIN; - } -#endif /* IPFIREWALL_FORWARD */ - ip_input_cpout_args(args, &args1, &init); - args1.fwa_m = m; - - i = ip_fw_chk_ptr(&args1); - m = args1.fwa_m; - - if ((i & IP_FW_PORT_DENY_FLAG) || m == NULL) { /* drop */ - if (m) { - m_freem(m); - } - ip_input_update_nstat(inifp, src_ip, 1, len); - KERNEL_DEBUG(DBG_LAYER_END, 0, 0, 0, 0, 0); - OSAddAtomic(1, &ipstat.ips_total); - return IPINPUT_FREED; - } - ip = mtod(m, struct ip *); /* just in case m changed */ - *modm = m; - ip_input_cpin_args(&args1, args); - - if (i == 0 && args->fwai_next_hop == NULL) { /* common case */ - goto pass; - } -#if DUMMYNET - if (DUMMYNET_LOADED && (i & IP_FW_PORT_DYNT_FLAG) != 0) { - /* Send packet to the appropriate pipe */ - ip_dn_io_ptr(m, i & 0xffff, DN_TO_IP_IN, &args1, - DN_CLIENT_IPFW); - ip_input_update_nstat(inifp, src_ip, 1, len); - KERNEL_DEBUG(DBG_LAYER_END, 0, 0, 0, 0, 0); - OSAddAtomic(1, &ipstat.ips_total); - return IPINPUT_FREED; - } -#endif /* DUMMYNET */ -#if IPDIVERT - if (i != 0 && (i & IP_FW_PORT_DYNT_FLAG) == 0) { - /* Divert or tee packet */ - *div_info = i; - *ours = 1; - return IPINPUT_DONTCHAIN; - } -#endif -#if IPFIREWALL_FORWARD - if (i == 0 && args->fwai_next_hop != NULL) { - retval = IPINPUT_DONTCHAIN; - goto pass; - } -#endif - /* - * if we get here, the packet must be dropped - */ - ip_input_update_nstat(inifp, src_ip, 1, len); - KERNEL_DEBUG(DBG_LAYER_END, 0, 0, 0, 0, 0); - m_freem(m); - OSAddAtomic(1, &ipstat.ips_total); - return IPINPUT_FREED; - } -#endif /* IPFIREWALL */ -#if IPSEC | IPFIREWALL +#if IPSEC pass: #endif /* @@ -1416,12 +1252,7 @@ pass: * to be sent and the original packet to be freed). */ ip_nhops = 0; /* for source routed packets */ -#if IPFIREWALL - if (hlen > sizeof(struct ip) && - ip_dooptions(m, 0, args->fwai_next_hop)) { -#else /* !IPFIREWALL */ if (hlen > sizeof(struct ip) && ip_dooptions(m, 0, NULL)) { -#endif /* !IPFIREWALL */ ip_input_update_nstat(inifp, src_ip, 1, len); KERNEL_DEBUG(DBG_LAYER_END, 0, 0, 0, 0, 0); OSAddAtomic(1, &ipstat.ips_total); @@ -1429,9 +1260,7 @@ pass: } /* - * Don't chain fragmented packets as the process of determining - * if it is our fragment or someone else's plus the complexity of - * divert and fw args makes it harder to do chaining. 
+ * Don't chain fragmented packets */ if (ip->ip_off & ~(IP_DF | IP_RF)) { return IPINPUT_DONTCHAIN; @@ -1618,7 +1447,7 @@ ip_input_check_interface(struct mbuf **mp, struct ip *ip, struct ifnet *inifp) inet_ntop(AF_INET, &ip->ip_src, src_str, sizeof(src_str)); inet_ntop(AF_INET, &ip->ip_dst, dst_str, sizeof(dst_str)); - os_log_info(OS_LOG_DEFAULT, + os_log(OS_LOG_DEFAULT, "%s: no interface match for packet from %s to %s proto %u received via %s", __func__, src_str, dst_str, ip->ip_p, inifp->if_xname); } @@ -1635,19 +1464,13 @@ ip_input_check_interface(struct mbuf **mp, struct ip *ip, struct ifnet *inifp) } static void -ip_input_second_pass(struct mbuf *m, struct ifnet *inifp, u_int32_t div_info, - int npkts_in_chain, int bytes_in_chain, struct ip_fw_in_args *args, int ours) +ip_input_second_pass(struct mbuf *m, struct ifnet *inifp, + int npkts_in_chain, int bytes_in_chain, struct ip_fw_in_args *args) { struct mbuf *tmp_mbuf = NULL; unsigned int hlen; -#if !IPFIREWALL #pragma unused (args) -#endif - -#if !IPDIVERT -#pragma unused (div_info) -#endif struct ip *ip = mtod(m, struct ip *); hlen = IP_VHL_HL(ip->ip_vhl) << 2; @@ -1668,10 +1491,6 @@ ip_input_second_pass(struct mbuf *m, struct ifnet *inifp, u_int32_t div_info, ip_input_update_nstat(inifp, ip->ip_src, npkts_in_chain, bytes_in_chain); - if (ours) { - goto ours; - } - /* * Check our list of addresses, to see if the packet is for us. * If we don't have any addresses, assume any unicast packet @@ -1694,8 +1513,7 @@ ip_input_second_pass(struct mbuf *m, struct ifnet *inifp, u_int32_t div_info, * Enable a consistency check between the destination address * and the arrival interface for a unicast packet (the RFC 1122 * strong ES model) if IP forwarding is disabled and the packet - * is not locally generated and the packet is not subject to - * 'ipfw fwd'. + * is not locally generated * * XXX - Checking also should be disabled if the destination * address is ipnat'ed to a different interface. @@ -1746,11 +1564,7 @@ ip_input_second_pass(struct mbuf *m, struct ifnet *inifp, u_int32_t div_info, OSAddAtomic(1, &ipstat.ips_cantforward); m_freem(tmp_mbuf); } else { -#if IPFIREWALL - ip_forward(tmp_mbuf, 0, args->fwai_next_hop); -#else ip_forward(tmp_mbuf, 0, NULL); -#endif } tmp_mbuf = nxt_mbuf; } @@ -1759,41 +1573,17 @@ ip_input_second_pass(struct mbuf *m, struct ifnet *inifp, u_int32_t div_info, ours: ip = mtod(m, struct ip *); /* in case it changed */ /* - * If offset or IP_MF are set, must reassemble. + * If offset is set, must reassemble. */ if (ip->ip_off & ~(IP_DF | IP_RF)) { VERIFY(npkts_in_chain == 1); - /* - * ip_reass() will return a different mbuf, and update - * the divert info in div_info and args->fwai_divert_rule. - */ -#if IPDIVERT - m = ip_reass(m, (u_int16_t *)&div_info, &args->fwai_divert_rule); -#else m = ip_reass(m); -#endif if (m == NULL) { return; } ip = mtod(m, struct ip *); /* Get the header length of the reassembled packet */ hlen = IP_VHL_HL(ip->ip_vhl) << 2; -#if IPDIVERT - /* Restore original checksum before diverting packet */ - if (div_info != 0) { - VERIFY(npkts_in_chain == 1); -#if BYTE_ORDER != BIG_ENDIAN - HTONS(ip->ip_len); - HTONS(ip->ip_off); -#endif - ip->ip_sum = 0; - ip->ip_sum = ip_cksum_hdr_in(m, hlen); -#if BYTE_ORDER != BIG_ENDIAN - NTOHS(ip->ip_off); - NTOHS(ip->ip_len); -#endif - } -#endif } /* @@ -1802,42 +1592,6 @@ ours: */ ip->ip_len -= hlen; -#if IPDIVERT - /* - * Divert or tee packet to the divert protocol if required. 
- * - * If div_info is zero then cookie should be too, so we shouldn't - * need to clear them here. Assume divert_packet() does so also. - */ - if (div_info != 0) { - struct mbuf *clone = NULL; - VERIFY(npkts_in_chain == 1); - - /* Clone packet if we're doing a 'tee' */ - if (div_info & IP_FW_PORT_TEE_FLAG) { - clone = m_dup(m, M_DONTWAIT); - } - - /* Restore packet header fields to original values */ - ip->ip_len += hlen; - -#if BYTE_ORDER != BIG_ENDIAN - HTONS(ip->ip_len); - HTONS(ip->ip_off); -#endif - /* Deliver packet to divert input routine */ - OSAddAtomic(1, &ipstat.ips_delivered); - divert_packet(m, 1, div_info & 0xffff, args->fwai_divert_rule); - - /* If 'tee', continue with original packet */ - if (clone == NULL) { - return; - } - m = clone; - ip = mtod(m, struct ip *); - } -#endif - #if IPSEC /* * enforce IPsec policy checking if we are seeing last header. @@ -1858,40 +1612,8 @@ ours: */ OSAddAtomic(npkts_in_chain, &ipstat.ips_delivered); -#if IPFIREWALL - if (args->fwai_next_hop && ip->ip_p == IPPROTO_TCP) { - /* TCP needs IPFORWARD info if available */ - struct m_tag *fwd_tag; - struct ip_fwd_tag *ipfwd_tag; - - VERIFY(npkts_in_chain == 1); - fwd_tag = m_tag_create(KERNEL_MODULE_TAG_ID, - KERNEL_TAG_TYPE_IPFORWARD, sizeof(*ipfwd_tag), - M_NOWAIT, m); - if (fwd_tag == NULL) { - goto bad; - } - - ipfwd_tag = (struct ip_fwd_tag *)(fwd_tag + 1); - ipfwd_tag->next_hop = args->fwai_next_hop; - - m_tag_prepend(m, fwd_tag); - - KERNEL_DEBUG(DBG_LAYER_END, ip->ip_dst.s_addr, - ip->ip_src.s_addr, ip->ip_p, ip->ip_off, ip->ip_len); - - /* TCP deals with its own locking */ - ip_proto_dispatch_in(m, hlen, ip->ip_p, 0); - } else { - KERNEL_DEBUG(DBG_LAYER_END, ip->ip_dst.s_addr, - ip->ip_src.s_addr, ip->ip_p, ip->ip_off, ip->ip_len); - - ip_input_dispatch_chain(m); - } -#else /* !IPFIREWALL */ ip_input_dispatch_chain(m); -#endif /* !IPFIREWALL */ KERNEL_DEBUG(DBG_LAYER_END, 0, 0, 0, 0, 0); return; bad: @@ -1907,8 +1629,6 @@ ip_input_process_list(struct mbuf *packet_list) struct mbuf *packet = NULL; struct mbuf *modm = NULL; /* modified mbuf */ int retval = 0; - u_int32_t div_info = 0; - int ours = 0; #if (DEBUG || DEVELOPMENT) struct timeval start_tv; #endif /* (DEBUG || DEVELOPMENT) */ @@ -1953,11 +1673,9 @@ restart_list_process: num_pkts++; modm = NULL; - div_info = 0; bzero(&args, sizeof(args)); - retval = ip_input_first_pass(packet, &div_info, &args, - &ours, &modm); + retval = ip_input_first_pass(packet, &args, &modm); if (retval == IPINPUT_DOCHAIN) { if (modm) { @@ -2001,8 +1719,8 @@ restart_list_process: net_perf_histogram(&net_perf, 1); } #endif /* (DEBUG || DEVELOPMENT) */ - ip_input_second_pass(packet, packet->m_pkthdr.rcvif, div_info, - 1, packet->m_pkthdr.len, &args, ours); + ip_input_second_pass(packet, packet->m_pkthdr.rcvif, + 1, packet->m_pkthdr.len, &args); } if (packet_list) { @@ -2044,7 +1762,7 @@ ip_input(struct mbuf *m) m->m_pkthdr.pkt_flags &= ~PKTF_FORWARDED; -#if IPFIREWALL || DUMMYNET +#if DUMMYNET bzero(&args, sizeof(struct ip_fw_args)); /* @@ -2055,38 +1773,13 @@ ip_input(struct mbuf *m) } /* Grab info from mtags prepended to the chain */ -#if DUMMYNET if ((tag = m_tag_locate(m, KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_DUMMYNET, NULL)) != NULL) { struct dn_pkt_tag *dn_tag; dn_tag = (struct dn_pkt_tag *)(tag + 1); - args.fwa_ipfw_rule = dn_tag->dn_ipfw_rule; args.fwa_pf_rule = dn_tag->dn_pf_rule; - m_tag_delete(m, tag); - } -#endif /* DUMMYNET */ - -#if IPDIVERT - if ((tag = m_tag_locate(m, KERNEL_MODULE_TAG_ID, - KERNEL_TAG_TYPE_DIVERT, NULL)) != NULL) { - struct 
divert_tag *div_tag; - - div_tag = (struct divert_tag *)(tag + 1); - args.fwa_divert_rule = div_tag->cookie; - - m_tag_delete(m, tag); - } -#endif - - if ((tag = m_tag_locate(m, KERNEL_MODULE_TAG_ID, - KERNEL_TAG_TYPE_IPFORWARD, NULL)) != NULL) { - struct ip_fwd_tag *ipfwd_tag; - - ipfwd_tag = (struct ip_fwd_tag *)(tag + 1); - args.fwa_next_hop = ipfwd_tag->next_hop; - m_tag_delete(m, tag); } @@ -2096,24 +1789,17 @@ ip_input(struct mbuf *m) } #endif -#if DUMMYNET - if (args.fwa_ipfw_rule || args.fwa_pf_rule) { + if (args.fwa_pf_rule) { /* dummynet already filtered us */ ip = mtod(m, struct ip *); hlen = IP_VHL_HL(ip->ip_vhl) << 2; inject_filter_ref = ipf_get_inject_filter(m); -#if IPFIREWALL - if (args.fwa_ipfw_rule) { - goto iphack; - } -#endif /* IPFIREWALL */ if (args.fwa_pf_rule) { goto check_with_pf; } } -#endif /* DUMMYNET */ ipfw_tags_done: -#endif /* IPFIREWALL || DUMMYNET */ +#endif /* DUMMYNET */ /* * No need to process packet twice if we've already seen it. @@ -2295,71 +1981,7 @@ check_with_pf: } #endif -#if IPFIREWALL -#if DUMMYNET -iphack: -#endif /* DUMMYNET */ - /* - * Check if we want to allow this packet to be processed. - * Consider it to be bad if not. - */ - if (fw_enable && IPFW_LOADED) { -#if IPFIREWALL_FORWARD - /* - * If we've been forwarded from the output side, then - * skip the firewall a second time - */ - if (args.fwa_next_hop) { - goto ours; - } -#endif /* IPFIREWALL_FORWARD */ - - args.fwa_m = m; - - i = ip_fw_chk_ptr(&args); - m = args.fwa_m; - - if ((i & IP_FW_PORT_DENY_FLAG) || m == NULL) { /* drop */ - if (m) { - m_freem(m); - } - return; - } - ip = mtod(m, struct ip *); /* just in case m changed */ - - if (i == 0 && args.fwa_next_hop == NULL) { /* common case */ - goto pass; - } -#if DUMMYNET - if (DUMMYNET_LOADED && (i & IP_FW_PORT_DYNT_FLAG) != 0) { - /* Send packet to the appropriate pipe */ - ip_dn_io_ptr(m, i & 0xffff, DN_TO_IP_IN, &args, - DN_CLIENT_IPFW); - return; - } -#endif /* DUMMYNET */ -#if IPDIVERT - if (i != 0 && (i & IP_FW_PORT_DYNT_FLAG) == 0) { - /* Divert or tee packet */ - div_info = i; - goto ours; - } -#endif -#if IPFIREWALL_FORWARD - if (i == 0 && args.fwa_next_hop != NULL) { - goto pass; - } -#endif - /* - * if we get here, the packet must be dropped - */ - m_freem(m); - return; - } -#endif /* IPFIREWALL */ -#if IPSEC | IPFIREWALL pass: -#endif /* * Process options and, if not destined for us, * ship it on. ip_dooptions returns 1 when an @@ -2367,12 +1989,7 @@ pass: * to be sent and the original packet to be freed). */ ip_nhops = 0; /* for source routed packets */ -#if IPFIREWALL - if (hlen > sizeof(struct ip) && - ip_dooptions(m, 0, args.fwa_next_hop)) { -#else /* !IPFIREWALL */ if (hlen > sizeof(struct ip) && ip_dooptions(m, 0, NULL)) { -#endif /* !IPFIREWALL */ return; } @@ -2437,11 +2054,7 @@ pass: OSAddAtomic(1, &ipstat.ips_cantforward); m_freem(m); } else { -#if IPFIREWALL - ip_forward(m, 0, args.fwa_next_hop); -#else ip_forward(m, 0, NULL); -#endif } return; @@ -2450,36 +2063,13 @@ ours: * If offset or IP_MF are set, must reassemble. */ if (ip->ip_off & ~(IP_DF | IP_RF)) { - /* - * ip_reass() will return a different mbuf, and update - * the divert info in div_info and args.fwa_divert_rule. 
- */ -#if IPDIVERT - m = ip_reass(m, (u_int16_t *)&div_info, &args.fwa_divert_rule); -#else m = ip_reass(m); -#endif if (m == NULL) { return; } ip = mtod(m, struct ip *); /* Get the header length of the reassembled packet */ hlen = IP_VHL_HL(ip->ip_vhl) << 2; -#if IPDIVERT - /* Restore original checksum before diverting packet */ - if (div_info != 0) { -#if BYTE_ORDER != BIG_ENDIAN - HTONS(ip->ip_len); - HTONS(ip->ip_off); -#endif - ip->ip_sum = 0; - ip->ip_sum = ip_cksum_hdr_in(m, hlen); -#if BYTE_ORDER != BIG_ENDIAN - NTOHS(ip->ip_off); - NTOHS(ip->ip_len); -#endif - } -#endif } /* @@ -2488,40 +2078,6 @@ ours: */ ip->ip_len -= hlen; -#if IPDIVERT - /* - * Divert or tee packet to the divert protocol if required. - * - * If div_info is zero then cookie should be too, so we shouldn't - * need to clear them here. Assume divert_packet() does so also. - */ - if (div_info != 0) { - struct mbuf *clone = NULL; - - /* Clone packet if we're doing a 'tee' */ - if (div_info & IP_FW_PORT_TEE_FLAG) { - clone = m_dup(m, M_DONTWAIT); - } - - /* Restore packet header fields to original values */ - ip->ip_len += hlen; - -#if BYTE_ORDER != BIG_ENDIAN - HTONS(ip->ip_len); - HTONS(ip->ip_off); -#endif - /* Deliver packet to divert input routine */ - OSAddAtomic(1, &ipstat.ips_delivered); - divert_packet(m, 1, div_info & 0xffff, args.fwa_divert_rule); - - /* If 'tee', continue with original packet */ - if (clone == NULL) { - return; - } - m = clone; - ip = mtod(m, struct ip *); - } -#endif #if IPSEC /* @@ -2542,51 +2098,7 @@ ours: */ OSAddAtomic(1, &ipstat.ips_delivered); -#if IPFIREWALL - if (args.fwa_next_hop && ip->ip_p == IPPROTO_TCP) { - /* TCP needs IPFORWARD info if available */ - struct m_tag *fwd_tag; - struct ip_fwd_tag *ipfwd_tag; - - fwd_tag = m_tag_create(KERNEL_MODULE_TAG_ID, - KERNEL_TAG_TYPE_IPFORWARD, sizeof(*ipfwd_tag), - M_NOWAIT, m); - if (fwd_tag == NULL) { - goto bad; - } - - ipfwd_tag = (struct ip_fwd_tag *)(fwd_tag + 1); - ipfwd_tag->next_hop = args.fwa_next_hop; - - m_tag_prepend(m, fwd_tag); - - KERNEL_DEBUG(DBG_LAYER_END, ip->ip_dst.s_addr, - ip->ip_src.s_addr, ip->ip_p, ip->ip_off, ip->ip_len); - - /* TCP deals with its own locking */ - ip_proto_dispatch_in(m, hlen, ip->ip_p, 0); - } else { - KERNEL_DEBUG(DBG_LAYER_END, ip->ip_dst.s_addr, - ip->ip_src.s_addr, ip->ip_p, ip->ip_off, ip->ip_len); - - if ((sw_lro) && (ip->ip_p == IPPROTO_TCP)) { - m = tcp_lro(m, hlen); - if (m == NULL) { - return; - } - } - - ip_proto_dispatch_in(m, hlen, ip->ip_p, 0); - } -#else /* !IPFIREWALL */ - if ((sw_lro) && (ip->ip_p == IPPROTO_TCP)) { - m = tcp_lro(m, hlen); - if (m == NULL) { - return; - } - } ip_proto_dispatch_in(m, hlen, ip->ip_p, 0); -#endif /* !IPFIREWALL */ return; bad: @@ -2672,23 +2184,10 @@ done: * whole datagram. If a chain for reassembly of this datagram already * exists, then it is given as fp; otherwise have to make a chain. * - * When IPDIVERT enabled, keep additional state with each packet that - * tells us if we need to divert or tee the packet we're building. - * * The IP header is *NOT* adjusted out of iplen (but in host byte order). 
*/ static struct mbuf * -#if IPDIVERT -ip_reass(struct mbuf *m, -#ifdef IPDIVERT_44 - u_int32_t *divinfo, -#else /* IPDIVERT_44 */ - u_int16_t *divinfo, -#endif /* IPDIVERT_44 */ - u_int16_t *divcookie) -#else /* IPDIVERT */ ip_reass(struct mbuf *m) -#endif /* IPDIVERT */ { struct ip *ip; struct mbuf *p, *q, *nq, *t; @@ -2731,9 +2230,6 @@ ip_reass(struct mbuf *m) if (ip->ip_id == fp->ipq_id && ip->ip_src.s_addr == fp->ipq_src.s_addr && ip->ip_dst.s_addr == fp->ipq_dst.s_addr && -#if CONFIG_MACF_NET - mac_ipq_label_compare(m, fp) && -#endif ip->ip_p == fp->ipq_p) { goto found; } @@ -2878,14 +2374,6 @@ found: if (fp == NULL) { goto dropfrag; } -#if CONFIG_MACF_NET - if (mac_ipq_label_init(fp, M_NOWAIT) != 0) { - ipq_free(fp); - fp = NULL; - goto dropfrag; - } - mac_ipq_label_associate(m, fp); -#endif TAILQ_INSERT_HEAD(head, fp, ipq_list); nipq++; fp->ipq_nfrags = 1; @@ -2904,29 +2392,10 @@ found: fp->ipq_csum = csum; fp->ipq_csum_flags = csum_flags; } -#if IPDIVERT - /* - * Transfer firewall instructions to the fragment structure. - * Only trust info in the fragment at offset 0. - */ - if (ip->ip_off == 0) { -#ifdef IPDIVERT_44 - fp->ipq_div_info = *divinfo; -#else - fp->ipq_divert = *divinfo; -#endif - fp->ipq_div_cookie = *divcookie; - } - *divinfo = 0; - *divcookie = 0; -#endif /* IPDIVERT */ m = NULL; /* nothing to return */ goto done; } else { fp->ipq_nfrags++; -#if CONFIG_MACF_NET - mac_ipq_label_update(m, fp); -#endif } #define GETIP(m) ((struct ip *)((m)->m_pkthdr.pkt_hdr)) @@ -3019,22 +2488,6 @@ found: fp->ipq_csum_flags = 0; } -#if IPDIVERT - /* - * Transfer firewall instructions to the fragment structure. - * Only trust info in the fragment at offset 0. - */ - if (ip->ip_off == 0) { -#ifdef IPDIVERT_44 - fp->ipq_div_info = *divinfo; -#else - fp->ipq_divert = *divinfo; -#endif - fp->ipq_div_cookie = *divcookie; - } - *divinfo = 0; - *divcookie = 0; -#endif /* IPDIVERT */ /* * Check for complete reassembly and perform frag per packet @@ -3118,22 +2571,6 @@ found: CSUM_IP_CHECKED | CSUM_IP_VALID; } -#if IPDIVERT - /* - * Extract firewall instructions from the fragment structure. - */ -#ifdef IPDIVERT_44 - *divinfo = fp->ipq_div_info; -#else - *divinfo = fp->ipq_divert; -#endif - *divcookie = fp->ipq_div_cookie; -#endif /* IPDIVERT */ - -#if CONFIG_MACF_NET - mac_mbuf_label_associate_ipq(fp, m); - mac_ipq_label_destroy(fp); -#endif /* * Create header for new ip packet by modifying header of first * packet; dequeue and discard fragment reassembly header. @@ -3178,10 +2615,6 @@ done: return NULL; dropfrag: -#if IPDIVERT - *divinfo = 0; - *divcookie = 0; -#endif /* IPDIVERT */ ipstat.ips_fragdropped++; if (fp != NULL) { fp->ipq_nfrags--; @@ -4004,9 +3437,7 @@ ip_fwd_route_copyin(struct ifnet *ifp, struct route *src) static void ip_forward(struct mbuf *m, int srcrt, struct sockaddr_in *next_hop) { -#if !IPFIREWALL #pragma unused(next_hop) -#endif struct ip *ip = mtod(m, struct ip *); struct sockaddr_in *sin; struct rtentry *rt; @@ -4033,15 +3464,7 @@ ip_forward(struct mbuf *m, int srcrt, struct sockaddr_in *next_hop) #endif /* PF */ dest = 0; -#if IPFIREWALL - /* - * Cache the destination address of the packet; this may be - * changed by use of 'ipfw fwd'. - */ - pkt_dst = ((next_hop != NULL) ? 
next_hop->sin_addr : ip->ip_dst); -#else /* !IPFIREWALL */ pkt_dst = ip->ip_dst; -#endif /* !IPFIREWALL */ #if DIAGNOSTIC if (ipprintfs) { @@ -4161,27 +3584,6 @@ ip_forward(struct mbuf *m, int srcrt, struct sockaddr_in *next_hop) } RT_UNLOCK(rt); -#if IPFIREWALL - if (next_hop != NULL) { - /* Pass IPFORWARD info if available */ - struct m_tag *tag; - struct ip_fwd_tag *ipfwd_tag; - - tag = m_tag_create(KERNEL_MODULE_TAG_ID, - KERNEL_TAG_TYPE_IPFORWARD, - sizeof(*ipfwd_tag), M_NOWAIT, m); - if (tag == NULL) { - error = ENOBUFS; - m_freem(m); - goto done; - } - - ipfwd_tag = (struct ip_fwd_tag *)(tag + 1); - ipfwd_tag->next_hop = next_hop; - - m_tag_prepend(m, tag); - } -#endif /* IPFIREWALL */ /* Mark this packet as being forwarded from another interface */ m->m_pkthdr.pkt_flags |= PKTF_FORWARDED; @@ -4347,7 +3749,7 @@ ip_forward(struct mbuf *m, int srcrt, struct sockaddr_in *next_hop) } break; - case EACCES: /* ipfw denied packet */ + case EACCES: m_freem(mcopy); goto done; } @@ -4396,7 +3798,12 @@ ip_savecontrol(struct inpcb *inp, struct mbuf **mp, struct ip *ip, goto no_mbufs; } } - if (inp->inp_flags & INP_RECVDSTADDR) { + if (inp->inp_flags & INP_RECVDSTADDR +#if CONTENT_FILTER + /* Content Filter needs to see local address */ + || (inp->inp_socket->so_cfil_db != NULL) +#endif + ) { mp = sbcreatecontrol_mbuf((caddr_t)&ip->ip_dst, sizeof(struct in_addr), IP_RECVDSTADDR, IPPROTO_IP, mp); if (*mp == NULL) { diff --git a/bsd/netinet/ip_output.c b/bsd/netinet/ip_output.c index 3e92c7b94..ea79bcea4 100644 --- a/bsd/netinet/ip_output.c +++ b/bsd/netinet/ip_output.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2019 Apple Inc. All rights reserved. + * Copyright (c) 2000-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -109,10 +109,6 @@ #include -#if CONFIG_MACF_NET -#include -#endif /* CONFIG_MACF_NET */ - #define DBG_LAYER_BEG NETDBG_CODE(DBG_NETIP, 1) #define DBG_LAYER_END NETDBG_CODE(DBG_NETIP, 3) #define DBG_FNC_IP_OUTPUT NETDBG_CODE(DBG_NETIP, (1 << 8) | 1) @@ -132,12 +128,6 @@ #include #endif /* NECP */ -#if IPFIREWALL -#include -#if IPDIVERT -#include -#endif /* IPDIVERT */ -#endif /* IPFIREWALL */ #if DUMMYNET #include @@ -147,13 +137,6 @@ #include #endif /* PF */ -#if IPFIREWALL_FORWARD && IPFIREWALL_FORWARD_DEBUG -#define print_ip(a) \ - printf("%ld.%ld.%ld.%ld", (ntohl(a.s_addr) >> 24) & 0xFF, \ - (ntohl(a.s_addr) >> 16) & 0xFF, \ - (ntohl(a.s_addr) >> 8) & 0xFF, \ - (ntohl(a.s_addr)) & 0xFF); -#endif /* IPFIREWALL_FORWARD && IPFIREWALL_FORWARD_DEBUG */ u_short ip_id; @@ -240,10 +223,7 @@ static unsigned int imo_debug = 1; /* debugging (enabled) */ #else static unsigned int imo_debug; /* debugging (disabled) */ #endif /* !DEBUG */ -static unsigned int imo_size; /* size of zone element */ static struct zone *imo_zone; /* zone for ip_moptions */ - -#define IMO_ZONE_MAX 64 /* maximum elements in zone */ #define IMO_ZONE_NAME "ip_moptions" /* zone name */ /* @@ -302,14 +282,8 @@ ip_output_list(struct mbuf *m0, int packetchain, struct mbuf *opt, necp_kernel_policy_result_parameter necp_result_parameter; necp_kernel_policy_id necp_matched_policy_id = 0; #endif /* NECP */ -#if IPFIREWALL - int ipfwoff; - struct sockaddr_in *next_hop_from_ipfwd_tag = NULL; -#endif /* IPFIREWALL */ -#if IPFIREWALL || DUMMYNET - struct m_tag *tag; -#endif /* IPFIREWALL || DUMMYNET */ #if DUMMYNET + struct m_tag *tag; struct ip_out_args saved_ipoa; struct sockaddr_in dst_buf; #endif /* DUMMYNET */ @@ -320,13 +294,8 @@ ip_output_list(struct mbuf *m0, int packetchain, struct 
mbuf *opt, #if NECP struct route necp_route; #endif /* NECP */ -#if IPFIREWALL || DUMMYNET - struct ip_fw_args args; -#endif /* IPFIREWALL || DUMMYNET */ -#if IPFIREWALL_FORWARD - struct route sro_fwd; -#endif /* IPFIREWALL_FORWARD */ #if DUMMYNET + struct ip_fw_args args; struct route saved_route; #endif /* DUMMYNET */ struct ipf_pktopts ipf_pktopts; @@ -347,9 +316,6 @@ ip_output_list(struct mbuf *m0, int packetchain, struct mbuf *opt, boolean_t noexpensive : 1; /* set once */ boolean_t noconstrained : 1; /* set once */ boolean_t awdl_unrestricted : 1; /* set once */ -#if IPFIREWALL_FORWARD - boolean_t fwd_rewrite_src : 1; -#endif /* IPFIREWALL_FORWARD */ }; uint32_t raw; } ipobf = { .raw = 0 }; @@ -379,19 +345,17 @@ ip_output_list(struct mbuf *m0, int packetchain, struct mbuf *opt, bzero(&ipobz, sizeof(ipobz)); ippo = &ipf_pktopts; -#if IPFIREWALL || DUMMYNET +#if DUMMYNET if (SLIST_EMPTY(&m0->m_pkthdr.tags)) { goto ipfw_tags_done; } /* Grab info from mtags prepended to the chain */ -#if DUMMYNET if ((tag = m_tag_locate(m0, KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_DUMMYNET, NULL)) != NULL) { struct dn_pkt_tag *dn_tag; dn_tag = (struct dn_pkt_tag *)(tag + 1); - args.fwa_ipfw_rule = dn_tag->dn_ipfw_rule; args.fwa_pf_rule = dn_tag->dn_pf_rule; opt = NULL; saved_route = dn_tag->dn_ro; @@ -409,34 +373,8 @@ ip_output_list(struct mbuf *m0, int packetchain, struct mbuf *opt, m_tag_delete(m0, tag); } -#endif /* DUMMYNET */ - -#if IPDIVERT - if ((tag = m_tag_locate(m0, KERNEL_MODULE_TAG_ID, - KERNEL_TAG_TYPE_DIVERT, NULL)) != NULL) { - struct divert_tag *div_tag; - - div_tag = (struct divert_tag *)(tag + 1); - args.fwa_divert_rule = div_tag->cookie; - - m_tag_delete(m0, tag); - } -#endif /* IPDIVERT */ - -#if IPFIREWALL - if ((tag = m_tag_locate(m0, KERNEL_MODULE_TAG_ID, - KERNEL_TAG_TYPE_IPFORWARD, NULL)) != NULL) { - struct ip_fwd_tag *ipfwd_tag; - - ipfwd_tag = (struct ip_fwd_tag *)(tag + 1); - next_hop_from_ipfwd_tag = ipfwd_tag->next_hop; - - m_tag_delete(m0, tag); - } -#endif /* IPFIREWALL */ - ipfw_tags_done: -#endif /* IPFIREWALL || DUMMYNET */ +#endif /* DUMMYNET */ m = m0; m->m_pkthdr.pkt_flags &= ~(PKTF_LOOP | PKTF_IFAINFO); @@ -521,7 +459,7 @@ ipfw_tags_done: #endif /* IPSEC */ #if DUMMYNET - if (args.fwa_ipfw_rule != NULL || args.fwa_pf_rule != NULL) { + if (args.fwa_pf_rule != NULL) { /* dummynet already saw us */ ip = mtod(m, struct ip *); hlen = IP_VHL_HL(ip->ip_vhl) << 2; @@ -537,11 +475,6 @@ ipfw_tags_done: RT_UNLOCK(ro->ro_rt); } -#if IPFIREWALL - if (args.fwa_ipfw_rule != NULL) { - goto skip_ipsec; - } -#endif /* IPFIREWALL */ if (args.fwa_pf_rule != NULL) { goto sendit; } @@ -552,9 +485,6 @@ loopit: packets_processed++; ipobf.isbroadcast = FALSE; ipobf.didfilter = FALSE; -#if IPFIREWALL_FORWARD - ipobf.fwd_rewrite_src = FALSE; -#endif /* IPFIREWALL_FORWARD */ VERIFY(m->m_flags & M_PKTHDR); /* @@ -579,21 +509,7 @@ loopit: } ip = mtod(m, struct ip *); -#if IPFIREWALL - /* - * rdar://8542331 - * - * When dealing with a packet chain, we need to reset "next_hop" - * because "dst" may have been changed to the gateway address below - * for the previous packet of the chain. This could cause the route - * to be inavertandly changed to the route to the gateway address - * (instead of the route to the destination). - */ - args.fwa_next_hop = next_hop_from_ipfwd_tag; - pkt_dst = args.fwa_next_hop ? args.fwa_next_hop->sin_addr : ip->ip_dst; -#else /* !IPFIREWALL */ pkt_dst = ip->ip_dst; -#endif /* !IPFIREWALL */ /* * We must not send if the packet is destined to network zero. 
@@ -806,7 +722,7 @@ loopit: * for correct operation (as it is for ARP). */ if (ro->ro_rt == NULL) { - unsigned long ign = RTF_PRCLONING; + uint32_t ign = RTF_PRCLONING; /* * We make an exception here: if the destination * address is INADDR_BROADCAST, allocate a protocol- @@ -1102,15 +1018,6 @@ loopit: IFA_LOCK_SPIN(&ia->ia_ifa); ip->ip_src = IA_SIN(ia)->sin_addr; IFA_UNLOCK(&ia->ia_ifa); -#if IPFIREWALL_FORWARD - /* - * Keep note that we did this - if the firewall changes - * the next-hop, our interface may change, changing the - * default source IP. It's a shame so much effort happens - * twice. Oh well. - */ - ipobf.fwd_rewrite_src = TRUE; -#endif /* IPFIREWALL_FORWARD */ } /* @@ -1146,7 +1053,6 @@ sendit: m0 = m; /* Save for later */ #if DUMMYNET args.fwa_m = m; - args.fwa_next_hop = dst; args.fwa_oif = ifp; args.fwa_ro = ro; args.fwa_dst = dst; @@ -1297,6 +1203,24 @@ sendit: goto bad; } + /* + * Update the QOS marking policy if + * 1. up layer asks it to do so + * 2. net_qos_policy_restricted is not set + * 3. qos_marking_gencount doesn't match necp_kernel_socket_policies_gencount (checked in necp_lookup_current_qos_marking) + */ + if (ipoa != NULL && + (ipoa->ipoa_flags & IPOAF_REDO_QOSMARKING_POLICY) && + net_qos_policy_restricted != 0) { + bool qos_marking = (ipoa->ipoa_flags & IPOAF_QOSMARKING_ALLOWED) ? TRUE : FALSE; + qos_marking = necp_lookup_current_qos_marking(&ipoa->qos_marking_gencount, NULL, policy_ifp, necp_result_parameter.route_rule_id, qos_marking); + if (qos_marking) { + ipoa->ipoa_flags |= IPOAF_QOSMARKING_ALLOWED; + } else { + ipoa->ipoa_flags &= ~IPOAF_QOSMARKING_ALLOWED; + } + } + /* Set ifp to the tunnel interface, since it is compatible with the packet */ ifp = policy_ifp; ro = &necp_route; @@ -1459,7 +1383,7 @@ sendit: break; default: printf("ip4_output (ipsec): error code %d\n", error); - /* FALLTHRU */ + OS_FALLTHROUGH; case ENOENT: /* don't show these error codes to the user */ error = 0; @@ -1583,270 +1507,6 @@ sendit: skip_ipsec: #endif /* IPSEC */ -#if IPFIREWALL - /* - * Check with the firewall... - * but not if we are already being fwd'd from a firewall. - */ - if (fw_enable && IPFW_LOADED && !args.fwa_next_hop) { - struct sockaddr_in *old = dst; - - args.fwa_m = m; - args.fwa_next_hop = dst; - args.fwa_oif = ifp; - ipfwoff = ip_fw_chk_ptr(&args); - m = args.fwa_m; - dst = args.fwa_next_hop; - - /* - * On return we must do the following: - * IP_FW_PORT_DENY_FLAG -> drop the pkt (XXX new) - * 1<=off<= 0xffff -> DIVERT - * (off & IP_FW_PORT_DYNT_FLAG) -> send to a DUMMYNET pipe - * (off & IP_FW_PORT_TEE_FLAG) -> TEE the packet - * dst != old -> IPFIREWALL_FORWARD - * off==0, dst==old -> accept - * If some of the above modules is not compiled in, then - * we should't have to check the corresponding condition - * (because the ipfw control socket should not accept - * unsupported rules), but better play safe and drop - * packets in case of doubt. - */ - m0 = m; - if ((ipfwoff & IP_FW_PORT_DENY_FLAG) || m == NULL) { - if (m) { - m_freem(m); - } - error = EACCES; - goto done; - } - ip = mtod(m, struct ip *); - - if (ipfwoff == 0 && dst == old) { /* common case */ - goto pass; - } -#if DUMMYNET - if (DUMMYNET_LOADED && (ipfwoff & IP_FW_PORT_DYNT_FLAG) != 0) { - /* - * pass the pkt to dummynet. Need to include - * pipe number, m, ifp, ro, dst because these are - * not recomputed in the next pass. - * All other parameters have been already used and - * so they are not needed anymore. 
- * XXX note: if the ifp or ro entry are deleted - * while a pkt is in dummynet, we are in trouble! - */ - args.fwa_ro = ro; - args.fwa_dst = dst; - args.fwa_oflags = flags; - if (flags & IP_OUTARGS) { - args.fwa_ipoa = ipoa; - } - - error = ip_dn_io_ptr(m, ipfwoff & 0xffff, DN_TO_IP_OUT, - &args, DN_CLIENT_IPFW); - goto done; - } -#endif /* DUMMYNET */ -#if IPDIVERT - if (ipfwoff != 0 && (ipfwoff & IP_FW_PORT_DYNT_FLAG) == 0) { - struct mbuf *clone = NULL; - - /* Clone packet if we're doing a 'tee' */ - if ((ipfwoff & IP_FW_PORT_TEE_FLAG) != 0) { - clone = m_dup(m, M_DONTWAIT); - } - /* - * XXX - * delayed checksums are not currently compatible - * with divert sockets. - */ - if (m->m_pkthdr.csum_flags & CSUM_DELAY_DATA) { - in_delayed_cksum(m); - } - - /* Restore packet header fields to original values */ - -#if BYTE_ORDER != BIG_ENDIAN - HTONS(ip->ip_len); - HTONS(ip->ip_off); -#endif - - /* Deliver packet to divert input routine */ - divert_packet(m, 0, ipfwoff & 0xffff, - args.fwa_divert_rule); - - /* If 'tee', continue with original packet */ - if (clone != NULL) { - m0 = m = clone; - ip = mtod(m, struct ip *); - goto pass; - } - goto done; - } -#endif /* IPDIVERT */ -#if IPFIREWALL_FORWARD - /* - * Here we check dst to make sure it's directly reachable on - * the interface we previously thought it was. - * If it isn't (which may be likely in some situations) we have - * to re-route it (ie, find a route for the next-hop and the - * associated interface) and set them here. This is nested - * forwarding which in most cases is undesirable, except where - * such control is nigh impossible. So we do it here. - * And I'm babbling. - */ - if (ipfwoff == 0 && old != dst) { - struct in_ifaddr *ia_fw; - struct route *ro_fwd = &sro_fwd; - -#if IPFIREWALL_FORWARD_DEBUG - printf("IPFIREWALL_FORWARD: New dst ip: "); - print_ip(dst->sin_addr); - printf("\n"); -#endif /* IPFIREWALL_FORWARD_DEBUG */ - /* - * We need to figure out if we have been forwarded - * to a local socket. If so then we should somehow - * "loop back" to ip_input, and get directed to the - * PCB as if we had received this packet. This is - * because it may be dificult to identify the packets - * you want to forward until they are being output - * and have selected an interface. (e.g. locally - * initiated packets) If we used the loopback inteface, - * we would not be able to control what happens - * as the packet runs through ip_input() as - * it is done through a ISR. - */ - lck_rw_lock_shared(in_ifaddr_rwlock); - TAILQ_FOREACH(ia_fw, &in_ifaddrhead, ia_link) { - /* - * If the addr to forward to is one - * of ours, we pretend to - * be the destination for this packet. 
- */ - IFA_LOCK_SPIN(&ia_fw->ia_ifa); - if (IA_SIN(ia_fw)->sin_addr.s_addr == - dst->sin_addr.s_addr) { - IFA_UNLOCK(&ia_fw->ia_ifa); - break; - } - IFA_UNLOCK(&ia_fw->ia_ifa); - } - lck_rw_done(in_ifaddr_rwlock); - if (ia_fw) { - /* tell ip_input "dont filter" */ - struct m_tag *fwd_tag; - struct ip_fwd_tag *ipfwd_tag; - - fwd_tag = m_tag_create(KERNEL_MODULE_TAG_ID, - KERNEL_TAG_TYPE_IPFORWARD, - sizeof(*ipfwd_tag), M_NOWAIT, m); - if (fwd_tag == NULL) { - error = ENOBUFS; - goto bad; - } - - ipfwd_tag = (struct ip_fwd_tag *)(fwd_tag + 1); - ipfwd_tag->next_hop = args.fwa_next_hop; - - m_tag_prepend(m, fwd_tag); - - if (m->m_pkthdr.rcvif == NULL) { - m->m_pkthdr.rcvif = lo_ifp; - } - -#if BYTE_ORDER != BIG_ENDIAN - HTONS(ip->ip_len); - HTONS(ip->ip_off); -#endif - mbuf_outbound_finalize(m, PF_INET, 0); - - /* - * we need to call dlil_output to run filters - * and resync to avoid recursion loops. - */ - if (lo_ifp) { - dlil_output(lo_ifp, PF_INET, m, NULL, - SA(dst), 0, adv); - } else { - printf("%s: no loopback ifp for " - "forwarding!!!\n", __func__); - } - goto done; - } - /* - * Some of the logic for this was nicked from above. - * - * This rewrites the cached route in a local PCB. - * Is this what we want to do? - */ - ROUTE_RELEASE(ro_fwd); - bcopy(dst, &ro_fwd->ro_dst, sizeof(*dst)); - - rtalloc_ign(ro_fwd, RTF_PRCLONING, false); - - if (ro_fwd->ro_rt == NULL) { - OSAddAtomic(1, &ipstat.ips_noroute); - error = EHOSTUNREACH; - goto bad; - } - - RT_LOCK_SPIN(ro_fwd->ro_rt); - ia_fw = ifatoia(ro_fwd->ro_rt->rt_ifa); - if (ia_fw != NULL) { - /* Become a regular mutex */ - RT_CONVERT_LOCK(ro_fwd->ro_rt); - IFA_ADDREF(&ia_fw->ia_ifa); - } - ifp = ro_fwd->ro_rt->rt_ifp; - ro_fwd->ro_rt->rt_use++; - if (ro_fwd->ro_rt->rt_flags & RTF_GATEWAY) { - dst = SIN(ro_fwd->ro_rt->rt_gateway); - } - if (ro_fwd->ro_rt->rt_flags & RTF_HOST) { - /* double negation needed for bool bit field */ - ipobf.isbroadcast = - !!(ro_fwd->ro_rt->rt_flags & RTF_BROADCAST); - } else { - /* Become a regular mutex */ - RT_CONVERT_LOCK(ro_fwd->ro_rt); - ipobf.isbroadcast = - in_broadcast(dst->sin_addr, ifp); - } - RT_UNLOCK(ro_fwd->ro_rt); - ROUTE_RELEASE(ro); - ro->ro_rt = ro_fwd->ro_rt; - ro_fwd->ro_rt = NULL; - dst = SIN(&ro_fwd->ro_dst); - - /* - * If we added a default src ip earlier, - * which would have been gotten from the-then - * interface, do it again, from the new one. 
- */ - if (ia_fw != NULL) { - if (ipobf.fwd_rewrite_src) { - IFA_LOCK_SPIN(&ia_fw->ia_ifa); - ip->ip_src = IA_SIN(ia_fw)->sin_addr; - IFA_UNLOCK(&ia_fw->ia_ifa); - } - IFA_REMREF(&ia_fw->ia_ifa); - } - goto pass; - } -#endif /* IPFIREWALL_FORWARD */ - /* - * if we get here, none of the above matches, and - * we have to drop the pkt - */ - m_freem(m); - error = EACCES; /* not sure this is the right error msg */ - goto done; - } - -pass: -#endif /* IPFIREWALL */ /* 127/8 must not appear on wire - RFC1122 */ if (!(ifp->if_flags & IFF_LOOPBACK) && @@ -2099,9 +1759,6 @@ done: #if DUMMYNET ROUTE_RELEASE(&saved_route); #endif /* DUMMYNET */ -#if IPFIREWALL_FORWARD - ROUTE_RELEASE(&sro_fwd); -#endif /* IPFIREWALL_FORWARD */ KERNEL_DEBUG(DBG_FNC_IP_OUTPUT | DBG_FUNC_END, error, 0, 0, 0, 0); if (ip_output_measure) { @@ -2125,7 +1782,7 @@ bad: } int -ip_fragment(struct mbuf *m, struct ifnet *ifp, unsigned long mtu, int sw_csum) +ip_fragment(struct mbuf *m, struct ifnet *ifp, uint32_t mtu, int sw_csum) { struct ip *ip, *mhip; int len, hlen, mhlen, firstlen, off, error = 0; @@ -2139,7 +1796,6 @@ ip_fragment(struct mbuf *m, struct ifnet *ifp, unsigned long mtu, int sw_csum) hlen = ip->ip_hl << 2; #endif /* !_IP_VHL */ -#ifdef INET6 /* * We need to adjust the fragment sizes to account * for IPv6 fragment header if it needs to be translated @@ -2149,7 +1805,6 @@ ip_fragment(struct mbuf *m, struct ifnet *ifp, unsigned long mtu, int sw_csum) mtu -= sizeof(struct ip6_frag); } -#endif firstlen = len = (mtu - hlen) & ~7; if (len < 8) { m_freem(m); @@ -2187,7 +1842,7 @@ ip_fragment(struct mbuf *m, struct ifnet *ifp, unsigned long mtu, int sw_csum) mhip->ip_vhl = IP_MAKE_VHL(IPVERSION, mhlen >> 2); } m->m_len = mhlen; - mhip->ip_off = ((off - hlen) >> 3) + (ip->ip_off & ~IP_MF); + mhip->ip_off = (u_short)(((off - hlen) >> 3) + (ip->ip_off & ~IP_MF)); if (ip->ip_off & IP_MF) { mhip->ip_off |= IP_MF; } @@ -2211,10 +1866,6 @@ ip_fragment(struct mbuf *m, struct ifnet *ifp, unsigned long mtu, int sw_csum) M_COPY_CLASSIFIER(m, m0); M_COPY_PFTAG(m, m0); -#if CONFIG_MACF_NET - mac_netinet_fragment(m0, m); -#endif /* CONFIG_MACF_NET */ - #if BYTE_ORDER != BIG_ENDIAN HTONS(mhip->ip_off); #endif @@ -2357,7 +2008,11 @@ in_finalize_cksum(struct mbuf *m, uint32_t hoff, uint32_t csum_flags) (uint64_t)VM_KERNEL_ADDRPERM(m), ip->ip_p, ip->ip_len, ip->ip_len, ip_len, ip_len, (mlen - hoff)); - ip_len = mlen - hoff; + if (mlen - hoff > UINT16_MAX) { + panic("%s: mlen %u - hoff %u > 65535", + __func__, mlen, hoff); + } + ip_len = (uint16_t)(mlen - hoff); } } @@ -2486,9 +2141,6 @@ ip_insertoptions(struct mbuf *m, struct mbuf *opt, int *phlen) return m; } n->m_pkthdr.rcvif = 0; -#if CONFIG_MACF_NET - mac_mbuf_label_copy(m, n); -#endif /* CONFIG_MACF_NET */ n->m_pkthdr.len = m->m_pkthdr.len + optlen; m->m_len -= sizeof(struct ip); m->m_data += sizeof(struct ip); @@ -2557,7 +2209,7 @@ ip_optcopy(struct ip *ip, struct ip *jp) dp += optlen; } } - for (optlen = dp - (u_char *)(jp + 1); optlen & 0x3; optlen++) { + for (optlen = (int)(dp - (u_char *)(jp + 1)); optlen & 0x3; optlen++) { *dp++ = IPOPT_EOL; } return optlen; @@ -2612,7 +2264,7 @@ ip_ctloutput(struct socket *so, struct sockopt *sopt) error = ENOBUFS; break; } - m->m_len = sopt->sopt_valsize; + m->m_len = (int32_t)sopt->sopt_valsize; error = sooptcopyin(sopt, mtod(m, char *), m->m_len, m->m_len); if (error) { @@ -2633,6 +2285,7 @@ ip_ctloutput(struct socket *so, struct sockopt *sopt) case IP_RECVTTL: case IP_RECVPKTINFO: case IP_RECVTOS: + case IP_DONTFRAG: error = 
sooptcopyin(sopt, &optval, sizeof(optval), sizeof(optval)); if (error) { @@ -2641,17 +2294,35 @@ ip_ctloutput(struct socket *so, struct sockopt *sopt) switch (sopt->sopt_name) { case IP_TOS: - inp->inp_ip_tos = optval; + if (optval > UINT8_MAX) { + error = EINVAL; + break; + } + inp->inp_ip_tos = (uint8_t)optval; break; case IP_TTL: - inp->inp_ip_ttl = optval; + if (optval > UINT8_MAX) { + error = EINVAL; + break; + } + inp->inp_ip_ttl = (uint8_t)optval; break; -#define OPTSET(bit) \ - if (optval) \ - inp->inp_flags |= bit; \ - else \ - inp->inp_flags &= ~bit; +#define OPTSET(bit) do { \ + if (optval) { \ + inp->inp_flags |= bit; \ + } else { \ + inp->inp_flags &= ~bit; \ + } \ +} while (0) + +#define OPTSET2(bit) do { \ + if (optval) { \ + inp->inp_flags2 |= bit; \ + } else { \ + inp->inp_flags2 &= ~bit; \ + } \ +} while (0) case IP_RECVOPTS: OPTSET(INP_RECVOPTS); @@ -2680,7 +2351,17 @@ ip_ctloutput(struct socket *so, struct sockopt *sopt) case IP_RECVTOS: OPTSET(INP_RECVTOS); break; - #undef OPTSET + + case IP_DONTFRAG: + /* This option is settable only for IPv4 */ + if (!(inp->inp_vflag & INP_IPV4)) { + error = EINVAL; + break; + } + OPTSET2(INP2_DONTFRAG); + break; +#undef OPTSET +#undef OPTSET2 } break; /* @@ -2878,6 +2559,7 @@ ip_ctloutput(struct socket *so, struct sockopt *sopt) case IP_PORTRANGE: case IP_RECVPKTINFO: case IP_RECVTOS: + case IP_DONTFRAG: switch (sopt->sopt_name) { case IP_TOS: optval = inp->inp_ip_tos; @@ -2888,7 +2570,7 @@ ip_ctloutput(struct socket *so, struct sockopt *sopt) break; #define OPTBIT(bit) (inp->inp_flags & bit ? 1 : 0) - +#define OPTBIT2(bit) (inp->inp_flags2 & bit ? 1 : 0) case IP_RECVOPTS: optval = OPTBIT(INP_RECVOPTS); break; @@ -2926,6 +2608,9 @@ ip_ctloutput(struct socket *so, struct sockopt *sopt) case IP_RECVTOS: optval = OPTBIT(INP_RECVTOS); break; + case IP_DONTFRAG: + optval = OPTBIT2(INP2_DONTFRAG); + break; } error = sooptcopyout(sopt, &optval, sizeof(optval)); break; @@ -3061,10 +2746,13 @@ ip_pcbopts(int optname, struct mbuf **pcbopt, struct mbuf *m) if (optlen < IPOPT_MINOFF - 1 + sizeof(struct in_addr)) { goto bad; } + if (optlen > UINT8_MAX) { + goto bad; + } m->m_len -= sizeof(struct in_addr); cnt -= sizeof(struct in_addr); optlen -= sizeof(struct in_addr); - cp[IPOPT_OLEN] = optlen; + cp[IPOPT_OLEN] = (uint8_t)optlen; /* * Move first hop before start of options. */ @@ -3097,16 +2785,10 @@ ip_moptions_init(void) { PE_parse_boot_argn("ifa_debug", &imo_debug, sizeof(imo_debug)); - imo_size = (imo_debug == 0) ? sizeof(struct ip_moptions) : + vm_size_t imo_size = (imo_debug == 0) ? sizeof(struct ip_moptions) : sizeof(struct ip_moptions_dbg); - imo_zone = zinit(imo_size, IMO_ZONE_MAX * imo_size, 0, - IMO_ZONE_NAME); - if (imo_zone == NULL) { - panic("%s: failed allocating %s", __func__, IMO_ZONE_NAME); - /* NOTREACHED */ - } - zone_change(imo_zone, Z_EXPAND, TRUE); + imo_zone = zone_create(IMO_ZONE_NAME, imo_size, ZC_ZFREE_CLEARMEM); } void @@ -3211,13 +2893,12 @@ imo_trace(struct ip_moptions *imo, int refhold) } struct ip_moptions * -ip_allocmoptions(int how) +ip_allocmoptions(zalloc_flags_t how) { struct ip_moptions *imo; - imo = (how == M_WAITOK) ? 
zalloc(imo_zone) : zalloc_noblock(imo_zone); + imo = zalloc_flags(imo_zone, how | Z_ZERO); if (imo != NULL) { - bzero(imo, imo_size); lck_mtx_init(&imo->imo_lock, ifa_mtx_grp, ifa_mtx_attr); imo->imo_debug |= IFD_ALLOC; if (imo_debug != 0) { diff --git a/bsd/netinet/ip_var.h b/bsd/netinet/ip_var.h index ab4d5336d..2a45b81f5 100644 --- a/bsd/netinet/ip_var.h +++ b/bsd/netinet/ip_var.h @@ -82,9 +82,6 @@ struct ipovly { }; #ifdef BSD_KERNEL_PRIVATE -#if CONFIG_MACF_NET -struct label; -#endif /* CONFIG_MACF_NET */ /* * Ip reassembly queue structure. Each fragment * being reassembled is attached to one of these structures. @@ -94,9 +91,6 @@ struct label; struct ipq { TAILQ_ENTRY(ipq) ipq_list; /* to other reass headers */ struct mbuf *ipq_frags; /* to ip headers of fragments */ -#if CONFIG_MACF_NET - struct label *ipq_label; /* MAC label */ -#endif /* CONFIG_MACF_NET */ u_char ipq_ttl; /* time for reass q to live */ u_char ipq_p; /* protocol of this fragment */ u_short ipq_id; /* sequence id for reassembly */ @@ -104,14 +98,6 @@ struct ipq { u_int32_t ipq_nfrags; /* # frags in this packet */ uint32_t ipq_csum_flags; /* checksum flags */ uint32_t ipq_csum; /* partial checksum value */ -#if IPDIVERT -#ifdef IPDIVERT_44 - u_int32_t ipq_div_info; /* ipfw divert port & flags */ -#else /* !IPDIVERT_44 */ - u_int16_t ipq_divert; /* ipfw divert port (legacy) */ -#endif /* !IPDIVERT_44 */ - u_int16_t ipq_div_cookie; /* ipfw divert cookie */ -#endif /* IPDIVERT */ }; /* @@ -279,6 +265,7 @@ struct inpcb; struct route; struct sockopt; +#include #include /* @@ -300,10 +287,12 @@ struct ip_out_args { * AWDL_RESTRICTED */ #define IPOAF_QOSMARKING_ALLOWED 0x00000080 /* policy allows Fastlane DSCP marking */ #define IPOAF_NO_CONSTRAINED 0x00000100 /* skip IFXF_CONSTRAINED */ +#define IPOAF_REDO_QOSMARKING_POLICY 0x00000200 /* Re-evaluate QOS marking policy */ u_int32_t ipoa_retflags; /* IPOARF return flags (see below) */ #define IPOARF_IFDENIED 0x00000001 /* denied access to interface */ int ipoa_sotc; /* traffic class for Fastlane DSCP mapping */ int ipoa_netsvctype; /* network service type */ + int32_t qos_marking_gencount; }; extern struct ipstat ipstat; @@ -316,7 +305,7 @@ extern struct protosw *ip_protox[]; extern struct pr_usrreqs rip_usrreqs; extern void ip_moptions_init(void); -extern struct ip_moptions *ip_allocmoptions(int); +extern struct ip_moptions *ip_allocmoptions(zalloc_flags_t); extern int inp_getmoptions(struct inpcb *, struct sockopt *); extern int inp_setmoptions(struct inpcb *, struct sockopt *); extern void imo_addref(struct ip_moptions *, int); @@ -342,8 +331,7 @@ extern struct mbuf *ip_srcroute(void); extern void ip_stripoptions(struct mbuf *); extern void ip_initid(void); extern u_int16_t ip_randomid(void); -extern void ip_proto_dispatch_in_wrapper(struct mbuf *, int, u_int8_t); -extern int ip_fragment(struct mbuf *, struct ifnet *, unsigned long, int); +extern int ip_fragment(struct mbuf *, struct ifnet *, uint32_t, int); extern void ip_setsrcifaddr_info(struct mbuf *, uint32_t, struct in_ifaddr *); extern void ip_setdstifaddr_info(struct mbuf *, uint32_t, struct in_ifaddr *); @@ -365,13 +353,11 @@ extern void tcp_out_cksum_stats(u_int32_t); extern void udp_in_cksum_stats(u_int32_t); extern void udp_out_cksum_stats(u_int32_t); -#if INET6 extern void tcp_in6_cksum_stats(u_int32_t); extern void tcp_out6_cksum_stats(u_int32_t); extern void udp_in6_cksum_stats(u_int32_t); extern void udp_out6_cksum_stats(u_int32_t); -#endif /* INET6 */ #endif /* BSD_KERNEL_PRIVATE */ #ifdef KERNEL_PRIVATE 
/* for PPP/PPTP */ diff --git a/bsd/netinet/kpi_ipfilter.c b/bsd/netinet/kpi_ipfilter.c index a47c52b4a..7c6d61d1b 100644 --- a/bsd/netinet/kpi_ipfilter.c +++ b/bsd/netinet/kpi_ipfilter.c @@ -411,10 +411,10 @@ ipf_injectv4_out(mbuf_t data, ipfilter_t filter_ref, ipf_pktopts_t options) } if (options != NULL && (options->ippo_flags & IPPOF_MCAST_OPTS) && - (imo = ip_allocmoptions(M_DONTWAIT)) != NULL) { + (imo = ip_allocmoptions(Z_NOWAIT)) != NULL) { imo->imo_multicast_ifp = options->ippo_mcast_ifnet; imo->imo_multicast_ttl = options->ippo_mcast_ttl; - imo->imo_multicast_loop = options->ippo_mcast_loop; + imo->imo_multicast_loop = (u_char)options->ippo_mcast_loop; } if (options != NULL) { @@ -463,7 +463,6 @@ ipf_injectv4_out(mbuf_t data, ipfilter_t filter_ref, ipf_pktopts_t options) return error; } -#if INET6 static errno_t ipf_injectv6_out(mbuf_t data, ipfilter_t filter_ref, ipf_pktopts_t options) { @@ -501,10 +500,10 @@ ipf_injectv6_out(mbuf_t data, ipfilter_t filter_ref, ipf_pktopts_t options) } if (options != NULL && (options->ippo_flags & IPPOF_MCAST_OPTS) && - (im6o = ip6_allocmoptions(M_DONTWAIT)) != NULL) { + (im6o = ip6_allocmoptions(Z_NOWAIT)) != NULL) { im6o->im6o_multicast_ifp = options->ippo_mcast_ifnet; im6o->im6o_multicast_hlim = options->ippo_mcast_ttl; - im6o->im6o_multicast_loop = options->ippo_mcast_loop; + im6o->im6o_multicast_loop = (u_char)options->ippo_mcast_loop; } if (options != NULL) { @@ -548,7 +547,6 @@ ipf_injectv6_out(mbuf_t data, ipfilter_t filter_ref, ipf_pktopts_t options) return error; } -#endif /* INET6 */ errno_t ipf_inject_output( @@ -573,11 +571,9 @@ ipf_inject_output( case 4: error = ipf_injectv4_out(data, filter_ref, options); break; -#if INET6 case 6: error = ipf_injectv6_out(data, filter_ref, options); break; -#endif default: m_freem(m); error = ENOTSUP; diff --git a/bsd/netinet/kpi_ipfilter.h b/bsd/netinet/kpi_ipfilter.h index 0aafb5c29..fcec746fc 100644 --- a/bsd/netinet/kpi_ipfilter.h +++ b/bsd/netinet/kpi_ipfilter.h @@ -245,4 +245,5 @@ extern errno_t ipf_inject_output(mbuf_t data, ipfilter_t filter_ref, __NKE_API_DEPRECATED; __END_DECLS +#undef __NKE_API_DEPRECATED #endif /* __KPI_IPFILTER__ */ diff --git a/bsd/netinet/lro_ext.h b/bsd/netinet/lro_ext.h deleted file mode 100644 index cabdfcd05..000000000 --- a/bsd/netinet/lro_ext.h +++ /dev/null @@ -1,65 +0,0 @@ -/* - * Copyright (c) 2011 Apple Inc. All rights reserved. - * - * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * - * This file contains Original Code and/or Modifications of Original Code - * as defined in and that are subject to the Apple Public Source License - * Version 2.0 (the 'License'). You may not use this file except in - * compliance with the License. The rights granted to you under the License - * may not be used to create, or enable the creation or redistribution of, - * unlawful or unlicensed copies of an Apple operating system, or to - * circumvent, violate, or enable the circumvention or violation of, any - * terms of an Apple operating system software license agreement. - * - * Please obtain a copy of the License at - * http://www.opensource.apple.com/apsl/ and read it before using this file. - * - * The Original Code and all software distributed under the License are - * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER - * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, - * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. 
- * Please see the License for the specific language governing rights and - * limitations under the License - * - * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ - */ - -#ifndef LRO_EXT_H_ -#define LRO_EXT_H_ - -#ifdef BSD_KERNEL_PRIVATE - -/* All definitions exported from LRO go into this file */ - -extern int sw_lro; -extern int lrodebug; -extern unsigned int coalesc_sz; - -/* flow return values */ -#define TCP_LRO_NAN 0x00 /* No flow exists */ -#define TCP_LRO_CONSUMED 0x01 /* LRO consumed the packet */ -#define TCP_LRO_EJECT_FLOW 0x02 /* LRO ejected the flow */ -#define TCP_LRO_COALESCE 0x03 /* LRO to coalesce the packet */ -#define TCP_LRO_COLLISION 0x04 /* Two flows map to the same slot */ - -void tcp_lro_init(void); - -/* When doing LRO in IP call this function */ -struct mbuf* tcp_lro(struct mbuf *m, unsigned int hlen); - -/* TCP calls this to start coalescing a flow */ -int tcp_start_coalescing(struct ip *, struct tcphdr *, int tlen); - -/* TCP calls this to stop coalescing a flow */ -int tcp_lro_remove_state(struct in_addr, struct in_addr, unsigned short, - unsigned short); - -/* TCP calls this to keep the seq number updated */ -void tcp_update_lro_seq(__uint32_t, struct in_addr, struct in_addr, - unsigned short, unsigned short); - -#endif - -#endif /* LRO_EXT_H_ */ diff --git a/bsd/netinet/mptcp.c b/bsd/netinet/mptcp.c index 3b3d83765..39fabb2b1 100644 --- a/bsd/netinet/mptcp.c +++ b/bsd/netinet/mptcp.c @@ -137,7 +137,6 @@ int mptcp_fail_thresh = 1; SYSCTL_INT(_net_inet_mptcp, OID_AUTO, fail, CTLFLAG_RW | CTLFLAG_LOCKED, &mptcp_fail_thresh, 0, "Failover threshold"); - /* * MPTCP subflows have TCP keepalives set to ON. Set a conservative keeptime * as carrier networks mostly have a 30 minute to 60 minute NAT Timeout. @@ -151,13 +150,6 @@ int mptcp_rtthist_rtthresh = 600; SYSCTL_INT(_net_inet_mptcp, OID_AUTO, rtthist_thresh, CTLFLAG_RW | CTLFLAG_LOCKED, &mptcp_rtthist_rtthresh, 0, "Rtt threshold"); -/* - * Use RTO history for sending new data - */ -int mptcp_use_rto = 1; -SYSCTL_INT(_net_inet_mptcp, OID_AUTO, userto, CTLFLAG_RW | CTLFLAG_LOCKED, - &mptcp_use_rto, 0, "Disable RTO for subflow selection"); - int mptcp_rtothresh = 1500; SYSCTL_INT(_net_inet_mptcp, OID_AUTO, rto_thresh, CTLFLAG_RW | CTLFLAG_LOCKED, &mptcp_rtothresh, 0, "RTO threshold"); @@ -173,12 +165,6 @@ uint32_t mptcp_probecnt = 5; SYSCTL_UINT(_net_inet_mptcp, OID_AUTO, probecnt, CTLFLAG_RW | CTLFLAG_LOCKED, &mptcp_probecnt, 0, "Number of probe writes"); -/* - * Static declarations - */ -static uint16_t mptcp_input_csum(struct tcpcb *, struct mbuf *, uint64_t, - uint32_t, uint16_t, uint16_t, uint16_t); - static int mptcp_reass_present(struct socket *mp_so) { @@ -218,7 +204,7 @@ mptcp_reass_present(struct socket *mp_so) m_freem(q->tqe_m); } else { flags = !!(q->tqe_m->m_pkthdr.pkt_flags & PKTF_MPTCP_DFIN); - if (sbappendstream_rcvdemux(mp_so, q->tqe_m, 0, 0)) { + if (sbappendstream_rcvdemux(mp_so, q->tqe_m)) { dowakeup = 1; } } @@ -243,7 +229,7 @@ mptcp_reass(struct socket *mp_so, struct pkthdr *phdr, int *tlenp, struct mbuf * struct tseg_qent *p = NULL; struct tseg_qent *nq; struct tseg_qent *te = NULL; - u_int16_t qlimit; + uint32_t qlimit; /* * Limit the number of segments in the reassembly queue to prevent @@ -252,7 +238,7 @@ mptcp_reass(struct socket *mp_so, struct pkthdr *phdr, int *tlenp, struct mbuf * * queue. Always keep one global queue entry spare to be able to * process the missing segment. 
*/ - qlimit = min(max(100, mp_so->so_rcv.sb_hiwat >> 10), + qlimit = MIN(MAX(100, mp_so->so_rcv.sb_hiwat >> 10), (tcp_autorcvbuf_max >> 10)); if (mb_dsn != mp_tp->mpt_rcvnxt && (mp_tp->mpt_reassqlen + 1) >= qlimit) { @@ -306,7 +292,8 @@ mptcp_reass(struct socket *mp_so, struct pkthdr *phdr, int *tlenp, struct mbuf * */ goto out; } - m_adj(m, i); + VERIFY(i <= INT_MAX); + m_adj(m, (int)i); *tlenp -= i; phdr->mp_dsn += i; } @@ -327,7 +314,9 @@ mptcp_reass(struct socket *mp_so, struct pkthdr *phdr, int *tlenp, struct mbuf * if (i < q->tqe_len) { q->tqe_m->m_pkthdr.mp_dsn += i; q->tqe_len -= i; - m_adj(q->tqe_m, i); + + VERIFY(i <= INT_MAX); + m_adj(q->tqe_m, (int)i); break; } @@ -425,7 +414,7 @@ fallback: * assume degraded flow as this may be the first packet * without DSS, and the subflow state is not updated yet. */ - if (sbappendstream_rcvdemux(mp_so, m, 0, 0)) { + if (sbappendstream_rcvdemux(mp_so, m)) { sorwakeup(mp_so); } @@ -508,7 +497,8 @@ fallback: prev = save = NULL; continue; } else { - m_adj(m, -todrop); + VERIFY(todrop <= INT_MAX); + m_adj(m, (int)-todrop); mb_datalen -= todrop; m->m_pkthdr.mp_rlen -= todrop; } @@ -539,10 +529,12 @@ fallback: prev = save = NULL; continue; } else { - m_adj(m, (mp_tp->mpt_rcvnxt - mb_dsn)); + VERIFY((mp_tp->mpt_rcvnxt - mb_dsn) <= INT_MAX); + m_adj(m, (int)(mp_tp->mpt_rcvnxt - mb_dsn)); mb_datalen -= (mp_tp->mpt_rcvnxt - mb_dsn); mb_dsn = mp_tp->mpt_rcvnxt; - m->m_pkthdr.mp_rlen = mb_datalen; + VERIFY(mb_datalen >= 0 && mb_datalen <= USHRT_MAX); + m->m_pkthdr.mp_rlen = (uint16_t)mb_datalen; m->m_pkthdr.mp_dsn = mb_dsn; } } @@ -557,7 +549,7 @@ fallback: mptcp_sbrcv_grow(mp_tp); - if (sbappendstream_rcvdemux(mp_so, m, 0, 0)) { + if (sbappendstream_rcvdemux(mp_so, m)) { wakeup = 1; } @@ -683,8 +675,7 @@ mptcp_output(struct mptses *mpte) * 2. send buffer is filled to 7/8th with data (so we actually * have data to make use of it); */ - if (tcp_do_autosendbuf == 1 && - (mp_so->so_snd.sb_flags & (SB_AUTOSIZE | SB_TRIM)) == SB_AUTOSIZE && + if ((mp_so->so_snd.sb_flags & (SB_AUTOSIZE | SB_TRIM)) == SB_AUTOSIZE && tcp_cansbgrow(&mp_so->so_snd)) { if ((mp_tp->mpt_sndwnd / 4 * 5) >= mp_so->so_snd.sb_hiwat && mp_so->so_snd.sb_cc >= (mp_so->so_snd.sb_hiwat / 8 * 7)) { @@ -1224,39 +1215,16 @@ mptcp_input_preproc(struct tcpcb *tp, struct mbuf *m, struct tcphdr *th, return 0; } -/* - * MPTCP Checksum support - * The checksum is calculated whenever the MPTCP DSS option is included - * in the TCP packet. The checksum includes the sum of the MPTCP psuedo - * header and the actual data indicated by the length specified in the - * DSS option. 
- */ - -int -mptcp_validate_csum(struct tcpcb *tp, struct mbuf *m, uint64_t dsn, - uint32_t sseq, uint16_t dlen, uint16_t csum, uint16_t dfin) -{ - uint16_t mptcp_csum; - - mptcp_csum = mptcp_input_csum(tp, m, dsn, sseq, dlen, csum, dfin); - if (mptcp_csum) { - tp->t_mpflags |= TMPF_SND_MPFAIL; - mptcp_notify_mpfail(tp->t_inpcb->inp_socket); - m_freem(m); - tcpstat.tcps_mp_badcsum++; - return -1; - } - return 0; -} - static uint16_t mptcp_input_csum(struct tcpcb *tp, struct mbuf *m, uint64_t dsn, uint32_t sseq, - uint16_t dlen, uint16_t csum, uint16_t dfin) + uint16_t dlen, uint16_t csum, int dfin) { struct mptcb *mp_tp = tptomptp(tp); - uint16_t real_len = dlen - dfin; + int real_len = dlen - dfin; uint32_t sum = 0; + VERIFY(real_len >= 0); + if (mp_tp == NULL) { return 0; } @@ -1283,15 +1251,39 @@ mptcp_input_csum(struct tcpcb *tp, struct mbuf *m, uint64_t dsn, uint32_t sseq, sum += in_pseudo64(htonll(dsn), htonl(sseq), htons(dlen) + csum); ADDCARRY(sum); + DTRACE_MPTCP3(checksum__result, struct tcpcb *, tp, struct mbuf *, m, uint32_t, sum); - mptcplog((LOG_DEBUG, "%s: sum = %x \n", __func__, sum), - MPTCP_RECEIVER_DBG, MPTCP_LOGLVL_VERBOSE); return ~sum & 0xffff; } -uint32_t +/* + * MPTCP Checksum support + * The checksum is calculated whenever the MPTCP DSS option is included + * in the TCP packet. The checksum includes the sum of the MPTCP psuedo + * header and the actual data indicated by the length specified in the + * DSS option. + */ + +int +mptcp_validate_csum(struct tcpcb *tp, struct mbuf *m, uint64_t dsn, + uint32_t sseq, uint16_t dlen, uint16_t csum, int dfin) +{ + uint16_t mptcp_csum; + + mptcp_csum = mptcp_input_csum(tp, m, dsn, sseq, dlen, csum, dfin); + if (mptcp_csum) { + tp->t_mpflags |= TMPF_SND_MPFAIL; + mptcp_notify_mpfail(tp->t_inpcb->inp_socket); + m_freem(m); + tcpstat.tcps_mp_badcsum++; + return -1; + } + return 0; +} + +uint16_t mptcp_output_csum(struct mbuf *m, uint64_t dss_val, uint32_t sseq, uint16_t dlen) { uint32_t sum = 0; @@ -1311,7 +1303,7 @@ mptcp_output_csum(struct mbuf *m, uint64_t dss_val, uint32_t sseq, uint16_t dlen mptcplog((LOG_DEBUG, "%s: sum = %x \n", __func__, sum), MPTCP_SENDER_DBG, MPTCP_LOGLVL_VERBOSE); - return sum; + return (uint16_t)sum; } /* @@ -1399,8 +1391,9 @@ mptcp_session_necp_cb(void *handle, int action, uint32_t interface_index, struct mptses *mpte = mptompte(mp); struct socket *mp_so; struct mptcb *mp_tp; - int locked = 0; uint32_t i, ifindex; + struct ifnet *ifp; + int locked = 0; ifindex = interface_index; VERIFY(ifindex != IFSCOPE_NONE); @@ -1426,8 +1419,13 @@ mptcp_session_necp_cb(void *handle, int action, uint32_t interface_index, mp_tp = mpte->mpte_mptcb; - os_log_info(mptcp_log_handle, "%s - %lx: action: %u ifindex %u usecount %u mpt_flags %#x state %u v4 %u v6 %u nat64 %u power %u\n", + ifnet_head_lock_shared(); + ifp = ifindex2ifnet[ifindex]; + ifnet_head_done(); + + os_log(mptcp_log_handle, "%s - %lx: action: %u ifindex %u delegated to %u usecount %u mpt_flags %#x state %u v4 %u v6 %u nat64 %u power %u\n", __func__, (unsigned long)VM_KERNEL_ADDRPERM(mpte), action, ifindex, + ifp && ifp->if_delegated.ifp ? 
ifp->if_delegated.ifp->if_index : IFSCOPE_NONE, mp->mpp_socket->so_usecount, mp_tp->mpt_flags, mp_tp->mpt_state, has_v4, has_v6, has_nat64, low_power); @@ -1460,11 +1458,6 @@ mptcp_session_necp_cb(void *handle, int action, uint32_t interface_index, action == NECP_CLIENT_CBACTION_INITIAL) { int found_slot = 0, slot_index = -1; struct sockaddr *dst; - struct ifnet *ifp; - - ifnet_head_lock_shared(); - ifp = ifindex2ifnet[ifindex]; - ifnet_head_done(); if (ifp == NULL) { goto out; @@ -1527,6 +1520,7 @@ mptcp_session_necp_cb(void *handle, int action, uint32_t interface_index, if (dst && (dst->sa_family == AF_INET || dst->sa_family == 0) && has_v6 && !has_nat64 && !has_v4) { if (found_slot) { + mpte->mpte_itfinfo[slot_index].ifindex = ifindex; mpte->mpte_itfinfo[slot_index].has_v4_conn = has_v4; mpte->mpte_itfinfo[slot_index].has_v6_conn = has_v6; mpte->mpte_itfinfo[slot_index].has_nat64_conn = has_nat64; diff --git a/bsd/netinet/mptcp_opt.c b/bsd/netinet/mptcp_opt.c index 6b63ab6e0..2767e5636 100644 --- a/bsd/netinet/mptcp_opt.c +++ b/bsd/netinet/mptcp_opt.c @@ -202,7 +202,7 @@ mptcp_send_mpfail(struct tcpcb *tp, u_char *opt, unsigned int optlen) struct mptcb *mp_tp = NULL; struct mptcp_mpfail_opt fail_opt; uint64_t dsn; - int len = sizeof(struct mptcp_mpfail_opt); + uint8_t len = sizeof(struct mptcp_mpfail_opt); mp_tp = tptomptp(tp); if (mp_tp == NULL) { @@ -235,11 +235,11 @@ mptcp_send_mpfail(struct tcpcb *tp, u_char *opt, unsigned int optlen) static int mptcp_send_infinite_mapping(struct tcpcb *tp, u_char *opt, unsigned int optlen) { + struct socket *so = tp->t_inpcb->inp_socket; + uint8_t len = sizeof(struct mptcp_dsn_opt); struct mptcp_dsn_opt infin_opt; struct mptcb *mp_tp = NULL; - size_t len = sizeof(struct mptcp_dsn_opt); - struct socket *so = tp->t_inpcb->inp_socket; - int csum_len = 0; + uint8_t csum_len = 0; if (!so) { return optlen; @@ -334,7 +334,7 @@ mptcp_ok_to_fin(struct tcpcb *tp, u_int64_t dsn, u_int32_t datalen) unsigned int mptcp_setup_opts(struct tcpcb *tp, int32_t off, u_char *opt, unsigned int optlen, int flags, int len, - boolean_t *p_mptcp_acknow) + boolean_t *p_mptcp_acknow, boolean_t *do_not_compress) { struct inpcb *inp = (struct inpcb *)tp->t_inpcb; struct socket *so = inp->inp_socket; @@ -374,6 +374,9 @@ mptcp_setup_opts(struct tcpcb *tp, int32_t off, u_char *opt, } else if (!(tp->t_mpflags & TMPF_INFIN_SENT)) { optlen = mptcp_send_infinite_mapping(tp, opt, optlen); } + + *do_not_compress = TRUE; + goto ret_optlen; } @@ -405,11 +408,14 @@ mptcp_setup_opts(struct tcpcb *tp, int32_t off, u_char *opt, } else { /* its a retransmission of the MP_CAPABLE ACK */ } + + *do_not_compress = TRUE; + goto ret_optlen; } if (tp->t_mpflags & TMPF_SND_JACK) { - /* Do the ACK part */ + *do_not_compress = TRUE; optlen = mptcp_setup_join_ack_opts(tp, opt, optlen); if (!tp->t_mpuna) { tp->t_mpuna = tp->snd_una; @@ -422,7 +428,7 @@ mptcp_setup_opts(struct tcpcb *tp, int32_t off, u_char *opt, goto ret_optlen; } - if (!(tp->t_mpflags & TMPF_MPTCP_TRUE)) { + if (!(tp->t_mpflags & (TMPF_MPTCP_TRUE | TMPF_PREESTABLISHED))) { goto ret_optlen; } /* @@ -440,10 +446,14 @@ mptcp_setup_opts(struct tcpcb *tp, int32_t off, u_char *opt, } else { tp->t_mpflags &= ~TMPF_SND_REM_ADDR; } + + *do_not_compress = TRUE; } if (tp->t_mpflags & TMPF_SND_MPPRIO) { optlen = mptcp_snd_mpprio(tp, opt, optlen); + + *do_not_compress = TRUE; } if (mp_tp->mpt_flags & MPTCPF_SND_64BITDSN) { @@ -497,7 +507,7 @@ mptcp_setup_opts(struct tcpcb *tp, int32_t off, u_char *opt, * XXX If this delay causes issue, remove the 
2-byte padding. */ struct mptcp_dss64_ack32_opt dsn_ack_opt; - unsigned int dssoptlen = sizeof(dsn_ack_opt); + uint8_t dssoptlen = sizeof(dsn_ack_opt); uint16_t dss_csum; if (do_csum) { @@ -545,12 +555,11 @@ mptcp_setup_opts(struct tcpcb *tp, int32_t off, u_char *opt, } optlen += dssoptlen; - mptcplog((LOG_DEBUG, "%s: long DSS = %llx ACK = %llx \n", __func__, - mptcp_ntoh64(dsn_ack_opt.mdss_dsn), - mptcp_ntoh64(dsn_ack_opt.mdss_ack)), - MPTCP_SOCKET_DBG, MPTCP_LOGLVL_LOG); tp->t_mpflags &= ~TMPF_MPTCP_ACKNOW; + + *do_not_compress = TRUE; + goto ret_optlen; } @@ -558,7 +567,7 @@ mptcp_setup_opts(struct tcpcb *tp, int32_t off, u_char *opt, (!send_64bit_dsn) && !(tp->t_mpflags & TMPF_MPTCP_ACKNOW)) { struct mptcp_dsn_opt dsn_opt; - unsigned int dssoptlen = sizeof(struct mptcp_dsn_opt); + uint8_t dssoptlen = sizeof(struct mptcp_dsn_opt); uint16_t dss_csum; if (do_csum) { @@ -599,6 +608,9 @@ mptcp_setup_opts(struct tcpcb *tp, int32_t off, u_char *opt, optlen += dssoptlen; tp->t_mpflags &= ~TMPF_MPTCP_ACKNOW; + + *do_not_compress = TRUE; + goto ret_optlen; } @@ -608,7 +620,7 @@ mptcp_setup_opts(struct tcpcb *tp, int32_t off, u_char *opt, !(tp->t_mpflags & TMPF_SEND_DSN) && !(tp->t_mpflags & TMPF_SEND_DFIN)) { struct mptcp_data_ack_opt dack_opt; - unsigned int dssoptlen = 0; + uint8_t dssoptlen = 0; do_ack32_only: dssoptlen = sizeof(dack_opt); @@ -634,7 +646,7 @@ do_ack32_only: !(tp->t_mpflags & TMPF_SEND_DSN) && !(tp->t_mpflags & TMPF_SEND_DFIN)) { struct mptcp_data_ack64_opt dack_opt; - unsigned int dssoptlen = 0; + uint8_t dssoptlen = 0; do_ack64_only: dssoptlen = sizeof(dack_opt); @@ -664,7 +676,7 @@ do_ack64_only: (!send_64bit_ack) && (tp->t_mpflags & TMPF_MPTCP_ACKNOW)) { struct mptcp_dss_ack_opt dss_ack_opt; - unsigned int dssoptlen = sizeof(dss_ack_opt); + uint8_t dssoptlen = sizeof(dss_ack_opt); uint16_t dss_csum; if (do_csum) { @@ -721,7 +733,7 @@ do_ack64_only: (send_64bit_ack) && (tp->t_mpflags & TMPF_MPTCP_ACKNOW)) { struct mptcp_dss32_ack64_opt dss_ack_opt; - unsigned int dssoptlen = sizeof(dss_ack_opt); + uint8_t dssoptlen = sizeof(dss_ack_opt); uint16_t dss_csum; if (do_csum) { @@ -769,11 +781,14 @@ do_ack64_only: panic("optlen too large"); } tp->t_mpflags &= ~TMPF_MPTCP_ACKNOW; + + *do_not_compress = TRUE; + goto ret_optlen; } if (tp->t_mpflags & TMPF_SEND_DFIN) { - unsigned int dssoptlen = sizeof(struct mptcp_dss_ack_opt); + uint8_t dssoptlen = sizeof(struct mptcp_dss_ack_opt); struct mptcp_dss_ack_opt dss_ack_opt; uint16_t dss_csum; @@ -821,11 +836,12 @@ do_ack64_only: } optlen += dssoptlen; + + *do_not_compress = TRUE; } ret_optlen: if (TRUE == *p_mptcp_acknow) { - VERIFY(old_mpt_flags != 0); u_int32_t new_mpt_flags = tp->t_mpflags & TMPF_MPTCP_SIGNALS; /* @@ -1058,6 +1074,8 @@ mptcp_data_ack_rcvd(struct mptcb *mp_tp, struct tcpcb *tp, u_int64_t full_dack) { uint64_t acked = full_dack - mp_tp->mpt_snduna; + VERIFY(acked <= INT_MAX); + if (acked) { struct socket *mp_so = mptetoso(mp_tp->mpt_mpte); @@ -1073,7 +1091,7 @@ mptcp_data_ack_rcvd(struct mptcb *mp_tp, struct tcpcb *tp, u_int64_t full_dack) sbdrop(&mp_so->so_snd, (int)mp_so->so_snd.sb_cc); } else { - sbdrop(&mp_so->so_snd, acked); + sbdrop(&mp_so->so_snd, (int)acked); } mp_tp->mpt_snduna += acked; diff --git a/bsd/netinet/mptcp_opt.h b/bsd/netinet/mptcp_opt.h index 5ca0e32e8..2eced99bb 100644 --- a/bsd/netinet/mptcp_opt.h +++ b/bsd/netinet/mptcp_opt.h @@ -40,7 +40,7 @@ extern unsigned mptcp_setup_syn_opts(struct socket *, u_char*, unsigned); extern unsigned mptcp_setup_join_ack_opts(struct tcpcb *, u_char*, unsigned); 
extern unsigned int mptcp_setup_opts(struct tcpcb *tp, int32_t off, u_char *opt, unsigned int optlen, int flags, int len, - boolean_t *p_mptcp_acknow); + boolean_t *p_mptcp_acknow, boolean_t *do_not_compress); extern void mptcp_update_dss_rcv_state(struct mptcp_dsn_opt *, struct tcpcb *, uint16_t); extern void mptcp_update_rcv_state_meat(struct mptcb *, struct tcpcb *, diff --git a/bsd/netinet/mptcp_subr.c b/bsd/netinet/mptcp_subr.c index 356298cf7..c253fc4f8 100644 --- a/bsd/netinet/mptcp_subr.c +++ b/bsd/netinet/mptcp_subr.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012-2017 Apple Inc. All rights reserved. + * Copyright (c) 2012-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -64,10 +64,8 @@ #include #include #include -#if INET6 #include #include -#endif /* INET6 */ #include /* @@ -118,7 +116,7 @@ static int mptcp_subflow_soreceive(struct socket *, struct sockaddr **, static int mptcp_subflow_sosend(struct socket *, struct sockaddr *, struct uio *, struct mbuf *, struct mbuf *, int); static void mptcp_subflow_wupcall(struct socket *, void *, int); -static void mptcp_subflow_eupcall1(struct socket *, void *, uint32_t); +static void mptcp_subflow_eupcall1(struct socket *so, void *arg, long events); static void mptcp_update_last_owner(struct socket *so, struct socket *mp_so); static void mptcp_drop_tfo_data(struct mptses *, struct mptsub *); @@ -142,30 +140,26 @@ typedef enum { MPTS_EVRET_DISCONNECT_FALLBACK = 4, /* abort all but preferred */ } ev_ret_t; -static ev_ret_t mptcp_subflow_propagate_ev(struct mptses *, struct mptsub *, uint64_t *, uint64_t); -static ev_ret_t mptcp_subflow_nosrcaddr_ev(struct mptses *, struct mptsub *, uint64_t *, uint64_t); -static ev_ret_t mptcp_subflow_failover_ev(struct mptses *, struct mptsub *, uint64_t *, uint64_t); -static ev_ret_t mptcp_subflow_ifdenied_ev(struct mptses *, struct mptsub *, uint64_t *, uint64_t); -static ev_ret_t mptcp_subflow_connected_ev(struct mptses *, struct mptsub *, uint64_t *, uint64_t); -static ev_ret_t mptcp_subflow_disconnected_ev(struct mptses *, struct mptsub *, uint64_t *, uint64_t); -static ev_ret_t mptcp_subflow_mpstatus_ev(struct mptses *, struct mptsub *, uint64_t *, uint64_t); -static ev_ret_t mptcp_subflow_mustrst_ev(struct mptses *, struct mptsub *, uint64_t *, uint64_t); -static ev_ret_t mptcp_subflow_mpcantrcvmore_ev(struct mptses *, struct mptsub *, uint64_t *, uint64_t); -static ev_ret_t mptcp_subflow_mpsuberror_ev(struct mptses *, struct mptsub *, uint64_t *, uint64_t); -static ev_ret_t mptcp_subflow_adaptive_rtimo_ev(struct mptses *, struct mptsub *, uint64_t *, uint64_t); -static ev_ret_t mptcp_subflow_adaptive_wtimo_ev(struct mptses *, struct mptsub *, uint64_t *, uint64_t); +static ev_ret_t mptcp_subflow_propagate_ev(struct mptses *, struct mptsub *, long *, long); +static ev_ret_t mptcp_subflow_nosrcaddr_ev(struct mptses *, struct mptsub *, long *, long); +static ev_ret_t mptcp_subflow_failover_ev(struct mptses *, struct mptsub *, long *, long); +static ev_ret_t mptcp_subflow_ifdenied_ev(struct mptses *, struct mptsub *, long *, long); +static ev_ret_t mptcp_subflow_connected_ev(struct mptses *, struct mptsub *, long *, long); +static ev_ret_t mptcp_subflow_disconnected_ev(struct mptses *, struct mptsub *, long *, long); +static ev_ret_t mptcp_subflow_mpstatus_ev(struct mptses *, struct mptsub *, long *, long); +static ev_ret_t mptcp_subflow_mustrst_ev(struct mptses *, struct mptsub *, long *, long); +static ev_ret_t mptcp_subflow_mpcantrcvmore_ev(struct mptses *, struct 
mptsub *, long *, long); +static ev_ret_t mptcp_subflow_mpsuberror_ev(struct mptses *, struct mptsub *, long *, long); +static ev_ret_t mptcp_subflow_adaptive_rtimo_ev(struct mptses *, struct mptsub *, long *, long); +static ev_ret_t mptcp_subflow_adaptive_wtimo_ev(struct mptses *, struct mptsub *, long *, long); static void mptcp_do_sha1(mptcp_key_t *, char *); static void mptcp_init_local_parms(struct mptses *); -static unsigned int mptsub_zone_size; /* size of mptsub */ -static struct zone *mptsub_zone; /* zone for mptsub */ - -static unsigned int mptopt_zone_size; /* size of mptopt */ -static struct zone *mptopt_zone; /* zone for mptopt */ - -static unsigned int mpt_subauth_entry_size; /* size of subf auth entry */ -static struct zone *mpt_subauth_zone; /* zone of subf auth entry */ +static ZONE_DECLARE(mptsub_zone, "mptsub", sizeof(struct mptsub), ZC_ZFREE_CLEARMEM); +static ZONE_DECLARE(mptopt_zone, "mptopt", sizeof(struct mptopt), ZC_ZFREE_CLEARMEM); +static ZONE_DECLARE(mpt_subauth_zone, "mptauth", + sizeof(struct mptcp_subf_auth_entry), ZC_NONE); struct mppcbinfo mtcbinfo; @@ -191,20 +185,18 @@ SYSCTL_INT(_net_inet_mptcp, OID_AUTO, alternate_port, CTLFLAG_RW | CTLFLAG_LOCKE static struct protosw mptcp_subflow_protosw; static struct pr_usrreqs mptcp_subflow_usrreqs; -#if INET6 static struct ip6protosw mptcp_subflow_protosw6; static struct pr_usrreqs mptcp_subflow_usrreqs6; -#endif /* INET6 */ static uint8_t mptcp_create_subflows_scheduled; typedef struct mptcp_subflow_event_entry { - uint64_t sofilt_hint_mask; - ev_ret_t (*sofilt_hint_ev_hdlr)( + long sofilt_hint_mask; + ev_ret_t (*sofilt_hint_ev_hdlr)( struct mptses *mpte, struct mptsub *mpts, - uint64_t *p_mpsofilt_hint, - uint64_t event); + long *p_mpsofilt_hint, + long event); } mptsub_ev_entry_t; /* Using Symptoms Advisory to detect poor WiFi or poor Cell */ @@ -289,9 +281,7 @@ mptcp_init(struct protosw *pp, struct domain *dp) #pragma unused(dp) static int mptcp_initialized = 0; struct protosw *prp; -#if INET6 struct ip6protosw *prp6; -#endif /* INET6 */ VERIFY((pp->pr_flags & (PR_INITIALIZED | PR_ATTACHED)) == PR_ATTACHED); @@ -328,7 +318,6 @@ mptcp_init(struct protosw *pp, struct domain *dp) mptcp_subflow_protosw.pr_filter_head.tqh_last = (struct socket_filter **)(uintptr_t)0xdeadbeefdeadbeef; -#if INET6 prp6 = (struct ip6protosw *)pffindproto_locked(PF_INET6, IPPROTO_TCP, SOCK_STREAM); VERIFY(prp6 != NULL); @@ -350,18 +339,12 @@ mptcp_init(struct protosw *pp, struct domain *dp) (struct socket_filter *)(uintptr_t)0xdeadbeefdeadbeef; mptcp_subflow_protosw6.pr_filter_head.tqh_last = (struct socket_filter **)(uintptr_t)0xdeadbeefdeadbeef; -#endif /* INET6 */ bzero(&mtcbinfo, sizeof(mtcbinfo)); TAILQ_INIT(&mtcbinfo.mppi_pcbs); mtcbinfo.mppi_size = sizeof(struct mpp_mtp); - if ((mtcbinfo.mppi_zone = zinit(mtcbinfo.mppi_size, - 1024 * mtcbinfo.mppi_size, 8192, "mptcb")) == NULL) { - panic("%s: unable to allocate MPTCP PCB zone\n", __func__); - /* NOTREACHED */ - } - zone_change(mtcbinfo.mppi_zone, Z_CALLERACCT, FALSE); - zone_change(mtcbinfo.mppi_zone, Z_EXPAND, TRUE); + mtcbinfo.mppi_zone = zone_create("mptc", mtcbinfo.mppi_size, + ZC_NONE); mtcbinfo.mppi_lock_grp_attr = lck_grp_attr_alloc_init(); mtcbinfo.mppi_lock_grp = lck_grp_alloc_init("mppcb", @@ -376,39 +359,11 @@ mptcp_init(struct protosw *pp, struct domain *dp) /* attach to MP domain for garbage collection to take place */ mp_pcbinfo_attach(&mtcbinfo); - mptsub_zone_size = sizeof(struct mptsub); - if ((mptsub_zone = zinit(mptsub_zone_size, 1024 * mptsub_zone_size, - 
8192, "mptsub")) == NULL) { - panic("%s: unable to allocate MPTCP subflow zone\n", __func__); - /* NOTREACHED */ - } - zone_change(mptsub_zone, Z_CALLERACCT, FALSE); - zone_change(mptsub_zone, Z_EXPAND, TRUE); - - mptopt_zone_size = sizeof(struct mptopt); - if ((mptopt_zone = zinit(mptopt_zone_size, 128 * mptopt_zone_size, - 1024, "mptopt")) == NULL) { - panic("%s: unable to allocate MPTCP option zone\n", __func__); - /* NOTREACHED */ - } - zone_change(mptopt_zone, Z_CALLERACCT, FALSE); - zone_change(mptopt_zone, Z_EXPAND, TRUE); - - mpt_subauth_entry_size = sizeof(struct mptcp_subf_auth_entry); - if ((mpt_subauth_zone = zinit(mpt_subauth_entry_size, - 1024 * mpt_subauth_entry_size, 8192, "mptauth")) == NULL) { - panic("%s: unable to allocate MPTCP address auth zone \n", - __func__); - /* NOTREACHED */ - } - zone_change(mpt_subauth_zone, Z_CALLERACCT, FALSE); - zone_change(mpt_subauth_zone, Z_EXPAND, TRUE); - mptcp_log_handle = os_log_create("com.apple.xnu.net.mptcp", "mptcp"); } int -mptcpstats_get_index_by_ifindex(struct mptcp_itf_stats *stats, int ifindex, boolean_t create) +mptcpstats_get_index_by_ifindex(struct mptcp_itf_stats *stats, u_short ifindex, boolean_t create) { int i, index = -1; @@ -521,8 +476,8 @@ mptcp_session_create(struct mppcb *mpp) mpte->mpte_itfinfo = &mpte->_mpte_itfinfo[0]; mpte->mpte_itfinfo_size = MPTE_ITFINFO_SIZE; - if (mptcp_alternate_port) { - mpte->mpte_alternate_port = htons(mptcp_alternate_port); + if (mptcp_alternate_port > 0 && mptcp_alternate_port < UINT16_MAX) { + mpte->mpte_alternate_port = htons((uint16_t)mptcp_alternate_port); } mpte->mpte_last_cellicon_set = tcp_now; @@ -765,7 +720,6 @@ mptcp_synthesize_nat64(struct in6_addr *addr, uint32_t len, 0x00, 0x00, 0x00, 0x00}, }; const char *ptrv4 = (const char *)addrv4; - char buf[MAX_IPv6_STR_LEN]; char *ptr = (char *)addr; if (IN_ZERONET(ntohl(addrv4->s_addr)) || // 0.0.0.0/8 Source hosts on local network @@ -813,10 +767,6 @@ mptcp_synthesize_nat64(struct in6_addr *addr, uint32_t len, panic("NAT64-prefix len is wrong: %u\n", len); } - os_log_info(mptcp_log_handle, "%s: nat64prefix-len %u synthesized %s\n", - __func__, len, - inet_ntop(AF_INET6, (void *)addr, buf, sizeof(buf))); - return 0; } @@ -865,6 +815,60 @@ mptcp_subflow_disconnecting(struct mptsub *mpts) return false; } +/* + * In Handover mode, only create cell subflow if + * - Symptoms marked WiFi as weak: + * Here, if we are sending data, then we can check the RTO-state. That is a + * stronger signal of WiFi quality than the Symptoms indicator. + * If however we are not sending any data, the only thing we can do is guess + * and thus bring up Cell. + * + * - Symptoms marked WiFi as unknown: + * In this state we don't know what the situation is and thus remain + * conservative, only bringing up cell if there are retransmissions going on. + */ +static boolean_t +mptcp_handover_use_cellular(struct mptses *mpte, struct tcpcb *tp) +{ + int unusable_state = mptcp_is_wifi_unusable_for_session(mpte); + + if (unusable_state == 0) { + /* WiFi is good - don't use cell */ + return false; + } + + if (unusable_state == -1) { + /* + * We are in unknown state, only use Cell if we have confirmed + * that WiFi is bad. + */ + if (mptetoso(mpte)->so_snd.sb_cc != 0 && tp->t_rxtshift >= mptcp_fail_thresh * 2) { + return true; + } else { + return false; + } + } + + if (unusable_state == 1) { + /* + * WiFi is confirmed to be bad from Symptoms-Framework. + * If we are sending data, check the RTOs. + * Otherwise, be pessimistic and use Cell. 
+ */ + if (mptetoso(mpte)->so_snd.sb_cc != 0) { + if (tp->t_rxtshift >= mptcp_fail_thresh * 2) { + return true; + } else { + return false; + } + } else { + return true; + } + } + + return false; +} + void mptcp_check_subflows_and_add(struct mptses *mpte) { @@ -939,32 +943,13 @@ mptcp_check_subflows_and_add(struct mptses *mpte) need_to_ask_symptoms = TRUE; } - /* - * In Handover mode, only create cell subflow if - * 1. Wi-Fi Assist is active - * 2. Symptoms marked WiFi as weak - * 3. We are experiencing RTOs or we are not sending data. - * - * This covers the scenario, where: - * 1. We send and get retransmission timeouts (thus, - * we confirmed that WiFi is indeed bad). - * 2. We are not sending and the server tries to send. - * Establshing a cell-subflow gives the server a - * chance to send us some data over cell if WiFi - * is dead. We establish the subflow with the - * backup-bit set, so the server is not allowed to - * send on this subflow as long as WiFi is providing - * good performance. - */ - if (mpte->mpte_svctype == MPTCP_SVCTYPE_HANDOVER && - !IFNET_IS_CELLULAR(subifp) && - !mptcp_subflow_disconnecting(mpts) && - (mptcp_is_wifi_unusable_for_session(mpte) == 0 || - (tp->t_rxtshift < mptcp_fail_thresh * 2 && mptetoso(mpte)->so_snd.sb_cc))) { - os_log_debug(mptcp_log_handle, - "%s - %lx: handover, wifi state %d rxt %u first-party %u sb_cc %u ifindex %u this %u rtt %u rttvar %u rto %u\n", + if (mpte->mpte_svctype == MPTCP_SVCTYPE_HANDOVER) { + os_log(mptcp_log_handle, + "%s - %lx: handover: cell %u wifi-state %d flags %#x rxt %u first-party %u sb_cc %u ifindex %u this %u rtt %u rttvar %u rto %u\n", __func__, (unsigned long)VM_KERNEL_ADDRPERM(mpte), + IFNET_IS_CELLULAR(subifp), mptcp_is_wifi_unusable_for_session(mpte), + mpts->mpts_flags, tp->t_rxtshift, !!(mpte->mpte_flags & MPTE_FIRSTPARTY), mptetoso(mpte)->so_snd.sb_cc, @@ -972,11 +957,17 @@ mptcp_check_subflows_and_add(struct mptses *mpte) tp->t_srtt >> TCP_RTT_SHIFT, tp->t_rttvar >> TCP_RTTVAR_SHIFT, tp->t_rxtcur); - found = TRUE; - /* We found a proper subflow on WiFi - no need for cell */ - want_cellular = FALSE; - break; + if (!IFNET_IS_CELLULAR(subifp) && + !mptcp_subflow_disconnecting(mpts) && + (mpts->mpts_flags & MPTSF_CONNECTED) && + !mptcp_handover_use_cellular(mpte, tp)) { + found = TRUE; + + /* We found a proper subflow on WiFi - no need for cell */ + want_cellular = FALSE; + break; + } } else if (mpte->mpte_svctype == MPTCP_SVCTYPE_TARGET_BASED) { uint64_t time_now = mach_continuous_time(); @@ -997,16 +988,6 @@ mptcp_check_subflows_and_add(struct mptses *mpte) want_cellular = FALSE; break; } - } else { - os_log_debug(mptcp_log_handle, - "%s - %lx: svc %u cell %u flags %#x unusable %d rtx %u first %u sbcc %u rtt %u rttvar %u rto %u\n", - __func__, (unsigned long)VM_KERNEL_ADDRPERM(mpte), - mpte->mpte_svctype, IFNET_IS_CELLULAR(subifp), mpts->mpts_flags, - mptcp_is_wifi_unusable_for_session(mpte), tp->t_rxtshift, - !!(mpte->mpte_flags & MPTE_FIRSTPARTY), mptetoso(mpte)->so_snd.sb_cc, - tp->t_srtt >> TCP_RTT_SHIFT, - tp->t_rttvar >> TCP_RTTVAR_SHIFT, - tp->t_rxtcur); } if (subifp->if_index == ifindex && @@ -1060,7 +1041,7 @@ mptcp_check_subflows_and_add(struct mptses *mpte) nat64prefixes[j].prefix_len, &((struct sockaddr_in *)(void *)dst)->sin_addr); if (error != 0) { - os_log_info(mptcp_log_handle, "%s - %lx: cannot synthesize this addr\n", + os_log_error(mptcp_log_handle, "%s - %lx: cannot synthesize this addr\n", __func__, (unsigned long)VM_KERNEL_ADDRPERM(mpte)); continue; } @@ -1174,14 +1155,9 @@ 
mptcp_handover_subflows_remove(struct mptses *mpte) os_log_debug(mptcp_log_handle, "%s - %lx: rxt %u sb_cc %u unusable %d\n", __func__, (unsigned long)VM_KERNEL_ADDRPERM(mpte), tp->t_rxtshift, mptetoso(mpte)->so_snd.sb_cc, wifi_unusable); - /* Is this subflow in good condition? */ - if (tp->t_rxtshift == 0 && mptetoso(mpte)->so_snd.sb_cc) { - found_working_subflow = true; - } - - /* Or WiFi is fine */ - if (!wifi_unusable) { + if (!mptcp_handover_use_cellular(mpte, tp)) { found_working_subflow = true; + break; } } @@ -1380,17 +1356,9 @@ mptcp_sched_create_subflows(struct mptses *mpte) * Allocate an MPTCP socket option structure. */ struct mptopt * -mptcp_sopt_alloc(int how) +mptcp_sopt_alloc(zalloc_flags_t how) { - struct mptopt *mpo; - - mpo = (how == M_WAITOK) ? zalloc(mptopt_zone) : - zalloc_noblock(mptopt_zone); - if (mpo != NULL) { - bzero(mpo, mptopt_zone_size); - } - - return mpo; + return zalloc_flags(mptopt_zone, how | Z_ZERO); } /* @@ -1452,14 +1420,7 @@ mptcp_sopt_find(struct mptses *mpte, struct sockopt *sopt) static struct mptsub * mptcp_subflow_alloc(void) { - struct mptsub *mpts = zalloc(mptsub_zone); - - if (mpts == NULL) { - return NULL; - } - - bzero(mpts, mptsub_zone_size); - return mpts; + return zalloc_flags(mptsub_zone, Z_WAITOK | Z_ZERO); } /* @@ -1612,6 +1573,7 @@ mptcp_subflow_socreate(struct mptses *mpte, struct mptsub *mpts, int dom, os_log_error(mptcp_log_handle, "%s - %lx: Couldn't find proc for pid %u\n", __func__, (unsigned long)VM_KERNEL_ADDRPERM(mpte), mp_so->last_pid); + mptcp_subflow_free(mpts); return ESRCH; } @@ -1811,11 +1773,9 @@ mptcp_subflow_socreate(struct mptses *mpte, struct mptsub *mpts, int dom, case PF_INET: (*so)->so_proto = &mptcp_subflow_protosw; break; -#if INET6 case PF_INET6: (*so)->so_proto = (struct protosw *)&mptcp_subflow_protosw6; break; -#endif /* INET6 */ default: VERIFY(0); /* NOTREACHED */ @@ -1911,7 +1871,7 @@ mptcp_subflow_soconnectx(struct mptses *mpte, struct mptsub *mpts) dport = ntohs(SIN6(dst)->sin6_port); } - os_log_info(mptcp_log_handle, + os_log(mptcp_log_handle, "%s - %lx: ifindex %u dst %s:%d pended %u\n", __func__, (unsigned long)VM_KERNEL_ADDRPERM(mpte), mpts->mpts_ifscope, dbuf, dport, !!(mpts->mpts_flags & MPTSF_CONNECT_PENDING)); @@ -1983,7 +1943,9 @@ mptcp_adj_rmap(struct socket *so, struct mbuf *m, int off, uint64_t dsn, } m->m_pkthdr.mp_dsn += off; m->m_pkthdr.mp_rseq += off; - m->m_pkthdr.mp_rlen = m->m_pkthdr.len; + + VERIFY(m_pktlen(m) < UINT16_MAX); + m->m_pkthdr.mp_rlen = (uint16_t)m_pktlen(m); } else { if (!(mpts->mpts_flags & MPTSF_FULLY_ESTABLISHED)) { /* data arrived without an DSS option mapping */ @@ -2001,7 +1963,9 @@ mptcp_adj_rmap(struct socket *so, struct mbuf *m, int off, uint64_t dsn, m->m_pkthdr.pkt_flags |= PKTF_MPTCP; m->m_pkthdr.mp_dsn = dsn + off; m->m_pkthdr.mp_rseq = rseq + off; - m->m_pkthdr.mp_rlen = m->m_pkthdr.len; + + VERIFY(m_pktlen(m) < UINT16_MAX); + m->m_pkthdr.mp_rlen = (uint16_t)m_pktlen(m); } } @@ -2339,7 +2303,7 @@ mptcp_subflow_sosend(struct socket *so, struct sockaddr *addr, struct uio *uio, OSIncrementAtomicLong(&p->p_stats->p_ru.ru_msgsnd); - error = sosendcheck(so, NULL, top->m_pkthdr.len, 0, 1, 0, &sblocked, NULL); + error = sosendcheck(so, NULL, top->m_pkthdr.len, 0, 1, 0, &sblocked); if (error) { goto out; } @@ -2943,7 +2907,7 @@ mptcp_subflow_output(struct mptses *mpte, struct mptsub *mpts, int flags) struct tcpcb *tp; uint64_t mpt_dsn = 0, off = 0; int sb_cc = 0, error = 0, wakeup = 0; - uint32_t dss_csum; + uint16_t dss_csum; uint16_t tot_sent = 0; boolean_t 
reinjected = FALSE; @@ -3166,15 +3130,15 @@ dont_reinject: head = tail = NULL; while (tot_sent < sb_cc) { - ssize_t mlen; + int32_t mlen; mlen = mpt_mbuf->m_len; mlen -= off; - mlen = min(mlen, sb_cc - tot_sent); + mlen = MIN(mlen, sb_cc - tot_sent); if (mlen < 0) { os_log_error(mptcp_log_handle, "%s - %lx: mlen %d mp_rlen %u off %u sb_cc %u tot_sent %u\n", - __func__, (unsigned long)VM_KERNEL_ADDRPERM(mpte), (int)mlen, mpt_mbuf->m_pkthdr.mp_rlen, + __func__, (unsigned long)VM_KERNEL_ADDRPERM(mpte), mlen, mpt_mbuf->m_pkthdr.mp_rlen, (uint32_t)off, sb_cc, tot_sent); goto out; } @@ -3386,8 +3350,8 @@ mptcp_add_reinjectq(struct mptses *mpte, struct mbuf *m) struct mbuf *tmp = n->m_nextpkt; mptcplog((LOG_DEBUG, "%s m is covering that guy dsn %u len %u dsn %u len %u\n", - __func__, m->m_pkthdr.mp_dsn, m->m_pkthdr.mp_rlen, - n->m_pkthdr.mp_dsn, n->m_pkthdr.mp_rlen), + __func__, (uint32_t)m->m_pkthdr.mp_dsn, m->m_pkthdr.mp_rlen, + (uint32_t)n->m_pkthdr.mp_dsn, n->m_pkthdr.mp_rlen), MPTCP_SOCKET_DBG, MPTCP_LOGLVL_VERBOSE); m->m_nextpkt = NULL; @@ -3406,7 +3370,7 @@ mptcp_add_reinjectq(struct mptses *mpte, struct mbuf *m) /* m is already fully covered by the previous mbuf in the queue */ if (prev->m_pkthdr.mp_dsn + prev->m_pkthdr.mp_rlen >= m->m_pkthdr.mp_dsn + m->m_pkthdr.len) { mptcplog((LOG_DEBUG, "%s prev covers us from %u with len %u\n", - __func__, prev->m_pkthdr.mp_dsn, prev->m_pkthdr.mp_rlen), + __func__, (uint32_t)prev->m_pkthdr.mp_dsn, prev->m_pkthdr.mp_rlen), MPTCP_SOCKET_DBG, MPTCP_LOGLVL_VERBOSE); goto dont_queue; } @@ -3607,7 +3571,7 @@ mptcp_clean_reinjectq(struct mptses *mpte) * Subflow socket control event upcall. */ static void -mptcp_subflow_eupcall1(struct socket *so, void *arg, uint32_t events) +mptcp_subflow_eupcall1(struct socket *so, void *arg, long events) { #pragma unused(so) struct mptsub *mpts = arg; @@ -3636,7 +3600,7 @@ mptcp_subflow_eupcall1(struct socket *so, void *arg, uint32_t events) */ static ev_ret_t mptcp_subflow_events(struct mptses *mpte, struct mptsub *mpts, - uint64_t *p_mpsofilt_hint) + long *p_mpsofilt_hint) { ev_ret_t ret = MPTS_EVRET_OK; int i, mpsub_ev_entry_count = sizeof(mpsub_ev_entry_tbl) / @@ -3657,10 +3621,6 @@ mptcp_subflow_events(struct mptses *mpte, struct mptsub *mpts, DTRACE_MPTCP3(subflow__events, struct mptses *, mpte, struct mptsub *, mpts, uint32_t, mpts->mpts_evctl); - mptcplog((LOG_DEBUG, "%s cid %d events=%b\n", __func__, - mpts->mpts_connid, mpts->mpts_evctl, SO_FILT_HINT_BITS), - MPTCP_EVENTS_DBG, MPTCP_LOGLVL_VERBOSE); - /* * Process all the socket filter hints and reset the hint * once it is handled @@ -3680,28 +3640,12 @@ mptcp_subflow_events(struct mptses *mpte, struct mptsub *mpts, } } - /* - * We should be getting only events specified via sock_catchevents(), - * so loudly complain if we have any unprocessed one(s). - */ - if (mpts->mpts_evctl || ret < MPTS_EVRET_OK) { - mptcplog((LOG_WARNING, "%s%s: cid %d evret %d unhandled events=%b\n", __func__, - (mpts->mpts_evctl && ret == MPTS_EVRET_OK) ? 
"MPTCP_ERROR " : "", - mpts->mpts_connid, - ret, mpts->mpts_evctl, SO_FILT_HINT_BITS), - MPTCP_EVENTS_DBG, MPTCP_LOGLVL_LOG); - } else { - mptcplog((LOG_DEBUG, "%s: Done, events %b\n", __func__, - mpts->mpts_evctl, SO_FILT_HINT_BITS), - MPTCP_EVENTS_DBG, MPTCP_LOGLVL_VERBOSE); - } - return ret; } static ev_ret_t mptcp_subflow_propagate_ev(struct mptses *mpte, struct mptsub *mpts, - uint64_t *p_mpsofilt_hint, uint64_t event) + long *p_mpsofilt_hint, long event) { struct socket *mp_so, *so; struct mptcb *mp_tp; @@ -3710,10 +3654,6 @@ mptcp_subflow_propagate_ev(struct mptses *mpte, struct mptsub *mpts, mp_tp = mpte->mpte_mptcb; so = mpts->mpts_socket; - mptcplog((LOG_DEBUG, "%s: cid %d event %d\n", __func__, - mpts->mpts_connid, event), - MPTCP_EVENTS_DBG, MPTCP_LOGLVL_LOG); - /* * We got an event for this subflow that might need to be propagated, * based on the state of the MPTCP connection. @@ -3733,7 +3673,7 @@ mptcp_subflow_propagate_ev(struct mptses *mpte, struct mptsub *mpts, */ static ev_ret_t mptcp_subflow_nosrcaddr_ev(struct mptses *mpte, struct mptsub *mpts, - uint64_t *p_mpsofilt_hint, uint64_t event) + long *p_mpsofilt_hint, long event) { #pragma unused(p_mpsofilt_hint, event) struct socket *mp_so; @@ -3766,7 +3706,7 @@ mptcp_subflow_nosrcaddr_ev(struct mptses *mpte, struct mptsub *mpts, static ev_ret_t mptcp_subflow_mpsuberror_ev(struct mptses *mpte, struct mptsub *mpts, - uint64_t *p_mpsofilt_hint, uint64_t event) + long *p_mpsofilt_hint, long event) { #pragma unused(event, p_mpsofilt_hint) struct socket *so, *mp_so; @@ -3795,7 +3735,7 @@ mptcp_subflow_mpsuberror_ev(struct mptses *mpte, struct mptsub *mpts, */ static ev_ret_t mptcp_subflow_mpcantrcvmore_ev(struct mptses *mpte, struct mptsub *mpts, - uint64_t *p_mpsofilt_hint, uint64_t event) + long *p_mpsofilt_hint, long event) { #pragma unused(event) struct mptcb *mp_tp = mpte->mpte_mptcb; @@ -3821,7 +3761,7 @@ mptcp_subflow_mpcantrcvmore_ev(struct mptses *mpte, struct mptsub *mpts, */ static ev_ret_t mptcp_subflow_failover_ev(struct mptses *mpte, struct mptsub *mpts, - uint64_t *p_mpsofilt_hint, uint64_t event) + long *p_mpsofilt_hint, long event) { #pragma unused(event, p_mpsofilt_hint) struct mptsub *mpts_alt = NULL; @@ -3885,7 +3825,7 @@ done: */ static ev_ret_t mptcp_subflow_ifdenied_ev(struct mptses *mpte, struct mptsub *mpts, - uint64_t *p_mpsofilt_hint, uint64_t event) + long *p_mpsofilt_hint, long event) { mptcplog((LOG_DEBUG, "%s: cid %d\n", __func__, mpts->mpts_connid), MPTCP_EVENTS_DBG, MPTCP_LOGLVL_LOG); @@ -3993,12 +3933,56 @@ mptcp_handle_ipv6_connection(struct mptses *mpte, const struct mptsub *mpts) } } +static void +mptcp_try_alternate_port(struct mptses *mpte, struct mptsub *mpts) +{ + struct inpcb *inp; + + if (!mptcp_ok_to_create_subflows(mpte->mpte_mptcb)) { + return; + } + + inp = sotoinpcb(mpts->mpts_socket); + if (inp == NULL) { + return; + } + + /* Should we try the alternate port? 
*/ + if (mpte->mpte_alternate_port && + inp->inp_fport != mpte->mpte_alternate_port) { + union sockaddr_in_4_6 dst; + struct sockaddr_in *dst_in = (struct sockaddr_in *)&dst; + + memcpy(&dst, &mpts->mpts_dst, mpts->mpts_dst.sa_len); + + dst_in->sin_port = mpte->mpte_alternate_port; + + mptcp_subflow_add(mpte, NULL, (struct sockaddr *)&dst, + mpts->mpts_ifscope, NULL); + } else { /* Else, we tried all we could, mark this interface as non-MPTCP */ + unsigned int i; + + if (inp->inp_last_outifp == NULL) { + return; + } + + for (i = 0; i < mpte->mpte_itfinfo_size; i++) { + struct mpt_itf_info *info = &mpte->mpte_itfinfo[i]; + + if (inp->inp_last_outifp->if_index == info->ifindex) { + info->no_mptcp_support = 1; + break; + } + } + } +} + /* * Handle SO_FILT_HINT_CONNECTED subflow socket event. */ static ev_ret_t mptcp_subflow_connected_ev(struct mptses *mpte, struct mptsub *mpts, - uint64_t *p_mpsofilt_hint, uint64_t event) + long *p_mpsofilt_hint, long event) { #pragma unused(event, p_mpsofilt_hint) struct socket *mp_so, *so; @@ -4156,30 +4140,7 @@ mptcp_subflow_connected_ev(struct mptses *mpte, struct mptsub *mpts, mptcp_check_subflows_and_remove(mpte); } else { - unsigned int i; - - /* Should we try the alternate port? */ - if (mpte->mpte_alternate_port && - inp->inp_fport != mpte->mpte_alternate_port) { - union sockaddr_in_4_6 dst; - struct sockaddr_in *dst_in = (struct sockaddr_in *)&dst; - - memcpy(&dst, &mpts->mpts_dst, mpts->mpts_dst.sa_len); - - dst_in->sin_port = mpte->mpte_alternate_port; - - mptcp_subflow_add(mpte, NULL, (struct sockaddr *)&dst, - mpts->mpts_ifscope, NULL); - } else { /* Else, we tried all we could, mark this interface as non-MPTCP */ - for (i = 0; i < mpte->mpte_itfinfo_size; i++) { - struct mpt_itf_info *info = &mpte->mpte_itfinfo[i]; - - if (inp->inp_last_outifp->if_index == info->ifindex) { - info->no_mptcp_support = 1; - break; - } - } - } + mptcp_try_alternate_port(mpte, mpts); tcpstat.tcps_join_fallback++; if (IFNET_IS_CELLULAR(inp->inp_last_outifp)) { @@ -4206,7 +4167,7 @@ mptcp_subflow_connected_ev(struct mptses *mpte, struct mptsub *mpts, */ static ev_ret_t mptcp_subflow_disconnected_ev(struct mptses *mpte, struct mptsub *mpts, - uint64_t *p_mpsofilt_hint, uint64_t event) + long *p_mpsofilt_hint, long event) { #pragma unused(event, p_mpsofilt_hint) struct socket *mp_so, *so; @@ -4238,6 +4199,11 @@ mptcp_subflow_disconnected_ev(struct mptses *mpte, struct mptsub *mpts, __func__), MPTCP_EVENTS_DBG, MPTCP_LOGLVL_LOG); } mpts->mpts_flags &= ~MPTSF_MPCAP_CTRSET; + } else { + if (so->so_flags & SOF_MP_SEC_SUBFLOW && + !(mpts->mpts_flags & MPTSF_CONNECTED)) { + mptcp_try_alternate_port(mpte, mpts); + } } if (mp_tp->mpt_state < MPTCPS_ESTABLISHED || @@ -4261,7 +4227,7 @@ mptcp_subflow_disconnected_ev(struct mptses *mpte, struct mptsub *mpts, */ static ev_ret_t mptcp_subflow_mpstatus_ev(struct mptses *mpte, struct mptsub *mpts, - uint64_t *p_mpsofilt_hint, uint64_t event) + long *p_mpsofilt_hint, long event) { #pragma unused(event, p_mpsofilt_hint) ev_ret_t ret = MPTS_EVRET_OK; @@ -4317,7 +4283,7 @@ done: */ static ev_ret_t mptcp_subflow_mustrst_ev(struct mptses *mpte, struct mptsub *mpts, - uint64_t *p_mpsofilt_hint, uint64_t event) + long *p_mpsofilt_hint, long event) { #pragma unused(event) struct socket *mp_so, *so; @@ -4329,7 +4295,6 @@ mptcp_subflow_mustrst_ev(struct mptses *mpte, struct mptsub *mpts, so = mpts->mpts_socket; /* We got an invalid option or a fast close */ - struct tcptemp *t_template; struct inpcb *inp = sotoinpcb(so); struct tcpcb *tp = NULL; 
@@ -4340,22 +4305,25 @@ mptcp_subflow_mustrst_ev(struct mptses *mpte, struct mptsub *mpts, tp->t_mpflags |= TMPF_RESET; - t_template = tcp_maketemplate(tp); - if (t_template) { - struct tcp_respond_args tra; + if (tp->t_state != TCPS_CLOSED) { + struct tcptemp *t_template = tcp_maketemplate(tp); - bzero(&tra, sizeof(tra)); - if (inp->inp_flags & INP_BOUND_IF) { - tra.ifscope = inp->inp_boundifp->if_index; - } else { - tra.ifscope = IFSCOPE_NONE; - } - tra.awdl_unrestricted = 1; + if (t_template) { + struct tcp_respond_args tra; - tcp_respond(tp, t_template->tt_ipgen, - &t_template->tt_t, (struct mbuf *)NULL, - tp->rcv_nxt, tp->snd_una, TH_RST, &tra); - (void) m_free(dtom(t_template)); + bzero(&tra, sizeof(tra)); + if (inp->inp_flags & INP_BOUND_IF) { + tra.ifscope = inp->inp_boundifp->if_index; + } else { + tra.ifscope = IFSCOPE_NONE; + } + tra.awdl_unrestricted = 1; + + tcp_respond(tp, t_template->tt_ipgen, + &t_template->tt_t, (struct mbuf *)NULL, + tp->rcv_nxt, tp->snd_una, TH_RST, &tra); + (void) m_free(dtom(t_template)); + } } if (!(mp_tp->mpt_flags & MPTCPF_FALLBACK_TO_TCP) && is_fastclose) { @@ -4381,7 +4349,6 @@ mptcp_subflow_mustrst_ev(struct mptses *mpte, struct mptsub *mpts, mptcp_subflow_abort(mpts, ECONNABORTED); - if (mp_tp->mpt_gc_ticks == MPT_GC_TICKS) { mp_tp->mpt_gc_ticks = MPT_GC_TICKS_FAST; } @@ -4391,7 +4358,7 @@ mptcp_subflow_mustrst_ev(struct mptses *mpte, struct mptsub *mpts, static ev_ret_t mptcp_subflow_adaptive_rtimo_ev(struct mptses *mpte, struct mptsub *mpts, - uint64_t *p_mpsofilt_hint, uint64_t event) + long *p_mpsofilt_hint, long event) { #pragma unused(event) bool found_active = false; @@ -4421,7 +4388,7 @@ mptcp_subflow_adaptive_rtimo_ev(struct mptses *mpte, struct mptsub *mpts, static ev_ret_t mptcp_subflow_adaptive_wtimo_ev(struct mptses *mpte, struct mptsub *mpts, - uint64_t *p_mpsofilt_hint, uint64_t event) + long *p_mpsofilt_hint, long event) { #pragma unused(event) bool found_active = false; @@ -4650,7 +4617,7 @@ mptcp_gc(struct mppcbinfo *mppi) * Drop a MPTCP connection, reporting the specified error. 
*/ struct mptses * -mptcp_drop(struct mptses *mpte, struct mptcb *mp_tp, int errno) +mptcp_drop(struct mptses *mpte, struct mptcb *mp_tp, u_short errno) { struct socket *mp_so = mptetoso(mpte); @@ -4708,7 +4675,7 @@ void mptcp_subflow_workloop(struct mptses *mpte) { boolean_t connect_pending = FALSE, disconnect_fallback = FALSE; - uint64_t mpsofilt_hint_mask = SO_FILT_HINT_LOCKED; + long mpsofilt_hint_mask = SO_FILT_HINT_LOCKED; struct mptsub *mpts, *tmpts; struct socket *mp_so; @@ -4803,7 +4770,7 @@ relaunch: mpts->mpts_flags |= MPTSF_MP_DEGRADED; if (mpts->mpts_flags & (MPTSF_DISCONNECTING | - MPTSF_DISCONNECTED | MPTSF_CONNECT_PENDING)) { + MPTSF_DISCONNECTED)) { continue; } @@ -5276,7 +5243,8 @@ mptcp_insert_dsn(struct mppcb *mpp, struct mbuf *m) VERIFY(m->m_flags & M_PKTHDR); m->m_pkthdr.pkt_flags |= (PKTF_MPTCP | PKTF_MPSO); m->m_pkthdr.mp_dsn = mp_tp->mpt_sndmax; - m->m_pkthdr.mp_rlen = m_pktlen(m); + VERIFY(m_pktlen(m) >= 0 && m_pktlen(m) < UINT16_MAX); + m->m_pkthdr.mp_rlen = (uint16_t)m_pktlen(m); mp_tp->mpt_sndmax += m_pktlen(m); m = m->m_next; } @@ -5289,6 +5257,8 @@ mptcp_fallback_sbdrop(struct socket *so, struct mbuf *m, int len) uint64_t data_ack; uint64_t dsn; + VERIFY(len >= 0); + if (!m || len == 0) { return; } @@ -5527,12 +5497,12 @@ mptcp_act_on_txfail(struct socket *so) * Support for MP_FAIL option */ int -mptcp_get_map_for_dsn(struct socket *so, u_int64_t dsn_fail, u_int32_t *tcp_seq) +mptcp_get_map_for_dsn(struct socket *so, uint64_t dsn_fail, uint32_t *tcp_seq) { struct mbuf *m = so->so_snd.sb_mb; - u_int64_t dsn; + uint16_t datalen; + uint64_t dsn; int off = 0; - u_int32_t datalen; if (m == NULL) { return -1; @@ -5545,10 +5515,8 @@ mptcp_get_map_for_dsn(struct socket *so, u_int64_t dsn_fail, u_int32_t *tcp_seq) datalen = m->m_pkthdr.mp_rlen; if (MPTCP_SEQ_LEQ(dsn, dsn_fail) && (MPTCP_SEQ_GEQ(dsn + datalen, dsn_fail))) { - off = dsn_fail - dsn; + off = (int)(dsn_fail - dsn); *tcp_seq = m->m_pkthdr.mp_rseq + off; - mptcplog((LOG_DEBUG, "%s: %llu %llu \n", __func__, dsn, - dsn_fail), MPTCP_SENDER_DBG, MPTCP_LOGLVL_LOG); return 0; } @@ -5946,7 +5914,6 @@ fill_mptcp_subflow(struct socket *so, mptcp_flow_t *flow, struct mptsub *mpts) tcp_getconninfo(so, &flow->flow_ci); inp = sotoinpcb(so); -#if INET6 if ((inp->inp_vflag & INP_IPV6) != 0) { flow->flow_src.ss_family = AF_INET6; flow->flow_dst.ss_family = AF_INET6; @@ -5956,9 +5923,7 @@ fill_mptcp_subflow(struct socket *so, mptcp_flow_t *flow, struct mptsub *mpts) SIN6(&flow->flow_dst)->sin6_port = inp->in6p_fport; SIN6(&flow->flow_src)->sin6_addr = inp->in6p_laddr; SIN6(&flow->flow_dst)->sin6_addr = inp->in6p_faddr; - } else -#endif - if ((inp->inp_vflag & INP_IPV4) != 0) { + } else if ((inp->inp_vflag & INP_IPV4) != 0) { flow->flow_src.ss_family = AF_INET; flow->flow_dst.ss_family = AF_INET; flow->flow_src.ss_len = sizeof(struct sockaddr_in); @@ -6141,7 +6106,7 @@ mptcp_notsent_lowat_check(struct socket *so) ((notsent - (mp_tp->mpt_sndnxt - mp_tp->mpt_snduna)) <= mp_tp->mpt_notsent_lowat)) { mptcplog((LOG_DEBUG, "MPTCP Sender: " - "lowat %d notsent %d actual %d \n", + "lowat %d notsent %d actual %llu \n", mp_tp->mpt_notsent_lowat, notsent, notsent - (mp_tp->mpt_sndnxt - mp_tp->mpt_snduna)), MPTCP_SENDER_DBG, MPTCP_LOGLVL_VERBOSE); @@ -6426,7 +6391,8 @@ int mptcp_is_wifi_unusable_for_session(struct mptses *mpte) { if (mpte->mpte_flags & MPTE_FIRSTPARTY) { - if (mptcp_advisory.sa_wifi_status) { + if (mpte->mpte_svctype != MPTCP_SVCTYPE_HANDOVER && + mptcp_advisory.sa_wifi_status) { return symptoms_is_wifi_lossy() ? 
1 : 0; } @@ -6635,9 +6601,10 @@ mptcp_clear_cellicon(void) * Returns true if the icon has been flipped to WiFi. */ static boolean_t -__mptcp_unset_cellicon(long val) +__mptcp_unset_cellicon(uint32_t val) { - if (OSAddAtomic(-val, &mptcp_cellicon_refcount) != 1) { + VERIFY(val < INT32_MAX); + if (OSAddAtomic((int32_t)-val, &mptcp_cellicon_refcount) != 1) { return false; } diff --git a/bsd/netinet/mptcp_timer.c b/bsd/netinet/mptcp_timer.c index ac7595aea..b1fba1579 100644 --- a/bsd/netinet/mptcp_timer.c +++ b/bsd/netinet/mptcp_timer.c @@ -69,7 +69,7 @@ SYSCTL_INT(_net_inet_mptcp, OID_AUTO, tw, CTLFLAG_RW | CTLFLAG_LOCKED, static int mptcp_cancel_urgency_timer(struct mptses *mpte); static int -mptcp_timer_demux(struct mptses *mpte, uint32_t now_msecs) +mptcp_timer_demux(struct mptses *mpte, uint64_t now_msecs) { struct mptcb *mp_tp = NULL; mp_tp = mpte->mpte_mptcb; @@ -82,8 +82,7 @@ mptcp_timer_demux(struct mptses *mpte, uint32_t now_msecs) if (mp_tp->mpt_rxtstart == 0) { break; } - if ((now_msecs - mp_tp->mpt_rxtstart) > - (mptcp_rto * hz)) { + if ((now_msecs - mp_tp->mpt_rxtstart) > (mptcp_rto * hz)) { if (MPTCP_SEQ_GT(mp_tp->mpt_snduna, mp_tp->mpt_rtseq)) { mp_tp->mpt_timer_vals = 0; mp_tp->mpt_rtseq = 0; @@ -133,8 +132,8 @@ mptcp_timer(struct mppcbinfo *mppi) { struct mppcb *mpp, *tmpp; struct timeval now; - u_int32_t now_msecs; uint32_t resched_timer = 0; + uint64_t now_msecs; LCK_MTX_ASSERT(&mppi->mppi_lock, LCK_MTX_ASSERT_OWNED); diff --git a/bsd/netinet/mptcp_usrreq.c b/bsd/netinet/mptcp_usrreq.c index db728ca45..ff16b486f 100644 --- a/bsd/netinet/mptcp_usrreq.c +++ b/bsd/netinet/mptcp_usrreq.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012-2017 Apple Inc. All rights reserved. + * Copyright (c) 2012-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -126,16 +126,12 @@ mptcp_usr_attach(struct socket *mp_so, int proto, struct proc *p) VERIFY(mpsotomppcb(mp_so) == NULL); error = mptcp_attach(mp_so, p); - if (error != 0) { + if (error) { goto out; } - /* - * XXX: adi@apple.com - * - * Might want to use a different SO_LINGER timeout than TCP's? 
- */ + if ((mp_so->so_options & SO_LINGER) && mp_so->so_linger == 0) { - mp_so->so_linger = TCP_LINGERTIME * hz; + mp_so->so_linger = (short)(TCP_LINGERTIME * hz); } out: return error; @@ -247,36 +243,17 @@ mptcp_entitlement_check(struct socket *mp_so, uint8_t svctype) return 0; } - /* Now, take a look at exceptions configured through sysctl */ -#if (DEVELOPMENT || DEBUG) - if (mptcp_disable_entitlements) { - return 0; - } -#endif - if (svctype == MPTCP_SVCTYPE_AGGREGATE) { if (mptcp_developer_mode) { return 0; } - goto deny; - } - - /* Second, check for regular users that are within the data-limits */ - if (soopt_cred_check(mp_so, PRIV_NET_PRIVILEGED_MULTIPATH, TRUE, FALSE) == 0) { - return 0; + os_log_error(mptcp_log_handle, "%s - %lx: MPTCP prohibited on svc %u\n", + __func__, (unsigned long)VM_KERNEL_ADDRPERM(mpte), mpte->mpte_svctype); + return -1; } - if (mp_so->so_flags & SOF_DELEGATED && - soopt_cred_check(mp_so, PRIV_NET_PRIVILEGED_MULTIPATH, TRUE, TRUE) == 0) { - return 0; - } - -deny: - os_log_error(mptcp_log_handle, "%s - %lx: MPTCP prohibited on svc %u\n", - __func__, (unsigned long)VM_KERNEL_ADDRPERM(mpte), svctype); - - return -1; + return 0; } /* @@ -619,13 +596,18 @@ mptcp_getconninfo(struct mptses *mpte, sae_connid_t *cid, uint32_t *flags, return 0; } else { /* Per-interface stats */ - const struct mptsub *mpts, *orig_mpts; + const struct mptsub *mpts, *orig_mpts = NULL; struct conninfo_tcp tcp_ci; const struct inpcb *inp; struct socket *so; int error = 0; int index; + /* cid is thus an ifindex - range-check first! */ + if (*cid > USHRT_MAX) { + return EINVAL; + } + bzero(&tcp_ci, sizeof(tcp_ci)); /* First, get a subflow to fill in the "regular" info. */ @@ -741,7 +723,7 @@ interface_info: * nor anything in the stats, return EINVAL. Because the * ifindex belongs to something that doesn't exist. */ - index = mptcpstats_get_index_by_ifindex(mpte->mpte_itfstats, *cid, false); + index = mptcpstats_get_index_by_ifindex(mpte->mpte_itfstats, (u_short)(*cid), false); if (index == -1) { os_log_error(mptcp_log_handle, "%s - %lx: Asking for too many ifindex: %u subcount %u, mpts? %s\n", @@ -825,7 +807,7 @@ mptcp_usr_control(struct socket *mp_so, u_long cmd, caddr_t data, struct so_aidreq64 aidr; bcopy(data, &aidr, sizeof(aidr)); error = mptcp_getassocids(mpte, &aidr.sar_cnt, - aidr.sar_aidp); + (user_addr_t)aidr.sar_aidp); if (error == 0) { bcopy(&aidr, data, sizeof(aidr)); } @@ -847,7 +829,7 @@ mptcp_usr_control(struct socket *mp_so, u_long cmd, caddr_t data, struct so_cidreq64 cidr; bcopy(data, &cidr, sizeof(cidr)); error = mptcp_getconnids(mpte, cidr.scr_aid, &cidr.scr_cnt, - cidr.scr_cidp); + (user_addr_t)cidr.scr_cidp); if (error == 0) { bcopy(&cidr, data, sizeof(cidr)); } @@ -873,8 +855,9 @@ mptcp_usr_control(struct socket *mp_so, u_long cmd, caddr_t data, bcopy(data, &cifr, sizeof(cifr)); error = mptcp_getconninfo(mpte, &cifr.scir_cid, &cifr.scir_flags, &cifr.scir_ifindex, &cifr.scir_error, - cifr.scir_src, &cifr.scir_src_len, cifr.scir_dst, - &cifr.scir_dst_len, &cifr.scir_aux_type, cifr.scir_aux_data, + (user_addr_t)cifr.scir_src, &cifr.scir_src_len, + (user_addr_t)cifr.scir_dst, &cifr.scir_dst_len, + &cifr.scir_aux_type, (user_addr_t)cifr.scir_aux_data, &cifr.scir_aux_len); if (error == 0) { bcopy(&cifr, data, sizeof(cifr)); @@ -1141,12 +1124,11 @@ out: * Copy the contents of uio into a properly sized mbuf chain. 
*/ static int -mptcp_uiotombuf(struct uio *uio, int how, int space, uint32_t align, - struct mbuf **top) +mptcp_uiotombuf(struct uio *uio, int how, user_ssize_t space, struct mbuf **top) { struct mbuf *m, *mb, *nm = NULL, *mtail = NULL; - user_ssize_t resid, tot, len, progress; /* must be user_ssize_t */ - int error; + int progress, len, error; + user_ssize_t resid, tot; VERIFY(top != NULL && *top == NULL); @@ -1156,24 +1138,17 @@ mptcp_uiotombuf(struct uio *uio, int how, int space, uint32_t align, */ resid = uio_resid(uio); if (space > 0) { - tot = imin(resid, space); + tot = MIN(resid, space); } else { tot = resid; } - /* - * The smallest unit is a single mbuf with pkthdr. - * We can't align past it. - */ - if (align >= MHLEN) { + if (tot < 0 || tot > INT_MAX) { return EINVAL; } - /* - * Give us the full allocation or nothing. - * If space is zero return the smallest empty mbuf. - */ - if ((len = tot + align) == 0) { + len = (int)tot; + if (len == 0) { len = 1; } @@ -1214,12 +1189,12 @@ mptcp_uiotombuf(struct uio *uio, int how, int space, uint32_t align, } m = nm; - m->m_data += align; progress = 0; /* Fill all mbufs with uio data and update header information. */ for (mb = m; mb != NULL; mb = mb->m_next) { - len = imin(M_TRAILINGSPACE(mb), tot - progress); + /* tot >= 0 && tot <= INT_MAX (see above) */ + len = MIN((int)M_TRAILINGSPACE(mb), (int)(tot - progress)); error = uiomove(mtod(mb, char *), len, uio); if (error != 0) { @@ -1246,8 +1221,7 @@ mptcp_usr_sosend(struct socket *mp_so, struct sockaddr *addr, struct uio *uio, struct mbuf *top, struct mbuf *control, int flags) { #pragma unused(addr) - int32_t space; - user_ssize_t resid; + user_ssize_t resid, space; int error, sendflags; struct proc *p = current_proc(); int sblocked = 0; @@ -1266,8 +1240,7 @@ mptcp_usr_sosend(struct socket *mp_so, struct sockaddr *addr, struct uio *uio, VERIFY(mp_so->so_type == SOCK_STREAM); VERIFY(!(mp_so->so_flags & SOF_MP_SUBFLOW)); - if ((flags & (MSG_OOB | MSG_DONTROUTE)) || - (mp_so->so_flags & SOF_ENABLE_MSGS)) { + if (flags & (MSG_OOB | MSG_DONTROUTE)) { error = EOPNOTSUPP; socket_unlock(mp_so, 1); goto out; @@ -1280,7 +1253,8 @@ mptcp_usr_sosend(struct socket *mp_so, struct sockaddr *addr, struct uio *uio, * hand, a negative resid causes us to loop sending 0-length * segments to the protocol. */ - if (resid < 0 || (flags & MSG_EOR) || control != NULL) { + if (resid < 0 || resid > INT_MAX || + (flags & MSG_EOR) || control != NULL) { error = EINVAL; socket_unlock(mp_so, 1); goto out; @@ -1290,7 +1264,7 @@ mptcp_usr_sosend(struct socket *mp_so, struct sockaddr *addr, struct uio *uio, do { error = sosendcheck(mp_so, NULL, resid, 0, 0, flags, - &sblocked, NULL); + &sblocked); if (error != 0) { goto release; } @@ -1301,7 +1275,7 @@ mptcp_usr_sosend(struct socket *mp_so, struct sockaddr *addr, struct uio *uio, /* * Copy the data from userland into an mbuf chain. 
*/ - error = mptcp_uiotombuf(uio, M_WAITOK, space, 0, &top); + error = mptcp_uiotombuf(uio, M_WAITOK, space, &top); if (error != 0) { socket_lock(mp_so, 0); goto release; @@ -1713,12 +1687,12 @@ mptcp_setopt(struct mptses *mpte, struct sockopt *sopt) goto err_out; } - if (mptcp_entitlement_check(mp_so, optval) < 0) { + if (mptcp_entitlement_check(mp_so, (uint8_t)optval) < 0) { error = EACCES; goto err_out; } - mpte->mpte_svctype = optval; + mpte->mpte_svctype = (uint8_t)optval; mpte->mpte_flags |= MPTE_SVCTYPE_CHECKED; goto out; @@ -1735,7 +1709,7 @@ mptcp_setopt(struct mptses *mpte, struct sockopt *sopt) goto err_out; } - mpte->mpte_alternate_port = optval; + mpte->mpte_alternate_port = (uint16_t)optval; goto out; case MPTCP_FORCE_ENABLE: @@ -1830,7 +1804,7 @@ mptcp_setopt(struct mptses *mpte, struct sockopt *sopt) if (rec) { /* search for an existing one; if not found, allocate */ if ((mpo = mptcp_sopt_find(mpte, sopt)) == NULL) { - mpo = mptcp_sopt_alloc(M_WAITOK); + mpo = mptcp_sopt_alloc(Z_WAITOK); } if (mpo == NULL) { @@ -1935,7 +1909,7 @@ mptcp_fill_info(struct mptses *mpte, struct tcp_info *ti) bzero(ti, sizeof(*ti)); - ti->tcpi_state = mp_tp->mpt_state; + ti->tcpi_state = (uint8_t)mp_tp->mpt_state; /* tcpi_options */ /* tcpi_snd_wscale */ /* tcpi_rcv_wscale */ @@ -1956,8 +1930,8 @@ mptcp_fill_info(struct mptses *mpte, struct tcp_info *ti) /* tcpi_snd_cwnd */ /* tcpi_rcv_space */ ti->tcpi_snd_wnd = mp_tp->mpt_sndwnd; - ti->tcpi_snd_nxt = mp_tp->mpt_sndnxt; - ti->tcpi_rcv_nxt = mp_tp->mpt_rcvnxt; + ti->tcpi_snd_nxt = (uint32_t)mp_tp->mpt_sndnxt; + ti->tcpi_rcv_nxt = (uint32_t)mp_tp->mpt_rcvnxt; if (acttp) { ti->tcpi_last_outif = (acttp->t_inpcb->inp_last_outifp == NULL) ? 0 : acttp->t_inpcb->inp_last_outifp->if_index; @@ -2027,6 +2001,7 @@ mptcp_getopt(struct mptses *mpte, struct sockopt *sopt) case PERSIST_TIMEOUT: /* Only case for which we have a non-zero default */ optval = tcp_max_persist_timeout; + OS_FALLTHROUGH; case TCP_NODELAY: case TCP_RXT_FINDROP: case TCP_KEEPALIVE: diff --git a/bsd/netinet/mptcp_var.h b/bsd/netinet/mptcp_var.h index c1063229d..cc16b1c70 100644 --- a/bsd/netinet/mptcp_var.h +++ b/bsd/netinet/mptcp_var.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012-2017 Apple Inc. All rights reserved. + * Copyright (c) 2012-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -200,9 +200,9 @@ static inline int mptcp_subflow_cwnd_space(struct socket *so) { struct tcpcb *tp = sototcpcb(so); - int cwnd = min(tp->snd_wnd, tp->snd_cwnd) - (so->so_snd.sb_cc); + int cwnd = (int)(MIN(tp->snd_wnd, tp->snd_cwnd) - (so->so_snd.sb_cc)); - return min(cwnd, sbspace(&so->so_snd)); + return MIN(cwnd, sbspace(&so->so_snd)); } @@ -227,15 +227,15 @@ struct mptopt { * Note that mpts_flags and mpts_evctl are modified via atomic operations. 
*/ struct mptsub { - TAILQ_ENTRY(mptsub) mpts_entry; /* glue to peer subflows */ - uint32_t mpts_refcnt; /* reference count */ - uint32_t mpts_flags; /* see flags below */ - uint32_t mpts_evctl; /* subflow control events */ - sae_connid_t mpts_connid; /* subflow connection ID */ - int mpts_oldintval; /* sopt_val before sosetopt */ - struct mptses *mpts_mpte; /* back ptr to MPTCP session */ - struct socket *mpts_socket; /* subflow socket */ - struct sockaddr *mpts_src; /* source address */ + TAILQ_ENTRY(mptsub) mpts_entry; /* glue to peer subflows */ + uint32_t mpts_refcnt; /* reference count */ + uint32_t mpts_flags; /* see flags below */ + long mpts_evctl; /* subflow control events */ + sae_connid_t mpts_connid; /* subflow connection ID */ + int mpts_oldintval; /* sopt_val before sosetopt */ + struct mptses *mpts_mpte; /* back ptr to MPTCP session */ + struct socket *mpts_socket; /* subflow socket */ + struct sockaddr *mpts_src; /* source address */ union { /* destination address */ @@ -356,11 +356,12 @@ struct mptcp_subf_auth_entry { * Keep in sync with bsd/dev/dtrace/scripts/mptcp.d. */ struct mptcb { - struct mptses *mpt_mpte; /* back ptr to MPTCP session */ + struct mptses *mpt_mpte; /* back ptr to MPTCP session */ mptcp_state_t mpt_state; /* MPTCP state */ - uint32_t mpt_flags; /* see flags below */ - uint32_t mpt_version; /* MPTCP proto version */ - int mpt_softerror; /* error not yet reported */ + uint32_t mpt_flags; /* see flags below */ + uint8_t mpt_version; /* MPTCP proto version */ + uint8_t mpt_peer_version; /* Version from peer */ + u_short mpt_softerror; /* error not yet reported */ /* * Authentication and metadata invariants */ @@ -374,10 +375,10 @@ struct mptcb { * Data ACKs do not. */ int mpt_rxtshift; /* num of consecutive retrans */ - uint32_t mpt_rxtstart; /* time at which rxt started */ + uint64_t mpt_rxtstart; /* time at which rxt started */ uint64_t mpt_rtseq; /* seq # being tracked */ + uint64_t mpt_timewait; /* timewait */ uint32_t mpt_timer_vals; /* timer related values */ - uint32_t mpt_timewait; /* timewait */ /* * Sending side */ @@ -393,8 +394,8 @@ struct mptcb { */ uint64_t mpt_rcvnxt; /* Next expected DSN */ uint64_t mpt_remote_idsn; /* Peer's IDSN */ + uint64_t mpt_rcvadv; uint32_t mpt_rcvwnd; - uint32_t mpt_rcvadv; LIST_HEAD(, mptcp_subf_auth_entry) mpt_subauth_list; /* address IDs */ /* * Fastclose @@ -409,10 +410,9 @@ struct mptcb { int32_t mpt_gc_ticks; /* Used for zombie deletion */ uint32_t mpt_notsent_lowat; /* TCP_NOTSENT_LOWAT support */ - uint32_t mpt_peer_version; /* Version from peer */ struct tsegqe_head mpt_segq; - uint16_t mpt_reassqlen; /* length of reassembly queue */ + uint32_t mpt_reassqlen; /* length of reassembly queue */ }; /* valid values for mpt_flags (see also notes on mpts_flags above) */ @@ -568,8 +568,8 @@ extern void mptcp_check_subflows_and_add(struct mptses *mpte); extern void mptcp_check_subflows_and_remove(struct mptses *mpte); extern void mptcpstats_inc_switch(struct mptses *mpte, const struct mptsub *mpts); extern void mptcpstats_update(struct mptcp_itf_stats *stats, const struct mptsub *mpts); -extern int mptcpstats_get_index_by_ifindex(struct mptcp_itf_stats *stats, int ifindex, boolean_t create); -extern struct mptses *mptcp_drop(struct mptses *, struct mptcb *, int); +extern int mptcpstats_get_index_by_ifindex(struct mptcp_itf_stats *stats, u_short ifindex, boolean_t create); +extern struct mptses *mptcp_drop(struct mptses *mpte, struct mptcb *mp_tp, u_short errno); extern struct mptses *mptcp_close(struct mptses *, 
struct mptcb *); extern int mptcp_lock(struct socket *, int, void *); extern int mptcp_unlock(struct socket *, int, void *); @@ -579,7 +579,7 @@ extern void mptcp_subflow_workloop(struct mptses *); extern void mptcp_sched_create_subflows(struct mptses *); extern void mptcp_finish_usrclosed(struct mptses *mpte); -extern struct mptopt *mptcp_sopt_alloc(int); +extern struct mptopt *mptcp_sopt_alloc(zalloc_flags_t); extern const char *mptcp_sopt2str(int, int); extern void mptcp_sopt_free(struct mptopt *); extern void mptcp_sopt_insert(struct mptses *, struct mptopt *); @@ -624,7 +624,7 @@ extern void mptcp_output_getm_dsnmap64(struct socket *so, int off, uint16_t *data_len, uint16_t *dss_csum); extern void mptcp_act_on_txfail(struct socket *); extern struct mptsub *mptcp_get_subflow(struct mptses *mpte, struct mptsub **preferred); -extern int mptcp_get_map_for_dsn(struct socket *, u_int64_t, u_int32_t *); +extern int mptcp_get_map_for_dsn(struct socket *so, uint64_t dsn_fail, uint32_t *tcp_seq); extern int32_t mptcp_adj_sendlen(struct socket *so, int32_t off); extern void mptcp_sbrcv_grow(struct mptcb *mp_tp); extern int32_t mptcp_sbspace(struct mptcb *); @@ -649,16 +649,15 @@ extern void mptcp_unset_cellicon(struct mptses *mpte, struct mptsub *mpts, uint3 extern void mptcp_reset_rexmit_state(struct tcpcb *tp); extern void mptcp_reset_keepalive(struct tcpcb *tp); extern int mptcp_validate_csum(struct tcpcb *tp, struct mbuf *m, uint64_t dsn, - uint32_t sseq, uint16_t dlen, uint16_t csum, - uint16_t dfin); + uint32_t sseq, uint16_t dlen, uint16_t csum, int dfin); __END_DECLS #endif /* BSD_KERNEL_PRIVATE */ #ifdef PRIVATE typedef struct mptcp_flow { - size_t flow_len; - size_t flow_tcpci_offset; + uint64_t flow_len; + uint64_t flow_tcpci_offset; uint32_t flow_flags; sae_connid_t flow_cid; struct sockaddr_storage flow_src; @@ -670,9 +669,9 @@ typedef struct mptcp_flow { } mptcp_flow_t; typedef struct conninfo_mptcp { - size_t mptcpci_len; - size_t mptcpci_flow_offset; /* offsetof first flow */ - size_t mptcpci_nflows; /* number of subflows */ + uint64_t mptcpci_len; + uint64_t mptcpci_flow_offset; /* offsetof first flow */ + uint64_t mptcpci_nflows; /* number of subflows */ uint32_t mptcpci_state; /* MPTCP level state */ uint32_t mptcpci_mpte_flags; /* Session flags */ uint32_t mptcpci_flags; /* MPTCB flags */ diff --git a/bsd/netinet/raw_ip.c b/bsd/netinet/raw_ip.c index b3838d3e7..a140e8925 100644 --- a/bsd/netinet/raw_ip.c +++ b/bsd/netinet/raw_ip.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2019 Apple Inc. All rights reserved. + * Copyright (c) 2000-2020 Apple Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -97,11 +97,8 @@ #include #include -#if INET6 #include -#endif /* INET6 */ -#include #if IPSEC #include @@ -109,13 +106,8 @@ #if DUMMYNET #include -#endif - -#if CONFIG_MACF_NET -#include -#endif /* MAC_NET */ +#endif /* DUMMYNET */ -int load_ipfw(void); int rip_detach(struct socket *); int rip_abort(struct socket *); int rip_disconnect(struct socket *); @@ -126,10 +118,7 @@ int rip_shutdown(struct socket *); struct inpcbhead ripcb; struct inpcbinfo ripcbinfo; -/* control hooks for ipfw and dummynet */ -#if IPFIREWALL -ip_fw_ctl_t *ip_fw_ctl_ptr; -#endif /* IPFIREWALL */ +/* control hooks for dummynet */ #if DUMMYNET ip_dn_ctl_t *ip_dn_ctl_ptr; #endif /* DUMMYNET */ @@ -171,8 +160,8 @@ rip_init(struct protosw *pp, struct domain *dp) ripcbinfo.ipi_hashbase = hashinit(1, M_PCB, &ripcbinfo.ipi_hashmask); ripcbinfo.ipi_porthashbase = hashinit(1, M_PCB, &ripcbinfo.ipi_porthashmask); - ripcbinfo.ipi_zone = zinit(sizeof(struct inpcb), - (4096 * sizeof(struct inpcb)), 4096, "ripzone"); + ripcbinfo.ipi_zone = zone_create("ripzone", sizeof(struct inpcb), + ZC_NONE); pcbinfo = &ripcbinfo; /* @@ -223,11 +212,9 @@ rip_input(struct mbuf *m, int iphlen) ripsrc.sin_addr = ip->ip_src; lck_rw_lock_shared(ripcbinfo.ipi_lock); LIST_FOREACH(inp, &ripcb, inp_list) { -#if INET6 if ((inp->inp_vflag & INP_IPV4) == 0) { continue; } -#endif if (inp->inp_ip_p && (inp->inp_ip_p != ip->ip_p)) { continue; } @@ -249,24 +236,19 @@ rip_input(struct mbuf *m, int iphlen) #if NECP if (n && !necp_socket_is_allowed_to_send_recv_v4(last, 0, 0, - &ip->ip_dst, &ip->ip_src, ifp, NULL, NULL, NULL)) { + &ip->ip_dst, &ip->ip_src, ifp, 0, NULL, NULL, NULL, NULL)) { m_freem(n); /* do not inject data to pcb */ skipit = 1; } #endif /* NECP */ -#if CONFIG_MACF_NET - if (n && skipit == 0) { - if (mac_inpcb_check_deliver(last, n, AF_INET, - SOCK_RAW) != 0) { - m_freem(n); - skipit = 1; - } - } -#endif if (n && skipit == 0) { int error = 0; if ((last->inp_flags & INP_CONTROLOPTS) != 0 || +#if CONTENT_FILTER + /* Content Filter needs to see local address */ + (last->inp_socket->so_cfil_db != NULL) || +#endif (last->inp_socket->so_options & SO_TIMESTAMP) != 0 || (last->inp_socket->so_options & SO_TIMESTAMP_MONOTONIC) != 0 || (last->inp_socket->so_options & SO_TIMESTAMP_CONTINUOUS) != 0) { @@ -310,24 +292,20 @@ rip_input(struct mbuf *m, int iphlen) skipit = 0; #if NECP if (last && !necp_socket_is_allowed_to_send_recv_v4(last, 0, 0, - &ip->ip_dst, &ip->ip_src, ifp, NULL, NULL, NULL)) { + &ip->ip_dst, &ip->ip_src, ifp, 0, NULL, NULL, NULL, NULL)) { m_freem(m); OSAddAtomic(1, &ipstat.ips_delivered); /* do not inject data to pcb */ skipit = 1; } #endif /* NECP */ -#if CONFIG_MACF_NET - if (last && skipit == 0) { - if (mac_inpcb_check_deliver(last, m, AF_INET, SOCK_RAW) != 0) { - skipit = 1; - m_freem(m); - } - } -#endif if (skipit == 0) { if (last) { if ((last->inp_flags & INP_CONTROLOPTS) != 0 || +#if CONTENT_FILTER + /* Content Filter needs to see local address */ + (last->inp_socket->so_cfil_db != NULL) || +#endif (last->inp_socket->so_options & SO_TIMESTAMP) != 0 || (last->inp_socket->so_options & SO_TIMESTAMP_MONOTONIC) != 0 || (last->inp_socket->so_options & SO_TIMESTAMP_CONTINUOUS) != 0) { @@ -394,7 +372,7 @@ rip_output( struct m_tag *cfil_tag = NULL; bool cfil_faddr_use = false; uint32_t cfil_so_state_change_cnt = 0; - short cfil_so_options = 0; + uint32_t cfil_so_options = 0; int cfil_inp_flags = 0; struct sockaddr *cfil_faddr = NULL; struct sockaddr_in *cfil_sin; @@ -530,9 +508,13 @@ 
rip_output( } else { ip->ip_tos = inp->inp_ip_tos; } - ip->ip_off = 0; + if (inp->inp_flags2 & INP2_DONTFRAG) { + ip->ip_off = IP_DF; + } else { + ip->ip_off = 0; + } ip->ip_p = inp->inp_ip_p; - ip->ip_len = m->m_pkthdr.len; + ip->ip_len = (uint16_t)m->m_pkthdr.len; ip->ip_src = inp->inp_laddr; ip->ip_dst.s_addr = dst; ip->ip_ttl = inp->inp_ip_ttl; @@ -568,6 +550,7 @@ rip_output( necp_kernel_policy_id policy_id; necp_kernel_policy_id skip_policy_id; u_int32_t route_rule_id; + u_int32_t pass_flags; /* * We need a route to perform NECP route rule checks @@ -608,12 +591,12 @@ rip_output( } if (!necp_socket_is_allowed_to_send_recv_v4(inp, 0, 0, - &ip->ip_src, &ip->ip_dst, NULL, &policy_id, &route_rule_id, &skip_policy_id)) { + &ip->ip_src, &ip->ip_dst, NULL, 0, &policy_id, &route_rule_id, &skip_policy_id, &pass_flags)) { m_freem(m); return EHOSTUNREACH; } - necp_mark_packet_from_socket(m, inp, policy_id, route_rule_id, skip_policy_id); + necp_mark_packet_from_socket(m, inp, policy_id, route_rule_id, skip_policy_id, pass_flags); if (net_qos_policy_restricted != 0) { struct ifnet *rt_ifp = NULL; @@ -622,8 +605,7 @@ rip_output( rt_ifp = inp->inp_route.ro_rt->rt_ifp; } - necp_socket_update_qos_marking(inp, inp->inp_route.ro_rt, - NULL, route_rule_id); + necp_socket_update_qos_marking(inp, inp->inp_route.ro_rt, route_rule_id); } } #endif /* NECP */ @@ -656,10 +638,6 @@ rip_output( m->m_pkthdr.tx_rawip_e_pid = 0; } -#if CONFIG_MACF_NET - mac_mbuf_label_associate_inpcb(inp, m); -#endif - imo = inp->inp_moptions; if (imo != NULL) { IMO_ADDREF(imo); @@ -723,24 +701,6 @@ rip_output( return error; } -#if IPFIREWALL -int -load_ipfw(void) -{ - kern_return_t err; - - ipfw_init(); - -#if DUMMYNET - if (!DUMMYNET_LOADED) { - ip_dn_init(); - } -#endif /* DUMMYNET */ - err = 0; - - return err == 0 && ip_fw_ctl_ptr == NULL ? -1 : err; -} -#endif /* IPFIREWALL */ /* * Raw IP socket option processing. 
@@ -772,21 +732,6 @@ rip_ctloutput(struct socket *so, struct sockopt *sopt) error = sooptcopyout(sopt, &optval, sizeof optval); break; -#if IPFIREWALL - case IP_FW_ADD: - case IP_FW_GET: - case IP_OLD_FW_ADD: - case IP_OLD_FW_GET: - if (ip_fw_ctl_ptr == 0) { - error = load_ipfw(); - } - if (ip_fw_ctl_ptr && error == 0) { - error = ip_fw_ctl_ptr(sopt); - } else { - error = ENOPROTOOPT; - } - break; -#endif /* IPFIREWALL */ #if DUMMYNET case IP_DUMMYNET_GET: @@ -835,27 +780,6 @@ rip_ctloutput(struct socket *so, struct sockopt *sopt) } break; -#if IPFIREWALL - case IP_FW_ADD: - case IP_FW_DEL: - case IP_FW_FLUSH: - case IP_FW_ZERO: - case IP_FW_RESETLOG: - case IP_OLD_FW_ADD: - case IP_OLD_FW_DEL: - case IP_OLD_FW_FLUSH: - case IP_OLD_FW_ZERO: - case IP_OLD_FW_RESETLOG: - if (ip_fw_ctl_ptr == 0) { - error = load_ipfw(); - } - if (ip_fw_ctl_ptr && error == 0) { - error = ip_fw_ctl_ptr(sopt); - } else { - error = ENOPROTOOPT; - } - break; -#endif /* IPFIREWALL */ #if DUMMYNET case IP_DUMMYNET_CONFIGURE: @@ -870,7 +794,7 @@ rip_ctloutput(struct socket *so, struct sockopt *sopt) error = ENOPROTOOPT; } break; -#endif +#endif /* DUMMYNET */ case SO_FLUSH: if ((error = sooptcopyin(sopt, &optval, sizeof(optval), @@ -1021,8 +945,9 @@ rip_attach(struct socket *so, int proto, struct proc *p) } inp = (struct inpcb *)so->so_pcb; inp->inp_vflag |= INP_IPV4; - inp->inp_ip_p = proto; - inp->inp_ip_ttl = ip_defttl; + VERIFY(proto <= UINT8_MAX); + inp->inp_ip_p = (u_char)proto; + inp->inp_ip_ttl = (u_char)ip_defttl; return 0; } @@ -1215,12 +1140,11 @@ rip_unlock(struct socket *so, int refcount, void *debug) lck_mtx_unlock(so->so_proto->pr_domain->dom_mtx); lck_rw_lock_exclusive(ripcbinfo.ipi_lock); if (inp->inp_state != INPCB_STATE_DEAD) { -#if INET6 if (SOCK_CHECK_DOM(so, PF_INET6)) { in6_pcbdetach(inp); - } else -#endif /* INET6 */ - in_pcbdetach(inp); + } else { + in_pcbdetach(inp); + } } in_pcbdispose(inp); lck_rw_done(ripcbinfo.ipi_lock); @@ -1338,7 +1262,7 @@ SYSCTL_PROC(_net_inet_raw, OID_AUTO /*XXX*/, pcblist, CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0, rip_pcblist, "S,xinpcb", "List of active raw IP sockets"); -#if !CONFIG_EMBEDDED +#if XNU_TARGET_OS_OSX static int rip_pcblist64 SYSCTL_HANDLER_ARGS @@ -1444,7 +1368,7 @@ SYSCTL_PROC(_net_inet_raw, OID_AUTO, pcblist64, CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0, rip_pcblist64, "S,xinpcb64", "List of active raw IP sockets"); -#endif /* !CONFIG_EMBEDDED */ +#endif /* XNU_TARGET_OS_OSX */ static int diff --git a/bsd/netinet/tcp.h b/bsd/netinet/tcp.h index 06e25d6bb..b63fc818b 100644 --- a/bsd/netinet/tcp.h +++ b/bsd/netinet/tcp.h @@ -63,11 +63,19 @@ #ifndef _NETINET_TCP_H_ #define _NETINET_TCP_H_ -#include +#ifndef DRIVERKIT #include +#endif /* DRIVERKIT */ + #include #include /* __uint32_t */ +#ifndef DRIVERKIT +#include +#else +#include +#endif /* DRIVERKIT */ + #if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE) typedef __uint32_t tcp_seq; typedef __uint32_t tcp_cc; /* connection count per rfc1644 */ @@ -237,11 +245,7 @@ struct tcphdr { #define TCP_MEASURE_BW_BURST 0x203 /* Burst size to use for bandwidth measurement */ #define TCP_PEER_PID 0x204 /* Lookup pid of the process we're connected to */ #define TCP_ADAPTIVE_READ_TIMEOUT 0x205 /* Read timeout used as a multiple of RTT */ -/* - * Enable message delivery on a socket, this feature is currently unsupported and - * is subjected to change in future. 
- */ -#define TCP_ENABLE_MSGS 0x206 +#define TCP_OPTION_UNUSED_0 0x206 /* UNUSED */ #define TCP_ADAPTIVE_WRITE_TIMEOUT 0x207 /* Write timeout used as a multiple of RTT */ #define TCP_NOTIMEWAIT 0x208 /* Avoid going into time-wait */ #define TCP_DISABLE_BLACKHOLE_DETECTION 0x209 /* disable PMTU blackhole detection */ @@ -478,17 +482,17 @@ typedef struct conninfo_tcp { #pragma pack() struct mptcp_itf_stats { - uint16_t ifindex; - uint16_t switches; - uint32_t is_expensive:1; - uint64_t mpis_txbytes __attribute__((aligned(8))); - uint64_t mpis_rxbytes __attribute__((aligned(8))); - uint64_t mpis_wifi_txbytes __attribute__((aligned(8))); - uint64_t mpis_wifi_rxbytes __attribute__((aligned(8))); - uint64_t mpis_wired_txbytes __attribute__((aligned(8))); - uint64_t mpis_wired_rxbytes __attribute__((aligned(8))); - uint64_t mpis_cell_txbytes __attribute__((aligned(8))); - uint64_t mpis_cell_rxbytes __attribute__((aligned(8))); + u_short ifindex; + uint16_t switches; + uint32_t is_expensive:1; + uint64_t mpis_txbytes __attribute__((aligned(8))); + uint64_t mpis_rxbytes __attribute__((aligned(8))); + uint64_t mpis_wifi_txbytes __attribute__((aligned(8))); + uint64_t mpis_wifi_rxbytes __attribute__((aligned(8))); + uint64_t mpis_wired_txbytes __attribute__((aligned(8))); + uint64_t mpis_wired_rxbytes __attribute__((aligned(8))); + uint64_t mpis_cell_txbytes __attribute__((aligned(8))); + uint64_t mpis_cell_rxbytes __attribute__((aligned(8))); }; /* Version solely used to let libnetcore survive */ diff --git a/bsd/netinet/tcp_cache.c b/bsd/netinet/tcp_cache.c index aad8ce29c..8095d924c 100644 --- a/bsd/netinet/tcp_cache.c +++ b/bsd/netinet/tcp_cache.c @@ -103,12 +103,12 @@ struct tcp_cache_key { struct tcp_cache { SLIST_ENTRY(tcp_cache) list; - u_int32_t tc_last_access; + uint32_t tc_last_access; struct tcp_cache_key tc_key; - u_int8_t tc_tfo_cookie[TFO_COOKIE_LEN_MAX]; - u_int8_t tc_tfo_cookie_len; + uint8_t tc_tfo_cookie[TFO_COOKIE_LEN_MAX]; + uint8_t tc_tfo_cookie_len; }; struct tcp_cache_head { @@ -125,7 +125,7 @@ struct tcp_cache_key_src { int af; }; -static u_int32_t tcp_cache_hash_seed; +static uint32_t tcp_cache_hash_seed; size_t tcp_cache_size; @@ -156,11 +156,14 @@ static uint32_t tcp_backoff_maximum = 65536; SYSCTL_UINT(_net_inet_tcp, OID_AUTO, backoff_maximum, CTLFLAG_RW | CTLFLAG_LOCKED, &tcp_backoff_maximum, 0, "Maximum time for which we won't try TFO"); -SYSCTL_SKMEM_TCP_INT(OID_AUTO, ecn_timeout, CTLFLAG_RW | CTLFLAG_LOCKED, - static int, tcp_ecn_timeout, 60, "Initial minutes to wait before re-trying ECN"); +static uint32_t tcp_ecn_timeout = 60; -SYSCTL_SKMEM_TCP_INT(OID_AUTO, disable_tcp_heuristics, CTLFLAG_RW | CTLFLAG_LOCKED, - static int, disable_tcp_heuristics, 0, "Set to 1, to disable all TCP heuristics (TFO, ECN, MPTCP)"); +SYSCTL_UINT(_net_inet_tcp, OID_AUTO, ecn_timeout, CTLFLAG_RW | CTLFLAG_LOCKED, + &tcp_ecn_timeout, 60, "Initial minutes to wait before re-trying ECN"); + +static int disable_tcp_heuristics = 0; +SYSCTL_INT(_net_inet_tcp, OID_AUTO, disable_tcp_heuristics, CTLFLAG_RW | CTLFLAG_LOCKED, + &disable_tcp_heuristics, 0, "Set to 1, to disable all TCP heuristics (TFO, ECN, MPTCP)"); static uint32_t tcp_min_to_hz(uint32_t minutes) @@ -218,8 +221,8 @@ tcp_min_to_hz(uint32_t minutes) * Might be worth moving this to a library so that others * (e.g., scale_to_powerof2()) can use this as well instead of a while-loop. 
*/ -static u_int32_t -tcp_cache_roundup2(u_int32_t a) +static uint32_t +tcp_cache_roundup2(uint32_t a) { a--; a |= a >> 1; @@ -272,10 +275,10 @@ tcp_cache_hash_src(struct tcp_cache_key_src *tcks, struct tcp_heuristic_key *key } } -static u_int16_t +static uint16_t tcp_cache_hash(struct tcp_cache_key_src *tcks, struct tcp_cache_key *key) { - u_int32_t hash; + uint32_t hash; bzero(key, sizeof(struct tcp_cache_key)); @@ -294,7 +297,7 @@ tcp_cache_hash(struct tcp_cache_key_src *tcks, struct tcp_cache_key *key) hash = net_flowhash(key, sizeof(struct tcp_cache_key), tcp_cache_hash_seed); - return hash & (tcp_cache_size - 1); + return (uint16_t)(hash & (tcp_cache_size - 1)); } static void @@ -320,7 +323,7 @@ tcp_getcache_with_lock(struct tcp_cache_key_src *tcks, struct tcp_cache *tpcache = NULL; struct tcp_cache_head *head; struct tcp_cache_key key; - u_int16_t hash; + uint16_t hash; int i = 0; hash = tcp_cache_hash(tcks, &key); @@ -341,11 +344,11 @@ tcp_getcache_with_lock(struct tcp_cache_key_src *tcks, if ((tpcache == NULL) && create) { if (i >= TCP_CACHE_BUCKET_SIZE) { struct tcp_cache *oldest_cache = NULL; - u_int32_t max_age = 0; + uint32_t max_age = 0; /* Look for the oldest tcp_cache in the bucket */ SLIST_FOREACH(tpcache, &head->tcp_caches, list) { - u_int32_t age = tcp_now - tpcache->tc_last_access; + uint32_t age = tcp_now - tpcache->tc_last_access; if (age > max_age) { max_age = age; oldest_cache = tpcache; @@ -362,6 +365,7 @@ tcp_getcache_with_lock(struct tcp_cache_key_src *tcks, tpcache = _MALLOC(sizeof(struct tcp_cache), M_TEMP, M_NOWAIT | M_ZERO); if (tpcache == NULL) { + os_log_error(OS_LOG_DEFAULT, "%s could not allocate cache", __func__); goto out_null; } @@ -408,7 +412,7 @@ tcp_cache_key_src_create(struct tcpcb *tp, struct tcp_cache_key_src *tcks) } static void -tcp_cache_set_cookie_common(struct tcp_cache_key_src *tcks, u_char *cookie, u_int8_t len) +tcp_cache_set_cookie_common(struct tcp_cache_key_src *tcks, u_char *cookie, uint8_t len) { struct tcp_cache_head *head; struct tcp_cache *tpcache; @@ -427,7 +431,7 @@ tcp_cache_set_cookie_common(struct tcp_cache_key_src *tcks, u_char *cookie, u_in } void -tcp_cache_set_cookie(struct tcpcb *tp, u_char *cookie, u_int8_t len) +tcp_cache_set_cookie(struct tcpcb *tp, u_char *cookie, uint8_t len) { struct tcp_cache_key_src tcks; @@ -436,7 +440,7 @@ tcp_cache_set_cookie(struct tcpcb *tp, u_char *cookie, u_int8_t len) } static int -tcp_cache_get_cookie_common(struct tcp_cache_key_src *tcks, u_char *cookie, u_int8_t *len) +tcp_cache_get_cookie_common(struct tcp_cache_key_src *tcks, u_char *cookie, uint8_t *len) { struct tcp_cache_head *head; struct tcp_cache *tpcache; @@ -475,7 +479,7 @@ tcp_cache_get_cookie_common(struct tcp_cache_key_src *tcks, u_char *cookie, u_in * Returns 1 if the cookie has been found and written. 
*/ int -tcp_cache_get_cookie(struct tcpcb *tp, u_char *cookie, u_int8_t *len) +tcp_cache_get_cookie(struct tcpcb *tp, u_char *cookie, uint8_t *len) { struct tcp_cache_key_src tcks; @@ -512,10 +516,10 @@ tcp_cache_get_cookie_len(struct tcpcb *tp) return tcp_cache_get_cookie_len_common(&tcks); } -static u_int16_t +static uint16_t tcp_heuristics_hash(struct tcp_cache_key_src *tcks, struct tcp_heuristic_key *key) { - u_int32_t hash; + uint32_t hash; bzero(key, sizeof(struct tcp_heuristic_key)); @@ -524,7 +528,7 @@ tcp_heuristics_hash(struct tcp_cache_key_src *tcks, struct tcp_heuristic_key *ke hash = net_flowhash(key, sizeof(struct tcp_heuristic_key), tcp_cache_hash_seed); - return hash & (tcp_cache_size - 1); + return (uint16_t)(hash & (tcp_cache_size - 1)); } static void @@ -554,7 +558,7 @@ tcp_getheuristic_with_lock(struct tcp_cache_key_src *tcks, struct tcp_heuristic *tpheur = NULL; struct tcp_heuristics_head *head; struct tcp_heuristic_key key; - u_int16_t hash; + uint16_t hash; int i = 0; hash = tcp_heuristics_hash(tcks, &key); @@ -575,11 +579,11 @@ tcp_getheuristic_with_lock(struct tcp_cache_key_src *tcks, if ((tpheur == NULL) && create) { if (i >= TCP_CACHE_BUCKET_SIZE) { struct tcp_heuristic *oldest_heur = NULL; - u_int32_t max_age = 0; + uint32_t max_age = 0; /* Look for the oldest tcp_heur in the bucket */ SLIST_FOREACH(tpheur, &head->tcp_heuristics, list) { - u_int32_t age = tcp_now - tpheur->th_last_access; + uint32_t age = tcp_now - tpheur->th_last_access; if (age > max_age) { max_age = age; oldest_heur = tpheur; @@ -597,6 +601,7 @@ tcp_getheuristic_with_lock(struct tcp_cache_key_src *tcks, tpheur = _MALLOC(sizeof(struct tcp_heuristic), M_TEMP, M_NOWAIT | M_ZERO); if (tpheur == NULL) { + os_log_error(OS_LOG_DEFAULT, "%s could not allocate cache", __func__); goto out_null; } @@ -631,7 +636,7 @@ out_null: } static void -tcp_heuristic_reset_counters(struct tcp_cache_key_src *tcks, u_int8_t flags) +tcp_heuristic_reset_counters(struct tcp_cache_key_src *tcks, uint8_t flags) { struct tcp_heuristics_head *head; struct tcp_heuristic *tpheur; @@ -789,7 +794,7 @@ tcp_heuristic_tfo_middlebox_common(struct tcp_cache_key_src *tcks) static void tcp_heuristic_inc_counters(struct tcp_cache_key_src *tcks, - u_int32_t flags) + uint32_t flags) { struct tcp_heuristics_head *head; struct tcp_heuristic *tpheur; @@ -1301,7 +1306,7 @@ tcp_heuristics_ecn_update(struct necp_tcp_ecn_cache *necp_buffer, boolean_t tcp_heuristic_do_tfo_with_address(struct ifnet *ifp, union sockaddr_in_4_6 *local_address, union sockaddr_in_4_6 *remote_address, - u_int8_t *cookie, u_int8_t *cookie_len) + uint8_t *cookie, uint8_t *cookie_len) { struct tcp_cache_key_src tcks; @@ -1427,6 +1432,9 @@ static int sysctl_cleartfo SYSCTL_HANDLER_ARGS val = oldval; error = sysctl_handle_int(oidp, &val, 0, req); if (error || !req->newptr) { + if (error) { + os_log_error(OS_LOG_DEFAULT, "%s could not parse int: %d", __func__, error); + } return error; } @@ -1461,10 +1469,10 @@ tcp_cache_init(void) * On machines with > 4GB of memory, we have a cache-size of 1024 entries, * thus about 327KB. * - * Side-note: we convert to u_int32_t. If sane_size is more than + * Side-note: we convert to uint32_t. If sane_size is more than * 16000 TB, we loose precision. But, who cares? 
:) */ - tcp_cache_size = tcp_cache_roundup2((u_int32_t)(sane_size_meg >> 2)); + tcp_cache_size = tcp_cache_roundup2((uint32_t)(sane_size_meg >> 2)); if (tcp_cache_size < 32) { tcp_cache_size = 32; } else if (tcp_cache_size > 1024) { diff --git a/bsd/netinet/tcp_cc.c b/bsd/netinet/tcp_cc.c index 2eb6faf90..461b180f9 100644 --- a/bsd/netinet/tcp_cc.c +++ b/bsd/netinet/tcp_cc.c @@ -42,8 +42,9 @@ #include #include -SYSCTL_SKMEM_TCP_INT(OID_AUTO, cc_debug, CTLFLAG_RW | CTLFLAG_LOCKED, - int, tcp_cc_debug, 0, "Enable debug data collection"); +static int tcp_cc_debug; +SYSCTL_INT(_net_inet_tcp, OID_AUTO, cc_debug, CTLFLAG_RW | CTLFLAG_LOCKED, + &tcp_cc_debug, 0, "Enable debug data collection"); extern struct tcp_cc_algo tcp_cc_newreno; SYSCTL_INT(_net_inet_tcp, OID_AUTO, newreno_sockets, @@ -220,8 +221,6 @@ tcp_ccdbg_trace(struct tcpcb *tp, struct tcphdr *th, int32_t event) tp->t_ccstate->cub_last_max; dbg_state.u.cubic_state.ccd_tcp_win = tp->t_ccstate->cub_tcp_win; - dbg_state.u.cubic_state.ccd_target_win = - tp->t_ccstate->cub_target_win; dbg_state.u.cubic_state.ccd_avg_lastmax = tp->t_ccstate->cub_avg_lastmax; dbg_state.u.cubic_state.ccd_mean_deviation = @@ -282,9 +281,6 @@ tcp_bad_rexmt_fix_sndbuf(struct tcpcb *tp) /* * Calculate initial cwnd according to RFC3390. - * - * Keep the old ss_fltsz sysctl for ABI compabitility issues. - * but it will be overriden if tcp_do_rfc3390 sysctl when it is set. */ void tcp_cc_cwnd_init_or_reset(struct tcpcb *tp) @@ -292,12 +288,12 @@ tcp_cc_cwnd_init_or_reset(struct tcpcb *tp) if (tp->t_flags & TF_LOCAL) { tp->snd_cwnd = tp->t_maxseg * ss_fltsz_local; } else { - /* initial congestion window according to RFC 3390 */ - if (tcp_do_rfc3390) { + if (tcp_cubic_minor_fixes) { + tp->snd_cwnd = tcp_initial_cwnd(tp); + } else { + /* initial congestion window according to RFC 3390 */ tp->snd_cwnd = min(4 * tp->t_maxseg, max(2 * tp->t_maxseg, TCP_CC_CWND_INIT_BYTES)); - } else { - tp->snd_cwnd = tp->t_maxseg * ss_fltsz; } } } @@ -334,12 +330,50 @@ tcp_cc_delay_ack(struct tcpcb *tp, struct tcphdr *th) } break; case 3: - if ((tp->t_flags & TF_RXWIN0SENT) == 0 && - (th->th_flags & TH_PUSH) == 0 && - ((tp->t_unacksegs == 1) || - ((tp->t_flags & TF_STRETCHACK) && - tp->t_unacksegs < maxseg_unacked))) { - return 1; + if (tcp_ack_strategy == TCP_ACK_STRATEGY_LEGACY) { + if ((tp->t_flags & TF_RXWIN0SENT) == 0 && + (th->th_flags & TH_PUSH) == 0 && + ((tp->t_unacksegs == 1) || + ((tp->t_flags & TF_STRETCHACK) && + tp->t_unacksegs < maxseg_unacked))) { + return 1; + } + } else { + uint32_t recwin; + + /* Get the receive-window we would announce */ + recwin = tcp_sbspace(tp); + if (recwin > (uint32_t)(TCP_MAXWIN << tp->rcv_scale)) { + recwin = (uint32_t)(TCP_MAXWIN << tp->rcv_scale); + } + + /* Delay ACK, if: + * + * 1. We are not sending a zero-window + * 2. We are not forcing fast ACKs + * 3. We have more than the low-water mark in receive-buffer + * 4. The receive-window is not increasing + * 5. We have less than or equal of an MSS unacked or + * Window actually has been growing larger than the initial value by half of it. + * (this makes sure that during ramp-up we ACK every second MSS + * until we pass the tcp_recvspace * 1.5-threshold) + * 6. We haven't waited for half a BDP + * + * (a note on 6: The receive-window is + * roughly 2 BDP. 
Thus, recwin / 4 means half a BDP and + * thus we enforce an ACK roughly twice per RTT - even + * if the app does not read) + */ + if ((tp->t_flags & TF_RXWIN0SENT) == 0 && + tp->t_forced_acks == 0 && + tp->t_inpcb->inp_socket->so_rcv.sb_cc > tp->t_inpcb->inp_socket->so_rcv.sb_lowat && + recwin <= tp->t_last_recwin && + (tp->rcv_nxt - tp->last_ack_sent <= tp->t_maxseg || + recwin > (uint32_t)(tcp_recvspace + (tcp_recvspace >> 1))) && + (tp->rcv_nxt - tp->last_ack_sent) < (recwin >> 2)) { + tp->t_stat.acks_delayed++; + return 1; + } } break; } @@ -431,7 +465,11 @@ tcp_cc_adjust_nonvalidated_cwnd(struct tcpcb *tp) tp->t_pipeack = tcp_get_max_pipeack(tp); tcp_clear_pipeack_state(tp); tp->snd_cwnd = (max(tp->t_pipeack, tp->t_lossflightsize) >> 1); - tp->snd_cwnd = max(tp->snd_cwnd, TCP_CC_CWND_INIT_BYTES); + if (tcp_cubic_minor_fixes) { + tp->snd_cwnd = max(tp->snd_cwnd, tp->t_maxseg); + } else { + tp->snd_cwnd = max(tp->snd_cwnd, TCP_CC_CWND_INIT_BYTES); + } tp->snd_cwnd += tp->t_maxseg * tcprexmtthresh; tp->t_flagsext &= ~TF_CWND_NONVALIDATED; } diff --git a/bsd/netinet/tcp_cc.h b/bsd/netinet/tcp_cc.h index 3f484dac0..8bd1778ae 100644 --- a/bsd/netinet/tcp_cc.h +++ b/bsd/netinet/tcp_cc.h @@ -222,6 +222,7 @@ extern struct zone *tcp_cc_zone; extern struct tcp_cc_algo* tcp_cc_algo_list[TCP_CC_ALGO_COUNT]; #define CC_ALGO(tp) (tcp_cc_algo_list[tp->tcp_cc_index]) +#define TCP_CC_CWND_INIT_PKTS 10 #define TCP_CC_CWND_INIT_BYTES 4380 /* * The congestion window will have to be reset after a @@ -243,5 +244,15 @@ extern void tcp_cc_adjust_nonvalidated_cwnd(struct tcpcb *tp); extern u_int32_t tcp_get_max_pipeack(struct tcpcb *tp); extern void tcp_clear_pipeack_state(struct tcpcb *tp); +static inline uint32_t +tcp_initial_cwnd(struct tcpcb *tp) +{ + if (tcp_cubic_minor_fixes) { + return TCP_CC_CWND_INIT_PKTS * tp->t_maxseg; + } else { + return TCP_CC_CWND_INIT_BYTES; + } +} + #endif /* KERNEL_PRIVATE */ #endif /* _NETINET_CC_H_ */ diff --git a/bsd/netinet/tcp_cubic.c b/bsd/netinet/tcp_cubic.c index a347a0dcb..b835ec0ab 100644 --- a/bsd/netinet/tcp_cubic.c +++ b/bsd/netinet/tcp_cubic.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013-2014 Apple Inc. All rights reserved. + * Copyright (c) 2013-2020 Apple Inc. All rights reserved. 
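(Aside, not part of the patch: a minimal sketch of what the new tcp_initial_cwnd() helper added to tcp_cc.h above changes, assuming a typical 1460-byte MSS. The 10-segment figure follows the IW10 scheme of RFC 6928; 4380 bytes is the legacy TCP_CC_CWND_INIT_BYTES, i.e. three 1460-byte segments. The helper below only mirrors the two branches for illustration.)

#include <stdint.h>
#include <stdio.h>

/* Illustration only: mirrors the two branches of tcp_initial_cwnd() above. */
#define TCP_CC_CWND_INIT_PKTS   10      /* new IW10-style initial window, in segments */
#define TCP_CC_CWND_INIT_BYTES  4380    /* legacy initial window, in bytes (3 * 1460) */

static uint32_t
initial_cwnd(uint32_t t_maxseg, int cubic_minor_fixes)
{
	if (cubic_minor_fixes) {
		return TCP_CC_CWND_INIT_PKTS * t_maxseg;
	}
	return TCP_CC_CWND_INIT_BYTES;
}

int
main(void)
{
	printf("cubic_minor_fixes=1: %u bytes\n", initial_cwnd(1460, 1));   /* 14600 */
	printf("cubic_minor_fixes=0: %u bytes\n", initial_cwnd(1460, 0));   /* 4380  */
	return 0;
}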
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -37,9 +37,7 @@ #include #include -#if INET6 #include -#endif /* INET6 */ #include #include @@ -64,7 +62,6 @@ static void tcp_cubic_after_timeout(struct tcpcb *tp); static int tcp_cubic_delay_ack(struct tcpcb *tp, struct tcphdr *th); static void tcp_cubic_switch_cc(struct tcpcb *tp, u_int16_t old_index); static uint32_t tcp_cubic_update(struct tcpcb *tp, u_int32_t rtt); -static uint32_t tcp_cubic_tcpwin(struct tcpcb *tp, struct tcphdr *th); static inline void tcp_cubic_clear_state(struct tcpcb *tp); @@ -85,9 +82,11 @@ struct tcp_cc_algo tcp_cc_cubic = { .switch_to = tcp_cubic_switch_cc }; -const float tcp_cubic_backoff = 0.2f; /* multiplicative decrease factor */ -const float tcp_cubic_coeff = 0.4f; -const float tcp_cubic_fast_convergence_factor = 0.875f; +static float tcp_cubic_backoff = 0.2f; /* multiplicative decrease factor */ +static float tcp_cubic_coeff = 0.4f; +static float tcp_cubic_fast_convergence_factor = 0.875f; + +static float tcp_cubic_beta = 0.8f; SYSCTL_SKMEM_TCP_INT(OID_AUTO, cubic_tcp_friendliness, CTLFLAG_RW | CTLFLAG_LOCKED, static int, tcp_cubic_tcp_friendliness, 0, "Enable TCP friendliness"); @@ -98,11 +97,27 @@ SYSCTL_SKMEM_TCP_INT(OID_AUTO, cubic_fast_convergence, CTLFLAG_RW | CTLFLAG_LOCK SYSCTL_SKMEM_TCP_INT(OID_AUTO, cubic_use_minrtt, CTLFLAG_RW | CTLFLAG_LOCKED, static int, tcp_cubic_use_minrtt, 0, "use a min of 5 sec rtt"); +SYSCTL_SKMEM_TCP_INT(OID_AUTO, cubic_minor_fixes, CTLFLAG_RW | CTLFLAG_LOCKED, + int, tcp_cubic_minor_fixes, 1, "Minor fixes to TCP Cubic"); + +SYSCTL_SKMEM_TCP_INT(OID_AUTO, cubic_rfc_compliant, CTLFLAG_RW | CTLFLAG_LOCKED, + int, tcp_cubic_rfc_compliant, 1, "RFC Compliance for TCP Cubic"); + static int tcp_cubic_init(struct tcpcb *tp) { OSIncrementAtomic((volatile SInt32 *)&tcp_cc_cubic.num_sockets); + if (tcp_cubic_rfc_compliant) { + tcp_cubic_backoff = 0.3f; /* multiplicative decrease factor */ + tcp_cubic_fast_convergence_factor = 0.85f; + tcp_cubic_beta = 0.7f; + } else { + tcp_cubic_backoff = 0.2f; /* multiplicative decrease factor */ + tcp_cubic_fast_convergence_factor = 0.875f; + tcp_cubic_beta = 0.8f; + } + VERIFY(tp->t_ccstate != NULL); tcp_cubic_clear_state(tp); return 0; @@ -140,8 +155,8 @@ tcp_cubic_cwnd_init_or_reset(struct tcpcb *tp) * loss and Cubic will enter steady-state too early. It is better * to always probe to find the initial slow-start threshold. */ - if (tp->t_inpcb->inp_stat->txbytes <= TCP_CC_CWND_INIT_BYTES - && tp->snd_ssthresh < (TCP_MAXWIN << TCP_MAX_WINSHIFT)) { + if (tp->t_inpcb->inp_stat->txbytes <= tcp_initial_cwnd(tp) && + tp->snd_ssthresh < (TCP_MAXWIN << TCP_MAX_WINSHIFT)) { tp->snd_ssthresh = TCP_MAXWIN << TCP_MAX_WINSHIFT; } @@ -177,25 +192,24 @@ tcp_cubic_update(struct tcpcb *tp, u_int32_t rtt) tp->t_ccstate->cub_epoch_start = 1; } if (win < tp->t_ccstate->cub_last_max) { - VERIFY(current_task() == kernel_task); - /* * Compute cubic epoch period, this is the time * period that the window will take to increase to * last_max again after backoff due to loss. 
*/ - K = (tp->t_ccstate->cub_last_max - win) - / tp->t_maxseg / tcp_cubic_coeff; + if (tcp_cubic_minor_fixes) { + K = ((float)tp->t_ccstate->cub_last_max - win) / tp->t_maxseg / tcp_cubic_coeff; + } else { + K = (tp->t_ccstate->cub_last_max - win) / tp->t_maxseg / tcp_cubic_coeff; + } K = cbrtf(K); tp->t_ccstate->cub_epoch_period = K * TCP_RETRANSHZ; /* Origin point */ - tp->t_ccstate->cub_origin_point = - tp->t_ccstate->cub_last_max; + tp->t_ccstate->cub_origin_point = tp->t_ccstate->cub_last_max; } else { tp->t_ccstate->cub_epoch_period = 0; tp->t_ccstate->cub_origin_point = win; } - tp->t_ccstate->cub_target_win = 0; } VERIFY(tp->t_ccstate->cub_origin_point > 0); @@ -203,8 +217,7 @@ tcp_cubic_update(struct tcpcb *tp, u_int32_t rtt) * Compute the target window for the next RTT using smoothed RTT * as an estimate for next RTT. */ - elapsed_time = timer_diff(tcp_now, 0, - tp->t_ccstate->cub_epoch_start, 0); + elapsed_time = timer_diff(tcp_now, 0, tp->t_ccstate->cub_epoch_start, 0); if (tcp_cubic_use_minrtt) { elapsed_time += max(tcp_cubic_use_minrtt, rtt); @@ -214,8 +227,7 @@ tcp_cubic_update(struct tcpcb *tp, u_int32_t rtt) var = (elapsed_time - tp->t_ccstate->cub_epoch_period) / TCP_RETRANSHZ; var = var * var * var * (tcp_cubic_coeff * tp->t_maxseg); - tp->t_ccstate->cub_target_win = (u_int32_t)(tp->t_ccstate->cub_origin_point + var); - return tp->t_ccstate->cub_target_win; + return (u_int32_t)(tp->t_ccstate->cub_origin_point + var); } /* @@ -243,30 +255,69 @@ tcp_cubic_update(struct tcpcb *tp, u_int32_t rtt) * a backoff of 0.5 and additive increase of 1 packet per RTT. * * TCP window at time t can be calculated using the following equation - * with beta as 0.8 + * with tcp_beta_cubic * - * W(t) <- Wmax * beta + 3 * ((1 - beta)/(1 + beta)) * t/RTT + * W(t) <- Wmax * tcp_beta_cubic + 3 * ((1 - tcp_beta_cubic)/(1 + tcp_beta_cubic)) * t/RTT * */ static uint32_t tcp_cubic_tcpwin(struct tcpcb *tp, struct tcphdr *th) { if (tp->t_ccstate->cub_tcp_win == 0) { + /* Start of the epoch, we set the tcp_win to whatever Cubic decided + * at the beginning of the epoch. + */ tp->t_ccstate->cub_tcp_win = min(tp->snd_cwnd, tp->snd_wnd); - tp->t_ccstate->cub_tcp_bytes_acked = 0; + if (tcp_cubic_minor_fixes) { + tp->t_ccstate->cub_tcp_bytes_acked = BYTES_ACKED(th, tp); + } else { + tp->t_ccstate->cub_tcp_bytes_acked = 0; + } } else { - tp->t_ccstate->cub_tcp_bytes_acked += - BYTES_ACKED(th, tp); - if (tp->t_ccstate->cub_tcp_bytes_acked >= - tp->t_ccstate->cub_tcp_win) { - tp->t_ccstate->cub_tcp_bytes_acked -= - tp->t_ccstate->cub_tcp_win; - tp->t_ccstate->cub_tcp_win += tp->t_maxseg; + tp->t_ccstate->cub_tcp_bytes_acked += BYTES_ACKED(th, tp); + + if (tcp_cubic_minor_fixes) { + /* + * Increase by ai_factor * MSS, once per RTT. Counting bytes_acked + * against the snd_cwnd represents exactly one RTT at full rate. 
+ */ + while (tp->t_ccstate->cub_tcp_bytes_acked >= tp->snd_cwnd) { + /* Enough bytes have been ACK'd for TCP to do AIMD*/ + tp->t_ccstate->cub_tcp_bytes_acked -= tp->snd_cwnd; + + if (tp->snd_cwnd >= tp->t_ccstate->cub_last_max || !tcp_cubic_rfc_compliant) { + tp->t_ccstate->cub_tcp_win += tp->t_maxseg; + } else { + /* Increase-rate from Section 4.2, RFC 8312 */ + float ai_factor = (float)3 * (1 - tcp_cubic_beta) / (1 + tcp_cubic_beta); + + tp->t_ccstate->cub_tcp_win += (uint32_t)(tp->t_maxseg * ai_factor); + } + } + } else { + if (tp->t_ccstate->cub_tcp_bytes_acked >= tp->t_ccstate->cub_tcp_win) { + tp->t_ccstate->cub_tcp_bytes_acked -= tp->t_ccstate->cub_tcp_win; + tp->t_ccstate->cub_tcp_win += tp->t_maxseg; + } } } return tp->t_ccstate->cub_tcp_win; } +static uint32_t +tcp_round_to(uint32_t val, uint32_t round) +{ + if (tcp_cubic_minor_fixes) { + /* + * Round up or down based on the middle. Meaning, if we round upon a + * multiple of 10, 16 will round to 20 and 14 will round to 10. + */ + return ((val + (round / 2)) / round) * round; + } else { + return (val / round) * round; + } +} + /* * Handle an in-sequence ack during congestion avoidance phase. */ @@ -274,6 +325,7 @@ static void tcp_cubic_congestion_avd(struct tcpcb *tp, struct tcphdr *th) { u_int32_t cubic_target_win, tcp_win, rtt; + u_int64_t incr_win = UINT32_MAX; /* Do not increase congestion window in non-validated phase */ if (tcp_cc_is_cwnd_nonvalidated(tp) != 0) { @@ -293,9 +345,7 @@ tcp_cubic_congestion_avd(struct tcpcb *tp, struct tcphdr *th) /* Compute TCP window if a multiplicative decrease of 0.2 is used */ tcp_win = tcp_cubic_tcpwin(tp, th); - if (tp->snd_cwnd < tcp_win && - (tcp_cubic_tcp_friendliness == 1 || - TCP_CUBIC_ENABLE_TCPMODE(tp))) { + if (tp->snd_cwnd < tcp_win && tcp_cubic_minor_fixes == 0 && TCP_CUBIC_ENABLE_TCPMODE(tp)) { /* this connection is in TCP-friendly region */ if (tp->t_bytes_acked >= tp->snd_cwnd) { tp->t_bytes_acked -= tp->snd_cwnd; @@ -310,17 +360,39 @@ tcp_cubic_congestion_avd(struct tcpcb *tp, struct tcphdr *th) * need to be acknowledged before we can increase * the cwnd by one segment. */ - u_int64_t incr_win; - incr_win = tp->snd_cwnd * tp->t_maxseg; + incr_win = (uint64_t)tp->snd_cwnd * tp->t_maxseg; incr_win /= (cubic_target_win - tp->snd_cwnd); - if (incr_win > 0 && - tp->t_bytes_acked >= incr_win) { - tp->t_bytes_acked -= incr_win; - tp->snd_cwnd = - min((tp->snd_cwnd + tp->t_maxseg), - TCP_MAXWIN << tp->snd_scale); + if (!tcp_cubic_minor_fixes) { + if (incr_win > 0 && + tp->t_bytes_acked >= incr_win) { + tp->t_bytes_acked -= incr_win; + tp->snd_cwnd = + min((tp->snd_cwnd + tp->t_maxseg), + TCP_MAXWIN << tp->snd_scale); + } + } + } + } + + if (tcp_cubic_minor_fixes) { + tcp_win = tcp_round_to(tcp_win, tp->t_maxseg); + + if (tp->snd_cwnd < tcp_win) { + uint64_t tcp_incr_win; + + tcp_incr_win = (uint64_t)tp->snd_cwnd * tp->t_maxseg; + tcp_incr_win /= (tcp_win - tp->snd_cwnd); + + if (tcp_incr_win < incr_win) { + /* this connection is in TCP-friendly region */ + incr_win = tcp_incr_win; } } + + if (incr_win > 0 && tp->t_bytes_acked >= incr_win) { + tp->t_bytes_acked -= incr_win; + tp->snd_cwnd = min(tp->snd_cwnd + tp->t_maxseg, TCP_MAXWIN << tp->snd_scale); + } } } @@ -343,14 +415,19 @@ tcp_cubic_ack_rcvd(struct tcpcb *tp, struct tcphdr *th) uint32_t acked, abc_lim, incr; acked = BYTES_ACKED(th, tp); - abc_lim = (tcp_do_rfc3465_lim2 && - tp->snd_nxt == tp->snd_max) ? 
- 2 * tp->t_maxseg : tp->t_maxseg; + if (tcp_cubic_minor_fixes) { + /* + * Maximum burst-size is limited to the initial congestion-window. + * We know that the network can survive this kind of burst. + */ + abc_lim = tcp_initial_cwnd(tp); + } else { + abc_lim = (tp->snd_nxt == tp->snd_max) ? 2 * tp->t_maxseg : tp->t_maxseg; + } incr = min(acked, abc_lim); tp->snd_cwnd += incr; - tp->snd_cwnd = min(tp->snd_cwnd, - TCP_MAXWIN << tp->snd_scale); + tp->snd_cwnd = min(tp->snd_cwnd, TCP_MAXWIN << tp->snd_scale); } } @@ -361,13 +438,16 @@ tcp_cubic_pre_fr(struct tcpcb *tp) int32_t dev; tp->t_ccstate->cub_epoch_start = 0; tp->t_ccstate->cub_tcp_win = 0; - tp->t_ccstate->cub_target_win = 0; tp->t_ccstate->cub_tcp_bytes_acked = 0; win = min(tp->snd_cwnd, tp->snd_wnd); if (tp->t_flagsext & TF_CWND_NONVALIDATED) { tp->t_lossflightsize = tp->snd_max - tp->snd_una; - win = (max(tp->t_pipeack, tp->t_lossflightsize)) >> 1; + if (tcp_flow_control_response) { + win = max(tp->t_pipeack, tp->t_lossflightsize); + } else { + win = (max(tp->t_pipeack, tp->t_lossflightsize)) >> 1; + } } else { tp->t_lossflightsize = 0; } @@ -379,13 +459,10 @@ tcp_cubic_pre_fr(struct tcpcb *tp) * loss occurred, it indicates that capacity available in the * network has gone down. This can happen if a new flow has started * and it is capturing some of the bandwidth. To reach convergence - * quickly, backoff a little more. Disable fast convergence to - * disable this behavior. + * quickly, backoff a little more. */ - if (win < tp->t_ccstate->cub_last_max && - tcp_cubic_fast_convergence == 1) { - tp->t_ccstate->cub_last_max = (u_int32_t)(win * - tcp_cubic_fast_convergence_factor); + if (win < tp->t_ccstate->cub_last_max && tcp_cubic_minor_fixes) { + tp->t_ccstate->cub_last_max = (uint32_t)((float)win * tcp_cubic_fast_convergence_factor); } else { tp->t_ccstate->cub_last_max = win; } @@ -434,11 +511,11 @@ tcp_cubic_pre_fr(struct tcpcb *tp) /* Backoff congestion window by tcp_cubic_backoff factor */ win = (u_int32_t)(win - (win * tcp_cubic_backoff)); - win = (win / tp->t_maxseg); - if (win < 2) { - win = 2; + win = tcp_round_to(win, tp->t_maxseg); + if (win < 2 * tp->t_maxseg) { + win = 2 * tp->t_maxseg; } - tp->snd_ssthresh = win * tp->t_maxseg; + tp->snd_ssthresh = win; tcp_cc_resize_sndbuf(tp); } @@ -446,12 +523,37 @@ static void tcp_cubic_post_fr(struct tcpcb *tp, struct tcphdr *th) { uint32_t flight_size = 0; + uint32_t ack; - if (SEQ_LEQ(th->th_ack, tp->snd_max)) { - flight_size = tp->snd_max - th->th_ack; + if (th != NULL) { + ack = th->th_ack; + } else { + ack = tp->snd_una; } - if (SACK_ENABLED(tp) && tp->t_lossflightsize > 0) { + if (SEQ_LEQ(ack, tp->snd_max) && (!tcp_cubic_minor_fixes || tcp_flow_control_response)) { + flight_size = tp->snd_max - ack; + } else if (tcp_cubic_minor_fixes) { + /* + * Cubic Minor Fixes: snd_max - th_ack is a very very bad estimate + * of the flight size. Either the app is sending at full speed and + * flight_size *is* snd_sshtresh, or the app is not sending at full + * speed and congestion-window validation would have kicked in earlier. + * + * Except that for the latter, snd_ssthresh is way too high. + * When we exit recovery we will burst a lot of data out... + * + * So, tcp_flow_control_response brings us back to the old behavior. + * Too many feature-flags... + */ + flight_size = tp->snd_ssthresh; + } + + /* + * Cubic Minor Fixes: t_lossflightsize is always 0, because of + * EXIT_FASTRECOVERY. This here is basically dead code... 
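The flight-size choice made in tcp_cubic_post_fr above reduces to the following decision, shown here as a standalone sketch with hypothetical parameter names (the SEQ_LEQ wraparound check is omitted for brevity):

	#include <stdint.h>
	#include <stdio.h>

	/*
	 * Illustrative sketch only: with the minor fixes enabled and the
	 * improved flow-control response disabled, snd_ssthresh is used
	 * instead of snd_max - ack, so that exiting recovery does not burst
	 * a large backlog onto the wire.
	 */
	static uint32_t
	post_fr_flight_size(uint32_t snd_max, uint32_t ack, uint32_t ssthresh,
	    int minor_fixes, int flow_control_response)
	{
		if (!minor_fixes || flow_control_response) {
			return snd_max - ack;   /* classic bytes-in-flight estimate */
		}
		return ssthresh;
	}

	int
	main(void)
	{
		printf("%u\n", post_fr_flight_size(200000, 150000, 80000, 1, 0));
		return 0;
	}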
+ */ + if (SACK_ENABLED(tp) && tp->t_lossflightsize > 0 && !tcp_cubic_minor_fixes) { u_int32_t total_rxt_size = 0, ncwnd; /* * When SACK is enabled, the number of retransmitted bytes @@ -487,7 +589,6 @@ tcp_cubic_post_fr(struct tcpcb *tp, struct tcphdr *th) tp->snd_cwnd = tp->snd_ssthresh; } tp->t_ccstate->cub_tcp_win = 0; - tp->t_ccstate->cub_target_win = 0; tp->t_ccstate->cub_tcp_bytes_acked = 0; } @@ -548,5 +649,4 @@ tcp_cubic_clear_state(struct tcpcb *tp) tp->t_ccstate->cub_tcp_win = 0; tp->t_ccstate->cub_tcp_bytes_acked = 0; tp->t_ccstate->cub_epoch_period = 0; - tp->t_ccstate->cub_target_win = 0; } diff --git a/bsd/netinet/tcp_debug.c b/bsd/netinet/tcp_debug.c index 68a9a4807..801897108 100644 --- a/bsd/netinet/tcp_debug.c +++ b/bsd/netinet/tcp_debug.c @@ -83,9 +83,7 @@ #include #include #include -#if INET6 #include -#endif #include #include #include @@ -114,21 +112,13 @@ void *ipgen; struct tcphdr *th; int req; { -#if INET6 int isipv6; -#endif /* INET6 */ tcp_seq seq, ack; int len, flags; struct tcp_debug *td = &tcp_debug[tcp_debx++]; -#if INET6 isipv6 = (ipgen != NULL && ((struct ip *)ipgen)->ip_v == 6) ? 1 : 0; -#endif /* INET6 */ - td->td_family = -#if INET6 - (isipv6 != 0) ? AF_INET6 : -#endif - AF_INET; + td->td_family = (isipv6 != 0) ? AF_INET6 : AF_INET; if (tcp_debx == TCP_NDEBUG) { tcp_debx = 0; } @@ -148,14 +138,12 @@ int req; sizeof(td->td_ti.ti_i)); bzero((caddr_t)td->td_ip6buf, sizeof(td->td_ip6buf)); break; -#if INET6 case AF_INET6: bcopy((caddr_t)ipgen, (caddr_t)td->td_ip6buf, sizeof(td->td_ip6buf)); bzero((caddr_t)&td->td_ti.ti_i, sizeof(td->td_ti.ti_i)); break; -#endif default: bzero((caddr_t)td->td_ip6buf, sizeof(td->td_ip6buf)); bzero((caddr_t)&td->td_ti.ti_i, @@ -172,13 +160,11 @@ int req; td->td_ti.ti_t = *th; bzero((caddr_t)&td->td_ti6.th, sizeof(td->td_ti6.th)); break; -#if INET6 case AF_INET6: td->td_ti6.th = *th; bzero((caddr_t)&td->td_ti.ti_t, sizeof(td->td_ti.ti_t)); break; -#endif default: bzero((caddr_t)&td->td_ti.ti_t, sizeof(td->td_ti.ti_t)); @@ -209,11 +195,7 @@ int req; } seq = th->th_seq; ack = th->th_ack; - len = -#if INET6 - isipv6 ? ((struct ip6_hdr *)ipgen)->ip6_plen : -#endif - ((struct ip *)ipgen)->ip_len; + len = isipv6 ? ((struct ip6_hdr *)ipgen)->ip6_plen : ((struct ip *)ipgen)->ip_len; if (act == TA_OUTPUT) { seq = ntohl(seq); ack = ntohl(ack); diff --git a/bsd/netinet/tcp_debug.h b/bsd/netinet/tcp_debug.h index 4a850d5c3..4032791ca 100644 --- a/bsd/netinet/tcp_debug.h +++ b/bsd/netinet/tcp_debug.h @@ -78,7 +78,7 @@ struct tcp_debug { */ struct tcpiphdr td_ti; struct { -#if !defined(KERNEL) && defined(INET6) +#if !defined(KERNEL) struct ip6_hdr ip6; #else u_char ip6buf[40]; /* sizeof(struct ip6_hdr) */ diff --git a/bsd/netinet/tcp_input.c b/bsd/netinet/tcp_input.c index 9803eaf09..901c0338d 100644 --- a/bsd/netinet/tcp_input.c +++ b/bsd/netinet/tcp_input.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2019 Apple Inc. All rights reserved. + * Copyright (c) 2000-2020 Apple Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -79,9 +79,9 @@ #include #include #include -#if !CONFIG_EMBEDDED +#if XNU_TARGET_OS_OSX #include -#endif +#endif /* XNU_TARGET_OS_OSX */ #include #include /* before tcp_seq.h, for tcp_random18() */ @@ -104,13 +104,11 @@ #include #include #include -#if INET6 #include #include #include #include #include -#endif #include #include #include @@ -120,9 +118,7 @@ #include #include #include -#if INET6 #include -#endif #include #if TCPDEBUG #include @@ -133,18 +129,11 @@ struct tcphdr tcp_savetcp; #if IPSEC #include -#if INET6 #include -#endif #include #endif /*IPSEC*/ -#if CONFIG_MACF_NET || CONFIG_MACF_SOCKET -#include -#endif /* CONFIG_MACF_NET || CONFIG_MACF_SOCKET */ - #include -#include #if MPTCP #include #include @@ -164,32 +153,38 @@ struct tcphdr tcp_savetcp; struct tcpstat tcpstat; +SYSCTL_SKMEM_TCP_INT(OID_AUTO, flow_control_response, + CTLFLAG_RW | CTLFLAG_LOCKED, int, tcp_flow_control_response, 1, + "Improved response to Flow-control events"); + static int log_in_vain = 0; SYSCTL_INT(_net_inet_tcp, OID_AUTO, log_in_vain, CTLFLAG_RW | CTLFLAG_LOCKED, &log_in_vain, 0, "Log all incoming TCP connections"); +SYSCTL_SKMEM_TCP_INT(OID_AUTO, ack_strategy, + CTLFLAG_RW | CTLFLAG_LOCKED, int, tcp_ack_strategy, TCP_ACK_STRATEGY_MODERN, + "Revised TCP ACK-strategy, avoiding stretch-ACK implementation"); + static int blackhole = 0; SYSCTL_INT(_net_inet_tcp, OID_AUTO, blackhole, CTLFLAG_RW | CTLFLAG_LOCKED, &blackhole, 0, "Do not send RST when dropping refused connections"); +SYSCTL_SKMEM_TCP_INT(OID_AUTO, aggressive_rcvwnd_inc, + CTLFLAG_RW | CTLFLAG_LOCKED, int, tcp_aggressive_rcvwnd_inc, 1, + "Be more aggressive about increasing the receive-window."); + SYSCTL_SKMEM_TCP_INT(OID_AUTO, delayed_ack, CTLFLAG_RW | CTLFLAG_LOCKED, int, tcp_delack_enabled, 3, "Delay ACK to try and piggyback it onto a data packet"); -SYSCTL_SKMEM_TCP_INT(OID_AUTO, tcp_lq_overflow, - CTLFLAG_RW | CTLFLAG_LOCKED, int, tcp_lq_overflow, 1, - "Listen Queue Overflow"); - SYSCTL_SKMEM_TCP_INT(OID_AUTO, recvbg, CTLFLAG_RW | CTLFLAG_LOCKED, int, tcp_recv_bg, 0, "Receive background"); -#if TCP_DROP_SYNFIN SYSCTL_SKMEM_TCP_INT(OID_AUTO, drop_synfin, CTLFLAG_RW | CTLFLAG_LOCKED, static int, drop_synfin, 1, "Drop TCP packets with SYN+FIN set"); -#endif SYSCTL_NODE(_net_inet_tcp, OID_AUTO, reass, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "TCP Segment Reassembly Queue"); @@ -222,15 +217,6 @@ u_int32_t tcp_autorcvbuf_inc_shift = 3; SYSCTL_SKMEM_TCP_INT(OID_AUTO, recv_allowed_iaj, CTLFLAG_RW | CTLFLAG_LOCKED, int, tcp_allowed_iaj, ALLOWED_IAJ, "Allowed inter-packet arrival jiter"); -#if (DEVELOPMENT || DEBUG) -SYSCTL_INT(_net_inet_tcp, OID_AUTO, acc_iaj_high_thresh, - CTLFLAG_RW | CTLFLAG_LOCKED, &tcp_acc_iaj_high_thresh, 0, - "Used in calculating maximum accumulated IAJ"); - -SYSCTL_INT(_net_inet_tcp, OID_AUTO, autorcvbufincshift, - CTLFLAG_RW | CTLFLAG_LOCKED, &tcp_autorcvbuf_inc_shift, 0, - "Shift for increment in receive socket buffer size"); -#endif /* (DEVELOPMENT || DEBUG) */ SYSCTL_SKMEM_TCP_INT(OID_AUTO, doautorcvbuf, CTLFLAG_RW | CTLFLAG_LOCKED, u_int32_t, tcp_do_autorcvbuf, 1, @@ -241,50 +227,9 @@ SYSCTL_SKMEM_TCP_INT(OID_AUTO, autotunereorder, "Enable automatic socket buffer tuning even when reordering is present"); SYSCTL_SKMEM_TCP_INT(OID_AUTO, autorcvbufmax, - CTLFLAG_RW | CTLFLAG_LOCKED, u_int32_t, tcp_autorcvbuf_max, 512 * 1024, + CTLFLAG_RW | CTLFLAG_LOCKED, u_int32_t, tcp_autorcvbuf_max, 2 * 1024 * 1024, "Maximum receive socket buffer size"); -#if CONFIG_EMBEDDED -int sw_lro = 1; 
-#else -int sw_lro = 0; -#endif /* !CONFIG_EMBEDDED */ -SYSCTL_INT(_net_inet_tcp, OID_AUTO, lro, CTLFLAG_RW | CTLFLAG_LOCKED, - &sw_lro, 0, "Used to coalesce TCP packets"); - -int lrodebug = 0; -SYSCTL_INT(_net_inet_tcp, OID_AUTO, lrodbg, - CTLFLAG_RW | CTLFLAG_LOCKED, &lrodebug, 0, - "Used to debug SW LRO"); - -int lro_start = 4; -SYSCTL_INT(_net_inet_tcp, OID_AUTO, lro_startcnt, - CTLFLAG_RW | CTLFLAG_LOCKED, &lro_start, 0, - "Segments for starting LRO computed as power of 2"); - -int limited_txmt = 1; -int early_rexmt = 1; -int sack_ackadv = 1; -int tcp_dsack_enable = 1; - -#if (DEVELOPMENT || DEBUG) -SYSCTL_INT(_net_inet_tcp, OID_AUTO, limited_transmit, - CTLFLAG_RW | CTLFLAG_LOCKED, &limited_txmt, 0, - "Enable limited transmit"); - -SYSCTL_INT(_net_inet_tcp, OID_AUTO, early_rexmt, - CTLFLAG_RW | CTLFLAG_LOCKED, &early_rexmt, 0, - "Enable Early Retransmit"); - -SYSCTL_INT(_net_inet_tcp, OID_AUTO, sack_ackadv, - CTLFLAG_RW | CTLFLAG_LOCKED, &sack_ackadv, 0, - "Use SACK with cumulative ack advancement as a dupack"); - -SYSCTL_INT(_net_inet_tcp, OID_AUTO, dsack_enable, - CTLFLAG_RW | CTLFLAG_LOCKED, &tcp_dsack_enable, 0, - "use DSACK TCP option to report duplicate segments"); - -#endif /* (DEVELOPMENT || DEBUG) */ int tcp_disable_access_to_stats = 1; SYSCTL_INT(_net_inet_tcp, OID_AUTO, disable_access_to_stats, CTLFLAG_RW | CTLFLAG_LOCKED, &tcp_disable_access_to_stats, 0, @@ -294,10 +239,15 @@ SYSCTL_SKMEM_TCP_INT(OID_AUTO, challengeack_limit, CTLFLAG_RW | CTLFLAG_LOCKED, uint32_t, tcp_challengeack_limit, 10, "Maximum number of challenge ACKs per connection per second"); +/* TO BE REMOVED */ SYSCTL_SKMEM_TCP_INT(OID_AUTO, do_rfc5961, CTLFLAG_RW | CTLFLAG_LOCKED, static int, tcp_do_rfc5961, 1, "Enable/Disable full RFC 5961 compliance"); +SYSCTL_SKMEM_TCP_INT(OID_AUTO, do_better_lr, + CTLFLAG_RW | CTLFLAG_LOCKED, int, tcp_do_better_lr, 1, + "Improved TCP Loss Recovery"); + extern int tcp_acc_iaj_high; extern int tcp_acc_iaj_react_limit; @@ -324,18 +274,15 @@ static inline void tcp_adaptive_rwtimo_check(struct tcpcb *, int); #if TRAFFIC_MGT static inline void update_iaj_state(struct tcpcb *tp, uint32_t tlen, int reset_size); -void compute_iaj(struct tcpcb *tp, int nlropkts, int lro_delay_factor); -static void compute_iaj_meat(struct tcpcb *tp, uint32_t cur_iaj); +static inline void compute_iaj(struct tcpcb *tp); +static inline void compute_iaj_meat(struct tcpcb *tp, uint32_t cur_iaj); #endif /* TRAFFIC_MGT */ -#if INET6 static inline unsigned int tcp_maxmtu6(struct rtentry *); -#endif - unsigned int get_maxmtu(struct rtentry *); static void tcp_sbrcv_grow(struct tcpcb *tp, struct sockbuf *sb, - struct tcpopt *to, u_int32_t tlen, u_int32_t rcvbuf_max); + struct tcpopt *to, uint32_t tlen); void tcp_sbrcv_trim(struct tcpcb *tp, struct sockbuf *sb); static void tcp_sbsnd_trim(struct sockbuf *sbsnd); static inline void tcp_sbrcv_tstmp_check(struct tcpcb *tp); @@ -361,23 +308,7 @@ static void tcp_bad_rexmt_check(struct tcpcb *tp, struct tcphdr *th, #define TCP_EARLY_REXMT_WIN (60 * TCP_RETRANSHZ) /* 60 seconds */ #define TCP_EARLY_REXMT_LIMIT 10 -extern void ipfwsyslog( int level, const char *format, ...); -extern int fw_verbose; - -#if IPFIREWALL -extern void ipfw_stealth_stats_incr_tcp(void); - -#define log_in_vain_log( a ) { \ - if ( (log_in_vain == 3 ) && (fw_verbose == 2)) { /* Apple logging, log to ipfw.log */ \ - ipfwsyslog a ; \ - } else if ( (log_in_vain == 4 ) && (fw_verbose == 2)) { \ - ipfw_stealth_stats_incr_tcp(); \ - } \ - else log a ; \ -} -#else #define log_in_vain_log( a ) { log 
a; } -#endif int tcp_rcvunackwin = TCPTV_UNACKWIN; int tcp_maxrcvidle = TCPTV_MAXRCVIDLE; @@ -447,43 +378,13 @@ isqrt(unsigned int val) return g; } -/* - * With LRO, roughly estimate the inter arrival time between - * each sub coalesced packet as an average. Count the delay - * cur_iaj to be the delay between the last packet received - * and the first packet of the LRO stream. Due to round off errors - * cur_iaj may be the same as lro_delay_factor. Averaging has - * round off errors too. lro_delay_factor may be close to 0 - * in steady state leading to lower values fed to compute_iaj_meat. - */ -void -compute_iaj(struct tcpcb *tp, int nlropkts, int lro_delay_factor) +static inline void +compute_iaj(struct tcpcb *tp) { - uint32_t cur_iaj = tcp_now - tp->iaj_rcv_ts; - uint32_t timediff = 0; - - if (cur_iaj >= lro_delay_factor) { - cur_iaj = cur_iaj - lro_delay_factor; - } - - compute_iaj_meat(tp, cur_iaj); - - if (nlropkts <= 1) { - return; - } - - nlropkts--; - - timediff = lro_delay_factor / nlropkts; - - while (nlropkts > 0) { - compute_iaj_meat(tp, timediff); - nlropkts--; - } + compute_iaj_meat(tp, (tcp_now - tp->iaj_rcv_ts)); } -static -void +static inline void compute_iaj_meat(struct tcpcb *tp, uint32_t cur_iaj) { /* When accumulated IAJ reaches MAX_ACC_IAJ in milliseconds, @@ -650,8 +551,6 @@ tcp_reass(struct tcpcb *tp, struct tcphdr *th, int *tlenp, struct mbuf *m, struct inpcb *inp = tp->t_inpcb; struct socket *so = inp->inp_socket; int flags = 0; - struct mbuf *oodata = NULL; - int copy_oodata = 0; u_int16_t qlimit; boolean_t cell = IFNET_IS_CELLULAR(ifp); boolean_t wifi = (!cell && IFNET_IS_WIFI(ifp)); @@ -672,6 +571,7 @@ tcp_reass(struct tcpcb *tp, struct tcphdr *th, int *tlenp, struct mbuf *m, * Reset the stretch-ack algorithm at this point. */ tcp_reset_stretch_ack(tp); + tp->t_forced_acks = TCP_FORCED_ACKS_COUNT; #if TRAFFIC_MGT if (tp->acc_iaj > 0) { @@ -687,7 +587,7 @@ tcp_reass(struct tcpcb *tp, struct tcphdr *th, int *tlenp, struct mbuf *m, * process the missing segment. */ qlimit = min(max(100, so->so_rcv.sb_hiwat >> 10), - (TCP_AUTORCVBUF_MAX(ifp) >> 10)); + (tcp_autorcvbuf_max >> 10)); if (th->th_seq != tp->rcv_nxt && (tp->t_reassqlen + 1) >= qlimit) { tcp_reass_overflows++; @@ -726,7 +626,7 @@ tcp_reass(struct tcpcb *tp, struct tcphdr *th, int *tlenp, struct mbuf *m, /* conversion to int (in i) handles seq wraparound */ i = p->tqe_th->th_seq + p->tqe_len - th->th_seq; if (i > 0) { - if (TCP_DSACK_ENABLED(tp) && i > 1) { + if (i > 1) { /* * Note duplicate data sequnce numbers * to report in DSACK option @@ -800,7 +700,7 @@ tcp_reass(struct tcpcb *tp, struct tcphdr *th, int *tlenp, struct mbuf *m, * dsack_set will be true if a previous entry has some of * the duplicate sequence space. */ - if (TCP_DSACK_ENABLED(tp) && i > 1 && !dsack_set) { + if (i > 1 && !dsack_set) { if (tp->t_dsack_lseq == 0) { tp->t_dsack_lseq = q->tqe_th->th_seq; tp->t_dsack_rseq = @@ -841,16 +741,6 @@ tcp_reass(struct tcpcb *tp, struct tcphdr *th, int *tlenp, struct mbuf *m, LIST_INSERT_AFTER(p, te, tqe_q); } - /* - * New out-of-order data exists, and is pointed to by - * queue entry te. Set copy_oodata to 1 so out-of-order data - * can be copied off to sockbuf after in-order data - * is copied off. 
- */ - if (!(so->so_state & SS_CANTRCVMORE)) { - copy_oodata = 1; - } - present: /* * Present data to user, advancing rcv_nxt through @@ -861,21 +751,6 @@ present: } q = LIST_FIRST(&tp->t_segq); if (!q || q->tqe_th->th_seq != tp->rcv_nxt) { - /* Stop using LRO once out of order packets arrive */ - if (tp->t_flagsext & TF_LRO_OFFLOADED) { - tcp_lro_remove_state(inp->inp_laddr, inp->inp_faddr, - th->th_dport, th->th_sport); - tp->t_flagsext &= ~TF_LRO_OFFLOADED; - } - - /* - * continue processing if out-of-order data - * can be delivered - */ - if (q && (so->so_flags & SOF_ENABLE_MSGS)) { - goto msg_unordered_delivery; - } - return 0; } @@ -907,32 +782,18 @@ present: struct tcphdr saved_tcphdr = {}; so_recv_data_stat(so, q->tqe_m, 0); /* XXXX */ - if (so->so_flags & SOF_ENABLE_MSGS) { - /* - * Append the inorder data as a message to the - * receive socket buffer. Also check to see if - * the data we are about to deliver is the same - * data that we wanted to pass up to the user - * out of order. If so, reset copy_oodata -- - * the received data filled a gap, and - * is now in order! - */ - if (q == te) { - copy_oodata = 0; - } - } memcpy(&saved_tcphdr, th, sizeof(struct tcphdr)); - if (sbappendstream_rcvdemux(so, q->tqe_m, - q->tqe_th->th_seq - (tp->irs + 1), 0)) { - *dowakeup = 1; + + if (q->tqe_th->th_flags & TH_PUSH) { + tp->t_flagsext |= TF_LAST_IS_PSH; + } else { + tp->t_flagsext &= ~TF_LAST_IS_PSH; } - th = &saved_tcphdr; - if (tp->t_flagsext & TF_LRO_OFFLOADED) { - tcp_update_lro_seq(tp->rcv_nxt, - inp->inp_laddr, inp->inp_faddr, - th->th_dport, th->th_sport); + if (sbappendstream_rcvdemux(so, q->tqe_m)) { + *dowakeup = 1; } + th = &saved_tcphdr; } zfree(tcp_reass_zone, q); tp->t_reassqlen--; @@ -940,16 +801,13 @@ present: } while (q && q->tqe_th->th_seq == tp->rcv_nxt); tp->t_flagsext &= ~TF_REASS_INPROG; -#if INET6 if ((inp->inp_vflag & INP_IPV6) != 0) { KERNEL_DEBUG(DBG_LAYER_BEG, ((inp->inp_fport << 16) | inp->inp_lport), (((inp->in6p_laddr.s6_addr16[0] & 0xffff) << 16) | (inp->in6p_faddr.s6_addr16[0] & 0xffff)), 0, 0, 0); - } else -#endif - { + } else { KERNEL_DEBUG(DBG_LAYER_BEG, ((inp->inp_fport << 16) | inp->inp_lport), (((inp->inp_laddr.s_addr & 0xffff) << 16) | @@ -957,25 +815,6 @@ present: 0, 0, 0); } -msg_unordered_delivery: - /* Deliver out-of-order data as a message */ - if (te && (so->so_flags & SOF_ENABLE_MSGS) && copy_oodata && te->tqe_len) { - /* - * make a copy of the mbuf to be delivered up to - * the user, and add it to the sockbuf - */ - oodata = m_copym(te->tqe_m, 0, M_COPYALL, M_DONTWAIT); - if (oodata != NULL) { - if (sbappendmsgstream_rcv(&so->so_rcv, oodata, - te->tqe_th->th_seq - (tp->irs + 1), 1)) { - *dowakeup = 1; - tcpstat.tcps_msg_unopkts++; - } else { - tcpstat.tcps_msg_unoappendfail++; - } - } - } - return flags; } @@ -984,8 +823,7 @@ msg_unordered_delivery: * probe recovers the last packet. */ static void -tcp_reduce_congestion_window( - struct tcpcb *tp) +tcp_reduce_congestion_window(struct tcpcb *tp) { /* * If the current tcp cc module has @@ -1053,7 +891,6 @@ tcp_keepalive_reset(struct tcpcb *tp) * TCP input routine, follows pages 65-76 of the * protocol specification dated September, 1981 very closely. */ -#if INET6 int tcp6_input(struct mbuf **mp, int *offp, int proto) { @@ -1089,7 +926,6 @@ tcp6_input(struct mbuf **mp, int *offp, int proto) tcp_input(m, *offp); return IPPROTO_DONE; } -#endif /* Depending on the usage of mbuf space in the system, this function * will return true or false. 
This is used to determine if a socket @@ -1153,7 +989,7 @@ tcp_sbrcv_reserve(struct tcpcb *tp, struct sockbuf *sbrcv, */ static void tcp_sbrcv_grow(struct tcpcb *tp, struct sockbuf *sbrcv, - struct tcpopt *to, u_int32_t pktlen, u_int32_t rcvbuf_max) + struct tcpopt *to, uint32_t pktlen) { struct socket *so = sbrcv->sb_so; @@ -1167,7 +1003,7 @@ tcp_sbrcv_grow(struct tcpcb *tp, struct sockbuf *sbrcv, if (tcp_do_autorcvbuf == 0 || (sbrcv->sb_flags & SB_AUTOSIZE) == 0 || tcp_cansbgrow(sbrcv) == 0 || - sbrcv->sb_hiwat >= rcvbuf_max || + sbrcv->sb_hiwat >= tcp_autorcvbuf_max || (tp->t_flagsext & TF_RECV_THROTTLE) || (so->so_flags1 & SOF1_EXTEND_BK_IDLE_WANTED) || (!tcp_autotune_reorder && !LIST_EMPTY(&tp->t_segq))) { @@ -1175,14 +1011,6 @@ tcp_sbrcv_grow(struct tcpcb *tp, struct sockbuf *sbrcv, goto out; } - if (TSTMP_GT(tcp_now, - tp->rfbuf_ts + TCPTV_RCVBUFIDLE)) { - /* If there has been an idle period in the - * connection, just restart the measurement - */ - goto out; - } - if (!TSTMP_SUPPORTED(tp)) { /* * Timestamp option is not supported on this connection. @@ -1213,40 +1041,67 @@ tcp_sbrcv_grow(struct tcpcb *tp, struct sockbuf *sbrcv, * on the link. */ if (TSTMP_GEQ(to->to_tsecr, tp->rfbuf_ts)) { - if (tp->rfbuf_cnt + pktlen > (sbrcv->sb_hiwat - - (sbrcv->sb_hiwat >> 1))) { - int32_t rcvbuf_inc, min_incr; - + if (tcp_aggressive_rcvwnd_inc) { tp->rfbuf_cnt += pktlen; - /* - * Increment the receive window by a - * multiple of maximum sized segments. - * This will prevent a connection from - * sending smaller segments on wire if it - * is limited by the receive window. - * - * Set the ideal size based on current - * bandwidth measurements. We set the - * ideal size on receive socket buffer to - * be twice the bandwidth delay product. - */ - rcvbuf_inc = (tp->rfbuf_cnt << 1) - - sbrcv->sb_hiwat; + } - /* - * Make the increment equal to 8 segments - * at least - */ - min_incr = tp->t_maxseg << tcp_autorcvbuf_inc_shift; - if (rcvbuf_inc < min_incr) { - rcvbuf_inc = min_incr; + if ((tcp_aggressive_rcvwnd_inc == 0 && + tp->rfbuf_cnt + pktlen > (sbrcv->sb_hiwat - + (sbrcv->sb_hiwat >> 1))) || + (tcp_aggressive_rcvwnd_inc && + tp->rfbuf_cnt > tp->rfbuf_space)) { + int32_t rcvbuf_inc; + uint32_t idealsize; + + if (tcp_aggressive_rcvwnd_inc == 0) { + int32_t min_incr; + + tp->rfbuf_cnt += pktlen; + /* + * Increment the receive window by a + * multiple of maximum sized segments. + * This will prevent a connection from + * sending smaller segments on wire if it + * is limited by the receive window. + * + * Set the ideal size based on current + * bandwidth measurements. We set the + * ideal size on receive socket buffer to + * be twice the bandwidth delay product. 
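A compact sketch of the sizing rule described here: rfbuf_cnt approximates the bytes delivered in one RTT (a bandwidth-delay-product estimate), and the ideal receive-buffer size is a small multiple of it, twice by default and four times in the aggressive mode handled just below. Names are illustrative, not kernel symbols:

	#include <stdint.h>
	#include <stdio.h>

	/*
	 * Illustrative sketch only: bytes_per_rtt plays the role of
	 * tp->rfbuf_cnt, prev_space the role of tp->rfbuf_space.
	 */
	static uint32_t
	rcvbuf_ideal_size(uint32_t bytes_per_rtt, uint32_t prev_space, int aggressive)
	{
		if (aggressive && bytes_per_rtt > prev_space + (prev_space >> 1)) {
			return bytes_per_rtt << 2;  /* growing fast: 4x the BDP estimate */
		}
		return bytes_per_rtt << 1;          /* default: 2x the BDP estimate */
	}

	int
	main(void)
	{
		printf("%u\n", rcvbuf_ideal_size(256 * 1024, 128 * 1024, 1));
		return 0;
	}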
+ */ + rcvbuf_inc = (tp->rfbuf_cnt << 1) + - sbrcv->sb_hiwat; + + /* + * Make the increment equal to 8 segments + * at least + */ + min_incr = tp->t_maxseg << tcp_autorcvbuf_inc_shift; + if (rcvbuf_inc < min_incr) { + rcvbuf_inc = min_incr; + } + + idealsize = (tp->rfbuf_cnt << 1); + } else { + if (tp->rfbuf_cnt > tp->rfbuf_space + (tp->rfbuf_space >> 1)) { + rcvbuf_inc = (tp->rfbuf_cnt << 2) - sbrcv->sb_hiwat; + idealsize = (tp->rfbuf_cnt << 2); + } else { + rcvbuf_inc = (tp->rfbuf_cnt << 1) - sbrcv->sb_hiwat; + idealsize = (tp->rfbuf_cnt << 1); + } } - rcvbuf_inc = - (rcvbuf_inc / tp->t_maxseg) * tp->t_maxseg; - tcp_sbrcv_reserve(tp, sbrcv, - sbrcv->sb_hiwat + rcvbuf_inc, - (tp->rfbuf_cnt << 1), rcvbuf_max); + tp->rfbuf_space = tp->rfbuf_cnt; + + if (rcvbuf_inc > 0) { + rcvbuf_inc = + (rcvbuf_inc / tp->t_maxseg) * tp->t_maxseg; + + tcp_sbrcv_reserve(tp, sbrcv, + sbrcv->sb_hiwat + rcvbuf_inc, + idealsize, tcp_autorcvbuf_max); + } } /* Measure instantaneous receive bandwidth */ if (tp->t_bwmeas != NULL && tp->rfbuf_cnt > 0 && @@ -1269,7 +1124,7 @@ tcp_sbrcv_grow(struct tcpcb *tp, struct sockbuf *sbrcv, } out: /* Restart the measurement */ - tp->rfbuf_ts = 0; + tp->rfbuf_ts = tcp_now; tp->rfbuf_cnt = 0; return; } @@ -1325,8 +1180,7 @@ tcp_sbrcv_trim(struct tcpcb *tp, struct sockbuf *sbrcv) void tcp_sbsnd_trim(struct sockbuf *sbsnd) { - if (tcp_do_autosendbuf == 1 && - ((sbsnd->sb_flags & (SB_AUTOSIZE | SB_TRIM)) == + if (((sbsnd->sb_flags & (SB_AUTOSIZE | SB_TRIM)) == (SB_AUTOSIZE | SB_TRIM)) && (sbsnd->sb_idealsize > 0) && (sbsnd->sb_hiwat > sbsnd->sb_idealsize)) { @@ -1474,15 +1328,13 @@ tcp_detect_bad_rexmt(struct tcpcb *tp, struct tcphdr *th, return 0; } if (TSTMP_SUPPORTED(tp)) { - if (rxtime > 0 && (to->to_flags & TOF_TS) - && to->to_tsecr != 0 - && TSTMP_LT(to->to_tsecr, rxtime)) { + if (rxtime > 0 && (to->to_flags & TOF_TS) && to->to_tsecr != 0 && + TSTMP_LT(to->to_tsecr, rxtime)) { return 1; } } else { - if ((tp->t_rxtshift == 1 - || (tp->t_flagsext & TF_SENT_TLPROBE)) - && rxtime > 0) { + if ((tp->t_rxtshift == 1 || (tp->t_flagsext & TF_SENT_TLPROBE)) && + rxtime > 0) { tdiff = (int32_t)(tcp_now - rxtime); if (tdiff < bad_rexmt_win) { return 1; @@ -1527,7 +1379,7 @@ tcp_bad_rexmt_restore_state(struct tcpcb *tp, struct tcphdr *th) /* Do not use the loss flight size in this case */ tp->t_lossflightsize = 0; } - tp->snd_cwnd = max(tp->snd_cwnd, TCP_CC_CWND_INIT_BYTES); + tp->snd_cwnd = max(tp->snd_cwnd, tcp_initial_cwnd(tp)); tp->snd_recover = tp->snd_recover_prev; tp->snd_nxt = tp->snd_max; @@ -1563,10 +1415,9 @@ tcp_bad_rexmt_check(struct tcpcb *tp, struct tcphdr *th, struct tcpopt *to) ++tcpstat.tcps_sndrexmitbad; tcp_bad_rexmt_restore_state(tp, th); tcp_ccdbg_trace(tp, th, TCP_CC_BAD_REXMT_RECOVERY); - } else if ((tp->t_flagsext & TF_SENT_TLPROBE) - && tp->t_tlphighrxt > 0 - && SEQ_GEQ(th->th_ack, tp->t_tlphighrxt) - && !tcp_detect_bad_rexmt(tp, th, to, tp->t_tlpstart)) { + } else if ((tp->t_flagsext & TF_SENT_TLPROBE) && tp->t_tlphighrxt > 0 && + SEQ_GEQ(th->th_ack, tp->t_tlphighrxt) && + !tcp_detect_bad_rexmt(tp, th, to, tp->t_tlpstart)) { /* * check DSACK information also to make sure that * the TLP was indeed needed @@ -1622,6 +1473,8 @@ out: if (tp->t_pmtud_start_ts > 0) { tp->t_pmtud_start_ts = 0; } + + tp->t_pmtud_lastseg_size = 0; } /* @@ -1643,12 +1496,9 @@ tcp_early_rexmt_check(struct tcpcb *tp, struct tcphdr *th) int32_t snd_len; struct socket *so = tp->t_inpcb->inp_socket; - if (early_rexmt && (SACK_ENABLED(tp) || - tp->t_early_rexmt_count < TCP_EARLY_REXMT_LIMIT) && 
+ if ((SACK_ENABLED(tp) || tp->t_early_rexmt_count < TCP_EARLY_REXMT_LIMIT) && SEQ_GT(tp->snd_max, tp->snd_una) && - (tp->t_dupacks == 1 || - (SACK_ENABLED(tp) && - !TAILQ_EMPTY(&tp->snd_holes)))) { + (tp->t_dupacks == 1 || (SACK_ENABLED(tp) && !TAILQ_EMPTY(&tp->snd_holes)))) { /* * If there are only a few outstanding * segments on the connection, we might need @@ -1913,9 +1763,43 @@ tcp_handle_wakeup(struct socket *so, int read_wakeup, int write_wakeup) } } +static void +tcp_update_snd_una(struct tcpcb *tp, uint32_t ack) +{ + tp->snd_una = ack; + if (SACK_ENABLED(tp) && SEQ_LT(tp->send_highest_sack, tp->snd_una)) { + tp->send_highest_sack = tp->snd_una; + + /* If we move our marker, we need to start fresh */ + tp->t_new_dupacks = 0; + } +} + +static bool +tcp_syn_data_valid(struct tcpcb *tp, struct tcphdr *tcp_hdr, int tlen) +{ + /* No data? */ + if (tlen <= 0) { + return false; + } + + /* Not the right sequence-number? */ + if (tcp_hdr->th_seq != tp->irs) { + return false; + } + + /* We could have wrapped around, check that */ + if (tp->t_inpcb->inp_stat->rxbytes > INT32_MAX) { + return false; + } + + return true; +} + void tcp_input(struct mbuf *m, int off0) { + int exiting_fr = 0; struct tcphdr *th; struct ip *ip = NULL; struct inpcb *inp; @@ -1930,27 +1814,21 @@ tcp_input(struct mbuf *m, int off0) int read_wakeup = 0; int write_wakeup = 0; struct in_addr laddr; -#if INET6 struct in6_addr laddr6; -#endif int dropsocket = 0; int iss = 0, nosock = 0; - u_int32_t tiwin, sack_bytes_acked = 0; + u_int32_t tiwin, sack_bytes_acked = 0, sack_bytes_newly_acked = 0; struct tcpopt to; /* options in this segment */ #if TCPDEBUG short ostate = 0; #endif -#if IPFIREWALL - struct sockaddr_in *next_hop = NULL; - struct m_tag *fwd_tag; -#endif /* IPFIREWALL */ u_char ip_ecn = IPTOS_ECN_NOTECT; unsigned int ifscope; uint8_t isconnected, isdisconnected; struct ifnet *ifp = m->m_pkthdr.rcvif; - int pktf_sw_lro_pkt = (m->m_pkthdr.pkt_flags & PKTF_SW_LRO_PKT) ? 1 : 0; - int nlropkts = (pktf_sw_lro_pkt == 1) ? m->m_pkthdr.lro_npkts : 1; - int turnoff_lro = 0, win; + int segment_count = m->m_pkthdr.seg_cnt ? : 1; + int win; + u_int16_t pf_tag = 0; #if MPTCP struct mptcb *mp_tp = NULL; #endif /* MPTCP */ @@ -1980,39 +1858,24 @@ tcp_input(struct mbuf *m, int off0) stat += npkts; \ } while (0) - TCP_INC_VAR(tcpstat.tcps_rcvtotal, nlropkts); -#if IPFIREWALL - /* Grab info from PACKET_TAG_IPFORWARD tag prepended to the chain. */ - if (!SLIST_EMPTY(&m->m_pkthdr.tags)) { - fwd_tag = m_tag_locate(m, KERNEL_MODULE_TAG_ID, - KERNEL_TAG_TYPE_IPFORWARD, NULL); - } else { - fwd_tag = NULL; + if (tcp_ack_strategy == TCP_ACK_STRATEGY_LEGACY) { + segment_count = 1; } - if (fwd_tag != NULL) { - struct ip_fwd_tag *ipfwd_tag = - (struct ip_fwd_tag *)(fwd_tag + 1); + TCP_INC_VAR(tcpstat.tcps_rcvtotal, segment_count); - next_hop = ipfwd_tag->next_hop; - m_tag_delete(m, fwd_tag); - } -#endif /* IPFIREWALL */ - -#if INET6 struct ip6_hdr *ip6 = NULL; int isipv6; -#endif /* INET6 */ - int rstreason; /* For badport_bandlim accounting purposes */ - struct proc *proc0 = current_proc(); + struct proc *kernel_proc = current_proc(); KERNEL_DEBUG(DBG_FNC_TCP_INPUT | DBG_FUNC_START, 0, 0, 0, 0, 0); -#if INET6 isipv6 = (mtod(m, struct ip *)->ip_v == 6) ? 
1 : 0; -#endif bzero((char *)&to, sizeof(to)); -#if INET6 + if (m->m_flags & M_PKTHDR) { + pf_tag = m_pftag(m)->pftag_tag; + } + if (isipv6) { /* * Expect 32-bit aligned data pointer on @@ -2052,9 +1915,7 @@ tcp_input(struct mbuf *m, int off0) struct tcphdr *, th); ip_ecn = (ntohl(ip6->ip6_flow) >> 20) & IPTOS_ECN_MASK; - } else -#endif /* INET6 */ - { + } else { /* * Get IP and TCP header together in first mbuf. * Note: IP leaves IP header in first mbuf. @@ -2082,10 +1943,8 @@ tcp_input(struct mbuf *m, int off0) goto dropnosock; } -#if INET6 /* Re-initialization for later version check */ ip->ip_v = IPVERSION; -#endif ip_ecn = (ip->ip_tos & IPTOS_ECN_MASK); DTRACE_TCP5(receive, struct mbuf *, m, struct inpcb *, NULL, @@ -2111,14 +1970,11 @@ tcp_input(struct mbuf *m, int off0) } tlen -= off; /* tlen is used instead of ti->ti_len */ if (off > sizeof(struct tcphdr)) { -#if INET6 if (isipv6) { IP6_EXTHDR_CHECK(m, off0, off, return ); ip6 = mtod(m, struct ip6_hdr *); th = (struct tcphdr *)(void *)((caddr_t)ip6 + off0); - } else -#endif /* INET6 */ - { + } else { if (m->m_len < sizeof(struct ip) + off) { if ((m = m_pullup(m, sizeof(struct ip) + off)) == 0) { tcpstat.tcps_rcvshort++; @@ -2150,20 +2006,17 @@ tcp_input(struct mbuf *m, int off0) } thflags = th->th_flags; -#if TCP_DROP_SYNFIN /* - * If the drop_synfin option is enabled, drop all packets with - * both the SYN and FIN bits set. This prevents e.g. nmap from - * identifying the TCP/IP stack. + * Drop all packets with both the SYN and FIN bits set. + * This prevents e.g. nmap from identifying the TCP/IP stack. * * This is a violation of the TCP specification. */ - if (drop_synfin && (thflags & (TH_SYN | TH_FIN)) == (TH_SYN | TH_FIN)) { + if ((thflags & (TH_SYN | TH_FIN)) == (TH_SYN | TH_FIN)) { IF_TCP_STATINC(ifp, synfin); TCP_LOG_DROP_PKT(TCP_LOG_HDR, th, ifp, "drop SYN FIN"); goto dropnosock; } -#endif /* * Delay dropping TCP, IP headers, IPv6 ext headers, and TCP options, @@ -2212,44 +2065,11 @@ findpcb: isconnected = FALSE; isdisconnected = FALSE; -#if IPFIREWALL_FORWARD - if (next_hop != NULL -#if INET6 - && isipv6 == 0 /* IPv6 support is not yet */ -#endif /* INET6 */ - ) { - /* - * Diverted. Pretend to be the destination. - * already got one like this? - */ - inp = in_pcblookup_hash(&tcbinfo, ip->ip_src, th->th_sport, - ip->ip_dst, th->th_dport, 0, m->m_pkthdr.rcvif); - if (!inp) { - /* - * No, then it's new. 
Try find the ambushing socket - */ - if (!next_hop->sin_port) { - inp = in_pcblookup_hash(&tcbinfo, ip->ip_src, - th->th_sport, next_hop->sin_addr, - th->th_dport, 1, m->m_pkthdr.rcvif); - } else { - inp = in_pcblookup_hash(&tcbinfo, - ip->ip_src, th->th_sport, - next_hop->sin_addr, - ntohs(next_hop->sin_port), 1, - m->m_pkthdr.rcvif); - } - } - } else -#endif /* IPFIREWALL_FORWARD */ - { -#if INET6 - if (isipv6) { - inp = in6_pcblookup_hash(&tcbinfo, &ip6->ip6_src, th->th_sport, - &ip6->ip6_dst, th->th_dport, 1, - m->m_pkthdr.rcvif); - } else -#endif /* INET6 */ + if (isipv6) { + inp = in6_pcblookup_hash(&tcbinfo, &ip6->ip6_src, th->th_sport, + &ip6->ip6_dst, th->th_dport, 1, + m->m_pkthdr.rcvif); + } else { inp = in_pcblookup_hash(&tcbinfo, ip->ip_src, th->th_sport, ip->ip_dst, th->th_dport, 1, m->m_pkthdr.rcvif); } @@ -2272,19 +2092,12 @@ findpcb: */ if (inp == NULL) { if (log_in_vain) { -#if INET6 char dbuf[MAX_IPv6_STR_LEN], sbuf[MAX_IPv6_STR_LEN]; -#else /* INET6 */ - char dbuf[MAX_IPv4_STR_LEN], sbuf[MAX_IPv4_STR_LEN]; -#endif /* INET6 */ -#if INET6 if (isipv6) { inet_ntop(AF_INET6, &ip6->ip6_dst, dbuf, sizeof(dbuf)); inet_ntop(AF_INET6, &ip6->ip6_src, sbuf, sizeof(sbuf)); - } else -#endif - { + } else { inet_ntop(AF_INET, &ip->ip_dst, dbuf, sizeof(dbuf)); inet_ntop(AF_INET, &ip->ip_src, sbuf, sizeof(sbuf)); } @@ -2308,13 +2121,8 @@ findpcb: case 4: if ((thflags & TH_SYN) && !(thflags & TH_ACK) && !(m->m_flags & (M_BCAST | M_MCAST)) && -#if INET6 ((isipv6 && !IN6_ARE_ADDR_EQUAL(&ip6->ip6_dst, &ip6->ip6_src)) || - (!isipv6 && ip->ip_dst.s_addr != ip->ip_src.s_addr)) -#else - ip->ip_dst.s_addr != ip->ip_src.s_addr -#endif - ) { + (!isipv6 && ip->ip_dst.s_addr != ip->ip_src.s_addr))) { log_in_vain_log((LOG_INFO, "Stealth Mode connection attempt to TCP %s:%d from %s:%d\n", dbuf, ntohs(th->th_dport), @@ -2344,7 +2152,6 @@ findpcb: } } } - rstreason = BANDLIM_RST_CLOSEDPORT; IF_TCP_STATINC(ifp, noconnnolist); TCP_LOG_DROP_PKT(TCP_LOG_HDR, th, ifp, "closed port"); goto dropwithresetnosock; @@ -2410,7 +2217,6 @@ findpcb: tp = intotcpcb(inp); if (tp == NULL) { - rstreason = BANDLIM_RST_CLOSEDPORT; IF_TCP_STATINC(ifp, noconnlist); TCP_LOG_DROP_PKT(TCP_LOG_HDR, th, ifp, "tp is NULL"); goto dropwithreset; @@ -2427,7 +2233,7 @@ findpcb: if (so->so_state & SS_ISCONNECTED) { // Connected TCP sockets have a fully-bound local and remote, // so the policy check doesn't need to override addresses - if (!necp_socket_is_allowed_to_send_recv(inp, ifp, NULL, NULL, NULL)) { + if (!necp_socket_is_allowed_to_send_recv(inp, ifp, pf_tag, NULL, NULL, NULL, NULL)) { TCP_LOG_DROP_NECP(TCP_LOG_HDR, th, intotcpcb(inp), false); IF_TCP_STATINC(ifp, badformat); goto drop; @@ -2445,21 +2251,19 @@ findpcb: * with NECP. 
*/ (void) inp_update_policy(inp); -#if INET6 + if (isipv6) { if (!necp_socket_is_allowed_to_send_recv_v6(inp, th->th_dport, th->th_sport, &ip6->ip6_dst, - &ip6->ip6_src, ifp, NULL, NULL, NULL)) { + &ip6->ip6_src, ifp, pf_tag, NULL, NULL, NULL, NULL)) { TCP_LOG_DROP_NECP(TCP_LOG_HDR, th, intotcpcb(inp), false); IF_TCP_STATINC(ifp, badformat); goto drop; } - } else -#endif - { + } else { if (!necp_socket_is_allowed_to_send_recv_v4(inp, th->th_dport, th->th_sport, &ip->ip_dst, &ip->ip_src, - ifp, NULL, NULL, NULL)) { + ifp, pf_tag, NULL, NULL, NULL, NULL)) { TCP_LOG_DROP_NECP(TCP_LOG_HDR, th, intotcpcb(inp), false); IF_TCP_STATINC(ifp, badformat); goto drop; @@ -2471,7 +2275,7 @@ findpcb: prev_t_state = tp->t_state; /* If none of the FIN|SYN|RST|ACK flag is set, drop */ - if (tcp_do_rfc5961 && (thflags & TH_ACCEPT) == 0) { + if ((thflags & TH_ACCEPT) == 0) { TCP_LOG_DROP_PCB(TCP_LOG_HDR, th, tp, false, "rfc5961 TH_ACCEPT == 0"); goto drop; } @@ -2483,14 +2287,6 @@ findpcb: tiwin = th->th_win; } - -#if CONFIG_MACF_NET - if (mac_inpcb_check_deliver(inp, m, AF_INET, SOCK_STREAM)) { - TCP_LOG_DROP_PCB(TCP_LOG_HDR, th, tp, false, "mac_inpcb_check_deliver failed"); - goto drop; - } -#endif - /* Avoid processing packets while closing a listen socket */ if (tp->t_state == TCPS_LISTEN && (so->so_options & SO_ACCEPTCONN) == 0) { @@ -2502,13 +2298,12 @@ findpcb: #if TCPDEBUG if (so->so_options & SO_DEBUG) { ostate = tp->t_state; -#if INET6 if (isipv6) { bcopy((char *)ip6, (char *)tcp_saveipgen, sizeof(*ip6)); - } else -#endif /* INET6 */ - bcopy((char *)ip, (char *)tcp_saveipgen, sizeof(*ip)); + } else { + bcopy((char *)ip, (char *)tcp_saveipgen, sizeof(*ip)); + } tcp_savetcp = *th; } #endif @@ -2518,9 +2313,7 @@ findpcb: struct socket *oso; struct sockaddr_storage from; struct sockaddr_storage to2; -#if INET6 struct inpcb *oinp = sotoinpcb(so); -#endif /* INET6 */ struct ifnet *head_ifscope; unsigned int head_nocell, head_recvanyif, head_noexpensive, head_awdl_unrestricted, @@ -2558,7 +2351,6 @@ findpcb: TCP_LOG_DROP_PCB(TCP_LOG_HDR, th, tp, false, "SYN with ACK"); tp = NULL; tcpstat.tcps_badsyn++; - rstreason = BANDLIM_RST_OPENPORT; goto dropwithreset; } @@ -2569,16 +2361,13 @@ findpcb: } KERNEL_DEBUG(DBG_FNC_TCP_NEWCONN | DBG_FUNC_START, 0, 0, 0, 0, 0); if (th->th_dport == th->th_sport) { -#if INET6 if (isipv6) { if (IN6_ARE_ADDR_EQUAL(&ip6->ip6_dst, &ip6->ip6_src)) { TCP_LOG_DROP_PCB(TCP_LOG_HDR, th, tp, false, "bad tuple same port"); goto drop; } - } else -#endif /* INET6 */ - if (ip->ip_dst.s_addr == ip->ip_src.s_addr) { + } else if (ip->ip_dst.s_addr == ip->ip_src.s_addr) { TCP_LOG_DROP_PCB(TCP_LOG_HDR, th, tp, false, "bad tuple same IPv4 address"); goto drop; } @@ -2595,16 +2384,13 @@ findpcb: TCP_LOG_DROP_PCB(TCP_LOG_HDR, th, tp, false, "mbuf M_BCAST | M_MCAST"); goto drop; } -#if INET6 if (isipv6) { if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) || IN6_IS_ADDR_MULTICAST(&ip6->ip6_src)) { TCP_LOG_DROP_PCB(TCP_LOG_HDR, th, tp, false, "IN6_IS_ADDR_MULTICAST"); goto drop; } - } else -#endif - if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) || + } else if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) || IN_MULTICAST(ntohl(ip->ip_src.s_addr)) || ip->ip_src.s_addr == htonl(INADDR_BROADCAST) || in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif)) { @@ -2613,7 +2399,6 @@ findpcb: } -#if INET6 /* * If deprecated address is forbidden, * we do not accept SYN to deprecated interface @@ -2636,16 +2421,13 @@ findpcb: &ia6_flags) == 0) { if (ia6_flags & IN6_IFF_DEPRECATED) { tp = NULL; - rstreason = BANDLIM_RST_OPENPORT; 
IF_TCP_STATINC(ifp, deprecate6); TCP_LOG_DROP_PCB(TCP_LOG_HDR, th, tp, false, "deprecated IPv6 address"); goto dropwithreset; } } } -#endif if (so->so_filt || check_cfil) { -#if INET6 if (isipv6) { struct sockaddr_in6 *sin6 = (struct sockaddr_in6*)&from; @@ -2664,9 +2446,7 @@ findpcb: sin6->sin6_flowinfo = 0; sin6->sin6_addr = ip6->ip6_dst; sin6->sin6_scope_id = 0; - } else -#endif - { + } else { struct sockaddr_in *sin = (struct sockaddr_in*)&from; sin->sin_len = sizeof(*sin); @@ -2766,29 +2546,24 @@ findpcb: if (head_external_port) { inp->inp_flags2 |= INP2_EXTERNAL_PORT; } -#if INET6 if (isipv6) { inp->in6p_laddr = ip6->ip6_dst; } else { inp->inp_vflag &= ~INP_IPV6; inp->inp_vflag |= INP_IPV4; -#endif /* INET6 */ - inp->inp_laddr = ip->ip_dst; -#if INET6 - } -#endif /* INET6 */ + inp->inp_laddr = ip->ip_dst; + } inp->inp_lport = th->th_dport; if (in_pcbinshash(inp, 0) != 0) { /* * Undo the assignments above if we failed to * put the PCB on the hash lists. */ -#if INET6 if (isipv6) { inp->in6p_laddr = in6addr_any; - } else -#endif /* INET6 */ - inp->inp_laddr.s_addr = INADDR_ANY; + } else { + inp->inp_laddr.s_addr = INADDR_ANY; + } inp->inp_lport = 0; socket_lock(oso, 0); /* release ref on parent */ socket_unlock(oso, 1); @@ -2796,7 +2571,6 @@ findpcb: goto drop; } socket_lock(oso, 0); -#if INET6 if (isipv6) { /* * Inherit socket options from the listening @@ -2818,9 +2592,7 @@ findpcb: ip6_copypktopts(oinp->in6p_outputopts, M_NOWAIT); } - } else -#endif /* INET6 */ - { + } else { inp->inp_options = ip_srcroute(); inp->inp_ip_tos = oinp->inp_ip_tos; } @@ -2858,7 +2630,7 @@ findpcb: /* now drop the reference on the listener */ socket_unlock(oso, 1); - tcp_set_max_rwinscale(tp, so, ifp); + tcp_set_max_rwinscale(tp, so); #if CONTENT_FILTER if (check_cfil) { @@ -2891,7 +2663,7 @@ findpcb: * certain criteria defined in tcp_stretch_ack_enable function. */ if ((tp->t_flagsext & TF_RCVUNACK_WAITSS) != 0) { - TCP_INC_VAR(tp->rcv_waitforss, nlropkts); + TCP_INC_VAR(tp->rcv_waitforss, segment_count); } if (tcp_stretch_ack_enable(tp, thflags)) { tp->t_flags |= TF_STRETCHACK; @@ -2911,11 +2683,14 @@ findpcb: } /* - * Keep track of how many bytes were received in the LRO packet + * Clear TE_SENDECE if TH_CWR is set. This is harmless, so we don't + * bother doing extensive checks for state and whatnot. */ - if ((pktf_sw_lro_pkt) && (nlropkts > 2)) { - tp->t_lropktlen += tlen; + if (thflags & TH_CWR) { + tp->ecn_flags &= ~TE_SENDECE; + tp->t_ecn_recv_cwr++; } + /* * Explicit Congestion Notification - Flag that we need to send ECT if * + The IP Congestion experienced flag was set. @@ -2938,25 +2713,17 @@ findpcb: tp->ecn_flags |= TE_SENDECE; } - /* - * Clear TE_SENDECE if TH_CWR is set. This is harmless, so we don't - * bother doing extensive checks for state and whatnot. - */ - if (thflags & TH_CWR) { - tp->ecn_flags &= ~TE_SENDECE; - tp->t_ecn_recv_cwr++; - } - /* * If we received an explicit notification of congestion in * ip tos ecn bits or by the CWR bit in TCP header flags, reset - * the ack-strteching state. We need to handle ECN notification if + * the ack-stretching state. We need to handle ECN notification if * an ECN setup SYN was sent even once. 
*/ - if (tp->t_state == TCPS_ESTABLISHED - && (tp->ecn_flags & TE_SETUPSENT) - && (ip_ecn == IPTOS_ECN_CE || (thflags & TH_CWR))) { + if (tp->t_state == TCPS_ESTABLISHED && + (tp->ecn_flags & TE_SETUPSENT) && + (ip_ecn == IPTOS_ECN_CE || (thflags & TH_CWR))) { tcp_reset_stretch_ack(tp); + tp->t_forced_acks = TCP_FORCED_ACKS_COUNT; CLEAR_IAJ_STATE(tp); } @@ -2984,18 +2751,6 @@ findpcb: } } - /* - * Try to determine if we are receiving a packet after a long time. - * Use our own approximation of idletime to roughly measure remote - * end's idle time. Since slowstart is used after an idle period - * we want to avoid doing LRO if the remote end is not up to date - * on initial window support and starts with 1 or 2 packets as its IW. - */ - if (sw_lro && (tp->t_flagsext & TF_LRO_OFFLOADED) && - ((tcp_now - tp->t_rcvtime) >= (TCP_IDLETIMEOUT(tp)))) { - turnoff_lro = 1; - } - /* Update rcvtime as a new segment was received on the connection */ tp->t_rcvtime = tcp_now; @@ -3025,8 +2780,6 @@ findpcb: (void) tcp_output(tp); tcp_check_timer_state(tp); socket_unlock(so, 1); - KERNEL_DEBUG(DBG_FNC_TCP_INPUT | - DBG_FUNC_END, 0, 0, 0, 0, 0); return; } #endif /* MPTCP */ @@ -3052,18 +2805,15 @@ findpcb: */ if (tp->t_state == TCPS_ESTABLISHED && (thflags & (TH_SYN | TH_FIN | TH_RST | TH_URG | TH_ACK | TH_ECE | TH_PUSH)) == TH_ACK && - ((tp->t_flags & (TF_NEEDSYN | TF_NEEDFIN)) == 0) && + ((tp->t_flags & TF_NEEDFIN) == 0) && ((to.to_flags & TOF_TS) == 0 || TSTMP_GEQ(to.to_tsval, tp->ts_recent)) && th->th_seq == tp->rcv_nxt && LIST_EMPTY(&tp->t_segq)) { int seg_size = tlen; if (tp->iaj_pktcnt <= IAJ_IGNORE_PKTCNT) { - TCP_INC_VAR(tp->iaj_pktcnt, nlropkts); + TCP_INC_VAR(tp->iaj_pktcnt, segment_count); } - if (m->m_pkthdr.pkt_flags & PKTF_SW_LRO_PKT) { - seg_size = m->m_pkthdr.lro_pktlen; - } if (tp->iaj_size == 0 || seg_size > tp->iaj_size || (seg_size == tp->iaj_size && tp->iaj_rcv_ts == 0)) { /* @@ -3078,12 +2828,7 @@ findpcb: * Compute inter-arrival jitter taking * this packet as the second packet */ - if (pktf_sw_lro_pkt) { - compute_iaj(tp, nlropkts, - m->m_pkthdr.lro_elapsed); - } else { - compute_iaj(tp, 1, 0); - } + compute_iaj(tp); } if (seg_size < tp->iaj_size) { /* @@ -3096,7 +2841,7 @@ findpcb: * iaj_size, we try to learn the iaj_size * again. 
*/ - TCP_INC_VAR(tp->iaj_small_pkt, nlropkts); + TCP_INC_VAR(tp->iaj_small_pkt, segment_count); if (tp->iaj_small_pkt > RESET_IAJ_SIZE_THRESH) { update_iaj_state(tp, seg_size, 1); } else { @@ -3130,7 +2875,7 @@ findpcb: */ if (tp->t_state == TCPS_ESTABLISHED && (thflags & (TH_SYN | TH_FIN | TH_RST | TH_URG | TH_ACK | TH_ECE | TH_CWR)) == TH_ACK && - ((tp->t_flags & (TF_NEEDSYN | TF_NEEDFIN)) == 0) && + ((tp->t_flags & TF_NEEDFIN) == 0) && ((to.to_flags & TOF_TS) == 0 || TSTMP_GEQ(to.to_tsval, tp->ts_recent)) && th->th_seq == tp->rcv_nxt && @@ -3183,17 +2928,14 @@ findpcb: } tcp_ccdbg_trace(tp, th, TCP_CC_INSEQ_ACK_RCVD); sbdrop(&so->so_snd, acked); - if (so->so_flags & SOF_ENABLE_MSGS) { - VERIFY(acked <= so->so_msg_state->msg_serial_bytes); - so->so_msg_state->msg_serial_bytes -= acked; - } tcp_sbsnd_trim(&so->so_snd); if (SEQ_GT(tp->snd_una, tp->snd_recover) && SEQ_LEQ(th->th_ack, tp->snd_recover)) { tp->snd_recover = th->th_ack - 1; } - tp->snd_una = th->th_ack; + + tcp_update_snd_una(tp, th->th_ack); TCP_RESET_REXMT_STATE(tp); @@ -3206,8 +2948,11 @@ findpcb: if (tp->t_dupacks > 0) { tp->t_dupacks = 0; tp->t_rexmtthresh = tcprexmtthresh; + tp->t_new_dupacks = 0; } + tp->sackhint.sack_bytes_acked = 0; + /* * If all outstanding data are acked, stop * retransmit timer, otherwise restart timer @@ -3221,9 +2966,7 @@ findpcb: tp->t_timer[TCPT_REXMT] = 0; tp->t_timer[TCPT_PTO] = 0; } else if (tp->t_timer[TCPT_PERSIST] == 0) { - tp->t_timer[TCPT_REXMT] = - OFFSET_FROM_START(tp, - tp->t_rxtcur); + tp->t_timer[TCPT_REXMT] = OFFSET_FROM_START(tp, tp->t_rxtcur); } if (!SLIST_EMPTY(&tp->t_rxt_segments) && !TCP_DSACK_SEQ_IN_WINDOW(tp, @@ -3257,8 +3000,7 @@ findpcb: KERNEL_DEBUG(DBG_FNC_TCP_INPUT | DBG_FUNC_END, 0, 0, 0, 0, 0); return; } - } else if (th->th_ack == tp->snd_una && - LIST_EMPTY(&tp->t_segq) && + } else if (th->th_ack == tp->snd_una && LIST_EMPTY(&tp->t_segq) && tlen <= tcp_sbspace(tp)) { /* * this is a pure, in-sequence data packet @@ -3266,30 +3008,6 @@ findpcb: * we have enough buffer space to take it. */ - /* - * If this is a connection in steady state, start - * coalescing packets belonging to this flow. - */ - if (turnoff_lro) { - tcp_lro_remove_state(tp->t_inpcb->inp_laddr, - tp->t_inpcb->inp_faddr, - tp->t_inpcb->inp_lport, - tp->t_inpcb->inp_fport); - tp->t_flagsext &= ~TF_LRO_OFFLOADED; - tp->t_idleat = tp->rcv_nxt; - } else if (sw_lro && !pktf_sw_lro_pkt && !isipv6 && - (so->so_flags & SOF_USELRO) && - !IFNET_IS_CELLULAR(m->m_pkthdr.rcvif) && - (m->m_pkthdr.rcvif->if_type != IFT_LOOP) && - ((th->th_seq - tp->irs) > - (tp->t_maxseg << lro_start)) && - ((tp->t_idleat == 0) || ((th->th_seq - - tp->t_idleat) > (tp->t_maxseg << lro_start)))) { - tp->t_flagsext |= TF_LRO_OFFLOADED; - tcp_start_coalescing(ip, th, tlen); - tp->t_idleat = 0; - } - /* Clean receiver SACK report if present */ if (SACK_ENABLED(tp) && tp->rcv_numsacks) { tcp_clean_sackreport(tp); @@ -3306,16 +3024,11 @@ findpcb: * rcv_nxt. 
*/ tp->rcv_up = tp->rcv_nxt; - TCP_INC_VAR(tcpstat.tcps_rcvpack, nlropkts); + TCP_INC_VAR(tcpstat.tcps_rcvpack, segment_count); tcpstat.tcps_rcvbyte += tlen; if (nstat_collect) { - if (m->m_pkthdr.pkt_flags & PKTF_SW_LRO_PKT) { - INP_ADD_STAT(inp, cell, wifi, wired, - rxpackets, m->m_pkthdr.lro_npkts); - } else { - INP_ADD_STAT(inp, cell, wifi, wired, - rxpackets, 1); - } + INP_ADD_STAT(inp, cell, wifi, wired, + rxpackets, 1); INP_ADD_STAT(inp, cell, wifi, wired, rxbytes, tlen); inp_set_activity_bitmap(inp); @@ -3330,8 +3043,7 @@ findpcb: tcp_compute_rtt(tp, &to, th); } - tcp_sbrcv_grow(tp, &so->so_rcv, &to, tlen, - TCP_AUTORCVBUF_MAX(ifp)); + tcp_sbrcv_grow(tp, &so->so_rcv, &to, tlen); /* * Add data to socket buffer. @@ -3352,26 +3064,29 @@ findpcb: ip = (struct ip *)&saved_hdr[0]; } memcpy(&saved_tcphdr, th, sizeof(struct tcphdr)); - if (sbappendstream_rcvdemux(so, m, - th->th_seq - (tp->irs + 1), 0)) { + + if (th->th_flags & TH_PUSH) { + tp->t_flagsext |= TF_LAST_IS_PSH; + } else { + tp->t_flagsext &= ~TF_LAST_IS_PSH; + } + + if (sbappendstream_rcvdemux(so, m)) { mptcp_handle_input(so); read_wakeup = 1; } th = &saved_tcphdr; -#if INET6 if (isipv6) { KERNEL_DEBUG(DBG_LAYER_END, ((th->th_dport << 16) | th->th_sport), (((ip6->ip6_src.s6_addr16[0]) << 16) | (ip6->ip6_dst.s6_addr16[0])), th->th_seq, th->th_ack, th->th_win); - } else -#endif - { + } else { KERNEL_DEBUG(DBG_LAYER_END, ((th->th_dport << 16) | th->th_sport), (((ip->ip_src.s_addr & 0xffff) << 16) | (ip->ip_dst.s_addr & 0xffff)), th->th_seq, th->th_ack, th->th_win); } - TCP_INC_VAR(tp->t_unacksegs, nlropkts); + TCP_INC_VAR(tp->t_unacksegs, segment_count); if (DELAY_ACK(tp, th)) { if ((tp->t_flags & TF_DELACK) == 0) { tp->t_flags |= TF_DELACK; @@ -3419,12 +3134,13 @@ findpcb: * Ensure that the subflow receive window isn't greater * than the connection level receive window. */ - if ((tp->t_mpflags & TMPF_MPTCP_TRUE) && - (mp_tp = tptomptp(tp))) { + if ((tp->t_mpflags & TMPF_MPTCP_TRUE) && (mp_tp = tptomptp(tp))) { socket_lock_assert_owned(mptetoso(mp_tp->mpt_mpte)); + int64_t recwin_conn = (int64_t)(mp_tp->mpt_rcvadv - mp_tp->mpt_rcvnxt); - if (tp->rcv_wnd > (int)(mp_tp->mpt_rcvadv - (uint32_t)mp_tp->mpt_rcvnxt)) { - tp->rcv_wnd = mp_tp->mpt_rcvadv - (uint32_t)mp_tp->mpt_rcvnxt; + VERIFY(recwin_conn < INT32_MAX && recwin_conn > INT32_MIN); + if (recwin_conn > 0 && tp->rcv_wnd > (uint32_t)recwin_conn) { + tp->rcv_wnd = (uint32_t)recwin_conn; tcpstat.tcps_mp_reducedwin++; } } @@ -3434,7 +3150,7 @@ findpcb: /* * Initialize tp->rcv_nxt, and tp->irs, select an initial * tp->iss, and send a segment: - * + * * Also initialize tp->snd_nxt to tp->iss+1 and tp->snd_una to tp->iss. * Fill in remote peer address fields if not previously specified. 
* Enter SYN_RECEIVED state, and process any other fields of this @@ -3442,9 +3158,7 @@ findpcb: */ case TCPS_LISTEN: { struct sockaddr_in *sin; -#if INET6 struct sockaddr_in6 *sin6; -#endif socket_lock_assert_owned(so); @@ -3452,7 +3166,6 @@ findpcb: tp->t_log_flags = 0; tp->t_flagsext &= ~TF_LOGGED_CONN_SUMMARY; -#if INET6 if (isipv6) { MALLOC(sin6, struct sockaddr_in6 *, sizeof *sin6, M_SONAME, M_NOWAIT); @@ -3470,16 +3183,14 @@ findpcb: inp->in6p_laddr = ip6->ip6_dst; } if (in6_pcbconnect(inp, (struct sockaddr *)sin6, - proc0)) { + kernel_proc)) { inp->in6p_laddr = laddr6; FREE(sin6, M_SONAME); TCP_LOG_DROP_PCB(TCP_LOG_HDR, th, tp, false, " LISTEN in6_pcbconnect failed"); goto drop; } FREE(sin6, M_SONAME); - } else -#endif - { + } else { socket_lock_assert_owned(so); MALLOC(sin, struct sockaddr_in *, sizeof *sin, M_SONAME, M_NOWAIT); @@ -3496,7 +3207,7 @@ findpcb: if (inp->inp_laddr.s_addr == INADDR_ANY) { inp->inp_laddr = ip->ip_dst; } - if (in_pcbconnect(inp, (struct sockaddr *)sin, proc0, + if (in_pcbconnect(inp, (struct sockaddr *)sin, kernel_proc, IFSCOPE_NONE, NULL)) { inp->inp_laddr = laddr; FREE(sin, M_SONAME); @@ -3542,7 +3253,6 @@ findpcb: if (inp->inp_flowhash == 0) { inp->inp_flowhash = inp_calc_flowhash(inp); } -#if INET6 /* update flowinfo - RFC 6437 */ if (inp->inp_flow == 0 && inp->in6p_flags & IN6P_AUTOFLOWLABEL) { @@ -3550,7 +3260,6 @@ findpcb: inp->inp_flow |= (htonl(inp->inp_flowhash) & IPV6_FLOWLABEL_MASK); } -#endif /* INET6 */ /* reset the incomp processing flag */ so->so_flags &= ~(SOF_INCOMP_INPROGRESS); @@ -3565,6 +3274,8 @@ findpcb: */ TCP_LOG_CONNECT(tp, false, 0); + tcp_add_fsw_flow(tp, ifp); + goto trimthenstep6; } @@ -3576,7 +3287,6 @@ findpcb: if ((thflags & TH_ACK) && (SEQ_LEQ(th->th_ack, tp->snd_una) || SEQ_GT(th->th_ack, tp->snd_max))) { - rstreason = BANDLIM_RST_OPENPORT; IF_TCP_STATINC(ifp, ooopacket); TCP_LOG_DROP_PCB(TCP_LOG_HDR, th, tp, false, "SYN_RECEIVED bad ACK"); goto dropwithreset; @@ -3611,7 +3321,6 @@ findpcb: if ((thflags & TH_ACK) && (SEQ_LEQ(th->th_ack, tp->iss) || SEQ_GT(th->th_ack, tp->snd_max))) { - rstreason = BANDLIM_UNLIMITED; IF_TCP_STATINC(ifp, ooopacket); TCP_LOG_DROP_PCB(TCP_LOG_HDR, th, tp, false, "SYN_SENT bad ACK"); goto dropwithreset; @@ -3639,7 +3348,6 @@ findpcb: (SO_FILT_HINT_LOCKED | SO_FILT_HINT_CONNRESET)); tp = tcp_drop(tp, ECONNREFUSED); - postevent(so, 0, EV_RESET); } TCP_LOG_DROP_PCB(TCP_LOG_HDR, th, tp, false, "SYN_SENT got RST"); goto drop; @@ -3678,11 +3386,6 @@ findpcb: tp->ecn_flags &= ~TE_SENDIPECT; } -#if CONFIG_MACF_NET && CONFIG_MACF_SOCKET - /* XXXMAC: recursive lock: SOCK_LOCK(so); */ - mac_socketpeer_label_associate_mbuf(m, so); - /* XXXMAC: SOCK_UNLOCK(so); */ -#endif /* Do window scaling on this connection? */ if (TCP_WINDOW_SCALE_ENABLED(tp)) { tp->snd_scale = tp->requested_s_scale; @@ -3727,7 +3430,7 @@ findpcb: * If there's data, delay ACK; if there's also a FIN * ACKNOW will be turned on later. */ - TCP_INC_VAR(tp->t_unacksegs, nlropkts); + TCP_INC_VAR(tp->t_unacksegs, segment_count); if (DELAY_ACK(tp, th) && tlen != 0) { if ((tp->t_flags & TF_DELACK) == 0) { tp->t_flags |= TF_DELACK; @@ -3773,6 +3476,7 @@ findpcb: */ inp_count_sndbytes(inp, th->th_ack); } + tp->t_forced_acks = TCP_FORCED_ACKS_COUNT; #if MPTCP /* * Do not send the connect notification for additional @@ -3804,9 +3508,8 @@ findpcb: } else { /* * Received initial SYN in SYN-SENT[*] state => simul- - * taneous open. 
If segment contains CC option and there is - * a cached CC, apply TAO test; if it succeeds, connection is - * half-synchronized. Otherwise, do 3-way handshake: + * taneous open. + * Do 3-way handshake: * SYN-SENT -> SYN-RECEIVED * SYN-SENT* -> SYN-RECEIVED* */ @@ -3870,9 +3573,9 @@ trimthenstep6: * in accordance with RFC 5961 Section 4.2 */ case TCPS_ESTABLISHED: - if (thflags & TH_SYN) { + if (thflags & TH_SYN && tlen <= 0) { /* Drop the packet silently if we have reached the limit */ - if (tcp_do_rfc5961 && tcp_is_ack_ratelimited(tp)) { + if (tcp_is_ack_ratelimited(tp)) { TCP_LOG_DROP_PCB(TCP_LOG_HDR, th, tp, false, "ESTABLISHED rfc5961 rate limited"); goto drop; } else { @@ -3964,7 +3667,7 @@ trimthenstep6: (tp->rcv_wnd == 0 && ((tp->last_ack_sent == th->th_seq) || ((tp->last_ack_sent - 1) == th->th_seq)))) { - if (tcp_do_rfc5961 == 0 || tp->last_ack_sent == th->th_seq) { + if (tp->last_ack_sent == th->th_seq) { switch (tp->t_state) { case TCPS_SYN_RECEIVED: IF_TCP_STATINC(ifp, rstinsynrcv); @@ -3972,11 +3675,6 @@ trimthenstep6: goto close; case TCPS_ESTABLISHED: - if (tcp_do_rfc5961 == 0 && tp->last_ack_sent != th->th_seq) { - tcpstat.tcps_badrst++; - TCP_LOG_DROP_PCB(TCP_LOG_HDR, th, tp, false, "ESTABLISHED rfc5961 bad RST"); - goto drop; - } if (TCP_ECN_ENABLED(tp) && tp->snd_una == tp->iss + 1 && SEQ_GT(tp->snd_max, tp->snd_una)) { @@ -3987,15 +3685,12 @@ trimthenstep6: */ tcp_heuristic_ecn_droprst(tp); } + OS_FALLTHROUGH; case TCPS_FIN_WAIT_1: case TCPS_CLOSE_WAIT: - /* - * Drop through ... - */ case TCPS_FIN_WAIT_2: so->so_error = ECONNRESET; close: - postevent(so, 0, EV_RESET); soevent(so, (SO_FILT_HINT_LOCKED | SO_FILT_HINT_CONNRESET)); @@ -4012,7 +3707,7 @@ close: case TCPS_TIME_WAIT: break; } - } else if (tcp_do_rfc5961) { + } else { tcpstat.tcps_badrst++; /* Drop if we have reached the ACK limit */ if (tcp_is_ack_ratelimited(tp)) { @@ -4095,7 +3790,6 @@ close: * for the "LAND" DoS attack. */ if (tp->t_state == TCPS_SYN_RECEIVED && SEQ_LT(th->th_seq, tp->irs)) { - rstreason = BANDLIM_RST_OPENPORT; IF_TCP_STATINC(ifp, dospacket); TCP_LOG_DROP_PCB(TCP_LOG_HDR, th, tp, false, "SYN_RECEIVED bad SEQ"); goto dropwithreset; @@ -4141,7 +3835,7 @@ close: * If the SYN bit was originally set, then only send * an ACK if we are not rate-limiting this connection. */ - if (tcp_do_rfc5961 && is_syn_set) { + if (is_syn_set) { if (!tcp_is_ack_ratelimited(tp)) { tcpstat.tcps_synchallenge++; tp->t_flags |= TF_ACKNOW; @@ -4163,7 +3857,7 @@ close: tcpstat.tcps_rcvpartdupbyte += todrop; } - if (TCP_DSACK_ENABLED(tp) && todrop > 1) { + if (todrop > 1) { /* * Note the duplicate data sequence space so that * it can be reported in DSACK option. @@ -4223,7 +3917,6 @@ close: if (close_it) { tp = tcp_close(tp); tcpstat.tcps_rcvafterclose++; - rstreason = BANDLIM_UNLIMITED; IF_TCP_STATINC(ifp, cleanup); goto dropwithreset; } @@ -4308,7 +4001,7 @@ close: * Perform rate limitation in doing so. */ if (thflags & TH_SYN) { - if (tcp_do_rfc5961) { + if (!tcp_syn_data_valid(tp, th, tlen)) { tcpstat.tcps_badsyn++; /* Drop if we have reached ACK limit */ if (tcp_is_ack_ratelimited(tp)) { @@ -4321,12 +4014,12 @@ close: goto dropafterack; } } else { - tp = tcp_drop(tp, ECONNRESET); - rstreason = BANDLIM_UNLIMITED; - postevent(so, 0, EV_RESET); - IF_TCP_STATINC(ifp, synwindow); - TCP_LOG_DROP_PCB(TCP_LOG_HDR, th, tp, false, "bad SYN"); - goto dropwithreset; + /* + * Received SYN (/ACK) with data. + * Move sequence number along to process the data. 
+ */ + th->th_seq++; + thflags &= ~TH_SYN; } } @@ -4336,13 +4029,11 @@ close: * later processing; else drop segment and return. */ if ((thflags & TH_ACK) == 0) { - if (tp->t_state == TCPS_SYN_RECEIVED || - (tp->t_flags & TF_NEEDSYN)) { + if (tp->t_state == TCPS_SYN_RECEIVED) { if ((tfo_enabled(tp))) { /* * So, we received a valid segment while in - * SYN-RECEIVED (TF_NEEDSYN is actually never - * set, so this is dead code). + * SYN-RECEIVED. * As this cannot be an RST (see that if a bit * higher), and it does not have the ACK-flag * set, we want to retransmit the SYN/ACK. @@ -4422,6 +4113,7 @@ close: nstat_route_connect_success( tp->t_inpcb->inp_route.ro_rt); } + /* * The SYN is acknowledged but una is not updated * yet. So pass the value of ack to compute @@ -4429,6 +4121,7 @@ close: */ inp_count_sndbytes(inp, th->th_ack); } + tp->t_forced_acks = TCP_FORCED_ACKS_COUNT; /* * If segment contains data or ACK, will call tcp_reass() * later; if not, do so now to pass queued data to user. @@ -4500,16 +4193,14 @@ close: * includes a SACK with its ACK. */ if (SACK_ENABLED(tp) && - (to.to_nsacks > 0 || - !TAILQ_EMPTY(&tp->snd_holes))) { - tcp_sack_doack(tp, &to, th, - &sack_bytes_acked); + (to.to_nsacks > 0 || !TAILQ_EMPTY(&tp->snd_holes))) { + tcp_sack_doack(tp, &to, th, &sack_bytes_acked, &sack_bytes_newly_acked); } goto process_ACK; } - /* FALLTHROUGH */ + OS_FALLTHROUGH; /* * In ESTABLISHED state: drop duplicate ACKs; ACK out of range @@ -4528,14 +4219,14 @@ close: case TCPS_TIME_WAIT: if (SEQ_GT(th->th_ack, tp->snd_max)) { tcpstat.tcps_rcvacktoomuch++; - if (tcp_do_rfc5961 && tcp_is_ack_ratelimited(tp)) { + if (tcp_is_ack_ratelimited(tp)) { TCP_LOG_DROP_PCB(TCP_LOG_HDR, th, tp, false, "rfc5961 rcvacktoomuch"); goto drop; } else { goto dropafterack; } } - if (tcp_do_rfc5961 && SEQ_LT(th->th_ack, tp->snd_una - tp->max_sndwnd)) { + if (SEQ_LT(th->th_ack, tp->snd_una - tp->max_sndwnd)) { if (tcp_is_ack_ratelimited(tp)) { TCP_LOG_DROP_PCB(TCP_LOG_HDR, th, tp, false, "rfc5961 bad ACK"); goto drop; @@ -4560,7 +4251,7 @@ close: if (SACK_ENABLED(tp) && (to.to_nsacks > 0 || !TAILQ_EMPTY(&tp->snd_holes))) { - tcp_sack_doack(tp, &to, th, &sack_bytes_acked); + tcp_sack_doack(tp, &to, th, &sack_bytes_acked, &sack_bytes_newly_acked); } #if MPTCP @@ -4602,6 +4293,7 @@ close: if (SEQ_LEQ(th->th_ack, tp->snd_una)) { if (tlen == 0 && (tiwin == tp->snd_wnd || (to.to_nsacks > 0 && sack_bytes_acked > 0))) { + uint32_t old_dupacks; /* * If both ends send FIN at the same time, * then the ack will be a duplicate ack @@ -4614,6 +4306,7 @@ close: break; } process_dupack: + old_dupacks = tp->t_dupacks; #if MPTCP /* * MPTCP options that are ignored must @@ -4643,7 +4336,50 @@ process_dupack: } tcpstat.tcps_rcvdupack++; - ++tp->t_dupacks; + if (SACK_ENABLED(tp) && tcp_do_better_lr) { + tp->t_dupacks += max(1, sack_bytes_acked / tp->t_maxseg); + } else { + ++tp->t_dupacks; + } + + tp->sackhint.sack_bytes_acked += sack_bytes_acked; + + if (SACK_ENABLED(tp) && tcp_do_better_lr) { + tp->t_new_dupacks += (sack_bytes_newly_acked / tp->t_maxseg); + + if (tp->t_new_dupacks >= tp->t_rexmtthresh && IN_FASTRECOVERY(tp)) { + /* Let's restart the retransmission */ + tcp_sack_lost_rexmit(tp); + + /* + * If the current tcp cc module has + * defined a hook for tasks to run + * before entering FR, call it + */ + if (CC_ALGO(tp)->pre_fr != NULL) { + CC_ALGO(tp)->pre_fr(tp); + } + + ENTER_FASTRECOVERY(tp); + + if (tp->t_flags & TF_SENTFIN) { + tp->snd_recover = tp->snd_max - 1; + } else { + tp->snd_recover = tp->snd_max; + } + tp->t_rtttime 
= 0; + + if (TCP_ECN_ENABLED(tp)) { + tp->ecn_flags |= TE_SENDCWR; + } + + if (tp->t_flagsext & TF_CWND_NONVALIDATED) { + tcp_cc_adjust_nonvalidated_cwnd(tp); + } else { + tp->snd_cwnd = tp->snd_ssthresh; + } + } + } /* * Check if we need to reset the limit on @@ -4686,11 +4422,11 @@ process_dupack: * network. */ if (tp->t_timer[TCPT_REXMT] == 0 || - (th->th_ack != tp->snd_una - && sack_bytes_acked == 0)) { + (th->th_ack != tp->snd_una && sack_bytes_acked == 0)) { tp->t_dupacks = 0; tp->t_rexmtthresh = tcprexmtthresh; - } else if (tp->t_dupacks > tp->t_rexmtthresh || + tp->t_new_dupacks = 0; + } else if ((tp->t_dupacks > tp->t_rexmtthresh && (!tcp_do_better_lr || old_dupacks >= tp->t_rexmtthresh)) || IN_FASTRECOVERY(tp)) { /* * If this connection was seeing packet @@ -4709,18 +4445,23 @@ process_dupack: break; } - if (SACK_ENABLED(tp) - && IN_FASTRECOVERY(tp)) { + /* + * Dup acks mean that packets have left the + * network (they're now cached at the receiver) + * so bump cwnd by the amount in the receiver + * to keep a constant cwnd packets in the + * network. + */ + if (SACK_ENABLED(tp) && IN_FASTRECOVERY(tp)) { int awnd; /* * Compute the amount of data in flight first. * We can inject new data into the pipe iff - * we have less than 1/2 the original window's - * worth of data in flight. + * we have less than snd_ssthres worth of data in + * flight. */ - awnd = (tp->snd_nxt - tp->snd_fack) + - tp->sackhint.sack_bytes_rexmit; + awnd = (tp->snd_nxt - tp->snd_fack) + tp->sackhint.sack_bytes_rexmit; if (awnd < tp->snd_ssthresh) { tp->snd_cwnd += tp->t_maxseg; if (tp->snd_cwnd > tp->snd_ssthresh) { @@ -4742,7 +4483,8 @@ process_dupack: (void) tcp_output(tp); goto drop; - } else if (tp->t_dupacks == tp->t_rexmtthresh) { + } else if ((!tcp_do_better_lr && tp->t_dupacks == tp->t_rexmtthresh) || + (tcp_do_better_lr && tp->t_dupacks >= tp->t_rexmtthresh)) { tcp_seq onxt = tp->snd_nxt; /* @@ -4760,8 +4502,7 @@ process_dupack: break; } } else { - if (SEQ_LEQ(th->th_ack, - tp->snd_recover)) { + if (SEQ_LEQ(th->th_ack, tp->snd_recover)) { tp->t_dupacks = 0; break; } @@ -4817,19 +4558,19 @@ process_dupack: tcpstat.tcps_sack_recovery_episode++; tp->t_sack_recovery_episode++; tp->sack_newdata = tp->snd_nxt; - tp->snd_cwnd = tp->t_maxseg; - tp->t_flagsext &= - ~TF_CWND_NONVALIDATED; + if (tcp_do_better_lr) { + tp->snd_cwnd = tp->snd_ssthresh; + } else { + tp->snd_cwnd = tp->t_maxseg; + } + tp->t_flagsext &= ~TF_CWND_NONVALIDATED; /* Process any window updates */ if (tiwin > tp->snd_wnd) { - tcp_update_window( - tp, thflags, - th, tiwin, tlen); + tcp_update_window(tp, thflags, th, tiwin, tlen); } - tcp_ccdbg_trace(tp, th, - TCP_CC_ENTER_FASTRECOVERY); + tcp_ccdbg_trace(tp, th, TCP_CC_ENTER_FASTRECOVERY); (void) tcp_output(tp); goto drop; } @@ -4838,27 +4579,22 @@ process_dupack: /* Process any window updates */ if (tiwin > tp->snd_wnd) { - tcp_update_window(tp, - thflags, - th, tiwin, tlen); + tcp_update_window(tp, thflags, th, tiwin, tlen); } (void) tcp_output(tp); if (tp->t_flagsext & TF_CWND_NONVALIDATED) { tcp_cc_adjust_nonvalidated_cwnd(tp); } else { - tp->snd_cwnd = tp->snd_ssthresh + - tp->t_maxseg * tp->t_dupacks; + tp->snd_cwnd = tp->snd_ssthresh + tp->t_maxseg * tp->t_dupacks; } if (SEQ_GT(onxt, tp->snd_nxt)) { tp->snd_nxt = onxt; } - tcp_ccdbg_trace(tp, th, - TCP_CC_ENTER_FASTRECOVERY); + tcp_ccdbg_trace(tp, th, TCP_CC_ENTER_FASTRECOVERY); goto drop; - } else if (limited_txmt && - ALLOW_LIMITED_TRANSMIT(tp) && + } else if (ALLOW_LIMITED_TRANSMIT(tp) && (!(SACK_ENABLED(tp)) || sack_bytes_acked > 
0) && (so->so_snd.sb_cc - (tp->snd_max - tp->snd_una)) > 0) { u_int32_t incr = (tp->t_maxseg * tp->t_dupacks); @@ -4900,6 +4636,9 @@ process_dupack: } tcp_ccdbg_trace(tp, th, TCP_CC_PARTIAL_ACK); } else { + if (tcp_cubic_minor_fixes) { + exiting_fr = 1; + } EXIT_FASTRECOVERY(tp); if (CC_ALGO(tp)->post_fr != NULL) { CC_ALGO(tp)->post_fr(tp, th); @@ -4935,28 +4674,7 @@ process_dupack: */ tp->t_dupacks = 0; tp->t_rexmtthresh = tcprexmtthresh; - } - - - /* - * If we reach this point, ACK is not a duplicate, - * i.e., it ACKs something we sent. - */ - if (tp->t_flags & TF_NEEDSYN) { - /* - * T/TCP: Connection was half-synchronized, and our - * SYN has been ACK'd (so connection is now fully - * synchronized). Go to non-starred state, - * increment snd_una for ACK of SYN, and check if - * we can do window scaling. - */ - tp->t_flags &= ~TF_NEEDSYN; - tp->snd_una++; - /* Do window scaling? */ - if (TCP_WINDOW_SCALE_ENABLED(tp)) { - tp->snd_scale = tp->requested_s_scale; - tp->rcv_scale = tp->request_r_scale; - } + tp->t_new_dupacks = 0; } process_ACK: @@ -5053,7 +4771,7 @@ process_ACK: * The calculations in this function assume that snd_una is * not updated yet. */ - if (!IN_FASTRECOVERY(tp)) { + if (!IN_FASTRECOVERY(tp) && !exiting_fr) { if (CC_ALGO(tp)->ack_rcvd != NULL) { CC_ALGO(tp)->ack_rcvd(tp, th); } @@ -5062,17 +4780,9 @@ process_ACK: if (acked > so->so_snd.sb_cc) { tp->snd_wnd -= so->so_snd.sb_cc; sbdrop(&so->so_snd, (int)so->so_snd.sb_cc); - if (so->so_flags & SOF_ENABLE_MSGS) { - so->so_msg_state->msg_serial_bytes -= - (int)so->so_snd.sb_cc; - } ourfinisacked = 1; } else { sbdrop(&so->so_snd, acked); - if (so->so_flags & SOF_ENABLE_MSGS) { - so->so_msg_state->msg_serial_bytes -= - acked; - } tcp_sbsnd_trim(&so->so_snd); tp->snd_wnd -= acked; ourfinisacked = 0; @@ -5089,7 +4799,7 @@ process_ACK: EXIT_FASTRECOVERY(tp); } - tp->snd_una = th->th_ack; + tcp_update_snd_una(tp, th->th_ack); if (SACK_ENABLED(tp)) { if (SEQ_GT(tp->snd_una, tp->snd_recover)) { @@ -5204,8 +4914,7 @@ process_ACK: * a SACK option. So counting it as one duplicate * ack is ok. */ - if (sack_ackadv == 1 && - tp->t_state == TCPS_ESTABLISHED && + if (tp->t_state == TCPS_ESTABLISHED && SACK_ENABLED(tp) && sack_bytes_acked > 0 && to.to_nsacks > 0 && tp->t_dupacks == 0 && SEQ_LEQ(th->th_ack, tp->snd_una) && tlen == 0 && @@ -5259,7 +4968,6 @@ step6: (tp->rcv_up - tp->rcv_nxt) - 1; if (so->so_oobmark == 0) { so->so_state |= SS_RCVATMARK; - postevent(so, 0, EV_OOB); } sohasoutofband(so); tp->t_oobflags &= ~(TCPOOB_HAVEDATA | TCPOOB_HADDATA); @@ -5341,7 +5049,7 @@ dodata: * fast retransmit can work). 
*/ if (th->th_seq == tp->rcv_nxt && LIST_EMPTY(&tp->t_segq)) { - TCP_INC_VAR(tp->t_unacksegs, nlropkts); + TCP_INC_VAR(tp->t_unacksegs, segment_count); /* * Calculate the RTT on the receiver only if the * connection is in streaming mode and the last @@ -5363,22 +5071,16 @@ dodata: } tp->rcv_nxt += tlen; thflags = th->th_flags & TH_FIN; - TCP_INC_VAR(tcpstat.tcps_rcvpack, nlropkts); + TCP_INC_VAR(tcpstat.tcps_rcvpack, segment_count); tcpstat.tcps_rcvbyte += tlen; if (nstat_collect) { - if (m->m_pkthdr.pkt_flags & PKTF_SW_LRO_PKT) { - INP_ADD_STAT(inp, cell, wifi, wired, - rxpackets, m->m_pkthdr.lro_npkts); - } else { - INP_ADD_STAT(inp, cell, wifi, wired, - rxpackets, 1); - } + INP_ADD_STAT(inp, cell, wifi, wired, + rxpackets, 1); INP_ADD_STAT(inp, cell, wifi, wired, rxbytes, tlen); inp_set_activity_bitmap(inp); } - tcp_sbrcv_grow(tp, &so->so_rcv, &to, tlen, - TCP_AUTORCVBUF_MAX(ifp)); + tcp_sbrcv_grow(tp, &so->so_rcv, &to, tlen); so_recv_data_stat(so, m, drop_hdrlen); if (isipv6) { @@ -5389,8 +5091,14 @@ dodata: ip = (struct ip *)&saved_hdr[0]; } memcpy(&saved_tcphdr, th, sizeof(struct tcphdr)); - if (sbappendstream_rcvdemux(so, m, - th->th_seq - (tp->irs + 1), 0)) { + + if (th->th_flags & TH_PUSH) { + tp->t_flagsext |= TF_LAST_IS_PSH; + } else { + tp->t_flagsext &= ~TF_LAST_IS_PSH; + } + + if (sbappendstream_rcvdemux(so, m)) { read_wakeup = 1; } th = &saved_tcphdr; @@ -5404,7 +5112,7 @@ dodata: } if (tcp_autotune_reorder) { - tcp_sbrcv_grow(tp, &so->so_rcv, &to, tlen, TCP_AUTORCVBUF_MAX(ifp)); + tcp_sbrcv_grow(tp, &so->so_rcv, &to, tlen); } memcpy(&saved_tcphdr, th, sizeof(struct tcphdr)); @@ -5427,14 +5135,11 @@ dodata: } if (tp->t_flags & TF_DELACK) { -#if INET6 if (isipv6) { KERNEL_DEBUG(DBG_LAYER_END, ((th->th_dport << 16) | th->th_sport), (((ip6->ip6_src.s6_addr16[0]) << 16) | (ip6->ip6_dst.s6_addr16[0])), th->th_seq, th->th_ack, th->th_win); - } else -#endif - { + } else { KERNEL_DEBUG(DBG_LAYER_END, ((th->th_dport << 16) | th->th_sport), (((ip->ip_src.s_addr & 0xffff) << 16) | (ip->ip_dst.s_addr & 0xffff)), th->th_seq, th->th_ack, th->th_win); @@ -5460,7 +5165,6 @@ dodata: if (thflags & TH_FIN) { if (TCPS_HAVERCVDFIN(tp->t_state) == 0) { socantrcvmore(so); - postevent(so, 0, EV_FIN); /* * If connection is half-synchronized * (ie NEEDSYN flag on) then delay ACK, @@ -5468,15 +5172,8 @@ dodata: * Otherwise, since we received a FIN then no * more input can be expected, send ACK now. 
*/ - TCP_INC_VAR(tp->t_unacksegs, nlropkts); - if (DELAY_ACK(tp, th) && (tp->t_flags & TF_NEEDSYN)) { - if ((tp->t_flags & TF_DELACK) == 0) { - tp->t_flags |= TF_DELACK; - tp->t_timer[TCPT_DELACK] = OFFSET_FROM_START(tp, tcp_delack); - } - } else { - tp->t_flags |= TF_ACKNOW; - } + TCP_INC_VAR(tp->t_unacksegs, segment_count); + tp->t_flags |= TF_ACKNOW; tp->rcv_nxt++; } switch (tp->t_state) { @@ -5486,6 +5183,7 @@ dodata: */ case TCPS_SYN_RECEIVED: tp->t_starttime = tcp_now; + OS_FALLTHROUGH; case TCPS_ESTABLISHED: DTRACE_TCP4(state__change, void, NULL, struct inpcb *, inp, struct tcpcb *, tp, int32_t, TCPS_CLOSE_WAIT); @@ -5576,7 +5274,6 @@ dropafterack: if (tp->t_state == TCPS_SYN_RECEIVED && (thflags & TH_ACK) && (SEQ_GT(tp->snd_una, th->th_ack) || SEQ_GT(th->th_ack, tp->snd_max))) { - rstreason = BANDLIM_RST_OPENPORT; IF_TCP_STATINC(ifp, dospacket); goto dropwithreset; } @@ -5608,15 +5305,12 @@ dropwithreset: if ((thflags & TH_RST) || m->m_flags & (M_BCAST | M_MCAST)) { goto drop; } -#if INET6 if (isipv6) { if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) || IN6_IS_ADDR_MULTICAST(&ip6->ip6_src)) { goto drop; } - } else -#endif /* INET6 */ - if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) || + } else if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) || IN_MULTICAST(ntohl(ip->ip_src.s_addr)) || ip->ip_src.s_addr == htonl(INADDR_BROADCAST) || in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif)) { @@ -5624,15 +5318,6 @@ dropwithreset: } /* IPv6 anycast check is done at tcp6_input() */ - /* - * Perform bandwidth limiting. - */ -#if ICMP_BANDLIM - if (badport_bandlim(rstreason) < 0) { - goto drop; - } -#endif - #if TCPDEBUG if (tp == 0 || (tp->t_inpcb->inp_socket->so_options & SO_DEBUG)) { tcp_trace(TA_DROP, ostate, tp, (void *)tcp_saveipgen, @@ -5758,14 +5443,12 @@ tcp_dooptions(struct tcpcb *tp, u_char *cp, int cnt, struct tcphdr *th, (char *)&to->to_tsecr, sizeof(to->to_tsecr)); NTOHL(to->to_tsecr); /* Re-enable sending Timestamps if we received them */ - if (!(tp->t_flags & TF_REQ_TSTMP) && - tcp_do_rfc1323 == 1) { + if (!(tp->t_flags & TF_REQ_TSTMP)) { tp->t_flags |= TF_REQ_TSTMP; } break; case TCPOPT_SACK_PERMITTED: - if (!tcp_do_sack || - optlen != TCPOLEN_SACK_PERMITTED) { + if (optlen != TCPOLEN_SACK_PERMITTED) { continue; } if (th->th_flags & TH_SYN) { @@ -5997,7 +5680,7 @@ tcp_compute_rtt(struct tcpcb *tp, struct tcpopt *to, struct tcphdr *th) /* Compute the max of the pipeack samples */ pipe_ack_val = tcp_get_max_pipeack(tp); tp->t_pipeack = (pipe_ack_val > - TCP_CC_CWND_INIT_BYTES) ? + tcp_initial_cwnd(tp)) ? pipe_ack_val : 0; } /* start another measurement */ @@ -6044,7 +5727,7 @@ tcp_xmit_timer(struct tcpcb *tp, int rtt, (tsecr == 0 || TSTMP_GEQ(tsecr, tp->t_badrexmt_time))) { /* - * We received a new ACk after a + * We received a new ACK after a * spurious timeout. Adapt retransmission * timer as described in rfc 4015. 
*/ @@ -6123,6 +5806,7 @@ tcp_xmit_timer(struct tcpcb *tp, int rtt, */ tp->t_srtt = rtt << TCP_RTT_SHIFT; tp->t_rttvar = rtt << (TCP_RTTVAR_SHIFT - 1); + tp->t_rttbest = tp->t_srtt + tp->t_rttvar; } compute_rto: @@ -6185,7 +5869,6 @@ tcp_maxmtu(struct rtentry *rt) return maxmtu; } -#if INET6 static inline unsigned int tcp_maxmtu6(struct rtentry *rt) { @@ -6210,7 +5893,6 @@ tcp_maxmtu6(struct rtentry *rt) return maxmtu; } -#endif unsigned int get_maxmtu(struct rtentry *rt) @@ -6266,14 +5948,11 @@ tcp_mss(struct tcpcb *tp, int offer, unsigned int input_ifscope) u_int32_t bufsize; struct inpcb *inp; struct socket *so; - struct rmxp_tao *taop; int origoffer = offer; u_int32_t sb_max_corrected; int isnetlocal = 0; -#if INET6 int isipv6; int min_protoh; -#endif inp = tp->t_inpcb; @@ -6285,30 +5964,19 @@ tcp_mss(struct tcpcb *tp, int offer, unsigned int input_ifscope) return; } -#if INET6 isipv6 = ((inp->inp_vflag & INP_IPV6) != 0) ? 1 : 0; min_protoh = isipv6 ? sizeof(struct ip6_hdr) + sizeof(struct tcphdr) : sizeof(struct tcpiphdr); -#else -#define min_protoh (sizeof (struct tcpiphdr)) -#endif -#if INET6 if (isipv6) { rt = tcp_rtlookup6(inp, input_ifscope); - } else -#endif /* INET6 */ - { + } else { rt = tcp_rtlookup(inp, input_ifscope); } isnetlocal = (tp->t_flags & TF_LOCAL); if (rt == NULL) { - tp->t_maxopd = tp->t_maxseg = -#if INET6 - isipv6 ? tcp_v6mssdflt : -#endif /* INET6 */ - tcp_mssdflt; + tp->t_maxopd = tp->t_maxseg = isipv6 ? tcp_v6mssdflt : tcp_mssdflt; return; } ifp = rt->rt_ifp; @@ -6324,24 +5992,18 @@ tcp_mss(struct tcpcb *tp, int offer, unsigned int input_ifscope) tp->t_flags |= TF_SLOWLINK; } - taop = rmx_taop(rt->rt_rmx); /* - * Offer == -1 means that we didn't receive SYN yet, - * use cached value in that case; + * Offer == -1 means that we didn't receive SYN yet. Use 0 then. */ if (offer == -1) { - offer = taop->tao_mssopt; + offer = rt->rt_rmx.rmx_filler[0]; } /* * Offer == 0 means that there was no MSS on the SYN segment, * in this case we use tcp_mssdflt. */ if (offer == 0) { - offer = -#if INET6 - isipv6 ? tcp_v6mssdflt : -#endif /* INET6 */ - tcp_mssdflt; + offer = isipv6 ? tcp_v6mssdflt : tcp_mssdflt; } else { /* * Prevent DoS attack with too small MSS. Round up @@ -6356,7 +6018,7 @@ tcp_mss(struct tcpcb *tp, int offer, unsigned int input_ifscope) */ offer = max(offer, 64); } - taop->tao_mssopt = offer; + rt->rt_rmx.rmx_filler[0] = offer; /* * While we're here, check if there's an initial rtt @@ -6369,11 +6031,7 @@ tcp_mss(struct tcpcb *tp, int offer, unsigned int input_ifscope) tp->t_rttmin = isnetlocal ? tcp_TCPTV_MIN : TCPTV_REXMTMIN; } -#if INET6 mss = (isipv6 ? tcp_maxmtu6(rt) : tcp_maxmtu(rt)); -#else - mss = tcp_maxmtu(rt); -#endif #if NECP // At this point, the mss is just the MTU. Adjust if necessary. @@ -6383,14 +6041,11 @@ tcp_mss(struct tcpcb *tp, int offer, unsigned int input_ifscope) mss -= min_protoh; if (rt->rt_rmx.rmx_mtu == 0) { -#if INET6 if (isipv6) { if (!isnetlocal) { mss = min(mss, tcp_v6mssdflt); } - } else -#endif /* INET6 */ - if (!isnetlocal) { + } else if (!isnetlocal) { mss = min(mss, tcp_mssdflt); } } @@ -6507,31 +6162,20 @@ tcp_mssopt(struct tcpcb *tp) { struct rtentry *rt; int mss; -#if INET6 int isipv6; int min_protoh; -#endif -#if INET6 isipv6 = ((tp->t_inpcb->inp_vflag & INP_IPV6) != 0) ? 1 : 0; min_protoh = isipv6 ? 
sizeof(struct ip6_hdr) + sizeof(struct tcphdr) : sizeof(struct tcpiphdr); -#else -#define min_protoh (sizeof (struct tcpiphdr)) -#endif -#if INET6 if (isipv6) { rt = tcp_rtlookup6(tp->t_inpcb, IFSCOPE_NONE); - } else -#endif /* INET6 */ - rt = tcp_rtlookup(tp->t_inpcb, IFSCOPE_NONE); + } else { + rt = tcp_rtlookup(tp->t_inpcb, IFSCOPE_NONE); + } if (rt == NULL) { - return -#if INET6 - isipv6 ? tcp_v6mssdflt : -#endif /* INET6 */ - tcp_mssdflt; + return isipv6 ? tcp_v6mssdflt : tcp_mssdflt; } /* * Slower link window correction: @@ -6544,11 +6188,7 @@ tcp_mssopt(struct tcpcb *tp) tp->t_flags |= TF_SLOWLINK; } -#if INET6 mss = (isipv6 ? tcp_maxmtu6(rt) : tcp_maxmtu(rt)); -#else - mss = tcp_maxmtu(rt); -#endif /* Route locked during lookup above */ RT_UNLOCK(rt); @@ -6581,7 +6221,6 @@ tcp_newreno_partial_ack(struct tcpcb *tp, struct tcphdr *th) * is called) */ tp->snd_cwnd = tp->t_maxseg + BYTES_ACKED(th, tp); - tp->t_flags |= TF_ACKNOW; (void) tcp_output(tp); tp->snd_cwnd = ocwnd; if (SEQ_GT(onxt, tp->snd_nxt)) { @@ -6858,9 +6497,6 @@ tcp_set_recv_bg(struct socket *so) if (!IS_TCP_RECV_BG(so)) { so->so_flags1 |= SOF1_TRAFFIC_MGT_TCP_RECVBG; } - - /* Unset Large Receive Offload on background sockets */ - so_set_lro(so, SO_TC_BK); } void @@ -6869,18 +6505,57 @@ tcp_clear_recv_bg(struct socket *so) if (IS_TCP_RECV_BG(so)) { so->so_flags1 &= ~(SOF1_TRAFFIC_MGT_TCP_RECVBG); } +} + +void +inp_fc_throttle_tcp(struct inpcb *inp) +{ + struct tcpcb *tp = inp->inp_ppcb; + + if (!tcp_flow_control_response) { + return; + } /* - * Set/unset use of Large Receive Offload depending on - * the traffic class + * Back off the slow-start threshold and enter + * congestion avoidance phase */ - so_set_lro(so, so->so_traffic_class); + if (CC_ALGO(tp)->pre_fr != NULL) { + CC_ALGO(tp)->pre_fr(tp); + } } void inp_fc_unthrottle_tcp(struct inpcb *inp) { struct tcpcb *tp = inp->inp_ppcb; + + if (tcp_flow_control_response) { + if (CC_ALGO(tp)->post_fr != NULL) { + CC_ALGO(tp)->post_fr(tp, NULL); + } + + tp->t_bytes_acked = 0; + + /* + * Reset retransmit shift as we know that the reason + * for delay in sending a packet is due to flow + * control on the outgoing interface. There is no need + * to backoff retransmit timer. + */ + TCP_RESET_REXMT_STATE(tp); + + tp->t_flagsext &= ~TF_CWND_NONVALIDATED; + + /* + * Start the output stream again. Since we are + * not retransmitting data, do not reset the + * retransmit timer or rtt calculation. 
+ */ + tcp_output(tp); + return; + } + /* * Back off the slow-start threshold and enter * congestion avoidance phase @@ -6920,7 +6595,7 @@ tcp_getstat SYSCTL_HANDLER_ARGS int error; struct tcpstat *stat; stat = &tcpstat; -#if !CONFIG_EMBEDDED +#if XNU_TARGET_OS_OSX struct tcpstat zero_stat; if (tcp_disable_access_to_stats && @@ -6929,7 +6604,7 @@ tcp_getstat SYSCTL_HANDLER_ARGS stat = &zero_stat; } -#endif /* !CONFIG_EMBEDDED */ +#endif /* XNU_TARGET_OS_OSX */ if (req->oldptr == 0) { req->oldlen = (size_t)sizeof(struct tcpstat); @@ -6953,10 +6628,6 @@ tcp_input_checksum(int af, struct mbuf *m, struct tcphdr *th, int off, int tlen) struct ip *ip = mtod(m, struct ip *); struct ipovly *ipov = (struct ipovly *)ip; - if (m->m_pkthdr.pkt_flags & PKTF_SW_LRO_DID_CSUM) { - return 0; - } - /* ip_stripoptions() must have been called before we get here */ ASSERT((ip->ip_hl << 2) == sizeof(*ip)); @@ -7048,14 +6719,9 @@ tcp_input_checksum(int af, struct mbuf *m, struct tcphdr *th, int off, int tlen) } break; } -#if INET6 case AF_INET6: { struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *); - if (m->m_pkthdr.pkt_flags & PKTF_SW_LRO_DID_CSUM) { - return 0; - } - if ((hwcksum_rx || (ifp->if_flags & IFF_LOOPBACK) || (m->m_pkthdr.pkt_flags & PKTF_LOOP)) && (m->m_pkthdr.csum_flags & CSUM_DATA_VALID)) { @@ -7123,7 +6789,6 @@ tcp_input_checksum(int af, struct mbuf *m, struct tcphdr *th, int off, int tlen) } break; } -#endif /* INET6 */ default: VERIFY(0); /* NOTREACHED */ diff --git a/bsd/netinet/tcp_ledbat.c b/bsd/netinet/tcp_ledbat.c index f2a588b27..f345934a7 100644 --- a/bsd/netinet/tcp_ledbat.c +++ b/bsd/netinet/tcp_ledbat.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2010-2014 Apple Inc. All rights reserved. + * Copyright (c) 2010-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -37,9 +37,7 @@ #include #include -#if INET6 #include -#endif #include #include #include @@ -64,7 +62,7 @@ void tcp_ledbat_pre_fr(struct tcpcb *tp); void tcp_ledbat_post_fr(struct tcpcb *tp, struct tcphdr *th); void tcp_ledbat_after_idle(struct tcpcb *tp); void tcp_ledbat_after_timeout(struct tcpcb *tp); -int tcp_ledbat_delay_ack(struct tcpcb *tp, struct tcphdr *th); +static int tcp_ledbat_delay_ack(struct tcpcb *tp, struct tcphdr *th); void tcp_ledbat_switch_cc(struct tcpcb *tp, uint16_t old_cc_index); struct tcp_cc_algo tcp_cc_ledbat = { @@ -257,9 +255,9 @@ tcp_ledbat_ack_rcvd(struct tcpcb *tp, struct tcphdr *th) * greater than or equal to the congestion window. */ - u_int cw = tp->snd_cwnd; - u_int incr = tp->t_maxseg; - int acked = 0; + uint32_t cw = tp->snd_cwnd; + uint32_t incr = tp->t_maxseg; + uint32_t acked = 0; acked = BYTES_ACKED(th, tp); tp->t_bytes_acked += acked; @@ -279,10 +277,9 @@ tcp_ledbat_ack_rcvd(struct tcpcb *tp, struct tcphdr *th) */ u_int abc_lim; - abc_lim = (tcp_do_rfc3465_lim2 && - tp->snd_nxt == tp->snd_max) ? incr * 2 : incr; + abc_lim = (tp->snd_nxt == tp->snd_max) ? incr * 2 : incr; - incr = lmin(acked, abc_lim); + incr = ulmin(acked, abc_lim); } if (tp->t_bytes_acked >= cw) { tp->t_bytes_acked -= cw; @@ -315,7 +312,11 @@ tcp_ledbat_post_fr(struct tcpcb *tp, struct tcphdr *th) { int32_t ss; - ss = tp->snd_max - th->th_ack; + if (th) { + ss = tp->snd_max - th->th_ack; + } else { + ss = tp->snd_max - tp->snd_una; + } /* * Complete ack. 
Inflate the congestion window to @@ -390,14 +391,18 @@ tcp_ledbat_after_timeout(struct tcpcb *tp) * */ -int +static int tcp_ledbat_delay_ack(struct tcpcb *tp, struct tcphdr *th) { - if ((tp->t_flags & TF_RXWIN0SENT) == 0 && - (th->th_flags & TH_PUSH) == 0 && (tp->t_unacksegs == 1)) { - return 1; + if (tcp_ack_strategy == TCP_ACK_STRATEGY_MODERN) { + return tcp_cc_delay_ack(tp, th); + } else { + if ((tp->t_flags & TF_RXWIN0SENT) == 0 && + (th->th_flags & TH_PUSH) == 0 && (tp->t_unacksegs == 1)) { + return 1; + } + return 0; } - return 0; } /* Change a connection to use ledbat. First, lower bg_ssthresh value diff --git a/bsd/netinet/tcp_log.c b/bsd/netinet/tcp_log.c index 0fcdb4807..470a3366e 100644 --- a/bsd/netinet/tcp_log.c +++ b/bsd/netinet/tcp_log.c @@ -32,9 +32,7 @@ #include #include -#if INET6 #include -#endif /* INET6 */ #if !TCPDEBUG #define TCPSTATES @@ -199,7 +197,7 @@ tcp_log_is_rate_limited(void) } static void -tcp_log_inp_addresses(struct inpcb *inp, char *lbuf, size_t lbuflen, char *fbuf, size_t fbuflen) +tcp_log_inp_addresses(struct inpcb *inp, char *lbuf, socklen_t lbuflen, char *fbuf, socklen_t fbuflen) { /* * Ugly but %{private} does not work in the kernel version of os_log() @@ -572,7 +570,7 @@ tcp_log_connection_summary(struct tcpcb *tp) "rtt: %u.%u ms " \ "rttvar: %u.%u ms " \ "pkt rxmit: %u " \ - "ooo pkts: %u dup bytes in: %u " \ + "ooo pkts: %u dup bytes in: %u ACKs delayed: %u delayed ACKs sent: %u " \ "so_error: %d " \ "svc/tc: %u" @@ -586,7 +584,7 @@ tcp_log_connection_summary(struct tcpcb *tp) tp->t_srtt >> TCP_RTT_SHIFT, tp->t_srtt - ((tp->t_srtt >> TCP_RTT_SHIFT) << TCP_RTT_SHIFT), \ tp->t_rttvar >> TCP_RTTVAR_SHIFT, tp->t_rttvar - ((tp->t_rttvar >> TCP_RTTVAR_SHIFT) << TCP_RTTVAR_SHIFT), \ tp->t_stat.rxmitpkts, \ - tp->t_rcvoopack, tp->t_stat.rxduplicatebytes, \ + tp->t_rcvoopack, tp->t_stat.rxduplicatebytes, tp->t_stat.acks_delayed, tp->t_stat.delayed_acks_sent, \ so->so_error, \ (so->so_flags1 & SOF1_TC_NET_SERV_TYPE) ? 
so->so_netsvctype : so->so_traffic_class @@ -603,7 +601,7 @@ tcp_log_connection_summary(struct tcpcb *tp) static bool tcp_log_pkt_addresses(void *hdr, struct tcphdr *th, bool outgoing, - char *lbuf, size_t lbuflen, char *fbuf, size_t fbuflen) + char *lbuf, socklen_t lbuflen, char *fbuf, socklen_t fbuflen) { bool isipv6; uint8_t thflags; @@ -936,3 +934,4 @@ tcp_log_message(const char *func_name, int line_no, struct tcpcb *tp, const char #undef TCP_LOG_MESSAGE_FMT #undef TCP_LOG_MESSAGE_ARGS } + diff --git a/bsd/netinet/tcp_log.h b/bsd/netinet/tcp_log.h index 040948f92..6a3c10f8d 100644 --- a/bsd/netinet/tcp_log.h +++ b/bsd/netinet/tcp_log.h @@ -38,15 +38,11 @@ #include #include #include -#if INET6 #include -#endif #include #include -#if INET6 #include -#endif #include @@ -72,6 +68,7 @@ extern int tcp_log_privacy; X(TLEF_DROP_NECP, 0x1000, dropnecp) \ X(TLEF_DROP_PCB, 0x2000, droppcb) \ X(TLEF_DROP_PKT, 0x4000, droppkt) \ + X(TLEF_FSW_FLOW, 0x8000, fswflow) /* * Flag values for tcp_log_enabled @@ -97,7 +94,7 @@ extern void tcp_log_rt_rtt(const char *func_name, int line_no, struct tcpcb *tp, extern void tcp_log_rtt_change(const char *func_name, int line_no, struct tcpcb *tp, int old_srtt, int old_rttvar); extern void tcp_log_keepalive(const char *func_name, int line_no, struct tcpcb *tp, int32_t idle_time); extern void tcp_log_message(const char *func_name, int line_no, struct tcpcb *tp, const char *format, ...); - +extern void tcp_log_fsw_flow(const char *func_name, int line_no, struct tcpcb *tp, const char *format, ...); static inline bool tcp_is_log_enabled(struct tcpcb *tp, uint32_t req_flags) @@ -181,6 +178,9 @@ tcp_is_log_enabled(struct tcpcb *tp, uint32_t req_flags) (tcp_log_enable_flags & TLEF_DROP_PKT)) \ tcp_log_drop_pkt((hdr), (th), (ifp), (reason)) +#define TCP_LOG_FSW_FLOW(tp, format, ...) if (tcp_is_log_enabled(tp, TLEF_FSW_FLOW)) \ + tcp_log_fsw_flow(__func__, __LINE__, (tp), format, ##__VA_ARGS__) + #define TCP_LOG(tp, format, ...) \ tcp_log_message(__func__, __LINE__, tp, format, ## __VA_ARGS__) diff --git a/bsd/netinet/tcp_lro.c b/bsd/netinet/tcp_lro.c deleted file mode 100644 index 8aef977e3..000000000 --- a/bsd/netinet/tcp_lro.c +++ /dev/null @@ -1,927 +0,0 @@ -/* - * Copyright (c) 2011-2013 Apple Inc. All rights reserved. - * - * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * - * This file contains Original Code and/or Modifications of Original Code - * as defined in and that are subject to the Apple Public Source License - * Version 2.0 (the 'License'). You may not use this file except in - * compliance with the License. The rights granted to you under the License - * may not be used to create, or enable the creation or redistribution of, - * unlawful or unlicensed copies of an Apple operating system, or to - * circumvent, violate, or enable the circumvention or violation of, any - * terms of an Apple operating system software license agreement. - * - * Please obtain a copy of the License at - * http://www.opensource.apple.com/apsl/ and read it before using this file. - * - * The Original Code and all software distributed under the License are - * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER - * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, - * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. - * Please see the License for the specific language governing rights and - * limitations under the License. 
- * - * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -unsigned int lrocount = 0; /* A counter used for debugging only */ -unsigned int lro_seq_outoforder = 0; /* Counter for debugging */ -unsigned int lro_seq_mismatch = 0; /* Counter for debugging */ -unsigned int lro_flushes = 0; /* Counter for tracking number of flushes */ -unsigned int lro_single_flushes = 0; -unsigned int lro_double_flushes = 0; -unsigned int lro_good_flushes = 0; - -unsigned int coalesc_sz = LRO_MX_COALESCE_PKTS; -SYSCTL_INT(_net_inet_tcp, OID_AUTO, lro_sz, CTLFLAG_RW | CTLFLAG_LOCKED, - &coalesc_sz, 0, "Max coalescing size"); - -unsigned int coalesc_time = LRO_MX_TIME_TO_BUFFER; -SYSCTL_INT(_net_inet_tcp, OID_AUTO, lro_time, CTLFLAG_RW | CTLFLAG_LOCKED, - &coalesc_time, 0, "Max coalescing time"); - -struct lro_flow lro_flow_list[TCP_LRO_NUM_FLOWS]; - -char lro_flow_map[TCP_LRO_FLOW_MAP]; - -static lck_attr_t *tcp_lro_mtx_attr = NULL; /* mutex attributes */ -static lck_grp_t *tcp_lro_mtx_grp = NULL; /* mutex group */ -static lck_grp_attr_t *tcp_lro_mtx_grp_attr = NULL; /* mutex group attrs */ -decl_lck_mtx_data(, tcp_lro_lock); /* Used to synchronize updates */ - -unsigned int lro_byte_count = 0; - -uint64_t lro_deadline = 0; /* LRO's sense of time - protected by tcp_lro_lock */ -uint32_t lro_timer_set = 0; - -/* Some LRO stats */ -u_int32_t lro_pkt_count = 0; /* Number of packets encountered in an LRO period */ -thread_call_t tcp_lro_timer; - -extern u_int32_t kipf_count; - -static void tcp_lro_timer_proc(void*, void*); -static void lro_update_stats(struct mbuf*); -static void lro_update_flush_stats(struct mbuf *); -static void tcp_lro_flush_flows(void); -static void tcp_lro_sched_timer(uint64_t); -static void lro_proto_input(struct mbuf *); - -static struct mbuf *lro_tcp_xsum_validate(struct mbuf*, struct ip *, - struct tcphdr*); -static struct mbuf *tcp_lro_process_pkt(struct mbuf*, int); - -void -tcp_lro_init(void) -{ - int i; - - bzero(lro_flow_list, sizeof(struct lro_flow) * TCP_LRO_NUM_FLOWS); - for (i = 0; i < TCP_LRO_FLOW_MAP; i++) { - lro_flow_map[i] = TCP_LRO_FLOW_UNINIT; - } - - /* - * allocate lock group attribute, group and attribute for tcp_lro_lock - */ - tcp_lro_mtx_grp_attr = lck_grp_attr_alloc_init(); - tcp_lro_mtx_grp = lck_grp_alloc_init("tcplro", tcp_lro_mtx_grp_attr); - tcp_lro_mtx_attr = lck_attr_alloc_init(); - lck_mtx_init(&tcp_lro_lock, tcp_lro_mtx_grp, tcp_lro_mtx_attr); - - tcp_lro_timer = thread_call_allocate(tcp_lro_timer_proc, NULL); - if (tcp_lro_timer == NULL) { - panic_plain("%s: unable to allocate lro timer", __func__); - } - - return; -} - -static int -tcp_lro_matching_tuple(struct ip* ip_hdr, struct tcphdr *tcp_hdr, int *hash, - int *flow_id ) -{ - struct lro_flow *flow; - tcp_seq seqnum; - unsigned int off = 0; - int payload_len = 0; - - *hash = LRO_HASH(ip_hdr->ip_src.s_addr, ip_hdr->ip_dst.s_addr, - tcp_hdr->th_sport, tcp_hdr->th_dport, (TCP_LRO_FLOW_MAP - 1)); - - *flow_id = lro_flow_map[*hash]; - if (*flow_id == TCP_LRO_FLOW_NOTFOUND) { - return TCP_LRO_NAN; - } - - seqnum = tcp_hdr->th_seq; - off = tcp_hdr->th_off << 2; - payload_len = ip_hdr->ip_len - off; - - flow = &lro_flow_list[*flow_id]; - - if ((flow->lr_faddr.s_addr == ip_hdr->ip_src.s_addr) && - (flow->lr_laddr.s_addr == ip_hdr->ip_dst.s_addr) && - (flow->lr_fport == tcp_hdr->th_sport) 
&& - (flow->lr_lport == tcp_hdr->th_dport)) { - if (flow->lr_tcphdr == NULL) { - if (ntohl(seqnum) == flow->lr_seq) { - return TCP_LRO_COALESCE; - } - if (lrodebug >= 4) { - printf("%s: seqnum = %x, lr_seq = %x\n", - __func__, ntohl(seqnum), flow->lr_seq); - } - lro_seq_mismatch++; - if (SEQ_GT(ntohl(seqnum), flow->lr_seq)) { - lro_seq_outoforder++; - /* - * Whenever we receive out of order packets it - * signals loss and recovery and LRO doesn't - * let flows recover quickly. So eject. - */ - flow->lr_flags |= LRO_EJECT_REQ; - } - return TCP_LRO_NAN; - } - - if (flow->lr_flags & LRO_EJECT_REQ) { - if (lrodebug) { - printf("%s: eject. \n", __func__); - } - return TCP_LRO_EJECT_FLOW; - } - if (SEQ_GT(tcp_hdr->th_ack, flow->lr_tcphdr->th_ack)) { - if (lrodebug) { - printf("%s: th_ack = %x flow_ack = %x \n", - __func__, tcp_hdr->th_ack, - flow->lr_tcphdr->th_ack); - } - return TCP_LRO_EJECT_FLOW; - } - - if (ntohl(seqnum) == (ntohl(lro_flow_list[*flow_id].lr_tcphdr->th_seq) + lro_flow_list[*flow_id].lr_len)) { - return TCP_LRO_COALESCE; - } else { - /* LRO does not handle loss recovery well, eject */ - flow->lr_flags |= LRO_EJECT_REQ; - return TCP_LRO_EJECT_FLOW; - } - } - if (lrodebug) { - printf("tcp_lro_matching_tuple: collision \n"); - } - return TCP_LRO_COLLISION; -} - -static void -tcp_lro_init_flow(int flow_id, struct ip* ip_hdr, struct tcphdr *tcp_hdr, - int hash, u_int32_t timestamp, int payload_len) -{ - struct lro_flow *flow = NULL; - - flow = &lro_flow_list[flow_id]; - - flow->lr_hash_map = hash; - flow->lr_faddr.s_addr = ip_hdr->ip_src.s_addr; - flow->lr_laddr.s_addr = ip_hdr->ip_dst.s_addr; - flow->lr_fport = tcp_hdr->th_sport; - flow->lr_lport = tcp_hdr->th_dport; - lro_flow_map[hash] = flow_id; - flow->lr_timestamp = timestamp; - flow->lr_seq = ntohl(tcp_hdr->th_seq) + payload_len; - flow->lr_flags = 0; - return; -} - -static void -tcp_lro_coalesce(int flow_id, struct mbuf *lro_mb, struct tcphdr *tcphdr, - int payload_len, int drop_hdrlen, struct tcpopt *topt, - u_int32_t* tsval, u_int32_t* tsecr, int thflags) -{ - struct lro_flow *flow = NULL; - struct mbuf *last; - struct ip *ip = NULL; - - flow = &lro_flow_list[flow_id]; - if (flow->lr_mhead) { - if (lrodebug) { - printf("%s: lr_mhead %x %d \n", __func__, flow->lr_seq, - payload_len); - } - m_adj(lro_mb, drop_hdrlen); - - last = flow->lr_mtail; - while (last->m_next != NULL) { - last = last->m_next; - } - last->m_next = lro_mb; - - flow->lr_mtail = lro_mb; - - ip = mtod(flow->lr_mhead, struct ip *); - ip->ip_len += lro_mb->m_pkthdr.len; - flow->lr_mhead->m_pkthdr.len += lro_mb->m_pkthdr.len; - - if (flow->lr_len == 0) { - panic_plain("%s: Inconsistent LRO flow state", __func__); - } - flow->lr_len += payload_len; - flow->lr_seq += payload_len; - /* - * This bit is re-OR'd each time a packet is added to the - * large coalesced packet. - */ - flow->lr_mhead->m_pkthdr.pkt_flags |= PKTF_SW_LRO_PKT; - flow->lr_mhead->m_pkthdr.lro_npkts++; /* for tcpstat.tcps_rcvpack */ - if (flow->lr_mhead->m_pkthdr.lro_pktlen < - lro_mb->m_pkthdr.lro_pktlen) { - /* - * For TCP Inter Arrival Jitter calculation, return max - * size encountered while coalescing a stream of pkts. 
- */ - flow->lr_mhead->m_pkthdr.lro_pktlen = - lro_mb->m_pkthdr.lro_pktlen; - } - /* Update the timestamp value */ - if (topt->to_flags & TOF_TS) { - if ((flow->lr_tsval) && - (TSTMP_GT(topt->to_tsval, ntohl(*(flow->lr_tsval))))) { - *(flow->lr_tsval) = htonl(topt->to_tsval); - } - if ((flow->lr_tsecr) && - (topt->to_tsecr != 0) && - (TSTMP_GT(topt->to_tsecr, ntohl(*(flow->lr_tsecr))))) { - if (lrodebug >= 2) { - printf("%s: instantaneous RTT = %d \n", __func__, - topt->to_tsecr - ntohl(*(flow->lr_tsecr))); - } - *(flow->lr_tsecr) = htonl(topt->to_tsecr); - } - } - /* Coalesce the flags */ - if (thflags) { - flow->lr_tcphdr->th_flags |= thflags; - } - /* Update receive window */ - flow->lr_tcphdr->th_win = tcphdr->th_win; - } else { - if (lro_mb) { - flow->lr_mhead = flow->lr_mtail = lro_mb; - flow->lr_mhead->m_pkthdr.pkt_flags |= PKTF_SW_LRO_PKT; - flow->lr_tcphdr = tcphdr; - if ((topt) && (topt->to_flags & TOF_TS)) { - ASSERT(tsval != NULL); - ASSERT(tsecr != NULL); - flow->lr_tsval = tsval; - flow->lr_tsecr = tsecr; - } - flow->lr_len = payload_len; - calculate_tcp_clock(); - flow->lr_timestamp = tcp_now; - tcp_lro_sched_timer(0); - } - flow->lr_seq = ntohl(tcphdr->th_seq) + payload_len; - } - if (lro_mb) { - tcpstat.tcps_coalesced_pack++; - } - return; -} - -static struct mbuf * -tcp_lro_eject_flow(int flow_id) -{ - struct mbuf *mb = NULL; - - mb = lro_flow_list[flow_id].lr_mhead; - ASSERT(lro_flow_map[lro_flow_list[flow_id].lr_hash_map] == flow_id); - lro_flow_map[lro_flow_list[flow_id].lr_hash_map] = TCP_LRO_FLOW_UNINIT; - bzero(&lro_flow_list[flow_id], sizeof(struct lro_flow)); - - return mb; -} - -static struct mbuf* -tcp_lro_eject_coalesced_pkt(int flow_id) -{ - struct mbuf *mb = NULL; - mb = lro_flow_list[flow_id].lr_mhead; - lro_flow_list[flow_id].lr_mhead = - lro_flow_list[flow_id].lr_mtail = NULL; - lro_flow_list[flow_id].lr_tcphdr = NULL; - return mb; -} - -static struct mbuf* -tcp_lro_insert_flow(struct mbuf *lro_mb, struct ip *ip_hdr, - struct tcphdr *tcp_hdr, int payload_len, - int drop_hdrlen, int hash, struct tcpopt *topt, - u_int32_t *tsval, u_int32_t *tsecr) -{ - int i; - int slot_available = 0; - int candidate_flow = 0; - u_int32_t oldest_timestamp; - struct mbuf *mb = NULL; - int collision = 0; - - oldest_timestamp = tcp_now; - - /* handle collision */ - if (lro_flow_map[hash] != TCP_LRO_FLOW_UNINIT) { - if (lrodebug) { - collision = 1; - } - candidate_flow = lro_flow_map[hash]; - tcpstat.tcps_flowtbl_collision++; - goto kick_flow; - } - - for (i = 0; i < TCP_LRO_NUM_FLOWS; i++) { - if (lro_flow_list[i].lr_mhead == NULL) { - candidate_flow = i; - slot_available = 1; - break; - } - if (oldest_timestamp >= lro_flow_list[i].lr_timestamp) { - candidate_flow = i; - oldest_timestamp = lro_flow_list[i].lr_timestamp; - } - } - - if (!slot_available) { - tcpstat.tcps_flowtbl_full++; -kick_flow: - /* kick the oldest flow */ - mb = tcp_lro_eject_flow(candidate_flow); - - if (lrodebug) { - if (!slot_available) { - printf("%s: slot unavailable.\n", __func__); - } - if (collision) { - printf("%s: collision.\n", __func__); - } - } - } else { - candidate_flow = i; /* this is now the flow to be used */ - } - - tcp_lro_init_flow(candidate_flow, ip_hdr, tcp_hdr, hash, - tcp_now, payload_len); - tcp_lro_coalesce(candidate_flow, lro_mb, tcp_hdr, payload_len, - drop_hdrlen, topt, tsval, tsecr, 0); - return mb; -} - -struct mbuf* -tcp_lro_process_pkt(struct mbuf *lro_mb, int drop_hdrlen) -{ - int flow_id = TCP_LRO_FLOW_UNINIT; - int hash; - unsigned int off = 0; - int eject_flow = 0; - int 
optlen; - int retval = 0; - struct mbuf *mb = NULL; - int payload_len = 0; - u_char *optp = NULL; - int thflags = 0; - struct tcpopt to; - int ret_response = TCP_LRO_CONSUMED; - int coalesced = 0, tcpflags = 0, unknown_tcpopts = 0; - u_int8_t ecn; - struct ip *ip_hdr; - struct tcphdr *tcp_hdr; - - if (lro_mb->m_len < drop_hdrlen) { - if ((lro_mb = m_pullup(lro_mb, drop_hdrlen)) == NULL) { - tcpstat.tcps_rcvshort++; - m_freem(lro_mb); - if (lrodebug) { - printf("tcp_lro_process_pkt:mbuf too short.\n"); - } - return NULL; - } - } - - ip_hdr = mtod(lro_mb, struct ip*); - tcp_hdr = (struct tcphdr *)((caddr_t)ip_hdr + sizeof(struct ip)); - - /* Just in case */ - lro_mb->m_pkthdr.pkt_flags &= ~PKTF_SW_LRO_DID_CSUM; - - if ((lro_mb = lro_tcp_xsum_validate(lro_mb, ip_hdr, tcp_hdr)) == NULL) { - if (lrodebug) { - printf("tcp_lro_process_pkt: TCP xsum failed.\n"); - } - return NULL; - } - - /* Update stats */ - lro_pkt_count++; - - /* Avoids checksumming in tcp_input */ - lro_mb->m_pkthdr.pkt_flags |= PKTF_SW_LRO_DID_CSUM; - - off = tcp_hdr->th_off << 2; - optlen = off - sizeof(struct tcphdr); - payload_len = ip_hdr->ip_len - off; - optp = (u_char *)(tcp_hdr + 1); - /* - * Do quick retrieval of timestamp options ("options - * prediction?"). If timestamp is the only option and it's - * formatted as recommended in RFC 1323 appendix A, we - * quickly get the values now and not bother calling - * tcp_dooptions(), etc. - */ - bzero(&to, sizeof(to)); - if ((optlen == TCPOLEN_TSTAMP_APPA || - (optlen > TCPOLEN_TSTAMP_APPA && - optp[TCPOLEN_TSTAMP_APPA] == TCPOPT_EOL)) && - *(u_int32_t *)optp == htonl(TCPOPT_TSTAMP_HDR) && - (tcp_hdr->th_flags & TH_SYN) == 0) { - to.to_flags |= TOF_TS; - to.to_tsval = ntohl(*(u_int32_t *)(void *)(optp + 4)); - to.to_tsecr = ntohl(*(u_int32_t *)(void *)(optp + 8)); - } else { - /* - * If TCP timestamps are not in use, or not the first option, - * skip LRO path since timestamps are used to avoid LRO - * from introducing additional latencies for retransmissions - * and other slow-paced transmissions. - */ - to.to_flags = to.to_tsecr = 0; - eject_flow = 1; - } - - /* list all the conditions that can trigger a flow ejection here */ - - thflags = tcp_hdr->th_flags; - if (thflags & (TH_SYN | TH_URG | TH_ECE | TH_CWR | TH_PUSH | TH_RST | TH_FIN)) { - eject_flow = tcpflags = 1; - } - - if (optlen && !((optlen == TCPOLEN_TSTAMP_APPA) && - (to.to_flags & TOF_TS))) { - eject_flow = unknown_tcpopts = 1; - } - - if (payload_len <= LRO_MIN_COALESC_SZ) { /* zero payload ACK */ - eject_flow = 1; - } - - /* Can't coalesce ECN marked packets. */ - ecn = ip_hdr->ip_tos & IPTOS_ECN_MASK; - if (ecn == IPTOS_ECN_CE) { - /* - * ECN needs quick notification - */ - if (lrodebug) { - printf("%s: ECE bits set.\n", __func__); - } - eject_flow = 1; - } - - lck_mtx_lock_spin(&tcp_lro_lock); - - retval = tcp_lro_matching_tuple(ip_hdr, tcp_hdr, &hash, &flow_id); - - switch (retval) { - case TCP_LRO_NAN: - lck_mtx_unlock(&tcp_lro_lock); - ret_response = TCP_LRO_FLOW_NOTFOUND; - break; - - case TCP_LRO_COALESCE: - if ((payload_len != 0) && (unknown_tcpopts == 0) && - (tcpflags == 0) && (ecn != IPTOS_ECN_CE) && (to.to_flags & TOF_TS)) { - tcp_lro_coalesce(flow_id, lro_mb, tcp_hdr, payload_len, - drop_hdrlen, &to, - (to.to_flags & TOF_TS) ? (u_int32_t *)(void *)(optp + 4) : NULL, - (to.to_flags & TOF_TS) ? (u_int32_t *)(void *)(optp + 8) : NULL, - thflags); - if (lrodebug >= 2) { - printf("tcp_lro_process_pkt: coalesce len = %d. 
flow_id = %d payload_len = %d drop_hdrlen = %d optlen = %d lport = %d seqnum = %x.\n", - lro_flow_list[flow_id].lr_len, flow_id, - payload_len, drop_hdrlen, optlen, - ntohs(lro_flow_list[flow_id].lr_lport), - ntohl(tcp_hdr->th_seq)); - } - if (lro_flow_list[flow_id].lr_mhead->m_pkthdr.lro_npkts >= coalesc_sz) { - eject_flow = 1; - } - coalesced = 1; - } - if (eject_flow) { - mb = tcp_lro_eject_coalesced_pkt(flow_id); - lro_flow_list[flow_id].lr_seq = ntohl(tcp_hdr->th_seq) + - payload_len; - calculate_tcp_clock(); - u_int8_t timestamp = tcp_now - lro_flow_list[flow_id].lr_timestamp; - lck_mtx_unlock(&tcp_lro_lock); - if (mb) { - mb->m_pkthdr.lro_elapsed = timestamp; - lro_proto_input(mb); - } - if (!coalesced) { - if (lrodebug >= 2) { - printf("%s: pkt payload_len = %d \n", __func__, payload_len); - } - lro_proto_input(lro_mb); - } - } else { - lck_mtx_unlock(&tcp_lro_lock); - } - break; - - case TCP_LRO_EJECT_FLOW: - mb = tcp_lro_eject_coalesced_pkt(flow_id); - calculate_tcp_clock(); - u_int8_t timestamp = tcp_now - lro_flow_list[flow_id].lr_timestamp; - lck_mtx_unlock(&tcp_lro_lock); - if (mb) { - if (lrodebug) { - printf("tcp_lro_process_pkt eject_flow, len = %d\n", mb->m_pkthdr.len); - } - mb->m_pkthdr.lro_elapsed = timestamp; - lro_proto_input(mb); - } - - lro_proto_input(lro_mb); - break; - - case TCP_LRO_COLLISION: - lck_mtx_unlock(&tcp_lro_lock); - ret_response = TCP_LRO_FLOW_NOTFOUND; - break; - - default: - lck_mtx_unlock(&tcp_lro_lock); - panic_plain("%s: unrecognized type %d", __func__, retval); - } - - if (ret_response == TCP_LRO_FLOW_NOTFOUND) { - lro_proto_input(lro_mb); - } - return NULL; -} - -static void -tcp_lro_timer_proc(void *arg1, void *arg2) -{ -#pragma unused(arg1, arg2) - - lck_mtx_lock_spin(&tcp_lro_lock); - lro_timer_set = 0; - lck_mtx_unlock(&tcp_lro_lock); - tcp_lro_flush_flows(); -} - -static void -tcp_lro_flush_flows(void) -{ - int i = 0; - struct mbuf *mb; - struct lro_flow *flow; - int tcpclock_updated = 0; - - lck_mtx_lock(&tcp_lro_lock); - - while (i < TCP_LRO_NUM_FLOWS) { - flow = &lro_flow_list[i]; - if (flow->lr_mhead != NULL) { - if (!tcpclock_updated) { - calculate_tcp_clock(); - tcpclock_updated = 1; - } - - if (lrodebug >= 2) { - printf("tcp_lro_flush_flows: len =%d n_pkts = %d %d %d \n", - flow->lr_len, - flow->lr_mhead->m_pkthdr.lro_npkts, - flow->lr_timestamp, tcp_now); - } - - u_int8_t timestamp = tcp_now - flow->lr_timestamp; - - mb = tcp_lro_eject_flow(i); - - if (mb) { - mb->m_pkthdr.lro_elapsed = timestamp; - lck_mtx_unlock(&tcp_lro_lock); - lro_update_flush_stats(mb); - lro_proto_input(mb); - lck_mtx_lock(&tcp_lro_lock); - } - } - i++; - } - lck_mtx_unlock(&tcp_lro_lock); -} - -/* - * Must be called with tcp_lro_lock held. - * The hint is non-zero for longer waits. 
The wait time dictated by coalesc_time - * takes precedence, so lro_timer_set is not set for the hint case - */ -static void -tcp_lro_sched_timer(uint64_t hint) -{ - if (lro_timer_set) { - return; - } - - lro_timer_set = 1; - if (!hint) { - /* the intent is to wake up every coalesc_time msecs */ - clock_interval_to_deadline(coalesc_time, - (NSEC_PER_SEC / TCP_RETRANSHZ), &lro_deadline); - } else { - clock_interval_to_deadline(hint, NSEC_PER_SEC / TCP_RETRANSHZ, - &lro_deadline); - } - thread_call_enter_delayed(tcp_lro_timer, lro_deadline); -} - -struct mbuf* -tcp_lro(struct mbuf *m, unsigned int hlen) -{ - struct ip *ip_hdr; - unsigned int tlen; - struct tcphdr * tcp_hdr = NULL; - unsigned int off = 0; - - if (kipf_count != 0) { - return m; - } - - /* - * Experiments on cellular show that the RTT is much higher - * than the coalescing time of 5 msecs, causing lro to flush - * 80% of the time on a single packet. Increasing - * coalescing time for cellular does not show marked - * improvement to throughput either. Loopback perf is hurt - * by the 5 msec latency and it already sends large packets. - */ - if (IFNET_IS_CELLULAR(m->m_pkthdr.rcvif) || - (m->m_pkthdr.rcvif->if_type == IFT_LOOP)) { - return m; - } - - ip_hdr = mtod(m, struct ip*); - - /* don't deal with IP options */ - if (hlen != sizeof(struct ip)) { - return m; - } - - /* only TCP is coalesced */ - if (ip_hdr->ip_p != IPPROTO_TCP) { - return m; - } - - if (m->m_len < (int32_t) sizeof(struct tcpiphdr)) { - if (lrodebug) { - printf("tcp_lro m_pullup \n"); - } - if ((m = m_pullup(m, sizeof(struct tcpiphdr))) == NULL) { - tcpstat.tcps_rcvshort++; - if (lrodebug) { - printf("ip_lro: rcvshort.\n"); - } - return NULL; - } - ip_hdr = mtod(m, struct ip*); - } - - tcp_hdr = (struct tcphdr *)((caddr_t)ip_hdr + hlen); - tlen = ip_hdr->ip_len; //ignore IP header bytes len - m->m_pkthdr.lro_pktlen = tlen; /* Used to return max pkt encountered to tcp */ - m->m_pkthdr.lro_npkts = 1; /* Initialize a counter to hold num pkts coalesced */ - m->m_pkthdr.lro_elapsed = 0; /* Initialize the field to carry elapsed time */ - off = tcp_hdr->th_off << 2; - if (off < sizeof(struct tcphdr) || off > tlen) { - tcpstat.tcps_rcvbadoff++; - if (lrodebug) { - printf("ip_lro: TCP off greater than TCP header.\n"); - } - return m; - } - - return tcp_lro_process_pkt(m, hlen + off); -} - -static void -lro_proto_input(struct mbuf *m) -{ - struct ip* ip_hdr = mtod(m, struct ip*); - - if (lrodebug >= 3) { - printf("lro_proto_input: ip_len = %d \n", - ip_hdr->ip_len); - } - lro_update_stats(m); - ip_proto_dispatch_in_wrapper(m, ip_hdr->ip_hl << 2, ip_hdr->ip_p); -} - -static struct mbuf * -lro_tcp_xsum_validate(struct mbuf *m, struct ip *ip, struct tcphdr * th) -{ - /* Expect 32-bit aligned data pointer on strict-align platforms */ - MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(m); - - /* we shouldn't get here for IP with options; hence sizeof (ip) */ - if (tcp_input_checksum(AF_INET, m, th, sizeof(*ip), ip->ip_len)) { - if (lrodebug) { - printf("%s: bad xsum and drop m = 0x%llx.\n", __func__, - (uint64_t)VM_KERNEL_ADDRPERM(m)); - } - m_freem(m); - return NULL; - } - - return m; -} - -/* - * When TCP detects a stable, steady flow without out of ordering, - * with a sufficiently high cwnd, it invokes LRO. 
- */ -int -tcp_start_coalescing(struct ip *ip_hdr, struct tcphdr *tcp_hdr, int tlen) -{ - int hash; - int flow_id; - struct mbuf *eject_mb; - struct lro_flow *lf; - - hash = LRO_HASH(ip_hdr->ip_src.s_addr, ip_hdr->ip_dst.s_addr, - tcp_hdr->th_sport, tcp_hdr->th_dport, - (TCP_LRO_FLOW_MAP - 1)); - - - lck_mtx_lock_spin(&tcp_lro_lock); - flow_id = lro_flow_map[hash]; - if (flow_id != TCP_LRO_FLOW_NOTFOUND) { - lf = &lro_flow_list[flow_id]; - if ((lf->lr_faddr.s_addr == ip_hdr->ip_src.s_addr) && - (lf->lr_laddr.s_addr == ip_hdr->ip_dst.s_addr) && - (lf->lr_fport == tcp_hdr->th_sport) && - (lf->lr_lport == tcp_hdr->th_dport)) { - if ((lf->lr_tcphdr == NULL) && - (lf->lr_seq != (tcp_hdr->th_seq + tlen))) { - lf->lr_seq = tcp_hdr->th_seq + tlen; - } - lf->lr_flags &= ~LRO_EJECT_REQ; - } - lck_mtx_unlock(&tcp_lro_lock); - return 0; - } - - HTONL(tcp_hdr->th_seq); - HTONL(tcp_hdr->th_ack); - eject_mb = - tcp_lro_insert_flow(NULL, ip_hdr, tcp_hdr, tlen, 0, hash, - NULL, NULL, NULL); - - lck_mtx_unlock(&tcp_lro_lock); - - NTOHL(tcp_hdr->th_seq); - NTOHL(tcp_hdr->th_ack); - if (lrodebug >= 3) { - printf("%s: src = %x dst = %x sport = %d dport = %d seq %x \n", - __func__, ip_hdr->ip_src.s_addr, ip_hdr->ip_dst.s_addr, - tcp_hdr->th_sport, tcp_hdr->th_dport, tcp_hdr->th_seq); - } - ASSERT(eject_mb == NULL); - return 0; -} - -/* - * When TCP detects loss or idle condition, it stops offloading - * to LRO. - */ -int -tcp_lro_remove_state(struct in_addr saddr, struct in_addr daddr, - unsigned short sport, unsigned short dport) -{ - int hash, flow_id; - struct lro_flow *lf; - - hash = LRO_HASH(daddr.s_addr, saddr.s_addr, dport, sport, - (TCP_LRO_FLOW_MAP - 1)); - lck_mtx_lock_spin(&tcp_lro_lock); - flow_id = lro_flow_map[hash]; - if (flow_id == TCP_LRO_FLOW_UNINIT) { - lck_mtx_unlock(&tcp_lro_lock); - return 0; - } - lf = &lro_flow_list[flow_id]; - if ((lf->lr_faddr.s_addr == daddr.s_addr) && - (lf->lr_laddr.s_addr == saddr.s_addr) && - (lf->lr_fport == dport) && - (lf->lr_lport == sport)) { - if (lrodebug) { - printf("%s: %x %x\n", __func__, - lf->lr_flags, lf->lr_seq); - } - lf->lr_flags |= LRO_EJECT_REQ; - } - lck_mtx_unlock(&tcp_lro_lock); - return 0; -} - -void -tcp_update_lro_seq(__uint32_t rcv_nxt, struct in_addr saddr, struct in_addr daddr, - unsigned short sport, unsigned short dport) -{ - int hash, flow_id; - struct lro_flow *lf; - - hash = LRO_HASH(daddr.s_addr, saddr.s_addr, dport, sport, - (TCP_LRO_FLOW_MAP - 1)); - lck_mtx_lock_spin(&tcp_lro_lock); - flow_id = lro_flow_map[hash]; - if (flow_id == TCP_LRO_FLOW_UNINIT) { - lck_mtx_unlock(&tcp_lro_lock); - return; - } - lf = &lro_flow_list[flow_id]; - if ((lf->lr_faddr.s_addr == daddr.s_addr) && - (lf->lr_laddr.s_addr == saddr.s_addr) && - (lf->lr_fport == dport) && - (lf->lr_lport == sport) && - (lf->lr_tcphdr == NULL)) { - lf->lr_seq = (tcp_seq)rcv_nxt; - } - lck_mtx_unlock(&tcp_lro_lock); - return; -} - -static void -lro_update_stats(struct mbuf *m) -{ - switch (m->m_pkthdr.lro_npkts) { - case 0: /* fall through */ - case 1: - break; - - case 2: - tcpstat.tcps_lro_twopack++; - break; - - case 3: /* fall through */ - case 4: - tcpstat.tcps_lro_multpack++; - break; - - default: - tcpstat.tcps_lro_largepack++; - break; - } - return; -} - -static void -lro_update_flush_stats(struct mbuf *m) -{ - lro_flushes++; - switch (m->m_pkthdr.lro_npkts) { - case 0: ASSERT(0); - case 1: lro_single_flushes++; - break; - case 2: lro_double_flushes++; - break; - default: lro_good_flushes++; - break; - } - return; -} diff --git a/bsd/netinet/tcp_lro.h 
b/bsd/netinet/tcp_lro.h deleted file mode 100644 index 50ef3d841..000000000 --- a/bsd/netinet/tcp_lro.h +++ /dev/null @@ -1,81 +0,0 @@ -/* - * Copyright (c) 2011 Apple Inc. All rights reserved. - * - * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * - * This file contains Original Code and/or Modifications of Original Code - * as defined in and that are subject to the Apple Public Source License - * Version 2.0 (the 'License'). You may not use this file except in - * compliance with the License. The rights granted to you under the License - * may not be used to create, or enable the creation or redistribution of, - * unlawful or unlicensed copies of an Apple operating system, or to - * circumvent, violate, or enable the circumvention or violation of, any - * terms of an Apple operating system software license agreement. - * - * Please obtain a copy of the License at - * http://www.opensource.apple.com/apsl/ and read it before using this file. - * - * The Original Code and all software distributed under the License are - * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER - * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, - * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. - * Please see the License for the specific language governing rights and - * limitations under the License. - * - * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ - */ - -#ifndef TCP_LRO_H_ -#define TCP_LRO_H_ - -#ifdef BSD_KERNEL_PRIVATE - -#define TCP_LRO_NUM_FLOWS (16) /* must be <= 255 for char lro_flow_map */ -#define TCP_LRO_FLOW_MAP (1024) - -struct lro_flow { - struct mbuf *lr_mhead; /* coalesced mbuf chain head */ - struct mbuf *lr_mtail; /* coalesced mbuf chain tail */ - struct tcphdr *lr_tcphdr; /* ptr to TCP hdr in frame */ - u_int32_t *lr_tsval; /* address of tsval in frame */ - u_int32_t *lr_tsecr; /* tsecr field in TCP header */ - tcp_seq lr_seq; /* next expected seq num */ - unsigned int lr_len; /* length of LRO frame */ - struct in_addr lr_faddr; /* foreign address */ - struct in_addr lr_laddr; /* local address */ - unsigned short int lr_fport; /* foreign port */ - unsigned short int lr_lport; /* local port */ - u_int32_t lr_timestamp; /* for ejecting the flow */ - unsigned short int lr_hash_map; /* back pointer to hash map */ - unsigned short int lr_flags; /* pad */ -} __attribute__((aligned(8))); - -/* lr_flags - only 16 bits available */ -#define LRO_EJECT_REQ 0x1 - - -#define TCP_LRO_FLOW_UNINIT TCP_LRO_NUM_FLOWS+1 -#define TCP_LRO_FLOW_NOTFOUND TCP_LRO_FLOW_UNINIT - -/* Max packets to be coalesced before pushing to app */ -#define LRO_MX_COALESCE_PKTS (8) - -/* - * Min num of bytes in a packet to trigger coalescing - */ -#define LRO_MIN_COALESC_SZ (1300) - -/* - * Max amount of time to wait before flushing flows in msecs. - * Units are in msecs. - * This number has been carefully chosen and should be altered with care. 
- */ -#define LRO_MX_TIME_TO_BUFFER 10 - -/* similar to INP_PCBHASH */ -#define LRO_HASH(faddr, laddr, fport, lport, mask) \ - (((faddr) ^ ((laddr) >> 16) ^ ntohs((lport) ^ (fport))) & (mask)) -#endif - -#endif /* TCP_LRO_H_ */ diff --git a/bsd/netinet/tcp_newreno.c b/bsd/netinet/tcp_newreno.c index 7e9e778ac..2da158e2d 100644 --- a/bsd/netinet/tcp_newreno.c +++ b/bsd/netinet/tcp_newreno.c @@ -71,9 +71,7 @@ #include #include -#if INET6 #include -#endif #include #include #include @@ -180,47 +178,32 @@ tcp_newreno_ack_rcvd(struct tcpcb *tp, struct tcphdr *th) * greater than or equal to the congestion window. */ - u_int cw = tp->snd_cwnd; - u_int incr = tp->t_maxseg; - int acked = 0; + uint32_t cw = tp->snd_cwnd; + uint32_t incr = tp->t_maxseg; + uint32_t acked = 0; acked = BYTES_ACKED(th, tp); - if (tcp_do_rfc3465) { - if (cw >= tp->snd_ssthresh) { - tp->t_bytes_acked += acked; - if (tp->t_bytes_acked >= cw) { - /* Time to increase the window. */ - tp->t_bytes_acked -= cw; - } else { - /* No need to increase yet. */ - incr = 0; - } + if (cw >= tp->snd_ssthresh) { + tp->t_bytes_acked += acked; + if (tp->t_bytes_acked >= cw) { + /* Time to increase the window. */ + tp->t_bytes_acked -= cw; } else { - /* - * If the user explicitly enables RFC3465 - * use 2*SMSS for the "L" param. Otherwise - * use the more conservative 1*SMSS. - * - * (See RFC 3465 2.3 Choosing the Limit) - */ - uint32_t abc_lim; - abc_lim = (tcp_do_rfc3465_lim2 && - tp->snd_nxt == tp->snd_max) ? incr * 2 - : incr; - - incr = lmin(acked, abc_lim); + /* No need to increase yet. */ + incr = 0; } } else { /* - * If the window gives us less than ssthresh packets - * in flight, open exponentially (segsz per packet). - * Otherwise open linearly: segsz per window - * (segsz^2 / cwnd per packet). + * If the user explicitly enables RFC3465 + * use 2*SMSS for the "L" param. Otherwise + * use the more conservative 1*SMSS. + * + * (See RFC 3465 2.3 Choosing the Limit) */ + uint32_t abc_lim; + abc_lim = (tp->snd_nxt == tp->snd_max) ? incr * 2 : incr; - if (cw >= tp->snd_ssthresh) { - incr = max((incr * incr / cw), 1); - } + incr = ulmin(acked, abc_lim); } tp->snd_cwnd = min(cw + incr, TCP_MAXWIN << tp->snd_scale); } @@ -244,7 +227,11 @@ tcp_newreno_post_fr(struct tcpcb *tp, struct tcphdr *th) { int32_t ss; - ss = tp->snd_max - th->th_ack; + if (th) { + ss = tp->snd_max - th->th_ack; + } else { + ss = tp->snd_max - tp->snd_una; + } /* * Complete ack. Inflate the congestion window to @@ -351,7 +338,7 @@ tcp_newreno_switch_cc(struct tcpcb *tp, uint16_t old_index) } else { cwnd = cwnd / 2 / tp->t_maxseg; } - tp->snd_cwnd = max(TCP_CC_CWND_INIT_BYTES, cwnd * tp->t_maxseg); + tp->snd_cwnd = max(tcp_initial_cwnd(tp), cwnd * tp->t_maxseg); /* Start counting bytes for RFC 3465 again */ tp->t_bytes_acked = 0; diff --git a/bsd/netinet/tcp_output.c b/bsd/netinet/tcp_output.c index 7c1988f1f..84fe091cc 100644 --- a/bsd/netinet/tcp_output.c +++ b/bsd/netinet/tcp_output.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2019 Apple Inc. All rights reserved. + * Copyright (c) 2000-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -67,7 +67,7 @@ * Version 2.0. 
*/ -#define _IP_VHL +#define _IP_VHL #include @@ -95,13 +95,11 @@ #include #include #include -#if INET6 #include #include #include -#endif #include -#define TCPOUTFLAGS +#define TCPOUTFLAGS #include #include #include @@ -120,43 +118,43 @@ #include #endif /*IPSEC*/ -#if CONFIG_MACF_NET -#include -#endif /* MAC_SOCKET */ - -#include #if MPTCP #include #include #include +#include #endif #include -#define DBG_LAYER_BEG NETDBG_CODE(DBG_NETTCP, 1) -#define DBG_LAYER_END NETDBG_CODE(DBG_NETTCP, 3) -#define DBG_FNC_TCP_OUTPUT NETDBG_CODE(DBG_NETTCP, (4 << 8) | 1) +#define DBG_LAYER_BEG NETDBG_CODE(DBG_NETTCP, 1) +#define DBG_LAYER_END NETDBG_CODE(DBG_NETTCP, 3) +#define DBG_FNC_TCP_OUTPUT NETDBG_CODE(DBG_NETTCP, (4 << 8) | 1) SYSCTL_SKMEM_TCP_INT(OID_AUTO, path_mtu_discovery, - CTLFLAG_RW | CTLFLAG_LOCKED, int, path_mtu_discovery, 1, - "Enable Path MTU Discovery"); - -SYSCTL_SKMEM_TCP_INT(OID_AUTO, slowstart_flightsize, - CTLFLAG_RW | CTLFLAG_LOCKED, int, ss_fltsz, 1, - "Slow start flight size"); + CTLFLAG_RW | CTLFLAG_LOCKED, int, path_mtu_discovery, 1, + "Enable Path MTU Discovery"); SYSCTL_SKMEM_TCP_INT(OID_AUTO, local_slowstart_flightsize, - CTLFLAG_RW | CTLFLAG_LOCKED, int, ss_fltsz_local, 8, - "Slow start flight size for local networks"); + CTLFLAG_RW | CTLFLAG_LOCKED, int, ss_fltsz_local, 8, + "Slow start flight size for local networks"); -int tcp_do_tso = 1; +int tcp_do_tso = 1; SYSCTL_INT(_net_inet_tcp, OID_AUTO, tso, CTLFLAG_RW | CTLFLAG_LOCKED, - &tcp_do_tso, 0, "Enable TCP Segmentation Offload"); + &tcp_do_tso, 0, "Enable TCP Segmentation Offload"); SYSCTL_SKMEM_TCP_INT(OID_AUTO, ecn_setup_percentage, CTLFLAG_RW | CTLFLAG_LOCKED, int, tcp_ecn_setup_percentage, 100, "Max ECN setup percentage"); +SYSCTL_SKMEM_TCP_INT(OID_AUTO, do_ack_compression, + CTLFLAG_RW | CTLFLAG_LOCKED, int, tcp_do_ack_compression, 1, + "Enable TCP ACK compression (on (cell only): 1, off: 0, on (all interfaces): 2)"); + +SYSCTL_SKMEM_TCP_INT(OID_AUTO, ack_compression_rate, + CTLFLAG_RW | CTLFLAG_LOCKED, int, tcp_ack_compression_rate, TCP_COMP_CHANGE_RATE, + "Rate at which we force sending new ACKs (in ms)"); + static int sysctl_change_ecn_setting SYSCTL_HANDLER_ARGS { @@ -166,8 +164,9 @@ sysctl_change_ecn_setting SYSCTL_HANDLER_ARGS err = sysctl_io_number(req, tcp_ecn_outbound, sizeof(int32_t), &i, &changed); - if (err != 0 || req->newptr == USER_ADDR_NULL) + if (err != 0 || req->newptr == USER_ADDR_NULL) { return err; + } if (changed) { if ((tcp_ecn_outbound == 0 || tcp_ecn_outbound == 1) && @@ -185,10 +184,9 @@ sysctl_change_ecn_setting SYSCTL_HANDLER_ARGS ifnet_head_lock_shared(); TAILQ_FOREACH(ifp, &ifnet_head, if_link) { if (!IFNET_IS_CELLULAR(ifp)) { - ifnet_lock_exclusive(ifp); - ifp->if_eflags &= ~IFEF_ECN_DISABLE; - ifp->if_eflags &= ~IFEF_ECN_ENABLE; - ifnet_lock_done(ifp); + if_clear_eflags(ifp, + IFEF_ECN_ENABLE | + IFEF_ECN_DISABLE); } } ifnet_head_done(); @@ -200,10 +198,8 @@ sysctl_change_ecn_setting SYSCTL_HANDLER_ARGS ifnet_head_lock_shared(); TAILQ_FOREACH(ifp, &ifnet_head, if_link) { if (!IFNET_IS_CELLULAR(ifp)) { - ifnet_lock_exclusive(ifp); - ifp->if_eflags |= IFEF_ECN_ENABLE; - ifp->if_eflags &= ~IFEF_ECN_DISABLE; - ifnet_lock_done(ifp); + if_set_eflags(ifp, IFEF_ECN_ENABLE); + if_clear_eflags(ifp, IFEF_ECN_DISABLE); } } ifnet_head_done(); @@ -232,52 +228,40 @@ SYSCTL_PROC(_net_inet_tcp, OID_AUTO, ecn_negotiate_in, "Initiate ECN for inbound connections"); SYSCTL_SKMEM_TCP_INT(OID_AUTO, packetchain, - CTLFLAG_RW | CTLFLAG_LOCKED, int, tcp_packet_chaining, 50, - "Enable TCP output packet 
chaining"); + CTLFLAG_RW | CTLFLAG_LOCKED, int, tcp_packet_chaining, 50, + "Enable TCP output packet chaining"); SYSCTL_SKMEM_TCP_INT(OID_AUTO, socket_unlocked_on_output, - CTLFLAG_RW | CTLFLAG_LOCKED, int, tcp_output_unlocked, 1, - "Unlock TCP when sending packets down to IP"); - -SYSCTL_SKMEM_TCP_INT(OID_AUTO, rfc3390, - CTLFLAG_RW | CTLFLAG_LOCKED, int, tcp_do_rfc3390, 1, - "Calculate intial slowstart cwnd depending on MSS"); + CTLFLAG_RW | CTLFLAG_LOCKED, int, tcp_output_unlocked, 1, + "Unlock TCP when sending packets down to IP"); SYSCTL_SKMEM_TCP_INT(OID_AUTO, min_iaj_win, - CTLFLAG_RW | CTLFLAG_LOCKED, int, tcp_min_iaj_win, MIN_IAJ_WIN, - "Minimum recv win based on inter-packet arrival jitter"); + CTLFLAG_RW | CTLFLAG_LOCKED, int, tcp_min_iaj_win, MIN_IAJ_WIN, + "Minimum recv win based on inter-packet arrival jitter"); SYSCTL_SKMEM_TCP_INT(OID_AUTO, acc_iaj_react_limit, - CTLFLAG_RW | CTLFLAG_LOCKED, int, tcp_acc_iaj_react_limit, - ACC_IAJ_REACT_LIMIT, "Accumulated IAJ when receiver starts to react"); - -SYSCTL_SKMEM_TCP_INT(OID_AUTO, doautosndbuf, - CTLFLAG_RW | CTLFLAG_LOCKED, uint32_t, tcp_do_autosendbuf, 1, - "Enable send socket buffer auto-tuning"); + CTLFLAG_RW | CTLFLAG_LOCKED, int, tcp_acc_iaj_react_limit, + ACC_IAJ_REACT_LIMIT, "Accumulated IAJ when receiver starts to react"); SYSCTL_SKMEM_TCP_INT(OID_AUTO, autosndbufinc, - CTLFLAG_RW | CTLFLAG_LOCKED, uint32_t, tcp_autosndbuf_inc, - 8 * 1024, "Increment in send socket bufffer size"); + CTLFLAG_RW | CTLFLAG_LOCKED, uint32_t, tcp_autosndbuf_inc, + 8 * 1024, "Increment in send socket bufffer size"); SYSCTL_SKMEM_TCP_INT(OID_AUTO, autosndbufmax, - CTLFLAG_RW | CTLFLAG_LOCKED, uint32_t, tcp_autosndbuf_max, 512 * 1024, - "Maximum send socket buffer size"); - -SYSCTL_SKMEM_TCP_INT(OID_AUTO, ack_prioritize, - CTLFLAG_RW | CTLFLAG_LOCKED, uint32_t, tcp_prioritize_acks, 1, - "Prioritize pure acks"); + CTLFLAG_RW | CTLFLAG_LOCKED, uint32_t, tcp_autosndbuf_max, 2 * 1024 * 1024, + "Maximum send socket buffer size"); SYSCTL_SKMEM_TCP_INT(OID_AUTO, rtt_recvbg, - CTLFLAG_RW | CTLFLAG_LOCKED, uint32_t, tcp_use_rtt_recvbg, 1, - "Use RTT for bg recv algorithm"); + CTLFLAG_RW | CTLFLAG_LOCKED, uint32_t, tcp_use_rtt_recvbg, 1, + "Use RTT for bg recv algorithm"); SYSCTL_SKMEM_TCP_INT(OID_AUTO, recv_throttle_minwin, - CTLFLAG_RW | CTLFLAG_LOCKED, uint32_t, tcp_recv_throttle_minwin, 16 * 1024, - "Minimum recv win for throttling"); + CTLFLAG_RW | CTLFLAG_LOCKED, uint32_t, tcp_recv_throttle_minwin, 16 * 1024, + "Minimum recv win for throttling"); SYSCTL_SKMEM_TCP_INT(OID_AUTO, enable_tlp, - CTLFLAG_RW | CTLFLAG_LOCKED, - int32_t, tcp_enable_tlp, 1, "Enable Tail loss probe"); + CTLFLAG_RW | CTLFLAG_LOCKED, + int32_t, tcp_enable_tlp, 1, "Enable Tail loss probe"); static int32_t packchain_newlist = 0; static int32_t packchain_looped = 0; @@ -288,28 +272,25 @@ static int32_t packchain_sent = 0; extern int ipsec_bypass; #endif -extern int slowlink_wsize; /* window correction for slow links */ -#if IPFIREWALL -extern int fw_enable; /* firewall check for packet chaining */ -extern int fw_bypass; /* firewall check: disable packet chaining if there is rules */ -#endif /* IPFIREWALL */ +extern int slowlink_wsize; /* window correction for slow links */ extern u_int32_t dlil_filter_disable_tso_count; extern u_int32_t kipf_count; static int tcp_ip_output(struct socket *, struct tcpcb *, struct mbuf *, int, struct mbuf *, int, int, boolean_t); -static struct mbuf* tcp_send_lroacks(struct tcpcb *tp, struct mbuf *m, struct tcphdr *th); static int 
tcp_recv_throttle(struct tcpcb *tp); -static int32_t tcp_tfo_check(struct tcpcb *tp, int32_t len) +static int32_t +tcp_tfo_check(struct tcpcb *tp, int32_t len) { struct socket *so = tp->t_inpcb->inp_socket; unsigned int optlen = 0; unsigned int cookie_len; - if (tp->t_flags & TF_NOOPT) + if (tp->t_flags & TF_NOOPT) { goto fallback; + } if (!(tp->t_flagsext & TF_FASTOPEN_FORCE_ENABLE) && !tcp_heuristic_do_tfo(tp)) { @@ -318,45 +299,54 @@ static int32_t tcp_tfo_check(struct tcpcb *tp, int32_t len) goto fallback; } - if (so->so_flags1 & SOF1_DATA_AUTHENTICATED) + if (so->so_flags1 & SOF1_DATA_AUTHENTICATED) { return len; + } optlen += TCPOLEN_MAXSEG; - if (tp->t_flags & TF_REQ_SCALE) + if (tp->t_flags & TF_REQ_SCALE) { optlen += 4; + } #if MPTCP if ((so->so_flags & SOF_MP_SUBFLOW) && mptcp_enable && (tp->t_rxtshift <= mptcp_mpcap_retries || - (tptomptp(tp)->mpt_mpte->mpte_flags & MPTE_FORCE_ENABLE))) + (tptomptp(tp)->mpt_mpte->mpte_flags & MPTE_FORCE_ENABLE))) { optlen += sizeof(struct mptcp_mpcapable_opt_common) + sizeof(mptcp_key_t); + } #endif /* MPTCP */ - if (tp->t_flags & TF_REQ_TSTMP) + if (tp->t_flags & TF_REQ_TSTMP) { optlen += TCPOLEN_TSTAMP_APPA; + } - if (SACK_ENABLED(tp)) + if (SACK_ENABLED(tp)) { optlen += TCPOLEN_SACK_PERMITTED; + } /* Now, decide whether to use TFO or not */ /* Don't even bother trying if there is no space at all... */ - if (MAX_TCPOPTLEN - optlen < TCPOLEN_FASTOPEN_REQ) + if (MAX_TCPOPTLEN - optlen < TCPOLEN_FASTOPEN_REQ) { goto fallback; + } cookie_len = tcp_cache_get_cookie_len(tp); - if (cookie_len == 0) + if (cookie_len == 0) { /* No cookie, so we request one */ return 0; + } /* There is not enough space for the cookie, so we cannot do TFO */ - if (MAX_TCPOPTLEN - optlen < cookie_len) + if (MAX_TCPOPTLEN - optlen < cookie_len) { goto fallback; + } /* Do not send SYN+data if there is more in the queue than MSS */ - if (so->so_snd.sb_cc > (tp->t_maxopd - MAX_TCPOPTLEN)) + if (so->so_snd.sb_cc > (tp->t_maxopd - MAX_TCPOPTLEN)) { goto fallback; + } /* Ok, everything looks good. 
We can go on and do TFO */ return len; @@ -367,16 +357,17 @@ fallback: } /* Returns the number of bytes written to the TCP option-space */ -static unsigned -tcp_tfo_write_cookie_rep(struct tcpcb *tp, unsigned optlen, u_char *opt) +static unsigned int +tcp_tfo_write_cookie_rep(struct tcpcb *tp, unsigned int optlen, u_char *opt) { u_char out[CCAES_BLOCK_SIZE]; unsigned ret = 0; u_char *bp; if ((MAX_TCPOPTLEN - optlen) < - (TCPOLEN_FASTOPEN_REQ + TFO_COOKIE_LEN_DEFAULT)) + (TCPOLEN_FASTOPEN_REQ + TFO_COOKIE_LEN_DEFAULT)) { return ret; + } tcp_tfo_gen_cookie(tp->t_inpcb, out, sizeof(out)); @@ -393,16 +384,21 @@ tcp_tfo_write_cookie_rep(struct tcpcb *tp, unsigned optlen, u_char *opt) return ret; } -static unsigned -tcp_tfo_write_cookie(struct tcpcb *tp, unsigned optlen, int32_t len, - u_char *opt) +static unsigned int +tcp_tfo_write_cookie(struct tcpcb *tp, unsigned int optlen, int32_t len, + u_char *opt) { - u_int8_t tfo_len = MAX_TCPOPTLEN - optlen - TCPOLEN_FASTOPEN_REQ; + uint8_t tfo_len; struct socket *so = tp->t_inpcb->inp_socket; unsigned ret = 0; int res; u_char *bp; + if (TCPOLEN_FASTOPEN_REQ > MAX_TCPOPTLEN - optlen) { + return 0; + } + tfo_len = (uint8_t)(MAX_TCPOPTLEN - optlen - TCPOLEN_FASTOPEN_REQ); + if (so->so_flags1 & SOF1_DATA_AUTHENTICATED) { /* If there is some data, let's track it */ if (len > 0) { @@ -449,11 +445,9 @@ tcp_tfo_write_cookie(struct tcpcb *tp, unsigned optlen, int32_t len, } static inline bool -tcp_send_ecn_flags_on_syn(struct tcpcb *tp, struct socket *so) +tcp_send_ecn_flags_on_syn(struct tcpcb *tp) { - return !((tp->ecn_flags & TE_SETUPSENT || - (so->so_flags & SOF_MP_SUBFLOW) || - (tfo_enabled(tp)))); + return !(tp->ecn_flags & TE_SETUPSENT); } void @@ -502,8 +496,9 @@ tcp_set_ecn(struct tcpcb *tp, struct ifnet *ifp) return; check_heuristic: - if (!tcp_heuristic_do_ecn(tp)) + if (!tcp_heuristic_do_ecn(tp)) { tp->ecn_flags &= ~TE_ENABLE_ECN; + } /* * If the interface setting, system-level setting and heuristics @@ -516,9 +511,35 @@ check_heuristic: * Use the random value in iss for randomizing * this selection */ - if ((tp->iss % 100) >= tcp_ecn_setup_percentage) + if ((tp->iss % 100) >= tcp_ecn_setup_percentage) { tp->ecn_flags &= ~TE_ENABLE_ECN; + } + } +} + +int +tcp_flight_size(struct tcpcb *tp) +{ + int ret; + + VERIFY(tp->sackhint.sack_bytes_acked >= 0); + VERIFY(tp->sackhint.sack_bytes_rexmit >= 0); + + /* + * RFC6675, SetPipe (), SACK'd bytes are discounted. All the rest is still in-flight. + */ + ret = tp->snd_nxt - tp->snd_una - tp->sackhint.sack_bytes_acked; + + if (ret < 0) { + /* + * This happens when the RTO-timer fires because snd_nxt gets artificially + * decreased. If we then receive some SACK-blogs, sack_bytes_acked is + * going to be high. 
+ */ + ret = 0; } + + return ret; } /* @@ -553,16 +574,14 @@ tcp_output(struct tcpcb *tp) struct inpcb *inp = tp->t_inpcb; struct socket *so = inp->inp_socket; int32_t len, recwin, sendwin, off; - int flags, error; + uint8_t flags; + int error; struct mbuf *m; struct ip *ip = NULL; - struct ipovly *ipov = NULL; -#if INET6 struct ip6_hdr *ip6 = NULL; -#endif /* INET6 */ struct tcphdr *th; u_char opt[TCP_MAXOLEN]; - unsigned ipoptlen, optlen, hdrlen; + unsigned int ipoptlen, optlen, hdrlen; int idle, sendalot, lost = 0; int i, sack_rxmit; int tso = 0; @@ -570,23 +589,17 @@ tcp_output(struct tcpcb *tp) tcp_seq old_snd_nxt = 0; struct sackhole *p; #if IPSEC - unsigned ipsec_optlen = 0; + unsigned int ipsec_optlen = 0; #endif /* IPSEC */ int idle_time = 0; struct mbuf *packetlist = NULL; struct mbuf *tp_inp_options = inp->inp_depend4.inp4_options; -#if INET6 - int isipv6 = inp->inp_vflag & INP_IPV6 ; -#else - int isipv6 = 0; -#endif - short packchain_listadd = 0; + int isipv6 = inp->inp_vflag & INP_IPV6; + int packchain_listadd = 0; int so_options = so->so_options; struct rtentry *rt; u_int32_t svc_flags = 0, allocated_len; - u_int32_t lro_ackmore = (tp->t_lropktlen != 0) ? 1 : 0; - struct mbuf *mnext = NULL; - int sackoptlen = 0; + unsigned int sackoptlen = 0; #if MPTCP boolean_t mptcp_acknow; #endif /* MPTCP */ @@ -595,6 +608,8 @@ tcp_output(struct tcpcb *tp) boolean_t wired = FALSE; boolean_t sack_rescue_rxt = FALSE; int sotc = so->so_traffic_class; + boolean_t do_not_compress = FALSE; + boolean_t sack_rxmted = FALSE; /* * Determine length of data that should be transmitted, @@ -620,11 +635,13 @@ tcp_output(struct tcpcb *tp) * Do some other tasks that need to be done after * idle time */ - if (!SLIST_EMPTY(&tp->t_rxt_segments)) + if (!SLIST_EMPTY(&tp->t_rxt_segments)) { tcp_rxtseg_clean(tp); + } /* If stretch ack was auto-disabled, re-evaluate it */ tcp_cc_after_idle_stretchack(tp); + tp->t_forced_acks = TCP_FORCED_ACKS_COUNT; } tp->t_flags &= ~TF_LASTIDLE; if (idle) { @@ -649,25 +666,22 @@ again: #if MPTCP mptcp_acknow = FALSE; #endif + do_not_compress = FALSE; - KERNEL_DEBUG(DBG_FNC_TCP_OUTPUT | DBG_FUNC_START, 0,0,0,0,0); + KERNEL_DEBUG(DBG_FNC_TCP_OUTPUT | DBG_FUNC_START, 0, 0, 0, 0, 0); -#if INET6 if (isipv6) { KERNEL_DEBUG(DBG_LAYER_BEG, - ((inp->inp_fport << 16) | inp->inp_lport), - (((inp->in6p_laddr.s6_addr16[0] & 0xffff) << 16) | - (inp->in6p_faddr.s6_addr16[0] & 0xffff)), - sendalot,0,0); - } else -#endif - - { + ((inp->inp_fport << 16) | inp->inp_lport), + (((inp->in6p_laddr.s6_addr16[0] & 0xffff) << 16) | + (inp->in6p_faddr.s6_addr16[0] & 0xffff)), + sendalot, 0, 0); + } else { KERNEL_DEBUG(DBG_LAYER_BEG, - ((inp->inp_fport << 16) | inp->inp_lport), - (((inp->inp_laddr.s_addr & 0xffff) << 16) | - (inp->inp_faddr.s_addr & 0xffff)), - sendalot,0,0); + ((inp->inp_fport << 16) | inp->inp_lport), + (((inp->inp_laddr.s_addr & 0xffff) << 16) | + (inp->inp_faddr.s_addr & 0xffff)), + sendalot, 0, 0); } /* * If the route generation id changed, we need to check that our @@ -692,12 +706,14 @@ again: if (isipv6) { ia6 = ifa_foraddr6(&inp->in6p_laddr); - if (ia6 != NULL) + if (ia6 != NULL) { found_srcaddr = 1; + } } else { ia = ifa_foraddr(inp->inp_laddr.s_addr); - if (ia != NULL) + if (ia != NULL) { found_srcaddr = 1; + } } /* check that the source address is still valid */ @@ -725,8 +741,9 @@ again: } } - if (tp->t_pktlist_head != NULL) + if (tp->t_pktlist_head != NULL) { m_freem_list(tp->t_pktlist_head); + } TCP_PKTLIST_CLEAR(tp); /* drop connection if source address isn't available */ @@ 
-738,11 +755,13 @@ again: return 0; /* silently ignore, keep data in socket: address may be back */ } } - if (ia != NULL) + if (ia != NULL) { IFA_REMREF(&ia->ia_ifa); + } - if (ia6 != NULL) + if (ia6 != NULL) { IFA_REMREF(&ia6->ia_ifa); + } /* * Address is still valid; check for multipages capability @@ -755,8 +774,9 @@ again: soif2kcl(so, (ifp->if_eflags & IFEF_2KCL)); tcp_set_ecn(tp, ifp); } - if (rt->rt_flags & RTF_UP) + if (rt->rt_flags & RTF_UP) { RT_GENID_SYNC(rt); + } /* * See if we should do MTU discovery. Don't do it if: * 1) it is disabled via the sysctl @@ -767,10 +787,11 @@ again: if (!path_mtu_discovery || ((rt != NULL) && (!(rt->rt_flags & RTF_UP) || - (rt->rt_rmx.rmx_locks & RTV_MTU)))) + (rt->rt_rmx.rmx_locks & RTV_MTU)))) { tp->t_flags &= ~TF_PMTUD; - else + } else { tp->t_flags |= TF_PMTUD; + } RT_UNLOCK(rt); } @@ -786,14 +807,16 @@ again: * snd_nxt. There may be SACK information that allows us to avoid * resending already delivered data. Adjust snd_nxt accordingly. */ - if (SACK_ENABLED(tp) && SEQ_LT(tp->snd_nxt, tp->snd_max)) + if (SACK_ENABLED(tp) && SEQ_LT(tp->snd_nxt, tp->snd_max)) { tcp_sack_adjust(tp); + } sendalot = 0; off = tp->snd_nxt - tp->snd_una; sendwin = min(tp->snd_wnd, tp->snd_cwnd); - if (tp->t_flags & TF_SLOWLINK && slowlink_wsize > 0) + if (tp->t_flags & TF_SLOWLINK && slowlink_wsize > 0) { sendwin = min(sendwin, slowlink_wsize); + } flags = tcp_outflags[tp->t_state]; /* @@ -815,9 +838,20 @@ again: (p = tcp_sack_output(tp, &sack_bytes_rxmt))) { int32_t cwin; - cwin = min(tp->snd_wnd, tp->snd_cwnd) - sack_bytes_rxmt; - if (cwin < 0) + if (tcp_do_better_lr) { + cwin = min(tp->snd_wnd, tp->snd_cwnd) - tcp_flight_size(tp); + if (cwin <= 0 && sack_rxmted == FALSE) { + /* Allow to clock out at least on per period */ + cwin = tp->t_maxseg; + } + + sack_rxmted = TRUE; + } else { + cwin = min(tp->snd_wnd, tp->snd_cwnd) - sack_bytes_rxmt; + } + if (cwin < 0) { cwin = 0; + } /* Do not retransmit SACK segments beyond snd_recover */ if (SEQ_GT(p->end, tp->snd_recover)) { /* @@ -834,10 +868,11 @@ again: */ p = NULL; goto after_sack_rexmit; - } else + } else { /* Can rexmit part of the current hole */ len = ((int32_t)min(cwin, - tp->snd_recover - p->rxmit)); + tp->snd_recover - p->rxmit)); + } } else { len = ((int32_t)min(cwin, p->end - p->rxmit)); } @@ -845,6 +880,9 @@ again: off = p->rxmit - tp->snd_una; sack_rxmit = 1; sendalot = 1; + /* Everything sent after snd_nxt will allow us to account for fast-retransmit of the retransmitted segment */ + tp->send_highest_sack = tp->snd_nxt; + tp->t_new_dupacks = 0; tcpstat.tcps_sack_rexmits++; tcpstat.tcps_sack_rexmit_bytes += min(len, tp->t_maxseg); @@ -857,10 +895,9 @@ after_sack_rexmit: * Get standard flags, and add SYN or FIN if requested by 'hidden' * state flags. */ - if (tp->t_flags & TF_NEEDFIN) + if (tp->t_flags & TF_NEEDFIN) { flags |= TH_FIN; - if (tp->t_flags & TF_NEEDSYN) - flags |= TH_SYN; + } /* * If in persist timeout with window of 0, send 1 byte. @@ -886,8 +923,9 @@ after_sack_rexmit: * to send then the probe will be the FIN * itself. 
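With tcp_do_better_lr enabled, the SACK retransmit budget in the hunk above is derived from an RFC 6675-style pipe estimate (tcp_flight_size) rather than from sack_bytes_rexmit, and at least one segment is clocked out per pass. A rough sketch of that accounting, with illustrative helper names and sequence wraparound ignored:

#include <stdint.h>

/* Bytes believed to still be in the network, as in tcp_flight_size() above. */
static int32_t
flight_size_sketch(uint32_t snd_nxt, uint32_t snd_una, uint32_t sack_bytes_acked)
{
	int32_t pipe = (int32_t)(snd_nxt - snd_una - sack_bytes_acked);
	return pipe > 0 ? pipe : 0;   /* can briefly go negative after an RTO */
}

/* Budget for one SACK retransmission pass under the improved loss recovery. */
static int32_t
sack_rxmit_budget_sketch(uint32_t snd_wnd, uint32_t snd_cwnd,
    uint32_t snd_nxt, uint32_t snd_una, uint32_t sack_bytes_acked,
    uint32_t maxseg, int first_rxmit_this_pass)
{
	uint32_t win = snd_wnd < snd_cwnd ? snd_wnd : snd_cwnd;
	int32_t cwin = (int32_t)win -
	    flight_size_sketch(snd_nxt, snd_una, sack_bytes_acked);

	if (cwin <= 0 && first_rxmit_this_pass) {
		cwin = (int32_t)maxseg;   /* clock out at least one segment per pass */
	}
	return cwin > 0 ? cwin : 0;
}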
*/ - if (off < so->so_snd.sb_cc) + if (off < so->so_snd.sb_cc) { flags &= ~TH_FIN; + } sendwin = 1; } else { tp->t_timer[TCPT_PERSIST] = 0; @@ -917,18 +955,22 @@ after_sack_rexmit: } else { int32_t cwin; - cwin = tp->snd_cwnd - - (tp->snd_nxt - tp->sack_newdata) - - sack_bytes_rxmt; - if (cwin < 0) + if (tcp_do_better_lr) { + cwin = tp->snd_cwnd - tcp_flight_size(tp); + } else { + cwin = tp->snd_cwnd - + (tp->snd_nxt - tp->sack_newdata) - + sack_bytes_rxmt; + } + if (cwin < 0) { cwin = 0; - /* + } + /* * We are inside of a SACK recovery episode and are * sending new data, having retransmitted all the * data possible in the scoreboard. */ - len = min(so->so_snd.sb_cc, tp->snd_wnd) - - off; + len = min(so->so_snd.sb_cc, tp->snd_wnd) - off; /* * Don't remove this (len > 0) check ! * We explicitly check for len > 0 here (although it @@ -962,8 +1004,9 @@ after_sack_rexmit: * must have been advanced to cover it. */ if ((tp->t_flags & TF_SENTFIN) && - tp->snd_max == tp->snd_recover) + tp->snd_max == tp->snd_recover) { tp->snd_nxt--; + } off = tp->snd_nxt - tp->snd_una; sendalot = 0; @@ -991,7 +1034,7 @@ after_sack_rexmit: len++; if (len > 0 && tp->t_state == TCPS_SYN_SENT) { while (inp->inp_sndinprog_cnt == 0 && - tp->t_pktlist_head != NULL) { + tp->t_pktlist_head != NULL) { packetlist = tp->t_pktlist_head; packchain_listadd = tp->t_lastchain; packchain_sent++; @@ -1009,14 +1052,14 @@ after_sack_rexmit: * resume close */ if (inp->inp_sndinprog_cnt == 0 && - (tp->t_flags & TF_CLOSING)) { + (tp->t_flags & TF_CLOSING)) { tp->t_flags &= ~TF_CLOSING; (void) tcp_close(tp); } else { tcp_check_timer_state(tp); } KERNEL_DEBUG(DBG_FNC_TCP_OUTPUT | DBG_FUNC_END, - 0,0,0,0,0); + 0, 0, 0, 0, 0); return 0; } } @@ -1039,11 +1082,13 @@ after_sack_rexmit: /* * Don't send a RST with data. */ - if (flags & TH_RST) + if (flags & TH_RST) { len = 0; + } - if ((flags & TH_SYN) && tp->t_state <= TCPS_SYN_SENT && tfo_enabled(tp)) + if ((flags & TH_SYN) && tp->t_state <= TCPS_SYN_SENT && tfo_enabled(tp)) { len = tcp_tfo_check(tp, len); + } /* * The check here used to be (len < 0). Some times len is zero @@ -1069,8 +1114,9 @@ after_sack_rexmit: TCP_RESET_REXMT_STATE(tp); tp->snd_nxt = tp->snd_una; off = 0; - if (tp->t_timer[TCPT_PERSIST] == 0) + if (tp->t_timer[TCPT_PERSIST] == 0) { tcp_setpersist(tp); + } } } @@ -1083,8 +1129,7 @@ after_sack_rexmit: * 3. our send window (slow start and congestion controlled) is * larger than sent but unacknowledged data in send buffer. */ - if (tcp_do_autosendbuf == 1 && - !INP_WAIT_FOR_IF_FEEDBACK(inp) && !IN_FASTRECOVERY(tp) && + if (!INP_WAIT_FOR_IF_FEEDBACK(inp) && !IN_FASTRECOVERY(tp) && (so->so_snd.sb_flags & (SB_AUTOSIZE | SB_TRIM)) == SB_AUTOSIZE && tcp_cansbgrow(&so->so_snd)) { if ((tp->snd_wnd / 4 * 5) >= so->so_snd.sb_hiwat && @@ -1105,7 +1150,7 @@ after_sack_rexmit: * * TSO may only be used if we are in a pure bulk sending state. * The presence of TCP-MD5, SACK retransmits, SACK advertizements, - * ipfw rules and IP options, as well as disabling hardware checksum + * filters and IP options, as well as disabling hardware checksum * offload prevent using TSO. With TSO the TCP header is the same * (except for the sequence number) for all generated packets. This * makes it impossible to transmit any options which vary per generated @@ -1120,22 +1165,20 @@ after_sack_rexmit: * Pre-calculate here as we save another lookup into the darknesses * of IPsec that way and can actually decide if TSO is ok. 
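The send-buffer auto-tuning condition above drops the tcp_do_autosendbuf check (its sysctl is removed earlier in this file), so the behavior is always on: the buffer grows when the peer's advertised window reaches roughly 80% of the current high-water mark. A simplified sketch of that trigger; the rest of the kernel's condition is not visible in this hunk, and the tunable names below are illustrative:

#include <stdint.h>

#define AUTOSNDBUF_INC_SKETCH (8 * 1024)          /* per the autosndbufinc default above */
#define AUTOSNDBUF_MAX_SKETCH (2 * 1024 * 1024)   /* per the new autosndbufmax default */

/* snd_wnd / 4 * 5 >= sb_hiwat  <=>  snd_wnd is at least ~80% of sb_hiwat (integer math). */
static uint32_t
autotune_sndbuf_sketch(uint32_t snd_wnd, uint32_t sb_hiwat)
{
	if ((snd_wnd / 4) * 5 >= sb_hiwat && sb_hiwat < AUTOSNDBUF_MAX_SKETCH) {
		uint32_t grown = sb_hiwat + AUTOSNDBUF_INC_SKETCH;
		return grown < AUTOSNDBUF_MAX_SKETCH ? grown : AUTOSNDBUF_MAX_SKETCH;
	}
	return sb_hiwat;
}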
*/ - if (ipsec_bypass == 0) - ipsec_optlen = ipsec_hdrsiz_tcp(tp); + if (ipsec_bypass == 0) { + ipsec_optlen = (unsigned int)ipsec_hdrsiz_tcp(tp); + } #endif if (len > tp->t_maxseg) { if ((tp->t_flags & TF_TSO) && tcp_do_tso && hwcksum_tx && ip_use_randomid && kipf_count == 0 && dlil_filter_disable_tso_count == 0 && - tp->rcv_numsacks == 0 && sack_rxmit == 0 && + tp->rcv_numsacks == 0 && sack_rxmit == 0 && sack_bytes_rxmt == 0 && inp->inp_options == NULL && inp->in6p_options == NULL #if IPSEC && ipsec_optlen == 0 -#endif -#if IPFIREWALL - && (fw_enable == 0 || fw_bypass) #endif ) { tso = 1; @@ -1145,6 +1188,8 @@ after_sack_rexmit: sendalot = 1; tso = 0; } + } else { + tso = 0; } /* Send one segment or less as a tail loss probe */ @@ -1166,19 +1211,21 @@ after_sack_rexmit: int newlen = len; if (tp->t_state >= TCPS_ESTABLISHED && (tp->t_mpflags & TMPF_SND_MPPRIO || - tp->t_mpflags & TMPF_SND_REM_ADDR || - tp->t_mpflags & TMPF_SND_MPFAIL || - tp->t_mpflags & TMPF_SND_KEYS || - tp->t_mpflags & TMPF_SND_JACK)) { + tp->t_mpflags & TMPF_SND_REM_ADDR || + tp->t_mpflags & TMPF_SND_MPFAIL || + tp->t_mpflags & TMPF_SND_KEYS || + tp->t_mpflags & TMPF_SND_JACK)) { if (len > 0) { len = 0; + tso = 0; } /* * On a new subflow, don't try to send again, because * we are still waiting for the fourth ack. */ - if (!(tp->t_mpflags & TMPF_PREESTABLISHED)) + if (!(tp->t_mpflags & TMPF_PREESTABLISHED)) { sendalot = 1; + } mptcp_acknow = TRUE; } else { mptcp_acknow = FALSE; @@ -1195,41 +1242,21 @@ after_sack_rexmit: if (newlen < len) { len = newlen; - } - } -#endif /* MPTCP */ - - /* - * If the socket is capable of doing unordered send, - * pull the amount of data that can be sent from the - * unordered priority queues to the serial queue in - * the socket buffer. If bytes are not yet available - * in the highest priority message, we may not be able - * to send any new data. 
- */ - if (so->so_flags & SOF_ENABLE_MSGS) { - if ((off + len) > - so->so_msg_state->msg_serial_bytes) { - sbpull_unordered_data(so, off, len); - - /* check if len needs to be modified */ - if ((off + len) > - so->so_msg_state->msg_serial_bytes) { - len = so->so_msg_state->msg_serial_bytes - off; - if (len <= 0) { - len = 0; - tcpstat.tcps_msg_sndwaithipri++; - } + if (len <= tp->t_maxseg) { + tso = 0; } } } +#endif /* MPTCP */ if (sack_rxmit) { - if (SEQ_LT(p->rxmit + len, tp->snd_una + so->so_snd.sb_cc)) + if (SEQ_LT(p->rxmit + len, tp->snd_una + so->so_snd.sb_cc)) { flags &= ~TH_FIN; + } } else { - if (SEQ_LT(tp->snd_nxt + len, tp->snd_una + so->so_snd.sb_cc)) + if (SEQ_LT(tp->snd_nxt + len, tp->snd_una + so->so_snd.sb_cc)) { flags &= ~TH_FIN; + } } /* * Compare available window to amount of window @@ -1260,18 +1287,21 @@ after_sack_rexmit: if (recwin > 0 && tcp_recv_throttle(tp)) { uint32_t min_iaj_win = tcp_min_iaj_win * tp->t_maxseg; uint32_t bg_rwintop = tp->rcv_adv; - if (SEQ_LT(bg_rwintop, tp->rcv_nxt + min_iaj_win)) + if (SEQ_LT(bg_rwintop, tp->rcv_nxt + min_iaj_win)) { bg_rwintop = tp->rcv_nxt + min_iaj_win; + } recwin = imin((int32_t)(bg_rwintop - tp->rcv_nxt), recwin); - if (recwin < 0) + if (recwin < 0) { recwin = 0; + } } } #endif /* TRAFFIC_MGT */ - if (recwin > (int32_t)(TCP_MAXWIN << tp->rcv_scale)) + if (recwin > (int32_t)(TCP_MAXWIN << tp->rcv_scale)) { recwin = (int32_t)(TCP_MAXWIN << tp->rcv_scale); + } if (!(so->so_flags & SOF_MP_SUBFLOW)) { if (recwin < (int32_t)(tp->rcv_adv - tp->rcv_nxt)) { @@ -1279,10 +1309,12 @@ after_sack_rexmit: } } else { struct mptcb *mp_tp = tptomptp(tp); + int64_t recwin_announced = (int64_t)(mp_tp->mpt_rcvadv - mp_tp->mpt_rcvnxt); /* Don't remove what we announced at the MPTCP-layer */ - if (recwin < (int32_t)(mp_tp->mpt_rcvadv - (uint32_t)mp_tp->mpt_rcvnxt)) { - recwin = (int32_t)(mp_tp->mpt_rcvadv - (uint32_t)mp_tp->mpt_rcvnxt); + VERIFY(recwin_announced < INT32_MAX && recwin_announced > INT32_MIN); + if (recwin < (int32_t)recwin_announced) { + recwin = (int32_t)recwin_announced; } } @@ -1299,12 +1331,15 @@ after_sack_rexmit: * data (receiver may be limited the window size) */ if (len) { - if (tp->t_flagsext & TF_FORCE) + if (tp->t_flagsext & TF_FORCE) { goto send; - if (SEQ_LT(tp->snd_nxt, tp->snd_max)) + } + if (SEQ_LT(tp->snd_nxt, tp->snd_max)) { goto send; - if (sack_rxmit) + } + if (sack_rxmit) { goto send; + } /* * If this here is the first segment after SYN/ACK and TFO @@ -1313,8 +1348,9 @@ after_sack_rexmit: if (tp->t_state == TCPS_SYN_RECEIVED && tfo_enabled(tp) && (tp->t_tfo_flags & TFO_F_COOKIE_VALID) && - tp->snd_nxt == tp->iss + 1) + tp->snd_nxt == tp->iss + 1) { goto send; + } /* * Send new data on the connection only if it is @@ -1322,8 +1358,9 @@ after_sack_rexmit: */ if (!INP_WAIT_FOR_IF_FEEDBACK(inp) || tp->t_state != TCPS_ESTABLISHED) { - if (len >= tp->t_maxseg) + if (len >= tp->t_maxseg) { goto send; + } if (!(tp->t_flags & TF_MORETOCOME) && (idle || tp->t_flags & TF_NODELAY || @@ -1331,22 +1368,24 @@ after_sack_rexmit: ALLOW_LIMITED_TRANSMIT(tp)) && (tp->t_flags & TF_NOPUSH) == 0 && (len + off >= so->so_snd.sb_cc || - /* - * MPTCP needs to respect the DSS-mappings. So, it - * may be sending data that *could* have been - * coalesced, but cannot because of - * mptcp_adj_sendlen(). - */ - so->so_flags & SOF_MP_SUBFLOW)) + /* + * MPTCP needs to respect the DSS-mappings. So, it + * may be sending data that *could* have been + * coalesced, but cannot because of + * mptcp_adj_sendlen(). 
+ */ + so->so_flags & SOF_MP_SUBFLOW)) { goto send; - if (len >= tp->max_sndwnd / 2 && tp->max_sndwnd > 0) + } + if (len >= tp->max_sndwnd / 2 && tp->max_sndwnd > 0) { goto send; + } } else { tcpstat.tcps_fcholdpacket++; } } - if (recwin > 0 && !(tp->t_flags & TF_NEEDSYN)) { + if (recwin > 0) { /* * "adv" is the amount we can increase the window, * taking into account that we are limited by @@ -1354,43 +1393,79 @@ after_sack_rexmit: */ int32_t adv, oldwin = 0; adv = imin(recwin, (int)TCP_MAXWIN << tp->rcv_scale) - - (tp->rcv_adv - tp->rcv_nxt); + (tp->rcv_adv - tp->rcv_nxt); - if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt)) + if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt)) { oldwin = tp->rcv_adv - tp->rcv_nxt; + } - if (adv >= (int32_t) (2 * tp->t_maxseg)) { - /* - * Update only if the resulting scaled value of - * the window changed, or if there is a change in - * the sequence since the last ack. This avoids - * what appears as dupe ACKS (see rdar://5640997) - * - * If streaming is detected avoid sending too many - * window updates. We will depend on the delack - * timer to send a window update when needed. - */ - if (!(tp->t_flags & TF_STRETCHACK) && - (tp->last_ack_sent != tp->rcv_nxt || - ((oldwin + adv) >> tp->rcv_scale) > - (oldwin >> tp->rcv_scale))) { - goto send; + if (tcp_ack_strategy == TCP_ACK_STRATEGY_LEGACY) { + if (adv >= (int32_t) (2 * tp->t_maxseg)) { + /* + * Update only if the resulting scaled value of + * the window changed, or if there is a change in + * the sequence since the last ack. This avoids + * what appears as dupe ACKS (see rdar://5640997) + * + * If streaming is detected avoid sending too many + * window updates. We will depend on the delack + * timer to send a window update when needed. + * + * If there is more data to read, don't send an ACK. + * Otherwise we will end up sending many ACKs if the + * application is doing micro-reads. + */ + if (!(tp->t_flags & TF_STRETCHACK) && + (tp->last_ack_sent != tp->rcv_nxt || + ((oldwin + adv) >> tp->rcv_scale) > + (oldwin >> tp->rcv_scale))) { + goto send; + } + } + } else { + if (adv >= (int32_t) (2 * tp->t_maxseg)) { + /* + * ACK every second full-sized segment, if the + * ACK is advancing or the window becomes bigger + */ + if (so->so_rcv.sb_cc < so->so_rcv.sb_lowat && + (tp->last_ack_sent != tp->rcv_nxt || + ((oldwin + adv) >> tp->rcv_scale) > + (oldwin >> tp->rcv_scale))) { + goto send; + } + } else if (tp->t_flags & TF_DELACK) { + /* + * If we delayed the ACK and the window + * is not advancing by a lot (< 2MSS), ACK + * immediately if the last incoming packet had + * the push flag set and we emptied the buffer. + * + * This takes care of a sender doing small + * repeated writes with Nagle enabled. + */ + if (so->so_rcv.sb_cc == 0 && + tp->last_ack_sent != tp->rcv_nxt && + (tp->t_flagsext & TF_LAST_IS_PSH)) { + goto send; + } } - } - if (4 * adv >= (int32_t) so->so_rcv.sb_hiwat) + if (4 * adv >= (int32_t) so->so_rcv.sb_hiwat) { goto send; + } /* * Make sure that the delayed ack timer is set if * we delayed sending a window update because of * streaming detection. */ - if ((tp->t_flags & TF_STRETCHACK) && + if (tcp_ack_strategy == TCP_ACK_STRATEGY_LEGACY && + (tp->t_flags & TF_STRETCHACK) && !(tp->t_flags & TF_DELACK)) { tp->t_flags |= TF_DELACK; tp->t_timer[TCPT_DELACK] = - OFFSET_FROM_START(tp, tcp_delack); + OFFSET_FROM_START(tp, tcp_delack); } } @@ -1398,24 +1473,31 @@ after_sack_rexmit: * Send if we owe the peer an ACK, RST, SYN, or urgent data. ACKNOW * is also a catch-all for the retransmit timer timeout case. 
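The window-update logic above now branches on tcp_ack_strategy. A condensed sketch of the non-legacy decision: ACK every second full-sized segment only while the application keeps the receive buffer drained, and flush a pending delayed ACK once a PSH-terminated burst has been consumed. Field names are illustrative and several details (stretch-ACK state, the exact sequence checks) are simplified:

#include <stdbool.h>
#include <stdint.h>

struct ack_decision_sketch {
	uint32_t adv;            /* additional window we could advertise, bytes */
	uint32_t maxseg;         /* MSS */
	uint32_t rcv_buffered;   /* unread bytes in the receive buffer */
	uint32_t rcv_lowat;      /* receive low-water mark */
	bool     ack_advances;   /* last_ack_sent != rcv_nxt, or scaled window grew */
	bool     delack_pending; /* a delayed ACK is queued */
	bool     last_was_push;  /* last inbound segment carried PSH */
};

/* Returns true when an ACK/window update should be sent now. */
static bool
should_ack_now_sketch(const struct ack_decision_sketch *c)
{
	if (c->adv >= 2 * c->maxseg) {
		/* ACK every second full-sized segment while the app keeps up. */
		return c->rcv_buffered < c->rcv_lowat && c->ack_advances;
	}
	if (c->delack_pending) {
		/* Small repeated writes with Nagle: ACK once the buffer drains after a PSH. */
		return c->rcv_buffered == 0 && c->ack_advances && c->last_was_push;
	}
	return false;
}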
*/ - if (tp->t_flags & TF_ACKNOW) + if (tp->t_flags & TF_ACKNOW) { + if (tp->t_forced_acks > 0) { + tp->t_forced_acks--; + } goto send; - if ((flags & TH_RST) || - ((flags & TH_SYN) && (tp->t_flags & TF_NEEDSYN) == 0)) + } + if ((flags & TH_RST) || (flags & TH_SYN)) { goto send; - if (SEQ_GT(tp->snd_up, tp->snd_una)) + } + if (SEQ_GT(tp->snd_up, tp->snd_una)) { goto send; + } #if MPTCP - if (mptcp_acknow) + if (mptcp_acknow) { goto send; + } #endif /* MPTCP */ /* * If our state indicates that FIN should be sent * and we have not yet done so, then we need to send. */ if ((flags & TH_FIN) && - (!(tp->t_flags & TF_SENTFIN) || tp->snd_nxt == tp->snd_una)) + (!(tp->t_flags & TF_SENTFIN) || tp->snd_nxt == tp->snd_una)) { goto send; + } /* * In SACK, it is possible for tcp_output to fail to send a segment * after the retransmission timer has been turned off. Make sure @@ -1426,7 +1508,7 @@ after_sack_rexmit: tp->t_timer[TCPT_REXMT] == 0 && tp->t_timer[TCPT_PERSIST] == 0) { tp->t_timer[TCPT_REXMT] = OFFSET_FROM_START(tp, - tp->t_rxtcur); + tp->t_rxtcur); goto just_return; } /* @@ -1462,7 +1544,7 @@ just_return: * but if there is some packets left in the packet list, send them now. */ while (inp->inp_sndinprog_cnt == 0 && - tp->t_pktlist_head != NULL) { + tp->t_pktlist_head != NULL) { packetlist = tp->t_pktlist_head; packchain_listadd = tp->t_lastchain; packchain_sent++; @@ -1475,13 +1557,13 @@ just_return: } /* tcp was closed while we were in ip; resume close */ if (inp->inp_sndinprog_cnt == 0 && - (tp->t_flags & TF_CLOSING)) { + (tp->t_flags & TF_CLOSING)) { tp->t_flags &= ~TF_CLOSING; (void) tcp_close(tp); } else { tcp_check_timer_state(tp); } - KERNEL_DEBUG(DBG_FNC_TCP_OUTPUT | DBG_FUNC_END, 0,0,0,0,0); + KERNEL_DEBUG(DBG_FNC_TCP_OUTPUT | DBG_FUNC_END, 0, 0, 0, 0, 0); return 0; send: @@ -1490,10 +1572,13 @@ send: * the max segment size. */ if (len > 0) { - if (len >= tp->t_maxseg) + do_not_compress = TRUE; + + if (len >= tp->t_maxseg) { tp->t_flags |= TF_MAXSEGSNT; - else + } else { tp->t_flags &= ~TF_MAXSEGSNT; + } } /* * Before ESTABLISHED, force sending of initial options @@ -1504,12 +1589,11 @@ send: * max_linkhdr + sizeof (struct tcpiphdr) + optlen <= MCLBYTES */ optlen = 0; -#if INET6 - if (isipv6) - hdrlen = sizeof (struct ip6_hdr) + sizeof (struct tcphdr); - else -#endif - hdrlen = sizeof (struct tcpiphdr); + if (isipv6) { + hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr); + } else { + hdrlen = sizeof(struct tcpiphdr); + } if (flags & TH_SYN) { tp->snd_nxt = tp->iss; if ((tp->t_flags & TF_NOOPT) == 0) { @@ -1526,9 +1610,9 @@ send: (tp->t_flags & TF_RCVD_SCALE))) { *((u_int32_t *)(void *)(opt + optlen)) = htonl( TCPOPT_NOP << 24 | - TCPOPT_WINDOW << 16 | - TCPOLEN_WINDOW << 8 | - tp->request_r_scale); + TCPOPT_WINDOW << 16 | + TCPOLEN_WINDOW << 8 | + tp->request_r_scale); optlen += 4; } #if MPTCP @@ -1543,23 +1627,19 @@ send: * Send a timestamp and echo-reply if this is a SYN and our side * wants to use timestamps (TF_REQ_TSTMP is set) or both our side * and our peer have sent timestamps in our SYN's. - */ - if ((tp->t_flags & (TF_REQ_TSTMP|TF_NOOPT)) == TF_REQ_TSTMP && - (flags & TH_RST) == 0 && + */ + if ((tp->t_flags & (TF_REQ_TSTMP | TF_NOOPT)) == TF_REQ_TSTMP && + (flags & TH_RST) == 0 && ((flags & TH_ACK) == 0 || - (tp->t_flags & TF_RCVD_TSTMP))) { + (tp->t_flags & TF_RCVD_TSTMP))) { u_int32_t *lp = (u_int32_t *)(void *)(opt + optlen); - /* Form timestamp option as shown in appendix A of RFC 1323. 
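For readers following the reindented option emission above: the window-scale option is written as a single 32-bit big-endian word, one NOP for alignment followed by kind, length and shift count. A standalone illustration of that encoding (standard option values assumed; this is not the kernel's buffer management):

#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

#define TCPOPT_NOP_SKETCH     1
#define TCPOPT_WINDOW_SKETCH  3
#define TCPOLEN_WINDOW_SKETCH 3

/* Emit a NOP-aligned window-scale option into opt[]; returns the 4 bytes consumed. */
static unsigned int
emit_wscale_sketch(uint8_t *opt, uint8_t shift)
{
	uint32_t word = htonl((uint32_t)TCPOPT_NOP_SKETCH << 24 |
	    (uint32_t)TCPOPT_WINDOW_SKETCH << 16 |
	    (uint32_t)TCPOLEN_WINDOW_SKETCH << 8 |
	    shift);

	memcpy(opt, &word, sizeof(word));   /* wire bytes: 01 03 03 <shift> */
	return 4;
}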
*/ - *lp++ = htonl(TCPOPT_TSTAMP_HDR); - *lp++ = htonl(tcp_now); - *lp = htonl(tp->ts_recent); - optlen += TCPOLEN_TSTAMP_APPA; - } - - /* Note the timestamp for receive buffer autosizing */ - if (tp->rfbuf_ts == 0 && (so->so_rcv.sb_flags & SB_AUTOSIZE)) - tp->rfbuf_ts = tcp_now; + /* Form timestamp option as shown in appendix A of RFC 1323. */ + *lp++ = htonl(TCPOPT_TSTAMP_HDR); + *lp++ = htonl(tcp_now); + *lp = htonl(tp->ts_recent); + optlen += TCPOLEN_TSTAMP_APPA; + } if (SACK_ENABLED(tp) && ((tp->t_flags & TF_NOOPT) == 0)) { /* @@ -1601,19 +1681,21 @@ send: tp->t_mpflags |= TMPF_MPTCP_ACKNOW; } optlen = mptcp_setup_opts(tp, off, &opt[0], optlen, flags, - len, &mptcp_acknow); + len, &mptcp_acknow, &do_not_compress); tp->t_mpflags &= ~TMPF_SEND_DSN; } #endif /* MPTCP */ if (tfo_enabled(tp) && !(tp->t_flags & TF_NOOPT) && - (flags & (TH_SYN | TH_ACK)) == TH_SYN) + (flags & (TH_SYN | TH_ACK)) == TH_SYN) { optlen += tcp_tfo_write_cookie(tp, optlen, len, opt); + } if (tfo_enabled(tp) && (flags & (TH_SYN | TH_ACK)) == (TH_SYN | TH_ACK) && - (tp->t_tfo_flags & TFO_F_OFFER_COOKIE)) + (tp->t_tfo_flags & TFO_F_OFFER_COOKIE)) { optlen += tcp_tfo_write_cookie_rep(tp, optlen, opt); + } if (SACK_ENABLED(tp) && ((tp->t_flags & TF_NOOPT) == 0)) { /* @@ -1641,6 +1723,7 @@ send: nsack = min(nsack, (tp->rcv_numsacks + (TCP_SEND_DSACK_OPT(tp) ? 1 : 0))); sackoptlen = (2 + nsack * TCPOLEN_SACK); + VERIFY(sackoptlen < UINT8_MAX); /* * First we need to pad options so that the @@ -1649,12 +1732,13 @@ send: */ padlen = (MAX_TCPOPTLEN - optlen - sackoptlen) % 4; optlen += padlen; - while (padlen-- > 0) + while (padlen-- > 0) { *bp++ = TCPOPT_NOP; + } tcpstat.tcps_sack_send_blocks++; *bp++ = TCPOPT_SACK; - *bp++ = sackoptlen; + *bp++ = (uint8_t)sackoptlen; lp = (u_int32_t *)(void *)bp; /* @@ -1712,7 +1796,7 @@ send: if ((flags & (TH_SYN | TH_ACK)) == (TH_SYN | TH_ACK) && (tp->ecn_flags & TE_ENABLE_ECN)) { if (tp->ecn_flags & TE_SETUPRECEIVED) { - if (tcp_send_ecn_flags_on_syn(tp, so)) { + if (tcp_send_ecn_flags_on_syn(tp)) { /* * Setting TH_ECE makes this an ECN-setup * SYN-ACK @@ -1723,7 +1807,7 @@ send: * Record that we sent the ECN-setup and * default to setting IP ECT. */ - tp->ecn_flags |= (TE_SETUPSENT|TE_SENDIPECT); + tp->ecn_flags |= (TE_SETUPSENT | TE_SENDIPECT); tcpstat.tcps_ecn_server_setup++; tcpstat.tcps_ecn_server_success++; } else { @@ -1754,7 +1838,7 @@ send: } } else if ((flags & (TH_SYN | TH_ACK)) == TH_SYN && (tp->ecn_flags & TE_ENABLE_ECN)) { - if (tcp_send_ecn_flags_on_syn(tp, so)) { + if (tcp_send_ecn_flags_on_syn(tp)) { /* * Setting TH_ECE and TH_CWR makes this an * ECN-setup SYN @@ -1810,21 +1894,18 @@ send: tp->t_dsack_lseq = 0; tp->t_dsack_rseq = 0; -#if INET6 - if (isipv6) + if (isipv6) { ipoptlen = ip6_optlen(inp); - else -#endif - { + } else { if (tp_inp_options) { ipoptlen = tp_inp_options->m_len - - offsetof(struct ipoption, ipopt_list); + offsetof(struct ipoption, ipopt_list); } else { ipoptlen = 0; } } #if IPSEC - ipoptlen += ipsec_optlen; + ipoptlen += ipsec_optlen; #endif /* @@ -1850,39 +1931,46 @@ send: int32_t tso_maxlen; tso_maxlen = tp->tso_max_segment_size ? 
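The SACK-block emission above now asserts sackoptlen < UINT8_MAX before truncating it into the option's one-byte length field, and NOP-pads so the blocks fill the remaining option space cleanly. A compact illustration of the padding arithmetic (standard option lengths assumed):

/* Each SACK block is two 32-bit sequence numbers; the option adds a 2-byte header. */
#define MAX_TCPOPTLEN_SKETCH 40
#define TCPOLEN_SACK_SKETCH   8

/* NOP bytes to emit before the SACK option, given optlen bytes already used. */
static unsigned int
sack_padding_sketch(unsigned int optlen, unsigned int nsack)
{
	unsigned int sackoptlen = 2 + nsack * TCPOLEN_SACK_SKETCH;
	return (MAX_TCPOPTLEN_SKETCH - optlen - sackoptlen) % 4;
}

/* Example: 12 bytes of timestamps already present and 3 blocks to send gives
 * sackoptlen = 26, so 2 NOPs precede the option and all 40 bytes are used exactly. */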
- tp->tso_max_segment_size : TCP_MAXWIN; + tp->tso_max_segment_size : TCP_MAXWIN; if (len > tso_maxlen - hdrlen - optlen) { len = tso_maxlen - hdrlen - optlen; - len = len - (len % (tp->t_maxopd - optlen)); sendalot = 1; } else if (tp->t_flags & TF_NEEDFIN) { sendalot = 1; } + + if (len % (tp->t_maxopd - optlen) != 0) { + len = len - (len % (tp->t_maxopd - optlen)); + sendalot = 1; + } } else { len = tp->t_maxopd - optlen - ipoptlen; sendalot = 1; } } - if (max_linkhdr + hdrlen > MCLBYTES) + if (max_linkhdr + hdrlen > MCLBYTES) { panic("tcphdr too big"); + } /* Check if there is enough data in the send socket * buffer to start measuring bandwidth */ if ((tp->t_flagsext & TF_MEASURESNDBW) != 0 && - (tp->t_bwmeas != NULL) && - (tp->t_flagsext & TF_BWMEAS_INPROGRESS) == 0) { + (tp->t_bwmeas != NULL) && + (tp->t_flagsext & TF_BWMEAS_INPROGRESS) == 0) { tp->t_bwmeas->bw_size = min(min( - (so->so_snd.sb_cc - (tp->snd_max - tp->snd_una)), - tp->snd_cwnd), tp->snd_wnd); + (so->so_snd.sb_cc - (tp->snd_max - tp->snd_una)), + tp->snd_cwnd), tp->snd_wnd); if (tp->t_bwmeas->bw_minsize > 0 && - tp->t_bwmeas->bw_size < tp->t_bwmeas->bw_minsize) + tp->t_bwmeas->bw_size < tp->t_bwmeas->bw_minsize) { tp->t_bwmeas->bw_size = 0; - if (tp->t_bwmeas->bw_maxsize > 0) + } + if (tp->t_bwmeas->bw_maxsize > 0) { tp->t_bwmeas->bw_size = min(tp->t_bwmeas->bw_size, tp->t_bwmeas->bw_maxsize); + } if (tp->t_bwmeas->bw_size > 0) { tp->t_flagsext |= TF_BWMEAS_INPROGRESS; tp->t_bwmeas->bw_start = tp->snd_max; @@ -1897,15 +1985,19 @@ send: * the template for sends on this connection. */ if (len) { - tp->t_pmtud_lastseg_size = len + optlen + ipoptlen; - if ((tp->t_flagsext & TF_FORCE) && len == 1) + /* Remember what the last head-of-line packet-size was */ + if (tp->t_pmtud_lastseg_size == 0 && tp->snd_nxt == tp->snd_una) { + ASSERT(len + optlen + ipoptlen <= IP_MAXPACKET); + tp->t_pmtud_lastseg_size = (uint16_t)(len + optlen + ipoptlen); + } + if ((tp->t_flagsext & TF_FORCE) && len == 1) { tcpstat.tcps_sndprobe++; - else if (SEQ_LT(tp->snd_nxt, tp->snd_max) || sack_rxmit) { + } else if (SEQ_LT(tp->snd_nxt, tp->snd_max) || sack_rxmit) { tcpstat.tcps_sndrexmitpack++; tcpstat.tcps_sndrexmitbyte += len; if (nstat_collect) { nstat_route_tx(inp->inp_route.ro_rt, 1, - len, NSTAT_TX_FLAG_RETRANSMIT); + len, NSTAT_TX_FLAG_RETRANSMIT); INP_ADD_STAT(inp, cell, wifi, wired, txpackets, 1); INP_ADD_STAT(inp, cell, wifi, wired, @@ -1948,24 +2040,24 @@ send: /* minimum length we are going to allocate */ allocated_len = MHLEN; - if (MHLEN < hdrlen + max_linkhdr) { + if (MHLEN < hdrlen + max_linkhdr) { MGETHDR(m, M_DONTWAIT, MT_HEADER); if (m == NULL) { error = ENOBUFS; goto out; } - MCLGET(m, M_DONTWAIT); - if ((m->m_flags & M_EXT) == 0) { - m_freem(m); - error = ENOBUFS; - goto out; - } + MCLGET(m, M_DONTWAIT); + if ((m->m_flags & M_EXT) == 0) { + m_freem(m); + error = ENOBUFS; + goto out; + } m->m_data += max_linkhdr; m->m_len = hdrlen; allocated_len = MCLBYTES; } if (len <= allocated_len - hdrlen - max_linkhdr) { - if (m == NULL) { + if (m == NULL) { VERIFY(allocated_len <= MHLEN); MGETHDR(m, M_DONTWAIT, MT_HEADER); if (m == NULL) { @@ -1977,7 +2069,9 @@ send: } /* makes sure we still have data left to be sent at this point */ if (so->so_snd.sb_mb == NULL || off < 0) { - if (m != NULL) m_freem(m); + if (m != NULL) { + m_freem(m); + } error = 0; /* should we return an error? */ goto out; } @@ -2021,10 +2115,10 @@ send: * it acted on to fullfill the current request, * whether a valid 'hint' was passed in or not. 
*/ - if ((m = m_copym_with_hdrs(so->so_snd.sb_mb, + if ((m = m_copym_with_hdrs(so->so_snd.sb_mb, off, len, M_DONTWAIT, NULL, NULL, copymode)) == NULL) { - error = ENOBUFS; + error = ENOBUFS; goto out; } m->m_data += max_linkhdr; @@ -2039,19 +2133,21 @@ send: * * On SYN-segments we should not add the PUSH-flag. */ - if (off + len == so->so_snd.sb_cc && !(flags & TH_SYN)) + if (off + len == so->so_snd.sb_cc && !(flags & TH_SYN)) { flags |= TH_PUSH; + } } else { - if (tp->t_flags & TF_ACKNOW) + if (tp->t_flags & TF_ACKNOW) { tcpstat.tcps_sndacks++; - else if (flags & (TH_SYN|TH_FIN|TH_RST)) + } else if (flags & (TH_SYN | TH_FIN | TH_RST)) { tcpstat.tcps_sndctrl++; - else if (SEQ_GT(tp->snd_up, tp->snd_una)) + } else if (SEQ_GT(tp->snd_up, tp->snd_una)) { tcpstat.tcps_sndurg++; - else + } else { tcpstat.tcps_sndwinup++; + } - MGETHDR(m, M_DONTWAIT, MT_HEADER); /* MAC-OK */ + MGETHDR(m, M_DONTWAIT, MT_HEADER); /* MAC-OK */ if (m == NULL) { error = ENOBUFS; goto out; @@ -2068,16 +2164,36 @@ send: m->m_len = hdrlen; } m->m_pkthdr.rcvif = 0; -#if CONFIG_MACF_NET - mac_mbuf_label_associate_inpcb(inp, m); -#endif -#if INET6 + + /* Any flag other than pure-ACK: Do not compress! */ + if (flags & ~(TH_ACK)) { + do_not_compress = TRUE; + } + + if (tp->rcv_scale == 0) { + do_not_compress = TRUE; + } + + if (do_not_compress || (tcp_do_ack_compression == 1 && !cell) || __improbable(!tcp_do_ack_compression)) { + m->m_pkthdr.comp_gencnt = 0; + } else { + if (TSTMP_LT(tp->t_comp_lastinc + tcp_ack_compression_rate, tcp_now)) { + tp->t_comp_gencnt++; + /* 0 means no compression, thus jump this */ + if (tp->t_comp_gencnt <= TCP_ACK_COMPRESSION_DUMMY) { + tp->t_comp_gencnt = TCP_ACK_COMPRESSION_DUMMY + 1; + } + tp->t_comp_lastinc = tcp_now; + } + m->m_pkthdr.comp_gencnt = tp->t_comp_gencnt; + } + if (isipv6) { ip6 = mtod(m, struct ip6_hdr *); th = (struct tcphdr *)(void *)(ip6 + 1); tcp_fillheaders(tp, ip6, th); if ((tp->ecn_flags & TE_SENDIPECT) != 0 && len && - !SEQ_LT(tp->snd_nxt, tp->snd_max) && !sack_rxmit) { + !SEQ_LT(tp->snd_nxt, tp->snd_max) && !sack_rxmit) { ip6->ip6_flow |= htonl(IPTOS_ECN_ECT0 << 20); } svc_flags |= PKT_SCF_IPV6; @@ -2085,11 +2201,8 @@ send: m_pftag(m)->pftag_hdr = (void *)ip6; m_pftag(m)->pftag_flags |= PF_TAG_HDR_INET6; #endif /* PF_ECN */ - } else -#endif /* INET6 */ - { + } else { ip = mtod(m, struct ip *); - ipov = (struct ipovly *)ip; th = (struct tcphdr *)(void *)(ip + 1); /* this picks up the pseudo header (w/o the length) */ tcp_fillheaders(tp, ip, th); @@ -2110,8 +2223,9 @@ send: * If resending a FIN, be sure not to use a new sequence number. */ if ((flags & TH_FIN) && (tp->t_flags & TF_SENTFIN) && - tp->snd_nxt == tp->snd_max) + tp->snd_nxt == tp->snd_max) { tp->snd_nxt--; + } /* * If we are doing retransmissions, then snd_nxt will * not reflect the first unsent octet. For ACK only @@ -2129,7 +2243,7 @@ send: * retransmissions. 
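The new comp_gencnt stamping above lets lower layers coalesce pure ACKs that share a generation; the generation only advances every tcp_ack_compression_rate milliseconds, and 0 is reserved to mean "do not compress". A simplified sketch of that counter with assumed names; the cellular-only mode check and TSTMP_LT wrap handling are reduced to plain 32-bit comparisons:

#include <stdbool.h>
#include <stdint.h>

#define ACK_COMP_DUMMY_SKETCH 0   /* reserved: packet must not be compressed */

struct ack_comp_sketch {
	uint32_t gencnt;    /* current generation */
	uint32_t last_inc;  /* time (ms) of the last generation bump */
	uint32_t rate_ms;   /* minimum spacing between bumps */
};

/* Returns the generation to stamp on the packet, or the dummy value to opt out. */
static uint32_t
ack_comp_gencnt_sketch(struct ack_comp_sketch *ac, uint32_t now_ms, bool pure_ack)
{
	if (!pure_ack) {
		return ACK_COMP_DUMMY_SKETCH;   /* SYN/FIN/RST or data: never compress */
	}
	if (now_ms - ac->last_inc >= ac->rate_ms) {
		ac->gencnt++;
		if (ac->gencnt == ACK_COMP_DUMMY_SKETCH) {  /* skip the reserved value on wrap */
			ac->gencnt = ACK_COMP_DUMMY_SKETCH + 1;
		}
		ac->last_inc = now_ms;
	}
	return ac->gencnt;
}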
*/ if (sack_rxmit == 0) { - if (len || (flags & (TH_SYN|TH_FIN)) || + if (len || (flags & (TH_SYN | TH_FIN)) || tp->t_timer[TCPT_PERSIST]) { th->th_seq = htonl(tp->snd_nxt); if (len > 0) { @@ -2141,9 +2255,10 @@ send: tcp_rxtseg_insert(tp, tp->snd_nxt, (tp->snd_nxt + len - 1)); } - if (len > 0) + if (len > 0) { m->m_pkthdr.pkt_flags |= PKTF_TCP_REXMT; + } } } else { th->th_seq = htonl(tp->snd_max); @@ -2163,10 +2278,11 @@ send: tp->last_ack_sent = tp->rcv_nxt; if (optlen) { bcopy(opt, th + 1, optlen); - th->th_off = (sizeof (struct tcphdr) + optlen) >> 2; + th->th_off = (sizeof(struct tcphdr) + optlen) >> 2; } th->th_flags = flags; - th->th_win = htons((u_short) (recwin>>tp->rcv_scale)); + th->th_win = htons((u_short) (recwin >> tp->rcv_scale)); + tp->t_last_recwin = recwin; if (!(so->so_flags & SOF_MP_SUBFLOW)) { if (recwin > 0 && SEQ_LT(tp->rcv_adv, tp->rcv_nxt + recwin)) { tp->rcv_adv = tp->rcv_nxt + recwin; @@ -2177,8 +2293,8 @@ send: tp->rcv_adv = tp->rcv_nxt + recwin; } - if (recwin > 0 && SEQ_LT(mp_tp->mpt_rcvadv, (uint32_t)mp_tp->mpt_rcvnxt + recwin)) { - mp_tp->mpt_rcvadv = (uint32_t)mp_tp->mpt_rcvnxt + recwin; + if (recwin > 0 && MPTCP_SEQ_LT(mp_tp->mpt_rcvadv, mp_tp->mpt_rcvnxt + recwin)) { + mp_tp->mpt_rcvadv = mp_tp->mpt_rcvnxt + recwin; } } @@ -2190,10 +2306,11 @@ send: * to read more data then can be buffered prior to transmitting on * the connection. */ - if (th->th_win == 0) + if (th->th_win == 0) { tp->t_flags |= TF_RXWIN0SENT; - else + } else { tp->t_flags &= ~TF_RXWIN0SENT; + } if (SEQ_GT(tp->snd_up, tp->snd_nxt)) { th->th_urp = htons((u_short)(tp->snd_up - tp->snd_nxt)); @@ -2205,7 +2322,7 @@ send: * so that it doesn't drift into the send window on sequence * number wraparound. */ - tp->snd_up = tp->snd_una; /* drag it along */ + tp->snd_up = tp->snd_una; /* drag it along */ } /* @@ -2221,10 +2338,10 @@ send: */ if (tp->t_state != TCPS_ESTABLISHED && (tp->t_state == TCPS_CLOSING || tp->t_state == TCPS_TIME_WAIT - || tp->t_state == TCPS_LAST_ACK || (th->th_flags & TH_RST))) + || tp->t_state == TCPS_LAST_ACK || (th->th_flags & TH_RST))) { m->m_pkthdr.pkt_flags |= PKTF_LAST_PKT; + } -#if INET6 if (isipv6) { /* * ip6_plen is not need to be filled now, and will be filled @@ -2232,18 +2349,17 @@ send: */ m->m_pkthdr.csum_flags = CSUM_TCPIPV6; m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); - if (len + optlen) + if (len + optlen) { th->th_sum = in_addword(th->th_sum, - htons((u_short)(optlen + len))); - } - else -#endif /* INET6 */ - { + htons((u_short)(optlen + len))); + } + } else { m->m_pkthdr.csum_flags = CSUM_TCP; m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); - if (len + optlen) + if (len + optlen) { th->th_sum = in_addword(th->th_sum, - htons((u_short)(optlen + len))); + htons((u_short)(optlen + len))); + } } /* @@ -2251,12 +2367,11 @@ send: * The TCP pseudo header checksum is always provided. */ if (tso) { -#if INET6 - if (isipv6) + if (isipv6) { m->m_pkthdr.csum_flags |= CSUM_TSO_IPV6; - else -#endif /* INET6 */ + } else { m->m_pkthdr.csum_flags |= CSUM_TSO_IPV4; + } m->m_pkthdr.tso_segsz = tp->t_maxopd - optlen; } else { @@ -2274,17 +2389,19 @@ send: /* * Advance snd_nxt over sequence space of this segment. 
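As a reminder of the window encoding touched above: the 16-bit th_win field carries the receive window right-shifted by the negotiated scale, and the peer reconstructs it by shifting back. A tiny illustration with example values only:

#include <stdint.h>
#include <arpa/inet.h>

/* Header window field: recwin >> rcv_scale, capped at the 16-bit maximum. */
static uint16_t
advertised_window_sketch(uint32_t recwin, uint8_t rcv_scale)
{
	uint32_t w = recwin >> rcv_scale;
	return htons((uint16_t)(w > 65535 ? 65535 : w));
}

/* Example: recwin = 262144 with rcv_scale = 3 puts 32768 on the wire,
 * which the receiver scales back up to 262144 bytes. */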
*/ - if (flags & (TH_SYN|TH_FIN)) { - if (flags & TH_SYN) + if (flags & (TH_SYN | TH_FIN)) { + if (flags & TH_SYN) { tp->snd_nxt++; + } if ((flags & TH_FIN) && - !(tp->t_flags & TF_SENTFIN)) { + !(tp->t_flags & TF_SENTFIN)) { tp->snd_nxt++; tp->t_flags |= TF_SENTFIN; } } - if (sack_rxmit) + if (sack_rxmit) { goto timer; + } if (sack_rescue_rxt == TRUE) { tp->snd_nxt = old_snd_nxt; sack_rescue_rxt = FALSE; @@ -2316,14 +2433,14 @@ send: timer: if (tp->t_timer[TCPT_REXMT] == 0 && ((sack_rxmit && tp->snd_nxt != tp->snd_max) || - tp->snd_nxt != tp->snd_una || (flags & TH_FIN))) { + tp->snd_nxt != tp->snd_una || (flags & TH_FIN))) { if (tp->t_timer[TCPT_PERSIST]) { tp->t_timer[TCPT_PERSIST] = 0; tp->t_persist_stop = 0; TCP_RESET_REXMT_STATE(tp); } tp->t_timer[TCPT_REXMT] = - OFFSET_FROM_START(tp, tp->t_rxtcur); + OFFSET_FROM_START(tp, tp->t_rxtcur); } /* @@ -2338,27 +2455,39 @@ timer: tp->snd_nxt == tp->snd_max && SEQ_GT(tp->snd_nxt, tp->snd_una) && tp->t_rxtshift == 0 && - (tp->t_flagsext & (TF_SENT_TLPROBE|TF_PKTS_REORDERED)) == 0) { - u_int32_t pto, srtt; - - /* - * Using SRTT alone to set PTO can cause spurious - * retransmissions on wireless networks where there - * is a lot of variance in RTT. Taking variance - * into account will avoid this. - */ - srtt = tp->t_srtt >> TCP_RTT_SHIFT; - pto = ((TCP_REXMTVAL(tp)) * 3) >> 1; - pto = max (2 * srtt, pto); - if ((tp->snd_max - tp->snd_una) == tp->t_maxseg) - pto = max(pto, - (((3 * pto) >> 2) + tcp_delack * 2)); - else - pto = max(10, pto); + (tp->t_flagsext & (TF_SENT_TLPROBE | TF_PKTS_REORDERED)) == 0) { + uint32_t pto, srtt; + + if (tcp_do_better_lr) { + srtt = tp->t_srtt >> TCP_RTT_SHIFT; + pto = 2 * srtt; + if ((tp->snd_max - tp->snd_una) <= tp->t_maxseg) { + pto += tcp_delack; + } else { + pto += 2; + } + } else { + /* + * Using SRTT alone to set PTO can cause spurious + * retransmissions on wireless networks where there + * is a lot of variance in RTT. Taking variance + * into account will avoid this. + */ + srtt = tp->t_srtt >> TCP_RTT_SHIFT; + pto = ((TCP_REXMTVAL(tp)) * 3) >> 1; + pto = max(2 * srtt, pto); + if ((tp->snd_max - tp->snd_una) == tp->t_maxseg) { + pto = max(pto, + (((3 * pto) >> 2) + tcp_delack * 2)); + } else { + pto = max(10, pto); + } + } /* if RTO is less than PTO, choose RTO instead */ - if (tp->t_rxtcur < pto) + if (tp->t_rxtcur < pto) { pto = tp->t_rxtcur; + } tp->t_timer[TCPT_PTO] = OFFSET_FROM_START(tp, pto); } @@ -2368,10 +2497,11 @@ timer: * persist mode (no window) we do not update snd_nxt. */ int xlen = len; - if (flags & TH_SYN) + if (flags & TH_SYN) { ++xlen; + } if ((flags & TH_FIN) && - !(tp->t_flags & TF_SENTFIN)) { + !(tp->t_flags & TF_SENTFIN)) { ++xlen; tp->t_flags |= TF_SENTFIN; } @@ -2385,8 +2515,9 @@ timer: /* * Trace. */ - if (so_options & SO_DEBUG) + if (so_options & SO_DEBUG) { tcp_trace(TA_OUTPUT, tp->t_state, tp, mtod(m, void *), th, 0); + } #endif /* @@ -2395,7 +2526,6 @@ timer: * to handle ttl and tos; we could keep them in * the template, but need a way to checksum without them. */ -#if INET6 /* * m->m_pkthdr.len should have been set before cksum calcuration, * because in6_cksum() need it. 
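The probe-timeout hunk above gains a second flavor gated by tcp_do_better_lr: PTO becomes 2*SRTT, padded by the delayed-ACK interval when only one segment is outstanding (so the probe outlives the peer's delayed ACK) and by 2 ms otherwise, and it is still clamped to the retransmission timeout. A condensed arithmetic sketch, treating the timer values as milliseconds and using illustrative names:

#include <stdint.h>

static uint32_t
tlp_pto_better_lr_sketch(uint32_t srtt_ms, uint32_t outstanding_bytes,
    uint32_t maxseg, uint32_t delack_ms, uint32_t rto_ms)
{
	uint32_t pto = 2 * srtt_ms;

	pto += (outstanding_bytes <= maxseg) ? delack_ms : 2;
	if (rto_ms < pto) {
		pto = rto_ms;   /* never wait longer than the RTO itself */
	}
	return pto;
}

/* Example: SRTT = 30 ms with several segments outstanding and RTO = 200 ms
 * gives PTO = 62 ms; with a single segment outstanding and a 100 ms delayed
 * ACK interval it becomes 160 ms. */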
@@ -2415,18 +2545,17 @@ timer: ((inp->inp_fport << 16) | inp->inp_lport), (((inp->in6p_laddr.s6_addr16[0] & 0xffff) << 16) | (inp->in6p_faddr.s6_addr16[0] & 0xffff)), - sendalot,0,0); - } else -#endif /* INET6 */ - { - ip->ip_len = m->m_pkthdr.len; - ip->ip_ttl = inp->inp_ip_ttl; /* XXX */ + sendalot, 0, 0); + } else { + ASSERT(m->m_pkthdr.len <= IP_MAXPACKET); + ip->ip_len = (u_short)m->m_pkthdr.len; + ip->ip_ttl = inp->inp_ip_ttl; /* XXX */ ip->ip_tos |= (inp->inp_ip_tos & ~IPTOS_ECN_MASK);/* XXX */ - KERNEL_DEBUG(DBG_LAYER_BEG, - ((inp->inp_fport << 16) | inp->inp_lport), - (((inp->inp_laddr.s_addr & 0xffff) << 16) | - (inp->inp_faddr.s_addr & 0xffff)), 0,0,0); - } + KERNEL_DEBUG(DBG_LAYER_BEG, + ((inp->inp_fport << 16) | inp->inp_lport), + (((inp->inp_laddr.s_addr & 0xffff) << 16) | + (inp->inp_faddr.s_addr & 0xffff)), 0, 0, 0); + } /* * See if we should do MTU discovery. @@ -2437,35 +2566,36 @@ timer: * 4) the MTU is not locked (if it is, then discovery has been * disabled for that route) */ -#if INET6 - if (!isipv6) -#endif /* INET6 */ - if (path_mtu_discovery && (tp->t_flags & TF_PMTUD)) + if (!isipv6) { + if (path_mtu_discovery && (tp->t_flags & TF_PMTUD)) { ip->ip_off |= IP_DF; + } + } #if NECP { necp_kernel_policy_id policy_id; necp_kernel_policy_id skip_policy_id; u_int32_t route_rule_id; - if (!necp_socket_is_allowed_to_send_recv(inp, NULL, &policy_id, &route_rule_id, &skip_policy_id)) { + u_int32_t pass_flags; + if (!necp_socket_is_allowed_to_send_recv(inp, NULL, 0, &policy_id, &route_rule_id, &skip_policy_id, &pass_flags)) { TCP_LOG_DROP_NECP(isipv6 ? (void *)ip6 : (void *)ip, th, tp, true); m_freem(m); error = EHOSTUNREACH; goto out; } - necp_mark_packet_from_socket(m, inp, policy_id, route_rule_id, skip_policy_id); + necp_mark_packet_from_socket(m, inp, policy_id, route_rule_id, skip_policy_id, pass_flags); if (net_qos_policy_restricted != 0) { - necp_socket_update_qos_marking(inp, inp->inp_route.ro_rt, - NULL, route_rule_id); + necp_socket_update_qos_marking(inp, inp->inp_route.ro_rt, route_rule_id); } } #endif /* NECP */ #if IPSEC - if (inp->inp_sp != NULL) + if (inp->inp_sp != NULL) { ipsec_setsocket(m, so); + } #endif /*IPSEC*/ /* @@ -2482,10 +2612,11 @@ timer: m->m_pkthdr.pkt_flags |= (PKTF_FLOW_ID | PKTF_FLOW_LOCALSRC | PKTF_FLOW_ADV); m->m_pkthdr.pkt_proto = IPPROTO_TCP; m->m_pkthdr.tx_tcp_pid = so->last_pid; - if (so->so_flags & SOF_DELEGATED) + if (so->so_flags & SOF_DELEGATED) { m->m_pkthdr.tx_tcp_e_pid = so->e_pid; - else + } else { m->m_pkthdr.tx_tcp_e_pid = 0; + } m->m_nextpkt = NULL; @@ -2498,15 +2629,15 @@ timer: * 3. Only ACK flag is set. * 4. there is no outstanding data on this connection. 
*/ - if (tcp_prioritize_acks != 0 && len == 0 && - (inp->inp_last_outifp->if_eflags & - (IFEF_TXSTART | IFEF_NOACKPRI)) == IFEF_TXSTART) { + if (len == 0 && (inp->inp_last_outifp->if_eflags & (IFEF_TXSTART | IFEF_NOACKPRI)) == IFEF_TXSTART) { if (th->th_flags == TH_ACK && tp->snd_una == tp->snd_max && - tp->t_timer[TCPT_REXMT] == 0) + tp->t_timer[TCPT_REXMT] == 0) { svc_flags |= PKT_SCF_TCP_ACK; - if (th->th_flags & TH_SYN) + } + if (th->th_flags & TH_SYN) { svc_flags |= PKT_SCF_TCP_SYN; + } } set_packet_service_class(m, so, sotc, svc_flags); } else { @@ -2524,16 +2655,13 @@ timer: tp->t_pktlist_sentlen += len; tp->t_lastchain++; -#if INET6 if (isipv6) { DTRACE_TCP5(send, struct mbuf *, m, struct inpcb *, inp, - struct ip6 *, ip6, struct tcpcb *, tp, struct tcphdr *, - th); - } else -#endif /* INET6 */ - { + struct ip6 *, ip6, struct tcpcb *, tp, struct tcphdr *, + th); + } else { DTRACE_TCP5(send, struct mbuf *, m, struct inpcb *, inp, - struct ip *, ip, struct tcpcb *, tp, struct tcphdr *, th); + struct ip *, ip, struct tcpcb *, tp, struct tcphdr *, th); } if (tp->t_pktlist_head != NULL) { @@ -2544,30 +2672,6 @@ timer: tp->t_pktlist_head = tp->t_pktlist_tail = m; } - if (lro_ackmore && !sackoptlen && tp->t_timer[TCPT_PERSIST] == 0 && - (th->th_flags & TH_ACK) == TH_ACK && len == 0 && - tp->t_state == TCPS_ESTABLISHED) { - /* For a pure ACK, see if you need to send more of them */ - mnext = tcp_send_lroacks(tp, m, th); - if (mnext) { - tp->t_pktlist_tail->m_nextpkt = mnext; - if (mnext->m_nextpkt == NULL) { - tp->t_pktlist_tail = mnext; - tp->t_lastchain++; - } else { - struct mbuf *tail, *next; - next = mnext->m_nextpkt; - tail = next->m_nextpkt; - while (tail) { - next = tail; - tail = tail->m_nextpkt; - tp->t_lastchain++; - } - tp->t_pktlist_tail = next; - } - } - } - if (sendalot == 0 || (tp->t_state != TCPS_ESTABLISHED) || (tp->snd_cwnd <= (tp->snd_wnd / 8)) || (tp->t_flags & TF_ACKNOW) || @@ -2575,7 +2679,7 @@ timer: tp->t_lastchain >= tcp_packet_chaining) { error = 0; while (inp->inp_sndinprog_cnt == 0 && - tp->t_pktlist_head != NULL) { + tp->t_pktlist_head != NULL) { packetlist = tp->t_pktlist_head; packchain_listadd = tp->t_lastchain; packchain_sent++; @@ -2601,7 +2705,7 @@ timer: } /* tcp was closed while we were in ip; resume close */ if (inp->inp_sndinprog_cnt == 0 && - (tp->t_flags & TF_CLOSING)) { + (tp->t_flags & TF_CLOSING)) { tp->t_flags &= ~TF_CLOSING; (void) tcp_close(tp); return 0; @@ -2632,23 +2736,36 @@ timer: if (SEQ_GT((p->rxmit - lost), tp->snd_una)) { p->rxmit -= lost; + + if (SEQ_LT(p->rxmit, p->start)) { + p->rxmit = p->start; + } } else { lost = p->rxmit - tp->snd_una; p->rxmit = tp->snd_una; + + if (SEQ_LT(p->rxmit, p->start)) { + p->rxmit = p->start; + } } tp->sackhint.sack_bytes_rexmit -= lost; + if (tp->sackhint.sack_bytes_rexmit < 0) { + tp->sackhint.sack_bytes_rexmit = 0; + } } else { if (SEQ_GT((tp->snd_nxt - lost), - tp->snd_una)) + tp->snd_una)) { tp->snd_nxt -= lost; - else + } else { tp->snd_nxt = tp->snd_una; + } } } } out: - if (tp->t_pktlist_head != NULL) + if (tp->t_pktlist_head != NULL) { m_freem_list(tp->t_pktlist_head); + } TCP_PKTLIST_CLEAR(tp); if (error == ENOBUFS) { @@ -2660,13 +2777,14 @@ out: if (tp->t_timer[TCPT_REXMT] == 0 && tp->t_timer[TCPT_PERSIST] == 0 && (len != 0 || (flags & (TH_SYN | TH_FIN)) != 0 || - so->so_snd.sb_cc > 0)) + so->so_snd.sb_cc > 0)) { tp->t_timer[TCPT_REXMT] = - OFFSET_FROM_START(tp, tp->t_rxtcur); + OFFSET_FROM_START(tp, tp->t_rxtcur); + } tp->snd_cwnd = tp->t_maxseg; tp->t_bytes_acked = 0; 
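When ip_output delivers only part of the chain, the error path above walks the SACK hole's rxmit pointer back by the unsent bytes; this patch additionally clamps it so it never falls before the hole's start and floors sack_bytes_rexmit at zero. A small sketch of that rollback, ignoring sequence-number wraparound and using illustrative names:

#include <stdint.h>

struct sack_hole_sketch {
	uint32_t start;   /* first sequence number of the hole */
	uint32_t rxmit;   /* next sequence number to retransmit from the hole */
};

static void
rollback_sack_hole_sketch(struct sack_hole_sketch *p, uint32_t snd_una,
    uint32_t lost, int32_t *sack_bytes_rexmit)
{
	if (p->rxmit - lost > snd_una) {
		p->rxmit -= lost;
	} else {
		lost = p->rxmit - snd_una;
		p->rxmit = snd_una;
	}
	if (p->rxmit < p->start) {            /* new clamp added by this patch */
		p->rxmit = p->start;
	}
	*sack_bytes_rexmit -= (int32_t)lost;
	if (*sack_bytes_rexmit < 0) {         /* new floor added by this patch */
		*sack_bytes_rexmit = 0;
	}
}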
tcp_check_timer_state(tp); - KERNEL_DEBUG(DBG_FNC_TCP_OUTPUT | DBG_FUNC_END, 0,0,0,0,0); + KERNEL_DEBUG(DBG_FNC_TCP_OUTPUT | DBG_FUNC_END, 0, 0, 0, 0, 0); tcp_ccdbg_trace(tp, NULL, TCP_CC_OUTPUT_ERROR); return 0; @@ -2684,13 +2802,14 @@ out: * immediatly retry with MSS sized segments generated * by this function. */ - if (tso) + if (tso) { tp->t_flags &= ~TF_TSO; + } tcp_mtudisc(inp, 0); tcp_check_timer_state(tp); - KERNEL_DEBUG(DBG_FNC_TCP_OUTPUT | DBG_FUNC_END, 0,0,0,0,0); + KERNEL_DEBUG(DBG_FNC_TCP_OUTPUT | DBG_FUNC_END, 0, 0, 0, 0, 0); return 0; } /* @@ -2700,19 +2819,20 @@ out: if ((error == EHOSTUNREACH || error == ENETDOWN) && TCPS_HAVERCVDSYN(tp->t_state) && !inp_restricted_send(inp, inp->inp_last_outifp)) { - tp->t_softerror = error; - error = 0; + tp->t_softerror = error; + error = 0; } tcp_check_timer_state(tp); - KERNEL_DEBUG(DBG_FNC_TCP_OUTPUT | DBG_FUNC_END, 0,0,0,0,0); + KERNEL_DEBUG(DBG_FNC_TCP_OUTPUT | DBG_FUNC_END, 0, 0, 0, 0, 0); return error; } tcpstat.tcps_sndtotal++; - KERNEL_DEBUG(DBG_FNC_TCP_OUTPUT | DBG_FUNC_END,0,0,0,0,0); - if (sendalot) + KERNEL_DEBUG(DBG_FNC_TCP_OUTPUT | DBG_FUNC_END, 0, 0, 0, 0, 0); + if (sendalot) { goto again; + } tcp_check_timer_state(tp); @@ -2731,13 +2851,13 @@ tcp_ip_output(struct socket *so, struct tcpcb *tp, struct mbuf *pkt, struct ip_out_args ipoa; struct route ro; struct ifnet *outif = NULL; + bool check_qos_marking_again = (so->so_flags1 & SOF1_QOSMARKING_POLICY_OVERRIDE) ? FALSE : TRUE; bzero(&ipoa, sizeof(ipoa)); ipoa.ipoa_boundif = IFSCOPE_NONE; ipoa.ipoa_flags = IPOAF_SELECT_SRCIF | IPOAF_BOUND_SRCADDR; ipoa.ipoa_sotc = SO_TC_UNSPEC; ipoa.ipoa_netsvctype = _NET_SERVICE_TYPE_UNSPEC; -#if INET6 struct ip6_out_args ip6oa; struct route_in6 ro6; @@ -2749,93 +2869,84 @@ tcp_ip_output(struct socket *so, struct tcpcb *tp, struct mbuf *pkt, struct flowadv *adv = (isipv6 ? 
&ip6oa.ip6oa_flowadv : &ipoa.ipoa_flowadv); -#else /* INET6 */ - struct flowadv *adv = &ipoa.ipoa_flowadv; -#endif /* !INET6 */ /* If socket was bound to an ifindex, tell ip_output about it */ if (inp->inp_flags & INP_BOUND_IF) { -#if INET6 if (isipv6) { ip6oa.ip6oa_boundif = inp->inp_boundifp->if_index; ip6oa.ip6oa_flags |= IP6OAF_BOUND_IF; - } else -#endif /* INET6 */ - { + } else { ipoa.ipoa_boundif = inp->inp_boundifp->if_index; ipoa.ipoa_flags |= IPOAF_BOUND_IF; } } if (INP_NO_CELLULAR(inp)) { -#if INET6 - if (isipv6) + if (isipv6) { ip6oa.ip6oa_flags |= IP6OAF_NO_CELLULAR; - else -#endif /* INET6 */ + } else { ipoa.ipoa_flags |= IPOAF_NO_CELLULAR; + } } if (INP_NO_EXPENSIVE(inp)) { -#if INET6 - if (isipv6) + if (isipv6) { ip6oa.ip6oa_flags |= IP6OAF_NO_EXPENSIVE; - else -#endif /* INET6 */ + } else { ipoa.ipoa_flags |= IPOAF_NO_EXPENSIVE; - + } } if (INP_NO_CONSTRAINED(inp)) { -#if INET6 - if (isipv6) + if (isipv6) { ip6oa.ip6oa_flags |= IP6OAF_NO_CONSTRAINED; - else -#endif /* INET6 */ + } else { ipoa.ipoa_flags |= IPOAF_NO_CONSTRAINED; + } } if (INP_AWDL_UNRESTRICTED(inp)) { -#if INET6 - if (isipv6) + if (isipv6) { ip6oa.ip6oa_flags |= IP6OAF_AWDL_UNRESTRICTED; - else -#endif /* INET6 */ + } else { ipoa.ipoa_flags |= IPOAF_AWDL_UNRESTRICTED; - + } } -#if INET6 if (INP_INTCOPROC_ALLOWED(inp) && isipv6) { ip6oa.ip6oa_flags |= IP6OAF_INTCOPROC_ALLOWED; } if (isipv6) { ip6oa.ip6oa_sotc = so->so_traffic_class; ip6oa.ip6oa_netsvctype = so->so_netsvctype; - } else -#endif /* INET6 */ - { + ip6oa.qos_marking_gencount = inp->inp_policyresult.results.qos_marking_gencount; + } else { ipoa.ipoa_sotc = so->so_traffic_class; ipoa.ipoa_netsvctype = so->so_netsvctype; + ipoa.qos_marking_gencount = inp->inp_policyresult.results.qos_marking_gencount; } if ((so->so_flags1 & SOF1_QOSMARKING_ALLOWED)) { -#if INET6 - if (isipv6) + if (isipv6) { ip6oa.ip6oa_flags |= IP6OAF_QOSMARKING_ALLOWED; - else -#endif /* INET6 */ + } else { ipoa.ipoa_flags |= IPOAF_QOSMARKING_ALLOWED; + } + } + if (check_qos_marking_again) { + if (isipv6) { + ip6oa.ip6oa_flags |= IP6OAF_REDO_QOSMARKING_POLICY; + } else { + ipoa.ipoa_flags |= IPOAF_REDO_QOSMARKING_POLICY; + } } -#if INET6 - if (isipv6) + if (isipv6) { flags |= IPV6_OUTARGS; - else -#endif /* INET6 */ + } else { flags |= IP_OUTARGS; + } /* Copy the cached route and take an extra reference */ -#if INET6 - if (isipv6) + if (isipv6) { in6p_route_copyout(inp, &ro6); - else -#endif /* INET6 */ + } else { inp_route_copyout(inp, &ro); + } /* * Make sure ACK/DELACK conditions are cleared before @@ -2861,7 +2972,6 @@ tcp_ip_output(struct socket *so, struct tcpcb *tp, struct mbuf *pkt, if (tcp_output_unlocked && !so->so_upcallusecount && (tp->t_state == TCPS_ESTABLISHED) && (sack_in_progress == 0) && !IN_FASTRECOVERY(tp) && !(so->so_flags & SOF_MP_SUBFLOW)) { - unlocked = TRUE; socket_unlock(so, 0); } @@ -2875,12 +2985,9 @@ tcp_ip_output(struct socket *so, struct tcpcb *tp, struct mbuf *pkt, chain = tcp_packet_chaining > 1 #if IPSEC - && ipsec_bypass + && ipsec_bypass #endif -#if IPFIREWALL - && (fw_enable == 0 || fw_bypass) -#endif - ; // I'm important, not extraneous + ; // I'm important, not extraneous while (pkt != NULL) { struct mbuf *npkt = pkt->m_nextpkt; @@ -2900,14 +3007,12 @@ tcp_ip_output(struct socket *so, struct tcpcb *tp, struct mbuf *pkt, */ cnt = 0; } -#if INET6 if (isipv6) { error = ip6_output_list(pkt, cnt, inp->in6p_outputopts, &ro6, flags, NULL, NULL, &ip6oa); ifdenied = (ip6oa.ip6oa_retflags & IP6OARF_IFDENIED); } else { -#endif /* INET6 */ error = 
ip_output_list(pkt, cnt, opt, &ro, flags, NULL, &ipoa); ifdenied = (ipoa.ipoa_retflags & IPOARF_IFDENIED); @@ -2919,15 +3024,17 @@ tcp_ip_output(struct socket *so, struct tcpcb *tp, struct mbuf *pkt, * the callee had taken care of everything; else * we need to free the rest of the chain ourselves. */ - if (!chain) + if (!chain) { m_freem_list(npkt); + } break; } pkt = npkt; } - if (unlocked) + if (unlocked) { socket_lock(so, 0); + } /* * Enter flow controlled state if the connection is established @@ -2945,10 +3052,11 @@ tcp_ip_output(struct socket *so, struct tcpcb *tp, struct mbuf *pkt, int rc; rc = inp_set_fc_state(inp, adv->code); - if (rc == 1) + if (rc == 1) { tcp_ccdbg_trace(tp, NULL, ((adv->code == FADV_FLOW_CONTROLLED) ? TCP_CC_FLOW_CONTROL : TCP_CC_SUSPEND)); + } } /* @@ -2956,18 +3064,18 @@ tcp_ip_output(struct socket *so, struct tcpcb *tp, struct mbuf *pkt, * packets are dropped. Return ENOBUFS, to update the * pcb state. */ - if (adv->code == FADV_SUSPENDED) + if (adv->code == FADV_SUSPENDED) { error = ENOBUFS; + } VERIFY(inp->inp_sndinprog_cnt > 0); - if ( --inp->inp_sndinprog_cnt == 0) { + if (--inp->inp_sndinprog_cnt == 0) { inp->inp_flags &= ~(INP_FC_FEEDBACK); if (inp->inp_sndingprog_waiters > 0) { wakeup(&inp->inp_sndinprog_cnt); } } -#if INET6 if (isipv6) { /* * When an NECP IP tunnel policy forces the outbound interface, @@ -2979,10 +3087,28 @@ tcp_ip_output(struct socket *so, struct tcpcb *tp, struct mbuf *pkt, } else if (ro6.ro_rt != NULL) { outif = ro6.ro_rt->rt_ifp; } - } else -#endif /* INET6 */ - if (ro.ro_rt != NULL) + } else { + if (ro.ro_rt != NULL) { outif = ro.ro_rt->rt_ifp; + } + } + if (check_qos_marking_again) { + uint32_t qos_marking_gencount; + bool allow_qos_marking; + if (isipv6) { + qos_marking_gencount = ip6oa.qos_marking_gencount; + allow_qos_marking = ip6oa.ip6oa_flags & IP6OAF_QOSMARKING_ALLOWED ? TRUE : FALSE; + } else { + qos_marking_gencount = ipoa.qos_marking_gencount; + allow_qos_marking = ipoa.ipoa_flags & IPOAF_QOSMARKING_ALLOWED ? 
TRUE : FALSE; + } + inp->inp_policyresult.results.qos_marking_gencount = qos_marking_gencount; + if (allow_qos_marking == TRUE) { + inp->inp_socket->so_flags1 |= SOF1_QOSMARKING_ALLOWED; + } else { + inp->inp_socket->so_flags1 &= ~SOF1_QOSMARKING_ALLOWED; + } + } if (outif != NULL && outif != inp->inp_last_outifp) { /* Update the send byte count */ @@ -2992,24 +3118,23 @@ tcp_ip_output(struct socket *so, struct tcpcb *tp, struct mbuf *pkt, so->so_snd.sb_flags &= ~SB_SNDBYTE_CNT; } inp->inp_last_outifp = outif; - } if (error != 0 && ifdenied && - (INP_NO_CELLULAR(inp) || INP_NO_EXPENSIVE(inp) || INP_NO_CONSTRAINED(inp))) + (INP_NO_CELLULAR(inp) || INP_NO_EXPENSIVE(inp) || INP_NO_CONSTRAINED(inp))) { soevent(so, - (SO_FILT_HINT_LOCKED|SO_FILT_HINT_IFDENIED)); + (SO_FILT_HINT_LOCKED | SO_FILT_HINT_IFDENIED)); + } /* Synchronize cached PCB route & options */ -#if INET6 - if (isipv6) + if (isipv6) { in6p_route_copyin(inp, &ro6); - else -#endif /* INET6 */ + } else { inp_route_copyin(inp, &ro); + } if (tp->t_state < TCPS_ESTABLISHED && tp->t_rxtshift == 0 && - tp->t_inpcb->inp_route.ro_rt != NULL) { + tp->t_inpcb->inp_route.ro_rt != NULL) { /* If we found the route and there is an rtt on it * reset the retransmit timer */ @@ -3046,111 +3171,13 @@ tcp_setpersist(struct tcpcb *tp) tcptv_persmin_val, TCPTV_PERSMAX, 0); tp->t_timer[TCPT_PERSIST] = OFFSET_FROM_START(tp, tp->t_timer[TCPT_PERSIST]); - if (tp->t_rxtshift < TCP_MAXRXTSHIFT) + if (tp->t_rxtshift < TCP_MAXRXTSHIFT) { tp->t_rxtshift++; -} - -/* - * Send as many acks as data coalesced. Every other packet when stretch - * ACK is not enabled. Every 8 packets, if stretch ACK is enabled. - */ -static struct mbuf* -tcp_send_lroacks(struct tcpcb *tp, struct mbuf *m, struct tcphdr *th) -{ - struct mbuf *mnext = NULL, *ack_chain = NULL, *tail = NULL; - int count = 0; - tcp_seq org_ack = ntohl(th->th_ack); - tcp_seq prev_ack = 0; - int tack_offset = 28; /* IPv6 and IP options not supported */ - int twin_offset = 34; /* IPv6 and IP options not supported */ - int ack_size = (tp->t_flags & TF_STRETCHACK) ? - (maxseg_unacked * tp->t_maxseg) : (tp->t_maxseg << 1); - int segs_acked = (tp->t_flags & TF_STRETCHACK) ? maxseg_unacked : 2; - struct mbuf *prev_ack_pkt = NULL; - struct socket *so = tp->t_inpcb->inp_socket; - unsigned short winsz = ntohs(th->th_win); - unsigned int scaled_win = winsz<rcv_scale; - tcp_seq win_rtedge = org_ack + scaled_win; - - count = tp->t_lropktlen/tp->t_maxseg; - - prev_ack = (org_ack - tp->t_lropktlen) + ack_size; - if (prev_ack < org_ack) { - ack_chain = m_dup(m, M_DONTWAIT); - if (ack_chain) { - th->th_ack = htonl(prev_ack); - /* Keep adv window constant for duplicated ACK packets */ - scaled_win = win_rtedge - prev_ack; - if (scaled_win > (int32_t)(TCP_MAXWIN << tp->rcv_scale)) - scaled_win = (int32_t)(TCP_MAXWIN << tp->rcv_scale); - th->th_win = htons(scaled_win>>tp->rcv_scale); - if (lrodebug == 5) { - printf("%s: win = %d winsz = %d sc = %d" - " lro_len %d %d\n", - __func__, scaled_win>>tp->rcv_scale, winsz, - tp->rcv_scale, tp->t_lropktlen, count); - } - tail = ack_chain; - count -= segs_acked; /* accounts for prev_ack packet */ - count = (count <= segs_acked) ? 
0 : count - segs_acked; - tcpstat.tcps_sndacks++; - so_tc_update_stats(m, so, m_get_service_class(m)); - } else { - return NULL; - } - } - else { - tp->t_lropktlen = 0; - return NULL; } - - prev_ack_pkt = ack_chain; - - while (count > 0) { - if ((prev_ack + ack_size) < org_ack) { - prev_ack += ack_size; - } else { - /* - * The last ACK sent must have the ACK number that TCP - * thinks is the last sent ACK number. - */ - prev_ack = org_ack; - } - mnext = m_dup(prev_ack_pkt, M_DONTWAIT); - if (mnext) { - /* Keep adv window constant for duplicated ACK packets */ - scaled_win = win_rtedge - prev_ack; - if (scaled_win > (int32_t)(TCP_MAXWIN << tp->rcv_scale)) - scaled_win = (int32_t)(TCP_MAXWIN << tp->rcv_scale); - winsz = htons(scaled_win>>tp->rcv_scale); - if (lrodebug == 5) { - printf("%s: winsz = %d ack %x count %d\n", - __func__, scaled_win>>tp->rcv_scale, - prev_ack, count); - } - bcopy(&winsz, mtod(prev_ack_pkt, caddr_t) + twin_offset, 2); - HTONL(prev_ack); - bcopy(&prev_ack, mtod(prev_ack_pkt, caddr_t) + tack_offset, 4); - NTOHL(prev_ack); - tail->m_nextpkt = mnext; - tail = mnext; - count -= segs_acked; - tcpstat.tcps_sndacks++; - so_tc_update_stats(m, so, m_get_service_class(m)); - } else { - if (lrodebug == 5) { - printf("%s: failed to alloc mbuf.\n", __func__); - } - break; - } - prev_ack_pkt = mnext; - } - tp->t_lropktlen = 0; - return ack_chain; } static int -tcp_recv_throttle (struct tcpcb *tp) +tcp_recv_throttle(struct tcpcb *tp) { uint32_t base_rtt, newsize; struct sockbuf *sbrcv = &tp->t_inpcb->inp_socket->so_rcv; @@ -3166,8 +3193,9 @@ tcp_recv_throttle (struct tcpcb *tp) * If the connection is already being throttled, leave it * in that state until rtt comes closer to base rtt */ - if (tp->t_flagsext & TF_RECV_THROTTLE) + if (tp->t_flagsext & TF_RECV_THROTTLE) { return 1; + } base_rtt = get_base_rtt(tp); @@ -3178,8 +3206,9 @@ tcp_recv_throttle (struct tcpcb *tp) */ if (tp->t_rttcur > (base_rtt + target_qdelay)) { tp->t_flagsext |= TF_RECV_THROTTLE; - if (tp->t_recv_throttle_ts == 0) + if (tp->t_recv_throttle_ts == 0) { tp->t_recv_throttle_ts = tcp_now; + } /* * Reduce the recv socket buffer size to * minimize latecy. @@ -3204,8 +3233,9 @@ tcp_recv_throttle (struct tcpcb *tp) * Timestamps are not supported or there is no good RTT * measurement. Use IPDV in this case. */ - if (tp->acc_iaj > tcp_acc_iaj_react_limit) + if (tp->acc_iaj > tcp_acc_iaj_react_limit) { return 1; + } return 0; } diff --git a/bsd/netinet/tcp_sack.c b/bsd/netinet/tcp_sack.c index a7c8e38d2..731b6b275 100644 --- a/bsd/netinet/tcp_sack.c +++ b/bsd/netinet/tcp_sack.c @@ -33,17 +33,17 @@ * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. + * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software - * must display the following acknowledgement: + * must display the following acknowledgement: * This product includes software developed by the University of * California, Berkeley and its contributors. * 4. 
Neither the name of the University nor the names of its contributors - * may be used to endorse or promote products derived from this software - * without specific prior written permission. + * may be used to endorse or promote products derived from this software + * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE @@ -81,13 +81,10 @@ #include #include #include -#if INET6 #include #include #include -#endif #include -//#define TCPOUTFLAGS #include #include #include @@ -120,19 +117,6 @@ SYSCTL_INT(_net_inet_tcp, OID_AUTO, sack_globalholes, CTLFLAG_RD | CTLFLAG_LOCKE &tcp_sack_globalholes, 0, "Global number of TCP SACK holes currently allocated"); -static int tcp_detect_reordering = 1; -static int tcp_dsack_ignore_hw_duplicates = 0; - -#if (DEVELOPMENT || DEBUG) -SYSCTL_INT(_net_inet_tcp, OID_AUTO, detect_reordering, - CTLFLAG_RW | CTLFLAG_LOCKED, - &tcp_detect_reordering, 0, ""); - -SYSCTL_INT(_net_inet_tcp, OID_AUTO, ignore_hw_duplicates, - CTLFLAG_RW | CTLFLAG_LOCKED, - &tcp_dsack_ignore_hw_duplicates, 0, ""); -#endif /* (DEVELOPMENT || DEBUG) */ - extern struct zone *sack_hole_zone; #define TCP_VALIDATE_SACK_SEQ_NUMBERS(_tp_, _sb_, _ack_) \ @@ -221,8 +205,7 @@ tcp_update_sack_list(struct tcpcb *tp, tcp_seq rcv_start, tcp_seq rcv_end) /* * Copy the saved SACK blocks back. */ - bcopy(saved_blks, &tp->sackblks[num_head], - sizeof(struct sackblk) * num_saved); + bcopy(saved_blks, &tp->sackblks[num_head], sizeof(struct sackblk) * num_saved); } /* Save the number of SACK blocks. */ @@ -235,6 +218,9 @@ tcp_update_sack_list(struct tcpcb *tp, tcp_seq rcv_start, tcp_seq rcv_end) if ((tp->t_flags & TF_STRETCHACK) != 0 && tp->rcv_numsacks > 0) { tcp_reset_stretch_ack(tp); } + if (tp->rcv_numsacks > 0) { + tp->t_forced_acks = TCP_FORCED_ACKS_COUNT; + } #if TRAFFIC_MGT if (tp->acc_iaj > 0 && tp->rcv_numsacks > 0) { @@ -372,15 +358,21 @@ tcp_sack_detect_reordering(struct tcpcb *tp, struct sackhole *s, /* * Detect reordering from SACK information by checking * if recently sacked data was never retransmitted from this hole. + * + * First, we look for the byte in the list of retransmitted segments. This one + * will contain even the segments that are retransmitted thanks to RTO/TLP. + * + * Then, we check the sackhole which indicates whether or not the sackhole + * was subject to retransmission. */ - if (SEQ_LT(s->rxmit, sacked_seq)) { + if (SEQ_LT(s->rxmit, sacked_seq) && + (!tcp_do_better_lr || tcp_rxtseg_find(tp, sacked_seq - 1, sacked_seq - 1) == NULL)) { reordered = 1; tcpstat.tcps_avoid_rxmt++; } if (reordered) { - if (tcp_detect_reordering == 1 && - !(tp->t_flagsext & TF_PKTS_REORDERED)) { + if (!(tp->t_flagsext & TF_PKTS_REORDERED)) { tp->t_flagsext |= TF_PKTS_REORDERED; tcpstat.tcps_detect_reordering++; } @@ -426,6 +418,16 @@ tcp_sack_detect_reordering(struct tcpcb *tp, struct sackhole *s, } } +static void +tcp_sack_update_byte_counter(struct tcpcb *tp, uint32_t start, uint32_t end, + uint32_t *newbytes_acked, uint32_t *towards_fr_acked) +{ + *newbytes_acked += (end - start); + if (SEQ_GEQ(start, tp->send_highest_sack)) { + *towards_fr_acked += (end - start); + } +} + /* * Process cumulative ACK and the TCP SACK option to update the scoreboard. 
* tp->snd_holes is an ordered list of holes (oldest to newest, in terms of @@ -433,7 +435,7 @@ tcp_sack_detect_reordering(struct tcpcb *tp, struct sackhole *s, */ void tcp_sack_doack(struct tcpcb *tp, struct tcpopt *to, struct tcphdr *th, - u_int32_t *newbytes_acked) + u_int32_t *newbytes_acked, uint32_t *after_rexmit_acked) { struct sackhole *cur, *temp; struct sackblk sack, sack_blocks[TCP_MAX_SACK + 1], *sblkp; @@ -494,7 +496,6 @@ tcp_sack_doack(struct tcpcb *tp, struct tcpopt *to, struct tcphdr *th, * the logic that adds holes to the tail of the scoreboard). */ tp->snd_fack = SEQ_MAX(tp->snd_una, th_ack); - *newbytes_acked += (tp->snd_fack - tp->snd_una); } old_snd_fack = tp->snd_fack; @@ -521,7 +522,7 @@ tcp_sack_doack(struct tcpcb *tp, struct tcpopt *to, struct tcphdr *th, temp = tcp_sackhole_insert(tp, tp->snd_fack, sblkp->start, NULL); if (temp != NULL) { tp->snd_fack = sblkp->end; - *newbytes_acked += (sblkp->end - sblkp->start); + tcp_sack_update_byte_counter(tp, sblkp->start, sblkp->end, newbytes_acked, after_rexmit_acked); /* Go to the previous sack block. */ sblkp--; @@ -540,13 +541,13 @@ tcp_sack_doack(struct tcpcb *tp, struct tcpopt *to, struct tcphdr *th, } if (sblkp >= sack_blocks && SEQ_LT(tp->snd_fack, sblkp->end)) { - *newbytes_acked += (sblkp->end - tp->snd_fack); + tcp_sack_update_byte_counter(tp, tp->snd_fack, sblkp->end, newbytes_acked, after_rexmit_acked); tp->snd_fack = sblkp->end; } } } else if (SEQ_LT(tp->snd_fack, sblkp->end)) { /* fack is advanced. */ - *newbytes_acked += (sblkp->end - tp->snd_fack); + tcp_sack_update_byte_counter(tp, tp->snd_fack, sblkp->end, newbytes_acked, after_rexmit_acked); tp->snd_fack = sblkp->end; } /* We must have at least one SACK hole in scoreboard */ @@ -573,11 +574,15 @@ tcp_sack_doack(struct tcpcb *tp, struct tcpopt *to, struct tcphdr *th, continue; } tp->sackhint.sack_bytes_rexmit -= (cur->rxmit - cur->start); + if (tp->sackhint.sack_bytes_rexmit < 0) { + tp->sackhint.sack_bytes_rexmit = 0; + } + if (SEQ_LEQ(sblkp->start, cur->start)) { /* Data acks at least the beginning of hole */ if (SEQ_GEQ(sblkp->end, cur->end)) { /* Acks entire hole, so delete hole */ - *newbytes_acked += (cur->end - cur->start); + tcp_sack_update_byte_counter(tp, cur->start, cur->end, newbytes_acked, after_rexmit_acked); tcp_sack_detect_reordering(tp, cur, cur->end, old_snd_fack); @@ -591,7 +596,7 @@ tcp_sack_doack(struct tcpcb *tp, struct tcpopt *to, struct tcphdr *th, continue; } else { /* Move start of hole forward */ - *newbytes_acked += (sblkp->end - cur->start); + tcp_sack_update_byte_counter(tp, cur->start, sblkp->end, newbytes_acked, after_rexmit_acked); tcp_sack_detect_reordering(tp, cur, sblkp->end, old_snd_fack); cur->start = sblkp->end; @@ -601,7 +606,7 @@ tcp_sack_doack(struct tcpcb *tp, struct tcpopt *to, struct tcphdr *th, /* Data acks at least the end of hole */ if (SEQ_GEQ(sblkp->end, cur->end)) { /* Move end of hole backward */ - *newbytes_acked += (cur->end - sblkp->start); + tcp_sack_update_byte_counter(tp, sblkp->start, cur->end, newbytes_acked, after_rexmit_acked); tcp_sack_detect_reordering(tp, cur, cur->end, old_snd_fack); cur->end = sblkp->start; @@ -611,12 +616,12 @@ tcp_sack_doack(struct tcpcb *tp, struct tcpopt *to, struct tcphdr *th, * ACKs some data in the middle of a hole; * need to split current hole */ - *newbytes_acked += (sblkp->end - sblkp->start); tcp_sack_detect_reordering(tp, cur, sblkp->end, old_snd_fack); temp = tcp_sackhole_insert(tp, sblkp->end, cur->end, cur); if (temp != NULL) { + 
tcp_sack_update_byte_counter(tp, sblkp->start, sblkp->end, newbytes_acked, after_rexmit_acked); if (SEQ_GT(cur->rxmit, temp->rxmit)) { temp->rxmit = cur->rxmit; tp->sackhint.sack_bytes_rexmit @@ -662,6 +667,8 @@ tcp_free_sackholes(struct tcpcb *tp) tcp_sackhole_remove(tp, q); } tp->sackhint.sack_bytes_rexmit = 0; + tp->sackhint.sack_bytes_acked = 0; + tp->t_new_dupacks = 0; tp->sackhint.nexthole = NULL; tp->sack_newdata = 0; } @@ -687,9 +694,13 @@ tcp_sack_partialack(struct tcpcb *tp, struct tcphdr *th) if (((BYTES_ACKED(th, tp)) / tp->t_maxseg) > 2) { num_segs = 2; } - tp->snd_cwnd = (tp->sackhint.sack_bytes_rexmit + - (tp->snd_nxt - tp->sack_newdata) + - num_segs * tp->t_maxseg); + if (tcp_do_better_lr) { + tp->snd_cwnd = tcp_flight_size(tp) + num_segs * tp->t_maxseg; + } else { + tp->snd_cwnd = (tp->sackhint.sack_bytes_rexmit + + (tp->snd_nxt - tp->sack_newdata) + + num_segs * tp->t_maxseg); + } if (tp->snd_cwnd > tp->snd_ssthresh) { tp->snd_cwnd = tp->snd_ssthresh; } @@ -783,6 +794,23 @@ out: return hole; } +void +tcp_sack_lost_rexmit(struct tcpcb *tp) +{ + struct sackhole *hole = TAILQ_FIRST(&tp->snd_holes); + + while (hole) { + hole->rxmit = hole->start; + hole->rxmit_start = tcp_now; + + hole = TAILQ_NEXT(hole, scblink); + } + + tp->sackhint.nexthole = TAILQ_FIRST(&tp->snd_holes); + tp->sackhint.sack_bytes_rexmit = 0; + tp->sack_newdata = tp->snd_nxt; +} + /* * After a timeout, the SACK list may be rebuilt. This SACK information * should be used to avoid retransmitting SACKed data. This function @@ -924,11 +952,6 @@ tcp_sack_process_dsack(struct tcpcb *tp, struct tcpopt *to, tcpstat.tcps_dsack_recvd++; tp->t_dsack_recvd++; - /* ignore DSACK option, if DSACK is disabled */ - if (tp->t_flagsext & TF_DISABLE_DSACK) { - return TRUE; - } - /* If the DSACK is for TLP mark it as such */ if ((tp->t_flagsext & TF_SENT_TLPROBE) && first_sack.end == tp->t_tlphighrxt) { @@ -955,27 +978,8 @@ tcp_sack_process_dsack(struct tcpcb *tp, struct tcpopt *to, tcpstat.tcps_dsack_ackloss++; return TRUE; - } else if ((rxseg = tcp_rxtseg_find(tp, first_sack.start, - (first_sack.end - 1))) == NULL) { - /* - * Duplicate notification was not triggered by a - * retransmission. This might be due to network duplication, - * disable further DSACK processing. - */ - if (!tcp_dsack_ignore_hw_duplicates) { - tp->t_flagsext |= TF_DISABLE_DSACK; - tcpstat.tcps_dsack_disable++; - } } else { - /* - * If the segment was retransmitted only once, mark it as - * spurious. Otherwise ignore the duplicate notification. - */ - if (rxseg->rx_count == 1) { - rxseg->rx_flags |= TCP_RXT_SPURIOUS; - } else { - rxseg->rx_flags &= ~TCP_RXT_SPURIOUS; - } + tcp_rxtseg_set_spurious(tp, first_sack.start, (first_sack.end - 1)); } return TRUE; } diff --git a/bsd/netinet/tcp_seq.h b/bsd/netinet/tcp_seq.h index 963912783..11c1a6110 100644 --- a/bsd/netinet/tcp_seq.h +++ b/bsd/netinet/tcp_seq.h @@ -107,7 +107,7 @@ #define tcp_sendseqinit(tp) \ (tp)->snd_una = (tp)->snd_nxt = (tp)->snd_max = (tp)->snd_up = \ - (tp)->snd_recover = (tp)->iss + (tp)->snd_recover = (tp)->send_highest_sack = (tp)->iss #define TCP_PAWS_IDLE (24 * 24 * 60 * 60 * TCP_RETRANSHZ) /* timestamp wrap-around time */ diff --git a/bsd/netinet/tcp_subr.c b/bsd/netinet/tcp_subr.c index 20c8a7b61..fe3a0192a 100644 --- a/bsd/netinet/tcp_subr.c +++ b/bsd/netinet/tcp_subr.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2019 Apple Inc. All rights reserved. + * Copyright (c) 2000-2020 Apple Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -68,7 +68,6 @@ #include #include -#include #include #include #include @@ -99,20 +98,14 @@ #include #include #include -#if INET6 #include #include -#endif #include -#if INET6 #include -#endif #include #include #include -#if INET6 #include -#endif #include #include #include @@ -123,9 +116,7 @@ #include #include -#if INET6 #include -#endif #include #if TCPDEBUG #include @@ -136,9 +127,7 @@ #if IPSEC #include -#if INET6 #include -#endif #endif /* IPSEC */ #if NECP @@ -147,10 +136,6 @@ #undef tcp_minmssoverload -#if CONFIG_MACF_NET -#include -#endif /* MAC_NET */ - #include #include #include @@ -159,12 +144,9 @@ #include #include -#include - #define DBG_FNC_TCP_CLOSE NETDBG_CODE(DBG_NETTCP, ((5 << 8) | 2)) static tcp_cc tcp_ccgen; -extern int tcp_lq_overflow; extern struct tcptimerlist tcp_timer_list; extern struct tcptailq tcp_tw_tailq; @@ -172,11 +154,9 @@ extern struct tcptailq tcp_tw_tailq; SYSCTL_SKMEM_TCP_INT(TCPCTL_MSSDFLT, mssdflt, CTLFLAG_RW | CTLFLAG_LOCKED, int, tcp_mssdflt, TCP_MSS, "Default TCP Maximum Segment Size"); -#if INET6 SYSCTL_SKMEM_TCP_INT(TCPCTL_V6MSSDFLT, v6mssdflt, CTLFLAG_RW | CTLFLAG_LOCKED, int, tcp_v6mssdflt, TCP6_MSS, "Default TCP Maximum Segment Size for IPv6"); -#endif int tcp_sysctl_fastopenkey(struct sysctl_oid *, void *, int, struct sysctl_req *); @@ -211,22 +191,6 @@ SYSCTL_SKMEM_TCP_INT(OID_AUTO, microuptime_init, CTLFLAG_RD | CTLFLAG_LOCKED, */ SYSCTL_SKMEM_TCP_INT(OID_AUTO, minmss, CTLFLAG_RW | CTLFLAG_LOCKED, int, tcp_minmss, TCP_MINMSS, "Minmum TCP Maximum Segment Size"); -int tcp_do_rfc1323 = 1; -#if (DEVELOPMENT || DEBUG) -SYSCTL_INT(_net_inet_tcp, TCPCTL_DO_RFC1323, rfc1323, - CTLFLAG_RW | CTLFLAG_LOCKED, &tcp_do_rfc1323, 0, - "Enable rfc1323 (high performance TCP) extensions"); -#endif /* (DEVELOPMENT || DEBUG) */ - -// Not used -static int tcp_do_rfc1644 = 0; -SYSCTL_INT(_net_inet_tcp, TCPCTL_DO_RFC1644, rfc1644, - CTLFLAG_RW | CTLFLAG_LOCKED, &tcp_do_rfc1644, 0, - "Enable rfc1644 (TTCP) extensions"); - -SYSCTL_SKMEM_TCP_INT(OID_AUTO, do_tcpdrain, CTLFLAG_RW | CTLFLAG_LOCKED, - static int, do_tcpdrain, 0, - "Enable tcp_drain routine for extra help when low on mbufs"); SYSCTL_INT(_net_inet_tcp, OID_AUTO, pcbcount, CTLFLAG_RD | CTLFLAG_LOCKED, &tcbinfo.ipi_count, 0, "Number of active PCBs"); @@ -272,7 +236,10 @@ SYSCTL_SKMEM_TCP_INT(OID_AUTO, init_rtt_from_cache, "Initalize RTT from route cache"); #endif /* (DEVELOPMENT || DEBUG) */ -static void tcp_cleartaocache(void); +static int tso_debug = 0; +SYSCTL_INT(_net_inet_tcp, OID_AUTO, tso_debug, CTLFLAG_RW | CTLFLAG_LOCKED, + &tso_debug, 0, "TSO verbosity"); + static void tcp_notify(struct inpcb *, int); struct zone *sack_hole_zone; @@ -288,8 +255,6 @@ static void tcp_sbrcv_grow_rwin(struct tcpcb *tp, struct sockbuf *sb); #define TCP_BWMEAS_BURST_MINSIZE 6 #define TCP_BWMEAS_BURST_MAXSIZE 25 -static uint32_t bwmeas_elm_size; - /* * Target size of TCP PCB hash tables. Must be a power of two. 
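The new `tso_debug` knob registered above with SYSCTL_INT is a plain read-write integer; assuming the OID surfaces as `net.inet.tcp.tso_debug` (the usual result of `_net_inet_tcp` plus the OID name), a user-space sketch of reading and raising the verbosity looks like this:

#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
	int val = 0;
	size_t len = sizeof(val);

	/* read the current TSO verbosity */
	if (sysctlbyname("net.inet.tcp.tso_debug", &val, &len, NULL, 0) == 0) {
		printf("tso_debug=%d\n", val);
	}

	/* raise verbosity; the OID is CTLFLAG_RW, so this needs root */
	int on = 1;
	if (sysctlbyname("net.inet.tcp.tso_debug", NULL, NULL, &on, sizeof(on)) != 0) {
		perror("sysctlbyname");
	}
	return 0;
}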
* @@ -341,21 +306,18 @@ void tcp_tfo_gen_cookie(struct inpcb *inp, u_char *out, size_t blk_size) { u_char in[CCAES_BLOCK_SIZE]; -#if INET6 int isipv6 = inp->inp_vflag & INP_IPV6; -#endif VERIFY(blk_size == CCAES_BLOCK_SIZE); bzero(&in[0], CCAES_BLOCK_SIZE); bzero(&out[0], CCAES_BLOCK_SIZE); -#if INET6 if (isipv6) { memcpy(in, &inp->in6p_faddr, sizeof(struct in6_addr)); - } else -#endif /* INET6 */ - memcpy(in, &inp->inp_faddr, sizeof(struct in_addr)); + } else { + memcpy(in, &inp->inp_faddr, sizeof(struct in_addr)); + } aes_encrypt_cbc(in, NULL, 1, out, &tfo_ctx); } @@ -489,8 +451,6 @@ tcp_init(struct protosw *pp, struct domain *dp) tcp_initialized = 1; tcp_ccgen = 1; - tcp_cleartaocache(); - tcp_keepinit = TCPTV_KEEP_INIT; tcp_keepidle = TCPTV_KEEP_IDLE; tcp_keepintvl = TCPTV_KEEPINTVL; @@ -507,7 +467,7 @@ tcp_init(struct protosw *pp, struct domain *dp) /* expose initial uptime/now via systcl for utcp to keep time sync */ tcp_now_init = tcp_now; tcp_microuptime_init = - tcp_uptime.tv_usec + (tcp_uptime.tv_sec * USEC_PER_SEC); + (uint32_t)(tcp_uptime.tv_usec + (tcp_uptime.tv_sec * USEC_PER_SEC)); SYSCTL_SKMEM_UPDATE_FIELD(tcp.microuptime_init, tcp_microuptime_init); SYSCTL_SKMEM_UPDATE_FIELD(tcp.now_init, tcp_now_init); @@ -557,60 +517,32 @@ tcp_init(struct protosw *pp, struct domain *dp) &tcbinfo.ipi_hashmask); tcbinfo.ipi_porthashbase = hashinit(tcp_tcbhashsize, M_PCB, &tcbinfo.ipi_porthashmask); - str_size = P2ROUNDUP(sizeof(struct inp_tp), sizeof(u_int64_t)); - tcbinfo.ipi_zone = zinit(str_size, 120000 * str_size, 8192, "tcpcb"); - zone_change(tcbinfo.ipi_zone, Z_CALLERACCT, FALSE); - zone_change(tcbinfo.ipi_zone, Z_EXPAND, TRUE); + str_size = (vm_size_t)P2ROUNDUP(sizeof(struct inp_tp), sizeof(u_int64_t)); + tcbinfo.ipi_zone = zone_create("tcpcb", str_size, ZC_NONE); tcbinfo.ipi_gc = tcp_gc; tcbinfo.ipi_timer = tcp_itimer; in_pcbinfo_attach(&tcbinfo); - str_size = P2ROUNDUP(sizeof(struct sackhole), sizeof(u_int64_t)); - sack_hole_zone = zinit(str_size, 120000 * str_size, 8192, - "sack_hole zone"); - zone_change(sack_hole_zone, Z_CALLERACCT, FALSE); - zone_change(sack_hole_zone, Z_EXPAND, TRUE); - - str_size = P2ROUNDUP(sizeof(struct tseg_qent), sizeof(u_int64_t)); - tcp_reass_zone = zinit(str_size, (nmbclusters >> 4) * str_size, - 0, "tcp_reass_zone"); - if (tcp_reass_zone == NULL) { - panic("%s: failed allocating tcp_reass_zone", __func__); - /* NOTREACHED */ - } - zone_change(tcp_reass_zone, Z_CALLERACCT, FALSE); - zone_change(tcp_reass_zone, Z_EXPAND, TRUE); + str_size = (vm_size_t)P2ROUNDUP(sizeof(struct sackhole), sizeof(u_int64_t)); + sack_hole_zone = zone_create("sack_hole zone", str_size, ZC_NONE); - bwmeas_elm_size = P2ROUNDUP(sizeof(struct bwmeas), sizeof(u_int64_t)); - tcp_bwmeas_zone = zinit(bwmeas_elm_size, (100 * bwmeas_elm_size), 0, - "tcp_bwmeas_zone"); - if (tcp_bwmeas_zone == NULL) { - panic("%s: failed allocating tcp_bwmeas_zone", __func__); - /* NOTREACHED */ - } - zone_change(tcp_bwmeas_zone, Z_CALLERACCT, FALSE); - zone_change(tcp_bwmeas_zone, Z_EXPAND, TRUE); + str_size = (vm_size_t)P2ROUNDUP(sizeof(struct tseg_qent), sizeof(u_int64_t)); + tcp_reass_zone = zone_create("tcp_reass_zone", str_size, ZC_NONE); + + str_size = (vm_size_t)P2ROUNDUP(sizeof(struct bwmeas), sizeof(u_int64_t)); + tcp_bwmeas_zone = zone_create("tcp_bwmeas_zone", str_size, ZC_ZFREE_CLEARMEM); - str_size = P2ROUNDUP(sizeof(struct tcp_ccstate), sizeof(u_int64_t)); - tcp_cc_zone = zinit(str_size, 20000 * str_size, 0, "tcp_cc_zone"); - zone_change(tcp_cc_zone, Z_CALLERACCT, FALSE); - 
zone_change(tcp_cc_zone, Z_EXPAND, TRUE); + str_size = (vm_size_t)P2ROUNDUP(sizeof(struct tcp_ccstate), sizeof(u_int64_t)); + tcp_cc_zone = zone_create("tcp_cc_zone", str_size, ZC_NONE); - str_size = P2ROUNDUP(sizeof(struct tcp_rxt_seg), sizeof(u_int64_t)); - tcp_rxt_seg_zone = zinit(str_size, 10000 * str_size, 0, - "tcp_rxt_seg_zone"); - zone_change(tcp_rxt_seg_zone, Z_CALLERACCT, FALSE); - zone_change(tcp_rxt_seg_zone, Z_EXPAND, TRUE); + str_size = (vm_size_t)P2ROUNDUP(sizeof(struct tcp_rxt_seg), sizeof(u_int64_t)); + tcp_rxt_seg_zone = zone_create("tcp_rxt_seg_zone", str_size, ZC_NONE); -#if INET6 #define TCP_MINPROTOHDR (sizeof(struct ip6_hdr) + sizeof(struct tcphdr)) -#else /* INET6 */ -#define TCP_MINPROTOHDR (sizeof(struct tcpiphdr)) -#endif /* INET6 */ if (max_protohdr < TCP_MINPROTOHDR) { _max_protohdr = TCP_MINPROTOHDR; - _max_protohdr = max_protohdr; /* round it up */ + _max_protohdr = (int)max_protohdr; /* round it up */ } if (max_linkhdr + max_protohdr > MCLBYTES) { panic("tcp_init"); @@ -651,9 +583,6 @@ tcp_init(struct protosw *pp, struct domain *dp) tcp_uptime_lock = lck_spin_alloc_init(tcp_uptime_mtx_grp, tcp_uptime_mtx_attr); - /* Initialize TCP LRO data structures */ - tcp_lro_init(); - /* Initialize TCP Cache */ tcp_cache_init(); @@ -670,12 +599,12 @@ tcp_init(struct protosw *pp, struct domain *dp) PE_parse_boot_argn("tcp_log", &tcp_log_enable_flags, sizeof(tcp_log_enable_flags)); /* - * If more than 60 MB of mbuf pool is available, increase the + * If more than 4GB of actual memory is available, increase the * maximum allowed receive and send socket buffer size. */ - if (nmbclusters > 30720) { - tcp_autorcvbuf_max = 2 * 1024 * 1024; - tcp_autosndbuf_max = 2 * 1024 * 1024; + if (mem_actual >= (1ULL << (GBSHIFT + 2))) { + tcp_autorcvbuf_max = 4 * 1024 * 1024; + tcp_autosndbuf_max = 4 * 1024 * 1024; SYSCTL_SKMEM_UPDATE_FIELD(tcp.autorcvbufmax, tcp_autorcvbuf_max); SYSCTL_SKMEM_UPDATE_FIELD(tcp.autosndbufmax, tcp_autosndbuf_max); @@ -693,7 +622,6 @@ tcp_fillheaders(struct tcpcb *tp, void *ip_ptr, void *tcp_ptr) struct inpcb *inp = tp->t_inpcb; struct tcphdr *tcp_hdr = (struct tcphdr *)tcp_ptr; -#if INET6 if ((inp->inp_vflag & INP_IPV6) != 0) { struct ip6_hdr *ip6; @@ -709,9 +637,7 @@ tcp_fillheaders(struct tcpcb *tp, void *ip_ptr, void *tcp_ptr) ip6->ip6_dst = inp->in6p_faddr; tcp_hdr->th_sum = in6_pseudo(&inp->in6p_laddr, &inp->in6p_faddr, htonl(sizeof(struct tcphdr) + IPPROTO_TCP)); - } else -#endif - { + } else { struct ip *ip = (struct ip *) ip_ptr; ip->ip_vhl = IP_VHL_BORING; @@ -753,7 +679,7 @@ tcp_maketemplate(struct tcpcb *tp) m = m_get(M_DONTWAIT, MT_HEADER); if (m == NULL) { - return 0; + return NULL; } m->m_len = sizeof(struct tcptemp); n = mtod(m, struct tcptemp *); @@ -778,50 +704,44 @@ tcp_maketemplate(struct tcpcb *tp) */ void tcp_respond(struct tcpcb *tp, void *ipgen, struct tcphdr *th, struct mbuf *m, - tcp_seq ack, tcp_seq seq, int flags, struct tcp_respond_args *tra) + tcp_seq ack, tcp_seq seq, uint8_t flags, struct tcp_respond_args *tra) { - int tlen; + uint16_t tlen; int win = 0; struct route *ro = 0; struct route sro; struct ip *ip; struct tcphdr *nth; -#if INET6 struct route_in6 *ro6 = 0; struct route_in6 sro6; struct ip6_hdr *ip6; int isipv6; -#endif /* INET6 */ struct ifnet *outif; int sotc = SO_TC_UNSPEC; + bool check_qos_marking_again = FALSE; -#if INET6 isipv6 = IP_VHL_V(((struct ip *)ipgen)->ip_vhl) == 6; ip6 = ipgen; -#endif /* INET6 */ ip = ipgen; if (tp) { + check_qos_marking_again = tp->t_inpcb->inp_socket->so_flags1 & 
SOF1_QOSMARKING_POLICY_OVERRIDE ? FALSE : TRUE; if (!(flags & TH_RST)) { win = tcp_sbspace(tp); if (win > (int32_t)TCP_MAXWIN << tp->rcv_scale) { win = (int32_t)TCP_MAXWIN << tp->rcv_scale; } } -#if INET6 if (isipv6) { ro6 = &tp->t_inpcb->in6p_route; - } else -#endif /* INET6 */ - ro = &tp->t_inpcb->inp_route; + } else { + ro = &tp->t_inpcb->inp_route; + } } else { -#if INET6 if (isipv6) { ro6 = &sro6; bzero(ro6, sizeof(*ro6)); - } else -#endif /* INET6 */ - { + } else { ro = &sro; bzero(ro, sizeof(*ro)); } @@ -833,7 +753,6 @@ tcp_respond(struct tcpcb *tp, void *ipgen, struct tcphdr *th, struct mbuf *m, } tlen = 0; m->m_data += max_linkhdr; -#if INET6 if (isipv6) { VERIFY((MHLEN - max_linkhdr) >= (sizeof(*ip6) + sizeof(*nth))); @@ -841,9 +760,7 @@ tcp_respond(struct tcpcb *tp, void *ipgen, struct tcphdr *th, struct mbuf *m, sizeof(struct ip6_hdr)); ip6 = mtod(m, struct ip6_hdr *); nth = (struct tcphdr *)(void *)(ip6 + 1); - } else -#endif /* INET6 */ - { + } else { VERIFY((MHLEN - max_linkhdr) >= (sizeof(*ip) + sizeof(*nth))); bcopy((caddr_t)ip, mtod(m, caddr_t), sizeof(struct ip)); @@ -864,15 +781,12 @@ tcp_respond(struct tcpcb *tp, void *ipgen, struct tcphdr *th, struct mbuf *m, /* m_len is set later */ tlen = 0; #define xchg(a, b, type) { type t; t = a; a = b; b = t; } -#if INET6 if (isipv6) { /* Expect 32-bit aligned IP on strict-align platforms */ IP6_HDR_STRICT_ALIGNMENT_CHECK(ip6); xchg(ip6->ip6_dst, ip6->ip6_src, struct in6_addr); nth = (struct tcphdr *)(void *)(ip6 + 1); - } else -#endif /* INET6 */ - { + } else { /* Expect 32-bit aligned IP on strict-align platforms */ IP_HDR_STRICT_ALIGNMENT_CHECK(ip); xchg(ip->ip_dst.s_addr, ip->ip_src.s_addr, n_long); @@ -890,17 +804,14 @@ tcp_respond(struct tcpcb *tp, void *ipgen, struct tcphdr *th, struct mbuf *m, xchg(nth->th_dport, nth->th_sport, n_short); #undef xchg } -#if INET6 if (isipv6) { ip6->ip6_plen = htons((u_short)(sizeof(struct tcphdr) + tlen)); tlen += sizeof(struct ip6_hdr) + sizeof(struct tcphdr); - } else -#endif - { + } else { tlen += sizeof(struct tcpiphdr); ip->ip_len = tlen; - ip->ip_ttl = ip_defttl; + ip->ip_ttl = (uint8_t)ip_defttl; } m->m_len = tlen; m->m_pkthdr.len = tlen; @@ -908,21 +819,6 @@ tcp_respond(struct tcpcb *tp, void *ipgen, struct tcphdr *th, struct mbuf *m, if (tra->keep_alive) { m->m_pkthdr.pkt_flags |= PKTF_KEEPALIVE; } -#if CONFIG_MACF_NET - if (tp != NULL && tp->t_inpcb != NULL) { - /* - * Packet is associated with a socket, so allow the - * label of the response to reflect the socket label. - */ - mac_mbuf_label_associate_inpcb(tp->t_inpcb, m); - } else { - /* - * Packet is not associated with a socket, so possibly - * update the label in place. - */ - mac_netinet_tcp_reply(m); - } -#endif nth->th_seq = htonl(seq); nth->th_ack = htonl(ack); @@ -935,7 +831,6 @@ tcp_respond(struct tcpcb *tp, void *ipgen, struct tcphdr *th, struct mbuf *m, nth->th_win = htons((u_short)win); } nth->th_urp = 0; -#if INET6 if (isipv6) { nth->th_sum = 0; nth->th_sum = in6_pseudo(&ip6->ip6_src, &ip6->ip6_dst, @@ -944,9 +839,7 @@ tcp_respond(struct tcpcb *tp, void *ipgen, struct tcphdr *th, struct mbuf *m, m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); ip6->ip6_hlim = in6_selecthlim(tp ? tp->t_inpcb : NULL, ro6 && ro6->ro_rt ? 
ro6->ro_rt->rt_ifp : NULL); - } else -#endif /* INET6 */ - { + } else { nth->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, htons((u_short)(tlen - sizeof(struct ip) + ip->ip_p))); m->m_pkthdr.csum_flags = CSUM_TCP; @@ -959,7 +852,7 @@ tcp_respond(struct tcpcb *tp, void *ipgen, struct tcphdr *th, struct mbuf *m, #endif #if NECP - necp_mark_packet_from_socket(m, tp ? tp->t_inpcb : NULL, 0, 0, 0); + necp_mark_packet_from_socket(m, tp ? tp->t_inpcb : NULL, 0, 0, 0, 0); #endif /* NECP */ #if IPSEC @@ -976,8 +869,12 @@ tcp_respond(struct tcpcb *tp, void *ipgen, struct tcphdr *th, struct mbuf *m, svc_flags |= PKT_SCF_IPV6; } sotc = tp->t_inpcb->inp_socket->so_traffic_class; - set_packet_service_class(m, tp->t_inpcb->inp_socket, - sotc, svc_flags); + if ((flags & TH_RST) == 0) { + set_packet_service_class(m, tp->t_inpcb->inp_socket, + sotc, svc_flags); + } else { + m_set_service_class(m, MBUF_SC_BK_SYS); + } /* Embed flowhash and flow control flags */ m->m_pkthdr.pkt_flowsrc = FLOWSRC_INPCB; @@ -986,9 +883,17 @@ tcp_respond(struct tcpcb *tp, void *ipgen, struct tcphdr *th, struct mbuf *m, m->m_pkthdr.pkt_proto = IPPROTO_TCP; m->m_pkthdr.tx_tcp_pid = tp->t_inpcb->inp_socket->last_pid; m->m_pkthdr.tx_tcp_e_pid = tp->t_inpcb->inp_socket->e_pid; + + if (flags & TH_RST) { + m->m_pkthdr.comp_gencnt = tp->t_comp_gencnt; + } + } else { + if (flags & TH_RST) { + m->m_pkthdr.comp_gencnt = TCP_ACK_COMPRESSION_DUMMY; + m_set_service_class(m, MBUF_SC_BK_SYS); + } } -#if INET6 if (isipv6) { struct ip6_out_args ip6oa; bzero(&ip6oa, sizeof(ip6oa)); @@ -1020,11 +925,25 @@ tcp_respond(struct tcpcb *tp, void *ipgen, struct tcphdr *th, struct mbuf *m, if ((tp->t_inpcb->inp_socket->so_flags1 & SOF1_QOSMARKING_ALLOWED)) { ip6oa.ip6oa_flags |= IP6OAF_QOSMARKING_ALLOWED; } + ip6oa.qos_marking_gencount = tp->t_inpcb->inp_policyresult.results.qos_marking_gencount; + if (check_qos_marking_again) { + ip6oa.ip6oa_flags |= IP6OAF_REDO_QOSMARKING_POLICY; + } ip6oa.ip6oa_netsvctype = tp->t_inpcb->inp_socket->so_netsvctype; } (void) ip6_output(m, NULL, ro6, IPV6_OUTARGS, NULL, NULL, &ip6oa); + if (check_qos_marking_again) { + struct inpcb *inp = tp->t_inpcb; + inp->inp_policyresult.results.qos_marking_gencount = ip6oa.qos_marking_gencount; + if (ip6oa.ip6oa_flags & IP6OAF_QOSMARKING_ALLOWED) { + inp->inp_socket->so_flags1 |= SOF1_QOSMARKING_ALLOWED; + } else { + inp->inp_socket->so_flags1 &= ~SOF1_QOSMARKING_ALLOWED; + } + } + if (tp != NULL && ro6 != NULL && ro6->ro_rt != NULL && (outif = ro6->ro_rt->rt_ifp) != tp->t_inpcb->in6p_last_outifp) { @@ -1034,9 +953,7 @@ tcp_respond(struct tcpcb *tp, void *ipgen, struct tcphdr *th, struct mbuf *m, if (ro6 == &sro6) { ROUTE_RELEASE(ro6); } - } else -#endif /* INET6 */ - { + } else { struct ip_out_args ipoa; bzero(&ipoa, sizeof(ipoa)); ipoa.ipoa_boundif = tra->ifscope; @@ -1064,6 +981,10 @@ tcp_respond(struct tcpcb *tp, void *ipgen, struct tcphdr *th, struct mbuf *m, if ((tp->t_inpcb->inp_socket->so_flags1 & SOF1_QOSMARKING_ALLOWED)) { ipoa.ipoa_flags |= IPOAF_QOSMARKING_ALLOWED; } + if (!(tp->t_inpcb->inp_socket->so_flags1 & SOF1_QOSMARKING_POLICY_OVERRIDE)) { + ipoa.ipoa_flags |= IPOAF_REDO_QOSMARKING_POLICY; + } + ipoa.qos_marking_gencount = tp->t_inpcb->inp_policyresult.results.qos_marking_gencount; ipoa.ipoa_netsvctype = tp->t_inpcb->inp_socket->so_netsvctype; } if (ro != &sro) { @@ -1075,6 +996,15 @@ tcp_respond(struct tcpcb *tp, void *ipgen, struct tcphdr *th, struct mbuf *m, */ (void) ip_output(m, NULL, &sro, IP_OUTARGS, NULL, &ipoa); + if (check_qos_marking_again) { + 
struct inpcb *inp = tp->t_inpcb; + inp->inp_policyresult.results.qos_marking_gencount = ipoa.qos_marking_gencount; + if (ipoa.ipoa_flags & IPOAF_QOSMARKING_ALLOWED) { + inp->inp_socket->so_flags1 |= SOF1_QOSMARKING_ALLOWED; + } else { + inp->inp_socket->so_flags1 &= ~SOF1_QOSMARKING_ALLOWED; + } + } if (tp != NULL && sro.ro_rt != NULL && (outif = sro.ro_rt->rt_ifp) != tp->t_inpcb->inp_last_outifp) { @@ -1101,9 +1031,7 @@ tcp_newtcpcb(struct inpcb *inp) struct inp_tp *it; struct tcpcb *tp; struct socket *so = inp->inp_socket; -#if INET6 int isipv6 = (inp->inp_vflag & INP_IPV6) != 0; -#endif /* INET6 */ calculate_tcp_clock(); @@ -1116,18 +1044,10 @@ tcp_newtcpcb(struct inpcb *inp) bzero((char *) tp, sizeof(struct tcpcb)); LIST_INIT(&tp->t_segq); - tp->t_maxseg = tp->t_maxopd = -#if INET6 - isipv6 ? tcp_v6mssdflt : -#endif /* INET6 */ - tcp_mssdflt; + tp->t_maxseg = tp->t_maxopd = isipv6 ? tcp_v6mssdflt : tcp_mssdflt; - if (tcp_do_rfc1323) { - tp->t_flags = (TF_REQ_SCALE | TF_REQ_TSTMP); - } - if (tcp_do_sack) { - tp->t_flagsext |= TF_SACK_ENABLE; - } + tp->t_flags = (TF_REQ_SCALE | TF_REQ_TSTMP); + tp->t_flagsext |= TF_SACK_ENABLE; TAILQ_INIT(&tp->snd_holes); SLIST_INIT(&tp->t_rxt_segments); @@ -1157,7 +1077,7 @@ tcp_newtcpcb(struct inpcb *inp) CC_ALGO(tp)->init(tp); } - tp->snd_cwnd = TCP_CC_CWND_INIT_BYTES; + tp->snd_cwnd = tcp_initial_cwnd(tp); tp->snd_ssthresh = TCP_MAXWIN << TCP_MAX_WINSHIFT; tp->snd_ssthresh_prev = TCP_MAXWIN << TCP_MAX_WINSHIFT; tp->t_rcvtime = tcp_now; @@ -1166,7 +1086,10 @@ tcp_newtcpcb(struct inpcb *inp) tp->t_persist_timeout = tcp_max_persist_timeout; tp->t_persist_stop = 0; tp->t_flagsext |= TF_RCVUNACK_WAITSS; - tp->t_rexmtthresh = tcprexmtthresh; + tp->t_rexmtthresh = (uint8_t)tcprexmtthresh; + tp->rfbuf_ts = tcp_now; + tp->rfbuf_space = tcp_initial_cwnd(tp); + tp->t_forced_acks = TCP_FORCED_ACKS_COUNT; /* Enable bandwidth measurement on this connection */ tp->t_flagsext |= TF_MEASURESNDBW; @@ -1181,12 +1104,20 @@ tcp_newtcpcb(struct inpcb *inp) tp->t_twentry.tqe_next = NULL; tp->t_twentry.tqe_prev = NULL; + if (__probable(tcp_do_ack_compression)) { + read_frandom(&tp->t_comp_gencnt, sizeof(tp->t_comp_gencnt)); + if (tp->t_comp_gencnt <= TCP_ACK_COMPRESSION_DUMMY) { + tp->t_comp_gencnt = TCP_ACK_COMPRESSION_DUMMY + 1; + } + tp->t_comp_lastinc = tcp_now; + } + /* * IPv4 TTL initialization is necessary for an IPv6 socket as well, * because the socket may be bound to an IPv6 wildcard address, * which may match an IPv4-mapped IPv6 address. 
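The tcp_newtcpcb() change above seeds a per-connection ACK-compression generation counter from the random pool and keeps it strictly above the reserved dummy value. A minimal user-space sketch of the same seeding rule; GENCNT_DUMMY and arc4random() stand in for TCP_ACK_COMPRESSION_DUMMY and read_frandom():

#include <stdint.h>
#include <stdlib.h>

#define GENCNT_DUMMY 1u   /* assumed sentinel, analogous to TCP_ACK_COMPRESSION_DUMMY */

/* Seed a generation counter that never collides with the sentinel value
 * reserved for packets that must bypass ACK compression. */
static uint32_t
seed_gencnt(void)
{
	uint32_t g = arc4random();
	if (g <= GENCNT_DUMMY) {
		g = GENCNT_DUMMY + 1;
	}
	return g;
}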
*/ - inp->inp_ip_ttl = ip_defttl; + inp->inp_ip_ttl = (uint8_t)ip_defttl; inp->inp_ppcb = (caddr_t)tp; return tp; /* XXX */ } @@ -1216,7 +1147,7 @@ tcp_drop(struct tcpcb *tp, int errno) if (errno == ETIMEDOUT && tp->t_softerror) { errno = tp->t_softerror; } - so->so_error = errno; + so->so_error = (u_short)errno; TCP_LOG_CONNECTION_SUMMARY(tp); @@ -1374,14 +1305,14 @@ tcp_flow_lim_stats(struct ifnet_stats_per_flow *ifs, if (ifs->bw_sndbw_max > 0) { /* convert from bytes per ms to bits per second */ ifs->bw_sndbw_max *= 8000; - stat->lim_ul_max_bandwidth = max(stat->lim_ul_max_bandwidth, + stat->lim_ul_max_bandwidth = MAX(stat->lim_ul_max_bandwidth, ifs->bw_sndbw_max); } if (ifs->bw_rcvbw_max > 0) { /* convert from bytes per ms to bits per second */ ifs->bw_rcvbw_max *= 8000; - stat->lim_dl_max_bandwidth = max(stat->lim_dl_max_bandwidth, + stat->lim_dl_max_bandwidth = MAX(stat->lim_dl_max_bandwidth, ifs->bw_rcvbw_max); } @@ -1412,7 +1343,7 @@ tcp_flow_lim_stats(struct ifnet_stats_per_flow *ifs, if (stat->lim_rtt_min == 0) { stat->lim_rtt_min = ifs->rttmin; } else { - stat->lim_rtt_min = min(stat->lim_rtt_min, ifs->rttmin); + stat->lim_rtt_min = MIN(stat->lim_rtt_min, ifs->rttmin); } /* connection timeouts */ @@ -1436,9 +1367,7 @@ tcp_close(struct tcpcb *tp) { struct inpcb *inp = tp->t_inpcb; struct socket *so = inp->inp_socket; -#if INET6 int isipv6 = (inp->inp_vflag & INP_IPV6) != 0; -#endif /* INET6 */ struct route *ro; struct rtentry *rt; int dosavessthresh; @@ -1449,6 +1378,8 @@ tcp_close(struct tcpcb *tp) return NULL; } + tcp_del_fsw_flow(tp); + tcp_canceltimers(tp); KERNEL_DEBUG(DBG_FNC_TCP_CLOSE | DBG_FUNC_START, tp, 0, 0, 0, 0); @@ -1475,11 +1406,7 @@ tcp_close(struct tcpcb *tp) DTRACE_TCP4(state__change, void, NULL, struct inpcb *, inp, struct tcpcb *, tp, int32_t, TCPS_CLOSED); -#if INET6 ro = (isipv6 ? (struct route *)&inp->in6p_route : &inp->inp_route); -#else - ro = &inp->inp_route; -#endif rt = ro->ro_rt; if (rt != NULL) { RT_LOCK_SPIN(rt); @@ -1500,7 +1427,6 @@ tcp_close(struct tcpcb *tp) u_int32_t i = 0; bool log_rtt = false; -#if INET6 if (isipv6) { struct sockaddr_in6 *sin6; @@ -1511,9 +1437,7 @@ tcp_close(struct tcpcb *tp) if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) { goto no_valid_rt; } - } else -#endif /* INET6 */ - if (ROUTE_UNUSABLE(ro) || + } else if (ROUTE_UNUSABLE(ro) || SIN(rt_key(rt))->sin_addr.s_addr == INADDR_ANY) { DTRACE_TCP4(state__change, void, NULL, struct inpcb *, inp, struct tcpcb *, tp, @@ -1589,10 +1513,8 @@ tcp_close(struct tcpcb *tp) i = 2; } i *= (u_int32_t)(tp->t_maxseg + -#if INET6 isipv6 ? sizeof(struct ip6_hdr) + sizeof(struct tcphdr) : -#endif /* INET6 */ sizeof(struct tcpiphdr)); if (rt->rt_rmx.rmx_ssthresh) { rt->rt_rmx.rmx_ssthresh = @@ -1607,7 +1529,7 @@ tcp_close(struct tcpcb *tp) /* * Mark route for deletion if no information is cached. 
*/ - if (rt != NULL && (so->so_flags & SOF_OVERFLOW) && tcp_lq_overflow) { + if (rt != NULL && (so->so_flags & SOF_OVERFLOW)) { if (!(rt->rt_rmx.rmx_locks & RTV_RTT) && rt->rt_rmx.rmx_rtt == 0) { rt->rt_flags |= RTF_DELCLONE; @@ -1653,14 +1575,6 @@ no_valid_rt: */ sodisconnectwakeup(so); - /* - * Clean up any LRO state - */ - if (tp->t_flagsext & TF_LRO_OFFLOADED) { - tcp_lro_remove_state(inp->inp_laddr, inp->inp_faddr, - inp->inp_lport, inp->inp_fport); - tp->t_flagsext &= ~TF_LRO_OFFLOADED; - } /* * Make sure to clear the TCP Keep Alive Offload as it is * ref counted on the interface @@ -1698,12 +1612,11 @@ no_valid_rt: tp->t_tfo_flags &= ~TFO_F_COOKIE_VALID; } -#if INET6 if (SOCK_CHECK_DOM(so, PF_INET6)) { in6_pcbdetach(inp); - } else -#endif /* INET6 */ - in_pcbdetach(inp); + } else { + in_pcbdetach(inp); + } /* * Call soisdisconnected after detach because it might unlock the socket @@ -1732,12 +1645,6 @@ tcp_freeq(struct tcpcb *tp) } -/* - * Walk the tcpbs, if existing, and flush the reassembly queue, - * if there is one when do_tcpdrain is enabled - * Also defunct the extended background idle socket - * Do it next time if the pcbinfo lock is in use - */ void tcp_drain(void) { @@ -1760,10 +1667,6 @@ tcp_drain(void) } tp = intotcpcb(inp); - if (do_tcpdrain) { - tcp_freeq(tp); - } - so_drain_extended_bk_idle(inp->inp_socket); socket_unlock(inp->inp_socket, 1); @@ -1817,12 +1720,7 @@ struct bwmeas * tcp_bwmeas_alloc(struct tcpcb *tp) { struct bwmeas *elm; - elm = zalloc(tcp_bwmeas_zone); - if (elm == NULL) { - return elm; - } - - bzero(elm, bwmeas_elm_size); + elm = zalloc_flags(tcp_bwmeas_zone, Z_ZERO | Z_WAITOK); elm->bw_minsizepkts = TCP_BWMEAS_BURST_MINSIZE; elm->bw_minsize = elm->bw_minsizepkts * tp->t_maxseg; return elm; @@ -2053,7 +1951,7 @@ SYSCTL_PROC(_net_inet_tcp, TCPCTL_PCBLIST, pcblist, CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0, tcp_pcblist, "S,xtcpcb", "List of active TCP connections"); -#if !CONFIG_EMBEDDED +#if XNU_TARGET_OS_OSX static void tcpcb_to_xtcpcb64(struct tcpcb *tp, struct xtcpcb64 *otp) @@ -2235,7 +2133,7 @@ SYSCTL_PROC(_net_inet_tcp, OID_AUTO, pcblist64, CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0, tcp_pcblist64, "S,xtcpcb64", "List of active TCP connections"); -#endif /* !CONFIG_EMBEDDED */ +#endif /* XNU_TARGET_OS_OSX */ static int tcp_pcblist_n SYSCTL_HANDLER_ARGS @@ -2473,7 +2371,6 @@ tcp_ctlinput(int cmd, struct sockaddr *sa, void *vip, __unused struct ifnet *ifp socket_unlock(inp->inp_socket, 1); } -#if INET6 void tcp6_ctlinput(int cmd, struct sockaddr *sa, void *d, __unused struct ifnet *ifp) { @@ -2616,7 +2513,6 @@ tcp6_ctlinput(int cmd, struct sockaddr *sa, void *d, __unused struct ifnet *ifp) } socket_unlock(inp->inp_socket, 1); } -#endif /* INET6 */ /* @@ -2671,7 +2567,7 @@ tcp_new_isn(struct tcpcb *tp) tcp_seq new_isn; struct timeval timenow; u_char isn_secret[32]; - int isn_last_reseed = 0; + long isn_last_reseed = 0; MD5_CTX isn_ctx; /* Use arc4random for SYN-ACKs when not in exact RFC1948 mode. 
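The tcp_new_isn() hunk that follows keeps the RFC 1948 style of deriving the initial sequence number from an MD5 hash over the connection 4-tuple plus a periodically reseeded secret. A hedged user-space sketch of that hash step, using CommonCrypto's MD5 and a hypothetical conn_tuple container (the real code hashes the ports, then the addresses, then the secret, and adds a time-based offset afterwards):

#include <CommonCrypto/CommonDigest.h>
#include <stdint.h>
#include <string.h>

/* hypothetical 4-tuple container for the sketch */
struct conn_tuple {
	uint8_t  faddr[16];
	uint8_t  laddr[16];
	uint16_t fport;
	uint16_t lport;
};

static uint32_t
isn_hash(const struct conn_tuple *ct, const uint8_t secret[32])
{
	CC_MD5_CTX ctx;
	uint8_t digest[CC_MD5_DIGEST_LENGTH];
	uint32_t isn;

	CC_MD5_Init(&ctx);
	CC_MD5_Update(&ctx, &ct->fport, sizeof(ct->fport));
	CC_MD5_Update(&ctx, &ct->lport, sizeof(ct->lport));
	CC_MD5_Update(&ctx, ct->faddr, sizeof(ct->faddr));
	CC_MD5_Update(&ctx, ct->laddr, sizeof(ct->laddr));
	CC_MD5_Update(&ctx, secret, 32);
	CC_MD5_Final(digest, &ctx);

	memcpy(&isn, digest, sizeof(isn));
	return isn;   /* caller adds the time-based offset */
}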
*/ @@ -2703,15 +2599,12 @@ tcp_new_isn(struct tcpcb *tp) sizeof(u_short)); MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_lport, sizeof(u_short)); -#if INET6 if ((tp->t_inpcb->inp_vflag & INP_IPV6) != 0) { MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->in6p_faddr, sizeof(struct in6_addr)); MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->in6p_laddr, sizeof(struct in6_addr)); - } else -#endif - { + } else { MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_faddr, sizeof(struct in_addr)); MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_laddr, @@ -2747,20 +2640,14 @@ tcp_drop_syn_sent(struct inpcb *inp, int errno) * This duplicates some code in the tcp_mss() function in tcp_input.c. */ void -tcp_mtudisc( - struct inpcb *inp, - __unused int errno - ) +tcp_mtudisc(struct inpcb *inp, __unused int errno) { struct tcpcb *tp = intotcpcb(inp); struct rtentry *rt; - struct rmxp_tao *taop; struct socket *so = inp->inp_socket; - int offered; int mss; u_int32_t mtu; u_int32_t protoHdrOverhead = sizeof(struct tcpiphdr); -#if INET6 int isipv6 = (tp->t_inpcb->inp_vflag & INP_IPV6) != 0; /* @@ -2774,20 +2661,16 @@ tcp_mtudisc( protoHdrOverhead = sizeof(struct ip6_hdr) + sizeof(struct tcphdr); } -#endif /* INET6 */ if (tp != NULL) { -#if INET6 if (isipv6) { rt = tcp_rtlookup6(inp, IFSCOPE_NONE); - } else -#endif /* INET6 */ - rt = tcp_rtlookup(inp, IFSCOPE_NONE); + } else { + rt = tcp_rtlookup(inp, IFSCOPE_NONE); + } if (!rt || !rt->rt_rmx.rmx_mtu) { tp->t_maxopd = tp->t_maxseg = -#if INET6 isipv6 ? tcp_v6mssdflt : -#endif /* INET6 */ tcp_mssdflt; /* Route locked during lookup above */ @@ -2796,8 +2679,6 @@ tcp_mtudisc( } return; } - taop = rmx_taop(rt->rt_rmx); - offered = taop->tao_mssopt; mtu = rt->rt_rmx.rmx_mtu; /* Route locked during lookup above */ @@ -2809,8 +2690,8 @@ tcp_mtudisc( #endif /* NECP */ mss = mtu - protoHdrOverhead; - if (offered) { - mss = min(mss, offered); + if (tp->t_maxopd) { + mss = min(mss, tp->t_maxopd); } /* * XXX - The above conditional probably violates the TCP @@ -2963,7 +2844,6 @@ tcp_rtlookup(struct inpcb *inp, unsigned int input_ifscope) return rt; } -#if INET6 struct rtentry * tcp_rtlookup6(struct inpcb *inp, unsigned int input_ifscope) { @@ -3066,7 +2946,6 @@ tcp_rtlookup6(struct inpcb *inp, unsigned int input_ifscope) */ return rt; } -#endif /* INET6 */ #if IPSEC /* compute ESP/AH header size for TCP, including outer IP header. */ @@ -3077,9 +2956,7 @@ ipsec_hdrsiz_tcp(struct tcpcb *tp) struct mbuf *m; size_t hdrsiz; struct ip *ip; -#if INET6 struct ip6_hdr *ip6 = NULL; -#endif /* INET6 */ struct tcphdr *th; if ((tp == NULL) || ((inp = tp->t_inpcb) == NULL)) { @@ -3090,7 +2967,6 @@ ipsec_hdrsiz_tcp(struct tcpcb *tp) return 0; } -#if INET6 if ((inp->inp_vflag & INP_IPV6) != 0) { ip6 = mtod(m, struct ip6_hdr *); th = (struct tcphdr *)(void *)(ip6 + 1); @@ -3098,9 +2974,7 @@ ipsec_hdrsiz_tcp(struct tcpcb *tp) sizeof(struct ip6_hdr) + sizeof(struct tcphdr); tcp_fillheaders(tp, ip6, th); hdrsiz = ipsec6_hdrsiz(m, IPSEC_DIR_OUTBOUND, inp); - } else -#endif /* INET6 */ - { + } else { ip = mtod(m, struct ip *); th = (struct tcphdr *)(ip + 1); m->m_pkthdr.len = m->m_len = sizeof(struct tcpiphdr); @@ -3112,53 +2986,6 @@ ipsec_hdrsiz_tcp(struct tcpcb *tp) } #endif /* IPSEC */ -/* - * Return a pointer to the cached information about the remote host. - * The cached information is stored in the protocol specific part of - * the route metrics. 
- */ -struct rmxp_tao * -tcp_gettaocache(struct inpcb *inp) -{ - struct rtentry *rt; - struct rmxp_tao *taop; - -#if INET6 - if ((inp->inp_vflag & INP_IPV6) != 0) { - rt = tcp_rtlookup6(inp, IFSCOPE_NONE); - } else -#endif /* INET6 */ - rt = tcp_rtlookup(inp, IFSCOPE_NONE); - - /* Make sure this is a host route and is up. */ - if (rt == NULL || - (rt->rt_flags & (RTF_UP | RTF_HOST)) != (RTF_UP | RTF_HOST)) { - /* Route locked during lookup above */ - if (rt != NULL) { - RT_UNLOCK(rt); - } - return NULL; - } - - taop = rmx_taop(rt->rt_rmx); - /* Route locked during lookup above */ - RT_UNLOCK(rt); - return taop; -} - -/* - * Clear all the TAO cache entries, called from tcp_init. - * - * XXX - * This routine is just an empty one, because we assume that the routing - * routing tables are initialized at the same time when TCP, so there is - * nothing in the cache left over. - */ -static void -tcp_cleartaocache(void) -{ -} - int tcp_lock(struct socket *so, int refcount, void *lr) { @@ -3312,13 +3139,6 @@ tcp_sbrcv_grow_rwin(struct tcpcb *tp, struct sockbuf *sb) if (tcp_recv_bg == 1 || IS_TCP_RECV_BG(so)) { return; } - /* - * If message delivery is enabled, do not count - * unordered bytes in receive buffer towards hiwat - */ - if (so->so_flags & SOF_ENABLE_MSGS) { - rcvbuf = rcvbuf - so->so_msg_state->msg_uno_bytes; - } if (tcp_do_autorcvbuf == 1 && tcp_cansbgrow(sb) && @@ -3354,17 +3174,6 @@ tcp_sbspace(struct tcpcb *tp) /* hiwat might have changed */ rcvbuf = sb->sb_hiwat; - /* - * If message delivery is enabled, do not count - * unordered bytes in receive buffer towards hiwat mark. - * This value is used to return correct rwnd that does - * not reflect the extra unordered bytes added to the - * receive socket buffer. - */ - if (so->so_flags & SOF_ENABLE_MSGS) { - rcvbuf = rcvbuf - so->so_msg_state->msg_uno_bytes; - } - space = ((int32_t) imin((rcvbuf - sb->sb_cc), (sb->sb_mbmax - sb->sb_mbcnt))); if (space < 0) { @@ -3406,53 +3215,115 @@ tcp_sbspace(struct tcpcb *tp) void tcp_set_tso(struct tcpcb *tp, struct ifnet *ifp) { -#if INET6 struct inpcb *inp; int isipv6; -#endif /* INET6 */ + struct ifnet *tunnel_ifp = NULL; +#define IFNET_TSO_MASK (IFNET_TSO_IPV6 | IFNET_TSO_IPV4) + + tp->t_flags &= ~TF_TSO; + + if (ifp == NULL) { + return; + } + #if MPTCP /* * We can't use TSO if this tcpcb belongs to an MPTCP session. 
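tcp_sbspace(), simplified above by dropping the SOF_ENABLE_MSGS accounting, advertises the smaller of the byte headroom and the mbuf headroom in the receive buffer, never a negative value. A small sketch of that computation with illustrative parameter names:

#include <stdint.h>

/* space = min(hiwat - bytes queued, mbuf limit - mbufs used), clamped at 0 */
static int32_t
rcv_space(uint32_t hiwat, uint32_t cc, uint32_t mbmax, uint32_t mbcnt)
{
	int32_t bytes = (int32_t)hiwat - (int32_t)cc;     /* byte headroom */
	int32_t mbufs = (int32_t)mbmax - (int32_t)mbcnt;  /* mbuf headroom */
	int32_t space = bytes < mbufs ? bytes : mbufs;

	return space < 0 ? 0 : space;
}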
*/ if (tp->t_mpflags & TMPF_MPTCP_TRUE) { - tp->t_flags &= ~TF_TSO; return; } #endif -#if INET6 inp = tp->t_inpcb; isipv6 = (inp->inp_vflag & INP_IPV6) != 0; + /* + * We can't use TSO if the TSO capability of the tunnel interface does + * not match the capability of another interface known by TCP + */ + if (inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_IP_TUNNEL) { + u_int tunnel_if_index = inp->inp_policyresult.results.result_parameter.tunnel_interface_index; + + if (tunnel_if_index != 0) { + ifnet_head_lock_shared(); + tunnel_ifp = ifindex2ifnet[tunnel_if_index]; + ifnet_head_done(); + } + + if (tunnel_ifp == NULL) { + return; + } + + if ((ifp->if_hwassist & IFNET_TSO_MASK) != (tunnel_ifp->if_hwassist & IFNET_TSO_MASK)) { + if (tso_debug > 0) { + os_log(OS_LOG_DEFAULT, + "%s: %u > %u TSO 0 tunnel_ifp %s hwassist mismatch with ifp %s", + __func__, + ntohs(tp->t_inpcb->inp_lport), ntohs(tp->t_inpcb->inp_fport), + tunnel_ifp->if_xname, ifp->if_xname); + } + return; + } + if (inp->inp_last_outifp != NULL && + (inp->inp_last_outifp->if_hwassist & IFNET_TSO_MASK) != (tunnel_ifp->if_hwassist & IFNET_TSO_MASK)) { + if (tso_debug > 0) { + os_log(OS_LOG_DEFAULT, + "%s: %u > %u TSO 0 tunnel_ifp %s hwassist mismatch with inp_last_outifp %s", + __func__, + ntohs(tp->t_inpcb->inp_lport), ntohs(tp->t_inpcb->inp_fport), + tunnel_ifp->if_xname, inp->inp_last_outifp->if_xname); + } + return; + } + if ((inp->inp_flags & INP_BOUND_IF) && inp->inp_boundifp != NULL && + (inp->inp_boundifp->if_hwassist & IFNET_TSO_MASK) != (tunnel_ifp->if_hwassist & IFNET_TSO_MASK)) { + if (tso_debug > 0) { + os_log(OS_LOG_DEFAULT, + "%s: %u > %u TSO 0 tunnel_ifp %s hwassist mismatch with inp_boundifp %s", + __func__, + ntohs(tp->t_inpcb->inp_lport), ntohs(tp->t_inpcb->inp_fport), + tunnel_ifp->if_xname, inp->inp_boundifp->if_xname); + } + return; + } + } + if (isipv6) { - if (ifp && (ifp->if_hwassist & IFNET_TSO_IPV6)) { + if (ifp->if_hwassist & IFNET_TSO_IPV6) { tp->t_flags |= TF_TSO; if (ifp->if_tso_v6_mtu != 0) { tp->tso_max_segment_size = ifp->if_tso_v6_mtu; } else { tp->tso_max_segment_size = TCP_MAXWIN; } - } else { - tp->t_flags &= ~TF_TSO; } - } else -#endif /* INET6 */ - - { - if (ifp && (ifp->if_hwassist & IFNET_TSO_IPV4)) { + } else { + if (ifp->if_hwassist & IFNET_TSO_IPV4) { tp->t_flags |= TF_TSO; if (ifp->if_tso_v4_mtu != 0) { tp->tso_max_segment_size = ifp->if_tso_v4_mtu; } else { tp->tso_max_segment_size = TCP_MAXWIN; } - } else { - tp->t_flags &= ~TF_TSO; + if (INTF_ADJUST_MTU_FOR_CLAT46(ifp)) { + tp->tso_max_segment_size -= + CLAT46_HDR_EXPANSION_OVERHD; + } } } + + if (tso_debug > 1) { + os_log(OS_LOG_DEFAULT, "%s: %u > %u TSO %d ifp %s", + __func__, + ntohs(tp->t_inpcb->inp_lport), + ntohs(tp->t_inpcb->inp_fport), + (tp->t_flags & TF_TSO) != 0, + ifp != NULL ? ifp->if_xname : ""); + } } -#define TIMEVAL_TO_TCPHZ(_tv_) ((_tv_).tv_sec * TCP_RETRANSHZ + \ - (_tv_).tv_usec / TCP_RETRANSHZ_TO_USEC) +#define TIMEVAL_TO_TCPHZ(_tv_) ((uint32_t)((_tv_).tv_sec * TCP_RETRANSHZ + \ + (_tv_).tv_usec / TCP_RETRANSHZ_TO_USEC)) /* * Function to calculate the tcp clock. The tcp clock will get updated @@ -3512,35 +3383,24 @@ calculate_tcp_clock(void) * defined by the constant tcp_autorcvbuf_max. 
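The tunnel checks added to tcp_set_tso() above compare only the TSO bits of if_hwassist: TSO stays disabled unless the outgoing, last-output, and bound interfaces all report the same TSO capability as the NECP tunnel interface. A minimal sketch of that mask comparison, with illustrative flag values in place of IFNET_TSO_IPV4/IFNET_TSO_IPV6:

#include <stdbool.h>
#include <stdint.h>

#define TSO_V4   0x1u
#define TSO_V6   0x2u
#define TSO_MASK (TSO_V4 | TSO_V6)

/* true only when both interfaces advertise identical TSO capability bits */
static bool
tso_capabilities_match(uint32_t ifp_hwassist, uint32_t tunnel_hwassist)
{
	return (ifp_hwassist & TSO_MASK) == (tunnel_hwassist & TSO_MASK);
}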
*/ void -tcp_set_max_rwinscale(struct tcpcb *tp, struct socket *so, struct ifnet *ifp) +tcp_set_max_rwinscale(struct tcpcb *tp, struct socket *so) { uint32_t maxsockbufsize; - uint32_t rcvbuf_max; - if (!tcp_do_rfc1323) { - tp->request_r_scale = 0; - return; - } + tp->request_r_scale = MAX((uint8_t)tcp_win_scale, tp->request_r_scale); + maxsockbufsize = ((so->so_rcv.sb_flags & SB_USRSIZE) != 0) ? + so->so_rcv.sb_hiwat : tcp_autorcvbuf_max; /* - * When we start a connection and don't know about the interface, set - * the scaling factor simply to the max - we can always announce less. + * Window scale should not exceed what is needed + * to send the max receive window size; adding 1 to TCP_MAXWIN + * ensures that. */ - if (!ifp || (IFNET_IS_CELLULAR(ifp) && (ifp->if_eflags & IFEF_3CA))) { - rcvbuf_max = (tcp_autorcvbuf_max << 1); - } else { - rcvbuf_max = tcp_autorcvbuf_max; - } - - tp->request_r_scale = max(tcp_win_scale, tp->request_r_scale); - maxsockbufsize = ((so->so_rcv.sb_flags & SB_USRSIZE) != 0) ? - so->so_rcv.sb_hiwat : rcvbuf_max; - while (tp->request_r_scale < TCP_MAX_WINSHIFT && - (TCP_MAXWIN << tp->request_r_scale) < maxsockbufsize) { + ((TCP_MAXWIN + 1) << tp->request_r_scale) < maxsockbufsize) { tp->request_r_scale++; } - tp->request_r_scale = min(tp->request_r_scale, TCP_MAX_WINSHIFT); + tp->request_r_scale = MIN(tp->request_r_scale, TCP_MAX_WINSHIFT); } int @@ -3587,7 +3447,7 @@ void tcp_rxtseg_insert(struct tcpcb *tp, tcp_seq start, tcp_seq end) { struct tcp_rxt_seg *rxseg = NULL, *prev = NULL, *next = NULL; - u_int32_t rxcount = 0; + uint16_t rxcount = 0; if (SLIST_EMPTY(&tp->t_rxt_segments)) { tp->t_dsack_lastuna = tp->snd_una; @@ -3678,6 +3538,33 @@ tcp_rxtseg_find(struct tcpcb *tp, tcp_seq start, tcp_seq end) return NULL; } +void +tcp_rxtseg_set_spurious(struct tcpcb *tp, tcp_seq start, tcp_seq end) +{ + struct tcp_rxt_seg *rxseg; + if (SLIST_EMPTY(&tp->t_rxt_segments)) { + return; + } + + SLIST_FOREACH(rxseg, &tp->t_rxt_segments, rx_link) { + if (SEQ_GEQ(rxseg->rx_start, start) && + SEQ_LEQ(rxseg->rx_end, end)) { + /* + * If the segment was retransmitted only once, mark it as + * spurious. + */ + if (rxseg->rx_count == 1) { + rxseg->rx_flags |= TCP_RXT_SPURIOUS; + } + } + + if (SEQ_GEQ(rxseg->rx_start, end)) { + break; + } + } + return; +} + void tcp_rxtseg_clean(struct tcpcb *tp) { @@ -3712,8 +3599,7 @@ tcp_rxtseg_detect_bad_rexmt(struct tcpcb *tp, tcp_seq th_ack) bad_rexmt = TRUE; SLIST_FOREACH(rxseg, &tp->t_rxt_segments, rx_link) { - if (rxseg->rx_count > 1 || - !(rxseg->rx_flags & TCP_RXT_SPURIOUS)) { + if (!(rxseg->rx_flags & TCP_RXT_SPURIOUS)) { bad_rexmt = FALSE; break; } @@ -4002,10 +3888,10 @@ tcp_fill_keepalive_offload_frames(ifnet_t ifp, frame->ether_type = (inp->inp_vflag & INP_IPV4) ? IFNET_KEEPALIVE_OFFLOAD_FRAME_ETHERTYPE_IPV4 : IFNET_KEEPALIVE_OFFLOAD_FRAME_ETHERTYPE_IPV6; - frame->interval = tp->t_keepidle > 0 ? tp->t_keepidle : - tcp_keepidle; - frame->keep_cnt = TCP_CONN_KEEPCNT(tp); - frame->keep_retry = TCP_CONN_KEEPINTVL(tp); + frame->interval = (uint16_t)(tp->t_keepidle > 0 ? 
tp->t_keepidle : + tcp_keepidle); + frame->keep_cnt = (uint8_t)TCP_CONN_KEEPCNT(tp); + frame->keep_retry = (uint16_t)TCP_CONN_KEEPINTVL(tp); if (so->so_options & SO_NOWAKEFROMSLEEP) { frame->flags |= IFNET_KEEPALIVE_OFFLOAD_FLAG_NOWAKEFROMSLEEP; @@ -4015,8 +3901,9 @@ tcp_fill_keepalive_offload_frames(ifnet_t ifp, frame->local_seq = tp->snd_nxt; frame->remote_seq = tp->rcv_nxt; if (inp->inp_vflag & INP_IPV4) { - frame->length = frame_data_offset + - sizeof(struct ip) + sizeof(struct tcphdr); + ASSERT(frame_data_offset + sizeof(struct ip) + sizeof(struct tcphdr) <= UINT8_MAX); + frame->length = (uint8_t)(frame_data_offset + + sizeof(struct ip) + sizeof(struct tcphdr)); frame->reply_length = frame->length; frame->addr_length = sizeof(struct in_addr); @@ -4027,8 +3914,9 @@ tcp_fill_keepalive_offload_frames(ifnet_t ifp, } else { struct in6_addr *ip6; - frame->length = frame_data_offset + - sizeof(struct ip6_hdr) + sizeof(struct tcphdr); + ASSERT(frame_data_offset + sizeof(struct ip6_hdr) + sizeof(struct tcphdr) <= UINT8_MAX); + frame->length = (uint8_t)(frame_data_offset + + sizeof(struct ip6_hdr) + sizeof(struct tcphdr)); frame->reply_length = frame->length; frame->addr_length = sizeof(struct in6_addr); @@ -4165,7 +4053,6 @@ tcp_notify_kao_timeout(ifnet_t ifp, struct tcpcb *tp = inp->inp_ppcb; tcpstat.tcps_keepdrops++; - postevent(so, 0, EV_TIMEOUT); soevent(so, (SO_FILT_HINT_LOCKED | SO_FILT_HINT_TIMEOUT)); tp = tcp_drop(tp, ETIMEDOUT); @@ -4266,10 +4153,11 @@ tcp_get_notify_ack_count(struct tcpcb *tp, struct tcp_notify_ack_complete *retid) { struct tcp_notify_ack_marker *elm; - size_t complete = 0; + uint32_t complete = 0; SLIST_FOREACH(elm, &tp->t_notify_ack, notify_next) { if (SEQ_GEQ(tp->snd_una, elm->notify_snd_una)) { + ASSERT(complete < UINT32_MAX); complete++; } else { break; @@ -4469,3 +4357,4 @@ tcp_update_stats_per_flow(struct ifnet_stats_per_flow *ifs, tcp_flow_lim_stats(ifs, &ifp->if_lim_stat); ifnet_lock_done(ifp); } + diff --git a/bsd/netinet/tcp_timer.c b/bsd/netinet/tcp_timer.c index df9551689..5012199aa 100644 --- a/bsd/netinet/tcp_timer.c +++ b/bsd/netinet/tcp_timer.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2019 Apple Inc. All rights reserved. + * Copyright (c) 2000-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -85,9 +85,7 @@ #include #include #include -#if INET6 #include -#endif #include #include #include @@ -96,9 +94,7 @@ #include #include #include -#if INET6 #include -#endif #include #if TCPDEBUG #include @@ -448,6 +444,9 @@ sysctl_change_mss_recommended SYSCTL_HANDLER_ARGS err = sysctl_io_number(req, tcp_change_mss_recommended, sizeof(int32_t), &i, &changed); if (changed) { + if (i < 0 || i > UINT16_MAX) { + return EINVAL; + } ifnet_head_lock_shared(); TAILQ_FOREACH(ifp, &ifnet_head, if_link) { if (IFNET_IS_CELLULAR(ifp)) { @@ -458,7 +457,7 @@ sysctl_change_mss_recommended SYSCTL_HANDLER_ARGS /* Set MSS recommended */ new_cell_sr->valid_bitmask |= IF_CELL_UL_MSS_RECOMMENDED_VALID; - new_cell_sr->mss_recommended = i; + new_cell_sr->mss_recommended = (uint16_t)i; err = ifnet_link_status_report(ifp, new_cell_sr, sizeof(new_cell_sr)); if (err == 0) { tcp_change_mss_recommended = i; @@ -538,6 +537,8 @@ add_to_time_wait(struct tcpcb *tp, uint32_t delay) socket_post_kev_msg_closed(tp->t_inpcb->inp_socket); } + tcp_del_fsw_flow(tp); + /* 19182803: Notify nstat that connection is closing before waiting. 
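
sysctl_change_mss_recommended above now rejects values outside [0, UINT16_MAX] before the recommended MSS is narrowed into the 16-bit link-status field, the same validate-then-cast pattern used for the keepalive offload fields. The pattern in isolation; sysctl_io_number and the cellular link-status structures are kernel APIs, so the sketch takes a plain int.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Validate a tunable before narrowing it into a 16-bit field, so an
 * out-of-range write fails with EINVAL instead of silently truncating.
 */
static int
set_mss_recommended(int requested, uint16_t *mss_out)
{
    if (requested < 0 || requested > UINT16_MAX) {
        return EINVAL;
    }
    *mss_out = (uint16_t)requested;
    return 0;
}

int
main(void)
{
    uint16_t mss = 0;

    printf("ok: %d (mss=%u)\n", set_mss_recommended(1200, &mss), mss);
    printf("bad: %d\n", set_mss_recommended(70000, &mss)); /* EINVAL, mss unchanged */
    return 0;
}
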
*/ nstat_pcb_detach(tp->t_inpcb); @@ -640,12 +641,11 @@ tcp_garbage_collect(struct inpcb *inp, int istimewait) if (inp->inp_state != INPCB_STATE_DEAD) { /* Become a regular mutex */ lck_mtx_convert_spin(&inp->inpcb_mtx); -#if INET6 if (SOCK_CHECK_DOM(so, PF_INET6)) { in6_pcbdetach(inp); - } else -#endif /* INET6 */ - in_pcbdetach(inp); + } else { + in_pcbdetach(inp); + } } VERIFY(so->so_usecount > 0); so->so_usecount--; @@ -686,12 +686,11 @@ tcp_garbage_collect(struct inpcb *inp, int istimewait) } if (inp->inp_state != INPCB_STATE_DEAD) { -#if INET6 if (SOCK_CHECK_DOM(so, PF_INET6)) { in6_pcbdetach(inp); - } else -#endif /* INET6 */ - in_pcbdetach(inp); + } else { + in_pcbdetach(inp); + } } if (mp_so) { @@ -897,6 +896,21 @@ tcp_pmtud_revert_segment_size(struct tcpcb *tp) tcp_update_mss_locked(tp->t_inpcb->inp_socket, NULL); } +static uint32_t +tcp_pmtud_black_holed_next_mss(struct tcpcb *tp) +{ + /* Reduce the MSS to intermediary value */ + if (tp->t_maxopd > tcp_pmtud_black_hole_mss) { + return tcp_pmtud_black_hole_mss; + } else { + if (tp->t_inpcb->inp_vflag & INP_IPV4) { + return tcp_mssdflt; + } else { + return tcp_v6mssdflt; + } + } +} + /* * TCP timer processing. */ @@ -909,12 +923,8 @@ tcp_timers(struct tcpcb *tp, int timer) #if TCPDEBUG int ostate; #endif - -#if INET6 - int isipv6 = (tp->t_inpcb->inp_vflag & INP_IPV4) == 0; -#endif /* INET6 */ u_int64_t accsleep_ms; - u_int32_t last_sleep_ms = 0; + u_int64_t last_sleep_ms = 0; so = tp->t_inpcb->inp_socket; idle_time = tcp_now - tp->t_rcvtime; @@ -1001,7 +1011,6 @@ tcp_timers(struct tcpcb *tp, int timer) } } tp->t_rxtshift = TCP_MAXRXTSHIFT; - postevent(so, 0, EV_TIMEOUT); soevent(so, (SO_FILT_HINT_LOCKED | SO_FILT_HINT_TIMEOUT)); @@ -1159,8 +1168,7 @@ retransmit_packet: !(tp->t_flagsext & TF_NOBLACKHOLE_DETECTION) && (tp->t_state == TCPS_ESTABLISHED)) { if ((tp->t_flags & TF_PMTUD) && - ((tp->t_flags & TF_MAXSEGSNT) - || tp->t_pmtud_lastseg_size > tcp_pmtud_black_hole_mss) && + tp->t_pmtud_lastseg_size > tcp_pmtud_black_holed_next_mss(tp) && tp->t_rxtshift == 2) { /* * Enter Path MTU Black-hole Detection mechanism: @@ -1180,15 +1188,7 @@ retransmit_packet: tp->t_pmtud_start_ts++; } /* Reduce the MSS to intermediary value */ - if (tp->t_maxopd > tcp_pmtud_black_hole_mss) { - tp->t_maxopd = tcp_pmtud_black_hole_mss; - } else { - tp->t_maxopd = /* use the default MSS */ -#if INET6 - isipv6 ? tcp_v6mssdflt : -#endif /* INET6 */ - tcp_mssdflt; - } + tp->t_maxopd = tcp_pmtud_black_holed_next_mss(tp); tp->t_maxseg = tp->t_maxopd - optlen; /* @@ -1239,12 +1239,11 @@ retransmit_packet: * retransmit times until then. */ if (tp->t_rxtshift > TCP_MAXRXTSHIFT / 4) { -#if INET6 - if (isipv6) { + if (!(tp->t_inpcb->inp_vflag & INP_IPV4)) { in6_losing(tp->t_inpcb); - } else -#endif /* INET6 */ - in_losing(tp->t_inpcb); + } else { + in_losing(tp->t_inpcb); + } tp->t_rttvar += (tp->t_srtt >> TCP_RTT_SHIFT); tp->t_srtt = 0; } @@ -1321,7 +1320,6 @@ fc_output: ((tp->t_persist_stop != 0) && TSTMP_LEQ(tp->t_persist_stop, tcp_now))) { tcpstat.tcps_persistdrop++; - postevent(so, 0, EV_TIMEOUT); soevent(so, (SO_FILT_HINT_LOCKED | SO_FILT_HINT_TIMEOUT)); tp = tcp_drop(tp, ETIMEDOUT); @@ -1338,6 +1336,12 @@ fc_output: * or drop connection if idle for too long. 
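
The new tcp_pmtud_black_holed_next_mss() helper above centralizes the step-down used by PMTU black-hole detection: fall back to the black-hole probe MSS first, and only when the connection is already at or below it drop to the protocol default. A self-contained sketch with the tunables as constants; the real values come from sysctls, so the numbers here are illustrative.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative defaults; the kernel reads these from sysctls. */
#define PMTUD_BLACKHOLE_MSS 1200u
#define MSS_DFLT_V4          512u
#define MSS_DFLT_V6         1024u

/*
 * Step the effective MSS down when repeated retransmits suggest an
 * ICMP-black-holed path: try the intermediate probe size first, then the
 * protocol default for the address family.
 */
static uint32_t
pmtud_black_holed_next_mss(uint32_t current_maxopd, bool is_ipv4)
{
    if (current_maxopd > PMTUD_BLACKHOLE_MSS) {
        return PMTUD_BLACKHOLE_MSS;
    }
    return is_ipv4 ? MSS_DFLT_V4 : MSS_DFLT_V6;
}

int
main(void)
{
    printf("%u\n", pmtud_black_holed_next_mss(1460, true)); /* -> 1200 */
    printf("%u\n", pmtud_black_holed_next_mss(1200, true)); /* -> 512  */
    return 0;
}
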
*/ case TCPT_KEEP: +#if FLOW_DIVERT + if (tp->t_inpcb->inp_socket->so_flags & SOF_FLOW_DIVERT) { + break; + } +#endif /* FLOW_DIVERT */ + tcpstat.tcps_keeptimeo++; #if MPTCP /* @@ -1511,6 +1515,7 @@ fc_output: } tcp_reset_stretch_ack(tp); } + tp->t_forced_acks = TCP_FORCED_ACKS_COUNT; /* * If we are measuring inter packet arrival jitter @@ -1521,6 +1526,7 @@ fc_output: CLEAR_IAJ_STATE(tp); tcpstat.tcps_delack++; + tp->t_stat.delayed_acks_sent++; (void) tcp_output(tp); } break; @@ -1532,7 +1538,6 @@ fc_output: (tp->t_mpflags & TMPF_JOINED_FLOW)) { if (++tp->t_mprxtshift > TCP_MAXRXTSHIFT) { tcpstat.tcps_timeoutdrop++; - postevent(so, 0, EV_TIMEOUT); soevent(so, (SO_FILT_HINT_LOCKED | SO_FILT_HINT_TIMEOUT)); @@ -1589,7 +1594,7 @@ fc_output: tp->t_rxtshift > 0 || tp->snd_max == tp->snd_una || !SACK_ENABLED(tp) || - !TAILQ_EMPTY(&tp->snd_holes) || + (tcp_do_better_lr != 1 && !TAILQ_EMPTY(&tp->snd_holes)) || IN_FASTRECOVERY(tp)) && !(tp->t_flagsext & TF_IF_PROBING)) { break; @@ -1766,7 +1771,6 @@ fc_output: break; dropit: tcpstat.tcps_keepdrops++; - postevent(so, 0, EV_TIMEOUT); soevent(so, (SO_FILT_HINT_LOCKED | SO_FILT_HINT_TIMEOUT)); tp = tcp_drop(tp, ETIMEDOUT); @@ -2142,7 +2146,7 @@ tcp_run_timerlist(void * arg1, void * arg2) } if (!LIST_EMPTY(&listp->lhead)) { - u_int16_t next_mode = 0; + uint32_t next_mode = 0; if ((list_mode & TCP_TIMERLIST_10MS_MODE) || (listp->pref_mode & TCP_TIMERLIST_10MS_MODE)) { next_mode = TCP_TIMERLIST_10MS_MODE; @@ -2463,13 +2467,13 @@ tcp_report_stats(void) /* send packet loss rate, shift by 10 for precision */ if (tcpstat.tcps_sndpack > 0 && tcpstat.tcps_sndrexmitpack > 0) { var = tcpstat.tcps_sndrexmitpack << 10; - stat.send_plr = (var * 100) / tcpstat.tcps_sndpack; + stat.send_plr = (uint32_t)((var * 100) / tcpstat.tcps_sndpack); } /* recv packet loss rate, shift by 10 for precision */ if (tcpstat.tcps_rcvpack > 0 && tcpstat.tcps_recovered_pkts > 0) { var = tcpstat.tcps_recovered_pkts << 10; - stat.recv_plr = (var * 100) / tcpstat.tcps_rcvpack; + stat.recv_plr = (uint32_t)((var * 100) / tcpstat.tcps_rcvpack); } /* RTO after tail loss, shift by 10 for precision */ @@ -2477,14 +2481,14 @@ tcp_report_stats(void) && tcpstat.tcps_tailloss_rto > 0) { var = tcpstat.tcps_tailloss_rto << 10; stat.send_tlrto_rate = - (var * 100) / tcpstat.tcps_sndrexmitpack; + (uint32_t)((var * 100) / tcpstat.tcps_sndrexmitpack); } /* packet reordering */ if (tcpstat.tcps_sndpack > 0 && tcpstat.tcps_reordered_pkts > 0) { var = tcpstat.tcps_reordered_pkts << 10; stat.send_reorder_rate = - (var * 100) / tcpstat.tcps_sndpack; + (uint32_t)((var * 100) / tcpstat.tcps_sndpack); } if (tcp_ecn_outbound == 1) { diff --git a/bsd/netinet/tcp_timer.h b/bsd/netinet/tcp_timer.h index 8b8435722..8d09d9cb6 100644 --- a/bsd/netinet/tcp_timer.h +++ b/bsd/netinet/tcp_timer.h @@ -205,8 +205,6 @@ extern int tcptv_persmin_val; */ #define TCP_RCV_SS_PKTCOUNT 512 -/* Receiver idle time, for rcv socket buffer resizing */ -#define TCPTV_RCVBUFIDLE (TCP_RETRANSHZ/2) #define TCPTV_TWTRUNC 8 /* RTO factor to truncate TW */ #define TCP_LINGERTIME 120 /* linger at most 2 minutes */ @@ -303,13 +301,13 @@ struct tcptimerlist { #define TCP_CONN_KEEPIDLE(tp) \ ((tp)->t_keepidle && \ ((tp)->t_inpcb->inp_socket->so_options & SO_KEEPALIVE) ? \ - (tp)->t_keepidle : tcp_keepidle) + (tp)->t_keepidle : (uint32_t)tcp_keepidle) #define TCP_CONN_KEEPINIT(tp) \ - (((tp)->t_keepinit > 0) ? (tp)->t_keepinit : tcp_keepinit) + (((tp)->t_keepinit > 0) ? 
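
tcp_report_stats above computes loss and reordering rates in fixed point: the numerator is shifted left by 10 before multiplying by 100 and dividing, keeping roughly three decimal digits of precision without floating point, and the quotient is now explicitly cast back to uint32_t. A small sketch of that arithmetic.

#include <stdint.h>
#include <stdio.h>

/*
 * Percentage with 10 fractional bits: (count << 10) * 100 / total.
 * Dividing the result by 1024.0 (or shifting right by 10) recovers the
 * percentage.
 */
static uint32_t
rate_q10_percent(uint64_t count, uint64_t total)
{
    if (total == 0 || count == 0) {
        return 0;
    }
    uint64_t var = count << 10;
    return (uint32_t)((var * 100) / total);
}

int
main(void)
{
    uint32_t plr = rate_q10_percent(37, 12345);  /* ~0.3% retransmitted */

    printf("send_plr = %u (~%.3f%%)\n", plr, plr / 1024.0);
    return 0;
}
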
(tp)->t_keepinit : (uint32_t)tcp_keepinit) #define TCP_CONN_KEEPCNT(tp) \ - (((tp)->t_keepcnt > 0) ? (tp)->t_keepcnt : tcp_keepcnt) + (((tp)->t_keepcnt > 0) ? (tp)->t_keepcnt : (uint32_t)tcp_keepcnt) #define TCP_CONN_KEEPINTVL(tp) \ - (((tp)->t_keepintvl > 0) ? (tp)->t_keepintvl : tcp_keepintvl) + (((tp)->t_keepintvl > 0) ? (tp)->t_keepintvl : (uint32_t)tcp_keepintvl) #define TCP_CONN_MAXIDLE(tp) \ (TCP_CONN_KEEPCNT(tp) * TCP_CONN_KEEPINTVL(tp)) diff --git a/bsd/netinet/tcp_usrreq.c b/bsd/netinet/tcp_usrreq.c index 92c445448..c2389347c 100644 --- a/bsd/netinet/tcp_usrreq.c +++ b/bsd/netinet/tcp_usrreq.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2019 Apple Inc. All rights reserved. + * Copyright (c) 2000-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -67,12 +67,10 @@ #include #include #include -#if INET6 #include -#endif /* INET6 */ -#if !CONFIG_EMBEDDED +#if XNU_TARGET_OS_OSX #include -#endif +#endif /* XNU_TARGET_OS_OSX */ #include #include #include @@ -87,18 +85,12 @@ #include #include -#if INET6 #include -#endif #include -#if INET6 #include -#endif #include #include -#if INET6 #include -#endif #include #include #include @@ -135,11 +127,9 @@ static int tcp_get_mpkl_send_info(struct mbuf *, struct so_mpkl_send_info *); */ static int tcp_attach(struct socket *, struct proc *); static int tcp_connect(struct tcpcb *, struct sockaddr *, struct proc *); -#if INET6 static int tcp6_connect(struct tcpcb *, struct sockaddr *, struct proc *); static int tcp6_usr_connect(struct socket *, struct sockaddr *, struct proc *); -#endif /* INET6 */ static struct tcpcb *tcp_disconnect(struct tcpcb *); static struct tcpcb *tcp_usrclosed(struct tcpcb *); extern void tcp_sbrcv_trim(struct tcpcb *tp, struct sockbuf *sb); @@ -189,7 +179,7 @@ tcp_usr_attach(struct socket *so, __unused int proto, struct proc *p) } if ((so->so_options & SO_LINGER) && so->so_linger == 0) { - so->so_linger = TCP_LINGERTIME * hz; + so->so_linger = (short)(TCP_LINGERTIME * hz); } tp = sototcpcb(so); out: @@ -232,18 +222,18 @@ out: } #if NECP -#define COMMON_START() TCPDEBUG0; \ +#define COMMON_START_ALLOW_FLOW_DIVERT(allow) TCPDEBUG0; \ do { \ if (inp == NULL || inp->inp_state == INPCB_STATE_DEAD) \ return (EINVAL); \ - if (necp_socket_should_use_flow_divert(inp)) \ + if (!(allow) && necp_socket_should_use_flow_divert(inp)) \ return (EPROTOTYPE); \ tp = intotcpcb(inp); \ TCPDEBUG1(); \ calculate_tcp_clock(); \ } while (0) #else /* NECP */ -#define COMMON_START() TCPDEBUG0; \ +#define COMMON_START_ALLOW_FLOW_DIVERT(allow) TCPDEBUG0; \ do { \ if (inp == NULL || inp->inp_state == INPCB_STATE_DEAD) \ return (EINVAL); \ @@ -253,6 +243,7 @@ do { \ } while (0) #endif /* !NECP */ +#define COMMON_START() COMMON_START_ALLOW_FLOW_DIVERT(false) #define COMMON_END(req) out: TCPDEBUG2(req); return error; goto out @@ -278,7 +269,7 @@ tcp_usr_bind(struct socket *so, struct sockaddr *nam, struct proc *p) struct tcpcb *tp; struct sockaddr_in *sinp; - COMMON_START(); + COMMON_START_ALLOW_FLOW_DIVERT(true); if (nam->sa_family != 0 && nam->sa_family != AF_INET) { error = EAFNOSUPPORT; @@ -313,7 +304,6 @@ tcp_usr_bind(struct socket *so, struct sockaddr *nam, struct proc *p) COMMON_END(PRU_BIND); } -#if INET6 static int tcp6_usr_bind(struct socket *so, struct sockaddr *nam, struct proc *p) { @@ -322,7 +312,7 @@ tcp6_usr_bind(struct socket *so, struct sockaddr *nam, struct proc *p) struct tcpcb *tp; struct sockaddr_in6 *sin6p; - COMMON_START(); + COMMON_START_ALLOW_FLOW_DIVERT(true); if (nam->sa_family != 0 && 
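
COMMON_START() is now defined in terms of COMMON_START_ALLOW_FLOW_DIVERT(allow), so entry points such as bind can pass true and skip the flow-divert rejection while connect and the other paths keep the old behaviour. A reduced sketch of the parameterized-guard pattern; the PCB fields and the NECP predicate are stand-ins.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct pcb { int dead; int wants_flow_divert; };

static bool
should_use_flow_divert(const struct pcb *p)
{
    return p->wants_flow_divert != 0;
}

/*
 * One guard macro with an 'allow' parameter; the old unconditional guard
 * becomes the allow==false instantiation.
 */
#define COMMON_START_ALLOW_FLOW_DIVERT(pcb, allow) do {          \
    if ((pcb) == NULL || (pcb)->dead)                            \
        return EINVAL;                                           \
    if (!(allow) && should_use_flow_divert(pcb))                 \
        return EPROTOTYPE;                                       \
} while (0)

#define COMMON_START(pcb) COMMON_START_ALLOW_FLOW_DIVERT(pcb, false)

static int usr_bind(struct pcb *p)    { COMMON_START_ALLOW_FLOW_DIVERT(p, true); return 0; }
static int usr_connect(struct pcb *p) { COMMON_START(p); return 0; }

int
main(void)
{
    struct pcb diverted = { 0, 1 };

    /* bind succeeds on a flow-diverted socket, connect is rejected. */
    printf("bind: %d, connect: %d\n", usr_bind(&diverted), usr_connect(&diverted));
    return 0;
}
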
nam->sa_family != AF_INET6) { error = EAFNOSUPPORT; @@ -360,7 +350,6 @@ tcp6_usr_bind(struct socket *so, struct sockaddr *nam, struct proc *p) } COMMON_END(PRU_BIND); } -#endif /* INET6 */ /* * Prepare to accept connections. @@ -393,7 +382,6 @@ tcp_usr_listen(struct socket *so, struct proc *p) COMMON_END(PRU_LISTEN); } -#if INET6 static int tcp6_usr_listen(struct socket *so, struct proc *p) { @@ -415,7 +403,6 @@ tcp6_usr_listen(struct socket *so, struct proc *p) TCP_LOG_LISTEN(tp, error); COMMON_END(PRU_LISTEN); } -#endif /* INET6 */ static int tcp_connect_complete(struct socket *so) @@ -426,7 +413,7 @@ tcp_connect_complete(struct socket *so) /* TFO delays the tcp_output until later, when the app calls write() */ if (so->so_flags1 & SOF1_PRECONNECT_DATA) { - if (!necp_socket_is_allowed_to_send_recv(sotoinpcb(so), NULL, NULL, NULL, NULL)) { + if (!necp_socket_is_allowed_to_send_recv(sotoinpcb(so), NULL, 0, NULL, NULL, NULL, NULL)) { TCP_LOG_DROP_NECP(NULL, NULL, tp, true); return EHOSTUNREACH; } @@ -487,16 +474,10 @@ tcp_usr_connect(struct socket *so, struct sockaddr *nam, struct proc *p) #endif /* CONTENT_FILTER */ #if FLOW_DIVERT if (necp_socket_should_use_flow_divert(inp)) { - uint32_t fd_ctl_unit = necp_socket_get_flow_divert_control_unit(inp); - if (fd_ctl_unit > 0) { - error = flow_divert_pcb_init(so, fd_ctl_unit); - if (error == 0) { - error = flow_divert_connect_out(so, nam, p); - } - } else { - error = ENETDOWN; + error = flow_divert_pcb_init(so); + if (error == 0) { + error = flow_divert_connect_out(so, nam, p); } - return error; } #endif /* FLOW_DIVERT */ @@ -580,11 +561,9 @@ tcp_usr_connectx_common(struct socket *so, int af, case AF_INET: error = tcp_usr_connect(so, dst, p); break; -#if INET6 case AF_INET6: error = tcp6_usr_connect(so, dst, p); break; -#endif /* INET6 */ default: VERIFY(0); /* NOTREACHED */ @@ -644,7 +623,6 @@ tcp_usr_connectx(struct socket *so, struct sockaddr *src, pcid, flags, arg, arglen, uio, bytes_written); } -#if INET6 static int tcp6_usr_connect(struct socket *so, struct sockaddr *nam, struct proc *p) { @@ -674,16 +652,10 @@ tcp6_usr_connect(struct socket *so, struct sockaddr *nam, struct proc *p) #endif /* CONTENT_FILTER */ #if FLOW_DIVERT if (necp_socket_should_use_flow_divert(inp)) { - uint32_t fd_ctl_unit = necp_socket_get_flow_divert_control_unit(inp); - if (fd_ctl_unit > 0) { - error = flow_divert_pcb_init(so, fd_ctl_unit); - if (error == 0) { - error = flow_divert_connect_out(so, nam, p); - } - } else { - error = ENETDOWN; + error = flow_divert_pcb_init(so); + if (error == 0) { + error = flow_divert_connect_out(so, nam, p); } - return error; } #endif /* FLOW_DIVERT */ @@ -750,7 +722,6 @@ tcp6_usr_connectx(struct socket *so, struct sockaddr*src, return tcp_usr_connectx_common(so, AF_INET6, src, dst, p, ifscope, aid, pcid, flags, arg, arglen, uio, bytes_written); } -#endif /* INET6 */ /* * Initiate disconnect from peer. @@ -833,7 +804,6 @@ tcp_usr_accept(struct socket *so, struct sockaddr **nam) COMMON_END(PRU_ACCEPT); } -#if INET6 static int tcp6_usr_accept(struct socket *so, struct sockaddr **nam) { @@ -866,7 +836,6 @@ tcp6_usr_accept(struct socket *so, struct sockaddr **nam) in6_mapped_peeraddr(so, nam); COMMON_END(PRU_ACCEPT); } -#endif /* INET6 */ /* * Mark the connection as being incapable of further output. @@ -944,7 +913,7 @@ tcp_usr_shutdown(struct socket *so) * After a receive, possibly send window update to peer. 
*/ static int -tcp_usr_rcvd(struct socket *so, __unused int flags) +tcp_usr_rcvd(struct socket *so, int flags) { int error = 0; struct inpcb *inp = sotoinpcb(so); @@ -957,6 +926,10 @@ tcp_usr_rcvd(struct socket *so, __unused int flags) } tcp_sbrcv_trim(tp, &so->so_rcv); + if (flags & MSG_WAITALL) { + tp->t_flags |= TF_ACKNOW; + } + /* * This tcp_output is solely there to trigger window-updates. * However, we really do not want these window-updates while we @@ -1011,14 +984,11 @@ tcp_usr_send(struct socket *so, int flags, struct mbuf *m, int error = 0; struct inpcb *inp = sotoinpcb(so); struct tcpcb *tp; - uint32_t msgpri = MSG_PRI_DEFAULT; uint32_t mpkl_len = 0; /* length of mbuf chain */ uint32_t mpkl_seq; /* sequence number where new data is added */ struct so_mpkl_send_info mpkl_send_info = {}; -#if INET6 int isipv6; -#endif TCPDEBUG0; if (inp == NULL || inp->inp_state == INPCB_STATE_DEAD @@ -1048,9 +1018,7 @@ tcp_usr_send(struct socket *so, int flags, struct mbuf *m, TCPDEBUG1(); goto out; } -#if INET6 isipv6 = nam && nam->sa_family == AF_INET6; -#endif /* INET6 */ tp = intotcpcb(inp); TCPDEBUG1(); @@ -1068,19 +1036,6 @@ tcp_usr_send(struct socket *so, int flags, struct mbuf *m, } if (control != NULL) { - if (so->so_flags & SOF_ENABLE_MSGS) { - /* Get the msg priority from control mbufs */ - error = tcp_get_msg_priority(control, &msgpri); - if (error) { - m_freem(control); - if (m != NULL) { - m_freem(m); - } - control = NULL; - m = NULL; - goto out; - } - } if (control->m_len > 0 && net_mpklog_enabled) { error = tcp_get_mpkl_send_info(control, &mpkl_send_info); /* @@ -1106,22 +1061,12 @@ tcp_usr_send(struct socket *so, int flags, struct mbuf *m, control = NULL; } - if (so->so_flags & SOF_ENABLE_MSGS) { - VERIFY(m->m_flags & M_PKTHDR); - m->m_pkthdr.msg_pri = msgpri; - } - /* MPTCP sublow socket buffers must not be compressed */ VERIFY(!(so->so_flags & SOF_MP_SUBFLOW) || (so->so_snd.sb_flags & SB_NOCOMPRESS)); if (!(flags & PRUS_OOB) || (so->so_flags1 & SOF1_PRECONNECT_DATA)) { - /* Call msg send if message delivery is enabled */ - if (so->so_flags & SOF_ENABLE_MSGS) { - sbappendmsg_snd(&so->so_snd, m); - } else { - sbappendstream(&so->so_snd, m); - } + sbappendstream(&so->so_snd, m); if (nam && tp->t_state < TCPS_SYN_SENT) { /* @@ -1130,12 +1075,11 @@ tcp_usr_send(struct socket *so, int flags, struct mbuf *m, * initialize maxseg/maxopd using peer's cached * MSS. */ -#if INET6 if (isipv6) { error = tcp6_connect(tp, nam, p); - } else -#endif /* INET6 */ - error = tcp_connect(tp, nam, p); + } else { + error = tcp_connect(tp, nam, p); + } if (error) { TCP_LOG_CONNECT(tp, true, error); goto out; @@ -1191,12 +1135,11 @@ tcp_usr_send(struct socket *so, int flags, struct mbuf *m, * initialize maxseg/maxopd using peer's cached * MSS. 
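
tcp_usr_rcvd above no longer ignores its flags argument: when the reader used MSG_WAITALL, TF_ACKNOW is set before the window-update tcp_output, so the peer learns immediately that receive-buffer space opened up and can keep filling the full request. A tiny sketch of the flag plumbing; the flag values are stand-ins and the transmit call is elided.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MSG_WAITALL 0x40      /* wait for full request (stand-in value) */
#define TF_ACKNOW   0x00001   /* ack peer immediately */

struct conn { uint32_t t_flags; };

/*
 * Force an immediate ACK when the app drains data with MSG_WAITALL, so the
 * window update is not delayed behind the usual ack-stretching logic.
 */
static void
usr_rcvd(struct conn *tp, int flags)
{
    if (flags & MSG_WAITALL) {
        tp->t_flags |= TF_ACKNOW;
    }
    /* ... the window-update transmit would run here ... */
}

int
main(void)
{
    struct conn c = { 0 };

    usr_rcvd(&c, MSG_WAITALL);
    printf("TF_ACKNOW set: %s\n", (c.t_flags & TF_ACKNOW) ? "yes" : "no");
    return 0;
}
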
*/ -#if INET6 if (isipv6) { error = tcp6_connect(tp, nam, p); - } else -#endif /* INET6 */ - error = tcp_connect(tp, nam, p); + } else { + error = tcp_connect(tp, nam, p); + } if (error) { TCP_LOG_CONNECT(tp, true, error); goto out; @@ -1345,7 +1288,6 @@ struct pr_usrreqs tcp_usrreqs = { .pru_preconnect = tcp_usr_preconnect, }; -#if INET6 struct pr_usrreqs tcp6_usrreqs = { .pru_abort = tcp_usr_abort, .pru_accept = tcp6_usr_accept, @@ -1368,7 +1310,6 @@ struct pr_usrreqs tcp6_usrreqs = { .pru_soreceive = soreceive, .pru_preconnect = tcp_usr_preconnect, }; -#endif /* INET6 */ /* * Common subroutine to open a TCP connection to remote host specified @@ -1487,7 +1428,7 @@ skip_oinp: inp->inp_flowhash = inp_calc_flowhash(inp); } - tcp_set_max_rwinscale(tp, so, outif); + tcp_set_max_rwinscale(tp, so); soisconnecting(so); tcpstat.tcps_connattempt++; @@ -1500,6 +1441,8 @@ skip_oinp: nstat_route_connect_attempt(inp->inp_route.ro_rt); } + tcp_add_fsw_flow(tp, outif); + done: if (outif != NULL) { ifnet_release(outif); @@ -1508,7 +1451,6 @@ done: return error; } -#if INET6 static int tcp6_connect(struct tcpcb *tp, struct sockaddr *nam, struct proc *p) { @@ -1588,7 +1530,7 @@ tcp6_connect(struct tcpcb *tp, struct sockaddr *nam, struct proc *p) (htonl(inp->inp_flowhash) & IPV6_FLOWLABEL_MASK); } - tcp_set_max_rwinscale(tp, so, outif); + tcp_set_max_rwinscale(tp, so); soisconnecting(so); tcpstat.tcps_connattempt++; @@ -1602,6 +1544,8 @@ tcp6_connect(struct tcpcb *tp, struct sockaddr *nam, struct proc *p) nstat_route_connect_attempt(inp->inp_route.ro_rt); } + tcp_add_fsw_flow(tp, outif); + done: if (outif != NULL) { ifnet_release(outif); @@ -1609,7 +1553,6 @@ done: return error; } -#endif /* INET6 */ /* * Export TCP internal state information via a struct tcp_info @@ -1621,7 +1564,7 @@ tcp_fill_info(struct tcpcb *tp, struct tcp_info *ti) bzero(ti, sizeof(*ti)); - ti->tcpi_state = tp->t_state; + ti->tcpi_state = (uint8_t)tp->t_state; ti->tcpi_flowhash = inp->inp_flowhash; if (tp->t_state > TCPS_LISTEN) { @@ -1691,7 +1634,7 @@ tcp_fill_info(struct tcpcb *tp, struct tcp_info *ti) ti->tcpi_rxoutoforderbytes = tp->t_stat.rxoutoforderbytes; if (tp->t_state > TCPS_LISTEN) { - ti->tcpi_synrexmits = tp->t_stat.rxmitsyns; + ti->tcpi_synrexmits = (uint8_t)tp->t_stat.rxmitsyns; } ti->tcpi_cell_rxpackets = inp->inp_cstat->rxpackets; ti->tcpi_cell_rxbytes = inp->inp_cstat->rxbytes; @@ -1801,13 +1744,13 @@ tcp_fill_info_for_info_tuple(struct info_tuple *itpl, struct tcp_info *ti) ina6_local = itpl->itpl_local_sin6.sin6_addr; if (IN6_IS_SCOPE_LINKLOCAL(&ina6_local) && itpl->itpl_local_sin6.sin6_scope_id) { - ina6_local.s6_addr16[1] = htons(itpl->itpl_local_sin6.sin6_scope_id); + ina6_local.s6_addr16[1] = htons((uint16_t)itpl->itpl_local_sin6.sin6_scope_id); } ina6_remote = itpl->itpl_remote_sin6.sin6_addr; if (IN6_IS_SCOPE_LINKLOCAL(&ina6_remote) && itpl->itpl_remote_sin6.sin6_scope_id) { - ina6_remote.s6_addr16[1] = htons(itpl->itpl_remote_sin6.sin6_scope_id); + ina6_remote.s6_addr16[1] = htons((uint16_t)itpl->itpl_remote_sin6.sin6_scope_id); } inp = in6_pcblookup_hash(pcbinfo, @@ -1842,7 +1785,7 @@ tcp_connection_fill_info(struct tcpcb *tp, struct tcp_connection_info *tci) struct inpcb *inp = tp->t_inpcb; bzero(tci, sizeof(*tci)); - tci->tcpi_state = tp->t_state; + tci->tcpi_state = (uint8_t)tp->t_state; if (tp->t_state > TCPS_LISTEN) { if (TSTMP_SUPPORTED(tp)) { tci->tcpi_options |= TCPCI_OPT_TIMESTAMPS; @@ -2085,12 +2028,11 @@ tcp_ctloutput(struct socket *so, struct sockopt *sopt) if (sopt->sopt_level != IPPROTO_TCP 
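
tcp_fill_info_for_info_tuple above embeds the caller-supplied scope id into link-local addresses before the PCB lookup, now with an explicit 16-bit cast; KAME-derived stacks keep the zone index in the second 16-bit word of fe80:: addresses for in-kernel lookups. A user-space sketch of that embedding; the kernel's IN6_IS_SCOPE_LINKLOCAL also covers multicast scopes, while this check handles only fe80::/10.

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static int
is_linklocal(const struct in6_addr *a)
{
    return a->s6_addr[0] == 0xfe && (a->s6_addr[1] & 0xc0) == 0x80;
}

/*
 * Embed the zone index in the (normally zero) second 16-bit word, the way
 * KAME-derived kernels key their link-local PCB lookups.
 */
static void
embed_scope(struct in6_addr *a, uint32_t scope_id)
{
    if (is_linklocal(a) && scope_id != 0) {
        uint16_t word = htons((uint16_t)scope_id);
        memcpy(&a->s6_addr[2], &word, sizeof(word));
    }
}

int
main(void)
{
    struct in6_addr a;
    char buf[INET6_ADDRSTRLEN];

    inet_pton(AF_INET6, "fe80::1", &a);
    embed_scope(&a, 4);
    printf("%s\n", inet_ntop(AF_INET6, &a, buf, sizeof(buf))); /* fe80:4::1 */
    return 0;
}
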
&& !(sopt->sopt_level == SOL_SOCKET && (sopt->sopt_name == SO_FLUSH || sopt->sopt_name == SO_TRAFFIC_MGT_BACKGROUND))) { -#if INET6 if (SOCK_CHECK_DOM(so, PF_INET6)) { error = ip6_ctloutput(so, sopt); - } else -#endif /* INET6 */ - error = ip_ctloutput(so, sopt); + } else { + error = ip_ctloutput(so, sopt); + } return error; } tp = intotcpcb(inp); @@ -2383,7 +2325,7 @@ tcp_ctloutput(struct socket *so, struct sockopt *sopt) mptcp_reset_keepalive(tp); } } else { - tp->t_adaptive_rtimo = optval; + tp->t_adaptive_rtimo = (uint8_t)optval; } break; case TCP_ADAPTIVE_WRITE_TIMEOUT: @@ -2397,50 +2339,7 @@ tcp_ctloutput(struct socket *so, struct sockopt *sopt) error = EINVAL; break; } else { - tp->t_adaptive_wtimo = optval; - } - break; - case TCP_ENABLE_MSGS: - error = sooptcopyin(sopt, &optval, sizeof(optval), - sizeof(optval)); - if (error) { - break; - } - if (optval < 0 || optval > 1) { - error = EINVAL; - } else if (optval == 1) { - /* - * Check if messages option is already - * enabled, if so return. - */ - if (so->so_flags & SOF_ENABLE_MSGS) { - VERIFY(so->so_msg_state != NULL); - break; - } - - /* - * allocate memory for storing message - * related state - */ - VERIFY(so->so_msg_state == NULL); - MALLOC(so->so_msg_state, - struct msg_state *, - sizeof(struct msg_state), - M_TEMP, M_WAITOK | M_ZERO); - if (so->so_msg_state == NULL) { - error = ENOMEM; - break; - } - - /* Enable message delivery */ - so->so_flags |= SOF_ENABLE_MSGS; - } else { - /* - * Can't disable message delivery on socket - * because of restrictions imposed by - * encoding/decoding - */ - error = EINVAL; + tp->t_adaptive_wtimo = (uint8_t)optval; } break; case TCP_SENDMOREACKS: @@ -2748,14 +2647,6 @@ tcp_ctloutput(struct socket *so, struct sockopt *sopt) optval = 0; } break; - - case TCP_ENABLE_MSGS: - if (so->so_flags & SOF_ENABLE_MSGS) { - optval = 1; - } else { - optval = 0; - } - break; case TCP_SENDMOREACKS: if (tp->t_flagsext & TF_NOSTRETCHACK) { optval = 1; @@ -2898,9 +2789,7 @@ tcp_attach(struct socket *so, struct proc *p) struct tcpcb *tp; struct inpcb *inp; int error; -#if INET6 int isipv6 = SOCK_CHECK_DOM(so, PF_INET6) != 0; -#endif error = in_pcballoc(so, &tcbinfo, p); if (error) { @@ -2927,24 +2816,22 @@ tcp_attach(struct socket *so, struct proc *p) so->so_snd.sb_flags |= SB_AUTOSIZE; } -#if INET6 if (isipv6) { inp->inp_vflag |= INP_IPV6; inp->in6p_hops = -1; /* use kernel default */ - } else -#endif /* INET6 */ - inp->inp_vflag |= INP_IPV4; + } else { + inp->inp_vflag |= INP_IPV4; + } tp = tcp_newtcpcb(inp); if (tp == NULL) { int nofd = so->so_state & SS_NOFDREF; /* XXX */ so->so_state &= ~SS_NOFDREF; /* don't free the socket yet */ -#if INET6 if (isipv6) { in6_pcbdetach(inp); - } else -#endif /* INET6 */ - in_pcbdetach(inp); + } else { + in_pcbdetach(inp); + } so->so_state |= nofd; return ENOBUFS; } @@ -3061,7 +2948,6 @@ tcp_out_cksum_stats(u_int32_t len) tcpstat.tcps_snd_swcsum_bytes += len; } -#if INET6 void tcp_in6_cksum_stats(u_int32_t len) { @@ -3075,38 +2961,6 @@ tcp_out6_cksum_stats(u_int32_t len) tcpstat.tcps_snd6_swcsum++; tcpstat.tcps_snd6_swcsum_bytes += len; } -#endif /* INET6 */ - -/* - * When messages are enabled on a TCP socket, the message priority - * is sent as a control message. This function will extract it. 
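
tcp_ctloutput keeps only TCP-level options (plus SO_FLUSH and SO_TRAFFIC_MGT_BACKGROUND at the socket level) for itself and forwards everything else to the IP layer of the socket's domain, now with braces instead of the old #if INET6 dangling else. A reduced sketch of that layered dispatch; the option names, levels, and per-domain handlers are stand-ins.

#include <stdbool.h>
#include <stdio.h>

enum level { LVL_SOCKET, LVL_IP, LVL_TCP };

struct opt  { enum level level; int name; };
struct sock { bool is_ipv6; };

static int ip_ctloutput(struct sock *so, struct opt *o)  { (void)so; (void)o; puts("ip_ctloutput");  return 0; }
static int ip6_ctloutput(struct sock *so, struct opt *o) { (void)so; (void)o; puts("ip6_ctloutput"); return 0; }

#define OPT_FLUSH 1   /* stand-in for the socket-level options TCP owns */

/*
 * Options that are not TCP-level (and not the few socket-level ones TCP
 * handles itself) are handed straight to the IP layer matching the domain.
 */
static int
tcp_ctloutput_sketch(struct sock *so, struct opt *o)
{
    if (o->level != LVL_TCP &&
        !(o->level == LVL_SOCKET && o->name == OPT_FLUSH)) {
        return so->is_ipv6 ? ip6_ctloutput(so, o) : ip_ctloutput(so, o);
    }
    puts("handled by TCP");
    return 0;
}

int
main(void)
{
    struct sock so4 = { false }, so6 = { true };
    struct opt  ipopt = { LVL_IP, 42 }, tcpopt = { LVL_TCP, 7 };

    tcp_ctloutput_sketch(&so4, &ipopt);   /* ip_ctloutput  */
    tcp_ctloutput_sketch(&so6, &ipopt);   /* ip6_ctloutput */
    tcp_ctloutput_sketch(&so6, &tcpopt);  /* handled by TCP */
    return 0;
}
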
- */ -int -tcp_get_msg_priority(struct mbuf *control, uint32_t *msgpri) -{ - struct cmsghdr *cm; - - if (control == NULL) { - return EINVAL; - } - - for (cm = M_FIRST_CMSGHDR(control); - is_cmsg_valid(control, cm); - cm = M_NXT_CMSGHDR(control, cm)) { - if (cm->cmsg_level == SOL_SOCKET && - cm->cmsg_type == SCM_MSG_PRIORITY) { - if (cm->cmsg_len != CMSG_LEN(sizeof(uint32_t))) { - return EINVAL; - } - *msgpri = *(uint32_t *)(void *)CMSG_DATA(cm); - if (*msgpri < MSG_PRI_MIN || *msgpri > MSG_PRI_MAX) { - return EINVAL; - } - break; - } - } - return 0; -} int tcp_get_mpkl_send_info(struct mbuf *control, diff --git a/bsd/netinet/tcp_var.h b/bsd/netinet/tcp_var.h index 95e1903ee..3a93146ae 100644 --- a/bsd/netinet/tcp_var.h +++ b/bsd/netinet/tcp_var.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2019 Apple Inc. All rights reserved. + * Copyright (c) 2000-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -174,6 +174,7 @@ struct sackhole { struct sackhint { struct sackhole *nexthole; int sack_bytes_rexmit; + int sack_bytes_acked; }; struct tcp_rxt_seg { @@ -228,7 +229,6 @@ struct tcp_ccstate { u_int32_t tc_origin_point; /* window at the start of an epoch */ u_int32_t tc_tcp_win; /* computed tcp win */ u_int32_t tc_tcp_bytes_acked; /* bytes acked */ - u_int32_t tc_target_win; /* cubic target win */ u_int32_t tc_avg_lastmax; /* Average of last max */ u_int32_t tc_mean_deviation; /* Mean absolute deviation */ float tc_epoch_period; /* K parameter */ @@ -239,7 +239,6 @@ struct tcp_ccstate { #define cub_tcp_win __u__._cubic_state_.tc_tcp_win #define cub_tcp_bytes_acked __u__._cubic_state_.tc_tcp_bytes_acked #define cub_epoch_period __u__._cubic_state_.tc_epoch_period -#define cub_target_win __u__._cubic_state_.tc_target_win #define cub_avg_lastmax __u__._cubic_state_.tc_avg_lastmax #define cub_mean_dev __u__._cubic_state_.tc_mean_deviation } __u__; @@ -268,7 +267,7 @@ struct tcpcb { #define TF_REQ_TSTMP 0x00080 /* have/will request timestamps */ #define TF_RCVD_TSTMP 0x00100 /* a timestamp was received in SYN */ #define TF_SACK_PERMIT 0x00200 /* other side said I could SACK */ -#define TF_NEEDSYN 0x00400 /* send SYN (implicit state) */ +#define TF_NEEDSYN 0x00400 /* send SYN (implicit state) - unused but needed for backwards compatibility */ #define TF_NEEDFIN 0x00800 /* send FIN (implicit state) */ #define TF_NOPUSH 0x01000 /* don't push */ #define TF_REQ_CC 0x02000 /* have/will request CC */ @@ -305,42 +304,44 @@ struct tcpcb { tcp_seq rcv_nxt; /* receive next */ tcp_seq rcv_adv; /* advertised window */ - u_int32_t rcv_wnd; /* receive window */ + uint32_t rcv_wnd; /* receive window */ + uint32_t t_last_recwin; tcp_seq rcv_up; /* receive urgent pointer */ - u_int32_t snd_wnd; /* send window */ - u_int32_t snd_cwnd; /* congestion-controlled window */ - u_int32_t snd_ssthresh; /* snd_cwnd size threshold for + uint32_t snd_wnd; /* send window */ + uint32_t snd_cwnd; /* congestion-controlled window */ + uint32_t snd_ssthresh; /* snd_cwnd size threshold for * for slow start exponential to * linear switch */ tcp_seq snd_recover; /* for use in NewReno Fast Recovery */ - u_int32_t t_maxopd; /* mss plus options */ - u_int32_t t_rcvtime; /* time at which a packet was received */ - u_int32_t t_sndtime; /* time at which we last sent new data */ - u_int32_t t_starttime; /* time connection was established */ + uint32_t t_maxopd; /* mss plus options */ + uint32_t t_rcvtime; /* time at which a packet was received */ + uint32_t t_sndtime; /* time at which we last sent new data */ + 
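
The deleted tcp_get_msg_priority shows the standard pattern for pulling one value out of ancillary data: walk the cmsghdr chain, match level and type, check the length, then read CMSG_DATA. A portable user-space version of that walk; SCM_MSG_PRIORITY was a private option that is gone with this change, so the sketch matches an arbitrary, clearly hypothetical type value instead.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>

#define CMSG_TYPE_DEMO 0x7f   /* illustrative only, not a real SCM_* value */

/*
 * Scan a msghdr's control buffer for a 32-bit value carried at
 * (level, type); returns 0 and fills *out on success, EINVAL otherwise.
 */
static int
find_u32_cmsg(struct msghdr *mhdr, int level, int type, uint32_t *out)
{
    for (struct cmsghdr *cm = CMSG_FIRSTHDR(mhdr); cm != NULL;
        cm = CMSG_NXTHDR(mhdr, cm)) {
        if (cm->cmsg_level == level && cm->cmsg_type == type) {
            if (cm->cmsg_len != CMSG_LEN(sizeof(uint32_t))) {
                return EINVAL;
            }
            memcpy(out, CMSG_DATA(cm), sizeof(*out));
            return 0;
        }
    }
    return EINVAL;
}

int
main(void)
{
    union { char buf[CMSG_SPACE(sizeof(uint32_t))]; struct cmsghdr align; } u;
    struct msghdr mhdr;
    uint32_t val = 0;

    memset(&u, 0, sizeof(u));
    memset(&mhdr, 0, sizeof(mhdr));
    mhdr.msg_control = u.buf;
    mhdr.msg_controllen = sizeof(u.buf);

    struct cmsghdr *cm = CMSG_FIRSTHDR(&mhdr);
    cm->cmsg_level = SOL_SOCKET;
    cm->cmsg_type = CMSG_TYPE_DEMO;
    cm->cmsg_len = CMSG_LEN(sizeof(uint32_t));
    uint32_t pri = 7;
    memcpy(CMSG_DATA(cm), &pri, sizeof(pri));

    printf("rc=%d val=%u\n",
        find_u32_cmsg(&mhdr, SOL_SOCKET, CMSG_TYPE_DEMO, &val), val);
    return 0;
}
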
uint32_t t_starttime; /* time connection was established */ int t_rtttime; /* tcp clock when rtt calculation was started */ tcp_seq t_rtseq; /* sequence number being timed */ - u_int32_t rfbuf_ts; /* recv buffer autoscaling timestamp */ - u_int32_t rfbuf_cnt; /* recv buffer autoscaling byte count */ - - int t_rxtcur; /* current retransmit value (ticks) */ - u_int t_maxseg; /* maximum segment size */ - int t_srtt; /* smoothed round-trip time */ - int t_rttvar; /* variance in round-trip time */ - - u_int64_t t_accsleep_ms; /* accumulated sleep time since last boot */ - u_int16_t t_reassqlen; /* length of reassembly queue */ - u_int16_t t_rxtshift; /* log(2) of rexmt exp. backoff */ - u_int32_t t_rttmin; /* minimum rtt allowed */ - u_int32_t t_rttbest; /* best rtt we've seen */ - u_int32_t t_rttcur; /* most recent value of rtt */ - u_int32_t t_rttupdated; /* number of times rtt sampled */ - u_int32_t t_rxt_conndroptime; /* retxmt conn gets dropped after this time, when set */ - u_int32_t t_rxtstart; /* time at which retransmission started */ - u_int32_t max_sndwnd; /* largest window peer has offered */ + uint32_t rfbuf_ts; /* recv buffer autoscaling timestamp */ + uint32_t rfbuf_cnt; /* recv buffer autoscaling byte count */ + uint32_t rfbuf_space; /* Current "ideal" estimate of the space */ + + int t_rxtcur; /* current retransmit value (ticks) */ + unsigned int t_maxseg; /* maximum segment size */ + int t_srtt; /* smoothed round-trip time */ + int t_rttvar; /* variance in round-trip time */ + + uint64_t t_accsleep_ms; /* accumulated sleep time since last boot */ + uint16_t t_reassqlen; /* length of reassembly queue */ + uint16_t t_rxtshift; /* log(2) of rexmt exp. backoff */ + uint32_t t_rttmin; /* minimum rtt allowed */ + uint32_t t_rttbest; /* best rtt we've seen */ + uint32_t t_rttcur; /* most recent value of rtt */ + uint32_t t_rttupdated; /* number of times rtt sampled */ + uint32_t t_rxt_conndroptime; /* retxmt conn gets dropped after this time, when set */ + uint32_t t_rxtstart; /* time at which retransmission started */ + uint32_t max_sndwnd; /* largest window peer has offered */ int t_softerror; /* possible error not yet reported */ /* out-of-band data */ @@ -369,13 +370,21 @@ struct tcpcb { /* RFC 3465 variables */ u_int32_t t_bytes_acked; /* ABC "bytes_acked" parameter */ - int t_lastchain; /* amount of packets chained last time around */ - u_int16_t t_unacksegs; /* received but unacked segments for delaying acks */ - u_int8_t t_rexmtthresh; /* duplicate ack threshold for entering fast recovery */ - u_int8_t t_rtimo_probes; /* number of adaptive rtimo probes sent */ - u_int32_t t_persist_timeout; /* ZWP persistence limit as set by PERSIST_TIMEOUT */ - u_int32_t t_persist_stop; /* persistence limit deadline if triggered by ZWP */ - u_int32_t t_notsent_lowat; /* Low water for not sent data */ + int t_lastchain; /* amount of packets chained last time around */ + uint16_t t_unacksegs; /* received but unacked segments for delaying acks */ + + /* + * Pretty arbitrary value ;-) + * Goal is to make sure that some ACKs are being sent more frequently + * to allow the other side to ramp-up. 
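
The new t_forced_acks counter below is seeded with TCP_FORCED_ACKS_COUNT (16) when stretch-acking is reset, so the next several acknowledgements go out as pure, immediate ACKs and give the peer's congestion window something to ramp against. A minimal sketch of how such a countdown would gate ack-stretching; the decrement site is not part of this hunk, so that half is an assumption.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TCP_FORCED_ACKS_COUNT 16   /* matches the new tcpcb field's seed */

struct conn {
    uint16_t t_forced_acks;   /* pure ACKs that still must go out promptly */
};

/* After an event that resets stretch-acking, recharge the counter. */
static void
reset_stretch_ack(struct conn *tp)
{
    tp->t_forced_acks = TCP_FORCED_ACKS_COUNT;
}

/*
 * Ask whether this ACK may be delayed/stretched; while the counter is
 * non-zero, consume one credit and force the ACK out immediately.
 * (Assumed decrement logic, for illustration only.)
 */
static bool
may_delay_ack(struct conn *tp)
{
    if (tp->t_forced_acks > 0) {
        tp->t_forced_acks--;
        return false;
    }
    return true;
}

int
main(void)
{
    struct conn c = { 0 };
    int forced = 0;

    reset_stretch_ack(&c);
    for (int i = 0; i < 20; i++) {
        if (!may_delay_ack(&c)) {
            forced++;
        }
    }
    printf("forced %d of 20 ACKs\n", forced);   /* 16 */
    return 0;
}
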
+ */ +#define TCP_FORCED_ACKS_COUNT 16 + uint16_t t_forced_acks; /* count of pure ACKs that need to be forced out */ + uint8_t t_rexmtthresh; /* duplicate ack threshold for entering fast recovery */ + uint8_t t_rtimo_probes; /* number of adaptive rtimo probes sent */ + uint32_t t_persist_timeout; /* ZWP persistence limit as set by PERSIST_TIMEOUT */ + uint32_t t_persist_stop; /* persistence limit deadline if triggered by ZWP */ + uint32_t t_notsent_lowat; /* Low water for not sent data */ /* Receiver state for stretch-ack algorithm */ u_int32_t rcv_unackwin; /* to measure win for stretching acks */ @@ -417,7 +426,7 @@ struct tcpcb { u_int32_t t_badrexmt_time; /* bad rexmt detection time */ /* Packet reordering metric */ - u_int16_t t_reorderwin; /* Reordering late time offset */ + u_int32_t t_reorderwin; /* Reordering late time offset */ /* SACK related state */ int16_t snd_numholes; /* number of holes seen by sender */ @@ -429,6 +438,8 @@ struct tcpcb { int rcv_numsacks; /* # distinct sack blks present */ struct sackblk sackblks[MAX_SACK_BLKS]; /* seq nos. of sack blocks */ struct sackhint sackhint; /* SACK scoreboard hint */ + tcp_seq send_highest_sack; /* Sequence number of fresh data sent after the most recent fast-retransmit */ + int t_new_dupacks; /* Dupacks received above send_highest_sack */ struct mbuf *t_pktlist_head; /* First packet in transmit chain */ struct mbuf *t_pktlist_tail; /* Last packet in transmit chain */ @@ -442,21 +453,23 @@ struct tcpcb { u_int32_t tso_max_segment_size; /* TSO maximum segment unit for NIC */ u_int16_t t_pmtud_lastseg_size; /* size of the last sent segment */ - u_int16_t t_pmtud_saved_maxopd; /* MSS saved before performing PMTU-D BlackHole detection */ + u_int32_t t_pmtud_saved_maxopd; /* MSS saved before performing PMTU-D BlackHole detection */ u_int32_t t_pmtud_start_ts; /* Time of PMTUD blackhole detection */ struct{ u_int32_t rxduplicatebytes; u_int32_t rxoutoforderbytes; u_int32_t txretransmitbytes; - u_int8_t synrxtshift; - u_int8_t rxmitsyns; + u_int16_t synrxtshift; + u_int16_t rxmitsyns; u_int16_t unused_pad_to_8; u_int32_t rxmitpkts; + uint32_t delayed_acks_sent; + uint32_t acks_delayed; } t_stat; u_int8_t t_notify_ack_count; u_int8_t t_ecn_recv_ce_pkt; /* Received packet with CE-bit set (independent from last_ack_sent) */ - u_int16_t t_cached_maxopd; /* default for MSS adjustment using link status report */ + u_int32_t t_cached_maxopd; /* default for MSS adjustment using link status report */ uint32_t bg_ssthresh; /* Slow start threshold until delay increases */ uint32_t t_flagsext; /* Another field to accommodate more flags */ @@ -464,7 +477,7 @@ struct tcpcb { #define TF_RCVUNACK_WAITSS 0x2 /* set when the receiver should not stretch acks */ #define TF_BWMEAS_INPROGRESS 0x4 /* Indicate BW meas is happening */ #define TF_MEASURESNDBW 0x8 /* Measure send bw on this connection */ -#define TF_LRO_OFFLOADED 0x10 /* Connection LRO offloaded */ +#define TF_LAST_IS_PSH 0x10 /* Indicates whether the last packet in the rcv socket buffer had the PUSH-flag set */ #define TF_SACK_ENABLE 0x20 /* SACK is enabled */ #define TF_RECOMPUTE_RTT 0x40 /* recompute RTT after spurious retransmit */ #define TF_DETECT_READSTALL 0x80 /* Used to detect a stall during read operation */ @@ -477,7 +490,6 @@ struct tcpcb { #define TF_FORCE 0x8000 /* force 1 byte out */ #define TF_DISABLE_STRETCHACK 0x10000 /* auto-disable stretch ack */ #define TF_NOBLACKHOLE_DETECTION 0x20000 /* Disable PMTU blackhole detection */ -#define TF_DISABLE_DSACK 0x40000 /* Ignore DSACK 
due to n/w duplication */ #define TF_RESCUE_RXT 0x80000 /* SACK rescue retransmit */ #define TF_CWND_NONVALIDATED 0x100000 /* cwnd non validated */ #define TF_IF_PROBING 0x200000 /* Trigger interface probe timeout */ @@ -498,7 +510,6 @@ struct tcpcb { uint32_t std_dev_iaj; /* Standard deviation */ #endif /* TRAFFIC_MGT */ struct bwmeas *t_bwmeas; /* State for bandwidth measurement */ - uint32_t t_lropktlen; /* Bytes in a LRO frame */ tcp_seq t_idleat; /* rcv_nxt at idle time */ TAILQ_ENTRY(tcpcb) t_twentry; /* link for time wait queue */ struct tcp_ccstate *t_ccstate; /* congestion control related state */ @@ -626,6 +637,13 @@ struct tcpcb { u_int32_t t_log_flags; /* TCP logging flags*/ u_int32_t t_connect_time; /* time when the connection started */ + + uint32_t t_comp_gencnt; /* Current compression generation-count */ + uint32_t t_comp_lastinc; /* Last time the gen-count was changed - should change every TCP_COMP_CHANGE_RATE ms */ +#define TCP_COMP_CHANGE_RATE 5 /* Intervals at which we change the gencnt. Means that worst-case we send one ACK every TCP_COMP_CHANGE_RATE ms */ + + uuid_t t_fsw_uuid; + uuid_t t_flow_uuid; }; #define IN_FASTRECOVERY(tp) (tp->t_flags & TF_FASTRECOVERY) @@ -638,23 +656,26 @@ struct tcpcb { * should be just a trickle and it will help to improve performance. * We also do not want to back off twice in the same RTT. */ -#define ENTER_FASTRECOVERY(_tp_) do { \ - (_tp_)->t_flags |= TF_FASTRECOVERY; \ - if (INP_IS_FLOW_CONTROLLED((_tp_)->t_inpcb)) \ - inp_reset_fc_state((_tp_)->t_inpcb); \ - if (!SLIST_EMPTY(&tp->t_rxt_segments)) \ - tcp_rxtseg_clean(tp); \ +#define ENTER_FASTRECOVERY(_tp_) do { \ + (_tp_)->t_flags |= TF_FASTRECOVERY; \ + if (INP_IS_FLOW_CONTROLLED((_tp_)->t_inpcb)) \ + inp_reset_fc_state((_tp_)->t_inpcb); \ + if (!SLIST_EMPTY(&tp->t_rxt_segments)) \ + tcp_rxtseg_clean(tp); \ + (_tp_)->t_new_dupacks = 0; \ } while(0) -#define EXIT_FASTRECOVERY(_tp_) do { \ - (_tp_)->t_flags &= ~TF_FASTRECOVERY; \ - (_tp_)->t_dupacks = 0; \ - (_tp_)->t_rexmtthresh = tcprexmtthresh; \ - (_tp_)->t_bytes_acked = 0; \ - (_tp_)->ecn_flags &= ~TE_INRECOVERY; \ - (_tp_)->t_timer[TCPT_PTO] = 0; \ - (_tp_)->t_flagsext &= ~TF_RESCUE_RXT; \ - (_tp_)->t_lossflightsize = 0; \ +#define EXIT_FASTRECOVERY(_tp_) do { \ + (_tp_)->t_flags &= ~TF_FASTRECOVERY; \ + (_tp_)->t_dupacks = 0; \ + (_tp_)->t_new_dupacks = 0; \ + (_tp_)->t_rexmtthresh = (uint8_t)tcprexmtthresh; \ + (_tp_)->t_bytes_acked = 0; \ + (_tp_)->ecn_flags &= ~TE_INRECOVERY; \ + (_tp_)->t_timer[TCPT_PTO] = 0; \ + (_tp_)->t_flagsext &= ~TF_RESCUE_RXT; \ + (_tp_)->t_lossflightsize = 0; \ + (_tp_)->sackhint.sack_bytes_acked = 0; \ } while(0) /* @@ -662,10 +683,10 @@ struct tcpcb { * the retransmit threshold, use Limited Transmit algorithm */ extern int tcprexmtthresh; -#define ALLOW_LIMITED_TRANSMIT(_tp_) \ - ((_tp_)->t_dupacks > 0 && \ - (_tp_)->t_dupacks < (_tp_)->t_rexmtthresh && \ - ((_tp_)->t_flagsext & (TF_PKTS_REORDERED|TF_DELAY_RECOVERY)) \ +#define ALLOW_LIMITED_TRANSMIT(_tp_) \ + ((_tp_)->t_dupacks > 0 && \ + (_tp_)->t_dupacks < (_tp_)->t_rexmtthresh && \ + ((_tp_)->t_flagsext & (TF_PKTS_REORDERED|TF_DELAY_RECOVERY)) \ != (TF_PKTS_REORDERED|TF_DELAY_RECOVERY)) /* @@ -698,10 +719,6 @@ extern int tcprexmtthresh; #define TCP_SEND_DSACK_OPT(_tp_) \ ((_tp_)->t_dsack_lseq > 0 && (_tp_)->t_dsack_rseq > 0) -/* Check if DSACK option should be processed */ -#define TCP_DSACK_ENABLED(tp) (tcp_dsack_enable == 1 && \ - !(tp->t_flagsext & TF_DISABLE_DSACK)) - /* * Returns true if a DSACK sequence is within the max send window 
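
ENTER_FASTRECOVERY and EXIT_FASTRECOVERY above now also zero the new per-recovery counters: t_new_dupacks on entry, and both t_new_dupacks and sackhint.sack_bytes_acked on exit, with tcprexmtthresh cast back into the 8-bit field. A reduced sketch of that bookkeeping as plain functions; the flow-control reset and retransmit-segment cleanup from the real macros are elided.

#include <stdint.h>
#include <stdio.h>

struct recovery_state {
    int      in_recovery;
    int      t_dupacks;
    int      t_new_dupacks;        /* dupacks above the latest fast-retransmit */
    int      sack_bytes_acked;     /* per-recovery SACKed byte count */
    uint8_t  t_rexmtthresh;
};

static const int tcprexmtthresh = 3;

static void
enter_fastrecovery(struct recovery_state *tp)
{
    tp->in_recovery = 1;
    tp->t_new_dupacks = 0;          /* counted fresh for this episode */
}

static void
exit_fastrecovery(struct recovery_state *tp)
{
    tp->in_recovery = 0;
    tp->t_dupacks = 0;
    tp->t_new_dupacks = 0;
    tp->sack_bytes_acked = 0;
    tp->t_rexmtthresh = (uint8_t)tcprexmtthresh;
}

int
main(void)
{
    struct recovery_state tp = { 0, 5, 2, 4096, 3 };

    enter_fastrecovery(&tp);
    exit_fastrecovery(&tp);
    printf("dupacks=%d new=%d sacked=%d thresh=%u\n",
        tp.t_dupacks, tp.t_new_dupacks, tp.sack_bytes_acked, tp.t_rexmtthresh);
    return 0;
}
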
that will * be accepted. In order to set a window to validate sequence numbers, the @@ -713,7 +730,7 @@ extern int tcprexmtthresh; * DSACK option. Choosing a much larger limit means that the memory for * retransmit segments can be held for a longer time. */ -#define TCP_DSACK_MAX_SEND_WINDOW(_tp_) ((_tp_)->t_maxseg << 8) +#define TCP_DSACK_MAX_SEND_WINDOW(_tp_) (MIN((_tp_)->snd_wnd, tcp_autosndbuf_max)) #define TCP_DSACK_SEQ_IN_WINDOW(_tp_, _seq_, _una_) \ (SEQ_LEQ((_seq_), (_tp_)->snd_max) && \ SEQ_GEQ((_seq_), ((_una_) - TCP_DSACK_MAX_SEND_WINDOW(_tp_)))) @@ -724,9 +741,6 @@ extern int tcprexmtthresh; mptcp_reset_rexmit_state((_tp_)); \ } while(0); -#define TCP_AUTORCVBUF_MAX(_ifp_) (((_ifp_) != NULL && (IFNET_IS_CELLULAR((_ifp_))) && ((_ifp_)->if_eflags & IFEF_3CA)) ? \ - (tcp_autorcvbuf_max << 1) : tcp_autorcvbuf_max) - #define TCP_IF_STATE_CHANGED(tp, probe_if_index) \ (probe_if_index > 0 && tp->t_inpcb->inp_last_outifp != NULL && \ probe_if_index == tp->t_inpcb->inp_last_outifp->if_index) @@ -757,23 +771,6 @@ struct tcpopt { u_char *to_tfo; /* pointer to the TFO cookie */ }; -/* - * The TAO cache entry which is stored in the protocol family specific - * portion of the route metrics. - */ -struct rmxp_tao { - tcp_cc tao_cc; /* latest CC in valid SYN */ - tcp_cc tao_ccsent; /* latest CC sent to peer */ - u_short tao_mssopt; /* peer's cached MSS */ -#ifdef notyet - u_short tao_flags; /* cache status flags */ -#define TAOF_DONT 0x0001 /* peer doesn't understand rfc1644 */ -#define TAOF_OK 0x0002 /* peer does understand rfc1644 */ -#define TAOF_UNDEF 0 /* we don't know yet */ -#endif /* notyet */ -}; -#define rmx_taop(r) ((struct rmxp_tao *)(r).rmx_filler) - #define intotcpcb(ip) ((struct tcpcb *)(ip)->inp_ppcb) #define sototcpcb(so) (intotcpcb(sotoinpcb(so))) @@ -836,7 +833,7 @@ struct tcpcb { #define TF_REQ_TSTMP 0x00080 /* have/will request timestamps */ #define TF_RCVD_TSTMP 0x00100 /* a timestamp was received in SYN */ #define TF_SACK_PERMIT 0x00200 /* other side said I could SACK */ -#define TF_NEEDSYN 0x00400 /* send SYN (implicit state) */ +#define TF_NEEDSYN 0x00400 /* send SYN (implicit state) - unused but needed for backwards compatibility */ #define TF_NEEDFIN 0x00800 /* send FIN (implicit state) */ #define TF_NOPUSH 0x01000 /* don't push */ #define TF_REQ_CC 0x02000 /* have/will request CC */ @@ -1027,14 +1024,6 @@ struct tcpstat { u_int32_t tcps_rxtfindrop; /* drop conn after retransmitting FIN */ u_int32_t tcps_fcholdpacket; /* packets withheld because of flow control */ - /* LRO related stats */ - u_int32_t tcps_coalesced_pack; /* number of coalesced packets */ - u_int32_t tcps_flowtbl_full; /* times flow table was full */ - u_int32_t tcps_flowtbl_collision; /* collisions in flow tbl */ - u_int32_t tcps_lro_twopack; /* 2 packets coalesced */ - u_int32_t tcps_lro_multpack; /* 3 or 4 pkts coalesced */ - u_int32_t tcps_lro_largepack; /* 5 or more pkts coalesced */ - u_int32_t tcps_limited_txt; /* Limited transmit used */ u_int32_t tcps_early_rexmt; /* Early retransmit used */ u_int32_t tcps_sack_ackadv; /* Cumulative ack advanced along with sack */ @@ -1048,9 +1037,9 @@ struct tcpstat { u_int32_t tcps_snd_swcsum_bytes; /* tcp swcksum (outbound), bytes */ u_int32_t tcps_snd6_swcsum; /* tcp6 swcksum (outbound), packets */ u_int32_t tcps_snd6_swcsum_bytes; /* tcp6 swcksum (outbound), bytes */ - u_int32_t tcps_msg_unopkts; /* unordered packet on TCP msg stream */ - u_int32_t tcps_msg_unoappendfail; /* failed to append unordered pkt */ - u_int32_t tcps_msg_sndwaithipri; /* send is 
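
TCP_DSACK_MAX_SEND_WINDOW above is now bounded by the actual send window, MIN(snd_wnd, tcp_autosndbuf_max), rather than 256 segments, and the in-window test relies on the usual modular sequence-number comparisons. A standalone sketch with the SEQ_ helpers spelled out, showing that the check stays correct across sequence-number wraparound.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint32_t tcp_seq;

/* Wrap-safe sequence comparisons: subtract, then compare as signed. */
#define SEQ_LEQ(a, b) ((int32_t)((a) - (b)) <= 0)
#define SEQ_GEQ(a, b) ((int32_t)((a) - (b)) >= 0)
#define MIN(a, b)     ((a) < (b) ? (a) : (b))

/*
 * Accept a reported DSACK sequence only if it lies between snd_max and
 * snd_una minus a validation window sized by the current send window.
 */
static bool
dsack_seq_in_window(tcp_seq seq, tcp_seq snd_una, tcp_seq snd_max,
    uint32_t snd_wnd, uint32_t autosndbuf_max)
{
    uint32_t window = MIN(snd_wnd, autosndbuf_max);

    return SEQ_LEQ(seq, snd_max) && SEQ_GEQ(seq, snd_una - window);
}

int
main(void)
{
    /* Sequence space wraps: snd_una just past zero still validates. */
    printf("%d\n",
        dsack_seq_in_window(0xfffffff0u, 0x10, 0x2000, 65535, 1u << 20));
    return 0;
}
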
waiting for high priority data */ + u_int32_t tcps_unused_1; + u_int32_t tcps_unused_2; + u_int32_t tcps_unused_3; /* MPTCP Related stats */ u_int32_t tcps_invalid_mpcap; /* Invalid MPTCP capable opts */ @@ -1238,7 +1227,7 @@ struct xtcpcb { u_quad_t xt_alignment_hack; }; -#if !CONFIG_EMBEDDED +#if XNU_TARGET_OS_OSX || !(TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR) struct xtcpcb64 { u_int32_t xt_len; @@ -1319,7 +1308,7 @@ struct xtcpcb64 { u_quad_t xt_alignment_hack; }; -#endif /* !CONFIG_EMBEDDED */ +#endif /* XNU_TARGET_OS_OSX || !(TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR) */ #ifdef PRIVATE @@ -1475,23 +1464,6 @@ struct tcpprogressreq { (tp)->t_lastchain = (tp)->t_pktlist_sentlen = 0; \ } -#define TCPCTL_NAMES { \ - { 0, 0 }, \ - { "rfc1323", CTLTYPE_INT }, \ - { "rfc1644", CTLTYPE_INT }, \ - { "mssdflt", CTLTYPE_INT }, \ - { "stats", CTLTYPE_STRUCT }, \ - { "rttdflt", CTLTYPE_INT }, \ - { "keepidle", CTLTYPE_INT }, \ - { "keepintvl", CTLTYPE_INT }, \ - { "sendspace", CTLTYPE_INT }, \ - { "recvspace", CTLTYPE_INT }, \ - { "keepinit", CTLTYPE_INT }, \ - { "pcblist", CTLTYPE_STRUCT }, \ - { "delacktime", CTLTYPE_INT }, \ - { "v6mssdflt", CTLTYPE_INT }, \ -} - extern int tcp_TCPTV_MIN; #ifdef SYSCTL_DECL @@ -1509,28 +1481,32 @@ extern int tcp_minmss; extern int tcp_tfo_halfcnt; extern int tcp_tfo_backlog; extern int tcp_fastopen; -extern int ss_fltsz; extern int ss_fltsz_local; -extern int tcp_do_rfc3390; /* Calculate ss_fltsz according to RFC 3390 */ -extern int tcp_do_rfc1323; extern int target_qdelay; extern u_int32_t tcp_now; /* for RFC 1323 timestamps */ extern struct timeval tcp_uptime; extern lck_spin_t *tcp_uptime_lock; extern int tcp_delack_enabled; -extern int tcp_do_sack; /* SACK enabled/disabled */ -extern int tcp_do_rfc3465; -extern int tcp_do_rfc3465_lim2; extern int maxseg_unacked; extern int tcp_use_newreno; extern struct zone *tcp_reass_zone; extern struct zone *tcp_rxt_seg_zone; extern int tcp_ecn_outbound; extern int tcp_ecn_inbound; -extern u_int32_t tcp_do_autorcvbuf; -extern u_int32_t tcp_autorcvbuf_max; -extern u_int32_t tcp_autorcvbuf_inc_shift; +extern uint32_t tcp_do_autorcvbuf; +extern uint32_t tcp_autorcvbuf_max; extern int tcp_recv_bg; +extern int tcp_do_ack_compression; +/* + * Dummy value used for when there is no flow and we want to ensure that compression + * can happen. 
+ */ +#define TCP_ACK_COMPRESSION_DUMMY 1 + +extern int tcp_do_better_lr; +extern int tcp_cubic_minor_fixes; +extern int tcp_cubic_rfc_compliant; +extern int tcp_flow_control_response; struct protosw; struct domain; @@ -1554,8 +1530,6 @@ struct tcpcb * tcp_drop(struct tcpcb *, int); void tcp_drain(void); void tcp_getrt_rtt(struct tcpcb *tp, struct rtentry *rt); -struct rmxp_tao * -tcp_gettaocache(struct inpcb *); void tcp_init(struct protosw *, struct domain *); void tcp_input(struct mbuf *, int); void tcp_mss(struct tcpcb *, int, unsigned int); @@ -1566,7 +1540,7 @@ struct tcpcb * tcp_newtcpcb(struct inpcb *); int tcp_output(struct tcpcb *); void tcp_respond(struct tcpcb *, void *, struct tcphdr *, struct mbuf *, - tcp_seq, tcp_seq, int, struct tcp_respond_args *); + tcp_seq, tcp_seq, uint8_t, struct tcp_respond_args *); struct rtentry * tcp_rtlookup(struct inpcb *, unsigned int); void tcp_setpersist(struct tcpcb *); @@ -1583,7 +1557,7 @@ void tcp_trace(int, int, struct tcpcb *, void *, struct tcphdr *, int); void tcp_fill_info(struct tcpcb *, struct tcp_info *); void tcp_sack_doack(struct tcpcb *, struct tcpopt *, struct tcphdr *, - u_int32_t *); + u_int32_t *, uint32_t *); extern boolean_t tcp_sack_process_dsack(struct tcpcb *, struct tcpopt *, struct tcphdr *); int tcp_detect_bad_rexmt(struct tcpcb *, struct tcphdr *, struct tcpopt *, @@ -1594,14 +1568,16 @@ void tcp_sack_adjust(struct tcpcb *tp); struct sackhole *tcp_sack_output(struct tcpcb *tp, int *sack_bytes_rexmt); void tcp_sack_partialack(struct tcpcb *, struct tcphdr *); void tcp_free_sackholes(struct tcpcb *tp); +void tcp_sack_lost_rexmit(struct tcpcb *tp); int32_t tcp_sbspace(struct tcpcb *tp); void tcp_set_tso(struct tcpcb *tp, struct ifnet *ifp); void tcp_set_ecn(struct tcpcb *tp, struct ifnet *ifp); +int tcp_flight_size(struct tcpcb *tp); void tcp_reset_stretch_ack(struct tcpcb *tp); extern void tcp_get_ports_used(u_int32_t, int, u_int32_t, bitstr_t *); uint32_t tcp_count_opportunistic(unsigned int ifindex, u_int32_t flags); uint32_t tcp_find_anypcb_byaddr(struct ifaddr *ifa); -void tcp_set_max_rwinscale(struct tcpcb *tp, struct socket *so, struct ifnet *ifp); +void tcp_set_max_rwinscale(struct tcpcb *tp, struct socket *so); struct bwmeas* tcp_bwmeas_alloc(struct tcpcb *tp); void tcp_bwmeas_free(struct tcpcb *tp); extern int32_t timer_diff(uint32_t t1, uint32_t toff1, uint32_t t2, uint32_t toff2); @@ -1643,6 +1619,7 @@ extern void add_to_time_wait(struct tcpcb *, uint32_t delay); extern void tcp_pmtud_revert_segment_size(struct tcpcb *tp); extern void tcp_rxtseg_insert(struct tcpcb *, tcp_seq, tcp_seq); extern struct tcp_rxt_seg *tcp_rxtseg_find(struct tcpcb *, tcp_seq, tcp_seq); +extern void tcp_rxtseg_set_spurious(struct tcpcb *tp, tcp_seq start, tcp_seq end); extern void tcp_rxtseg_clean(struct tcpcb *); extern boolean_t tcp_rxtseg_detect_bad_rexmt(struct tcpcb *, tcp_seq); extern boolean_t tcp_rxtseg_dsack_for_tlp(struct tcpcb *); @@ -1680,7 +1657,7 @@ extern bool tcp_notify_ack_active(struct socket *so); #if MPTCP extern int mptcp_input_preproc(struct tcpcb *tp, struct mbuf *m, struct tcphdr *th, int drop_hdrlen); -extern uint32_t mptcp_output_csum(struct mbuf *m, uint64_t dss_val, +extern uint16_t mptcp_output_csum(struct mbuf *m, uint64_t dss_val, uint32_t sseq, uint16_t dlen); extern int mptcp_adj_mss(struct tcpcb *, boolean_t); extern void mptcp_insert_rmap(struct tcpcb *tp, struct mbuf *m, struct tcphdr *th); @@ -1689,6 +1666,14 @@ extern void mptcp_insert_rmap(struct tcpcb *tp, struct mbuf *m, struct tcphdr *t 
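
The new ACK-compression knobs here (tcp_do_ack_compression, TCP_ACK_COMPRESSION_DUMMY) pair with the t_comp_gencnt/t_comp_lastinc fields and TCP_COMP_CHANGE_RATE added to struct tcpcb earlier in this diff; their comments suggest pure ACKs are coalesced only while they share a generation, with the generation bumped every TCP_COMP_CHANGE_RATE milliseconds so at worst one ACK leaves per interval. A speculative sketch of that generation bookkeeping, based only on those struct comments.

#include <stdint.h>
#include <stdio.h>

#define TCP_COMP_CHANGE_RATE 5   /* ms between generation changes (from the diff) */

struct comp_state {
    uint32_t t_comp_gencnt;   /* current compression generation */
    uint32_t t_comp_lastinc;  /* timestamp (ms) of the last change */
};

/*
 * Return the generation to stamp on an outgoing pure ACK; ACKs sharing a
 * generation are candidates for being compressed together, so bumping the
 * counter every TCP_COMP_CHANGE_RATE ms bounds how long an ACK can be held.
 * (Inferred behaviour, not the kernel's actual implementation.)
 */
static uint32_t
comp_generation(struct comp_state *cs, uint32_t now_ms)
{
    if (now_ms - cs->t_comp_lastinc >= TCP_COMP_CHANGE_RATE) {
        cs->t_comp_gencnt++;
        cs->t_comp_lastinc = now_ms;
    }
    return cs->t_comp_gencnt;
}

int
main(void)
{
    struct comp_state cs = { 1, 0 };

    for (uint32_t t = 0; t <= 12; t += 3) {
        printf("t=%2ums gen=%u\n", t, comp_generation(&cs, t));
    }
    return 0;
}
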
__private_extern__ void tcp_update_stats_per_flow( struct ifnet_stats_per_flow *, struct ifnet *); +#define TCP_ACK_STRATEGY_LEGACY 0 +#define TCP_ACK_STRATEGY_MODERN 1 + +extern int tcp_ack_strategy; + +#define tcp_add_fsw_flow(...) +#define tcp_del_fsw_flow(...) + #endif /* BSD_KERNEL_PRIVATE */ #endif /* _NETINET_TCP_VAR_H_ */ diff --git a/bsd/netinet/udp.h b/bsd/netinet/udp.h index bad136e63..4c3100176 100644 --- a/bsd/netinet/udp.h +++ b/bsd/netinet/udp.h @@ -62,8 +62,15 @@ #ifndef _NETINET_UDP_H_ #define _NETINET_UDP_H_ + +#ifndef DRIVERKIT #include #include /* u_short */ +#else +#include +#include +#include +#endif /* DRIVERKIT */ /* * Udp protocol header. diff --git a/bsd/netinet/udp_usrreq.c b/bsd/netinet/udp_usrreq.c index 818f05cce..e789cb7bc 100644 --- a/bsd/netinet/udp_usrreq.c +++ b/bsd/netinet/udp_usrreq.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2019 Apple Inc. All rights reserved. + * Copyright (c) 2000-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -87,17 +87,13 @@ #include #include #include -#if INET6 #include -#endif /* INET6 */ #include #include #include -#if INET6 #include #include #include -#endif /* INET6 */ #include #include #include @@ -162,24 +158,7 @@ struct inpcbinfo udbinfo; /* Garbage collection performed during most recent udp_gc() run */ static boolean_t udp_gc_done = FALSE; -#if IPFIREWALL -extern int fw_verbose; -extern void ipfwsyslog(int level, const char *format, ...); -extern void ipfw_stealth_stats_incr_udp(void); - -/* Apple logging, log to ipfw.log */ -#define log_in_vain_log(a) { \ - if ((udp_log_in_vain == 3) && (fw_verbose == 2)) { \ - ipfwsyslog a; \ - } else if ((udp_log_in_vain == 4) && (fw_verbose == 2)) { \ - ipfw_stealth_stats_incr_udp(); \ - } else { \ - log a; \ - } \ -} -#else /* !IPFIREWALL */ #define log_in_vain_log(a) { log a; } -#endif /* !IPFIREWALL */ static int udp_getstat SYSCTL_HANDLER_ARGS; struct udpstat udpstat; /* from udp_var.h */ @@ -197,7 +176,6 @@ SYSCTL_INT(_net_inet_udp, OID_AUTO, randomize_ports, CTLFLAG_RW | CTLFLAG_LOCKED, &udp_use_randomport, 0, "Randomize UDP port numbers"); -#if INET6 struct udp_in6 { struct sockaddr_in6 uin6_sin; u_char uin6_init_done : 1; @@ -221,10 +199,6 @@ int udp_send(struct socket *, int, struct mbuf *, struct sockaddr *, struct mbuf *, struct proc *); static void udp_append(struct inpcb *, struct ip *, struct mbuf *, int, struct sockaddr_in *, struct udp_in6 *, struct udp_ip6 *, struct ifnet *); -#else /* !INET6 */ -static void udp_append(struct inpcb *, struct ip *, struct mbuf *, int, - struct sockaddr_in *, struct ifnet *); -#endif /* !INET6 */ static int udp_input_checksum(struct mbuf *, struct udphdr *, int, int); int udp_output(struct inpcb *, struct mbuf *, struct sockaddr *, struct mbuf *, struct proc *); @@ -255,7 +229,6 @@ udp_init(struct protosw *pp, struct domain *dp) { #pragma unused(dp) static int udp_initialized = 0; - vm_size_t str_size; struct inpcbinfo *pcbinfo; VERIFY((pp->pr_flags & (PR_INITIALIZED | PR_ATTACHED)) == PR_ATTACHED); @@ -275,8 +248,7 @@ udp_init(struct protosw *pp, struct domain *dp) &udbinfo.ipi_hashmask); udbinfo.ipi_porthashbase = hashinit(UDBHASHSIZE, M_PCB, &udbinfo.ipi_porthashmask); - str_size = (vm_size_t) sizeof(struct inpcb); - udbinfo.ipi_zone = zinit(str_size, 80000 * str_size, 8192, "udpcb"); + udbinfo.ipi_zone = zone_create("udpcb", sizeof(struct inpcb), ZC_NONE); pcbinfo = &udbinfo; /* @@ -310,23 +282,24 @@ udp_input(struct mbuf *m, int iphlen) struct sockaddr_in udp_in; struct ip_moptions *imo = 
NULL; int foundmembership = 0, ret = 0; -#if INET6 struct udp_in6 udp_in6; struct udp_ip6 udp_ip6; -#endif /* INET6 */ struct ifnet *ifp = m->m_pkthdr.rcvif; boolean_t cell = IFNET_IS_CELLULAR(ifp); boolean_t wifi = (!cell && IFNET_IS_WIFI(ifp)); boolean_t wired = (!wifi && IFNET_IS_WIRED(ifp)); + u_int16_t pf_tag = 0; bzero(&udp_in, sizeof(udp_in)); udp_in.sin_len = sizeof(struct sockaddr_in); udp_in.sin_family = AF_INET; -#if INET6 bzero(&udp_in6, sizeof(udp_in6)); udp_in6.uin6_sin.sin6_len = sizeof(struct sockaddr_in6); udp_in6.uin6_sin.sin6_family = AF_INET6; -#endif /* INET6 */ + + if (m->m_flags & M_PKTHDR) { + pf_tag = m_pftag(m)->pftag_tag; + } udpstat.udps_ipackets++; @@ -429,9 +402,7 @@ udp_input(struct mbuf *m, int iphlen) * Locate pcb(s) for datagram. * (Algorithm copied from raw_intr().) */ -#if INET6 udp_in6.uin6_init_done = udp_ip6.uip6_init_done = 0; -#endif /* INET6 */ LIST_FOREACH(inp, &udb, inp_list) { #if IPSEC int skipit; @@ -445,11 +416,9 @@ udp_input(struct mbuf *m, int iphlen) __func__, inp); /* NOTREACHED */ } -#if INET6 if ((inp->inp_vflag & INP_IPV4) == 0) { continue; } -#endif /* INET6 */ if (inp_restricted_recv(inp, ifp)) { continue; } @@ -534,7 +503,7 @@ udp_input(struct mbuf *m, int iphlen) skipit = 0; if (!necp_socket_is_allowed_to_send_recv_v4(inp, uh->uh_dport, uh->uh_sport, &ip->ip_dst, - &ip->ip_src, ifp, NULL, NULL, NULL)) { + &ip->ip_src, ifp, pf_tag, NULL, NULL, NULL, NULL)) { /* do not inject data to pcb */ skipit = 1; } @@ -546,15 +515,9 @@ udp_input(struct mbuf *m, int iphlen) if (reuse_sock) { n = m_copy(m, 0, M_COPYALL); } -#if INET6 udp_append(inp, ip, m, iphlen + sizeof(struct udphdr), &udp_in, &udp_in6, &udp_ip6, ifp); -#else /* !INET6 */ - udp_append(inp, ip, m, - iphlen + sizeof(struct udphdr), - &udp_in, ifp); -#endif /* !INET6 */ mcast_delivered++; m = n; @@ -707,7 +670,7 @@ udp_input(struct mbuf *m, int iphlen) goto bad; } #if ICMP_BANDLIM - if (badport_bandlim(BANDLIM_ICMP_UNREACH) < 0) { + if (badport_bandlim(BANDLIM_ICMP_UNREACH)) { goto bad; } #endif /* ICMP_BANDLIM */ @@ -731,7 +694,7 @@ udp_input(struct mbuf *m, int iphlen) } #if NECP if (!necp_socket_is_allowed_to_send_recv_v4(inp, uh->uh_dport, - uh->uh_sport, &ip->ip_dst, &ip->ip_src, ifp, NULL, NULL, NULL)) { + uh->uh_sport, &ip->ip_dst, &ip->ip_src, ifp, pf_tag, NULL, NULL, NULL, NULL)) { udp_unlock(inp->inp_socket, 1, 0); IF_UDP_STATINC(ifp, badipsec); goto bad; @@ -745,10 +708,13 @@ udp_input(struct mbuf *m, int iphlen) udp_in.sin_port = uh->uh_sport; udp_in.sin_addr = ip->ip_src; if ((inp->inp_flags & INP_CONTROLOPTS) != 0 || +#if CONTENT_FILTER + /* Content Filter needs to see local address */ + (inp->inp_socket->so_cfil_db != NULL) || +#endif (inp->inp_socket->so_options & SO_TIMESTAMP) != 0 || (inp->inp_socket->so_options & SO_TIMESTAMP_MONOTONIC) != 0 || (inp->inp_socket->so_options & SO_TIMESTAMP_CONTINUOUS) != 0) { -#if INET6 if (inp->inp_vflag & INP_IPV6) { int savedflags; @@ -757,9 +723,7 @@ udp_input(struct mbuf *m, int iphlen) inp->inp_flags &= ~INP_UNMAPPABLEOPTS; ret = ip6_savecontrol(inp, m, &opts); inp->inp_flags = savedflags; - } else -#endif /* INET6 */ - { + } else { ret = ip_savecontrol(inp, &opts, ip, m); } if (ret != 0) { @@ -772,13 +736,10 @@ udp_input(struct mbuf *m, int iphlen) KERNEL_DEBUG(DBG_LAYER_IN_END, uh->uh_dport, uh->uh_sport, save_ip.ip_src.s_addr, save_ip.ip_dst.s_addr, uh->uh_ulen); -#if INET6 if (inp->inp_vflag & INP_IPV6) { in6_sin_2_v4mapsin6(&udp_in, &udp_in6.uin6_sin); append_sa = (struct sockaddr *)&udp_in6.uin6_sin; - } else -#endif 
/* INET6 */ - { + } else { append_sa = (struct sockaddr *)&udp_in; } if (nstat_collect) { @@ -804,7 +765,6 @@ bad: KERNEL_DEBUG(DBG_FNC_UDP_INPUT | DBG_FUNC_END, 0, 0, 0, 0, 0); } -#if INET6 static void ip_2_ip6_hdr(struct ip6_hdr *ip6, struct ip *ip) { @@ -823,20 +783,14 @@ ip_2_ip6_hdr(struct ip6_hdr *ip6, struct ip *ip) ip6->ip6_dst.s6_addr32[3] = ip->ip_dst.s_addr; } } -#endif /* INET6 */ /* * subroutine of udp_input(), mainly for source code readability. */ static void -#if INET6 udp_append(struct inpcb *last, struct ip *ip, struct mbuf *n, int off, struct sockaddr_in *pudp_in, struct udp_in6 *pudp_in6, struct udp_ip6 *pudp_ip6, struct ifnet *ifp) -#else /* !INET6 */ -udp_append(struct inpcb *last, struct ip *ip, struct mbuf *n, int off, - struct sockaddr_in *pudp_in, struct ifnet *ifp) -#endif /* !INET6 */ { struct sockaddr *append_sa; struct mbuf *opts = 0; @@ -845,17 +799,14 @@ udp_append(struct inpcb *last, struct ip *ip, struct mbuf *n, int off, boolean_t wired = (!wifi && IFNET_IS_WIRED(ifp)); int ret = 0; -#if CONFIG_MACF_NET - if (mac_inpcb_check_deliver(last, n, AF_INET, SOCK_DGRAM) != 0) { - m_freem(n); - return; - } -#endif /* CONFIG_MACF_NET */ if ((last->inp_flags & INP_CONTROLOPTS) != 0 || +#if CONTENT_FILTER + /* Content Filter needs to see local address */ + (last->inp_socket->so_cfil_db != NULL) || +#endif (last->inp_socket->so_options & SO_TIMESTAMP) != 0 || (last->inp_socket->so_options & SO_TIMESTAMP_MONOTONIC) != 0 || (last->inp_socket->so_options & SO_TIMESTAMP_CONTINUOUS) != 0) { -#if INET6 if (last->inp_vflag & INP_IPV6) { int savedflags; @@ -871,25 +822,22 @@ udp_append(struct inpcb *last, struct ip *ip, struct mbuf *n, int off, goto error; } last->inp_flags = savedflags; - } else -#endif /* INET6 */ - { + } else { ret = ip_savecontrol(last, &opts, ip, n); if (ret != 0) { goto error; } } } -#if INET6 if (last->inp_vflag & INP_IPV6) { if (pudp_in6->uin6_init_done == 0) { in6_sin_2_v4mapsin6(pudp_in, &pudp_in6->uin6_sin); pudp_in6->uin6_init_done = 1; } append_sa = (struct sockaddr *)&pudp_in6->uin6_sin; - } else -#endif /* INET6 */ - append_sa = (struct sockaddr *)pudp_in; + } else { + append_sa = (struct sockaddr *)pudp_in; + } if (nstat_collect) { INP_ADD_STAT(last, cell, wifi, wired, rxpackets, 1); INP_ADD_STAT(last, cell, wifi, wired, rxbytes, @@ -917,7 +865,7 @@ error: void udp_notify(struct inpcb *inp, int errno) { - inp->inp_socket->so_error = errno; + inp->inp_socket->so_error = (u_short)errno; sorwakeup(inp->inp_socket); sowwakeup(inp->inp_socket); } @@ -945,10 +893,13 @@ udp_ctlinput(int cmd, struct sockaddr *sa, void *vip, __unused struct ifnet * if } if (ip) { struct udphdr uh; + struct icmp *icp = NULL; bcopy(((caddr_t)ip + (ip->ip_hl << 2)), &uh, sizeof(uh)); inp = in_pcblookup_hash(&udbinfo, faddr, uh.uh_dport, ip->ip_src, uh.uh_sport, 0, NULL); + icp = (struct icmp *)(void *)((caddr_t)ip - offsetof(struct icmp, icmp_ip)); + if (inp != NULL && inp->inp_socket != NULL) { udp_lock(inp->inp_socket, 1, 0); if (in_pcb_checkstate(inp, WNT_RELEASE, 1) == @@ -956,7 +907,14 @@ udp_ctlinput(int cmd, struct sockaddr *sa, void *vip, __unused struct ifnet * if udp_unlock(inp->inp_socket, 1, 0); return; } - (*notify)(inp, inetctlerrmap[cmd]); + if (cmd == PRC_MSGSIZE && !uuid_is_null(inp->necp_client_uuid)) { + uuid_t null_uuid; + uuid_clear(null_uuid); + necp_update_flow_protoctl_event(null_uuid, inp->necp_client_uuid, + PRC_MSGSIZE, ntohs(icp->icmp_nextmtu), 0); + } else { + (*notify)(inp, inetctlerrmap[cmd]); + } udp_unlock(inp->inp_socket, 1, 0); } } else 
{ @@ -1047,7 +1005,7 @@ udp_ctloutput(struct socket *so, struct sockopt *sopt) inp->inp_keepalive_data = NULL; } - inp->inp_keepalive_datalen = min( + inp->inp_keepalive_datalen = (uint8_t)min( ka.ka_data_len, UDP_KEEPALIVE_OFFLOAD_DATA_SIZE); if (inp->inp_keepalive_datalen > 0) { @@ -1066,7 +1024,7 @@ udp_ctloutput(struct socket *so, struct sockopt *sopt) } else { inp->inp_keepalive_datalen = 0; } - inp->inp_keepalive_interval = + inp->inp_keepalive_interval = (uint8_t) min(UDP_KEEPALIVE_INTERVAL_MAX_SECONDS, ka.ka_interval); inp->inp_keepalive_type = ka.ka_type; @@ -1228,7 +1186,7 @@ SYSCTL_PROC(_net_inet_udp, UDPCTL_PCBLIST, pcblist, CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0, udp_pcblist, "S,xinpcb", "List of active UDP sockets"); -#if !CONFIG_EMBEDDED +#if XNU_TARGET_OS_OSX static int udp_pcblist64 SYSCTL_HANDLER_ARGS @@ -1350,7 +1308,7 @@ SYSCTL_PROC(_net_inet_udp, OID_AUTO, pcblist64, CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0, udp_pcblist64, "S,xinpcb64", "List of active UDP sockets"); -#endif /* !CONFIG_EMBEDDED */ +#endif /* XNU_TARGET_OS_OSX */ static int udp_pcblist_n SYSCTL_HANDLER_ARGS @@ -1476,14 +1434,15 @@ udp_output(struct inpcb *inp, struct mbuf *m, struct sockaddr *addr, struct ip_moptions *mopts; struct route ro; struct ip_out_args ipoa; + bool sndinprog_cnt_used = false; #if CONTENT_FILTER struct m_tag *cfil_tag = NULL; bool cfil_faddr_use = false; - bool sndinprog_cnt_used = false; uint32_t cfil_so_state_change_cnt = 0; - short cfil_so_options = 0; + uint32_t cfil_so_options = 0; struct sockaddr *cfil_faddr = NULL; #endif + bool check_qos_marking_again = (so->so_flags1 & SOF1_QOSMARKING_POLICY_OVERRIDE) ? FALSE : TRUE; bzero(&ipoa, sizeof(ipoa)); ipoa.ipoa_boundif = IFSCOPE_NONE; @@ -1754,10 +1713,6 @@ udp_output(struct inpcb *inp, struct mbuf *m, struct sockaddr *addr, } } -#if CONFIG_MACF_NET - mac_mbuf_label_associate_inpcb(inp, m); -#endif /* CONFIG_MACF_NET */ - if (inp->inp_flowhash == 0) { inp->inp_flowhash = inp_calc_flowhash(inp); } @@ -1790,6 +1745,16 @@ udp_output(struct inpcb *inp, struct mbuf *m, struct sockaddr *addr, ui->ui_dport = fport; ui->ui_ulen = htons((u_short)len + sizeof(struct udphdr)); + /* + * Set the Don't Fragment bit in the IP header. + */ + if (inp->inp_flags2 & INP2_DONTFRAG) { + struct ip *ip; + + ip = (struct ip *)&ui->ui_i; + ip->ip_off |= IP_DF; + } + /* * Set up checksum to pseudo header checksum and output datagram. 
* @@ -1818,7 +1783,7 @@ udp_output(struct inpcb *inp, struct mbuf *m, struct sockaddr *addr, } else { ui->ui_sum = 0; } - ((struct ip *)ui)->ip_len = sizeof(struct udpiphdr) + len; + ((struct ip *)ui)->ip_len = (uint16_t)(sizeof(struct udpiphdr) + len); ((struct ip *)ui)->ip_ttl = inp->inp_ip_ttl; /* XXX */ if (tos != IPTOS_UNSPEC) { ((struct ip *)ui)->ip_tos = (uint8_t)(tos & IPTOS_MASK); @@ -1835,6 +1800,7 @@ udp_output(struct inpcb *inp, struct mbuf *m, struct sockaddr *addr, necp_kernel_policy_id policy_id; necp_kernel_policy_id skip_policy_id; u_int32_t route_rule_id; + u_int32_t pass_flags; /* * We need a route to perform NECP route rule checks @@ -1869,22 +1835,25 @@ udp_output(struct inpcb *inp, struct mbuf *m, struct sockaddr *addr, } if (!necp_socket_is_allowed_to_send_recv_v4(inp, lport, fport, - &laddr, &faddr, NULL, &policy_id, &route_rule_id, &skip_policy_id)) { + &laddr, &faddr, NULL, 0, &policy_id, &route_rule_id, &skip_policy_id, &pass_flags)) { error = EHOSTUNREACH; goto abort; } - necp_mark_packet_from_socket(m, inp, policy_id, route_rule_id, skip_policy_id); + necp_mark_packet_from_socket(m, inp, policy_id, route_rule_id, skip_policy_id, pass_flags); if (net_qos_policy_restricted != 0) { - necp_socket_update_qos_marking(inp, - inp->inp_route.ro_rt, NULL, route_rule_id); + necp_socket_update_qos_marking(inp, inp->inp_route.ro_rt, route_rule_id); } } #endif /* NECP */ if ((so->so_flags1 & SOF1_QOSMARKING_ALLOWED)) { ipoa.ipoa_flags |= IPOAF_QOSMARKING_ALLOWED; } + if (check_qos_marking_again) { + ipoa.ipoa_flags |= IPOAF_REDO_QOSMARKING_POLICY; + } + ipoa.qos_marking_gencount = inp->inp_policyresult.results.qos_marking_gencount; #if IPSEC if (inp->inp_sp != NULL && ipsec_setsocket(m, inp->inp_socket) != 0) { @@ -1947,6 +1916,16 @@ udp_output(struct inpcb *inp, struct mbuf *m, struct sockaddr *addr, IMO_REMREF(mopts); } + if (check_qos_marking_again) { + inp->inp_policyresult.results.qos_marking_gencount = ipoa.qos_marking_gencount; + + if (ipoa.ipoa_flags & IPOAF_QOSMARKING_ALLOWED) { + inp->inp_socket->so_flags1 |= SOF1_QOSMARKING_ALLOWED; + } else { + inp->inp_socket->so_flags1 &= ~SOF1_QOSMARKING_ALLOWED; + } + } + if (error == 0 && nstat_collect) { boolean_t cell, wifi, wired; @@ -2014,7 +1993,7 @@ abort: (outifp = rt->rt_ifp) != inp->inp_last_outifp) { inp->inp_last_outifp = outifp; /* no reference needed */ - so->so_pktheadroom = P2ROUNDUP( + so->so_pktheadroom = (uint16_t)P2ROUNDUP( sizeof(struct udphdr) + sizeof(struct ip) + ifnet_hdrlen(outifp) + @@ -2066,13 +2045,7 @@ release: u_int32_t udp_sendspace = 9216; /* really max datagram size */ /* 187 1K datagrams (approx 192 KB) */ -u_int32_t udp_recvspace = 187 * (1024 + -#if INET6 - sizeof(struct sockaddr_in6) -#else /* !INET6 */ - sizeof(struct sockaddr_in) -#endif /* !INET6 */ - ); +u_int32_t udp_recvspace = 187 * (1024 + sizeof(struct sockaddr_in6)); /* Check that the values of udp send and recv space do not exceed sb_max */ static int @@ -2151,7 +2124,7 @@ udp_attach(struct socket *so, int proto, struct proc *p) } inp = (struct inpcb *)so->so_pcb; inp->inp_vflag |= INP_IPV4; - inp->inp_ip_ttl = ip_defttl; + inp->inp_ip_ttl = (uint8_t)ip_defttl; if (nstat_collect) { nstat_udp_new_pcb(inp); } @@ -2211,15 +2184,9 @@ udp_connect(struct socket *so, struct sockaddr *nam, struct proc *p) #if NECP #if FLOW_DIVERT if (necp_socket_should_use_flow_divert(inp)) { - uint32_t fd_ctl_unit = - necp_socket_get_flow_divert_control_unit(inp); - if (fd_ctl_unit > 0) { - error = flow_divert_pcb_init(so, fd_ctl_unit); - if (error == 
0) { - error = flow_divert_connect_out(so, nam, p); - } - } else { - error = ENETDOWN; + error = flow_divert_pcb_init(so); + if (error == 0) { + error = flow_divert_connect_out(so, nam, p); } return error; } @@ -2287,11 +2254,9 @@ udp_connectx_common(struct socket *so, int af, struct sockaddr *src, struct sock case AF_INET: error = udp_connect(so, dst, p); break; -#if INET6 case AF_INET6: error = udp6_connect(so, dst, p); break; -#endif /* INET6 */ default: VERIFY(0); /* NOTREACHED */ @@ -2584,12 +2549,11 @@ udp_gc(struct inpcbinfo *ipi) so = inp->inp_socket; if (so->so_usecount == 0) { if (inp->inp_state != INPCB_STATE_DEAD) { -#if INET6 if (SOCK_CHECK_DOM(so, PF_INET6)) { in6_pcbdetach(inp); - } else -#endif /* INET6 */ - in_pcbdetach(inp); + } else { + in_pcbdetach(inp); + } } in_pcbdispose(inp); } else { @@ -2625,7 +2589,6 @@ udp_out_cksum_stats(u_int32_t len) udpstat.udps_snd_swcsum_bytes += len; } -#if INET6 void udp_in6_cksum_stats(u_int32_t len) { @@ -2639,7 +2602,6 @@ udp_out6_cksum_stats(u_int32_t len) udpstat.udps_snd6_swcsum++; udpstat.udps_snd6_swcsum_bytes += len; } -#endif /* INET6 */ /* * Checksum extended UDP header and data. @@ -2860,9 +2822,9 @@ udp_fill_keepalive_offload_frames(ifnet_t ifp, struct udphdr *udp; frame = &frames_array[frame_index]; - frame->length = frame_data_offset + + frame->length = (uint8_t)(frame_data_offset + sizeof(struct udpiphdr) + - inp->inp_keepalive_datalen; + inp->inp_keepalive_datalen); frame->ether_type = IFNET_KEEPALIVE_OFFLOAD_FRAME_ETHERTYPE_IPV4; frame->interval = inp->inp_keepalive_interval; @@ -2926,10 +2888,10 @@ udp_fill_keepalive_offload_frames(ifnet_t ifp, VERIFY(inp->inp_vflag & INP_IPV6); frame = &frames_array[frame_index]; - frame->length = frame_data_offset + + frame->length = (uint8_t)(frame_data_offset + sizeof(struct ip6_hdr) + sizeof(struct udphdr) + - inp->inp_keepalive_datalen; + inp->inp_keepalive_datalen); frame->ether_type = IFNET_KEEPALIVE_OFFLOAD_FRAME_ETHERTYPE_IPV6; frame->interval = inp->inp_keepalive_interval; @@ -2962,7 +2924,7 @@ udp_fill_keepalive_offload_frames(ifnet_t ifp, ip6->ip6_vfc &= ~IPV6_VERSION_MASK; ip6->ip6_vfc |= IPV6_VERSION; ip6->ip6_nxt = IPPROTO_UDP; - ip6->ip6_hlim = ip6_defhlim; + ip6->ip6_hlim = (uint8_t)ip6_defhlim; ip6->ip6_plen = htons(sizeof(struct udphdr) + (u_short)inp->inp_keepalive_datalen); ip6->ip6_src = inp->in6p_laddr; diff --git a/bsd/netinet/udp_var.h b/bsd/netinet/udp_var.h index 8c662f05f..1148f7dc0 100644 --- a/bsd/netinet/udp_var.h +++ b/bsd/netinet/udp_var.h @@ -136,10 +136,8 @@ struct udpstat { { "pcblist", CTLTYPE_STRUCT }, \ } -#ifdef INET6 #define udp6stat udpstat #define udp6s_opackets udps_opackets -#endif /* INET6 */ SYSCTL_DECL(_net_inet_udp); diff --git a/bsd/netinet6/Makefile b/bsd/netinet6/Makefile index c1b816806..3404e5933 100644 --- a/bsd/netinet6/Makefile +++ b/bsd/netinet6/Makefile @@ -15,12 +15,14 @@ PRIVATE_DATAFILES = \ in6.h \ in6_pcb.h \ in6_var.h \ - ip6_fw.h \ ip6_var.h \ mld6_var.h \ nd6.h \ scope6_var.h +DRIVERKIT_DATAFILES = \ + in6.h + PRIVATE_KERNELFILES = \ ah6.h esp6.h esp_rijndael.h esp_chachapoly.h \ in6_gif.h in6_ifattach.h ip6_ecn.h ip6protosw.h \ @@ -28,6 +30,8 @@ PRIVATE_KERNELFILES = \ INSTALL_MI_LIST = ${DATAFILES} +INSTALL_DRIVERKIT_MI_LIST = ${DRIVERKIT_DATAFILES} + INSTALL_MI_DIR = netinet6 EXPORT_MI_LIST = ${DATAFILES} diff --git a/bsd/netinet6/ah.h b/bsd/netinet6/ah.h index 149d8b911..c4c368129 100644 --- a/bsd/netinet6/ah.h +++ b/bsd/netinet6/ah.h @@ -66,8 +66,8 @@ struct ah_algorithm_state { struct ah_algorithm { int 
(*sumsiz)(struct secasvar *); int (*mature)(struct secasvar *); - int keymin; /* in bits */ - int keymax; /* in bits */ + u_int16_t keymin; /* in bits */ + u_int16_t keymax; /* in bits */ const char *name; int (*init)(struct ah_algorithm_state *, struct secasvar *); void (*update)(struct ah_algorithm_state *, caddr_t, size_t); @@ -79,7 +79,7 @@ struct ah_algorithm { extern const struct ah_algorithm *ah_algorithm_lookup(int); /* cksum routines */ -extern int ah_hdrlen(struct secasvar *); +extern size_t ah_hdrlen(struct secasvar *); extern size_t ah_hdrsiz(struct ipsecrequest *); extern void ah4_input(struct mbuf *, int); diff --git a/bsd/netinet6/ah_core.c b/bsd/netinet6/ah_core.c index 578e06257..7ce33bb64 100644 --- a/bsd/netinet6/ah_core.c +++ b/bsd/netinet6/ah_core.c @@ -84,26 +84,18 @@ #include #include -#if INET6 #include #include #include -#endif #include -#if INET6 #include -#endif #include -#if INET6 #include -#endif #if IPSEC_ESP #include -#if INET6 #include #endif -#endif #include #include #include @@ -295,9 +287,9 @@ ah_keyed_md5_mature( static int ah_keyed_md5_init(struct ah_algorithm_state *state, struct secasvar *sav) { - size_t padlen; size_t keybitlen; u_int8_t buf[32] __attribute__((aligned(4))); + unsigned int padlen; if (!state) { panic("ah_keyed_md5_init: what?"); @@ -358,7 +350,8 @@ ah_keyed_md5_loop(struct ah_algorithm_state *state, caddr_t addr, size_t len) panic("ah_keyed_md5_loop: what?"); } - MD5Update((MD5_CTX *)state->foo, addr, len); + VERIFY(len <= UINT_MAX); + MD5Update((MD5_CTX *)state->foo, addr, (uint)len); } static void @@ -589,7 +582,8 @@ ah_hmac_md5_loop(struct ah_algorithm_state *state, caddr_t addr, size_t len) panic("ah_hmac_md5_loop: what?"); } ctxt = (MD5_CTX *)(void *)(((caddr_t)state->foo) + 128); - MD5Update(ctxt, addr, len); + VERIFY(len <= UINT_MAX); + MD5Update(ctxt, addr, (uint)len); } static void @@ -1298,7 +1292,8 @@ again: error = ENOBUFS; goto fail; } - m_copydata(m, off, hlen, mtod(n, caddr_t)); + VERIFY(hlen <= INT_MAX); + m_copydata(m, off, (int)hlen, mtod(n, caddr_t)); /* * IP options processing. @@ -1466,7 +1461,6 @@ fail: } #endif -#if INET6 /* * Go generate the checksum. This function won't modify the mbuf chain * except AH itself. @@ -1705,4 +1699,3 @@ fail: } return error; } -#endif diff --git a/bsd/netinet6/ah_input.c b/bsd/netinet6/ah_input.c index a2620405f..1716bd5e7 100644 --- a/bsd/netinet6/ah_input.c +++ b/bsd/netinet6/ah_input.c @@ -88,26 +88,18 @@ #include #include #include -#if INET6 #include -#endif -#if INET6 #include #include #include #include #include -#endif #include -#if INET6 #include -#endif #include -#if INET6 #include -#endif #include #include #if IPSEC_DEBUG @@ -139,7 +131,7 @@ ah4_input(struct mbuf *m, int off) u_char *cksum; struct secasvar *sav = NULL; u_int16_t nxt; - size_t hlen; + u_int8_t hlen; size_t stripsiz = 0; sa_family_t ifamily; @@ -160,9 +152,9 @@ ah4_input(struct mbuf *m, int off) ah = (struct ah *)(void *)(((caddr_t)ip) + off); nxt = ah->ah_nxt; #ifdef _IP_VHL - hlen = IP_VHL_HL(ip->ip_vhl) << 2; + hlen = (u_int8_t)(IP_VHL_HL(ip->ip_vhl) << 2); #else - hlen = ip->ip_hl << 2; + hlen = (u_int8_t)(ip->ip_hl << 2); #endif /* find the sassoc. 
*/ @@ -248,7 +240,8 @@ ah4_input(struct mbuf *m, int off) } if (m->m_len < off + sizeof(struct ah) + sizoff + siz1) { - m = m_pullup(m, off + sizeof(struct ah) + sizoff + siz1); + VERIFY((off + sizeof(struct ah) + sizoff + siz1) <= INT_MAX); + m = m_pullup(m, (int)(off + sizeof(struct ah) + sizoff + siz1)); if (!m) { ipseclog((LOG_DEBUG, "IPv4 AH input: can't pullup\n")); IPSEC_STAT_INCREMENT(ipsecstat.in_inval); @@ -293,7 +286,15 @@ ah4_input(struct mbuf *m, int off) * some of IP header fields are flipped to the host endian. * convert them back to network endian. VERY stupid. */ - ip->ip_len = htons(ip->ip_len + hlen); + if ((ip->ip_len + hlen) > UINT16_MAX) { + ipseclog((LOG_DEBUG, "IPv4 AH input: " + "bad length ip header len %u, total len %u\n", + ip->ip_len, hlen)); + IPSEC_STAT_INCREMENT(ipsecstat.in_inval); + goto fail; + } + + ip->ip_len = htons((u_int16_t)(ip->ip_len + hlen)); ip->ip_off = htons(ip->ip_off); if (ah4_calccksum(m, (caddr_t)cksum, siz1, algo, sav)) { FREE(cksum, M_TEMP); @@ -333,49 +334,7 @@ ah4_input(struct mbuf *m, int off) m->m_flags |= M_AUTHIPHDR; m->m_flags |= M_AUTHIPDGM; -#if 0 - /* - * looks okey, but we need more sanity check. - * XXX should elaborate. - */ - if (ah->ah_nxt == IPPROTO_IPIP || ah->ah_nxt == IPPROTO_IP) { - struct ip *nip; - size_t sizoff; - - sizoff = (sav->flags & SADB_X_EXT_OLD) ? 0 : 4; - - if (m->m_len < off + sizeof(struct ah) + sizoff + siz1 + hlen) { - m = m_pullup(m, off + sizeof(struct ah) - + sizoff + siz1 + hlen); - if (!m) { - ipseclog((LOG_DEBUG, - "IPv4 AH input: can't pullup\n")); - IPSEC_STAT_INCREMENT(ipsecstat.in_inval); - goto fail; - } - } - - nip = (struct ip *)((u_char *)(ah + 1) + sizoff + siz1); - if (nip->ip_src.s_addr != ip->ip_src.s_addr - || nip->ip_dst.s_addr != ip->ip_dst.s_addr) { - m->m_flags &= ~M_AUTHIPHDR; - m->m_flags &= ~M_AUTHIPDGM; - } - } -#if INET6 - else if (ah->ah_nxt == IPPROTO_IPV6) { - m->m_flags &= ~M_AUTHIPHDR; - m->m_flags &= ~M_AUTHIPDGM; - } -#endif /*INET6*/ -#endif /*0*/ - - if (m->m_flags & M_AUTHIPHDR - && m->m_flags & M_AUTHIPDGM) { -#if 0 - ipseclog((LOG_DEBUG, - "IPv4 AH input: authentication succeess\n")); -#endif + if (m->m_flags & M_AUTHIPHDR && m->m_flags & M_AUTHIPDGM) { IPSEC_STAT_INCREMENT(ipsecstat.in_ahauthsucc); } else { ipseclog((LOG_WARNING, @@ -403,7 +362,7 @@ ah4_input(struct mbuf *m, int off) /* RFC 2402 */ stripsiz = sizeof(struct newah) + siz1; } - if (ipsec4_tunnel_validate(m, off + stripsiz, nxt, sav, &ifamily)) { + if (ipsec4_tunnel_validate(m, (int)(off + stripsiz), nxt, sav, &ifamily)) { ifaddr_t ifa; struct sockaddr_storage addr; struct sockaddr_in *ipaddr; @@ -424,7 +383,7 @@ ah4_input(struct mbuf *m, int off) goto fail; } tos = ip->ip_tos; - m_adj(m, off + stripsiz); + m_adj(m, (int)(off + stripsiz)); if (m->m_len < sizeof(*ip)) { m = m_pullup(m, sizeof(*ip)); if (!m) { @@ -548,11 +507,11 @@ ah4_input(struct mbuf *m, int off) } ip = mtod(m, struct ip *); #ifdef IPLEN_FLIPPED - ip->ip_len = ip->ip_len - stripsiz; + ip->ip_len = (u_short)(ip->ip_len - stripsiz); #else ip->ip_len = htons(ntohs(ip->ip_len) - stripsiz); #endif - ip->ip_p = nxt; + ip->ip_p = (u_char)nxt; /* forget about IP hdr checksum, the check has already been passed */ key_sa_recordxfer(sav, m); @@ -594,7 +553,7 @@ ah4_input(struct mbuf *m, int off) IPSEC_STAT_INCREMENT(ipsecstat.in_polvio); goto fail; } - ip_proto_dispatch_in(m, off, nxt, 0); + ip_proto_dispatch_in(m, off, (u_int8_t)nxt, 0); } else { m_freem(m); } @@ -624,7 +583,6 @@ fail: } #endif /* INET */ -#if INET6 int ah6_input(struct 
mbuf **mp, int *offp, int proto) { @@ -722,7 +680,8 @@ ah6_input(struct mbuf **mp, int *offp, int proto) IPSEC_STAT_INCREMENT(ipsec6stat.in_inval); goto fail; } - IP6_EXTHDR_CHECK(m, off, sizeof(struct ah) + sizoff + siz1, + VERIFY((sizeof(struct ah) + sizoff + siz1) <= INT_MAX); + IP6_EXTHDR_CHECK(m, off, (int)(sizeof(struct ah) + sizoff + siz1), {return IPPROTO_DONE;}); ip6 = mtod(m, struct ip6_hdr *); ah = (struct ah *)(void *)(mtod(m, caddr_t) + off); @@ -789,42 +748,7 @@ ah6_input(struct mbuf **mp, int *offp, int proto) m->m_flags |= M_AUTHIPHDR; m->m_flags |= M_AUTHIPDGM; -#if 0 - /* - * looks okey, but we need more sanity check. - * XXX should elaborate. - */ - if (ah->ah_nxt == IPPROTO_IPV6) { - struct ip6_hdr *nip6; - size_t sizoff; - - sizoff = (sav->flags & SADB_X_EXT_OLD) ? 0 : 4; - - IP6_EXTHDR_CHECK(m, off, sizeof(struct ah) + sizoff + siz1 - + sizeof(struct ip6_hdr), - {return IPPROTO_DONE;}); - - nip6 = (struct ip6_hdr *)((u_char *)(ah + 1) + sizoff + siz1); - if (!IN6_ARE_ADDR_EQUAL(&nip6->ip6_src, &ip6->ip6_src) - || !IN6_ARE_ADDR_EQUAL(&nip6->ip6_dst, &ip6->ip6_dst)) { - m->m_flags &= ~M_AUTHIPHDR; - m->m_flags &= ~M_AUTHIPDGM; - } - } else if (ah->ah_nxt == IPPROTO_IPIP) { - m->m_flags &= ~M_AUTHIPHDR; - m->m_flags &= ~M_AUTHIPDGM; - } else if (ah->ah_nxt == IPPROTO_IP) { - m->m_flags &= ~M_AUTHIPHDR; - m->m_flags &= ~M_AUTHIPDGM; - } -#endif - - if (m->m_flags & M_AUTHIPHDR - && m->m_flags & M_AUTHIPDGM) { -#if 0 - ipseclog((LOG_DEBUG, - "IPv6 AH input: authentication succeess\n")); -#endif + if (m->m_flags & M_AUTHIPHDR && m->m_flags & M_AUTHIPDGM) { IPSEC_STAT_INCREMENT(ipsec6stat.in_ahauthsucc); } else { ipseclog((LOG_WARNING, @@ -852,7 +776,7 @@ ah6_input(struct mbuf **mp, int *offp, int proto) /* RFC 2402 */ stripsiz = sizeof(struct newah) + siz1; } - if (ipsec6_tunnel_validate(m, off + stripsiz, nxt, sav, &ifamily)) { + if (ipsec6_tunnel_validate(m, (int)(off + stripsiz), nxt, sav, &ifamily)) { ifaddr_t ifa; struct sockaddr_storage addr; struct sockaddr_in6 *ip6addr; @@ -872,7 +796,7 @@ ah6_input(struct mbuf **mp, int *offp, int proto) } flowinfo = ip6->ip6_flow; - m_adj(m, off + stripsiz); + m_adj(m, (int)(off + stripsiz)); if (m->m_len < sizeof(*ip6)) { /* * m_pullup is prohibited in KAME IPv6 input processing @@ -900,14 +824,12 @@ ah6_input(struct mbuf **mp, int *offp, int proto) goto fail; } -#if 1 /* * should the inner packet be considered authentic? * see comment in ah4_input(). */ m->m_flags &= ~M_AUTHIPHDR; m->m_flags &= ~M_AUTHIPDGM; -#endif key_sa_recordxfer(sav, m); if (ipsec_addhist(m, IPPROTO_AH, spi) != 0 || @@ -965,7 +887,7 @@ ah6_input(struct mbuf **mp, int *offp, int proto) * This is necessary because AH will be stripped off below. 
*/ prvnxtp = ip6_get_prevhdr(m, off); /* XXX */ - *prvnxtp = nxt; + *prvnxtp = (u_int8_t)nxt; ip6 = mtod(m, struct ip6_hdr *); /* @@ -978,7 +900,7 @@ ah6_input(struct mbuf **mp, int *offp, int proto) m->m_pkthdr.len -= stripsiz; ip6 = mtod(m, struct ip6_hdr *); /* XXX jumbogram */ - ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - stripsiz); + ip6->ip6_plen = htons((u_int16_t)(ntohs(ip6->ip6_plen) - stripsiz)); key_sa_recordxfer(sav, m); if (ipsec_addhist(m, IPPROTO_AH, spi) != 0) { @@ -1124,4 +1046,3 @@ ah6_ctlinput(int cmd, struct sockaddr *sa, void *d) /* we normally notify any pcb here */ } } -#endif /* INET6 */ diff --git a/bsd/netinet6/ah_output.c b/bsd/netinet6/ah_output.c index 10f39472b..fa6388fbc 100644 --- a/bsd/netinet6/ah_output.c +++ b/bsd/netinet6/ah_output.c @@ -86,20 +86,14 @@ #include #include -#if INET6 #include #include #include -#endif #include -#if INET6 #include -#endif #include -#if INET6 #include -#endif #include #include @@ -237,6 +231,8 @@ ah4_output(struct mbuf *m, struct secasvar *sav) ahlen = plen + sizeof(struct newah); } + VERIFY(ahlen <= UINT16_MAX); + /* * grow the mbuf to accomodate AH. */ @@ -259,7 +255,7 @@ ah4_output(struct mbuf *m, struct secasvar *sav) m_freem(m); return ENOBUFS; } - n->m_len = ahlen; + n->m_len = (int32_t)ahlen; n->m_next = m->m_next; m->m_next = n; m->m_pkthdr.len += ahlen; @@ -279,9 +275,10 @@ ah4_output(struct mbuf *m, struct secasvar *sav) if (sav->flags & SADB_X_EXT_OLD) { struct ah *ahdr; + VERIFY((plen >> 2) <= UINT8_MAX); ahdr = (struct ah *)(void *)ahdrpos; ahsumpos = (u_char *)(ahdr + 1); - ahdr->ah_len = plen >> 2; + ahdr->ah_len = (u_int8_t)(plen >> 2); ahdr->ah_nxt = ip->ip_p; ahdr->ah_reserve = htons(0); ahdr->ah_spi = spi; @@ -289,9 +286,10 @@ ah4_output(struct mbuf *m, struct secasvar *sav) } else { struct newah *ahdr; + VERIFY(((plen >> 2) + 1) <= UINT8_MAX); ahdr = (struct newah *)(void *)ahdrpos; ahsumpos = (u_char *)(ahdr + 1); - ahdr->ah_len = (plen >> 2) + 1; /* plus one for seq# */ + ahdr->ah_len = (u_int8_t)((plen >> 2) + 1); /* plus one for seq# */ ahdr->ah_nxt = ip->ip_p; ahdr->ah_reserve = htons(0); ahdr->ah_spi = spi; @@ -322,7 +320,7 @@ ah4_output(struct mbuf *m, struct secasvar *sav) */ ip->ip_p = IPPROTO_AH; if (ahlen < (IP_MAXPACKET - ntohs(ip->ip_len))) { - ip->ip_len = htons(ntohs(ip->ip_len) + ahlen); + ip->ip_len = htons(ntohs(ip->ip_len) + (u_int16_t)ahlen); } else { ipseclog((LOG_ERR, "IPv4 AH output: size exceeds limit\n")); IPSEC_STAT_INCREMENT(ipsecstat.out_inval); @@ -372,11 +370,11 @@ ah4_output(struct mbuf *m, struct secasvar *sav) #endif /* Calculate AH length */ -int +size_t ah_hdrlen(struct secasvar *sav) { const struct ah_algorithm *algo; - int plen, ahlen; + size_t plen, ahlen; algo = ah_algorithm_lookup(sav->alg_auth); if (!algo) { @@ -395,7 +393,6 @@ ah_hdrlen(struct secasvar *sav) return ahlen; } -#if INET6 /* * Fill in the Authentication Header and calculate checksum. 
*/ @@ -410,7 +407,7 @@ ah6_output(struct mbuf *m, u_char *nexthdrp, struct mbuf *md, u_char *ahsumpos = NULL; size_t plen; /*AH payload size in bytes*/ int error = 0; - int ahlen; + size_t ahlen; struct ip6_hdr *ip6; if (m->m_len < sizeof(struct ip6_hdr)) { @@ -424,6 +421,8 @@ ah6_output(struct mbuf *m, u_char *nexthdrp, struct mbuf *md, return 0; } + VERIFY(ahlen <= UINT16_MAX); + for (mprev = m; mprev && mprev->m_next != md; mprev = mprev->m_next) { ; } @@ -446,7 +445,7 @@ ah6_output(struct mbuf *m, u_char *nexthdrp, struct mbuf *md, return ENOBUFS; } } - mah->m_len = ahlen; + mah->m_len = (int32_t)ahlen; mah->m_next = md; mprev->m_next = mah; m->m_pkthdr.len += ahlen; @@ -458,8 +457,9 @@ ah6_output(struct mbuf *m, u_char *nexthdrp, struct mbuf *md, m_freem(m); return EINVAL; } + ip6 = mtod(m, struct ip6_hdr *); - ip6->ip6_plen = htons(m->m_pkthdr.len - sizeof(struct ip6_hdr)); + ip6->ip6_plen = htons((u_int16_t)(m->m_pkthdr.len - sizeof(struct ip6_hdr))); if ((sav->flags & SADB_X_EXT_OLD) == 0 && sav->replay[0] == NULL) { ipseclog((LOG_DEBUG, "ah6_output: internal error: " @@ -487,10 +487,11 @@ ah6_output(struct mbuf *m, u_char *nexthdrp, struct mbuf *md, struct ah *ahdr = mtod(mah, struct ah *); plen = mah->m_len - sizeof(struct ah); + VERIFY((plen >> 2) <= UINT8_MAX); ahsumpos = (u_char *)(ahdr + 1); ahdr->ah_nxt = *nexthdrp; *nexthdrp = IPPROTO_AH; - ahdr->ah_len = plen >> 2; + ahdr->ah_len = (u_int8_t)(plen >> 2); ahdr->ah_reserve = htons(0); ahdr->ah_spi = spi; bzero(ahdr + 1, plen); @@ -498,10 +499,11 @@ ah6_output(struct mbuf *m, u_char *nexthdrp, struct mbuf *md, struct newah *ahdr = mtod(mah, struct newah *); plen = mah->m_len - sizeof(struct newah); + VERIFY(((plen >> 2) + 1) <= UINT8_MAX); ahsumpos = (u_char *)(ahdr + 1); ahdr->ah_nxt = *nexthdrp; *nexthdrp = IPPROTO_AH; - ahdr->ah_len = (plen >> 2) + 1; /* plus one for seq# */ + ahdr->ah_len = (u_int8_t)((plen >> 2) + 1); /* plus one for seq# */ ahdr->ah_reserve = htons(0); ahdr->ah_spi = spi; if (sav->replay[0]->count == ~0) { @@ -542,9 +544,7 @@ ah6_output(struct mbuf *m, u_char *nexthdrp, struct mbuf *md, return error; } -#endif -#if INET /* * Find the final destination if there is loose/strict source routing option. * Returns NULL if there's no source routing options. 
@@ -637,4 +637,3 @@ ah4_finaldst(struct mbuf *m) } return NULL; } -#endif diff --git a/bsd/netinet6/esp.h b/bsd/netinet6/esp.h index c72ac1c72..a294c8b8b 100644 --- a/bsd/netinet6/esp.h +++ b/bsd/netinet6/esp.h @@ -99,12 +99,12 @@ struct esptail { struct secasvar; struct esp_algorithm { - size_t padbound; /* pad boundary, in byte */ + uint32_t padbound; /* pad boundary, in byte */ int ivlenval; /* iv length, in byte */ int (*mature)(struct secasvar *); - int keymin; /* in bits */ - int keymax; /* in bits */ - int (*schedlen)(const struct esp_algorithm *); + u_int16_t keymin; /* in bits */ + u_int16_t keymax; /* in bits */ + size_t (*schedlen)(const struct esp_algorithm *); const char *name; int (*ivlen)(const struct esp_algorithm *, struct secasvar *); int (*decrypt)(struct mbuf *, size_t, @@ -119,8 +119,8 @@ struct esp_algorithm { struct secasvar *, u_int8_t *, u_int8_t *); /* For Authenticated Encryption Methods */ size_t icvlen; - int (*finalizedecrypt)(struct secasvar *, u_int8_t *, uint); - int (*finalizeencrypt)(struct secasvar *, u_int8_t *, uint); + int (*finalizedecrypt)(struct secasvar *, u_int8_t *, size_t); + int (*finalizeencrypt)(struct secasvar *, u_int8_t *, size_t); }; extern os_log_t esp_mpkl_log_object; diff --git a/bsd/netinet6/esp_chachapoly.c b/bsd/netinet6/esp_chachapoly.c index 85450de0a..5c3e3acf0 100644 --- a/bsd/netinet6/esp_chachapoly.c +++ b/bsd/netinet6/esp_chachapoly.c @@ -142,7 +142,7 @@ esp_chachapoly_mature(struct secasvar *sav) return 0; } -int +size_t esp_chachapoly_schedlen(__unused const struct esp_algorithm *algo) { return sizeof(esp_chachapoly_ctx_s); @@ -211,7 +211,7 @@ esp_chachapoly_ivlen(const struct esp_algorithm *algo, int esp_chachapoly_encrypt_finalize(struct secasvar *sav, unsigned char *tag, - unsigned int tag_bytes) + size_t tag_bytes) { esp_chachapoly_ctx_t esp_ccp_ctx; int rc = 0; @@ -219,7 +219,7 @@ esp_chachapoly_encrypt_finalize(struct secasvar *sav, ESP_CHECK_ARG(sav); ESP_CHECK_ARG(tag); if (tag_bytes != ESP_CHACHAPOLY_ICV_LEN) { - esp_log_err("ChaChaPoly Invalid tag_bytes %u, SPI 0x%08x", + esp_log_err("ChaChaPoly Invalid tag_bytes %zu, SPI 0x%08x", tag_bytes, ntohl(sav->spi)); return EINVAL; } @@ -237,7 +237,7 @@ esp_chachapoly_encrypt_finalize(struct secasvar *sav, int esp_chachapoly_decrypt_finalize(struct secasvar *sav, unsigned char *tag, - unsigned int tag_bytes) + size_t tag_bytes) { esp_chachapoly_ctx_t esp_ccp_ctx; int rc = 0; @@ -245,7 +245,7 @@ esp_chachapoly_decrypt_finalize(struct secasvar *sav, ESP_CHECK_ARG(sav); ESP_CHECK_ARG(tag); if (tag_bytes != ESP_CHACHAPOLY_ICV_LEN) { - esp_log_err("ChaChaPoly Invalid tag_bytes %u, SPI 0x%08x", + esp_log_err("ChaChaPoly Invalid tag_bytes %zu, SPI 0x%08x", tag_bytes, ntohl(sav->spi)); return EINVAL; } @@ -274,7 +274,7 @@ esp_chachapoly_encrypt(struct mbuf *m, // head of mbuf chain uint8_t *sp; // buffer of a given encryption round size_t len; // length of a given encryption round const int32_t ivoff = (int32_t)off + (int32_t)sizeof(struct newesp); // IV offset - const int32_t bodyoff = ivoff + ivlen; // body offset + const size_t bodyoff = ivoff + ivlen; // body offset int rc = 0; // return code of corecrypto operations struct newesp esp_hdr; // ESP header for AAD _Static_assert(sizeof(esp_hdr) == 8, "Bad size"); diff --git a/bsd/netinet6/esp_chachapoly.h b/bsd/netinet6/esp_chachapoly.h index 79f2c5af3..97fb157c7 100644 --- a/bsd/netinet6/esp_chachapoly.h +++ b/bsd/netinet6/esp_chachapoly.h @@ -38,15 +38,15 @@ #define ESP_CHACHAPOLY_ICV_LEN 16 #define 
ESP_CHACHAPOLY_KEYBITS_WITH_SALT 288 /* 32 bytes key + 4 bytes salt */ -int esp_chachapoly_schedlen(const struct esp_algorithm *); +size_t esp_chachapoly_schedlen(const struct esp_algorithm *); int esp_chachapoly_schedule(const struct esp_algorithm *, struct secasvar *); int esp_chachapoly_encrypt(struct mbuf *, size_t, size_t, struct secasvar *, const struct esp_algorithm *, int); int esp_chachapoly_decrypt(struct mbuf *, size_t, struct secasvar *, const struct esp_algorithm *, int); -int esp_chachapoly_encrypt_finalize(struct secasvar *, unsigned char *, unsigned int); -int esp_chachapoly_decrypt_finalize(struct secasvar *, unsigned char *, unsigned int); +int esp_chachapoly_encrypt_finalize(struct secasvar *, unsigned char *, size_t); +int esp_chachapoly_decrypt_finalize(struct secasvar *, unsigned char *, size_t); int esp_chachapoly_mature(struct secasvar *); int esp_chachapoly_ivlen(const struct esp_algorithm *, struct secasvar *); diff --git a/bsd/netinet6/esp_core.c b/bsd/netinet6/esp_core.c index 28ce42881..a7e767e2e 100644 --- a/bsd/netinet6/esp_core.c +++ b/bsd/netinet6/esp_core.c @@ -79,24 +79,16 @@ #include #include -#if INET6 #include #include #include -#endif #include -#if INET6 #include -#endif #include -#if INET6 #include -#endif #include -#if INET6 #include -#endif #include #include #include @@ -125,7 +117,7 @@ static int esp_descbc_ivlen(const struct esp_algorithm *, struct secasvar *); static int esp_des_schedule(const struct esp_algorithm *, struct secasvar *); -static int esp_des_schedlen(const struct esp_algorithm *); +static size_t esp_des_schedlen(const struct esp_algorithm *); static int esp_des_blockdecrypt(const struct esp_algorithm *, struct secasvar *, u_int8_t *, u_int8_t *); static int esp_des_blockencrypt(const struct esp_algorithm *, @@ -133,7 +125,7 @@ static int esp_des_blockencrypt(const struct esp_algorithm *, static int esp_cbc_mature(struct secasvar *); static int esp_3des_schedule(const struct esp_algorithm *, struct secasvar *); -static int esp_3des_schedlen(const struct esp_algorithm *); +static size_t esp_3des_schedlen(const struct esp_algorithm *); static int esp_3des_blockdecrypt(const struct esp_algorithm *, struct secasvar *, u_int8_t *, u_int8_t *); static int esp_3des_blockencrypt(const struct esp_algorithm *, @@ -468,7 +460,7 @@ esp_descbc_ivlen( return 8; } -static int +static size_t esp_des_schedlen( __unused const struct esp_algorithm *algo) { @@ -637,7 +629,7 @@ esp_gcm_mature(struct secasvar *sav) return 0; } -static int +static size_t esp_3des_schedlen( __unused const struct esp_algorithm *algo) { @@ -756,8 +748,9 @@ esp_cbc_decrypt(struct mbuf *m, size_t off, struct secasvar *sav, } } + VERIFY(ivoff <= INT_MAX); /* grab iv */ - m_copydata(m, ivoff, ivlen, (caddr_t) iv); + m_copydata(m, (int)ivoff, ivlen, (caddr_t) iv); /* extend iv */ if (ivlen == blocklen) { @@ -798,7 +791,7 @@ esp_cbc_decrypt(struct mbuf *m, size_t off, struct secasvar *sav, /* skip bodyoff */ while (soff < bodyoff) { if (soff + s->m_len > bodyoff) { - sn = bodyoff - soff; + sn = (int)(bodyoff - soff); break; } @@ -864,7 +857,7 @@ esp_cbc_decrypt(struct mbuf *m, size_t off, struct secasvar *sav, } d->m_len = 0; - d->m_len = (M_TRAILINGSPACE(d) / blocklen) * blocklen; + d->m_len = (int)((M_TRAILINGSPACE(d) / blocklen) * blocklen); if (d->m_len > i) { d->m_len = i; } @@ -993,13 +986,15 @@ esp_cbc_encrypt( } } + VERIFY(ivoff <= INT_MAX); + /* put iv into the packet. if we are in derived mode, use seqno. 
*/ if (derived) { - m_copydata(m, ivoff, ivlen, (caddr_t) iv); + m_copydata(m, (int)ivoff, ivlen, (caddr_t) iv); } else { bcopy(sav->iv, iv, ivlen); /* maybe it is better to overwrite dest, not source */ - m_copyback(m, ivoff, ivlen, (caddr_t) iv); + m_copyback(m, (int)ivoff, ivlen, (caddr_t) iv); } /* extend iv */ @@ -1041,7 +1036,7 @@ esp_cbc_encrypt( /* skip bodyoff */ while (soff < bodyoff) { if (soff + s->m_len > bodyoff) { - sn = bodyoff - soff; + sn = (int)(bodyoff - soff); break; } @@ -1107,7 +1102,7 @@ esp_cbc_encrypt( } d->m_len = 0; - d->m_len = (M_TRAILINGSPACE(d) / blocklen) * blocklen; + d->m_len = (int)((M_TRAILINGSPACE(d) / blocklen) * blocklen); if (d->m_len > i) { d->m_len = i; } diff --git a/bsd/netinet6/esp_input.c b/bsd/netinet6/esp_input.c index 849dc02af..1723ed2a5 100644 --- a/bsd/netinet6/esp_input.c +++ b/bsd/netinet6/esp_input.c @@ -93,30 +93,20 @@ #include #include #include -#if INET6 #include -#endif -#if INET6 #include #include #include #include #include -#endif #include -#if INET6 #include -#endif #include -#if INET6 #include -#endif #include -#if INET6 #include -#endif #include #include #include @@ -137,7 +127,6 @@ extern lck_mtx_t *sadb_mutex; -#if INET #define ESPMAXLEN \ (sizeof(struct esp) < sizeof(struct newesp) \ ? sizeof(struct newesp) : sizeof(struct esp)) @@ -147,7 +136,7 @@ esp4_input_strip_udp_encap(struct mbuf *m, int iphlen) { // strip the udp header that's encapsulating ESP struct ip *ip; - size_t stripsiz = sizeof(struct udphdr); + u_int8_t stripsiz = (u_int8_t)sizeof(struct udphdr); ip = mtod(m, __typeof__(ip)); ovbcopy((caddr_t)ip, (caddr_t)(((u_char *)ip) + stripsiz), iphlen); @@ -165,7 +154,7 @@ esp6_input_strip_udp_encap(struct mbuf *m, int ip6hlen) { // strip the udp header that's encapsulating ESP struct ip6_hdr *ip6; - size_t stripsiz = sizeof(struct udphdr); + u_int8_t stripsiz = (u_int8_t)sizeof(struct udphdr); ip6 = mtod(m, __typeof__(ip6)); ovbcopy((caddr_t)ip6, (caddr_t)(((u_char *)ip6) + stripsiz), ip6hlen); @@ -184,13 +173,13 @@ esp_input_log(struct mbuf *m, struct secasvar *sav, u_int32_t spi, u_int32_t seq if (net_mpklog_enabled && (sav->sah->ipsec_if->if_xflags & IFXF_MPK_LOG) == IFXF_MPK_LOG) { struct tcphdr th = {}; - size_t iphlen = 0; u_int32_t proto_len = 0; + u_int8_t iphlen = 0; u_int8_t proto = 0; struct ip *inner_ip = mtod(m, struct ip *); if (IP_VHL_V(inner_ip->ip_vhl) == 4) { - iphlen = IP_VHL_HL(inner_ip->ip_vhl) << 2; + iphlen = (u_int8_t)(IP_VHL_HL(inner_ip->ip_vhl) << 2); proto = inner_ip->ip_p; } else if (IP_VHL_V(inner_ip->ip_vhl) == 6) { struct ip6_hdr *inner_ip6 = mtod(m, struct ip6_hdr *); @@ -222,9 +211,7 @@ struct mbuf * esp4_input_extended(struct mbuf *m, int off, ifnet_t interface) { struct ip *ip; -#if INET6 struct ip6_hdr *ip6; -#endif /* INET6 */ struct esp *esp; struct esptail esptail; u_int32_t spi; @@ -234,8 +221,8 @@ esp4_input_extended(struct mbuf *m, int off, ifnet_t interface) u_int16_t nxt; const struct esp_algorithm *algo; int ivlen; - size_t hlen; size_t esplen; + u_int8_t hlen; sa_family_t ifamily; struct mbuf *out_m = NULL; mbuf_traffic_class_t traffic_class = 0; @@ -275,7 +262,7 @@ esp4_input_extended(struct mbuf *m, int off, ifnet_t interface) } esp = (struct esp *)(void *)(((u_int8_t *)ip) + off); #ifdef _IP_VHL - hlen = IP_VHL_HL(ip->ip_vhl) << 2; + hlen = (u_int8_t)(IP_VHL_HL(ip->ip_vhl) << 2); #else hlen = ip->ip_hl << 2; #endif @@ -334,7 +321,8 @@ esp4_input_extended(struct mbuf *m, int off, ifnet_t interface) unsigned char saved_icv[AH_MAXSUMSIZE]; if (algo->finalizedecrypt) { 
siz = algo->icvlen; - m_copydata(m, m->m_pkthdr.len - siz, siz, (caddr_t) saved_icv); + VERIFY(siz <= USHRT_MAX); + m_copydata(m, m->m_pkthdr.len - (u_short)siz, (u_short)siz, (caddr_t) saved_icv); goto delay_icv; } @@ -351,7 +339,8 @@ esp4_input_extended(struct mbuf *m, int off, ifnet_t interface) /* * check for sequence number. */ - if (ipsec_chkreplay(seq, sav, traffic_class)) { + _CASSERT(MBUF_TC_MAX <= UINT8_MAX); + if (ipsec_chkreplay(seq, sav, (u_int8_t)traffic_class)) { ; /*okey*/ } else { IPSEC_STAT_INCREMENT(ipsecstat.in_espreplay); @@ -384,7 +373,7 @@ esp4_input_extended(struct mbuf *m, int off, ifnet_t interface) goto bad; } - m_copydata(m, m->m_pkthdr.len - siz, siz, (caddr_t) &sum0[0]); + m_copydata(m, m->m_pkthdr.len - (int)siz, (int)siz, (caddr_t) &sum0[0]); if (esp_auth(m, off, m->m_pkthdr.len - off - siz, sav, sum)) { ipseclog((LOG_WARNING, "auth fail in IPv4 ESP input: %s %s\n", @@ -403,10 +392,10 @@ esp4_input_extended(struct mbuf *m, int off, ifnet_t interface) delay_icv: /* strip off the authentication data */ - m_adj(m, -siz); + m_adj(m, (int)-siz); ip = mtod(m, struct ip *); #ifdef IPLEN_FLIPPED - ip->ip_len = ip->ip_len - siz; + ip->ip_len = ip->ip_len - (u_short)siz; #else ip->ip_len = htons(ntohs(ip->ip_len) - siz); #endif @@ -418,7 +407,7 @@ delay_icv: * update sequence number. */ if ((sav->flags & SADB_X_EXT_OLD) == 0 && sav->replay[traffic_class] != NULL) { - if (ipsec_updatereplay(seq, sav, traffic_class)) { + if (ipsec_updatereplay(seq, sav, (u_int8_t)traffic_class)) { IPSEC_STAT_INCREMENT(ipsecstat.in_espreplay); goto bad; } @@ -447,7 +436,7 @@ noreplaycheck: } if (m->m_len < off + esplen + ivlen) { - m = m_pullup(m, off + esplen + ivlen); + m = m_pullup(m, (int)(off + esplen + ivlen)); if (!m) { ipseclog((LOG_DEBUG, "IPv4 ESP input: can't pullup in esp4_input\n")); @@ -512,10 +501,10 @@ noreplaycheck: } /* strip off the trailing pad area. */ - m_adj(m, -taillen); + m_adj(m, (int)-taillen); ip = mtod(m, struct ip *); #ifdef IPLEN_FLIPPED - ip->ip_len = ip->ip_len - taillen; + ip->ip_len = ip->ip_len - (u_short)taillen; #else ip->ip_len = htons(ntohs(ip->ip_len) - taillen); #endif @@ -550,7 +539,7 @@ noreplaycheck: } /* was it transmitted over the IPsec tunnel SA? 
*/ - if (ipsec4_tunnel_validate(m, off + esplen + ivlen, nxt, sav, &ifamily)) { + if (ipsec4_tunnel_validate(m, (int)(off + esplen + ivlen), nxt, sav, &ifamily)) { ifaddr_t ifa; struct sockaddr_storage addr; @@ -565,7 +554,7 @@ noreplaycheck: int sum; tos = ip->ip_tos; - m_adj(m, off + esplen + ivlen); + m_adj(m, (int)(off + esplen + ivlen)); if (ifamily == AF_INET) { struct sockaddr_in *ipaddr; @@ -607,7 +596,6 @@ noreplaycheck: ipaddr->sin_family = AF_INET; ipaddr->sin_len = sizeof(*ipaddr); ipaddr->sin_addr = ip->ip_dst; -#if INET6 } else if (ifamily == AF_INET6) { struct sockaddr_in6 *ip6addr; @@ -651,7 +639,6 @@ noreplaycheck: ip6addr->sin6_family = AF_INET6; ip6addr->sin6_len = sizeof(*ip6addr); ip6addr->sin6_addr = ip6->ip6_dst; -#endif /* INET6 */ } else { ipseclog((LOG_ERR, "ipsec tunnel unsupported address family " "in ESP input\n")); @@ -730,11 +717,11 @@ noreplaycheck: ip = mtod(m, struct ip *); #ifdef IPLEN_FLIPPED - ip->ip_len = ip->ip_len - stripsiz; + ip->ip_len = ip->ip_len - (u_short)stripsiz; #else ip->ip_len = htons(ntohs(ip->ip_len) - stripsiz); #endif - ip->ip_p = nxt; + ip->ip_p = (u_int8_t)nxt; key_sa_recordxfer(sav, m); if (ipsec_addhist(m, IPPROTO_ESP, spi) != 0) { @@ -812,7 +799,7 @@ noreplaycheck: int mlen; if ((mlen = m_length2(m, NULL)) < hlen) { ipseclog((LOG_DEBUG, - "IPv4 ESP input: decrypted packet too short %d < %zu\n", + "IPv4 ESP input: decrypted packet too short %d < %u\n", mlen, hlen)); IPSEC_STAT_INCREMENT(ipsecstat.in_inval); ifnet_release(ipsec_if); @@ -845,7 +832,7 @@ noreplaycheck: } } - ip_proto_dispatch_in(m, off, nxt, 0); + ip_proto_dispatch_in(m, off, (u_int8_t)nxt, 0); } else { m_freem(m); } @@ -874,9 +861,6 @@ bad: KERNEL_DEBUG(DBG_FNC_ESPIN | DBG_FUNC_END, 4, 0, 0, 0, 0); return out_m; } -#endif /* INET */ - -#if INET6 int esp6_input(struct mbuf **mp, int *offp, int proto) @@ -897,12 +881,12 @@ esp6_input_extended(struct mbuf **mp, int *offp, int proto, ifnet_t interface) u_int32_t spi; u_int32_t seq; struct secasvar *sav = NULL; - size_t taillen; u_int16_t nxt; char *nproto; const struct esp_algorithm *algo; int ivlen; size_t esplen; + u_int16_t taillen; sa_family_t ifamily; mbuf_traffic_class_t traffic_class = 0; @@ -1011,7 +995,8 @@ esp6_input_extended(struct mbuf **mp, int *offp, int proto, ifnet_t interface) unsigned char saved_icv[AH_MAXSUMSIZE]; if (algo->finalizedecrypt) { siz = algo->icvlen; - m_copydata(m, m->m_pkthdr.len - siz, siz, (caddr_t) saved_icv); + VERIFY(siz <= UINT16_MAX); + m_copydata(m, m->m_pkthdr.len - (int)siz, (int)siz, (caddr_t) saved_icv); goto delay_icv; } @@ -1029,7 +1014,7 @@ esp6_input_extended(struct mbuf **mp, int *offp, int proto, ifnet_t interface) /* * check for sequence number. 
*/ - if (ipsec_chkreplay(seq, sav, traffic_class)) { + if (ipsec_chkreplay(seq, sav, (u_int8_t)traffic_class)) { ; /*okey*/ } else { IPSEC_STAT_INCREMENT(ipsec6stat.in_espreplay); @@ -1062,7 +1047,7 @@ esp6_input_extended(struct mbuf **mp, int *offp, int proto, ifnet_t interface) goto bad; } - m_copydata(m, m->m_pkthdr.len - siz, siz, (caddr_t) &sum0[0]); + m_copydata(m, m->m_pkthdr.len - (int)siz, (int)siz, (caddr_t) &sum0[0]); if (esp_auth(m, off, m->m_pkthdr.len - off - siz, sav, sum)) { ipseclog((LOG_WARNING, "auth fail in IPv6 ESP input: %s %s\n", @@ -1081,9 +1066,9 @@ esp6_input_extended(struct mbuf **mp, int *offp, int proto, ifnet_t interface) delay_icv: /* strip off the authentication data */ - m_adj(m, -siz); + m_adj(m, (int)-siz); ip6 = mtod(m, struct ip6_hdr *); - ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - siz); + ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - (u_int16_t)siz); m->m_flags |= M_AUTHIPDGM; IPSEC_STAT_INCREMENT(ipsec6stat.in_espauthsucc); @@ -1093,7 +1078,7 @@ delay_icv: * update sequence number. */ if ((sav->flags & SADB_X_EXT_OLD) == 0 && sav->replay[traffic_class] != NULL) { - if (ipsec_updatereplay(seq, sav, traffic_class)) { + if (ipsec_updatereplay(seq, sav, (u_int8_t)traffic_class)) { IPSEC_STAT_INCREMENT(ipsec6stat.in_espreplay); goto bad; } @@ -1122,7 +1107,7 @@ noreplaycheck: } #ifndef PULLDOWN_TEST - IP6_EXTHDR_CHECK(m, off, esplen + ivlen, return IPPROTO_DONE); /*XXX*/ + IP6_EXTHDR_CHECK(m, off, (int)(esplen + ivlen), return IPPROTO_DONE); /*XXX*/ #else IP6_EXTHDR_GET(esp, struct esp *, m, off, esplen + ivlen); if (esp == NULL) { @@ -1222,7 +1207,7 @@ noreplaycheck: /* was it transmitted over the IPsec tunnel SA? */ - if (ipsec6_tunnel_validate(m, off + esplen + ivlen, nxt, sav, &ifamily)) { + if (ipsec6_tunnel_validate(m, (int)(off + esplen + ivlen), nxt, sav, &ifamily)) { ifaddr_t ifa; struct sockaddr_storage addr; @@ -1235,7 +1220,7 @@ noreplaycheck: */ u_int32_t flowinfo; /*net endian*/ flowinfo = ip6->ip6_flow; - m_adj(m, off + esplen + ivlen); + m_adj(m, (int)(off + esplen + ivlen)); if (ifamily == AF_INET6) { struct sockaddr_in6 *ip6addr; @@ -1376,16 +1361,17 @@ noreplaycheck: * even in m_pulldown case, we need to strip off ESP so that * we can always compute checksum for AH correctly. */ - size_t stripsiz; + u_int16_t stripsiz; char *prvnxtp; /* * Set the next header field of the previous header correctly. 
*/ prvnxtp = ip6_get_prevhdr(m, off); /* XXX */ - *prvnxtp = nxt; + *prvnxtp = (u_int8_t)nxt; - stripsiz = esplen + ivlen; + VERIFY(esplen + ivlen <= UINT16_MAX); + stripsiz = (u_int16_t)(esplen + ivlen); ip6 = mtod(m, struct ip6_hdr *); if (m->m_len >= stripsiz + off) { @@ -1645,4 +1631,3 @@ esp6_ctlinput(int cmd, struct sockaddr *sa, void *d, __unused struct ifnet *ifp) /* we normally notify any pcb here */ } } -#endif /* INET6 */ diff --git a/bsd/netinet6/esp_output.c b/bsd/netinet6/esp_output.c index 9401200f3..c867a5ce9 100644 --- a/bsd/netinet6/esp_output.c +++ b/bsd/netinet6/esp_output.c @@ -89,24 +89,16 @@ #include #include -#if INET6 #include #include #include -#endif #include -#if INET6 #include -#endif #include -#if INET6 #include -#endif #include -#if INET6 #include -#endif #include #include @@ -257,7 +249,7 @@ esp_output( struct tcphdr th = {}; u_int32_t spi; u_int32_t seq; - u_int32_t inner_payload_len = 0; + size_t inner_payload_len = 0; u_int8_t inner_protocol = 0; u_int8_t nxt = 0; size_t plen; /*payload length to be encrypted*/ @@ -274,18 +266,14 @@ esp_output( KERNEL_DEBUG(DBG_FNC_ESPOUT | DBG_FUNC_START, sav->ivlen, 0, 0, 0, 0); switch (af) { -#if INET case AF_INET: afnumber = 4; stat = &ipsecstat; break; -#endif -#if INET6 case AF_INET6: afnumber = 6; stat = &ipsec6stat; break; -#endif default: ipseclog((LOG_ERR, "esp_output: unsupported af %d\n", af)); KERNEL_DEBUG(DBG_FNC_ESPOUT | DBG_FUNC_END, 1, 0, 0, 0, 0); @@ -297,22 +285,18 @@ esp_output( SADB_X_EXT_SA2_SEQ_PER_TRAFFIC_CLASS) { u_int8_t dscp = 0; switch (af) { -#if INET case AF_INET: { struct ip *ip = mtod(m, struct ip *); dscp = ip->ip_tos >> IPTOS_DSCP_SHIFT; break; } -#endif /*INET*/ -#if INET6 case AF_INET6: { struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *); dscp = (ntohl(ip6->ip6_flow) & IP6FLOW_DSCP_MASK) >> IP6FLOW_DSCP_SHIFT; break; } -#endif /*INET6*/ default: panic("esp_output: should not reach here"); } @@ -322,7 +306,6 @@ esp_output( /* some sanity check */ if ((sav->flags & SADB_X_EXT_OLD) == 0 && sav->replay[traffic_class] == NULL) { switch (af) { -#if INET case AF_INET: { struct ip *ip; @@ -336,15 +319,12 @@ esp_output( IPSEC_STAT_INCREMENT(ipsecstat.out_inval); break; } -#endif /*INET*/ -#if INET6 case AF_INET6: ipseclog((LOG_DEBUG, "esp6_output: internal error: " "sav->replay is null: SPI=%u\n", (u_int32_t)ntohl(sav->spi))); IPSEC_STAT_INCREMENT(ipsec6stat.out_inval); break; -#endif /*INET6*/ default: panic("esp_output: should not reach here"); } @@ -375,12 +355,8 @@ esp_output( * chase the header chain. 
* XXX sequential number */ -#if INET struct ip *ip = NULL; -#endif -#if INET6 struct ip6_hdr *ip6 = NULL; -#endif size_t esplen; /* sizeof(struct esp/newesp) */ size_t hlen = 0; /* ip header len */ @@ -414,7 +390,6 @@ esp_output( } switch (af) { -#if INET case AF_INET: ip = mtod(m, struct ip *); #ifdef _IP_VHL @@ -423,13 +398,10 @@ esp_output( hlen = ip->ip_hl << 2; #endif break; -#endif -#if INET6 case AF_INET6: ip6 = mtod(m, struct ip6_hdr *); hlen = sizeof(*ip6); break; -#endif } /* grab info for packet logging */ @@ -459,7 +431,7 @@ esp_output( if (inner_protocol == IPPROTO_TCP) { if ((int)(iphlen + sizeof(th)) <= (m->m_pkthdr.len - m->m_len)) { - m_copydata(md, iphlen, sizeof(th), (u_int8_t *)&th); + m_copydata(md, (int)iphlen, sizeof(th), (u_int8_t *)&th); } inner_payload_len = m->m_pkthdr.len - m->m_len - iphlen - (th.th_off << 2); @@ -475,7 +447,7 @@ esp_output( if (inner_protocol == IPPROTO_TCP) { if ((int)(iphlen + sizeof(th)) <= m->m_pkthdr.len) { - m_copydata(m, iphlen, sizeof(th), (u_int8_t *)&th); + m_copydata(m, (int)iphlen, sizeof(th), (u_int8_t *)&th); } inner_payload_len = m->m_pkthdr.len - iphlen - (th.th_off << 2); @@ -549,7 +521,8 @@ esp_output( error = ENOBUFS; goto fail; } - n->m_len = esphlen; + VERIFY(esphlen <= INT32_MAX); + n->m_len = (int)esphlen; mprev->m_next = n; n->m_next = md; m->m_pkthdr.len += esphlen; @@ -573,10 +546,9 @@ esp_output( } switch (af) { -#if INET case AF_INET: if (esphlen < (IP_MAXPACKET - ntohs(ip->ip_len))) { - ip->ip_len = htons(ntohs(ip->ip_len) + esphlen); + ip->ip_len = htons(ntohs(ip->ip_len) + (u_short)esphlen); } else { ipseclog((LOG_ERR, "IPv4 ESP output: size exceeds limit\n")); @@ -586,12 +558,9 @@ esp_output( goto fail; } break; -#endif -#if INET6 case AF_INET6: /* total packet length will be computed in ip6_output() */ break; -#endif } } @@ -627,9 +596,7 @@ esp_output( /* * find the last mbuf. make some room for ESP trailer. 
*/ -#if INET struct ip *ip = NULL; -#endif size_t padbound; u_char *extend; int i; @@ -652,16 +619,12 @@ esp_output( /* random padding */ switch (af) { -#if INET case AF_INET: randpadmax = ip4_esp_randpad; break; -#endif -#if INET6 case AF_INET6: randpadmax = ip6_esp_randpad; break; -#endif default: randpadmax = -1; break; @@ -669,10 +632,10 @@ esp_output( if (randpadmax < 0 || plen + extendsiz >= randpadmax) { ; } else { - int pad; + size_t pad; /* round */ - randpadmax = (randpadmax / padbound) * padbound; + randpadmax = (int)((randpadmax / padbound) * padbound); pad = (randpadmax - plen + extendsiz) / padbound; if (pad > 0) { @@ -695,12 +658,6 @@ esp_output( } } -#if DIAGNOSTIC - if (extendsiz > MLEN || extendsiz >= 256) { - panic("extendsiz too big in esp_output"); - } -#endif - n = m; while (n->m_next) { n = n->m_next; @@ -713,7 +670,7 @@ esp_output( */ if (!(n->m_flags & M_EXT) && extendsiz < M_TRAILINGSPACE(n)) { extend = mtod(n, u_char *) + n->m_len; - n->m_len += extendsiz; + n->m_len += (int)extendsiz; m->m_pkthdr.len += extendsiz; } else { struct mbuf *nn; @@ -727,7 +684,8 @@ esp_output( goto fail; } extend = mtod(nn, u_char *); - nn->m_len = extendsiz; + VERIFY(extendsiz <= INT_MAX); + nn->m_len = (int)extendsiz; nn->m_next = NULL; n->m_next = nn; n = nn; @@ -771,15 +729,15 @@ esp_output( esptail = (struct esptail *) (mtod(n, u_int8_t *) + n->m_len - sizeof(struct esptail)); esptail->esp_nxt = nxt; - esptail->esp_padlen = extendsiz - 2; + VERIFY((extendsiz - 2) <= UINT8_MAX); + esptail->esp_padlen = (u_int8_t)(extendsiz - 2); /* modify IP header (for ESP header part only) */ switch (af) { -#if INET case AF_INET: ip = mtod(m, struct ip *); if (extendsiz < (IP_MAXPACKET - ntohs(ip->ip_len))) { - ip->ip_len = htons(ntohs(ip->ip_len) + extendsiz); + ip->ip_len = htons(ntohs(ip->ip_len) + (u_short)extendsiz); } else { ipseclog((LOG_ERR, "IPv4 ESP output: size exceeds limit\n")); @@ -789,12 +747,9 @@ esp_output( goto fail; } break; -#endif -#if INET6 case AF_INET6: /* total packet length will be computed in ip6_output() */ break; -#endif } } @@ -900,7 +855,7 @@ fill_icv: error = ENOBUFS; goto fail; } - nn->m_len = siz; + nn->m_len = (int)siz; nn->m_next = NULL; n->m_next = nn; n = nn; @@ -911,11 +866,10 @@ fill_icv: /* modify IP header (for ESP header part only) */ switch (af) { - #if INET case AF_INET: ip = mtod(m, struct ip *); if (siz < (IP_MAXPACKET - ntohs(ip->ip_len))) { - ip->ip_len = htons(ntohs(ip->ip_len) + siz); + ip->ip_len = htons(ntohs(ip->ip_len) + (u_short)siz); } else { ipseclog((LOG_ERR, "IPv4 ESP output: size exceeds limit\n")); @@ -925,12 +879,9 @@ fill_icv: goto fail; } break; - #endif - #if INET6 case AF_INET6: /* total packet length will be computed in ip6_output() */ break; - #endif } } @@ -941,11 +892,12 @@ fill_icv: switch (af) { case AF_INET: ip = mtod(m, struct ip *); - udp->uh_ulen = htons(ntohs(ip->ip_len) - (IP_VHL_HL(ip->ip_vhl) << 2)); + udp->uh_ulen = htons((u_int16_t)(ntohs(ip->ip_len) - (IP_VHL_HL(ip->ip_vhl) << 2))); break; case AF_INET6: ip6 = mtod(m, struct ip6_hdr *); - udp->uh_ulen = htons(plen + siz + extendsiz + esphlen); + VERIFY((plen + siz + extendsiz + esphlen) <= UINT16_MAX); + udp->uh_ulen = htons((u_int16_t)(plen + siz + extendsiz + esphlen)); udp->uh_sum = in6_pseudo(&ip6->ip6_src, &ip6->ip6_dst, htonl(ntohs(udp->uh_ulen) + IPPROTO_UDP)); m->m_pkthdr.csum_flags = (CSUM_UDPIPV6 | CSUM_ZERO_INVERT); m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); @@ -962,7 +914,7 @@ noantireplay: ntohl(spi), seq, ntohs(th.th_sport), 
ntohs(th.th_dport), ntohl(th.th_seq), ntohl(th.th_ack), - th.th_flags, inner_payload_len); + inner_payload_len, th.th_flags); } lck_mtx_lock(sadb_mutex); @@ -979,15 +931,10 @@ noantireplay: return 0; fail: -#if 1 KERNEL_DEBUG(DBG_FNC_ESPOUT | DBG_FUNC_END, 7, error, 0, 0, 0); return error; -#else - panic("something bad in esp_output"); -#endif } -#if INET int esp4_output( struct mbuf *m, @@ -1003,9 +950,7 @@ esp4_output( /* XXX assumes that m->m_next points to payload */ return esp_output(m, &ip->ip_p, m->m_next, AF_INET, sav); } -#endif /*INET*/ -#if INET6 int esp6_output( struct mbuf *m, @@ -1020,4 +965,3 @@ esp6_output( } return esp_output(m, nexthdrp, md, AF_INET6, sav); } -#endif /*INET6*/ diff --git a/bsd/netinet6/esp_rijndael.c b/bsd/netinet6/esp_rijndael.c index 9a768a004..65e6dd4c9 100644 --- a/bsd/netinet6/esp_rijndael.c +++ b/bsd/netinet6/esp_rijndael.c @@ -95,7 +95,7 @@ typedef struct { ccgcm_ctx ctxt[0]; } aes_gcm_ctx; -int +size_t esp_aes_schedlen( __unused const struct esp_algorithm *algo) { @@ -195,8 +195,10 @@ esp_cbc_decrypt_aes( return EINVAL; } + VERIFY(ivoff <= INT_MAX); + /* grab iv */ - m_copydata(m, ivoff, ivlen, (caddr_t) iv); + m_copydata(m, (int)ivoff, ivlen, (caddr_t) iv); s = m; soff = sn = dn = 0; @@ -206,7 +208,7 @@ esp_cbc_decrypt_aes( /* skip header/IV offset */ while (soff < bodyoff) { if (soff + s->m_len > bodyoff) { - sn = bodyoff - soff; + sn = (int)(bodyoff - soff); break; } @@ -271,7 +273,7 @@ esp_cbc_decrypt_aes( m_adj(d, IPSEC_GET_P2UNALIGNED_OFS(d->m_data)); } - d->m_len = M_TRAILINGSPACE(d); + d->m_len = (int)M_TRAILINGSPACE(d); d->m_len -= d->m_len % AES_BLOCKLEN; if (d->m_len > i) { d->m_len = i; @@ -394,8 +396,10 @@ esp_cbc_encrypt_aes( bodyoff = off + sizeof(struct newesp) + ivlen; } + VERIFY(ivoff <= INT_MAX); + /* put iv into the packet */ - m_copyback(m, ivoff, ivlen, sav->iv); + m_copyback(m, (int)ivoff, ivlen, sav->iv); ivp = (u_int8_t *) sav->iv; if (m->m_pkthdr.len < bodyoff) { @@ -420,7 +424,7 @@ esp_cbc_encrypt_aes( /* skip headers/IV */ while (soff < bodyoff) { if (soff + s->m_len > bodyoff) { - sn = bodyoff - soff; + sn = (int)(bodyoff - soff); break; } @@ -485,7 +489,7 @@ esp_cbc_encrypt_aes( m_adj(d, IPSEC_GET_P2UNALIGNED_OFS(d->m_data)); } - d->m_len = M_TRAILINGSPACE(d); + d->m_len = (int)M_TRAILINGSPACE(d); d->m_len -= d->m_len % AES_BLOCKLEN; if (d->m_len > i) { d->m_len = i; @@ -582,7 +586,7 @@ esp_cbc_encrypt_aes( return 0; } -int +size_t esp_gcm_schedlen( __unused const struct esp_algorithm *algo) { @@ -626,7 +630,7 @@ esp_gcm_schedule( __unused const struct esp_algorithm *algo, int esp_gcm_encrypt_finalize(struct secasvar *sav, - unsigned char *tag, unsigned int tag_bytes) + unsigned char *tag, size_t tag_bytes) { aes_gcm_ctx *ctx = (aes_gcm_ctx*)P2ROUNDUP(sav->sched, ESP_GCM_ALIGN); return aes_encrypt_finalize_gcm(tag, tag_bytes, ctx->encrypt); @@ -634,7 +638,7 @@ esp_gcm_encrypt_finalize(struct secasvar *sav, int esp_gcm_decrypt_finalize(struct secasvar *sav, - unsigned char *tag, unsigned int tag_bytes) + unsigned char *tag, size_t tag_bytes) { aes_gcm_ctx *ctx = (aes_gcm_ctx*)P2ROUNDUP(sav->sched, ESP_GCM_ALIGN); return aes_decrypt_finalize_gcm(tag, tag_bytes, ctx->decrypt); @@ -692,6 +696,8 @@ esp_gcm_encrypt_aes( return EINVAL; } + VERIFY(ivoff <= INT_MAX); + /* * The IV is now generated within corecrypto and * is provided to ESP using aes_encrypt_inc_iv_gcm(). @@ -701,7 +707,7 @@ esp_gcm_encrypt_aes( * this value will get the latest IV. 
*/ memcpy(sav->iv, (nonce + ESP_GCM_SALT_LEN), ivlen); - m_copyback(m, ivoff, ivlen, sav->iv); + m_copyback(m, (int)ivoff, ivlen, sav->iv); bzero(nonce, ESP_GCM_SALT_LEN + ivlen); if (m->m_pkthdr.len < bodyoff) { @@ -711,10 +717,12 @@ esp_gcm_encrypt_aes( return EINVAL; } + VERIFY(off <= INT_MAX); + /* Set Additional Authentication Data */ if (!(sav->flags & SADB_X_EXT_OLD)) { struct newesp esp; - m_copydata(m, off, sizeof(esp), (caddr_t) &esp); + m_copydata(m, (int)off, sizeof(esp), (caddr_t) &esp); if (aes_encrypt_aad_gcm((unsigned char*)&esp, sizeof(esp), ctx->encrypt)) { ipseclog((LOG_ERR, "%s: packet decryption AAD failure\n", __FUNCTION__)); m_freem(m); @@ -730,7 +738,7 @@ esp_gcm_encrypt_aes( /* skip headers/IV */ while (soff < bodyoff) { if (soff + s->m_len > bodyoff) { - sn = bodyoff - soff; + sn = (int)(bodyoff - soff); break; } @@ -786,7 +794,7 @@ esp_gcm_encrypt_aes( m_adj(d, IPSEC_GET_P2UNALIGNED_OFS(d->m_data)); } - d->m_len = M_TRAILINGSPACE(d); + d->m_len = (int)M_TRAILINGSPACE(d); if (d->m_len > i) { d->m_len = i; @@ -912,8 +920,10 @@ esp_gcm_decrypt_aes( return EINVAL; } + VERIFY(ivoff <= INT_MAX); + /* grab iv */ - m_copydata(m, ivoff, ivlen, (caddr_t) iv); + m_copydata(m, (int)ivoff, ivlen, (caddr_t) iv); /* Set IV */ memcpy(nonce, _KEYBUF(sav->key_enc) + _KEYLEN(sav->key_enc) - ESP_GCM_SALT_LEN, ESP_GCM_SALT_LEN); @@ -928,10 +938,12 @@ esp_gcm_decrypt_aes( } bzero(nonce, sizeof(nonce)); + VERIFY(off <= INT_MAX); + /* Set Additional Authentication Data */ if (!(sav->flags & SADB_X_EXT_OLD)) { struct newesp esp; - m_copydata(m, off, sizeof(esp), (caddr_t) &esp); + m_copydata(m, (int)off, sizeof(esp), (caddr_t) &esp); if (aes_decrypt_aad_gcm((unsigned char*)&esp, sizeof(esp), ctx->decrypt)) { ipseclog((LOG_ERR, "%s: packet decryption AAD failure\n", __FUNCTION__)); return EINVAL; @@ -946,7 +958,7 @@ esp_gcm_decrypt_aes( /* skip header/IV offset */ while (soff < bodyoff) { if (soff + s->m_len > bodyoff) { - sn = bodyoff - soff; + sn = (int)(bodyoff - soff); break; } @@ -1002,7 +1014,7 @@ esp_gcm_decrypt_aes( m_adj(d, IPSEC_GET_P2UNALIGNED_OFS(d->m_data)); } - d->m_len = M_TRAILINGSPACE(d); + d->m_len = (int)M_TRAILINGSPACE(d); if (d->m_len > i) { d->m_len = i; diff --git a/bsd/netinet6/esp_rijndael.h b/bsd/netinet6/esp_rijndael.h index f579fe66b..9140b8934 100644 --- a/bsd/netinet6/esp_rijndael.h +++ b/bsd/netinet6/esp_rijndael.h @@ -60,7 +60,7 @@ #include #ifdef BSD_KERNEL_PRIVATE -int esp_aes_schedlen(const struct esp_algorithm *); +size_t esp_aes_schedlen(const struct esp_algorithm *); int esp_aes_schedule(const struct esp_algorithm *, struct secasvar *); int esp_cbc_decrypt_aes(struct mbuf *, size_t, struct secasvar *, const struct esp_algorithm *, int); @@ -68,10 +68,10 @@ int esp_cbc_encrypt_aes(struct mbuf *, size_t, size_t, struct secasvar *, const struct esp_algorithm *, int); -int esp_gcm_schedlen(const struct esp_algorithm *); +size_t esp_gcm_schedlen(const struct esp_algorithm *); int esp_gcm_schedule(const struct esp_algorithm *, struct secasvar *); int esp_gcm_encrypt_aes(struct mbuf *, size_t, size_t, struct secasvar *, const struct esp_algorithm *, int); int esp_gcm_decrypt_aes(struct mbuf *, size_t, struct secasvar *, const struct esp_algorithm *, int); -int esp_gcm_encrypt_finalize(struct secasvar *, unsigned char *, unsigned int); -int esp_gcm_decrypt_finalize(struct secasvar *, unsigned char *, unsigned int); +int esp_gcm_encrypt_finalize(struct secasvar *, unsigned char *, size_t); +int esp_gcm_decrypt_finalize(struct secasvar *, unsigned char *, 
size_t); #endif /* BSD_KERNEL_PRIVATE */ diff --git a/bsd/netinet6/frag6.c b/bsd/netinet6/frag6.c index 001cd1f12..fd1799d34 100644 --- a/bsd/netinet6/frag6.c +++ b/bsd/netinet6/frag6.c @@ -91,6 +91,17 @@ * Define it to get a correct behavior on per-interface statistics. */ #define IN6_IFSTAT_STRICT +struct ip6asfrag { + struct ip6asfrag *ip6af_down; + struct ip6asfrag *ip6af_up; + struct mbuf *ip6af_m; + int ip6af_offset; /* offset in ip6af_m to next header */ + int ip6af_frglen; /* fragmentable part length */ + int ip6af_off; /* fragment offset */ + u_int16_t ip6af_mff; /* more fragment bit in frag off */ +}; + +#define IP6_REASS_MBUF(ip6af) ((ip6af)->ip6af_m) MBUFQ_HEAD(fq6_head); @@ -105,6 +116,7 @@ static void frag6_enq(struct ip6asfrag *, struct ip6asfrag *); static void frag6_deq(struct ip6asfrag *); static void frag6_insque(struct ip6q *, struct ip6q *); static void frag6_remque(struct ip6q *); +static void frag6_purgef(struct ip6q *, struct fq6_head *, struct fq6_head *); static void frag6_freef(struct ip6q *, struct fq6_head *, struct fq6_head *); static int frag6_timeout_run; /* frag6 timer is scheduled to run */ @@ -282,7 +294,8 @@ frag6_input(struct mbuf **mp, int *offp, int proto) struct ip6_frag *ip6f = NULL; struct ip6q *q6 = NULL; struct ip6asfrag *af6 = NULL, *ip6af = NULL, *af6dwn = NULL; - int offset = *offp, nxt = 0, i = 0, next = 0; + int offset = *offp, i = 0, next = 0; + u_int8_t nxt = 0; int first_frag = 0; int fragoff = 0, frgpartlen = 0; /* must be larger than u_int16_t */ struct ifnet *dstifp = NULL; @@ -290,6 +303,7 @@ frag6_input(struct mbuf **mp, int *offp, int proto) uint32_t csum = 0, csum_flags = 0; struct fq6_head diq6 = {}; int locked = 0; + boolean_t drop_fragq = FALSE; VERIFY(m->m_flags & M_PKTHDR); @@ -461,7 +475,11 @@ frag6_input(struct mbuf **mp, int *offp, int proto) if (q6 == &ip6q) { /* - * the first fragment to arrive, create a reassembly queue. + * Create a reassembly queue as this is the first fragment to + * arrive. + * By first frag, we don't mean the one with offset 0, but + * any of the fragments of the fragmented packet that has + * reached us first. */ first_frag = 1; @@ -487,6 +505,7 @@ frag6_input(struct mbuf **mp, int *offp, int proto) q6->ip6q_unfrglen = -1; /* The 1st fragment has not arrived. */ q6->ip6q_nfrag = 0; + q6->ip6q_flags = 0; /* * If the first fragment has valid checksum offload @@ -498,6 +517,10 @@ frag6_input(struct mbuf **mp, int *offp, int proto) } } + if (q6->ip6q_flags & IP6QF_DIRTY) { + goto dropfrag; + } + /* * If it's the 1st fragment, record the length of the * unfragmentable part and the next header of the fragment header. @@ -550,7 +573,8 @@ frag6_input(struct mbuf **mp, int *offp, int proto) if (!ip6_pkt_has_ulp(m)) { lck_mtx_unlock(&ip6qlock); locked = 0; - icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER, 0); + icmp6_error(m, ICMP6_PARAM_PROB, + ICMP6_PARAMPROB_FIRSTFRAG_INCOMP_HDR, 0); m = NULL; goto done; } @@ -634,84 +658,49 @@ frag6_input(struct mbuf **mp, int *offp, int proto) } } -#if 0 /* - * If there is a preceding segment, it may provide some of - * our data already. If so, drop the data from the incoming - * segment. If it provides all of our data, drop us. + * As per RFC 8200 reassembly rules, we MUST drop the entire + * chain of fragments for a packet to be assembled, if we receive + * any overlapping fragments. + * https://tools.ietf.org/html/rfc8200#page-20 * - * If some of the data is dropped from the preceding - * segment, then it's checksum is invalidated. 
+ * To avoid more conditional code, just reuse frag6_freef and defer + * its call to post fragment insertion in the queue. */ if (af6->ip6af_up != (struct ip6asfrag *)q6) { - i = af6->ip6af_up->ip6af_off + af6->ip6af_up->ip6af_frglen - - ip6af->ip6af_off; - if (i > 0) { - if (i >= ip6af->ip6af_frglen) { + if (af6->ip6af_up->ip6af_off == ip6af->ip6af_off) { + if (af6->ip6af_up->ip6af_frglen != ip6af->ip6af_frglen) { + drop_fragq = TRUE; + } else { + /* + * XXX Ideally we should be comparing the entire + * packet here but for now just use off and fraglen + * to ignore a duplicate fragment. + */ + ip6af_free(ip6af); goto dropfrag; } - m_adj(IP6_REASS_MBUF(ip6af), i); - q6->ip6q_csum_flags = 0; - ip6af->ip6af_off += i; - ip6af->ip6af_frglen -= i; + } else { + i = af6->ip6af_up->ip6af_off + af6->ip6af_up->ip6af_frglen + - ip6af->ip6af_off; + if (i > 0) { + drop_fragq = TRUE; + } } } - /* - * While we overlap succeeding segments trim them or, - * if they are completely covered, dequeue them. - */ - while (af6 != (struct ip6asfrag *)q6 && - ip6af->ip6af_off + ip6af->ip6af_frglen > af6->ip6af_off) { - i = (ip6af->ip6af_off + ip6af->ip6af_frglen) - af6->ip6af_off; - if (i < af6->ip6af_frglen) { - af6->ip6af_frglen -= i; - af6->ip6af_off += i; - m_adj(IP6_REASS_MBUF(af6), i); - q6->ip6q_csum_flags = 0; - break; - } - af6 = af6->ip6af_down; - m_freem(IP6_REASS_MBUF(af6->ip6af_up)); - frag6_deq(af6->ip6af_up); - } -#else - /* - * If the incoming framgent overlaps some existing fragments in - * the reassembly queue, drop it, since it is dangerous to override - * existing fragments from a security point of view. - * We don't know which fragment is the bad guy - here we trust - * fragment that came in earlier, with no real reason. - * - * Note: due to changes after disabling this part, mbuf passed to - * m_adj() below now does not meet the requirement. - */ - if (af6->ip6af_up != (struct ip6asfrag *)q6) { - i = af6->ip6af_up->ip6af_off + af6->ip6af_up->ip6af_frglen - - ip6af->ip6af_off; - if (i > 0) { -#if 0 /* suppress the noisy log */ - log(LOG_ERR, "%d bytes of a fragment from %s " - "overlaps the previous fragment\n", - i, ip6_sprintf(&q6->ip6q_src)); -#endif - ip6af_free(ip6af); - goto dropfrag; - } - } if (af6 != (struct ip6asfrag *)q6) { + /* + * Given that we break when af6->ip6af_off > ip6af->ip6af_off, + * we shouldn't need a check for duplicate fragment here. + * For now just assert. + */ + VERIFY(af6->ip6af_off != ip6af->ip6af_off); i = (ip6af->ip6af_off + ip6af->ip6af_frglen) - af6->ip6af_off; if (i > 0) { -#if 0 /* suppress the noisy log */ - log(LOG_ERR, "%d bytes of a fragment from %s " - "overlaps the succeeding fragment", - i, ip6_sprintf(&q6->ip6q_src)); -#endif - ip6af_free(ip6af); - goto dropfrag; + drop_fragq = TRUE; } } -#endif /* * If this fragment contains similar checksum offload info @@ -725,7 +714,6 @@ frag6_input(struct mbuf **mp, int *offp, int proto) } insert: - /* * Stick new segment in its place; * check for complete reassembly. @@ -735,12 +723,53 @@ insert: frag6_enq(ip6af, af6->ip6af_up); frag6_nfrags++; q6->ip6q_nfrag++; -#if 0 /* xxx */ - if (q6 != ip6q.ip6q_next) { - frag6_remque(q6); - frag6_insque(q6, &ip6q); + + /* + * This holds true, when we receive overlapping fragments. + * We must silently drop all the fragments we have received + * so far. + * Also mark q6 as dirty, so as to not add any new fragments to it. 
+ * Make sure even q6 marked dirty is kept till timer expires for + * reassembly and when that happens, silently get rid of q6 + */ + if (drop_fragq) { + struct fq6_head dfq6 = {0}; + MBUFQ_INIT(&dfq6); /* for deferred frees */ + q6->ip6q_flags |= IP6QF_DIRTY; + /* Purge all the fragments but do not free q6 */ + frag6_purgef(q6, &dfq6, NULL); + af6 = NULL; + + /* free fragments that need to be freed */ + if (!MBUFQ_EMPTY(&dfq6)) { + MBUFQ_DRAIN(&dfq6); + } + VERIFY(MBUFQ_EMPTY(&dfq6)); + /* + * Just in case the above logic got anything added + * to diq6, drain it. + * Please note that these mbufs are not present in the + * fragment queue and are added to diq6 for sending + * ICMPv6 error. + * Given that the current fragment was an overlapping + * fragment and the RFC requires us to not send any + * ICMPv6 errors while purging the entire queue, + * just empty it out. + */ + if (!MBUFQ_EMPTY(&diq6)) { + MBUFQ_DRAIN(&diq6); + } + VERIFY(MBUFQ_EMPTY(&diq6)); + /* + * MBUFQ_DRAIN would have drained all the mbufs + * in the fragment queue. + * This shouldn't be needed as we are returning IPPROTO_DONE + * from here but change the passed mbuf pointer to NULL. + */ + *mp = NULL; + lck_mtx_unlock(&ip6qlock); + return IPPROTO_DONE; } -#endif next = 0; for (af6 = q6->ip6q_down; af6 != (struct ip6asfrag *)q6; af6 = af6->ip6af_down) { @@ -788,7 +817,7 @@ insert: ADDCARRY(csum); - m->m_pkthdr.csum_rx_val = csum; + m->m_pkthdr.csum_rx_val = (u_int16_t)csum; m->m_pkthdr.csum_rx_start = sizeof(struct ip6_hdr); m->m_pkthdr.csum_flags = q6->ip6q_csum_flags; } else if ((m->m_pkthdr.rcvif->if_flags & IFF_LOOPBACK) || @@ -802,7 +831,7 @@ insert: offset = ip6af->ip6af_offset - sizeof(struct ip6_frag); ip6af_free(ip6af); ip6 = mtod(m, struct ip6_hdr *); - ip6->ip6_plen = htons((u_short)next + offset - sizeof(struct ip6_hdr)); + ip6->ip6_plen = htons((uint16_t)(next + offset - sizeof(struct ip6_hdr))); ip6->ip6_src = q6->ip6q_src; ip6->ip6_dst = q6->ip6q_dst; if (q6->ip6q_ecn == IPTOS_ECN_CE) { @@ -907,13 +936,20 @@ dropfrag: } /* - * Free a fragment reassembly header and all - * associated datagrams. + * This routine removes the enqueued frames from the passed fragment + * header and enqueues those to dfq6 which is an out-arg for the dequeued + * fragments. + * If the caller also provides diq6, this routine also enqueues the 0 offset + * fragment to that list as it potentially gets used by the caller + * to prepare the relevant ICMPv6 error message (time exceeded or + * param problem). + * It leaves the fragment header object (q6) intact. */ -void -frag6_freef(struct ip6q *q6, struct fq6_head *dfq6, struct fq6_head *diq6) +static void +frag6_purgef(struct ip6q *q6, struct fq6_head *dfq6, struct fq6_head *diq6) { - struct ip6asfrag *af6, *down6; + struct ip6asfrag *af6 = NULL; + struct ip6asfrag *down6 = NULL; LCK_MTX_ASSERT(&ip6qlock, LCK_MTX_ASSERT_OWNED); @@ -925,10 +961,12 @@ frag6_freef(struct ip6q *q6, struct fq6_head *dfq6, struct fq6_head *diq6) frag6_deq(af6); /* - * Return ICMP time exceeded error for the 1st fragment. - * Just free other fragments. + * If caller wants to generate ICMP time-exceeded, + * as indicated by the argument diq6, return it for + * the first fragment and add others to the fragment + * free queue. 
*/ - if (af6->ip6af_off == 0) { + if (af6->ip6af_off == 0 && diq6 != NULL) { struct ip6_hdr *ip6; /* adjust pointer */ @@ -937,13 +975,28 @@ frag6_freef(struct ip6q *q6, struct fq6_head *dfq6, struct fq6_head *diq6) /* restore source and destination addresses */ ip6->ip6_src = q6->ip6q_src; ip6->ip6_dst = q6->ip6q_dst; - MBUFQ_ENQUEUE(diq6, m); } else { MBUFQ_ENQUEUE(dfq6, m); } ip6af_free(af6); } +} + +/* + * This routine removes the enqueued frames from the passed fragment + * header and enqueues those to dfq6 which is an out-arg for the dequeued + * fragments. + * If the caller also provides diq6, this routine also enqueues the 0 offset + * fragment to that list as it potentially gets used by the caller + * to prepare the relevant ICMPv6 error message (time exceeded or + * param problem). + * It also removes the fragment header object from the queue and frees it. + */ +static void +frag6_freef(struct ip6q *q6, struct fq6_head *dfq6, struct fq6_head *diq6) +{ + frag6_purgef(q6, dfq6, diq6); frag6_remque(q6); frag6_nfragpackets--; frag6_nfrags -= q6->ip6q_nfrag; @@ -1007,6 +1060,7 @@ frag6_timeout(void *arg) { #pragma unused(arg) struct fq6_head dfq6, diq6; + struct fq6_head *diq6_tmp = NULL; struct ip6q *q6; MBUFQ_INIT(&dfq6); /* for deferred frees */ @@ -1028,7 +1082,13 @@ frag6_timeout(void *arg) if (q6->ip6q_prev->ip6q_ttl == 0) { ip6stat.ip6s_fragtimeout++; /* XXX in6_ifstat_inc(ifp, ifs6_reass_fail) */ - frag6_freef(q6->ip6q_prev, &dfq6, &diq6); + /* + * Avoid sending ICMPv6 Time Exceeded for fragment headers + * that are marked dirty. + */ + diq6_tmp = (q6->ip6q_prev->ip6q_flags & IP6QF_DIRTY) ? + NULL : &diq6; + frag6_freef(q6->ip6q_prev, &dfq6, diq6_tmp); } } } @@ -1042,7 +1102,13 @@ frag6_timeout(void *arg) ip6q.ip6q_prev) { ip6stat.ip6s_fragoverflow++; /* XXX in6_ifstat_inc(ifp, ifs6_reass_fail) */ - frag6_freef(ip6q.ip6q_prev, &dfq6, &diq6); + /* + * Avoid sending ICMPv6 Time Exceeded for fragment headers + * that are marked dirty. + */ + diq6_tmp = (ip6q.ip6q_prev->ip6q_flags & IP6QF_DIRTY) ? + NULL : &diq6; + frag6_freef(ip6q.ip6q_prev, &dfq6, diq6_tmp); } } /* re-arm the purge timer if there's work to do */ @@ -1079,6 +1145,7 @@ void frag6_drain(void) { struct fq6_head dfq6, diq6; + struct fq6_head *diq6_tmp = NULL; MBUFQ_INIT(&dfq6); /* for deferred frees */ MBUFQ_INIT(&diq6); /* for deferred ICMP time exceeded errors */ @@ -1087,7 +1154,13 @@ frag6_drain(void) while (ip6q.ip6q_next != &ip6q) { ip6stat.ip6s_fragdropped++; /* XXX in6_ifstat_inc(ifp, ifs6_reass_fail) */ - frag6_freef(ip6q.ip6q_next, &dfq6, &diq6); + /* + * Avoid sending ICMPv6 Time Exceeded for fragment headers + * that are marked dirty. + */ + diq6_tmp = (ip6q.ip6q_next->ip6q_flags & IP6QF_DIRTY) ? 
+ NULL : &diq6; + frag6_freef(ip6q.ip6q_next, &dfq6, diq6_tmp); } lck_mtx_unlock(&ip6qlock); diff --git a/bsd/netinet6/icmp6.c b/bsd/netinet6/icmp6.c index b6f48a666..9c9fd07c8 100644 --- a/bsd/netinet6/icmp6.c +++ b/bsd/netinet6/icmp6.c @@ -162,7 +162,7 @@ static int icmp6_ratelimit(const struct in6_addr *, const int, const int); static const char *icmp6_redirect_diag(struct in6_addr *, struct in6_addr *, struct in6_addr *); static struct mbuf *ni6_input(struct mbuf *, int); -static struct mbuf *ni6_nametodns(const char *, int, int); +static struct mbuf *ni6_nametodns(const char *, uint32_t, int); static int ni6_dnsmatch(const char *, int, const char *, int); static int ni6_addrs(struct icmp6_nodeinfo *, struct ifnet **, char *); @@ -298,7 +298,6 @@ icmp6_error_flag(struct mbuf *m, int type, int code, int param, int flags) struct icmp6_hdr *icmp6; u_int preplen; int off; - int nxt; icmp6stat.icp6s_error++; @@ -355,37 +354,47 @@ icmp6_error_flag(struct mbuf *m, int type, int code, int param, int flags) /* * If we are about to send ICMPv6 against ICMPv6 error/redirect, * don't do it. + * + * We want to check for that for all ICMP error types, other than + * ICMP6_PARAM_PROB when it is being sent in response of first frag + * with incomplete header. + * That also includes the case when the first frag has incomplete ICMPv6 + * header. The check below in that case would fail the IP6_EXTHDR_CHECK + * and would otherwise prevent us from sending the error back. */ - nxt = -1; - off = ip6_lasthdr(m, 0, IPPROTO_IPV6, &nxt); - if (off >= 0 && nxt == IPPROTO_ICMPV6) { - struct icmp6_hdr *icp; + if (type != ICMP6_PARAM_PROB || + code != ICMP6_PARAMPROB_FIRSTFRAG_INCOMP_HDR) { + int nxt = -1; + off = ip6_lasthdr(m, 0, IPPROTO_IPV6, &nxt); + if (off >= 0 && nxt == IPPROTO_ICMPV6) { + struct icmp6_hdr *icp; #ifndef PULLDOWN_TEST - IP6_EXTHDR_CHECK(m, 0, off + sizeof(struct icmp6_hdr), return ); - icp = (struct icmp6_hdr *)(mtod(m, caddr_t) + off); + IP6_EXTHDR_CHECK(m, 0, off + sizeof(struct icmp6_hdr), return ); + icp = (struct icmp6_hdr *)(mtod(m, caddr_t) + off); #else - IP6_EXTHDR_GET(icp, struct icmp6_hdr *, m, off, - sizeof(*icp)); - if (icp == NULL) { - icmp6stat.icp6s_tooshort++; - return; - } + IP6_EXTHDR_GET(icp, struct icmp6_hdr *, m, off, + sizeof(*icp)); + if (icp == NULL) { + icmp6stat.icp6s_tooshort++; + return; + } #endif - if (icp->icmp6_type < ICMP6_ECHO_REQUEST || - icp->icmp6_type == ND_REDIRECT) { - /* - * ICMPv6 error - * Special case: for redirect (which is - * informational) we must not send icmp6 error. - */ - icmp6stat.icp6s_canterror++; - goto freeit; + if (icp->icmp6_type < ICMP6_ECHO_REQUEST || + icp->icmp6_type == ND_REDIRECT) { + /* + * ICMPv6 error + * Special case: for redirect (which is + * informational) we must not send icmp6 error. 
+ */ + icmp6stat.icp6s_canterror++; + goto freeit; + } else { + /* ICMPv6 informational - send the error */ + } } else { - /* ICMPv6 informational - send the error */ + /* non-ICMPv6 - send the error */ } - } else { - /* non-ICMPv6 - send the error */ } oip6 = mtod(m, struct ip6_hdr *); /* adjust pointer */ @@ -422,8 +431,8 @@ icmp6_error_flag(struct mbuf *m, int type, int code, int param, int flags) in6_clearscope(&oip6->ip6_dst); icmp6 = (struct icmp6_hdr *)(nip6 + 1); - icmp6->icmp6_type = type; - icmp6->icmp6_code = code; + icmp6->icmp6_type = (uint8_t)type; + icmp6->icmp6_code = (uint8_t)code; icmp6->icmp6_pptr = htonl((u_int32_t)param); /* @@ -607,6 +616,10 @@ icmp6_input(struct mbuf **mp, int *offp, int proto) case ICMP6_PACKET_TOO_BIG: icmp6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_pkttoobig); + if (ntohl(icmp6->icmp6_mtu) < IPV6_MMTU) { + icmp6stat.icp6s_badpkttoobig++; + goto freeit; + } code = PRC_MSGSIZE; @@ -890,11 +903,11 @@ icmp6_input(struct mbuf **mp, int *offp, int proto) if ((n = m_copym(m, 0, M_COPYALL, M_DONTWAIT)) == NULL) { /* give up local */ - icmp6_redirect_input(m, off); + icmp6_redirect_input(m, off, icmp6len); m = NULL; goto freeit; } - icmp6_redirect_input(n, off); + icmp6_redirect_input(n, off, icmp6len); /* m stays. */ goto rate_limit_checked; @@ -1262,7 +1275,7 @@ icmp6_mtudisc_update(struct ip6ctlparam *ip6cp, int validated) * - joins NI group address at in6_ifattach() time only, does not cope * with hostname changes by sethostname(3) */ -#define hostnamelen strlen(hostname) +#define hostnamelen (uint32_t)strlen(hostname) static struct mbuf * ni6_input(struct mbuf *m, int off) { @@ -1319,7 +1332,8 @@ ni6_input(struct mbuf *m, int off) * [RFC4602, Section 5.] */ if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst)) { - if (!IN6_IS_ADDR_MC_LINKLOCAL(&ip6->ip6_dst)) { + if (!IN6_IS_ADDR_MC_LINKLOCAL(&ip6->ip6_dst) && + !IN6_IS_ADDR_MC_UNICAST_BASED_LINKLOCAL(&ip6->ip6_dst)) { goto bad; } /* else it's a link-local multicast, fine */ @@ -1347,7 +1361,7 @@ ni6_input(struct mbuf *m, int off) if (ni6->ni_code == ICMP6_NI_SUBJ_FQDN && subjlen == 0) { break; } - /* FALLTHROUGH */ + OS_FALLTHROUGH; case NI_QTYPE_FQDN: case NI_QTYPE_NODEADDR: case NI_QTYPE_IPV4ADDR: @@ -1586,7 +1600,7 @@ ni6_input(struct mbuf *m, int off) nni6->ni_code = ICMP6_NI_SUCCESS; n->m_pkthdr.len = n->m_len = sizeof(struct ip6_hdr) + sizeof(struct icmp6_nodeinfo); - lenlim = M_TRAILINGSPACE(n); + lenlim = (int)M_TRAILINGSPACE(n); copied = ni6_store_addrs(ni6, nni6, ifp, lenlim); /* XXX: reset mbuf length */ n->m_pkthdr.len = n->m_len = sizeof(struct ip6_hdr) + @@ -1625,13 +1639,14 @@ bad: static struct mbuf * ni6_nametodns( const char *name, - int namelen, + uint32_t namelen, int old) /* return pascal string if non-zero */ { struct mbuf *m; char *cp, *ep; const char *p, *q; - int i, len, nterm; + int i, nterm; + uint32_t len; if (old) { len = namelen + 1; @@ -1654,7 +1669,7 @@ ni6_nametodns( if (old) { m->m_len = len; - *mtod(m, char *) = namelen; + *mtod(m, char *) = (char)namelen; bcopy(name, mtod(m, char *) + 1, namelen); return m; } else { @@ -1701,7 +1716,7 @@ ni6_nametodns( if (i <= 0 || i >= 64) { goto fail; } - *cp++ = i; + *cp++ = (char)i; bcopy(p, cp, i); cp += i; p = q; @@ -1716,7 +1731,7 @@ ni6_nametodns( while (nterm-- > 0) { *cp++ = '\0'; } - m->m_len = cp - mtod(m, char *); + m->m_len = (int32_t)(cp - mtod(m, char *)); return m; } @@ -2058,7 +2073,7 @@ again: ltime = ND6_INFINITE_LIFETIME; } else { if (lt->ia6ti_expire > now) { - ltime = htonl(lt->ia6ti_expire - now); + ltime = 
htonl((uint32_t)(lt->ia6ti_expire - now)); } else { ltime = 0; } @@ -2168,6 +2183,10 @@ icmp6_rip6_input(struct mbuf **mp, int off) struct mbuf *n; if ((n = m_copy(m, 0, (int)M_COPYALL)) != NULL) { if ((last->in6p_flags & INP_CONTROLOPTS) != 0 || +#if CONTENT_FILTER + /* Content Filter needs to see local address */ + (last->in6p_socket->so_cfil_db != NULL) || +#endif (last->in6p_socket->so_options & SO_TIMESTAMP) != 0 || (last->in6p_socket->so_options & SO_TIMESTAMP_MONOTONIC) != 0 || (last->in6p_socket->so_options & SO_TIMESTAMP_CONTINUOUS) != 0) { @@ -2194,6 +2213,10 @@ icmp6_rip6_input(struct mbuf **mp, int off) } if (last) { if ((last->in6p_flags & INP_CONTROLOPTS) != 0 || +#if CONTENT_FILTER + /* Content Filter needs to see local address */ + (last->in6p_socket->so_cfil_db != NULL) || +#endif (last->in6p_socket->so_options & SO_TIMESTAMP) != 0 || (last->in6p_socket->so_options & SO_TIMESTAMP_MONOTONIC) != 0 || (last->in6p_socket->so_options & SO_TIMESTAMP_CONTINUOUS) != 0) { @@ -2274,10 +2297,10 @@ icmp6_reflect(struct mbuf *m, size_t off) l = off - sizeof(struct ip6_hdr); m_copydata(m, 0, sizeof(nip6), (caddr_t)&nip6); - m_adj(m, l); + m_adj(m, (int)l); l = sizeof(struct ip6_hdr) + sizeof(struct icmp6_hdr); if (m->m_len < l) { - if ((m_ip6hdr = m_pulldown(m, 0, l, NULL)) == NULL) { + if ((m_ip6hdr = m_pulldown(m, 0, (int)l, NULL)) == NULL) { return; } } @@ -2286,7 +2309,7 @@ icmp6_reflect(struct mbuf *m, size_t off) size_t l; l = sizeof(struct ip6_hdr) + sizeof(struct icmp6_hdr); if (m->m_len < l) { - if ((m_ip6hdr = m_pulldown(m, 0, l, NULL)) == NULL) { + if ((m_ip6hdr = m_pulldown(m, 0, (int)l, NULL)) == NULL) { return; } } @@ -2412,7 +2435,7 @@ icmp6_reflect(struct mbuf *m, size_t off) ip6->ip6_hlim = ndi->chlim; lck_mtx_unlock(&ndi->lock); } else { - ip6->ip6_hlim = ip6_defhlim; + ip6->ip6_hlim = (uint8_t)ip6_defhlim; } /* Use the same traffic class as in the request to match IPv4 */ icmp6->icmp6_cksum = 0; @@ -2458,13 +2481,12 @@ icmp6_redirect_diag(struct in6_addr *src6, } void -icmp6_redirect_input(struct mbuf *m, int off) +icmp6_redirect_input(struct mbuf *m, int off, int icmp6len) { struct ifnet *ifp = NULL; struct ip6_hdr *ip6 = NULL; struct nd_redirect *nd_rd = NULL; char *lladdr = NULL; - int icmp6len = 0; int lladdrlen = 0; u_char *redirhdr = NULL; int redirhdrlen = 0; @@ -2489,7 +2511,7 @@ icmp6_redirect_input(struct mbuf *m, int off) * If we are an advertising router on this interface, * don't update route by icmp6 redirect. */ - if (ifp->if_eflags & IFEF_IPV6_ROUTER) { + if (ifp->if_ipv6_router_mode == IPV6_ROUTER_MODE_EXCLUSIVE) { goto freeit; } if (!icmp6_rediraccept) { @@ -2497,7 +2519,6 @@ icmp6_redirect_input(struct mbuf *m, int off) } ip6 = mtod(m, struct ip6_hdr *); - icmp6len = ntohs(ip6->ip6_plen); src6 = ip6->ip6_src; #ifndef PULLDOWN_TEST IP6_EXTHDR_CHECK(m, off, icmp6len, return ); @@ -2717,7 +2738,7 @@ icmp6_redirect_output(struct mbuf *m0, struct rtentry *rt) * If we are not a router to begin with, or not an advertising * router on this interface, don't send icmp6 redirect. 
*/ - if (!ip6_forwarding || !(ifp->if_eflags & IFEF_IPV6_ROUTER)) { + if (!ip6_forwarding || ifp->if_ipv6_router_mode != IPV6_ROUTER_MODE_EXCLUSIVE) { goto fail; } @@ -2767,7 +2788,7 @@ icmp6_redirect_output(struct mbuf *m0, struct rtentry *rt) m->m_pkthdr.rcvif = NULL; m->m_len = 0; maxlen = M_TRAILINGSPACE(m); - maxlen = min(IPV6_MMTU, maxlen); + maxlen = MIN(IPV6_MMTU, maxlen); /* just for safety */ if (maxlen < sizeof(struct ip6_hdr) + sizeof(struct icmp6_hdr) + ((sizeof(struct nd_opt_hdr) + ifp->if_addrlen + 7) & ~7)) { @@ -2875,7 +2896,7 @@ icmp6_redirect_output(struct mbuf *m0, struct rtentry *rt) rt_router->rt_gateway) && sdl->sdl_alen) { nd_opt = (struct nd_opt_hdr *)p; nd_opt->nd_opt_type = ND_OPT_TARGET_LINKADDR; - nd_opt->nd_opt_len = len >> 3; + nd_opt->nd_opt_len = (uint8_t)(len >> 3); lladdr = (char *)(nd_opt + 1); bcopy(LLADDR(sdl), lladdr, ifp->if_addrlen); p += len; @@ -2886,7 +2907,7 @@ icmp6_redirect_output(struct mbuf *m0, struct rtentry *rt) nolladdropt:; - m->m_pkthdr.len = m->m_len = p - (u_char *)ip6; + m->m_pkthdr.len = m->m_len = (int32_t)(p - (u_char *)ip6); /* just to be safe */ #ifdef M_DECRYPTED /*not openbsd*/ @@ -2907,7 +2928,7 @@ nolladdropt:; * compute the maximum size for icmp6 redirect header option. * XXX room for auth header? */ - len = maxlen - (p - (u_char *)ip6); + len = (int)(maxlen - (p - (u_char *)ip6)); len &= ~7; /* This is just for simplicity. */ @@ -2960,9 +2981,9 @@ nolladdropt:; nd_opt_rh = (struct nd_opt_rd_hdr *)p; bzero(nd_opt_rh, sizeof(*nd_opt_rh)); nd_opt_rh->nd_opt_rh_type = ND_OPT_REDIRECTED_HEADER; - nd_opt_rh->nd_opt_rh_len = len >> 3; + nd_opt_rh->nd_opt_rh_len = (uint8_t)(len >> 3); p += sizeof(*nd_opt_rh); - m->m_pkthdr.len = m->m_len = p - (u_char *)ip6; + m->m_pkthdr.len = m->m_len = (int32_t)(p - (u_char *)ip6); /* connect m0 to m */ m->m_next = m0; @@ -2976,7 +2997,7 @@ noredhdropt:; in6_clearscope(&nd_rd->nd_rd_target); in6_clearscope(&nd_rd->nd_rd_dst); - ip6->ip6_plen = htons(m->m_pkthdr.len - sizeof(struct ip6_hdr)); + ip6->ip6_plen = htons((uint16_t)(m->m_pkthdr.len - sizeof(struct ip6_hdr))); nd_rd->nd_rd_cksum = 0; nd_rd->nd_rd_cksum @@ -2985,6 +3006,7 @@ noredhdropt:; /* send the packet to outside... 
*/ ip6oa.ip6oa_boundif = ifp->if_index; ip6oa.ip6oa_flags |= IP6OAF_BOUND_IF; + ip6oa.ip6oa_flags |= IP6OAF_DONT_FRAG; ip6_output(m, NULL, NULL, IPV6_OUTARGS, NULL, &outif, &ip6oa); if (outif) { @@ -3015,7 +3037,7 @@ int icmp6_ctloutput(struct socket *so, struct sockopt *sopt) { int error = 0; - int optlen; + size_t optlen; struct inpcb *inp = sotoinpcb(so); int level, op, optname; @@ -3076,7 +3098,7 @@ icmp6_ctloutput(struct socket *so, struct sockopt *sopt) break; } error = sooptcopyout(sopt, inp->in6p_icmp6filt, - min(sizeof(struct icmp6_filter), optlen)); + MIN(sizeof(struct icmp6_filter), optlen)); break; } diff --git a/bsd/netinet6/in6.c b/bsd/netinet6/in6.c index 176bd11cc..dd680119e 100644 --- a/bsd/netinet6/in6.c +++ b/bsd/netinet6/in6.c @@ -164,9 +164,12 @@ const struct in6_addr in6addr_linklocal_allrouters = IN6ADDR_LINKLOCAL_ALLROUTERS_INIT; const struct in6_addr in6addr_linklocal_allv2routers = IN6ADDR_LINKLOCAL_ALLV2ROUTERS_INIT; +const struct in6_addr in6addr_multicast_prefix = + IN6ADDR_MULTICAST_PREFIX; const struct in6_addr in6mask0 = IN6MASK0; const struct in6_addr in6mask7 = IN6MASK7; +const struct in6_addr in6mask8 = IN6MASK8; const struct in6_addr in6mask16 = IN6MASK16; const struct in6_addr in6mask32 = IN6MASK32; const struct in6_addr in6mask64 = IN6MASK64; @@ -196,26 +199,24 @@ static int in6ctl_alifetime(struct in6_ifaddr *, u_long, struct in6_ifreq *, static int in6ctl_aifaddr(struct ifnet *, struct in6_aliasreq *); static void in6ctl_difaddr(struct ifnet *, struct in6_ifaddr *); static int in6_autoconf(struct ifnet *, int); -static int in6_setrouter(struct ifnet *, int); +static int in6_setrouter(struct ifnet *, ipv6_router_mode_t); static int in6_ifinit(struct ifnet *, struct in6_ifaddr *, int); static int in6_ifaupdate_aux(struct in6_ifaddr *, struct ifnet *, int); static void in6_unlink_ifa(struct in6_ifaddr *, struct ifnet *); -static struct in6_ifaddr *in6_ifaddr_alloc(int); +static struct in6_ifaddr *in6_ifaddr_alloc(zalloc_flags_t); static void in6_ifaddr_attached(struct ifaddr *); static void in6_ifaddr_detached(struct ifaddr *); static void in6_ifaddr_free(struct ifaddr *); static void in6_ifaddr_trace(struct ifaddr *, int); #if defined(__LP64__) -static void in6_cgareq_32_to_64(struct in6_cgareq_32 *, +static void in6_cgareq_32_to_64(const struct in6_cgareq_32 *, struct in6_cgareq_64 *); #else -static void in6_cgareq_64_to_32(struct in6_cgareq_64 *, +static void in6_cgareq_64_to_32(const struct in6_cgareq_64 *, struct in6_cgareq_32 *); #endif static struct in6_aliasreq *in6_aliasreq_to_native(void *, int, struct in6_aliasreq *); -static struct in6_cgareq *in6_cgareq_to_native(void *, int, - struct in6_cgareq *); static int in6_to_kamescope(struct sockaddr_in6 *, struct ifnet *); static int in6_getassocids(struct socket *, uint32_t *, user_addr_t); static int in6_getconnids(struct socket *, sae_associd_t, uint32_t *, @@ -273,10 +274,7 @@ static unsigned int in6ifa_debug = 1; /* debugging (enabled) */ #else static unsigned int in6ifa_debug; /* debugging (disabled) */ #endif /* !DEBUG */ -static unsigned int in6ifa_size; /* size of zone element */ static struct zone *in6ifa_zone; /* zone for in6_ifaddr */ - -#define IN6IFA_ZONE_MAX 64 /* maximum elements in zone */ #define IN6IFA_ZONE_NAME "in6_ifaddr" /* zone name */ struct eventhandler_lists_ctxt in6_evhdlr_ctxt; @@ -516,8 +514,8 @@ in6_aliasreq_64_to_32(struct in6_aliasreq_64 *src, struct in6_aliasreq_32 *dst) dst->ifra_dstaddr = src->ifra_dstaddr; dst->ifra_prefixmask = src->ifra_prefixmask; 
dst->ifra_flags = src->ifra_flags; - dst->ifra_lifetime.ia6t_expire = src->ifra_lifetime.ia6t_expire; - dst->ifra_lifetime.ia6t_preferred = src->ifra_lifetime.ia6t_preferred; + dst->ifra_lifetime.ia6t_expire = (u_int32_t)src->ifra_lifetime.ia6t_expire; + dst->ifra_lifetime.ia6t_preferred = (u_int32_t)src->ifra_lifetime.ia6t_preferred; dst->ifra_lifetime.ia6t_vltime = src->ifra_lifetime.ia6t_vltime; dst->ifra_lifetime.ia6t_pltime = src->ifra_lifetime.ia6t_pltime; } @@ -538,8 +536,8 @@ in6_aliasreq_32_to_64(struct in6_aliasreq_32 *src, struct in6_aliasreq_64 *dst) } #if defined(__LP64__) -void -in6_cgareq_32_to_64(struct in6_cgareq_32 *src, +static void +in6_cgareq_32_to_64(const struct in6_cgareq_32 *src, struct in6_cgareq_64 *dst) { bzero(dst, sizeof(*dst)); @@ -554,12 +552,13 @@ in6_cgareq_32_to_64(struct in6_cgareq_32 *src, dst->cgar_lifetime.ia6t_preferred = src->cgar_lifetime.ia6t_preferred; dst->cgar_lifetime.ia6t_vltime = src->cgar_lifetime.ia6t_vltime; dst->cgar_lifetime.ia6t_pltime = src->cgar_lifetime.ia6t_pltime; + dst->cgar_collision_count = src->cgar_collision_count; } #endif #if !defined(__LP64__) -void -in6_cgareq_64_to_32(struct in6_cgareq_64 *src, +static void +in6_cgareq_64_to_32(const struct in6_cgareq_64 *src, struct in6_cgareq_32 *dst) { bzero(dst, sizeof(*dst)); @@ -574,6 +573,7 @@ in6_cgareq_64_to_32(struct in6_cgareq_64 *src, dst->cgar_lifetime.ia6t_preferred = src->cgar_lifetime.ia6t_preferred; dst->cgar_lifetime.ia6t_vltime = src->cgar_lifetime.ia6t_vltime; dst->cgar_lifetime.ia6t_pltime = src->cgar_lifetime.ia6t_pltime; + dst->cgar_collision_count = src->cgar_collision_count; } #endif @@ -598,25 +598,25 @@ in6_aliasreq_to_native(void *data, int data_is_64, struct in6_aliasreq *dst) return dst; } -static struct in6_cgareq * -in6_cgareq_to_native(void *data, int is64, struct in6_cgareq *dst) +void +in6_cgareq_copy_from_user(const void *user_data, int user_is_64, + struct in6_cgareq *cgareq) { #if defined(__LP64__) - if (is64) { - bcopy(data, dst, sizeof(*dst)); + if (user_is_64) { + bcopy(user_data, cgareq, sizeof(*cgareq)); } else { - in6_cgareq_32_to_64((struct in6_cgareq_32 *)data, - (struct in6_cgareq_64 *)dst); + in6_cgareq_32_to_64((const struct in6_cgareq_32 *)user_data, + (struct in6_cgareq_64 *)cgareq); } #else - if (is64) { - in6_cgareq_64_to_32((struct in6_cgareq_64 *)data, - (struct in6_cgareq_32 *)dst); + if (user_is_64) { + in6_cgareq_64_to_32((const struct in6_cgareq_64 *)user_data, + (struct in6_cgareq_32 *)cgareq); } else { - bcopy(data, dst, sizeof(*dst)); + bcopy(user_data, cgareq, sizeof(*cgareq)); } #endif /* __LP64__ */ - return dst; } static __attribute__((noinline)) int @@ -870,7 +870,7 @@ in6ctl_cgastart(struct ifnet *ifp, u_long cmd, caddr_t data) * to be centralized, so that it can be passed around to other * routines that are expecting the kernel form. 
*/ - in6_cgareq_to_native(data, is64, &llcgasr); + in6_cgareq_copy_from_user(data, is64, &llcgasr); /* * NOTE: All the interface specific DLIL attachements @@ -1160,7 +1160,7 @@ in6ctl_clat46start(struct ifnet *ifp) if ((pr->ndpr_stateflags & NDPRF_CLAT46) == 0 && pr->ndpr_vltime != 0) { - NDPR_ADDREF_LOCKED(pr); /* Take reference for rest of the processing */ + NDPR_ADDREF(pr); /* Take reference for rest of the processing */ NDPR_UNLOCK(pr); break; } else { @@ -1171,14 +1171,16 @@ in6ctl_clat46start(struct ifnet *ifp) lck_mtx_unlock(nd6_mutex); if (pr != NULL) { - if ((ia6 = in6_pfx_newpersistaddr(pr, FALSE, &error, TRUE)) == NULL) { - nd6log0(error, "Could not configure CLAT46 address on interface " - "%s.\n", ifp->if_xname); + if ((ia6 = in6_pfx_newpersistaddr(pr, FALSE, &error, + TRUE, CLAT46_COLLISION_COUNT_OFFSET)) == NULL) { + nd6log0(error, + "Could not configure CLAT46 address on" + " interface %s.\n", ifp->if_xname); } else { IFA_LOCK(&ia6->ia_ifa); NDPR_LOCK(pr); ia6->ia6_ndpr = pr; - NDPR_ADDREF_LOCKED(pr); /* for addr reference */ + NDPR_ADDREF(pr); /* for addr reference */ pr->ndpr_stateflags |= NDPRF_CLAT46; pr->ndpr_addrcnt++; VERIFY(pr->ndpr_addrcnt != 0); @@ -1331,7 +1333,7 @@ in6_control(struct socket *so, u_long cmd, caddr_t data, struct ifnet *ifp, } error = in6ctl_clat46start(ifp); if (error == 0) { - ifp->if_eflags |= IFEF_CLAT46; + if_set_eflags(ifp, IFEF_CLAT46); } goto done; @@ -1347,7 +1349,12 @@ in6_control(struct socket *so, u_long cmd, caddr_t data, struct ifnet *ifp, * flushed. * XXX Probably STOP equivalent is not needed here. */ - ifp->if_eflags &= ~IFEF_CLAT46; + if_clear_eflags(ifp, IFEF_CLAT46); + goto done; + case SIOCGETROUTERMODE_IN6: /* struct in6_ifreq */ + intval = ifp->if_ipv6_router_mode; + bcopy(&intval, &((struct in6_ifreq *)(void *)data)->ifr_intval, + sizeof(intval)); goto done; case SIOCSETROUTERMODE_IN6: /* struct in6_ifreq */ if (!privileged) { @@ -1356,8 +1363,16 @@ in6_control(struct socket *so, u_long cmd, caddr_t data, struct ifnet *ifp, } bcopy(&((struct in6_ifreq *)(void *)data)->ifr_intval, &intval, sizeof(intval)); - - error = in6_setrouter(ifp, intval); + switch (intval) { + case IPV6_ROUTER_MODE_DISABLED: + case IPV6_ROUTER_MODE_EXCLUSIVE: + case IPV6_ROUTER_MODE_HYBRID: + break; + default: + error = EINVAL; + goto done; + } + error = in6_setrouter(ifp, (ipv6_router_mode_t)intval); goto done; case SIOCPROTOATTACH_IN6_32: /* struct in6_aliasreq_32 */ @@ -1389,13 +1404,15 @@ in6_control(struct socket *so, u_long cmd, caddr_t data, struct ifnet *ifp, case SIOCSDEFIFACE_IN6_32: /* struct in6_ndifreq_32 */ case SIOCSDEFIFACE_IN6_64: /* struct in6_ndifreq_64 */ case SIOCSIFINFO_FLAGS: /* struct in6_ndireq */ - case SIOCGIFCGAPREP_IN6: /* struct in6_ifreq */ - case SIOCSIFCGAPREP_IN6: /* struct in6_ifreq */ + case SIOCGIFCGAPREP_IN6_32: /* struct in6_cgareq_32 */ + case SIOCGIFCGAPREP_IN6_64: /* struct in6_cgareq_64 */ + case SIOCSIFCGAPREP_IN6_32: /* struct in6_cgareq_32 */ + case SIOCSIFCGAPREP_IN6_64: /* struct in6_cgareq_32 */ if (!privileged) { error = EPERM; goto done; } - /* FALLTHRU */ + OS_FALLTHROUGH; case OSIOCGIFINFO_IN6: /* struct in6_ondireq */ case SIOCGIFINFO_IN6: /* struct in6_ondireq */ case SIOCGDRLST_IN6_32: /* struct in6_drlist_32 */ @@ -1481,7 +1498,7 @@ in6_control(struct socket *so, u_long cmd, caddr_t data, struct ifnet *ifp, error = EPERM; goto done; } - /* FALLTHRU */ + OS_FALLTHROUGH; case SIOCGIFADDR_IN6: /* struct in6_ifreq */ case SIOCGIFDSTADDR_IN6: /* struct in6_ifreq */ case SIOCGIFNETMASK_IN6: /* 
struct in6_ifreq */ @@ -1556,7 +1573,7 @@ in6_control(struct socket *so, u_long cmd, caddr_t data, struct ifnet *ifp, error = EADDRNOTAVAIL; goto done; } - /* FALLTHROUGH */ + OS_FALLTHROUGH; case SIOCAIFADDR_IN6_32: /* struct in6_aliasreq_32 */ case SIOCAIFADDR_IN6_64: /* struct in6_aliasreq_64 */ VERIFY(sa6 != NULL); @@ -1639,7 +1656,8 @@ done: static __attribute__((noinline)) int in6ctl_aifaddr(struct ifnet *ifp, struct in6_aliasreq *ifra) { - int i, error, addtmp, plen; + int i, error, addtmp; + uint8_t plen; struct nd_prefix pr0, *pr; struct in6_ifaddr *ia; @@ -1671,7 +1689,7 @@ in6ctl_aifaddr(struct ifnet *ifp, struct in6_aliasreq *ifra) VERIFY(ia != NULL); /* Now, make the prefix on-link on the interface. */ - plen = in6_mask2len(&ifra->ifra_prefixmask.sin6_addr, NULL); + plen = (uint8_t)in6_mask2len(&ifra->ifra_prefixmask.sin6_addr, NULL); if (plen == 128) { goto done; } @@ -1706,8 +1724,13 @@ in6ctl_aifaddr(struct ifnet *ifp, struct in6_aliasreq *ifra) */ pr0.ndpr_raf_onlink = 1; /* should be configurable? */ pr0.ndpr_raf_auto = !!(ifra->ifra_flags & IN6_IFF_AUTOCONF); - pr0.ndpr_vltime = ifra->ifra_lifetime.ia6t_vltime; - pr0.ndpr_pltime = ifra->ifra_lifetime.ia6t_pltime; + if (ifra->ifra_flags & (IN6_IFF_AUTOCONF | IN6_IFF_DYNAMIC)) { + pr0.ndpr_vltime = ifra->ifra_lifetime.ia6t_vltime; + pr0.ndpr_pltime = ifra->ifra_lifetime.ia6t_pltime; + } else { + pr0.ndpr_vltime = ND6_INFINITE_LIFETIME; + pr0.ndpr_pltime = ND6_INFINITE_LIFETIME; + } pr0.ndpr_stateflags |= NDPRF_STATIC; lck_mtx_init(&pr0.ndpr_lock, ifa_mtx_grp, ifa_mtx_attr); @@ -1739,7 +1762,7 @@ in6ctl_aifaddr(struct ifnet *ifp, struct in6_aliasreq *ifra) ++pr->ndpr_addrcnt; VERIFY(pr->ndpr_addrcnt != 0); ia->ia6_ndpr = pr; - NDPR_ADDREF_LOCKED(pr); /* for addr reference */ + NDPR_ADDREF(pr); /* for addr reference */ /* * If this is the first autoconf address from the prefix, @@ -1804,7 +1827,7 @@ in6ctl_difaddr(struct ifnet *ifp, struct in6_ifaddr *ia) IFA_LOCK(&ia->ia_ifa); bzero(&pr0, sizeof(pr0)); pr0.ndpr_ifp = ifp; - pr0.ndpr_plen = in6_mask2len(&ia->ia_prefixmask.sin6_addr, NULL); + pr0.ndpr_plen = (uint8_t)in6_mask2len(&ia->ia_prefixmask.sin6_addr, NULL); if (pr0.ndpr_plen == 128) { IFA_UNLOCK(&ia->ia_ifa); goto purgeaddr; @@ -1854,19 +1877,17 @@ in6_autoconf(struct ifnet *ifp, int enable) * SIOCAUTOCONF_START from being set in that mode. */ ifnet_lock_exclusive(ifp); - if (ifp->if_eflags & IFEF_IPV6_ROUTER) { - ifp->if_eflags &= ~IFEF_ACCEPT_RTADV; + if (ifp->if_ipv6_router_mode == IPV6_ROUTER_MODE_EXCLUSIVE) { + if_clear_eflags(ifp, IFEF_ACCEPT_RTADV); error = EBUSY; } else { - ifp->if_eflags |= IFEF_ACCEPT_RTADV; + if_set_eflags(ifp, IFEF_ACCEPT_RTADV); } ifnet_lock_done(ifp); } else { struct in6_ifaddr *ia = NULL; - ifnet_lock_exclusive(ifp); - ifp->if_eflags &= ~IFEF_ACCEPT_RTADV; - ifnet_lock_done(ifp); + if_clear_eflags(ifp, IFEF_ACCEPT_RTADV); /* Remove autoconfigured address from interface */ lck_rw_lock_exclusive(&in6_ifaddr_rwlock); @@ -1903,20 +1924,28 @@ in6_autoconf(struct ifnet *ifp, int enable) } /* - * Handle SIOCSETROUTERMODE_IN6 to set or clear the IPv6 router mode flag on - * the interface. Entering or exiting this mode will result in the removal of + * Handle SIOCSETROUTERMODE_IN6 to set the IPv6 router mode on the interface + * Entering or exiting IPV6_ROUTER_MODE_EXCLUSIVE will result in the removal of * autoconfigured IPv6 addresses on the interface. 
*/ static __attribute__((noinline)) int -in6_setrouter(struct ifnet *ifp, int enable) +in6_setrouter(struct ifnet *ifp, ipv6_router_mode_t mode) { + int error = 0; + ipv6_router_mode_t prev_mode; + VERIFY(ifp != NULL); if (ifp->if_flags & IFF_LOOPBACK) { return ENODEV; } - if (enable) { + prev_mode = ifp->if_ipv6_router_mode; + if (prev_mode == mode) { + /* no mode change, there's nothing to do */ + return 0; + } + if (mode == IPV6_ROUTER_MODE_EXCLUSIVE) { struct nd_ifinfo *ndi = NULL; ndi = ND_IFINFO(ifp); @@ -1933,21 +1962,17 @@ in6_setrouter(struct ifnet *ifp, int enable) } } - ifnet_lock_exclusive(ifp); - if (enable) { - ifp->if_eflags |= IFEF_IPV6_ROUTER; - } else { - ifp->if_eflags &= ~IFEF_IPV6_ROUTER; - } - ifnet_lock_done(ifp); - + ifp->if_ipv6_router_mode = mode; lck_mtx_lock(nd6_mutex); - defrouter_select(ifp); + defrouter_select(ifp, NULL); lck_mtx_unlock(nd6_mutex); - - if_allmulti(ifp, enable); - - return in6_autoconf(ifp, FALSE); + if_allmulti(ifp, (mode == IPV6_ROUTER_MODE_EXCLUSIVE)); + if (mode == IPV6_ROUTER_MODE_EXCLUSIVE || + (prev_mode == IPV6_ROUTER_MODE_EXCLUSIVE + && mode == IPV6_ROUTER_MODE_DISABLED)) { + error = in6_autoconf(ifp, FALSE); + } + return error; } static int @@ -2215,9 +2240,46 @@ in6_ifaupdate_aux(struct in6_ifaddr *ia, struct ifnet *ifp, int ifaupflags) IFA_LOCK(ifa); LIST_INSERT_HEAD(&ia->ia6_memberships, imm, i6mm_chain); IFA_UNLOCK(ifa); - } #undef MLTMASK_LEN + /* + * create a ff00::/8 route + */ + bzero(&mltmask, sizeof(mltmask)); + mltmask.sin6_len = sizeof(struct sockaddr_in6); + mltmask.sin6_family = AF_INET6; + mltmask.sin6_addr = in6mask8; +#define MLTMASK_LEN_8_BITS 1 /* ff00::/8 mltmask's masklen (=8bit=1octet) */ + + bzero(&mltaddr, sizeof(mltaddr)); + mltaddr.sin6_len = sizeof(struct sockaddr_in6); + mltaddr.sin6_family = AF_INET6; + mltaddr.sin6_addr = in6addr_multicast_prefix; + + rt = rtalloc1_scoped((struct sockaddr *)&mltaddr, 0, 0UL, + ia->ia_ifp->if_index); + if (rt) { + if (memcmp(&mltaddr.sin6_addr, &((struct sockaddr_in6 *) + (void *)rt_key(rt))->sin6_addr, MLTMASK_LEN_8_BITS)) { + rtfree(rt); + rt = NULL; + } + } + if (!rt) { + error = rtrequest_scoped(RTM_ADD, + (struct sockaddr *)&mltaddr, + (struct sockaddr *)&ia->ia_addr, + (struct sockaddr *)&mltmask, RTF_UP | RTF_CLONING, + NULL, ia->ia_ifp->if_index); + if (error) { + goto unwind; + } + } else { + rtfree(rt); + } + } +#undef MLTMASK_LEN_8_BITS + /* Ensure nd6_service() is scheduled as soon as it's convenient */ ++nd6_sched_timeout_want; @@ -2330,7 +2392,7 @@ in6_update_ifa(struct ifnet *ifp, struct in6_aliasreq *ifra, int ifaupflags, } /* Validate prefix length extracted from ifra_prefixmask structure. */ - plen = in6_mask2len(&ifra->ifra_prefixmask.sin6_addr, + plen = (uint8_t)in6_mask2len(&ifra->ifra_prefixmask.sin6_addr, (u_char *)&ifra->ifra_prefixmask + ifra->ifra_prefixmask.sin6_len); if (plen <= 0) { error = EINVAL; @@ -2361,7 +2423,26 @@ in6_update_ifa(struct ifnet *ifp, struct in6_aliasreq *ifra, int ifaupflags, * address already exists. If so, then we don't allocate and link a * new one here. 
*/ - ia = in6ifa_ifpwithaddr(ifp, &ifra->ifra_addr.sin6_addr); + struct sockaddr_in6 lookup_address = ifra->ifra_addr; + if (IN6_IS_ADDR_LINKLOCAL(&lookup_address.sin6_addr)) { + if (lookup_address.sin6_addr.s6_addr16[1] == 0) { + /* link ID is not embedded by the user */ + lookup_address.sin6_addr.s6_addr16[1] = + htons(ifp->if_index); + } else if (lookup_address.sin6_addr.s6_addr16[1] != + htons(ifp->if_index)) { + error = EINVAL; /* link ID contradicts */ + goto done; + } + if (lookup_address.sin6_scope_id != 0 && + lookup_address.sin6_scope_id != + (u_int32_t)ifp->if_index) { + error = EINVAL; + goto done; + } + } + + ia = in6ifa_ifpwithaddr(ifp, &lookup_address.sin6_addr); if (ia != NULL) { ifa = &ia->ia_ifa; } @@ -2400,13 +2481,13 @@ in6_update_ifa(struct ifnet *ifp, struct in6_aliasreq *ifra, int ifaupflags, timenow = net_uptime(); if (ia == NULL) { - int how; + zalloc_flags_t how; /* Is this the first new IPv6 address for the interface? */ ifaupflags |= IN6_IFAUPDATE_NEWADDR; /* Allocate memory for IPv6 interface address structure. */ - how = !(ifaupflags & IN6_IFAUPDATE_NOWAIT) ? M_WAITOK : 0; + how = (ifaupflags & IN6_IFAUPDATE_NOWAIT) ? Z_NOWAIT : Z_WAITOK; ia = in6_ifaddr_alloc(how); if (ia == NULL) { error = ENOBUFS; @@ -3128,7 +3209,7 @@ in6_localaddr(struct in6_addr *in6) { struct in6_ifaddr *ia; - if (IN6_IS_ADDR_LOOPBACK(in6) || IN6_IS_ADDR_LINKLOCAL(in6)) { + if (IN6_IS_ADDR_LOOPBACK(in6) || IN6_IS_ADDR_LINKLOCAL(in6) || IN6_IS_ADDR_MC_UNICAST_BASED_LINKLOCAL(in6)) { return 1; } @@ -3992,8 +4073,8 @@ in6_post_msg(struct ifnet *ifp, u_int32_t event_code, struct in6_ifaddr *ifa, /* retrieve time as calendar time (last arg is 1) */ in6ifa_getlifetime(ifa, &ia6_lt, 1); - in6_event_data.ia_lifetime.ia6t_expire = ia6_lt.ia6t_expire; - in6_event_data.ia_lifetime.ia6t_preferred = ia6_lt.ia6t_preferred; + in6_event_data.ia_lifetime.ia6t_expire = (u_int32_t)ia6_lt.ia6t_expire; + in6_event_data.ia_lifetime.ia6t_preferred = (u_int32_t)ia6_lt.ia6t_preferred; in6_event_data.ia_lifetime.ia6t_vltime = ia6_lt.ia6t_vltime; in6_event_data.ia_lifetime.ia6t_pltime = ia6_lt.ia6t_pltime; IFA_UNLOCK(&ifa->ia_ifa); @@ -4029,31 +4110,22 @@ in6_ifaddr_init(void) PE_parse_boot_argn("ifa_debug", &in6ifa_debug, sizeof(in6ifa_debug)); - in6ifa_size = (in6ifa_debug == 0) ? sizeof(struct in6_ifaddr) : + vm_size_t in6ifa_size = (in6ifa_debug == 0) ? sizeof(struct in6_ifaddr) : sizeof(struct in6_ifaddr_dbg); - in6ifa_zone = zinit(in6ifa_size, IN6IFA_ZONE_MAX * in6ifa_size, - 0, IN6IFA_ZONE_NAME); - if (in6ifa_zone == NULL) { - panic("%s: failed allocating %s", __func__, IN6IFA_ZONE_NAME); - /* NOTREACHED */ - } - zone_change(in6ifa_zone, Z_EXPAND, TRUE); - zone_change(in6ifa_zone, Z_CALLERACCT, FALSE); + in6ifa_zone = zone_create(IN6IFA_ZONE_NAME, in6ifa_size, ZC_ZFREE_CLEARMEM); lck_mtx_init(&in6ifa_trash_lock, ifa_mtx_grp, ifa_mtx_attr); TAILQ_INIT(&in6ifa_trash_head); } static struct in6_ifaddr * -in6_ifaddr_alloc(int how) +in6_ifaddr_alloc(zalloc_flags_t how) { struct in6_ifaddr *in6ifa; - in6ifa = (how == M_WAITOK) ? 
zalloc(in6ifa_zone) : - zalloc_noblock(in6ifa_zone); + in6ifa = zalloc_flags(in6ifa_zone, how | Z_ZERO); if (in6ifa != NULL) { - bzero(in6ifa, in6ifa_size); in6ifa->ia_ifa.ifa_free = in6_ifaddr_free; in6ifa->ia_ifa.ifa_debug |= IFD_ALLOC; in6ifa->ia_ifa.ifa_del_wc = &in6ifa->ia_ifa.ifa_debug; @@ -4406,7 +4478,7 @@ in6_lltable_destroy_lle(struct llentry *lle) static struct llentry * -in6_lltable_new(const struct in6_addr *addr6, u_int flags) +in6_lltable_new(const struct in6_addr *addr6, uint16_t flags) { #pragma unused(flags) struct in6_llentry *lle; @@ -4438,7 +4510,7 @@ in6_lltable_new(const struct in6_addr *addr6, u_int flags) static int in6_lltable_match_prefix(const struct sockaddr *saddr, - const struct sockaddr *smask, u_int flags, struct llentry *lle) + const struct sockaddr *smask, uint16_t flags, struct llentry *lle) { const struct in6_addr *addr, *mask, *lle_addr; @@ -4497,7 +4569,7 @@ in6_lltable_free_entry(struct lltable *llt, struct llentry *lle) static int in6_lltable_rtcheck(struct ifnet *ifp, - u_int flags, const struct sockaddr *l3addr) + uint16_t flags, const struct sockaddr *l3addr) { #pragma unused(flags) struct rtentry *rt; @@ -4590,7 +4662,7 @@ in6_lltable_delete_entry(struct lltable *llt, struct llentry *lle) } static struct llentry * -in6_lltable_alloc(struct lltable *llt, u_int flags, +in6_lltable_alloc(struct lltable *llt, uint16_t flags, const struct sockaddr *l3addr) { const struct sockaddr_in6 *sin6 = (const struct sockaddr_in6 *)(const void *)l3addr; @@ -4615,7 +4687,7 @@ in6_lltable_alloc(struct lltable *llt, u_int flags, log(LOG_INFO, "lla_lookup: new lle malloc failed\n"); return NULL; } - lle->la_flags = flags; + lle->la_flags = (uint16_t)flags; if ((flags & LLE_IFADDR) == LLE_IFADDR) { lltable_set_entry_addr(ifp, lle, LLADDR(SDL(ifp->if_lladdr->ifa_addr))); lle->la_flags |= LLE_STATIC; @@ -4629,7 +4701,7 @@ in6_lltable_alloc(struct lltable *llt, u_int flags, } static struct llentry * -in6_lltable_lookup(struct lltable *llt, u_int flags, +in6_lltable_lookup(struct lltable *llt, uint16_t flags, const struct sockaddr *l3addr) { const struct sockaddr_in6 *sin6 = (const struct sockaddr_in6 *)(const void *)l3addr; @@ -4722,9 +4794,9 @@ in6_lltable_dump_entry(struct lltable *llt, struct llentry *lle, clock_usec_t usecs; clock_get_calendar_microtime(&secs, &usecs); - ndpc.rtm.rtm_rmx.rmx_expire = lle->la_expire + + ndpc.rtm.rtm_rmx.rmx_expire = (int32_t)(lle->la_expire + lle->lle_remtime / hz + - secs - net_uptime(); + secs - net_uptime()); } ndpc.rtm.rtm_flags |= (RTF_HOST | RTF_LLDATA); if (lle->la_flags & LLE_STATIC) { diff --git a/bsd/netinet6/in6.h b/bsd/netinet6/in6.h index fa67c43a4..25471750c 100644 --- a/bsd/netinet6/in6.h +++ b/bsd/netinet6/in6.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2018 Apple Inc. All rights reserved. + * Copyright (c) 2008-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -90,14 +90,21 @@ * @(#)in.h 8.3 (Berkeley) 1/3/94 */ +#ifndef DRIVERKIT #ifndef __KAME_NETINET_IN_H_INCLUDED_ #error "do not include netinet6/in6.h directly, include netinet/in.h. 
" \ " see RFC2553" #endif +#endif /* DRIVERKIT */ #ifndef _NETINET6_IN6_H_ #define _NETINET6_IN6_H_ +#ifndef DRIVERKIT #include +#else +#include +#endif /* DRIVERKIT */ + #include #include @@ -187,6 +194,8 @@ struct sockaddr_in6 { #define IN6MASK0 {{{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }}} #define IN6MASK7 {{{ 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }}} +#define IN6MASK8 {{{ 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }}} #define IN6MASK16 {{{ 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }}} #define IN6MASK32 {{{ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, \ @@ -199,6 +208,8 @@ struct sockaddr_in6 { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }}} #endif +#ifndef PLATFORM_DriverKit + #ifdef KERNEL_PRIVATE extern const struct sockaddr_in6 sa6_any; @@ -269,6 +280,7 @@ extern const struct in6_addr in6mask128; #define IN6ADDR_V4MAPPED_INIT \ {{{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \ 0x00, 0x00, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 }}} +#define IN6ADDR_MULTICAST_PREFIX IN6MASK8 #endif /* (_POSIX_C_SOURCE && !_DARWIN_C_SOURCE) */ extern const struct in6_addr in6addr_any; @@ -382,6 +394,16 @@ extern const struct in6_addr in6addr_linklocal_allv2routers; */ #define IN6_IS_ADDR_MULTICAST(a) ((a)->s6_addr[0] == 0xff) +#define IPV6_ADDR_MC_FLAGS(a) ((a)->s6_addr[1] & 0xf0) + +#define IPV6_ADDR_MC_FLAGS_TRANSIENT 0x10 +#define IPV6_ADDR_MC_FLAGS_PREFIX 0x20 +#define IPV6_ADDR_MC_FLAGS_UNICAST_BASED (IPV6_ADDR_MC_FLAGS_TRANSIENT | IPV6_ADDR_MC_FLAGS_PREFIX) + +#define IN6_IS_ADDR_UNICAST_BASED_MULTICAST(a) \ + (IN6_IS_ADDR_MULTICAST(a) && \ + (IPV6_ADDR_MC_FLAGS(a) == IPV6_ADDR_MC_FLAGS_UNICAST_BASED)) + /* * Unique Local IPv6 Unicast Addresses (per RFC 4193) */ @@ -398,14 +420,19 @@ extern const struct in6_addr in6addr_linklocal_allv2routers; * Multicast Scope */ #ifdef KERNEL /* refers nonstandard items */ +#define IN6_IS_ADDR_MC_UNICAST_BASED_LINKLOCAL(a) \ + (IN6_IS_ADDR_MULTICAST(a) && \ + (IPV6_ADDR_MC_FLAGS(a) == IPV6_ADDR_MC_FLAGS_UNICAST_BASED) && \ + (IPV6_ADDR_MC_SCOPE(a) == IPV6_ADDR_SCOPE_LINKLOCAL)) #define IN6_IS_ADDR_MC_NODELOCAL(a) \ (IN6_IS_ADDR_MULTICAST(a) && \ (IPV6_ADDR_MC_SCOPE(a) == IPV6_ADDR_SCOPE_NODELOCAL)) #define IN6_IS_ADDR_MC_INTFACELOCAL(a) \ (IN6_IS_ADDR_MULTICAST(a) && \ (IPV6_ADDR_MC_SCOPE(a) == IPV6_ADDR_SCOPE_INTFACELOCAL)) -#define IN6_IS_ADDR_MC_LINKLOCAL(a) \ - (IN6_IS_ADDR_MULTICAST(a) && \ +#define IN6_IS_ADDR_MC_LINKLOCAL(a) \ + (IN6_IS_ADDR_MULTICAST(a) && \ + (IPV6_ADDR_MC_FLAGS(a) != IPV6_ADDR_MC_FLAGS_UNICAST_BASED) && \ (IPV6_ADDR_MC_SCOPE(a) == IPV6_ADDR_SCOPE_LINKLOCAL)) #define IN6_IS_ADDR_MC_SITELOCAL(a) \ (IN6_IS_ADDR_MULTICAST(a) && \ @@ -420,8 +447,9 @@ extern const struct in6_addr in6addr_linklocal_allv2routers; #define IN6_IS_ADDR_MC_NODELOCAL(a) \ (IN6_IS_ADDR_MULTICAST(a) && \ (__IPV6_ADDR_MC_SCOPE(a) == __IPV6_ADDR_SCOPE_NODELOCAL)) -#define IN6_IS_ADDR_MC_LINKLOCAL(a) \ - (IN6_IS_ADDR_MULTICAST(a) && \ +#define IN6_IS_ADDR_MC_LINKLOCAL(a) \ + (IN6_IS_ADDR_MULTICAST(a) && \ + (IPV6_ADDR_MC_FLAGS(a) != IPV6_ADDR_MC_FLAGS_UNICAST_BASED) && \ (__IPV6_ADDR_MC_SCOPE(a) == __IPV6_ADDR_SCOPE_LINKLOCAL)) #define IN6_IS_ADDR_MC_SITELOCAL(a) \ (IN6_IS_ADDR_MULTICAST(a) && \ @@ -534,9 +562,9 @@ struct route_in6 { #define IPV6_SOCKOPT_RESERVED1 3 /* reserved for future use */ #endif /* (_POSIX_C_SOURCE && !_DARWIN_C_SOURCE) */ #define IPV6_UNICAST_HOPS 4 /* int; IP6 
hops */ -#define IPV6_MULTICAST_IF 9 /* __uint8_t; set/get IP6 multicast i/f */ -#define IPV6_MULTICAST_HOPS 10 /* __uint8_t; set/get IP6 multicast hops */ -#define IPV6_MULTICAST_LOOP 11 /* __uint8_t; set/get IP6 mcast loopback */ +#define IPV6_MULTICAST_IF 9 /* u_int; set/get IP6 multicast i/f */ +#define IPV6_MULTICAST_HOPS 10 /* int; set/get IP6 multicast hops */ +#define IPV6_MULTICAST_LOOP 11 /* u_int; set/get IP6 mcast loopback */ #define IPV6_JOIN_GROUP 12 /* ip6_mreq; join a group membership */ #define IPV6_LEAVE_GROUP 13 /* ip6_mreq; leave a group membership */ @@ -1007,5 +1035,6 @@ extern struct in6_addr *inet6_rth_getaddr(const void *, int); __END_DECLS #endif /* !KERNEL */ +#endif /* PLATFORM_DriverKit */ #endif /* (_POSIX_C_SOURCE && !_DARWIN_C_SOURCE) */ #endif /* !_NETINET6_IN6_H_ */ diff --git a/bsd/netinet6/in6_cga.c b/bsd/netinet6/in6_cga.c index 00352e1cd..11c1318c2 100644 --- a/bsd/netinet6/in6_cga.c +++ b/bsd/netinet6/in6_cga.c @@ -141,7 +141,7 @@ in6_cga_is_prepare_valid(const struct in6_cga_prepare *prepare, * from bullet 4 of the algorithm. * * @param prepare Pointer to object containing modifier, - * security level & externsion to be used. + * security level & extension to be used. * @param pubkey Public key used for IID generation * @param collisions Collission count on DAD failure * XXX We are not really re-generating IID on DAD @@ -153,7 +153,8 @@ in6_cga_is_prepare_valid(const struct in6_cga_prepare *prepare, */ static void in6_cga_generate_iid(const struct in6_cga_prepare *prepare, - const struct iovec *pubkey, u_int8_t collisions, struct in6_addr *in6) + const struct iovec *pubkey, u_int8_t collisions, + struct in6_addr *in6, struct ifnet *ifp) { SHA1_CTX ctx; u_int8_t sha1[SHA1_RESULTLEN]; @@ -168,11 +169,14 @@ in6_cga_generate_iid(const struct in6_cga_prepare *prepare, SHA1Update(&ctx, in6->s6_addr, 8); SHA1Update(&ctx, &collisions, 1); SHA1Update(&ctx, pubkey->iov_base, pubkey->iov_len); + if (ifp->network_id_len) { + SHA1Update(&ctx, &ifp->network_id, ifp->network_id_len); + } /* FUTURE: extension fields */ SHA1Final(sha1, &ctx); in6->s6_addr8[8] = - (prepare->cga_security_level << 5) | (sha1[0] & 0x1c); + (u_int8_t)((prepare->cga_security_level << 5) | (sha1[0] & 0x1c)); in6->s6_addr8[9] = sha1[1]; in6->s6_addr8[10] = sha1[2]; in6->s6_addr8[11] = sha1[3]; @@ -338,7 +342,7 @@ in6_cga_parameters_prepare(void *output, size_t max, int in6_cga_generate(struct in6_cga_prepare *prepare, u_int8_t collisions, - struct in6_addr *in6) + struct in6_addr *in6, struct ifnet *ifp) { int error; const struct iovec *pubkey; @@ -356,7 +360,7 @@ in6_cga_generate(struct in6_cga_prepare *prepare, u_int8_t collisions, pubkey = &in6_cga.cga_pubkey; if (pubkey->iov_base != NULL) { - in6_cga_generate_iid(prepare, pubkey, collisions, in6); + in6_cga_generate_iid(prepare, pubkey, collisions, in6, ifp); error = 0; } else { error = EADDRNOTAVAIL; diff --git a/bsd/netinet6/in6_cksum.c b/bsd/netinet6/in6_cksum.c index 4df0855dc..41350d560 100644 --- a/bsd/netinet6/in6_cksum.c +++ b/bsd/netinet6/in6_cksum.c @@ -177,7 +177,7 @@ in6_pseudo(const struct in6_addr *src, const struct in6_addr *dst, uint32_t x) /* fold in carry bits */ ADDCARRY(sum); - return sum; + return (uint16_t)sum; } /* diff --git a/bsd/netinet6/in6_gif.c b/bsd/netinet6/in6_gif.c index f98fc60e3..2d45c94cc 100644 --- a/bsd/netinet6/in6_gif.c +++ b/bsd/netinet6/in6_gif.c @@ -80,16 +80,12 @@ #include #endif #include -#if INET6 #include #include #include #include -#endif #include -#if INET6 #include -#endif #include @@ 
-139,7 +135,6 @@ in6_gif_output( break; } #endif -#if INET6 case AF_INET6: { proto = IPPROTO_IPV6; @@ -153,7 +148,6 @@ in6_gif_output( itos = (ntohl(ip6->ip6_flow) >> 20) & 0xff; break; } -#endif default: #if DEBUG printf("in6_gif_output: warning: unknown family %d passed\n", @@ -264,7 +258,6 @@ in6_gif_input(struct mbuf **mp, int *offp, int proto) m_adj(m, *offp); switch (proto) { -#if INET case IPPROTO_IPV4: { struct ip *ip; @@ -295,8 +288,6 @@ in6_gif_input(struct mbuf **mp, int *offp, int proto) } break; } -#endif /* INET */ -#if INET6 case IPPROTO_IPV6: { af = AF_INET6; @@ -314,7 +305,6 @@ in6_gif_input(struct mbuf **mp, int *offp, int proto) } break; } -#endif default: ip6stat.ip6s_nogif++; m_freem(m); diff --git a/bsd/netinet6/in6_ifattach.c b/bsd/netinet6/in6_ifattach.c index 194e996e1..5cc7cad1d 100644 --- a/bsd/netinet6/in6_ifattach.c +++ b/bsd/netinet6/in6_ifattach.c @@ -131,7 +131,7 @@ get_rand_iid( { SHA1_CTX ctxt; u_int8_t digest[SHA1_RESULTLEN]; - int hostnlen; + size_t hostnlen; /* generate 8 bytes of pseudo-random value. */ bzero(&ctxt, sizeof(ctxt)); @@ -511,7 +511,7 @@ in6_ifattach_linklocal(struct ifnet *ifp, struct in6_aliasreq *ifra) lck_mtx_init(&pr0.ndpr_lock, ifa_mtx_grp, ifa_mtx_attr); pr0.ndpr_ifp = ifp; /* this should be 64 at this moment. */ - pr0.ndpr_plen = in6_mask2len(&ifra->ifra_prefixmask.sin6_addr, NULL); + pr0.ndpr_plen = (u_char)in6_mask2len(&ifra->ifra_prefixmask.sin6_addr, NULL); pr0.ndpr_mask = ifra->ifra_prefixmask.sin6_addr; pr0.ndpr_prefix = ifra->ifra_addr; /* apply the mask for safety. (nd6_prelist_add will apply it again) */ @@ -619,14 +619,14 @@ int in6_nigroup( struct ifnet *ifp, const char *name, - int namelen, + size_t namelen, struct in6_addr *in6) { const char *p; u_char *q; SHA1_CTX ctxt; u_int8_t digest[SHA1_RESULTLEN]; - char l; + size_t l; char n[64]; /* a single label must not exceed 63 chars */ if (!namelen || !name) { @@ -804,8 +804,7 @@ skipmcast: error = in6_ifattach_loopback(ifp); if (error != 0) { log(LOG_ERR, "%s: in6_ifattach_loopback returned %d\n", - __func__, error, ifp->if_name, - ifp->if_unit); + __func__, error); return error; } } @@ -989,8 +988,8 @@ in6_ifattach_llcgareq(struct ifnet *ifp, struct in6_cgareq *llcgasr) ifra.ifra_flags = IN6_IFF_SECURED; in6_cga_node_lock(); - if (in6_cga_generate(&llcgasr->cgar_cgaprep, 0, - &ifra.ifra_addr.sin6_addr)) { + if (in6_cga_generate(&llcgasr->cgar_cgaprep, llcgasr->cgar_collision_count, + &ifra.ifra_addr.sin6_addr, ifp)) { in6_cga_node_unlock(); return EADDRNOTAVAIL; } diff --git a/bsd/netinet6/in6_ifattach.h b/bsd/netinet6/in6_ifattach.h index e0622d043..10ab82655 100644 --- a/bsd/netinet6/in6_ifattach.h +++ b/bsd/netinet6/in6_ifattach.h @@ -69,7 +69,7 @@ extern void in6_ifdetach(struct ifnet *); extern int in6_iid_from_hw(struct ifnet *, struct in6_addr *); extern void in6_iid_mktmp(struct ifnet *, u_int8_t *, const u_int8_t *, int); extern void in6_tmpaddrtimer(void *); -extern int in6_nigroup(struct ifnet *, const char *, int, struct in6_addr *); +extern int in6_nigroup(struct ifnet *, const char *, size_t, struct in6_addr *); #endif /* BSD_KERNEL_PRIVATE */ #endif /* _NETINET6_IN6_IFATTACH_H_ */ diff --git a/bsd/netinet6/in6_mcast.c b/bsd/netinet6/in6_mcast.c index 4635a2b18..0f6330fa2 100644 --- a/bsd/netinet6/in6_mcast.c +++ b/bsd/netinet6/in6_mcast.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2010-2018 Apple Inc. All rights reserved. + * Copyright (c) 2010-2020 Apple Inc. All rights reserved. 
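The in6_cga.c and in6_ifattach.c hunks above change how the CGA interface identifier is derived: the SHA-1 digest now also covers the interface's network_id, the DAD collision count is passed down from the ioctl request, and the final byte assembly gains an explicit cast. Below is a minimal sketch (not part of the patch) of just that byte-assembly step, assuming a precomputed digest; the handling of IID bytes 12-15 is not visible in the hunk, so continuing the sha1[1..] pattern there is an assumption.

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define SHA1_RESULTLEN 20

    /* Sketch only, not part of the patch: build the 8-byte interface
     * identifier from a SHA-1 digest as in the in6_cga_generate_iid() hunk.
     * The security level occupies the top three bits of the first byte and
     * the 0x1c mask clears the "u"/"g" bit positions of the digest byte. */
    static void
    cga_iid_from_digest(uint8_t iid[8], const uint8_t sha1[SHA1_RESULTLEN],
        uint8_t sec_level)
    {
        iid[0] = (uint8_t)((sec_level << 5) | (sha1[0] & 0x1c));
        /* Bytes 1-3 follow the hunk; 4-7 are assumed to continue the same way. */
        memcpy(&iid[1], &sha1[1], 7);
    }

    int
    main(void)
    {
        uint8_t digest[SHA1_RESULTLEN] = { 0xde, 0xad, 0xbe, 0xef }; /* placeholder */
        uint8_t iid[8];

        cga_iid_from_digest(iid, digest, 1);
        for (int i = 0; i < 8; i++) {
            printf("%02x%c", iid[i], i == 7 ? '\n' : ':');
        }
        return 0;
    }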
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -107,7 +107,7 @@ im6f_graft(struct in6_mfilter *, const uint8_t, static int im6f_prune(struct in6_mfilter *, const struct sockaddr_in6 *); static void im6f_rollback(struct in6_mfilter *); static void im6f_reap(struct in6_mfilter *); -static int im6o_grow(struct ip6_moptions *, size_t); +static int im6o_grow(struct ip6_moptions *); static size_t im6o_match_group(const struct ip6_moptions *, const struct ifnet *, const struct sockaddr_in6 *); static struct in6_msource * @@ -192,29 +192,17 @@ static unsigned int in6m_debug = 1; /* debugging (enabled) */ #else static unsigned int in6m_debug; /* debugging (disabled) */ #endif /* !DEBUG */ -static unsigned int in6m_size; /* size of zone element */ static struct zone *in6m_zone; /* zone for in6_multi */ - -#define IN6M_ZONE_MAX 64 /* maximum elements in zone */ #define IN6M_ZONE_NAME "in6_multi" /* zone name */ -static unsigned int imm_size; /* size of zone element */ -static struct zone *imm_zone; /* zone for in6_multi_mship */ - -#define IMM_ZONE_MAX 64 /* maximum elements in zone */ -#define IMM_ZONE_NAME "in6_multi_mship" /* zone name */ - -#define IP6MS_ZONE_MAX 64 /* maximum elements in zone */ -#define IP6MS_ZONE_NAME "ip6_msource" /* zone name */ +static ZONE_DECLARE(imm_zone, "in6_multi_mship", + sizeof(struct in6_multi_mship), ZC_ZFREE_CLEARMEM); -static unsigned int ip6ms_size; /* size of zone element */ -static struct zone *ip6ms_zone; /* zone for ip6_msource */ +static ZONE_DECLARE(ip6ms_zone, "ip6_msource", + sizeof(struct ip6_msource), ZC_ZFREE_CLEARMEM); -#define IN6MS_ZONE_MAX 64 /* maximum elements in zone */ -#define IN6MS_ZONE_NAME "in6_msource" /* zone name */ - -static unsigned int in6ms_size; /* size of zone element */ -static struct zone *in6ms_zone; /* zone for in6_msource */ +static ZONE_DECLARE(in6ms_zone, "in6_msource", + sizeof(struct in6_msource), ZC_ZFREE_CLEARMEM); /* Lock group and attribute for in6_multihead_lock lock */ static lck_attr_t *in6_multihead_lock_attr; @@ -224,16 +212,16 @@ static lck_grp_attr_t *in6_multihead_lock_grp_attr; static decl_lck_rw_data(, in6_multihead_lock); struct in6_multihead in6_multihead; -static struct in6_multi *in6_multi_alloc(int); +static struct in6_multi *in6_multi_alloc(zalloc_flags_t); static void in6_multi_free(struct in6_multi *); static void in6_multi_attach(struct in6_multi *); -static struct in6_multi_mship *in6_multi_mship_alloc(int); +static struct in6_multi_mship *in6_multi_mship_alloc(zalloc_flags_t); static void in6_multi_mship_free(struct in6_multi_mship *); static void in6m_trace(struct in6_multi *, int); -static struct ip6_msource *ip6ms_alloc(int); +static struct ip6_msource *ip6ms_alloc(zalloc_flags_t); static void ip6ms_free(struct ip6_msource *); -static struct in6_msource *in6ms_alloc(int); +static struct in6_msource *in6ms_alloc(zalloc_flags_t); static void in6ms_free(struct in6_msource *); /* @@ -265,7 +253,7 @@ in6m_is_ifp_detached(const struct in6_multi *inm) * with an empty source filter list. */ static __inline__ void -im6f_init(struct in6_mfilter *imf, const int st0, const int st1) +im6f_init(struct in6_mfilter *imf, const uint8_t st0, const uint8_t st1) { memset(imf, 0, sizeof(struct in6_mfilter)); RB_INIT(&imf->im6f_sources); @@ -277,7 +265,7 @@ im6f_init(struct in6_mfilter *imf, const int st0, const int st1) * Resize the ip6_moptions vector to the next power-of-two minus 1. 
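With the newmax argument removed, im6o_grow() below always enlarges the membership vector by the fixed rule in the comment, bailing out with ETOOMANYREFS once IPV6_MAX_MEMBERSHIPS is reached. A tiny sketch (not part of the patch) of how that growth rule plays out; the starting value of 31 is only an assumed example.

    #include <stdio.h>

    /* Sketch only, not part of the patch: newmax = ((oldmax + 1) * 2) - 1,
     * i.e. the next power of two minus one. */
    int
    main(void)
    {
        unsigned max = 31;                  /* assumed starting size */

        for (int i = 0; i < 4; i++) {
            printf("%u -> ", max);
            max = ((max + 1) * 2) - 1;      /* 31 -> 63 -> 127 -> 255 -> 511 */
        }
        printf("%u (growth stops at IPV6_MAX_MEMBERSHIPS)\n", max);
        return 0;
    }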
*/ static int -im6o_grow(struct ip6_moptions *imo, size_t newmax) +im6o_grow(struct ip6_moptions *imo) { struct in6_multi **nmships; struct in6_multi **omships; @@ -285,6 +273,7 @@ im6o_grow(struct ip6_moptions *imo, size_t newmax) struct in6_mfilter *omfilters; size_t idx; size_t oldmax; + size_t newmax; IM6O_LOCK_ASSERT_HELD(imo); @@ -293,9 +282,7 @@ im6o_grow(struct ip6_moptions *imo, size_t newmax) omships = imo->im6o_membership; omfilters = imo->im6o_mfilters; oldmax = imo->im6o_max_memberships; - if (newmax == 0) { - newmax = ((oldmax + 1) * 2) - 1; - } + newmax = ((oldmax + 1) * 2) - 1; if (newmax > IPV6_MAX_MEMBERSHIPS) { return ETOOMANYREFS; @@ -322,7 +309,7 @@ im6o_grow(struct ip6_moptions *imo, size_t newmax) im6f_init(&nmfilters[idx], MCAST_UNDEFINED, MCAST_EXCLUDE); } - imo->im6o_max_memberships = newmax; + imo->im6o_max_memberships = (u_short)newmax; return 0; } @@ -559,12 +546,8 @@ in6_mc_get(struct ifnet *ifp, const struct in6_addr *group, * The initial source filter state is INCLUDE, {} as per the RFC. * Pending state-changes per group are subject to a bounds check. */ - inm = in6_multi_alloc(M_WAITOK); - if (inm == NULL) { - in6_multihead_lock_done(); - IFMA_REMREF(ifma); - return ENOMEM; - } + inm = in6_multi_alloc(Z_WAITOK); + IN6M_LOCK(inm); inm->in6m_addr = *group; inm->in6m_ifp = ifp; @@ -656,10 +639,7 @@ in6m_record_source(struct in6_multi *inm, const struct in6_addr *addr) if (inm->in6m_nsrc == in6_mcast_maxgrpsrc) { return -ENOSPC; } - nims = ip6ms_alloc(M_WAITOK); - if (nims == NULL) { - return -ENOMEM; - } + nims = ip6ms_alloc(Z_WAITOK); nims->im6s_addr = find.im6s_addr; RB_INSERT(ip6_msource_tree, &inm->in6m_srcs, nims); ++inm->in6m_nsrc; @@ -707,10 +687,7 @@ im6f_get_source(struct in6_mfilter *imf, const struct sockaddr_in6 *psin, if (imf->im6f_nsrc == in6_mcast_maxsocksrc) { return ENOSPC; } - lims = in6ms_alloc(M_WAITOK); - if (lims == NULL) { - return ENOMEM; - } + lims = in6ms_alloc(Z_WAITOK); lims->im6s_addr = find.im6s_addr; lims->im6sl_st[0] = MCAST_UNDEFINED; RB_INSERT(ip6_msource_tree, &imf->im6f_sources, @@ -739,10 +716,7 @@ im6f_graft(struct in6_mfilter *imf, const uint8_t st1, { struct in6_msource *lims; - lims = in6ms_alloc(M_WAITOK); - if (lims == NULL) { - return NULL; - } + lims = in6ms_alloc(Z_WAITOK); lims->im6s_addr = psin->sin6_addr; lims->im6sl_st[0] = MCAST_UNDEFINED; lims->im6sl_st[1] = st1; @@ -919,10 +893,7 @@ in6m_get_source(struct in6_multi *inm, const struct in6_addr *addr, if (inm->in6m_nsrc == in6_mcast_maxgrpsrc) { return ENOSPC; } - nims = ip6ms_alloc(M_WAITOK); - if (nims == NULL) { - return ENOMEM; - } + nims = ip6ms_alloc(Z_WAITOK); nims->im6s_addr = *addr; RB_INSERT(ip6_msource_tree, &inm->in6m_srcs, nims); ++inm->in6m_nsrc; @@ -1220,11 +1191,7 @@ in6_joingroup(struct ifnet *ifp, struct in6_addr *mcaddr, *errorp = 0; - imm = in6_multi_mship_alloc(M_WAITOK); - if (imm == NULL) { - *errorp = ENOBUFS; - return NULL; - } + imm = in6_multi_mship_alloc(Z_WAITOK); error = in6_mc_join(ifp, mcaddr, NULL, &imm->i6mm_maddr, delay); if (error) { @@ -1433,7 +1400,7 @@ in6p_block_unblock_source(struct inpcb *inp, struct sockopt *sopt) struct in6_msource *ims; struct in6_multi *inm; size_t idx; - uint16_t fmode; + uint8_t fmode; int error, doblock; struct mld_tparams mtp; @@ -1618,7 +1585,7 @@ in6p_findmoptions(struct inpcb *inp) return imo; } - imo = ip6_allocmoptions(M_WAITOK); + imo = ip6_allocmoptions(Z_WAITOK); if (imo == NULL) { return NULL; } @@ -1639,8 +1606,8 @@ in6p_findmoptions(struct inpcb *inp) } imo->im6o_multicast_ifp = 
NULL; - imo->im6o_multicast_hlim = ip6_defmcasthlim; - imo->im6o_multicast_loop = in6_mcast_loop; + imo->im6o_multicast_hlim = (u_char)ip6_defmcasthlim; + imo->im6o_multicast_loop = (u_char)in6_mcast_loop; imo->im6o_num_memberships = 0; imo->im6o_max_memberships = IPV6_MIN_MEMBERSHIPS; imo->im6o_membership = immp; @@ -1730,7 +1697,7 @@ in6p_get_source_filters(struct inpcb *inp, struct sockopt *sopt) } if (msfr.msfr_nsrcs > in6_mcast_maxsocksrc) { - msfr.msfr_nsrcs = in6_mcast_maxsocksrc; + msfr.msfr_nsrcs = (uint32_t)in6_mcast_maxsocksrc; } (void)in6_setscope(&gsa->sin6_addr, ifp, NULL); @@ -1765,7 +1732,7 @@ in6p_get_source_filters(struct inpcb *inp, struct sockopt *sopt) tss = NULL; if (IS_64BIT_PROCESS(current_proc())) { - tmp_ptr = msfr64.msfr_srcs; + tmp_ptr = (user_addr_t)msfr64.msfr_srcs; } else { tmp_ptr = CAST_USER_ADDR_T(msfr32.msfr_srcs); } @@ -1814,7 +1781,7 @@ in6p_get_source_filters(struct inpcb *inp, struct sockopt *sopt) } } - msfr.msfr_nsrcs = ncsrcs; + msfr.msfr_nsrcs = (uint32_t)ncsrcs; if (IS_64BIT_PROCESS(current_proc())) { msfr64.msfr_ifindex = msfr.msfr_ifindex; msfr64.msfr_fmode = msfr.msfr_fmode; @@ -2252,7 +2219,7 @@ in6p_join_group(struct inpcb *inp, struct sockopt *sopt) if (is_new) { if (imo->im6o_num_memberships == imo->im6o_max_memberships) { - error = im6o_grow(imo, 0); + error = im6o_grow(imo); if (error) { goto out_imo_locked; } @@ -2840,7 +2807,7 @@ in6p_set_source_filters(struct inpcb *inp, struct sockopt *sopt) * Begin state merge transaction at socket layer. */ - imf->im6f_st[1] = msfr.msfr_fmode; + imf->im6f_st[1] = (uint8_t)msfr.msfr_fmode; /* * Apply any new source filters, if present. @@ -2855,7 +2822,7 @@ in6p_set_source_filters(struct inpcb *inp, struct sockopt *sopt) unsigned int i; if (IS_64BIT_PROCESS(current_proc())) { - tmp_ptr = msfr64.msfr_srcs; + tmp_ptr = (user_addr_t)msfr64.msfr_srcs; } else { tmp_ptr = CAST_USER_ADDR_T(msfr32.msfr_srcs); } @@ -2882,7 +2849,7 @@ in6p_set_source_filters(struct inpcb *inp, struct sockopt *sopt) * will set it to INCLUDE. */ im6f_leave(imf); - imf->im6f_st[1] = msfr.msfr_fmode; + imf->im6f_st[1] = (uint8_t)msfr.msfr_fmode; /* * Update socket layer filters at t1, lazy-allocating @@ -3024,7 +2991,7 @@ ip6_setmoptions(struct inpcb *inp, struct sockopt *sopt) break; } IM6O_LOCK(im6o); - im6o->im6o_multicast_hlim = hlim; + im6o->im6o_multicast_hlim = (u_char)hlim; IM6O_UNLOCK(im6o); IM6O_REMREF(im6o); /* from in6p_findmoptions() */ break; @@ -3055,7 +3022,7 @@ ip6_setmoptions(struct inpcb *inp, struct sockopt *sopt) break; } IM6O_LOCK(im6o); - im6o->im6o_multicast_loop = loop; + im6o->im6o_multicast_loop = (u_char)loop; IM6O_UNLOCK(im6o); IM6O_REMREF(im6o); /* from in6p_findmoptions() */ break; @@ -3213,52 +3180,18 @@ in6_multi_init(void) in6_multihead_lock_attr); TAILQ_INIT(&in6m_trash_head); - in6m_size = (in6m_debug == 0) ? sizeof(struct in6_multi) : + vm_size_t in6m_size = (in6m_debug == 0) ? 
sizeof(struct in6_multi) : sizeof(struct in6_multi_dbg); - in6m_zone = zinit(in6m_size, IN6M_ZONE_MAX * in6m_size, - 0, IN6M_ZONE_NAME); - if (in6m_zone == NULL) { - panic("%s: failed allocating %s", __func__, IN6M_ZONE_NAME); - /* NOTREACHED */ - } - zone_change(in6m_zone, Z_EXPAND, TRUE); - - imm_size = sizeof(struct in6_multi_mship); - imm_zone = zinit(imm_size, IMM_ZONE_MAX * imm_size, 0, IMM_ZONE_NAME); - if (imm_zone == NULL) { - panic("%s: failed allocating %s", __func__, IMM_ZONE_NAME); - /* NOTREACHED */ - } - zone_change(imm_zone, Z_EXPAND, TRUE); - - ip6ms_size = sizeof(struct ip6_msource); - ip6ms_zone = zinit(ip6ms_size, IP6MS_ZONE_MAX * ip6ms_size, - 0, IP6MS_ZONE_NAME); - if (ip6ms_zone == NULL) { - panic("%s: failed allocating %s", __func__, IP6MS_ZONE_NAME); - /* NOTREACHED */ - } - zone_change(ip6ms_zone, Z_EXPAND, TRUE); - - in6ms_size = sizeof(struct in6_msource); - in6ms_zone = zinit(in6ms_size, IN6MS_ZONE_MAX * in6ms_size, - 0, IN6MS_ZONE_NAME); - if (in6ms_zone == NULL) { - panic("%s: failed allocating %s", __func__, IN6MS_ZONE_NAME); - /* NOTREACHED */ - } - zone_change(in6ms_zone, Z_EXPAND, TRUE); + in6m_zone = zone_create(IN6M_ZONE_NAME, in6m_size, ZC_ZFREE_CLEARMEM); } static struct in6_multi * -in6_multi_alloc(int how) +in6_multi_alloc(zalloc_flags_t how) { struct in6_multi *in6m; - in6m = (how == M_WAITOK) ? zalloc(in6m_zone) : - zalloc_noblock(in6m_zone); + in6m = zalloc_flags(in6m_zone, how | Z_ZERO); if (in6m != NULL) { - bzero(in6m, in6m_size); lck_mtx_init(&in6m->in6m_lock, in6_multihead_lock_grp, in6_multihead_lock_attr); in6m->in6m_debug |= IFD_ALLOC; @@ -3507,16 +3440,9 @@ in6m_trace(struct in6_multi *in6m, int refhold) } static struct in6_multi_mship * -in6_multi_mship_alloc(int how) +in6_multi_mship_alloc(zalloc_flags_t how) { - struct in6_multi_mship *imm; - - imm = (how == M_WAITOK) ? zalloc(imm_zone) : zalloc_noblock(imm_zone); - if (imm != NULL) { - bzero(imm, imm_size); - } - - return imm; + return zalloc_flags(imm_zone, how | Z_ZERO); } static void @@ -3557,17 +3483,9 @@ in6_multihead_lock_done(void) } static struct ip6_msource * -ip6ms_alloc(int how) +ip6ms_alloc(zalloc_flags_t how) { - struct ip6_msource *i6ms; - - i6ms = (how == M_WAITOK) ? zalloc(ip6ms_zone) : - zalloc_noblock(ip6ms_zone); - if (i6ms != NULL) { - bzero(i6ms, ip6ms_size); - } - - return i6ms; + return zalloc_flags(ip6ms_zone, how | Z_ZERO); } static void @@ -3577,17 +3495,9 @@ ip6ms_free(struct ip6_msource *i6ms) } static struct in6_msource * -in6ms_alloc(int how) +in6ms_alloc(zalloc_flags_t how) { - struct in6_msource *in6ms; - - in6ms = (how == M_WAITOK) ? zalloc(in6ms_zone) : - zalloc_noblock(in6ms_zone); - if (in6ms != NULL) { - bzero(in6ms, in6ms_size); - } - - return in6ms; + return zalloc_flags(in6ms_zone, how | Z_ZERO); } static void diff --git a/bsd/netinet6/in6_pcb.c b/bsd/netinet6/in6_pcb.c index 1af6aa583..bd106c031 100644 --- a/bsd/netinet6/in6_pcb.c +++ b/bsd/netinet6/in6_pcb.c @@ -130,13 +130,9 @@ #if IPSEC #include -#if INET6 #include -#endif #include -#if INET6 #include -#endif #include #endif /* IPSEC */ @@ -195,10 +191,10 @@ in6_pcbbind(struct inpcb *inp, struct sockaddr *nam, struct proc *p) int wild = 0, reuseport = (so->so_options & SO_REUSEPORT); struct ifnet *outif = NULL; struct sockaddr_in6 sin6; -#if !CONFIG_EMBEDDED +#if XNU_TARGET_OS_OSX int error; kauth_cred_t cred; -#endif /* !CONFIG_EMBEDDED */ +#endif /* XNU_TARGET_OS_OSX */ if (TAILQ_EMPTY(&in6_ifaddrhead)) { /* XXX broken! 
*/ return EADDRNOTAVAIL; @@ -304,7 +300,7 @@ in6_pcbbind(struct inpcb *inp, struct sockaddr *nam, struct proc *p) struct inpcb *t; uid_t u; -#if !CONFIG_EMBEDDED +#if XNU_TARGET_OS_OSX if (ntohs(lport) < IPV6PORT_RESERVED && !IN6_IS_ADDR_UNSPECIFIED(&sin6.sin6_addr) && !(inp->inp_flags2 & INP2_EXTERNAL_PORT)) { @@ -318,12 +314,12 @@ in6_pcbbind(struct inpcb *inp, struct sockaddr *nam, struct proc *p) return EACCES; } } -#endif /* !CONFIG_EMBEDDED */ +#endif /* XNU_TARGET_OS_OSX */ /* * Check wether the process is allowed to bind to a restricted port */ if (!current_task_can_use_restricted_in_port(lport, - so->so_proto->pr_protocol, PORT_FLAGS_BSD)) { + (uint8_t)SOCK_PROTO(so), PORT_FLAGS_BSD)) { lck_rw_done(pcbinfo->ipi_lock); socket_lock(so, 0); return EADDRINUSE; @@ -936,7 +932,7 @@ in6_pcbnotify(struct inpcbinfo *pcbinfo, struct sockaddr *dst, u_int fport_arg, struct inpcbhead *head = pcbinfo->ipi_listhead; struct inpcb *inp, *ninp; struct sockaddr_in6 sa6_src, *sa6_dst; - u_short fport = fport_arg, lport = lport_arg; + uint16_t fport = (uint16_t)fport_arg, lport = (uint16_t)lport_arg; u_int32_t flowinfo; int errno; @@ -991,9 +987,11 @@ in6_pcbnotify(struct inpcbinfo *pcbinfo, struct sockaddr *dst, u_int fport_arg, * sockets disconnected. * XXX: should we avoid to notify the value to TCP sockets? */ - if (cmd == PRC_MSGSIZE) { + if (cmd == PRC_MSGSIZE && cmdarg != NULL) { + socket_lock(inp->inp_socket, 1); ip6_notify_pmtu(inp, (struct sockaddr_in6 *)(void *)dst, (u_int32_t *)cmdarg); + socket_unlock(inp->inp_socket, 1); } /* @@ -1042,7 +1040,7 @@ in6_pcblookup_local(struct inpcbinfo *pcbinfo, struct in6_addr *laddr, { struct inpcb *inp; int matchwild = 3, wildcard; - u_short lport = lport_arg; + uint16_t lport = (uint16_t)lport_arg; struct inpcbporthead *porthash; struct inpcb *match = NULL; struct inpcbport *phd; @@ -1182,7 +1180,7 @@ in6_pcblookup_hash_exists(struct inpcbinfo *pcbinfo, struct in6_addr *faddr, { struct inpcbhead *head; struct inpcb *inp; - u_short fport = fport_arg, lport = lport_arg; + uint16_t fport = (uint16_t)fport_arg, lport = (uint16_t)lport_arg; int found; *uid = UID_MAX; @@ -1295,7 +1293,7 @@ in6_pcblookup_hash(struct inpcbinfo *pcbinfo, struct in6_addr *faddr, { struct inpcbhead *head; struct inpcb *inp; - u_short fport = fport_arg, lport = lport_arg; + uint16_t fport = (uint16_t)fport_arg, lport = (uint16_t)lport_arg; lck_rw_lock_shared(pcbinfo->ipi_lock); diff --git a/bsd/netinet6/in6_pcb.h b/bsd/netinet6/in6_pcb.h index dcf8a8f30..73444845a 100644 --- a/bsd/netinet6/in6_pcb.h +++ b/bsd/netinet6/in6_pcb.h @@ -118,7 +118,7 @@ extern int in6_getsockaddr(struct socket *, struct sockaddr **); extern int in6_getsockaddr_s(struct socket *, struct sockaddr_in6 *); extern int in6_mapped_sockaddr(struct socket *so, struct sockaddr **nam); extern int in6_mapped_peeraddr(struct socket *so, struct sockaddr **nam); -extern int in6_selecthlim(struct in6pcb *, struct ifnet *); +extern uint8_t in6_selecthlim(struct in6pcb *, struct ifnet *); extern int in6_pcbsetport(struct in6_addr *, struct inpcb *, struct proc *, int); extern void init_sin6(struct sockaddr_in6 *sin6, struct mbuf *m); diff --git a/bsd/netinet6/in6_proto.c b/bsd/netinet6/in6_proto.c index 7a1f23c44..6ee55d379 100644 --- a/bsd/netinet6/in6_proto.c +++ b/bsd/netinet6/in6_proto.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2019 Apple Inc. All rights reserved. + * Copyright (c) 2008-2020 Apple Inc. All rights reserved. 
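The in6_pcbbind() hunk above keeps the reserved-port privilege check under XNU_TARGET_OS_OSX: it only applies to ports below IPV6PORT_RESERVED bound to a specific (non-unspecified) address, and is skipped for sockets flagged INP2_EXTERNAL_PORT. A small user-space sketch (not part of the patch) of the observable behaviour; the port number and loopback address are arbitrary choices, and without privilege the bind is expected to fail with EACCES.

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <arpa/inet.h>
    #include <netinet/in.h>
    #include <sys/socket.h>

    int
    main(void)
    {
        int s = socket(AF_INET6, SOCK_STREAM, 0);
        struct sockaddr_in6 sin6;

        if (s == -1) {
            perror("socket");
            return 1;
        }
        memset(&sin6, 0, sizeof(sin6));
        sin6.sin6_family = AF_INET6;
        sin6.sin6_len = sizeof(sin6);        /* BSD/Darwin only */
        sin6.sin6_port = htons(80);          /* below IPV6PORT_RESERVED */
        sin6.sin6_addr = in6addr_loopback;   /* a specific address */

        if (bind(s, (struct sockaddr *)&sin6, sizeof(sin6)) == -1) {
            printf("bind: %s\n", strerror(errno)); /* EACCES as non-root */
        } else {
            printf("bind succeeded (privileged or entitled)\n");
        }
        close(s);
        return 0;
    }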
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -130,19 +130,13 @@ #if IPSEC #include -#if INET6 #include -#endif #include -#if INET6 #include -#endif #if IPSEC_ESP #include -#if INET6 #include #endif -#endif #endif /*IPSEC*/ #include @@ -461,7 +455,7 @@ int ip6_v6only = 0; /* Mapped addresses off by default - Radar 3347 int ip6_neighborgcthresh = 1024; /* Threshold # of NDP entries for GC */ int ip6_maxifprefixes = 16; /* Max acceptable prefixes via RA per IF */ -int ip6_maxifdefrouters = 16; /* Max acceptable def routers via RA */ +int ip6_maxifdefrouters = 64; /* Max acceptable default or RTI routers via RA */ int ip6_maxdynroutes = 1024; /* Max # of routes created via redirect */ int ip6_only_allow_rfc4193_prefix = 0; /* Only allow RFC4193 style Unique Local IPv6 Unicast prefixes */ @@ -526,20 +520,24 @@ sysctl_ip6_temppltime SYSCTL_HANDLER_ARGS { #pragma unused(oidp, arg2) int error = 0; - int old; + int value = 0; error = SYSCTL_OUT(req, arg1, sizeof(int)); if (error || !req->newptr) { return error; } - old = ip6_temp_preferred_lifetime; - error = SYSCTL_IN(req, arg1, sizeof(int)); - if (ip6_temp_preferred_lifetime > ND6_MAX_LIFETIME || - ip6_temp_preferred_lifetime < - ip6_desync_factor + ip6_temp_regen_advance) { - ip6_temp_preferred_lifetime = old; + + error = SYSCTL_IN(req, &value, sizeof(value)); + if (error) { + return error; + } + + if (value > ND6_MAX_LIFETIME || + value < ip6_desync_factor + ip6_temp_regen_advance) { return EINVAL; } + + ip6_temp_preferred_lifetime = value; return error; } @@ -548,22 +546,51 @@ sysctl_ip6_tempvltime SYSCTL_HANDLER_ARGS { #pragma unused(oidp, arg2) int error = 0; - int old; + int value = 0; error = SYSCTL_OUT(req, arg1, sizeof(int)); if (error || !req->newptr) { return error; } - old = ip6_temp_valid_lifetime; - error = SYSCTL_IN(req, arg1, sizeof(int)); - if (ip6_temp_valid_lifetime > ND6_MAX_LIFETIME || - ip6_temp_valid_lifetime < ip6_temp_preferred_lifetime) { - ip6_temp_valid_lifetime = old; + + error = SYSCTL_IN(req, &value, sizeof(value)); + if (error) { + return error; + } + + if (value > ND6_MAX_LIFETIME || + value < ip6_temp_preferred_lifetime) { return EINVAL; } + + ip6_temp_valid_lifetime = value; return error; } +static int +sysctl_ip6_cga_conflict_retries SYSCTL_HANDLER_ARGS +{ +#pragma unused(oidp, arg2) + int error = 0; + int value = 0; + + error = SYSCTL_OUT(req, arg1, sizeof(int)); + if (error || !req->newptr) { + return error; + } + + error = SYSCTL_IN(req, &value, sizeof(value)); + if (error) { + return error; + } + if (value > IPV6_CGA_CONFLICT_RETRIES_MAX || value < 0) { + return EINVAL; + } + + ip6_cga_conflict_retries = value; + return 0; +} + static int ip6_getstat SYSCTL_HANDLER_ARGS { @@ -648,6 +675,55 @@ SYSCTL_INT(_net_inet6_ip6, OID_AUTO, SYSCTL_INT(_net_inet6_ip6, OID_AUTO, clat_debug, CTLFLAG_RW | CTLFLAG_LOCKED, &clat_debug, 0, ""); +SYSCTL_PROC(_net_inet6_ip6, OID_AUTO, + cga_conflict_retries, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, + &ip6_cga_conflict_retries, 0, sysctl_ip6_cga_conflict_retries, "IU", ""); + +/* + * One single sysctl to set v6 stack profile for IPv6 compliance testing. + * A lot of compliance test suites are not aware of other enhancements in IPv6 + * protocol and expect some arguably obsolete behavior. 
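The reworked lifetime handlers above now read the proposed value into a local, range-check it, and only then commit, so an out-of-range write returns EINVAL without disturbing the current setting. That behaviour is visible from user space via sysctlbyname(); in the sketch below (not part of the patch) the net.inet6.ip6.temppltime OID name is an assumption, since the SYSCTL declaration itself is outside the hunk, and setting the value needs root (it fails with EPERM otherwise).

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/types.h>
    #include <sys/sysctl.h>

    int
    main(void)
    {
        int cur = 0, bogus = -1;             /* negative is below the minimum */
        size_t len = sizeof(cur);

        if (sysctlbyname("net.inet6.ip6.temppltime", &cur, &len, NULL, 0) == 0) {
            printf("temppltime = %d\n", cur);
        }
        /* As root this is rejected with EINVAL and the old value survives;
         * unprivileged callers get EPERM before the handler runs. */
        if (sysctlbyname("net.inet6.ip6.temppltime", NULL, NULL,
            &bogus, sizeof(bogus)) == -1) {
            printf("write rejected: %s\n", strerror(errno));
        }
        return 0;
    }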
+ */ +int v6_compliance_profile = 0; +static int +sysctl_set_v6_compliance_profile SYSCTL_HANDLER_ARGS +{ +#pragma unused(oidp, arg2) + int changed, error; + int value = *(int *) arg1; + + error = sysctl_io_number(req, value, sizeof(value), &value, &changed); + if (error || !changed) { + return error; + } + + if (value != 0 && value != 1) { + return ERANGE; + } + + if (value == 1) { + ip6_use_tempaddr = 0; + dad_enhanced = 0; + icmp6_rediraccept = 1; + nd6_optimistic_dad = 0; + nd6_process_rti = ND6_PROCESS_RTI_ENABLE; + } else { + ip6_use_tempaddr = IP6_USE_TMPADDR_DEFAULT; + dad_enhanced = ND6_DAD_ENHANCED_DEFAULT; + icmp6_rediraccept = ICMP6_REDIRACCEPT_DEFAULT; + nd6_optimistic_dad = ND6_OPTIMISTIC_DAD_DEFAULT; + nd6_process_rti = ND6_PROCESS_RTI_DEFAULT; + } + + v6_compliance_profile = value; + return 0; +} + +SYSCTL_PROC(_net_inet6_ip6, OID_AUTO, compliance_profile, + CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, + &v6_compliance_profile, 0, sysctl_set_v6_compliance_profile, + "I", "set IPv6 compliance profile"); + /* net.inet6.icmp6 */ SYSCTL_INT(_net_inet6_icmp6, ICMPV6CTL_REDIRACCEPT, rediraccept, CTLFLAG_RW | CTLFLAG_LOCKED, &icmp6_rediraccept, 0, ""); diff --git a/bsd/netinet6/in6_src.c b/bsd/netinet6/in6_src.c index 4af6fed4a..df079e308 100644 --- a/bsd/netinet6/in6_src.c +++ b/bsd/netinet6/in6_src.c @@ -105,6 +105,7 @@ #include #include #include +#include #include #include @@ -154,6 +155,9 @@ SYSCTL_INT(_net_inet6_ip6, OID_AUTO, select_src_strong_end, struct in6_addrpolicy defaultaddrpolicy; int ip6_prefer_tempaddr = 1; + +int ip6_cga_conflict_retries = IPV6_CGA_CONFLICT_RETRIES_DEFAULT; + #ifdef ENABLE_ADDRSEL extern lck_mtx_t *addrsel_mutex; #define ADDRSEL_LOCK() lck_mtx_lock(addrsel_mutex) @@ -162,6 +166,8 @@ extern lck_mtx_t *addrsel_mutex; #define ADDRSEL_LOCK() #define ADDRSEL_UNLOCK() #endif +extern int udp_use_randomport; +extern int tcp_use_randomport; static int selectroute(struct sockaddr_in6 *, struct sockaddr_in6 *, struct ip6_pktopts *, struct ip6_moptions *, struct in6_ifaddr **, @@ -929,7 +935,7 @@ selectroute(struct sockaddr_in6 *srcsock, struct sockaddr_in6 *dstsock, ifp = ifp0 = ((ifscope <= if_index) ? ifindex2ifnet[ifscope] : NULL); ifnet_head_done(); - if (norouteok || retrt == NULL || IN6_IS_ADDR_MULTICAST(dst)) { + if (norouteok || retrt == NULL || IN6_IS_ADDR_MC_LINKLOCAL(dst)) { /* * We do not have to check or get the route for * multicast. If the caller didn't ask/care for @@ -951,9 +957,10 @@ selectroute(struct sockaddr_in6 *srcsock, struct sockaddr_in6 *dstsock, */ if (IN6_IS_ADDR_MULTICAST(dst) && mopts != NULL) { IM6O_LOCK(mopts); - if ((ifp = ifp0 = mopts->im6o_multicast_ifp) != NULL) { + ifp = ifp0 = mopts->im6o_multicast_ifp; + if (ifp != NULL && IN6_IS_ADDR_MC_LINKLOCAL(dst)) { IM6O_UNLOCK(mopts); - goto done; /* we do not need a route for multicast. 
*/ + goto done; /* we don't need a route for link-local multicast */ } IM6O_UNLOCK(mopts); } @@ -1188,16 +1195,13 @@ getroute: if (ro->ro_rt == NULL) { struct sockaddr_in6 *sa6; - if (ro->ro_rt != NULL) { - RT_UNLOCK(ro->ro_rt); - } /* No route yet, so try to acquire one */ bzero(&ro->ro_dst, sizeof(struct sockaddr_in6)); sa6 = (struct sockaddr_in6 *)&ro->ro_dst; sa6->sin6_family = AF_INET6; sa6->sin6_len = sizeof(struct sockaddr_in6); sa6->sin6_addr = *dst; - if (IN6_IS_ADDR_MULTICAST(dst)) { + if (IN6_IS_ADDR_MC_LINKLOCAL(dst)) { ro->ro_rt = rtalloc1_scoped( &((struct route *)ro)->ro_dst, 0, 0, ifscope); } else { @@ -1504,29 +1508,29 @@ in6_selectroute(struct sockaddr_in6 *srcsock, struct sockaddr_in6 *dstsock, /* * Default hop limit selection. The precedence is as follows: - * 1. Hoplimit value specified via ioctl. + * 1. Hoplimit value specified via socket option. * 2. (If the outgoing interface is detected) the current * hop limit of the interface specified by router advertisement. * 3. The system default hoplimit. */ -int +uint8_t in6_selecthlim(struct in6pcb *in6p, struct ifnet *ifp) { if (in6p && in6p->in6p_hops >= 0) { - return in6p->in6p_hops; + return (uint8_t)in6p->in6p_hops; } else if (NULL != ifp) { - u_int8_t chlim; + uint8_t chlim; struct nd_ifinfo *ndi = ND_IFINFO(ifp); if (ndi && ndi->initialized) { /* access chlim without lock, for performance */ chlim = ndi->chlim; } else { - chlim = ip6_defhlim; + chlim = (uint8_t)ip6_defhlim; } return chlim; } - return ip6_defhlim; + return (uint8_t)ip6_defhlim; } /* @@ -1538,10 +1542,10 @@ in6_pcbsetport(struct in6_addr *laddr, struct inpcb *inp, struct proc *p, int locked) { struct socket *so = inp->inp_socket; - u_int16_t lport = 0, first, last, *lastport; + uint16_t lport = 0, first, last, *lastport, rand_port; int count, error = 0, wild = 0; boolean_t counting_down; - bool found; + bool found, randomport; struct inpcbinfo *pcbinfo = inp->inp_pcbinfo; kauth_cred_t cred; #pragma unused(laddr) @@ -1573,9 +1577,13 @@ in6_pcbsetport(struct in6_addr *laddr, struct inpcb *inp, struct proc *p, wild = INPLOOKUP_WILDCARD; } + randomport = (so->so_flags & SOF_BINDRANDOMPORT) > 0 || + (so->so_type == SOCK_STREAM ? tcp_use_randomport : + udp_use_randomport) > 0; + if (inp->inp_flags & INP_HIGHPORT) { - first = ipport_hifirstauto; /* sysctl */ - last = ipport_hilastauto; + first = (uint16_t)ipport_hifirstauto; /* sysctl */ + last = (uint16_t)ipport_hilastauto; lastport = &pcbinfo->ipi_lasthi; } else if (inp->inp_flags & INP_LOWPORT) { cred = kauth_cred_proc_ref(p); @@ -1587,14 +1595,18 @@ in6_pcbsetport(struct in6_addr *laddr, struct inpcb *inp, struct proc *p, } return error; } - first = ipport_lowfirstauto; /* 1023 */ - last = ipport_lowlastauto; /* 600 */ + first = (uint16_t)ipport_lowfirstauto; /* 1023 */ + last = (uint16_t)ipport_lowlastauto; /* 600 */ lastport = &pcbinfo->ipi_lastlow; } else { - first = ipport_firstauto; /* sysctl */ - last = ipport_lastauto; + first = (uint16_t)ipport_firstauto; /* sysctl */ + last = (uint16_t)ipport_lastauto; lastport = &pcbinfo->ipi_lastport; } + + if (first == last) { + randomport = false; + } /* * Simple check to ensure all ports are not used up causing * a deadlock here. 
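The in6_pcbsetport() changes above make the ephemeral-port search start at a random offset within the configured range when randomization is enabled (SOF_BINDRANDOMPORT on the socket, or the tcp/udp_use_randomport sysctls), and disable randomization when first == last. A simplified sketch (not part of the patch) of picking that starting point: arc4random_uniform() stands in for the kernel's read_frandom(), and 49152-65535 is just the customary dynamic-port range, not a value taken from the hunk.

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Sketch only, not part of the patch: choose a random first candidate
     * inside [first, last], whichever way round the range is configured. */
    static uint16_t
    random_start(uint16_t first, uint16_t last)
    {
        uint16_t lo = first < last ? first : last;
        uint16_t hi = first < last ? last : first;

        if (lo == hi) {
            return lo;               /* degenerate range: nothing to randomize */
        }
        return (uint16_t)(lo + arc4random_uniform((uint32_t)(hi - lo) + 1));
    }

    int
    main(void)
    {
        for (int i = 0; i < 4; i++) {
            printf("start at %u\n", random_start(49152, 65535));
        }
        return 0;
    }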
@@ -1602,10 +1614,18 @@ in6_pcbsetport(struct in6_addr *laddr, struct inpcb *inp, struct proc *p, found = false; if (first > last) { /* counting down */ + if (randomport) { + read_frandom(&rand_port, sizeof(rand_port)); + *lastport = first - (rand_port % (first - last)); + } count = first - last; counting_down = TRUE; } else { /* counting up */ + if (randomport) { + read_frandom(&rand_port, sizeof(rand_port)); + *lastport = first + (rand_port % (first - last)); + } count = last - first; counting_down = FALSE; } @@ -2146,7 +2166,7 @@ in6_embedscope(struct in6_addr *in6, const struct sockaddr_in6 *sin6, ifp = ifindex2ifnet[pi->ipi6_ifindex]; ifnet_head_done(); } - in6->s6_addr16[1] = htons(pi->ipi6_ifindex); + in6->s6_addr16[1] = htons((uint16_t)pi->ipi6_ifindex); } else if (in6p != NULL && IN6_IS_ADDR_MULTICAST(in6) && in6p->in6p_moptions != NULL && im6o_multicast_ifp != NULL) { ifp = im6o_multicast_ifp; diff --git a/bsd/netinet6/in6_var.h b/bsd/netinet6/in6_var.h index bd1a424ae..26f61ad7c 100644 --- a/bsd/netinet6/in6_var.h +++ b/bsd/netinet6/in6_var.h @@ -165,6 +165,9 @@ struct in6_ifaddr { TAILQ_ENTRY(in6_ifaddr) ia6_hash; /* hash bucket entry */ int ia6_flags; + /* cga collision count */ + uint8_t ia6_cga_collision_count; + struct in6_addrlifetime_i ia6_lifetime; /* * the creation time of this address, which is @@ -392,50 +395,21 @@ struct in6_cga_nodecfg { struct in6_cga_prepare cga_prepare; }; -/* - * XXX in6_llstartreq will be removed once - * configd adopts the more generically named - * in6_cgareq structure. - */ -struct in6_llstartreq { - char llsr_name[IFNAMSIZ]; - int llsr_flags; - struct in6_cga_prepare llsr_cgaprep; - struct in6_addrlifetime llsr_lifetime; -}; - struct in6_cgareq { char cgar_name[IFNAMSIZ]; int cgar_flags; struct in6_cga_prepare cgar_cgaprep; struct in6_addrlifetime cgar_lifetime; + uint8_t cgar_collision_count; }; #ifdef BSD_KERNEL_PRIVATE -/* - * XXX Corresponding versions of in6_llstartreq - * will be removed after the new in6_cgareq is - * adopted by configd - */ -struct in6_llstartreq_32 { - char llsr_name[IFNAMSIZ]; - int llsr_flags; - struct in6_cga_prepare llsr_cgaprep; - struct in6_addrlifetime_32 llsr_lifetime; -}; - -struct in6_llstartreq_64 { - char llsr_name[IFNAMSIZ]; - int llsr_flags; - struct in6_cga_prepare llsr_cgaprep; - struct in6_addrlifetime_64 llsr_lifetime; -}; - struct in6_cgareq_32 { char cgar_name[IFNAMSIZ]; int cgar_flags; struct in6_cga_prepare cgar_cgaprep; struct in6_addrlifetime_32 cgar_lifetime; + uint8_t cgar_collision_count; }; struct in6_cgareq_64 { @@ -443,6 +417,7 @@ struct in6_cgareq_64 { int cgar_flags; struct in6_cga_prepare cgar_cgaprep; struct in6_addrlifetime_64 cgar_lifetime; + uint8_t cgar_collision_count; }; #endif /* !BSD_KERNEL_PRIVATE */ @@ -740,10 +715,37 @@ void in6_post_msg(struct ifnet *, u_int32_t, struct in6_ifaddr *, uint8_t *mac); #endif /* BSD_KERNEL_PRIVATE */ /* - * enable/disable IPv6 router mode on interface. + * SIOCSETROUTERMODE_IN6 + * Set the IPv6 router mode on an interface. 
+ * + * IPV6_ROUTER_MODE_DISABLED + * - disable IPv6 router mode if it is enabled + * - if the previous mode was IPV6_ROUTER_MODE_EXCUSIVE, + * scrubs all IPv6 auto-configured addresses + * + * IPV6_ROUTER_MODE_EXCLUSIVE + * - act exclusively as an IPv6 router on the interface + * - disables accepting external Router Advertisements + * - scrubs all IPv6 auto-configured addresses + * - disables optimistic dad + * - disables ND6 prefix proxy, if enabled + * - used by the internet sharing/personal hotspot feature + * + * IPV6_ROUTER_MODE_HYBRID + * - act as both an IPv6 router and IPv6 client on the interface + * - does not modify whether to accept Router Advertisements + * - does not scrub any addresses + * - used when acting as the gateway/router for an otherwise isolated + * network whose existence is likely advertised via a + * a Route Information Option in a Router Advertisement */ +#define IPV6_ROUTER_MODE_DISABLED 0 +#define IPV6_ROUTER_MODE_EXCLUSIVE 1 +#define IPV6_ROUTER_MODE_HYBRID 2 #define SIOCSETROUTERMODE_IN6 _IOWR('i', 136, struct in6_ifreq) +#define SIOCGETROUTERMODE_IN6 _IOWR('i', 137, struct in6_ifreq) + /* * start secure link-local interface addresses */ @@ -753,8 +755,17 @@ void in6_post_msg(struct ifnet *, u_int32_t, struct in6_ifaddr *, uint8_t *mac); #define SIOCLL_CGASTART_64 _IOW('i', 160, struct in6_cgareq_64) #endif +/* + * get/set the CGA parameters + */ #define SIOCGIFCGAPREP_IN6 _IOWR('i', 187, struct in6_cgareq) #define SIOCSIFCGAPREP_IN6 _IOWR('i', 188, struct in6_cgareq) +#ifdef BSD_KERNEL_PRIVATE +#define SIOCGIFCGAPREP_IN6_32 _IOWR('i', 187, struct in6_cgareq_32) +#define SIOCGIFCGAPREP_IN6_64 _IOWR('i', 187, struct in6_cgareq_64) +#define SIOCSIFCGAPREP_IN6_32 _IOWR('i', 188, struct in6_cgareq_32) +#define SIOCSIFCGAPREP_IN6_64 _IOWR('i', 188, struct in6_cgareq_64) +#endif #define SIOCCLAT46_START _IOWR('i', 189, struct in6_ifreq) #define SIOCCLAT46_STOP _IOWR('i', 190, struct in6_ifreq) @@ -1197,12 +1208,14 @@ extern int in6_cga_stop(void); extern ssize_t in6_cga_parameters_prepare(void *, size_t, const struct in6_addr *, u_int8_t, const struct in6_cga_modifier *); extern int in6_cga_generate(struct in6_cga_prepare *, u_int8_t, - struct in6_addr *); + struct in6_addr *, struct ifnet *); extern int in6_getconninfo(struct socket *, sae_connid_t, uint32_t *, uint32_t *, int32_t *, user_addr_t, socklen_t *, user_addr_t, socklen_t *, uint32_t *, user_addr_t, uint32_t *); extern void in6_ip6_to_sockaddr(const struct in6_addr *ip6, u_int16_t port, struct sockaddr_in6 *sin6, u_int32_t maxlen); +extern void in6_cgareq_copy_from_user(const void *, int, + struct in6_cgareq *cgareq); #endif /* BSD_KERNEL_PRIVATE */ #endif /* _NETINET6_IN6_VAR_H_ */ diff --git a/bsd/netinet6/ip6_forward.c b/bsd/netinet6/ip6_forward.c index 57c44b85c..02cac6958 100644 --- a/bsd/netinet6/ip6_forward.c +++ b/bsd/netinet6/ip6_forward.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2009-2018 Apple Inc. All rights reserved. + * Copyright (c) 2009-2020 Apple Inc. All rights reserved. 
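The comment block above spells out the three router modes accepted by SIOCSETROUTERMODE_IN6. A hypothetical user-space sketch (not part of the patch) of selecting one of them: this is a private ioctl, so the header path, the use of ifr_ifru.ifru_intval to carry the mode, and the interface name are all assumptions made for illustration, and the call requires privilege.

    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <net/if.h>
    #include <netinet/in.h>
    #include <netinet6/in6_var.h>    /* struct in6_ifreq, SIOCSETROUTERMODE_IN6 (private) */

    int
    main(void)
    {
        int s = socket(AF_INET6, SOCK_DGRAM, 0);
        struct in6_ifreq ifr;

        if (s == -1) {
            perror("socket");
            return 1;
        }
        memset(&ifr, 0, sizeof(ifr));
        strlcpy(ifr.ifr_name, "en0", sizeof(ifr.ifr_name));  /* example interface */
        ifr.ifr_ifru.ifru_intval = 2;    /* IPV6_ROUTER_MODE_HYBRID (assumed field) */

        if (ioctl(s, SIOCSETROUTERMODE_IN6, &ifr) == -1) {
            perror("SIOCSETROUTERMODE_IN6");
        }
        close(s);
        return 0;
    }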
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -90,9 +90,7 @@ #if IPSEC #include -#if INET6 #include -#endif #include extern int ipsec_bypass; #endif /* IPSEC */ @@ -100,7 +98,6 @@ extern int ipsec_bypass; #include #if DUMMYNET -#include #include #endif /* DUMMYNET */ @@ -393,7 +390,7 @@ ip6_forward(struct mbuf *m, struct route_in6 *ip6forward_rt, break; default: printf("ip6_output (ipsec): error code %d\n", error); - /* fall through */ + OS_FALLTHROUGH; case ENOENT: /* don't show these error codes to the user */ break; diff --git a/bsd/netinet6/ip6_fw.c b/bsd/netinet6/ip6_fw.c deleted file mode 100644 index 7e2151c4b..000000000 --- a/bsd/netinet6/ip6_fw.c +++ /dev/null @@ -1,1515 +0,0 @@ -/* - * Copyright (c) 2003-2012 Apple Inc. All rights reserved. - * - * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * - * This file contains Original Code and/or Modifications of Original Code - * as defined in and that are subject to the Apple Public Source License - * Version 2.0 (the 'License'). You may not use this file except in - * compliance with the License. The rights granted to you under the License - * may not be used to create, or enable the creation or redistribution of, - * unlawful or unlicensed copies of an Apple operating system, or to - * circumvent, violate, or enable the circumvention or violation of, any - * terms of an Apple operating system software license agreement. - * - * Please obtain a copy of the License at - * http://www.opensource.apple.com/apsl/ and read it before using this file. - * - * The Original Code and all software distributed under the License are - * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER - * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, - * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. - * Please see the License for the specific language governing rights and - * limitations under the License. - * - * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ - */ - -/* $FreeBSD: src/sys/netinet6/ip6_fw.c,v 1.2.2.9 2002/04/28 05:40:27 suz Exp $ */ -/* $KAME: ip6_fw.c,v 1.21 2001/01/24 01:25:32 itojun Exp $ */ - -/* - * Copyright (C) 1998, 1999, 2000 and 2001 WIDE Project. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the name of the project nor the names of its contributors - * may be used to endorse or promote products derived from this software - * without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. 
IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - */ - -/* - * Copyright (c) 1993 Daniel Boulet - * Copyright (c) 1994 Ugen J.S.Antsilevich - * Copyright (c) 1996 Alex Nash - * - * Redistribution and use in source forms, with and without modification, - * are permitted provided that this entire comment appears intact. - * - * Redistribution in binary form may occur without any restrictions. - * Obviously, it would be nice if you gave credit where credit is due - * but requiring it would be too onerous. - * - * This software is provided ``AS IS'' without any warranties of any kind. - */ - -/* - * Implement IPv6 packet firewall - */ - - -#ifdef IP6DIVERT -#error "NOT SUPPORTED IPV6 DIVERT" -#endif -#ifdef IP6FW_DIVERT_RESTART -#error "NOT SUPPORTED IPV6 DIVERT" -#endif - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include - -#include -#include -#include -#include - -#include - -#include -#include -#include -#include -#include -#include -#include - -#include - -#include - -MALLOC_DEFINE(M_IP6FW, "Ip6Fw/Ip6Acct", "Ip6Fw/Ip6Acct chain's"); - -static int fw6_debug = 0; -#ifdef IPV6FIREWALL_VERBOSE -static int fw6_verbose = 1; -#else -static int fw6_verbose = 0; -#endif -#ifdef IPV6FIREWALL_VERBOSE_LIMIT -static int fw6_verbose_limit = IPV6FIREWALL_VERBOSE_LIMIT; -#else -static int fw6_verbose_limit = 0; -#endif - -LIST_HEAD(ip6_fw_head, ip6_fw_chain) ip6_fw_chain; - -static void ip6fw_kev_post_msg(u_int32_t ); - -#ifdef SYSCTL_NODE -static int ip6fw_sysctl SYSCTL_HANDLER_ARGS; - -SYSCTL_DECL(_net_inet6_ip6); -SYSCTL_NODE(_net_inet6_ip6, OID_AUTO, fw, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "Firewall"); -SYSCTL_PROC(_net_inet6_ip6_fw, OID_AUTO, enable, - CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, - &ip6_fw_enable, 0, ip6fw_sysctl, "I", "Enable ip6fw"); -SYSCTL_INT(_net_inet6_ip6_fw, OID_AUTO, debug, CTLFLAG_RW | CTLFLAG_LOCKED, &fw6_debug, 0, ""); -SYSCTL_INT(_net_inet6_ip6_fw, OID_AUTO, verbose, CTLFLAG_RW | CTLFLAG_LOCKED, &fw6_verbose, 0, ""); -SYSCTL_INT(_net_inet6_ip6_fw, OID_AUTO, verbose_limit, CTLFLAG_RW | CTLFLAG_LOCKED, &fw6_verbose_limit, 0, ""); - -static int -ip6fw_sysctl SYSCTL_HANDLER_ARGS -{ -#pragma unused(arg1, arg2) - int error; - - error = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2, req); - if (error || !req->newptr) { - return error; - } - - ip6fw_kev_post_msg(KEV_IP6FW_ENABLE); - - return error; -} - -#endif - -#define dprintf(a) do { \ - if (fw6_debug) \ - printf a; \ - } while (0) -#define SNPARGS(buf, len) buf + len, sizeof(buf) > len ? 
sizeof(buf) - len : 0 - -static int add_entry6 __P((struct ip6_fw_head *chainptr, struct ip6_fw *frwl)); -static int del_entry6 __P((struct ip6_fw_head *chainptr, u_short number)); -static int zero_entry6 __P((struct ip6_fw *frwl)); -static struct ip6_fw *check_ip6fw_struct __P((struct ip6_fw *m)); -static int ip6opts_match __P((struct ip6_hdr **ip6, struct ip6_fw *f, - struct mbuf **m, - int *off, int *nxt, u_short *offset)); -static int port_match6 __P((u_short *portptr, int nports, u_short port, - int range_flag)); -static int tcp6flg_match __P((struct tcphdr *tcp6, struct ip6_fw *f)); -static int icmp6type_match __P((struct icmp6_hdr * icmp, struct ip6_fw * f)); -static void ip6fw_report __P((struct ip6_fw *f, struct ip6_hdr *ip6, - struct ifnet *rif, struct ifnet *oif, int off, int nxt)); - -static int ip6_fw_chk __P((struct ip6_hdr **pip6, - struct ifnet *oif, u_int16_t *cookie, struct mbuf **m)); -static int ip6_fw_ctl __P((struct sockopt *)); -static void cp_to_user_64( struct ip6_fw_64 *userrule_64, struct ip6_fw *rule); -static void cp_from_user_64( struct ip6_fw_64 *userrule_64, struct ip6_fw *rule); -static void cp_to_user_32( struct ip6_fw_32 *userrule_32, struct ip6_fw *rule); -static void cp_from_user_32( struct ip6_fw_32 *userrule_32, struct ip6_fw *rule); - -static char err_prefix[] = "ip6_fw_ctl:"; - -/* - * Returns 1 if the port is matched by the vector, 0 otherwise - */ -static -__inline int -port_match6(u_short *portptr, int nports, u_short port, int range_flag) -{ - if (!nports) { - return 1; - } - if (range_flag) { - if (portptr[0] <= port && port <= portptr[1]) { - return 1; - } - nports -= 2; - portptr += 2; - } - while (nports-- > 0) { - if (*portptr++ == port) { - return 1; - } - } - return 0; -} - -static int -tcp6flg_match(struct tcphdr *tcp6, struct ip6_fw *f) -{ - u_char flg_set, flg_clr; - - /* - * If an established connection is required, reject packets that - * have only SYN of RST|ACK|SYN set. Otherwise, fall through to - * other flag requirements. 
- */ - if ((f->fw_ipflg & IPV6_FW_IF_TCPEST) && - ((tcp6->th_flags & (IPV6_FW_TCPF_RST | IPV6_FW_TCPF_ACK | - IPV6_FW_TCPF_SYN)) == IPV6_FW_TCPF_SYN)) { - return 0; - } - - flg_set = tcp6->th_flags & f->fw_tcpf; - flg_clr = tcp6->th_flags & f->fw_tcpnf; - - if (flg_set != f->fw_tcpf) { - return 0; - } - if (flg_clr) { - return 0; - } - - return 1; -} - -static int -icmp6type_match(struct icmp6_hdr *icmp6, struct ip6_fw *f) -{ - int type; - - if (!(f->fw_flg & IPV6_FW_F_ICMPBIT)) { - return 1; - } - - type = icmp6->icmp6_type; - - /* check for matching type in the bitmap */ - if (type < IPV6_FW_ICMPTYPES_DIM * sizeof(unsigned) * 8 && - (f->fw_icmp6types[type / (sizeof(unsigned) * 8)] & - (1U << (type % (8 * sizeof(unsigned)))))) { - return 1; - } - - return 0; /* no match */ -} - -static int -is_icmp6_query(struct ip6_hdr *ip6, int off) -{ - const struct icmp6_hdr *icmp6; - int icmp6_type; - - icmp6 = (struct icmp6_hdr *)((caddr_t)ip6 + off); - icmp6_type = icmp6->icmp6_type; - - if (icmp6_type == ICMP6_ECHO_REQUEST || - icmp6_type == ICMP6_MEMBERSHIP_QUERY || - icmp6_type == ICMP6_WRUREQUEST || - icmp6_type == ICMP6_FQDN_QUERY || - icmp6_type == ICMP6_NI_QUERY) { - return 1; - } - - return 0; -} - -static int -ip6opts_match(struct ip6_hdr **pip6, struct ip6_fw *f, struct mbuf **m, - int *off, int *nxt, u_short *offset) -{ - int len; - struct ip6_hdr *ip6 = *pip6; - struct ip6_ext *ip6e; - u_char opts, nopts, nopts_sve; - - opts = f->fw_ip6opt; - nopts = nopts_sve = f->fw_ip6nopt; - - *nxt = ip6->ip6_nxt; - *off = sizeof(struct ip6_hdr); - len = ntohs(ip6->ip6_plen) + sizeof(struct ip6_hdr); - while (*off < len) { - ip6e = (struct ip6_ext *)((caddr_t) ip6 + *off); - if ((*m)->m_len < *off + sizeof(*ip6e)) { - goto opts_check; /* XXX */ - } - switch (*nxt) { - case IPPROTO_FRAGMENT: - if ((*m)->m_len >= *off + sizeof(struct ip6_frag)) { - struct ip6_frag *ip6f; - - ip6f = (struct ip6_frag *) ((caddr_t)ip6 + *off); - *offset = ip6f->ip6f_offlg & IP6F_OFF_MASK; - } - opts &= ~IPV6_FW_IP6OPT_FRAG; - nopts &= ~IPV6_FW_IP6OPT_FRAG; - *off += sizeof(struct ip6_frag); - break; - case IPPROTO_AH: - opts &= ~IPV6_FW_IP6OPT_AH; - nopts &= ~IPV6_FW_IP6OPT_AH; - *off += (ip6e->ip6e_len + 2) << 2; - break; - default: - switch (*nxt) { - case IPPROTO_HOPOPTS: - opts &= ~IPV6_FW_IP6OPT_HOPOPT; - nopts &= ~IPV6_FW_IP6OPT_HOPOPT; - break; - case IPPROTO_ROUTING: - opts &= ~IPV6_FW_IP6OPT_ROUTE; - nopts &= ~IPV6_FW_IP6OPT_ROUTE; - break; - case IPPROTO_ESP: - opts &= ~IPV6_FW_IP6OPT_ESP; - nopts &= ~IPV6_FW_IP6OPT_ESP; - break; - case IPPROTO_NONE: - opts &= ~IPV6_FW_IP6OPT_NONXT; - nopts &= ~IPV6_FW_IP6OPT_NONXT; - goto opts_check; - break; - case IPPROTO_DSTOPTS: - opts &= ~IPV6_FW_IP6OPT_OPTS; - nopts &= ~IPV6_FW_IP6OPT_OPTS; - break; - default: - goto opts_check; - break; - } - *off += (ip6e->ip6e_len + 1) << 3; - break; - } - *nxt = ip6e->ip6e_nxt; - } -opts_check: - if (f->fw_ip6opt == f->fw_ip6nopt) { /* XXX */ - return 1; - } - - if (opts == 0 && nopts == nopts_sve) { - return 1; - } else { - return 0; - } -} - -static -__inline int -iface_match(struct ifnet *ifp, union ip6_fw_if *ifu, int byname) -{ - /* Check by name or by IP address */ - if (byname) { - /* Check unit number (-1 is wildcard) */ - if (ifu->fu_via_if.unit != -1 - && ifp->if_unit != ifu->fu_via_if.unit) { - return 0; - } - /* Check name */ - if (strncmp(ifp->if_name, ifu->fu_via_if.name, IP6FW_IFNLEN)) { - return 0; - } - return 1; - } else if (!IN6_IS_ADDR_UNSPECIFIED(&ifu->fu_via_ip6)) { /* Zero == wildcard */ - struct ifaddr *ia; - 
- ifnet_lock_shared(ifp); - for (ia = ifp->if_addrlist.tqh_first; ia; - ia = ia->ifa_list.tqe_next) { - IFA_LOCK_SPIN(ia); - if (ia->ifa_addr->sa_family != AF_INET6) { - IFA_UNLOCK(ia); - continue; - } - if (!IN6_ARE_ADDR_EQUAL(&ifu->fu_via_ip6, - &(((struct sockaddr_in6 *) - (ia->ifa_addr))->sin6_addr))) { - IFA_UNLOCK(ia); - continue; - } - IFA_UNLOCK(ia); - ifnet_lock_done(ifp); - return 1; - } - ifnet_lock_done(ifp); - return 0; - } - return 1; -} - -static void -ip6fw_report(struct ip6_fw *f, struct ip6_hdr *ip6, - struct ifnet *rif, struct ifnet *oif, int off, int nxt) -{ - static int counter; - struct tcphdr *const tcp6 = (struct tcphdr *) ((caddr_t) ip6 + off); - struct udphdr *const udp = (struct udphdr *) ((caddr_t) ip6 + off); - struct icmp6_hdr *const icmp6 = (struct icmp6_hdr *) ((caddr_t) ip6 + off); - int count; - const char *action; - char action2[32], proto[102], name[18]; - int len; - - count = f ? f->fw_pcnt : ++counter; - if (fw6_verbose_limit != 0 && count > fw6_verbose_limit) { - return; - } - - /* Print command name */ - snprintf(SNPARGS(name, 0), "ip6fw: %d", f ? f->fw_number : -1); - - action = action2; - if (!f) { - action = "Refuse"; - } else { - switch (f->fw_flg & IPV6_FW_F_COMMAND) { - case IPV6_FW_F_DENY: - action = "Deny"; - break; - case IPV6_FW_F_REJECT: - if (f->fw_reject_code == IPV6_FW_REJECT_RST) { - action = "Reset"; - } else { - action = "Unreach"; - } - break; - case IPV6_FW_F_ACCEPT: - action = "Accept"; - break; - case IPV6_FW_F_COUNT: - action = "Count"; - break; - case IPV6_FW_F_DIVERT: - snprintf(SNPARGS(action2, 0), "Divert %d", - f->fw_divert_port); - break; - case IPV6_FW_F_TEE: - snprintf(SNPARGS(action2, 0), "Tee %d", - f->fw_divert_port); - break; - case IPV6_FW_F_SKIPTO: - snprintf(SNPARGS(action2, 0), "SkipTo %d", - f->fw_skipto_rule); - break; - default: - action = "UNKNOWN"; - break; - } - } - - switch (nxt) { - case IPPROTO_TCP: - len = scnprintf(SNPARGS(proto, 0), "TCP [%s]", - ip6_sprintf(&ip6->ip6_src)); - if (off > 0) { - len += scnprintf(SNPARGS(proto, len), ":%d ", - ntohs(tcp6->th_sport)); - } else { - len += scnprintf(SNPARGS(proto, len), " "); - } - len += scnprintf(SNPARGS(proto, len), "[%s]", - ip6_sprintf(&ip6->ip6_dst)); - if (off > 0) { - scnprintf(SNPARGS(proto, len), ":%d", - ntohs(tcp6->th_dport)); - } - break; - case IPPROTO_UDP: - len = scnprintf(SNPARGS(proto, 0), "UDP [%s]", - ip6_sprintf(&ip6->ip6_src)); - if (off > 0) { - len += scnprintf(SNPARGS(proto, len), ":%d ", - ntohs(udp->uh_sport)); - } else { - len += scnprintf(SNPARGS(proto, len), " "); - } - len += scnprintf(SNPARGS(proto, len), "[%s]", - ip6_sprintf(&ip6->ip6_dst)); - if (off > 0) { - scnprintf(SNPARGS(proto, len), ":%d", - ntohs(udp->uh_dport)); - } - break; - case IPPROTO_ICMPV6: - if (off > 0) { - len = scnprintf(SNPARGS(proto, 0), "IPV6-ICMP:%u.%u ", - icmp6->icmp6_type, icmp6->icmp6_code); - } else { - len = scnprintf(SNPARGS(proto, 0), "IPV6-ICMP "); - } - len += scnprintf(SNPARGS(proto, len), "[%s]", - ip6_sprintf(&ip6->ip6_src)); - scnprintf(SNPARGS(proto, len), " [%s]", - ip6_sprintf(&ip6->ip6_dst)); - break; - default: - len = scnprintf(SNPARGS(proto, 0), "P:%d [%s]", nxt, - ip6_sprintf(&ip6->ip6_src)); - scnprintf(SNPARGS(proto, len), " [%s]", - ip6_sprintf(&ip6->ip6_dst)); - break; - } - - if (oif) { - log(LOG_AUTHPRIV | LOG_INFO, "%s %s %s out via %s\n", - name, action, proto, if_name(oif)); - } else if (rif) { - log(LOG_AUTHPRIV | LOG_INFO, "%s %s %s in via %s\n", - name, action, proto, if_name(rif)); - } else { - log(LOG_AUTHPRIV | 
LOG_INFO, "%s %s %s", - name, action, proto); - } - if (fw6_verbose_limit != 0 && count == fw6_verbose_limit) { - log(LOG_AUTHPRIV | LOG_INFO, "ip6fw: limit reached on entry %d\n", - f ? f->fw_number : -1); - } -} - -/* - * Parameters: - * - * ip Pointer to packet header (struct ip6_hdr *) - * hlen Packet header length - * oif Outgoing interface, or NULL if packet is incoming - * #ifndef IP6FW_DIVERT_RESTART - * *cookie Ignore all divert/tee rules to this port (if non-zero) - * #else - * *cookie Skip up to the first rule past this rule number; - * #endif - * *m The packet; we set to NULL when/if we nuke it. - * - * Return value: - * - * 0 The packet is to be accepted and routed normally OR - * the packet was denied/rejected and has been dropped; - * in the latter case, *m is equal to NULL upon return. - * port Divert the packet to port. - */ - -static int -ip6_fw_chk(struct ip6_hdr **pip6, - struct ifnet *oif, u_int16_t *cookie, struct mbuf **m) -{ - struct ip6_fw_chain *chain; - struct ip6_fw *rule = NULL; - struct ip6_hdr *ip6 = *pip6; - struct ifnet *const rif = ((*m)->m_flags & M_LOOP) ? lo_ifp : (*m)->m_pkthdr.rcvif; - u_short offset = 0; - int off = sizeof(struct ip6_hdr), nxt = ip6->ip6_nxt; - u_short src_port, dst_port; -#ifdef IP6FW_DIVERT_RESTART - u_int16_t skipto = *cookie; -#else - u_int16_t ignport = ntohs(*cookie); -#endif - struct timeval timenow; - struct tcp_respond_args tra; - - getmicrotime(&timenow); - - *cookie = 0; - /* - * Go down the chain, looking for enlightment - * #ifdef IP6FW_DIVERT_RESTART - * If we've been asked to start at a given rule immediatly, do so. - * #endif - */ - chain = LIST_FIRST(&ip6_fw_chain); -#ifdef IP6FW_DIVERT_RESTART - if (skipto) { - if (skipto >= 65535) { - goto dropit; - } - while (chain && (chain->rule->fw_number <= skipto)) { - chain = LIST_NEXT(chain, chain); - } - if (!chain) { - goto dropit; - } - } -#endif /* IP6FW_DIVERT_RESTART */ - for (; chain; chain = LIST_NEXT(chain, chain)) { - struct ip6_fw *const f = chain->rule; - - if (oif) { - /* Check direction outbound */ - if (!(f->fw_flg & IPV6_FW_F_OUT)) { - continue; - } - } else { - /* Check direction inbound */ - if (!(f->fw_flg & IPV6_FW_F_IN)) { - continue; - } - } - -#define IN6_ARE_ADDR_MASKEQUAL(x, y, z) (\ - (((x)->s6_addr32[0] & (y)->s6_addr32[0]) == (z)->s6_addr32[0]) && \ - (((x)->s6_addr32[1] & (y)->s6_addr32[1]) == (z)->s6_addr32[1]) && \ - (((x)->s6_addr32[2] & (y)->s6_addr32[2]) == (z)->s6_addr32[2]) && \ - (((x)->s6_addr32[3] & (y)->s6_addr32[3]) == (z)->s6_addr32[3])) - - /* If src-addr doesn't match, not this rule. */ - if (((f->fw_flg & IPV6_FW_F_INVSRC) != 0) ^ - (!IN6_ARE_ADDR_MASKEQUAL(&ip6->ip6_src, &f->fw_smsk, &f->fw_src))) { - continue; - } - - /* If dest-addr doesn't match, not this rule. */ - if (((f->fw_flg & IPV6_FW_F_INVDST) != 0) ^ - (!IN6_ARE_ADDR_MASKEQUAL(&ip6->ip6_dst, &f->fw_dmsk, &f->fw_dst))) { - continue; - } - -#undef IN6_ARE_ADDR_MASKEQUAL - /* Interface check */ - if ((f->fw_flg & IF6_FW_F_VIAHACK) == IF6_FW_F_VIAHACK) { - struct ifnet *const iface = oif ? 
oif : rif; - - /* Backwards compatibility hack for "via" */ - if (!iface || !iface_match(iface, - &f->fw_in_if, f->fw_flg & IPV6_FW_F_OIFNAME)) { - continue; - } - } else { - /* Check receive interface */ - if ((f->fw_flg & IPV6_FW_F_IIFACE) - && (!rif || !iface_match(rif, - &f->fw_in_if, f->fw_flg & IPV6_FW_F_IIFNAME))) { - continue; - } - /* Check outgoing interface */ - if ((f->fw_flg & IPV6_FW_F_OIFACE) - && (!oif || !iface_match(oif, - &f->fw_out_if, f->fw_flg & IPV6_FW_F_OIFNAME))) { - continue; - } - } - - /* Check IP options */ - if (!ip6opts_match(&ip6, f, m, &off, &nxt, &offset)) { - continue; - } - - /* Fragments */ - if ((f->fw_flg & IPV6_FW_F_FRAG) && !offset) { - continue; - } - - /* Check protocol; if wildcard, match */ - if (f->fw_prot == IPPROTO_IPV6) { - goto got_match; - } - - /* If different, don't match */ - if (nxt != f->fw_prot) { - continue; - } - -#define PULLUP_TO(len) do { \ - if ((*m)->m_len < (len) \ - && (*m = m_pullup(*m, (len))) == 0) { \ - goto dropit; \ - } \ - *pip6 = ip6 = mtod(*m, struct ip6_hdr *); \ - } while (0) - - /* Protocol specific checks */ - switch (nxt) { - case IPPROTO_TCP: - { - struct tcphdr *tcp6; - - if (offset == 1) { /* cf. RFC 1858 */ - PULLUP_TO(off + 4); /* XXX ? */ - goto bogusfrag; - } - if (offset != 0) { - /* - * TCP flags and ports aren't available in this - * packet -- if this rule specified either one, - * we consider the rule a non-match. - */ - if (f->fw_nports != 0 || - f->fw_tcpf != f->fw_tcpnf) { - continue; - } - - break; - } - PULLUP_TO(off + 14); - tcp6 = (struct tcphdr *) ((caddr_t)ip6 + off); - if (((f->fw_tcpf != f->fw_tcpnf) || - (f->fw_ipflg & IPV6_FW_IF_TCPEST)) && - !tcp6flg_match(tcp6, f)) { - continue; - } - src_port = ntohs(tcp6->th_sport); - dst_port = ntohs(tcp6->th_dport); - goto check_ports; - } - - case IPPROTO_UDP: - { - struct udphdr *udp; - - if (offset != 0) { - /* - * Port specification is unavailable -- if this - * rule specifies a port, we consider the rule - * a non-match. 
- */ - if (f->fw_nports != 0) { - continue; - } - - break; - } - PULLUP_TO(off + 4); - udp = (struct udphdr *) ((caddr_t)ip6 + off); - src_port = ntohs(udp->uh_sport); - dst_port = ntohs(udp->uh_dport); -check_ports: - if (!port_match6(&f->fw_pts[0], - IPV6_FW_GETNSRCP(f), src_port, - f->fw_flg & IPV6_FW_F_SRNG)) { - continue; - } - if (!port_match6(&f->fw_pts[IPV6_FW_GETNSRCP(f)], - IPV6_FW_GETNDSTP(f), dst_port, - f->fw_flg & IPV6_FW_F_DRNG)) { - continue; - } - break; - } - - case IPPROTO_ICMPV6: - { - struct icmp6_hdr *icmp; - - if (offset != 0) { /* Type isn't valid */ - break; - } - PULLUP_TO(off + 2); - icmp = (struct icmp6_hdr *) ((caddr_t)ip6 + off); - if (!icmp6type_match(icmp, f)) { - continue; - } - break; - } -#undef PULLUP_TO - -bogusfrag: - if (fw6_verbose) { - ip6fw_report(NULL, ip6, rif, oif, off, nxt); - } - goto dropit; - } - -got_match: -#ifndef IP6FW_DIVERT_RESTART - /* Ignore divert/tee rule if socket port is "ignport" */ - switch (f->fw_flg & IPV6_FW_F_COMMAND) { - case IPV6_FW_F_DIVERT: - case IPV6_FW_F_TEE: - if (f->fw_divert_port == ignport) { - continue; /* ignore this rule */ - } - break; - } - -#endif /* IP6FW_DIVERT_RESTART */ - /* Update statistics */ - f->fw_pcnt += 1; - f->fw_bcnt += ntohs(ip6->ip6_plen); - f->timestamp = timenow.tv_sec; - - /* Log to console if desired */ - if ((f->fw_flg & IPV6_FW_F_PRN) && fw6_verbose) { - ip6fw_report(f, ip6, rif, oif, off, nxt); - } - - /* Take appropriate action */ - switch (f->fw_flg & IPV6_FW_F_COMMAND) { - case IPV6_FW_F_ACCEPT: - return 0; - case IPV6_FW_F_COUNT: - continue; - case IPV6_FW_F_DIVERT: -#ifdef IP6FW_DIVERT_RESTART - *cookie = f->fw_number; -#else - *cookie = htons(f->fw_divert_port); -#endif /* IP6FW_DIVERT_RESTART */ - return f->fw_divert_port; - case IPV6_FW_F_TEE: - /* - * XXX someday tee packet here, but beware that you - * can't use m_copym() or m_copypacket() because - * the divert input routine modifies the mbuf - * (and these routines only increment reference - * counts in the case of mbuf clusters), so need - * to write custom routine. - */ - continue; - case IPV6_FW_F_SKIPTO: -#ifdef DIAGNOSTIC - while (chain->chain.le_next - && chain->chain.le_next->rule->fw_number - < f->fw_skipto_rule) -#else - while (chain->chain.le_next->rule->fw_number - < f->fw_skipto_rule) -#endif - { chain = chain->chain.le_next;} - continue; - } - - /* Deny/reject this packet using this rule */ - rule = f; - break; - } - -#ifdef DIAGNOSTIC - /* Rule 65535 should always be there and should always match */ - if (!chain) { - panic("ip6_fw: chain"); - } -#endif - - /* - * At this point, we're going to drop the packet. 
- * Send a reject notice if all of the following are true: - * - * - The packet matched a reject rule - * - The packet is not an ICMP packet, or is an ICMP query packet - * - The packet is not a multicast or broadcast packet - */ - if ((rule->fw_flg & IPV6_FW_F_COMMAND) == IPV6_FW_F_REJECT - && (nxt != IPPROTO_ICMPV6 || is_icmp6_query(ip6, off)) - && !((*m)->m_flags & (M_BCAST | M_MCAST)) - && !IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst)) { - switch (rule->fw_reject_code) { - case IPV6_FW_REJECT_RST: - { - struct tcphdr *const tcp = - (struct tcphdr *) ((caddr_t)ip6 + off); - struct { - struct ip6_hdr ip6; - struct tcphdr th; - } ti; - tcp_seq ack, seq; - int flags; - - if (offset != 0 || (tcp->th_flags & TH_RST)) { - break; - } - - ti.ip6 = *ip6; - ti.th = *tcp; - ti.th.th_seq = ntohl(ti.th.th_seq); - ti.th.th_ack = ntohl(ti.th.th_ack); - ti.ip6.ip6_nxt = IPPROTO_TCP; - if (ti.th.th_flags & TH_ACK) { - ack = 0; - seq = ti.th.th_ack; - flags = TH_RST; - } else { - ack = ti.th.th_seq; - if (((*m)->m_flags & M_PKTHDR) != 0) { - ack += (*m)->m_pkthdr.len - off - - (ti.th.th_off << 2); - } else if (ip6->ip6_plen) { - ack += ntohs(ip6->ip6_plen) + sizeof(*ip6) - - off - (ti.th.th_off << 2); - } else { - m_freem(*m); - *m = 0; - break; - } - seq = 0; - flags = TH_RST | TH_ACK; - } - bcopy(&ti, ip6, sizeof(ti)); - bzero(&tra, sizeof(tra)); - tra.ifscope = IFSCOPE_NONE; - tra.awdl_unrestricted = 1; - tcp_respond(NULL, ip6, (struct tcphdr *)(ip6 + 1), - *m, ack, seq, flags, &tra); - *m = NULL; - break; - } - default: /* Send an ICMP unreachable using code */ - if (oif) { - (*m)->m_pkthdr.rcvif = oif; - } - icmp6_error(*m, ICMP6_DST_UNREACH, - rule->fw_reject_code, 0); - *m = NULL; - break; - } - } - -dropit: - /* - * Finally, drop the packet. - */ - if (*m) { - m_freem(*m); - *m = NULL; - } - return 0; -} - -static int -add_entry6(struct ip6_fw_head *chainptr, struct ip6_fw *frwl) -{ - struct ip6_fw *ftmp = 0; - struct ip6_fw_chain *fwc = 0, *fcp, *fcpl = 0; - u_short nbr = 0; - - fwc = _MALLOC(sizeof *fwc, M_IP6FW, M_WAITOK); - ftmp = _MALLOC(sizeof *ftmp, M_IP6FW, M_WAITOK); - if (!fwc || !ftmp) { - dprintf(("%s malloc said no\n", err_prefix)); - if (fwc) { - FREE(fwc, M_IP6FW); - } - if (ftmp) { - FREE(ftmp, M_IP6FW); - } - return ENOSPC; - } - - bcopy(frwl, ftmp, sizeof(struct ip6_fw)); - ftmp->fw_in_if.fu_via_if.name[IP6FW_IFNLEN - 1] = '\0'; - ftmp->fw_pcnt = 0L; - ftmp->fw_bcnt = 0L; - fwc->rule = ftmp; - - if (!chainptr->lh_first) { - LIST_INSERT_HEAD(chainptr, fwc, chain); - return 0; - } else if (ftmp->fw_number == (u_short) - 1) { - if (fwc) { - FREE(fwc, M_IP6FW); - } - if (ftmp) { - FREE(ftmp, M_IP6FW); - } - dprintf(("%s bad rule number\n", err_prefix)); - return EINVAL; - } - - /* If entry number is 0, find highest numbered rule and add 100 */ - if (ftmp->fw_number == 0) { - for (fcp = chainptr->lh_first; fcp; fcp = fcp->chain.le_next) { - if (fcp->rule->fw_number != (u_short) - 1) { - nbr = fcp->rule->fw_number; - } else { - break; - } - } - if (nbr < (u_short) - 1 - 100) { - nbr += 100; - } - ftmp->fw_number = nbr; - } - - /* Got a valid number; now insert it, keeping the list ordered */ - for (fcp = chainptr->lh_first; fcp; fcp = fcp->chain.le_next) { - if (fcp->rule->fw_number > ftmp->fw_number) { - if (fcpl) { - LIST_INSERT_AFTER(fcpl, fwc, chain); - } else { - LIST_INSERT_HEAD(chainptr, fwc, chain); - } - break; - } else { - fcpl = fcp; - } - } - - bcopy(ftmp, frwl, sizeof(struct ip6_fw)); - return 0; -} - -static int -del_entry6(struct ip6_fw_head *chainptr, u_short number) -{ - 
struct ip6_fw_chain *fcp; - - fcp = chainptr->lh_first; - if (number != (u_short) - 1) { - for (; fcp; fcp = fcp->chain.le_next) { - if (fcp->rule->fw_number == number) { - LIST_REMOVE(fcp, chain); - FREE(fcp->rule, M_IP6FW); - FREE(fcp, M_IP6FW); - return 0; - } - } - } - - return EINVAL; -} - -static int -zero_entry6(struct ip6_fw *frwl) -{ - struct ip6_fw_chain *fcp; - - /* - * It's possible to insert multiple chain entries with the - * same number, so we don't stop after finding the first - * match if zeroing a specific entry. - */ - for (fcp = ip6_fw_chain.lh_first; fcp; fcp = fcp->chain.le_next) { - if (!frwl || frwl->fw_number == 0 || frwl->fw_number == fcp->rule->fw_number) { - fcp->rule->fw_bcnt = fcp->rule->fw_pcnt = 0; - fcp->rule->timestamp = 0; - } - } - - if (fw6_verbose) { - if (frwl) { - log(LOG_AUTHPRIV | LOG_NOTICE, - "ip6fw: Entry %d cleared.\n", frwl->fw_number); - } else { - log(LOG_AUTHPRIV | LOG_NOTICE, - "ip6fw: Accounting cleared.\n"); - } - } - - return 0; -} - -static struct ip6_fw * -check_ip6fw_struct(struct ip6_fw *frwl) -{ - /* Check for invalid flag bits */ - if ((frwl->fw_flg & ~IPV6_FW_F_MASK) != 0) { - dprintf(("%s undefined flag bits set (flags=%x)\n", - err_prefix, frwl->fw_flg)); - return NULL; - } - /* Must apply to incoming or outgoing (or both) */ - if (!(frwl->fw_flg & (IPV6_FW_F_IN | IPV6_FW_F_OUT))) { - dprintf(("%s neither in nor out\n", err_prefix)); - return NULL; - } - /* Empty interface name is no good */ - if (((frwl->fw_flg & IPV6_FW_F_IIFNAME) - && !*frwl->fw_in_if.fu_via_if.name) - || ((frwl->fw_flg & IPV6_FW_F_OIFNAME) - && !*frwl->fw_out_if.fu_via_if.name)) { - dprintf(("%s empty interface name\n", err_prefix)); - return NULL; - } - /* Sanity check interface matching */ - if ((frwl->fw_flg & IF6_FW_F_VIAHACK) == IF6_FW_F_VIAHACK) { - ; /* allow "via" backwards compatibility */ - } else if ((frwl->fw_flg & IPV6_FW_F_IN) - && (frwl->fw_flg & IPV6_FW_F_OIFACE)) { - dprintf(("%s outgoing interface check on incoming\n", - err_prefix)); - return NULL; - } - /* Sanity check port ranges */ - if ((frwl->fw_flg & IPV6_FW_F_SRNG) && IPV6_FW_GETNSRCP(frwl) < 2) { - dprintf(("%s src range set but n_src_p=%d\n", - err_prefix, IPV6_FW_GETNSRCP(frwl))); - return NULL; - } - if ((frwl->fw_flg & IPV6_FW_F_DRNG) && IPV6_FW_GETNDSTP(frwl) < 2) { - dprintf(("%s dst range set but n_dst_p=%d\n", - err_prefix, IPV6_FW_GETNDSTP(frwl))); - return NULL; - } - if (IPV6_FW_GETNSRCP(frwl) + IPV6_FW_GETNDSTP(frwl) > IPV6_FW_MAX_PORTS) { - dprintf(("%s too many ports (%d+%d)\n", - err_prefix, IPV6_FW_GETNSRCP(frwl), IPV6_FW_GETNDSTP(frwl))); - return NULL; - } - /* - * Protocols other than TCP/UDP don't use port range - */ - if ((frwl->fw_prot != IPPROTO_TCP) && - (frwl->fw_prot != IPPROTO_UDP) && - (IPV6_FW_GETNSRCP(frwl) || IPV6_FW_GETNDSTP(frwl))) { - dprintf(("%s port(s) specified for non TCP/UDP rule\n", - err_prefix)); - return NULL; - } - - /* - * Rather than modify the entry to make such entries work, - * we reject this rule and require user level utilities - * to enforce whatever policy they deem appropriate. 
- */ - if ((frwl->fw_src.s6_addr32[0] & (~frwl->fw_smsk.s6_addr32[0])) || - (frwl->fw_src.s6_addr32[1] & (~frwl->fw_smsk.s6_addr32[1])) || - (frwl->fw_src.s6_addr32[2] & (~frwl->fw_smsk.s6_addr32[2])) || - (frwl->fw_src.s6_addr32[3] & (~frwl->fw_smsk.s6_addr32[3])) || - (frwl->fw_dst.s6_addr32[0] & (~frwl->fw_dmsk.s6_addr32[0])) || - (frwl->fw_dst.s6_addr32[1] & (~frwl->fw_dmsk.s6_addr32[1])) || - (frwl->fw_dst.s6_addr32[2] & (~frwl->fw_dmsk.s6_addr32[2])) || - (frwl->fw_dst.s6_addr32[3] & (~frwl->fw_dmsk.s6_addr32[3]))) { - dprintf(("%s rule never matches\n", err_prefix)); - return NULL; - } - - if ((frwl->fw_flg & IPV6_FW_F_FRAG) && - (frwl->fw_prot == IPPROTO_UDP || frwl->fw_prot == IPPROTO_TCP)) { - if (frwl->fw_nports) { - dprintf(("%s cannot mix 'frag' and ports\n", err_prefix)); - return NULL; - } - if (frwl->fw_prot == IPPROTO_TCP && - frwl->fw_tcpf != frwl->fw_tcpnf) { - dprintf(("%s cannot mix 'frag' with TCP flags\n", err_prefix)); - return NULL; - } - } - - /* Check command specific stuff */ - switch (frwl->fw_flg & IPV6_FW_F_COMMAND) { - case IPV6_FW_F_REJECT: - if (frwl->fw_reject_code >= 0x100 - && !(frwl->fw_prot == IPPROTO_TCP - && frwl->fw_reject_code == IPV6_FW_REJECT_RST)) { - dprintf(("%s unknown reject code\n", err_prefix)); - return NULL; - } - break; - case IPV6_FW_F_DIVERT: /* Diverting to port zero is invalid */ - case IPV6_FW_F_TEE: - if (frwl->fw_divert_port == 0) { - dprintf(("%s can't divert to port 0\n", err_prefix)); - return NULL; - } - break; - case IPV6_FW_F_DENY: - case IPV6_FW_F_ACCEPT: - case IPV6_FW_F_COUNT: - case IPV6_FW_F_SKIPTO: - break; - default: - dprintf(("%s invalid command\n", err_prefix)); - return NULL; - } - - return frwl; -} - -static void -ip6fw_kev_post_msg(u_int32_t event_code) -{ - struct kev_msg ev_msg; - - bzero(&ev_msg, sizeof(struct kev_msg)); - - ev_msg.vendor_code = KEV_VENDOR_APPLE; - ev_msg.kev_class = KEV_FIREWALL_CLASS; - ev_msg.kev_subclass = KEV_IP6FW_SUBCLASS; - ev_msg.event_code = event_code; - - kev_post_msg(&ev_msg); -} - - -static void -cp_to_user_64( struct ip6_fw_64 *userrule_64, struct ip6_fw *rule) -{ - userrule_64->version = rule->version; - userrule_64->context = CAST_USER_ADDR_T(rule->context); - userrule_64->fw_pcnt = rule->fw_pcnt; - userrule_64->fw_bcnt = rule->fw_bcnt; - userrule_64->fw_src = rule->fw_src; - userrule_64->fw_dst = rule->fw_dst; - userrule_64->fw_smsk = rule->fw_smsk; - userrule_64->fw_dmsk = rule->fw_dmsk; - userrule_64->fw_number = rule->fw_number; - userrule_64->fw_flg = rule->fw_flg; - userrule_64->fw_ipflg = rule->fw_ipflg; - bcopy( rule->fw_pts, userrule_64->fw_pts, IPV6_FW_MAX_PORTS); - userrule_64->fw_ip6opt = rule->fw_ip6opt; - userrule_64->fw_ip6nopt = rule->fw_ip6nopt; - userrule_64->fw_tcpf = rule->fw_tcpf; - userrule_64->fw_tcpnf = rule->fw_tcpnf; - bcopy( rule->fw_icmp6types, userrule_64->fw_icmp6types, sizeof(userrule_64->fw_icmp6types)); - userrule_64->fw_in_if = rule->fw_in_if; - userrule_64->fw_out_if = rule->fw_out_if; - userrule_64->timestamp = rule->timestamp; - userrule_64->fw_un.fu_divert_port = rule->fw_un.fu_divert_port; - userrule_64->fw_prot = rule->fw_prot; - userrule_64->fw_nports = rule->fw_nports; -} - - -static void -cp_from_user_64( struct ip6_fw_64 *userrule_64, struct ip6_fw *rule) -{ - rule->version = userrule_64->version; - rule->context = CAST_DOWN(void *, userrule_64->context); - rule->fw_pcnt = userrule_64->fw_pcnt; - rule->fw_bcnt = userrule_64->fw_bcnt; - rule->fw_src = userrule_64->fw_src; - rule->fw_dst = userrule_64->fw_dst; - rule->fw_smsk = 
userrule_64->fw_smsk; - rule->fw_dmsk = userrule_64->fw_dmsk; - rule->fw_number = userrule_64->fw_number; - rule->fw_flg = userrule_64->fw_flg; - rule->fw_ipflg = userrule_64->fw_ipflg; - bcopy( userrule_64->fw_pts, rule->fw_pts, IPV6_FW_MAX_PORTS); - rule->fw_ip6opt = userrule_64->fw_ip6opt; - rule->fw_ip6nopt = userrule_64->fw_ip6nopt; - rule->fw_tcpf = userrule_64->fw_tcpf; - rule->fw_tcpnf = userrule_64->fw_tcpnf; - bcopy( userrule_64->fw_icmp6types, rule->fw_icmp6types, sizeof(userrule_64->fw_icmp6types)); - rule->fw_in_if = userrule_64->fw_in_if; - rule->fw_out_if = userrule_64->fw_out_if; - rule->timestamp = CAST_DOWN( long, userrule_64->timestamp); - rule->fw_un.fu_divert_port = userrule_64->fw_un.fu_divert_port; - rule->fw_prot = userrule_64->fw_prot; - rule->fw_nports = userrule_64->fw_nports; -} - - -static void -cp_to_user_32( struct ip6_fw_32 *userrule_32, struct ip6_fw *rule) -{ - userrule_32->version = rule->version; - userrule_32->context = CAST_DOWN_EXPLICIT( user32_addr_t, rule->context); - userrule_32->fw_pcnt = rule->fw_pcnt; - userrule_32->fw_bcnt = rule->fw_bcnt; - userrule_32->fw_src = rule->fw_src; - userrule_32->fw_dst = rule->fw_dst; - userrule_32->fw_smsk = rule->fw_smsk; - userrule_32->fw_dmsk = rule->fw_dmsk; - userrule_32->fw_number = rule->fw_number; - userrule_32->fw_flg = rule->fw_flg; - userrule_32->fw_ipflg = rule->fw_ipflg; - bcopy( rule->fw_pts, userrule_32->fw_pts, IPV6_FW_MAX_PORTS); - userrule_32->fw_ip6opt = rule->fw_ip6opt; - userrule_32->fw_ip6nopt = rule->fw_ip6nopt; - userrule_32->fw_tcpf = rule->fw_tcpf; - userrule_32->fw_tcpnf = rule->fw_tcpnf; - bcopy( rule->fw_icmp6types, userrule_32->fw_icmp6types, sizeof(rule->fw_icmp6types)); - userrule_32->fw_in_if = rule->fw_in_if; - userrule_32->fw_out_if = rule->fw_out_if; - userrule_32->timestamp = rule->timestamp; - userrule_32->fw_un.fu_divert_port = rule->fw_un.fu_divert_port; - userrule_32->fw_prot = rule->fw_prot; - userrule_32->fw_nports = rule->fw_nports; -} - - -static void -cp_from_user_32( struct ip6_fw_32 *userrule_32, struct ip6_fw *rule) -{ - rule->version = userrule_32->version; - rule->context = CAST_DOWN(void *, userrule_32->context); - rule->fw_pcnt = userrule_32->fw_pcnt; - rule->fw_bcnt = userrule_32->fw_bcnt; - rule->fw_src = userrule_32->fw_src; - rule->fw_dst = userrule_32->fw_dst; - rule->fw_smsk = userrule_32->fw_smsk; - rule->fw_dmsk = userrule_32->fw_dmsk; - rule->fw_number = userrule_32->fw_number; - rule->fw_flg = userrule_32->fw_flg; - rule->fw_ipflg = userrule_32->fw_ipflg; - bcopy( userrule_32->fw_pts, rule->fw_pts, IPV6_FW_MAX_PORTS); - rule->fw_ip6opt = userrule_32->fw_ip6opt; - rule->fw_ip6nopt = userrule_32->fw_ip6nopt; - rule->fw_tcpf = userrule_32->fw_tcpf; - rule->fw_tcpnf = userrule_32->fw_tcpnf; - bcopy( userrule_32->fw_icmp6types, rule->fw_icmp6types, sizeof(userrule_32->fw_icmp6types)); - rule->fw_in_if = userrule_32->fw_in_if; - rule->fw_out_if = userrule_32->fw_out_if; - rule->timestamp = CAST_DOWN(long, userrule_32->timestamp); - rule->fw_un.fu_divert_port = userrule_32->fw_un.fu_divert_port; - rule->fw_prot = userrule_32->fw_prot; - rule->fw_nports = userrule_32->fw_nports; -} - -static int -ip6_fw_ctl(struct sockopt *sopt) -{ - int error = 0; - int valsize; - struct ip6_fw rule; - int is64user = 0; - size_t userrulesize; - - if (securelevel >= 3 && - (sopt->sopt_dir != SOPT_GET || sopt->sopt_name != IPV6_FW_GET)) { - return EPERM; - } - - if (proc_is64bit(sopt->sopt_p)) { - is64user = 1; - userrulesize = sizeof(struct ip6_fw_64); - } else { - 
userrulesize = sizeof(struct ip6_fw_32); - } - - /* We ALWAYS expect the client to pass in a rule structure so that we can - * check the version of the API that they are using. In the case of a - * IPV6_FW_GET operation, the first rule of the output buffer passed to us - * must have the version set. */ - if (!sopt->sopt_val || sopt->sopt_valsize < userrulesize) { - return EINVAL; - } - - /* save sopt->sopt_valsize */ - valsize = sopt->sopt_valsize; - - if (is64user) { - struct ip6_fw_64 userrule_64; - - if ((error = sooptcopyin(sopt, &userrule_64, userrulesize, userrulesize))) { - return error; - } - - cp_from_user_64( &userrule_64, &rule ); - } else { - struct ip6_fw_32 userrule_32; - - if ((error = sooptcopyin(sopt, &userrule_32, userrulesize, userrulesize))) { - return error; - } - - cp_from_user_32( &userrule_32, &rule ); - } - - if (rule.version != IPV6_FW_CURRENT_API_VERSION) { - return EINVAL; - } - rule.version = 0xFFFFFFFF; /* version is meaningless once rules "make it in the door". */ - - switch (sopt->sopt_name) { - case IPV6_FW_GET: - { - struct ip6_fw_chain *fcp; - struct ip6_fw *buf; - size_t size = 0; - size_t rulesize = 0; - - if (is64user) { - rulesize = sizeof(struct ip6_fw_64); - } else { - rulesize = sizeof(struct ip6_fw_32); - } - - LIST_FOREACH(fcp, &ip6_fw_chain, chain) - size += rulesize; - - buf = _MALLOC(size, M_TEMP, M_WAITOK); - if (!buf) { - error = ENOBUFS; - } else { - //struct ip6_fw *bp = buf; - caddr_t bp = (caddr_t)buf; - - LIST_FOREACH(fcp, &ip6_fw_chain, chain) - { - //bcopy(fcp->rule, bp, sizeof *bp); - if (is64user) { - cp_to_user_64((struct ip6_fw_64*)bp, fcp->rule); - } else { - cp_to_user_32((struct ip6_fw_32*)bp, fcp->rule); - } - - ((struct ip6_fw*)bp)->version = IPV6_FW_CURRENT_API_VERSION; - //bp++; - bp += rulesize; - } - } - - if (buf) { - sopt->sopt_valsize = valsize; - error = sooptcopyout(sopt, buf, size); - FREE(buf, M_TEMP); - } - - break; - } - - case IPV6_FW_FLUSH: - while (ip6_fw_chain.lh_first && - ip6_fw_chain.lh_first->rule->fw_number != (u_short) - 1) { - struct ip6_fw_chain *fcp = ip6_fw_chain.lh_first; - LIST_REMOVE(ip6_fw_chain.lh_first, chain); - FREE(fcp->rule, M_IP6FW); - FREE(fcp, M_IP6FW); - } - ip6fw_kev_post_msg(KEV_IP6FW_FLUSH); - break; - - case IPV6_FW_ZERO: - error = zero_entry6(&rule); - break; - - case IPV6_FW_ADD: - if (check_ip6fw_struct(&rule)) { - error = add_entry6(&ip6_fw_chain, &rule); - - ip6fw_kev_post_msg(KEV_IP6FW_ADD); - } else { - error = EINVAL; - } - - if (is64user) { - struct ip6_fw_64 userrule_64; - cp_to_user_64( &userrule_64, &rule); - error = sooptcopyout(sopt, &userrule_64, userrulesize); - } else { - struct ip6_fw_32 userrule_32; - cp_to_user_32( &userrule_32, &rule); - error = sooptcopyout(sopt, &userrule_32, userrulesize); - } - break; - - case IPV6_FW_DEL: - if (rule.fw_number == (u_short) - 1) { - dprintf(("%s can't delete rule 65535\n", err_prefix)); - error = EINVAL; - } else { - error = del_entry6(&ip6_fw_chain, rule.fw_number); - - ip6fw_kev_post_msg(KEV_IP6FW_DEL); - } - break; - - default: - dprintf(("%s invalid option %d\n", err_prefix, sopt->sopt_name)); - error = EINVAL; - } - - return error; -} - -void -ip6_fw_init(void) -{ - struct ip6_fw default_rule; - - ip6_fw_chk_ptr = ip6_fw_chk; - ip6_fw_ctl_ptr = ip6_fw_ctl; - LIST_INIT(&ip6_fw_chain); - - bzero(&default_rule, sizeof default_rule); - default_rule.fw_prot = IPPROTO_IPV6; - default_rule.fw_number = (u_short) - 1; -#ifdef IPV6FIREWALL_DEFAULT_TO_ACCEPT - default_rule.fw_flg |= IPV6_FW_F_ACCEPT; -#else - default_rule.fw_flg 
|= IPV6_FW_F_DENY; -#endif - default_rule.fw_flg |= IPV6_FW_F_IN | IPV6_FW_F_OUT; - if (check_ip6fw_struct(&default_rule) == NULL || - add_entry6(&ip6_fw_chain, &default_rule)) { - panic("%s", __FUNCTION__); - } - - printf("IPv6 packet filtering initialized, "); -#ifdef IPV6FIREWALL_DEFAULT_TO_ACCEPT - printf("default to accept, "); -#endif -#ifndef IPV6FIREWALL_VERBOSE - printf("logging disabled\n"); -#else - if (fw6_verbose_limit == 0) { - printf("unlimited logging\n"); - } else { - printf("logging limited to %d packets/entry\n", - fw6_verbose_limit); - } -#endif -} diff --git a/bsd/netinet6/ip6_fw.h b/bsd/netinet6/ip6_fw.h deleted file mode 100644 index 81757354b..000000000 --- a/bsd/netinet6/ip6_fw.h +++ /dev/null @@ -1,348 +0,0 @@ -/* - * Copyright (c) 2008-2013 Apple Inc. All rights reserved. - * - * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * - * This file contains Original Code and/or Modifications of Original Code - * as defined in and that are subject to the Apple Public Source License - * Version 2.0 (the 'License'). You may not use this file except in - * compliance with the License. The rights granted to you under the License - * may not be used to create, or enable the creation or redistribution of, - * unlawful or unlicensed copies of an Apple operating system, or to - * circumvent, violate, or enable the circumvention or violation of, any - * terms of an Apple operating system software license agreement. - * - * Please obtain a copy of the License at - * http://www.opensource.apple.com/apsl/ and read it before using this file. - * - * The Original Code and all software distributed under the License are - * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER - * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, - * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. - * Please see the License for the specific language governing rights and - * limitations under the License. - * - * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ - */ -/* - * Copyright (c) 1993 Daniel Boulet - * Copyright (c) 1994 Ugen J.S.Antsilevich - * - * Redistribution and use in source forms, with and without modification, - * are permitted provided that this entire comment appears intact. - * - * Redistribution in binary form may occur without any restrictions. - * Obviously, it would be nice if you gave credit where credit is due - * but requiring it would be too onerous. - * - * This software is provided ``AS IS'' without any warranties of any kind. - * - */ - -#ifndef _IP6_FW_H -#define _IP6_FW_H -#ifdef __APPLE_API_OBSOLETE - -#include - -/* - * Define IPv6 Firewall event subclass, and associated events. - */ - -/*! - * @defined KEV_IP6FW_SUBCLASS - * @discussion The kernel event subclass for IPv6 Firewall. - */ -#define KEV_IP6FW_SUBCLASS 2 - -/*! - * @defined KEV_IP6FW_ADD - * @discussion The event code indicating a rule has been added. - */ -#define KEV_IP6FW_ADD 1 - -/*! - * @defined KEV_IP6FW_DEL - * @discussion The event code indicating a rule has been removed. - */ -#define KEV_IP6FW_DEL 2 - -/*! - * @defined KEV_IP6FW_FLUSH - * @discussion The event code indicating the rule set has been flushed. - */ -#define KEV_IP6FW_FLUSH 3 - -/*! 
- * @defined KEV_IP6FW_FLUSH - * @discussion The event code indicating the enable flag has been changed - */ -#define KEV_IP6FW_ENABLE 4 - - -#include - -#define IPV6_FW_CURRENT_API_VERSION 20 /* Version of this API */ - - -/* - * This union structure identifies an interface, either explicitly - * by name or implicitly by IP address. The flags IP_FW_F_IIFNAME - * and IP_FW_F_OIFNAME say how to interpret this structure. An - * interface unit number of -1 matches any unit number, while an - * IP address of 0.0.0.0 indicates matches any interface. - * - * The receive and transmit interfaces are only compared against the - * the packet if the corresponding bit (IP_FW_F_IIFACE or IP_FW_F_OIFACE) - * is set. Note some packets lack a receive or transmit interface - * (in which case the missing "interface" never matches). - */ - -union ip6_fw_if { - struct in6_addr fu_via_ip6; /* Specified by IPv6 address */ - struct { /* Specified by interface name */ -#define IP6FW_IFNLEN IFNAMSIZ - char name[IP6FW_IFNLEN]; - short unit; /* -1 means match any unit */ - } fu_via_if; -}; - -/* - * Format of an IP firewall descriptor - * - * fw_src, fw_dst, fw_smsk, fw_dmsk are always stored in network byte order. - * fw_flg and fw_n*p are stored in host byte order (of course). - * Port numbers are stored in HOST byte order. - * Warning: setsockopt() will fail if sizeof(struct ip_fw) > MLEN (108) - */ - - -struct ip6_fw { - u_int32_t version; /* Version of this structure. Should always be */ - /* set to IP6_FW_CURRENT_API_VERSION by clients. */ - void *context; /* Context that is usable by user processes to */ - /* identify this rule. */ - u_int32_t fw_pcnt, fw_bcnt; /* Packet and byte counters */ - struct in6_addr fw_src, fw_dst; /* Source and destination IPv6 addr */ - struct in6_addr fw_smsk, fw_dmsk; /* Mask for src and dest IPv6 addr */ - u_short fw_number; /* Rule number */ - u_short fw_flg; /* Flags word */ -#define IPV6_FW_MAX_PORTS 10 /* A reasonable maximum */ - u_int fw_ipflg; /* IP flags word */ - u_short fw_pts[IPV6_FW_MAX_PORTS]; /* Array of port numbers to match */ - u_char fw_ip6opt, fw_ip6nopt; /* IPv6 options set/unset */ - u_char fw_tcpf, fw_tcpnf; /* TCP flags set/unset */ -#define IPV6_FW_ICMPTYPES_DIM (256 / (sizeof(unsigned) * 8)) - unsigned fw_icmp6types[IPV6_FW_ICMPTYPES_DIM]; /* ICMP types bitmap */ - long timestamp; /* timestamp (tv_sec) of last match */ - union ip6_fw_if fw_in_if, fw_out_if;/* Incoming and outgoing interfaces */ - union { - u_short fu_divert_port; /* Divert/tee port (options IP6DIVERT) */ - u_short fu_skipto_rule; /* SKIPTO command rule number */ - u_short fu_reject_code; /* REJECT response code */ - } fw_un; - u_char fw_prot; /* IPv6 protocol */ - u_char fw_nports; /* N'of src ports and # of dst ports */ - /* in ports array (dst ports follow */ - /* src ports; max of 10 ports in all; */ - /* count of 0 means match all ports) */ -}; - -#if defined(BSD_KERNEL_PRIVATE) -#pragma pack(4) - -struct ip6_fw_32 { - u_int32_t version; /* Version of this structure. Should always be */ - /* set to IP6_FW_CURRENT_API_VERSION by clients. */ - user32_addr_t context; /* Context that is usable by user processes to */ - /* identify this rule. 
*/ - u_int32_t fw_pcnt, fw_bcnt; /* Packet and byte counters */ - struct in6_addr fw_src, fw_dst; /* Source and destination IPv6 addr */ - struct in6_addr fw_smsk, fw_dmsk; /* Mask for src and dest IPv6 addr */ - u_short fw_number; /* Rule number */ - u_short fw_flg; /* Flags word */ -#define IPV6_FW_MAX_PORTS 10 /* A reasonable maximum */ - u_int fw_ipflg; /* IP flags word */ - u_short fw_pts[IPV6_FW_MAX_PORTS]; /* Array of port numbers to match */ - u_char fw_ip6opt, fw_ip6nopt; /* IPv6 options set/unset */ - u_char fw_tcpf, fw_tcpnf; /* TCP flags set/unset */ -#define IPV6_FW_ICMPTYPES_DIM (256 / (sizeof(unsigned) * 8)) - unsigned fw_icmp6types[IPV6_FW_ICMPTYPES_DIM]; /* ICMP types bitmap */ - user32_time_t timestamp; /* timestamp (tv_sec) of last match */ - union ip6_fw_if fw_in_if, fw_out_if;/* Incoming and outgoing interfaces */ - union { - u_short fu_divert_port; /* Divert/tee port (options IP6DIVERT) */ - u_short fu_skipto_rule; /* SKIPTO command rule number */ - u_short fu_reject_code; /* REJECT response code */ - } fw_un; - u_char fw_prot; /* IPv6 protocol */ - u_char fw_nports; /* N'of src ports and # of dst ports */ - /* in ports array (dst ports follow */ - /* src ports; max of 10 ports in all; */ - /* count of 0 means match all ports) */ -}; - -#pragma pack() - -struct ip6_fw_64 { - u_int32_t version; /* Version of this structure. Should always be */ - /* set to IP6_FW_CURRENT_API_VERSION by clients. */ - __uint64_t context __attribute__((aligned(8))); /* Context that is usable by user processes to */ - /* identify this rule. */ - u_int32_t fw_pcnt, fw_bcnt; /* Packet and byte counters */ - struct in6_addr fw_src, fw_dst; /* Source and destination IPv6 addr */ - struct in6_addr fw_smsk, fw_dmsk; /* Mask for src and dest IPv6 addr */ - u_short fw_number; /* Rule number */ - u_short fw_flg; /* Flags word */ -#define IPV6_FW_MAX_PORTS 10 /* A reasonable maximum */ - u_int fw_ipflg; /* IP flags word */ - u_short fw_pts[IPV6_FW_MAX_PORTS]; /* Array of port numbers to match */ - u_char fw_ip6opt, fw_ip6nopt; /* IPv6 options set/unset */ - u_char fw_tcpf, fw_tcpnf; /* TCP flags set/unset */ -#define IPV6_FW_ICMPTYPES_DIM (256 / (sizeof(unsigned) * 8)) - unsigned fw_icmp6types[IPV6_FW_ICMPTYPES_DIM]; /* ICMP types bitmap */ - user64_time_t timestamp; /* timestamp (tv_sec) of last match */ - union ip6_fw_if fw_in_if, fw_out_if;/* Incoming and outgoing interfaces */ - union { - u_short fu_divert_port; /* Divert/tee port (options IP6DIVERT) */ - u_short fu_skipto_rule; /* SKIPTO command rule number */ - u_short fu_reject_code; /* REJECT response code */ - } fw_un; - u_char fw_prot; /* IPv6 protocol */ - u_char fw_nports; /* N'of src ports and # of dst ports */ - /* in ports array (dst ports follow */ - /* src ports; max of 10 ports in all; */ - /* count of 0 means match all ports) */ -}; - - -#endif /* BSD_KERNEL_PRIVATE */ - -#define IPV6_FW_GETNSRCP(rule) ((rule)->fw_nports & 0x0f) -#define IPV6_FW_SETNSRCP(rule, n) do { \ - (rule)->fw_nports &= ~0x0f; \ - (rule)->fw_nports |= (n); \ - } while (0) -#define IPV6_FW_GETNDSTP(rule) ((rule)->fw_nports >> 4) -#define IPV6_FW_SETNDSTP(rule, n) do { \ - (rule)->fw_nports &= ~0xf0; \ - (rule)->fw_nports |= (n) << 4;\ - } while (0) - -#define fw_divert_port fw_un.fu_divert_port -#define fw_skipto_rule fw_un.fu_skipto_rule -#define fw_reject_code fw_un.fu_reject_code - -struct ip6_fw_chain { - LIST_ENTRY(ip6_fw_chain) chain; - struct ip6_fw *rule; -}; - -/* - * Values for "flags" field . 
- */ -#define IPV6_FW_F_IN 0x0001 /* Check inbound packets */ -#define IPV6_FW_F_OUT 0x0002 /* Check outbound packets */ -#define IPV6_FW_F_IIFACE 0x0004 /* Apply inbound interface test */ -#define IPV6_FW_F_OIFACE 0x0008 /* Apply outbound interface test */ - -#define IPV6_FW_F_COMMAND 0x0070 /* Mask for type of chain entry: */ -#define IPV6_FW_F_DENY 0x0000 /* This is a deny rule */ -#define IPV6_FW_F_REJECT 0x0010 /* Deny and send a response packet */ -#define IPV6_FW_F_ACCEPT 0x0020 /* This is an accept rule */ -#define IPV6_FW_F_COUNT 0x0030 /* This is a count rule */ -#define IPV6_FW_F_DIVERT 0x0040 /* This is a divert rule */ -#define IPV6_FW_F_TEE 0x0050 /* This is a tee rule */ -#define IPV6_FW_F_SKIPTO 0x0060 /* This is a skipto rule */ - -#define IPV6_FW_F_PRN 0x0080 /* Print if this rule matches */ - -#define IPV6_FW_F_SRNG 0x0100 /* The first two src ports are a min * - * and max range (stored in host byte * - * order). */ - -#define IPV6_FW_F_DRNG 0x0200 /* The first two dst ports are a min * - * and max range (stored in host byte * - * order). */ - -#define IPV6_FW_F_IIFNAME 0x0400 /* In interface by name/unit (not IP) */ -#define IPV6_FW_F_OIFNAME 0x0800 /* Out interface by name/unit (not IP) */ - -#define IPV6_FW_F_INVSRC 0x1000 /* Invert sense of src check */ -#define IPV6_FW_F_INVDST 0x2000 /* Invert sense of dst check */ - -#define IPV6_FW_F_FRAG 0x4000 /* Fragment */ - -#define IPV6_FW_F_ICMPBIT 0x8000 /* ICMP type bitmap is valid */ - -#define IPV6_FW_F_MASK 0xFFFF /* All possible flag bits mask */ - -/* - * Flags for the 'fw_ipflg' field, for comparing values of ip and its protocols. */ -#define IPV6_FW_IF_TCPEST 0x00000020 /* established TCP connection */ -#define IPV6_FW_IF_TCPMSK 0x00000020 /* mask of all TCP values */ - -/* - * For backwards compatibility with rules specifying "via iface" but - * not restricted to only "in" or "out" packets, we define this combination - * of bits to represent this configuration. - */ - -#define IF6_FW_F_VIAHACK (IPV6_FW_F_IN|IPV6_FW_F_OUT|IPV6_FW_F_IIFACE|IPV6_FW_F_OIFACE) - -/* - * Definitions for REJECT response codes. - * Values less than 256 correspond to ICMP unreachable codes. - */ -#define IPV6_FW_REJECT_RST 0x0100 /* TCP packets: send RST */ - -/* - * Definitions for IPv6 option names. - */ -#define IPV6_FW_IP6OPT_HOPOPT 0x01 -#define IPV6_FW_IP6OPT_ROUTE 0x02 -#define IPV6_FW_IP6OPT_FRAG 0x04 -#define IPV6_FW_IP6OPT_ESP 0x08 -#define IPV6_FW_IP6OPT_AH 0x10 -#define IPV6_FW_IP6OPT_NONXT 0x20 -#define IPV6_FW_IP6OPT_OPTS 0x40 - -/* - * Definitions for TCP flags. - */ -#define IPV6_FW_TCPF_FIN TH_FIN -#define IPV6_FW_TCPF_SYN TH_SYN -#define IPV6_FW_TCPF_RST TH_RST -#define IPV6_FW_TCPF_PSH TH_PUSH -#define IPV6_FW_TCPF_ACK TH_ACK -#define IPV6_FW_TCPF_URG TH_URG - -/* - * Main firewall chains definitions and global var's definitions. - */ -#ifdef BSD_KERNEL_PRIVATE - -#define M_IP6FW M_IPFW - - -/* - * Function definitions. 
- */ -void ip6_fw_init(void); -void load_ip6fw(void); - -/* Firewall hooks */ -struct ip6_hdr; -struct sockopt; -typedef int ip6_fw_chk_t(struct ip6_hdr**, struct ifnet*, - u_short *, struct mbuf**); -typedef int ip6_fw_ctl_t(struct sockopt *); -extern ip6_fw_chk_t *ip6_fw_chk_ptr; -extern ip6_fw_ctl_t *ip6_fw_ctl_ptr; -extern int ip6_fw_enable; - -#endif /* BSD_KERNEL_PRIVATE */ - -#endif /* __APPLE_API_OBSOLETE */ -#endif /* _IP6_FW_H */ diff --git a/bsd/netinet6/ip6_id.c b/bsd/netinet6/ip6_id.c index 508274bf4..ff6b49af3 100644 --- a/bsd/netinet6/ip6_id.c +++ b/bsd/netinet6/ip6_id.c @@ -212,7 +212,7 @@ pmod(u_int32_t gen, u_int32_t expo, u_int32_t mod) u >>= 1; t = (t * t) % mod; } - return s; + return (u_int32_t)s; } /* diff --git a/bsd/netinet6/ip6_input.c b/bsd/netinet6/ip6_input.c index 0a9a2d13a..e1b9091a5 100644 --- a/bsd/netinet6/ip6_input.c +++ b/bsd/netinet6/ip6_input.c @@ -148,7 +148,6 @@ extern int ipsec_bypass; #endif /* IPSEC */ #if DUMMYNET -#include #include #endif /* DUMMYNET */ @@ -785,7 +784,7 @@ ip6_input_check_interface(struct mbuf *m, struct ip6_hdr *ip6, struct ifnet *ini inet_ntop(AF_INET6, &ip6->ip6_src, src_str, sizeof(src_str)); inet_ntop(AF_INET6, &ip6->ip6_dst, dst_str, sizeof(dst_str)); - os_log_info(OS_LOG_DEFAULT, + os_log(OS_LOG_DEFAULT, "%s: no interface match for packet from %s to %s proto %u received via %s", __func__, src_str, dst_str, ip6->ip6_nxt, inifp->if_xname); } @@ -1212,7 +1211,7 @@ hbhcheck: in6_ifstat_inc(inifp, ifs6_in_discard); in6_ifstat_inc(inifp, ifs6_in_hdrerr); icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER, - (caddr_t)&ip6->ip6_plen - (caddr_t)ip6); + (int)((caddr_t)&ip6->ip6_plen - (caddr_t)ip6)); goto done; } /* ip6_hopopts_input() ensures that mbuf is contiguous */ @@ -1397,7 +1396,7 @@ injectit: result = filter->ipf_filter.ipf_input( filter->ipf_filter.cookie, - (mbuf_t *)&m, off, nxt); + (mbuf_t *)&m, off, (uint8_t)nxt); if (result == EJUSTRETURN) { ipf_unref(); goto done; @@ -1453,7 +1452,7 @@ ip6_setsrcifaddr_info(struct mbuf *m, uint32_t src_idx, struct in6_ifaddr *ia6) m->m_pkthdr.src_iff = (ia6->ia6_flags & 0xffff); } else { m->m_pkthdr.src_iff = 0; - m->m_pkthdr.src_ifindex = src_idx; + m->m_pkthdr.src_ifindex = (uint16_t)src_idx; if (src_idx != 0) { m->m_pkthdr.pkt_flags |= PKTF_IFAINFO; } @@ -1478,7 +1477,7 @@ ip6_setdstifaddr_info(struct mbuf *m, uint32_t dst_idx, struct in6_ifaddr *ia6) m->m_pkthdr.dst_iff = (ia6->ia6_flags & 0xffff); } else { m->m_pkthdr.dst_iff = 0; - m->m_pkthdr.dst_ifindex = dst_idx; + m->m_pkthdr.dst_ifindex = (uint16_t)dst_idx; if (dst_idx != 0) { m->m_pkthdr.pkt_flags |= PKTF_IFAINFO; } @@ -1602,7 +1601,7 @@ ip6_process_hopopts(struct mbuf *m, u_int8_t *opthead, int hbhlen, /* XXX stat */ icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER, - erroff + opt + 1 - opthead); + (int)(erroff + opt + 1 - opthead)); return -1; } optlen = IP6OPT_RTALERT_LEN; @@ -1619,7 +1618,7 @@ ip6_process_hopopts(struct mbuf *m, u_int8_t *opthead, int hbhlen, /* XXX stat */ icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER, - erroff + opt + 1 - opthead); + (int)(erroff + opt + 1 - opthead)); return -1; } optlen = IP6OPT_JUMBO_LEN; @@ -1633,7 +1632,7 @@ ip6_process_hopopts(struct mbuf *m, u_int8_t *opthead, int hbhlen, ip6stat.ip6s_badoptions++; icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER, - erroff + opt - opthead); + (int)(erroff + opt - opthead)); return -1; } @@ -1657,7 +1656,7 @@ ip6_process_hopopts(struct mbuf *m, u_int8_t *opthead, int hbhlen, ip6stat.ip6s_badoptions++; 
icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER, - erroff + opt + 2 - opthead); + (int)(erroff + opt + 2 - opthead)); return -1; } #endif @@ -1669,7 +1668,7 @@ ip6_process_hopopts(struct mbuf *m, u_int8_t *opthead, int hbhlen, ip6stat.ip6s_badoptions++; icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER, - erroff + opt + 2 - opthead); + (int)(erroff + opt + 2 - opthead)); return -1; } *plenp = jumboplen; @@ -1704,7 +1703,7 @@ bad: * is not continuous in order to return an ICMPv6 error. */ int -ip6_unknown_opt(uint8_t *optp, struct mbuf *m, int off) +ip6_unknown_opt(uint8_t *optp, struct mbuf *m, size_t off) { struct ip6_hdr *ip6; @@ -1718,7 +1717,7 @@ ip6_unknown_opt(uint8_t *optp, struct mbuf *m, int off) case IP6OPT_TYPE_FORCEICMP: /* send ICMP even if multicasted */ ip6stat.ip6s_badoptions++; - icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_OPTION, off); + icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_OPTION, (int)off); return -1; case IP6OPT_TYPE_ICMP: /* send ICMP if not multicasted */ @@ -1729,7 +1728,7 @@ ip6_unknown_opt(uint8_t *optp, struct mbuf *m, int off) m_freem(m); } else { icmp6_error(m, ICMP6_PARAM_PROB, - ICMP6_PARAMPROB_OPTION, off); + ICMP6_PARAMPROB_OPTION, (int)off); } return -1; } @@ -1818,7 +1817,12 @@ ip6_savecontrol_v4(struct inpcb *inp, struct mbuf *m, struct mbuf **mp, } // Send IN6P_PKTINFO for v4-mapped address - if ((inp->inp_flags & IN6P_PKTINFO) != 0) { + if ((inp->inp_flags & IN6P_PKTINFO) != 0 +#if CONTENT_FILTER + /* Content Filter needs to see local address */ + || (inp->inp_socket->so_cfil_db != NULL) +#endif + ) { struct in6_pktinfo pi6 = { .ipi6_addr = IN6ADDR_V4MAPPED_INIT, .ipi6_ifindex = (m && m->m_pkthdr.rcvif) ? m->m_pkthdr.rcvif->if_index : 0, @@ -1839,7 +1843,12 @@ ip6_savecontrol_v4(struct inpcb *inp, struct mbuf *m, struct mbuf **mp, } /* RFC 2292 sec. 
5 */ - if ((inp->inp_flags & IN6P_PKTINFO) != 0) { + if ((inp->inp_flags & IN6P_PKTINFO) != 0 +#if CONTENT_FILTER + /* Content Filter needs to see local address */ + || (inp->inp_socket->so_cfil_db != NULL) +#endif + ) { struct in6_pktinfo pi6; bcopy(&ip6->ip6_dst, &pi6.ipi6_addr, sizeof(struct in6_addr)); diff --git a/bsd/netinet6/ip6_output.c b/bsd/netinet6/ip6_output.c index ccb9a37d0..60508e3a4 100644 --- a/bsd/netinet6/ip6_output.c +++ b/bsd/netinet6/ip6_output.c @@ -148,12 +148,7 @@ extern int ipsec_bypass; #include #endif /* NECP */ -#if CONFIG_MACF_NET -#include -#endif /* CONFIG_MACF_NET */ - #if DUMMYNET -#include #include #endif /* DUMMYNET */ @@ -184,9 +179,10 @@ static void ip6_output_checksum(struct ifnet *, uint32_t, struct mbuf *, int, uint32_t, uint32_t); extern int udp_ctloutput(struct socket *, struct sockopt *); static int ip6_fragment_packet(struct mbuf **m, - struct ip6_pktopts *opt, struct ip6_exthdrs *exthdrsp, struct ifnet *ifp, - uint32_t mtu, uint32_t unfragpartlen, - struct route_in6 *ro_pmtu, int nxt0, uint32_t optlen); + struct ip6_pktopts *opt, struct ip6_out_args * ip6oa, + struct ip6_exthdrs *exthdrsp, struct ifnet *ifp, + uint32_t mtu, uint32_t unfragpartlen, struct route_in6 *ro_pmtu, + int nxt0, uint32_t optlen); SYSCTL_DECL(_net_inet6_ip6); @@ -234,10 +230,7 @@ static unsigned int im6o_debug = 1; /* debugging (enabled) */ static unsigned int im6o_debug; /* debugging (disabled) */ #endif /* !DEBUG */ -static unsigned int im6o_size; /* size of zone element */ static struct zone *im6o_zone; /* zone for ip6_moptions */ - -#define IM6O_ZONE_MAX 64 /* maximum elements in zone */ #define IM6O_ZONE_NAME "ip6_moptions" /* zone name */ /* @@ -604,6 +597,23 @@ loopit: necp_get_ifnet_from_result_parameter( &necp_result_parameter); + /* + * Update the QOS marking policy if + * 1. upper layer asks it to do so + * 2. net_qos_policy_restricted is not set + * 3. qos_marking_gencount doesn't match necp_kernel_socket_policies_gencount (checked in necp_lookup_current_qos_marking) + */ + if (ip6oa != NULL && (ip6oa->ip6oa_flags & IP6OAF_REDO_QOSMARKING_POLICY) && + net_qos_policy_restricted != 0) { + bool qos_marking = (ip6oa->ip6oa_flags & IP6OAF_QOSMARKING_ALLOWED) != 0; + qos_marking = necp_lookup_current_qos_marking(&ip6oa->qos_marking_gencount, NULL, policy_ifp, necp_result_parameter.route_rule_id, qos_marking); + if (qos_marking) { + ip6oa->ip6oa_flags |= IP6OAF_QOSMARKING_ALLOWED; + } else { + ip6oa->ip6oa_flags &= ~IP6OAF_QOSMARKING_ALLOWED; + } + } + if (policy_ifp == ifp) { goto skip_ipsec; } else { @@ -766,7 +776,7 @@ skip_ipsec: } ip6->ip6_plen = 0; } else { - ip6->ip6_plen = htons(plen); + ip6->ip6_plen = htons((uint16_t)plen); } /* * Concatenate headers and fill in next header fields. @@ -859,7 +869,7 @@ skip_ipsec: (ip6->ip6_dst.s6_addr16[1] == 0) && (ro != NULL)) { fixscope = 1; ip6->ip6_dst.s6_addr16[1] = - htons(ro->ro_dst.sin6_scope_id); + htons((uint16_t)ro->ro_dst.sin6_scope_id); } ipf_ref(); @@ -900,7 +910,7 @@ skip_ipsec: #if IPSEC if (ip6obf.needipsec) { - int segleft_org; + uint8_t segleft_org; /* * pointers after IPsec headers are not valid any more. 
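The ip6_output.c hunk above adds a step that re-evaluates the QoS marking policy when the caller sets IP6OAF_REDO_QOSMARKING_POLICY, setting or clearing IP6OAF_QOSMARKING_ALLOWED from the result of an NECP lookup. Below is a minimal standalone sketch of that flag-update pattern; it is illustrative only and not part of the patch. The struct, the IP6OAF_QOSMARKING_ALLOWED value and the lookup stub are simplified stand-ins for the real xnu/NECP definitions.

/*
 * Illustrative sketch only (not part of the patch): the flag-update pattern
 * of the QoS re-evaluation block added to ip6_output().  The struct, the
 * IP6OAF_QOSMARKING_ALLOWED value and the lookup stub are stand-ins.
 */
#include <stdbool.h>
#include <stdint.h>

#define IP6OAF_QOSMARKING_ALLOWED     0x00000040u  /* placeholder value */
#define IP6OAF_REDO_QOSMARKING_POLICY 0x00002000u  /* value from the patch */

struct ip6_out_args_sketch {
	uint32_t ip6oa_flags;
	int32_t  qos_marking_gencount;
};

/* Stand-in for necp_lookup_current_qos_marking(). */
static bool
qos_marking_lookup_sketch(int32_t *gencount, bool current)
{
	(void)gencount;
	return current;              /* pretend the policy did not change */
}

static void
redo_qos_marking_sketch(struct ip6_out_args_sketch *ip6oa)
{
	if ((ip6oa->ip6oa_flags & IP6OAF_REDO_QOSMARKING_POLICY) == 0) {
		return;              /* caller did not ask for re-evaluation */
	}
	bool allowed = (ip6oa->ip6oa_flags & IP6OAF_QOSMARKING_ALLOWED) != 0;
	allowed = qos_marking_lookup_sketch(&ip6oa->qos_marking_gencount, allowed);
	if (allowed) {
		ip6oa->ip6oa_flags |= IP6OAF_QOSMARKING_ALLOWED;
	} else {
		ip6oa->ip6oa_flags &= ~IP6OAF_QOSMARKING_ALLOWED;
	}
}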
@@ -935,7 +945,7 @@ skip_ipsec: default: printf("ip6_output (ipsec): error code %d\n", error); - /* FALLTHRU */ + OS_FALLTHROUGH; case ENOENT: /* don't show these error codes to the user */ error = 0; @@ -1016,7 +1026,7 @@ skip_ipsec: ip6->ip6_hlim = im6o->im6o_multicast_hlim; IM6O_UNLOCK(im6o); } else { - ip6->ip6_hlim = ip6_defmcasthlim; + ip6->ip6_hlim = (uint8_t)ip6_defmcasthlim; } } @@ -1088,7 +1098,7 @@ skip_ipsec: default: printf("ip6_output (ipsec): error code %d\n", error); - /* FALLTHRU */ + OS_FALLTHROUGH; case ENOENT: /* don't show these error codes to the user */ error = 0; @@ -1510,7 +1520,7 @@ check_with_pf: * back as a chain of packets and original mbuf is freed. Otherwise, m * is unchanged. */ - error = ip6_fragment_packet(&m, opt, + error = ip6_fragment_packet(&m, opt, ip6oa, &exthdrs, ifp, mtu, unfragpartlen, ro_pmtu, nxt0, optlen); @@ -1679,15 +1689,16 @@ bad: static int ip6_fragment_packet(struct mbuf **mptr, struct ip6_pktopts *opt, - struct ip6_exthdrs *exthdrsp, struct ifnet *ifp, uint32_t mtu, - uint32_t unfragpartlen, struct route_in6 *ro_pmtu, - int nxt0, uint32_t optlen) + struct ip6_out_args *ip6oa, struct ip6_exthdrs *exthdrsp, + struct ifnet *ifp, uint32_t mtu, uint32_t unfragpartlen, + struct route_in6 *ro_pmtu, int nxt0, uint32_t optlen) { VERIFY(NULL != mptr); struct mbuf *m = *mptr; int error = 0; - size_t tlen = m->m_pkthdr.len; - boolean_t dontfrag = (opt != NULL && (opt->ip6po_flags & IP6PO_DONTFRAG)); + uint32_t tlen = m->m_pkthdr.len; + boolean_t dontfrag = (opt != NULL && (opt->ip6po_flags & IP6PO_DONTFRAG)) || + (ip6oa != NULL && (ip6oa->ip6oa_flags & IP6OAF_DONT_FRAG)); if (m->m_pkthdr.pkt_flags & PKTF_FORWARDED) { dontfrag = TRUE; @@ -1785,7 +1796,7 @@ ip6_do_fragmentation(struct mbuf **mptr, uint32_t optlen, struct ifnet *ifp, struct mbuf *first_mbufp = NULL; struct mbuf *last_mbufp = NULL; - size_t tlen = morig->m_pkthdr.len; + uint32_t tlen = morig->m_pkthdr.len; /* try to fragment the packet. 
case 1-b */ if ((morig->m_pkthdr.csum_flags & CSUM_TSO_IPV6)) { @@ -1801,7 +1812,7 @@ ip6_do_fragmentation(struct mbuf **mptr, uint32_t optlen, struct ifnet *ifp, in6_ifstat_inc(ifp, ifs6_out_fragfail); return EMSGSIZE; } else { - size_t hlen, len, off; + uint32_t hlen, off, len; struct mbuf **mnext = NULL; struct ip6_frag *ip6f; u_char nextproto; @@ -1900,12 +1911,6 @@ ip6_do_fragmentation(struct mbuf **mptr, uint32_t optlen, struct ifnet *ifp, M_COPY_CLASSIFIER(new_m, morig); M_COPY_PFTAG(new_m, morig); -#ifdef notyet -#if CONFIG_MACF_NET - mac_create_fragment(morig, new_m); -#endif /* CONFIG_MACF_NET */ -#endif /* notyet */ - ip6f->ip6f_reserved = 0; ip6f->ip6f_ident = id; ip6f->ip6f_nxt = nextproto; @@ -2044,7 +2049,7 @@ in6_finalize_cksum(struct mbuf *m, uint32_t hoff, int32_t optlen, ip6->ip6_plen, ip6->ip6_plen, plen, plen, (mlen - (hoff + hlen))); } - plen = mlen - (hoff + hlen); + plen = (uint16_t)(mlen - (hoff + hlen)); } } @@ -2058,7 +2063,7 @@ in6_finalize_cksum(struct mbuf *m, uint32_t hoff, int32_t optlen, } else { /* caller supplied the original transport number; use it */ if (nxt0 >= 0) { - nxt = nxt0; + nxt = (uint8_t)nxt0; } olen = optlen; } @@ -2353,7 +2358,7 @@ ip6_ctloutput(struct socket *so, struct sockopt *sopt) struct inpcb *in6p = sotoinpcb(so); int error = 0, optval = 0; int level, op = -1, optname = 0; - int optlen = 0; + size_t optlen = 0; struct proc *p; lck_mtx_t *mutex_held = NULL; @@ -2425,7 +2430,7 @@ ip6_ctloutput(struct socket *so, struct sockopt *sopt) if (!privileged) { break; } - /* FALLTHROUGH */ + OS_FALLTHROUGH; case IPV6_UNICAST_HOPS: case IPV6_HOPLIMIT: case IPV6_RECVPKTINFO: @@ -2451,11 +2456,11 @@ ip6_ctloutput(struct socket *so, struct sockopt *sopt) error = EINVAL; } else { /* -1 = kernel default */ - in6p->in6p_hops = optval; + in6p->in6p_hops = (short)optval; if (in6p->inp_vflag & INP_IPV4) { in6p->inp_ip_ttl = - optval; + (uint8_t)optval; } } break; @@ -3043,7 +3048,8 @@ ip6_ctloutput(struct socket *so, struct sockopt *sopt) int ip6_raw_ctloutput(struct socket *so, struct sockopt *sopt) { - int error = 0, optval, optlen; + int error = 0, optval; + size_t optlen; const int icmp6off = offsetof(struct icmp6_hdr, icmp6_cksum); struct inpcb *in6p = sotoinpcb(so); int level, op, optname; @@ -3450,16 +3456,10 @@ ip6_moptions_init(void) { PE_parse_boot_argn("ifa_debug", &im6o_debug, sizeof(im6o_debug)); - im6o_size = (im6o_debug == 0) ? sizeof(struct ip6_moptions) : + vm_size_t im6o_size = (im6o_debug == 0) ? sizeof(struct ip6_moptions) : sizeof(struct ip6_moptions_dbg); - im6o_zone = zinit(im6o_size, IM6O_ZONE_MAX * im6o_size, 0, - IM6O_ZONE_NAME); - if (im6o_zone == NULL) { - panic("%s: failed allocating %s", __func__, IM6O_ZONE_NAME); - /* NOTREACHED */ - } - zone_change(im6o_zone, Z_EXPAND, TRUE); + im6o_zone = zone_create(IM6O_ZONE_NAME, im6o_size, ZC_ZFREE_CLEARMEM); } void @@ -3564,14 +3564,12 @@ im6o_trace(struct ip6_moptions *im6o, int refhold) } struct ip6_moptions * -ip6_allocmoptions(int how) +ip6_allocmoptions(zalloc_flags_t how) { struct ip6_moptions *im6o; - im6o = (how == M_WAITOK) ? 
- zalloc(im6o_zone) : zalloc_noblock(im6o_zone); + im6o = zalloc_flags(im6o_zone, how | Z_ZERO); if (im6o != NULL) { - bzero(im6o, im6o_size); lck_mtx_init(&im6o->im6o_lock, ifa_mtx_grp, ifa_mtx_attr); im6o->im6o_debug |= IFD_ALLOC; if (im6o_debug != 0) { diff --git a/bsd/netinet6/ip6_var.h b/bsd/netinet6/ip6_var.h index a9b9cab15..63b5c6a7d 100644 --- a/bsd/netinet6/ip6_var.h +++ b/bsd/netinet6/ip6_var.h @@ -95,8 +95,10 @@ #include #ifdef BSD_KERNEL_PRIVATE +#include #include +struct ip6asfrag; /* * IP6 reassembly queue structure. Each fragment * being reassembled is attached to one of these structures. @@ -112,26 +114,13 @@ struct ip6q { struct ip6q *ip6q_next; struct ip6q *ip6q_prev; int ip6q_unfrglen; /* len of unfragmentable part */ -#ifdef notyet - u_char *ip6q_nxtp; -#endif int ip6q_nfrag; /* # of fragments */ uint32_t ip6q_csum_flags; /* checksum flags */ uint32_t ip6q_csum; /* partial checksum value */ + uint32_t ip6q_flags; +#define IP6QF_DIRTY 0x00000001 }; -struct ip6asfrag { - struct ip6asfrag *ip6af_down; - struct ip6asfrag *ip6af_up; - struct mbuf *ip6af_m; - int ip6af_offset; /* offset in ip6af_m to next header */ - int ip6af_frglen; /* fragmentable part length */ - int ip6af_off; /* fragment offset */ - u_int16_t ip6af_mff; /* more fragment bit in frag off */ -}; - -#define IP6_REASS_MBUF(ip6af) (*(struct mbuf **)&((ip6af)->ip6af_m)) - struct ip6_moptions { decl_lck_mtx_data(, im6o_lock); uint32_t im6o_refcnt; /* ref count */ @@ -366,6 +355,8 @@ struct ip6stat { u_quad_t ip6s_clat464_plat64_pfx_setfail; u_quad_t ip6s_clat464_plat64_pfx_getfail; + u_quad_t ip6s_overlap_frag_drop; + u_quad_t ip6s_rcv_if_weak_match; u_quad_t ip6s_rcv_if_no_match; }; @@ -453,10 +444,13 @@ struct ip6_out_args { #define IP6OAF_NO_LOW_POWER 0x00000200 /* skip low power */ #define IP6OAF_NO_CONSTRAINED 0x00000400 /* skip IFXF_CONSTRAINED */ #define IP6OAF_SKIP_PF 0x00000800 /* skip PF */ +#define IP6OAF_DONT_FRAG 0x00001000 /* Don't fragment */ +#define IP6OAF_REDO_QOSMARKING_POLICY 0x00002000 /* Re-evaluate QOS marking policy */ u_int32_t ip6oa_retflags; /* IP6OARF return flags (see below) */ #define IP6OARF_IFDENIED 0x00000001 /* denied access to interface */ int ip6oa_sotc; /* traffic class for Fastlane DSCP mapping */ int ip6oa_netsvctype; + int32_t qos_marking_gencount; }; extern struct ip6stat ip6stat; /* statistics */ @@ -501,6 +495,11 @@ extern int ip6_prefer_tempaddr; /* whether to use the default scope zone when unspecified */ extern int ip6_use_defzone; +/* how many times to try allocating cga address after conflict */ +extern int ip6_cga_conflict_retries; +#define IPV6_CGA_CONFLICT_RETRIES_DEFAULT 3 +#define IPV6_CGA_CONFLICT_RETRIES_MAX 10 + extern struct pr_usrreqs rip6_usrreqs; extern struct pr_usrreqs icmp6_dgram_usrreqs; @@ -524,14 +523,14 @@ extern void ip6_setdstifaddr_info(struct mbuf *, uint32_t, struct in6_ifaddr *); extern int ip6_getsrcifaddr_info(struct mbuf *, uint32_t *, uint32_t *); extern int ip6_getdstifaddr_info(struct mbuf *, uint32_t *, uint32_t *); extern void ip6_freepcbopts(struct ip6_pktopts *); -extern int ip6_unknown_opt(u_int8_t *, struct mbuf *, int); +extern int ip6_unknown_opt(u_int8_t *, struct mbuf *, size_t); extern char *ip6_get_prevhdr(struct mbuf *, int); extern int ip6_nexthdr(struct mbuf *, int, int, int *); extern int ip6_lasthdr(struct mbuf *, int, int, int *); extern boolean_t ip6_pkt_has_ulp(struct mbuf *m); extern void ip6_moptions_init(void); -extern struct ip6_moptions *ip6_allocmoptions(int); +extern struct ip6_moptions 
*ip6_allocmoptions(zalloc_flags_t); extern void im6o_addref(struct ip6_moptions *, int); extern void im6o_remref(struct ip6_moptions *); diff --git a/bsd/netinet6/ipsec.c b/bsd/netinet6/ipsec.c index c5610feed..2e11f4515 100644 --- a/bsd/netinet6/ipsec.c +++ b/bsd/netinet6/ipsec.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2018 Apple Inc. All rights reserved. + * Copyright (c) 2008-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -82,6 +82,7 @@ #include #include +#include #include #include @@ -96,35 +97,23 @@ #include #include #include -#if INET6 #include -#endif #include #include #include -#if INET6 #include -#endif #include -#if INET6 #include -#endif #include -#if INET6 #include -#endif #include -#if INET6 #include -#endif #if IPSEC_ESP #include -#if INET6 #include #endif -#endif #include #include #include @@ -172,9 +161,7 @@ void *sleep_wake_handle = NULL; bool ipsec_save_wake_pkt = false; SYSCTL_DECL(_net_inet_ipsec); -#if INET6 SYSCTL_DECL(_net_inet6_ipsec6); -#endif /* net.inet.ipsec */ SYSCTL_STRUCT(_net_inet_ipsec, IPSECCTL_STATS, stats, CTLFLAG_RD | CTLFLAG_LOCKED, &ipsecstat, ipsecstat, ""); @@ -214,7 +201,6 @@ SYSCTL_INT(_net_inet_ipsec, OID_AUTO, bypass, CTLFLAG_RD | CTLFLAG_LOCKED, &ipse SYSCTL_INT(_net_inet_ipsec, OID_AUTO, esp_port, CTLFLAG_RW | CTLFLAG_LOCKED, &esp_udp_encap_port, 0, ""); -#if INET6 struct ipsecstat ipsec6stat; int ip6_esp_trans_deflev = IPSEC_LEVEL_USE; int ip6_esp_net_deflev = IPSEC_LEVEL_USE; @@ -243,39 +229,32 @@ SYSCTL_INT(_net_inet6_ipsec6, IPSECCTL_DEBUG, debug, CTLFLAG_RW | CTLFLAG_LOCKED, &ipsec_debug, 0, ""); SYSCTL_INT(_net_inet6_ipsec6, IPSECCTL_ESP_RANDPAD, esp_randpad, CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_esp_randpad, 0, ""); -#endif /* INET6 */ SYSCTL_DECL(_net_link_generic_system); struct ipsec_wake_pkt_info ipsec_wake_pkt; -static int ipsec_setspidx_interface(struct secpolicyindex *, u_int, struct mbuf *, +static int ipsec_setspidx_interface(struct secpolicyindex *, u_int8_t, struct mbuf *, int, int, int); -static int ipsec_setspidx_mbuf(struct secpolicyindex *, u_int, u_int, +static int ipsec_setspidx_mbuf(struct secpolicyindex *, u_int8_t, u_int, struct mbuf *, int); static int ipsec4_setspidx_inpcb(struct mbuf *, struct inpcb *pcb); -#if INET6 static int ipsec6_setspidx_in6pcb(struct mbuf *, struct in6pcb *pcb); -#endif static int ipsec_setspidx(struct mbuf *, struct secpolicyindex *, int, int); static void ipsec4_get_ulp(struct mbuf *m, struct secpolicyindex *, int); static int ipsec4_setspidx_ipaddr(struct mbuf *, struct secpolicyindex *); -#if INET6 static void ipsec6_get_ulp(struct mbuf *m, struct secpolicyindex *, int); static int ipsec6_setspidx_ipaddr(struct mbuf *, struct secpolicyindex *); -#endif static struct inpcbpolicy *ipsec_newpcbpolicy(void); static void ipsec_delpcbpolicy(struct inpcbpolicy *); static struct secpolicy *ipsec_deepcopy_policy(struct secpolicy *src); static int ipsec_set_policy(struct secpolicy **pcb_sp, int optname, caddr_t request, size_t len, int priv); -static void vshiftl(unsigned char *, int, int); +static void vshiftl(unsigned char *, int, size_t); static int ipsec_in_reject(struct secpolicy *, struct mbuf *); -#if INET6 static int ipsec64_encapsulate(struct mbuf *, struct secasvar *); static int ipsec6_update_routecache_and_output(struct ipsec_output_state *state, struct secasvar *sav); static int ipsec46_encapsulate(struct ipsec_output_state *state, struct secasvar *sav); -#endif static struct ipsec_tag *ipsec_addaux(struct mbuf *); static struct ipsec_tag 
*ipsec_findaux(struct mbuf *); static void ipsec_optaux(struct mbuf *, struct ipsec_tag *); @@ -283,7 +262,11 @@ int ipsec_send_natt_keepalive(struct secasvar *sav); bool ipsec_fill_offload_frame(ifnet_t ifp, struct secasvar *sav, struct ifnet_keepalive_offload_frame *frame, size_t frame_data_offset); extern bool IOPMCopySleepWakeUUIDKey(char *, size_t); -extern void *registerSleepWakeInterest(void *, void *, void *); + +typedef IOReturn (*IOServiceInterestHandler)( void * target, void * refCon, + UInt32 messageType, void * provider, + void * messageArgument, vm_size_t argSize ); +extern void *registerSleepWakeInterest(IOServiceInterestHandler, void *, void *); static int sysctl_def_policy SYSCTL_HANDLER_ARGS @@ -322,7 +305,7 @@ sysctl_def_policy SYSCTL_HANDLER_ARGS */ struct secpolicy * ipsec4_getpolicybysock(struct mbuf *m, - u_int dir, + u_int8_t dir, struct socket *so, int *error) { @@ -345,11 +328,9 @@ ipsec4_getpolicybysock(struct mbuf *m, case PF_INET: pcbsp = sotoinpcb(so)->inp_sp; break; -#if INET6 case PF_INET6: pcbsp = sotoin6pcb(so)->in6p_sp; break; -#endif } if (!pcbsp) { @@ -364,12 +345,10 @@ ipsec4_getpolicybysock(struct mbuf *m, /* set spidx in pcb */ *error = ipsec4_setspidx_inpcb(m, sotoinpcb(so)); break; -#if INET6 case PF_INET6: /* set spidx in pcb */ *error = ipsec6_setspidx_in6pcb(m, sotoin6pcb(so)); break; -#endif default: panic("ipsec4_getpolicybysock: unsupported address family\n"); } @@ -528,7 +507,7 @@ ipsec4_getpolicybysock(struct mbuf *m, */ struct secpolicy * ipsec4_getpolicybyaddr(struct mbuf *m, - u_int dir, + u_int8_t dir, int flag, int *error) { @@ -596,7 +575,7 @@ ipsec4_getpolicybyaddr(struct mbuf *m, */ int ipsec4_getpolicybyinterface(struct mbuf *m, - u_int dir, + u_int8_t dir, int *flags, struct ip_out_args *ipoa, struct secpolicy **sp) @@ -655,7 +634,6 @@ ipsec4_getpolicybyinterface(struct mbuf *m, } -#if INET6 /* * For OUTBOUND packet having a socket. Searching SPD for packet, * and return a pointer to SP. @@ -668,7 +646,7 @@ ipsec4_getpolicybyinterface(struct mbuf *m, */ struct secpolicy * ipsec6_getpolicybysock(struct mbuf *m, - u_int dir, + u_int8_t dir, struct socket *so, int *error) { @@ -845,7 +823,7 @@ ipsec6_getpolicybysock(struct mbuf *m, struct secpolicy * ipsec6_getpolicybyaddr(struct mbuf *m, - u_int dir, + u_int8_t dir, int flag, int *error) { @@ -905,7 +883,7 @@ ipsec6_getpolicybyaddr(struct mbuf *m, */ int ipsec6_getpolicybyinterface(struct mbuf *m, - u_int dir, + u_int8_t dir, int flag, struct ip6_out_args *ip6oap, int *noipsec, @@ -965,7 +943,6 @@ ipsec6_getpolicybyinterface(struct mbuf *m, return 0; } -#endif /* INET6 */ /* * set IP address into spidx from mbuf. 
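One of the ipsec.c hunks above replaces the old untyped prototype extern void *registerSleepWakeInterest(void *, void *, void *) with one that takes a proper IOServiceInterestHandler function-pointer type. The sketch below, illustrative only and not part of the patch, shows the general shape of that change: declaring a callback typedef so the registration call can type-check the handler. The IOReturn stand-in, the handler arguments and the registration stub are hypothetical simplifications, not the real IOKit interfaces.

/*
 * Illustrative sketch only (not part of the patch): a typed sleep/wake
 * interest handler instead of an untyped void * callback.  All names and
 * types here are stand-ins for the IOKit API.
 */
#include <stddef.h>
#include <stdint.h>

typedef int sketch_ioreturn_t;

typedef sketch_ioreturn_t (*sketch_interest_handler_t)(void *target,
    void *refcon, uint32_t message_type, void *provider,
    void *message_argument, size_t arg_size);

/* Hypothetical registration function modelled on registerSleepWakeInterest(). */
static void *
register_sleep_wake_interest_sketch(sketch_interest_handler_t handler,
    void *target, void *refcon)
{
	(void)handler; (void)target; (void)refcon;
	return NULL;                 /* a real implementation returns a notifier handle */
}

/* The handler must now match the declared signature exactly. */
static sketch_ioreturn_t
sleep_wake_handler_sketch(void *target, void *refcon, uint32_t message_type,
    void *provider, void *message_argument, size_t arg_size)
{
	(void)target; (void)refcon; (void)message_type;
	(void)provider; (void)message_argument; (void)arg_size;
	return 0;                    /* success */
}

/* Usage: handle = register_sleep_wake_interest_sketch(sleep_wake_handler_sketch, NULL, NULL); */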
@@ -980,7 +957,7 @@ ipsec6_getpolicybyinterface(struct mbuf *m, static int ipsec_setspidx_mbuf( struct secpolicyindex *spidx, - u_int dir, + u_int8_t dir, __unused u_int family, struct mbuf *m, int needport) @@ -1011,7 +988,7 @@ bad: static int ipsec_setspidx_interface( struct secpolicyindex *spidx, - u_int dir, + u_int8_t dir, struct mbuf *m, int needport, int ifindex, @@ -1092,7 +1069,6 @@ bad: return error; } -#if INET6 static int ipsec6_setspidx_in6pcb(struct mbuf *m, struct in6pcb *pcb) { @@ -1134,7 +1110,6 @@ bad: bzero(&pcb->in6p_sp->sp_out->spidx, sizeof(*spidx)); return error; } -#endif /* * configure security policy index (src/dst/proto/sport/dport) @@ -1208,7 +1183,6 @@ ipsec_setspidx(struct mbuf *m, } ipsec4_get_ulp(m, spidx, needport); return 0; -#if INET6 case 6: if (m->m_pkthdr.len < sizeof(struct ip6_hdr)) { KEYDEBUG(KEYDEBUG_IPSEC_DUMP, @@ -1223,7 +1197,6 @@ ipsec_setspidx(struct mbuf *m, } ipsec6_get_ulp(m, spidx, needport); return 0; -#endif default: KEYDEBUG(KEYDEBUG_IPSEC_DUMP, printf("ipsec_setspidx: " @@ -1346,7 +1319,6 @@ ipsec4_setspidx_ipaddr(struct mbuf *m, struct secpolicyindex *spidx) return 0; } -#if INET6 static void ipsec6_get_ulp(struct mbuf *m, struct secpolicyindex *spidx, @@ -1375,9 +1347,10 @@ ipsec6_get_ulp(struct mbuf *m, return; } + VERIFY(nxt <= UINT8_MAX); switch (nxt) { case IPPROTO_TCP: - spidx->ul_proto = nxt; + spidx->ul_proto = (u_int8_t)nxt; if (!needport) { break; } @@ -1389,7 +1362,7 @@ ipsec6_get_ulp(struct mbuf *m, ((struct sockaddr_in6 *)&spidx->dst)->sin6_port = th.th_dport; break; case IPPROTO_UDP: - spidx->ul_proto = nxt; + spidx->ul_proto = (u_int8_t)nxt; if (!needport) { break; } @@ -1403,7 +1376,7 @@ ipsec6_get_ulp(struct mbuf *m, case IPPROTO_ICMPV6: default: /* XXX intermediate headers??? */ - spidx->ul_proto = nxt; + spidx->ul_proto = (u_int8_t)nxt; break; } } @@ -1448,7 +1421,6 @@ ipsec6_setspidx_ipaddr(struct mbuf *m, return 0; } -#endif static struct inpcbpolicy * ipsec_newpcbpolicy(void) @@ -1749,7 +1721,6 @@ ipsec4_delete_pcbpolicy(struct inpcb *inp) return 0; } -#if INET6 int ipsec6_set_policy(struct in6pcb *in6p, int optname, @@ -1832,7 +1803,6 @@ ipsec6_delete_pcbpolicy(struct in6pcb *in6p) return 0; } -#endif /* * return current level. @@ -1867,22 +1837,18 @@ ipsec_get_reqlevel(struct ipsecrequest *isr) /* set default level */ switch (((struct sockaddr *)&isr->sp->spidx.src)->sa_family) { -#if INET case AF_INET: esp_trans_deflev = IPSEC_CHECK_DEFAULT(ip4_esp_trans_deflev); esp_net_deflev = IPSEC_CHECK_DEFAULT(ip4_esp_net_deflev); ah_trans_deflev = IPSEC_CHECK_DEFAULT(ip4_ah_trans_deflev); ah_net_deflev = IPSEC_CHECK_DEFAULT(ip4_ah_net_deflev); break; -#endif -#if INET6 case AF_INET6: esp_trans_deflev = IPSEC_CHECK_DEFAULT(ip6_esp_trans_deflev); esp_net_deflev = IPSEC_CHECK_DEFAULT(ip6_esp_net_deflev); ah_trans_deflev = IPSEC_CHECK_DEFAULT(ip6_ah_trans_deflev); ah_net_deflev = IPSEC_CHECK_DEFAULT(ip6_ah_net_deflev); break; -#endif /* INET6 */ default: panic("key_get_reqlevel: Unknown family. %d\n", ((struct sockaddr *)&isr->sp->spidx.src)->sa_family); @@ -2089,7 +2055,6 @@ ipsec4_in_reject(struct mbuf *m, struct inpcb *inp) return 0; } -#if INET6 /* * Check AH/ESP integrity. * This function is called from tcp6_input(), udp6_input(), @@ -2145,7 +2110,6 @@ ipsec6_in_reject(struct mbuf *m, struct in6pcb *in6p) /* NOTREACHED */ return 0; } -#endif /* * compute the byte size to be occupied by IPsec header. 
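The ipsec6_get_ulp() hunk above now asserts VERIFY(nxt <= UINT8_MAX) before storing the next-header value in the 8-bit ul_proto field, the same check-then-narrow pattern behind the other width fixes in this patch. A minimal sketch of that pattern follows; it is illustrative only and not part of the patch, with assert() standing in for the kernel's VERIFY() and a stand-in struct in place of secpolicyindex.

/*
 * Illustrative sketch only (not part of the patch): check that a value fits
 * before narrowing it into an 8-bit field.
 */
#include <assert.h>
#include <stdint.h>

struct spidx_sketch {
	uint8_t ul_proto;            /* upper-layer protocol, 8 bits wide */
};

static void
set_ul_proto_sketch(struct spidx_sketch *spidx, int nxt)
{
	/* nxt is widened to int by header parsing; prove it fits first */
	assert(nxt >= 0 && nxt <= UINT8_MAX);
	spidx->ul_proto = (uint8_t)nxt;
}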
@@ -2207,11 +2171,9 @@ ipsec_hdrsiz(struct secpolicy *sp) case AF_INET: clen += sizeof(struct ip); break; -#if INET6 case AF_INET6: clen += sizeof(struct ip6_hdr); break; -#endif default: ipseclog((LOG_ERR, "ipsec_hdrsiz: " "unknown AF %d in IPsec tunnel SA\n", @@ -2227,7 +2189,7 @@ ipsec_hdrsiz(struct secpolicy *sp) /* This function is called from ip_forward() and ipsec4_hdrsize_tcp(). */ size_t -ipsec4_hdrsiz(struct mbuf *m, u_int dir, struct inpcb *inp) +ipsec4_hdrsiz(struct mbuf *m, u_int8_t dir, struct inpcb *inp) { struct secpolicy *sp = NULL; int error; @@ -2266,12 +2228,11 @@ ipsec4_hdrsiz(struct mbuf *m, u_int dir, struct inpcb *inp) return size; } -#if INET6 /* This function is called from ipsec6_hdrsize_tcp(), * and maybe from ip6_forward.() */ size_t -ipsec6_hdrsiz(struct mbuf *m, u_int dir, struct in6pcb *in6p) +ipsec6_hdrsiz(struct mbuf *m, u_int8_t dir, struct in6pcb *in6p) { struct secpolicy *sp = NULL; int error; @@ -2307,9 +2268,7 @@ ipsec6_hdrsiz(struct mbuf *m, u_int dir, struct in6pcb *in6p) return size; } -#endif /*INET6*/ -#if INET /* * encapsulate for ipsec tunnel. * ip->ip_src must be fixed later on. @@ -2319,8 +2278,8 @@ ipsec4_encapsulate(struct mbuf *m, struct secasvar *sav) { struct ip *oip; struct ip *ip; - size_t hlen; size_t plen; + u_int32_t hlen; /* can't tunnel between different AFs */ if (((struct sockaddr *)&sav->sah->saidx.src)->sa_family @@ -2329,13 +2288,6 @@ ipsec4_encapsulate(struct mbuf *m, struct secasvar *sav) m_freem(m); return EINVAL; } -#if 0 - /* XXX if the dst is myself, perform nothing. */ - if (key_ismyaddr((struct sockaddr *)&sav->sah->saidx.dst)) { - m_freem(m); - return EINVAL; - } -#endif if (m->m_len < sizeof(*ip)) { panic("ipsec4_encapsulate: assumption failed (first mbuf length)"); @@ -2411,7 +2363,7 @@ ipsec4_encapsulate(struct mbuf *m, struct secasvar *sav) } ip->ip_p = IPPROTO_IPIP; if (plen + sizeof(struct ip) < IP_MAXPACKET) { - ip->ip_len = htons(plen + sizeof(struct ip)); + ip->ip_len = htons((u_int16_t)(plen + sizeof(struct ip))); } else { ipseclog((LOG_ERR, "IPv4 ipsec: size exceeds limit: " "leave ip_len as is (invalid packet)\n")); @@ -2432,9 +2384,7 @@ ipsec4_encapsulate(struct mbuf *m, struct secasvar *sav) return 0; } -#endif /*INET*/ -#if INET6 int ipsec6_encapsulate(struct mbuf *m, struct secasvar *sav) { @@ -2449,13 +2399,6 @@ ipsec6_encapsulate(struct mbuf *m, struct secasvar *sav) m_freem(m); return EINVAL; } -#if 0 - /* XXX if the dst is myself, perform nothing. */ - if (key_ismyaddr((struct sockaddr *)&sav->sah->saidx.dst)) { - m_freem(m); - return EINVAL; - } -#endif plen = m->m_pkthdr.len; @@ -2498,7 +2441,7 @@ ipsec6_encapsulate(struct mbuf *m, struct secasvar *sav) /* ECN consideration. */ ip6_ecn_ingress(ip6_ipsec_ecn, &ip6->ip6_flow, &oip6->ip6_flow); if (plen < IPV6_MAXPACKET - sizeof(struct ip6_hdr)) { - ip6->ip6_plen = htons(plen); + ip6->ip6_plen = htons((u_int16_t)plen); } else { /* ip6->ip6_plen will be updated in ip6_output() */ } @@ -2529,13 +2472,6 @@ ipsec64_encapsulate(struct mbuf *m, struct secasvar *sav) m_freem(m); return EINVAL; } -#if 0 - /* XXX if the dst is myself, perform nothing. */ - if (key_ismyaddr((struct sockaddr *)&sav->sah->saidx.dst)) { - m_freem(m); - return EINVAL; - } -#endif plen = m->m_pkthdr.len; ip6 = mtod(m, struct ip6_hdr *); @@ -2583,12 +2519,12 @@ ipsec64_encapsulate(struct mbuf *m, struct secasvar *sav) /* construct new IPv4 header. see RFC 2401 5.1.2.1 */ /* ECN consideration. 
*/ - ip64_ecn_ingress(ip4_ipsec_ecn, &ip->ip_tos, &ip6->ip6_flow); + ip64_ecn_ingress(ip4_ipsec_ecn, &ip->ip_tos, &ip6i->ip6_flow); if (plen + sizeof(struct ip) < IP_MAXPACKET) { - ip->ip_len = htons(plen + sizeof(struct ip)); + ip->ip_len = htons((u_int16_t)(plen + sizeof(struct ip))); } else { - ip->ip_len = htons(plen); + ip->ip_len = htons((u_int16_t)plen); ipseclog((LOG_ERR, "IPv4 ipsec: size exceeds limit: " "leave ip_len as is (invalid packet)\n")); } @@ -2714,13 +2650,13 @@ ipsec6_update_routecache_and_output( return error; } ip6 = mtod(state->m, struct ip6_hdr *); - ip6->ip6_plen = htons(plen); + ip6->ip6_plen = htons((u_int16_t)plen); ipsec_set_pkthdr_for_interface(sav->sah->ipsec_if, state->m, AF_INET6); ipsec_set_ip6oa_for_interface(sav->sah->ipsec_if, &ip6oa); /* Increment statistics */ - ifnet_stat_increment_out(sav->sah->ipsec_if, 1, mbuf_pkthdr_len(state->m), 0); + ifnet_stat_increment_out(sav->sah->ipsec_if, 1, (u_int32_t)mbuf_pkthdr_len(state->m), 0); /* Send to ip6_output */ bzero(&ro6_new, sizeof(ro6_new)); @@ -2752,8 +2688,8 @@ ipsec46_encapsulate(struct ipsec_output_state *state, struct secasvar *sav) struct ip6_hdr *ip6; struct ip *oip; struct ip *ip; - size_t hlen; size_t plen; + u_int32_t hlen; m = state->m; if (!m) { @@ -2767,13 +2703,6 @@ ipsec46_encapsulate(struct ipsec_output_state *state, struct secasvar *sav) m_freem(m); return EINVAL; } -#if 0 - /* XXX if the dst is myself, perform nothing. */ - if (key_ismyaddr((struct sockaddr *)&sav->sah->saidx.dst)) { - m_freem(m); - return EINVAL; - } -#endif if (m->m_len < sizeof(*ip)) { panic("ipsec46_encapsulate: assumption failed (first mbuf length)"); @@ -2855,9 +2784,9 @@ ipsec46_encapsulate(struct ipsec_output_state *state, struct secasvar *sav) /* construct new IPv6 header. see RFC 2401 5.1.2.2 */ /* ECN consideration. */ - ip46_ecn_ingress(ip6_ipsec_ecn, &ip6->ip6_flow, &ip->ip_tos); + ip46_ecn_ingress(ip6_ipsec_ecn, &ip6->ip6_flow, &oip->ip_tos); if (plen < IPV6_MAXPACKET - sizeof(struct ip6_hdr)) { - ip6->ip6_plen = htons(plen); + ip6->ip6_plen = htons((u_int16_t)plen); } else { /* ip6->ip6_plen will be updated in ip6_output() */ } @@ -2873,8 +2802,6 @@ ipsec46_encapsulate(struct ipsec_output_state *state, struct secasvar *sav) return 0; } -#endif /*INET6*/ - /* * Check the variable replay window. * ipsec_chkreplay() performs replay check before ICV verification. @@ -2890,9 +2817,9 @@ ipsec_chkreplay(u_int32_t seq, struct secasvar *sav, u_int8_t replay_index) { const struct secreplay *replay; u_int32_t diff; - int fr; - u_int32_t wsizeb; /* constant: bits of window size */ - int frlast; /* constant: last frame */ + size_t fr; + size_t wsizeb; /* constant: bits of window size */ + size_t frlast; /* constant: last frame */ /* sanity check */ @@ -2962,9 +2889,9 @@ ipsec_updatereplay(u_int32_t seq, struct secasvar *sav, u_int8_t replay_index) { struct secreplay *replay; u_int32_t diff; - int fr; - u_int32_t wsizeb; /* constant: bits of window size */ - int frlast; /* constant: last frame */ + size_t fr; + size_t wsizeb; /* constant: bits of window size */ + size_t frlast; /* constant: last frame */ /* sanity check */ if (sav == NULL) { @@ -3065,9 +2992,10 @@ ok: * wsize: buffer size (bytes). 
*/ static void -vshiftl(unsigned char *bitmap, int nbit, int wsize) +vshiftl(unsigned char *bitmap, int nbit, size_t wsize) { - int s, j, i; + size_t i; + int s, j; unsigned char over; for (j = 0; j < nbit; j += 8) { @@ -3113,7 +3041,6 @@ ipsec4_logpacketstr(struct ip *ip, u_int32_t spi) return buf; } -#if INET6 const char * ipsec6_logpacketstr(struct ip6_hdr *ip6, u_int32_t spi) { @@ -3139,7 +3066,6 @@ ipsec6_logpacketstr(struct ip6_hdr *ip6, u_int32_t spi) return buf; } -#endif /*INET6*/ const char * ipsec_logsastr(struct secasvar *sav) @@ -3166,9 +3092,7 @@ ipsec_logsastr(struct secasvar *sav) snprintf(p, sizeof(buf) - (p - buf), "src=%d.%d.%d.%d dst=%d.%d.%d.%d", s[0], s[1], s[2], s[3], d[0], d[1], d[2], d[3]); - } -#if INET6 - else if (((struct sockaddr *)&saidx->src)->sa_family == AF_INET6) { + } else if (((struct sockaddr *)&saidx->src)->sa_family == AF_INET6) { snprintf(p, sizeof(buf) - (p - buf), "src=%s", ip6_sprintf(&((struct sockaddr_in6 *)&saidx->src)->sin6_addr)); @@ -3179,7 +3103,6 @@ ipsec_logsastr(struct secasvar *sav) " dst=%s", ip6_sprintf(&((struct sockaddr_in6 *)&saidx->dst)->sin6_addr)); } -#endif while (p && *p) { p++; } @@ -3496,7 +3419,7 @@ ipsec4_output(struct ipsec_output_state *state, struct secpolicy *sp, __unused i isr->saidx.mode == IPSEC_MODE_TRANSPORT) { if (ip->ip_p == IPPROTO_UDP) { struct udphdr *udp; - size_t hlen; + u_int32_t hlen; #ifdef _IP_VHL hlen = IP_VHL_HL(ip->ip_vhl) << 2; #else @@ -3566,7 +3489,6 @@ bad: #endif -#if INET6 /* * IPsec output logic for IPv6, transport mode. */ @@ -3578,8 +3500,8 @@ ipsec6_output_trans_internal( struct mbuf *mprev) { struct ip6_hdr *ip6; + size_t plen; int error = 0; - int plen; /* validity check */ if (sav == NULL || sav->sah == NULL) { @@ -3633,7 +3555,7 @@ ipsec6_output_trans_internal( goto bad; } ip6 = mtod(state->m, struct ip6_hdr *); - ip6->ip6_plen = htons(plen); + ip6->ip6_plen = htons((u_int16_t)plen); return 0; bad: @@ -3787,10 +3709,10 @@ static int ipsec6_output_tunnel_internal(struct ipsec_output_state *state, struct secasvar *sav, int *must_be_last) { struct ip6_hdr *ip6; - int error = 0; - int plen; struct sockaddr_in6* dst6; struct route_in6 *ro6; + size_t plen; + int error = 0; /* validity check */ if (sav == NULL || sav->sah == NULL || sav->sah->saidx.mode != IPSEC_MODE_TUNNEL) { @@ -4043,7 +3965,7 @@ ipsec6_output_tunnel_internal(struct ipsec_output_state *state, struct secasvar goto bad; } ip6 = mtod(state->m, struct ip6_hdr *); - ip6->ip6_plen = htons(plen); + ip6->ip6_plen = htons((u_int16_t)plen); done: return 0; @@ -4260,7 +4182,6 @@ bad: state->m = NULL; return error; } -#endif /*INET6*/ #if INET /* @@ -4308,7 +4229,6 @@ ipsec4_splithdr(struct mbuf *m) } #endif -#if INET6 struct mbuf * ipsec6_splithdr(struct mbuf *m) { @@ -4345,7 +4265,6 @@ ipsec6_splithdr(struct mbuf *m) } return m; } -#endif /* validate inbound IPsec tunnel packet. */ int @@ -4473,7 +4392,6 @@ ipsec4_tunnel_validate( return 1; } -#if INET6 /* validate inbound IPsec tunnel packet. */ int ipsec6_tunnel_validate( @@ -4584,7 +4502,6 @@ ipsec6_tunnel_validate( return 1; } -#endif /* * Make a mbuf chain for encryption. 
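
The replay hunks above widen the window indices (fr, wsizeb, frlast) to size_t without changing the algorithm. As a reference for what that algorithm does, here is a deliberately simplified, self-contained model of an RFC 2401-style anti-replay window; it uses a fixed 64-bit bitmap instead of the variable-size window kept in struct secreplay, so it is an illustration, not the kernel code.

#include <stdint.h>

struct replay_model {
	uint64_t bitmap;   /* bit i set => sequence (lastseq - i) already seen */
	uint32_t lastseq;  /* highest sequence number accepted so far */
};

/* Returns 0 if seq is acceptable (and records it), -1 if it must be dropped. */
static int
replay_model_check_and_update(struct replay_model *rp, uint32_t seq)
{
	uint32_t diff;

	if (seq == 0) {
		return -1;                              /* sequence 0 is never valid */
	}
	if (seq > rp->lastseq) {                        /* new, larger sequence number */
		diff = seq - rp->lastseq;
		/* slide the window forward; bits older than 64 packets fall off */
		rp->bitmap = (diff < 64) ? ((rp->bitmap << diff) | 1ULL) : 1ULL;
		rp->lastseq = seq;
		return 0;
	}
	diff = rp->lastseq - seq;
	if (diff >= 64) {
		return -1;                              /* too old: outside the window */
	}
	if (rp->bitmap & (1ULL << diff)) {
		return -1;                              /* duplicate: replay detected */
	}
	rp->bitmap |= (1ULL << diff);                   /* mark as seen */
	return 0;
}

The kernel splits this into a check (ipsec_chkreplay, run before ICV verification) and an update (ipsec_updatereplay, run after), but the window arithmetic is the same idea.
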
@@ -4935,8 +4852,8 @@ ipsec_send_natt_keepalive( bzero(m_mtod(m), m->m_len); m->m_pkthdr.len = m->m_len; - ip->ip_len = m->m_len; - ip->ip_ttl = ip_defttl; + ip->ip_len = (u_short)m->m_len; + ip->ip_ttl = (u_char)ip_defttl; ip->ip_p = IPPROTO_UDP; if (sav->sah->dir != IPSEC_DIR_INBOUND) { ip->ip_src = ((struct sockaddr_in*)&sav->sah->saidx.src)->sin_addr; @@ -4998,7 +4915,7 @@ ipsec_send_natt_keepalive( ip6->ip6_vfc &= ~IPV6_VERSION_MASK; ip6->ip6_vfc |= IPV6_VERSION; ip6->ip6_nxt = IPPROTO_UDP; - ip6->ip6_hlim = ip6_defhlim; + ip6->ip6_hlim = (u_int8_t)ip6_defhlim; ip6->ip6_plen = htons(sizeof(struct udphdr) + 1); if (sav->sah->dir != IPSEC_DIR_INBOUND) { ip6->ip6_src = ((struct sockaddr_in6 *)&sav->sah->saidx.src)->sin6_addr; @@ -5091,7 +5008,7 @@ ipsec_fill_offload_frame(ifnet_t ifp, ip = (__typeof__(ip))(void *)(data + frame_data_offset); uh = (__typeof__(uh))(void *)(data + frame_data_offset + sizeof(*ip)); - frame->length = frame_data_offset + sizeof(struct udpiphdr) + 1; + frame->length = (u_int8_t)(frame_data_offset + sizeof(struct udpiphdr) + 1); frame->type = IFNET_KEEPALIVE_OFFLOAD_FRAME_IPSEC; frame->ether_type = IFNET_KEEPALIVE_OFFLOAD_FRAME_ETHERTYPE_IPV4; @@ -5117,7 +5034,7 @@ ipsec_fill_offload_frame(ifnet_t ifp, } else { ip->ip_id = ip_randomid(); } - ip->ip_ttl = ip_defttl; + ip->ip_ttl = (u_char)ip_defttl; ip->ip_p = IPPROTO_UDP; ip->ip_sum = 0; if (sav->sah->dir != IPSEC_DIR_INBOUND) { @@ -5144,7 +5061,7 @@ ipsec_fill_offload_frame(ifnet_t ifp, } else if (sav->natt_interval != 0) { frame->interval = sav->natt_interval; } else { - frame->interval = natt_keepalive_interval; + frame->interval = (u_int16_t)natt_keepalive_interval; } return TRUE; } @@ -5198,7 +5115,7 @@ ipsec_save_wake_packet(struct mbuf *wake_mbuf, u_int32_t spi, u_int32_t seq) goto done; } - u_int16_t max_len = (wake_mbuf->m_pkthdr.len > IPSEC_MAX_WAKE_PKT_LEN) ? IPSEC_MAX_WAKE_PKT_LEN : wake_mbuf->m_pkthdr.len; + u_int16_t max_len = (wake_mbuf->m_pkthdr.len > IPSEC_MAX_WAKE_PKT_LEN) ? 
IPSEC_MAX_WAKE_PKT_LEN : (u_int16_t)wake_mbuf->m_pkthdr.len; m_copydata(wake_mbuf, 0, max_len, (void *)ipsec_wake_pkt.wake_pkt); ipsec_wake_pkt.wake_pkt_len = max_len; @@ -5212,11 +5129,19 @@ ipsec_save_wake_packet(struct mbuf *wake_mbuf, u_int32_t spi, u_int32_t seq) ipsec_wake_pkt.wake_pkt_seq, ipsec_wake_pkt.wake_pkt_len)); - struct kev_msg ev_msg = { 0 }; + struct kev_msg ev_msg; + bzero(&ev_msg, sizeof(ev_msg)); + ev_msg.vendor_code = KEV_VENDOR_APPLE; ev_msg.kev_class = KEV_NETWORK_CLASS; ev_msg.kev_subclass = KEV_IPSEC_SUBCLASS; - ev_msg.kev_subclass = KEV_IPSEC_WAKE_PACKET; + ev_msg.event_code = KEV_IPSEC_WAKE_PACKET; + + struct ipsec_wake_pkt_event_data event_data; + strlcpy(event_data.wake_uuid, ipsec_wake_pkt.wake_uuid, sizeof(event_data.wake_uuid)); + ev_msg.dv[0].data_ptr = &event_data; + ev_msg.dv[0].data_length = sizeof(event_data); + int result = kev_post_msg(&ev_msg); if (result != 0) { os_log_error(OS_LOG_DEFAULT, "%s: kev_post_msg() failed with error %d for wake uuid %s", @@ -5269,18 +5194,33 @@ ipsec_sleep_wake_handler(void *target, void *refCon, UInt32 messageType, #pragma unused(target, refCon, provider, messageArgument, argSize) switch (messageType) { case kIOMessageSystemWillSleep: + { ipsec_get_local_ports(); + ipsec_save_wake_pkt = false; memset(&ipsec_wake_pkt, 0, sizeof(ipsec_wake_pkt)); IOPMCopySleepWakeUUIDKey(ipsec_wake_pkt.wake_uuid, sizeof(ipsec_wake_pkt.wake_uuid)); ipseclog((LOG_NOTICE, "ipsec: system will sleep, uuid: %s", ipsec_wake_pkt.wake_uuid)); break; - case kIOMessageSystemWillPowerOn: - ipsec_save_wake_pkt = true; - ipseclog((LOG_NOTICE, - "ipsec: system will powered on, uuid: %s", ipsec_wake_pkt.wake_uuid)); + } + case kIOMessageSystemHasPoweredOn: + { + char wake_reason[128] = {0}; + size_t size = sizeof(wake_reason); + if (kernel_sysctlbyname("kern.wakereason", wake_reason, &size, NULL, 0) == 0) { + if (strnstr(wake_reason, "wlan", size) == 0 || + strnstr(wake_reason, "WL.OutboxNotEmpty", size) == 0 || + strnstr(wake_reason, "baseband", size) == 0 || + strnstr(wake_reason, "bluetooth", size) == 0 || + strnstr(wake_reason, "BT.OutboxNotEmpty", size) == 0) { + ipsec_save_wake_pkt = true; + ipseclog((LOG_NOTICE, + "ipsec: system has powered on, uuid: %s reason %s", ipsec_wake_pkt.wake_uuid, wake_reason)); + } + } break; + } default: break; } diff --git a/bsd/netinet6/ipsec.h b/bsd/netinet6/ipsec.h index 0ce45fb5f..12f8bebca 100644 --- a/bsd/netinet6/ipsec.h +++ b/bsd/netinet6/ipsec.h @@ -74,7 +74,7 @@ struct secpolicyindex { struct sockaddr_storage dst; /* IP dst address for SP */ u_int8_t prefs; /* prefix length in bits for src */ u_int8_t prefd; /* prefix length in bits for dst */ - u_int16_t ul_proto; /* upper layer Protocol */ + u_int8_t ul_proto; /* upper layer Protocol */ ifnet_t internal_if; /* Interface a matching packet is bound to */ struct secpolicyaddrrange src_range; /* IP src address range for SP */ struct secpolicyaddrrange dst_range; /* IP dst address range for SP */ @@ -244,6 +244,10 @@ struct ipsec_wake_pkt_info { u_int16_t wake_pkt_len; }; +struct ipsec_wake_pkt_event_data { + uuid_string_t wake_uuid; +}; + #ifdef BSD_KERNEL_PRIVATE /* * Definitions for IPsec & Key sysctl operations. 
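
The wake-packet hunk above fixes the event posting so that the subclass and the event code land in their own fields and the payload travels in the first data vector. A sketch of that kernel-event pattern in isolation (the helper name and parameters are hypothetical; the constants named in the comments are the ones the patch uses):

static void
example_post_network_event(u_int32_t subclass, u_int32_t code,
    void *payload, u_int32_t payload_len)
{
	struct kev_msg ev_msg;

	bzero(&ev_msg, sizeof(ev_msg));
	ev_msg.vendor_code  = KEV_VENDOR_APPLE;
	ev_msg.kev_class    = KEV_NETWORK_CLASS;
	ev_msg.kev_subclass = subclass;          /* e.g. KEV_IPSEC_SUBCLASS */
	ev_msg.event_code   = code;              /* e.g. KEV_IPSEC_WAKE_PACKET */
	ev_msg.dv[0].data_ptr    = payload;      /* optional payload descriptor */
	ev_msg.dv[0].data_length = payload_len;

	if (kev_post_msg(&ev_msg) != 0) {
		/* posting can fail; callers typically just log the error */
	}
}
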
@@ -360,11 +364,11 @@ extern bool ipsec_save_wake_pkt; #define ipseclog(x) do { if (ipsec_debug != 0) _ipsec_log x; } while (0) -extern struct secpolicy *ipsec4_getpolicybysock(struct mbuf *, u_int, +extern struct secpolicy *ipsec4_getpolicybysock(struct mbuf *, u_int8_t, struct socket *, int *); -extern struct secpolicy *ipsec4_getpolicybyaddr(struct mbuf *, u_int, int, +extern struct secpolicy *ipsec4_getpolicybyaddr(struct mbuf *, u_int8_t, int, int *); -extern int ipsec4_getpolicybyinterface(struct mbuf *, u_int, int *, +extern int ipsec4_getpolicybyinterface(struct mbuf *, u_int8_t, int *, struct ip_out_args *, struct secpolicy **); extern u_int ipsec_get_reqlevel(struct ipsecrequest *); @@ -385,7 +389,7 @@ struct tcpcb; extern int ipsec_chkreplay(u_int32_t, struct secasvar *, u_int8_t); extern int ipsec_updatereplay(u_int32_t, struct secasvar *, u_int8_t); -extern size_t ipsec4_hdrsiz(struct mbuf *, u_int, struct inpcb *); +extern size_t ipsec4_hdrsiz(struct mbuf *, u_int8_t, struct inpcb *); extern size_t ipsec_hdrsiz_tcp(struct tcpcb *); extern size_t ipsec_hdrsiz(struct secpolicy *); @@ -401,10 +405,8 @@ extern int ipsec4_output(struct ipsec_output_state *, struct secpolicy *, int); extern struct mbuf * ipsec4_splithdr(struct mbuf *); extern int ipsec4_encapsulate(struct mbuf *, struct secasvar *); #endif -#if INET6 extern struct mbuf * ipsec6_splithdr(struct mbuf *); extern int ipsec6_encapsulate(struct mbuf *, struct secasvar *); -#endif extern int ipsec4_tunnel_validate(struct mbuf *, int, u_int, struct secasvar *, sa_family_t *); extern struct mbuf *ipsec_copypkt(struct mbuf *); extern void ipsec_delaux(struct mbuf *); diff --git a/bsd/netinet6/ipsec6.h b/bsd/netinet6/ipsec6.h index 7e8ff5bb1..1306a93bc 100644 --- a/bsd/netinet6/ipsec6.h +++ b/bsd/netinet6/ipsec6.h @@ -53,12 +53,12 @@ extern int ip6_esp_randpad; struct ip6_out_args; -extern struct secpolicy *ipsec6_getpolicybysock(struct mbuf *, u_int, +extern struct secpolicy *ipsec6_getpolicybysock(struct mbuf *, u_int8_t, struct socket *, int *); -extern struct secpolicy *ipsec6_getpolicybyaddr(struct mbuf *, u_int, int, +extern struct secpolicy *ipsec6_getpolicybyaddr(struct mbuf *, u_int8_t, int, int *); extern int ipsec6_getpolicybyinterface(struct mbuf *, - u_int, int, struct ip6_out_args *, int *, struct secpolicy **); + u_int8_t, int, struct ip6_out_args *, int *, struct secpolicy **); struct inpcb; @@ -70,7 +70,7 @@ extern int ipsec6_in_reject(struct mbuf *, struct inpcb *); struct tcp6cb; -extern size_t ipsec6_hdrsiz(struct mbuf *, u_int, struct inpcb *); +extern size_t ipsec6_hdrsiz(struct mbuf *, u_int8_t, struct inpcb *); struct ip6_hdr; extern const char *ipsec6_logpacketstr(struct ip6_hdr *, u_int32_t); diff --git a/bsd/netinet6/mld6.c b/bsd/netinet6/mld6.c index 9f5d020bf..a056a9cc6 100644 --- a/bsd/netinet6/mld6.c +++ b/bsd/netinet6/mld6.c @@ -167,7 +167,7 @@ static decl_lck_mtx_data(, mld_mtx); SLIST_HEAD(mld_in6m_relhead, in6_multi); static void mli_initvar(struct mld_ifinfo *, struct ifnet *, int); -static struct mld_ifinfo *mli_alloc(int); +static struct mld_ifinfo *mli_alloc(zalloc_flags_t); static void mli_free(struct mld_ifinfo *); static void mli_delete(const struct ifnet *, struct mld_in6m_relhead *); static void mld_dispatch_packet(struct mbuf *); @@ -189,7 +189,7 @@ static int mld_v1_input_report(struct ifnet *, struct mbuf *, const struct ip6_hdr *, /*const*/ struct mld_hdr *); static void mld_v1_process_group_timer(struct in6_multi *, const int); static void mld_v1_process_querier_timers(struct 
mld_ifinfo *); -static int mld_v1_transmit_report(struct in6_multi *, const int); +static int mld_v1_transmit_report(struct in6_multi *, const uint8_t); static uint32_t mld_v1_update_group(struct in6_multi *, const int); static void mld_v2_cancel_link_timers(struct mld_ifinfo *); static uint32_t mld_v2_dispatch_general_query(struct mld_ifinfo *); @@ -254,11 +254,8 @@ static unsigned int mld_mli_list_genid; VERIFY(SLIST_EMPTY(_head)); \ } -#define MLI_ZONE_MAX 64 /* maximum elements in zone */ -#define MLI_ZONE_NAME "mld_ifinfo" /* zone name */ - -static unsigned int mli_size; /* size of zone element */ -static struct zone *mli_zone; /* zone for mld_ifinfo */ +static ZONE_DECLARE(mli_zone, "mld_ifinfo", + sizeof(struct mld_ifinfo), ZC_ZFREE_CLEARMEM); SYSCTL_DECL(_net_inet6); /* Note: Not in any common header. */ @@ -352,7 +349,7 @@ sysctl_mld_gsr SYSCTL_HANDLER_ARGS MLD_LOCK(); - i = mld_gsrdelay.tv_sec; + i = (int)mld_gsrdelay.tv_sec; error = sysctl_handle_int(oidp, &i, 0, req); if (error || !req->newptr) { @@ -565,7 +562,7 @@ mld_is_addr_reported(const struct in6_addr *addr) return 0; } - if (IPV6_ADDR_MC_SCOPE(addr) == IPV6_ADDR_SCOPE_LINKLOCAL) { + if (IPV6_ADDR_MC_SCOPE(addr) == IPV6_ADDR_SCOPE_LINKLOCAL && !IN6_IS_ADDR_UNICAST_BASED_MULTICAST(addr)) { struct in6_addr tmp = *addr; in6_clearscope(&tmp); if (IN6_ARE_ADDR_EQUAL(&tmp, &in6addr_linklocal_allnodes)) { @@ -580,7 +577,7 @@ mld_is_addr_reported(const struct in6_addr *addr) * Attach MLD when PF_INET6 is attached to an interface. */ struct mld_ifinfo * -mld_domifattach(struct ifnet *ifp, int how) +mld_domifattach(struct ifnet *ifp, zalloc_flags_t how) { struct mld_ifinfo *mli; @@ -753,13 +750,10 @@ mli_initvar(struct mld_ifinfo *mli, struct ifnet *ifp, int reattach) } static struct mld_ifinfo * -mli_alloc(int how) +mli_alloc(zalloc_flags_t how) { - struct mld_ifinfo *mli; - - mli = (how == M_WAITOK) ? 
zalloc(mli_zone) : zalloc_noblock(mli_zone); + struct mld_ifinfo *mli = zalloc_flags(mli_zone, how | Z_ZERO); if (mli != NULL) { - bzero(mli, mli_size); lck_mtx_init(&mli->mli_lock, mld_mtx_grp, mld_mtx_attr); mli->mli_debug |= IFD_ALLOC; } @@ -1009,7 +1003,7 @@ mld_v1_update_group(struct in6_multi *inm, const int timer) "skipping.\n", __func__)); break; } - /* FALLTHROUGH */ + OS_FALLTHROUGH; case MLD_SG_QUERY_PENDING_MEMBER: case MLD_G_QUERY_PENDING_MEMBER: case MLD_IDLE_MEMBER: @@ -1045,9 +1039,8 @@ mld_v2_input_query(struct ifnet *ifp, const struct ip6_hdr *ip6, struct mld_ifinfo *mli; struct mldv2_query *mld; struct in6_multi *inm; - uint32_t maxdelay, nsrc, qqi; + uint32_t maxdelay, nsrc, qqi, timer; int err = 0, is_general_query; - uint16_t timer; uint8_t qrv; struct mld_tparams mtp = { .qpt = 0, .it = 0, .cst = 0, .sct = 0 }; @@ -1079,9 +1072,9 @@ mld_v2_input_query(struct ifnet *ifp, const struct ip6_hdr *ip6, mld = (struct mldv2_query *)(mtod(m, uint8_t *) + off); maxdelay = ntohs(mld->mld_maxdelay); /* in 1/10ths of a second */ - if (maxdelay >= 32768) { - maxdelay = (MLD_MRC_MANT(maxdelay) | 0x1000) << - (MLD_MRC_EXP(maxdelay) + 3); + if (maxdelay > SHRT_MAX) { + maxdelay = (MLD_MRC_MANT((uint16_t)maxdelay) | 0x1000) << + (MLD_MRC_EXP((uint16_t)maxdelay) + 3); } timer = maxdelay / MLD_TIMER_SCALE; if (timer == 0) { @@ -1479,6 +1472,7 @@ mld_v1_input_report(struct ifnet *ifp, struct mbuf *m, "ifp 0x%llx(%s)\n", __func__, ip6_sprintf(&mld->mld_addr), (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp))); + OS_FALLTHROUGH; case MLD_LAZY_MEMBER: inm->in6m_state = MLD_LAZY_MEMBER; break; @@ -2023,7 +2017,7 @@ mld_v2_process_group_timers(struct mld_ifinfo *mli, inm->in6m_state = MLD_REPORTING_MEMBER; in6m_clear_recorded(inm); } - /* FALLTHROUGH */ + OS_FALLTHROUGH; case MLD_REPORTING_MEMBER: case MLD_LEAVING_MEMBER: if (state_change_retransmit_timer_expired) { @@ -2034,7 +2028,7 @@ mld_v2_process_group_timers(struct mld_ifinfo *mli, * reset the timer. */ if (--inm->in6m_scrv > 0) { - inm->in6m_sctimer = uri_sec; + inm->in6m_sctimer = (uint16_t)uri_sec; state_change_timers_running6 = 1; /* caller will schedule timer */ } @@ -2184,11 +2178,11 @@ mld_v2_cancel_link_timers(struct mld_ifinfo *mli) SLIST_INSERT_HEAD(&mli->mli_relinmhead, inm, in6m_nrele); MLI_UNLOCK(mli); - /* FALLTHROUGH */ + OS_FALLTHROUGH; case MLD_G_QUERY_PENDING_MEMBER: case MLD_SG_QUERY_PENDING_MEMBER: in6m_clear_recorded(inm); - /* FALLTHROUGH */ + OS_FALLTHROUGH; case MLD_REPORTING_MEMBER: inm->in6m_state = MLD_REPORTING_MEMBER; break; @@ -2235,7 +2229,7 @@ mld_v1_process_querier_timers(struct mld_ifinfo *mli) * Transmit an MLDv1 report immediately. */ static int -mld_v1_transmit_report(struct in6_multi *in6m, const int type) +mld_v1_transmit_report(struct in6_multi *in6m, const uint8_t type) { struct ifnet *ifp; struct in6_ifaddr *ia; @@ -2456,7 +2450,7 @@ mld_initial_join(struct in6_multi *inm, struct mld_ifinfo *mli, if ((ifp->if_flags & IFF_LOOPBACK) || (mli->mli_flags & MLIF_SILENT) || (IFNET_IS_CELLULAR(ifp) && - IN6_IS_ADDR_MC_LINKLOCAL(&inm->in6m_addr)) || + (IN6_IS_ADDR_MC_LINKLOCAL(&inm->in6m_addr) || IN6_IS_ADDR_MC_UNICAST_BASED_LINKLOCAL(&inm->in6m_addr))) || !mld_is_addr_reported(&inm->in6m_addr)) { MLD_PRINTF(("%s: not kicking state machine for silent group\n", __func__)); @@ -2543,13 +2537,13 @@ mld_initial_join(struct in6_multi *inm, struct mld_ifinfo *mli, * use this delay if sooner than the existing one. 
*/ VERIFY(mli->mli_rv > 1); - inm->in6m_scrv = mli->mli_rv; + inm->in6m_scrv = (uint16_t)mli->mli_rv; if (delay) { if (inm->in6m_sctimer > 1) { inm->in6m_sctimer = - min(inm->in6m_sctimer, delay); + MIN(inm->in6m_sctimer, (uint16_t)delay); } else { - inm->in6m_sctimer = delay; + inm->in6m_sctimer = (uint16_t)delay; } } else { inm->in6m_sctimer = 1; @@ -2635,7 +2629,7 @@ mld_handle_state_change(struct in6_multi *inm, struct mld_ifinfo *mli, * If record(s) were enqueued, start the state-change * report timer for this group. */ - inm->in6m_scrv = mli->mli_rv; + inm->in6m_scrv = (uint16_t)mli->mli_rv; inm->in6m_sctimer = 1; mtp->sct = 1; MLI_UNLOCK(mli); @@ -2704,7 +2698,7 @@ mld_final_leave(struct in6_multi *inm, struct mld_ifinfo *mli, */ IF_DRAIN(&inm->in6m_scq); inm->in6m_timer = 0; - inm->in6m_scrv = mli->mli_rv; + inm->in6m_scrv = (uint16_t)mli->mli_rv; MLD_PRINTF(("%s: Leaving %s/%s with %d " "pending retransmissions.\n", __func__, ip6_sprintf(&inm->in6m_addr), @@ -2804,7 +2798,7 @@ mld_v2_enqueue_group_record(struct ifqueue *ifq, struct in6_multi *inm, int minrec0len, m0srcs, msrcs, nbytes, off; int record_has_sources; int now; - int type; + uint8_t type; uint8_t mode; IN6M_LOCK_ASSERT_HELD(inm); @@ -2822,7 +2816,7 @@ mld_v2_enqueue_group_record(struct ifqueue *ifq, struct in6_multi *inm, record_has_sources = 1; pmr = NULL; type = MLD_DO_NOTHING; - mode = inm->in6m_st[1].iss_fmode; + mode = (uint8_t)inm->in6m_st[1].iss_fmode; /* * If we did not transition out of ASM mode during t0->t1, @@ -3050,7 +3044,7 @@ mld_v2_enqueue_group_record(struct ifqueue *ifq, struct in6_multi *inm, } MLD_PRINTF(("%s: msrcs is %d this packet\n", __func__, msrcs)); - pmr->mr_numsrc = htons(msrcs); + pmr->mr_numsrc = htons((uint16_t)msrcs); nbytes += (msrcs * sizeof(struct in6_addr)); } @@ -3145,7 +3139,7 @@ mld_v2_enqueue_group_record(struct ifqueue *ifq, struct in6_multi *inm, break; } } - pmr->mr_numsrc = htons(msrcs); + pmr->mr_numsrc = htons((uint16_t)msrcs); nbytes += (msrcs * sizeof(struct in6_addr)); MLD_PRINTF(("%s: enqueueing next packet\n", __func__)); @@ -3211,7 +3205,7 @@ mld_v2_enqueue_filter_change(struct ifqueue *ifq, struct in6_multi *inm) } ifp = inm->in6m_ifp; /* interface */ - mode = inm->in6m_st[1].iss_fmode; /* filter mode at t1 */ + mode = (uint8_t)inm->in6m_st[1].iss_fmode; /* filter mode at t1 */ crt = REC_NONE; /* current group record type */ drt = REC_NONE; /* mask of completed group record types */ nrt = REC_NONE; /* record type for current node */ @@ -3376,7 +3370,7 @@ mld_v2_enqueue_filter_change(struct ifqueue *ifq, struct in6_multi *inm) } else if (crt == REC_BLOCK) { pmr->mr_type = MLD_BLOCK_OLD_SOURCES; } - pmr->mr_numsrc = htons(rsrcs); + pmr->mr_numsrc = htons((uint16_t)rsrcs); /* * Count the new group record, and enqueue this * packet if it wasn't already queued. 
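
The mld6 hunks here (and the nd6 hunks that follow) replace runtime zinit()/zone_change() setup with a compile-time ZONE_DECLARE and allocate through zalloc_flags(), so Z_ZERO takes the place of the explicit bzero(). A minimal sketch of the resulting pattern, with a placeholder element type and zone name:

struct example_entry {
	int example_field;
};

/* Compile-time zone registration; ZC_ZFREE_CLEARMEM scrubs elements on free. */
static ZONE_DECLARE(example_zone, "example_entry",
    sizeof(struct example_entry), ZC_ZFREE_CLEARMEM);

static struct example_entry *
example_alloc(zalloc_flags_t how)
{
	/* Caller passes Z_WAITOK or Z_NOWAIT; Z_ZERO replaces the old bzero(). */
	return zalloc_flags(example_zone, how | Z_ZERO);
}

static void
example_free(struct example_entry *ep)
{
	zfree(example_zone, ep);
}
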
@@ -3619,7 +3613,7 @@ mld_dispatch_packet(struct mbuf *m) return; } - im6o = ip6_allocmoptions(M_WAITOK); + im6o = ip6_allocmoptions(Z_WAITOK); if (im6o == NULL) { m_freem(m); return; @@ -3816,14 +3810,4 @@ mld_init(void) mld_po.ip6po_prefer_tempaddr = IP6PO_TEMPADDR_NOTPREFER; mld_po.ip6po_flags = IP6PO_DONTFRAG; LIST_INIT(&mli_head); - - mli_size = sizeof(struct mld_ifinfo); - mli_zone = zinit(mli_size, MLI_ZONE_MAX * mli_size, - 0, MLI_ZONE_NAME); - if (mli_zone == NULL) { - panic("%s: failed allocating %s", __func__, MLI_ZONE_NAME); - /* NOTREACHED */ - } - zone_change(mli_zone, Z_EXPAND, TRUE); - zone_change(mli_zone, Z_CALLERACCT, FALSE); } diff --git a/bsd/netinet6/mld6_var.h b/bsd/netinet6/mld6_var.h index cc7886a02..553428e27 100644 --- a/bsd/netinet6/mld6_var.h +++ b/bsd/netinet6/mld6_var.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2017 Apple Inc. All rights reserved. + * Copyright (c) 2000-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -239,7 +239,7 @@ struct mld_tparams { extern int mld_change_state(struct in6_multi *, struct mld_tparams *, const int); -extern struct mld_ifinfo *mld_domifattach(struct ifnet *, int); +extern struct mld_ifinfo *mld_domifattach(struct ifnet *, zalloc_flags_t); extern void mld_domifreattach(struct mld_ifinfo *); extern void mld_domifdetach(struct ifnet *); extern void mld_fasttimo(void); diff --git a/bsd/netinet6/nd6.c b/bsd/netinet6/nd6.c index e55d05571..8e7d18301 100644 --- a/bsd/netinet6/nd6.c +++ b/bsd/netinet6/nd6.c @@ -132,10 +132,7 @@ int nd6_debug = 1; int nd6_debug = 0; #endif -int nd6_optimistic_dad = - (ND6_OPTIMISTIC_DAD_LINKLOCAL | ND6_OPTIMISTIC_DAD_AUTOCONF | - ND6_OPTIMISTIC_DAD_TEMPORARY | ND6_OPTIMISTIC_DAD_DYNAMIC | - ND6_OPTIMISTIC_DAD_SECURED | ND6_OPTIMISTIC_DAD_MANUAL); +int nd6_optimistic_dad = ND6_OPTIMISTIC_DAD_DEFAULT; /* for debugging? */ static int nd6_inuse, nd6_allocated; @@ -176,9 +173,9 @@ static lck_grp_t *nd_if_lock_grp = NULL; static lck_attr_t *nd_if_lock_attr = NULL; /* Protected by nd6_mutex */ -struct nd_drhead nd_defrouter; +struct nd_drhead nd_defrouter_list; struct nd_prhead nd_prefix = { .lh_first = 0 }; - +struct nd_rtihead nd_rti_list; /* * nd6_timeout() is scheduled on a demand basis. nd6_timeout_run is used * to indicate whether or not a timeout has been scheduled. 
The rnh_lock @@ -208,7 +205,7 @@ static struct sockaddr_in6 all1_sa; static int regen_tmpaddr(struct in6_ifaddr *); extern lck_mtx_t *nd6_mutex; -static struct llinfo_nd6 *nd6_llinfo_alloc(int); +static struct llinfo_nd6 *nd6_llinfo_alloc(zalloc_flags_t); static void nd6_llinfo_free(void *); static void nd6_llinfo_purge(struct rtentry *); static void nd6_llinfo_get_ri(struct rtentry *, struct rt_reach_info *); @@ -222,6 +219,12 @@ static int nd6_is_new_addr_neighbor(struct sockaddr_in6 *, struct ifnet *); static int nd6_siocgdrlst(void *, int); static int nd6_siocgprlst(void *, int); +static void nd6_router_select_rti_entries(struct ifnet *); +static void nd6_purge_interface_default_routers(struct ifnet *); +static void nd6_purge_interface_rti_entries(struct ifnet *); +static void nd6_purge_interface_prefixes(struct ifnet *); +static void nd6_purge_interface_llinfo(struct ifnet *); + static int nd6_sysctl_drlist SYSCTL_HANDLER_ARGS; static int nd6_sysctl_prlist SYSCTL_HANDLER_ARGS; @@ -247,9 +250,8 @@ static int nd6_sysctl_prlist SYSCTL_HANDLER_ARGS; (_ln)->ln_flags |= ND6_LNF_IN_USE; \ } while (0) -static struct zone *llinfo_nd6_zone; -#define LLINFO_ND6_ZONE_MAX 256 /* maximum elements in zone */ -#define LLINFO_ND6_ZONE_NAME "llinfo_nd6" /* name for zone */ +static ZONE_DECLARE(llinfo_nd6_zone, "llinfo_nd6", + sizeof(struct llinfo_nd6), ZC_ZFREE_CLEARMEM); extern int tvtohz(struct timeval *); @@ -272,6 +274,14 @@ SYSCTL_INT(_net_inet6_ip6, OID_AUTO, maxchainsent, CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_maxchainsent, 0, "use dlil_output_list"); +SYSCTL_DECL(_net_inet6_icmp6); +int nd6_process_rti = ND6_PROCESS_RTI_DEFAULT; + +SYSCTL_INT(_net_inet6_icmp6, OID_AUTO, nd6_process_rti, CTLFLAG_RW | CTLFLAG_LOCKED, + &nd6_process_rti, 0, + "Enable/disable processing of Route Information Option in the " + "IPv6 Router Advertisement."); + void nd6_init(void) { @@ -286,25 +296,15 @@ nd6_init(void) } /* initialization of the default router list */ - TAILQ_INIT(&nd_defrouter); + TAILQ_INIT(&nd_defrouter_list); + TAILQ_INIT(&nd_rti_list); nd_if_lock_grp_attr = lck_grp_attr_alloc_init(); nd_if_lock_grp = lck_grp_alloc_init("nd_if_lock", nd_if_lock_grp_attr); nd_if_lock_attr = lck_attr_alloc_init(); - llinfo_nd6_zone = zinit(sizeof(struct llinfo_nd6), - LLINFO_ND6_ZONE_MAX * sizeof(struct llinfo_nd6), 0, - LLINFO_ND6_ZONE_NAME); - if (llinfo_nd6_zone == NULL) { - panic("%s: failed allocating llinfo_nd6_zone", __func__); - } - - zone_change(llinfo_nd6_zone, Z_EXPAND, TRUE); - zone_change(llinfo_nd6_zone, Z_CALLERACCT, FALSE); - nd6_nbr_init(); nd6_rtr_init(); - nd6_prproxy_init(); nd6_init_done = 1; @@ -313,17 +313,9 @@ nd6_init(void) } static struct llinfo_nd6 * -nd6_llinfo_alloc(int how) +nd6_llinfo_alloc(zalloc_flags_t how) { - struct llinfo_nd6 *ln; - - ln = (how == M_WAITOK) ? 
zalloc(llinfo_nd6_zone) : - zalloc_noblock(llinfo_nd6_zone); - if (ln != NULL) { - bzero(ln, sizeof(*ln)); - } - - return ln; + return zalloc_flags(llinfo_nd6_zone, how | Z_ZERO); } static void @@ -733,8 +725,20 @@ nd6_options(union nd_opts *ndopts) break; case ND_OPT_RDNSS: case ND_OPT_DNSSL: + case ND_OPT_CAPTIVE_PORTAL: /* ignore */ break; + case ND_OPT_ROUTE_INFO: + if (nd6_process_rti) { + if (ndopts->nd_opt_array[nd_opt->nd_opt_type] == 0) { + ndopts->nd_opt_array[nd_opt->nd_opt_type] + = nd_opt; + } + ndopts->nd_opts_rti_end = + (struct nd_opt_route_info *)nd_opt; + break; + } + OS_FALLTHROUGH; default: /* * Unknown options must be silently ignored, @@ -770,44 +774,16 @@ struct nd6svc_arg { uint32_t found; }; -/* - * ND6 service routine to expire default route list and prefix list - */ + static void -nd6_service(void *arg) +nd6_service_neighbor_cache(struct nd6svc_arg *ap, uint64_t timenow) { - struct nd6svc_arg *ap = arg; struct llinfo_nd6 *ln; - struct nd_defrouter *dr = NULL; - struct nd_prefix *pr = NULL; struct ifnet *ifp = NULL; - struct in6_ifaddr *ia6, *nia6; - uint64_t timenow; boolean_t send_nc_failure_kev = FALSE; - struct nd_drhead nd_defrouter_tmp; - struct nd_defrouter *ndr = NULL; struct radix_node_head *rnh = rt_tables[AF_INET6]; LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_OWNED); - /* - * Since we may drop rnh_lock and nd6_mutex below, we want - * to run this entire operation single threaded. - */ - while (nd6_service_busy) { - nd6log2(debug, "%s: %s is blocked by %d waiters\n", - __func__, ap->draining ? "drainer" : "timer", - nd6_service_waiters); - nd6_service_waiters++; - (void) msleep(nd6_service_wc, rnh_lock, (PZERO - 1), - __func__, NULL); - LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_OWNED); - } - - /* We are busy now; tell everyone else to go away */ - nd6_service_busy = TRUE; - - net_update_uptime(); - timenow = net_uptime(); again: /* * send_nc_failure_kev gets set when default router's IPv6 address @@ -923,7 +899,7 @@ again: * entries without oustanding route refcnt. */ if (ln->ln_state > ND6_LLINFO_INCOMPLETE) { - ND6_CACHE_STATE_TRANSITION(ln, ND6_LLINFO_STALE); + ND6_CACHE_STATE_TRANSITION(ln, (short)ND6_LLINFO_STALE); } else { ND6_CACHE_STATE_TRANSITION(ln, ND6_LLINFO_PURGE); } @@ -1088,7 +1064,7 @@ again: lck_mtx_lock(rnh_lock); goto again; } - ND6_CACHE_STATE_TRANSITION(ln, ND6_LLINFO_STALE); /* XXX */ + ND6_CACHE_STATE_TRANSITION(ln, ND6_LLINFO_STALE); /* XXX */ ln_setexpire(ln, timenow + nd6_gctimer); RT_UNLOCK(rt); ap->aging_lazy++; @@ -1165,19 +1141,29 @@ again: RT_UNLOCK(rt); ln = next; } - lck_mtx_unlock(rnh_lock); +} +static void +nd6_service_expired_default_router(struct nd6svc_arg *ap, uint64_t timenow) +{ + struct nd_defrouter *dr = NULL; + struct nd_defrouter *ndr = NULL; + struct nd_drhead nd_defrouter_tmp; /* expire default router list */ TAILQ_INIT(&nd_defrouter_tmp); + LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_NOTOWNED); lck_mtx_lock(nd6_mutex); - TAILQ_FOREACH_SAFE(dr, &nd_defrouter, dr_entry, ndr) { + + TAILQ_FOREACH_SAFE(dr, &nd_defrouter_list, dr_entry, ndr) { ap->found++; if (dr->expire != 0 && dr->expire < timenow) { VERIFY(dr->ifp != NULL); in6_ifstat_inc(dr->ifp, ifs6_defrtr_expiry_cnt); - in6_event_enqueue_nwk_wq_entry(IN6_NDP_RTR_EXPIRY, dr->ifp, - &dr->rtaddr, dr->rtlifetime); + if ((dr->stateflags & NDDRF_INELIGIBLE) == 0) { + in6_event_enqueue_nwk_wq_entry(IN6_NDP_RTR_EXPIRY, dr->ifp, + &dr->rtaddr, dr->rtlifetime); + } if (dr->ifp != NULL && dr->ifp->if_type == IFT_CELLULAR) { /* @@ -1218,7 +1204,7 @@ again: * router list. 
* Remove the reference after calling defrtrlist_del */ - TAILQ_REMOVE(&nd_defrouter, dr, dr_entry); + TAILQ_REMOVE(&nd_defrouter_list, dr, dr_entry); TAILQ_INSERT_TAIL(&nd_defrouter_tmp, dr, dr_entry); } } else { @@ -1243,10 +1229,11 @@ again: */ TAILQ_FOREACH_SAFE(dr, &nd_defrouter_tmp, dr_entry, ndr) { TAILQ_REMOVE(&nd_defrouter_tmp, dr, dr_entry); - defrtrlist_del(dr); + defrtrlist_del(dr, NULL); NDDR_REMREF(dr); /* remove list reference */ } + /* XXX TBD: Also iterate through RTI router lists */ /* * Also check if default router selection needs to be triggered * for default interface, to avoid an issue with co-existence of @@ -1254,17 +1241,195 @@ again: * discovery/selection. */ if (trigger_v6_defrtr_select) { - defrouter_select(NULL); + defrouter_select(NULL, NULL); trigger_v6_defrtr_select = FALSE; } lck_mtx_unlock(nd6_mutex); +} + +static void +nd6_service_expired_route_info(struct nd6svc_arg *ap, uint64_t timenow) +{ + struct nd_route_info *rti = NULL; + struct nd_route_info *rti_next = NULL; + + LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_NOTOWNED); + lck_mtx_lock(nd6_mutex); + nd6_rti_list_wait(__func__); + + TAILQ_FOREACH_SAFE(rti, &nd_rti_list, nd_rti_entry, rti_next) { + struct nd_defrouter *dr = NULL; + struct nd_defrouter *ndr = NULL; + struct nd_route_info rti_tmp = {}; + + rti_tmp.nd_rti_prefix = rti->nd_rti_prefix; + rti_tmp.nd_rti_prefixlen = rti->nd_rti_prefixlen; + TAILQ_INIT(&rti_tmp.nd_rti_router_list); + + TAILQ_FOREACH_SAFE(dr, &rti->nd_rti_router_list, dr_entry, ndr) { + ap->found++; + if (dr->expire != 0 && dr->expire < timenow) { + VERIFY(dr->ifp != NULL); + if (dr->ifp != NULL && + dr->ifp->if_type == IFT_CELLULAR) { + /* + * Don't expire these routes over cellular. + * XXX Should we change this for non default routes? + */ + dr->expire += dr->rtlifetime; + nd6log2(debug, + "%s: Refreshing expired default router entry " + "%s for interface %s\n", __func__, + ip6_sprintf(&dr->rtaddr), if_name(dr->ifp)); + } else { + ap->killed++; + /* + * Remove the entry from rti entry's router list + * and add it to the temp list. + * Remove the reference after calling defrtrlist_del + */ + TAILQ_REMOVE(&rti->nd_rti_router_list, dr, dr_entry); + TAILQ_INSERT_TAIL(&rti_tmp.nd_rti_router_list, dr, dr_entry); + } + } else { + if (dr->expire == 0 || (dr->stateflags & NDDRF_STATIC)) { + ap->sticky++; + } else { + ap->aging_lazy++; + } + } + } + + /* + * Keep the following separate from the above + * iteration of nd_defrouter because it's not safe + * to call defrtrlist_del while iterating global default + * router list. Global list has to be traversed + * while holding nd6_mutex throughout. + * + * The following call to defrtrlist_del should be + * safe as we are iterating a local list of + * default routers. + */ + TAILQ_FOREACH_SAFE(dr, &rti_tmp.nd_rti_router_list, dr_entry, ndr) { + TAILQ_REMOVE(&rti_tmp.nd_rti_router_list, dr, dr_entry); + defrtrlist_del(dr, &rti->nd_rti_router_list); + NDDR_REMREF(dr); /* remove list reference */ + } + + /* + * The above may have removed an entry from default router list. + * If it did and the list is now empty, remove the rti as well. 
+ */ + if (TAILQ_EMPTY(&rti->nd_rti_router_list)) { + TAILQ_REMOVE(&nd_rti_list, rti, nd_rti_entry); + ndrti_free(rti); + } + } + + LCK_MTX_ASSERT(nd6_mutex, LCK_MTX_ASSERT_OWNED); + nd6_rti_list_signal_done(); + lck_mtx_unlock(nd6_mutex); +} + + +/* + * @function nd6_handle_duplicated_ip6_addr + * + * @brief + * Handle a duplicated IPv6 secured non-termporary address + * + * @discussion + * If the collision count hasn't been exceeded, removes the old + * conflicting IPv6 address, increments the collision count, + * and allocates a new address. + * + * Returns TRUE if the old address was removed, and the locks + * (in6_ifaddr_rwlock, ia6->ia_ifa) were unlocked. + */ +static boolean_t +nd6_handle_duplicated_ip6_addr(struct in6_ifaddr *ia6) +{ + uint8_t collision_count; + int error = 0; + struct in6_ifaddr *new_ia6; + struct nd_prefix *pr; + struct ifnet *ifp; + + LCK_RW_ASSERT(&in6_ifaddr_rwlock, LCK_RW_ASSERT_EXCLUSIVE); + IFA_LOCK_ASSERT_HELD(&ia6->ia_ifa); + + /* don't retry too many times */ + collision_count = ia6->ia6_cga_collision_count; + if (collision_count >= ip6_cga_conflict_retries) { + return FALSE; + } + + /* need the prefix to allocate a new address */ + pr = ia6->ia6_ndpr; + if (pr == NULL) { + return FALSE; + } + NDPR_ADDREF(pr); + ifp = pr->ndpr_ifp; + log(LOG_DEBUG, + "%s: %s duplicated (collision count %d)\n", + ifp->if_xname, ip6_sprintf(&ia6->ia_addr.sin6_addr), + collision_count); + + /* remove the old address */ + IFA_UNLOCK(&ia6->ia_ifa); + lck_rw_done(&in6_ifaddr_rwlock); + in6_purgeaddr(&ia6->ia_ifa); + + /* allocate a new address with new collision count */ + collision_count++; + new_ia6 = in6_pfx_newpersistaddr(pr, 1, &error, FALSE, collision_count); + if (new_ia6 != NULL) { + log(LOG_DEBUG, + "%s: %s new (collision count %d)\n", + ifp->if_xname, ip6_sprintf(&new_ia6->ia_addr.sin6_addr), + collision_count); + IFA_LOCK(&new_ia6->ia_ifa); + NDPR_LOCK(pr); + new_ia6->ia6_ndpr = pr; + NDPR_ADDREF(pr); /* for addr reference */ + pr->ndpr_addrcnt++; + VERIFY(pr->ndpr_addrcnt != 0); + NDPR_UNLOCK(pr); + IFA_UNLOCK(&new_ia6->ia_ifa); + IFA_REMREF(&new_ia6->ia_ifa); + } else { + log(LOG_ERR, "%s: in6_pfx_newpersistaddr failed %d\n", + __func__, error); + } + + /* release extra prefix reference */ + NDPR_REMREF(pr); + return TRUE; +} + +static boolean_t +secured_address_is_duplicated(int flags) +{ +#define _IN6_IFF_DUPLICATED_AUTOCONF_SECURED \ + (IN6_IFF_DUPLICATED | IN6_IFF_AUTOCONF | IN6_IFF_SECURED) + return (flags & _IN6_IFF_DUPLICATED_AUTOCONF_SECURED) == + _IN6_IFF_DUPLICATED_AUTOCONF_SECURED; +} +static void +nd6_service_ip6_addr(struct nd6svc_arg *ap, uint64_t timenow) +{ + struct in6_ifaddr *ia6 = NULL; + struct in6_ifaddr *nia6 = NULL; /* * expire interface addresses. * in the past the loop was inside prefix expiry processing. - * However, from a stricter speci-confrmance standpoint, we should + * However, from a stricter spec-conformance standpoint, we should * rather separate address lifetimes and prefix lifetimes. */ + addrloop: lck_rw_lock_exclusive(&in6_ifaddr_rwlock); @@ -1279,6 +1444,20 @@ addrloop: * away since we drop in6_ifaddr_rwlock below. */ IFA_ADDREF_LOCKED(&ia6->ia_ifa); + + /* check for duplicated secured address */ + if (secured_address_is_duplicated(ia6->ia6_flags) && + nd6_handle_duplicated_ip6_addr(ia6)) { + /* + * nd6_handle_duplicated_ip6_addr() unlocked + * (in6_ifaddr_rwlock, ia6->ia_ifa) already. + * Still need to release extra reference on + * ia6->ia_ifa taken above. 
+ */ + IFA_REMREF(&ia6->ia_ifa); + goto addrloop; + } + /* check address lifetime */ if (IFA6_IS_INVALID(ia6, timenow)) { /* @@ -1401,7 +1580,14 @@ addrloop: IFA_REMREF(&ia6->ia_ifa); } lck_rw_done(&in6_ifaddr_rwlock); +} +static void +nd6_service_expired_prefix(struct nd6svc_arg *ap, uint64_t timenow) +{ + struct nd_prefix *pr = NULL; + + LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_NOTOWNED); lck_mtx_lock(nd6_mutex); /* expire prefix list */ pr = nd_prefix.lh_first; @@ -1426,7 +1612,7 @@ addrloop: * separate. NEVER perform in6_purgeaddr here. */ pr->ndpr_stateflags |= NDPRF_PROCESSED_SERVICE; - NDPR_ADDREF_LOCKED(pr); + NDPR_ADDREF(pr); prelist_remove(pr); NDPR_UNLOCK(pr); @@ -1456,6 +1642,57 @@ addrloop: NDPR_UNLOCK(pr); } lck_mtx_unlock(nd6_mutex); +} + + +/* + * ND6 service routine to expire default route list and prefix list + */ +static void +nd6_service(void *arg) +{ + struct nd6svc_arg *ap = arg; + uint64_t timenow; + + LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_OWNED); + /* + * Since we may drop rnh_lock and nd6_mutex below, we want + * to run this entire operation single threaded. + */ + while (nd6_service_busy) { + nd6log2(debug, "%s: %s is blocked by %d waiters\n", + __func__, ap->draining ? "drainer" : "timer", + nd6_service_waiters); + nd6_service_waiters++; + (void) msleep(nd6_service_wc, rnh_lock, (PZERO - 1), + __func__, NULL); + LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_OWNED); + } + + /* We are busy now; tell everyone else to go away */ + nd6_service_busy = TRUE; + net_update_uptime(); + timenow = net_uptime(); + + /* Iterate and service neighbor cache entries */ + nd6_service_neighbor_cache(ap, timenow); + + /* + * There is lock ordering requirement and rnh_lock + * has to be released before acquiring nd6_mutex. + */ + lck_mtx_unlock(rnh_lock); + + /* Iterate and service expired default router */ + nd6_service_expired_default_router(ap, timenow); + /* Iterate and service expired route information entries */ + nd6_service_expired_route_info(ap, timenow); + + /* Iterate and service expired/duplicated IPv6 address */ + nd6_service_ip6_addr(ap, timenow); + + /* Iterate and service expired IPv6 prefixes */ + nd6_service_expired_prefix(ap, timenow); lck_mtx_lock(rnh_lock); /* We're done; let others enter */ @@ -1466,7 +1703,6 @@ addrloop: } } - static int nd6_need_draining = 0; void @@ -1733,24 +1969,18 @@ regen_tmpaddr(struct in6_ifaddr *ia6) return -1; } -/* - * Nuke neighbor cache/prefix/default router management table, right before - * ifp goes away. 
- */ -void -nd6_purge(struct ifnet *ifp) +static void +nd6_purge_interface_default_routers(struct ifnet *ifp) { - struct llinfo_nd6 *ln; - struct nd_defrouter *dr, *ndr; - struct nd_prefix *pr, *npr; - boolean_t removed; - struct nd_drhead nd_defrouter_tmp; + struct nd_defrouter *dr = NULL; + struct nd_defrouter *ndr = NULL; + struct nd_drhead nd_defrouter_tmp = {}; TAILQ_INIT(&nd_defrouter_tmp); - /* Nuke default router list entries toward ifp */ - lck_mtx_lock(nd6_mutex); - TAILQ_FOREACH_SAFE(dr, &nd_defrouter, dr_entry, ndr) { + LCK_MTX_ASSERT(nd6_mutex, LCK_MTX_ASSERT_OWNED); + + TAILQ_FOREACH_SAFE(dr, &nd_defrouter_list, dr_entry, ndr) { if (dr->ifp != ifp) { continue; } @@ -1775,7 +2005,7 @@ nd6_purge(struct ifnet *ifp) * For that reason, installed ones must be inserted * at the tail and uninstalled ones at the head */ - TAILQ_REMOVE(&nd_defrouter, dr, dr_entry); + TAILQ_REMOVE(&nd_defrouter_list, dr, dr_entry); if (dr->stateflags & NDDRF_INSTALLED) { TAILQ_INSERT_TAIL(&nd_defrouter_tmp, dr, dr_entry); @@ -1795,12 +2025,21 @@ nd6_purge(struct ifnet *ifp) */ TAILQ_FOREACH_SAFE(dr, &nd_defrouter_tmp, dr_entry, ndr) { TAILQ_REMOVE(&nd_defrouter_tmp, dr, dr_entry); - defrtrlist_del(dr); + defrtrlist_del(dr, NULL); NDDR_REMREF(dr); /* remove list reference */ } +} + +static void +nd6_purge_interface_prefixes(struct ifnet *ifp) +{ + boolean_t removed = FALSE; + struct nd_prefix *pr = NULL; + struct nd_prefix *npr = NULL; + + LCK_MTX_ASSERT(nd6_mutex, LCK_MTX_ASSERT_OWNED); /* Nuke prefix list entries toward ifp */ - removed = FALSE; for (pr = nd_prefix.lh_first; pr; pr = npr) { NDPR_LOCK(pr); npr = pr->ndpr_next; @@ -1822,7 +2061,7 @@ nd6_purge(struct ifnet *ifp) * by itself. * (jinmei@kame.net 20010129) */ - NDPR_ADDREF_LOCKED(pr); + NDPR_ADDREF(pr); prelist_remove(pr); NDPR_UNLOCK(pr); NDPR_REMREF(pr); @@ -1835,25 +2074,107 @@ nd6_purge(struct ifnet *ifp) if (removed) { pfxlist_onlink_check(); } - lck_mtx_unlock(nd6_mutex); +} - /* cancel default outgoing interface setting */ - if (nd6_defifindex == ifp->if_index) { - nd6_setdefaultiface(0); +static void +nd6_router_select_rti_entries(struct ifnet *ifp) +{ + struct nd_route_info *rti = NULL; + struct nd_route_info *rti_next = NULL; + + nd6_rti_list_wait(__func__); + + TAILQ_FOREACH_SAFE(rti, &nd_rti_list, nd_rti_entry, rti_next) { + defrouter_select(ifp, &rti->nd_rti_router_list); } - /* - * Perform default router selection even when we are a router, - * if Scoped Routing is enabled. - */ - lck_mtx_lock(nd6_mutex); - /* refresh default router list */ - defrouter_select(ifp); - lck_mtx_unlock(nd6_mutex); + nd6_rti_list_signal_done(); +} - /* - * Nuke neighbor cache entries for the ifp. - * Note that rt->rt_ifp may not be the same as ifp, +static void +nd6_purge_interface_rti_entries(struct ifnet *ifp) +{ + struct nd_route_info *rti = NULL; + struct nd_route_info *rti_next = NULL; + + nd6_rti_list_wait(__func__); + + TAILQ_FOREACH_SAFE(rti, &nd_rti_list, nd_rti_entry, rti_next) { + struct nd_route_info rti_tmp = {}; + struct nd_defrouter *dr = NULL; + struct nd_defrouter *ndr = NULL; + + rti_tmp.nd_rti_prefix = rti->nd_rti_prefix; + rti_tmp.nd_rti_prefixlen = rti->nd_rti_prefixlen; + TAILQ_INIT(&rti_tmp.nd_rti_router_list); + + TAILQ_FOREACH_SAFE(dr, &rti->nd_rti_router_list, dr_entry, ndr) { + /* + * If ifp is provided, skip the entries that don't match. + * Else it is treated as a purge. + */ + if (ifp != NULL && dr->ifp != ifp) { + continue; + } + + /* + * Remove the entry from rti's router list + * and add it to the temp list. 
+ * Remove the reference after calling defrtrlist_del. + * + * The uninstalled entries have to be iterated first + * when we call defrtrlist_del. + * This is to ensure that we don't end up calling + * router selection when there are other + * uninstalled candidate default routers on + * the interface. + * If we don't respect that order, we may end + * up missing out on some entries. + * + * For that reason, installed ones must be inserted + * at the tail and uninstalled ones at the head + */ + + TAILQ_REMOVE(&rti->nd_rti_router_list, dr, dr_entry); + if (dr->stateflags & NDDRF_INSTALLED) { + TAILQ_INSERT_TAIL(&rti_tmp.nd_rti_router_list, dr, dr_entry); + } else { + TAILQ_INSERT_HEAD(&rti_tmp.nd_rti_router_list, dr, dr_entry); + } + } + + /* + * The following call to defrtrlist_del should be + * safe as we are iterating a local list of + * routers. + * + * We don't really need nd6_mutex here but keeping + * it as it is to avoid changing assertios held in + * the functions in the call-path. + */ + TAILQ_FOREACH_SAFE(dr, &rti_tmp.nd_rti_router_list, dr_entry, ndr) { + TAILQ_REMOVE(&rti_tmp.nd_rti_router_list, dr, dr_entry); + defrtrlist_del(dr, &rti->nd_rti_router_list); + NDDR_REMREF(dr); /* remove list reference */ + } + /* + * The above may have removed an entry from default router list. + * If it did and the list is now empty, remove the rti as well. + */ + if (TAILQ_EMPTY(&rti->nd_rti_router_list)) { + TAILQ_REMOVE(&nd_rti_list, rti, nd_rti_entry); + ndrti_free(rti); + } + } + + nd6_rti_list_signal_done(); +} + +static void +nd6_purge_interface_llinfo(struct ifnet *ifp) +{ + struct llinfo_nd6 *ln = NULL; + /* Note that rt->rt_ifp may not be the same as ifp, * due to KAME goto ours hack. See RTM_RESOLVE case in * nd6_rtrequest(), and ip6_input(). */ @@ -1894,6 +2215,47 @@ again: lck_mtx_unlock(rnh_lock); } +/* + * Nuke neighbor cache/prefix/default router management table, right before + * ifp goes away. + */ +void +nd6_purge(struct ifnet *ifp) +{ + LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_NOTOWNED); + lck_mtx_lock(nd6_mutex); + + /* Nuke default router list entries toward ifp */ + nd6_purge_interface_default_routers(ifp); + + /* Nuke prefix list entries toward ifp */ + nd6_purge_interface_prefixes(ifp); + + /* Nuke route info option entries toward ifp */ + nd6_purge_interface_rti_entries(ifp); + + lck_mtx_unlock(nd6_mutex); + + /* cancel default outgoing interface setting */ + if (nd6_defifindex == ifp->if_index) { + nd6_setdefaultiface(0); + } + + /* + * Perform default router selection even when we are a router, + * if Scoped Routing is enabled. + * XXX ?Should really not be needed since when defrouter_select + * was changed to work on interface. + */ + lck_mtx_lock(nd6_mutex); + /* refresh default router list */ + defrouter_select(ifp, NULL); + lck_mtx_unlock(nd6_mutex); + + /* Nuke neighbor cache entries for the ifp. */ + nd6_purge_interface_llinfo(ifp); +} + /* * Upon success, the returned route will be locked and the caller is * responsible for releasing the reference and doing RT_UNLOCK(rt). @@ -2200,7 +2562,8 @@ nd6_free(struct rtentry *rt) * not harmful, it was not really necessary. Perform default router * selection even when we are a router, if Scoped Routing is enabled. 
*/ - dr = defrouter_lookup(&SIN6(rt_key(rt))->sin6_addr, rt->rt_ifp); + /* XXX TDB Handle lists in route information option as well */ + dr = defrouter_lookup(NULL, &SIN6(rt_key(rt))->sin6_addr, rt->rt_ifp); if ((ln && ln->ln_router) || dr) { /* @@ -2246,7 +2609,10 @@ nd6_free(struct rtentry *rt) /* * refresh default router list */ - defrouter_select(rt->rt_ifp); + defrouter_select(rt->rt_ifp, NULL); + + /* Loop through all RTI's as well and trigger router selection. */ + nd6_router_select_rti_entries(rt->rt_ifp); } RT_LOCK_ASSERT_NOTHELD(rt); lck_mtx_unlock(nd6_mutex); @@ -2382,25 +2748,25 @@ nd6_rtrequest(int req, struct rtentry *rt, struct sockaddr *sa) break; } } - /* - * In IPv4 code, we try to annonuce new RTF_ANNOUNCE entry here. - * We don't do that here since llinfo is not ready yet. - * - * There are also couple of other things to be discussed: - * - unsolicited NA code needs improvement beforehand - * - RFC4861 says we MAY send multicast unsolicited NA - * (7.2.6 paragraph 4), however, it also says that we - * SHOULD provide a mechanism to prevent multicast NA storm. - * we don't have anything like it right now. - * note that the mechanism needs a mutual agreement - * between proxies, which means that we need to implement - * a new protocol, or a new kludge. - * - from RFC4861 6.2.4, host MUST NOT send an unsolicited RA. - * we need to check ip6forwarding before sending it. - * (or should we allow proxy ND configuration only for - * routers? there's no mention about proxy ND from hosts) - */ - /* FALLTHROUGH */ + /* + * In IPv4 code, we try to annonuce new RTF_ANNOUNCE entry here. + * We don't do that here since llinfo is not ready yet. + * + * There are also couple of other things to be discussed: + * - unsolicited NA code needs improvement beforehand + * - RFC4861 says we MAY send multicast unsolicited NA + * (7.2.6 paragraph 4), however, it also says that we + * SHOULD provide a mechanism to prevent multicast NA storm. + * we don't have anything like it right now. + * note that the mechanism needs a mutual agreement + * between proxies, which means that we need to implement + * a new protocol, or a new kludge. + * - from RFC4861 6.2.4, host MUST NOT send an unsolicited RA. + * we need to check ip6forwarding before sending it. + * (or should we allow proxy ND configuration only for + * routers? there's no mention about proxy ND from hosts) + */ + OS_FALLTHROUGH; case RTM_RESOLVE: if (!(ifp->if_flags & (IFF_POINTOPOINT | IFF_LOOPBACK))) { /* @@ -2431,10 +2797,7 @@ nd6_rtrequest(int req, struct rtentry *rt, struct sockaddr *sa) * Case 2: This route may come from cloning, or a manual route * add with a LL address. 
*/ - rt->rt_llinfo = ln = nd6_llinfo_alloc(M_WAITOK); - if (ln == NULL) { - break; - } + rt->rt_llinfo = ln = nd6_llinfo_alloc(Z_WAITOK); nd6_allocated++; rt->rt_llinfo_get_ri = nd6_llinfo_get_ri; @@ -2654,7 +3017,7 @@ nd6_siocgdrlst(void *data, int data_is_64) LCK_MTX_ASSERT(nd6_mutex, LCK_MTX_ASSERT_OWNED); - dr = TAILQ_FIRST(&nd_defrouter); + dr = TAILQ_FIRST(&nd_defrouter_list); /* XXX Handle mapped defrouter entries */ /* For 64-bit process */ @@ -2682,8 +3045,8 @@ nd6_siocgdrlst(void *data, int data_is_64) ip6_sprintf(&drl_64->defrouter[i].rtaddr)); } drl_64->defrouter[i].flags = dr->flags; - drl_64->defrouter[i].rtlifetime = dr->rtlifetime; - drl_64->defrouter[i].expire = nddr_getexpire(dr); + drl_64->defrouter[i].rtlifetime = (u_short)dr->rtlifetime; + drl_64->defrouter[i].expire = (u_long)nddr_getexpire(dr); drl_64->defrouter[i].if_index = dr->ifp->if_index; i++; dr = TAILQ_NEXT(dr, dr_entry); @@ -2714,8 +3077,8 @@ nd6_siocgdrlst(void *data, int data_is_64) ip6_sprintf(&drl_32->defrouter[i].rtaddr)); } drl_32->defrouter[i].flags = dr->flags; - drl_32->defrouter[i].rtlifetime = dr->rtlifetime; - drl_32->defrouter[i].expire = nddr_getexpire(dr); + drl_32->defrouter[i].rtlifetime = (u_short)dr->rtlifetime; + drl_32->defrouter[i].expire = (u_int32_t)nddr_getexpire(dr); drl_32->defrouter[i].if_index = dr->ifp->if_index; i++; dr = TAILQ_NEXT(dr, dr_entry); @@ -2766,7 +3129,7 @@ nd6_siocgprlst(void *data, int data_is_64) prl_64->prefix[i].vltime = pr->ndpr_vltime; prl_64->prefix[i].pltime = pr->ndpr_pltime; prl_64->prefix[i].if_index = pr->ndpr_ifp->if_index; - prl_64->prefix[i].expire = ndpr_getexpire(pr); + prl_64->prefix[i].expire = (u_long)ndpr_getexpire(pr); pfr = pr->ndpr_advrtrs.lh_first; j = 0; @@ -2789,7 +3152,8 @@ nd6_siocgprlst(void *data, int data_is_64) j++; pfr = pfr->pfr_next; } - prl_64->prefix[i].advrtrs = j; + ASSERT(j <= USHRT_MAX); + prl_64->prefix[i].advrtrs = (u_short)j; prl_64->prefix[i].origin = PR_ORIG_RA; NDPR_UNLOCK(pr); @@ -2822,7 +3186,7 @@ nd6_siocgprlst(void *data, int data_is_64) prl_32->prefix[i].vltime = pr->ndpr_vltime; prl_32->prefix[i].pltime = pr->ndpr_pltime; prl_32->prefix[i].if_index = pr->ndpr_ifp->if_index; - prl_32->prefix[i].expire = ndpr_getexpire(pr); + prl_32->prefix[i].expire = (u_int32_t)ndpr_getexpire(pr); pfr = pr->ndpr_advrtrs.lh_first; j = 0; @@ -2845,7 +3209,8 @@ nd6_siocgprlst(void *data, int data_is_64) j++; pfr = pfr->pfr_next; } - prl_32->prefix[i].advrtrs = j; + ASSERT(j <= USHRT_MAX); + prl_32->prefix[i].advrtrs = (u_short)j; prl_32->prefix[i].origin = PR_ORIG_RA; NDPR_UNLOCK(pr); @@ -2918,7 +3283,12 @@ nd6_ioctl(u_long cmd, caddr_t data, struct ifnet *ifp) bcopy(&ndi->recalctm, &ondi->ndi.recalctm, sizeof(int)); ondi->ndi.chlim = ndi->chlim; - ondi->ndi.receivedra = 0; + /* + * The below truncation is fine as we mostly use it for + * debugging purpose. + */ + ondi->ndi.receivedra = (uint8_t)ndi->ndefrouters; + ondi->ndi.collision_count = (uint8_t)ndi->cga_collision_count; lck_mtx_unlock(&ndi->lock); break; } @@ -2959,10 +3329,13 @@ nd6_ioctl(u_long cmd, caddr_t data, struct ifnet *ifp) /* * xxx sumikawa: should not delete route if default * route equals to the top of default router list + * + * XXX TODO: Needs to be done for RTI as well + * Is very specific flush command with ndp for default routers. 
*/ lck_mtx_lock(nd6_mutex); defrouter_reset(); - defrouter_select(ifp); + defrouter_select(ifp, NULL); lck_mtx_unlock(nd6_mutex); /* xxx sumikawa: flush prefix list */ break; @@ -2988,7 +3361,7 @@ nd6_ioctl(u_long cmd, caddr_t data, struct ifnet *ifp) continue; } /* do we really have to remove addresses as well? */ - NDPR_ADDREF_LOCKED(pr); + NDPR_ADDREF(pr); NDPR_UNLOCK(pr); lck_rw_lock_exclusive(&in6_ifaddr_rwlock); bool from_begining = true; @@ -3047,7 +3420,7 @@ nd6_ioctl(u_long cmd, caddr_t data, struct ifnet *ifp) TAILQ_INIT(&nd_defrouter_tmp); lck_mtx_lock(nd6_mutex); - if ((dr = TAILQ_FIRST(&nd_defrouter)) != NULL) { + if ((dr = TAILQ_FIRST(&nd_defrouter_list)) != NULL) { /* * The first entry of the list may be stored in * the routing table, so we'll delete it later. @@ -3064,15 +3437,15 @@ nd6_ioctl(u_long cmd, caddr_t data, struct ifnet *ifp) * router list. * Remove the reference after calling defrtrlist_de */ - TAILQ_REMOVE(&nd_defrouter, dr, dr_entry); + TAILQ_REMOVE(&nd_defrouter_list, dr, dr_entry); TAILQ_INSERT_TAIL(&nd_defrouter_tmp, dr, dr_entry); } } - dr = TAILQ_FIRST(&nd_defrouter); + dr = TAILQ_FIRST(&nd_defrouter_list); if (ifp == lo_ifp || dr->ifp == ifp) { - TAILQ_REMOVE(&nd_defrouter, dr, dr_entry); + TAILQ_REMOVE(&nd_defrouter_list, dr, dr_entry); TAILQ_INSERT_TAIL(&nd_defrouter_tmp, dr, dr_entry); } } @@ -3090,9 +3463,13 @@ nd6_ioctl(u_long cmd, caddr_t data, struct ifnet *ifp) */ TAILQ_FOREACH_SAFE(dr, &nd_defrouter_tmp, dr_entry, next) { TAILQ_REMOVE(&nd_defrouter_tmp, dr, dr_entry); - defrtrlist_del(dr); + defrtrlist_del(dr, NULL); NDDR_REMREF(dr); /* remove list reference */ } + + /* For now flush RTI routes here as well to avoid any regressions */ + nd6_purge_interface_rti_entries((ifp == lo_ifp) ? NULL : ifp); + lck_mtx_unlock(nd6_mutex); break; } @@ -3128,7 +3505,7 @@ nd6_ioctl(u_long cmd, caddr_t data, struct ifnet *ifp) nbi_32.state = ln->ln_state; nbi_32.asked = ln->ln_asked; nbi_32.isrouter = ln->ln_router; - nbi_32.expire = ln_getexpire(ln); + nbi_32.expire = (int)ln_getexpire(ln); RT_REMREF_LOCKED(rt); RT_UNLOCK(rt); bcopy(&nbi_32, data, sizeof(nbi_32)); @@ -3166,7 +3543,7 @@ nd6_ioctl(u_long cmd, caddr_t data, struct ifnet *ifp) nbi_64.state = ln->ln_state; nbi_64.asked = ln->ln_asked; nbi_64.isrouter = ln->ln_router; - nbi_64.expire = ln_getexpire(ln); + nbi_64.expire = (int)ln_getexpire(ln); RT_REMREF_LOCKED(rt); RT_UNLOCK(rt); bcopy(&nbi_64, data, sizeof(nbi_64)); @@ -3210,36 +3587,61 @@ nd6_ioctl(u_long cmd, caddr_t data, struct ifnet *ifp) return error; /* NOTREACHED */ } - case SIOCGIFCGAPREP_IN6: - case SIOCSIFCGAPREP_IN6: - { - struct in6_cgareq *p_cgareq = - (struct in6_cgareq *)(void *)data; - struct nd_ifinfo *ndi = ND_IFINFO(ifp); - - struct in6_cga_modifier *req_cga_mod = - &(p_cgareq->cgar_cgaprep.cga_modifier); - struct in6_cga_modifier *ndi_cga_mod = NULL; + case SIOCGIFCGAPREP_IN6_32: + case SIOCGIFCGAPREP_IN6_64: { + /* get CGA parameters */ + union { + struct in6_cgareq_32 *cga32; + struct in6_cgareq_64 *cga64; + void *data; + } cgareq_u; + struct nd_ifinfo *ndi; + struct in6_cga_modifier *ndi_cga_mod; + struct in6_cga_modifier *req_cga_mod; + ndi = ND_IFINFO(ifp); if ((NULL == ndi) || !ndi->initialized) { error = EINVAL; break; } - + cgareq_u.data = data; + req_cga_mod = (cmd == SIOCGIFCGAPREP_IN6_64) + ? 
&(cgareq_u.cga64->cgar_cgaprep.cga_modifier) + : &(cgareq_u.cga32->cgar_cgaprep.cga_modifier); lck_mtx_lock(&ndi->lock); ndi_cga_mod = &(ndi->local_cga_modifier); + bcopy(ndi_cga_mod, req_cga_mod, sizeof(*req_cga_mod)); + lck_mtx_unlock(&ndi->lock); + break; + } + case SIOCSIFCGAPREP_IN6_32: + case SIOCSIFCGAPREP_IN6_64: + { + /* set CGA parameters */ + struct in6_cgareq cgareq; + int is64; + struct nd_ifinfo *ndi; + struct in6_cga_modifier *ndi_cga_mod; + struct in6_cga_modifier *req_cga_mod; - if (cmd == SIOCSIFCGAPREP_IN6) { - bcopy(req_cga_mod, ndi_cga_mod, sizeof(*ndi_cga_mod)); - ndi->cga_initialized = TRUE; - } else { - bcopy(ndi_cga_mod, req_cga_mod, sizeof(*req_cga_mod)); + ndi = ND_IFINFO(ifp); + if ((NULL == ndi) || !ndi->initialized) { + error = EINVAL; + break; } - + is64 = (cmd == SIOCSIFCGAPREP_IN6_64); + in6_cgareq_copy_from_user(data, is64, &cgareq); + req_cga_mod = &cgareq.cgar_cgaprep.cga_modifier; + lck_mtx_lock(&ndi->lock); + ndi_cga_mod = &(ndi->local_cga_modifier); + bcopy(req_cga_mod, ndi_cga_mod, sizeof(*ndi_cga_mod)); + ndi->cga_initialized = TRUE; + ndi->cga_collision_count = 0; lck_mtx_unlock(&ndi->lock); - return error; - /* NOTREACHED */ + break; } + default: + break; } return error; } @@ -3260,7 +3662,7 @@ nd6_cache_lladdr(struct ifnet *ifp, struct in6_addr *from, char *lladdr, int do_update; int olladdr; int llchange; - int newstate = 0; + short newstate = 0; uint64_t timenow; boolean_t sched_timeout = FALSE; struct nd_ifinfo *ndi = NULL; @@ -3536,10 +3938,15 @@ fail: * if Scoped Routing is enabled. */ if (do_update && ln->ln_router) { + /* + * XXX TODO: This should also be iterated over router list + * for route information option's router lists as well. + */ RT_REMREF_LOCKED(rt); RT_UNLOCK(rt); lck_mtx_lock(nd6_mutex); - defrouter_select(ifp); + defrouter_select(ifp, NULL); + nd6_router_select_rti_entries(ifp); lck_mtx_unlock(nd6_mutex); } else { RT_REMREF_LOCKED(rt); @@ -4288,7 +4695,7 @@ nd6_lookup_ipv6(ifnet_t ifp, const struct sockaddr_in6 *ip6_dest, } if (route->rt_gateway->sa_family != AF_LINK) { - printf("%s: route %s on %s%d gateway address not AF_LINK\n", + nd6log0(error, "%s: route %s on %s%d gateway address not AF_LINK\n", __func__, ip6_sprintf(&ip6_dest->sin6_addr), route->rt_ifp->if_name, route->rt_ifp->if_unit); result = EADDRNOTAVAIL; @@ -4298,7 +4705,7 @@ nd6_lookup_ipv6(ifnet_t ifp, const struct sockaddr_in6 *ip6_dest, sdl = SDL(route->rt_gateway); if (sdl->sdl_alen == 0) { /* this should be impossible, but we bark here for debugging */ - printf("%s: route %s on %s%d sdl_alen == 0\n", __func__, + nd6log(error, "%s: route %s on %s%d sdl_alen == 0\n", __func__, ip6_sprintf(&ip6_dest->sin6_addr), route->rt_ifp->if_name, route->rt_ifp->if_unit); result = EHOSTUNREACH; @@ -4449,13 +4856,11 @@ done: int nd6_if_disable(struct ifnet *ifp, boolean_t enable) { - ifnet_lock_shared(ifp); if (enable) { - ifp->if_eflags |= IFEF_IPV6_DISABLED; + if_set_eflags(ifp, IFEF_IPV6_DISABLED); } else { - ifp->if_eflags &= ~IFEF_IPV6_DISABLED; + if_clear_eflags(ifp, IFEF_IPV6_DISABLED); } - ifnet_lock_done(ifp); return 0; } @@ -4481,7 +4886,7 @@ nd6_sysctl_drlist SYSCTL_HANDLER_ARGS d.rtaddr.sin6_family = AF_INET6; d.rtaddr.sin6_len = sizeof(d.rtaddr); - TAILQ_FOREACH(dr, &nd_defrouter, dr_entry) { + TAILQ_FOREACH(dr, &nd_defrouter_list, dr_entry) { d.rtaddr.sin6_addr = dr->rtaddr; if (in6_recoverscope(&d.rtaddr, &dr->rtaddr, dr->ifp) != 0) { @@ -4491,8 +4896,8 @@ nd6_sysctl_drlist SYSCTL_HANDLER_ARGS } d.flags = dr->flags; d.stateflags = dr->stateflags; - 
d.rtlifetime = dr->rtlifetime; - d.expire = nddr_getexpire(dr); + d.rtlifetime = (u_short)dr->rtlifetime; + d.expire = (int)nddr_getexpire(dr); d.if_index = dr->ifp->if_index; error = SYSCTL_OUT(req, &d, sizeof(d)); if (error != 0) { @@ -4506,7 +4911,7 @@ nd6_sysctl_drlist SYSCTL_HANDLER_ARGS d.rtaddr.sin6_family = AF_INET6; d.rtaddr.sin6_len = sizeof(d.rtaddr); - TAILQ_FOREACH(dr, &nd_defrouter, dr_entry) { + TAILQ_FOREACH(dr, &nd_defrouter_list, dr_entry) { d.rtaddr.sin6_addr = dr->rtaddr; if (in6_recoverscope(&d.rtaddr, &dr->rtaddr, dr->ifp) != 0) { @@ -4516,8 +4921,8 @@ nd6_sysctl_drlist SYSCTL_HANDLER_ARGS } d.flags = dr->flags; d.stateflags = dr->stateflags; - d.rtlifetime = dr->rtlifetime; - d.expire = nddr_getexpire(dr); + d.rtlifetime = (u_short)dr->rtlifetime; + d.expire = (int)nddr_getexpire(dr); d.if_index = dr->ifp->if_index; error = SYSCTL_OUT(req, &d, sizeof(d)); if (error != 0) { @@ -4569,7 +4974,7 @@ nd6_sysctl_prlist SYSCTL_HANDLER_ARGS p.vltime = pr->ndpr_vltime; p.pltime = pr->ndpr_pltime; p.if_index = pr->ndpr_ifp->if_index; - p.expire = ndpr_getexpire(pr); + p.expire = (u_long)ndpr_getexpire(pr); p.refcnt = pr->ndpr_addrcnt; p.flags = pr->ndpr_stateflags; p.advrtrs = 0; @@ -4620,7 +5025,7 @@ nd6_sysctl_prlist SYSCTL_HANDLER_ARGS p.vltime = pr->ndpr_vltime; p.pltime = pr->ndpr_pltime; p.if_index = pr->ndpr_ifp->if_index; - p.expire = ndpr_getexpire(pr); + p.expire = (u_int32_t)ndpr_getexpire(pr); p.refcnt = pr->ndpr_addrcnt; p.flags = pr->ndpr_stateflags; p.advrtrs = 0; @@ -4671,7 +5076,7 @@ in6_ifaddr_set_dadprogress(struct in6_ifaddr *ia) } if (optdad) { - if ((ifp->if_eflags & IFEF_IPV6_ROUTER) != 0) { + if (ifp->if_ipv6_router_mode == IPV6_ROUTER_MODE_EXCLUSIVE) { optdad = 0; } else { lck_mtx_lock(&ndi->lock); diff --git a/bsd/netinet6/nd6.h b/bsd/netinet6/nd6.h index 1ff88945f..d1f611fb8 100644 --- a/bsd/netinet6/nd6.h +++ b/bsd/netinet6/nd6.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2019 Apple Inc. All rights reserved. + * Copyright (c) 2000-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -197,6 +197,9 @@ struct nd_ifinfo_compat { #define ND6_IFF_REPLICATED 0x100 /* sleep proxy registered */ #define ND6_IFF_DAD 0x200 /* Perform DAD on the interface */ +extern int dad_enhanced; +#define ND6_DAD_ENHANCED_DEFAULT 1 + struct in6_nbrinfo { char ifname[IFNAMSIZ]; /* if name, e.g. 
"en0" */ struct in6_addr addr; /* IPv6 address of the neighbor */ @@ -241,6 +244,12 @@ struct in6_drlist { }; #if defined(BSD_KERNEL_PRIVATE) +#define ND6_PROCESS_RTI_ENABLE 1 +#define ND6_PROCESS_RTI_DISABLE 0 +#define ND6_PROCESS_RTI_DEFAULT ND6_PROCESS_RTI_ENABLE + +extern int nd6_process_rti; + struct in6_drlist_32 { char ifname[IFNAMSIZ]; struct { @@ -265,10 +274,11 @@ struct in6_drlist_64 { #endif /* BSD_KERNEL_PRIVATE */ /* valid values for stateflags */ -#define NDDRF_INSTALLED 0x1 /* installed in the routing table */ -#define NDDRF_IFSCOPE 0x2 /* installed as a scoped route */ -#define NDDRF_STATIC 0x4 /* for internal use only */ -#define NDDRF_MAPPED 0x8 /* Default router addr is mapped to a different one for routing */ +#define NDDRF_INSTALLED 0x01 /* installed in the routing table */ +#define NDDRF_IFSCOPE 0x02 /* installed as a scoped route */ +#define NDDRF_STATIC 0x04 /* for internal use only */ +#define NDDRF_MAPPED 0x08 /* Default router addr is mapped to a different one for routing */ +#define NDDRF_INELIGIBLE 0x10 /* Default router entry is ineligible for default router selection */ struct in6_defrouter { struct sockaddr_in6 rtaddr; @@ -408,7 +418,13 @@ struct in6_ondireq { u_int32_t flags; /* Flags */ int recalctm; /* BaseReacable re-calculation timer */ u_int8_t chlim; /* CurHopLimit */ + /* Number of routers learned on the interface */ u_int8_t receivedra; + /* + * The current collision count value + * being used for secure address generation. + */ + u_int8_t collision_count; } ndi; }; @@ -455,6 +471,8 @@ struct in6_ndifreq_64 { #define NDPRF_PROCESSED_SERVICE 0x10000 #define NDPRF_DEFUNCT 0x20000 #define NDPRF_CLAT46 0x40000 + +#define CLAT46_COLLISION_COUNT_OFFSET 128 #endif /* protocol constants */ @@ -507,6 +525,7 @@ struct in6_ndifreq_64 { (((MIN_RANDOM_FACTOR * (x >> 10)) + (RandomULong() & \ ((MAX_RANDOM_FACTOR - MIN_RANDOM_FACTOR) * (x >> 10)))) /1000) +#define IP6_USE_TMPADDR_DEFAULT 1 /* prefix expiry times */ #define ND6_PREFIX_EXPIRY_UNSPEC -1 #define ND6_PREFIX_EXPIRY_NEVER 0 @@ -514,6 +533,7 @@ struct in6_ndifreq_64 { TAILQ_HEAD(nd_drhead, nd_defrouter); struct nd_defrouter { decl_lck_mtx_data(, nddr_lock); + decl_lck_mtx_data(, nddr_ref_lock); TAILQ_ENTRY(nd_defrouter) dr_entry; struct in6_addr rtaddr; u_int32_t nddr_refcount; @@ -523,7 +543,7 @@ struct nd_defrouter { u_int64_t base_uptime; /* uptime at creation */ u_char flags; /* flags on RA message */ u_char stateflags; - u_short rtlifetime; + u_int32_t rtlifetime; int err; struct ifnet *ifp; struct in6_addr rtaddr_mapped; /* Mapped gateway address for routing */ @@ -550,24 +570,52 @@ struct nd_defrouter { #define NDDR_UNLOCK(_nddr) \ lck_mtx_unlock(&(_nddr)->nddr_lock) +#define NDDR_REF_LOCK(_nddr) \ + lck_mtx_lock(&(_nddr)->nddr_ref_lock) + +#define NDDR_REF_LOCK_SPIN(_nddr) \ + lck_mtx_lock_spin(&(_nddr)->nddr_ref_lock) + +#define NDDR_REF_UNLOCK(_nddr) \ + lck_mtx_unlock(&(_nddr)->nddr_ref_lock) + #define NDDR_ADDREF(_nddr) \ - nddr_addref(_nddr, 0) + nddr_addref(_nddr) -#define NDDR_ADDREF_LOCKED(_nddr) \ - nddr_addref(_nddr, 1) +#define NDDR_REMREF(_nddr) \ + nddr_remref(_nddr) \ -#define NDDR_REMREF(_nddr) do { \ - (void) nddr_remref(_nddr, 0); \ -} while (0) +TAILQ_HEAD(nd_rtihead, nd_route_info); +/* + * The ordering below is important and it should always start + * with nd_drhead as the first element. + * It gets passed in as the generic nd_drhead to router management code. + * The extra information stored here includes the prefix/prefix-length + * which the router list belongs to. 
+ */ +struct nd_route_info { + struct nd_drhead nd_rti_router_list; + TAILQ_ENTRY(nd_route_info) nd_rti_entry; + struct in6_addr nd_rti_prefix; + u_int8_t nd_rti_prefixlen; +}; -#define NDDR_REMREF_LOCKED(_nddr) \ - nddr_remref(_nddr, 1) +struct nd_route_info *ndrti_alloc(void); +void nd6_rti_list_wait(const char *); +void nd6_rti_list_signal_done(void); +void ndrti_free(struct nd_route_info *rti); +void nd6_rtilist_remove(struct nd_route_info *); +void nd6_rtilist_update(struct nd_route_info *, struct nd_defrouter *); +int nd6_rtilist_add(struct nd_route_info *, struct nd_defrouter *, + struct nd_route_info **); +void nd6_rti_purge(struct nd_route_info *); /* define struct prproxy_sols_tree */ RB_HEAD(prproxy_sols_tree, nd6_prproxy_soltgt); struct nd_prefix { decl_lck_mtx_data(, ndpr_lock); + decl_lck_mtx_data(, ndpr_ref_lock); u_int32_t ndpr_refcount; /* reference count */ u_int32_t ndpr_debug; /* see ifa_debug flags */ struct ifnet *ndpr_ifp; @@ -628,18 +676,20 @@ struct nd_prefix { #define NDPR_UNLOCK(_ndpr) \ lck_mtx_unlock(&(_ndpr)->ndpr_lock) -#define NDPR_ADDREF(_ndpr) \ - ndpr_addref(_ndpr, 0) +#define NDPR_REF_LOCK(_ndpr) \ + lck_mtx_lock(&(_ndpr)->ndpr_ref_lock) -#define NDPR_ADDREF_LOCKED(_ndpr) \ - ndpr_addref(_ndpr, 1) +#define NDPR_REF_LOCK_SPIN(_ndpr) \ + lck_mtx_lock_spin(&(_ndpr)->ndpr_ref_lock) -#define NDPR_REMREF(_ndpr) do { \ - (void) ndpr_remref(_ndpr, 0); \ -} while (0) +#define NDPR_REF_UNLOCK(_ndpr) \ + lck_mtx_unlock(&(_ndpr)->ndpr_ref_lock) + +#define NDPR_ADDREF(_ndpr) \ + ndpr_addref(_ndpr) -#define NDPR_REMREF_LOCKED(_ndpr) \ - ndpr_remref(_ndpr, 1) +#define NDPR_REMREF(_ndpr) \ + ndpr_remref(_ndpr) \ /* * Message format for use in obtaining information about prefixes @@ -748,7 +798,8 @@ extern int nd6_accept_6to4; extern int nd6_maxnudhint; extern int nd6_gctimer; extern struct llinfo_nd6 llinfo_nd6; -extern struct nd_drhead nd_defrouter; +extern struct nd_drhead nd_defrouter_list; +extern struct nd_rtihead nd_rti_list; extern struct nd_prhead nd_prefix; extern int nd6_debug; extern int nd6_onlink_ns_rfc4861; @@ -767,6 +818,11 @@ extern int nd6_optimistic_dad; #define ND6_OPTIMISTIC_DAD_SECURED (1 << 4) #define ND6_OPTIMISTIC_DAD_MANUAL (1 << 5) +#define ND6_OPTIMISTIC_DAD_DEFAULT \ + (ND6_OPTIMISTIC_DAD_LINKLOCAL | ND6_OPTIMISTIC_DAD_AUTOCONF | \ + ND6_OPTIMISTIC_DAD_TEMPORARY | ND6_OPTIMISTIC_DAD_DYNAMIC | \ + ND6_OPTIMISTIC_DAD_SECURED | ND6_OPTIMISTIC_DAD_MANUAL) + /* nd6_rtr.c */ extern int nd6_defifindex; extern int ip6_desync_factor; /* seconds */ @@ -776,7 +832,7 @@ extern u_int32_t ip6_temp_valid_lifetime; /* seconds */ extern int ip6_temp_regen_advance; /* seconds */ union nd_opts { - struct nd_opt_hdr *nd_opt_array[16]; /* max = target address list */ + struct nd_opt_hdr *nd_opt_array[26]; /* max = Route information option */ struct { struct nd_opt_hdr *zero; struct nd_opt_hdr *src_lladdr; @@ -794,10 +850,21 @@ union nd_opts { struct nd_opt_hdr *__res13; struct nd_opt_nonce *nonce; struct nd_opt_hdr *__res15; + struct nd_opt_hdr *__res16; + struct nd_opt_hdr *__res17; + struct nd_opt_hdr *__res18; + struct nd_opt_hdr *__res19; + struct nd_opt_hdr *__res20; + struct nd_opt_hdr *__res21; + struct nd_opt_hdr *__res22; + struct nd_opt_hdr *__res23; + struct nd_opt_route_info *rti_beg; + struct nd_opt_hdr *__res25; struct nd_opt_hdr *search; /* multiple opts */ struct nd_opt_hdr *last; /* multiple opts */ int done; - struct nd_opt_prefix_info *pi_end; /* multiple opts, end */ + struct nd_opt_prefix_info *pi_end; /* multiple prefix opts, end */ + struct 
nd_opt_route_info *rti_end; /* multiple route info opts, end */ } nd_opt_each; }; #define nd_opts_src_lladdr nd_opt_each.src_lladdr @@ -807,6 +874,8 @@ union nd_opts { #define nd_opts_rh nd_opt_each.rh #define nd_opts_mtu nd_opt_each.mtu #define nd_opts_nonce nd_opt_each.nonce +#define nd_opts_rti nd_opt_each.rti_beg +#define nd_opts_rti_end nd_opt_each.rti_end #define nd_opts_search nd_opt_each.search #define nd_opts_last nd_opt_each.last #define nd_opts_done nd_opt_each.done @@ -858,7 +927,6 @@ extern void nd6_ns_output(struct ifnet *, const struct in6_addr *, extern caddr_t nd6_ifptomac(struct ifnet *); extern void nd6_dad_start(struct ifaddr *, int *); extern void nd6_dad_stop(struct ifaddr *); -extern void nd6_dad_duplicated(struct ifaddr *); extern void nd6_llreach_alloc(struct rtentry *, struct ifnet *, void *, unsigned int, boolean_t); extern void nd6_llreach_set_reachable(struct ifnet *, void *, unsigned int); @@ -871,15 +939,17 @@ extern void nd6_alt_node_absent(struct ifnet *, struct sockaddr_in6 *, struct so /* nd6_rtr.c */ extern struct in6_ifaddr *in6_pfx_newpersistaddr(struct nd_prefix *, int, - int *, boolean_t); + int *, boolean_t, uint8_t); extern void nd6_rtr_init(void); extern void nd6_rs_input(struct mbuf *, int, int); extern void nd6_ra_input(struct mbuf *, int, int); extern void prelist_del(struct nd_prefix *); -extern void defrouter_select(struct ifnet *); +extern struct nd_defrouter *defrtrlist_update(struct nd_defrouter *, + struct nd_drhead *); +extern void defrouter_select(struct ifnet *, struct nd_drhead *); extern void defrouter_reset(void); extern int defrtrlist_ioctl(u_long, caddr_t); -extern void defrtrlist_del(struct nd_defrouter *); +extern void defrtrlist_del(struct nd_defrouter *, struct nd_drhead *); extern int defrtrlist_add_static(struct nd_defrouter *); extern int defrtrlist_del_static(struct nd_defrouter *); extern void prelist_remove(struct nd_prefix *); @@ -891,23 +961,23 @@ extern int nd6_prefix_onlink(struct nd_prefix *); extern int nd6_prefix_onlink_scoped(struct nd_prefix *, unsigned int); extern int nd6_prefix_offlink(struct nd_prefix *); extern void pfxlist_onlink_check(void); -extern struct nd_defrouter *defrouter_lookup(struct in6_addr *, struct ifnet *); +extern struct nd_defrouter *defrouter_lookup(struct nd_drhead *, + struct in6_addr *, struct ifnet *); extern struct nd_prefix *nd6_prefix_lookup(struct nd_prefix *, int); extern int in6_init_prefix_ltimes(struct nd_prefix *ndpr); extern void rt6_flush(struct in6_addr *, struct ifnet *); extern int nd6_setdefaultiface(int); extern int in6_tmpifadd(const struct in6_ifaddr *, int); -extern void nddr_addref(struct nd_defrouter *, int); -extern struct nd_defrouter *nddr_remref(struct nd_defrouter *, int); +extern void nddr_addref(struct nd_defrouter *); +extern struct nd_defrouter *nddr_remref(struct nd_defrouter *); extern uint64_t nddr_getexpire(struct nd_defrouter *); -extern void ndpr_addref(struct nd_prefix *, int); -extern struct nd_prefix *ndpr_remref(struct nd_prefix *, int); +extern void ndpr_addref(struct nd_prefix *); +extern struct nd_prefix *ndpr_remref(struct nd_prefix *); extern uint64_t ndpr_getexpire(struct nd_prefix *); /* nd6_prproxy.c */ struct ip6_hdr; extern u_int32_t nd6_prproxy; -extern void nd6_prproxy_init(void); extern int nd6_if_prproxy(struct ifnet *, boolean_t); extern void nd6_prproxy_prelist_update(struct nd_prefix *, struct nd_prefix *); extern boolean_t nd6_prproxy_ifaddr(struct in6_ifaddr *); diff --git a/bsd/netinet6/nd6_nbr.c b/bsd/netinet6/nd6_nbr.c 
index 5b57e6387..829c7a7b1 100644 --- a/bsd/netinet6/nd6_nbr.c +++ b/bsd/netinet6/nd6_nbr.c @@ -95,10 +95,8 @@ #if IPSEC #include -#if INET6 #include #endif -#endif struct dadq; static struct dadq *nd6_dad_find(struct ifaddr *, struct nd_opt_nonce *); @@ -112,15 +110,10 @@ static void dad_addref(struct dadq *, int); static void dad_remref(struct dadq *); static struct dadq *nd6_dad_attach(struct dadq *, struct ifaddr *); static void nd6_dad_detach(struct dadq *, struct ifaddr *); +static void nd6_dad_duplicated(struct ifaddr *); static int dad_maxtry = 15; /* max # of *tries* to transmit DAD packet */ -static unsigned int dad_size; /* size of zone element */ -static struct zone *dad_zone; /* zone for dadq */ - -#define DAD_ZONE_MAX 64 /* maximum elements in zone */ -#define DAD_ZONE_NAME "nd6_dad" /* zone name */ - #define DAD_LOCK_ASSERT_HELD(_dp) \ LCK_MTX_ASSERT(&(_dp)->dad_lock, LCK_MTX_ASSERT_OWNED) @@ -162,7 +155,7 @@ SYSCTL_INT(_net_inet6_icmp6, OID_AUTO, nd6_llreach_base, CTLFLAG_RW | CTLFLAG_LOCKED, &nd6_llreach_base, 0, "default ND6 link-layer reachability max lifetime (in seconds)"); -int dad_enhanced = 1; +int dad_enhanced = ND6_DAD_ENHANCED_DEFAULT; SYSCTL_DECL(_net_inet6_ip6); SYSCTL_INT(_net_inet6_ip6, OID_AUTO, dad_enhanced, CTLFLAG_RW | CTLFLAG_LOCKED, &dad_enhanced, 0, @@ -498,7 +491,7 @@ nd6_ns_input( } /* Are we an advertising router on this interface? */ - advrouter = (ifp->if_eflags & IFEF_IPV6_ROUTER); + advrouter = (ifp->if_ipv6_router_mode != IPV6_ROUTER_MODE_DISABLED); /* * If the source address is unspecified address, entries must not @@ -584,6 +577,7 @@ nd6_ns_output( struct route_in6 ro; struct ip6_out_args ip6oa; u_int32_t rtflags = 0; + boolean_t is_optimistic = FALSE; if ((ifp->if_eflags & IFEF_IPV6_ND6ALT) || IN6_IS_ADDR_MULTICAST(taddr6)) { return; @@ -626,7 +620,7 @@ nd6_ns_output( if (daddr6 == NULL || IN6_IS_ADDR_MULTICAST(daddr6)) { m->m_flags |= M_MCAST; - im6o = ip6_allocmoptions(M_DONTWAIT); + im6o = ip6_allocmoptions(Z_NOWAIT); if (im6o == NULL) { m_freem(m); return; @@ -743,13 +737,20 @@ nd6_ns_output( * Resolution. */ ia = in6ifa_ifpwithaddr(ifp, src); - if (!ia || (ia->ia6_flags & IN6_IFF_OPTIMISTIC)) { + if (ia == NULL) { nd6log(debug, "nd6_ns_output: no preferred source " "available: dst=%s\n", ip6_sprintf(&dst_sa.sin6_addr)); goto bad; } + if (ia->ia6_flags & IN6_IFF_OPTIMISTIC) { + is_optimistic = TRUE; + nd6log(debug, + "nd6_ns_output: preferred source " + "available is optimistic: dst=%s\n", + ip6_sprintf(&dst_sa.sin6_addr)); + } } } else { /* @@ -777,13 +778,20 @@ nd6_ns_output( * spec implementation * --- --- * DAD packet MUST NOT do not add the option + * Source is optimistic MUST NOT do not add the option * there's no link layer address: * impossible do not add the option * there's link layer address: * Multicast NS MUST add one add the option * Unicast NS SHOULD add one add the option + * + * XXX We deviate from RFC 4429 and still use optimistic DAD as source + * for address resolution. However to ensure that we do not interfere + * with neighbor cache entries of other neighbors, we MUST ensure + * that SLLAO is not sent. Also note, sending multicast NS without SLLAO + * is also a deviation from RFC 4861. */ - if (nonce == NULL && (mac = nd6_ifptomac(ifp))) { + if (nonce == NULL && (mac = nd6_ifptomac(ifp)) && !is_optimistic) { int optlen = sizeof(struct nd_opt_hdr) + ifp->if_addrlen; struct nd_opt_hdr *nd_opt = (struct nd_opt_hdr *)(nd_ns + 1); /* 8 byte alignments... 
*/ @@ -794,7 +802,7 @@ nd6_ns_output( icmp6len += optlen; bzero((caddr_t)nd_opt, optlen); nd_opt->nd_opt_type = ND_OPT_SOURCE_LINKADDR; - nd_opt->nd_opt_len = optlen >> 3; + nd_opt->nd_opt_len = (uint8_t)(optlen >> 3); bcopy(mac, (caddr_t)(nd_opt + 1), ifp->if_addrlen); } /* @@ -814,7 +822,7 @@ nd6_ns_output( icmp6len += optlen; bzero((caddr_t)nd_opt, optlen); nd_opt->nd_opt_type = ND_OPT_NONCE; - nd_opt->nd_opt_len = optlen >> 3; + nd_opt->nd_opt_len = (uint8_t)(optlen >> 3); bcopy(nonce, (caddr_t)(nd_opt + 1), ND_OPT_NONCE_LEN); } ip6->ip6_plen = htons((u_short)icmp6len); @@ -849,6 +857,7 @@ nd6_ns_output( } ip6oa.ip6oa_flags |= IP6OAF_SKIP_PF; + ip6oa.ip6oa_flags |= IP6OAF_DONT_FRAG; ip6_output(m, NULL, NULL, flags, im6o, &outif, &ip6oa); if (outif) { icmp6_ifstat_inc(outif, ifs6_out_msg); @@ -1082,7 +1091,7 @@ nd6_na_input(struct mbuf *m, int off, int icmp6len) route_event_enqueue_nwk_wq_entry(rt, NULL, ROUTE_LLENTRY_RESOLVED, NULL, TRUE); - if ((ln->ln_router = is_router) != 0) { + if ((ln->ln_router = (short)is_router) != 0) { struct radix_node_head *rnh = NULL; struct route_event rt_ev; route_event_init(&rt_ev, rt, NULL, ROUTE_LLENTRY_RESOLVED); @@ -1254,10 +1263,14 @@ nd6_na_input(struct mbuf *m, int off, int icmp6len) RT_UNLOCK(rt); lck_mtx_lock(nd6_mutex); - dr = defrouter_lookup(in6, rt_ifp); + /* + * XXX Handle router lists for route information option + * as well. + */ + dr = defrouter_lookup(NULL, in6, rt_ifp); if (dr) { - TAILQ_REMOVE(&nd_defrouter, dr, dr_entry); - defrtrlist_del(dr); + TAILQ_REMOVE(&nd_defrouter_list, dr, dr_entry); + defrtrlist_del(dr, NULL); NDDR_REMREF(dr); /* remove list reference */ NDDR_REMREF(dr); lck_mtx_unlock(nd6_mutex); @@ -1274,7 +1287,7 @@ nd6_na_input(struct mbuf *m, int off, int icmp6len) } RT_LOCK(rt); } - ln->ln_router = is_router; + ln->ln_router = (short)is_router; } if (send_nc_alive_kev && (ifp->if_addrlen == IF_LLREACH_MAXLEN)) { @@ -1417,7 +1430,7 @@ nd6_na_output( if (IN6_IS_ADDR_MULTICAST(&daddr6)) { m->m_flags |= M_MCAST; - im6o = ip6_allocmoptions(M_DONTWAIT); + im6o = ip6_allocmoptions(Z_NOWAIT); if (im6o == NULL) { m_freem(m); return; @@ -1526,7 +1539,7 @@ nd6_na_output( icmp6len += optlen; bzero((caddr_t)nd_opt, optlen); nd_opt->nd_opt_type = ND_OPT_TARGET_LINKADDR; - nd_opt->nd_opt_len = optlen >> 3; + nd_opt->nd_opt_len = (uint8_t)(optlen >> 3); bcopy(mac, (caddr_t)(nd_opt + 1), ifp->if_addrlen); } else { flags &= ~ND_NA_FLAG_OVERRIDE; @@ -1548,6 +1561,7 @@ nd6_na_output( } ip6oa.ip6oa_flags |= IP6OAF_SKIP_PF; + ip6oa.ip6oa_flags |= IP6OAF_DONT_FRAG; ip6_output(m, NULL, NULL, IPV6_OUTARGS, im6o, &outif, &ip6oa); if (outif) { icmp6_ifstat_inc(outif, ifs6_out_msg); @@ -1618,6 +1632,7 @@ struct dadq { uint32_t dad_nonce[ND_OPT_NONCE_LEN32]; }; +static ZONE_DECLARE(dad_zone, "nd6_dad", sizeof(struct dadq), ZC_ZFREE_CLEARMEM); static struct dadq_head dadq; void @@ -1627,15 +1642,6 @@ nd6_nbr_init(void) TAILQ_INIT(&dadq); - dad_size = sizeof(struct dadq); - dad_zone = zinit(dad_size, DAD_ZONE_MAX * dad_size, 0, DAD_ZONE_NAME); - if (dad_zone == NULL) { - panic("%s: failed allocating %s", __func__, DAD_ZONE_NAME); - /* NOTREACHED */ - } - zone_change(dad_zone, Z_EXPAND, TRUE); - zone_change(dad_zone, Z_CALLERACCT, FALSE); - bzero(&hostrtmask, sizeof hostrtmask); hostrtmask.sin6_family = AF_INET6; hostrtmask.sin6_len = sizeof hostrtmask; @@ -1743,14 +1749,7 @@ nd6_dad_start( return; } - dp = zalloc(dad_zone); - if (dp == NULL) { - nd6log0(error, "nd6_dad_start: memory allocation failed for %s(%s)\n", - 
ip6_sprintf(&ia->ia_addr.sin6_addr), - ifa->ifa_ifp ? if_name(ifa->ifa_ifp) : "???"); - return; - } - bzero(dp, dad_size); + dp = zalloc_flags(dad_zone, Z_WAITOK | Z_ZERO); lck_mtx_init(&dp->dad_lock, ifa_mtx_grp, ifa_mtx_attr); /* Callee adds one reference for us */ @@ -2049,7 +2048,7 @@ done: } } -void +static void nd6_dad_duplicated(struct ifaddr *ifa) { struct in6_ifaddr *ia = (struct in6_ifaddr *)ifa; @@ -2164,7 +2163,9 @@ nd6_dad_duplicated(struct ifaddr *ifa) nd6_if_disable(ifp, TRUE); } - log(LOG_ERR, "%s: manual intervention required!\n", if_name(ifp)); + log(LOG_ERR, + "%s: manual intervention may be required.\n", + if_name(ifp)); /* Send an event to the configuration agent so that the * duplicate address will be notified to the user and will @@ -2246,7 +2247,8 @@ nd6_dad_ns_input(struct ifaddr *ifa, char *lladdr, ++dp->dad_ns_icount; if (lladdr && lladdrlen >= ETHER_ADDR_LEN) { memcpy(dp->dad_lladdr, lladdr, ETHER_ADDR_LEN); - dp->dad_lladdrlen = lladdrlen; + /* fine to truncate as it is compared against sdl_alen */ + dp->dad_lladdrlen = (uint8_t)lladdrlen; } DAD_UNLOCK(dp); DAD_REMREF(dp); @@ -2336,7 +2338,7 @@ nd6_dad_na_input(struct mbuf *m, struct ifnet *ifp, struct in6_addr *taddr, DAD_LOCK_SPIN(dp); if (lladdr != NULL && lladdrlen >= ETHER_ADDR_LEN) { memcpy(dp->dad_lladdr, lladdr, ETHER_ADDR_LEN); - dp->dad_lladdrlen = lladdrlen; + dp->dad_lladdrlen = (uint8_t)lladdrlen; } dp->dad_na_icount++; DAD_UNLOCK(dp); @@ -2440,8 +2442,9 @@ nd6_alt_node_addr_decompose(struct ifnet *ifp, struct sockaddr *sa, struct in6_addr *in6 = &sin6a->sin6_addr; VERIFY(sa->sa_len == sizeof *sin6); + VERIFY(strlen(ifp->if_name) <= IFNAMSIZ); - sdl->sdl_nlen = strlen(ifp->if_name); + sdl->sdl_nlen = (u_char)strlen(ifp->if_name); bcopy(ifp->if_name, sdl->sdl_data, sdl->sdl_nlen); if (in6->s6_addr[11] == 0xff && in6->s6_addr[12] == 0xfe) { sdl->sdl_alen = ETHER_ADDR_LEN; diff --git a/bsd/netinet6/nd6_prproxy.c b/bsd/netinet6/nd6_prproxy.c index 100b4c482..733fc9081 100644 --- a/bsd/netinet6/nd6_prproxy.c +++ b/bsd/netinet6/nd6_prproxy.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011-2016 Apple Inc. All rights reserved. + * Copyright (c) 2011-2020 Apple Inc. All rights reserved. 
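The zone changes in the hunks above and below all follow the same conversion: the zinit()/zone_change() boilerplate and the zalloc()/zalloc_noblock()-plus-bzero() pairs are replaced by a declarative zone and flag-based allocation. A condensed sketch of the new idiom for a compile-time element size (example_zone is a hypothetical name; dad_zone, ndprl_zone and the other zones in this patch are declared the same way):

    /* Zone declared at compile time; ZC_ZFREE_CLEARMEM zeroes elements on free. */
    static ZONE_DECLARE(example_zone, "nd6_example",
        sizeof(struct dadq), ZC_ZFREE_CLEARMEM);

    /* Z_ZERO replaces the explicit bzero(); Z_NOWAIT replaces zalloc_noblock(). */
    struct dadq *dp = zalloc_flags(example_zone, Z_WAITOK | Z_ZERO);
    /* ...use dp... */
    zfree(example_zone, dp);

When the element size is only known at boot (for instance the debug-sized nd_prefix and nd_defrouter structures later in this patch), the zone is created at init time with zone_create(name, size, ZC_ZFREE_CLEARMEM) instead.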
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -141,7 +141,7 @@ SLIST_HEAD(nd6_prproxy_prelist_head, nd6_prproxy_prelist); static void nd6_prproxy_prelist_setroute(boolean_t enable, struct nd6_prproxy_prelist_head *, struct nd6_prproxy_prelist_head *); -static struct nd6_prproxy_prelist *nd6_ndprl_alloc(int); +static struct nd6_prproxy_prelist *nd6_ndprl_alloc(zalloc_flags_t); static void nd6_ndprl_free(struct nd6_prproxy_prelist *); static struct nd6_prproxy_solsrc *nd6_solsrc_alloc(int); static void nd6_solsrc_free(struct nd6_prproxy_solsrc *); @@ -177,23 +177,14 @@ RB_PROTOTYPE_SC_PREV(__private_extern__, prproxy_sols_tree, nd6_prproxy_soltgt, static u_int32_t nd6_max_tgt_sols = ND6_MAX_TGT_SOLS_DEFAULT; static u_int32_t nd6_max_src_sols = ND6_MAX_SRC_SOLS_DEFAULT; -static unsigned int ndprl_size; /* size of zone element */ -static struct zone *ndprl_zone; /* nd6_prproxy_prelist zone */ +static ZONE_DECLARE(ndprl_zone, "nd6_prproxy_prelist", + sizeof(struct nd6_prproxy_prelist), ZC_ZFREE_CLEARMEM); /* nd6_prproxy_prelist zone */ -#define NDPRL_ZONE_MAX 256 /* maximum elements in zone */ -#define NDPRL_ZONE_NAME "nd6_prproxy_prelist" /* name for zone */ +static ZONE_DECLARE(solsrc_zone, "nd6_prproxy_solsrc", + sizeof(struct nd6_prproxy_solsrc), ZC_ZFREE_CLEARMEM); /* nd6_prproxy_solsrc zone */ -static unsigned int solsrc_size; /* size of zone element */ -static struct zone *solsrc_zone; /* nd6_prproxy_solsrc zone */ - -#define SOLSRC_ZONE_MAX 256 /* maximum elements in zone */ -#define SOLSRC_ZONE_NAME "nd6_prproxy_solsrc" /* name for zone */ - -static unsigned int soltgt_size; /* size of zone element */ -static struct zone *soltgt_zone; /* nd6_prproxy_soltgt zone */ - -#define SOLTGT_ZONE_MAX 256 /* maximum elements in zone */ -#define SOLTGT_ZONE_NAME "nd6_prproxy_soltgt" /* name for zone */ +static ZONE_DECLARE(soltgt_zone, "nd6_prproxy_soltgt", + sizeof(struct nd6_prproxy_soltgt), ZC_ZFREE_CLEARMEM); /* nd6_prproxy_soltgt zone */ /* The following is protected by ndpr_lock */ RB_GENERATE_PREV(prproxy_sols_tree, nd6_prproxy_soltgt, @@ -218,55 +209,10 @@ SYSCTL_UINT(_net_inet6_icmp6, OID_AUTO, prproxy_cnt, CTLFLAG_RD | CTLFLAG_LOCKED, &nd6_prproxy, 0, "total number of proxied prefixes"); -/* - * Called by nd6_init() during initialization time. - */ -void -nd6_prproxy_init(void) -{ - ndprl_size = sizeof(struct nd6_prproxy_prelist); - ndprl_zone = zinit(ndprl_size, NDPRL_ZONE_MAX * ndprl_size, 0, - NDPRL_ZONE_NAME); - if (ndprl_zone == NULL) { - panic("%s: failed allocating ndprl_zone", __func__); - } - - zone_change(ndprl_zone, Z_EXPAND, TRUE); - zone_change(ndprl_zone, Z_CALLERACCT, FALSE); - - solsrc_size = sizeof(struct nd6_prproxy_solsrc); - solsrc_zone = zinit(solsrc_size, SOLSRC_ZONE_MAX * solsrc_size, 0, - SOLSRC_ZONE_NAME); - if (solsrc_zone == NULL) { - panic("%s: failed allocating solsrc_zone", __func__); - } - - zone_change(solsrc_zone, Z_EXPAND, TRUE); - zone_change(solsrc_zone, Z_CALLERACCT, FALSE); - - soltgt_size = sizeof(struct nd6_prproxy_soltgt); - soltgt_zone = zinit(soltgt_size, SOLTGT_ZONE_MAX * soltgt_size, 0, - SOLTGT_ZONE_NAME); - if (soltgt_zone == NULL) { - panic("%s: failed allocating soltgt_zone", __func__); - } - - zone_change(soltgt_zone, Z_EXPAND, TRUE); - zone_change(soltgt_zone, Z_CALLERACCT, FALSE); -} - static struct nd6_prproxy_prelist * -nd6_ndprl_alloc(int how) +nd6_ndprl_alloc(zalloc_flags_t how) { - struct nd6_prproxy_prelist *ndprl; - - ndprl = (how == M_WAITOK) ? 
zalloc(ndprl_zone) : - zalloc_noblock(ndprl_zone); - if (ndprl != NULL) { - bzero(ndprl, ndprl_size); - } - - return ndprl; + return zalloc_flags(ndprl_zone, how | Z_ZERO); } static void @@ -405,7 +351,7 @@ nd6_prproxy_prelist_setroute(boolean_t enable, /* * Enable/disable prefix proxying on an interface; typically called - * as part of handling SIOCSIFINFO_FLAGS[IFEF_IPV6_ROUTER]. + * as part of handling SIOCSIFINFO_FLAGS[SETROUTERMODE_IN6] */ int nd6_if_prproxy(struct ifnet *ifp, boolean_t enable) @@ -417,7 +363,7 @@ nd6_if_prproxy(struct ifnet *ifp, boolean_t enable) /* Can't be enabled if we are an advertising router on the interface */ ifnet_lock_shared(ifp); - if (enable && (ifp->if_eflags & IFEF_IPV6_ROUTER)) { + if (enable && (ifp->if_ipv6_router_mode == IPV6_ROUTER_MODE_EXCLUSIVE)) { ifnet_lock_done(ifp); return EBUSY; } @@ -457,11 +403,11 @@ nd6_if_prproxy(struct ifnet *ifp, boolean_t enable) if (enable && (pr->ndpr_stateflags & NDPRF_ONLINK) && nd6_need_cache(ifp)) { pr->ndpr_stateflags |= NDPRF_PRPROXY; - NDPR_ADDREF_LOCKED(pr); + NDPR_ADDREF(pr); NDPR_UNLOCK(pr); } else if (!enable) { pr->ndpr_stateflags &= ~NDPRF_PRPROXY; - NDPR_ADDREF_LOCKED(pr); + NDPR_ADDREF(pr); NDPR_UNLOCK(pr); } else { NDPR_UNLOCK(pr); @@ -472,7 +418,7 @@ nd6_if_prproxy(struct ifnet *ifp, boolean_t enable) break; } - up = nd6_ndprl_alloc(M_WAITOK); + up = nd6_ndprl_alloc(Z_WAITOK); if (up == NULL) { NDPR_REMREF(pr); continue; @@ -511,7 +457,7 @@ nd6_if_prproxy(struct ifnet *ifp, boolean_t enable) } NDPR_UNLOCK(fwd); - down = nd6_ndprl_alloc(M_WAITOK); + down = nd6_ndprl_alloc(Z_WAITOK); if (down == NULL) { continue; } @@ -810,7 +756,7 @@ nd6_prproxy_prelist_update(struct nd_prefix *pr_cur, struct nd_prefix *pr_up) enable = (pr_up->ndpr_stateflags & NDPRF_PRPROXY); NDPR_UNLOCK(pr_up); - up = nd6_ndprl_alloc(M_WAITOK); + up = nd6_ndprl_alloc(Z_WAITOK); if (up == NULL) { lck_mtx_unlock(nd6_mutex); goto done; @@ -837,7 +783,7 @@ nd6_prproxy_prelist_update(struct nd_prefix *pr_cur, struct nd_prefix *pr_up) } NDPR_UNLOCK(pr); - down = nd6_ndprl_alloc(M_WAITOK); + down = nd6_ndprl_alloc(Z_WAITOK); if (down == NULL) { continue; } @@ -970,7 +916,7 @@ nd6_prproxy_ns_output(struct ifnet *ifp, struct ifnet *exclifp, fwd_ifp = fwd->ndpr_ifp; NDPR_UNLOCK(fwd); - ndprl = nd6_ndprl_alloc(M_WAITOK); + ndprl = nd6_ndprl_alloc(Z_WAITOK); if (ndprl == NULL) { continue; } @@ -1086,7 +1032,7 @@ nd6_prproxy_ns_input(struct ifnet *ifp, struct in6_addr *saddr, fwd_ifp = fwd->ndpr_ifp; NDPR_UNLOCK(fwd); - ndprl = nd6_ndprl_alloc(M_WAITOK); + ndprl = nd6_ndprl_alloc(Z_WAITOK); if (ndprl == NULL) { continue; } @@ -1202,7 +1148,7 @@ nd6_prproxy_na_input(struct ifnet *ifp, struct in6_addr *saddr, VERIFY(!IN6_IS_ADDR_UNSPECIFIED(&daddr) && fwd_ifp); NDPR_UNLOCK(pr); - ndprl = nd6_ndprl_alloc(M_WAITOK); + ndprl = nd6_ndprl_alloc(Z_WAITOK); if (ndprl == NULL) { break; /* bail out */ } @@ -1237,7 +1183,7 @@ nd6_prproxy_na_input(struct ifnet *ifp, struct in6_addr *saddr, fwd_ifp = fwd->ndpr_ifp; NDPR_UNLOCK(fwd); - ndprl = nd6_ndprl_alloc(M_WAITOK); + ndprl = nd6_ndprl_alloc(Z_WAITOK); if (ndprl == NULL) { continue; } @@ -1309,15 +1255,7 @@ nd6_prproxy_na_input(struct ifnet *ifp, struct in6_addr *saddr, static struct nd6_prproxy_solsrc * nd6_solsrc_alloc(int how) { - struct nd6_prproxy_solsrc *ssrc; - - ssrc = (how == M_WAITOK) ? 
zalloc(solsrc_zone) : - zalloc_noblock(solsrc_zone); - if (ssrc != NULL) { - bzero(ssrc, solsrc_size); - } - - return ssrc; + return zalloc_flags(solsrc_zone, how | Z_ZERO); } static void @@ -1496,10 +1434,8 @@ nd6_soltgt_alloc(int how) { struct nd6_prproxy_soltgt *soltgt; - soltgt = (how == M_WAITOK) ? zalloc(soltgt_zone) : - zalloc_noblock(soltgt_zone); + soltgt = zalloc_flags(soltgt_zone, how | Z_ZERO); if (soltgt != NULL) { - bzero(soltgt, soltgt_size); TAILQ_INIT(&soltgt->soltgt_q); } return soltgt; diff --git a/bsd/netinet6/nd6_rti.c b/bsd/netinet6/nd6_rti.c new file mode 100644 index 000000000..16c2ab3c9 --- /dev/null +++ b/bsd/netinet6/nd6_rti.c @@ -0,0 +1,158 @@ +/* + * Copyright (c) 2020 Apple Inc. All rights reserved. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. + * + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define NDRTI_ZONE_NAME "nd6_route_info" /* zone name */ + +extern lck_mtx_t *nd6_mutex; +static struct nd_route_info *nd6_rti_lookup(struct nd_route_info *); + +static ZONE_DECLARE(ndrti_zone, "nd6_route_info", + sizeof(struct nd_route_info), ZC_ZFREE_CLEARMEM); + +static boolean_t nd6_rti_list_busy = FALSE; /* protected by nd6_mutex */ + + +void +nd6_rti_list_wait(const char *func) +{ + LCK_MTX_ASSERT(nd6_mutex, LCK_MTX_ASSERT_OWNED); + while (nd6_rti_list_busy) { + nd6log2(debug, "%s: someone else is operating " + "on rti list. 
Entering sleep.\n", func); + (void) msleep(&nd6_rti_list_busy, nd6_mutex, (PZERO - 1), + func, NULL); + LCK_MTX_ASSERT(nd6_mutex, LCK_MTX_ASSERT_OWNED); + } + nd6_rti_list_busy = TRUE; +} + +void +nd6_rti_list_signal_done(void) +{ + LCK_MTX_ASSERT(nd6_mutex, LCK_MTX_ASSERT_OWNED); + nd6_rti_list_busy = FALSE; + wakeup(&nd6_rti_list_busy); +} + +struct nd_route_info * +ndrti_alloc(void) +{ + return zalloc_flags(ndrti_zone, Z_WAITOK | Z_ZERO); +} + +void +ndrti_free(struct nd_route_info *rti) +{ + if (!TAILQ_EMPTY(&rti->nd_rti_router_list)) { + panic("%s: rti freed with non-empty router list", __func__); + } + zfree(ndrti_zone, rti); +} + +static struct nd_route_info * +nd6_rti_lookup(struct nd_route_info *rti) +{ + struct nd_route_info *tmp_rti = NULL; + + LCK_MTX_ASSERT(nd6_mutex, LCK_MTX_ASSERT_OWNED); + + TAILQ_FOREACH(tmp_rti, &nd_rti_list, nd_rti_entry) { + if (IN6_ARE_ADDR_EQUAL(&tmp_rti->nd_rti_prefix, &rti->nd_rti_prefix) && + tmp_rti->nd_rti_prefixlen == rti->nd_rti_prefixlen) { + break; + } + } + return tmp_rti; +} + +void +nd6_rtilist_update(struct nd_route_info *new_rti, struct nd_defrouter *dr) +{ + struct nd_route_info *rti = NULL; + + lck_mtx_lock(nd6_mutex); + VERIFY(new_rti != NULL && dr != NULL); + nd6_rti_list_wait(__func__); + + if ((rti = nd6_rti_lookup(new_rti)) != NULL) { + (void)defrtrlist_update(dr, &rti->nd_rti_router_list); + /* + * The above may have removed an entry from default router list. + * If it did and the list is now empty, remove the rti as well. + */ + if (TAILQ_EMPTY(&rti->nd_rti_router_list)) { + TAILQ_REMOVE(&nd_rti_list, rti, nd_rti_entry); + ndrti_free(rti); + } + } else if (dr->rtlifetime != 0) { + rti = ndrti_alloc(); + TAILQ_INIT(&rti->nd_rti_router_list); + rti->nd_rti_prefix = new_rti->nd_rti_prefix; + rti->nd_rti_prefixlen = new_rti->nd_rti_prefixlen; + (void)defrtrlist_update(dr, &rti->nd_rti_router_list); + TAILQ_INSERT_HEAD(&nd_rti_list, rti, nd_rti_entry); + } + /* If rti doesn't exist and lifetime is 0, simply ignore */ + nd6_rti_list_signal_done(); + lck_mtx_unlock(nd6_mutex); +} + +void +nd6_rti_purge(struct nd_route_info *new_rti) +{ + VERIFY(new_rti != NULL); + LCK_MTX_ASSERT(nd6_mutex, LCK_MTX_ASSERT_OWNED); + + struct nd_route_info *rti = NULL; + nd6_rti_list_wait(__func__); + + if ((rti = nd6_rti_lookup(new_rti)) != NULL) { + struct nd_defrouter *dr = NULL; + struct nd_defrouter *ndr = NULL; + + TAILQ_FOREACH_SAFE(dr, &rti->nd_rti_router_list, dr_entry, ndr) { + TAILQ_REMOVE(&rti->nd_rti_router_list, dr, dr_entry); + defrtrlist_del(dr, &rti->nd_rti_router_list); + NDDR_REMREF(dr); + } + TAILQ_REMOVE(&nd_rti_list, rti, nd_rti_entry); + ndrti_free(rti); + } + nd6_rti_list_signal_done(); +} diff --git a/bsd/netinet6/nd6_rtr.c b/bsd/netinet6/nd6_rtr.c index e47f614ba..7bbdd6dbc 100644 --- a/bsd/netinet6/nd6_rtr.c +++ b/bsd/netinet6/nd6_rtr.c @@ -54,8 +54,6 @@ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ - - #include #include #include @@ -94,18 +92,16 @@ #include -static void defrouter_addreq(struct nd_defrouter *, boolean_t); -static void defrouter_delreq(struct nd_defrouter *); +static void defrouter_addreq(struct nd_defrouter *, struct nd_route_info *, boolean_t); +static void defrouter_delreq(struct nd_defrouter *, struct nd_route_info *); static struct nd_defrouter *defrtrlist_update_common(struct nd_defrouter *, - boolean_t); -static struct nd_defrouter *defrtrlist_update(struct nd_defrouter *); - + struct nd_drhead *, boolean_t); static struct nd_pfxrouter *pfxrtr_lookup(struct nd_prefix *, struct nd_defrouter *); static void pfxrtr_add(struct nd_prefix *, struct nd_defrouter *); static void pfxrtr_del(struct nd_pfxrouter *, struct nd_prefix *); static struct nd_pfxrouter *find_pfxlist_reachable_router(struct nd_prefix *); -static void nd6_rtmsg(int, struct rtentry *); +static void nd6_rtmsg(u_char, struct rtentry *); static int nd6_prefix_onlink_common(struct nd_prefix *, boolean_t, unsigned int); @@ -117,7 +113,7 @@ static void in6_init_address_ltimes(struct nd_prefix *, static int rt6_deleteroute(struct radix_node *, void *); -static struct nd_defrouter *nddr_alloc(int); +static struct nd_defrouter *nddr_alloc(zalloc_flags_t); static void nddr_free(struct nd_defrouter *); static void nddr_trace(struct nd_defrouter *, int); @@ -131,7 +127,7 @@ static struct ifnet *nd6_defifp = NULL; int nd6_defifindex = 0; static unsigned int nd6_defrouter_genid; -int ip6_use_tempaddr = 1; /* use temp addr by default for testing now */ +int ip6_use_tempaddr = IP6_USE_TMPADDR_DEFAULT; /* use temp addr by default for testing now */ int nd6_accept_6to4 = 1; @@ -157,6 +153,7 @@ static boolean_t nd_defrouter_busy; static void *nd_defrouter_waitchan = &nd_defrouter_busy; static int nd_defrouter_waiters = 0; +#define equal(a1, a2) (bcmp((caddr_t)(a1), (caddr_t)(a2), (a1)->sa_len) == 0) /* RTPREF_MEDIUM has to be 0! */ #define RTPREF_HIGH 1 #define RTPREF_MEDIUM 0 @@ -181,10 +178,7 @@ struct nd_prefix_dbg { }; static unsigned int ndpr_debug; /* debug flags */ -static unsigned int ndpr_size; /* size of zone element */ static struct zone *ndpr_zone; /* zone for nd_prefix */ - -#define NDPR_ZONE_MAX 64 /* maximum elements in zone */ #define NDPR_ZONE_NAME "nd6_prefix" /* zone name */ #define NDDR_TRACE_HIST_SIZE 32 /* size of trace history */ @@ -197,62 +191,46 @@ struct nd_defrouter_dbg { uint16_t nddr_refhold_cnt; /* # of ref */ uint16_t nddr_refrele_cnt; /* # of rele */ /* - * Circular lists of ndpr_addref and ndpr_remref callers. + * Circular lists of nddr_addref and nddr_remref callers. 
*/ ctrace_t nddr_refhold[NDDR_TRACE_HIST_SIZE]; ctrace_t nddr_refrele[NDDR_TRACE_HIST_SIZE]; }; static unsigned int nddr_debug; /* debug flags */ -static unsigned int nddr_size; /* size of zone element */ static struct zone *nddr_zone; /* zone for nd_defrouter */ - -#define NDDR_ZONE_MAX 64 /* maximum elements in zone */ #define NDDR_ZONE_NAME "nd6_defrouter" /* zone name */ -static unsigned int ndprtr_size; /* size of zone element */ -static struct zone *ndprtr_zone; /* zone for nd_pfxrouter */ +static ZONE_DECLARE(ndprtr_zone, "nd6_pfxrouter", + sizeof(struct nd_pfxrouter), ZC_NONE); -#define NDPRTR_ZONE_MAX 64 /* maximum elements in zone */ -#define NDPRTR_ZONE_NAME "nd6_pfxrouter" /* zone name */ +#define TWOHOUR (120*60) +extern int nd6_process_rti; /* Default to 0 for now */ -void -nd6_rtr_init(void) + +static void +nd6_prefix_glb_init(void) { PE_parse_boot_argn("ifa_debug", &ndpr_debug, sizeof(ndpr_debug)); - PE_parse_boot_argn("ifa_debug", &nddr_debug, sizeof(nddr_debug)); - - ndpr_size = (ndpr_debug == 0) ? sizeof(struct nd_prefix) : + vm_size_t ndpr_size = (ndpr_debug == 0) ? sizeof(struct nd_prefix) : sizeof(struct nd_prefix_dbg); - ndpr_zone = zinit(ndpr_size, NDPR_ZONE_MAX * ndpr_size, 0, - NDPR_ZONE_NAME); - if (ndpr_zone == NULL) { - panic("%s: failed allocating %s", __func__, NDPR_ZONE_NAME); - /* NOTREACHED */ - } - zone_change(ndpr_zone, Z_EXPAND, TRUE); - zone_change(ndpr_zone, Z_CALLERACCT, FALSE); + ndpr_zone = zone_create(NDPR_ZONE_NAME, ndpr_size, ZC_ZFREE_CLEARMEM); +} - nddr_size = (nddr_debug == 0) ? sizeof(struct nd_defrouter) : +static void +nd6_defrouter_glb_init(void) +{ + PE_parse_boot_argn("ifa_debug", &nddr_debug, sizeof(nddr_debug)); + vm_size_t nddr_size = (nddr_debug == 0) ? sizeof(struct nd_defrouter) : sizeof(struct nd_defrouter_dbg); - nddr_zone = zinit(nddr_size, NDDR_ZONE_MAX * nddr_size, 0, - NDDR_ZONE_NAME); - if (nddr_zone == NULL) { - panic("%s: failed allocating %s", __func__, NDDR_ZONE_NAME); - /* NOTREACHED */ - } - zone_change(nddr_zone, Z_EXPAND, TRUE); - zone_change(nddr_zone, Z_CALLERACCT, FALSE); + nddr_zone = zone_create(NDDR_ZONE_NAME, nddr_size, ZC_ZFREE_CLEARMEM); +} - ndprtr_size = sizeof(struct nd_pfxrouter); - ndprtr_zone = zinit(ndprtr_size, NDPRTR_ZONE_MAX * ndprtr_size, 0, - NDPRTR_ZONE_NAME); - if (ndprtr_zone == NULL) { - panic("%s: failed allocating %s", __func__, NDPRTR_ZONE_NAME); - /* NOTREACHED */ - } - zone_change(ndprtr_zone, Z_EXPAND, TRUE); - zone_change(ndprtr_zone, Z_CALLERACCT, FALSE); +void +nd6_rtr_init(void) +{ + nd6_prefix_glb_init(); + nd6_defrouter_glb_init(); } /* @@ -280,7 +258,7 @@ nd6_rs_input( MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(m); /* If I'm not a router, ignore it. */ - if (!ip6_forwarding || !(ifp->if_eflags & IFEF_IPV6_ROUTER)) { + if (!ip6_forwarding || ifp->if_ipv6_router_mode == IPV6_ROUTER_MODE_DISABLED) { goto freeit; } @@ -350,6 +328,11 @@ bad: m_freem(m); } +#define ND_OPT_LEN_TO_BYTE_SCALE 3 /* ND opt len is in units of 8 octets */ + +#define ND_OPT_LEN_RTI_MIN 1 +#define ND_OPT_LEN_RTI_MAX 3 +#define ND_OPT_RTI_PFXLEN_MAX 128 /* * Receive Router Advertisement Message. 
* @@ -380,6 +363,7 @@ nd6_ra_input( struct nd_prefix_list *prfl; struct nd_defrouter dr0; u_int32_t advreachable; + boolean_t rti_defrtr_processed = FALSE; #if (DEVELOPMENT || DEBUG) if (ip6_accept_rtadv == 0) { @@ -390,19 +374,22 @@ nd6_ra_input( MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(m); /* - * Discard RA unless IFEF_ACCEPT_RTADV is set (as host), or when - * IFEF_IPV6_ROUTER is set (as router) but the RA is not locally - * generated. For convenience, we allow locally generated (rtadvd) + * Accept the RA if IFEF_ACCEPT_RTADV is set, or when + * we're acting as a router and the RA is locally generated. + * For convenience, we allow locally generated (rtadvd) * RAs to be processed on the advertising interface, as a router. * * Note that we don't test against ip6_forwarding as we could be * both a host and a router on different interfaces, hence the * check against the per-interface flags. */ - if (!(ifp->if_eflags & (IFEF_ACCEPT_RTADV | IFEF_IPV6_ROUTER)) || - ((ifp->if_eflags & IFEF_IPV6_ROUTER) && - (ia6 = ifa_foraddr6(&saddr6)) == NULL)) { - goto freeit; + if ((ifp->if_eflags & IFEF_ACCEPT_RTADV) == 0) { + if (ifp->if_ipv6_router_mode == IPV6_ROUTER_MODE_EXCLUSIVE && + (ia6 = ifa_foraddr6(&saddr6)) != NULL) { + /* accept locally generated RA */ + } else { + goto freeit; + } } if (ia6 != NULL) { @@ -446,14 +433,8 @@ nd6_ra_input( } ndi = ND_IFINFO(ifp); - VERIFY((NULL != ndi) && (TRUE == ndi->initialized)); + VERIFY(NULL != ndi && TRUE == ndi->initialized); lck_mtx_lock(&ndi->lock); - bzero(&dr0, sizeof(dr0)); - dr0.rtaddr = saddr6; - dr0.flags = nd_ra->nd_ra_flags_reserved; - dr0.rtlifetime = ntohs(nd_ra->nd_ra_router_lifetime); - dr0.expire = net_uptime() + dr0.rtlifetime; - dr0.ifp = ifp; /* unspecified or not? (RFC 2461 6.3.4) */ if (advreachable) { advreachable = ntohl(advreachable); @@ -480,9 +461,180 @@ nd6_ra_input( } } lck_mtx_unlock(&ndi->lock); - lck_mtx_lock(nd6_mutex); - dr = defrtrlist_update(&dr0); - lck_mtx_unlock(nd6_mutex); + + /* Initialize nd_defrouter invariants for RA processing */ + bzero(&dr0, sizeof(dr0)); + dr0.rtaddr = saddr6; + dr0.ifp = ifp; + + /* + * Route Information Option + */ + if (ndopts.nd_opts_rti && IFNET_IS_ETHERNET(ifp)) { + struct nd_opt_hdr *rt = NULL; + struct sockaddr_in6 rti_gateway = {0}; + + rti_gateway.sin6_family = AF_INET6; + rti_gateway.sin6_len = sizeof(rti_gateway); + memcpy(&rti_gateway.sin6_addr, &saddr6, sizeof(rti_gateway.sin6_addr)); + + for (rt = (struct nd_opt_hdr *)ndopts.nd_opts_rti; + rt <= (struct nd_opt_hdr *)ndopts.nd_opts_rti_end; + rt = (struct nd_opt_hdr *)((caddr_t)rt + + (rt->nd_opt_len << ND_OPT_LEN_TO_BYTE_SCALE))) { + struct sockaddr_in6 rti_prefix = {}; + struct nd_route_info rti = {}; + struct nd_opt_route_info *rti_opt = NULL; + u_int32_t rounded_prefix_bytes = 0; + + if (rt->nd_opt_type != ND_OPT_ROUTE_INFO) { + continue; + } + + rti_opt = (struct nd_opt_route_info *)rt; + if ((rti_opt->nd_opt_rti_len < ND_OPT_LEN_RTI_MIN) || + (rti_opt->nd_opt_rti_len > ND_OPT_LEN_RTI_MAX)) { + nd6log(info, + "%s: invalid option " + "len %d for route information option, " + "ignored\n", __func__, + rti_opt->nd_opt_rti_len); + continue; + } + + if (rti_opt->nd_opt_rti_prefixlen > ND_OPT_RTI_PFXLEN_MAX) { + nd6log(info, + "%s: invalid prefix length %d " + "in the route information option, " + "ignored\n", __func__, rti_opt->nd_opt_rti_prefixlen); + continue; + } + + if (rti_opt->nd_opt_rti_prefixlen != 0 && + rti_opt->nd_opt_rti_prefixlen <= 64 && + rti_opt->nd_opt_rti_len == ND_OPT_LEN_RTI_MIN) { + nd6log(info, + "%s: invalid 
prefix " + "len %d is OOB for route information option, " + "with total option length of %d. Ignored.\n", + __func__, rti_opt->nd_opt_rti_prefixlen, + rti_opt->nd_opt_rti_len); + continue; + } + + if (rti_opt->nd_opt_rti_prefixlen > 64 && + rti_opt->nd_opt_rti_len != ND_OPT_LEN_RTI_MAX) { + nd6log(info, + "%s: invalid prefix " + "len %d is OOB for route information option, " + "with total option length of %d. Ignored.\n", + __func__, rti_opt->nd_opt_rti_prefixlen, + rti_opt->nd_opt_rti_len); + continue; + } + + if ((rti_opt->nd_opt_rti_flags & ND_RA_FLAG_RTPREF_MASK) == + ND_RA_FLAG_RTPREF_RSV) { + nd6log(info, + "%s: using reserved preference mask, " + "ignored\n", __func__); + continue; + } + + rti_prefix.sin6_family = AF_INET6; + rti_prefix.sin6_len = sizeof(rti_prefix); + + rounded_prefix_bytes = rti_opt->nd_opt_rti_prefixlen >> 3; + if (rti_opt->nd_opt_rti_prefixlen & 0x7) { + rounded_prefix_bytes++; + } + memcpy(&rti_prefix.sin6_addr, rti_opt + 1, rounded_prefix_bytes); + + nd6log(info, "%s: received RA with route opt, " + "prefix %s/%u pref %u lifetime %u\n", __func__, + ip6_sprintf(&rti_prefix.sin6_addr), + rti_opt->nd_opt_rti_prefixlen, + rti_opt->nd_opt_rti_flags, + ntohl(rti_opt->nd_opt_rti_lifetime)); + + dr0.flags = rti_opt->nd_opt_rti_flags; + dr0.stateflags = 0; + + /* + * https://tools.ietf.org/html/rfc4191#section-3.1 + * Type C Host requirements: + * The Router Preference and Lifetime values in a + * ::/0 Route Information Option override the + * preference and lifetime values in the Router + * Advertisement header. + */ + if (IN6_IS_ADDR_UNSPECIFIED(&rti_prefix.sin6_addr)) { + rti_defrtr_processed = TRUE; + /* + * If the router lifetime is 0, set the state flag + * to dummy, so that it is skipped and not used as a + * default router. + * Set the lifetime to 2 hrs to make sure we get rid + * of the router eventually if this was indeed for a router + * going away. + * + * We partly have to do this to ensure advertised prefixes + * stay onlink. + * A periodic RA would also keep refreshing the cached + * neighbor cache entry if it contains source link layer + * information. + */ + if (rti_opt->nd_opt_rti_lifetime == 0) { + dr0.rtlifetime = TWOHOUR; + dr0.stateflags |= NDDRF_INELIGIBLE; + } else { + dr0.rtlifetime = ntohl(rti_opt->nd_opt_rti_lifetime); + } + dr0.expire = net_uptime() + dr0.rtlifetime; + + lck_mtx_lock(nd6_mutex); + dr = defrtrlist_update(&dr0, NULL); + lck_mtx_unlock(nd6_mutex); + continue; + } + + dr0.rtlifetime = ntohl(rti_opt->nd_opt_rti_lifetime); + dr0.expire = net_uptime() + dr0.rtlifetime; + bzero(&rti, sizeof(rti)); + rti.nd_rti_prefixlen = rti_opt->nd_opt_rti_prefixlen; + rti.nd_rti_prefix = rti_prefix.sin6_addr; + nd6_rtilist_update(&rti, &dr0); + } + } + + if (!rti_defrtr_processed) { + dr0.flags = nd_ra->nd_ra_flags_reserved; + dr0.stateflags = 0; + /* + * If the router lifetime is 0, set the state flag + * to dummy, so that it is skipped and not used as a + * default router. + * Set the lifetime to 2 hrs to make sure we get rid + * of the router eventually if this was indeed for a router + * going away. + * + * We partly have to do this to ensure advertised prefixes + * stay onlink. + * A periodic RA would also keep refreshing the cached + * neighbor cache entry if it contains source link layer + * information. 
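The prefix copy in the hunk above pulls in only as many octets as the advertised prefix length covers, rounding up to whole bytes before the memcpy. Restating that arithmetic with a few concrete values (illustrative only):

    /* prefixlen  64:  64 >> 3 = 8,   64 & 0x7 == 0 -> copy 8 bytes   */
    /* prefixlen  60:  60 >> 3 = 7,   60 & 0x7 == 4 -> copy 7+1 bytes */
    /* prefixlen 128: 128 >> 3 = 16, 128 & 0x7 == 0 -> copy 16 bytes  */
    rounded_prefix_bytes = rti_opt->nd_opt_rti_prefixlen >> 3;
    if (rti_opt->nd_opt_rti_prefixlen & 0x7) {
        rounded_prefix_bytes++;
    }
    memcpy(&rti_prefix.sin6_addr, rti_opt + 1, rounded_prefix_bytes);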
+ */ + if (nd_ra->nd_ra_router_lifetime == 0) { + dr0.rtlifetime = TWOHOUR; + dr0.stateflags |= NDDRF_INELIGIBLE; + } else { + dr0.rtlifetime = ntohs(nd_ra->nd_ra_router_lifetime); + } + dr0.expire = net_uptime() + dr0.rtlifetime; + lck_mtx_lock(nd6_mutex); + dr = defrtrlist_update(&dr0, NULL); + lck_mtx_unlock(nd6_mutex); + } /* * prefix @@ -495,7 +647,7 @@ nd6_ra_input( for (pt = (struct nd_opt_hdr *)ndopts.nd_opts_pi; pt <= (struct nd_opt_hdr *)ndopts.nd_opts_pi_end; pt = (struct nd_opt_hdr *)((caddr_t)pt + - (pt->nd_opt_len << 3))) { + (pt->nd_opt_len << ND_OPT_LEN_TO_BYTE_SCALE))) { struct in6_addr pi_mask; bzero(&pi_mask, sizeof(pi_mask)); @@ -626,12 +778,12 @@ nd6_ra_input( } } + /* * MTU */ if (ndopts.nd_opts_mtu && ndopts.nd_opts_mtu->nd_opt_mtu_len == 1) { mtu = ntohl(ndopts.nd_opts_mtu->nd_opt_mtu_mtu); - /* lower bound */ if (mtu < IPV6_MMTU) { nd6log(info, "nd6_ra_input: bogus mtu option " @@ -731,7 +883,7 @@ bad: /* tell the change to user processes watching the routing socket. */ static void -nd6_rtmsg(int cmd, struct rtentry *rt) +nd6_rtmsg(u_char cmd, struct rtentry *rt) { struct rt_addrinfo info; struct ifnet *ifp = rt->rt_ifp; @@ -754,13 +906,14 @@ nd6_rtmsg(int cmd, struct rtentry *rt) } static void -defrouter_addreq(struct nd_defrouter *new, boolean_t scoped) +defrouter_addreq(struct nd_defrouter *new, struct nd_route_info *rti, boolean_t scoped) { - struct sockaddr_in6 def, mask, gate; + struct sockaddr_in6 key, mask, gate; struct rtentry *newrt = NULL; unsigned int ifscope; int err; struct nd_ifinfo *ndi = ND_IFINFO(new->ifp); + int rtflags = RTF_GATEWAY; LCK_MTX_ASSERT(nd6_mutex, LCK_MTX_ASSERT_NOTOWNED); NDDR_LOCK_ASSERT_NOTHELD(new); @@ -773,8 +926,7 @@ defrouter_addreq(struct nd_defrouter *new, boolean_t scoped) if (new->stateflags & NDDRF_INSTALLED) { goto out; } - - if (new->ifp->if_eflags & IFEF_IPV6_ROUTER) { + if (new->ifp->if_ipv6_router_mode == IPV6_ROUTER_MODE_EXCLUSIVE) { nd6log2(info, "%s: ignoring router %s, scoped=%d, " "static=%d on advertising interface\n", if_name(new->ifp), ip6_sprintf(&new->rtaddr), scoped, @@ -786,13 +938,23 @@ defrouter_addreq(struct nd_defrouter *new, boolean_t scoped) "static=%d\n", if_name(new->ifp), ip6_sprintf(&new->rtaddr), scoped, (new->stateflags & NDDRF_STATIC) ? 
1 : 0); - Bzero(&def, sizeof(def)); + Bzero(&key, sizeof(key)); Bzero(&mask, sizeof(mask)); Bzero(&gate, sizeof(gate)); - def.sin6_len = mask.sin6_len = gate.sin6_len + key.sin6_len = mask.sin6_len = gate.sin6_len = sizeof(struct sockaddr_in6); - def.sin6_family = mask.sin6_family = gate.sin6_family = AF_INET6; + key.sin6_family = mask.sin6_family = gate.sin6_family = AF_INET6; + + if (rti != NULL) { + key.sin6_addr = rti->nd_rti_prefix; + in6_len2mask(&mask.sin6_addr, rti->nd_rti_prefixlen); + if (rti->nd_rti_prefixlen == ND_OPT_RTI_PFXLEN_MAX) { + rtflags |= RTF_HOST; + } else { + rtflags |= RTF_PRCLONING; + } + } if (new->stateflags & NDDRF_MAPPED) { gate.sin6_addr = new->rtaddr_mapped; @@ -834,9 +996,9 @@ defrouter_addreq(struct nd_defrouter *new, boolean_t scoped) } } - err = rtrequest_scoped(RTM_ADD, (struct sockaddr *)&def, + err = rtrequest_scoped(RTM_ADD, (struct sockaddr *)&key, (struct sockaddr *)&gate, (struct sockaddr *)&mask, - RTF_GATEWAY, &newrt, ifscope); + rtflags, &newrt, ifscope); if (newrt) { RT_LOCK(newrt); @@ -863,6 +1025,7 @@ out: struct nd_defrouter * defrouter_lookup( + struct nd_drhead *nd_router_listp, struct in6_addr *addr, struct ifnet *ifp) { @@ -870,11 +1033,15 @@ defrouter_lookup( LCK_MTX_ASSERT(nd6_mutex, LCK_MTX_ASSERT_OWNED); - for (dr = TAILQ_FIRST(&nd_defrouter); dr; + if (nd_router_listp == NULL) { + nd_router_listp = &nd_defrouter_list; + } + + for (dr = TAILQ_FIRST(nd_router_listp); dr; dr = TAILQ_NEXT(dr, dr_entry)) { NDDR_LOCK(dr); if (dr->ifp == ifp && IN6_ARE_ADDR_EQUAL(addr, &dr->rtaddr)) { - NDDR_ADDREF_LOCKED(dr); + NDDR_ADDREF(dr); NDDR_UNLOCK(dr); return dr; } @@ -890,9 +1057,9 @@ defrouter_lookup( * not be called from anywhere else. */ static void -defrouter_delreq(struct nd_defrouter *dr) +defrouter_delreq(struct nd_defrouter *dr, struct nd_route_info *rti) { - struct sockaddr_in6 def, mask, gate; + struct sockaddr_in6 key, mask, gate; struct rtentry *oldrt = NULL; unsigned int ifscope; int err; @@ -914,14 +1081,19 @@ defrouter_delreq(struct nd_defrouter *dr) ip6_sprintf(&dr->rtaddr), (dr->stateflags & NDDRF_IFSCOPE) ? 1 : 0, (dr->stateflags & NDDRF_STATIC) ? 1 : 0); - Bzero(&def, sizeof(def)); + Bzero(&key, sizeof(key)); Bzero(&mask, sizeof(mask)); Bzero(&gate, sizeof(gate)); - def.sin6_len = mask.sin6_len = gate.sin6_len + key.sin6_len = mask.sin6_len = gate.sin6_len = sizeof(struct sockaddr_in6); - def.sin6_family = mask.sin6_family = gate.sin6_family = AF_INET6; + key.sin6_family = mask.sin6_family = gate.sin6_family = AF_INET6; + + if (rti != NULL) { + key.sin6_addr = rti->nd_rti_prefix; + in6_len2mask(&mask.sin6_addr, rti->nd_rti_prefixlen); + } /* * The router entry may be mapped to a different address. 
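With the rti argument in place, defrouter_addreq() above keys the route on the advertised prefix rather than ::/0 and derives the route flags from the prefix length, while defrouter_delreq() below mirrors the same key/mask setup when tearing the route down. Condensed from the hunk above (key, mask, gate, newrt and ifscope are the locals of that function):

    int rtflags = RTF_GATEWAY;

    if (rti != NULL) {
        key.sin6_addr = rti->nd_rti_prefix;
        in6_len2mask(&mask.sin6_addr, rti->nd_rti_prefixlen);
        if (rti->nd_rti_prefixlen == ND_OPT_RTI_PFXLEN_MAX) {
            rtflags |= RTF_HOST;        /* /128: host route */
        } else {
            rtflags |= RTF_PRCLONING;   /* shorter prefix: cloning route */
        }
    }
    /* rti == NULL leaves key/mask as ::/0, i.e. a plain default route */
    err = rtrequest_scoped(RTM_ADD, (struct sockaddr *)&key,
        (struct sockaddr *)&gate, (struct sockaddr *)&mask,
        rtflags, &newrt, ifscope);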
* If that is the case, use the mapped address as gateway @@ -944,7 +1116,7 @@ defrouter_delreq(struct nd_defrouter *dr) NDDR_UNLOCK(dr); err = rtrequest_scoped(RTM_DELETE, - (struct sockaddr *)&def, (struct sockaddr *)&gate, + (struct sockaddr *)&key, (struct sockaddr *)&gate, (struct sockaddr *)&mask, RTF_GATEWAY, &oldrt, ifscope); if (oldrt) { @@ -982,17 +1154,17 @@ defrouter_reset(void) LCK_MTX_ASSERT(nd6_mutex, LCK_MTX_ASSERT_OWNED); - dr = TAILQ_FIRST(&nd_defrouter); + dr = TAILQ_FIRST(&nd_defrouter_list); while (dr) { NDDR_LOCK(dr); if (dr->stateflags & NDDRF_INSTALLED) { - NDDR_ADDREF_LOCKED(dr); + NDDR_ADDREF(dr); NDDR_UNLOCK(dr); lck_mtx_unlock(nd6_mutex); - defrouter_delreq(dr); + defrouter_delreq(dr, NULL); lck_mtx_lock(nd6_mutex); NDDR_REMREF(dr); - dr = TAILQ_FIRST(&nd_defrouter); + dr = TAILQ_FIRST(&nd_defrouter_list); } else { NDDR_UNLOCK(dr); dr = TAILQ_NEXT(dr, dr_entry); @@ -1003,7 +1175,7 @@ defrouter_reset(void) bzero(&drany, sizeof(drany)); lck_mtx_init(&drany.nddr_lock, ifa_mtx_grp, ifa_mtx_attr); lck_mtx_unlock(nd6_mutex); - defrouter_delreq(&drany); + defrouter_delreq(&drany, NULL); lck_mtx_destroy(&drany.nddr_lock, ifa_mtx_grp); lck_mtx_lock(nd6_mutex); } @@ -1021,7 +1193,7 @@ defrtrlist_ioctl(u_long cmd, caddr_t data) case SIOCDRADD_IN6_32: /* struct in6_defrouter_32 */ case SIOCDRADD_IN6_64: /* struct in6_defrouter_64 */ ++add; - /* FALLTHRU */ + OS_FALLTHROUGH; case SIOCDRDEL_IN6_32: /* struct in6_defrouter_32 */ case SIOCDRDEL_IN6_64: /* struct in6_defrouter_64 */ bzero(&dr0, sizeof(dr0)); @@ -1101,7 +1273,7 @@ defrtrlist_ioctl(u_long cmd, caddr_t data) * only after calling this routine. */ void -defrtrlist_del(struct nd_defrouter *dr) +defrtrlist_del(struct nd_defrouter *dr, struct nd_drhead *nd_router_listp) { #if (DEVELOPMENT || DEBUG) struct nd_defrouter *dr_itr = NULL; @@ -1109,10 +1281,19 @@ defrtrlist_del(struct nd_defrouter *dr) struct nd_prefix *pr; struct ifnet *ifp = dr->ifp; struct nd_ifinfo *ndi = NULL; - boolean_t resetmtu; + boolean_t resetmtu = FALSE; + struct nd_route_info *rti = NULL; LCK_MTX_ASSERT(nd6_mutex, LCK_MTX_ASSERT_OWNED); + if (nd_router_listp == NULL) { + nd_router_listp = &nd_defrouter_list; + } + + if (nd_router_listp != &nd_defrouter_list) { + rti = (struct nd_route_info *)nd_router_listp; + } + #if (DEVELOPMENT || DEBUG) /* * Verify that the router is not in the global default @@ -1123,15 +1304,21 @@ defrtrlist_del(struct nd_defrouter *dr) * Also we can't use ASSERT here as that is not defined * for development builds. */ - TAILQ_FOREACH(dr_itr, &nd_defrouter, dr_entry) + TAILQ_FOREACH(dr_itr, nd_router_listp, dr_entry) VERIFY(dr != dr_itr); #endif ++nd6_defrouter_genid; /* * Flush all the routing table entries that use the router * as a next hop. + * + * XXX Note that for a router advertising itself as default router + * and also advertising route information option, the following + * code will have the default router entry and router entry of + * RTI step over each other. + * The following therefore may not be efficient but won't be + * causing blocking issues. */ - /* above is a good condition? */ NDDR_ADDREF(dr); lck_mtx_unlock(nd6_mutex); if (dr->stateflags & NDDRF_MAPPED) { @@ -1139,38 +1326,42 @@ defrtrlist_del(struct nd_defrouter *dr) } else { rt6_flush(&dr->rtaddr, ifp); } - lck_mtx_lock(nd6_mutex); NDDR_REMREF(dr); - nd6log2(info, "%s: freeing defrouter %s\n", if_name(dr->ifp), + nd6log2(info, "%s: freeing route to %s with gateway %s\n", if_name(dr->ifp), + (rti == NULL)? 
"::" : ip6_sprintf(&rti->nd_rti_prefix), ip6_sprintf(&dr->rtaddr)); /* * Delete it from the routing table. */ NDDR_ADDREF(dr); lck_mtx_unlock(nd6_mutex); - defrouter_delreq(dr); + defrouter_delreq(dr, rti); lck_mtx_lock(nd6_mutex); NDDR_REMREF(dr); /* - * Also delete all the pointers to the router in each prefix lists. + * The following should mostly be limited to when we are working + * with a default router entry and not a router entry from + * rti router list. */ - for (pr = nd_prefix.lh_first; pr; pr = pr->ndpr_next) { - struct nd_pfxrouter *pfxrtr; + if (rti == NULL) { + /* + * Also delete all the pointers to the router in each prefix lists. + */ + for (pr = nd_prefix.lh_first; pr; pr = pr->ndpr_next) { + struct nd_pfxrouter *pfxrtr; - NDPR_LOCK(pr); - if ((pfxrtr = pfxrtr_lookup(pr, dr)) != NULL) { - pfxrtr_del(pfxrtr, pr); + NDPR_LOCK(pr); + if ((pfxrtr = pfxrtr_lookup(pr, dr)) != NULL) { + pfxrtr_del(pfxrtr, pr); + } + NDPR_UNLOCK(pr); } - NDPR_UNLOCK(pr); + pfxlist_onlink_check(); } - - pfxlist_onlink_check(); - - resetmtu = FALSE; ndi = ND_IFINFO(ifp); - VERIFY((NULL != ndi) && (TRUE == ndi->initialized)); + VERIFY(NULL != ndi && TRUE == ndi->initialized); lck_mtx_lock(&ndi->lock); VERIFY(ndi->ndefrouters >= 0); if (ndi->ndefrouters > 0 && --ndi->ndefrouters == 0) { @@ -1178,13 +1369,12 @@ defrtrlist_del(struct nd_defrouter *dr) resetmtu = TRUE; } lck_mtx_unlock(&ndi->lock); - /* * If the router is the primary one, choose a new one. * We always try to pick another eligible router * on this interface as we do scoped routing */ - defrouter_select(ifp); + defrouter_select(ifp, nd_router_listp); if (resetmtu) { nd6_setmtu(ifp); @@ -1204,14 +1394,14 @@ defrtrlist_add_static(struct nd_defrouter *new) new->flags &= ND_RA_FLAG_RTPREF_MASK; lck_mtx_lock(nd6_mutex); - dr = defrouter_lookup(&new->rtaddr, new->ifp); + dr = defrouter_lookup(NULL, &new->rtaddr, new->ifp); if (dr != NULL && !(dr->stateflags & NDDRF_STATIC)) { err = EINVAL; } else { if (dr != NULL) { NDDR_REMREF(dr); } - dr = defrtrlist_update(new); + dr = defrtrlist_update(new, NULL); if (dr != NULL) { err = dr->err; } else { @@ -1232,15 +1422,15 @@ defrtrlist_del_static(struct nd_defrouter *new) struct nd_defrouter *dr; lck_mtx_lock(nd6_mutex); - dr = defrouter_lookup(&new->rtaddr, new->ifp); + dr = defrouter_lookup(NULL, &new->rtaddr, new->ifp); if (dr == NULL || !(dr->stateflags & NDDRF_STATIC)) { if (dr != NULL) { NDDR_REMREF(dr); } dr = NULL; } else { - TAILQ_REMOVE(&nd_defrouter, dr, dr_entry); - defrtrlist_del(dr); + TAILQ_REMOVE(&nd_defrouter_list, dr, dr_entry); + defrtrlist_del(dr, NULL); NDDR_REMREF(dr); /* remove list reference */ NDDR_REMREF(dr); } @@ -1307,7 +1497,7 @@ rtpref(struct nd_defrouter *dr) * we do not need to classify the cases by ifdef. 
*/ void -defrouter_select(struct ifnet *ifp) +defrouter_select(struct ifnet *ifp, struct nd_drhead *nd_router_listp) { struct nd_defrouter *dr = NULL; struct nd_defrouter *selected_dr = NULL; @@ -1317,9 +1507,24 @@ defrouter_select(struct ifnet *ifp) struct nd_ifinfo *ndi = NULL; unsigned int genid = 0; boolean_t is_installed_reachable = FALSE; + struct nd_route_info *rti = NULL; + boolean_t scoped = TRUE; + boolean_t is_rti_rtrlist = FALSE; LCK_MTX_ASSERT(nd6_mutex, LCK_MTX_ASSERT_OWNED); + if (nd_router_listp == NULL) { + nd_router_listp = &nd_defrouter_list; + } + + if (nd_router_listp != &nd_defrouter_list) { + rti = (struct nd_route_info *)nd_router_listp; + /* XXX For now we treat RTI routes as un-scoped */ + scoped = FALSE; + is_rti_rtrlist = TRUE; + } + + if (ifp == NULL) { ifp = nd6_defifp; if (ifp == NULL) { @@ -1333,7 +1538,13 @@ defrouter_select(struct ifnet *ifp) __func__, __LINE__, if_name(ifp)); } - if (ifp == lo_ifp) { + /* + * When we are working with RTI router list, the nd6_defifp may be + * NULL. That is the scenario when the network may not have WAN + * v6 connectivity and the only RAs we may be getting are with lifetime + * 0. + */ + if (ifp == lo_ifp && !is_rti_rtrlist) { nd6log2(info, "%s:%d: Return early. " "Default router select called for loopback.\n", @@ -1341,11 +1552,11 @@ defrouter_select(struct ifnet *ifp) return; } - if (ifp->if_eflags & IFEF_IPV6_ROUTER) { + if (ifp->if_ipv6_router_mode == IPV6_ROUTER_MODE_EXCLUSIVE) { nd6log2(info, "%s:%d: Return early. " "Default router select called for interface" - " %s with IFEF_IPV6_ROUTER flag set\n", + " %s in IPV6_ROUTER_MODE_EXCLUSIVE\n", __func__, __LINE__, if_name(ifp)); return; } @@ -1354,7 +1565,7 @@ defrouter_select(struct ifnet *ifp) * Let's handle easy case (3) first: * If default router list is empty, there's nothing to be done. */ - if (!TAILQ_FIRST(&nd_defrouter)) { + if (!TAILQ_FIRST(nd_router_listp)) { nd6log2(info, "%s:%d: Return early. " "Default router is empty.\n", __func__, __LINE__); @@ -1374,7 +1585,14 @@ defrouter_select(struct ifnet *ifp) return; } - if (ndi->ndefrouters == 0) { + /* + * RTI router list routes are installed as unscoped. + * Since there can be only one unscoped route, we need to + * go through the entire list and consider all interfaces. + * Further, for now, RTI option is only processed on Ethernet + * type interfaces only. + */ + if (ndi->ndefrouters == 0 && !is_rti_rtrlist) { nd6log2(info, "%s:%d: Return early. " "%s does not have any default routers.\n", @@ -1404,7 +1622,7 @@ defrouter_select(struct ifnet *ifp) * installed_dr = currently installed primary router */ genid = nd6_defrouter_genid; - dr = TAILQ_FIRST(&nd_defrouter); + dr = TAILQ_FIRST(nd_router_listp); while (dr != NULL) { struct in6_addr rtaddr; @@ -1413,9 +1631,16 @@ defrouter_select(struct ifnet *ifp) NDDR_LOCK(dr); drifp = dr->ifp; - if (drifp != ifp) { + if (drifp != ifp && !is_rti_rtrlist) { + NDDR_UNLOCK(dr); + dr = TAILQ_NEXT(dr, dr_entry); + continue; + } + + if (dr->stateflags & NDDRF_INELIGIBLE) { NDDR_UNLOCK(dr); dr = TAILQ_NEXT(dr, dr_entry); + nd6log(info, "Ignoring dummy entry for default router."); continue; } @@ -1425,7 +1650,7 @@ defrouter_select(struct ifnet *ifp) * there's no point checking for reachability as * there's nothing else to choose from. 
*/ - if (ndi->ndefrouters == 1) { + if (ndi->ndefrouters == 1 && !is_rti_rtrlist) { nd6log2(info, "%s:%d: Fast forward default router selection " "as interface %s has learned only one default " @@ -1436,7 +1661,7 @@ defrouter_select(struct ifnet *ifp) if (dr->stateflags & NDDRF_INSTALLED) { installed_dr = dr; } - NDDR_ADDREF_LOCKED(selected_dr); + NDDR_ADDREF(selected_dr); NDDR_UNLOCK(dr); goto install_route; } @@ -1447,7 +1672,7 @@ defrouter_select(struct ifnet *ifp) rtaddr = dr->rtaddr; } - NDDR_ADDREF_LOCKED(dr); /* for this for loop */ + NDDR_ADDREF(dr); /* for this for loop */ NDDR_UNLOCK(dr); /* Callee returns a locked route upon success */ @@ -1489,14 +1714,14 @@ defrouter_select(struct ifnet *ifp) drrele = selected_dr; } selected_dr = dr; - NDDR_ADDREF_LOCKED(selected_dr); + NDDR_ADDREF(selected_dr); } /* Record the currently installed router */ if (dr->stateflags & NDDRF_INSTALLED) { if (installed_dr == NULL) { installed_dr = dr; - NDDR_ADDREF_LOCKED(installed_dr); + NDDR_ADDREF(installed_dr); if (dr->stateflags & NDDRF_MAPPED) { rtaddr = installed_dr->rtaddr_mapped; } else { @@ -1505,7 +1730,7 @@ defrouter_select(struct ifnet *ifp) NDDR_UNLOCK(dr); lck_mtx_unlock(nd6_mutex); /* Callee returns a locked route upon success */ - if ((rt = nd6_lookup(&rtaddr, 0, ifp, 0)) != NULL) { + if ((rt = nd6_lookup(&rtaddr, 0, installed_dr->ifp, 0)) != NULL) { RT_LOCK_ASSERT_HELD(rt); if ((ln = rt->rt_llinfo) != NULL && ND6_IS_LLINFO_PROBREACH(ln)) { @@ -1521,7 +1746,7 @@ defrouter_select(struct ifnet *ifp) /* this should not happen; warn for diagnosis */ nd6log(error, "defrouter_select: more than one " "default router is installed for interface :%s.\n", - if_name(ifp)); + if_name(installed_dr->ifp)); NDDR_UNLOCK(dr); } } else { @@ -1548,7 +1773,7 @@ defrouter_select(struct ifnet *ifp) installed_dr = NULL; } - if (ndi->ndefrouters == 0) { + if (ndi->ndefrouters == 0 && !is_rti_rtrlist) { nd6log2(info, "%s:%d: Interface %s no longer " "has any default routers. Abort.\n", @@ -1564,7 +1789,7 @@ defrouter_select(struct ifnet *ifp) is_installed_reachable = FALSE; genid = nd6_defrouter_genid; - dr = TAILQ_FIRST(&nd_defrouter); + dr = TAILQ_FIRST(nd_router_listp); } else { dr = TAILQ_NEXT(dr, dr_entry); } @@ -1580,7 +1805,10 @@ defrouter_select(struct ifnet *ifp) if (installed_dr) { for (dr = TAILQ_NEXT(installed_dr, dr_entry); dr; dr = TAILQ_NEXT(dr, dr_entry)) { - if (installed_dr->ifp != dr->ifp) { + if (installed_dr->ifp != dr->ifp && !is_rti_rtrlist) { + continue; + } + if (dr->stateflags & NDDRF_INELIGIBLE) { continue; } selected_dr = dr; @@ -1593,9 +1821,12 @@ defrouter_select(struct ifnet *ifp) * one on the list, select the first one from the list */ if ((installed_dr == NULL) || (selected_dr == NULL)) { - for (dr = TAILQ_FIRST(&nd_defrouter); dr; + for (dr = TAILQ_FIRST(nd_router_listp); dr; dr = TAILQ_NEXT(dr, dr_entry)) { - if (dr->ifp == ifp) { + if (dr->stateflags & NDDRF_INELIGIBLE) { + continue; + } + if (dr->ifp == ifp || is_rti_rtrlist) { selected_dr = dr; break; } @@ -1652,25 +1883,25 @@ install_route: "%s. Installing new default route.\n", __func__, __LINE__, if_name(ifp)); if (installed_dr != NULL) { - defrouter_delreq(installed_dr); + defrouter_delreq(installed_dr, rti); } /* * Install scoped route if the interface is not * the default nd6 interface. 
*/ - defrouter_addreq(selected_dr, - (selected_dr->ifp != nd6_defifp)); + defrouter_addreq(selected_dr, rti, + scoped && (selected_dr->ifp != nd6_defifp)); } else if (((installed_dr->stateflags & NDDRF_IFSCOPE) && (installed_dr->ifp == nd6_defifp)) || - (!(installed_dr->stateflags & NDDRF_IFSCOPE) && + (scoped && !(installed_dr->stateflags & NDDRF_IFSCOPE) && (installed_dr->ifp != nd6_defifp))) { nd6log(info, "%s:%d: Need to reinstall default route for interface " "%s as its scope has changed.\n", __func__, __LINE__, if_name(ifp)); - defrouter_delreq(installed_dr); - defrouter_addreq(installed_dr, - (installed_dr->ifp != nd6_defifp)); + defrouter_delreq(installed_dr, rti); + defrouter_addreq(installed_dr, rti, + scoped && (installed_dr->ifp != nd6_defifp)); } else { nd6log2(info, "%s:%d: No need to change the default " @@ -1695,7 +1926,7 @@ out: } static struct nd_defrouter * -defrtrlist_update_common(struct nd_defrouter *new, boolean_t scoped) +defrtrlist_update_common(struct nd_defrouter *new, struct nd_drhead *nd_router_listp, boolean_t scoped) { struct nd_defrouter *dr, *n; struct ifnet *ifp = new->ifp; @@ -1704,17 +1935,55 @@ defrtrlist_update_common(struct nd_defrouter *new, boolean_t scoped) LCK_MTX_ASSERT(nd6_mutex, LCK_MTX_ASSERT_OWNED); - if ((dr = defrouter_lookup(&new->rtaddr, ifp)) != NULL) { + if (nd_router_listp == NULL) { + nd_router_listp = &nd_defrouter_list; + } + + /* + * If we are not operating on default router list, + * it implies we are operating on RTI's router list. + * XXX For now we manage RTI routes un-scoped. + */ + if (nd_router_listp != &nd_defrouter_list) { + scoped = FALSE; + } + + if ((dr = defrouter_lookup(nd_router_listp, &new->rtaddr, ifp)) != NULL) { /* entry exists */ - if (new->rtlifetime == 0) { - TAILQ_REMOVE(&nd_defrouter, dr, dr_entry); - defrtrlist_del(dr); + /* + * 1. If previous entry was not dummy and new is, + * delete it and return NULL. + * 2. If previous entry was dummy and the new one + * is also dummy, simply return dr. + * 3. If previous was dummy but new one is not, + * make sure we perform default router selection again. + */ + /* If the router was not added as a dummy and there's + * been a change (lifetime advertised was 0, communicated + * as NDDRF_INELIGIBLE flag), remove the entry. + */ + if ((new->stateflags & NDDRF_INELIGIBLE) != 0 && + (dr->stateflags & NDDRF_INELIGIBLE) == 0) { + TAILQ_REMOVE(nd_router_listp, dr, dr_entry); + defrtrlist_del(dr, nd_router_listp); NDDR_REMREF(dr); /* remove list reference */ NDDR_REMREF(dr); dr = NULL; + return NULL; } else { int oldpref = rtpref(dr); struct nd_defrouter *p = NULL; + boolean_t dummy_change = FALSE; + /* + * If new one is not dummy but the old one was, + * reset the stateflag. + */ + if ((new->stateflags & NDDRF_INELIGIBLE) == 0 && + (dr->stateflags & NDDRF_INELIGIBLE) != 0) { + dummy_change = TRUE; + dr->stateflags &= ~NDDRF_INELIGIBLE; + } + /* override */ dr->flags = new->flags; /* xxx flag check */ dr->rtlifetime = new->rtlifetime; @@ -1728,11 +1997,11 @@ defrtrlist_update_common(struct nd_defrouter *new, boolean_t scoped) * it's already at that position. 
*/ /* same preference and scoped; just return */ - if (rtpref(new) == oldpref && scoped) { + if (rtpref(new) == oldpref && scoped && dummy_change == FALSE) { return dr; } - n = TAILQ_FIRST(&nd_defrouter); + n = TAILQ_FIRST(nd_router_listp); while (n != NULL) { /* preference changed; sort it */ if (rtpref(new) != oldpref) { @@ -1751,7 +2020,7 @@ defrtrlist_update_common(struct nd_defrouter *new, boolean_t scoped) /* nothing has changed, just return */ if (n == NULL && (scoped || - !(dr->stateflags & NDDRF_IFSCOPE))) { + !(dr->stateflags & NDDRF_IFSCOPE)) && dummy_change == FALSE) { return dr; } @@ -1764,34 +2033,28 @@ defrtrlist_update_common(struct nd_defrouter *new, boolean_t scoped) * defrouter_select() below will handle routing * changes later. */ - TAILQ_REMOVE(&nd_defrouter, dr, dr_entry); + TAILQ_REMOVE(nd_router_listp, dr, dr_entry); new->stateflags = dr->stateflags; n = dr; goto insert; } - return dr; } VERIFY(dr == NULL); - - /* entry does not exist */ - if (new->rtlifetime == 0) { - return NULL; - } - - n = nddr_alloc(M_WAITOK); - if (n == NULL) { - return NULL; - } + n = nddr_alloc(Z_WAITOK); ndi = ND_IFINFO(ifp); VERIFY((NULL != ndi) && (TRUE == ndi->initialized)); lck_mtx_lock(&ndi->lock); + if (ip6_maxifdefrouters >= 0 && ndi->ndefrouters >= ip6_maxifdefrouters) { lck_mtx_unlock(&ndi->lock); nddr_free(n); + nd6log(error, "%s: ignoring router addition as we have hit the " + "max limit of %d for max default routers.\n", __func__, + ip6_maxifdefrouters); return NULL; } @@ -1833,7 +2096,7 @@ insert: */ /* insert at the end of the group */ - for (dr = TAILQ_FIRST(&nd_defrouter); dr; + for (dr = TAILQ_FIRST(nd_router_listp); dr; dr = TAILQ_NEXT(dr, dr_entry)) { if (rtpref(n) > rtpref(dr) || (!scoped && rtpref(n) == rtpref(dr))) { @@ -1843,21 +2106,21 @@ insert: if (dr) { TAILQ_INSERT_BEFORE(dr, n, dr_entry); } else { - TAILQ_INSERT_TAIL(&nd_defrouter, n, dr_entry); + TAILQ_INSERT_TAIL(nd_router_listp, n, dr_entry); } - defrouter_select(ifp); + defrouter_select(ifp, nd_router_listp); return n; } -static struct nd_defrouter * -defrtrlist_update(struct nd_defrouter *new) +struct nd_defrouter * +defrtrlist_update(struct nd_defrouter *new, struct nd_drhead *nd_router_list) { struct nd_defrouter *dr; LCK_MTX_ASSERT(nd6_mutex, LCK_MTX_ASSERT_OWNED); - dr = defrtrlist_update_common(new, + dr = defrtrlist_update_common(new, nd_router_list, (nd6_defifp != NULL && new->ifp != nd6_defifp)); return dr; @@ -1941,7 +2204,7 @@ nd6_prefix_lookup(struct nd_prefix *pr, int nd6_prefix_expiry) if (nd6_prefix_expiry != ND6_PREFIX_EXPIRY_UNSPEC) { search->ndpr_expire = nd6_prefix_expiry; } - NDPR_ADDREF_LOCKED(search); + NDPR_ADDREF(search); NDPR_UNLOCK(search); break; } @@ -1997,7 +2260,7 @@ nd6_prelist_add(struct nd_prefix *pr, struct nd_defrouter *dr, new->ndpr_lastupdate = net_uptime(); if (newp != NULL) { *newp = new; - NDPR_ADDREF_LOCKED(new); /* for caller */ + NDPR_ADDREF(new); /* for caller */ } /* initialization */ LIST_INIT(&new->ndpr_advrtrs); @@ -2086,7 +2349,7 @@ prelist_remove(struct nd_prefix *pr) * when executing "ndp -p". 
*/ if (pr->ndpr_stateflags & NDPRF_ONLINK) { - NDPR_ADDREF_LOCKED(pr); + NDPR_ADDREF(pr); NDPR_UNLOCK(pr); lck_mtx_unlock(nd6_mutex); if ((e = nd6_prefix_offlink(pr)) != 0) { @@ -2098,7 +2361,7 @@ prelist_remove(struct nd_prefix *pr) } lck_mtx_lock(nd6_mutex); NDPR_LOCK(pr); - if (NDPR_REMREF_LOCKED(pr) == NULL) { + if (NDPR_REMREF(pr) == NULL) { return; } } @@ -2130,7 +2393,7 @@ prelist_remove(struct nd_prefix *pr) lck_mtx_unlock(&ndi->lock); /* This must not be the last reference to the nd_prefix */ - if (NDPR_REMREF_LOCKED(pr) == NULL) { + if (NDPR_REMREF(pr) == NULL) { panic("%s: unexpected (missing) refcnt ndpr=%p", __func__, pr); /* NOTREACHED */ } @@ -2199,7 +2462,7 @@ prelist_update( pr->ndpr_lastupdate = net_uptime(); } - NDPR_ADDREF_LOCKED(pr); + NDPR_ADDREF(pr); if (new->ndpr_raf_onlink && (pr->ndpr_stateflags & NDPRF_ONLINK) == 0) { int e; @@ -2326,8 +2589,6 @@ prelist_update( * proceed to 5.5.3. (e): update the lifetimes according to the * "two hours" rule and the privacy extension. */ -#define TWOHOUR (120*60) - /* retrieve time as uptime (last arg is 0) */ in6ifa_getlifetime(ifa6, <6_tmp, 0); @@ -2341,7 +2602,7 @@ prelist_update( remaininglifetime = 0; } else { remaininglifetime = lt6_tmp.ia6t_vltime - - (timenow - ifa6->ia6_updatetime); + (uint32_t)(timenow - ifa6->ia6_updatetime); } /* when not updating, keep the current stored lifetime. */ lt6_tmp.ia6t_vltime = remaininglifetime; @@ -2387,8 +2648,8 @@ prelist_update( (u_int32_t)((timenow - ifa6->ia6_createtime) + ip6_desync_factor)) { maxvltime = ip6_temp_valid_lifetime - - (timenow - ifa6->ia6_createtime) - - ip6_desync_factor; + (uint32_t)((timenow - ifa6->ia6_createtime) + + ip6_desync_factor); } else { maxvltime = 0; } @@ -2396,8 +2657,8 @@ prelist_update( (u_int32_t)((timenow - ifa6->ia6_createtime) + ip6_desync_factor)) { maxpltime = ip6_temp_preferred_lifetime - - (timenow - ifa6->ia6_createtime) - - ip6_desync_factor; + (uint32_t)((timenow - ifa6->ia6_createtime) + + ip6_desync_factor); } else { maxpltime = 0; } @@ -2426,7 +2687,7 @@ prelist_update( * No address matched and the valid lifetime is non-zero. * Create a new address. */ - if ((ia6 = in6_pfx_newpersistaddr(new, mcast, &error, FALSE)) + if ((ia6 = in6_pfx_newpersistaddr(new, mcast, &error, FALSE, 0)) != NULL) { /* * note that we should use pr (not new) for reference. @@ -2434,7 +2695,7 @@ prelist_update( IFA_LOCK(&ia6->ia_ifa); NDPR_LOCK(pr); ia6->ia6_ndpr = pr; - NDPR_ADDREF_LOCKED(pr); /* for addr reference */ + NDPR_ADDREF(pr); /* for addr reference */ pr->ndpr_addrcnt++; VERIFY(pr->ndpr_addrcnt != 0); NDPR_UNLOCK(pr); @@ -2470,11 +2731,13 @@ prelist_update( * stateless translation. */ if (IS_INTF_CLAT46(ifp)) { - if ((ia6 = in6_pfx_newpersistaddr(new, mcast, &error, TRUE)) != NULL) { + if ((ia6 = in6_pfx_newpersistaddr(new, mcast, + &error, TRUE, CLAT46_COLLISION_COUNT_OFFSET)) + != NULL) { IFA_LOCK(&ia6->ia_ifa); NDPR_LOCK(pr); ia6->ia6_ndpr = pr; - NDPR_ADDREF_LOCKED(pr); /* for addr reference */ + NDPR_ADDREF(pr); /* for addr reference */ pr->ndpr_addrcnt++; VERIFY(pr->ndpr_addrcnt != 0); pr->ndpr_stateflags |= NDPRF_CLAT46; @@ -2527,14 +2790,14 @@ end: * Neighbor Discover Default Router structure reference counting routines. */ static struct nd_defrouter * -nddr_alloc(int how) +nddr_alloc(zalloc_flags_t how) { struct nd_defrouter *dr; - dr = (how == M_WAITOK) ? 
zalloc(nddr_zone) : zalloc_noblock(nddr_zone); - if (dr != NULL) { - bzero(dr, nddr_size); + dr = zalloc_flags(nddr_zone, how | Z_ZERO); + if (dr) { lck_mtx_init(&dr->nddr_lock, ifa_mtx_grp, ifa_mtx_attr); + lck_mtx_init(&dr->nddr_ref_lock, ifa_mtx_grp, ifa_mtx_attr); dr->nddr_debug |= IFD_ALLOC; if (nddr_debug != 0) { dr->nddr_debug |= IFD_DEBUG; @@ -2547,7 +2810,6 @@ nddr_alloc(int how) static void nddr_free(struct nd_defrouter *dr) { - NDDR_LOCK(dr); if (dr->nddr_debug & IFD_ATTACHED) { panic("%s: attached nddr %p is being freed", __func__, dr); /* NOTREACHED */ @@ -2556,9 +2818,8 @@ nddr_free(struct nd_defrouter *dr) /* NOTREACHED */ } dr->nddr_debug &= ~IFD_ALLOC; - NDDR_UNLOCK(dr); - lck_mtx_destroy(&dr->nddr_lock, ifa_mtx_grp); + lck_mtx_destroy(&dr->nddr_ref_lock, ifa_mtx_grp); zfree(nddr_zone, dr); } @@ -2587,35 +2848,22 @@ nddr_trace(struct nd_defrouter *dr, int refhold) } void -nddr_addref(struct nd_defrouter *nddr, int locked) +nddr_addref(struct nd_defrouter *nddr) { - if (!locked) { - NDDR_LOCK_SPIN(nddr); - } else { - NDDR_LOCK_ASSERT_HELD(nddr); - } - + NDDR_REF_LOCK_SPIN(nddr); if (++nddr->nddr_refcount == 0) { panic("%s: nddr %p wraparound refcnt\n", __func__, nddr); /* NOTREACHED */ } else if (nddr->nddr_trace != NULL) { (*nddr->nddr_trace)(nddr, TRUE); } - - if (!locked) { - NDDR_UNLOCK(nddr); - } + NDDR_REF_UNLOCK(nddr); } struct nd_defrouter * -nddr_remref(struct nd_defrouter *nddr, int locked) +nddr_remref(struct nd_defrouter *nddr) { - if (!locked) { - NDDR_LOCK_SPIN(nddr); - } else { - NDDR_LOCK_ASSERT_HELD(nddr); - } - + NDDR_REF_LOCK_SPIN(nddr); if (nddr->nddr_refcount == 0) { panic("%s: nddr %p negative refcnt\n", __func__, nddr); /* NOTREACHED */ @@ -2624,15 +2872,12 @@ nddr_remref(struct nd_defrouter *nddr, int locked) } if (--nddr->nddr_refcount == 0) { - NDDR_UNLOCK(nddr); + NDDR_REF_UNLOCK(nddr); nddr_free(nddr); nddr = NULL; + } else { + NDDR_REF_UNLOCK(nddr); } - - if (!locked && nddr != NULL) { - NDDR_UNLOCK(nddr); - } - return nddr; } @@ -2666,10 +2911,10 @@ ndpr_alloc(int how) { struct nd_prefix *pr; - pr = (how == M_WAITOK) ? 
zalloc(ndpr_zone) : zalloc_noblock(ndpr_zone); + pr = zalloc_flags(ndpr_zone, how | Z_ZERO); if (pr != NULL) { - bzero(pr, ndpr_size); lck_mtx_init(&pr->ndpr_lock, ifa_mtx_grp, ifa_mtx_attr); + lck_mtx_init(&pr->ndpr_ref_lock, ifa_mtx_grp, ifa_mtx_attr); RB_INIT(&pr->ndpr_prproxy_sols); pr->ndpr_debug |= IFD_ALLOC; if (ndpr_debug != 0) { @@ -2683,7 +2928,6 @@ ndpr_alloc(int how) static void ndpr_free(struct nd_prefix *pr) { - NDPR_LOCK(pr); if (pr->ndpr_debug & IFD_ATTACHED) { panic("%s: attached ndpr %p is being freed", __func__, pr); /* NOTREACHED */ @@ -2703,9 +2947,8 @@ ndpr_free(struct nd_prefix *pr) /* NOTREACHED */ } pr->ndpr_debug &= ~IFD_ALLOC; - NDPR_UNLOCK(pr); - lck_mtx_destroy(&pr->ndpr_lock, ifa_mtx_grp); + lck_mtx_destroy(&pr->ndpr_ref_lock, ifa_mtx_grp); zfree(ndpr_zone, pr); } @@ -2734,35 +2977,22 @@ ndpr_trace(struct nd_prefix *pr, int refhold) } void -ndpr_addref(struct nd_prefix *ndpr, int locked) +ndpr_addref(struct nd_prefix *ndpr) { - if (!locked) { - NDPR_LOCK_SPIN(ndpr); - } else { - NDPR_LOCK_ASSERT_HELD(ndpr); - } - + NDPR_REF_LOCK_SPIN(ndpr); if (++ndpr->ndpr_refcount == 0) { panic("%s: ndpr %p wraparound refcnt\n", __func__, ndpr); /* NOTREACHED */ } else if (ndpr->ndpr_trace != NULL) { (*ndpr->ndpr_trace)(ndpr, TRUE); } - - if (!locked) { - NDPR_UNLOCK(ndpr); - } + NDPR_REF_UNLOCK(ndpr); } struct nd_prefix * -ndpr_remref(struct nd_prefix *ndpr, int locked) +ndpr_remref(struct nd_prefix *ndpr) { - if (!locked) { - NDPR_LOCK_SPIN(ndpr); - } else { - NDPR_LOCK_ASSERT_HELD(ndpr); - } - + NDPR_REF_LOCK_SPIN(ndpr); if (ndpr->ndpr_refcount == 0) { panic("%s: ndpr %p negative refcnt\n", __func__, ndpr); /* NOTREACHED */ @@ -2777,15 +3007,12 @@ ndpr_remref(struct nd_prefix *ndpr, int locked) ndpr->ndpr_addrcnt); /* NOTREACHED */ } - NDPR_UNLOCK(ndpr); + NDPR_REF_UNLOCK(ndpr); ndpr_free(ndpr); ndpr = NULL; + } else { + NDPR_REF_UNLOCK(ndpr); } - - if (!locked && ndpr != NULL) { - NDPR_UNLOCK(ndpr); - } - return ndpr; } @@ -2918,10 +3145,10 @@ pfxlist_onlink_check(void) pr = pr->ndpr_next; continue; } - NDPR_ADDREF_LOCKED(pr); + NDPR_ADDREF(pr); if (pr->ndpr_raf_onlink && find_pfxlist_reachable_router(pr) && (pr->ndpr_debug & IFD_ATTACHED)) { - if (NDPR_REMREF_LOCKED(pr) == NULL) { + if (NDPR_REMREF(pr) == NULL) { pr = NULL; } else { NDPR_UNLOCK(pr); @@ -2948,7 +3175,7 @@ pfxlist_onlink_check(void) * that does not advertise any prefixes. 
*/ if (pr == NULL) { - for (dr = TAILQ_FIRST(&nd_defrouter); dr; + for (dr = TAILQ_FIRST(&nd_defrouter_list); dr; dr = TAILQ_NEXT(dr, dr_entry)) { struct nd_prefix *pr0; @@ -2966,7 +3193,7 @@ pfxlist_onlink_check(void) } } } - if (pr != NULL || (TAILQ_FIRST(&nd_defrouter) && pfxrtr == NULL)) { + if (pr != NULL || (TAILQ_FIRST(&nd_defrouter_list) && pfxrtr == NULL)) { /* * There is at least one prefix that has a reachable router, * or at least a router which probably does not advertise @@ -2991,7 +3218,7 @@ pfxlist_onlink_check(void) pr = pr->ndpr_next; continue; } - NDPR_ADDREF_LOCKED(pr); + NDPR_ADDREF(pr); if ((pr->ndpr_stateflags & NDPRF_DETACHED) == 0 && find_pfxlist_reachable_router(pr) == NULL && (pr->ndpr_debug & IFD_ATTACHED)) { @@ -3055,7 +3282,7 @@ pfxlist_onlink_check(void) continue; } pr->ndpr_stateflags |= NDPRF_PROCESSED_ONLINK; - NDPR_ADDREF_LOCKED(pr); + NDPR_ADDREF(pr); if ((pr->ndpr_stateflags & NDPRF_DETACHED) != 0 && (pr->ndpr_stateflags & NDPRF_ONLINK) != 0) { NDPR_UNLOCK(pr); @@ -3143,9 +3370,9 @@ pfxlist_onlink_check(void) IFA_UNLOCK(&ifa->ia_ifa); NDPR_LOCK(ndpr); - NDPR_ADDREF_LOCKED(ndpr); + NDPR_ADDREF(ndpr); if (find_pfxlist_reachable_router(ndpr)) { - if (NDPR_REMREF_LOCKED(ndpr) == NULL) { + if (NDPR_REMREF(ndpr) == NULL) { found = 0; } else { NDPR_UNLOCK(ndpr); @@ -3172,7 +3399,7 @@ pfxlist_onlink_check(void) } IFA_UNLOCK(&ifa->ia_ifa); NDPR_LOCK(ndpr); - NDPR_ADDREF_LOCKED(ndpr); + NDPR_ADDREF(ndpr); if (find_pfxlist_reachable_router(ndpr)) { NDPR_UNLOCK(ndpr); IFA_LOCK(&ifa->ia_ifa); @@ -3241,7 +3468,7 @@ nd6_prefix_equal_lookup(struct nd_prefix *pr, boolean_t primary_only) &opr->ndpr_prefix.sin6_addr, pr->ndpr_plen) && (!primary_only || !(opr->ndpr_stateflags & NDPRF_IFSCOPE))) { - NDPR_ADDREF_LOCKED(opr); + NDPR_ADDREF(opr); NDPR_UNLOCK(opr); return opr; } @@ -3366,6 +3593,7 @@ nd6_prefix_onlink_common(struct nd_prefix *pr, boolean_t force_scoped, u_int32_t rtflags; int error = 0, prproxy = 0; struct rtentry *rt = NULL; + u_char prefix_len = 0; LCK_MTX_ASSERT(nd6_mutex, LCK_MTX_ASSERT_OWNED); @@ -3448,10 +3676,11 @@ nd6_prefix_onlink_common(struct nd_prefix *pr, boolean_t force_scoped, mask6.sin6_len = sizeof(mask6); mask6.sin6_addr = pr->ndpr_mask; prefix = pr->ndpr_prefix; + prefix_len = pr->ndpr_plen; if ((rt = pr->ndpr_rt) != NULL) { pr->ndpr_rt = NULL; } - NDPR_ADDREF_LOCKED(pr); /* keep reference for this routine */ + NDPR_ADDREF(pr); /* keep reference for this routine */ NDPR_UNLOCK(pr); IFA_LOCK_SPIN(ifa); @@ -3469,6 +3698,31 @@ nd6_prefix_onlink_common(struct nd_prefix *pr, boolean_t force_scoped, lck_mtx_unlock(nd6_mutex); + /* + * check if it conflicts with a indirect prefix route added by RIO + * if so, remove the rti entry. 
+ */ + if (ifscope == IFSCOPE_NONE) { + struct rtentry *temp_route = NULL; + LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_NOTOWNED); + lck_mtx_lock(rnh_lock); + temp_route = rt_lookup(TRUE, (struct sockaddr *)&prefix, (struct sockaddr *)&mask6, rt_tables[AF_INET6], IFSCOPE_NONE); + lck_mtx_unlock(rnh_lock); + + if (temp_route != NULL && temp_route->rt_flags & RTF_GATEWAY && temp_route->rt_ifp != NULL) { + struct nd_route_info rti = {}; + bzero(&rti, sizeof(rti)); + rti.nd_rti_prefixlen = prefix_len; + rti.nd_rti_prefix = prefix.sin6_addr; + lck_mtx_lock(nd6_mutex); + nd6_rti_purge(&rti); + lck_mtx_unlock(nd6_mutex); + } + if (temp_route != NULL) { + rtfree(temp_route); + } + } + if (rt != NULL) { rtfree(rt); rt = NULL; @@ -3613,7 +3867,7 @@ nd6_prefix_offlink(struct nd_prefix *pr) if ((ndpr_rt = pr->ndpr_rt) != NULL) { pr->ndpr_rt = NULL; } - NDPR_ADDREF_LOCKED(pr); /* keep reference for this routine */ + NDPR_ADDREF(pr); /* keep reference for this routine */ NDPR_UNLOCK(pr); ifscope = (pr->ndpr_stateflags & NDPRF_IFSCOPE) ? @@ -3669,7 +3923,8 @@ nd6_prefix_offlink(struct nd_prefix *pr) } struct in6_ifaddr * -in6_pfx_newpersistaddr(struct nd_prefix *pr, int mcast, int *errorp, boolean_t is_clat46) +in6_pfx_newpersistaddr(struct nd_prefix *pr, int mcast, int *errorp, + boolean_t is_clat46, uint8_t collision_count) { struct in6_ifaddr *ia6 = NULL; struct ifnet *ifp = NULL; @@ -3766,35 +4021,22 @@ in6_pfx_newpersistaddr(struct nd_prefix *pr, int mcast, int *errorp, boolean_t i IFA_REMREF(&ia6->ia_ifa); ia6 = NULL; } else { - in6_cga_node_lock(); struct in6_cga_prepare local_cga_prepare; + struct in6_cga_prepare *prepare_p; + + + in6_cga_node_lock(); - /* - * XXX For now the collision count is not used in the classical - * way for secure addresses. - * Use a different collision count value to generate reserved - * address for stateless CLAT46 - */ if (ndi->cga_initialized) { bcopy(&(ndi->local_cga_modifier), &(local_cga_prepare.cga_modifier), sizeof(local_cga_prepare.cga_modifier)); - if (!is_clat46) { - error = in6_cga_generate(&local_cga_prepare, 0, - &ifra.ifra_addr.sin6_addr); - } else { - error = in6_cga_generate(&local_cga_prepare, 1, - &ifra.ifra_addr.sin6_addr); - } + prepare_p = &local_cga_prepare; } else { - if (!is_clat46) { - error = in6_cga_generate(NULL, 0, - &ifra.ifra_addr.sin6_addr); - } else { - error = in6_cga_generate(NULL, 1, - &ifra.ifra_addr.sin6_addr); - } + prepare_p = NULL; } + error = in6_cga_generate(prepare_p, collision_count, + &ifra.ifra_addr.sin6_addr, ifp); in6_cga_node_unlock(); if (error == 0) { ifra.ifra_flags |= IN6_IFF_SECURED; @@ -3863,6 +4105,9 @@ in6_pfx_newpersistaddr(struct nd_prefix *pr, int mcast, int *errorp, boolean_t i if_name(ifp), error); error = EADDRNOTAVAIL; goto done; + } else { + /* remember the collision count */ + ia6->ia6_cga_collision_count = collision_count; } VERIFY(ia6 != NULL); @@ -3888,7 +4133,7 @@ in6_tmpifadd(const struct in6_ifaddr *ia0, int forcegen) int i, error, ifaupdate; int trylimit = 3; /* XXX: adhoc value */ u_int32_t randid[2]; - time_t vltime0, pltime0; + uint32_t vltime0, pltime0; uint64_t timenow = net_uptime(); struct in6_addr addr; struct nd_prefix *ndpr; @@ -3944,7 +4189,7 @@ again: if (ia0->ia6_lifetime.ia6ti_vltime != ND6_INFINITE_LIFETIME) { vltime0 = IFA6_IS_INVALID(ia0, timenow) ? 
0 : (ia0->ia6_lifetime.ia6ti_vltime - - (timenow - ia0->ia6_updatetime)); + (uint32_t)(timenow - ia0->ia6_updatetime)); if (vltime0 > ip6_temp_valid_lifetime) { vltime0 = ip6_temp_valid_lifetime; } @@ -3954,7 +4199,7 @@ again: if (ia0->ia6_lifetime.ia6ti_pltime != ND6_INFINITE_LIFETIME) { pltime0 = IFA6_IS_DEPRECATED(ia0, timenow) ? 0 : (ia0->ia6_lifetime.ia6ti_pltime - - (timenow - ia0->ia6_updatetime)); + (uint32_t)(timenow - ia0->ia6_updatetime)); if (pltime0 > ip6_temp_preferred_lifetime - ip6_desync_factor) { pltime0 = ip6_temp_preferred_lifetime - ip6_desync_factor; @@ -4014,7 +4259,7 @@ again: NDPR_LOCK(newia->ia6_ndpr); newia->ia6_ndpr->ndpr_addrcnt++; VERIFY(newia->ia6_ndpr->ndpr_addrcnt != 0); - NDPR_ADDREF_LOCKED(newia->ia6_ndpr); /* for addr reference */ + NDPR_ADDREF(newia->ia6_ndpr); /* for addr reference */ NDPR_UNLOCK(newia->ia6_ndpr); IFA_UNLOCK(&newia->ia_ifa); /* @@ -4098,6 +4343,9 @@ in6_init_address_ltimes(struct nd_prefix *new, struct in6_addrlifetime *lt6) * Delete all the routing table entries that use the specified gateway. * XXX: this function causes search through all entries of routing table, so * it shouldn't be called when acting as a router. + * + * This should really be working on entries that have a specific + * parent route. */ void rt6_flush( @@ -4211,14 +4459,19 @@ nd6_setdefaultiface( * if the list is NOT empty. */ if (odef_ifp != NULL) { - defrouter_select(odef_ifp); + defrouter_select(odef_ifp, NULL); } if (nd6_defifp != NULL) { - defrouter_select(nd6_defifp); + defrouter_select(nd6_defifp, NULL); nd6_prefix_sync(nd6_defifp); } + /* + * XXX For now we managed RTI routes as un-scoped. + * Therefore we ignore the change in nd6_defifindex + * for RTI routes for now. + */ /* * Our current implementation assumes one-to-one mapping between * interfaces and links, so it would be natural to use the diff --git a/bsd/netinet6/nd6_send.c b/bsd/netinet6/nd6_send.c index cd59d78dd..fc74e1590 100644 --- a/bsd/netinet6/nd6_send.c +++ b/bsd/netinet6/nd6_send.c @@ -100,8 +100,8 @@ sysctl_cga_parameters SYSCTL_HANDLER_ARGS } if (req->newlen > SYSCTL_CGA_PARAMETERS_BUFFER_SIZE) { - log(LOG_ERR, "%s: input buffer size error [len=%u]\n", __func__, - req->newlen); + log(LOG_ERR, "%s: input buffer size error [len=%zu]\n", + __func__, req->newlen); return EINVAL; } diff --git a/bsd/netinet6/nd6_var.h b/bsd/netinet6/nd6_var.h index c16edced5..811554d6a 100644 --- a/bsd/netinet6/nd6_var.h +++ b/bsd/netinet6/nd6_var.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016 Apple Inc. All rights reserved. + * Copyright (c) 2016-2020 Apple Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -79,6 +79,7 @@ struct nd_ifinfo { int32_t ndefrouters; boolean_t cga_initialized; struct in6_cga_modifier local_cga_modifier; + uint8_t cga_collision_count; }; #endif /* BSD_KERNEL_PRIVATE */ #endif /* _NETINET6_ND6_VAR_H_ */ diff --git a/bsd/netinet6/raw_ip6.c b/bsd/netinet6/raw_ip6.c index 15b48d475..fc705f4f0 100644 --- a/bsd/netinet6/raw_ip6.c +++ b/bsd/netinet6/raw_ip6.c @@ -203,13 +203,17 @@ rip6_input( #if NECP if (n && !necp_socket_is_allowed_to_send_recv_v6(in6p, 0, 0, - &ip6->ip6_dst, &ip6->ip6_src, ifp, NULL, NULL, NULL)) { + &ip6->ip6_dst, &ip6->ip6_src, ifp, 0, NULL, NULL, NULL, NULL)) { m_freem(n); /* do not inject data into pcb */ } else #endif /* NECP */ if (n) { if ((last->in6p_flags & INP_CONTROLOPTS) != 0 || +#if CONTENT_FILTER + /* Content Filter needs to see local address */ + (last->in6p_socket->so_cfil_db != NULL) || +#endif (last->in6p_socket->so_options & SO_TIMESTAMP) != 0 || (last->in6p_socket->so_options & SO_TIMESTAMP_MONOTONIC) != 0 || (last->in6p_socket->so_options & SO_TIMESTAMP_CONTINUOUS) != 0) { @@ -239,7 +243,7 @@ rip6_input( #if NECP if (last && !necp_socket_is_allowed_to_send_recv_v6(in6p, 0, 0, - &ip6->ip6_dst, &ip6->ip6_src, ifp, NULL, NULL, NULL)) { + &ip6->ip6_dst, &ip6->ip6_src, ifp, 0, NULL, NULL, NULL, NULL)) { m_freem(m); ip6stat.ip6s_delivered--; /* do not inject data into pcb */ @@ -247,6 +251,10 @@ rip6_input( #endif /* NECP */ if (last) { if ((last->in6p_flags & INP_CONTROLOPTS) != 0 || +#if CONTENT_FILTER + /* Content Filter needs to see local address */ + (last->in6p_socket->so_cfil_db != NULL) || +#endif (last->in6p_socket->so_options & SO_TIMESTAMP) != 0 || (last->in6p_socket->so_options & SO_TIMESTAMP_MONOTONIC) != 0 || (last->in6p_socket->so_options & SO_TIMESTAMP_CONTINUOUS) != 0) { @@ -278,7 +286,7 @@ rip6_input( char *prvnxtp = ip6_get_prevhdr(m, *offp); /* XXX */ icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_NEXTHEADER, - prvnxtp - mtod(m, char *)); + (int)(prvnxtp - mtod(m, char *))); } ip6stat.ip6s_delivered--; } @@ -296,8 +304,8 @@ rip6_ctlinput( void *d, __unused struct ifnet *ifp) { - struct ip6_hdr *ip6; - struct mbuf *m; + struct ip6_hdr *ip6 = NULL; + struct mbuf *m = NULL; void *cmdarg = NULL; int off = 0; struct ip6ctlparam *ip6cp = NULL; @@ -332,6 +340,7 @@ rip6_ctlinput( } else { m = NULL; ip6 = NULL; + cmdarg = NULL; sa6_src = &sa6_any; } @@ -369,7 +378,7 @@ rip6_output( struct m_tag *cfil_tag = NULL; bool cfil_faddr_use = false; uint32_t cfil_so_state_change_cnt = 0; - short cfil_so_options = 0; + uint32_t cfil_so_options = 0; struct sockaddr *cfil_faddr = NULL; struct sockaddr_in6 *cfil_sin6 = NULL; #endif @@ -560,7 +569,7 @@ rip6_output( */ ifnet_head_lock_shared(); if (optp && (pi = optp->ip6po_pktinfo) && pi->ipi6_ifindex) { - ip6->ip6_dst.s6_addr16[1] = htons(pi->ipi6_ifindex); + ip6->ip6_dst.s6_addr16[1] = htons((uint16_t)pi->ipi6_ifindex); oifp = ifindex2ifnet[pi->ipi6_ifindex]; if (oifp != NULL) { ifnet_reference(oifp); @@ -673,6 +682,7 @@ rip6_output( necp_kernel_policy_id policy_id; necp_kernel_policy_id skip_policy_id; u_int32_t route_rule_id; + u_int32_t pass_flags; /* * We need a route to perform NECP route rule checks @@ -711,16 +721,15 @@ rip6_output( } if (!necp_socket_is_allowed_to_send_recv_v6(in6p, 0, 0, - &ip6->ip6_src, &ip6->ip6_dst, NULL, &policy_id, &route_rule_id, &skip_policy_id)) { + &ip6->ip6_src, &ip6->ip6_dst, NULL, 0, &policy_id, &route_rule_id, &skip_policy_id, &pass_flags)) { error = EHOSTUNREACH; goto bad; } - 
necp_mark_packet_from_socket(m, in6p, policy_id, route_rule_id, skip_policy_id); + necp_mark_packet_from_socket(m, in6p, policy_id, route_rule_id, skip_policy_id, pass_flags); if (net_qos_policy_restricted != 0) { - necp_socket_update_qos_marking(in6p, in6p->in6p_route.ro_rt, - NULL, route_rule_id); + necp_socket_update_qos_marking(in6p, in6p->in6p_route.ro_rt, route_rule_id); } } #endif /* NECP */ diff --git a/bsd/netinet6/route6.c b/bsd/netinet6/route6.c index 87af9c753..0a3d9b1ce 100644 --- a/bsd/netinet6/route6.c +++ b/bsd/netinet6/route6.c @@ -114,7 +114,7 @@ route6_input(struct mbuf **mp, int *offp, int proto) } ip6stat.ip6s_badoptions++; icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER, - (caddr_t)&rh->ip6r_type - (caddr_t)ip6); + (int)((caddr_t)&rh->ip6r_type - (caddr_t)ip6)); return IPPROTO_DONE; } diff --git a/bsd/netinet6/udp6_output.c b/bsd/netinet6/udp6_output.c index eba8e037e..8b8c031cd 100644 --- a/bsd/netinet6/udp6_output.c +++ b/bsd/netinet6/udp6_output.c @@ -170,14 +170,15 @@ udp6_output(struct in6pcb *in6p, struct mbuf *m, struct sockaddr *addr6, struct socket *so = in6p->in6p_socket; struct route_in6 ro; int flowadv = 0; + bool sndinprog_cnt_used = false; #if CONTENT_FILTER struct m_tag *cfil_tag = NULL; bool cfil_faddr_use = false; - bool sndinprog_cnt_used = false; uint32_t cfil_so_state_change_cnt = 0; struct sockaddr *cfil_faddr = NULL; struct sockaddr_in6 *cfil_sin6 = NULL; #endif + bool check_qos_marking_again = (so->so_flags1 & SOF1_QOSMARKING_POLICY_OVERRIDE) ? FALSE : TRUE; bzero(&ip6oa, sizeof(ip6oa)); ip6oa.ip6oa_boundif = IFSCOPE_NONE; @@ -439,6 +440,7 @@ udp6_output(struct in6pcb *in6p, struct mbuf *m, struct sockaddr *addr6, necp_kernel_policy_id policy_id; necp_kernel_policy_id skip_policy_id; u_int32_t route_rule_id; + u_int32_t pass_flags; /* * We need a route to perform NECP route rule checks @@ -472,22 +474,25 @@ udp6_output(struct in6pcb *in6p, struct mbuf *m, struct sockaddr *addr6, in6p->inp_policyresult.results.qos_marking_gencount = 0; } - if (!necp_socket_is_allowed_to_send_recv_v6(in6p, in6p->in6p_lport, fport, laddr, faddr, NULL, &policy_id, &route_rule_id, &skip_policy_id)) { + if (!necp_socket_is_allowed_to_send_recv_v6(in6p, in6p->in6p_lport, fport, laddr, faddr, NULL, 0, &policy_id, &route_rule_id, &skip_policy_id, &pass_flags)) { error = EHOSTUNREACH; goto release; } - necp_mark_packet_from_socket(m, in6p, policy_id, route_rule_id, skip_policy_id); + necp_mark_packet_from_socket(m, in6p, policy_id, route_rule_id, skip_policy_id, pass_flags); if (net_qos_policy_restricted != 0) { - necp_socket_update_qos_marking(in6p, in6p->in6p_route.ro_rt, - NULL, route_rule_id); + necp_socket_update_qos_marking(in6p, in6p->in6p_route.ro_rt, route_rule_id); } } #endif /* NECP */ if ((so->so_flags1 & SOF1_QOSMARKING_ALLOWED)) { ip6oa.ip6oa_flags |= IP6OAF_QOSMARKING_ALLOWED; } + if (check_qos_marking_again) { + ip6oa.ip6oa_flags |= IP6OAF_REDO_QOSMARKING_POLICY; + } + ip6oa.qos_marking_gencount = in6p->inp_policyresult.results.qos_marking_gencount; #if IPSEC if (in6p->in6p_sp != NULL && ipsec_setsocket(m, so) != 0) { @@ -542,6 +547,15 @@ udp6_output(struct in6pcb *in6p, struct mbuf *m, struct sockaddr *addr6, IM6O_REMREF(im6o); } + if (check_qos_marking_again) { + in6p->inp_policyresult.results.qos_marking_gencount = ip6oa.qos_marking_gencount; + if (ip6oa.ip6oa_flags & IP6OAF_QOSMARKING_ALLOWED) { + in6p->inp_socket->so_flags1 |= SOF1_QOSMARKING_ALLOWED; + } else { + in6p->inp_socket->so_flags1 &= ~SOF1_QOSMARKING_ALLOWED; + } + } + if (error == 
0 && nstat_collect) { boolean_t cell, wifi, wired; @@ -573,7 +587,7 @@ udp6_output(struct in6pcb *in6p, struct mbuf *m, struct sockaddr *addr6, if (ro.ro_rt != NULL) { struct ifnet *outif = ro.ro_rt->rt_ifp; - so->so_pktheadroom = P2ROUNDUP( + so->so_pktheadroom = (uint16_t)P2ROUNDUP( sizeof(struct udphdr) + hlen + ifnet_hdrlen(outif) + @@ -626,7 +640,7 @@ udp6_output(struct in6pcb *in6p, struct mbuf *m, struct sockaddr *addr6, if (outif != NULL && outif != in6p->in6p_last_outifp) { in6p->in6p_last_outifp = outif; - so->so_pktheadroom = P2ROUNDUP( + so->so_pktheadroom = (uint16_t)P2ROUNDUP( sizeof(struct udphdr) + hlen + ifnet_hdrlen(outif) + diff --git a/bsd/netinet6/udp6_usrreq.c b/bsd/netinet6/udp6_usrreq.c index 356d7b99d..3f589dae0 100644 --- a/bsd/netinet6/udp6_usrreq.c +++ b/bsd/netinet6/udp6_usrreq.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2019 Apple Inc. All rights reserved. + * Copyright (c) 2000-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -203,13 +203,11 @@ udp6_append(struct inpcb *last, struct ip6_hdr *ip6, boolean_t wifi = (!cell && IFNET_IS_WIFI(ifp)); boolean_t wired = (!wifi && IFNET_IS_WIRED(ifp)); -#if CONFIG_MACF_NET - if (mac_inpcb_check_deliver(last, n, AF_INET6, SOCK_DGRAM) != 0) { - m_freem(n); - return; - } -#endif /* CONFIG_MACF_NET */ if ((last->in6p_flags & INP_CONTROLOPTS) != 0 || +#if CONTENT_FILTER + /* Content Filter needs to see local address */ + (last->in6p_socket->so_cfil_db != NULL) || +#endif (last->in6p_socket->so_options & SO_TIMESTAMP) != 0 || (last->in6p_socket->so_options & SO_TIMESTAMP_MONOTONIC) != 0 || (last->in6p_socket->so_options & SO_TIMESTAMP_CONTINUOUS) != 0) { @@ -251,6 +249,7 @@ udp6_input(struct mbuf **mp, int *offp, int proto) struct sockaddr_in6 udp_in6; struct inpcbinfo *pcbinfo = &udbinfo; struct sockaddr_in6 fromsa; + u_int16_t pf_tag = 0; IP6_EXTHDR_CHECK(m, off, sizeof(struct udphdr), return IPPROTO_DONE); @@ -263,6 +262,10 @@ udp6_input(struct mbuf **mp, int *offp, int proto) wifi = (!cell && IFNET_IS_WIFI(ifp)); wired = (!wifi && IFNET_IS_WIRED(ifp)); + if (m->m_flags & M_PKTHDR) { + pf_tag = m_pftag(m)->pftag_tag; + } + udpstat.udps_ipackets++; plen = ntohs(ip6->ip6_plen) - off + sizeof(*ip6); @@ -412,7 +415,7 @@ udp6_input(struct mbuf **mp, int *offp, int proto) skipit = 0; if (!necp_socket_is_allowed_to_send_recv_v6(in6p, uh->uh_dport, uh->uh_sport, &ip6->ip6_dst, - &ip6->ip6_src, ifp, NULL, NULL, NULL)) { + &ip6->ip6_src, ifp, pf_tag, NULL, NULL, NULL, NULL)) { /* do not inject data to pcb */ skipit = 1; } @@ -579,7 +582,7 @@ udp6_input(struct mbuf **mp, int *offp, int proto) } #if NECP if (!necp_socket_is_allowed_to_send_recv_v6(in6p, uh->uh_dport, - uh->uh_sport, &ip6->ip6_dst, &ip6->ip6_src, ifp, NULL, NULL, NULL)) { + uh->uh_sport, &ip6->ip6_dst, &ip6->ip6_src, ifp, pf_tag, NULL, NULL, NULL, NULL)) { in_pcb_checkstate(in6p, WNT_RELEASE, 0); IF_UDP_STATINC(ifp, badipsec); goto bad; @@ -601,6 +604,10 @@ udp6_input(struct mbuf **mp, int *offp, int proto) init_sin6(&udp_in6, m); /* general init */ udp_in6.sin6_port = uh->uh_sport; if ((in6p->in6p_flags & INP_CONTROLOPTS) != 0 || +#if CONTENT_FILTER + /* Content Filter needs to see local address */ + (in6p->in6p_socket->so_cfil_db != NULL) || +#endif (in6p->in6p_socket->so_options & SO_TIMESTAMP) != 0 || (in6p->in6p_socket->so_options & SO_TIMESTAMP_MONOTONIC) != 0 || (in6p->in6p_socket->so_options & SO_TIMESTAMP_CONTINUOUS) != 0) { @@ -648,7 +655,9 @@ udp6_ctlinput(int cmd, struct sockaddr *sa, void *d, __unused struct ifnet 
*ifp) struct ip6ctlparam *ip6cp = NULL; struct icmp6_hdr *icmp6 = NULL; const struct sockaddr_in6 *sa6_src = NULL; + void *cmdarg = NULL; void (*notify)(struct inpcb *, int) = udp_notify; + struct inpcb *in6p; struct udp_portonly { u_int16_t uh_sport; u_int16_t uh_dport; @@ -678,10 +687,12 @@ udp6_ctlinput(int cmd, struct sockaddr *sa, void *d, __unused struct ifnet *ifp) m = ip6cp->ip6c_m; ip6 = ip6cp->ip6c_ip6; off = ip6cp->ip6c_off; + cmdarg = ip6cp->ip6c_cmdarg; sa6_src = ip6cp->ip6c_src; } else { m = NULL; ip6 = NULL; + cmdarg = NULL; sa6_src = &sa6_any; } @@ -698,9 +709,18 @@ udp6_ctlinput(int cmd, struct sockaddr *sa, void *d, __unused struct ifnet *ifp) bzero(&uh, sizeof(uh)); m_copydata(m, off, sizeof(*uhp), (caddr_t)&uh); + in6p = in6_pcblookup_hash(&udbinfo, &ip6->ip6_dst, uh.uh_dport, + &ip6->ip6_src, uh.uh_sport, 0, NULL); + if (cmd == PRC_MSGSIZE && in6p != NULL && !uuid_is_null(in6p->necp_client_uuid)) { + uuid_t null_uuid; + uuid_clear(null_uuid); + necp_update_flow_protoctl_event(null_uuid, in6p->necp_client_uuid, + PRC_MSGSIZE, ntohl(icmp6->icmp6_mtu), 0); + } + (void) in6_pcbnotify(&udbinfo, sa, uh.uh_dport, (struct sockaddr*)ip6cp->ip6c_src, uh.uh_sport, - cmd, NULL, notify); + cmd, cmdarg, notify); } /* * XXX The else condition here was broken for a long time. @@ -761,7 +781,7 @@ udp6_attach(struct socket *so, int proto, struct proc *p) * because the socket may be bound to an IPv6 wildcard address, * which may match an IPv4-mapped IPv6 address. */ - inp->inp_ip_ttl = ip_defttl; + inp->inp_ip_ttl = (u_char)ip_defttl; if (nstat_collect) { nstat_udp_new_pcb(inp); } @@ -874,14 +894,9 @@ udp6_connect(struct socket *so, struct sockaddr *nam, struct proc *p) #if defined(NECP) && defined(FLOW_DIVERT) do_flow_divert: if (should_use_flow_divert) { - uint32_t fd_ctl_unit = necp_socket_get_flow_divert_control_unit(inp); - if (fd_ctl_unit > 0) { - error = flow_divert_pcb_init(so, fd_ctl_unit); - if (error == 0) { - error = flow_divert_connect_out(so, nam, p); - } - } else { - error = ENETDOWN; + error = flow_divert_pcb_init(so); + if (error == 0) { + error = flow_divert_connect_out(so, nam, p); } return error; } diff --git a/bsd/netkey/key.c b/bsd/netkey/key.c index 2639379d1..235d4fcc1 100644 --- a/bsd/netkey/key.c +++ b/bsd/netkey/key.c @@ -91,11 +91,9 @@ #include #include -#if INET6 #include #include #include -#endif /* INET6 */ #include #include @@ -106,19 +104,13 @@ #include #include -#if INET6 #include -#endif #include -#if INET6 #include -#endif #if IPSEC_ESP #include -#if INET6 #include #endif -#endif /* randomness */ @@ -447,7 +439,7 @@ static struct secasvar *key_do_allocsa_policy(struct secashead *, u_int, u_int16 static int key_do_get_translated_port(struct secashead *, struct secasvar *, u_int); static void key_delsp(struct secpolicy *); static struct secpolicy *key_getsp(struct secpolicyindex *); -static u_int32_t key_newreqid(void); +static u_int16_t key_newreqid(void); static struct mbuf *key_gather_mbuf(struct mbuf *, const struct sadb_msghdr *, int, int, int *); static int key_spdadd(struct socket *, struct mbuf *, @@ -488,19 +480,13 @@ static struct mbuf *key_setsadbmsg(u_int8_t, u_int16_t, u_int8_t, u_int32_t, pid_t, u_int16_t); static struct mbuf *key_setsadbsa(struct secasvar *); static struct mbuf *key_setsadbaddr(u_int16_t, - struct sockaddr *, u_int8_t, u_int16_t); -static struct mbuf *key_setsadbipsecif(ifnet_t, ifnet_t, ifnet_t, int); -#if 0 -static struct mbuf *key_setsadbident(u_int16_t, u_int16_t, caddr_t, - int, u_int64_t); -#endif + struct sockaddr 
*, size_t, u_int8_t); +static struct mbuf *key_setsadbipsecif(ifnet_t, ifnet_t, ifnet_t, u_int8_t); static struct mbuf *key_setsadbxsa2(u_int8_t, u_int32_t, u_int32_t, u_int16_t); static struct mbuf *key_setsadbxpolicy(u_int16_t, u_int8_t, u_int32_t); static void *key_newbuf(const void *, u_int); -#if INET6 static int key_ismyaddr6(struct sockaddr_in6 *); -#endif static void key_update_natt_keepalive_timestamp(struct secasvar *, struct secasvar *); /* flags for key_cmpsaidx() */ @@ -519,7 +505,7 @@ static int key_sockaddrcmp(struct sockaddr *, struct sockaddr *, int); static int key_is_addr_in_range(struct sockaddr_storage *, struct secpolicyaddrrange *); static int key_bbcmp(caddr_t, caddr_t, u_int); static void key_srandom(void); -static u_int16_t key_satype2proto(u_int8_t); +static u_int8_t key_satype2proto(u_int8_t); static u_int8_t key_proto2satype(u_int16_t); static int key_getspi(struct socket *, struct mbuf *, @@ -528,8 +514,6 @@ static u_int32_t key_do_getnewspi(struct sadb_spirange *, struct secasindex *); static int key_update(struct socket *, struct mbuf *, const struct sadb_msghdr *); static int key_add(struct socket *, struct mbuf *, const struct sadb_msghdr *); -static int key_setident(struct secashead *, struct mbuf *, - const struct sadb_msghdr *); static struct mbuf *key_getmsgbuf_x1(struct mbuf *, const struct sadb_msghdr *); static int key_delete(struct socket *, struct mbuf *, const struct sadb_msghdr *); @@ -566,22 +550,6 @@ static int key_align(struct mbuf *, struct sadb_msghdr *); static struct mbuf *key_alloc_mbuf(int); static int key_getsastat(struct socket *, struct mbuf *, const struct sadb_msghdr *); static int key_migrate(struct socket *, struct mbuf *, const struct sadb_msghdr *); -static int key_setsaval2(struct secasvar *sav, - u_int8_t satype, - u_int8_t alg_auth, - u_int8_t alg_enc, - u_int32_t flags, - u_int8_t replay, - struct sadb_key *key_auth, - u_int16_t key_auth_len, - struct sadb_key *key_enc, - u_int16_t key_enc_len, - u_int16_t natt_port, - u_int32_t seq, - u_int32_t spi, - u_int32_t pid, - struct sadb_lifetime *lifetime_hard, - struct sadb_lifetime *lifetime_soft); static void bzero_keys(const struct sadb_msghdr *); extern int ipsec_bypass; @@ -655,10 +623,8 @@ key_init(struct protosw *pp, struct domain *dp) ip4_def_policy.policy = IPSEC_POLICY_NONE; ip4_def_policy.refcnt++; /*never reclaim this*/ #endif -#if INET6 ip6_def_policy.policy = IPSEC_POLICY_NONE; ip6_def_policy.refcnt++; /*never reclaim this*/ -#endif key_timehandler_running = 0; @@ -1086,7 +1052,7 @@ key_send_delete(struct secasvar *sav) } m = key_setsadbmsg(SADB_DELETE, 0, - satype, 0, 0, sav->refcnt - 1); + satype, 0, 0, (u_int16_t)(sav->refcnt - 1)); if (!m) { goto msgfail; } @@ -1131,8 +1097,10 @@ key_send_delete(struct secasvar *sav) for (m = result; m; m = m->m_next) { result->m_pkthdr.len += m->m_len; } + + VERIFY(PFKEY_UNIT64(result->m_pkthdr.len) <= UINT16_MAX); mtod(result, struct sadb_msg *)->sadb_msg_len = - PFKEY_UNIT64(result->m_pkthdr.len); + (u_int16_t)PFKEY_UNIT64(result->m_pkthdr.len); if (key_sendup_mbuf(NULL, result, KEY_SENDUP_REGISTERED)) { @@ -1374,45 +1342,6 @@ key_allocsa_extended(u_int family, continue; } -#if 0 /* don't check src */ - /* check src address */ - switch (family) { - case AF_INET: - bzero(&sin, sizeof(sin)); - sin.sin_family = AF_INET; - sin.sin_len = sizeof(sin); - bcopy(src, &sin.sin_addr, - sizeof(sin.sin_addr)); - if (key_sockaddrcmp((struct sockaddr*)&sin, - (struct sockaddr *)&sav->sah->saidx.src, 0) != 0) { - continue; - } - break; - 
case AF_INET6: - bzero(&sin6, sizeof(sin6)); - sin6.sin6_family = AF_INET6; - sin6.sin6_len = sizeof(sin6); - bcopy(src, &sin6.sin6_addr, - sizeof(sin6.sin6_addr)); - if (IN6_IS_SCOPE_LINKLOCAL(&sin6.sin6_addr)) { - /* kame fake scopeid */ - sin6.sin6_scope_id = - ntohs(sin6.sin6_addr.s6_addr16[1]); - sin6.sin6_addr.s6_addr16[1] = 0; - } - if (key_sockaddrcmp((struct sockaddr*)&sin6, - (struct sockaddr *)&sav->sah->saidx.src, 0) != 0) { - continue; - } - break; - default: - ipseclog((LOG_DEBUG, "key_allocsa: " - "unknown address family=%d.\n", - family)); - continue; - } - -#endif /* check dst address */ switch (family) { case AF_INET: @@ -2110,7 +2039,7 @@ key_msg2sp( /* allocate new reqid id if reqid is zero. */ if (xisr->sadb_x_ipsecrequest_reqid == 0) { - u_int32_t reqid; + u_int16_t reqid; if ((reqid = key_newreqid()) == 0) { key_freesp(newsp, KEY_SADB_UNLOCKED); *error = ENOBUFS; @@ -2226,11 +2155,11 @@ key_msg2sp( return newsp; } -static u_int32_t +static u_int16_t key_newreqid(void) { lck_mtx_lock(sadb_mutex); - static u_int32_t auto_reqid = IPSEC_MANUAL_REQID_MAX + 1; + static u_int16_t auto_reqid = IPSEC_MANUAL_REQID_MAX + 1; int done = 0; /* The reqid must be limited to 16 bits because the PF_KEY message format only uses @@ -2278,7 +2207,7 @@ key_sp2msg( struct secpolicy *sp) { struct sadb_x_policy *xpl; - int tlen; + u_int tlen; caddr_t p; struct mbuf *m; @@ -2288,6 +2217,11 @@ key_sp2msg( } tlen = key_getspreqmsglen(sp); + if (PFKEY_UNIT64(tlen) > UINT16_MAX) { + ipseclog((LOG_ERR, "key_getspreqmsglen returned length %u\n", + tlen)); + return NULL; + } m = key_alloc_mbuf(tlen); if (!m || m->m_next) { /*XXX*/ @@ -2302,9 +2236,9 @@ key_sp2msg( xpl = mtod(m, struct sadb_x_policy *); bzero(xpl, tlen); - xpl->sadb_x_policy_len = PFKEY_UNIT64(tlen); + xpl->sadb_x_policy_len = (u_int16_t)PFKEY_UNIT64(tlen); xpl->sadb_x_policy_exttype = SADB_X_EXT_POLICY; - xpl->sadb_x_policy_type = sp->policy; + xpl->sadb_x_policy_type = (u_int16_t)sp->policy; xpl->sadb_x_policy_dir = sp->spidx.dir; xpl->sadb_x_policy_id = sp->id; p = (caddr_t)xpl + sizeof(*xpl); @@ -2319,8 +2253,8 @@ key_sp2msg( xisr->sadb_x_ipsecrequest_proto = isr->saidx.proto; xisr->sadb_x_ipsecrequest_mode = isr->saidx.mode; - xisr->sadb_x_ipsecrequest_level = isr->level; - xisr->sadb_x_ipsecrequest_reqid = isr->saidx.reqid; + xisr->sadb_x_ipsecrequest_level = (u_int8_t)isr->level; + xisr->sadb_x_ipsecrequest_reqid = (u_int16_t)isr->saidx.reqid; p += sizeof(*xisr); bcopy(&isr->saidx.src, p, isr->saidx.src.ss_len); @@ -2688,8 +2622,8 @@ key_spdadd( microtime(&tv); newsp->created = tv.tv_sec; newsp->lastused = tv.tv_sec; - newsp->lifetime = lft ? lft->sadb_lifetime_addtime : 0; - newsp->validtime = lft ? lft->sadb_lifetime_usetime : 0; + newsp->lifetime = (long)(lft ? lft->sadb_lifetime_addtime : 0); + newsp->validtime = (long)(lft ? 
lft->sadb_lifetime_usetime : 0); if (outgoing_if != NULL) { ifnet_find_by_name(outgoing_if, &newsp->outgoing_if); @@ -2775,7 +2709,9 @@ key_spdadd( } newmsg = mtod(n, struct sadb_msg *); newmsg->sadb_msg_errno = 0; - newmsg->sadb_msg_len = PFKEY_UNIT64(n->m_pkthdr.len); + + VERIFY(PFKEY_UNIT64(n->m_pkthdr.len) <= UINT16_MAX); + newmsg->sadb_msg_len = (u_int16_t)PFKEY_UNIT64(n->m_pkthdr.len); off = 0; mpolicy = m_pulldown(n, PFKEY_ALIGN8(sizeof(struct sadb_msg)), @@ -2975,7 +2911,8 @@ key_spddelete( newmsg = mtod(n, struct sadb_msg *); newmsg->sadb_msg_errno = 0; - newmsg->sadb_msg_len = PFKEY_UNIT64(n->m_pkthdr.len); + VERIFY(PFKEY_UNIT64(n->m_pkthdr.len) <= UINT16_MAX); + newmsg->sadb_msg_len = (u_int16_t)PFKEY_UNIT64(n->m_pkthdr.len); m_freem(m); return key_sendup_mbuf(so, n, KEY_SENDUP_ALL); @@ -3082,7 +3019,8 @@ key_spddelete2( newmsg = mtod(n, struct sadb_msg *); newmsg->sadb_msg_errno = 0; - newmsg->sadb_msg_len = PFKEY_UNIT64(n->m_pkthdr.len); + VERIFY(PFKEY_UNIT64(n->m_pkthdr.len) <= UINT16_MAX); + newmsg->sadb_msg_len = (u_int16_t)PFKEY_UNIT64(n->m_pkthdr.len); m_freem(m); return key_sendup_mbuf(so, n, KEY_SENDUP_ALL); @@ -3146,7 +3084,8 @@ key_spdenable( } newmsg = mtod(n, struct sadb_msg *); newmsg->sadb_msg_errno = 0; - newmsg->sadb_msg_len = PFKEY_UNIT64(n->m_pkthdr.len); + VERIFY(PFKEY_UNIT64(n->m_pkthdr.len) <= UINT16_MAX); + newmsg->sadb_msg_len = (u_int16_t)PFKEY_UNIT64(n->m_pkthdr.len); m_freem(m); return key_sendup_mbuf(so, n, KEY_SENDUP_ALL); @@ -3210,7 +3149,8 @@ key_spddisable( } newmsg = mtod(n, struct sadb_msg *); newmsg->sadb_msg_errno = 0; - newmsg->sadb_msg_len = PFKEY_UNIT64(n->m_pkthdr.len); + VERIFY(PFKEY_UNIT64(n->m_pkthdr.len) <= UINT16_MAX); + newmsg->sadb_msg_len = (u_int16_t)PFKEY_UNIT64(n->m_pkthdr.len); m_freem(m); return key_sendup_mbuf(so, n, KEY_SENDUP_ALL); @@ -3311,7 +3251,9 @@ key_spdacquire( /* get a entry to check whether sent message or not. */ lck_mtx_lock(sadb_mutex); + sp->refcnt++; if ((newspacq = key_getspacq(&sp->spidx)) != NULL) { + key_freesp(sp, KEY_SADB_LOCKED); if (key_blockacq_count < newspacq->count) { /* reset counter and do send message. */ newspacq->count = 0; @@ -3324,9 +3266,11 @@ key_spdacquire( } else { /* make new entry for blocking to send SADB_ACQUIRE. 
*/ if ((newspacq = key_newspacq(&sp->spidx)) == NULL) { + key_freesp(sp, KEY_SADB_LOCKED); lck_mtx_unlock(sadb_mutex); return ENOBUFS; } + key_freesp(sp, KEY_SADB_LOCKED); /* add to acqtree */ LIST_INSERT_HEAD(&spacqtree, newspacq, chain); key_start_timehandler(); @@ -3345,8 +3289,9 @@ key_spdacquire( result->m_pkthdr.len += m->m_len; } + VERIFY(PFKEY_UNIT64(result->m_pkthdr.len) <= UINT16_MAX); mtod(result, struct sadb_msg *)->sadb_msg_len = - PFKEY_UNIT64(result->m_pkthdr.len); + (u_int16_t)PFKEY_UNIT64(result->m_pkthdr.len); return key_sendup_mbuf(NULL, m, KEY_SENDUP_REGISTERED); @@ -3408,7 +3353,7 @@ key_spdflush( m->m_pkthdr.len = m->m_len = PFKEY_ALIGN8(sizeof(struct sadb_msg)); newmsg = mtod(m, struct sadb_msg *); newmsg->sadb_msg_errno = 0; - newmsg->sadb_msg_len = PFKEY_UNIT64(m->m_pkthdr.len); + newmsg->sadb_msg_len = (u_int16_t)PFKEY_UNIT64(m->m_pkthdr.len); return key_sendup_mbuf(so, m, KEY_SENDUP_ALL); } @@ -3515,13 +3460,13 @@ end: static struct mbuf * key_setdumpsp( struct secpolicy *sp, - u_int8_t type, + u_int8_t msg_type, u_int32_t seq, u_int32_t pid) { struct mbuf *result = NULL, *m; - m = key_setsadbmsg(type, 0, SADB_SATYPE_UNSPEC, seq, pid, sp->refcnt); + m = key_setsadbmsg(msg_type, 0, SADB_SATYPE_UNSPEC, seq, pid, (u_int16_t)sp->refcnt); if (!m) { goto fail; } @@ -3609,8 +3554,13 @@ key_setdumpsp( result->m_pkthdr.len += m->m_len; } + if (PFKEY_UNIT64(result->m_pkthdr.len) >= UINT16_MAX) { + ipseclog((LOG_DEBUG, "key_setdumpsp: packet header length > UINT16_MAX\n")); + goto fail; + } + mtod(result, struct sadb_msg *)->sadb_msg_len = - PFKEY_UNIT64(result->m_pkthdr.len); + (u_int16_t)PFKEY_UNIT64(result->m_pkthdr.len); return result; @@ -3798,8 +3748,13 @@ key_spdexpire( result->m_pkthdr.len += m->m_len; } + if (PFKEY_UNIT64(result->m_pkthdr.len) >= UINT16_MAX) { + ipseclog((LOG_DEBUG, "key_setdumpsp: packet header length > UINT16_MAX\n")); + goto fail; + } + mtod(result, struct sadb_msg *)->sadb_msg_len = - PFKEY_UNIT64(result->m_pkthdr.len); + (u_int16_t)PFKEY_UNIT64(result->m_pkthdr.len); return key_sendup_mbuf(NULL, result, KEY_SENDUP_REGISTERED); @@ -3899,6 +3854,10 @@ key_delsah( panic("key_delsah: NULL pointer is passed.\n"); } + if (sah->use_count > 0) { + return; + } + /* searching all SA registerd in the secindex. */ for (stateidx = 0; stateidx < _ARRAYLEN(saorder_state_any); @@ -3938,14 +3897,6 @@ key_delsah( sah->ipsec_if = NULL; } - if (sah->idents) { - KFREE(sah->idents); - } - - if (sah->identd) { - KFREE(sah->identd); - } - /* remove from tree of SA index */ if (__LIST_CHAINED(sah)) { LIST_REMOVE(sah, chain); @@ -4109,104 +4060,6 @@ key_newsav( return newsav; } -/* - * allocating a new SA with LARVAL state. key_add() and key_getspi() call, - * and copy the values passed into new buffer. - * When SAD message type is GETSPI: - * to set sequence number from acq_seq++, - * to set zero to SPI. - * not to call key_setsava(). - * OUT: NULL : fail - * others : pointer to new secasvar. 
- */ -struct secasvar * -key_newsav2(struct secashead *sah, - u_int8_t satype, - u_int8_t alg_auth, - u_int8_t alg_enc, - u_int32_t flags, - u_int8_t replay, - struct sadb_key *key_auth, - u_int16_t key_auth_len, - struct sadb_key *key_enc, - u_int16_t key_enc_len, - u_int16_t natt_port, - u_int32_t seq, - u_int32_t spi, - u_int32_t pid, - struct sadb_lifetime *lifetime_hard, - struct sadb_lifetime *lifetime_soft) -{ - struct secasvar *newsav; - - LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_OWNED); - - /* sanity check */ - if (sah == NULL) { - panic("key_newsa: NULL pointer is passed.\n"); - } - - KMALLOC_NOWAIT(newsav, struct secasvar *, sizeof(struct secasvar)); - if (newsav == NULL) { - lck_mtx_unlock(sadb_mutex); - KMALLOC_WAIT(newsav, struct secasvar *, sizeof(struct secasvar)); - lck_mtx_lock(sadb_mutex); - if (newsav == NULL) { - ipseclog((LOG_DEBUG, "key_newsa: No more memory.\n")); - return NULL; - } - } - bzero((caddr_t)newsav, sizeof(struct secasvar)); - - newsav->seq = seq; - key_setspi(newsav, spi); - - if (key_setsaval2(newsav, - satype, - alg_auth, - alg_enc, - flags, - replay, - key_auth, - key_auth_len, - key_enc, - key_enc_len, - natt_port, - seq, - spi, - pid, - lifetime_hard, - lifetime_soft)) { - key_delsav(newsav); - return NULL; - } - - /* reset created */ - { - struct timeval tv; - microtime(&tv); - newsav->created = tv.tv_sec; - } - - newsav->pid = pid; - - /* add to satree */ - newsav->sah = sah; - newsav->refcnt = 1; - if (spi && key_auth && key_auth_len && key_enc && key_enc_len) { - newsav->state = SADB_SASTATE_MATURE; - LIST_INSERT_TAIL(&sah->savtree[SADB_SASTATE_MATURE], newsav, - secasvar, chain); - } else { - newsav->state = SADB_SASTATE_LARVAL; - LIST_INSERT_TAIL(&sah->savtree[SADB_SASTATE_LARVAL], newsav, - secasvar, chain); - } - ipsec_sav_count++; - - return newsav; -} - static int key_migratesav(struct secasvar *sav, struct secashead *newsah) @@ -4225,12 +4078,8 @@ key_migratesav(struct secasvar *sav, return 0; } -/* - * free() SA variable entry. - */ -void -key_delsav( - struct secasvar *sav) +static void +key_reset_sav(struct secasvar *sav) { LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_OWNED); @@ -4239,18 +4088,8 @@ key_delsav( panic("key_delsav: NULL pointer is passed.\n"); } - if (sav->refcnt > 0) { - return; /* can't free */ - } - /* remove from SA header */ - if (__LIST_CHAINED(sav)) { - LIST_REMOVE(sav, chain); - ipsec_sav_count--; - } - - if (sav->spihash.le_prev || sav->spihash.le_next) { - LIST_REMOVE(sav, spihash); - } + sav->remote_ike_port = 0; + sav->natt_encapsulated_src_port = 0; if (sav->key_auth != NULL) { bzero(_KEYBUF(sav->key_auth), _KEYLEN(sav->key_auth)); @@ -4266,6 +4105,7 @@ key_delsav( bzero(sav->sched, sav->schedlen); KFREE(sav->sched); sav->sched = NULL; + sav->schedlen = 0; } for (int i = 0; i < MAX_REPLAY_WINDOWS; i++) { @@ -4291,6 +4131,38 @@ key_delsav( sav->iv = NULL; } + return; +} + +/* + * free() SA variable entry. 
+ */ +void +key_delsav( + struct secasvar *sav) +{ + LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_OWNED); + + /* sanity check */ + if (sav == NULL) { + panic("key_delsav: NULL pointer is passed.\n"); + } + + if (sav->refcnt > 0) { + return; /* can't free */ + } + /* remove from SA header */ + if (__LIST_CHAINED(sav)) { + LIST_REMOVE(sav, chain); + ipsec_sav_count--; + } + + if (sav->spihash.le_prev || sav->spihash.le_next) { + LIST_REMOVE(sav, spihash); + } + + key_reset_sav(sav); + KFREE(sav); return; @@ -4472,20 +4344,8 @@ key_setsaval( } /* initialization */ - for (int i = 0; i < MAX_REPLAY_WINDOWS; i++) { - sav->replay[i] = NULL; - } - sav->key_auth = NULL; - sav->key_enc = NULL; - sav->sched = NULL; - sav->schedlen = 0; - sav->iv = NULL; - sav->lft_c = NULL; - sav->lft_h = NULL; - sav->lft_s = NULL; - sav->remote_ike_port = 0; + key_reset_sav(sav); sav->natt_last_activity = natt_now; - sav->natt_encapsulated_src_port = 0; /* SA */ if (mhp->ext[SADB_EXT_SA] != NULL) { @@ -4752,292 +4612,7 @@ key_setsaval( return 0; fail: - /* initialization */ - for (int i = 0; i < MAX_REPLAY_WINDOWS; i++) { - if (sav->replay[i] != NULL) { - keydb_delsecreplay(sav->replay[i]); - sav->replay[i] = NULL; - } - } - if (sav->key_auth != NULL) { - bzero(_KEYBUF(sav->key_auth), _KEYLEN(sav->key_auth)); - KFREE(sav->key_auth); - sav->key_auth = NULL; - } - if (sav->key_enc != NULL) { - bzero(_KEYBUF(sav->key_enc), _KEYLEN(sav->key_enc)); - KFREE(sav->key_enc); - sav->key_enc = NULL; - } - if (sav->sched) { - bzero(sav->sched, sav->schedlen); - KFREE(sav->sched); - sav->sched = NULL; - } - if (sav->iv != NULL) { - KFREE(sav->iv); - sav->iv = NULL; - } - if (sav->lft_c != NULL) { - KFREE(sav->lft_c); - sav->lft_c = NULL; - } - if (sav->lft_h != NULL) { - KFREE(sav->lft_h); - sav->lft_h = NULL; - } - if (sav->lft_s != NULL) { - KFREE(sav->lft_s); - sav->lft_s = NULL; - } - - return error; -} - -/* - * copy SA values from PF_KEY message except *SPI, SEQ, PID, STATE and TYPE*. - * You must update these if need. - * OUT: 0: success. - * !0: failure. - * - * does not modify mbuf. does not free mbuf on error. - */ -int -key_setsaval2(struct secasvar *sav, - u_int8_t satype, - u_int8_t alg_auth, - u_int8_t alg_enc, - u_int32_t flags, - u_int8_t replay, - struct sadb_key *key_auth, - u_int16_t key_auth_len, - struct sadb_key *key_enc, - u_int16_t key_enc_len, - u_int16_t natt_port, - u_int32_t seq, - u_int32_t spi, - u_int32_t pid, - struct sadb_lifetime *lifetime_hard, - struct sadb_lifetime *lifetime_soft) -{ -#if IPSEC_ESP - const struct esp_algorithm *algo; -#endif - int error = 0; - struct timeval tv; - - LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_OWNED); - - /* initialization */ - for (int i = 0; i < MAX_REPLAY_WINDOWS; i++) { - sav->replay[i] = NULL; - } - - sav->key_auth = NULL; - sav->key_enc = NULL; - sav->sched = NULL; - sav->schedlen = 0; - sav->iv = NULL; - sav->lft_c = NULL; - sav->lft_h = NULL; - sav->lft_s = NULL; - sav->remote_ike_port = 0; - sav->natt_last_activity = natt_now; - sav->natt_encapsulated_src_port = 0; - - sav->alg_auth = alg_auth; - sav->alg_enc = alg_enc; - sav->flags = flags; - sav->pid = pid; - sav->seq = seq; - key_setspi(sav, htonl(spi)); - - /* - * Verify that a nat-traversal port was specified if - * the nat-traversal flag is set. 
- */ - if ((sav->flags & SADB_X_EXT_NATT) != 0) { - if (natt_port == 0) { - ipseclog((LOG_DEBUG, "key_setsaval2: natt port not set.\n")); - error = EINVAL; - goto fail; - } - sav->remote_ike_port = natt_port; - } - - /* - * Verify if SADB_X_EXT_NATT_MULTIPLEUSERS flag is set that - * SADB_X_EXT_NATT is set and SADB_X_EXT_NATT_KEEPALIVE is not - * set (we're not behind nat) - otherwise clear it. - */ - if ((sav->flags & SADB_X_EXT_NATT_MULTIPLEUSERS) != 0) { - if ((sav->flags & SADB_X_EXT_NATT) == 0 || - (sav->flags & SADB_X_EXT_NATT_KEEPALIVE) != 0) { - sav->flags &= ~SADB_X_EXT_NATT_MULTIPLEUSERS; - } - } - - /* replay window */ - if ((flags & SADB_X_EXT_OLD) == 0) { - if ((sav->flags2 & SADB_X_EXT_SA2_SEQ_PER_TRAFFIC_CLASS) == - SADB_X_EXT_SA2_SEQ_PER_TRAFFIC_CLASS) { - uint32_t range = (1ULL << (sizeof(((struct secreplay *)0)->count) * 8)) / MAX_REPLAY_WINDOWS; - for (int i = 0; i < MAX_REPLAY_WINDOWS; i++) { - sav->replay[i] = keydb_newsecreplay(replay); - if (sav->replay[i] == NULL) { - ipseclog((LOG_DEBUG, "key_setsaval: No more memory.\n")); - error = ENOBUFS; - goto fail; - } - /* Allowed range for sequence per traffic class */ - sav->replay[i]->count = i * range; - sav->replay[i]->lastseq = ((i + 1) * range) - 1; - } - } else { - sav->replay[0] = keydb_newsecreplay(replay); - if (sav->replay[0] == NULL) { - ipseclog((LOG_DEBUG, "key_setsaval: No more memory.\n")); - error = ENOBUFS; - goto fail; - } - sav->replay[0]->lastseq = ~0; - } - } - - /* Authentication keys */ - sav->key_auth = (__typeof__(sav->key_auth))key_newbuf(key_auth, key_auth_len); - if (sav->key_auth == NULL) { - ipseclog((LOG_DEBUG, "key_setsaval: No more memory.\n")); - error = ENOBUFS; - goto fail; - } - - /* Encryption key */ - sav->key_enc = (__typeof__(sav->key_enc))key_newbuf(key_enc, key_enc_len); - if (sav->key_enc == NULL) { - ipseclog((LOG_DEBUG, "key_setsaval: No more memory.\n")); - error = ENOBUFS; - goto fail; - } - - /* set iv */ - sav->ivlen = 0; - - if (satype == SADB_SATYPE_ESP) { -#if IPSEC_ESP - algo = esp_algorithm_lookup(sav->alg_enc); - if (algo && algo->ivlen) { - sav->ivlen = (*algo->ivlen)(algo, sav); - } - if (sav->ivlen != 0) { - KMALLOC_NOWAIT(sav->iv, caddr_t, sav->ivlen); - if (sav->iv == 0) { - lck_mtx_unlock(sadb_mutex); - KMALLOC_WAIT(sav->iv, caddr_t, sav->ivlen); - lck_mtx_lock(sadb_mutex); - if (sav->iv == 0) { - ipseclog((LOG_DEBUG, "key_setsaval: No more memory.\n")); - error = ENOBUFS; - goto fail; - } - } - /* initialize */ - if (sav->alg_enc == SADB_X_EALG_AES_GCM) { - bzero(sav->iv, sav->ivlen); - } else { - key_randomfill(sav->iv, sav->ivlen); - } - } -#endif - } - - /* reset created */ - microtime(&tv); - sav->created = tv.tv_sec; - - /* make lifetime for CURRENT */ - KMALLOC_NOWAIT(sav->lft_c, struct sadb_lifetime *, - sizeof(struct sadb_lifetime)); - if (sav->lft_c == NULL) { - lck_mtx_unlock(sadb_mutex); - KMALLOC_WAIT(sav->lft_c, struct sadb_lifetime *, - sizeof(struct sadb_lifetime)); - lck_mtx_lock(sadb_mutex); - if (sav->lft_c == NULL) { - ipseclog((LOG_DEBUG, "key_setsaval: No more memory.\n")); - error = ENOBUFS; - goto fail; - } - } - - microtime(&tv); - - sav->lft_c->sadb_lifetime_len = - PFKEY_UNIT64(sizeof(struct sadb_lifetime)); - sav->lft_c->sadb_lifetime_exttype = SADB_EXT_LIFETIME_CURRENT; - sav->lft_c->sadb_lifetime_allocations = 0; - sav->lft_c->sadb_lifetime_bytes = 0; - sav->lft_c->sadb_lifetime_addtime = tv.tv_sec; - sav->lft_c->sadb_lifetime_usetime = 0; - - /* lifetimes for HARD and SOFT */ - sav->lft_h = 
(__typeof__(sav->lft_h))key_newbuf(lifetime_hard, - sizeof(*lifetime_hard)); - if (sav->lft_h == NULL) { - ipseclog((LOG_DEBUG, "key_setsaval: No more memory.\n")); - error = ENOBUFS; - goto fail; - } - sav->lft_s = (__typeof__(sav->lft_s))key_newbuf(lifetime_soft, - sizeof(*lifetime_soft)); - if (sav->lft_s == NULL) { - ipseclog((LOG_DEBUG, "key_setsaval: No more memory.\n")); - error = ENOBUFS; - goto fail; - } - - return 0; - -fail: - /* initialization */ - for (int i = 0; i < MAX_REPLAY_WINDOWS; i++) { - if (sav->replay[i] != NULL) { - keydb_delsecreplay(sav->replay[i]); - sav->replay[i] = NULL; - } - } - if (sav->key_auth != NULL) { - bzero(_KEYBUF(sav->key_auth), _KEYLEN(sav->key_auth)); - KFREE(sav->key_auth); - sav->key_auth = NULL; - } - if (sav->key_enc != NULL) { - bzero(_KEYBUF(sav->key_enc), _KEYLEN(sav->key_enc)); - KFREE(sav->key_enc); - sav->key_enc = NULL; - } - if (sav->sched) { - bzero(sav->sched, sav->schedlen); - KFREE(sav->sched); - sav->sched = NULL; - } - if (sav->iv != NULL) { - KFREE(sav->iv); - sav->iv = NULL; - } - if (sav->lft_c != NULL) { - KFREE(sav->lft_c); - sav->lft_c = NULL; - } - if (sav->lft_h != NULL) { - KFREE(sav->lft_h); - sav->lft_h = NULL; - } - if (sav->lft_s != NULL) { - KFREE(sav->lft_s); - sav->lft_s = NULL; - } - + key_reset_sav(sav); return error; } @@ -5225,7 +4800,7 @@ key_setdumpsa( SADB_EXT_IDENTITY_DST, SADB_EXT_SENSITIVITY, }; - m = key_setsadbmsg(type, 0, satype, seq, pid, sav->refcnt); + m = key_setsadbmsg(type, 0, satype, seq, pid, (u_int16_t)sav->refcnt); if (m == NULL) { goto fail; } @@ -5366,8 +4941,9 @@ key_setdumpsa( result->m_pkthdr.len += m->m_len; } + VERIFY(PFKEY_UNIT64(result->m_pkthdr.len) <= UINT16_MAX); mtod(result, struct sadb_msg *)->sadb_msg_len = - PFKEY_UNIT64(result->m_pkthdr.len); + (u_int16_t)PFKEY_UNIT64(result->m_pkthdr.len); return result; @@ -5435,7 +5011,7 @@ key_setsadbsa( { struct mbuf *m; struct sadb_sa *p; - int len; + u_int16_t len; len = PFKEY_ALIGN8(sizeof(struct sadb_sa)); m = key_alloc_mbuf(len); @@ -5468,12 +5044,12 @@ static struct mbuf * key_setsadbaddr( u_int16_t exttype, struct sockaddr *saddr, - u_int8_t prefixlen, - u_int16_t ul_proto) + size_t prefixlen, + u_int8_t ul_proto) { struct mbuf *m; struct sadb_address *p; - size_t len; + u_int16_t len; len = PFKEY_ALIGN8(sizeof(struct sadb_address)) + PFKEY_ALIGN8(saddr->sa_len); @@ -5503,7 +5079,12 @@ key_setsadbaddr( ; /*XXX*/ } } - p->sadb_address_prefixlen = prefixlen; + if (prefixlen >= UINT8_MAX) { + ipseclog((LOG_ERR, "key_setsadbaddr: bad prefix length %zu", prefixlen)); + m_freem(m); + return NULL; + } + p->sadb_address_prefixlen = (u_int8_t)prefixlen; p->sadb_address_reserved = 0; bcopy(saddr, @@ -5517,11 +5098,11 @@ static struct mbuf * key_setsadbipsecif(ifnet_t internal_if, ifnet_t outgoing_if, ifnet_t ipsec_if, - int init_disabled) + u_int8_t init_disabled) { struct mbuf *m; struct sadb_x_ipsecif *p; - size_t len; + u_int16_t len; len = PFKEY_ALIGN8(sizeof(struct sadb_x_ipsecif)); m = key_alloc_mbuf(len); @@ -5561,7 +5142,7 @@ key_setsadbsession_id(u_int64_t session_ids[]) { struct mbuf *m; struct sadb_session_id *p; - size_t len; + u_int16_t len; len = PFKEY_ALIGN8(sizeof(*p)); m = key_alloc_mbuf(len); @@ -5591,9 +5172,9 @@ key_setsadbsastat(u_int32_t dir, struct sastat *stats, u_int32_t max_stats) { - struct mbuf *m; + struct mbuf *m; struct sadb_sastat *p; - int list_len, len; + size_t list_len, len; if (!stats) { return NULL; @@ -5601,48 +5182,12 @@ key_setsadbsastat(u_int32_t dir, list_len = sizeof(*stats) * max_stats; len = 
PFKEY_ALIGN8(sizeof(*p)) + PFKEY_ALIGN8(list_len); - m = key_alloc_mbuf(len); - if (!m || m->m_next) { /*XXX*/ - if (m) { - m_freem(m); - } + if (PFKEY_UNIT64(len) >= UINT16_MAX) { + ipseclog((LOG_ERR, "key_setsadbsastat: length is too big: %zu\n", len)); return NULL; } - p = mtod(m, __typeof__(p)); - - bzero(p, len); - p->sadb_sastat_len = PFKEY_UNIT64(len); - p->sadb_sastat_exttype = SADB_EXT_SASTAT; - p->sadb_sastat_dir = dir; - p->sadb_sastat_list_len = max_stats; - if (list_len) { - bcopy(stats, - mtod(m, caddr_t) + PFKEY_ALIGN8(sizeof(*p)), - list_len); - } - - return m; -} - -#if 0 -/* - * set data into sadb_ident. - */ -static struct mbuf * -key_setsadbident( - u_int16_t exttype, - u_int16_t idtype, - caddr_t string, - int stringlen, - u_int64_t id) -{ - struct mbuf *m; - struct sadb_ident *p; - size_t len; - - len = PFKEY_ALIGN8(sizeof(struct sadb_ident)) + PFKEY_ALIGN8(stringlen); - m = key_alloc_mbuf(len); + m = key_alloc_mbuf((int)len); if (!m || m->m_next) { /*XXX*/ if (m) { m_freem(m); @@ -5650,22 +5195,21 @@ key_setsadbident( return NULL; } - p = mtod(m, struct sadb_ident *); + p = mtod(m, __typeof__(p)); bzero(p, len); - p->sadb_ident_len = PFKEY_UNIT64(len); - p->sadb_ident_exttype = exttype; - p->sadb_ident_type = idtype; - p->sadb_ident_reserved = 0; - p->sadb_ident_id = id; - - bcopy(string, - mtod(m, caddr_t) + PFKEY_ALIGN8(sizeof(struct sadb_ident)), - stringlen); + p->sadb_sastat_len = (u_int16_t)PFKEY_UNIT64(len); + p->sadb_sastat_exttype = SADB_EXT_SASTAT; + p->sadb_sastat_dir = dir; + p->sadb_sastat_list_len = max_stats; + if (list_len) { + bcopy(stats, + mtod(m, caddr_t) + PFKEY_ALIGN8(sizeof(*p)), + list_len); + } return m; } -#endif /* * set data into sadb_x_sa2. @@ -5679,7 +5223,7 @@ key_setsadbxsa2( { struct mbuf *m; struct sadb_x_sa2 *p; - size_t len; + u_int16_t len; len = PFKEY_ALIGN8(sizeof(struct sadb_x_sa2)); m = key_alloc_mbuf(len); @@ -5716,7 +5260,7 @@ key_setsadbxpolicy( { struct mbuf *m; struct sadb_x_policy *p; - size_t len; + u_int16_t len; len = PFKEY_ALIGN8(sizeof(struct sadb_x_policy)); m = key_alloc_mbuf(len); @@ -5804,16 +5348,13 @@ key_ismyaddr( lck_rw_done(in_ifaddr_rwlock); break; #endif -#if INET6 case AF_INET6: return key_ismyaddr6((struct sockaddr_in6 *)(void *)sa); -#endif } return 0; } -#if INET6 /* * compare my own address for IPv6. * 1: ours @@ -5865,7 +5406,6 @@ key_ismyaddr6( return 0; } -#endif /*INET6*/ /* * compare two secasindex structure. @@ -6329,7 +5869,7 @@ key_bbcmp( } if (bits > 0) { - mask = ~((1 << (8 - bits)) - 1); + mask = (u_int8_t)(~((1 << (8 - bits)) - 1)); if ((*p1 & mask) != (*p2 & mask)) { return 0; } @@ -6354,7 +5894,7 @@ key_timehandler(void) struct secpolicy **spbuf = NULL, **spptr = NULL; struct secasvar **savexbuf = NULL, **savexptr = NULL; struct secasvar **savkabuf = NULL, **savkaptr = NULL; - u_int64_t total_req_size = 0; + size_t total_req_size = 0; u_int32_t spbufcount = 0, savbufcount = 0, spcount = 0, savexcount = 0, savkacount = 0, cnt; int stop_handler = 1; /* stop the timehandler */ @@ -6851,7 +6391,7 @@ key_randomfill( * OUT: * 0: invalid satype. 
*/ -static u_int16_t +static u_int8_t key_satype2proto( u_int8_t satype) { @@ -6897,6 +6437,7 @@ key_get_ipsec_if_from_message(const struct sadb_msghdr *mhp, int message_type) ipsecifopts = (struct sadb_x_ipsecif *)(void *)mhp->ext[message_type]; if (ipsecifopts != NULL) { if (ipsecifopts->sadb_x_ipsecif_ipsec_if[0]) { + ipsecifopts->sadb_x_ipsecif_ipsec_if[IFXNAMSIZ - 1] = '\0'; ifnet_find_by_name(ipsecifopts->sadb_x_ipsecif_ipsec_if, &ipsec_if); } } @@ -6913,11 +6454,18 @@ key_get_outgoing_ifindex_from_message(const struct sadb_msghdr *mhp, int message ipsecifopts = (struct sadb_x_ipsecif *)(void *)mhp->ext[message_type]; if (ipsecifopts != NULL) { if (ipsecifopts->sadb_x_ipsecif_outgoing_if[0]) { + ipsecifopts->sadb_x_ipsecif_outgoing_if[IFXNAMSIZ - 1] = '\0'; ifnet_find_by_name(ipsecifopts->sadb_x_ipsecif_outgoing_if, &outgoing_if); } } - return outgoing_if ? outgoing_if->if_index : 0; + u_int outgoing_if_index = 0; + if (outgoing_if != NULL) { + outgoing_if_index = outgoing_if->if_index; + ifnet_release(outgoing_if); + } + + return outgoing_if_index; } /* %%% PF_KEY */ @@ -6980,8 +6528,6 @@ key_getspi( src0 = (struct sadb_address *)(mhp->ext[SADB_EXT_ADDRESS_SRC]); dst0 = (struct sadb_address *)(mhp->ext[SADB_EXT_ADDRESS_DST]); - ipsec_if = key_get_ipsec_if_from_message(mhp, SADB_X_EXT_IPSECIF); - /* map satype to proto */ if ((proto = key_satype2proto(mhp->msg->sadb_msg_satype)) == 0) { ipseclog((LOG_DEBUG, "key_getspi: invalid satype is passed.\n")); @@ -7026,6 +6572,8 @@ key_getspi( ; /*???*/ } + ipsec_if = key_get_ipsec_if_from_message(mhp, SADB_X_EXT_IPSECIF); + /* XXX boundary check against sa_len */ KEY_SETSECASIDX(proto, mode, reqid, src0 + 1, dst0 + 1, ipsec_if ? ipsec_if->if_index : 0, &saidx); @@ -7036,6 +6584,9 @@ key_getspi( (void *)mhp->ext[SADB_EXT_SPIRANGE], &saidx); if (spi == 0) { lck_mtx_unlock(sadb_mutex); + if (ipsec_if != NULL) { + ifnet_release(ipsec_if); + } return key_senderror(so, m, EINVAL); } @@ -7044,12 +6595,24 @@ key_getspi( /* create a new SA index: key_addspi is always used for inbound spi */ if ((newsah = key_newsah(&saidx, ipsec_if, key_get_outgoing_ifindex_from_message(mhp, SADB_X_EXT_IPSECIF), IPSEC_DIR_INBOUND, SECURITY_ASSOCIATION_PFKEY)) == NULL) { lck_mtx_unlock(sadb_mutex); + if (ipsec_if != NULL) { + ifnet_release(ipsec_if); + } ipseclog((LOG_DEBUG, "key_getspi: No more memory.\n")); return key_senderror(so, m, ENOBUFS); } } + if (ipsec_if != NULL) { + ifnet_release(ipsec_if); + ipsec_if = NULL; + } + + // Increment use count, since key_newsav() could release sadb_mutex lock + newsah->use_count++; + if ((newsah->flags & SECURITY_ASSOCIATION_CUSTOM_IPSEC) == SECURITY_ASSOCIATION_CUSTOM_IPSEC) { + newsah->use_count--; lck_mtx_unlock(sadb_mutex); ipseclog((LOG_ERR, "key_getspi: custom ipsec exists\n")); return key_senderror(so, m, EEXIST); @@ -7060,10 +6623,20 @@ key_getspi( newsav = key_newsav(m, mhp, newsah, &error, so); if (newsav == NULL) { /* XXX don't free new SA index allocated in above. 
*/ + newsah->use_count--; lck_mtx_unlock(sadb_mutex); return key_senderror(so, m, error); } + if (newsah->state == SADB_SASTATE_DEAD) { + newsah->use_count--; + key_sa_chgstate(newsav, SADB_SASTATE_DEAD); + key_freesav(newsav, KEY_SADB_LOCKED); + lck_mtx_unlock(sadb_mutex); + ipseclog((LOG_ERR, "key_getspi: security association head is dead\n")); + return key_senderror(so, m, EINVAL); + } + /* set spi */ key_setspi(newsav, htonl(spi)); @@ -7080,7 +6653,7 @@ key_getspi( } } #endif - + newsah->use_count--; lck_mtx_unlock(sadb_mutex); { @@ -7151,76 +6724,17 @@ key_getspi( newmsg = mtod(n, struct sadb_msg *); newmsg->sadb_msg_seq = newsav->seq; newmsg->sadb_msg_errno = 0; - newmsg->sadb_msg_len = PFKEY_UNIT64(n->m_pkthdr.len); + VERIFY(PFKEY_UNIT64(n->m_pkthdr.len) <= UINT16_MAX); + newmsg->sadb_msg_len = (u_int16_t)PFKEY_UNIT64(n->m_pkthdr.len); m_freem(m); return key_sendup_mbuf(so, n, KEY_SENDUP_ONE); } } -u_int32_t -key_getspi2(struct sockaddr *src, - struct sockaddr *dst, - u_int8_t proto, - u_int8_t mode, - u_int32_t reqid, - struct sadb_spirange *spirange) -{ - u_int32_t spi; - struct secasindex saidx; - - LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED); - - /* XXX boundary check against sa_len */ - KEY_SETSECASIDX(proto, mode, reqid, src, dst, 0, &saidx); - - /* make sure if port number is zero. */ - switch (((struct sockaddr *)&saidx.src)->sa_family) { - case AF_INET: - if (((struct sockaddr *)&saidx.src)->sa_len != sizeof(struct sockaddr_in)) { - return 0; - } - ((struct sockaddr_in *)&saidx.src)->sin_port = 0; - break; - case AF_INET6: - if (((struct sockaddr *)&saidx.src)->sa_len != sizeof(struct sockaddr_in6)) { - return 0; - } - ((struct sockaddr_in6 *)&saidx.src)->sin6_port = 0; - break; - default: - ; /*???*/ - } - switch (((struct sockaddr *)&saidx.dst)->sa_family) { - case AF_INET: - if (((struct sockaddr *)&saidx.dst)->sa_len != sizeof(struct sockaddr_in)) { - return 0; - } - ((struct sockaddr_in *)&saidx.dst)->sin_port = 0; - break; - case AF_INET6: - if (((struct sockaddr *)&saidx.dst)->sa_len != sizeof(struct sockaddr_in6)) { - return 0; - } - ((struct sockaddr_in6 *)&saidx.dst)->sin6_port = 0; - break; - default: - ; /*???*/ - } - - lck_mtx_lock(sadb_mutex); - - /* SPI allocation */ - spi = key_do_getnewspi(spirange, &saidx); - - lck_mtx_unlock(sadb_mutex); - - return spi; -} - /* * allocating new SPI - * called by key_getspi() and key_getspi2(). + * called by key_getspi(). * OUT: * 0: failure. * others: success. 
@@ -7302,13 +6816,13 @@ key_update( struct mbuf *m, const struct sadb_msghdr *mhp) { - struct sadb_sa *sa0; - struct sadb_address *src0, *dst0; + struct sadb_sa *sa0 = NULL; + struct sadb_address *src0 = NULL, *dst0 = NULL; ifnet_t ipsec_if = NULL; struct secasindex saidx; - struct secashead *sah; - struct secasvar *sav; - u_int16_t proto; + struct secashead *sah = NULL; + struct secasvar *sav = NULL; + u_int8_t proto; u_int8_t mode; u_int32_t reqid; u_int16_t flags2; @@ -7324,6 +6838,7 @@ key_update( /* map satype to proto */ if ((proto = key_satype2proto(mhp->msg->sadb_msg_satype)) == 0) { ipseclog((LOG_DEBUG, "key_update: invalid satype is passed.\n")); + bzero_keys(mhp); return key_senderror(so, m, EINVAL); } @@ -7339,12 +6854,14 @@ key_update( (mhp->ext[SADB_EXT_LIFETIME_HARD] == NULL && mhp->ext[SADB_EXT_LIFETIME_SOFT] != NULL)) { ipseclog((LOG_DEBUG, "key_update: invalid message is passed.\n")); + bzero_keys(mhp); return key_senderror(so, m, EINVAL); } if (mhp->extlen[SADB_EXT_SA] < sizeof(struct sadb_sa) || mhp->extlen[SADB_EXT_ADDRESS_SRC] < sizeof(struct sadb_address) || mhp->extlen[SADB_EXT_ADDRESS_DST] < sizeof(struct sadb_address)) { ipseclog((LOG_DEBUG, "key_update: invalid message is passed.\n")); + bzero_keys(mhp); return key_senderror(so, m, EINVAL); } if (mhp->ext[SADB_X_EXT_SA2] != NULL) { @@ -7365,8 +6882,15 @@ key_update( dst0 = (struct sadb_address *)(mhp->ext[SADB_EXT_ADDRESS_DST]); ipsec_if = key_get_ipsec_if_from_message(mhp, SADB_X_EXT_IPSECIF); + u_int ipsec_if_index = 0; + if (ipsec_if != NULL) { + ipsec_if_index = ipsec_if->if_index; + ifnet_release(ipsec_if); + ipsec_if = NULL; + } + /* XXX boundary check against sa_len */ - KEY_SETSECASIDX(proto, mode, reqid, src0 + 1, dst0 + 1, ipsec_if ? ipsec_if->if_index : 0, &saidx); + KEY_SETSECASIDX(proto, mode, reqid, src0 + 1, dst0 + 1, ipsec_if_index, &saidx); lck_mtx_lock(sadb_mutex); @@ -7374,47 +6898,52 @@ key_update( if ((sah = key_getsah(&saidx, SECURITY_ASSOCIATION_PFKEY)) == NULL) { lck_mtx_unlock(sadb_mutex); ipseclog((LOG_DEBUG, "key_update: no SA index found.\n")); + bzero_keys(mhp); return key_senderror(so, m, ENOENT); } - /* set spidx if there */ - /* XXX rewrite */ - error = key_setident(sah, m, mhp); - if (error) { - lck_mtx_unlock(sadb_mutex); - return key_senderror(so, m, error); - } + // Increment use count, since key_setsaval() could release sadb_mutex lock + sah->use_count++; if ((sav = key_getsavbyspi(sah, sa0->sadb_sa_spi)) == NULL) { - lck_mtx_unlock(sadb_mutex); ipseclog((LOG_DEBUG, "key_update: no such a SA found (spi:%u)\n", (u_int32_t)ntohl(sa0->sadb_sa_spi))); - return key_senderror(so, m, EINVAL); + error = EINVAL; + goto fail; } + // Increment reference count, since key_setsaval() could release sadb_mutex lock + sav->refcnt++; + /* validity check */ if (sav->sah->saidx.proto != proto) { ipseclog((LOG_DEBUG, "key_update: protocol mismatched (DB=%u param=%u)\n", sav->sah->saidx.proto, proto)); - lck_mtx_unlock(sadb_mutex); - return key_senderror(so, m, EINVAL); + error = EINVAL; + goto fail; } if (sav->pid != mhp->msg->sadb_msg_pid) { ipseclog((LOG_DEBUG, "key_update: pid mismatched (DB:%u param:%u)\n", sav->pid, mhp->msg->sadb_msg_pid)); - lck_mtx_unlock(sadb_mutex); - return key_senderror(so, m, EINVAL); + error = EINVAL; + goto fail; } /* copy sav values */ error = key_setsaval(sav, m, mhp); if (error) { - lck_mtx_unlock(sadb_mutex); - return key_senderror(so, m, error); + goto fail; + } + + if (sah->state == SADB_SASTATE_DEAD) { + ipseclog((LOG_ERR, + "key_update: security association 
head is dead\n")); + error = EINVAL; + goto fail; } sav->flags2 = flags2; @@ -7434,10 +6963,11 @@ key_update( /* check SA values to be mature. */ if ((error = key_mature(sav)) != 0) { - lck_mtx_unlock(sadb_mutex); - return key_senderror(so, m, error); + goto fail; } + key_freesav(sav, KEY_SADB_LOCKED); + sah->use_count--; lck_mtx_unlock(sadb_mutex); { @@ -7450,9 +6980,21 @@ key_update( return key_senderror(so, m, ENOBUFS); } + bzero_keys(mhp); m_freem(m); return key_sendup_mbuf(so, n, KEY_SENDUP_ALL); } +fail: + if (sav != NULL) { + key_freesav(sav, KEY_SADB_LOCKED); + } + if (sah != NULL) { + sah->use_count--; + } + + lck_mtx_unlock(sadb_mutex); + bzero_keys(mhp); + return key_senderror(so, m, error); } static int @@ -7472,7 +7014,7 @@ key_migrate(struct socket *so, struct secashead *sah = NULL; struct secashead *newsah = NULL; struct secasvar *sav = NULL; - u_int16_t proto; + u_int8_t proto; LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED); @@ -7515,8 +7057,15 @@ key_migrate(struct socket *so, ipsec_if0 = key_get_ipsec_if_from_message(mhp, SADB_X_EXT_IPSECIF); ipsec_if1 = key_get_ipsec_if_from_message(mhp, SADB_X_EXT_MIGRATE_IPSECIF); + u_int ipsec_if0_index = 0; + if (ipsec_if0 != NULL) { + ipsec_if0_index = ipsec_if0->if_index; + ifnet_release(ipsec_if0); + ipsec_if0 = NULL; + } + /* Find existing SAH and SAV */ - KEY_SETSECASIDX(proto, IPSEC_MODE_ANY, 0, src0 + 1, dst0 + 1, ipsec_if0 ? ipsec_if0->if_index : 0, &saidx0); + KEY_SETSECASIDX(proto, IPSEC_MODE_ANY, 0, src0 + 1, dst0 + 1, ipsec_if0_index, &saidx0); LIST_FOREACH(sah, &sahtree, chain) { if (sah->state != SADB_SASTATE_MATURE) { @@ -7533,12 +7082,18 @@ key_migrate(struct socket *so, } if (sah == NULL) { lck_mtx_unlock(sadb_mutex); + if (ipsec_if1 != NULL) { + ifnet_release(ipsec_if1); + } ipseclog((LOG_DEBUG, "key_migrate: no mature SAH found.\n")); return key_senderror(so, m, ENOENT); } if (sav == NULL) { lck_mtx_unlock(sadb_mutex); + if (ipsec_if1 != NULL) { + ifnet_release(ipsec_if1); + } ipseclog((LOG_DEBUG, "key_migrate: no SA found.\n")); return key_senderror(so, m, ENOENT); } @@ -7549,11 +7104,19 @@ key_migrate(struct socket *so, if ((newsah = key_getsah(&saidx1, SECURITY_ASSOCIATION_ANY)) == NULL) { if ((newsah = key_newsah(&saidx1, ipsec_if1, key_get_outgoing_ifindex_from_message(mhp, SADB_X_EXT_MIGRATE_IPSECIF), sah->dir, SECURITY_ASSOCIATION_PFKEY)) == NULL) { lck_mtx_unlock(sadb_mutex); + if (ipsec_if1 != NULL) { + ifnet_release(ipsec_if1); + } ipseclog((LOG_DEBUG, "key_migrate: No more memory.\n")); return key_senderror(so, m, ENOBUFS); } } + if (ipsec_if1 != NULL) { + ifnet_release(ipsec_if1); + ipsec_if1 = NULL; + } + if ((newsah->flags & SECURITY_ASSOCIATION_CUSTOM_IPSEC) == SECURITY_ASSOCIATION_CUSTOM_IPSEC) { lck_mtx_unlock(sadb_mutex); ipseclog((LOG_ERR, "key_migrate: custom ipsec exists\n")); @@ -7609,7 +7172,8 @@ key_migrate(struct socket *so, } newmsg = mtod(n, struct sadb_msg *); newmsg->sadb_msg_errno = 0; - newmsg->sadb_msg_len = PFKEY_UNIT64(n->m_pkthdr.len); + VERIFY(PFKEY_UNIT64(n->m_pkthdr.len) <= UINT16_MAX); + newmsg->sadb_msg_len = (u_int16_t)PFKEY_UNIT64(n->m_pkthdr.len); m_freem(m); return key_sendup_mbuf(so, n, KEY_SENDUP_ALL); @@ -7637,13 +7201,13 @@ key_add( struct mbuf *m, const struct sadb_msghdr *mhp) { - struct sadb_sa *sa0; - struct sadb_address *src0, *dst0; + struct sadb_sa *sa0 = NULL; + struct sadb_address *src0 = NULL, *dst0 = NULL; ifnet_t ipsec_if = NULL; struct secasindex saidx; - struct secashead *newsah; - struct secasvar *newsav; - u_int16_t proto; + struct secashead 
*newsah = NULL; + struct secasvar *newsav = NULL; + u_int8_t proto; u_int8_t mode; u_int32_t reqid; int error; @@ -7709,42 +7273,42 @@ key_add( if ((newsah = key_getsah(&saidx, SECURITY_ASSOCIATION_ANY)) == NULL) { /* create a new SA header: key_addspi is always used for outbound spi */ if ((newsah = key_newsah(&saidx, ipsec_if, key_get_outgoing_ifindex_from_message(mhp, SADB_X_EXT_IPSECIF), IPSEC_DIR_OUTBOUND, SECURITY_ASSOCIATION_PFKEY)) == NULL) { - lck_mtx_unlock(sadb_mutex); ipseclog((LOG_DEBUG, "key_add: No more memory.\n")); - bzero_keys(mhp); - return key_senderror(so, m, ENOBUFS); + error = ENOBUFS; + goto fail; } } - if ((newsah->flags & SECURITY_ASSOCIATION_CUSTOM_IPSEC) == SECURITY_ASSOCIATION_CUSTOM_IPSEC) { - lck_mtx_unlock(sadb_mutex); - ipseclog((LOG_ERR, "key_add: custom ipsec exists\n")); - bzero_keys(mhp); - return key_senderror(so, m, EEXIST); + if (ipsec_if != NULL) { + ifnet_release(ipsec_if); + ipsec_if = NULL; } - /* set spidx if there */ - /* XXX rewrite */ - error = key_setident(newsah, m, mhp); - if (error) { - lck_mtx_unlock(sadb_mutex); - bzero_keys(mhp); - return key_senderror(so, m, error); + // Increment use count, since key_newsav() could release sadb_mutex lock + newsah->use_count++; + + if ((newsah->flags & SECURITY_ASSOCIATION_CUSTOM_IPSEC) == SECURITY_ASSOCIATION_CUSTOM_IPSEC) { + ipseclog((LOG_ERR, "key_add: custom ipsec exists\n")); + error = EEXIST; + goto fail; } /* create new SA entry. */ /* We can create new SA only if SPI is different. */ if (key_getsavbyspi(newsah, sa0->sadb_sa_spi)) { - lck_mtx_unlock(sadb_mutex); ipseclog((LOG_DEBUG, "key_add: SA already exists.\n")); - bzero_keys(mhp); - return key_senderror(so, m, EEXIST); + error = EEXIST; + goto fail; } newsav = key_newsav(m, mhp, newsah, &error, so); if (newsav == NULL) { - lck_mtx_unlock(sadb_mutex); - bzero_keys(mhp); - return key_senderror(so, m, error); + goto fail; + } + + if (newsah->state == SADB_SASTATE_DEAD) { + ipseclog((LOG_ERR, "key_add: security association head is dead\n")); + error = EINVAL; + goto fail; } /* @@ -7759,12 +7323,10 @@ key_add( /* check SA values to be mature. 
*/ if ((error = key_mature(newsav)) != 0) { - key_freesav(newsav, KEY_SADB_LOCKED); - lck_mtx_unlock(sadb_mutex); - bzero_keys(mhp); - return key_senderror(so, m, error); + goto fail; } + newsah->use_count--; lck_mtx_unlock(sadb_mutex); /* @@ -7789,90 +7351,20 @@ key_add( m_freem(m); return key_sendup_mbuf(so, n, KEY_SENDUP_ALL); } -} - -/* m is retained */ -static int -key_setident( - struct secashead *sah, - struct mbuf *m, - const struct sadb_msghdr *mhp) -{ - const struct sadb_ident *idsrc, *iddst; - int idsrclen, iddstlen; - - LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_OWNED); - - /* sanity check */ - if (sah == NULL || m == NULL || mhp == NULL || mhp->msg == NULL) { - panic("key_setident: NULL pointer is passed.\n"); - } - - /* don't make buffer if not there */ - if (mhp->ext[SADB_EXT_IDENTITY_SRC] == NULL && - mhp->ext[SADB_EXT_IDENTITY_DST] == NULL) { - sah->idents = NULL; - sah->identd = NULL; - return 0; - } - - if (mhp->ext[SADB_EXT_IDENTITY_SRC] == NULL || - mhp->ext[SADB_EXT_IDENTITY_DST] == NULL) { - ipseclog((LOG_DEBUG, "key_setident: invalid identity.\n")); - return EINVAL; - } - - idsrc = (const struct sadb_ident *) - (void *)mhp->ext[SADB_EXT_IDENTITY_SRC]; - iddst = (const struct sadb_ident *) - (void *)mhp->ext[SADB_EXT_IDENTITY_DST]; - idsrclen = mhp->extlen[SADB_EXT_IDENTITY_SRC]; - iddstlen = mhp->extlen[SADB_EXT_IDENTITY_DST]; - - /* validity check */ - if (idsrc->sadb_ident_type != iddst->sadb_ident_type) { - ipseclog((LOG_DEBUG, "key_setident: ident type mismatch.\n")); - return EINVAL; - } - - switch (idsrc->sadb_ident_type) { - case SADB_IDENTTYPE_PREFIX: - case SADB_IDENTTYPE_FQDN: - case SADB_IDENTTYPE_USERFQDN: - default: - /* XXX do nothing */ - sah->idents = NULL; - sah->identd = NULL; - return 0; +fail: + if (newsav != NULL) { + key_sa_chgstate(newsav, SADB_SASTATE_DEAD); + key_freesav(newsav, KEY_SADB_LOCKED); } - - /* make structure */ - KMALLOC_NOWAIT(sah->idents, struct sadb_ident *, idsrclen); - if (sah->idents == NULL) { - lck_mtx_unlock(sadb_mutex); - KMALLOC_WAIT(sah->idents, struct sadb_ident *, idsrclen); - lck_mtx_lock(sadb_mutex); - if (sah->idents == NULL) { - ipseclog((LOG_DEBUG, "key_setident: No more memory.\n")); - return ENOBUFS; - } + if (newsah != NULL) { + newsah->use_count--; } - KMALLOC_NOWAIT(sah->identd, struct sadb_ident *, iddstlen); - if (sah->identd == NULL) { - lck_mtx_unlock(sadb_mutex); - KMALLOC_WAIT(sah->identd, struct sadb_ident *, iddstlen); - lck_mtx_lock(sadb_mutex); - if (sah->identd == NULL) { - KFREE(sah->idents); - sah->idents = NULL; - ipseclog((LOG_DEBUG, "key_setident: No more memory.\n")); - return ENOBUFS; - } + lck_mtx_unlock(sadb_mutex); + if (ipsec_if != NULL) { + ifnet_release(ipsec_if); } - bcopy(idsrc, sah->idents, idsrclen); - bcopy(iddst, sah->identd, iddstlen); - - return 0; + bzero_keys(mhp); + return key_senderror(so, m, error); } /* @@ -7909,8 +7401,9 @@ key_getmsgbuf_x1( } } mtod(n, struct sadb_msg *)->sadb_msg_errno = 0; + VERIFY(PFKEY_UNIT64(n->m_pkthdr.len) <= UINT16_MAX); mtod(n, struct sadb_msg *)->sadb_msg_len = - PFKEY_UNIT64(n->m_pkthdr.len); + (u_int16_t)PFKEY_UNIT64(n->m_pkthdr.len); return n; } @@ -7990,8 +7483,16 @@ key_delete( dst0 = (struct sadb_address *)(mhp->ext[SADB_EXT_ADDRESS_DST]); ipsec_if = key_get_ipsec_if_from_message(mhp, SADB_X_EXT_IPSECIF); + u_int ipsec_if_index = 0; + if (ipsec_if != NULL) { + ipsec_if_index = ipsec_if->if_index; + ifnet_release(ipsec_if); + ipsec_if = NULL; + } + /* XXX boundary check against sa_len */ - KEY_SETSECASIDX(proto, IPSEC_MODE_ANY, 0, src0 
+ 1, dst0 + 1, ipsec_if ? ipsec_if->if_index : 0, &saidx); + KEY_SETSECASIDX(proto, IPSEC_MODE_ANY, 0, src0 + 1, dst0 + 1, ipsec_if_index, &saidx); + /* get a SA header */ LIST_FOREACH(sah, &sahtree, chain) { @@ -8040,7 +7541,8 @@ key_delete( } newmsg = mtod(n, struct sadb_msg *); newmsg->sadb_msg_errno = 0; - newmsg->sadb_msg_len = PFKEY_UNIT64(n->m_pkthdr.len); + VERIFY(PFKEY_UNIT64(n->m_pkthdr.len) <= UINT16_MAX); + newmsg->sadb_msg_len = (u_int16_t)PFKEY_UNIT64(n->m_pkthdr.len); m_freem(m); return key_sendup_mbuf(so, n, KEY_SENDUP_ALL); @@ -8070,8 +7572,15 @@ key_delete_all( dst0 = (struct sadb_address *)(mhp->ext[SADB_EXT_ADDRESS_DST]); ipsec_if = key_get_ipsec_if_from_message(mhp, SADB_X_EXT_IPSECIF); + u_int ipsec_if_index = 0; + if (ipsec_if != NULL) { + ipsec_if_index = ipsec_if->if_index; + ifnet_release(ipsec_if); + ipsec_if = NULL; + } + /* XXX boundary check against sa_len */ - KEY_SETSECASIDX(proto, IPSEC_MODE_ANY, 0, src0 + 1, dst0 + 1, ipsec_if ? ipsec_if->if_index : 0, &saidx); + KEY_SETSECASIDX(proto, IPSEC_MODE_ANY, 0, src0 + 1, dst0 + 1, ipsec_if_index, &saidx); LIST_FOREACH(sah, &sahtree, chain) { if (sah->state == SADB_SASTATE_DEAD) { @@ -8128,7 +7637,8 @@ key_delete_all( } newmsg = mtod(n, struct sadb_msg *); newmsg->sadb_msg_errno = 0; - newmsg->sadb_msg_len = PFKEY_UNIT64(n->m_pkthdr.len); + VERIFY(PFKEY_UNIT64(n->m_pkthdr.len) <= UINT16_MAX); + newmsg->sadb_msg_len = (u_int16_t)PFKEY_UNIT64(n->m_pkthdr.len); m_freem(m); return key_sendup_mbuf(so, n, KEY_SENDUP_ALL); @@ -8192,8 +7702,15 @@ key_get( dst0 = (struct sadb_address *)mhp->ext[SADB_EXT_ADDRESS_DST]; ipsec_if = key_get_ipsec_if_from_message(mhp, SADB_X_EXT_IPSECIF); + u_int ipsec_if_index = 0; + if (ipsec_if != NULL) { + ipsec_if_index = ipsec_if->if_index; + ifnet_release(ipsec_if); + ipsec_if = NULL; + } + /* XXX boundary check against sa_len */ - KEY_SETSECASIDX(proto, IPSEC_MODE_ANY, 0, src0 + 1, dst0 + 1, ipsec_if ? ipsec_if->if_index : 0, &saidx); + KEY_SETSECASIDX(proto, IPSEC_MODE_ANY, 0, src0 + 1, dst0 + 1, ipsec_if_index, &saidx); lck_mtx_lock(sadb_mutex); @@ -8273,7 +7790,7 @@ key_getsastatbyspi_one(u_int32_t spi, sav = key_getsavbyspi(sah, spi); if (sav) { stat->spi = sav->spi; - stat->created = sav->created; + stat->created = (u_int32_t)sav->created; if (sav->lft_c) { bcopy(sav->lft_c, &stat->lft_c, sizeof(stat->lft_c)); } else { @@ -8298,10 +7815,10 @@ static int key_getsastatbyspi(struct sastat *stat_arg, u_int32_t max_stat_arg, struct sastat *stat_res, - u_int32_t stat_res_size, + u_int64_t stat_res_size, u_int32_t *max_stat_res) { - int cur, found = 0; + u_int32_t cur, found = 0; if (stat_arg == NULL || stat_res == NULL || @@ -8309,7 +7826,7 @@ key_getsastatbyspi(struct sastat *stat_arg, return -1; } - u_int32_t max_stats = stat_res_size / (sizeof(struct sastat)); + u_int64_t max_stats = stat_res_size / (sizeof(struct sastat)); max_stats = ((max_stat_arg <= max_stats) ? 
max_stat_arg : max_stats); for (cur = 0; cur < max_stats; cur++) { @@ -8352,9 +7869,10 @@ key_getcomb_esp(void) struct sadb_comb *comb; const struct esp_algorithm *algo; struct mbuf *result = NULL, *m, *n; - int encmin; - int i, off, o; + u_int16_t encmin; + int off, o; int totlen; + u_int8_t i; const int l = PFKEY_ALIGN8(sizeof(struct sadb_comb)); m = NULL; @@ -8368,7 +7886,7 @@ key_getcomb_esp(void) continue; } if (algo->keymin < ipsec_esp_keymin) { - encmin = ipsec_esp_keymin; + encmin = (u_int16_t)ipsec_esp_keymin; } else { encmin = algo->keymin; } @@ -8444,8 +7962,8 @@ key_getcomb_ah(void) struct sadb_comb *comb; const struct ah_algorithm *algo; struct mbuf *m; - int keymin; - int i; + u_int16_t keymin; + u_int8_t i; const int l = PFKEY_ALIGN8(sizeof(struct sadb_comb)); m = NULL; @@ -8465,7 +7983,7 @@ key_getcomb_ah(void) continue; } if (algo->keymin < ipsec_ah_keymin) { - keymin = ipsec_ah_keymin; + keymin = (u_int16_t)ipsec_ah_keymin; } else { keymin = algo->keymin; } @@ -8542,7 +8060,8 @@ key_getprop( prop = mtod(m, struct sadb_prop *); bzero(prop, sizeof(*prop)); - prop->sadb_prop_len = PFKEY_UNIT64(totlen); + VERIFY(totlen <= UINT16_MAX); + prop->sadb_prop_len = (u_int16_t)PFKEY_UNIT64(totlen); prop->sadb_prop_exttype = SADB_EXT_PROPOSAL; prop->sadb_prop_replay = 32; /* XXX */ @@ -8654,7 +8173,7 @@ key_acquire( /* set sadb_x_policy */ if (sp) { - m = key_setsadbxpolicy(sp->policy, sp->spidx.dir, sp->id); + m = key_setsadbxpolicy((u_int16_t)sp->policy, sp->spidx.dir, sp->id); if (!m) { error = ENOBUFS; goto fail; @@ -8662,50 +8181,6 @@ key_acquire( m_cat(result, m); } - /* XXX identity (optional) */ -#if 0 - if (idexttype && fqdn) { - /* create identity extension (FQDN) */ - struct sadb_ident *id; - int fqdnlen; - - fqdnlen = strlen(fqdn) + 1; /* +1 for terminating-NUL */ - id = (struct sadb_ident *)p; - bzero(id, sizeof(*id) + PFKEY_ALIGN8(fqdnlen)); - id->sadb_ident_len = PFKEY_UNIT64(sizeof(*id) + PFKEY_ALIGN8(fqdnlen)); - id->sadb_ident_exttype = idexttype; - id->sadb_ident_type = SADB_IDENTTYPE_FQDN; - bcopy(fqdn, id + 1, fqdnlen); - p += sizeof(struct sadb_ident) + PFKEY_ALIGN8(fqdnlen); - } - - if (idexttype) { - /* create identity extension (USERFQDN) */ - struct sadb_ident *id; - int userfqdnlen; - - if (userfqdn) { - /* +1 for terminating-NUL */ - userfqdnlen = strlen(userfqdn) + 1; - } else { - userfqdnlen = 0; - } - id = (struct sadb_ident *)p; - bzero(id, sizeof(*id) + PFKEY_ALIGN8(userfqdnlen)); - id->sadb_ident_len = PFKEY_UNIT64(sizeof(*id) + PFKEY_ALIGN8(userfqdnlen)); - id->sadb_ident_exttype = idexttype; - id->sadb_ident_type = SADB_IDENTTYPE_USERFQDN; - /* XXX is it correct? 
*/ - if (curproc && curproc->p_cred) { - id->sadb_ident_id = curproc->p_cred->p_ruid; - } - if (userfqdn && userfqdnlen) { - bcopy(userfqdn, id + 1, userfqdnlen); - } - p += sizeof(struct sadb_ident) + PFKEY_ALIGN8(userfqdnlen); - } -#endif - /* XXX sensitivity (optional) */ /* create proposal/combination extension */ @@ -8735,8 +8210,9 @@ key_acquire( result->m_pkthdr.len += m->m_len; } + VERIFY(PFKEY_UNIT64(result->m_pkthdr.len) <= UINT16_MAX); mtod(result, struct sadb_msg *)->sadb_msg_len = - PFKEY_UNIT64(result->m_pkthdr.len); + (u_int16_t)PFKEY_UNIT64(result->m_pkthdr.len); return key_sendup_mbuf(NULL, result, KEY_SENDUP_REGISTERED); @@ -8965,9 +8441,16 @@ key_acquire2( dst0 = (const struct sadb_address *)mhp->ext[SADB_EXT_ADDRESS_DST]; ipsec_if = key_get_ipsec_if_from_message(mhp, SADB_X_EXT_IPSECIF); + u_int ipsec_if_index = 0; + if (ipsec_if != NULL) { + ipsec_if_index = ipsec_if->if_index; + ifnet_release(ipsec_if); + ipsec_if = NULL; + } + /* XXX boundary check against sa_len */ /* cast warnings */ - KEY_SETSECASIDX(proto, IPSEC_MODE_ANY, 0, src0 + 1, dst0 + 1, ipsec_if ? ipsec_if->if_index : 0, &saidx); + KEY_SETSECASIDX(proto, IPSEC_MODE_ANY, 0, src0 + 1, dst0 + 1, ipsec_if_index, &saidx); /* get a SA index */ LIST_FOREACH(sah, &sahtree, chain) { @@ -9062,9 +8545,9 @@ setmsg: struct mbuf *n; struct sadb_msg *newmsg; struct sadb_supported *sup; - u_int len, alen, elen; + u_int16_t len, alen, elen; int off; - int i; + u_int8_t i; struct sadb_alg *alg; /* create new sadb_msg to reply. */ @@ -9114,13 +8597,14 @@ setmsg: m_copydata(m, 0, sizeof(struct sadb_msg), mtod(n, caddr_t) + off); newmsg = mtod(n, struct sadb_msg *); newmsg->sadb_msg_errno = 0; - newmsg->sadb_msg_len = PFKEY_UNIT64(len); + VERIFY(PFKEY_UNIT64(len) <= UINT16_MAX); + newmsg->sadb_msg_len = (u_int16_t)PFKEY_UNIT64(len); off += PFKEY_ALIGN8(sizeof(struct sadb_msg)); /* for authentication algorithm */ if (alen) { sup = (struct sadb_supported *)(void *)(mtod(n, caddr_t) + off); - sup->sadb_supported_len = PFKEY_UNIT64(alen); + sup->sadb_supported_len = (u_int16_t)PFKEY_UNIT64(alen); sup->sadb_supported_exttype = SADB_EXT_SUPPORTED_AUTH; off += PFKEY_ALIGN8(sizeof(*sup)); @@ -9164,8 +8648,9 @@ setmsg: * give NULL to get the value preferred by * algorithm XXX SADB_X_EXT_DERIV ? 
*/ + VERIFY((*ealgo->ivlen)(ealgo, NULL) <= UINT8_MAX); alg->sadb_alg_ivlen = - (*ealgo->ivlen)(ealgo, NULL); + (u_int8_t)((*ealgo->ivlen)(ealgo, NULL)); } else { alg->sadb_alg_ivlen = 0; } @@ -9264,7 +8749,7 @@ static int key_expire( struct secasvar *sav) { - int satype; + u_int8_t satype; struct mbuf *result = NULL, *m; int len; int error = -1; @@ -9284,7 +8769,7 @@ key_expire( } /* set msg header */ - m = key_setsadbmsg(SADB_EXPIRE, 0, satype, sav->seq, 0, sav->refcnt); + m = key_setsadbmsg(SADB_EXPIRE, 0, satype, sav->seq, 0, (u_int16_t)sav->refcnt); if (!m) { error = ENOBUFS; goto fail; @@ -9370,8 +8855,9 @@ key_expire( result->m_pkthdr.len += m->m_len; } + VERIFY(PFKEY_UNIT64(result->m_pkthdr.len) <= UINT16_MAX); mtod(result, struct sadb_msg *)->sadb_msg_len = - PFKEY_UNIT64(result->m_pkthdr.len); + (u_int16_t)PFKEY_UNIT64(result->m_pkthdr.len); return key_sendup_mbuf(NULL, result, KEY_SENDUP_REGISTERED); @@ -9404,7 +8890,7 @@ key_flush( struct secashead *sah, *nextsah; struct secasvar *sav, *nextsav; u_int16_t proto; - u_int8_t state; + u_int state; u_int stateidx; /* sanity check */ @@ -9462,7 +8948,8 @@ key_flush( m->m_pkthdr.len = m->m_len = sizeof(struct sadb_msg); newmsg = mtod(m, struct sadb_msg *); newmsg->sadb_msg_errno = 0; - newmsg->sadb_msg_len = PFKEY_UNIT64(m->m_pkthdr.len); + VERIFY(PFKEY_UNIT64(m->m_pkthdr.len) <= UINT16_MAX); + newmsg->sadb_msg_len = (uint16_t)PFKEY_UNIT64(m->m_pkthdr.len); return key_sendup_mbuf(so, m, KEY_SENDUP_ALL); } @@ -9499,7 +8986,7 @@ key_dump( u_int16_t proto; u_int stateidx; u_int8_t satype; - u_int8_t state; + u_int state; struct mbuf *n; int error = 0; @@ -10007,7 +9494,7 @@ key_parse( if (msg->sadb_msg_type == SADB_X_PROMISC) { break; } - /*FALLTHROUGH*/ + OS_FALLTHROUGH; default: ipseclog((LOG_DEBUG, "key_parse: invalid type %u is passed.\n", msg->sadb_msg_satype)); @@ -10078,7 +9565,7 @@ senderror: } else { bzero_mbuf(m); } - msg->sadb_msg_errno = error; + msg->sadb_msg_errno = (u_int8_t)error; return key_sendup_mbuf(so, m, target); } @@ -10097,7 +9584,7 @@ key_senderror( } msg = mtod(m, struct sadb_msg *); - msg->sadb_msg_errno = code; + msg->sadb_msg_errno = (u_int8_t)code; return key_sendup_mbuf(so, m, KEY_SENDUP_ONE); } @@ -10113,8 +9600,8 @@ key_align( { struct mbuf *n; struct sadb_ext *ext; - size_t off, end; - int extlen; + size_t end; + int off, extlen; int toff; /* sanity check */ @@ -10132,7 +9619,7 @@ key_align( mhp->ext[0] = (struct sadb_ext *)mhp->msg; /*XXX backward compat */ end = PFKEY_UNUNIT64(mhp->msg->sadb_msg_len); - extlen = end; /*just in case extlen is not updated*/ + extlen = (int)end; /*just in case extlen is not updated*/ for (off = sizeof(struct sadb_msg); off < end; off += extlen) { n = m_pulldown(m, off, sizeof(struct sadb_ext), &toff); if (!n) { @@ -10200,7 +9687,7 @@ key_align( if (off + extlen > end) { ipseclog((LOG_DEBUG, "key_align: ext type %u invalid ext length %d " - "offset %zu sadb message total len %zu is passed.\n", + "offset %d sadb message total len %zu is passed.\n", ext->sadb_ext_type, extlen, off, end)); bzero_mbuf(m); m_freem(m); @@ -10475,7 +9962,7 @@ key_alloc_mbuf( n->m_next = NULL; n->m_len = 0; - n->m_len = M_TRAILINGSPACE(n); + n->m_len = (int)M_TRAILINGSPACE(n); /* use the bottom of mbuf, hoping we can prepend afterwards */ if (n->m_len > len) { t = (n->m_len - len) & ~(sizeof(long) - 1); @@ -10541,8 +10028,13 @@ key_setdumpsastats(u_int32_t dir, result->m_pkthdr.len += m->m_len; } + if (PFKEY_UNIT64(result->m_pkthdr.len) > UINT16_MAX) { + ipseclog((LOG_ERR, "key_setdumpsastats: 
length too nbug: %u", result->m_pkthdr.len)); + goto fail; + } + mtod(result, struct sadb_msg *)->sadb_msg_len = - PFKEY_UNIT64(result->m_pkthdr.len); + (u_int16_t)PFKEY_UNIT64(result->m_pkthdr.len); return result; @@ -10566,7 +10058,7 @@ key_getsastat(struct socket *so, const struct sadb_msghdr *mhp) { struct sadb_session_id *session_id; - u_int64_t bufsize = 0; + size_t bufsize = 0; u_int32_t arg_count, res_count; struct sadb_sastat *sa_stats_arg; struct sastat *sa_stats_sav = NULL; @@ -10799,7 +10291,12 @@ key_send_delsp(struct secpolicy *sp) result->m_pkthdr.len += m->m_len; } - mtod(result, struct sadb_msg *)->sadb_msg_len = PFKEY_UNIT64(result->m_pkthdr.len); + if (PFKEY_UNIT64(result->m_pkthdr.len) >= UINT16_MAX) { + ipseclog((LOG_ERR, "key_send_delsp: length too big: %d", result->m_pkthdr.len)); + goto fail; + } + + mtod(result, struct sadb_msg *)->sadb_msg_len = (u_int16_t)PFKEY_UNIT64(result->m_pkthdr.len); return key_sendup_mbuf(NULL, result, KEY_SENDUP_REGISTERED); diff --git a/bsd/netkey/key.h b/bsd/netkey/key.h index d07289b62..aca9509a5 100644 --- a/bsd/netkey/key.h +++ b/bsd/netkey/key.h @@ -88,28 +88,6 @@ extern void key_sa_chgstate(struct secasvar *, u_int8_t); extern void key_sa_stir_iv(struct secasvar *); extern void key_delsah(struct secashead *sah); extern struct secashead *key_newsah2(struct secasindex *saidx, u_int8_t dir); -extern u_int32_t key_getspi2(struct sockaddr *src, - struct sockaddr *dst, - u_int8_t proto, - u_int8_t mode, - u_int32_t reqid, - struct sadb_spirange *spirange); -extern struct secasvar * key_newsav2(struct secashead *sah, - u_int8_t satype, - u_int8_t alg_auth, - u_int8_t alg_enc, - u_int32_t flags, - u_int8_t replay, - struct sadb_key *key_auth, - u_int16_t key_auth_len, - struct sadb_key *key_enc, - u_int16_t key_enc_len, - u_int16_t natt_port, - u_int32_t seq, - u_int32_t spi, - u_int32_t pid, - struct sadb_lifetime *lifetime_hard, - struct sadb_lifetime *lifetime_soft); extern void key_delsav(struct secasvar *sav); extern struct secpolicy *key_getspbyid(u_int32_t); extern void key_delsp_for_ipsec_if(ifnet_t ipsec_if); diff --git a/bsd/netkey/key_debug.c b/bsd/netkey/key_debug.c index ac3a0cb0c..11dae7ef4 100644 --- a/bsd/netkey/key_debug.c +++ b/bsd/netkey/key_debug.c @@ -69,7 +69,7 @@ static void kdebug_secreplay(struct secreplay *); #endif #ifndef KERNEL -#define panic(param) { printf(param); exit(-1); } +#define panic(param) { printf(param); exit(-1); } #endif /* NOTE: host byte order */ @@ -77,14 +77,15 @@ static void kdebug_secreplay(struct secreplay *); /* %%%: about struct sadb_msg */ void kdebug_sadb(base) - struct sadb_msg *base; +struct sadb_msg *base; { struct sadb_ext *ext; int tlen, extlen; /* sanity check */ - if (base == NULL) + if (base == NULL) { panic("kdebug_sadb: NULL pointer was passed.\n"); + } printf("sadb_msg{ version=%u type=%u errno=%u satype=%u\n", base->sadb_msg_version, base->sadb_msg_type, @@ -149,12 +150,12 @@ kdebug_sadb(base) case SADB_X_EXT_SA2: kdebug_sadb_x_sa2(ext); break; - case SADB_EXT_SESSION_ID: + case SADB_EXT_SESSION_ID: kdebug_sadb_session_id(ext); - break; - case SADB_EXT_SASTAT: + break; + case SADB_EXT_SASTAT: kdebug_sadb_sastat(ext); - break; + break; default: printf("kdebug_sadb: invalid ext_type %u was passed.\n", ext->sadb_ext_type); @@ -171,47 +172,48 @@ kdebug_sadb(base) static void kdebug_sadb_prop(ext) - struct sadb_ext *ext; +struct sadb_ext *ext; { struct sadb_prop *prop = (struct sadb_prop *)ext; struct sadb_comb *comb; int len; /* sanity check */ - if (ext == NULL) + if (ext == 
NULL) { panic("kdebug_sadb_prop: NULL pointer was passed.\n"); + } len = (PFKEY_UNUNIT64(prop->sadb_prop_len) - sizeof(*prop)) - / sizeof(*comb); + / sizeof(*comb); comb = (struct sadb_comb *)(prop + 1); printf("sadb_prop{ replay=%u\n", prop->sadb_prop_replay); while (len--) { printf("sadb_comb{ auth=%u encrypt=%u " - "flags=0x%04x reserved=0x%08x\n", - comb->sadb_comb_auth, comb->sadb_comb_encrypt, - comb->sadb_comb_flags, comb->sadb_comb_reserved); + "flags=0x%04x reserved=0x%08x\n", + comb->sadb_comb_auth, comb->sadb_comb_encrypt, + comb->sadb_comb_flags, comb->sadb_comb_reserved); printf(" auth_minbits=%u auth_maxbits=%u " - "encrypt_minbits=%u encrypt_maxbits=%u\n", - comb->sadb_comb_auth_minbits, - comb->sadb_comb_auth_maxbits, - comb->sadb_comb_encrypt_minbits, - comb->sadb_comb_encrypt_maxbits); + "encrypt_minbits=%u encrypt_maxbits=%u\n", + comb->sadb_comb_auth_minbits, + comb->sadb_comb_auth_maxbits, + comb->sadb_comb_encrypt_minbits, + comb->sadb_comb_encrypt_maxbits); printf(" soft_alloc=%u hard_alloc=%u " - "soft_bytes=%lu hard_bytes=%lu\n", - comb->sadb_comb_soft_allocations, - comb->sadb_comb_hard_allocations, - (u_int32_t)comb->sadb_comb_soft_bytes, - (u_int32_t)comb->sadb_comb_hard_bytes); + "soft_bytes=%lu hard_bytes=%lu\n", + comb->sadb_comb_soft_allocations, + comb->sadb_comb_hard_allocations, + (u_int32_t)comb->sadb_comb_soft_bytes, + (u_int32_t)comb->sadb_comb_hard_bytes); printf(" soft_alloc=%lu hard_alloc=%lu " - "soft_bytes=%lu hard_bytes=%lu }\n", - (u_int32_t)comb->sadb_comb_soft_addtime, - (u_int32_t)comb->sadb_comb_hard_addtime, - (u_int32_t)comb->sadb_comb_soft_usetime, - (u_int32_t)comb->sadb_comb_hard_usetime); + "soft_bytes=%lu hard_bytes=%lu }\n", + (u_int32_t)comb->sadb_comb_soft_addtime, + (u_int32_t)comb->sadb_comb_hard_addtime, + (u_int32_t)comb->sadb_comb_soft_usetime, + (u_int32_t)comb->sadb_comb_hard_usetime); comb++; } printf("}\n"); @@ -221,14 +223,15 @@ kdebug_sadb_prop(ext) static void kdebug_sadb_identity(ext) - struct sadb_ext *ext; +struct sadb_ext *ext; { struct sadb_ident *id = (struct sadb_ident *)ext; int len; /* sanity check */ - if (ext == NULL) + if (ext == NULL) { panic("kdebug_sadb_identity: NULL pointer was passed.\n"); + } len = PFKEY_UNUNIT64(id->sadb_ident_len) - sizeof(*id); printf("sadb_ident_%s{", @@ -236,7 +239,7 @@ kdebug_sadb_identity(ext) switch (id->sadb_ident_type) { default: printf(" type=%d id=%lu", - id->sadb_ident_type, (u_int32_t)id->sadb_ident_id); + id->sadb_ident_type, (u_int32_t)id->sadb_ident_id); if (len) { #ifdef KERNEL ipsec_hexdump((caddr_t)(id + 1), len); /*XXX cast ?*/ @@ -246,10 +249,11 @@ kdebug_sadb_identity(ext) p = (char *)(id + 1); ep = p + len; for (/*nothing*/; *p && p < ep; p++) { - if (isprint(*p)) + if (isprint(*p)) { printf("%c", *p & 0xff); - else + } else { printf("\\%03o", *p & 0xff); + } } #endif printf("\""); @@ -264,24 +268,25 @@ kdebug_sadb_identity(ext) static void kdebug_sadb_supported(ext) - struct sadb_ext *ext; +struct sadb_ext *ext; { struct sadb_supported *sup = (struct sadb_supported *)ext; struct sadb_alg *alg; int len; /* sanity check */ - if (ext == NULL) + if (ext == NULL) { panic("kdebug_sadb_supported: NULL pointer was passed.\n"); + } len = (PFKEY_UNUNIT64(sup->sadb_supported_len) - sizeof(*sup)) - / sizeof(*alg); + / sizeof(*alg); alg = (struct sadb_alg *)(sup + 1); printf("sadb_sup{\n"); while (len--) { printf(" { id=%d ivlen=%d min=%d max=%d }\n", - alg->sadb_alg_id, alg->sadb_alg_ivlen, - alg->sadb_alg_minbits, alg->sadb_alg_maxbits); + alg->sadb_alg_id, 
alg->sadb_alg_ivlen, + alg->sadb_alg_minbits, alg->sadb_alg_maxbits); alg++; } printf("}\n"); @@ -291,33 +296,35 @@ kdebug_sadb_supported(ext) static void kdebug_sadb_lifetime(ext) - struct sadb_ext *ext; +struct sadb_ext *ext; { struct sadb_lifetime *lft = (struct sadb_lifetime *)ext; /* sanity check */ - if (ext == NULL) + if (ext == NULL) { printf("kdebug_sadb_lifetime: NULL pointer was passed.\n"); + } printf("sadb_lifetime{ alloc=%u, bytes=%u\n", - lft->sadb_lifetime_allocations, - (u_int32_t)lft->sadb_lifetime_bytes); + lft->sadb_lifetime_allocations, + (u_int32_t)lft->sadb_lifetime_bytes); printf(" addtime=%u, usetime=%u }\n", - (u_int32_t)lft->sadb_lifetime_addtime, - (u_int32_t)lft->sadb_lifetime_usetime); + (u_int32_t)lft->sadb_lifetime_addtime, + (u_int32_t)lft->sadb_lifetime_usetime); return; } static void kdebug_sadb_sa(ext) - struct sadb_ext *ext; +struct sadb_ext *ext; { struct sadb_sa *sa = (struct sadb_sa *)ext; /* sanity check */ - if (ext == NULL) + if (ext == NULL) { panic("kdebug_sadb_sa: NULL pointer was passed.\n"); + } printf("sadb_sa{ spi=%u replay=%u state=%u\n", (u_int32_t)ntohl(sa->sadb_sa_spi), sa->sadb_sa_replay, @@ -330,13 +337,14 @@ kdebug_sadb_sa(ext) static void kdebug_sadb_address(ext) - struct sadb_ext *ext; +struct sadb_ext *ext; { struct sadb_address *addr = (struct sadb_address *)ext; /* sanity check */ - if (ext == NULL) + if (ext == NULL) { panic("kdebug_sadb_address: NULL pointer was passed.\n"); + } printf("sadb_address{ proto=%u prefixlen=%u reserved=0x%02x%02x }\n", addr->sadb_address_proto, addr->sadb_address_prefixlen, @@ -350,13 +358,14 @@ kdebug_sadb_address(ext) static void kdebug_sadb_key(ext) - struct sadb_ext *ext; +struct sadb_ext *ext; { struct sadb_key *key = (struct sadb_key *)ext; /* sanity check */ - if (ext == NULL) + if (ext == NULL) { panic("kdebug_sadb_key: NULL pointer was passed.\n"); + } printf("sadb_key{ bits=%u reserved=%u\n", key->sadb_key_bits, key->sadb_key_reserved); @@ -364,27 +373,28 @@ kdebug_sadb_key(ext) /* sanity check 2 */ if ((key->sadb_key_bits >> 3) > - (PFKEY_UNUNIT64(key->sadb_key_len) - sizeof(struct sadb_key))) { + (PFKEY_UNUNIT64(key->sadb_key_len) - sizeof(struct sadb_key))) { printf("kdebug_sadb_key: key length mismatch, bit:%d len:%ld.\n", - key->sadb_key_bits >> 3, - (long)PFKEY_UNUNIT64(key->sadb_key_len) - sizeof(struct sadb_key)); + key->sadb_key_bits >> 3, + (long)PFKEY_UNUNIT64(key->sadb_key_len) - sizeof(struct sadb_key)); } ipsec_hexdump((caddr_t)key + sizeof(struct sadb_key), - key->sadb_key_bits >> 3); + key->sadb_key_bits >> 3); printf(" }\n"); return; } static void kdebug_sadb_x_sa2(ext) - struct sadb_ext *ext; +struct sadb_ext *ext; { struct sadb_x_sa2 *sa2 = (struct sadb_x_sa2 *)ext; /* sanity check */ - if (ext == NULL) + if (ext == NULL) { panic("kdebug_sadb_x_sa2: NULL pointer was passed.\n"); + } printf("sadb_x_sa2{ mode=%u reqid=%u\n", sa2->sadb_x_sa2_mode, sa2->sadb_x_sa2_reqid); @@ -397,39 +407,41 @@ kdebug_sadb_x_sa2(ext) static void kdebug_sadb_session_id(ext) - struct sadb_ext *ext; +struct sadb_ext *ext; { - struct sadb_session_id *p = (__typeof__(p))ext; + struct sadb_session_id *p = (__typeof__(p))ext; /* sanity check */ - if (ext == NULL) - panic("kdebug_sadb_session_id: NULL pointer was passed.\n"); + if (ext == NULL) { + panic("kdebug_sadb_session_id: NULL pointer was passed.\n"); + } printf("sadb_session_id{ id0=%llx, id1=%llx}\n", - p->sadb_session_id_v[0], - p->sadb_session_id_v[1]); + p->sadb_session_id_v[0], + p->sadb_session_id_v[1]); return; } static void 
kdebug_sadb_sastat(ext) - struct sadb_ext *ext; +struct sadb_ext *ext; { - struct sadb_sastat *p = (__typeof__(p))ext; + struct sadb_sastat *p = (__typeof__(p))ext; struct sastat *stats; int i; /* sanity check */ - if (ext == NULL) - panic("kdebug_sadb_sastat: NULL pointer was passed.\n"); + if (ext == NULL) { + panic("kdebug_sadb_sastat: NULL pointer was passed.\n"); + } printf("sadb_sastat{ dir=%u num=%u\n", - p->sadb_sastat_dir, p->sadb_sastat_list_len); + p->sadb_sastat_dir, p->sadb_sastat_list_len); stats = (__typeof__(stats))(p + 1); for (i = 0; i < p->sadb_sastat_list_len; i++) { - printf(" spi=%x,\n", - stats[i].spi); + printf(" spi=%x,\n", + stats[i].spi); } printf("}\n"); @@ -438,18 +450,19 @@ kdebug_sadb_sastat(ext) void kdebug_sadb_x_policy(ext) - struct sadb_ext *ext; +struct sadb_ext *ext; { struct sadb_x_policy *xpl = (struct sadb_x_policy *)ext; struct sockaddr *addr; /* sanity check */ - if (ext == NULL) + if (ext == NULL) { panic("kdebug_sadb_x_policy: NULL pointer was passed.\n"); + } printf("sadb_x_policy{ type=%u dir=%u id=%x }\n", - xpl->sadb_x_policy_type, xpl->sadb_x_policy_dir, - xpl->sadb_x_policy_id); + xpl->sadb_x_policy_type, xpl->sadb_x_policy_dir, + xpl->sadb_x_policy_id); if (xpl->sadb_x_policy_type == IPSEC_POLICY_IPSEC) { int tlen; @@ -460,17 +473,17 @@ kdebug_sadb_x_policy(ext) while (tlen > 0) { printf(" { len=%u proto=%u mode=%u level=%u reqid=%u\n", - xisr->sadb_x_ipsecrequest_len, - xisr->sadb_x_ipsecrequest_proto, - xisr->sadb_x_ipsecrequest_mode, - xisr->sadb_x_ipsecrequest_level, - xisr->sadb_x_ipsecrequest_reqid); + xisr->sadb_x_ipsecrequest_len, + xisr->sadb_x_ipsecrequest_proto, + xisr->sadb_x_ipsecrequest_mode, + xisr->sadb_x_ipsecrequest_level, + xisr->sadb_x_ipsecrequest_reqid); if (xisr->sadb_x_ipsecrequest_len > sizeof(*xisr)) { addr = (struct sockaddr *)(xisr + 1); kdebug_sockaddr(addr); addr = (struct sockaddr *)((caddr_t)addr - + addr->sa_len); + + addr->sa_len); kdebug_sockaddr(addr); } @@ -490,11 +503,12 @@ kdebug_sadb_x_policy(ext) tlen -= xisr->sadb_x_ipsecrequest_len; xisr = (struct sadb_x_ipsecrequest *)((caddr_t)xisr - + xisr->sadb_x_ipsecrequest_len); + + xisr->sadb_x_ipsecrequest_len); } - if (tlen != 0) + if (tlen != 0) { panic("kdebug_sadb_x_policy: wrong policy struct.\n"); + } } return; @@ -504,14 +518,15 @@ kdebug_sadb_x_policy(ext) /* %%%: about SPD and SAD */ void kdebug_secpolicy(sp) - struct secpolicy *sp; +struct secpolicy *sp; { /* sanity check */ - if (sp == NULL) + if (sp == NULL) { panic("kdebug_secpolicy: NULL pointer was passed.\n"); + } printf("secpolicy{ refcnt=%u state=%u policy=%u\n", - sp->refcnt, sp->state, sp->policy); + sp->refcnt, sp->state, sp->policy); kdebug_secpolicyindex(&sp->spidx); @@ -524,17 +539,15 @@ kdebug_secpolicy(sp) printf(" type=none }\n"); break; case IPSEC_POLICY_IPSEC: - { + { struct ipsecrequest *isr; for (isr = sp->req; isr != NULL; isr = isr->next) { - printf(" level=%u\n", isr->level); kdebug_secasindex(&isr->saidx); - } printf(" }\n"); - } - break; + } + break; case IPSEC_POLICY_BYPASS: printf(" type=bypass }\n"); break; @@ -543,7 +556,7 @@ kdebug_secpolicy(sp) break; default: printf("kdebug_secpolicy: Invalid policy found. 
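/*
 * Illustrative sketch (not part of the patch): an IPsec policy extension is a
 * sequence of variable-length sadb_x_ipsecrequest records, each carrying its
 * own byte length and optionally followed by one or two sockaddrs. The walker
 * in kdebug_sadb_x_policy() keeps a running "tlen" and expects the record
 * lengths to land exactly on zero. Simplified types; handle_request() is a
 * hypothetical stand-in for the per-record printf.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct ipsecrequest_rec { uint16_t len; uint16_t proto; uint8_t mode, level; uint16_t reqid; };

static bool
walk_ipsecrequests(const void *body, size_t tlen,
    void (*handle_request)(const struct ipsecrequest_rec *))
{
    const char *p = body;

    while (tlen > 0) {
        const struct ipsecrequest_rec *xisr = (const struct ipsecrequest_rec *)p;
        if (xisr->len < sizeof(*xisr) || xisr->len > tlen) {
            return false;       /* malformed record; the kernel panics instead */
        }
        handle_request(xisr);
        tlen -= xisr->len;
        p += xisr->len;
    }
    return true;                /* lengths added up exactly */
}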
%d\n", - sp->policy); + sp->policy); break; } @@ -552,21 +565,22 @@ kdebug_secpolicy(sp) void kdebug_secpolicyindex(spidx) - struct secpolicyindex *spidx; +struct secpolicyindex *spidx; { /* sanity check */ - if (spidx == NULL) + if (spidx == NULL) { panic("kdebug_secpolicyindex: NULL pointer was passed.\n"); + } printf("secpolicyindex{ dir=%u prefs=%u prefd=%u ul_proto=%u internal_if=%s\n", - spidx->dir, spidx->prefs, spidx->prefd, spidx->ul_proto, - (spidx->internal_if) ? spidx->internal_if->if_xname : "N/A"); + spidx->dir, spidx->prefs, spidx->prefd, spidx->ul_proto, + (spidx->internal_if) ? spidx->internal_if->if_xname : "N/A"); ipsec_hexdump((caddr_t)&spidx->src, - ((struct sockaddr *)&spidx->src)->sa_len); + ((struct sockaddr *)&spidx->src)->sa_len); printf("\n"); ipsec_hexdump((caddr_t)&spidx->dst, - ((struct sockaddr *)&spidx->dst)->sa_len); + ((struct sockaddr *)&spidx->dst)->sa_len); printf("}\n"); return; @@ -574,20 +588,21 @@ kdebug_secpolicyindex(spidx) void kdebug_secasindex(saidx) - struct secasindex *saidx; +struct secasindex *saidx; { /* sanity check */ - if (saidx == NULL) + if (saidx == NULL) { panic("kdebug_secpolicyindex: NULL pointer was passed.\n"); + } printf("secasindex{ mode=%u proto=%u\n", - saidx->mode, saidx->proto); + saidx->mode, saidx->proto); ipsec_hexdump((caddr_t)&saidx->src, - ((struct sockaddr *)&saidx->src)->sa_len); + ((struct sockaddr *)&saidx->src)->sa_len); printf("\n"); ipsec_hexdump((caddr_t)&saidx->dst, - ((struct sockaddr *)&saidx->dst)->sa_len); + ((struct sockaddr *)&saidx->dst)->sa_len); printf("\n"); return; @@ -595,11 +610,12 @@ kdebug_secasindex(saidx) void kdebug_secasv(sav) - struct secasvar *sav; +struct secasvar *sav; { /* sanity check */ - if (sav == NULL) + if (sav == NULL) { panic("kdebug_secasv: NULL pointer was passed.\n"); + } printf("secas{"); kdebug_secasindex(&sav->sah->saidx); @@ -609,24 +625,30 @@ kdebug_secasv(sav) printf(" spi=%u flags=%u\n", (u_int32_t)ntohl(sav->spi), sav->flags); - if (sav->key_auth != NULL) + if (sav->key_auth != NULL) { kdebug_sadb_key((struct sadb_ext *)sav->key_auth); - if (sav->key_enc != NULL) + } + if (sav->key_enc != NULL) { kdebug_sadb_key((struct sadb_ext *)sav->key_enc); + } if (sav->iv != NULL) { printf(" iv="); ipsec_hexdump(sav->iv, sav->ivlen ? sav->ivlen : 8); printf("\n"); } - if (sav->replay[0] != NULL) + if (sav->replay[0] != NULL) { kdebug_secreplay(sav->replay[0]); - if (sav->lft_c != NULL) + } + if (sav->lft_c != NULL) { kdebug_sadb_lifetime((struct sadb_ext *)sav->lft_c); - if (sav->lft_h != NULL) + } + if (sav->lft_h != NULL) { kdebug_sadb_lifetime((struct sadb_ext *)sav->lft_h); - if (sav->lft_s != NULL) + } + if (sav->lft_s != NULL) { kdebug_sadb_lifetime((struct sadb_ext *)sav->lft_s); + } #if notyet /* XXX: misc[123] ? */ @@ -637,15 +659,17 @@ kdebug_secasv(sav) static void kdebug_secreplay(rpl) - struct secreplay *rpl; +struct secreplay *rpl; { - int len, l; + size_t len; + int l; /* sanity check */ - if (rpl == NULL) + if (rpl == NULL) { panic("kdebug_secreplay: NULL pointer was passed.\n"); + } - printf(" secreplay{ count=%u wsize=%u seq=%u lastseq=%u", + printf(" secreplay{ count=%u wsize=%zu seq=%u lastseq=%u", rpl->count, rpl->wsize, rpl->seq, rpl->lastseq); if (rpl->bitmap == NULL) { @@ -656,8 +680,9 @@ kdebug_secreplay(rpl) printf("\n bitmap { "); for (len = 0; len < rpl->wsize; len++) { - for (l = 7; l >= 0; l--) + for (l = 7; l >= 0; l--) { printf("%u", (((rpl->bitmap)[len] >> l) & 1) ? 
1 : 0); + } } printf(" }\n"); @@ -666,11 +691,12 @@ kdebug_secreplay(rpl) void kdebug_mbufhdr(m) - struct mbuf *m; +struct mbuf *m; { /* sanity check */ - if (m == NULL) + if (m == NULL) { return; + } printf("mbuf(0x%llx){ m_next:0x%llx m_nextpkt:0x%llx m_data:0x%llx " "m_len:%d m_type:0x%02x m_flags:0x%02x }\n", @@ -688,11 +714,11 @@ kdebug_mbufhdr(m) if (m->m_flags & M_EXT) { printf(" m_ext{ ext_buf:0x%llx ext_free:0x%llx " - "ext_size:%u ext_ref:0x%llx }\n", - (uint64_t)VM_KERNEL_ADDRPERM(m->m_ext.ext_buf), - (uint64_t)VM_KERNEL_ADDRPERM(m_get_ext_free(m)), - m->m_ext.ext_size, - (uint64_t)VM_KERNEL_ADDRPERM(m_get_rfa(m)); + "ext_size:%u ext_ref:0x%llx }\n", + (uint64_t)VM_KERNEL_ADDRPERM(m->m_ext.ext_buf), + (uint64_t)VM_KERNEL_ADDRPERM(m_get_ext_free(m)), + m->m_ext.ext_size, + (uint64_t)VM_KERNEL_ADDRPERM(m_get_rfa(m))); } return; @@ -700,7 +726,7 @@ kdebug_mbufhdr(m) void kdebug_mbuf(m0) - struct mbuf *m0; +struct mbuf *m0; { struct mbuf *m = m0; int i, j; @@ -709,10 +735,12 @@ kdebug_mbuf(m0) kdebug_mbufhdr(m); printf(" m_data:\n"); for (i = 0; i < m->m_len; i++) { - if (i && i % 32 == 0) + if (i && i % 32 == 0) { printf("\n"); - if (i % 4 == 0) + } + if (i % 4 == 0) { printf(" "); + } printf("%02x", mtod(m, u_char *)[i]); j++; } @@ -725,16 +753,15 @@ kdebug_mbuf(m0) void kdebug_sockaddr(addr) - struct sockaddr *addr; +struct sockaddr *addr; { struct sockaddr_in *sin4; -#ifdef INET6 struct sockaddr_in6 *sin6; -#endif /* sanity check */ - if (addr == NULL) + if (addr == NULL) { panic("kdebug_sockaddr: NULL pointer was passed.\n"); + } /* NOTE: We deal with port number as host byte order. */ printf("sockaddr{ len=%u family=%u", addr->sa_len, addr->sa_family); @@ -745,7 +772,6 @@ kdebug_sockaddr(addr) printf(" port=%u\n", ntohs(sin4->sin_port)); ipsec_hexdump((caddr_t)&sin4->sin_addr, sizeof(sin4->sin_addr)); break; -#ifdef INET6 case AF_INET6: sin6 = (struct sockaddr_in6 *)addr; printf(" port=%u\n", ntohs(sin6->sin6_port)); @@ -754,7 +780,6 @@ kdebug_sockaddr(addr) ipsec_hexdump((caddr_t)&sin6->sin6_addr, sizeof(sin6->sin6_addr)); break; -#endif } printf(" }\n"); @@ -764,13 +789,14 @@ kdebug_sockaddr(addr) void ipsec_bindump(buf, len) - caddr_t buf; - int len; +caddr_t buf; +int len; { int i; - for (i = 0; i < len; i++) + for (i = 0; i < len; i++) { printf("%c", (unsigned char)buf[i]); + } return; } @@ -778,18 +804,24 @@ ipsec_bindump(buf, len) void ipsec_hexdump(buf, len) - caddr_t buf; - int len; +caddr_t buf; +int len; { int i; for (i = 0; i < len; i++) { - if (i != 0 && i % 32 == 0) printf("\n"); - if (i % 4 == 0) printf(" "); + if (i != 0 && i % 32 == 0) { + printf("\n"); + } + if (i % 4 == 0) { + printf(" "); + } printf("%02x", (unsigned char)buf[i]); } #if 0 - if (i % 32 != 0) printf("\n"); + if (i % 32 != 0) { + printf("\n"); + } #endif return; diff --git a/bsd/netkey/keydb.c b/bsd/netkey/keydb.c index 5292414f1..b309e656c 100644 --- a/bsd/netkey/keydb.c +++ b/bsd/netkey/keydb.c @@ -197,7 +197,7 @@ struct secasvar *p; * secreplay management */ struct secreplay * -keydb_newsecreplay(size_t wsize) +keydb_newsecreplay(u_int8_t wsize) { struct secreplay *p; diff --git a/bsd/netkey/keydb.h b/bsd/netkey/keydb.h index 92e7e9655..b2e775d8f 100644 --- a/bsd/netkey/keydb.h +++ b/bsd/netkey/keydb.h @@ -59,10 +59,6 @@ struct secashead { struct secasindex saidx; - struct sadb_ident *idents; /* source identity */ - struct sadb_ident *identd; /* destination identity */ - /* XXX I don't know how to use them. 
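/*
 * Illustrative sketch (not part of the patch): kdebug_secreplay() prints the
 * anti-replay window one bit at a time, most significant bit of each byte
 * first. wsize is the window size in bytes and is narrowed to u_int8_t by this
 * patch, so the whole bitmap is at most 255 bytes. print_replay_bitmap() is a
 * hypothetical stand-in.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static void
print_replay_bitmap(const uint8_t *bitmap, uint8_t wsize)
{
    printf("bitmap { ");
    for (size_t i = 0; i < wsize; i++) {
        for (int bit = 7; bit >= 0; bit--) {
            putchar(((bitmap[i] >> bit) & 1) ? '1' : '0');
        }
    }
    printf(" }\n");
}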
*/ - ifnet_t ipsec_if; u_int outgoing_if; u_int8_t dir; /* IPSEC_DIR_INBOUND or IPSEC_DIR_OUTBOUND */ @@ -74,6 +70,7 @@ struct secashead { struct route_in6 sa_route; /* route cache */ uint16_t flags; + u_int32_t use_count; }; #define MAX_REPLAY_WINDOWS 4 @@ -125,8 +122,8 @@ struct secasvar { /* replay prevention */ struct secreplay { + u_int8_t wsize; /* window size */ u_int32_t count; - u_int wsize; /* window size, i.g. 4 bytes */ u_int32_t seq; /* used by sender */ u_int32_t lastseq; /* used by sender/receiver */ caddr_t bitmap; /* used by receiver */ @@ -174,7 +171,7 @@ extern struct secashead *keydb_newsecashead(void); // extern void keydb_refsecasvar(struct secasvar *); // not used // extern void keydb_freesecasvar(struct secasvar *); // not used /* secreplay */ -extern struct secreplay *keydb_newsecreplay(size_t); +extern struct secreplay *keydb_newsecreplay(u_int8_t); extern void keydb_delsecreplay(struct secreplay *); /* secreg */ // extern struct secreg *keydb_newsecreg(void); // not used diff --git a/bsd/netkey/keysock.c b/bsd/netkey/keysock.c index 83dd1d874..038588c88 100644 --- a/bsd/netkey/keysock.c +++ b/bsd/netkey/keysock.c @@ -212,12 +212,13 @@ key_sendup0(struct rawcb *rp, struct mbuf *m, int promisc) return ENOBUFS; } m->m_pkthdr.len += sizeof(*pmsg); + VERIFY(PFKEY_UNIT64(m->m_pkthdr.len) <= UINT16_MAX); pmsg = mtod(m, struct sadb_msg *); bzero(pmsg, sizeof(*pmsg)); pmsg->sadb_msg_version = PF_KEY_V2; pmsg->sadb_msg_type = SADB_X_PROMISC; - pmsg->sadb_msg_len = PFKEY_UNIT64(m->m_pkthdr.len); + pmsg->sadb_msg_len = (u_int16_t)PFKEY_UNIT64(m->m_pkthdr.len); /* pid and seq? */ PFKEY_STAT_INCREMENT(pfkeystat.in_msgtype[pmsg->sadb_msg_type]); diff --git a/bsd/nfs/gss/gss_krb5_mech.c b/bsd/nfs/gss/gss_krb5_mech.c index 747b7fb6d..d0a36689a 100644 --- a/bsd/nfs/gss/gss_krb5_mech.c +++ b/bsd/nfs/gss/gss_krb5_mech.c @@ -93,13 +93,13 @@ static int krb5_n_fold(const void *instr, size_t len, void *foldstr, size_t size size_t gss_mbuf_len(mbuf_t, size_t); errno_t gss_prepend_mbuf(mbuf_t *, uint8_t *, size_t); errno_t gss_append_mbuf(mbuf_t, uint8_t *, size_t); -errno_t gss_strip_mbuf(mbuf_t, ssize_t); -int mbuf_walk(mbuf_t, size_t, size_t, size_t, int (*)(void *, uint8_t *, uint32_t), void *); +errno_t gss_strip_mbuf(mbuf_t, int); +int mbuf_walk(mbuf_t, size_t, size_t, size_t, int (*)(void *, uint8_t *, size_t), void *); void do_crypt_init(crypt_walker_ctx_t, int, crypto_ctx_t, cccbc_ctx *); -int do_crypt(void *, uint8_t *, uint32_t); +int do_crypt(void *, uint8_t *, size_t); void do_hmac_init(hmac_walker_ctx_t, crypto_ctx_t, void *); -int do_hmac(void *, uint8_t *, uint32_t); +int do_hmac(void *, uint8_t *, size_t); void krb5_make_usage(uint32_t, uint8_t, uint8_t[KRB5_USAGE_LEN]); void krb5_key_derivation(crypto_ctx_t, const void *, size_t, void **, size_t); @@ -107,9 +107,9 @@ void cc_key_schedule_create(crypto_ctx_t); void gss_crypto_ctx_free(crypto_ctx_t); int gss_crypto_ctx_init(struct crypto_ctx *, lucid_context_t); -errno_t krb5_crypt_mbuf(crypto_ctx_t, mbuf_t *, uint32_t, int, cccbc_ctx *); +errno_t krb5_crypt_mbuf(crypto_ctx_t, mbuf_t *, size_t, int, cccbc_ctx *); int krb5_mic(crypto_ctx_t, gss_buffer_t, gss_buffer_t, gss_buffer_t, uint8_t *, int *, int, int); -int krb5_mic_mbuf(crypto_ctx_t, gss_buffer_t, mbuf_t, uint32_t, uint32_t, gss_buffer_t, uint8_t *, int *, int, int); +int krb5_mic_mbuf(crypto_ctx_t, gss_buffer_t, mbuf_t, size_t, size_t, gss_buffer_t, uint8_t *, int *, int, int); uint32_t gss_krb5_cfx_get_mic(uint32_t *, gss_ctx_id_t, gss_qop_t, gss_buffer_t, 
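/*
 * Illustrative sketch (not part of the patch): sadb_msg_len is a 16-bit count
 * of 8-byte units, so key_sendup0() now verifies the packet length fits before
 * narrowing the value. The same check-then-cast pattern recurs throughout this
 * patch (read_random sizes, mbuf_adj lengths, record markers). set_msg_len()
 * is a hypothetical stand-in that fails instead of asserting.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define PFKEY_UNIT64(bytes) ((bytes) >> 3)

static bool
set_msg_len(uint16_t *msg_len_units, size_t pkthdr_len)
{
    size_t units = PFKEY_UNIT64(pkthdr_len);
    if (units > UINT16_MAX) {
        return false;           /* the kernel VERIFY()s this instead of failing */
    }
    *msg_len_units = (uint16_t)units;
    return true;
}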
gss_buffer_t); uint32_t gss_krb5_cfx_verify_mic(uint32_t *, gss_ctx_id_t, gss_buffer_t, gss_buffer_t, gss_qop_t *); @@ -405,7 +405,7 @@ gss_append_mbuf(mbuf_t chain, uint8_t *bytes, size_t size) } errno_t -gss_strip_mbuf(mbuf_t chain, ssize_t size) +gss_strip_mbuf(mbuf_t chain, int size) { if (chain == NULL) { return EINVAL; @@ -428,7 +428,7 @@ gss_strip_mbuf(mbuf_t chain, ssize_t size) * padding should be done before calling this routine. */ int -mbuf_walk(mbuf_t mbp, size_t offset, size_t len, size_t blocksize, int (*crypto_fn)(void *, uint8_t *data, uint32_t length), void *ctx) +mbuf_walk(mbuf_t mbp, size_t offset, size_t len, size_t blocksize, int (*crypto_fn)(void *, uint8_t *data, size_t length), void *ctx) { mbuf_t mb; size_t mlen, residue; @@ -525,18 +525,19 @@ mbuf_walk(mbuf_t mbp, size_t offset, size_t len, size_t blocksize, int (*crypto_ void do_crypt_init(crypt_walker_ctx_t wctx, int encrypt, crypto_ctx_t cctx, cccbc_ctx *ks) { + memset(wctx, 0, sizeof(*wctx)); + wctx->length = 0; wctx->ccmode = encrypt ? cctx->enc_mode : cctx->dec_mode; - wctx->crypt_ctx = ks; MALLOC(wctx->iv, cccbc_iv *, wctx->ccmode->block_size, M_TEMP, M_WAITOK | M_ZERO); cccbc_set_iv(wctx->ccmode, wctx->iv, NULL); } int -do_crypt(void *walker, uint8_t *data, uint32_t len) +do_crypt(void *walker, uint8_t *data, size_t len) { struct crypt_walker_ctx *wctx = (crypt_walker_ctx_t)walker; - uint32_t nblocks; + size_t nblocks; nblocks = len / wctx->ccmode->block_size; assert(len % wctx->ccmode->block_size == 0); @@ -557,7 +558,7 @@ do_hmac_init(hmac_walker_ctx_t wctx, crypto_ctx_t cctx, void *key) } int -do_hmac(void *walker, uint8_t *data, uint32_t len) +do_hmac(void *walker, uint8_t *data, size_t len) { hmac_walker_ctx_t wctx = (hmac_walker_ctx_t)walker; @@ -614,7 +615,7 @@ krb5_mic(crypto_ctx_t ctx, gss_buffer_t header, gss_buffer_t bp, gss_buffer_t tr int krb5_mic_mbuf(crypto_ctx_t ctx, gss_buffer_t header, - mbuf_t mbp, uint32_t offset, uint32_t len, gss_buffer_t trailer, uint8_t *mic, int *verify, int ikey, int reverse) + mbuf_t mbp, size_t offset, size_t len, gss_buffer_t trailer, uint8_t *mic, int *verify, int ikey, int reverse) { struct hmac_walker_ctx wctx; uint8_t digest[ctx->di->output_size]; @@ -668,13 +669,13 @@ krb5_mic_mbuf(crypto_ctx_t ctx, gss_buffer_t header, errno_t /* __attribute__((optnone)) */ -krb5_crypt_mbuf(crypto_ctx_t ctx, mbuf_t *mbp, uint32_t len, int encrypt, cccbc_ctx *ks) +krb5_crypt_mbuf(crypto_ctx_t ctx, mbuf_t *mbp, size_t len, int encrypt, cccbc_ctx *ks) { struct crypt_walker_ctx wctx; const struct ccmode_cbc *ccmode = encrypt ? ctx->enc_mode : ctx->dec_mode; size_t plen = len; size_t cts_len = 0; - mbuf_t mb, lmb; + mbuf_t mb, lmb = NULL; int error; if (!(ctx->flags & CRYPTO_KS_ALLOCED)) { @@ -700,7 +701,7 @@ krb5_crypt_mbuf(crypto_ctx_t ctx, mbuf_t *mbp, uint32_t len, int encrypt, cccbc_ plen = ccmode->block_size; } else { /* determine where the last two blocks are */ - uint32_t r = len % ccmode->block_size; + size_t r = len % ccmode->block_size; cts_len = r ? 
r + ccmode->block_size : 2 * ccmode->block_size; plen = len - cts_len; @@ -796,7 +797,7 @@ rr13(unsigned char *buf, size_t len) s2 = 8 - s1; } b2 = (b1 + 1) % bytes; - buf[i] = (tmp[b1] << s1) | (tmp[b2] >> s2); + buf[i] = 0xff & ((tmp[b1] << s1) | (tmp[b2] >> s2)); } } return 0; @@ -829,7 +830,7 @@ krb5_n_fold(const void *instr, size_t len, void *foldstr, size_t size) /* if len < size we need at most N * len bytes, ie < 2 * size; * if len > size we need at most 2 * len */ int ret = 0; - size_t maxlen = 2 * max(size, len); + size_t maxlen = 2 * lmax(size, len); size_t l = 0; unsigned char tmp[maxlen]; unsigned char buf[len]; @@ -949,6 +950,7 @@ cc_key_schedule_create(crypto_ctx_t ctx) cccbc_init(ctx->dec_mode, ctx->ks.dec, ctx->keylen, ctx->key); } } + OS_FALLTHROUGH; case 1: { if (ctx->ks.enc == NULL) { krb5_make_usage(lctx->initiate ? @@ -1355,7 +1357,8 @@ krb5_cfx_crypt_mbuf(crypto_ctx_t ctx, mbuf_t *mbp, size_t *len, int encrypt, int errno_t error; if (encrypt) { - read_random(confounder, ccmode->block_size); + assert(ccmode->block_size <= UINT_MAX); + read_random(confounder, (u_int)ccmode->block_size); error = gss_prepend_mbuf(mbp, confounder, ccmode->block_size); if (error) { return error; @@ -1436,7 +1439,8 @@ krb5_cfx_crypt_mbuf(crypto_ctx_t ctx, mbuf_t *mbp, size_t *len, int encrypt, int return EBADRPC; } /* strip off the confounder */ - error = gss_strip_mbuf(*mbp, ccmode->block_size); + assert(ccmode->block_size <= INT_MAX); + error = gss_strip_mbuf(*mbp, (int)ccmode->block_size); if (error) { return error; } @@ -1480,12 +1484,13 @@ gss_krb5_cfx_wrap_mbuf(uint32_t *minor, /* minor_status */ lctx->send_seq++; if (conf_flag) { uint8_t pad[cctx->mpad]; - uint16_t plen = 0; + size_t plen = 0; token.Flags |= CFXSealed; memset(pad, 0, cctx->mpad); if (cctx->mpad > 1) { - plen = htons(cctx->mpad - ((len + sizeof(gss_cfx_wrap_token_desc)) % cctx->mpad)); + size_t val = cctx->mpad - ((len + sizeof(gss_cfx_wrap_token_desc)) % cctx->mpad); + plen = sizeof(val) > sizeof(uint32_t) ? htonll(val) : htonl(val); token.EC[0] = ((plen >> 8) & 0xff); token.EC[1] = (plen & 0xff); } @@ -1514,7 +1519,7 @@ gss_krb5_cfx_wrap_mbuf(uint32_t *minor, /* minor_status */ if (error == 0) { error = gss_append_mbuf(*mbp, digest, cctx->digest_size); if (error == 0) { - uint16_t plen = htons(cctx->digest_size); + uint32_t plen = htonl(cctx->digest_size); memcpy(token.EC, &plen, 2); error = gss_prepend_mbuf(mbp, (uint8_t *)&token, sizeof(gss_cfx_wrap_token_desc)); } @@ -1552,7 +1557,7 @@ gss_krb5_cfx_unwrap_mbuf(uint32_t * minor, /* minor_status */ lucid_context_t lctx = &ctx->gss_lucid_ctx; crypto_ctx_t cctx = &ctx->gss_cryptor; int error, conf; - uint16_t ec = 0, rrc = 0; + uint32_t ec = 0, rrc = 0; uint64_t seq; int reverse = (*qop == GSS_C_QOP_REVERSE); int initiate = lctx->initiate ? (reverse ? 0 : 1) : (reverse ? 1 : 0); @@ -1748,7 +1753,7 @@ gss_krb5_der_length_get(uint8_t **pp) * Determine size of ASN.1 DER length */ static int -gss_krb5_der_length_size(int len) +gss_krb5_der_length_size(size_t len) { return len < (1 << 7) ? 
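/*
 * Illustrative sketch (not part of the patch): krb5_crypt_mbuf() implements
 * ciphertext stealing (CTS) on top of plain CBC, so it has to locate the
 * trailing region that gets the CTS treatment. Everything before plen is whole
 * CBC blocks; the last cts_len bytes (one full block plus the short tail, or
 * exactly two blocks when the data is block-aligned) are handled by the CTS
 * swap. The size_t widths follow the patch; the single-block case is an
 * assumption of this sketch.
 */
#include <stddef.h>

struct cts_split {
    size_t plen;        /* bytes encrypted as ordinary CBC blocks */
    size_t cts_len;     /* trailing bytes handled by ciphertext stealing */
};

static struct cts_split
cts_layout(size_t len, size_t block_size)
{
    struct cts_split s;
    size_t r = len % block_size;

    if (len <= block_size) {
        /* a single (or partial) block needs no stealing */
        s.cts_len = 0;
        s.plen = len;
    } else {
        s.cts_len = r ? r + block_size : 2 * block_size;
        s.plen = len - s.cts_len;
    }
    return s;
}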
1 : @@ -1761,7 +1766,7 @@ gss_krb5_der_length_size(int len) * Encode an ASN.1 DER length field */ static void -gss_krb5_der_length_put(uint8_t **pp, int len) +gss_krb5_der_length_put(uint8_t **pp, size_t len) { int sz = gss_krb5_der_length_size(len); uint8_t *p = *pp; @@ -2084,7 +2089,8 @@ gss_krb5_3des_wrap_mbuf(uint32_t *minor, header.value = &tokbody; /* Prepend confounder */ - read_random(confounder, ccmode->block_size); + assert(ccmode->block_size <= UINT_MAX); + read_random(confounder, (u_int)ccmode->block_size); *minor = gss_prepend_mbuf(mbp, confounder, ccmode->block_size); if (*minor) { return GSS_S_FAILURE; @@ -2133,7 +2139,7 @@ gss_krb5_3des_unwrap_mbuf(uint32_t *minor, { crypto_ctx_t cctx = &ctx->gss_cryptor; const struct ccmode_cbc *ccmode = cctx->dec_mode; - size_t length = 0, offset; + size_t length = 0, offset = 0; gss_buffer_desc hash; uint8_t hashval[cctx->digest_size]; gss_buffer_desc itoken; @@ -2222,7 +2228,8 @@ gss_krb5_3des_unwrap_mbuf(uint32_t *minor, /* Strip the confounder and trailing pad bytes */ gss_strip_mbuf(smb, -padlen); - gss_strip_mbuf(smb, ccmode->block_size); + assert(ccmode->block_size <= INT_MAX); + gss_strip_mbuf(smb, (int)ccmode->block_size); if (*mbp != smb) { mbuf_freem(*mbp); @@ -2461,7 +2468,7 @@ gss_krb5_wrap_mbuf(uint32_t *minor, /* minor_status */ size_t len, /* length */ int *conf_state /* conf state */) { - uint32_t major, minor_stat = 0; + uint32_t major = GSS_S_FAILURE, minor_stat = 0; mbuf_t smb, tmb; int conf_val = 0; @@ -2516,7 +2523,7 @@ gss_krb5_unwrap_mbuf(uint32_t * minor, /* minor_status */ int *conf_flag, /* conf_state */ gss_qop_t *qop /* qop state */) { - uint32_t major, minor_stat = 0; + uint32_t major = GSS_S_FAILURE, minor_stat = 0; gss_qop_t qop_val = GSS_C_QOP_DEFAULT; int conf_val = 0; mbuf_t smb, tmb; @@ -2567,7 +2574,7 @@ gss_krb5_unwrap_mbuf(uint32_t * minor, /* minor_status */ #include static int -xdr_lucid_context(void *data, size_t length, lucid_context_t lctx) +xdr_lucid_context(void *data, uint32_t length, lucid_context_t lctx) { struct xdrbuf xb; int error = 0; diff --git a/bsd/nfs/gss/gss_krb5_mech.h b/bsd/nfs/gss/gss_krb5_mech.h index b900347ce..bf00a65a2 100644 --- a/bsd/nfs/gss/gss_krb5_mech.h +++ b/bsd/nfs/gss/gss_krb5_mech.h @@ -234,17 +234,17 @@ struct key_schedule { typedef struct crypto_ctx { uint32_t etype; - uint32_t mpad; /* Message padding */ uint32_t flags; + size_t mpad; /* Message padding */ lck_mtx_t *lock; lucid_context_t gss_ctx; /* Back pointer to lucid context */ - uint32_t keylen; void *key; /* Points to session key from lucid context */ const struct ccdigest_info *di; const struct ccmode_cbc *enc_mode; const struct ccmode_cbc *dec_mode; struct key_schedule ks; uint32_t digest_size; + uint32_t keylen; void *ckey[2]; /* Derived checksum key. 
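/*
 * Illustrative sketch (not part of the patch): the 3DES token headers built
 * here use ASN.1 DER definite lengths. Lengths below 128 fit in one byte; a
 * longer value is written as 0x80|n followed by n big-endian length octets.
 * The patch widens these helpers to take size_t. der_length_size() and
 * der_put_length() below are hypothetical, self-contained equivalents.
 */
#include <stddef.h>
#include <stdint.h>

static size_t
der_length_size(size_t len)
{
    size_t n = 1;
    if (len < 0x80) {
        return 1;               /* short form: the length itself */
    }
    while (len) {               /* long form: one byte per significant octet, plus lead byte */
        len >>= 8;
        n++;
    }
    return n;
}

static void
der_put_length(uint8_t **pp, size_t len)
{
    uint8_t *p = *pp;
    size_t sz = der_length_size(len);

    if (sz == 1) {
        *p++ = (uint8_t)len;
    } else {
        *p++ = (uint8_t)(0x80 | (sz - 1));      /* lead byte: count of length octets */
        for (size_t i = sz - 1; i > 0; i--) {
            p[i - 1] = (uint8_t)(len & 0xff);   /* fill big-endian from the back */
            len >>= 8;
        }
        p += sz - 1;
    }
    *pp = p;
}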
Same as key for DES3 */ } *crypto_ctx_t; diff --git a/bsd/nfs/krpc_subr.c b/bsd/nfs/krpc_subr.c index 345762a0b..763051ec0 100644 --- a/bsd/nfs/krpc_subr.c +++ b/bsd/nfs/krpc_subr.c @@ -358,7 +358,9 @@ krpc_call( if (sotype == SOCK_STREAM) { /* first, fill in RPC record marker */ u_int32_t *recmark = mbuf_data(mhead); - *recmark = htonl(0x80000000 | (mbuf_pkthdr_len(mhead) - 4)); + size_t pkthdr_len = mbuf_pkthdr_len(mhead); + assert(pkthdr_len <= UINT32_MAX); + *recmark = htonl(0x80000000 | (uint32_t)(pkthdr_len - 4)); call = (struct rpc_call *)(recmark + 1); } else { call = mbuf_data(mhead); @@ -393,7 +395,12 @@ krpc_call( msg.msg_namelen = 0; } else { msg.msg_name = mbuf_data(nam); - msg.msg_namelen = mbuf_len(nam); + if (mbuf_len(nam) > UINT_MAX) { + printf("krpc_call: mbuf_len is too long: EINVAL\n"); + error = EINVAL; + goto out; + } + msg.msg_namelen = (uint32_t)mbuf_len(nam); } error = sock_sendmbuf(so, &msg, m, 0, 0); if (error) { @@ -447,7 +454,7 @@ krpc_call( if (error) { goto out; } - len = ntohl(len) & ~0x80000000; + len = ntohll(len) & ~0x80000000; /* * This is SERIOUS! We are out of sync with the sender * and forcing a disconnect/reconnect is all I can do. @@ -592,7 +599,13 @@ gotreply: len += ntohl(reply->rp_u.rpu_ok.rp_auth.rp_alen); len = (len + 3) & ~3; /* XXX? */ } - mbuf_adj(m, len); + + if (len > INT_MAX) { + error = EINVAL; + goto out; + } + + mbuf_adj(m, (int)len); /* result */ *data = m; diff --git a/bsd/nfs/nfs.h b/bsd/nfs/nfs.h index 60828d4c0..a27fa20e5 100644 --- a/bsd/nfs/nfs.h +++ b/bsd/nfs/nfs.h @@ -91,6 +91,7 @@ extern int nfs_ticks; #define NFS_MAXATTRTIMO 60 #define NFS_MINDIRATTRTIMO 5 /* directory attribute cache timeout in sec */ #define NFS_MAXDIRATTRTIMO 60 +#define NFS_MAXPORT 0xffff #define NFS_IOSIZE (1024 * 1024) /* suggested I/O size */ #define NFS_RWSIZE 32768 /* Def. read/write data size <= 32K */ #define NFS_WSIZE NFS_RWSIZE /* Def. write data size <= 32K */ @@ -128,6 +129,8 @@ extern int nfs_ticks; #define NFSRV_NDMAXDATA(n) \ (((n)->nd_vers == NFS_VER3) ? (((n)->nd_nam2) ? \ NFS_MAXDGRAMDATA : NFSRV_MAXDATA) : NFS_V2MAXDATA) +#define NFS_PORT_INVALID(port) \ + (((port) > NFS_MAXPORT) || ((port) < 0)) /* * The IO_METASYNC flag should be implemented for local file systems. @@ -223,6 +226,25 @@ extern int nfs_ticks; #define NFS_LOCK_MODE_DISABLED 1 /* do not support advisory file locking */ #define NFS_LOCK_MODE_LOCAL 2 /* perform advisory file locking locally */ +#define NFS_STRLEN_INT(str) \ + (int)strnlen(str, INT_MAX) +#define NFS_UIO_ADDIOV(a_uio, a_baseaddr, a_length) \ + assert(a_length <= UINT32_MAX); uio_addiov(a_uio, a_baseaddr, (uint32_t)(a_length)); +#define NFS_BZERO(off, bytes) \ + do { \ + uint32_t bytes32 = bytes > UINT32_MAX ? 
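/*
 * Illustrative sketch (not part of the patch): over TCP, ONC RPC messages are
 * prefixed with a 4-byte record mark whose high bit flags the last fragment
 * and whose low 31 bits carry the fragment length. krpc_call() now checks that
 * the mbuf chain length fits in 32 bits before building the mark.
 * make_recmark() is a hypothetical stand-in using the same arithmetic.
 */
#include <arpa/inet.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

static bool
make_recmark(size_t pkthdr_len, uint32_t *recmark_out)
{
    if (pkthdr_len > UINT32_MAX || pkthdr_len < 4) {
        return false;
    }
    /* length excludes the 4-byte marker itself; high bit = last fragment */
    *recmark_out = htonl(0x80000000u | (uint32_t)(pkthdr_len - 4));
    return true;
}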
UINT32_MAX : (uint32_t)(bytes); \ + bzero(off, bytes32); \ + if (bytes > UINT32_MAX) { \ + bzero(off + bytes32, (uint32_t)(bytes - UINT32_MAX)); \ + } \ + } while(0); +#define NFS_ZFREE(zone, ptr) \ + do { \ + if ((ptr)) { \ + zfree((zone), (ptr)); \ + (ptr) = NULL; \ + } \ + } while (0); \ /* Supported encryption types for kerberos session keys */ typedef enum nfs_supported_kerberos_etypes { @@ -250,7 +272,7 @@ struct nfs_args { #else struct sockaddr *addr; /* file server address */ #endif - int addrlen; /* length of address */ + uint8_t addrlen; /* length of address */ int sotype; /* Socket type */ int proto; /* and Protocol */ #ifdef KERNEL @@ -298,7 +320,7 @@ struct nfs_args { struct user_nfs_args { int version; /* args structure version number */ user_addr_t addr __attribute((aligned(8))); /* file server address */ - int addrlen; /* length of address */ + uint8_t addrlen; /* length of address */ int sotype; /* Socket type */ int proto; /* and Protocol */ user_addr_t fh __attribute((aligned(8))); /* File handle to be mounted */ @@ -417,7 +439,7 @@ struct nfs_exphandle { uint32_t nxh_expid; /* Export ID */ uint16_t nxh_flags; /* export handle flags */ uint8_t nxh_reserved; /* future use */ - uint8_t nxh_fidlen; /* length of File ID */ + uint32_t nxh_fidlen; /* length of File ID */ }; /* nxh_flags */ @@ -517,7 +539,7 @@ struct user_nfs_export_args { /* descriptor describing following records */ struct nfs_export_stat_desc { uint32_t rec_vers; /* version of export stat records */ - uint32_t rec_count; /* total record count */ + uint64_t rec_count; /* total record count */ }__attribute__((__packed__)); /* export stat record containing path and stat counters */ @@ -542,8 +564,8 @@ struct nfs_user_stat_user_rec { uint64_t ops; uint64_t bytes_read; uint64_t bytes_written; - uint32_t tm_start; - uint32_t tm_last; + time_t tm_start; + time_t tm_last; }__attribute__((__packed__)); /* Active user list path record format */ @@ -621,8 +643,8 @@ struct nfs_user_stat_node { uint64_t ops; uint64_t bytes_read; uint64_t bytes_written; - uint32_t tm_start; - uint32_t tm_last; + time_t tm_start; + time_t tm_last; }; /* Hash table for active user nodes */ @@ -790,11 +812,12 @@ struct nfs_testmapid { /* * fs.nfs sysctl(3) identifiers */ -#define NFS_NFSSTATS 1 /* struct: struct nfsstats */ -#define NFS_EXPORTSTATS 3 /* gets exported directory stats */ -#define NFS_USERSTATS 4 /* gets exported directory active user stats */ -#define NFS_USERCOUNT 5 /* gets current count of active nfs users */ -#define NFS_MOUNTINFO 6 /* gets information about an NFS mount */ +#define NFS_NFSSTATS 1 /* struct: struct nfsstats */ +#define NFS_EXPORTSTATS 3 /* gets exported directory stats */ +#define NFS_USERSTATS 4 /* gets exported directory active user stats */ +#define NFS_USERCOUNT 5 /* gets current count of active nfs users */ +#define NFS_MOUNTINFO 6 /* gets information about an NFS mount */ +#define NFS_NFSZEROSTATS 7 /* zero nfs statistics */ #ifndef NFS_WDELAYHASHSIZ #define NFS_WDELAYHASHSIZ 16 /* and with this */ @@ -819,8 +842,8 @@ struct nfs_testmapid { (int)(B), (int)(C), (int)(D), (int)(E), 0) #ifdef MALLOC_DECLARE -MALLOC_DECLARE(M_NFSREQ); MALLOC_DECLARE(M_NFSMNT); +MALLOC_DECLARE(M_NFSBIO); MALLOC_DECLARE(M_NFSDIROFF); MALLOC_DECLARE(M_NFSRVDESC); MALLOC_DECLARE(M_NFSD); @@ -866,7 +889,7 @@ struct nfsm_chain { mbuf_t nmc_mhead; /* mbuf chain head */ mbuf_t nmc_mcur; /* current mbuf */ caddr_t nmc_ptr; /* pointer into current mbuf */ - uint32_t nmc_left; /* bytes remaining in current mbuf */ + size_t nmc_left; 
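/*
 * Illustrative sketch (not part of the patch): NFS_ZFREE() frees a zone
 * element and nulls the pointer in one step so later code cannot reuse a
 * dangling pointer, and NFS_BZERO() splits zeroing into 32-bit-sized chunks
 * because the length parameter on that path is 32 bits wide. The userspace
 * stand-ins below (hypothetical names, free()/memset() instead of zfree()/
 * bzero()) show the same shape.
 */
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#define FREE_AND_NULL(ptr)          \
    do {                            \
        if ((ptr)) {                \
            free((ptr));            \
            (ptr) = NULL;           \
        }                           \
    } while (0)

/* Zero `bytes` bytes even when the count does not fit in 32 bits. */
static void
bzero_large(void *off, uint64_t bytes)
{
    uint8_t *p = off;

    while (bytes > 0) {
        uint32_t chunk = bytes > UINT32_MAX ? UINT32_MAX : (uint32_t)bytes;
        memset(p, 0, chunk);
        p += chunk;
        bytes -= chunk;
    }
}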
/* bytes remaining in current mbuf */ uint32_t nmc_flags; /* flags for this nfsm_chain */ }; #define NFSM_CHAIN_FLAG_ADD_CLUSTERS 0x1 /* always add mbuf clusters */ @@ -880,13 +903,22 @@ struct gss_seq { uint32_t gss_seqnum; }; +/** + * nfsreq callback args + */ +struct nfsreq_cbargs { + off_t offset; + size_t length; + uint32_t stategenid; +}; + /* * async NFS request callback info */ struct nfsreq_cbinfo { void (*rcb_func)(struct nfsreq *); /* async request callback function */ struct nfsbuf *rcb_bp; /* buffer I/O RPC is for */ - uint32_t rcb_args[3]; /* additional callback args */ + struct nfsreq_cbargs rcb_args; /* nfsreq callback args */ }; /* @@ -927,12 +959,12 @@ struct nfsreq { struct nfsmount *r_nmp; /* NFS mount point */ uint64_t r_xid; /* RPC transaction ID */ uint32_t r_procnum; /* NFS procedure number */ - uint32_t r_mreqlen; /* request length */ + size_t r_mreqlen; /* request length */ int r_flags; /* flags on request, see below */ int r_lflags; /* flags protected by list mutex, see below */ int r_refs; /* # outstanding references */ uint8_t r_delay; /* delay to use for jukebox error */ - uint8_t r_retry; /* max retransmission count */ + uint32_t r_retry; /* max retransmission count */ uint8_t r_rexmit; /* current retrans count */ int r_rtt; /* RTT for rpc */ thread_t r_thread; /* thread that did I/O system call */ @@ -942,7 +974,7 @@ struct nfsreq { time_t r_resendtime; /* time of next jukebox error resend */ struct nfs_gss_clnt_ctx *r_gss_ctx; /* RPCSEC_GSS context */ SLIST_HEAD(, gss_seq) r_gss_seqlist; /* RPCSEC_GSS sequence numbers */ - uint32_t r_gss_argoff; /* RPCSEC_GSS offset to args */ + size_t r_gss_argoff; /* RPCSEC_GSS offset to args */ uint32_t r_gss_arglen; /* RPCSEC_GSS arg length */ uint32_t r_auth; /* security flavor request sent with */ uint32_t *r_wrongsec; /* wrongsec: other flavors to try */ @@ -993,7 +1025,7 @@ extern lck_grp_t *nfs_request_grp; #define RL_WAITING 0x0002 /* Someone waiting for lock. */ #define RL_QUEUED 0x0004 /* request is on the queue */ -extern u_int32_t nfs_xid, nfs_xidwrap; +extern u_int64_t nfs_xid, nfs_xidwrap; extern int nfs_iosize, nfs_allow_async, nfs_statfs_rate_limit; extern int nfs_access_cache_timeout, nfs_access_delete, nfs_access_dotzfs, nfs_access_for_getattr; extern int nfs_lockd_mounts, nfs_lockd_request_sent; @@ -1001,6 +1033,7 @@ extern int nfs_tprintf_initial_delay, nfs_tprintf_delay; extern int nfsiod_thread_count, nfsiod_thread_max, nfs_max_async_writes; extern int nfs_idmap_ctrl, nfs_callback_port; extern int nfs_is_mobile, nfs_readlink_nocache, nfs_root_steals_ctx; +extern uint32_t nfs_tcp_sockbuf; extern uint32_t nfs_squishy_flags; extern uint32_t nfs_debug_ctl; @@ -1040,13 +1073,14 @@ struct nfsrv_sock { mbuf_t ns_frag; int ns_flag; int ns_sotype; - int ns_cc; - int ns_reclen; + size_t ns_cc; + size_t ns_reclen; int ns_reccnt; - u_int32_t ns_sref; + int ns_sobufsize; + u_int32_t ns_sref; time_t ns_timestamp; /* socket timestamp */ lck_mtx_t ns_wgmutex; /* mutex for write gather fields */ - u_quad_t ns_wgtime; /* next Write deadline (usec) */ + time_t ns_wgtime; /* next Write deadline (usec) */ LIST_HEAD(, nfsrv_descript) ns_tq; /* Write gather lists */ LIST_HEAD(nfsrv_wg_delayhash, nfsrv_descript) ns_wdelayhashtbl[NFS_WDELAYHASHSIZ]; }; @@ -1104,7 +1138,7 @@ struct nfsd { * Some fields are used only when write request gathering is performed. 
*/ struct nfsrv_descript { - u_quad_t nd_time; /* Write deadline (usec) */ + time_t nd_time; /* Write deadline (usec) */ off_t nd_off; /* Start byte offset */ off_t nd_eoff; /* and end byte offset */ LIST_ENTRY(nfsrv_descript) nd_hash; /* Hash list */ @@ -1119,7 +1153,7 @@ struct nfsrv_descript { int nd_vers; /* NFS version */ int nd_len; /* Length of this write */ int nd_repstat; /* Reply status */ - u_int32_t nd_retxid; /* Reply xid */ + u_int32_t nd_retxid; /* Reply xid */ struct timeval nd_starttime; /* Time RPC initiated */ struct nfs_filehandle nd_fh; /* File handle */ uint32_t nd_sec; /* Security flavor */ @@ -1211,7 +1245,7 @@ int nfs_rpc_record_read(socket_t, struct nfs_rpc_record_state *, int, int *, int nfs_getattr(nfsnode_t, struct nfs_vattr *, vfs_context_t, int); int nfs_getattrcache(nfsnode_t, struct nfs_vattr *, int); int nfs_loadattrcache(nfsnode_t, struct nfs_vattr *, u_int64_t *, int); -int nfs_attrcachetimeout(nfsnode_t); +long nfs_attrcachetimeout(nfsnode_t); int nfs_buf_page_inval(vnode_t vp, off_t offset); int nfs_vinvalbuf(vnode_t, int, vfs_context_t, int); @@ -1250,8 +1284,8 @@ int nfs_portmap_lookup(struct nfsmount *, vfs_context_t, struct sockaddr *, void nfs_location_next(struct nfs_fs_locations *, struct nfs_location_index *); int nfs_location_index_cmp(struct nfs_location_index *, struct nfs_location_index *); -void nfs_location_mntfromname(struct nfs_fs_locations *, struct nfs_location_index, char *, int, int); -int nfs_socket_create(struct nfsmount *, struct sockaddr *, int, in_port_t, uint32_t, uint32_t, int, struct nfs_socket **); +void nfs_location_mntfromname(struct nfs_fs_locations *, struct nfs_location_index, char *, size_t, int); +int nfs_socket_create(struct nfsmount *, struct sockaddr *, uint8_t, in_port_t, uint32_t, uint32_t, int, struct nfs_socket **); void nfs_socket_destroy(struct nfs_socket *); void nfs_socket_options(struct nfsmount *, struct nfs_socket *); void nfs_connect_upcall(socket_t, void *, int); @@ -1265,7 +1299,7 @@ int nfs_lookitup(nfsnode_t, char *, int, vfs_context_t, nfsnode_t *); void nfs_dulookup_init(struct nfs_dulookup *, nfsnode_t, const char *, int, vfs_context_t); void nfs_dulookup_start(struct nfs_dulookup *, nfsnode_t, vfs_context_t); void nfs_dulookup_finish(struct nfs_dulookup *, nfsnode_t, vfs_context_t); -int nfs_dir_buf_cache_lookup(nfsnode_t, nfsnode_t *, struct componentname *, vfs_context_t, int); +int nfs_dir_buf_cache_lookup(nfsnode_t, nfsnode_t *, struct componentname *, vfs_context_t, int, int *); int nfs_dir_buf_search(struct nfsbuf *, struct componentname *, fhandle_t *, struct nfs_vattr *, uint64_t *, time_t *, daddr64_t *, int); void nfs_name_cache_purge(nfsnode_t, nfsnode_t, struct componentname *, vfs_context_t); @@ -1287,6 +1321,7 @@ int nfs_parsefattr(struct nfsmount *nmp, struct nfsm_chain *, int, void nfs_vattr_set_supported(uint32_t *, struct vnode_attr *); void nfs_vattr_set_bitmap(struct nfsmount *, uint32_t *, struct vnode_attr *); void nfs3_pathconf_cache(struct nfsmount *, struct nfs_fsattr *); +int nfs3_check_lockmode(struct nfsmount *, struct sockaddr *, int, int); int nfs3_mount_rpc(struct nfsmount *, struct sockaddr *, int, int, char *, vfs_context_t, int, fhandle_t *, struct nfs_sec *); void nfs3_umount_rpc(struct nfsmount *, vfs_context_t, int); void nfs_rdirplus_update_node_attrs(nfsnode_t, struct direntry *, fhandle_t *, struct nfs_vattr *, uint64_t *); @@ -1309,7 +1344,7 @@ void nfs_open_file_destroy(struct nfs_open_file *); int nfs_open_file_set_busy(struct nfs_open_file *, 
thread_t); void nfs_open_file_clear_busy(struct nfs_open_file *); void nfs_open_file_add_open(struct nfs_open_file *, uint32_t, uint32_t, int); -void nfs_open_file_remove_open_find(struct nfs_open_file *, uint32_t, uint32_t, uint32_t *, uint32_t *, int*); +void nfs_open_file_remove_open_find(struct nfs_open_file *, uint32_t, uint32_t, uint8_t *, uint8_t *, int *); void nfs_open_file_remove_open(struct nfs_open_file *, uint32_t, uint32_t); void nfs_get_stateid(nfsnode_t, thread_t, kauth_cred_t, nfs_stateid *); int nfs_check_for_locks(struct nfs_open_owner *, struct nfs_open_file *); @@ -1374,6 +1409,7 @@ int nfs_vnop_open(struct vnop_open_args *); int nfs_vnop_close(struct vnop_close_args *); int nfs_vnop_advlock(struct vnop_advlock_args *); int nfs_vnop_mmap(struct vnop_mmap_args *); +int nfs_vnop_mmap_check(struct vnop_mmap_check_args *ap); int nfs_vnop_mnomap(struct vnop_mnomap_args *); #if CONFIG_NFS4 @@ -1403,7 +1439,7 @@ int nfs4_read_rpc_async_finish(nfsnode_t, struct nfsreq *, uio_t, size_t *, int nfs4_write_rpc_async(nfsnode_t, uio_t, size_t, thread_t, kauth_cred_t, int, struct nfsreq_cbinfo *, struct nfsreq **); int nfs4_write_rpc_async_finish(nfsnode_t, struct nfsreq *, int *, size_t *, uint64_t *); int nfs4_readdir_rpc(nfsnode_t, struct nfsbuf *, vfs_context_t); -int nfs4_readlink_rpc(nfsnode_t, char *, uint32_t *, vfs_context_t); +int nfs4_readlink_rpc(nfsnode_t, char *, size_t *, vfs_context_t); int nfs4_commit_rpc(nfsnode_t, uint64_t, uint64_t, kauth_cred_t, uint64_t); int nfs4_lookup_rpc_async(nfsnode_t, char *, int, vfs_context_t, struct nfsreq **); int nfs4_lookup_rpc_async_finish(nfsnode_t, char *, int, vfs_context_t, struct nfsreq *, u_int64_t *, fhandle_t *, struct nfs_vattr *); @@ -1427,7 +1463,7 @@ int nfs3_read_rpc_async_finish(nfsnode_t, struct nfsreq *, uio_t, size_t *, int nfs3_write_rpc_async(nfsnode_t, uio_t, size_t, thread_t, kauth_cred_t, int, struct nfsreq_cbinfo *, struct nfsreq **); int nfs3_write_rpc_async_finish(nfsnode_t, struct nfsreq *, int *, size_t *, uint64_t *); int nfs3_readdir_rpc(nfsnode_t, struct nfsbuf *, vfs_context_t); -int nfs3_readlink_rpc(nfsnode_t, char *, uint32_t *, vfs_context_t); +int nfs3_readlink_rpc(nfsnode_t, char *, size_t *, vfs_context_t); int nfs3_commit_rpc(nfsnode_t, uint64_t, uint64_t, kauth_cred_t, uint64_t); int nfs3_lookup_rpc_async(nfsnode_t, char *, int, vfs_context_t, struct nfsreq **); int nfs3_lookup_rpc_async_finish(nfsnode_t, char *, int, vfs_context_t, struct nfsreq *, u_int64_t *, fhandle_t *, struct nfs_vattr *); @@ -1499,7 +1535,7 @@ int nfsrv_statfs(struct nfsrv_descript *, struct nfsrv_sock *, vfs_context_t int nfsrv_symlink(struct nfsrv_descript *, struct nfsrv_sock *, vfs_context_t, mbuf_t *); int nfsrv_write(struct nfsrv_descript *, struct nfsrv_sock *, vfs_context_t, mbuf_t *); -void nfs_interval_timer_start(thread_call_t, int); +void nfs_interval_timer_start(thread_call_t, time_t); int nfs_use_cache(struct nfsmount *); void nfs_up(struct nfsmount *, thread_t, int, const char *); void nfs_down(struct nfsmount *, thread_t, int, int, const char *, int); @@ -1547,6 +1583,19 @@ void nfs_printf(unsigned int, unsigned int, const char *, ...) 
__printflike(3, 4 void nfs_dump_mbuf(const char *, int, const char *, mbuf_t); int nfs_mountopts(struct nfsmount *, char *, int); +#if XNU_KERNEL_PRIVATE +#include + +ZONE_VIEW_DECLARE(ZV_NFSDIROFF); +extern zone_t nfs_buf_zone; +extern zone_t nfsrv_descript_zone; +extern zone_t nfsnode_zone; +extern zone_t nfs_fhandle_zone; +extern zone_t nfs_req_zone; +extern zone_t nfsrv_descript_zone; +extern zone_t nfsmnt_zone; + +#endif /* XNU_KERNEL_PRIVATE */ __END_DECLS #endif /* KERNEL */ diff --git a/bsd/nfs/nfs4_subs.c b/bsd/nfs/nfs4_subs.c index 4d15ab0e4..22627a247 100644 --- a/bsd/nfs/nfs4_subs.c +++ b/bsd/nfs/nfs4_subs.c @@ -115,7 +115,8 @@ nfs4_init_clientid(struct nfsmount *nmp) { struct nfs_client_id *ncip, *ncip2; struct sockaddr *saddr; - int error, len, len2, cmp; + int error, cmp; + long len, len2; struct vfsstatfs *vsfs; static uint8_t en0addr[6]; @@ -160,10 +161,10 @@ nfs4_init_clientid(struct nfsmount *nmp) *(uint32_t*)ncip->nci_id = 0; len = sizeof(uint32_t); - len2 = min(sizeof(en0addr), ncip->nci_idlen - len); + len2 = lmin(sizeof(en0addr), ncip->nci_idlen - len); bcopy(en0addr, &ncip->nci_id[len], len2); len += sizeof(en0addr); - len2 = min(saddr->sa_len, ncip->nci_idlen - len); + len2 = lmin(saddr->sa_len, ncip->nci_idlen - len); bcopy(saddr, &ncip->nci_id[len], len2); len += len2; if (len < ncip->nci_idlen) { @@ -241,7 +242,7 @@ nfs4_setclientid(struct nfsmount *nmp) char raddr[MAX_IPv6_STR_LEN]; char uaddr[MAX_IPv6_STR_LEN + 16]; int ualen = 0; - in_port_t port; + in_port_t port = 0; thd = current_thread(); cred = IS_VALID_CRED(nmp->nm_mcred) ? nmp->nm_mcred : vfs_context_ucred(vfs_context_kernel()); @@ -274,6 +275,9 @@ nfs4_setclientid(struct nfsmount *nmp) } else if (ss.ss_family == AF_INET6) { sinaddr = &((struct sockaddr_in6*)&ss)->sin6_addr; port = nfs4_cb_port6; + } else { + error = EINVAL; + nfsmout_if(error); } if (sinaddr && port && (inet_ntop(ss.ss_family, sinaddr, raddr, sizeof(raddr)) == raddr)) { /* assemble r_addr = universal address (nmp->nm_nso->nso_so source IP addr + port) */ @@ -482,12 +486,13 @@ out: int nfs4_secinfo_rpc(struct nfsmount *nmp, struct nfsreq_secinfo_args *siap, kauth_cred_t cred, uint32_t *sec, int *seccountp) { - int error = 0, status, nfsvers, numops, namelen, fhsize; + int error = 0, status, nfsvers, numops, fhsize; vnode_t dvp = NULLVP; nfsnode_t np, dnp; u_char *fhp; const char *vname = NULL, *name; uint64_t xid; + size_t namelen; struct nfsm_chain nmreq, nmrep; *seccountp = 0; @@ -700,7 +705,7 @@ nfs4_get_fs_locations( { int error = 0, numops, status; uint32_t bitmap[NFS_ATTR_BITMAP_LEN]; - struct nfsreq rq, *req = &rq; + struct nfsreq *req; struct nfsreq_secinfo_args si; struct nfsm_chain nmreq, nmrep; uint64_t xid; @@ -713,6 +718,7 @@ nfs4_get_fs_locations( return EINVAL; } + req = zalloc(nfs_req_zone); nfsm_chain_null(&nmreq); nfsm_chain_null(&nmrep); @@ -747,6 +753,7 @@ nfs4_get_fs_locations( nfsmout_if(error); error = nfs4_parsefattr(&nmrep, NULL, NULL, NULL, NULL, nfslsp); nfsmout: + NFS_ZFREE(nfs_req_zone, req); nfsm_chain_cleanup(&nmrep); nfsm_chain_cleanup(&nmreq); return error; @@ -1255,7 +1262,7 @@ nfs4_map_domain(char *id, char **atp) otw_domain_len = strnlen(otw_nfs4domain, MAXPATHLEN); otw_id_2_at_len = at - id + 1; - MALLOC_ZONE(dsnode, char*, MAXPATHLEN, M_NAMEI, M_WAITOK); + dsnode = zalloc(ZV_NAMEI); /* first try to map nfs4 domain to dsnode for scoped lookups */ error = kauth_cred_nfs4domain2dsnode(otw_nfs4domain, dsnode); if (!error) { @@ -1281,7 +1288,7 @@ nfs4_map_domain(char *id, char **atp) *at = '\0'; } } - 
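/*
 * Illustrative sketch (not part of the patch): the patch moves several NFS
 * allocations from MALLOC_ZONE/FREE_ZONE onto named kalloc zones (zalloc/zfree
 * with handles such as ZV_NAMEI or nfs_req_zone). A zone is essentially a pool
 * of fixed-size elements with cheap allocation and free. The toy userspace
 * pool below captures that idea; all names are hypothetical and the real zone
 * allocator does considerably more (per-CPU caches, poisoning, accounting).
 */
#include <stdlib.h>

struct toy_zone {
    size_t elem_size;       /* must be at least sizeof(void *) */
    void  *freelist;        /* singly linked list threaded through free elements */
};

static void *
toy_zalloc(struct toy_zone *z)
{
    if (z->freelist != NULL) {
        void *e = z->freelist;
        z->freelist = *(void **)e;      /* pop from the freelist */
        return e;
    }
    return malloc(z->elem_size);        /* grow the pool on demand */
}

static void
toy_zfree(struct toy_zone *z, void *e)
{
    *(void **)e = z->freelist;          /* push back on the freelist */
    z->freelist = e;
}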
FREE_ZONE(dsnode, MAXPATHLEN, M_NAMEI); + NFS_ZFREE(ZV_NAMEI, dsnode); if (nfs_idmap_ctrl & NFS_IDMAP_CTRL_LOG_SUCCESSFUL_MAPPINGS) { printf("nfs4_id2guid: after domain mapping id is %s\n", id); @@ -1496,7 +1503,7 @@ nfs4_addv4domain(char *id, size_t *idlen) size_t domain_len; char *mapped_domain; - MALLOC_ZONE(nfs4domain, char*, MAXPATHLEN, M_NAMEI, M_WAITOK); + nfs4domain = zalloc(ZV_NAMEI); error = kauth_cred_dsnode2nfs4domain(dsnode, nfs4domain); if (!error) { domain_len = strnlen(nfs4domain, MAXPATHLEN); @@ -1517,7 +1524,7 @@ nfs4_addv4domain(char *id, size_t *idlen) error = ENOSPC; } } - FREE_ZONE(nfs4domain, MAXPATHLEN, M_NAMEI); + NFS_ZFREE(ZV_NAMEI, nfs4domain); } else if (at == NULL) { /* * If we didn't find an 'at' then cp points to the end of id passed in. @@ -1622,7 +1629,7 @@ nfs4_guid2id(guid_t *guidp, char *id, size_t *idlen, int isgroup) */ if (*idlen < MAXPATHLEN) { - MALLOC_ZONE(id1buf, char*, MAXPATHLEN, M_NAMEI, M_WAITOK); + id1buf = zalloc(ZV_NAMEI); id1 = id1buf; id1len = MAXPATHLEN; } else { @@ -1680,7 +1687,7 @@ nfs4_guid2id(guid_t *guidp, char *id, size_t *idlen, int isgroup) nfs4_mapguid_log(error, "End of routine", guidp, isgroup, id1); if (id1buf) { - FREE_ZONE(id1buf, MAXPATHLEN, M_NAMEI); + NFS_ZFREE(ZV_NAMEI, id1buf); } return error; @@ -1772,7 +1779,7 @@ nfs4_parsefattr( size_t slen; char sbuf[64], *s; struct nfs_fsattr nfsa_dummy; - struct nfs_vattr nva_dummy; + struct nfs_vattr *nva_dummy = NULL; struct dqblk dqb_dummy; kauth_acl_t acl = NULL; uint32_t ace_type, ace_flags, ace_mask; @@ -1784,7 +1791,8 @@ nfs4_parsefattr( nfsap = &nfsa_dummy; } if (!nvap) { - nvap = &nva_dummy; + MALLOC(nva_dummy, struct nfs_vattr *, sizeof(*nva_dummy), M_TEMP, M_WAITOK); + nvap = nva_dummy; } if (!dqbp) { dqbp = &dqb_dummy; @@ -1954,7 +1962,7 @@ nfs4_parsefattr( nfsm_assert(error, (attrbytes >= 0), EBADRPC); } nfsmout_if(error); - if ((nvap != &nva_dummy) && !error2) { + if ((nvap != nva_dummy) && !error2) { nvap->nva_acl = acl; acl = NULL; } @@ -2265,7 +2273,12 @@ nfs4_parsefattr( attrbytes -= NFSX_UNSIGNED + nfsm_rndup(val); } if (NFS_BITMAP_ISSET(bitmap, NFS_FATTR_MODE)) { - nfsm_chain_get_32(error, nmc, nvap->nva_mode); + nfsm_chain_get_32(error, nmc, val); + if (val > ALLPERMS) { + error = EBADRPC; + } else { + nvap->nva_mode = (mode_t)val; + } attrbytes -= NFSX_UNSIGNED; } if (NFS_BITMAP_ISSET(bitmap, NFS_FATTR_NO_TRUNC)) { @@ -2465,6 +2478,7 @@ nfsmout: kauth_acl_free(nvap->nva_acl); nvap->nva_acl = NULL; } + FREE(nva_dummy, M_TEMP); return error; } diff --git a/bsd/nfs/nfs4_vnops.c b/bsd/nfs/nfs4_vnops.c index f6619bfe2..4e1c7641f 100644 --- a/bsd/nfs/nfs4_vnops.c +++ b/bsd/nfs/nfs4_vnops.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2006-2019 Apple Inc. All rights reserved. + * Copyright (c) 2006-2020 Apple Inc. All rights reserved. 
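/*
 * Illustrative sketch (not part of the patch): nfs4_parsefattr() now
 * range-checks the 32-bit mode word from the wire before narrowing it to
 * mode_t; anything above ALLPERMS (07777, i.e. setuid/setgid/sticky plus rwx
 * for user, group, and other) is treated as a malformed reply. parse_mode() is
 * a hypothetical stand-in returning an errno-style result.
 */
#include <errno.h>
#include <stdint.h>
#include <sys/stat.h>

#ifndef ALLPERMS
#define ALLPERMS (S_ISUID | S_ISGID | S_ISVTX | S_IRWXU | S_IRWXG | S_IRWXO)
#endif

static int
parse_mode(uint32_t wire_val, mode_t *out)
{
    if (wire_val > ALLPERMS) {
        return EBADMSG;         /* the kernel uses EBADRPC here */
    }
    *out = (mode_t)wire_val;
    return 0;
}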
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -273,11 +273,11 @@ nfsmout: } int -nfs4_readlink_rpc(nfsnode_t np, char *buf, uint32_t *buflenp, vfs_context_t ctx) +nfs4_readlink_rpc(nfsnode_t np, char *buf, size_t *buflenp, vfs_context_t ctx) { struct nfsmount *nmp; int error = 0, lockerror = ENOENT, status, numops; - uint32_t len = 0; + size_t len = 0; u_int64_t xid; struct nfsm_chain nmreq, nmrep; struct nfsreq_secinfo_args si; @@ -369,8 +369,8 @@ nfs4_read_rpc_async( NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0); nfsm_chain_null(&nmreq); - // PUTFH, READ, GETATTR - numops = 3; + // PUTFH, READ + numops = 2; nfsm_chain_build_alloc_init(error, &nmreq, 22 * NFSX_UNSIGNED); nfsm_chain_add_compound_header(error, &nmreq, "read", nmp->nm_minor_vers, numops); numops--; @@ -382,9 +382,6 @@ nfs4_read_rpc_async( nfsm_chain_add_stateid(error, &nmreq, &stateid); nfsm_chain_add_64(error, &nmreq, offset); nfsm_chain_add_32(error, &nmreq, len); - numops--; - nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR); - nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np); nfsm_chain_build_done(error, &nmreq); nfsm_assert(error, (numops == 0), EPROTO); nfsmout_if(error); @@ -435,8 +432,6 @@ nfs4_read_rpc_async_finish( *lenp = MIN(retlen, *lenp); error = nfsm_chain_get_uio(&nmrep, *lenp, uio); } - nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR); - nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid); if (!lockerror) { nfs_node_unlock(np); } @@ -508,7 +503,7 @@ nfs4_write_rpc_async( } numops--; nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR); - nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np); + nfsm_chain_add_bitmap_supported(error, &nmreq, nfs4_getattr_write_bitmap, nmp, np); nfsm_chain_build_done(error, &nmreq); nfsm_assert(error, (numops == 0), EPROTO); nfsmout_if(error); @@ -580,7 +575,15 @@ nfs4_write_rpc_async_finish( } lck_mtx_unlock(&nmp->nm_lock); nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR); + + /* + * NFSv4 WRITE RPCs contain partial GETATTR requests - only type, change, size, metadatatime and modifytime are requested. + * In such cases, we do not update the time stamp - but the requested attributes. 
+ */ + np->n_vattr.nva_flags |= NFS_FFLAG_PARTIAL_WRITE; nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid); + np->n_vattr.nva_flags &= ~NFS_FFLAG_PARTIAL_WRITE; + nfsmout: if (!lockerror) { nfs_node_unlock(np); @@ -781,15 +784,16 @@ nfs4_readdir_rpc(nfsnode_t dnp, struct nfsbuf *bp, vfs_context_t ctx) struct nfsmount *nmp; int error = 0, lockerror, nfsvers, namedattr, rdirplus, bigcookies, numops; int i, status, more_entries = 1, eof, bp_dropped = 0; + uint16_t namlen, reclen; uint32_t nmreaddirsize, nmrsize; - uint32_t namlen, skiplen, fhlen, xlen, attrlen, reclen, space_free, space_needed; - uint64_t cookie, lastcookie, xid, savedxid; + uint32_t namlen32, skiplen, fhlen, xlen, attrlen; + uint64_t padlen, cookie, lastcookie, xid, savedxid, space_free, space_needed; struct nfsm_chain nmreq, nmrep, nmrepsave; - fhandle_t fh; - struct nfs_vattr nvattr, *nvattrp; + fhandle_t *fh; + struct nfs_vattr *nvattr, *nvattrp; struct nfs_dir_buf_header *ndbhp; struct direntry *dp; - char *padstart, padlen; + char *padstart; const char *tag; uint32_t entry_attrs[NFS_ATTR_BITMAP_LEN]; struct timeval now; @@ -833,6 +837,9 @@ nfs4_readdir_rpc(nfsnode_t dnp, struct nfsbuf *bp, vfs_context_t ctx) return lockerror; } + fh = zalloc(nfs_fhandle_zone); + MALLOC(nvattr, struct nfs_vattr *, sizeof(*nvattr), M_TEMP, M_WAITOK); + /* determine cookie to use, and move dp to the right offset */ ndbhp = (struct nfs_dir_buf_header*)bp->nb_data; dp = NFS_DIR_BUF_FIRST_DIRENTRY(bp); @@ -856,12 +863,12 @@ nfs4_readdir_rpc(nfsnode_t dnp, struct nfsbuf *bp, vfs_context_t ctx) */ if (((bp->nb_lblkno == 0) && (ndbhp->ndbh_count == 0)) && !(dnp->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR)) { - fh.fh_len = 0; - fhlen = rdirplus ? fh.fh_len + 1 : 0; + fh->fh_len = 0; + fhlen = rdirplus ? fh->fh_len + 1 : 0; xlen = rdirplus ? (fhlen + sizeof(time_t)) : 0; /* "." */ namlen = 1; - reclen = NFS_DIRENTRY_LEN(namlen + xlen); + reclen = NFS_DIRENTRY_LEN_16(namlen + xlen); if (xlen) { bzero(&dp->d_name[namlen + 1], xlen); } @@ -883,7 +890,7 @@ nfs4_readdir_rpc(nfsnode_t dnp, struct nfsbuf *bp, vfs_context_t ctx) /* ".." */ namlen = 2; - reclen = NFS_DIRENTRY_LEN(namlen + xlen); + reclen = NFS_DIRENTRY_LEN_16(namlen + xlen); if (xlen) { bzero(&dp->d_name[namlen + 1], xlen); } @@ -974,7 +981,12 @@ nfs4_readdir_rpc(nfsnode_t dnp, struct nfsbuf *bp, vfs_context_t ctx) while (more_entries) { /* Entry: COOKIE, NAME, FATTR */ nfsm_chain_get_64(error, &nmrep, cookie); - nfsm_chain_get_32(error, &nmrep, namlen); + nfsm_chain_get_32(error, &nmrep, namlen32); + if (namlen32 > UINT16_MAX) { + error = EBADRPC; + goto nfsmout; + } + namlen = (uint16_t)namlen32; nfsmout_if(error); if (!bigcookies && (cookie >> 32) && (nmp == NFSTONMP(dnp))) { /* we've got a big cookie, make sure flag is set */ @@ -998,7 +1010,7 @@ nfs4_readdir_rpc(nfsnode_t dnp, struct nfsbuf *bp, vfs_context_t ctx) fhlen = rdirplus ? (1 + dnp->n_fhsize) : 0; xlen = rdirplus ? (fhlen + sizeof(time_t)) : 0; attrlen = rdirplus ? sizeof(struct nfs_vattr) : 0; - reclen = NFS_DIRENTRY_LEN(namlen + xlen); + reclen = NFS_DIRENTRY_LEN_16(namlen + xlen); space_needed = reclen + attrlen; space_free = nfs_dir_buf_freespace(bp, rdirplus); if (space_needed > space_free) { @@ -1041,8 +1053,8 @@ nextbuffer: nfsm_rndup(namlen + skiplen) - nfsm_rndup(namlen)); } nfsmout_if(error); - nvattrp = rdirplus ? NFS_DIR_BUF_NVATTR(bp, ndbhp->ndbh_count) : &nvattr; - error = nfs4_parsefattr(&nmrep, NULL, nvattrp, &fh, NULL, NULL); + nvattrp = rdirplus ? 
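/*
 * Illustrative sketch (not part of the patch): the READDIR reply carries each
 * entry's name length as a 32-bit value, but struct direntry stores d_namlen
 * in 16 bits, so nfs4_readdir_rpc() now rejects oversized names up front
 * instead of letting the record-length arithmetic truncate silently.
 * checked_namlen() is a hypothetical stand-in.
 */
#include <errno.h>
#include <stdint.h>

static int
checked_namlen(uint32_t namlen32, uint16_t *namlen_out)
{
    if (namlen32 > UINT16_MAX) {
        return EBADMSG;         /* the kernel returns EBADRPC and bails out */
    }
    *namlen_out = (uint16_t)namlen32;
    return 0;
}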
NFS_DIR_BUF_NVATTR(bp, ndbhp->ndbh_count) : nvattr; + error = nfs4_parsefattr(&nmrep, NULL, nvattrp, fh, NULL, NULL); if (!error && NFS_BITMAP_ISSET(nvattrp->nva_bitmap, NFS_FATTR_ACL)) { /* we do NOT want ACLs returned to us here */ NFS_BITMAP_CLR(nvattrp->nva_bitmap, NFS_FATTR_ACL); @@ -1055,7 +1067,7 @@ nextbuffer: /* OK, we may not have gotten all of the attributes but we will use what we can. */ if ((error == NFSERR_MOVED) || (error == NFSERR_INVAL)) { /* set this up to look like a referral trigger */ - nfs4_default_attrs_for_referral_trigger(dnp, dp->d_name, namlen, nvattrp, &fh); + nfs4_default_attrs_for_referral_trigger(dnp, dp->d_name, namlen, nvattrp, fh); } error = 0; } @@ -1080,10 +1092,11 @@ nextbuffer: if (rdirplus) { /* fileid is already in d_fileno, so stash xid in attrs */ nvattrp->nva_fileid = savedxid; + nvattrp->nva_flags |= NFS_FFLAG_FILEID_CONTAINS_XID; if (NFS_BITMAP_ISSET(nvattrp->nva_bitmap, NFS_FATTR_FILEHANDLE)) { - fhlen = fh.fh_len + 1; + fhlen = fh->fh_len + 1; xlen = fhlen + sizeof(time_t); - reclen = NFS_DIRENTRY_LEN(namlen + xlen); + reclen = NFS_DIRENTRY_LEN_16(namlen + xlen); space_needed = reclen + attrlen; if (space_needed > space_free) { /* didn't actually have the room... move on to next buffer */ @@ -1091,19 +1104,19 @@ nextbuffer: goto nextbuffer; } /* pack the file handle into the record */ - dp->d_name[dp->d_namlen + 1] = fh.fh_len; - bcopy(fh.fh_data, &dp->d_name[dp->d_namlen + 2], fh.fh_len); + dp->d_name[dp->d_namlen + 1] = (unsigned char)fh->fh_len; /* No truncation because fh_len's value is checked during nfs4_parsefattr() */ + bcopy(fh->fh_data, &dp->d_name[dp->d_namlen + 2], fh->fh_len); } else { /* mark the file handle invalid */ - fh.fh_len = 0; - fhlen = fh.fh_len + 1; + fh->fh_len = 0; + fhlen = fh->fh_len + 1; xlen = fhlen + sizeof(time_t); - reclen = NFS_DIRENTRY_LEN(namlen + xlen); + reclen = NFS_DIRENTRY_LEN_16(namlen + xlen); bzero(&dp->d_name[dp->d_namlen + 1], fhlen); } *(time_t*)(&dp->d_name[dp->d_namlen + 1 + fhlen]) = now.tv_sec; dp->d_reclen = reclen; - nfs_rdirplus_update_node_attrs(dnp, dp, &fh, nvattrp, &savedxid); + nfs_rdirplus_update_node_attrs(dnp, dp, fh, nvattrp, &savedxid); } padstart = dp->d_name + dp->d_namlen + 1 + xlen; ndbhp->ndbh_count++; @@ -1153,6 +1166,8 @@ nfsmout: } nfsm_chain_cleanup(&nmreq); nfsm_chain_cleanup(&nmrep); + NFS_ZFREE(nfs_fhandle_zone, fh); + FREE(nvattr, M_TEMP); return bp_dropped ? NFSERR_DIRBUFDROPPED : error; } @@ -1348,12 +1363,7 @@ nfs4_commit_rpc( return 0; } nfsvers = nmp->nm_vers; - - if (count > UINT32_MAX) { - count32 = 0; - } else { - count32 = count; - } + count32 = count > UINT32_MAX ? 
0 : (uint32_t)count; NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0); nfsm_chain_null(&nmreq); @@ -1418,7 +1428,7 @@ nfs4_pathconf_rpc( struct nfsm_chain nmreq, nmrep; struct nfsmount *nmp = NFSTONMP(np); uint32_t bitmap[NFS_ATTR_BITMAP_LEN]; - struct nfs_vattr nvattr; + struct nfs_vattr *nvattr; struct nfsreq_secinfo_args si; if (nfs_mount_gone(nmp)) { @@ -1430,7 +1440,8 @@ nfs4_pathconf_rpc( } NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0); - NVATTR_INIT(&nvattr); + MALLOC(nvattr, struct nfs_vattr *, sizeof(*nvattr), M_TEMP, M_WAITOK); + NVATTR_INIT(nvattr); nfsm_chain_null(&nmreq); nfsm_chain_null(&nmrep); @@ -1462,19 +1473,20 @@ nfs4_pathconf_rpc( nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH); nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR); nfsmout_if(error); - error = nfs4_parsefattr(&nmrep, nfsap, &nvattr, NULL, NULL, NULL); + error = nfs4_parsefattr(&nmrep, nfsap, nvattr, NULL, NULL, NULL); nfsmout_if(error); if ((lockerror = nfs_node_lock(np))) { error = lockerror; } if (!error) { - nfs_loadattrcache(np, &nvattr, &xid, 0); + nfs_loadattrcache(np, nvattr, &xid, 0); } if (!lockerror) { nfs_node_unlock(np); } nfsmout: - NVATTR_CLEANUP(&nvattr); + NVATTR_CLEANUP(nvattr); + FREE(nvattr, M_TEMP); nfsm_chain_cleanup(&nmreq); nfsm_chain_cleanup(&nmrep); return error; @@ -1491,7 +1503,7 @@ nfs4_vnop_getattr( { struct vnode_attr *vap = ap->a_vap; struct nfsmount *nmp; - struct nfs_vattr nva; + struct nfs_vattr *nva; int error, acls, ngaflags; nmp = VTONMP(ap->a_vp); @@ -1504,105 +1516,108 @@ nfs4_vnop_getattr( if (VATTR_IS_ACTIVE(vap, va_acl) && acls) { ngaflags |= NGA_ACL; } - error = nfs_getattr(VTONFS(ap->a_vp), &nva, ap->a_context, ngaflags); + MALLOC(nva, struct nfs_vattr *, sizeof(*nva), M_TEMP, M_WAITOK); + error = nfs_getattr(VTONFS(ap->a_vp), nva, ap->a_context, ngaflags); if (error) { - return error; + goto out; } /* copy what we have in nva to *a_vap */ - if (VATTR_IS_ACTIVE(vap, va_rdev) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_RAWDEV)) { - dev_t rdev = makedev(nva.nva_rawdev.specdata1, nva.nva_rawdev.specdata2); + if (VATTR_IS_ACTIVE(vap, va_rdev) && NFS_BITMAP_ISSET(nva->nva_bitmap, NFS_FATTR_RAWDEV)) { + dev_t rdev = makedev(nva->nva_rawdev.specdata1, nva->nva_rawdev.specdata2); VATTR_RETURN(vap, va_rdev, rdev); } - if (VATTR_IS_ACTIVE(vap, va_nlink) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_NUMLINKS)) { - VATTR_RETURN(vap, va_nlink, nva.nva_nlink); + if (VATTR_IS_ACTIVE(vap, va_nlink) && NFS_BITMAP_ISSET(nva->nva_bitmap, NFS_FATTR_NUMLINKS)) { + VATTR_RETURN(vap, va_nlink, nva->nva_nlink); } - if (VATTR_IS_ACTIVE(vap, va_data_size) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_SIZE)) { - VATTR_RETURN(vap, va_data_size, nva.nva_size); + if (VATTR_IS_ACTIVE(vap, va_data_size) && NFS_BITMAP_ISSET(nva->nva_bitmap, NFS_FATTR_SIZE)) { + VATTR_RETURN(vap, va_data_size, nva->nva_size); } // VATTR_RETURN(vap, va_data_alloc, ???); // VATTR_RETURN(vap, va_total_size, ???); - if (VATTR_IS_ACTIVE(vap, va_total_alloc) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_SPACE_USED)) { - VATTR_RETURN(vap, va_total_alloc, nva.nva_bytes); + if (VATTR_IS_ACTIVE(vap, va_total_alloc) && NFS_BITMAP_ISSET(nva->nva_bitmap, NFS_FATTR_SPACE_USED)) { + VATTR_RETURN(vap, va_total_alloc, nva->nva_bytes); } - if (VATTR_IS_ACTIVE(vap, va_uid) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_OWNER)) { - VATTR_RETURN(vap, va_uid, nva.nva_uid); + if (VATTR_IS_ACTIVE(vap, va_uid) && NFS_BITMAP_ISSET(nva->nva_bitmap, NFS_FATTR_OWNER)) { + VATTR_RETURN(vap, va_uid, nva->nva_uid); } - if (VATTR_IS_ACTIVE(vap, 
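/*
 * Illustrative sketch (not part of the patch): several nfs4 routines stop
 * placing struct nfs_vattr (a large attribute buffer) on the kernel stack and
 * heap-allocate it for the duration of the call, which keeps stack frames
 * small on deep VFS call chains. The shape of the change, with hypothetical
 * names and malloc()/free() standing in for MALLOC(..., M_TEMP, M_WAITOK) and
 * FREE(..., M_TEMP):
 */
#include <stdlib.h>
#include <string.h>

struct big_attrs { char blob[512]; };   /* stand-in for struct nfs_vattr */

static int
getattr_example(int (*fill)(struct big_attrs *))
{
    struct big_attrs *nva = malloc(sizeof(*nva));   /* was: struct big_attrs nva; on the stack */
    int error;

    if (nva == NULL) {
        return -1;
    }
    memset(nva, 0, sizeof(*nva));
    error = fill(nva);          /* use *nva exactly as the stack copy was used */
    free(nva);                  /* paired release before returning */
    return error;
}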
va_uuuid) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_OWNER)) { - VATTR_RETURN(vap, va_uuuid, nva.nva_uuuid); + if (VATTR_IS_ACTIVE(vap, va_uuuid) && NFS_BITMAP_ISSET(nva->nva_bitmap, NFS_FATTR_OWNER)) { + VATTR_RETURN(vap, va_uuuid, nva->nva_uuuid); } - if (VATTR_IS_ACTIVE(vap, va_gid) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_OWNER_GROUP)) { - VATTR_RETURN(vap, va_gid, nva.nva_gid); + if (VATTR_IS_ACTIVE(vap, va_gid) && NFS_BITMAP_ISSET(nva->nva_bitmap, NFS_FATTR_OWNER_GROUP)) { + VATTR_RETURN(vap, va_gid, nva->nva_gid); } - if (VATTR_IS_ACTIVE(vap, va_guuid) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_OWNER_GROUP)) { - VATTR_RETURN(vap, va_guuid, nva.nva_guuid); + if (VATTR_IS_ACTIVE(vap, va_guuid) && NFS_BITMAP_ISSET(nva->nva_bitmap, NFS_FATTR_OWNER_GROUP)) { + VATTR_RETURN(vap, va_guuid, nva->nva_guuid); } if (VATTR_IS_ACTIVE(vap, va_mode)) { - if (NMFLAG(nmp, ACLONLY) || !NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_MODE)) { - VATTR_RETURN(vap, va_mode, 0777); + if (NMFLAG(nmp, ACLONLY) || !NFS_BITMAP_ISSET(nva->nva_bitmap, NFS_FATTR_MODE)) { + VATTR_RETURN(vap, va_mode, ACCESSPERMS); } else { - VATTR_RETURN(vap, va_mode, nva.nva_mode); + VATTR_RETURN(vap, va_mode, nva->nva_mode); } } if (VATTR_IS_ACTIVE(vap, va_flags) && - (NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_ARCHIVE) || - NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_HIDDEN) || - (nva.nva_flags & NFS_FFLAG_TRIGGER))) { + (NFS_BITMAP_ISSET(nva->nva_bitmap, NFS_FATTR_ARCHIVE) || + NFS_BITMAP_ISSET(nva->nva_bitmap, NFS_FATTR_HIDDEN) || + (nva->nva_flags & NFS_FFLAG_TRIGGER))) { uint32_t flags = 0; - if (NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_ARCHIVE) && - (nva.nva_flags & NFS_FFLAG_ARCHIVED)) { + if (NFS_BITMAP_ISSET(nva->nva_bitmap, NFS_FATTR_ARCHIVE) && + (nva->nva_flags & NFS_FFLAG_ARCHIVED)) { flags |= SF_ARCHIVED; } - if (NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_HIDDEN) && - (nva.nva_flags & NFS_FFLAG_HIDDEN)) { + if (NFS_BITMAP_ISSET(nva->nva_bitmap, NFS_FATTR_HIDDEN) && + (nva->nva_flags & NFS_FFLAG_HIDDEN)) { flags |= UF_HIDDEN; } VATTR_RETURN(vap, va_flags, flags); } - if (VATTR_IS_ACTIVE(vap, va_create_time) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_TIME_CREATE)) { - vap->va_create_time.tv_sec = nva.nva_timesec[NFSTIME_CREATE]; - vap->va_create_time.tv_nsec = nva.nva_timensec[NFSTIME_CREATE]; + if (VATTR_IS_ACTIVE(vap, va_create_time) && NFS_BITMAP_ISSET(nva->nva_bitmap, NFS_FATTR_TIME_CREATE)) { + vap->va_create_time.tv_sec = nva->nva_timesec[NFSTIME_CREATE]; + vap->va_create_time.tv_nsec = nva->nva_timensec[NFSTIME_CREATE]; VATTR_SET_SUPPORTED(vap, va_create_time); } - if (VATTR_IS_ACTIVE(vap, va_access_time) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_TIME_ACCESS)) { - vap->va_access_time.tv_sec = nva.nva_timesec[NFSTIME_ACCESS]; - vap->va_access_time.tv_nsec = nva.nva_timensec[NFSTIME_ACCESS]; + if (VATTR_IS_ACTIVE(vap, va_access_time) && NFS_BITMAP_ISSET(nva->nva_bitmap, NFS_FATTR_TIME_ACCESS)) { + vap->va_access_time.tv_sec = nva->nva_timesec[NFSTIME_ACCESS]; + vap->va_access_time.tv_nsec = nva->nva_timensec[NFSTIME_ACCESS]; VATTR_SET_SUPPORTED(vap, va_access_time); } - if (VATTR_IS_ACTIVE(vap, va_modify_time) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_TIME_MODIFY)) { - vap->va_modify_time.tv_sec = nva.nva_timesec[NFSTIME_MODIFY]; - vap->va_modify_time.tv_nsec = nva.nva_timensec[NFSTIME_MODIFY]; + if (VATTR_IS_ACTIVE(vap, va_modify_time) && NFS_BITMAP_ISSET(nva->nva_bitmap, NFS_FATTR_TIME_MODIFY)) { + vap->va_modify_time.tv_sec = nva->nva_timesec[NFSTIME_MODIFY]; + vap->va_modify_time.tv_nsec = 
nva->nva_timensec[NFSTIME_MODIFY]; VATTR_SET_SUPPORTED(vap, va_modify_time); } - if (VATTR_IS_ACTIVE(vap, va_change_time) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_TIME_METADATA)) { - vap->va_change_time.tv_sec = nva.nva_timesec[NFSTIME_CHANGE]; - vap->va_change_time.tv_nsec = nva.nva_timensec[NFSTIME_CHANGE]; + if (VATTR_IS_ACTIVE(vap, va_change_time) && NFS_BITMAP_ISSET(nva->nva_bitmap, NFS_FATTR_TIME_METADATA)) { + vap->va_change_time.tv_sec = nva->nva_timesec[NFSTIME_CHANGE]; + vap->va_change_time.tv_nsec = nva->nva_timensec[NFSTIME_CHANGE]; VATTR_SET_SUPPORTED(vap, va_change_time); } - if (VATTR_IS_ACTIVE(vap, va_backup_time) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_TIME_BACKUP)) { - vap->va_backup_time.tv_sec = nva.nva_timesec[NFSTIME_BACKUP]; - vap->va_backup_time.tv_nsec = nva.nva_timensec[NFSTIME_BACKUP]; + if (VATTR_IS_ACTIVE(vap, va_backup_time) && NFS_BITMAP_ISSET(nva->nva_bitmap, NFS_FATTR_TIME_BACKUP)) { + vap->va_backup_time.tv_sec = nva->nva_timesec[NFSTIME_BACKUP]; + vap->va_backup_time.tv_nsec = nva->nva_timensec[NFSTIME_BACKUP]; VATTR_SET_SUPPORTED(vap, va_backup_time); } - if (VATTR_IS_ACTIVE(vap, va_fileid) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_FILEID)) { - VATTR_RETURN(vap, va_fileid, nva.nva_fileid); + if (VATTR_IS_ACTIVE(vap, va_fileid) && NFS_BITMAP_ISSET(nva->nva_bitmap, NFS_FATTR_FILEID)) { + VATTR_RETURN(vap, va_fileid, nva->nva_fileid); } - if (VATTR_IS_ACTIVE(vap, va_type) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_TYPE)) { - VATTR_RETURN(vap, va_type, nva.nva_type); + if (VATTR_IS_ACTIVE(vap, va_type) && NFS_BITMAP_ISSET(nva->nva_bitmap, NFS_FATTR_TYPE)) { + VATTR_RETURN(vap, va_type, nva->nva_type); } - if (VATTR_IS_ACTIVE(vap, va_filerev) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_CHANGE)) { - VATTR_RETURN(vap, va_filerev, nva.nva_change); + if (VATTR_IS_ACTIVE(vap, va_filerev) && NFS_BITMAP_ISSET(nva->nva_bitmap, NFS_FATTR_CHANGE)) { + VATTR_RETURN(vap, va_filerev, nva->nva_change); } if (VATTR_IS_ACTIVE(vap, va_acl) && acls) { - VATTR_RETURN(vap, va_acl, nva.nva_acl); - nva.nva_acl = NULL; + VATTR_RETURN(vap, va_acl, nva->nva_acl); + nva->nva_acl = NULL; } // other attrs we might support someday: // VATTR_RETURN(vap, va_encoding, ??? /* potentially unnormalized UTF-8? 
*/); - NVATTR_CLEANUP(&nva); + NVATTR_CLEANUP(nva); +out: + FREE(nva, M_TEMP); return error; } @@ -1836,7 +1851,7 @@ nfs_mount_state_in_use_end(struct nfsmount *nmp, int error) int restart = nfs_mount_state_error_should_restart(error); if (nfs_mount_gone(nmp)) { - return restart; + return ENXIO; } lck_mtx_lock(&nmp->nm_lock); if (restart && (error != NFSERR_OLD_STATEID) && (error != NFSERR_GRACE)) { @@ -2393,8 +2408,8 @@ nfs_open_file_remove_open_find( struct nfs_open_file *nofp, uint32_t accessMode, uint32_t denyMode, - uint32_t *newAccessMode, - uint32_t *newDenyMode, + uint8_t *newAccessMode, + uint8_t *newDenyMode, int *delegated) { /* @@ -2482,7 +2497,7 @@ nfs_open_file_remove_open_find( void nfs_open_file_remove_open(struct nfs_open_file *nofp, uint32_t accessMode, uint32_t denyMode) { - uint32_t newAccessMode, newDenyMode; + uint8_t newAccessMode, newDenyMode; int delegated = 0; lck_mtx_lock(&nofp->nof_lock); @@ -2803,7 +2818,7 @@ nfs4_open( vnode_t dvp = NULL; struct componentname cn; const char *vname = NULL; - size_t namelen; + uint32_t namelen; char smallname[128]; char *filename = NULL; int error = 0, readtoo = 0; @@ -2909,7 +2924,8 @@ nfs_vnop_mmap( vfs_context_t ctx = ap->a_context; vnode_t vp = ap->a_vp; nfsnode_t np = VTONFS(vp); - int error = 0, accessMode, denyMode, delegated; + int error = 0, delegated = 0; + uint8_t accessMode, denyMode; struct nfsmount *nmp; struct nfs_open_owner *noop = NULL; struct nfs_open_file *nofp = NULL; @@ -2962,10 +2978,10 @@ restart: } #if CONFIG_NFS4 if (!error && (nofp->nof_flags & NFS_OPEN_FILE_REOPEN)) { - nfs_mount_state_in_use_end(nmp, 0); error = nfs4_reopen(nofp, NULL); nofp = NULL; if (!error) { + nfs_mount_state_in_use_end(nmp, 0); goto restart; } } @@ -3163,6 +3179,42 @@ out: return error; } +int +nfs_vnop_mmap_check( + struct vnop_mmap_check_args /* { + * struct vnodeop_desc *a_desc; + * vnode_t a_vp; + * int a_flags; + * vfs_context_t a_context; + * } */*ap) +{ + vfs_context_t ctx = ap->a_context; + vnode_t vp = ap->a_vp; + struct nfsmount *nmp = VTONMP(vp); + struct vnop_access_args naa; + int error = 0; + + if (nfs_mount_gone(nmp)) { + return ENXIO; + } + + if (vnode_isreg(vp)) { + /* + * We only need to ensure that a page-in will be + * possible with these credentials. Everything + * else has been checked at other layers. + */ + naa.a_desc = &vnop_access_desc; + naa.a_vp = vp; + naa.a_action = KAUTH_VNODE_READ_DATA; + naa.a_context = ctx; + + /* compute actual success/failure based on accessibility */ + error = nfs_vnop_access(&naa); + } + + return error; +} int nfs_vnop_mnomap( @@ -3222,9 +3274,9 @@ loop: lck_mtx_unlock(&np->n_openlock); #if CONFIG_NFS4 if (nofp->nof_flags & NFS_OPEN_FILE_REOPEN) { - nfs_mount_state_in_use_end(nmp, 0); error = nfs4_reopen(nofp, NULL); if (!error) { + nfs_mount_state_in_use_end(nmp, 0); goto loop; } } @@ -4161,8 +4213,10 @@ restart: /* release any currently held shared lock before sleeping */ nfs_open_state_clear_busy(np); busy = 0; - nfs_mount_state_in_use_end(nmp, 0); - inuse = 0; + if (inuse) { + nfs_mount_state_in_use_end(nmp, 0); + inuse = 0; + } error2 = nfs_advlock_unlock(np, nofp, nlop, 0, UINT64_MAX, NFS_FILE_LOCK_STYLE_FLOCK, ctx); flocknflp = NULL; if (!error2) { @@ -4197,8 +4251,10 @@ restart: /* looks like we have a recover pending... 
restart */ nfs_open_state_clear_busy(np); busy = 0; - nfs_mount_state_in_use_end(nmp, 0); - inuse = 0; + if (inuse) { + nfs_mount_state_in_use_end(nmp, 0); + inuse = 0; + } goto restart; } if (!error && (np->n_flag & NREVOKE)) { @@ -4370,8 +4426,10 @@ error_out: lck_mtx_unlock(&np->n_openlock); nfs_open_state_clear_busy(np); - nfs_mount_state_in_use_end(nmp, error); + if (inuse) { + nfs_mount_state_in_use_end(nmp, error); + } if (nflp2) { nfs_file_lock_destroy(nflp2); } @@ -4865,7 +4923,7 @@ nfs4_reopen(struct nfs_open_file *nofp, thread_t thd) struct componentname cn; const char *vname = NULL; const char *name = NULL; - size_t namelen; + uint32_t namelen; char smallname[128]; char *filename = NULL; int error = 0, done = 0, slpflag = NMFLAG(nmp, INTR) ? PCATCH : 0; @@ -5107,7 +5165,7 @@ nfs4_open_rpc_internal( { struct nfsmount *nmp; struct nfs_open_owner *noop = nofp->nof_owner; - struct nfs_vattr nvattr; + struct nfs_vattr *nvattr; int error = 0, open_error = EIO, lockerror = ENOENT, busyerror = ENOENT, status; int nfsvers, namedattrs, numops, exclusive = 0, gotuid, gotgid; u_int64_t xid, savedxid = 0; @@ -5118,9 +5176,9 @@ nfs4_open_rpc_internal( uint32_t bitmap[NFS_ATTR_BITMAP_LEN], bmlen; uint32_t rflags, delegation, recall; struct nfs_stateid stateid, dstateid, *sid; - fhandle_t fh; - struct nfsreq rq, *req = &rq; - struct nfs_dulookup dul; + fhandle_t *fh; + struct nfsreq *req; + struct nfs_dulookup *dul; char sbuf[64], *s; uint32_t ace_type, ace_flags, ace_mask, len, slen; struct kauth_ace ace; @@ -5136,6 +5194,7 @@ nfs4_open_rpc_internal( } nfsvers = nmp->nm_vers; namedattrs = (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR); + bzero(&dstateid, sizeof(dstateid)); if (dnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) { return EINVAL; } @@ -5162,12 +5221,18 @@ nfs4_open_rpc_internal( if ((error = nfs_open_owner_set_busy(noop, thd))) { return error; } + + fh = zalloc(nfs_fhandle_zone); + req = zalloc(nfs_req_zone); + MALLOC(dul, struct nfs_dulookup *, sizeof(*dul), M_TEMP, M_WAITOK); + MALLOC(nvattr, struct nfs_vattr *, sizeof(*nvattr), M_TEMP, M_WAITOK); + again: rflags = delegation = recall = 0; ace.ace_flags = 0; s = sbuf; slen = sizeof(sbuf); - NVATTR_INIT(&nvattr); + NVATTR_INIT(nvattr); NFSREQ_SECINFO_SET(&si, dnp, NULL, 0, cnp->cn_nameptr, cnp->cn_namelen); nfsm_chain_null(&nmreq); @@ -5224,20 +5289,20 @@ again: nfsmout_if(error); if (create && !namedattrs) { - nfs_dulookup_init(&dul, dnp, cnp->cn_nameptr, cnp->cn_namelen, ctx); + nfs_dulookup_init(dul, dnp, cnp->cn_nameptr, cnp->cn_namelen, ctx); } error = nfs_request_async(dnp, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, R_NOINTR, NULL, &req); if (!error) { if (create && !namedattrs) { - nfs_dulookup_start(&dul, dnp, ctx); + nfs_dulookup_start(dul, dnp, ctx); } error = nfs_request_async_finish(req, &nmrep, &xid, &status); savedxid = xid; } if (create && !namedattrs) { - nfs_dulookup_finish(&dul, dnp, ctx); + nfs_dulookup_finish(dul, dnp, ctx); } if ((lockerror = nfs_node_lock(dnp))) { @@ -5315,14 +5380,14 @@ again: } nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR); nfsmout_if(error); - error = nfs4_parsefattr(&nmrep, NULL, &nvattr, &fh, NULL, NULL); + error = nfs4_parsefattr(&nmrep, NULL, nvattr, fh, NULL, NULL); nfsmout_if(error); - if (!NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_FILEHANDLE)) { + if (!NFS_BITMAP_ISSET(nvattr->nva_bitmap, NFS_FATTR_FILEHANDLE)) { printf("nfs: open/create didn't return filehandle? 
%s\n", cnp->cn_nameptr); error = EBADRPC; goto nfsmout; } - if (!create && np && !NFS_CMPFH(np, fh.fh_data, fh.fh_len)) { + if (!create && np && !NFS_CMPFH(np, fh->fh_data, fh->fh_len)) { // XXX for the open case, what if fh doesn't match the vnode we think we're opening? // Solaris Named Attributes may do this due to a bug.... so don't warn for named attributes. if (!(np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR)) { @@ -5345,8 +5410,8 @@ again: if (rflags & NFS_OPEN_RESULT_CONFIRM) { nfs_node_unlock(dnp); lockerror = ENOENT; - NVATTR_CLEANUP(&nvattr); - error = nfs4_open_confirm_rpc(nmp, dnp, fh.fh_data, fh.fh_len, noop, sid, thd, cred, &nvattr, &xid); + NVATTR_CLEANUP(nvattr); + error = nfs4_open_confirm_rpc(nmp, dnp, fh->fh_data, fh->fh_len, noop, sid, thd, cred, nvattr, &xid); nfsmout_if(error); savedxid = xid; if ((lockerror = nfs_node_lock(dnp))) { @@ -5371,15 +5436,15 @@ nfsmout: if (!lockerror) { nfs_node_unlock(dnp); } - if (!error && !np && fh.fh_len) { + if (!error && !np && fh->fh_len) { /* create the vnode with the filehandle and attributes */ xid = savedxid; - error = nfs_nget(NFSTOMP(dnp), dnp, cnp, fh.fh_data, fh.fh_len, &nvattr, &xid, rq.r_auth, NG_MAKEENTRY, &newnp); + error = nfs_nget(NFSTOMP(dnp), dnp, cnp, fh->fh_data, fh->fh_len, nvattr, &xid, req->r_auth, NG_MAKEENTRY, &newnp); if (!error) { newvp = NFSTOV(newnp); } } - NVATTR_CLEANUP(&nvattr); + NVATTR_CLEANUP(nvattr); if (!busyerror) { nfs_node_clear_busy(dnp); } @@ -5405,7 +5470,7 @@ nfsmout: } else { /* give the delegation back */ if (np) { - if (NFS_CMPFH(np, fh.fh_data, fh.fh_len)) { + if (NFS_CMPFH(np, fh->fh_data, fh->fh_len)) { /* update delegation state and return it */ lck_mtx_lock(&np->n_openlock); np->n_openflags &= ~N_DELEG_MASK; @@ -5421,13 +5486,13 @@ nfsmout: } lck_mtx_unlock(&np->n_openlock); /* don't need to send a separate delegreturn for fh */ - fh.fh_len = 0; + fh->fh_len = 0; } /* return np's current delegation */ nfs4_delegation_return(np, 0, thd, cred); } - if (fh.fh_len) { /* return fh's delegation if it wasn't for np */ - nfs4_delegreturn_rpc(nmp, fh.fh_data, fh.fh_len, &dstateid, 0, thd, cred); + if (fh->fh_len) { /* return fh's delegation if it wasn't for np */ + nfs4_delegreturn_rpc(nmp, fh->fh_data, fh->fh_len, &dstateid, 0, thd, cred); } } } @@ -5459,6 +5524,10 @@ nfsmout: } } nfs_open_owner_clear_busy(noop); + NFS_ZFREE(nfs_fhandle_zone, fh); + NFS_ZFREE(nfs_req_zone, req); + FREE(dul, M_TEMP); + FREE(nvattr, M_TEMP); return error; } @@ -5475,7 +5544,7 @@ nfs4_claim_delegated_open_rpc( { struct nfsmount *nmp; struct nfs_open_owner *noop = nofp->nof_owner; - struct nfs_vattr nvattr; + struct nfs_vattr *nvattr; int error = 0, lockerror = ENOENT, status; int nfsvers, numops; u_int64_t xid; @@ -5483,7 +5552,7 @@ nfs4_claim_delegated_open_rpc( struct nfsm_chain nmreq, nmrep; uint32_t bitmap[NFS_ATTR_BITMAP_LEN], bmlen; uint32_t rflags = 0, delegation, recall = 0; - fhandle_t fh; + fhandle_t *fh; struct nfs_stateid dstateid; char sbuf[64], *s = sbuf; uint32_t ace_type, ace_flags, ace_mask, len, slen = sizeof(sbuf); @@ -5491,7 +5560,7 @@ nfs4_claim_delegated_open_rpc( vnode_t dvp = NULL; const char *vname = NULL; const char *name = NULL; - size_t namelen; + uint32_t namelen; char smallname[128]; char *filename = NULL; struct nfsreq_secinfo_args si; @@ -5500,6 +5569,8 @@ nfs4_claim_delegated_open_rpc( if (nfs_mount_gone(nmp)) { return ENXIO; } + fh = zalloc(nfs_fhandle_zone); + MALLOC(nvattr, struct nfs_vattr *, sizeof(*nvattr), M_TEMP, M_WAITOK); nfsvers = nmp->nm_vers; nfs_node_lock_force(np); 
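/*
 * Minimal stand-alone sketch of the pattern applied in the hunks above,
 * where large on-stack locals (fhandle_t, struct nfsreq, struct
 * nfs_dulookup, struct nfs_vattr) become zone or heap allocations via
 * zalloc(nfs_fhandle_zone), zalloc(nfs_req_zone) and MALLOC(..., M_TEMP,
 * M_WAITOK), all released on one exit path.  Everything below (the
 * struct, its size, the function names) is an illustrative assumption in
 * plain user-space C, not the kernel's types, zones, or MALLOC/FREE
 * macros.
 */
#include <errno.h>
#include <stdlib.h>
#include <string.h>

struct big_attrs {                      /* stand-in for a large attribute struct */
	unsigned char fh_data[128];
	size_t        fh_len;
	char          scratch[1024];    /* too big to leave on a small, fixed kernel stack */
};

static int
rpc_like_operation(int fail_early)
{
	struct big_attrs *attrs;
	int error = 0;

	attrs = malloc(sizeof(*attrs)); /* heap instead of roughly 1 KB of stack */
	if (attrs == NULL) {
		return ENOMEM;
	}
	memset(attrs, 0, sizeof(*attrs));

	if (fail_early) {
		error = EIO;
		goto out;               /* every error path funnels into one cleanup */
	}
	attrs->fh_len = 32;             /* ... fill in results from the "RPC" ... */

out:
	free(attrs);                    /* mirrors NFS_ZFREE() / FREE(..., M_TEMP) above */
	return error;
}

int
main(void)
{
	return rpc_like_operation(0);
}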
@@ -5556,7 +5627,7 @@ nfs4_claim_delegated_open_rpc( if ((error = nfs_open_owner_set_busy(noop, NULL))) { goto out; } - NVATTR_INIT(&nvattr); + NVATTR_INIT(nvattr); delegation = NFS_OPEN_DELEGATE_NONE; dstateid = np->n_dstateid; NFSREQ_SECINFO_SET(&si, VTONFS(dvp), NULL, 0, filename, namelen); @@ -5692,27 +5763,29 @@ nfs4_claim_delegated_open_rpc( } nfsmout_if(error); nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR); - error = nfs4_parsefattr(&nmrep, NULL, &nvattr, &fh, NULL, NULL); + error = nfs4_parsefattr(&nmrep, NULL, nvattr, fh, NULL, NULL); nfsmout_if(error); - if (!NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_FILEHANDLE)) { + if (!NFS_BITMAP_ISSET(nvattr->nva_bitmap, NFS_FATTR_FILEHANDLE)) { printf("nfs: open reclaim didn't return filehandle? %s\n", filename ? filename : "???"); error = EBADRPC; goto nfsmout; } - if (!NFS_CMPFH(np, fh.fh_data, fh.fh_len)) { + if (!NFS_CMPFH(np, fh->fh_data, fh->fh_len)) { // XXX what if fh doesn't match the vnode we think we're re-opening? // Solaris Named Attributes may do this due to a bug.... so don't warn for named attributes. if (!(np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR)) { printf("nfs4_claim_delegated_open_rpc: warning: file handle mismatch %s\n", filename ? filename : "???"); } } - error = nfs_loadattrcache(np, &nvattr, &xid, 1); + error = nfs_loadattrcache(np, nvattr, &xid, 1); nfsmout_if(error); if (rflags & NFS_OPEN_RESULT_LOCKTYPE_POSIX) { nofp->nof_flags |= NFS_OPEN_FILE_POSIXLOCK; } nfsmout: - NVATTR_CLEANUP(&nvattr); + NVATTR_CLEANUP(nvattr); + FREE(nvattr, M_TEMP); + NFS_ZFREE(nfs_fhandle_zone, fh); nfsm_chain_cleanup(&nmreq); nfsm_chain_cleanup(&nmrep); if (!lockerror) { @@ -5755,7 +5828,7 @@ nfs4_open_reclaim_rpc( { struct nfsmount *nmp; struct nfs_open_owner *noop = nofp->nof_owner; - struct nfs_vattr nvattr; + struct nfs_vattr *nvattr; int error = 0, lockerror = ENOENT, status; int nfsvers, numops; u_int64_t xid; @@ -5763,7 +5836,7 @@ nfs4_open_reclaim_rpc( struct nfsm_chain nmreq, nmrep; uint32_t bitmap[NFS_ATTR_BITMAP_LEN], bmlen; uint32_t rflags = 0, delegation, recall = 0; - fhandle_t fh; + fhandle_t *fh; struct nfs_stateid dstateid; char sbuf[64], *s = sbuf; uint32_t ace_type, ace_flags, ace_mask, len, slen = sizeof(sbuf); @@ -5780,7 +5853,9 @@ nfs4_open_reclaim_rpc( return error; } - NVATTR_INIT(&nvattr); + fh = zalloc(nfs_fhandle_zone); + MALLOC(nvattr, struct nfs_vattr *, sizeof(*nvattr), M_TEMP, M_WAITOK); + NVATTR_INIT(nvattr); delegation = NFS_OPEN_DELEGATE_NONE; dstateid = np->n_dstateid; NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0); @@ -5920,14 +5995,14 @@ nfs4_open_reclaim_rpc( } nfsmout_if(error); nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR); - error = nfs4_parsefattr(&nmrep, NULL, &nvattr, &fh, NULL, NULL); + error = nfs4_parsefattr(&nmrep, NULL, nvattr, fh, NULL, NULL); nfsmout_if(error); - if (!NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_FILEHANDLE)) { + if (!NFS_BITMAP_ISSET(nvattr->nva_bitmap, NFS_FATTR_FILEHANDLE)) { NP(np, "nfs: open reclaim didn't return filehandle?"); error = EBADRPC; goto nfsmout; } - if (!NFS_CMPFH(np, fh.fh_data, fh.fh_len)) { + if (!NFS_CMPFH(np, fh->fh_data, fh->fh_len)) { // XXX what if fh doesn't match the vnode we think we're re-opening? // That should be pretty hard in this case, given that we are doing // the open reclaim using the file handle (and not a dir/name pair). 
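/*
 * Sketch of the file-handle consistency check done right after
 * nfs4_parsefattr() in the reclaim paths above: the handle the server
 * returned is compared (length, then bytes) against the handle cached in
 * the node, and a mismatch is only logged, and only when the node is not
 * a named attribute.  The helper below is a user-space illustration with
 * assumed names; the kernel performs this test with its own NFS_CMPFH()
 * macro, whose exact definition is not shown in this patch.
 */
#include <stdio.h>
#include <string.h>

struct cached_fh {
	size_t        len;
	unsigned char data[128];
};

static int
fh_matches(const struct cached_fh *cached, const unsigned char *data, size_t len)
{
	return cached->len == len && memcmp(cached->data, data, len) == 0;
}

static void
check_reclaimed_handle(const struct cached_fh *cached,
    const unsigned char *reply, size_t reply_len, int is_named_attr)
{
	if (!fh_matches(cached, reply, reply_len) && !is_named_attr) {
		fprintf(stderr, "warning: file handle mismatch\n");
	}
}

int
main(void)
{
	struct cached_fh cached = { .len = 4, .data = { 1, 2, 3, 4 } };
	unsigned char reply[4] = { 1, 2, 3, 5 };

	check_reclaimed_handle(&cached, reply, sizeof(reply), 0); /* mismatch: warns */
	return 0;
}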
@@ -5936,7 +6011,7 @@ nfs4_open_reclaim_rpc( NP(np, "nfs4_open_reclaim_rpc: warning: file handle mismatch"); } } - error = nfs_loadattrcache(np, &nvattr, &xid, 1); + error = nfs_loadattrcache(np, nvattr, &xid, 1); nfsmout_if(error); if (rflags & NFS_OPEN_RESULT_LOCKTYPE_POSIX) { nofp->nof_flags |= NFS_OPEN_FILE_POSIXLOCK; @@ -5944,7 +6019,9 @@ nfs4_open_reclaim_rpc( nfsmout: // if (!error) // NP(np, "nfs: open reclaim (%d, %d) succeeded", share_access, share_deny); - NVATTR_CLEANUP(&nvattr); + NVATTR_CLEANUP(nvattr); + FREE(nvattr, M_TEMP); + NFS_ZFREE(nfs_fhandle_zone, fh); nfsm_chain_cleanup(&nmreq); nfsm_chain_cleanup(&nmrep); if (!lockerror) { @@ -6468,7 +6545,7 @@ int nfs4_delegation_return(nfsnode_t np, int flags, thread_t thd, kauth_cred_t cred) { struct nfsmount *nmp; - fhandle_t fh; + fhandle_t *fh; nfs_stateid dstateid; int error; @@ -6477,6 +6554,8 @@ nfs4_delegation_return(nfsnode_t np, int flags, thread_t thd, kauth_cred_t cred) return ENXIO; } + fh = zalloc(nfs_fhandle_zone); + /* first, make sure the node's marked for delegation return */ lck_mtx_lock(&np->n_openlock); np->n_openflags |= (N_DELEG_RETURN | N_DELEG_RETURNING); @@ -6495,10 +6574,10 @@ nfs4_delegation_return(nfsnode_t np, int flags, thread_t thd, kauth_cred_t cred) /* return the delegation */ lck_mtx_lock(&np->n_openlock); dstateid = np->n_dstateid; - fh.fh_len = np->n_fhsize; - bcopy(np->n_fhp, &fh.fh_data, fh.fh_len); + fh->fh_len = np->n_fhsize; + bcopy(np->n_fhp, fh->fh_data, fh->fh_len); lck_mtx_unlock(&np->n_openlock); - error = nfs4_delegreturn_rpc(NFSTONMP(np), fh.fh_data, fh.fh_len, &dstateid, flags, thd, cred); + error = nfs4_delegreturn_rpc(NFSTONMP(np), fh->fh_data, fh->fh_len, &dstateid, flags, thd, cred); /* assume delegation is gone for all errors except ETIMEDOUT, NFSERR_*MOVED */ if ((error != ETIMEDOUT) && (error != NFSERR_MOVED) && (error != NFSERR_LEASE_MOVED)) { lck_mtx_lock(&np->n_openlock); @@ -6538,7 +6617,7 @@ out: } nfs_open_state_clear_busy(np); - + NFS_ZFREE(nfs_fhandle_zone, fh); return error; } @@ -6779,11 +6858,11 @@ restart: } if (!error && (newnofp->nof_flags & NFS_OPEN_FILE_REOPEN)) { /* This shouldn't happen given that this is a new, nodeless nofp */ - nfs_mount_state_in_use_end(nmp, 0); error = nfs4_reopen(newnofp, vfs_context_thread(ctx)); nfs_open_file_destroy(newnofp); newnofp = NULL; if (!error) { + nfs_mount_state_in_use_end(nmp, 0); goto restart; } } @@ -6915,19 +6994,19 @@ nfs4_create_rpc( nfsnode_t *npp) { struct nfsmount *nmp; - struct nfs_vattr nvattr; + struct nfs_vattr *nvattr; int error = 0, create_error = EIO, lockerror = ENOENT, busyerror = ENOENT, status; int nfsvers, namedattrs, numops; - u_int64_t xid, savedxid = 0; + u_int64_t xid = 0, savedxid = 0; nfsnode_t np = NULL; vnode_t newvp = NULL; struct nfsm_chain nmreq, nmrep; uint32_t bitmap[NFS_ATTR_BITMAP_LEN], bmlen; const char *tag; nfs_specdata sd; - fhandle_t fh; - struct nfsreq rq, *req = &rq; - struct nfs_dulookup dul; + fhandle_t *fh; + struct nfsreq *req; + struct nfs_dulookup *dul; struct nfsreq_secinfo_args si; nmp = NFSTONMP(dnp); @@ -6966,15 +7045,19 @@ nfs4_create_rpc( return EINVAL; } + fh = zalloc(nfs_fhandle_zone); + req = zalloc(nfs_req_zone); + MALLOC(dul, struct nfs_dulookup *, sizeof(*dul), M_TEMP, M_WAITOK); + MALLOC(nvattr, struct nfs_vattr *, sizeof(*nvattr), M_TEMP, M_WAITOK); nfs_avoid_needless_id_setting_on_create(dnp, vap, ctx); error = busyerror = nfs_node_set_busy(dnp, vfs_context_thread(ctx)); if (!namedattrs) { - nfs_dulookup_init(&dul, dnp, cnp->cn_nameptr, cnp->cn_namelen, 
ctx); + nfs_dulookup_init(dul, dnp, cnp->cn_nameptr, cnp->cn_namelen, ctx); } NFSREQ_SECINFO_SET(&si, dnp, NULL, 0, NULL, 0); - NVATTR_INIT(&nvattr); + NVATTR_INIT(nvattr); nfsm_chain_null(&nmreq); nfsm_chain_null(&nmrep); @@ -7016,7 +7099,7 @@ nfs4_create_rpc( vfs_context_thread(ctx), vfs_context_ucred(ctx), &si, 0, NULL, &req); if (!error) { if (!namedattrs) { - nfs_dulookup_start(&dul, dnp, ctx); + nfs_dulookup_start(dul, dnp, ctx); } error = nfs_request_async_finish(req, &nmrep, &xid, &status); } @@ -7040,9 +7123,9 @@ nfs4_create_rpc( nfs_vattr_set_supported(bitmap, vap); nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR); nfsmout_if(error); - error = nfs4_parsefattr(&nmrep, NULL, &nvattr, &fh, NULL, NULL); + error = nfs4_parsefattr(&nmrep, NULL, nvattr, fh, NULL, NULL); nfsmout_if(error); - if (!NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_FILEHANDLE)) { + if (!NFS_BITMAP_ISSET(nvattr->nva_bitmap, NFS_FATTR_FILEHANDLE)) { printf("nfs: create/%s didn't return filehandle? %s\n", tag, cnp->cn_nameptr); error = EBADRPC; goto nfsmout; @@ -7071,20 +7154,25 @@ nfsmout: nfs_getattr(dnp, NULL, ctx, NGA_CACHED); } - if (!error && fh.fh_len) { + if (!error && fh->fh_len) { /* create the vnode with the filehandle and attributes */ xid = savedxid; - error = nfs_nget(NFSTOMP(dnp), dnp, cnp, fh.fh_data, fh.fh_len, &nvattr, &xid, rq.r_auth, NG_MAKEENTRY, &np); + error = nfs_nget(NFSTOMP(dnp), dnp, cnp, fh->fh_data, fh->fh_len, nvattr, &xid, req->r_auth, NG_MAKEENTRY, &np); if (!error) { newvp = NFSTOV(np); } } - NVATTR_CLEANUP(&nvattr); if (!namedattrs) { - nfs_dulookup_finish(&dul, dnp, ctx); + nfs_dulookup_finish(dul, dnp, ctx); } + NVATTR_CLEANUP(nvattr); + NFS_ZFREE(nfs_fhandle_zone, fh); + NFS_ZFREE(nfs_req_zone, req); + FREE(dul, M_TEMP); + FREE(nvattr, M_TEMP); + /* * Kludge: Map EEXIST => 0 assuming that you have a reply to a retry * if we can succeed in looking up the object. 
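/*
 * Sketch of the copy-then-unlock pattern used by nfs4_delegation_return
 * above: the node's file handle (and delegation state id) is snapshotted
 * into a private, heap-allocated copy while n_openlock is held, and the
 * slow delegreturn RPC runs only after the lock is dropped.  The types,
 * the pthread mutex, and send_delegreturn() below are user-space
 * stand-ins assumed for illustration, not the kernel's lck_mtx or RPC
 * code.
 */
#include <errno.h>
#include <pthread.h>
#include <stdlib.h>
#include <string.h>

struct node {
	pthread_mutex_t lock;
	unsigned char   fh[128];
	size_t          fh_len;
};

struct fh_snapshot {
	unsigned char data[128];
	size_t        len;
};

static int
send_delegreturn(const unsigned char *fh, size_t len)
{
	(void)fh; (void)len;            /* placeholder for the actual RPC */
	return 0;
}

static int
return_delegation(struct node *np)
{
	struct fh_snapshot *fh = malloc(sizeof(*fh));
	int error;

	if (fh == NULL) {
		return ENOMEM;
	}
	pthread_mutex_lock(&np->lock);
	fh->len = np->fh_len;
	memcpy(fh->data, np->fh, fh->len);      /* snapshot under the lock */
	pthread_mutex_unlock(&np->lock);

	error = send_delegreturn(fh->data, fh->len); /* slow call, lock not held */
	free(fh);
	return error;
}

int
main(void)
{
	struct node n = { .fh_len = 3, .fh = { 0xca, 0xfe, 0x42 } };

	pthread_mutex_init(&n.lock, NULL);
	return return_delegation(&n);
}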
@@ -7346,7 +7434,7 @@ nfs4_vnop_rmdir( int error = 0, namedattrs; nfsnode_t np = VTONFS(vp); nfsnode_t dnp = VTONFS(dvp); - struct nfs_dulookup dul; + struct nfs_dulookup *dul; if (vnode_vtype(vp) != VDIR) { return EINVAL; @@ -7362,9 +7450,10 @@ nfs4_vnop_rmdir( return error; } + MALLOC(dul, struct nfs_dulookup *, sizeof(*dul), M_TEMP, M_WAITOK); if (!namedattrs) { - nfs_dulookup_init(&dul, dnp, cnp->cn_nameptr, cnp->cn_namelen, ctx); - nfs_dulookup_start(&dul, dnp, ctx); + nfs_dulookup_init(dul, dnp, cnp->cn_nameptr, cnp->cn_namelen, ctx); + nfs_dulookup_start(dul, dnp, ctx); } error = nfs4_remove_rpc(dnp, cnp->cn_nameptr, cnp->cn_namelen, @@ -7374,7 +7463,7 @@ nfs4_vnop_rmdir( /* nfs_getattr() will check changed and purge caches */ nfs_getattr(dnp, NULL, ctx, NGA_CACHED); if (!namedattrs) { - nfs_dulookup_finish(&dul, dnp, ctx); + nfs_dulookup_finish(dul, dnp, ctx); } nfs_node_clear_busy2(dnp, np); @@ -7398,6 +7487,7 @@ nfs4_vnop_rmdir( } lck_mtx_unlock(nfs_node_hash_mutex); } + FREE(dul, M_TEMP); return error; } @@ -7434,10 +7524,10 @@ nfs4_named_attr_dir_get(nfsnode_t np, int fetch, vfs_context_t ctx) struct nfsm_chain nmreq, nmrep; u_int64_t xid; uint32_t bitmap[NFS_ATTR_BITMAP_LEN]; - fhandle_t fh; - struct nfs_vattr nvattr; + fhandle_t *fh; + struct nfs_vattr *nvattr; struct componentname cn; - struct nfsreq rq, *req = &rq; + struct nfsreq *req; struct nfsreq_secinfo_args si; nmp = NFSTONMP(np); @@ -7449,13 +7539,16 @@ nfs4_named_attr_dir_get(nfsnode_t np, int fetch, vfs_context_t ctx) } NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0); - NVATTR_INIT(&nvattr); + fh = zalloc(nfs_fhandle_zone); + req = zalloc(nfs_req_zone); + MALLOC(nvattr, struct nfs_vattr *, sizeof(*nvattr), M_TEMP, M_WAITOK); + NVATTR_INIT(nvattr); nfsm_chain_null(&nmreq); nfsm_chain_null(&nmrep); bzero(&cn, sizeof(cn)); cn.cn_nameptr = __CAST_AWAY_QUALIFIER(_PATH_FORKSPECIFIER, const, char *); /* "/..namedfork/" */ - cn.cn_namelen = strlen(_PATH_FORKSPECIFIER); + cn.cn_namelen = NFS_STRLEN_INT(_PATH_FORKSPECIFIER); cn.cn_nameiop = LOOKUP; if (np->n_attrdirfh) { @@ -7502,31 +7595,34 @@ nfs4_named_attr_dir_get(nfsnode_t np, int fetch, vfs_context_t ctx) nfsm_chain_op_check(error, &nmrep, NFS_OP_OPENATTR); nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR); nfsmout_if(error); - error = nfs4_parsefattr(&nmrep, NULL, &nvattr, &fh, NULL, NULL); + error = nfs4_parsefattr(&nmrep, NULL, nvattr, fh, NULL, NULL); nfsmout_if(error); - if (!NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_FILEHANDLE) || !fh.fh_len) { + if (!NFS_BITMAP_ISSET(nvattr->nva_bitmap, NFS_FATTR_FILEHANDLE) || !fh->fh_len) { error = ENOENT; goto nfsmout; } - if (!np->n_attrdirfh || (*np->n_attrdirfh != fh.fh_len)) { + if (!np->n_attrdirfh || (*np->n_attrdirfh != fh->fh_len)) { /* (re)allocate attrdir fh buffer */ if (np->n_attrdirfh) { FREE(np->n_attrdirfh, M_TEMP); } - MALLOC(np->n_attrdirfh, u_char*, fh.fh_len + 1, M_TEMP, M_WAITOK); + MALLOC(np->n_attrdirfh, u_char*, fh->fh_len + 1, M_TEMP, M_WAITOK); } if (!np->n_attrdirfh) { error = ENOMEM; goto nfsmout; } /* cache the attrdir fh in the node */ - *np->n_attrdirfh = fh.fh_len; - bcopy(fh.fh_data, np->n_attrdirfh + 1, fh.fh_len); + *np->n_attrdirfh = (unsigned char)fh->fh_len; /* No truncation because fh_len's value is checked during nfs4_parsefattr() */ + bcopy(fh->fh_data, np->n_attrdirfh + 1, fh->fh_len); /* create node for attrdir */ // XXX can't set parent correctly (to np) yet - error = nfs_nget(NFSTOMP(np), NULL, &cn, fh.fh_data, fh.fh_len, &nvattr, &xid, rq.r_auth, 0, &adnp); + error = 
nfs_nget(NFSTOMP(np), NULL, &cn, fh->fh_data, fh->fh_len, nvattr, &xid, req->r_auth, 0, &adnp); nfsmout: - NVATTR_CLEANUP(&nvattr); + NVATTR_CLEANUP(nvattr); + NFS_ZFREE(nfs_fhandle_zone, fh); + NFS_ZFREE(nfs_req_zone, req); + FREE(nvattr, M_TEMP); nfsm_chain_cleanup(&nmreq); nfsm_chain_cleanup(&nmrep); @@ -7587,16 +7683,16 @@ nfs4_named_attr_get( int inuse = 0, adlockerror = ENOENT, busyerror = ENOENT, adbusyerror = ENOENT, nofpbusyerror = ENOENT; int create, guarded, prefetch, truncate, noopbusy = 0; int open, status, numops, hadattrdir, negnamecache; - struct nfs_vattr nvattr; + struct nfs_vattr *nvattr; struct vnode_attr vattr; nfsnode_t adnp = NULL, anp = NULL; vnode_t avp = NULL; - u_int64_t xid, savedxid = 0; + u_int64_t xid = 0, savedxid = 0; struct nfsm_chain nmreq, nmrep; uint32_t bitmap[NFS_ATTR_BITMAP_LEN], bmlen; - uint32_t denyMode, rflags, delegation, recall, eof, rlen, retlen; + uint32_t denyMode = 0, rflags, delegation, recall, eof, rlen, retlen; nfs_stateid stateid, dstateid; - fhandle_t fh; + fhandle_t *fh; struct nfs_open_owner *noop = NULL; struct nfs_open_file *newnofp = NULL, *nofp = NULL; struct vnop_access_args naa; @@ -7606,11 +7702,10 @@ nfs4_named_attr_get( char sbuf[64], *s; uint32_t ace_type, ace_flags, ace_mask, len, slen; struct kauth_ace ace; - struct nfsreq rq, *req = &rq; + struct nfsreq *req; struct nfsreq_secinfo_args si; *anpp = NULL; - fh.fh_len = 0; rflags = delegation = recall = eof = rlen = retlen = 0; ace.ace_flags = 0; s = sbuf; @@ -7620,7 +7715,12 @@ nfs4_named_attr_get( if (nfs_mount_gone(nmp)) { return ENXIO; } - NVATTR_INIT(&nvattr); + fh = zalloc(nfs_fhandle_zone); + req = zalloc(nfs_req_zone); + MALLOC(nvattr, struct nfs_vattr *, sizeof(*nvattr), M_TEMP, M_WAITOK); + NVATTR_INIT(nvattr); + fh->fh_len = 0; + bzero(&dstateid, sizeof(dstateid)); negnamecache = !NMFLAG(nmp, NONEGNAMECACHE); thd = vfs_context_thread(ctx); cred = vfs_context_ucred(ctx); @@ -7630,13 +7730,14 @@ nfs4_named_attr_get( prefetch = (flags & NFS_GET_NAMED_ATTR_PREFETCH); if (!create) { - error = nfs_getattr(np, &nvattr, ctx, NGA_CACHED); + error = nfs_getattr(np, nvattr, ctx, NGA_CACHED); if (error) { - return error; + goto out_free; } - if (NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_NAMED_ATTR) && - !(nvattr.nva_flags & NFS_FFLAG_HAS_NAMED_ATTRS)) { - return ENOATTR; + if (NFS_BITMAP_ISSET(nvattr->nva_bitmap, NFS_FATTR_NAMED_ATTR) && + !(nvattr->nva_flags & NFS_FFLAG_HAS_NAMED_ATTRS)) { + error = ENOATTR; + goto out_free; } } else if (accessMode == NFS_OPEN_SHARE_ACCESS_NONE) { /* shouldn't happen... 
but just be safe */ @@ -7656,12 +7757,13 @@ nfs4_named_attr_get( } noop = nfs_open_owner_find(nmp, vfs_context_ucred(ctx), 1); if (!noop) { - return ENOMEM; + error = ENOMEM; + goto out_free; } } if ((error = busyerror = nfs_node_set_busy(np, vfs_context_thread(ctx)))) { - return error; + goto out_free; } adnp = nfs4_named_attr_dir_get(np, 0, ctx); @@ -7691,7 +7793,7 @@ nfs4_named_attr_get( case 0: /* cache miss */ /* try dir buf cache lookup */ - error = nfs_dir_buf_cache_lookup(adnp, &anp, cnp, ctx, 0); + error = nfs_dir_buf_cache_lookup(adnp, &anp, cnp, ctx, 0, NULL); if (!error && anp) { /* dir buf cache hit */ *anpp = anp; @@ -7700,7 +7802,7 @@ nfs4_named_attr_get( if (error != -1) { /* cache miss */ break; } - /* FALLTHROUGH */ + OS_FALLTHROUGH; case -1: /* cache hit, not really an error */ OSAddAtomic64(1, &nfsstats.lookupcache_hits); @@ -7719,7 +7821,7 @@ nfs4_named_attr_get( /* compute actual success/failure based on accessibility */ error = nfs_vnop_access(&naa); - /* FALLTHROUGH */ + OS_FALLTHROUGH; default: /* we either found it, or hit an error */ if (!error && guarded) { @@ -7753,11 +7855,12 @@ restart: error = EIO; } if (!error && (newnofp->nof_flags & NFS_OPEN_FILE_REOPEN)) { - nfs_mount_state_in_use_end(nmp, 0); error = nfs4_reopen(newnofp, vfs_context_thread(ctx)); nfs_open_file_destroy(newnofp); newnofp = NULL; if (!error) { + nfs_mount_state_in_use_end(nmp, 0); + inuse = 0; goto restart; } } @@ -7929,28 +8032,28 @@ restart: nfsm_chain_op_check(error, &nmrep, NFS_OP_OPENATTR); nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR); nfsmout_if(error); - error = nfs4_parsefattr(&nmrep, NULL, &nvattr, &fh, NULL, NULL); + error = nfs4_parsefattr(&nmrep, NULL, nvattr, fh, NULL, NULL); nfsmout_if(error); - if (NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_FILEHANDLE) && fh.fh_len) { - if (!np->n_attrdirfh || (*np->n_attrdirfh != fh.fh_len)) { + if (NFS_BITMAP_ISSET(nvattr->nva_bitmap, NFS_FATTR_FILEHANDLE) && fh->fh_len) { + if (!np->n_attrdirfh || (*np->n_attrdirfh != fh->fh_len)) { /* (re)allocate attrdir fh buffer */ if (np->n_attrdirfh) { FREE(np->n_attrdirfh, M_TEMP); } - MALLOC(np->n_attrdirfh, u_char*, fh.fh_len + 1, M_TEMP, M_WAITOK); + MALLOC(np->n_attrdirfh, u_char*, fh->fh_len + 1, M_TEMP, M_WAITOK); } if (np->n_attrdirfh) { /* remember the attrdir fh in the node */ - *np->n_attrdirfh = fh.fh_len; - bcopy(fh.fh_data, np->n_attrdirfh + 1, fh.fh_len); + *np->n_attrdirfh = (unsigned char)fh->fh_len; /* No truncation because fh_len's value is checked during nfs4_parsefattr() */ + bcopy(fh->fh_data, np->n_attrdirfh + 1, fh->fh_len); /* create busied node for attrdir */ struct componentname cn; bzero(&cn, sizeof(cn)); cn.cn_nameptr = __CAST_AWAY_QUALIFIER(_PATH_FORKSPECIFIER, const, char *); /* "/..namedfork/" */ - cn.cn_namelen = strlen(_PATH_FORKSPECIFIER); + cn.cn_namelen = NFS_STRLEN_INT(_PATH_FORKSPECIFIER); cn.cn_nameiop = LOOKUP; // XXX can't set parent correctly (to np) yet - error = nfs_nget(NFSTOMP(np), NULL, &cn, fh.fh_data, fh.fh_len, &nvattr, &xid, rq.r_auth, 0, &adnp); + error = nfs_nget(NFSTOMP(np), NULL, &cn, fh->fh_data, fh->fh_len, nvattr, &xid, req->r_auth, 0, &adnp); if (!error) { adlockerror = 0; /* set the node busy */ @@ -7961,8 +8064,8 @@ restart: error = 0; } } - NVATTR_CLEANUP(&nvattr); - fh.fh_len = 0; + NVATTR_CLEANUP(nvattr); + fh->fh_len = 0; } if (open) { nfsm_chain_op_check(error, &nmrep, NFS_OP_OPEN); @@ -8031,9 +8134,9 @@ restart: } nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR); nfsmout_if(error); - error = nfs4_parsefattr(&nmrep, NULL, 
&nvattr, &fh, NULL, NULL); + error = nfs4_parsefattr(&nmrep, NULL, nvattr, fh, NULL, NULL); nfsmout_if(error); - if (!NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_FILEHANDLE) || !fh.fh_len) { + if (!NFS_BITMAP_ISSET(nvattr->nva_bitmap, NFS_FATTR_FILEHANDLE) || !fh->fh_len) { error = EIO; goto nfsmout; } @@ -8059,8 +8162,8 @@ restart: nfs_node_unlock(adnp); adlockerror = ENOENT; } - NVATTR_CLEANUP(&nvattr); - error = nfs4_open_confirm_rpc(nmp, adnp ? adnp : np, fh.fh_data, fh.fh_len, noop, &newnofp->nof_stateid, thd, cred, &nvattr, &xid); + NVATTR_CLEANUP(nvattr); + error = nfs4_open_confirm_rpc(nmp, adnp ? adnp : np, fh->fh_data, fh->fh_len, noop, &newnofp->nof_stateid, thd, cred, nvattr, &xid); nfsmout_if(error); savedxid = xid; if ((adlockerror = nfs_node_lock(adnp))) { @@ -8090,10 +8193,10 @@ nfsmout: nfs_node_unlock(adnp); adlockerror = ENOENT; } - if (!error && !anp && fh.fh_len) { + if (!error && !anp && fh->fh_len) { /* create the vnode with the filehandle and attributes */ xid = savedxid; - error = nfs_nget(NFSTOMP(np), adnp, cnp, fh.fh_data, fh.fh_len, &nvattr, &xid, rq.r_auth, NG_MAKEENTRY, &anp); + error = nfs_nget(NFSTOMP(np), adnp, cnp, fh->fh_data, fh->fh_len, nvattr, &xid, req->r_auth, NG_MAKEENTRY, &anp); if (!error) { *anpp = anp; nfs_node_unlock(anp); @@ -8137,7 +8240,7 @@ nfsmout: } } } - NVATTR_CLEANUP(&nvattr); + NVATTR_CLEANUP(nvattr); if (open && ((delegation == NFS_OPEN_DELEGATE_READ) || (delegation == NFS_OPEN_DELEGATE_WRITE))) { if (!error && anp && !recall) { /* stuff the delegation state in the node */ @@ -8157,7 +8260,7 @@ nfsmout: } else { /* give the delegation back */ if (anp) { - if (NFS_CMPFH(anp, fh.fh_data, fh.fh_len)) { + if (NFS_CMPFH(anp, fh->fh_data, fh->fh_len)) { /* update delegation state and return it */ lck_mtx_lock(&anp->n_openlock); anp->n_openflags &= ~N_DELEG_MASK; @@ -8173,13 +8276,13 @@ nfsmout: } lck_mtx_unlock(&anp->n_openlock); /* don't need to send a separate delegreturn for fh */ - fh.fh_len = 0; + fh->fh_len = 0; } /* return anp's current delegation */ nfs4_delegation_return(anp, 0, thd, cred); } - if (fh.fh_len) { /* return fh's delegation if it wasn't for anp */ - nfs4_delegreturn_rpc(nmp, fh.fh_data, fh.fh_len, &dstateid, 0, thd, cred); + if (fh->fh_len) { /* return fh's delegation if it wasn't for anp */ + nfs4_delegreturn_rpc(nmp, fh->fh_data, fh->fh_len, &dstateid, 0, thd, cred); } } } @@ -8213,6 +8316,7 @@ nfsmout: } goto restart; } + inuse = 0; if (noop) { if (noopbusy) { nfs_open_owner_clear_busy(noop); @@ -8244,7 +8348,7 @@ nfsmout: */ struct nfsbuf *bp = NULL; int lastpg; - uint32_t pagemask; + nfsbufpgs pagemask, pagemaskand; retlen = MIN(retlen, rlen); @@ -8266,14 +8370,15 @@ nfsmout: } /* calculate page mask for the range of data read */ - lastpg = (trunc_page_32(retlen) - 1) / PAGE_SIZE; - pagemask = ((1 << (lastpg + 1)) - 1); + lastpg = (retlen - 1) / PAGE_SIZE; + nfs_buf_pgs_get_page_mask(&pagemask, lastpg + 1); if (!error) { error = nfs_buf_get(anp, 0, nmp->nm_biosize, thd, NBLK_READ | NBLK_NOWAIT, &bp); } /* don't save the data if dirty or potential I/O conflict */ - if (!error && bp && !bp->nb_dirtyoff && !(bp->nb_dirty & pagemask) && + nfs_buf_pgs_bit_and(&bp->nb_dirty, &pagemask, &pagemaskand); + if (!error && bp && !bp->nb_dirtyoff && !nfs_buf_pgs_is_set(&pagemaskand) && timevalcmp(&anp->n_lastio, &now, <)) { OSAddAtomic64(1, &nfsstats.read_bios); CLR(bp->nb_flags, (NB_DONE | NB_ASYNC)); @@ -8323,12 +8428,19 @@ nfsmout: if (adnp) { vnode_put(NFSTOV(adnp)); } + if (inuse) { + nfs_mount_state_in_use_end(nmp, 
error); + } if (error && *anpp) { vnode_put(NFSTOV(*anpp)); *anpp = NULL; } nfsm_chain_cleanup(&nmreq); nfsm_chain_cleanup(&nmrep); +out_free: + NFS_ZFREE(nfs_fhandle_zone, fh); + NFS_ZFREE(nfs_req_zone, req); + FREE(nvattr, M_TEMP); return error; } @@ -8351,7 +8463,7 @@ nfs4_named_attr_remove(nfsnode_t np, nfsnode_t anp, const char *name, vfs_contex bzero(&cn, sizeof(cn)); cn.cn_nameptr = __CAST_AWAY_QUALIFIER(name, const, char *); - cn.cn_namelen = strlen(name); + cn.cn_namelen = NFS_STRLEN_INT(name); cn.cn_nameiop = DELETE; cn.cn_flags = 0; @@ -8412,7 +8524,7 @@ nfs4_vnop_getxattr( { vfs_context_t ctx = ap->a_context; struct nfsmount *nmp; - struct nfs_vattr nvattr; + struct nfs_vattr *nvattr; struct componentname cn; nfsnode_t anp; int error = 0, isrsrcfork; @@ -8425,18 +8537,21 @@ nfs4_vnop_getxattr( if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR)) { return ENOTSUP; } - error = nfs_getattr(VTONFS(ap->a_vp), &nvattr, ctx, NGA_CACHED); + + MALLOC(nvattr, struct nfs_vattr *, sizeof(*nvattr), M_TEMP, M_WAITOK); + error = nfs_getattr(VTONFS(ap->a_vp), nvattr, ctx, NGA_CACHED); if (error) { - return error; + goto out; } - if (NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_NAMED_ATTR) && - !(nvattr.nva_flags & NFS_FFLAG_HAS_NAMED_ATTRS)) { - return ENOATTR; + if (NFS_BITMAP_ISSET(nvattr->nva_bitmap, NFS_FATTR_NAMED_ATTR) && + !(nvattr->nva_flags & NFS_FFLAG_HAS_NAMED_ATTRS)) { + error = ENOATTR; + goto out; } bzero(&cn, sizeof(cn)); cn.cn_nameptr = __CAST_AWAY_QUALIFIER(ap->a_name, const, char *); - cn.cn_namelen = strlen(ap->a_name); + cn.cn_namelen = NFS_STRLEN_INT(ap->a_name); cn.cn_nameiop = LOOKUP; cn.cn_flags = MAKEENTRY; @@ -8458,6 +8573,8 @@ nfs4_vnop_getxattr( if (anp) { vnode_put(NFSTOV(anp)); } +out: + FREE(nvattr, M_TEMP); return error; } @@ -8509,7 +8626,7 @@ nfs4_vnop_setxattr( bzero(&cn, sizeof(cn)); cn.cn_nameptr = __CAST_AWAY_QUALIFIER(name, const, char *); - cn.cn_namelen = strlen(name); + cn.cn_namelen = NFS_STRLEN_INT(name); cn.cn_nameiop = CREATE; cn.cn_flags = MAKEENTRY; @@ -8678,7 +8795,7 @@ nfs4_vnop_listxattr( nfsnode_t adnp = NULL; struct nfsmount *nmp; int error, done, i; - struct nfs_vattr nvattr; + struct nfs_vattr *nvattr; uint64_t cookie, nextcookie, lbn = 0; struct nfsbuf *bp = NULL; struct nfs_dir_buf_header *ndbhp; @@ -8693,17 +8810,19 @@ nfs4_vnop_listxattr( return ENOTSUP; } - error = nfs_getattr(np, &nvattr, ctx, NGA_CACHED); + MALLOC(nvattr, struct nfs_vattr *, sizeof(*nvattr), M_TEMP, M_WAITOK); + error = nfs_getattr(np, nvattr, ctx, NGA_CACHED); if (error) { - return error; + goto out_free; } - if (NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_NAMED_ATTR) && - !(nvattr.nva_flags & NFS_FFLAG_HAS_NAMED_ATTRS)) { - return 0; + if (NFS_BITMAP_ISSET(nvattr->nva_bitmap, NFS_FATTR_NAMED_ATTR) && + !(nvattr->nva_flags & NFS_FFLAG_HAS_NAMED_ATTRS)) { + error = 0; + goto out_free; } if ((error = nfs_node_set_busy(np, vfs_context_thread(ctx)))) { - return error; + goto out_free; } adnp = nfs4_named_attr_dir_get(np, 1, ctx); nfs_node_clear_busy(np); @@ -8741,7 +8860,7 @@ nfs4_vnop_listxattr( nfs_node_unlock(adnp); } /* nfs_getattr() will check changed and purge caches */ - if ((error = nfs_getattr(adnp, &nvattr, ctx, NGA_UNCACHED))) { + if ((error = nfs_getattr(adnp, nvattr, ctx, NGA_UNCACHED))) { goto out; } @@ -8827,6 +8946,8 @@ out: if (adnp) { vnode_put(NFSTOV(adnp)); } +out_free: + FREE(nvattr, M_TEMP); return error; } @@ -8845,7 +8966,7 @@ nfs4_vnop_getnamedstream( { vfs_context_t ctx = ap->a_context; struct nfsmount *nmp; - struct nfs_vattr nvattr; 
+ struct nfs_vattr *nvattr; struct componentname cn; nfsnode_t anp; int error = 0; @@ -8858,18 +8979,21 @@ nfs4_vnop_getnamedstream( if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR)) { return ENOTSUP; } - error = nfs_getattr(VTONFS(ap->a_vp), &nvattr, ctx, NGA_CACHED); + + MALLOC(nvattr, struct nfs_vattr *, sizeof(*nvattr), M_TEMP, M_WAITOK); + error = nfs_getattr(VTONFS(ap->a_vp), nvattr, ctx, NGA_CACHED); if (error) { - return error; + goto out; } - if (NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_NAMED_ATTR) && - !(nvattr.nva_flags & NFS_FFLAG_HAS_NAMED_ATTRS)) { - return ENOATTR; + if (NFS_BITMAP_ISSET(nvattr->nva_bitmap, NFS_FATTR_NAMED_ATTR) && + !(nvattr->nva_flags & NFS_FFLAG_HAS_NAMED_ATTRS)) { + error = ENOATTR; + goto out; } bzero(&cn, sizeof(cn)); cn.cn_nameptr = __CAST_AWAY_QUALIFIER(ap->a_name, const, char *); - cn.cn_namelen = strlen(ap->a_name); + cn.cn_namelen = NFS_STRLEN_INT(ap->a_name); cn.cn_nameiop = LOOKUP; cn.cn_flags = MAKEENTRY; @@ -8883,6 +9007,8 @@ nfs4_vnop_getnamedstream( } else if (anp) { vnode_put(NFSTOV(anp)); } +out: + FREE(nvattr, M_TEMP); return error; } @@ -8914,7 +9040,7 @@ nfs4_vnop_makenamedstream( bzero(&cn, sizeof(cn)); cn.cn_nameptr = __CAST_AWAY_QUALIFIER(ap->a_name, const, char *); - cn.cn_namelen = strlen(ap->a_name); + cn.cn_namelen = NFS_STRLEN_INT(ap->a_name); cn.cn_nameiop = CREATE; cn.cn_flags = MAKEENTRY; diff --git a/bsd/nfs/nfs_bio.c b/bsd/nfs/nfs_bio.c index e7ddfaaa5..0e7c29ea9 100644 --- a/bsd/nfs/nfs_bio.c +++ b/bsd/nfs/nfs_bio.c @@ -117,6 +117,8 @@ int nfs_nbdwrite; int nfs_buf_timer_on = 0; thread_t nfsbufdelwrithd = NULL; +ZONE_DECLARE(nfsbuf_zone, "NFS bio", sizeof(struct nfsbuf), ZC_NONE); + lck_grp_t *nfs_buf_lck_grp; lck_mtx_t *nfs_buf_mutex; @@ -139,6 +141,8 @@ lck_mtx_t *nfs_buf_mutex; /* fraction of total nfsbufs that nfsbuffreemetacnt should exceed before bothering to call nfs_buf_freeup() */ #define META_FREEUP_MIN_FRAC 2 +#define NFS_ROUND_BLOCK(p, blksize) ((((uint64_t)(p) + blksize - 1) & ~((uint64_t)blksize - 1)) / blksize) + #define NFS_BUF_FREEUP() \ do { \ /* only call nfs_buf_freeup() if it has work to do: */ \ @@ -148,6 +152,63 @@ lck_mtx_t *nfs_buf_mutex; nfs_buf_freeup(0); \ } while (0) +void +nfs_buf_pgs_get_page_mask(nfsbufpgs *nfsbp, off_t page) +{ + off_t page_pos = page / NBPGS_ELEMENT_PAGES; + off_t max_page = NBPGS_STRUCT_SIZE * 8; + NBPGS_ERASE(nfsbp); + + if (page >= max_page) { + nfs_buf_pgs_bit_not(nfsbp); + return; + } + + NBPGS_SET(nfsbp, page); + nfsbp->pages[page_pos]--; + for (off_t i = page_pos - 1; i >= 0; i--) { + nfsbp->pages[i] = ~0; + } +} + +void +nfs_buf_pgs_bit_not(nfsbufpgs *nfsbp) +{ + for (uint32_t i = 0; i < NBPGS_ELEMENTS; i++) { + nfsbp->pages[i] = ~nfsbp->pages[i]; + } +} + +void +nfs_buf_pgs_bit_and(nfsbufpgs *nfsbp_src1, nfsbufpgs *nfsbp_src2, nfsbufpgs *nfsbp_dst) +{ + for (uint32_t i = 0; i < NBPGS_ELEMENTS; i++) { + nfsbp_dst->pages[i] = nfsbp_src1->pages[i] & nfsbp_src2->pages[i]; + } +} + +void +nfs_buf_pgs_set_pages_between(nfsbufpgs *nfsbp, off_t firstpg, off_t lastpg) +{ + nfsbufpgs pagemaskfirst, pagemasklast; + + nfs_buf_pgs_get_page_mask(&pagemasklast, lastpg); + nfs_buf_pgs_get_page_mask(&pagemaskfirst, firstpg); + nfs_buf_pgs_bit_not(&pagemaskfirst); + nfs_buf_pgs_bit_and(&pagemaskfirst, &pagemasklast, nfsbp); +} + +int +nfs_buf_pgs_is_set(nfsbufpgs *nfsbp) +{ + for (uint32_t i = 0; i < NBPGS_ELEMENTS; i++) { + if (nfsbp->pages[i] != 0) { + return 1; + } + } + return 0; +} + /* * Initialize nfsbuf lists */ @@ -161,12 +222,12 @@ nfs_nbinit(void) 
nfsbuffreecnt = nfsbuffreemetacnt = nfsbufdelwricnt = 0; nfsbufmin = 128; /* size nfsbufmax to cover at most half sane_size (w/default buf size) */ - nfsbufmax = (sane_size >> PAGE_SHIFT) / (2 * (NFS_RWSIZE >> PAGE_SHIFT)); + nfsbufmax = (int)(sane_size >> PAGE_SHIFT) / (2 * (NFS_RWSIZE >> PAGE_SHIFT)); nfsbufmetamax = nfsbufmax / 4; nfsneedbuffer = 0; nfs_nbdwrite = 0; - nfsbufhashtbl = hashinit(nfsbufmax / 4, M_TEMP, &nfsbufhash); + nfsbufhashtbl = hashinit(nfsbufmax / 4, M_NFSBIO, &nfsbufhash); TAILQ_INIT(&nfsbuffree); TAILQ_INIT(&nfsbuffreemeta); TAILQ_INIT(&nfsbufdelwri); @@ -282,9 +343,9 @@ nfs_buf_freeup(int timer) } /* if buf was NB_META, dump buffer */ if (ISSET(fbp->nb_flags, NB_META) && fbp->nb_data) { - kfree(fbp->nb_data, fbp->nb_bufsize); + kheap_free(KHEAP_DATA_BUFFERS, fbp->nb_data, fbp->nb_bufsize); } - FREE(fbp, M_TEMP); + NFS_ZFREE(nfsbuf_zone, fbp); } } @@ -383,7 +444,7 @@ nfs_buf_page_inval(vnode_t vp, off_t offset) * If it does, we can't let the pager drop the page. */ if (bp->nb_dirtyend > 0) { - int start = offset - NBOFF(bp); + off_t start = offset - NBOFF(bp); if ((bp->nb_dirtyend > start) && (bp->nb_dirtyoff < (start + PAGE_SIZE))) { /* @@ -472,7 +533,8 @@ nfs_buf_upl_check(struct nfsbuf *bp) } pl = ubc_upl_pageinfo(bp->nb_pagelist); - bp->nb_valid = bp->nb_dirty = 0; + NBPGS_ERASE(&bp->nb_valid); + NBPGS_ERASE(&bp->nb_dirty); for (i = 0; i < npages; i++, fileoffset += PAGE_SIZE_64) { /* anything beyond the end of the file is not valid or dirty */ @@ -541,7 +603,7 @@ nfs_buf_map(struct nfsbuf *bp) void nfs_buf_normalize_valid_range(nfsnode_t np, struct nfsbuf *bp) { - int pg, npg; + off_t pg, npg; /* pull validoff back to start of contiguous valid page range */ pg = bp->nb_validoff / PAGE_SIZE; while (pg >= 0 && NBPGVALID(bp, pg)) { @@ -828,7 +890,7 @@ loop: } else if (!lrubp) { bp = metabp; } else { - int32_t lru_stale_time, meta_stale_time; + time_t lru_stale_time, meta_stale_time; lru_stale_time = lrubp->nb_timestamp + NFSBUF_LRU_STALE; meta_stale_time = metabp->nb_timestamp + NFSBUF_META_STALE; if (lru_stale_time <= meta_stale_time) { @@ -871,7 +933,7 @@ loop: } } else if (ISSET(bp->nb_flags, NB_META)) { if (bp->nb_data) { - kfree(bp->nb_data, bp->nb_bufsize); + kheap_free(KHEAP_DATA_BUFFERS, bp->nb_data, bp->nb_bufsize); bp->nb_data = NULL; } nfsbufmetacnt--; @@ -880,20 +942,15 @@ loop: bp->nb_error = 0; bp->nb_validoff = bp->nb_validend = -1; bp->nb_dirtyoff = bp->nb_dirtyend = 0; - bp->nb_valid = 0; - bp->nb_dirty = 0; + NBPGS_ERASE(&bp->nb_valid); + NBPGS_ERASE(&bp->nb_dirty); bp->nb_verf = 0; } else { /* no buffer to reuse */ if ((nfsbufcnt < nfsbufmax) && ((operation != NBLK_META) || (nfsbufmetacnt < nfsbufmetamax))) { /* just alloc a new one */ - MALLOC(bp, struct nfsbuf *, sizeof(struct nfsbuf), M_TEMP, M_WAITOK); - if (!bp) { - lck_mtx_unlock(nfs_buf_mutex); - FSDBG_BOT(541, np, blkno, 0, error); - return ENOMEM; - } + bp = zalloc(nfsbuf_zone); nfsbufcnt++; /* @@ -954,16 +1011,17 @@ buffer_setup: case NBLK_META: SET(bp->nb_flags, NB_META); if ((bp->nb_bufsize != bufsize) && bp->nb_data) { - kfree(bp->nb_data, bp->nb_bufsize); + kheap_free(KHEAP_DATA_BUFFERS, bp->nb_data, bp->nb_bufsize); bp->nb_data = NULL; bp->nb_validoff = bp->nb_validend = -1; bp->nb_dirtyoff = bp->nb_dirtyend = 0; - bp->nb_valid = 0; - bp->nb_dirty = 0; + NBPGS_ERASE(&bp->nb_valid); + NBPGS_ERASE(&bp->nb_dirty); CLR(bp->nb_flags, NB_CACHE); } if (!bp->nb_data) { - bp->nb_data = kalloc(bufsize); + bp->nb_data = kheap_alloc(KHEAP_DATA_BUFFERS, + bufsize, Z_WAITOK); } if 
(!bp->nb_data) { /* Ack! couldn't allocate the data buffer! */ @@ -1079,7 +1137,7 @@ nfs_buf_release(struct nfsbuf *bp, int freeup) * Abort the pages on error or: if this is an invalid or * non-needcommit nocache buffer AND no pages are dirty. */ - if (ISSET(bp->nb_flags, NB_ERROR) || (!bp->nb_dirty && (ISSET(bp->nb_flags, NB_INVAL) || + if (ISSET(bp->nb_flags, NB_ERROR) || (!nfs_buf_pgs_is_set(&bp->nb_dirty) && (ISSET(bp->nb_flags, NB_INVAL) || (ISSET(bp->nb_flags, NB_NOCACHE) && !ISSET(bp->nb_flags, (NB_NEEDCOMMIT | NB_DELWRI)))))) { if (ISSET(bp->nb_flags, (NB_READ | NB_INVAL | NB_NOCACHE))) { upl_flags = UPL_ABORT_DUMP_PAGES; @@ -1594,7 +1652,7 @@ nfs_buf_read_finish(struct nfsbuf *bp) /* update valid range */ bp->nb_validoff = 0; bp->nb_validend = bp->nb_endio; - if (bp->nb_endio < (int)bp->nb_bufsize) { + if (bp->nb_endio < bp->nb_bufsize) { /* * The read may be short because we have unflushed writes * that are extending the file size and the reads hit the @@ -1616,7 +1674,7 @@ nfs_buf_read_finish(struct nfsbuf *bp) ((NBOFF(bp) + bp->nb_validend) > 0x100000000LL)) { bp->nb_validend = 0x100000000LL - NBOFF(bp); } - bp->nb_valid = (uint32_t)(1LLU << (round_page_32(bp->nb_validend) / PAGE_SIZE)) - 1; + nfs_buf_pgs_get_page_mask(&bp->nb_valid, round_page_64(bp->nb_validend) / PAGE_SIZE); if (bp->nb_validend & PAGE_MASK) { /* zero-fill remainder of last page */ bzero(bp->nb_data + bp->nb_validend, PAGE_SIZE - (bp->nb_validend & PAGE_MASK)); @@ -1634,8 +1692,10 @@ nfs_buf_read_rpc(struct nfsbuf *bp, thread_t thd, kauth_cred_t cred) struct nfsmount *nmp; nfsnode_t np = bp->nb_np; int error = 0, nfsvers, async; - int offset, nrpcs; - uint32_t nmrsize, length, len; + int offset; + uint64_t length, nrpcs; + uint32_t nmrsize; + size_t len; off_t boff; struct nfsreq *req; struct nfsreq_cbinfo cb; @@ -1684,12 +1744,12 @@ nfs_buf_read_rpc(struct nfsbuf *bp, thread_t thd, kauth_cred_t cred) error = bp->nb_error; break; } - len = (length > nmrsize) ? nmrsize : length; - cb.rcb_args[0] = offset; - cb.rcb_args[1] = len; + len = (length > nmrsize) ? 
nmrsize : (uint32_t)length; + cb.rcb_args.offset = offset; + cb.rcb_args.length = len; #if CONFIG_NFS4 if (nmp->nm_vers >= NFS_VER4) { - cb.rcb_args[2] = nmp->nm_stategenid; + cb.rcb_args.stategenid = nmp->nm_stategenid; } #endif req = NULL; @@ -1747,10 +1807,11 @@ void nfs_buf_read_rpc_finish(struct nfsreq *req) { struct nfsmount *nmp; - size_t rlen; + size_t rlen, length; struct nfsreq_cbinfo cb; struct nfsbuf *bp; - int error = 0, nfsvers, offset, length, eof = 0, multasyncrpc, finished; + int error = 0, nfsvers, eof = 0, multasyncrpc, finished; + off_t offset; void *wakeme = NULL; struct nfsreq *rreq = NULL; nfsnode_t np; @@ -1784,8 +1845,8 @@ finish: } nfsvers = nmp->nm_vers; - offset = cb.rcb_args[0]; - rlen = length = cb.rcb_args[1]; + offset = cb.rcb_args.offset; + rlen = length = cb.rcb_args.length; auio = uio_createwithbuffer(1, NBOFF(bp) + offset, UIO_SYSSPACE, UIO_READ, &uio_buf, sizeof(uio_buf)); @@ -1806,9 +1867,9 @@ finish: #if CONFIG_NFS4 if ((nmp->nm_vers >= NFS_VER4) && nfs_mount_state_error_should_restart(error) && !ISSET(bp->nb_flags, NB_ERROR)) { lck_mtx_lock(&nmp->nm_lock); - if ((error != NFSERR_OLD_STATEID) && (error != NFSERR_GRACE) && (cb.rcb_args[2] == nmp->nm_stategenid)) { + if ((error != NFSERR_OLD_STATEID) && (error != NFSERR_GRACE) && (cb.rcb_args.stategenid == nmp->nm_stategenid)) { NP(np, "nfs_buf_read_rpc_finish: error %d @ 0x%llx, 0x%x 0x%x, initiating recovery", - error, NBOFF(bp) + offset, cb.rcb_args[2], nmp->nm_stategenid); + error, NBOFF(bp) + offset, cb.rcb_args.stategenid, nmp->nm_stategenid); nfs_need_recover(nmp, error); } lck_mtx_unlock(&nmp->nm_lock); @@ -1868,9 +1929,9 @@ finish: eofrem = np->n_size - (NBOFF(bp) + offset + rlen); rem = (rpcrem < eofrem) ? rpcrem : eofrem; if (rem > 0) { - bzero(bp->nb_data + offset + rlen, rem); + NFS_BZERO(bp->nb_data + offset + rlen, rem); } - } else if (((int)rlen < length) && !ISSET(bp->nb_flags, NB_ERROR)) { + } else if ((rlen < length) && !ISSET(bp->nb_flags, NB_ERROR)) { /* * short read * @@ -1883,11 +1944,11 @@ readagain: #endif offset += rlen; length -= rlen; - cb.rcb_args[0] = offset; - cb.rcb_args[1] = length; + cb.rcb_args.offset = offset; + cb.rcb_args.length = length; #if CONFIG_NFS4 if (nmp->nm_vers >= NFS_VER4) { - cb.rcb_args[2] = nmp->nm_stategenid; + cb.rcb_args.stategenid = nmp->nm_stategenid; } #endif error = nmp->nm_funcs->nf_read_rpc_async(np, NBOFF(bp) + offset, length, thd, cred, &cb, &rreq); @@ -1993,12 +2054,12 @@ nfs_buf_readahead(nfsnode_t np, int ioflag, daddr64_t *rabnp, daddr64_t lastrabn continue; } if ((ioflag & IO_NOCACHE) && ISSET(bp->nb_flags, NB_CACHE) && - !bp->nb_dirty && !ISSET(bp->nb_flags, (NB_DELWRI | NB_NCRDAHEAD))) { + !nfs_buf_pgs_is_set(&bp->nb_dirty) && !ISSET(bp->nb_flags, (NB_DELWRI | NB_NCRDAHEAD))) { CLR(bp->nb_flags, NB_CACHE); - bp->nb_valid = 0; + NBPGS_ERASE(&bp->nb_valid); bp->nb_validoff = bp->nb_validend = -1; } - if ((bp->nb_dirtyend <= 0) && !bp->nb_dirty && + if ((bp->nb_dirtyend <= 0) && !nfs_buf_pgs_is_set(&bp->nb_dirty) && !ISSET(bp->nb_flags, (NB_CACHE | NB_DELWRI))) { SET(bp->nb_flags, (NB_READ | NB_ASYNC)); if (ioflag & IO_NOCACHE) { @@ -2028,8 +2089,8 @@ nfs_bioread(nfsnode_t np, uio_t uio, int ioflag, vfs_context_t ctx) struct nfsbuf *bp = NULL; struct nfsmount *nmp = VTONMP(vp); daddr64_t lbn, rabn = 0, lastrabn, maxrabn = -1; - off_t diff; - int error = 0, n = 0, on = 0; + off_t diff, on = 0, n = 0; + int error = 0, n32; int nfsvers, biosize, modified, readaheads = 0; thread_t thd; kauth_cred_t cred; @@ -2154,7 +2215,7 @@ 
nfs_bioread(nfsnode_t np, uio_t uio, int ioflag, vfs_context_t ctx) io_resid = diff; } if (io_resid > 0) { - int count = (io_resid > INT_MAX) ? INT_MAX : io_resid; + int count = (io_resid > INT_MAX) ? INT_MAX : (int)io_resid; error = cluster_copy_ubc_data(vp, uio, &count, 0); if (error) { nfs_data_unlock(np); @@ -2164,7 +2225,7 @@ nfs_bioread(nfsnode_t np, uio_t uio, int ioflag, vfs_context_t ctx) } /* count any biocache reads that we just copied directly */ if (lbn != (uio_offset(uio) / biosize)) { - OSAddAtomic64((uio_offset(uio) / biosize) - lbn, &nfsstats.biocache_reads); + OSAddAtomic64(NFS_ROUND_BLOCK(uio_offset(uio), biosize) - lbn, &nfsstats.biocache_reads); FSDBG(514, np, 0xcacefeed, uio_offset(uio), error); } } @@ -2194,10 +2255,11 @@ nfs_bioread(nfsnode_t np, uio_t uio, int ioflag, vfs_context_t ctx) return error; } readaheads = 1; + OSAddAtomic64(rabn - lbn, &nfsstats.biocache_reads); + } else { + OSAddAtomic64(1, &nfsstats.biocache_reads); } - OSAddAtomic64(1, &nfsstats.biocache_reads); - /* * If the block is in the cache and has the required data * in a valid region, just copy it out. @@ -2226,7 +2288,7 @@ again: * Invalidate the data if it wasn't just read * in as part of a "nocache readahead". */ - if (bp->nb_dirty || (bp->nb_dirtyend > 0)) { + if (nfs_buf_pgs_is_set(&bp->nb_dirty) || (bp->nb_dirtyend > 0)) { /* so write the buffer out and try again */ SET(bp->nb_flags, NB_NOCACHE); goto flushbuffer; @@ -2238,9 +2300,9 @@ again: } /* if any pages are valid... */ - if (bp->nb_valid) { + if (nfs_buf_pgs_is_set(&bp->nb_valid)) { /* ...check for any invalid pages in the read range */ - int pg, firstpg, lastpg, dirtypg; + off_t pg, firstpg, lastpg, dirtypg; dirtypg = firstpg = lastpg = -1; pg = on / PAGE_SIZE; while (pg <= (on + n - 1) / PAGE_SIZE) { @@ -2260,8 +2322,8 @@ again: if (bp->nb_validoff < 0) { /* valid range isn't set up, so */ /* set it to what we know is valid */ - bp->nb_validoff = trunc_page(on); - bp->nb_validend = round_page(on + n); + bp->nb_validoff = trunc_page_64(on); + bp->nb_validend = round_page_64(on + n); nfs_buf_normalize_valid_range(np, bp); } goto buffer_ready; @@ -2287,11 +2349,11 @@ flushbuffer: } goto again; } - if (!bp->nb_dirty && bp->nb_dirtyend <= 0 && + if (!nfs_buf_pgs_is_set(&bp->nb_dirty) && bp->nb_dirtyend <= 0 && (lastpg - firstpg + 1) > (biosize / PAGE_SIZE) / 2) { /* we need to read in more than half the buffer and the */ /* buffer's not dirty, so just fetch the whole buffer */ - bp->nb_valid = 0; + NBPGS_ERASE(&bp->nb_valid); } else { /* read the page range in */ uio_t auio; @@ -2303,7 +2365,7 @@ flushbuffer: if (!auio) { error = ENOMEM; } else { - uio_addiov(auio, CAST_USER_ADDR_T(bp->nb_data + (firstpg * PAGE_SIZE)), + NFS_UIO_ADDIOV(auio, CAST_USER_ADDR_T(bp->nb_data + (firstpg * PAGE_SIZE)), ((lastpg - firstpg + 1) * PAGE_SIZE)); error = nfs_read_rpc(np, auio, ctx); } @@ -2317,8 +2379,8 @@ flushbuffer: return error; } /* Make sure that the valid range is set to cover this read. 
*/ - bp->nb_validoff = trunc_page_32(on); - bp->nb_validend = round_page_32(on + n); + bp->nb_validoff = trunc_page_64(on); + bp->nb_validend = round_page_64(on + n); nfs_buf_normalize_valid_range(np, bp); if (uio_resid(auio) > 0) { /* if short read, must have hit EOF, */ @@ -2332,7 +2394,7 @@ flushbuffer: } } /* if no pages are valid, read the whole block */ - if (!bp->nb_valid) { + if (!nfs_buf_pgs_is_set(&bp->nb_valid)) { if (!IS_VALID_CRED(bp->nb_rcred) && IS_VALID_CRED(cred)) { kauth_cred_ref(cred); bp->nb_rcred = cred; @@ -2360,7 +2422,11 @@ buffer_ready: } if (n > 0) { NFS_BUF_MAP(bp); - error = uiomove(bp->nb_data + on, n, uio); + n32 = n > INT_MAX ? INT_MAX : (int)n; + error = uiomove(bp->nb_data + on, n32, uio); + if (!error && n > n32) { + error = uiomove(bp->nb_data + on + n32, (int)(n - n32), uio); + } } @@ -2432,8 +2498,8 @@ nfs_buf_write(struct nfsbuf *bp) thread_t thd; kauth_cred_t cred; proc_t p = current_proc(); - int iomode, doff, dend, firstpg, lastpg; - uint32_t pagemask; + int iomode; + off_t doff, dend, firstpg, lastpg; FSDBG_TOP(553, bp, NBOFF(bp), bp->nb_flags, 0); @@ -2547,6 +2613,7 @@ nfs_buf_write(struct nfsbuf *bp) } if (!error && (bp->nb_dirtyend > 0)) { /* there's a dirty range that needs to be written out */ + nfsbufpgs pagemask, pagemaskand; NFS_BUF_MAP(bp); doff = bp->nb_dirtyoff; @@ -2564,7 +2631,7 @@ nfs_buf_write(struct nfsbuf *bp) } /* if dend page is dirty, move dend to start of next page */ if ((dend & PAGE_MASK) && NBPGDIRTY(bp, dend / PAGE_SIZE)) { - dend = round_page_32(dend); + dend = round_page_64(dend); } /* try to expand write range to include trailing dirty pages */ if (!(dend & PAGE_MASK)) { @@ -2577,17 +2644,23 @@ nfs_buf_write(struct nfsbuf *bp) dend = np->n_size - NBOFF(bp); } /* calculate range of complete pages being written */ - firstpg = round_page_32(doff) / PAGE_SIZE; - lastpg = (trunc_page_32(dend) - 1) / PAGE_SIZE; - /* calculate mask for that page range */ - pagemask = ((1 << (lastpg + 1)) - 1) & ~((1 << firstpg) - 1); + if (dend > doff) { + firstpg = doff / PAGE_SIZE; + lastpg = (dend - 1) / PAGE_SIZE; + /* calculate mask for that page range */ + nfs_buf_pgs_set_pages_between(&pagemask, firstpg, lastpg + 1); + } else { + NBPGS_ERASE(&pagemask); + } /* * compare page mask to nb_dirty; if there are other dirty pages * then write FILESYNC; otherwise, write UNSTABLE if async and * not needcommit/stable; otherwise write FILESYNC */ - if (bp->nb_dirty & ~pagemask) { + nfs_buf_pgs_bit_not(&pagemask); + nfs_buf_pgs_bit_and(&bp->nb_dirty, &pagemask, &pagemaskand); + if (nfs_buf_pgs_is_set(&pagemaskand)) { iomode = NFS_WRITE_FILESYNC; } else if ((bp->nb_flags & (NB_ASYNC | NB_NEEDCOMMIT | NB_STABLE)) == NB_ASYNC) { iomode = NFS_WRITE_UNSTABLE; @@ -2610,7 +2683,7 @@ nfs_buf_write(struct nfsbuf *bp) * pages pushed out. */ } else { - if (!error && bp->nb_dirty) { /* write out any dirty pages */ + if (!error && nfs_buf_pgs_is_set(&bp->nb_dirty)) { /* write out any dirty pages */ error = nfs_buf_write_dirty_pages(bp, thd, cred); } nfs_buf_iodone(bp); @@ -2671,8 +2744,7 @@ nfs_buf_write_finish(struct nfsbuf *bp, thread_t thd, kauth_cred_t cred) { nfsnode_t np = bp->nb_np; int error = (bp->nb_flags & NB_ERROR) ? 
bp->nb_error : 0; - int firstpg, lastpg; - uint32_t pagemask; + off_t firstpg, lastpg; if ((error == EINTR) || (error == ERESTART)) { CLR(bp->nb_flags, NB_ERROR); @@ -2680,13 +2752,19 @@ nfs_buf_write_finish(struct nfsbuf *bp, thread_t thd, kauth_cred_t cred) } if (!error) { + nfsbufpgs pagemask; /* calculate range of complete pages being written */ - firstpg = round_page_32(bp->nb_offio) / PAGE_SIZE; - lastpg = (trunc_page_32(bp->nb_endio) - 1) / PAGE_SIZE; - /* calculate mask for that page range written */ - pagemask = ((1 << (lastpg + 1)) - 1) & ~((1 << firstpg) - 1); + if (bp->nb_endio > bp->nb_offio) { + firstpg = bp->nb_offio / PAGE_SIZE; + lastpg = (bp->nb_endio - 1) / PAGE_SIZE; + /* calculate mask for that page range written */ + nfs_buf_pgs_set_pages_between(&pagemask, firstpg, lastpg + 1); + } else { + NBPGS_ERASE(&pagemask); + } /* clear dirty bits for pages we've written */ - bp->nb_dirty &= ~pagemask; + nfs_buf_pgs_bit_not(&pagemask); + nfs_buf_pgs_bit_and(&bp->nb_dirty, &pagemask, &bp->nb_dirty); } /* manage needcommit state */ @@ -2764,7 +2842,7 @@ nfs_buf_write_finish(struct nfsbuf *bp, thread_t thd, kauth_cred_t cred) bp->nb_dirtyoff = bp->nb_dirtyend = 0; } - if (!error && bp->nb_dirty) { + if (!error && nfs_buf_pgs_is_set(&bp->nb_dirty)) { nfs_buf_write_dirty_pages(bp, thd, cred); } nfs_buf_iodone(bp); @@ -2783,12 +2861,12 @@ nfs_buf_write_dirty_pages(struct nfsbuf *bp, thread_t thd, kauth_cred_t cred) nfsnode_t np = bp->nb_np; struct nfsmount *nmp = NFSTONMP(np); int error = 0, commit, iomode, iomode2, len, pg, count, npages, off; - uint32_t dirty = bp->nb_dirty; + nfsbufpgs dirty; uint64_t wverf; uio_t auio; char uio_buf[UIO_SIZEOF(1)]; - if (!bp->nb_dirty) { + if (!nfs_buf_pgs_is_set(&bp->nb_dirty)) { return 0; } @@ -2803,7 +2881,7 @@ nfs_buf_write_dirty_pages(struct nfsbuf *bp, thread_t thd, kauth_cred_t cred) &uio_buf, sizeof(uio_buf)); again: - dirty = bp->nb_dirty; + NBPGS_COPY(&dirty, &bp->nb_dirty); wverf = bp->nb_verf; commit = NFS_WRITE_FILESYNC; for (pg = 0; pg < npages; pg++) { @@ -2840,7 +2918,7 @@ again: } /* clear dirty bits */ while (count--) { - dirty &= ~(1 << pg); + NBPGS_UNSET(&dirty, pg); if (count) { /* leave pg on last page */ pg++; } @@ -2857,7 +2935,7 @@ again: } } if (!error) { - bp->nb_dirty = dirty; + NBPGS_COPY(&bp->nb_dirty, &dirty); } else { SET(bp->nb_flags, NB_ERROR); bp->nb_error = error; @@ -2874,12 +2952,14 @@ nfs_buf_write_rpc(struct nfsbuf *bp, int iomode, thread_t thd, kauth_cred_t cred struct nfsmount *nmp; nfsnode_t np = bp->nb_np; int error = 0, nfsvers, async; - int offset, nrpcs; - uint32_t nmwsize, length, len; + int64_t nrpcs; + size_t len; + uint32_t nmwsize; struct nfsreq *req; struct nfsreq_cbinfo cb; uio_t auio; char uio_buf[UIO_SIZEOF(1)]; + off_t offset, length; nmp = NFSTONMP(np); if (nfs_mount_gone(nmp)) { @@ -2909,7 +2989,7 @@ nfs_buf_write_rpc(struct nfsbuf *bp, int iomode, thread_t thd, kauth_cred_t cred auio = uio_createwithbuffer(1, NBOFF(bp) + offset, UIO_SYSSPACE, UIO_WRITE, &uio_buf, sizeof(uio_buf)); - uio_addiov(auio, CAST_USER_ADDR_T(bp->nb_data + offset), length); + NFS_UIO_ADDIOV(auio, CAST_USER_ADDR_T(bp->nb_data + offset), length); bp->nb_rpcs = nrpcs = (length + nmwsize - 1) / nmwsize; if (async && (nrpcs > 1)) { @@ -2923,12 +3003,12 @@ nfs_buf_write_rpc(struct nfsbuf *bp, int iomode, thread_t thd, kauth_cred_t cred error = bp->nb_error; break; } - len = (length > nmwsize) ? nmwsize : length; - cb.rcb_args[0] = offset; - cb.rcb_args[1] = len; + len = (length > nmwsize) ? 
nmwsize : (uint32_t)length; + cb.rcb_args.offset = offset; + cb.rcb_args.length = len; #if CONFIG_NFS4 if (nmp->nm_vers >= NFS_VER4) { - cb.rcb_args[2] = nmp->nm_stategenid; + cb.rcb_args.stategenid = nmp->nm_stategenid; } #endif if (async && ((error = nfs_async_write_start(nmp)))) { @@ -2992,10 +3072,11 @@ nfs_buf_write_rpc(struct nfsbuf *bp, int iomode, thread_t thd, kauth_cred_t cred void nfs_buf_write_rpc_finish(struct nfsreq *req) { - int error = 0, nfsvers, offset, length, multasyncrpc, finished; + int error = 0, nfsvers, multasyncrpc, finished; int committed = NFS_WRITE_FILESYNC; uint64_t wverf = 0; - size_t rlen; + off_t offset; + size_t rlen, length; void *wakeme = NULL; struct nfsreq_cbinfo cb; struct nfsreq *wreq = NULL; @@ -3032,8 +3113,8 @@ finish: } nfsvers = nmp->nm_vers; - offset = cb.rcb_args[0]; - rlen = length = cb.rcb_args[1]; + offset = cb.rcb_args.offset; + rlen = length = cb.rcb_args.length; /* finish the RPC */ error = nmp->nm_funcs->nf_write_rpc_async_finish(np, req, &committed, &rlen, &wverf); @@ -3050,9 +3131,9 @@ finish: #if CONFIG_NFS4 if ((nmp->nm_vers >= NFS_VER4) && nfs_mount_state_error_should_restart(error) && !ISSET(bp->nb_flags, NB_ERROR)) { lck_mtx_lock(&nmp->nm_lock); - if ((error != NFSERR_OLD_STATEID) && (error != NFSERR_GRACE) && (cb.rcb_args[2] == nmp->nm_stategenid)) { + if ((error != NFSERR_OLD_STATEID) && (error != NFSERR_GRACE) && (cb.rcb_args.stategenid == nmp->nm_stategenid)) { NP(np, "nfs_buf_write_rpc_finish: error %d @ 0x%llx, 0x%x 0x%x, initiating recovery", - error, NBOFF(bp) + offset, cb.rcb_args[2], nmp->nm_stategenid); + error, NBOFF(bp) + offset, cb.rcb_args.stategenid, nmp->nm_stategenid); nfs_need_recover(nmp, error); } lck_mtx_unlock(&nmp->nm_lock); @@ -3123,6 +3204,10 @@ finish: bp->nb_verf = wverf; } + if ((rlen > 0) && (bp->nb_offio < (offset + (int)rlen))) { + bp->nb_offio = offset + rlen; + } + /* * check for a short write * @@ -3130,7 +3215,7 @@ finish: * need to issue another write for the rest of it. * (Don't bother if the buffer hit an error or stale wverf.) */ - if (((int)rlen < length) && !(bp->nb_flags & (NB_STALEWVERF | NB_ERROR))) { + if ((rlen < length) && !(bp->nb_flags & (NB_STALEWVERF | NB_ERROR))) { #if CONFIG_NFS4 writeagain: #endif @@ -3141,11 +3226,11 @@ writeagain: UIO_WRITE, &uio_buf, sizeof(uio_buf)); uio_addiov(auio, CAST_USER_ADDR_T(bp->nb_data + offset), length); - cb.rcb_args[0] = offset; - cb.rcb_args[1] = length; + cb.rcb_args.offset = offset; + cb.rcb_args.length = length; #if CONFIG_NFS4 if (nmp->nm_vers >= NFS_VER4) { - cb.rcb_args[2] = nmp->nm_stategenid; + cb.rcb_args.stategenid = nmp->nm_stategenid; } #endif // XXX iomode should really match the original request @@ -3211,6 +3296,10 @@ out: if (IS_VALID_CRED(cred)) { kauth_cred_unref(&cred); } + + if (cb.rcb_func && np->n_needcommitcnt >= NFS_A_LOT_OF_NEEDCOMMITS) { + nfs_flushcommits(np, 1); + } } /* @@ -3222,11 +3311,11 @@ nfs_flushcommits(nfsnode_t np, int nowait) struct nfsmount *nmp; struct nfsbuf *bp, *prevlbp, *lbp; struct nfsbuflists blist, commitlist; - int error = 0, retv, wcred_set, flags, dirty; + int error = 0, retv, wcred_set, flags; u_quad_t off, endoff, toff; - uint64_t wverf; - u_int32_t count; + uint64_t wverf, count; kauth_cred_t wcred = NULL; + nfsbufpgs dirty; FSDBG_TOP(557, np, 0, 0, 0); @@ -3362,7 +3451,8 @@ nfs_flushcommits(nfsnode_t np, int nowait) if (retv) { /* Unable to create the UPL, the VM object probably no longer exists. 
*/ printf("nfs_flushcommits: upl create failed %d\n", retv); - bp->nb_valid = bp->nb_dirty = 0; + NBPGS_ERASE(&bp->nb_valid); + NBPGS_ERASE(&bp->nb_dirty); } } nfs_buf_upl_check(bp); @@ -3434,7 +3524,8 @@ nfs_flushcommits(nfsnode_t np, int nowait) CLR(bp->nb_flags, (NB_READ | NB_DONE | NB_ERROR | NB_DELWRI)); /* if block still has dirty pages, we don't want it to */ /* be released in nfs_buf_iodone(). So, don't set NB_ASYNC. */ - if (!(dirty = bp->nb_dirty)) { + NBPGS_COPY(&dirty, &bp->nb_dirty); + if (!nfs_buf_pgs_is_set(&dirty)) { SET(bp->nb_flags, NB_ASYNC); } else { CLR(bp->nb_flags, NB_ASYNC); @@ -3448,7 +3539,7 @@ nfs_flushcommits(nfsnode_t np, int nowait) bp->nb_dirtyoff = bp->nb_dirtyend = 0; nfs_buf_iodone(bp); - if (dirty) { + if (nfs_buf_pgs_is_set(&dirty)) { /* throw it back in as a delayed write buffer */ CLR(bp->nb_flags, NB_DONE); nfs_buf_write_delayed(bp); @@ -3742,9 +3833,10 @@ nfs_vinvalbuf_internal( (NBOFF(bp) < (off_t)np->n_size)) { /* extra paranoia: make sure we're not */ /* somehow leaving any dirty data around */ + nfsbufpgs pagemask; int mustwrite = 0; - int end = (NBOFF(bp) + bp->nb_bufsize > (off_t)np->n_size) ? - ((off_t)np->n_size - NBOFF(bp)) : bp->nb_bufsize; + off_t end = (NBOFF(bp) + bp->nb_bufsize > (off_t)np->n_size) ? + (np->n_size - NBOFF(bp)) : bp->nb_bufsize; if (!ISSET(bp->nb_flags, NB_PAGELIST)) { error = nfs_buf_upl_setup(bp); if (error == EINVAL) { @@ -3754,7 +3846,8 @@ nfs_vinvalbuf_internal( } else if (error) { printf("nfs_vinvalbuf: upl setup failed %d\n", error); } - bp->nb_valid = bp->nb_dirty = 0; + NBPGS_ERASE(&bp->nb_valid); + NBPGS_ERASE(&bp->nb_dirty); } nfs_buf_upl_check(bp); /* check for any dirty data before the EOF */ @@ -3770,8 +3863,9 @@ nfs_vinvalbuf_internal( mustwrite++; } } - bp->nb_dirty &= (1 << (round_page_32(end) / PAGE_SIZE)) - 1; - if (bp->nb_dirty) { + nfs_buf_pgs_get_page_mask(&pagemask, round_page_64(end) / PAGE_SIZE); + nfs_buf_pgs_bit_and(&bp->nb_dirty, &pagemask, &bp->nb_dirty); + if (nfs_buf_pgs_is_set(&bp->nb_dirty)) { mustwrite++; } /* also make sure we'll have a credential to do the write */ @@ -4053,16 +4147,16 @@ again: lck_mtx_lock(&req->r_mtx); if (req->r_flags & R_RESENDQ) { lck_mtx_lock(&nmp->nm_lock); - if (req->r_rchain.tqe_next != NFSREQNOLIST) { + if ((req->r_flags & R_RESENDQ) && req->r_rchain.tqe_next != NFSREQNOLIST) { NFS_BIO_DBG("Proccessing async request on resendq. Removing"); TAILQ_REMOVE(&nmp->nm_resendq, req, r_rchain); + req->r_flags &= ~R_RESENDQ; req->r_rchain.tqe_next = NFSREQNOLIST; assert(req->r_refs > 1); /* Remove resendq reference */ req->r_refs--; } lck_mtx_unlock(&nmp->nm_lock); - req->r_flags &= ~R_RESENDQ; } lck_mtx_unlock(&req->r_mtx); @@ -4098,6 +4192,7 @@ again: /* * queue up async I/O request for resend + * Must be called with req->r_mtx locked. 
*/ void nfs_asyncio_resend(struct nfsreq *req) diff --git a/bsd/nfs/nfs_boot.c b/bsd/nfs/nfs_boot.c index 65728d3b1..b3980c9c6 100644 --- a/bsd/nfs/nfs_boot.c +++ b/bsd/nfs/nfs_boot.c @@ -210,18 +210,9 @@ nfs_boot_init(struct nfs_diskless *nd) } /* get the root path information */ - MALLOC_ZONE(nd->nd_root.ndm_path, char *, MAXPATHLEN, M_NAMEI, M_WAITOK); - if (!nd->nd_root.ndm_path) { - printf("nfs_boot: can't allocate root path buffer\n"); - error = ENOMEM; - goto failed; - } - MALLOC_ZONE(nd->nd_root.ndm_mntfrom, char *, MAXPATHLEN, M_NAMEI, M_WAITOK); - if (!nd->nd_root.ndm_mntfrom) { - printf("nfs_boot: can't allocate root mntfrom buffer\n"); - error = ENOMEM; - goto failed; - } + nd->nd_root.ndm_path = zalloc(ZV_NAMEI); + nd->nd_root.ndm_mntfrom = zalloc(ZV_NAMEI); + sin_p = &nd->nd_root.ndm_saddr; bzero((caddr_t)sin_p, sizeof(*sin_p)); sin_p->sin_len = sizeof(*sin_p); @@ -271,18 +262,8 @@ nfs_boot_init(struct nfs_diskless *nd) #if !defined(NO_MOUNT_PRIVATE) if (do_bpgetfile) { /* get private path */ - MALLOC_ZONE(nd->nd_private.ndm_path, char *, MAXPATHLEN, M_NAMEI, M_WAITOK); - if (!nd->nd_private.ndm_path) { - printf("nfs_boot: can't allocate private path buffer\n"); - error = ENOMEM; - goto failed; - } - MALLOC_ZONE(nd->nd_private.ndm_mntfrom, char *, MAXPATHLEN, M_NAMEI, M_WAITOK); - if (!nd->nd_private.ndm_mntfrom) { - printf("nfs_boot: can't allocate private host buffer\n"); - error = ENOMEM; - goto failed; - } + nd->nd_private.ndm_path = zalloc(ZV_NAMEI); + nd->nd_private.ndm_mntfrom = zalloc(ZV_NAMEI); error = bp_getfile(&bp_sin, "private", &nd->nd_private.ndm_saddr, nd->nd_private.ndm_host, @@ -290,12 +271,7 @@ nfs_boot_init(struct nfs_diskless *nd) if (!error) { char * check_path = NULL; - MALLOC_ZONE(check_path, char *, MAXPATHLEN, M_NAMEI, M_WAITOK); - if (!check_path) { - printf("nfs_boot: can't allocate check_path buffer\n"); - error = ENOMEM; - goto failed; - } + check_path = zalloc(ZV_NAMEI); snprintf(check_path, MAXPATHLEN, "%s/private", nd->nd_root.ndm_path); if ((nd->nd_root.ndm_saddr.sin_addr.s_addr == nd->nd_private.ndm_saddr.sin_addr.s_addr) @@ -303,7 +279,7 @@ nfs_boot_init(struct nfs_diskless *nd) /* private path is prefix of root path, don't mount */ nd->nd_private.ndm_saddr.sin_addr.s_addr = 0; } - FREE_ZONE(check_path, MAXPATHLEN, M_NAMEI); + NFS_ZFREE(ZV_NAMEI, check_path); } else { /* private key not defined, don't mount */ nd->nd_private.ndm_saddr.sin_addr.s_addr = 0; @@ -416,7 +392,7 @@ mbuf_get_with_len(size_t msg_len, mbuf_t *m) * String representation for RPC. 
*/ struct rpc_string { - u_int32_t len; /* length without null or padding */ + size_t len; /* length without null or padding */ u_char data[4]; /* data (longer, of course) */ /* data is padded to a long-word boundary */ }; @@ -534,7 +510,7 @@ bp_whoami(struct sockaddr_in *bpsin, goto bad; } str = (struct rpc_string *)p; - cn_len = ntohl(str->len); + cn_len = ntohll(str->len); if ((msg_len - 4) < cn_len) { goto bad; } @@ -553,7 +529,7 @@ bp_whoami(struct sockaddr_in *bpsin, goto bad; } str = (struct rpc_string *)p; - dn_len = ntohl(str->len); + dn_len = ntohll(str->len); if ((msg_len - 4) < dn_len) { goto bad; } @@ -576,10 +552,10 @@ bp_whoami(struct sockaddr_in *bpsin, goto bad; } p = (u_char*)gw_ip; - *p++ = ntohl(bia->addr[0]); - *p++ = ntohl(bia->addr[1]); - *p++ = ntohl(bia->addr[2]); - *p++ = ntohl(bia->addr[3]); + *p++ = ntohl(bia->addr[0]) & 0xff; + *p++ = ntohl(bia->addr[1]) & 0xff; + *p++ = ntohl(bia->addr[2]) & 0xff; + *p++ = ntohl(bia->addr[3]) & 0xff; goto out; bad: @@ -636,14 +612,14 @@ bp_getfile(struct sockaddr_in *bpsin, bzero(p, msg_len); /* client name (hostname) */ str = (struct rpc_string *)p; - str->len = htonl(cn_len); + str->len = htonll(cn_len); lck_mtx_lock(&hostname_lock); bcopy(hostname, str->data, cn_len); lck_mtx_unlock(&hostname_lock); p += RPC_STR_SIZE(cn_len); /* key name (root or swap) */ str = (struct rpc_string *)p; - str->len = htonl(key_len); + str->len = htonll(key_len); bcopy(key, str->data, key_len); /* RPC: bootparam/getfile */ @@ -664,7 +640,7 @@ bp_getfile(struct sockaddr_in *bpsin, goto bad; } str = (struct rpc_string *)p; - sn_len = ntohl(str->len); + sn_len = ntohll(str->len); if ((msg_len - 4) < sn_len) { goto bad; } @@ -689,10 +665,10 @@ bp_getfile(struct sockaddr_in *bpsin, sin->sin_len = sizeof(*sin); sin->sin_family = AF_INET; q = (u_char*) &sin->sin_addr; - *q++ = ntohl(bia->addr[0]); - *q++ = ntohl(bia->addr[1]); - *q++ = ntohl(bia->addr[2]); - *q++ = ntohl(bia->addr[3]); + *q++ = ntohl(bia->addr[0]) & 0xff; + *q++ = ntohl(bia->addr[1]) & 0xff; + *q++ = ntohl(bia->addr[2]) & 0xff; + *q++ = ntohl(bia->addr[3]) & 0xff; p += sizeof(*bia); msg_len -= sizeof(*bia); @@ -701,7 +677,7 @@ bp_getfile(struct sockaddr_in *bpsin, goto bad; } str = (struct rpc_string *)p; - path_len = ntohl(str->len); + path_len = ntohll(str->len); if ((msg_len - 4) < path_len) { goto bad; } @@ -742,8 +718,8 @@ md_mount(struct sockaddr_in *mdsin, /* mountd server address */ u_char data[NFSX_V3FHMAX + sizeof(u_int32_t)]; } *rdata; mbuf_t m; - size_t mlen; - int error, slen; + size_t mlen, slen; + int error; int mntversion = v3 ? RPCMNT_VER3 : RPCMNT_VER1; int proto = (sotype == SOCK_STREAM) ? IPPROTO_TCP : IPPROTO_UDP; in_port_t mntport, nfsport; @@ -772,7 +748,7 @@ md_mount(struct sockaddr_in *mdsin, /* mountd server address */ return error; } str = mbuf_data(m); - str->len = htonl(slen); + str->len = htonll(slen); bcopy(path, str->data, slen); /* Do RPC to mountd. */ diff --git a/bsd/nfs/nfs_gss.c b/bsd/nfs/nfs_gss.c index 71188d12a..42f8ea0ac 100644 --- a/bsd/nfs/nfs_gss.c +++ b/bsd/nfs/nfs_gss.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2007-2015 Apple Inc. All rights reserved. + * Copyright (c) 2007-2020 Apple Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -145,7 +145,7 @@ static int nfs_gss_clnt_ctx_find(struct nfsreq *); static int nfs_gss_clnt_ctx_init(struct nfsreq *, struct nfs_gss_clnt_ctx *); static int nfs_gss_clnt_ctx_init_retry(struct nfsreq *, struct nfs_gss_clnt_ctx *); static int nfs_gss_clnt_ctx_callserver(struct nfsreq *, struct nfs_gss_clnt_ctx *); -static uint8_t *nfs_gss_clnt_svcname(struct nfsmount *, gssd_nametype *, uint32_t *); +static uint8_t *nfs_gss_clnt_svcname(struct nfsmount *, gssd_nametype *, size_t *); static int nfs_gss_clnt_gssd_upcall(struct nfsreq *, struct nfs_gss_clnt_ctx *, uint32_t); void nfs_gss_clnt_ctx_neg_cache_reap(struct nfsmount *); static void nfs_gss_clnt_ctx_clean(struct nfs_gss_clnt_ctx *); @@ -167,7 +167,7 @@ static void nfs_gss_nfsm_chain(struct nfsm_chain *, mbuf_t); static void host_release_special_port(mach_port_t); static mach_port_t host_copy_special_port(mach_port_t); -static void nfs_gss_mach_alloc_buffer(u_char *, uint32_t, vm_map_copy_t *); +static void nfs_gss_mach_alloc_buffer(u_char *, size_t, vm_map_copy_t *); static int nfs_gss_mach_vmcopyout(vm_map_copy_t, uint32_t, u_char *); static int nfs_gss_mchain_length(mbuf_t); @@ -453,8 +453,7 @@ rpc_gss_priv_data_restore(gss_ctx_id_t ctx, mbuf_t *mb_head, size_t len) { uint32_t major, error; mbuf_t mb = *mb_head, next; - uint32_t plen; - size_t length; + size_t plen, length; gss_qop_t qop = GSS_C_QOP_REVERSE; /* Chop of the opaque length */ @@ -553,7 +552,7 @@ static char * nfs_gss_clnt_ctx_name(struct nfsmount *nmp, struct nfs_gss_clnt_ctx *cp, char *buf, int len) { char *np; - int nlen; + size_t nlen; const char *server = ""; if (nmp && nmp->nm_mountp) { @@ -573,7 +572,7 @@ nfs_gss_clnt_ctx_name(struct nfsmount *nmp, struct nfs_gss_clnt_ctx *cp, char *b nlen = np ? strlen(cp->gss_clnt_display) : 0; } if (nlen) { - snprintf(buf, len, "[%s] %.*s %d/%d %s", server, nlen, np, + snprintf(buf, len, "[%s] %.*s %d/%d %s", server, nlen > INT_MAX ? INT_MAX : (int)nlen, np, kauth_cred_getasid(cp->gss_clnt_cred), kauth_cred_getuid(cp->gss_clnt_cred), cp->gss_clnt_principal ? "" : "[from default cred] "); @@ -615,7 +614,7 @@ nfs_gss_clnt_ctx_cred_match(kauth_cred_t cred1, kauth_cred_t cred2) * so that defaults can be set by service identities. */ -static void +static int nfs_gss_clnt_mnt_ref(struct nfsmount *nmp) { int error; @@ -623,21 +622,23 @@ nfs_gss_clnt_mnt_ref(struct nfsmount *nmp) if (nmp == NULL || !(vfs_flags(nmp->nm_mountp) & MNT_AUTOMOUNTED)) { - return; + return EINVAL; } error = VFS_ROOT(nmp->nm_mountp, &rvp, NULL); if (!error) { - vnode_ref(rvp); + error = vnode_ref(rvp); vnode_put(rvp); } + + return error; } /* - * Unbusy the mout. See above comment, + * Unbusy the mount. 
See above comment, */ -static void +static int nfs_gss_clnt_mnt_rele(struct nfsmount *nmp) { int error; @@ -645,7 +646,7 @@ nfs_gss_clnt_mnt_rele(struct nfsmount *nmp) if (nmp == NULL || !(vfs_flags(nmp->nm_mountp) & MNT_AUTOMOUNTED)) { - return; + return EINVAL; } error = VFS_ROOT(nmp->nm_mountp, &rvp, NULL); @@ -653,26 +654,28 @@ nfs_gss_clnt_mnt_rele(struct nfsmount *nmp) vnode_rele(rvp); vnode_put(rvp); } + + return error; } int nfs_root_steals_ctx = 0; static int -nfs_gss_clnt_ctx_find_principal(struct nfsreq *req, uint8_t *principal, uint32_t plen, uint32_t nt) +nfs_gss_clnt_ctx_find_principal(struct nfsreq *req, uint8_t *principal, size_t plen, uint32_t nt) { struct nfsmount *nmp = req->r_nmp; - struct nfs_gss_clnt_ctx *cp; - struct nfsreq treq; + struct nfs_gss_clnt_ctx *cp, *tcp; + struct nfsreq *treq; int error = 0; struct timeval now; char CTXBUF[NFS_CTXBUFSZ]; - bzero(&treq, sizeof(struct nfsreq)); - treq.r_nmp = nmp; + treq = zalloc_flags(nfs_req_zone, Z_WAITOK | Z_ZERO); + treq->r_nmp = nmp; microuptime(&now); lck_mtx_lock(&nmp->nm_lock); - TAILQ_FOREACH(cp, &nmp->nm_gsscl, gss_clnt_entries) { + TAILQ_FOREACH_SAFE(cp, &nmp->nm_gsscl, gss_clnt_entries, tcp) { lck_mtx_lock(cp->gss_clnt_mtx); if (cp->gss_clnt_flags & GSS_CTX_DESTROY) { NFS_GSS_DBG("Found destroyed context %s refcnt = %d continuing\n", @@ -698,9 +701,9 @@ nfs_gss_clnt_ctx_find_principal(struct nfsreq *req, uint8_t *principal, uint32_t lck_mtx_unlock(cp->gss_clnt_mtx); NFS_GSS_DBG("Marking %s for deletion because %s does not match\n", NFS_GSS_CTX(req, cp), principal); - NFS_GSS_DBG("len = (%d,%d), nt = (%d,%d)\n", cp->gss_clnt_prinlen, plen, + NFS_GSS_DBG("len = (%zu,%zu), nt = (%d,%d)\n", cp->gss_clnt_prinlen, plen, cp->gss_clnt_prinnt, nt); - treq.r_gss_ctx = cp; + treq->r_gss_ctx = cp; cp = NULL; break; } @@ -716,6 +719,7 @@ nfs_gss_clnt_ctx_find_principal(struct nfsreq *req, uint8_t *principal, uint32_t NFS_GSS_CTX(req, cp), cp->gss_clnt_refcnt, cp->gss_clnt_nctime, now.tv_sec); lck_mtx_unlock(cp->gss_clnt_mtx); lck_mtx_unlock(&nmp->nm_lock); + NFS_ZFREE(nfs_req_zone, treq); return NFSERR_EAUTH; } if (cp->gss_clnt_refcnt) { @@ -732,6 +736,7 @@ nfs_gss_clnt_ctx_find_principal(struct nfsreq *req, uint8_t *principal, uint32_t lck_mtx_unlock(cp->gss_clnt_mtx); if (error) { lck_mtx_unlock(&nmp->nm_lock); + NFS_ZFREE(nfs_req_zone, treq); return error; } cp = ncp; @@ -750,6 +755,7 @@ nfs_gss_clnt_ctx_find_principal(struct nfsreq *req, uint8_t *principal, uint32_t req->r_gss_ctx = cp; lck_mtx_unlock(cp->gss_clnt_mtx); lck_mtx_unlock(&nmp->nm_lock); + NFS_ZFREE(nfs_req_zone, treq); return 0; } lck_mtx_unlock(cp->gss_clnt_mtx); @@ -768,6 +774,7 @@ nfs_gss_clnt_ctx_find_principal(struct nfsreq *req, uint8_t *principal, uint32_t nfs_gss_clnt_ctx_ref(req, cp); lck_mtx_unlock(&nmp->nm_lock); NFS_GSS_DBG("Root stole context %s\n", NFS_GSS_CTX(req, NULL)); + NFS_ZFREE(nfs_req_zone, treq); return 0; } } @@ -786,6 +793,7 @@ nfs_gss_clnt_ctx_find_principal(struct nfsreq *req, uint8_t *principal, uint32_t MALLOC(cp, struct nfs_gss_clnt_ctx *, sizeof(*cp), M_TEMP, M_WAITOK | M_ZERO); if (cp == NULL) { lck_mtx_unlock(&nmp->nm_lock); + NFS_ZFREE(nfs_req_zone, treq); return ENOMEM; } cp->gss_clnt_cred = req->r_cred; @@ -798,9 +806,12 @@ nfs_gss_clnt_ctx_find_principal(struct nfsreq *req, uint8_t *principal, uint32_t cp->gss_clnt_prinlen = plen; cp->gss_clnt_prinnt = nt; cp->gss_clnt_flags |= GSS_CTX_STICKY; - nfs_gss_clnt_mnt_ref(nmp); + if (!nfs_gss_clnt_mnt_ref(nmp)) { + cp->gss_clnt_flags |= GSS_CTX_USECOUNT; + } } } else { + 
uint32_t oldflags = cp->gss_clnt_flags; nfs_gss_clnt_ctx_clean(cp); if (principal) { /* @@ -816,6 +827,14 @@ nfs_gss_clnt_ctx_find_principal(struct nfsreq *req, uint8_t *principal, uint32_t * match and we will fall through here. */ cp->gss_clnt_flags |= GSS_CTX_STICKY; + + /* + * We are preserving old flags if it set, and we take a ref if not set. + * Also, because of the short circuit we will not take extra refs here. + */ + if ((oldflags & GSS_CTX_USECOUNT) || !nfs_gss_clnt_mnt_ref(nmp)) { + cp->gss_clnt_flags |= GSS_CTX_USECOUNT; + } } } @@ -831,8 +850,8 @@ nfs_gss_clnt_ctx_find_principal(struct nfsreq *req, uint8_t *principal, uint32_t } /* Remove any old matching contex that had a different principal */ - nfs_gss_clnt_ctx_unref(&treq); - + nfs_gss_clnt_ctx_unref(treq); + NFS_ZFREE(nfs_req_zone, treq); return error; } @@ -1066,10 +1085,10 @@ nfs_gss_clnt_verf_get( struct nfs_gss_clnt_ctx *cp = req->r_gss_ctx; struct nfsm_chain nmc_tmp; struct gss_seq *gsp; - uint32_t reslen, offset; + uint32_t reslen; int error = 0; mbuf_t results_mbuf, prev_mbuf, pad_mbuf; - size_t ressize; + size_t ressize, offset; reslen = 0; *accepted_statusp = 0; @@ -1725,7 +1744,7 @@ nfsmout: */ static uint8_t * -nfs_gss_clnt_svcname(struct nfsmount *nmp, gssd_nametype *nt, uint32_t *len) +nfs_gss_clnt_svcname(struct nfsmount *nmp, gssd_nametype *nt, size_t *len) { char *svcname, *d, *server; int lindx, sindx; @@ -1899,7 +1918,7 @@ nfs_gss_clnt_gssd_upcall(struct nfsreq *req, struct nfs_gss_clnt_ctx *cp, uint32 mach_msg_type_number_t otokenlen; int error = 0; uint8_t *principal = NULL; - uint32_t plen = 0; + size_t plen = 0; int32_t nt = GSSD_STRING_NAME; vm_map_copy_t pname = NULL; vm_map_copy_t svcname = NULL; @@ -2057,7 +2076,7 @@ retry: } if (cp->gss_clnt_display == NULL && *display_name != '\0') { - int dlen = strnlen(display_name, MAX_DISPLAY_STR) + 1; /* Add extra byte to include '\0' */ + size_t dlen = strnlen(display_name, MAX_DISPLAY_STR) + 1; /* Add extra byte to include '\0' */ if (dlen < MAX_DISPLAY_STR) { MALLOC(cp->gss_clnt_display, char *, dlen, M_TEMP, M_WAITOK); @@ -2272,8 +2291,8 @@ nfs_gss_clnt_ctx_unref(struct nfsreq *req) } if (cp->gss_clnt_flags & GSS_CTX_DESTROY) { destroy = 1; - if (cp->gss_clnt_flags & GSS_CTX_STICKY) { - nfs_gss_clnt_mnt_rele(nmp); + if ((cp->gss_clnt_flags & GSS_CTX_USECOUNT) && !nfs_gss_clnt_mnt_rele(nmp)) { + cp->gss_clnt_flags &= ~GSS_CTX_USECOUNT; } if (cp->gss_clnt_nctime) { on_neg_cache = 1; @@ -2592,14 +2611,14 @@ nfs_gss_clnt_ctx_unmount(struct nfsmount *nmp) struct nfs_gss_clnt_ctx *cp; struct nfsm_chain nmreq, nmrep; int error, status; - struct nfsreq req; - req.r_nmp = nmp; + struct nfsreq *req; if (!nmp) { return; } - + req = zalloc(nfs_req_zone); + req->r_nmp = nmp; lck_mtx_lock(&nmp->nm_lock); while ((cp = TAILQ_FIRST(&nmp->nm_gsscl))) { TAILQ_REMOVE(&nmp->nm_gsscl, cp, gss_clnt_entries); @@ -2611,7 +2630,7 @@ nfs_gss_clnt_ctx_unmount(struct nfsmount *nmp) } cp->gss_clnt_refcnt++; lck_mtx_unlock(cp->gss_clnt_mtx); - req.r_gss_ctx = cp; + req->r_gss_ctx = cp; lck_mtx_unlock(&nmp->nm_lock); /* @@ -2643,11 +2662,12 @@ nfs_gss_clnt_ctx_unmount(struct nfsmount *nmp) lck_mtx_lock(cp->gss_clnt_mtx); cp->gss_clnt_flags |= (GSS_CTX_INVAL | GSS_CTX_DESTROY); lck_mtx_unlock(cp->gss_clnt_mtx); - nfs_gss_clnt_ctx_unref(&req); + nfs_gss_clnt_ctx_unref(req); lck_mtx_lock(&nmp->nm_lock); } lck_mtx_unlock(&nmp->nm_lock); assert(TAILQ_EMPTY(&nmp->nm_gsscl)); + NFS_ZFREE(nfs_req_zone, req); } @@ -2657,15 +2677,16 @@ nfs_gss_clnt_ctx_unmount(struct nfsmount *nmp) int 
nfs_gss_clnt_ctx_remove(struct nfsmount *nmp, kauth_cred_t cred) { - struct nfs_gss_clnt_ctx *cp; - struct nfsreq req; + struct nfs_gss_clnt_ctx *cp, *tcp; + struct nfsreq *req; - req.r_nmp = nmp; + req = zalloc(nfs_req_zone); + req->r_nmp = nmp; NFS_GSS_DBG("Enter\n"); NFS_GSS_CLNT_CTX_DUMP(nmp); lck_mtx_lock(&nmp->nm_lock); - TAILQ_FOREACH(cp, &nmp->nm_gsscl, gss_clnt_entries) { + TAILQ_FOREACH_SAFE(cp, &nmp->nm_gsscl, gss_clnt_entries, tcp) { lck_mtx_lock(cp->gss_clnt_mtx); if (nfs_gss_clnt_ctx_cred_match(cp->gss_clnt_cred, cred)) { if (cp->gss_clnt_flags & GSS_CTX_DESTROY) { @@ -2679,7 +2700,7 @@ nfs_gss_clnt_ctx_remove(struct nfsmount *nmp, kauth_cred_t cred) cp->gss_clnt_refcnt++; cp->gss_clnt_flags |= (GSS_CTX_INVAL | GSS_CTX_DESTROY); lck_mtx_unlock(cp->gss_clnt_mtx); - req.r_gss_ctx = cp; + req->r_gss_ctx = cp; lck_mtx_unlock(&nmp->nm_lock); /* * Drop the reference to remove it if its @@ -2689,7 +2710,8 @@ nfs_gss_clnt_ctx_remove(struct nfsmount *nmp, kauth_cred_t cred) kauth_cred_getasid(cp->gss_clnt_cred), kauth_cred_getuid(cp->gss_clnt_cred), cp->gss_clnt_refcnt); - nfs_gss_clnt_ctx_unref(&req); + nfs_gss_clnt_ctx_unref(req); + NFS_ZFREE(nfs_req_zone, req); return 0; } lck_mtx_unlock(cp->gss_clnt_mtx); @@ -2697,6 +2719,7 @@ nfs_gss_clnt_ctx_remove(struct nfsmount *nmp, kauth_cred_t cred) lck_mtx_unlock(&nmp->nm_lock); + NFS_ZFREE(nfs_req_zone, req); NFS_GSS_DBG("Returning ENOENT\n"); return ENOENT; } @@ -2706,21 +2729,20 @@ nfs_gss_clnt_ctx_remove(struct nfsmount *nmp, kauth_cred_t cred) */ int nfs_gss_clnt_ctx_set_principal(struct nfsmount *nmp, vfs_context_t ctx, - uint8_t *principal, uint32_t princlen, uint32_t nametype) + uint8_t *principal, size_t princlen, uint32_t nametype) { - struct nfsreq req; + struct nfsreq *req; int error; NFS_GSS_DBG("Enter:\n"); - bzero(&req, sizeof(struct nfsreq)); - req.r_nmp = nmp; - req.r_gss_ctx = NULL; - req.r_auth = nmp->nm_auth; - req.r_thread = vfs_context_thread(ctx); - req.r_cred = vfs_context_ucred(ctx); + req = zalloc_flags(nfs_req_zone, Z_WAITOK | Z_ZERO); + req->r_nmp = nmp; + req->r_auth = nmp->nm_auth; + req->r_thread = vfs_context_thread(ctx); + req->r_cred = vfs_context_ucred(ctx); - error = nfs_gss_clnt_ctx_find_principal(&req, principal, princlen, nametype); + error = nfs_gss_clnt_ctx_find_principal(req, principal, princlen, nametype); NFS_GSS_DBG("nfs_gss_clnt_ctx_find_principal returned %d\n", error); /* * We don't care about auth errors. 
Those would indicate that the context is in the @@ -2732,8 +2754,8 @@ nfs_gss_clnt_ctx_set_principal(struct nfsmount *nmp, vfs_context_t ctx, } /* We're done with this request */ - nfs_gss_clnt_ctx_unref(&req); - + nfs_gss_clnt_ctx_unref(req); + NFS_ZFREE(nfs_req_zone, req); return error; } @@ -2744,7 +2766,7 @@ int nfs_gss_clnt_ctx_get_principal(struct nfsmount *nmp, vfs_context_t ctx, struct user_nfs_gss_principal *p) { - struct nfsreq req; + struct nfsreq *req; int error = 0; struct nfs_gss_clnt_ctx *cp; kauth_cred_t cred = vfs_context_ucred(ctx); @@ -2757,13 +2779,14 @@ nfs_gss_clnt_ctx_get_principal(struct nfsmount *nmp, vfs_context_t ctx, p->princlen = 0; p->flags = 0; - req.r_nmp = nmp; + req = zalloc_flags(nfs_req_zone, Z_WAITOK); + req->r_nmp = nmp; lck_mtx_lock(&nmp->nm_lock); TAILQ_FOREACH(cp, &nmp->nm_gsscl, gss_clnt_entries) { lck_mtx_lock(cp->gss_clnt_mtx); if (cp->gss_clnt_flags & GSS_CTX_DESTROY) { NFS_GSS_DBG("Found destroyed context %s refcnt = %d continuing\n", - NFS_GSS_CTX(&req, cp), + NFS_GSS_CTX(req, cp), cp->gss_clnt_refcnt); lck_mtx_unlock(cp->gss_clnt_mtx); continue; @@ -2782,6 +2805,7 @@ out: p->flags |= NFS_IOC_NO_CRED_FLAG; /* No credentials, valid or invalid on this mount */ NFS_GSS_DBG("No context found for session %d by uid %d\n", kauth_cred_getasid(cred), kauth_cred_getuid(cred)); + NFS_ZFREE(nfs_req_zone, req); return 0; } @@ -2816,9 +2840,10 @@ out: lck_mtx_unlock(&nmp->nm_lock); - req.r_gss_ctx = cp; - NFS_GSS_DBG("Found context %s\n", NFS_GSS_CTX(&req, NULL)); - nfs_gss_clnt_ctx_unref(&req); + req->r_gss_ctx = cp; + NFS_GSS_DBG("Found context %s\n", NFS_GSS_CTX(req, NULL)); + nfs_gss_clnt_ctx_unref(req); + NFS_ZFREE(nfs_req_zone, req); return error; } #endif /* CONFIG_NFS_CLIENT */ @@ -3004,10 +3029,10 @@ nfs_gss_svc_cred_get(struct nfsrv_descript *nd, struct nfsm_chain *nmc) uint32_t handle, handle_len; uint32_t major; struct nfs_gss_svc_ctx *cp = NULL; - uint32_t flavor = 0, header_len; + uint32_t flavor = 0; int error = 0; - uint32_t arglen, start; - size_t argsize; + uint32_t arglen; + size_t argsize, start, header_len; gss_buffer_desc cksum; struct nfsm_chain nmc_tmp; mbuf_t reply_mbuf, prev_mbuf, pad_mbuf; @@ -3153,7 +3178,7 @@ nfs_gss_svc_cred_get(struct nfsrv_descript *nd, struct nfsm_chain *nmc) temp_pcred.cr_uid = cp->gss_svc_uid; bcopy(cp->gss_svc_gids, temp_pcred.cr_groups, sizeof(gid_t) * cp->gss_svc_ngroups); - temp_pcred.cr_ngroups = cp->gss_svc_ngroups; + temp_pcred.cr_ngroups = (short)cp->gss_svc_ngroups; nd->nd_cr = posix_cred_create(&temp_pcred); if (nd->nd_cr == NULL) { @@ -3484,7 +3509,7 @@ nfs_gss_svc_ctx_init(struct nfsrv_descript *nd, struct nfsrv_sock *slp, mbuf_t * switch (cp->gss_svc_proc) { case RPCSEC_GSS_INIT: nfs_gss_svc_ctx_insert(cp); - /* FALLTHRU */ + OS_FALLTHROUGH; case RPCSEC_GSS_CONTINUE_INIT: /* Get the token from the request */ @@ -3888,7 +3913,7 @@ host_copy_special_port(mach_port_t mp) * complete. 
*/ static void -nfs_gss_mach_alloc_buffer(u_char *buf, uint32_t buflen, vm_map_copy_t *addr) +nfs_gss_mach_alloc_buffer(u_char *buf, size_t buflen, vm_map_copy_t *addr) { kern_return_t kr; vm_offset_t kmem_buf; diff --git a/bsd/nfs/nfs_gss.h b/bsd/nfs/nfs_gss.h index 302489486..01aaabee7 100644 --- a/bsd/nfs/nfs_gss.h +++ b/bsd/nfs/nfs_gss.h @@ -92,7 +92,7 @@ struct nfs_gss_clnt_ctx { int32_t gss_clnt_refcnt; // Reference count kauth_cred_t gss_clnt_cred; // Owner of this context uint8_t *gss_clnt_principal; // Principal to use for this credential - uint32_t gss_clnt_prinlen; // Length of principal + size_t gss_clnt_prinlen; // Length of principal gssd_nametype gss_clnt_prinnt; // Name type of principal char *gss_clnt_display; // display name of principal uint32_t gss_clnt_proc; // Current GSS proc for cred @@ -107,7 +107,7 @@ struct nfs_gss_clnt_ctx { uint32_t gss_clnt_verflen; // RPC verifier length from server uint8_t *gss_clnt_verf; // RPC verifier from server uint8_t *gss_clnt_svcname; // Service name e.g. "nfs/big.apple.com" - uint32_t gss_clnt_svcnamlen; // Service name length + size_t gss_clnt_svcnamlen; // Service name length gssd_nametype gss_clnt_svcnt; // Service name type gssd_cred gss_clnt_cred_handle; // Opaque cred handle from gssd gssd_ctx gss_clnt_context; // Opaque context handle from gssd @@ -129,6 +129,7 @@ struct nfs_gss_clnt_ctx { #define GSS_NEEDSEQ 0x00000008 // Need a sequence number #define GSS_NEEDCTX 0x00000010 // Need the context #define GSS_CTX_DESTROY 0x00000020 // Context is being destroyed, don't cache +#define GSS_CTX_USECOUNT 0x00000040 // Mount vnode's user count has been updated /* * The server's RPCSEC_GSS context information @@ -194,7 +195,7 @@ void nfs_gss_clnt_ctx_ref(struct nfsreq *, struct nfs_gss_clnt_ctx *); void nfs_gss_clnt_ctx_unref(struct nfsreq *); void nfs_gss_clnt_ctx_unmount(struct nfsmount *); int nfs_gss_clnt_ctx_remove(struct nfsmount *, kauth_cred_t); -int nfs_gss_clnt_ctx_set_principal(struct nfsmount *, vfs_context_t, uint8_t *, uint32_t, uint32_t); +int nfs_gss_clnt_ctx_set_principal(struct nfsmount *, vfs_context_t, uint8_t *, size_t, uint32_t); int nfs_gss_clnt_ctx_get_principal(struct nfsmount *, vfs_context_t, struct user_nfs_gss_principal *); int nfs_gss_svc_cred_get(struct nfsrv_descript *, struct nfsm_chain *); int nfs_gss_svc_verf_put(struct nfsrv_descript *, struct nfsm_chain *); diff --git a/bsd/nfs/nfs_ioctl.h b/bsd/nfs/nfs_ioctl.h index 5f9b1fc2d..c4eff26ea 100644 --- a/bsd/nfs/nfs_ioctl.h +++ b/bsd/nfs/nfs_ioctl.h @@ -43,7 +43,7 @@ * fsctl (vnop_ioctl) to set the callers credentials associated with the vnode's mount */ struct nfs_gss_principal { - uint32_t princlen; /* length of data */ + size_t princlen; /* length of data */ uint32_t nametype; /* nametype of data */ #ifdef KERNEL user32_addr_t principal; /* principal data in userspace */ @@ -56,7 +56,7 @@ struct nfs_gss_principal { #ifdef KERNEL /* LP64 version of nfs_gss_principal */ struct user_nfs_gss_principal { - uint32_t princlen; /* length of data */ + size_t princlen; /* length of data */ uint32_t nametype; /* nametype of data */ user64_addr_t principal; /* principal data in userspace */ uint32_t flags; /* Returned flags */ diff --git a/bsd/nfs/nfs_lock.c b/bsd/nfs/nfs_lock.c index 352e1b61e..4b0d19631 100644 --- a/bsd/nfs/nfs_lock.c +++ b/bsd/nfs/nfs_lock.c @@ -436,7 +436,8 @@ nfs3_lockd_request( int interruptable, slpflag; struct nfsmount *nmp; struct timeval now; - int timeo, starttime, endtime, lastmsg, wentdown = 0; + int timeo, wentdown = 0; + long 
starttime, endtime, lastmsg; struct timespec ts; struct sockaddr *saddr; diff --git a/bsd/nfs/nfs_node.c b/bsd/nfs/nfs_node.c index 60bd5609f..c47fa9263 100644 --- a/bsd/nfs/nfs_node.c +++ b/bsd/nfs/nfs_node.c @@ -98,6 +98,9 @@ static lck_grp_t *nfs_node_lck_grp; static lck_grp_t *nfs_data_lck_grp; lck_mtx_t *nfs_node_hash_mutex; +ZONE_DECLARE(nfsnode_zone, "NFS node", + sizeof(struct nfsnode), ZC_ZFREE_CLEARMEM); + #define NFS_NODE_DBG(...) NFS_DBG(NFS_FAC_NODE, 7, ## __VA_ARGS__) /* @@ -194,7 +197,7 @@ nfs_nget( nfsnode_t dnp, struct componentname *cnp, u_char *fhp, - int fhsize, + uint32_t fhsize, struct nfs_vattr *nvap, u_int64_t *xidp, uint32_t auth, @@ -207,7 +210,8 @@ nfs_nget( int error, nfsvers; mount_t mp2; struct vnode_fsparam vfsp; - uint32_t vid; + uint32_t vid, cn_namelen; + u_long nfshash; FSDBG_TOP(263, mp, dnp, flags, npp); @@ -219,10 +223,11 @@ nfs_nget( return error; } nfsvers = VFSTONFS(mp)->nm_vers; - - nhpp = NFSNOHASH(nfs_hash(fhp, fhsize)); + cn_namelen = cnp ? cnp->cn_namelen : 0; + nfshash = nfs_hash(fhp, fhsize); loop: lck_mtx_lock(nfs_node_hash_mutex); + nhpp = NFSNOHASH(nfshash); for (np = nhpp->lh_first; np != 0; np = np->n_hash.le_next) { mp2 = (np->n_hflag & NHINIT) ? np->n_mount : NFSTOMP(np); if (mp != mp2 || np->n_fhsize != fhsize || @@ -230,15 +235,15 @@ loop: continue; } if (nvap && (nvap->nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) && - cnp && (cnp->cn_namelen > (fhsize - (int)sizeof(dnp)))) { + cnp && (cn_namelen > (fhsize - sizeof(dnp)))) { /* The name was too long to fit in the file handle. Check it against the node's name. */ int namecmp = 0; const char *vname = vnode_getname(NFSTOV(np)); if (vname) { - if (cnp->cn_namelen != (int)strlen(vname)) { + if (cn_namelen != strlen(vname)) { namecmp = 1; } else { - namecmp = strncmp(vname, cnp->cn_nameptr, cnp->cn_namelen); + namecmp = strncmp(vname, cnp->cn_nameptr, cn_namelen); } vnode_putname(vname); } @@ -247,8 +252,8 @@ loop: } } FSDBG(263, dnp, np, np->n_flag, 0xcace0000); - /* if the node is locked, sleep on it */ - if ((np->n_hflag & NHLOCKED) && !(flags & NG_NOCREATE)) { + /* if the node is being initialized or locked, sleep on it */ + if ((np->n_hflag & NHINIT) || ((np->n_hflag & NHLOCKED) && !(flags & NG_NOCREATE))) { np->n_hflag |= NHLOCKWANT; FSDBG(263, dnp, np, np->n_flag, 0xcace2222); msleep(np, nfs_node_hash_mutex, PDROP | PINOD, "nfs_nget", NULL); @@ -356,13 +361,13 @@ loop: cmp = nfs_case_insensitive(mp) ? strncasecmp : strncmp; - if (vp->v_name && (size_t)cnp->cn_namelen != strnlen(vp->v_name, MAXPATHLEN)) { + if (vp->v_name && cn_namelen != strnlen(vp->v_name, MAXPATHLEN)) { update_flags |= VNODE_UPDATE_NAME; } - if (vp->v_name && cnp->cn_namelen && (*cmp)(cnp->cn_nameptr, vp->v_name, cnp->cn_namelen)) { + if (vp->v_name && cn_namelen && (*cmp)(cnp->cn_nameptr, vp->v_name, cn_namelen)) { update_flags |= VNODE_UPDATE_NAME; } - if ((vp->v_name == NULL && cnp->cn_namelen != 0) || (vp->v_name != NULL && cnp->cn_namelen == 0)) { + if ((vp->v_name == NULL && cn_namelen != 0) || (vp->v_name != NULL && cn_namelen == 0)) { update_flags |= VNODE_UPDATE_NAME; } if (vnode_parent(vp) != NFSTOV(dnp)) { @@ -370,8 +375,8 @@ loop: } if (update_flags) { NFS_NODE_DBG("vnode_update_identity old name %s new name %.*s update flags = %x\n", - vp->v_name, cnp->cn_namelen, cnp->cn_nameptr ? cnp->cn_nameptr : "", update_flags); - vnode_update_identity(vp, NFSTOV(dnp), cnp->cn_nameptr, cnp->cn_namelen, 0, update_flags); + vp->v_name, cn_namelen, cnp->cn_nameptr ? 
cnp->cn_nameptr : "", update_flags); + vnode_update_identity(vp, NFSTOV(dnp), cnp->cn_nameptr, cn_namelen, 0, update_flags); } } @@ -395,14 +400,7 @@ loop: * before calling getnewvnode(). Anyone finding it in the * hash before initialization is complete will wait for it. */ - MALLOC_ZONE(np, nfsnode_t, sizeof *np, M_NFSNODE, M_WAITOK); - if (!np) { - lck_mtx_unlock(nfs_node_hash_mutex); - *npp = 0; - FSDBG_BOT(263, dnp, *npp, 0x80000001, ENOMEM); - return ENOMEM; - } - bzero(np, sizeof *np); + np = zalloc_flags(nfsnode_zone, Z_WAITOK | Z_ZERO); np->n_hflag |= (NHINIT | NHLOCKED); np->n_mount = mp; np->n_auth = auth; @@ -414,7 +412,7 @@ loop: np->n_monlink.le_next = NFSNOLIST; /* ugh... need to keep track of ".zfs" directories to workaround server bugs */ - if ((nvap->nva_type == VDIR) && cnp && (cnp->cn_namelen == 4) && + if ((nvap->nva_type == VDIR) && cnp && (cn_namelen == 4) && (cnp->cn_nameptr[0] == '.') && (cnp->cn_nameptr[1] == 'z') && (cnp->cn_nameptr[2] == 'f') && (cnp->cn_nameptr[3] == 's')) { np->n_flag |= NISDOTZFS; @@ -423,7 +421,7 @@ loop: np->n_flag |= NISDOTZFSCHILD; } - if (dnp && cnp && ((cnp->cn_namelen != 2) || + if (dnp && cnp && ((cn_namelen != 2) || (cnp->cn_nameptr[0] != '.') || (cnp->cn_nameptr[1] != '.'))) { vnode_t dvp = NFSTOV(dnp); if (!vnode_get(dvp)) { @@ -436,11 +434,10 @@ loop: /* setup node's file handle */ if (fhsize > NFS_SMALLFH) { - MALLOC_ZONE(np->n_fhp, u_char *, - fhsize, M_NFSBIGFH, M_WAITOK); + MALLOC(np->n_fhp, u_char *, fhsize, M_NFSBIGFH, M_WAITOK); if (!np->n_fhp) { lck_mtx_unlock(nfs_node_hash_mutex); - FREE_ZONE(np, sizeof *np, M_NFSNODE); + NFS_ZFREE(nfsnode_zone, np); *npp = 0; FSDBG_BOT(263, dnp, *npp, 0x80000002, ENOMEM); return ENOMEM; @@ -491,9 +488,9 @@ loop: lck_rw_destroy(&np->n_datalock, nfs_data_lck_grp); lck_mtx_destroy(&np->n_openlock, nfs_open_grp); if (np->n_fhsize > NFS_SMALLFH) { - FREE_ZONE(np->n_fhp, np->n_fhsize, M_NFSBIGFH); + FREE(np->n_fhp, M_NFSBIGFH); } - FREE_ZONE(np, sizeof *np, M_NFSNODE); + NFS_ZFREE(nfsnode_zone, np); *npp = 0; FSDBG_BOT(263, dnp, *npp, 0x80000003, error); return error; @@ -585,9 +582,9 @@ loop: lck_rw_destroy(&np->n_datalock, nfs_data_lck_grp); lck_mtx_destroy(&np->n_openlock, nfs_open_grp); if (np->n_fhsize > NFS_SMALLFH) { - FREE_ZONE(np->n_fhp, np->n_fhsize, M_NFSBIGFH); + FREE(np->n_fhp, M_NFSBIGFH); } - FREE_ZONE(np, sizeof *np, M_NFSNODE); + NFS_ZFREE(nfsnode_zone, np); *npp = 0; FSDBG_BOT(263, dnp, *npp, 0x80000004, error); return error; @@ -624,7 +621,7 @@ nfs_vnop_inactive( vfs_context_t ctx = ap->a_context; nfsnode_t np; struct nfs_sillyrename *nsp; - struct nfs_vattr nvattr; + struct nfs_vattr *nvattr; int unhash, attrerr, busyerror, error, inuse, busied, force; struct nfs_open_file *nofp; struct componentname cn; @@ -641,6 +638,7 @@ nfs_vnop_inactive( nmp = NFSTONMP(np); mp = vnode_mount(vp); + MALLOC(nvattr, struct nfs_vattr *, sizeof(*nvattr), M_TEMP, M_WAITOK); restart: force = (!mp || vfs_isforce(mp)); @@ -661,7 +659,10 @@ restart: NP(np, "nfs_vnop_inactive: still open: %d", np->n_openrefcnt); #endif lck_mtx_unlock(&np->n_openlock); - return 0; + if (inuse) { + nfs_mount_state_in_use_end(nmp, 0); + } + goto out_free; } TAILQ_FOREACH(nofp, &np->n_opens, nof_link) { @@ -695,10 +696,10 @@ restart: if (busied) { nfs_open_file_clear_busy(nofp); } - if (inuse) { - nfs_mount_state_in_use_end(nmp, 0); - } if (!nfs4_reopen(nofp, NULL)) { + if (inuse) { + nfs_mount_state_in_use_end(nmp, 0); + } goto restart; } } @@ -732,17 +733,21 @@ restart: } else if (!force) { 
lck_mtx_unlock(&np->n_openlock); if (nofp->nof_flags & NFS_OPEN_FILE_REOPEN) { + int should_restart = 0; if (busied) { nfs_open_file_clear_busy(nofp); } - if (inuse) { - nfs_mount_state_in_use_end(nmp, 0); - } #if CONFIG_NFS4 if (!nfs4_reopen(nofp, NULL)) { - goto restart; + should_restart = 1; } #endif + if (should_restart) { + if (inuse) { + nfs_mount_state_in_use_end(nmp, 0); + } + goto restart; + } } error = nfs_close(np, nofp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_NONE, ctx); if (error) { @@ -809,7 +814,7 @@ restart: np->n_flag &= (NMODIFIED); nfs_node_unlock(np); FSDBG_BOT(264, vp, np, np->n_flag, 0); - return 0; + goto out_free; } nfs_node_unlock(np); @@ -819,11 +824,11 @@ restart: nfs_vinvalbuf2(vp, V_SAVE, vfs_context_thread(ctx), nsp->nsr_cred, 1); /* try to get the latest attributes */ - attrerr = nfs_getattr(np, &nvattr, ctx, NGA_UNCACHED); + attrerr = nfs_getattr(np, nvattr, ctx, NGA_UNCACHED); /* Check if we should remove it from the node hash. */ /* Leave it if inuse or it has multiple hard links. */ - if (vnode_isinuse(vp, 0) || (!attrerr && (nvattr.nva_nlink > 1))) { + if (vnode_isinuse(vp, 0) || (!attrerr && (nvattr->nva_nlink > 1))) { unhash = 0; } else { unhash = 1; @@ -895,9 +900,10 @@ restart: kauth_cred_unref(&nsp->nsr_cred); } vnode_rele(NFSTOV(nsp->nsr_dnp)); - FREE_ZONE(nsp, sizeof(*nsp), M_NFSREQ); - + FREE(nsp, M_TEMP); FSDBG_BOT(264, vp, np, np->n_flag, 0); +out_free: + FREE(nvattr, M_TEMP); return 0; } @@ -952,8 +958,6 @@ nfs_vnop_reclaim( if ((np->n_openflags & N_DELEG_MASK) && !force) { /* try to return the delegation */ np->n_openflags &= ~N_DELEG_MASK; - nfs4_delegreturn_rpc(nmp, np->n_fhp, np->n_fhsize, &np->n_dstateid, - R_RECOVER, vfs_context_thread(ctx), vfs_context_ucred(ctx)); } if (np->n_attrdirfh) { FREE(np->n_attrdirfh, M_TEMP); @@ -1069,7 +1073,7 @@ nfs_vnop_reclaim( kauth_cred_unref(&np->n_sillyrename->nsr_cred); } vnode_rele(NFSTOV(np->n_sillyrename->nsr_dnp)); - FREE_ZONE(np->n_sillyrename, sizeof(*np->n_sillyrename), M_NFSREQ); + FREE(np->n_sillyrename, M_TEMP); } vnode_removefsref(vp); @@ -1087,10 +1091,10 @@ nfs_vnop_reclaim( */ nfs_node_lock_force(np); if ((vnode_vtype(vp) == VDIR) && np->n_cookiecache) { - FREE_ZONE(np->n_cookiecache, sizeof(struct nfsdmap), M_NFSDIROFF); + NFS_ZFREE(ZV_NFSDIROFF, np->n_cookiecache); } if (np->n_fhsize > NFS_SMALLFH) { - FREE_ZONE(np->n_fhp, np->n_fhsize, M_NFSBIGFH); + FREE(np->n_fhp, M_NFSBIGFH); } if (np->n_vattr.nva_acl) { kauth_acl_free(np->n_vattr.nva_acl); @@ -1111,7 +1115,7 @@ nfs_vnop_reclaim( lck_mtx_destroy(&np->n_openlock, nfs_open_grp); FSDBG_BOT(265, vp, np, np->n_flag, 0xd1ed1e); - FREE_ZONE(np, sizeof(struct nfsnode), M_NFSNODE); + NFS_ZFREE(nfsnode_zone, np); return 0; } diff --git a/bsd/nfs/nfs_serv.c b/bsd/nfs/nfs_serv.c index 189978adf..31db576a6 100644 --- a/bsd/nfs/nfs_serv.c +++ b/bsd/nfs/nfs_serv.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2019 Apple Inc. All rights reserved. + * Copyright (c) 2000-2020 Apple Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -928,11 +928,11 @@ nfsrv_read( mbuf_t *mrepp) { int error, attrerr, mreadcnt; - uint32_t reqlen, maxlen, count, len, tlen, left; + uint32_t reqlen, maxlen, count, len, tlen; mbuf_t mread, m; vnode_t vp; struct nfs_filehandle nfh; - struct nfs_export *nx; + struct nfs_export *nx = NULL; struct nfs_export_options *nxo; uio_t auio = NULL; char *uio_bufp = NULL; @@ -1012,12 +1012,12 @@ nfsrv_read( if ((u_quad_t)off >= vap->va_data_size) { count = 0; } else if (((u_quad_t)off + reqlen) > vap->va_data_size) { - count = nfsm_rndup(vap->va_data_size - off); + count = (int)nfsm_rndup(vap->va_data_size - off); } else { count = reqlen; } - len = left = count; + len = count; if (count > 0) { /* get mbuf list to hold read data */ error = nfsm_mbuf_get_list(count, &mread, &mreadcnt); @@ -1142,7 +1142,8 @@ nfsrv_fmod_timer(__unused void *param0, __unused void *param1) struct nfsrv_fmod_hashhead *headp, firehead; struct nfsrv_fmod *fp, *nfp, *pfp; uint64_t timenow, next_deadline; - int interval = 0, i, fmod_fire; + time_t interval = 0; + int i, fmod_fire; LIST_INIT(&firehead); lck_mtx_lock(nfsrv_fmod_mutex); @@ -1222,7 +1223,7 @@ again: * entry is ready to send its fsevent. */ if (nfsrv_fmod_pending > 0) { - interval = (next_deadline - timenow) / (1000 * 1000); + interval = ((time_t)(next_deadline - timenow)) / (1000 * 1000); if (interval < nfsrv_fmod_min_interval) { interval = nfsrv_fmod_min_interval; } @@ -1328,7 +1329,7 @@ nfsrv_write( mbuf_t m; vnode_t vp; struct nfs_filehandle nfh; - struct nfs_export *nx; + struct nfs_export *nx = NULL; struct nfs_export_options *nxo; uio_t auio = NULL; char *uio_bufp = NULL; @@ -1439,7 +1440,7 @@ nfsrv_write( } nfsmerr_if(error); for (m = nmreq->nmc_mcur; m; m = mbuf_next(m)) { - if ((mlen = mbuf_len(m)) > 0) { + if ((mlen = (int)mbuf_len(m)) > 0) { uio_addiov(auio, CAST_USER_ADDR_T((caddr_t)mbuf_data(m)), mlen); } } @@ -1555,13 +1556,14 @@ nfsrv_writegather( struct nfsrv_wg_delayhash *wpp; uid_t saved_uid; struct vnode_attr preattr, postattr; - int error, mlen, i, ioflags, tlen; + int error, mlen, i, ioflags; + size_t tlen; int preattrerr, postattrerr; vnode_t vp; mbuf_t m; uio_t auio = NULL; char *uio_bufp = NULL; - u_quad_t cur_usec; + time_t cur_usec; struct timeval now; struct nfsm_chain *nmreq, nmrep; @@ -1579,7 +1581,7 @@ nfsrv_writegather( nd->nd_mrep = NULL; nd->nd_stable = NFS_WRITE_FILESYNC; microuptime(&now); - cur_usec = (u_quad_t)now.tv_sec * 1000000 + (u_quad_t)now.tv_usec; + cur_usec = now.tv_sec * 1000000 + now.tv_usec; nd->nd_time = cur_usec + ((nd->nd_vers == NFS_VER3) ? 
nfsrv_wg_delay_v3 : nfsrv_wg_delay); @@ -1680,7 +1682,7 @@ nfsmerr: */ loop1: microuptime(&now); - cur_usec = (u_quad_t)now.tv_sec * 1000000 + (u_quad_t)now.tv_usec; + cur_usec = now.tv_sec * 1000000 + now.tv_usec; for (nd = slp->ns_tq.lh_first; nd; nd = owp) { owp = nd->nd_tq.le_next; if (nd->nd_time > cur_usec) { @@ -1883,7 +1885,8 @@ loop1: int nfsrv_wg_coalesce(struct nfsrv_descript *owp, struct nfsrv_descript *nd) { - int overlap, error; + int error; + off_t overlap; mbuf_t mp, mpnext; struct nfsrv_descript *p; @@ -1895,7 +1898,7 @@ nfsrv_wg_coalesce(struct nfsrv_descript *owp, struct nfsrv_descript *nd) return EIO; } if (overlap > 0) { - mbuf_adj(nd->nd_nmreq.nmc_mhead, overlap); + mbuf_adj(nd->nd_nmreq.nmc_mhead, (int)overlap); } mp = owp->nd_nmreq.nmc_mhead; while ((mpnext = mbuf_next(mp))) { @@ -1938,13 +1941,13 @@ void nfsrv_wg_timer(__unused void *param0, __unused void *param1) { struct timeval now; - uint64_t cur_usec, next_usec; - int interval; + time_t cur_usec, next_usec; + time_t interval; struct nfsrv_sock *slp; int writes_pending = 0; microuptime(&now); - cur_usec = (uint64_t)now.tv_sec * 1000000 + (uint64_t)now.tv_usec; + cur_usec = now.tv_sec * 1000000 + now.tv_usec; next_usec = cur_usec + (NFSRV_WGATHERDELAY * 1000); lck_mtx_lock(nfsd_mutex); @@ -2017,13 +2020,14 @@ nfsrv_create( struct vnode_attr dpreattr, dpostattr, postattr; struct vnode_attr va, *vap = &va; struct nameidata ni; - int error, rdev, dpreattrerr, dpostattrerr, postattrerr; + int error, dpreattrerr, dpostattrerr, postattrerr; int how, exclusive_flag; uint32_t len = 0, cnflags; + uint64_t rdev; vnode_t vp, dvp, dirp; struct nfs_filehandle nfh; struct nfs_export *nx = NULL; - struct nfs_export_options *nxo; + struct nfs_export_options *nxo = NULL; u_quad_t tempsize; u_char cverf[NFSX_V3CREATEVERF]; uid_t saved_uid; @@ -2091,6 +2095,7 @@ nfsrv_create( error = EEXIST; break; } + OS_FALLTHROUGH; case NFS_CREATE_UNCHECKED: error = nfsm_chain_get_sattr(nd, nmreq, vap); break; @@ -2213,7 +2218,7 @@ nfsrv_create( } } else if (vap->va_type == VCHR || vap->va_type == VBLK || vap->va_type == VFIFO) { - if (vap->va_type == VCHR && rdev == (int)0xffffffff) { + if (vap->va_type == VCHR && rdev == 0xffffffff) { VATTR_SET(vap, va_type, VFIFO); } if (vap->va_type != VFIFO) { @@ -2389,7 +2394,7 @@ nfsrv_mknod( vnode_t vp, dvp, dirp; struct nfs_filehandle nfh; struct nfs_export *nx = NULL; - struct nfs_export_options *nxo; + struct nfs_export_options *nxo = NULL; uid_t saved_uid; kauth_acl_t xacl = NULL; struct nfsm_chain *nmreq, nmrep; @@ -2641,7 +2646,7 @@ nfsrv_remove( struct vnode_attr dpreattr, dpostattr; struct nfs_filehandle nfh; struct nfs_export *nx = NULL; - struct nfs_export_options *nxo; + struct nfs_export_options *nxo = NULL; struct nfsm_chain *nmreq, nmrep; error = 0; @@ -2708,7 +2713,7 @@ nfsrv_remove( if (!error) { #if CONFIG_FSE char *path = NULL; - int plen; + int plen = 0; fse_info finfo; if (nfsrv_fsevents_enabled && need_fsevent(FSE_DELETE, dvp)) { @@ -3143,10 +3148,8 @@ auth_exit: mount_ref(locked_mp, 0); /* make a copy of to path to pass to nfsrv_namei() again */ - MALLOC_ZONE(topath, caddr_t, MAXPATHLEN, M_NAMEI, M_WAITOK); - if (topath) { - bcopy(toni.ni_cnd.cn_pnbuf, topath, tolen + 1); - } + topath = zalloc(ZV_NAMEI); + bcopy(toni.ni_cnd.cn_pnbuf, topath, tolen + 1); /* * nameidone has to happen before we vnode_put(tdvp) @@ -3160,10 +3163,8 @@ auth_exit: vnode_put(tdvp); /* make a copy of from path to pass to nfsrv_namei() again */ - MALLOC_ZONE(frompath, caddr_t, MAXPATHLEN, M_NAMEI, M_WAITOK); 
- if (frompath) { - bcopy(fromni.ni_cnd.cn_pnbuf, frompath, fromlen + 1); - } + frompath = zalloc(ZV_NAMEI); + bcopy(fromni.ni_cnd.cn_pnbuf, frompath, fromlen + 1); /* * nameidone has to happen before we vnode_put(fdvp) @@ -3407,10 +3408,10 @@ nfsmout: vnode_put(tdirp); } if (frompath) { - FREE_ZONE(frompath, MAXPATHLEN, M_NAMEI); + NFS_ZFREE(ZV_NAMEI, frompath); } if (topath) { - FREE_ZONE(topath, MAXPATHLEN, M_NAMEI); + NFS_ZFREE(ZV_NAMEI, topath); } if (saved_cred) { kauth_cred_unref(&saved_cred); @@ -3614,7 +3615,7 @@ nfsrv_symlink( vnode_t vp, dvp, dirp; struct nfs_filehandle nfh; struct nfs_export *nx = NULL; - struct nfs_export_options *nxo; + struct nfs_export_options *nxo = NULL; uio_t auio = NULL; char uio_buf[UIO_SIZEOF(1)]; struct nfsm_chain *nmreq, nmrep; @@ -3853,7 +3854,7 @@ nfsrv_mkdir( vnode_t vp, dvp, dirp; struct nfs_filehandle nfh; struct nfs_export *nx = NULL; - struct nfs_export_options *nxo; + struct nfs_export_options *nxo = NULL; uid_t saved_uid; kauth_acl_t xacl = NULL; struct nfsm_chain *nmreq, nmrep; @@ -4086,7 +4087,7 @@ nfsrv_rmdir( struct vnode_attr dpreattr, dpostattr; struct nfs_filehandle nfh; struct nfs_export *nx = NULL; - struct nfs_export_options *nxo; + struct nfs_export_options *nxo = NULL; struct nameidata ni; struct nfsm_chain *nmreq, nmrep; @@ -4164,7 +4165,7 @@ nfsrv_rmdir( if (!error) { #if CONFIG_FSE char *path = NULL; - int plen; + int plen = 0; fse_info finfo; if (nfsrv_fsevents_enabled && need_fsevent(FSE_DELETE, dvp)) { @@ -4277,7 +4278,7 @@ nfsrv_readdir( char uio_buf[UIO_SIZEOF(1)]; int len, nlen, rem, xfer, error, attrerr; int siz, count, fullsiz, eofflag, nentries; - u_quad_t off, toff, verf; + u_quad_t off, toff, verf = 0; int vnopflag; struct nfsm_chain *nmreq, nmrep; @@ -4787,7 +4788,7 @@ nfsrv_commit( { vnode_t vp; struct nfs_filehandle nfh; - struct nfs_export *nx; + struct nfs_export *nx = NULL; struct nfs_export_options *nxo; int error, preattrerr, postattrerr, count; struct vnode_attr preattr, postattr; @@ -5014,7 +5015,7 @@ nfsmerr: maxsize = NFS_MAXDGRAMDATA; prefsize = NFS_PREFDGRAMDATA; } else { - maxsize = prefsize = NFSRV_MAXDATA; + maxsize = prefsize = slp->ns_sobufsize ? slp->ns_sobufsize / 2 : NFSRV_MAXDATA; } nfsm_chain_add_32(error, &nmrep, maxsize); @@ -5051,8 +5052,8 @@ nfsrv_pathconf( vfs_context_t ctx, mbuf_t *mrepp) { - int error, attrerr, linkmax, namemax; - int chownres, notrunc, case_sensitive, case_preserving; + int error, attrerr, linkmax = 0, namemax = 0; + int chownres = 0, notrunc = 0, case_sensitive = 0, case_preserving = 0; vnode_t vp; struct vnode_attr attr; struct nfs_filehandle nfh; diff --git a/bsd/nfs/nfs_socket.c b/bsd/nfs/nfs_socket.c index b0fad27f1..eaca59ada 100644 --- a/bsd/nfs/nfs_socket.c +++ b/bsd/nfs/nfs_socket.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2019 Apple Inc. All rights reserved. + * Copyright (c) 2000-2020 Apple Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -118,6 +118,11 @@ boolean_t current_thread_aborted(void); kern_return_t thread_terminate(thread_t); +ZONE_DECLARE(nfs_fhandle_zone, "fhandle", sizeof(struct fhandle), ZC_NONE); +ZONE_DECLARE(nfs_req_zone, "NFS req", sizeof(struct nfsreq), ZC_NONE); +ZONE_DECLARE(nfsrv_descript_zone, "NFSV3 srvdesc", + sizeof(struct nfsrv_descript), ZC_NONE); + #if CONFIG_NFS_SERVER int nfsrv_sock_max_rec_queue_length = 128; /* max # RPC records queued on (UDP) socket */ @@ -315,7 +320,7 @@ nfs_location_index_cmp(struct nfs_location_index *nlip1, struct nfs_location_ind * Get the mntfromname (or path portion only) for a given location. */ void -nfs_location_mntfromname(struct nfs_fs_locations *locs, struct nfs_location_index idx, char *s, int size, int pathonly) +nfs_location_mntfromname(struct nfs_fs_locations *locs, struct nfs_location_index idx, char *s, size_t size, int pathonly) { struct nfs_fs_location *fsl = locs->nl_locations[idx.nli_loc]; char *p; @@ -530,7 +535,7 @@ int nfs_socket_create( struct nfsmount *nmp, struct sockaddr *sa, - int sotype, + uint8_t sotype, in_port_t port, uint32_t protocol, uint32_t vers, @@ -693,7 +698,7 @@ nfs_socket_options(struct nfsmount *nmp, struct nfs_socket *nso) * Soft mounts will want to abort sooner. */ struct timeval timeo; - int on = 1, proto; + int on = 1, proto, reserve, error; timeo.tv_usec = 0; timeo.tv_sec = (NMFLAG(nmp, SOFT) || nfs_can_squish(nmp)) ? 5 : 60; @@ -708,11 +713,23 @@ nfs_socket_options(struct nfsmount *nmp, struct nfs_socket *nso) sock_setsockopt(nso->nso_so, IPPROTO_TCP, TCP_NODELAY, &on, sizeof(on)); } } - if (nso->nso_sotype == SOCK_DGRAM || nso->nso_saddr->sa_family == AF_LOCAL) { /* set socket buffer sizes for UDP */ - int reserve = (nso->nso_sotype == SOCK_DGRAM) ? NFS_UDPSOCKBUF : (2 * 1024 * 1024); - sock_setsockopt(nso->nso_so, SOL_SOCKET, SO_SNDBUF, &reserve, sizeof(reserve)); - sock_setsockopt(nso->nso_so, SOL_SOCKET, SO_RCVBUF, &reserve, sizeof(reserve)); + + /* set socket buffer sizes for UDP/TCP */ + reserve = (nso->nso_sotype == SOCK_DGRAM) ? NFS_UDPSOCKBUF : MAX(nfs_tcp_sockbuf, nmp->nm_wsize * 2); + { + error = sock_setsockopt(nso->nso_so, SOL_SOCKET, SO_SNDBUF, &reserve, sizeof(reserve)); + } + + if (error) { + log(LOG_INFO, "nfs_socket_options: error %d setting SO_SNDBUF to %u\n", error, reserve); + } + + reserve = (nso->nso_sotype == SOCK_DGRAM) ? NFS_UDPSOCKBUF : MAX(nfs_tcp_sockbuf, nmp->nm_rsize * 2); + error = sock_setsockopt(nso->nso_so, SOL_SOCKET, SO_RCVBUF, &reserve, sizeof(reserve)); + if (error) { + log(LOG_INFO, "nfs_socket_options: error %d setting SO_RCVBUF to %u\n", error, reserve); } + /* set SO_NOADDRERR to detect network changes ASAP */ sock_setsockopt(nso->nso_so, SOL_SOCKET, SO_NOADDRERR, &on, sizeof(on)); /* just playin' it safe with upcalls */ @@ -1212,7 +1229,7 @@ nfs_connect(struct nfsmount *nmp, int verbose, int timeo) uint8_t sotype = nmp->nm_sotype ? nmp->nm_sotype : SOCK_STREAM; fhandle_t *fh = NULL; char *path = NULL; - in_port_t port; + in_port_t port = 0; int addrtotal = 0; /* paranoia... 
check that we have at least one address in the locations */ @@ -1389,10 +1406,10 @@ keepsearching: vfs_statfs(nmp->nm_mountp)->f_mntfromname); } if (fh) { - FREE(fh, M_TEMP); + NFS_ZFREE(nfs_fhandle_zone, fh); } if (path) { - FREE_ZONE(path, MAXPATHLEN, M_NAMEI); + NFS_ZFREE(ZV_NAMEI, path); } NFS_SOCK_DBG("nfs connect %s search failed, returning %d\n", vfs_statfs(nmp->nm_mountp)->f_mntfromname, error); @@ -1617,24 +1634,30 @@ keepsearching: } } } + if (!error) { + error = nfs3_check_lockmode(nmp, saddr, nso->nso_sotype, timeo); + if (error) { + nfs_socket_search_update_error(&nss, error); + nfs_socket_destroy(nso); + return error; + } + } if (saddr) { - MALLOC(fh, fhandle_t *, sizeof(fhandle_t), M_TEMP, M_WAITOK | M_ZERO); + fh = zalloc(nfs_fhandle_zone); } if (saddr && fh) { - MALLOC_ZONE(path, char *, MAXPATHLEN, M_NAMEI, M_WAITOK); + path = zalloc(ZV_NAMEI); } if (!saddr || !fh || !path) { if (!error) { error = ENOMEM; } if (fh) { - FREE(fh, M_TEMP); + NFS_ZFREE(nfs_fhandle_zone, fh); } if (path) { - FREE_ZONE(path, MAXPATHLEN, M_NAMEI); + NFS_ZFREE(ZV_NAMEI, path); } - fh = NULL; - path = NULL; nfs_socket_search_update_error(&nss, error); nfs_socket_destroy(nso); goto keepsearching; @@ -1682,6 +1705,7 @@ keepsearching: if (found && (nmp->nm_auth == RPCAUTH_NONE)) { found = 0; } + OS_FALLTHROUGH; case RPCAUTH_NONE: case RPCAUTH_KRB5: case RPCAUTH_KRB5I: @@ -1696,17 +1720,15 @@ keepsearching: } error = !found ? EAUTH : 0; } - FREE_ZONE(path, MAXPATHLEN, M_NAMEI); - path = NULL; + NFS_ZFREE(ZV_NAMEI, path); if (error) { nfs_socket_search_update_error(&nss, error); - FREE(fh, M_TEMP); - fh = NULL; + NFS_ZFREE(nfs_fhandle_zone, fh); nfs_socket_destroy(nso); goto keepsearching; } if (nmp->nm_fh) { - FREE(nmp->nm_fh, M_TEMP); + NFS_ZFREE(nfs_fhandle_zone, nmp->nm_fh); } nmp->nm_fh = fh; fh = NULL; @@ -1848,10 +1870,10 @@ keepsearching: nmp->nm_nss = NULL; nfs_socket_search_cleanup(&nss); if (fh) { - FREE(fh, M_TEMP); + NFS_ZFREE(nfs_fhandle_zone, fh); } if (path) { - FREE_ZONE(path, MAXPATHLEN, M_NAMEI); + NFS_ZFREE(ZV_NAMEI, path); } NFS_SOCK_DBG("nfs connect %s success\n", vfs_statfs(nmp->nm_mountp)->f_mntfromname); return 0; @@ -2145,14 +2167,21 @@ nfs_mount_sock_thread(void *arg, __unused wait_result_t wr) if (!req) { break; } + /* acquire both locks in the right order: first req->r_mtx and then nmp->nm_lock */ + lck_mtx_unlock(&nmp->nm_lock); + lck_mtx_lock(&req->r_mtx); + lck_mtx_lock(&nmp->nm_lock); + if ((req->r_flags & R_RESENDQ) == 0 || (req->r_rchain.tqe_next == NFSREQNOLIST)) { + lck_mtx_unlock(&req->r_mtx); + continue; + } TAILQ_REMOVE(&nmp->nm_resendq, req, r_rchain); + req->r_flags &= ~R_RESENDQ; req->r_rchain.tqe_next = NFSREQNOLIST; lck_mtx_unlock(&nmp->nm_lock); - lck_mtx_lock(&req->r_mtx); /* Note that we have a reference on the request that was taken nfs_asyncio_resend */ if (req->r_error || req->r_nmrep.nmc_mhead) { dofinish = req->r_callback.rcb_func && !(req->r_flags & R_WAITSENT); - req->r_flags &= ~R_RESENDQ; wakeup(req); lck_mtx_unlock(&req->r_mtx); if (dofinish) { @@ -2188,9 +2217,6 @@ nfs_mount_sock_thread(void *arg, __unused wait_result_t wr) error = nfs_request_send(req, 0); } lck_mtx_lock(&req->r_mtx); - if (req->r_flags & R_RESENDQ) { - req->r_flags &= ~R_RESENDQ; - } if (error) { req->r_error = error; } @@ -2214,9 +2240,6 @@ nfs_mount_sock_thread(void *arg, __unused wait_result_t wr) error = nfs_send(req, 0); lck_mtx_lock(&req->r_mtx); if (!error) { - if (req->r_flags & R_RESENDQ) { - req->r_flags &= ~R_RESENDQ; - } wakeup(req); lck_mtx_unlock(&req->r_mtx); 
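
The nfs_mount_sock_thread() resend-queue hunk above fixes a lock-ordering problem: the old code removed the request from nm_resendq while holding only nm_lock and took r_mtx afterwards. The patch instead drops nm_lock, acquires r_mtx and then nm_lock, and re-checks that the request is still on the queue before dequeueing it, since another thread can race in while nm_lock is dropped. A condensed sketch of that idiom (kernel-internal, identifiers taken from the patch):

	lck_mtx_unlock(&nmp->nm_lock);          /* give up nm_lock so r_mtx can be taken first */
	lck_mtx_lock(&req->r_mtx);
	lck_mtx_lock(&nmp->nm_lock);

	/* re-validate: the request may have left the resend queue while nm_lock was dropped */
	if ((req->r_flags & R_RESENDQ) == 0 || (req->r_rchain.tqe_next == NFSREQNOLIST)) {
		lck_mtx_unlock(&req->r_mtx);
		continue;
	}
	TAILQ_REMOVE(&nmp->nm_resendq, req, r_rchain);
	req->r_flags &= ~R_RESENDQ;             /* cleared exactly where the request leaves the queue */
	req->r_rchain.tqe_next = NFSREQNOLIST;
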
nfs_request_rele(req); @@ -2225,9 +2248,6 @@ nfs_mount_sock_thread(void *arg, __unused wait_result_t wr) } } req->r_error = error; - if (req->r_flags & R_RESENDQ) { - req->r_flags &= ~R_RESENDQ; - } wakeup(req); dofinish = req->r_callback.rcb_func && !(req->r_flags & R_WAITSENT); lck_mtx_unlock(&req->r_mtx); @@ -2451,11 +2471,17 @@ nfs4_mount_callback_setup(struct nfsmount *nmp) } so = nfs4_cb_so; + if (NFS_PORT_INVALID(nfs_callback_port)) { + error = EINVAL; + log(LOG_INFO, "nfs callback setup: error %d nfs_callback_port %d is not valid\n", error, nfs_callback_port); + goto fail; + } + sock_setsockopt(so, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on)); sin.sin_len = sizeof(struct sockaddr_in); sin.sin_family = AF_INET; sin.sin_addr.s_addr = htonl(INADDR_ANY); - sin.sin_port = htons(nfs_callback_port); /* try to use specified port */ + sin.sin_port = htons((in_port_t)nfs_callback_port); /* try to use specified port */ error = sock_bind(so, (struct sockaddr *)&sin); if (error) { log(LOG_INFO, "nfs callback setup: error %d binding listening IPv4 socket\n", error); @@ -2501,7 +2527,7 @@ nfs4_mount_callback_setup(struct nfsmount *nmp) sock_setsockopt(so6, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on)); sock_setsockopt(so6, IPPROTO_IPV6, IPV6_V6ONLY, &on, sizeof(on)); /* try to use specified port or same port as IPv4 */ - port = nfs_callback_port ? nfs_callback_port : nfs4_cb_port; + port = nfs_callback_port ? (in_port_t)nfs_callback_port : nfs4_cb_port; ipv6_bind_again: sin6.sin6_len = sizeof(struct sockaddr_in6); sin6.sin6_family = AF_INET6; @@ -2579,6 +2605,10 @@ nfs4_mount_callback_shutdown(struct nfsmount *nmp) struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 }; lck_mtx_lock(nfs_global_mutex); + if (nmp->nm_cbid == 0) { + lck_mtx_unlock(nfs_global_mutex); + return; + } TAILQ_REMOVE(&nfs4_cb_mounts, nmp, nm_cblink); /* wait for any callbacks in progress to complete */ while (nmp->nm_cbrefs) { @@ -2806,7 +2836,7 @@ nfs4_cb_handler(struct nfs_callback_socket *ncbsp, mbuf_t mreq) mbuf_t mhead = NULL, mrest = NULL, m; struct msghdr msg; struct nfsmount *nmp; - fhandle_t fh; + fhandle_t *fh; nfsnode_t np; nfs_stateid stateid; uint32_t bitmap[NFS_ATTR_BITMAP_LEN], rbitmap[NFS_ATTR_BITMAP_LEN], bmlen, truncate, attrbytes; @@ -2817,6 +2847,7 @@ nfs4_cb_handler(struct nfs_callback_socket *ncbsp, mbuf_t mreq) size_t sentlen = 0; xid = numops = op = status = procnum = taglen = cbid = 0; + fh = zalloc(nfs_fhandle_zone); nfsm_chain_dissect_init(error, &nmreq, mreq); nfsm_chain_get_32(error, &nmreq, xid); // RPC XID @@ -2929,7 +2960,7 @@ nfs4_cb_handler(struct nfs_callback_socket *ncbsp, mbuf_t mreq) case NFS_OP_CB_GETATTR: // (FH, BITMAP) -> (STATUS, BITMAP, ATTRS) np = NULL; - nfsm_chain_get_fh(error, &nmreq, NFS_VER4, &fh); + nfsm_chain_get_fh(error, &nmreq, NFS_VER4, fh); bmlen = NFS_ATTR_BITMAP_LEN; nfsm_chain_get_bitmap(error, &nmreq, bitmap, bmlen); if (error) { @@ -2938,7 +2969,7 @@ nfs4_cb_handler(struct nfs_callback_socket *ncbsp, mbuf_t mreq) numops = 0; /* don't process any more ops */ } else { /* find the node for the file handle */ - error = nfs_nget(nmp->nm_mountp, NULL, NULL, fh.fh_data, fh.fh_len, NULL, NULL, RPCAUTH_UNKNOWN, NG_NOCREATE, &np); + error = nfs_nget(nmp->nm_mountp, NULL, NULL, fh->fh_data, fh->fh_len, NULL, NULL, RPCAUTH_UNKNOWN, NG_NOCREATE, &np); if (error || !np) { status = NFSERR_BADHANDLE; error = 0; @@ -2995,14 +3026,14 @@ nfs4_cb_handler(struct nfs_callback_socket *ncbsp, mbuf_t mreq) np = NULL; nfsm_chain_get_stateid(error, &nmreq, &stateid); nfsm_chain_get_32(error, 
&nmreq, truncate); - nfsm_chain_get_fh(error, &nmreq, NFS_VER4, &fh); + nfsm_chain_get_fh(error, &nmreq, NFS_VER4, fh); if (error) { status = error; error = 0; numops = 0; /* don't process any more ops */ } else { /* find the node for the file handle */ - error = nfs_nget(nmp->nm_mountp, NULL, NULL, fh.fh_data, fh.fh_len, NULL, NULL, RPCAUTH_UNKNOWN, NG_NOCREATE, &np); + error = nfs_nget(nmp->nm_mountp, NULL, NULL, fh->fh_data, fh->fh_len, NULL, NULL, RPCAUTH_UNKNOWN, NG_NOCREATE, &np); if (error || !np) { status = NFSERR_BADHANDLE; error = 0; @@ -3160,6 +3191,7 @@ out: if (mreq) { mbuf_freem(mreq); } + NFS_ZFREE(nfs_fhandle_zone, fh); return error; } #endif /* CONFIG_NFS4 */ @@ -3581,6 +3613,7 @@ again: if (sotype != SOCK_STREAM) { break; } + OS_FALLTHROUGH; case EPIPE: case EADDRNOTAVAIL: case ENETDOWN: @@ -3809,6 +3842,7 @@ nfs_request_match_reply(struct nfsmount *nmp, mbuf_t mrep) u_int32_t reply = 0, rxid = 0; int error = 0, asyncioq, t1; + bzero(&nmrep, sizeof(nmrep)); /* Get the xid and check that it is an rpc reply */ nfsm_chain_dissect_init(error, &nmrep, mrep); nfsm_chain_get_32(error, &nmrep, rxid); @@ -4021,16 +4055,10 @@ nfs_request_create( req = *reqp; if (!req) { /* allocate a new NFS request structure */ - MALLOC_ZONE(newreq, struct nfsreq*, sizeof(*newreq), M_NFSREQ, M_WAITOK); - if (!newreq) { - mbuf_freem(nmrest->nmc_mhead); - nmrest->nmc_mhead = NULL; - return ENOMEM; - } - req = newreq; + req = newreq = zalloc_flags(nfs_req_zone, Z_WAITOK | Z_ZERO); + } else { + bzero(req, sizeof(*req)); } - - bzero(req, sizeof(*req)); if (req == newreq) { req->r_flags = R_ALLOCATED; } @@ -4038,7 +4066,7 @@ nfs_request_create( nmp = VFSTONFS(np ? NFSTOMP(np) : mp); if (nfs_mount_gone(nmp)) { if (newreq) { - FREE_ZONE(newreq, sizeof(*newreq), M_NFSREQ); + NFS_ZFREE(nfs_req_zone, newreq); } return ENXIO; } @@ -4049,7 +4077,7 @@ nfs_request_create( mbuf_freem(nmrest->nmc_mhead); nmrest->nmc_mhead = NULL; if (newreq) { - FREE_ZONE(newreq, sizeof(*newreq), M_NFSREQ); + NFS_ZFREE(nfs_req_zone, newreq); } return ENXIO; } @@ -4156,14 +4184,11 @@ nfs_request_destroy(struct nfsreq *req) wakeup(req2); } } - assert((req->r_flags & R_RESENDQ) == 0); /* XXX should we just remove this conditional, we should have a reference if we're resending */ - if (req->r_rchain.tqe_next != NFSREQNOLIST) { + if ((req->r_flags & R_RESENDQ) && req->r_rchain.tqe_next != NFSREQNOLIST) { TAILQ_REMOVE(&nmp->nm_resendq, req, r_rchain); + req->r_flags &= ~R_RESENDQ; req->r_rchain.tqe_next = NFSREQNOLIST; - if (req->r_flags & R_RESENDQ) { - req->r_flags &= ~R_RESENDQ; - } } if (req->r_cchain.tqe_next != NFSREQNOLIST) { TAILQ_REMOVE(&nmp->nm_cwndq, req, r_cchain); @@ -4210,7 +4235,7 @@ nfs_request_destroy(struct nfsreq *req) } lck_mtx_destroy(&req->r_mtx, nfs_request_grp); if (req->r_flags & R_ALLOCATED) { - FREE_ZONE(req, sizeof(*req), M_NFSREQ); + NFS_ZFREE(nfs_req_zone, req); } } @@ -4322,6 +4347,18 @@ nfs_request_send(struct nfsreq *req, int wait) OSAddAtomic64(1, &nfsstats.rpcrequests); + /* + * Make sure the request is not in the queue. + */ + if (req->r_lflags & RL_QUEUED) { +#if DEVELOPMENT + panic("nfs_request_send: req %p is already in global requests queue", req); +#else + TAILQ_REMOVE(&nfs_reqq, req, r_chain); + req->r_lflags &= ~RL_QUEUED; +#endif /* DEVELOPMENT */ + } + /* * Chain request into list of outstanding requests. Be sure * to put it LAST so timer finds oldest requests first. 
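
In the nfs_request_create() hunk above, the fallible MALLOC_ZONE plus explicit bzero is replaced by zalloc_flags(nfs_req_zone, Z_WAITOK | Z_ZERO): with Z_WAITOK the allocation may block but does not fail, and Z_ZERO returns zeroed storage, so the ENOMEM branch and the unconditional bzero both disappear. A compressed sketch of the resulting logic (kernel-internal, identifiers from the patch):

	if (!req) {
		/* blocks until an element is available; arrives already zeroed */
		req = newreq = zalloc_flags(nfs_req_zone, Z_WAITOK | Z_ZERO);
	} else {
		/* caller supplied its own request structure; clear it explicitly */
		bzero(req, sizeof(*req));
	}
	if (req == newreq) {
		req->r_flags = R_ALLOCATED;     /* remember to return this one to the zone later */
	}
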
@@ -4816,11 +4853,12 @@ nfs_request2( u_int64_t *xidp, int *status) { - struct nfsreq rq, *req = &rq; + struct nfsreq *req; int error; + req = zalloc_flags(nfs_req_zone, Z_WAITOK); if ((error = nfs_request_create(np, mp, nmrest, procnum, thd, cred, &req))) { - return error; + goto out_free; } req->r_flags |= (flags & (R_OPTMASK | R_SOFT)); if (si) { @@ -4848,6 +4886,8 @@ nfs_request2( FSDBG_BOT(273, R_XID32(req->r_xid), np, procnum, error); nfs_request_rele(req); +out_free: + NFS_ZFREE(nfs_req_zone, req); return error; } @@ -4870,18 +4910,20 @@ nfs_request_gss( struct nfsm_chain *nmrepp, int *status) { - struct nfsreq rq, *req = &rq; + struct nfsreq *req; int error, wait = 1; + req = zalloc_flags(nfs_req_zone, Z_WAITOK); if ((error = nfs_request_create(NULL, mp, nmrest, NFSPROC_NULL, thd, cred, &req))) { - return error; + goto out_free; } req->r_flags |= (flags & R_OPTMASK); if (cp == NULL) { printf("nfs_request_gss request has no context\n"); nfs_request_rele(req); - return NFSERR_EAUTH; + error = NFSERR_EAUTH; + goto out_free; } nfs_gss_clnt_ctx_ref(req, cp); @@ -4918,7 +4960,8 @@ nfs_request_gss( nfs_gss_clnt_ctx_unref(req); nfs_request_rele(req); - +out_free: + NFS_ZFREE(nfs_req_zone, req); return error; } #endif /* CONFIG_NFS_GSS */ @@ -4973,17 +5016,15 @@ nfs_request_async( nmp = req->r_nmp; if ((req->r_flags & R_RESENDQ) && !nfs_mount_gone(nmp)) { lck_mtx_lock(&nmp->nm_lock); - if ((nmp->nm_state & NFSSTA_RECOVER) && (req->r_rchain.tqe_next != NFSREQNOLIST)) { + if ((req->r_flags & R_RESENDQ) && (nmp->nm_state & NFSSTA_RECOVER) && (req->r_rchain.tqe_next != NFSREQNOLIST)) { /* * It's not going to get off the resend queue if we're in recovery. * So, just take it off ourselves. We could be holding mount state * busy and thus holding up the start of recovery. */ TAILQ_REMOVE(&nmp->nm_resendq, req, r_rchain); + req->r_flags &= ~R_RESENDQ; req->r_rchain.tqe_next = NFSREQNOLIST; - if (req->r_flags & R_RESENDQ) { - req->r_flags &= ~R_RESENDQ; - } lck_mtx_unlock(&nmp->nm_lock); req->r_flags |= R_SENDING; lck_mtx_unlock(&req->r_mtx); @@ -5041,17 +5082,15 @@ nfs_request_async_finish( if ((nmp = req->r_nmp)) { lck_mtx_lock(&nmp->nm_lock); - if ((nmp->nm_state & NFSSTA_RECOVER) && (req->r_rchain.tqe_next != NFSREQNOLIST)) { + if ((req->r_flags & R_RESENDQ) && (nmp->nm_state & NFSSTA_RECOVER) && (req->r_rchain.tqe_next != NFSREQNOLIST)) { /* * It's not going to get off the resend queue if we're in recovery. * So, just take it off ourselves. We could be holding mount state * busy and thus holding up the start of recovery. 
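
nfs_request2() and nfs_request_gss() above stop declaring struct nfsreq on the kernel stack and take one from nfs_req_zone instead, presumably to keep the fairly large request structure off the limited kernel stack; every exit now funnels through an out_free label so the element always goes back to the zone. A trimmed sketch of that lifetime (kernel-internal, identifiers from the patch):

	struct nfsreq *req;
	int error;

	req = zalloc_flags(nfs_req_zone, Z_WAITOK);
	if ((error = nfs_request_create(np, mp, nmrest, procnum, thd, cred, &req))) {
		goto out_free;                  /* creation failed before the request was linked anywhere */
	}
	/* ... issue the RPC and wait for the reply ... */
	nfs_request_rele(req);
out_free:
	NFS_ZFREE(nfs_req_zone, req);           /* single release point for all paths */
	return error;
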
*/ TAILQ_REMOVE(&nmp->nm_resendq, req, r_rchain); + req->r_flags &= ~R_RESENDQ; req->r_rchain.tqe_next = NFSREQNOLIST; - if (req->r_flags & R_RESENDQ) { - req->r_flags &= ~R_RESENDQ; - } /* Remove the R_RESENDQ reference */ assert(req->r_refs > 0); req->r_refs--; @@ -5875,8 +5914,7 @@ nfs_portmap_lookup( struct nfsm_chain nmreq, nmrep; mbuf_t mreq; int error = 0, ip, pmprog, pmvers, pmproc; - uint32_t ualen = 0; - uint32_t port; + uint32_t ualen = 0, scopeid = 0, port32; uint64_t xid = 0; char uaddr[MAX_IPv6_STR_LEN + 16]; @@ -5951,9 +5989,13 @@ tryagain: /* grab port from portmap response */ if (ip == 4) { - nfsm_chain_get_32(error, &nmrep, port); + nfsm_chain_get_32(error, &nmrep, port32); if (!error) { - ((struct sockaddr_in*)sa)->sin_port = htons(port); + if (NFS_PORT_INVALID(port32)) { + error = EBADRPC; + } else { + ((struct sockaddr_in*)sa)->sin_port = htons((in_port_t)port32); + } } } else { /* get uaddr string and convert to sockaddr */ @@ -5976,9 +6018,16 @@ tryagain: NFS_SOCK_DBG("Got uaddr %s\n", uaddr); if (!error) { uaddr[ualen] = '\0'; + if (ip == 6) { + scopeid = ((struct sockaddr_in6*)saddr)->sin6_scope_id; + } if (!nfs_uaddr2sockaddr(uaddr, saddr)) { error = EIO; } + if (ip == 6 && scopeid != ((struct sockaddr_in6*)saddr)->sin6_scope_id) { + NFS_SOCK_DBG("Setting scope_id from %u to %u\n", ((struct sockaddr_in6*)saddr)->sin6_scope_id, scopeid); + ((struct sockaddr_in6*)saddr)->sin6_scope_id = scopeid; + } } } } @@ -6038,6 +6087,7 @@ nfs_msg(thread_t thd, #define NFS_SQUISH_SHUTDOWN 0x1000 /* Squish all mounts on shutdown. Currently not implemented */ uint32_t nfs_squishy_flags = NFS_SQUISH_MOBILE_ONLY | NFS_SQUISH_AUTOMOUNTED_ONLY | NFS_SQUISH_QUICK; +uint32_t nfs_tcp_sockbuf = 128 * 1024; /* Default value of tcp_sendspace and tcp_recvspace */ int32_t nfs_is_mobile; #define NFS_SQUISHY_DEADTIMEOUT 8 /* Dead time out for squishy mounts */ @@ -6652,7 +6702,8 @@ nfsrv_getstream(struct nfsrv_sock *slp, int waitflag) { mbuf_t m; char *cp1, *cp2, *mdata; - int len, mlen, error; + int error; + size_t len, mlen; mbuf_t om, m2, recm; u_int32_t recmark; @@ -6814,11 +6865,7 @@ nfsrv_dorec( if (!(slp->ns_flag & (SLP_VALID | SLP_DOREC)) || (slp->ns_rec == NULL)) { return ENOBUFS; } - MALLOC_ZONE(nd, struct nfsrv_descript *, - sizeof(struct nfsrv_descript), M_NFSRVDESC, M_WAITOK); - if (!nd) { - return ENOMEM; - } + nd = zalloc(nfsrv_descript_zone); m = slp->ns_rec; slp->ns_rec = mbuf_nextpkt(m); if (slp->ns_rec) { @@ -6849,7 +6896,7 @@ nfsrv_dorec( if (nd->nd_gss_context) { nfs_gss_svc_ctx_deref(nd->nd_gss_context); } - FREE_ZONE(nd, sizeof(*nd), M_NFSRVDESC); + NFS_ZFREE(nfsrv_descript_zone, nd); return error; } nd->nd_mrep = NULL; @@ -6872,7 +6919,7 @@ nfsrv_getreq(struct nfsrv_descript *nd) int error = 0; uid_t user_id; gid_t group_id; - int ngroups; + short ngroups; uint32_t val; nd->nd_cr = NULL; @@ -6964,7 +7011,7 @@ nfsrv_getreq(struct nfsrv_descript *nd) } } nfsmout_if(error); - ngroups = (len >= NGROUPS) ? NGROUPS : (len + 1); + ngroups = (len >= NGROUPS) ? NGROUPS : (short)(len + 1); if (ngroups > 1) { nfsrv_group_sort(&temp_pcred.cr_groups[0], ngroups); } diff --git a/bsd/nfs/nfs_subs.c b/bsd/nfs/nfs_subs.c index b16d31846..b4be3353f 100644 --- a/bsd/nfs/nfs_subs.c +++ b/bsd/nfs/nfs_subs.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2018 Apple Inc. All rights reserved. + * Copyright (c) 2000-2020 Apple Inc. All rights reserved. 
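
The nfs_portmap_lookup() hunk above stops reading the portmapper reply straight into a 16-bit variable: the wire value is 32 bits, so it is read into port32, rejected with EBADRPC when NFS_PORT_INVALID() flags it (the macro's definition is not shown in this hunk; reading it as "outside the valid 16-bit port range" is my assumption), and only then narrowed and byte-swapped. Sketch (kernel-internal, identifiers from the patch):

	uint32_t port32 = 0;

	nfsm_chain_get_32(error, &nmrep, port32);       /* portmapper replies with a 32-bit port */
	if (!error) {
		if (NFS_PORT_INVALID(port32)) {
			error = EBADRPC;                /* refuse to silently truncate a bogus value */
		} else {
			((struct sockaddr_in *)sa)->sin_port = htons((in_port_t)port32);
		}
	}
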
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -149,10 +149,12 @@ vtonfs_type(enum vtype vtype, int nfsvers) if (nfsvers > NFS_VER2) { return NFSOCK; } + return NFNON; case VFIFO: if (nfsvers > NFS_VER2) { return NFFIFO; } + return NFNON; case VBAD: case VSTR: case VCPLX: @@ -181,18 +183,22 @@ nfstov_type(nfstype nvtype, int nfsvers) if (nfsvers > NFS_VER2) { return VSOCK; } + OS_FALLTHROUGH; case NFFIFO: if (nfsvers > NFS_VER2) { return VFIFO; } + OS_FALLTHROUGH; case NFATTRDIR: if (nfsvers > NFS_VER3) { return VDIR; } + OS_FALLTHROUGH; case NFNAMEDATTR: if (nfsvers > NFS_VER3) { return VREG; } + OS_FALLTHROUGH; default: return VNON; } @@ -313,7 +319,7 @@ nfsm_mbuf_get_list(size_t size, mbuf_t *mp, int *mbcnt) len = 0; while (len < size) { - nfsm_mbuf_get(error, &m, (size - len)); + nfsm_mbuf_getcluster(error, &m, (size - len)); if (error) { break; } @@ -359,7 +365,7 @@ nfsm_chain_new_mbuf(struct nfsm_chain *nmc, size_t sizehint) } /* allocate a new mbuf */ - nfsm_mbuf_get(error, &mb, sizehint); + nfsm_mbuf_getcluster(error, &mb, sizehint); if (error) { return error; } @@ -393,9 +399,9 @@ nfsm_chain_new_mbuf(struct nfsm_chain *nmc, size_t sizehint) * Add "len" bytes of opaque data pointed to by "buf" to the given chain. */ int -nfsm_chain_add_opaque_f(struct nfsm_chain *nmc, const u_char *buf, uint32_t len) +nfsm_chain_add_opaque_f(struct nfsm_chain *nmc, const u_char *buf, size_t len) { - uint32_t paddedlen, tlen; + size_t paddedlen, tlen; int error; paddedlen = nfsm_rndup(len); @@ -436,9 +442,9 @@ nfsm_chain_add_opaque_f(struct nfsm_chain *nmc, const u_char *buf, uint32_t len) * Do not XDR pad. */ int -nfsm_chain_add_opaque_nopad_f(struct nfsm_chain *nmc, const u_char *buf, uint32_t len) +nfsm_chain_add_opaque_nopad_f(struct nfsm_chain *nmc, const u_char *buf, size_t len) { - uint32_t tlen; + size_t tlen; int error; while (len > 0) { @@ -464,9 +470,9 @@ nfsm_chain_add_opaque_nopad_f(struct nfsm_chain *nmc, const u_char *buf, uint32_ * Add "len" bytes of data from "uio" to the given chain. */ int -nfsm_chain_add_uio(struct nfsm_chain *nmc, uio_t uio, uint32_t len) +nfsm_chain_add_uio(struct nfsm_chain *nmc, uio_t uio, size_t len) { - uint32_t paddedlen, tlen; + size_t paddedlen, tlen; int error; paddedlen = nfsm_rndup(len); @@ -481,10 +487,8 @@ nfsm_chain_add_uio(struct nfsm_chain *nmc, uio_t uio, uint32_t len) tlen = MIN(nmc->nmc_left, paddedlen); if (tlen) { if (len) { - if (tlen > len) { - tlen = len; - } - uiomove(nmc->nmc_ptr, tlen, uio); + tlen = MIN(INT32_MAX, MIN(tlen, len)); + uiomove(nmc->nmc_ptr, (int)tlen, uio); } else { bzero(nmc->nmc_ptr, tlen); } @@ -503,11 +507,11 @@ nfsm_chain_add_uio(struct nfsm_chain *nmc, uio_t uio, uint32_t len) * Find the length of the NFS mbuf chain * up to the current encoding/decoding offset. */ -int +size_t nfsm_chain_offset(struct nfsm_chain *nmc) { mbuf_t mb; - int len = 0; + size_t len = 0; for (mb = nmc->nmc_mhead; mb; mb = mbuf_next(mb)) { if (mb == nmc->nmc_mcur) { @@ -525,7 +529,7 @@ nfsm_chain_offset(struct nfsm_chain *nmc) * Advance an nfsm_chain by "len" bytes. */ int -nfsm_chain_advance(struct nfsm_chain *nmc, uint32_t len) +nfsm_chain_advance(struct nfsm_chain *nmc, size_t len) { mbuf_t mb; @@ -553,9 +557,9 @@ nfsm_chain_advance(struct nfsm_chain *nmc, uint32_t len) * Reverse decode offset in an nfsm_chain by "len" bytes. 
*/ int -nfsm_chain_reverse(struct nfsm_chain *nmc, uint32_t len) +nfsm_chain_reverse(struct nfsm_chain *nmc, size_t len) { - uint32_t mlen, new_offset; + size_t mlen, new_offset; int error = 0; mlen = nmc->nmc_ptr - (caddr_t) mbuf_data(nmc->nmc_mcur); @@ -587,7 +591,8 @@ int nfsm_chain_get_opaque_pointer_f(struct nfsm_chain *nmc, uint32_t len, u_char **pptr) { mbuf_t mbcur, mb; - uint32_t left, need, mblen, cplen, padlen; + uint32_t padlen; + size_t mblen, cplen, need, left; u_char *ptr; int error = 0; @@ -626,7 +631,7 @@ nfsm_chain_get_opaque_pointer_f(struct nfsm_chain *nmc, uint32_t len, u_char **p * The needed bytes won't fit in the current mbuf so we'll * allocate a new mbuf to hold the contiguous range of data. */ - nfsm_mbuf_get(error, &mb, len); + nfsm_mbuf_getcluster(error, &mb, len); if (error) { return error; } @@ -746,9 +751,9 @@ nfsm_chain_get_opaque_pointer_f(struct nfsm_chain *nmc, uint32_t len, u_char **p * The nfsm_chain is advanced by nfsm_rndup("len") bytes. */ int -nfsm_chain_get_opaque_f(struct nfsm_chain *nmc, uint32_t len, u_char *buf) +nfsm_chain_get_opaque_f(struct nfsm_chain *nmc, size_t len, u_char *buf) { - uint32_t cplen, padlen; + size_t cplen, padlen; int error = 0; padlen = nfsm_rndup(len) - len; @@ -792,9 +797,9 @@ nfsm_chain_get_opaque_f(struct nfsm_chain *nmc, uint32_t len, u_char *buf) * The nfsm_chain is advanced by nfsm_rndup("len") bytes. */ int -nfsm_chain_get_uio(struct nfsm_chain *nmc, uint32_t len, uio_t uio) +nfsm_chain_get_uio(struct nfsm_chain *nmc, size_t len, uio_t uio) { - uint32_t cplen, padlen; + size_t cplen, padlen; int error = 0; padlen = nfsm_rndup(len) - len; @@ -804,7 +809,8 @@ nfsm_chain_get_uio(struct nfsm_chain *nmc, uint32_t len, uio_t uio) /* copy as much as we need/can */ cplen = MIN(nmc->nmc_left, len); if (cplen) { - error = uiomove(nmc->nmc_ptr, cplen, uio); + cplen = MIN(cplen, INT32_MAX); + error = uiomove(nmc->nmc_ptr, (int)cplen, uio); if (error) { return error; } @@ -836,7 +842,7 @@ nfsm_chain_get_uio(struct nfsm_chain *nmc, uint32_t len, uio_t uio) #if CONFIG_NFS_CLIENT int -nfsm_chain_add_string_nfc(struct nfsm_chain *nmc, const uint8_t *s, uint32_t slen) +nfsm_chain_add_string_nfc(struct nfsm_chain *nmc, const uint8_t *s, size_t slen) { uint8_t smallbuf[64]; uint8_t *nfcname = smallbuf; @@ -846,10 +852,8 @@ nfsm_chain_add_string_nfc(struct nfsm_chain *nmc, const uint8_t *s, uint32_t sle error = utf8_normalizestr(s, slen, nfcname, &nfclen, buflen, UTF_PRECOMPOSED | UTF_NO_NULL_TERM); if (error == ENAMETOOLONG) { buflen = MAXPATHLEN; - MALLOC_ZONE(nfcname, uint8_t *, MAXPATHLEN, M_NAMEI, M_WAITOK); - if (nfcname) { - error = utf8_normalizestr(s, slen, nfcname, &nfclen, buflen, UTF_PRECOMPOSED | UTF_NO_NULL_TERM); - } + nfcname = zalloc(ZV_NAMEI); + error = utf8_normalizestr(s, slen, nfcname, &nfclen, buflen, UTF_PRECOMPOSED | UTF_NO_NULL_TERM); } /* if we got an error, just use the original string */ @@ -860,7 +864,7 @@ nfsm_chain_add_string_nfc(struct nfsm_chain *nmc, const uint8_t *s, uint32_t sle } if (nfcname && (nfcname != smallbuf)) { - FREE_ZONE(nfcname, MAXPATHLEN, M_NAMEI); + NFS_ZFREE(ZV_NAMEI, nfcname); } return error; } @@ -1054,7 +1058,7 @@ nfs_get_xid(uint64_t *xidp) nfs_xidwrap++; nfs_xid++; } - *xidp = nfs_xid + ((uint64_t)nfs_xidwrap << 32); + *xidp = nfs_xid + (nfs_xidwrap << 32); lck_mtx_unlock(nfs_request_mutex); } @@ -1090,12 +1094,12 @@ nfsm_rpchead( * Just a wrapper around kauth_cred_getgroups to handle the case of a server supporting less * than NGROUPS. 
*/ -static int -get_auxiliary_groups(kauth_cred_t cred, gid_t groups[NGROUPS], int count) +static size_t +get_auxiliary_groups(kauth_cred_t cred, gid_t groups[NGROUPS], size_t count) { gid_t pgid; - int maxcount = count < NGROUPS ? count + 1 : NGROUPS; - int i; + size_t maxcount = count < NGROUPS ? count + 1 : NGROUPS; + size_t i; for (i = 0; i < NGROUPS; i++) { groups[i] = -2; /* Initialize to the nobody group */ @@ -1131,11 +1135,12 @@ nfsm_rpchead2(__unused struct nfsmount *nmp, int sotype, int prog, int vers, int kauth_cred_t cred, struct nfsreq *req, mbuf_t mrest, u_int64_t *xidp, mbuf_t *mreqp) { mbuf_t mreq, mb; - int error, i, auth_len = 0, authsiz, reqlen; + size_t i; + int error, auth_len = 0, authsiz, reqlen; size_t headlen; struct nfsm_chain nmreq; gid_t grouplist[NGROUPS]; - int groupcount; + size_t groupcount = 0; /* calculate expected auth length */ switch (auth_type) { @@ -1144,15 +1149,12 @@ nfsm_rpchead2(__unused struct nfsmount *nmp, int sotype, int prog, int vers, int break; case RPCAUTH_SYS: { - int count = nmp->nm_numgrps < NGROUPS ? nmp->nm_numgrps : NGROUPS; + size_t count = nmp->nm_numgrps < NGROUPS ? nmp->nm_numgrps : NGROUPS; if (!cred) { return EINVAL; } groupcount = get_auxiliary_groups(cred, grouplist, count); - if (groupcount < 0) { - return EINVAL; - } auth_len = ((uint32_t)groupcount + 5) * NFSX_UNSIGNED; break; } @@ -1265,7 +1267,7 @@ add_cred: case RPCAUTH_KRB5P: error = nfs_gss_clnt_cred_put(req, &nmreq, mrest); if (error == ENEEDAUTH) { - int count = nmp->nm_numgrps < NGROUPS ? nmp->nm_numgrps : NGROUPS; + size_t count = nmp->nm_numgrps < NGROUPS ? nmp->nm_numgrps : NGROUPS; /* * Use sec=sys for this user @@ -1273,9 +1275,6 @@ add_cred: error = 0; req->r_auth = auth_type = RPCAUTH_SYS; groupcount = get_auxiliary_groups(cred, grouplist, count); - if (groupcount < 0) { - return EINVAL; - } auth_len = ((uint32_t)groupcount + 5) * NFSX_UNSIGNED; authsiz = nfsm_rndup(auth_len); goto add_cred; @@ -1331,8 +1330,7 @@ nfs_parsefattr( int error = 0; enum vtype vtype; nfstype nvtype; - u_short vmode; - uint32_t val, val2; + uint32_t vmode, val, val2; dev_t rdev; val = val2 = 0; @@ -1573,47 +1571,62 @@ nfs_loadattrcache( } if (/* Oh, C... 
*/ #if CONFIG_NFS4 - ((nmp->nm_vers >= NFS_VER4) && (nvap->nva_change != npnvap->nva_change)) || + ((nmp->nm_vers >= NFS_VER4) && NFS_BITMAP_ISSET(nvap->nva_bitmap, NFS_FATTR_CHANGE) && (nvap->nva_change != npnvap->nva_change)) || #endif - (NFS_BITMAP_ISSET(npnvap->nva_bitmap, NFS_FATTR_TIME_MODIFY) && + (NFS_BITMAP_ISSET(nvap->nva_bitmap, NFS_FATTR_TIME_MODIFY) && ((nvap->nva_timesec[NFSTIME_MODIFY] != npnvap->nva_timesec[NFSTIME_MODIFY]) || (nvap->nva_timensec[NFSTIME_MODIFY] != npnvap->nva_timensec[NFSTIME_MODIFY])))) { events |= VNODE_EVENT_ATTRIB | VNODE_EVENT_WRITE; } - if (!events && NFS_BITMAP_ISSET(npnvap->nva_bitmap, NFS_FATTR_RAWDEV) && + if (!events && NFS_BITMAP_ISSET(nvap->nva_bitmap, NFS_FATTR_RAWDEV) && ((nvap->nva_rawdev.specdata1 != npnvap->nva_rawdev.specdata1) || (nvap->nva_rawdev.specdata2 != npnvap->nva_rawdev.specdata2))) { events |= VNODE_EVENT_ATTRIB; } - if (!events && NFS_BITMAP_ISSET(npnvap->nva_bitmap, NFS_FATTR_FILEID) && + if (!events && NFS_BITMAP_ISSET(nvap->nva_bitmap, NFS_FATTR_FILEID) && (nvap->nva_fileid != npnvap->nva_fileid)) { events |= VNODE_EVENT_ATTRIB; } - if (!events && NFS_BITMAP_ISSET(npnvap->nva_bitmap, NFS_FATTR_ARCHIVE) && + if (!events && NFS_BITMAP_ISSET(nvap->nva_bitmap, NFS_FATTR_ARCHIVE) && ((nvap->nva_flags & NFS_FFLAG_ARCHIVED) != (npnvap->nva_flags & NFS_FFLAG_ARCHIVED))) { events |= VNODE_EVENT_ATTRIB; } - if (!events && NFS_BITMAP_ISSET(npnvap->nva_bitmap, NFS_FATTR_HIDDEN) && + if (!events && NFS_BITMAP_ISSET(nvap->nva_bitmap, NFS_FATTR_HIDDEN) && ((nvap->nva_flags & NFS_FFLAG_HIDDEN) != (npnvap->nva_flags & NFS_FFLAG_HIDDEN))) { events |= VNODE_EVENT_ATTRIB; } - if (!events && NFS_BITMAP_ISSET(npnvap->nva_bitmap, NFS_FATTR_TIME_CREATE) && + if (!events && NFS_BITMAP_ISSET(nvap->nva_bitmap, NFS_FATTR_TIME_CREATE) && ((nvap->nva_timesec[NFSTIME_CREATE] != npnvap->nva_timesec[NFSTIME_CREATE]) || (nvap->nva_timensec[NFSTIME_CREATE] != npnvap->nva_timensec[NFSTIME_CREATE]))) { events |= VNODE_EVENT_ATTRIB; } - if (!events && NFS_BITMAP_ISSET(npnvap->nva_bitmap, NFS_FATTR_TIME_BACKUP) && + if (!events && NFS_BITMAP_ISSET(nvap->nva_bitmap, NFS_FATTR_TIME_BACKUP) && ((nvap->nva_timesec[NFSTIME_BACKUP] != npnvap->nva_timesec[NFSTIME_BACKUP]) || (nvap->nva_timensec[NFSTIME_BACKUP] != npnvap->nva_timensec[NFSTIME_BACKUP]))) { events |= VNODE_EVENT_ATTRIB; } } +#if CONFIG_NFS4 /* Copy the attributes to the attribute cache */ - bcopy((caddr_t)nvap, (caddr_t)npnvap, sizeof(*nvap)); + if (nmp->nm_vers >= NFS_VER4 && npnvap->nva_flags & NFS_FFLAG_PARTIAL_WRITE) { + /* + * NFSv4 WRITE RPCs contain partial GETATTR requests - only type, change, size, metadatatime and modifytime are requested. + * In such cases, we do not update the time stamp - but the requested attributes. + */ + NFS_BITMAP_COPY_ATTR(nvap, npnvap, TYPE, type); + NFS_BITMAP_COPY_ATTR(nvap, npnvap, CHANGE, change); + NFS_BITMAP_COPY_ATTR(nvap, npnvap, SIZE, size); + NFS_BITMAP_COPY_TIME(nvap, npnvap, METADATA, CHANGE); + NFS_BITMAP_COPY_TIME(nvap, npnvap, MODIFY, MODIFY); + } else +#endif /* CONFIG_NFS4 */ + { + bcopy((caddr_t)nvap, (caddr_t)npnvap, sizeof(*nvap)); + microuptime(&now); + np->n_attrstamp = now.tv_sec; + } - microuptime(&now); - np->n_attrstamp = now.tv_sec; np->n_xid = *xidp; /* NFS_FFLAG_IS_ATTR and NFS_FFLAG_TRIGGER_REFERRAL need to be sticky... 
*/ if (vp && xattr) { @@ -1623,7 +1636,7 @@ nfs_loadattrcache( nvap->nva_flags |= referral; } - if (NFS_BITMAP_ISSET(npnvap->nva_bitmap, NFS_FATTR_ACL)) { + if (NFS_BITMAP_ISSET(nvap->nva_bitmap, NFS_FATTR_ACL)) { /* we're updating the ACL */ if (nvap->nva_acl) { /* make a copy of the acl for the cache */ @@ -1642,7 +1655,7 @@ nfs_loadattrcache( acl = NULL; } } - if (NFS_BITMAP_ISSET(npnvap->nva_bitmap, NFS_FATTR_ACL)) { + if (NFS_BITMAP_ISSET(nvap->nva_bitmap, NFS_FATTR_ACL)) { /* update the ACL timestamp */ np->n_aclstamp = now.tv_sec; } else { @@ -1720,13 +1733,13 @@ out: * Calculate the attribute timeout based on * how recently the file has been modified. */ -int +long nfs_attrcachetimeout(nfsnode_t np) { struct nfsmount *nmp; struct timeval now; int isdir; - uint32_t timeo; + long timeo; nmp = NFSTONMP(np); if (nfs_mount_gone(nmp)) { @@ -1778,7 +1791,7 @@ nfs_getattrcache(nfsnode_t np, struct nfs_vattr *nvaper, int flags) { struct nfs_vattr *nvap; struct timeval nowup; - int32_t timeo; + long timeo; struct nfsmount *nmp; /* Check if the attributes are valid. */ @@ -2099,7 +2112,7 @@ nfs_uaddr2sockaddr(const char *uaddr, struct sockaddr *addr) sin6->sin6_family = AF_INET6; bcopy(a, &sin6->sin6_addr.s6_addr, sizeof(struct in6_addr)); if ((dots == 5) || (dots == 2)) { - sin6->sin6_port = htons((a[16] << 8) | a[17]); + sin6->sin6_port = htons((in_port_t)((a[16] << 8) | a[17])); } if (pscope) { for (p = pscope; IS_DIGIT(*p); p++) { @@ -2114,7 +2127,7 @@ nfs_uaddr2sockaddr(const char *uaddr, struct sockaddr *addr) ifnet_release(interface); } } else { /* decimal number */ - sin6->sin6_scope_id = strtoul(pscope, NULL, 10); + sin6->sin6_scope_id = (uint32_t)strtoul(pscope, NULL, 10); } /* XXX should we also embed scope id for linklocal? */ } @@ -2134,7 +2147,7 @@ nfs_uaddr2sockaddr(const char *uaddr, struct sockaddr *addr) sin->sin_family = AF_INET; bcopy(a, &sin->sin_addr.s_addr, sizeof(struct in_addr)); if (dots == 5) { - sin->sin_port = htons((a[4] << 8) | a[5]); + sin->sin_port = htons((in_port_t)((a[4] << 8) | a[5])); } } return 1; @@ -2242,11 +2255,11 @@ nfs_mountopts(struct nfsmount *nmp, char *buf, int buflen) * interval milliseconds in the future. */ void -nfs_interval_timer_start(thread_call_t call, int interval) +nfs_interval_timer_start(thread_call_t call, time_t interval) { uint64_t deadline; - clock_interval_to_deadline(interval, 1000 * 1000, &deadline); + clock_interval_to_deadline((int)interval, 1000 * 1000, &deadline); thread_call_enter_delayed(call, deadline); } @@ -2312,10 +2325,7 @@ nfsm_chain_get_path_namei( * Get a buffer for the name to be translated, and copy the * name into the buffer. 
*/ - MALLOC_ZONE(cnp->cn_pnbuf, caddr_t, MAXPATHLEN, M_NAMEI, M_WAITOK); - if (!cnp->cn_pnbuf) { - return ENOMEM; - } + cnp->cn_pnbuf = zalloc(ZV_NAMEI); cnp->cn_pnlen = MAXPATHLEN; cnp->cn_flags |= HASBUF; @@ -2334,7 +2344,7 @@ nfsm_chain_get_path_namei( out: if (error) { if (cnp->cn_pnbuf) { - FREE_ZONE(cnp->cn_pnbuf, MAXPATHLEN, M_NAMEI); + NFS_ZFREE(ZV_NAMEI, cnp->cn_pnbuf); } cnp->cn_flags &= ~HASBUF; } else { @@ -2419,7 +2429,7 @@ out: tmppn = cnp->cn_pnbuf; cnp->cn_pnbuf = NULL; cnp->cn_flags &= ~HASBUF; - FREE_ZONE(tmppn, cnp->cn_pnlen, M_NAMEI); + NFS_ZFREE(ZV_NAMEI, tmppn); } return error; } @@ -2432,7 +2442,8 @@ void nfsm_adj(mbuf_t mp, int len, int nul) { mbuf_t m, mnext; - int count, i, mlen; + int count, i; + long mlen; char *cp; /* @@ -2500,7 +2511,8 @@ nfsm_adj(mbuf_t mp, int len, int nul) int nfsm_chain_trim_data(struct nfsm_chain *nmc, int len, int *mlen) { - int cnt = 0, dlen, adjust; + int cnt = 0; + long dlen, adjust; caddr_t data; mbuf_t m; @@ -2743,7 +2755,8 @@ nfsrv_hang_addrlist(struct nfs_export *nx, struct user_nfs_export_args *unxa) struct radix_node *rn; struct sockaddr *saddr, *smask; struct domain *dom; - int i, error; + size_t i; + int error; unsigned int net; user_addr_t uaddr; kauth_cred_t cred; @@ -2767,7 +2780,7 @@ nfsrv_hang_addrlist(struct nfs_export *nx, struct user_nfs_export_args *unxa) bzero(&temp_pcred, sizeof(temp_pcred)); temp_pcred.cr_uid = nxna.nxna_cred.cr_uid; temp_pcred.cr_ngroups = nxna.nxna_cred.cr_ngroups; - for (i = 0; i < nxna.nxna_cred.cr_ngroups && i < NGROUPS; i++) { + for (i = 0; i < (size_t)nxna.nxna_cred.cr_ngroups && i < NGROUPS; i++) { temp_pcred.cr_groups[i] = nxna.nxna_cred.cr_groups[i]; } cred = posix_cred_create(&temp_pcred); @@ -2816,20 +2829,20 @@ nfsrv_hang_addrlist(struct nfs_export *nx, struct user_nfs_export_args *unxa) } else { smask = NULL; } - i = saddr->sa_family; - if ((rnh = nx->nx_rtable[i]) == 0) { + sa_family_t family = saddr->sa_family; + if ((rnh = nx->nx_rtable[family]) == 0) { /* * Seems silly to initialize every AF when most are not * used, do so on demand here */ TAILQ_FOREACH(dom, &domains, dom_entry) { - if (dom->dom_family == i && dom->dom_rtattach) { - dom->dom_rtattach((void **)&nx->nx_rtable[i], + if (dom->dom_family == family && dom->dom_rtattach) { + dom->dom_rtattach((void **)&nx->nx_rtable[family], dom->dom_rtoffset); break; } } - if ((rnh = nx->nx_rtable[i]) == 0) { + if ((rnh = nx->nx_rtable[family]) == 0) { if (IS_VALID_CRED(cred)) { kauth_cred_unref(&cred); } @@ -2873,8 +2886,8 @@ nfsrv_hang_addrlist(struct nfs_export *nx, struct user_nfs_export_args *unxa) */ gid_t groups[NGROUPS]; gid_t groups2[NGROUPS]; - int groupcount = NGROUPS; - int group2count = NGROUPS; + size_t groupcount = NGROUPS; + size_t group2count = NGROUPS; if (!kauth_cred_getgroups(cred, groups, &groupcount) && !kauth_cred_getgroups(cred2, groups2, &group2count) && @@ -3385,7 +3398,7 @@ nfsrv_export(struct user_nfs_export_args *unxa, vfs_context_t ctx) xnd.ni_op = OP_LOOKUP; #endif xnd.ni_cnd.cn_flags = LOCKLEAF; - xnd.ni_pathlen = pathlen - 1; + xnd.ni_pathlen = (uint32_t)pathlen - 1; // pathlen max value is equal to MAXPATHLEN xnd.ni_cnd.cn_nameptr = xnd.ni_cnd.cn_pnbuf = path; xnd.ni_startdir = mvp; xnd.ni_usedvp = mvp; @@ -3906,7 +3919,7 @@ nfsrv_fhmatch(struct nfs_filehandle *fh1, struct nfs_filehandle *fh2) * If found, the node's tm_last timestamp is updated and the node is returned. * * If not found, a new node is allocated (or reclaimed via LRU), initialized, and returned. 
- * Returns NULL if a new node could not be allcoated. + * Returns NULL if a new node could not be allocated OR saddr length exceeds sizeof(unode->sock). * * The list's user_mutex lock MUST be held. */ @@ -3937,6 +3950,11 @@ nfsrv_get_user_stat_node(struct nfs_active_user_list *list, struct sockaddr *sad return unode; } + if (saddr->sa_len > sizeof(((struct nfs_user_stat_node *)0)->sock)) { + /* saddr length exceeds maximum value */ + return NULL; + } + if (list->node_count < nfsrv_user_stat_max_nodes) { /* Allocate a new node */ MALLOC(unode, struct nfs_user_stat_node *, sizeof(struct nfs_user_stat_node), @@ -3964,7 +3982,7 @@ nfsrv_get_user_stat_node(struct nfs_active_user_list *list, struct sockaddr *sad /* Initialize the node */ unode->uid = uid; - bcopy(saddr, &unode->sock, saddr->sa_len); + bcopy(saddr, &unode->sock, MIN(saddr->sa_len, sizeof(unode->sock))); microtime(&now); unode->ops = 0; unode->bytes_read = 0; @@ -4071,7 +4089,7 @@ nfsrv_active_user_list_reclaim(void) struct nfs_user_stat_hashtbl_head oldlist; struct nfs_user_stat_node *unode, *unode_next; struct timeval now; - uint32_t tstale; + long tstale; LIST_INIT(&oldlist); diff --git a/bsd/nfs/nfs_syscalls.c b/bsd/nfs/nfs_syscalls.c index adb45d85d..90cba6ed4 100644 --- a/bsd/nfs/nfs_syscalls.c +++ b/bsd/nfs/nfs_syscalls.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2018 Apple Inc. All rights reserved. + * Copyright (c) 2000-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -173,6 +173,7 @@ SYSCTL_INT(_vfs_generic_nfs_client, OID_AUTO, idmap_ctrl, CTLFLAG_RW | CTLFLAG_L SYSCTL_INT(_vfs_generic_nfs_client, OID_AUTO, callback_port, CTLFLAG_RW | CTLFLAG_LOCKED, &nfs_callback_port, 0, ""); SYSCTL_INT(_vfs_generic_nfs_client, OID_AUTO, is_mobile, CTLFLAG_RW | CTLFLAG_LOCKED, &nfs_is_mobile, 0, ""); SYSCTL_INT(_vfs_generic_nfs_client, OID_AUTO, squishy_flags, CTLFLAG_RW | CTLFLAG_LOCKED, &nfs_squishy_flags, 0, ""); +SYSCTL_UINT(_vfs_generic_nfs_client, OID_AUTO, tcp_sockbuf, CTLFLAG_RW | CTLFLAG_LOCKED, &nfs_tcp_sockbuf, 0, ""); SYSCTL_UINT(_vfs_generic_nfs_client, OID_AUTO, debug_ctl, CTLFLAG_RW | CTLFLAG_LOCKED, &nfs_debug_ctl, 0, ""); SYSCTL_INT(_vfs_generic_nfs_client, OID_AUTO, readlink_nocache, CTLFLAG_RW | CTLFLAG_LOCKED, &nfs_readlink_nocache, 0, ""); #if CONFIG_NFS_GSS @@ -570,7 +571,7 @@ getfh( { vnode_t vp; struct nfs_filehandle nfh; - int error, fhlen, fidlen; + int error, fhlen = 0, fidlen; struct nameidata nd; char path[MAXPATHLEN], real_mntonname[MAXPATHLEN], *ptr; size_t pathlen; @@ -646,7 +647,7 @@ getfh( ptr++; } LIST_FOREACH(nx, &nxfs->nxfs_exports, nx_next) { - int len = strlen(nx->nx_path); + size_t len = strlen(nx->nx_path); if (len == 0) { // we've hit the export entry for the root directory break; } @@ -816,9 +817,9 @@ fhopen(proc_t p __no_nfs_server_unused, } fp = nfp; - fp->f_fglob->fg_flag = fmode & FMASK; - fp->f_fglob->fg_ops = &vnops; - fp->f_fglob->fg_data = (caddr_t)vp; + fp->fp_glob->fg_flag = fmode & FMASK; + fp->fp_glob->fg_ops = &vnops; + fp->fp_glob->fg_data = (caddr_t)vp; // XXX do we really need to support this with fhopen()? 
if (fmode & (O_EXLOCK | O_SHLOCK)) { @@ -834,16 +835,16 @@ fhopen(proc_t p __no_nfs_server_unused, if ((fmode & FNONBLOCK) == 0) { type |= F_WAIT; } - if ((error = VNOP_ADVLOCK(vp, (caddr_t)fp->f_fglob, F_SETLK, &lf, type, ctx, NULL))) { + if ((error = VNOP_ADVLOCK(vp, (caddr_t)fp->fp_glob, F_SETLK, &lf, type, ctx, NULL))) { struct vfs_context context = *vfs_context_current(); /* Modify local copy (to not damage thread copy) */ - context.vc_ucred = fp->f_fglob->fg_cred; + context.vc_ucred = fp->fp_glob->fg_cred; - vn_close(vp, fp->f_fglob->fg_flag, &context); + vn_close(vp, fp->fp_glob->fg_flag, &context); fp_free(p, indx, fp); - return error; + goto bad; } - fp->f_fglob->fg_flag |= FHASLOCK; + fp->fp_glob->fg_flag |= FWASLOCKED; } vnode_put(vp); @@ -957,8 +958,9 @@ nfssvc_addsock(socket_t so, mbuf_t mynam) { struct nfsrv_sock *slp; int error = 0, sodomain, sotype, soprotocol, on = 1; - int first; + int first, sobufsize; struct timeval timeo; + u_quad_t sbmaxsize; /* make sure mbuf constants are set up */ if (!nfs_mbuf_mhlen) { @@ -990,14 +992,18 @@ nfssvc_addsock(socket_t so, mbuf_t mynam) if ((sodomain == AF_INET) && (soprotocol == IPPROTO_TCP)) { sock_setsockopt(so, IPPROTO_TCP, TCP_NODELAY, &on, sizeof(on)); } - if (sotype == SOCK_DGRAM || sodomain == AF_LOCAL) { /* set socket buffer sizes for UDP */ - int reserve = (sotype == SOCK_DGRAM) ? NFS_UDPSOCKBUF : (2 * 1024 * 1024); - error |= sock_setsockopt(so, SOL_SOCKET, SO_SNDBUF, &reserve, sizeof(reserve)); - error |= sock_setsockopt(so, SOL_SOCKET, SO_RCVBUF, &reserve, sizeof(reserve)); - if (error) { - log(LOG_INFO, "nfssvc_addsock: UDP socket buffer setting error(s) %d\n", error); - error = 0; - } + + /* Calculate maximum supported socket buffers sizes */ + sbmaxsize = (u_quad_t)sb_max * MCLBYTES / (MSIZE + MCLBYTES); + + /* Set socket buffer sizes for UDP/TCP */ + sobufsize = min(sbmaxsize, (sotype == SOCK_DGRAM) ? 
NFS_UDPSOCKBUF : NFSRV_TCPSOCKBUF); + error |= sock_setsockopt(so, SOL_SOCKET, SO_SNDBUF, &sobufsize, sizeof(sobufsize)); + error |= sock_setsockopt(so, SOL_SOCKET, SO_RCVBUF, &sobufsize, sizeof(sobufsize)); + + if (error) { + log(LOG_INFO, "nfssvc_addsock: socket buffer setting error(s) %d\n", error); + error = 0; } sock_nointerrupt(so, 0); @@ -1113,6 +1119,7 @@ nfssvc_addsock(socket_t so, mbuf_t mynam) slp->ns_so = so; slp->ns_sotype = sotype; slp->ns_nam = mynam; + slp->ns_sobufsize = sobufsize; /* set up the socket up-call */ nfsrv_uc_addsock(slp, first); @@ -1166,13 +1173,13 @@ nfssvc_addsock(socket_t so, mbuf_t mynam) int nfssvc_nfsd(void) { - mbuf_t m, mrep; + mbuf_t m, mrep = NULL; struct nfsrv_sock *slp; struct nfsd *nfsd; struct nfsrv_descript *nd = NULL; int error = 0, cacherep, writes_todo; int siz, procrastinate, opcnt = 0; - u_quad_t cur_usec; + time_t cur_usec; struct timeval now; struct vfs_context context; struct timespec to; @@ -1305,8 +1312,7 @@ nfssvc_nfsd(void) writes_todo = 0; if (error && (slp->ns_wgtime || (slp->ns_flag & SLP_DOWRITES))) { microuptime(&now); - cur_usec = (u_quad_t)now.tv_sec * 1000000 + - (u_quad_t)now.tv_usec; + cur_usec = (now.tv_sec * 1000000) + now.tv_usec; if (slp->ns_wgtime <= cur_usec) { error = 0; cacherep = RC_DOIT; @@ -1330,8 +1336,7 @@ nfssvc_nfsd(void) if (nd->nd_gss_context) { nfs_gss_svc_ctx_deref(nd->nd_gss_context); } - FREE_ZONE(nd, sizeof(*nd), M_NFSRVDESC); - nd = NULL; + NFS_ZFREE(nfsrv_descript_zone, nd); } nfsd->nfsd_slp = NULL; nfsd->nfsd_flag &= ~NFSD_REQINPROG; @@ -1417,7 +1422,7 @@ nfssvc_nfsd(void) } OSAddAtomic64(1, &nfsstats.srvrpccnt[nd->nd_procnum]); nfsrv_updatecache(nd, TRUE, mrep); - /* FALLTHRU */ + OS_FALLTHROUGH; case RC_REPLY: if (nd->nd_gss_mb != NULL) { // It's RPCSEC_GSS @@ -1488,7 +1493,7 @@ nfssvc_nfsd(void) if (nd->nd_gss_context) { nfs_gss_svc_ctx_deref(nd->nd_gss_context); } - FREE_ZONE(nd, sizeof(*nd), M_NFSRVDESC); + NFS_ZFREE(nfsrv_descript_zone, nd); nfsrv_slpderef(slp); lck_mtx_lock(nfsd_mutex); goto done; @@ -1512,8 +1517,7 @@ nfssvc_nfsd(void) if (nd->nd_gss_context) { nfs_gss_svc_ctx_deref(nd->nd_gss_context); } - FREE_ZONE(nd, sizeof(*nd), M_NFSRVDESC); - nd = NULL; + NFS_ZFREE(nfsrv_descript_zone, nd); } /* @@ -1523,8 +1527,7 @@ nfssvc_nfsd(void) writes_todo = 0; if (slp->ns_wgtime) { microuptime(&now); - cur_usec = (u_quad_t)now.tv_sec * 1000000 + - (u_quad_t)now.tv_usec; + cur_usec = (now.tv_sec * 1000000) + now.tv_usec; if (slp->ns_wgtime <= cur_usec) { cacherep = RC_DOIT; writes_todo = 1; @@ -1670,7 +1673,7 @@ nfsrv_slpfree(struct nfsrv_sock *slp) if (nwp->nd_gss_context) { nfs_gss_svc_ctx_deref(nwp->nd_gss_context); } - FREE_ZONE(nwp, sizeof(*nwp), M_NFSRVDESC); + NFS_ZFREE(nfsrv_descript_zone, nwp); } LIST_INIT(&slp->ns_tq); diff --git a/bsd/nfs/nfs_vfsops.c b/bsd/nfs/nfs_vfsops.c index ef5457410..82e3c594c 100644 --- a/bsd/nfs/nfs_vfsops.c +++ b/bsd/nfs/nfs_vfsops.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2019 Apple Inc. All rights reserved. + * Copyright (c) 2000-2020 Apple Inc. All rights reserved. 
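
A few hunks back, nfssvc_addsock() clamps the server's SO_SNDBUF/SO_RCVBUF request to sb_max * MCLBYTES / (MSIZE + MCLBYTES), the usual BSD scaling that leaves room for per-mbuf bookkeeping when the socket layer charges buffer space. A small standalone illustration of the arithmetic; the numbers are assumptions chosen for the example (sb_max is the kern.ipc.maxsockbuf tunable, and 256/2048 are the commonly seen xnu values for MSIZE/MCLBYTES), not the kernel's actual configuration:

	#include <stdio.h>

	int
	main(void)
	{
		unsigned long long sb_max   = 8ULL * 1024 * 1024; /* hypothetical kern.ipc.maxsockbuf */
		unsigned long long msize    = 256;                /* per-mbuf size (assumed) */
		unsigned long long mclbytes = 2048;               /* mbuf cluster size (assumed) */

		/* same scaling the patch applies before setting SO_SNDBUF/SO_RCVBUF */
		unsigned long long sbmaxsize = sb_max * mclbytes / (msize + mclbytes);

		printf("usable socket buffer ceiling: %llu bytes (%.1f%% of sb_max)\n",
		    sbmaxsize, 100.0 * (double)sbmaxsize / (double)sb_max);
		return 0;
	}

For the assumed values this prints a ceiling of roughly 7.1 MB, about 89% of sb_max.
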
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -130,12 +130,16 @@ * NFS client globals */ +ZONE_DECLARE(nfsmnt_zone, "NFS mount", + sizeof(struct nfsmount), ZC_ZFREE_CLEARMEM); + int nfs_ticks; static lck_grp_t *nfs_global_grp, *nfs_mount_grp; lck_mtx_t *nfs_global_mutex; uint32_t nfs_fs_attr_bitmap[NFS_ATTR_BITMAP_LEN]; uint32_t nfs_object_attr_bitmap[NFS_ATTR_BITMAP_LEN]; uint32_t nfs_getattr_bitmap[NFS_ATTR_BITMAP_LEN]; +uint32_t nfs4_getattr_write_bitmap[NFS_ATTR_BITMAP_LEN]; struct nfsclientidlist nfsclientids; /* NFS requests */ @@ -144,8 +148,8 @@ lck_grp_t *nfs_request_grp; lck_mtx_t *nfs_request_mutex; thread_call_t nfs_request_timer_call; int nfs_request_timer_on; -u_int32_t nfs_xid = 0; -u_int32_t nfs_xidwrap = 0; /* to build a (non-wrapping) 64 bit xid */ +u_int64_t nfs_xid = 0; +u_int64_t nfs_xidwrap = 0; /* to build a (non-wrapping) 64 bit xid */ thread_call_t nfs_buf_timer_call; @@ -346,9 +350,11 @@ nfs_vfs_init(__unused struct vfsconf *vfsp) /* NFSv4 stuff */ NFS4_PER_FS_ATTRIBUTES(nfs_fs_attr_bitmap); NFS4_PER_OBJECT_ATTRIBUTES(nfs_object_attr_bitmap); + NFS4_DEFAULT_WRITE_ATTRIBUTES(nfs4_getattr_write_bitmap); NFS4_DEFAULT_ATTRIBUTES(nfs_getattr_bitmap); for (i = 0; i < NFS_ATTR_BITMAP_LEN; i++) { nfs_getattr_bitmap[i] &= nfs_object_attr_bitmap[i]; + nfs4_getattr_write_bitmap[i] &= nfs_object_attr_bitmap[i]; } TAILQ_INIT(&nfsclientids); #endif @@ -615,7 +621,7 @@ nfs_vfs_getattr(mount_t mp, struct vfs_attr *fsap, vfs_context_t ctx) */ if ((statfsrate > 0) && (statfsrate < 1000000)) { struct timeval now; - uint32_t stamp; + time_t stamp; microuptime(&now); lck_mtx_lock(&nmp->nm_lock); @@ -1049,20 +1055,16 @@ tryagain: if (error) { if (error == EHOSTDOWN || error == EHOSTUNREACH) { if (nd.nd_root.ndm_mntfrom) { - FREE_ZONE(nd.nd_root.ndm_mntfrom, - MAXPATHLEN, M_NAMEI); + NFS_ZFREE(ZV_NAMEI, nd.nd_root.ndm_mntfrom); } if (nd.nd_root.ndm_path) { - FREE_ZONE(nd.nd_root.ndm_path, - MAXPATHLEN, M_NAMEI); + NFS_ZFREE(ZV_NAMEI, nd.nd_root.ndm_path); } if (nd.nd_private.ndm_mntfrom) { - FREE_ZONE(nd.nd_private.ndm_mntfrom, - MAXPATHLEN, M_NAMEI); + NFS_ZFREE(ZV_NAMEI, nd.nd_private.ndm_mntfrom); } if (nd.nd_private.ndm_path) { - FREE_ZONE(nd.nd_private.ndm_path, - MAXPATHLEN, M_NAMEI); + NFS_ZFREE(ZV_NAMEI, nd.nd_private.ndm_path); } return error; } @@ -1157,16 +1159,16 @@ tryagain: #endif /* NO_MOUNT_PRIVATE */ if (nd.nd_root.ndm_mntfrom) { - FREE_ZONE(nd.nd_root.ndm_mntfrom, MAXPATHLEN, M_NAMEI); + NFS_ZFREE(ZV_NAMEI, nd.nd_root.ndm_mntfrom); } if (nd.nd_root.ndm_path) { - FREE_ZONE(nd.nd_root.ndm_path, MAXPATHLEN, M_NAMEI); + NFS_ZFREE(ZV_NAMEI, nd.nd_root.ndm_path); } if (nd.nd_private.ndm_mntfrom) { - FREE_ZONE(nd.nd_private.ndm_mntfrom, MAXPATHLEN, M_NAMEI); + NFS_ZFREE(ZV_NAMEI, nd.nd_private.ndm_mntfrom); } if (nd.nd_private.ndm_path) { - FREE_ZONE(nd.nd_private.ndm_path, MAXPATHLEN, M_NAMEI); + NFS_ZFREE(ZV_NAMEI, nd.nd_private.ndm_path); } /* Get root attributes (for the time). 
*/ @@ -1197,7 +1199,7 @@ nfs_mount_diskless( uint32_t mattrs[NFS_MATTR_BITMAP_LEN]; uint32_t mflags_mask[NFS_MFLAG_BITMAP_LEN]; uint32_t mflags[NFS_MFLAG_BITMAP_LEN]; - uint32_t argslength_offset, attrslength_offset, end_offset; + uint64_t argslength_offset, attrslength_offset, end_offset; if ((error = vfs_rootmountalloc("nfs", ndmntp->ndm_mntfrom, &mp))) { printf("nfs_mount_diskless: NFS not configured\n"); @@ -1336,7 +1338,7 @@ nfs_mount_diskless( #if CONFIG_MACF mac_mount_label_destroy(mp); #endif - FREE_ZONE(mp, sizeof(struct mount), M_MOUNT); + NFS_ZFREE(mount_zone, mp); } else { *mpp = mp; } @@ -1369,7 +1371,7 @@ nfs_mount_diskless_private( struct xdrbuf xb; uint32_t mattrs[NFS_MATTR_BITMAP_LEN]; uint32_t mflags_mask[NFS_MFLAG_BITMAP_LEN], mflags[NFS_MFLAG_BITMAP_LEN]; - uint32_t argslength_offset, attrslength_offset, end_offset; + uint64_t argslength_offset, attrslength_offset, end_offset; procp = current_proc(); /* XXX */ xb_init(&xb, XDRBUF_NONE); @@ -1401,14 +1403,15 @@ nfs_mount_diskless_private( */ NDINIT(&nd, LOOKUP, OP_LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE, CAST_USER_ADDR_T(mntname), ctx); - if ((error = namei(&nd))) { - printf("nfs_mountroot: private namei failed!\n"); - goto out; - } + error = namei(&nd); { /* undo vnode_ref() in mimic main()! */ vnode_rele(rootvnode); } + if (error) { + printf("nfs_mountroot: private namei failed!\n"); + goto out; + } nameidone(&nd); vp = nd.ni_vp; @@ -1442,15 +1445,7 @@ nfs_mount_diskless_private( /* * Allocate and initialize the filesystem. */ - mp = _MALLOC_ZONE((u_int32_t)sizeof(struct mount), M_MOUNT, M_WAITOK); - if (!mp) { - printf("nfs_mountroot: unable to allocate mount structure\n"); - vnode_put(vp); - error = ENOMEM; - goto out; - } - bzero((char *)mp, sizeof(struct mount)); - + mp = zalloc_flags(mount_zone, Z_WAITOK | Z_ZERO); /* Initialize the default IO constraints */ mp->mnt_maxreadcnt = mp->mnt_maxwritecnt = MAXPHYS; mp->mnt_segreadcnt = mp->mnt_segwritecnt = 32; @@ -1611,7 +1606,7 @@ nfs_mount_diskless_private( #if CONFIG_MACF mac_mount_label_destroy(mp); #endif - FREE_ZONE(mp, sizeof(struct mount), M_MOUNT); + NFS_ZFREE(mount_zone, mp); goto out; } @@ -1639,20 +1634,18 @@ nfs_convert_old_nfs_args(mount_t mp, user_addr_t data, vfs_context_t ctx, int ar u_char nfh[NFS4_FHSIZE]; char *mntfrom, *endserverp, *frompath, *p, *cp; struct sockaddr_storage ss; - void *sinaddr; + void *sinaddr = NULL; char uaddr[MAX_IPv6_STR_LEN]; uint32_t mattrs[NFS_MATTR_BITMAP_LEN]; uint32_t mflags_mask[NFS_MFLAG_BITMAP_LEN], mflags[NFS_MFLAG_BITMAP_LEN]; - uint32_t nfsvers, nfslockmode = 0, argslength_offset, attrslength_offset, end_offset; + uint32_t nfsvers, nfslockmode = 0; + size_t argslength_offset, attrslength_offset, end_offset; struct xdrbuf xb; *xdrbufp = NULL; /* allocate a temporary buffer for mntfrom */ - MALLOC_ZONE(mntfrom, char*, MAXPATHLEN, M_NAMEI, M_WAITOK); - if (!mntfrom) { - return ENOMEM; - } + mntfrom = zalloc(ZV_NAMEI); args64bit = (inkernel || vfs_context_is64bit(ctx)); argsp = args64bit ? 
(void*)&args : (void*)&tempargs; @@ -1661,10 +1654,13 @@ nfs_convert_old_nfs_args(mount_t mp, user_addr_t data, vfs_context_t ctx, int ar switch (argsversion) { case 3: argsize -= NFS_ARGSVERSION4_INCSIZE; + OS_FALLTHROUGH; case 4: argsize -= NFS_ARGSVERSION5_INCSIZE; + OS_FALLTHROUGH; case 5: argsize -= NFS_ARGSVERSION6_INCSIZE; + OS_FALLTHROUGH; case 6: break; default: @@ -1770,7 +1766,7 @@ nfs_convert_old_nfs_args(mount_t mp, user_addr_t data, vfs_context_t ctx, int ar if (inkernel) { bcopy(CAST_DOWN(void *, args.addr), &ss, args.addrlen); } else { - if ((size_t)args.addrlen > sizeof(struct sockaddr_storage)) { + if (args.addrlen > sizeof(struct sockaddr_storage)) { error = EINVAL; } else { error = copyin(args.addr, &ss, args.addrlen); @@ -2039,7 +2035,7 @@ nfs_convert_old_nfs_args(mount_t mp, user_addr_t data, vfs_context_t ctx, int ar } nfsmout: xb_cleanup(&xb); - FREE_ZONE(mntfrom, MAXPATHLEN, M_NAMEI); + NFS_ZFREE(ZV_NAMEI, mntfrom); return error; } @@ -2102,6 +2098,21 @@ nfs_vfs_mount(mount_t mp, vnode_t vp, user_addr_t data, vfs_context_t ctx) } else { error = copyin(data, xdrbuf, argslength); } + + if (!inkernel) { + /* Recheck buffer size to avoid double fetch vulnerability */ + struct xdrbuf xb; + uint32_t _version, _length; + xb_init_buffer(&xb, xdrbuf, 2 * XDRWORD); + xb_get_32(error, &xb, _version); /* version */ + xb_get_32(error, &xb, _length); /* args length */ + if (_length != argslength) { + printf("nfs: actual buffer length (%u) does not match the initial value (%u)\n", _length, argslength); + error = EINVAL; + break; + } + } + break; default: error = EPROGMISMATCH; @@ -2169,17 +2180,13 @@ nfs3_mount( if (error) { goto out; } - /* If the server indicates all pathconf info is */ - /* the same, grab a copy of that info now */ - if (NFS_BITMAP_ISSET(nmp->nm_fsattr.nfsa_bitmap, NFS_FATTR_HOMOGENEOUS) && - (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_HOMOGENEOUS)) { - struct nfs_fsattr nfsa; - if (!nfs3_pathconf_rpc(*npp, &nfsa, ctx)) { - /* cache a copy of the results */ - lck_mtx_lock(&nmp->nm_lock); - nfs3_pathconf_cache(nmp, &nfsa); - lck_mtx_unlock(&nmp->nm_lock); - } + /* grab a copy of root info now (even if server does not support FSF_HOMOGENEOUS) */ + struct nfs_fsattr nfsa; + if (!nfs3_pathconf_rpc(*npp, &nfsa, ctx)) { + /* cache a copy of the results */ + lck_mtx_lock(&nmp->nm_lock); + nfs3_pathconf_cache(nmp, &nfsa); + lck_mtx_unlock(&nmp->nm_lock); } } out: @@ -2219,10 +2226,7 @@ nfs4_mount_update_path_with_symlink(struct nfsmount *nmp, struct nfs_fs_path *nf nfsm_chain_null(&nmreq); nfsm_chain_null(&nmrep); - MALLOC_ZONE(link, char *, MAXPATHLEN, M_NAMEI, M_WAITOK); - if (!link) { - error = ENOMEM; - } + link = zalloc(ZV_NAMEI); // PUTFH, READLINK numops = 2; @@ -2337,7 +2341,7 @@ nfs4_mount_update_path_with_symlink(struct nfsmount *nmp, struct nfs_fs_path *nf } nfsmout: if (link) { - FREE_ZONE(link, MAXPATHLEN, M_NAMEI); + NFS_ZFREE(ZV_NAMEI, link); } if (nfsp2.np_components) { for (comp = 0; comp < nfsp2.np_compcount; comp++) { @@ -2404,7 +2408,7 @@ nfs4_mount( goto nfsmout; } for (comp = 0; comp < nfsp->np_compcount; comp++) { - int slen = strlen(nfsp->np_components[comp]); + size_t slen = strlen(nfsp->np_components[comp]); MALLOC(fspath.np_components[comp], char *, slen + 1, M_TEMP, M_WAITOK | M_ZERO); if (!fspath.np_components[comp]) { error = ENOMEM; @@ -2588,7 +2592,7 @@ nocomponents: goto nfsmout; } for (comp2 = 0; comp2 < nfsp->np_compcount; comp2++) { - int slen = strlen(nfsp->np_components[comp2]); + size_t slen = strlen(nfsp->np_components[comp2]); 
MALLOC(fspath2.np_components[comp2], char *, slen + 1, M_TEMP, M_WAITOK | M_ZERO); if (!fspath2.np_components[comp2]) { /* clean up fspath2, then error out */ @@ -2958,7 +2962,7 @@ mountnfs( uint32_t *mflags_mask; uint32_t *mflags; uint32_t argslength, attrslength; - uid_t set_owner; + uid_t set_owner = 0; struct nfs_location_index firstloc = { .nli_flags = NLI_VALID, .nli_loc = 0, @@ -2985,13 +2989,7 @@ mountnfs( return 0; } else { /* allocate an NFS mount structure for this mount */ - MALLOC_ZONE(nmp, struct nfsmount *, - sizeof(struct nfsmount), M_NFSMNT, M_WAITOK); - if (!nmp) { - xb_free(xdrbuf); - return ENOMEM; - } - bzero((caddr_t)nmp, sizeof(struct nfsmount)); + nmp = zalloc_flags(nfsmnt_zone, Z_WAITOK | Z_ZERO); lck_mtx_init(&nmp->nm_lock, nfs_mount_grp, LCK_ATTR_NULL); TAILQ_INIT(&nmp->nm_resendq); TAILQ_INIT(&nmp->nm_iodq); @@ -3165,7 +3163,7 @@ mountnfs( break; } #endif - /* FALLTHROUGH */ + OS_FALLTHROUGH; case NFS_LOCK_MODE_ENABLED: nmp->nm_lockmode = val; break; @@ -3283,10 +3281,20 @@ mountnfs( nfsmerr_if(error); } if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_NFS_PORT)) { - xb_get_32(error, &xb, nmp->nm_nfsport); + xb_get_32(error, &xb, val); + if (NFS_PORT_INVALID(val)) { + error = EINVAL; + nfsmerr_if(error); + } + nmp->nm_nfsport = (in_port_t)val; } if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_MOUNT_PORT)) { - xb_get_32(error, &xb, nmp->nm_mountport); + xb_get_32(error, &xb, val); + if (NFS_PORT_INVALID(val)) { + error = EINVAL; + nfsmerr_if(error); + } + nmp->nm_mountport = (in_port_t)val; } if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_REQUEST_TIMEOUT)) { /* convert from time to 0.1s units */ @@ -3314,10 +3322,7 @@ mountnfs( } if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_FH)) { nfsmerr_if(error); - MALLOC(nmp->nm_fh, fhandle_t *, sizeof(fhandle_t), M_TEMP, M_WAITOK | M_ZERO); - if (!nmp->nm_fh) { - error = ENOMEM; - } + nmp->nm_fh = zalloc(nfs_fhandle_zone); xb_get_32(error, &xb, nmp->nm_fh->fh_len); nfsmerr_if(error); if ((size_t)nmp->nm_fh->fh_len > sizeof(nmp->nm_fh->fh_data)) { @@ -3861,14 +3866,14 @@ nfs_mirror_mount_domount(vnode_t dvp, vnode_t vp, vfs_context_t ctx) struct nfsmount *nmp = NFSTONMP(np); char fstype[MFSTYPENAMELEN], *mntfromname = NULL, *path = NULL, *relpath, *p, *cp; int error = 0, pathbuflen = MAXPATHLEN, i, mntflags = 0, referral, skipcopy = 0; - size_t nlen; + size_t nlen, rlen, mlen, mlen2, count; struct xdrbuf xb, xbnew; uint32_t mattrs[NFS_MATTR_BITMAP_LEN]; uint32_t newmattrs[NFS_MATTR_BITMAP_LEN]; uint32_t newmflags[NFS_MFLAG_BITMAP_LEN]; uint32_t newmflags_mask[NFS_MFLAG_BITMAP_LEN]; - uint32_t argslength = 0, val, count, mlen, mlen2, rlen, relpathcomps; - uint32_t argslength_offset, attrslength_offset, end_offset; + uint32_t val, relpathcomps; + uint64_t argslength = 0, argslength_offset, attrslength_offset, end_offset; uint32_t numlocs, loc, numserv, serv, numaddr, addr, numcomp, comp; char buf[XDRWORD]; struct nfs_fs_locations nfsls; @@ -3885,16 +3890,8 @@ nfs_mirror_mount_domount(vnode_t dvp, vnode_t vp, vfs_context_t ctx) } /* allocate a couple path buffers we need */ - MALLOC_ZONE(mntfromname, char *, pathbuflen, M_NAMEI, M_WAITOK); - if (!mntfromname) { - error = ENOMEM; - goto nfsmerr; - } - MALLOC_ZONE(path, char *, pathbuflen, M_NAMEI, M_WAITOK); - if (!path) { - error = ENOMEM; - goto nfsmerr; - } + mntfromname = zalloc(ZV_NAMEI); + path = zalloc(ZV_NAMEI); /* get the path for the directory being mounted on */ error = vn_getpath(vp, path, &pathbuflen); @@ -3970,7 +3967,7 @@ nfs_mirror_mount_domount(vnode_t dvp, vnode_t vp, vfs_context_t ctx) } while 
(0) #define xb_copy_opaque(E, XBSRC, XBDST) \ do { \ - uint32_t __count, __val; \ + uint32_t __count = 0, __val; \ xb_copy_32((E), (XBSRC), (XBDST), __count); \ if (E) break; \ __count = nfsm_rndup(__count); \ @@ -4291,10 +4288,10 @@ nfsmerr: nfs_fs_locations_cleanup(&nfsls); } if (path) { - FREE_ZONE(path, MAXPATHLEN, M_NAMEI); + NFS_ZFREE(ZV_NAMEI, path); } if (mntfromname) { - FREE_ZONE(mntfromname, MAXPATHLEN, M_NAMEI); + NFS_ZFREE(ZV_NAMEI, mntfromname); } if (!error) { nfs_ephemeral_mount_harvester_start(); @@ -4626,16 +4623,49 @@ nfs_ephemeral_mount_harvester_start(void) #endif +/* + * Send a STAT protocol request to the server to verify statd is running. + * rpc-statd service, which responsible to provide locks for the NFS server, is disabled by default on Ubuntu. + * Please see Radar 45969553 for more info. + */ +int +nfs3_check_lockmode(struct nfsmount *nmp, struct sockaddr *sa, int sotype, int timeo) +{ + struct sockaddr_storage ss; + int error, port = 0; + + if (nmp->nm_lockmode == NFS_LOCK_MODE_ENABLED) { + bcopy(sa, &ss, sa->sa_len); + error = nfs_portmap_lookup(nmp, vfs_context_current(), (struct sockaddr*)&ss, NULL, RPCPROG_STAT, RPCMNT_VER1, NM_OMFLAG(nmp, MNTUDP) ? SOCK_DGRAM : sotype, timeo); + if (!error) { + if (ss.ss_family == AF_INET) { + port = ntohs(((struct sockaddr_in*)&ss)->sin_port); + } else if (ss.ss_family == AF_INET6) { + port = ntohs(((struct sockaddr_in6*)&ss)->sin6_port); + } else if (ss.ss_family == AF_LOCAL) { + port = (((struct sockaddr_un*)&ss)->sun_path[0] != '\0'); + } + + if (!port) { + printf("nfs: STAT(NSM) rpc service is not available, unable to mount with current lock mode.\n"); + return EPROGUNAVAIL; + } + } + } + return 0; +} + /* * Send a MOUNT protocol MOUNT request to the server to get the initial file handle (and security). 
*/ int nfs3_mount_rpc(struct nfsmount *nmp, struct sockaddr *sa, int sotype, int nfsvers, char *path, vfs_context_t ctx, int timeo, fhandle_t *fh, struct nfs_sec *sec) { - int error = 0, slen, mntproto; + int error = 0, mntproto; thread_t thd = vfs_context_thread(ctx); kauth_cred_t cred = vfs_context_ucred(ctx); uint64_t xid = 0; + size_t slen; struct nfsm_chain nmreq, nmrep; mbuf_t mreq; uint32_t mntvers, mntport, val; @@ -4711,6 +4741,7 @@ nfs3_mount_rpc(struct nfsmount *nmp, struct sockaddr *sa, int sotype, int nfsver if (!error && val) { error = val; } + nfsmout_if(error); nfsm_chain_get_fh(error, &nmrep, nfsvers, fh); if (!error && (nfsvers > NFS_VER2)) { sec->count = NX_MAX_SEC_FLAVORS; @@ -4729,14 +4760,16 @@ nfsmout: void nfs3_umount_rpc(struct nfsmount *nmp, vfs_context_t ctx, int timeo) { - int error = 0, slen, mntproto; + int error = 0, mntproto; thread_t thd = vfs_context_thread(ctx); kauth_cred_t cred = vfs_context_ucred(ctx); char *path; uint64_t xid = 0; + size_t slen; struct nfsm_chain nmreq, nmrep; mbuf_t mreq; - uint32_t mntvers, mntport; + uint32_t mntvers; + in_port_t mntport; struct sockaddr_storage ss; struct sockaddr *saddr = (struct sockaddr*)&ss; @@ -5093,9 +5126,10 @@ nfs_mount_zombie(struct nfsmount *nmp, int nm_state_flags) } if (req->r_flags & R_RESENDQ) { lck_mtx_lock(&nmp->nm_lock); - req->r_flags &= ~R_RESENDQ; - if (req->r_rchain.tqe_next != NFSREQNOLIST) { + if ((req->r_flags & R_RESENDQ) && req->r_rchain.tqe_next != NFSREQNOLIST) { TAILQ_REMOVE(&nmp->nm_resendq, req, r_rchain); + req->r_flags &= ~R_RESENDQ; + req->r_rchain.tqe_next = NFSREQNOLIST; /* * Queue up the request so that we can unreference them * with out holding nfs_request_mutex @@ -5262,11 +5296,11 @@ nfs_mount_cleanup(struct nfsmount *nmp) lck_mtx_destroy(&nmp->nm_lock, nfs_mount_grp); if (nmp->nm_fh) { - FREE(nmp->nm_fh, M_TEMP); + NFS_ZFREE(nfs_fhandle_zone, nmp->nm_fh); } - FREE_ZONE(nmp, sizeof(struct nfsmount), M_NFSMNT); + NFS_ZFREE(nfsmnt_zone, nmp); } /* @@ -5314,10 +5348,10 @@ nfs_vfs_quotactl( } #else -static int +static in_port_t nfs_sa_getport(struct sockaddr *sa, int *error) { - int port = 0; + in_port_t port = 0; if (sa->sa_family == AF_INET6) { port = ntohs(((struct sockaddr_in6*)sa)->sin6_port); @@ -5331,7 +5365,7 @@ nfs_sa_getport(struct sockaddr *sa, int *error) } static void -nfs_sa_setport(struct sockaddr *sa, int port) +nfs_sa_setport(struct sockaddr *sa, in_port_t port) { if (sa->sa_family == AF_INET6) { ((struct sockaddr_in6*)sa)->sin6_port = htons(port); @@ -5343,12 +5377,13 @@ nfs_sa_setport(struct sockaddr *sa, int port) int nfs3_getquota(struct nfsmount *nmp, vfs_context_t ctx, uid_t id, int type, struct dqblk *dqb) { - int error = 0, slen, timeo; - int rqport = 0, rqproto, rqvers = (type == GRPQUOTA) ? RPCRQUOTA_EXT_VER : RPCRQUOTA_VER; + int error = 0, timeo; + int rqproto, rqvers = (type == GRPQUOTA) ? 
RPCRQUOTA_EXT_VER : RPCRQUOTA_VER; + in_port_t rqport = 0; thread_t thd = vfs_context_thread(ctx); kauth_cred_t cred = vfs_context_ucred(ctx); char *path; - uint64_t xid = 0; + uint64_t slen, xid = 0; struct nfsm_chain nmreq, nmrep; mbuf_t mreq; uint32_t val = 0, bsize = 0; @@ -5780,7 +5815,7 @@ nfs_mountinfo_assemble(struct nfsmount *nmp, struct xdrbuf *xb) struct xdrbuf xbinfo, xborig; char sotype[16]; uint32_t origargsvers, origargslength; - uint32_t infolength_offset, curargsopaquelength_offset, curargslength_offset, attrslength_offset, curargs_end_offset, end_offset; + size_t infolength_offset, curargsopaquelength_offset, curargslength_offset, attrslength_offset, curargs_end_offset, end_offset; uint32_t miattrs[NFS_MIATTR_BITMAP_LEN]; uint32_t miflags_mask[NFS_MIFLAG_BITMAP_LEN]; uint32_t miflags[NFS_MIFLAG_BITMAP_LEN]; @@ -6172,9 +6207,6 @@ nfs_vfs_sysctl(int *name, u_int namelen, user_addr_t oldp, size_t *oldlenp, user_addr_t newp, size_t newlen, vfs_context_t ctx) { int error = 0, val; -#ifndef CONFIG_EMBEDDED - int softnobrowse; -#endif struct sysctl_req *req = NULL; union union_vfsidctl vc; mount_t mp; @@ -6198,8 +6230,9 @@ nfs_vfs_sysctl(int *name, u_int namelen, user_addr_t oldp, size_t *oldlenp, struct nfs_user_stat_desc ustat_desc = {}; struct nfs_user_stat_user_rec ustat_rec; struct nfs_user_stat_path_rec upath_rec; - uint bytes_avail, bytes_total, recs_copied; - uint numExports, numRecs; + uint bytes_total, recs_copied; + uint numExports; + size_t bytes_avail, numRecs; #endif /* CONFIG_NFS_SERVER */ /* @@ -6215,9 +6248,9 @@ nfs_vfs_sysctl(int *name, u_int namelen, user_addr_t oldp, size_t *oldlenp, case VFS_CTL_TIMEO: case VFS_CTL_NOLOCKS: case VFS_CTL_NSTATUS: -#ifndef CONFIG_EMBEDDED +#if defined(XNU_TARGET_OS_OSX) case VFS_CTL_QUERY: -#endif +#endif /* XNU_TARGET_OS_OSX */ req = CAST_DOWN(struct sysctl_req *, oldp); if (req == NULL) { return EFAULT; @@ -6244,10 +6277,10 @@ nfs_vfs_sysctl(int *name, u_int namelen, user_addr_t oldp, size_t *oldlenp, req->newlen = vc.vc32.vc_len; } break; -#if CONFIG_EMBEDDED +#if !defined(XNU_TARGET_OS_OSX) case VFS_CTL_QUERY: return EPERM; -#endif +#endif /* ! 
XNU_TARGET_OS_OSX */ } switch (name[0]) { @@ -6275,6 +6308,9 @@ nfs_vfs_sysctl(int *name, u_int namelen, user_addr_t oldp, size_t *oldlenp, return copyin(newp, &nfsstats, sizeof nfsstats); } return 0; + case NFS_NFSZEROSTATS: + bzero(&nfsstats, sizeof nfsstats); + return 0; case NFS_MOUNTINFO: /* read in the fsid */ if (*oldlenp < sizeof(fsid)) { @@ -6594,11 +6630,11 @@ ustat_skip: lck_mtx_unlock(&nmp->nm_lock); } break; -#ifndef CONFIG_EMBEDDED +#if defined(XNU_TARGET_OS_OSX) case VFS_CTL_QUERY: lck_mtx_lock(&nmp->nm_lock); /* XXX don't allow users to know about/disconnect unresponsive, soft, nobrowse mounts */ - softnobrowse = (NMFLAG(nmp, SOFT) && (vfs_flags(nmp->nm_mountp) & MNT_DONTBROWSE)); + int softnobrowse = (NMFLAG(nmp, SOFT) && (vfs_flags(nmp->nm_mountp) & MNT_DONTBROWSE)); if (!softnobrowse && (nmp->nm_state & NFSSTA_TIMEO)) { vq.vq_flags |= VQ_NOTRESP; } @@ -6615,7 +6651,7 @@ ustat_skip: lck_mtx_unlock(&nmp->nm_lock); error = SYSCTL_OUT(req, &vq, sizeof(vq)); break; -#endif +#endif /* XNU_TARGET_OS_OSX */ case VFS_CTL_TIMEO: if (req->oldptr != USER_ADDR_NULL) { lck_mtx_lock(&nmp->nm_lock); @@ -6701,6 +6737,7 @@ ustat_skip: if (numThreads > 0) { struct timeval now; time_t sendtime; + uint64_t waittime; microuptime(&now); count = 0; @@ -6718,7 +6755,8 @@ ustat_skip: } } } - nsp->ns_waittime = now.tv_sec - sendtime; + waittime = now.tv_sec - sendtime; + nsp->ns_waittime = waittime > UINT32_MAX ? UINT32_MAX : (uint32_t)waittime; } lck_mtx_unlock(&nmp->nm_lock); diff --git a/bsd/nfs/nfs_vnops.c b/bsd/nfs/nfs_vnops.c index 4f9208b25..b03463b43 100644 --- a/bsd/nfs/nfs_vnops.c +++ b/bsd/nfs/nfs_vnops.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2019 Apple Inc. All rights reserved. + * Copyright (c) 2000-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -164,43 +164,44 @@ int nfs3_vnop_symlink(struct vnop_symlink_args *); vnop_t **nfsv2_vnodeop_p; static const struct vnodeopv_entry_desc nfsv2_vnodeop_entries[] = { { .opve_op = &vnop_default_desc, .opve_impl = (vnop_t *)vn_default_error }, - { .opve_op = &vnop_lookup_desc, .opve_impl = (vnop_t *)nfs_vnop_lookup }, /* lookup */ - { .opve_op = &vnop_create_desc, .opve_impl = (vnop_t *)nfs3_vnop_create }, /* create */ - { .opve_op = &vnop_mknod_desc, .opve_impl = (vnop_t *)nfs3_vnop_mknod }, /* mknod */ - { .opve_op = &vnop_open_desc, .opve_impl = (vnop_t *)nfs_vnop_open }, /* open */ - { .opve_op = &vnop_close_desc, .opve_impl = (vnop_t *)nfs_vnop_close }, /* close */ - { .opve_op = &vnop_access_desc, .opve_impl = (vnop_t *)nfs_vnop_access }, /* access */ - { .opve_op = &vnop_getattr_desc, .opve_impl = (vnop_t *)nfs3_vnop_getattr }, /* getattr */ - { .opve_op = &vnop_setattr_desc, .opve_impl = (vnop_t *)nfs_vnop_setattr }, /* setattr */ - { .opve_op = &vnop_read_desc, .opve_impl = (vnop_t *)nfs_vnop_read }, /* read */ - { .opve_op = &vnop_write_desc, .opve_impl = (vnop_t *)nfs_vnop_write }, /* write */ - { .opve_op = &vnop_ioctl_desc, .opve_impl = (vnop_t *)nfs_vnop_ioctl }, /* ioctl */ - { .opve_op = &vnop_select_desc, .opve_impl = (vnop_t *)nfs_vnop_select }, /* select */ - { .opve_op = &vnop_revoke_desc, .opve_impl = (vnop_t *)nfs_vnop_revoke }, /* revoke */ - { .opve_op = &vnop_mmap_desc, .opve_impl = (vnop_t *)nfs_vnop_mmap }, /* mmap */ - { .opve_op = &vnop_mnomap_desc, .opve_impl = (vnop_t *)nfs_vnop_mnomap }, /* mnomap */ - { .opve_op = &vnop_fsync_desc, .opve_impl = (vnop_t *)nfs_vnop_fsync }, /* fsync */ - { .opve_op = &vnop_remove_desc, .opve_impl = (vnop_t *)nfs_vnop_remove }, /* remove */ 
- { .opve_op = &vnop_link_desc, .opve_impl = (vnop_t *)nfs3_vnop_link }, /* link */ - { .opve_op = &vnop_rename_desc, .opve_impl = (vnop_t *)nfs_vnop_rename }, /* rename */ - { .opve_op = &vnop_mkdir_desc, .opve_impl = (vnop_t *)nfs3_vnop_mkdir }, /* mkdir */ - { .opve_op = &vnop_rmdir_desc, .opve_impl = (vnop_t *)nfs3_vnop_rmdir }, /* rmdir */ - { .opve_op = &vnop_symlink_desc, .opve_impl = (vnop_t *)nfs3_vnop_symlink }, /* symlink */ - { .opve_op = &vnop_readdir_desc, .opve_impl = (vnop_t *)nfs_vnop_readdir }, /* readdir */ - { .opve_op = &vnop_readlink_desc, .opve_impl = (vnop_t *)nfs_vnop_readlink }, /* readlink */ - { .opve_op = &vnop_inactive_desc, .opve_impl = (vnop_t *)nfs_vnop_inactive }, /* inactive */ - { .opve_op = &vnop_reclaim_desc, .opve_impl = (vnop_t *)nfs_vnop_reclaim }, /* reclaim */ - { .opve_op = &vnop_strategy_desc, .opve_impl = (vnop_t *)err_strategy }, /* strategy */ - { .opve_op = &vnop_pathconf_desc, .opve_impl = (vnop_t *)nfs_vnop_pathconf }, /* pathconf */ - { .opve_op = &vnop_advlock_desc, .opve_impl = (vnop_t *)nfs_vnop_advlock }, /* advlock */ - { .opve_op = &vnop_bwrite_desc, .opve_impl = (vnop_t *)err_bwrite }, /* bwrite */ - { .opve_op = &vnop_pagein_desc, .opve_impl = (vnop_t *)nfs_vnop_pagein }, /* Pagein */ - { .opve_op = &vnop_pageout_desc, .opve_impl = (vnop_t *)nfs_vnop_pageout }, /* Pageout */ - { .opve_op = &vnop_copyfile_desc, .opve_impl = (vnop_t *)err_copyfile }, /* Copyfile */ - { .opve_op = &vnop_blktooff_desc, .opve_impl = (vnop_t *)nfs_vnop_blktooff }, /* blktooff */ - { .opve_op = &vnop_offtoblk_desc, .opve_impl = (vnop_t *)nfs_vnop_offtoblk }, /* offtoblk */ - { .opve_op = &vnop_blockmap_desc, .opve_impl = (vnop_t *)nfs_vnop_blockmap }, /* blockmap */ - { .opve_op = &vnop_monitor_desc, .opve_impl = (vnop_t *)nfs_vnop_monitor }, /* monitor */ + { .opve_op = &vnop_lookup_desc, .opve_impl = (vnop_t *)nfs_vnop_lookup }, /* lookup */ + { .opve_op = &vnop_create_desc, .opve_impl = (vnop_t *)nfs3_vnop_create }, /* create */ + { .opve_op = &vnop_mknod_desc, .opve_impl = (vnop_t *)nfs3_vnop_mknod }, /* mknod */ + { .opve_op = &vnop_open_desc, .opve_impl = (vnop_t *)nfs_vnop_open }, /* open */ + { .opve_op = &vnop_close_desc, .opve_impl = (vnop_t *)nfs_vnop_close }, /* close */ + { .opve_op = &vnop_access_desc, .opve_impl = (vnop_t *)nfs_vnop_access }, /* access */ + { .opve_op = &vnop_getattr_desc, .opve_impl = (vnop_t *)nfs3_vnop_getattr }, /* getattr */ + { .opve_op = &vnop_setattr_desc, .opve_impl = (vnop_t *)nfs_vnop_setattr }, /* setattr */ + { .opve_op = &vnop_read_desc, .opve_impl = (vnop_t *)nfs_vnop_read }, /* read */ + { .opve_op = &vnop_write_desc, .opve_impl = (vnop_t *)nfs_vnop_write }, /* write */ + { .opve_op = &vnop_ioctl_desc, .opve_impl = (vnop_t *)nfs_vnop_ioctl }, /* ioctl */ + { .opve_op = &vnop_select_desc, .opve_impl = (vnop_t *)nfs_vnop_select }, /* select */ + { .opve_op = &vnop_revoke_desc, .opve_impl = (vnop_t *)nfs_vnop_revoke }, /* revoke */ + { .opve_op = &vnop_mmap_desc, .opve_impl = (vnop_t *)nfs_vnop_mmap }, /* mmap */ + { .opve_op = &vnop_mmap_check_desc, .opve_impl = (vnop_t *)nfs_vnop_mmap_check }, /* mmap_check */ + { .opve_op = &vnop_mnomap_desc, .opve_impl = (vnop_t *)nfs_vnop_mnomap }, /* mnomap */ + { .opve_op = &vnop_fsync_desc, .opve_impl = (vnop_t *)nfs_vnop_fsync }, /* fsync */ + { .opve_op = &vnop_remove_desc, .opve_impl = (vnop_t *)nfs_vnop_remove }, /* remove */ + { .opve_op = &vnop_link_desc, .opve_impl = (vnop_t *)nfs3_vnop_link }, /* link */ + { .opve_op = &vnop_rename_desc, .opve_impl = (vnop_t 
*)nfs_vnop_rename }, /* rename */ + { .opve_op = &vnop_mkdir_desc, .opve_impl = (vnop_t *)nfs3_vnop_mkdir }, /* mkdir */ + { .opve_op = &vnop_rmdir_desc, .opve_impl = (vnop_t *)nfs3_vnop_rmdir }, /* rmdir */ + { .opve_op = &vnop_symlink_desc, .opve_impl = (vnop_t *)nfs3_vnop_symlink }, /* symlink */ + { .opve_op = &vnop_readdir_desc, .opve_impl = (vnop_t *)nfs_vnop_readdir }, /* readdir */ + { .opve_op = &vnop_readlink_desc, .opve_impl = (vnop_t *)nfs_vnop_readlink }, /* readlink */ + { .opve_op = &vnop_inactive_desc, .opve_impl = (vnop_t *)nfs_vnop_inactive }, /* inactive */ + { .opve_op = &vnop_reclaim_desc, .opve_impl = (vnop_t *)nfs_vnop_reclaim }, /* reclaim */ + { .opve_op = &vnop_strategy_desc, .opve_impl = (vnop_t *)err_strategy }, /* strategy */ + { .opve_op = &vnop_pathconf_desc, .opve_impl = (vnop_t *)nfs_vnop_pathconf }, /* pathconf */ + { .opve_op = &vnop_advlock_desc, .opve_impl = (vnop_t *)nfs_vnop_advlock }, /* advlock */ + { .opve_op = &vnop_bwrite_desc, .opve_impl = (vnop_t *)err_bwrite }, /* bwrite */ + { .opve_op = &vnop_pagein_desc, .opve_impl = (vnop_t *)nfs_vnop_pagein }, /* Pagein */ + { .opve_op = &vnop_pageout_desc, .opve_impl = (vnop_t *)nfs_vnop_pageout }, /* Pageout */ + { .opve_op = &vnop_copyfile_desc, .opve_impl = (vnop_t *)err_copyfile }, /* Copyfile */ + { .opve_op = &vnop_blktooff_desc, .opve_impl = (vnop_t *)nfs_vnop_blktooff }, /* blktooff */ + { .opve_op = &vnop_offtoblk_desc, .opve_impl = (vnop_t *)nfs_vnop_offtoblk }, /* offtoblk */ + { .opve_op = &vnop_blockmap_desc, .opve_impl = (vnop_t *)nfs_vnop_blockmap }, /* blockmap */ + { .opve_op = &vnop_monitor_desc, .opve_impl = (vnop_t *)nfs_vnop_monitor }, /* monitor */ { .opve_op = NULL, .opve_impl = NULL } }; const struct vnodeopv_desc nfsv2_vnodeop_opv_desc = @@ -211,52 +212,53 @@ const struct vnodeopv_desc nfsv2_vnodeop_opv_desc = vnop_t **nfsv4_vnodeop_p; static const struct vnodeopv_entry_desc nfsv4_vnodeop_entries[] = { { &vnop_default_desc, (vnop_t *)vn_default_error }, - { &vnop_lookup_desc, (vnop_t *)nfs_vnop_lookup }, /* lookup */ - { &vnop_create_desc, (vnop_t *)nfs4_vnop_create }, /* create */ - { &vnop_mknod_desc, (vnop_t *)nfs4_vnop_mknod }, /* mknod */ - { &vnop_open_desc, (vnop_t *)nfs_vnop_open }, /* open */ - { &vnop_close_desc, (vnop_t *)nfs_vnop_close }, /* close */ - { &vnop_access_desc, (vnop_t *)nfs_vnop_access }, /* access */ - { &vnop_getattr_desc, (vnop_t *)nfs4_vnop_getattr }, /* getattr */ - { &vnop_setattr_desc, (vnop_t *)nfs_vnop_setattr }, /* setattr */ - { &vnop_read_desc, (vnop_t *)nfs_vnop_read }, /* read */ - { &vnop_write_desc, (vnop_t *)nfs_vnop_write }, /* write */ - { &vnop_ioctl_desc, (vnop_t *)nfs_vnop_ioctl }, /* ioctl */ - { &vnop_select_desc, (vnop_t *)nfs_vnop_select }, /* select */ - { &vnop_revoke_desc, (vnop_t *)nfs_vnop_revoke }, /* revoke */ - { &vnop_mmap_desc, (vnop_t *)nfs_vnop_mmap }, /* mmap */ - { &vnop_mnomap_desc, (vnop_t *)nfs_vnop_mnomap }, /* mnomap */ - { &vnop_fsync_desc, (vnop_t *)nfs_vnop_fsync }, /* fsync */ - { &vnop_remove_desc, (vnop_t *)nfs_vnop_remove }, /* remove */ - { &vnop_link_desc, (vnop_t *)nfs4_vnop_link }, /* link */ - { &vnop_rename_desc, (vnop_t *)nfs_vnop_rename }, /* rename */ - { &vnop_mkdir_desc, (vnop_t *)nfs4_vnop_mkdir }, /* mkdir */ - { &vnop_rmdir_desc, (vnop_t *)nfs4_vnop_rmdir }, /* rmdir */ - { &vnop_symlink_desc, (vnop_t *)nfs4_vnop_symlink }, /* symlink */ - { &vnop_readdir_desc, (vnop_t *)nfs_vnop_readdir }, /* readdir */ - { &vnop_readlink_desc, (vnop_t *)nfs_vnop_readlink }, /* readlink */ - { 
&vnop_inactive_desc, (vnop_t *)nfs_vnop_inactive }, /* inactive */ - { &vnop_reclaim_desc, (vnop_t *)nfs_vnop_reclaim }, /* reclaim */ - { &vnop_strategy_desc, (vnop_t *)err_strategy }, /* strategy */ - { &vnop_pathconf_desc, (vnop_t *)nfs_vnop_pathconf }, /* pathconf */ - { &vnop_advlock_desc, (vnop_t *)nfs_vnop_advlock }, /* advlock */ - { &vnop_bwrite_desc, (vnop_t *)err_bwrite }, /* bwrite */ - { &vnop_pagein_desc, (vnop_t *)nfs_vnop_pagein }, /* Pagein */ - { &vnop_pageout_desc, (vnop_t *)nfs_vnop_pageout }, /* Pageout */ - { &vnop_copyfile_desc, (vnop_t *)err_copyfile }, /* Copyfile */ - { &vnop_blktooff_desc, (vnop_t *)nfs_vnop_blktooff }, /* blktooff */ - { &vnop_offtoblk_desc, (vnop_t *)nfs_vnop_offtoblk }, /* offtoblk */ - { &vnop_blockmap_desc, (vnop_t *)nfs_vnop_blockmap }, /* blockmap */ - { &vnop_getxattr_desc, (vnop_t *)nfs4_vnop_getxattr }, /* getxattr */ - { &vnop_setxattr_desc, (vnop_t *)nfs4_vnop_setxattr }, /* setxattr */ - { &vnop_removexattr_desc, (vnop_t *)nfs4_vnop_removexattr },/* removexattr */ - { &vnop_listxattr_desc, (vnop_t *)nfs4_vnop_listxattr },/* listxattr */ + { &vnop_lookup_desc, (vnop_t *)nfs_vnop_lookup }, /* lookup */ + { &vnop_create_desc, (vnop_t *)nfs4_vnop_create }, /* create */ + { &vnop_mknod_desc, (vnop_t *)nfs4_vnop_mknod }, /* mknod */ + { &vnop_open_desc, (vnop_t *)nfs_vnop_open }, /* open */ + { &vnop_close_desc, (vnop_t *)nfs_vnop_close }, /* close */ + { &vnop_access_desc, (vnop_t *)nfs_vnop_access }, /* access */ + { &vnop_getattr_desc, (vnop_t *)nfs4_vnop_getattr }, /* getattr */ + { &vnop_setattr_desc, (vnop_t *)nfs_vnop_setattr }, /* setattr */ + { &vnop_read_desc, (vnop_t *)nfs_vnop_read }, /* read */ + { &vnop_write_desc, (vnop_t *)nfs_vnop_write }, /* write */ + { &vnop_ioctl_desc, (vnop_t *)nfs_vnop_ioctl }, /* ioctl */ + { &vnop_select_desc, (vnop_t *)nfs_vnop_select }, /* select */ + { &vnop_revoke_desc, (vnop_t *)nfs_vnop_revoke }, /* revoke */ + { &vnop_mmap_desc, (vnop_t *)nfs_vnop_mmap }, /* mmap */ + { &vnop_mmap_check_desc, (vnop_t *)nfs_vnop_mmap_check }, /* mmap_check */ + { &vnop_mnomap_desc, (vnop_t *)nfs_vnop_mnomap }, /* mnomap */ + { &vnop_fsync_desc, (vnop_t *)nfs_vnop_fsync }, /* fsync */ + { &vnop_remove_desc, (vnop_t *)nfs_vnop_remove }, /* remove */ + { &vnop_link_desc, (vnop_t *)nfs4_vnop_link }, /* link */ + { &vnop_rename_desc, (vnop_t *)nfs_vnop_rename }, /* rename */ + { &vnop_mkdir_desc, (vnop_t *)nfs4_vnop_mkdir }, /* mkdir */ + { &vnop_rmdir_desc, (vnop_t *)nfs4_vnop_rmdir }, /* rmdir */ + { &vnop_symlink_desc, (vnop_t *)nfs4_vnop_symlink }, /* symlink */ + { &vnop_readdir_desc, (vnop_t *)nfs_vnop_readdir }, /* readdir */ + { &vnop_readlink_desc, (vnop_t *)nfs_vnop_readlink }, /* readlink */ + { &vnop_inactive_desc, (vnop_t *)nfs_vnop_inactive }, /* inactive */ + { &vnop_reclaim_desc, (vnop_t *)nfs_vnop_reclaim }, /* reclaim */ + { &vnop_strategy_desc, (vnop_t *)err_strategy }, /* strategy */ + { &vnop_pathconf_desc, (vnop_t *)nfs_vnop_pathconf }, /* pathconf */ + { &vnop_advlock_desc, (vnop_t *)nfs_vnop_advlock }, /* advlock */ + { &vnop_bwrite_desc, (vnop_t *)err_bwrite }, /* bwrite */ + { &vnop_pagein_desc, (vnop_t *)nfs_vnop_pagein }, /* Pagein */ + { &vnop_pageout_desc, (vnop_t *)nfs_vnop_pageout }, /* Pageout */ + { &vnop_copyfile_desc, (vnop_t *)err_copyfile }, /* Copyfile */ + { &vnop_blktooff_desc, (vnop_t *)nfs_vnop_blktooff }, /* blktooff */ + { &vnop_offtoblk_desc, (vnop_t *)nfs_vnop_offtoblk }, /* offtoblk */ + { &vnop_blockmap_desc, (vnop_t *)nfs_vnop_blockmap }, /* blockmap */ + { 
&vnop_getxattr_desc, (vnop_t *)nfs4_vnop_getxattr }, /* getxattr */ + { &vnop_setxattr_desc, (vnop_t *)nfs4_vnop_setxattr }, /* setxattr */ + { &vnop_removexattr_desc, (vnop_t *)nfs4_vnop_removexattr }, /* removexattr */ + { &vnop_listxattr_desc, (vnop_t *)nfs4_vnop_listxattr }, /* listxattr */ #if NAMEDSTREAMS - { &vnop_getnamedstream_desc, (vnop_t *)nfs4_vnop_getnamedstream }, /* getnamedstream */ - { &vnop_makenamedstream_desc, (vnop_t *)nfs4_vnop_makenamedstream }, /* makenamedstream */ - { &vnop_removenamedstream_desc, (vnop_t *)nfs4_vnop_removenamedstream },/* removenamedstream */ + { &vnop_getnamedstream_desc, (vnop_t *)nfs4_vnop_getnamedstream }, /* getnamedstream */ + { &vnop_makenamedstream_desc, (vnop_t *)nfs4_vnop_makenamedstream }, /* makenamedstream */ + { &vnop_removenamedstream_desc, (vnop_t *)nfs4_vnop_removenamedstream }, /* removenamedstream */ #endif - { &vnop_monitor_desc, (vnop_t *)nfs_vnop_monitor }, /* monitor */ + { &vnop_monitor_desc, (vnop_t *)nfs_vnop_monitor }, /* monitor */ { NULL, NULL } }; const struct vnodeopv_desc nfsv4_vnodeop_opv_desc = @@ -464,6 +466,23 @@ int nfs_getattr_internal(nfsnode_t, struct nfs_vattr *, vfs_context_t, int); int nfs_refresh_fh(nfsnode_t, vfs_context_t); +ZONE_VIEW_DEFINE(ZV_NFSDIROFF, "NFSV3 diroff", + KHEAP_ID_DATA_BUFFERS, sizeof(struct nfsdmap)); + +static void +nfs_dir_buf_cache_lookup_boundaries(struct nfsbuf *bp, int *sof, int *eof) +{ + if (bp) { + struct nfs_dir_buf_header *ndbhp = (struct nfs_dir_buf_header*)bp->nb_data; + if (sof && bp->nb_lblkno == 0) { + *sof = 1; + } + if (eof && ISSET(ndbhp->ndbh_flags, NDB_EOF)) { + *eof = 1; + } + } +} + /* * Update nfsnode attributes to avoid extra getattr calls for each direntry. * This function should be called only if RDIRPLUS flag is enabled. 
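
A recurring theme in the nfs_vnops.c hunks that follow is moving large NFS structures (fhandle_t, struct nfsreq, struct nfs_vattr) off the kernel stack and into zone or heap allocations. The sketch below is illustrative only and not part of the patch: it distills the allocate/use/free discipline the converted vnops (nfs_vnop_lookup, nfs3_vnop_create, nfs3_vnop_mkdir, and others) end up following, and it assumes the nfs_fhandle_zone and nfs_req_zone zones are set up elsewhere during NFS initialization.

/* Illustrative sketch (not part of the patch): the allocation pattern the converted vnops use. */
static int
nfs_vnop_alloc_pattern_sketch(void)
{
	fhandle_t *fh;
	struct nfsreq *req;
	struct nfs_vattr *nvattr;
	int error = 0;

	/* large objects come from zones / the heap instead of the kernel stack */
	fh = zalloc(nfs_fhandle_zone);
	req = zalloc_flags(nfs_req_zone, Z_WAITOK);
	MALLOC(nvattr, struct nfs_vattr *, sizeof(*nvattr), M_TEMP, M_WAITOK);
	NVATTR_INIT(nvattr);

	/* ... the RPC and attribute-cache work of the real vnop goes here ... */

	/* every exit path hands the objects back */
	NVATTR_CLEANUP(nvattr);
	NFS_ZFREE(nfs_fhandle_zone, fh);
	NFS_ZFREE(nfs_req_zone, req);
	FREE(nvattr, M_TEMP);
	return error;
}
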
@@ -475,6 +494,8 @@ nfs_rdirplus_update_node_attrs(nfsnode_t dnp, struct direntry *dp, fhandle_t *fh struct componentname cn; int isdot = (dp->d_namlen == 1) && (dp->d_name[0] == '.'); int isdotdot = (dp->d_namlen == 2) && (dp->d_name[0] == '.') && (dp->d_name[1] == '.'); + int should_update_fileid = nvattrp->nva_flags & NFS_FFLAG_FILEID_CONTAINS_XID; + uint64_t xid = 0; if (isdot || isdotdot) { return; @@ -486,7 +507,15 @@ nfs_rdirplus_update_node_attrs(nfsnode_t dnp, struct direntry *dp, fhandle_t *fh cn.cn_namelen = dp->d_namlen; cn.cn_nameiop = LOOKUP; + /* xid might be stashed in nva_fileid if rdirplus is enabled */ + if (should_update_fileid) { + xid = nvattrp->nva_fileid; + nvattrp->nva_fileid = dp->d_fileno; + } nfs_nget(NFSTOMP(dnp), dnp, &cn, fhp->fh_data, fhp->fh_len, nvattrp, savedxidp, RPCAUTH_UNKNOWN, NG_NOCREATE, &np); + if (should_update_fileid) { + nvattrp->nva_fileid = xid; + } if (np) { nfs_node_unlock(np); vnode_put(NFSTOV(np)); @@ -521,7 +550,7 @@ nfs_node_access_slot(nfsnode_t np, uid_t uid, int add) int nfs3_access_rpc(nfsnode_t np, u_int32_t *access, int rpcflags, vfs_context_t ctx) { - int error = 0, lockerror = ENOENT, status, slot; + int error = 0, lockerror = ENOENT, status = 0, slot; uint32_t access_result = 0; u_int64_t xid; struct nfsm_chain nmreq, nmrep; @@ -906,10 +935,10 @@ restart: } #if CONFIG_NFS4 if (!error && (nofp->nof_flags & NFS_OPEN_FILE_REOPEN)) { - nfs_mount_state_in_use_end(nmp, 0); error = nfs4_reopen(nofp, vfs_context_thread(ctx)); nofp = NULL; if (!error) { + nfs_mount_state_in_use_end(nmp, 0); goto restart; } } @@ -1126,7 +1155,7 @@ nfs_vnop_close( } error1 = error; - /* fflag should contain some combination of: FREAD, FWRITE, FHASLOCK */ + /* fflag should contain some combination of: FREAD, FWRITE */ accessMode = 0; if (fflag & FREAD) { accessMode |= NFS_OPEN_SHARE_ACCESS_READ; } @@ -1141,24 +1170,8 @@ nfs_vnop_close( // denyMode = NFS_OPEN_SHARE_DENY_WRITE; // else // denyMode = NFS_OPEN_SHARE_DENY_NONE; -#if 0 // Not yet - if (fflag & FHASLOCK) { - /* XXX assume FHASLOCK is for the deny mode and not flock */ - /* FHASLOCK flock will be unlocked in the close path, but the flag is not cleared. */ - if (nofp->nof_deny & NFS_OPEN_SHARE_DENY_READ) { - denyMode = NFS_OPEN_SHARE_DENY_BOTH; - } else if (nofp->nof_deny & NFS_OPEN_SHARE_DENY_WRITE) { - denyMode = NFS_OPEN_SHARE_DENY_WRITE; - } else { - denyMode = NFS_OPEN_SHARE_DENY_NONE; - } - } else { - denyMode = NFS_OPEN_SHARE_DENY_NONE; - } -#else // XXX don't do deny modes just yet (and never do it for !v4) denyMode = NFS_OPEN_SHARE_DENY_NONE; -#endif if (!accessMode) { /*
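
The restart loops touched above (nfs_vnop_open, nfs_vnop_close, and nfs_vnop_setattr further down) are all reordered the same way: nfs_mount_state_in_use_end() is no longer dropped before calling nfs4_reopen(); it is released only once the reopen succeeded and the loop is about to restart, and failing paths now release it explicitly before returning. The fragment below is a simplified, illustrative sketch of that flow combining the hunks; the nfs_mount_state_in_use_start() pairing and the parameter types are assumed from the surrounding (unshown) code.

/* Illustrative sketch (not part of the patch): simplified restart flow after the reordering. */
static int
nfs_reopen_restart_sketch(struct nfsmount *nmp, nfsnode_t np, struct nfs_open_owner *noop, vfs_context_t ctx)
{
	struct nfs_open_file *nofp = NULL;
	int error;

restart:
	if ((error = nfs_mount_state_in_use_start(nmp, vfs_context_thread(ctx)))) {
		return error;
	}
	error = nfs_open_file_find(np, noop, &nofp, 0, 0, 0);
#if CONFIG_NFS4
	if (!error && (nofp->nof_flags & NFS_OPEN_FILE_REOPEN)) {
		error = nfs4_reopen(nofp, vfs_context_thread(ctx));
		nofp = NULL;
		if (!error) {
			/* release the in-use reference only when actually restarting */
			nfs_mount_state_in_use_end(nmp, 0);
			goto restart;
		}
	}
#endif
	if (error) {
		/* error paths still have to balance the in-use reference */
		nfs_mount_state_in_use_end(nmp, 0);
		nfs_open_owner_rele(noop);
		return error;
	}
	/* ... the open/close/setattr specific work goes here ... */
	nfs_mount_state_in_use_end(nmp, error);
	return error;
}
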
VFSTONFS(mp) : NFSTONMP(np); - int error = 0, status, nfsvers, rpcflags = 0; + int error = 0, status = 0, nfsvers, rpcflags = 0; struct nfsm_chain nmreq, nmrep; if (nfs_mount_gone(nmp)) { @@ -1485,7 +1498,8 @@ nfs_refresh_fh(nfsnode_t np, vfs_context_t ctx) nfsnode_t dnp; const char *v_name = vnode_getname(vp); char *name; - int namelen, fhsize, refreshed; + int namelen, refreshed; + uint32_t fhsize; int error, wanted = 0; uint8_t *fhp; struct timespec ts = {.tv_sec = 2, .tv_nsec = 0}; @@ -1502,7 +1516,7 @@ nfs_refresh_fh(nfsnode_t np, vfs_context_t ctx) } dnp = VTONFS(dvp); - namelen = strlen(v_name); + namelen = NFS_STRLEN_INT(v_name); MALLOC(name, char *, namelen + 1, M_TEMP, M_WAITOK); if (name == NULL) { vnode_putname(v_name); @@ -1513,7 +1527,7 @@ nfs_refresh_fh(nfsnode_t np, vfs_context_t ctx) vnode_putname(v_name); /* Allocate the maximum size file handle */ - MALLOC(fhp, uint8_t *, NFS4_FHSIZE, M_TEMP, M_WAITOK); + MALLOC(fhp, uint8_t *, NFS4_FHSIZE, M_FHANDLE, M_WAITOK); if (fhp == NULL) { FREE(name, M_TEMP); return ESTALE; @@ -1521,7 +1535,7 @@ nfs_refresh_fh(nfsnode_t np, vfs_context_t ctx) if ((error = nfs_node_lock(np))) { FREE(name, M_TEMP); - FREE(fhp, M_TEMP); + FREE(fhp, M_FHANDLE); return ESTALE; } @@ -1609,7 +1623,7 @@ nfsmout: } FREE(name, M_TEMP); - FREE(fhp, M_TEMP); + FREE(fhp, M_FHANDLE); return error ? ESTALE : 0; } @@ -1634,10 +1648,10 @@ int nfs_getattr_internal(nfsnode_t np, struct nfs_vattr *nvap, vfs_context_t ctx, int flags) { struct nfsmount *nmp; - int error = 0, nfsvers, inprogset = 0, wanted = 0, avoidfloods; - struct nfs_vattr nvattr; + int error = 0, nfsvers, inprogset = 0, wanted = 0, avoidfloods = 0; + struct nfs_vattr *nvattr = NULL; struct timespec ts = { .tv_sec = 2, .tv_nsec = 0 }; - u_int64_t xid; + u_int64_t xid = 0; FSDBG_TOP(513, np->n_size, np, np->n_vattr.nva_size, np->n_flag); @@ -1649,7 +1663,8 @@ nfs_getattr_internal(nfsnode_t np, struct nfs_vattr *nvap, vfs_context_t ctx, in nfsvers = nmp->nm_vers; if (!nvap) { - nvap = &nvattr; + MALLOC(nvattr, struct nfs_vattr *, sizeof(*nvattr), M_TEMP, M_WAITOK); + nvap = nvattr; } NVATTR_INIT(nvap); @@ -1844,8 +1859,9 @@ nfsmout: } } - if (nvap == &nvattr) { + if (nvattr != NULL) { NVATTR_CLEANUP(nvap); + FREE(nvattr, M_TEMP); } else if (!(flags & NGA_ACL)) { /* make sure we don't return an ACL if it wasn't asked for */ NFS_BITMAP_CLR(nvap->nva_bitmap, NFS_FATTR_ACL); @@ -1897,7 +1913,7 @@ nfs3_vnop_getattr( int error; nfsnode_t np; uint64_t supported_attrs; - struct nfs_vattr nva; + struct nfs_vattr *nva; struct vnode_attr *vap = ap->a_vap; struct nfsmount *nmp; dev_t rdev; @@ -1934,34 +1950,37 @@ nfs3_vnop_getattr( } } - error = nfs_getattr(VTONFS(ap->a_vp), &nva, ap->a_context, NGA_CACHED); + MALLOC(nva, struct nfs_vattr *, sizeof(*nva), M_TEMP, M_WAITOK); + error = nfs_getattr(VTONFS(ap->a_vp), nva, ap->a_context, NGA_CACHED); if (error) { - return error; + goto out; } /* copy nva to *a_vap */ - VATTR_RETURN(vap, va_type, nva.nva_type); - VATTR_RETURN(vap, va_mode, nva.nva_mode); - rdev = makedev(nva.nva_rawdev.specdata1, nva.nva_rawdev.specdata2); + VATTR_RETURN(vap, va_type, nva->nva_type); + VATTR_RETURN(vap, va_mode, nva->nva_mode); + rdev = makedev(nva->nva_rawdev.specdata1, nva->nva_rawdev.specdata2); VATTR_RETURN(vap, va_rdev, rdev); - VATTR_RETURN(vap, va_uid, nva.nva_uid); - VATTR_RETURN(vap, va_gid, nva.nva_gid); - VATTR_RETURN(vap, va_nlink, nva.nva_nlink); - VATTR_RETURN(vap, va_fileid, nva.nva_fileid); - VATTR_RETURN(vap, va_data_size, nva.nva_size); - VATTR_RETURN(vap, 
va_data_alloc, nva.nva_bytes); - vap->va_access_time.tv_sec = nva.nva_timesec[NFSTIME_ACCESS]; - vap->va_access_time.tv_nsec = nva.nva_timensec[NFSTIME_ACCESS]; + VATTR_RETURN(vap, va_uid, nva->nva_uid); + VATTR_RETURN(vap, va_gid, nva->nva_gid); + VATTR_RETURN(vap, va_nlink, nva->nva_nlink); + VATTR_RETURN(vap, va_fileid, nva->nva_fileid); + VATTR_RETURN(vap, va_data_size, nva->nva_size); + VATTR_RETURN(vap, va_data_alloc, nva->nva_bytes); + vap->va_access_time.tv_sec = nva->nva_timesec[NFSTIME_ACCESS]; + vap->va_access_time.tv_nsec = nva->nva_timensec[NFSTIME_ACCESS]; VATTR_SET_SUPPORTED(vap, va_access_time); - vap->va_modify_time.tv_sec = nva.nva_timesec[NFSTIME_MODIFY]; - vap->va_modify_time.tv_nsec = nva.nva_timensec[NFSTIME_MODIFY]; + vap->va_modify_time.tv_sec = nva->nva_timesec[NFSTIME_MODIFY]; + vap->va_modify_time.tv_nsec = nva->nva_timensec[NFSTIME_MODIFY]; VATTR_SET_SUPPORTED(vap, va_modify_time); - vap->va_change_time.tv_sec = nva.nva_timesec[NFSTIME_CHANGE]; - vap->va_change_time.tv_nsec = nva.nva_timensec[NFSTIME_CHANGE]; + vap->va_change_time.tv_sec = nva->nva_timesec[NFSTIME_CHANGE]; + vap->va_change_time.tv_nsec = nva->nva_timensec[NFSTIME_CHANGE]; VATTR_SET_SUPPORTED(vap, va_change_time); // VATTR_RETURN(vap, va_encoding, 0xffff /* kTextEncodingUnknown */); +out: + FREE(nva, M_TEMP); return error; } @@ -1985,7 +2004,7 @@ nfs_vnop_setattr( int error = 0; int biosize, nfsvers, namedattrs; u_quad_t origsize, vapsize; - struct nfs_dulookup dul; + struct nfs_dulookup *dul; nfsnode_t dnp = NULL; int dul_in_progress = 0; vnode_t dvp = NULL; @@ -2074,10 +2093,10 @@ restart: error = EIO; } if (!error && (nofp->nof_flags & NFS_OPEN_FILE_REOPEN)) { - nfs_mount_state_in_use_end(nmp, 0); error = nfs4_reopen(nofp, vfs_context_thread(ctx)); nofp = NULL; if (!error) { + nfs_mount_state_in_use_end(nmp, 0); goto restart; } } @@ -2085,6 +2104,7 @@ restart: error = nfs_open_file_set_busy(nofp, vfs_context_thread(ctx)); } if (error) { + nfs_mount_state_in_use_end(nmp, 0); nfs_open_owner_rele(noop); return error; } @@ -2097,9 +2117,8 @@ restart: if (nfs_mount_state_error_should_restart(error)) { nfs_open_file_clear_busy(nofp); nofp = NULL; - if (nfs_mount_state_in_use_end(nmp, error)) { - goto restart; - } + nfs_mount_state_in_use_end(nmp, error); + goto restart; } } } @@ -2107,8 +2126,10 @@ restart: nfs_data_lock(np, NFS_DATA_LOCK_EXCLUSIVE); if (np->n_size > vap->va_data_size) { /* shrinking? 
*/ daddr64_t obn, bn; - int neweofoff, mustwrite; + int mustwrite; + off_t neweofoff; struct nfsbuf *bp; + nfsbufpgs pagemask; obn = (np->n_size - 1) / biosize; bn = vap->va_data_size / biosize; @@ -2141,8 +2162,9 @@ restart: mustwrite++; } } - bp->nb_dirty &= (1 << round_page_32(neweofoff) / PAGE_SIZE) - 1; - if (bp->nb_dirty) { + nfs_buf_pgs_get_page_mask(&pagemask, round_page_64(neweofoff) / PAGE_SIZE); + nfs_buf_pgs_bit_and(&bp->nb_dirty, &pagemask, &bp->nb_dirty); + if (nfs_buf_pgs_is_set(&bp->nb_dirty)) { mustwrite++; } if (!mustwrite) { @@ -2198,18 +2220,31 @@ restart: VATTR_IS_ACTIVE(vap, va_access_time) || (vap->va_vaflags & VA_UTIMES_NULL)) { if ((error = nfs_node_lock(np))) { +#if CONFIG_NFS4 + if (nfsvers >= NFS_VER4) { + nfs_mount_state_in_use_end(nmp, 0); + } +#endif return error; } if ((np->n_flag & NMODIFIED) && (vnode_vtype(vp) == VREG)) { nfs_node_unlock(np); error = nfs_vinvalbuf(vp, V_SAVE, ctx, 1); if (error == EINTR) { +#if CONFIG_NFS4 + if (nfsvers >= NFS_VER4) { + nfs_mount_state_in_use_end(nmp, 0); + } +#endif return error; } } else { nfs_node_unlock(np); } } + + MALLOC(dul, struct nfs_dulookup *, sizeof(*dul), M_TEMP, M_WAITOK); + if ((VATTR_IS_ACTIVE(vap, va_mode) || VATTR_IS_ACTIVE(vap, va_uid) || VATTR_IS_ACTIVE(vap, va_gid) || VATTR_IS_ACTIVE(vap, va_acl) || VATTR_IS_ACTIVE(vap, va_uuuid) || VATTR_IS_ACTIVE(vap, va_guuid)) && !(error = nfs_node_lock(np))) { @@ -2224,8 +2259,8 @@ restart: vnode_put(dvp); vnode_putname(vname); } else { - nfs_dulookup_init(&dul, dnp, vname, strlen(vname), ctx); - nfs_dulookup_start(&dul, dnp, ctx); + nfs_dulookup_init(dul, dnp, vname, NFS_STRLEN_INT(vname), ctx); + nfs_dulookup_start(dul, dnp, ctx); dul_in_progress = 1; } } else { @@ -2244,12 +2279,13 @@ restart: } if (dul_in_progress) { - nfs_dulookup_finish(&dul, dnp, ctx); + nfs_dulookup_finish(dul, dnp, ctx); nfs_node_clear_busy(dnp); vnode_put(dvp); vnode_putname(vname); } + FREE(dul, M_TEMP); FSDBG_BOT(512, np->n_size, vap->va_data_size, np->n_vattr.nva_size, error); if (VATTR_IS_ACTIVE(vap, va_data_size)) { if (error && (origsize != np->n_size) && @@ -2322,7 +2358,7 @@ nfs3_setattr_rpc( vfs_context_t ctx) { struct nfsmount *nmp = NFSTONMP(np); - int error = 0, lockerror = ENOENT, status, wccpostattr = 0, nfsvers; + int error = 0, lockerror = ENOENT, status = 0, wccpostattr = 0, nfsvers; u_int64_t xid, nextxid; struct nfsm_chain nmreq, nmrep; @@ -2509,17 +2545,21 @@ nfs_vnop_lookup( struct nfsmount *nmp; mount_t mp; int nfsvers, error, busyerror = ENOENT, isdot, isdotdot, negnamecache; - u_int64_t xid; - struct nfs_vattr nvattr; - int ngflags; + u_int64_t xid = 0; + struct nfs_vattr *nvattr; + int ngflags, skipdu = 0; struct vnop_access_args naa; - fhandle_t fh; - struct nfsreq rq, *req = &rq; + fhandle_t *fh; + struct nfsreq *req; *vpp = NULLVP; dnp = VTONFS(dvp); - NVATTR_INIT(&nvattr); + + fh = zalloc(nfs_fhandle_zone); + req = zalloc_flags(nfs_req_zone, Z_WAITOK); + MALLOC(nvattr, struct nfs_vattr *, sizeof(*nvattr), M_TEMP, M_WAITOK); + NVATTR_INIT(nvattr); mp = vnode_mount(dvp); nmp = VFSTONFS(mp); @@ -2547,17 +2587,21 @@ nfs_vnop_lookup( /* cache miss */ if ((nfsvers > NFS_VER2) && NMFLAG(nmp, RDIRPLUS)) { /* if rdirplus, try dir buf cache lookup */ - error = nfs_dir_buf_cache_lookup(dnp, &np, cnp, ctx, 0); + error = nfs_dir_buf_cache_lookup(dnp, &np, cnp, ctx, 0, &skipdu); if (!error && np) { /* dir buf cache hit */ *vpp = NFSTOV(np); error = -1; + } else if (skipdu) { + /* Skip lookup for du files */ + error = ENOENT; + goto error_return; } } if (error != -1) { /* 
cache miss */ break; } - /* FALLTHROUGH */ + OS_FALLTHROUGH; case -1: /* cache hit, not really an error */ OSAddAtomic64(1, &nfsstats.lookupcache_hits); @@ -2573,7 +2617,7 @@ nfs_vnop_lookup( /* compute actual success/failure based on accessibility */ error = nfs_vnop_access(&naa); - /* FALLTHROUGH */ + OS_FALLTHROUGH; default: /* unexpected error from cache_lookup */ goto error_return; @@ -2590,7 +2634,7 @@ nfs_vnop_lookup( } } if (isdotdot || isdot) { - fh.fh_len = 0; + fh->fh_len = 0; goto found; } #if CONFIG_NFS4 @@ -2608,7 +2652,7 @@ nfs_vnop_lookup( goto error_return; } if (NFS_BITMAP_ISSET(nmp->nm_fsattr.nfsa_bitmap, NFS_FATTR_MAXNAME) && - (cnp->cn_namelen > (int)nmp->nm_fsattr.nfsa_maxname)) { + (cnp->cn_namelen > nmp->nm_fsattr.nfsa_maxname)) { error = ENAMETOOLONG; goto error_return; } @@ -2620,11 +2664,11 @@ nfs_vnop_lookup( error = nmp->nm_funcs->nf_lookup_rpc_async(dnp, cnp->cn_nameptr, cnp->cn_namelen, ctx, &req); nfsmout_if(error); - error = nmp->nm_funcs->nf_lookup_rpc_async_finish(dnp, cnp->cn_nameptr, cnp->cn_namelen, ctx, req, &xid, &fh, &nvattr); + error = nmp->nm_funcs->nf_lookup_rpc_async_finish(dnp, cnp->cn_nameptr, cnp->cn_namelen, ctx, req, &xid, fh, nvattr); nfsmout_if(error); /* is the file handle the same as this directory's file handle? */ - isdot = NFS_CMPFH(dnp, fh.fh_data, fh.fh_len); + isdot = NFS_CMPFH(dnp, fh->fh_data, fh->fh_len); found: if (flags & ISLASTCN) { @@ -2655,13 +2699,13 @@ found: } newvp = dvp; nfs_node_lock_force(dnp); - if (fh.fh_len && (dnp->n_xid <= xid)) { - nfs_loadattrcache(dnp, &nvattr, &xid, 0); + if (fh->fh_len && (dnp->n_xid <= xid)) { + nfs_loadattrcache(dnp, nvattr, &xid, 0); } nfs_node_unlock(dnp); } else { ngflags = (cnp->cn_flags & MAKEENTRY) ? NG_MAKEENTRY : 0; - error = nfs_nget(mp, dnp, cnp, fh.fh_data, fh.fh_len, &nvattr, &xid, rq.r_auth, ngflags, &np); + error = nfs_nget(mp, dnp, cnp, fh->fh_data, fh->fh_len, nvattr, &xid, req->r_auth, ngflags, &np); if (error) { goto error_return; } @@ -2690,7 +2734,10 @@ nfsmout: nfs_node_unlock(dnp); } error_return: - NVATTR_CLEANUP(&nvattr); + NVATTR_CLEANUP(nvattr); + NFS_ZFREE(nfs_fhandle_zone, fh); + NFS_ZFREE(nfs_req_zone, req); + FREE(nvattr, M_TEMP); if (!busyerror) { nfs_node_clear_busy(dnp); } @@ -2719,11 +2766,11 @@ nfs_vnop_readlink( nfsnode_t np = VTONFS(ap->a_vp); struct nfsmount *nmp; int error = 0, nfsvers; - uint32_t buflen; + size_t buflen; uio_t uio = ap->a_uio; struct nfsbuf *bp = NULL; - struct timespec ts; - int timeo; + struct timespec ts = { .tv_sec = 0, .tv_nsec = 0 }; + long timeo = 0; if (vnode_vtype(ap->a_vp) != VLNK) { return EPERM; @@ -2763,7 +2810,7 @@ retry: } if (nfs_readlink_nocache) { - NFS_VNOP_DBG("timeo = %d ts.tv_sec = %ld need refresh = %d cached = %d\n", timeo, ts.tv_sec, + NFS_VNOP_DBG("timeo = %ld ts.tv_sec = %ld need refresh = %d cached = %d\n", timeo, ts.tv_sec, (np->n_rltim.tv_sec + timeo) < ts.tv_sec || nfs_readlink_nocache > 1, ISSET(bp->nb_flags, NB_CACHE) == NB_CACHE); /* n_rltim is synchronized by the associated nfs buf */ @@ -2793,14 +2840,18 @@ readagain: bp->nb_validoff = 0; bp->nb_validend = buflen; np->n_rltim = ts; - NFS_VNOP_DBG("readlink of %.*s\n", bp->nb_validend, (char *)bp->nb_data); + NFS_VNOP_DBG("readlink of %.*s\n", (int32_t)bp->nb_validend, (char *)bp->nb_data); } } else { - NFS_VNOP_DBG("got cached link of %.*s\n", bp->nb_validend, (char *)bp->nb_data); + NFS_VNOP_DBG("got cached link of %.*s\n", (int32_t)bp->nb_validend, (char *)bp->nb_data); } if (!error && (bp->nb_validend > 0)) { - error = uiomove(bp->nb_data, 
bp->nb_validend, uio); + int validend32 = bp->nb_validend > INT_MAX ? INT_MAX : (int)bp->nb_validend; + error = uiomove(bp->nb_data, validend32, uio); + if (!error && bp->nb_validend > validend32) { + error = uiomove(bp->nb_data + validend32, (int)(bp->nb_validend - validend32), uio); + } } FSDBG(531, np, bp->nb_validend, 0, error); nfs_buf_release(bp, 1); @@ -2811,11 +2862,11 @@ readagain: * Do a readlink RPC. */ int -nfs3_readlink_rpc(nfsnode_t np, char *buf, uint32_t *buflenp, vfs_context_t ctx) +nfs3_readlink_rpc(nfsnode_t np, char *buf, size_t *buflenp, vfs_context_t ctx) { struct nfsmount *nmp; int error = 0, lockerror = ENOENT, nfsvers, status; - uint32_t len; + size_t len; u_int64_t xid; struct nfsm_chain nmreq, nmrep; @@ -2849,7 +2900,7 @@ nfs3_readlink_rpc(nfsnode_t np, char *buf, uint32_t *buflenp, vfs_context_t ctx) } if (len >= *buflenp) { if (np->n_size && (np->n_size < *buflenp)) { - len = np->n_size; + len = (size_t)np->n_size; } else { len = *buflenp - 1; } @@ -2879,7 +2930,7 @@ nfs_read_rpc(nfsnode_t np, uio_t uio, vfs_context_t ctx) size_t nmrsize, len, retlen; user_ssize_t tsiz; off_t txoffset; - struct nfsreq rq, *req = &rq; + struct nfsreq *req; #if CONFIG_NFS4 uint32_t stategenid = 0, restart = 0; #endif @@ -2898,6 +2949,7 @@ nfs_read_rpc(nfsnode_t np, uio_t uio, vfs_context_t ctx) return EFBIG; } + req = zalloc_flags(nfs_req_zone, Z_WAITOK); while (tsiz > 0) { len = retlen = (tsiz > (user_ssize_t)nmrsize) ? nmrsize : (size_t)tsiz; FSDBG(536, np, txoffset, len, 0); @@ -2950,6 +3002,7 @@ nfs_read_rpc(nfsnode_t np, uio_t uio, vfs_context_t ctx) } } + NFS_ZFREE(nfs_req_zone, req); FSDBG_BOT(536, np, eof, uio_resid(uio), error); return error; } @@ -3001,8 +3054,8 @@ nfs3_read_rpc_async_finish( size_t *lenp, int *eofp) { - int error = 0, lockerror, nfsvers, status, eof = 0; - size_t retlen = 0; + int error = 0, lockerror, nfsvers, status = 0, eof = 0; + uint32_t retlen = 0; uint64_t xid; struct nfsmount *nmp; struct nfsm_chain nmrep; @@ -3082,8 +3135,10 @@ nfs_vnop_write( struct nfsbuf *bp; struct nfsmount *nmp = VTONMP(vp); daddr64_t lbn; - int biosize; - int n, on, error = 0; + uint32_t biosize; + int error = 0; + off_t n, on; + int n32; off_t boff, start, end; uio_t auio; char auio_buf[UIO_SIZEOF(1)]; @@ -3178,7 +3233,7 @@ nfs_vnop_write( */ struct nfsbuf *eofbp = NULL; daddr64_t eofbn = np->n_size / biosize; - int eofoff = np->n_size % biosize; + uint32_t eofoff = np->n_size % biosize; lbn = uio_offset(uio) / biosize; if (eofoff && (eofbn < lbn)) { @@ -3268,7 +3323,7 @@ again: * that's just masquerading as new written data.) 
*/ if (bp->nb_dirtyend > 0) { - if (on > bp->nb_dirtyend || (on + n) < bp->nb_dirtyoff || bp->nb_dirty) { + if (on > bp->nb_dirtyend || (on + n) < bp->nb_dirtyoff || nfs_buf_pgs_is_set(&bp->nb_dirty)) { FSDBG(515, np, uio_offset(uio), bp, 0xd15c001); /* write/commit buffer "synchronously" */ /* (NB_STABLE indicates that data writes should be FILESYNC) */ @@ -3280,15 +3335,21 @@ again: } goto again; } - } else if (bp->nb_dirty) { - int firstpg, lastpg; - u_int32_t pagemask; + } else if (nfs_buf_pgs_is_set(&bp->nb_dirty)) { + off_t firstpg = 0, lastpg = 0; + nfsbufpgs pagemask, pagemaskand; /* calculate write range pagemask */ - firstpg = on / PAGE_SIZE; - lastpg = (on + n - 1) / PAGE_SIZE; - pagemask = ((1 << (lastpg + 1)) - 1) & ~((1 << firstpg) - 1); + if (n > 0) { + firstpg = on / PAGE_SIZE; + lastpg = (on + n - 1) / PAGE_SIZE; + nfs_buf_pgs_set_pages_between(&pagemask, firstpg, lastpg + 1); + } else { + NBPGS_ERASE(&pagemask); + } /* check if there are dirty pages outside the write range */ - if (bp->nb_dirty & ~pagemask) { + nfs_buf_pgs_bit_not(&pagemask); + nfs_buf_pgs_bit_and(&bp->nb_dirty, &pagemask, &pagemaskand); + if (nfs_buf_pgs_is_set(&pagemaskand)) { FSDBG(515, np, uio_offset(uio), bp, 0xd15c002); /* write/commit buffer "synchronously" */ /* (NB_STABLE indicates that data writes should be FILESYNC) */ @@ -3304,7 +3365,7 @@ again: /* make sure that the dirty range encompasses those pages */ if (NBPGDIRTY(bp, firstpg) || NBPGDIRTY(bp, lastpg)) { FSDBG(515, np, uio_offset(uio), bp, 0xd15c003); - bp->nb_dirtyoff = min(on, firstpg * PAGE_SIZE); + bp->nb_dirtyoff = MIN(on, firstpg * PAGE_SIZE); if (NBPGDIRTY(bp, lastpg)) { bp->nb_dirtyend = (lastpg + 1) * PAGE_SIZE; /* clip to EOF */ @@ -3337,21 +3398,20 @@ again: if ((lbn == eofbn) && ISSET(bp->nb_flags, NB_CACHE)) { /* ...check that all pages in buffer are valid */ int endpg = ((neweofoff ? neweofoff : biosize) - 1) / PAGE_SIZE; - u_int32_t pagemask; + nfsbufpgs pagemask, pagemaskand; /* pagemask only has to extend to last page being written to */ - pagemask = (1 << (endpg + 1)) - 1; + nfs_buf_pgs_get_page_mask(&pagemask, endpg + 1); FSDBG(515, 0xb1ffa001, bp->nb_valid, pagemask, 0); - if ((bp->nb_valid & pagemask) != pagemask) { + nfs_buf_pgs_bit_and(&bp->nb_valid, &pagemask, &pagemaskand); + if (!NBPGS_IS_EQUAL(&pagemaskand, &pagemask)) { /* zerofill any hole */ if (on > bp->nb_validend) { - int i; - for (i = bp->nb_validend / PAGE_SIZE; i <= (on - 1) / PAGE_SIZE; i++) { + for (off_t i = bp->nb_validend / PAGE_SIZE; i <= (on - 1) / PAGE_SIZE; i++) { NBPGVALID_SET(bp, i); } NFS_BUF_MAP(bp); FSDBG(516, bp, bp->nb_validend, on - bp->nb_validend, 0xf01e); - bzero((char *)bp->nb_data + bp->nb_validend, - on - bp->nb_validend); + NFS_BZERO((char *)bp->nb_data + bp->nb_validend, on - bp->nb_validend); } /* zerofill any trailing data in the last page */ if (neweofoff) { @@ -3404,8 +3464,7 @@ again: * page cache. 
*/ if (!ISSET(bp->nb_flags, NB_CACHE) && (n < biosize)) { - int firstpg, lastpg, dirtypg; - int firstpgoff, lastpgoff; + off_t firstpgoff, lastpgoff, firstpg, lastpg, dirtypg; start = end = -1; firstpg = on / PAGE_SIZE; firstpgoff = on & PAGE_MASK; @@ -3473,7 +3532,7 @@ again: /* there's a dirty page in the way, so just do two reads */ /* we'll read the preceding data here */ uio_reset(auio, boff + start, UIO_SYSSPACE, UIO_READ); - uio_addiov(auio, CAST_USER_ADDR_T(bp->nb_data + start), on - start); + NFS_UIO_ADDIOV(auio, CAST_USER_ADDR_T(bp->nb_data + start), on - start); error = nfs_read_rpc(np, auio, ctx); if (error) { /* couldn't read the data, so treat buffer as synchronous NOCACHE */ @@ -3493,7 +3552,7 @@ again: bp->nb_validend = on; } if ((off_t)np->n_size > boff + bp->nb_validend) { - bp->nb_validend = min(np->n_size - (boff + start), biosize); + bp->nb_validend = MIN(np->n_size - (boff + start), biosize); } /* validate any pages before the write offset */ for (; start < on / PAGE_SIZE; start += PAGE_SIZE) { @@ -3525,12 +3584,12 @@ again: * read nothing. So, just zero the buffer instead. */ FSDBG(516, bp, start, end - start, 0xd00dee00); - bzero(bp->nb_data + start, end - start); + NFS_BZERO(bp->nb_data + start, end - start); error = 0; } else { /* now we'll read the (rest of the) data */ uio_reset(auio, boff + start, UIO_SYSSPACE, UIO_READ); - uio_addiov(auio, CAST_USER_ADDR_T(bp->nb_data + start), end - start); + NFS_UIO_ADDIOV(auio, CAST_USER_ADDR_T(bp->nb_data + start), end - start); error = nfs_read_rpc(np, auio, ctx); if (error) { /* couldn't read the data, so treat buffer as synchronous NOCACHE */ @@ -3551,14 +3610,14 @@ again: bp->nb_validend = end; } if ((off_t)np->n_size > boff + bp->nb_validend) { - bp->nb_validend = min(np->n_size - (boff + start), biosize); + bp->nb_validend = MIN(np->n_size - (boff + start), biosize); } /* validate any pages before the write offset's page */ - for (; start < (off_t)trunc_page_32(on); start += PAGE_SIZE) { + for (; start < (off_t)trunc_page_64(on); start += PAGE_SIZE) { NBPGVALID_SET(bp, start / PAGE_SIZE); } /* validate any pages after the range of pages being written to */ - for (; (end - 1) > (off_t)round_page_32(on + n - 1); end -= PAGE_SIZE) { + for (; (end - 1) > (off_t)round_page_64(on + n - 1); end -= PAGE_SIZE) { NBPGVALID_SET(bp, (end - 1) / PAGE_SIZE); } } @@ -3578,7 +3637,15 @@ skipread: nfs_node_unlock(np); NFS_BUF_MAP(bp); - error = uiomove((char *)bp->nb_data + on, n, uio); + if (n < 0) { + error = EINVAL; + } else { + n32 = n > INT_MAX ? 
INT_MAX : (int)n; + error = uiomove(bp->nb_data + on, n32, uio); + if (!error && n > n32) { + error = uiomove(bp->nb_data + on + n32, (int)(n - n32), uio); + } + } if (error) { SET(bp->nb_flags, NB_ERROR); nfs_buf_release(bp, 1); @@ -3600,8 +3667,8 @@ skipread: */ } if (bp->nb_dirtyend > 0) { - bp->nb_dirtyoff = min(on, bp->nb_dirtyoff); - bp->nb_dirtyend = max((on + n), bp->nb_dirtyend); + bp->nb_dirtyoff = MIN(on, bp->nb_dirtyoff); + bp->nb_dirtyend = MAX((on + n), bp->nb_dirtyend); } else { bp->nb_dirtyoff = on; bp->nb_dirtyend = on + n; @@ -3611,8 +3678,8 @@ skipread: bp->nb_validoff = bp->nb_dirtyoff; bp->nb_validend = bp->nb_dirtyend; } else { - bp->nb_validoff = min(bp->nb_validoff, bp->nb_dirtyoff); - bp->nb_validend = max(bp->nb_validend, bp->nb_dirtyend); + bp->nb_validoff = MIN(bp->nb_validoff, bp->nb_dirtyoff); + bp->nb_validend = MAX(bp->nb_validend, bp->nb_dirtyend); } if (!ISSET(bp->nb_flags, NB_CACHE)) { nfs_buf_normalize_valid_range(np, bp); @@ -3637,6 +3704,9 @@ skipread: if (error) { goto out; } + if (np->n_needcommitcnt >= NFS_A_LOT_OF_NEEDCOMMITS) { + nfs_flushcommits(np, 1); + } } else if (((n + on) == biosize) || (ioflag & IO_APPEND) || (ioflag & IO_NOCACHE) || ISSET(bp->nb_flags, NB_NOCACHE)) { SET(bp->nb_flags, NB_ASYNC); @@ -3655,10 +3725,6 @@ skipread: nfs_buf_write_delayed(bp); } - - if (np->n_needcommitcnt >= NFS_A_LOT_OF_NEEDCOMMITS) { - nfs_flushcommits(np, 1); - } } while (uio_resid(uio) > 0 && n > 0); out: @@ -3699,10 +3765,10 @@ nfs_write_rpc2( { struct nfsmount *nmp; int error = 0, nfsvers; - int wverfset, commit, committed; - uint64_t wverf = 0, wverf2; - size_t nmwsize, totalsize, tsiz, len, rlen; - struct nfsreq rq, *req = &rq; + int wverfset, commit = 0, committed; + uint64_t wverf = 0, wverf2 = 0; + size_t nmwsize, totalsize, tsiz, len, rlen = 0; + struct nfsreq *req; #if CONFIG_NFS4 uint32_t stategenid = 0, restart = 0; #endif @@ -3737,6 +3803,7 @@ nfs_write_rpc2( return EIO; } + req = zalloc_flags(nfs_req_zone, Z_WAITOK); while (tsiz > 0) { len = (tsiz > nmwsize) ? 
nmwsize : tsiz; FSDBG(537, np, uio_offset(uio), len, 0); @@ -3828,6 +3895,7 @@ nfs_write_rpc2( if (error) { uio_setresid(uio, tsiz); } + NFS_ZFREE(nfs_req_zone, req); FSDBG_BOT(537, np, committed, uio_resid(uio), error); return error; } @@ -3997,15 +4065,15 @@ nfs3_vnop_mknod( nfsnode_t np = NULL; struct nfsmount *nmp; nfsnode_t dnp = VTONFS(dvp); - struct nfs_vattr nvattr; - fhandle_t fh; - int error = 0, lockerror = ENOENT, busyerror = ENOENT, status, wccpostattr = 0; + struct nfs_vattr *nvattr; + fhandle_t *fh; + int error = 0, lockerror = ENOENT, busyerror = ENOENT, status = 0, wccpostattr = 0; struct timespec premtime = { .tv_sec = 0, .tv_nsec = 0 }; u_int32_t rdev; u_int64_t xid = 0, dxid; int nfsvers, gotuid, gotgid; struct nfsm_chain nmreq, nmrep; - struct nfsreq rq, *req = &rq; + struct nfsreq *req; nmp = VTONMP(dvp); if (nfs_mount_gone(nmp)) { @@ -4044,6 +4112,10 @@ nfs3_vnop_mknod( nfsm_chain_null(&nmreq); nfsm_chain_null(&nmrep); + fh = zalloc(nfs_fhandle_zone); + req = zalloc_flags(nfs_req_zone, Z_WAITOK); + MALLOC(nvattr, struct nfs_vattr *, sizeof(*nvattr), M_TEMP, M_WAITOK); + nfsm_chain_build_alloc_init(error, &nmreq, NFSX_FH(nfsvers) + 4 * NFSX_UNSIGNED + nfsm_rndup(cnp->cn_namelen) + NFSX_SATTR(nfsvers)); @@ -4081,7 +4153,7 @@ nfs3_vnop_mknod( dnp->n_flag &= ~NNEGNCENTRIES; cache_purge_negatives(dvp); } - error = nfsm_chain_get_fh_attr(nmp, &nmrep, dnp, ctx, nfsvers, &xid, &fh, &nvattr); + error = nfsm_chain_get_fh_attr(nmp, &nmrep, dnp, ctx, nfsvers, &xid, fh, nvattr); } if (nfsvers == NFS_VER3) { nfsm_chain_get_wcc_data(error, &nmrep, dnp, &premtime, &wccpostattr, &dxid); @@ -4104,8 +4176,8 @@ nfsmout: nfs_getattr(dnp, NULL, ctx, wccpostattr ? NGA_CACHED : NGA_UNCACHED); } - if (!error && fh.fh_len) { - error = nfs_nget(NFSTOMP(dnp), dnp, cnp, fh.fh_data, fh.fh_len, &nvattr, &xid, rq.r_auth, NG_MAKEENTRY, &np); + if (!error && fh->fh_len) { + error = nfs_nget(NFSTOMP(dnp), dnp, cnp, fh->fh_data, fh->fh_len, nvattr, &xid, req->r_auth, NG_MAKEENTRY, &np); } if (!error && !np) { error = nfs_lookitup(dnp, cnp->cn_nameptr, cnp->cn_namelen, ctx, &np); @@ -4118,9 +4190,9 @@ nfsmout: } if (!error && (gotuid || gotgid) && - (!newvp || nfs_getattrcache(np, &nvattr, 0) || - (gotuid && (nvattr.nva_uid != vap->va_uid)) || - (gotgid && (nvattr.nva_gid != vap->va_gid)))) { + (!newvp || nfs_getattrcache(np, nvattr, 0) || + (gotuid && (nvattr->nva_uid != vap->va_uid)) || + (gotgid && (nvattr->nva_gid != vap->va_gid)))) { /* clear ID bits if server didn't use them (or we can't tell) */ VATTR_CLEAR_SUPPORTED(vap, va_uid); VATTR_CLEAR_SUPPORTED(vap, va_gid); @@ -4134,6 +4206,9 @@ nfsmout: *vpp = newvp; nfs_node_unlock(np); } + NFS_ZFREE(nfs_fhandle_zone, fh); + NFS_ZFREE(nfs_req_zone, req); + FREE(nvattr, M_TEMP); return error; } @@ -4156,20 +4231,20 @@ nfs3_vnop_create( vnode_t dvp = ap->a_dvp; struct vnode_attr *vap = ap->a_vap; struct componentname *cnp = ap->a_cnp; - struct nfs_vattr nvattr; - fhandle_t fh; + struct nfs_vattr *nvattr; + fhandle_t *fh; nfsnode_t np = NULL; struct nfsmount *nmp; nfsnode_t dnp = VTONFS(dvp); vnode_t newvp = NULL; - int error = 0, lockerror = ENOENT, busyerror = ENOENT, status, wccpostattr = 0, fmode = 0; + int error = 0, lockerror = ENOENT, busyerror = ENOENT, status = 0, wccpostattr = 0, fmode = 0; struct timespec premtime = { .tv_sec = 0, .tv_nsec = 0 }; int nfsvers, gotuid, gotgid; - u_int64_t xid, dxid; + u_int64_t xid = 0, dxid; uint32_t val; struct nfsm_chain nmreq, nmrep; - struct nfsreq rq, *req = &rq; - struct nfs_dulookup dul; + struct nfsreq 
*req; + struct nfs_dulookup *dul; int dul_in_progress = 0; int namedattrs; @@ -4203,10 +4278,15 @@ nfs3_vnop_create( } } + fh = zalloc(nfs_fhandle_zone); + req = zalloc_flags(nfs_req_zone, Z_WAITOK); + MALLOC(dul, struct nfs_dulookup *, sizeof(*dul), M_TEMP, M_WAITOK); + MALLOC(nvattr, struct nfs_vattr *, sizeof(*nvattr), M_TEMP, M_WAITOK); + again: error = busyerror = nfs_node_set_busy(dnp, vfs_context_thread(ctx)); if (!namedattrs) { - nfs_dulookup_init(&dul, dnp, cnp->cn_nameptr, cnp->cn_namelen, ctx); + nfs_dulookup_init(dul, dnp, cnp->cn_nameptr, cnp->cn_namelen, ctx); } nfsm_chain_null(&nmreq); @@ -4244,7 +4324,7 @@ again: vfs_context_thread(ctx), vfs_context_ucred(ctx), NULL, 0, NULL, &req); if (!error) { if (!namedattrs) { - nfs_dulookup_start(&dul, dnp, ctx); + nfs_dulookup_start(dul, dnp, ctx); dul_in_progress = 1; } error = nfs_request_async_finish(req, &nmrep, &xid, &status); @@ -4259,7 +4339,7 @@ again: dnp->n_flag &= ~NNEGNCENTRIES; cache_purge_negatives(dvp); } - error = nfsm_chain_get_fh_attr(nmp, &nmrep, dnp, ctx, nfsvers, &xid, &fh, &nvattr); + error = nfsm_chain_get_fh_attr(nmp, &nmrep, dnp, ctx, nfsvers, &xid, fh, nvattr); } if (nfsvers == NFS_VER3) { nfsm_chain_get_wcc_data(error, &nmrep, dnp, &premtime, &wccpostattr, &dxid); @@ -4282,8 +4362,8 @@ nfsmout: nfs_getattr(dnp, NULL, ctx, wccpostattr ? NGA_CACHED : NGA_UNCACHED); } - if (!error && fh.fh_len) { - error = nfs_nget(NFSTOMP(dnp), dnp, cnp, fh.fh_data, fh.fh_len, &nvattr, &xid, rq.r_auth, NG_MAKEENTRY, &np); + if (!error && fh->fh_len) { + error = nfs_nget(NFSTOMP(dnp), dnp, cnp, fh->fh_data, fh->fh_len, nvattr, &xid, req->r_auth, NG_MAKEENTRY, &np); } if (!error && !np) { error = nfs_lookitup(dnp, cnp->cn_nameptr, cnp->cn_namelen, ctx, &np); @@ -4293,7 +4373,7 @@ nfsmout: } if (dul_in_progress) { - nfs_dulookup_finish(&dul, dnp, ctx); + nfs_dulookup_finish(dul, dnp, ctx); } if (!busyerror) { nfs_node_clear_busy(dnp); @@ -4328,9 +4408,9 @@ nfsmout: *ap->a_vpp = newvp; } if (!error && (gotuid || gotgid) && - (!newvp || nfs_getattrcache(np, &nvattr, 0) || - (gotuid && (nvattr.nva_uid != vap->va_uid)) || - (gotgid && (nvattr.nva_gid != vap->va_gid)))) { + (!newvp || nfs_getattrcache(np, nvattr, 0) || + (gotuid && (nvattr->nva_uid != vap->va_uid)) || + (gotgid && (nvattr->nva_gid != vap->va_gid)))) { /* clear ID bits if server didn't use them (or we can't tell) */ VATTR_CLEAR_SUPPORTED(vap, va_uid); VATTR_CLEAR_SUPPORTED(vap, va_gid); @@ -4338,6 +4418,10 @@ nfsmout: if (!error) { nfs_node_unlock(np); } + NFS_ZFREE(nfs_fhandle_zone, fh); + NFS_ZFREE(nfs_req_zone, req); + FREE(dul, M_TEMP); + FREE(nvattr, M_TEMP); return error; } @@ -4370,9 +4454,9 @@ nfs_vnop_remove( nfsnode_t dnp = VTONFS(dvp); nfsnode_t np = VTONFS(vp); int error = 0, nfsvers, namedattrs, inuse, gotattr = 0, flushed = 0, setsize = 0; - struct nfs_vattr nvattr; + struct nfs_vattr *nvattr; struct nfsmount *nmp; - struct nfs_dulookup dul; + struct nfs_dulookup *dul; /* XXX prevent removing a sillyrenamed file? 
*/ @@ -4380,13 +4464,20 @@ nfs_vnop_remove( if (nfs_mount_gone(nmp)) { return ENXIO; } + + if (vnode_isdir(vp)) { + return EPERM; + } + nfsvers = nmp->nm_vers; namedattrs = (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR); + MALLOC(dul, struct nfs_dulookup *, sizeof(*dul), M_TEMP, M_WAITOK); + MALLOC(nvattr, struct nfs_vattr *, sizeof(*nvattr), M_TEMP, M_WAITOK); again_relock: error = nfs_node_set_busy2(dnp, np, vfs_context_thread(ctx)); if (error) { - return error; + goto out_free; } /* lock the node while we remove the file */ @@ -4399,8 +4490,9 @@ again_relock: lck_mtx_unlock(nfs_node_hash_mutex); if (!namedattrs) { - nfs_dulookup_init(&dul, dnp, cnp->cn_nameptr, cnp->cn_namelen, ctx); + nfs_dulookup_init(dul, dnp, cnp->cn_nameptr, cnp->cn_namelen, ctx); } + again: inuse = vnode_isinuse(vp, 0); if ((ap->a_flags & VNODE_REMOVE_NODELETEBUSY) && inuse) { @@ -4409,13 +4501,13 @@ again: goto out; } if (inuse && !gotattr) { - if (nfs_getattr(np, &nvattr, ctx, NGA_CACHED)) { - nvattr.nva_nlink = 1; + if (nfs_getattr(np, nvattr, ctx, NGA_CACHED)) { + nvattr->nva_nlink = 1; } gotattr = 1; goto again; } - if (!inuse || (np->n_sillyrename && (nvattr.nva_nlink > 1))) { + if (!inuse || (np->n_sillyrename && (nvattr->nva_nlink > 1))) { if (!inuse && !flushed) { /* flush all the buffers first */ /* unlock the node */ lck_mtx_lock(nfs_node_hash_mutex); @@ -4433,10 +4525,10 @@ again: nfs_node_lock_force(np); NATTRINVALIDATE(np); nfs_node_unlock(np); - return error; + goto out_free; } if (!namedattrs) { - nfs_dulookup_finish(&dul, dnp, ctx); + nfs_dulookup_finish(dul, dnp, ctx); } goto again_relock; } @@ -4453,7 +4545,7 @@ again: nfs_name_cache_purge(dnp, np, cnp, ctx); if (!namedattrs) { - nfs_dulookup_start(&dul, dnp, ctx); + nfs_dulookup_start(dul, dnp, ctx); } /* Do the rpc */ @@ -4499,7 +4591,7 @@ again: } } else if (!np->n_sillyrename) { if (!namedattrs) { - nfs_dulookup_start(&dul, dnp, ctx); + nfs_dulookup_start(dul, dnp, ctx); } error = nfs_sillyrename(dnp, np, cnp, ctx); nfs_node_lock_force(np); @@ -4510,14 +4602,14 @@ again: NATTRINVALIDATE(np); nfs_node_unlock(np); if (!namedattrs) { - nfs_dulookup_start(&dul, dnp, ctx); + nfs_dulookup_start(dul, dnp, ctx); } } /* nfs_getattr() will check changed and purge caches */ nfs_getattr(dnp, NULL, ctx, NGA_CACHED); if (!namedattrs) { - nfs_dulookup_finish(&dul, dnp, ctx); + nfs_dulookup_finish(dul, dnp, ctx); } out: /* unlock the node */ @@ -4532,6 +4624,9 @@ out: if (setsize) { ubc_setsize(vp, 0); } +out_free: + FREE(dul, M_TEMP); + FREE(nvattr, M_TEMP); return error; } @@ -4559,7 +4654,7 @@ nfs3_remove_rpc( thread_t thd, kauth_cred_t cred) { - int error = 0, lockerror = ENOENT, status, wccpostattr = 0; + int error = 0, lockerror = ENOENT, status = 0, wccpostattr = 0; struct timespec premtime = { .tv_sec = 0, .tv_nsec = 0 }; struct nfsmount *nmp; int nfsvers; @@ -4640,7 +4735,7 @@ nfs_vnop_rename( struct componentname *fcnp = ap->a_fcnp; int error, nfsvers, inuse = 0, tvprecycle = 0, locked = 0; mount_t fmp, tdmp, tmp; - struct nfs_vattr nvattr; + struct nfs_vattr *nvattr; struct nfsmount *nmp; fdnp = VTONFS(fdvp); @@ -4659,6 +4754,8 @@ nfs_vnop_rename( return error; } + MALLOC(nvattr, struct nfs_vattr *, sizeof(*nvattr), M_TEMP, M_WAITOK); + if (tvp && (tvp != fvp)) { /* lock the node while we rename over the existing file */ lck_mtx_lock(nfs_node_hash_mutex); @@ -4720,7 +4817,7 @@ nfs_vnop_rename( if (tvp && (tvp != fvp) && !tnp->n_sillyrename) { nfs_node_lock_force(tnp); tvprecycle = (!error && !vnode_isinuse(tvp, 0) && - (nfs_getattrcache(tnp, 
&nvattr, 0) || (nvattr.nva_nlink == 1))); + (nfs_getattrcache(tnp, nvattr, 0) || (nvattr->nva_nlink == 1))); nfs_node_unlock(tnp); lck_mtx_lock(nfs_node_hash_mutex); if (tvprecycle && (tnp->n_hflag & NHHASHED)) { @@ -4790,6 +4887,7 @@ out: lck_mtx_unlock(nfs_node_hash_mutex); } nfs_node_clear_busy4(fdnp, fnp, tdnp, tnp); + FREE(nvattr, M_TEMP); return error; } @@ -4806,7 +4904,7 @@ nfs3_rename_rpc( int tnamelen, vfs_context_t ctx) { - int error = 0, lockerror = ENOENT, status, fwccpostattr = 0, twccpostattr = 0; + int error = 0, lockerror = ENOENT, status = 0, fwccpostattr = 0, twccpostattr = 0; struct timespec fpremtime = { .tv_sec = 0, .tv_nsec = 0 }, tpremtime = { .tv_sec = 0, .tv_nsec = 0 }; struct nfsmount *nmp; int nfsvers; @@ -4891,7 +4989,7 @@ nfs3_vnop_link( vnode_t vp = ap->a_vp; vnode_t tdvp = ap->a_tdvp; struct componentname *cnp = ap->a_cnp; - int error = 0, lockerror = ENOENT, status, wccpostattr = 0, attrflag = 0; + int error = 0, lockerror = ENOENT, status = 0, wccpostattr = 0, attrflag = 0; struct timespec premtime = { .tv_sec = 0, .tv_nsec = 0 }; struct nfsmount *nmp; nfsnode_t np = VTONFS(vp); @@ -4999,9 +5097,10 @@ nfs3_vnop_symlink( vnode_t dvp = ap->a_dvp; struct vnode_attr *vap = ap->a_vap; struct componentname *cnp = ap->a_cnp; - struct nfs_vattr nvattr; - fhandle_t fh; - int slen, error = 0, lockerror = ENOENT, busyerror = ENOENT, status, wccpostattr = 0; + struct nfs_vattr *nvattr; + fhandle_t *fh; + int error = 0, lockerror = ENOENT, busyerror = ENOENT, status = 0, wccpostattr = 0; + size_t slen; struct timespec premtime = { .tv_sec = 0, .tv_nsec = 0 }; vnode_t newvp = NULL; int nfsvers, gotuid, gotgid; @@ -5010,8 +5109,8 @@ nfs3_vnop_symlink( nfsnode_t dnp = VTONFS(dvp); struct nfsmount *nmp; struct nfsm_chain nmreq, nmrep; - struct nfsreq rq, *req = &rq; - struct nfs_dulookup dul; + struct nfsreq *req; + struct nfs_dulookup *dul; int namedattrs; int dul_in_progress = 0; @@ -5039,9 +5138,14 @@ nfs3_vnop_symlink( gotuid = VATTR_IS_ACTIVE(vap, va_uid); gotgid = VATTR_IS_ACTIVE(vap, va_gid); + fh = zalloc(nfs_fhandle_zone); + req = zalloc_flags(nfs_req_zone, Z_WAITOK); + MALLOC(dul, struct nfs_dulookup *, sizeof(*dul), M_TEMP, M_WAITOK); + MALLOC(nvattr, struct nfs_vattr *, sizeof(*nvattr), M_TEMP, M_WAITOK); + error = busyerror = nfs_node_set_busy(dnp, vfs_context_thread(ctx)); if (!namedattrs) { - nfs_dulookup_init(&dul, dnp, cnp->cn_nameptr, cnp->cn_namelen, ctx); + nfs_dulookup_init(dul, dnp, cnp->cn_nameptr, cnp->cn_namelen, ctx); } nfsm_chain_null(&nmreq); @@ -5066,7 +5170,7 @@ nfs3_vnop_symlink( vfs_context_thread(ctx), vfs_context_ucred(ctx), NULL, 0, NULL, &req); if (!error) { if (!namedattrs) { - nfs_dulookup_start(&dul, dnp, ctx); + nfs_dulookup_start(dul, dnp, ctx); dul_in_progress = 1; } error = nfs_request_async_finish(req, &nmrep, &xid, &status); @@ -5082,9 +5186,9 @@ nfs3_vnop_symlink( cache_purge_negatives(dvp); } if (nfsvers == NFS_VER3) { - error = nfsm_chain_get_fh_attr(nmp, &nmrep, dnp, ctx, nfsvers, &xid, &fh, &nvattr); + error = nfsm_chain_get_fh_attr(nmp, &nmrep, dnp, ctx, nfsvers, &xid, fh, nvattr); } else { - fh.fh_len = 0; + fh->fh_len = 0; } } if (nfsvers == NFS_VER3) { @@ -5108,15 +5212,15 @@ nfsmout: nfs_getattr(dnp, NULL, ctx, wccpostattr ? 
NGA_CACHED : NGA_UNCACHED); } - if (!error && fh.fh_len) { - error = nfs_nget(NFSTOMP(dnp), dnp, cnp, fh.fh_data, fh.fh_len, &nvattr, &xid, rq.r_auth, NG_MAKEENTRY, &np); + if (!error && fh->fh_len) { + error = nfs_nget(NFSTOMP(dnp), dnp, cnp, fh->fh_data, fh->fh_len, nvattr, &xid, req->r_auth, NG_MAKEENTRY, &np); } if (!error && np) { newvp = NFSTOV(np); } if (dul_in_progress) { - nfs_dulookup_finish(&dul, dnp, ctx); + nfs_dulookup_finish(dul, dnp, ctx); } /* @@ -5141,9 +5245,9 @@ nfsmout: nfs_node_clear_busy(dnp); } if (!error && (gotuid || gotgid) && - (!newvp || nfs_getattrcache(np, &nvattr, 0) || - (gotuid && (nvattr.nva_uid != vap->va_uid)) || - (gotgid && (nvattr.nva_gid != vap->va_gid)))) { + (!newvp || nfs_getattrcache(np, nvattr, 0) || + (gotuid && (nvattr->nva_uid != vap->va_uid)) || + (gotgid && (nvattr->nva_gid != vap->va_gid)))) { /* clear ID bits if server didn't use them (or we can't tell) */ VATTR_CLEAR_SUPPORTED(vap, va_uid); VATTR_CLEAR_SUPPORTED(vap, va_gid); @@ -5157,6 +5261,10 @@ nfsmout: nfs_node_unlock(np); *ap->a_vpp = newvp; } + NFS_ZFREE(nfs_fhandle_zone, fh); + NFS_ZFREE(nfs_req_zone, req); + FREE(dul, M_TEMP); + FREE(nvattr, M_TEMP); return error; } @@ -5178,19 +5286,19 @@ nfs3_vnop_mkdir( vnode_t dvp = ap->a_dvp; struct vnode_attr *vap = ap->a_vap; struct componentname *cnp = ap->a_cnp; - struct nfs_vattr nvattr; + struct nfs_vattr *nvattr; nfsnode_t np = NULL; struct nfsmount *nmp; nfsnode_t dnp = VTONFS(dvp); vnode_t newvp = NULL; - int error = 0, lockerror = ENOENT, busyerror = ENOENT, status, wccpostattr = 0; + int error = 0, lockerror = ENOENT, busyerror = ENOENT, status = 0, wccpostattr = 0; struct timespec premtime = { .tv_sec = 0, .tv_nsec = 0 }; int nfsvers, gotuid, gotgid; u_int64_t xid = 0, dxid; - fhandle_t fh; + fhandle_t *fh; struct nfsm_chain nmreq, nmrep; - struct nfsreq rq, *req = &rq; - struct nfs_dulookup dul; + struct nfsreq *req; + struct nfs_dulookup *dul; int namedattrs; int dul_in_progress = 0; @@ -5216,9 +5324,14 @@ nfs3_vnop_mkdir( gotuid = VATTR_IS_ACTIVE(vap, va_uid); gotgid = VATTR_IS_ACTIVE(vap, va_gid); + fh = zalloc(nfs_fhandle_zone); + req = zalloc_flags(nfs_req_zone, Z_WAITOK); + MALLOC(dul, struct nfs_dulookup *, sizeof(*dul), M_TEMP, M_WAITOK); + MALLOC(nvattr, struct nfs_vattr *, sizeof(*nvattr), M_TEMP, M_WAITOK); + error = busyerror = nfs_node_set_busy(dnp, vfs_context_thread(ctx)); if (!namedattrs) { - nfs_dulookup_init(&dul, dnp, cnp->cn_nameptr, cnp->cn_namelen, ctx); + nfs_dulookup_init(dul, dnp, cnp->cn_nameptr, cnp->cn_namelen, ctx); } nfsm_chain_null(&nmreq); @@ -5241,7 +5354,7 @@ nfs3_vnop_mkdir( vfs_context_thread(ctx), vfs_context_ucred(ctx), NULL, 0, NULL, &req); if (!error) { if (!namedattrs) { - nfs_dulookup_start(&dul, dnp, ctx); + nfs_dulookup_start(dul, dnp, ctx); dul_in_progress = 1; } error = nfs_request_async_finish(req, &nmrep, &xid, &status); @@ -5256,7 +5369,7 @@ nfs3_vnop_mkdir( dnp->n_flag &= ~NNEGNCENTRIES; cache_purge_negatives(dvp); } - error = nfsm_chain_get_fh_attr(nmp, &nmrep, dnp, ctx, nfsvers, &xid, &fh, &nvattr); + error = nfsm_chain_get_fh_attr(nmp, &nmrep, dnp, ctx, nfsvers, &xid, fh, nvattr); } if (nfsvers == NFS_VER3) { nfsm_chain_get_wcc_data(error, &nmrep, dnp, &premtime, &wccpostattr, &dxid); @@ -5279,15 +5392,15 @@ nfsmout: nfs_getattr(dnp, NULL, ctx, wccpostattr ? 
NGA_CACHED : NGA_UNCACHED); } - if (!error && fh.fh_len) { - error = nfs_nget(NFSTOMP(dnp), dnp, cnp, fh.fh_data, fh.fh_len, &nvattr, &xid, rq.r_auth, NG_MAKEENTRY, &np); + if (!error && fh->fh_len) { + error = nfs_nget(NFSTOMP(dnp), dnp, cnp, fh->fh_data, fh->fh_len, nvattr, &xid, req->r_auth, NG_MAKEENTRY, &np); } if (!error && np) { newvp = NFSTOV(np); } if (dul_in_progress) { - nfs_dulookup_finish(&dul, dnp, ctx); + nfs_dulookup_finish(dul, dnp, ctx); } /* @@ -5312,9 +5425,9 @@ nfsmout: nfs_node_clear_busy(dnp); } if (!error && (gotuid || gotgid) && - (!newvp || nfs_getattrcache(np, &nvattr, 0) || - (gotuid && (nvattr.nva_uid != vap->va_uid)) || - (gotgid && (nvattr.nva_gid != vap->va_gid)))) { + (!newvp || nfs_getattrcache(np, nvattr, 0) || + (gotuid && (nvattr->nva_uid != vap->va_uid)) || + (gotgid && (nvattr->nva_gid != vap->va_gid)))) { /* clear ID bits if server didn't use them (or we can't tell) */ VATTR_CLEAR_SUPPORTED(vap, va_uid); VATTR_CLEAR_SUPPORTED(vap, va_gid); @@ -5328,6 +5441,10 @@ nfsmout: nfs_node_unlock(np); *ap->a_vpp = newvp; } + NFS_ZFREE(nfs_fhandle_zone, fh); + NFS_ZFREE(nfs_req_zone, req); + FREE(dul, M_TEMP); + FREE(nvattr, M_TEMP); return error; } @@ -5348,7 +5465,7 @@ nfs3_vnop_rmdir( vnode_t vp = ap->a_vp; vnode_t dvp = ap->a_dvp; struct componentname *cnp = ap->a_cnp; - int error = 0, lockerror = ENOENT, status, wccpostattr = 0; + int error = 0, lockerror = ENOENT, status = 0, wccpostattr = 0; struct timespec premtime = { .tv_sec = 0, .tv_nsec = 0 }; struct nfsmount *nmp; nfsnode_t np = VTONFS(vp); @@ -5356,8 +5473,8 @@ nfs3_vnop_rmdir( int nfsvers; u_int64_t xid; struct nfsm_chain nmreq, nmrep; - struct nfsreq rq, *req = &rq; - struct nfs_dulookup dul; + struct nfsreq *req; + struct nfs_dulookup *dul; int namedattrs; int dul_in_progress = 0; @@ -5376,8 +5493,11 @@ nfs3_vnop_rmdir( return error; } + req = zalloc_flags(nfs_req_zone, Z_WAITOK); + MALLOC(dul, struct nfs_dulookup *, sizeof(*dul), M_TEMP, M_WAITOK); + if (!namedattrs) { - nfs_dulookup_init(&dul, dnp, cnp->cn_nameptr, cnp->cn_namelen, ctx); + nfs_dulookup_init(dul, dnp, cnp->cn_nameptr, cnp->cn_namelen, ctx); } nfsm_chain_null(&nmreq); @@ -5394,7 +5514,7 @@ nfs3_vnop_rmdir( vfs_context_thread(ctx), vfs_context_ucred(ctx), NULL, 0, NULL, &req); if (!error) { if (!namedattrs) { - nfs_dulookup_start(&dul, dnp, ctx); + nfs_dulookup_start(dul, dnp, ctx); dul_in_progress = 1; } error = nfs_request_async_finish(req, &nmrep, &xid, &status); @@ -5425,7 +5545,7 @@ nfsmout: nfs_getattr(dnp, NULL, ctx, wccpostattr ? 
NGA_CACHED : NGA_UNCACHED); } if (dul_in_progress) { - nfs_dulookup_finish(&dul, dnp, ctx); + nfs_dulookup_finish(dul, dnp, ctx); } nfs_node_clear_busy2(dnp, np); @@ -5449,6 +5569,8 @@ nfsmout: } lck_mtx_unlock(nfs_node_hash_mutex); } + NFS_ZFREE(nfs_req_zone, req); + FREE(dul, M_TEMP); return error; } @@ -5491,7 +5613,8 @@ nfs_vnop_readdir( nfsnode_t dnp = VTONFS(dvp); struct nfsmount *nmp; uio_t uio = ap->a_uio; - int error, nfsvers, extended, numdirent, bigcookies, ptc, done, attrcachetimeout; + int error, nfsvers, extended, numdirent, bigcookies, ptc, done; + long attrcachetimeout; uint16_t i, iptc, rlen, nlen; uint64_t cookie, nextcookie, lbn = 0; struct nfsbuf *bp = NULL; @@ -5679,9 +5802,9 @@ getbuffer: } rlen = NFS_DIRENT_LEN(nlen); dent.d_reclen = rlen; - dent.d_ino = dp->d_ino; + dent.d_ino = (ino_t)dp->d_ino; dent.d_type = dp->d_type; - dent.d_namlen = nlen; + dent.d_namlen = (uint8_t)nlen; strlcpy(dent.d_name, dp->d_name, nlen + 1); } /* check that the record fits */ @@ -5753,11 +5876,11 @@ nfs_invaldir(nfsnode_t dnp) /* * calculate how much space is available for additional directory entries. */ -uint32_t +uint64_t nfs_dir_buf_freespace(struct nfsbuf *bp, int rdirplus) { struct nfs_dir_buf_header *ndbhp = (struct nfs_dir_buf_header*)bp->nb_data; - uint32_t space; + uint64_t space; if (!ndbhp) { return 0; @@ -5794,13 +5917,7 @@ nfs_dir_cookie_cache(nfsnode_t dnp, uint64_t cookie, uint64_t lbn) ndcc = dnp->n_cookiecache; if (!ndcc) { /* allocate the cookie cache structure */ - MALLOC_ZONE(dnp->n_cookiecache, struct nfsdmap *, - sizeof(struct nfsdmap), M_NFSDIROFF, M_WAITOK); - if (!dnp->n_cookiecache) { - nfs_node_unlock(dnp); - return; - } - ndcc = dnp->n_cookiecache; + ndcc = dnp->n_cookiecache = zalloc(ZV_NFSDIROFF); ndcc->free = 0; ndcc->mru = -1; memset(ndcc->next, -1, NFSNUMCOOKIES); @@ -6027,7 +6144,7 @@ nfs_dir_buf_search( for (i = 0; i < ndbhp->ndbh_count; i++) { nextlbn = dp->d_seekoff; if ((cnp->cn_namelen == dp->d_namlen) && !strcmp(cnp->cn_nameptr, dp->d_name)) { - fhlen = dp->d_name[dp->d_namlen + 1]; + fhlen = (uint8_t)dp->d_name[dp->d_namlen + 1]; nvattrp = NFS_DIR_BUF_NVATTR(bp, i); if ((ndbhp->ndbh_ncgen != bp->nb_np->n_ncgen) || (fhlen == 0) || (nvattrp->nva_type == VNON) || (nvattrp->nva_fileid == 0)) { @@ -6047,6 +6164,7 @@ nfs_dir_buf_search( bcopy(nvap, nvattrp, sizeof(*nvap)); dp->d_fileno = nvattrp->nva_fileid; nvattrp->nva_fileid = *xidp; + nvap->nva_flags |= NFS_FFLAG_FILEID_CONTAINS_XID; *(time_t*)(&dp->d_name[dp->d_namlen + 2 + fhp->fh_len]) = *attrstampp; } error = 0; @@ -6059,6 +6177,7 @@ nfs_dir_buf_search( bcopy(nvattrp, nvap, sizeof(*nvap)); *xidp = nvap->nva_fileid; nvap->nva_fileid = dp->d_fileno; + nvap->nva_flags &= ~NFS_FFLAG_FILEID_CONTAINS_XID; error = 0; break; } @@ -6075,14 +6194,14 @@ nfs_dir_buf_search( * Note: should only be called with RDIRPLUS directory buffers */ int -nfs_dir_buf_cache_lookup(nfsnode_t dnp, nfsnode_t *npp, struct componentname *cnp, vfs_context_t ctx, int purge) +nfs_dir_buf_cache_lookup(nfsnode_t dnp, nfsnode_t *npp, struct componentname *cnp, vfs_context_t ctx, int purge, int *skipdu) { nfsnode_t newnp; struct nfsmount *nmp; int error = 0, i, found = 0, count = 0; u_int64_t xid; - struct nfs_vattr nvattr; - fhandle_t fh; + struct nfs_vattr *nvattr; + fhandle_t *fh; time_t attrstamp = 0; thread_t thd = vfs_context_thread(ctx); struct nfsbuf *bp, *lastbp, *foundbp; @@ -6091,6 +6210,7 @@ nfs_dir_buf_cache_lookup(nfsnode_t dnp, nfsnode_t *npp, struct componentname *cn int dotunder = (cnp->cn_namelen > 2) && 
(cnp->cn_nameptr[0] == '.') && (cnp->cn_nameptr[1] == '_'); int isdot = (cnp->cn_namelen == 1) && (cnp->cn_nameptr[0] == '.'); int isdotdot = (cnp->cn_namelen == 2) && (cnp->cn_nameptr[0] == '.') && (cnp->cn_nameptr[1] == '.'); + int eof = 0, sof = 0, skipped = 0; nmp = NFSTONMP(dnp); if (nfs_mount_gone(nmp)) { @@ -6104,17 +6224,22 @@ nfs_dir_buf_cache_lookup(nfsnode_t dnp, nfsnode_t *npp, struct componentname *cn return 0; } + fh = zalloc(nfs_fhandle_zone); + MALLOC(nvattr, struct nfs_vattr *, sizeof(*nvattr), M_TEMP, M_WAITOK); + /* first check most recent buffer (and next one too) */ lbn = dnp->n_lastdbl; for (i = 0; i < 2; i++) { if ((error = nfs_buf_get(dnp, lbn, NFS_DIRBLKSIZ, thd, NBLK_READ | NBLK_ONLYVALID, &bp))) { - return error; + goto out; } if (!bp) { + skipped = 1; break; } count++; - error = nfs_dir_buf_search(bp, cnp, &fh, &nvattr, &xid, &attrstamp, &nextlbn, purge ? NDBS_PURGE : 0); + nfs_dir_buf_cache_lookup_boundaries(bp, &sof, &eof); + error = nfs_dir_buf_search(bp, cnp, fh, nvattr, &xid, &attrstamp, &nextlbn, purge ? NDBS_PURGE : 0); nfs_buf_release(bp, 0); if (error == ESRCH) { error = 0; @@ -6131,6 +6256,11 @@ nfs_dir_buf_cache_lookup(nfsnode_t dnp, nfsnode_t *npp, struct componentname *cn goto done; } + /* If we detect that we fetched full directory listing we should avoid sending lookups for ._ files */ + if (dotunder && !found && !error && eof && sof && !skipped && skipdu) { + *skipdu = 1; + } + /* * Scan the list of buffers, keeping them in order. * Note that itercomplete inserts each of the remaining buffers @@ -6140,6 +6270,7 @@ nfs_dir_buf_cache_lookup(nfsnode_t dnp, nfsnode_t *npp, struct componentname *cn * Also note: the LIST_INSERT_AFTER(lastbp) is only safe because * we don't drop nfs_buf_mutex. */ + eof = sof = skipped = 0; if (!nfs_buf_iterprepare(dnp, &blist, NBI_CLEAN)) { lastbp = foundbp = NULL; while ((bp = LIST_FIRST(&blist))) { @@ -6151,9 +6282,11 @@ nfs_dir_buf_cache_lookup(nfsnode_t dnp, nfsnode_t *npp, struct componentname *cn } lastbp = bp; if (error || found) { + skipped = 1; continue; } if (!purge && dotunder && (count > 100)) { /* don't waste too much time looking for ._ files */ + skipped = 1; continue; } nfs_buf_refget(bp); @@ -6161,11 +6294,13 @@ nfs_dir_buf_cache_lookup(nfsnode_t dnp, nfsnode_t *npp, struct componentname *cn if (nfs_buf_acquire(bp, NBAC_NOWAIT, 0, 0)) { /* just skip this buffer */ nfs_buf_refrele(bp); + skipped = 1; continue; } nfs_buf_refrele(bp); count++; - error = nfs_dir_buf_search(bp, cnp, &fh, &nvattr, &xid, &attrstamp, NULL, purge ? NDBS_PURGE : 0); + nfs_dir_buf_cache_lookup_boundaries(bp, &sof, &eof); + error = nfs_dir_buf_search(bp, cnp, fh, nvattr, &xid, &attrstamp, NULL, purge ? 
NDBS_PURGE : 0); if (error == ESRCH) { error = 0; } else { @@ -6181,33 +6316,43 @@ nfs_dir_buf_cache_lookup(nfsnode_t dnp, nfsnode_t *npp, struct componentname *cn } nfs_buf_itercomplete(dnp, &blist, NBI_CLEAN); } + + /* If we detect that we fetched full directory listing we should avoid sending lookups for ._ files */ + if (dotunder && !found && !error && eof && sof && !skipped && skipdu) { + *skipdu = 1; + } + done: lck_mtx_unlock(nfs_buf_mutex); if (!error && found && !purge) { - error = nfs_nget(NFSTOMP(dnp), dnp, cnp, fh.fh_data, fh.fh_len, - &nvattr, &xid, dnp->n_auth, NG_MAKEENTRY, &newnp); + error = nfs_nget(NFSTOMP(dnp), dnp, cnp, fh->fh_data, + fh->fh_len, nvattr, &xid, dnp->n_auth, NG_MAKEENTRY, + &newnp); if (error) { - return error; + goto out; } newnp->n_attrstamp = attrstamp; *npp = newnp; nfs_node_unlock(newnp); /* check if the dir buffer's attrs are out of date */ - if (!nfs_getattr(newnp, &nvattr, ctx, NGA_CACHED) && + if (!nfs_getattr(newnp, nvattr, ctx, NGA_CACHED) && (newnp->n_attrstamp != attrstamp)) { /* they are, so update them */ error = nfs_buf_get(dnp, lbn, NFS_DIRBLKSIZ, thd, NBLK_READ | NBLK_ONLYVALID, &bp); if (!error && bp) { attrstamp = newnp->n_attrstamp; xid = newnp->n_xid; - nfs_dir_buf_search(bp, cnp, &fh, &nvattr, &xid, &attrstamp, NULL, NDBS_UPDATE); + nfs_dir_buf_search(bp, cnp, fh, nvattr, &xid, &attrstamp, NULL, NDBS_UPDATE); nfs_buf_release(bp, 0); } error = 0; } } +out: + NFS_ZFREE(nfs_fhandle_zone, fh); + FREE(nvattr, M_TEMP); return error; } @@ -6222,7 +6367,7 @@ nfs_name_cache_purge(nfsnode_t dnp, nfsnode_t np, struct componentname *cnp, vfs cache_purge(NFSTOV(np)); if (nmp && (nmp->nm_vers > NFS_VER2) && NMFLAG(nmp, RDIRPLUS)) { - nfs_dir_buf_cache_lookup(dnp, NULL, cnp, ctx, 1); + nfs_dir_buf_cache_lookup(dnp, NULL, cnp, ctx, 1, NULL); } } @@ -6234,17 +6379,19 @@ nfs3_readdir_rpc(nfsnode_t dnp, struct nfsbuf *bp, vfs_context_t ctx) { struct nfsmount *nmp; int error = 0, lockerror, nfsvers, rdirplus, bigcookies; - int i, status, attrflag, fhflag, more_entries = 1, eof, bp_dropped = 0; + int i, status = 0, attrflag, fhflag, more_entries = 1, eof, bp_dropped = 0; uint32_t nmreaddirsize, nmrsize; - uint32_t namlen, skiplen, fhlen, xlen, attrlen, reclen, space_free, space_needed; - uint64_t cookie, lastcookie, xid, savedxid, fileno; + uint32_t namlen, skiplen, fhlen, xlen, attrlen; + uint64_t cookie, lastcookie, xid, savedxid, fileno, space_free, space_needed; struct nfsm_chain nmreq, nmrep, nmrepsave; - fhandle_t fh; + fhandle_t *fh; struct nfs_vattr *nvattrp; struct nfs_dir_buf_header *ndbhp; struct direntry *dp; - char *padstart, padlen; + char *padstart; struct timeval now; + uint16_t reclen; + size_t padlen; nmp = NFSTONMP(dnp); if (nfs_mount_gone(nmp)) { @@ -6254,10 +6401,12 @@ nfs3_readdir_rpc(nfsnode_t dnp, struct nfsbuf *bp, vfs_context_t ctx) nmreaddirsize = nmp->nm_readdirsize; nmrsize = nmp->nm_rsize; bigcookies = nmp->nm_state & NFSSTA_BIGCOOKIES; + fh = zalloc(nfs_fhandle_zone); noplus: rdirplus = ((nfsvers > NFS_VER2) && NMFLAG(nmp, RDIRPLUS)) ? 1 : 0; if ((lockerror = nfs_node_lock(dnp))) { + NFS_ZFREE(nfs_fhandle_zone, fh); return lockerror; } @@ -6370,7 +6519,7 @@ noplus: fhlen = rdirplus ? (1 + dnp->n_fhsize) : 0; xlen = rdirplus ? (fhlen + sizeof(time_t)) : 0; attrlen = rdirplus ? 
sizeof(struct nfs_vattr) : 0; - reclen = NFS_DIRENTRY_LEN(namlen + xlen); + reclen = NFS_DIRENTRY_LEN_16(namlen + xlen); space_needed = reclen + attrlen; space_free = nfs_dir_buf_freespace(bp, rdirplus); if (space_needed > space_free) { @@ -6401,7 +6550,7 @@ nextbuffer: } nmrepsave = nmrep; dp->d_fileno = fileno; - dp->d_namlen = namlen; + dp->d_namlen = (uint16_t)namlen; dp->d_reclen = reclen; dp->d_type = DT_UNKNOWN; nfsm_chain_get_opaque(error, &nmrep, namlen, dp->d_name); @@ -6437,6 +6586,7 @@ nextbuffer: dp->d_type = IFTODT(VTTOIF(nvattrp->nva_type)); /* fileid is already in d_fileno, so stash xid in attrs */ nvattrp->nva_fileid = savedxid; + nvattrp->nva_flags |= NFS_FFLAG_FILEID_CONTAINS_XID; } else { /* mark the attributes invalid */ bzero(nvattrp, sizeof(struct nfs_vattr)); @@ -6445,11 +6595,11 @@ nextbuffer: nfsm_chain_get_32(error, &nmrep, fhflag); nfsmout_if(error); if (fhflag) { - nfsm_chain_get_fh(error, &nmrep, NFS_VER3, &fh); + nfsm_chain_get_fh(error, &nmrep, NFS_VER3, fh); nfsmout_if(error); - fhlen = fh.fh_len + 1; + fhlen = fh->fh_len + 1; xlen = fhlen + sizeof(time_t); - reclen = NFS_DIRENTRY_LEN(namlen + xlen); + reclen = NFS_DIRENTRY_LEN_16(namlen + xlen); space_needed = reclen + attrlen; if (space_needed > space_free) { /* didn't actually have the room... move on to next buffer */ @@ -6457,19 +6607,19 @@ nextbuffer: goto nextbuffer; } /* pack the file handle into the record */ - dp->d_name[dp->d_namlen + 1] = fh.fh_len; - bcopy(fh.fh_data, &dp->d_name[dp->d_namlen + 2], fh.fh_len); + dp->d_name[dp->d_namlen + 1] = (unsigned char)fh->fh_len; /* No truncation because fh_len's value is checked during nfsm_chain_get_fh() */ + bcopy(fh->fh_data, &dp->d_name[dp->d_namlen + 2], fh->fh_len); } else { /* mark the file handle invalid */ - fh.fh_len = 0; - fhlen = fh.fh_len + 1; + fh->fh_len = 0; + fhlen = fh->fh_len + 1; xlen = fhlen + sizeof(time_t); - reclen = NFS_DIRENTRY_LEN(namlen + xlen); + reclen = NFS_DIRENTRY_LEN_16(namlen + xlen); bzero(&dp->d_name[dp->d_namlen + 1], fhlen); } *(time_t*)(&dp->d_name[dp->d_namlen + 1 + fhlen]) = now.tv_sec; dp->d_reclen = reclen; - nfs_rdirplus_update_node_attrs(dnp, dp, &fh, nvattrp, &savedxid); + nfs_rdirplus_update_node_attrs(dnp, dp, fh, nvattrp, &savedxid); } padstart = dp->d_name + dp->d_namlen + 1 + xlen; ndbhp->ndbh_count++; @@ -6521,6 +6671,7 @@ nfsmout: } nfsm_chain_cleanup(&nmreq); nfsm_chain_cleanup(&nmrep); + NFS_ZFREE(nfs_fhandle_zone, fh); return bp_dropped ? 
NFSERR_DIRBUFDROPPED : error; } @@ -6548,7 +6699,7 @@ nfs_sillyrename( { struct nfs_sillyrename *nsp; int error; - short pid; + pid_t pid; kauth_cred_t cred; uint32_t num; struct nfsmount *nmp; @@ -6560,8 +6711,8 @@ nfs_sillyrename( nfs_name_cache_purge(dnp, np, cnp, ctx); - MALLOC_ZONE(nsp, struct nfs_sillyrename *, - sizeof(struct nfs_sillyrename), M_NFSREQ, M_WAITOK); + MALLOC(nsp, struct nfs_sillyrename *, + sizeof(struct nfs_sillyrename), M_TEMP, M_WAITOK); if (!nsp) { return ENOMEM; } @@ -6623,7 +6774,7 @@ bad: bad_norele: nsp->nsr_cred = NOCRED; kauth_cred_unref(&cred); - FREE_ZONE(nsp, sizeof(*nsp), M_NFSREQ); + FREE(nsp, M_TEMP); return error; } @@ -6671,7 +6822,7 @@ nfs3_lookup_rpc_async_finish( fhandle_t *fhp, struct nfs_vattr *nvap) { - int error = 0, lockerror = ENOENT, status, nfsvers, attrflag; + int error = 0, lockerror = ENOENT, status = 0, nfsvers, attrflag; u_int64_t xid; struct nfsmount *nmp; struct nfsm_chain nmrep; @@ -6742,10 +6893,10 @@ nfs_lookitup( int error = 0; nfsnode_t np, newnp = NULL; u_int64_t xid; - fhandle_t fh; + fhandle_t *fh; struct nfsmount *nmp; - struct nfs_vattr nvattr; - struct nfsreq rq, *req = &rq; + struct nfs_vattr *nvattr; + struct nfsreq *req; nmp = NFSTONMP(dnp); if (nfs_mount_gone(nmp)) { @@ -6753,31 +6904,34 @@ nfs_lookitup( } if (NFS_BITMAP_ISSET(nmp->nm_fsattr.nfsa_bitmap, NFS_FATTR_MAXNAME) && - (namelen > (int)nmp->nm_fsattr.nfsa_maxname)) { + (namelen > nmp->nm_fsattr.nfsa_maxname)) { return ENAMETOOLONG; } - NVATTR_INIT(&nvattr); + fh = zalloc(nfs_fhandle_zone); + req = zalloc_flags(nfs_req_zone, Z_WAITOK); + MALLOC(nvattr, struct nfs_vattr *, sizeof(*nvattr), M_TEMP, M_WAITOK); + NVATTR_INIT(nvattr); /* check for lookup of "." */ if ((name[0] == '.') && (namelen == 1)) { /* skip lookup, we know who we are */ - fh.fh_len = 0; + fh->fh_len = 0; newnp = dnp; goto nfsmout; } error = nmp->nm_funcs->nf_lookup_rpc_async(dnp, name, namelen, ctx, &req); nfsmout_if(error); - error = nmp->nm_funcs->nf_lookup_rpc_async_finish(dnp, name, namelen, ctx, req, &xid, &fh, &nvattr); + error = nmp->nm_funcs->nf_lookup_rpc_async_finish(dnp, name, namelen, ctx, req, &xid, fh, nvattr); nfsmout_if(!npp || error); if (*npp) { np = *npp; - if (fh.fh_len != np->n_fhsize) { + if (fh->fh_len != np->n_fhsize) { u_char *oldbuf = (np->n_fhsize > NFS_SMALLFH) ? 
np->n_fhp : NULL; - if (fh.fh_len > NFS_SMALLFH) { - MALLOC_ZONE(np->n_fhp, u_char *, fh.fh_len, M_NFSBIGFH, M_WAITOK); + if (fh->fh_len > NFS_SMALLFH) { + MALLOC(np->n_fhp, u_char *, fh->fh_len, M_NFSBIGFH, M_WAITOK); if (!np->n_fhp) { np->n_fhp = oldbuf; error = ENOMEM; @@ -6787,20 +6941,20 @@ nfs_lookitup( np->n_fhp = &np->n_fh[0]; } if (oldbuf) { - FREE_ZONE(oldbuf, np->n_fhsize, M_NFSBIGFH); + FREE(oldbuf, M_NFSBIGFH); } } - bcopy(fh.fh_data, np->n_fhp, fh.fh_len); - np->n_fhsize = fh.fh_len; + bcopy(fh->fh_data, np->n_fhp, fh->fh_len); + np->n_fhsize = fh->fh_len; nfs_node_lock_force(np); - error = nfs_loadattrcache(np, &nvattr, &xid, 0); + error = nfs_loadattrcache(np, nvattr, &xid, 0); nfs_node_unlock(np); nfsmout_if(error); newnp = np; - } else if (NFS_CMPFH(dnp, fh.fh_data, fh.fh_len)) { + } else if (NFS_CMPFH(dnp, fh->fh_data, fh->fh_len)) { nfs_node_lock_force(dnp); if (dnp->n_xid <= xid) { - error = nfs_loadattrcache(dnp, &nvattr, &xid, 0); + error = nfs_loadattrcache(dnp, nvattr, &xid, 0); } nfs_node_unlock(dnp); nfsmout_if(error); @@ -6810,8 +6964,8 @@ nfs_lookitup( bzero(cnp, sizeof(*cnp)); cnp->cn_nameptr = name; cnp->cn_namelen = namelen; - error = nfs_nget(NFSTOMP(dnp), dnp, cnp, fh.fh_data, fh.fh_len, - &nvattr, &xid, rq.r_auth, NG_MAKEENTRY, &np); + error = nfs_nget(NFSTOMP(dnp), dnp, cnp, fh->fh_data, fh->fh_len, + nvattr, &xid, req->r_auth, NG_MAKEENTRY, &np); nfsmout_if(error); newnp = np; } @@ -6820,7 +6974,10 @@ nfsmout: if (npp && !*npp && !error) { *npp = newnp; } - NVATTR_CLEANUP(&nvattr); + NVATTR_CLEANUP(nvattr); + NFS_ZFREE(nfs_fhandle_zone, fh); + NFS_ZFREE(nfs_req_zone, req); + FREE(nvattr, M_TEMP); return error; } @@ -6867,7 +7024,7 @@ nfs_dulookup_init(struct nfs_dulookup *dulp, nfsnode_t dnp, const char *name, in if (nmp && (nmp->nm_vers > NFS_VER2) && NMFLAG(nmp, RDIRPLUS)) { /* if rdirplus, try dir buf cache lookup */ nfsnode_t du_np = NULL; - if (!nfs_dir_buf_cache_lookup(dnp, &du_np, &dulp->du_cn, ctx, 0) && du_np) { + if (!nfs_dir_buf_cache_lookup(dnp, &du_np, &dulp->du_cn, ctx, 0, NULL) && du_np) { /* dir buf cache hit */ du_vp = NFSTOV(du_np); vnode_put(du_vp); @@ -6908,16 +7065,18 @@ nfs_dulookup_finish(struct nfs_dulookup *dulp, nfsnode_t dnp, vfs_context_t ctx) int error; nfsnode_t du_np; u_int64_t xid; - fhandle_t fh; - struct nfs_vattr nvattr; + fhandle_t *fh; + struct nfs_vattr *nvattr; if (!nmp || !(dulp->du_flags & NFS_DULOOKUP_INPROG)) { goto out; } - NVATTR_INIT(&nvattr); + fh = zalloc(nfs_fhandle_zone); + MALLOC(nvattr, struct nfs_vattr *, sizeof(*nvattr), M_TEMP, M_WAITOK); + NVATTR_INIT(nvattr); error = nmp->nm_funcs->nf_lookup_rpc_async_finish(dnp, dulp->du_cn.cn_nameptr, - dulp->du_cn.cn_namelen, ctx, &dulp->du_req, &xid, &fh, &nvattr); + dulp->du_cn.cn_namelen, ctx, &dulp->du_req, &xid, fh, nvattr); dulp->du_flags &= ~NFS_DULOOKUP_INPROG; if (error == ENOENT) { /* add a negative entry in the name cache */ @@ -6926,14 +7085,16 @@ nfs_dulookup_finish(struct nfs_dulookup *dulp, nfsnode_t dnp, vfs_context_t ctx) dnp->n_flag |= NNEGNCENTRIES; nfs_node_unlock(dnp); } else if (!error) { - error = nfs_nget(NFSTOMP(dnp), dnp, &dulp->du_cn, fh.fh_data, fh.fh_len, - &nvattr, &xid, dulp->du_req.r_auth, NG_MAKEENTRY, &du_np); + error = nfs_nget(NFSTOMP(dnp), dnp, &dulp->du_cn, fh->fh_data, fh->fh_len, + nvattr, &xid, dulp->du_req.r_auth, NG_MAKEENTRY, &du_np); if (!error) { nfs_node_unlock(du_np); vnode_put(NFSTOV(du_np)); } } - NVATTR_CLEANUP(&nvattr); + NVATTR_CLEANUP(nvattr); + NFS_ZFREE(nfs_fhandle_zone, fh); + FREE(nvattr, M_TEMP); out: 
if (dulp->du_flags & NFS_DULOOKUP_INPROG) { nfs_request_async_cancel(&dulp->du_req); @@ -6956,7 +7117,7 @@ nfs3_commit_rpc( uint64_t wverf) { struct nfsmount *nmp; - int error = 0, lockerror, status, wccpostattr = 0, nfsvers; + int error = 0, lockerror, status = 0, wccpostattr = 0, nfsvers; struct timespec premtime = { .tv_sec = 0, .tv_nsec = 0 }; u_int64_t xid, newwverf; uint32_t count32; @@ -6971,12 +7132,7 @@ nfs3_commit_rpc( return 0; } nfsvers = nmp->nm_vers; - - if (count > UINT32_MAX) { - count32 = 0; - } else { - count32 = count; - } + count32 = count > UINT32_MAX ? 0 : (uint32_t)count; nfsm_chain_null(&nmreq); nfsm_chain_null(&nmrep); @@ -7061,7 +7217,7 @@ nfs3_pathconf_rpc( vfs_context_t ctx) { u_int64_t xid; - int error = 0, lockerror, status, nfsvers; + int error = 0, lockerror, status = 0, nfsvers; struct nfsm_chain nmreq, nmrep; struct nfsmount *nmp = NFSTONMP(np); uint32_t val = 0; @@ -7190,8 +7346,8 @@ nfs_vnop_pathconf( if (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR) { break; /* Yes */ } - /* No... so just return an error */ - /* FALLTHROUGH */ + /* No... so just return an error */ + return EINVAL; default: /* don't bother contacting the server if we know the answer */ return EINVAL; @@ -7203,8 +7359,8 @@ nfs_vnop_pathconf( lck_mtx_lock(&nmp->nm_lock); if (nmp->nm_vers == NFS_VER3) { - if (!(nmp->nm_state & NFSSTA_GOTPATHCONF)) { - /* no pathconf info cached */ + if (!(nmp->nm_state & NFSSTA_GOTPATHCONF) || (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_HOMOGENEOUS) && nmp->nm_dnp != np)) { + /* no pathconf info cached OR we were asked for non-root pathconf and filesystem does not support FSF_HOMOGENEOUS */ lck_mtx_unlock(&nmp->nm_lock); NFS_CLEAR_ATTRIBUTES(nfsa.nfsa_bitmap); error = nfs3_pathconf_rpc(np, &nfsa, ap->a_context); @@ -7584,7 +7740,7 @@ nfs_vnop_ioctl( int error = ENOTTY; #if CONFIG_NFS_GSS struct user_nfs_gss_principal gprinc = {}; - uint32_t len; + size_t len; #endif if (mp == NULL) { @@ -7622,7 +7778,7 @@ nfs_vnop_ioctl( gprinc.nametype = tp->nametype; gprinc.principal = CAST_USER_ADDR_T(tp->principal); } - NFS_DBG(NFS_FAC_GSS, 7, "Enter NFS_FSCTL_SET_CRED (64-bit=%d): principal length %d name type %d usr pointer 0x%llx\n", vfs_context_is64bit(ctx), gprinc.princlen, gprinc.nametype, (unsigned long long)gprinc.principal); + NFS_DBG(NFS_FAC_GSS, 7, "Enter NFS_FSCTL_SET_CRED (64-bit=%d): principal length %zu name type %d usr pointer 0x%llx\n", vfs_context_is64bit(ctx), gprinc.princlen, gprinc.nametype, gprinc.principal); if (gprinc.princlen > MAXPATHLEN) { return EINVAL; } @@ -7631,9 +7787,10 @@ nfs_vnop_ioctl( if (p == NULL) { return ENOMEM; } - error = copyin(gprinc.principal, p, gprinc.princlen); + assert((user_addr_t)gprinc.principal == gprinc.principal); + error = copyin((user_addr_t)gprinc.principal, p, gprinc.princlen); if (error) { - NFS_DBG(NFS_FAC_GSS, 7, "NFS_FSCTL_SET_CRED could not copy in princiapl data of len %d: %d\n", + NFS_DBG(NFS_FAC_GSS, 7, "NFS_FSCTL_SET_CRED could not copy in princiapl data of len %zu: %d\n", gprinc.princlen, error); FREE(p, M_TEMP); return error; @@ -7666,7 +7823,8 @@ nfs_vnop_ioctl( upp->nametype = gprinc.nametype; upp->flags = gprinc.flags; if (gprinc.principal) { - error = copyout((void *)gprinc.principal, upp->principal, len); + assert((user_addr_t)upp->principal == upp->principal); + error = copyout((void *)gprinc.principal, (user_addr_t)upp->principal, len); } else { upp->principal = USER_ADDR_NULL; } @@ -7686,11 +7844,13 @@ nfs_vnop_ioctl( } } if (error) { - NFS_DBG(NFS_FAC_GSS, 7, "NFS_FSCTL_GET_CRED could not 
copy out princiapl data of len %d: %d\n", + NFS_DBG(NFS_FAC_GSS, 7, "NFS_FSCTL_GET_CRED could not copy out princiapl data of len %zu: %d\n", gprinc.princlen, error); } if (gprinc.principal) { - FREE(gprinc.principal, M_TEMP); + void *ptr = (void *)gprinc.principal; + gprinc.principal = 0; + FREE(ptr, M_TEMP); } #endif /* CONFIG_NFS_GSS */ } @@ -7736,9 +7896,9 @@ nfs_vnop_pagein( { vnode_t vp = ap->a_vp; upl_t pl = ap->a_pl; - size_t size = ap->a_size; + upl_size_t size = (upl_size_t)ap->a_size; off_t f_offset = ap->a_f_offset; - vm_offset_t pl_offset = ap->a_pl_offset; + upl_offset_t pl_offset = ap->a_pl_offset; int flags = ap->a_flags; thread_t thd; kauth_cred_t cred; @@ -7767,7 +7927,7 @@ nfs_vnop_pagein( } if (size <= 0) { - printf("nfs_pagein: invalid size %ld", size); + printf("nfs_pagein: invalid size %u", size); if (!nofreeupl) { (void) ubc_upl_abort_range(pl, pl_offset, size, 0); } @@ -7866,7 +8026,7 @@ tryagain: } if (retsize < iosize) { /* Just zero fill the rest of the valid area. */ - int zcnt = iosize - retsize; + size_t zcnt = iosize - retsize; bzero((char *)rxaddr + retsize, zcnt); FSDBG(324, uio_offset(uio), retsize, zcnt, rxaddr); uio_update(uio, zcnt); @@ -7911,9 +8071,15 @@ cancel: if (!nofreeupl) { if (error) { - ubc_upl_abort_range(pl, pl_offset, size, - UPL_ABORT_ERROR | - UPL_ABORT_FREE_ON_EMPTY); + /* + * See comment in vnode_pagein() on handling EAGAIN, even though UPL_NOCOMMIT flag + * is not set, we will not abort this upl, since VM subsystem will handle it. + */ + if (error != EAGAIN && error != EPERM) { + ubc_upl_abort_range(pl, pl_offset, size, + UPL_ABORT_ERROR | + UPL_ABORT_FREE_ON_EMPTY); + } } else { ubc_upl_commit_range(pl, pl_offset, size, UPL_COMMIT_CLEAR_DIRTY | @@ -8068,9 +8234,10 @@ nfs_vnop_pageout( { vnode_t vp = ap->a_vp; upl_t pl = ap->a_pl; - size_t size = ap->a_size; + upl_size_t size = (upl_size_t)ap->a_size; off_t f_offset = ap->a_f_offset; - vm_offset_t pl_offset = ap->a_pl_offset; + upl_offset_t pl_offset = ap->a_pl_offset; + upl_offset_t pgsize; int flags = ap->a_flags; nfsnode_t np = VTONFS(vp); thread_t thd; @@ -8084,10 +8251,10 @@ nfs_vnop_pageout( uio_t auio; char uio_buf[UIO_SIZEOF(1)]; int nofreeupl = flags & UPL_NOCOMMIT; - size_t nmwsize, biosize, iosize, pgsize, txsize, rxsize, xsize, remsize; + size_t nmwsize, biosize, iosize, remsize; struct nfsreq *req[MAXPAGINGREQS]; int nextsend, nextwait, wverfset, commit; - uint64_t wverf, wverf2; + uint64_t wverf, wverf2, xsize, txsize, rxsize; #if CONFIG_NFS4 uint32_t stategenid = 0; #endif @@ -8101,7 +8268,7 @@ nfs_vnop_pageout( } if (size <= 0) { - printf("nfs_pageout: invalid size %ld", size); + printf("nfs_pageout: invalid size %u", size); if (!nofreeupl) { ubc_upl_abort_range(pl, pl_offset, size, 0); } @@ -8127,7 +8294,7 @@ nfs_vnop_pageout( off = f_offset + iosize; /* need make sure we do things on block boundaries */ xsize = biosize - (off % biosize); - if (off + xsize > f_offset + size) { + if (off + (off_t)xsize > f_offset + (off_t)size) { xsize = f_offset + size - off; } lbn = (daddr64_t)(off / biosize); @@ -8196,10 +8363,10 @@ nfs_vnop_pageout( (bp->nb_dirtyend > end)) { /* clip dirty region, if necessary */ if (bp->nb_dirtyoff < start) { - bp->nb_dirtyend = min(bp->nb_dirtyend, start); + bp->nb_dirtyend = MIN(bp->nb_dirtyend, start); } if (bp->nb_dirtyend > end) { - bp->nb_dirtyoff = max(bp->nb_dirtyoff, end); + bp->nb_dirtyoff = MAX(bp->nb_dirtyoff, end); } FSDBG(323, bp, bp->nb_dirtyoff, bp->nb_dirtyend, 0xd00dee00); /* we're leaving this block dirty */ @@ -8265,7 +8432,7 @@ 
nfs_vnop_pageout( xsize = size; } - pgsize = round_page_64(xsize); + pgsize = (upl_offset_t)round_page_64(xsize); if ((size > pgsize) && !nofreeupl) { ubc_upl_abort_range(pl, pl_offset + pgsize, size - pgsize, UPL_ABORT_FREE_ON_EMPTY); @@ -8277,8 +8444,8 @@ nfs_vnop_pageout( * releasing it in the VM page cache */ if ((u_quad_t)f_offset < np->n_size && (u_quad_t)f_offset + size > np->n_size) { - size_t io = np->n_size - f_offset; - bzero((caddr_t)(ioaddr + io), size - io); + uint64_t io = np->n_size - f_offset; + NFS_BZERO((caddr_t)(ioaddr + io), size - io); FSDBG(321, np->n_size, f_offset, f_offset + io, size - io); } nfs_data_unlock_noupdate(np); @@ -8307,7 +8474,7 @@ tryagain: } /* send requests while we need to and have available slots */ while ((txsize > 0) && (req[nextsend] == NULL)) { - iosize = MIN(nmwsize, txsize); + iosize = (size_t)MIN(nmwsize, txsize); uio_reset(auio, txoffset, UIO_SYSSPACE, UIO_WRITE); uio_addiov(auio, CAST_USER_ADDR_T(txaddr), iosize); FSDBG(323, uio_offset(auio), iosize, txaddr, txsize); @@ -8332,7 +8499,7 @@ tryagain: } /* wait while we need to and break out if more requests to send */ while ((rxsize > 0) && req[nextwait]) { - iosize = remsize = MIN(nmwsize, rxsize); + iosize = remsize = (size_t)MIN(nmwsize, rxsize); error = nmp->nm_funcs->nf_write_rpc_async_finish(np, req[nextwait], &iomode, &iosize, &wverf2); req[nextwait] = NULL; nextwait = (nextwait + 1) % MAXPAGINGREQS; @@ -8627,7 +8794,7 @@ void nfs_vnode_notify(nfsnode_t np, uint32_t events) { struct nfsmount *nmp = NFSTONMP(np); - struct nfs_vattr nvattr; + struct nfs_vattr *nvattr; struct vnode_attr vattr, *vap = NULL; struct timeval now; @@ -8640,20 +8807,22 @@ nfs_vnode_notify(nfsnode_t np, uint32_t events) events |= np->n_events; np->n_events = 0; np->n_evtstamp = now.tv_sec; + MALLOC(nvattr, struct nfs_vattr *, sizeof(*nvattr), M_TEMP, M_WAITOK); vfs_get_notify_attributes(&vattr); - if (!nfs_getattrcache(np, &nvattr, 0)) { + if (!nfs_getattrcache(np, nvattr, 0)) { vap = &vattr; VATTR_INIT(vap); VATTR_RETURN(vap, va_fsid, vfs_statfs(nmp->nm_mountp)->f_fsid.val[0]); - VATTR_RETURN(vap, va_fileid, nvattr.nva_fileid); - VATTR_RETURN(vap, va_mode, nvattr.nva_mode); - VATTR_RETURN(vap, va_uid, nvattr.nva_uid); - VATTR_RETURN(vap, va_gid, nvattr.nva_gid); - VATTR_RETURN(vap, va_nlink, nvattr.nva_nlink); + VATTR_RETURN(vap, va_fileid, nvattr->nva_fileid); + VATTR_RETURN(vap, va_mode, nvattr->nva_mode); + VATTR_RETURN(vap, va_uid, nvattr->nva_uid); + VATTR_RETURN(vap, va_gid, nvattr->nva_gid); + VATTR_RETURN(vap, va_nlink, nvattr->nva_nlink); } vnode_notify(NFSTOV(np), events, vap); + FREE(nvattr, M_TEMP); } #endif /* CONFIG_NFS_CLIENT */ diff --git a/bsd/nfs/nfsm_subs.h b/bsd/nfs/nfsm_subs.h index 4c6c8f56f..1fff4e3e9 100644 --- a/bsd/nfs/nfsm_subs.h +++ b/bsd/nfs/nfsm_subs.h @@ -79,20 +79,20 @@ int nfsm_rpchead(struct nfsreq *, mbuf_t, u_int64_t *, mbuf_t *); int nfsm_rpchead2(struct nfsmount *, int, int, int, int, int, kauth_cred_t, struct nfsreq *, mbuf_t, u_int64_t *, mbuf_t *); int nfsm_chain_new_mbuf(struct nfsm_chain *, size_t); -int nfsm_chain_add_opaque_f(struct nfsm_chain *, const u_char *, uint32_t); -int nfsm_chain_add_opaque_nopad_f(struct nfsm_chain *, const u_char *, uint32_t); -int nfsm_chain_add_uio(struct nfsm_chain *, uio_t, uint32_t); +int nfsm_chain_add_opaque_f(struct nfsm_chain *, const u_char *, size_t); +int nfsm_chain_add_opaque_nopad_f(struct nfsm_chain *, const u_char *, size_t); +int nfsm_chain_add_uio(struct nfsm_chain *, uio_t, size_t); int nfsm_chain_add_fattr4_f(struct 
nfsm_chain *, struct vnode_attr *, struct nfsmount *); int nfsm_chain_add_v2sattr_f(struct nfsm_chain *, struct vnode_attr *, uint32_t); int nfsm_chain_add_v3sattr_f(struct nfsmount *, struct nfsm_chain *, struct vnode_attr *); -int nfsm_chain_add_string_nfc(struct nfsm_chain *, const uint8_t *, uint32_t); +int nfsm_chain_add_string_nfc(struct nfsm_chain *, const uint8_t *, size_t); -int nfsm_chain_advance(struct nfsm_chain *, uint32_t); -int nfsm_chain_offset(struct nfsm_chain *); -int nfsm_chain_reverse(struct nfsm_chain *, uint32_t); +int nfsm_chain_advance(struct nfsm_chain *, size_t); +size_t nfsm_chain_offset(struct nfsm_chain *); +int nfsm_chain_reverse(struct nfsm_chain *, size_t); int nfsm_chain_get_opaque_pointer_f(struct nfsm_chain *, uint32_t, u_char **); -int nfsm_chain_get_opaque_f(struct nfsm_chain *, uint32_t, u_char *); -int nfsm_chain_get_uio(struct nfsm_chain *, uint32_t, uio_t); +int nfsm_chain_get_opaque_f(struct nfsm_chain *, size_t, u_char *); +int nfsm_chain_get_uio(struct nfsm_chain *, size_t, uio_t); int nfsm_chain_get_fh_attr(struct nfsmount *, struct nfsm_chain *, nfsnode_t, vfs_context_t, int, uint64_t *, fhandle_t *, struct nfs_vattr *); int nfsm_chain_get_wcc_data_f(struct nfsm_chain *, nfsnode_t, struct timespec *, int *, u_int64_t *); @@ -210,6 +210,29 @@ int nfsm_chain_trim_data(struct nfsm_chain *, int, int *); (E) = mbuf_get(MBUF_WAITOK, MBUF_TYPE_DATA, (MBP)); \ } while (0) +/* + * get an mbuf with size of M16KCLBYTES given a size hint + * According to mbuf_getcluster() documentation, clusters greater than 4096 bytes might + * not be available in all configurations; the caller must additionally check for ENOTSUP. + * */ +#define nfsm_mbuf_getcluster(E, MBP, SIZEHINT) \ + do { \ + *(MBP) = NULL; \ + if ((size_t)(SIZEHINT) > MBIGCLBYTES) { \ + (E) = mbuf_getcluster(MBUF_WAITOK, MBUF_TYPE_DATA, M16KCLBYTES, (MBP)); \ + if ((E) == 0) { \ + break; \ + } \ + } \ + if ((size_t)(SIZEHINT) > MCLBYTES) { \ + (E) = mbuf_getcluster(MBUF_WAITOK, MBUF_TYPE_DATA, MBIGCLBYTES, (MBP)); \ + if ((E) == 0) { \ + break; \ + } \ + } \ + nfsm_mbuf_get(E, MBP, SIZEHINT); \ + } while (0) + /* * macros for building NFS mbuf chains @@ -298,7 +321,7 @@ int nfsm_chain_trim_data(struct nfsm_chain *, int, int *); /* add buffer of opaque data to an mbuf chain */ #define nfsm_chain_add_opaque(E, NMC, BUF, LEN) \ do { \ - uint32_t rndlen = nfsm_rndup(LEN); \ + size_t rndlen = nfsm_rndup(LEN); \ if (E) break; \ if ((NMC)->nmc_left < rndlen) { \ (E) = nfsm_chain_add_opaque_f((NMC), (const u_char*)(BUF), (LEN)); \ @@ -555,6 +578,7 @@ int nfsm_chain_trim_data(struct nfsm_chain *, int, int *); #define nfsm_chain_get_32(E, NMC, LVAL) \ do { \ uint32_t __tmp32, *__tmpptr; \ + (LVAL) = 0; \ if (E) break; \ if ((NMC)->nmc_left >= NFSX_UNSIGNED) { \ __tmpptr = (uint32_t*)(NMC)->nmc_ptr; \ @@ -572,6 +596,7 @@ int nfsm_chain_trim_data(struct nfsm_chain *, int, int *); #define nfsm_chain_get_64(E, NMC, LVAL) \ do { \ uint64_t __tmp64, *__tmpptr; \ + (LVAL) = 0; \ if (E) break; \ if ((NMC)->nmc_left >= 2 * NFSX_UNSIGNED) { \ __tmpptr = (uint64_t*)(NMC)->nmc_ptr; \ @@ -607,7 +632,7 @@ int nfsm_chain_trim_data(struct nfsm_chain *, int, int *); /* copy the next consecutive bytes of opaque data from an mbuf chain */ #define nfsm_chain_get_opaque(E, NMC, LEN, PTR) \ do { \ - uint32_t rndlen; \ + size_t rndlen; \ if (E) break; \ rndlen = nfsm_rndup(LEN); \ if (rndlen < (LEN)) { \ @@ -777,7 +802,7 @@ int nfsm_chain_trim_data(struct nfsm_chain *, int, int *); #define nfsm_chain_check_change_info(E, NMC, DNP) 
\ do { \ - uint64_t __ci_before, __ci_after; \ + uint64_t __ci_before = 0, __ci_after = 0; \ uint32_t __ci_atomic = 0; \ nfsm_chain_get_32((E), (NMC), __ci_atomic); \ nfsm_chain_get_64((E), (NMC), __ci_before); \ diff --git a/bsd/nfs/nfsmount.h b/bsd/nfs/nfsmount.h index 0743b8383..e34b4fbc0 100644 --- a/bsd/nfs/nfsmount.h +++ b/bsd/nfs/nfsmount.h @@ -79,13 +79,13 @@ struct nfs_fsattr { uint32_t nfsa_flags; /* file system flags */ uint32_t nfsa_lease; /* lease time in seconds */ - uint32_t nfsa_maxname; /* maximum filename size */ + int32_t nfsa_maxname; /* maximum filename size */ uint32_t nfsa_maxlink; /* maximum # links */ uint32_t nfsa_bsize; /* block size */ uint32_t nfsa_pad; /* UNUSED */ uint64_t nfsa_maxfilesize; /* maximum file size */ - uint64_t nfsa_maxread; /* maximum read size */ - uint64_t nfsa_maxwrite; /* maximum write size */ + uint32_t nfsa_maxread; /* maximum read size */ + uint32_t nfsa_maxwrite; /* maximum write size */ uint64_t nfsa_files_avail; /* file slots available */ uint64_t nfsa_files_free; /* file slots free */ uint64_t nfsa_files_total; /* file slots total */ @@ -224,7 +224,7 @@ struct nfs_funcs { int (*nf_setattr_rpc)(nfsnode_t, struct vnode_attr *, vfs_context_t); int (*nf_read_rpc_async)(nfsnode_t, off_t, size_t, thread_t, kauth_cred_t, struct nfsreq_cbinfo *, struct nfsreq **); int (*nf_read_rpc_async_finish)(nfsnode_t, struct nfsreq *, uio_t, size_t *, int *); - int (*nf_readlink_rpc)(nfsnode_t, char *, uint32_t *, vfs_context_t); + int (*nf_readlink_rpc)(nfsnode_t, char *, size_t *, vfs_context_t); int (*nf_write_rpc_async)(nfsnode_t, uio_t, size_t, thread_t, kauth_cred_t, int, struct nfsreq_cbinfo *, struct nfsreq **); int (*nf_write_rpc_async_finish)(nfsnode_t, struct nfsreq *, int *, size_t *, uint64_t *); int (*nf_commit_rpc)(nfsnode_t, uint64_t, uint64_t, kauth_cred_t, uint64_t); @@ -243,7 +243,7 @@ struct nfs_funcs { struct nfs_client_id { TAILQ_ENTRY(nfs_client_id) nci_link; /* list of client IDs */ char *nci_id; /* client id buffer */ - int nci_idlen; /* length of client id buffer */ + long nci_idlen; /* length of client id buffer */ }; TAILQ_HEAD(nfsclientidlist, nfs_client_id); extern struct nfsclientidlist nfsclientids; @@ -284,10 +284,10 @@ struct nfsmount { uint32_t nm_biosize; /* buffer I/O size */ uint32_t nm_readdirsize; /* Size of a readdir rpc */ uint32_t nm_readahead; /* Num. 
of blocks to readahead */ - uint32_t nm_acregmin; /* reg file min attr cache timeout */ - uint32_t nm_acregmax; /* reg file max attr cache timeout */ - uint32_t nm_acdirmin; /* dir min attr cache timeout */ - uint32_t nm_acdirmax; /* dir max attr cache timeout */ + time_t nm_acregmin; /* reg file min attr cache timeout */ + time_t nm_acregmax; /* reg file max attr cache timeout */ + time_t nm_acdirmin; /* dir min attr cache timeout */ + time_t nm_acdirmax; /* dir max attr cache timeout */ uint32_t nm_auth; /* security mechanism flavor being used */ uint32_t nm_writers; /* Number of nodes open for writing */ uint32_t nm_mappers; /* Number of nodes that have mmapped */ @@ -295,9 +295,9 @@ struct nfsmount { struct nfs_sec nm_servsec; /* server's acceptable security mechanism flavors */ struct nfs_etype nm_etype; /* If using kerberos, the support session key encryption types */ fhandle_t *nm_fh; /* initial file handle */ - uint8_t nm_lockmode; /* advisory file locking mode */ + uint32_t nm_lockmode; /* advisory file locking mode */ /* mount info */ - uint32_t nm_fsattrstamp; /* timestamp for fs attrs */ + time_t nm_fsattrstamp; /* timestamp for fs attrs */ struct nfs_fsattr nm_fsattr; /* file system attributes */ uint64_t nm_verf; /* v3/v4 write verifier */ union { @@ -307,7 +307,7 @@ struct nfsmount { int udp_cwnd; /* UDP request congestion window */ struct nfs_reqqhead udp_cwndq; /* requests waiting on cwnd */ struct sockaddr *rqsaddr;/* cached rquota socket address */ - uint32_t rqsaddrstamp; /* timestamp of rquota socket address */ + uint64_t rqsaddrstamp; /* timestamp of rquota socket address */ } v3; struct { /* v4 specific fields */ struct nfs_client_id *longid; /* client ID, long form */ diff --git a/bsd/nfs/nfsnode.h b/bsd/nfs/nfsnode.h index 83fa44505..7f7f80293 100644 --- a/bsd/nfs/nfsnode.h +++ b/bsd/nfs/nfsnode.h @@ -89,6 +89,13 @@ struct nfs_sillyrename { char nsr_name[20]; }; +/* + * NFS buf pages struct + */ +typedef struct { + uint64_t pages[8]; +} nfsbufpgs; + /* * The nfsbuf is the nfs equivalent to a struct buf. */ @@ -96,32 +103,32 @@ struct nfsbuf { LIST_ENTRY(nfsbuf) nb_hash; /* hash chain */ LIST_ENTRY(nfsbuf) nb_vnbufs; /* nfsnode's nfsbuf chain */ TAILQ_ENTRY(nfsbuf) nb_free; /* free list position if not active. */ - volatile uint32_t nb_flags; /* NB_* flags. */ - volatile uint32_t nb_lflags; /* NBL_* flags. */ os_refcnt_t nb_refs; /* outstanding references. */ - uint32_t nb_bufsize; /* buffer size */ daddr64_t nb_lblkno; /* logical block number. */ uint64_t nb_verf; /* V3 write verifier */ - int nb_commitlevel; /* lowest write commit level */ time_t nb_timestamp; /* buffer timestamp */ - int nb_error; /* errno value. */ - u_int32_t nb_valid; /* valid pages in buf */ - u_int32_t nb_dirty; /* dirty pages in buf */ - int nb_validoff; /* offset in buffer of valid region. */ - int nb_validend; /* offset of end of valid region. */ - int nb_dirtyoff; /* offset in buffer of dirty region. */ - int nb_dirtyend; /* offset of end of dirty region. */ - int nb_offio; /* offset in buffer of I/O region. */ - int nb_endio; /* offset of end of I/O region. */ - int nb_rpcs; /* Count of RPCs remaining for this buffer. */ + nfsbufpgs nb_valid; /* valid pages in buf */ + nfsbufpgs nb_dirty; /* dirty pages in buf */ caddr_t nb_data; /* mapped buffer */ nfsnode_t nb_np; /* nfsnode buffer belongs to */ kauth_cred_t nb_rcred; /* read credentials reference */ kauth_cred_t nb_wcred; /* write credentials reference */ void * nb_pagelist; /* upl */ + volatile uint32_t nb_flags; /* NB_* flags. 
*/ + volatile uint32_t nb_lflags; /* NBL_* flags. */ + uint32_t nb_bufsize; /* buffer size */ + int nb_error; /* errno value. */ + int nb_commitlevel; /* lowest write commit level */ + off_t nb_validoff; /* offset in buffer of valid region. */ + off_t nb_validend; /* offset of end of valid region. */ + off_t nb_dirtyoff; /* offset in buffer of dirty region. */ + off_t nb_dirtyend; /* offset of end of dirty region. */ + off_t nb_offio; /* offset in buffer of I/O region. */ + off_t nb_endio; /* offset of end of I/O region. */ + uint64_t nb_rpcs; /* Count of RPCs remaining for this buffer. */ }; -#define NFS_MAXBSIZE (32 * PAGE_SIZE) /* valid/dirty page masks limit buffer size */ +#define NFS_MAXBSIZE (8 * 64 * PAGE_SIZE) /* valid/dirty page masks limit buffer size */ #define NFS_A_LOT_OF_NEEDCOMMITS 256 /* max# uncommitted buffers for a node */ #define NFS_A_LOT_OF_DELAYED_WRITES MAX(nfsbufcnt/8,512) /* max# "delwri" buffers in system */ @@ -177,12 +184,24 @@ struct nfsbuf { #define NBAC_NOWAIT 0x01 /* Don't wait if buffer is busy */ #define NBAC_REMOVE 0x02 /* Remove from free list once buffer is acquired */ +/* macros for nfsbufpgs */ +#define NBPGS_STRUCT_SIZE sizeof(nfsbufpgs) +#define NBPGS_ELEMENT_SIZE sizeof(((nfsbufpgs *)0)->pages[0]) +#define NBPGS_ELEMENT_PAGES (8 * NBPGS_ELEMENT_SIZE) +#define NBPGS_ELEMENTS (NBPGS_STRUCT_SIZE / NBPGS_ELEMENT_SIZE) +#define NBPGS_ERASE(NFSBP) bzero((NFSBP), NBPGS_STRUCT_SIZE) +#define NBPGS_COPY(NFSBPDST, NFSBPSRC) memcpy((NFSBPDST), (NFSBPSRC), NBPGS_STRUCT_SIZE) +#define NBPGS_IS_EQUAL(NFSBP1, NFSBP2) (memcmp((NFSBP1), (NFSBP2), NBPGS_STRUCT_SIZE) == 0) +#define NBPGS_GET(NFSBP, P) ((NFSBP)->pages[((P)/NBPGS_ELEMENT_PAGES)] & (1LLU << ((P) % NBPGS_ELEMENT_PAGES))) +#define NBPGS_SET(NFSBP, P) ((NFSBP)->pages[((P)/NBPGS_ELEMENT_PAGES)] |= (1LLU << ((P) % NBPGS_ELEMENT_PAGES))) +#define NBPGS_UNSET(NFSBP, P) ((NFSBP)->pages[((P)/NBPGS_ELEMENT_PAGES)] &= ~(1LLU << ((P) % NBPGS_ELEMENT_PAGES))) + /* some convenience macros... 
*/ #define NBOFF(BP) ((off_t)(BP)->nb_lblkno * (off_t)(BP)->nb_bufsize) -#define NBPGVALID(BP, P) (((BP)->nb_valid >> (P)) & 0x1) -#define NBPGDIRTY(BP, P) (((BP)->nb_dirty >> (P)) & 0x1) -#define NBPGVALID_SET(BP, P) ((BP)->nb_valid |= (1 << (P))) -#define NBPGDIRTY_SET(BP, P) ((BP)->nb_dirty |= (1 << (P))) +#define NBPGVALID(BP, P) NBPGS_GET(&((BP)->nb_valid), (P)) +#define NBPGDIRTY(BP, P) NBPGS_GET(&((BP)->nb_dirty), (P)) +#define NBPGVALID_SET(BP, P) NBPGS_SET(&((BP)->nb_valid), (P)) +#define NBPGDIRTY_SET(BP, P) NBPGS_SET(&((BP)->nb_dirty), (P)) #define NBUFSTAMPVALID(BP) ((BP)->nb_timestamp != ~0) #define NBUFSTAMPINVALIDATE(BP) ((BP)->nb_timestamp = ~0) @@ -248,7 +267,7 @@ extern struct nfsbuffreehead nfsbuffree, nfsbufdelwri; struct nfs_dir_buf_header { uint16_t ndbh_flags; /* flags (see below) */ uint16_t ndbh_count; /* # of entries */ - uint32_t ndbh_entry_end; /* end offset of direntry data */ + off_t ndbh_entry_end; /* end offset of direntry data */ uint32_t ndbh_ncgen; /* name cache generation# */ uint32_t ndbh_pad; /* reserved */ }; @@ -263,6 +282,8 @@ struct nfs_dir_buf_header { (&((struct nfs_vattr*)((char*)((BP)->nb_data) + (BP)->nb_bufsize))[-((IDX)+1)]) #define NFS_DIRENTRY_LEN(namlen) \ ((sizeof(struct direntry) + (namlen) - (MAXPATHLEN-1) + 7) & ~7) +#define NFS_DIRENTRY_LEN_16(namlen) \ + (uint16_t)(NFS_DIRENTRY_LEN(namlen)); assert((namlen) <= UINT16_MAX); #define NFS_DIRENT_LEN(namlen) \ ((sizeof(struct dirent) - (NAME_MAX+1)) + (((namlen) + 1 + 3) &~ 3)) #define NFS_DIRENTRY_NEXT(DP) \ @@ -315,7 +336,7 @@ struct nfsdmap { struct nfs_vattr { enum vtype nva_type; /* vnode type (for create) */ - uint32_t nva_mode; /* file's access mode (and type) */ + mode_t nva_mode; /* file's access mode (and type) */ uid_t nva_uid; /* owner user id */ gid_t nva_gid; /* owner group id */ guid_t nva_uuuid; /* owner user UUID */ @@ -330,8 +351,8 @@ struct nfs_vattr { uint64_t nva_size; /* file size in bytes */ uint64_t nva_bytes; /* bytes of disk space held by file */ uint64_t nva_change; /* change attribute */ - int64_t nva_timesec[NFSTIME_COUNT]; - int32_t nva_timensec[NFSTIME_COUNT]; + time_t nva_timesec[NFSTIME_COUNT]; + long nva_timensec[NFSTIME_COUNT]; uint32_t nva_bitmap[NFS_ATTR_BITMAP_LEN]; /* attributes that are valid */ /* FPnfs only. 
*/ @@ -346,6 +367,8 @@ struct nfs_vattr { #define NFS_FFLAG_HAS_NAMED_ATTRS 0x0004 /* file has named attributes */ #define NFS_FFLAG_TRIGGER 0x0008 /* node is a trigger/mirror mount point */ #define NFS_FFLAG_TRIGGER_REFERRAL 0x0010 /* trigger is a referral */ +#define NFS_FFLAG_PARTIAL_WRITE 0x0020 /* partial attribute for NFSv4 writes */ +#define NFS_FFLAG_FILEID_CONTAINS_XID 0x0040 /* xid might be stashed in nva_fileid is rdirplus is enabled */ #define NFS_FFLAG_IS_ATTR 0x8000 /* file is a named attribute file/directory */ /* FPnfs only */ #define NFS_FFLAG_FPNFS_BSD_FLAGS 0x01000000 @@ -495,7 +518,7 @@ struct nfs_file_lock { uint64_t nfl_end; /* ending offset (inclusive) */ uint32_t nfl_blockcnt; /* # locks blocked on this lock */ uint16_t nfl_flags; /* see below */ - uint8_t nfl_type; /* lock type: read/write */ + uint16_t nfl_type; /* lock type: read/write */ }; /* nfl_flags */ #define NFS_FILE_LOCK_ALLOC 0x01 /* lock was allocated */ @@ -571,7 +594,7 @@ struct nfsnode { time_t n_aclstamp; /* ACL cache timestamp */ time_t n_evtstamp; /* last vnode event timestamp */ uint32_t n_events; /* pending vnode events */ - u_int8_t n_access[NFS_ACCESS_CACHE_SIZE + 1]; /* ACCESS cache */ + uint32_t n_access[NFS_ACCESS_CACHE_SIZE + 1]; /* ACCESS cache */ uid_t n_accessuid[NFS_ACCESS_CACHE_SIZE]; /* credentials having access */ time_t n_accessstamp[NFS_ACCESS_CACHE_SIZE]; /* access cache timestamp */ time_t n_rdirplusstamp_sof; /* Readdirplus sof timestamp */ @@ -609,7 +632,7 @@ struct nfsnode { struct nfsdmap *nd_cookiecache; /* dir cookie cache */ } n_un3; uint32_t n_flag; /* node flags */ - u_short n_fhsize; /* size in bytes, of fh */ + uint32_t n_fhsize; /* size in bytes, of fh */ u_short n_hflag; /* node hash flags */ u_short n_bflag; /* node buffer flags */ u_short n_mflag; /* node mount flags */ @@ -842,12 +865,12 @@ void nfs_data_update_size(nfsnode_t, int); /* other stuff */ int nfs_removeit(struct nfs_sillyrename *); -int nfs_nget(mount_t, nfsnode_t, struct componentname *, u_char *, int, struct nfs_vattr *, u_int64_t *, uint32_t, int, nfsnode_t*); +int nfs_nget(mount_t, nfsnode_t, struct componentname *, u_char *, uint32_t, struct nfs_vattr *, u_int64_t *, uint32_t, int, nfsnode_t*); int nfs_mount_is_dirty(mount_t); void nfs_dir_cookie_cache(nfsnode_t, uint64_t, uint64_t); int nfs_dir_cookie_to_lbn(nfsnode_t, uint64_t, int *, uint64_t *); void nfs_invaldir(nfsnode_t); -uint32_t nfs_dir_buf_freespace(struct nfsbuf *, int); +uint64_t nfs_dir_buf_freespace(struct nfsbuf *, int); /* nfsbuf functions */ void nfs_nbinit(void); @@ -873,6 +896,12 @@ errno_t nfs_buf_acquire(struct nfsbuf *, int, int, int); int nfs_buf_iterprepare(nfsnode_t, struct nfsbuflists *, int); void nfs_buf_itercomplete(nfsnode_t, struct nfsbuflists *, int); +void nfs_buf_pgs_get_page_mask(nfsbufpgs *, off_t); +void nfs_buf_pgs_bit_not(nfsbufpgs *); +void nfs_buf_pgs_bit_and(nfsbufpgs *, nfsbufpgs *, nfsbufpgs *); +void nfs_buf_pgs_set_pages_between(nfsbufpgs *, off_t, off_t); +int nfs_buf_pgs_is_set(nfsbufpgs *); + int nfs_bioread(nfsnode_t, uio_t, int, vfs_context_t); int nfs_buf_readahead(nfsnode_t, int, daddr64_t *, daddr64_t, thread_t, kauth_cred_t); int nfs_buf_readdir(struct nfsbuf *, vfs_context_t); diff --git a/bsd/nfs/nfsproto.h b/bsd/nfs/nfsproto.h index 1ade820c3..a25ab7aea 100644 --- a/bsd/nfs/nfsproto.h +++ b/bsd/nfs/nfsproto.h @@ -89,20 +89,16 @@ #define NFS_V2MAXDATA 8192 #define NFS_MAXDGRAMDATA 16384 #define NFS_PREFDGRAMDATA 8192 - -#ifdef XNU_TARGET_OS_IOS -#define NFS_MAXDATA (32 * PAGE_SIZE) /* Same 
as NFS_MAXBSIZE from nfsnode.h */ -#else /* TARGET_OS_IOS */ -#define NFS_MAXDATA (64*1024) -#endif /* TARGET_OS_IOS */ - -#define NFSRV_MAXDATA (64*1024) // XXX not ready for >64K +#define NFS_MAXDATA (8 * 64 * PAGE_SIZE) /* Same as NFS_MAXBSIZE from nfsnode.h */ #define NFS_MAXPATHLEN 1024 #define NFS_MAXNAMLEN 255 -#define NFS_MAXPACKET (16*1024*1024) -#define NFS_UDPSOCKBUF (224*1024) +#define NFS_MAXPACKET (16 * 1024 * 1024) +#define NFS_UDPSOCKBUF (224 * 1024) #define NFS_FABLKSIZE 512 /* Size in bytes of a block wrt fa_blocks */ +#define NFSRV_MAXDATA NFS_MAXDATA +#define NFSRV_TCPSOCKBUF (2 * NFSRV_MAXDATA) + #define NFS4_CALLBACK_PROG 0x4E465343 /* "NFSC" */ #define NFS4_CALLBACK_PROG_VERSION 1 @@ -357,6 +353,21 @@ typedef enum { NFNON=0, NFREG=1, NFDIR=2, NFBLK=3, NFCHR=4, NFLNK=5, #define NFS_BITMAP_SET(B, I) (((uint32_t *)(B))[(I)/32] |= 1U<<((I)%32)) #define NFS_BITMAP_CLR(B, I) (((uint32_t *)(B))[(I)/32] &= ~(1U<<((I)%32))) #define NFS_BITMAP_ISSET(B, I) (((uint32_t *)(B))[(I)/32] & (1U<<((I)%32))) +#define NFS_BITMAP_COPY_ATTR(FROM, TO, WHICH, ATTR) \ + do { \ + if (NFS_BITMAP_ISSET(((FROM)->nva_bitmap), (NFS_FATTR_##WHICH))) { \ + (TO)->nva_##ATTR = (FROM)->nva_##ATTR; \ + NFS_BITMAP_SET(((TO)->nva_bitmap), (NFS_FATTR_##WHICH)); \ + } \ + } while (0) +#define NFS_BITMAP_COPY_TIME(FROM, TO, WHICH, ATTR) \ + do { \ + if (NFS_BITMAP_ISSET(((FROM)->nva_bitmap), (NFS_FATTR_TIME_##WHICH))) { \ + (TO)->nva_timesec[NFSTIME_##ATTR] = (FROM)->nva_timesec[NFSTIME_##ATTR]; \ + (TO)->nva_timensec[NFSTIME_##ATTR] = (FROM)->nva_timensec[NFSTIME_##ATTR]; \ + NFS_BITMAP_SET(((TO)->nva_bitmap), (NFS_FATTR_TIME_##WHICH)); \ + } \ + } while (0) #define NFS_BITMAP_ZERO(B, L) \ do { \ int __i; \ @@ -367,6 +378,7 @@ typedef enum { NFNON=0, NFREG=1, NFDIR=2, NFBLK=3, NFCHR=4, NFLNK=5, extern uint32_t nfs_fs_attr_bitmap[NFS_ATTR_BITMAP_LEN]; extern uint32_t nfs_object_attr_bitmap[NFS_ATTR_BITMAP_LEN]; extern uint32_t nfs_getattr_bitmap[NFS_ATTR_BITMAP_LEN]; +extern uint32_t nfs4_getattr_write_bitmap[NFS_ATTR_BITMAP_LEN]; #define NFS_CLEAR_ATTRIBUTES(A) NFS_BITMAP_ZERO((A), NFS_ATTR_BITMAP_LEN) #define NFS_COPY_ATTRIBUTES(SRC, DST) \ @@ -621,6 +633,21 @@ extern uint32_t nfs_getattr_bitmap[NFS_ATTR_BITMAP_LEN]; NFS_BITMAP_SET((A), NFS_FATTR_MOUNTED_ON_FILEID); \ } while (0) +/* + * NFSv4 WRITE RPCs contain partial GETATTR requests - only type, change, size, metadatatime and modifytime are requested. + * In such cases, we do not update the time stamp - but the requested attributes. 
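Per the comment above, an NFSv4 WRITE reply carries only a partial attribute set, so the client merges just the attributes the server flagged instead of replacing the whole cached entry. A plausible sketch of that merge using the NFS_BITMAP_COPY_ATTR/NFS_BITMAP_COPY_TIME macros defined above (the function name is hypothetical, and pairing TIME_METADATA with NFSTIME_CHANGE is an assumption about how the client stores ctime):

/* fold the attributes present in a partial WRITE reply into the cached copy */
static void
nfs_example_merge_write_attrs(struct nfs_vattr *reply, struct nfs_vattr *cached)
{
	NFS_BITMAP_COPY_ATTR(reply, cached, TYPE, type);
	NFS_BITMAP_COPY_ATTR(reply, cached, CHANGE, change);
	NFS_BITMAP_COPY_ATTR(reply, cached, SIZE, size);
	NFS_BITMAP_COPY_TIME(reply, cached, METADATA, CHANGE);   /* ctime */
	NFS_BITMAP_COPY_TIME(reply, cached, MODIFY, MODIFY);     /* mtime */
}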
+ */ +#define NFS4_DEFAULT_WRITE_ATTRIBUTES(A) \ + do { \ + /* required: */ \ + NFS_BITMAP_SET((A), NFS_FATTR_TYPE); \ + NFS_BITMAP_SET((A), NFS_FATTR_CHANGE); \ + NFS_BITMAP_SET((A), NFS_FATTR_SIZE); \ + /* optional: */ \ + NFS_BITMAP_SET((A), NFS_FATTR_TIME_METADATA); \ + NFS_BITMAP_SET((A), NFS_FATTR_TIME_MODIFY); \ + } while (0) + /* attributes requested when we want to do a "statfs" */ #define NFS4_STATFS_ATTRIBUTES(A) \ do { \ diff --git a/bsd/nfs/rpcv2.h b/bsd/nfs/rpcv2.h index f23f98572..d518d563f 100644 --- a/bsd/nfs/rpcv2.h +++ b/bsd/nfs/rpcv2.h @@ -148,6 +148,7 @@ #define RPCMNT_NAMELEN 255 #define RPCMNT_PATHLEN 1024 #define RPCPROG_NFS 100003 +#define RPCPROG_STAT 100024 #define RPCPROG_RQUOTA 100011 #define RPCRQUOTA_VER 1 diff --git a/bsd/nfs/xdr_subs.h b/bsd/nfs/xdr_subs.h index 36e4dc929..1af6749cd 100644 --- a/bsd/nfs/xdr_subs.h +++ b/bsd/nfs/xdr_subs.h @@ -109,8 +109,8 @@ struct xdrbuf { union { struct { char * xbb_base; /* base address of buffer */ - uint32_t xbb_size; /* size of buffer */ - uint32_t xbb_len; /* length of data in buffer */ + size_t xbb_size; /* size of buffer */ + size_t xbb_len; /* length of data in buffer */ } xb_buffer; } xb_u; char * xb_ptr; /* pointer to current position */ @@ -134,10 +134,10 @@ void xb_free(void *); int xb_grow(struct xdrbuf *); void xb_set_cur_buf_len(struct xdrbuf *); char *xb_buffer_base(struct xdrbuf *); -int xb_advance(struct xdrbuf *, uint32_t); -int xb_offset(struct xdrbuf *); -int xb_seek(struct xdrbuf *, uint32_t); -int xb_add_bytes(struct xdrbuf *, const char *, uint32_t, int); +int xb_advance(struct xdrbuf *, size_t); +size_t xb_offset(struct xdrbuf *); +int xb_seek(struct xdrbuf *, size_t); +int xb_add_bytes(struct xdrbuf *, const char *, size_t, int); int xb_get_bytes(struct xdrbuf *, char *, uint32_t, int); #ifdef _NFS_XDR_SUBS_FUNCS_ @@ -221,9 +221,9 @@ xb_set_cur_buf_len(struct xdrbuf *xbp) * advance forward through existing data in xdrbuf */ int -xb_advance(struct xdrbuf *xbp, uint32_t len) +xb_advance(struct xdrbuf *xbp, size_t len) { - uint32_t tlen; + size_t tlen; while (len) { if (xbp->xb_left <= 0) { @@ -242,10 +242,10 @@ xb_advance(struct xdrbuf *xbp, uint32_t len) /* * Calculate the current offset in the XDR buffer. */ -int +size_t xb_offset(struct xdrbuf *xbp) { - uint32_t offset = 0; + size_t offset = 0; switch (xbp->xb_type) { case XDRBUF_BUFFER: @@ -262,7 +262,7 @@ xb_offset(struct xdrbuf *xbp) * Seek to the given offset in the existing data in the XDR buffer. */ int -xb_seek(struct xdrbuf *xbp, uint32_t offset) +xb_seek(struct xdrbuf *xbp, size_t offset) { switch (xbp->xb_type) { case XDRBUF_BUFFER: @@ -347,9 +347,9 @@ xb_grow(struct xdrbuf *xbp) * Add "count" bytes of opaque data pointed to by "buf" to the given XDR buffer. */ int -xb_add_bytes(struct xdrbuf *xbp, const char *buf, uint32_t count, int nopad) +xb_add_bytes(struct xdrbuf *xbp, const char *buf, size_t count, int nopad) { - uint32_t len, tlen; + size_t len, tlen; int error; len = nopad ? count : xdr_rndup(count); @@ -395,7 +395,7 @@ xb_add_bytes(struct xdrbuf *xbp, const char *buf, uint32_t count, int nopad) int xb_get_bytes(struct xdrbuf *xbp, char *buf, uint32_t count, int nopad) { - uint32_t len, tlen; + size_t len, tlen; len = nopad ? 
count : xdr_rndup(count); @@ -499,6 +499,7 @@ xb_get_bytes(struct xdrbuf *xbp, char *buf, uint32_t count, int nopad) #define xb_get_32(E, XB, LVAL) \ do { \ uint32_t __tmp; \ + (LVAL) = (typeof((LVAL))) 0; \ if (E) break; \ (E) = xb_get_bytes((XB), (char*)&__tmp, XDRWORD, 0); \ if (E) break; \ @@ -509,6 +510,7 @@ xb_get_bytes(struct xdrbuf *xbp, char *buf, uint32_t count, int nopad) #define xb_get_64(E, XB, LVAL) \ do { \ uint64_t __tmp; \ + (LVAL) = 0; \ if (E) break; \ (E) = xb_get_bytes((XB), (char*)&__tmp, 2 * XDRWORD, 0); \ if (E) break; \ diff --git a/bsd/pgo/profile_runtime_data.c b/bsd/pgo/profile_runtime_data.c index 3bfc33d96..f6c506055 100644 --- a/bsd/pgo/profile_runtime_data.c +++ b/bsd/pgo/profile_runtime_data.c @@ -1,5 +1,19 @@ +#include + /* * This tells compiler_rt not to include userspace-specific stuff writing * profile data to a file. */ int __llvm_profile_runtime = 0; + +/* compiler-rt requires this. It uses it to page-align + * certain things inside its buffers. + */ + +extern int getpagesize(void); + +int +getpagesize() +{ + return PAGE_SIZE; +} diff --git a/bsd/pthread/priority_private.h b/bsd/pthread/priority_private.h index dbfff7e54..a67a73996 100644 --- a/bsd/pthread/priority_private.h +++ b/bsd/pthread/priority_private.h @@ -93,33 +93,33 @@ */ typedef unsigned long pthread_priority_t; -#define _PTHREAD_PRIORITY_FLAGS_MASK 0xff000000 +#define _PTHREAD_PRIORITY_FLAGS_MASK 0xff000000u #define _PTHREAD_PRIORITY_FLAGS_SHIFT (24ull) -#define _PTHREAD_PRIORITY_OVERCOMMIT_FLAG 0x80000000 -#define _PTHREAD_PRIORITY_INHERIT_FLAG 0x40000000 /* dispatch only */ -#define _PTHREAD_PRIORITY_ROOTQUEUE_FLAG 0x20000000 /* dispatch only */ -#define _PTHREAD_PRIORITY_SCHED_PRI_FLAG 0x20000000 -#define _PTHREAD_PRIORITY_SCHED_PRI_MASK 0x0000ffff -#define _PTHREAD_PRIORITY_ENFORCE_FLAG 0x10000000 /* dispatch only */ -#define _PTHREAD_PRIORITY_OVERRIDE_FLAG 0x08000000 /* unused */ -#define _PTHREAD_PRIORITY_FALLBACK_FLAG 0x04000000 -#define _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG 0x02000000 -#define _PTHREAD_PRIORITY_NEEDS_UNBIND_FLAG 0x01000000 +#define _PTHREAD_PRIORITY_OVERCOMMIT_FLAG 0x80000000u +#define _PTHREAD_PRIORITY_INHERIT_FLAG 0x40000000u /* dispatch only */ +#define _PTHREAD_PRIORITY_ROOTQUEUE_FLAG 0x20000000u /* dispatch only */ +#define _PTHREAD_PRIORITY_SCHED_PRI_FLAG 0x20000000u +#define _PTHREAD_PRIORITY_SCHED_PRI_MASK 0x0000ffffu +#define _PTHREAD_PRIORITY_ENFORCE_FLAG 0x10000000u /* dispatch only */ +#define _PTHREAD_PRIORITY_OVERRIDE_FLAG 0x08000000u /* unused */ +#define _PTHREAD_PRIORITY_FALLBACK_FLAG 0x04000000u +#define _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG 0x02000000u +#define _PTHREAD_PRIORITY_NEEDS_UNBIND_FLAG 0x01000000u #define _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG _PTHREAD_PRIORITY_FALLBACK_FLAG // compat -#define _PTHREAD_PRIORITY_ENCODING_MASK 0x00a00000 +#define _PTHREAD_PRIORITY_ENCODING_MASK 0x00a00000u #define _PTHREAD_PRIORITY_ENCODING_SHIFT (22ull) -#define _PTHREAD_PRIORITY_ENCODING_V0 0x00000000 -#define _PTHREAD_PRIORITY_ENCODING_V1 0x00400000 /* unused */ -#define _PTHREAD_PRIORITY_ENCODING_V2 0x00800000 /* unused */ -#define _PTHREAD_PRIORITY_ENCODING_V3 0x00a00000 /* unused */ +#define _PTHREAD_PRIORITY_ENCODING_V0 0x00000000u +#define _PTHREAD_PRIORITY_ENCODING_V1 0x00400000u /* unused */ +#define _PTHREAD_PRIORITY_ENCODING_V2 0x00800000u /* unused */ +#define _PTHREAD_PRIORITY_ENCODING_V3 0x00a00000u /* unused */ -#define _PTHREAD_PRIORITY_QOS_CLASS_MASK 0x003fff00 -#define _PTHREAD_PRIORITY_VALID_QOS_CLASS_MASK 0x00003f00 +#define 
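[Editorial aside, not part of the patch: the new "(LVAL) = 0" assignments in xb_get_32/xb_get_64 above guarantee the destination holds a defined value even when an earlier error short-circuits the do/while body or the fetch itself fails, so callers that look at the variable later never read stack garbage. A stripped-down, user-space imitation of that pattern, with hypothetical names rather than the kernel macros:]

#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* fallible fetch of a 32-bit word from a bounded buffer */
static int
fetch_u32(const uint8_t *buf, size_t len, size_t off, uint32_t *out)
{
	if (off + 4 > len) {
		return -1;              /* decode error: *out is not written */
	}
	memcpy(out, buf + off, 4);
	return 0;
}

/* GET_32 mirrors xb_get_32: pre-zero the destination, then skip the fetch
 * if an earlier step already failed */
#define GET_32(E, BUF, LEN, OFF, LVAL)                          \
	do {                                                    \
		(LVAL) = 0;             /* defined on all paths */ \
		if (E) break;                                   \
		(E) = fetch_u32((BUF), (LEN), (OFF), &(LVAL)); \
	} while (0)

int
main(void)
{
	uint8_t buf[4] = {1, 0, 0, 0};
	uint32_t a, b;
	int error = 0;

	GET_32(error, buf, sizeof(buf), 0, a);   /* succeeds */
	GET_32(error, buf, sizeof(buf), 4, b);   /* fails, but b == 0, not garbage */
	printf("error=%d a=%u b=%u\n", error, a, b);
	return 0;
}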
_PTHREAD_PRIORITY_QOS_CLASS_MASK 0x003fff00u +#define _PTHREAD_PRIORITY_VALID_QOS_CLASS_MASK 0x00003f00u #define _PTHREAD_PRIORITY_QOS_CLASS_SHIFT (8ull) -#define _PTHREAD_PRIORITY_PRIORITY_MASK 0x000000ff +#define _PTHREAD_PRIORITY_PRIORITY_MASK 0x000000ffu #define _PTHREAD_PRIORITY_PRIORITY_SHIFT (0) #if PRIVATE @@ -134,6 +134,10 @@ typedef unsigned long pthread_priority_t; #include // THREAD_QOS_* #include +// pthread_priority_t's type is unfortunately 64bits on LP64 +// so we use this type for people who need to store it in structs +typedef unsigned int pthread_priority_compact_t; + __attribute__((always_inline, const)) static inline bool _pthread_priority_has_qos(pthread_priority_t pp) @@ -146,11 +150,11 @@ _pthread_priority_has_qos(pthread_priority_t pp) } __attribute__((always_inline, const)) -static inline pthread_priority_t +static inline pthread_priority_compact_t _pthread_priority_make_from_thread_qos(thread_qos_t qos, int relpri, unsigned long flags) { - pthread_priority_t pp = (flags & _PTHREAD_PRIORITY_FLAGS_MASK); + pthread_priority_compact_t pp = (flags & _PTHREAD_PRIORITY_FLAGS_MASK); if (qos && qos < THREAD_QOS_LAST) { pp |= (1 << (_PTHREAD_PRIORITY_QOS_CLASS_SHIFT + qos - 1)); pp |= ((uint8_t)relpri - 1) & _PTHREAD_PRIORITY_PRIORITY_MASK; @@ -159,21 +163,21 @@ _pthread_priority_make_from_thread_qos(thread_qos_t qos, int relpri, } __attribute__((always_inline, const)) -static inline pthread_priority_t +static inline pthread_priority_compact_t _pthread_event_manager_priority(void) { return _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG; } __attribute__((always_inline, const)) -static inline pthread_priority_t +static inline pthread_priority_compact_t _pthread_unspecified_priority(void) { return _pthread_priority_make_from_thread_qos(THREAD_QOS_UNSPECIFIED, 0, 0); } __attribute__((always_inline, const)) -static inline pthread_priority_t +static inline pthread_priority_compact_t _pthread_default_priority(unsigned long flags) { return _pthread_priority_make_from_thread_qos(THREAD_QOS_LEGACY, 0, flags); @@ -218,7 +222,7 @@ _pthread_priority_relpri(pthread_priority_t pp) * Normalize and validate QoS/relpri */ __attribute__((const)) -pthread_priority_t +pthread_priority_compact_t _pthread_priority_normalize(pthread_priority_t pp); /* @@ -226,7 +230,7 @@ _pthread_priority_normalize(pthread_priority_t pp); * Normalize and validate QoS/relpri */ __attribute__((const)) -pthread_priority_t +pthread_priority_compact_t _pthread_priority_normalize_for_ipc(pthread_priority_t pp); /* @@ -234,7 +238,7 @@ _pthread_priority_normalize_for_ipc(pthread_priority_t pp); * of base_pp and _pthread_priority_make_from_thread_qos(qos, 0, 0) */ __attribute__((const)) -pthread_priority_t +pthread_priority_compact_t _pthread_priority_combine(pthread_priority_t base_pp, thread_qos_t qos); #endif // KERNEL diff --git a/bsd/pthread/pthread_priority.c b/bsd/pthread/pthread_priority.c index 05fef52c6..199c5db69 100644 --- a/bsd/pthread/pthread_priority.c +++ b/bsd/pthread/pthread_priority.c @@ -33,7 +33,7 @@ #define QOS_MIN_RELATIVE_PRIORITY -15 #endif -pthread_priority_t +pthread_priority_compact_t _pthread_priority_normalize(pthread_priority_t pp) { if (pp & _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG) { @@ -52,7 +52,7 @@ _pthread_priority_normalize(pthread_priority_t pp) return _pthread_unspecified_priority(); } -pthread_priority_t +pthread_priority_compact_t _pthread_priority_normalize_for_ipc(pthread_priority_t pp) { if (_pthread_priority_has_qos(pp)) { @@ -66,7 +66,7 @@ 
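[Editorial aside, not part of the patch: the comment above is the whole motivation for pthread_priority_compact_t — the encoded priority only ever uses the low 32 bits, but pthread_priority_t is unsigned long, i.e. 8 bytes on LP64, so embedding the full type in structs wastes space and padding. The "u" suffixes added to the flag constants presumably keep the whole mask family unsigned under stricter sign-conversion warnings. A tiny stand-alone illustration of the size difference, using local typedefs rather than the real headers:]

#include <stdio.h>

typedef unsigned long pthread_priority_t_wide;     /* 8 bytes on LP64 */
typedef unsigned int  pthread_priority_compact;    /* 4 bytes, enough for the encoding */

struct request_wide   { pthread_priority_t_wide  pri; unsigned short count; };
struct request_narrow { pthread_priority_compact pri; unsigned short count; };

int
main(void)
{
	/* on a typical LP64 target the wide struct pads out to 16 bytes,
	 * while the narrow one fits in 8 */
	printf("wide=%zu narrow=%zu\n",
	    sizeof(struct request_wide), sizeof(struct request_narrow));
	return 0;
}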
_pthread_priority_normalize_for_ipc(pthread_priority_t pp) return _pthread_unspecified_priority(); } -pthread_priority_t +pthread_priority_compact_t _pthread_priority_combine(pthread_priority_t base_pp, thread_qos_t qos) { if (base_pp & _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG) { @@ -75,10 +75,10 @@ _pthread_priority_combine(pthread_priority_t base_pp, thread_qos_t qos) if (base_pp & _PTHREAD_PRIORITY_FALLBACK_FLAG) { if (!qos) { - return base_pp; + return (pthread_priority_compact_t)base_pp; } } else if (qos < _pthread_priority_thread_qos(base_pp)) { - return base_pp; + return (pthread_priority_compact_t)base_pp; } return _pthread_priority_make_from_thread_qos(qos, 0, diff --git a/bsd/pthread/pthread_workqueue.c b/bsd/pthread/pthread_workqueue.c index 10e17d52e..bc4bd4812 100644 --- a/bsd/pthread/pthread_workqueue.c +++ b/bsd/pthread/pthread_workqueue.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2017 Apple Inc. All rights reserved. + * Copyright (c) 2000-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -107,14 +107,15 @@ struct workq_usec_var { CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, &var, 0, \ workq_sysctl_handle_usecs, "I", "") -static lck_grp_t *workq_lck_grp; -static lck_attr_t *workq_lck_attr; -static lck_grp_attr_t *workq_lck_grp_attr; +static LCK_GRP_DECLARE(workq_lck_grp, "workq"); os_refgrp_decl(static, workq_refgrp, "workq", NULL); +static ZONE_DECLARE(workq_zone_workqueue, "workq.wq", + sizeof(struct workqueue), ZC_NONE); +static ZONE_DECLARE(workq_zone_threadreq, "workq.threadreq", + sizeof(struct workq_threadreq_s), ZC_CACHING); + static struct mpsc_daemon_queue workq_deallocate_queue; -static zone_t workq_zone_workqueue; -static zone_t workq_zone_threadreq; WORKQ_SYSCTL_USECS(wq_stalled_window, WQ_STALLED_WINDOW_USECS); WORKQ_SYSCTL_USECS(wq_reduce_pool_window, WQ_REDUCE_POOL_WINDOW_USECS); @@ -254,7 +255,7 @@ _wq_bucket(thread_qos_t qos) } #define WQ_THACTIVE_BEST_CONSTRAINED_REQ_QOS(tha) \ - ((tha) >> WQ_THACTIVE_QOS_SHIFT) + ((thread_qos_t)((tha) >> WQ_THACTIVE_QOS_SHIFT)) static inline thread_qos_t _wq_thactive_best_constrained_req_qos(struct workqueue *wq) @@ -394,7 +395,7 @@ workq_lock_spin_is_acquired_kdp(struct workqueue *wq) static inline void workq_lock_spin(struct workqueue *wq) { - lck_spin_lock_grp(&wq->wq_lock, workq_lck_grp); + lck_spin_lock_grp(&wq->wq_lock, &workq_lck_grp); } static inline void @@ -406,7 +407,7 @@ workq_lock_held(__assert_only struct workqueue *wq) static inline bool workq_lock_try(struct workqueue *wq) { - return lck_spin_try_lock_grp(&wq->wq_lock, workq_lck_grp); + return lck_spin_try_lock_grp(&wq->wq_lock, &workq_lck_grp); } static inline void @@ -866,7 +867,7 @@ workq_add_new_idle_thread(proc_t p, struct workqueue *wq) wq->wq_creations++; wq->wq_thidlecount++; - uth->uu_workq_stackaddr = th_stackaddr; + uth->uu_workq_stackaddr = (user_addr_t)th_stackaddr; TAILQ_INSERT_TAIL(&wq->wq_thnewlist, uth, uu_workq_entry); WQ_TRACE_WQ(TRACE_wq_thread_create | DBG_FUNC_NONE, wq, 0, 0, 0, 0); @@ -1073,7 +1074,7 @@ workq_priority_for_req(workq_threadreq_t req) return thread_workq_pri_for_qos(qos); } -static inline struct priority_queue * +static inline struct priority_queue_sched_max * workq_priority_queue_for_req(struct workqueue *wq, workq_threadreq_t req) { if (req->tr_flags & WORKQ_TR_FLAG_WL_OUTSIDE_QOS) { @@ -1104,9 +1105,12 @@ workq_threadreq_enqueue(struct workqueue *wq, workq_threadreq_t req) wq->wq_event_manager_threadreq = req; return true; } - if (priority_queue_insert(workq_priority_queue_for_req(wq, 
req), - &req->tr_entry, workq_priority_for_req(req), - PRIORITY_QUEUE_SCHED_PRI_MAX_HEAP_COMPARE)) { + + struct priority_queue_sched_max *q = workq_priority_queue_for_req(wq, req); + priority_queue_entry_set_sched_pri(q, &req->tr_entry, + workq_priority_for_req(req), false); + + if (priority_queue_insert(q, &req->tr_entry)) { if ((req->tr_flags & WORKQ_TR_FLAG_OVERCOMMIT) == 0) { _wq_thactive_refresh_best_constrained_req_qos(wq); } @@ -1132,7 +1136,7 @@ workq_threadreq_dequeue(struct workqueue *wq, workq_threadreq_t req) return true; } if (priority_queue_remove(workq_priority_queue_for_req(wq, req), - &req->tr_entry, PRIORITY_QUEUE_SCHED_PRI_MAX_HEAP_COMPARE)) { + &req->tr_entry)) { if ((req->tr_flags & WORKQ_TR_FLAG_OVERCOMMIT) == 0) { _wq_thactive_refresh_best_constrained_req_qos(wq); } @@ -1195,12 +1199,12 @@ workq_schedule_delayed_thread_creation(struct workqueue *wq, int flags) } else if (now - wq->wq_thread_call_last_run <= wq->wq_timer_interval) { wq->wq_timer_interval *= 2; if (wq->wq_timer_interval > wq_max_timer_interval.abstime) { - wq->wq_timer_interval = wq_max_timer_interval.abstime; + wq->wq_timer_interval = (uint32_t)wq_max_timer_interval.abstime; } } else if (now - wq->wq_thread_call_last_run > 2 * wq->wq_timer_interval) { wq->wq_timer_interval /= 2; if (wq->wq_timer_interval < wq_stalled_window.abstime) { - wq->wq_timer_interval = wq_stalled_window.abstime; + wq->wq_timer_interval = (uint32_t)wq_stalled_window.abstime; } } @@ -1442,7 +1446,7 @@ workq_deallocate_queue_invoke(mpsc_queue_chain_t e, turnstile_cleanup(); turnstile_deallocate(ts); - lck_spin_destroy(&wq->wq_lock, workq_lck_grp); + lck_spin_destroy(&wq->wq_lock, &workq_lck_grp); zfree(workq_zone_workqueue, wq); } @@ -1479,7 +1483,7 @@ workq_open(struct proc *p, __unused struct workq_open_args *uap, } if (wq_init_constrained_limit) { - uint32_t limit, num_cpus = ml_get_max_cpus(); + uint32_t limit, num_cpus = ml_wait_max_cpus(); /* * set up the limit for the constrained pool @@ -1524,7 +1528,7 @@ workq_open(struct proc *p, __unused struct workq_open_args *uap, thread_qos_t mgr_priority_hint = task_get_default_manager_qos(current_task()); pthread_priority_t pp = _pthread_priority_make_from_thread_qos(mgr_priority_hint, 0, 0); wq->wq_event_manager_priority = (uint32_t)pp; - wq->wq_timer_interval = wq_stalled_window.abstime; + wq->wq_timer_interval = (uint32_t)wq_stalled_window.abstime; wq->wq_proc = p; turnstile_prepare((uintptr_t)wq, &wq->wq_turnstile, turnstile_alloc(), TURNSTILE_WORKQS); @@ -1532,12 +1536,9 @@ workq_open(struct proc *p, __unused struct workq_open_args *uap, TAILQ_INIT(&wq->wq_thrunlist); TAILQ_INIT(&wq->wq_thnewlist); TAILQ_INIT(&wq->wq_thidlelist); - priority_queue_init(&wq->wq_overcommit_queue, - PRIORITY_QUEUE_BUILTIN_MAX_HEAP); - priority_queue_init(&wq->wq_constrained_queue, - PRIORITY_QUEUE_BUILTIN_MAX_HEAP); - priority_queue_init(&wq->wq_special_queue, - PRIORITY_QUEUE_BUILTIN_MAX_HEAP); + priority_queue_init(&wq->wq_overcommit_queue); + priority_queue_init(&wq->wq_constrained_queue); + priority_queue_init(&wq->wq_special_queue); wq->wq_delayed_call = thread_call_allocate_with_options( workq_add_new_threads_call, p, THREAD_CALL_PRIORITY_KERNEL, @@ -1549,7 +1550,7 @@ workq_open(struct proc *p, __unused struct workq_open_args *uap, workq_kill_old_threads_call, wq, THREAD_CALL_PRIORITY_USER, THREAD_CALL_OPTIONS_ONCE); - lck_spin_init(&wq->wq_lock, workq_lck_grp, workq_lck_attr); + lck_spin_init(&wq->wq_lock, &workq_lck_grp, LCK_ATTR_NULL); WQ_TRACE_WQ(TRACE_wq_create | DBG_FUNC_NONE, wq, 
VM_KERNEL_ADDRHIDE(wq), 0, 0, 0); @@ -1618,15 +1619,15 @@ workq_mark_exiting(struct proc *p) * It is hence safe to do the tear down without holding any lock. */ priority_queue_destroy(&wq->wq_overcommit_queue, - struct workq_threadreq_s, tr_entry, ^(void *e){ + struct workq_threadreq_s, tr_entry, ^(workq_threadreq_t e){ workq_threadreq_destroy(p, e); }); priority_queue_destroy(&wq->wq_constrained_queue, - struct workq_threadreq_s, tr_entry, ^(void *e){ + struct workq_threadreq_s, tr_entry, ^(workq_threadreq_t e){ workq_threadreq_destroy(p, e); }); priority_queue_destroy(&wq->wq_special_queue, - struct workq_threadreq_s, tr_entry, ^(void *e){ + struct workq_threadreq_s, tr_entry, ^(workq_threadreq_t e){ workq_threadreq_destroy(p, e); }); @@ -1799,7 +1800,7 @@ qos: } old_pri = new_pri = uth->uu_workq_pri; - new_pri.qos_req = new_policy.qos_tier; + new_pri.qos_req = (thread_qos_t)new_policy.qos_tier; workq_thread_update_bucket(p, wq, uth, old_pri, new_pri, force_run); workq_unlock(wq); } @@ -2221,7 +2222,7 @@ workq_reqthreads(struct proc *p, uint32_t reqcount, pthread_priority_t pp) goto exiting; } - req->tr_count = reqcount; + req->tr_count = (uint16_t)reqcount; if (workq_threadreq_enqueue(wq, req)) { /* This can drop the workqueue lock, and take it again */ workq_schedule_creator(p, wq, WORKQ_THREADREQ_CAN_CREATE_THREADS); @@ -2352,7 +2353,7 @@ workq_kern_threadreq_modify(struct proc *p, workq_threadreq_t req, WQ_TRACE_WQ(TRACE_wq_thread_request_modify | DBG_FUNC_NONE, wq, workq_trace_req_id(req), qos, 0, 0); - struct priority_queue *pq = workq_priority_queue_for_req(wq, req); + struct priority_queue_sched_max *pq = workq_priority_queue_for_req(wq, req); workq_threadreq_t req_max; /* @@ -2361,8 +2362,7 @@ workq_kern_threadreq_modify(struct proc *p, workq_threadreq_t req, * If we dequeue the root item of the constrained priority queue, * maintain the best constrained request qos invariant. */ - if (priority_queue_remove(pq, &req->tr_entry, - PRIORITY_QUEUE_SCHED_PRI_MAX_HEAP_COMPARE)) { + if (priority_queue_remove(pq, &req->tr_entry)) { if ((req->tr_flags & WORKQ_TR_FLAG_OVERCOMMIT) == 0) { _wq_thactive_refresh_best_constrained_req_qos(wq); } @@ -2382,8 +2382,9 @@ workq_kern_threadreq_modify(struct proc *p, workq_threadreq_t req, req_max = priority_queue_max(pq, struct workq_threadreq_s, tr_entry); if (req_max && req_max->tr_qos >= qos) { - priority_queue_insert(pq, &req->tr_entry, workq_priority_for_req(req), - PRIORITY_QUEUE_SCHED_PRI_MAX_HEAP_COMPARE); + priority_queue_entry_set_sched_pri(pq, &req->tr_entry, + workq_priority_for_req(req), false); + priority_queue_insert(pq, &req->tr_entry); workq_unlock(wq); return; } @@ -2767,6 +2768,8 @@ workq_park_and_unlock(proc_t p, struct workqueue *wq, struct uthread *uth, setup_flags &= ~WQ_SETUP_CLEAR_VOUCHER; } + WQ_TRACE_WQ(TRACE_wq_thread_logical_run | DBG_FUNC_END, wq, 0, 0, 0, 0); + if (uth->uu_workq_flags & UT_WORKQ_RUNNING) { /* * While we'd dropped the lock to unset our voucher, someone came @@ -2774,7 +2777,6 @@ workq_park_and_unlock(proc_t p, struct workqueue *wq, struct uthread *uth, * event their thread_wakeup() was ineffectual. To correct for that, * we just run the continuation ourselves. 
*/ - WQ_TRACE_WQ(TRACE_wq_thread_logical_run | DBG_FUNC_END, wq, 0, 0, 0, 0); workq_unpark_select_threadreq_or_park_and_unlock(p, wq, uth, setup_flags); __builtin_unreachable(); } @@ -2788,7 +2790,6 @@ workq_park_and_unlock(proc_t p, struct workqueue *wq, struct uthread *uth, thread_set_pending_block_hint(uth->uu_thread, kThreadWaitParkedWorkQueue); assert_wait(workq_parked_wait_event(uth), THREAD_INTERRUPTIBLE); workq_unlock(wq); - WQ_TRACE_WQ(TRACE_wq_thread_logical_run | DBG_FUNC_END, wq, 0, 0, 0, 0); thread_block(workq_unpark_continue); __builtin_unreachable(); } @@ -2895,15 +2896,10 @@ workq_threadreq_admissible(struct workqueue *wq, struct uthread *uth, static workq_threadreq_t workq_threadreq_select_for_creator(struct workqueue *wq) { - workq_threadreq_t req_qos, req_pri, req_tmp; + workq_threadreq_t req_qos, req_pri, req_tmp, req_mgr; thread_qos_t qos = THREAD_QOS_UNSPECIFIED; uint8_t pri = 0; - req_tmp = wq->wq_event_manager_threadreq; - if (req_tmp && workq_may_start_event_mgr_thread(wq, NULL)) { - return req_tmp; - } - /* * Compute the best priority request, and ignore the turnstile for now */ @@ -2911,7 +2907,27 @@ workq_threadreq_select_for_creator(struct workqueue *wq) req_pri = priority_queue_max(&wq->wq_special_queue, struct workq_threadreq_s, tr_entry); if (req_pri) { - pri = priority_queue_entry_key(&wq->wq_special_queue, &req_pri->tr_entry); + pri = (uint8_t)priority_queue_entry_sched_pri(&wq->wq_special_queue, + &req_pri->tr_entry); + } + + /* + * Handle the manager thread request. The special queue might yield + * a higher priority, but the manager always beats the QoS world. + */ + + req_mgr = wq->wq_event_manager_threadreq; + if (req_mgr && workq_may_start_event_mgr_thread(wq, NULL)) { + uint32_t mgr_pri = wq->wq_event_manager_priority; + + if (mgr_pri & _PTHREAD_PRIORITY_SCHED_PRI_FLAG) { + mgr_pri &= _PTHREAD_PRIORITY_SCHED_PRI_MASK; + } else { + mgr_pri = thread_workq_pri_for_qos( + _pthread_priority_thread_qos(mgr_pri)); + } + + return mgr_pri >= pri ? req_mgr : req_pri; } /* @@ -2971,7 +2987,7 @@ workq_threadreq_select_for_creator(struct workqueue *wq) static workq_threadreq_t workq_threadreq_select(struct workqueue *wq, struct uthread *uth) { - workq_threadreq_t req_qos, req_pri, req_tmp; + workq_threadreq_t req_qos, req_pri, req_tmp, req_mgr; uintptr_t proprietor; thread_qos_t qos = THREAD_QOS_UNSPECIFIED; uint8_t pri = 0; @@ -2980,16 +2996,11 @@ workq_threadreq_select(struct workqueue *wq, struct uthread *uth) uth = NULL; } - req_tmp = wq->wq_event_manager_threadreq; - if (req_tmp && workq_may_start_event_mgr_thread(wq, uth)) { - return req_tmp; - } - /* * Compute the best priority request (special or turnstile) */ - pri = turnstile_workq_proprietor_of_max_turnstile(wq->wq_turnstile, + pri = (uint8_t)turnstile_workq_proprietor_of_max_turnstile(wq->wq_turnstile, &proprietor); if (pri) { struct kqworkloop *kqwl = (struct kqworkloop *)proprietor; @@ -3004,10 +3015,30 @@ workq_threadreq_select(struct workqueue *wq, struct uthread *uth) req_tmp = priority_queue_max(&wq->wq_special_queue, struct workq_threadreq_s, tr_entry); - if (req_tmp && pri < priority_queue_entry_key(&wq->wq_special_queue, + if (req_tmp && pri < priority_queue_entry_sched_pri(&wq->wq_special_queue, &req_tmp->tr_entry)) { req_pri = req_tmp; - pri = priority_queue_entry_key(&wq->wq_special_queue, &req_tmp->tr_entry); + pri = (uint8_t)priority_queue_entry_sched_pri(&wq->wq_special_queue, + &req_tmp->tr_entry); + } + + /* + * Handle the manager thread request. 
The special queue might yield + * a higher priority, but the manager always beats the QoS world. + */ + + req_mgr = wq->wq_event_manager_threadreq; + if (req_mgr && workq_may_start_event_mgr_thread(wq, uth)) { + uint32_t mgr_pri = wq->wq_event_manager_priority; + + if (mgr_pri & _PTHREAD_PRIORITY_SCHED_PRI_FLAG) { + mgr_pri &= _PTHREAD_PRIORITY_SCHED_PRI_MASK; + } else { + mgr_pri = thread_workq_pri_for_qos( + _pthread_priority_thread_qos(mgr_pri)); + } + + return mgr_pri >= pri ? req_mgr : req_pri; } /* @@ -3640,15 +3671,6 @@ workqueue_get_pwq_state_kdp(void * v) void workq_init(void) { - workq_lck_grp_attr = lck_grp_attr_alloc_init(); - workq_lck_attr = lck_attr_alloc_init(); - workq_lck_grp = lck_grp_alloc_init("workq", workq_lck_grp_attr); - - workq_zone_workqueue = zinit(sizeof(struct workqueue), - 1024 * sizeof(struct workqueue), 8192, "workq.wq"); - workq_zone_threadreq = zinit(sizeof(struct workq_threadreq_s), - 1024 * sizeof(struct workq_threadreq_s), 8192, "workq.threadreq"); - clock_interval_to_absolutetime_interval(wq_stalled_window.usecs, NSEC_PER_USEC, &wq_stalled_window.abstime); clock_interval_to_absolutetime_interval(wq_reduce_pool_window.usecs, diff --git a/bsd/pthread/workqueue_internal.h b/bsd/pthread/workqueue_internal.h index 67a0b9b2b..7d277572a 100644 --- a/bsd/pthread/workqueue_internal.h +++ b/bsd/pthread/workqueue_internal.h @@ -172,7 +172,7 @@ __options_decl(workq_tr_flags_t, uint8_t, { typedef struct workq_threadreq_s { union { - struct priority_queue_entry tr_entry; + struct priority_queue_entry_sched tr_entry; thread_t tr_thread; }; uint16_t tr_count; @@ -245,9 +245,9 @@ struct workqueue { struct workq_uthread_head wq_thnewlist; struct workq_uthread_head wq_thidlelist; - struct priority_queue wq_overcommit_queue; - struct priority_queue wq_constrained_queue; - struct priority_queue wq_special_queue; + struct priority_queue_sched_max wq_overcommit_queue; + struct priority_queue_sched_max wq_constrained_queue; + struct priority_queue_sched_max wq_special_queue; workq_threadreq_t wq_event_manager_threadreq; }; diff --git a/bsd/security/audit/audit.c b/bsd/security/audit/audit.c index 0a5f138ae..0a3217735 100644 --- a/bsd/security/audit/audit.c +++ b/bsd/security/audit/audit.c @@ -1,5 +1,5 @@ /*- - * Copyright (c) 1999-2019 Apple Inc. + * Copyright (c) 1999-2020 Apple Inc. * Copyright (c) 2006-2007 Robert N. M. Watson * All rights reserved. * @@ -74,7 +74,6 @@ #include #include -#include #include #include @@ -87,6 +86,7 @@ MALLOC_DEFINE(M_AUDITDATA, "audit_data", "Audit data storage"); MALLOC_DEFINE(M_AUDITPATH, "audit_path", "Audit path storage"); MALLOC_DEFINE(M_AUDITTEXT, "audit_text", "Audit text storage"); +KALLOC_HEAP_DEFINE(KHEAP_AUDIT, "Audit", KHEAP_ID_DEFAULT); /* * Audit control settings that are set/read by system calls and are hence @@ -180,7 +180,8 @@ struct cv audit_watermark_cv; */ static struct cv audit_fail_cv; -static zone_t audit_record_zone; +static ZONE_DECLARE(audit_record_zone, "audit_zone", + sizeof(struct kaudit_record), ZC_NONE); /* * Kernel audit information. 
This will store the current audit address @@ -329,7 +330,6 @@ audit_init(void) audit_kinfo.ai_termid.at_type = AU_IPv4; audit_kinfo.ai_termid.at_addr[0] = INADDR_ANY; - _audit_lck_grp_init(); mtx_init(&audit_mtx, "audit_mtx", NULL, MTX_DEF); KINFO_LOCK_INIT(); cv_init(&audit_worker_cv, "audit_worker_cv"); @@ -337,11 +337,6 @@ audit_init(void) cv_init(&audit_watermark_cv, "audit_watermark_cv"); cv_init(&audit_fail_cv, "audit_fail_cv"); - audit_record_zone = zinit(sizeof(struct kaudit_record), - AQ_HIWATER * sizeof(struct kaudit_record), 8192, "audit_zone"); -#if CONFIG_MACF - audit_mac_init(); -#endif /* Init audit session subsystem. */ audit_session_init(); @@ -955,6 +950,9 @@ audit_proc_coredump(proc_t proc, char *path, int errcode) */ uthread = curthread(); ar = audit_new(AUE_CORE, proc, uthread); + if (ar == NULL) { + return; + } if (path != NULL) { pathp = &ar->k_ar.ar_arg_upath1; *pathp = malloc(MAXPATHLEN, M_AUDITPATH, M_WAITOK); diff --git a/bsd/security/audit/audit.h b/bsd/security/audit/audit.h index 903099074..c0590de2f 100644 --- a/bsd/security/audit/audit.h +++ b/bsd/security/audit/audit.h @@ -177,7 +177,7 @@ void audit_subcall_enter(au_event_t event, void audit_subcall_exit(int error, struct uthread *uthread); -extern struct auditinfo_addr *audit_default_aia_p; +extern struct auditinfo_addr * const audit_default_aia_p; /* * The remaining kernel functions are conditionally compiled in as they are @@ -247,9 +247,9 @@ void audit_arg_auditon(struct kaudit_record *ar, void audit_arg_file(struct kaudit_record *ar, struct proc *p, struct fileproc *fp); void audit_arg_argv(struct kaudit_record *ar, char *argv, int argc, - int length); + size_t length); void audit_arg_envv(struct kaudit_record *ar, char *envv, int envc, - int length); + size_t length); void audit_arg_identity(struct kaudit_record *ar); void audit_arg_mach_port1(struct kaudit_record *ar, mach_port_name_t port); diff --git a/bsd/security/audit/audit_arg.c b/bsd/security/audit/audit_arg.c index f667243a1..472b15372 100644 --- a/bsd/security/audit/audit_arg.c +++ b/bsd/security/audit/audit_arg.c @@ -75,7 +75,6 @@ #include #include -#include #include #include @@ -430,7 +429,7 @@ audit_arg_text(struct kaudit_record *ar, char *text) M_WAITOK); } - strncpy(ar->k_ar.ar_arg_text, text, MAXPATHLEN); + strlcpy(ar->k_ar.ar_arg_text, text, MAXPATHLEN); ARG_SET_VALID(ar, ARG_TEXT); } @@ -576,15 +575,15 @@ audit_arg_file(struct kaudit_record *ar, __unused proc_t p, struct sockaddr_in *sin; struct sockaddr_in6 *sin6; - switch (FILEGLOB_DTYPE(fp->f_fglob)) { + switch (FILEGLOB_DTYPE(fp->fp_glob)) { case DTYPE_VNODE: /* case DTYPE_FIFO: */ audit_arg_vnpath_withref(ar, - (struct vnode *)fp->f_fglob->fg_data, ARG_VNODE1); + (struct vnode *)fp->fp_glob->fg_data, ARG_VNODE1); break; case DTYPE_SOCKET: - so = (struct socket *)fp->f_fglob->fg_data; + so = (struct socket *)fp->fp_glob->fg_data; if (SOCK_CHECK_DOM(so, PF_INET)) { if (so->so_pcb == NULL) { break; @@ -858,7 +857,7 @@ audit_arg_mach_port2(struct kaudit_record *ar, mach_port_name_t port) * Audit the argument strings passed to exec. */ void -audit_arg_argv(struct kaudit_record *ar, char *argv, int argc, int length) +audit_arg_argv(struct kaudit_record *ar, char *argv, int argc, size_t length) { if (audit_argv == 0 || argc == 0) { return; @@ -876,7 +875,7 @@ audit_arg_argv(struct kaudit_record *ar, char *argv, int argc, int length) * Audit the environment strings passed to exec. 
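[Editorial aside, not part of the patch: the strncpy-to-strlcpy swaps in the audit code buy a guaranteed NUL terminator. strncpy does not terminate the destination when the source is as long as the limit, whereas strlcpy always terminates and returns the length it wanted to copy, which also makes truncation easy to detect. A small illustration (strlcpy is available in Darwin/BSD libc):]

#include <stdio.h>
#include <string.h>

int
main(void)
{
	char a[8], b[8];
	const char *src = "0123456789";          /* longer than either buffer */

	strncpy(a, src, sizeof(a));               /* copies 8 bytes, no NUL added */
	size_t want = strlcpy(b, src, sizeof(b)); /* copies 7 bytes + NUL, returns 10 */

	/* a is NOT a valid C string here; b is "0123456" */
	printf("strlcpy wanted %zu bytes, got \"%s\"\n", want, b);
	if (want >= sizeof(b)) {
		printf("truncated\n");
	}
	return 0;
}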
*/ void -audit_arg_envv(struct kaudit_record *ar, char *envv, int envc, int length) +audit_arg_envv(struct kaudit_record *ar, char *envv, int envc, size_t length) { if (audit_arge == 0 || envc == 0) { return; @@ -909,7 +908,7 @@ audit_sysclose(struct kaudit_record *ar, proc_t p, int fd) return; } - audit_arg_vnpath_withref(ar, (struct vnode *)fp->f_fglob->fg_data, + audit_arg_vnpath_withref(ar, (struct vnode *)fp->fp_glob->fg_data, ARG_VNODE1); fp_drop(p, fd, fp, 0); } diff --git a/bsd/security/audit/audit_bsd.c b/bsd/security/audit/audit_bsd.c index 5cfc16778..2f3adbace 100644 --- a/bsd/security/audit/audit_bsd.c +++ b/bsd/security/audit/audit_bsd.c @@ -50,6 +50,7 @@ #include #include #include +#include #include @@ -66,7 +67,7 @@ struct mhdr { /* * The lock group for the audit subsystem. */ -static lck_grp_t *audit_lck_grp = NULL; +static LCK_GRP_DECLARE(audit_lck_grp, "Audit"); #define AUDIT_MHMAGIC 0x4D656C53 @@ -134,11 +135,10 @@ audit_sysctl_malloc_debug(__unused struct sysctl_oid *oidp, __unused void *arg1, if (req->oldlen < AU_MALLOC_DBINFO_SZ) { return ENOMEM; } - amdi_ptr = (au_malloc_debug_info_t *)kalloc(AU_MALLOC_DBINFO_SZ); + amdi_ptr = kheap_alloc(KHEAP_TEMP, AU_MALLOC_DBINFO_SZ, Z_WAITOK | Z_ZERO); if (amdi_ptr == NULL) { return ENOMEM; } - bzero(amdi_ptr, AU_MALLOC_DBINFO_SZ); /* * Build the record array. @@ -168,7 +168,7 @@ audit_sysctl_malloc_debug(__unused struct sysctl_oid *oidp, __unused void *arg1, req->oldlen = sz; err = SYSCTL_OUT(req, amdi_ptr, sz); - kfree(amdi_ptr, AU_MALLOC_DBINFO_SZ); + kheap_free(KHEAP_TEMP, amdi_ptr, AU_MALLOC_DBINFO_SZ); return err; } @@ -196,23 +196,13 @@ _audit_malloc(size_t size, au_malloc_type_t * type, int flags) if (size == 0) { return NULL; } - if (flags & M_NOWAIT) { - hdr = (void *)kalloc_noblock(memsize); - } else { - hdr = (void *)kalloc(memsize); - if (hdr == NULL) { - panic("_audit_malloc: kernel memory exhausted"); - } - } + hdr = kheap_alloc(KHEAP_AUDIT, memsize, flags); if (hdr == NULL) { return NULL; } hdr->mh_size = memsize; hdr->mh_type = type; hdr->mh_magic = AUDIT_MHMAGIC; - if (flags & M_ZERO) { - memset(hdr->mh_data, 0, size); - } #if AUDIT_MALLOC_DEBUG if (type != NULL && type->mt_type < NUM_MALLOC_TYPES) { OSAddAtomic64(memsize, &type->mt_size); @@ -253,7 +243,7 @@ _audit_free(void *addr, __unused au_malloc_type_t *type) OSAddAtomic(-1, &type->mt_inuse); } #endif /* AUDIT_MALLOC_DEBUG */ - kfree(hdr, hdr->mh_size); + kheap_free(KHEAP_AUDIT, hdr, hdr->mh_size); } /* @@ -339,7 +329,7 @@ _audit_mtx_init(struct mtx *mp, const char *lckname) _audit_mtx_init(struct mtx *mp, __unused const char *lckname) #endif { - mp->mtx_lock = lck_mtx_alloc_init(audit_lck_grp, LCK_ATTR_NULL); + mp->mtx_lock = lck_mtx_alloc_init(&audit_lck_grp, LCK_ATTR_NULL); KASSERT(mp->mtx_lock != NULL, ("_audit_mtx_init: Could not allocate a mutex.")); #if DIAGNOSTIC @@ -351,7 +341,7 @@ void _audit_mtx_destroy(struct mtx *mp) { if (mp->mtx_lock) { - lck_mtx_free(mp->mtx_lock, audit_lck_grp); + lck_mtx_free(mp->mtx_lock, &audit_lck_grp); mp->mtx_lock = NULL; } } @@ -366,7 +356,7 @@ _audit_rw_init(struct rwlock *lp, const char *lckname) _audit_rw_init(struct rwlock *lp, __unused const char *lckname) #endif { - lp->rw_lock = lck_rw_alloc_init(audit_lck_grp, LCK_ATTR_NULL); + lp->rw_lock = lck_rw_alloc_init(&audit_lck_grp, LCK_ATTR_NULL); KASSERT(lp->rw_lock != NULL, ("_audit_rw_init: Could not allocate a rw lock.")); #if DIAGNOSTIC @@ -378,7 +368,7 @@ void _audit_rw_destroy(struct rwlock *lp) { if (lp->rw_lock) { - lck_rw_free(lp->rw_lock, 
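[Editorial aside, not part of the patch: _audit_malloc keeps working after the kheap conversion because it prepends its own struct mhdr recording the allocation size, so _audit_free can pass kheap_free the exact length without every caller tracking it. The same trick in plain user-space C, with hypothetical names, looks roughly like this:]

#include <stdlib.h>
#include <stddef.h>
#include <string.h>
#include <stdio.h>

#define HDR_MAGIC 0x4D656C53u

struct hdr {
	size_t   size;     /* total allocation, header included */
	unsigned magic;    /* cheap sanity check, like AUDIT_MHMAGIC */
	char     data[];   /* caller-visible payload */
};

static void *
sized_malloc(size_t size)
{
	struct hdr *h = malloc(sizeof(*h) + size);
	if (h == NULL) {
		return NULL;
	}
	h->size = sizeof(*h) + size;
	h->magic = HDR_MAGIC;
	return h->data;
}

static void
sized_free(void *p)
{
	struct hdr *h = (struct hdr *)((char *)p - offsetof(struct hdr, data));
	if (h->magic != HDR_MAGIC) {
		abort();   /* freeing something we did not allocate */
	}
	/* h->size is available here, which is what lets the kernel version
	 * call kheap_free(heap, hdr, hdr->mh_size) without a size argument */
	free(h);
}

int
main(void)
{
	char *s = sized_malloc(16);
	strcpy(s, "hello");
	printf("%s\n", s);
	sized_free(s);
	return 0;
}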
audit_lck_grp); + lck_rw_free(lp->rw_lock, &audit_lck_grp); lp->rw_lock = NULL; } } @@ -414,7 +404,7 @@ _audit_rlck_init(struct rlck *lp, const char *lckname) _audit_rlck_init(struct rlck *lp, __unused const char *lckname) #endif { - lp->rl_mtx = lck_mtx_alloc_init(audit_lck_grp, LCK_ATTR_NULL); + lp->rl_mtx = lck_mtx_alloc_init(&audit_lck_grp, LCK_ATTR_NULL); KASSERT(lp->rl_mtx != NULL, ("_audit_rlck_init: Could not allocate a recursive lock.")); #if DIAGNOSTIC @@ -461,7 +451,7 @@ void _audit_rlck_destroy(struct rlck *lp) { if (lp->rl_mtx) { - lck_mtx_free(lp->rl_mtx, audit_lck_grp); + lck_mtx_free(lp->rl_mtx, &audit_lck_grp); lp->rl_mtx = NULL; } } @@ -494,7 +484,7 @@ _audit_slck_init(struct slck *lp, const char *lckname) _audit_slck_init(struct slck *lp, __unused const char *lckname) #endif { - lp->sl_mtx = lck_mtx_alloc_init(audit_lck_grp, LCK_ATTR_NULL); + lp->sl_mtx = lck_mtx_alloc_init(&audit_lck_grp, LCK_ATTR_NULL); KASSERT(lp->sl_mtx != NULL, ("_audit_slck_init: Could not allocate a sleep lock.")); #if DIAGNOSTIC @@ -580,7 +570,7 @@ void _audit_slck_destroy(struct slck *lp) { if (lp->sl_mtx) { - lck_mtx_free(lp->sl_mtx, audit_lck_grp); + lck_mtx_free(lp->sl_mtx, &audit_lck_grp); lp->sl_mtx = NULL; } } @@ -641,32 +631,38 @@ _audit_ppsratecheck(struct timeval *lasttime, int *curpps, int maxpps) return rv; } -/* - * Initialize lock group for audit related locks/mutexes. - */ -void -_audit_lck_grp_init(void) +int +audit_send_trigger(unsigned int trigger) { - audit_lck_grp = lck_grp_alloc_init("Audit", LCK_GRP_ATTR_NULL); + mach_port_t audit_port; + int error; - KASSERT(audit_lck_grp != NULL, - ("audit_get_lck_grp: Could not allocate the audit lock group.")); + error = host_get_audit_control_port(host_priv_self(), &audit_port); + if (error == KERN_SUCCESS && audit_port != MACH_PORT_NULL) { + (void)audit_triggers(audit_port, trigger); + ipc_port_release_send(audit_port); + return 0; + } else { + printf("Cannot get audit control port\n"); + return error; + } } int -audit_send_trigger(unsigned int trigger) +audit_send_analytics(char* signing_id, char* process_name) { mach_port_t audit_port; int error; error = host_get_audit_control_port(host_priv_self(), &audit_port); if (error == KERN_SUCCESS && audit_port != MACH_PORT_NULL) { - (void)audit_triggers(audit_port, trigger); + (void)audit_analytics(audit_port, signing_id, process_name); ipc_port_release_send(audit_port); return 0; } else { - printf("Cannot get audit control port\n"); + printf("Cannot get audit control port for analytics \n"); return error; } } + #endif /* CONFIG_AUDIT */ diff --git a/bsd/security/audit/audit_bsd.h b/bsd/security/audit/audit_bsd.h index 74425201c..b59642628 100644 --- a/bsd/security/audit/audit_bsd.h +++ b/bsd/security/audit/audit_bsd.h @@ -34,6 +34,7 @@ #include #if defined(_KERNEL) || defined(KERNEL) +#include #if DIAGNOSTIC #ifdef KASSERT @@ -312,11 +313,6 @@ void _audit_rw_destroy(struct rwlock *lp); #define slck_assert(lp, wht) #endif /* DIAGNOSTIC */ -/* - * Synchronization initialization. - */ -void _audit_lck_grp_init(void); - /* * BSD (IPv6) event rate limiter. */ diff --git a/bsd/security/audit/audit_bsm.c b/bsd/security/audit/audit_bsm.c index 18e98c0f5..f4c7a2856 100644 --- a/bsd/security/audit/audit_bsm.c +++ b/bsd/security/audit/audit_bsm.c @@ -1,6 +1,5 @@ /* - * Copyright (c) 1999-2016 Apple Inc. - * All rights reserved. + * Copyright (c) 1999-2020 Apple Inc. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -390,7 +389,7 @@ audit_sys_auditon(struct audit_record *ar, struct au_record *rec) kau_write(rec, tok); break; } - /* FALLTHROUGH */ + OS_FALLTHROUGH; case A_SETPOLICY: tok = au_to_arg32(3, "length", ar->ar_arg_len); kau_write(rec, tok); @@ -430,7 +429,7 @@ audit_sys_auditon(struct audit_record *ar, struct au_record *rec) kau_write(rec, tok); break; } - /* FALLTHROUGH */ + OS_FALLTHROUGH; case A_SETQCTRL: tok = au_to_arg32(3, "length", ar->ar_arg_len); kau_write(rec, tok); @@ -482,7 +481,7 @@ audit_sys_auditon(struct audit_record *ar, struct au_record *rec) kau_write(rec, tok); break; } - /* FALLTHROUGH */ + OS_FALLTHROUGH; case A_SETCOND: tok = au_to_arg32(3, "length", ar->ar_arg_len); kau_write(rec, tok); @@ -670,7 +669,7 @@ kaudit_to_bsm(struct kaudit_record *kar, struct au_record **pau) tok = au_to_arg32(2, "sd", ar->ar_arg_value32); kau_write(rec, tok); } - /* FALLTHROUGH */ + OS_FALLTHROUGH; case AUE_ACCEPT: case AUE_BIND: case AUE_LISTEN: @@ -792,15 +791,18 @@ kaudit_to_bsm(struct kaudit_record *kar, struct au_record **pau) tok = au_to_arg32(1, "setaudit_addr:port", ar->ar_arg_termid_addr.at_port); kau_write(rec, tok); - if (ar->ar_arg_termid_addr.at_type == AU_IPv6) { + switch (ar->ar_arg_termid_addr.at_type) { + case AU_IPv6: tok = au_to_in_addr_ex((struct in6_addr *) &ar->ar_arg_termid_addr.at_addr[0]); - } - if (ar->ar_arg_termid_addr.at_type == AU_IPv4) { + kau_write(rec, tok); + break; + case AU_IPv4: tok = au_to_in_addr((struct in_addr *) &ar->ar_arg_termid_addr.at_addr[0]); + kau_write(rec, tok); + break; } - kau_write(rec, tok); } break; @@ -812,7 +814,7 @@ kaudit_to_bsm(struct kaudit_record *kar, struct au_record **pau) tok = au_to_arg32(1, "cmd", ar->ar_arg_cmd); kau_write(rec, tok); } - /* FALLTHROUGH */ + OS_FALLTHROUGH; case AUE_AUDITON_GETCAR: case AUE_AUDITON_GETCLASS: @@ -989,7 +991,7 @@ kaudit_to_bsm(struct kaudit_record *kar, struct au_record **pau) tok = au_to_arg32(0, "child PID", ar->ar_arg_pid); kau_write(rec, tok); } - /* FALLTHROUGH */ + OS_FALLTHROUGH; case AUE_EXECVE: if (ARG_IS_VALID(kar, ARG_ARGV)) { @@ -1053,9 +1055,11 @@ kaudit_to_bsm(struct kaudit_record *kar, struct au_record **pau) case AUE_READ: case AUE_READV: case AUE_PREAD: + case AUE_PREADV: case AUE_WRITE: case AUE_WRITEV: case AUE_PWRITE: + case AUE_PWRITEV: FD_VNODE1_TOKENS; break; @@ -1267,7 +1271,7 @@ kaudit_to_bsm(struct kaudit_record *kar, struct au_record **pau) #if CONFIG_MACF case AUE_MAC_MOUNT: PROCESS_MAC_TOKENS; - /* FALLTHROUGH */ + OS_FALLTHROUGH; #endif case AUE_MOUNT: /* XXX Need to handle NFS mounts */ @@ -1279,7 +1283,7 @@ kaudit_to_bsm(struct kaudit_record *kar, struct au_record **pau) tok = au_to_text(ar->ar_arg_text); kau_write(rec, tok); } - /* FALLTHROUGH */ + OS_FALLTHROUGH; case AUE_UMOUNT: case AUE_UNMOUNT: @@ -1302,7 +1306,7 @@ kaudit_to_bsm(struct kaudit_record *kar, struct au_record **pau) case AUE_MSGCTL: ar->ar_event = audit_msgctl_to_event(ar->ar_arg_svipc_cmd); - /* FALLTHROUGH */ + OS_FALLTHROUGH; case AUE_MSGRCV: case AUE_MSGSND: @@ -1534,7 +1538,7 @@ kaudit_to_bsm(struct kaudit_record *kar, struct au_record **pau) case AUE_SEMCTL: ar->ar_event = audit_semctl_to_event(ar->ar_arg_svipc_cmd); - /* FALLTHROUGH */ + OS_FALLTHROUGH; case AUE_SEMOP: if (ARG_IS_VALID(kar, ARG_SVIPC_ID)) { @@ -1727,7 +1731,7 @@ kaudit_to_bsm(struct kaudit_record *kar, struct au_record **pau) tok = au_to_arg32(3, "mode", ar->ar_arg_mode); 
kau_write(rec, tok); } - /* FALLTHROUGH */ + OS_FALLTHROUGH; case AUE_SHMUNLINK: if (ARG_IS_VALID(kar, ARG_TEXT)) { @@ -1762,7 +1766,7 @@ kaudit_to_bsm(struct kaudit_record *kar, struct au_record **pau) tok = au_to_arg32(4, "value", ar->ar_arg_value32); kau_write(rec, tok); } - /* FALLTHROUGH */ + OS_FALLTHROUGH; case AUE_SEMUNLINK: if (ARG_IS_VALID(kar, ARG_TEXT)) { @@ -1825,7 +1829,7 @@ kaudit_to_bsm(struct kaudit_record *kar, struct au_record **pau) ar->ar_arg_opq_size); kau_write(rec, tok); } - /* FALLTHROUGH */ + OS_FALLTHROUGH; case AUE_UMASK: if (ARG_IS_VALID(kar, ARG_MASK)) { diff --git a/bsd/security/audit/audit_mac.c b/bsd/security/audit/audit_mac.c index 705dad083..18567474f 100644 --- a/bsd/security/audit/audit_mac.c +++ b/bsd/security/audit/audit_mac.c @@ -1,5 +1,5 @@ /*- - * Copyright (c) 1999-2008 Apple Inc. + * Copyright (c) 1999-2020 Apple Inc. * All rights reserved. * * @APPLE_BSD_LICENSE_HEADER_START@ @@ -55,7 +55,6 @@ #include #include -#include #include #include @@ -69,18 +68,8 @@ #define MAC_ARG_PREFIX "arg: " #define MAC_ARG_PREFIX_LEN 5 -zone_t audit_mac_label_zone; -extern zone_t mac_audit_data_zone; - -void -audit_mac_init(void) -{ - /* Assume 3 MAC labels for each audit record: two for vnodes, - * one for creds. - */ - audit_mac_label_zone = zinit(MAC_AUDIT_LABEL_LEN, - AQ_HIWATER * 3 * MAC_AUDIT_LABEL_LEN, 8192, "audit_mac_label_zone"); -} +ZONE_DECLARE(audit_mac_label_zone, "audit_mac_label_zone", + MAC_AUDIT_LABEL_LEN, ZC_NONE); int audit_mac_new(proc_t p, struct kaudit_record *ar) @@ -102,7 +91,7 @@ audit_mac_new(proc_t p, struct kaudit_record *ar) * grab space for the reconds. */ ar->k_ar.ar_mac_records = (struct mac_audit_record_list_t *) - kalloc(sizeof(*ar->k_ar.ar_mac_records)); + kheap_alloc(KHEAP_AUDIT, sizeof(*ar->k_ar.ar_mac_records), Z_WAITOK); if (ar->k_ar.ar_mac_records == NULL) { zfree(audit_mac_label_zone, ar->k_ar.ar_cred_mac_labels); return 1; @@ -128,7 +117,7 @@ audit_mac_free(struct kaudit_record *ar) zfree(audit_mac_label_zone, ar->k_ar.ar_cred_mac_labels); } if (ar->k_ar.ar_arg_mac_string != NULL) { - kfree(ar->k_ar.ar_arg_mac_string, + kheap_free(KHEAP_AUDIT, ar->k_ar.ar_arg_mac_string, MAC_MAX_LABEL_BUF_LEN + MAC_ARG_PREFIX_LEN); } @@ -139,10 +128,11 @@ audit_mac_free(struct kaudit_record *ar) while (head != NULL) { next = LIST_NEXT(head, records); zfree(mac_audit_data_zone, head->data); - kfree(head, sizeof(*head)); + kheap_free(KHEAP_AUDIT, head, sizeof(*head)); head = next; } - kfree(ar->k_ar.ar_mac_records, sizeof(*ar->k_ar.ar_mac_records)); + kheap_free(KHEAP_AUDIT, ar->k_ar.ar_mac_records, + sizeof(*ar->k_ar.ar_mac_records)); } int @@ -155,8 +145,9 @@ audit_mac_syscall_enter(unsigned short code, proc_t p, struct uthread *uthread, (void *)uthread->uu_arg); if (error == MAC_AUDIT_YES) { uthread->uu_ar = audit_new(event, p, uthread); - uthread->uu_ar->k_ar.ar_forced_by_mac = 1; - au_to_text("Forced by a MAC policy"); + if (uthread->uu_ar) { + uthread->uu_ar->k_ar.ar_forced_by_mac = 1; + } return 1; } else if (error == MAC_AUDIT_NO) { return 0; @@ -208,13 +199,13 @@ audit_mac_data(int type, int len, u_char *data) struct mac_audit_record *record; if (audit_enabled == 0) { - kfree(data, len); + zfree(mac_audit_data_zone, data); return ENOTSUP; } cur = currecord(); if (cur == NULL) { - kfree(data, len); + zfree(mac_audit_data_zone, data); return ENOTSUP; } @@ -223,9 +214,9 @@ audit_mac_data(int type, int len, u_char *data) * allocation fails - this is consistent with the rest of the * audit implementation. 
*/ - record = kalloc(sizeof(*record)); + record = kheap_alloc(KHEAP_AUDIT, sizeof(*record), Z_WAITOK); if (record == NULL) { - kfree(data, len); + zfree(mac_audit_data_zone, data); return 0; } @@ -241,12 +232,12 @@ void audit_arg_mac_string(struct kaudit_record *ar, char *string) { if (ar->k_ar.ar_arg_mac_string == NULL) { - ar->k_ar.ar_arg_mac_string = - kalloc(MAC_MAX_LABEL_BUF_LEN + MAC_ARG_PREFIX_LEN); + ar->k_ar.ar_arg_mac_string = kheap_alloc(KHEAP_AUDIT, + MAC_MAX_LABEL_BUF_LEN + MAC_ARG_PREFIX_LEN, Z_WAITOK); } /* - * XXX This should be a rare event. If kalloc() returns NULL, + * XXX This should be a rare event. If kheap_alloc() returns NULL, * the system is low on kernel virtual memory. To be * consistent with the rest of audit, just return * (may need to panic if required to for audit). @@ -257,9 +248,9 @@ audit_arg_mac_string(struct kaudit_record *ar, char *string) } } - strncpy(ar->k_ar.ar_arg_mac_string, MAC_ARG_PREFIX, + strlcpy(ar->k_ar.ar_arg_mac_string, MAC_ARG_PREFIX, MAC_ARG_PREFIX_LEN); - strncpy(ar->k_ar.ar_arg_mac_string + MAC_ARG_PREFIX_LEN, string, + strlcpy(ar->k_ar.ar_arg_mac_string + MAC_ARG_PREFIX_LEN, string, MAC_MAX_LABEL_BUF_LEN); ARG_SET_VALID(ar, ARG_MAC_STRING); } diff --git a/bsd/security/audit/audit_pipe.c b/bsd/security/audit/audit_pipe.c index 6b096473f..82a5472be 100644 --- a/bsd/security/audit/audit_pipe.c +++ b/bsd/security/audit/audit_pipe.c @@ -228,7 +228,7 @@ static read_write_fcn_t audit_pipe_read; static ioctl_fcn_t audit_pipe_ioctl; static select_fcn_t audit_pipe_poll; -static struct cdevsw audit_pipe_cdevsw = { +static const struct cdevsw audit_pipe_cdevsw = { .d_open = audit_pipe_open, .d_close = audit_pipe_close, .d_read = audit_pipe_read, diff --git a/bsd/security/audit/audit_private.h b/bsd/security/audit/audit_private.h index e9e2e6dec..738a871d3 100644 --- a/bsd/security/audit/audit_private.h +++ b/bsd/security/audit/audit_private.h @@ -1,5 +1,5 @@ /*- - * Copyright (c) 1999-2016 Apple Inc. + * Copyright (c) 1999-2020 Apple Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -59,6 +59,15 @@ MALLOC_DECLARE(M_AUDITDATA); MALLOC_DECLARE(M_AUDITPATH); MALLOC_DECLARE(M_AUDITTEXT); #endif +KALLOC_HEAP_DECLARE(KHEAP_AUDIT); +#if CONFIG_AUDIT +/* + * mac_audit_data_zone is the zone used for data pushed into the audit + * record by policies. Using a zone simplifies memory management of this + * data, and allows tracking of the amount of data in flight. + */ +extern zone_t mac_audit_data_zone; +#endif /* * Audit control variables that are usually set/read via system calls and @@ -421,6 +430,7 @@ au_event_t audit_fcntl_command_event(int cmd, int oflags, int error); * asynchronously. */ int audit_send_trigger(unsigned int trigger); +int audit_send_analytics(char* id, char* name); /* * Accessor functions to manage global audit state. @@ -455,7 +465,6 @@ void audit_pipe_submit_user(void *record, u_int record_len); /* * Audit MAC prototypes. 
*/ -void audit_mac_init(void); int audit_mac_new(proc_t p, struct kaudit_record *ar); void audit_mac_free(struct kaudit_record *ar); int audit_mac_syscall_enter(unsigned short code, proc_t p, diff --git a/bsd/security/audit/audit_session.c b/bsd/security/audit/audit_session.c index fed263471..80290b43c 100644 --- a/bsd/security/audit/audit_session.c +++ b/bsd/security/audit/audit_session.c @@ -99,7 +99,7 @@ static au_sentry_t audit_default_se = { .se_procnt = 1, }; -struct auditinfo_addr *audit_default_aia_p = &audit_default_se.se_auinfo; +struct auditinfo_addr * const audit_default_aia_p = &audit_default_se.se_auinfo; /* Copied from */ #define IPC_KMSG_FLAGS_ALLOW_IMMOVABLE_SEND 0x1 @@ -337,7 +337,7 @@ static read_write_fcn_t audit_sdev_read; static ioctl_fcn_t audit_sdev_ioctl; static select_fcn_t audit_sdev_poll; -static struct cdevsw audit_sdev_cdevsw = { +static const struct cdevsw audit_sdev_cdevsw = { .d_open = audit_sdev_open, .d_close = audit_sdev_close, .d_read = audit_sdev_read, @@ -526,13 +526,12 @@ audit_sysctl_session_debug(__unused struct sysctl_oid *oidp, * We hold the lock over the alloc since we don't want the table to * grow on us. Therefore, use the non-blocking version of kalloc(). */ - sed_tab = (au_sentry_debug_t *)kalloc_noblock(entry_cnt * - sizeof(au_sentry_debug_t)); + sed_tab = (au_sentry_debug_t *)kheap_alloc(KHEAP_TEMP, + entry_cnt * sizeof(au_sentry_debug_t), Z_NOWAIT | Z_ZERO); if (sed_tab == NULL) { AUDIT_SENTRY_RUNLOCK(); return ENOMEM; } - bzero(sed_tab, entry_cnt * sizeof(au_sentry_debug_t)); /* * Walk the audit session hash table and build the record array. @@ -566,7 +565,7 @@ audit_sysctl_session_debug(__unused struct sysctl_oid *oidp, req->oldlen = sz; err = SYSCTL_OUT(req, sed_tab, sz); - kfree(sed_tab, entry_cnt * sizeof(au_sentry_debug_t)); + kheap_free(KHEAP_TEMP, sed_tab, entry_cnt * sizeof(au_sentry_debug_t)); return err; } diff --git a/bsd/security/audit/audit_syscalls.c b/bsd/security/audit/audit_syscalls.c index 4db4b53f7..74ca12d14 100644 --- a/bsd/security/audit/audit_syscalls.c +++ b/bsd/security/audit/audit_syscalls.c @@ -71,8 +71,6 @@ #include #include -#include -#include #include #if CONFIG_MACF @@ -169,6 +167,9 @@ audit(proc_t p, struct audit_args *uap, __unused int32_t *retval) }; token_t *id_tok = NULL; boolean_t kern_events_allowed = FALSE; + char *signing_id = NULL; + char process_name[MAXCOMLEN + 1] = {}; + int signer_type = 0; error = suser(kauth_cred_get(), &p->p_acflag); if (error) { @@ -278,6 +279,9 @@ audit(proc_t p, struct audit_args *uap, __unused int32_t *retval) goto free_out; } + signing_id = id_info.signing_id; + signer_type = id_info.signer_type; + /* Copy the original buffer up to but not including the trailer */ memcpy(full_rec, rec, uap->length - AUDIT_TRAILER_SIZE); bytes_copied = uap->length - AUDIT_TRAILER_SIZE; @@ -327,6 +331,12 @@ audit(proc_t p, struct audit_args *uap, __unused int32_t *retval) */ ar->k_ar_commit |= (AR_PRESELECT_USER_TRAIL | AR_PRESELECT_USER_PIPE); + // Send data for analytics for non-platform binaries only + if (signer_type == 0 && add_identity_token) { + proc_name(proc_pid(p), process_name, sizeof(process_name)); + (void)audit_send_analytics(signing_id, process_name); + } + free_out: /* * If rec was allocated, it must be freed if an identity token was added diff --git a/bsd/sys/Makefile b/bsd/sys/Makefile index b813428ca..905fad159 100644 --- a/bsd/sys/Makefile +++ b/bsd/sys/Makefile @@ -31,14 +31,14 @@ DATAFILES = \ timeb.h times.h tprintf.h trace.h tty.h ttychars.h ttycom.h \ 
ttydefaults.h ttydev.h types.h ubc.h ucontext.h ucred.h uio.h un.h unistd.h unpcb.h \ user.h utfconv.h utsname.h vadvise.h vcmd.h \ - vm.h vmmeter.h vmparam.h vnioctl.h vnode.h vnode_if.h vstat.h wait.h xattr.h \ + vm.h vmmeter.h vmparam.h vnioctl.h vnode.h vnode_if.h vsock.h vstat.h wait.h xattr.h \ _select.h _structs.h _types.h _endian.h domain.h protosw.h \ spawn.h timex.h commpage.h log_data.h # Installs header file for DriverKit drivers - # $(DSTROOT)/System/DriverKit/System/usr/include/ DRIVERKIT_DATAFILES = \ - cdefs.h _types.h + cdefs.h _types.h _endian.h # Installs header file for Apple internal use in user level - # $(DSTROOT)/System/Library/Frameworks/System.framework/PrivateHeaders @@ -98,6 +98,7 @@ PRIVATE_DATAFILES = \ ulock.h \ unpcb.h \ ux_exception.h \ + vsock.h \ work_interval.h \ process_policy.h \ proc_uuid_policy.h \ @@ -128,7 +129,7 @@ KERNELFILES = \ select.h signal.h socket.h socketvar.h sockio.h stat.h stdio.h \ sysctl.h syslimits.h syslog.h systm.h sys_domain.h time.h \ types.h ubc.h ucontext.h ucred.h uio.h un.h unistd.h unpcb.h \ - utfconv.h vm.h vmparam.h vnode.h vnode_if.h xattr.h \ + utfconv.h unicode.h vm.h vmparam.h vnode.h vnode_if.h vsock.h xattr.h \ _select.h _structs.h _types.h _endian.h protosw.h domain.h \ kpi_mbuf.h kpi_socket.h kpi_socketfilter.h \ ttycom.h termios.h msg.h \ @@ -157,6 +158,7 @@ PRIVATE_KERNELFILES = \ kern_memorystatus_notify.h \ kpi_private.h \ ktrace.h \ + linker_set.h \ mach_swapon.h \ monotonic.h \ msgbuf.h \ @@ -184,17 +186,18 @@ PRIVATE_KERNELFILES = \ fsevents.h \ work_interval.h \ kern_sysctl.h \ + vsock_transport.h \ XNU_ONLY_EXPORTS = \ bsdtask_info.h \ file_internal.h \ filedesc.h \ guarded.h \ - linker_set.h \ mount_internal.h \ munge.h \ pipe.h \ proc_internal.h \ + proc_require.h \ pthread_internal.h \ resourcevar.h \ semaphore.h \ @@ -202,13 +205,21 @@ XNU_ONLY_EXPORTS = \ uio_internal.h \ ulock.h \ ux_exception.h \ - vnode_internal.h + vnode_internal.h \ + vsock_domain.h + +MODULEMAP_INCDIR_FILES = \ + sys__types.modulemap \ + sys_types.modulemap \ + sys_cdefs.modulemap # /usr/include INSTALL_MI_LIST = ${DATAFILES} INSTALL_DRIVERKIT_MI_LIST = ${DRIVERKIT_DATAFILES} +INSTALL_MODULEMAP_INCDIR_MI_LIST = ${MODULEMAP_INCDIR_FILES} + INSTALL_MI_GEN_LIST = syscall.h _posix_availability.h _symbol_aliasing.h INSTALL_DRIVERKIT_MI_GEN_LIST = _posix_availability.h _symbol_aliasing.h @@ -217,7 +228,7 @@ INSTALL_MI_DIR = sys EXPORT_MI_LIST = ${KERNELFILES} ${PRIVATE_KERNELFILES} ${XNU_ONLY_EXPORTS} -EXPORT_MI_GEN_LIST = syscall.h sysproto.h kdebugevents.h +EXPORT_MI_GEN_LIST = syscall.h sysproto.h EXPORT_MI_DIR = sys @@ -237,41 +248,35 @@ INSTALL_KF_MI_LIST = ${KERNELFILES} INSTALL_KF_MI_GEN_LIST = MAKESYSCALLS = $(SRCROOT)/bsd/kern/makesyscalls.sh -MAKEKDEBUGEVENTS = $(SRCROOT)/bsd/kern/makekdebugevents.py $(OBJROOT)/cscope.genhdrs: $(_v)mkdir -p $(OBJROOT)/cscope.genhdrs -$(OBJROOT)/syscall.codes: $(SRCROOT)/bsd/kern/syscalls.master $(MAKESYSCALLS) - $(call makelog,[$(CMD_MC)] $(ColorH)GENERATING$(Color0) $(ColorLF)$@$(Color0) from $(ColorF)$<$(Color0)) +$(OBJROOT)/syscall.codes: $(TARGET)/bsd.syscalls.master + @$(LOG_GENERATE) "$(ColorLF)$@$(Color0) from $(ColorF)$( $@ $(OBJROOT)/trace.codes: $(SRCROOT)/bsd/kern/trace_codes $(OBJROOT)/syscall.codes $(_v)sort -g $(SRCROOT)/bsd/kern/trace_codes $(OBJROOT)/syscall.codes >$@ -syscall.h: $(SRCROOT)/bsd/kern/syscalls.master $(MAKESYSCALLS) $(OBJROOT)/cscope.genhdrs - $(call makelog,[$(CMD_MC)] $(ColorH)GENERATING$(Color0) $(ColorLF)bsd/sys/$@$(Color0) from $(ColorF)$<$(Color0)) 
+syscall.h: $(TARGET)/bsd.syscalls.master $(OBJROOT)/cscope.genhdrs + @$(LOG_GENERATE) "$(ColorLF)bsd/sys/$@$(Color0) from $(ColorF)$( $(OBJROOT)/cscope.genhdrs/$@.path $(_v)$(MAKESYSCALLS) $< header > /dev/null -sysproto.h: $(SRCROOT)/bsd/kern/syscalls.master $(MAKESYSCALLS) $(OBJROOT)/cscope.genhdrs - $(call makelog,[$(CMD_MC)] $(ColorH)GENERATING$(Color0) $(ColorLF)bsd/sys/$@$(Color0) from $(ColorF)$<$(Color0)) +sysproto.h: $(TARGET)/bsd.syscalls.master $(OBJROOT)/cscope.genhdrs + @$(LOG_GENERATE) "bsd/sys/$@$(Color0) from $(ColorF)$( $(OBJROOT)/cscope.genhdrs/$@.path $(_v)$(MAKESYSCALLS) $< proto > /dev/null -kdebugevents.h: $(OBJROOT)/trace.codes $(MAKEKDEBUGEVENTS) $(OBJROOT)/cscope.genhdrs - $(call makelog,[$(CMD_MC)] $(ColorH)GENERATING$(Color0) $(ColorLF)bsd/sys/$@$(Color0) from $(ColorF)$<$(Color0)) - @echo "$(OBJPATH)/bsd/sys/$@" > $(OBJROOT)/cscope.genhdrs/$@.path - $(_v)$(MAKEKDEBUGEVENTS) $< > "$(OBJPATH)/bsd/sys/$@" - MAKE_POSIX_AVAILABILITY = $(SRCROOT)/bsd/sys/make_posix_availability.sh _posix_availability.h: $(MAKE_POSIX_AVAILABILITY) - $(call makelog,[$(CMD_MC)] $(ColorH)GENERATING$(Color0) $(ColorLF)bsd/sys/$@$(Color0)) + @$(LOG_GENERATE) "bsd/sys/$@$(Color0)" $(_v)$(MAKE_POSIX_AVAILABILITY) "$@" MAKE_SYMBOL_ALIASING = $(SRCROOT)/bsd/sys/make_symbol_aliasing.sh _symbol_aliasing.h: $(MAKE_SYMBOL_ALIASING) - $(call makelog,[$(CMD_MC)] $(ColorH)GENERATING$(Color0) $(ColorLF)bsd/sys/$@$(Color0)) + @$(LOG_GENERATE) "bsd/sys/$@$(Color0)" $(_v)$(MAKE_SYMBOL_ALIASING) "$(SDKROOT)" "$@" # generated headers needed early (used by iig during installhdrs of iokit/DriverKit) @@ -284,10 +289,10 @@ TRACE_CODES_DEST = \ $(TRACE_CODES_DEST): $(OBJROOT)/trace.codes $(_v)$(MKDIR) $(DSTROOT)/$(INSTALL_SHARE_MISC_DIR) - $(call makelog,INSTALL $(@F)) + @$(LOG) INSTALL "$(@F)" $(_v)$(INSTALL) $(INSTALL_FLAGS) $(OBJROOT)/trace.codes $@ -do_textfiles_install:: $(TRACE_CODES_DEST) +do_textfiles_install_mi:: $(TRACE_CODES_DEST) include $(MakeInc_rule) include $(MakeInc_dir) diff --git a/bsd/sys/_endian.h b/bsd/sys/_endian.h index 4b8daa852..79fa01669 100644 --- a/bsd/sys/_endian.h +++ b/bsd/sys/_endian.h @@ -127,6 +127,7 @@ __END_DECLS #else /* __DARWIN_BYTE_ORDER == __DARWIN_LITTLE_ENDIAN */ +#ifndef DRIVERKIT #include #define ntohs(x) __DARWIN_OSSwapInt16(x) @@ -134,11 +135,23 @@ __END_DECLS #define ntohl(x) __DARWIN_OSSwapInt32(x) #define htonl(x) __DARWIN_OSSwapInt32(x) +#else /* DRIVERKIT */ +#define ntohs(x) ((__uint16_t)__builtin_bswap16((__uint16_t)(x))) +#define htons(x) ((__uint16_t)__builtin_bswap16((__uint16_t)(x))) + +#define ntohl(x) ((__uint32_t)__builtin_bswap32((__uint32_t)(x))) +#define htonl(x) ((__uint32_t)__builtin_bswap32((__uint32_t)(x))) +#endif /* DRIVERKIT */ #if defined(KERNEL) || (!defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE)) +#ifndef DRIVERKIT #define ntohll(x) __DARWIN_OSSwapInt64(x) #define htonll(x) __DARWIN_OSSwapInt64(x) +#else /* DRIVERKIT */ +#define ntohll(x) ((__uint64_t)__builtin_bswap64((__uint64_t)(x))) +#define htonll(x) ((__uint64_t)__builtin_bswap64((__uint64_t)(x))) +#endif /* DRIVERKIT */ #define NTOHL(x) (x) = ntohl((__uint32_t)x) #define NTOHS(x) (x) = ntohs((__uint16_t)x) diff --git a/bsd/sys/_select.h b/bsd/sys/_select.h index 567d62185..174814916 100644 --- a/bsd/sys/_select.h +++ b/bsd/sys/_select.h @@ -36,8 +36,13 @@ #ifndef _SYS__SELECT_H_ #define _SYS__SELECT_H_ +#include /* __DARWIN_EXTSN_C, __DARWIN_1050, __DARWIN_ALIAS_C */ +#include /* fd_set */ +#include /* struct timeval */ + int select(int, fd_set * __restrict, fd_set * 
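[Editorial aside, not part of the patch: under DRIVERKIT the _endian.h changes above stop pulling in the OSByteOrder machinery and define ntohs/htons/ntohl/htonl/ntohll/htonll directly on the compiler byte-swap builtins, which produce the same results. The equivalence is easy to check in isolation (clang/gcc builtins, little-endian host assumed):]

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>   /* htonl/htons from the regular headers */

int
main(void)
{
	uint32_t v = 0x11223344u;
	uint16_t s = 0xAABBu;

	/* on a little-endian host, htonl/htons are byte swaps, so the
	 * builtin-based DriverKit definitions produce identical results */
	printf("%d %d\n",
	    htonl(v) == __builtin_bswap32(v),
	    htons(s) == __builtin_bswap16(s));
	return 0;
}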
__restrict, fd_set * __restrict, struct timeval * __restrict) + #if defined(_DARWIN_C_SOURCE) || defined(_DARWIN_UNLIMITED_SELECT) __DARWIN_EXTSN_C(select) #else /* !_DARWIN_C_SOURCE && !_DARWIN_UNLIMITED_SELECT */ diff --git a/bsd/sys/_types/Makefile b/bsd/sys/_types/Makefile index 5b5373961..c427d2f46 100644 --- a/bsd/sys/_types/Makefile +++ b/bsd/sys/_types/Makefile @@ -121,11 +121,17 @@ DRIVERKIT_DATAFILES = \ _u_int32_t.h \ _u_int64_t.h \ _u_int8_t.h \ + _u_int.h \ + _u_char.h \ + _u_short.h \ _uintptr_t.h \ _uuid_t.h \ _va_list.h \ _wchar_t.h \ _wint_t.h \ + _in_addr_t.h\ + _in_port_t.h\ + _sa_family_t.h # Installs header file for Apple internal use in user level - # $(DSTROOT)/System/Library/Frameworks/System.framework/PrivateHeaders diff --git a/bsd/sys/_types/_fd_def.h b/bsd/sys/_types/_fd_def.h index d32ee5153..e4516fd33 100644 --- a/bsd/sys/_types/_fd_def.h +++ b/bsd/sys/_types/_fd_def.h @@ -54,7 +54,7 @@ typedef struct fd_set { } fd_set; #if !KERNEL -int __darwin_check_fd_set_overflow(int, const void *, int) __attribute__((__weak_import__)); +int __darwin_check_fd_set_overflow(int, const void *, int) __API_AVAILABLE(macosx(10.16), ios(14.0), tvos(14.0), watchos(7.0)); #endif __END_DECLS @@ -62,6 +62,10 @@ __END_DECLS __header_always_inline int __darwin_check_fd_set(int _a, const void *_b) { +#ifdef __clang__ +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wunguarded-availability-new" +#endif if ((uintptr_t)&__darwin_check_fd_set_overflow != (uintptr_t) 0) { #if defined(_DARWIN_UNLIMITED_SELECT) || defined(_DARWIN_C_SOURCE) return __darwin_check_fd_set_overflow(_a, _b, 1); @@ -71,6 +75,9 @@ __darwin_check_fd_set(int _a, const void *_b) } else { return 1; } +#ifdef __clang__ +#pragma clang diagnostic pop +#endif } /* This inline avoids argument side-effect issues with FD_ISSET() */ diff --git a/bsd/sys/_types/_int8_t.h b/bsd/sys/_types/_int8_t.h index 9176298a5..14eb9f7ea 100644 --- a/bsd/sys/_types/_int8_t.h +++ b/bsd/sys/_types/_int8_t.h @@ -27,5 +27,5 @@ */ #ifndef _INT8_T #define _INT8_T -typedef __signed char int8_t; +typedef signed char int8_t; #endif /* _INT8_T */ diff --git a/bsd/sys/_types/_ucontext.h b/bsd/sys/_types/_ucontext.h index 65184e44e..236042293 100644 --- a/bsd/sys/_types/_ucontext.h +++ b/bsd/sys/_types/_ucontext.h @@ -38,6 +38,7 @@ #include /* __darwin_size_t */ #include /* _STRUCT_MCONTEXT */ #include /* __darwin_sigset_t */ +#include /* _STRUCT_SIGALTSTACK */ _STRUCT_UCONTEXT { diff --git a/bsd/sys/_types/_ucontext64.h b/bsd/sys/_types/_ucontext64.h index 028f77a5d..b177398f1 100644 --- a/bsd/sys/_types/_ucontext64.h +++ b/bsd/sys/_types/_ucontext64.h @@ -38,6 +38,7 @@ #include /* __darwin_size_t */ #include /* _STRUCT_MCONTEXT */ #include /* __darwin_sigset_t */ +#include /* _STRUCT_SIGALTSTACK */ _STRUCT_UCONTEXT64 { diff --git a/bsd/sys/acct.h b/bsd/sys/acct.h index 2f86e79bc..1fbf1c75f 100644 --- a/bsd/sys/acct.h +++ b/bsd/sys/acct.h @@ -70,9 +70,12 @@ #include #include +#include /* u_int8_t */ #include /* u_int16_t */ #include /* u_int32_t */ #include /* uid_t */ +#include /* gid_t */ +#include /* dev_t */ /* * Accounting structures; these use a comp_t type which is a 3 bits base 8 diff --git a/bsd/sys/aio_kern.h b/bsd/sys/aio_kern.h index 8412a2c48..cd808e7a0 100644 --- a/bsd/sys/aio_kern.h +++ b/bsd/sys/aio_kern.h @@ -38,51 +38,10 @@ #ifndef _SYS_AIO_KERN_H_ #define _SYS_AIO_KERN_H_ -#ifdef KERNEL +#ifdef KERNEL_PRIVATE -struct aio_workq_entry { - TAILQ_ENTRY( aio_workq_entry ) aio_workq_link; /* Protected by queue lock */ - 
TAILQ_ENTRY( aio_workq_entry ) aio_proc_link; /* Proteced by proc's aio lock */ - - /* Proc lock */ - void *group_tag; /* identifier used to group IO requests */ - - /* Initialized and never changed, safe to access */ - struct proc *procp; /* user proc that queued this request */ - user_addr_t uaiocbp; /* pointer passed in from user land */ - struct user_aiocb aiocb; /* copy of aiocb from user land */ - vm_map_t aio_map; /* user land map we have a reference to */ - thread_t thread; /* thread that queued this request */ - - /* Entry lock */ - int aio_refcount; - user_ssize_t returnval; /* return value from read / write request */ - int errorval; /* error value from read / write request */ - int flags; -}; typedef struct aio_workq_entry aio_workq_entry; -/* - * definitions for aio_workq_entry.flags - */ -#define AIO_READ 0x00000001 /* a read */ -#define AIO_WRITE 0x00000002 /* a write */ -#define AIO_FSYNC 0x00000004 /* aio_fsync with op = O_SYNC */ -#define AIO_DSYNC 0x00000008 /* aio_fsync with op = O_DSYNC (not supported yet) */ -#define AIO_LIO 0x00000010 /* lio_listio generated IO */ -#define AIO_DO_FREE 0x00000800 /* entry should be freed when last reference is dropped. */ - /* set by aio_return() and _aio_exit() */ -#define AIO_DISABLE 0x00002000 /* process is trying to exit or exec and we need */ - /* to not try to send a signal from do_aio_completion() */ -#define AIO_CLOSE_WAIT 0x00004000 /* process is trying to close and is */ - /* waiting for one or more active IO requests to */ - /* complete */ -#define AIO_EXIT_WAIT 0x00008000 /* process is trying to exit or exec and is */ - /* waiting for one or more active IO requests to */ - /* complete */ - -#define AIO_LIO_NOTIFY 0x00010000 /* wait for list complete */ - /* * Prototypes */ @@ -105,6 +64,6 @@ aio_init(void); task_t get_aiotask(void); -#endif /* KERNEL */ +#endif /* KERNEL_PRIVATE */ #endif /* _SYS_AIO_KERN_H_ */ diff --git a/bsd/sys/attr.h b/bsd/sys/attr.h index d5eecd682..0ef2b47d8 100644 --- a/bsd/sys/attr.h +++ b/bsd/sys/attr.h @@ -60,7 +60,9 @@ #define FSOPT_NOFIRMLINKPATH 0x00000080 #endif /* FSOPT_NOFIRMLINKPATH */ #define FSOPT_FOLLOW_FIRMLINK 0x00000100 +#endif /* PRIVATE */ #define FSOPT_RETURN_REALDEV 0x00000200 +#ifdef PRIVATE #ifndef FSOPT_ISREALFSID /*a copy is in fsgetpath.h */ #define FSOPT_ISREALFSID FSOPT_RETURN_REALDEV #endif @@ -253,6 +255,10 @@ typedef struct vol_capabilities_attr { * that implies multiple volumes must be mounted in order to boot and root the * operating system. Typically, this means a read-only system volume and a * writable data volume. + * + * VOL_CAP_FMT_SEALED: When set, this volume is cryptographically sealed. + * Any modifications to volume data or metadata will be detected and may + * render the volume unusable. 
*/ #define VOL_CAP_FMT_PERSISTENTOBJECTIDS 0x00000001 #define VOL_CAP_FMT_SYMBOLICLINKS 0x00000002 @@ -279,6 +285,7 @@ typedef struct vol_capabilities_attr { #define VOL_CAP_FMT_NO_PERMISSIONS 0x00400000 #define VOL_CAP_FMT_SHARED_SPACE 0x00800000 #define VOL_CAP_FMT_VOL_GROUPS 0x01000000 +#define VOL_CAP_FMT_SEALED 0x02000000 /* * VOL_CAP_INT_SEARCHFS: When set, the volume implements the @@ -533,8 +540,9 @@ typedef struct vol_attributes_attr { #define ATTR_CMNEXT_REALFSID 0x00000080 #define ATTR_CMNEXT_CLONEID 0x00000100 #define ATTR_CMNEXT_EXT_FLAGS 0x00000200 +#define ATTR_CMNEXT_RECURSIVE_GENCOUNT 0x00000400 -#define ATTR_CMNEXT_VALIDMASK 0x000003fc +#define ATTR_CMNEXT_VALIDMASK 0x000007fc #define ATTR_CMNEXT_SETMASK 0x00000000 /* Deprecated fork attributes */ diff --git a/bsd/sys/buf_internal.h b/bsd/sys/buf_internal.h index 2bd3511b2..279f5f8b0 100644 --- a/bsd/sys/buf_internal.h +++ b/bsd/sys/buf_internal.h @@ -291,14 +291,16 @@ extern struct buf *buf_headers; /* The buffer headers. */ /* * Definitions for the buffer free lists. */ -#define BQUEUES 6 /* number of free buffer queues */ -#define BQ_LOCKED 0 /* super-blocks &c */ -#define BQ_LRU 1 /* lru, useful buffers */ -#define BQ_AGE 2 /* rubbish */ -#define BQ_EMPTY 3 /* buffer headers with no memory */ -#define BQ_META 4 /* buffer containing metadata */ -#define BQ_LAUNDRY 5 /* buffers that need cleaning */ +enum bq_opts { + BQ_LOCKED = 0, /* super-blocks &c */ + BQ_LRU = 1, /* lru, useful buffers */ + BQ_AGE = 2, /* rubbish */ + BQ_EMPTY = 3, /* buffer headers with no memory */ + BQ_META = 4, /* buffer containing metadata */ + BQ_LAUNDRY = 5, /* buffers that need cleaning */ + BQUEUES = 6 /* number of free buffer queues */ +}; __BEGIN_DECLS @@ -314,7 +316,7 @@ void buf_list_unlock(void); void cluster_init(void); -int count_busy_buffers(void); +uint32_t count_busy_buffers(void); int buf_flushdirtyblks_skipinfo(vnode_t, int, int, const char *); void buf_wait_for_shadow_io(vnode_t, daddr64_t); @@ -340,8 +342,8 @@ struct bufstats { long bufs_miss; /* not incore. not in VM */ long bufs_sleeps; /* buffer starvation */ long bufs_eblk; /* Calls to geteblk */ - long bufs_iobufmax; /* Max. number of IO buffers used */ - long bufs_iobufinuse; /* number of IO buffers in use */ + uint32_t bufs_iobufmax; /* Max. number of IO buffers used */ + uint32_t bufs_iobufinuse; /* number of IO buffers in use */ long bufs_iobufsleeps; /* IO buffer starvation */ long bufs_iobufinuse_vdev; /* number of IO buffers in use by * diskimages */ diff --git a/bsd/sys/callout.h b/bsd/sys/callout.h deleted file mode 100644 index df833aa3b..000000000 --- a/bsd/sys/callout.h +++ /dev/null @@ -1,86 +0,0 @@ -/* - * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved. - * - * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * - * This file contains Original Code and/or Modifications of Original Code - * as defined in and that are subject to the Apple Public Source License - * Version 2.0 (the 'License'). You may not use this file except in - * compliance with the License. The rights granted to you under the License - * may not be used to create, or enable the creation or redistribution of, - * unlawful or unlicensed copies of an Apple operating system, or to - * circumvent, violate, or enable the circumvention or violation of, any - * terms of an Apple operating system software license agreement. - * - * Please obtain a copy of the License at - * http://www.opensource.apple.com/apsl/ and read it before using this file. 
- * - * The Original Code and all software distributed under the License are - * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER - * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, - * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. - * Please see the License for the specific language governing rights and - * limitations under the License. - * - * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ - */ -/*- - * Copyright (c) 1990, 1993 - * The Regents of the University of California. All rights reserved. - * (c) UNIX System Laboratories, Inc. - * All or some portions of this file are derived from material licensed - * to the University of California by American Telephone and Telegraph - * Co. or Unix System Laboratories, Inc. and are reproduced herein with - * the permission of UNIX System Laboratories, Inc. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. All advertising materials mentioning features or use of this software - * must display the following acknowledgement: - * This product includes software developed by the University of - * California, Berkeley and its contributors. - * 4. Neither the name of the University nor the names of its contributors - * may be used to endorse or promote products derived from this software - * without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - * - * @(#)callout.h 8.2 (Berkeley) 1/21/94 - */ - -#ifndef _SYS_CALLOUT_H_ -#define _SYS_CALLOUT_H_ - -#include - -#include - - -#ifdef __APPLE_API_OBSOLETE -#define CALLOUT_PRI_SOFTINT0 0 -#define CALLOUT_PRI_SOFTINT1 1 -#define CALLOUT_PRI_RETRACE 2 -#define CALLOUT_PRI_DSP 3 -#define CALLOUT_PRI_THREAD 4 /* run in a thread */ -#define CALLOUT_PRI_NOW 5 /* must be last */ -#define N_CALLOUT_PRI 6 - -#endif /* __APPLE_API_OBSOLETE */ -#endif /* _SYS_CALLOUT_H_ */ diff --git a/bsd/sys/cdefs.h b/bsd/sys/cdefs.h index 066b91859..c00ca16de 100644 --- a/bsd/sys/cdefs.h +++ b/bsd/sys/cdefs.h @@ -174,6 +174,15 @@ #define __cold #endif +/* __exported denotes symbols that should be exported even when symbols + * are hidden by default. 
+ * __exported_push/_exported_pop are pragmas used to delimit a range of + * symbols that should be exported even when symbols are hidden by default. + */ +#define __exported __attribute__((__visibility__("default"))) +#define __exported_push _Pragma("GCC visibility push(default)") +#define __exported_pop _Pragma("GCC visibility pop") + /* __deprecated causes the compiler to produce a warning when encountering * code using the deprecated functionality. * __deprecated_msg() does the same, and compilers that support it will print @@ -206,9 +215,42 @@ #endif /* !defined(KERNEL) || defined(KERNEL_PRIVATE) */ /* __unavailable causes the compiler to error out when encountering - * code using the tagged function of variable. + * code using the tagged function + */ +#if __has_attribute(unavailable) +#define __unavailable __attribute__((__unavailable__)) +#else +#define __unavailable +#endif + +#if defined(KERNEL) && !defined(KERNEL_PRIVATE) +#define __kpi_unavailable __unavailable +#else /* !defined(KERNEL) || defined(KERNEL_PRIVATE) */ +#define __kpi_unavailable +#endif /* !defined(KERNEL) || defined(KERNEL_PRIVATE) */ + +#if defined(KERNEL) +#if defined(XNU_KERNEL_PRIVATE) +/* This macro is meant to be used for kpi deprecated to x86 3rd parties + * but should be marked as unavailable for arm macOS devices. + * XNU: nothing (API is still available) + * 1st party kexts: __deprecated + * 3rd party kexts macOS x86: __deprecated + * 3rd party kexts macOS arm: __unavailable */ -#define __unavailable __attribute__((__unavailable__)) +#define __kpi_deprecated_arm64_macos_unavailable +#elif defined(KERNEL_PRIVATE) +#define __kpi_deprecated_arm64_macos_unavailable __deprecated +#else /* !defined(XNU_KERNEL_PRIVATE) */ +#if TARGET_OS_OSX && defined(__arm64__) +#define __kpi_deprecated_arm64_macos_unavailable __unavailable +#else /* !TARGET_OS_OSX || !defined(__arm64__) */ +#define __kpi_deprecated_arm64_macos_unavailable __deprecated +#endif /* !TARGET_OS_OSX || !defined(__arm64__) */ +#endif /* !defined(XNU_KERNEL_PRIVATE) */ +#else /* !defined(KERNEL) */ +#define __kpi_deprecated_arm64_macos_unavailable +#endif /* !defined(KERNEL) */ /* Delete pseudo-keywords wherever they are not available or needed. */ #ifndef __dead @@ -559,9 +601,19 @@ #endif /* PLATFORM_DriverKit */ #ifdef PLATFORM_MacOSX /* Platform: MacOSX */ +#if defined(__i386__) #define __DARWIN_ONLY_64_BIT_INO_T 0 -/* #undef __DARWIN_ONLY_UNIX_CONFORMANCE (automatically set for 64-bit) */ +#define __DARWIN_ONLY_UNIX_CONFORMANCE 0 #define __DARWIN_ONLY_VERS_1050 0 +#elif defined(__x86_64__) +#define __DARWIN_ONLY_64_BIT_INO_T 0 +#define __DARWIN_ONLY_UNIX_CONFORMANCE 1 +#define __DARWIN_ONLY_VERS_1050 0 +#else +#define __DARWIN_ONLY_64_BIT_INO_T 1 +#define __DARWIN_ONLY_UNIX_CONFORMANCE 1 +#define __DARWIN_ONLY_VERS_1050 1 +#endif #endif /* PLATFORM_MacOSX */ #endif /* KERNEL */ @@ -583,14 +635,6 @@ * pre-10.5, and it is the default compilation environment, revert the * compilation environment to pre-__DARWIN_UNIX03. 
*/ -#if !defined(__DARWIN_ONLY_UNIX_CONFORMANCE) -# if defined(__LP64__) -# define __DARWIN_ONLY_UNIX_CONFORMANCE 1 -# else /* !__LP64__ */ -# define __DARWIN_ONLY_UNIX_CONFORMANCE 0 -# endif /* __LP64__ */ -#endif /* !__DARWIN_ONLY_UNIX_CONFORMANCE */ - #if !defined(__DARWIN_UNIX03) # if defined(KERNEL) # define __DARWIN_UNIX03 0 @@ -952,12 +996,12 @@ #if defined(__cplusplus) #define __container_of(ptr, type, field) __extension__({ \ - const typeof(((type *)nullptr)->field) *__ptr = (ptr); \ + const __typeof__(((type *)nullptr)->field) *__ptr = (ptr); \ (type *)((uintptr_t)__ptr - offsetof(type, field)); \ }) #else #define __container_of(ptr, type, field) __extension__({ \ - const typeof(((type *)NULL)->field) *__ptr = (ptr); \ + const __typeof__(((type *)NULL)->field) *__ptr = (ptr); \ (type *)((uintptr_t)__ptr - offsetof(type, field)); \ }) #endif diff --git a/bsd/sys/clist.h b/bsd/sys/clist.h deleted file mode 100644 index f4161a638..000000000 --- a/bsd/sys/clist.h +++ /dev/null @@ -1,80 +0,0 @@ -/* - * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved. - * - * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * - * This file contains Original Code and/or Modifications of Original Code - * as defined in and that are subject to the Apple Public Source License - * Version 2.0 (the 'License'). You may not use this file except in - * compliance with the License. The rights granted to you under the License - * may not be used to create, or enable the creation or redistribution of, - * unlawful or unlicensed copies of an Apple operating system, or to - * circumvent, violate, or enable the circumvention or violation of, any - * terms of an Apple operating system software license agreement. - * - * Please obtain a copy of the License at - * http://www.opensource.apple.com/apsl/ and read it before using this file. - * - * The Original Code and all software distributed under the License are - * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER - * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, - * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. - * Please see the License for the specific language governing rights and - * limitations under the License. - * - * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ - */ -/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ -/*- - * Copyright (c) 1990, 1993 - * The Regents of the University of California. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. All advertising materials mentioning features or use of this software - * must display the following acknowledgement: - * This product includes software developed by the University of - * California, Berkeley and its contributors. - * 4. Neither the name of the University nor the names of its contributors - * may be used to endorse or promote products derived from this software - * without specific prior written permission. 
- * - * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - * - * @(#)clist.h 8.1 (Berkeley) 6/4/93 - */ - -#ifndef _SYS_CLIST_H_ -#define _SYS_CLIST_H_ - - -#ifdef KERNEL_PRIVATE - -struct cblock { - struct cblock *c_next; /* next cblock in queue */ - char c_quote[CBQSIZE]; /* quoted characters */ - char c_info[CBSIZE]; /* characters */ -}; - -extern struct cblock *cfree, *cfreelist; -extern int cfreecount, nclist; -#endif /* KERNEL_PRIVATE */ - -#endif /* _SYS_CLIST_H_ */ diff --git a/bsd/sys/codesign.h b/bsd/sys/codesign.h index 7b5d56379..00b574984 100644 --- a/bsd/sys/codesign.h +++ b/bsd/sys/codesign.h @@ -39,6 +39,7 @@ #define MAC_VNODE_CHECK_DYLD_SIM 0x1 /* tells the MAC framework that dyld-sim is being loaded */ #define CLEAR_LV_ENTITLEMENT "com.apple.private.security.clear-library-validation" +#define OVERRIDE_PLUGIN_HOST_ENTITLEMENT "com.apple.private.security.override-plugin-host-detection" /* csops operations */ #define CS_OPS_STATUS 0 /* return status */ @@ -90,10 +91,12 @@ int cs_valid(struct proc *); int cs_process_enforcement(struct proc *); int cs_process_global_enforcement(void); int cs_system_enforcement(void); +int cs_vm_supports_4k_translations(void); int cs_require_lv(struct proc *); int csproc_forced_lv(struct proc* p); int cs_system_require_lv(void); uint32_t cs_entitlement_flags(struct proc *p); +int cs_entitlements_blob_get_vnode(struct vnode *, off_t, void **, size_t *); int cs_entitlements_blob_get(struct proc *, void **, size_t *); #ifdef KERNEL_PRIVATE int cs_entitlements_dictionary_copy(struct proc *, void **); @@ -138,15 +141,22 @@ void csblob_entitlements_dictionary_set(struct cs_blob *csblob, void */ const char * csproc_get_teamid(struct proc *); +const char * csproc_get_identity(struct proc *); const char * csvnode_get_teamid(struct vnode *, off_t); int csproc_get_platform_binary(struct proc *); int csproc_get_prod_signed(struct proc *); const char * csfg_get_teamid(struct fileglob *); +const char * csfg_get_supplement_teamid(struct fileglob *); int csfg_get_path(struct fileglob *, char *, int *); int csfg_get_platform_binary(struct fileglob *); +int csfg_get_supplement_platform_binary(struct fileglob *); uint8_t * csfg_get_cdhash(struct fileglob *, uint64_t, size_t *); +uint8_t * csfg_get_supplement_cdhash(struct fileglob *, uint64_t, size_t *); +const uint8_t * csfg_get_supplement_linkage_cdhash(struct fileglob *, uint64_t, size_t *); int csfg_get_prod_signed(struct fileglob *); +int csfg_get_supplement_prod_signed(struct fileglob *fg); unsigned int csfg_get_signer_type(struct fileglob *); +unsigned int csfg_get_supplement_signer_type(struct fileglob *); const char *csfg_get_identity(struct fileglob *fg, off_t offset); unsigned int csproc_get_signer_type(struct proc *); @@ -166,9 +176,9 @@ void cs_blob_free(struct cs_blob *blob); #ifdef 
XNU_KERNEL_PRIVATE -void cs_init(void); int cs_allow_invalid(struct proc *); int cs_invalid_page(addr64_t vaddr, boolean_t *cs_killed); +void cs_process_invalidated(struct proc *); int csproc_get_platform_path(struct proc *); #if !SECURE_KERNEL diff --git a/bsd/sys/conf.h b/bsd/sys/conf.h index 68cec736c..0fb01991f 100644 --- a/bsd/sys/conf.h +++ b/bsd/sys/conf.h @@ -297,7 +297,7 @@ __BEGIN_DECLS #ifdef KERNEL_PRIVATE void devsw_init(void); extern struct cdevsw cdevsw[]; -extern int cdevsw_setkqueueok(int, struct cdevsw*, int); +extern int cdevsw_setkqueueok(int, const struct cdevsw*, int); #endif /* KERNEL_PRIVATE */ #ifdef BSD_KERNEL_PRIVATE @@ -306,12 +306,12 @@ extern void devsw_unlock(dev_t, int); #endif /* BSD_KERNEL_PRIVATE */ int bdevsw_isfree(int); -int bdevsw_add(int, struct bdevsw *); -int bdevsw_remove(int, struct bdevsw *); +int bdevsw_add(int, const struct bdevsw *); +int bdevsw_remove(int, const struct bdevsw *); int cdevsw_isfree(int); -int cdevsw_add(int, struct cdevsw *); -int cdevsw_add_with_bdev(int index, struct cdevsw * csw, int bdev); -int cdevsw_remove(int, struct cdevsw *); +int cdevsw_add(int, const struct cdevsw *); +int cdevsw_add_with_bdev(int index, const struct cdevsw * csw, int bdev); +int cdevsw_remove(int, const struct cdevsw *); int isdisk(dev_t, int); __END_DECLS #endif /* KERNEL */ diff --git a/bsd/sys/cprotect.h b/bsd/sys/cprotect.h index bfb333614..080dcc203 100644 --- a/bsd/sys/cprotect.h +++ b/bsd/sys/cprotect.h @@ -38,6 +38,7 @@ #include #include #include +#include __BEGIN_DECLS @@ -67,6 +68,7 @@ enum { #endif #define CP_MAX_WRAPPEDKEYSIZE 128 /* The size of the largest allowed key */ +#define VFS_CP_MAX_CACHEBUFLEN 64 /* Maximum size of the cached key */ /* lock events from AppleKeyStore */ enum { @@ -99,6 +101,27 @@ typedef uint64_t cp_crypto_id_t; typedef struct cprotect *cprotect_t; typedef struct cpx *cpx_t; +#ifdef BSD_KERNEL_PRIVATE +/* Not for consumption outside of XNU */ +typedef uint32_t cpx_flags_t; +/* + * This is a CPX structure with a fixed-length key buffer. We need this defined in a header + * so that we can use this structure to allocate the memory for the zone(s) properly. + */ +typedef struct fcpx { +#ifdef DEBUG + uint32_t cpx_magic1; +#endif // DEBUG + aes_encrypt_ctx *cpx_iv_aes_ctx_ptr;// Context used for generating the IV + cpx_flags_t cpx_flags; + uint16_t cpx_max_key_len; + uint16_t cpx_key_len; + uint8_t cpx_cached_key[VFS_CP_MAX_CACHEBUFLEN]; + //Fixed length all the way through +} fcpx_t; + +#endif // BSD_KERNEL_PRIVATE + typedef struct cp_key { uint8_t len; void *key; @@ -155,11 +178,14 @@ typedef int backup_key_t(cp_cred_t access, const cp_wrapped_key_t wrapped_key_in * fields; cpx provides opacity and allows us to modify behavior internally * without requiring kext changes. 
*/ -cpx_t cpx_alloc(size_t key_size); +cpx_t cpx_alloc(size_t key_size, bool needs_ctx); +int cpx_alloc_ctx(cpx_t cpx); +void cpx_free_ctx(cpx_t cpx); void cpx_init(cpx_t, size_t key_len); +void cpx_init_ctx_ptr(cpx_t cpx); void cpx_free(cpx_t); void cpx_writeprotect(cpx_t cpx); -__attribute__((const)) size_t cpx_size(size_t key_size); +__attribute__((const)) size_t cpx_size(size_t key_len); __attribute__((pure)) bool cpx_is_sep_wrapped_key(const struct cpx *); void cpx_set_is_sep_wrapped_key(struct cpx *, bool); __attribute__((pure)) bool cpx_is_composite_key(const struct cpx *); diff --git a/bsd/sys/csr.h b/bsd/sys/csr.h index 7c083d461..ac3503392 100644 --- a/bsd/sys/csr.h +++ b/bsd/sys/csr.h @@ -38,37 +38,48 @@ typedef uint32_t csr_config_t; typedef uint32_t csr_op_t; -/* Rootless configuration flags */ +/* CSR configuration flags */ #define CSR_ALLOW_UNTRUSTED_KEXTS (1 << 0) #define CSR_ALLOW_UNRESTRICTED_FS (1 << 1) #define CSR_ALLOW_TASK_FOR_PID (1 << 2) #define CSR_ALLOW_KERNEL_DEBUGGER (1 << 3) #define CSR_ALLOW_APPLE_INTERNAL (1 << 4) -#define CSR_ALLOW_DESTRUCTIVE_DTRACE (1 << 5) /* name deprecated */ -#define CSR_ALLOW_UNRESTRICTED_DTRACE (1 << 5) -#define CSR_ALLOW_UNRESTRICTED_NVRAM (1 << 6) -#define CSR_ALLOW_DEVICE_CONFIGURATION (1 << 7) -#define CSR_ALLOW_ANY_RECOVERY_OS (1 << 8) -#define CSR_ALLOW_UNAPPROVED_KEXTS (1 << 9) +#define CSR_ALLOW_DESTRUCTIVE_DTRACE (1 << 5) /* name deprecated */ +#define CSR_ALLOW_UNRESTRICTED_DTRACE (1 << 5) +#define CSR_ALLOW_UNRESTRICTED_NVRAM (1 << 6) +#define CSR_ALLOW_DEVICE_CONFIGURATION (1 << 7) +#define CSR_ALLOW_ANY_RECOVERY_OS (1 << 8) +#define CSR_ALLOW_UNAPPROVED_KEXTS (1 << 9) #define CSR_ALLOW_EXECUTABLE_POLICY_OVERRIDE (1 << 10) +#define CSR_ALLOW_UNAUTHENTICATED_ROOT (1 << 11) #define CSR_VALID_FLAGS (CSR_ALLOW_UNTRUSTED_KEXTS | \ - CSR_ALLOW_UNRESTRICTED_FS | \ - CSR_ALLOW_TASK_FOR_PID | \ - CSR_ALLOW_KERNEL_DEBUGGER | \ - CSR_ALLOW_APPLE_INTERNAL | \ - CSR_ALLOW_UNRESTRICTED_DTRACE | \ - CSR_ALLOW_UNRESTRICTED_NVRAM | \ - CSR_ALLOW_DEVICE_CONFIGURATION | \ - CSR_ALLOW_ANY_RECOVERY_OS | \ - CSR_ALLOW_UNAPPROVED_KEXTS | \ - CSR_ALLOW_EXECUTABLE_POLICY_OVERRIDE) + CSR_ALLOW_UNRESTRICTED_FS | \ + CSR_ALLOW_TASK_FOR_PID | \ + CSR_ALLOW_KERNEL_DEBUGGER | \ + CSR_ALLOW_APPLE_INTERNAL | \ + CSR_ALLOW_UNRESTRICTED_DTRACE | \ + CSR_ALLOW_UNRESTRICTED_NVRAM | \ + CSR_ALLOW_DEVICE_CONFIGURATION | \ + CSR_ALLOW_ANY_RECOVERY_OS | \ + CSR_ALLOW_UNAPPROVED_KEXTS | \ + CSR_ALLOW_EXECUTABLE_POLICY_OVERRIDE | \ + CSR_ALLOW_UNAUTHENTICATED_ROOT) #define CSR_ALWAYS_ENFORCED_FLAGS (CSR_ALLOW_DEVICE_CONFIGURATION | CSR_ALLOW_ANY_RECOVERY_OS) +/* Flags set by `csrutil disable`. 
*/ +#define CSR_DISABLE_FLAGS (CSR_ALLOW_UNTRUSTED_KEXTS | \ + CSR_ALLOW_UNRESTRICTED_FS | \ + CSR_ALLOW_TASK_FOR_PID | \ + CSR_ALLOW_KERNEL_DEBUGGER | \ + CSR_ALLOW_APPLE_INTERNAL | \ + CSR_ALLOW_UNRESTRICTED_DTRACE | \ + CSR_ALLOW_UNRESTRICTED_NVRAM) + /* CSR capabilities that a booter can give to the system */ -#define CSR_CAPABILITY_UNLIMITED (1 << 0) -#define CSR_CAPABILITY_CONFIG (1 << 1) +#define CSR_CAPABILITY_UNLIMITED (1 << 0) +#define CSR_CAPABILITY_CONFIG (1 << 1) #define CSR_CAPABILITY_APPLE_INTERNAL (1 << 2) #define CSR_VALID_CAPABILITIES (CSR_CAPABILITY_UNLIMITED | CSR_CAPABILITY_CONFIG | CSR_CAPABILITY_APPLE_INTERNAL) @@ -86,10 +97,6 @@ enum csr_syscalls { __BEGIN_DECLS -#ifdef XNU_KERNEL_PRIVATE -void csr_init(void); -#endif - /* Syscalls */ int csr_check(csr_config_t mask); int csr_get_active_config(csr_config_t *config); diff --git a/bsd/sys/disk.h b/bsd/sys/disk.h index 66d317902..99222c0cd 100644 --- a/bsd/sys/disk.h +++ b/bsd/sys/disk.h @@ -349,9 +349,7 @@ typedef struct dk_apfs_wbc_range { #endif /* KERNEL */ #ifdef PRIVATE -#if (TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR) #define _DKIOCSETSTATIC _IO('d', 84) -#endif /* (TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR) */ #endif /* PRIVATE */ #endif /* _SYS_DISK_H_ */ diff --git a/bsd/sys/dtrace.h b/bsd/sys/dtrace.h index a5e6aae66..b5a0d018d 100644 --- a/bsd/sys/dtrace.h +++ b/bsd/sys/dtrace.h @@ -303,6 +303,7 @@ typedef enum dtrace_probespec { #define DIF_VAR_ARGS 0x0000 /* arguments array */ #define DIF_VAR_REGS 0x0001 /* registers array */ #define DIF_VAR_UREGS 0x0002 /* user registers array */ +#define DIF_VAR_VMREGS 0x0003 /* virtual machine registers array */ #define DIF_VAR_CURTHREAD 0x0100 /* thread pointer */ #define DIF_VAR_TIMESTAMP 0x0101 /* timestamp */ #define DIF_VAR_VTIMESTAMP 0x0102 /* virtual timestamp */ @@ -339,12 +340,13 @@ typedef enum dtrace_probespec { #if defined(__APPLE__) #define DIF_VAR_PTHREAD_SELF 0x0200 /* Apple specific PTHREAD_SELF (Not currently supported!) */ #define DIF_VAR_DISPATCHQADDR 0x0201 /* Apple specific dispatch queue addr */ -#define DIF_VAR_MACHTIMESTAMP 0x0202 /* mach_absolute_timestamp() */ +#define DIF_VAR_MACHTIMESTAMP 0x0202 /* mach_absolute_time() */ #define DIF_VAR_CPU 0x0203 /* cpu number */ #define DIF_VAR_CPUINSTRS 0x0204 /* cpu instructions */ #define DIF_VAR_CPUCYCLES 0x0205 /* cpu cycles */ #define DIF_VAR_VINSTRS 0x0206 /* virtual instructions */ #define DIF_VAR_VCYCLES 0x0207 /* virtual cycles */ +#define DIF_VAR_MACHCTIMESTAMP 0x0208 /* mach_continuous_time() */ #endif /* __APPLE __ */ #define DIF_SUBR_RAND 0 @@ -404,7 +406,11 @@ typedef enum dtrace_probespec { #define DIF_SUBR_VM_KERNEL_ADDRPERM 200 #define DIF_SUBR_KDEBUG_TRACE 201 #define DIF_SUBR_KDEBUG_TRACE_STRING 202 -#define DIF_SUBR_APPLE_MAX 202 /* max apple-specific subroutine value */ +#define DIF_SUBR_MTONS 203 +#define DIF_SUBR_PHYSMEM_READ 204 +#define DIF_SUBR_PHYSMEM_WRITE 205 +#define DIF_SUBR_KVTOPHYS 206 +#define DIF_SUBR_APPLE_MAX 206 /* max apple-specific subroutine value */ #endif /* __APPLE__ */ typedef uint32_t dif_instr_t; diff --git a/bsd/sys/dtrace_glue.h b/bsd/sys/dtrace_glue.h index be144f4d4..ee65fe167 100644 --- a/bsd/sys/dtrace_glue.h +++ b/bsd/sys/dtrace_glue.h @@ -32,6 +32,7 @@ #ifdef KERNEL_BUILD #include +#include #include #include #include @@ -143,8 +144,6 @@ extern cpu_core_t *cpu_core; extern unsigned int dtrace_max_cpus; /* max number of enabled cpus */ #define NCPU dtrace_max_cpus -extern int cpu_number(void); /* From #include . Called from probe context, must blacklist. 
*/ - #define CPU (&(cpu_list[cpu_number()])) /* Pointer to current CPU */ #define CPU_ON_INTR(cpup) ml_at_interrupt_context() /* always invoked on current cpu */ @@ -464,7 +463,6 @@ extern int vuprintf(const char *, va_list); extern hrtime_t dtrace_abs_to_nano(uint64_t); -__private_extern__ const char * strstr(const char *, const char *); const void* bsearch(const void*, const void*, size_t, size_t, int (*compar)(const void *, const void *)); int dtrace_copy_maxsize(void); diff --git a/bsd/sys/dtrace_impl.h b/bsd/sys/dtrace_impl.h index e7bb58697..bb70011d2 100644 --- a/bsd/sys/dtrace_impl.h +++ b/bsd/sys/dtrace_impl.h @@ -915,6 +915,7 @@ typedef struct dtrace_vstate { #define DTRACE_MSTATE_USTACKDEPTH 0x00000200 #define DTRACE_MSTATE_UCALLER 0x00000400 #define DTRACE_MSTATE_MACHTIMESTAMP 0x00000800 +#define DTRACE_MSTATE_MACHCTIMESTAMP 0x00001000 typedef struct dtrace_mstate { uintptr_t dtms_scratch_base; /* base of scratch space */ @@ -926,6 +927,7 @@ typedef struct dtrace_mstate { uint64_t dtms_timestamp; /* cached timestamp */ hrtime_t dtms_walltimestamp; /* cached wall timestamp */ uint64_t dtms_machtimestamp; /* cached mach absolute timestamp */ + uint64_t dtms_machctimestamp; /* cached mach continuous timestamp */ int dtms_stackdepth; /* cached stackdepth */ int dtms_ustackdepth; /* cached ustackdepth */ struct dtrace_probe *dtms_probe; /* current probe */ @@ -1377,6 +1379,7 @@ extern uint64_t dtrace_load64(uintptr_t); extern int dtrace_canload(uint64_t, size_t, dtrace_mstate_t*, dtrace_vstate_t*); extern uint64_t dtrace_getreg(struct regs *, uint_t); +extern uint64_t dtrace_getvmreg(uint_t); extern int dtrace_getstackdepth(int); extern void dtrace_getupcstack(uint64_t *, int); extern void dtrace_getufpstack(uint64_t *, uint64_t *, int); @@ -1402,6 +1405,9 @@ extern void dtrace_copystr(uintptr_t, uintptr_t, size_t, volatile uint16_t *); extern void* dtrace_ptrauth_strip(void*, uint64_t); extern int dtrace_is_valid_ptrauth_key(uint64_t); +extern uint64_t dtrace_physmem_read(uint64_t, size_t); +extern void dtrace_physmem_write(uint64_t, uint64_t, size_t); + /* * DTrace state handling */ diff --git a/bsd/sys/ev.h b/bsd/sys/ev.h index 5f1e8813d..99d1195c5 100644 --- a/bsd/sys/ev.h +++ b/bsd/sys/ev.h @@ -30,7 +30,6 @@ #ifndef _SYS_EV_H_ #define _SYS_EV_H_ - #include #include @@ -78,42 +77,4 @@ __END_DECLS #endif - -#ifdef BSD_KERNEL_PRIVATE - - - -struct eventreq32 { - int er_type; - int er_handle; - uint32_t er_data; - int er_rcnt; - int er_wcnt; - int er_ecnt; - int er_eventbits; -}; - -struct eventreq64 { - int er_type; - int er_handle; - user_addr_t er_data; - int er_rcnt; - int er_wcnt; - int er_ecnt; - int er_eventbits; -}; - -struct eventqelt { - TAILQ_ENTRY(eventqelt) ee_slist; - TAILQ_ENTRY(eventqelt) ee_plist; - struct eventreq64 ee_req; - struct proc * ee_proc; - u_int ee_flags; -#define EV_QUEUED 0x01 - u_int ee_eventmask; -}; - -int waitevent_close(struct proc *p, struct fileproc *); -#endif /* BSD_KERNEL_PRIVATE */ - #endif /* _SYS_EV_H_ */ diff --git a/bsd/sys/event.h b/bsd/sys/event.h index 6303c4915..cd76a0528 100644 --- a/bsd/sys/event.h +++ b/bsd/sys/event.h @@ -465,7 +465,7 @@ enum { #define NOTE_EXEC 0x20000000 /* process exec'd */ #define NOTE_REAP ((unsigned int)eNoteReapDeprecated /* 0x10000000 */ ) /* process reaped */ #define NOTE_SIGNAL 0x08000000 /* shared with EVFILT_SIGNAL */ -#define NOTE_EXITSTATUS 0x04000000 /* exit status to be returned, valid for child process only */ +#define NOTE_EXITSTATUS 0x04000000 /* exit status to be returned, valid for child 
process or when allowed to signal target pid */ #define NOTE_EXIT_DETAIL 0x02000000 /* provide details on reasons for exit */ #define NOTE_PDATAMASK 0x000fffff /* mask for signal & exit status */ @@ -670,6 +670,7 @@ SLIST_HEAD(klist, knote); #ifdef XNU_KERNEL_PRIVATE #include +#include #include #include /* FREAD, FWRITE */ #include /* panic */ @@ -701,9 +702,18 @@ __options_decl(kn_status_t, uint16_t /* 12 bits really */, { KN_SUPPRESSED = 0x800, /* event is suppressed during delivery */ }); -#define KNOTE_KQ_BITSIZE 42 -_Static_assert(KNOTE_KQ_BITSIZE > VM_KERNEL_POINTER_SIGNIFICANT_BITS, - "Make sure sign extending kn_kq_packed is legit"); +#if __LP64__ +#define KNOTE_KQ_PACKED_BITS 42 +#define KNOTE_KQ_PACKED_SHIFT 0 +#define KNOTE_KQ_PACKED_BASE 0 +#else +#define KNOTE_KQ_PACKED_BITS 32 +#define KNOTE_KQ_PACKED_SHIFT 0 +#define KNOTE_KQ_PACKED_BASE 0 +#endif + +_Static_assert(!VM_PACKING_IS_BASE_RELATIVE(KNOTE_KQ_PACKED), + "Make sure the knote pointer packing is based on arithmetic shifts"); struct kqueue; struct knote { @@ -719,9 +729,9 @@ struct knote { kn_vnode_kqok:1, kn_vnode_use_ofst:1; #if __LP64__ - intptr_t kn_kq_packed : KNOTE_KQ_BITSIZE; + uintptr_t kn_kq_packed : KNOTE_KQ_PACKED_BITS; #else - intptr_t kn_kq_packed; + uintptr_t kn_kq_packed; #endif /* per filter stash of data (pointer, uint32_t or uint64_t) */ @@ -795,7 +805,7 @@ struct knote { static inline struct kqueue * knote_get_kq(struct knote *kn) { - return (struct kqueue *)kn->kn_kq_packed; + return (struct kqueue *)VM_UNPACK_POINTER(kn->kn_kq_packed, KNOTE_KQ_PACKED); } static inline int @@ -1125,7 +1135,7 @@ extern const struct filterops *knote_fops(struct knote *kn); extern struct turnstile *kqueue_turnstile(struct kqueue *); extern struct turnstile *kqueue_alloc_turnstile(struct kqueue *); -int kevent_proc_copy_uptrs(void *proc, uint64_t *buf, int bufsize); +int kevent_proc_copy_uptrs(void *proc, uint64_t *buf, uint32_t bufsize); int kevent_copyout_proc_dynkqids(void *proc, user_addr_t ubuf, uint32_t ubufsize, int32_t *nkqueues_out); int kevent_copyout_dynkqinfo(void *proc, kqueue_id_t kq_id, user_addr_t ubuf, diff --git a/bsd/sys/eventhandler.h b/bsd/sys/eventhandler.h index 7934d169e..307546942 100644 --- a/bsd/sys/eventhandler.h +++ b/bsd/sys/eventhandler.h @@ -56,6 +56,8 @@ #ifndef _SYS_EVENTHANDLER_H_ #define _SYS_EVENTHANDLER_H_ +#include + #include #include #include @@ -126,7 +128,7 @@ typedef struct eventhandler_entry *eventhandler_tag; EHL_UNLOCK((list)); \ _t = (struct eventhandler_entry_ ## name *)_ep; \ evhlog((LOG_DEBUG, "eventhandler_invoke: executing %p", \ - VM_KERNEL_UNSLIDE((void *)_t->eh_func))); \ + (void *)VM_KERNEL_UNSLIDE((void *)_t->eh_func))); \ _t->eh_func(_ep->ee_arg , ## __VA_ARGS__); \ EHL_LOCK_SPIN((list)); \ } \ @@ -181,7 +183,7 @@ do { \ } while (0) #define EVENTHANDLER_REGISTER(evthdlr_ref, name, func, arg, priority) \ - eventhandler_register(evthdlr_ref, NULL, #name, func, arg, priority) + eventhandler_register(evthdlr_ref, NULL, #name, ptrauth_nop_cast(void *, &func), arg, priority) #define EVENTHANDLER_DEREGISTER(evthdlr_ref, name, tag) \ do { \ diff --git a/bsd/sys/fcntl.h b/bsd/sys/fcntl.h index f0f301865..d68a27bde 100644 --- a/bsd/sys/fcntl.h +++ b/bsd/sys/fcntl.h @@ -109,96 +109,97 @@ * which was documented to use FREAD/FWRITE, continues to work. 
*/ #if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE) -#define FREAD 0x0001 -#define FWRITE 0x0002 +#define FREAD 0x00000001 +#define FWRITE 0x00000002 #endif -#define O_NONBLOCK 0x0004 /* no delay */ -#define O_APPEND 0x0008 /* set append mode */ +#define O_NONBLOCK 0x00000004 /* no delay */ +#define O_APPEND 0x00000008 /* set append mode */ #include #if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE) -#define O_SHLOCK 0x0010 /* open with shared file lock */ -#define O_EXLOCK 0x0020 /* open with exclusive file lock */ -#define O_ASYNC 0x0040 /* signal pgrp when data ready */ +#define O_SHLOCK 0x00000010 /* open with shared file lock */ +#define O_EXLOCK 0x00000020 /* open with exclusive file lock */ +#define O_ASYNC 0x00000040 /* signal pgrp when data ready */ #define O_FSYNC O_SYNC /* source compatibility: do not use */ -#define O_NOFOLLOW 0x0100 /* don't follow symlinks */ +#define O_NOFOLLOW 0x00000100 /* don't follow symlinks */ #endif /* (_POSIX_C_SOURCE && !_DARWIN_C_SOURCE) */ -#define O_CREAT 0x0200 /* create if nonexistant */ -#define O_TRUNC 0x0400 /* truncate to zero length */ -#define O_EXCL 0x0800 /* error if already exists */ +#define O_CREAT 0x00000200 /* create if nonexistant */ +#define O_TRUNC 0x00000400 /* truncate to zero length */ +#define O_EXCL 0x00000800 /* error if already exists */ #ifdef KERNEL -#define FMARK 0x1000 /* mark during gc() */ -#define FDEFER 0x2000 /* defer for next gc pass */ -#define FHASLOCK 0x4000 /* descriptor holds advisory lock */ -#endif - -#if __DARWIN_C_LEVEL >= 200809L -/* - * Descriptor value for the current working directory - */ -#define AT_FDCWD -2 - -/* - * Flags for the at functions - */ -#define AT_EACCESS 0x0010 /* Use effective ids in access check */ -#define AT_SYMLINK_NOFOLLOW 0x0020 /* Act on the symlink itself not the target */ -#define AT_SYMLINK_FOLLOW 0x0040 /* Act on target of symlink */ -#define AT_REMOVEDIR 0x0080 /* Path refers to directory */ -#if __DARWIN_C_LEVEL >= __DARWIN_C_FULL -#ifdef PRIVATE -#define AT_REMOVEDIR_DATALESS 0x0100 /* Remove a dataless directory without materializing first */ -#endif -#define AT_REALDEV 0x0200 /* Return real device inodes resides on for fstatat(2) */ -#define AT_FDONLY 0x0400 /* Use only the fd and Ignore the path for fstatat(2) */ -#endif +#define FMARK 0x00001000 /* mark during gc() */ +#define FDEFER 0x00002000 /* defer for next gc pass */ +#define FWASLOCKED 0x00004000 /* has or has had an advisory fcntl lock */ +#define FHASLOCK FWASLOCKED /* obsolete compatibility name */ #endif #if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE) -#define O_EVTONLY 0x8000 /* descriptor requested for event notifications only */ +#define O_EVTONLY 0x00008000 /* descriptor requested for event notifications only */ #endif #ifdef KERNEL -#define FWASWRITTEN 0x10000 /* descriptor was written */ +#define FWASWRITTEN 0x00010000 /* descriptor was written */ #endif -#define O_NOCTTY 0x20000 /* don't assign controlling terminal */ +#define O_NOCTTY 0x00020000 /* don't assign controlling terminal */ #ifdef KERNEL -#define FNOCACHE 0x40000 /* fcntl(F_NOCACHE, 1) */ -#define FNORDAHEAD 0x80000 /* fcntl(F_RDAHEAD, 0) */ +#define FNOCACHE 0x00040000 /* fcntl(F_NOCACHE, 1) */ +#define FNORDAHEAD 0x00080000 /* fcntl(F_RDAHEAD, 0) */ #endif #if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE) -#define O_DIRECTORY 0x100000 -#define O_SYMLINK 0x200000 /* allow open of a symlink */ +#define O_DIRECTORY 0x00100000 +#define O_SYMLINK 0x00200000 /* allow open of a symlink */ #endif +// 
O_DSYNC 0x00400000 /* synch I/O data integrity */ #include #ifdef KERNEL -#define FNODIRECT 0x800000 /* fcntl(F_NODIRECT, 1) */ +#define FNODIRECT 0x00800000 /* fcntl(F_NODIRECT, 1) */ #endif #if __DARWIN_C_LEVEL >= 200809L -#define O_CLOEXEC 0x1000000 /* implicitly set FD_CLOEXEC */ +#define O_CLOEXEC 0x01000000 /* implicitly set FD_CLOEXEC */ #endif #ifdef KERNEL -#define FENCRYPTED 0x2000000 +#define FENCRYPTED 0x02000000 +#define FSINGLE_WRITER 0x04000000 /* fcntl(F_SINGLE_WRITER, 1) */ +#define O_CLOFORK 0x08000000 /* implicitly set FD_CLOFORK */ +#define FUNENCRYPTED 0x10000000 #endif -#ifdef KERNEL -#define FSINGLE_WRITER 0x4000000 /* fcntl(F_SINGLE_WRITER, 1) */ +#if __DARWIN_C_LEVEL >= __DARWIN_C_FULL +#define O_NOFOLLOW_ANY 0x20000000 /* no symlinks allowed in path */ #endif #ifdef KERNEL -#define O_CLOFORK 0x8000000 /* implicitly set FD_CLOFORK */ +/* End of File status flags (fileglob::fg_flag) */ #endif -#ifdef KERNEL -#define FUNENCRYPTED 0x10000000 +#if __DARWIN_C_LEVEL >= 200809L +/* + * Descriptor value for the current working directory + */ +#define AT_FDCWD -2 + +/* + * Flags for the at functions + */ +#define AT_EACCESS 0x0010 /* Use effective ids in access check */ +#define AT_SYMLINK_NOFOLLOW 0x0020 /* Act on the symlink itself not the target */ +#define AT_SYMLINK_FOLLOW 0x0040 /* Act on target of symlink */ +#define AT_REMOVEDIR 0x0080 /* Path refers to directory */ +#if __DARWIN_C_LEVEL >= __DARWIN_C_FULL +#ifdef PRIVATE +#define AT_REMOVEDIR_DATALESS 0x0100 /* Remove a dataless directory without materializing first */ +#endif +#define AT_REALDEV 0x0200 /* Return real device inodes resides on for fstatat(2) */ +#define AT_FDONLY 0x0400 /* Use only the fd and Ignore the path for fstatat(2) */ +#endif #endif /* Data Protection Flags */ @@ -266,7 +267,7 @@ #define F_FLUSH_DATA 40 #define F_CHKCLEAN 41 /* Used for regression test */ #define F_PREALLOCATE 42 /* Preallocate storage */ -#define F_SETSIZE 43 /* Truncate a file without zeroing space */ +#define F_SETSIZE 43 /* Truncate a file. 
Equivalent to calling truncate(2) */ #define F_RDADVISE 44 /* Issue an advisory read async with no copy to user */ #define F_RDAHEAD 45 /* turn read ahead off/on for this fd */ /* @@ -379,6 +380,10 @@ #define F_GETPATH_NOFIRMLINK 102 /* return the full path without firmlinks of the fd */ +#define F_ADDFILESIGS_INFO 103 /* Add signature from same file, return information */ +#define F_ADDFILESUPPL 104 /* Add supplemental signature from same file with fd reference to original */ +#define F_GETSIGSINFO 105 /* Look up code signature information attached to a file or slice */ + // FS-specific fcntl()'s numbers begin at 0x00010000 and go up #define FCNTL_FS_SPECIFIC_BASE 0x00010000 @@ -485,61 +490,23 @@ struct radvisory { #pragma pack() #endif /* KERNEL */ -#ifndef KERNEL -/** Information the user passes in to get the codeblobs out of the kernel */ -typedef struct fcodeblobs { - void *f_cd_hash; - size_t f_hash_size; - void *f_cd_buffer; - size_t f_cd_size; - unsigned int *f_out_size; - int f_arch; - int __padding; -} fcodeblobs_t; -#endif /* KERNEL */ - -#ifdef KERNEL -typedef struct user32_fcodeblobs { - user32_addr_t f_cd_hash; - user32_size_t f_hash_size; - user32_addr_t f_cd_buffer; - user32_size_t f_cd_size; - user32_addr_t f_out_size; - int f_arch; -} user32_fcodeblobs_t; - -/* LP64 version of fcodeblobs */ -typedef struct user64_fcodeblobs { - user64_addr_t f_cd_hash; - user64_size_t f_hash_size; - user64_addr_t f_cd_buffer; - user64_size_t f_cd_size; - user64_addr_t f_out_size; - int f_arch; - int __padding; -} user64_fcodeblobs_t; - -/* kernel version of fcodeblobs */ -typedef struct user_fcodeblobs { - user_addr_t f_cd_hash; - user_size_t f_hash_size; - user_addr_t f_cd_buffer; - user_size_t f_cd_size; - user_addr_t f_out_size; - int f_arch; -} user_fcodeblobs_t; -#endif /* KERNEL */ - /* * detached code signatures data type - * information passed by user to system used by F_ADDSIGS and F_ADDFILESIGS. * F_ADDFILESIGS is a shortcut for files that contain their own signature and * doesn't require mapping of the file in order to load the signature. */ +#define USER_FSIGNATURES_CDHASH_LEN 20 typedef struct fsignatures { off_t fs_file_start; void *fs_blob_start; size_t fs_blob_size; + + /* The following fields are only applicable to F_ADDFILESIGS_INFO (64bit only). */ + /* Prior to F_ADDFILESIGS_INFO, this struct ended after fs_blob_size. */ + size_t fs_fsignatures_size;// input: size of this struct (for compatibility) + char fs_cdhash[USER_FSIGNATURES_CDHASH_LEN]; // output: cdhash + int fs_hash_type;// output: hash algorithm type for cdhash } fsignatures_t; #ifdef KERNEL /* LP64 version of fsignatures. all pointers @@ -559,9 +526,37 @@ typedef struct user_fsignatures { /* F_ADDFILESIGS: offset of signature */ /* in Mach-O image */ user_size_t fs_blob_size; /* size of signature blob */ + + /* The following fields are only applicable to F_ADDFILESIGS_INFO. */ + /* Prior to F_ADDFILESIGS_INFO, this struct ended after fs_blob_size. 
*/ + user_size_t fs_fsignatures_size;// input: size of this struct (for compatibility) + char fs_cdhash[USER_FSIGNATURES_CDHASH_LEN]; // output: cdhash + int fs_hash_type;//output: hash algorithm type for cdhash } user_fsignatures_t; #endif /* KERNEL */ +typedef struct fsupplement { + off_t fs_file_start; /* offset of Mach-O image in FAT file */ + off_t fs_blob_start; /* offset of signature in Mach-O image */ + size_t fs_blob_size; /* signature blob size */ + int fs_orig_fd; /* address of original image */ +} fsupplement_t; + +#ifdef KERNEL +/* LP64 version of fsupplement. + * Supplements are not supported for 32 bit. + * WARNING - keep in sync with fsupplement. + */ + +typedef struct user_fsupplement { + off_t fs_file_start; /* offset of Mach-O image in FAT file */ + off_t fs_blob_start; /* offset of signature in Mach-O image */ + size_t fs_blob_size; /* signature blob size */ + int fs_orig_fd; /* file descriptor to original image */ +} user_fsupplement_t; +#endif /* KERNEL */ + + /* * DYLD needs to check if the object is allowed to be combined * into the main binary. This is done between the code signature @@ -598,6 +593,19 @@ typedef struct user_fchecklv { #endif /* KERNEL */ +/* At this time F_GETSIGSINFO can only indicate platformness. + * As additional requestable information is defined, new keys will be added and the + * fgetsigsinfo_t structure will be lengthened to add space for the additional information + */ +#define GETSIGSINFO_PLATFORM_BINARY 1 + +/* fgetsigsinfo_t used by F_GETSIGSINFO command */ +typedef struct fgetsigsinfo { + off_t fg_file_start; /* IN: Offset in the file to look for a signature, -1 for any signature */ + int fg_info_request; /* IN: Key indicating the info requested */ + int fg_sig_is_platform; /* OUT: 1 if the signature is a plat form binary, 0 if not */ +} fgetsigsinfo_t; + /* lock operations for flock(2) */ #define LOCK_SH 0x01 /* shared file lock */ diff --git a/bsd/sys/file.h b/bsd/sys/file.h index d9f6b1a5c..4ff58dd40 100644 --- a/bsd/sys/file.h +++ b/bsd/sys/file.h @@ -101,6 +101,7 @@ struct fileglob; struct fileproc; struct vnode; int fp_getfvp(struct proc *p, int fd, struct fileproc **resultfp, struct vnode **resultvp); +int fp_get_pipe_id(proc_t p, int fd, uint64_t *result_pipe_id); struct vnode *fg_get_vnode(struct fileglob *fg); #endif /* KERNEL_PRIVATE */ __END_DECLS diff --git a/bsd/sys/file_internal.h b/bsd/sys/file_internal.h index e5fde0760..90175d11c 100644 --- a/bsd/sys/file_internal.h +++ b/bsd/sys/file_internal.h @@ -68,7 +68,7 @@ #include #include -#ifdef KERNEL +#ifdef XNU_KERNEL_PRIVATE #include #include #include @@ -78,14 +78,20 @@ #include #include +__BEGIN_DECLS + +#pragma GCC visibility push(hidden) + struct proc; struct uio; struct knote; struct kevent_qos_s; - -#ifdef __APPLE_API_UNSTABLE - struct file; +#ifndef _KAUTH_CRED_T +#define _KAUTH_CRED_T +typedef struct ucred *kauth_cred_t; +typedef struct posix_cred *posix_cred_t; +#endif /* !_KAUTH_CRED_T */ __options_decl(fileproc_vflags_t, unsigned int, { FPV_NONE = 0, @@ -97,24 +103,17 @@ __options_decl(fileproc_vflags_t, unsigned int, { * One entry for each open kernel vnode and socket. 
*/ struct fileproc { - unsigned int f_flags; - _Atomic fileproc_vflags_t f_vflags; - os_refcnt_t f_iocount; - struct fileglob * f_fglob; - void *f_wset; + unsigned int fp_flags; + _Atomic fileproc_vflags_t fp_vflags; + os_refcnt_t fp_iocount; + struct fileglob * fp_glob; + void *fp_wset; }; #define FILEPROC_NULL (struct fileproc *)0 -#define FP_INCREATE 0x0001 -#define FP_INCLOSE 0x0002 #define FP_INSELECT 0x0004 -#define FP_UNUSED 0x0008 /* unused (was FP_INCHRREAD) */ -#define FP_WRITTEN 0x0010 -#define FP_CLOSING 0x0020 -#define FP_WAITCLOSE 0x0040 #define FP_AIOISSUED 0x0080 -#define FP_WAITEVENT 0x0100 #define FP_SELCONFLICT 0x0200 /* select conflict on an individual fp */ /* squeeze a "type" value into the upper flag bits */ @@ -122,7 +121,7 @@ struct fileproc { #define _FP_TYPESHIFT 24 #define FP_TYPEMASK (0x7 << _FP_TYPESHIFT) /* 8 "types" of fileproc */ -#define FILEPROC_TYPE(fp) ((fp)->f_flags & FP_TYPEMASK) +#define FILEPROC_TYPE(fp) ((fp)->fp_flags & FP_TYPEMASK) #define FP_ISGUARDED(fp, attribs) \ ((FILEPROC_TYPE(fp) == FTYPE_GUARDED) ? fp_isguarded(fp, attribs) : 0) @@ -132,18 +131,6 @@ typedef enum { FTYPE_GUARDED = (1 << _FP_TYPESHIFT) } fileproc_type_t; -#define FP_VALID_FLAGS (FP_INCREATE | FP_INCLOSE | FP_INSELECT |\ - FP_WRITTEN | FP_CLOSING | FP_WAITCLOSE |\ - FP_AIOISSUED | FP_WAITEVENT | FP_SELCONFLICT | _FP_TYPEMASK) - -#ifndef _KAUTH_CRED_T -#define _KAUTH_CRED_T -struct ucred; -typedef struct ucred *kauth_cred_t; -struct posix_cred; -typedef struct posix_cred *posix_cred_t; -#endif /* !_KAUTH_CRED_T */ - /* file types */ typedef enum { DTYPE_VNODE = 1, /* file */ @@ -158,7 +145,7 @@ typedef enum { } file_type_t; /* defines for fg_lflags */ -#define FG_TERM 0x01 /* the fileglob is terminating .. */ +// was FG_TERM 0x01 #define FG_INSMSGQ 0x02 /* insert to msgqueue pending .. */ #define FG_WINSMSGQ 0x04 /* wait for the fielglob is in msgque */ #define FG_RMMSGQ 0x08 /* the fileglob is being removed from msgqueue */ @@ -188,34 +175,190 @@ struct fileops { }; struct fileglob { - LIST_ENTRY(fileglob) f_msglist;/* list of active files */ - int32_t fg_flag; /* see fcntl.h */ - int32_t fg_count; /* reference count */ - int32_t fg_msgcount; /* references from message queue */ - int32_t fg_lflags; /* file global flags */ - kauth_cred_t fg_cred; /* credentials associated with descriptor */ + LIST_ENTRY(fileglob) f_msglist; /* list of files in unix messages */ + uint32_t fg_flag; /* (atomic) see fcntl.h */ + os_ref_atomic_t fg_count; /* reference count */ + uint32_t fg_msgcount; /* references from message queue */ + int32_t fg_lflags; /* file global flags */ + kauth_cred_t fg_cred; /* credentials associated with descriptor */ const struct fileops *fg_ops; - off_t fg_offset; - void *fg_data; /* vnode or socket or SHM or semaphore */ - void *fg_vn_data; /* Per fd vnode data, used for directories */ - lck_mtx_t fg_lock; + off_t fg_offset; + void *fg_data; /* vnode or socket or SHM or semaphore */ + void *fg_vn_data; /* Per fd vnode data, used for directories */ + lck_mtx_t fg_lock; #if CONFIG_MACF - struct label *fg_label; /* JMM - use the one in the cred? */ + struct label *fg_label; /* JMM - use the one in the cred? 
*/ #endif }; -#ifdef __APPLE_API_PRIVATE -LIST_HEAD(fmsglist, fileglob); -extern struct fmsglist fmsghead; /* head of list of open files */ extern int maxfiles; /* kernel limit on number of open files */ extern int nfiles; /* actual number of open files */ extern int maxfilesperproc; +os_refgrp_decl_extern(f_refgrp); /* os_refgrp_t for file refcounts */ #define FILEGLOB_DTYPE(fg) ((const file_type_t)((fg)->fg_ops->fo_type)) -#endif /* __APPLE_API_PRIVATE */ +#pragma mark files (struct fileglob) -__BEGIN_DECLS +/*! + * @function fg_ref + * + * @brief + * Acquire a file reference on the specified file. + * + * @param fg + * The specified file + */ +void +fg_ref(struct fileglob *fg); + +/*! + * @function fg_drop + * + * @brief + * Drops a file reference on the specified file. + * + * @discussion + * No locks should be held when calling this function. + * + * @param p + * The process making the request, + * or PROC_NULL if the file belongs to a message/fileport. + * + * @param fg + * The file being closed. + * + * @returns + * 0 Success + * ??? Any error that @c fileops::fo_close can return + */ +int +fg_drop(proc_t p, struct fileglob *fg); + +/*! + * @function fg_sendable + * + * @brief + * Returns whether a particular file can be sent over IPC. + */ +bool +fg_sendable(struct fileglob *fg); + +#pragma mark file descriptor entries (struct fileproc) + +/*! + * @function fp_get_ftype + * + * @brief + * Get the fileproc pointer for the given fd from the per process, with the + * specified file type, and with an I/O reference. + * + * @param p + * The process in which fd lives. + * + * @param fd + * The file descriptor index to lookup. + * + * @param ftype + * The required file type. + * + * @param err + * The error to return if the file exists but isn't of the specified type. + * + * @param fpp + * The returned fileproc when the call returns 0. + * + * @returns + * 0 Success (@c fpp is set) + * EBADF Bad file descriptor + * @c err There is an entry, but it isn't of the specified type. + */ +extern int +fp_get_ftype(proc_t p, int fd, file_type_t ftype, int err, struct fileproc **fpp); + +/*! + * @function fp_get_noref_locked + * + * @brief + * Get the fileproc pointer for the given fd from the per process + * open file table without taking an explicit reference on it. + * + * @description + * This function assumes that the @c proc_fdlock is held, as the caller + * doesn't hold an I/O reference for the returned fileproc. + * + * Because there is no reference explicitly taken, the returned + * fileproc pointer is only valid so long as the @c proc_fdlock + * remains held by the caller. + * + * @param p + * The process in which fd lives. + * + * @param fd + * The file descriptor index to lookup. + * + * @returns + * - the fileproc on success + * - FILEPROC_NULL on error + */ +extern struct fileproc * +fp_get_noref_locked(proc_t p, int fd); + +/*! + * @function fp_get_noref_locked_with_iocount + * + * @brief + * Similar to fp_get_noref_locked(), but allows returning files that are + * closing. + * + * @discussion + * Some parts of the kernel take I/O references on fileprocs but only remember + * the file descriptor for which they did that. + * + * These interfaces later need to drop that reference, but if the file is + * already closing, then fp_get_noref_locked() will refuse to resolve + * the file descriptor. + * + * This interface allows the lookup (but will assert that the fileproc has + * enouhg I/O references). 
+ * + * @warning + * New code should NOT use this function, it is required for interfaces + * that acquire iocounts without remembering the fileproc pointer, + * which is bad practice. + */ +extern struct fileproc * +fp_get_noref_locked_with_iocount(proc_t p, int fd); + +/*! + * @function fp_close_and_unlock + * + * @brief + * Close the given file descriptor entry. + * + * @description + * This function assumes that the @c proc_fdlock is held, + * and that the caller holds no additional I/O reference + * on the specified file descriptor entry. + * + * The @c proc_fdlock is unlocked upon return. + * + * @param p + * The process in which fd lives. + * + * @param fd + * The file descriptor index being closed. + * + * @param fp + * The fileproc entry associated with @c fd. + * + * @returns + * 0 Success + * EBADF Bad file descriptor + * ??? Any error that @c fileops::fo_close can return. + */ +extern int +fp_close_and_unlock(proc_t p, int fd, struct fileproc *fp, int flags); /* wrappers for fp->f_ops->fo_... */ int fo_read(struct fileproc *fp, struct uio *uio, int flags, vfs_context_t ctx); @@ -236,32 +379,12 @@ int fo_no_select(struct fileproc *fp, int which, void *wql, vfs_context_t ctx); int fo_no_drain(struct fileproc *fp, vfs_context_t ctx); int fo_no_kqfilter(struct fileproc *, struct knote *, struct kevent_qos_s *kev); -void fileproc_drain(proc_t, struct fileproc *); int fp_tryswap(proc_t, int fd, struct fileproc *nfp); int fp_drop(struct proc *p, int fd, struct fileproc *fp, int locked); -int fp_drop_written(proc_t p, int fd, struct fileproc *fp); -int fp_drop_event(proc_t p, int fd, struct fileproc *fp); void fp_free(struct proc * p, int fd, struct fileproc * fp); -struct kqueue; -int fp_getfkq(struct proc *p, int fd, struct fileproc **resultfp, struct kqueue **resultkq); -struct psemnode; -int fp_getfpsem(struct proc *p, int fd, struct fileproc **resultfp, struct psemnode **resultpsem); -struct pshmnode; -int fp_getfpshm(struct proc *p, int fd, struct fileproc **resultfp, struct pshmnode **resultpshm); -struct pipe; -int fp_getfpipe(struct proc *p, int fd, struct fileproc **resultfp, struct pipe **resultpipe); -struct atalk; -int fp_getfatalk(struct proc *p, int fd, struct fileproc **resultfp, struct atalk **resultatalk); -struct vnode; -int fp_getfvpandvid(struct proc *p, int fd, struct fileproc **resultfp, struct vnode **resultvp, uint32_t * vidp); -struct socket; -int fp_getfsock(struct proc *p, int fd, struct fileproc **resultfp, struct socket **results); int fp_lookup(struct proc *p, int fd, struct fileproc **resultfp, int locked); int fp_isguarded(struct fileproc *fp, u_int attribs); int fp_guard_exception(proc_t p, int fd, struct fileproc *fp, u_int attribs); -int closef_locked(struct fileproc *fp, struct fileglob *fg, struct proc *p); -int close_internal_locked(proc_t p, int fd, struct fileproc *fp, int flags); -int fileport_makefd_internal(proc_t p, ipc_port_t port, int uf_flags, int *fd); struct nameidata; struct vnode_attr; int open1(vfs_context_t ctx, struct nameidata *ndp, int uflags, @@ -269,18 +392,7 @@ int open1(vfs_context_t ctx, struct nameidata *ndp, int uflags, int32_t *retval); int chdir_internal(proc_t p, vfs_context_t ctx, struct nameidata *ndp, int per_thread); int kqueue_internal(struct proc *p, fp_allocfn_t, void *cra, int32_t *retval); -void fg_insertuipc(struct fileglob * fg); -boolean_t fg_insertuipc_mark(struct fileglob * fg); -void fg_removeuipc(struct fileglob * fg); -boolean_t fg_removeuipc_mark(struct fileglob * fg); -void unp_gc_wait(void); -void 
procfdtbl_reservefd(struct proc * p, int fd); -void procfdtbl_markclosefd(struct proc * p, int fd); void procfdtbl_releasefd(struct proc * p, int fd, struct fileproc * fp); -void procfdtbl_waitfd(struct proc * p, int fd); -void procfdtbl_clearfd(struct proc * p, int fd); -boolean_t file_issendable(struct proc * p, struct fileproc *fp); -extern int fdgetf_noref(proc_t, int, struct fileproc **); extern struct fileproc *fileproc_alloc_init(void *crargs); extern void fileproc_free(struct fileproc *fp); extern void guarded_fileproc_free(struct fileproc *fp); @@ -290,10 +402,17 @@ extern int falloc_guarded(struct proc *p, struct fileproc **fp, int *fd, vfs_context_t ctx, const guardid_t *guard, u_int attrs); extern void fileproc_modify_vflags(struct fileproc *fp, fileproc_vflags_t vflags, boolean_t clearflags); fileproc_vflags_t fileproc_get_vflags(struct fileproc *fp); -__END_DECLS -#endif /* __APPLE_API_UNSTABLE */ +#pragma mark internal version of syscalls + +int fileport_makefd(proc_t p, ipc_port_t port, int uf_flags, int *fd); +int dup2(proc_t p, int from, int to, int *fd); +int close_nocancel(proc_t p, int fd); + +#pragma GCC visibility pop + +__END_DECLS -#endif /* KERNEL */ +#endif /* XNU_KERNEL_PRIVATE */ #endif /* !_SYS_FILE_INTERNAL_H_ */ diff --git a/bsd/sys/filedesc.h b/bsd/sys/filedesc.h index aebf4b054..bdddd553d 100644 --- a/bsd/sys/filedesc.h +++ b/bsd/sys/filedesc.h @@ -102,7 +102,7 @@ struct filedesc { int fd_nfiles; /* number of open files allocated */ int fd_lastfile; /* high-water mark of fd_ofiles */ int fd_freefile; /* approx. next free file */ - u_short fd_cmask; /* mask for file creation */ + mode_t fd_cmask; /* mask for file creation */ int fd_flags; int fd_knlistsize; /* size of knlist */ struct klist *fd_knlist; /* list of attached knotes */ @@ -128,29 +128,108 @@ struct filedesc { #define UF_FORKCLOSE 0x02 /* auto-close on fork */ #define UF_RESERVED 0x04 /* open pending / in progress */ #define UF_CLOSING 0x08 /* close in progress */ - -#ifdef KERNEL #define UF_RESVWAIT 0x10 /* close in progress */ #define UF_INHERIT 0x20 /* "inherit-on-exec" */ #define UF_VALID_FLAGS \ (UF_EXCLOSE | UF_FORKCLOSE | UF_RESERVED | UF_CLOSING |\ UF_RESVWAIT | UF_INHERIT) -#endif /* KERNEL */ /* * Storage required per open file descriptor. */ #define OFILESIZE (sizeof(struct file *) + sizeof(char)) -#ifdef KERNEL +/*! + * @struct fdt_iterator + * + * @brief + * Type used to iterate a file descriptor table. + */ +struct fdt_iterator { + int fdti_fd; + struct fileproc *fdti_fp; +}; + +/*! + * @function fdt_next + * + * @brief + * Seek the iterator forward. + * + * @param p + * The process for which the file descriptor table is being iterated. + * + * @param fd + * The current file descriptor to scan from (exclusive). + * + * @param only_settled + * When true, only settled fileprocs (@c UF_RESERVED not set) are returned. + * When false, fileprocs that are in flux (@c UF_RESERVED set) are also returned. + * + * @returns + * The next iterator position. + * If @c fdti_fp is NULL, the iteration is done. + */ +extern struct fdt_iterator +fdt_next(proc_t p, int fd, bool only_settled); + +/*! + * @function fdt_prev + * + * @brief + * Seek the iterator backwards. + * + * @param p + * The process for which the file descriptor table is being iterated. + * + * @param fd + * The current file descriptor to scan from (exclusive). + * + * @param only_settled + * When true, only settled fileprocs (@c UF_RESERVED not set) are returned. + * When false, fileprocs that are in flux (@c UF_RESERVED set) are also returned.
+ * + * @returns + * The previous iterator position. + * If @c fdti_fp is NULL, the iteration is done. + */ +extern struct fdt_iterator +fdt_prev(proc_t p, int fd, bool only_settled); + +/*! + * @def fdt_foreach + * + * @brief + * Convenience macro around @c fdt_next() to enumerate fileprocs in a process + * file descriptor table. + * + * @param fp + * The iteration variable. + * + * @param p + * The process for which the file descriptor table is being iterated. + */ +#define fdt_foreach(fp, p) \ + for (struct fdt_iterator __fdt_it = fdt_next(p, -1, true); \ + ((fp) = __fdt_it.fdti_fp); \ + __fdt_it = fdt_next(p, __fdt_it.fdti_fd, true)) + +/*! + * @def fdt_foreach_fd + * + * @brief + * When in an @c fdt_foreach() loop, return the current file descriptor + * being inspected. + */ +#define fdt_foreach_fd() __fdt_it.fdti_fd + /* * Kernel global variables and routines. */ extern int dupfdopen(struct filedesc *fdp, int indx, int dfd, int mode, int error); extern int fdalloc(proc_t p, int want, int *result); -extern void fdrelse(proc_t p, int fd); extern int fdavail(proc_t p, int n); #define fdfile(p, fd) \ (&(p)->p_fd->fd_ofiles[(fd)]) @@ -168,7 +247,6 @@ extern int fdavail(proc_t p, int n); extern int falloc(proc_t p, struct fileproc **resultfp, int *resultfd, vfs_context_t ctx); -#ifdef __APPLE_API_PRIVATE typedef struct fileproc *(*fp_allocfn_t)(void *); extern int falloc_withalloc(proc_t p, struct fileproc **resultfp, int *resultfd, vfs_context_t ctx, @@ -177,9 +255,6 @@ extern int falloc_withalloc(proc_t p, struct fileproc **resultfp, extern struct filedesc *fdcopy(proc_t p, struct vnode *uth_cdir); extern void fdfree(proc_t p); extern void fdexec(proc_t p, short flags, int self_exec); -#endif /* __APPLE_API_PRIVATE */ - -#endif /* KERNEL */ #endif /* BSD_KERNEL_PRIVATE */ diff --git a/bsd/sys/fsctl.h b/bsd/sys/fsctl.h index 3c2c3783c..0f40383bd 100644 --- a/bsd/sys/fsctl.h +++ b/bsd/sys/fsctl.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2014 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2020 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * * @@ -348,7 +348,14 @@ struct fsioc_cas_bsdflags { #define FSIOC_THAW_EXTENTS _IO('h', 21) #define FSCTL_THAW_EXTENTS IOCBASECMD(FSIOC_THAW_EXTENTS) -/* this FSCTL selector is duplicated in XNU with the intent of making the VFS/generic one the only one eventually */ +/* Selectors on 'h' from 22 -> 49 are defined in HFS repo */ + +#define FSIOC_EVAL_ROOTAUTH _IO('h', 50) +#define FSCTL_EVAL_ROOTAUTH IOCBASECMD(FSIOC_EVAL_ROOTAUTH) + +/* This op should only be invoked from within the kernel */ +#define FSIOC_KERNEL_ROOTAUTH _IOW('h', 51, uint32_t) + #define FIRMLINK_STRUCT_LEN 1032 typedef struct generic_firmlink { uint8_t array[FIRMLINK_STRUCT_LEN]; @@ -366,6 +373,5 @@ int fsctl(const char *, unsigned long, void*, unsigned int); int ffsctl(int, unsigned long, void*, unsigned int); __END_DECLS - #endif /* !KERNEL */ #endif /* !_SYS_FSCTL_H_ */ diff --git a/bsd/sys/gmon.h b/bsd/sys/gmon.h index ef7b40d85..f8e429d85 100755 --- a/bsd/sys/gmon.h +++ b/bsd/sys/gmon.h @@ -64,6 +64,7 @@ #ifndef _SYS_GMON_H_ #define _SYS_GMON_H_ #include +#include /* * Structure prepended to gmon.out profiling data file.
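The filedesc.h hunk above introduces the fdt_foreach() / fdt_foreach_fd() iteration macros. A minimal sketch of how a kernel-side caller might walk a descriptor table with them follows; it is not part of this patch, count_vnode_fds() is a hypothetical helper, and the proc_fdlock()/proc_fdunlock() pairing mirrors how existing in-kernel walkers guard the table:

/*
 * Sketch only: count how many open descriptors of a process refer to vnodes.
 * fdt_foreach() only visits settled entries (UF_RESERVED cleared), so
 * fp_glob is expected to be valid for every fileproc it yields.
 */
static int
count_vnode_fds(proc_t p)
{
	struct fileproc *fp;
	int count = 0;

	proc_fdlock(p);
	fdt_foreach(fp, p) {
		if (FILEGLOB_DTYPE(fp->fp_glob) == DTYPE_VNODE) {
			count++;
		}
	}
	proc_fdunlock(p);

	return count;
}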
diff --git a/bsd/sys/imageboot.h b/bsd/sys/imageboot.h index e42f1e39d..07299c21d 100644 --- a/bsd/sys/imageboot.h +++ b/bsd/sys/imageboot.h @@ -28,6 +28,9 @@ #ifndef _IMAGEBOOT_H_ #define _IMAGEBOOT_H_ +struct kalloc_heap; +struct vnode; + typedef enum imageboot_type { IMAGEBOOT_NONE, IMAGEBOOT_DMG, @@ -35,9 +38,16 @@ typedef enum imageboot_type { } imageboot_type_t; imageboot_type_t imageboot_needed(void); +bool imageboot_desired(void); void imageboot_setup(imageboot_type_t type); int imageboot_format_is_valid(const char *root_path); int imageboot_mount_image(const char *root_path, int height, imageboot_type_t type); +int imageboot_pivot_image(const char *image_path, imageboot_type_t type, const char *mount_path, const char *outgoing_root_path, const bool rooted_dmg); +int imageboot_read_file(struct kalloc_heap *kheap, const char *path, void **bufp, size_t *bufszp); +int imageboot_read_file_from_offset(struct kalloc_heap *kheap, const char *path, off_t offset, void **bufp, size_t *bufszp); + +struct vnode * +imgboot_get_image_file(const char *path, off_t *fsize, int *errp); #define IMAGEBOOT_CONTAINER_ARG "container-dmg" #define IMAGEBOOT_ROOT_ARG "root-dmg" diff --git a/bsd/sys/imgact.h b/bsd/sys/imgact.h index ca681fc2b..30cabc1f9 100644 --- a/bsd/sys/imgact.h +++ b/bsd/sys/imgact.h @@ -95,6 +95,8 @@ struct image_params { char *ip_strings; /* base address for strings */ char *ip_strendp; /* current end pointer */ + char *ip_subsystem_root_path; /* filepath for the subsystem root */ + int ip_argspace; /* remaining space of NCARGS limit (argv+envv) */ int ip_strspace; /* remaining total string space */ @@ -120,9 +122,11 @@ struct image_params { void *ip_px_persona; /* persona args */ void *ip_px_pcred_info; /* posix cred args */ void *ip_cs_error; /* codesigning error reason */ + char *ip_inherited_shared_region_id; /* inherited shared region id for ptr auth */ uint64_t ip_dyld_fsid; uint64_t ip_dyld_fsobjid; + uint64_t ip_inherited_jop_pid; unsigned int ip_simulator_binary; /* simulator binary flags */ ipc_port_t ip_sc_port; /* SUID port. 
*/ @@ -144,8 +148,11 @@ struct image_params { #define IMGPF_HIGH_BITS_ASLR 0x00000200 /* randomize high bits of ASLR slide */ #define IMGPF_IS_64BIT_DATA 0x00000400 /* exec to a 64Bit register state */ #define IMGPF_DRIVER 0x00000800 /* exec of a driver binary (no LC_MAIN) */ +#define IMGPF_RESLIDE 0x000001000 /* reslide the shared cache */ +#define IMGPF_PLUGIN_HOST_DISABLE_A_KEYS 0x000002000 /* process hosts plugins, disable ptr auth A keys */ #define IMGPF_NOJOP 0x80000000 + /* * Simulator binary flags */ @@ -153,4 +160,5 @@ struct image_params { #define IMGPF_SB_TRUE 1 /* Binary is a simulator binary */ #define IMGPF_SB_FALSE 2 /* Binary is not a simulator binary */ + #endif /* !_SYS_IMGACT */ diff --git a/bsd/sys/ioccom.h b/bsd/sys/ioccom.h index cabce8cf7..f9d23260e 100644 --- a/bsd/sys/ioccom.h +++ b/bsd/sys/ioccom.h @@ -83,7 +83,7 @@ #define IOC_OUT (__uint32_t)0x40000000 /* copy parameters in */ #define IOC_IN (__uint32_t)0x80000000 -/* copy paramters in and out */ +/* copy parameters in and out */ #define IOC_INOUT (IOC_IN|IOC_OUT) /* mask for IN/OUT/VOID */ #define IOC_DIRMASK (__uint32_t)0xe0000000 diff --git a/bsd/sys/kas_info.h b/bsd/sys/kas_info.h index 5818462c4..995d0a84f 100644 --- a/bsd/sys/kas_info.h +++ b/bsd/sys/kas_info.h @@ -42,7 +42,8 @@ __BEGIN_DECLS /* The slide of the main kernel compared to its static link address */ #define KAS_INFO_KERNEL_TEXT_SLIDE_SELECTOR (0) /* returns uint64_t */ -#define KAS_INFO_MAX_SELECTOR (1) +#define KAS_INFO_KERNEL_SEGMENT_VMADDR_SELECTOR (1) +#define KAS_INFO_MAX_SELECTOR (2) #ifndef KERNEL diff --git a/bsd/sys/kasl.h b/bsd/sys/kasl.h index 1de38642a..1320d65be 100644 --- a/bsd/sys/kasl.h +++ b/bsd/sys/kasl.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013 Apple Inc. All rights reserved. + * Copyright (c) 2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -37,9 +37,9 @@ #endif /* BSD_KERNEL_PRIVATE */ extern int -kern_asl_msg(int level, const char *facility, int num_pairs, ...); +kern_asl_msg(int level, const char *facility, size_t num_pairs, ...); -extern int escape_str(char *str, int len, int buflen); +extern int escape_str(char *str, size_t len, size_t buflen); extern void fpxlog_init(void); extern void fpxlog(int, uint32_t, uint32_t, uint32_t); diff --git a/bsd/sys/kauth.h b/bsd/sys/kauth.h index 701390408..66cb12e92 100644 --- a/bsd/sys/kauth.h +++ b/bsd/sys/kauth.h @@ -38,7 +38,10 @@ #include #include #include +#include /* u_int8_t, etc. 
*/ #include /* __offsetof() */ +#include /* uid_t */ +#include /* gid_t */ #include /* NGROUPS_MAX */ #ifdef __APPLE_API_EVOLVING @@ -293,13 +296,13 @@ extern kauth_cred_t kauth_cred_setresuid(kauth_cred_t cred, uid_t ruid, uid_t eu extern kauth_cred_t kauth_cred_setresgid(kauth_cred_t cred, gid_t rgid, gid_t egid, gid_t svgid); extern kauth_cred_t kauth_cred_setuidgid(kauth_cred_t cred, uid_t uid, gid_t gid); extern kauth_cred_t kauth_cred_setsvuidgid(kauth_cred_t cred, uid_t uid, gid_t gid); -extern kauth_cred_t kauth_cred_setgroups(kauth_cred_t cred, gid_t *groups, int groupcount, uid_t gmuid); +extern kauth_cred_t kauth_cred_setgroups(kauth_cred_t cred, gid_t *groups, size_t groupcount, uid_t gmuid); struct uthread; extern void kauth_cred_uthread_update(struct uthread *, proc_t); #ifdef CONFIG_MACF extern void kauth_proc_label_update_execve(struct proc *p, struct vfs_context *ctx, struct vnode *vp, off_t offset, struct vnode *scriptvp, struct label *scriptlabel, struct label *execlabel, unsigned int *csflags, void *psattr, int *disjoint, int *update_return); #endif -extern int kauth_cred_getgroups(kauth_cred_t _cred, gid_t *_groups, int *_groupcount); +extern int kauth_cred_getgroups(kauth_cred_t _cred, gid_t *_groups, size_t *_groupcount); extern int kauth_cred_gid_subset(kauth_cred_t _cred1, kauth_cred_t _cred2, int *_resultp); struct auditinfo_addr; extern kauth_cred_t kauth_cred_setauditinfo(kauth_cred_t, au_session_t *); @@ -788,6 +791,7 @@ extern void kauth_cred_init(void); extern void kauth_identity_init(void); extern void kauth_groups_init(void); extern void kauth_resolver_init(void); +extern void kauth_resolver_identity_reset(void); #endif __END_DECLS #endif /* XNU_KERNEL_PRIVATE */ diff --git a/bsd/sys/kdebug.h b/bsd/sys/kdebug.h index a61252890..f45871353 100644 --- a/bsd/sys/kdebug.h +++ b/bsd/sys/kdebug.h @@ -144,8 +144,10 @@ __BEGIN_DECLS /* **** The Kernel Debug Sub Classes for Mach (DBG_MACH) **** */ #define DBG_MACH_EXCP_KTRAP_x86 0x02 /* Kernel Traps on x86 */ -#define DBG_MACH_EXCP_DFLT 0x03 /* Data Translation Fault */ -#define DBG_MACH_EXCP_IFLT 0x04 /* Inst Translation Fault */ +#define DBG_MACH_EXCP_DFLT 0x03 /* deprecated name */ +#define DBG_MACH_EXCP_SYNC_ARM 0x03 /* arm/arm64 synchronous exception */ +#define DBG_MACH_EXCP_IFLT 0x04 /* deprecated name */ +#define DBG_MACH_EXCP_SERR_ARM 0x04 /* arm/arm64 SError (async) exception */ #define DBG_MACH_EXCP_INTR 0x05 /* Interrupts */ #define DBG_MACH_EXCP_ALNG 0x06 /* Alignment Exception */ #define DBG_MACH_EXCP_UTRAP_x86 0x07 /* User Traps on x86 */ @@ -179,6 +181,7 @@ __BEGIN_DECLS #define DBG_MACH_SHAREDREGION 0xA8 /* Shared region */ #define DBG_MACH_SCHED_CLUTCH 0xA9 /* Clutch scheduler */ #define DBG_MACH_IO 0xAA /* I/O */ +#define DBG_MACH_WORKGROUP 0xAB /* Workgroup subsystem */ /* Codes for DBG_MACH_IO */ #define DBC_MACH_IO_MMIO_READ 0x1 @@ -253,14 +256,34 @@ __BEGIN_DECLS #define MACH_QUIESCENT_COUNTER 0x38 /* quiescent counter tick */ #define MACH_TURNSTILE_USER_CHANGE 0x39 /* base priority change because of turnstile */ #define MACH_AMP_RECOMMENDATION_CHANGE 0x3a /* Thread group recommendation change */ +#define MACH_AMP_PERFCTL_POLICY_CHANGE 0x3b /* AMP policy for perfctl cluster recommendation */ #define MACH_TURNSTILE_KERNEL_CHANGE 0x40 /* sched priority change because of turnstile */ - -/* Codes for Clutch Scheduler (DBG_MACH_SCHED_CLUTCH) */ -#define MACH_SCHED_CLUTCH_ROOT_BUCKET_STATE 0x0 -#define MACH_SCHED_CLUTCH_TG_BUCKET_STATE 0x1 -#define MACH_SCHED_CLUTCH_THREAD_SELECT 0x2 -#define 
MACH_SCHED_CLUTCH_THREAD_STATE 0x3 -#define MACH_SCHED_CLUTCH_TG_BUCKET_PRI 0x4 +#define MACH_SCHED_WI_AUTO_JOIN 0x41 /* work interval auto join events */ +#define MACH_SCHED_WI_DEFERRED_FINISH 0x42 /* work interval pending finish events for auto-join thread groups */ +#define MACH_PSET_AVG_EXEC_TIME 0x50 + +/* Codes for Clutch/Edge Scheduler (DBG_MACH_SCHED_CLUTCH) */ +#define MACH_SCHED_CLUTCH_ROOT_BUCKET_STATE 0x0 /* __unused */ +#define MACH_SCHED_CLUTCH_TG_BUCKET_STATE 0x1 /* __unused */ +#define MACH_SCHED_CLUTCH_THREAD_SELECT 0x2 /* Thread selection events for Clutch scheduler */ +#define MACH_SCHED_CLUTCH_THREAD_STATE 0x3 /* __unused */ +#define MACH_SCHED_CLUTCH_TG_BUCKET_PRI 0x4 /* Clutch bucket priority update event */ +/* Edge Scheduler Tracepoints */ +#define MACH_SCHED_EDGE_CLUSTER_OVERLOAD 0x5 /* Cluster experienced overload; migrating threads to other clusters */ +#define MACH_SCHED_EDGE_STEAL 0x6 /* Per-cluster avg. thread execution time */ +#define MACH_SCHED_EDGE_REBAL_RUNNABLE 0x7 /* Rebalance runnable threads on a foreign cluster */ +#define MACH_SCHED_EDGE_REBAL_RUNNING 0x8 /* Rebalance running threads on a foreign cluster */ +#define MACH_SCHED_EDGE_SHOULD_YIELD 0x9 /* Edge decisions for thread yield */ +#define MACH_SCHED_CLUTCH_THR_COUNT 0xa /* Clutch scheduler runnable thread counts */ +#define MACH_SCHED_EDGE_LOAD_AVG 0xb /* Per-cluster load average */ + +/* Codes for workgroup interval subsystem (DBG_MACH_WORKGROUP) */ +#define WORKGROUP_INTERVAL_CREATE 0x0 /* work interval creation */ +#define WORKGROUP_INTERVAL_DESTROY 0x1 /* work interval destruction */ +#define WORKGROUP_INTERVAL_CHANGE 0x2 /* thread work interval change */ +#define WORKGROUP_INTERVAL_START 0x3 /* work interval start call */ +#define WORKGROUP_INTERVAL_UPDATE 0x4 /* work interval update call */ +#define WORKGROUP_INTERVAL_FINISH 0x5 /* work interval finish call */ /* Variants for MACH_MULTIQ_DEQUEUE */ #define MACH_MULTIQ_BOUND 1 @@ -268,16 +291,17 @@ __BEGIN_DECLS #define MACH_MULTIQ_GLOBAL 3 /* Arguments for vm_fault (DBG_MACH_VM) */ -#define DBG_ZERO_FILL_FAULT 1 -#define DBG_PAGEIN_FAULT 2 -#define DBG_COW_FAULT 3 -#define DBG_CACHE_HIT_FAULT 4 -#define DBG_NZF_PAGE_FAULT 5 -#define DBG_GUARD_FAULT 6 -#define DBG_PAGEINV_FAULT 7 -#define DBG_PAGEIND_FAULT 8 -#define DBG_COMPRESSOR_FAULT 9 +#define DBG_ZERO_FILL_FAULT 1 +#define DBG_PAGEIN_FAULT 2 +#define DBG_COW_FAULT 3 +#define DBG_CACHE_HIT_FAULT 4 +#define DBG_NZF_PAGE_FAULT 5 +#define DBG_GUARD_FAULT 6 +#define DBG_PAGEINV_FAULT 7 +#define DBG_PAGEIND_FAULT 8 +#define DBG_COMPRESSOR_FAULT 9 #define DBG_COMPRESSOR_SWAPIN_FAULT 10 +#define DBG_COR_FAULT 11 /* Codes for IPC (DBG_MACH_IPC) */ #define MACH_TASK_SUSPEND 0x0 /* Suspended a task */ @@ -302,6 +326,7 @@ __BEGIN_DECLS #define MACH_THREAD_GROUP_NAME 0x3 #define MACH_THREAD_GROUP_NAME_FREE 0x4 #define MACH_THREAD_GROUP_FLAGS 0x5 +#define MACH_THREAD_GROUP_BLOCK 0x6 /* Codes for coalitions (DBG_MACH_COALITION) */ #define MACH_COALITION_NEW 0x0 @@ -333,6 +358,8 @@ __BEGIN_DECLS #define PMAP__TTE 0x13 #define PMAP__SWITCH_USER_TTB 0x14 #define PMAP__UPDATE_CACHING 0x15 +#define PMAP__ATTRIBUTE_CLEAR_RANGE 0x16 +#define PMAP__CLEAR_USER_TTB 0x17 /* Codes for clock (DBG_MACH_CLOCK) */ #define MACH_EPOCH_CHANGE 0x0 /* wake epoch change */ @@ -641,9 +668,17 @@ __BEGIN_DECLS /* The Kernel Debug Sub Classes for DBG_MONOTONIC */ #define DBG_MT_INSTRS_CYCLES 1 #define DBG_MT_DEBUG 2 +#define DBG_MT_RESOURCES_PROC_EXIT 3 +#define DBG_MT_RESOURCES_THR_EXIT 4 #define DBG_MT_TMPTH 0xfe #define 
DBG_MT_TMPCPU 0xff +/* Kernel Debug events for the DBG_MT_RESOURCES_PROC_EXIT subclass */ +#define DBG_MT_INSTRS_CYCLES_PROC_EXIT MTDBG_RESOURCES_ON_PROC_EXIT(0) + +/* Kernel Debug events for the DBG_MT_RESOURCES_THR_EXIT subclass */ +#define DBG_MT_INSTRS_CYCLES_THR_EXIT MTDBG_RESOURCES_ON_THR_EXIT(0) + /* The Kernel Debug Sub Classes for DBG_MISC */ #define DBG_MISC_COREBRIGHTNESS 0x01 #define DBG_MISC_VIDEOENG 0x02 @@ -672,6 +707,8 @@ __BEGIN_DECLS #define DBG_DYLD_UUID_SHARED_CACHE_32_A (12) #define DBG_DYLD_UUID_SHARED_CACHE_32_B (13) #define DBG_DYLD_UUID_SHARED_CACHE_32_C (14) +#define DBG_DYLD_AOT_UUID_MAP_A (15) +#define DBG_DYLD_AOT_UUID_MAP_B (16) /* The Kernel Debug modifiers for the DBG_DKRW sub class */ #define DKIO_DONE 0x01 @@ -729,7 +766,7 @@ __BEGIN_DECLS /* task only attributes */ #define IMP_TASK_POLICY_DARWIN_BG_IOPOL 0x27 -#define IMP_TASK_POLICY_TAL 0x28 +/* unused, was IMP_TASK_POLICY_TAL 0x28 */ #define IMP_TASK_POLICY_BOOST 0x29 #define IMP_TASK_POLICY_ROLE 0x2A /* unused 0x2B */ @@ -870,6 +907,9 @@ __BEGIN_DECLS #define ARIADNEDBG_CODE(SubClass, code) KDBG_CODE(DBG_ARIADNE, SubClass, code) #define DAEMONDBG_CODE(SubClass, code) KDBG_CODE(DBG_DAEMON, SubClass, code) #define CPUPM_CODE(code) IOKDBG_CODE(DBG_IOCPUPM, code) +#define MTDBG_CODE(SubClass, code) KDBG_CODE(DBG_MONOTONIC, SubClass, code) +#define MTDBG_RESOURCES_ON_PROC_EXIT(code) MTDBG_CODE(DBG_MT_RESOURCES_PROC_EXIT, code) +#define MTDBG_RESOURCES_ON_THR_EXIT(code) MTDBG_CODE(DBG_MT_RESOURCES_THR_EXIT, code) #define KMEM_ALLOC_CODE MACHDBG_CODE(DBG_MACH_LEAKS, 0) #define KMEM_ALLOC_CODE_2 MACHDBG_CODE(DBG_MACH_LEAKS, 1) diff --git a/bsd/sys/kdebug_kernel.h b/bsd/sys/kdebug_kernel.h index c366472b1..3b31648a9 100644 --- a/bsd/sys/kdebug_kernel.h +++ b/bsd/sys/kdebug_kernel.h @@ -299,15 +299,9 @@ extern unsigned int kdebug_enable; } \ } while (0) -#define KERNEL_DEBUG_EARLY(x, a, b, c, d) \ - do { \ - kernel_debug_early((uint32_t)(x), (uintptr_t)(a), (uintptr_t)(b), \ - (uintptr_t)(c), (uintptr_t)(d)); \ - } while (0) #else /* (KDEBUG_LEVEL >= KDEBUG_LEVEL_STANDARD) */ #define KERNEL_DEBUG_CONSTANT(x, a, b, c, d, e) do {} while (0) #define KERNEL_DEBUG_CONSTANT1(x, a, b, c, d, e) do {} while (0) -#define KERNEL_DEBUG_EARLY(x, a, b, c, d) do {} while (0) #endif /* (KDEBUG_LEVEL >= KDEBUG_LEVEL_STANDARD) */ /* @@ -329,6 +323,7 @@ extern unsigned int kdebug_enable; (uintptr_t)(d), 0); \ } \ } while (0) + #define KERNEL_DEBUG_CONSTANT_IST1(x, a, b, c, d, e) \ do { \ if (KDBG_IMPROBABLE(kdebug_enable)) { \ @@ -336,9 +331,17 @@ extern unsigned int kdebug_enable; (uintptr_t)(d), (uintptr_t)(e)); \ } \ } while (0) + +#define KERNEL_DEBUG_EARLY(x, a, b, c, d) \ + do { \ + kernel_debug_early((uint32_t)(x), (uintptr_t)(a), (uintptr_t)(b), \ + (uintptr_t)(c), (uintptr_t)(d)); \ + } while (0) + #else /* (KDEBUG_LEVEL >= KDEBUG_LEVEL_IST) */ #define KERNEL_DEBUG_CONSTANT_IST(type, x, a, b, c, d, e) do {} while (0) #define KERNEL_DEBUG_CONSTANT_IST1(x, a, b, c, d, e) do {} while (0) +#define KERNEL_DEBUG_EARLY(x, a, b, c, d) do {} while (0) #endif /* (KDEBUG_LEVEL >= KDEBUG_LEVEL_IST) */ #if NO_KDEBUG @@ -400,6 +403,7 @@ void kernel_debug_filtered(uint32_t debugid, uintptr_t arg1, uintptr_t arg2, #pragma mark - xnu API #ifdef XNU_KERNEL_PRIVATE + /* Used in early boot to log events. 
*/ void kernel_debug_early(uint32_t debugid, uintptr_t arg1, uintptr_t arg2, uintptr_t arg3, uintptr_t arg4); @@ -411,9 +415,17 @@ void kernel_debug_string_simple(uint32_t eventid, const char *str); extern void kdebug_reset(void); void kdbg_dump_trace_to_file(const char *); -void kdebug_init(unsigned int n_events, char *filterdesc, bool wrapping); + +enum kdebug_opts { + KDOPT_WRAPPING = 0x1, + KDOPT_ATBOOT = 0x2, +}; + +void kdebug_init(unsigned int n_events, char *filterdesc, + enum kdebug_opts opts); void kdebug_trace_start(unsigned int n_events, const char *filterdesc, - bool wrapping, bool at_wake); + enum kdebug_opts opts); +uint64_t kdebug_wake(void); void kdebug_free_early_buf(void); void release_storage_unit(int cpu, uint32_t storage_unit); bool allocate_storage_unit(int cpu); @@ -425,7 +437,7 @@ void kdbg_trace_string(struct proc *proc, long *arg1, long *arg2, long *arg3, #define KDBG_VFS_LOOKUP_FLAG_LOOKUP 0x01 #define KDBG_VFS_LOOKUP_FLAG_NOPROCFILT 0x02 -void kdebug_vfs_lookup(long *dbg_parms, int dbg_namelen, void *dp, +void kdebug_vfs_lookup(unsigned long *path_words, int path_len, void *vnp, uint32_t flags); #endif /* XNU_KERNEL_PRIVATE */ @@ -433,7 +445,7 @@ void kdebug_vfs_lookup(long *dbg_parms, int dbg_namelen, void *dp, #ifdef KERNEL_PRIVATE #define NUMPARMS 23 -void kdebug_lookup_gen_events(long *dbg_parms, int dbg_namelen, void *dp, +void kdebug_lookup_gen_events(long *path_words, int path_len, void *vnp, bool lookup); #pragma mark - EnergyTracing diff --git a/bsd/sys/kern_control.h b/bsd/sys/kern_control.h index 3fac13c86..5395efc71 100644 --- a/bsd/sys/kern_control.h +++ b/bsd/sys/kern_control.h @@ -251,6 +251,13 @@ typedef void * kern_ctl_ref; * the extended fields within the kern_ctl_reg structure. */ #define CTL_FLAG_REG_CRIT 0x10 + +/*! + * @defined CTL_FLAG_REG_SETUP + * @discussion This flag indicates that this kernel control utilizes the + * the setup callback field within the kern_ctl_reg structure. + */ +#define CTL_FLAG_REG_SETUP 0x20 #endif /* KERNEL_PRIVATE */ /* Data flags for controllers */ @@ -442,6 +449,22 @@ typedef errno_t (*ctl_send_list_func)(kern_ctl_ref kctlref, u_int32_t unit, void typedef errno_t (*ctl_bind_func)(kern_ctl_ref kctlref, struct sockaddr_ctl *sac, void **unitinfo); + +/*! + * @typedef ctl_setup_func + * @discussion The ctl_setup_func is an optional function that allows the client + * to pick a unit number in the case that the caller hasn't specified one + * @param unit A placeholder for a pointer to the unit number that is selected with + * this kernel control instance + * @param unitinfo A placeholder for a pointer to the optional user-defined + * private data associated with this kernel control instance. This + * opaque info will be provided to the user when the rest of the + * callback routines are executed. For example, it can be used + * to pass a pointer to an instance-specific data structure in + * order for the user to keep track of the states related to this + * kernel control instance. + */ +typedef errno_t (*ctl_setup_func)(u_int32_t *unit, void **unitinfo); #endif /* KERNEL_PRIVATE */ /*! 
@@ -499,6 +522,7 @@ struct kern_ctl_reg { ctl_rcvd_func ctl_rcvd; /* Only valid if CTL_FLAG_REG_EXTENDED is set */ ctl_send_list_func ctl_send_list;/* Only valid if CTL_FLAG_REG_EXTENDED is set */ ctl_bind_func ctl_bind; + ctl_setup_func ctl_setup; #endif /* KERNEL_PRIVATE */ }; @@ -652,6 +676,8 @@ void kctl_fill_socketinfo(struct socket *, struct socket_info *); u_int32_t ctl_id_by_name(const char *name); errno_t ctl_name_by_id(u_int32_t id, char *out_name, size_t maxsize); + +extern const u_int32_t ctl_maxunit; #endif /* KERNEL_PRIVATE */ __END_DECLS diff --git a/bsd/sys/kern_event.h b/bsd/sys/kern_event.h index 8a54be549..a8934d594 100644 --- a/bsd/sys/kern_event.h +++ b/bsd/sys/kern_event.h @@ -99,6 +99,22 @@ */ #define KEV_IEEE80211_CLASS 6 +/* + * The following struct is KPI, but it was originally defined with a trailing + * array member of size one, intended to be used as a Variable-Length Array. + * That's problematic because the compiler doesn't know that the array is + * accessed out-of-bounds and can assume it isn't. This makes + * -Warray-bounds-pointer-arithmetic sad. We can't just change the code because + * it requires users to also change their uses of the class, at a minimum + * because kern_event_msg's size changes when making the last member a VLA. This + * macro allows users of this KPI to opt-in to the new behavior. + */ +#if defined(XNU_KERN_EVENT_DATA_IS_VLA) +#define XNU_KERN_EVENT_DATA_SIZE /* nothing, it's a VLA */ +#else +#define XNU_KERN_EVENT_DATA_SIZE 1 +#endif + /*! * @struct kern_event_msg * @discussion This structure is prepended to all kernel events. This @@ -129,7 +145,7 @@ struct kern_event_msg { u_int32_t kev_subclass; /* Component within layer */ u_int32_t id; /* Monotonically increasing value */ u_int32_t event_code; /* unique code */ - u_int32_t event_data[1]; /* One or more data words */ + u_int32_t event_data[XNU_KERN_EVENT_DATA_SIZE]; /* One or more data words */ }; /*! 
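The kern_event.h comment above describes an opt-in macro that turns kern_event_msg's trailing data words into a true flexible array member. A hedged sketch of what that opt-in might look like on the client side (not from this patch; MY_EVENT_WORDS and my_event_size() are illustrative names only):

/*
 * Sketch only: define the opt-in before including the header, then account
 * for the payload explicitly, since event_data[] no longer contributes a
 * phantom first word to sizeof(struct kern_event_msg).
 */
#include <sys/types.h>

#define XNU_KERN_EVENT_DATA_IS_VLA
#include <sys/kern_event.h>

#define MY_EVENT_WORDS 4	/* assumed payload size, for illustration */

static size_t
my_event_size(void)
{
	return sizeof(struct kern_event_msg) +
	       MY_EVENT_WORDS * sizeof(u_int32_t);
}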
diff --git a/bsd/sys/kern_memorystatus.h b/bsd/sys/kern_memorystatus.h index 2ba2a8452..48b03e0de 100644 --- a/bsd/sys/kern_memorystatus.h +++ b/bsd/sys/kern_memorystatus.h @@ -102,6 +102,12 @@ enum { kMemorystatusLevelCritical = 3 }; +#define KEV_DIRTYSTATUS_SUBCLASS 4 + +enum { + kDirtyStatusChangeNote = 1 +}; + typedef struct memorystatus_priority_entry { pid_t pid; int32_t priority; @@ -188,6 +194,7 @@ typedef struct jetsam_snapshot_entry { uint64_t jse_coalition_jetsam_id; /* we only expose coalition id for COALITION_TYPE_JETSAM */ struct timeval64 cpu_time; uint64_t jse_thaw_count; + uint64_t jse_frozen_to_swap_pages; } memorystatus_jetsam_snapshot_entry_t; typedef struct jetsam_snapshot { @@ -199,6 +206,20 @@ typedef struct jetsam_snapshot { memorystatus_jetsam_snapshot_entry_t entries[]; } memorystatus_jetsam_snapshot_t; +typedef enum dirty_status_change_event_type { + kDirtyStatusChangedDirty = 0x0, + kDirtyStatusChangedClean = 0x1 +} dirty_status_change_event_type_t; + +typedef struct dirty_status_change_event { + pid_t dsc_pid; + char dsc_process_name[(2 * MAXCOMLEN) + 1]; + dirty_status_change_event_type_t dsc_event_type; + uint64_t dsc_time; + uint64_t dsc_pages; + int32_t dsc_priority; +} dirty_status_change_event_t; + /* TODO - deprecate; see */ #define kMaxSnapshotEntries 192 @@ -207,6 +228,11 @@ typedef struct jetsam_snapshot { */ extern memorystatus_jetsam_snapshot_t *memorystatus_jetsam_snapshot; extern memorystatus_jetsam_snapshot_t *memorystatus_jetsam_snapshot_copy; +#if CONFIG_FREEZE +extern memorystatus_jetsam_snapshot_t *memorystatus_jetsam_snapshot_freezer; +extern unsigned int memorystatus_jetsam_snapshot_freezer_max; +extern unsigned int memorystatus_jetsam_snapshot_freezer_size; +#endif /* CONFIG_FREEZE */ extern unsigned int memorystatus_jetsam_snapshot_count; extern unsigned int memorystatus_jetsam_snapshot_copy_count; extern unsigned int memorystatus_jetsam_snapshot_max; @@ -316,16 +342,16 @@ int memorystatus_control(uint32_t command, int32_t pid, uint32_t flags, void *bu * if they would prefer being jetsam'ed in the idle band to being frozen in an elevated band. */ #define MEMORYSTATUS_CMD_GET_PROCESS_IS_FREEZABLE 19 /* Return the freezable state of a process. */ -#if CONFIG_FREEZE -#if DEVELOPMENT || DEBUG #define MEMORYSTATUS_CMD_FREEZER_CONTROL 20 -#endif /* DEVELOPMENT || DEBUG */ -#endif /* CONFIG_FREEZE */ #define MEMORYSTATUS_CMD_GET_AGGRESSIVE_JETSAM_LENIENT_MODE 21 /* Query if the lenient mode for aggressive jetsam is enabled. */ #define MEMORYSTATUS_CMD_INCREASE_JETSAM_TASK_LIMIT 22 /* Used by DYLD to increase the jetsam active and inactive limits, when using roots */ +#if PRIVATE +#define MEMORYSTATUS_CMD_SET_JETSAM_SNAPSHOT_OWNERSHIP 23 /* Used by unit tests in the development kernel only. */ +#endif /* PRIVATE */ + /* Commands that act on a group of processes */ #define MEMORYSTATUS_CMD_GRP_SET_PROPERTIES 100 @@ -358,6 +384,12 @@ typedef struct memorystatus_jetsam_panic_options { #define MEMORYSTATUS_FLAGS_GRP_SET_PRIORITY 0x8 /* Set jetsam priorities for a group of pids */ #define MEMORYSTATUS_FLAGS_GRP_SET_PROBABILITY 0x10 /* Set probability of use for a group of processes */ +#if PRIVATE +#define MEMORYSTATUS_FLAGS_SNAPSHOT_TAKE_OWNERSHIP 0x20 /* Only used by xnu unit tests. */ +#define MEMORYSTATUS_FLAGS_SNAPSHOT_DROP_OWNERSHIP 0x40 /* Only used by xnu unit tests. 
*/ +#endif /* PRIVATE */ + +#define MEMORYSTATUS_FLAGS_SNAPSHOT_FREEZER 0x80 /* A snapshot buffer containing app kills since last consumption */ /* * For use with memorystatus_control: * MEMORYSTATUS_CMD_GET_JETSAM_SNAPSHOT @@ -471,6 +503,7 @@ typedef struct memorystatus_memlimit_properties2 { #define P_MEMSTAT_USE_ELEVATED_INACTIVE_BAND 0x00010000 /* if set, the process will go into this band & stay there when in the background instead * of the aging bands and/or the IDLE band. */ #define P_MEMSTAT_PRIORITY_ASSERTION 0x00020000 /* jetsam priority is being driven by an assertion */ +#define P_MEMSTAT_FREEZE_CONSIDERED 0x00040000 /* This process has been considered for the freezer. */ /* @@ -515,7 +548,7 @@ extern memorystatus_internal_probabilities_t *memorystatus_global_probabilities_ extern size_t memorystatus_global_probabilities_size; -extern void memorystatus_init(void) __attribute__((section("__TEXT, initcode"))); +extern void memorystatus_init(void); extern void memorystatus_init_at_boot_snapshot(void); @@ -563,6 +596,7 @@ void memorystatus_proc_flags_unsafe(void * v, boolean_t *is_dirty, boolean_t *is #if __arm64__ void memorystatus_act_on_legacy_footprint_entitlement(proc_t p, boolean_t footprint_increase); void memorystatus_act_on_ios13extended_footprint_entitlement(proc_t p); +void memorystatus_act_on_entitled_task_limit(proc_t p); #endif /* __arm64__ */ #endif /* CONFIG_MEMORYSTATUS */ diff --git a/bsd/sys/kern_memorystatus_freeze.h b/bsd/sys/kern_memorystatus_freeze.h index c3894b394..c56ba0b4e 100644 --- a/bsd/sys/kern_memorystatus_freeze.h +++ b/bsd/sys/kern_memorystatus_freeze.h @@ -34,6 +34,7 @@ #include #include #include +#include // command/proc_name_t typedef struct memorystatus_freeze_entry { int32_t pid; @@ -41,6 +42,10 @@ typedef struct memorystatus_freeze_entry { uint32_t pages; } memorystatus_freeze_entry_t; +#ifdef PRIVATE +#define FREEZE_PROCESSES_MAX 20 +#endif /* PRIVATE */ + #ifdef XNU_KERNEL_PRIVATE extern unsigned long freeze_threshold_percentage; @@ -63,7 +68,6 @@ extern int memorystatus_freeze_process_sync(proc_t p); #define FREEZE_PAGES_MAX (max_task_footprint_mb == 0 ? INT_MAX : (max_task_footprint_mb << (20 - PAGE_SHIFT))) #define FREEZE_SUSPENDED_THRESHOLD_DEFAULT 4 -#define FREEZE_PROCESSES_MAX 20 #define FREEZE_DAILY_MB_MAX_DEFAULT 1024 #define FREEZE_DEGRADATION_BUDGET_THRESHOLD 25 //degraded perf. when the daily budget left falls below this threshold percentage @@ -113,4 +117,20 @@ int memorystatus_freezer_control(int32_t flags, user_addr_t buffer, size_t buffe #endif /* XNU_KERNEL_PRIVATE */ +#ifdef PRIVATE +/* Lists all the processes that are currently in the freezer. 
*/ +#define FREEZER_CONTROL_GET_PROCS (2) + +#define FREEZER_CONTROL_GET_PROCS_MAX_COUNT (FREEZE_PROCESSES_MAX * 2) + +typedef struct _global_frozen_procs { + size_t gfp_num_frozen; + struct { + pid_t fp_pid; + proc_name_t fp_name; + } gfp_procs[FREEZER_CONTROL_GET_PROCS_MAX_COUNT]; +} global_frozen_procs_t; + +#endif /* PRIVATE */ + #endif /* SYS_MEMORYSTATUS_FREEZE_H */ diff --git a/bsd/sys/kern_memorystatus_notify.h b/bsd/sys/kern_memorystatus_notify.h index ee6c5a014..254204767 100644 --- a/bsd/sys/kern_memorystatus_notify.h +++ b/bsd/sys/kern_memorystatus_notify.h @@ -38,8 +38,9 @@ extern vm_pressure_level_t memorystatus_vm_pressure_level; extern boolean_t memorystatus_hwm_candidates; -boolean_t memorystatus_warn_process(pid_t pid, __unused boolean_t is_active, __unused boolean_t is_fatal, boolean_t exceeded); -int memorystatus_send_note(int event_code, void *data, size_t data_length); +boolean_t memorystatus_warn_process(const proc_t p, __unused boolean_t is_active, __unused boolean_t is_fatal, boolean_t exceeded); +int memorystatus_send_note(int event_code, void *data, uint32_t data_length); +int memorystatus_send_dirty_status_change_note(void *data, uint32_t data_length); void memorystatus_send_low_swap_note(void); void consider_vm_pressure_events(void); diff --git a/bsd/sys/kern_sysctl.h b/bsd/sys/kern_sysctl.h index 72d7c82fd..ae875d8cd 100644 --- a/bsd/sys/kern_sysctl.h +++ b/bsd/sys/kern_sysctl.h @@ -52,7 +52,7 @@ typedef struct _vmobject_list_output_ vmobject_list_output_data_t; typedef struct _vmobject_list_output_ *vmobject_list_output_t; struct _vmobject_list_output_ { - int64_t entries; /* int64_t for alignment reasons, instead of int32_t */ + uint64_t entries; vm_object_query_data_t data[0]; }; #endif /* _KERN_SYSCTL_H_ */ diff --git a/bsd/sys/kernel_types.h b/bsd/sys/kernel_types.h index f5fbe196d..856d41a52 100644 --- a/bsd/sys/kernel_types.h +++ b/bsd/sys/kernel_types.h @@ -66,6 +66,9 @@ typedef struct vnode * vnode_t; struct proc; typedef struct proc * proc_t; +struct proc_ident; +typedef struct proc_ident * proc_ident_t; + struct uio; typedef struct uio * uio_t; @@ -111,6 +114,7 @@ typedef struct mount * mount_t; typedef struct vnode * vnode_t; #endif typedef struct proc * proc_t; +typedef struct proc_ident * proc_ident_t; typedef struct uio * uio_t; typedef struct user_iovec * user_iovec_t; typedef struct vfs_context * vfs_context_t; diff --git a/bsd/sys/kpi_mbuf.h b/bsd/sys/kpi_mbuf.h index bfeafb186..970356ec2 100644 --- a/bsd/sys/kpi_mbuf.h +++ b/bsd/sys/kpi_mbuf.h @@ -508,7 +508,7 @@ extern errno_t mbuf_cluster_get_prop(mbuf_t mbuf, u_int32_t *prop); * smaller during subsequent requests. */ extern errno_t mbuf_getcluster(mbuf_how_t how, mbuf_type_t type, size_t size, - mbuf_t *mbuf); + mbuf_t *mbuf) __NKE_API_DEPRECATED; /*! @@ -2048,4 +2048,5 @@ extern errno_t mbuf_set_keepalive_flag(mbuf_t mbuf, boolean_t is_keepalive); } __END_DECLS +#undef __NKE_API_DEPRECATED #endif /* __KPI_MBUF__ */ diff --git a/bsd/sys/kpi_socket.h b/bsd/sys/kpi_socket.h index 7eaa3367a..bd2425ee9 100644 --- a/bsd/sys/kpi_socket.h +++ b/bsd/sys/kpi_socket.h @@ -88,7 +88,7 @@ typedef void (*sock_upcall)(socket_t so, void *cookie, int waitf); * @param cookie The cookie passed in when the socket was created. * @param event Indicates the event as defined by SO_FILT_HINT_* */ -typedef void (*sock_evupcall)(socket_t so, void *cookie, u_int32_t event); +typedef void (*sock_evupcall)(socket_t so, void *cookie, long event); #endif /* KERNEL_PRIVATE */ /*! 
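The kpi_socket.h change above widens the sock_evupcall event argument from u_int32_t to long, and the next hunk makes the matching change to sock_catchevents()'s event mask. A rough sketch of a callback written against the new signatures; my_ev_upcall() and my_attach() are invented for illustration, while the SO_FILT_HINT_* values are existing private-KPI event flags:

/*
 * Sketch only: an event upcall using the widened `long event` parameter,
 * registered for reset/disconnect hints via sock_catchevents().
 */
static void
my_ev_upcall(socket_t so, void *cookie, long event)
{
	if (event & (SO_FILT_HINT_CONNRESET | SO_FILT_HINT_DISCONNECTED)) {
		/* the peer went away; tear down whatever state hangs off cookie */
	}
}

static errno_t
my_attach(socket_t so, void *cookie)
{
	return sock_catchevents(so, my_ev_upcall, cookie,
	    SO_FILT_HINT_CONNRESET | SO_FILT_HINT_DISCONNECTED);
}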
@@ -606,10 +606,10 @@ extern void sock_setupcalls_locked(socket_t sock, * indicating the registered event(s). */ extern errno_t sock_catchevents(socket_t sock, sock_evupcall event_callback, - void *event_context, u_int32_t event_mask); + void *event_context, long event_mask); extern void sock_catchevents_locked(socket_t sock, sock_evupcall ecallback, - void *econtext, u_int32_t emask); + void *econtext, long emask); /* @@ -623,4 +623,5 @@ extern int sock_iskernel(socket_t); #endif /* KERNEL_PRIVATE */ __END_DECLS +#undef __NKE_API_DEPRECATED #endif /* __KPI_SOCKET__ */ diff --git a/bsd/sys/kpi_socketfilter.h b/bsd/sys/kpi_socketfilter.h index 5af14bec0..82522a89f 100644 --- a/bsd/sys/kpi_socketfilter.h +++ b/bsd/sys/kpi_socketfilter.h @@ -738,4 +738,5 @@ extern errno_t sockopt_copyout(sockopt_t sopt, void *data, size_t length) __NKE_API_DEPRECATED; __END_DECLS +#undef __NKE_API_DEPRECATED #endif /* __KPI_SOCKETFILTER__ */ diff --git a/bsd/sys/linker_set.h b/bsd/sys/linker_set.h index 820bc2e36..23b3a38a3 100644 --- a/bsd/sys/linker_set.h +++ b/bsd/sys/linker_set.h @@ -62,7 +62,7 @@ * The following macros are used to declare global sets of objects, which * are collected by the linker into a `linker set' as defined below. * For Mach-O, this is done by constructing a separate segment inside the - * __DATA section for each set. The contents of this segment are an array + * __DATA_CONST section for each set. The contents of this segment are an array * of pointers to the objects in the set. * * Note that due to limitations of the Mach-O format, there cannot @@ -73,12 +73,41 @@ #ifdef KERNEL # include # include + +# define MACH_HEADER_TYPE kernel_mach_header_t +# define GETSECTIONDATA_VARIANT getsectdatafromheader +# define SECTDATA_SIZE_TYPE unsigned long +# define MH_EXECUTE_HEADER &_mh_execute_header +# define IMAGE_SLIDE_CORRECT 0 #else # include # include # include +# include +# include + +# if __LP64__ +# define MACH_HEADER_TYPE struct mach_header_64 +# define GETSECTIONDATA_VARIANT getsectdatafromheader_64 +# define SECTDATA_SIZE_TYPE uint64_t +# define MH_EXECUTE_HEADER _NSGetMachExecuteHeader() +# else +# define MACH_HEADER_TYPE struct mach_header +# define GETSECTIONDATA_VARIANT getsectdatafromheader +# define SECTDATA_SIZE_TYPE uint32_t +# define MH_EXECUTE_HEADER _NSGetMachExecuteHeader() +# endif #endif +#if __LP64__ +# define LINKER_SET_ENTRY_PACKED +# define LINKER_SET_SEGMENT __DATA_CONST +# define LINKER_SET_SEGMENT_CSTR "__DATA_CONST" +#else +# define LINKER_SET_ENTRY_PACKED __attribute__((packed)) +# define LINKER_SET_SEGMENT __DATA +# define LINKER_SET_SEGMENT_CSTR "__DATA" +#endif /* * Private macros, not to be used outside this header file. 
@@ -86,13 +115,13 @@ * The objective of this macro stack is to produce the following output, * given SET and SYM as arguments: * - * void const * __set_SET_sym_SYM __attribute__((section("__DATA,SET"))) = & SYM + * void const * __set_SET_sym_SYM __attribute__((section("__DATA_CONST,SET"))) = & SYM */ /* Wrap entries in a type that can be blacklisted from KASAN */ struct linker_set_entry { void *ptr; -} __attribute__((packed)); +} LINKER_SET_ENTRY_PACKED; #ifdef __LS_VA_STRINGIFY__ # undef __LS_VA_STRINGIFY__ @@ -104,7 +133,7 @@ struct linker_set_entry { #define __LS_VA_STRCONCAT(_x, _y) __LS_VA_STRINGIFY(_x,_y) #define __LINKER_MAKE_SET(_set, _sym) \ /*__unused*/ /*static*/ const struct linker_set_entry /*const*/ __set_##_set##_sym_##_sym \ - __attribute__ ((section(__LS_VA_STRCONCAT(__DATA,_set)),used)) = { (void *)&_sym } + __attribute__ ((section(__LS_VA_STRCONCAT(LINKER_SET_SEGMENT,_set)),used)) = { (void *)&_sym } /* the line above is very fragile - if your compiler breaks linker sets, * just play around with "static", "const", "used" etc. :-) */ @@ -165,7 +194,7 @@ struct linker_set_entry { (((_cast)(LINKER_SET_OBJECT_BEGIN(_object, _set)))[_i]) #define LINKER_SET_FOREACH(_pvar, _cast, _set) \ - LINKER_SET_OBJECT_FOREACH((kernel_mach_header_t *)&_mh_execute_header, _pvar, _cast, _set) + LINKER_SET_OBJECT_FOREACH((MACH_HEADER_TYPE *)MH_EXECUTE_HEADER, _pvar, _cast, _set) /* * Implementation. @@ -176,29 +205,55 @@ struct linker_set_entry { * Returns an upper bound to the linker set (base + size). */ +static __inline intptr_t +__linker_get_slide(struct mach_header *_header) +{ +#ifndef KERNEL + /* + * Gross. + * + * We cannot get the image slide directly from the header, so we need to + * determine the image's index and ask for the slide of that index. 
+ */ + uint32_t i = 0; + for (i = 0; i < _dyld_image_count(); i++) { + const struct mach_header *hdr = _dyld_get_image_header(i); + if (_header == hdr) { + return _dyld_get_image_vmaddr_slide(i); + } + } + return 0; +#else + (void)_header; + return 0; +#endif +} + static __inline void ** -__linker_set_object_begin(kernel_mach_header_t *_header, const char *_set) +__linker_set_object_begin(MACH_HEADER_TYPE *_header, const char *_set) __attribute__((__const__)); static __inline void ** -__linker_set_object_begin(kernel_mach_header_t *_header, const char *_set) +__linker_set_object_begin(MACH_HEADER_TYPE *_header, const char *_set) { - void *_set_begin; - unsigned long _size; + char *_set_begin; + SECTDATA_SIZE_TYPE _size; - _set_begin = getsectdatafromheader(_header, "__DATA", _set, &_size); - return (void **) _set_begin; + _set_begin = (char *)GETSECTIONDATA_VARIANT(_header, LINKER_SET_SEGMENT_CSTR, _set, &_size); + _set_begin += __linker_get_slide((struct mach_header *)_header); + return (void **)(uintptr_t)_set_begin; } static __inline void ** -__linker_set_object_limit(kernel_mach_header_t *_header, const char *_set) +__linker_set_object_limit(MACH_HEADER_TYPE *_header, const char *_set) __attribute__((__const__)); static __inline void ** -__linker_set_object_limit(kernel_mach_header_t *_header, const char *_set) +__linker_set_object_limit(MACH_HEADER_TYPE *_header, const char *_set) { - void *_set_begin; - unsigned long _size; + char *_set_begin; + SECTDATA_SIZE_TYPE _size; - _set_begin = getsectdatafromheader(_header, "__DATA", _set, &_size); + _set_begin = (char *)GETSECTIONDATA_VARIANT(_header, LINKER_SET_SEGMENT_CSTR, _set, &_size); + _set_begin += __linker_get_slide((struct mach_header *)_header); return (void **) ((uintptr_t) _set_begin + _size); } diff --git a/bsd/sys/lockstat.h b/bsd/sys/lockstat.h index 35c8e30b0..8e49c5a76 100644 --- a/bsd/sys/lockstat.h +++ b/bsd/sys/lockstat.h @@ -41,7 +41,6 @@ extern "C" { #define LS_LCK_MTX_TRY_SPIN_LOCK "lck_mtx_try_spin_lock" #define LS_LCK_MTX_EXT_LOCK "lck_mtx_ext_lock" #define LS_LCK_MTX_EXT_UNLOCK "lck_mtx_ext_unlock" -#define LS_LCK_MTX_EXT_TRY_LOCK "lck_mtx_ext_try_lock" #define LS_LCK_MTX_LOCK_SPIN_LOCK "lck_mtx_lock_spin" #define LS_LCK_SPIN_LOCK "lck_spin_lock" @@ -52,8 +51,11 @@ extern "C" { #define LS_LCK_RW_DONE "lck_rw_done" #define LS_LCK_RW_TRY_LOCK_EXCL "lck_rw_try_lock_exclusive" #define LS_LCK_RW_TRY_LOCK_SHARED "lck_rw_try_lock_shared" -#define LS_LCK_RW_LOCK_SHARED_TO_EXCL "lck_rw_shared_to_exclusive" -#define LS_LCK_RW_LOCK_EXCL_TO_SHARED "lck_rw_exclusive_to_shared" +#define LS_LCK_RW_LOCK_SHARED_TO_EXCL "lck_rw_lock_shared_to_exclusive" +#define LS_LCK_RW_LOCK_EXCL_TO_SHARED "lck_rw_lock_exclusive_to_shared" +#define LS_LCK_TICKET_LOCK "lck_ticket_lock" +#define LS_LCK_TICKET_UNLOCK "lck_ticket_unlock" + #define LS_ACQUIRE "acquire" #define LS_RELEASE "release" @@ -64,16 +66,13 @@ extern "C" { #define LS_TYPE_ADAPTIVE "adaptive" #define LS_TYPE_SPIN "spin" -#define LS_TYPE_ILK "interlock" /* OS X only */ -#define LS_TYPE_THREAD "thread" /* Solaris only */ #define LS_TYPE_RW "rw" -#define LS_TYPE_RWUPGRADE "rwupgrade" /* OS X only */ +#define LS_TYPE_TICKET "ticket" #define LSA_ACQUIRE (LS_TYPE_ADAPTIVE "-" LS_ACQUIRE) #define LSA_RELEASE (LS_TYPE_ADAPTIVE "-" LS_RELEASE) #define LSA_SPIN (LS_TYPE_ADAPTIVE "-" LS_SPIN) #define LSA_BLOCK (LS_TYPE_ADAPTIVE "-" LS_BLOCK) -#define LSA_ILK_SPIN (LS_TYPE_ILK "-" LS_SPIN) #define LSS_ACQUIRE (LS_TYPE_SPIN "-" LS_ACQUIRE) #define LSS_RELEASE (LS_TYPE_SPIN "-" LS_RELEASE) 
#define LSS_SPIN (LS_TYPE_SPIN "-" LS_SPIN) @@ -82,9 +81,10 @@ extern "C" { #define LSR_BLOCK (LS_TYPE_RW "-" LS_BLOCK) #define LSR_SPIN (LS_TYPE_RW "-" LS_SPIN) #define LSR_UPGRADE (LS_TYPE_RW "-" LS_UPGRADE) -#define LSR_UPGRADE_BLOCK (LS_TYPE_RWUPGRADE "-" LS_BLOCK) #define LSR_DOWNGRADE (LS_TYPE_RW "-" LS_DOWNGRADE) -#define LST_SPIN (LS_TYPE_THREAD "-" LS_SPIN) +#define LST_ACQUIRE (LS_TYPE_TICKET "-" LS_ACQUIRE) +#define LST_RELEASE (LS_TYPE_TICKET "-" LS_RELEASE) +#define LST_SPIN (LS_TYPE_TICKET "-" LS_SPIN) #ifdef __cplusplus } diff --git a/bsd/sys/make_symbol_aliasing.sh b/bsd/sys/make_symbol_aliasing.sh index 8e98dee4b..24fb244de 100755 --- a/bsd/sys/make_symbol_aliasing.sh +++ b/bsd/sys/make_symbol_aliasing.sh @@ -77,16 +77,21 @@ cat <= ${value}" - echo "#define __DARWIN_ALIAS_STARTING_IPHONE_${str}(x) x" - echo "#else" - echo "#define __DARWIN_ALIAS_STARTING_IPHONE_${str}(x)" - echo "#endif" - echo "" + set -- $(echo "$ver" | tr '.' ' ') + ver_major=$1 + ver_minor=$2 + ver_rel=$3 + if [ -z "$ver_rel" ]; then + # don't produce these defines for releases with tertiary release numbers + value=$(printf "%d%02d00" ${ver_major} ${ver_minor}) + str=$(printf "__IPHONE_%d_%d" ${ver_major} ${ver_minor}) + echo "#if defined(__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__ >= ${value}" + echo "#define __DARWIN_ALIAS_STARTING_IPHONE_${str}(x) x" + echo "#else" + echo "#define __DARWIN_ALIAS_STARTING_IPHONE_${str}(x)" + echo "#endif" + echo "" + fi done for ver in $(${AVAILABILITY_PL} --macosx) ; do diff --git a/bsd/sys/malloc.h b/bsd/sys/malloc.h index 66dccfa5b..4bf21625f 100644 --- a/bsd/sys/malloc.h +++ b/bsd/sys/malloc.h @@ -83,236 +83,90 @@ #define M_ZERO 0x0004 /* bzero the allocation */ #define M_NULL 0x0008 /* return NULL if space is unavailable*/ -#ifdef BSD_KERNEL_PRIVATE +#ifdef XNU_KERNEL_PRIVATE + +#include +#include -#define KMEMSTATS 0 +ZONE_VIEW_DECLARE(ZV_NAMEI); /* * Types of memory to be allocated (not all are used by us) */ -#define M_FREE 0 /* should be on free list */ -#define M_MBUF 1 /* mbuf */ #define M_DEVBUF 2 /* device driver memory */ -#define M_SOCKET 3 /* socket structure */ #define M_PCB 4 /* protocol control block */ #define M_RTABLE 5 /* routing tables */ -#define M_HTABLE 6 /* IMP host tables */ -#define M_FTABLE 7 /* fragment reassembly header */ -#define M_ZOMBIE 8 /* zombie proc status */ #define M_IFADDR 9 /* interface address */ -#define M_SOOPTS 10 /* socket options */ #define M_SONAME 11 /* socket name */ -#define M_NAMEI 12 /* namei path name buffer */ -#define M_GPROF 13 /* kernel profiling buffer */ -#define M_IOCTLOPS 14 /* ioctl data buffer */ -#define M_MAPMEM 15 /* mapped memory descriptors */ -#define M_CRED 16 /* credentials */ #define M_PGRP 17 /* process group header */ -#define M_SESSION 18 /* session header */ -#define M_IOV32 19 /* large iov's for 32 bit process */ -#define M_MOUNT 20 /* vfs mount struct */ #define M_FHANDLE 21 /* network file handle */ -#define M_NFSREQ 22 /* NFS request header */ -#define M_NFSMNT 23 /* NFS mount structure */ #define M_NFSNODE 24 /* NFS vnode private part */ #define M_VNODE 25 /* Dynamically allocated vnodes */ #define M_CACHE 26 /* Dynamically allocated cache entries */ #define M_DQUOT 27 /* UFS quota entries */ #define M_PROC_UUID_POLICY 28 /* proc UUID policy entries */ #define M_SHM 29 /* SVID compatible shared memory segments */ -#define M_PLIMIT 30 /* plimit structures */ -#define M_SIGACTS 31 /* sigacts structures */ -#define M_VMOBJ 32 /* VM 
object structure */ -#define M_VMOBJHASH 33 /* VM object hash structure */ -#define M_VMPMAP 34 /* VM pmap */ -#define M_VMPVENT 35 /* VM phys-virt mapping entry */ -#define M_VMPAGER 36 /* XXX: VM pager struct */ -#define M_VMPGDATA 37 /* XXX: VM pager private data */ -#define M_FILEPROC 38 /* Open file structure */ -#define M_FILEDESC 39 /* Open file descriptor table */ #define M_LOCKF 40 /* Byte-range locking structures */ #define M_PROC 41 /* Proc structures */ -#define M_PSTATS 42 /* pstats proc sub-structures */ -#define M_SEGMENT 43 /* Segment for LFS */ -#define M_LFSNODE 44 /* LFS vnode private part */ -#define M_FFSNODE 45 /* FFS vnode private part */ -#define M_MFSNODE 46 /* MFS vnode private part */ -#define M_NQLEASE 47 /* XXX: Nqnfs lease */ -#define M_NQMHOST 48 /* XXX: Nqnfs host address table */ #define M_NETADDR 49 /* Export host address structure */ #define M_NFSSVC 50 /* NFS server structure */ -#define M_NFSUID 51 /* XXX: NFS uid mapping structure */ #define M_NFSD 52 /* NFS server daemon structure */ #define M_IPMOPTS 53 /* internet multicast options */ -#define M_IPMADDR 54 /* internet multicast address */ #define M_IFMADDR 55 /* link-level multicast address */ -#define M_MRTABLE 56 /* multicast routing tables */ -#define M_ISOFSMNT 57 /* ISOFS mount structure */ -#define M_ISOFSNODE 58 /* ISOFS vnode private part */ -#define M_NFSRVDESC 59 /* NFS server socket descriptor */ -#define M_NFSDIROFF 60 /* NFS directory offset data */ +#define M_NFSBIO 58 /* NFS client I/O buffers */ #define M_NFSBIGFH 61 /* NFS version 3 file handle */ -#define M_MSDOSFSMNT 62 /* MSDOS FS mount structure */ -#define M_MSDOSFSFAT 63 /* MSDOS FS fat table */ -#define M_MSDOSFSNODE 64 /* MSDOS FS vnode private part */ #define M_TTYS 65 /* allocated tty structures */ -#define M_EXEC 66 /* argument lists & other mem used by exec */ -#define M_MISCFSMNT 67 /* miscfs mount structures */ -#define M_MISCFSNODE 68 /* miscfs vnode private part */ -#define M_ADOSFSMNT 69 /* adosfs mount structures */ -#define M_ADOSFSNODE 70 /* adosfs vnode private part */ -#define M_ANODE 71 /* adosfs anode structures and tables. */ -#define M_BUFHDR 72 /* File buffer cache headers */ #define M_OFILETABL 73 /* Open file descriptor table */ -#define M_MCLUST 74 /* mbuf cluster buffers */ -/* unused 75 */ -/* unused 76 */ -/* unused 77 */ -/* unused 78 */ -/* unused 79 */ #define M_TEMP 80 /* misc temporary data buffers */ #define M_SECA 81 /* security associations, key management */ #define M_DEVFS 82 -#define M_IPFW 83 /* IP Forwarding/NAT */ #define M_UDFNODE 84 /* UDF inodes */ #define M_UDFMNT 85 /* UDF mount structures */ -#define M_IP6NDP 86 /* IPv6 Neighbour Discovery*/ #define M_IP6OPT 87 /* IPv6 options management */ -#define M_IP6MISC 88 /* IPv6 misc. 
memory */ -/* unused 89 */ -#define M_IGMP 90 -/* unused 91 */ -/* unused 92 */ -#define M_SPECINFO 93 /* special file node */ #define M_KQUEUE 94 /* kqueue system */ -/* unused 95 */ -#define M_CLRDAHEAD 96 /* storage for cluster read-ahead state */ -#define M_CLWRBEHIND 97 /* storage for cluster write-behind state */ -#define M_IOV64 98 /* large iov's for 64 bit process */ -#define M_FILEGLOB 99 /* fileglobal */ #define M_KAUTH 100 /* kauth subsystem */ #define M_DUMMYNET 101 /* dummynet */ -/* M_UNSAFEFS 102 */ -#define M_MACPIPELABEL 103 /* MAC pipe labels */ #define M_MACTEMP 104 /* MAC framework */ #define M_SBUF 105 /* string buffers */ -#define M_EXTATTR 106 /* extended attribute */ #define M_SELECT 107 /* per-thread select memory */ -/* M_TRAFFIC_MGT 108 */ -#if FS_COMPRESSION -#define M_DECMPFS_CNODE 109 /* decmpfs cnode structures */ -#endif /* FS_COMPRESSION */ #define M_INMFILTER 110 /* IPv4 multicast PCB-layer source filter */ -#define M_IPMSOURCE 111 /* IPv4 multicast IGMP-layer source filter */ #define M_IN6MFILTER 112 /* IPv6 multicast PCB-layer source filter */ #define M_IP6MOPTS 113 /* IPv6 multicast options */ -#define M_IP6MSOURCE 114 /* IPv6 multicast MLD-layer source filter */ -#define M_FLOW_DIVERT_PCB 115 /* flow divert control block */ -#define M_FLOW_DIVERT_GROUP 116 /* flow divert socket group */ #define M_IP6CGA 117 -#define M_NECP 118 /* General NECP policy data */ -#define M_NECP_SESSION_POLICY 119 /* NECP session policies */ -#define M_NECP_SOCKET_POLICY 120 /* NECP socket-level policies */ -#define M_NECP_IP_POLICY 121 /* NECP IP-level policies */ +#define M_NECP 118 /* General NECP policy data */ #define M_FD_VN_DATA 122 /* Per fd vnode data */ #define M_FD_DIRBUF 123 /* Directory entries' buffer */ #define M_NETAGENT 124 /* Network Agents */ #define M_EVENTHANDLER 125 /* Eventhandler */ #define M_LLTABLE 126 /* Link layer table */ #define M_NWKWQ 127 /* Network work queue */ -#define M_CFIL 128 /* Content Filter */ +#define M_CFIL 128 /* Content Filter */ #define M_LAST 129 /* Must be last type + 1 */ -#else /* BSD_KERNEL_PRIVATE */ - -#define M_RTABLE 5 /* routing tables */ -#define M_IFADDR 9 /* interface address (IOFireWireIP)*/ -#define M_LOCKF 40 /* Byte-range locking structures (msdos) */ -#define M_TEMP 80 /* misc temporary data buffers */ -#define M_KAUTH 100 /* kauth subsystem (smb) */ -#define M_SONAME 11 /* socket name (smb) */ -#define M_PCB 4 /* protocol control block (smb) */ -#define M_UDFNODE 84 /* UDF inodes (udf)*/ -#define M_UDFMNT 85 /* UDF mount structures (udf)*/ - -#endif /* BSD_KERNEL_PRIVATE */ - -#ifdef BSD_KERNEL_PRIVATE - -#if KMEMSTATS -struct kmemstats { - long ks_inuse; /* # of packets of this type currently - * in use */ - long ks_calls; /* total packets of this type ever allocated */ - long ks_memuse; /* total memory held in bytes */ - u_short ks_limblocks; /* number of times blocked for hitting limit */ - u_short ks_mapblocks; /* number of times blocked for kernel map */ - long ks_maxused; /* maximum number ever used */ - long ks_limit; /* most that are allowed to exist */ - long ks_size; /* sizes of this thing that are allocated */ - long ks_spare; -}; - -extern struct kmemstats kmemstats[]; -#endif /* KMEMSTATS */ - -#endif /* BSD_KERNEL_PRIVATE */ - -/* - * The malloc/free primatives used - * by the BSD kernel code. 
- */ -#ifdef XNU_KERNEL_PRIVATE - -#include - -#define MALLOC(space, cast, size, type, flags) \ - ({ VM_ALLOC_SITE_STATIC(0, 0); \ +#define MALLOC(space, cast, size, type, flags) \ + ({ VM_ALLOC_SITE_STATIC(0, 0); \ (space) = (cast)__MALLOC(size, type, flags, &site); }) -#define REALLOC(space, cast, addr, size, type, flags) \ - ({ VM_ALLOC_SITE_STATIC(0, 0); \ +#define REALLOC(space, cast, addr, size, type, flags) \ + ({ VM_ALLOC_SITE_STATIC(0, 0); \ (space) = (cast)__REALLOC(addr, size, type, flags, &site); }) -#define _MALLOC(size, type, flags) \ - ({ VM_ALLOC_SITE_STATIC(0, 0); \ +#define _MALLOC(size, type, flags) \ + ({ VM_ALLOC_SITE_STATIC(0, 0); \ __MALLOC(size, type, flags, &site); }) -#define _REALLOC(addr, size, type, flags) \ - ({ VM_ALLOC_SITE_STATIC(0, 0); \ + +#define _REALLOC(addr, size, type, flags) \ + ({ VM_ALLOC_SITE_STATIC(0, 0); \ __REALLOC(addr, size, type, flags, &site); }) -#define _MALLOC_ZONE(size, type, flags) \ - ({ VM_ALLOC_SITE_STATIC(0, 0); \ - __MALLOC_ZONE(size, type, flags, &site); }) +#define _FREE(addr, type) \ + (kheap_free_addr)(KHEAP_DEFAULT, addr) -#define FREE(addr, type) \ -_Pragma("clang diagnostic push") \ -_Pragma("clang diagnostic ignored \"-Wshadow\"") \ - do { \ - _Static_assert(sizeof (addr) == sizeof (void *) || sizeof (addr) == sizeof (mach_vm_address_t), "addr is not a pointer"); \ - void *__tmp_addr = (void *)addr; \ - int __tmp_type = type; \ - addr = (typeof(addr)) NULL; \ - _FREE(__tmp_addr, __tmp_type); \ - } while (0) \ -_Pragma("clang diagnostic pop") - -#define MALLOC_ZONE(space, cast, size, type, flags) \ - (space) = (cast)_MALLOC_ZONE(size, type, flags) +#define FREE(addr, type) \ + kheap_free_addr(KHEAP_DEFAULT, addr) -#define FREE_ZONE(addr, size, type) \ -_Pragma("clang diagnostic push") \ -_Pragma("clang diagnostic ignored \"-Wshadow\"") \ - do { \ - _Static_assert(sizeof (addr) == sizeof (void *) || sizeof (addr) == sizeof (mach_vm_address_t), "addr is not a pointer"); \ - void *__tmp_addr = (void *)addr; \ - size_t __tmp_size = size; \ - int __tmp_type = type; \ - addr = (typeof(addr)) NULL; \ - _FREE_ZONE(__tmp_addr, __tmp_size, __tmp_type); \ - } while (0) \ -_Pragma("clang diagnostic pop") +#pragma GCC visibility push(hidden) extern void *__MALLOC( size_t size, @@ -320,10 +174,6 @@ extern void *__MALLOC( int flags, vm_allocation_site_t *site) __attribute__((alloc_size(1))); -extern void _FREE( - void *addr, - int type); - extern void *__REALLOC( void *addr, size_t size, @@ -331,28 +181,25 @@ extern void *__REALLOC( int flags, vm_allocation_site_t *site) __attribute__((alloc_size(2))); -extern void *__MALLOC_ZONE( - size_t size, - int type, - int flags, - vm_allocation_site_t *site); - -extern void _FREE_ZONE( - void *elem, - size_t size, - int type); - +#pragma GCC visibility pop #else /* XNU_KERNEL_PRIVATE */ +#define M_PCB 4 /* protocol control block (smb) */ +#define M_RTABLE 5 /* routing tables */ +#define M_IFADDR 9 /* interface address (IOFireWireIP)*/ +#define M_SONAME 11 /* socket name (smb) */ +#define M_LOCKF 40 /* Byte-range locking structures (msdos) */ +#define M_TEMP 80 /* misc temporary data buffers */ +#define M_UDFNODE 84 /* UDF inodes (udf)*/ +#define M_UDFMNT 85 /* UDF mount structures (udf)*/ +#define M_KAUTH 100 /* kauth subsystem (smb) */ + #define MALLOC(space, cast, size, type, flags) \ (space) = (cast)_MALLOC(size, type, flags) #define FREE(addr, type) \ _FREE((void *)addr, type) -#define REALLOC(space, cast, addr, size, type, flags) \ - (space) = (cast)_REALLOC(addr, size, type, flags) - 
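/*
 * Illustrative sketch, not part of the patch: how a kernel-internal caller
 * uses the reworked macros above. Allocation still goes through __MALLOC()
 * with a static vm_allocation_site_t for accounting, while FREE() now simply
 * forwards to kheap_free_addr(KHEAP_DEFAULT, ...), which frees by address
 * alone. The function and variable names below are hypothetical, and the
 * usual errno/copyin declarations are assumed to be in scope.
 */
static int
example_copy_user_string(user_addr_t uaddr, size_t maxlen)
{
	char *buf;
	size_t done;
	int error;

	MALLOC(buf, char *, maxlen, M_TEMP, M_WAITOK | M_ZERO);
	if (buf == NULL) {
		return ENOMEM;
	}
	error = copyinstr(uaddr, buf, maxlen, &done);
	/*
	 * Unlike the removed FREE() above, the kheap-based FREE() does not
	 * clear the caller's pointer, so buf is dangling after this call.
	 */
	FREE(buf, M_TEMP);
	return error;
}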
#define MALLOC_ZONE(space, cast, size, type, flags) \ (space) = (cast)_MALLOC_ZONE(size, type, flags) @@ -368,12 +215,6 @@ extern void _FREE( void *addr, int type); -extern void *_REALLOC( - void *addr, - size_t size, - int type, - int flags); - extern void *_MALLOC_ZONE( size_t size, int type, @@ -384,7 +225,6 @@ extern void _FREE_ZONE( size_t size, int type); - #endif /* !XNU_KERNEL_PRIVATE */ #endif /* KERNEL */ diff --git a/bsd/sys/mbuf.h b/bsd/sys/mbuf.h index ff2653d96..75369750d 100644 --- a/bsd/sys/mbuf.h +++ b/bsd/sys/mbuf.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999-2019 Apple Inc. All rights reserved. + * Copyright (c) 1999-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -165,14 +165,14 @@ struct m_hdr { * Packet tag structure (see below for details). */ struct m_tag { - u_int64_t m_tag_cookie; /* Error checking */ + uint64_t m_tag_cookie; /* Error checking */ #ifndef __LP64__ - u_int32_t pad; /* For structure alignment */ + uint32_t pad; /* For structure alignment */ #endif /* !__LP64__ */ SLIST_ENTRY(m_tag) m_tag_link; /* List of packet tags */ - u_int16_t m_tag_type; /* Module specific type */ - u_int16_t m_tag_len; /* Length of data */ - u_int32_t m_tag_id; /* Module ID */ + uint16_t m_tag_type; /* Module specific type */ + uint16_t m_tag_len; /* Length of data */ + uint32_t m_tag_id; /* Module ID */ }; #define M_TAG_ALIGN(len) \ @@ -225,6 +225,10 @@ struct pf_mtag { #endif /* PF_ECN */ }; +/* System reserved PF tags */ +#define PF_TAG_ID_SYSTEM_SERVICE 0xff00 +#define PF_TAG_ID_STACK_DROP 0xff01 + /* * PF fragment tag */ @@ -242,40 +246,30 @@ struct pf_fragment_tag { struct tcp_pktinfo { union { struct { - u_int32_t segsz; /* segment size (actual MSS) */ - u_int32_t start_seq; /* start seq of this packet */ + uint32_t segsz; /* segment size (actual MSS) */ + uint32_t start_seq; /* start seq of this packet */ pid_t pid; pid_t e_pid; } __tx; struct { - u_int16_t lro_pktlen; /* max seg size encountered */ - u_int8_t lro_npkts; /* # of coalesced TCP pkts */ - u_int8_t lro_timediff; /* time spent in LRO */ + uint8_t seg_cnt; /* # of coalesced TCP pkts */ } __rx; } __offload; - union { - u_int32_t pri; /* send msg priority */ - u_int32_t seq; /* recv msg sequence # */ - } __msgattr; #define tso_segsz proto_mtag.__pr_u.tcp.tm_tcp.__offload.__tx.segsz #define tx_start_seq proto_mtag.__pr_u.tcp.tm_tcp.__offload.__tx.start_seq #define tx_tcp_pid proto_mtag.__pr_u.tcp.tm_tcp.__offload.__tx.pid #define tx_tcp_e_pid proto_mtag.__pr_u.tcp.tm_tcp.__offload.__tx.e_pid -#define lro_pktlen proto_mtag.__pr_u.tcp.tm_tcp.__offload.__rx.lro_pktlen -#define lro_npkts proto_mtag.__pr_u.tcp.tm_tcp.__offload.__rx.lro_npkts -#define lro_elapsed proto_mtag.__pr_u.tcp.tm_tcp.__offload.__rx.lro_timediff -#define msg_pri proto_mtag.__pr_u.tcp.tm_tcp.__msgattr.pri -#define msg_seq proto_mtag.__pr_u.tcp.tm_tcp.__msgattr.seq +#define seg_cnt proto_mtag.__pr_u.tcp.tm_tcp.__offload.__rx.seg_cnt }; /* * MPTCP mbuf tag */ struct mptcp_pktinfo { - u_int64_t mtpi_dsn; /* MPTCP Data Sequence Number */ - u_int32_t mtpi_rel_seq; /* Relative Seq Number */ - u_int16_t mtpi_length; /* Length of mapping */ - u_int16_t mtpi_csum; + uint64_t mtpi_dsn; /* MPTCP Data Sequence Number */ + uint32_t mtpi_rel_seq; /* Relative Seq Number */ + uint16_t mtpi_length; /* Length of mapping */ + uint16_t mtpi_csum; #define mp_dsn proto_mtag.__pr_u.tcp.tm_mptcp.mtpi_dsn #define mp_rseq proto_mtag.__pr_u.tcp.tm_mptcp.mtpi_rel_seq #define mp_rlen proto_mtag.__pr_u.tcp.tm_mptcp.mtpi_length @@ -453,6 
+447,9 @@ struct pkthdr { */ SLIST_HEAD(packet_tags, m_tag) tags; /* list of external tags */ union builtin_mtag builtin_mtag; + + uint32_t comp_gencnt; + uint32_t padding; /* * Module private scratch space (32-bit aligned), currently 16-bytes * large. Anything stored here is not guaranteed to survive across @@ -538,8 +535,7 @@ struct pkthdr { #define PKTF_INET_RESOLVE 0x40 /* IPv4 resolver packet */ #define PKTF_INET6_RESOLVE 0x80 /* IPv6 resolver packet */ #define PKTF_RESOLVE_RTR 0x100 /* pkt is for resolving router */ -#define PKTF_SW_LRO_PKT 0x200 /* pkt is a large coalesced pkt */ -#define PKTF_SW_LRO_DID_CSUM 0x400 /* IP and TCP checksums done by LRO */ +#define PKTF_SKIP_PKTAP 0x200 /* pkt has already passed through pktap */ #define PKTF_MPTCP 0x800 /* TCP with MPTCP metadata */ #define PKTF_MPSO 0x1000 /* MPTCP socket meta data */ #define PKTF_LOOP 0x2000 /* loopbacked packet */ @@ -678,9 +674,14 @@ struct mbuf { (CSUM_DELAY_IP | CSUM_DELAY_DATA | CSUM_DELAY_IPV6_DATA | \ CSUM_DATA_VALID | CSUM_PARTIAL | CSUM_ZERO_INVERT) -#define CSUM_RX_FLAGS \ +#define CSUM_RX_FULL_FLAGS \ (CSUM_IP_CHECKED | CSUM_IP_VALID | CSUM_PSEUDO_HDR | \ - CSUM_DATA_VALID | CSUM_PARTIAL) + CSUM_DATA_VALID) + +#define CSUM_RX_FLAGS \ + (CSUM_RX_FULL_FLAGS | CSUM_PARTIAL) + + /* * Note: see also IF_HWASSIST_CSUM defined in @@ -1049,8 +1050,8 @@ struct name { \ ((struct mbuf *)(void *)((char *)(head)->mq_last - \ __builtin_offsetof(struct mbuf, m_nextpkt)))) -#define max_linkhdr P2ROUNDUP(_max_linkhdr, sizeof (u_int32_t)) -#define max_protohdr P2ROUNDUP(_max_protohdr, sizeof (u_int32_t)) +#define max_linkhdr (int)P2ROUNDUP(_max_linkhdr, sizeof (uint32_t)) +#define max_protohdr (int)P2ROUNDUP(_max_protohdr, sizeof (uint32_t)) #endif /* XNU_KERNEL_PRIVATE */ /* @@ -1398,7 +1399,7 @@ extern int _max_linkhdr; /* largest link-level header */ /* Use max_protohdr instead of _max_protohdr */ extern int _max_protohdr; /* largest protocol header */ -__private_extern__ unsigned int mbuf_default_ncl(int, u_int64_t); +__private_extern__ unsigned int mbuf_default_ncl(uint64_t); __private_extern__ void mbinit(void); __private_extern__ struct mbuf *m_clattach(struct mbuf *, int, caddr_t, void (*)(caddr_t, u_int, caddr_t), u_int, caddr_t, int, int); diff --git a/bsd/sys/mcache.h b/bsd/sys/mcache.h index db058126b..243965bd7 100644 --- a/bsd/sys/mcache.h +++ b/bsd/sys/mcache.h @@ -400,8 +400,6 @@ __private_extern__ void mcache_buffer_log(mcache_audit_t *, void *, mcache_t *, struct timeval *); __private_extern__ void mcache_set_pattern(u_int64_t, void *, size_t); __private_extern__ void *mcache_verify_pattern(u_int64_t, void *, size_t); -__private_extern__ void *mcache_verify_set_pattern(u_int64_t, u_int64_t, - void *, size_t); __private_extern__ void mcache_audit_free_verify(mcache_audit_t *, void *, size_t, size_t); __private_extern__ void mcache_audit_free_verify_set(mcache_audit_t *, diff --git a/bsd/sys/memory_maintenance.h b/bsd/sys/memory_maintenance.h index 75d6bf5e7..6b35379e7 100644 --- a/bsd/sys/memory_maintenance.h +++ b/bsd/sys/memory_maintenance.h @@ -39,7 +39,7 @@ /* * The kern.darkboot sysctl can be controlled from kexts or userspace. If * processes want to change the sysctl value, they require the - * 'com.apple.kern.darkboot' entitlement. + * 'com.apple.private.kernel.darkboot' entitlement. 
* * Operating the kern.darkboot sysctl is done via using the commands below: * diff --git a/bsd/sys/mman.h b/bsd/sys/mman.h index abe0b93c0..a6e093f59 100644 --- a/bsd/sys/mman.h +++ b/bsd/sys/mman.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2019 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2020 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -89,6 +89,12 @@ #include #include +#ifndef KERNEL +#if __DARWIN_C_LEVEL >= 200809L +#include +#endif /* __DARWIN_C_LEVEL */ +#endif /* KERNEL */ + /* * Protections are chosen from these bits, or-ed together */ @@ -145,9 +151,17 @@ #define MAP_RESILIENT_CODESIGN 0x2000 /* no code-signing failures */ #define MAP_RESILIENT_MEDIA 0x4000 /* no backing-store failures */ -#if !defined(CONFIG_EMBEDDED) -#define MAP_32BIT 0x8000 /* Return virtual addresses <4G only: Requires entitlement */ -#endif /* !defined(CONFIG_EMBEDDED) */ +#if defined(__MAC_OS_X_VERSION_MIN_REQUIRED) && __MAC_OS_X_VERSION_MIN_REQUIRED >= 101500 +#define MAP_32BIT 0x8000 /* Return virtual addresses <4G only */ +#endif /* defined(__MAC_OS_X_VERSION_MIN_REQUIRED) && __MAC_OS_X_VERSION_MIN_REQUIRED >= 101500 */ + + +/* + * Flags used to support translated processes. + */ +#define MAP_TRANSLATED_ALLOW_EXECUTE 0x20000 /* allow execute in translated processes */ + +#define MAP_UNIX03 0x40000 /* UNIX03 compliance */ #endif /* (!_POSIX_C_SOURCE || _DARWIN_C_SOURCE) */ @@ -211,6 +225,26 @@ #define MINCORE_ANONYMOUS 0x80 /* Page belongs to an anonymous object */ #endif /* (!_POSIX_C_SOURCE || _DARWIN_C_SOURCE) */ +#ifdef PRIVATE + +/* + * Crypt ID for decryption flow + */ +#define CRYPTID_NO_ENCRYPTION 0 /* File is unencrypted */ +#define CRYPTID_APP_ENCRYPTION 1 /* App binary is encrypted */ +#define CRYPTID_MODEL_ENCRYPTION 2 /* ML Model is encrypted */ + +/* + * Model encryption header + */ +typedef struct { + __uint64_t version; + __uint64_t originalSize; + __uint64_t reserved[4]; +} model_encryption_header_t; + +#endif /* #ifdef PRIVATE */ + #ifndef KERNEL diff --git a/bsd/sys/mount.h b/bsd/sys/mount.h index c3e884bad..6ff9616b3 100644 --- a/bsd/sys/mount.h +++ b/bsd/sys/mount.h @@ -456,7 +456,7 @@ union union_vfsidctl { /* the fields vc_vers and vc_fsid are compatible */ * New style VFS sysctls, do not reuse/conflict with the namespace for * private sysctls. */ -#define VFS_CTL_STATFS 0x00010001 /* statfs */ +#define VFS_CTL_OSTATFS 0x00010001 /* old legacy statfs */ #define VFS_CTL_UMOUNT 0x00010002 /* unmount */ #define VFS_CTL_QUERY 0x00010003 /* anything wrong? (vfsquery) */ #define VFS_CTL_NEWADDR 0x00010004 /* reconnect to new address */ @@ -466,6 +466,19 @@ union union_vfsidctl { /* the fields vc_vers and vc_fsid are compatible */ #define VFS_CTL_DISC 0x00010008 /* server disconnected */ #define VFS_CTL_SERVERINFO 0x00010009 /* information about fs server */ #define VFS_CTL_NSTATUS 0x0001000A /* netfs mount status */ +#define VFS_CTL_STATFS64 0x0001000B /* statfs64 */ + +#ifndef KERNEL +/* + * Automatically select the correct VFS_CTL_*STATFS* flavor based + * on what "struct statfs" layout the client will use. + */ +#if __DARWIN_64_BIT_INO_T +#define VFS_CTL_STATFS VFS_CTL_STATFS64 +#else +#define VFS_CTL_STATFS VFS_CTL_OSTATFS +#endif +#endif /* ! KERNEL */ struct vfsquery { u_int32_t vq_flags; @@ -566,7 +579,7 @@ struct vfs_fsentry { struct vfsops { /*! - * @function vfs_mount + * @field vfs_mount * @abstract Perform filesystem-specific operations required for mounting. 
* @discussion Typical operations include setting the mount-specific data with vfs_setfsprivate(). * Note that if a mount call fails, the filesystem must clean up any state it has constructed, because @@ -581,7 +594,7 @@ struct vfsops { int (*vfs_mount)(struct mount *mp, vnode_t devvp, user_addr_t data, vfs_context_t context); /*! - * @function vfs_start + * @field vfs_start * @abstract Mark a mount as ready to be used. * @discussion After receiving this calldown, a filesystem will be hooked into the mount list and should expect * calls down from the VFS layer. @@ -593,7 +606,7 @@ struct vfsops { int (*vfs_start)(struct mount *mp, int flags, vfs_context_t context); /*! - * @function vfs_unmount + * @field vfs_unmount * @abstract Perform filesystem-specific cleanup as part of unmount. * @discussion If the unmount downcall succeeds, VFS considers itself authorized to destroy all * state related to the mount. @@ -605,7 +618,7 @@ struct vfsops { int (*vfs_unmount)(struct mount *mp, int mntflags, vfs_context_t context); /*! - * @function vfs_root + * @field vfs_root * @abstract Get the root vnode of a filesystem. * @discussion Upon success, should return with an iocount held on the root vnode which the caller will * drop with vnode_put(). @@ -617,7 +630,7 @@ struct vfsops { int (*vfs_root)(struct mount *mp, struct vnode **vpp, vfs_context_t context); /*! - * @function vfs_quotactl + * @field vfs_quotactl * @abstract Manipulate quotas for a volume. * @param mp Mount for which to manipulate quotas. * @param cmds Detailed in "quotactl" manual page. @@ -629,7 +642,7 @@ struct vfsops { int (*vfs_quotactl)(struct mount *mp, int cmds, uid_t uid, caddr_t arg, vfs_context_t context); /*! - * @function vfs_getattr + * @field vfs_getattr * @abstract Get filesystem attributes. * @discussion See VFSATTR_RETURN, VFSATTR_ACTIVE, VFSATTR_SET_SUPPORTED, VFSATTR_WANTED macros. * @param mp Mount for which to get parameters. @@ -642,7 +655,7 @@ struct vfsops { /* int (*vfs_statfs)(struct mount *mp, struct vfsstatfs *sbp, vfs_context_t context);*/ /*! - * @function vfs_sync + * @field vfs_sync * @abstract Flush all filesystem data to backing store. * @discussion vfs_sync will be called as part of the sync() system call and during unmount. * @param mp Mountpoint to sync. @@ -653,7 +666,7 @@ struct vfsops { int (*vfs_sync)(struct mount *mp, int waitfor, vfs_context_t context); /*! - * @function vfs_vget + * @field vfs_vget * @abstract Get a vnode by file id (inode number). * @discussion This routine is chiefly used to build paths to vnodes. Result should be turned with an iocount that the * caller will drop with vnode_put(). @@ -665,7 +678,7 @@ struct vfsops { int (*vfs_vget)(struct mount *mp, ino64_t ino, struct vnode **vpp, vfs_context_t context); /*! - * @function vfs_fhtovp + * @field vfs_fhtovp * @abstract Get the vnode corresponding to a file handle. * @discussion Filesystems can return handles to files which are independent of their (transient) vnode identities. * vfs_thtovp converts that persistent handle back to a vnode. The vnode should be returned with an iocount which @@ -681,7 +694,7 @@ struct vfsops { vfs_context_t context); /*! - * @function vfs_vptofh + * @field vfs_vptofh * @abstract Get a persistent handle corresponding to a vnode. * @param mp Mount against which to convert the vnode to a handle. * @param fhlen Size of buffer provided for handle; set to size of actual handle returned. 
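/*
 * Illustrative sketch, not part of the patch: the @field tags above document
 * members of struct vfsops, which a filesystem fills in and hands to VFS via
 * a struct vfs_fsentry passed to vfs_fsadd(). A minimal table might look like
 * this; the example_* callbacks are hypothetical and assumed to be defined
 * elsewhere with the prototypes documented above.
 */
static struct vfsops example_vfsops = {
	.vfs_mount   = example_mount,    /* set up per-mount private data */
	.vfs_start   = example_start,    /* mount is now live */
	.vfs_unmount = example_unmount,  /* tear down per-mount state */
	.vfs_root    = example_root,     /* return root vnode with an iocount */
	.vfs_getattr = example_getattr,  /* report volume attributes */
	.vfs_sync    = example_sync,     /* flush dirty data to backing store */
	.vfs_init    = example_init,     /* one-time global initialization */
};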
@@ -692,7 +705,7 @@ struct vfsops { int (*vfs_vptofh)(struct vnode *vp, int *fhlen, unsigned char *fhp, vfs_context_t context); /*! - * @function vfs_init + * @field vfs_init * @abstract Prepare a filesystem for having instances mounted. * @discussion This routine is called once, before any particular instance of a filesystem * is mounted; it allows the filesystem to initialize whatever global data structures @@ -706,7 +719,7 @@ struct vfsops { int (*vfs_init)(struct vfsconf *); /*! - * @function vfs_sysctl + * @field vfs_sysctl * @abstract Broad interface for querying and controlling filesystem. * @discussion VFS defines VFS_CTL_QUERY as a generic status request which is answered * with the VQ_* macros in a "struct vfsquery." @@ -718,7 +731,7 @@ struct vfsops { int (*vfs_sysctl)(int *, u_int, user_addr_t, size_t *, user_addr_t, size_t, vfs_context_t context); /*! - * @function vfs_setattr + * @field vfs_setattr * @abstract Set filesystem attributes. * @discussion The other side of the vfs_getattr coin. Currently only called to set volume name. * @param mp Mount on which to set attributes. @@ -730,7 +743,7 @@ struct vfsops { int (*vfs_setattr)(struct mount *mp, struct vfs_attr *, vfs_context_t context); /*! - * @function vfs_ioctl + * @field vfs_ioctl * @abstract File system control operations. * @discussion Unlike vfs_sysctl, this is specific to a particular volume. * @param mp The mount to execute the command on. @@ -748,7 +761,7 @@ struct vfsops { int flags, vfs_context_t context); /*! - * @function vfs_vget_snapdir + * @field vfs_vget_snapdir * @abstract Get the vnode for the snapshot directory of a filesystem. * @discussion Upon success, should return with an iocount held on the root vnode which the caller will * drop with vnode_put(). @@ -795,11 +808,17 @@ typedef struct fs_role_mount_args { OS_ENUM(vfs_roles, uint32_t, VFS_SYSTEM_ROLE = 1, + VFS_RECOVERY_ROLE = 4, VFS_VM_ROLE = 8, + VFS_PREBOOT_ROLE = 16, VFS_DATA_ROLE = 64); #define VFSIOC_MOUNT_BYROLE _IOW('V', 4, fs_role_mount_args_t) +// When this is defined, it is safe to use VFS_RECOVERY_ROLE and +// VFS_PREBOOT_ROLE. +#define VFSIOC_MOUNT_BYROLE_has_recovery 1 + #endif /* KERNEL */ /* @@ -1045,14 +1064,18 @@ void vfs_clearextendedsecurity(mount_t mp); * @abstract Mark a filesystem as unable to use swap files. * @param mp Mount to mark. */ +#ifdef KERNEL_PRIVATE void vfs_setnoswap(mount_t mp); +#endif /*! * @function vfs_clearnoswap * @abstract Mark a filesystem as capable of using swap files. * @param mp Mount to mark. */ +#ifdef KERNEL_PRIVATE void vfs_clearnoswap(mount_t mp); +#endif /*! * @function vfs_setlocklocal @@ -1312,6 +1335,17 @@ void vfs_setcompoundopen(mount_t mp); uint64_t vfs_throttle_mask(mount_t mp); int vfs_isswapmount(mount_t mp); boolean_t vfs_context_is_dataless_manipulator(vfs_context_t); +boolean_t vfs_context_can_resolve_triggers(vfs_context_t); +void vfs_setmntsystem(mount_t mp); +void vfs_setmntsystemdata(mount_t mp); +void vfs_setmntswap(mount_t mp); +boolean_t vfs_is_basesystem(mount_t mp); + +OS_ENUM(bsd_bootfail_mode, uint32_t, + BSD_BOOTFAIL_SEAL_BROKEN = 1, + BSD_BOOTFAIL_MEDIA_MISSING = 2); + +boolean_t bsd_boot_to_recovery(bsd_bootfail_mode_t mode, uuid_t volume_uuid, boolean_t reboot); struct vnode_trigger_info; @@ -1392,6 +1426,14 @@ void mount_set_noreaddirext(mount_t); */ void vfs_get_statfs64(struct mount *mp, struct statfs64 *sfs); +/*! + * @function vfs_mount_id + * @abstract Retrieve the system-wide unique mount ID for a mount point. 
+ * The ID is generated at mount and does not change on remount. + * @param mp Mountpoint for which to get the mount ID. + */ +uint64_t vfs_mount_id(mount_t mp); + #endif /* KERNEL_PRIVATE */ __END_DECLS @@ -1439,6 +1481,9 @@ int statfs64(const char *, struct statfs64 *) __OSX_AVAILABLE_BUT_DEPRECATED #endif /* !__DARWIN_ONLY_64_BIT_INO_T */ int unmount(const char *, int); int getvfsbyname(const char *, struct vfsconf *); +#if PRIVATE +int pivot_root(const char *, const char *) __OSX_AVAILABLE(10.16); +#endif __END_DECLS #endif /* KERNEL */ diff --git a/bsd/sys/mount_internal.h b/bsd/sys/mount_internal.h index a8010e620..2af3a1283 100644 --- a/bsd/sys/mount_internal.h +++ b/bsd/sys/mount_internal.h @@ -108,14 +108,14 @@ struct mount { TAILQ_ENTRY(mount) mnt_list; /* mount list */ int32_t mnt_count; /* reference on the mount */ lck_mtx_t mnt_mlock; /* mutex that protects mount point */ - const struct vfsops *mnt_op; /* operations on fs */ - struct vfstable *mnt_vtable; /* configuration info */ - struct vnode *mnt_vnodecovered; /* vnode we mounted on */ + const struct vfsops * XNU_PTRAUTH_SIGNED_PTR("mount.vfsops") mnt_op; /* operations on fs */ + struct vfstable * XNU_PTRAUTH_SIGNED_PTR("mount.mnt_vtable") mnt_vtable; /* configuration info */ + struct vnode * XNU_PTRAUTH_SIGNED_PTR("mount.mnt_vnodecovered") mnt_vnodecovered; /* vnode we mounted on */ struct vnodelst mnt_vnodelist; /* list of vnodes this mount */ struct vnodelst mnt_workerqueue; /* list of vnodes this mount */ struct vnodelst mnt_newvnodes; /* list of vnodes this mount */ uint32_t mnt_flag; /* flags */ - uint32_t mnt_kern_flag; /* kernel only flags */ + uint32_t mnt_kern_flag; /* kernel only flags. NOTE: See mnt_supl_kern_flags below! */ uint32_t mnt_compound_ops; /* Available compound operations */ uint32_t mnt_lflag; /* mount life cycle flags */ uint32_t mnt_maxsymlinklen; /* max size of short symlink */ @@ -157,8 +157,8 @@ struct mount { uid_t mnt_fsowner; gid_t mnt_fsgroup; - struct label *mnt_mntlabel; /* MAC mount label */ - struct label *mnt_fslabel; /* MAC default fs label */ + struct label * XNU_PTRAUTH_SIGNED_PTR("mount.mnt_mntlabel") mnt_mntlabel; /* MAC mount label */ + struct label * XNU_PTRAUTH_SIGNED_PTR("mount.mnt_fslabel") mnt_fslabel; /* MAC default fs label */ /* * cache the rootvp of the last mount point @@ -199,6 +199,9 @@ struct mount { void *mnt_disk_conditioner_info; lck_mtx_t mnt_iter_lock; /* mutex that protects iteration of vnodes */ + + uint64_t mnt_mount_id; /* system-wide unique mount ID */ + uint32_t mnt_supl_kern_flag; /* Supplemental kernel-only mount flags */ }; /* @@ -242,17 +245,17 @@ extern struct mount * dead_mountp; * because the bits here were broken out from the high bits * of the mount flags. 
*/ -#define MNTK_SYSTEM 0x00000040 /* Volume associated with system volume (do not allow unmount) */ -#define MNTK_NOSWAP 0x00000080 /* swap files cannot be used on this mount */ +#define MNTK_SYSTEM 0x00000040 /* Volume associated with system volume (do not allow unmount) */ +#define MNTK_NOSWAP 0x00000080 /* swap files cannot be used on this mount */ #define MNTK_SWAP_MOUNT 0x00000100 /* we are swapping to this mount */ -#define MNTK_DENY_READDIREXT 0x00000200 /* Deny Extended-style readdir's for this volume */ +#define MNTK_DENY_READDIREXT 0x00000200 /* Deny Extended-style readdir's for this volume */ #define MNTK_PERMIT_UNMOUNT 0x00000400 /* Allow (non-forced) unmounts by UIDs other than the one that mounted the volume */ #define MNTK_TYPENAME_OVERRIDE 0x00000800 /* override the fstypename for statfs() */ #define MNTK_KERNEL_MOUNT 0x00001000 /* mount came from kernel side */ #ifdef CONFIG_IMGSRC_ACCESS #define MNTK_HAS_MOVED 0x00002000 -#define MNTK_BACKS_ROOT 0x00004000 #endif /* CONFIG_IMGSRC_ACCESS */ +#define MNTK_BACKS_ROOT 0x00004000 /* mount contains a disk image backing the root filesystem - therefore it mustn't be unmounted */ #define MNTK_AUTH_CACHE_TTL 0x00008000 /* rights cache has TTL - TTL of 0 disables cache */ #define MNTK_PATH_FROM_ID 0x00010000 /* mounted file system supports id-to-path lookups */ #define MNTK_UNMOUNT_PREFLIGHT 0x00020000 /* mounted file system wants preflight check during unmount */ @@ -260,19 +263,30 @@ extern struct mount * dead_mountp; #define MNTK_EXTENDED_ATTRS 0x00080000 /* mounted file system supports Extended Attributes VNOPs */ #define MNTK_LOCK_LOCAL 0x00100000 /* advisory locking is done above the VFS itself */ #define MNTK_VIRTUALDEV 0x00200000 /* mounted on a virtual device i.e. a disk image */ -#define MNTK_ROOTDEV 0x00400000 /* this filesystem resides on the same device as the root */ +#define MNTK_ROOTDEV 0x00400000 /* this filesystem resides on the same device as the root - appears unused as of 2020 */ #define MNTK_SSD 0x00800000 /* underlying device is of the solid state variety */ #define MNTK_UNMOUNT 0x01000000 /* unmount in progress */ -#define MNTK_MWAIT 0x02000000 /* waiting for unmount to finish */ +#define MNTK_MWAIT 0x02000000 /* waiting for unmount to finish - appears unused as of 2020 */ #define MNTK_WANTRDWR 0x04000000 /* upgrade to read/write requested */ -#if REV_ENDIAN_FS -#define MNT_REVEND 0x08000000 /* Reverse endian FS */ -#endif /* REV_ENDIAN_FS */ -#define MNTK_DIR_HARDLINKS 0x10000000 /* mounted file system supports directory hard links */ -#define MNTK_AUTH_OPAQUE 0x20000000 /* authorisation decisions are not made locally */ -#define MNTK_AUTH_OPAQUE_ACCESS 0x40000000 /* VNOP_ACCESS is reliable for remote auth */ +#define MNTK_SYSTEMDATA 0x08000000 /* volume is a Data volume tightly linked with System root volume. Firmlinks, etc */ +#define MNTK_DIR_HARDLINKS 0x10000000 /* mounted file system supports directory hard links */ +#define MNTK_AUTH_OPAQUE 0x20000000 /* authorisation decisions are not made locally */ +#define MNTK_AUTH_OPAQUE_ACCESS 0x40000000 /* VNOP_ACCESS is reliable for remote auth */ #define MNTK_EXTENDED_SECURITY 0x80000000 /* extended security supported */ + +/* + * Internal supplemental FS control flags stored in mnt_supl_kern_flag + * + * NOTE: The 32 bits in the above-mentioned 32bit flag word (mnt_kern_flag) have been + * exhausted, so this is intended as a supplement. 
+ */ +#define MNTK_SUPL_BASESYSTEM 0x00000001 + + +/* + * Mount Lifecycle Flags (stored in mnt_lflag) + */ #define MNT_LNOTRESP 0x00000001 /* mount not responding */ #define MNT_LUNMOUNT 0x00000002 /* mount in unmount */ #define MNT_LFORCE 0x00000004 /* mount in forced unmount */ @@ -280,7 +294,7 @@ extern struct mount * dead_mountp; #define MNT_LITER 0x00000010 /* mount in iteration */ #define MNT_LNEWVN 0x00000020 /* mount has new vnodes created */ #define MNT_LWAIT 0x00000040 /* wait for unmount op */ -#define MNT_LUNUSED 0x00000080 /* available flag bit, used to be MNT_LITERWAIT */ +#define MNT_LMOUNT 0x00000080 /* not finished mounting */ #define MNT_LDEAD 0x00000100 /* mount already unmounted*/ #define MNT_LNOSUB 0x00000200 /* submount - no recursion */ @@ -292,6 +306,11 @@ extern struct mount * dead_mountp; #define NFSV4_MAX_FH_SIZE 128 #define NFSV3_MAX_FH_SIZE 64 #define NFSV2_MAX_FH_SIZE 32 + +#ifdef MALLOC_DECLARE +MALLOC_DECLARE(M_FHANDLE); +#endif + struct fhandle { unsigned int fh_len; /* length of file handle */ unsigned char fh_data[NFS_MAX_FH_SIZE]; /* file handle value */ @@ -442,8 +461,17 @@ int mount_refdrain(mount_t); /* vfs_rootmountalloc should be kept as a private api */ errno_t vfs_rootmountalloc(const char *, const char *, mount_t *mpp); -int vfs_mount_rosv_data(void); -int vfs_mount_vm(void); +int vfs_mount_recovery(void); + +typedef uint32_t vfs_switch_root_flags_t; +#define VFSSR_VIRTUALDEV_PROHIBITED 0x01 /* Not allowed to pivot into virtual devices (disk images). + * This is really just because we don't have a great way to find + * the filesystem that backs the image in order to set + * MNTK_BACKS_ROOT on it, which would prevent even forced-unmounts. + * Also, lots of disk images are backed by userspace processes, + * which also seems like a bad idea for the root filesystem. 
*/ + +int vfs_switch_root(const char *, const char *, vfs_switch_root_flags_t); int vfs_mountroot(void); void vfs_unmountall(void); @@ -471,6 +499,12 @@ void mount_iterreset(mount_t); #define KERNEL_MOUNT_SNAPSHOT 0x04 /* Mounting a snapshot */ #define KERNEL_MOUNT_DATAVOL 0x08 /* mount the data volume */ #define KERNEL_MOUNT_VMVOL 0x10 /* mount the VM volume */ +#define KERNEL_MOUNT_PREBOOTVOL 0x20 /* mount the Preboot volume */ +#define KERNEL_MOUNT_RECOVERYVOL 0x40 /* mount the Recovery volume */ +#define KERNEL_MOUNT_BASESYSTEMROOT 0x80 /* mount a base root volume "instead of" the full root volume (only used during bsd_init) */ + +/* mask for checking if any of the "mount volume by role" flags are set */ +#define KERNEL_MOUNT_VOLBYROLE_MASK (KERNEL_MOUNT_DATAVOL | KERNEL_MOUNT_VMVOL | KERNEL_MOUNT_PREBOOTVOL | KERNEL_MOUNT_RECOVERYVOL) #if NFSCLIENT || DEVFS || ROUTEFS @@ -499,6 +533,8 @@ extern lck_mtx_t * sync_mtx_lck; extern int sync_timeout_seconds; +extern zone_t mount_zone; + __END_DECLS #endif /* !_SYS_MOUNT_INTERNAL_H_ */ diff --git a/bsd/sys/munge.h b/bsd/sys/munge.h index 5ad78a7c1..43c30b05f 100644 --- a/bsd/sys/munge.h +++ b/bsd/sys/munge.h @@ -94,12 +94,15 @@ int munge_wlwwlwlw(const void *regs, void *args); int munge_wll(const void *regs, void *args); int munge_wllww(const void *regs, void *args); int munge_wlll(const void *regs, void *args); +int munge_wlllww(const void *regs, void *args); int munge_wllll(const void *regs, void *args); int munge_wllwwll(const void *regs, void *args); int munge_wwwlw(const void *regs, void *args); int munge_wwwlww(const void *regs, void *args); +int munge_wwwlwww(const void *regs, void *args); int munge_wwwl(const void *regs, void *args); int munge_wwwwlw(const void *regs, void *args); +int munge_wwwwllww(const void *regs, void *args); int munge_wwwwl(const void *regs, void *args); int munge_wwwwwl(const void *regs, void *args); int munge_wwwwwlww(const void *regs, void *args); @@ -144,12 +147,15 @@ void munge_wlwwlwlw(void *args); void munge_wll(void *args); void munge_wllww(void *args); void munge_wlll(void *args); +void munge_wlllww(void *args); void munge_wllll(void *args); void munge_wllwwll(void *args); void munge_wwwlw(void *args); void munge_wwwlww(void *args); +void munge_wwwlwww(void *args); void munge_wwwl(void *args); void munge_wwwwlw(void *args); +void munge_wwwwllww(void *args); void munge_wwwwl(void *args); void munge_wwwwwl(void *args); void munge_wwwwwlww(void *args); diff --git a/bsd/sys/namei.h b/bsd/sys/namei.h index aa7aa4b4b..b74b412a1 100644 --- a/bsd/sys/namei.h +++ b/bsd/sys/namei.h @@ -144,7 +144,7 @@ struct nameidata { #define NAMEI_COMPOUNDRENAME 0x100 #define NAMEI_COMPOUND_OP_MASK (NAMEI_COMPOUNDOPEN | NAMEI_COMPOUNDREMOVE | NAMEI_COMPOUNDMKDIR | NAMEI_COMPOUNDRMDIR | NAMEI_COMPOUNDRENAME) -#define NAMEI_NOPROCLOCK 0x1000 /* do not take process lock (set by vnode_lookup) */ +#define NAMEI_NOFOLLOW_ANY 0x1000 /* no symlinks allowed in the path */ #ifdef KERNEL /* diff --git a/bsd/sys/netboot.h b/bsd/sys/netboot.h index d9317bbc6..942d0c518 100644 --- a/bsd/sys/netboot.h +++ b/bsd/sys/netboot.h @@ -44,7 +44,7 @@ int netboot_root(void); boolean_t netboot_iaddr(struct in_addr * iaddr_p); boolean_t netboot_rootpath(struct in_addr * server_ip, - char * name, int name_len, - char * path, int path_len); + char * name, size_t name_len, + char * path, size_t path_len); #endif /* _SYS_NETBOOT_H */ diff --git a/bsd/sys/param.h b/bsd/sys/param.h index ca57a6725..3117b36a6 100644 --- a/bsd/sys/param.h +++ 
b/bsd/sys/param.h @@ -207,10 +207,10 @@ #define MAXSYMLINKS 32 /* Bit map related macros. */ -#define setbit(a, i) (((char *)(a))[(i)/NBBY] |= 1<<((i)%NBBY)) -#define clrbit(a, i) (((char *)(a))[(i)/NBBY] &= ~(1<<((i)%NBBY))) -#define isset(a, i) (((char *)(a))[(i)/NBBY] & (1<<((i)%NBBY))) -#define isclr(a, i) ((((char *)(a))[(i)/NBBY] & (1<<((i)%NBBY))) == 0) +#define setbit(a, i) (((unsigned char *)(a))[(i)/NBBY] |= 1u<<((i)%NBBY)) +#define clrbit(a, i) (((unsigned char *)(a))[(i)/NBBY] &= ~(1u<<((i)%NBBY))) +#define isset(a, i) (((unsigned char *)(a))[(i)/NBBY] & (1u<<((i)%NBBY))) +#define isclr(a, i) ((((unsigned char *)(a))[(i)/NBBY] & (1u<<((i)%NBBY))) == 0) /* Macros for counting and rounding. */ #ifndef howmany @@ -228,24 +228,6 @@ #define MAX(a, b) (((a)>(b))?(a):(b)) #endif /* MAX */ -/* - * Constants for setting the parameters of the kernel memory allocator. - * - * 2 ** MINBUCKET is the smallest unit of memory that will be - * allocated. It must be at least large enough to hold a pointer. - * - * Units of memory less or equal to MAXALLOCSAVE will permanently - * allocate physical memory; requests for these size pieces of - * memory are quite fast. Allocations greater than MAXALLOCSAVE must - * always allocate and free physical memory; requests for these - * size allocations should be done infrequently as they will be slow. - * - * Constraints: CLBYTES <= MAXALLOCSAVE <= 2 ** (MINBUCKET + 14), and - * MAXALLOCSIZE must be a power of two. - */ -#define MINBUCKET 4 /* 4 => min allocation of 16 bytes */ -#define MAXALLOCSAVE (2 * CLBYTES) - /* * Scale factor for scaled integers used to count %cpu time and load avgs. * diff --git a/bsd/sys/persona.h b/bsd/sys/persona.h index 7b1d26944..9a3292fec 100644 --- a/bsd/sys/persona.h +++ b/bsd/sys/persona.h @@ -31,7 +31,11 @@ #ifdef PRIVATE #include +#ifdef KERNEL +__enum_decl(persona_type_t, int, { +#else /* !KERNEL */ enum { +#endif /* KERNEL */ PERSONA_INVALID = 0, PERSONA_GUEST = 1, PERSONA_MANAGED = 2, @@ -43,7 +47,11 @@ enum { PERSONA_ENTERPRISE = 8, PERSONA_TYPE_MAX = PERSONA_ENTERPRISE, +#ifdef KERNEL +}); +#else /* !KERNEL */ }; +#endif /* KERNEL */ #define PERSONA_ID_NONE ((uid_t)-1) @@ -268,7 +276,7 @@ struct persona { int32_t pna_valid; uid_t pna_id; - int pna_type; + persona_type_t pna_type; char pna_login[MAXLOGNAME + 1]; char *pna_path; @@ -422,7 +430,7 @@ void persona_put(struct persona *persona); * 0: Success * != 0: failure (BSD errno value ESRCH or EINVAL) */ -int persona_find_by_type(int persona_type, struct persona **persona, +int persona_find_by_type(persona_type_t persona_type, struct persona **persona, size_t *plen); #ifdef XNU_KERNEL_PRIVATE @@ -433,12 +441,12 @@ int persona_find_by_type(int persona_type, struct persona **persona, /* * In-kernel persona API */ -extern uint32_t g_max_personas; +extern const uint32_t g_max_personas; void personas_bootstrap(void); struct persona *persona_alloc(uid_t id, const char *login, - int type, char *path, int *error); + persona_type_t type, char *path, int *error); int persona_init_begin(struct persona *persona); void persona_init_end(struct persona *persona, int error); @@ -481,8 +489,8 @@ uid_t persona_get_uid(struct persona *persona); int persona_set_gid(struct persona *persona, gid_t gid); gid_t persona_get_gid(struct persona *persona); -int persona_set_groups(struct persona *persona, gid_t *groups, unsigned ngroups, uid_t gmuid); -int persona_get_groups(struct persona *persona, unsigned *ngroups, gid_t *groups, unsigned groups_sz); +int persona_set_groups(struct persona 
*persona, gid_t *groups, size_t ngroups, uid_t gmuid); +int persona_get_groups(struct persona *persona, size_t *ngroups, gid_t *groups, size_t groups_sz); uid_t persona_get_gmuid(struct persona *persona); @@ -491,7 +499,7 @@ int persona_get_login(struct persona *persona, char login[MAXLOGNAME + 1]); /* returns a reference that must be released with persona_put() */ struct persona *persona_proc_get(pid_t pid); -int persona_find_all(const char *login, uid_t uid, int persona_type, +int persona_find_all(const char *login, uid_t uid, persona_type_t persona_type, struct persona **persona, size_t *plen); #else /* !CONFIG_PERSONAS */ diff --git a/bsd/sys/pipe.h b/bsd/sys/pipe.h index e53f05080..83c91ef46 100644 --- a/bsd/sys/pipe.h +++ b/bsd/sys/pipe.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2004-2005 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2004-2020 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -59,6 +59,7 @@ #ifdef KERNEL #include +#include #endif #include /* for TAILQ macros */ #include @@ -103,7 +104,11 @@ struct pipebuf { u_int in; /* in pointer */ u_int out; /* out pointer */ u_int size; /* size of buffer */ - caddr_t buffer; /* kva of buffer */ +#if KERNEL + caddr_t OS_PTRAUTH_SIGNED_PTR("pipe.buffer") buffer; /* kva of buffer */ +#else + caddr_t buffer; /* kva of buffer */ +#endif /* KERNEL */ }; @@ -157,7 +162,6 @@ struct pipe { struct pipe *pipe_peer; /* link with other direction */ u_int pipe_state; /* pipe status info */ int pipe_busy; /* busy flag, mostly to handle rundown sanely */ - TAILQ_HEAD(, eventqelt) pipe_evlist; lck_mtx_t *pipe_mtxp; /* shared mutex between both pipes */ struct timespec st_atimespec; /* time of last access */ struct timespec st_mtimespec; /* time of last data modification */ @@ -172,8 +176,10 @@ struct pipe { #define PIPE_LOCK_ASSERT(pipe, type) LCK_MTX_ASSERT(PIPE_MTX(pipe), (type)) __BEGIN_DECLS -void pipeinit(void); extern int pipe_stat(struct pipe *, void *, int); +#ifdef BSD_KERNEL_PRIVATE +extern uint64_t pipe_id(struct pipe *); +#endif __END_DECLS #endif /* KERNEL */ diff --git a/bsd/sys/priv.h b/bsd/sys/priv.h index 940debadf..38b00b0ef 100644 --- a/bsd/sys/priv.h +++ b/bsd/sys/priv.h @@ -145,9 +145,13 @@ #define PRIV_VFS_SNAPSHOT_REVERT 14003 /* Allow reverting filesystem to a previous snapshot */ #define PRIV_VFS_DATALESS_RESOLVER 14004 /* Allow registration as dataless file resolver */ #define PRIV_VFS_DATALESS_MANIPULATION 14005 /* Allow process to inspect dataless directories / manipulate dataless objects */ +#define PRIV_VFS_SETSIZE 14006 /* Allow resizing a file without zeroing space */ #define PRIV_APFS_EMBED_DRIVER 14100 /* Allow embedding an EFI driver into the APFS container */ -#define PRIV_APFS_FUSION_DEBUG 14101 /* Allow getting internal statistics and controlling the APFS Fusion container */ +#define PRIV_APFS_DEBUG 14101 /* Allow to control a debugging features of the APFS container */ +#define PRIV_APFS_FUSION_DEBUG 14101 /* Old constant name, superceded by PRIV_APFS_DEBUG. 
+ * I had to use a numeric value instead of a name because this file is parsed by some + * script in the Sandbox project and it may produce a non-compilable output */ #define PRIV_APFS_FUSION_ALLOW_PIN_FASTPROMOTE 14102 /* Allow changing pinned/fastPromote inode flags in APFS Fusion container */ // #define PRIV_APFS_UNUSED 14103 #define PRIV_APFS_SET_FREE_SPACE_CHANGE_THRESHOLD 14104 /* Allow setting the free space change notification threshold */ diff --git a/bsd/sys/proc.h b/bsd/sys/proc.h index 65ab9901a..34c019cef 100644 --- a/bsd/sys/proc.h +++ b/bsd/sys/proc.h @@ -92,6 +92,7 @@ struct session; struct pgrp; struct proc; +struct proc_ident; /* Exported fields for kern sysctls */ struct extern_proc { @@ -233,6 +234,8 @@ __BEGIN_DECLS extern proc_t kernproc; extern int proc_is_classic(proc_t p); +extern bool proc_is_exotic(proc_t p); +extern bool proc_is_alien(proc_t p); proc_t current_proc_EXTERNAL(void); extern int msleep(void *chan, lck_mtx_t *mtx, int pri, const char *wmesg, struct timespec * ts ); @@ -266,6 +269,8 @@ void proc_selfname(char * buf, int size); /* find a process with a given pid. This comes with a reference which needs to be dropped by proc_rele */ extern proc_t proc_find(int pid); +/* find a process with a given process identity */ +extern proc_t proc_find_ident(struct proc_ident const *i); /* returns a handle to current process which is referenced. The reference needs to be dropped with proc_rele */ extern proc_t proc_self(void); /* releases the held reference on the process */ @@ -276,8 +281,12 @@ extern int proc_pid(proc_t); extern int proc_ppid(proc_t); /* returns the original pid of the parent of a given process */ extern int proc_original_ppid(proc_t); +/* returns the start time of the given process */ +extern int proc_starttime(proc_t, struct timeval *); /* returns the platform (macos, ios, watchos, tvos, ...) of the given process */ -extern uint32_t proc_platform(proc_t); +extern uint32_t proc_platform(const proc_t); +/* returns the minimum sdk version used by the current process */ +extern uint32_t proc_min_sdk(proc_t); /* returns the sdk version used by the current process */ extern uint32_t proc_sdk(proc_t); /* returns 1 if the process is marked for no remote hangs */ @@ -293,6 +302,8 @@ extern boolean_t proc_send_synchronous_EXC_RESOURCE(proc_t p); extern int proc_is64bit(proc_t); /* this routine returns 1 if the process is running with a 64bit register state, else 0 */ extern int proc_is64bit_data(proc_t); +/* this routine returns 1 if the process is initproc */ +extern int proc_isinitproc(proc_t); /* is this process exiting? */ extern int proc_exiting(proc_t); /* returns whether the process has started down proc_exit() */ @@ -335,7 +346,7 @@ pid_t proc_pgrpid(proc_t p); * @function proc_sessionid * @abstract Get the process session id for the passed-in process. * @param p Process whose session id to grab. - * @return session id for "p", or -1 on failure + * @return session id of current process. 
*/ pid_t proc_sessionid(proc_t p); @@ -357,6 +368,8 @@ extern int msleep1(void *chan, lck_mtx_t *mtx, int pri, const char *wmesg, task_t proc_task(proc_t); extern int proc_pidversion(proc_t); +extern proc_t proc_parent(proc_t); +extern void proc_parent_audit_token(proc_t, audit_token_t *); extern uint32_t proc_persona_id(proc_t); extern uint32_t proc_getuid(proc_t); extern uint32_t proc_getgid(proc_t); @@ -391,6 +404,12 @@ extern void proc_set_responsible_pid(proc_t target_proc, pid_t responsible_pid); /* return 1 if process is forcing case-sensitive HFS+ access, 0 for default */ extern int proc_is_forcing_hfs_case_sensitivity(proc_t); +/* return true if the process is translated, false for default */ +extern boolean_t proc_is_translated(proc_t); + +/* true if the process ignores errors from content protection APIs */ +extern bool proc_ignores_content_protection(proc_t proc); + /*! * @function proc_exitstatus * @abstract KPI to determine a process's exit status. @@ -431,9 +450,30 @@ extern off_t proc_getexecutableoffset(proc_t p); extern vnode_t proc_getexecutablevnode(proc_t); /* Returned with iocount, use vnode_put() to drop */ extern int networking_memstatus_callout(proc_t p, uint32_t); +/* System call filtering for BSD syscalls, mach traps and kobject routines. */ #define SYSCALL_MASK_UNIX 0 +#define SYSCALL_MASK_MACH 1 +#define SYSCALL_MASK_KOBJ 2 + +#define SYSCALL_FILTER_CALLBACK_VERSION 1 +typedef int (*syscall_filter_cbfunc_t)(proc_t p, int num); +typedef int (*kobject_filter_cbfunc_t)(proc_t p, int msgid, int idx); +struct syscall_filter_callbacks { + int version; + const syscall_filter_cbfunc_t unix_filter_cbfunc; + const syscall_filter_cbfunc_t mach_filter_cbfunc; + const kobject_filter_cbfunc_t kobj_filter_cbfunc; +}; +typedef struct syscall_filter_callbacks * syscall_filter_cbs_t; + +extern int proc_set_syscall_filter_callbacks(syscall_filter_cbs_t callback); +extern int proc_set_syscall_filter_index(int which, int num, int index); extern size_t proc_get_syscall_filter_mask_size(int which); extern int proc_set_syscall_filter_mask(proc_t p, int which, unsigned char *maskptr, size_t masklen); + +extern int proc_set_filter_message_flag(proc_t p, boolean_t flag); +extern int proc_get_filter_message_flag(proc_t p, boolean_t *flag); + #endif /* KERNEL_PRIVATE */ __END_DECLS @@ -458,6 +498,8 @@ __BEGIN_DECLS int pid_suspend(int pid); int pid_resume(int pid); +int task_inspect_for_pid(unsigned int target_tport, int pid, unsigned int *t); +int task_read_for_pid(unsigned int target_tport, int pid, unsigned int *t); #if defined(__arm__) || defined(__arm64__) int pid_hibernate(int pid); diff --git a/bsd/sys/proc_info.h b/bsd/sys/proc_info.h index 086ad7842..a1b9a372a 100644 --- a/bsd/sys/proc_info.h +++ b/bsd/sys/proc_info.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005-2017 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2005-2020 Apple Computer, Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -108,7 +108,8 @@ struct proc_uniqidentifierinfo { uint8_t p_uuid[16]; /* UUID of the main executable */ uint64_t p_uniqueid; /* 64 bit unique identifier for process */ uint64_t p_puniqueid; /* unique identifier for process's parent */ - uint64_t p_reserve2; /* reserved for future use */ + int32_t p_idversion; /* pid version */ + uint32_t p_reserve2; /* reserved for future use */ uint64_t p_reserve3; /* reserved for future use */ uint64_t p_reserve4; /* reserved for future use */ }; @@ -530,6 +531,17 @@ struct kern_ctl_info { char kcsi_name[MAX_KCTL_NAME]; /* unique nke identifier, provided by DTS */ }; +/* + * VSock Sockets + */ + +struct vsock_sockinfo { + uint32_t local_cid; + uint32_t local_port; + uint32_t remote_cid; + uint32_t remote_port; +}; + /* soi_state */ #define SOI_S_NOFDREF 0x0001 /* no file table ref any more */ @@ -564,7 +576,8 @@ enum { SOCKINFO_UN = 3, SOCKINFO_NDRV = 4, SOCKINFO_KERN_EVENT = 5, - SOCKINFO_KERN_CTL = 6 + SOCKINFO_KERN_CTL = 6, + SOCKINFO_VSOCK = 7, }; struct socket_info { @@ -594,6 +607,7 @@ struct socket_info { struct ndrv_info pri_ndrv; /* SOCKINFO_NDRV */ struct kern_event_info pri_kern_event; /* SOCKINFO_KERN_EVENT */ struct kern_ctl_info pri_kern_ctl; /* SOCKINFO_KERN_CTL */ + struct vsock_sockinfo pri_vsock; /* SOCKINFO_VSOCK */ } soi_proto; }; @@ -957,8 +971,17 @@ struct proc_fileportinfo { #define PROC_INFO_CALL_CANUSEFGHW 0xc #define PROC_INFO_CALL_PIDDYNKQUEUEINFO 0xd #define PROC_INFO_CALL_UDATA_INFO 0xe + +/* __proc_info_extended_id() flags */ +#define PIF_COMPARE_IDVERSION 0x01 +#define PIF_COMPARE_UNIQUEID 0x02 + #endif /* PRIVATE */ +#ifdef KERNEL_PRIVATE +extern int proc_fdlist(proc_t p, struct proc_fdinfo *buf, size_t *count); +#endif + #ifdef XNU_KERNEL_PRIVATE #ifndef pshmnode struct pshmnode; diff --git a/bsd/sys/proc_internal.h b/bsd/sys/proc_internal.h index 57e827b40..2bf615a71 100644 --- a/bsd/sys/proc_internal.h +++ b/bsd/sys/proc_internal.h @@ -193,8 +193,8 @@ struct proc; struct proc { LIST_ENTRY(proc) p_list; /* List of all processes. */ - void * task; /* corresponding task (static)*/ - struct proc * p_pptr; /* Pointer to parent process.(LL) */ + void * XNU_PTRAUTH_SIGNED_PTR("proc.task") task; /* corresponding task (static)*/ + struct proc * XNU_PTRAUTH_SIGNED_PTR("proc.p_pptr") p_pptr; /* Pointer to parent process.(LL) */ pid_t p_ppid; /* process's parent pid number */ pid_t p_original_ppid; /* process's original parent pid number, doesn't change if reparented */ pid_t p_pgrpid; /* process group id of the process (LL)*/ @@ -220,7 +220,6 @@ struct proc { TAILQ_HEAD(, uthread) p_uthlist; /* List of uthreads (PL) */ LIST_ENTRY(proc) p_hash; /* Hash chain. (LL)*/ - TAILQ_HEAD(, eventqelt) p_evlist; /* (PL) */ #if CONFIG_PERSONAS struct persona *p_persona; @@ -231,7 +230,7 @@ struct proc { lck_mtx_t p_ucred_mlock; /* mutex lock to protect p_ucred */ /* substructures: */ - kauth_cred_t p_ucred; /* Process owner's identity. (PUCL) */ + kauth_cred_t XNU_PTRAUTH_SIGNED_PTR("proc.p_ucred") p_ucred; /* Process owner's identity. (PUCL) */ struct filedesc *p_fd; /* Ptr to open files structure. (PFDL) */ struct pstats *p_stats; /* Accounting/statistics (PL). 
*/ struct plimit *p_limit; /* Process limits.(PL) */ @@ -239,9 +238,6 @@ struct proc { struct sigacts *p_sigacts; /* Signal actions, state (PL) */ lck_spin_t p_slock; /* spin lock for itimer/profil protection */ -#define p_rlimit p_limit->pl_rlimit - - struct plimit *p_olimit; /* old process limits - not inherited by child (PL) */ int p_siglist; /* signals captured back from threads */ unsigned int p_flag; /* P_* flags. (atomic bit ops) */ unsigned int p_lflag; /* local flags (PL) */ @@ -301,7 +297,7 @@ struct proc { u_int p_argslen; /* Length of process arguments. */ int p_argc; /* saved argc for sysctl_procargs() */ user_addr_t user_stack; /* where user stack was allocated */ - struct vnode *p_textvp; /* Vnode of executable. */ + struct vnode * XNU_PTRAUTH_SIGNED_PTR("proc.p_textvp") p_textvp; /* Vnode of executable. */ off_t p_textoff; /* offset in executable vnode */ sigset_t p_sigmask; /* DEPRECATED */ @@ -319,7 +315,7 @@ struct proc { uint8_t p_xhighbits; /* Stores the top byte of exit status to avoid truncation*/ pid_t p_contproc; /* last PID to send us a SIGCONT (PL) */ - struct pgrp *p_pgrp; /* Pointer to process group. (LL) */ + struct pgrp * XNU_PTRAUTH_SIGNED_PTR("proc.p_pgrp") p_pgrp; /* Pointer to process group. (LL) */ uint32_t p_csflags; /* flags for codesign (PL) */ uint32_t p_pcaction; /* action for process control on starvation */ uint8_t p_uuid[16]; /* from LC_UUID load command */ @@ -333,13 +329,13 @@ struct proc { uint8_t *syscall_filter_mask; /* syscall filter bitmask (length: nsysent bits) */ uint32_t p_platform; + uint32_t p_min_sdk; uint32_t p_sdk; /* End area that is copied on creation. */ /* XXXXXXXXXXXXX End of BCOPY'ed on fork (AIOLOCK)XXXXXXXXXXXXXXXX */ #define p_endcopy p_aio_total_count int p_aio_total_count; /* all allocated AIO requests for this proc */ - int p_aio_active_count; /* all unfinished AIO requests for this proc */ TAILQ_HEAD(, aio_workq_entry ) p_aio_activeq; /* active async IO requests */ TAILQ_HEAD(, aio_workq_entry ) p_aio_doneq; /* completed async IO requests */ @@ -416,10 +412,23 @@ struct proc { struct os_reason *p_exit_reason; -#if !CONFIG_EMBEDDED +#if CONFIG_PROC_UDATA_STORAGE uint64_t p_user_data; /* general-purpose storage for userland-provided data */ -#endif /* !CONFIG_EMBEDDED */ +#endif /* CONFIG_PROC_UDATA_STORAGE */ + + char * p_subsystem_root_path; lck_rw_t p_dirs_lock; /* keeps fd_cdir and fd_rdir stable across a lookup */ + pid_t p_sessionid; +}; + +/* + * Identify a process uniquely. + * proc_ident's fields match 1-1 with those in struct proc. 
+ */ +struct proc_ident { + uint64_t p_uniqueid; + pid_t p_pid; + int p_idversion; }; #define PGRPID_DEAD 0xdeaddead @@ -465,7 +474,7 @@ struct proc { #define P_LKQWDRAIN 0x00004000 #define P_LKQWDRAINWAIT 0x00008000 #define P_LKQWDEAD 0x00010000 -#define P_LLIMCHANGE 0x00020000 +#define P_LLIMCHANGE 0x00020000 /* process is changing its plimit (rlim_cur, rlim_max) */ #define P_LLIMWAIT 0x00040000 #define P_LWAITED 0x00080000 #define P_LINSIGNAL 0x00100000 @@ -512,7 +521,9 @@ struct proc { #define P_VFS_IOPOLICY_ATIME_UPDATES 0x0002 #define P_VFS_IOPOLICY_MATERIALIZE_DATALESS_FILES 0x0004 #define P_VFS_IOPOLICY_STATFS_NO_DATA_VOLUME 0x0008 -#define P_VFS_IOPOLICY_VALID_MASK (P_VFS_IOPOLICY_ATIME_UPDATES | P_VFS_IOPOLICY_FORCE_HFS_CASE_SENSITIVITY | P_VFS_IOPOLICY_MATERIALIZE_DATALESS_FILES | P_VFS_IOPOLICY_STATFS_NO_DATA_VOLUME) +#define P_VFS_IOPOLICY_TRIGGER_RESOLVE_DISABLE 0x0010 +#define P_VFS_IOPOLICY_IGNORE_CONTENT_PROTECTION 0x0020 +#define P_VFS_IOPOLICY_VALID_MASK (P_VFS_IOPOLICY_ATIME_UPDATES | P_VFS_IOPOLICY_FORCE_HFS_CASE_SENSITIVITY | P_VFS_IOPOLICY_MATERIALIZE_DATALESS_FILES | P_VFS_IOPOLICY_STATFS_NO_DATA_VOLUME | P_VFS_IOPOLICY_TRIGGER_RESOLVE_DISABLE | P_VFS_IOPOLICY_IGNORE_CONTENT_PROTECTION) /* process creation arguments */ #define PROC_CREATE_FORK 0 /* independent child (running) */ @@ -642,6 +653,29 @@ struct user64_extern_proc { }; #endif /* KERNEL */ +#ifdef BSD_KERNEL_PRIVATE + +#include /* for struct os_refcnt pl_refcnt */ + +/* + * Kernel shareable process resource limits: + * Because this structure is moderately large but changed infrequently, it is normally + * shared copy-on-write after a fork. The pl_refcnt variable records the number of + * "processes" (NOT threads) currently sharing the plimit. A plimit is freed when the + * last referencing process exits the system. The refcnt of the plimit is a race-free + * _Atomic variable. We allocate new plimits in proc_limitupdate and free them + * in proc_limitdrop/proc_limitupdate. + */ +struct plimit { + struct rlimit pl_rlimit[RLIM_NLIMITS]; + os_refcnt_t pl_refcnt; /* number of processes using this plimit */ +}; + +extern rlim_t proc_limitgetcur(proc_t p, int which, boolean_t to_lock_proc); +extern void proc_limitsetcur_internal(proc_t p, int which, rlim_t value); +extern struct proc proc0; +#endif /* BSD_KERNEL_PRIVATE */ + /* * We use process IDs <= PID_MAX; PID_MAX + 1 must also fit in a pid_t, * as it is used to represent "no process group". @@ -690,8 +724,8 @@ LIST_HEAD(proclist, proc); extern struct proclist allproc; /* List of all processes. */ extern struct proclist zombproc; /* List of zombie processes. */ -extern struct proc *initproc; -extern void procinit(void); +extern struct proc * XNU_PTRAUTH_SIGNED_PTR("initproc") initproc; +extern void procinit(void); extern void proc_lock(struct proc *); extern void proc_unlock(struct proc *); extern void proc_spinlock(struct proc *); @@ -717,8 +751,7 @@ __private_extern__ struct proc *pzfind(pid_t); /* Find zombie by id. */ __private_extern__ struct proc *proc_find_zombref(pid_t); /* Find zombie by id. */ __private_extern__ void proc_drop_zombref(struct proc * p); /* Find zombie by id. 
*/ - -extern int chgproccnt(uid_t uid, int diff); +extern size_t chgproccnt(uid_t uid, int diff); extern void pinsertchild(struct proc *parent, struct proc *child); extern int setsid_internal(struct proc *p); #ifndef __cplusplus @@ -768,7 +801,6 @@ extern void session_unlock(struct session * sess); extern struct session * pgrp_session(struct pgrp * pgrp); extern void session_rele(struct session *sess); extern int isbackground(proc_t p, struct tty *tp); -extern proc_t proc_parent(proc_t); extern proc_t proc_parentholdref(proc_t); extern int proc_parentdropref(proc_t, int); int itimerfix(struct timeval *tv); @@ -803,6 +835,7 @@ extern lck_mtx_t * pthread_list_mlock; #endif /* PSYNCH */ struct uthread * current_uthread(void); + /* process iteration */ #define ALLPROC_FOREACH(var) \ @@ -878,4 +911,10 @@ pid_t dtrace_proc_selfpid(void); pid_t dtrace_proc_selfppid(void); uid_t dtrace_proc_selfruid(void); +extern zone_t proc_zone; +extern zone_t proc_stats_zone; +extern zone_t proc_sigacts_zone; + +extern struct proc_ident proc_ident(proc_t p); + #endif /* !_SYS_PROC_INTERNAL_H_ */ diff --git a/bsd/sys/proc_require.h b/bsd/sys/proc_require.h new file mode 100644 index 000000000..1e68dd228 --- /dev/null +++ b/bsd/sys/proc_require.h @@ -0,0 +1,44 @@ +/* + * Copyright (c) 2020 Apple Inc. All rights reserved. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. + * + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. 
+ * + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ + */ + +#ifndef _SYS_PROC_REQUIRE_H_ +#define _SYS_PROC_REQUIRE_H_ + +typedef struct proc * proc_t; + +/* Used by proc_require for validation of proc zone */ +__options_closed_decl(proc_require_flags_t, unsigned int, { + PROC_REQUIRE_ALLOW_USER = 0x0, //always on, just means we allow lookup of any non kernel proc + PROC_REQUIRE_ALLOW_NULL = 0x1, + PROC_REQUIRE_ALLOW_KERNPROC = 0x2, +}); + +/* validates that 'proc' comes from within the proc zone */ +void proc_require(proc_t proc, proc_require_flags_t flags); + +#endif // _SYS_PROC_REQUIRE_H_ diff --git a/bsd/sys/process_policy.h b/bsd/sys/process_policy.h index 8ba38fb19..3e389da3d 100644 --- a/bsd/sys/process_policy.h +++ b/bsd/sys/process_policy.h @@ -34,9 +34,20 @@ #include #include -#ifndef XNU_KERNEL_PRIVATE +#if defined(XNU_KERNEL_PRIVATE) + +#if defined(XNU_TARGET_OS_OSX) +#define PROCESS_POLICY_OSX 1 +#else /* defined(XNU_TARGET_OS_OSX) */ +#define PROCESS_POLICY_OSX 0 +#endif /* defined(XNU_TARGET_OS_OSX) */ + +#else /* defined(XNU_KERNEL_PRIVATE) */ + #include -#endif + +#define PROCESS_POLICY_OSX !(TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR) +#endif /* defined(XNU_KERNEL_PRIVATE) */ __BEGIN_DECLS @@ -65,13 +76,15 @@ __BEGIN_DECLS #define PROC_POLICY_HARDWARE_ACCESS 2 /* access to various hardware */ #define PROC_POLICY_RESOURCE_STARVATION 3 /* behavior on resource starvation */ #define PROC_POLICY_RESOURCE_USAGE 4 /* behavior on resource consumption */ -#if CONFIG_EMBEDDED || (TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR) +#if !PROCESS_POLICY_OSX #define PROC_POLICY_APP_LIFECYCLE 5 /* app life cycle management */ -#else /* CONFIG_EMBEDDED */ +#else /* !PROCESS_POLICY_OSX */ #define PROC_POLICY_RESERVED 5 /* behavior on resource consumption */ -#endif /* CONFIG_EMBEDDED */ +#endif /* !PROCESS_POLICY_OSX */ #define PROC_POLICY_APPTYPE 6 /* behavior on resource consumption */ #define PROC_POLICY_BOOST 7 /* importance boost/drop */ +#define PROC_POLICY_NO_SMT 8 /* Disallow Simultaneous Multi-Threading */ +#define PROC_POLICY_TECS 9 /* Enable CPU security for threads */ /* sub policies for background policy */ #define PROC_POLICY_BG_NONE 0 /* none */ @@ -79,11 +92,11 @@ __BEGIN_DECLS #define PROC_POLICY_BG_DISKTHROTTLE 2 /* disk accesses throttled */ #define PROC_POLICY_BG_NETTHROTTLE 4 /* network accesses throttled */ #define PROC_POLICY_BG_GPUDENY 8 /* no access to GPU */ -#if CONFIG_EMBEDDED || (TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR) +#if !PROCESS_POLICY_OSX #define PROC_POLICY_BG_ALL 0x0F -#else /* CONFIG_EMBEDDED */ +#else /* !PROCESS_POLICY_OSX */ #define PROC_POLICY_BG_ALL 0x07 -#endif /* CONFIG_EMBEDDED */ +#endif /* !PROCESS_POLICY_OSX */ #define PROC_POLICY_BG_DEFAULT PROC_POLICY_BG_ALL /* sub policies for hardware */ @@ -169,20 +182,20 @@ typedef struct proc_policy_cpuusage_attr { uint64_t ppattr_cpu_attr_deadline; /* 64bit deadline in nsecs */ } proc_policy_cpuusage_attr_t; -#if CONFIG_EMBEDDED || (TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR) +#if !PROCESS_POLICY_OSX /* sub policies for app lifecycle management */ #define PROC_POLICY_APPLIFE_NONE 0 /* does nothing.. 
*/ #define PROC_POLICY_APPLIFE_STATE 1 /* sets the app to various lifecycle states */ #define PROC_POLICY_APPLIFE_DEVSTATUS 2 /* notes the device in inactive or short/long term */ #define PROC_POLICY_APPLIFE_PIDBIND 3 /* a thread is to be bound to another processes app state */ -#endif /* CONFIG_EMBEDDED */ +#endif /* !PROCESS_POLICY_OSX */ /* sub policies for PROC_POLICY_APPTYPE */ #define PROC_POLICY_APPTYPE_NONE 0 /* does nothing.. */ #define PROC_POLICY_APPTYPE_MODIFY 1 /* sets the app to various lifecycle states */ -#if CONFIG_EMBEDDED || (TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR) +#if !PROCESS_POLICY_OSX #define PROC_POLICY_APPTYPE_THREADTHR 2 /* notes the device in inactive or short/long term */ -#endif /* CONFIG_EMBEDDED */ +#endif /* !PROCESS_POLICY_OSX */ /* exported apptypes for PROC_POLICY_APPTYPE */ #define PROC_POLICY_OSX_APPTYPE_TAL 1 /* TAL-launched app */ diff --git a/bsd/sys/queue.h b/bsd/sys/queue.h index 23dc242c6..46b382ed7 100644 --- a/bsd/sys/queue.h +++ b/bsd/sys/queue.h @@ -499,7 +499,8 @@ __MISMATCH_TAGS_POP LIST_FIRST((head)) != NULL && \ LIST_FIRST((head))->field.le_prev != \ &LIST_FIRST((head)))) \ - panic("Bad list head %p first->prev != head", (head)); \ + panic("Bad list head %p first->prev != head @%u", \ + (head), __LINE__); \ } while (0) #define LIST_CHECK_NEXT(elm, field) do { \ @@ -507,12 +508,14 @@ __MISMATCH_TAGS_POP LIST_NEXT((elm), field) != NULL && \ LIST_NEXT((elm), field)->field.le_prev != \ &((elm)->field.le_next))) \ - panic("Bad link elm %p next->prev != elm", (elm)); \ + panic("Bad link elm %p next->prev != elm @%u", \ + (elm), __LINE__); \ } while (0) #define LIST_CHECK_PREV(elm, field) do { \ if (__improbable(*(elm)->field.le_prev != (elm))) \ - panic("Bad link elm %p prev->next != elm", (elm)); \ + panic("Bad link elm %p prev->next != elm @%u", \ + (elm), __LINE__); \ } while (0) #else #define LIST_CHECK_HEAD(head, field) @@ -628,7 +631,8 @@ __MISMATCH_TAGS_POP TAILQ_FIRST((head)) != NULL && \ TAILQ_FIRST((head))->field.tqe_prev != \ &TAILQ_FIRST((head)))) \ - panic("Bad tailq head %p first->prev != head", (head)); \ + panic("Bad tailq head %p first->prev != head @%u", \ + (head), __LINE__); \ } while (0) #define TAILQ_CHECK_NEXT(elm, field) do { \ @@ -636,12 +640,14 @@ __MISMATCH_TAGS_POP TAILQ_NEXT((elm), field) != NULL && \ TAILQ_NEXT((elm), field)->field.tqe_prev != \ &((elm)->field.tqe_next))) \ - panic("Bad tailq elm %p next->prev != elm", (elm)); \ + panic("Bad tailq elm %p next->prev != elm @%u", \ + (elm), __LINE__); \ } while(0) #define TAILQ_CHECK_PREV(elm, field) do { \ if (__improbable(*(elm)->field.tqe_prev != (elm))) \ - panic("Bad tailq elm %p prev->next != elm", (elm)); \ + panic("Bad tailq elm %p prev->next != elm @%u", \ + (elm), __LINE__); \ } while(0) #else #define TAILQ_CHECK_HEAD(head, field) @@ -837,19 +843,22 @@ __MISMATCH_TAGS_POP if (__improbable( \ CIRCLEQ_FIRST((head)) != ((void*)(head)) && \ CIRCLEQ_FIRST((head))->field.cqe_prev != ((void*)(head))))\ - panic("Bad circleq head %p first->prev != head", (head)); \ + panic("Bad circleq head %p first->prev != head @%u", \ + (head), __LINE__); \ } while(0) #define CIRCLEQ_CHECK_NEXT(head, elm, field) do { \ if (__improbable( \ CIRCLEQ_NEXT((elm), field) != ((void*)(head)) && \ CIRCLEQ_NEXT((elm), field)->field.cqe_prev != (elm))) \ - panic("Bad circleq elm %p next->prev != elm", (elm)); \ + panic("Bad circleq elm %p next->prev != elm @%u", \ + (elm), __LINE__); \ } while(0) #define CIRCLEQ_CHECK_PREV(head, elm, field) do { \ if (__improbable( \ CIRCLEQ_PREV((elm), 
field) != ((void*)(head)) && \ CIRCLEQ_PREV((elm), field)->field.cqe_next != (elm))) \ - panic("Bad circleq elm %p prev->next != elm", (elm)); \ + panic("Bad circleq elm %p prev->next != elm @%u", \ + (elm), __LINE__); \ } while(0) #else #define CIRCLEQ_CHECK_HEAD(head, field) diff --git a/bsd/sys/reason.h b/bsd/sys/reason.h index 516521f9f..3014085be 100644 --- a/bsd/sys/reason.h +++ b/bsd/sys/reason.h @@ -114,11 +114,12 @@ void os_reason_set_description_data(os_reason_t cur_reason, uint32_t type, void #define OS_REASON_SECURITY 26 #define OS_REASON_ENDPOINTSECURITY 27 #define OS_REASON_PAC_EXCEPTION 28 +#define OS_REASON_BLUETOOTH_CHIP 29 /* * Update whenever new OS_REASON namespaces are added. */ -#define OS_REASON_MAX_VALID_NAMESPACE OS_REASON_PAC_EXCEPTION +#define OS_REASON_MAX_VALID_NAMESPACE OS_REASON_BLUETOOTH_CHIP #define OS_REASON_BUFFER_MAX_SIZE 5120 @@ -132,6 +133,7 @@ void os_reason_set_description_data(os_reason_t cur_reason, uint32_t type, void #define OS_REASON_FLAG_ONE_TIME_FAILURE 0x80 /* Whatever caused this reason to be created was a one time issue */ #define OS_REASON_FLAG_NO_CRASHED_TID 0x100 /* Don't include the TID that processed the exit in the crash report */ #define OS_REASON_FLAG_ABORT 0x200 /* Reason created from abort_* rather than terminate_* */ +#define OS_REASON_FLAG_SHAREDREGION_FAULT 0x400 /* Fault happened within the shared cache region */ /* * Set of flags that are allowed to be passed from userspace @@ -244,7 +246,9 @@ int terminate_with_payload(int pid, uint32_t reason_namespace, uint64_t reason_c #define EXEC_EXIT_REASON_DECRYPT 11 #define EXEC_EXIT_REASON_UPX 12 #define EXEC_EXIT_REASON_NO32EXEC 13 - +#define EXEC_EXIT_REASON_WRONG_PLATFORM 14 +#define EXEC_EXIT_REASON_MAIN_FD_ALLOC 15 +#define EXEC_EXIT_REASON_COPYOUT_ROSETTA 16 /* * guard reasons */ diff --git a/bsd/sys/resource.h b/bsd/sys/resource.h index 0cc5a3983..b3baf8421 100644 --- a/bsd/sys/resource.h +++ b/bsd/sys/resource.h @@ -212,7 +212,13 @@ struct rusage { #define RUSAGE_INFO_V2 2 #define RUSAGE_INFO_V3 3 #define RUSAGE_INFO_V4 4 -#define RUSAGE_INFO_CURRENT RUSAGE_INFO_V4 +#define RUSAGE_INFO_V5 5 +#define RUSAGE_INFO_CURRENT RUSAGE_INFO_V5 + +/* + * Flags for RUSAGE_INFO_V5 + */ +#define RU_PROC_RUNS_RESLIDE 0x00000001 /* proc has reslid shared cache */ typedef void *rusage_info_t; @@ -342,7 +348,47 @@ struct rusage_info_v4 { uint64_t ri_runnable_time; }; -typedef struct rusage_info_v4 rusage_info_current; +struct rusage_info_v5 { + uint8_t ri_uuid[16]; + uint64_t ri_user_time; + uint64_t ri_system_time; + uint64_t ri_pkg_idle_wkups; + uint64_t ri_interrupt_wkups; + uint64_t ri_pageins; + uint64_t ri_wired_size; + uint64_t ri_resident_size; + uint64_t ri_phys_footprint; + uint64_t ri_proc_start_abstime; + uint64_t ri_proc_exit_abstime; + uint64_t ri_child_user_time; + uint64_t ri_child_system_time; + uint64_t ri_child_pkg_idle_wkups; + uint64_t ri_child_interrupt_wkups; + uint64_t ri_child_pageins; + uint64_t ri_child_elapsed_abstime; + uint64_t ri_diskio_bytesread; + uint64_t ri_diskio_byteswritten; + uint64_t ri_cpu_time_qos_default; + uint64_t ri_cpu_time_qos_maintenance; + uint64_t ri_cpu_time_qos_background; + uint64_t ri_cpu_time_qos_utility; + uint64_t ri_cpu_time_qos_legacy; + uint64_t ri_cpu_time_qos_user_initiated; + uint64_t ri_cpu_time_qos_user_interactive; + uint64_t ri_billed_system_time; + uint64_t ri_serviced_system_time; + uint64_t ri_logical_writes; + uint64_t ri_lifetime_max_phys_footprint; + uint64_t ri_instructions; + uint64_t ri_cycles; + uint64_t 
ri_billed_energy; + uint64_t ri_serviced_energy; + uint64_t ri_interval_max_phys_footprint; + uint64_t ri_runnable_time; + uint64_t ri_flags; +}; + +typedef struct rusage_info_v5 rusage_info_current; #endif /* __DARWIN_C_LEVEL >= __DARWIN_C_FULL */ @@ -499,6 +545,8 @@ struct proc_rlimit_control_wakeupmon { #define IOPOL_TYPE_VFS_ATIME_UPDATES 2 #define IOPOL_TYPE_VFS_MATERIALIZE_DATALESS_FILES 3 #define IOPOL_TYPE_VFS_STATFS_NO_DATA_VOLUME 4 +#define IOPOL_TYPE_VFS_TRIGGER_RESOLVE 5 +#define IOPOL_TYPE_VFS_IGNORE_CONTENT_PROTECTION 6 /* scope */ #define IOPOL_SCOPE_PROCESS 0 @@ -532,6 +580,12 @@ struct proc_rlimit_control_wakeupmon { #define IOPOL_VFS_STATFS_NO_DATA_VOLUME_DEFAULT 0 #define IOPOL_VFS_STATFS_FORCE_NO_DATA_VOLUME 1 +#define IOPOL_VFS_TRIGGER_RESOLVE_DEFAULT 0 +#define IOPOL_VFS_TRIGGER_RESOLVE_OFF 1 + +#define IOPOL_VFS_CONTENT_PROTECTION_DEFAULT 0 +#define IOPOL_VFS_CONTENT_PROTECTION_IGNORE 1 + #ifdef PRIVATE /* * Structures for use in communicating via iopolicysys() between Libc and the diff --git a/bsd/sys/resourcevar.h b/bsd/sys/resourcevar.h index 6d9244314..4140c5115 100644 --- a/bsd/sys/resourcevar.h +++ b/bsd/sys/resourcevar.h @@ -66,6 +66,7 @@ #include #include +#include /* * Kernel per-process accounting / statistics @@ -100,33 +101,15 @@ struct pstats { #endif // KERNEL }; -/* - * Kernel shareable process resource limits. Because this structure - * is moderately large but changes infrequently, it is normally - * shared copy-on-write after forks. If a group of processes - * ("threads") share modifications, the PL_SHAREMOD flag is set, - * and a copy must be made for the child of a new fork that isn't - * sharing modifications to the limits. - */ -/* - * Modifications are done with the list lock held (p_limit as well)and access indv - * limits can be done without limit as we keep the old copy in p_olimit. Which is - * dropped in proc_exit. This way all access will have a valid kernel address - */ -struct plimit { - struct rlimit pl_rlimit[RLIM_NLIMITS]; - int pl_refcnt; /* number of references */ -}; - #ifdef KERNEL -void calcru(struct proc *p, struct timeval *up, struct timeval *sp, - struct timeval *ip); -void ruadd(struct rusage *ru, struct rusage *ru2); -void update_rusage_info_child(struct rusage_info_child *ru, rusage_info_current *ru_current); + +void calcru(struct proc *p, struct timeval *up, struct timeval *sp, struct timeval *ip); +void ruadd(struct rusage *ru, struct rusage *ru2); +void update_rusage_info_child(struct rusage_info_child *ru, rusage_info_current *ru_current); void proc_limitget(proc_t p, int whichi, struct rlimit * limp); -void proc_limitdrop(proc_t p, int exiting); void proc_limitfork(proc_t parent, proc_t child); -int proc_limitreplace(proc_t p); +void proc_limitdrop(proc_t p); +void proc_limitupdate(proc_t p, struct rlimit *newrlim, uint8_t which); void proc_limitblock(proc_t); void proc_limitunblock(proc_t); #endif /* KERNEL */ diff --git a/bsd/sys/sbuf.h b/bsd/sys/sbuf.h index 0a874635e..a86f6f765 100644 --- a/bsd/sys/sbuf.h +++ b/bsd/sys/sbuf.h @@ -1,5 +1,5 @@ /*- - * Copyright (c) 2000 Poul-Henning Kamp and Dag-Erling Co•dan Sm¿rgrav + * Copyright (c) 2000 Poul-Henning Kamp and Dag-Erling Coïdan Smørgrav * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -33,6 +33,7 @@ #include #include +#include /* * Structure definition @@ -53,9 +54,7 @@ struct sbuf { }; __BEGIN_DECLS -/* - * API functions - */ +#ifdef KERNEL_PRIVATE struct sbuf *sbuf_new(struct sbuf *, char *, int, int); void sbuf_clear(struct sbuf *); int sbuf_setpos(struct sbuf *, int); @@ -73,6 +72,7 @@ char *sbuf_data(struct sbuf *); int sbuf_len(struct sbuf *); int sbuf_done(struct sbuf *); void sbuf_delete(struct sbuf *); +#endif #ifdef KERNEL struct uio; diff --git a/bsd/sys/sdt_impl.h b/bsd/sys/sdt_impl.h index 837e38c74..73c18a172 100644 --- a/bsd/sys/sdt_impl.h +++ b/bsd/sys/sdt_impl.h @@ -35,7 +35,9 @@ extern const char *sdt_prefix; typedef struct sdt_probedesc { char *sdpd_name; /* name of this probe */ + int sdpd_namelen; char *sdpd_func; /* APPLE NOTE: function name */ + const char *sdpd_prov; /* APPLE NOTE: provider name */ unsigned long sdpd_offset; /* offset of call in text */ struct sdt_probedesc *sdpd_next; /* next static probe */ } sdt_probedesc_t; diff --git a/bsd/sys/shm.h b/bsd/sys/shm.h index 226a5fea3..ca4deffe8 100644 --- a/bsd/sys/shm.h +++ b/bsd/sys/shm.h @@ -109,7 +109,11 @@ typedef unsigned short shmatt_t; * headers at this time, to avoid the resulting namespace * pollution, which is why we discourages its use. */ +#if __arm64__ +#define SHMLBA (16*1024) /* [XSI] Segment low boundary address multiple*/ +#else /* __arm64__ */ #define SHMLBA 4096 /* [XSI] Segment low boundary address multiple*/ +#endif /* __arm64__ */ /* "official" access mode definitions; somewhat braindead since you have * to specify (SHM_* >> 3) for group and (SHM_* >> 6) for world permissions */ diff --git a/bsd/sys/signalvar.h b/bsd/sys/signalvar.h index a209acb0f..91d80a6b1 100644 --- a/bsd/sys/signalvar.h +++ b/bsd/sys/signalvar.h @@ -92,7 +92,6 @@ struct sigacts { user_addr_t ps_sigreturn_token; /* random token used to validate sigreturn arguments */ _Atomic uint32_t ps_sigreturn_validation; /* sigreturn argument validation state */ int ps_flags; /* signal flags, below */ - struct kern_sigaltstack ps_sigstk; /* sp, length & flags */ int ps_sig; /* for core dump/debugger XXX */ int ps_code; /* for core dump/debugger XXX */ int ps_addr; /* for core dump/debugger XXX */ @@ -253,9 +252,8 @@ int sig_try_locked(struct proc *p); #if defined(KERNEL_PRIVATE) /* Forward-declare these for consumers of the SDK that don't know about BSD types */ struct proc; -typedef struct proc * proc_t; struct os_reason; -void psignal_sigkill_with_reason(proc_t p, struct os_reason *signal_reason); +void psignal_sigkill_with_reason(struct proc *p, struct os_reason *signal_reason); #endif /* defined(KERNEL_PRIVATE) */ #ifdef XNU_KERNEL_PRIVATE diff --git a/bsd/sys/socket.h b/bsd/sys/socket.h index e851212be..a48b0f8c0 100644 --- a/bsd/sys/socket.h +++ b/bsd/sys/socket.h @@ -171,8 +171,8 @@ #define SO_ERROR 0x1007 /* get error status and clear */ #define SO_TYPE 0x1008 /* get socket type */ #if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE) -#define SO_LABEL 0x1010 /* socket's MAC label */ -#define SO_PEERLABEL 0x1011 /* socket's peer MAC label */ +#define SO_LABEL 0x1010 /* deprecated */ +#define SO_PEERLABEL 0x1011 /* deprecated */ #ifdef __APPLE__ #define SO_NREAD 0x1020 /* APPLE: get 1st-packet byte count */ #define SO_NKE 0x1021 /* APPLE: Install socket-level NKE */ @@ -302,7 +302,7 @@ #define SO_RECV_TRAFFIC_CLASS 0x1087 /* Receive traffic class (bool) */ #define SO_TRAFFIC_CLASS_DBG 0x1088 /* Debug traffic class (struct 
so_tcdbg) */ -#define SO_TRAFFIC_CLASS_STATS 0x1089 /* Traffic class statistics */ +#define SO_OPTION_UNUSED_0 0x1089 /* Traffic class statistics */ #define SO_PRIVILEGED_TRAFFIC_CLASS 0x1090 /* Privileged traffic class (bool) */ #define SO_DEFUNCTIT 0x1091 /* Defunct a socket (only in internal builds) */ #define SO_DEFUNCTOK 0x1100 /* can be defunct'd */ @@ -342,12 +342,13 @@ #define SO_INTCOPROC_ALLOW 0x1118 /* Try to use internal co-processor interfaces. */ #endif /* PRIVATE */ -#define SO_NETSVC_MARKING_LEVEL 0x1119 /* Get QoS marking in effect for socket */ +#define SO_NETSVC_MARKING_LEVEL 0x1119 /* Get QoS marking in effect for socket */ #ifdef PRIVATE -#define SO_NECP_LISTENUUID 0x1120 /* NECP client UUID for listener */ -#define SO_MPKL_SEND_INFO 0x1122 /* (struct so_mpkl_send_info) */ -#define SO_STATISTICS_EVENT 0x1123 /* int64 argument, an event in statistics collection */ +#define SO_NECP_LISTENUUID 0x1120 /* NECP client UUID for listener */ +#define SO_MPKL_SEND_INFO 0x1122 /* (struct so_mpkl_send_info) */ +#define SO_STATISTICS_EVENT 0x1123 /* int64 argument, an event in statistics collection */ +#define SO_WANT_KEV_SOCKET_CLOSED 0x1124 /* want delivery of KEV_SOCKET_CLOSED (int) */ #endif /* PRIVATE */ /* * Network Service Type for option SO_NET_SERVICE_TYPE @@ -592,7 +593,8 @@ struct so_np_extensions { #ifdef PRIVATE #define AF_MULTIPATH 39 #endif /* PRIVATE */ -#define AF_MAX 40 +#define AF_VSOCK 40 /* VM Sockets */ +#define AF_MAX 41 #endif /* (!_POSIX_C_SOURCE || _DARWIN_C_SOURCE) */ /* @@ -687,6 +689,7 @@ struct sockaddr_storage { #ifdef PRIVATE #define PF_MULTIPATH AF_MULTIPATH #endif /* PRIVATE */ +#define PF_VSOCK AF_VSOCK #define PF_MAX AF_MAX /* @@ -749,6 +752,7 @@ struct sockaddr_storage { { "netbios", CTLTYPE_NODE }, \ { "ppp", CTLTYPE_NODE }, \ { "hdrcomplete", CTLTYPE_NODE }, \ + { "vsock", CTLTYPE_NODE }, \ } #endif /* KERNEL_PRIVATE */ @@ -1016,8 +1020,15 @@ struct user32_sa_endpoints { #define MSG_NBIO 0x20000 /* FIONBIO mode, used by fifofs */ #define MSG_SKIPCFIL 0x40000 /* skip pass content filter */ #endif +#endif /* (!_POSIX_C_SOURCE || _DARWIN_C_SOURCE) */ + +#if __DARWIN_C_LEVEL >= 200809L +#define MSG_NOSIGNAL 0x80000 /* do not generate SIGPIPE on EOF */ +#endif /* __DARWIN_C_LEVEL */ + +#if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE) #ifdef KERNEL -#define MSG_USEUPCALL 0x80000000 /* Inherit upcall in sock_accept */ +#define MSG_USEUPCALL 0x80000000 /* Inherit upcall in sock_accept */ #endif #endif /* (!_POSIX_C_SOURCE || _DARWIN_C_SOURCE) */ @@ -1096,7 +1107,7 @@ struct cmsgcred { #define CMSG_LEN(l) (__DARWIN_ALIGN32(sizeof(struct cmsghdr)) + (l)) #ifdef KERNEL -#define CMSG_ALIGN(n) __DARWIN_ALIGN32(n) +#define CMSG_ALIGN(n) ((typeof(n))__DARWIN_ALIGN32(n)) #endif #endif /* (!_POSIX_C_SOURCE || _DARWIN_C_SOURCE) */ @@ -1108,8 +1119,6 @@ struct cmsgcred { #define SCM_TIMESTAMP_MONOTONIC 0x04 /* timestamp (uint64_t) */ #ifdef PRIVATE -#define SCM_SEQNUM 0x05 /* TCP unordered recv seq no */ -#define SCM_MSG_PRIORITY 0x06 /* TCP unordered snd priority */ #define SCM_TIMESTAMP_CONTINUOUS 0x07 /* timestamp (uint64_t) */ #define SCM_MPKL_SEND_INFO 0x08 /* send info for multi-layer packet logging (struct so_mpkl_send_info) */ #define SCM_MPKL_RECV_INFO 0x09 /* receive info for multi-layer packet logging (struct so_mpkl_recv_info */ @@ -1147,7 +1156,7 @@ struct omsghdr { #define SHUT_WR 1 /* shut down the writing side */ #define SHUT_RDWR 2 /* shut down both sides */ -#if !defined(_POSIX_C_SOURCE) +#if !defined(_POSIX_C_SOURCE) || 
defined(_DARWIN_C_SOURCE) /* * sendfile(2) header/trailer struct */ @@ -1186,7 +1195,7 @@ struct user32_sf_hdtr { #endif /* KERNEL */ -#endif /* !_POSIX_C_SOURCE */ +#endif /* (!_POSIX_C_SOURCE || _DARWIN_C_SOURCE) */ #ifdef PRIVATE #if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE) @@ -1297,7 +1306,7 @@ struct so_cinforeq64 { /* valid connection info auxiliary data types */ #define CIAUX_TCP 0x1 /* TCP auxiliary data (conninfo_tcp_t) */ -#define CIAUX_MPTCP 0x2 /* MPTCP auxiliary data (conninfo_mptcp_t) */ +#define CIAUX_MPTCP 0x2 /* MPTCP auxiliary data (conninfo_multipathtcp) */ /* * Structure for SIOC{S,G}CONNORDER @@ -1324,6 +1333,14 @@ struct kev_netpolicy_ifdenied { __uint32_t ev_if_functional_type; }; +/* + * KEV_NETPOLICY_NETDENIED event structure + */ +struct kev_netpolicy_netdenied { + struct netpolicy_event_data ev_data; + __uint32_t ev_network_type; +}; + /* * Network Service Type to DiffServ Code Point mapping */ diff --git a/bsd/sys/socketvar.h b/bsd/sys/socketvar.h index cbb856761..c5124169a 100644 --- a/bsd/sys/socketvar.h +++ b/bsd/sys/socketvar.h @@ -114,37 +114,9 @@ struct data_stats { u_int64_t txpackets; u_int64_t txbytes; }; - -#define MSG_PRI_0 0 /* TCP message priority, lowest */ -#define MSG_PRI_1 1 -#define MSG_PRI_2 2 -#define MSG_PRI_3 3 /* TCP message priority, highest */ -#define MSG_PRI_MAX MSG_PRI_3 -#define MSG_PRI_MIN MSG_PRI_0 -#define MSG_PRI_COUNT 4 -#define MSG_PRI_DEFAULT MSG_PRI_1 #endif /* PRIVATE */ #ifdef KERNEL_PRIVATE -/* State for TCP message send or receive */ -struct msg_priq { - struct mbuf *msgq_head; /* first mbuf in the queue */ - struct mbuf *msgq_tail; /* last mbuf in the queue */ - struct mbuf *msgq_lastmsg; /* last message in the queue */ - u_int32_t msgq_flags; /* flags per priority queue */ -#define MSGQ_MSG_NOTDONE 0x1 /* set when EOR of a msg is not seen */ - u_int32_t msgq_bytes; /* data bytes in this queue */ -}; - -struct msg_state { - struct msg_priq msg_priq[MSG_PRI_COUNT]; /* priority queues */ - u_int32_t msg_serial_bytes; /* bytes moved to serial queue */ - u_int32_t msg_uno_bytes; /* out-of-order chars in rcv buffer */ -}; - -/* mbuf flag used to indicate out of order data received */ -#define M_UNORDERED_DATA M_PROTO1 - /* * Kernel structure per socket. * Contains send and receive buffer queues, @@ -227,15 +199,16 @@ struct socket { #define SB_NOCOMPRESS 0x1000 /* do not compress socket buffer */ #define SB_SNDBYTE_CNT 0x2000 /* keep track of snd bytes per interface */ #define SB_UPCALL_LOCK 0x4000 /* Keep socket locked when doing the upcall */ +#define SB_LIMITED 0x8000 /* Socket buffer size limited */ + /* XXX Note that Unix domain socket's sb_flags is defined as short */ caddr_t so_tpcb; /* Misc. protocol control block, used * by some kexts */ - void (*so_event)(struct socket *, void *, u_int32_t); + void (*so_event)(struct socket *, void *, long); void *so_eventarg; /* Arg for above */ kauth_cred_t so_cred; /* cred of who opened the socket */ /* NB: generation count must not be first; easiest to make it last. 
*/ so_gen_t so_gencnt; /* generation count */ - TAILQ_HEAD(, eventqelt) so_evlist; STAILQ_ENTRY(socket) so_cache_ent; /* socache entry */ caddr_t so_saved_pcb; /* Saved pcb when cacheing */ u_int32_t cache_timestamp; /* time socket was cached */ @@ -271,8 +244,6 @@ struct socket { #define SOF_NOTSENT_LOWAT 0x00080000 /* A different lowat on not sent * data has been set */ #define SOF_KNOTE 0x00100000 /* socket is on the EV_SOCK klist */ -#define SOF_USELRO 0x00200000 /* TCP must use LRO on these sockets */ -#define SOF_ENABLE_MSGS 0x00400000 /* TCP must enable message delivery */ #define SOF_FLOW_DIVERT 0x00800000 /* Flow Divert is enabled */ #define SOF_MP_SUBFLOW 0x01000000 /* is a multipath subflow socket */ #define SOF_MP_SEC_SUBFLOW 0x04000000 /* Set up secondary flow */ @@ -283,10 +254,10 @@ struct socket { uint32_t so_upcallusecount; /* number of upcalls in progress */ int so_usecount; /* refcounting of socket use */ int so_retaincnt; - u_int32_t so_filteruse; /* usecount for the socket filters */ - u_int16_t so_traffic_class; + uint32_t so_filteruse; /* usecount for the socket filters */ + uint16_t so_traffic_class; int8_t so_netsvctype; - u_int8_t so_restrictions; + uint8_t so_restrictions; thread_t so_send_filt_thread; /* for debug pruposes */ @@ -307,7 +278,6 @@ struct socket { struct data_stats so_tc_stats[SO_TC_STATS_MAX]; struct klist so_klist; /* klist for EV_SOCK events */ - struct msg_state *so_msg_state; /* unordered snd/rcv state */ struct flow_divert_pcb *so_fd_pcb; /* Flow Divert control block */ #if CONTENT_FILTER @@ -320,16 +290,16 @@ struct socket { pid_t e_pid; /* pid of the effective owner */ u_int64_t e_upid; /* upid of the effective owner */ -#if defined(XNU_TARGET_OS_OSX) +#if XNU_TARGET_OS_OSX pid_t so_rpid; /* pid of the responsible process */ -#endif +#endif /* XNU_TARGET_OS_OSX */ uuid_t last_uuid; /* uuid of most recent accessor */ uuid_t e_uuid; /* uuid of effective owner */ uuid_t so_vuuid; /* UUID of the Voucher originator */ -#if defined(XNU_TARGET_OS_OSX) +#if XNU_TARGET_OS_OSX uuid_t so_ruuid; /* UUID of the responsible process */ -#endif +#endif /* XNU_TARGET_OS_OSX */ int32_t so_policy_gencnt; /* UUID policy gencnt */ @@ -342,8 +312,7 @@ struct socket { #define SOF1_PRECONNECT_DATA 0x00000020 /* request for preconnect data */ #define SOF1_EXTEND_BK_IDLE_WANTED 0x00000040 /* option set */ #define SOF1_EXTEND_BK_IDLE_INPROG 0x00000080 /* socket */ -#define SOF1_CACHED_IN_SOCK_LAYER 0x00000100 /* bundled with inpcb and - * tcpcb */ +#define SOF1_CACHED_IN_SOCK_LAYER 0x00000100 /* bundled with inpcb and tcpcb */ #define SOF1_TFO_REWIND 0x00000200 /* rewind mptcp meta data */ #define SOF1_CELLFALLBACK 0x00000400 /* Initiated by cell fallback */ #define SOF1_QOSMARKING_ALLOWED 0x00000800 /* policy allows DSCP map */ @@ -360,6 +329,8 @@ struct socket { #define SOF1_DNS_COUNTED 0x00400000 /* socket counted to send DNS queries */ #define SOF1_MPKL_SEND_INFO 0x00800000 /* SO_MPKL_SEND_INFO option is set */ #define SOF1_INBOUND 0x01000000 /* Created via a passive listener */ +#define SOF1_WANT_KEV_SOCK_CLOSED 0x02000000 /* Want generation of KEV_SOCKET_CLOSED event */ +#define SOF1_FLOW_DIVERT_SKIP 0x04000000 /* Flow divert already declined to handle the socket */ u_int64_t so_extended_bk_start; @@ -462,7 +433,7 @@ struct xsocket { uid_t so_uid; /* XXX */ }; -#if !CONFIG_EMBEDDED +#if XNU_TARGET_OS_OSX || !(TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR) struct xsocket64 { u_int32_t xso_len; /* length of this structure */ u_int64_t xso_so; /* makes a convenient 
handle */ @@ -484,7 +455,7 @@ struct xsocket64 { struct xsockbuf so_snd; uid_t so_uid; /* XXX */ }; -#endif /* !CONFIG_EMBEDDED */ +#endif /* XNU_TARGET_OS_OSX || !(TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR) */ #ifdef PRIVATE #define XSO_SOCKET 0x001 @@ -728,7 +699,6 @@ extern int sothrottlelog; extern int sorestrictrecv; extern int sorestrictsend; extern int somaxconn; -extern uint32_t tcp_do_autosendbuf; extern uint32_t tcp_autosndbuf_max; extern uint32_t tcp_autosndbuf_inc; extern u_int32_t sotcdb; @@ -794,7 +764,7 @@ extern int soopt_cred_check(struct socket *so, int priv, boolean_t allow_root, boolean_t ignore_delegate); extern int soreceive(struct socket *so, struct sockaddr **paddr, struct uio *uio, struct mbuf **mp0, struct mbuf **controlp, int *flagsp); -extern int soreserve(struct socket *so, u_int32_t sndcc, u_int32_t rcvcc); +extern int soreserve(struct socket *so, uint32_t sndcc, uint32_t rcvcc); extern void soreserve_preconnect(struct socket *so, unsigned int pre_cc); extern void sorwakeup(struct socket *so); extern int sosend(struct socket *so, struct sockaddr *addr, struct uio *uio, @@ -805,7 +775,7 @@ extern int sosend_list(struct socket *so, struct uio **uio, u_int uiocnt, int flags); extern int soreceive_list(struct socket *so, struct recv_msg_elem *msgarray, u_int msgcnt, int *flags); -extern void sonullevent(struct socket *so, void *arg, uint32_t hint); +extern void sonullevent(struct socket *so, void *arg, long hint); extern struct mbuf *sbconcat_mbufs(struct sockbuf *sb, struct sockaddr *asa, struct mbuf *m0, struct mbuf *control); @@ -822,7 +792,7 @@ __BEGIN_DECLS extern void socketinit(void); extern struct sockaddr *dup_sockaddr(struct sockaddr *sa, int canwait); extern int getsock(struct filedesc *fdp, int fd, struct file **fpp); -extern int sockargs(struct mbuf **mp, user_addr_t data, int buflen, int type); +extern int sockargs(struct mbuf **mp, user_addr_t data, socklen_t buflen, int type); extern void get_sockev_state(struct socket *, u_int32_t *); extern void so_update_last_owner_locked(struct socket *, struct proc *); extern void so_update_policy(struct socket *); @@ -834,15 +804,10 @@ extern int sbappend_nodrop(struct sockbuf *sb, struct mbuf *m); extern int sbappendstream(struct sockbuf *sb, struct mbuf *m); extern int sbappendcontrol(struct sockbuf *sb, struct mbuf *m0, struct mbuf *control, int *error_out); -extern int sbappendmsgstream_rcv(struct sockbuf *sb, struct mbuf *m, - uint32_t seqnum, int flags); -extern int sbappendstream_rcvdemux(struct socket *so, struct mbuf *m, - uint32_t seqnum, int flags); +extern int sbappendstream_rcvdemux(struct socket *so, struct mbuf *m); #if MPTCP extern int sbappendmptcpstream_rcv(struct sockbuf *sb, struct mbuf *m); #endif /* MPTCP */ -extern int sbappendmsg_snd(struct sockbuf *sb, struct mbuf *m); -extern void sbpull_unordered_data(struct socket *, int32_t, int32_t); extern void sbcheck(struct sockbuf *sb); extern void sblastmbufchk(struct sockbuf *, const char *); extern void sblastrecordchk(struct sockbuf *, const char *); @@ -888,7 +853,7 @@ extern int soconnectxlocked(struct socket *so, struct sockaddr *src, sae_connid_t *, uint32_t, void *, u_int32_t, uio_t, user_ssize_t *); extern int sodisconnectx(struct socket *so, sae_associd_t, sae_connid_t); extern int sodisconnectxlocked(struct socket *so, sae_associd_t, sae_connid_t); -extern void soevupcall(struct socket *, u_int32_t); +extern void soevupcall(struct socket *so, long hint); /* flags for socreate_internal */ #define SOCF_MPTCP 0x1 /* MPTCP-subflow */ 
extern int socreate_internal(int dom, struct socket **aso, int type, int proto, @@ -936,15 +901,15 @@ extern int soshutdown(struct socket *so, int how); extern int soshutdownlock(struct socket *so, int how); extern int soshutdownlock_final(struct socket *so, int how); extern void sotoxsocket(struct socket *so, struct xsocket *xso); -#if !CONFIG_EMBEDDED +#if XNU_TARGET_OS_OSX extern void sotoxsocket64(struct socket *so, struct xsocket64 *xso); -#endif /* !CONFIG_EMBEDDED */ +#endif /* XNU_TARGET_OS_OSX */ extern int sosendallatonce(struct socket *so); extern int soreadable(struct socket *so); extern int sowriteable(struct socket *so); extern void sowwakeup(struct socket *so); extern int sosendcheck(struct socket *, struct sockaddr *, user_ssize_t, - int32_t, int32_t, int, int *, struct mbuf *); + int32_t, int32_t, int, int *); extern int soo_ioctl(struct fileproc *, u_long, caddr_t, vfs_context_t); extern int soo_stat(struct socket *, void *, int); @@ -974,7 +939,6 @@ extern mbuf_svc_class_t so_tc2msc(int); extern int so_svc2tc(mbuf_svc_class_t); extern u_int8_t tcp_cansbgrow(struct sockbuf *sb); -extern int tcp_get_msg_priority(struct mbuf *, uint32_t *); extern void set_tcp_stream_priority(struct socket *so); extern int so_set_net_service_type(struct socket *, int); @@ -993,13 +957,11 @@ extern void socket_tclass_init(void); extern int so_set_tcdbg(struct socket *, struct so_tcdbg *); extern int sogetopt_tcdbg(struct socket *, struct sockopt *); #endif /* (DEVELOPMENT || DEBUG) */ -extern void so_set_lro(struct socket *, int); extern int so_isdstlocal(struct socket *); extern void so_recv_data_stat(struct socket *, struct mbuf *, size_t); extern void so_inc_recv_data_stat(struct socket *, size_t, size_t, uint32_t); extern int so_wait_for_if_feedback(struct socket *); -extern int msgq_sbspace(struct socket *so, struct mbuf *control); extern int soopt_getm(struct sockopt *sopt, struct mbuf **mp); extern int soopt_mcopyin(struct sockopt *sopt, struct mbuf *m); extern int soopt_mcopyout(struct sockopt *sopt, struct mbuf *m); @@ -1012,15 +974,6 @@ extern void mptcp_postproc_sbdrop(struct mbuf *, u_int64_t, u_int32_t, extern void netpolicy_post_msg(uint32_t, struct netpolicy_event_data *, uint32_t); -/* - * Socket operation routines. - * These routines are called by the routines in - * sys_socket.c or from a system process, and - * implement the semantics of socket operations by - * switching out to the protocol specific routines. 
- */ -extern void postevent(struct socket *, struct sockbuf *, int); -extern void evsofree(struct socket *); extern int tcp_notsent_lowat_check(struct socket *so); diff --git a/bsd/sys/sockio.h b/bsd/sys/sockio.h index e0a96050c..87d500627 100644 --- a/bsd/sys/sockio.h +++ b/bsd/sys/sockio.h @@ -223,8 +223,8 @@ #define SIOCSETOT _IOW('s', 128, int) /* deprecated */ #endif /* PRIVATE */ -#define SIOCGIFMAC _IOWR('i', 130, struct ifreq) /* get IF MAC label */ -#define SIOCSIFMAC _IOW('i', 131, struct ifreq) /* set IF MAC label */ +#define SIOCGIFMAC _IOWR('i', 130, struct ifreq) /* deprecated */ +#define SIOCSIFMAC _IOW('i', 131, struct ifreq) /* deprecated */ #define SIOCSIFKPI _IOW('i', 134, struct ifreq) /* set interface kext param - root only */ #define SIOCGIFKPI _IOWR('i', 135, struct ifreq) /* get interface kext param */ @@ -324,10 +324,8 @@ #define SIOCSIFLOWINTERNET _IOWR('i', 191, struct ifreq) #define SIOCGIFLOWINTERNET _IOWR('i', 192, struct ifreq) -#if INET6 #define SIOCGIFNAT64PREFIX _IOWR('i', 193, struct if_nat64req) #define SIOCSIFNAT64PREFIX _IOWR('i', 194, struct if_nat64req) -#endif #define SIOCGIFNEXUS _IOWR('i', 195, struct if_nexusreq) #define SIOCGIFPROTOLIST _IOWR('i', 196, struct if_protolistreq) /* get list of attached protocols */ #ifdef BSD_KERNEL_PRIVATE @@ -344,9 +342,7 @@ #define SIOCGIFLOWPOWER _IOWR('i', 199, struct ifreq) /* Low Power Mode */ #define SIOCSIFLOWPOWER _IOWR('i', 200, struct ifreq) /* Low Power Mode */ -#if INET6 #define SIOCGIFCLAT46ADDR _IOWR('i', 201, struct if_clat46req) -#endif /* INET6 */ #define SIOCGIFMPKLOG _IOWR('i', 202, struct ifreq) /* Multi-layer Packet Logging */ #define SIOCSIFMPKLOG _IOWR('i', 203, struct ifreq) /* Multi-layer Packet Logging */ @@ -358,6 +354,9 @@ #define SIOCGIFNOACKPRIO _IOWR('i', 207, struct ifreq) /* get interface no ack prioritization flag */ #define SIOCSIFNOACKPRIO _IOWR('i', 208, struct ifreq) /* mark interface no ack prioritization flagd */ +#define SIOCGETROUTERMODE _IOWR('i', 209, struct ifreq) /* get IPv4 router mode state */ + +#define SIOCSIFNETWORKID _IOWR('i', 210, struct if_netidreq) /* set Network Identifier for a given interface */ #endif /* PRIVATE */ #endif /* !_SYS_SOCKIO_H_ */ diff --git a/bsd/sys/spawn.h b/bsd/sys/spawn.h index 4bafc11c2..b5bf709b6 100644 --- a/bsd/sys/spawn.h +++ b/bsd/sys/spawn.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2006 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2006-2020 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -65,7 +65,9 @@ #define POSIX_SPAWN_SETSID 0x0400 #ifdef PRIVATE /* unused 0x0800 */ -/* unused 0x1000 */ +#if (DEBUG || DEVELOPMENT) +#define _POSIX_SPAWN_FORCE_4K_PAGES 0x1000 +#endif /* (DEBUG || DEVELOPMENT) */ #define _POSIX_SPAWN_ALLOW_DATA_EXEC 0x2000 #endif /* PRIVATE */ #define POSIX_SPAWN_CLOEXEC_DEFAULT 0x4000 @@ -73,6 +75,8 @@ #define _POSIX_SPAWN_HIGH_BITS_ASLR 0x8000 #endif /* PRIVATE */ +#define _POSIX_SPAWN_RESLIDE 0x0800 + /* * Possible values to be set for the process control actions on resource starvation. * POSIX_SPAWN_PCONTROL_THROTTLE indicates that the process is to be throttled on starvation. 
diff --git a/bsd/sys/spawn_internal.h b/bsd/sys/spawn_internal.h index f0b7866f1..d669abe92 100644 --- a/bsd/sys/spawn_internal.h +++ b/bsd/sys/spawn_internal.h @@ -48,6 +48,7 @@ #include #include #include /* COALITION_NUM_TYPES */ +#include #include /* @@ -77,6 +78,7 @@ typedef enum { PSPA_AU_SESSION = 2, PSPA_IMP_WATCHPORTS = 3, PSPA_REGISTERED_PORTS = 4, + PSPA_PTRAUTH_TASK_PORT = 5, PSPA_SUID_CRED = 6, } pspa_t; @@ -227,11 +229,16 @@ typedef struct _posix_spawnattr { int psa_memlimit_inactive; /* jetsam memory limit (in MB) when process is inactive */ uint64_t psa_qos_clamp; /* QoS Clamp to set on the new process */ - uint64_t psa_darwin_role; /* PRIO_DARWIN_ROLE to set on the new process */ + task_role_t psa_darwin_role; /* PRIO_DARWIN_ROLE to set on the new process */ int psa_thread_limit; /* thread limit */ uint64_t psa_max_addr; /* Max valid VM address */ + bool psa_no_smt; + bool psa_tecs; + int psa_platform; /* Plaform for the binary */ + cpu_subtype_t psa_subcpuprefs[NBINPREFS]; /* subcpu affinity prefs*/ + uint32_t psa_options; /* More options to be passed to posix_spawn */ /* * NOTE: Extensions array pointers must stay at the end so that * everything above this point stays the same size on different bitnesses @@ -242,6 +249,7 @@ typedef struct _posix_spawnattr { struct _posix_spawn_coalition_info *psa_coalition_info; /* coalition info */ struct _posix_spawn_persona_info *psa_persona_info; /* spawn new process into given persona */ struct _posix_spawn_posix_cred_info *psa_posix_cred_info; /* posix creds: uid/gid/groups */ + char *psa_subsystem_root_path; /* pass given path in apple strings */ } *_posix_spawnattr_t; /* @@ -312,7 +320,7 @@ typedef struct _posix_spawnattr { #define POSIX_SPAWN_PROC_TYPE_MASK 0x00000F00 #define POSIX_SPAWN_PROC_TYPE_APP_DEFAULT 0x00000100 -#define POSIX_SPAWN_PROC_TYPE_APP_TAL 0x00000200 +#define POSIX_SPAWN_PROC_TYPE_APP_TAL 0x00000200 /* unused */ #define POSIX_SPAWN_PROC_TYPE_DAEMON_STANDARD 0x00000300 #define POSIX_SPAWN_PROC_TYPE_DAEMON_INTERACTIVE 0x00000400 @@ -332,6 +340,12 @@ typedef struct _posix_spawnattr { #define POSIX_SPAWN_DARWIN_ROLE_NONE 0x00000000 /* Other possible values are specified by PRIO_DARWIN_ROLE in sys/resource.h */ +/* Other posix spawn options passed through psa_options */ +__options_decl(posix_spawn_options, uint32_t, { + PSA_OPTION_NONE = 0, + PSA_OPTION_PLUGIN_HOST_DISABLE_A_KEYS = 0x1, +}); + /* * Allowable posix_spawn() file actions */ @@ -365,7 +379,7 @@ typedef struct _psfa_action { mach_port_name_t psfaa_fileport; /* fileport to operate on */ }; union { - struct _psfaa_open { + struct { int psfao_oflag; /* open flags to use */ mode_t psfao_mode; /* mode for open */ char psfao_path[PATH_MAX]; /* path to open */ @@ -445,6 +459,9 @@ struct _posix_spawn_args_desc { __darwin_size_t posix_cred_info_size; struct _posix_spawn_posix_cred_info *posix_cred_info; + + __darwin_size_t subsystem_root_path_size; + char *subsystem_root_path; }; #ifdef KERNEL @@ -470,6 +487,8 @@ struct user32__posix_spawn_args_desc { uint32_t persona_info; uint32_t posix_cred_info_size; uint32_t posix_cred_info; + uint32_t subsystem_root_path_size; + uint32_t subsystem_root_path; }; struct user__posix_spawn_args_desc { @@ -487,6 +506,8 @@ struct user__posix_spawn_args_desc { user_addr_t persona_info; user_size_t posix_cred_info_size; user_addr_t posix_cred_info; + user_size_t subsystem_root_path_size; + user_addr_t subsystem_root_path; }; diff --git a/bsd/sys/stackshot.h b/bsd/sys/stackshot.h index b5c00135c..2086ebcdb 100644 --- 
a/bsd/sys/stackshot.h +++ b/bsd/sys/stackshot.h @@ -36,7 +36,7 @@ typedef struct stackshot_config { /* Input options */ int sc_pid; /* PID to trace, or -1 for the entire system */ - uint32_t sc_flags; /* Stackshot flags */ + uint64_t sc_flags; /* Stackshot flags */ uint64_t sc_delta_timestamp; /* Retrieve a delta stackshot of system state that has changed since this time */ /* Stackshot results */ @@ -46,21 +46,23 @@ typedef struct stackshot_config { /* Internals */ uint64_t sc_out_buffer_addr; /* Location where the kernel should copy the address of the newly mapped buffer in user space */ uint64_t sc_out_size_addr; /* Location where the kernel should copy the size of the stackshot buffer */ + uint64_t sc_pagetable_mask; /* Mask of page table levels to dump, must pass STACKSHOT_PAGE_TABLES */ } stackshot_config_t; #ifndef KERNEL stackshot_config_t * stackshot_config_create(void); int stackshot_config_set_pid(stackshot_config_t * stackshot_config, int pid); -int stackshot_config_set_flags(stackshot_config_t * stackshot_config, uint32_t flags); +int stackshot_config_set_flags(stackshot_config_t * stackshot_config, uint64_t flags); int stackshot_capture_with_config(stackshot_config_t * stackshot_config); void * stackshot_config_get_stackshot_buffer(stackshot_config_t * stackshot_config); uint32_t stackshot_config_get_stackshot_size(stackshot_config_t * stackshot_config); int stackshot_config_set_size_hint(stackshot_config_t * stackshot_config, uint32_t suggested_size); int stackshot_config_set_delta_timestamp(stackshot_config_t * stackshot_config, uint64_t delta_timestamp); +int stackshot_config_set_pagetable_mask(stackshot_config_t * stackshot_config, uint32_t mask); int stackshot_config_dealloc_buffer(stackshot_config_t * stackshot_config); int stackshot_config_dealloc(stackshot_config_t * stackshot_config); -#endif /* KERNEL */ +#endif /* ! KERNEL */ #endif /* _SYS_STACKSHOT_H */ diff --git a/bsd/sys/stat.h b/bsd/sys/stat.h index ca2a54e5b..ec5858ebd 100644 --- a/bsd/sys/stat.h +++ b/bsd/sys/stat.h @@ -540,6 +540,7 @@ extern void munge_user32_stat64(struct stat64 *sbp, struct user32_stat64 *usbp); #endif #endif +#if __DARWIN_C_LEVEL >= __DARWIN_C_FULL /* * Extended flags ("EF") returned by ATTR_CMNEXT_EXT_FLAGS from getattrlist/getattrlistbulk */ @@ -548,7 +549,8 @@ extern void munge_user32_stat64(struct stat64 *sbp, struct user32_stat64 *usbp); #define EF_IS_SYNC_ROOT 0x00000004 /* file is a sync root for iCloud */ #define EF_IS_PURGEABLE 0x00000008 /* file is purgeable */ #define EF_IS_SPARSE 0x00000010 /* file has at least one sparse region */ - +#define EF_IS_SYNTHETIC 0x00000020 /* a synthetic directory/symlink */ +#endif #ifndef KERNEL diff --git a/bsd/sys/sys__types.modulemap b/bsd/sys/sys__types.modulemap new file mode 100644 index 000000000..5263a2ef5 --- /dev/null +++ b/bsd/sys/sys__types.modulemap @@ -0,0 +1,54 @@ +// Module map for sys/_types.h and sys/_types/. +// +// See also: stdint.modulemap +// See also: sys_types.modulemap + +// Module for sys/_types/ and sys/_types.h. 
+module Darwin_C_stdint._sys_types { + export * + + module _types { + export * + header "sys/_types.h" + } + module _int16_t { + export * + header "sys/_types/_int16_t.h" + } + module _int32_t { + export * + header "sys/_types/_int32_t.h" + } + module _int64_t { + export * + header "sys/_types/_int64_t.h" + } + module _int8_t { + export * + header "sys/_types/_int8_t.h" + } + module _intptr_t { + export * + header "sys/_types/_intptr_t.h" + } + module _u_int16_t { + export * + header "sys/_types/_u_int16_t.h" + } + module _u_int32_t { + export * + header "sys/_types/_u_int32_t.h" + } + module _u_int64_t { + export * + header "sys/_types/_u_int64_t.h" + } + module _u_int8_t { + export * + header "sys/_types/_u_int8_t.h" + } + module _uintptr_t { + export * + header "sys/_types/_uintptr_t.h" + } +} diff --git a/bsd/sys/sys_cdefs.modulemap b/bsd/sys/sys_cdefs.modulemap new file mode 100644 index 000000000..5347077d3 --- /dev/null +++ b/bsd/sys/sys_cdefs.modulemap @@ -0,0 +1,10 @@ +// Module map for sys/cdefs.h. +// +// See also: stdint.modulemap + +module Darwin_C_stdint._sys_cdefs { + export * + header "sys/cdefs.h" + textual header "sys/_symbol_aliasing.h" + textual header "sys/_posix_availability.h" +} diff --git a/bsd/sys/sys_domain.h b/bsd/sys/sys_domain.h index d14a5abcb..a0bda7358 100644 --- a/bsd/sys/sys_domain.h +++ b/bsd/sys/sys_domain.h @@ -56,7 +56,7 @@ struct sockaddr_sys { #ifdef PRIVATE struct xsystmgen { u_int32_t xg_len; /* length of this structure */ - u_int32_t xg_count; /* number of PCBs at this time */ + u_int64_t xg_count; /* number of PCBs at this time */ u_int64_t xg_gen; /* generation count at this time */ u_int64_t xg_sogen; /* current socket generation count */ }; diff --git a/bsd/sys/sys_types.modulemap b/bsd/sys/sys_types.modulemap new file mode 100644 index 000000000..1e85703b1 --- /dev/null +++ b/bsd/sys/sys_types.modulemap @@ -0,0 +1,25 @@ +// Module map for sys/types.h and some of sys/_types/. +// +// See also: module.modulemap +// See also: sys__types.modulemap (part of Darwin_C_stdint). + +module Darwin.POSIX.sys.types { + export * + + header "sys/types.h" + umbrella "sys/_types" + + // See sys__types.modulemap. + exclude header "sys/_types/_int16_t.h" + exclude header "sys/_types/_int32_t.h" + exclude header "sys/_types/_int64_t.h" + exclude header "sys/_types/_int8_t.h" + exclude header "sys/_types/_intptr_t.h" + exclude header "sys/_types/_u_int16_t.h" + exclude header "sys/_types/_u_int32_t.h" + exclude header "sys/_types/_u_int64_t.h" + exclude header "sys/_types/_u_int8_t.h" + exclude header "sys/_types/_uintptr_t.h" + + module * { export * } +} diff --git a/bsd/sys/sysctl.h b/bsd/sys/sysctl.h index f37e9a07d..74a04166b 100644 --- a/bsd/sys/sysctl.h +++ b/bsd/sys/sysctl.h @@ -85,6 +85,7 @@ #else #ifndef XNU_KERNEL_PRIVATE #include +#include #endif #endif @@ -227,6 +228,9 @@ SLIST_HEAD(sysctl_oid_list, sysctl_oid); * changing the KPI used for non-static (un)registration in * KEXTs. * + * Non CTLFLAG_OID2 based sysctls are deprecated and unavailable + * to non Intel platforms. + * * This depends on the fact that people declare SYSCTLs, * rather than declaring sysctl_oid structures. All new code * should avoid declaring struct sysctl_oid's directly without @@ -243,7 +247,7 @@ SLIST_HEAD(sysctl_oid_list, sysctl_oid); * get for your sysctl. 
*/ struct sysctl_oid { - struct sysctl_oid_list *oid_parent; + struct sysctl_oid_list * OS_PTRAUTH_SIGNED_PTR("sysctl_oid.oid_parent") oid_parent; SLIST_ENTRY(sysctl_oid) oid_link; int oid_number; int oid_kind; @@ -283,6 +287,9 @@ void sysctl_register_oid(struct sysctl_oid *oidp); void sysctl_unregister_oid(struct sysctl_oid *oidp); void sysctl_load_devicetree_entries(void); +#define nvram_osenvironment "osenvironment" +void sysctl_set_osenvironment(unsigned int size, const void* value); +void sysctl_unblock_osenvironment(void); /* Deprecated */ void sysctl_register_fixed(void) __deprecated; @@ -787,22 +794,22 @@ struct kinfo_proc { */ struct user32_pcred { - char pc_lock[72]; /* opaque content */ - user32_addr_t pc_ucred; /* Current credentials. */ - uid_t p_ruid; /* Real user id. */ - uid_t p_svuid; /* Saved effective user id. */ - gid_t p_rgid; /* Real group id. */ - gid_t p_svgid; /* Saved effective group id. */ - int p_refcnt; /* Number of references. */ + char pc_lock[72]; /* opaque content */ + user32_addr_t pc_ucred; /* Current credentials. */ + uid_t p_ruid; /* Real user id. */ + uid_t p_svuid; /* Saved effective user id. */ + gid_t p_rgid; /* Real group id. */ + gid_t p_svgid; /* Saved effective group id. */ + int p_refcnt; /* Number of references. */ }; struct user64_pcred { - char pc_lock[72]; /* opaque content */ - user64_addr_t pc_ucred; /* Current credentials. */ - uid_t p_ruid; /* Real user id. */ - uid_t p_svuid; /* Saved effective user id. */ - gid_t p_rgid; /* Real group id. */ - gid_t p_svgid; /* Saved effective group id. */ - int p_refcnt; /* Number of references. */ + char pc_lock[72]; /* opaque content */ + user64_addr_t pc_ucred; /* Current credentials. */ + uid_t p_ruid; /* Real user id. */ + uid_t p_svuid; /* Saved effective user id. */ + gid_t p_rgid; /* Real group id. */ + gid_t p_svgid; /* Saved effective group id. */ + int p_refcnt; /* Number of references. */ }; /* LP64 version of kinfo_proc. 
all pointers @@ -819,7 +826,7 @@ struct user32_kinfo_proc { struct user32_vmspace e_vm; /* address space */ pid_t e_ppid; /* parent process id */ pid_t e_pgid; /* process group id */ - short e_jobc; /* job control counter */ + int e_jobc; /* job control counter */ dev_t e_tdev; /* controlling tty dev */ pid_t e_tpgid; /* tty process group id */ user32_addr_t e_tsess; /* tty session pointer */ @@ -843,7 +850,7 @@ struct user64_kinfo_proc { struct user_vmspace e_vm; /* address space */ pid_t e_ppid; /* parent process id */ pid_t e_pgid; /* process group id */ - short e_jobc; /* job control counter */ + int e_jobc; /* job control counter */ dev_t e_tdev; /* controlling tty dev */ pid_t e_tpgid; /* tty process group id */ user64_addr_t e_tsess __attribute((aligned(8))); /* tty session pointer */ @@ -933,8 +940,8 @@ struct user64_loadavg { /* * CTL_HW identifiers */ -#define HW_MACHINE 1 /* string: machine class */ -#define HW_MODEL 2 /* string: specific machine model */ +#define HW_MACHINE 1 /* string: machine class (deprecated: use HW_PRODUCT) */ +#define HW_MODEL 2 /* string: specific machine model (deprecated: use HW_TARGET) */ #define HW_NCPU 3 /* int: number of cpus */ #define HW_BYTEORDER 4 /* int: machine byte order */ #define HW_PHYSMEM 5 /* int: total memory */ @@ -958,12 +965,14 @@ struct user64_loadavg { #define HW_TB_FREQ 23 /* int: Bus Frequency */ #define HW_MEMSIZE 24 /* uint64_t: physical ram size */ #define HW_AVAILCPU 25 /* int: number of available CPUs */ -#define HW_MAXID 26 /* number of valid hw ids */ +#define HW_TARGET 26 /* string: model identifier */ +#define HW_PRODUCT 27 /* string: product identifier */ +#define HW_MAXID 28 /* number of valid hw ids */ #define CTL_HW_NAMES { \ { 0, 0 }, \ - { "machine", CTLTYPE_STRING }, \ - { "model", CTLTYPE_STRING }, \ + { "machine", CTLTYPE_STRING }, /* Deprecated: use hw.product */ \ + { "model", CTLTYPE_STRING }, /* Deprecated: use hw.target */ \ { "ncpu", CTLTYPE_INT }, \ { "byteorder", CTLTYPE_INT }, \ { "physmem", CTLTYPE_INT }, \ @@ -986,7 +995,9 @@ struct user64_loadavg { { "l3cachesize", CTLTYPE_INT }, \ { "tbfrequency", CTLTYPE_INT }, \ { "memsize", CTLTYPE_QUAD }, \ - { "availcpu", CTLTYPE_INT } \ + { "availcpu", CTLTYPE_INT }, \ + { "target", CTLTYPE_STRING }, \ + { "product", CTLTYPE_STRING }, \ } /* @@ -1135,7 +1146,7 @@ struct user64_loadavg { #define CTL_DEBUG_MAXID 20 -#if (CTL_MAXID != 9) || (KERN_MAXID != 72) || (VM_MAXID != 6) || (HW_MAXID != 26) || (USER_MAXID != 21) || (CTL_DEBUG_MAXID != 20) +#if (CTL_MAXID != 9) || (KERN_MAXID != 72) || (VM_MAXID != 6) || (HW_MAXID != 28) || (USER_MAXID != 21) || (CTL_DEBUG_MAXID != 20) #error Use the SYSCTL_*() macros and OID_AUTO instead! 
#endif diff --git a/bsd/sys/sysent.h b/bsd/sys/sysent.h index b68b3cded..3e2774b74 100644 --- a/bsd/sys/sysent.h +++ b/bsd/sys/sysent.h @@ -54,10 +54,7 @@ struct sysent { /* system call table */ */ }; -#ifndef __INIT_SYSENT_C__ -extern struct sysent sysent[]; -#endif /* __INIT_SYSENT_C__ */ - +extern const struct sysent sysent[]; extern const unsigned int nsysent; /* diff --git a/bsd/sys/syslimits.h b/bsd/sys/syslimits.h index 28424700f..c43ab4730 100644 --- a/bsd/sys/syslimits.h +++ b/bsd/sys/syslimits.h @@ -68,11 +68,27 @@ #include #if !defined(_ANSI_SOURCE) + +/* max bytes for an exec function */ +#ifdef XNU_KERNEL_PRIVATE +#if defined(XNU_TARGET_OS_OSX) +#define ARG_MAX (1024 * 1024) +#else +#define ARG_MAX (256 * 1024) +#endif +#else /* XNU_KERNEL_PRIVATE */ +#if defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) +#define ARG_MAX (1024 * 1024) +#else +#define ARG_MAX (256 * 1024) +#endif +#endif /* XNU_KERNEL_PRIVATE */ + /* * Note: CHILD_MAX *must* be less than hard_maxproc, which is set at * compile time; you *cannot* set it higher than the hard limit!! */ -#define ARG_MAX (256 * 1024) /* max bytes for an exec function */ + #if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE) #define CHILD_MAX 266 /* max simultaneous processes */ #define GID_MAX 2147483647U /* max value for a gid_t (2^31-2) */ diff --git a/bsd/sys/syslog.h b/bsd/sys/syslog.h index facf1c036..61b1c9d5e 100644 --- a/bsd/sys/syslog.h +++ b/bsd/sys/syslog.h @@ -324,7 +324,7 @@ struct reg_desc { #include __BEGIN_DECLS -void log(int, const char *, ...); +void log(int, const char *, ...) __printflike(2, 3); #ifdef XNU_KERNEL_PRIVATE int vaddlog(const char *, va_list) __printflike(1, 0); void logtime(time_t); diff --git a/bsd/sys/systm.h b/bsd/sys/systm.h index dfcd2e731..8066c95f8 100644 --- a/bsd/sys/systm.h +++ b/bsd/sys/systm.h @@ -118,13 +118,13 @@ __END_DECLS #ifdef BSD_KERNEL_PRIVATE extern char version[]; /* system version */ -extern const char copyright[]; /* system copyright */ +extern const char *const copyright; /* system copyright */ extern int boothowto; /* reboot flags, from console subsystem */ extern int show_space; extern int minimalboot; -#if CONFIG_EMBEDDED +#if CONFIG_DARKBOOT extern int darkboot; #endif @@ -190,7 +190,7 @@ void throttle_info_end_io(buf_t bp); void timeout(void (*)(void *), void *arg, int ticks); void timeout_with_leeway(void (*)(void *), void *arg, int ticks, int leeway_ticks); void untimeout(void (*)(void *), void *arg); -int bsd_hostname(char *, int, int*); +int bsd_hostname(char *, size_t, size_t*); int vslock(user_addr_t addr, user_size_t len); int vsunlock(user_addr_t addr, user_size_t len, int dirtied); #endif /* KERNEL_PRIVATE */ @@ -202,6 +202,7 @@ int enosys(void); int enxio(void); int eopnotsupp(void); void *hashinit(int count, int type, u_long *hashmask); +void hashdestroy(void *, int type, u_long hashmask); void ovbcopy(const void *from, void *to, size_t len); int fubyte(user_addr_t addr); int fuibyte(user_addr_t addr); diff --git a/bsd/sys/time.h b/bsd/sys/time.h index 7b55c8234..c94a0d04e 100644 --- a/bsd/sys/time.h +++ b/bsd/sys/time.h @@ -136,6 +136,12 @@ struct itimerval { (ts)->tv_sec = (tv)->tv_sec; \ (ts)->tv_nsec = (tv)->tv_usec * 1000; \ } +#ifdef KERNEL +#define TIMEVAL64_TO_TIMESPEC(tv, ts){ \ + (ts)->tv_sec = (time_t)((tv)->tv_sec); \ + (ts)->tv_nsec = (tv)->tv_usec * 1000; \ +} +#endif #define TIMESPEC_TO_TIMEVAL(tv, ts) { \ (tv)->tv_sec = (ts)->tv_sec; \ (tv)->tv_usec = (ts)->tv_nsec / 1000; \ diff --git a/bsd/sys/tty.h b/bsd/sys/tty.h index 
5c3609ccb..b849f3aeb 100644 --- a/bsd/sys/tty.h +++ b/bsd/sys/tty.h @@ -221,7 +221,7 @@ struct clist; #endif #define TS_IOCTL_NOT_OK 0x1000000 /* Workaround */ -#define TS_PGRPHUP 0x2000000 /* Don't change Foregroud process group */ +#define TS_REVOKE 0x2000000 /* Terminal getting revoked */ /* Character type information. */ @@ -234,8 +234,8 @@ struct clist; #define RETURN 6 struct speedtab { - int sp_speed; /* Speed. */ - int sp_code; /* Code. */ + int sp_speed; + int sp_code; }; /* Modem control commands (driver). */ @@ -327,8 +327,6 @@ int ttysleep(struct tty *tp, int ttywait(struct tty *tp); struct tty *ttymalloc(void); void ttyfree(struct tty *); -void ttysetpgrphup(struct tty *tp); -void ttyclrpgrphup(struct tty *tp); #ifdef XNU_KERNEL_PRIVATE extern void ttyhold(struct tty *tp); diff --git a/bsd/sys/ttycom.h b/bsd/sys/ttycom.h index 3fdc94b85..4aadeade5 100644 --- a/bsd/sys/ttycom.h +++ b/bsd/sys/ttycom.h @@ -182,6 +182,7 @@ struct winsize { #define TIOCPTYUNLK _IO('t', 82) /* unlockpt(3) */ #ifdef KERNEL #define TIOCREVOKE _IO('t', 81) +#define TIOCREVOKECLEAR _IO('t', 80) #endif #define TTYDISC 0 /* termios tty line discipline */ diff --git a/bsd/sys/ubc.h b/bsd/sys/ubc.h index e0a5cca0f..e05fdd8d0 100644 --- a/bsd/sys/ubc.h +++ b/bsd/sys/ubc.h @@ -84,11 +84,12 @@ int ubc_range_op(vnode_t, off_t, off_t, int, int *); int ubc_setcred(struct vnode *, struct proc *) __deprecated; /* code signing */ struct cs_blob; -struct cs_blob *ubc_cs_blob_get(vnode_t, cpu_type_t, off_t); +struct cs_blob *ubc_cs_blob_get(vnode_t, cpu_type_t, cpu_subtype_t, off_t); +struct cs_blob *ubc_cs_blob_get_supplement(vnode_t, off_t); /* apis to handle generation count for cs blob */ void cs_blob_reset_cache(void); -int ubc_cs_blob_revalidate(vnode_t, struct cs_blob *, struct image_params *, int); +int ubc_cs_blob_revalidate(vnode_t, struct cs_blob *, struct image_params *, int, uint32_t); int ubc_cs_generation_check(vnode_t); int cs_entitlements_blob_get(proc_t, void **, size_t *); diff --git a/bsd/sys/ubc_internal.h b/bsd/sys/ubc_internal.h index 030feca59..50f97527a 100644 --- a/bsd/sys/ubc_internal.h +++ b/bsd/sys/ubc_internal.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999-2008 Apple Inc. All rights reserved. + * Copyright (c) 1999-2020 Apple Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -51,6 +51,7 @@ #include #include +#include #define UBC_INFO_NULL ((struct ubc_info *) 0) @@ -99,8 +100,9 @@ struct cs_hash; uint8_t cs_hash_type(struct cs_hash const *); struct cs_blob { - struct cs_blob *csb_next; + struct cs_blob * XNU_PTRAUTH_SIGNED_PTR("cs_blob.csb_next") csb_next; cpu_type_t csb_cpu_type; + cpu_subtype_t csb_cpu_subtype; unsigned int csb_flags; off_t csb_base_offset; /* Offset of Mach-O binary in fat binary */ off_t csb_start_offset; /* Blob coverage area start, from csb_base_offset */ @@ -109,17 +111,22 @@ struct cs_blob { vm_offset_t csb_mem_offset; vm_address_t csb_mem_kaddr; unsigned char csb_cdhash[CS_CDHASH_LEN]; + ptrauth_generic_signature_t csb_cdhash_signature; const struct cs_hash *csb_hashtype; - vm_size_t csb_hash_pagesize; /* each hash entry represent this many bytes in the file */ - vm_size_t csb_hash_pagemask; - vm_size_t csb_hash_pageshift; - vm_size_t csb_hash_firstlevel_pagesize; /* First hash this many bytes, then hash the hashes together */ - const CS_CodeDirectory *csb_cd; - const char *csb_teamid; - const CS_GenericBlob *csb_entitlements_blob; /* raw blob, subrange of csb_mem_kaddr */ - void * csb_entitlements; /* The entitlements as an OSDictionary */ +#if CONFIG_SUPPLEMENTAL_SIGNATURES + unsigned char csb_linkage[CS_CDHASH_LEN]; + const struct cs_hash *csb_linkage_hashtype; +#endif + int csb_hash_pageshift; + int csb_hash_firstlevel_pageshift; /* First hash this many bytes, then hash the hashes together */ + const CS_CodeDirectory * XNU_PTRAUTH_SIGNED_PTR("cs_blob.csb_cd") csb_cd; + const char * XNU_PTRAUTH_SIGNED_PTR("cs_blob.csb_teamid") csb_teamid; +#if CONFIG_SUPPLEMENTAL_SIGNATURES + char * XNU_PTRAUTH_SIGNED_PTR("cs_blob.csb_supplement_teamid") csb_supplement_teamid; +#endif + const CS_GenericBlob * XNU_PTRAUTH_SIGNED_PTR("cs_blob.csb_entitlements_blob") csb_entitlements_blob; /* raw blob, subrange of csb_mem_kaddr */ + void * XNU_PTRAUTH_SIGNED_PTR("cs_blob.csb_entitlements") csb_entitlements; /* The entitlements as an OSDictionary */ unsigned int csb_signer_type; - unsigned int csb_reconstituted; /* signature has potentially been modified after validation */ /* The following two will be replaced by the csb_signer_type. 
*/ unsigned int csb_platform_binary:1; @@ -134,7 +141,7 @@ struct cs_blob { struct ubc_info { memory_object_t ui_pager; /* pager */ memory_object_control_t ui_control; /* VM control for the pager */ - vnode_t ui_vnode; /* vnode for this ubc_info */ + vnode_t XNU_PTRAUTH_SIGNED_PTR("ubc_info.ui_vnode") ui_vnode; /* vnode for this ubc_info */ kauth_cred_t ui_ucred; /* holds credentials for NFS paging */ off_t ui_size; /* file size for the vnode */ uint32_t ui_flags; /* flags */ @@ -145,9 +152,12 @@ struct ubc_info { struct timespec cs_mtime; /* modify time of file when * first cs_blob was loaded */ - struct cs_blob *cs_blobs; /* for CODE SIGNING */ + struct cs_blob * XNU_PTRAUTH_SIGNED_PTR("ubc_info.cs_blobs") cs_blobs; /* for CODE SIGNING */ +#if CONFIG_SUPPLEMENTAL_SIGNATURES + struct cs_blob * XNU_PTRAUTH_SIGNED_PTR("ubc_info.cs_blob_supplement") cs_blob_supplement;/* supplemental blob (note that there can only be one supplement) */ +#endif #if CHECK_CS_VALIDATION_BITMAP - void *cs_valid_bitmap; /* right now: used only for signed files on the read-only root volume */ + void * XNU_PTRAUTH_SIGNED_PTR("ubc_info.cs_valid_bitmap") cs_valid_bitmap; /* right now: used only for signed files on the read-only root volume */ uint64_t cs_valid_bitmap_size; /* Save original bitmap size in case the file size changes. * In the future, we may want to reconsider changing the * underlying bitmap to reflect the new file size changes. @@ -171,7 +181,7 @@ struct ubc_info { */ __BEGIN_DECLS -__private_extern__ void ubc_init(void); + __private_extern__ int ubc_umount(mount_t mp); __private_extern__ void ubc_unmountall(void); __private_extern__ memory_object_t ubc_getpager(vnode_t); @@ -204,9 +214,14 @@ int UBCINFOEXISTS(const struct vnode *); /* code signing */ struct cs_blob; -int ubc_cs_blob_add(vnode_t, cpu_type_t, off_t, vm_address_t *, vm_size_t, struct image_params *, int, struct cs_blob **); -int ubc_cs_sigpup_add(vnode_t, vm_address_t, vm_size_t); +int ubc_cs_blob_add(vnode_t, uint32_t, cpu_type_t, cpu_subtype_t, off_t, vm_address_t *, vm_size_t, struct image_params *, int, struct cs_blob **); +#if CONFIG_SUPPLEMENTAL_SIGNATURES +int ubc_cs_blob_add_supplement(vnode_t, vnode_t, off_t, vm_address_t *, vm_size_t, struct cs_blob **); +#endif struct cs_blob *ubc_get_cs_blobs(vnode_t); +#if CONFIG_SUPPLEMENTAL_SIGNATURES +struct cs_blob *ubc_get_cs_supplement(vnode_t); +#endif void ubc_get_cs_mtime(vnode_t, struct timespec *); int ubc_cs_getcdhash(vnode_t, off_t, unsigned char *); kern_return_t ubc_cs_blob_allocate(vm_offset_t *, vm_size_t *); diff --git a/bsd/sys/ucontext.h b/bsd/sys/ucontext.h index 06f867686..b8b9644ed 100644 --- a/bsd/sys/ucontext.h +++ b/bsd/sys/ucontext.h @@ -33,7 +33,6 @@ #include #include -#include #include #include diff --git a/bsd/sys/ucred.h b/bsd/sys/ucred.h index b013af853..08be2fc3a 100644 --- a/bsd/sys/ucred.h +++ b/bsd/sys/ucred.h @@ -80,6 +80,7 @@ struct label; #ifdef __APPLE_API_UNSTABLE #ifdef KERNEL #include +#include /* * In-kernel credential structure. 
@@ -105,14 +106,15 @@ struct ucred { uid_t cr_uid; /* effective user id */ uid_t cr_ruid; /* real user id */ uid_t cr_svuid; /* saved user id */ - short cr_ngroups; /* number of groups in advisory list */ + u_short cr_ngroups; /* number of groups in advisory list */ gid_t cr_groups[NGROUPS];/* advisory group list */ gid_t cr_rgid; /* real group id */ gid_t cr_svgid; /* saved group id */ uid_t cr_gmuid; /* UID for group membership purposes */ int cr_flags; /* flags on credential */ } cr_posix; - struct label *cr_label; /* MAC label */ + struct label * OS_PTRAUTH_SIGNED_PTR("ucred.cr_label") cr_label; /* MAC label */ + /* * NOTE: If anything else (besides the flags) * added after the label, you must change diff --git a/bsd/sys/uio.h b/bsd/sys/uio.h index 1c848d670..94f73e832 100644 --- a/bsd/sys/uio.h +++ b/bsd/sys/uio.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2008 Apple Inc. All rights reserved. + * Copyright (c) 2000-2019 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -64,8 +64,10 @@ #ifndef _SYS_UIO_H_ #define _SYS_UIO_H_ +#include #include #include +#include /* * [XSI] The ssize_t and size_t types shall be defined as described @@ -76,7 +78,7 @@ /* * [XSI] Structure whose address is passed as the second parameter to the - * readv() and writev() functions. + * readv(), preadv(), writev() and pwritev() functions. */ #include @@ -255,7 +257,16 @@ __END_DECLS __BEGIN_DECLS ssize_t readv(int, const struct iovec *, int) __DARWIN_ALIAS_C(readv); ssize_t writev(int, const struct iovec *, int) __DARWIN_ALIAS_C(writev); + +#if (!defined(_POSIX_C_SOURCE) && !defined(_XOPEN_SOURCE)) || defined(_DARWIN_C_SOURCE) + +ssize_t preadv(int, const struct iovec *, int, off_t) __DARWIN_NOCANCEL(preadv) __API_AVAILABLE(macos(10.16), ios(14.0), watchos(7.0), tvos(14.0)); +ssize_t pwritev(int, const struct iovec *, int, off_t) __DARWIN_NOCANCEL(pwritev) __API_AVAILABLE(macos(10.16), ios(14.0), watchos(7.0), tvos(14.0)); + +#endif /* #if (!defined(_POSIX_C_SOURCE) && !defined(_XOPEN_SOURCE)) || defined(_DARWIN_C_SOURCE) */ + __END_DECLS + #endif /* !KERNEL */ #endif /* !_SYS_UIO_H_ */ diff --git a/bsd/sys/ulock.h b/bsd/sys/ulock.h index b86d10eef..71aa72654 100644 --- a/bsd/sys/ulock.h +++ b/bsd/sys/ulock.h @@ -63,6 +63,8 @@ ulock_owner_value_to_port_name(uint32_t uval) extern int __ulock_wait(uint32_t operation, void *addr, uint64_t value, uint32_t timeout); /* timeout is specified in microseconds */ +extern int __ulock_wait2(uint32_t operation, void *addr, uint64_t value, + uint64_t timeout, uint64_t value2); extern int __ulock_wake(uint32_t operation, void *addr, uint64_t wake_value); #endif /* !KERNEL */ @@ -92,6 +94,7 @@ extern int __ulock_wake(uint32_t operation, void *addr, uint64_t wake_value); */ #define ULF_WAKE_ALL 0x00000100 #define ULF_WAKE_THREAD 0x00000200 +#define ULF_WAKE_ALLOW_NON_OWNER 0x00000400 /* * operation bits [23, 16] contain the flags for __ulock_wait @@ -130,7 +133,8 @@ extern int __ulock_wake(uint32_t operation, void *addr, uint64_t wake_value); #define ULF_WAKE_MASK (ULF_NO_ERRNO | \ ULF_WAKE_ALL | \ - ULF_WAKE_THREAD) + ULF_WAKE_THREAD | \ + ULF_WAKE_ALLOW_NON_OWNER) #endif /* PRIVATE */ diff --git a/bsd/sys/un.h b/bsd/sys/un.h index 6dd075754..f367c4e97 100644 --- a/bsd/sys/un.h +++ b/bsd/sys/un.h @@ -90,6 +90,7 @@ struct sockaddr_un { #define LOCAL_PEEREPID 0x003 /* retrieve eff. peer pid */ #define LOCAL_PEERUUID 0x004 /* retrieve peer UUID */ #define LOCAL_PEEREUUID 0x005 /* retrieve eff. 
peer UUID */ +#define LOCAL_PEERTOKEN 0x006 /* retrieve peer audit token */ #endif /* (!_POSIX_C_SOURCE || _DARWIN_C_SOURCE) */ diff --git a/bsd/sys/unicode.h b/bsd/sys/unicode.h new file mode 100644 index 000000000..0aed692a8 --- /dev/null +++ b/bsd/sys/unicode.h @@ -0,0 +1,259 @@ +/* + * Copyright (c) 2016-2020 Apple Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +#ifndef unicode_h +#define unicode_h + +#ifdef KERNEL_PRIVATE + +#include +#include + +/* + * WARNING - callers that use the following Unicode normalization interface for on-disk + * structures should be aware that the implementation will be periodically updated for + * the latest Unicode standard version. + */ + +enum { + /* Maximum size of UTF32 reordering buffer for stream-safe format */ + kNCFStreamSafeBufMax = 32 +}; + +/* + * utf8_normalizeOptCaseFoldAndHash + * + * Convert a given UTF-8 string to UTF-32 in one of the following normalized forms, + * as specified by the case_sens parameter, and feed the result incrementally to + * the provided hash function callback: + * - "canonical caseless form" (case-folded NFD, as described by definition D145 + * in chapter 3 of The Unicode Standard); for case-insensitive behavior. + * - standard NFD; for case-sensitive behavior (if case_sens = true). + * + * The input string should be valid UTF-8 that meets the criteria for stream safe + * text as described in http://unicode.org/reports/tr15/#Stream_Safe_Text_Format. + * It should not contain ASCII 0x00 or '/'. + * + * str: The input UTF-8 string (need not be 0 terminated) + * str_len: The byte length of the input string (excluding any 0 terminator) + * case_sens: False for case-insensitive behavior; generates canonical caseless form. + * True for case-sensitive behavior; generates standard NFD. + * hash_func: A pointer to a hashing function to compute the hash of the + * normalized/case-folded result. buf contains buf_len bytes + * of data to be added to the hash using the caller-supplied + * context (ctx). + * hash_ctx: The context for the hash function. + * + * Returns: 0 on success, or + * EILSEQ: The input string contains illegal ASCII-range characters + * (0x00 or '/'), or is not well-formed stream-safe UTF-8, or + * contains codepoints that are non-characters or unassigned in + * the version of Unicode currently supported. 
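A minimal editorial sketch (not part of the patch) of how a kernel-private caller might use the normalize-and-hash interface documented above and declared just below. The FNV-1a callback, seed constants, and helper name are hypothetical; only utf8_normalizeOptCaseFoldAndHash() and its parameters come from this header.

#include <sys/types.h>
#include <sys/unicode.h>   /* header added by this patch (KERNEL_PRIVATE) */

/* Hypothetical callback matching the hash_func signature: folds each chunk
 * of normalized output into an FNV-1a accumulator passed via ctx. */
static void
name_hash_cb(void *buf, size_t buf_len, void *ctx)
{
	uint64_t *h = (uint64_t *)ctx;
	const uint8_t *p = (const uint8_t *)buf;
	for (size_t i = 0; i < buf_len; i++) {
		*h = (*h ^ p[i]) * 1099511628211ULL; /* FNV-1a prime */
	}
}

/* Hash a filename case-insensitively (canonical caseless form). */
static int
hash_name_case_insensitive(const char *name, size_t name_len, uint64_t *out)
{
	uint64_t h = 14695981039346656037ULL; /* FNV-1a offset basis */
	int err = utf8_normalizeOptCaseFoldAndHash(name, name_len,
	    false /* case_sens: case-insensitive */, name_hash_cb, &h);
	if (err == 0) {
		*out = h;
	}
	return err; /* EILSEQ if name is not valid stream-safe UTF-8 */
}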
+ */ +int utf8_normalizeOptCaseFoldAndHash(const char *str, + size_t str_len, + bool case_sens, + void (*hash_func)(void *buf, size_t buf_len, void *ctx), + void *hash_ctx); + +/* + * utf8_normalizeOptCaseFoldAndCompare + * + * Determine whether two UTF-8 strings are equal after converting each to one of the + * following normalized forms, as specified by the case_sens parameter: + * - "canonical caseless form" (case-folded NFD); for case-insensitive comparison. + * - standard NFD; for case-sensitive comparison (if case_sens = true). + * On success, sets are_equal to true if the strings are equal, or false if they are not. + * + * The input strings should be valid UTF-8 that meet the criteria for stream safe + * text as described in http://unicode.org/reports/tr15/#Stream_Safe_Text_Format. + * They should not contain ASCII 0x00 or '/'. + * + * strA: A UTF-8 string to be compared (need not be 0 terminated) + * strA_len: The byte length of strA (excluding any 0 terminator) + * strB: The second UTF-8 string to be compared (need not be 0 terminated) + * strB_len: The byte length of strB (excluding any 0 terminator) + * case_sens: False for case-insensitive behavior; compares canonical caseless forms. + * True for case-sensitive behavior; compares standard NFD forms. + * are_equal: On success, set to true if the strings are equal, or set to false + * if they are not. + * + * Returns: 0 on success, or + * EILSEQ: One or both of the input strings contains illegal ASCII-range + * characters (0x00 or '/'), or is not well-formed stream-safe UTF-8, + * or contains codepoints that are non-characters or unassigned in + * the version of Unicode currently supported. + * Note: The comparison may terminate early when a difference is + * detected, and may return 0 and set *are_equal=false even + * if one or both strings are invalid. + */ +int utf8_normalizeOptCaseFoldAndCompare(const char *strA, + size_t strA_len, + const char *strB, + size_t strB_len, + bool case_sens, + bool *are_equal); + +/* + * utf8_normalizeOptCaseFold + * + * Convert a given UTF-8 string to UTF-32 in one of the following normalized forms, + * as specified by the case_sens parameter, and copy the result to the ustr + * buffer: + * - "canonical caseless form" (case-folded NFD, as described by definition D145 + * in chapter 3 of The Unicode Standard); for case-insensitive behavior. + * - standard NFD; for case-sensitive behavior (if case_sens = true). + * + * The input string should be valid UTF-8 that meets the criteria for stream safe + * text as described in http://unicode.org/reports/tr15/#Stream_Safe_Text_Format. + * It should not contain ASCII 0x00 or '/'. + * + * str: The input UTF-8 string (need not be 0 terminated) + * str_len: The byte length of the input string (excluding any 0 terminator) + * case_sens: False for case-insensitive behavior; generates canonical caseless form. + * True for case-sensitive behavior; generates standard NFD. + * ustr: A pointer to a buffer for the resulting UTF-32 string. + * ustr_size: The capacity of ustr, in UTF-32 units. + * ustr_len: Pointer to a value that will be filled in with the actual length + * in UTF-32 units of the string copied to ustr. + * + * Returns: 0 on success, or + * EILSEQ: The input string contains illegal ASCII-range characters + * (0x00 or '/'), or is not well-formed stream-safe UTF-8, or + * contains codepoints that are non-characters or unassigned in + * the version of Unicode currently supported. + * ENOMEM: ustr_size is insufficient for the resulting string. 
In this + * case the value returned in *ustr_len is invalid. + */ +int utf8_normalizeOptCaseFold(const char *str, + size_t str_len, + bool case_sens, + int32_t *ustr, + int32_t ustr_size, + int32_t *ustr_len); + +/* + * utf8_normalizeOptCaseFoldToUTF8 + * + * Convert a given UTF-8 string to UTF-8 in one of the following normalized forms, + * as specified by the case_sens parameter, and copy the result to the ustr + * buffer: + * - "canonical caseless form" (case-folded NFD, as described by definition D145 + * in chapter 3 of The Unicode Standard); for case-insensitive behavior. + * - standard NFD; for case-sensitive behavior (if case_sens = true). + * + * The input string should be valid UTF-8 that meets the criteria for stream safe + * text as described in http://unicode.org/reports/tr15/#Stream_Safe_Text_Format. + * It should not contain ASCII 0x00 or '/'. + * + * str: The input UTF-8 string (need not be 0 terminated) + * str_len: The byte length of the input string (excluding any 0 terminator) + * case_sens: False for case-insensitive behavior; generates canonical caseless form. + * True for case-sensitive behavior; generates standard NFD. + * ustr: A pointer to a buffer for the resulting UTF-8 string. + * ustr_size: The capacity of ustr, in bytes. + * ustr_len: Pointer to a value that will be filled in with the actual length + * in bytes of the string copied to ustr. + * + * Returns: 0 on success, or + * EILSEQ: The input string contains illegal ASCII-range characters + * (0x00 or '/'), or is not well-formed stream-safe UTF-8, or + * contains codepoints that are non-characters or unassigned in + * the version of Unicode currently supported. + * ENOMEM: ustr_size is insufficient for the resulting string. In this + * case the value returned in *ustr_len is invalid. + */ +int utf8_normalizeOptCaseFoldToUTF8(const char *str, + size_t str_len, + bool case_sens, + char *ustr, + size_t ustr_size, + size_t *ustr_len); + +/* + * utf8_normalizeOptCaseFoldAndMatchSubstring + * + * Determine whether the normalized UTF32 string derived from a specified UTF-8 string + * strA contains another UTF32 string ustrB which has already been normalized, typically + * with normalizeOptCaseFold. The normalization for both strings is one of the following, + * as specified by the case_sens parameter: + * - "canonical caseless form" (case-folded NFD); for case-insensitive comparison. + * - standard NFD; for case-sensitive comparison (if case_sens = true). + * On success, sets are_equal to true if strA contains ustrB, or false otherwise. + * + * The input string strA should be valid UTF-8 that meets the criteria for stream safe + * text as described in http://unicode.org/reports/tr15/#Stream_Safe_Text_Format. + * It should not contain ASCII 0x00 or '/'. + * + * strA: A UTF-8 string (need not be 0 terminated) in which to search for the + * substring specified by ustrB. + * strA_len: The byte length of strA (excluding any 0 terminator) + * ustrB: A normalized UTF-32 substring (need not be 0 terminated) to be searched + * for in the UTF-32 string resulting from converting strA to the normalized + * UTF-32 form specified by the case_sens parameter; ustrB must already be + * in that form. Normally this will be produced using normalizeOptCaseFold. + * ustrB_len: The length of ustrB in UTF-32 units (excluding any 0 terminator). + * case_sens: False for case-insensitive matching; compares canonical caseless forms. + * True for case-sensitive matching; compares standard NFD forms. 
+ * buf: Pointer to caller-supplied working memory for storing the portion of + * strA which has been converted to normalized UTF-32. + * buf_size: The size of buf. + * has_match: On success, set to true if strA (when converter to UTF-32 and normalized + * per case_sens) contains ustrB, set to false otherwise. + * + * Returns: 0 on success, or + * EILSEQ: strA contains illegal ASCII-range characters (0x00 or '/'), or is + * not well-formed stream-safe UTF-8, or contains codepoints that are + * non-characters or unassigned in the version of Unicode currently + * supported. + * Note: The search may terminate early when a match is detected, and + * may return 0 and set *has_match=true even if strA is invalid. + * ENOMEM: buf_size is insufficient. + */ +int utf8_normalizeOptCaseFoldAndMatchSubstring(const char *strA, + size_t strA_len, + const int32_t *ustrB, + int32_t ustrB_len, + bool case_sens, + void *buf, + size_t buf_size, + bool *has_match); + +/* + * utf8_normalizeOptCaseFoldGetUVersion + * + * Get the Unicode and code version currently associated with the normalizeOptCaseFold + * functions. The caller allocates the version array and passes it to the function, + * which will fill out the array as follows: + * version[0] = Unicode major version; for Unicode 6.3.0 this would be 6 + * version[1] = Unicode minor version; for Unicode 6.3.0 this would be 3 + * version[2] = Unicode patch version; for Unicode 6.3.0 this would be 0 + * version[3] = Code revision level; for any given Unicode version, this value starts + * at 0 and is incremented for each significant revision to the + * normalizeOptCaseFold functions. + */ +void utf8_normalizeOptCaseFoldGetUVersion(unsigned char version[4]); + +#endif /* KERNEL_PRIVATE */ + +#endif /* unicode_h */ diff --git a/bsd/sys/unpcb.h b/bsd/sys/unpcb.h index b79e41f55..502aae89a 100644 --- a/bsd/sys/unpcb.h +++ b/bsd/sys/unpcb.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2010 Apple Inc. All rights reserved. + * Copyright (c) 2008-2020 Apple Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -204,7 +204,7 @@ struct xunpcb { u_quad_t xu_alignment_hack; }; -#if !CONFIG_EMBEDDED +#if XNU_TARGET_OS_OSX || !(TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR) struct xunpcb64_list_entry { u_int64_t le_next; @@ -238,7 +238,7 @@ struct xunpcb64 { struct xsocket64 xu_socket; }; -#endif /* !CONFIG_EMBEDDED */ +#endif /* XNU_TARGET_OS_OSX || !(TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR) */ #pragma pack() diff --git a/bsd/sys/user.h b/bsd/sys/user.h index 8d4001be4..53fdcdb94 100644 --- a/bsd/sys/user.h +++ b/bsd/sys/user.h @@ -178,6 +178,7 @@ struct uthread { } uus_workq_park_data; /* saved for parked workq threads */ struct _ulock_wait_data { + struct ull *ull; thread_t owner_thread; thread_t old_owner; int32_t *retval; diff --git a/bsd/sys/vnode.h b/bsd/sys/vnode.h index 3a727171c..0ae525055 100644 --- a/bsd/sys/vnode.h +++ b/bsd/sys/vnode.h @@ -110,8 +110,8 @@ enum vtagtype { VT_HFS, VT_ZFS, VT_DEVFS, VT_WEBDAV, VT_UDF, /* 21 - 25 */ VT_AFP, VT_CDDA, VT_CIFS, VT_OTHER, VT_APFS, - /* 26 */ - VT_LOCKERFS, + /* 26 - 27*/ + VT_LOCKERFS, VT_BINDFS, }; #define HAVE_VT_LOCKERFS 1 @@ -561,6 +561,7 @@ struct vnode_trigger_param { #define VNODE_ATTR_va_private_size (1LL<<43) /* 80000000000 */ #define VNODE_ATTR_va_clone_id (1LL<<44) /* 100000000000 */ #define VNODE_ATTR_va_extflags (1LL<<45) /* 200000000000 */ +#define VNODE_ATTR_va_recursive_gencount (1LL<<46) /* 400000000000 */ #define VNODE_ATTR_BIT(n) (VNODE_ATTR_ ## n) @@ -612,7 +613,8 @@ struct vnode_trigger_param { VNODE_ATTR_BIT(va_write_gencount) | \ VNODE_ATTR_BIT(va_private_size) | \ VNODE_ATTR_BIT(va_clone_id) | \ - VNODE_ATTR_BIT(va_extflags)) + VNODE_ATTR_BIT(va_extflags) | \ + VNODE_ATTR_BIT(va_recursive_gencount)) /* * Read-only attributes. @@ -644,7 +646,8 @@ struct vnode_trigger_param { VNODE_ATTR_BIT(va_write_gencount) | \ VNODE_ATTR_BIT(va_private_size) | \ VNODE_ATTR_BIT(va_clone_id) | \ - VNODE_ATTR_BIT(va_extflags)) + VNODE_ATTR_BIT(va_extflags) | \ + VNODE_ATTR_BIT(va_recursive_gencount)) /* * Attributes that can be applied to a new file object. 
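Editorial sketch of how a filesystem's getattr path might report the va_recursive_gencount attribute introduced in the surrounding hunks, assuming the filesystem maintains such a counter per directory. myfs_dir_gencount() is a hypothetical helper; VATTR_IS_ACTIVE, VATTR_RETURN, and vnode_isdir() are existing vnode KPIs.

#include <sys/vnode.h>

/* Hypothetical per-directory counter maintained by the filesystem. */
extern uint64_t myfs_dir_gencount(vnode_t vp);

static void
myfs_report_recursive_gencount(vnode_t vp, struct vnode_attr *vap)
{
	/* Only answer if the caller asked for it and this is a directory. */
	if (VATTR_IS_ACTIVE(vap, va_recursive_gencount) && vnode_isdir(vp)) {
		VATTR_RETURN(vap, va_recursive_gencount, myfs_dir_gencount(vp));
	}
}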
@@ -751,6 +754,7 @@ struct vnode_attr { uint64_t va_private_size; /* If the file were deleted, how many bytes would be freed immediately */ uint64_t va_clone_id; /* If a file is cloned this is a unique id shared by all "perfect" clones */ uint64_t va_extflags; /* extended file/directory flags */ + uint64_t va_recursive_gencount; /* for dir-stats enabled directories */ /* add new fields here only */ }; @@ -2036,6 +2040,7 @@ int vnode_iterate(struct mount *mp, int flags, int (*callout)(struct vnode * #ifdef BSD_KERNEL_PRIVATE #define VNODE_ALWAYS 0x400 #define VNODE_DRAINO 0x800 +#define VNODE_PAGER 0x1000 #endif /* BSD_KERNEL_PRIVATE */ /* @@ -2285,7 +2290,6 @@ int vaccess(mode_t file_mode, uid_t uid, gid_t gid, int check_mountedon(dev_t dev, enum vtype type, int *errorp); int vn_getcdhash(struct vnode *vp, off_t offset, unsigned char *cdhash); void vnode_reclaim(vnode_t); -vnode_t current_rootdir(void); vnode_t current_workingdir(void); void *vnode_vfsfsprivate(vnode_t); struct vfsstatfs *vnode_vfsstatfs(vnode_t); @@ -2409,6 +2413,9 @@ int is_package_name(const char *name, int len); int vfs_context_issuser(vfs_context_t); int vfs_context_iskernel(vfs_context_t); vfs_context_t vfs_context_kernel(void); /* get from 1st kernel thread */ +#ifdef XNU_KERNEL_PRIVATE +void vfs_set_context_kernel(vfs_context_t); /* set from 1st kernel thread */ +#endif /* XNU_KERNEL_PRIVATE */ vnode_t vfs_context_cwd(vfs_context_t); vnode_t vfs_context_get_cwd(vfs_context_t); /* get cwd with iocount */ int vnode_isnoflush(vnode_t); diff --git a/bsd/sys/vnode_if.h b/bsd/sys/vnode_if.h index 7959ff764..c5bec079a 100644 --- a/bsd/sys/vnode_if.h +++ b/bsd/sys/vnode_if.h @@ -87,6 +87,9 @@ #include +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wdocumentation" + #ifdef KERNEL extern struct vnodeop_desc vnop_default_desc; @@ -144,6 +147,9 @@ extern struct vnodeop_desc vnop_offtoblk_desc; extern struct vnodeop_desc vnop_blockmap_desc; extern struct vnodeop_desc vnop_strategy_desc; extern struct vnodeop_desc vnop_bwrite_desc; +#ifdef KERNEL_PRIVATE +extern struct vnodeop_desc vnop_verify_desc; +#endif #ifdef __APPLE_API_UNSTABLE @@ -461,11 +467,12 @@ struct vnop_read_args { * @param vp The vnode to read from. * @param uio Description of request, including file offset, amount of data requested, destination address for data, * and whether that destination is in kernel or user space. + * @param ioflag IO flags as defined in vnode.h, e.g. IO_SYNC, IO_NODELOCKED * @param ctx Context against which to authenticate read request. * @return 0 for success or a filesystem-specific error. VNOP_READ() can return success even if less data was * read than originally requested; returning an error value should indicate that something actually went wrong. */ -extern errno_t VNOP_READ(vnode_t vp, struct uio *uio, int, vfs_context_t ctx); +extern errno_t VNOP_READ(vnode_t vp, struct uio *uio, int ioflag, vfs_context_t ctx); struct vnop_write_args { struct vnodeop_desc *a_desc; @@ -485,6 +492,7 @@ struct vnop_write_args { * @param vp The vnode to write to. * @param uio Description of request, including file offset, amount of data to write, source address for data, * and whether that destination is in kernel or user space. + * @param ioflag IO flags as defined in vnode.h, e.g. IO_SYNC, IO_NODELOCKED * @param ctx Context against which to authenticate write request. * @return 0 for success or a filesystem-specific error. 
VNOP_WRITE() can return success even if less data was * written than originally requested; returning an error value should indicate that something actually went wrong. @@ -1808,10 +1816,49 @@ extern errno_t VNOP_REMOVENAMEDSTREAM(vnode_t, vnode_t, const char *, int flags, #endif // NAMEDSTREAMS +__options_decl(vnode_verify_flags_t, uint32_t, { + VNODE_VERIFY_DEFAULT = 0, +}); + +#define VNODE_VERIFY_DEFAULT VNODE_VERIFY_DEFAULT + +struct vnop_verify_args { + struct vnodeop_desc *a_desc; + vnode_t a_vp; + off_t a_foffset; + uint8_t *a_buf; + size_t a_bufsize; + size_t *a_verifyblksize; + vnode_verify_flags_t a_flags; + vfs_context_t a_context; +}; + +/*! + * @function VNOP_VERIFY + * @abstract Call down to a filesystem to verify file data for integrity. + * @discussion VNOP_VERIFY() returns whether file data being read has been verified to be what was written. + * This does not impose a specific mechanism for ensuring integrity beyond requiring that this be done in + * multiples of a verify block size (analogous to a filesystem block size but it can be per file) + * @param vp The vnode for which data is to be verified. + * @param foffset Offset (in bytes) at which region to be verified starts. + * @param buf buffer containing file data at foffset. If this is NULL, then only the verification block size is + * being requested. + * @param bufsize size of data buffer to be verified. + * @param verifyblksize pointer to size of verification block size in use for this file. If the verification block size is 0, + * no verification will be performed. The verification block size can be any value which is a power of two upto 128KiB. + * @param flags modifier flags. + * @param ctx Context to authenticate for verify request; currently often set to NULL. + * @return 0 for success, else an error code. 
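A small sketch (not part of the patch) of the buf == NULL mode described above: asking the filesystem only for a file's verification block size. The prototype follows just below; passing a NULL context mirrors the note in the ctx parameter description.

static errno_t
query_verify_blksize(vnode_t vp, size_t *blksize)
{
	*blksize = 0;
	/* buf == NULL and bufsize == 0: request only the verify block size.
	 * A returned block size of 0 means no verification is in use. */
	return VNOP_VERIFY(vp, 0, NULL, 0, blksize, VNODE_VERIFY_DEFAULT, NULL);
}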
+ */ +#ifdef XNU_KERNEL_PRIVATE +extern errno_t VNOP_VERIFY(vnode_t, off_t, uint8_t *, size_t, size_t *, vnode_verify_flags_t, vfs_context_t); +#endif /* XNU_KERNEL_PRIVATE */ + #endif // defined(__APPLE_API_UNSTABLE) __END_DECLS #endif /* KERNEL */ +#pragma clang diagnostic pop /* #pragma clang diagnostic ignored "-Wdocumentation" */ #endif /* !_SYS_VNODE_IF_H_ */ diff --git a/bsd/sys/vnode_internal.h b/bsd/sys/vnode_internal.h index 0bc52b141..a25c11c3e 100644 --- a/bsd/sys/vnode_internal.h +++ b/bsd/sys/vnode_internal.h @@ -139,16 +139,16 @@ struct vnode { int32_t v_kusecount; /* count of in-kernel refs */ int32_t v_usecount; /* reference count of users */ int32_t v_iocount; /* iocounters */ - void * v_owner; /* act that owns the vnode */ + void * XNU_PTRAUTH_SIGNED_PTR("vnode.v_owner") v_owner; /* act that owns the vnode */ uint16_t v_type; /* vnode type */ uint16_t v_tag; /* type of underlying data */ uint32_t v_id; /* identity of vnode contents */ union { - struct mount *vu_mountedhere;/* ptr to mounted vfs (VDIR) */ - struct socket *vu_socket; /* unix ipc (VSOCK) */ - struct specinfo *vu_specinfo; /* device (VCHR, VBLK) */ - struct fifoinfo *vu_fifoinfo; /* fifo (VFIFO) */ - struct ubc_info *vu_ubcinfo; /* valid for (VREG) */ + struct mount * XNU_PTRAUTH_SIGNED_PTR("vnode.v_data") vu_mountedhere; /* ptr to mounted vfs (VDIR) */ + struct socket * XNU_PTRAUTH_SIGNED_PTR("vnode.vu_socket") vu_socket; /* unix ipc (VSOCK) */ + struct specinfo * XNU_PTRAUTH_SIGNED_PTR("vnode.vu_specinfo") vu_specinfo; /* device (VCHR, VBLK) */ + struct fifoinfo * XNU_PTRAUTH_SIGNED_PTR("vnode.vu_fifoinfo") vu_fifoinfo; /* fifo (VFIFO) */ + struct ubc_info * XNU_PTRAUTH_SIGNED_PTR("vnode.vu_ubcinfo") vu_ubcinfo; /* valid for (VREG) */ } v_un; struct buflists v_cleanblkhd; /* clean blocklist head */ struct buflists v_dirtyblkhd; /* dirty blocklist head */ @@ -158,7 +158,7 @@ struct vnode { * by the name_cache_lock held in * excluive mode */ - kauth_cred_t v_cred; /* last authorized credential */ + kauth_cred_t XNU_PTRAUTH_SIGNED_PTR("vnode.v_cred") v_cred; /* last authorized credential */ kauth_action_t v_authorized_actions; /* current authorized actions for v_cred */ int v_cred_timestamp; /* determine if entry is stale for MNTK_AUTH_OPAQUE */ int v_nc_generation; /* changes when nodes are removed from the name cache */ @@ -168,10 +168,10 @@ struct vnode { int32_t v_numoutput; /* num of writes in progress */ int32_t v_writecount; /* reference count of writers */ const char *v_name; /* name component of the vnode */ - vnode_t v_parent; /* pointer to parent vnode */ + vnode_t XNU_PTRAUTH_SIGNED_PTR("vnode.v_parent") v_parent; /* pointer to parent vnode */ struct lockf *v_lockf; /* advisory lock list head */ int(**v_op)(void *); /* vnode operations vector */ - mount_t v_mount; /* ptr to vfs we are in */ + mount_t XNU_PTRAUTH_SIGNED_PTR("vnode.v_mount") v_mount; /* ptr to vfs we are in */ void * v_data; /* private data for fs */ #if CONFIG_MACF struct label *v_label; /* MAC security label */ @@ -458,6 +458,8 @@ int vnode_attr_authorize_dir_clone(struct vnode_attr *vap, kauth_action_t action uint32_t flags, vfs_context_t ctx, void *reserved); /* End of authorization subroutines */ +void vnode_attr_handle_mnt_ignore_ownership(struct vnode_attr *vap, mount_t mp, vfs_context_t ctx); + #define VN_CREATE_NOAUTH (1<<0) #define VN_CREATE_NOINHERIT (1<<1) #define VN_CREATE_UNION (1<<2) @@ -506,11 +508,15 @@ int vnode_ref_ext(vnode_t, int, int); void vnode_rele_internal(vnode_t, int, int, int); #ifdef BSD_KERNEL_PRIVATE 
int vnode_getalways(vnode_t); +int vnode_getalways_from_pager(vnode_t); int vget_internal(vnode_t, int, int); errno_t vnode_getiocount(vnode_t, unsigned int, int); #endif /* BSD_KERNEL_PRIVATE */ int vnode_get_locked(vnode_t); int vnode_put_locked(vnode_t); +#ifdef BSD_KERNEL_PRIVATE +int vnode_put_from_pager(vnode_t); +#endif /* BSD_KERNEL_PRIVATE */ int vnode_issock(vnode_t); int vnode_isaliased(vnode_t); @@ -611,6 +617,10 @@ void nspace_resolver_exited(struct proc *); int vnode_materialize_dataless_file(vnode_t, uint64_t); +int vnode_isinuse_locked(vnode_t, int, int ); + #endif /* BSD_KERNEL_PRIVATE */ +extern bool rootvp_is_ssd; + #endif /* !_SYS_VNODE_INTERNAL_H_ */ diff --git a/bsd/sys/vsock.h b/bsd/sys/vsock.h new file mode 100644 index 000000000..853cbab03 --- /dev/null +++ b/bsd/sys/vsock.h @@ -0,0 +1,84 @@ +/* + * Copyright (c) 2020 Apple Inc. All rights reserved. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. + * + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. 
+ * + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ + */ + +#ifndef _VSOCK_H_ +#define _VSOCK_H_ + +#include + +__BEGIN_DECLS + +#include +#include +#include + +#define VMADDR_CID_ANY (-1U) +#define VMADDR_CID_HYPERVISOR 0 +#define VMADDR_CID_RESERVED 1 +#define VMADDR_CID_HOST 2 + +#define VMADDR_PORT_ANY (-1U) + +#define IOCTL_VM_SOCKETS_GET_LOCAL_CID _IOR('s', 209, uint32_t) + +struct sockaddr_vm { + __uint8_t svm_len; /* total length */ + sa_family_t svm_family; /* Address family: AF_VSOCK */ + __uint16_t svm_reserved1; + __uint32_t svm_port; /* Port # in host byte order */ + __uint32_t svm_cid; /* Address in host byte order */ +} __attribute__((__packed__)); + +typedef u_quad_t vsock_gen_t; + +struct xvsockpcb { + u_int32_t xv_len; /* length of this structure */ + u_int64_t xv_vsockpp; + u_int32_t xvp_local_cid; /* local address cid */ + u_int32_t xvp_local_port; /* local address port */ + u_int32_t xvp_remote_cid; /* remote address cid */ + u_int32_t xvp_remote_port; /* remote address port */ + u_int32_t xvp_rxcnt; /* bytes received */ + u_int32_t xvp_txcnt; /* bytes transmitted */ + u_int32_t xvp_peer_rxhiwat; /* peer's receive buffer */ + u_int32_t xvp_peer_rxcnt; /* bytes received by peer */ + pid_t xvp_last_pid; /* last pid */ + vsock_gen_t xvp_gencnt; /* vsock generation count */ + struct xsocket xv_socket; +}; + +struct xvsockpgen { + u_int32_t xvg_len; /* length of this structure */ + u_int64_t xvg_count; /* number of PCBs at this time */ + vsock_gen_t xvg_gen; /* generation count at this time */ + so_gen_t xvg_sogen; /* current socket generation count */ +}; + +__END_DECLS + +#endif /* _VSOCK_H_ */ diff --git a/bsd/sys/vsock_domain.h b/bsd/sys/vsock_domain.h new file mode 100644 index 000000000..48049016f --- /dev/null +++ b/bsd/sys/vsock_domain.h @@ -0,0 +1,76 @@ +/* + * Copyright (c) 2020 Apple Inc. All rights reserved. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. + * + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. 
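Editorial sketch of userspace use of the sockaddr_vm layout defined in the new vsock.h above: connecting a stream socket to the host CID. It assumes AF_VSOCK is defined by <sys/socket.h> in this release; the port value is caller-chosen and arbitrary here.

#include <sys/socket.h>
#include <sys/vsock.h>
#include <string.h>
#include <unistd.h>

static int
vsock_connect_to_host(uint32_t port)
{
	struct sockaddr_vm addr;
	int fd = socket(AF_VSOCK, SOCK_STREAM, 0);
	if (fd < 0) {
		return -1;
	}
	memset(&addr, 0, sizeof(addr));
	addr.svm_len = sizeof(addr);
	addr.svm_family = AF_VSOCK;
	addr.svm_cid = VMADDR_CID_HOST; /* CID 2: the host */
	addr.svm_port = port;           /* host byte order, per the header comment */
	if (connect(fd, (const struct sockaddr *)&addr, sizeof(addr)) != 0) {
		close(fd);
		return -1;
	}
	return fd;
}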
+ * + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ + */ + + #ifndef _VSOCK_DOMAIN_H_ + #define _VSOCK_DOMAIN_H_ + #ifdef BSD_KERNEL_PRIVATE + + #include + #include + +/* VSock Protocol Control Block */ + +struct vsockpcb { + TAILQ_ENTRY(vsockpcb) all; + LIST_ENTRY(vsockpcb) bound; + struct socket *so; + struct vsock_address local_address; + struct vsock_address remote_address; + struct vsock_transport *transport; + uint32_t fwd_cnt; + uint32_t tx_cnt; + uint32_t peer_buf_alloc; + uint32_t peer_fwd_cnt; + uint32_t last_buf_alloc; + uint32_t last_fwd_cnt; + size_t waiting_send_size; + vsock_gen_t vsock_gencnt; +}; + +/* VSock Protocol Control Block Info */ + +struct vsockpcbinfo { + // PCB locking. + lck_attr_t *vsock_lock_attr; + lck_grp_t *vsock_lock_grp; + lck_grp_attr_t *vsock_lock_grp_attr; + lck_rw_t *all_lock; + lck_rw_t *bound_lock; + // PCB lists. + TAILQ_HEAD(, vsockpcb) all; + LIST_HEAD(, vsockpcb) bound; + // Port generation. + uint32_t last_port; + lck_mtx_t port_lock; + // Counts. + uint64_t all_pcb_count; + vsock_gen_t vsock_gencnt; +}; + +#endif /* BSD_KERNEL_PRIVATE */ +#endif /* _VSOCK_DOMAIN_H_ */ diff --git a/bsd/sys/vsock_transport.h b/bsd/sys/vsock_transport.h new file mode 100644 index 000000000..c584aed4d --- /dev/null +++ b/bsd/sys/vsock_transport.h @@ -0,0 +1,78 @@ +/* + * Copyright (c) 2020 Apple Inc. All rights reserved. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. + * + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. 
+ * + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ + */ + +#ifndef _VSOCK_TRANSPORT_H_ +#define _VSOCK_TRANSPORT_H_ +#ifdef KERNEL_PRIVATE + +#include + +__BEGIN_DECLS + +#include +#include +#include + +#define VSOCK_MAX_PACKET_SIZE 65536 + +enum vsock_operation { + VSOCK_REQUEST = 0, + VSOCK_RESPONSE = 1, + VSOCK_PAYLOAD = 2, + VSOCK_SHUTDOWN = 3, + VSOCK_SHUTDOWN_RECEIVE = 4, + VSOCK_SHUTDOWN_SEND = 5, + VSOCK_RESET = 6, + VSOCK_CREDIT_UPDATE = 7, + VSOCK_CREDIT_REQUEST = 8, +}; + +struct vsock_address { + uint32_t cid; + uint32_t port; +}; + +struct vsock_transport { + void *provider; + int (*get_cid)(void *provider, uint32_t *cid); + int (*attach_socket)(void *provider); + int (*detach_socket)(void *provider); + int (*put_message)(void *provider, struct vsock_address src, struct vsock_address dst, + enum vsock_operation op, uint32_t buf_alloc, uint32_t fwd_cnt, mbuf_t m); +}; + +extern int vsock_add_transport(struct vsock_transport *transport); +extern int vsock_remove_transport(struct vsock_transport *transport); +extern int vsock_reset_transport(struct vsock_transport *transport); +extern int vsock_put_message(struct vsock_address src, struct vsock_address dst, + enum vsock_operation op, uint32_t buf_alloc, uint32_t fwd_cnt, mbuf_t m); + +__END_DECLS + +#endif /* KERNEL_PRIVATE */ +#endif /* _VSOCK_TRANSPORT_H_ */ diff --git a/bsd/sys/work_interval.h b/bsd/sys/work_interval.h index 695a28ea1..91567cf83 100644 --- a/bsd/sys/work_interval.h +++ b/bsd/sys/work_interval.h @@ -101,14 +101,58 @@ __BEGIN_DECLS * management decisions, clients should strive to be as accurate as possible. * Failure to do so will adversely impact system power and performance. * + * Work Interval Auto Join Support: + * + * Work intervals support an optional flag WORK_INTERVAL_FLAG_ENABLE_AUTO_JOIN + * which allows RT threads from the same home thread group to join work + * intervals via wakeup relationship tracking. Based on the join policy, + * RT threads can temporarily join the work interval of other RT threads + * which make them runnable. The auto joined thread remains in the work + * interval until it blocks or terminates. The mechanism works through + * make runnable heuristic and it should be used with extreme caution. + * If a client specifies this flag, it gives up explicit control over its + * thread group membership and threads unrelated to the work interval + * could become part of the thread group. This could lead to serious power + * and performance issues. If the make runnable heuristic does not work + * for a client use case, it should adopt work_interval_join_port() or + * work_interval_join() to explicitly declare its intent. + * + * Work Interval Deferred Finish Support: + * + * Another advanced feature for work intervals is the ability to defer the finish + * calls for the work interval until all auto-joined threads for the work interval + * have blocked or terminated. This feature is enabled via an optional flag + * WORK_INTERVAL_FLAG_ENABLE_DEFERRED_FINISH and is valid only if the work interval + * is configured with the WORK_INTERVAL_FLAG_ENABLE_AUTO_JOIN flag as well. The + * deferred finish mechanism allows the work interval to defer the finish call + * for the work interval until all auto-join threads have blocked/terminated + * (and have therefore un-joined the work interval) or one of the work interval + * threads calls start for the next frame. The deferred finish works only for + * workloads that have no temporal overlap across frames i.e. 
previous frame has to + * finish before next frame can start. This feature should be used with caution + * since auto-joined threads would delay finish calls to the performance controller + * which could lead to poor performance and battery life. */ /* Flags to be passed with work_interval_create() */ /* If interval is joinable, create no longer implicitly joins, you must use work_interval_join */ -#define WORK_INTERVAL_FLAG_JOINABLE (0x1) +#define WORK_INTERVAL_FLAG_JOINABLE (0x1) /* Only threads that join the group are measured together, otherwise the group is the creator's home group */ -#define WORK_INTERVAL_FLAG_GROUP (0x2) +#define WORK_INTERVAL_FLAG_GROUP (0x2) +/* Specifies that the work interval is being created by a client who doesn't + * necessarily have the PRIV_WORK_INTERVAL entitlement. Skip privilege checks. + * This can only be masked in for work intervals of types COREAUDIO, CA_CLIENT + * and DEFAULT */ +#define WORK_INTERVAL_FLAG_UNRESTRICTED (0x4) + +/* [Advanced Flag] Read section on "Work Interval Auto Join Support" above for details */ +#define WORK_INTERVAL_FLAG_ENABLE_AUTO_JOIN (0x8) +/* [Advanced Flag] Read section on "Work Interval Deferred Finish Support" above for details */ +#define WORK_INTERVAL_FLAG_ENABLE_DEFERRED_FINISH (0x10) + +/* Kernel-supplied flag: Work interval has been ignored by the kernel */ +#define WORK_INTERVAL_FLAG_IGNORED (0x20) /* Flags to describe the interval flavor to the performance controller */ #define WORK_INTERVAL_TYPE_MASK (0xF0000000) @@ -155,6 +199,13 @@ typedef struct work_interval_instance *work_interval_instance_t; */ int work_interval_create(work_interval_t *interval_handle, uint32_t flags); +/* Returns the flags used for the work interval when it was created. + * + * May fail with EINVAL if the port isn't from a prior call to + * work_interval_copy_port. 
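A hedged sketch of the round trip enabled by the flag-query support documented above: create a joinable work interval, copy its port, and read the creation flags back. work_interval_copy_port() and work_interval_destroy() are assumed to have their usual signatures from this header (not shown in this excerpt), and creation may require the PRIV_WORK_INTERVAL entitlement.

#include <mach/mach.h>
#include <sys/work_interval.h>

static int
check_interval_flags(void)
{
	work_interval_t wi = NULL;
	mach_port_t port = MACH_PORT_NULL;
	uint32_t flags = 0;
	int err;

	err = work_interval_create(&wi,
	    WORK_INTERVAL_FLAG_JOINABLE | WORK_INTERVAL_FLAG_GROUP);
	if (err != 0) {
		return err;
	}
	err = work_interval_copy_port(wi, &port);      /* assumed signature */
	if (err == 0) {
		err = work_interval_get_flags_from_port(port, &flags);
		/* On success, flags should include JOINABLE and GROUP. */
		mach_port_deallocate(mach_task_self(), port);
	}
	work_interval_destroy(wi);                     /* assumed signature */
	return err;
}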
+ */ +int work_interval_get_flags_from_port(mach_port_t port, uint32_t *flags); + /* * Notify the power management subsystem that the work for a current interval has completed @@ -229,6 +280,7 @@ int work_interval_leave(void); #define WORK_INTERVAL_OPERATION_NOTIFY 0x00000003 /* arg is a work_interval_notification_t */ #define WORK_INTERVAL_OPERATION_CREATE2 0x00000004 /* arg is a work_interval_create_params */ #define WORK_INTERVAL_OPERATION_JOIN 0x00000005 /* arg is a port_name */ +#define WORK_INTERVAL_OPERATION_GET_FLAGS 0x00000009 /* arg is a port name */ struct work_interval_notification { uint64_t start; @@ -241,8 +293,8 @@ struct work_interval_notification { typedef struct work_interval_notification *work_interval_notification_t; struct work_interval_create_params { - uint64_t wicp_id; /* out param */ - uint32_t wicp_port; /* out param */ + uint64_t wicp_id; /* in/out param */ + mach_port_name_t wicp_port; /* in/out param */ uint32_t wicp_create_flags; }; diff --git a/bsd/tests/bsd_tests.c b/bsd/tests/bsd_tests.c index 7ce85fdd7..debde98cf 100644 --- a/bsd/tests/bsd_tests.c +++ b/bsd/tests/bsd_tests.c @@ -49,6 +49,9 @@ #ifdef __arm64__ extern kern_return_t arm64_lock_test(void); #endif +#if defined(__arm__) || defined(__arm64__) +extern kern_return_t pmap_test(void); +#endif /* defined(__arm__) || defined(__arm64__) */ kern_return_t kalloc_test(void); kern_return_t ipi_test(void); #if defined(KERNEL_INTEGRITY_CTRR) @@ -66,6 +69,9 @@ struct xnupost_test bsd_post_tests[] = { #ifdef __arm64__ XNUPOST_TEST_CONFIG_BASIC(arm64_lock_test), #endif +#if defined(__arm__) || defined(__arm64__) + XNUPOST_TEST_CONFIG_BASIC(pmap_test), +#endif /* defined(__arm__) || defined(__arm64__) */ #if defined(KERNEL_INTEGRITY_CTRR) XNUPOST_TEST_CONFIG_BASIC(ctrr_test), #endif @@ -172,46 +178,46 @@ xnupost_copyout_test(xnupost_test_t t, mach_vm_address_t outaddr) { /* code to copyout test config */ int kret = 0; - uint32_t namelen = 0; + size_t namelen = 0; - kret = copyout(&t->xt_config, outaddr, sizeof(uint16_t)); + kret = copyout(&t->xt_config, (user_addr_t)outaddr, sizeof(uint16_t)); if (kret) { return kret; } outaddr += sizeof(uint16_t); - kret = copyout(&t->xt_test_num, outaddr, sizeof(uint16_t)); + kret = copyout(&t->xt_test_num, (user_addr_t)outaddr, sizeof(uint16_t)); if (kret) { return kret; } outaddr += sizeof(uint16_t); - kret = copyout(&t->xt_retval, outaddr, sizeof(uint32_t)); + kret = copyout(&t->xt_retval, (user_addr_t)outaddr, sizeof(uint32_t)); if (kret) { return kret; } outaddr += sizeof(uint32_t); - kret = copyout(&t->xt_expected_retval, outaddr, sizeof(uint32_t)); + kret = copyout(&t->xt_expected_retval, (user_addr_t)outaddr, sizeof(uint32_t)); if (kret) { return kret; } outaddr += sizeof(uint32_t); - kret = copyout(&t->xt_begin_time, outaddr, sizeof(uint64_t)); + kret = copyout(&t->xt_begin_time, (user_addr_t)outaddr, sizeof(uint64_t)); if (kret) { return kret; } outaddr += sizeof(uint64_t); - kret = copyout(&t->xt_end_time, outaddr, sizeof(uint64_t)); + kret = copyout(&t->xt_end_time, (user_addr_t)outaddr, sizeof(uint64_t)); if (kret) { return kret; } outaddr += sizeof(uint64_t); namelen = strnlen(t->xt_name, XNUPOST_TNAME_MAXLEN); - kret = copyout(t->xt_name, outaddr, namelen); + kret = copyout(t->xt_name, (user_addr_t)outaddr, namelen); if (kret) { return kret; } @@ -234,7 +240,7 @@ xnupost_get_estimated_testdata_size(void) } int -xnupost_export_testdata(void * outp, uint32_t size, uint32_t * lenp) +xnupost_export_testdata(void * outp, size_t size_in, uint32_t * lenp) { struct 
kcdata_descriptor kcd; mach_vm_address_t user_addr = 0; @@ -244,6 +250,11 @@ xnupost_export_testdata(void * outp, uint32_t size, uint32_t * lenp) char kctype_name[32] = "xnupost_test_config"; mach_timebase_info_data_t timebase = {0, 0}; uint32_t length_to_copy = 0; + unsigned int size = (unsigned int)size_in; + + if (size_in > UINT_MAX) { + return ENOSPC; + } #define RET_IF_OP_FAIL \ do { \ @@ -259,20 +270,20 @@ xnupost_export_testdata(void * outp, uint32_t size, uint32_t * lenp) clock_timebase_info(&timebase); kret = kcdata_get_memory_addr(&kcd, KCDATA_TYPE_TIMEBASE, sizeof(timebase), &user_addr); RET_IF_OP_FAIL; - kret = copyout(&timebase, user_addr, sizeof(timebase)); + kret = copyout(&timebase, (user_addr_t)user_addr, sizeof(timebase)); RET_IF_OP_FAIL; /* save boot-args and osversion string */ length_to_copy = MIN((uint32_t)(strlen(version) + 1), OSVERSIZE); kret = kcdata_get_memory_addr(&kcd, STACKSHOT_KCTYPE_OSVERSION, length_to_copy, &user_addr); RET_IF_OP_FAIL; - kret = copyout(&version[0], user_addr, length_to_copy); + kret = copyout(&version[0], (user_addr_t)user_addr, length_to_copy); RET_IF_OP_FAIL; length_to_copy = MIN((uint32_t)(strlen(PE_boot_args()) + 1), BOOT_LINE_LENGTH); kret = kcdata_get_memory_addr(&kcd, STACKSHOT_KCTYPE_BOOTARGS, length_to_copy, &user_addr); RET_IF_OP_FAIL; - kret = copyout(PE_boot_args(), user_addr, length_to_copy); + kret = copyout(PE_boot_args(), (user_addr_t)user_addr, length_to_copy); RET_IF_OP_FAIL; /* add type definition to buffer */ diff --git a/bsd/tests/copyio_tests.c b/bsd/tests/copyio_tests.c index f3594be79..af219b9b6 100644 --- a/bsd/tests/copyio_tests.c +++ b/bsd/tests/copyio_tests.c @@ -51,11 +51,11 @@ struct copyio_test_data { /* VM map of the current userspace process. */ vm_map_t user_map; /* The start of a `copyio_test_buf_size'-sized region mapped into userspace. */ - mach_vm_offset_t user_addr; + user_addr_t user_addr; /* The start of a page-sized region that guaranteed to be unmapped in userspace. */ - mach_vm_offset_t unmapped_addr; + user_addr_t unmapped_addr; /* The start of a page-sized region mapped at the largest possible userspace address. */ - mach_vm_offset_t user_lastpage_addr; + user_addr_t user_lastpage_addr; /* Kernel mapping of the physical pages mapped at `user_addr'. 
*/ void *kern_addr; @@ -252,7 +252,7 @@ copyinstr_test(struct copyio_test_data *data) data->thread_ptr = &lencopied; err = copyio_test_run_in_thread(copyinstr_from_kernel, data); -#if defined(CONFIG_EMBEDDED) +#if defined(__arm__) || defined (__arm64__) T_EXPECT_EQ_INT(err, EFAULT, "copyinstr() from kernel address in kernel_task thread should return EFAULT"); #else T_EXPECT_EQ_INT(err, 0, "copyinstr() from kernel address in kernel_task thread should succeed"); @@ -275,7 +275,7 @@ copyinstr_test(struct copyio_test_data *data) char *kern_unterminated_addr = (char *)data->kern_addr + copyio_test_buf_size - unterminated_size; memset(kern_unterminated_addr, 'A', unterminated_size); - mach_vm_offset_t user_unterminated_addr = data->user_addr + copyio_test_buf_size - unterminated_size; + user_addr_t user_unterminated_addr = data->user_addr + copyio_test_buf_size - unterminated_size; err = copyinstr(user_unterminated_addr, in_buf, copyio_test_buf_size, &lencopied); T_EXPECT_EQ_INT(err, EFAULT, "copyinstr() from userspace region without NULL terminator should return EFAULT"); } @@ -324,7 +324,7 @@ copyoutstr_test(struct copyio_test_data *data) data->thread_ptr = &lencopied; err = copyio_test_run_in_thread(copyoutstr_to_kernel, data); -#if defined(CONFIG_EMBEDDED) +#if defined(__arm__) || defined (__arm64__) T_EXPECT_EQ_INT(err, EFAULT, "copyoutstr() to kernel address in kernel_task thread should return EFAULT"); #else T_EXPECT_EQ_INT(err, 0, "copyoutstr() to kernel address in kernel_task thread should succeed"); @@ -356,7 +356,7 @@ copyin_atomic64_from_kernel(struct copyio_test_data *data) static int copyout_atomic32_to_kernel(struct copyio_test_data *data) { - return copyout_atomic32(data->thread_data, (user_addr_t)data->kern_addr); + return copyout_atomic32((uint32_t)data->thread_data, (user_addr_t)data->kern_addr); } static int @@ -454,7 +454,7 @@ copyout_atomic64_to_kernel(struct copyio_test_data *data) static int copyin_atomic32_wait_if_equals_from_kernel(struct copyio_test_data *data) { - return copyin_atomic32_wait_if_equals((uintptr_t)data->kern_addr, data->thread_data); + return copyin_atomic32_wait_if_equals((uintptr_t)data->kern_addr, (uint32_t)data->thread_data); } static void @@ -492,6 +492,7 @@ kern_return_t copyio_test(void) { struct copyio_test_data data = {}; + mach_vm_offset_t user_addr = 0; kern_return_t ret = KERN_SUCCESS; data.buf1 = kalloc(copyio_test_buf_size); @@ -513,18 +514,21 @@ copyio_test(void) assert(proc->p_pid == 1); data.user_map = get_task_map_reference(proc->task); - ret = mach_vm_allocate_kernel(data.user_map, &data.user_addr, copyio_test_buf_size + PAGE_SIZE, VM_FLAGS_ANYWHERE, VM_KERN_MEMORY_NONE); + user_addr = data.user_addr; + ret = mach_vm_allocate_kernel(data.user_map, &user_addr, copyio_test_buf_size + PAGE_SIZE, VM_FLAGS_ANYWHERE, VM_KERN_MEMORY_NONE); if (ret) { T_FAIL("mach_vm_allocate_kernel(user_addr) failed: %d", ret); goto err_user_alloc; } + data.user_addr = (user_addr_t)user_addr; - data.user_lastpage_addr = get_map_max(data.user_map) - PAGE_SIZE; - ret = mach_vm_allocate_kernel(data.user_map, &data.user_lastpage_addr, PAGE_SIZE, VM_FLAGS_FIXED, VM_KERN_MEMORY_NONE); + user_addr = get_map_max(data.user_map) - PAGE_SIZE; + ret = mach_vm_allocate_kernel(data.user_map, &user_addr, PAGE_SIZE, VM_FLAGS_FIXED, VM_KERN_MEMORY_NONE); if (ret) { T_FAIL("mach_vm_allocate_kernel(user_lastpage_addr) failed: %d", ret); goto err_user_lastpage_alloc; } + data.user_lastpage_addr = (user_addr_t)user_addr; data.unmapped_addr = data.user_addr + 
copyio_test_buf_size; mach_vm_deallocate(data.user_map, data.unmapped_addr, PAGE_SIZE); diff --git a/bsd/tests/ptrauth_data_tests_sysctl.c b/bsd/tests/ptrauth_data_tests_sysctl.c new file mode 100644 index 000000000..677160fd0 --- /dev/null +++ b/bsd/tests/ptrauth_data_tests_sysctl.c @@ -0,0 +1,122 @@ +/* + * Copyright (c) 2020 Apple Inc. All rights reserved. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. + * + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ + */ + +#if DEVELOPMENT || DEBUG +#if __has_feature(ptrauth_calls) + +#include +#include +#include +#include +#include +#include + + +#include +#include +#include + +extern kern_return_t ptrauth_data_tests(void); + +/* + * Given an existing PAC pointer (ptr), its declaration type (decl), the (key) + * used to sign it and the string discriminator (discr), extract the raw pointer + * along with the signature and compare it with one computed on the fly + * via ptrauth_sign_unauthenticated(). + * + * If the two mismatch, return an error and fail the test. + */ +#define VALIDATE_PTR(decl, ptr, key, discr) { \ + decl raw = *(decl *)&(ptr); \ + decl cmp = ptrauth_sign_unauthenticated(ptr, key, \ + ptrauth_blend_discriminator(&ptr, ptrauth_string_discriminator(discr))); \ + if (cmp != raw) { \ + printf("kern.run_pac_test: %s (%s) (discr=%s) is not signed as expected (%p vs %p)\n", #decl, #ptr, #discr, raw, cmp); \ + kr = EINVAL; \ + } \ +} + +/* + * Allocate the containing structure, and store a pointer to the desired member, + * which should be subject to pointer signing. 
+ */ +#define ALLOC_VALIDATE_DATA_PTR(structure, decl, member, discr) { \ + structure *tmp = kheap_alloc(KHEAP_TEMP, sizeof(structure), Z_WAITOK | Z_ZERO); \ + if (!tmp) return ENOMEM; \ + tmp->member = (void*)0xffffffff41414141; \ + VALIDATE_DATA_PTR(decl, tmp->member, discr) \ + kheap_free(KHEAP_TEMP, tmp, sizeof(structure)); \ +} + +#define VALIDATE_DATA_PTR(decl, ptr, discr) VALIDATE_PTR(decl, ptr, ptrauth_key_process_independent_data, discr) + +/* + * Validate that a pointer that is supposed to be signed, is, and that the signature + * matches based on signing key, location and discriminator + */ +static int +sysctl_run_ptrauth_data_tests SYSCTL_HANDLER_ARGS +{ + #pragma unused(arg1, arg2, oidp) + + unsigned int dummy; + int error, changed, kr; + error = sysctl_io_number(req, 0, sizeof(dummy), &dummy, &changed); + if (error || !changed) { + return error; + } + + /* proc_t */ + ALLOC_VALIDATE_DATA_PTR(struct proc, void *, task, "proc.task"); + ALLOC_VALIDATE_DATA_PTR(struct proc, struct proc *, p_pptr, "proc.p_pptr"); + ALLOC_VALIDATE_DATA_PTR(struct proc, struct vnode *, p_textvp, "proc.p_textvp"); + ALLOC_VALIDATE_DATA_PTR(struct proc, struct pgrp *, p_pgrp, "proc.p_pgrp"); + + /* cs_blob */ + ALLOC_VALIDATE_DATA_PTR(struct cs_blob, struct cs_blob *, csb_next, "cs_blob.csb_next"); + ALLOC_VALIDATE_DATA_PTR(struct cs_blob, const CS_CodeDirectory *, csb_cd, "cs_blob.csb_cd"); + ALLOC_VALIDATE_DATA_PTR(struct cs_blob, const char *, csb_teamid, "cs_blob.csb_teamid"); + ALLOC_VALIDATE_DATA_PTR(struct cs_blob, const CS_GenericBlob *, csb_entitlements_blob, "cs_blob.csb_entitlements_blob"); + ALLOC_VALIDATE_DATA_PTR(struct cs_blob, void *, csb_entitlements, "cs_blob.csb_entitlements"); + + /* The rest of the tests live in osfmk/ */ + kr = ptrauth_data_tests(); + + if (error == 0) { + error = mach_to_bsd_errno(kr); + } + + return kr; +} + +SYSCTL_PROC(_kern, OID_AUTO, run_ptrauth_data_tests, + CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED | CTLFLAG_MASKED, + 0, 0, sysctl_run_ptrauth_data_tests, "I", ""); + +#endif /* __has_feature(ptrauth_calls) */ +#endif /* DEVELOPMENT || DEBUG */ diff --git a/bsd/vfs/doc_tombstone.c b/bsd/vfs/doc_tombstone.c index 17fbf4ef8..a978e0471 100644 --- a/bsd/vfs/doc_tombstone.c +++ b/bsd/vfs/doc_tombstone.c @@ -51,10 +51,8 @@ doc_tombstone_get(void) ut = get_bsdthread_info(current_thread()); if (ut->t_tombstone == NULL) { - ut->t_tombstone = kalloc(sizeof(struct doc_tombstone)); - if (ut->t_tombstone) { - memset(ut->t_tombstone, 0, sizeof(struct doc_tombstone)); - } + ut->t_tombstone = kalloc_flags(sizeof(struct doc_tombstone), + Z_WAITOK | Z_ZERO); } return ut->t_tombstone; @@ -71,7 +69,7 @@ doc_tombstone_get(void) void doc_tombstone_clear(struct doc_tombstone *ut, vnode_t *old_vpp) { - uint32_t old_id = ut->t_lastop_document_id; + uint64_t old_id = ut->t_lastop_document_id; ut->t_lastop_document_id = 0; ut->t_lastop_parent = NULL; @@ -116,13 +114,16 @@ doc_tombstone_clear(struct doc_tombstone *ut, vnode_t *old_vpp) bool doc_tombstone_should_ignore_name(const char *nameptr, int len) { + size_t real_len; if (len == 0) { - len = strlen(nameptr); + real_len = strlen(nameptr); + } else { + real_len = (size_t)len; } if (strncmp(nameptr, "atmp", 4) == 0 - || (len > 4 && strncmp(nameptr + len - 4, ".bak", 4) == 0) - || (len > 4 && strncmp(nameptr + len - 4, ".tmp", 4) == 0)) { + || (real_len > 4 && strncmp(nameptr + real_len - 4, ".bak", 4) == 0) + || (real_len > 4 && strncmp(nameptr + real_len - 4, ".tmp", 4) == 0)) { return true; } diff --git a/bsd/vfs/kpi_vfs.c 
b/bsd/vfs/kpi_vfs.c index dff5f82d3..f4ef76113 100644 --- a/bsd/vfs/kpi_vfs.c +++ b/bsd/vfs/kpi_vfs.c @@ -84,13 +84,14 @@ #include #include #include +#include #include #include #include #include #include #include -#include +#include #include #include #include @@ -106,7 +107,7 @@ #include #include -#include +#include #include #include @@ -144,8 +145,13 @@ static void xattrfile_setattr(vnode_t dvp, const char * basename, struct vnode_attr * vap, vfs_context_t ctx); #endif /* CONFIG_APPLEDOUBLE */ +extern lck_rw_t * rootvnode_rw_lock; + static errno_t post_rename(vnode_t fdvp, vnode_t fvp, vnode_t tdvp, vnode_t tvp); +static ZONE_VIEW_DEFINE(ZV_VFS_CONTEXT, "vfs_context", + KHEAP_ID_DEFAULT, sizeof(struct vfs_context)); + /* * vnode_setneedinactive * @@ -444,6 +450,12 @@ vfs_mntlabel(mount_t mp) return (void*)mp->mnt_mntlabel; } +uint64_t +vfs_mount_id(mount_t mp) +{ + return mp->mnt_mount_id; +} + /* returns command modifier flags of mount_t ie. MNT_CMDFLAGS */ uint64_t vfs_flags(mount_t mp) @@ -642,6 +654,30 @@ vfs_setextendedsecurity(mount_t mp) mount_unlock(mp); } +void +vfs_setmntsystem(mount_t mp) +{ + mount_lock(mp); + mp->mnt_kern_flag |= MNTK_SYSTEM; + mount_unlock(mp); +} + +void +vfs_setmntsystemdata(mount_t mp) +{ + mount_lock(mp); + mp->mnt_kern_flag |= MNTK_SYSTEMDATA; + mount_unlock(mp); +} + +void +vfs_setmntswap(mount_t mp) +{ + mount_lock(mp); + mp->mnt_kern_flag |= (MNTK_SYSTEM | MNTK_SWAP_MOUNT); + mount_unlock(mp); +} + void vfs_clearextendedsecurity(mount_t mp) { @@ -686,6 +722,12 @@ vfs_setmaxsymlen(mount_t mp, uint32_t symlen) mp->mnt_maxsymlinklen = symlen; } +boolean_t +vfs_is_basesystem(mount_t mp) +{ + return ((mp->mnt_supl_kern_flag & MNTK_SUPL_BASESYSTEM) == 0) ? false : true; +} + /* return a pointer to the RO vfs_statfs associated with mount_t */ struct vfsstatfs * vfs_statfs(mount_t mp) @@ -725,7 +767,7 @@ vfs_setattr(mount_t mp, struct vfs_attr *vfa, vfs_context_t ctx) * the volume name */ if (vfs_isrdonly(mp) && - !((mp->mnt_flag & MNT_ROOTFS) && (vfa->f_active == VFSATTR_f_vol_name))) { + !((strcmp(mp->mnt_vfsstat.f_fstypename, "apfs") == 0) && (vfa->f_active == VFSATTR_f_vol_name))) { return EROFS; } @@ -899,9 +941,8 @@ vfs_fsadd(struct vfs_fsentry *vfe, vfstable_t *handle) return EINVAL; } - MALLOC(newvfstbl, void *, sizeof(struct vfstable), M_TEMP, - M_WAITOK); - bzero(newvfstbl, sizeof(struct vfstable)); + newvfstbl = kheap_alloc(KHEAP_TEMP, sizeof(struct vfstable), + Z_WAITOK | Z_ZERO); newvfstbl->vfc_vfsops = vfe->vfe_vfsops; strncpy(&newvfstbl->vfc_name[0], vfe->vfe_fsname, MFSNAMELEN); if ((vfe->vfe_flags & VFS_TBLNOTYPENUM)) { @@ -965,9 +1006,7 @@ vfs_fsadd(struct vfs_fsentry *vfe, vfstable_t *handle) /* XXX - shouldn't be M_TEMP */ descsize = desccount * vfs_opv_numops * sizeof(PFI); - MALLOC(descptr, PFI *, descsize, - M_TEMP, M_WAITOK); - bzero(descptr, descsize); + descptr = kheap_alloc(KHEAP_DEFAULT, descsize, Z_WAITOK | Z_ZERO); newvfstbl->vfc_descptr = descptr; newvfstbl->vfc_descsize = descsize; @@ -1069,7 +1108,7 @@ vfs_fsadd(struct vfs_fsentry *vfe, vfstable_t *handle) (*newvfstbl->vfc_vfsops->vfs_init)(&vfsc); } - FREE(newvfstbl, M_TEMP); + kheap_free(KHEAP_TEMP, newvfstbl, sizeof(struct vfstable)); return 0; } @@ -1084,6 +1123,7 @@ vfs_fsremove(vfstable_t handle) { struct vfstable * vfstbl = (struct vfstable *)handle; void *old_desc = NULL; + size_t descsize = 0; errno_t err; /* Preflight check for any mounts */ @@ -1099,14 +1139,15 @@ vfs_fsremove(vfstable_t handle) */ if (vfstbl->vfc_descptr && vfstbl->vfc_descsize) { old_desc = 
vfstbl->vfc_descptr; + descsize = vfstbl->vfc_descsize; } err = vfstable_del(vfstbl); mount_list_unlock(); /* free the descriptor if the delete was successful */ - if (err == 0 && old_desc) { - FREE(old_desc, M_TEMP); + if (err == 0) { + kheap_free(KHEAP_DEFAULT, old_desc, descsize); } return err; @@ -1180,6 +1221,20 @@ vfs_context_is64bit(vfs_context_t ctx) return 0; } +boolean_t +vfs_context_can_resolve_triggers(vfs_context_t ctx) +{ + proc_t proc = vfs_context_proc(ctx); + + if (proc) { + if (proc->p_vfs_iopolicy & + P_VFS_IOPOLICY_TRIGGER_RESOLVE_DISABLE) { + return false; + } + return true; + } + return false; +} /* * vfs_context_proc @@ -1348,7 +1403,7 @@ vfs_context_create(vfs_context_t ctx) { vfs_context_t newcontext; - newcontext = (vfs_context_t)kalloc(sizeof(struct vfs_context)); + newcontext = zalloc_flags(ZV_VFS_CONTEXT, Z_WAITOK | Z_ZERO); if (newcontext) { kauth_cred_t safecred; @@ -1373,7 +1428,7 @@ vfs_context_t vfs_context_current(void) { vfs_context_t ctx = NULL; - volatile uthread_t ut = (uthread_t)get_bsdthread_info(current_thread()); + uthread_t ut = (uthread_t)get_bsdthread_info(current_thread()); if (ut != NULL) { if (ut->uu_context.vc_ucred != NULL) { @@ -1398,24 +1453,22 @@ vfs_context_current(void) * This is also used by imageboot_setup(), called early from bsd_init() after * kernproc has been given a credential. * - * Note: The use of proc_thread() here is a convenience to avoid inclusion - * of many Mach headers to do the reference directly rather than indirectly; - * we will need to forego this convenience when we reture proc_thread(). */ static struct vfs_context kerncontext; vfs_context_t vfs_context_kernel(void) { - if (kerncontext.vc_ucred == NOCRED) { - kerncontext.vc_ucred = kernproc->p_ucred; - } - if (kerncontext.vc_thread == NULL) { - kerncontext.vc_thread = proc_thread(kernproc); - } - return &kerncontext; } +/* + * Called early in bsd_init() when kernproc sets its thread and cred context. + */ +void +vfs_set_context_kernel(vfs_context_t ctx) +{ + kerncontext = *ctx; +} int vfs_context_rele(vfs_context_t ctx) @@ -1424,7 +1477,7 @@ vfs_context_rele(vfs_context_t ctx) if (IS_VALID_CRED(ctx->vc_ucred)) { kauth_cred_unref(&ctx->vc_ucred); } - kfree(ctx, sizeof(struct vfs_context)); + zfree(ZV_VFS_CONTEXT, ctx); } return 0; } @@ -1514,7 +1567,9 @@ vfs_rootvnode(void) { int error; + lck_rw_lock_shared(rootvnode_rw_lock); error = vnode_get(rootvnode); + lck_rw_unlock_shared(rootvnode_rw_lock); if (error) { return (vnode_t)0; } else { @@ -2028,7 +2083,12 @@ vnode_clearmountedon(vnode_t vp) void vnode_settag(vnode_t vp, int tag) { - vp->v_tag = tag; + /* + * We only assign enum values to v_tag, but add an assert to make sure we + * catch it in dev/debug builds if this ever change. + */ + assert(tag >= SHRT_MIN && tag <= SHRT_MAX); + vp->v_tag = (uint16_t)tag; } int @@ -2170,21 +2230,6 @@ current_workingdir(void) return vfs_context_cwd(vfs_context_current()); } -/* returns vnode ref to current root(chroot) directory */ -vnode_t -current_rootdir(void) -{ - proc_t proc = current_proc(); - struct vnode * vp; - - if ((vp = proc->p_fd->fd_rdir)) { - if ((vnode_getwithref(vp))) { - return NULL; - } - } - return vp; -} - /* * Get a filesec and optional acl contents from an extended attribute. 
* Function will attempt to retrive ACL, UUID, and GUID information using a @@ -2264,7 +2309,7 @@ vnode_get_filesec(vnode_t vp, kauth_filesec_t *fsecp, vfs_context_t ctx) } /* get buffer and uio */ - if (((fsec = kauth_filesec_alloc(fsec_size)) == NULL) || + if (((fsec = kauth_filesec_alloc((int)fsec_size)) == NULL) || ((fsec_uio = uio_create(1, 0, UIO_SYSSPACE, UIO_READ)) == NULL) || uio_addiov(fsec_uio, CAST_USER_ADDR_T(fsec), xsize)) { KAUTH_DEBUG(" ERROR - could not allocate iov to read ACL"); @@ -2408,6 +2453,54 @@ out: return error; } +/* + * Handle uid/gid == 99 and MNT_IGNORE_OWNERSHIP here. + */ +void +vnode_attr_handle_mnt_ignore_ownership(struct vnode_attr *vap, mount_t mp, vfs_context_t ctx) +{ + uid_t nuid; + gid_t ngid; + + if (VATTR_IS_ACTIVE(vap, va_uid)) { + if (vfs_context_issuser(ctx) && VATTR_IS_SUPPORTED(vap, va_uid)) { + nuid = vap->va_uid; + } else if (mp->mnt_flag & MNT_IGNORE_OWNERSHIP) { + nuid = mp->mnt_fsowner; + if (nuid == KAUTH_UID_NONE) { + nuid = 99; + } + } else if (VATTR_IS_SUPPORTED(vap, va_uid)) { + nuid = vap->va_uid; + } else { + /* this will always be something sensible */ + nuid = mp->mnt_fsowner; + } + if ((nuid == 99) && !vfs_context_issuser(ctx)) { + nuid = kauth_cred_getuid(vfs_context_ucred(ctx)); + } + VATTR_RETURN(vap, va_uid, nuid); + } + if (VATTR_IS_ACTIVE(vap, va_gid)) { + if (vfs_context_issuser(ctx) && VATTR_IS_SUPPORTED(vap, va_gid)) { + ngid = vap->va_gid; + } else if (mp->mnt_flag & MNT_IGNORE_OWNERSHIP) { + ngid = mp->mnt_fsgroup; + if (ngid == KAUTH_GID_NONE) { + ngid = 99; + } + } else if (VATTR_IS_SUPPORTED(vap, va_gid)) { + ngid = vap->va_gid; + } else { + /* this will always be something sensible */ + ngid = mp->mnt_fsgroup; + } + if ((ngid == 99) && !vfs_context_issuser(ctx)) { + ngid = kauth_cred_getgid(vfs_context_ucred(ctx)); + } + VATTR_RETURN(vap, va_gid, ngid); + } +} /* * Returns: 0 Success @@ -2425,8 +2518,6 @@ vnode_getattr(vnode_t vp, struct vnode_attr *vap, vfs_context_t ctx) kauth_filesec_t fsec; kauth_acl_t facl; int error; - uid_t nuid; - gid_t ngid; /* * Reject attempts to fetch unknown attributes. @@ -2537,53 +2628,14 @@ vnode_getattr(vnode_t vp, struct vnode_attr *vap, vfs_context_t ctx) } #endif - /* - * Handle uid/gid == 99 and MNT_IGNORE_OWNERSHIP here. 
- */ - if (VATTR_IS_ACTIVE(vap, va_uid)) { - if (vfs_context_issuser(ctx) && VATTR_IS_SUPPORTED(vap, va_uid)) { - nuid = vap->va_uid; - } else if (vp->v_mount->mnt_flag & MNT_IGNORE_OWNERSHIP) { - nuid = vp->v_mount->mnt_fsowner; - if (nuid == KAUTH_UID_NONE) { - nuid = 99; - } - } else if (VATTR_IS_SUPPORTED(vap, va_uid)) { - nuid = vap->va_uid; - } else { - /* this will always be something sensible */ - nuid = vp->v_mount->mnt_fsowner; - } - if ((nuid == 99) && !vfs_context_issuser(ctx)) { - nuid = kauth_cred_getuid(vfs_context_ucred(ctx)); - } - VATTR_RETURN(vap, va_uid, nuid); - } - if (VATTR_IS_ACTIVE(vap, va_gid)) { - if (vfs_context_issuser(ctx) && VATTR_IS_SUPPORTED(vap, va_gid)) { - ngid = vap->va_gid; - } else if (vp->v_mount->mnt_flag & MNT_IGNORE_OWNERSHIP) { - ngid = vp->v_mount->mnt_fsgroup; - if (ngid == KAUTH_GID_NONE) { - ngid = 99; - } - } else if (VATTR_IS_SUPPORTED(vap, va_gid)) { - ngid = vap->va_gid; - } else { - /* this will always be something sensible */ - ngid = vp->v_mount->mnt_fsgroup; - } - if ((ngid == 99) && !vfs_context_issuser(ctx)) { - ngid = kauth_cred_getgid(vfs_context_ucred(ctx)); - } - VATTR_RETURN(vap, va_gid, ngid); - } + vnode_attr_handle_mnt_ignore_ownership(vap, vp->v_mount, ctx); /* * Synthesise some values that can be reasonably guessed. */ if (!VATTR_IS_SUPPORTED(vap, va_iosize)) { - VATTR_RETURN(vap, va_iosize, vp->v_mount->mnt_vfsstat.f_iosize); + assert(vp->v_mount->mnt_vfsstat.f_iosize <= UINT32_MAX); + VATTR_RETURN(vap, va_iosize, (uint32_t)vp->v_mount->mnt_vfsstat.f_iosize); } if (!VATTR_IS_SUPPORTED(vap, va_flags)) { @@ -3717,6 +3769,11 @@ VNOP_IOCTL(vnode_t vp, u_long command, caddr_t data, int fflag, vfs_context_t ct } } + if ((command == DKIOCISSOLIDSTATE) && (vp == rootvp) && rootvp_is_ssd && data) { + *data = 1; + return 0; + } + a.a_desc = &vnop_ioctl_desc; a.a_vp = vp; a.a_command = command; @@ -4156,30 +4213,31 @@ vn_rename(struct vnode *fdvp, struct vnode **fvpp, struct componentname *fcnp, s * source has a "._" prefix. */ + size_t xfromname_len = 0; + size_t xtoname_len = 0; if (!NATIVE_XATTR(fdvp) && !(fcnp->cn_nameptr[0] == '.' && fcnp->cn_nameptr[1] == '_')) { - size_t len; int error; /* Get source attribute file name. */ - len = fcnp->cn_namelen + 3; - if (len > sizeof(smallname1)) { - MALLOC(xfromname, char *, len, M_TEMP, M_WAITOK); + xfromname_len = fcnp->cn_namelen + 3; + if (xfromname_len > sizeof(smallname1)) { + xfromname = kheap_alloc(KHEAP_TEMP, xfromname_len, Z_WAITOK); } else { xfromname = &smallname1[0]; } - strlcpy(xfromname, "._", len); - strlcat(xfromname, fcnp->cn_nameptr, len); + strlcpy(xfromname, "._", xfromname_len); + strlcat(xfromname, fcnp->cn_nameptr, xfromname_len); /* Get destination attribute file name. */ - len = tcnp->cn_namelen + 3; - if (len > sizeof(smallname2)) { - MALLOC(xtoname, char *, len, M_TEMP, M_WAITOK); + xtoname_len = tcnp->cn_namelen + 3; + if (xtoname_len > sizeof(smallname2)) { + xtoname = kheap_alloc(KHEAP_TEMP, xtoname_len, Z_WAITOK); } else { xtoname = &smallname2[0]; } - strlcpy(xtoname, "._", len); - strlcat(xtoname, tcnp->cn_nameptr, len); + strlcpy(xtoname, "._", xtoname_len); + strlcat(xtoname, tcnp->cn_nameptr, xtoname_len); /* * Look up source attribute file, keep reference on it if exists. @@ -4187,7 +4245,7 @@ vn_rename(struct vnode *fdvp, struct vnode **fvpp, struct componentname *fcnp, s * in the rename syscall. It's OK if the source file does not exist, since this * is only for AppleDouble files. 
*/ - MALLOC(fromnd, struct nameidata *, sizeof(struct nameidata), M_TEMP, M_WAITOK); + fromnd = kheap_alloc(KHEAP_TEMP, sizeof(struct nameidata), Z_WAITOK); NDINIT(fromnd, RENAME, OP_RENAME, NOFOLLOW | USEDVP | CN_NBMOUNTLOOK, UIO_SYSSPACE, CAST_USER_ADDR_T(xfromname), ctx); fromnd->ni_dvp = fdvp; @@ -4282,7 +4340,7 @@ vn_rename(struct vnode *fdvp, struct vnode **fvpp, struct componentname *fcnp, s * Note that tdvp already has an iocount reference. Make sure to check that we * get a valid vnode from namei. */ - MALLOC(tond, struct nameidata *, sizeof(struct nameidata), M_TEMP, M_WAITOK); + tond = kheap_alloc(KHEAP_TEMP, sizeof(struct nameidata), Z_WAITOK); NDINIT(tond, RENAME, OP_RENAME, NOCACHE | NOFOLLOW | USEDVP | CN_NBMOUNTLOOK, UIO_SYSSPACE, CAST_USER_ADDR_T(xtoname), ctx); @@ -4376,18 +4434,14 @@ ad_error: nameidone(tond); } if (xfromname && xfromname != &smallname1[0]) { - FREE(xfromname, M_TEMP); + kheap_free(KHEAP_TEMP, xfromname, xfromname_len); } if (xtoname && xtoname != &smallname2[0]) { - FREE(xtoname, M_TEMP); + kheap_free(KHEAP_TEMP, xtoname, xtoname_len); } #endif /* CONFIG_APPLEDOUBLE */ - if (fromnd) { - FREE(fromnd, M_TEMP); - } - if (tond) { - FREE(tond, M_TEMP); - } + kheap_free(KHEAP_TEMP, fromnd, sizeof(struct nameidata)); + kheap_free(KHEAP_TEMP, tond, sizeof(struct nameidata)); return _err; } @@ -4853,18 +4907,19 @@ xattrfile_remove(vnode_t dvp, const char * basename, vfs_context_t ctx, int forc struct nameidata nd; char smallname[64]; char *filename = NULL; - size_t len; + size_t alloc_len; + size_t copy_len; if ((basename == NULL) || (basename[0] == '\0') || (basename[0] == '.' && basename[1] == '_')) { return; } filename = &smallname[0]; - len = snprintf(filename, sizeof(smallname), "._%s", basename); - if (len >= sizeof(smallname)) { - len++; /* snprintf result doesn't include '\0' */ - MALLOC(filename, char *, len, M_TEMP, M_WAITOK); - len = snprintf(filename, len, "._%s", basename); + alloc_len = snprintf(filename, sizeof(smallname), "._%s", basename); + if (alloc_len >= sizeof(smallname)) { + alloc_len++; /* snprintf result doesn't include '\0' */ + filename = kheap_alloc(KHEAP_TEMP, alloc_len, Z_WAITOK); + copy_len = snprintf(filename, alloc_len, "._%s", basename); } NDINIT(&nd, DELETE, OP_UNLINK, WANTPARENT | LOCKLEAF | NOFOLLOW | USEDVP, UIO_SYSSPACE, CAST_USER_ADDR_T(filename), ctx); @@ -4920,7 +4975,7 @@ out1: vnode_put(xvp); out2: if (filename && filename != &smallname[0]) { - FREE(filename, M_TEMP); + kheap_free(KHEAP_TEMP, filename, alloc_len); } } @@ -4946,7 +5001,7 @@ xattrfile_setattr(vnode_t dvp, const char * basename, struct vnode_attr * vap, len = snprintf(filename, sizeof(smallname), "._%s", basename); if (len >= sizeof(smallname)) { len++; /* snprintf result doesn't include '\0' */ - MALLOC(filename, char *, len, M_TEMP, M_WAITOK); + filename = kheap_alloc(KHEAP_TEMP, len, Z_WAITOK); len = snprintf(filename, len, "._%s", basename); } NDINIT(&nd, LOOKUP, OP_SETATTR, NOFOLLOW | USEDVP, UIO_SYSSPACE, @@ -4973,7 +5028,7 @@ xattrfile_setattr(vnode_t dvp, const char * basename, struct vnode_attr * vap, vnode_put(xvp); out2: if (filename && filename != &smallname[0]) { - FREE(filename, M_TEMP); + kheap_free(KHEAP_TEMP, filename, len); } } #endif /* CONFIG_APPLEDOUBLE */ @@ -5819,6 +5874,58 @@ VNOP_OFFTOBLK(struct vnode *vp, off_t offset, daddr64_t *lblkno) return _err; } +#if 0 +/* +*# +*#% ap vp L L L +*# +*/ +struct vnop_verify_args { + struct vnodeop_desc *a_desc; + vnode_t a_vp; + off_t a_foffset; + char *a_buf; + size_t a_bufsize; + size_t 
*a_verifyblksize; + int a_flags; + vfs_context_t a_context; +}; +#endif + +errno_t +VNOP_VERIFY(struct vnode *vp, off_t foffset, uint8_t *buf, size_t bufsize, + size_t *verify_block_size, vnode_verify_flags_t flags, vfs_context_t ctx) +{ + int _err; + struct vnop_verify_args a; + + if (ctx == NULL) { + ctx = vfs_context_current(); + } + a.a_desc = &vnop_verify_desc; + a.a_vp = vp; + a.a_foffset = foffset; + a.a_buf = buf; + a.a_bufsize = bufsize; + a.a_verifyblksize = verify_block_size; + a.a_flags = flags; + a.a_context = ctx; + + _err = (*vp->v_op[vnop_verify_desc.vdesc_offset])(&a); + DTRACE_FSINFO(verify, vnode_t, vp); + + /* It is not an error for a filesystem to not support this VNOP */ + if (_err == ENOTSUP) { + if (!buf && verify_block_size) { + *verify_block_size = 0; + } + + _err = 0; + } + + return _err; +} + #if 0 /* *# diff --git a/bsd/vfs/vfs_attrlist.c b/bsd/vfs/vfs_attrlist.c index d7ba26600..c0bc4c89e 100644 --- a/bsd/vfs/vfs_attrlist.c +++ b/bsd/vfs/vfs_attrlist.c @@ -44,12 +44,12 @@ #include #include #include -#include +#include #include #include #include #include -#include +#include #include #include @@ -59,6 +59,22 @@ #define ATTR_TIME_SIZE -1 +static int readdirattr(vnode_t, struct fd_vn_data *, uio_t, struct attrlist *, + uint64_t, int *, int *, vfs_context_t ctx) __attribute__((noinline)); + +static void +vattr_get_alt_data(vnode_t, struct attrlist *, struct vnode_attr *, int, int, + int, vfs_context_t) __attribute__((noinline)); + +static void get_error_attributes(vnode_t, struct attrlist *, uint64_t, user_addr_t, + size_t, int, caddr_t, vfs_context_t) __attribute__((noinline)); + +static int getvolattrlist(vfs_context_t, vnode_t, struct attrlist *, user_addr_t, + size_t, uint64_t, enum uio_seg, int) __attribute__((noinline)); + +static int get_direntry(vfs_context_t, vnode_t, struct fd_vn_data *, int *, + struct direntry **) __attribute__((noinline)); + /* * Structure describing the state of an in-progress attrlist operation. */ @@ -130,8 +146,8 @@ attrlist_pack_variable2(struct _attrlist_buf *ab, const void *source, ssize_t co * Note that we may be able to pack the fixed width attref, but not * the variable (if there's no room). */ - ar.attr_dataoffset = ab->varcursor - ab->fixedcursor; - ar.attr_length = count + extcount; + ar.attr_dataoffset = (int32_t)(ab->varcursor - ab->fixedcursor); + ar.attr_length = (u_int32_t)(count + extcount); attrlist_pack_fixed(ab, &ar, sizeof(ar)); /* @@ -188,7 +204,7 @@ attrlist_pack_variable(struct _attrlist_buf *ab, const void *source, ssize_t cou * allocated buffer space. */ static void -attrlist_pack_string(struct _attrlist_buf *ab, const char *source, ssize_t count) +attrlist_pack_string(struct _attrlist_buf *ab, const char *source, size_t count) { struct attrreference ar; ssize_t fit, space; @@ -206,8 +222,8 @@ attrlist_pack_string(struct _attrlist_buf *ab, const char *source, ssize_t count /* * Construct the fixed-width attribute that refers to this string. 
*/ - ar.attr_dataoffset = ab->varcursor - ab->fixedcursor; - ar.attr_length = count + 1; + ar.attr_dataoffset = (int32_t)(ab->varcursor - ab->fixedcursor); + ar.attr_length = (u_int32_t)count + 1; attrlist_pack_fixed(ab, &ar, sizeof(ar)); /* @@ -224,7 +240,7 @@ attrlist_pack_string(struct _attrlist_buf *ab, const char *source, ssize_t count space = ab->allocated - (ab->varcursor - ab->base); fit = lmin(count, space); if (space > 0) { - int bytes_to_zero; + long bytes_to_zero; /* * If there is space remaining, copy data in, and @@ -246,7 +262,7 @@ attrlist_pack_string(struct _attrlist_buf *ab, const char *source, ssize_t count * Zero out any additional bytes we might have as a * result of rounding up. */ - bytes_to_zero = min((roundup(fit, 4) - fit), + bytes_to_zero = lmin((roundup(fit, 4) - fit), space - fit); if (bytes_to_zero) { bzero(&(ab->varcursor[fit]), bytes_to_zero); @@ -288,7 +304,7 @@ attrlist_pack_string(struct _attrlist_buf *ab, const char *source, ssize_t count struct user64_timespec us = {.tv_sec = v.tv_sec, .tv_nsec = v.tv_nsec}; \ ATTR_PACK(&b, us); \ } else { \ - struct user32_timespec us = {.tv_sec = v.tv_sec, .tv_nsec = v.tv_nsec}; \ + struct user32_timespec us = {.tv_sec = (user32_time_t)v.tv_sec, .tv_nsec = (user32_long_t)v.tv_nsec}; \ ATTR_PACK(&b, us); \ } \ } while(0) @@ -547,6 +563,7 @@ static struct getattrlist_attrtab getattrlist_common_extended_tab[] = { {.attr = ATTR_CMNEXT_REALFSID, .bits = VATTR_BIT(va_fsid64), .size = sizeof(fsid_t), .action = KAUTH_VNODE_READ_ATTRIBUTES}, {.attr = ATTR_CMNEXT_CLONEID, .bits = VATTR_BIT(va_clone_id), .size = sizeof(uint64_t), .action = KAUTH_VNODE_READ_ATTRIBUTES}, {.attr = ATTR_CMNEXT_EXT_FLAGS, .bits = VATTR_BIT(va_extflags), .size = sizeof(uint64_t), .action = KAUTH_VNODE_READ_ATTRIBUTES}, + {.attr = ATTR_CMNEXT_RECURSIVE_GENCOUNT, .bits = VATTR_BIT(va_recursive_gencount), .size = sizeof(uint64_t), .action = KAUTH_VNODE_READ_ATTRIBUTES}, {.attr = 0, .bits = 0, .size = 0, .action = 0} }; @@ -981,12 +998,7 @@ getvolattrlist(vfs_context_t ctx, vnode_t vp, struct attrlist *alp, if (vs.f_active != 0) { /* If we're going to ask for f_vol_name, allocate a buffer to point it at */ if (VFSATTR_IS_ACTIVE(&vs, f_vol_name)) { - vs.f_vol_name = (char *) kalloc(MAXPATHLEN); - if (vs.f_vol_name == NULL) { - error = ENOMEM; - VFS_DEBUG(ctx, vp, "ATTRLIST - ERROR: could not allocate f_vol_name buffer"); - goto out; - } + vs.f_vol_name = (char *) zalloc(ZV_NAMEI); vs.f_vol_name[0] = '\0'; } @@ -1193,7 +1205,7 @@ getvolattrlist(vfs_context_t ctx, vnode_t vp, struct attrlist *alp, goto out; } - MALLOC(ab.base, char *, ab.allocated, M_TEMP, M_ZERO | M_WAITOK); + ab.base = kheap_alloc(KHEAP_TEMP, ab.allocated, Z_ZERO | Z_WAITOK); if (ab.base == NULL) { error = ENOMEM; VFS_DEBUG(ctx, vp, "ATTRLIST - ERROR: could not allocate %d for copy buffer", ab.allocated); @@ -1485,6 +1497,14 @@ getvolattrlist(vfs_context_t ctx, vnode_t vp, struct attrlist *alp, vs.f_capabilities.valid[VOL_CAPABILITIES_FORMAT] |= VOL_CAP_FMT_NO_PERMISSIONS; } + /* + * ATTR_CMN_USERACCESS attribute was previously set by file-system drivers, thus volume capabilitiy + * VOL_CAP_INT_USERACCESS was conditionally enabled. ATTR_CMN_USERACCESS is now set inside VFS, + * regardless of underlying volume type thus we always set VOL_CAP_INT_USERACCESS. 
+ */ + vs.f_capabilities.capabilities[VOL_CAPABILITIES_INTERFACES] |= VOL_CAP_INT_USERACCESS; + vs.f_capabilities.valid[VOL_CAPABILITIES_INTERFACES] |= VOL_CAP_INT_USERACCESS; + ATTR_PACK(&ab, vs.f_capabilities); ab.actual.volattr |= ATTR_VOL_CAPABILITIES; } @@ -1533,7 +1553,7 @@ getvolattrlist(vfs_context_t ctx, vnode_t vp, struct attrlist *alp, * of the result buffer, even if we copied less out. The caller knows how big a buffer * they gave us, so they can always check for truncation themselves. */ - *(uint32_t *)ab.base = (options & FSOPT_REPORT_FULLSIZE) ? ab.needed : imin(bufferSize, ab.needed); + *(uint32_t *)ab.base = (options & FSOPT_REPORT_FULLSIZE) ? (uint32_t)ab.needed : (uint32_t)lmin(bufferSize, ab.needed); /* Return attribute set output if requested. */ if (return_valid && @@ -1549,21 +1569,19 @@ getvolattrlist(vfs_context_t ctx, vnode_t vp, struct attrlist *alp, if (UIO_SEG_IS_USER_SPACE(segflg)) { error = copyout(ab.base, CAST_USER_ADDR_T(attributeBuffer), - ulmin(bufferSize, ab.needed)); + ulmin((uint32_t)bufferSize, (uint32_t)ab.needed)); } else { - bcopy(ab.base, (void *)attributeBuffer, (size_t)ulmin(bufferSize, ab.needed)); + bcopy(ab.base, (void *)attributeBuffer, (size_t)ulmin((uint32_t)bufferSize, (uint32_t)ab.needed)); } out: if (vs.f_vol_name != NULL) { - kfree(vs.f_vol_name, MAXPATHLEN); + zfree(ZV_NAMEI, vs.f_vol_name); } if (release_str) { vnode_putname(cnp); } - if (ab.base != NULL) { - FREE(ab.base, M_TEMP); - } + kheap_free(KHEAP_TEMP, ab.base, ab.allocated); VFS_DEBUG(ctx, vp, "ATTRLIST - returning %d", error); if (root_vp != NULL) { @@ -2381,6 +2399,16 @@ attr_pack_common_extended(mount_t mp, struct vnode *vp, struct attrlist *alp, } } + if (alp->forkattr & ATTR_CMNEXT_RECURSIVE_GENCOUNT) { + if (VATTR_IS_SUPPORTED(vap, va_recursive_gencount)) { + ATTR_PACK8((*abp), vap->va_recursive_gencount); + abp->actual.forkattr |= ATTR_CMNEXT_RECURSIVE_GENCOUNT; + } else if (!return_valid || pack_invalid) { + uint64_t zero_val = 0; + ATTR_PACK8((*abp), zero_val); + } + } + return 0; } @@ -2737,13 +2765,7 @@ vfs_attr_pack_internal(mount_t mp, vnode_t vp, uio_t auio, struct attrlist *alp, //if a path is requested, allocate a temporary buffer to build it if (vp && (alp->commonattr & (ATTR_CMN_FULLPATH))) { - fullpathptr = (char*) kalloc(MAXPATHLEN); - if (fullpathptr == NULL) { - error = ENOMEM; - VFS_DEBUG(ctx, vp, "ATTRLIST - ERROR: cannot allocate fullpath buffer"); - goto out; - } - bzero(fullpathptr, MAXPATHLEN); + fullpathptr = (char*) zalloc_flags(ZV_NAMEI, Z_WAITOK | Z_ZERO); apaths.fullpathptr = fullpathptr; apaths.fullpathlenp = &fullpathlen; } @@ -2751,25 +2773,13 @@ vfs_attr_pack_internal(mount_t mp, vnode_t vp, uio_t auio, struct attrlist *alp, // only interpret fork attributes if they're used as new common attributes if (vp && use_fork) { if (alp->forkattr & (ATTR_CMNEXT_RELPATH)) { - relpathptr = (char*) kalloc(MAXPATHLEN); - if (relpathptr == NULL) { - error = ENOMEM; - VFS_DEBUG(ctx, vp, "ATTRLIST - ERROR: cannot allocate relpath buffer"); - goto out; - } - bzero(relpathptr, MAXPATHLEN); + relpathptr = (char*) zalloc_flags(ZV_NAMEI, Z_WAITOK | Z_ZERO); apaths.relpathptr = relpathptr; apaths.relpathlenp = &relpathlen; } if (alp->forkattr & (ATTR_CMNEXT_NOFIRMLINKPATH)) { - REALpathptr = (char*) kalloc(MAXPATHLEN); - if (REALpathptr == NULL) { - error = ENOMEM; - VFS_DEBUG(ctx, vp, "ATTRLIST - ERROR: cannot allocate canonpath buffer"); - goto out; - } - bzero(REALpathptr, MAXPATHLEN); + REALpathptr = (char*) zalloc_flags(ZV_NAMEI, Z_WAITOK | Z_ZERO); 
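/*
 * Editorial aside, not part of the patch: in these vfs_attrlist.c hunks the
 * MAXPATHLEN-sized path/name scratch buffers stop coming from kalloc() and
 * MALLOC_ZONE(M_NAMEI) and are taken from the ZV_NAMEI zone instead, and the
 * patch drops the matching ENOMEM paths.  A minimal sketch of the new
 * pairing; example_dup_path() is a hypothetical helper for illustration only.
 */
static char *
example_dup_path(const char *path)
{
	char *buf = zalloc_flags(ZV_NAMEI, Z_WAITOK | Z_ZERO);

	/*
	 * Replaces a kalloc(MAXPATHLEN)/bzero() pair; the caller releases the
	 * buffer with zfree(ZV_NAMEI, buf), mirroring the hunks above.
	 */
	strlcpy(buf, path, MAXPATHLEN);
	return buf;
}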
apaths.REALpathptr = REALpathptr; apaths.REALpathlenp = &REALpathlen; } @@ -2821,8 +2831,7 @@ vfs_attr_pack_internal(mount_t mp, vnode_t vp, uio_t auio, struct attrlist *alp, * and big enough. */ if (uio_isuserspace(auio) || (buf_size < ab.allocated)) { - MALLOC(ab.base, char *, ab.allocated, M_TEMP, - M_ZERO | M_WAITOK); + ab.base = kheap_alloc(KHEAP_TEMP, ab.allocated, Z_ZERO | Z_WAITOK); alloc_local_buf = 1; } else { /* @@ -2937,7 +2946,7 @@ vfs_attr_pack_internal(mount_t mp, vnode_t vp, uio_t auio, struct attrlist *alp, * of the result buffer, even if we copied less out. The caller knows how big a buffer * they gave us, so they can always check for truncation themselves. */ - *(uint32_t *)ab.base = (options & FSOPT_REPORT_FULLSIZE) ? ab.needed : imin(ab.allocated, ab.needed); + *(uint32_t *)ab.base = (options & FSOPT_REPORT_FULLSIZE) ? (uint32_t)ab.needed : (uint32_t)lmin(ab.allocated, ab.needed); /* Return attribute set output if requested. */ if (return_valid) { @@ -2951,11 +2960,11 @@ vfs_attr_pack_internal(mount_t mp, vnode_t vp, uio_t auio, struct attrlist *alp, bcopy(&ab.actual, ab.base + sizeof(uint32_t), sizeof(ab.actual)); } - copy_size = imin(buf_size, ab.allocated); + copy_size = lmin(buf_size, ab.allocated); /* Only actually copyout as much out as the user buffer can hold */ if (alloc_local_buf) { - error = uiomove(ab.base, copy_size, auio); + error = uiomove(ab.base, (int)copy_size, auio); } else { off_t orig_offset = uio_offset(auio); @@ -2980,16 +2989,16 @@ out: vnode_putname(vname); } if (fullpathptr) { - kfree(fullpathptr, MAXPATHLEN); + zfree(ZV_NAMEI, fullpathptr); } if (relpathptr) { - kfree(relpathptr, MAXPATHLEN); + zfree(ZV_NAMEI, relpathptr); } if (REALpathptr) { - kfree(REALpathptr, MAXPATHLEN); + zfree(ZV_NAMEI, REALpathptr); } - if (ab.base != NULL && alloc_local_buf) { - FREE(ab.base, M_TEMP); + if (alloc_local_buf) { + kheap_free(KHEAP_TEMP, ab.base, ab.allocated); } return error; } @@ -3055,7 +3064,7 @@ getattrlist_internal(vfs_context_t ctx, vnode_t vp, struct attrlist *alp, user_addr_t attributeBuffer, size_t bufferSize, uint64_t options, enum uio_seg segflg, char* authoritative_name, struct ucred *file_cred) { - struct vnode_attr va; + struct vnode_attr *va; kauth_action_t action; ssize_t fixedsize; char *va_name; @@ -3086,7 +3095,8 @@ getattrlist_internal(vfs_context_t ctx, vnode_t vp, struct attrlist *alp, &uio_buf[0], sizeof(uio_buf)); uio_addiov(auio, attributeBuffer, bufferSize); - VATTR_INIT(&va); + va = kheap_alloc(KHEAP_TEMP, sizeof(struct vnode_attr), Z_WAITOK); + VATTR_INIT(va); va_name = NULL; if (alp->bitmapcount != ATTR_BIT_MAP_COUNT) { @@ -3156,7 +3166,7 @@ getattrlist_internal(vfs_context_t ctx, vnode_t vp, struct attrlist *alp, goto out; } /* Keep invalid attrs from being uninitialized */ - bzero(&va, sizeof(va)); + bzero(va, sizeof(*va)); } /* Pick up the vnode type. If the FS is bad and changes vnode types on us, we @@ -3167,7 +3177,7 @@ getattrlist_internal(vfs_context_t ctx, vnode_t vp, struct attrlist *alp, /* * Set up the vnode_attr structure and authorise. 
*/ - if ((error = getattrlist_setupvattr(alp, &va, &fixedsize, &action, proc_is64, (vtype == VDIR), use_fork)) != 0) { + if ((error = getattrlist_setupvattr(alp, va, &fixedsize, &action, proc_is64, (vtype == VDIR), use_fork)) != 0) { VFS_DEBUG(ctx, vp, "ATTRLIST - ERROR: setup for request failed"); goto out; } @@ -3177,20 +3187,14 @@ getattrlist_internal(vfs_context_t ctx, vnode_t vp, struct attrlist *alp, } - if (va.va_active != 0) { - uint64_t va_active = va.va_active; + if (va->va_active != 0) { + uint64_t va_active = va->va_active; /* * If we're going to ask for va_name, allocate a buffer to point it at */ - if (VATTR_IS_ACTIVE(&va, va_name)) { - MALLOC_ZONE(va_name, char *, MAXPATHLEN, M_NAMEI, - M_WAITOK); - if (va_name == NULL) { - error = ENOMEM; - VFS_DEBUG(ctx, vp, "ATTRLIST - ERROR: cannot allocate va_name buffer"); - goto out; - } + if (VATTR_IS_ACTIVE(va, va_name)) { + va_name = zalloc(ZV_NAMEI); /* * If we have an authoritative_name, prefer that name. * @@ -3201,21 +3205,21 @@ getattrlist_internal(vfs_context_t ctx, vnode_t vp, struct attrlist *alp, */ if (authoritative_name) { /* Don't ask the file system */ - VATTR_CLEAR_ACTIVE(&va, va_name); + VATTR_CLEAR_ACTIVE(va, va_name); strlcpy(va_name, authoritative_name, MAXPATHLEN); } } - va.va_name = authoritative_name ? NULL : va_name; + va->va_name = authoritative_name ? NULL : va_name; if (options & FSOPT_RETURN_REALDEV) { - va.va_vaflags |= VA_REALFSID; + va->va_vaflags |= VA_REALFSID; } /* * Call the filesystem. */ - if ((error = vnode_getattr(vp, &va, ctx)) != 0) { + if ((error = vnode_getattr(vp, va, ctx)) != 0) { VFS_DEBUG(ctx, vp, "ATTRLIST - ERROR: filesystem returned %d", error); goto out; } @@ -3230,7 +3234,7 @@ getattrlist_internal(vfs_context_t ctx, vnode_t vp, struct attrlist *alp, * opportunity to change the values of attributes * retrieved. */ - error = mac_vnode_check_getattr(ctx, file_cred, vp, &va); + error = mac_vnode_check_getattr(ctx, file_cred, vp, va); if (error) { VFS_DEBUG(ctx, vp, "ATTRLIST - ERROR: MAC framework returned %d", error); goto out; @@ -3247,26 +3251,27 @@ getattrlist_internal(vfs_context_t ctx, vnode_t vp, struct attrlist *alp, * A (buggy) filesystem may change fields which belong * to us. We try to deal with that here as well. 
*/ - va.va_active = va_active; + va->va_active = va_active; if (authoritative_name && va_name) { - VATTR_SET_ACTIVE(&va, va_name); - if (!(VATTR_IS_SUPPORTED(&va, va_name))) { - VATTR_SET_SUPPORTED(&va, va_name); + VATTR_SET_ACTIVE(va, va_name); + if (!(VATTR_IS_SUPPORTED(va, va_name))) { + VATTR_SET_SUPPORTED(va, va_name); } } - va.va_name = va_name; + va->va_name = va_name; } - error = vfs_attr_pack_internal(vp->v_mount, vp, auio, alp, options, &va, NULL, ctx, + error = vfs_attr_pack_internal(vp->v_mount, vp, auio, alp, options, va, NULL, ctx, 0, vtype, fixedsize); out: if (va_name) { - FREE_ZONE(va_name, MAXPATHLEN, M_NAMEI); + zfree(ZV_NAMEI, va_name); } - if (VATTR_IS_SUPPORTED(&va, va_acl) && (va.va_acl != NULL)) { - kauth_acl_free(va.va_acl); + if (VATTR_IS_SUPPORTED(va, va_acl) && (va->va_acl != NULL)) { + kauth_acl_free(va->va_acl); } + kheap_free(KHEAP_TEMP, va, sizeof(struct vnode_attr)); VFS_DEBUG(ctx, vp, "ATTRLIST - returning %d", error); return error; @@ -3286,13 +3291,12 @@ fgetattrlist(proc_t p, struct fgetattrlist_args *uap, __unused int32_t *retval) fp = NULL; error = 0; - if ((error = file_vnode(uap->fd, &vp)) != 0) { + if ((error = fp_get_ftype(p, uap->fd, DTYPE_VNODE, EINVAL, &fp)) != 0) { return error; } + vp = (struct vnode *)fp->fp_glob->fg_data; - if ((error = fp_lookup(p, uap->fd, &fp, 0)) != 0 || - (error = vnode_getwithref(vp)) != 0) { - vp = NULL; + if ((error = vnode_getwithref(vp)) != 0) { goto out; } @@ -3301,7 +3305,7 @@ fgetattrlist(proc_t p, struct fgetattrlist_args *uap, __unused int32_t *retval) */ error = copyin(uap->alist, &al, sizeof(al)); if (error) { - goto out; + goto out_vnode_put; } /* Default to using the vnode's name. */ @@ -3309,16 +3313,12 @@ fgetattrlist(proc_t p, struct fgetattrlist_args *uap, __unused int32_t *retval) uap->bufferSize, uap->options, (IS_64BIT_PROCESS(p) ? UIO_USERSPACE64 : \ UIO_USERSPACE32), NULL, - fp->f_fglob->fg_cred); + fp->fp_glob->fg_cred); +out_vnode_put: + vnode_put(vp); out: - if (fp) { - fp_drop(p, uap->fd, fp, 0); - } - if (vp) { - vnode_put(vp); - } - file_drop(uap->fd); + fp_drop(p, uap->fd, fp, 0); return error; } @@ -3438,7 +3438,7 @@ refill_fd_direntries(vfs_context_t ctx, vnode_t dvp, struct fd_vn_data *fvd, if (fvd->fv_eofflag) { *eofflagp = 1; if (fvd->fv_buf) { - FREE(fvd->fv_buf, M_FD_DIRBUF); + kheap_free(KHEAP_DATA_BUFFERS, fvd->fv_buf, fvd->fv_bufallocsiz); fvd->fv_buf = NULL; } return 0; @@ -3470,7 +3470,8 @@ retry_alloc: * not copied out to user space. */ if (!fvd->fv_buf) { - MALLOC(fvd->fv_buf, caddr_t, rdirbufsiz, M_FD_DIRBUF, M_WAITOK); + fvd->fv_buf = kheap_alloc(KHEAP_DATA_BUFFERS, rdirbufsiz, Z_WAITOK); + fvd->fv_bufallocsiz = rdirbufsiz; fvd->fv_bufdone = 0; } @@ -3498,8 +3499,6 @@ retry_alloc: fvd->fv_bufsiz = rdirbufused; fvd->fv_bufdone = 0; bzero(fvd->fv_buf + rdirbufused, rdirbufsiz - rdirbufused); - /* Cache allocation size the Filesystem responded to */ - fvd->fv_bufallocsiz = rdirbufsiz; } else if (!eofflag && (rdirbufsiz < FV_DIRBUF_MAX_SIZ)) { /* * Some Filesystems have higher requirements for the @@ -3511,8 +3510,7 @@ retry_alloc: * from VNOP_READDIR is ignored until at least FV_DIRBUF_MAX_SIZ * has been attempted. */ - FREE(fvd->fv_buf, M_FD_DIRBUF); - fvd->fv_buf = NULL; + kheap_free(KHEAP_DATA_BUFFERS, fvd->fv_buf, fvd->fv_bufallocsiz); rdirbufsiz = 2 * rdirbufsiz; fvd->fv_bufallocsiz = 0; goto retry_alloc; @@ -3539,8 +3537,10 @@ retry_alloc: * time to free up directory entry buffer. 
*/ if ((error || eofflag) && fvd->fv_buf) { - FREE(fvd->fv_buf, M_FD_DIRBUF); - fvd->fv_buf = NULL; + kheap_free(KHEAP_DATA_BUFFERS, fvd->fv_buf, fvd->fv_bufallocsiz); + if (error) { + fvd->fv_bufallocsiz = 0; + } } *eofflagp = eofflag; @@ -3642,7 +3642,7 @@ get_error_attributes(vnode_t vp, struct attrlist *alp, uint64_t options, { size_t fsiz, vsiz; struct _attrlist_buf ab; - int namelen; + size_t namelen; kauth_action_t action; struct attrlist al; int needs_error_attr = (alp->commonattr & ATTR_CMN_ERROR); @@ -3689,7 +3689,7 @@ get_error_attributes(vnode_t vp, struct attrlist *alp, uint64_t options, ab.needed = fsiz + vsiz; /* Fill in the size needed */ - *((uint32_t *)ab.base) = ab.needed; + *((uint32_t *)ab.base) = (uint32_t)ab.needed; if (ab.needed > (ssize_t)kern_attr_buf_siz) { goto out; } @@ -3784,7 +3784,7 @@ readdirattr(vnode_t dvp, struct fd_vn_data *fvd, uio_t auio, return error; } - MALLOC(kern_attr_buf, caddr_t, kern_attr_buf_siz, M_TEMP, M_WAITOK); + kern_attr_buf = kheap_alloc(KHEAP_TEMP, kern_attr_buf_siz, Z_WAITOK); while (uio_resid(auio) > (user_ssize_t)MIN_BUF_SIZE_REQUIRED) { struct direntry *dp; @@ -3823,8 +3823,7 @@ readdirattr(vnode_t dvp, struct fd_vn_data *fvd, uio_t auio, */ if (dp->d_name[dp->d_namlen] != '\0') { if (!max_path_name_buf) { - MALLOC(max_path_name_buf, caddr_t, MAXPATHLEN, - M_TEMP, M_WAITOK); + max_path_name_buf = zalloc_flags(ZV_NAMEI, Z_WAITOK); } bcopy(dp->d_name, max_path_name_buf, dp->d_namlen); max_path_name_buf[dp->d_namlen] = '\0'; @@ -3937,7 +3936,7 @@ readdirattr(vnode_t dvp, struct fd_vn_data *fvd, uio_t auio, entlen += pad_bytes; } *((uint32_t *)kern_attr_buf) = (uint32_t)entlen; - error = uiomove(kern_attr_buf, min(entlen, kern_attr_buf_siz), + error = uiomove(kern_attr_buf, min((int)entlen, (int)kern_attr_buf_siz), auio); if (error) { @@ -3957,13 +3956,13 @@ readdirattr(vnode_t dvp, struct fd_vn_data *fvd, uio_t auio, } if (max_path_name_buf) { - FREE(max_path_name_buf, M_TEMP); + zfree(ZV_NAMEI, max_path_name_buf); } /* * At this point, kern_attr_buf is always allocated */ - FREE(kern_attr_buf, M_TEMP); + kheap_free(KHEAP_TEMP, kern_attr_buf, kern_attr_buf_siz); /* * Always set the offset to the last succesful offset @@ -3974,6 +3973,11 @@ readdirattr(vnode_t dvp, struct fd_vn_data *fvd, uio_t auio, return error; } +/* common attributes that only require KAUTH_VNODE_LIST_DIRECTORY */ +#define LIST_DIR_ATTRS (ATTR_CMN_NAME | ATTR_CMN_OBJTYPE | \ + ATTR_CMN_FILEID | ATTR_CMN_RETURNED_ATTRS | \ + ATTR_CMN_ERROR) + /* * int getattrlistbulk(int dirfd, struct attrlist *alist, void *attributeBuffer, * size_t bufferSize, uint64_t options) @@ -4015,7 +4019,7 @@ getattrlistbulk(proc_t p, struct getattrlistbulk_args *uap, int32_t *retval) ut = get_bsdthread_info(current_thread()); segflg = IS_64BIT_PROCESS(p) ? UIO_USERSPACE64 : UIO_USERSPACE32; - if ((fp->f_fglob->fg_flag & FREAD) == 0) { + if ((fp->fp_glob->fg_flag & FREAD) == 0) { /* * AUDIT_ARG(vnpath_withref, dvp, ARG_VNODE1); */ @@ -4053,7 +4057,7 @@ getattrlistbulk(proc_t p, struct getattrlistbulk_args *uap, int32_t *retval) #if CONFIG_MACF error = mac_file_check_change_offset(vfs_context_ucred(ctx), - fp->f_fglob); + fp->fp_glob); if (error) { goto out; } @@ -4084,12 +4088,12 @@ getattrlistbulk(proc_t p, struct getattrlistbulk_args *uap, int32_t *retval) #endif /* MAC */ /* - * If the only item requested is file names, we can let that past with - * just LIST_DIRECTORY. If they want any other attributes, that means - * they need SEARCH as well. 
+ * Requested attributes that are available in the direntry struct, with the addition + * of ATTR_CMN_RETURNED_ATTRS and ATTR_CMN_ERROR, can be let past with just LIST_DIRECTORY. + * Any other requested attributes require SEARCH as well. */ action = KAUTH_VNODE_LIST_DIRECTORY; - if ((al.commonattr & ~ATTR_CMN_NAME) || al.fileattr || al.dirattr) { + if ((al.commonattr & ~LIST_DIR_ATTRS) || al.fileattr || al.dirattr) { action |= KAUTH_VNODE_SEARCH; } @@ -4098,7 +4102,7 @@ getattrlistbulk(proc_t p, struct getattrlistbulk_args *uap, int32_t *retval) goto out; } - fvdata = (struct fd_vn_data *)fp->f_fglob->fg_vn_data; + fvdata = (struct fd_vn_data *)fp->fp_glob->fg_vn_data; if (!fvdata) { panic("Directory expected to have fg_vn_data"); } @@ -4111,12 +4115,10 @@ getattrlistbulk(proc_t p, struct getattrlistbulk_args *uap, int32_t *retval) * traversal needs to be restarted (Any existing state in the * directory buffer is removed as well). */ - if (!fp->f_fglob->fg_offset) { + if (!fp->fp_glob->fg_offset) { fvdata->fv_offset = 0; - if (fvdata->fv_buf) { - FREE(fvdata->fv_buf, M_FD_DIRBUF); - } - fvdata->fv_buf = NULL; + kheap_free(KHEAP_DATA_BUFFERS, fvdata->fv_buf, + fvdata->fv_bufallocsiz); fvdata->fv_bufsiz = 0; fvdata->fv_bufdone = 0; fvdata->fv_soff = 0; @@ -4141,7 +4143,7 @@ getattrlistbulk(proc_t p, struct getattrlistbulk_args *uap, int32_t *retval) !(al.commonattr & ATTR_CMN_OBJTYPE)) { error = ENOTSUP; } else { - struct vnode_attr va; + struct vnode_attr *va; char *va_name; if (fvdata->fv_eofflag && !fvdata->fv_buf) { @@ -4156,12 +4158,13 @@ getattrlistbulk(proc_t p, struct getattrlistbulk_args *uap, int32_t *retval) eofflag = 0; count = 0; - VATTR_INIT(&va); - MALLOC(va_name, char *, MAXPATHLEN, M_TEMP, - M_WAITOK | M_ZERO); - va.va_name = va_name; + va = kheap_alloc(KHEAP_TEMP, sizeof(struct vnode_attr), Z_WAITOK); + + VATTR_INIT(va); + va_name = zalloc_flags(ZV_NAMEI, Z_WAITOK | Z_ZERO); + va->va_name = va_name; - (void)getattrlist_setupvattr_all(&al, &va, VNON, NULL, + (void)getattrlist_setupvattr_all(&al, va, VNON, NULL, IS_64BIT_PROCESS(p), (uap->options & FSOPT_ATTR_CMN_EXTENDED)); /* @@ -4169,11 +4172,12 @@ getattrlistbulk(proc_t p, struct getattrlistbulk_args *uap, int32_t *retval) * filesystem to be rapidly aged. */ ut->uu_flag |= UT_KERN_RAGE_VNODES; - error = VNOP_GETATTRLISTBULK(dvp, &al, &va, auio, NULL, + error = VNOP_GETATTRLISTBULK(dvp, &al, va, auio, NULL, options, &eofflag, &count, ctx); ut->uu_flag &= ~UT_KERN_RAGE_VNODES; - FREE(va_name, M_TEMP); + zfree(ZV_NAMEI, va_name); + kheap_free(KHEAP_TEMP, va, sizeof(struct vnode_attr)); /* * cache state of eofflag. 
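Editorial note, not part of the patch: the hunks above illustrate the allocation conversion this change applies throughout the VFS layer. MALLOC()/FREE() on M_TEMP becomes kheap_alloc()/kheap_free() on a named heap such as KHEAP_TEMP or KHEAP_DATA_BUFFERS, with Z_ZERO standing in for a separate bzero() and the allocation size supplied again at free time. A minimal sketch of the pattern, using a hypothetical structure purely for illustration:

struct example_state {
	int es_count;
};

static int
example_kheap_pattern(void)
{
	struct example_state *es;

	/*
	 * Previously: MALLOC(es, struct example_state *, sizeof(*es), M_TEMP,
	 * M_WAITOK); bzero(es, sizeof(*es)); ... FREE(es, M_TEMP);
	 */
	es = kheap_alloc(KHEAP_TEMP, sizeof(*es), Z_WAITOK | Z_ZERO);
	if (es == NULL) {
		return ENOMEM;
	}
	es->es_count = 1;

	/*
	 * The allocation size is passed back on free.  The hunks above also
	 * drop the old "if (ptr) FREE(ptr, M_TEMP)" guards, so kheap_free()
	 * is being relied on to tolerate a NULL pointer.
	 */
	kheap_free(KHEAP_TEMP, es, sizeof(*es));
	return 0;
}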
@@ -4200,7 +4204,7 @@ getattrlistbulk(proc_t p, struct getattrlistbulk_args *uap, int32_t *retval) if (count) { fvdata->fv_offset = uio_offset(auio); - fp->f_fglob->fg_offset = fvdata->fv_offset; + fp->fp_glob->fg_offset = fvdata->fv_offset; *retval = count; error = 0; } else if (!error && !eofflag) { @@ -4236,14 +4240,14 @@ attrlist_unpack_fixed(char **cursor, char *end, void *buf, ssize_t size) } #define ATTR_UNPACK(v) do {if ((error = attrlist_unpack_fixed(&cursor, bufend, &v, sizeof(v))) != 0) goto out;} while(0); -#define ATTR_UNPACK_CAST(t, v) do { t _f; ATTR_UNPACK(_f); v = _f;} while(0) +#define ATTR_UNPACK_CAST(t, v) do { t _f; ATTR_UNPACK(_f); v = (typeof(v))_f;} while(0) #define ATTR_UNPACK_TIME(v, is64) \ do { \ if (is64) { \ struct user64_timespec us; \ ATTR_UNPACK(us); \ - v.tv_sec = us.tv_sec; \ - v.tv_nsec = us.tv_nsec; \ + v.tv_sec = (unsigned long)us.tv_sec; \ + v.tv_nsec = (long)us.tv_nsec; \ } else { \ struct user32_timespec us; \ ATTR_UNPACK(us); \ @@ -4325,9 +4329,7 @@ setattrlist_internal(vnode_t vp, struct setattrlist_args *uap, proc_t p, vfs_con /* * If the caller's bitmaps indicate that there are no attributes to set, - * then exit early. In particular, we want to avoid the MALLOC below - * since the caller's bufferSize could be zero, and MALLOC of zero bytes - * returns a NULL pointer, which would cause setattrlist to return ENOMEM. + * then exit early. */ if (al.commonattr == 0 && (al.volattr & ~ATTR_VOL_INFO) == 0 && @@ -4352,7 +4354,7 @@ setattrlist_internal(vnode_t vp, struct setattrlist_args *uap, proc_t p, vfs_con error = ENOMEM; goto out; } - MALLOC(user_buf, char *, uap->bufferSize, M_TEMP, M_WAITOK); + user_buf = kheap_alloc(KHEAP_DATA_BUFFERS, uap->bufferSize, Z_WAITOK); if (user_buf == NULL) { VFS_DEBUG(ctx, vp, "ATTRLIST - ERROR: could not allocate %d bytes for buffer", uap->bufferSize); error = ENOMEM; @@ -4642,9 +4644,7 @@ setattrlist_internal(vnode_t vp, struct setattrlist_args *uap, proc_t p, vfs_con /* all done and successful */ out: - if (user_buf != NULL) { - FREE(user_buf, M_TEMP); - } + kheap_free(KHEAP_DATA_BUFFERS, user_buf, uap->bufferSize); VFS_DEBUG(ctx, vp, "ATTRLIST - set returning %d", error); return error; } @@ -4655,7 +4655,7 @@ setattrlist(proc_t p, struct setattrlist_args *uap, __unused int32_t *retval) struct vfs_context *ctx; struct nameidata nd; vnode_t vp = NULL; - u_long nameiflags; + uint32_t nameiflags; int error = 0; ctx = vfs_context_current(); diff --git a/bsd/vfs/vfs_bio.c b/bsd/vfs/vfs_bio.c index 5ce788691..6f3b17ae6 100644 --- a/bsd/vfs/vfs_bio.c +++ b/bsd/vfs/vfs_bio.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2019 Apple Inc. All rights reserved. + * Copyright (c) 2000-2020 Apple Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -81,7 +81,7 @@ #include #include #include -#include +#include #include #include #include @@ -135,11 +135,10 @@ int bdwrite_internal(buf_t, int); extern void disk_conditioner_delay(buf_t, int, int, uint64_t); /* zone allocated buffer headers */ -static void bufzoneinit(void); static void bcleanbuf_thread_init(void); static void bcleanbuf_thread(void); -static zone_t buf_hdr_zone; +static ZONE_DECLARE(buf_hdr_zone, "buf headers", sizeof(struct buf), ZC_NONE); static int buf_hdr_count; @@ -175,7 +174,7 @@ static lck_mtx_t *iobuffer_mtxp; static lck_mtx_t *buf_mtxp; static lck_mtx_t *buf_gc_callout; -static int buf_busycount; +static uint32_t buf_busycount; #define FS_BUFFER_CACHE_GC_CALLOUTS_MAX_SIZE 16 typedef struct { @@ -190,7 +189,7 @@ buf_timestamp(void) { struct timeval t; microuptime(&t); - return t.tv_sec; + return (int)t.tv_sec; } /* @@ -444,31 +443,24 @@ bufattr_setcpx(__unused bufattr_t bap, __unused struct cpx *cpx) #endif /* !CONFIG_PROTECT */ bufattr_t -bufattr_alloc() +bufattr_alloc(void) { - bufattr_t bap; - MALLOC(bap, bufattr_t, sizeof(struct bufattr), M_TEMP, M_WAITOK); - if (bap == NULL) { - return NULL; - } - - bzero(bap, sizeof(struct bufattr)); - return bap; + return kheap_alloc(KHEAP_DEFAULT, sizeof(struct bufattr), + Z_WAITOK | Z_ZERO); } void bufattr_free(bufattr_t bap) { - if (bap) { - FREE(bap, M_TEMP); - } + kheap_free(KHEAP_DEFAULT, bap, sizeof(struct bufattr)); } bufattr_t bufattr_dup(bufattr_t bap) { bufattr_t new_bufattr; - MALLOC(new_bufattr, bufattr_t, sizeof(struct bufattr), M_TEMP, M_WAITOK); + new_bufattr = kheap_alloc(KHEAP_DEFAULT, sizeof(struct bufattr), + Z_WAITOK); if (new_bufattr == NULL) { return NULL; } @@ -527,17 +519,11 @@ bufattr_markmeta(bufattr_t bap) } int -#if !CONFIG_EMBEDDED bufattr_delayidlesleep(bufattr_t bap) -#else /* !CONFIG_EMBEDDED */ -bufattr_delayidlesleep(__unused bufattr_t bap) -#endif /* !CONFIG_EMBEDDED */ { -#if !CONFIG_EMBEDDED if ((bap->ba_flags & BA_DELAYIDLESLEEP)) { return 1; } -#endif /* !CONFIG_EMBEDDED */ return 0; } @@ -800,6 +786,7 @@ buf_t buf_clone(buf_t bp, int io_offset, int io_size, void (*iodone)(buf_t, void *), void *arg) { buf_t io_bp; + int add1, add2; if (io_offset < 0 || io_size < 0) { return NULL; @@ -814,7 +801,10 @@ buf_clone(buf_t bp, int io_offset, int io_size, void (*iodone)(buf_t, void *), v return NULL; } - if (((bp->b_uploffset + io_offset + io_size) & PAGE_MASK) && ((io_offset + io_size) < bp->b_bcount)) { + if (os_add_overflow(io_offset, io_size, &add1) || os_add_overflow(add1, bp->b_uploffset, &add2)) { + return NULL; + } + if ((add2 & PAGE_MASK) && ((uint32_t)add1 < (uint32_t)bp->b_bcount)) { return NULL; } } @@ -1272,9 +1262,9 @@ buf_strategy_fragmented(vnode_t devvp, buf_t bp, off_t f_offset, size_t contig_b */ bzero((caddr_t)io_bp->b_datap, (int)io_contig_bytes); } else { - io_bp->b_bcount = io_contig_bytes; - io_bp->b_bufsize = io_contig_bytes; - io_bp->b_resid = io_contig_bytes; + io_bp->b_bcount = (uint32_t)io_contig_bytes; + io_bp->b_bufsize = (uint32_t)io_contig_bytes; + io_bp->b_resid = (uint32_t)io_contig_bytes; io_bp->b_blkno = io_blkno; buf_reset(io_bp, io_direction); @@ -1402,7 +1392,7 @@ buf_strategy(vnode_t devvp, void *ap) /* Set block number to force biodone later */ bp->b_blkno = -1; buf_clear(bp); - } else if ((long)contig_bytes < bp->b_bcount) { + } else if (contig_bytes < (size_t)bp->b_bcount) { return buf_strategy_fragmented(devvp, bp, f_offset, contig_bytes); } } @@ -2160,9 +2150,6 @@ bufinit(void) printf("using %d 
buffer headers and %d cluster IO buffer headers\n", nbuf_headers, niobuf_headers); - /* Set up zones used by the buffer cache */ - bufzoneinit(); - /* start the bcleanbuf() thread */ bcleanbuf_thread_init(); @@ -2179,62 +2166,7 @@ bufinit(void) #define MINMETA 512 #define MAXMETA 16384 -struct meta_zone_entry { - zone_t mz_zone; - vm_size_t mz_size; - vm_size_t mz_max; - const char *mz_name; -}; - -struct meta_zone_entry meta_zones[] = { - {.mz_zone = NULL, .mz_size = (MINMETA * 1), .mz_max = 128 * (MINMETA * 1), .mz_name = "buf.512" }, - {.mz_zone = NULL, .mz_size = (MINMETA * 2), .mz_max = 64 * (MINMETA * 2), .mz_name = "buf.1024" }, - {.mz_zone = NULL, .mz_size = (MINMETA * 4), .mz_max = 16 * (MINMETA * 4), .mz_name = "buf.2048" }, - {.mz_zone = NULL, .mz_size = (MINMETA * 8), .mz_max = 512 * (MINMETA * 8), .mz_name = "buf.4096" }, - {.mz_zone = NULL, .mz_size = (MINMETA * 16), .mz_max = 512 * (MINMETA * 16), .mz_name = "buf.8192" }, - {.mz_zone = NULL, .mz_size = (MINMETA * 32), .mz_max = 512 * (MINMETA * 32), .mz_name = "buf.16384" }, - {.mz_zone = NULL, .mz_size = 0, .mz_max = 0, .mz_name = "" } /* End */ -}; - -/* - * Initialize the meta data zones - */ -static void -bufzoneinit(void) -{ - int i; - - for (i = 0; meta_zones[i].mz_size != 0; i++) { - meta_zones[i].mz_zone = - zinit(meta_zones[i].mz_size, - meta_zones[i].mz_max, - PAGE_SIZE, - meta_zones[i].mz_name); - zone_change(meta_zones[i].mz_zone, Z_CALLERACCT, FALSE); - } - buf_hdr_zone = zinit(sizeof(struct buf), 32, PAGE_SIZE, "buf headers"); - zone_change(buf_hdr_zone, Z_CALLERACCT, FALSE); -} - -static __inline__ zone_t -getbufzone(size_t size) -{ - int i; - - if ((size % 512) || (size < MINMETA) || (size > MAXMETA)) { - panic("getbufzone: incorect size = %lu", size); - } - - for (i = 0; meta_zones[i].mz_size != 0; i++) { - if (meta_zones[i].mz_size >= size) { - break; - } - } - - return meta_zones[i].mz_zone; -} - - +KALLOC_HEAP_DEFINE(KHEAP_VFS_BIO, "vfs_bio", KHEAP_ID_DATA_BUFFERS); static struct buf * bio_doread(vnode_t vp, daddr64_t blkno, int size, kauth_cred_t cred, int async, int queuetype) @@ -2575,17 +2507,23 @@ static void buf_free_meta_store(buf_t bp) { if (bp->b_bufsize) { - if (ISSET(bp->b_flags, B_ZALLOC)) { - zone_t z; - - z = getbufzone(bp->b_bufsize); - zfree(z, bp->b_datap); - } else { - kmem_free(kernel_map, bp->b_datap, bp->b_bufsize); - } + uintptr_t datap = bp->b_datap; + int bufsize = bp->b_bufsize; bp->b_datap = (uintptr_t)NULL; bp->b_bufsize = 0; + + /* + * Ensure the assignment of b_datap has global visibility + * before we free the region. + */ + OSMemoryBarrier(); + + if (ISSET(bp->b_flags, B_ZALLOC)) { + kheap_free(KHEAP_VFS_BIO, datap, bufsize); + } else { + kmem_free(kernel_map, datap, bufsize); + } } } @@ -2708,7 +2646,7 @@ void buf_brelse(buf_t bp) { struct bqueues *bufq; - long whichq; + int whichq; upl_t upl; int need_wakeup = 0; int need_bp_wakeup = 0; @@ -3164,7 +3102,7 @@ start: * in cases where the data on disk beyond (blkno + b_bcount) * is invalid, we may end up doing extra I/O. */ - if (operation == BLK_META && bp->b_bcount < size) { + if (operation == BLK_META && bp->b_bcount < (uint32_t)size) { /* * Since we are going to read in the whole size first * we first have to ensure that any pending delayed write @@ -3183,7 +3121,7 @@ start: clear_bdone = TRUE; } - if (bp->b_bufsize != size) { + if (bp->b_bufsize != (uint32_t)size) { allocbuf(bp, size); } } @@ -3197,6 +3135,7 @@ start: * cache pages we're gathering. 
*/ upl_flags |= UPL_WILL_MODIFY; + OS_FALLTHROUGH; case BLK_READ: upl_flags |= UPL_PRECIOUS; if (UBCINFOEXISTS(bp->b_vp) && bp->b_bufsize) { @@ -3328,6 +3267,7 @@ start: * we're gathering. */ upl_flags |= UPL_WILL_MODIFY; + OS_FALLTHROUGH; case BLK_READ: { off_t f_offset; size_t contig_bytes; @@ -3413,7 +3353,7 @@ start: * disk, than we can't cache the physical mapping * in the buffer header */ - if ((long)contig_bytes < bp->b_bcount) { + if ((uint32_t)contig_bytes < bp->b_bcount) { bp->b_blkno = bp->b_lblkno; } } else { @@ -3502,7 +3442,7 @@ recycle_buf_from_pool(int nsize) lck_mtx_lock_spin(buf_mtxp); TAILQ_FOREACH(bp, &bufqueues[BQ_META], b_freelist) { - if (ISSET(bp->b_flags, B_DELWRI) || bp->b_bufsize != nsize) { + if (ISSET(bp->b_flags, B_DELWRI) || bp->b_bufsize != (uint32_t)nsize) { continue; } ptr = (void *)bp->b_datap; @@ -3524,11 +3464,9 @@ int recycle_buf_failed = 0; static void * grab_memory_for_meta_buf(int nsize) { - zone_t z; void *ptr; boolean_t was_vmpriv; - z = getbufzone(nsize); /* * make sure we're NOT priviliged so that @@ -3539,7 +3477,7 @@ grab_memory_for_meta_buf(int nsize) */ was_vmpriv = set_vm_privilege(FALSE); - ptr = zalloc_nopagewait(z); + ptr = kheap_alloc(KHEAP_VFS_BIO, nsize, Z_NOPAGEWAIT); if (was_vmpriv == TRUE) { set_vm_privilege(TRUE); @@ -3557,7 +3495,7 @@ grab_memory_for_meta_buf(int nsize) set_vm_privilege(TRUE); } - ptr = zalloc(z); + ptr = kheap_alloc(KHEAP_VFS_BIO, nsize, Z_WAITOK); if (was_vmpriv == FALSE) { set_vm_privilege(FALSE); @@ -3597,15 +3535,12 @@ allocbuf(buf_t bp, int size) int nsize = roundup(size, MINMETA); if (bp->b_datap) { - vm_offset_t elem = (vm_offset_t)bp->b_datap; + void *elem = (void *)bp->b_datap; if (ISSET(bp->b_flags, B_ZALLOC)) { - if (bp->b_bufsize < nsize) { - zone_t zprev; - + if (bp->b_bufsize < (uint32_t)nsize) { /* reallocate to a bigger size */ - zprev = getbufzone(bp->b_bufsize); if (nsize <= MAXMETA) { desired_size = nsize; @@ -3616,8 +3551,8 @@ allocbuf(buf_t bp, int size) kmem_alloc_kobject(kernel_map, (vm_offset_t *)&bp->b_datap, desired_size, VM_KERN_MEMORY_FILE); CLR(bp->b_flags, B_ZALLOC); } - bcopy((void *)elem, (caddr_t)bp->b_datap, bp->b_bufsize); - zfree(zprev, elem); + bcopy(elem, (caddr_t)bp->b_datap, bp->b_bufsize); + kheap_free(KHEAP_VFS_BIO, elem, bp->b_bufsize); } else { desired_size = bp->b_bufsize; } @@ -3626,8 +3561,8 @@ allocbuf(buf_t bp, int size) /* reallocate to a bigger size */ bp->b_datap = (uintptr_t)NULL; kmem_alloc_kobject(kernel_map, (vm_offset_t *)&bp->b_datap, desired_size, VM_KERN_MEMORY_FILE); - bcopy((const void *)elem, (caddr_t)bp->b_datap, bp->b_bufsize); - kmem_free(kernel_map, elem, bp->b_bufsize); + bcopy(elem, (caddr_t)bp->b_datap, bp->b_bufsize); + kmem_free(kernel_map, (vm_offset_t)elem, bp->b_bufsize); } else { desired_size = bp->b_bufsize; } @@ -3649,7 +3584,7 @@ allocbuf(buf_t bp, int size) panic("allocbuf: NULL b_datap"); } } - bp->b_bufsize = desired_size; + bp->b_bufsize = (uint32_t)desired_size; bp->b_bcount = size; return 0; @@ -4375,7 +4310,7 @@ count_lock_queue(void) * Return a count of 'busy' buffers. Used at the time of shutdown. 
* note: This is also called from the mach side in debug context in kdp.c */ -int +uint32_t count_busy_buffers(void) { return buf_busycount + bufstats.bufs_iobufinuse; @@ -4459,7 +4394,7 @@ alloc_io_buf(vnode_t vp, int priv) } } - while (((niobuf_headers - NRESERVEDIOBUFS < bufstats.bufs_iobufinuse) && !priv) || + while ((((uint32_t)(niobuf_headers - NRESERVEDIOBUFS) < bufstats.bufs_iobufinuse) && !priv) || (bp = iobufqueue.tqh_first) == NULL) { bufstats.bufs_iobufsleeps++; @@ -4725,7 +4660,7 @@ brecover_data(buf_t bp) panic("Failed to create UPL"); } - for (upl_offset = 0; upl_offset < bp->b_bufsize; upl_offset += PAGE_SIZE) { + for (upl_offset = 0; (uint32_t)upl_offset < bp->b_bufsize; upl_offset += PAGE_SIZE) { if (!upl_valid_page(pl, upl_offset / PAGE_SIZE) || !upl_dirty_page(pl, upl_offset / PAGE_SIZE)) { ubc_upl_abort(upl, 0); goto dump_buffer; diff --git a/bsd/vfs/vfs_cache.c b/bsd/vfs/vfs_cache.c index e6a802323..9d4ed9a6d 100644 --- a/bsd/vfs/vfs_cache.c +++ b/bsd/vfs/vfs_cache.c @@ -78,7 +78,7 @@ #include #include #include -#include +#include #include #include #include @@ -109,6 +109,8 @@ * Structures associated with name cacheing. */ +ZONE_DECLARE(namecache_zone, "namecache", sizeof(struct namecache), ZC_NONE); + LIST_HEAD(nchashhead, namecache) * nchashtbl; /* Hash Table */ u_long nchashmask; u_long nchash; /* size of hash table - 1 */ @@ -151,8 +153,13 @@ lck_grp_t * strcache_lck_grp; lck_grp_attr_t * strcache_lck_grp_attr; lck_attr_t * strcache_lck_attr; +lck_grp_t * rootvnode_lck_grp; +lck_grp_attr_t * rootvnode_lck_grp_attr; +lck_attr_t * rootvnode_lck_attr; + lck_rw_t * namecache_rw_lock; lck_rw_t * strtable_rw_lock; +lck_rw_t * rootvnode_rw_lock; #define NUM_STRCACHE_LOCKS 1024 @@ -437,7 +444,7 @@ build_path_with_parent(vnode_t first_vp, vnode_t parent_vp, char *buff, int bufl char *end; char *mntpt_end; const char *str; - int len; + unsigned int len; int ret = 0; int fixhardlink; @@ -466,6 +473,16 @@ again: *end = '\0'; mntpt_end = NULL; + /* + * Catch a special corner case here: chroot to /full/path/to/dir, chdir to + * it, then open it. Without this check, the path to it will be + * /full/path/to/dir instead of "/". + */ + if (proc_root_dir_vp == first_vp) { + *--end = '/'; + goto out; + } + /* * holding the NAME_CACHE_LOCK in shared mode is * sufficient to stabilize both the vp->v_parent chain @@ -553,11 +570,11 @@ again: } goto out_unlock; } - len = strlen(str); + len = (unsigned int)strlen(str); /* * Check that there's enough space (including space for the '/') */ - if ((end - buff) < (len + 1)) { + if ((unsigned int)(end - buff) < (len + 1)) { ret = ENOSPC; goto out_unlock; } @@ -641,7 +658,7 @@ again: if (fixhardlink) { VATTR_WANTED(&va, va_name); - MALLOC_ZONE(va.va_name, caddr_t, MAXPATHLEN, M_NAMEI, M_WAITOK); + va.va_name = zalloc(ZV_NAMEI); } else { va.va_name = NULL; } @@ -653,7 +670,7 @@ again: if (fixhardlink) { if ((ret == 0) && (VATTR_IS_SUPPORTED(&va, va_name))) { str = va.va_name; - vnode_update_identity(vp, NULL, str, strlen(str), 0, VNODE_UPDATE_NAME); + vnode_update_identity(vp, NULL, str, (unsigned int)strlen(str), 0, VNODE_UPDATE_NAME); } else if (vp->v_name) { str = vp->v_name; ret = 0; @@ -661,12 +678,12 @@ again: ret = ENOENT; goto bad_news; } - len = strlen(str); + len = (unsigned int)strlen(str); /* * Check that there's enough space. */ - if ((end - buff) < (len + 1)) { + if ((unsigned int)(end - buff) < (len + 1)) { ret = ENOSPC; } else { /* Copy the name backwards. 
*/ @@ -681,7 +698,7 @@ again: *--end = '/'; } bad_news: - FREE_ZONE(va.va_name, MAXPATHLEN, M_NAMEI); + zfree(ZV_NAMEI, va.va_name); } if (ret || !VATTR_IS_SUPPORTED(&va, va_parentid)) { ret = ENOENT; @@ -857,7 +874,7 @@ vnode_getname(vnode_t vp) NAME_CACHE_LOCK_SHARED(); if (vp->v_name) { - name = vfs_addname(vp->v_name, strlen(vp->v_name), 0, 0); + name = vfs_addname(vp->v_name, (unsigned int)strlen(vp->v_name), 0, 0); } NAME_CACHE_UNLOCK(); @@ -899,7 +916,7 @@ vnode_getname_printable(vnode_t vp) * and returns it. */ NAME_CACHE_LOCK_SHARED(); - name = vfs_addname(dev_name, strlen(dev_name), 0, 0); + name = vfs_addname(dev_name, (unsigned int)strlen(dev_name), 0, 0); NAME_CACHE_UNLOCK(); return name; } @@ -942,6 +959,10 @@ vnode_update_identity(vnode_t vp, vnode_t dvp, const char *name, int name_len, u const char *vname = NULL; const char *tname = NULL; + if (name_len < 0) { + return; + } + if (flags & VNODE_UPDATE_PARENT) { if (dvp && vnode_ref(dvp) != 0) { dvp = NULLVP; @@ -960,7 +981,7 @@ vnode_update_identity(vnode_t vp, vnode_t dvp, const char *name, int name_len, u if (name != vp->v_name) { if (name && *name) { if (name_len == 0) { - name_len = strlen(name); + name_len = (int)strlen(name); } tname = vfs_addname(name, name_len, name_hashval, 0); } @@ -1465,7 +1486,7 @@ vnode_cache_authorized_action(vnode_t vp, vfs_context_t ctx, kauth_action_t acti * authorized actions if the TTL is active and * it has expired */ - vp->v_cred_timestamp = tv.tv_sec; + vp->v_cred_timestamp = (int)tv.tv_sec; } vp->v_authorized_actions |= action; @@ -1554,7 +1575,7 @@ cache_lookup_path(struct nameidata *ndp, struct componentname *cnp, vnode_t dp, hash = 1; } cnp->cn_hash = hash; - cnp->cn_namelen = cp - cnp->cn_nameptr; + cnp->cn_namelen = (int)(cp - cnp->cn_nameptr); ndp->ni_pathlen -= cnp->cn_namelen; ndp->ni_next = cp; @@ -1662,6 +1683,18 @@ skiprsrcfork: *dp_authorized = 1; if ((cnp->cn_flags & (ISLASTCN | ISDOTDOT))) { + /* + * Moving the firmlinks section to be first to catch a corner case: + * When using DOTDOT to get a parent of a firmlink, we want the + * firmlink source to be resolved even if cn_nameiop != LOOKUP. + * This is because lookup() traverses DOTDOT by calling VNOP_LOOKUP + * and has no notion about firmlinks + */ +#if CONFIG_FIRMLINKS + if (cnp->cn_flags & ISDOTDOT && dp->v_fmlink && (dp->v_flag & VFMLINKTARGET)) { + dp = dp->v_fmlink; + } +#endif if (cnp->cn_nameiop != LOOKUP) { break; } @@ -1671,13 +1704,8 @@ skiprsrcfork: if (cnp->cn_flags & NOCACHE) { break; } - if (cnp->cn_flags & ISDOTDOT) { -#if CONFIG_FIRMLINKS - if (dp->v_fmlink && (dp->v_flag & VFMLINKTARGET)) { - dp = dp->v_fmlink; - } -#endif + if (cnp->cn_flags & ISDOTDOT) { /* * Force directory hardlinks to go to * file system for ".." requests. @@ -2223,7 +2251,7 @@ cache_enter_locked(struct vnode *dvp, struct vnode *vp, struct componentname *cn /* * Allocate one more entry */ - ncp = (struct namecache *)_MALLOC_ZONE(sizeof(*ncp), M_CACHE, M_WAITOK); + ncp = zalloc(namecache_zone); numcache++; } else { /* @@ -2265,7 +2293,7 @@ cache_enter_locked(struct vnode *dvp, struct vnode *vp, struct componentname *cn // FSEvents doesn't always decompose diacritical unicode chars in the paths of the changed directories // const char *vn_name = vp ? vp->v_name : NULL; - unsigned int len = vn_name ? strlen(vn_name) : 0; + unsigned int len = vn_name ? 
(unsigned int)strlen(vn_name) : 0; if (vn_name && ncp && ncp->nc_name && strncmp(ncp->nc_name, vn_name, len) != 0) { unsigned int hash = hash_string(vn_name, len); @@ -2414,6 +2442,17 @@ nchinit(void) for (i = 0; i < NUM_STRCACHE_LOCKS; i++) { lck_mtx_init(&strcache_mtx_locks[i], strcache_lck_grp, strcache_lck_attr); } + + /* Allocate root vnode lock group attribute and group */ + rootvnode_lck_grp_attr = lck_grp_attr_alloc_init(); + + rootvnode_lck_grp = lck_grp_alloc_init("rootvnode", rootvnode_lck_grp_attr); + + /* Allocate rootvnode lock attribute */ + rootvnode_lck_attr = lck_attr_alloc_init(); + + /* Allocate rootvnode lock */ + rootvnode_rw_lock = lck_rw_alloc_init(rootvnode_lck_grp, rootvnode_lck_attr); } void @@ -2528,7 +2567,7 @@ cache_delete(struct namecache *ncp, int free_entry) ncp->nc_name = NULL; if (free_entry) { TAILQ_REMOVE(&nchead, ncp, nc_entry); - FREE_ZONE(ncp, sizeof(*ncp), M_CACHE); + zfree(namecache_zone, ncp); numcache--; } } @@ -2685,7 +2724,8 @@ resize_string_ref_table(void) lck_rw_done(strtable_rw_lock); return; } - new_table = hashinit((string_table_mask + 1) * 2, M_CACHE, &new_mask); + assert(string_table_mask < INT32_MAX); + new_table = hashinit((int)(string_table_mask + 1) * 2, M_CACHE, &new_mask); if (new_table == NULL) { printf("failed to resize the hash table.\n"); @@ -2796,7 +2836,7 @@ add_name_internal(const char *name, uint32_t len, u_int hashval, boolean_t need_ /* * it wasn't already there so add it. */ - MALLOC(entry, string_t *, sizeof(string_t) + len + 1, M_TEMP, M_WAITOK); + entry = kheap_alloc(KHEAP_DEFAULT, sizeof(string_t) + len + 1, Z_WAITOK); if (head->lh_first == NULL) { OSAddAtomic(1, &filled_buckets); @@ -2869,9 +2909,7 @@ vfs_removename(const char *nameref) lck_mtx_unlock(&strcache_mtx_locks[lock_index]); lck_rw_done(strtable_rw_lock); - if (entry != NULL) { - FREE(entry, M_TEMP); - } + kheap_free_addr(KHEAP_DEFAULT, entry); return retval; } diff --git a/bsd/vfs/vfs_cluster.c b/bsd/vfs/vfs_cluster.c index 181614fcb..5de58feb9 100644 --- a/bsd/vfs/vfs_cluster.c +++ b/bsd/vfs/vfs_cluster.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2014 Apple Inc. All rights reserved. + * Copyright (c) 2000-2020 Apple Inc. All rights reserved. 
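The vfs_cache.c hunks above replace the old MALLOC_ZONE/FREE_ZONE and MALLOC/FREE(M_TEMP, M_CACHE) calls with the newer zalloc/kheap allocators: fixed-size namecache entries come from a ZONE_DECLARE'd zone, while variable-length name strings come from KHEAP_DEFAULT. A minimal sketch of that idiom follows; the struct, zone, and function names are illustrative, not taken from the patch.

#include <kern/zalloc.h>
#include <kern/kalloc.h>
#include <string.h>

struct nc_record {
	const char *name;
};

/* Fixed-size records come from a dedicated zone declared at compile time. */
ZONE_DECLARE(nc_record_zone, "nc.record", sizeof(struct nc_record), ZC_NONE);

static struct nc_record *
nc_record_alloc(const char *name, size_t namelen)
{
	/* zalloc_flags(..., Z_WAITOK | Z_ZERO) replaces MALLOC_ZONE + bzero. */
	struct nc_record *rec = zalloc_flags(nc_record_zone, Z_WAITOK | Z_ZERO);

	/* Variable-length payloads go to a kalloc heap instead of MALLOC(M_TEMP). */
	char *copy = kheap_alloc(KHEAP_DEFAULT, namelen + 1, Z_WAITOK);
	memcpy(copy, name, namelen);
	copy[namelen] = '\0';
	rec->name = copy;
	return rec;
}

static void
nc_record_free(struct nc_record *rec)
{
	char  *name = (char *)(uintptr_t)rec->name;
	size_t len  = strlen(name) + 1;

	/* kheap_free wants the original allocation size; when the caller does
	 * not track it, kheap_free_addr() (as used for the string table) is
	 * the fallback. */
	kheap_free(KHEAP_DEFAULT, name, len);
	zfree(nc_record_zone, rec);
}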
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -67,7 +67,7 @@ #include #include #include -#include +#include #include #include #include @@ -157,12 +157,15 @@ struct cl_direct_read_lock { static LIST_HEAD(cl_direct_read_locks, cl_direct_read_lock) cl_direct_read_locks[CL_DIRECT_READ_LOCK_BUCKETS]; -static lck_spin_t cl_direct_read_spin_lock; +static LCK_GRP_DECLARE(cl_mtx_grp, "cluster I/O"); +static LCK_MTX_DECLARE(cl_transaction_mtxp, &cl_mtx_grp); +static LCK_SPIN_DECLARE(cl_direct_read_spin_lock, &cl_mtx_grp); -static lck_grp_t *cl_mtx_grp; -static lck_attr_t *cl_mtx_attr; -static lck_grp_attr_t *cl_mtx_grp_attr; -static lck_mtx_t *cl_transaction_mtxp; +static ZONE_DECLARE(cl_rd_zone, "cluster_read", + sizeof(struct cl_readahead), ZC_ZFREE_CLEARMEM | ZC_NOENCRYPT); + +static ZONE_DECLARE(cl_wr_zone, "cluster_write", + sizeof(struct cl_writebehind), ZC_ZFREE_CLEARMEM | ZC_NOENCRYPT); #define IO_UNKNOWN 0 #define IO_DIRECT 1 @@ -194,18 +197,18 @@ static void cluster_read_upl_release(upl_t upl, int start_pg, int last_pg, int t static int cluster_copy_ubc_data_internal(vnode_t vp, struct uio *uio, int *io_resid, int mark_dirty, int take_reference); static int cluster_read_copy(vnode_t vp, struct uio *uio, u_int32_t io_req_size, off_t filesize, int flags, - int (*)(buf_t, void *), void *callback_arg); + int (*)(buf_t, void *), void *callback_arg) __attribute__((noinline)); static int cluster_read_direct(vnode_t vp, struct uio *uio, off_t filesize, int *read_type, u_int32_t *read_length, - int flags, int (*)(buf_t, void *), void *callback_arg); + int flags, int (*)(buf_t, void *), void *callback_arg) __attribute__((noinline)); static int cluster_read_contig(vnode_t vp, struct uio *uio, off_t filesize, int *read_type, u_int32_t *read_length, - int (*)(buf_t, void *), void *callback_arg, int flags); + int (*)(buf_t, void *), void *callback_arg, int flags) __attribute__((noinline)); static int cluster_write_copy(vnode_t vp, struct uio *uio, u_int32_t io_req_size, off_t oldEOF, off_t newEOF, - off_t headOff, off_t tailOff, int flags, int (*)(buf_t, void *), void *callback_arg); + off_t headOff, off_t tailOff, int flags, int (*)(buf_t, void *), void *callback_arg) __attribute__((noinline)); static int cluster_write_direct(vnode_t vp, struct uio *uio, off_t oldEOF, off_t newEOF, - int *write_type, u_int32_t *write_length, int flags, int (*)(buf_t, void *), void *callback_arg); + int *write_type, u_int32_t *write_length, int flags, int (*)(buf_t, void *), void *callback_arg) __attribute__((noinline)); static int cluster_write_contig(vnode_t vp, struct uio *uio, off_t newEOF, - int *write_type, u_int32_t *write_length, int (*)(buf_t, void *), void *callback_arg, int bflag); + int *write_type, u_int32_t *write_length, int (*)(buf_t, void *), void *callback_arg, int bflag) __attribute__((noinline)); static void cluster_update_state_internal(vnode_t vp, struct cl_extent *cl, int flags, boolean_t defer_writes, boolean_t *first_pass, off_t write_off, int write_cnt, off_t newEOF, int (*callback)(buf_t, void *), void *callback_arg, boolean_t vm_initiated); @@ -276,17 +279,17 @@ int (*bootcache_contains_block)(dev_t device, u_int64_t blkno) = NULL; #define WRITE_BEHIND 1 #define WRITE_BEHIND_SSD 1 -#if CONFIG_EMBEDDED +#if !defined(XNU_TARGET_OS_OSX) #define PREFETCH 1 #define PREFETCH_SSD 1 uint32_t speculative_prefetch_max = (2048 * 1024); /* maximum bytes in a specluative read-ahead */ uint32_t speculative_prefetch_max_iosize = (512 * 1024); /* maximum I/O size to use in a specluative read-ahead */ 
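The cluster-I/O hunks above swap the dynamically allocated lock group, transaction mutex, and direct-read spin lock for statically initialized ones (LCK_GRP_DECLARE / LCK_MTX_DECLARE / LCK_SPIN_DECLARE), which is why the runtime setup in cluster_init() can be deleted further down. A minimal sketch of the pattern, with illustrative identifiers:

#include <kern/locks.h>

/* Locks are declared and initialized at compile time. */
static LCK_GRP_DECLARE(demo_lck_grp, "demo subsystem");
static LCK_MTX_DECLARE(demo_mtx, &demo_lck_grp);
static LCK_SPIN_DECLARE(demo_spin, &demo_lck_grp);

static void
demo_use_locks(void)
{
	/* Callers take the address of the statically declared locks... */
	lck_mtx_lock(&demo_mtx);
	lck_mtx_unlock(&demo_mtx);

	/* ...instead of a pointer returned by lck_mtx_alloc_init(), so the
	 * old init-time boilerplate (grp_attr/attr allocation, alloc_init,
	 * NULL check and panic) is no longer needed. */
	lck_spin_lock(&demo_spin);
	lck_spin_unlock(&demo_spin);
}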
-#else +#else /* XNU_TARGET_OS_OSX */ #define PREFETCH 3 #define PREFETCH_SSD 2 uint32_t speculative_prefetch_max = (MAX_UPL_SIZE_BYTES * 3); /* maximum bytes in a specluative read-ahead */ uint32_t speculative_prefetch_max_iosize = (512 * 1024); /* maximum I/O size to use in a specluative read-ahead on SSDs*/ -#endif +#endif /* ! XNU_TARGET_OS_OSX */ #define IO_SCALE(vp, base) (vp->v_mount->mnt_ioscale * (base)) @@ -312,25 +315,6 @@ SYSCTL_INT(_debug, OID_AUTO, lowpri_throttle_max_iosize, CTLFLAG_RW | CTLFLAG_LO void cluster_init(void) { - /* - * allocate lock group attribute and group - */ - cl_mtx_grp_attr = lck_grp_attr_alloc_init(); - cl_mtx_grp = lck_grp_alloc_init("cluster I/O", cl_mtx_grp_attr); - - /* - * allocate the lock attribute - */ - cl_mtx_attr = lck_attr_alloc_init(); - - cl_transaction_mtxp = lck_mtx_alloc_init(cl_mtx_grp, cl_mtx_attr); - - if (cl_transaction_mtxp == NULL) { - panic("cluster_init: failed to allocate cl_transaction_mtxp"); - } - - lck_spin_init(&cl_direct_read_spin_lock, cl_mtx_grp, cl_mtx_attr); - for (int i = 0; i < CL_DIRECT_READ_LOCK_BUCKETS; ++i) { LIST_INIT(&cl_direct_read_locks[i]); } @@ -414,19 +398,17 @@ cluster_get_rap(vnode_t vp) ubc = vp->v_ubcinfo; if ((rap = ubc->cl_rahead) == NULL) { - MALLOC_ZONE(rap, struct cl_readahead *, sizeof *rap, M_CLRDAHEAD, M_WAITOK); - - bzero(rap, sizeof *rap); + rap = zalloc_flags(cl_rd_zone, Z_WAITOK | Z_ZERO); rap->cl_lastr = -1; - lck_mtx_init(&rap->cl_lockr, cl_mtx_grp, cl_mtx_attr); + lck_mtx_init(&rap->cl_lockr, &cl_mtx_grp, LCK_ATTR_NULL); vnode_lock(vp); if (ubc->cl_rahead == NULL) { ubc->cl_rahead = rap; } else { - lck_mtx_destroy(&rap->cl_lockr, cl_mtx_grp); - FREE_ZONE(rap, sizeof *rap, M_CLRDAHEAD); + lck_mtx_destroy(&rap->cl_lockr, &cl_mtx_grp); + zfree(cl_rd_zone, rap); rap = ubc->cl_rahead; } vnode_unlock(vp); @@ -465,18 +447,17 @@ cluster_get_wbp(vnode_t vp, int flags) return (struct cl_writebehind *)NULL; } - MALLOC_ZONE(wbp, struct cl_writebehind *, sizeof *wbp, M_CLWRBEHIND, M_WAITOK); + wbp = zalloc_flags(cl_wr_zone, Z_WAITOK | Z_ZERO); - bzero(wbp, sizeof *wbp); - lck_mtx_init(&wbp->cl_lockw, cl_mtx_grp, cl_mtx_attr); + lck_mtx_init(&wbp->cl_lockw, &cl_mtx_grp, LCK_ATTR_NULL); vnode_lock(vp); if (ubc->cl_wbehind == NULL) { ubc->cl_wbehind = wbp; } else { - lck_mtx_destroy(&wbp->cl_lockw, cl_mtx_grp); - FREE_ZONE(wbp, sizeof *wbp, M_CLWRBEHIND); + lck_mtx_destroy(&wbp->cl_lockw, &cl_mtx_grp); + zfree(cl_wr_zone, wbp); wbp = ubc->cl_wbehind; } vnode_unlock(vp); @@ -767,7 +748,7 @@ cluster_iodone(buf_t bp, void *callback_arg) cbp_head, bp->b_lblkno, bp->b_bcount, bp->b_flags, 0); if (cbp_head->b_trans_next || !(cbp_head->b_flags & B_EOT)) { - lck_mtx_lock_spin(cl_transaction_mtxp); + lck_mtx_lock_spin(&cl_transaction_mtxp); bp->b_flags |= B_TDONE; @@ -780,7 +761,7 @@ cluster_iodone(buf_t bp, void *callback_arg) KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 20)) | DBG_FUNC_END, cbp_head, cbp, cbp->b_bcount, cbp->b_flags, 0); - lck_mtx_unlock(cl_transaction_mtxp); + lck_mtx_unlock(&cl_transaction_mtxp); return 0; } @@ -789,7 +770,7 @@ cluster_iodone(buf_t bp, void *callback_arg) KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 20)) | DBG_FUNC_END, cbp_head, cbp, cbp->b_bcount, cbp->b_flags, 0); - lck_mtx_unlock(cl_transaction_mtxp); + lck_mtx_unlock(&cl_transaction_mtxp); wakeup(cbp); return 0; @@ -799,7 +780,7 @@ cluster_iodone(buf_t bp, void *callback_arg) transaction_complete = TRUE; } } - lck_mtx_unlock(cl_transaction_mtxp); + lck_mtx_unlock(&cl_transaction_mtxp); if (transaction_complete == FALSE) { 
KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 20)) | DBG_FUNC_END, @@ -1024,7 +1005,7 @@ cluster_wait_IO(buf_t cbp_head, int async) bool done = true; buf_t last = NULL; - lck_mtx_lock_spin(cl_transaction_mtxp); + lck_mtx_lock_spin(&cl_transaction_mtxp); for (cbp = cbp_head; cbp; last = cbp, cbp = cbp->b_trans_next) { if (!ISSET(cbp->b_flags, B_TDONE)) { @@ -1037,7 +1018,7 @@ cluster_wait_IO(buf_t cbp_head, int async) DTRACE_IO1(wait__start, buf_t, last); do { - msleep(last, cl_transaction_mtxp, PSPIN | (PRIBIO + 1), "cluster_wait_IO", NULL); + msleep(last, &cl_transaction_mtxp, PSPIN | (PRIBIO + 1), "cluster_wait_IO", NULL); /* * We should only have been woken up if all the @@ -1056,7 +1037,7 @@ cluster_wait_IO(buf_t cbp_head, int async) last->b_trans_next = NULL; } - lck_mtx_unlock(cl_transaction_mtxp); + lck_mtx_unlock(&cl_transaction_mtxp); } else { // !async for (cbp = cbp_head; cbp; cbp = cbp->b_trans_next) { buf_biowait(cbp); @@ -1299,7 +1280,7 @@ cluster_io(vnode_t vp, upl_t upl, vm_offset_t upl_offset, off_t f_offset, int no * so we'll go ahead and zero out the portion of the page we can't * read in from the file */ - zero_offset = upl_offset + non_rounded_size; + zero_offset = (int)(upl_offset + non_rounded_size); } else if (!ISSET(flags, CL_READ) && ISSET(flags, CL_DIRECT_IO)) { assert(ISSET(flags, CL_COMMIT)); @@ -1430,7 +1411,7 @@ cluster_io(vnode_t vp, upl_t upl, vm_offset_t upl_offset, off_t f_offset, int no if (cbp_head) { buf_t prev_cbp; - int bytes_in_last_page; + uint32_t bytes_in_last_page; /* * first we have to wait for the the current outstanding I/Os @@ -1510,11 +1491,11 @@ cluster_io(vnode_t vp, upl_t upl, vm_offset_t upl_offset, off_t f_offset, int no trans_count = 0; } } - if (vnode_pageout(vp, upl, trunc_page(upl_offset), trunc_page_64(f_offset), PAGE_SIZE, pageout_flags, NULL) != PAGER_SUCCESS) { + if (vnode_pageout(vp, upl, (upl_offset_t)trunc_page(upl_offset), trunc_page_64(f_offset), PAGE_SIZE, pageout_flags, NULL) != PAGER_SUCCESS) { error = EINVAL; } e_offset = round_page_64(f_offset + 1); - io_size = e_offset - f_offset; + io_size = (u_int)(e_offset - f_offset); f_offset += io_size; upl_offset += io_size; @@ -1588,7 +1569,7 @@ cluster_io(vnode_t vp, upl_t upl, vm_offset_t upl_offset, off_t f_offset, int no */ bytes_to_zero = non_rounded_size; if (!(flags & CL_NOZERO)) { - bytes_to_zero = (((upl_offset + io_size) + (PAGE_SIZE - 1)) & ~PAGE_MASK) - upl_offset; + bytes_to_zero = (int)((((upl_offset + io_size) + (PAGE_SIZE - 1)) & ~PAGE_MASK) - upl_offset); } zero_offset = 0; @@ -1598,7 +1579,7 @@ cluster_io(vnode_t vp, upl_t upl, vm_offset_t upl_offset, off_t f_offset, int no pg_count = 0; - cluster_zero(upl, upl_offset, bytes_to_zero, real_bp); + cluster_zero(upl, (upl_offset_t)upl_offset, bytes_to_zero, real_bp); if (cbp_head) { int pg_resid; @@ -1611,7 +1592,7 @@ cluster_io(vnode_t vp, upl_t upl, vm_offset_t upl_offset, off_t f_offset, int no */ commit_offset = (upl_offset + (PAGE_SIZE - 1)) & ~PAGE_MASK; - pg_resid = commit_offset - upl_offset; + pg_resid = (int)(commit_offset - upl_offset); if (bytes_to_zero >= pg_resid) { /* @@ -1654,7 +1635,8 @@ cluster_io(vnode_t vp, upl_t upl, vm_offset_t upl_offset, off_t f_offset, int no assert(!upl_associated_upl(upl)); if ((flags & CL_COMMIT) && pg_count) { - ubc_upl_commit_range(upl, commit_offset, pg_count * PAGE_SIZE, + ubc_upl_commit_range(upl, (upl_offset_t)commit_offset, + pg_count * PAGE_SIZE, UPL_COMMIT_CLEAR_DIRTY | UPL_COMMIT_FREE_ON_EMPTY); } upl_offset += io_size; @@ -1723,7 +1705,7 @@ cluster_io(vnode_t vp, 
upl_t upl, vm_offset_t upl_offset, off_t f_offset, int no * we can finally issue the i/o on the transaction. */ if (aligned_ofs > upl_offset) { - io_size = aligned_ofs - upl_offset; + io_size = (u_int)(aligned_ofs - upl_offset); pg_count--; } } @@ -1736,9 +1718,13 @@ cluster_io(vnode_t vp, upl_t upl, vm_offset_t upl_offset, off_t f_offset, int no * bufs from the alloc_io_buf pool */ priv = 1; - } else if ((flags & CL_ASYNC) && !(flags & CL_PAGEOUT)) { + } else if ((flags & CL_ASYNC) && !(flags & CL_PAGEOUT) && !cbp_head) { /* * Throttle the speculative IO + * + * We can only throttle this if it is the first iobuf + * for the transaction. alloc_io_buf implements + * additional restrictions for diskimages anyway. */ priv = 0; } else { @@ -1775,7 +1761,7 @@ cluster_io(vnode_t vp, upl_t upl, vm_offset_t upl_offset, off_t f_offset, int no cbp->b_blkno = blkno; cbp->b_bcount = io_size; - if (buf_setupl(cbp, upl, upl_offset)) { + if (buf_setupl(cbp, upl, (uint32_t)upl_offset)) { panic("buf_setupl failed\n"); } #if CONFIG_IOSCHED @@ -1909,8 +1895,9 @@ cluster_io(vnode_t vp, upl_t upl, vm_offset_t upl_offset, off_t f_offset, int no } if (ISSET(flags, CL_COMMIT)) { - cluster_handle_associated_upl(iostate, upl, upl_offset, - upl_end_offset - upl_offset); + cluster_handle_associated_upl(iostate, upl, + (upl_offset_t)upl_offset, + (upl_size_t)(upl_end_offset - upl_offset)); } // Free all the IO buffers in this transaction @@ -1959,9 +1946,10 @@ cluster_io(vnode_t vp, upl_t upl, vm_offset_t upl_offset, off_t f_offset, int no int upl_flags; pg_offset = upl_offset & PAGE_MASK; - abort_size = (upl_end_offset - upl_offset + PAGE_MASK) & ~PAGE_MASK; + abort_size = (int)((upl_end_offset - upl_offset + PAGE_MASK) & ~PAGE_MASK); - upl_flags = cluster_ioerror(upl, upl_offset - pg_offset, abort_size, error, io_flags, vp); + upl_flags = cluster_ioerror(upl, (int)(upl_offset - pg_offset), + abort_size, error, io_flags, vp); KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 28)) | DBG_FUNC_NONE, upl, upl_offset - pg_offset, abort_size, (error << 24) | upl_flags, 0); @@ -2023,7 +2011,7 @@ cluster_read_prefetch(vnode_t vp, off_t f_offset, u_int size, off_t filesize, in return 0; } if ((off_t)size > (filesize - f_offset)) { - size = filesize - f_offset; + size = (u_int)(filesize - f_offset); } pages_in_prefetch = (size + (PAGE_SIZE - 1)) / PAGE_SIZE; @@ -2082,7 +2070,7 @@ cluster_read_ahead(vnode_t vp, struct cl_extent *extent, off_t filesize, struct return; } } - r_addr = max(extent->e_addr, rap->cl_maxra) + 1; + r_addr = MAX(extent->e_addr, rap->cl_maxra) + 1; f_offset = (off_t)(r_addr * PAGE_SIZE_64); size_of_prefetch = 0; @@ -2105,7 +2093,7 @@ cluster_read_ahead(vnode_t vp, struct cl_extent *extent, off_t filesize, struct if (read_size > max_prefetch / PAGE_SIZE) { rap->cl_ralen = max_prefetch / PAGE_SIZE; } else { - rap->cl_ralen = read_size; + rap->cl_ralen = (int)read_size; } } size_of_prefetch = cluster_read_prefetch(vp, f_offset, rap->cl_ralen * PAGE_SIZE, filesize, callback, callback_arg, bflag); @@ -2188,7 +2176,7 @@ cluster_pageout_ext(vnode_t vp, upl_t upl, upl_offset_t upl_offset, off_t f_offs if (size < max_size) { io_size = size; } else { - io_size = max_size; + io_size = (int)max_size; } rounded_size = (io_size + (PAGE_SIZE - 1)) & ~PAGE_MASK; @@ -2261,7 +2249,7 @@ cluster_pagein_ext(vnode_t vp, upl_t upl, upl_offset_t upl_offset, off_t f_offse if (size < max_size) { io_size = size; } else { - io_size = max_size; + io_size = (int)max_size; } rounded_size = (io_size + (PAGE_SIZE - 1)) & ~PAGE_MASK; @@ -2504,6 +2492,8 
@@ cluster_write_direct(vnode_t vp, struct uio *uio, off_t oldEOF, off_t newEOF, in KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 75)) | DBG_FUNC_START, (int)uio->uio_offset, *write_length, (int)newEOF, 0, 0); + assert(vm_map_page_shift(current_map()) >= PAGE_SHIFT); + max_upl_size = cluster_max_io_size(vp->v_mount, CL_WRITE); io_flag = CL_ASYNC | CL_PRESERVE | CL_COMMIT | CL_THROTTLE | CL_DIRECT_IO; @@ -2525,7 +2515,7 @@ cluster_write_direct(vnode_t vp, struct uio *uio, off_t oldEOF, off_t newEOF, in iostate.io_error = 0; iostate.io_wanted = 0; - lck_mtx_init(&iostate.io_mtxp, cl_mtx_grp, cl_mtx_attr); + lck_mtx_init(&iostate.io_mtxp, &cl_mtx_grp, LCK_ATTR_NULL); mem_alignment_mask = (u_int32_t)vp->v_mount->mnt_alignmentmask; devblocksize = (u_int32_t)vp->v_mount->mnt_devblocksize; @@ -2634,12 +2624,12 @@ next_dwrite: vm_map_t map = UIO_SEG_IS_USER_SPACE(uio->uio_segflg) ? current_map() : kernel_map; for (force_data_sync = 0; force_data_sync < 3; force_data_sync++) { pages_in_pl = 0; - upl_size = upl_needed_size; + upl_size = (upl_size_t)upl_needed_size; upl_flags = UPL_FILE_IO | UPL_COPYOUT_FROM | UPL_NO_SYNC | UPL_CLEAN_IN_PLACE | UPL_SET_INTERNAL | UPL_SET_LITE | UPL_SET_IO_WIRE; kret = vm_map_get_upl(map, - (vm_map_offset_t)(iov_base & ~((user_addr_t)PAGE_MASK)), + vm_map_trunc_page(iov_base, vm_map_page_mask(map)), &upl_size, &upl, NULL, @@ -2832,7 +2822,7 @@ wait_for_dwrites: retval = iostate.io_error; } - lck_mtx_destroy(&iostate.io_mtxp, cl_mtx_grp); + lck_mtx_destroy(&iostate.io_mtxp, &cl_mtx_grp); if (io_throttled == TRUE && retval == 0) { retval = EAGAIN; @@ -2901,7 +2891,7 @@ cluster_write_contig(vnode_t vp, struct uio *uio, off_t newEOF, int *write_type, iostate.io_error = 0; iostate.io_wanted = 0; - lck_mtx_init(&iostate.io_mtxp, cl_mtx_grp, cl_mtx_attr); + lck_mtx_init(&iostate.io_mtxp, &cl_mtx_grp, LCK_ATTR_NULL); next_cwrite: io_size = *write_length; @@ -2912,13 +2902,13 @@ next_cwrite: upl_needed_size = upl_offset + io_size; pages_in_pl = 0; - upl_size = upl_needed_size; + upl_size = (upl_size_t)upl_needed_size; upl_flags = UPL_FILE_IO | UPL_COPYOUT_FROM | UPL_NO_SYNC | UPL_CLEAN_IN_PLACE | UPL_SET_INTERNAL | UPL_SET_LITE | UPL_SET_IO_WIRE; vm_map_t map = UIO_SEG_IS_USER_SPACE(uio->uio_segflg) ? 
current_map() : kernel_map; kret = vm_map_get_upl(map, - (vm_map_offset_t)(iov_base & ~((user_addr_t)PAGE_MASK)), + vm_map_trunc_page(iov_base, vm_map_page_mask(map)), &upl_size, &upl[cur_upl], NULL, &pages_in_pl, &upl_flags, VM_KERN_MEMORY_FILE, 0); if (kret != KERN_SUCCESS) { @@ -3045,7 +3035,7 @@ wait_for_cwrites: error = iostate.io_error; } - lck_mtx_destroy(&iostate.io_mtxp, cl_mtx_grp); + lck_mtx_destroy(&iostate.io_mtxp, &cl_mtx_grp); if (error == 0 && tail_size) { error = cluster_align_phys_io(vp, uio, src_paddr, tail_size, 0, callback, callback_arg); @@ -3497,7 +3487,7 @@ cluster_write_copy(vnode_t vp, struct uio *uio, u_int32_t io_req_size, off_t old } if (uio) { write_off = uio->uio_offset; - write_cnt = uio_resid(uio); + write_cnt = (int)uio_resid(uio); /* * delay updating the sequential write info * in the control block until we've obtained @@ -3536,7 +3526,7 @@ cluster_write_copy(vnode_t vp, struct uio *uio, u_int32_t io_req_size, off_t old if ((start_offset + total_size) > max_io_size) { total_size = max_io_size - start_offset; } - xfer_resid = total_size; + xfer_resid = (int)total_size; retval = cluster_copy_ubc_data_internal(vp, uio, &xfer_resid, 1, 1); @@ -3580,11 +3570,11 @@ cluster_write_copy(vnode_t vp, struct uio *uio, u_int32_t io_req_size, off_t old upl_size = max_io_size; } - pages_in_upl = upl_size / PAGE_SIZE; - io_size = upl_size - start_offset; + pages_in_upl = (int)(upl_size / PAGE_SIZE); + io_size = (int)(upl_size - start_offset); if ((long long)io_size > total_size) { - io_size = total_size; + io_size = (int)total_size; } KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 41)) | DBG_FUNC_START, upl_size, io_size, total_size, 0, 0); @@ -3597,7 +3587,7 @@ cluster_write_copy(vnode_t vp, struct uio *uio, u_int32_t io_req_size, off_t old */ kret = ubc_create_upl_kernel(vp, upl_f_offset, - upl_size, + (int)upl_size, &upl, &pl, UPL_SET_LITE | ((uio != NULL && (uio->uio_flags & UIO_FLAGS_IS_COMPRESSED_FILE)) ? 
0 : UPL_WILL_MODIFY), @@ -3620,7 +3610,7 @@ cluster_write_copy(vnode_t vp, struct uio *uio, u_int32_t io_req_size, off_t old read_size = PAGE_SIZE; if ((upl_f_offset + read_size) > oldEOF) { - read_size = oldEOF - upl_f_offset; + read_size = (int)(oldEOF - upl_f_offset); } retval = cluster_io(vp, upl, 0, upl_f_offset, read_size, @@ -3635,7 +3625,8 @@ cluster_write_copy(vnode_t vp, struct uio *uio, u_int32_t io_req_size, off_t old ubc_upl_abort_range(upl, 0, PAGE_SIZE, UPL_ABORT_DUMP_PAGES | UPL_ABORT_FREE_ON_EMPTY); if (upl_size > PAGE_SIZE) { - ubc_upl_abort_range(upl, 0, upl_size, UPL_ABORT_FREE_ON_EMPTY); + ubc_upl_abort_range(upl, 0, (upl_size_t)upl_size, + UPL_ABORT_FREE_ON_EMPTY); } KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 45)) | DBG_FUNC_NONE, @@ -3652,13 +3643,13 @@ cluster_write_copy(vnode_t vp, struct uio *uio, u_int32_t io_req_size, off_t old upl_offset = upl_size - PAGE_SIZE; if ((upl_f_offset + start_offset + io_size) < oldEOF && - !upl_valid_page(pl, upl_offset / PAGE_SIZE)) { + !upl_valid_page(pl, (int)(upl_offset / PAGE_SIZE))) { int read_size; read_size = PAGE_SIZE; if ((off_t)(upl_f_offset + upl_offset + read_size) > oldEOF) { - read_size = oldEOF - (upl_f_offset + upl_offset); + read_size = (int)(oldEOF - (upl_f_offset + upl_offset)); } retval = cluster_io(vp, upl, upl_offset, upl_f_offset + upl_offset, read_size, @@ -3670,10 +3661,10 @@ cluster_write_copy(vnode_t vp, struct uio *uio, u_int32_t io_req_size, off_t old * need to release the rest of the pages in the upl without * modifying there state and mark the failed page in error */ - ubc_upl_abort_range(upl, upl_offset, PAGE_SIZE, UPL_ABORT_DUMP_PAGES | UPL_ABORT_FREE_ON_EMPTY); + ubc_upl_abort_range(upl, (upl_offset_t)upl_offset, PAGE_SIZE, UPL_ABORT_DUMP_PAGES | UPL_ABORT_FREE_ON_EMPTY); if (upl_size > PAGE_SIZE) { - ubc_upl_abort_range(upl, 0, upl_size, UPL_ABORT_FREE_ON_EMPTY); + ubc_upl_abort_range(upl, 0, (upl_size_t)upl_size, UPL_ABORT_FREE_ON_EMPTY); } KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 45)) | DBG_FUNC_NONE, @@ -3687,7 +3678,7 @@ cluster_write_copy(vnode_t vp, struct uio *uio, u_int32_t io_req_size, off_t old while (zero_cnt && xfer_resid) { if (zero_cnt < (long long)xfer_resid) { - bytes_to_zero = zero_cnt; + bytes_to_zero = (int)zero_cnt; } else { bytes_to_zero = xfer_resid; } @@ -3708,7 +3699,7 @@ cluster_write_copy(vnode_t vp, struct uio *uio, u_int32_t io_req_size, off_t old retval = cluster_copy_upl_data(uio, upl, io_offset, (int *)&io_requested); if (retval) { - ubc_upl_abort_range(upl, 0, upl_size, UPL_ABORT_FREE_ON_EMPTY); + ubc_upl_abort_range(upl, 0, (upl_size_t)upl_size, UPL_ABORT_FREE_ON_EMPTY); KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 45)) | DBG_FUNC_NONE, upl, 0, 0, retval, 0); @@ -3720,7 +3711,7 @@ cluster_write_copy(vnode_t vp, struct uio *uio, u_int32_t io_req_size, off_t old } while (xfer_resid && zero_cnt1 && retval == 0) { if (zero_cnt1 < (long long)xfer_resid) { - bytes_to_zero = zero_cnt1; + bytes_to_zero = (int)zero_cnt1; } else { bytes_to_zero = xfer_resid; } @@ -3749,7 +3740,7 @@ cluster_write_copy(vnode_t vp, struct uio *uio, u_int32_t io_req_size, off_t old * if the file gets extended again in such a way as to leave a * hole starting at this EOF, we'll have zero's in the correct spot */ - cluster_zero(upl, io_size, upl_size - io_size, NULL); + cluster_zero(upl, io_size, (int)(upl_size - io_size), NULL); } /* * release the upl now if we hold one since... 
@@ -3769,7 +3760,7 @@ cluster_write_copy(vnode_t vp, struct uio *uio, u_int32_t io_req_size, off_t old * of this vnode is in progress, we will deadlock if the pages being flushed intersect the pages * we hold since the flushing context is holding the cluster lock. */ - ubc_upl_commit_range(upl, 0, upl_size, + ubc_upl_commit_range(upl, 0, (upl_size_t)upl_size, UPL_COMMIT_SET_DIRTY | UPL_COMMIT_INACTIVATE | UPL_COMMIT_FREE_ON_EMPTY); check_cluster: /* @@ -4022,11 +4013,12 @@ cluster_read_copy(vnode_t vp, struct uio *uio, u_int32_t io_req_size, off_t file while (io_req_size && uio->uio_offset < filesize && retval == 0) { max_size = filesize - uio->uio_offset; + bool leftover_upl_aborted = false; if ((off_t)(io_req_size) < max_size) { io_size = io_req_size; } else { - io_size = max_size; + io_size = (u_int32_t)max_size; } if (!(flags & IO_NOCACHE)) { @@ -4041,7 +4033,7 @@ cluster_read_copy(vnode_t vp, struct uio *uio, u_int32_t io_req_size, off_t file * the cache and have issued an I/O, than we'll assume that we're likely * to continue to miss in the cache and it's to our advantage to try and prefetch */ - if (last_request_offset && last_ioread_offset && (size_of_prefetch = (last_request_offset - last_ioread_offset))) { + if (last_request_offset && last_ioread_offset && (size_of_prefetch = (u_int32_t)(last_request_offset - last_ioread_offset))) { if ((last_ioread_offset - uio->uio_offset) <= max_rd_size && prefetch_enabled) { /* * we've already issued I/O for this request and @@ -4224,7 +4216,7 @@ cluster_read_copy(vnode_t vp, struct uio *uio, u_int32_t io_req_size, off_t file * the end of the last physical block associated with the file */ if (iolock_inited == FALSE) { - lck_mtx_init(&iostate.io_mtxp, cl_mtx_grp, cl_mtx_attr); + lck_mtx_init(&iostate.io_mtxp, &cl_mtx_grp, LCK_ATTR_NULL); iolock_inited = TRUE; } @@ -4232,7 +4224,33 @@ cluster_read_copy(vnode_t vp, struct uio *uio, u_int32_t io_req_size, off_t file io_size = (last_pg - start_pg) * PAGE_SIZE; if ((off_t)(upl_f_offset + upl_offset + io_size) > filesize) { - io_size = filesize - (upl_f_offset + upl_offset); + io_size = (u_int32_t)(filesize - (upl_f_offset + upl_offset)); + } + + /* + * Find out if this needs verification, we'll have to manage the UPL + * diffrently if so. Note that this call only lets us know if + * verification is enabled on this mount point, the actual verification + * is performed in the File system. 
+ */ + size_t verify_block_size = 0; + if ((VNOP_VERIFY(vp, start_offset, NULL, 0, &verify_block_size, VNODE_VERIFY_DEFAULT, NULL) == 0) /* && verify_block_size */) { + for (uio_last = last_pg; uio_last < pages_in_upl; uio_last++) { + if (!upl_valid_page(pl, uio_last)) { + break; + } + } + if (uio_last < pages_in_upl) { + /* + * there were some invalid pages beyond the valid pages + * that we didn't issue an I/O for, just release them + * unchanged now, so that any prefetch/readahed can + * include them + */ + ubc_upl_abort_range(upl, uio_last * PAGE_SIZE, + (pages_in_upl - uio_last) * PAGE_SIZE, UPL_ABORT_FREE_ON_EMPTY); + leftover_upl_aborted = true; + } } /* @@ -4263,20 +4281,22 @@ cluster_read_copy(vnode_t vp, struct uio *uio, u_int32_t io_req_size, off_t file */ u_int val_size; - for (uio_last = last_pg; uio_last < pages_in_upl; uio_last++) { - if (!upl_valid_page(pl, uio_last)) { - break; + if (!leftover_upl_aborted) { + for (uio_last = last_pg; uio_last < pages_in_upl; uio_last++) { + if (!upl_valid_page(pl, uio_last)) { + break; + } + } + if (uio_last < pages_in_upl) { + /* + * there were some invalid pages beyond the valid pages + * that we didn't issue an I/O for, just release them + * unchanged now, so that any prefetch/readahed can + * include them + */ + ubc_upl_abort_range(upl, uio_last * PAGE_SIZE, + (pages_in_upl - uio_last) * PAGE_SIZE, UPL_ABORT_FREE_ON_EMPTY); } - } - if (uio_last < pages_in_upl) { - /* - * there were some invalid pages beyond the valid pages - * that we didn't issue an I/O for, just release them - * unchanged now, so that any prefetch/readahed can - * include them - */ - ubc_upl_abort_range(upl, uio_last * PAGE_SIZE, - (pages_in_upl - uio_last) * PAGE_SIZE, UPL_ABORT_FREE_ON_EMPTY); } /* @@ -4287,7 +4307,7 @@ cluster_read_copy(vnode_t vp, struct uio *uio, u_int32_t io_req_size, off_t file val_size = (uio_last * PAGE_SIZE) - start_offset; if (val_size > max_size) { - val_size = max_size; + val_size = (u_int)max_size; } if (val_size > io_req_size) { @@ -4298,7 +4318,7 @@ cluster_read_copy(vnode_t vp, struct uio *uio, u_int32_t io_req_size, off_t file last_ioread_offset = uio->uio_offset + val_size; } - if ((size_of_prefetch = (last_request_offset - last_ioread_offset)) && prefetch_enabled) { + if ((size_of_prefetch = (u_int32_t)(last_request_offset - last_ioread_offset)) && prefetch_enabled) { if ((last_ioread_offset - (uio->uio_offset + val_size)) <= upl_size) { /* * if there's still I/O left to do for this request, and... 
@@ -4391,7 +4411,12 @@ cluster_read_copy(vnode_t vp, struct uio *uio, u_int32_t io_req_size, off_t file * their state */ if (error) { - ubc_upl_abort_range(upl, 0, upl_size, UPL_ABORT_FREE_ON_EMPTY); + if (leftover_upl_aborted) { + ubc_upl_abort_range(upl, start_pg * PAGE_SIZE, (uio_last - start_pg) * PAGE_SIZE, + UPL_ABORT_FREE_ON_EMPTY); + } else { + ubc_upl_abort_range(upl, 0, upl_size, UPL_ABORT_FREE_ON_EMPTY); + } } else { KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 35)) | DBG_FUNC_START, upl, -1, pages_in_upl - (last_pg - start_pg), 0, 0); @@ -4453,7 +4478,7 @@ cluster_read_copy(vnode_t vp, struct uio *uio, u_int32_t io_req_size, off_t file */ cluster_iostate_wait(&iostate, 0, "cluster_read_copy"); - lck_mtx_destroy(&iostate.io_mtxp, cl_mtx_grp); + lck_mtx_destroy(&iostate.io_mtxp, &cl_mtx_grp); } if (rap != NULL) { KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 32)) | DBG_FUNC_END, @@ -4491,8 +4516,8 @@ cluster_lock_direct_read(vnode_t vp, lck_rw_type_t type) lck_spin_unlock(&cl_direct_read_spin_lock); if (new_lck) { // Someone beat us to it, ditch the allocation - lck_rw_destroy(&new_lck->rw_lock, cl_mtx_grp); - FREE(new_lck, M_TEMP); + lck_rw_destroy(&new_lck->rw_lock, &cl_mtx_grp); + kheap_free(KHEAP_DEFAULT, new_lck, sizeof(cl_direct_read_lock_t)); } lck_rw_lock(&lck->rw_lock, type); return lck; @@ -4510,9 +4535,9 @@ cluster_lock_direct_read(vnode_t vp, lck_rw_type_t type) lck_spin_unlock(&cl_direct_read_spin_lock); // Allocate a new lock - MALLOC(new_lck, cl_direct_read_lock_t *, sizeof(*new_lck), - M_TEMP, M_WAITOK); - lck_rw_init(&new_lck->rw_lock, cl_mtx_grp, cl_mtx_attr); + new_lck = kheap_alloc(KHEAP_DEFAULT, sizeof(cl_direct_read_lock_t), + Z_WAITOK); + lck_rw_init(&new_lck->rw_lock, &cl_mtx_grp, LCK_ATTR_NULL); new_lck->vp = vp; new_lck->ref_count = 1; @@ -4529,8 +4554,8 @@ cluster_unlock_direct_read(cl_direct_read_lock_t *lck) if (lck->ref_count == 1) { LIST_REMOVE(lck, chain); lck_spin_unlock(&cl_direct_read_spin_lock); - lck_rw_destroy(&lck->rw_lock, cl_mtx_grp); - FREE(lck, M_TEMP); + lck_rw_destroy(&lck->rw_lock, &cl_mtx_grp); + kheap_free(KHEAP_DEFAULT, lck, sizeof(cl_direct_read_lock_t)); } else { --lck->ref_count; lck_spin_unlock(&cl_direct_read_spin_lock); @@ -4583,6 +4608,8 @@ cluster_read_direct(vnode_t vp, struct uio *uio, off_t filesize, int *read_type, user_addr_t last_iov_base = 0; user_addr_t next_iov_base = 0; + assert(vm_map_page_shift(current_map()) >= PAGE_SHIFT); + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 70)) | DBG_FUNC_START, (int)uio->uio_offset, (int)filesize, *read_type, *read_length, 0); @@ -4614,7 +4641,7 @@ cluster_read_direct(vnode_t vp, struct uio *uio, off_t filesize, int *read_type, iostate.io_error = 0; iostate.io_wanted = 0; - lck_mtx_init(&iostate.io_mtxp, cl_mtx_grp, cl_mtx_attr); + lck_mtx_init(&iostate.io_mtxp, &cl_mtx_grp, LCK_ATTR_NULL); devblocksize = (u_int32_t)vp->v_mount->mnt_devblocksize; mem_alignment_mask = (u_int32_t)vp->v_mount->mnt_alignmentmask; @@ -4646,6 +4673,20 @@ next_dread: offset_in_file = (u_int32_t)uio->uio_offset & (devblocksize - 1); offset_in_iovbase = (u_int32_t)iov_base & mem_alignment_mask; + if (vm_map_page_mask(current_map()) < PAGE_MASK) { + /* + * XXX TODO4K + * Direct I/O might not work as expected from a 16k kernel space + * to a 4k user space because each 4k chunk might point to + * a different 16k physical page... + * Let's go the "misaligned" way. 
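The direct-I/O hunks above and below stop assuming the user map shares the kernel's PAGE_MASK: addresses are truncated with vm_map_trunc_page() against the map's own mask, and direct I/O is refused (forced onto the copy path) when the user address space uses smaller pages than the kernel, per the TODO4K comment. A rough sketch of that check; the function and variable names here are hypothetical.

#include <stdbool.h>
#include <vm/vm_map.h>
#include <mach/vm_param.h>

static bool
demo_can_direct_io(vm_map_t user_map, user_addr_t iov_base,
    vm_map_offset_t *trunc_base)
{
	/* A 4K user address space on a 16K-page kernel cannot be wired
	 * page-for-page, so fall back to the copy engine. */
	if (vm_map_page_shift(user_map) < PAGE_SHIFT) {
		return false;
	}

	/* Truncate with the map's page mask, not the kernel PAGE_MASK. */
	*trunc_base = vm_map_trunc_page(iov_base, vm_map_page_mask(user_map));
	return true;
}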
+ */ + if (!misaligned) { + DEBUG4K_VFS("forcing misaligned\n"); + } + misaligned = 1; + } + if (offset_in_file || offset_in_iovbase) { /* * one of the 2 important offsets is misaligned @@ -4678,7 +4719,7 @@ next_dread: } if ((off_t)io_req_size > max_io_size) { - io_req_size = max_io_size; + io_req_size = (u_int32_t)max_io_size; } /* @@ -4875,7 +4916,7 @@ next_dread: vm_map_t map = UIO_SEG_IS_USER_SPACE(uio->uio_segflg) ? current_map() : kernel_map; for (force_data_sync = 0; force_data_sync < 3; force_data_sync++) { pages_in_pl = 0; - upl_size = upl_needed_size; + upl_size = (upl_size_t)upl_needed_size; upl_flags = UPL_FILE_IO | UPL_NO_SYNC | UPL_SET_INTERNAL | UPL_SET_LITE | UPL_SET_IO_WIRE; if (no_zero_fill) { upl_flags |= UPL_NOZEROFILL; @@ -5055,19 +5096,24 @@ wait_for_dreads: retval = iostate.io_error; } - lck_mtx_destroy(&iostate.io_mtxp, cl_mtx_grp); + lck_mtx_destroy(&iostate.io_mtxp, &cl_mtx_grp); if (io_throttled == TRUE && retval == 0) { retval = EAGAIN; } - for (next_iov_base = orig_iov_base; next_iov_base < last_iov_base; next_iov_base += PAGE_SIZE) { + vm_map_offset_t current_page_size, current_page_mask; + current_page_size = vm_map_page_size(current_map()); + current_page_mask = vm_map_page_mask(current_map()); + for (next_iov_base = orig_iov_base; + next_iov_base < last_iov_base; + next_iov_base += current_page_size) { /* * This is specifically done for pmap accounting purposes. * vm_pre_fault() will call vm_fault() to enter the page into * the pmap if there isn't _a_ physical page for that VA already. */ - vm_pre_fault(vm_map_trunc_page(next_iov_base, PAGE_MASK), VM_PROT_READ); + vm_pre_fault(vm_map_trunc_page(next_iov_base, current_page_mask), VM_PROT_READ); } if (io_req_size && retval == 0) { @@ -5147,7 +5193,7 @@ cluster_read_contig(vnode_t vp, struct uio *uio, off_t filesize, int *read_type, iostate.io_error = 0; iostate.io_wanted = 0; - lck_mtx_init(&iostate.io_mtxp, cl_mtx_grp, cl_mtx_attr); + lck_mtx_init(&iostate.io_mtxp, &cl_mtx_grp, LCK_ATTR_NULL); next_cread: io_size = *read_length; @@ -5155,7 +5201,7 @@ next_cread: max_size = filesize - uio->uio_offset; if (io_size > max_size) { - io_size = max_size; + io_size = (u_int32_t)max_size; } iov_base = uio_curriovbase(uio); @@ -5164,7 +5210,7 @@ next_cread: upl_needed_size = upl_offset + io_size; pages_in_pl = 0; - upl_size = upl_needed_size; + upl_size = (upl_size_t)upl_needed_size; upl_flags = UPL_FILE_IO | UPL_NO_SYNC | UPL_CLEAN_IN_PLACE | UPL_SET_INTERNAL | UPL_SET_LITE | UPL_SET_IO_WIRE; @@ -5173,7 +5219,7 @@ next_cread: vm_map_t map = UIO_SEG_IS_USER_SPACE(uio->uio_segflg) ? current_map() : kernel_map; kret = vm_map_get_upl(map, - (vm_map_offset_t)(iov_base & ~((user_addr_t)PAGE_MASK)), + vm_map_trunc_page(iov_base, vm_map_page_mask(map)), &upl_size, &upl[cur_upl], NULL, &pages_in_pl, &upl_flags, VM_KERN_MEMORY_FILE, 0); KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 92)) | DBG_FUNC_END, @@ -5298,7 +5344,7 @@ wait_for_creads: error = iostate.io_error; } - lck_mtx_destroy(&iostate.io_mtxp, cl_mtx_grp); + lck_mtx_destroy(&iostate.io_mtxp, &cl_mtx_grp); if (error == 0 && tail_size) { error = cluster_align_phys_io(vp, uio, dst_paddr, tail_size, CL_READ, callback, callback_arg); @@ -5352,7 +5398,7 @@ cluster_io_type(struct uio *uio, int *io_type, u_int32_t *io_length, u_int32_t m vm_map_t map = UIO_SEG_IS_USER_SPACE(uio->uio_segflg) ? 
current_map() : kernel_map; if ((vm_map_get_upl(map, - (vm_map_offset_t)(iov_base & ~((user_addr_t)PAGE_MASK)), + vm_map_trunc_page(iov_base, vm_map_page_mask(map)), &upl_size, &upl, NULL, NULL, &upl_flags, VM_KERN_MEMORY_FILE, 0)) != KERN_SUCCESS) { /* * the user app must have passed in an invalid address @@ -5381,6 +5427,13 @@ cluster_io_type(struct uio *uio, int *io_type, u_int32_t *io_length, u_int32_t m } KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 94)) | DBG_FUNC_END, iov_base, *io_type, *io_length, retval, 0); + if (*io_type == IO_DIRECT && + vm_map_page_shift(current_map()) < PAGE_SHIFT) { + /* no direct I/O for sub-page-size address spaces */ + DEBUG4K_VFS("io_type IO_DIRECT -> IO_COPY\n"); + *io_type = IO_COPY; + } + return retval; } @@ -5420,23 +5473,17 @@ advisory_read_ext(vnode_t vp, off_t filesize, off_t f_offset, int resid, int (*c return EINVAL; } - if (resid < 0) { + if (f_offset < 0 || resid < 0) { return EINVAL; } max_io_size = cluster_max_io_size(vp->v_mount, CL_READ); -#if CONFIG_EMBEDDED - if (max_io_size > speculative_prefetch_max_iosize) { - max_io_size = speculative_prefetch_max_iosize; - } -#else if (disk_conditioner_mount_is_ssd(vp->v_mount)) { if (max_io_size > speculative_prefetch_max_iosize) { max_io_size = speculative_prefetch_max_iosize; } } -#endif KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 60)) | DBG_FUNC_START, (int)f_offset, resid, (int)filesize, 0, 0); @@ -5457,7 +5504,7 @@ advisory_read_ext(vnode_t vp, off_t filesize, off_t f_offset, int resid, int (*c if (resid < max_size) { io_size = resid; } else { - io_size = max_size; + io_size = (int)max_size; } upl_size = (start_offset + io_size + (PAGE_SIZE - 1)) & ~PAGE_MASK; @@ -5563,7 +5610,7 @@ advisory_read_ext(vnode_t vp, off_t filesize, off_t f_offset, int resid, int (*c io_size = (last_pg - start_pg) * PAGE_SIZE; if ((off_t)(upl_f_offset + upl_offset + io_size) > filesize) { - io_size = filesize - (upl_f_offset + upl_offset); + io_size = (int)(filesize - (upl_f_offset + upl_offset)); } /* @@ -5756,22 +5803,18 @@ cluster_release(struct ubc_info *ubc) if (wbp->cl_scmap) { vfs_drt_control(&(wbp->cl_scmap), 0); } + lck_mtx_destroy(&wbp->cl_lockw, &cl_mtx_grp); + zfree(cl_wr_zone, wbp); + ubc->cl_wbehind = NULL; } else { KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 81)) | DBG_FUNC_START, ubc, 0, 0, 0, 0); } - rap = ubc->cl_rahead; - - if (wbp != NULL) { - lck_mtx_destroy(&wbp->cl_lockw, cl_mtx_grp); - FREE_ZONE(wbp, sizeof *wbp, M_CLWRBEHIND); - } if ((rap = ubc->cl_rahead)) { - lck_mtx_destroy(&rap->cl_lockr, cl_mtx_grp); - FREE_ZONE(rap, sizeof *rap, M_CLRDAHEAD); + lck_mtx_destroy(&rap->cl_lockr, &cl_mtx_grp); + zfree(cl_rd_zone, rap); + ubc->cl_rahead = NULL; } - ubc->cl_rahead = NULL; - ubc->cl_wbehind = NULL; KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 81)) | DBG_FUNC_END, ubc, rap, wbp, 0, 0); } @@ -6036,7 +6079,7 @@ cluster_push_now(vnode_t vp, struct cl_extent *cl, off_t EOF, int flags, return 0; } - size = EOF - upl_f_offset; + size = (int)(EOF - upl_f_offset); upl_size = (size + (PAGE_SIZE - 1)) & ~PAGE_MASK; pages_in_upl = upl_size / PAGE_SIZE; @@ -6558,7 +6601,7 @@ cluster_copy_ubc_data_internal(vnode_t vp, struct uio *uio, int *io_resid, int m if ((io_size = *io_resid)) { start_offset = (int)(uio->uio_offset & PAGE_MASK_64); - xsize = uio_resid(uio); + xsize = (int)uio_resid(uio); retval = memory_object_control_uiomove(control, uio->uio_offset - start_offset, uio, start_offset, io_size, mark_dirty, take_reference); @@ -6663,7 +6706,7 @@ is_file_clean(vnode_t vp, off_t filesize) } while(0); -#if CONFIG_EMBEDDED +#if 
!defined(XNU_TARGET_OS_OSX) /* * Hash table moduli. * @@ -6696,7 +6739,7 @@ is_file_clean(vnode_t vp, off_t filesize) #define DRT_LARGE_ALLOCATION 32768 /* 144 bytes spare */ #define DRT_XLARGE_ALLOCATION 131072 /* 208 bytes spare */ -#else +#else /* XNU_TARGET_OS_OSX */ /* * Hash table moduli. * @@ -6729,7 +6772,7 @@ is_file_clean(vnode_t vp, off_t filesize) #define DRT_LARGE_ALLOCATION 131072 /* 208 bytes spare */ #define DRT_XLARGE_ALLOCATION 524288 /* 304 bytes spare */ -#endif +#endif /* ! XNU_TARGET_OS_OSX */ /* *** nothing below here has secret dependencies on DRT_BITVECTOR_PAGES *** */ @@ -7179,7 +7222,7 @@ vfs_drt_do_mark_pages( * Work out how many pages we're modifying in this * hashtable entry. */ - pgoff = (offset - DRT_ALIGN_ADDRESS(offset)) / PAGE_SIZE; + pgoff = (int)((offset - DRT_ALIGN_ADDRESS(offset)) / PAGE_SIZE); pgcount = min((length / PAGE_SIZE), (DRT_BITVECTOR_PAGES - pgoff)); /* diff --git a/bsd/vfs/vfs_conf.c b/bsd/vfs/vfs_conf.c index fb97b1864..219e3877d 100644 --- a/bsd/vfs/vfs_conf.c +++ b/bsd/vfs/vfs_conf.c @@ -98,6 +98,7 @@ extern struct vfsops null_vfsops; extern struct vfsops devfs_vfsops; extern const struct vfsops routefs_vfsops; extern struct vfsops nullfs_vfsops; +extern struct vfsops bindfs_vfsops; #if MOCKFS extern struct vfsops mockfs_vfsops; @@ -116,6 +117,7 @@ enum fs_type_num { FT_SYNTHFS = 20, FT_ROUTEFS = 21, FT_NULLFS = 22, + FT_BINDFS = 23, FT_MOCKFS = 0x6D6F636B }; @@ -200,6 +202,24 @@ static struct vfstable vfstbllist[] = { }, #endif /* NULLFS */ +#if BINDFS + { + .vfc_vfsops = &bindfs_vfsops, + .vfc_name = "bindfs", + .vfc_typenum = FT_BINDFS, + .vfc_refcount = 0, + .vfc_flags = MNT_DONTBROWSE | MNT_RDONLY, + .vfc_mountroot = NULL, + .vfc_next = NULL, + .vfc_reserved1 = 0, + .vfc_reserved2 = 0, + .vfc_vfsflags = VFC_VFS64BITREADY, + .vfc_descptr = NULL, + .vfc_descsize = 0, + .vfc_sysctl = NULL + }, +#endif /* BINDFS */ + #if MOCKFS /* If we are configured for it, mockfs should always be the last standard entry (and thus the last FS we attempt mountroot with) */ { @@ -313,6 +333,7 @@ extern const struct vnodeopv_desc mockfs_vnodeop_opv_desc; #endif /* MOCKFS */ extern const struct vnodeopv_desc nullfs_vnodeop_opv_desc; +extern const struct vnodeopv_desc bindfs_vnodeop_opv_desc; const struct vnodeopv_desc *vfs_opv_descs[] = { &dead_vnodeop_opv_desc, @@ -348,6 +369,9 @@ const struct vnodeopv_desc *vfs_opv_descs[] = { #if NULLFS &nullfs_vnodeop_opv_desc, #endif /* NULLFS */ +#if BINDFS + &bindfs_vnodeop_opv_desc, +#endif /* BINDFS */ #if MOCKFS &mockfs_vnodeop_opv_desc, #endif /* MOCKFS */ diff --git a/bsd/vfs/vfs_cprotect.c b/bsd/vfs/vfs_cprotect.c index edb00a315..8355c0127 100644 --- a/bsd/vfs/vfs_cprotect.c +++ b/bsd/vfs/vfs_cprotect.c @@ -47,8 +47,7 @@ * to read/write it. */ -// cpx_flags -typedef uint32_t cpx_flags_t; +// cpx_flags defined in cprotect.h enum { CPX_SEP_WRAPPEDKEY = 0x01, CPX_IV_AES_CTX_INITIALIZED = 0x02, @@ -63,27 +62,41 @@ enum { CPX_WRITE_PROTECTABLE = 0x40 }; +/* + * variable-length CPX structure. See fixed-length variant in cprotect.h + */ struct cpx { #if DEBUG uint32_t cpx_magic1; #endif - aes_encrypt_ctx cpx_iv_aes_ctx; // Context used for generating the IV + aes_encrypt_ctx *cpx_iv_aes_ctx_ptr;// Pointer to context used for generating the IV cpx_flags_t cpx_flags; uint16_t cpx_max_key_len; uint16_t cpx_key_len; + //fixed length up to here. 
cpx_cached_key is variable-length uint8_t cpx_cached_key[]; }; +/* Allows us to switch between CPX types */ +typedef union cpxunion { + struct cpx cpx_var; + fcpx_t cpx_fixed; +} cpxunion_t; + +ZONE_DECLARE(cpx_zone, "cpx", + sizeof(struct fcpx), ZC_ZFREE_CLEARMEM); +ZONE_DECLARE(aes_ctz_zone, "AES ctx", + sizeof(aes_encrypt_ctx), ZC_ZFREE_CLEARMEM | ZC_NOENCRYPT); + +// Note: see struct fcpx defined in sys/cprotect.h + // -- cpx_t accessors -- size_t -cpx_size(size_t key_size) +cpx_size(size_t key_len) { - size_t size = sizeof(struct cpx) + key_size; - -#if DEBUG - size += 4; // Extra for magic -#endif + // This should pick up the 'magic' word in DEBUG for free. + size_t size = sizeof(struct cpx) + key_len; return size; } @@ -95,7 +108,7 @@ cpx_sizex(const struct cpx *cpx) } cpx_t -cpx_alloc(size_t key_len) +cpx_alloc(size_t key_len, bool needs_ctx) { cpx_t cpx = NULL; @@ -105,6 +118,10 @@ cpx_alloc(size_t key_len) * This way, we can write-protect as needed. */ size_t cpsize = cpx_size(key_len); + + // silence warning for needs_ctx + (void) needs_ctx; + if (cpsize < PAGE_SIZE) { /* * Don't use MALLOC to allocate the page-sized structure. Instead, @@ -127,15 +144,56 @@ cpx_alloc(size_t key_len) panic("cpx_size too large ! (%lu)", cpsize); } #else - /* If key page write protection disabled, just switch to kernel MALLOC */ - MALLOC(cpx, cpx_t, cpx_size(key_len), M_TEMP, M_WAITOK); + /* If key page write protection disabled, just switch to zalloc */ + + // error out if you try to request a key that's too big + if (key_len > VFS_CP_MAX_CACHEBUFLEN) { + return NULL; + } + + // the actual key array is fixed-length, but the amount of usable content can vary, via 'key_len' + cpx = zalloc_flags(cpx_zone, Z_WAITOK | Z_ZERO); + + // if our encryption type needs it, alloc the context + if (needs_ctx) { + cpx_alloc_ctx(cpx); + } + #endif cpx_init(cpx, key_len); return cpx; } -/* this is really a void function */ +int +cpx_alloc_ctx(cpx_t cpx) +{ +#if CONFIG_KEYPAGE_WP + (void) cpx; +#else + if (cpx->cpx_iv_aes_ctx_ptr) { + // already allocated? 
+ return 0; + } + + cpx->cpx_iv_aes_ctx_ptr = zalloc_flags(aes_ctz_zone, Z_WAITOK | Z_ZERO); +#endif // CONFIG_KEYPAGE_WP + + return 0; +} + +void +cpx_free_ctx(cpx_t cpx) +{ +#if CONFIG_KEYPAGE_WP + (void) cpx; +# else + if (cpx->cpx_iv_aes_ctx_ptr) { + zfree(aes_ctz_zone, cpx->cpx_iv_aes_ctx_ptr); + } +#endif // CONFIG_KEYPAGE_WP +} + void cpx_writeprotect(cpx_t cpx) { @@ -179,8 +237,9 @@ cpx_free(cpx_t cpx) return; } #else - bzero(cpx->cpx_cached_key, cpx->cpx_max_key_len); - FREE(cpx, M_TEMP); + // free the context if it wasn't already freed + cpx_free_ctx(cpx); + zfree(cpx_zone, cpx); return; #endif } @@ -194,7 +253,8 @@ cpx_init(cpx_t cpx, size_t key_len) #endif cpx->cpx_flags = 0; cpx->cpx_key_len = 0; - cpx->cpx_max_key_len = key_len; + assert(key_len <= UINT16_MAX); + cpx->cpx_max_key_len = (uint16_t)key_len; } bool @@ -307,16 +367,18 @@ cpx_key(const struct cpx *cpx) void cpx_set_aes_iv_key(struct cpx *cpx, void *iv_key) { - aes_encrypt_key128(iv_key, &cpx->cpx_iv_aes_ctx); - SET(cpx->cpx_flags, CPX_IV_AES_CTX_INITIALIZED | CPX_USE_OFFSET_FOR_IV); - CLR(cpx->cpx_flags, CPX_IV_AES_CTX_VFS); + if (cpx->cpx_iv_aes_ctx_ptr) { + aes_encrypt_key128(iv_key, cpx->cpx_iv_aes_ctx_ptr); + SET(cpx->cpx_flags, CPX_IV_AES_CTX_INITIALIZED | CPX_USE_OFFSET_FOR_IV); + CLR(cpx->cpx_flags, CPX_IV_AES_CTX_VFS); + } } aes_encrypt_ctx * cpx_iv_aes_ctx(struct cpx *cpx) { if (ISSET(cpx->cpx_flags, CPX_IV_AES_CTX_INITIALIZED)) { - return &cpx->cpx_iv_aes_ctx; + return cpx->cpx_iv_aes_ctx_ptr; } SHA1_CTX sha1ctxt; @@ -335,14 +397,16 @@ cpx_iv_aes_ctx(struct cpx *cpx) cpx_set_aes_iv_key(cpx, digest); SET(cpx->cpx_flags, CPX_IV_AES_CTX_VFS); - return &cpx->cpx_iv_aes_ctx; + return cpx->cpx_iv_aes_ctx_ptr; } void cpx_flush(cpx_t cpx) { bzero(cpx->cpx_cached_key, cpx->cpx_max_key_len); - bzero(&cpx->cpx_iv_aes_ctx, sizeof(cpx->cpx_iv_aes_ctx)); + if (cpx->cpx_iv_aes_ctx_ptr) { + bzero(cpx->cpx_iv_aes_ctx_ptr, sizeof(aes_encrypt_ctx)); + } cpx->cpx_flags = 0; cpx->cpx_key_len = 0; } @@ -361,7 +425,7 @@ cpx_copy(const struct cpx *src, cpx_t dst) memcpy(cpx_key(dst), cpx_key(src), key_len); dst->cpx_flags = src->cpx_flags; if (ISSET(dst->cpx_flags, CPX_IV_AES_CTX_INITIALIZED)) { - dst->cpx_iv_aes_ctx = src->cpx_iv_aes_ctx; + *(dst->cpx_iv_aes_ctx_ptr) = *(src->cpx_iv_aes_ctx_ptr); // deep copy } } diff --git a/bsd/vfs/vfs_disk_conditioner.c b/bsd/vfs/vfs_disk_conditioner.c index 7df2f287b..846170a89 100644 --- a/bsd/vfs/vfs_disk_conditioner.c +++ b/bsd/vfs/vfs_disk_conditioner.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-2018 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2016-2020 Apple Inc. All rights reserved. 
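The vfs_cprotect.c hunks above turn the aes_encrypt_ctx that used to be embedded in struct cpx into a pointer that is lazily allocated from its own clear-on-free zone, so a cpx only carries an IV context when its encryption type needs one. A condensed sketch of that idiom; the struct and zone names are illustrative.

#include <stdbool.h>
#include <stdint.h>
#include <kern/zalloc.h>

struct demo_key {
	void    *iv_ctx;        /* lazily allocated companion context */
	uint16_t key_len;
	uint8_t  key[32];
};

ZONE_DECLARE(demo_key_zone, "demo.key", sizeof(struct demo_key), ZC_ZFREE_CLEARMEM);
ZONE_DECLARE(demo_ctx_zone, "demo.ctx", 64, ZC_ZFREE_CLEARMEM | ZC_NOENCRYPT);

static struct demo_key *
demo_key_alloc(bool needs_ctx)
{
	struct demo_key *k = zalloc_flags(demo_key_zone, Z_WAITOK | Z_ZERO);

	if (needs_ctx && k->iv_ctx == NULL) {
		/* Only pay for the context when the caller needs it. */
		k->iv_ctx = zalloc_flags(demo_ctx_zone, Z_WAITOK | Z_ZERO);
	}
	return k;
}

static void
demo_key_free(struct demo_key *k)
{
	if (k->iv_ctx) {
		zfree(demo_ctx_zone, k->iv_ctx);
	}
	/* ZC_ZFREE_CLEARMEM scrubs the cached key material on free. */
	zfree(demo_key_zone, k);
}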
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -124,9 +124,19 @@ disk_conditioner_delay(buf_t bp, int extents, int total_size, uint64_t already_e } // scale access time by (distance in blocks from previous I/O / maximum blocks) - access_time_scale = weighted_scale_factor((double)blkdiff / BLK_MAX(mp)); + access_time_scale = weighted_scale_factor((double)blkdiff / (double)BLK_MAX(mp)); + if (__builtin_isnan(access_time_scale)) { + return; + } // most cases should pass in extents==1 for optimal delay calculation, otherwise just multiply delay by extents - delay_usec = (uint64_t)(((uint64_t)extents * info->access_time_usec) * access_time_scale); + double temp = (((double)extents * (double)info->access_time_usec) * access_time_scale); + if (temp <= 0) { + delay_usec = 0; + } else if (temp >= (double)(18446744073709549568ULL)) { /* highest 64-bit unsigned integer representable as a double */ + delay_usec = UINT64_MAX; + } else { + delay_usec = (uint64_t)temp; + } if (info->read_throughput_mbps && (bp->b_flags & B_READ)) { delay_usec += (uint64_t)(total_size / ((double)(info->read_throughput_mbps * 1024 * 1024 / 8) / USEC_PER_SEC)); @@ -153,7 +163,8 @@ disk_conditioner_delay(buf_t bp, int extents, int total_size, uint64_t already_e while (delay_usec) { microuptime(&start); - delay(delay_usec); + assert(delay_usec <= INT_MAX); + delay((int)delay_usec); microuptime(&elapsed); timevalsub(&elapsed, &start); if (elapsed.tv_sec * USEC_PER_SEC < delay_usec) { diff --git a/bsd/vfs/vfs_fsevents.c b/bsd/vfs/vfs_fsevents.c index 5b5455b4b..fb8e519e2 100644 --- a/bsd/vfs/vfs_fsevents.c +++ b/bsd/vfs/vfs_fsevents.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2004-2019 Apple Inc. All rights reserved. + * Copyright (c) 2004-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -42,7 +42,7 @@ #include #include #include -#include +#include #include #include #include @@ -217,19 +217,12 @@ fsevents_internal_init(void) PE_get_default("kern.maxkfsevents", &max_kfs_events, sizeof(max_kfs_events)); - event_zone = zinit(sizeof(kfs_event), - max_kfs_events * sizeof(kfs_event), - max_kfs_events * sizeof(kfs_event), - "fs-event-buf"); - if (event_zone == NULL) { - printf("fsevents: failed to initialize the event zone.\n"); - } - - // mark the zone as exhaustible so that it will not - // ever grow beyond what we initially filled it with - zone_change(event_zone, Z_EXHAUST, TRUE); - zone_change(event_zone, Z_COLLECT, FALSE); - zone_change(event_zone, Z_CALLERACCT, FALSE); + event_zone = zone_create_ext("fs-event-buf", sizeof(kfs_event), + ZC_NOGC | ZC_NOCALLOUT, ZONE_ID_ANY, ^(zone_t z) { + // mark the zone as exhaustible so that it will not + // ever grow beyond what we initially filled it with + zone_set_exhaustible(z, max_kfs_events * sizeof(kfs_event)); + }); if (zfill(event_zone, max_kfs_events) < max_kfs_events) { printf("fsevents: failed to pre-fill the event zone.\n"); @@ -352,6 +345,7 @@ static int last_nlen = 0; static int last_vid = -1; static uint64_t last_coalesced_time = 0; static void *last_event_ptr = NULL; +static pid_t last_pid = -1; int last_coalesced = 0; static mach_timebase_info_data_t sTimebaseInfo = { 0, 0 }; @@ -440,6 +434,7 @@ add_fsevent(int type, vfs_context_t ctx, ...) if (type == last_event_type && (elapsed < 1000000000) + && (last_pid == p->p_pid) && ((vid && vid == last_vid && last_ptr == ptr) || @@ -459,6 +454,7 @@ add_fsevent(int type, vfs_context_t ctx, ...) 
last_vid = vid; last_event_type = type; last_coalesced_time = now; + last_pid = p->p_pid; } } va_start(ap, ctx); @@ -538,14 +534,14 @@ add_fsevent(int type, vfs_context_t ctx, ...) OSBitOrAtomic16(KFSE_BEING_CREATED, &kfse->flags); last_event_ptr = kfse; - kfse->type = type; + kfse->type = (int16_t)type; kfse->abstime = now; kfse->pid = p->p_pid; if (type == FSE_RENAME || type == FSE_EXCHANGE || type == FSE_CLONE) { memset(kfse_dest, 0, sizeof(kfs_event)); kfse_dest->refcount = 1; OSBitOrAtomic16(KFSE_BEING_CREATED, &kfse_dest->flags); - kfse_dest->type = type; + kfse_dest->type = (int16_t)type; kfse_dest->pid = p->p_pid; kfse_dest->abstime = now; @@ -717,7 +713,7 @@ add_fsevent(int type, vfs_context_t ctx, ...) } // store the path by adding it to the global string table - cur->len = pathbuff_len; + cur->len = (u_int16_t)pathbuff_len; cur->str = vfs_addname(pathbuff, pathbuff_len, 0, 0); if (cur->str == NULL || cur->str[0] == '\0') { panic("add_fsevent: was not able to add path %s to event %p.\n", pathbuff, cur); @@ -770,7 +766,7 @@ add_fsevent(int type, vfs_context_t ctx, ...) case FSE_ARG_INT32: { uint32_t ival = (uint32_t)va_arg(ap, int32_t); - kfse->uid = (ino64_t)ival; + kfse->uid = ival; break; } @@ -955,10 +951,8 @@ add_watcher(int8_t *event_list, int32_t num_events, int32_t eventq_size, fs_even // Note: the event_queue follows the fs_event_watcher struct // in memory so we only have to do one allocation - MALLOC(watcher, - fs_event_watcher *, - sizeof(fs_event_watcher) + eventq_size * sizeof(kfs_event *), - M_TEMP, M_WAITOK); + watcher = kheap_alloc(KHEAP_DEFAULT, + sizeof(fs_event_watcher) + eventq_size * sizeof(kfs_event *), Z_WAITOK); if (watcher == NULL) { return ENOMEM; } @@ -1005,7 +999,8 @@ add_watcher(int8_t *event_list, int32_t num_events, int32_t eventq_size, fs_even if (i >= MAX_WATCHERS) { printf("fsevents: too many watchers!\n"); unlock_watch_table(); - FREE(watcher, M_TEMP); + kheap_free(KHEAP_DEFAULT, watcher, + sizeof(fs_event_watcher) + watcher->eventq_size * sizeof(kfs_event *)); return ENOSPC; } @@ -1086,16 +1081,12 @@ remove_watcher(fs_event_watcher *target) } lck_rw_unlock_exclusive(&event_handling_lock); - if (watcher->event_list) { - FREE(watcher->event_list, M_TEMP); - watcher->event_list = NULL; - } - if (watcher->devices_not_to_watch) { - FREE(watcher->devices_not_to_watch, M_TEMP); - watcher->devices_not_to_watch = NULL; - } - FREE(watcher, M_TEMP); - + kheap_free(KHEAP_DEFAULT, watcher->event_list, + watcher->num_events * sizeof(int8_t)); + kheap_free(KHEAP_DEFAULT, watcher->devices_not_to_watch, + watcher->num_devices * sizeof(dev_t)); + kheap_free(KHEAP_DEFAULT, watcher, + sizeof(fs_event_watcher) + watcher->eventq_size * sizeof(kfs_event *)); return; } @@ -1633,7 +1624,7 @@ get_out: void fsevent_unmount(__unused struct mount *mp, __unused vfs_context_t ctx) { -#if CONFIG_EMBEDDED +#if !defined(XNU_TARGET_OS_OSX) dev_t dev = mp->mnt_vfsstat.f_fsid.val[0]; int error, waitcount = 0; struct timespec ts = {.tv_sec = 1, .tv_nsec = 0}; @@ -1691,7 +1682,7 @@ fsevent_unmount(__unused struct mount *mp, __unused vfs_context_t ctx) } } unlock_watch_table(); -#endif +#endif /* ! 
XNU_TARGET_OS_OSX */ } @@ -1714,7 +1705,7 @@ static int fseventsf_read(struct fileproc *fp, struct uio *uio, __unused int flags, __unused vfs_context_t ctx) { - fsevent_handle *fseh = (struct fsevent_handle *)fp->f_fglob->fg_data; + fsevent_handle *fseh = (struct fsevent_handle *)fp->fp_glob->fg_data; int error; error = fmod_watch(fseh->watcher, uio); @@ -1740,7 +1731,7 @@ typedef struct fsevent_dev_filter_args64 { static int fseventsf_ioctl(struct fileproc *fp, u_long cmd, caddr_t data, vfs_context_t ctx) { - fsevent_handle *fseh = (struct fsevent_handle *)fp->f_fglob->fg_data; + fsevent_handle *fseh = (struct fsevent_handle *)fp->fp_glob->fg_data; int ret = 0; fsevent_dev_filter_args64 *devfilt_args, _devfilt_args; @@ -1794,7 +1785,7 @@ fseventsf_ioctl(struct fileproc *fp, u_long cmd, caddr_t data, vfs_context_t ctx handle_dev_filter: { - int new_num_devices; + int new_num_devices, old_num_devices = 0; dev_t *devices_not_to_watch, *tmp = NULL; if (devfilt_args->num_devices > 256) { @@ -1808,40 +1799,38 @@ handle_dev_filter: tmp = fseh->watcher->devices_not_to_watch; fseh->watcher->devices_not_to_watch = NULL; + old_num_devices = fseh->watcher->num_devices; fseh->watcher->num_devices = new_num_devices; unlock_watch_table(); - if (tmp) { - FREE(tmp, M_TEMP); - } + kheap_free(KHEAP_DEFAULT, tmp, old_num_devices * sizeof(dev_t)); break; } - MALLOC(devices_not_to_watch, dev_t *, - new_num_devices * sizeof(dev_t), - M_TEMP, M_WAITOK); + devices_not_to_watch = kheap_alloc(KHEAP_DEFAULT, + new_num_devices * sizeof(dev_t), Z_WAITOK); if (devices_not_to_watch == NULL) { ret = ENOMEM; break; } - ret = copyin(devfilt_args->devices, + ret = copyin((user_addr_t)devfilt_args->devices, (void *)devices_not_to_watch, new_num_devices * sizeof(dev_t)); if (ret) { - FREE(devices_not_to_watch, M_TEMP); + kheap_free(KHEAP_DEFAULT, devices_not_to_watch, + new_num_devices * sizeof(dev_t)); break; } lock_watch_table(); + old_num_devices = fseh->watcher->num_devices; fseh->watcher->num_devices = new_num_devices; tmp = fseh->watcher->devices_not_to_watch; fseh->watcher->devices_not_to_watch = devices_not_to_watch; unlock_watch_table(); - if (tmp) { - FREE(tmp, M_TEMP); - } + kheap_free(KHEAP_DEFAULT, tmp, old_num_devices * sizeof(dev_t)); break; } @@ -1875,7 +1864,7 @@ handle_dev_filter: static int fseventsf_select(struct fileproc *fp, int which, __unused void *wql, vfs_context_t ctx) { - fsevent_handle *fseh = (struct fsevent_handle *)fp->f_fglob->fg_data; + fsevent_handle *fseh = (struct fsevent_handle *)fp->fp_glob->fg_data; int ready = 0; if ((which != FREAD) || (fseh->watcher->flags & WATCHER_CLOSING)) { @@ -1920,7 +1909,7 @@ fseventsf_close(struct fileglob *fg, __unused vfs_context_t ctx) fseh->watcher = NULL; remove_watcher(watcher); - FREE(fseh, M_TEMP); + kheap_free(KHEAP_DEFAULT, fseh, sizeof(fsevent_handle)); return 0; } @@ -2051,7 +2040,7 @@ static int fseventsf_kqfilter(struct fileproc *fp, struct knote *kn, __unused struct kevent_qos_s *kev) { - fsevent_handle *fseh = (struct fsevent_handle *)fp->f_fglob->fg_data; + fsevent_handle *fseh = (struct fsevent_handle *)fp->fp_glob->fg_data; int res; kn->kn_hook = (void*)fseh; @@ -2074,7 +2063,7 @@ static int fseventsf_drain(struct fileproc *fp, __unused vfs_context_t ctx) { int counter = 0; - fsevent_handle *fseh = (struct fsevent_handle *)fp->f_fglob->fg_data; + fsevent_handle *fseh = (struct fsevent_handle *)fp->fp_glob->fg_data; // if there are people still waiting, sleep for 10ms to // let them clean up and get out of there. 
however we @@ -2122,11 +2111,12 @@ fseventsread(__unused dev_t dev, __unused struct uio *uio, __unused int ioflag) static int -parse_buffer_and_add_events(const char *buffer, int bufsize, vfs_context_t ctx, long *remainder) +parse_buffer_and_add_events(const char *buffer, size_t bufsize, vfs_context_t ctx, size_t *remainder) { const fse_info *finfo, *dest_finfo; const char *path, *ptr, *dest_path, *event_start = buffer; - int path_len, type, dest_path_len, err = 0; + size_t path_len, dest_path_len; + int type, err = 0; ptr = buffer; @@ -2232,9 +2222,9 @@ char *write_buffer = NULL; static int fseventswrite(__unused dev_t dev, struct uio *uio, __unused int ioflag) { - int error = 0, count; + int error = 0; + size_t count, offset = 0, remainder = 0; vfs_context_t ctx = vfs_context_current(); - long offset = 0, remainder; lck_mtx_lock(&event_writer_lock); @@ -2254,18 +2244,13 @@ fseventswrite(__unused dev_t dev, struct uio *uio, __unused int ioflag) // after it. // while (uio_resid(uio)) { - if (uio_resid(uio) > (WRITE_BUFFER_SIZE - offset)) { - count = WRITE_BUFFER_SIZE - offset; - } else { - count = uio_resid(uio); - } + count = MIN(WRITE_BUFFER_SIZE - offset, (size_t)uio_resid(uio)); - error = uiomove(write_buffer + offset, count, uio); + error = uiomove(write_buffer + offset, (int)count, uio); if (error) { break; } - // printf("fsevents: write: copied in %d bytes (offset: %ld)\n", count, offset); error = parse_buffer_and_add_events(write_buffer, offset + count, ctx, &remainder); if (error) { break; @@ -2277,14 +2262,8 @@ fseventswrite(__unused dev_t dev, struct uio *uio, __unused int ioflag) // through the loop. note that the remainder always starts // at an event boundary. // - if (remainder != 0) { - // printf("fsevents: write: an event spanned a %d byte boundary. 
remainder: %ld\n", - // WRITE_BUFFER_SIZE, remainder); - memmove(write_buffer, (write_buffer + count + offset) - remainder, remainder); - offset = remainder; - } else { - offset = 0; - } + memmove(write_buffer, (write_buffer + count + offset) - remainder, remainder); + offset = remainder; } lck_mtx_unlock(&event_writer_lock); @@ -2355,33 +2334,31 @@ fseventsioctl(__unused dev_t dev, u_long cmd, caddr_t data, __unused int flag, s fse_clone_args = (fsevent_clone_args64 *)data; handle_clone: - if (fse_clone_args->num_events < 0 || fse_clone_args->num_events > 4096) { + if (fse_clone_args->num_events <= 0 || fse_clone_args->num_events > 4096) { return EINVAL; } - MALLOC(fseh, fsevent_handle *, sizeof(fsevent_handle), - M_TEMP, M_WAITOK); + fseh = kheap_alloc(KHEAP_DEFAULT, sizeof(fsevent_handle), Z_WAITOK | Z_ZERO); if (fseh == NULL) { return ENOMEM; } - memset(fseh, 0, sizeof(fsevent_handle)); klist_init(&fseh->knotes); - MALLOC(event_list, int8_t *, - fse_clone_args->num_events * sizeof(int8_t), - M_TEMP, M_WAITOK); + event_list = kheap_alloc(KHEAP_DEFAULT, + fse_clone_args->num_events * sizeof(int8_t), Z_WAITOK); if (event_list == NULL) { - FREE(fseh, M_TEMP); + kheap_free(KHEAP_DEFAULT, fseh, sizeof(fsevent_handle)); return ENOMEM; } - error = copyin(fse_clone_args->event_list, + error = copyin((user_addr_t)fse_clone_args->event_list, (void *)event_list, fse_clone_args->num_events * sizeof(int8_t)); if (error) { - FREE(event_list, M_TEMP); - FREE(fseh, M_TEMP); + kheap_free(KHEAP_DEFAULT, event_list, + fse_clone_args->num_events * sizeof(int8_t)); + kheap_free(KHEAP_DEFAULT, fseh, sizeof(fsevent_handle)); return error; } @@ -2392,8 +2369,9 @@ handle_clone: error = vslock((user_addr_t)fse_clone_args->fd, sizeof(int32_t)); if (error) { - FREE(event_list, M_TEMP); - FREE(fseh, M_TEMP); + kheap_free(KHEAP_DEFAULT, event_list, + fse_clone_args->num_events * sizeof(int8_t)); + kheap_free(KHEAP_DEFAULT, fseh, sizeof(fsevent_handle)); return error; } @@ -2405,8 +2383,9 @@ handle_clone: if (error) { vsunlock((user_addr_t)fse_clone_args->fd, sizeof(int32_t), 0); - FREE(event_list, M_TEMP); - FREE(fseh, M_TEMP); + kheap_free(KHEAP_DEFAULT, event_list, + fse_clone_args->num_events * sizeof(int8_t)); + kheap_free(KHEAP_DEFAULT, fseh, sizeof(fsevent_handle)); return error; } @@ -2417,21 +2396,22 @@ handle_clone: remove_watcher(fseh->watcher); vsunlock((user_addr_t)fse_clone_args->fd, sizeof(int32_t), 0); - FREE(event_list, M_TEMP); - FREE(fseh, M_TEMP); + kheap_free(KHEAP_DEFAULT, event_list, + fse_clone_args->num_events * sizeof(int8_t)); + kheap_free(KHEAP_DEFAULT, fseh, sizeof(fsevent_handle)); return error; } proc_fdlock(p); - f->f_fglob->fg_flag = FREAD | FWRITE; - f->f_fglob->fg_ops = &fsevents_fops; - f->f_fglob->fg_data = (caddr_t) fseh; + f->fp_glob->fg_flag = FREAD | FWRITE; + f->fp_glob->fg_ops = &fsevents_fops; + f->fp_glob->fg_data = (caddr_t) fseh; /* * We can safely hold the proc_fdlock across this copyout() * because of the vslock() call above. The vslock() call * also ensures that we will never get an error, so assert * this. */ - error = copyout((void *)&fd, fse_clone_args->fd, sizeof(int32_t)); + error = copyout((void *)&fd, (user_addr_t)fse_clone_args->fd, sizeof(int32_t)); assert(error == 0); procfdtbl_releasefd(p, fd, NULL); @@ -2463,22 +2443,20 @@ fsevents_wakeup(fs_event_watcher *watcher) * A struct describing which functions will get invoked for certain * actions. 
*/ -static struct cdevsw fsevents_cdevsw = +static const struct cdevsw fsevents_cdevsw = { - fseventsopen, /* open */ - fseventsclose, /* close */ - fseventsread, /* read */ - fseventswrite, /* write */ - fseventsioctl, /* ioctl */ - (stop_fcn_t *)&nulldev, /* stop */ - (reset_fcn_t *)&nulldev, /* reset */ - NULL, /* tty's */ - eno_select, /* select */ - eno_mmap, /* mmap */ - eno_strat, /* strategy */ - eno_getc, /* getc */ - eno_putc, /* putc */ - 0 /* type */ + .d_open = fseventsopen, + .d_close = fseventsclose, + .d_read = fseventsread, + .d_write = fseventswrite, + .d_ioctl = fseventsioctl, + .d_stop = (stop_fcn_t *)&nulldev, + .d_reset = (reset_fcn_t *)&nulldev, + .d_select = eno_select, + .d_mmap = eno_mmap, + .d_strategy = eno_strat, + .d_reserved_1 = eno_getc, + .d_reserved_2 = eno_putc, }; @@ -2514,10 +2492,7 @@ fsevents_init(void) char * get_pathbuff(void) { - char *path; - - MALLOC_ZONE(path, char *, MAXPATHLEN, M_NAMEI, M_WAITOK); - return path; + return zalloc(ZV_NAMEI); } void @@ -2526,7 +2501,7 @@ release_pathbuff(char *path) if (path == NULL) { return; } - FREE_ZONE(path, MAXPATHLEN, M_NAMEI); + zfree(ZV_NAMEI, path); } int @@ -2642,15 +2617,13 @@ create_fsevent_from_kevent(vnode_t vp, uint32_t kevents, struct vnode_attr *vap) char * get_pathbuff(void) { - char *path; - MALLOC_ZONE(path, char *, MAXPATHLEN, M_NAMEI, M_WAITOK); - return path; + return zalloc(ZV_NAMEI); } void release_pathbuff(char *path) { - FREE_ZONE(path, MAXPATHLEN, M_NAMEI); + zfree(ZV_NAMEI, path); } int diff --git a/bsd/vfs/vfs_fslog.c b/bsd/vfs/vfs_fslog.c index 37ac39c09..4ba18afa4 100644 --- a/bsd/vfs/vfs_fslog.c +++ b/bsd/vfs/vfs_fslog.c @@ -28,7 +28,7 @@ #include #include -#include +#include #include #include #include @@ -41,7 +41,7 @@ #include #include -#include +#include #include @@ -203,7 +203,7 @@ novel_fpx_event(const uuid_t uuid, uint32_t code, uint32_t xcpt) DPRINTF_FPX_EVENT("reusing", fe); } else { /* add a new element to the list */ - fe = kalloc(sizeof(*fe)); + fe = zalloc_permanent_type(struct fpx_event); } memcpy(fe->fe_uuid, uuid, sizeof(uuid_t)); fe->fe_code = code; diff --git a/bsd/vfs/vfs_init.c b/bsd/vfs/vfs_init.c index d17cb02bd..441d9269f 100644 --- a/bsd/vfs/vfs_init.c +++ b/bsd/vfs/vfs_init.c @@ -82,7 +82,7 @@ #include #include #include -#include +#include #include #if CONFIG_MACF @@ -102,6 +102,8 @@ #define DODEBUG(A) #endif +ZONE_DECLARE(mount_zone, "mount", sizeof(struct mount), ZC_ZFREE_CLEARMEM); + __private_extern__ void vntblinit(void); extern const struct vnodeopv_desc *vfs_opv_descs[]; @@ -162,9 +164,8 @@ vfs_opv_init(void) * Also handle backwards compatibility. 
*/ if (*opv_desc_vector_p == NULL) { - MALLOC(*opv_desc_vector_p, PFIvp*, - vfs_opv_numops * sizeof(PFIvp), M_TEMP, M_WAITOK); - bzero(*opv_desc_vector_p, vfs_opv_numops * sizeof(PFIvp)); + *opv_desc_vector_p = kheap_alloc(KHEAP_DEFAULT, + vfs_opv_numops * sizeof(PFIvp), Z_WAITOK | Z_ZERO); DODEBUG(printf("vector at %x allocated\n", opv_desc_vector_p)); } @@ -448,7 +449,7 @@ vfsinit(void) struct sysctl_oid *oidp = NULL; struct sysctl_oid oid = SYSCTL_STRUCT_INIT(_vfs, vfsp->vfc_typenum, , CTLTYPE_NODE | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED, NULL, 0, vfs_sysctl_node, "-", ""); - MALLOC(oidp, struct sysctl_oid *, sizeof(struct sysctl_oid), M_TEMP, M_WAITOK); + oidp = kheap_alloc(KHEAP_DEFAULT, sizeof(struct sysctl_oid), Z_WAITOK); *oidp = oid; /* Memory for VFS oid held by vfsentry forever */ @@ -480,9 +481,7 @@ vfsinit(void) /* * create a mount point for dead vnodes */ - MALLOC_ZONE(mp, struct mount *, sizeof(struct mount), - M_MOUNT, M_WAITOK); - bzero((char *)mp, sizeof(struct mount)); + mp = zalloc_flags(mount_zone, Z_WAITOK | Z_ZERO); /* Initialize the default IO constraints */ mp->mnt_maxreadcnt = mp->mnt_maxwritecnt = MAXPHYS; mp->mnt_segreadcnt = mp->mnt_segwritecnt = 32; @@ -589,7 +588,7 @@ vfstable_add(struct vfstable *nvfsp) if (nvfsp->vfc_vfsops->vfs_sysctl) { struct sysctl_oid oid = SYSCTL_STRUCT_INIT(_vfs, nvfsp->vfc_typenum, , CTLTYPE_NODE | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED, NULL, 0, vfs_sysctl_node, "-", ""); - MALLOC(oidp, struct sysctl_oid *, sizeof(struct sysctl_oid), M_TEMP, M_WAITOK); + oidp = kheap_alloc(KHEAP_DEFAULT, sizeof(struct sysctl_oid), Z_WAITOK); *oidp = oid; } @@ -609,8 +608,8 @@ findslot: if (allocated == NULL) { mount_list_unlock(); /* out of static slots; allocate one instead */ - MALLOC(allocated, struct vfstable *, sizeof(struct vfstable), - M_TEMP, M_WAITOK); + allocated = kheap_alloc(KHEAP_DEFAULT, sizeof(struct vfstable), + Z_WAITOK); goto findslot; } else { slotp = allocated; @@ -651,7 +650,7 @@ findslot: if (allocated && allocated != slotp) { /* did allocation, but ended up using static slot */ - FREE(allocated, M_TEMP); + kheap_free(KHEAP_DEFAULT, allocated, sizeof(struct vfstable)); } return slotp; @@ -697,8 +696,7 @@ vfstable_del(struct vfstable * vtbl) if ((*vcpp)->vfc_sysctl) { sysctl_unregister_oid((*vcpp)->vfc_sysctl); (*vcpp)->vfc_sysctl->oid_name = NULL; - FREE((*vcpp)->vfc_sysctl, M_TEMP); - (*vcpp)->vfc_sysctl = NULL; + kheap_free(KHEAP_DEFAULT, (*vcpp)->vfc_sysctl, sizeof(struct sysctl_oid)); } /* Unlink entry */ @@ -724,7 +722,7 @@ vfstable_del(struct vfstable * vtbl) */ numregistered_fses--; mount_list_unlock(); - FREE(vcdelp, M_TEMP); + kheap_free(KHEAP_DEFAULT, vcdelp, sizeof(struct vfstable)); mount_list_lock(); } diff --git a/bsd/vfs/vfs_lookup.c b/bsd/vfs/vfs_lookup.c index 9aad31d57..6a1700105 100644 --- a/bsd/vfs/vfs_lookup.c +++ b/bsd/vfs/vfs_lookup.c @@ -81,19 +81,19 @@ #include #include #include -#include +#include #include #include #include #include /* For _PC_NAME_MAX */ #include #include -#include +#include #include -#include /* to get the prototype for strstr() in sys/dtrace_glue.h */ #if CONFIG_MACF #include #endif +#include #include @@ -113,7 +113,7 @@ static int vfs_getrealpath(const char * path, char * realpath, size_t bufsize, v #endif static int lookup_traverse_mountpoints(struct nameidata *ndp, struct componentname *cnp, vnode_t dp, int vbusyflags, vfs_context_t ctx); -static int lookup_handle_symlink(struct nameidata *ndp, vnode_t *new_dp, vfs_context_t ctx); +static int 
lookup_handle_symlink(struct nameidata *ndp, vnode_t *new_dp, bool* dp_has_iocount, vfs_context_t ctx); static int lookup_authorize_search(vnode_t dp, struct componentname *cnp, int dp_authorized_in_cache, vfs_context_t ctx); static void lookup_consider_update_cache(vnode_t dvp, vnode_t vp, struct componentname *cnp, int nc_generation); static int lookup_handle_found_vnode(struct nameidata *ndp, struct componentname *cnp, int rdonly, @@ -125,7 +125,7 @@ static int lookup_handle_emptyname(struct nameidata *ndp, struct co static int lookup_handle_rsrc_fork(vnode_t dp, struct nameidata *ndp, struct componentname *cnp, int wantparent, vfs_context_t ctx); #endif - +extern lck_rw_t * rootvnode_rw_lock; /* * Convert a pathname into a pointer to a locked inode. @@ -169,7 +169,7 @@ namei(struct nameidata *ndp) struct vnode *dp; /* the directory we are searching */ struct vnode *usedvp = ndp->ni_dvp; /* store pointer to vp in case we must loop due to * heavy vnode pressure */ - u_long cnpflags = ndp->ni_cnd.cn_flags; /* store in case we have to restore after loop */ + uint32_t cnpflags = ndp->ni_cnd.cn_flags; /* store in case we have to restore after loop */ int error; struct componentname *cnp = &ndp->ni_cnd; vfs_context_t ctx = cnp->cn_context; @@ -183,8 +183,11 @@ namei(struct nameidata *ndp) int volfs_restarts = 0; #endif size_t bytes_copied = 0; - bool take_proc_lock = !(ndp->ni_flag & NAMEI_NOPROCLOCK); - bool proc_lock_taken = false; + vnode_t rootdir_with_usecount = NULLVP; + vnode_t startdir_with_usecount = NULLVP; + vnode_t usedvp_dp = NULLVP; + int32_t old_count = 0; + bool dp_has_iocount = false; fdp = p->p_fd; @@ -256,12 +259,7 @@ retry_copy: cnp->cn_pnlen, &bytes_copied); } if (error == ENAMETOOLONG && !(cnp->cn_flags & HASBUF)) { - MALLOC_ZONE(cnp->cn_pnbuf, caddr_t, MAXPATHLEN, M_NAMEI, M_WAITOK); - if (cnp->cn_pnbuf == NULL) { - error = ENOMEM; - goto error_out; - } - + cnp->cn_pnbuf = zalloc(ZV_NAMEI); cnp->cn_flags |= HASBUF; cnp->cn_pnlen = MAXPATHLEN; bytes_copied = 0; @@ -271,7 +269,8 @@ retry_copy: if (error) { goto error_out; } - ndp->ni_pathlen = bytes_copied; + assert(bytes_copied <= MAXPATHLEN); + ndp->ni_pathlen = (u_int)bytes_copied; bytes_copied = 0; /* @@ -302,29 +301,30 @@ retry_copy: char * realpath; int realpath_err; /* Attempt to resolve a legacy volfs style pathname. */ - MALLOC_ZONE(realpath, caddr_t, MAXPATHLEN, M_NAMEI, M_WAITOK); - if (realpath) { - /* - * We only error out on the ENAMETOOLONG cases where we know that - * vfs_getrealpath translation succeeded but the path could not fit into - * MAXPATHLEN characters. In other failure cases, we may be dealing with a path - * that legitimately looks like /.vol/1234/567 and is not meant to be translated - */ - if ((realpath_err = vfs_getrealpath(&cnp->cn_pnbuf[6], realpath, MAXPATHLEN, ctx))) { - FREE_ZONE(realpath, MAXPATHLEN, M_NAMEI); - if (realpath_err == ENOSPC || realpath_err == ENAMETOOLONG) { - error = ENAMETOOLONG; - goto error_out; - } - } else { - if (cnp->cn_flags & HASBUF) { - FREE_ZONE(cnp->cn_pnbuf, cnp->cn_pnlen, M_NAMEI); - } - cnp->cn_pnbuf = realpath; - cnp->cn_pnlen = MAXPATHLEN; - ndp->ni_pathlen = strlen(realpath) + 1; - cnp->cn_flags |= HASBUF | CN_VOLFSPATH; + realpath = zalloc(ZV_NAMEI); + /* + * We only error out on the ENAMETOOLONG cases where we know that + * vfs_getrealpath translation succeeded but the path could not fit into + * MAXPATHLEN characters. 
In other failure cases, we may be dealing with a path + * that legitimately looks like /.vol/1234/567 and is not meant to be translated + */ + if ((realpath_err = vfs_getrealpath(&cnp->cn_pnbuf[6], realpath, MAXPATHLEN, ctx))) { + zfree(ZV_NAMEI, realpath); + if (realpath_err == ENOSPC || realpath_err == ENAMETOOLONG) { + error = ENAMETOOLONG; + goto error_out; } + } else { + size_t tmp_len; + if (cnp->cn_flags & HASBUF) { + zfree(ZV_NAMEI, cnp->cn_pnbuf); + } + cnp->cn_pnbuf = realpath; + cnp->cn_pnlen = MAXPATHLEN; + tmp_len = strlen(realpath) + 1; + assert(tmp_len <= UINT_MAX); + ndp->ni_pathlen = (u_int)tmp_len; + cnp->cn_flags |= HASBUF | CN_VOLFSPATH; } } #endif /* CONFIG_VOLFS */ @@ -346,20 +346,18 @@ retry_copy: error = ENOENT; goto error_out; } - ndp->ni_loopcnt = 0; + if (ndp->ni_flag & NAMEI_NOFOLLOW_ANY) { + ndp->ni_loopcnt = MAXSYMLINKS; + } else { + ndp->ni_loopcnt = 0; + } /* * determine the starting point for the translation. - * - * We hold the proc_dirs lock across the lookup so that the - * process rootdir and cwd are stable (i.e. the usecounts - * on them are mainatained for the duration of the lookup) */ - if (take_proc_lock) { - assert(proc_lock_taken == false); - proc_dirs_lock_shared(p); - proc_lock_taken = true; - } + proc_dirs_lock_shared(p); + lck_rw_lock_shared(rootvnode_rw_lock); + if (!(fdp->fd_flags & FD_CHROOT)) { ndp->ni_rootdir = rootvnode; } else { @@ -373,6 +371,8 @@ retry_copy: /* This should be a panic */ printf("fdp->fd_rdir is not set\n"); } + lck_rw_unlock_shared(rootvnode_rw_lock); + proc_dirs_unlock_shared(p); error = ENOENT; goto error_out; } @@ -390,16 +390,59 @@ retry_copy: } else if (cnp->cn_flags & USEDVP) { dp = ndp->ni_dvp; ndp->ni_usedvp = dp; + usedvp_dp = dp; } else { dp = vfs_context_cwd(ctx); } if (dp == NULLVP || (dp->v_lflag & VL_DEAD)) { dp = NULLVP; + lck_rw_unlock_shared(rootvnode_rw_lock); + proc_dirs_unlock_shared(p); error = ENOENT; goto error_out; } + /* + * We need our own usecount on the root vnode and the starting dir across + * the lookup. There's two things that be done here. We can hold the locks + * (which protect the existing usecounts on the directories) across the + * lookup or take our own usecount. Holding the locks across the lookup can + * cause deadlock issues if we re-enter namei on the same thread so the + * correct thing to do is to acquire our own usecount. + * + * Ideally, the usecount should be obtained by vnode_get->vnode_ref->vnode_put. + * However when this vnode is the rootvnode, that sequence will produce a + * lot of vnode mutex locks and unlocks on a single vnode (the rootvnode) + * and will be highly contended and degrade performance. Since we have + * an existing usecount protected by the locks we hold, we'll just use + * an atomic op to increment the usecount on a vnode which already has one + * and can't be released becasue we have the locks which protect against that + * happening. 
+ */ + rootdir_with_usecount = ndp->ni_rootdir; + old_count = os_atomic_inc_orig(&rootdir_with_usecount->v_usecount, relaxed); + if (old_count < 1) { + panic("(1) invalid pre-increment usecount (%d) for rootdir vnode %p", + old_count, rootdir_with_usecount); + } else if (old_count == INT32_MAX) { + panic("(1) usecount overflow for vnode %p", rootdir_with_usecount); + } + + if ((dp != rootdir_with_usecount) && (dp != usedvp_dp)) { + old_count = os_atomic_inc_orig(&dp->v_usecount, relaxed); + if (old_count < 1) { + panic("(2) invalid pre-increment usecount (%d) for vnode %p", old_count, dp); + } else if (old_count == INT32_MAX) { + panic("(2) usecount overflow for vnode %p", dp); + } + startdir_with_usecount = dp; + } + + /* Now that we have our usecount, release the locks */ + lck_rw_unlock_shared(rootvnode_rw_lock); + proc_dirs_unlock_shared(p); + ndp->ni_dvp = NULLVP; ndp->ni_vp = NULLVP; @@ -429,19 +472,51 @@ retry_copy: * Check for symbolic link */ if ((cnp->cn_flags & ISSYMLINK) == 0) { - if (proc_lock_taken) { - proc_dirs_unlock_shared(p); - proc_lock_taken = false; + if (startdir_with_usecount) { + vnode_rele(startdir_with_usecount); + startdir_with_usecount = NULLVP; + } + if (rootdir_with_usecount) { + lck_rw_lock_shared(rootvnode_rw_lock); + if (rootdir_with_usecount == rootvnode) { + old_count = os_atomic_dec_orig(&rootdir_with_usecount->v_usecount, relaxed); + if (old_count < 2) { + /* + * There needs to have been at least 1 usecount left on the rootvnode + */ + panic("(3) Unexpected pre-decrement value (%d) of usecount for rootvnode %p", + old_count, rootdir_with_usecount); + } + rootdir_with_usecount = NULLVP; + } + lck_rw_unlock_shared(rootvnode_rw_lock); + if (rootdir_with_usecount) { + vnode_rele(rootdir_with_usecount); + rootdir_with_usecount = NULLVP; + } } + return 0; } continue_symlink: /* Gives us a new path to process, and a starting dir */ - error = lookup_handle_symlink(ndp, &dp, ctx); + error = lookup_handle_symlink(ndp, &dp, &dp_has_iocount, ctx); if (error != 0) { break; } + if (dp_has_iocount) { + if ((dp != rootdir_with_usecount) && (dp != startdir_with_usecount) && + (dp != usedvp_dp)) { + if (startdir_with_usecount) { + vnode_rele(startdir_with_usecount); + } + vnode_ref_ext(dp, 0, VNODE_REF_FORCE); + startdir_with_usecount = dp; + } + vnode_put(dp); + dp_has_iocount = false; + } } /* * only come here if we fail to handle a SYMLINK... 
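The hunk above trades holding the proc_dirs and rootvnode locks across the whole lookup for a borrowed use count: while the locks are briefly held (which guarantees the directories already carry a use count), v_usecount is bumped with a relaxed atomic, the locks are dropped, and the extra count is released once the lookup finishes. A rough, self-contained model of that idiom is sketched below; it uses C11 atomics and a pthread rwlock in place of the kernel primitives, and take_extra_ref/drop_extra_ref are illustrative names only, not part of this patch.

    #include <assert.h>
    #include <pthread.h>
    #include <stdatomic.h>

    /* Stand-in for the reference-counted object (models v_usecount). */
    struct obj {
        atomic_int usecount;
    };

    /* Stand-in for rootvnode_rw_lock / the proc_dirs lock. */
    static pthread_rwlock_t obj_lock = PTHREAD_RWLOCK_INITIALIZER;

    /*
     * Borrow a reference: the shared lock guarantees an existing use
     * count, so a relaxed increment cannot race with the object being
     * released.  The lock is dropped before doing any long or possibly
     * re-entrant work.
     */
    static void
    take_extra_ref(struct obj *o)
    {
        pthread_rwlock_rdlock(&obj_lock);
        int old = atomic_fetch_add_explicit(&o->usecount, 1,
            memory_order_relaxed);
        assert(old >= 1);               /* must already be referenced */
        pthread_rwlock_unlock(&obj_lock);
    }

    static void
    drop_extra_ref(struct obj *o)
    {
        int old = atomic_fetch_sub_explicit(&o->usecount, 1,
            memory_order_relaxed);
        assert(old >= 2);               /* the original reference remains */
    }

    int
    main(void)
    {
        struct obj root = { 1 };        /* long-lived reference, e.g. the rootvnode */
        take_extra_ref(&root);
        /* ... the lookup runs here without holding obj_lock ... */
        drop_extra_ref(&root);
        return 0;
    }

The correctness argument is the one spelled out in the comment above: the guarantee comes from the lock proving that a reference already exists, not from the atomic's ordering, which is why a relaxed increment is sufficient.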
@@ -457,13 +532,32 @@ out_drop: vnode_put(ndp->ni_vp); } error_out: - if (proc_lock_taken) { - proc_dirs_unlock_shared(p); - proc_lock_taken = false; + if (startdir_with_usecount) { + vnode_rele(startdir_with_usecount); + startdir_with_usecount = NULLVP; + } + if (rootdir_with_usecount) { + lck_rw_lock_shared(rootvnode_rw_lock); + if (rootdir_with_usecount == rootvnode) { + old_count = os_atomic_dec_orig(&rootdir_with_usecount->v_usecount, relaxed); + if (old_count < 2) { + /* + * There needs to have been at least 1 usecount left on the rootvnode + */ + panic("(4) Unexpected pre-decrement value (%d) of usecount for rootvnode %p", + old_count, rootdir_with_usecount); + } + lck_rw_unlock_shared(rootvnode_rw_lock); + } else { + lck_rw_unlock_shared(rootvnode_rw_lock); + vnode_rele(rootdir_with_usecount); + } + rootdir_with_usecount = NULLVP; } + if ((cnp->cn_flags & HASBUF)) { cnp->cn_flags &= ~HASBUF; - FREE_ZONE(cnp->cn_pnbuf, cnp->cn_pnlen, M_NAMEI); + zfree(ZV_NAMEI, cnp->cn_pnbuf); } cnp->cn_pnbuf = NULL; ndp->ni_vp = NULLVP; @@ -1358,7 +1452,7 @@ lookup_traverse_union(vnode_t dvp, vnode_t *new_dvp, vfs_context_t ctx) { char *path = NULL, *pp; const char *name, *np; - int len; + size_t len; int error = 0; struct nameidata nd; vnode_t vp = dvp; @@ -1373,7 +1467,7 @@ lookup_traverse_union(vnode_t dvp, vnode_t *new_dvp, vfs_context_t ctx) return 0; } - path = (char *) kalloc(MAXPATHLEN); + path = (char *) zalloc(ZV_NAMEI); if (path == NULL) { error = ENOMEM; goto done; @@ -1395,7 +1489,7 @@ lookup_traverse_union(vnode_t dvp, vnode_t *new_dvp, vfs_context_t ctx) goto done; } len = strlen(name); - if ((len + 1) > (pp - path)) { // Enough space for this name ? + if ((len + 1) > (size_t)(pp - path)) { // Enough space for this name ? error = ENAMETOOLONG; vnode_putname(name); goto done; @@ -1421,7 +1515,7 @@ lookup_traverse_union(vnode_t dvp, vnode_t *new_dvp, vfs_context_t ctx) nameidone(&nd); done: if (path) { - kfree(path, MAXPATHLEN); + zfree(ZV_NAMEI, path); } return error; } @@ -1568,7 +1662,7 @@ out: * at which to start a lookup with a resolved path, and all other iocounts dropped. */ static int -lookup_handle_symlink(struct nameidata *ndp, vnode_t *new_dp, vfs_context_t ctx) +lookup_handle_symlink(struct nameidata *ndp, vnode_t *new_dp, bool *new_dp_has_iocount, vfs_context_t ctx) { int error; char *cp; /* pointer into pathname argument */ @@ -1587,6 +1681,7 @@ lookup_handle_symlink(struct nameidata *ndp, vnode_t *new_dp, vfs_context_t ctx) vnode_t dp; char *tmppn; u_int rsrclen = (cnp->cn_flags & CN_WANTSRSRCFORK) ? 
sizeof(_PATH_RSRCFORKSPEC) : 0; + bool dp_has_iocount = false; if (ndp->ni_loopcnt++ >= MAXSYMLINKS) { return ELOOP; @@ -1603,10 +1698,7 @@ lookup_handle_symlink(struct nameidata *ndp, vnode_t *new_dp, vfs_context_t ctx) } if (need_newpathbuf) { - MALLOC_ZONE(cp, char *, MAXPATHLEN, M_NAMEI, M_WAITOK); - if (cp == NULL) { - return ENOMEM; - } + cp = zalloc(ZV_NAMEI); } else { cp = cnp->cn_pnbuf; } @@ -1618,7 +1710,7 @@ lookup_handle_symlink(struct nameidata *ndp, vnode_t *new_dp, vfs_context_t ctx) error = VNOP_READLINK(ndp->ni_vp, auio, ctx); if (error) { if (need_newpathbuf) { - FREE_ZONE(cp, MAXPATHLEN, M_NAMEI); + zfree(ZV_NAMEI, cp); } return error; } @@ -1631,21 +1723,19 @@ lookup_handle_symlink(struct nameidata *ndp, vnode_t *new_dp, vfs_context_t ctx) linklen = MAXPATHLEN - (u_int)uio_resid(auio); if (linklen + ndp->ni_pathlen + rsrclen > MAXPATHLEN) { if (need_newpathbuf) { - FREE_ZONE(cp, MAXPATHLEN, M_NAMEI); + zfree(ZV_NAMEI, cp); } return ENAMETOOLONG; } if (need_newpathbuf) { - long len = cnp->cn_pnlen; - tmppn = cnp->cn_pnbuf; bcopy(ndp->ni_next, cp + linklen, ndp->ni_pathlen); cnp->cn_pnbuf = cp; cnp->cn_pnlen = MAXPATHLEN; if ((cnp->cn_flags & HASBUF)) { - FREE_ZONE(tmppn, len, M_NAMEI); + zfree(ZV_NAMEI, tmppn); } else { cnp->cn_flags |= HASBUF; } @@ -1663,11 +1753,10 @@ lookup_handle_symlink(struct nameidata *ndp, vnode_t *new_dp, vfs_context_t ctx) dp = ndp->ni_dvp; /* - * get rid of references returned via 'lookup' + * get rid of reference returned via 'lookup' + * ni_dvp is released only if we restart at /. */ vnode_put(ndp->ni_vp); - vnode_put(ndp->ni_dvp); /* ALWAYS have a dvp for a symlink */ - ndp->ni_vp = NULLVP; ndp->ni_dvp = NULLVP; @@ -1675,6 +1764,7 @@ lookup_handle_symlink(struct nameidata *ndp, vnode_t *new_dp, vfs_context_t ctx) * Check if symbolic link restarts us at the root */ if (*(cnp->cn_nameptr) == '/') { + vnode_put(dp); /* ALWAYS have a dvp for a symlink */ while (*(cnp->cn_nameptr) == '/') { cnp->cn_nameptr++; ndp->ni_pathlen--; @@ -1682,9 +1772,12 @@ lookup_handle_symlink(struct nameidata *ndp, vnode_t *new_dp, vfs_context_t ctx) if ((dp = ndp->ni_rootdir) == NULLVP) { return ENOENT; } + } else { + dp_has_iocount = true; } *new_dp = dp; + *new_dp_has_iocount = dp_has_iocount; return 0; } @@ -1815,124 +1908,109 @@ nameidone(struct nameidata *ndp) ndp->ni_cnd.cn_pnbuf = NULL; ndp->ni_cnd.cn_flags &= ~HASBUF; - FREE_ZONE(tmp, ndp->ni_cnd.cn_pnlen, M_NAMEI); + zfree(ZV_NAMEI, tmp); } } /* - * Log (part of) a pathname using the KERNEL_DEBUG_CONSTANT mechanism, as used - * by fs_usage. The path up to and including the current component name are - * logged. Up to NUMPARMS*4 bytes of pathname will be logged. If the path - * to be logged is longer than that, then the last NUMPARMS*4 bytes are logged. - * That is, the truncation removes the leading portion of the path. - * - * The logging is done via multiple KERNEL_DEBUG_CONSTANT calls. The first one - * is marked with DBG_FUNC_START. The last one is marked with DBG_FUNC_END - * (in addition to DBG_FUNC_START if it is also the first). There may be - * intermediate ones with neither DBG_FUNC_START nor DBG_FUNC_END. + * Log (part of) a pathname using kdebug, as used by fs_usage. The path up to + * and including the current component name are logged. Up to NUMPARMS * 4 + * bytes of pathname will be logged. If the path to be logged is longer than + * that, then the last NUMPARMS * 4 bytes are logged. That is, the truncation + * removes the leading portion of the path. 
* - * The first KERNEL_DEBUG_CONSTANT passes the vnode pointer and 12 bytes of - * pathname. The remaining KERNEL_DEBUG_CONSTANT calls add 16 bytes of pathname - * each. The minimum number of KERNEL_DEBUG_CONSTANT calls required to pass - * the path are used. Any excess padding in the final KERNEL_DEBUG_CONSTANT - * (because not all of the 12 or 16 bytes are needed for the remainder of the - * path) is set to zero bytes, or '>' if there is more path beyond the - * current component name (usually because an intermediate component was not - * found). + * The logging is done via multiple KDBG_RELEASE calls. The first one is marked + * with DBG_FUNC_START. The last one is marked with DBG_FUNC_END (in addition + * to DBG_FUNC_START if it is also the first). There may be intermediate ones + * with neither DBG_FUNC_START nor DBG_FUNC_END. * - * NOTE: If the path length is greater than NUMPARMS*4, or is not of the form - * 12+N*16, there will be no padding. + * The first event passes the vnode pointer and 24 or 32 (on K32, 12 or 24) + * bytes of pathname. The remaining events add 32 (on K32, 16) bytes of + * pathname each. The minimum number of events required to pass the path are + * used. Any excess padding in the final event (because not all of the 24 or 32 + * (on K32, 12 or 16) bytes are needed for the remainder of the path) is set to + * zero bytes, or '>' if there is more path beyond the current component name + * (usually because an intermediate component was not found). * - * TODO: If there is more path beyond the current component name, should we - * force some padding? For example, a lookup for /foo_bar_baz/spam that - * fails because /foo_bar_baz is not found will only log "/foo_bar_baz", with - * no '>' padding. But /foo_bar/spam would log "/foo_bar>>>>". + * NOTE: If the path length is greater than NUMPARMS * 4, or is not of the form + * 24 + N * 32 (or on K32, 12 + N * 16), there will be no padding. */ #if (KDEBUG_LEVEL >= KDEBUG_LEVEL_IST) void -kdebug_vfs_lookup(long *dbg_parms, int dbg_namelen, void *dp, uint32_t flags) +kdebug_vfs_lookup(unsigned long *path_words, int path_len, void *vnp, + uint32_t flags) { - int code; - unsigned int i; - bool lookup = flags & KDBG_VFS_LOOKUP_FLAG_LOOKUP; bool noprocfilt = flags & KDBG_VFS_LOOKUP_FLAG_NOPROCFILT; - /* - * In the event that we collect multiple, consecutive pathname - * entries, we must mark the start of the path's string and the end. - */ - if (lookup) { - code = VFS_LOOKUP | DBG_FUNC_START; - } else { - code = VFS_LOOKUP_DONE | DBG_FUNC_START; - } + assert(path_len >= 0); - if (dbg_namelen <= (int)(3 * sizeof(long))) { + int code = ((flags & KDBG_VFS_LOOKUP_FLAG_LOOKUP) ? 
VFS_LOOKUP : + VFS_LOOKUP_DONE) | DBG_FUNC_START; + + if (path_len <= (3 * (int)sizeof(long))) { code |= DBG_FUNC_END; } if (noprocfilt) { - KDBG_RELEASE_NOPROCFILT(code, kdebug_vnode(dp), dbg_parms[0], - dbg_parms[1], dbg_parms[2]); + KDBG_RELEASE_NOPROCFILT(code, kdebug_vnode(vnp), path_words[0], + path_words[1], path_words[2]); } else { - KDBG_RELEASE(code, kdebug_vnode(dp), dbg_parms[0], dbg_parms[1], - dbg_parms[2]); + KDBG_RELEASE(code, kdebug_vnode(vnp), path_words[0], path_words[1], + path_words[2]); } code &= ~DBG_FUNC_START; - for (i = 3, dbg_namelen -= (3 * sizeof(long)); dbg_namelen > 0; i += 4, dbg_namelen -= (4 * sizeof(long))) { - if (dbg_namelen <= (int)(4 * sizeof(long))) { + for (int i = 3; i * (int)sizeof(long) < path_len; i += 4) { + if ((i + 4) * (int)sizeof(long) >= path_len) { code |= DBG_FUNC_END; } if (noprocfilt) { - KDBG_RELEASE_NOPROCFILT(code, dbg_parms[i], dbg_parms[i + 1], - dbg_parms[i + 2], dbg_parms[i + 3]); + KDBG_RELEASE_NOPROCFILT(code, path_words[i], path_words[i + 1], + path_words[i + 2], path_words[i + 3]); } else { - KDBG_RELEASE(code, dbg_parms[i], dbg_parms[i + 1], dbg_parms[i + 2], - dbg_parms[i + 3]); + KDBG_RELEASE(code, path_words[i], path_words[i + 1], + path_words[i + 2], path_words[i + 3]); } } } void -kdebug_lookup_gen_events(long *dbg_parms, int dbg_namelen, void *dp, - bool lookup) +kdebug_lookup_gen_events(long *path_words, int path_len, void *vnp, bool lookup) { - kdebug_vfs_lookup(dbg_parms, dbg_namelen, dp, + assert(path_len >= 0); + kdebug_vfs_lookup((unsigned long *)path_words, path_len, vnp, lookup ? KDBG_VFS_LOOKUP_FLAG_LOOKUP : 0); } void -kdebug_lookup(vnode_t dp, struct componentname *cnp) +kdebug_lookup(vnode_t vnp, struct componentname *cnp) { - int dbg_namelen; - char *dbg_nameptr; - long dbg_parms[NUMPARMS]; - - /* Collect the pathname for tracing */ - dbg_namelen = (cnp->cn_nameptr - cnp->cn_pnbuf) + cnp->cn_namelen; - dbg_nameptr = cnp->cn_nameptr + cnp->cn_namelen; + unsigned long path_words[NUMPARMS]; - if (dbg_namelen > (int)sizeof(dbg_parms)) { - dbg_namelen = sizeof(dbg_parms); - } - dbg_nameptr -= dbg_namelen; + /* + * Truncate the leading portion of the path to fit in path_words. + */ + char *path_end = cnp->cn_nameptr + cnp->cn_namelen; + size_t path_len = MIN(path_end - cnp->cn_pnbuf, + (ssize_t)sizeof(path_words)); + assert(path_len >= 0); + char *path_trunc = path_end - path_len; - /* Copy the (possibly truncated) path itself */ - memcpy(dbg_parms, dbg_nameptr, dbg_namelen); + memcpy(path_words, path_trunc, path_len); - /* Pad with '\0' or '>' */ - if (dbg_namelen < (int)sizeof(dbg_parms)) { - memset((char *)dbg_parms + dbg_namelen, - *(cnp->cn_nameptr + cnp->cn_namelen) ? '>' : 0, - sizeof(dbg_parms) - dbg_namelen); + /* + * Pad with '\0' or '>'. + */ + if (path_len < (ssize_t)sizeof(path_words)) { + bool complete_str = *(cnp->cn_nameptr + cnp->cn_namelen) == '\0'; + memset((char *)path_words + path_len, complete_str ? 
'\0' : '>', + sizeof(path_words) - path_len); } - kdebug_vfs_lookup(dbg_parms, dbg_namelen, (void *)dp, - KDBG_VFS_LOOKUP_FLAG_LOOKUP); + kdebug_vfs_lookup(path_words, (int)path_len, vnp, KDBG_VFS_LOOKUP_FLAG_LOOKUP); } #else /* (KDEBUG_LEVEL >= KDEBUG_LEVEL_IST) */ @@ -1994,7 +2072,7 @@ vfs_getrealpath(const char * path, char * realpath, size_t bufsize, vfs_context_ struct mount *mp = NULL; char *str; char ch; - uint32_t id; + unsigned long id; ino64_t ino; int error; int length; @@ -2009,7 +2087,10 @@ vfs_getrealpath(const char * path, char * realpath, size_t bufsize, vfs_context_ } ch = *str; - mp = mount_lookupby_volfsid(id, 1); + if (id > INT_MAX) { + return ENOENT; + } + mp = mount_lookupby_volfsid((int)id, 1); if (mp == NULL) { return EINVAL; /* unexpected failure */ } @@ -2052,11 +2133,11 @@ vfs_getrealpath(const char * path, char * realpath, size_t bufsize, vfs_context_ realpath[0] = '\0'; /* Get the absolute path to this vnode. */ - error = build_path(vp, realpath, bufsize, &length, 0, ctx); + error = build_path(vp, realpath, (int)bufsize, &length, 0, ctx); vnode_put(vp); if (error == 0 && *str != '\0') { - int attempt = strlcat(realpath, str, MAXPATHLEN); + size_t attempt = strlcat(realpath, str, MAXPATHLEN); if (attempt > MAXPATHLEN) { error = ENAMETOOLONG; } diff --git a/bsd/vfs/vfs_quota.c b/bsd/vfs/vfs_quota.c index 63c4afcb5..b58c75e27 100644 --- a/bsd/vfs/vfs_quota.c +++ b/bsd/vfs/vfs_quota.c @@ -67,7 +67,7 @@ #include #include #include -#include +#include #include #include #include @@ -119,6 +119,8 @@ TAILQ_HEAD(dqfreelist, dquot) dqfreelist; */ TAILQ_HEAD(dqdirtylist, dquot) dqdirtylist; +ZONE_VIEW_DEFINE(ZV_DQUOT, "FS quota entries", KHEAP_ID_DEFAULT, + sizeof(struct dquot)); static int dqlookup(struct quotafile *, u_int32_t, struct dqblk *, u_int32_t *); static int dqsync_locked(struct dquot *dq); @@ -595,7 +597,7 @@ relookup: * but we found the dq we were looking for in * the cache the 2nd time through so free it */ - _FREE(ndq, M_DQUOT); + zfree(ZV_DQUOT, ndq); } *dqp = dq; @@ -620,12 +622,12 @@ relookup: } else if (numdquot < desireddquot) { if (ndq == NULL) { /* - * drop the quota list lock since MALLOC may block + * drop the quota list lock since zalloc may block */ dq_list_unlock(); - ndq = (struct dquot *)_MALLOC(sizeof *dq, M_DQUOT, M_WAITOK); - bzero((char *)ndq, sizeof *dq); + ndq = (struct dquot *)zalloc_flags(ZV_DQUOT, + Z_WAITOK | Z_ZERO); listlockval = dq_list_lock(); /* @@ -655,7 +657,7 @@ relookup: * but we're now at the limit of our cache size * so free it */ - _FREE(ndq, M_DQUOT); + zfree(ZV_DQUOT, ndq); } tablefull("dquot"); *dqp = NODQUOT; @@ -738,7 +740,7 @@ relookup: * but we didn't need it, so free it after * we've droped the quota list lock */ - _FREE(ndq, M_DQUOT); + zfree(ZV_DQUOT, ndq); } error = dqlookup(qfp, id, &dq->dq_dqb, &dq->dq_index); diff --git a/bsd/vfs/vfs_subr.c b/bsd/vfs/vfs_subr.c index 08459f529..4ae72ee3d 100644 --- a/bsd/vfs/vfs_subr.c +++ b/bsd/vfs/vfs_subr.c @@ -90,7 +90,7 @@ #include #include #include -#include +#include #include #include #include @@ -107,6 +107,7 @@ #include #include #include +#include #include #include @@ -128,7 +129,8 @@ #include /* kalloc()/kfree() */ #include /* delay_for_interval() */ #include /* OSAddAtomic() */ -#if !CONFIG_EMBEDDED +#include +#if defined(XNU_TARGET_OS_OSX) #include #endif @@ -155,6 +157,12 @@ extern lck_attr_t *trigger_vnode_lck_attr; extern lck_mtx_t * mnt_list_mtx_lock; +ZONE_DECLARE(specinfo_zone, "specinfo", + sizeof(struct specinfo), ZC_NOENCRYPT | ZC_ZFREE_CLEARMEM); + 
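These ZONE_DECLARE additions are part of the same conversion seen throughout this patch: per-type MALLOC_ZONE/FREE_ZONE call sites move to statically declared zones (or zone views such as ZV_NAMEI) that zero memory for the caller. Condensed from the specinfo hunk later in vfs_subr.c, the before/after shape of one representative site is roughly the following; this is a summary for orientation, not an additional change.

    /* before: tagged zone malloc plus an explicit bzero */
    MALLOC_ZONE(sin, struct specinfo *, sizeof(struct specinfo),
        M_SPECINFO, M_WAITOK);
    bzero(sin, sizeof(struct specinfo));
    /* ... */
    FREE_ZONE(sin, sizeof(struct specinfo), M_SPECINFO);

    /* after: a named zone declared once; zeroing is requested from the
     * allocator with Z_ZERO, and ZC_ZFREE_CLEARMEM clears elements again
     * when they are freed */
    ZONE_DECLARE(specinfo_zone, "specinfo",
        sizeof(struct specinfo), ZC_NOENCRYPT | ZC_ZFREE_CLEARMEM);

    sin = zalloc_flags(specinfo_zone, Z_WAITOK | Z_ZERO);
    /* ... */
    zfree(specinfo_zone, sin);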
+ZONE_DECLARE(vnode_zone, "vnodes", + sizeof(struct vnode), ZC_NOENCRYPT | ZC_NOGC | ZC_ZFREE_CLEARMEM); + enum vtype iftovt_tab[16] = { VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON, VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD, @@ -192,8 +200,6 @@ __private_extern__ void vntblinit(void); __private_extern__ int unlink1(vfs_context_t, vnode_t, user_addr_t, enum uio_seg, int); -extern int system_inshutdown; - static void vnode_list_add(vnode_t); static void vnode_async_list_add(vnode_t); static void vnode_list_remove(vnode_t); @@ -209,7 +215,6 @@ static void vnode_dropiocount(vnode_t); static vnode_t checkalias(vnode_t vp, dev_t nvp_rdev); static int vnode_reload(vnode_t); -static int vnode_isinuse_locked(vnode_t, int, int); static int unmount_callback(mount_t, __unused void *); @@ -265,8 +270,16 @@ static int vfs_unmountall_started = 0; * builds standalone. */ #define PLATFORM_DATA_VOLUME_MOUNT_POINT "/System/Volumes/Data" -#define PLATFORM_VM_VOLUME_MOUNT_POINT "/private/var/vm" +/* + * These could be in PlatformSupport but aren't yet + */ +#define PLATFORM_PREBOOT_VOLUME_MOUNT_POINT "/System/Volumes/Preboot" +#define PLATFORM_RECOVERY_VOLUME_MOUNT_POINT "/System/Volumes/Recovery" + +#if CONFIG_MOUNT_VM +#define PLATFORM_VM_VOLUME_MOUNT_POINT "/System/Volumes/VM" +#endif struct mntlist mountlist; /* mounted filesystem list */ static int nummounts = 0; @@ -371,6 +384,10 @@ vnode_waitforwrites(vnode_t vp, int output_target, int slpflag, int slptimeout, int error = 0; struct timespec ts; + if (output_target < 0) { + return EINVAL; + } + KERNEL_DEBUG(0x3010280 | DBG_FUNC_START, (int)vp, output_target, vp->v_numoutput, 0, 0); if (vp->v_numoutput > output_target) { @@ -616,7 +633,7 @@ vnode_iterate_clear(mount_t mp) mp->mnt_lflag &= ~MNT_LITER; } -#if !CONFIG_EMBEDDED +#if defined(__x86_64__) #include @@ -650,7 +667,7 @@ vnode_iterate_panic_hook(panic_hook_t *hook_) } panic_dump_mem((void *)(((vm_offset_t)hook->mp - 4096) & ~4095), 12288); } -#endif //CONFIG_EMBEDDED +#endif /* defined(__x86_64__) */ int vnode_iterate(mount_t mp, int flags, int (*callout)(struct vnode *, void *), @@ -685,7 +702,7 @@ vnode_iterate(mount_t mp, int flags, int (*callout)(struct vnode *, void *), return ret; } -#if !CONFIG_EMBEDDED +#if defined(__x86_64__) struct vnode_iterate_panic_hook hook; hook.mp = mp; hook.vp = NULL; @@ -694,7 +711,7 @@ vnode_iterate(mount_t mp, int flags, int (*callout)(struct vnode *, void *), /* iterate over all the vnodes */ while (!TAILQ_EMPTY(&mp->mnt_workerqueue)) { vp = TAILQ_FIRST(&mp->mnt_workerqueue); -#if !CONFIG_EMBEDDED +#if defined(__x86_64__) hook.vp = vp; #endif TAILQ_REMOVE(&mp->mnt_workerqueue, vp, v_mntvnodes); @@ -747,7 +764,7 @@ vnode_iterate(mount_t mp, int flags, int (*callout)(struct vnode *, void *), } out: -#if !CONFIG_EMBEDDED +#if defined(__x86_64__) panic_unhook(&hook.hook); #endif (void)vnode_iterate_reloadq(mp); @@ -1005,7 +1022,7 @@ vfs_rootmountfailed(mount_t mp) mac_mount_label_destroy(mp); #endif - FREE_ZONE(mp, sizeof(struct mount), M_MOUNT); + zfree(mount_zone, mp); } /* @@ -1019,9 +1036,7 @@ vfs_rootmountalloc_internal(struct vfstable *vfsp, const char *devname) { mount_t mp; - mp = _MALLOC_ZONE(sizeof(struct mount), M_MOUNT, M_WAITOK); - bzero((char *)mp, sizeof(struct mount)); - + mp = zalloc_flags(mount_zone, Z_WAITOK | Z_ZERO); /* Initialize the default IO constraints */ mp->mnt_maxreadcnt = mp->mnt_maxwritecnt = MAXPHYS; mp->mnt_segreadcnt = mp->mnt_segwritecnt = 32; @@ -1194,7 +1209,7 @@ vfs_mountroot(void) mp->mnt_kern_flag |= 
MNTK_UNMOUNT_PREFLIGHT; } -#if !CONFIG_EMBEDDED +#if defined(XNU_TARGET_OS_OSX) uint32_t speed; if (MNTK_VIRTUALDEV & mp->mnt_kern_flag) { @@ -1205,7 +1220,7 @@ vfs_mountroot(void) speed = 256; } vc_progress_setdiskspeed(speed); -#endif +#endif /* XNU_TARGET_OS_OSX */ /* * Probe root file system for additional features. */ @@ -1286,69 +1301,510 @@ fail: return ENODEV; } +static int +cache_purge_callback(mount_t mp, __unused void * arg) +{ + cache_purgevfs(mp); + return VFS_RETURNED; +} + +extern lck_rw_t * rootvnode_rw_lock; +extern void set_rootvnode(vnode_t); + + +static int +mntonname_fixup_callback(mount_t mp, __unused void *arg) +{ + int error = 0; + + if ((strncmp(&mp->mnt_vfsstat.f_mntonname[0], "/", sizeof("/")) == 0) || + (strncmp(&mp->mnt_vfsstat.f_mntonname[0], "/dev", sizeof("/dev")) == 0)) { + return 0; + } + + if ((error = vfs_busy(mp, LK_NOWAIT))) { + printf("vfs_busy failed with %d for %s\n", error, mp->mnt_vfsstat.f_mntonname); + return -1; + } + + int pathlen = MAXPATHLEN; + if ((error = vn_getpath_ext(mp->mnt_vnodecovered, NULL, mp->mnt_vfsstat.f_mntonname, &pathlen, VN_GETPATH_FSENTER))) { + printf("vn_getpath_ext failed with %d for mnt_vnodecovered of %s\n", error, mp->mnt_vfsstat.f_mntonname); + } + + vfs_unbusy(mp); + + return error; +} + +static int +clear_mntk_backs_root_callback(mount_t mp, __unused void *arg) +{ + lck_rw_lock_exclusive(&mp->mnt_rwlock); + mp->mnt_kern_flag &= ~MNTK_BACKS_ROOT; + lck_rw_done(&mp->mnt_rwlock); + return VFS_RETURNED; +} + +static int +verify_incoming_rootfs(vnode_t *incoming_rootvnodep, vfs_context_t ctx, + vfs_switch_root_flags_t flags) +{ + mount_t mp; + vnode_t tdp; + vnode_t incoming_rootvnode_with_iocount = *incoming_rootvnodep; + vnode_t incoming_rootvnode_with_usecount = NULLVP; + int error = 0; + + if (vnode_vtype(incoming_rootvnode_with_iocount) != VDIR) { + printf("Incoming rootfs path not a directory\n"); + error = ENOTDIR; + goto done; + } + + /* + * Before we call VFS_ROOT, we have to let go of the iocount already + * acquired, but before doing that get a usecount. 
+ */ + vnode_ref_ext(incoming_rootvnode_with_iocount, 0, VNODE_REF_FORCE); + incoming_rootvnode_with_usecount = incoming_rootvnode_with_iocount; + vnode_lock_spin(incoming_rootvnode_with_usecount); + if ((mp = incoming_rootvnode_with_usecount->v_mount)) { + mp->mnt_crossref++; + vnode_unlock(incoming_rootvnode_with_usecount); + } else { + vnode_unlock(incoming_rootvnode_with_usecount); + printf("Incoming rootfs root vnode does not have associated mount\n"); + error = ENOTDIR; + goto done; + } + + if (vfs_busy(mp, LK_NOWAIT)) { + printf("Incoming rootfs root vnode mount is busy\n"); + error = ENOENT; + goto out; + } + + vnode_put(incoming_rootvnode_with_iocount); + incoming_rootvnode_with_iocount = NULLVP; + + error = VFS_ROOT(mp, &tdp, ctx); + + if (error) { + printf("Could not get rootvnode of incoming rootfs\n"); + } else if (tdp != incoming_rootvnode_with_usecount) { + vnode_put(tdp); + tdp = NULLVP; + printf("Incoming rootfs root vnode mount is is not a mountpoint\n"); + error = EINVAL; + goto out_busy; + } else { + incoming_rootvnode_with_iocount = tdp; + tdp = NULLVP; + } + + if ((flags & VFSSR_VIRTUALDEV_PROHIBITED) != 0) { + lck_rw_lock_shared(&mp->mnt_rwlock); + if (mp->mnt_flag & MNTK_VIRTUALDEV) { + error = ENODEV; + } + lck_rw_done(&mp->mnt_rwlock); + if (error) { + printf("Incoming rootfs is backed by a virtual device; cannot switch to it"); + goto out_busy; + } + } + +out_busy: + vfs_unbusy(mp); + +out: + vnode_lock(incoming_rootvnode_with_usecount); + mp->mnt_crossref--; + if (mp->mnt_crossref < 0) { + panic("mount cross refs -ve"); + } + vnode_unlock(incoming_rootvnode_with_usecount); + +done: + if (incoming_rootvnode_with_usecount) { + vnode_rele(incoming_rootvnode_with_usecount); + incoming_rootvnode_with_usecount = NULLVP; + } + + if (error && incoming_rootvnode_with_iocount) { + vnode_put(incoming_rootvnode_with_iocount); + incoming_rootvnode_with_iocount = NULLVP; + } + + *incoming_rootvnodep = incoming_rootvnode_with_iocount; + return error; +} + /* - * Mount the data volume of an ROSV volume group + * vfs_switch_root() + * + * Move the current root volume, and put a different volume at the root. + * + * incoming_vol_old_path: This is the path where the incoming root volume + * is mounted when this function begins. + * outgoing_vol_new_path: This is the path where the outgoing root volume + * will be mounted when this function (successfully) ends. + * Note: Do not use a leading slash. + * + * Volumes mounted at several fixed points (including /dev) will be preserved + * at the same absolute path. That means they will move within the folder + * hierarchy during the pivot operation. For example, /dev before the pivot + * will be at /dev after the pivot. + * + * If any filesystem has MNTK_BACKS_ROOT set, it will be cleared. If the + * incoming root volume is actually a disk image backed by some other + * filesystem, it is the caller's responsibility to re-set MNTK_BACKS_ROOT + * as appropriate. 
*/ int -vfs_mount_rosv_data(void) +vfs_switch_root(const char *incoming_vol_old_path, + const char *outgoing_vol_new_path, + vfs_switch_root_flags_t flags) { -#if CONFIG_ROSV_STARTUP - int error = 0; - int do_rosv_mounts = 0; + // grumble grumble +#define countof(x) (sizeof(x) / sizeof(x[0])) - error = vnode_get(rootvnode); + struct preserved_mount { + vnode_t pm_rootvnode; + mount_t pm_mount; + vnode_t pm_new_covered_vp; + vnode_t pm_old_covered_vp; + const char *pm_path; + }; + + vfs_context_t ctx = vfs_context_kernel(); + vnode_t incoming_rootvnode = NULLVP; + vnode_t outgoing_vol_new_covered_vp = NULLVP; + vnode_t incoming_vol_old_covered_vp = NULLVP; + mount_t outgoing = NULL; + mount_t incoming = NULL; + + struct preserved_mount devfs = { NULLVP, NULL, NULLVP, NULLVP, "dev" }; + struct preserved_mount preboot = { NULLVP, NULL, NULLVP, NULLVP, "System/Volumes/Preboot" }; + struct preserved_mount recovery = { NULLVP, NULL, NULLVP, NULLVP, "System/Volumes/Recovery" }; + struct preserved_mount vm = { NULLVP, NULL, NULLVP, NULLVP, "System/Volumes/VM" }; + struct preserved_mount update = { NULLVP, NULL, NULLVP, NULLVP, "System/Volumes/Update" }; + struct preserved_mount iscPreboot = { NULLVP, NULL, NULLVP, NULLVP, "System/Volumes/iSCPreboot" }; + struct preserved_mount hardware = { NULLVP, NULL, NULLVP, NULLVP, "System/Volumes/Hardware" }; + struct preserved_mount xarts = { NULLVP, NULL, NULLVP, NULLVP, "System/Volumes/xarts" }; + struct preserved_mount factorylogs = { NULLVP, NULL, NULLVP, NULLVP, "FactoryLogs" }; + struct preserved_mount idiags = { NULLVP, NULL, NULLVP, NULLVP, "System/Volumes/Diags" }; + + struct preserved_mount *preserved[10]; + preserved[0] = &devfs; + preserved[1] = &preboot; + preserved[2] = &recovery; + preserved[3] = &vm; + preserved[4] = &update; + preserved[5] = &iscPreboot; + preserved[6] = &hardware; + preserved[7] = &xarts; + preserved[8] = &factorylogs; + preserved[9] = &idiags; + + int error; + + printf("%s : shuffling mount points : %s <-> / <-> %s\n", __FUNCTION__, incoming_vol_old_path, outgoing_vol_new_path); + + if (outgoing_vol_new_path[0] == '/') { + // I should have written this to be more helpful and just advance the pointer forward past the slash + printf("Do not use a leading slash in outgoing_vol_new_path\n"); + return EINVAL; + } + + // Set incoming_rootvnode. + // Find the vnode representing the mountpoint of the new root + // filesystem. That will be the new root directory. + error = vnode_lookup(incoming_vol_old_path, 0, &incoming_rootvnode, ctx); if (error) { - /* root must be mounted first */ - printf("vnode_get(rootvnode) failed with error %d\n", error); - return error; + printf("Incoming rootfs root vnode not found\n"); + error = ENOENT; + goto done; } - printf("NOTE: Attempting ROSV mount\n"); - struct vfs_attr vfsattr; - VFSATTR_INIT(&vfsattr); - VFSATTR_WANTED(&vfsattr, f_capabilities); - if (vfs_getattr(rootvnode->v_mount, &vfsattr, vfs_context_kernel()) == 0 && - VFSATTR_IS_SUPPORTED(&vfsattr, f_capabilities)) { - if ((vfsattr.f_capabilities.capabilities[VOL_CAPABILITIES_FORMAT] & VOL_CAP_FMT_VOL_GROUPS) && - (vfsattr.f_capabilities.valid[VOL_CAPABILITIES_FORMAT] & VOL_CAP_FMT_VOL_GROUPS)) { - printf("NOTE: DETECTED ROSV CONFIG\n"); - do_rosv_mounts = 1; + /* + * This function drops the icoount and sets the vnode to NULL on error. + */ + error = verify_incoming_rootfs(&incoming_rootvnode, ctx, flags); + if (error) { + goto done; + } + + /* + * Set outgoing_vol_new_covered_vp. 
+ * Find the vnode representing the future mountpoint of the old + * root filesystem, inside the directory incoming_rootvnode. + * Right now it's at "/incoming_vol_old_path/outgoing_vol_new_path". + * soon it will become "/oldrootfs_path_after", which will be covered. + */ + error = vnode_lookupat(outgoing_vol_new_path, 0, &outgoing_vol_new_covered_vp, ctx, incoming_rootvnode); + if (error) { + printf("Outgoing rootfs path not found, abandoning / switch, error = %d\n", error); + error = ENOENT; + goto done; + } + if (vnode_vtype(outgoing_vol_new_covered_vp) != VDIR) { + printf("Outgoing rootfs path is not a directory, abandoning / switch\n"); + error = ENOTDIR; + goto done; + } + + /* + * Find the preserved mounts - see if they are mounted. Get their root + * vnode if they are. If they aren't, leave rootvnode NULL which will + * be the signal to ignore this mount later on. + * + * Also get preserved mounts' new_covered_vp. + * Find the node representing the folder "dev" inside the directory newrootvnode. + * Right now it's at "/incoming_vol_old_path/dev". + * Soon it will become /dev, which will be covered by the devfs mountpoint. + */ + for (size_t i = 0; i < countof(preserved); i++) { + struct preserved_mount *pmi = preserved[i]; + + error = vnode_lookupat(pmi->pm_path, 0, &pmi->pm_rootvnode, ctx, rootvnode); + if (error) { + printf("skipping preserved mountpoint because not found or error: %d: %s\n", error, pmi->pm_path); + // not fatal. try the next one in the list. + continue; + } + bool is_mountpoint = false; + vnode_lock_spin(pmi->pm_rootvnode); + if ((pmi->pm_rootvnode->v_flag & VROOT) != 0) { + is_mountpoint = true; + } + vnode_unlock(pmi->pm_rootvnode); + if (!is_mountpoint) { + printf("skipping preserved mountpoint because not a mountpoint: %s\n", pmi->pm_path); + vnode_put(pmi->pm_rootvnode); + pmi->pm_rootvnode = NULLVP; + // not fatal. try the next one in the list. + continue; + } + + error = vnode_lookupat(pmi->pm_path, 0, &pmi->pm_new_covered_vp, ctx, incoming_rootvnode); + if (error) { + printf("preserved new mount directory not found or error: %d: %s\n", error, pmi->pm_path); + error = ENOENT; + goto done; + } + if (vnode_vtype(pmi->pm_new_covered_vp) != VDIR) { + printf("preserved new mount directory not directory: %s\n", pmi->pm_path); + error = ENOTDIR; + goto done; } + + printf("will preserve mountpoint across pivot: /%s\n", pmi->pm_path); } - if (!do_rosv_mounts) { - vnode_put(rootvnode); - //bail out if config not supported - return 0; + /* + * -- + * At this point, everything has been prepared and all error conditions + * have been checked. We check everything we can before this point; + * from now on we start making destructive changes, and we can't stop + * until we reach the end. + * ---- + */ + + /* this usecount is transferred to the mnt_vnodecovered */ + vnode_ref_ext(outgoing_vol_new_covered_vp, 0, VNODE_REF_FORCE); + /* this usecount is transferred to set_rootvnode */ + vnode_ref_ext(incoming_rootvnode, 0, VNODE_REF_FORCE); + + + for (size_t i = 0; i < countof(preserved); i++) { + struct preserved_mount *pmi = preserved[i]; + if (pmi->pm_rootvnode == NULLVP) { + continue; + } + + /* this usecount is transferred to the mnt_vnodecovered */ + vnode_ref_ext(pmi->pm_new_covered_vp, 0, VNODE_REF_FORCE); + + /* The new_covered_vp is a mountpoint from now on. 
*/ + vnode_lock_spin(pmi->pm_new_covered_vp); + pmi->pm_new_covered_vp->v_flag |= VMOUNT; + vnode_unlock(pmi->pm_new_covered_vp); } - char datapath[] = PLATFORM_DATA_VOLUME_MOUNT_POINT; /* !const because of internal casting */ + /* The outgoing_vol_new_covered_vp is a mountpoint from now on. */ + vnode_lock_spin(outgoing_vol_new_covered_vp); + outgoing_vol_new_covered_vp->v_flag |= VMOUNT; + vnode_unlock(outgoing_vol_new_covered_vp); - /* Mount the data volume */ - printf("attempting kernel mount for data volume... \n"); - error = kernel_mount(rootvnode->v_mount->mnt_vfsstat.f_fstypename, NULLVP, NULLVP, - datapath, (rootvnode->v_mount), 0, 0, (KERNEL_MOUNT_DATAVOL), vfs_context_kernel()); - if (error) { - printf("Failed to mount data volume (%d)\n", error); + /* + * Identify the mount_ts of the mounted filesystems that are being + * manipulated: outgoing rootfs, incoming rootfs, and the preserved + * mounts. + */ + outgoing = rootvnode->v_mount; + incoming = incoming_rootvnode->v_mount; + for (size_t i = 0; i < countof(preserved); i++) { + struct preserved_mount *pmi = preserved[i]; + if (pmi->pm_rootvnode == NULLVP) { + continue; + } + + pmi->pm_mount = pmi->pm_rootvnode->v_mount; } - vnode_put(rootvnode); + lck_rw_lock_exclusive(rootvnode_rw_lock); - return error; + /* Setup incoming as the new rootfs */ + lck_rw_lock_exclusive(&incoming->mnt_rwlock); + incoming_vol_old_covered_vp = incoming->mnt_vnodecovered; + incoming->mnt_vnodecovered = NULLVP; + strlcpy(incoming->mnt_vfsstat.f_mntonname, "/", MAXPATHLEN); + incoming->mnt_flag |= MNT_ROOTFS; + lck_rw_done(&incoming->mnt_rwlock); -#else - return 0; -#endif + /* + * The preserved mountpoints will now be moved to + * incoming_rootnode/pm_path, and then by the end of the function, + * since incoming_rootnode is going to /, the preserved mounts + * will be end up back at /pm_path + */ + for (size_t i = 0; i < countof(preserved); i++) { + struct preserved_mount *pmi = preserved[i]; + if (pmi->pm_rootvnode == NULLVP) { + continue; + } + + lck_rw_lock_exclusive(&pmi->pm_mount->mnt_rwlock); + pmi->pm_old_covered_vp = pmi->pm_mount->mnt_vnodecovered; + pmi->pm_mount->mnt_vnodecovered = pmi->pm_new_covered_vp; + vnode_lock_spin(pmi->pm_new_covered_vp); + pmi->pm_new_covered_vp->v_mountedhere = pmi->pm_mount; + vnode_unlock(pmi->pm_new_covered_vp); + lck_rw_done(&pmi->pm_mount->mnt_rwlock); + } + + /* + * The old root volume now covers outgoing_vol_new_covered_vp + * on the new root volume. Remove the ROOTFS marker. + * Now it is to be found at outgoing_vol_new_path + */ + lck_rw_lock_exclusive(&outgoing->mnt_rwlock); + outgoing->mnt_vnodecovered = outgoing_vol_new_covered_vp; + strlcpy(outgoing->mnt_vfsstat.f_mntonname, "/", MAXPATHLEN); + strlcat(outgoing->mnt_vfsstat.f_mntonname, outgoing_vol_new_path, MAXPATHLEN); + outgoing->mnt_flag &= ~MNT_ROOTFS; + vnode_lock_spin(outgoing_vol_new_covered_vp); + outgoing_vol_new_covered_vp->v_mountedhere = outgoing; + vnode_unlock(outgoing_vol_new_covered_vp); + lck_rw_done(&outgoing->mnt_rwlock); + + /* + * Finally, remove the mount_t linkage from the previously covered + * vnodes on the old root volume. These were incoming_vol_old_path, + * and each preserved mounts's "/pm_path". The filesystems previously + * mounted there have already been moved away. 
+ */ + vnode_lock_spin(incoming_vol_old_covered_vp); + incoming_vol_old_covered_vp->v_flag &= ~VMOUNT; + incoming_vol_old_covered_vp->v_mountedhere = NULL; + vnode_unlock(incoming_vol_old_covered_vp); + + for (size_t i = 0; i < countof(preserved); i++) { + struct preserved_mount *pmi = preserved[i]; + if (pmi->pm_rootvnode == NULLVP) { + continue; + } + + vnode_lock_spin(pmi->pm_old_covered_vp); + pmi->pm_old_covered_vp->v_flag &= ~VMOUNT; + pmi->pm_old_covered_vp->v_mountedhere = NULL; + vnode_unlock(pmi->pm_old_covered_vp); + } + + /* + * Clear the name cache since many cached names are now invalid. + */ + vfs_iterate(0 /* flags */, cache_purge_callback, NULL); + + /* + * Actually change the rootvnode! And finally drop the lock that + * prevents concurrent vnode_lookups. + */ + set_rootvnode(incoming_rootvnode); + lck_rw_unlock_exclusive(rootvnode_rw_lock); + + if (!(incoming->mnt_kern_flag & MNTK_VIRTUALDEV) && + !(outgoing->mnt_kern_flag & MNTK_VIRTUALDEV)) { + /* + * Switch the order of mount structures in the mountlist, new root + * mount moves to the head of the list followed by /dev and the other + * preserved mounts then all the preexisting mounts (old rootfs + any + * others) + */ + mount_list_lock(); + for (size_t i = 0; i < countof(preserved); i++) { + struct preserved_mount *pmi = preserved[i]; + if (pmi->pm_rootvnode == NULLVP) { + continue; + } + + TAILQ_REMOVE(&mountlist, pmi->pm_mount, mnt_list); + TAILQ_INSERT_HEAD(&mountlist, pmi->pm_mount, mnt_list); + } + TAILQ_REMOVE(&mountlist, incoming, mnt_list); + TAILQ_INSERT_HEAD(&mountlist, incoming, mnt_list); + mount_list_unlock(); + } + + /* + * Fixups across all volumes + */ + vfs_iterate(0 /* flags */, mntonname_fixup_callback, NULL); + vfs_iterate(0 /* flags */, clear_mntk_backs_root_callback, NULL); + + error = 0; + +done: + for (size_t i = 0; i < countof(preserved); i++) { + struct preserved_mount *pmi = preserved[i]; + + if (pmi->pm_rootvnode) { + vnode_put(pmi->pm_rootvnode); + } + if (pmi->pm_new_covered_vp) { + vnode_put(pmi->pm_new_covered_vp); + } + if (pmi->pm_old_covered_vp) { + vnode_rele(pmi->pm_old_covered_vp); + } + } + + if (outgoing_vol_new_covered_vp) { + vnode_put(outgoing_vol_new_covered_vp); + } + + if (incoming_vol_old_covered_vp) { + vnode_rele(incoming_vol_old_covered_vp); + } + + if (incoming_rootvnode) { + vnode_put(incoming_rootvnode); + } + + printf("%s : done shuffling mount points with error: %d\n", __FUNCTION__, error); + return error; } /* - * Mount the VM volume of a container + * Mount the Recovery volume of a container */ int -vfs_mount_vm(void) +vfs_mount_recovery(void) { -#if CONFIG_MOUNT_VM +#if CONFIG_MOUNT_PREBOOTRECOVERY int error = 0; error = vnode_get(rootvnode); @@ -1358,17 +1814,17 @@ vfs_mount_vm(void) return error; } - char vmpath[] = PLATFORM_VM_VOLUME_MOUNT_POINT; /* !const because of internal casting */ + char recoverypath[] = PLATFORM_RECOVERY_VOLUME_MOUNT_POINT; /* !const because of internal casting */ - /* Mount the VM volume */ - printf("attempting kernel mount for vm volume... \n"); + /* Mount the recovery volume */ + printf("attempting kernel mount for recovery volume... 
\n");
error = kernel_mount(rootvnode->v_mount->mnt_vfsstat.f_fstypename, NULLVP, NULLVP,
- vmpath, (rootvnode->v_mount), 0, 0, (KERNEL_MOUNT_VMVOL), vfs_context_kernel());
+ recoverypath, (rootvnode->v_mount), 0, 0, (KERNEL_MOUNT_RECOVERYVOL), vfs_context_kernel());
if (error) {
- printf("Failed to mount vm volume (%d)\n", error);
+ printf("Failed to mount recovery volume (%d)\n", error);
} else {
- printf("mounted VM volume\n");
+ printf("mounted recovery volume\n");
}
vnode_put(rootvnode);
@@ -1645,12 +2101,12 @@ found_alias:
}
if (vp == NULL || vp->v_tag != VT_NON) {
if (sin == NULL) {
- MALLOC_ZONE(sin, struct specinfo *, sizeof(struct specinfo),
- M_SPECINFO, M_WAITOK);
+ sin = zalloc_flags(specinfo_zone, Z_WAITOK | Z_ZERO);
+ } else {
+ bzero(sin, sizeof(struct specinfo));
}
nvp->v_specinfo = sin;
- bzero(nvp->v_specinfo, sizeof(struct specinfo));
nvp->v_rdev = nvp_rdev;
nvp->v_specflags = 0;
nvp->v_speclastr = -1;
@@ -1689,7 +2145,7 @@ found_alias:
}
if (sin) {
- FREE_ZONE(sin, sizeof(struct specinfo), M_SPECINFO);
+ zfree(specinfo_zone, sin);
}
if ((vp->v_flag & (VBDEVVP | VDEVFLUSH)) != 0) {
@@ -1774,7 +2230,9 @@ vnode_ref_ext(vnode_t vp, int fmode, int flags)
}
}
}
- vp->v_usecount++;
+
+ /* Enable atomic ops on v_usecount without the vnode lock */
+ os_atomic_inc(&vp->v_usecount, relaxed);
if (fmode & FWRITE) {
if (++vp->v_writecount <= 0) {
@@ -1822,17 +2280,25 @@ out:
boolean_t
vnode_on_reliable_media(vnode_t vp)
{
- if (!(vp->v_mount->mnt_kern_flag & MNTK_VIRTUALDEV) && (vp->v_mount->mnt_flag & MNT_LOCAL)) {
+ mount_t mp = vp->v_mount;
+
+ /*
+ * A NULL mountpoint would imply it's not attached to any filesystem.
+ * This can only happen with a vnode created by bdevvp(). We'll consider
+ * those reliable, as the primary use of this function is to determine
+ * which vnodes are to be handed off to the async cleaner thread for
+ * reclaim.
+ */
+ if (!mp || (!(mp->mnt_kern_flag & MNTK_VIRTUALDEV) && (mp->mnt_flag & MNT_LOCAL))) {
return TRUE;
}
+
return FALSE;
}
static void
-vnode_async_list_add(vnode_t vp)
+vnode_async_list_add_locked(vnode_t vp)
{
- vnode_list_lock();
-
if (VONLIST(vp) || (vp->v_lflag & (VL_TERMINATE | VL_DEAD))) {
panic("vnode_async_list_add: %p is in wrong state", vp);
}
@@ -1841,6 +2307,14 @@ vnode_async_list_add(vnode_t vp)
vp->v_listflag |= VLIST_ASYNC_WORK;
async_work_vnodes++;
+}
+
+static void
+vnode_async_list_add(vnode_t vp)
+{
+ vnode_list_lock();
+
+ vnode_async_list_add_locked(vp);
vnode_list_unlock();
@@ -2030,6 +2504,8 @@ vnode_rele_ext(vnode_t vp, int fmode, int dont_reenter)
void
vnode_rele_internal(vnode_t vp, int fmode, int dont_reenter, int locked)
{
+ int32_t old_usecount;
+
if (!locked) {
vnode_lock_spin(vp);
}
@@ -2038,8 +2514,17 @@ vnode_rele_internal(vnode_t vp, int fmode, int dont_reenter, int locked)
lck_mtx_assert(&vp->v_lock, LCK_MTX_ASSERT_OWNED);
}
#endif
- if (--vp->v_usecount < 0) {
- panic("vnode_rele_ext: vp %p usecount -ve : %d. v_tag = %d, v_type = %d, v_flag = %x.", vp, vp->v_usecount, vp->v_tag, vp->v_type, vp->v_flag);
+ /* Enable atomic ops on v_usecount without the vnode lock */
+ old_usecount = os_atomic_dec_orig(&vp->v_usecount, relaxed);
+ if (old_usecount < 1) {
+ /*
+ * Because we allow atomic ops on usecount (in lookup only, under
+ * specific conditions of already having a usecount) it is
+ * possible that when the vnode is examined, its usecount is
+ * different than what will be printed in this panic message.
+ */
+ panic("vnode_rele_ext: vp %p usecount -ve : %d. 
v_tag = %d, v_type = %d, v_flag = %x.", + vp, old_usecount - 1, vp->v_tag, vp->v_type, vp->v_flag); } if (fmode & FWRITE) { @@ -2402,6 +2887,10 @@ vclean(vnode_t vp, int flags) clflags |= IO_REVOKE; } +#if CONFIG_MACF + mac_vnode_notify_reclaim(vp); +#endif + if (active && (flags & DOCLOSE)) { VNOP_CLOSE(vp, clflags, ctx); } @@ -2688,7 +3177,7 @@ vgone(vnode_t vp, int flags) { struct specinfo *tmp = vp->v_specinfo; vp->v_specinfo = NULL; - FREE_ZONE(tmp, sizeof(struct specinfo), M_SPECINFO); + zfree(specinfo_zone, tmp); } } } @@ -2967,7 +3456,7 @@ static int max_ext_width; static int extension_cmp(const void *a, const void *b) { - return strlen((const char *)a) - strlen((const char *)b); + return (int)(strlen((const char *)a) - strlen((const char *)b)); } @@ -2989,6 +3478,7 @@ __private_extern__ int set_package_extensions_table(user_addr_t data, int nentries, int maxwidth) { char *new_exts, *old_exts; + int old_nentries = 0, old_maxwidth = 0; int error; if (nentries <= 0 || nentries > 1024 || maxwidth <= 0 || maxwidth > 255) { @@ -2997,14 +3487,15 @@ set_package_extensions_table(user_addr_t data, int nentries, int maxwidth) // allocate one byte extra so we can guarantee null termination - MALLOC(new_exts, char *, (nentries * maxwidth) + 1, M_TEMP, M_WAITOK); + new_exts = kheap_alloc(KHEAP_DATA_BUFFERS, (nentries * maxwidth) + 1, + Z_WAITOK); if (new_exts == NULL) { return ENOMEM; } error = copyin(data, new_exts, nentries * maxwidth); if (error) { - FREE(new_exts, M_TEMP); + kheap_free(KHEAP_DATA_BUFFERS, new_exts, (nentries * maxwidth) + 1); return error; } @@ -3015,15 +3506,16 @@ set_package_extensions_table(user_addr_t data, int nentries, int maxwidth) lck_mtx_lock(pkg_extensions_lck); old_exts = extension_table; + old_nentries = nexts; + old_maxwidth = max_ext_width; extension_table = new_exts; nexts = nentries; max_ext_width = maxwidth; lck_mtx_unlock(pkg_extensions_lck); - if (old_exts) { - FREE(old_exts, M_TEMP); - } + kheap_free(KHEAP_DATA_BUFFERS, old_exts, + (old_nentries * old_maxwidth) + 1); return 0; } @@ -3032,10 +3524,14 @@ set_package_extensions_table(user_addr_t data, int nentries, int maxwidth) int is_package_name(const char *name, int len) { - int i, extlen; + int i; + size_t extlen; const char *ptr, *name_ext; - if (len <= 3) { + // if the name is less than 3 bytes it can't be of the + // form A.B and if it begins with a "." then it is also + // not a package. 
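+ // For example (illustrative names only): "a.b" (length 3) and ".app" are
+ // rejected by the check below, while "Foo.app" goes on to the
+ // extension-table lookup further down.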
+ if (len <= 3 || name[0] == '.') { return 0; } @@ -3079,6 +3575,10 @@ vn_path_package_check(__unused vnode_t vp, char *path, int pathlen, int *compone char *ptr, *end; int comp = 0; + if (pathlen < 0) { + return EINVAL; + } + *component = -1; if (*path != '/') { return EINVAL; @@ -3102,7 +3602,7 @@ vn_path_package_check(__unused vnode_t vp, char *path, int pathlen, int *compone } *end = '\0'; - if (is_package_name(ptr, end - ptr)) { + if (is_package_name(ptr, (int)(end - ptr))) { *component = comp; break; } @@ -3126,6 +3626,10 @@ vn_searchfs_inappropriate_name(const char *name, int len) int bad_len[] = { 6 }; int i; + if (len < 0) { + return EINVAL; + } + for (i = 0; i < (int) (sizeof(bad_names) / sizeof(bad_names[0])); i++) { if (len == bad_len[i] && strncmp(name, bad_names[i], strlen(bad_names[i]) + 1) == 0) { return 1; @@ -3322,10 +3826,8 @@ unmount_callback(mount_t mp, void *arg) mount_ref(mp, 0); mount_iterdrop(mp); // avoid vfs_iterate deadlock in dounmount() - MALLOC_ZONE(mntname, void *, MAXPATHLEN, M_NAMEI, M_WAITOK); - if (mntname) { - strlcpy(mntname, mp->mnt_vfsstat.f_mntonname, MAXPATHLEN); - } + mntname = zalloc(ZV_NAMEI); + strlcpy(mntname, mp->mnt_vfsstat.f_mntonname, MAXPATHLEN); error = dounmount(mp, MNT_FORCE, 1, vfs_context_current()); if (error) { @@ -3336,7 +3838,7 @@ unmount_callback(mount_t mp, void *arg) } } if (mntname) { - FREE_ZONE(mntname, MAXPATHLEN, M_NAMEI); + zfree(ZV_NAMEI, mntname); } return VFS_RETURNED; @@ -3562,7 +4064,7 @@ vfs_init_io_attributes(vnode_t devvp, mount_t mp) } if (readmaxcnt) { - mp->mnt_maxreadcnt = (readmaxcnt > UINT32_MAX) ? UINT32_MAX : readmaxcnt; + mp->mnt_maxreadcnt = (readmaxcnt > UINT32_MAX) ? UINT32_MAX :(uint32_t) readmaxcnt; } if (readblockcnt) { @@ -3575,7 +4077,7 @@ vfs_init_io_attributes(vnode_t devvp, mount_t mp) } if (writemaxcnt) { - mp->mnt_maxwritecnt = (writemaxcnt > UINT32_MAX) ? UINT32_MAX : writemaxcnt; + mp->mnt_maxwritecnt = (writemaxcnt > UINT32_MAX) ? UINT32_MAX : (uint32_t)writemaxcnt; } if (writeblockcnt) { @@ -3628,7 +4130,7 @@ vfs_init_io_attributes(vnode_t devvp, mount_t mp) } else { temp = 0; } - mp->mnt_alignmentmask = temp; + mp->mnt_alignmentmask = (uint32_t)temp; if (ioqueue_depth > MNT_DEFAULT_IOQUEUE_DEPTH) { @@ -3637,7 +4139,7 @@ vfs_init_io_attributes(vnode_t devvp, mount_t mp) temp = MNT_DEFAULT_IOQUEUE_DEPTH; } - mp->mnt_ioqueue_depth = temp; + mp->mnt_ioqueue_depth = (uint32_t)temp; mp->mnt_ioscale = MNT_IOSCALE(mp->mnt_ioqueue_depth); if (mp->mnt_ioscale > 1) { @@ -3763,7 +4265,7 @@ mount_fillfsids(fsid_t *fsidlst, int count) actual = 0; mount_list_lock(); TAILQ_FOREACH(mp, &mountlist, mnt_list) { - if (actual <= count) { + if (actual < count) { fsidlst[actual] = mp->mnt_vfsstat.f_fsid; actual++; } @@ -3780,7 +4282,7 @@ mount_fillfsids(fsid_t *fsidlst, int count) * having *actual filled out even in the error case is depended upon. 
*/ static int -sysctl_vfs_getvfslist(fsid_t *fsidlst, int count, int *actual) +sysctl_vfs_getvfslist(fsid_t *fsidlst, unsigned long count, unsigned long *actual) { struct mount *mp; @@ -3800,7 +4302,8 @@ static int sysctl_vfs_vfslist(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req) { - int actual, error; + unsigned long actual; + int error; size_t space; fsid_t *fsidlst; @@ -3827,7 +4330,7 @@ again: return ENOMEM; } - MALLOC(fsidlst, fsid_t *, req->oldlen, M_TEMP, M_WAITOK | M_ZERO); + fsidlst = kheap_alloc(KHEAP_TEMP, req->oldlen, Z_WAITOK | Z_ZERO); if (fsidlst == NULL) { return ENOMEM; } @@ -3839,14 +4342,14 @@ again: * slept in malloc above. If this is the case then try again. */ if (error == ENOMEM) { - FREE(fsidlst, M_TEMP); + kheap_free(KHEAP_TEMP, fsidlst, req->oldlen); req->oldlen = space; goto again; } if (error == 0) { error = SYSCTL_OUT(req, fsidlst, actual * sizeof(fsid_t)); } - FREE(fsidlst, M_TEMP); + kheap_free(KHEAP_TEMP, fsidlst, req->oldlen); return error; } @@ -3860,11 +4363,22 @@ sysctl_vfs_ctlbyfsid(__unused struct sysctl_oid *oidp, void *arg1, int arg2, union union_vfsidctl vc; struct mount *mp; struct vfsstatfs *sp; - int *name, flags, namelen; + int *name, namelen; + int flags = 0; int error = 0, gotref = 0; vfs_context_t ctx = vfs_context_current(); proc_t p = req->p; /* XXX req->p != current_proc()? */ boolean_t is_64_bit; + union { + struct statfs64 sfs64; + struct user64_statfs osfs64; + struct user32_statfs osfs32; + } *sfsbuf; + + if (req->newptr == USER_ADDR_NULL) { + error = EINVAL; + goto out; + } name = arg1; namelen = arg2; @@ -3913,6 +4427,12 @@ sysctl_vfs_ctlbyfsid(__unused struct sysctl_oid *oidp, void *arg1, int arg2, } switch (name[0]) { case VFS_CTL_UMOUNT: +#if CONFIG_MACF + error = mac_mount_check_umount(ctx, mp); + if (error != 0) { + goto out; + } +#endif req->newidx = 0; if (is_64_bit) { req->newptr = vc.vc64.vc_ptr; @@ -3932,7 +4452,8 @@ sysctl_vfs_ctlbyfsid(__unused struct sysctl_oid *oidp, void *arg1, int arg2, /* safedounmount consumes a ref */ error = safedounmount(mp, flags, ctx); break; - case VFS_CTL_STATFS: + case VFS_CTL_OSTATFS: + case VFS_CTL_STATFS64: #if CONFIG_MACF error = mac_mount_check_stat(ctx, mp); if (error != 0) { @@ -3956,37 +4477,48 @@ sysctl_vfs_ctlbyfsid(__unused struct sysctl_oid *oidp, void *arg1, int arg2, (error = vfs_update_vfsstat(mp, ctx, VFS_USER_EVENT))) { goto out; } - if (is_64_bit) { - struct user64_statfs sfs; - bzero(&sfs, sizeof(sfs)); - sfs.f_flags = mp->mnt_flag & MNT_VISFLAGMASK; - sfs.f_type = mp->mnt_vtable->vfc_typenum; - sfs.f_bsize = (user64_long_t)sp->f_bsize; - sfs.f_iosize = (user64_long_t)sp->f_iosize; - sfs.f_blocks = (user64_long_t)sp->f_blocks; - sfs.f_bfree = (user64_long_t)sp->f_bfree; - sfs.f_bavail = (user64_long_t)sp->f_bavail; - sfs.f_files = (user64_long_t)sp->f_files; - sfs.f_ffree = (user64_long_t)sp->f_ffree; - sfs.f_fsid = sp->f_fsid; - sfs.f_owner = sp->f_owner; + + sfsbuf = kheap_alloc(KHEAP_TEMP, sizeof(*sfsbuf), Z_WAITOK); + + if (name[0] == VFS_CTL_STATFS64) { + struct statfs64 *sfs = &sfsbuf->sfs64; + + vfs_get_statfs64(mp, sfs); + error = SYSCTL_OUT(req, sfs, sizeof(*sfs)); + } else if (is_64_bit) { + struct user64_statfs *sfs = &sfsbuf->osfs64; + + bzero(sfs, sizeof(*sfs)); + sfs->f_flags = mp->mnt_flag & MNT_VISFLAGMASK; + sfs->f_type = (short)mp->mnt_vtable->vfc_typenum; + sfs->f_bsize = (user64_long_t)sp->f_bsize; + sfs->f_iosize = (user64_long_t)sp->f_iosize; + sfs->f_blocks = (user64_long_t)sp->f_blocks; + 
sfs->f_bfree = (user64_long_t)sp->f_bfree; + sfs->f_bavail = (user64_long_t)sp->f_bavail; + sfs->f_files = (user64_long_t)sp->f_files; + sfs->f_ffree = (user64_long_t)sp->f_ffree; + sfs->f_fsid = sp->f_fsid; + sfs->f_owner = sp->f_owner; #ifdef CONFIG_NFS_CLIENT if (mp->mnt_kern_flag & MNTK_TYPENAME_OVERRIDE) { - strlcpy(&sfs.f_fstypename[0], &mp->fstypename_override[0], MFSNAMELEN); + strlcpy(&sfs->f_fstypename[0], &mp->fstypename_override[0], MFSNAMELEN); } else #endif /* CONFIG_NFS_CLIENT */ { - strlcpy(sfs.f_fstypename, sp->f_fstypename, MFSNAMELEN); + strlcpy(sfs->f_fstypename, sp->f_fstypename, MFSNAMELEN); } - strlcpy(sfs.f_mntonname, sp->f_mntonname, MNAMELEN); - strlcpy(sfs.f_mntfromname, sp->f_mntfromname, MNAMELEN); + strlcpy(sfs->f_mntonname, sp->f_mntonname, MNAMELEN); + strlcpy(sfs->f_mntfromname, sp->f_mntfromname, MNAMELEN); - error = SYSCTL_OUT(req, &sfs, sizeof(sfs)); + error = SYSCTL_OUT(req, sfs, sizeof(*sfs)); } else { - struct user32_statfs sfs; - bzero(&sfs, sizeof(sfs)); - sfs.f_flags = mp->mnt_flag & MNT_VISFLAGMASK; - sfs.f_type = mp->mnt_vtable->vfc_typenum; + struct user32_statfs *sfs = &sfsbuf->osfs32; + long temp; + + bzero(sfs, sizeof(*sfs)); + sfs->f_flags = mp->mnt_flag & MNT_VISFLAGMASK; + sfs->f_type = (short)mp->mnt_vtable->vfc_typenum; /* * It's possible for there to be more than 2^^31 blocks in the filesystem, so we @@ -4014,37 +4546,44 @@ sysctl_vfs_ctlbyfsid(__unused struct sysctl_oid *oidp, void *arg1, int arg2, } } #define __SHIFT_OR_CLIP(x, s) ((((x) >> (s)) > INT_MAX) ? INT_MAX : ((x) >> (s))) - sfs.f_blocks = (user32_long_t)__SHIFT_OR_CLIP(sp->f_blocks, shift); - sfs.f_bfree = (user32_long_t)__SHIFT_OR_CLIP(sp->f_bfree, shift); - sfs.f_bavail = (user32_long_t)__SHIFT_OR_CLIP(sp->f_bavail, shift); + sfs->f_blocks = (user32_long_t)__SHIFT_OR_CLIP(sp->f_blocks, shift); + sfs->f_bfree = (user32_long_t)__SHIFT_OR_CLIP(sp->f_bfree, shift); + sfs->f_bavail = (user32_long_t)__SHIFT_OR_CLIP(sp->f_bavail, shift); #undef __SHIFT_OR_CLIP - sfs.f_bsize = (user32_long_t)(sp->f_bsize << shift); - sfs.f_iosize = lmax(sp->f_iosize, sp->f_bsize); + sfs->f_bsize = (user32_long_t)(sp->f_bsize << shift); + temp = lmax(sp->f_iosize, sp->f_bsize); + if (temp > INT32_MAX) { + error = EINVAL; + kheap_free(KHEAP_TEMP, sfsbuf, sizeof(*sfsbuf)); + goto out; + } + sfs->f_iosize = (user32_long_t)temp; } else { - sfs.f_bsize = (user32_long_t)sp->f_bsize; - sfs.f_iosize = (user32_long_t)sp->f_iosize; - sfs.f_blocks = (user32_long_t)sp->f_blocks; - sfs.f_bfree = (user32_long_t)sp->f_bfree; - sfs.f_bavail = (user32_long_t)sp->f_bavail; + sfs->f_bsize = (user32_long_t)sp->f_bsize; + sfs->f_iosize = (user32_long_t)sp->f_iosize; + sfs->f_blocks = (user32_long_t)sp->f_blocks; + sfs->f_bfree = (user32_long_t)sp->f_bfree; + sfs->f_bavail = (user32_long_t)sp->f_bavail; } - sfs.f_files = (user32_long_t)sp->f_files; - sfs.f_ffree = (user32_long_t)sp->f_ffree; - sfs.f_fsid = sp->f_fsid; - sfs.f_owner = sp->f_owner; + sfs->f_files = (user32_long_t)sp->f_files; + sfs->f_ffree = (user32_long_t)sp->f_ffree; + sfs->f_fsid = sp->f_fsid; + sfs->f_owner = sp->f_owner; #ifdef CONFIG_NFS_CLIENT if (mp->mnt_kern_flag & MNTK_TYPENAME_OVERRIDE) { - strlcpy(&sfs.f_fstypename[0], &mp->fstypename_override[0], MFSNAMELEN); + strlcpy(&sfs->f_fstypename[0], &mp->fstypename_override[0], MFSNAMELEN); } else #endif /* CONFIG_NFS_CLIENT */ { - strlcpy(sfs.f_fstypename, sp->f_fstypename, MFSNAMELEN); + strlcpy(sfs->f_fstypename, sp->f_fstypename, MFSNAMELEN); } - strlcpy(sfs.f_mntonname, sp->f_mntonname, 
MNAMELEN);
- strlcpy(sfs.f_mntfromname, sp->f_mntfromname, MNAMELEN);
+ strlcpy(sfs->f_mntonname, sp->f_mntonname, MNAMELEN);
+ strlcpy(sfs->f_mntfromname, sp->f_mntfromname, MNAMELEN);
- error = SYSCTL_OUT(req, &sfs, sizeof(sfs));
+ error = SYSCTL_OUT(req, sfs, sizeof(*sfs));
}
+ kheap_free(KHEAP_TEMP, sfsbuf, sizeof(*sfsbuf));
break;
default:
error = ENOTSUP;
@@ -4454,6 +4993,7 @@ new_vnode(vnode_t *vpp)
{
vnode_t vp;
uint32_t retries = 0, max_retries = 100; /* retry incase of tablefull */
+ uint32_t bdevvp_vnodes = 0;
int force_alloc = 0, walk_count = 0;
boolean_t need_reliable_vp = FALSE;
int deferred;
@@ -4488,8 +5028,7 @@ retry:
numvnodes++;
vnode_list_unlock();
- MALLOC_ZONE(vp, struct vnode *, sizeof(*vp), M_VNODE, M_WAITOK);
- bzero((char *)vp, sizeof(*vp));
+ vp = zalloc_flags(vnode_zone, Z_WAITOK | Z_ZERO);
VLISTNONE(vp); /* avoid double queue removal */
lck_mtx_init(&vp->v_lock, vnode_lck_grp, vnode_lck_attr);
@@ -4497,7 +5036,7 @@ retry:
klist_init(&vp->v_knotes);
nanouptime(&ts);
- vp->v_id = ts.tv_nsec;
+ vp->v_id = (uint32_t)ts.tv_nsec;
vp->v_flag = VSTANDARD;
#if CONFIG_MACF
@@ -4521,6 +5060,16 @@ retry:
panic("new_vnode: vp (%p) on RAGE list not marked VLIST_RAGE", vp);
}
+ /*
+ * skip free vnodes created by bdevvp as they are
+ * typically not fully constructed and may have issues
+ * in getting reclaimed.
+ */
+ if (vp->v_flag & VBDEVVP) {
+ bdevvp_vnodes++;
+ continue;
+ }
+
// if we're a dependency-capable process, skip vnodes that can
// cause recycling deadlocks. (i.e. this process is diskimages
// helper and the vnode is in a disk image). Querying the
@@ -4559,6 +5108,16 @@ retry:
*/
walk_count = 0;
TAILQ_FOREACH(vp, &vnode_free_list, v_freelist) {
+ /*
+ * skip free vnodes created by bdevvp as they are
+ * typically not fully constructed and may have issues
+ * in getting reclaimed.
+ */
+ if (vp->v_flag & VBDEVVP) {
+ bdevvp_vnodes++;
+ continue;
+ }
+
// if we're a dependency-capable process, skip vnodes that can
// cause recycling deadlocks. (i.e. this process is diskimages
// helper and the vnode is in a disk image). 
Querying the @@ -4621,9 +5180,9 @@ retry: vnode_list_unlock(); tablefull("vnode"); - log(LOG_EMERG, "%d desired, %d numvnodes, " - "%d free, %d dead, %d async, %d rage\n", - desiredvnodes, numvnodes, freevnodes, deadvnodes, async_work_vnodes, ragevnodes); + log(LOG_EMERG, "%d desired, %ld numvnodes, " + "%ld free, %ld dead, %ld async, %d rage %d bdevvp\n", + desiredvnodes, numvnodes, freevnodes, deadvnodes, async_work_vnodes, ragevnodes, bdevvp_vnodes); #if CONFIG_JETSAM #if DEVELOPMENT || DEBUG @@ -4690,7 +5249,7 @@ steal_this_vp: microuptime(&elapsed_tv); timevalsub(&elapsed_tv, &initial_tv); - elapsed_msecs = elapsed_tv.tv_sec * 1000 + elapsed_tv.tv_usec / 1000; + elapsed_msecs = (int)(elapsed_tv.tv_sec * 1000 + elapsed_tv.tv_usec / 1000); if (elapsed_msecs >= 100) { /* @@ -4837,16 +5396,10 @@ vnode_getalways(vnode_t vp) return vget_internal(vp, 0, VNODE_ALWAYS); } -int -vnode_put(vnode_t vp) +__private_extern__ int +vnode_getalways_from_pager(vnode_t vp) { - int retval; - - vnode_lock_spin(vp); - retval = vnode_put_locked(vp); - vnode_unlock(vp); - - return retval; + return vget_internal(vp, 0, VNODE_ALWAYS | VNODE_PAGER); } static inline void @@ -4860,8 +5413,8 @@ vn_set_dead(vnode_t vp) vp->v_lflag |= VL_DEAD; } -int -vnode_put_locked(vnode_t vp) +static int +vnode_put_internal_locked(vnode_t vp, bool from_pager) { vfs_context_t ctx = vfs_context_current(); /* hoist outside loop */ @@ -4877,7 +5430,8 @@ retry: vnode_dropiocount(vp); return 0; } - if ((vp->v_lflag & (VL_DEAD | VL_NEEDINACTIVE)) == VL_NEEDINACTIVE) { + + if (((vp->v_lflag & (VL_DEAD | VL_NEEDINACTIVE)) == VL_NEEDINACTIVE)) { vp->v_lflag &= ~VL_NEEDINACTIVE; vnode_unlock(vp); @@ -4898,8 +5452,27 @@ retry: vp->v_lflag &= ~VL_NEEDINACTIVE; if ((vp->v_lflag & (VL_MARKTERM | VL_TERMINATE | VL_DEAD)) == VL_MARKTERM) { - vnode_lock_convert(vp); - vnode_reclaim_internal(vp, 1, 1, 0); + if (from_pager) { + /* + * We can't initiate reclaim when called from the pager + * because it will deadlock with itself so we hand it + * off to the async cleaner thread. + */ + if (VONLIST(vp)) { + if (!(vp->v_listflag & VLIST_ASYNC_WORK)) { + vnode_list_lock(); + vnode_list_remove_locked(vp); + vnode_async_list_add_locked(vp); + vnode_list_unlock(); + } + wakeup(&vnode_async_work_list); + } else { + vnode_async_list_add(vp); + } + } else { + vnode_lock_convert(vp); + vnode_reclaim_internal(vp, 1, 1, 0); + } } vnode_dropiocount(vp); vnode_list_add(vp); @@ -4907,6 +5480,37 @@ retry: return 0; } +int +vnode_put_locked(vnode_t vp) +{ + return vnode_put_internal_locked(vp, false); +} + +int +vnode_put(vnode_t vp) +{ + int retval; + + vnode_lock_spin(vp); + retval = vnode_put_internal_locked(vp, false); + vnode_unlock(vp); + + return retval; +} + +int +vnode_put_from_pager(vnode_t vp) +{ + int retval; + + vnode_lock_spin(vp); + /* Cannot initiate reclaim while paging */ + retval = vnode_put_internal_locked(vp, true); + vnode_unlock(vp); + + return retval; +} + /* is vnode_t in use by others? 
*/ int vnode_isinuse(vnode_t vp, int refcnt) @@ -4926,7 +5530,7 @@ vnode_iocount(vnode_t vp) return vp->v_iocount; } -static int +int vnode_isinuse_locked(vnode_t vp, int refcnt, int locked) { int retval = 0; @@ -5068,6 +5672,7 @@ vnode_getiocount(vnode_t vp, unsigned int vid, int vflags) int always = vflags & VNODE_ALWAYS; int beatdrain = vflags & VNODE_DRAINO; int withvid = vflags & VNODE_WITHID; + int forpager = vflags & VNODE_PAGER; for (;;) { int sleepflg = 0; @@ -5156,8 +5761,8 @@ vnode_getiocount(vnode_t vp, unsigned int vid, int vflags) if (withvid && vid != vp->v_id) { return ENOENT; } - if (++vp->v_references >= UNAGE_THRESHHOLD || - (vp->v_flag & VISDIRTY && vp->v_references >= UNAGE_DIRTYTHRESHHOLD)) { + if (!forpager && (++vp->v_references >= UNAGE_THRESHHOLD || + (vp->v_flag & VISDIRTY && vp->v_references >= UNAGE_DIRTYTHRESHHOLD))) { vp->v_references = 0; vnode_list_remove(vp); } @@ -5196,6 +5801,7 @@ void vnode_reclaim_internal(struct vnode * vp, int locked, int reuse, int flags) { int isfifo = 0; + bool clear_tty_revoke = false; if (!locked) { vnode_lock(vp); @@ -5208,15 +5814,27 @@ vnode_reclaim_internal(struct vnode * vp, int locked, int reuse, int flags) vn_clearunionwait(vp, 1); - if (vnode_istty(vp) && (flags & REVOKEALL) && vp->v_usecount && - (vp->v_iocount > 1)) { + /* + * We have to force any terminals in reads to return and give up + * their iocounts. It's important to do this after VL_TERMINATE + * has been set to ensure new reads are blocked while the + * revoke is in progress. + */ + if (vnode_istty(vp) && (flags & REVOKEALL) && (vp->v_iocount > 1)) { vnode_unlock(vp); VNOP_IOCTL(vp, TIOCREVOKE, (caddr_t)NULL, 0, vfs_context_kernel()); + clear_tty_revoke = true; vnode_lock(vp); } vnode_drain(vp); + if (clear_tty_revoke) { + vnode_unlock(vp); + VNOP_IOCTL(vp, TIOCREVOKECLEAR, (caddr_t)NULL, 0, vfs_context_kernel()); + vnode_lock(vp); + } + isfifo = (vp->v_type == VFIFO); if (vp->v_type != VBAD) { @@ -5250,7 +5868,7 @@ vnode_reclaim_internal(struct vnode * vp, int locked, int reuse, int flags) fip = vp->v_fifoinfo; vp->v_fifoinfo = NULL; - FREE(fip, M_TEMP); + kheap_free(KHEAP_DEFAULT, fip, sizeof(struct fifoinfo)); } vp->v_type = VBAD; @@ -5379,7 +5997,7 @@ vnode_create_internal(uint32_t flavor, uint32_t size, void *data, vnode_t *vpp, cnp = param->vnfs_cnp; vp->v_op = param->vnfs_vops; - vp->v_type = param->vnfs_vtype; + vp->v_type = (uint16_t)param->vnfs_vtype; vp->v_data = param->vnfs_fsnode; if (param->vnfs_markroot) { @@ -5462,7 +6080,7 @@ vnode_create_internal(uint32_t flavor, uint32_t size, void *data, vnode_t *vpp, vclean(vp, 0); vp->v_op = param->vnfs_vops; - vp->v_type = param->vnfs_vtype; + vp->v_type = (uint16_t)param->vnfs_vtype; vp->v_data = param->vnfs_fsnode; vp->v_lflag = 0; vp->v_mount = NULL; @@ -5483,9 +6101,8 @@ vnode_create_internal(uint32_t flavor, uint32_t size, void *data, vnode_t *vpp, if (vp->v_type == VFIFO) { struct fifoinfo *fip; - MALLOC(fip, struct fifoinfo *, - sizeof(*fip), M_TEMP, M_WAITOK); - bzero(fip, sizeof(struct fifoinfo)); + fip = kheap_alloc(KHEAP_DEFAULT, sizeof(struct fifoinfo), + Z_WAITOK | Z_ZERO); vp->v_fifoinfo = fip; } /* The file systems must pass the address of the location where @@ -5700,7 +6317,7 @@ vfs_iterate(int flags, int (*callout)(mount_t, void *), void *arg) count = mount_getvfscnt(); count += 10; - fsid_list = (fsid_t *)kalloc(count * sizeof(fsid_t)); + fsid_list = kheap_alloc(KHEAP_TEMP, count * sizeof(fsid_t), Z_WAITOK); allocmem = (void *)fsid_list; actualcount = mount_fillfsids(fsid_list, count); 
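The kalloc()/MALLOC() to kheap conversion above follows the same pattern used throughout this change: temporary buffers come from KHEAP_TEMP via kheap_alloc(), and are returned with kheap_free() using the same heap and the same size. A minimal sketch of that pattern, reusing mount_getvfscnt() and mount_fillfsids() from the surrounding code; the wrapper function itself is hypothetical and not part of the patch:

static void
fsid_snapshot_example(void)
{
	/* headroom over the current mount count, as vfs_iterate() does */
	int count = mount_getvfscnt() + 10;
	fsid_t *fsid_list = kheap_alloc(KHEAP_TEMP, count * sizeof(fsid_t), Z_WAITOK);
	int actual;

	if (fsid_list == NULL) {
		return;
	}

	actual = mount_fillfsids(fsid_list, count);
	(void)actual; /* ... walk fsid_list[0..actual-1] here ... */

	/* free with the same heap and the same size that was allocated */
	kheap_free(KHEAP_TEMP, fsid_list, count * sizeof(fsid_t));
}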
@@ -5766,7 +6383,7 @@ vfs_iterate(int flags, int (*callout)(mount_t, void *), void *arg) } out: - kfree(allocmem, (count * sizeof(fsid_t))); + kheap_free(KHEAP_TEMP, allocmem, (count * sizeof(fsid_t))); return ret; } @@ -5871,7 +6488,7 @@ mount_list_add(mount_t mp) int res; mount_list_lock(); - if (system_inshutdown != 0) { + if (get_system_inshutdown() != 0) { res = -1; } else { TAILQ_INSERT_TAIL(&mountlist, mp, mnt_list); @@ -5959,14 +6576,19 @@ errno_t vnode_lookupat(const char *path, int flags, vnode_t *vpp, vfs_context_t ctx, vnode_t start_dvp) { - struct nameidata nd; - int error; + struct nameidata *ndp; + int error = 0; u_int32_t ndflags = 0; if (ctx == NULL) { return EINVAL; } + ndp = kheap_alloc(KHEAP_TEMP, sizeof(struct nameidata), Z_WAITOK); + if (!ndp) { + return ENOMEM; + } + if (flags & VNODE_LOOKUP_NOFOLLOW) { ndflags = NOFOLLOW; } else { @@ -5982,26 +6604,26 @@ vnode_lookupat(const char *path, int flags, vnode_t *vpp, vfs_context_t ctx, } /* XXX AUDITVNPATH1 needed ? */ - NDINIT(&nd, LOOKUP, OP_LOOKUP, ndflags, UIO_SYSSPACE, + NDINIT(ndp, LOOKUP, OP_LOOKUP, ndflags, UIO_SYSSPACE, CAST_USER_ADDR_T(path), ctx); if (start_dvp && (path[0] != '/')) { - nd.ni_dvp = start_dvp; - nd.ni_cnd.cn_flags |= USEDVP; - /* Don't take proc lock vnode_lookupat with a startdir specified */ - nd.ni_flag |= NAMEI_NOPROCLOCK; + ndp->ni_dvp = start_dvp; + ndp->ni_cnd.cn_flags |= USEDVP; } - if ((error = namei(&nd))) { - return error; + if ((error = namei(ndp))) { + goto out_free; } - nd.ni_cnd.cn_flags &= ~USEDVP; + ndp->ni_cnd.cn_flags &= ~USEDVP; - *vpp = nd.ni_vp; - nameidone(&nd); + *vpp = ndp->ni_vp; + nameidone(ndp); - return 0; +out_free: + kheap_free(KHEAP_TEMP, ndp, sizeof(struct nameidata)); + return error; } errno_t @@ -6013,7 +6635,7 @@ vnode_lookup(const char *path, int flags, vnode_t *vpp, vfs_context_t ctx) errno_t vnode_open(const char *path, int fmode, int cmode, int flags, vnode_t *vpp, vfs_context_t ctx) { - struct nameidata nd; + struct nameidata *ndp = NULL; int error; u_int32_t ndflags = 0; int lflags = flags; @@ -6022,6 +6644,11 @@ vnode_open(const char *path, int fmode, int cmode, int flags, vnode_t *vpp, vfs_ ctx = vfs_context_current(); } + ndp = kheap_alloc(KHEAP_TEMP, sizeof(struct nameidata), Z_WAITOK); + if (!ndp) { + return ENOMEM; + } + if (fmode & O_NOFOLLOW) { lflags |= VNODE_LOOKUP_NOFOLLOW; } @@ -6041,15 +6668,16 @@ vnode_open(const char *path, int fmode, int cmode, int flags, vnode_t *vpp, vfs_ } /* XXX AUDITVNPATH1 needed ? 
*/ - NDINIT(&nd, LOOKUP, OP_OPEN, ndflags, UIO_SYSSPACE, + NDINIT(ndp, LOOKUP, OP_OPEN, ndflags, UIO_SYSSPACE, CAST_USER_ADDR_T(path), ctx); - if ((error = vn_open(&nd, fmode, cmode))) { + if ((error = vn_open(ndp, fmode, cmode))) { *vpp = NULL; } else { - *vpp = nd.ni_vp; + *vpp = ndp->ni_vp; } + kheap_free(KHEAP_TEMP, ndp, sizeof(struct nameidata)); return error; } @@ -8546,6 +9174,10 @@ vnode_attr_authorize(struct vnode_attr *vap, struct vnode_attr *dvap, mount_t mp } } + if (mp) { + vnode_attr_handle_mnt_ignore_ownership(vap, mp, ctx); + } + result = vnode_attr_authorize_internal(vcp, mp, rights, is_suser, &found_deny, noimmutable, FALSE); @@ -9515,7 +10147,7 @@ rmdir_remove_orphaned_appleDouble(vnode_t vp, vfs_context_t ctx, int * restart_f { #define UIO_BUFF_SIZE 2048 uio_t auio = NULL; - int eofflag, siz = UIO_BUFF_SIZE, nentries = 0; + int eofflag, siz = UIO_BUFF_SIZE, alloc_size = 0, nentries = 0; int open_flag = 0, full_erase_flag = 0; char uio_buf[UIO_SIZEOF(1)]; char *rbuf = NULL; @@ -9548,7 +10180,8 @@ rmdir_remove_orphaned_appleDouble(vnode_t vp, vfs_context_t ctx, int * restart_f /* * set up UIO */ - MALLOC(rbuf, caddr_t, siz, M_TEMP, M_WAITOK); + rbuf = kheap_alloc(KHEAP_DATA_BUFFERS, siz, Z_WAITOK); + alloc_size = siz; if (rbuf) { auio = uio_createwithbuffer(1, 0, UIO_SYSSPACE, UIO_READ, &uio_buf[0], sizeof(uio_buf)); @@ -9721,7 +10354,7 @@ outsc: if (auio) { uio_free(auio); } - FREE(rbuf, M_TEMP); + kheap_free(KHEAP_DATA_BUFFERS, rbuf, alloc_size); if (saved_nodatalessfaults == false) { ut->uu_flag &= ~UT_NSPACE_NODATALESSFAULTS; @@ -10049,7 +10682,7 @@ vnode_resolver_create(mount_t mp, vnode_t vp, struct vnode_trigger_param *tinfo, byte = *((char *)tinfo->vnt_data); } #endif - MALLOC(rp, vnode_resolve_t, sizeof(*rp), M_TEMP, M_WAITOK); + rp = kheap_alloc(KHEAP_DEFAULT, sizeof(struct vnode_resolve), Z_WAITOK); if (rp == NULL) { return ENOMEM; } @@ -10079,7 +10712,7 @@ vnode_resolver_create(mount_t mp, vnode_t vp, struct vnode_trigger_param *tinfo, return result; out: - FREE(rp, M_TEMP); + kheap_free(KHEAP_DEFAULT, rp, sizeof(struct vnode_resolve)); return result; } @@ -10094,7 +10727,7 @@ vnode_resolver_release(vnode_resolve_t rp) } lck_mtx_destroy(&rp->vr_lock, trigger_vnode_lck_grp); - FREE(rp, M_TEMP); + kheap_free(KHEAP_DEFAULT, rp, sizeof(struct vnode_resolve)); } /* Called after the vnode has been drained */ @@ -10179,6 +10812,14 @@ vnode_trigger_resolve(vnode_t vp, struct nameidata *ndp, vfs_context_t ctx) enum resolver_status status; uint32_t seq; + /* + * N.B. we cannot call vfs_context_can_resolve_triggers() + * here because we really only want to suppress that in + * the event the trigger will be resolved by something in + * user-space. Any triggers that are resolved by the kernel + * do not pose a threat of deadlock. 
+ */ + /* Only trigger on topmost vnodes */ if ((vp->v_resolve == NULL) || (vp->v_resolve->vr_resolve_func == NULL) || @@ -10480,7 +11121,7 @@ vfs_nested_trigger_unmounts(mount_t mp, int flags, vfs_context_t ctx) int vfs_addtrigger(mount_t mp, const char *relpath, struct vnode_trigger_info *vtip, vfs_context_t ctx) { - struct nameidata nd; + struct nameidata *ndp; int res; vnode_t rvp, vp; struct vnode_trigger_param vtp; @@ -10493,6 +11134,11 @@ vfs_addtrigger(mount_t mp, const char *relpath, struct vnode_trigger_info *vtip, TRIG_LOG("Adding trigger at %s\n", relpath); TRIG_LOG("Trying VFS_ROOT\n"); + ndp = kheap_alloc(KHEAP_TEMP, sizeof(struct nameidata), Z_WAITOK); + if (!ndp) { + return ENOMEM; + } + /* * We do a lookup starting at the root of the mountpoint, unwilling * to cross into other mountpoints. @@ -10504,17 +11150,17 @@ vfs_addtrigger(mount_t mp, const char *relpath, struct vnode_trigger_info *vtip, TRIG_LOG("Trying namei\n"); - NDINIT(&nd, LOOKUP, OP_LOOKUP, USEDVP | NOCROSSMOUNT | FOLLOW, UIO_SYSSPACE, + NDINIT(ndp, LOOKUP, OP_LOOKUP, USEDVP | NOCROSSMOUNT | FOLLOW, UIO_SYSSPACE, CAST_USER_ADDR_T(relpath), ctx); - nd.ni_dvp = rvp; - res = namei(&nd); + ndp->ni_dvp = rvp; + res = namei(ndp); if (res != 0) { vnode_put(rvp); goto out; } - vp = nd.ni_vp; - nameidone(&nd); + vp = ndp->ni_vp; + nameidone(ndp); vnode_put(rvp); TRIG_LOG("Trying vnode_resolver_create()\n"); @@ -10537,6 +11183,7 @@ vfs_addtrigger(mount_t mp, const char *relpath, struct vnode_trigger_info *vtip, res = vnode_resolver_create(mp, vp, &vtp, TRUE); vnode_put(vp); out: + kheap_free(KHEAP_TEMP, ndp, sizeof(struct nameidata)); TRIG_LOG("Returning %d\n", res); return res; } @@ -10566,25 +11213,29 @@ vnode_should_flush_after_write(vnode_t vp, int ioflag) * vnodes' paths */ +#define NPATH_WORDS (MAXPATHLEN / sizeof(unsigned long)) struct vnode_trace_paths_context { uint64_t count; - long path[MAXPATHLEN / sizeof(long) + 1]; /* + 1 in case sizeof (long) does not divide MAXPATHLEN */ + /* + * Must be a multiple of 4, then -1, for tracing! + */ + unsigned long path[NPATH_WORDS + (4 - (NPATH_WORDS % 4)) - 1]; }; static int -vnode_trace_path_callback(struct vnode *vp, void *arg) +vnode_trace_path_callback(struct vnode *vp, void *vctx) { - int len, rv; - struct vnode_trace_paths_context *ctx; - - ctx = arg; + struct vnode_trace_paths_context *ctx = vctx; + size_t path_len = sizeof(ctx->path); - len = sizeof(ctx->path); - rv = vn_getpath(vp, (char *)ctx->path, &len); - /* vn_getpath() NUL-terminates, and len includes the NUL */ + int getpath_len = (int)path_len; + if (vn_getpath(vp, (char *)ctx->path, &getpath_len) == 0) { + /* vn_getpath() NUL-terminates, and len includes the NUL. 
*/ + assert(getpath_len >= 0); + path_len = (size_t)getpath_len; - if (!rv) { - kdebug_vfs_lookup(ctx->path, len, vp, + assert(path_len <= sizeof(ctx->path)); + kdebug_vfs_lookup(ctx->path, (int)path_len, vp, KDBG_VFS_LOOKUP_FLAG_LOOKUP | KDBG_VFS_LOOKUP_FLAG_NOPROCFILT); if (++(ctx->count) == 1000) { diff --git a/bsd/vfs/vfs_support.c b/bsd/vfs/vfs_support.c index ca3609541..03986f1d5 100644 --- a/bsd/vfs/vfs_support.c +++ b/bsd/vfs/vfs_support.c @@ -741,7 +741,8 @@ int nop_pagein(struct vnop_pagein_args *ap) { if (!(ap->a_flags & UPL_NOCOMMIT)) { - ubc_upl_abort_range(ap->a_pl, ap->a_pl_offset, ap->a_size, UPL_ABORT_FREE_ON_EMPTY | UPL_ABORT_ERROR); + assert(ap->a_size <= UINT32_MAX); + ubc_upl_abort_range(ap->a_pl, ap->a_pl_offset, (upl_size_t)ap->a_size, UPL_ABORT_FREE_ON_EMPTY | UPL_ABORT_ERROR); } return EINVAL; } @@ -750,7 +751,8 @@ int err_pagein(struct vnop_pagein_args *ap) { if (!(ap->a_flags & UPL_NOCOMMIT)) { - ubc_upl_abort_range(ap->a_pl, ap->a_pl_offset, ap->a_size, UPL_ABORT_FREE_ON_EMPTY | UPL_ABORT_ERROR); + assert(ap->a_size <= UINT32_MAX); + ubc_upl_abort_range(ap->a_pl, ap->a_pl_offset, (upl_size_t)ap->a_size, UPL_ABORT_FREE_ON_EMPTY | UPL_ABORT_ERROR); } return ENOTSUP; } @@ -770,7 +772,8 @@ int nop_pageout(struct vnop_pageout_args *ap) { if (!(ap->a_flags & UPL_NOCOMMIT)) { - ubc_upl_abort_range(ap->a_pl, ap->a_pl_offset, ap->a_size, UPL_ABORT_FREE_ON_EMPTY | UPL_ABORT_ERROR); + assert(ap->a_size <= UINT32_MAX); + ubc_upl_abort_range(ap->a_pl, ap->a_pl_offset, (upl_size_t)ap->a_size, UPL_ABORT_FREE_ON_EMPTY | UPL_ABORT_ERROR); } return EINVAL; } @@ -779,7 +782,8 @@ int err_pageout(struct vnop_pageout_args *ap) { if (!(ap->a_flags & UPL_NOCOMMIT)) { - ubc_upl_abort_range(ap->a_pl, ap->a_pl_offset, ap->a_size, UPL_ABORT_FREE_ON_EMPTY | UPL_ABORT_ERROR); + assert(ap->a_size <= UINT32_MAX); + ubc_upl_abort_range(ap->a_pl, ap->a_pl_offset, (upl_size_t)ap->a_size, UPL_ABORT_FREE_ON_EMPTY | UPL_ABORT_ERROR); } return ENOTSUP; } diff --git a/bsd/vfs/vfs_syscalls.c b/bsd/vfs/vfs_syscalls.c index f27adda4d..cb7cf97fd 100644 --- a/bsd/vfs/vfs_syscalls.c +++ b/bsd/vfs/vfs_syscalls.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 1995-2019 Apple Inc. All rights reserved. + * Copyright (c) 1995-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -83,7 +83,7 @@ #include #include #include -#include +#include #include #include #include @@ -123,6 +123,7 @@ #include #include +#include #include #include @@ -146,14 +147,14 @@ #if CONFIG_FSE #define GET_PATH(x) \ - (x) = get_pathbuff(); + ((x) = get_pathbuff()) #define RELEASE_PATH(x) \ - release_pathbuff(x); + release_pathbuff(x) #else #define GET_PATH(x) \ - MALLOC_ZONE((x), char *, MAXPATHLEN, M_NAMEI, M_WAITOK); + ((x) = zalloc(ZV_NAMEI)) #define RELEASE_PATH(x) \ - FREE_ZONE((x), MAXPATHLEN, M_NAMEI); + zfree(ZV_NAMEI, x) #endif /* CONFIG_FSE */ #ifndef HFS_GET_BOOT_INFO @@ -168,6 +169,12 @@ #define APFSIOC_REVERT_TO_SNAPSHOT _IOW('J', 1, u_int64_t) #endif +/* + * If you need accounting for KM_FD_VN_DATA consider using + * ZONE_VIEW_DEFINE to define a zone view. 
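+ * For illustration only (the exact macro arguments below are an assumption,
+ * not something this change adds), such a view could look roughly like:
+ *   ZONE_VIEW_DEFINE(ZV_FD_VN_DATA, "fd vn data",
+ *       KHEAP_ID_DEFAULT, sizeof(struct fd_vn_data));
+ * which would give these allocations their own accounting bucket.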
+ */ +#define KM_FD_VN_DATA KHEAP_DEFAULT + extern void disk_conditioner_unmount(mount_t mp); /* struct for checkdirs iteration */ @@ -235,13 +242,11 @@ int mount_locker_protoboot(const char *fsname, const char *mntpoint, //snapshot functions #if CONFIG_MNT_ROOTSNAP -static int snapshot_root(int dirfd, user_addr_t name, uint32_t flags, vfs_context_t ctx); +static int __attribute__ ((noinline)) snapshot_root(int dirfd, user_addr_t name, uint32_t flags, vfs_context_t ctx); #else -static int snapshot_root(int dirfd, user_addr_t name, uint32_t flags, vfs_context_t ctx) __attribute__((unused)); +static int __attribute__ ((noinline)) snapshot_root(int dirfd, user_addr_t name, uint32_t flags, vfs_context_t ctx) __attribute__((unused)); #endif -int (*union_dircheckp)(struct vnode **, struct fileproc *, vfs_context_t); - __private_extern__ int sync_internal(void); @@ -252,6 +257,8 @@ extern lck_grp_t *fd_vn_lck_grp; extern lck_grp_attr_t *fd_vn_lck_grp_attr; extern lck_attr_t *fd_vn_lck_attr; +extern lck_rw_t * rootvnode_rw_lock; + /* * incremented each time a mount or unmount operation occurs * used to invalidate the cached value of the rootvp in the @@ -262,6 +269,9 @@ uint32_t mount_generation = 0; /* counts number of mount and unmount operations */ unsigned int vfs_nummntops = 0; +/* system-wide, per-boot unique mount ID */ +static _Atomic uint64_t mount_unique_id = 1; + extern const struct fileops vnops; #if CONFIG_APPLEDOUBLE extern errno_t rmdir_remove_orphaned_appleDouble(vnode_t, vfs_context_t, int *); @@ -300,7 +310,7 @@ kernel_mount(char *fstype, vnode_t pvp, vnode_t vp, const char *path, if (vp == NULLVP) { error = namei(&nd); if (error) { - if (kern_flags & (KERNEL_MOUNT_SNAPSHOT | KERNEL_MOUNT_VMVOL | KERNEL_MOUNT_DATAVOL)) { + if (kern_flags & (KERNEL_MOUNT_SNAPSHOT | KERNEL_MOUNT_VOLBYROLE_MASK)) { printf("failed to locate mount-on path: %s ", path); } return error; @@ -312,7 +322,7 @@ kernel_mount(char *fstype, vnode_t pvp, vnode_t vp, const char *path, char *pnbuf = CAST_DOWN(char *, path); nd.ni_cnd.cn_pnbuf = pnbuf; - nd.ni_cnd.cn_pnlen = strlen(pnbuf) + 1; + nd.ni_cnd.cn_pnlen = (int)(strlen(pnbuf) + 1); did_namei = FALSE; } @@ -394,11 +404,11 @@ fmount(__unused proc_t p, struct fmount_args *uap, __unused int32_t *retval) } memset(&cn, 0, sizeof(struct componentname)); - MALLOC(cn.cn_pnbuf, char *, MAXPATHLEN, M_TEMP, M_WAITOK); + cn.cn_pnbuf = zalloc_flags(ZV_NAMEI, Z_WAITOK); cn.cn_pnlen = MAXPATHLEN; if ((error = vn_getpath(vp, cn.cn_pnbuf, &cn.cn_pnlen)) != 0) { - FREE(cn.cn_pnbuf, M_TEMP); + zfree(ZV_NAMEI, cn.cn_pnbuf); vnode_put(pvp); vnode_put(vp); file_drop(uap->fd); @@ -407,7 +417,7 @@ fmount(__unused proc_t p, struct fmount_args *uap, __unused int32_t *retval) error = mount_common(fstypename, pvp, vp, &cn, uap->data, flags, 0, labelstr, FALSE, ctx); - FREE(cn.cn_pnbuf, M_TEMP); + zfree(ZV_NAMEI, cn.cn_pnbuf); vnode_put(pvp); vnode_put(vp); file_drop(uap->fd); @@ -454,6 +464,7 @@ __mac_mount(struct proc *p, register struct __mac_mount_args *uap, __unused int3 struct nameidata nd; size_t dummy = 0; char *labelstr = NULL; + size_t labelsz = 0; int flags = uap->flags; int error; #if CONFIG_IMGSRC_ACCESS || CONFIG_MACF @@ -502,8 +513,8 @@ __mac_mount(struct proc *p, register struct __mac_mount_args *uap, __unused int3 if (is_64bit) { struct user64_mac mac64; error = copyin(uap->mac_p, &mac64, sizeof(mac64)); - mac.m_buflen = mac64.m_buflen; - mac.m_string = mac64.m_string; + mac.m_buflen = (user_size_t)mac64.m_buflen; + mac.m_string = (user_addr_t)mac64.m_string; } else { 
struct user32_mac mac32; error = copyin(uap->mac_p, &mac32, sizeof(mac32)); @@ -518,7 +529,8 @@ __mac_mount(struct proc *p, register struct __mac_mount_args *uap, __unused int3 error = EINVAL; goto out; } - MALLOC(labelstr, char *, mac.m_buflen, M_MACTEMP, M_WAITOK); + labelsz = mac.m_buflen; + labelstr = kheap_alloc(KHEAP_TEMP, labelsz, Z_WAITOK); error = copyinstr(mac.m_string, labelstr, mac.m_buflen, &ulen); if (error) { goto out; @@ -579,9 +591,7 @@ __mac_mount(struct proc *p, register struct __mac_mount_args *uap, __unused int3 out: #if CONFIG_MACF - if (labelstr) { - FREE(labelstr, M_MACTEMP); - } + kheap_free(KHEAP_DEFAULT, labelstr, labelsz); #endif /* CONFIG_MACF */ if (vp) { @@ -628,6 +638,7 @@ mount_common(char *fstypename, vnode_t pvp, vnode_t vp, struct vfstable *vfsp = (struct vfstable *)0; struct proc *p = vfs_context_proc(ctx); int error, flag = 0; + bool flag_set = false; user_addr_t devpath = USER_ADDR_NULL; int ronly = 0; int mntalloc = 0; @@ -635,10 +646,11 @@ mount_common(char *fstypename, vnode_t pvp, vnode_t vp, boolean_t is_rwlock_locked = FALSE; boolean_t did_rele = FALSE; boolean_t have_usecount = FALSE; + boolean_t did_set_lmount = FALSE; -#if CONFIG_ROSV_STARTUP || CONFIG_MOUNT_VM +#if CONFIG_ROSV_STARTUP || CONFIG_MOUNT_VM || CONFIG_BASESYSTEMROOT /* Check for mutually-exclusive flag bits */ - uint32_t checkflags = (internal_flags & (KERNEL_MOUNT_DATAVOL | KERNEL_MOUNT_VMVOL)); + uint32_t checkflags = (internal_flags & (KERNEL_MOUNT_VOLBYROLE_MASK | KERNEL_MOUNT_BASESYSTEMROOT)); int bitcount = 0; while (checkflags != 0) { checkflags &= (checkflags - 1); @@ -662,13 +674,15 @@ mount_common(char *fstypename, vnode_t pvp, vnode_t vp, } mp = vp->v_mount; - /* unmount in progress return error */ + /* if unmount or mount in progress, return error */ mount_lock_spin(mp); - if (mp->mnt_lflag & MNT_LUNMOUNT) { + if (mp->mnt_lflag & (MNT_LUNMOUNT | MNT_LMOUNT)) { mount_unlock(mp); error = EBUSY; goto out1; } + mp->mnt_lflag |= MNT_LMOUNT; + did_set_lmount = TRUE; mount_unlock(mp); lck_rw_lock_exclusive(&mp->mnt_rwlock); is_rwlock_locked = TRUE; @@ -702,14 +716,12 @@ mount_common(char *fstypename, vnode_t pvp, vnode_t vp, flags |= MNT_REMOVABLE; } -#ifdef CONFIG_IMGSRC_ACCESS /* Can't downgrade the backer of the root FS */ if ((mp->mnt_kern_flag & MNTK_BACKS_ROOT) && (!vfs_isrdonly(mp)) && (flags & MNT_RDONLY)) { error = ENOTSUP; goto out1; } -#endif /* CONFIG_IMGSRC_ACCESS */ /* * Only root, or the user that did the original mount is @@ -736,6 +748,7 @@ mount_common(char *fstypename, vnode_t pvp, vnode_t vp, } } flag = mp->mnt_flag; + flag_set = true; @@ -774,10 +787,11 @@ mount_common(char *fstypename, vnode_t pvp, vnode_t vp, /* * VFC_VFSLOCALARGS is not currently supported for kernel mounts, - * except in ROSV configs. + * except in ROSV configs and for the initial BaseSystem root. 
*/
if (kernelmount && (vfsp->vfc_vfsflags & VFC_VFSLOCALARGS) &&
- ((internal_flags & (KERNEL_MOUNT_DATAVOL | KERNEL_MOUNT_VMVOL)) == 0)) {
+ ((internal_flags & KERNEL_MOUNT_VOLBYROLE_MASK) == 0) &&
+ ((internal_flags & KERNEL_MOUNT_BASESYSTEMROOT) == 0)) {
error = EINVAL; /* unsupported request */
goto out1;
}
@@ -790,9 +804,7 @@ mount_common(char *fstypename, vnode_t pvp, vnode_t vp,
/*
* Allocate and initialize the filesystem (mount_t)
*/
- MALLOC_ZONE(mp, struct mount *, (u_int32_t)sizeof(struct mount),
- M_MOUNT, M_WAITOK);
- bzero((char *)mp, (u_int32_t)sizeof(struct mount));
+ mp = zalloc_flags(mount_zone, Z_WAITOK | Z_ZERO);
mntalloc = 1;
/* Initialize the default IO constraints */
@@ -808,6 +820,9 @@ mount_common(char *fstypename, vnode_t pvp, vnode_t vp,
mp->mnt_realrootvp = NULLVP;
mp->mnt_authcache_ttl = CACHED_LOOKUP_RIGHT_TTL;
+ mp->mnt_lflag |= MNT_LMOUNT;
+ did_set_lmount = TRUE;
+
TAILQ_INIT(&mp->mnt_vnodelist);
TAILQ_INIT(&mp->mnt_workerqueue);
TAILQ_INIT(&mp->mnt_newvnodes);
@@ -830,6 +845,7 @@ mount_common(char *fstypename, vnode_t pvp, vnode_t vp,
mp->mnt_vfsstat.f_owner = kauth_cred_getuid(vfs_context_ucred(ctx));
mp->mnt_throttle_mask = LOWPRI_MAX_NUM_DEV - 1;
mp->mnt_devbsdunit = 0;
+ mp->mnt_mount_id = os_atomic_inc_orig(&mount_unique_id, relaxed);
/* XXX 3762912 hack to support HFS filesystem 'owner' - filesystem may update later */
vfs_setowner(mp, KAUTH_UID_NONE, KAUTH_GID_NONE);
@@ -892,31 +908,71 @@ update:
}
#endif
/*
- * Process device path for local file systems if requested
+ * Process device path for local file systems if requested.
+ *
+ * Snapshot and mount-by-role mounts do not use this path; they are
+ * passing other opaque data in the device path field.
+ *
+ * Basesystemroot mounts pass a device path to be resolved here,
+ * but it's just a char * already inside the kernel, which
+ * kernel_mount() shoved into a user_addr_t to call us. So for such
+ * mounts we must skip copyin (both of the address and of the string
+ * in NDINIT).
*/
if (vfsp->vfc_vfsflags & VFC_VFSLOCALARGS &&
- !(internal_flags & (KERNEL_MOUNT_SNAPSHOT | KERNEL_MOUNT_DATAVOL | KERNEL_MOUNT_VMVOL))) {
- //snapshot, vm, datavolume mounts are special
- if (vfs_context_is64bit(ctx)) {
- if ((error = copyin(fsmountargs, (caddr_t)&devpath, sizeof(devpath)))) {
- goto out1;
- }
- fsmountargs += sizeof(devpath);
- } else {
- user32_addr_t tmp;
- if ((error = copyin(fsmountargs, (caddr_t)&tmp, sizeof(tmp)))) {
- goto out1;
+ !(internal_flags & (KERNEL_MOUNT_SNAPSHOT | KERNEL_MOUNT_VOLBYROLE_MASK))) {
+ boolean_t do_copyin_devpath = true;
+#if CONFIG_BASESYSTEMROOT
+ if (internal_flags & KERNEL_MOUNT_BASESYSTEMROOT) {
+ // KERNEL_MOUNT_BASESYSTEMROOT implies subtle behavior worth noting:
+ // We have been passed fsmountargs, which is typed as a user_addr_t,
+ // but is actually a char ** pointing to a (kernelspace) string.
+ // We manually unpack it with a series of casts and dereferences
+ // that reverses what was done just above us on the stack in
+ // imageboot_pivot_image().
+ // After retrieving the path to the dev node (which we will NDINIT
+ // in a moment), we pass NULL fsmountargs on to the filesystem.
+ _Static_assert(sizeof(char **) == sizeof(fsmountargs), "fsmountargs should fit a (kernel) address");
+ char **devnamepp = (char **)fsmountargs;
+ char *devnamep = *devnamepp;
+ devpath = CAST_USER_ADDR_T(devnamep);
+ do_copyin_devpath = false;
+ fsmountargs = USER_ADDR_NULL;
+
+ //Now that we have a mp, denote that this mount is for the basesystem. 
+ mp->mnt_supl_kern_flag |= MNTK_SUPL_BASESYSTEM; + } +#endif // CONFIG_BASESYSTEMROOT + + if (do_copyin_devpath) { + if (vfs_context_is64bit(ctx)) { + if ((error = copyin(fsmountargs, (caddr_t)&devpath, sizeof(devpath)))) { + goto out1; + } + fsmountargs += sizeof(devpath); + } else { + user32_addr_t tmp; + if ((error = copyin(fsmountargs, (caddr_t)&tmp, sizeof(tmp)))) { + goto out1; + } + /* munge into LP64 addr */ + devpath = CAST_USER_ADDR_T(tmp); + fsmountargs += sizeof(tmp); } - /* munge into LP64 addr */ - devpath = CAST_USER_ADDR_T(tmp); - fsmountargs += sizeof(tmp); } /* Lookup device and authorize access to it */ if ((devpath)) { struct nameidata nd; - NDINIT(&nd, LOOKUP, OP_MOUNT, FOLLOW, UIO_USERSPACE, devpath, ctx); + enum uio_seg seg = UIO_USERSPACE; +#if CONFIG_BASESYSTEMROOT + if (internal_flags & KERNEL_MOUNT_BASESYSTEMROOT) { + seg = UIO_SYSSPACE; + } +#endif // CONFIG_BASESYSTEMROOT + + NDINIT(&nd, LOOKUP, OP_MOUNT, FOLLOW, seg, devpath, ctx); if ((error = namei(&nd))) { goto out1; } @@ -1108,6 +1164,49 @@ update: } #else error = EINVAL; +#endif + } else if ((internal_flags & KERNEL_MOUNT_PREBOOTVOL) || (internal_flags & KERNEL_MOUNT_RECOVERYVOL)) { +#if CONFIG_MOUNT_PREBOOTRECOVERY + struct mount *origin_mp = (struct mount*)fsmountargs; + uint32_t mount_role = 0; + if (internal_flags & KERNEL_MOUNT_PREBOOTVOL) { + mount_role = VFS_PREBOOT_ROLE; + } else if (internal_flags & KERNEL_MOUNT_RECOVERYVOL) { + mount_role = VFS_RECOVERY_ROLE; + } + + if (mount_role != 0) { + fs_role_mount_args_t frma = {origin_mp, mount_role}; + error = VFS_IOCTL(mp, VFSIOC_MOUNT_BYROLE, (caddr_t)&frma, 0, ctx); + if (error) { + printf("MOUNT-BY-ROLE (%d) failed! (%d)", mount_role, error); + } else { + // NOT YET - need to qualify how this interacts with shutdown, ERP/ERB, etc + /* Mark volume associated with system volume */ + //mp->mnt_kern_flag |= MNTK_SYSTEM; + /* Attempt to acquire the mnt_devvp and set it up */ + struct vnode *mp_devvp = NULL; + if (mp->mnt_vfsstat.f_mntfromname[0] != 0) { + errno_t lerr = vnode_lookup(mp->mnt_vfsstat.f_mntfromname, + 0, &mp_devvp, vfs_context_kernel()); + if (!lerr) { + mp->mnt_devvp = mp_devvp; + //vnode_lookup took an iocount, need to drop it. + vnode_put(mp_devvp); + + // now set `device_vnode` to the devvp that was acquired. + // note that though the iocount above was dropped, the mount acquires + // an implicit reference against the device. + device_vnode = mp_devvp; + } + } + } + } else { + printf("MOUNT-BY-ROLE (%d) failed - ROLE UNRECOGNIZED! 
(%d)", mount_role, error); + error = EINVAL; + } +#else + error = EINVAL; #endif } else { error = VFS_MOUNT(mp, device_vnode, fsmountargs, ctx); @@ -1140,14 +1239,14 @@ update: #if CONFIG_MACF error = mac_mount_check_mount_late(ctx, mp); if (error != 0) { - goto out3; + goto out4; } if (vfs_flags(mp) & MNT_MULTILABEL) { error = VFS_ROOT(mp, &rvp, ctx); if (error) { printf("%s() VFS_ROOT returned %d\n", __func__, error); - goto out3; + goto out4; } error = vnode_label(mp, NULL, rvp, NULL, 0, ctx); /* @@ -1156,7 +1255,7 @@ update: vnode_put(rvp); if (error) { - goto out3; + goto out4; } } #endif /* MAC */ @@ -1294,7 +1393,8 @@ update: #if CONFIG_MACF mac_mount_label_destroy(mp); #endif - FREE_ZONE(mp, sizeof(struct mount), M_MOUNT); + zfree(mount_zone, mp); + did_set_lmount = false; } exit: /* @@ -1304,6 +1404,12 @@ exit: vnode_put(devvp); } + if (did_set_lmount) { + mount_lock_spin(mp); + mp->mnt_lflag &= ~MNT_LMOUNT; + mount_unlock(mp); + } + return error; /* Error condition exits */ @@ -1347,9 +1453,18 @@ out2: out1: /* Release mnt_rwlock only when it was taken */ if (is_rwlock_locked == TRUE) { + if (flag_set) { + mp->mnt_flag = flag; /* restore mnt_flag value */ + } lck_rw_done(&mp->mnt_rwlock); } + if (did_set_lmount) { + mount_lock_spin(mp); + mp->mnt_lflag &= ~MNT_LMOUNT; + mount_unlock(mp); + } + if (mntalloc) { if (mp->mnt_crossref) { mount_dropcrossref(mp, vp, 0); @@ -1358,7 +1473,7 @@ out1: #if CONFIG_MACF mac_mount_label_destroy(mp); #endif - FREE_ZONE(mp, sizeof(struct mount), M_MOUNT); + zfree(mount_zone, mp); } } if (vfsp_ref) { @@ -1588,7 +1703,7 @@ mount_begin_update(mount_t mp, vfs_context_t ctx, int flags) /* unmount in progress return error */ mount_lock_spin(mp); - if (mp->mnt_lflag & MNT_LUNMOUNT) { + if (mp->mnt_lflag & (MNT_LUNMOUNT | MNT_LMOUNT)) { mount_unlock(mp); return EBUSY; } @@ -1695,7 +1810,7 @@ relocate_imageboot_source(vnode_t pvp, vnode_t vp, height = mia64.mi_height; flags = mia64.mi_flags; - devpath = mia64.mi_devpath; + devpath = (user_addr_t)mia64.mi_devpath; } else { struct user32_mnt_imgsrc_args mia32; error = copyin(fsmountargs, &mia32, sizeof(mia32)); @@ -1743,7 +1858,7 @@ relocate_imageboot_source(vnode_t pvp, vnode_t vp, IMGSRC_DEBUG("got old root vnode\n"); - MALLOC(old_mntonname, char*, MAXPATHLEN, M_TEMP, M_WAITOK); + old_mntonname = zalloc_flags(ZV_NAMEI, Z_WAITOK); /* Can only move once */ mp = vnode_mount(rvp); @@ -1840,7 +1955,7 @@ relocate_imageboot_source(vnode_t pvp, vnode_t vp, mount_end_update(mp); vnode_put(rvp); - FREE(old_mntonname, M_TEMP); + zfree(ZV_NAMEI, old_mntonname); vfs_notify_mount(pvp); @@ -1870,7 +1985,7 @@ out1: out0: vnode_put(rvp); - FREE(old_mntonname, M_TEMP); + zfree(ZV_NAMEI, old_mntonname); return error; } @@ -2083,8 +2198,10 @@ checkdirs(vnode_t olddp, vfs_context_t ctx) if (rootvnode == olddp) { vnode_ref(newdp); + lck_rw_lock_exclusive(rootvnode_rw_lock); tvp = rootvnode; rootvnode = newdp; + lck_rw_unlock_exclusive(rootvnode_rw_lock); vnode_rele(tvp); } @@ -2153,6 +2270,8 @@ vfs_unmountbyfsid(fsid_t *fsid, int flags, vfs_context_t ctx) return safedounmount(mp, flags, ctx); } +#define ROLE_ACCOUNT_UNMOUNT_ENTITLEMENT \ + "com.apple.private.vfs.role-account-unmount" /* * The mount struct comes with a mount ref which will be consumed. @@ -2175,10 +2294,15 @@ safedounmount(struct mount *mp, int flags, vfs_context_t ctx) } /* - * Skip authorization if the mount is tagged as permissive and - * this is not a forced-unmount attempt. 
+ * Skip authorization in two cases:
+ * - If the process running the unmount has ROLE_ACCOUNT_UNMOUNT_ENTITLEMENT.
+ * This entitlement allows non-root processes to unmount volumes mounted by
+ * other processes.
+ * - If the mount is tagged as permissive and this is not a forced-unmount
+ * attempt.
*/
- if (!(((mp->mnt_kern_flag & MNTK_PERMIT_UNMOUNT) != 0) && ((flags & MNT_FORCE) == 0))) {
+ if (!IOTaskHasEntitlement(current_task(), ROLE_ACCOUNT_UNMOUNT_ENTITLEMENT) &&
+ (!(((mp->mnt_kern_flag & MNTK_PERMIT_UNMOUNT) != 0) && ((flags & MNT_FORCE) == 0)))) {
/*
* Only root, or the user that did the original mount is
* permitted to unmount this filesystem.
@@ -2189,19 +2313,22 @@ safedounmount(struct mount *mp, int flags, vfs_context_t ctx)
}
}
/*
- * Don't allow unmounting the root file system (or the associated VM or DATA mounts) .
+ * Don't allow unmounting the root file system, or other volumes
+ * associated with it (for example, the associated VM or DATA mounts).
*/
if ((mp->mnt_flag & MNT_ROOTFS) || (mp->mnt_kern_flag & MNTK_SYSTEM)) {
error = EBUSY; /* the root (or associated volumes) is always busy */
goto out;
}
-#ifdef CONFIG_IMGSRC_ACCESS
+ /*
+ * If the mount is providing the root filesystem's disk image
+ * (i.e. imageboot), don't allow unmounting
+ */
if (mp->mnt_kern_flag & MNTK_BACKS_ROOT) {
error = EBUSY;
goto out;
}
-#endif /* CONFIG_IMGSRC_ACCESS */
return dounmount(mp, flags, 1, ctx);
@@ -2240,7 +2367,7 @@ dounmount(struct mount *mp, int flags, int withref, vfs_context_t ctx)
* If already an unmount in progress just return EBUSY.
* Even a forced unmount cannot override.
*/
- if (mp->mnt_lflag & MNT_LUNMOUNT) {
+ if (mp->mnt_lflag & (MNT_LUNMOUNT | MNT_LMOUNT)) {
if (withref != 0) {
mount_drop(mp, 1);
}
@@ -2490,7 +2617,7 @@ out:
#if CONFIG_MACF
mac_mount_label_destroy(mp);
#endif
- FREE_ZONE(mp, sizeof(struct mount), M_MOUNT);
+ zfree(mount_zone, mp);
} else {
panic("dounmount: no coveredvp");
}
@@ -2516,7 +2643,7 @@ dounmount_submounts(struct mount *mp, int flags, vfs_context_t ctx)
TAILQ_FOREACH(smp, &mountlist, mnt_list)
count++;
fsids_sz = count * sizeof(fsid_t);
- MALLOC(fsids, fsid_t *, fsids_sz, M_TEMP, M_NOWAIT);
+ fsids = kheap_alloc(KHEAP_TEMP, fsids_sz, Z_NOWAIT);
if (fsids == NULL) {
mount_list_unlock();
goto out;
@@ -2556,9 +2683,7 @@ dounmount_submounts(struct mount *mp, int flags, vfs_context_t ctx)
}
}
out:
- if (fsids) {
- FREE(fsids, M_TEMP);
- }
+ kheap_free(KHEAP_TEMP, fsids, fsids_sz);
}
void
@@ -2581,7 +2706,7 @@ mount_dropcrossref(mount_t mp, vnode_t dp, int need_put)
#if CONFIG_MACF
mac_mount_label_destroy(mp);
#endif
- FREE_ZONE(mp, sizeof(struct mount), M_MOUNT);
+ zfree(mount_zone, mp);
return;
}
if (need_put) {
@@ -2696,10 +2821,17 @@ int sync_timeout_seconds = 5;
#define SYNC_THREAD_RUN 0x0001
#define SYNC_THREAD_RUNNING 0x0002
+#if CONFIG_PHYS_WRITE_ACCT
+thread_t pm_sync_thread;
+#endif /* CONFIG_PHYS_WRITE_ACCT */
+
static void
sync_thread(__unused void *arg, __unused wait_result_t wr)
{
sync_type_t sync_type;
+#if CONFIG_PHYS_WRITE_ACCT
+ pm_sync_thread = current_thread();
+#endif /* CONFIG_PHYS_WRITE_ACCT */
lck_mtx_lock(sync_mtx_lck);
while (sync_thread_state & SYNC_THREAD_RUN) {
@@ -2721,6 +2853,9 @@ sync_thread(__unused void *arg, __unused wait_result_t wr)
*/
wakeup(&sync_thread_state);
sync_thread_state &= ~SYNC_THREAD_RUNNING;
+#if CONFIG_PHYS_WRITE_ACCT
+ pm_sync_thread = NULL;
+#endif /* CONFIG_PHYS_WRITE_ACCT */
lck_mtx_unlock(sync_mtx_lck);
if (print_vmpage_stat) {
@@ -2818,7 +2953,7 @@ quotactl(proc_t p, struct quotactl_args 
*uap, __unused int32_t *retval) case Q_QUOTAON: /* uap->arg specifies a file from which to take the quotas */ fnamelen = MAXPATHLEN; - datap = kalloc(MAXPATHLEN); + datap = zalloc(ZV_NAMEI); error = copyinstr(uap->arg, datap, MAXPATHLEN, &fnamelen); break; case Q_GETQUOTA: @@ -2855,7 +2990,7 @@ quotactl(proc_t p, struct quotactl_args *uap, __unused int32_t *retval) switch (quota_cmd) { case Q_QUOTAON: if (datap != NULL) { - kfree(datap, MAXPATHLEN); + zfree(ZV_NAMEI, datap); } break; case Q_GETQUOTA: @@ -3014,7 +3149,7 @@ vfs_get_statfs64(struct mount *mp, struct statfs64 *sfs) sfs->f_type = mp->mnt_vtable->vfc_typenum; sfs->f_flags = mp->mnt_flag & MNT_VISFLAGMASK; sfs->f_fssubtype = vsfs->f_fssubtype; - sfs->f_flags_ext = ((mp->mnt_kern_flag & MNTK_SYSTEM) && !(mp->mnt_kern_flag & MNTK_SWAP_MOUNT) && !(mp->mnt_flag & MNT_ROOTFS)) ? MNT_EXT_ROOT_DATA_VOL : 0; + sfs->f_flags_ext = (mp->mnt_kern_flag & MNTK_SYSTEMDATA) ? MNT_EXT_ROOT_DATA_VOL : 0; if (mp->mnt_kern_flag & MNTK_TYPENAME_OVERRIDE) { strlcpy(&sfs->f_fstypename[0], &mp->fstypename_override[0], MFSTYPENAMELEN); } else { @@ -3032,44 +3167,56 @@ statfs64(__unused struct proc *p, struct statfs64_args *uap, __unused int32_t *r { struct mount *mp; int error; - struct nameidata nd; - struct statfs64 sfs; + struct nameidata *ndp; + struct statfs64 *sfsp; vfs_context_t ctxp = vfs_context_current(); vnode_t vp; + union { + struct nameidata nd; + struct statfs64 sfs; + } *__nameidata_statfs64; - NDINIT(&nd, LOOKUP, OP_STATFS, FOLLOW | AUDITVNPATH1, + __nameidata_statfs64 = kheap_alloc(KHEAP_TEMP, sizeof(*__nameidata_statfs64), + Z_WAITOK); + ndp = &__nameidata_statfs64->nd; + + NDINIT(ndp, LOOKUP, OP_STATFS, FOLLOW | AUDITVNPATH1, UIO_USERSPACE, uap->path, ctxp); - error = namei(&nd); + error = namei(ndp); if (error != 0) { - return error; + goto out; } - vp = nd.ni_vp; + vp = ndp->ni_vp; mp = vp->v_mount; - nameidone(&nd); + nameidone(ndp); #if CONFIG_MACF error = mac_mount_check_stat(ctxp, mp); if (error != 0) { vnode_put(vp); - return error; + goto out; } #endif error = vfs_update_vfsstat(mp, ctxp, VFS_USER_EVENT); if (error != 0) { vnode_put(vp); - return error; + goto out; } - vfs_get_statfs64(mp, &sfs); - if ((mp->mnt_kern_flag & MNTK_SYSTEM) && !(mp->mnt_kern_flag & MNTK_SWAP_MOUNT) && !(mp->mnt_flag & MNT_ROOTFS) && + sfsp = &__nameidata_statfs64->sfs; + vfs_get_statfs64(mp, sfsp); + if ((mp->mnt_kern_flag & MNTK_SYSTEMDATA) && (p->p_vfs_iopolicy & P_VFS_IOPOLICY_STATFS_NO_DATA_VOLUME)) { /* This process does not want to see a seperate data volume mountpoint */ - strlcpy(&sfs.f_mntonname[0], "/", sizeof("/")); + strlcpy(&sfsp->f_mntonname[0], "/", sizeof("/")); } - error = copyout(&sfs, uap->buf, sizeof(sfs)); + error = copyout(sfsp, uap->buf, sizeof(*sfsp)); vnode_put(vp); +out: + kheap_free(KHEAP_TEMP, __nameidata_statfs64, sizeof(*__nameidata_statfs64)); + return error; } @@ -3116,7 +3263,7 @@ fstatfs64(__unused struct proc *p, struct fstatfs64_args *uap, __unused int32_t } vfs_get_statfs64(mp, &sfs); - if ((mp->mnt_kern_flag & MNTK_SYSTEM) && !(mp->mnt_kern_flag & MNTK_SWAP_MOUNT) && !(mp->mnt_flag & MNT_ROOTFS) && + if ((mp->mnt_kern_flag & MNTK_SYSTEMDATA) && (p->p_vfs_iopolicy & P_VFS_IOPOLICY_STATFS_NO_DATA_VOLUME)) { /* This process does not want to see a seperate data volume mountpoint */ strlcpy(&sfs.f_mntonname[0], "/", sizeof("/")); @@ -3266,21 +3413,21 @@ __mac_getfsstat(__unused proc_t p, struct __mac_getfsstat_args *uap, int *retval } /* Copy in the array */ - MALLOC(mp0, u_int32_t *, macsize, M_MACTEMP, M_WAITOK); 
+ mp0 = kheap_alloc(KHEAP_TEMP, macsize, Z_WAITOK); if (mp0 == NULL) { return ENOMEM; } error = copyin(uap->mac, mp0, macsize); if (error) { - FREE(mp0, M_MACTEMP); + kheap_free(KHEAP_TEMP, mp0, macsize); return error; } /* Normalize to an array of user_addr_t */ - MALLOC(mp, user_addr_t *, count * sizeof(user_addr_t), M_MACTEMP, M_WAITOK); + mp = kheap_alloc(KHEAP_TEMP, count * sizeof(user_addr_t), Z_WAITOK); if (mp == NULL) { - FREE(mp0, M_MACTEMP); + kheap_free(KHEAP_TEMP, mp0, macsize); return ENOMEM; } @@ -3291,7 +3438,7 @@ __mac_getfsstat(__unused proc_t p, struct __mac_getfsstat_args *uap, int *retval mp[i] = (user_addr_t)mp0[i]; } } - FREE(mp0, M_MACTEMP); + kheap_free(KHEAP_TEMP, mp0, macsize); } #endif @@ -3301,13 +3448,13 @@ __mac_getfsstat(__unused proc_t p, struct __mac_getfsstat_args *uap, int *retval fst.flags = uap->flags; fst.count = 0; fst.error = 0; - fst.maxcount = maxcount; + fst.maxcount = (int)maxcount; vfs_iterate(VFS_ITERATE_NOSKIP_UNMOUNT, getfsstat_callback, &fst); if (mp) { - FREE(mp, M_MACTEMP); + kheap_free(KHEAP_TEMP, mp, count * sizeof(user_addr_t)); } if (fst.error) { @@ -3508,7 +3655,7 @@ common_fchdir(proc_t p, struct fchdir_args *uap, int per_thread) vnode_t tdp; vnode_t tvp; struct mount *mp; - int error; + int error, should_put = 1; vfs_context_t ctx = vfs_context_current(); AUDIT_ARG(fd, uap->fd); @@ -3578,6 +3725,7 @@ common_fchdir(proc_t p, struct fchdir_args *uap, int per_thread) goto out; } vnode_put(vp); + should_put = 0; if (per_thread) { thread_t th = vfs_context_thread(ctx); @@ -3588,7 +3736,8 @@ common_fchdir(proc_t p, struct fchdir_args *uap, int per_thread) OSBitOrAtomic(P_THCWD, &p->p_flag); } else { vnode_rele(vp); - return ENOENT; + error = ENOENT; + goto out; } } else { proc_dirs_lock_exclusive(p); @@ -3602,11 +3751,11 @@ common_fchdir(proc_t p, struct fchdir_args *uap, int per_thread) if (tvp) { vnode_rele(tvp); } - file_drop(uap->fd); - return 0; out: - vnode_put(vp); + if (should_put) { + vnode_put(vp); + } file_drop(uap->fd); return error; @@ -3813,6 +3962,110 @@ chroot(proc_t p, struct chroot_args *uap, __unused int32_t *retval) return 0; } +#define PATHSTATICBUFLEN 256 +#define PIVOT_ROOT_ENTITLEMENT \ + "com.apple.private.vfs.pivot-root" + +#if defined(XNU_TARGET_OS_OSX) +int +pivot_root(proc_t p, struct pivot_root_args *uap, __unused int *retval) +{ + int error; + char new_rootfs_path_before[PATHSTATICBUFLEN] = {0}; + char old_rootfs_path_after[PATHSTATICBUFLEN] = {0}; + char *new_rootfs_path_before_buf = NULL; + char *old_rootfs_path_after_buf = NULL; + char *incoming = NULL; + char *outgoing = NULL; + vnode_t incoming_rootvp = NULLVP; + size_t bytes_copied; + + /* + * XXX : Additional restrictions needed + * - perhaps callable only once. + */ + if ((error = suser(kauth_cred_get(), &p->p_acflag))) { + return error; + } + + /* + * pivot_root can be executed by launchd only. + * Enforce entitlement. 
+ */ + if ((p->p_pid != 1) || !IOTaskHasEntitlement(current_task(), PIVOT_ROOT_ENTITLEMENT)) { + return EPERM; + } + + error = copyinstr(uap->new_rootfs_path_before, &new_rootfs_path_before[0], PATHSTATICBUFLEN, &bytes_copied); + if (error == ENAMETOOLONG) { + new_rootfs_path_before_buf = zalloc_flags(ZV_NAMEI, Z_WAITOK); + error = copyinstr(uap->new_rootfs_path_before, new_rootfs_path_before_buf, MAXPATHLEN, &bytes_copied); + } + + if (error) { + goto out; + } + + error = copyinstr(uap->old_rootfs_path_after, &old_rootfs_path_after[0], PATHSTATICBUFLEN, &bytes_copied); + if (error == ENAMETOOLONG) { + old_rootfs_path_after_buf = zalloc_flags(ZV_NAMEI, Z_WAITOK); + error = copyinstr(uap->old_rootfs_path_after, old_rootfs_path_after_buf, MAXPATHLEN, &bytes_copied); + } + if (error) { + goto out; + } + + if (new_rootfs_path_before_buf) { + incoming = new_rootfs_path_before_buf; + } else { + incoming = &new_rootfs_path_before[0]; + } + + if (old_rootfs_path_after_buf) { + outgoing = old_rootfs_path_after_buf; + } else { + outgoing = &old_rootfs_path_after[0]; + } + + /* + * The proposed incoming FS MUST be authenticated (i.e. not a chunklist DMG). + * Userland is not allowed to pivot to an image. + */ + error = vnode_lookup(incoming, 0, &incoming_rootvp, vfs_context_kernel()); + if (error) { + goto out; + } + error = VNOP_IOCTL(incoming_rootvp, FSIOC_KERNEL_ROOTAUTH, NULL, 0, vfs_context_kernel()); + if (error) { + goto out; + } + + error = vfs_switch_root(incoming, outgoing, VFSSR_VIRTUALDEV_PROHIBITED); + +out: + if (incoming_rootvp != NULLVP) { + vnode_put(incoming_rootvp); + incoming_rootvp = NULLVP; + } + + if (old_rootfs_path_after_buf) { + zfree(ZV_NAMEI, old_rootfs_path_after_buf); + } + + if (new_rootfs_path_before_buf) { + zfree(ZV_NAMEI, new_rootfs_path_before_buf); + } + + return error; +} +#else +int +pivot_root(proc_t p, __unused struct pivot_root_args *uap, int *retval) +{ + return nosys(p, NULL, retval); +} +#endif /* XNU_TARGET_OS_OSX */ + /* * Common routine for chroot and chdir. 
* @@ -3864,8 +4117,8 @@ fg_vn_data_alloc(void) struct fd_vn_data *fvdata; /* Allocate per fd vnode data */ - MALLOC(fvdata, struct fd_vn_data *, (sizeof(struct fd_vn_data)), - M_FD_VN_DATA, M_WAITOK | M_ZERO); + fvdata = kheap_alloc(KM_FD_VN_DATA, sizeof(struct fd_vn_data), + Z_WAITOK | Z_ZERO); lck_mtx_init(&fvdata->fv_lock, fd_vn_lck_grp, fd_vn_lck_attr); return fvdata; } @@ -3878,11 +4131,9 @@ fg_vn_data_free(void *fgvndata) { struct fd_vn_data *fvdata = (struct fd_vn_data *)fgvndata; - if (fvdata->fv_buf) { - FREE(fvdata->fv_buf, M_FD_DIRBUF); - } + kheap_free(KHEAP_DATA_BUFFERS, fvdata->fv_buf, fvdata->fv_bufallocsiz); lck_mtx_destroy(&fvdata->fv_lock, fd_vn_lck_grp); - FREE(fvdata, M_FD_VN_DATA); + kheap_free(KM_FD_VN_DATA, fvdata, sizeof(struct fd_vn_data)); } /* @@ -3913,7 +4164,6 @@ open1(vfs_context_t ctx, struct nameidata *ndp, int uflags, vnode_t vp; int flags, oflags; int type, indx, error; - struct flock lf; struct vfs_context context; oflags = uflags; @@ -3952,14 +4202,15 @@ open1(vfs_context_t ctx, struct nameidata *ndp, int uflags, uu->uu_dupfd = 0; vp = ndp->ni_vp; - fp->f_fglob->fg_flag = flags & (FMASK | O_EVTONLY | FENCRYPTED | FUNENCRYPTED); - fp->f_fglob->fg_ops = &vnops; - fp->f_fglob->fg_data = (caddr_t)vp; + fp->fp_glob->fg_flag = flags & (FMASK | O_EVTONLY | FENCRYPTED | FUNENCRYPTED); + fp->fp_glob->fg_ops = &vnops; + fp->fp_glob->fg_data = (caddr_t)vp; if (flags & (O_EXLOCK | O_SHLOCK)) { - lf.l_whence = SEEK_SET; - lf.l_start = 0; - lf.l_len = 0; + struct flock lf = { + .l_whence = SEEK_SET, + }; + if (flags & O_EXLOCK) { lf.l_type = F_WRLCK; } else { @@ -3970,16 +4221,16 @@ open1(vfs_context_t ctx, struct nameidata *ndp, int uflags, type |= F_WAIT; } #if CONFIG_MACF - error = mac_file_check_lock(vfs_context_ucred(ctx), fp->f_fglob, + error = mac_file_check_lock(vfs_context_ucred(ctx), fp->fp_glob, F_SETLK, &lf); if (error) { goto bad; } #endif - if ((error = VNOP_ADVLOCK(vp, (caddr_t)fp->f_fglob, F_SETLK, &lf, type, ctx, NULL))) { + if ((error = VNOP_ADVLOCK(vp, (caddr_t)fp->fp_glob, F_SETLK, &lf, type, ctx, NULL))) { goto bad; } - fp->f_fglob->fg_flag |= FHASLOCK; + fp->fp_glob->fg_flag |= FWASLOCKED; } /* try to truncate by setting the size attribute */ @@ -3991,9 +4242,9 @@ open1(vfs_context_t ctx, struct nameidata *ndp, int uflags, * For directories we hold some additional information in the fd. */ if (vnode_vtype(vp) == VDIR) { - fp->f_fglob->fg_vn_data = fg_vn_data_alloc(); + fp->fp_glob->fg_vn_data = fg_vn_data_alloc(); } else { - fp->f_fglob->fg_vn_data = NULL; + fp->fp_glob->fg_vn_data = NULL; } vnode_put(vp); @@ -4006,7 +4257,7 @@ open1(vfs_context_t ctx, struct nameidata *ndp, int uflags, !(flags & O_NOCTTY)) { int tmp = 0; - (void)(*fp->f_fglob->fg_ops->fo_ioctl)(fp, (int)TIOCSCTTY, + (void)(*fp->fp_glob->fg_ops->fo_ioctl)(fp, (int)TIOCSCTTY, (caddr_t)&tmp, ctx); } @@ -4021,7 +4272,7 @@ open1(vfs_context_t ctx, struct nameidata *ndp, int uflags, #if CONFIG_SECLUDED_MEMORY if (secluded_for_filecache && - FILEGLOB_DTYPE(fp->f_fglob) == DTYPE_VNODE && + FILEGLOB_DTYPE(fp->fp_glob) == DTYPE_VNODE && vnode_vtype(vp) == VREG) { memory_object_control_t moc; @@ -4029,7 +4280,7 @@ open1(vfs_context_t ctx, struct nameidata *ndp, int uflags, if (moc == MEMORY_OBJECT_CONTROL_NULL) { /* nothing to do... 
*/ - } else if (fp->f_fglob->fg_flag & FWRITE) { + } else if (fp->fp_glob->fg_flag & FWRITE) { /* writable -> no longer eligible for secluded pages */ memory_object_mark_eligible_for_secluded(moc, FALSE); @@ -4100,20 +4351,20 @@ open1(vfs_context_t ctx, struct nameidata *ndp, int uflags, return 0; bad: context = *vfs_context_current(); - context.vc_ucred = fp->f_fglob->fg_cred; + context.vc_ucred = fp->fp_glob->fg_cred; - if ((fp->f_fglob->fg_flag & FHASLOCK) && - (FILEGLOB_DTYPE(fp->f_fglob) == DTYPE_VNODE)) { - lf.l_whence = SEEK_SET; - lf.l_start = 0; - lf.l_len = 0; - lf.l_type = F_UNLCK; + if ((fp->fp_glob->fg_flag & FWASLOCKED) && + (FILEGLOB_DTYPE(fp->fp_glob) == DTYPE_VNODE)) { + struct flock lf = { + .l_whence = SEEK_SET, + .l_type = F_UNLCK, + }; (void)VNOP_ADVLOCK( - vp, (caddr_t)fp->f_fglob, F_UNLCK, &lf, F_FLOCK, ctx, NULL); + vp, (caddr_t)fp->fp_glob, F_UNLCK, &lf, F_FLOCK, ctx, NULL); } - vn_close(vp, fp->f_fglob->fg_flag, &context); + vn_close(vp, fp->fp_glob->fg_flag, &context); vnode_put(vp); fp_free(p, indx, fp); @@ -4308,20 +4559,33 @@ openat_internal(vfs_context_t ctx, user_addr_t path, int flags, int mode, int fd, enum uio_seg segflg, int *retval) { struct filedesc *fdp = (vfs_context_proc(ctx))->p_fd; - struct vnode_attr va; - struct nameidata nd; + struct { + struct vnode_attr va; + struct nameidata nd; + } *__open_data; + struct vnode_attr *vap; + struct nameidata *ndp; int cmode; + int error; - VATTR_INIT(&va); + __open_data = kheap_alloc(KHEAP_TEMP, sizeof(*__open_data), Z_WAITOK); + vap = &__open_data->va; + ndp = &__open_data->nd; + + VATTR_INIT(vap); /* Mask off all but regular access permissions */ cmode = ((mode & ~fdp->fd_cmask) & ALLPERMS) & ~S_ISTXT; - VATTR_SET(&va, va_mode, cmode & ACCESSPERMS); + VATTR_SET(vap, va_mode, cmode & ACCESSPERMS); - NDINIT(&nd, LOOKUP, OP_OPEN, FOLLOW | AUDITVNPATH1, + NDINIT(ndp, LOOKUP, OP_OPEN, FOLLOW | AUDITVNPATH1, segflg, path, ctx); - return open1at(ctx, &nd, flags, &va, fileproc_alloc_init, NULL, - retval, fd); + error = open1at(ctx, ndp, flags, vap, fileproc_alloc_init, NULL, + retval, fd); + + kheap_free(KHEAP_TEMP, __open_data, sizeof(*__open_data)); + + return error; } int @@ -4406,7 +4670,7 @@ openbyid_np(__unused proc_t p, struct openbyid_np_args *uap, int *retval) /*resolve path from fsis, objid*/ do { - MALLOC(buf, char *, buflen + 1, M_TEMP, M_WAITOK); + buf = kheap_alloc(KHEAP_TEMP, buflen + 1, Z_WAITOK); if (buf == NULL) { return ENOMEM; } @@ -4415,7 +4679,7 @@ openbyid_np(__unused proc_t p, struct openbyid_np_args *uap, int *retval) buf, FSOPT_ISREALFSID, &pathlen); if (error) { - FREE(buf, M_TEMP); + kheap_free(KHEAP_TEMP, buf, buflen + 1); buf = NULL; } } while (error == ENOSPC && (buflen += MAXPATHLEN)); @@ -4429,7 +4693,7 @@ openbyid_np(__unused proc_t p, struct openbyid_np_args *uap, int *retval) error = openat_internal( ctx, (user_addr_t)buf, uap->oflags, 0, AT_FDCWD, UIO_SYSSPACE, retval); - FREE(buf, M_TEMP); + kheap_free(KHEAP_TEMP, buf, buflen + 1); return error; } @@ -4458,7 +4722,7 @@ mknod(proc_t p, struct mknod_args *uap, __unused int32_t *retval) return mkfifo1(ctx, uap->path, &va); } - AUDIT_ARG(mode, uap->mode); + AUDIT_ARG(mode, (mode_t)uap->mode); AUDIT_ARG(value32, uap->dev); if ((error = suser(vfs_context_ucred(ctx), &p->p_acflag))) { @@ -4665,23 +4929,6 @@ mkfifo(proc_t p, struct mkfifo_args *uap, __unused int32_t *retval) return mkfifo1(vfs_context_current(), uap->path, &va); } - -static char * -my_strrchr(char *p, int ch) -{ - char *save; - - for (save = NULL;; ++p) { - if (*p == 
ch) { - save = p; - } - if (!*p) { - return save; - } - } - /* NOTREACHED */ -} - extern int safe_getpath_new(struct vnode *dvp, char *leafname, char *path, int _len, int *truncated_path, int firmlink); extern int safe_getpath(struct vnode *dvp, char *leafname, char *path, int _len, int *truncated_path); extern int safe_getpath_no_firmlink(struct vnode *dvp, char *leafname, char *path, int _len, int *truncated_path); @@ -4707,11 +4954,11 @@ safe_getpath_new(struct vnode *dvp, char *leafname, char *path, int _len, int *t // the string got truncated! *truncated_path = 1; - ptr = my_strrchr(path, '/'); + ptr = strrchr(path, '/'); if (ptr) { *ptr = '\0'; // chop off the string at the last directory component } - len = strlen(path) + 1; + len = (int)strlen(path) + 1; } } } else if (ret == 0) { @@ -4802,9 +5049,6 @@ linkat_internal(vfs_context_t ctx, int fd1, user_addr_t path, int fd2, error = nameiat(&nd, fd1); if (error) { - if (error == EPERM) { - printf("XXX 54841485: nameiat() src EPERM\n"); - } return error; } vp = nd.ni_vp; @@ -4818,7 +5062,6 @@ linkat_internal(vfs_context_t ctx, int fd1, user_addr_t path, int fd2, if (vp->v_type == VDIR) { if (!ISSET(vp->v_mount->mnt_kern_flag, MNTK_DIR_HARDLINKS)) { error = EPERM; /* POSIX */ - printf("XXX 54841485: VDIR EPERM\n"); goto out; } @@ -4846,9 +5089,6 @@ linkat_internal(vfs_context_t ctx, int fd1, user_addr_t path, int fd2, nd.ni_dirp = link; error = nameiat(&nd, fd2); if (error != 0) { - if (error == EPERM) { - printf("XXX 54841485: nameiat() dst EPERM\n"); - } goto out; } dvp = nd.ni_dvp; @@ -4856,18 +5096,12 @@ linkat_internal(vfs_context_t ctx, int fd1, user_addr_t path, int fd2, #if CONFIG_MACF if ((error = mac_vnode_check_link(ctx, dvp, vp, &nd.ni_cnd)) != 0) { - if (error == EPERM) { - printf("XXX 54841485: mac_vnode_check_link() EPERM\n"); - } goto out2; } #endif /* or to anything that kauth doesn't want us to (eg. 
immutable items) */ if ((error = vnode_authorize(vp, NULL, KAUTH_VNODE_LINKTARGET, ctx)) != 0) { - if (error == EPERM) { - printf("XXX 54841485: vnode_authorize() LINKTARGET EPERM\n"); - } goto out2; } @@ -4884,18 +5118,12 @@ linkat_internal(vfs_context_t ctx, int fd1, user_addr_t path, int fd2, /* authorize creation of the target note */ if ((error = vnode_authorize(dvp, NULL, KAUTH_VNODE_ADD_FILE, ctx)) != 0) { - if (error == EPERM) { - printf("XXX 54841485: vnode_authorize() ADD_FILE EPERM\n"); - } goto out2; } /* and finally make the link */ error = VNOP_LINK(vp, dvp, &nd.ni_cnd, ctx); if (error) { - if (error == EPERM) { - printf("XXX 54841485: VNOP_LINK() EPERM\n"); - } goto out2; } @@ -4923,10 +5151,6 @@ linkat_internal(vfs_context_t ctx, int fd1, user_addr_t path, int fd2, /* build the path to the new link file */ GET_PATH(target_path); - if (target_path == NULL) { - error = ENOMEM; - goto out2; - } len = safe_getpath(dvp, nd.ni_cnd.cn_nameptr, target_path, MAXPATHLEN, &truncated); @@ -4935,10 +5159,6 @@ linkat_internal(vfs_context_t ctx, int fd1, user_addr_t path, int fd2, if (has_listeners) { /* build the path to file we are linking to */ GET_PATH(link_to_path); - if (link_to_path == NULL) { - error = ENOMEM; - goto out2; - } link_name_len = MAXPATHLEN; if (vn_getpath(vp, link_to_path, &link_name_len) == 0) { @@ -5046,7 +5266,7 @@ symlinkat_internal(vfs_context_t ctx, user_addr_t path_data, int fd, error = 0; if (UIO_SEG_IS_USER_SPACE(segflg)) { - MALLOC_ZONE(path, char *, MAXPATHLEN, M_NAMEI, M_WAITOK); + path = zalloc(ZV_NAMEI); error = copyinstr(path_data, path, MAXPATHLEN, &dummy); } else { path = (char *)path_data; @@ -5182,7 +5402,7 @@ skipit: vnode_put(dvp); out: if (path && (path != (char *)path_data)) { - FREE_ZONE(path, MAXPATHLEN, M_NAMEI); + zfree(ZV_NAMEI, path); } return error; @@ -5358,18 +5578,10 @@ continue_lookup: if (need_event || has_listeners) { if (path == NULL) { GET_PATH(path); - if (path == NULL) { - error = ENOMEM; - goto out; - } } len_path = safe_getpath(dvp, nd.ni_cnd.cn_nameptr, path, MAXPATHLEN, &truncated_path); if (no_firmlink_path == NULL) { GET_PATH(no_firmlink_path); - if (no_firmlink_path == NULL) { - error = ENOMEM; - goto out; - } } len_no_firmlink_path = safe_getpath_no_firmlink(dvp, nd.ni_cnd.cn_nameptr, no_firmlink_path, MAXPATHLEN, &truncated_no_firmlink_path); } @@ -5565,10 +5777,10 @@ lseek(proc_t p, struct lseek_args *uap, off_t *retval) #if CONFIG_MACF if (uap->whence == L_INCR && uap->offset == 0) { error = mac_file_check_get_offset(vfs_context_ucred(ctx), - fp->f_fglob); + fp->fp_glob); } else { error = mac_file_check_change_offset(vfs_context_ucred(ctx), - fp->f_fglob); + fp->fp_glob); } if (error) { file_drop(uap->fd); @@ -5582,7 +5794,7 @@ lseek(proc_t p, struct lseek_args *uap, off_t *retval) switch (uap->whence) { case L_INCR: - offset += fp->f_fglob->fg_offset; + offset += fp->fp_glob->fg_offset; break; case L_XTND: if ((error = vnode_size(vp, &file_size, ctx)) != 0) { @@ -5616,8 +5828,8 @@ lseek(proc_t p, struct lseek_args *uap, off_t *retval) error = EINVAL; } else { /* Success */ - fp->f_fglob->fg_offset = offset; - *retval = fp->f_fglob->fg_offset; + fp->fp_glob->fg_offset = offset; + *retval = fp->fp_glob->fg_offset; } } } @@ -5750,7 +5962,8 @@ access_extended(__unused proc_t p, struct access_extended_args *uap, __unused in errno_t *result = NULL; errno_t error = 0; int wantdelete = 0; - unsigned int desc_max, desc_actual, i, j; + size_t desc_max, desc_actual; + unsigned int i, j; struct vfs_context context; struct 
nameidata nd; int niopts; @@ -5780,7 +5993,7 @@ access_extended(__unused proc_t p, struct access_extended_args *uap, __unused in if (uap->size <= sizeof(stack_input)) { input = stack_input; } else { - MALLOC(input, struct accessx_descriptor *, uap->size, M_TEMP, M_WAITOK); + input = kheap_alloc(KHEAP_DATA_BUFFERS, uap->size, Z_WAITOK); if (input == NULL) { error = ENOMEM; goto out; @@ -5876,7 +6089,8 @@ access_extended(__unused proc_t p, struct access_extended_args *uap, __unused in error = ENOMEM; goto out; } - MALLOC(result, errno_t *, desc_actual * sizeof(errno_t), M_TEMP, M_WAITOK | M_ZERO); + result = kheap_alloc(KHEAP_DATA_BUFFERS, desc_actual * sizeof(errno_t), + Z_WAITOK | Z_ZERO); if (result == NULL) { error = ENOMEM; goto out; @@ -5967,10 +6181,10 @@ access_extended(__unused proc_t p, struct access_extended_args *uap, __unused in out: if (input && input != stack_input) { - FREE(input, M_TEMP); + kheap_free(KHEAP_DATA_BUFFERS, input, uap->size); } if (result) { - FREE(result, M_TEMP); + kheap_free(KHEAP_DATA_BUFFERS, result, desc_actual * sizeof(errno_t)); } if (vp) { vnode_put(vp); @@ -6179,7 +6393,7 @@ fstatat_internal(vfs_context_t ctx, user_addr_t path, user_addr_t ub, * going to let them get the basic stat information. */ error = vn_stat_noauth(nd.ni_vp, statptr, NULL, isstat64, needsrealdev, ctx, - fp->f_fglob->fg_cred); + fp->fp_glob->fg_cred); } else { error = vn_stat(nd.ni_vp, statptr, (xsecurity != USER_ADDR_NULL ? &fsec : NULL), isstat64, needsrealdev, ctx); @@ -6484,6 +6698,10 @@ readlinkat_internal(vfs_context_t ctx, int fd, user_addr_t path, struct nameidata nd; char uio_buf[UIO_SIZEOF(1)]; + if (bufsize > INT32_MAX) { + return EINVAL; + } + NDINIT(&nd, LOOKUP, OP_READLINK, NOFOLLOW | AUDITVNPATH1, seg, path, ctx); @@ -6514,7 +6732,7 @@ readlinkat_internal(vfs_context_t ctx, int fd, user_addr_t path, } vnode_put(vp); - *retval = bufsize - (int)uio_resid(auio); + *retval = (int)(bufsize - uio_resid(auio)); return error; } @@ -7169,8 +7387,8 @@ getutimes(user_addr_t usrtvp, struct timespec *tsp) if (error) { return error; } - TIMEVAL_TO_TIMESPEC(&tv[0], &tsp[0]); - TIMEVAL_TO_TIMESPEC(&tv[1], &tsp[1]); + TIMEVAL64_TO_TIMESPEC(&tv[0], &tsp[0]); + TIMEVAL64_TO_TIMESPEC(&tv[1], &tsp[1]); } else { struct user32_timeval tv[2]; error = copyin(usrtvp, (void *)tv, sizeof(tv)); @@ -7318,7 +7536,7 @@ futimes(__unused proc_t p, struct futimes_args *uap, __unused int32_t *retval) */ /* ARGSUSED */ int -truncate(__unused proc_t p, struct truncate_args *uap, __unused int32_t *retval) +truncate(proc_t p, struct truncate_args *uap, __unused int32_t *retval) { vnode_t vp; struct vnode_attr va; @@ -7326,10 +7544,18 @@ truncate(__unused proc_t p, struct truncate_args *uap, __unused int32_t *retval) int error; struct nameidata nd; kauth_action_t action; + rlim_t fsize_limit; if (uap->length < 0) { return EINVAL; } + + fsize_limit = proc_limitgetcur(p, RLIMIT_FSIZE, TRUE); + if ((rlim_t)uap->length > fsize_limit) { + psignal(p, SIGXFSZ); + return EFBIG; + } + NDINIT(&nd, LOOKUP, OP_TRUNCATE, FOLLOW | AUDITVNPATH1, UIO_USERSPACE, uap->path, ctx); if ((error = namei(&nd))) { @@ -7381,17 +7607,24 @@ ftruncate(proc_t p, struct ftruncate_args *uap, int32_t *retval) struct fileproc *fp; int error; int fd = uap->fd; + rlim_t fsize_limit; AUDIT_ARG(fd, uap->fd); if (uap->length < 0) { return EINVAL; } + fsize_limit = proc_limitgetcur(p, RLIMIT_FSIZE, TRUE); + if ((rlim_t)uap->length > fsize_limit) { + psignal(p, SIGXFSZ); + return EFBIG; + } + if ((error = fp_lookup(p, fd, &fp, 0))) { return 
error; } - switch (FILEGLOB_DTYPE(fp->f_fglob)) { + switch (FILEGLOB_DTYPE(fp->fp_glob)) { case DTYPE_PSXSHM: error = pshm_truncate(p, fp, uap->fd, uap->length, retval); goto out; @@ -7402,9 +7635,9 @@ ftruncate(proc_t p, struct ftruncate_args *uap, int32_t *retval) goto out; } - vp = (vnode_t)fp->f_fglob->fg_data; + vp = (vnode_t)fp->fp_glob->fg_data; - if ((fp->f_fglob->fg_flag & FWRITE) == 0) { + if ((fp->fp_glob->fg_flag & FWRITE) == 0) { AUDIT_ARG(vnpath_withref, vp, ARG_VNODE1); error = EINVAL; goto out; @@ -7418,7 +7651,7 @@ ftruncate(proc_t p, struct ftruncate_args *uap, int32_t *retval) #if CONFIG_MACF error = mac_vnode_check_truncate(ctx, - fp->f_fglob->fg_cred, vp); + fp->fp_glob->fg_cred, vp); if (error) { (void)vnode_put(vp); goto out; @@ -7430,7 +7663,7 @@ ftruncate(proc_t p, struct ftruncate_args *uap, int32_t *retval) #if CONFIG_MACF if (error == 0) { - mac_vnode_notify_truncate(ctx, fp->f_fglob->fg_cred, vp); + mac_vnode_notify_truncate(ctx, fp->fp_glob->fg_cred, vp); } #endif @@ -7532,7 +7765,7 @@ fsync_common(proc_t p, struct fsync_args *uap, int flags) (vp->v_flag & VISNAMEDSTREAM) && (vp->v_parent != NULLVP) && vnode_isshadow(vp) && - (fp->f_flags & FP_WRITTEN)) { + (fp->fp_glob->fg_flag & FWASWRITTEN)) { (void) vnode_flushnamedstream(vp->v_parent, vp, ctx); } #endif @@ -7955,7 +8188,7 @@ fclonefileat(__unused proc_t p, struct fclonefileat_args *uap, return error; } - if ((fp->f_fglob->fg_flag & FREAD) == 0) { + if ((fp->fp_glob->fg_flag & FREAD) == 0) { AUDIT_ARG(vnpath_withref, fvp, ARG_VNODE1); error = EBADF; goto out; @@ -7981,7 +8214,7 @@ rename_submounts_callback(mount_t mp, void *arg) { int error = 0; mount_t pmp = (mount_t)arg; - int prefix_len = strlen(pmp->mnt_vfsstat.f_mntonname); + int prefix_len = (int)strlen(pmp->mnt_vfsstat.f_mntonname); if (strncmp(mp->mnt_vfsstat.f_mntonname, pmp->mnt_vfsstat.f_mntonname, prefix_len) != 0) { return 0; @@ -8025,6 +8258,7 @@ renameat_internal(vfs_context_t ctx, int fromfd, user_addr_t from, vnode_t tvp, tdvp; vnode_t fvp, fdvp; + vnode_t mnt_fvp; struct nameidata *fromnd, *tond; int error; int do_retry; @@ -8039,6 +8273,7 @@ renameat_internal(vfs_context_t ctx, int fromfd, user_addr_t from, int from_len = 0, to_len = 0; int from_len_no_firmlink = 0, to_len_no_firmlink = 0; int holding_mntlock; + int vn_authorize_skipped; mount_t locked_mp = NULL; vnode_t oparent = NULLVP; #if CONFIG_FSE @@ -8054,7 +8289,7 @@ renameat_internal(vfs_context_t ctx, int fromfd, user_addr_t from, struct nameidata from_node, to_node; struct vnode_attr fv_attr, tv_attr; } * __rename_data; - MALLOC(__rename_data, void *, sizeof(*__rename_data), M_TEMP, M_WAITOK); + __rename_data = kheap_alloc(KHEAP_TEMP, sizeof(*__rename_data), Z_WAITOK); fromnd = &__rename_data->from_node; tond = &__rename_data->to_node; @@ -8065,7 +8300,9 @@ retry: fvp = tvp = NULL; fdvp = tdvp = NULL; fvap = tvap = NULL; + mnt_fvp = NULLVP; mntrename = FALSE; + vn_authorize_skipped = FALSE; NDINIT(fromnd, DELETE, OP_UNLINK, WANTPARENT | AUDITVNPATH1, segflg, from, ctx); @@ -8126,8 +8363,31 @@ continue_lookup: } if (tvp && ISSET(flags, VFS_RENAME_EXCL)) { - error = EEXIST; - goto out1; + int32_t pval = 0; + int err = 0; + + /* + * We allow rename with VFS_RENAME_EXCL flag for an existing file which + * has the same name as target iff the following conditions are met: + * 1. the target file system is case insensitive + * 2. source and target directories are the same + * 3. source and target files are the same + * 4. 
name only differs in case (determined by underlying filesystem) + */ + if (fvp != tvp || fdvp != tdvp) { + error = EEXIST; + goto out1; + } + + /* + * Assume that the target file system is case sensitive if + * _PC_CASE_SENSITIVE selector isn't supported. + */ + err = VNOP_PATHCONF(tvp, _PC_CASE_SENSITIVE, &pval, ctx); + if (err != 0 || pval != 0) { + error = EEXIST; + goto out1; + } } batched = vnode_compound_rename_available(fdvp); @@ -8173,20 +8433,12 @@ continue_lookup: if (need_event || has_listeners) { if (from_name == NULL) { GET_PATH(from_name); - if (from_name == NULL) { - error = ENOMEM; - goto out1; - } } from_len = safe_getpath(fdvp, fromnd->ni_cnd.cn_nameptr, from_name, MAXPATHLEN, &from_truncated); if (from_name_no_firmlink == NULL) { GET_PATH(from_name_no_firmlink); - if (from_name_no_firmlink == NULL) { - error = ENOMEM; - goto out1; - } } from_len_no_firmlink = safe_getpath_no_firmlink(fdvp, fromnd->ni_cnd.cn_nameptr, from_name_no_firmlink, MAXPATHLEN, &from_truncated_no_firmlink); @@ -8195,20 +8447,12 @@ continue_lookup: if (need_event || need_kpath2 || has_listeners) { if (to_name == NULL) { GET_PATH(to_name); - if (to_name == NULL) { - error = ENOMEM; - goto out1; - } } to_len = safe_getpath(tdvp, tond->ni_cnd.cn_nameptr, to_name, MAXPATHLEN, &to_truncated); if (to_name_no_firmlink == NULL) { GET_PATH(to_name_no_firmlink); - if (to_name_no_firmlink == NULL) { - error = ENOMEM; - goto out1; - } } to_len_no_firmlink = safe_getpath_no_firmlink(tdvp, tond->ni_cnd.cn_nameptr, to_name_no_firmlink, MAXPATHLEN, &to_truncated_no_firmlink); @@ -8234,24 +8478,6 @@ continue_lookup: goto skipped_lookup; } - if (!batched) { - error = vn_authorize_renamex_with_paths(fdvp, fvp, &fromnd->ni_cnd, from_name, tdvp, tvp, &tond->ni_cnd, to_name, ctx, flags, NULL); - if (error) { - if (error == ENOENT) { - if (retry_count < MAX_AUTHORIZE_ENOENT_RETRIES) { - /* - * We encountered a race where after doing the namei, tvp stops - * being valid. If so, simply re-drive the rename call from the - * top. - */ - do_retry = 1; - retry_count += 1; - } - } - goto out1; - } - } - /* * If the source and destination are the same (i.e. they're * links to the same vnode) and the target file system is @@ -8268,6 +8494,7 @@ continue_lookup: */ if (VNOP_PATHCONF(fvp, _PC_CASE_SENSITIVE, &pathconf_val, ctx) != 0 || pathconf_val != 0) { + vn_authorize_skipped = TRUE; goto out1; } } @@ -8277,7 +8504,7 @@ continue_lookup: * - target must not exist * - target must reside in the same directory as source * - union mounts cannot be renamed - * - "/" cannot be renamed + * - the root fs, and tightly-linked system volumes, cannot be renamed * * XXX Handle this in VFS after a continued lookup (if we missed * in the cache to start off) @@ -8304,7 +8531,11 @@ continue_lookup: error = ENOENT; goto out1; } - vnode_put(fvp); + /* + * Save the 'fvp' as it is needed for vn_authorize_renamex_with_paths() + * later. 
+ */ + mnt_fvp = fvp; fvp = coveredvp; mntrename = TRUE; @@ -8342,6 +8573,7 @@ continue_lookup: if (fromnd->ni_cnd.cn_namelen == tond->ni_cnd.cn_namelen && !bcmp(fromnd->ni_cnd.cn_nameptr, tond->ni_cnd.cn_nameptr, fromnd->ni_cnd.cn_namelen)) { + vn_authorize_skipped = TRUE; goto out1; } } @@ -8399,6 +8631,10 @@ continue_lookup: vnode_put(fvp); vnode_put(fdvp); + if (mnt_fvp != NULLVP) { + vnode_put(mnt_fvp); + } + mount_lock_renames(locked_mp); holding_mntlock = 1; @@ -8422,16 +8658,42 @@ continue_lookup: } } - // save these off so we can later verify that fvp is the same - oname = fvp->v_name; - oparent = fvp->v_parent; - -skipped_lookup: - error = vn_rename(fdvp, &fvp, &fromnd->ni_cnd, fvap, - tdvp, &tvp, &tond->ni_cnd, tvap, - flags, ctx); - - if (holding_mntlock) { + if (!batched) { + error = vn_authorize_renamex_with_paths(fdvp, mntrename ? mnt_fvp : fvp, + &fromnd->ni_cnd, from_name, tdvp, tvp, &tond->ni_cnd, to_name, ctx, + flags, NULL); + if (error) { + if (error == ENOENT) { + if (retry_count < MAX_AUTHORIZE_ENOENT_RETRIES) { + /* + * We encountered a race where after doing the namei, + * tvp stops being valid. If so, simply re-drive the rename + * call from the top. + */ + do_retry = 1; + retry_count += 1; + } + } + goto out1; + } + } + + /* Release the 'mnt_fvp' now that it is no longer needed. */ + if (mnt_fvp != NULLVP) { + vnode_put(mnt_fvp); + mnt_fvp = NULLVP; + } + + // save these off so we can later verify that fvp is the same + oname = fvp->v_name; + oparent = fvp->v_parent; + +skipped_lookup: + error = vn_rename(fdvp, &fvp, &fromnd->ni_cnd, fvap, + tdvp, &tvp, &tond->ni_cnd, tvap, + flags, ctx); + + if (holding_mntlock) { /* * we can drop our serialization * lock now @@ -8594,7 +8856,7 @@ skipped_lookup: error = EBUSY; goto out1; } - MALLOC_ZONE(tobuf, char *, MAXPATHLEN, M_NAMEI, M_WAITOK); + tobuf = zalloc(ZV_NAMEI); if (UIO_SEG_IS_USER_SPACE(segflg)) { error = copyinstr(to, tobuf, MAXPATHLEN, &len); @@ -8620,12 +8882,12 @@ skipped_lookup: vfs_iterate(0, rename_submounts_callback, (void *)mp); /* append name to prefix */ - maxlen = MAXPATHLEN - (pathend - mp->mnt_vfsstat.f_mntonname); + maxlen = MAXPATHLEN - (int)(pathend - mp->mnt_vfsstat.f_mntonname); bzero(pathend, maxlen); strlcpy(pathend, mpname, maxlen); } - FREE_ZONE(tobuf, MAXPATHLEN, M_NAMEI); + zfree(ZV_NAMEI, tobuf); vfs_unbusy(mp); @@ -8651,6 +8913,21 @@ skipped_lookup: vnode_update_identity(fvp, tdvp, tond->ni_cnd.cn_nameptr, tond->ni_cnd.cn_namelen, tond->ni_cnd.cn_hash, update_flags); } out1: + /* + * There are some cases (for e.g. 'fvp == tvp') when vn_authorize was + * skipped earlier as no actual rename was performed. + */ + if (vn_authorize_skipped && error == 0) { + error = vn_authorize_renamex_with_paths(fdvp, fvp, + &fromnd->ni_cnd, from_name, tdvp, tvp, &tond->ni_cnd, to_name, ctx, + flags, NULL); + if (error && error == ENOENT) { + if (retry_count < MAX_AUTHORIZE_ENOENT_RETRIES) { + do_retry = 1; + retry_count += 1; + } + } + } if (to_name != NULL) { RELEASE_PATH(to_name); to_name = NULL; @@ -8696,7 +8973,9 @@ out1: } vnode_put(fdvp); } - + if (mnt_fvp != NULLVP) { + vnode_put(mnt_fvp); + } /* * If things changed after we did the namei, then we will re-drive * this rename call from the top. 
@@ -8706,7 +8985,7 @@ out1: goto retry; } - FREE(__rename_data, M_TEMP); + kheap_free(KHEAP_TEMP, __rename_data, sizeof(*__rename_data)); return error; } @@ -9025,7 +9304,7 @@ continue_lookup: } #if CONFIG_FSE - fse_info finfo; + fse_info finfo = {0}; need_event = need_fsevent(FSE_DELETE, dvp); if (need_event) { @@ -9045,20 +9324,12 @@ continue_lookup: if (need_event || has_listeners) { if (path == NULL) { GET_PATH(path); - if (path == NULL) { - error = ENOMEM; - goto out; - } } len_path = safe_getpath(dvp, nd.ni_cnd.cn_nameptr, path, MAXPATHLEN, &truncated_path); if (no_firmlink_path == NULL) { GET_PATH(no_firmlink_path); - if (no_firmlink_path == NULL) { - error = ENOMEM; - goto out; - } } len_no_firmlink_path = safe_getpath_no_firmlink(dvp, nd.ni_cnd.cn_nameptr, no_firmlink_path, MAXPATHLEN, &truncated_no_firmlink_path); @@ -9234,7 +9505,7 @@ vnode_readdir64(struct vnode *vp, struct uio *uio, int flags, int *eofflag, uio_t auio; struct direntry *entry64; struct dirent *dep; - int bytesread; + size_t bytesread; int error; /* @@ -9255,7 +9526,7 @@ vnode_readdir64(struct vnode *vp, struct uio *uio, int flags, int *eofflag, * prevent uio_resid() * 3 / 8 from overflowing. */ bufsize = 3 * MIN((user_size_t)uio_resid(uio), 87371u) / 8; - MALLOC(bufptr, void *, bufsize, M_TEMP, M_WAITOK); + bufptr = kheap_alloc(KHEAP_DATA_BUFFERS, bufsize, Z_WAITOK); if (bufptr == NULL) { return ENOMEM; } @@ -9269,15 +9540,16 @@ vnode_readdir64(struct vnode *vp, struct uio *uio, int flags, int *eofflag, dep = (struct dirent *)bufptr; bytesread = bufsize - uio_resid(auio); - MALLOC(entry64, struct direntry *, sizeof(struct direntry), - M_TEMP, M_WAITOK); + entry64 = kheap_alloc(KHEAP_TEMP, sizeof(struct direntry), Z_WAITOK); /* * Convert all the entries and copy them out to user's buffer. */ while (error == 0 && (char *)dep < ((char *)bufptr + bytesread)) { - size_t enbufsize = DIRENT64_LEN(dep->d_namlen); - - if (DIRENT_END(dep) > ((char *)bufptr + bytesread) || + /* First check that the dirent struct up to d_name is within the buffer */ + if ((char*)dep + offsetof(struct dirent, d_name) > ((char *)bufptr + bytesread) || + /* Check that the length of the entire dirent is within the buffer */ + DIRENT_END(dep) > ((char *)bufptr + bytesread) || + /* Check that the actual length including the name doesn't exceed d_reclen */ DIRENT_LEN(dep->d_namlen) > dep->d_reclen) { printf("%s: %s: Bad dirent recived from directory %s\n", __func__, vp->v_mount->mnt_vfsstat.f_mntonname, @@ -9286,11 +9558,13 @@ vnode_readdir64(struct vnode *vp, struct uio *uio, int flags, int *eofflag, break; } + size_t enbufsize = DIRENT64_LEN(dep->d_namlen); + bzero(entry64, enbufsize); /* Convert a dirent to a dirent64. 
*/ entry64->d_ino = dep->d_ino; entry64->d_seekoff = 0; - entry64->d_reclen = enbufsize; + entry64->d_reclen = (uint16_t)enbufsize; entry64->d_namlen = dep->d_namlen; entry64->d_type = dep->d_type; bcopy(dep->d_name, entry64->d_name, dep->d_namlen + 1); @@ -9307,8 +9581,8 @@ vnode_readdir64(struct vnode *vp, struct uio *uio, int flags, int *eofflag, uio->uio_offset = auio->uio_offset; } uio_free(auio); - FREE(bufptr, M_TEMP); - FREE(entry64, M_TEMP); + kheap_free(KHEAP_DATA_BUFFERS, bufptr, bufsize); + kheap_free(KHEAP_TEMP, entry64, sizeof(struct direntry)); return error; } } @@ -9335,7 +9609,7 @@ getdirentries_common(int fd, user_addr_t bufp, user_size_t bufsize, ssize_t *byt if (error) { return error; } - if ((fp->f_fglob->fg_flag & FREAD) == 0) { + if ((fp->fp_glob->fg_flag & FREAD) == 0) { AUDIT_ARG(vnpath_withref, vp, ARG_VNODE1); error = EBADF; goto out; @@ -9346,7 +9620,7 @@ getdirentries_common(int fd, user_addr_t bufp, user_size_t bufsize, ssize_t *byt } #if CONFIG_MACF - error = mac_file_check_change_offset(vfs_context_ucred(&context), fp->f_fglob); + error = mac_file_check_change_offset(vfs_context_ucred(&context), fp->fp_glob); if (error) { goto out; } @@ -9371,16 +9645,16 @@ unionread: } #endif /* MAC */ - loff = fp->f_fglob->fg_offset; + loff = fp->fp_glob->fg_offset; auio = uio_createwithbuffer(1, loff, spacetype, UIO_READ, &uio_buf[0], sizeof(uio_buf)); uio_addiov(auio, bufp, bufsize); if (flags & VNODE_READDIR_EXTENDED) { error = vnode_readdir64(vp, auio, flags, eofflag, &numdirent, &context); - fp->f_fglob->fg_offset = uio_offset(auio); + fp->fp_glob->fg_offset = uio_offset(auio); } else { error = VNOP_READDIR(vp, auio, 0, eofflag, &numdirent, &context); - fp->f_fglob->fg_offset = uio_offset(auio); + fp->fp_glob->fg_offset = uio_offset(auio); } if (error) { (void)vnode_put(vp); @@ -9388,23 +9662,12 @@ unionread: } if ((user_ssize_t)bufsize == uio_resid(auio)) { - if (union_dircheckp) { - error = union_dircheckp(&vp, fp, &context); - if (error == -1) { - goto unionread; - } - if (error) { - (void)vnode_put(vp); - goto out; - } - } - if ((vp->v_mount->mnt_flag & MNT_UNION)) { struct vnode *tvp = vp; if (lookup_traverse_union(tvp, &vp, &context) == 0) { vnode_ref(vp); - fp->f_fglob->fg_data = (caddr_t) vp; - fp->f_fglob->fg_offset = 0; + fp->fp_glob->fg_data = (caddr_t) vp; + fp->fp_glob->fg_offset = 0; vnode_rele(tvp); vnode_put(tvp); goto unionread; @@ -9444,7 +9707,7 @@ getdirentries(__unused struct proc *p, struct getdirentries_args *uap, int32_t * user32_long_t base = (user32_long_t)offset; error = copyout((caddr_t)&base, uap->basep, sizeof(user32_long_t)); } - *retval = bytesread; + *retval = (int)bytesread; } return error; } @@ -9633,7 +9896,7 @@ getdirentriesattr(proc_t p, struct getdirentriesattr_args *uap, int32_t *retval) uint32_t count = 0, savecount = 0; uint32_t newstate = 0; int error, eofflag; - uint32_t loff = 0; + off_t loff = 0; struct attrlist attributelist; vfs_context_t ctx = vfs_context_current(); int fd = uap->fd; @@ -9653,7 +9916,7 @@ getdirentriesattr(proc_t p, struct getdirentriesattr_args *uap, int32_t *retval) if ((error = fp_getfvp(p, fd, &fp, &vp))) { return error; } - if ((fp->f_fglob->fg_flag & FREAD) == 0) { + if ((fp->fp_glob->fg_flag & FREAD) == 0) { AUDIT_ARG(vnpath_withref, vp, ARG_VNODE1); error = EBADF; goto out; @@ -9662,7 +9925,7 @@ getdirentriesattr(proc_t p, struct getdirentriesattr_args *uap, int32_t *retval) #if CONFIG_MACF error = mac_file_check_change_offset(vfs_context_ucred(ctx), - fp->f_fglob); + fp->fp_glob); if (error) { 
goto out; } @@ -9691,7 +9954,7 @@ unionread: #endif /* MAC */ /* set up the uio structure which will contain the users return buffer */ - loff = fp->f_fglob->fg_offset; + loff = fp->fp_glob->fg_offset; auio = uio_createwithbuffer(1, loff, spacetype, UIO_READ, &uio_buf[0], sizeof(uio_buf)); uio_addiov(auio, uap->buffer, uap->buffersize); @@ -9711,7 +9974,7 @@ unionread: * info, so truncate before extending again */ error = VNOP_READDIRATTR(vp, &attributelist, auio, count, - (u_long)(uint32_t)uap->options, &newstate, &eofflag, &count, ctx); + (uint32_t)uap->options, &newstate, &eofflag, &count, ctx); } if (error) { @@ -9732,11 +9995,11 @@ unionread: } else { // Empty buffer struct vnode *tvp = vp; if (lookup_traverse_union(tvp, &vp, ctx) == 0) { - vnode_ref_ext(vp, fp->f_fglob->fg_flag & O_EVTONLY, 0); - fp->f_fglob->fg_data = (caddr_t) vp; - fp->f_fglob->fg_offset = 0; // reset index for new dir + vnode_ref_ext(vp, fp->fp_glob->fg_flag & O_EVTONLY, 0); + fp->fp_glob->fg_data = (caddr_t) vp; + fp->fp_glob->fg_offset = 0; // reset index for new dir count = savecount; - vnode_rele_internal(tvp, fp->f_fglob->fg_flag & O_EVTONLY, 0, 0); + vnode_rele_internal(tvp, fp->fp_glob->fg_flag & O_EVTONLY, 0, 0); vnode_put(tvp); goto unionread; } @@ -9749,7 +10012,7 @@ unionread: if (error) { goto out; } - fp->f_fglob->fg_offset = uio_offset(auio); /* should be multiple of dirent, not variable */ + fp->fp_glob->fg_offset = uio_offset(auio); /* should be multiple of dirent, not variable */ if ((error = copyout((caddr_t) &count, uap->count, sizeof(count)))) { goto out; @@ -9858,10 +10121,6 @@ exchangedata(__unused proc_t p, struct exchangedata_args *uap, __unused int32_t kauth_authorize_fileop_has_listeners()) { GET_PATH(fpath); GET_PATH(spath); - if (fpath == NULL || spath == NULL) { - error = ENOMEM; - goto out; - } flen = safe_getpath(fvp, NULL, fpath, MAXPATHLEN, &from_truncated); slen = safe_getpath(svp, NULL, spath, MAXPATHLEN, &to_truncated); @@ -9937,8 +10196,8 @@ uint32_t freespace_mb(vnode_t vp) { vfs_update_vfsstat(vp->v_mount, vfs_context_current(), VFS_USER_EVENT); - return ((uint64_t)vp->v_mount->mnt_vfsstat.f_bavail * - vp->v_mount->mnt_vfsstat.f_bsize) >> 20; + return (uint32_t)(((uint64_t)vp->v_mount->mnt_vfsstat.f_bavail * + vp->v_mount->mnt_vfsstat.f_bsize) >> 20); } #if CONFIG_SEARCHFS @@ -9960,7 +10219,7 @@ searchfs(proc_t p, struct searchfs_args *uap, __unused int32_t *retval) uio_t auio = NULL; int spacetype = proc_is64bit(p) ? 
UIO_USERSPACE64 : UIO_USERSPACE32; uint32_t nummatches; - int mallocsize; + size_t mallocsize; uint32_t nameiflags; vfs_context_t ctx = vfs_context_current(); char uio_buf[UIO_SIZEOF(1)]; @@ -10014,7 +10273,7 @@ searchfs(proc_t p, struct searchfs_args *uap, __unused int32_t *retval) mallocsize = searchblock.sizeofsearchparams1 + searchblock.sizeofsearchparams2 + sizeof(struct attrlist) + sizeof(struct searchstate) + (2 * sizeof(uint32_t)); - MALLOC(searchparams1, void *, mallocsize, M_TEMP, M_WAITOK); + searchparams1 = kheap_alloc(KHEAP_DATA_BUFFERS, mallocsize, Z_WAITOK); /* Now set up the various pointers to the correct place in our newly allocated memory */ @@ -10180,12 +10439,12 @@ searchfs(proc_t p, struct searchfs_args *uap, __unused int32_t *retval) searchparams1, searchparams2, &searchblock.searchattrs, - (u_long)searchblock.maxmatches, + (uint32_t)searchblock.maxmatches, &timelimit, returnattrs, &nummatches, - (u_long)uap->scriptcode, - (u_long)uap->options, + (uint32_t)uap->scriptcode, + (uint32_t)uap->options, auio, (struct searchstate *) &state->ss_fsstate, ctx); @@ -10219,7 +10478,7 @@ saveandexit: freeandexit: - FREE(searchparams1, M_TEMP); + kheap_free(KHEAP_DATA_BUFFERS, searchparams1, mallocsize); return error; } /* end of searchfs system call */ @@ -10294,6 +10553,7 @@ searchfs(__unused proc_t p, __unused struct searchfs_args *uap, __unused int32_t struct nspace_resolver_request { LIST_ENTRY(nspace_resolver_request) r_hashlink; + vnode_t r_vp; uint32_t r_req_id; int r_resolver_error; int r_flags; @@ -10454,7 +10714,7 @@ nspace_resolver_req_mark_complete( } static void -nspace_resolver_req_completed(uint32_t req_id, int resolver_error) +nspace_resolver_req_completed(uint32_t req_id, int resolver_error, uint64_t orig_gencount) { struct nspace_resolver_request *req; @@ -10466,10 +10726,59 @@ nspace_resolver_req_completed(uint32_t req_id, int resolver_error) req = nspace_resolver_req_lookup(req_id); if (req) { + mount_t locked_mp = NULL; + + locked_mp = req->r_vp->v_mount; + mount_ref(locked_mp, 0); + mount_lock_renames(locked_mp); + + // + // if the resolver isn't already returning an error and we have an + // orig_gencount, then get an iocount on the request vnode and check + // that the gencount on req->r_vp has not changed. + // + // note: a ref was taken on req->r_vp when the request was created + // and that ref will be dropped by that thread when it wakes up. + // + if (resolver_error == 0 && + orig_gencount != 0 && + vnode_getwithref(req->r_vp) == 0) { + struct vnode_attr va; + uint64_t cur_gencount; + + VATTR_INIT(&va); + VATTR_WANTED(&va, va_recursive_gencount); + + if (vnode_getattr(req->r_vp, &va, vfs_context_kernel()) == 0) { + cur_gencount = va.va_recursive_gencount; + } else { + cur_gencount = 0; + } + + if (resolver_error == 0 && cur_gencount && orig_gencount && cur_gencount != orig_gencount) { + printf("nspace.complete: gencount changed! (orig %llu cur %llu)\n", orig_gencount, cur_gencount); + + // this error will be returned to the thread that initiated the + // materialization of req->r_vp. + resolver_error = EBUSY; + + // note: we explicitly do not return an error to the caller (i.e. + // the thread that did the materialization) because they said they + // don't want one. 
+ } + + vnode_put(req->r_vp); + } + + mount_unlock_renames(locked_mp); + mount_drop(locked_mp, 0); + nspace_resolver_req_mark_complete(req, resolver_error); } NSPACE_REQ_UNLOCK(); + + return; } static struct proc *nspace_resolver_proc; @@ -10717,7 +11026,8 @@ sysctl_nspace_complete(__unused struct sysctl_oid *oidp, __unused void *arg1, { struct proc *p = req->p; uint32_t req_status[2] = { 0, 0 }; - int error, is_resolver, changed = 0; + uint64_t gencount = 0; + int error, is_resolver, changed = 0, gencount_changed; error = nspace_resolver_get_proc_state(p, &is_resolver); if (error) { @@ -10734,6 +11044,15 @@ sysctl_nspace_complete(__unused struct sysctl_oid *oidp, __unused void *arg1, return error; } + // get the gencount if it was passed + error = sysctl_io_opaque(req, &gencount, sizeof(gencount), + &gencount_changed); + if (error) { + gencount = 0; + // we ignore the error because the gencount was optional + error = 0; + } + /* * req_status[0] is the req_id * @@ -10741,7 +11060,7 @@ sysctl_nspace_complete(__unused struct sysctl_oid *oidp, __unused void *arg1, */ if (error == 0 && changed) { nspace_resolver_req_completed(req_status[0], - (int)req_status[1]); + (int)req_status[1], gencount); } return error; } @@ -10882,11 +11201,7 @@ resolve_nspace_item_ext( return ETIMEDOUT; } - MALLOC_ZONE(path, char *, MAXPATHLEN, M_NAMEI, M_WAITOK); - if (path == NULL) { - error = ENOMEM; - goto out_release_port; - } + path = zalloc(ZV_NAMEI); path_len = MAXPATHLEN; error = vn_getpath(vp, path, &path_len); @@ -10896,10 +11211,17 @@ resolve_nspace_item_ext( req.r_resolver_error = 0; req.r_flags = 0; + if ((error = vnode_ref(vp)) == 0) { // take a ref so that the vnode doesn't go away + req.r_vp = vp; + } else { + goto out_release_port; + } + NSPACE_REQ_LOCK(); error = nspace_resolver_req_add(&req); NSPACE_REQ_UNLOCK(); if (error) { + vnode_rele(req.r_vp); goto out_release_port; } @@ -10917,12 +11239,13 @@ resolve_nspace_item_ext( NSPACE_REQ_LOCK(); nspace_resolver_req_remove(&req); NSPACE_REQ_UNLOCK(); + vnode_rele(req.r_vp); goto out_release_port; } // Give back the memory we allocated earlier while // we wait; we no longer need it. - FREE_ZONE(path, MAXPATHLEN, M_NAMEI); + zfree(ZV_NAMEI, path); path = NULL; // Request has been submitted to the resolver. @@ -10930,11 +11253,13 @@ resolve_nspace_item_ext( // Upon requrn, the request will have been removed // from the lookup table. error = nspace_resolver_req_wait(&req); + + vnode_rele(req.r_vp); } out_release_port: if (path != NULL) { - FREE_ZONE(path, MAXPATHLEN, M_NAMEI); + zfree(ZV_NAMEI, path); } ipc_port_release_send(mp); @@ -11009,6 +11334,122 @@ cas_bsdflags_setattr(vnode_t vp, void *arg, vfs_context_t ctx) return VNOP_IOCTL(vp, FSIOC_CAS_BSDFLAGS, arg, FWRITE, ctx); } +static int __attribute__((noinline)) +handle_sync_volume(vnode_t vp, vnode_t *arg_vp, caddr_t data, vfs_context_t ctx) +{ + struct vfs_attr vfa; + mount_t mp = vp->v_mount; + unsigned arg; + int error; + + /* record vid of vp so we can drop it below. */ + uint32_t vvid = vp->v_id; + + /* + * Then grab mount_iterref so that we can release the vnode. 
+ * Without this, a thread may call vnode_iterate_prepare then + * get into a deadlock because we've never released the root vp + */ + error = mount_iterref(mp, 0); + if (error) { + return error; + } + vnode_put(vp); + + arg = MNT_NOWAIT; + if (*(uint32_t*)data & FSCTL_SYNC_WAIT) { + arg = MNT_WAIT; + } + + /* + * If the filessytem supports multiple filesytems in a + * partition (For eg APFS volumes in a container, it knows + * that the waitfor argument to VFS_SYNC are flags. + */ + VFSATTR_INIT(&vfa); + VFSATTR_WANTED(&vfa, f_capabilities); + if ((vfs_getattr(mp, &vfa, vfs_context_current()) == 0) && + VFSATTR_IS_SUPPORTED(&vfa, f_capabilities) && + ((vfa.f_capabilities.valid[VOL_CAPABILITIES_FORMAT] & VOL_CAP_FMT_SHARED_SPACE)) && + ((vfa.f_capabilities.capabilities[VOL_CAPABILITIES_FORMAT] & VOL_CAP_FMT_SHARED_SPACE))) { + arg |= MNT_VOLUME; + } + + /* issue the sync for this volume */ + (void)sync_callback(mp, &arg); + + /* + * Then release the mount_iterref once we're done syncing; it's not + * needed for the VNOP_IOCTL below + */ + mount_iterdrop(mp); + + if (arg & FSCTL_SYNC_FULLSYNC) { + /* re-obtain vnode iocount on the root vp, if possible */ + error = vnode_getwithvid(vp, vvid); + if (error == 0) { + error = VNOP_IOCTL(vp, F_FULLFSYNC, (caddr_t)NULL, 0, ctx); + vnode_put(vp); + } + } + /* mark the argument VP as having been released */ + *arg_vp = NULL; + return error; +} + +#if ROUTEFS +static int __attribute__((noinline)) +handle_routes(user_addr_t udata) +{ + char routepath[MAXPATHLEN]; + size_t len = 0; + int error; + + if ((error = suser(kauth_cred_get(), &(current_proc()->p_acflag)))) { + return error; + } + bzero(routepath, MAXPATHLEN); + error = copyinstr(udata, &routepath[0], MAXPATHLEN, &len); + if (error) { + return error; + } + error = routefs_kernel_mount(routepath); + return error; +} +#endif + +static int __attribute__((noinline)) +handle_flags(vnode_t vp, caddr_t data, vfs_context_t ctx) +{ + struct fsioc_cas_bsdflags *cas = (struct fsioc_cas_bsdflags *)data; + struct vnode_attr va; + int error; + + VATTR_INIT(&va); + VATTR_SET(&va, va_flags, cas->new_flags); + + error = chflags0(vp, &va, cas_bsdflags_setattr, cas, ctx); + return error; +} + +static int __attribute__((noinline)) +handle_auth(vnode_t vp, u_long cmd, caddr_t data, u_long options, vfs_context_t ctx) +{ + struct mount *mp = NULL; + errno_t rootauth = 0; + + mp = vp->v_mount; + + /* + * query the underlying FS and see if it reports something + * sane for this vnode. If volume is authenticated via + * chunklist, leave that for the caller to determine. 
+ */ + rootauth = VNOP_IOCTL(vp, cmd, data, (int)options, ctx); + + return rootauth; +} + /* * Make a filesystem-specific control call: */ @@ -11040,7 +11481,7 @@ fsctl_internal(proc_t p, vnode_t *arg_vp, u_long cmd, user_addr_t udata, u_long memp = NULL; if (size > sizeof(stkbuf)) { - if ((memp = (caddr_t)kalloc(size)) == 0) { + if ((memp = (caddr_t)kheap_alloc(KHEAP_TEMP, size, Z_WAITOK)) == 0) { return ENOMEM; } data = memp; @@ -11053,7 +11494,7 @@ fsctl_internal(proc_t p, vnode_t *arg_vp, u_long cmd, user_addr_t udata, u_long error = copyin(udata, data, size); if (error) { if (memp) { - kfree(memp, size); + kheap_free(KHEAP_TEMP, memp, size); } return error; } @@ -11080,87 +11521,15 @@ fsctl_internal(proc_t p, vnode_t *arg_vp, u_long cmd, user_addr_t udata, u_long /* Check to see if it's a generic command */ switch (cmd) { - case FSIOC_SYNC_VOLUME: { - struct vfs_attr vfa; - mount_t mp = vp->v_mount; - unsigned arg; - - - /* record vid of vp so we can drop it below. */ - uint32_t vvid = vp->v_id; - - /* - * Then grab mount_iterref so that we can release the vnode. - * Without this, a thread may call vnode_iterate_prepare then - * get into a deadlock because we've never released the root vp - */ - error = mount_iterref(mp, 0); - if (error) { - break; - } - vnode_put(vp); - - arg = MNT_NOWAIT; - if (*(uint32_t*)data & FSCTL_SYNC_WAIT) { - arg = MNT_WAIT; - } - - /* - * If the filessytem supports multiple filesytems in a - * partition (For eg APFS volumes in a container, it knows - * that the waitfor argument to VFS_SYNC are flags. - */ - VFSATTR_INIT(&vfa); - VFSATTR_WANTED(&vfa, f_capabilities); - if ((vfs_getattr(mp, &vfa, vfs_context_current()) == 0) && - VFSATTR_IS_SUPPORTED(&vfa, f_capabilities) && - ((vfa.f_capabilities.valid[VOL_CAPABILITIES_FORMAT] & VOL_CAP_FMT_SHARED_SPACE)) && - ((vfa.f_capabilities.capabilities[VOL_CAPABILITIES_FORMAT] & VOL_CAP_FMT_SHARED_SPACE))) { - arg |= MNT_VOLUME; - } - - /* issue the sync for this volume */ - (void)sync_callback(mp, &arg); - - /* - * Then release the mount_iterref once we're done syncing; it's not - * needed for the VNOP_IOCTL below - */ - mount_iterdrop(mp); - - if (arg & FSCTL_SYNC_FULLSYNC) { - /* re-obtain vnode iocount on the root vp, if possible */ - error = vnode_getwithvid(vp, vvid); - if (error == 0) { - error = VNOP_IOCTL(vp, F_FULLFSYNC, (caddr_t)NULL, 0, ctx); - vnode_put(vp); - } - } - /* mark the argument VP as having been released */ - *arg_vp = NULL; - } - break; + case FSIOC_SYNC_VOLUME: + error = handle_sync_volume(vp, arg_vp, data, ctx); + break; - case FSIOC_ROUTEFS_SETROUTEID: { + case FSIOC_ROUTEFS_SETROUTEID: #if ROUTEFS - char routepath[MAXPATHLEN]; - size_t len = 0; - - if ((error = suser(kauth_cred_get(), &(current_proc()->p_acflag)))) { - break; - } - bzero(routepath, MAXPATHLEN); - error = copyinstr(udata, &routepath[0], MAXPATHLEN, &len); - if (error) { - break; - } - error = routefs_kernel_mount(routepath); - if (error) { - break; - } + error = handle_routes(udata); #endif - } - break; + break; case FSIOC_SET_PACKAGE_EXTS: { user_addr_t ext_strings; @@ -11181,7 +11550,10 @@ fsctl_internal(proc_t p, vnode_t *arg_vp, u_long cmd, user_addr_t udata, u_long } if (is64bit) { - ext_strings = ((user64_package_ext_info *)data)->strings; + if (sizeof(user64_addr_t) > sizeof(user_addr_t)) { + assert(((user64_package_ext_info *)data)->strings <= UINT32_MAX); + } + ext_strings = (user_addr_t)((user64_package_ext_info *)data)->strings; num_entries = ((user64_package_ext_info *)data)->num_entries; max_width = 
((user64_package_ext_info *)data)->max_width; } else { @@ -11201,6 +11573,21 @@ fsctl_internal(proc_t p, vnode_t *arg_vp, u_long cmd, user_addr_t udata, u_long if (vp->v_mount) { mount_lock(vp->v_mount); if (data[0] != 0) { + int i; + for (i = 0; i < MFSTYPENAMELEN; i++) { + if (!data[i]) { + goto continue_copy; + } + } + /* + * Getting here means we have a user data string which has no + * NULL termination in its first MFSTYPENAMELEN bytes. + * This is bogus, let's avoid strlcpy-ing the read data and + * return an error. + */ + error = EINVAL; + goto unlock; +continue_copy: strlcpy(&vp->v_mount->fstypename_override[0], data, MFSTYPENAMELEN); vp->v_mount->mnt_kern_flag |= MNTK_TYPENAME_OVERRIDE; if (vfs_isrdonly(vp->v_mount) && strcmp(vp->v_mount->fstypename_override, "mtmfs") == 0) { @@ -11214,6 +11601,7 @@ fsctl_internal(proc_t p, vnode_t *arg_vp, u_long cmd, user_addr_t udata, u_long vp->v_mount->mnt_kern_flag &= ~MNTK_TYPENAME_OVERRIDE; vp->v_mount->fstypename_override[0] = '\0'; } +unlock: mount_unlock(vp->v_mount); } } @@ -11229,26 +11617,30 @@ fsctl_internal(proc_t p, vnode_t *arg_vp, u_long cmd, user_addr_t udata, u_long } break; - case FSIOC_CAS_BSDFLAGS: { - struct fsioc_cas_bsdflags *cas = (struct fsioc_cas_bsdflags *)data; - struct vnode_attr va; - - VATTR_INIT(&va); - VATTR_SET(&va, va_flags, cas->new_flags); - - error = chflags0(vp, &va, cas_bsdflags_setattr, cas, ctx); - } - break; + case FSIOC_CAS_BSDFLAGS: + error = handle_flags(vp, data, ctx); + break; case FSIOC_FD_ONLY_OPEN_ONCE: { + error = 0; if (vnode_usecount(vp) > 1) { - error = EBUSY; - } else { - error = 0; + vnode_lock_spin(vp); + if (vp->v_lflag & VL_HASSTREAMS) { + if (vnode_isinuse_locked(vp, 1, 1)) { + error = EBUSY; + } + } else if (vnode_usecount(vp) > 1) { + error = EBUSY; + } + vnode_unlock(vp); } } break; + case FSIOC_EVAL_ROOTAUTH: + error = handle_auth(vp, cmd, data, options, ctx); + break; + default: { /* other, known commands shouldn't be passed down here */ switch (cmd) { @@ -11275,11 +11667,12 @@ fsctl_internal(proc_t p, vnode_t *arg_vp, u_long cmd, user_addr_t udata, u_long case F_BARRIERFSYNC: case F_FREEZE_FS: case F_THAW_FS: + case FSIOC_KERNEL_ROOTAUTH: error = EINVAL; goto outdrop; } /* Invoke the filesystem-specific code */ - error = VNOP_IOCTL(vp, cmd, data, options, ctx); + error = VNOP_IOCTL(vp, cmd, data, (int)options, ctx); } } /* end switch stmt */ @@ -11293,7 +11686,7 @@ fsctl_internal(proc_t p, vnode_t *arg_vp, u_long cmd, user_addr_t udata, u_long outdrop: if (memp) { - kfree(memp, size); + kheap_free(KHEAP_TEMP, memp, size); } return error; @@ -11305,11 +11698,11 @@ fsctl(proc_t p, struct fsctl_args *uap, __unused int32_t *retval) { int error; struct nameidata nd; - u_long nameiflags; + uint32_t nameiflags; vnode_t vp = NULL; vfs_context_t ctx = vfs_context_current(); - AUDIT_ARG(cmd, uap->cmd); + AUDIT_ARG(cmd, (int)uap->cmd); AUDIT_ARG(value32, uap->options); /* Get the vnode for the file we are getting info on: */ nameiflags = 0; @@ -11361,7 +11754,7 @@ ffsctl(proc_t p, struct ffsctl_args *uap, __unused int32_t *retval) int fd = -1; AUDIT_ARG(fd, uap->fd); - AUDIT_ARG(cmd, uap->cmd); + AUDIT_ARG(cmd, (int)uap->cmd); AUDIT_ARG(value32, uap->options); /* Get the vnode for the file we are getting info on: */ @@ -11395,6 +11788,27 @@ ffsctl(proc_t p, struct ffsctl_args *uap, __unused int32_t *retval) } /* end of fsctl system call */ +#define FILESEC_ACCESS_ENTITLEMENT \ + "com.apple.private.vfs.filesec-access" + +static int +xattr_entitlement_check(const char *attrname, 
vfs_context_t ctx, bool setting) +{ + if (strcmp(attrname, KAUTH_FILESEC_XATTR) == 0) { + /* + * get: root and tasks with FILESEC_ACCESS_ENTITLEMENT. + * set: only tasks with FILESEC_ACCESS_ENTITLEMENT. + */ + if ((!setting && vfs_context_issuser(ctx)) || + IOTaskHasEntitlement(current_task(), + FILESEC_ACCESS_ENTITLEMENT)) { + return 0; + } + } + + return EPERM; +} + /* * Retrieve the data of an extended attribute. */ @@ -11429,11 +11843,9 @@ getxattr(proc_t p, struct getxattr_args *uap, user_ssize_t *retval) if (error != 0) { goto out; } - if (xattr_protected(attrname)) { - if (!vfs_context_issuser(ctx) || strcmp(attrname, "com.apple.system.Security") != 0) { - error = EPERM; - goto out; - } + if (xattr_protected(attrname) && + (error = xattr_entitlement_check(attrname, ctx, false)) != 0) { + goto out; } /* * the specific check for 0xffffffff is a hack to preserve @@ -11491,6 +11903,7 @@ fgetxattr(proc_t p, struct fgetxattr_args *uap, user_ssize_t *retval) { vnode_t vp; char attrname[XATTR_MAXNAMELEN + 1]; + vfs_context_t ctx = vfs_context_current(); uio_t auio = NULL; int spacetype = IS_64BIT_PROCESS(p) ? UIO_USERSPACE64 : UIO_USERSPACE32; size_t attrsize = 0; @@ -11513,8 +11926,8 @@ fgetxattr(proc_t p, struct fgetxattr_args *uap, user_ssize_t *retval) if (error != 0) { goto out; } - if (xattr_protected(attrname)) { - error = EPERM; + if (xattr_protected(attrname) && + (error = xattr_entitlement_check(attrname, ctx, false)) != 0) { goto out; } if (uap->value && uap->size > 0) { @@ -11566,12 +11979,16 @@ setxattr(proc_t p, struct setxattr_args *uap, int *retval) /* Otherwise return the default error from copyinstr to detect ERANGE, etc */ return error; } - if (xattr_protected(attrname)) { - return EPERM; + if (xattr_protected(attrname) && + (error = xattr_entitlement_check(attrname, ctx, true)) != 0) { + return error; } if (uap->size != 0 && uap->value == 0) { return EINVAL; } + if (uap->size > INT_MAX) { + return E2BIG; + } nameiflags = (uap->options & XATTR_NOFOLLOW) ? 0 : FOLLOW; NDINIT(&nd, LOOKUP, OP_SETXATTR, nameiflags, spacetype, uap->path, ctx); @@ -11606,14 +12023,12 @@ fsetxattr(proc_t p, struct fsetxattr_args *uap, int *retval) { vnode_t vp; char attrname[XATTR_MAXNAMELEN + 1]; + vfs_context_t ctx = vfs_context_current(); uio_t auio = NULL; int spacetype = IS_64BIT_PROCESS(p) ? 
UIO_USERSPACE64 : UIO_USERSPACE32; size_t namelen; int error; char uio_buf[UIO_SIZEOF(1)]; -#if CONFIG_FSE - vfs_context_t ctx = vfs_context_current(); -#endif if (uap->options & (XATTR_NOFOLLOW | XATTR_NOSECURITY | XATTR_NODEFAULT)) { return EINVAL; @@ -11628,12 +12043,16 @@ fsetxattr(proc_t p, struct fsetxattr_args *uap, int *retval) /* Otherwise return the default error from copyinstr to detect ERANGE, etc */ return error; } - if (xattr_protected(attrname)) { - return EPERM; + if (xattr_protected(attrname) && + (error = xattr_entitlement_check(attrname, ctx, true)) != 0) { + return error; } if (uap->size != 0 && uap->value == 0) { return EINVAL; } + if (uap->size > INT_MAX) { + return E2BIG; + } if ((error = file_vnode(uap->fd, &vp))) { return error; } @@ -11928,7 +12347,7 @@ unionget: bpflags |= BUILDPATH_NO_FIRMLINK; } bpflags |= BUILDPATH_CHECK_MOVED; - error = build_path(vp, buf, bufsize, &length, bpflags, ctx); + error = build_path(vp, buf, (int)bufsize, &length, bpflags, ctx); vnode_put(vp); if (error) { @@ -11946,26 +12365,24 @@ unionget: AUDIT_ARG(text, buf); - if (kdebug_enable) { - long dbg_parms[NUMPARMS]; - int dbg_namelen; + if (kdebug_debugid_enabled(VFS_LOOKUP) && length > 0) { + unsigned long path_words[NUMPARMS]; + size_t path_len = sizeof(path_words); - dbg_namelen = (int)sizeof(dbg_parms); + if ((size_t)length < path_len) { + memcpy((char *)path_words, buf, length); + memset((char *)path_words + length, 0, path_len - length); - if (length < dbg_namelen) { - memcpy((char *)dbg_parms, buf, length); - memset((char *)dbg_parms + length, 0, dbg_namelen - length); - - dbg_namelen = length; + path_len = length; } else { - memcpy((char *)dbg_parms, buf + (length - dbg_namelen), dbg_namelen); + memcpy((char *)path_words, buf + (length - path_len), path_len); } - kdebug_vfs_lookup(dbg_parms, dbg_namelen, (void *)vp, + kdebug_vfs_lookup(path_words, (int)path_len, vp, KDBG_VFS_LOOKUP_FLAG_LOOKUP); } - *pathlen = (user_ssize_t)length; /* may be superseded by error */ + *pathlen = length; /* may be superseded by error */ out: return error; @@ -11975,7 +12392,7 @@ out: * Obtain the full pathname of a file system object by id. 
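 *
 * Illustrative user-space sketch (editor's example, not part of this patch):
 * the fsid is typically obtained from statfs(2) and the object id from
 * stat(2) st_ino, e.g.
 *
 *     struct statfs sfs;
 *     struct stat sb;
 *     char path[MAXPATHLEN];
 *
 *     if (statfs("/some/file", &sfs) == 0 && stat("/some/file", &sb) == 0) {
 *         ssize_t len = fsgetpath(path, sizeof(path), &sfs.f_fsid, sb.st_ino);
 *     }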
*/ static int -fsgetpath_extended(user_addr_t buf, int bufsize, user_addr_t user_fsid, uint64_t objid, +fsgetpath_extended(user_addr_t buf, user_size_t bufsize, user_addr_t user_fsid, uint64_t objid, uint32_t options, user_ssize_t *retval) { vfs_context_t ctx = vfs_context_current(); @@ -11998,7 +12415,7 @@ fsgetpath_extended(user_addr_t buf, int bufsize, user_addr_t user_fsid, uint64_t if (bufsize > PAGE_SIZE || bufsize <= 0) { return EINVAL; } - MALLOC(realpath, char *, bufsize, M_TEMP, M_WAITOK | M_ZERO); + realpath = kheap_alloc(KHEAP_TEMP, bufsize, Z_WAITOK | Z_ZERO); if (realpath == NULL) { return ENOMEM; } @@ -12014,9 +12431,7 @@ fsgetpath_extended(user_addr_t buf, int bufsize, user_addr_t user_fsid, uint64_t *retval = (user_ssize_t)length; /* may be superseded by error */ out: - if (realpath) { - FREE(realpath, M_TEMP); - } + kheap_free(KHEAP_TEMP, realpath, bufsize); return error; } @@ -12054,7 +12469,7 @@ munge_statfs(struct mount *mp, struct vfsstatfs *sfsp, my_size = copy_size = sizeof(sfs); bzero(&sfs, my_size); sfs.f_flags = mp->mnt_flag & MNT_VISFLAGMASK; - sfs.f_type = mp->mnt_vtable->vfc_typenum; + sfs.f_type = (short)mp->mnt_vtable->vfc_typenum; sfs.f_reserved1 = (short)sfsp->f_fssubtype; sfs.f_bsize = (user64_long_t)sfsp->f_bsize; sfs.f_iosize = (user64_long_t)sfsp->f_iosize; @@ -12084,7 +12499,7 @@ munge_statfs(struct mount *mp, struct vfsstatfs *sfsp, bzero(&sfs, my_size); sfs.f_flags = mp->mnt_flag & MNT_VISFLAGMASK; - sfs.f_type = mp->mnt_vtable->vfc_typenum; + sfs.f_type = (short)mp->mnt_vtable->vfc_typenum; sfs.f_reserved1 = (short)sfsp->f_fssubtype; /* @@ -12130,7 +12545,7 @@ munge_statfs(struct mount *mp, struct vfsstatfs *sfsp, sfs.f_bavail = (user32_long_t)__SHIFT_OR_CLIP(sfsp->f_bavail, shift); #undef __SHIFT_OR_CLIP sfs.f_bsize = (user32_long_t)(sfsp->f_bsize << shift); - sfs.f_iosize = lmax(sfsp->f_iosize, sfsp->f_bsize); + sfs.f_iosize = (int)lmax(sfsp->f_iosize, sfsp->f_bsize); } else { /* filesystem is small enough to be reported honestly */ sfs.f_bsize = (user32_long_t)sfsp->f_bsize; @@ -12216,12 +12631,12 @@ munge_user32_stat(struct stat *sbp, struct user32_stat *usbp) usbp->st_gid = sbp->st_gid; usbp->st_rdev = sbp->st_rdev; #ifndef _POSIX_C_SOURCE - usbp->st_atimespec.tv_sec = sbp->st_atimespec.tv_sec; - usbp->st_atimespec.tv_nsec = sbp->st_atimespec.tv_nsec; - usbp->st_mtimespec.tv_sec = sbp->st_mtimespec.tv_sec; - usbp->st_mtimespec.tv_nsec = sbp->st_mtimespec.tv_nsec; - usbp->st_ctimespec.tv_sec = sbp->st_ctimespec.tv_sec; - usbp->st_ctimespec.tv_nsec = sbp->st_ctimespec.tv_nsec; + usbp->st_atimespec.tv_sec = (user32_time_t)sbp->st_atimespec.tv_sec; + usbp->st_atimespec.tv_nsec = (user32_long_t)sbp->st_atimespec.tv_nsec; + usbp->st_mtimespec.tv_sec = (user32_time_t)sbp->st_mtimespec.tv_sec; + usbp->st_mtimespec.tv_nsec = (user32_long_t)sbp->st_mtimespec.tv_nsec; + usbp->st_ctimespec.tv_sec = (user32_time_t)sbp->st_ctimespec.tv_sec; + usbp->st_ctimespec.tv_nsec = (user32_long_t)sbp->st_ctimespec.tv_nsec; #else usbp->st_atime = sbp->st_atime; usbp->st_atimensec = sbp->st_atimensec; @@ -12297,14 +12712,14 @@ munge_user32_stat64(struct stat64 *sbp, struct user32_stat64 *usbp) usbp->st_gid = sbp->st_gid; usbp->st_rdev = sbp->st_rdev; #ifndef _POSIX_C_SOURCE - usbp->st_atimespec.tv_sec = sbp->st_atimespec.tv_sec; - usbp->st_atimespec.tv_nsec = sbp->st_atimespec.tv_nsec; - usbp->st_mtimespec.tv_sec = sbp->st_mtimespec.tv_sec; - usbp->st_mtimespec.tv_nsec = sbp->st_mtimespec.tv_nsec; - usbp->st_ctimespec.tv_sec = sbp->st_ctimespec.tv_sec; - 
usbp->st_ctimespec.tv_nsec = sbp->st_ctimespec.tv_nsec; - usbp->st_birthtimespec.tv_sec = sbp->st_birthtimespec.tv_sec; - usbp->st_birthtimespec.tv_nsec = sbp->st_birthtimespec.tv_nsec; + usbp->st_atimespec.tv_sec = (user32_time_t)sbp->st_atimespec.tv_sec; + usbp->st_atimespec.tv_nsec = (user32_long_t)sbp->st_atimespec.tv_nsec; + usbp->st_mtimespec.tv_sec = (user32_time_t)sbp->st_mtimespec.tv_sec; + usbp->st_mtimespec.tv_nsec = (user32_long_t)sbp->st_mtimespec.tv_nsec; + usbp->st_ctimespec.tv_sec = (user32_time_t)sbp->st_ctimespec.tv_sec; + usbp->st_ctimespec.tv_nsec = (user32_long_t)sbp->st_ctimespec.tv_nsec; + usbp->st_birthtimespec.tv_sec = (user32_time_t)sbp->st_birthtimespec.tv_sec; + usbp->st_birthtimespec.tv_nsec = (user32_long_t)sbp->st_birthtimespec.tv_nsec; #else usbp->st_atime = sbp->st_atime; usbp->st_atimensec = sbp->st_atimensec; @@ -12423,7 +12838,7 @@ vnode_get_snapshot(int dirfd, vnode_t *rvpp, vnode_t *sdvpp, goto out; } - MALLOC(name_buf, caddr_t, MAXPATHLEN, M_TEMP, M_WAITOK); + name_buf = zalloc_flags(ZV_NAMEI, Z_WAITOK); error = copyinstr(name, name_buf, MAXPATHLEN, &name_len); if (error) { goto out1; @@ -12466,7 +12881,7 @@ vnode_get_snapshot(int dirfd, vnode_t *rvpp, vnode_t *sdvpp, error = namei(ndp); out1: - FREE(name_buf, M_TEMP); + zfree(ZV_NAMEI, name_buf); out: if (error) { if (*sdvpp) { @@ -12496,41 +12911,50 @@ out: * Since this requires superuser privileges, vnode_authorize calls are not * made. */ -static int +static int __attribute__((noinline)) snapshot_create(int dirfd, user_addr_t name, __unused uint32_t flags, vfs_context_t ctx) { vnode_t rvp, snapdvp; int error; - struct nameidata namend; + struct nameidata *ndp; - error = vnode_get_snapshot(dirfd, &rvp, &snapdvp, name, &namend, CREATE, + ndp = kheap_alloc(KHEAP_TEMP, sizeof(*ndp), Z_WAITOK); + + error = vnode_get_snapshot(dirfd, &rvp, &snapdvp, name, ndp, CREATE, OP_LINK, ctx); if (error) { - return error; + goto out; } - if (namend.ni_vp) { - vnode_put(namend.ni_vp); + if (ndp->ni_vp) { + vnode_put(ndp->ni_vp); error = EEXIST; } else { - struct vnode_attr va; + struct vnode_attr *vap; vnode_t vp = NULLVP; - VATTR_INIT(&va); - VATTR_SET(&va, va_type, VREG); - VATTR_SET(&va, va_mode, 0); + vap = kheap_alloc(KHEAP_TEMP, sizeof(*vap), Z_WAITOK); - error = vn_create(snapdvp, &vp, &namend, &va, + VATTR_INIT(vap); + VATTR_SET(vap, va_type, VREG); + VATTR_SET(vap, va_mode, 0); + + error = vn_create(snapdvp, &vp, ndp, vap, VN_CREATE_NOAUTH | VN_CREATE_NOINHERIT, 0, NULL, ctx); if (!error && vp) { vnode_put(vp); } + + kheap_free(KHEAP_TEMP, vap, sizeof(*vap)); } - nameidone(&namend); + nameidone(ndp); vnode_put(snapdvp); vnode_put(rvp); +out: + kheap_free(KHEAP_TEMP, ndp, sizeof(*ndp)); + return error; } @@ -12540,28 +12964,32 @@ snapshot_create(int dirfd, user_addr_t name, __unused uint32_t flags, * get the vnode for the unnamed snapshot directory and the snapshot and * delete the snapshot. 
*/ -static int +static int __attribute__((noinline)) snapshot_delete(int dirfd, user_addr_t name, __unused uint32_t flags, vfs_context_t ctx) { vnode_t rvp, snapdvp; int error; - struct nameidata namend; + struct nameidata *ndp; - error = vnode_get_snapshot(dirfd, &rvp, &snapdvp, name, &namend, DELETE, + ndp = kheap_alloc(KHEAP_TEMP, sizeof(*ndp), Z_WAITOK); + + error = vnode_get_snapshot(dirfd, &rvp, &snapdvp, name, ndp, DELETE, OP_UNLINK, ctx); if (error) { goto out; } - error = VNOP_REMOVE(snapdvp, namend.ni_vp, &namend.ni_cnd, + error = VNOP_REMOVE(snapdvp, ndp->ni_vp, &ndp->ni_cnd, VNODE_REMOVE_SKIP_NAMESPACE_EVENT, ctx); - vnode_put(namend.ni_vp); - nameidone(&namend); + vnode_put(ndp->ni_vp); + nameidone(ndp); vnode_put(snapdvp); vnode_put(rvp); out: + kheap_free(KHEAP_TEMP, ndp, sizeof(*ndp)); + return error; } @@ -12570,7 +12998,7 @@ out: * * Marks the filesystem to revert to the given snapshot on next mount. */ -static int +static int __attribute__((noinline)) snapshot_revert(int dirfd, user_addr_t name, __unused uint32_t flags, vfs_context_t ctx) { @@ -12588,10 +13016,10 @@ snapshot_revert(int dirfd, user_addr_t name, __unused uint32_t flags, } mp = vnode_mount(rvp); - MALLOC(name_buf, caddr_t, MAXPATHLEN, M_TEMP, M_WAITOK); + name_buf = zalloc_flags(ZV_NAMEI, Z_WAITOK); error = copyinstr(name, name_buf, MAXPATHLEN, &name_len); if (error) { - FREE(name_buf, M_TEMP); + zfree(ZV_NAMEI, name_buf); vnode_put(rvp); return error; } @@ -12599,7 +13027,7 @@ snapshot_revert(int dirfd, user_addr_t name, __unused uint32_t flags, #if CONFIG_MACF error = mac_mount_check_snapshot_revert(ctx, mp, name_buf); if (error) { - FREE(name_buf, M_TEMP); + zfree(ZV_NAMEI, name_buf); vnode_put(rvp); return error; } @@ -12612,7 +13040,7 @@ snapshot_revert(int dirfd, user_addr_t name, __unused uint32_t flags, error = mount_iterref(mp, 0); vnode_put(rvp); if (error) { - FREE(name_buf, M_TEMP); + zfree(ZV_NAMEI, name_buf); return error; } @@ -12627,7 +13055,7 @@ snapshot_revert(int dirfd, user_addr_t name, __unused uint32_t flags, error = VFS_IOCTL(mp, VFSIOC_REVERT_SNAPSHOT, (caddr_t)&revert_data, 0, ctx); mount_iterdrop(mp); - FREE(name_buf, M_TEMP); + zfree(ZV_NAMEI, name_buf); if (error) { /* If there was any error, try again using VNOP_IOCTL */ @@ -12662,7 +13090,7 @@ snapshot_revert(int dirfd, user_addr_t name, __unused uint32_t flags, * rename(2) (which has to deal with a lot more complications). It differs * slightly from rename(2) in that EEXIST is returned if the new name exists. 
*/ -static int +static int __attribute__((noinline)) snapshot_rename(int dirfd, user_addr_t old, user_addr_t new, __unused uint32_t flags, vfs_context_t ctx) { @@ -12678,7 +13106,7 @@ snapshot_rename(int dirfd, user_addr_t old, user_addr_t new, struct nameidata to_node; } * __rename_data; - MALLOC(__rename_data, void *, sizeof(*__rename_data), M_TEMP, M_WAITOK); + __rename_data = kheap_alloc(KHEAP_TEMP, sizeof(*__rename_data), Z_WAITOK); fromnd = &__rename_data->from_node; tond = &__rename_data->to_node; @@ -12689,7 +13117,7 @@ snapshot_rename(int dirfd, user_addr_t old, user_addr_t new, } fvp = fromnd->ni_vp; - MALLOC(newname_buf, caddr_t, MAXPATHLEN, M_TEMP, M_WAITOK); + newname_buf = zalloc_flags(ZV_NAMEI, Z_WAITOK); error = copyinstr(new, newname_buf, MAXPATHLEN, &name_len); if (error) { goto out1; @@ -12747,13 +13175,13 @@ snapshot_rename(int dirfd, user_addr_t old, user_addr_t new, out2: nameidone(tond); out1: - FREE(newname_buf, M_TEMP); + zfree(ZV_NAMEI, newname_buf); vnode_put(fvp); vnode_put(snapdvp); vnode_put(rvp); nameidone(fromnd); out: - FREE(__rename_data, M_TEMP); + kheap_free(KHEAP_TEMP, __rename_data, sizeof(*__rename_data)); return error; } @@ -12763,7 +13191,7 @@ out: * get the vnode for the unnamed snapshot directory and the snapshot and * mount the snapshot. */ -static int +static int __attribute__((noinline)) snapshot_mount(int dirfd, user_addr_t name, user_addr_t directory, __unused user_addr_t mnt_data, __unused uint32_t flags, vfs_context_t ctx) { @@ -12778,8 +13206,8 @@ snapshot_mount(int dirfd, user_addr_t name, user_addr_t directory, struct nameidata dirnd; } * __snapshot_mount_data; - MALLOC(__snapshot_mount_data, void *, sizeof(*__snapshot_mount_data), - M_TEMP, M_WAITOK); + __snapshot_mount_data = kheap_alloc(KHEAP_TEMP, + sizeof(*__snapshot_mount_data), Z_WAITOK); snapndp = &__snapshot_mount_data->snapnd; dirndp = &__snapshot_mount_data->dirnd; @@ -12836,7 +13264,8 @@ out1: vnode_put(rvp); nameidone(snapndp); out: - FREE(__snapshot_mount_data, M_TEMP); + kheap_free(KHEAP_TEMP, __snapshot_mount_data, + sizeof(*__snapshot_mount_data)); return error; } @@ -12845,7 +13274,7 @@ out: * * Marks the filesystem to root from the given snapshot on next boot. */ -static int +static int __attribute__((noinline)) snapshot_root(int dirfd, user_addr_t name, __unused uint32_t flags, vfs_context_t ctx) { @@ -12863,10 +13292,10 @@ snapshot_root(int dirfd, user_addr_t name, __unused uint32_t flags, } mp = vnode_mount(rvp); - MALLOC(name_buf, caddr_t, MAXPATHLEN, M_TEMP, M_WAITOK); + name_buf = zalloc_flags(ZV_NAMEI, Z_WAITOK); error = copyinstr(name, name_buf, MAXPATHLEN, &name_len); if (error) { - FREE(name_buf, M_TEMP); + zfree(ZV_NAMEI, name_buf); vnode_put(rvp); return error; } @@ -12880,7 +13309,7 @@ snapshot_root(int dirfd, user_addr_t name, __unused uint32_t flags, error = mount_iterref(mp, 0); vnode_put(rvp); if (error) { - FREE(name_buf, M_TEMP); + zfree(ZV_NAMEI, name_buf); return error; } @@ -12896,7 +13325,7 @@ snapshot_root(int dirfd, user_addr_t name, __unused uint32_t flags, error = VFS_IOCTL(mp, VFSIOC_ROOT_SNAPSHOT, (caddr_t)&root_data, 0, ctx); mount_iterdrop(mp); - FREE(name_buf, M_TEMP); + zfree(ZV_NAMEI, name_buf); return error; } @@ -12920,10 +13349,10 @@ fs_snapshot(__unused proc_t p, struct fs_snapshot_args *uap, } /* - * Enforce user authorization for snapshot modification operations + * Enforce user authorization for snapshot modification operations, + * or if trying to root from snapshot. 
*/ - if ((uap->op != SNAPSHOT_OP_MOUNT) && - (uap->op != SNAPSHOT_OP_ROOT)) { + if (uap->op != SNAPSHOT_OP_MOUNT) { vnode_t dvp = NULLVP; vnode_t devvp = NULLVP; mount_t mp; diff --git a/bsd/vfs/vfs_unicode.c b/bsd/vfs/vfs_unicode.c new file mode 100644 index 000000000..19b2d331a --- /dev/null +++ b/bsd/vfs/vfs_unicode.c @@ -0,0 +1,1137 @@ +/* + * Copyright (C) 2016-2020 Apple, Inc. All rights reserved. + * Some portions covered by other copyrights, listed below. + *--- + * Copyright (C) 2016 and later: Unicode, Inc. and others. + * License & terms of use: http://www.unicode.org/copyright.html + *--- + * Copyright (C) 1999-2015, International Business Machines + * Corporation and others. All Rights Reserved. + * + * add APPLE_OSREFERENCE_LICENSE_HEADER stuff... + */ + +#include +#include +#include +#include "vfs_unicode_data.h" +#define STATIC_UNLESS_TEST static + +enum { + /* Maximum number of UTF8 bytes from one Unicode code point (one UTF32 code unit) */ + kMaxUTF8BytesPerChar = 4 +}; + +/* local prototypes used by exported functions (and themselves exported for testing) */ +STATIC_UNLESS_TEST +int32_t utf8ToU32Code(int32_t u32char, const char** srcPtr, const char* srcLimit); +STATIC_UNLESS_TEST +int32_t normalizeOptCaseFoldU32Char(int32_t u32char, bool case_sens, + int32_t u32NormFoldBuf[kNFCSingleCharDecompMax], + uint8_t combClass[kNFCSingleCharDecompMax]); +/* local prototypes used by exported functions (not exported for separate testing) */ +static int nextBaseAndAnyMarks(const char** strP, const char *strLimit, bool case_sens, + int32_t* unorm, uint8_t* unormcc, int32_t* unormlenP, int32_t* unormstartP, + int32_t* buf, uint8_t* bufcc, int32_t* buflenP, + bool* needReorderP, bool* startP); +void doReorder(int32_t* buf, uint8_t* bufcc, int32_t buflen); +int32_t u32CharToUTF8Bytes(uint32_t u32char, uint8_t utf8Bytes[kMaxUTF8BytesPerChar]); + +/* + * utf8_normalizeOptCaseFoldGetUVersion + * + * version[0] = Unicode major version; for Unicode 6.3.0 this would be 6 + * version[1] = Unicode minor version; for Unicode 6.3.0 this would be 3 + * version[2] = Unicode patch version; for Unicode 6.3.0 this would be 0 + * version[3] = Code revision level; for any given Unicode version, this value starts + * at 0 and is incremented for each significant revision to the + * normalizeOptCaseFold functions. + */ +void +utf8_normalizeOptCaseFoldGetUVersion(unsigned char version[4]) +{ + version[0] = 13; + version[1] = 0; + version[2] = 0; + version[3] = 0; + return; +} + +/* + * utf8_normalizeOptCaseFoldAndHash + * + * str: The input UTF-8 string (need not be 0 terminated) + * str_len: The byte length of the input string (excluding any 0 terminator) + * case_sens: False for case-insensitive behavior; generates canonical caseless form. + * True for case-sensitive behavior; generates standard NFD. + * hash_func: A pointer to a hashing function to compute the hash of the + * normalized/case-folded result. buf contains buf_len bytes + * of data to be added to the hash using the caller-supplied + * context (ctx). + * hash_ctx: The context for the hash function. + * + * Returns: 0 on success, or + * EILSEQ: The input string contains illegal ASCII-range characters + * (0x00 or '/'), or is not well-formed stream-safe UTF-8, or + * contains codepoints that are non-characters or unassigned in + * the version of Unicode currently supported (Unicode 9.0). 
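+ *
+ * Illustrative usage sketch (editor's example, not part of the original
+ * sources; the FNV-1a callback, its constants, and the name/name_len
+ * arguments are hypothetical):
+ *
+ *    static void
+ *    fnv1a_update(void *buf, size_t buf_len, void *ctx)
+ *    {
+ *        uint64_t *h = (uint64_t *)ctx;
+ *        const uint8_t *p = (const uint8_t *)buf;
+ *        while (buf_len-- > 0) {
+ *            *h = (*h ^ *p++) * 1099511628211ULL;
+ *        }
+ *    }
+ *
+ *    uint64_t h = 14695981039346656037ULL;
+ *    int err = utf8_normalizeOptCaseFoldAndHash(name, name_len, false,
+ *        fnv1a_update, &h);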
+ */ + +int +utf8_normalizeOptCaseFoldAndHash(const char *str, + size_t str_len, + bool case_sens, + void (*hash_func)(void *buf, size_t buf_len, void *ctx), + void *hash_ctx) +{ + const char *strLimit = str + str_len; + + /* Data for the next pending single-char norm from input; + * This will always begin with a base char (combining class 0) + * or the first character in the string, which may no be a base */ + int32_t unorm[kNFCSingleCharDecompMax]; + uint8_t unormcc[kNFCSingleCharDecompMax]; + int32_t unormlen = 0; + int32_t unormstart = 0; + + bool start = true; + + /* main loop: + * Each input character may be normalized to a sequence of one or more characters, + * some of which may have non-zero combining class. Any sequence of characters + * with non-zero combining class resulting from one or more input characters needs + * to be accumulated in the main buffer so we can reorder as necessary before + * calling the hash function. + * + * At the beginning of the main loop: The normalization buffer and main buffer are + * both empty. + * + * Each time through the main loop we do the following: + * 1. If there are characters available in the normalization result buffer (from the + * result of normalizing a previous input character), copy the first character and + * any following characters that have non-zero combining class to the main buffer. + * 2. If there is nothing left in the normalization buffer, then loop processing + * input characters as follows: + * a) Get the next input character from UTF8, get its normalized and case-folded + * result in the normalization buffer. + * b) If the first character in the normalization buffer has combining class 0, + * break; we will handle this normalization buffer next time through the main + * loop. + * c) Else copy the current normalization buffer (which has only combining marks) + * to the main buffer, and continue with the loop processing input characters. + * 3. At this point the first character in the main buffer may or may not have + * combining class 0, but any subsequent characters (up to the the limit for + * stream safe text) will be combining characters with nonzero combining class. + * Reorder the combining marks if necessary into canonical order. + * 4. Call the hash function for each character in the main buffer. + * + */ + do { + /* Data for the buffers being built up from input */ + int32_t buf[kNCFStreamSafeBufMax]; + uint8_t bufcc[kNCFStreamSafeBufMax]; + int32_t buflen = 0; + bool needReorder = false; + int err; + + err = nextBaseAndAnyMarks(&str, strLimit, case_sens, unorm, unormcc, &unormlen, &unormstart, + buf, bufcc, &buflen, &needReorder, &start); + if (err != 0) { + return err; + } + + if (buflen > 0) { + /* Now buffer should have all of the combining marks up to the next base char. + * Normally it will also start with the last base char encountered (unless the + * UTF8 string began with a combining mark). */ + /* Now reorder combining marks if necessary. 
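+ * doReorder() below bubble-sorts the marks into nondecreasing
+ * combining-class order (canonical ordering).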
*/ + if (needReorder) { + doReorder(buf, bufcc, buflen); + } + /* Now write to hash func */ + hash_func(buf, buflen * sizeof(buf[0]), hash_ctx); + } + /* OK so far, top of loop clears buffers to start refilling again */ + } while (str < strLimit || unormlen > 0); + return 0; +} + +/* + * utf8_normalizeOptCaseFoldAndCompare + * + * strA: A UTF-8 string to be compared (need not be 0 terminated) + * strA_len: The byte length of strA (excluding any 0 terminator) + * strB: The second UTF-8 string to be compared (need not be 0 terminated) + * strB_len: The byte length of strB (excluding any 0 terminator) + * case_sens: False for case-insensitive behavior; compares canonical caseless forms. + * True for case-sensitive behavior; compares standard NFD forms. + * are_equal: On success, set to true if the strings are equal, or set to false + * if they are not. + * + * Returns: 0 on success, or + * EILSEQ: One or both of the input strings contains illegal ASCII-range + * characters (0x00 or '/'), or is not well-formed stream-safe UTF-8, + * or contains codepoints that are non-characters or unassigned in + * the version of Unicode currently supported (Unicode 9.0). + * Note: The comparison may terminate early when a difference is + * detected, and may return 0 and set *are_equal=false even + * if one or both strings are invalid. + */ +enum { kNFCSingleCharDecompMaxPlusPushback = kNFCSingleCharDecompMax + 4 }; /* room for 03B9 pushback(s) */ + +int +utf8_normalizeOptCaseFoldAndCompare(const char *strA, + size_t strA_len, + const char *strB, + size_t strB_len, + bool case_sens, + bool *are_equal) +{ + const char *strALimit = strA + strA_len; + const char *strBLimit = strB + strB_len; + + /* Data for the next pending single-char norms from each input; + * These will always begin with a base char (combining class 0) + * or the first character in the string, which may not be a base */ + int32_t unormA[kNFCSingleCharDecompMaxPlusPushback], unormB[kNFCSingleCharDecompMaxPlusPushback]; + uint8_t unormAcc[kNFCSingleCharDecompMaxPlusPushback], unormBcc[kNFCSingleCharDecompMaxPlusPushback]; + int32_t unormAlen = 0, unormBlen = 0; + int32_t unormAstart = 0, unormBstart = 0; + + bool startA = true, startB = true; + + /* main loop: + * The main loop here is similar to the main loop in utf8_normalizeOptCaseFoldAndHash, + * described above. The differences are: + * - We keep a normalization buffer and main buffer for each string. + * - In the main loop, we do steps 1-3 for each string. + * - In step 4, instead of calling the hash function, we compare the two main + * buffers; if they are unequal, we return a non-equal result. + * - After the end of the main loop, if we still have data for one string but + * not the other, return a non-equal result, else return an equal result. 
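+ * - One additional wrinkle in the case-insensitive path: U+0345 COMBINING
+ *   GREEK YPOGEGRAMMENI (combining class 240) case-folds to U+03B9 GREEK
+ *   SMALL LETTER IOTA (combining class 0), so trailing 03B9 characters in a
+ *   comparison buffer are pushed back onto the front of that string's
+ *   pending normalization buffer (the "03B9 pushback" handling below) and
+ *   compared together with the characters that follow them.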
+ */ + do { + /* Data for the buffers being built up from each input */ + int32_t bufA[kNCFStreamSafeBufMax], bufB[kNCFStreamSafeBufMax]; + uint8_t bufAcc[kNCFStreamSafeBufMax], bufBcc[kNCFStreamSafeBufMax]; + int32_t bufAlen = 0, bufBlen = 0; + bool needReorderA = false, needReorderB = false; + int err; + + err = nextBaseAndAnyMarks(&strA, strALimit, case_sens, unormA, unormAcc, &unormAlen, &unormAstart, + bufA, bufAcc, &bufAlen, &needReorderA, &startA); + if (err != 0) { + return err; + } + err = nextBaseAndAnyMarks(&strB, strBLimit, case_sens, unormB, unormBcc, &unormBlen, &unormBstart, + bufB, bufBcc, &bufBlen, &needReorderB, &startB); + if (err != 0) { + return err; + } + + if (bufAlen > 0 || bufBlen > 0) { + /* Now each buffer should have all of the combining marks up to the next base char. + * Normally it will also start with the last base char encountered (unless the + * UTF8 string began with a combining mark). */ + /* Now reorder combining marks if necessary. */ + if (needReorderA) { + doReorder(bufA, bufAcc, bufAlen); + } + if (needReorderB) { + doReorder(bufB, bufBcc, bufBlen); + } + /* handle 03B9 pushback */ + int32_t idx; + if (!case_sens) { + if (bufAlen > 1 && bufA[bufAlen - 1] == 0x03B9 && unormAstart == 0) { + int32_t tailCount = 0; + while (tailCount < kNFCSingleCharDecompMaxPlusPushback - unormAlen && bufAlen > 1 && bufA[bufAlen - 1] == 0x03B9) { + tailCount++; + bufAlen--; + } + for (idx = unormAlen; idx > 0; idx--) { + unormA[idx - 1 + tailCount] = unormA[idx - 1]; + unormAcc[idx - 1 + tailCount] = unormAcc[idx - 1]; + } + for (idx = 0; idx < tailCount; idx++) { + unormA[idx] = 0x03B9; + unormAcc[idx] = 0; + } + unormAlen += tailCount; + } + if (bufBlen > 1 && bufB[bufBlen - 1] == 0x03B9 && unormBstart == 0) { + int32_t tailCount = 0; + while (tailCount < kNFCSingleCharDecompMaxPlusPushback - unormBlen && bufBlen > 1 && bufB[bufBlen - 1] == 0x03B9) { + tailCount++; + bufBlen--; + } + for (idx = unormBlen; idx > 0; idx--) { + unormB[idx - 1 + tailCount] = unormB[idx - 1]; + unormBcc[idx - 1 + tailCount] = unormBcc[idx - 1]; + } + for (idx = 0; idx < tailCount; idx++) { + unormB[idx] = 0x03B9; + unormBcc[idx] = 0; + } + unormBlen += tailCount; + } + } + /* Now compare the buffers. */ + if (bufAlen != bufBlen || memcmp(bufA, bufB, bufAlen * sizeof(bufA[0])) != 0) { + *are_equal = false; + return 0; + } + } + /* OK so far, top of loop clears buffers to start refilling again */ + } while ((strA < strALimit || unormAlen > 0) && (strB < strBLimit || unormBlen > 0)); + + *are_equal = (strA == strALimit && unormAlen == 0 && strB == strBLimit && unormBlen == 0); + return 0; +} + +/* + * utf8_normalizeOptCaseFold + * + * str: The input UTF-8 string (need not be 0 terminated) + * str_len: The byte length of the input string (excluding any 0 terminator) + * case_sens: False for case-insensitive behavior; generates canonical caseless form. + * True for case-sensitive behavior; generates standard NFD. + * ustr: A pointer to a buffer for the resulting UTF-32 string. + * ustr_size: The capacity of ustr, in UTF-32 units. + * ustr_len: Pointer to a value that will be filled in with the actual length + * in UTF-32 units of the string copied to ustr. + * + * Returns: 0 on success, or + * EILSEQ: The input string contains illegal ASCII-range characters + * (0x00 or '/'), or is not well-formed stream-safe UTF-8, or + * contains codepoints that are non-characters or unassigned in + * the version of Unicode currently supported. + * ENOMEM: ustr_size is insufficient for the resulting string. 
In this + * case the value returned in *ustr_len is invalid. + */ +int +utf8_normalizeOptCaseFold(const char *str, + size_t str_len, + bool case_sens, + int32_t *ustr, + int32_t ustr_size, + int32_t *ustr_len) +{ + const char *strLimit = str + str_len; + int32_t *ustrCur = ustr; + const int32_t *ustrLimit = ustr + ustr_size; + + /* Data for the next pending single-char norm from input; + * This will always begin with a base char (combining class 0) */ + int32_t unorm[kNFCSingleCharDecompMax]; + uint8_t unormcc[kNFCSingleCharDecompMax]; + int32_t unormlen = 0; + int32_t unormstart = 0; + + bool start = true; + + *ustr_len = 0; + do { + /* Data for the buffers being built up from input */ + int32_t buf[kNCFStreamSafeBufMax]; + uint8_t bufcc[kNCFStreamSafeBufMax]; + int32_t buflen = 0; + bool needReorder = false; + int err; + + err = nextBaseAndAnyMarks(&str, strLimit, case_sens, unorm, unormcc, &unormlen, &unormstart, + buf, bufcc, &buflen, &needReorder, &start); + if (err != 0) { + return err; + } + + if (buflen > 0) { + if (needReorder) { + doReorder(buf, bufcc, buflen); + } + /* Now copy to output buffer */ + int32_t idx; + if (ustrCur + buflen > ustrLimit) { + return ENOMEM; + } + for (idx = 0; idx < buflen; idx++) { + *ustrCur++ = buf[idx]; + } + } + /* OK so far, top of loop clears buffers to start refilling again */ + } while (str < strLimit || unormlen > 0); + *ustr_len = (uint32_t)(ustrCur - ustr); // XXXpjr: the explicit (uint32_t) cast wasn't present in the original code drop + return 0; +} + +/* + * utf8_normalizeOptCaseFoldToUTF8 + * (This is similar to normalizeOptCaseFold except that this has a different output + * buffer type, and adds conversion to UTF8 while copying to output buffer) + * + * str: The input UTF-8 string (need not be 0 terminated) + * str_len: The byte length of the input string (excluding any 0 terminator) + * case_sens: False for case-insensitive behavior; generates canonical caseless form. + * True for case-sensitive behavior; generates standard NFD. + * ustr: A pointer to a buffer for the resulting UTF-8 string. + * ustr_size: The capacity of ustr, in bytes. + * ustr_len: Pointer to a value that will be filled in with the actual length + * in bytes of the string copied to ustr. + * + * Returns: 0 on success, or + * EILSEQ: The input string contains illegal ASCII-range characters + * (0x00 or '/'), or is not well-formed stream-safe UTF-8, or + * contains codepoints that are non-characters or unassigned in + * the version of Unicode currently supported. + * ENOMEM: ustr_size is insufficient for the resulting string. In this + * case the value returned in *ustr_len is invalid. 
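+ *
+ * Illustrative usage sketch (editor's example, not part of the original
+ * sources; the buffer size and the name/name_len arguments are arbitrary):
+ *
+ *    char norm[256];
+ *    size_t norm_len = 0;
+ *    int err = utf8_normalizeOptCaseFoldToUTF8(name, name_len, false,
+ *        norm, sizeof(norm), &norm_len);
+ *    if (err == 0) {
+ *        // norm[0..norm_len-1] holds the canonical caseless UTF-8 form
+ *    }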
+ */ +int +utf8_normalizeOptCaseFoldToUTF8(const char *str, + size_t str_len, + bool case_sens, + char *ustr, + size_t ustr_size, + size_t *ustr_len) +{ + const char *strLimit = str + str_len; + char *ustrCur = ustr; + const char *ustrLimit = ustr + ustr_size; + + /* Data for the next pending single-char norm from input; + * This will always begin with a base char (combining class 0) */ + int32_t unorm[kNFCSingleCharDecompMax]; + uint8_t unormcc[kNFCSingleCharDecompMax]; + int32_t unormlen = 0; + int32_t unormstart = 0; + + bool start = true; + + *ustr_len = 0; + do { + /* Data for the buffers being built up from input */ + int32_t buf[kNCFStreamSafeBufMax]; + uint8_t bufcc[kNCFStreamSafeBufMax]; + int32_t buflen = 0; + bool needReorder = false; + int err; + + err = nextBaseAndAnyMarks(&str, strLimit, case_sens, unorm, unormcc, &unormlen, &unormstart, + buf, bufcc, &buflen, &needReorder, &start); + if (err != 0) { + return err; + } + + if (buflen > 0) { + uint8_t utf8Bytes[kMaxUTF8BytesPerChar]; + int32_t *bufPtr = buf; + if (needReorder) { + doReorder(buf, bufcc, buflen); + } + /* Now copy to output buffer */ + while (buflen-- > 0) { + int32_t idx, utf8Len = u32CharToUTF8Bytes((uint32_t)*bufPtr++, utf8Bytes); + if (ustrCur + utf8Len > ustrLimit) { + return ENOMEM; + } + for (idx = 0; idx < utf8Len; idx++) { + *ustrCur++ = (char)utf8Bytes[idx]; + } + } + } + /* OK so far, top of loop clears buffers to start refilling again */ + } while (str < strLimit || unormlen > 0); + *ustr_len = ustrCur - ustr; + return 0; +} + +/* + * utf8_normalizeOptCaseFoldAndMatchSubstring + * + * strA: A UTF-8 string (need not be 0 terminated) in which to search for the + * substring specified by ustrB. + * strA_len: The byte length of strA (excluding any 0 terminator) + * ustrB: A normalized UTF-32 substring (need not be 0 terminated) to be searched + * for in the UTF-32 string resulting from converting strA to the normalized + * UTF-32 form specified by the case_sens parameter; ustrB must already be + * in that form. + * ustrB_len: The length of ustrB in UTF-32 units (excluding any 0 terminator). + * case_sens: False for case-insensitive matching; compares canonical caseless forms. + * True for case-sensitive matching; compares standard NFD forms. + * buf: Pointer to caller-supplied working memory for storing the portion of + * strA which has been converted to normalized UTF-32. + * buf_size: The size of buf. + * has_match: On success, set to true if strA (when converter to UTF-32 and normalized + * per case_sens) contains ustrB, set to false otherwise. + * + * Returns: 0 on success, or + * EILSEQ: strA contains illegal ASCII-range characters (0x00 or '/'), or is + * not well-formed stream-safe UTF-8, or contains codepoints that are + * non-characters or unassigned in the version of Unicode currently + * supported. + * Note: The search may terminate early when a match is detected, and + * may return 0 and set *has_match=true even if strA is invalid. + * ENOMEM: buf_size is insufficient. + */ +int +utf8_normalizeOptCaseFoldAndMatchSubstring(const char *strA, + size_t strA_len, + const int32_t *ustrB, + int32_t ustrB_len, + bool case_sens, + void *buf, + size_t buf_size, + bool *has_match) +{ + /* + * ustrA represents the current position in the UTF-32 normalized version of strA + * at which we want to test for a match; ustrANormEnd is the position beyond that + * which is just after the end of what has already been converted from strA to + * UTF-32 normalized form. 
+ * Each time through the main loop: + * - The first task is to make sure we have enough of strA converted to UTF32 + * normalized form to test for match with ustrB at the current match position. + * If we don't, then convert more of strA to UTF-32 normalized form until we + * have enough to compare with ustrB. To do this, run a loop which is like the + * main loop in utf8_normalizeOptCaseFoldAndHash except that in step 4, instead of + * calling the hash function, we copy the normalized buffer to ustrANormEnd, + * advancing the latter. We keep doing this until we have enough additional + * converted to match with ustrB. + * - Then we test for match of ustrB at the current ustrA position. If there is + * a match we return; otherwise, if there is more strA to convert we advance + * ustrA and repeat the main loop, otherwise we return without a match. + */ + if (ustrB_len == 0) { /* always matches */ + *has_match = true; + return 0; + } + *has_match = false; /* initialize return value */ + if (ustrB_len > 2 * strA_len) { + /* If ustrB is clearly too long to find in strA, don't bother normalizing strA. + * A UTF-8 character of 1 byte (ASCII) will normalize to 1 UTF-32 unit. + * A UTF-8 character of 2-4 bytes will normalize to a maximum of 4 UTF-32 units. + * The maximum expansion from unnormalized UTF-8 byte length to normalized + * UTF-32 unit length is thus 2. */ + return 0; + } + + const char *strALimit = strA + strA_len; + int32_t *ustrA = (int32_t *)buf; + const int32_t *ustrALimit = ustrA + (buf_size / sizeof(int32_t)); + int32_t *ustrANormEnd = ustrA; /* how far we have already normalized in ustrA */ + + /* Data for the next pending single-char norms from each input; + * These will always begin with a base char (combining class 0) + * or the first character in the string, which may not be a base */ + int32_t unormA[kNFCSingleCharDecompMax]; + uint8_t unormAcc[kNFCSingleCharDecompMax]; + int32_t unormAlen = 0; + int32_t unormAstart = 0; + + bool startA = true; + + while (true) { + /* convert enough more of strA to normalized UTF-32 in ustrA to check for match */ + if (ustrANormEnd - ustrA < ustrB_len) { + do { + /* Data for the buffers being built up from each input */ + int32_t bufA[kNCFStreamSafeBufMax]; + uint8_t bufAcc[kNCFStreamSafeBufMax]; + int32_t bufAlen = 0; + bool needReorderA = false; + int err; + + err = nextBaseAndAnyMarks(&strA, strALimit, case_sens, unormA, unormAcc, &unormAlen, &unormAstart, + bufA, bufAcc, &bufAlen, &needReorderA, &startA); + if (err != 0) { + return err; + } + + if (bufAlen > 0) { + /* Now each buffer should have all of the combining marks up to the next base char. + * Normally it will also start with the last base char encountered (unless the + * UTF8 string began with a combining mark). */ + /* Now reorder combining marks if necessary. Should be rare, and sequences should + * usually be short when does occur => simple bubblesort should be sufficient. 
*/ + if (needReorderA) { + doReorder(bufA, bufAcc, bufAlen); + } + /* Now copy to working buffer */ + int32_t idx; + if (ustrANormEnd + bufAlen > ustrALimit) { + return ENOMEM; + } + for (idx = 0; idx < bufAlen; idx++) { + *ustrANormEnd++ = bufA[idx]; + } + } + /* OK so far, top of loop clears buffers to start refilling again */ + } while ((ustrANormEnd - ustrA < ustrB_len) && (strA < strALimit || unormAlen > 0)); + } + + if (ustrANormEnd - ustrA < ustrB_len) { + return 0; /* not enough of strA left for match */ + } + /* check for match, return if so */ + if (memcmp(ustrA, ustrB, ustrB_len * sizeof(ustrB[0])) == 0) { + *has_match = true; + return 0; + } + ustrA++; /* advance match position */ + } +} + +/* nextBaseAndAnyMarks: + * Guts of code to get next bufferful of base character (or first char in string) + * and all trailing combining marks. + * This is called each time through the main loop of functions above, and does the + * following: + * 1. If there are characters available in the normalization result buffer (from the + * result of normalizing a previous input character), copy the first character and + * any following characters that have non-zero combining class to the main buffer. + * 2. If there is nothing left in the normalization buffer, then loop processing + * input characters as follows: + * a) Get the next input character from UTF8, get its normalized and case-folded + * result in the normalization buffer. + * b) If the first character in the normalization buffer has combining class 0, + * break; we will handle this normalization buffer next time through the main + * loop. + * c) Else copy the current normalization buffer (which has only combining marks) + * to the main buffer, and continue with the loop processing input characters. + */ + +static int +nextBaseAndAnyMarks(const char** strP, const char *strLimit, bool case_sens, + int32_t* unorm, uint8_t* unormcc, int32_t* unormlenP, int32_t* unormstartP, + int32_t* buf, uint8_t* bufcc, int32_t* buflenP, + bool* needReorderP, bool* startP) +{ + /* update buffers for str */ + if (*unormlenP > 0 && *unormstartP < *unormlenP) { + /* unorm begins with a base char; buflen should be 0 */ + *needReorderP = false; + for (*buflenP = 0; true;) { + if (*buflenP > 0 && unormcc[*unormstartP] > 0 && unormcc[*unormstartP] < bufcc[(*buflenP) - 1]) { + *needReorderP = true; + } + buf[*buflenP] = unorm[*unormstartP]; + bufcc[(*buflenP)++] = unormcc[(*unormstartP)++]; + if (*unormstartP >= *unormlenP || unormcc[*unormstartP] == 0) { + break; + } + } + } + if (*unormstartP >= *unormlenP) { + *unormstartP = *unormlenP = 0; + while (*strP < strLimit) { + int32_t idx; + uint32_t bytevalue = (uint8_t)*(*strP)++; + /* '/' is not produced by NFD decomposition from another character so we can + * check for it before normalization */ + if (bytevalue == 0 || bytevalue == 0x2F /*'/'*/) { + return EILSEQ; + } + if (bytevalue < 0x80) { + unorm[0] = (!case_sens && bytevalue >= 'A' && bytevalue <= 'Z')? 
bytevalue += 0x20: bytevalue; + *unormlenP = 1; + unormcc[0] = 0; + *startP = false; + break; + } else { + int32_t u32char = utf8ToU32Code(bytevalue, strP, strLimit); + if (u32char <= 0) { + return EILSEQ; + } + *unormlenP = normalizeOptCaseFoldU32Char(u32char, case_sens, unorm, unormcc); + if (*unormlenP <= 0) { + return EILSEQ; + } + if (unormcc[0] == 0 || *startP) { + *startP = false; + break; + } + } + /* the latest char decomposes to just combining sequence, add to buffer being built */ + if (*buflenP + *unormlenP > kNCFStreamSafeBufMax) { + return EILSEQ; + } + for (idx = 0; idx < *unormlenP; idx++, (*buflenP)++) { + if (*buflenP > 0 && unormcc[idx] > 0 && unormcc[idx] < bufcc[(*buflenP) - 1]) { + *needReorderP = true; + } + buf[*buflenP] = unorm[idx]; + bufcc[*buflenP] = unormcc[idx]; + } + *unormlenP = 0; + } + } + return 0; +} + +/* local prototypes used only by internal functions */ +static void swapBufCharCCWithPrevious(int32_t jdx, int32_t buf[], uint8_t bufcc[]); +static int32_t adjustCase(bool case_sens, int32_t uSeqLen, + int32_t u32NormFoldBuf[kNFCSingleCharDecompMax]); +static uint8_t getCombClassU32Char(int32_t u32char); +static int32_t decomposeHangul(int32_t u32char, int32_t u32NormFoldBuf[kNFCSingleCharDecompMax]); + +/* Reorder combining marks if necessary. Should be rare, and sequences should + * usually be short when does occur => simple bubblesort should be sufficient. */ +void +doReorder(int32_t* buf, uint8_t* bufcc, int32_t buflen) +{ + int32_t idx, jdx; + for (idx = 0; idx < buflen - 1; idx++) { + for (jdx = buflen - 1; jdx > idx; jdx--) { + if (bufcc[jdx] < bufcc[jdx - 1]) { + swapBufCharCCWithPrevious(jdx, buf, bufcc); + } + } + } +} +/* swap function for bubblesort */ +static void +swapBufCharCCWithPrevious(int32_t jdx, int32_t buf[], uint8_t bufcc[]) +{ + int32_t bufchar = buf[jdx]; + uint8_t bufccval = bufcc[jdx]; + buf[jdx] = buf[jdx - 1]; + bufcc[jdx] = bufcc[jdx - 1]; + buf[jdx - 1] = bufchar; + bufcc[jdx - 1] = bufccval; +} + +/* + * u32CharToUTF8Bytes, map a valid Unicode character (UTF32 code point) to 1..4 UTF8 bytes, + * and returns the number of UTF8 bytes. + * + * adapted from ICU macro U8_APPEND_UNSAFE (utf8.h). + */ +int32_t +u32CharToUTF8Bytes(uint32_t u32char, uint8_t utf8Bytes[kMaxUTF8BytesPerChar]) +{ + int32_t idx = 0; + if (u32char <= 0x7F) { + utf8Bytes[idx++] = (uint8_t)u32char; + } else { + if (u32char <= 0x7FF) { + utf8Bytes[idx++] = (uint8_t)((u32char >> 6) | 0xC0); + } else { + if (u32char <= 0xFFFF) { + utf8Bytes[idx++] = (uint8_t)((u32char >> 12) | 0xE0); + } else { + utf8Bytes[idx++] = (uint8_t)((u32char >> 18) | 0xF0); + utf8Bytes[idx++] = (uint8_t)(((u32char >> 12) & 0x3F) | 0x80); + } + utf8Bytes[idx++] = (uint8_t)(((u32char >> 6) & 0x3F) | 0x80); + } + utf8Bytes[idx++] = (uint8_t)((u32char & 0x3F) | 0x80); + } + return idx; +} + +/* two macros adapted from ICU's utf8.h */ +#define U8_COUNT_TRAIL_BYTES_LOC(leadByte) \ +((uint8_t)(leadByte)<0XF0 ? \ +((uint8_t)(leadByte)>=0XC0)+((uint8_t)(leadByte)>=0XE0) : \ +(uint8_t)(leadByte)<0XFE ? 
3+((uint8_t)(leadByte)>=0XF8)+((uint8_t)(leadByte)>=0XFC) : 0) + +#define U8_MASK_LEAD_BYTE_LOC(leadByte, countTrailBytes) ((leadByte)&=(1<<(6-(countTrailBytes)))-1) + +/* array adapted from ICU's utf_impl.c */ +static const int32_t utf8_minLegal[4] = { 0, 0X80, 0x800, 0x10000 }; + +/* + * utf8ToU32Code, map a non-ASCII byte value plus a buffer of trail bytes to a UTF32 code point + * + * adapted from ICU macro U8_NEXT (utf8.h) and function utf8_nextCharSafeBody (utf_impl.c); + * verified to produce the same results (adusted for the difference in API signature). + * + * assumes at entry that: + * 1. a non-ASCII byte value (>= 0x80) that purports to be the beginning of a UTF8 character + * has been read, and its value is in u32char + * 2. *srcPtr points to the input buffer just after that non-ASCII byte, i.e. it purportedly + * points to the trail bytes for that UTF8 char. + * 3. srcLimit points to end of the input buffer (just after the last byte in the buffer) + * + * For a valid and complete UTF8 character, the function returns its value and advances + * *srcPtr to the first byte after the UTF8 char. Otherwise, the function returns -1 + * (and the value in *srcPtr is undefined). + * Note that while it does not map to surrogate values (generates an error for malformed + * UTF-8 that would map to values in 0xD800..0xD8FF), it does output noncharacter values + * whose low 16 bits are 0xFFFE or 0xFFFF without generating an error. + * + * equivalences used in adapted ICU code: + * UChar = uint16_t + * UChar32 = int32_t + * + * This has been validated against ICU behavior. + */ +STATIC_UNLESS_TEST +int32_t +utf8ToU32Code(int32_t u32char, const char** srcPtr, const char* srcLimit) +{ + const char* src = *srcPtr; + uint8_t pt1, pt2; + if (0xE0 < u32char && u32char <= 0xEC && src + 1 < srcLimit && (pt1 = (uint8_t)(src[0] - 0x80)) <= 0x3F && (pt2 = (uint8_t)(src[1] - 0x80)) <= 0x3F) { + /* handle U+1000..U+CFFF */ + /* no need for (u32char&0xF) because the upper bits are truncated after <<12 in the cast to (uint16_t) */ + u32char = (uint16_t)((u32char << 12) | (pt1 << 6) | pt2); + src += 2; + } else if (u32char < 0xE0 && u32char >= 0xC2 && src < srcLimit && (pt1 = (uint8_t)(src[0] - 0x80)) <= 0x3F) { + /* handle U+0080..U+07FF */ + u32char = ((u32char & 0x1F) << 6) | pt1; + src++; + } else { + /* "complicated" and error cases, adapted from ICU's utf8_nextCharSafeBody() */ + uint8_t count = U8_COUNT_TRAIL_BYTES_LOC(u32char); + if (src + count <= srcLimit) { + uint8_t trail; + + U8_MASK_LEAD_BYTE_LOC(u32char, count); + switch (count) { + /* branches 3, 2 fall through to the next one */ + case 0: /* count==0 for illegally leading trail bytes and the illegal bytes 0XFE and 0XFF */ + case 5: + case 4: /* count>=4 is always illegal: no more than 3 trail bytes in Unicode's UTF-8 */ + break; + case 3: + trail = *src++ - 0X80; + u32char = (u32char << 6) | trail; + /* u32char>=0x110 would result in code point>0x10FFFF, outside Unicode */ + if (u32char >= 0x110 || trail > 0X3F) { + break; + } + case 2: + trail = *src++ - 0X80; + u32char = (u32char << 6) | trail; + /* + * test for a surrogate D800..DFFF: + * before the last (u32char<<6), a surrogate is u32char=360..37F + */ + if (((u32char & 0xFFE0) == 0x360) || trail > 0X3F) { + break; + } + case 1: + trail = *src++ - 0X80; + u32char = (u32char << 6) | trail; + if (trail > 0X3F) { + break; + } + /* correct sequence - all trail bytes have (b7..b6)==(10) */ + if (u32char >= utf8_minLegal[count]) { + *srcPtr = src; + return u32char; + } + /* no default branch 
to optimize switch() - all values are covered */ + } + } + u32char = -1; + } + *srcPtr = src; + return u32char; +} + +/* + * normalizeOptCaseFoldU32Char, map a single UTF32 code point to its normalized result + * and the combining classes for each resulting char, or indicate it is invalid. + * + * The normalized and case-folded result might be up to 4 UTF32 characters (current + * max, could change in the future). + * + * u32char - input UTF32 code point + * case_sens - false for case insensitive => casefold, true for case sensitive => NFD only + * u32NormFoldBuf - output buffer of length kNFCSingleCharDecompMax (assumed to be at least 3) + * to receive the normalized result. + * combClass - output buffer of length kNFCSingleCharDecompMax (assumed to be at least 3) + * to receive the combining classes for the characters in u32NormFoldBuf. If + * the first entry has non-zero combining class, the remaining entries do too. + * + * returns -1 if input code point is invalid, 0 if the buffer length kNFCSingleCharDecompMax + * is insufficient (though it is assumed to be at least 3), else the length of the + * normalized and case-folded result (currently in the range 1..4). + * + * This has been validated against ICU behavior. + * + * This function is highly dependent on the structure of the data trie; for details on + * that structure, see comments in vfs_unicode_data.h + */ +STATIC_UNLESS_TEST +int32_t +normalizeOptCaseFoldU32Char(int32_t u32char, bool case_sens, + int32_t u32NormFoldBuf[kNFCSingleCharDecompMax], + uint8_t combClass[kNFCSingleCharDecompMax]) +{ + combClass[0] = 0; + /* return hi-range PUA as self, except non-characters */ + if (u32char >= kU32HiPUAStart) { + if ((u32char & 0xFFFE) == 0xFFFE) { + return -1; + } + u32NormFoldBuf[0] = u32char; + return 1; + } + /* for trie lookup, shift the range 0xE0000-0xE01FF down to be just after the range */ + /* 0 - 0x313FF; everything in between is currently invalid. */ + int32_t u32charLookup = u32char; + if (u32charLookup >= kU32LowRangeLimit) { + u32charLookup -= (kU32HiRangeStart - kU32LowRangeLimit); + if (u32charLookup < kU32LowRangeLimit || u32charLookup >= (kU32LowRangeLimit + kU32HiRangeLen)) { + return -1; /* in the large range of currently-unassigned code points */ + } + } + /* Now we have u32charLookup either in 0..0x313FF representing u32char itself, + * or in 0x31400..0x315FF representing u32char 0xE0000..0xE01FF; look it up in + * the trie that identifies unassigneds in this range, or maps others to + * decomps or combining class or just self.
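+ * (For example, U+00E9 is looked up as 0x00E9 itself, while U+E0001 is
+ * looked up as 0x31401, immediately following the low range 0..0x313FF.)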
*/ + uint16_t trieValue; + /* TrieHi */ + trieValue = nfTrieHi[u32charLookup >> kNFTrieHiShift]; + if (trieValue == kInvalidCodeFlag) { + return -1; + } + if (trieValue == 0 || (trieValue & kFlagTestMask) == kCombClassFlag) { /* return self; */ + u32NormFoldBuf[0] = u32char; + combClass[0] = trieValue & kFlagValueMask; + return 1; + } + if (trieValue == kHangulMask) { + combClass[1] = combClass[2] = 0; + return decomposeHangul(u32char, u32NormFoldBuf); + } + /* TrieMid */ + trieValue = nfTrieMid[trieValue & kNextIndexValueMask][(u32charLookup >> kNFTrieMidShift) & kNFTrieMidMask]; + if (trieValue == kInvalidCodeFlag) { + return -1; + } + if (trieValue == 0 || (trieValue & kFlagTestMask) == kCombClassFlag) { + u32NormFoldBuf[0] = u32char; + combClass[0] = trieValue & kFlagValueMask; + return adjustCase(case_sens, 1, u32NormFoldBuf); + } + if ((trieValue & kFlagTestMask) == kInvMaskFlag) { + uint16_t invalidMask = nfU16InvMasks[trieValue & kFlagValueMask]; + uint16_t testBit = (uint16_t)(1 << (u32charLookup & kNFTrieLoMask)); + if (testBit & invalidMask) { + /* invalid */ + return -1; + } else { + /* treat like trieValue == 0 above */ + u32NormFoldBuf[0] = u32char; + return adjustCase(case_sens, 1, u32NormFoldBuf);; + } + } + if (trieValue == kHangulMask) { + combClass[1] = combClass[2] = 0; + return decomposeHangul(u32char, u32NormFoldBuf); + } + /* TrieLo */ + trieValue = nfTrieLo[trieValue & kNextIndexValueMask][u32charLookup & kNFTrieLoMask]; + if (trieValue == kInvalidCodeFlag) { + return -1; + } + if (trieValue == kHangulMask) { + combClass[1] = combClass[2] = 0; + return decomposeHangul(u32char, u32NormFoldBuf); + } + if (trieValue < kToU16Seq2Mask || trieValue > kSpecialsEnd) { + if (trieValue == 0 || (trieValue & kFlagTestMask) == kCombClassFlag) { + u32NormFoldBuf[0] = u32char; + combClass[0] = trieValue & kFlagValueMask; + } else { + u32NormFoldBuf[0] = trieValue; + } + return adjustCase(case_sens, 1, u32NormFoldBuf);; + } + const uint16_t* u16SeqPtr = NULL; + const int32_t* u32SeqPtr = NULL; + int32_t uSeqLen = 0; + switch (trieValue & kSpecialsMask) { + case kToU16Seq2Mask: + if (case_sens && (trieValue & kToSeqCaseFoldMask)) { + /* don't use the mapping, it is only for case folding */ + u32NormFoldBuf[0] = u32char; + /* already have combClass[0] = 0 */ + return 1; + } + u16SeqPtr = nfU16Seq2[trieValue & kToSeqIndexMask]; + uSeqLen = 2; + break; + case kToU16Seq3Mask: + if (case_sens && (trieValue & kToSeqCaseFoldMask)) { + /* don't use the mapping, it is only for case folding */ + u32NormFoldBuf[0] = u32char; + /* already have combClass[0] = 0 */ + return 1; + } + u16SeqPtr = nfU16Seq3[trieValue & kToSeqIndexMask]; + uSeqLen = 3; + break; + case kToU16SeqMiscMask: + u16SeqPtr = &nfU16SeqMisc[trieValue & kToSeqMiscIndexMask]; + uSeqLen = *u16SeqPtr & kToSeqMiscLenMask; + combClass[0] = (uint8_t)(*u16SeqPtr++ >> kToSeqMiscCCShift); + break; + case kToU32CharMask: + if (case_sens && (trieValue & kToSeqCaseFoldMask)) { + /* don't use the mapping, it is only for case folding */ + u32NormFoldBuf[0] = u32char; + /* already have combClass[0] = 0 */ + return 1; + } + u32SeqPtr = &nfU32Char[trieValue & kToSeqIndexMask]; + uSeqLen = 1; + break; + case kToU32SeqMiscMask: + u32SeqPtr = &nfU32SeqMisc[trieValue & kToSeqMiscIndexMask]; + uSeqLen = *u32SeqPtr & kToSeqMiscLenMask; + combClass[0] = (uint8_t)(*u32SeqPtr++ >> kToSeqMiscCCShift); + break; + default: + return -1; + } + if (kNFCSingleCharDecompMax < uSeqLen) { + return 0; + } + int32_t idx; + for (idx = 0; idx < uSeqLen; idx++) { + 
u32NormFoldBuf[idx] = (u16SeqPtr)? *u16SeqPtr++: *u32SeqPtr++; + if (idx > 0) { + combClass[idx] = getCombClassU32Char(u32NormFoldBuf[idx]); + } + } + return adjustCase(case_sens, uSeqLen, u32NormFoldBuf); +} + +/* + * adjustCase, final adjustments to normalizeOptCaseFoldU32Char for case folding + * + * case_sens - false for case insensiive => casefold, true for case sensitive => NFD only + * uSeqLen - length of the sequence specified in the u32NormFoldBuf + * u32NormFoldBuf - buffer of length kNFCSingleCharDecompMax (assume to be at least 3) + * with normalized result. + * + * returns uSeqLen if input code point is invalid, 0 if the buffer length kNFCSingleCharDecompMax + * is insufficient (though it is assumed to be at least 3), else the length of the + * normalized and case-folded result (currently in the range 1..4). + * + * This function is a reduced version of normalizeOptCaseFoldU32Char above. + */ + +static int32_t +adjustCase(bool case_sens, int32_t uSeqLen, + int32_t u32NormFoldBuf[kNFCSingleCharDecompMax]) +{ + if (!case_sens && uSeqLen > 0) { + if (u32NormFoldBuf[0] < kSimpleCaseFoldLimit) { + u32NormFoldBuf[0] = nfBasicCF[u32NormFoldBuf[0]]; + /* There is one case in which this maps to a character with different combining + * class: U+0345 (cc 240) casefolds to U+03B9 (cc 0). However when this is the + * first or only character in the sequence, we want to keep the original + * combining class, so nothing special to do here. + */ + } + /* The following is the only case where we have a casefolding after the first + * character in the sequence. Don't worry about combining class here. that gets + * set later for characters after the first. + */ + if (uSeqLen > 1 && u32NormFoldBuf[uSeqLen - 1] == 0x0345) { + u32NormFoldBuf[uSeqLen - 1] = 0x03B9; + } + } + return uSeqLen; +} + +/* + * getCombClassU32Char, map a single character (in UTF32 form) to its combining class. + * + * u32char - input UTF32 code point. This is assumed to be a valid character that does + * not have a decomposition. + * + * returns combining class of the character. + * + * This is only called for characters after the first is a decomposition expansion. In + * this situation, if we encounter U+03B9 (combining class 0), it is only there as the + * case-folding of U+0345 (combining class 240). In this case it is the combining class + * for U+0345 that we want. In the non-casefold case we won't see U+03B9 here at all. + * + * This function is a reduced version of normalizeOptCaseFoldU32Char above. + */ +static uint8_t +getCombClassU32Char(int32_t u32char) +{ + if (u32char >= kU32HiPUAStart) { + return 0; + } + if (u32char == 0x03B9) { + return 240; + } + /* for trie lookup, shift the range 0xE0000-0xE01FF down to be just after the range */ + /* 0 - 0x313FF; everything in between in currently invalid. */ + int32_t u32charLookup = u32char; + if (u32charLookup >= kU32LowRangeLimit) { + u32charLookup -= (kU32HiRangeStart - kU32LowRangeLimit); + } + /* Now we have u32charLookup either in 0..0x313FF representing u32char itself, + * or in 0x31400..0x315FF representing u32char 0xE0000..0xE01FF; look it up in + * the trie that identifies unassigneds in this range, or maps others to + * decomps or combining class or just self. 
*/ + uint16_t trieValue; + /* TrieHi */ + trieValue = nfTrieHi[u32charLookup >> kNFTrieHiShift]; + if (trieValue == 0 || (trieValue & kFlagTestMask) == kCombClassFlag) { + return trieValue & kFlagValueMask; + } + /* TrieMid */ + trieValue = nfTrieMid[trieValue & kNextIndexValueMask][(u32charLookup >> kNFTrieMidShift) & kNFTrieMidMask]; + if (trieValue == 0 || (trieValue & kFlagTestMask) == kCombClassFlag) { /* return self; */ + return trieValue & kFlagValueMask; + } + if ((trieValue & kFlagTestMask) == kInvMaskFlag) { + return 0; + } + /* TrieLo */ + trieValue = nfTrieLo[trieValue & kNextIndexValueMask][u32charLookup & kNFTrieMidMask]; + return ((trieValue & kFlagTestMask) == kCombClassFlag)? (trieValue & kFlagValueMask): 0; +} + +/* + * decomposeHangul, map a single UTF32 code point for a composed Hangul + * in the range AC00-D7A3, using algorithmic decomp + * + * The normalized result will be 2 or 3 UTF32 characters. + * + * u32char - input UTF32 code point + * u32NormFoldBuf - output buffer of length kNFCSingleCharDecompMax (assume to be at least 3) + * to receive the normalize result. + * + * returns the length of the normalized result (2..3). + * + * Adapted from ICU Hangul:decompose in normalizer2impl.h + * + */ + +enum { + HANGUL_BASE=0xAC00, + JAMO_L_BASE=0x1100, /* "lead" jamo */ + JAMO_V_BASE=0x1161, /* "vowel" jamo */ + JAMO_T_BASE=0x11A7, /* "trail" jamo */ + JAMO_L_COUNT=19, + JAMO_V_COUNT=21, + JAMO_T_COUNT=28, +}; + +static int32_t +decomposeHangul(int32_t u32char, int32_t u32NormFoldBuf[kNFCSingleCharDecompMax]) +{ + u32char -= HANGUL_BASE; + int32_t tIndex = u32char % JAMO_T_COUNT; + u32char /= JAMO_T_COUNT; + u32NormFoldBuf[0] = (uint16_t)(JAMO_L_BASE + u32char / JAMO_V_COUNT); + u32NormFoldBuf[1] = (uint16_t)(JAMO_V_BASE + u32char % JAMO_V_COUNT); + if (tIndex == 0) { + return 2; + } + u32NormFoldBuf[2] = (uint16_t)(JAMO_T_BASE + tIndex); + return 3; +} diff --git a/bsd/vfs/vfs_unicode_data.h b/bsd/vfs/vfs_unicode_data.h new file mode 100644 index 000000000..ce467df14 --- /dev/null +++ b/bsd/vfs/vfs_unicode_data.h @@ -0,0 +1,1255 @@ +/* + * Copyright (c) 2016-2020 Apple Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. 
diff --git a/bsd/vfs/vfs_unicode_data.h b/bsd/vfs/vfs_unicode_data.h new file mode 100644 index 000000000..ce467df14 --- /dev/null +++ b/bsd/vfs/vfs_unicode_data.h @@ -0,0 +1,1255 @@ +/* + * Copyright (c) 2016-2020 Apple Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + + +#ifndef vfs_unicode_data_h +#define vfs_unicode_data_h + +/* + * Trie structure + * + * The trie (with associated code in normalizeCaseFoldU32Code) provides a way to + * map a single Unicode code point either to: + * - a flag indicating that it is invalid (either a noncharacter, or unassigned in + * the currently-supported version of Unicode); else + * - its NFD-normalized form if it has one - which might be a sequence + * of up to 4 code points - along with the combining class for the first of those + * code points; else + * - its case-folded form in cases where NFD normalization has no effect; else + * - its combining class (a number in the range 0..254) if non-zero; else + * - an indication that none of the above apply. + * + * The trie takes advantage of certain aspects of the layout of Unicode code point space, + * which is divided up as follows (check the ranges and numeric values below for each + * new Unicode version): + * + * 0000-313FF Mix of assigned characters and non-characters; notable subranges: + * 0000-007F ASCII range, characters in this range are never looked up in the trie. + * - They have no decompositions, and all have combining class 0. + * Some have case foldings. + * AC00-D7A3 Hangul syllables, which have algorithmic decompositions (not looked up + * in the trie) to conjoining jamo in the range 1100-11C2, and have + * combining class 0 (and no case folding!). None of these are the result + * of any decomposition or folding. + * D7A4-D7FF Mix of invalid and Hangul jamo. None of these have decompositions, + * foldings or nonzero combining class, and none of these are the result + * of any decomposition or folding. + * D800-DFFF Surrogate code points. These are not valid characters and will not + * be produced by conversion from UTF-8. + * E000-F8FF Private use characters. These have no decompositions or case foldings, + * and all have combining class 0. None of these are the result of any + * decomposition or folding. + * + * 31400-DFFFF Invalid + * E0000-E01FF Mix of assigned characters and non-characters + * E0200-EFFFF Invalid + * F0000-10FFFF Private use (no decomps or case foldings, combining class 0) except that + * FFFFE-FFFFF and 10FFFE-10FFFF are invalid. + * + * The ranges 31400-DFFFF and E0200-10FFFF are handled by code. + * The range E0000-E01FF is handled for trie lookup by shifting it down to follow the + * range 0000-313FF. Thus the trie handles data lookup for "lookup codepoints" in the + * range 0000-315FF (18 bits, instead of the full 21-bit range for Unicode codepoints), + * with the lookup codepoints 31400-315FF corresponding to real code points E0000-E01FF. + * + * Note that no decomposition or folding produces a value in the range AC00-F8FF, and + * no code point in AC00-F8FF has a non-algorithmic decomposition, a case folding, or + * a non-zero combining class. Thus the trie can use values in this range for special + * purposes. + * + * For case folding we use a simple table of the single-character foldings up through + * code point 04FF. This is used both for folding of the first char in sequences that + * result from NFD decompositions, and for folding of characters that don't change + * with NFD decomposition and have simple case-foldings to another single character. + * For other case-folding situations we use a flag in the trie value to enable additional + * mappings for case folding. + * + * The trie is divided into 3 stages.
The first (nfTrieHi) maps the high-order bits + * (bits 8 and up) of the lookup codepoint to one of the following trie values or + * value ranges which specifies certain behavior for the range of lookup codepoints + * with those high-order bits: + * 0: => every lookup codepoint in the range corresponds to a Unicode + * codepoint that is valid and has no decomposition, case folding, + * or non-zero combining class. + * kInvalidCodeFlag: => every lookup codepoint in the range corresponds to a Unicode + * codepoint that is invalid for the currently-supported version of + * Unicode. + * if neither of those, then check trie lookup value & kFlagTestMask for the following: + * kHangulMask: => every lookup codepoint in the range corresponds to a Unicode + * codepoint that is a composed Hangul syllable with an algorithmic + * decomposition, but no case folding or non-zero combining class. + * kCombClassFlag: => every lookup codepoint in the range corresponds to a Unicode + * codepoint that has a non-zero combining class, which is specified + * in the low-order 8 bits of the trie value. + * Otherwise the high-order 4 bits of the trie value match kNextIndexMask, and the + * low-order 12 bits specify an index into the middle trie stage nfTrieMid. + * + * Each entry in the second or middle trie stage (nfTrieMid) maps (for a particular + * combination of high bits) the middle-order bits (bits 4-7) to one of the following + * trie values or value ranges which specifies certain behavior for the range of lookup + * codepoints with the specified high-order and middle-order bits: + * 0: (as for the high-order stage above) + * kInvalidCodeFlag: (as for the high-order stage above) + * if neither of those, then check trie lookup value & kFlagTestMask for the following: + * kHangulMask: (as for the high-order stage above) + * kCombClassFlag: (as for the high-order stage above) + * kInvMaskFlag: => every lookup codepoint in the range corresponds to a Unicode + * codepoint that should be treated as having the trie value either 0 + * (no special handling) or kInvalidCodeFlag (invalid). The low-order + * 8 bits of the trie value specify an index into nfU16InvMasks, + * which provides a bitmask indicating which of the 16 codepoints in + * the range should be treated as having trie value kInvalidCodeFlag. + * Otherwise the high-order 4 bits of the trie value match kNextIndexMask, and the + * low-order 12 bits specify an index into the low trie stage nfTrieLo. + * + * Each entry in the third or low trie stage (nfTrieLo) maps (for a particular + * combination of bits 4 and higher) the low-order bits (bits 0-3) to one of the following + * trie values which specifies certain behavior for the specific lookup codepoint with the + * specified high-, middle-, and low-order bits: + * 0: (as for the high-order stage above) + * kInvalidCodeFlag: (as for the high-order stage above) + * if neither of those, then check trie lookup value & kFlagTestMask for the following: + * kHangulMask: (as for the high-order stage above) + * kCombClassFlag: (as for the high-order stage above) + * Otherwise the high-order 4 bits of the trie value match one of the following masks, + * which determine the result: + * kToU16Seq2Mask: => decomposition/folding to a sequence of 2 BMP codepoints; the + * first has combining class 0. The low order bits of the trie value + * are an index into nfU16Seq2 which provides the sequence. + * kToU16Seq3Mask: => decomposition/folding to a sequence of 3 BMP codepoints; the + * first has combining class 0.
The low order bits of the trie value + * are an index into nfU16Seq3 which provides the sequence. + * kToU16SeqMiscMask: => mapping to a BMP sequence of length up to 4 in which the first + * character may have a nonzero combining class. The low order bits + * of the trie value are an index into nfU16SeqMisc; the element + * at that index has the sequence length in the low 4 bits and the + * combining class of the first element in the bits 4 and up; the + * following elements are the Unicode codepoints of the sequence. + * kToU32CharMask: => decomposition/folding to a single non-BMP codepoint, with + * combining class 0. The low order bits of the trie value are an + * index into nfU32Char which provides the result character. + * kToU32SeqMiscMask: => mapping to a non-BMP sequence of length up to 4 in which the + * first character may have a nonzero combining class. The low order + * bits of the trie value are an index into nfU32SeqMisc; the element + * at that index has the sequence length in the low 4 bits and the + * combining class of the first element in the bits 4 and up; the + * following elements are the Unicode codepoints of the sequence. + * + */ + +enum { + kU32LowRangeLimit = 0x31400, + kU32HiRangeStart = 0xE0000, + kU32HiRangeLen = 0x200, + kU32HiPUAStart = 0xF0000, + kNFTrieHiShift = 8, + kNFTrieMidShift = 4, + kNFTrieMidMask = 0x0F, + kNFTrieLoMask = 0x0F, + /* for hi/mid trie stages only: */ + kNextIndexMask = 0xC000, + kNextIndexValueMask = 0x0FFF, + /* for any trie stage */ + kInvalidCodeFlag = 0xFFFF, + kFlagTestMask = 0xFF00, /* to test for flag */ + kFlagValueMask = 0x00FF, + kHangulMask = 0xAC00, /* flag to use algorithmic Hangul decomp */ + kCombClassFlag = 0xAD00, /* low 8 bits will be comb class */ + /* for mid trie stage only */ + kInvMaskFlag = 0xAE00, /* low 8 bits will be index into invalid mask table */ + /* for lo trie stage only */ + kSpecialsStart = 0xAC00, + kSpecialsMask = 0xF000, + kToU16Seq2Mask = 0xB000, /*to BFFF low 11 bits are index (max 758, 10 bits) in table of 2-element u16 sequences; 12th bit is casefold flag */ + kToU16Seq3Mask = 0xC000, /*to CFFF low 11 bits are index (max 220, 8 bits) in table of 3-element u16 sequences; 12th bit is casefold flag */ + kToU16SeqMiscMask = 0xD000, /*to D6FF low 10 bits are offset (max 200, 8 bits) in table of misc u16 sequences (with length/cc in prefix) */ + kToU32CharMask = 0xE000, /*to EFFF low 11 bits are index (max 305, 9 bits) in table of u32 chars; 12th bit is casefold flag */ + kToU32SeqMiscMask = 0xF000, /*to F8FF low 7 bits are offset (max 84, 7 bits) in table of misc u32 sequences (with length/cc in prefix) */ + kSpecialsEnd = 0xF8FF, + + kToSeqIndexMask = 0x07FF, + kToSeqCaseFoldMask = 0x0800, + kToSeqMiscIndexMask = 0x03FF, + kToSeqMiscLenMask = 0x000F, + kToSeqMiscCCShift = 4, + kNFCSingleCharDecompMax = 4, /* assumed to be at least 3 by code */ + kSimpleCaseFoldLimit = 0x0500, /* characters less than this mostly handled by simple casefold table */ +};
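To make the three-stage lookup concrete: a code point is first reduced to a "lookup codepoint" (the E0000-E01FF block is shifted down to 31400-315FF), bits 8 and up of that value index nfTrieHi, and the resulting 16-bit value is classified with the masks above. The sketch below is illustrative only and not part of this header: the helper name sketchTrieStage1 is hypothetical, the table is passed in as a parameter (the real lookups in vfs_unicode.c reference nfTrieHi directly), and it assumes the caller has already screened out the ranges handled purely by code (31400-DFFFF, E0200-10FFFF). For example, U+D55C indexes a stage-1 entry of 0xAC00 (kHangulMask) and is decomposed algorithmically, while U+E0100 becomes lookup codepoint 0x31500, whose stage-1 entry (0xC08B in the table below) sends the lookup on to nfTrieMid.

/*
 * Illustrative sketch only -- not part of vfs_unicode_data.h; it mirrors the
 * first trie stage described in the comment above.
 */
static uint16_t
sketchTrieStage1(int32_t u32char, const uint16_t *trieHi)
{
	int32_t u32charLookup = u32char;

	/* Shift 0xE0000-0xE01FF down to follow 0x0000-0x313FF, so that e.g.
	 * U+E0100 becomes the lookup codepoint 0x31500. */
	if (u32charLookup >= kU32LowRangeLimit) {
		u32charLookup -= (kU32HiRangeStart - kU32LowRangeLimit);
	}

	/* Stage 1: one nfTrieHi entry per 256 lookup codepoints (kNFTrieHiShift == 8). */
	uint16_t trieValue = trieHi[u32charLookup >> kNFTrieHiShift];

	if (trieValue == 0) {
		return 0;                                       /* whole block valid, nothing special */
	}
	if (trieValue == kInvalidCodeFlag) {
		return kInvalidCodeFlag;                        /* whole block invalid */
	}
	if ((trieValue & kFlagTestMask) == kHangulMask) {
		return kHangulMask;                             /* composed Hangul: algorithmic decomp */
	}
	if ((trieValue & kFlagTestMask) == kCombClassFlag) {
		return (uint16_t)(trieValue & kFlagValueMask);  /* non-zero combining class */
	}
	return (uint16_t)(trieValue & kNextIndexValueMask);     /* index into nfTrieMid (stage 2) */
}

The second and third stages repeat the same pattern using kNFTrieMidShift and kNFTrieMidMask, adding the kInvMaskFlag case at the middle stage and the kToU16...Mask / kToU32...Mask result encodings at the low stage, as described in the comment above.

+
+/* Start generated data.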
*/ +/* hiCount: 790, nfTrieHi size 1580 */ +/* midCount: 140, nfTrieMid size 4480 */ +/* loCount: 424, nfTrieLo size 13568 */ +/* u16InvMasksIndex: 128, nfU16InvMasks size 256 */ +/* u16Seq2Index: 773, nfU16Seq2 size 3092 */ +/* u16Seq3Index: 222, nfU16Seq3 size 1332 */ +/* u16SeqMiscOffset: 198, nfU16SeqMisc size 396 */ +/* u32CharIndex: 887, nfU32Char size 3548 */ +/* u32SeqMiscOffset: 87, nfU32SeqMisc size 348 */ +/* basicCFCount: 1280, nfBasicCF size 2560 */ + +static uint16_t nfTrieHi[790] = { + /*begidx: uchar */ + /* 0x000: 0x0000 */ 0xC000, 0xC001, 0xC002, 0xC003, 0xC004, 0xC005, 0xC006, 0xC007, 0xC008, 0xC009, 0xC00A, 0xC00B, 0xC00C, 0xC00D, 0xC00E, 0xC00F, + /* 0x010: 0x1000 */ 0xC010, 0x0000, 0xC011, 0xC012, 0x0000, 0x0000, 0xC013, 0xC014, 0xC015, 0xC016, 0xC017, 0xC018, 0xC019, 0xC01A, 0xC01B, 0xC01C, + /* 0x020: 0x2000 */ 0xC01D, 0xC01E, 0xC01F, 0xC020, 0xC021, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xC022, 0xC023, 0xC024, 0xC025, 0xC026, 0xC027, + /* 0x030: 0x3000 */ 0xC028, 0xC029, 0xC02A, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + /* 0x040: 0x4000 */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + /* 0x050: 0x5000 */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + /* 0x060: 0x6000 */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + /* 0x070: 0x7000 */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + /* 0x080: 0x8000 */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + /* 0x090: 0x9000 */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xC02B, + /* 0x0A0: 0xA000 */ 0x0000, 0x0000, 0x0000, 0x0000, 0xC02C, 0x0000, 0xC02D, 0xC02E, 0xC02F, 0xC030, 0xC031, 0xC032, 0xAC00, 0xAC00, 0xAC00, 0xAC00, + /* 0x0B0: 0xB000 */ 0xAC00, 0xAC00, 0xAC00, 0xAC00, 0xAC00, 0xAC00, 0xAC00, 0xAC00, 0xAC00, 0xAC00, 0xAC00, 0xAC00, 0xAC00, 0xAC00, 0xAC00, 0xAC00, + /* 0x0C0: 0xC000 */ 0xAC00, 0xAC00, 0xAC00, 0xAC00, 0xAC00, 0xAC00, 0xAC00, 0xAC00, 0xAC00, 0xAC00, 0xAC00, 0xAC00, 0xAC00, 0xAC00, 0xAC00, 0xAC00, + /* 0x0D0: 0xD000 */ 0xAC00, 0xAC00, 0xAC00, 0xAC00, 0xAC00, 0xAC00, 0xAC00, 0xC033, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, + /* 0x0E0: 0xE000 */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + /* 0x0F0: 0xF000 */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xC034, 0xC035, 0xC036, 0x0000, 0xC037, 0xC038, 0xC039, + /* 0x100:0x10000 */ 0xC03A, 0xC03B, 0xC03C, 0xC03D, 0xC03E, 0xC03F, 0x0000, 0xC040, 0xC041, 0xC042, 0xC043, 0xC044, 0xC045, 0xC046, 0xC047, 0xC048, + /* 0x110:0x11000 */ 0xC049, 0xC04A, 0xC04B, 0xC04C, 0xC04D, 0xC04E, 0xC04F, 0xC050, 0xC051, 0xC052, 0xC053, 0xFFFF, 0xC054, 0xC055, 0xC056, 0xC057, + /* 0x120:0x12000 */ 0x0000, 0x0000, 0x0000, 0xC058, 0xC059, 0xC05A, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, + /* 0x130:0x13000 */ 0x0000, 0x0000, 0x0000, 0x0000, 0xC05B, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, + /* 0x140:0x14000 */ 0xFFFF, 
0xFFFF, 0xFFFF, 0xFFFF, 0x0000, 0x0000, 0xC05C, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, + /* 0x150:0x15000 */ 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, + /* 0x160:0x16000 */ 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0000, 0x0000, 0xC05D, 0xC05E, 0xFFFF, 0xFFFF, 0xC05F, 0xC060, + /* 0x170:0x17000 */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + /* 0x180:0x18000 */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xC061, 0x0000, 0x0000, 0x0000, 0x0000, 0xC062, 0xC063, 0xFFFF, 0xFFFF, + /* 0x190:0x19000 */ 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, + /* 0x1A0:0x1A000 */ 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, + /* 0x1B0:0x1B000 */ 0x0000, 0xC064, 0xC065, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xC066, 0xFFFF, 0xFFFF, 0xFFFF, + /* 0x1C0:0x1C000 */ 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, + /* 0x1D0:0x1D000 */ 0xC067, 0xC068, 0xC069, 0xC06A, 0xC06B, 0xC06C, 0xC06D, 0xC06E, 0x0000, 0x0000, 0xC06F, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, + /* 0x1E0:0x1E000 */ 0xC070, 0xC071, 0xC072, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xC073, 0xC074, 0xFFFF, 0xFFFF, 0xC075, 0xC076, 0xC077, 0xFFFF, + /* 0x1F0:0x1F000 */ 0xC078, 0xC079, 0xC07A, 0x0000, 0x0000, 0x0000, 0xC07B, 0xC07C, 0xC07D, 0xC07E, 0xC07F, 0xC080, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, + /* 0x200:0x20000 */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + /* 0x210:0x21000 */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + /* 0x220:0x22000 */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + /* 0x230:0x23000 */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + /* 0x240:0x24000 */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + /* 0x250:0x25000 */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + /* 0x260:0x26000 */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + /* 0x270:0x27000 */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + /* 0x280:0x28000 */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + /* 0x290:0x29000 */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + /* 0x2A0:0x2A000 */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xC081, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + /* 0x2B0:0x2B000 */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xC082, 0xC083, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 
0x0000, 0x0000, + /* 0x2C0:0x2C000 */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xC084, 0x0000, + /* 0x2D0:0x2D000 */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + /* 0x2E0:0x2E000 */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xC085, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, + /* 0x2F0:0x2F000 */ 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xC086, 0xC087, 0xC088, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, + /* 0x300:0x30000 */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + /* 0x310:0x31000 */ 0x0000, 0x0000, 0x0000, 0xC089, 0xC08A, 0xC08B +}; +static uint16_t nfTrieMid[140][16] = { + /* 0x000 */ { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xC000, 0xC001, 0xC002, 0xC003 }, + /* 0x001 */ { 0xC004, 0xC005, 0xC006, 0xC007, 0xC008, 0xC009, 0xC00A, 0xC00B, 0x0000, 0x0000, 0xC00C, 0xC00D, 0xC00E, 0xC00F, 0xC010, 0xC011 }, + /* 0x002 */ { 0xC012, 0xC013, 0xC014, 0xC015, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000 }, + /* 0x003 */ { 0xADE6, 0xC016, 0xC017, 0xC018, 0xC019, 0xC01A, 0xC01B, 0xC01C, 0xC01D, 0xC01E, 0xC01F, 0xC020, 0xC021, 0xC022, 0x0000, 0x0000 }, + /* 0x004 */ { 0xC023, 0xC024, 0x0000, 0xC025, 0x0000, 0xC026, 0x0000, 0xC027, 0xC028, 0x0000, 0x0000, 0x0000, 0xC029, 0xC02A, 0xC02B, 0xC02C }, + /* 0x005 */ { 0xC02D, 0xC02E, 0xC02F, 0xC030, 0xC031, 0xC032, 0x0000, 0x0000, 0xC033, 0xC034, 0xC035, 0xC036, 0xC037, 0x0000, 0xAE00, 0xAE01 }, + /* 0x006 */ { 0x0000, 0xC038, 0xC039, 0x0000, 0xC03A, 0xC03B, 0x0000, 0xC03C, 0x0000, 0x0000, 0x0000, 0x0000, 0xC03D, 0xC03E, 0xC03F, 0x0000 }, + /* 0x007 */ { 0xAE02, 0xC040, 0x0000, 0xC041, 0xC042, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xAE03, 0x0000, 0x0000, 0xC043, 0xC044 }, + /* 0x008 */ { 0x0000, 0xC045, 0xC046, 0xAE04, 0x0000, 0xC047, 0xAE05, 0xFFFF, 0xFFFF, 0xFFFF, 0x0000, 0xAE06, 0xAE07, 0xC048, 0xC049, 0xC04A }, + /* 0x009 */ { 0x0000, 0x0000, 0xC04B, 0xC04C, 0xC04D, 0xC04E, 0x0000, 0x0000, 0xAE08, 0xAE09, 0xAE0A, 0xC04F, 0xC050, 0xC051, 0xAE0B, 0xC052 }, + /* 0x00A */ { 0xAE0C, 0xAE09, 0xAE0A, 0xC053, 0xC054, 0xC055, 0xAE0D, 0xAE0E, 0xAE0F, 0xAE10, 0xAE0A, 0xC056, 0xC057, 0xAE11, 0xAE0B, 0xAE12 }, + /* 0x00B */ { 0xAE13, 0xAE09, 0xAE0A, 0xC058, 0xC059, 0xC05A, 0xAE0B, 0xAE07, 0xAE14, 0xC05B, 0xAE15, 0xAE16, 0xC05C, 0xAE17, 0xAE0D, 0xAE05 }, + /* 0x00C */ { 0xAE18, 0xAE19, 0xAE0A, 0xAE1A, 0xC05D, 0xC05E, 0xAE0B, 0xAE1B, 0xAE18, 0xAE19, 0xAE0A, 0xC05F, 0xC060, 0xAE1C, 0xAE0B, 0xAE1D }, + /* 0x00D */ { 0xAE18, 0xAE19, 0x0000, 0xC061, 0xC062, 0xAE1E, 0xAE0B, 0x0000, 0xAE1F, 0xAE20, 0x0000, 0xAE21, 0xC063, 0xC064, 0xAE0D, 0xAE22 }, + /* 0x00E */ { 0xAE23, 0x0000, 0x0000, 0xC065, 0xC066, 0xAE24, 0xFFFF, 0xFFFF, 0xAE25, 0x0000, 0xAE26, 0xC067, 0xC068, 0xAE27, 0xFFFF, 0xFFFF }, + /* 0x00F */ { 0x0000, 0xC069, 0x0000, 0xC06A, 0xC06B, 0xC06C, 0xC06D, 0xC06E, 0xC06F, 0xC070, 0xC071, 0xC072, 0xC073, 0xAE05, 0xFFFF, 0xFFFF }, + /* 0x010 */ { 0x0000, 0x0000, 0xC074, 0xC075, 0x0000, 0x0000, 0x0000, 0x0000, 0xC076, 0x0000, 0xC077, 0xC078, 0xC079, 0x0000, 0x0000, 0x0000 }, + /* 0x011 */ { 0x0000, 0x0000, 0x0000, 0x0000, 0xAE28, 0xAE29, 0x0000, 0x0000, 0xAE28, 0x0000, 0x0000, 0xAE2A, 0xAE2B, 0xAE2C, 0x0000, 0x0000 }, + /* 0x012 */ { 0x0000, 0xAE2B, 0x0000, 0x0000, 
0x0000, 0xC07A, 0x0000, 0xAE2D, 0x0000, 0xAE2E, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xC07B }, + /* 0x013 */ { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xAE2D, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xAE2F }, + /* 0x014 */ { 0xAE18, 0xC07C, 0x0000, 0xC07D, 0x0000, 0xAE30, 0xAE18, 0xAE31, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xC07E, 0xAE2E, 0xAE2E }, + /* 0x015 */ { 0xAE04, 0xAE2E, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xAE2F, 0x0000, 0x0000, 0xC07F, 0x0000, 0x0000, 0x0000, 0x0000, 0xAE32 }, + /* 0x016 */ { 0x0000, 0xAE04, 0xAE24, 0xC080, 0xAE33, 0x0000, 0xAE34, 0xAE01, 0x0000, 0x0000, 0xAE24, 0x0000, 0xAE2E, 0xAE35, 0x0000, 0x0000 }, + /* 0x017 */ { 0x0000, 0xC081, 0x0000, 0x0000, 0x0000, 0xAE04, 0xC082, 0xC083, 0xAE2E, 0xAE2E, 0xAE34, 0xC084, 0xC085, 0xFFFF, 0xFFFF, 0xFFFF }, + /* 0x018 */ { 0xC086, 0xC087, 0x0000, 0xC088, 0xC089, 0x0000, 0xC08A, 0xC08B, 0x0000, 0x0000, 0xC08C, 0x0000, 0x0000, 0x0000, 0xC08D, 0xC08E }, + /* 0x019 */ { 0x0000, 0x0000, 0x0000, 0xC08F, 0xAE1A, 0x0000, 0x0000, 0x0000, 0xC090, 0xC091, 0xC092, 0xC093, 0xAE07, 0xC094, 0xC095, 0xC096 }, + /* 0x01A */ { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xC097, 0xC098, 0xADE6, 0xC099 }, + /* 0x01B */ { 0xC09A, 0xC09B, 0xC09C, 0xC09D, 0xC09E, 0xC09F, 0xC0A0, 0xC0A1, 0xC0A2, 0xC0A3, 0xC0A4, 0xC0A5, 0xC0A6, 0xC0A7, 0xC0A8, 0xC0A9 }, + /* 0x01C */ { 0xC0AA, 0xC0AB, 0xC0AC, 0xC0AD, 0xC0AE, 0xC0AF, 0xC0B0, 0xC0B1, 0xC0B2, 0xC0B3, 0xC0B4, 0xC0B5, 0xC0B6, 0xC0B7, 0xC0B8, 0xC0B9 }, + /* 0x01D */ { 0xC0BA, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xAE06, 0xAE36, 0xAE04, 0xAE2D, 0x0000, 0x0000, 0xFFFF, 0xC0BB, 0xC0BC, 0xC0BD }, + /* 0x01E */ { 0x0000, 0x0000, 0xC0BE, 0xC0BF, 0x0000, 0x0000, 0xC0C0, 0x0000, 0xC0C1, 0xC0C2, 0xC0C3, 0x0000, 0xC0C4, 0x0000, 0x0000, 0x0000 }, + /* 0x01F */ { 0xC0C5, 0x0000, 0xC0C6, 0x0000, 0xC0C7, 0x0000, 0xC0C8, 0xC0C9, 0xC0CA, 0x0000, 0xC0CB, 0x0000, 0x0000, 0x0000, 0xC0CC, 0x0000 }, + /* 0x020 */ { 0x0000, 0x0000, 0xC0CD, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000 }, + /* 0x021 */ { 0x0000, 0x0000, 0xAE0E, 0xFFFF, 0xAE05, 0xFFFF, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xC0CE, 0xC0CF, 0x0000, 0x0000, 0x0000 }, + /* 0x022 */ { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xC0D0, 0x0000, 0x0000 }, + /* 0x023 */ { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xAE0B, 0x0000, 0xAE37, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000 }, + /* 0x024 */ { 0xC0D1, 0xC0D2, 0xC0D3, 0x0000, 0x0000, 0xAE04, 0xC0D4, 0xC0D5, 0xC0D6, 0xC0D7, 0xC0D8, 0xC0D9, 0xC0DA, 0xC0DB, 0xC0DC, 0xC0DD }, + /* 0x025 */ { 0x0000, 0x0000, 0xAE38, 0x0000, 0x0000, 0x0000, 0xAE39, 0xC0DE, 0x0000, 0xAE0E, 0xAE3A, 0xAE3A, 0xAE3A, 0xAE3A, 0xADE6, 0xADE6 }, + /* 0x026 */ { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xAE3B, 0xFFFF, 0xFFFF, 0x0000, 0xAE3C, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xAE30 }, + /* 0x027 */ { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xAE32, 0xFFFF, 0xAE24 }, + /* 0x028 */ { 0x0000, 0x0000, 0xC0DF, 0x0000, 0xC0E0, 0xC0E1, 0xC0E2, 0xC0E3, 0x0000, 0xC0E4, 0xC0E5, 0xC0E6, 0xC0E7, 0xC0E8, 0x0000, 0xC0E9 }, + /* 0x029 */ { 0xAE3D, 0x0000, 0x0000, 0xAE23, 0x0000, 0x0000, 0x0000, 0x0000, 0xAE04, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xAE30, 0x0000 }, + /* 0x02A */ { 0x0000, 0xAE04, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000 }, + /* 0x02B */ { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xAE2D }, + /* 0x02C */ { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xAE2D, 0x0000, 0x0000, 0x0000, 0xAE0E, 0x0000, 0x0000, 0x0000 }, + /* 0x02D */ { 0x0000, 0x0000, 0xAE24, 0xFFFF, 0xC0EA, 0xC0EB, 0xC0EC, 0xC0ED, 0xC0EE, 0xC0EF, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xC0F0 }, + /* 0x02E */ { 0x0000, 0x0000, 0xC0F1, 0xC0F2, 0xC0F3, 0xC0F4, 0xC0F5, 0xC0F6, 0xC0F7, 0xC0F8, 0xC0F9, 0xC0FA, 0xC0FB, 0xFFFF, 0xFFFF, 0xC0FC }, + /* 0x02F */ { 0xC0FD, 0x0000, 0xC0FE, 0xAE2E, 0x0000, 0x0000, 0x0000, 0xAE07, 0x0000, 0x0000, 0x0000, 0x0000, 0xC0FF, 0xAE2E, 0xADE6, 0xC100 }, + /* 0x030 */ { 0x0000, 0x0000, 0xC101, 0x0000, 0x0000, 0xC102, 0x0000, 0xAE2D, 0x0000, 0x0000, 0x0000, 0xC103, 0xC104, 0xAE16, 0x0000, 0xAE04 }, + /* 0x031 */ { 0x0000, 0x0000, 0x0000, 0xAE0E, 0xAE34, 0xAE27, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xC105, 0xC106, 0xAE3E, 0x0000, 0xC107 }, + /* 0x032 */ { 0xAE3F, 0xAE40, 0xAE3A, 0x0000, 0x0000, 0x0000, 0xAE24, 0xC108, 0xC109, 0xC10A, 0xC10B, 0xC10C, 0x0000, 0x0000, 0xC10D, 0xAE2E }, + /* 0x033 */ { 0xAC00, 0xAC00, 0xAC00, 0xAC00, 0xAC00, 0xAC00, 0xAC00, 0xAC00, 0xAC00, 0xAC00, 0xC10E, 0x0000, 0xAE41, 0x0000, 0x0000, 0xAE24 }, + /* 0x034 */ { 0xC10F, 0xC110, 0xC111, 0xC112, 0xC113, 0xC114, 0xC115, 0xC116, 0xC117, 0xC118, 0xC119, 0xC11A, 0xC11B, 0xC11C, 0xC11D, 0xC11E }, + /* 0x035 */ { 0xC11F, 0xC120, 0xC121, 0xC122, 0xC123, 0xC124, 0xC125, 0xC126, 0xC127, 0xC128, 0xC129, 0xC12A, 0xC12B, 0xC12C, 0xFFFF, 0xFFFF }, + /* 0x036 */ { 0xC12D, 0xC12E, 0xC12F, 0xC130, 0xC131, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xAE03, 0xAE42, 0x0000, 0x0000 }, + /* 0x037 */ { 0x0000, 0x0000, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0x0000, 0x0000, 0xAE43, 0x0000, 0x0000, 0xAE07, 0xFFFF, 0xFFFF, 0xAE34 }, + /* 0x038 */ { 0x0000, 0xAE2E, 0xC132, 0x0000, 0x0000, 0xAE44, 0xAE45, 0xAE06, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xAE46 }, + /* 0x039 */ { 0xAE23, 0x0000, 0xC133, 0xC134, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xAE04, 0xAE47, 0xAE48, 0xAE3A, 0xAE49 }, + /* 0x03A */ { 0xAE4A, 0x0000, 0xAE2C, 0xAE4B, 0xAE34, 0xAE34, 0xFFFF, 0xFFFF, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xAE05 }, + /* 0x03B */ { 0xAE4C, 0x0000, 0x0000, 0xAE4D, 0x0000, 0x0000, 0x0000, 0x0000, 0xAE04, 0xAE2D, 0xAE11, 0xFFFF, 0xFFFF, 0x0000, 0x0000, 0xC135 }, + /* 0x03C */ { 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0000, 0xAE2D, 0x0000, 0x0000, 0x0000, 0xAE11, 0xC136, 0xAE24 }, + /* 0x03D */ { 0x0000, 0x0000, 0xAE4E, 0x0000, 0xAE05, 0x0000, 0x0000, 0xC137, 0x0000, 0xAE02, 0x0000, 0x0000, 0xAE4F, 0xAE32, 0xFFFF, 0xFFFF }, + /* 0x03E */ { 0xC138, 0xC139, 0xC13A, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xAE34, 0xAE2E, 0xC13B, 0xC13C, 0xC13D, 0x0000, 0xAE24 }, + /* 0x03F */ { 0x0000, 0x0000, 0xAE07, 0x0000, 0x0000, 0x0000, 0xAE50, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF }, + /* 0x040 */ { 0x0000, 0x0000, 0x0000, 0xAE0E, 0x0000, 0xAE32, 0xAE07, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF }, + /* 0x041 */ { 0xAE51, 0x0000, 0x0000, 0xAE52, 0x0000, 0xAE37, 0x0000, 0x0000, 0x0000, 0xAE04, 0xAE1B, 0xFFFF, 0xFFFF, 0xFFFF, 0x0000, 0xAE53 }, + /* 0x042 */ { 0x0000, 0xAE54, 0x0000, 0xAE55, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0000, 0x0000, 0x0000, 0xAE56, 0x0000, 0xAE43, 0x0000, 0x0000 }, 
+ /* 0x043 */ { 0xC13E, 0xAE57, 0x0000, 0xC13F, 0xAE2F, 0xAE2F, 0x0000, 0x0000, 0x0000, 0x0000, 0xFFFF, 0xFFFF, 0x0000, 0x0000, 0xC140, 0xAE0E }, + /* 0x044 */ { 0x0000, 0x0000, 0x0000, 0xAE58, 0x0000, 0xAE59, 0x0000, 0xAE5A, 0x0000, 0xAE5B, 0xAE5C, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF }, + /* 0x045 */ { 0x0000, 0x0000, 0x0000, 0x0000, 0xAE2F, 0xFFFF, 0xFFFF, 0xFFFF, 0xC141, 0xC142, 0xC143, 0xC144, 0x0000, 0x0000, 0x0000, 0xAE5D }, + /* 0x046 */ { 0x0000, 0x0000, 0xC145, 0xAE2E, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF }, + /* 0x047 */ { 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0000, 0xAE04, 0x0000, 0x0000, 0xC146, 0xAE03, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF }, + /* 0x048 */ { 0x0000, 0x0000, 0xAE07, 0x0000, 0xC147, 0xC148, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0000, 0xAE24, 0xFFFF, 0x0000, 0xAE0E }, + /* 0x049 */ { 0x0000, 0x0000, 0x0000, 0x0000, 0xC149, 0xAE43, 0x0000, 0xC14A, 0x0000, 0xC14B, 0xC14C, 0xC14D, 0xAE5E, 0x0000, 0xAE2F, 0xAE2E }, + /* 0x04A */ { 0xC14E, 0x0000, 0xC14F, 0xC150, 0xAE07, 0x0000, 0x0000, 0xC151, 0x0000, 0x0000, 0x0000, 0x0000, 0xC152, 0x0000, 0xAE23, 0xAE01 }, + /* 0x04B */ { 0x0000, 0xAE10, 0x0000, 0xC153, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xAE5F, 0xAE02, 0xAE2E, 0x0000, 0x0000, 0x0000, 0xC154, 0xAE2E }, + /* 0x04C */ { 0xAE08, 0xAE09, 0xAE0A, 0xC155, 0xC156, 0xAE60, 0xC157, 0xC158, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF }, + /* 0x04D */ { 0x0000, 0x0000, 0x0000, 0x0000, 0xC159, 0xC15A, 0xAE03, 0xFFFF, 0x0000, 0x0000, 0x0000, 0xC15B, 0xC15C, 0xAE2E, 0xFFFF, 0xFFFF }, + /* 0x04E */ { 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0000, 0x0000, 0x0000, 0xC15D, 0xC15E, 0xAE34, 0xFFFF, 0xFFFF }, + /* 0x04F */ { 0x0000, 0x0000, 0x0000, 0xC15F, 0xAE01, 0xAE2E, 0xAE2D, 0xFFFF, 0x0000, 0x0000, 0x0000, 0xC160, 0xAE2E, 0xFFFF, 0xFFFF, 0xFFFF }, + /* 0x050 */ { 0x0000, 0xAE61, 0xC161, 0x0000, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF }, + /* 0x051 */ { 0x0000, 0x0000, 0x0000, 0xC162, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xC163, 0xC164, 0x0000, 0x0000, 0x0000, 0xAE62 }, + /* 0x052 */ { 0xAE63, 0xAE64, 0x0000, 0xC165, 0xC166, 0xAE2E, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xAE65, 0x0000, 0x0000, 0xAE65, 0xC167, 0xFFFF }, + /* 0x053 */ { 0x0000, 0x0000, 0x0000, 0xC168, 0xC169, 0x0000, 0x0000, 0x0000, 0x0000, 0xC16A, 0xAE3B, 0xFFFF, 0x0000, 0x0000, 0x0000, 0xAE2F }, + /* 0x054 */ { 0xAE0A, 0x0000, 0x0000, 0xC16B, 0xAE32, 0x0000, 0xAE2D, 0x0000, 0x0000, 0xAE43, 0xAE66, 0xAE0E, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF }, + /* 0x055 */ { 0xAE67, 0x0000, 0x0000, 0xAE68, 0xC16C, 0xAE2E, 0xAE69, 0x0000, 0xAE04, 0xC16D, 0xAE2E, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF }, + /* 0x056 */ { 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0000, 0xAE2F }, + /* 0x057 */ { 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xAE11, 0x0000, 0x0000, 0x0000, 0xAE6A }, + /* 0x058 */ { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xAE2E, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF }, + /* 0x059 */ { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xAE04, 0xAE01, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000 }, + /* 0x05A */ { 0x0000, 0x0000, 0x0000, 0x0000, 0xAE30, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF }, + /* 0x05B */ { 0x0000, 0x0000, 0xAE04, 0xAE2F, 
0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF }, + /* 0x05C */ { 0x0000, 0x0000, 0x0000, 0x0000, 0xAE0E, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF }, + /* 0x05D */ { 0x0000, 0x0000, 0x0000, 0xAE2F, 0x0000, 0xAE04, 0xAE16, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0000, 0xAE34, 0xC16E }, + /* 0x05E */ { 0x0000, 0x0000, 0x0000, 0xC16F, 0xAE32, 0xAE3C, 0xAE10, 0xAE6B, 0x0000, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF }, + /* 0x05F */ { 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xC170, 0xC171, 0x0000, 0x0000, 0x0000, 0xAE05, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF }, + /* 0x060 */ { 0x0000, 0x0000, 0x0000, 0x0000, 0xAE00, 0x0000, 0x0000, 0x0000, 0xAE39, 0x0000, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xAE01, 0xC172 }, + /* 0x061 */ { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xAE07 }, + /* 0x062 */ { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xAE32, 0xFFFF, 0xFFFF }, + /* 0x063 */ { 0xAE2F, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF }, + /* 0x064 */ { 0x0000, 0xAE04, 0xFFFF, 0xFFFF, 0xFFFF, 0xAE3B, 0xAE6C, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000 }, + /* 0x065 */ { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xAE24 }, + /* 0x066 */ { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xAE05, 0xAE2D, 0xAE2F, 0xC173, 0xAE30, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF }, + /* 0x067 */ { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xAE32 }, + /* 0x068 */ { 0x0000, 0x0000, 0xAE6D, 0x0000, 0x0000, 0xC174, 0xC175, 0xC176, 0xC177, 0x0000, 0xC178, 0xC179, 0xC17A, 0x0000, 0xAE2F, 0xFFFF }, + /* 0x069 */ { 0x0000, 0x0000, 0x0000, 0x0000, 0xC17B, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0000, 0xAE30 }, + /* 0x06A */ { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xAE0E, 0x0000, 0xAE2F, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF }, + /* 0x06B */ { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xAE06, 0x0000, 0x0000, 0x0000, 0xAE18, 0xAE6E, 0xAE6F, 0xAE70, 0x0000, 0x0000, 0x0000 }, + /* 0x06C */ { 0xAE71, 0xAE72, 0x0000, 0xAE73, 0xAE74, 0xAE19, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000 }, + /* 0x06D */ { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xAE59, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000 }, + /* 0x06E */ { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xAE75, 0x0000, 0x0000, 0x0000 }, + /* 0x06F */ { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xAE24, 0xAE3E, 0xAE23, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF }, + /* 0x070 */ { 0xC17C, 0xC17D, 0xC17E, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF }, + /* 0x071 */ { 0x0000, 0x0000, 0xAE2D, 0xC17F, 0xAE16, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF }, + /* 0x072 */ { 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0000, 0x0000, 0xC180, 0xAE55 }, + /* 0x073 */ { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 
0x0000, 0x0000, 0xAE76, 0xC181, 0xFFFF, 0xFFFF }, + /* 0x074 */ { 0xC182, 0xC183, 0xC184, 0x0000, 0xC185, 0xAE16, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF }, + /* 0x075 */ { 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xAE23, 0x0000, 0x0000, 0x0000, 0xAE01, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF }, + /* 0x076 */ { 0xAE23, 0x0000, 0x0000, 0xAE34, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF }, + /* 0x077 */ { 0xAE70, 0x0000, 0xAE77, 0xAE78, 0xAE79, 0xAE7A, 0xAE7B, 0xAE7C, 0xAE3C, 0xAE24, 0xAE7D, 0xAE24, 0xFFFF, 0xFFFF, 0xFFFF, 0xAE03 }, + /* 0x078 */ { 0x0000, 0x0000, 0xAE24, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xAE30, 0xAE04, 0xAE23, 0xAE23, 0xAE23, 0x0000, 0xAE32 }, + /* 0x079 */ { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xAE34, 0xFFFF, 0xFFFF, 0xFFFF, 0xAE0D, 0x0000 }, + /* 0x07A */ { 0xAE3B, 0x0000, 0x0000, 0xAE24, 0xAE2F, 0xAE03, 0xAE32, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF }, + /* 0x07B */ { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xAE07, 0xAE2D, 0xAE2D }, + /* 0x07C */ { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xAE30, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xAE2F, 0xAE24, 0xFFFF }, + /* 0x07D */ { 0xAE24, 0x0000, 0x0000, 0x0000, 0xAE07, 0xAE2E, 0x0000, 0x0000, 0xAE07, 0x0000, 0xAE34, 0xAE03, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF }, + /* 0x07E */ { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xAE0A, 0x0000, 0x0000, 0x0000, 0x0000, 0xAE4A, 0x0000, 0x0000, 0x0000 }, + /* 0x07F */ { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xAE30, 0xAE34, 0xAE7E, 0xAE0E, 0x0000, 0xAE2F, 0xAE0E, 0xAE3B, 0xAE0E, 0xFFFF, 0xFFFF }, + /* 0x080 */ { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xAE44, 0x0000, 0x0000, 0xAE05, 0xFFFF, 0xFFFF, 0xAE2E }, + /* 0x081 */ { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xAE34, 0xFFFF, 0xFFFF }, + /* 0x082 */ { 0x0000, 0x0000, 0x0000, 0xAE01, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000 }, + /* 0x083 */ { 0x0000, 0xAE34, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000 }, + /* 0x084 */ { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xAE03, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000 }, + /* 0x085 */ { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xAE11, 0xFFFF }, + /* 0x086 */ { 0xC186, 0xC187, 0xC188, 0xC189, 0xC18A, 0xC18B, 0xC18C, 0xC18D, 0xC18E, 0xC18F, 0xC190, 0xC191, 0xC192, 0xC193, 0xC194, 0xC195 }, + /* 0x087 */ { 0xC196, 0xC197, 0xC198, 0xC199, 0xC19A, 0xC19B, 0xC19C, 0xC19D, 0xC19E, 0xC19F, 0xC1A0, 0xC1A1, 0xC1A2, 0xC1A3, 0xC1A4, 0xC1A5 }, + /* 0x088 */ { 0xC1A6, 0xC1A7, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF }, + /* 0x089 */ { 0x0000, 0x0000, 0x0000, 0x0000, 0xAE05, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF }, + /* 0x08A */ { 0xAE7F, 0xFFFF, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF }, + /* 0x08B */ { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xFFFF } 
+}; +static uint16_t nfTrieLo[424][16] = { + /* 0x000 */ { 0xB000, 0xB001, 0xB002, 0xB003, 0xB004, 0xB005, 0x0000, 0xB006, 0xB007, 0xB008, 0xB009, 0xB00A, 0xB00B, 0xB00C, 0xB00D, 0xB00E }, + /* 0x001 */ { 0x0000, 0xB00F, 0xB010, 0xB011, 0xB012, 0xB013, 0xB014, 0x0000, 0x0000, 0xB015, 0xB016, 0xB017, 0xB018, 0xB019, 0x0000, 0xB81A }, + /* 0x002 */ { 0xB01B, 0xB01C, 0xB01D, 0xB01E, 0xB01F, 0xB020, 0x0000, 0xB021, 0xB022, 0xB023, 0xB024, 0xB025, 0xB026, 0xB027, 0xB028, 0xB029 }, + /* 0x003 */ { 0x0000, 0xB02A, 0xB02B, 0xB02C, 0xB02D, 0xB02E, 0xB02F, 0x0000, 0x0000, 0xB030, 0xB031, 0xB032, 0xB033, 0xB034, 0x0000, 0xB035 }, + /* 0x004 */ { 0xB036, 0xB037, 0xB038, 0xB039, 0xB03A, 0xB03B, 0xB03C, 0xB03D, 0xB03E, 0xB03F, 0xB040, 0xB041, 0xB042, 0xB043, 0xB044, 0xB045 }, + /* 0x005 */ { 0x0000, 0x0000, 0xB046, 0xB047, 0xB048, 0xB049, 0xB04A, 0xB04B, 0xB04C, 0xB04D, 0xB04E, 0xB04F, 0xB050, 0xB051, 0xB052, 0xB053 }, + /* 0x006 */ { 0xB054, 0xB055, 0xB056, 0xB057, 0xB058, 0xB059, 0x0000, 0x0000, 0xB05A, 0xB05B, 0xB05C, 0xB05D, 0xB05E, 0xB05F, 0xB060, 0xB061 }, + /* 0x007 */ { 0xB062, 0x0000, 0x0000, 0x0000, 0xB063, 0xB064, 0xB065, 0xB066, 0x0000, 0xB067, 0xB068, 0xB069, 0xB06A, 0xB06B, 0xB06C, 0x0000 }, + /* 0x008 */ { 0x0000, 0x0000, 0x0000, 0xB06D, 0xB06E, 0xB06F, 0xB070, 0xB071, 0xB072, 0xB873, 0x0000, 0x0000, 0xB074, 0xB075, 0xB076, 0xB077 }, + /* 0x009 */ { 0xB078, 0xB079, 0x0000, 0x0000, 0xB07A, 0xB07B, 0xB07C, 0xB07D, 0xB07E, 0xB07F, 0xB080, 0xB081, 0xB082, 0xB083, 0xB084, 0xB085 }, + /* 0x00A */ { 0xB086, 0xB087, 0xB088, 0xB089, 0xB08A, 0xB08B, 0x0000, 0x0000, 0xB08C, 0xB08D, 0xB08E, 0xB08F, 0xB090, 0xB091, 0xB092, 0xB093 }, + /* 0x00B */ { 0xB094, 0xB095, 0xB096, 0xB097, 0xB098, 0xB099, 0xB09A, 0xB09B, 0xB09C, 0xB09D, 0xB09E, 0xB09F, 0xB0A0, 0xB0A1, 0xB0A2, 0x0000 }, + /* 0x00C */ { 0xB0A3, 0xB0A4, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xB0A5 }, + /* 0x00D */ { 0xB0A6, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000 }, + /* 0x00E */ { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xB0A7, 0xB0A8, 0xB0A9 }, + /* 0x00F */ { 0xB0AA, 0xB0AB, 0xB0AC, 0xB0AD, 0xB0AE, 0xC000, 0xC001, 0xC002, 0xC003, 0xC004, 0xC005, 0xC006, 0xC007, 0x0000, 0xC008, 0xC009 }, + /* 0x010 */ { 0xC00A, 0xC00B, 0xB0AF, 0xB0B0, 0x0000, 0x0000, 0xB0B1, 0xB0B2, 0xB0B3, 0xB0B4, 0xB0B5, 0xB0B6, 0xC00C, 0xC00D, 0xB0B7, 0xB0B8 }, + /* 0x011 */ { 0xB0B9, 0x0000, 0x0000, 0x0000, 0xB0BA, 0xB0BB, 0x0000, 0x0000, 0xB0BC, 0xB0BD, 0xC00E, 0xC00F, 0xB0BE, 0xB0BF, 0xB0C0, 0xB0C1 }, + /* 0x012 */ { 0xB0C2, 0xB0C3, 0xB0C4, 0xB0C5, 0xB0C6, 0xB0C7, 0xB0C8, 0xB0C9, 0xB0CA, 0xB0CB, 0xB0CC, 0xB0CD, 0xB0CE, 0xB0CF, 0xB0D0, 0xB0D1 }, + /* 0x013 */ { 0xB0D2, 0xB0D3, 0xB0D4, 0xB0D5, 0xB0D6, 0xB0D7, 0xB0D8, 0xB0D9, 0xB0DA, 0xB0DB, 0xB0DC, 0xB0DD, 0x0000, 0x0000, 0xB0DE, 0xB0DF }, + /* 0x014 */ { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xB0E0, 0xB0E1, 0xB0E2, 0xB0E3, 0xC010, 0xC011, 0xC012, 0xC013, 0xB0E4, 0xB0E5 }, + /* 0x015 */ { 0xC014, 0xC015, 0xB0E6, 0xB0E7, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000 }, + /* 0x016 */ { 0xADE6, 0xADE6, 0xADE6, 0xADE6, 0xADE6, 0xADE8, 0xADDC, 0xADDC, 0xADDC, 0xADDC, 0xADE8, 0xADD8, 0xADDC, 0xADDC, 0xADDC, 0xADDC }, + /* 0x017 */ { 0xADDC, 0xADCA, 0xADCA, 0xADDC, 0xADDC, 0xADDC, 0xADDC, 0xADCA, 0xADCA, 0xADDC, 0xADDC, 0xADDC, 0xADDC, 0xADDC, 0xADDC, 0xADDC }, + /* 
0x018 */ { 0xADDC, 0xADDC, 0xADDC, 0xADDC, 0xAD01, 0xAD01, 0xAD01, 0xAD01, 0xAD01, 0xADDC, 0xADDC, 0xADDC, 0xADDC, 0xADE6, 0xADE6, 0xADE6 }, + /* 0x019 */ { 0xD000, 0xD002, 0xADE6, 0xD004, 0xD006, 0xADF0, 0xADE6, 0xADDC, 0xADDC, 0xADDC, 0xADE6, 0xADE6, 0xADE6, 0xADDC, 0xADDC, 0x0000 }, + /* 0x01A */ { 0xADE6, 0xADE6, 0xADE6, 0xADDC, 0xADDC, 0xADDC, 0xADDC, 0xADE6, 0xADE8, 0xADDC, 0xADDC, 0xADE6, 0xADE9, 0xADEA, 0xADEA, 0xADE9 }, + /* 0x01B */ { 0xADEA, 0xADEA, 0xADE9, 0xADE6, 0xADE6, 0xADE6, 0xADE6, 0xADE6, 0xADE6, 0xADE6, 0xADE6, 0xADE6, 0xADE6, 0xADE6, 0xADE6, 0xADE6 }, + /* 0x01C */ { 0x0000, 0x0000, 0x0000, 0x0000, 0x02B9, 0x0000, 0x0000, 0x0000, 0xFFFF, 0xFFFF, 0x0000, 0x0000, 0x0000, 0x0000, 0x003B, 0x0000 }, + /* 0x01D */ { 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0000, 0xB0E8, 0xB0E9, 0x00B7, 0xB0EA, 0xB0EB, 0xB0EC, 0xFFFF, 0xB0ED, 0xFFFF, 0xB0EE, 0xB0EF }, + /* 0x01E */ { 0xC016, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000 }, + /* 0x01F */ { 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xB0F0, 0xB0F1, 0xB0F2, 0xB0F3, 0xB0F4, 0xB0F5 }, + /* 0x020 */ { 0xC017, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000 }, + /* 0x021 */ { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xB0F6, 0xB0F7, 0xB0F8, 0xB0F9, 0xB0FA, 0x0000 }, + /* 0x022 */ { 0x0000, 0x0000, 0x0000, 0xB0FB, 0xB0FC, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000 }, + /* 0x023 */ { 0xB0FD, 0xB0FE, 0x0000, 0xB0FF, 0x0000, 0x0000, 0x0000, 0xB100, 0x0000, 0x0000, 0x0000, 0x0000, 0xB101, 0xB102, 0xB103, 0x0000 }, + /* 0x024 */ { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xB104, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000 }, + /* 0x025 */ { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xB105, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000 }, + /* 0x026 */ { 0xB106, 0xB107, 0x0000, 0xB108, 0x0000, 0x0000, 0x0000, 0xB109, 0x0000, 0x0000, 0x0000, 0x0000, 0xB10A, 0xB10B, 0xB10C, 0x0000 }, + /* 0x027 */ { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xB10D, 0xB10E, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000 }, + /* 0x028 */ { 0x0000, 0x0000, 0x0000, 0xADE6, 0xADE6, 0xADE6, 0xADE6, 0xADE6, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000 }, + /* 0x029 */ { 0x0000, 0xB10F, 0xB110, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000 }, + /* 0x02A */ { 0xB111, 0xB112, 0xB113, 0xB114, 0x0000, 0x0000, 0xB115, 0xB116, 0x0000, 0x0000, 0xB117, 0xB118, 0xB119, 0xB11A, 0xB11B, 0xB11C }, + /* 0x02B */ { 0x0000, 0x0000, 0xB11D, 0xB11E, 0xB11F, 0xB120, 0xB121, 0xB122, 0x0000, 0x0000, 0xB123, 0xB124, 0xB125, 0xB126, 0xB127, 0xB128 }, + /* 0x02C */ { 0xB129, 0xB12A, 0xB12B, 0xB12C, 0xB12D, 0xB12E, 0x0000, 0x0000, 0xB12F, 0xB130, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000 }, + /* 0x02D */ { 0xE800, 0x0000, 0xE801, 0x0000, 0xE802, 0x0000, 0xE803, 0x0000, 0xE804, 0x0000, 0xE805, 0x0000, 0xE806, 0x0000, 0xE807, 0x0000 }, + /* 0x02E */ { 0xE808, 0x0000, 0xE809, 0x0000, 0xE80A, 0x0000, 0xE80B, 0x0000, 0xE80C, 0x0000, 0xE80D, 0x0000, 0xE80E, 0x0000, 0xE80F, 0x0000 }, + /* 0x02F */ { 0xE810, 0x0000, 0xE811, 0x0000, 0xE812, 0x0000, 0xE813, 0x0000, 0xE814, 0x0000, 0xE815, 0x0000, 0xE816, 0x0000, 0xE817, 0x0000 }, + /* 0x030 */ { 0xFFFF, 0xE818, 0xE819, 0xE81A, 0xE81B, 
0xE81C, 0xE81D, 0xE81E, 0xE81F, 0xE820, 0xE821, 0xE822, 0xE823, 0xE824, 0xE825, 0xE826 }, + /* 0x031 */ { 0xE827, 0xE828, 0xE829, 0xE82A, 0xE82B, 0xE82C, 0xE82D, 0xE82E, 0xE82F, 0xE830, 0xE831, 0xE832, 0xE833, 0xE834, 0xE835, 0xE836 }, + /* 0x032 */ { 0xE837, 0xE838, 0xE839, 0xE83A, 0xE83B, 0xE83C, 0xE83D, 0xFFFF, 0xFFFF, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000 }, + /* 0x033 */ { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xB931, 0x0000, 0x0000, 0x0000, 0xFFFF, 0xFFFF, 0x0000, 0x0000, 0x0000 }, + /* 0x034 */ { 0xFFFF, 0xADDC, 0xADE6, 0xADE6, 0xADE6, 0xADE6, 0xADDC, 0xADE6, 0xADE6, 0xADE6, 0xADDE, 0xADDC, 0xADE6, 0xADE6, 0xADE6, 0xADE6 }, + /* 0x035 */ { 0xADE6, 0xADE6, 0xADDC, 0xADDC, 0xADDC, 0xADDC, 0xADDC, 0xADDC, 0xADE6, 0xADE6, 0xADDC, 0xADE6, 0xADE6, 0xADDE, 0xADE4, 0xADE6 }, + /* 0x036 */ { 0xAD0A, 0xAD0B, 0xAD0C, 0xAD0D, 0xAD0E, 0xAD0F, 0xAD10, 0xAD11, 0xAD12, 0xAD13, 0xAD13, 0xAD14, 0xAD15, 0xAD16, 0x0000, 0xAD17 }, + /* 0x037 */ { 0x0000, 0xAD18, 0xAD19, 0x0000, 0xADE6, 0xADDC, 0x0000, 0xAD12, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF }, + /* 0x038 */ { 0xADE6, 0xADE6, 0xADE6, 0xADE6, 0xADE6, 0xADE6, 0xADE6, 0xADE6, 0xAD1E, 0xAD1F, 0xAD20, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000 }, + /* 0x039 */ { 0x0000, 0x0000, 0xB132, 0xB133, 0xB134, 0xB135, 0xB136, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000 }, + /* 0x03A */ { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xAD1B, 0xAD1C, 0xAD1D, 0xAD1E, 0xAD1F }, + /* 0x03B */ { 0xAD20, 0xAD21, 0xAD22, 0xADE6, 0xADE6, 0xADDC, 0xADDC, 0xADE6, 0xADE6, 0xADE6, 0xADE6, 0xADE6, 0xADDC, 0xADE6, 0xADE6, 0xADDC }, + /* 0x03C */ { 0xAD23, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000 }, + /* 0x03D */ { 0xB137, 0x0000, 0xB138, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000 }, + /* 0x03E */ { 0x0000, 0x0000, 0x0000, 0xB139, 0x0000, 0x0000, 0xADE6, 0xADE6, 0xADE6, 0xADE6, 0xADE6, 0xADE6, 0xADE6, 0x0000, 0x0000, 0xADE6 }, + /* 0x03F */ { 0xADE6, 0xADE6, 0xADE6, 0xADDC, 0xADE6, 0x0000, 0x0000, 0xADE6, 0xADE6, 0x0000, 0xADDC, 0xADE6, 0xADE6, 0xADDC, 0x0000, 0x0000 }, + /* 0x040 */ { 0x0000, 0xAD24, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000 }, + /* 0x041 */ { 0xADE6, 0xADDC, 0xADE6, 0xADE6, 0xADDC, 0xADE6, 0xADE6, 0xADDC, 0xADDC, 0xADDC, 0xADE6, 0xADDC, 0xADDC, 0xADE6, 0xADDC, 0xADE6 }, + /* 0x042 */ { 0xADE6, 0xADE6, 0xADDC, 0xADE6, 0xADDC, 0xADE6, 0xADDC, 0xADE6, 0xADDC, 0xADE6, 0xADE6, 0xFFFF, 0xFFFF, 0x0000, 0x0000, 0x0000 }, + /* 0x043 */ { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xADE6, 0xADE6, 0xADE6, 0xADE6, 0xADE6 }, + /* 0x044 */ { 0xADE6, 0xADE6, 0xADDC, 0xADE6, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xFFFF, 0xFFFF, 0xADDC, 0x0000, 0x0000 }, + /* 0x045 */ { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xADE6, 0xADE6, 0xADE6, 0xADE6, 0x0000, 0xADE6, 0xADE6, 0xADE6, 0xADE6, 0xADE6 }, + /* 0x046 */ { 0xADE6, 0xADE6, 0xADE6, 0xADE6, 0x0000, 0xADE6, 0xADE6, 0xADE6, 0x0000, 0xADE6, 0xADE6, 0xADE6, 0xADE6, 0xADE6, 0xFFFF, 0xFFFF }, + /* 0x047 */ { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xADDC, 0xADDC, 0xADDC, 0xFFFF, 0xFFFF, 0x0000, 0xFFFF }, + /* 0x048 */ { 0xFFFF, 0xFFFF, 0xFFFF, 0xADDC, 0xADE6, 0xADE6, 0xADE6, 0xADE6, 0xADE6, 0xADE6, 0xADE6, 
0xADE6, 0xADE6, 0xADE6, 0xADE6, 0xADE6 }, + /* 0x049 */ { 0xADE6, 0xADE6, 0x0000, 0xADDC, 0xADE6, 0xADE6, 0xADDC, 0xADE6, 0xADE6, 0xADDC, 0xADE6, 0xADE6, 0xADE6, 0xADDC, 0xADDC, 0xADDC }, + /* 0x04A */ { 0xAD1B, 0xAD1C, 0xAD1D, 0xADE6, 0xADE6, 0xADE6, 0xADDC, 0xADE6, 0xADE6, 0xADDC, 0xADDC, 0xADE6, 0xADE6, 0xADE6, 0xADE6, 0xADE6 }, + /* 0x04B */ { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xB13A, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000 }, + /* 0x04C */ { 0x0000, 0xB13B, 0x0000, 0x0000, 0xB13C, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xAD07, 0x0000, 0x0000, 0x0000 }, + /* 0x04D */ { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xAD09, 0x0000, 0x0000 }, + /* 0x04E */ { 0x0000, 0xADE6, 0xADDC, 0xADE6, 0xADE6, 0x0000, 0x0000, 0x0000, 0xB13D, 0xB13E, 0xB13F, 0xB140, 0xB141, 0xB142, 0xB143, 0xB144 }, + /* 0x04F */ { 0x0000, 0xFFFF, 0x0000, 0xFFFF, 0xFFFF, 0xFFFF, 0x0000, 0x0000, 0x0000, 0x0000, 0xFFFF, 0xFFFF, 0xAD07, 0x0000, 0x0000, 0x0000 }, + /* 0x050 */ { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xFFFF, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0xFFFF, 0xB145, 0xB146, 0xAD09, 0x0000, 0xFFFF }, + /* 0x051 */ { 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0000, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xB147, 0xB148, 0xFFFF, 0xB149 }, + /* 0x052 */ { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xADE6, 0xFFFF }, + /* 0x053 */ { 0x0000, 0xFFFF, 0x0000, 0xB14A, 0xFFFF, 0x0000, 0xB14B, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0xFFFF, 0xAD07, 0xFFFF, 0x0000, 0x0000 }, + /* 0x054 */ { 0x0000, 0x0000, 0x0000, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0xFFFF, 0x0000, 0x0000, 0xAD09, 0xFFFF, 0xFFFF }, + /* 0x055 */ { 0xFFFF, 0x0000, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xB14C, 0xB14D, 0xB14E, 0x0000, 0xFFFF, 0xB14F, 0xFFFF }, + /* 0x056 */ { 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xFFFF, 0xFFFF, 0xAD07, 0x0000, 0x0000, 0x0000 }, + /* 0x057 */ { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xAD09, 0xFFFF, 0xFFFF }, + /* 0x058 */ { 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xFFFF, 0xFFFF, 0xAD07, 0x0000, 0x0000, 0x0000 }, + /* 0x059 */ { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xFFFF, 0xFFFF, 0x0000, 0xB150, 0xFFFF, 0xFFFF, 0xB151, 0xB152, 0xAD09, 0xFFFF, 0xFFFF }, + /* 0x05A */ { 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0000, 0x0000, 0x0000, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xB153, 0xB154, 0xFFFF, 0x0000 }, + /* 0x05B */ { 0x0000, 0xFFFF, 0x0000, 0x0000, 0xB155, 0x0000, 0xFFFF, 0xFFFF, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0x0000, 0xFFFF, 0x0000, 0x0000 }, + /* 0x05C */ { 0x0000, 0x0000, 0x0000, 0xFFFF, 0xFFFF, 0xFFFF, 0x0000, 0x0000, 0x0000, 0xFFFF, 0xB156, 0xB157, 0xB158, 0xAD09, 0xFFFF, 0xFFFF }, + /* 0x05D */ { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xB159, 0xFFFF, 0x0000, 0x0000, 0x0000, 0xAD09, 0xFFFF, 0xFFFF }, + /* 0x05E */ { 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xAD54, 0xAD5B, 0xFFFF, 0x0000, 0x0000, 0x0000, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF }, + /* 0x05F */ { 0x0000, 0x0000, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xFFFF, 0xFFFF, 0xAD07, 0x0000, 0x0000, 0x0000 }, + /* 0x060 */ { 0xB15A, 0x0000, 0x0000, 0x0000, 0x0000, 0xFFFF, 0x0000, 0xB15B, 0xB15C, 0xFFFF, 0xB15D, 0xC018, 0x0000, 0xAD09, 0xFFFF, 0xFFFF }, + /* 
0x061 */ { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xAD09, 0xAD09, 0x0000, 0x0000, 0x0000 }, + /* 0x062 */ { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0x0000, 0xFFFF, 0xB15E, 0xB15F, 0xB160, 0xAD09, 0x0000, 0x0000 }, + /* 0x063 */ { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xFFFF, 0xFFFF, 0xFFFF, 0xAD09, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0000 }, + /* 0x064 */ { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xFFFF, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xB161, 0x0000, 0xB162, 0xC019, 0xB163, 0x0000 }, + /* 0x065 */ { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xAD67, 0xAD67, 0xAD09, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0000 }, + /* 0x066 */ { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xAD6B, 0xAD6B, 0xAD6B, 0xAD6B, 0x0000, 0x0000, 0x0000, 0x0000 }, + /* 0x067 */ { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xAD76, 0xAD76, 0xAD09, 0x0000, 0x0000, 0x0000, 0xFFFF, 0xFFFF }, + /* 0x068 */ { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xFFFF, 0x0000, 0xFFFF, 0xAD7A, 0xAD7A, 0xAD7A, 0xAD7A, 0x0000, 0x0000, 0xFFFF, 0xFFFF }, + /* 0x069 */ { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xADDC, 0xADDC, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000 }, + /* 0x06A */ { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xADDC, 0x0000, 0xADDC, 0x0000, 0xADD8, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000 }, + /* 0x06B */ { 0x0000, 0x0000, 0x0000, 0xB164, 0x0000, 0x0000, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0x0000, 0x0000, 0xB165, 0x0000, 0x0000 }, + /* 0x06C */ { 0x0000, 0x0000, 0xB166, 0x0000, 0x0000, 0x0000, 0x0000, 0xB167, 0x0000, 0x0000, 0x0000, 0x0000, 0xB168, 0x0000, 0x0000, 0x0000 }, + /* 0x06D */ { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xB169, 0x0000, 0x0000, 0x0000, 0xFFFF, 0xFFFF, 0xFFFF }, + /* 0x06E */ { 0xFFFF, 0xAD81, 0xAD82, 0xD009, 0xAD84, 0xD00C, 0xB16A, 0x0000, 0xB16B, 0x0000, 0xAD82, 0xAD82, 0xAD82, 0xAD82, 0x0000, 0x0000 }, + /* 0x06F */ { 0xAD82, 0xD00F, 0xADE6, 0xADE6, 0xAD09, 0x0000, 0xADE6, 0xADE6, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000 }, + /* 0x070 */ { 0x0000, 0x0000, 0x0000, 0xB16C, 0x0000, 0x0000, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0x0000, 0x0000, 0xB16D, 0x0000, 0x0000 }, + /* 0x071 */ { 0x0000, 0x0000, 0xB16E, 0x0000, 0x0000, 0x0000, 0x0000, 0xB16F, 0x0000, 0x0000, 0x0000, 0x0000, 0xB170, 0x0000, 0x0000, 0x0000 }, + /* 0x072 */ { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xB171, 0x0000, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000 }, + /* 0x073 */ { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xADDC, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000 }, + /* 0x074 */ { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xB172, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000 }, + /* 0x075 */ { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xAD07, 0x0000, 0xAD09, 0xAD09, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000 }, + /* 0x076 */ { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xADDC, 0x0000, 0x0000 }, + /* 0x077 */ { 0xE83E, 0xE83F, 0xE840, 0xE841, 0xE842, 0xE843, 0xE844, 0xE845, 0xE846, 0xE847, 0xE848, 0xE849, 0xE84A, 0xE84B, 0xE84C, 0xE84D }, + /* 0x078 */ { 0xE84E, 0xE84F, 0xE850, 0xE851, 0xE852, 0xE853, 0xE854, 0xE855, 0xE856, 0xE857, 0xE858, 0xE859, 0xE85A, 0xE85B, 0xE85C, 0xE85D }, + /* 0x079 */ { 0xE85E, 0xE85F, 0xE860, 0xE861, 0xE862, 
0xE863, 0xFFFF, 0xE864, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xE865, 0xFFFF, 0xFFFF }, + /* 0x07A */ { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xFFFF, 0xFFFF, 0xADE6, 0xADE6, 0xADE6 }, + /* 0x07B */ { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xFFFF, 0xFFFF, 0xE866, 0xE867, 0xE868, 0xE869, 0xE86A, 0xE86B, 0xFFFF, 0xFFFF }, + /* 0x07C */ { 0x0000, 0x0000, 0x0000, 0x0000, 0xAD09, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF }, + /* 0x07D */ { 0x0000, 0x0000, 0x0000, 0x0000, 0xAD09, 0x0000, 0x0000, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF }, + /* 0x07E */ { 0x0000, 0x0000, 0xAD09, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xADE6, 0xFFFF, 0xFFFF }, + /* 0x07F */ { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xADE4, 0x0000, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF }, + /* 0x080 */ { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xADDE, 0xADE6, 0xADDC, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF }, + /* 0x081 */ { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xADE6, 0xADDC, 0x0000, 0x0000, 0x0000, 0xFFFF, 0xFFFF, 0x0000, 0x0000 }, + /* 0x082 */ { 0xAD09, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000 }, + /* 0x083 */ { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xADE6, 0xADE6, 0xADE6, 0xADE6, 0xADE6, 0xADE6, 0xADE6, 0xADE6, 0xFFFF, 0xFFFF, 0xADDC }, + /* 0x084 */ { 0xADE6, 0xADE6, 0xADE6, 0xADE6, 0xADE6, 0xADDC, 0xADDC, 0xADDC, 0xADDC, 0xADDC, 0xADDC, 0xADE6, 0xADE6, 0xADDC, 0x0000, 0xADDC }, + /* 0x085 */ { 0xADDC, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF }, + /* 0x086 */ { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xB173, 0x0000, 0xB174, 0x0000, 0xB175, 0x0000, 0xB176, 0x0000, 0xB177, 0x0000 }, + /* 0x087 */ { 0x0000, 0x0000, 0xB178, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000 }, + /* 0x088 */ { 0x0000, 0x0000, 0x0000, 0x0000, 0xAD07, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xB179, 0x0000, 0xB17A, 0x0000, 0x0000 }, + /* 0x089 */ { 0xB17B, 0xB17C, 0x0000, 0xB17D, 0xAD09, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF }, + /* 0x08A */ { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xADE6, 0xADDC, 0xADE6, 0xADE6, 0xADE6 }, + /* 0x08B */ { 0xADE6, 0xADE6, 0xADE6, 0xADE6, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xFFFF, 0xFFFF, 0xFFFF }, + /* 0x08C */ { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xAD09, 0xAD09, 0x0000, 0x0000, 0x0000, 0x0000 }, + /* 0x08D */ { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xAD07, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000 }, + /* 0x08E */ { 0x0000, 0x0000, 0xAD09, 0xAD09, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0000, 0x0000, 0x0000, 0x0000 }, + /* 0x08F */ { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xAD07, 0xFFFF, 0xFFFF, 0xFFFF, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000 }, + /* 0x090 */ { 0xE86C, 0xE86D, 0xE86E, 0xE86F, 0xE870, 0xE871, 0xE872, 0xE873, 0xE874, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF }, + /* 0x091 */ { 0xE875, 0xE876, 0xE877, 0xE878, 0xE879, 0xE87A, 0xE87B, 0xE87C, 0xE87D, 0xE87E, 0xE87F, 
0xE880, 0xE881, 0xE882, 0xE883, 0xE884 }, + /* 0x092 */ { 0xE885, 0xE886, 0xE887, 0xE888, 0xE889, 0xE88A, 0xE88B, 0xE88C, 0xE88D, 0xE88E, 0xE88F, 0xE890, 0xE891, 0xE892, 0xE893, 0xE894 }, + /* 0x093 */ { 0xE895, 0xE896, 0xE897, 0xE898, 0xE899, 0xE89A, 0xE89B, 0xE89C, 0xE89D, 0xE89E, 0xE89F, 0xFFFF, 0xFFFF, 0xE8A0, 0xE8A1, 0xE8A2 }, + /* 0x094 */ { 0xADE6, 0xADE6, 0xADE6, 0x0000, 0xAD01, 0xADDC, 0xADDC, 0xADDC, 0xADDC, 0xADDC, 0xADE6, 0xADE6, 0xADDC, 0xADDC, 0xADDC, 0xADDC }, + /* 0x095 */ { 0xADE6, 0x0000, 0xAD01, 0xAD01, 0xAD01, 0xAD01, 0xAD01, 0xAD01, 0xAD01, 0x0000, 0x0000, 0x0000, 0x0000, 0xADDC, 0x0000, 0x0000 }, + /* 0x096 */ { 0x0000, 0x0000, 0x0000, 0x0000, 0xADE6, 0x0000, 0x0000, 0x0000, 0xADE6, 0xADE6, 0x0000, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF }, + /* 0x097 */ { 0xADE6, 0xADE6, 0xADDC, 0xADE6, 0xADE6, 0xADE6, 0xADE6, 0xADE6, 0xADE6, 0xADE6, 0xADDC, 0xADE6, 0xADE6, 0xADEA, 0xADD6, 0xADDC }, + /* 0x098 */ { 0xADCA, 0xADE6, 0xADE6, 0xADE6, 0xADE6, 0xADE6, 0xADE6, 0xADE6, 0xADE6, 0xADE6, 0xADE6, 0xADE6, 0xADE6, 0xADE6, 0xADE6, 0xADE6 }, + /* 0x099 */ { 0xADE6, 0xADE6, 0xADE6, 0xADE6, 0xADE6, 0xADE6, 0xADE8, 0xADE4, 0xADE4, 0xADDC, 0xFFFF, 0xADE6, 0xADE9, 0xADDC, 0xADE6, 0xADDC }, + /* 0x09A */ { 0xB17E, 0xB17F, 0xB180, 0xB181, 0xB182, 0xB183, 0xB184, 0xB185, 0xC01A, 0xC01B, 0xB186, 0xB187, 0xB188, 0xB189, 0xB18A, 0xB18B }, + /* 0x09B */ { 0xB18C, 0xB18D, 0xB18E, 0xB18F, 0xC01C, 0xC01D, 0xC01E, 0xC01F, 0xB190, 0xB191, 0xB192, 0xB193, 0xC020, 0xC021, 0xB194, 0xB195 }, + /* 0x09C */ { 0xB196, 0xB197, 0xB198, 0xB199, 0xB19A, 0xB19B, 0xB19C, 0xB19D, 0xB19E, 0xB19F, 0xB1A0, 0xB1A1, 0xB1A2, 0xB1A3, 0xC022, 0xC023 }, + /* 0x09D */ { 0xB1A4, 0xB1A5, 0xB1A6, 0xB1A7, 0xB1A8, 0xB1A9, 0xB1AA, 0xB1AB, 0xC024, 0xC025, 0xB1AC, 0xB1AD, 0xB1AE, 0xB1AF, 0xB1B0, 0xB1B1 }, + /* 0x09E */ { 0xB1B2, 0xB1B3, 0xB1B4, 0xB1B5, 0xB1B6, 0xB1B7, 0xB1B8, 0xB1B9, 0xB1BA, 0xB1BB, 0xB1BC, 0xB1BD, 0xC026, 0xC027, 0xC028, 0xC029 }, + /* 0x09F */ { 0xC02A, 0xC02B, 0xC02C, 0xC02D, 0xB1BE, 0xB1BF, 0xB1C0, 0xB1C1, 0xB1C2, 0xB1C3, 0xB1C4, 0xB1C5, 0xC02E, 0xC02F, 0xB1C6, 0xB1C7 }, + /* 0x0A0 */ { 0xB1C8, 0xB1C9, 0xB1CA, 0xB1CB, 0xC030, 0xC031, 0xC032, 0xC033, 0xC034, 0xC035, 0xB1CC, 0xB1CD, 0xB1CE, 0xB1CF, 0xB1D0, 0xB1D1 }, + /* 0x0A1 */ { 0xB1D2, 0xB1D3, 0xB1D4, 0xB1D5, 0xB1D6, 0xB1D7, 0xB1D8, 0xB1D9, 0xC036, 0xC037, 0xC038, 0xC039, 0xB1DA, 0xB1DB, 0xB1DC, 0xB1DD }, + /* 0x0A2 */ { 0xB1DE, 0xB1DF, 0xB1E0, 0xB1E1, 0xB1E2, 0xB1E3, 0xB1E4, 0xB1E5, 0xB1E6, 0xB1E7, 0xB1E8, 0xB1E9, 0xB1EA, 0xB1EB, 0xB1EC, 0xB1ED }, + /* 0x0A3 */ { 0xB1EE, 0xB1EF, 0xB1F0, 0xB1F1, 0xB1F2, 0xB1F3, 0xB1F4, 0xB1F5, 0xB1F6, 0xB1F7, 0xB9F8, 0xB1F9, 0x0000, 0x0000, 0xB9FA, 0x0000 }, + /* 0x0A4 */ { 0xB1FB, 0xB1FC, 0xB1FD, 0xB1FE, 0xC03A, 0xC03B, 0xC03C, 0xC03D, 0xC03E, 0xC03F, 0xC040, 0xC041, 0xC042, 0xC043, 0xC044, 0xC045 }, + /* 0x0A5 */ { 0xC046, 0xC047, 0xC048, 0xC049, 0xC04A, 0xC04B, 0xC04C, 0xC04D, 0xB1FF, 0xB200, 0xB201, 0xB202, 0xB203, 0xB204, 0xC04E, 0xC04F }, + /* 0x0A6 */ { 0xC050, 0xC051, 0xC052, 0xC053, 0xC054, 0xC055, 0xC056, 0xC057, 0xB205, 0xB206, 0xB207, 0xB208, 0xB209, 0xB20A, 0xB20B, 0xB20C }, + /* 0x0A7 */ { 0xC058, 0xC059, 0xC05A, 0xC05B, 0xC05C, 0xC05D, 0xC05E, 0xC05F, 0xC060, 0xC061, 0xC062, 0xC063, 0xC064, 0xC065, 0xC066, 0xC067 }, + /* 0x0A8 */ { 0xC068, 0xC069, 0xC06A, 0xC06B, 0xB20D, 0xB20E, 0xB20F, 0xB210, 0xC06C, 0xC06D, 0xC06E, 0xC06F, 0xC070, 0xC071, 0xC072, 0xC073 }, + /* 0x0A9 */ { 0xC074, 0xC075, 0xB211, 0xB212, 0xB213, 0xB214, 0xB215, 0xB216, 0xB217, 0xB218, 0xE8A3, 0x0000, 0xE8A4, 0x0000, 0xE8A5, 0x0000 }, + /* 
0x0AA */ { 0xB219, 0xB21A, 0xC076, 0xC077, 0xC078, 0xC079, 0xC07A, 0xC07B, 0xB21B, 0xB21C, 0xC07C, 0xC07D, 0xC07E, 0xC07F, 0xC080, 0xC081 }, + /* 0x0AB */ { 0xB21D, 0xB21E, 0xC082, 0xC083, 0xC084, 0xC085, 0xFFFF, 0xFFFF, 0xB21F, 0xB220, 0xC086, 0xC087, 0xC088, 0xC089, 0xFFFF, 0xFFFF }, + /* 0x0AC */ { 0xB221, 0xB222, 0xC08A, 0xC08B, 0xC08C, 0xC08D, 0xC08E, 0xC08F, 0xB223, 0xB224, 0xC090, 0xC091, 0xC092, 0xC093, 0xC094, 0xC095 }, + /* 0x0AD */ { 0xB225, 0xB226, 0xC096, 0xC097, 0xC098, 0xC099, 0xC09A, 0xC09B, 0xB227, 0xB228, 0xC09C, 0xC09D, 0xC09E, 0xC09F, 0xC0A0, 0xC0A1 }, + /* 0x0AE */ { 0xB229, 0xB22A, 0xC0A2, 0xC0A3, 0xC0A4, 0xC0A5, 0xFFFF, 0xFFFF, 0xB22B, 0xB22C, 0xC0A6, 0xC0A7, 0xC0A8, 0xC0A9, 0xFFFF, 0xFFFF }, + /* 0x0AF */ { 0xB22D, 0xB22E, 0xC0AA, 0xC0AB, 0xC0AC, 0xC0AD, 0xC0AE, 0xC0AF, 0xFFFF, 0xB22F, 0xFFFF, 0xC0B0, 0xFFFF, 0xC0B1, 0xFFFF, 0xC0B2 }, + /* 0x0B0 */ { 0xB230, 0xB231, 0xC0B3, 0xC0B4, 0xC0B5, 0xC0B6, 0xC0B7, 0xC0B8, 0xB232, 0xB233, 0xC0B9, 0xC0BA, 0xC0BB, 0xC0BC, 0xC0BD, 0xC0BE }, + /* 0x0B1 */ { 0xB234, 0xB235, 0xB236, 0xB237, 0xB238, 0xB239, 0xB23A, 0xB23B, 0xB23C, 0xB23D, 0xB23E, 0xB23F, 0xB240, 0xB241, 0xFFFF, 0xFFFF }, + /* 0x0B2 */ { 0xC0BF, 0xC0C0, 0xD012, 0xD017, 0xD01C, 0xD021, 0xD026, 0xD02B, 0xC0C1, 0xC0C2, 0xD030, 0xD035, 0xD03A, 0xD03F, 0xD044, 0xD049 }, + /* 0x0B3 */ { 0xC0C3, 0xC0C4, 0xD04E, 0xD053, 0xD058, 0xD05D, 0xD062, 0xD067, 0xC0C5, 0xC0C6, 0xD06C, 0xD071, 0xD076, 0xD07B, 0xD080, 0xD085 }, + /* 0x0B4 */ { 0xC0C7, 0xC0C8, 0xD08A, 0xD08F, 0xD094, 0xD099, 0xD09E, 0xD0A3, 0xC0C9, 0xC0CA, 0xD0A8, 0xD0AD, 0xD0B2, 0xD0B7, 0xD0BC, 0xD0C1 }, + /* 0x0B5 */ { 0xB242, 0xB243, 0xC0CB, 0xB244, 0xC0CC, 0xFFFF, 0xB245, 0xC0CD, 0xB246, 0xB247, 0xB248, 0xB249, 0xB24A, 0x0000, 0x03B9, 0x0000 }, + /* 0x0B6 */ { 0x0000, 0xB24B, 0xC0CE, 0xB24C, 0xC0CF, 0xFFFF, 0xB24D, 0xC0D0, 0xB24E, 0xB24F, 0xB250, 0xB251, 0xB252, 0xB253, 0xB254, 0xB255 }, + /* 0x0B7 */ { 0xB256, 0xB257, 0xC0D1, 0xC0D2, 0xFFFF, 0xFFFF, 0xB258, 0xC0D3, 0xB259, 0xB25A, 0xB25B, 0xB25C, 0xFFFF, 0xB25D, 0xB25E, 0xB25F }, + /* 0x0B8 */ { 0xB260, 0xB261, 0xC0D4, 0xC0D5, 0xB262, 0xB263, 0xB264, 0xC0D6, 0xB265, 0xB266, 0xB267, 0xB268, 0xB269, 0xB26A, 0xB26B, 0x0060 }, + /* 0x0B9 */ { 0xFFFF, 0xFFFF, 0xC0D7, 0xB26C, 0xC0D8, 0xFFFF, 0xB26D, 0xC0D9, 0xB26E, 0xB26F, 0xB270, 0xB271, 0xB272, 0x00B4, 0x0000, 0xFFFF }, + /* 0x0BA */ { 0x2002, 0x2003, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000 }, + /* 0x0BB */ { 0xADE6, 0xADE6, 0xAD01, 0xAD01, 0xADE6, 0xADE6, 0xADE6, 0xADE6, 0xAD01, 0xAD01, 0xAD01, 0xADE6, 0xADE6, 0x0000, 0x0000, 0x0000 }, + /* 0x0BC */ { 0x0000, 0xADE6, 0x0000, 0x0000, 0x0000, 0xAD01, 0xAD01, 0xADE6, 0xADDC, 0xADE6, 0xAD01, 0xAD01, 0xADDC, 0xADDC, 0xADDC, 0xADDC }, + /* 0x0BD */ { 0xADE6, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF }, + /* 0x0BE */ { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x03A9, 0x0000, 0x0000, 0x0000, 0x004B, 0xB273, 0x0000, 0x0000, 0x0000, 0x0000 }, + /* 0x0BF */ { 0x0000, 0x0000, 0xE8A6, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000 }, + /* 0x0C0 */ { 0xE8A7, 0xE8A8, 0xE8A9, 0xE8AA, 0xE8AB, 0xE8AC, 0xE8AD, 0xE8AE, 0xE8AF, 0xE8B0, 0xE8B1, 0xE8B2, 0xE8B3, 0xE8B4, 0xE8B5, 0xE8B6 }, + /* 0x0C1 */ { 0x0000, 0x0000, 0x0000, 0xE8B7, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF }, + /* 0x0C2 */ { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xB274, 0xB275, 0x0000, 0x0000, 0x0000, 0x0000 }, + /* 0x0C3 */ { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xB276, 0x0000 }, + /* 0x0C4 */ { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xB277, 0xB278, 0xB279 }, + /* 0x0C5 */ { 0x0000, 0x0000, 0x0000, 0x0000, 0xB27A, 0x0000, 0x0000, 0x0000, 0x0000, 0xB27B, 0x0000, 0x0000, 0xB27C, 0x0000, 0x0000, 0x0000 }, + /* 0x0C6 */ { 0x0000, 0x0000, 0x0000, 0x0000, 0xB27D, 0x0000, 0xB27E, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000 }, + /* 0x0C7 */ { 0x0000, 0xB27F, 0x0000, 0x0000, 0xB280, 0x0000, 0x0000, 0xB281, 0x0000, 0xB282, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000 }, + /* 0x0C8 */ { 0xB283, 0x0000, 0xB284, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xB285, 0xB286, 0xB287 }, + /* 0x0C9 */ { 0xB288, 0xB289, 0x0000, 0x0000, 0xB28A, 0xB28B, 0x0000, 0x0000, 0xB28C, 0xB28D, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000 }, + /* 0x0CA */ { 0xB28E, 0xB28F, 0x0000, 0x0000, 0xB290, 0xB291, 0x0000, 0x0000, 0xB292, 0xB293, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000 }, + /* 0x0CB */ { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xB294, 0xB295, 0xB296, 0xB297 }, + /* 0x0CC */ { 0xB298, 0xB299, 0xB29A, 0xB29B, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xB29C, 0xB29D, 0xB29E, 0xB29F, 0x0000, 0x0000 }, + /* 0x0CD */ { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x3008, 0x3009, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000 }, + /* 0x0CE */ { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xE8B8, 0xE8B9, 0xE8BA, 0xE8BB, 0xE8BC, 0xE8BD, 0xE8BE, 0xE8BF, 0xE8C0, 0xE8C1 }, + /* 0x0CF */ { 0xE8C2, 0xE8C3, 0xE8C4, 0xE8C5, 0xE8C6, 0xE8C7, 0xE8C8, 0xE8C9, 0xE8CA, 0xE8CB, 0xE8CC, 0xE8CD, 0xE8CE, 0xE8CF, 0xE8D0, 0xE8D1 }, + /* 0x0D0 */ { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xB2A0, 0x0000, 0x0000, 0x0000 }, + /* 0x0D1 */ { 0xE8D2, 0xE8D3, 0xE8D4, 0xE8D5, 0xE8D6, 0xE8D7, 0xE8D8, 0xE8D9, 0xE8DA, 0xE8DB, 0xE8DC, 0xE8DD, 0xE8DE, 0xE8DF, 0xE8E0, 0xE8E1 }, + /* 0x0D2 */ { 0xE8E2, 0xE8E3, 0xE8E4, 0xE8E5, 0xE8E6, 0xE8E7, 0xE8E8, 0xE8E9, 0xE8EA, 0xE8EB, 0xE8EC, 0xE8ED, 0xE8EE, 0xE8EF, 0xE8F0, 0xE8F1 }, + /* 0x0D3 */ { 0xE8F2, 0xE8F3, 0xE8F4, 0xE8F5, 0xE8F6, 0xE8F7, 0xE8F8, 0xE8F9, 0xE8FA, 0xE8FB, 0xE8FC, 0xE8FD, 0xE8FE, 0xE8FF, 0xE900, 0xFFFF }, + /* 0x0D4 */ { 0xE901, 0x0000, 0xE902, 0xE903, 0xE904, 0x0000, 0x0000, 0xE905, 0x0000, 0xE906, 0x0000, 0xE907, 0x0000, 0xE908, 0xE909, 0xE90A }, + /* 0x0D5 */ { 0xE90B, 0x0000, 0xE90C, 0x0000, 0x0000, 0xE90D, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xE90E, 0xE90F }, + /* 0x0D6 */ { 0xE910, 0x0000, 0xE911, 0x0000, 0xE912, 0x0000, 0xE913, 0x0000, 0xE914, 0x0000, 0xE915, 0x0000, 0xE916, 0x0000, 0xE917, 0x0000 }, + /* 0x0D7 */ { 0xE918, 0x0000, 0xE919, 0x0000, 0xE91A, 0x0000, 0xE91B, 0x0000, 0xE91C, 0x0000, 0xE91D, 0x0000, 0xE91E, 0x0000, 0xE91F, 0x0000 }, + /* 0x0D8 */ { 0xE920, 0x0000, 0xE921, 0x0000, 0xE922, 0x0000, 0xE923, 0x0000, 0xE924, 0x0000, 0xE925, 0x0000, 0xE926, 0x0000, 0xE927, 0x0000 }, + /* 0x0D9 */ { 0xE928, 0x0000, 0xE929, 0x0000, 0xE92A, 0x0000, 0xE92B, 0x0000, 0xE92C, 0x0000, 0xE92D, 0x0000, 0xE92E, 0x0000, 0xE92F, 0x0000 }, + /* 0x0DA */ { 0xE930, 0x0000, 0xE931, 0x0000, 0xE932, 0x0000, 0xE933, 0x0000, 0xE934, 0x0000, 0xE935, 
0x0000, 0xE936, 0x0000, 0xE937, 0x0000 }, + /* 0x0DB */ { 0xE938, 0x0000, 0xE939, 0x0000, 0xE93A, 0x0000, 0xE93B, 0x0000, 0xE93C, 0x0000, 0xE93D, 0x0000, 0xE93E, 0x0000, 0xE93F, 0x0000 }, + /* 0x0DC */ { 0xE940, 0x0000, 0xE941, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xE942, 0x0000, 0xE943, 0x0000, 0xADE6 }, + /* 0x0DD */ { 0xADE6, 0xADE6, 0xE944, 0x0000, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000 }, + /* 0x0DE */ { 0x0000, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xAD09 }, + /* 0x0DF */ { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xADDA, 0xADE4, 0xADE8, 0xADDE, 0xADE0, 0xADE0 }, + /* 0x0E0 */ { 0xFFFF, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xB2A1, 0x0000, 0xB2A2, 0x0000 }, + /* 0x0E1 */ { 0xB2A3, 0x0000, 0xB2A4, 0x0000, 0xB2A5, 0x0000, 0xB2A6, 0x0000, 0xB2A7, 0x0000, 0xB2A8, 0x0000, 0xB2A9, 0x0000, 0xB2AA, 0x0000 }, + /* 0x0E2 */ { 0xB2AB, 0x0000, 0xB2AC, 0x0000, 0x0000, 0xB2AD, 0x0000, 0xB2AE, 0x0000, 0xB2AF, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000 }, + /* 0x0E3 */ { 0xB2B0, 0xB2B1, 0x0000, 0xB2B2, 0xB2B3, 0x0000, 0xB2B4, 0xB2B5, 0x0000, 0xB2B6, 0xB2B7, 0x0000, 0xB2B8, 0xB2B9, 0x0000, 0x0000 }, + /* 0x0E4 */ { 0x0000, 0x0000, 0x0000, 0x0000, 0xB2BA, 0x0000, 0x0000, 0xFFFF, 0xFFFF, 0xAD08, 0xAD08, 0x0000, 0x0000, 0x0000, 0xB2BB, 0x0000 }, + /* 0x0E5 */ { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xB2BC, 0x0000, 0xB2BD, 0x0000 }, + /* 0x0E6 */ { 0xB2BE, 0x0000, 0xB2BF, 0x0000, 0xB2C0, 0x0000, 0xB2C1, 0x0000, 0xB2C2, 0x0000, 0xB2C3, 0x0000, 0xB2C4, 0x0000, 0xB2C5, 0x0000 }, + /* 0x0E7 */ { 0xB2C6, 0x0000, 0xB2C7, 0x0000, 0x0000, 0xB2C8, 0x0000, 0xB2C9, 0x0000, 0xB2CA, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000 }, + /* 0x0E8 */ { 0xB2CB, 0xB2CC, 0x0000, 0xB2CD, 0xB2CE, 0x0000, 0xB2CF, 0xB2D0, 0x0000, 0xB2D1, 0xB2D2, 0x0000, 0xB2D3, 0xB2D4, 0x0000, 0x0000 }, + /* 0x0E9 */ { 0x0000, 0x0000, 0x0000, 0x0000, 0xB2D5, 0x0000, 0x0000, 0xB2D6, 0xB2D7, 0xB2D8, 0xB2D9, 0x0000, 0x0000, 0x0000, 0xB2DA, 0x0000 }, + /* 0x0EA */ { 0xE945, 0x0000, 0xE946, 0x0000, 0xE947, 0x0000, 0xE948, 0x0000, 0xE949, 0x0000, 0xE94A, 0x0000, 0xE94B, 0x0000, 0xE94C, 0x0000 }, + /* 0x0EB */ { 0xE94D, 0x0000, 0xE94E, 0x0000, 0xE94F, 0x0000, 0xE950, 0x0000, 0xE951, 0x0000, 0xE952, 0x0000, 0xE953, 0x0000, 0xE954, 0x0000 }, + /* 0x0EC */ { 0xE955, 0x0000, 0xE956, 0x0000, 0xE957, 0x0000, 0xE958, 0x0000, 0xE959, 0x0000, 0xE95A, 0x0000, 0xE95B, 0x0000, 0x0000, 0xADE6 }, + /* 0x0ED */ { 0x0000, 0x0000, 0x0000, 0x0000, 0xADE6, 0xADE6, 0xADE6, 0xADE6, 0xADE6, 0xADE6, 0xADE6, 0xADE6, 0xADE6, 0xADE6, 0x0000, 0x0000 }, + /* 0x0EE */ { 0xE95C, 0x0000, 0xE95D, 0x0000, 0xE95E, 0x0000, 0xE95F, 0x0000, 0xE960, 0x0000, 0xE961, 0x0000, 0xE962, 0x0000, 0xE963, 0x0000 }, + /* 0x0EF */ { 0xE964, 0x0000, 0xE965, 0x0000, 0xE966, 0x0000, 0xE967, 0x0000, 0xE968, 0x0000, 0xE969, 0x0000, 0x0000, 0x0000, 0xADE6, 0xADE6 }, + /* 0x0F0 */ { 0xADE6, 0xADE6, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF }, + /* 0x0F1 */ { 0x0000, 0x0000, 0xE96A, 0x0000, 0xE96B, 0x0000, 0xE96C, 0x0000, 0xE96D, 0x0000, 0xE96E, 0x0000, 0xE96F, 0x0000, 0xE970, 0x0000 }, + /* 0x0F2 */ { 0x0000, 0x0000, 0xE971, 0x0000, 0xE972, 0x0000, 0xE973, 0x0000, 0xE974, 0x0000, 0xE975, 0x0000, 0xE976, 0x0000, 0xE977, 0x0000 }, + /* 
0x0F3 */ { 0xE978, 0x0000, 0xE979, 0x0000, 0xE97A, 0x0000, 0xE97B, 0x0000, 0xE97C, 0x0000, 0xE97D, 0x0000, 0xE97E, 0x0000, 0xE97F, 0x0000 }, + /* 0x0F4 */ { 0xE980, 0x0000, 0xE981, 0x0000, 0xE982, 0x0000, 0xE983, 0x0000, 0xE984, 0x0000, 0xE985, 0x0000, 0xE986, 0x0000, 0xE987, 0x0000 }, + /* 0x0F5 */ { 0xE988, 0x0000, 0xE989, 0x0000, 0xE98A, 0x0000, 0xE98B, 0x0000, 0xE98C, 0x0000, 0xE98D, 0x0000, 0xE98E, 0x0000, 0xE98F, 0x0000 }, + /* 0x0F6 */ { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xE990, 0x0000, 0xE991, 0x0000, 0xE992, 0xE993, 0x0000 }, + /* 0x0F7 */ { 0xE994, 0x0000, 0xE995, 0x0000, 0xE996, 0x0000, 0xE997, 0x0000, 0x0000, 0x0000, 0x0000, 0xE998, 0x0000, 0xE999, 0x0000, 0x0000 }, + /* 0x0F8 */ { 0xE99A, 0x0000, 0xE99B, 0x0000, 0x0000, 0x0000, 0xE99C, 0x0000, 0xE99D, 0x0000, 0xE99E, 0x0000, 0xE99F, 0x0000, 0xE9A0, 0x0000 }, + /* 0x0F9 */ { 0xE9A1, 0x0000, 0xE9A2, 0x0000, 0xE9A3, 0x0000, 0xE9A4, 0x0000, 0xE9A5, 0x0000, 0xE9A6, 0xE9A7, 0xE9A8, 0xE9A9, 0xE9AA, 0x0000 }, + /* 0x0FA */ { 0xE9AB, 0xE9AC, 0xE9AD, 0xE9AE, 0xE9AF, 0x0000, 0xE9B0, 0x0000, 0xE9B1, 0x0000, 0xE9B2, 0x0000, 0xE9B3, 0x0000, 0xE9B4, 0x0000 }, + /* 0x0FB */ { 0xFFFF, 0xFFFF, 0xE9B5, 0x0000, 0xE9B6, 0xE9B7, 0xE9B8, 0xE9B9, 0x0000, 0xE9BA, 0x0000, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF }, + /* 0x0FC */ { 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xE9BB, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000 }, + /* 0x0FD */ { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xAD09, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000 }, + /* 0x0FE */ { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xAD09, 0xFFFF, 0xFFFF, 0xFFFF }, + /* 0x0FF */ { 0x0000, 0x0000, 0x0000, 0x0000, 0xAD09, 0x0000, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0000, 0x0000 }, + /* 0x100 */ { 0xADE6, 0xADE6, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000 }, + /* 0x101 */ { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xADDC, 0xADDC, 0xADDC, 0x0000, 0x0000 }, + /* 0x102 */ { 0x0000, 0x0000, 0x0000, 0xAD09, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0000 }, + /* 0x103 */ { 0x0000, 0x0000, 0x0000, 0xAD07, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000 }, + /* 0x104 */ { 0xAD09, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xFFFF, 0x0000 }, + /* 0x105 */ { 0xADE6, 0x0000, 0xADE6, 0xADE6, 0xADDC, 0x0000, 0x0000, 0xADE6, 0xADE6, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xADE6, 0xADE6 }, + /* 0x106 */ { 0x0000, 0xADE6, 0x0000, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF }, + /* 0x107 */ { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xAD09, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF }, + /* 0x108 */ { 0xE9BC, 0xE9BD, 0xE9BE, 0xE9BF, 0xE9C0, 0xE9C1, 0xE9C2, 0xE9C3, 0xE9C4, 0xE9C5, 0xE9C6, 0xE9C7, 0xE9C8, 0xE9C9, 0xE9CA, 0xE9CB }, + /* 0x109 */ { 0xE9CC, 0xE9CD, 0xE9CE, 0xE9CF, 0xE9D0, 0xE9D1, 0xE9D2, 0xE9D3, 0xE9D4, 0xE9D5, 0xE9D6, 0xE9D7, 0xE9D8, 0xE9D9, 0xE9DA, 0xE9DB }, + /* 0x10A */ { 0xE9DC, 0xE9DD, 0xE9DE, 0xE9DF, 0xE9E0, 0xE9E1, 0xE9E2, 0xE9E3, 0xE9E4, 0xE9E5, 0xE9E6, 0xE9E7, 0xE9E8, 0xE9E9, 0xE9EA, 0xE9EB }, + /* 0x10B */ { 0xE9EC, 0xE9ED, 0xE9EE, 0xE9EF, 0xE9F0, 
0xE9F1, 0xE9F2, 0xE9F3, 0xE9F4, 0xE9F5, 0xE9F6, 0xE9F7, 0xE9F8, 0xE9F9, 0xE9FA, 0xE9FB }, + /* 0x10C */ { 0xE9FC, 0xE9FD, 0xE9FE, 0xE9FF, 0xEA00, 0xEA01, 0xEA02, 0xEA03, 0xEA04, 0xEA05, 0xEA06, 0xEA07, 0xEA08, 0xEA09, 0xEA0A, 0xEA0B }, + /* 0x10D */ { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xAD09, 0xFFFF, 0xFFFF }, + /* 0x10E */ { 0xAC00, 0xAC00, 0xAC00, 0xAC00, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF }, + /* 0x10F */ { 0x8C48, 0x66F4, 0x8ECA, 0x8CC8, 0x6ED1, 0x4E32, 0x53E5, 0x9F9C, 0x9F9C, 0x5951, 0x91D1, 0x5587, 0x5948, 0x61F6, 0x7669, 0x7F85 }, + /* 0x110 */ { 0x863F, 0x87BA, 0x88F8, 0x908F, 0x6A02, 0x6D1B, 0x70D9, 0x73DE, 0x843D, 0x916A, 0x99F1, 0x4E82, 0x5375, 0x6B04, 0x721B, 0x862D }, + /* 0x111 */ { 0x9E1E, 0x5D50, 0x6FEB, 0x85CD, 0x8964, 0x62C9, 0x81D8, 0x881F, 0x5ECA, 0x6717, 0x6D6A, 0x72FC, 0x90CE, 0x4F86, 0x51B7, 0x52DE }, + /* 0x112 */ { 0x64C4, 0x6AD3, 0x7210, 0x76E7, 0x8001, 0x8606, 0x865C, 0x8DEF, 0x9732, 0x9B6F, 0x9DFA, 0x788C, 0x797F, 0x7DA0, 0x83C9, 0x9304 }, + /* 0x113 */ { 0x9E7F, 0x8AD6, 0x58DF, 0x5F04, 0x7C60, 0x807E, 0x7262, 0x78CA, 0x8CC2, 0x96F7, 0x58D8, 0x5C62, 0x6A13, 0x6DDA, 0x6F0F, 0x7D2F }, + /* 0x114 */ { 0x7E37, 0x964B, 0x52D2, 0x808B, 0x51DC, 0x51CC, 0x7A1C, 0x7DBE, 0x83F1, 0x9675, 0x8B80, 0x62CF, 0x6A02, 0x8AFE, 0x4E39, 0x5BE7 }, + /* 0x115 */ { 0x6012, 0x7387, 0x7570, 0x5317, 0x78FB, 0x4FBF, 0x5FA9, 0x4E0D, 0x6CCC, 0x6578, 0x7D22, 0x53C3, 0x585E, 0x7701, 0x8449, 0x8AAA }, + /* 0x116 */ { 0x6BBA, 0x8FB0, 0x6C88, 0x62FE, 0x82E5, 0x63A0, 0x7565, 0x4EAE, 0x5169, 0x51C9, 0x6881, 0x7CE7, 0x826F, 0x8AD2, 0x91CF, 0x52F5 }, + /* 0x117 */ { 0x5442, 0x5973, 0x5EEC, 0x65C5, 0x6FFE, 0x792A, 0x95AD, 0x9A6A, 0x9E97, 0x9ECE, 0x529B, 0x66C6, 0x6B77, 0x8F62, 0x5E74, 0x6190 }, + /* 0x118 */ { 0x6200, 0x649A, 0x6F23, 0x7149, 0x7489, 0x79CA, 0x7DF4, 0x806F, 0x8F26, 0x84EE, 0x9023, 0x934A, 0x5217, 0x52A3, 0x54BD, 0x70C8 }, + /* 0x119 */ { 0x88C2, 0x8AAA, 0x5EC9, 0x5FF5, 0x637B, 0x6BAE, 0x7C3E, 0x7375, 0x4EE4, 0x56F9, 0x5BE7, 0x5DBA, 0x601C, 0x73B2, 0x7469, 0x7F9A }, + /* 0x11A */ { 0x8046, 0x9234, 0x96F6, 0x9748, 0x9818, 0x4F8B, 0x79AE, 0x91B4, 0x96B8, 0x60E1, 0x4E86, 0x50DA, 0x5BEE, 0x5C3F, 0x6599, 0x6A02 }, + /* 0x11B */ { 0x71CE, 0x7642, 0x84FC, 0x907C, 0x9F8D, 0x6688, 0x962E, 0x5289, 0x677B, 0x67F3, 0x6D41, 0x6E9C, 0x7409, 0x7559, 0x786B, 0x7D10 }, + /* 0x11C */ { 0x985E, 0x516D, 0x622E, 0x9678, 0x502B, 0x5D19, 0x6DEA, 0x8F2A, 0x5F8B, 0x6144, 0x6817, 0x7387, 0x9686, 0x5229, 0x540F, 0x5C65 }, + /* 0x11D */ { 0x6613, 0x674E, 0x68A8, 0x6CE5, 0x7406, 0x75E2, 0x7F79, 0x88CF, 0x88E1, 0x91CC, 0x96E2, 0x533F, 0x6EBA, 0x541D, 0x71D0, 0x7498 }, + /* 0x11E */ { 0x85FA, 0x96A3, 0x9C57, 0x9E9F, 0x6797, 0x6DCB, 0x81E8, 0x7ACB, 0x7B20, 0x7C92, 0x72C0, 0x7099, 0x8B58, 0x4EC0, 0x8336, 0x523A }, + /* 0x11F */ { 0x5207, 0x5EA6, 0x62D3, 0x7CD6, 0x5B85, 0x6D1E, 0x66B4, 0x8F3B, 0x884C, 0x964D, 0x898B, 0x5ED3, 0x5140, 0x55C0, 0x0000, 0x0000 }, + /* 0x120 */ { 0x585A, 0x0000, 0x6674, 0x0000, 0x0000, 0x51DE, 0x732A, 0x76CA, 0x793C, 0x795E, 0x7965, 0x798F, 0x9756, 0x7CBE, 0x7FBD, 0x0000 }, + /* 0x121 */ { 0x8612, 0x0000, 0x8AF8, 0x0000, 0x0000, 0x9038, 0x90FD, 0x0000, 0x0000, 0x0000, 0x98EF, 0x98FC, 0x9928, 0x9DB4, 0x90DE, 0x96B7 }, + /* 0x122 */ { 0x4FAE, 0x50E7, 0x514D, 0x52C9, 0x52E4, 0x5351, 0x559D, 0x5606, 0x5668, 0x5840, 0x58A8, 0x5C64, 0x5C6E, 0x6094, 0x6168, 0x618E }, + /* 0x123 */ { 0x61F2, 0x654F, 0x65E2, 0x6691, 0x6885, 0x6D77, 0x6E1A, 0x6F22, 0x716E, 0x722B, 0x7422, 
0x7891, 0x793E, 0x7949, 0x7948, 0x7950 }, + /* 0x124 */ { 0x7956, 0x795D, 0x798D, 0x798E, 0x7A40, 0x7A81, 0x7BC0, 0x7DF4, 0x7E09, 0x7E41, 0x7F72, 0x8005, 0x81ED, 0x8279, 0x8279, 0x8457 }, + /* 0x125 */ { 0x8910, 0x8996, 0x8B01, 0x8B39, 0x8CD3, 0x8D08, 0x8FB6, 0x9038, 0x96E3, 0x97FF, 0x983B, 0x6075, 0xE20C, 0x8218, 0xFFFF, 0xFFFF }, + /* 0x126 */ { 0x4E26, 0x51B5, 0x5168, 0x4F80, 0x5145, 0x5180, 0x52C7, 0x52FA, 0x559D, 0x5555, 0x5599, 0x55E2, 0x585A, 0x58B3, 0x5944, 0x5954 }, + /* 0x127 */ { 0x5A62, 0x5B28, 0x5ED2, 0x5ED9, 0x5F69, 0x5FAD, 0x60D8, 0x614E, 0x6108, 0x618E, 0x6160, 0x61F2, 0x6234, 0x63C4, 0x641C, 0x6452 }, + /* 0x128 */ { 0x6556, 0x6674, 0x6717, 0x671B, 0x6756, 0x6B79, 0x6BBA, 0x6D41, 0x6EDB, 0x6ECB, 0x6F22, 0x701E, 0x716E, 0x77A7, 0x7235, 0x72AF }, + /* 0x129 */ { 0x732A, 0x7471, 0x7506, 0x753B, 0x761D, 0x761F, 0x76CA, 0x76DB, 0x76F4, 0x774A, 0x7740, 0x78CC, 0x7AB1, 0x7BC0, 0x7C7B, 0x7D5B }, + /* 0x12A */ { 0x7DF4, 0x7F3E, 0x8005, 0x8352, 0x83EF, 0x8779, 0x8941, 0x8986, 0x8996, 0x8ABF, 0x8AF8, 0x8ACB, 0x8B01, 0x8AFE, 0x8AED, 0x8B39 }, + /* 0x12B */ { 0x8B8A, 0x8D08, 0x8F38, 0x9072, 0x9199, 0x9276, 0x967C, 0x96E3, 0x9756, 0x97DB, 0x97FF, 0x980B, 0x983B, 0x9B12, 0x9F9C, 0xE20D }, + /* 0x12C */ { 0xE20E, 0xE20F, 0x3B9D, 0x4018, 0x4039, 0xE210, 0xE211, 0xE212, 0x9F43, 0x9F8E, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF }, + /* 0x12D */ { 0xBADB, 0xBADC, 0xBADD, 0xC8DA, 0xC8DB, 0xBADE, 0xBADF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF }, + /* 0x12E */ { 0xFFFF, 0xFFFF, 0xFFFF, 0xBAE0, 0xBAE1, 0xBAE2, 0xBAE3, 0xBAE4, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xB2E5, 0xAD1A, 0xB2E6 }, + /* 0x12F */ { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xB2E7, 0xB2E8, 0xC0DC, 0xC0DD, 0xB2E9, 0xB2EA }, + /* 0x130 */ { 0xB2EB, 0xB2EC, 0xB2ED, 0xB2EE, 0xB2EF, 0xB2F0, 0xB2F1, 0xFFFF, 0xB2F2, 0xB2F3, 0xB2F4, 0xB2F5, 0xB2F6, 0xFFFF, 0xB2F7, 0xFFFF }, + /* 0x131 */ { 0xB2F8, 0xB2F9, 0xFFFF, 0xB2FA, 0xB2FB, 0xFFFF, 0xB2FC, 0xB2FD, 0xB2FE, 0xB2FF, 0xB300, 0xB301, 0xB302, 0xB303, 0xB304, 0x0000 }, + /* 0x132 */ { 0xADE6, 0xADE6, 0xADE6, 0xADE6, 0xADE6, 0xADE6, 0xADE6, 0xADDC, 0xADDC, 0xADDC, 0xADDC, 0xADDC, 0xADDC, 0xADDC, 0xADE6, 0xADE6 }, + /* 0x133 */ { 0x0000, 0xEA13, 0xEA14, 0xEA15, 0xEA16, 0xEA17, 0xEA18, 0xEA19, 0xEA1A, 0xEA1B, 0xEA1C, 0xEA1D, 0xEA1E, 0xEA1F, 0xEA20, 0xEA21 }, + /* 0x134 */ { 0xEA22, 0xEA23, 0xEA24, 0xEA25, 0xEA26, 0xEA27, 0xEA28, 0xEA29, 0xEA2A, 0xEA2B, 0xEA2C, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000 }, + /* 0x135 */ { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xADDC, 0xFFFF, 0xFFFF }, + /* 0x136 */ { 0xADDC, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000 }, + /* 0x137 */ { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xADE6, 0xADE6, 0xADE6, 0xADE6, 0xADE6, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF }, + /* 0x138 */ { 0xEA2D, 0xEA2E, 0xEA2F, 0xEA30, 0xEA31, 0xEA32, 0xEA33, 0xEA34, 0xEA35, 0xEA36, 0xEA37, 0xEA38, 0xEA39, 0xEA3A, 0xEA3B, 0xEA3C }, + /* 0x139 */ { 0xEA3D, 0xEA3E, 0xEA3F, 0xEA40, 0xEA41, 0xEA42, 0xEA43, 0xEA44, 0xEA45, 0xEA46, 0xEA47, 0xEA48, 0xEA49, 0xEA4A, 0xEA4B, 0xEA4C }, + /* 0x13A */ { 0xEA4D, 0xEA4E, 0xEA4F, 0xEA50, 0xEA51, 0xEA52, 0xEA53, 0xEA54, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000 }, + /* 0x13B */ { 0xEA55, 0xEA56, 0xEA57, 0xEA58, 0xEA59, 0xEA5A, 0xEA5B, 0xEA5C, 0xEA5D, 0xEA5E, 0xEA5F, 0xEA60, 0xEA61, 0xEA62, 0xEA63, 0xEA64 }, + /* 
0x13C */ { 0xEA65, 0xEA66, 0xEA67, 0xEA68, 0xEA69, 0xEA6A, 0xEA6B, 0xEA6C, 0xEA6D, 0xEA6E, 0xEA6F, 0xEA70, 0xEA71, 0xEA72, 0xEA73, 0xEA74 }, + /* 0x13D */ { 0xEA75, 0xEA76, 0xEA77, 0xEA78, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000 }, + /* 0x13E */ { 0x0000, 0x0000, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0000, 0xADDC, 0x0000, 0xADE6 }, + /* 0x13F */ { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xFFFF, 0xFFFF, 0xADE6, 0xAD01, 0xADDC, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xAD09 }, + /* 0x140 */ { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xADE6, 0xADDC, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000 }, + /* 0x141 */ { 0xEA79, 0xEA7A, 0xEA7B, 0xEA7C, 0xEA7D, 0xEA7E, 0xEA7F, 0xEA80, 0xEA81, 0xEA82, 0xEA83, 0xEA84, 0xEA85, 0xEA86, 0xEA87, 0xEA88 }, + /* 0x142 */ { 0xEA89, 0xEA8A, 0xEA8B, 0xEA8C, 0xEA8D, 0xEA8E, 0xEA8F, 0xEA90, 0xEA91, 0xEA92, 0xEA93, 0xEA94, 0xEA95, 0xEA96, 0xEA97, 0xEA98 }, + /* 0x143 */ { 0xEA99, 0xEA9A, 0xEA9B, 0xEA9C, 0xEA9D, 0xEA9E, 0xEA9F, 0xEAA0, 0xEAA1, 0xEAA2, 0xEAA3, 0xEAA4, 0xEAA5, 0xEAA6, 0xEAA7, 0xEAA8 }, + /* 0x144 */ { 0xEAA9, 0xEAAA, 0xEAAB, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF }, + /* 0x145 */ { 0x0000, 0x0000, 0x0000, 0x0000, 0xADE6, 0xADE6, 0xADE6, 0xADE6, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF }, + /* 0x146 */ { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xFFFF, 0xADE6, 0xADE6, 0x0000, 0xFFFF, 0xFFFF }, + /* 0x147 */ { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xADDC, 0xADDC, 0xADE6, 0xADE6, 0xADE6, 0xADDC, 0xADE6, 0xADDC, 0xADDC, 0xADDC }, + /* 0x148 */ { 0xADDC, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF }, + /* 0x149 */ { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xAD09, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xFFFF, 0xFFFF }, + /* 0x14A */ { 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xAD09 }, + /* 0x14B */ { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xF000, 0x0000, 0xF003, 0x0000, 0x0000, 0x0000 }, + /* 0x14C */ { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xF006, 0x0000, 0x0000, 0x0000, 0x0000 }, + /* 0x14D */ { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xAD09, 0xAD07, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000 }, + /* 0x14E */ { 0xADE6, 0xADE6, 0xADE6, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000 }, + /* 0x14F */ { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xF009, 0xF00C }, + /* 0x150 */ { 0x0000, 0x0000, 0x0000, 0xAD09, 0xAD09, 0xFFFF, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000 }, + /* 0x151 */ { 0x0000, 0x0000, 0x0000, 0xAD07, 0x0000, 0x0000, 0x0000, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF }, + /* 0x152 */ { 0xAD09, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xAD07, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000 }, + /* 0x153 */ { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xAD09, 0xAD07, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xFFFF }, + /* 0x154 */ { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 
0x0000, 0x0000, 0x0000, 0x0000, 0xAD07, 0xAD09, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF }, + /* 0x155 */ { 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xFFFF, 0xAD07, 0xAD07, 0x0000, 0x0000, 0x0000 }, + /* 0x156 */ { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xFFFF, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0xFFFF, 0xF00F, 0xF012, 0xAD09, 0xFFFF, 0xFFFF }, + /* 0x157 */ { 0x0000, 0x0000, 0x0000, 0x0000, 0xFFFF, 0xFFFF, 0xADE6, 0xADE6, 0xADE6, 0xADE6, 0xADE6, 0xADE6, 0xADE6, 0xFFFF, 0xFFFF, 0xFFFF }, + /* 0x158 */ { 0xADE6, 0xADE6, 0xADE6, 0xADE6, 0xADE6, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF }, + /* 0x159 */ { 0x0000, 0x0000, 0xAD09, 0x0000, 0x0000, 0x0000, 0xAD07, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000 }, + /* 0x15A */ { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xFFFF, 0x0000, 0xADE6, 0x0000 }, + /* 0x15B */ { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xF015, 0xF018, 0x0000, 0xF01B, 0x0000 }, + /* 0x15C */ { 0x0000, 0x0000, 0xAD09, 0xAD07, 0x0000, 0x0000, 0x0000, 0x0000, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF }, + /* 0x15D */ { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xFFFF, 0xFFFF, 0x0000, 0x0000, 0xF01E, 0xF021, 0x0000, 0x0000, 0x0000, 0xAD09 }, + /* 0x15E */ { 0xAD07, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000 }, + /* 0x15F */ { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xAD09 }, + /* 0x160 */ { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xAD09, 0xAD07, 0x0000, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF }, + /* 0x161 */ { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xAD09, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF }, + /* 0x162 */ { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xAD09, 0xAD07, 0x0000, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF }, + /* 0x163 */ { 0xEAAC, 0xEAAD, 0xEAAE, 0xEAAF, 0xEAB0, 0xEAB1, 0xEAB2, 0xEAB3, 0xEAB4, 0xEAB5, 0xEAB6, 0xEAB7, 0xEAB8, 0xEAB9, 0xEABA, 0xEABB }, + /* 0x164 */ { 0xEABC, 0xEABD, 0xEABE, 0xEABF, 0xEAC0, 0xEAC1, 0xEAC2, 0xEAC3, 0xEAC4, 0xEAC5, 0xEAC6, 0xEAC7, 0xEAC8, 0xEAC9, 0xEACA, 0xEACB }, + /* 0x165 */ { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xFFFF, 0x0000, 0xF024, 0xFFFF, 0xFFFF, 0x0000, 0x0000, 0xAD09, 0xAD09, 0x0000 }, + /* 0x166 */ { 0x0000, 0x0000, 0x0000, 0xAD07, 0x0000, 0x0000, 0x0000, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF }, + /* 0x167 */ { 0xAD09, 0x0000, 0x0000, 0x0000, 0x0000, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF }, + /* 0x168 */ { 0x0000, 0x0000, 0x0000, 0x0000, 0xAD09, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000 }, + /* 0x169 */ { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xAD09, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF }, + /* 0x16A */ { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xAD09, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000 }, + /* 0x16B */ { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xAD09 }, + /* 0x16C */ { 0x0000, 0x0000, 0xAD07, 0x0000, 0xAD09, 0xAD09, 0x0000, 0x0000, 0xFFFF, 0xFFFF, 0xFFFF, 
0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF }, + /* 0x16D */ { 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0x0000, 0x0000, 0xAD09, 0x0000, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF }, + /* 0x16E */ { 0xAD01, 0xAD01, 0xAD01, 0xAD01, 0xAD01, 0x0000, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF }, + /* 0x16F */ { 0xADE6, 0xADE6, 0xADE6, 0xADE6, 0xADE6, 0xADE6, 0xADE6, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000 }, + /* 0x170 */ { 0xEACC, 0xEACD, 0xEACE, 0xEACF, 0xEAD0, 0xEAD1, 0xEAD2, 0xEAD3, 0xEAD4, 0xEAD5, 0xEAD6, 0xEAD7, 0xEAD8, 0xEAD9, 0xEADA, 0xEADB }, + /* 0x171 */ { 0xEADC, 0xEADD, 0xEADE, 0xEADF, 0xEAE0, 0xEAE1, 0xEAE2, 0xEAE3, 0xEAE4, 0xEAE5, 0xEAE6, 0xEAE7, 0xEAE8, 0xEAE9, 0xEAEA, 0xEAEB }, + /* 0x172 */ { 0xAD06, 0xAD06, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF }, + /* 0x173 */ { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xFFFF, 0xFFFF, 0x0000, 0x0000, 0xAD01, 0x0000 }, + /* 0x174 */ { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xF027, 0xF02A }, + /* 0x175 */ { 0xF02D, 0xF031, 0xF035, 0xF039, 0xF03D, 0xADD8, 0xADD8, 0xAD01, 0xAD01, 0xAD01, 0x0000, 0x0000, 0x0000, 0xADE2, 0xADD8, 0xADD8 }, + /* 0x176 */ { 0xADD8, 0xADD8, 0xADD8, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xADDC, 0xADDC, 0xADDC, 0xADDC, 0xADDC }, + /* 0x177 */ { 0xADDC, 0xADDC, 0xADDC, 0x0000, 0x0000, 0xADE6, 0xADE6, 0xADE6, 0xADE6, 0xADE6, 0xADDC, 0xADDC, 0x0000, 0x0000, 0x0000, 0x0000 }, + /* 0x178 */ { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xADE6, 0xADE6, 0xADE6, 0xADE6, 0x0000, 0x0000 }, + /* 0x179 */ { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xF041, 0xF044, 0xF047, 0xF04B, 0xF04F }, + /* 0x17A */ { 0xF053, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000 }, + /* 0x17B */ { 0x0000, 0x0000, 0xADE6, 0xADE6, 0xADE6, 0x0000, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF }, + /* 0x17C */ { 0xADE6, 0xADE6, 0xADE6, 0xADE6, 0xADE6, 0xADE6, 0xADE6, 0xFFFF, 0xADE6, 0xADE6, 0xADE6, 0xADE6, 0xADE6, 0xADE6, 0xADE6, 0xADE6 }, + /* 0x17D */ { 0xADE6, 0xADE6, 0xADE6, 0xADE6, 0xADE6, 0xADE6, 0xADE6, 0xADE6, 0xADE6, 0xFFFF, 0xFFFF, 0xADE6, 0xADE6, 0xADE6, 0xADE6, 0xADE6 }, + /* 0x17E */ { 0xADE6, 0xADE6, 0xFFFF, 0xADE6, 0xADE6, 0xFFFF, 0xADE6, 0xADE6, 0xADE6, 0xADE6, 0xADE6, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF }, + /* 0x17F */ { 0xADE6, 0xADE6, 0xADE6, 0xADE6, 0xADE6, 0xADE6, 0xADE6, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xFFFF, 0xFFFF }, + /* 0x180 */ { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xADE6, 0xADE6, 0xADE6, 0xADE6 }, + /* 0x181 */ { 0xADDC, 0xADDC, 0xADDC, 0xADDC, 0xADDC, 0xADDC, 0xADDC, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF }, + /* 0x182 */ { 0xEAEC, 0xEAED, 0xEAEE, 0xEAEF, 0xEAF0, 0xEAF1, 0xEAF2, 0xEAF3, 0xEAF4, 0xEAF5, 0xEAF6, 0xEAF7, 0xEAF8, 0xEAF9, 0xEAFA, 0xEAFB }, + /* 0x183 */ { 0xEAFC, 0xEAFD, 0xEAFE, 0xEAFF, 0xEB00, 0xEB01, 0xEB02, 0xEB03, 0xEB04, 0xEB05, 0xEB06, 0xEB07, 0xEB08, 0xEB09, 0xEB0A, 0xEB0B }, + /* 0x184 */ { 0xEB0C, 0xEB0D, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000 }, + /* 
0x185 */ { 0x0000, 0x0000, 0x0000, 0x0000, 0xADE6, 0xADE6, 0xADE6, 0xADE6, 0xADE6, 0xADE6, 0xAD07, 0x0000, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF }, + /* 0x186 */ { 0x4E3D, 0x4E38, 0x4E41, 0xE30E, 0x4F60, 0x4FAE, 0x4FBB, 0x5002, 0x507A, 0x5099, 0x50E7, 0x50CF, 0x349E, 0xE30F, 0x514D, 0x5154 }, + /* 0x187 */ { 0x5164, 0x5177, 0xE310, 0x34B9, 0x5167, 0x518D, 0xE311, 0x5197, 0x51A4, 0x4ECC, 0x51AC, 0x51B5, 0xE312, 0x51F5, 0x5203, 0x34DF }, + /* 0x188 */ { 0x523B, 0x5246, 0x5272, 0x5277, 0x3515, 0x52C7, 0x52C9, 0x52E4, 0x52FA, 0x5305, 0x5306, 0x5317, 0x5349, 0x5351, 0x535A, 0x5373 }, + /* 0x189 */ { 0x537D, 0x537F, 0x537F, 0x537F, 0xE313, 0x7070, 0x53CA, 0x53DF, 0xE314, 0x53EB, 0x53F1, 0x5406, 0x549E, 0x5438, 0x5448, 0x5468 }, + /* 0x18A */ { 0x54A2, 0x54F6, 0x5510, 0x5553, 0x5563, 0x5584, 0x5584, 0x5599, 0x55AB, 0x55B3, 0x55C2, 0x5716, 0x5606, 0x5717, 0x5651, 0x5674 }, + /* 0x18B */ { 0x5207, 0x58EE, 0x57CE, 0x57F4, 0x580D, 0x578B, 0x5832, 0x5831, 0x58AC, 0xE315, 0x58F2, 0x58F7, 0x5906, 0x591A, 0x5922, 0x5962 }, + /* 0x18C */ { 0xE316, 0xE317, 0x59EC, 0x5A1B, 0x5A27, 0x59D8, 0x5A66, 0x36EE, 0x36FC, 0x5B08, 0x5B3E, 0x5B3E, 0xE318, 0x5BC3, 0x5BD8, 0x5BE7 }, + /* 0x18D */ { 0x5BF3, 0xE319, 0x5BFF, 0x5C06, 0x5F53, 0x5C22, 0x3781, 0x5C60, 0x5C6E, 0x5CC0, 0x5C8D, 0xE31A, 0x5D43, 0xE31B, 0x5D6E, 0x5D6B }, + /* 0x18E */ { 0x5D7C, 0x5DE1, 0x5DE2, 0x382F, 0x5DFD, 0x5E28, 0x5E3D, 0x5E69, 0x3862, 0xE31C, 0x387C, 0x5EB0, 0x5EB3, 0x5EB6, 0x5ECA, 0xE31D }, + /* 0x18F */ { 0x5EFE, 0xE31E, 0xE31F, 0x8201, 0x5F22, 0x5F22, 0x38C7, 0xE320, 0xE321, 0x5F62, 0x5F6B, 0x38E3, 0x5F9A, 0x5FCD, 0x5FD7, 0x5FF9 }, + /* 0x190 */ { 0x6081, 0x393A, 0x391C, 0x6094, 0xE322, 0x60C7, 0x6148, 0x614C, 0x614E, 0x614C, 0x617A, 0x618E, 0x61B2, 0x61A4, 0x61AF, 0x61DE }, + /* 0x191 */ { 0x61F2, 0x61F6, 0x6210, 0x621B, 0x625D, 0x62B1, 0x62D4, 0x6350, 0xE323, 0x633D, 0x62FC, 0x6368, 0x6383, 0x63E4, 0xE324, 0x6422 }, + /* 0x192 */ { 0x63C5, 0x63A9, 0x3A2E, 0x6469, 0x647E, 0x649D, 0x6477, 0x3A6C, 0x654F, 0x656C, 0xE325, 0x65E3, 0x66F8, 0x6649, 0x3B19, 0x6691 }, + /* 0x193 */ { 0x3B08, 0x3AE4, 0x5192, 0x5195, 0x6700, 0x669C, 0x80AD, 0x43D9, 0x6717, 0x671B, 0x6721, 0x675E, 0x6753, 0xE326, 0x3B49, 0x67FA }, + /* 0x194 */ { 0x6785, 0x6852, 0x6885, 0xE327, 0x688E, 0x681F, 0x6914, 0x3B9D, 0x6942, 0x69A3, 0x69EA, 0x6AA8, 0xE328, 0x6ADB, 0x3C18, 0x6B21 }, + /* 0x195 */ { 0xE329, 0x6B54, 0x3C4E, 0x6B72, 0x6B9F, 0x6BBA, 0x6BBB, 0xE32A, 0xE32B, 0xE32C, 0x6C4E, 0xE32D, 0x6CBF, 0x6CCD, 0x6C67, 0x6D16 }, + /* 0x196 */ { 0x6D3E, 0x6D77, 0x6D41, 0x6D69, 0x6D78, 0x6D85, 0xE32E, 0x6D34, 0x6E2F, 0x6E6E, 0x3D33, 0x6ECB, 0x6EC7, 0xE32F, 0x6DF9, 0x6F6E }, + /* 0x197 */ { 0xE330, 0xE331, 0x6FC6, 0x7039, 0x701E, 0x701B, 0x3D96, 0x704A, 0x707D, 0x7077, 0x70AD, 0xE332, 0x7145, 0xE333, 0x719C, 0xE334 }, + /* 0x198 */ { 0x7228, 0x7235, 0x7250, 0xE335, 0x7280, 0x7295, 0xE336, 0xE337, 0x737A, 0x738B, 0x3EAC, 0x73A5, 0x3EB8, 0x3EB8, 0x7447, 0x745C }, + /* 0x199 */ { 0x7471, 0x7485, 0x74CA, 0x3F1B, 0x7524, 0xE338, 0x753E, 0xE339, 0x7570, 0xE33A, 0x7610, 0xE33B, 0xE33C, 0xE33D, 0x3FFC, 0x4008 }, + /* 0x19A */ { 0x76F4, 0xE33E, 0xE33F, 0xE340, 0xE341, 0x771E, 0x771F, 0x771F, 0x774A, 0x4039, 0x778B, 0x4046, 0x4096, 0xE342, 0x784E, 0x788C }, + /* 0x19B */ { 0x78CC, 0x40E3, 0xE343, 0x7956, 0xE344, 0xE345, 0x798F, 0x79EB, 0x412F, 0x7A40, 0x7A4A, 0x7A4F, 0xE346, 0xE347, 0xE348, 0x7AEE }, + /* 0x19C */ { 0x4202, 0xE349, 0x7BC6, 0x7BC9, 0x4227, 0xE34A, 0x7CD2, 0x42A0, 0x7CE8, 0x7CE3, 0x7D00, 0xE34B, 0x7D63, 0x4301, 0x7DC7, 0x7E02 }, + /* 0x19D */ { 0x7E45, 0x4334, 0xE34C, 0xE34D, 0x4359, 
0xE34E, 0x7F7A, 0xE34F, 0x7F95, 0x7FFA, 0x8005, 0xE350, 0xE351, 0x8060, 0xE352, 0x8070 }, + /* 0x19E */ { 0xE353, 0x43D5, 0x80B2, 0x8103, 0x440B, 0x813E, 0x5AB5, 0xE354, 0xE355, 0xE356, 0xE357, 0x8201, 0x8204, 0x8F9E, 0x446B, 0x8291 }, + /* 0x19F */ { 0x828B, 0x829D, 0x52B3, 0x82B1, 0x82B3, 0x82BD, 0x82E6, 0xE358, 0x82E5, 0x831D, 0x8363, 0x83AD, 0x8323, 0x83BD, 0x83E7, 0x8457 }, + /* 0x1A0 */ { 0x8353, 0x83CA, 0x83CC, 0x83DC, 0xE359, 0xE35A, 0xE35B, 0x452B, 0x84F1, 0x84F3, 0x8516, 0xE35C, 0x8564, 0xE35D, 0x455D, 0x4561 }, + /* 0x1A1 */ { 0xE35E, 0xE35F, 0x456B, 0x8650, 0x865C, 0x8667, 0x8669, 0x86A9, 0x8688, 0x870E, 0x86E2, 0x8779, 0x8728, 0x876B, 0x8786, 0x45D7 }, + /* 0x1A2 */ { 0x87E1, 0x8801, 0x45F9, 0x8860, 0x8863, 0xE360, 0x88D7, 0x88DE, 0x4635, 0x88FA, 0x34BB, 0xE361, 0xE362, 0x46BE, 0x46C7, 0x8AA0 }, + /* 0x1A3 */ { 0x8AED, 0x8B8A, 0x8C55, 0xE363, 0x8CAB, 0x8CC1, 0x8D1B, 0x8D77, 0xE364, 0xE365, 0x8DCB, 0x8DBC, 0x8DF0, 0xE366, 0x8ED4, 0x8F38 }, + /* 0x1A4 */ { 0xE367, 0xE368, 0x9094, 0x90F1, 0x9111, 0xE369, 0x911B, 0x9238, 0x92D7, 0x92D8, 0x927C, 0x93F9, 0x9415, 0xE36A, 0x958B, 0x4995 }, + /* 0x1A5 */ { 0x95B7, 0xE36B, 0x49E6, 0x96C3, 0x5DB2, 0x9723, 0xE36C, 0xE36D, 0x4A6E, 0x4A76, 0x97E0, 0xE36E, 0x4AB2, 0xE36F, 0x980B, 0x980B }, + /* 0x1A6 */ { 0x9829, 0xE370, 0x98E2, 0x4B33, 0x9929, 0x99A7, 0x99C2, 0x99FE, 0x4BCE, 0xE371, 0x9B12, 0x9C40, 0x9CFD, 0x4CCE, 0x4CED, 0x9D67 }, + /* 0x1A7 */ { 0xE372, 0x4CF8, 0xE373, 0xE374, 0xE375, 0x9EBB, 0x4D56, 0x9EF9, 0x9EFE, 0x9F05, 0x9F0F, 0x9F16, 0x9F3B, 0xE376, 0xFFFF, 0xFFFF } +}; +static uint16_t nfU16InvMasks[128] = { + /* 0x000 */ 0x7800, 0xFFE0, 0x4000, 0xFFFC, 0x8000, 0xF800, 0x0020, 0xFF00, 0x6010, 0x0006, 0x0200, 0x0030, 0x7811, 0x003F, 0xFF80, 0x4011, + /* 0x010 */ 0x0004, 0xFFFE, 0x01FC, 0x6011, 0x3813, 0x38E7, 0x3C00, 0xFF7E, 0x2000, 0x0002, 0x1C00, 0x007F, 0xBF9F, 0xFFF9, 0x000F, 0x0011, + /* 0x020 */ 0x0380, 0xD004, 0xFFE3, 0x0001, 0xF000, 0x0829, 0x0050, 0x0C00, 0xC200, 0xC280, 0x80C2, 0x00C2, 0x0080, 0xE000, 0xFC00, 0xFE00, + /* 0x030 */ 0xFFF0, 0xFFF2, 0xFFC0, 0x000E, 0xC000, 0x3800, 0x000C, 0x0040, 0xDF40, 0x7F00, 0x8080, 0xFFF8, 0x0400, 0x001F, 0x07FF, 0x8181, + /* 0x040 */ 0xFF81, 0x0780, 0x0007, 0x0003, 0x0008, 0xF080, 0x6000, 0x0303, 0xE303, 0xC1FF, 0x1000, 0x4800, 0x0078, 0x0070, 0x1FF0, 0x00F0, + /* 0x050 */ 0x7FF0, 0x02C0, 0x6E40, 0x07C8, 0x7000, 0x7C00, 0x0F00, 0x0110, 0x01C0, 0x00C0, 0x00F8, 0xE1FC, 0x01FF, 0x03F8, 0xDFFC, 0x4280, + /* 0x060 */ 0x1F7E, 0x1800, 0x7FF8, 0x0D80, 0x0090, 0x0300, 0x0100, 0x0480, 0x4B80, 0x0240, 0x7FFC, 0x1F00, 0xFF0F, 0x0180, 0x219B, 0x1400, + /* 0x070 */ 0x0010, 0x1840, 0x2020, 0x8400, 0x03A0, 0x3000, 0x0060, 0x0169, 0xF508, 0x157B, 0x5569, 0x0869, 0xA108, 0x0411, 0xF8E0, 0xFFFD +}; +static uint16_t nfU16Seq2[773][2] = { + /* 0x000 */ {0x0041, 0x0300}, {0x0041, 0x0301}, {0x0041, 0x0302}, {0x0041, 0x0303}, {0x0041, 0x0308}, {0x0041, 0x030A}, + /* 0x006 */ {0x0043, 0x0327}, {0x0045, 0x0300}, {0x0045, 0x0301}, {0x0045, 0x0302}, {0x0045, 0x0308}, {0x0049, 0x0300}, + /* 0x00C */ {0x0049, 0x0301}, {0x0049, 0x0302}, {0x0049, 0x0308}, {0x004E, 0x0303}, {0x004F, 0x0300}, {0x004F, 0x0301}, + /* 0x012 */ {0x004F, 0x0302}, {0x004F, 0x0303}, {0x004F, 0x0308}, {0x0055, 0x0300}, {0x0055, 0x0301}, {0x0055, 0x0302}, + /* 0x018 */ {0x0055, 0x0308}, {0x0059, 0x0301}, {0x0073, 0x0073}, {0x0061, 0x0300}, {0x0061, 0x0301}, {0x0061, 0x0302}, + /* 0x01E */ {0x0061, 0x0303}, {0x0061, 0x0308}, {0x0061, 0x030A}, {0x0063, 0x0327}, {0x0065, 0x0300}, {0x0065, 0x0301}, + /* 0x024 */ {0x0065, 0x0302}, {0x0065, 0x0308}, 
{0x0069, 0x0300}, {0x0069, 0x0301}, {0x0069, 0x0302}, {0x0069, 0x0308}, + /* 0x02A */ {0x006E, 0x0303}, {0x006F, 0x0300}, {0x006F, 0x0301}, {0x006F, 0x0302}, {0x006F, 0x0303}, {0x006F, 0x0308}, + /* 0x030 */ {0x0075, 0x0300}, {0x0075, 0x0301}, {0x0075, 0x0302}, {0x0075, 0x0308}, {0x0079, 0x0301}, {0x0079, 0x0308}, + /* 0x036 */ {0x0041, 0x0304}, {0x0061, 0x0304}, {0x0041, 0x0306}, {0x0061, 0x0306}, {0x0041, 0x0328}, {0x0061, 0x0328}, + /* 0x03C */ {0x0043, 0x0301}, {0x0063, 0x0301}, {0x0043, 0x0302}, {0x0063, 0x0302}, {0x0043, 0x0307}, {0x0063, 0x0307}, + /* 0x042 */ {0x0043, 0x030C}, {0x0063, 0x030C}, {0x0044, 0x030C}, {0x0064, 0x030C}, {0x0045, 0x0304}, {0x0065, 0x0304}, + /* 0x048 */ {0x0045, 0x0306}, {0x0065, 0x0306}, {0x0045, 0x0307}, {0x0065, 0x0307}, {0x0045, 0x0328}, {0x0065, 0x0328}, + /* 0x04E */ {0x0045, 0x030C}, {0x0065, 0x030C}, {0x0047, 0x0302}, {0x0067, 0x0302}, {0x0047, 0x0306}, {0x0067, 0x0306}, + /* 0x054 */ {0x0047, 0x0307}, {0x0067, 0x0307}, {0x0047, 0x0327}, {0x0067, 0x0327}, {0x0048, 0x0302}, {0x0068, 0x0302}, + /* 0x05A */ {0x0049, 0x0303}, {0x0069, 0x0303}, {0x0049, 0x0304}, {0x0069, 0x0304}, {0x0049, 0x0306}, {0x0069, 0x0306}, + /* 0x060 */ {0x0049, 0x0328}, {0x0069, 0x0328}, {0x0049, 0x0307}, {0x004A, 0x0302}, {0x006A, 0x0302}, {0x004B, 0x0327}, + /* 0x066 */ {0x006B, 0x0327}, {0x004C, 0x0301}, {0x006C, 0x0301}, {0x004C, 0x0327}, {0x006C, 0x0327}, {0x004C, 0x030C}, + /* 0x06C */ {0x006C, 0x030C}, {0x004E, 0x0301}, {0x006E, 0x0301}, {0x004E, 0x0327}, {0x006E, 0x0327}, {0x004E, 0x030C}, + /* 0x072 */ {0x006E, 0x030C}, {0x02BC, 0x006E}, {0x004F, 0x0304}, {0x006F, 0x0304}, {0x004F, 0x0306}, {0x006F, 0x0306}, + /* 0x078 */ {0x004F, 0x030B}, {0x006F, 0x030B}, {0x0052, 0x0301}, {0x0072, 0x0301}, {0x0052, 0x0327}, {0x0072, 0x0327}, + /* 0x07E */ {0x0052, 0x030C}, {0x0072, 0x030C}, {0x0053, 0x0301}, {0x0073, 0x0301}, {0x0053, 0x0302}, {0x0073, 0x0302}, + /* 0x084 */ {0x0053, 0x0327}, {0x0073, 0x0327}, {0x0053, 0x030C}, {0x0073, 0x030C}, {0x0054, 0x0327}, {0x0074, 0x0327}, + /* 0x08A */ {0x0054, 0x030C}, {0x0074, 0x030C}, {0x0055, 0x0303}, {0x0075, 0x0303}, {0x0055, 0x0304}, {0x0075, 0x0304}, + /* 0x090 */ {0x0055, 0x0306}, {0x0075, 0x0306}, {0x0055, 0x030A}, {0x0075, 0x030A}, {0x0055, 0x030B}, {0x0075, 0x030B}, + /* 0x096 */ {0x0055, 0x0328}, {0x0075, 0x0328}, {0x0057, 0x0302}, {0x0077, 0x0302}, {0x0059, 0x0302}, {0x0079, 0x0302}, + /* 0x09C */ {0x0059, 0x0308}, {0x005A, 0x0301}, {0x007A, 0x0301}, {0x005A, 0x0307}, {0x007A, 0x0307}, {0x005A, 0x030C}, + /* 0x0A2 */ {0x007A, 0x030C}, {0x004F, 0x031B}, {0x006F, 0x031B}, {0x0055, 0x031B}, {0x0075, 0x031B}, {0x0041, 0x030C}, + /* 0x0A8 */ {0x0061, 0x030C}, {0x0049, 0x030C}, {0x0069, 0x030C}, {0x004F, 0x030C}, {0x006F, 0x030C}, {0x0055, 0x030C}, + /* 0x0AE */ {0x0075, 0x030C}, {0x00C6, 0x0304}, {0x00E6, 0x0304}, {0x0047, 0x030C}, {0x0067, 0x030C}, {0x004B, 0x030C}, + /* 0x0B4 */ {0x006B, 0x030C}, {0x004F, 0x0328}, {0x006F, 0x0328}, {0x01B7, 0x030C}, {0x0292, 0x030C}, {0x006A, 0x030C}, + /* 0x0BA */ {0x0047, 0x0301}, {0x0067, 0x0301}, {0x004E, 0x0300}, {0x006E, 0x0300}, {0x00C6, 0x0301}, {0x00E6, 0x0301}, + /* 0x0C0 */ {0x00D8, 0x0301}, {0x00F8, 0x0301}, {0x0041, 0x030F}, {0x0061, 0x030F}, {0x0041, 0x0311}, {0x0061, 0x0311}, + /* 0x0C6 */ {0x0045, 0x030F}, {0x0065, 0x030F}, {0x0045, 0x0311}, {0x0065, 0x0311}, {0x0049, 0x030F}, {0x0069, 0x030F}, + /* 0x0CC */ {0x0049, 0x0311}, {0x0069, 0x0311}, {0x004F, 0x030F}, {0x006F, 0x030F}, {0x004F, 0x0311}, {0x006F, 0x0311}, + /* 0x0D2 */ {0x0052, 0x030F}, {0x0072, 0x030F}, {0x0052, 
0x0311}, {0x0072, 0x0311}, {0x0055, 0x030F}, {0x0075, 0x030F}, + /* 0x0D8 */ {0x0055, 0x0311}, {0x0075, 0x0311}, {0x0053, 0x0326}, {0x0073, 0x0326}, {0x0054, 0x0326}, {0x0074, 0x0326}, + /* 0x0DE */ {0x0048, 0x030C}, {0x0068, 0x030C}, {0x0041, 0x0307}, {0x0061, 0x0307}, {0x0045, 0x0327}, {0x0065, 0x0327}, + /* 0x0E4 */ {0x004F, 0x0307}, {0x006F, 0x0307}, {0x0059, 0x0304}, {0x0079, 0x0304}, {0x00A8, 0x0301}, {0x0391, 0x0301}, + /* 0x0EA */ {0x0395, 0x0301}, {0x0397, 0x0301}, {0x0399, 0x0301}, {0x039F, 0x0301}, {0x03A5, 0x0301}, {0x03A9, 0x0301}, + /* 0x0F0 */ {0x0399, 0x0308}, {0x03A5, 0x0308}, {0x03B1, 0x0301}, {0x03B5, 0x0301}, {0x03B7, 0x0301}, {0x03B9, 0x0301}, + /* 0x0F6 */ {0x03B9, 0x0308}, {0x03C5, 0x0308}, {0x03BF, 0x0301}, {0x03C5, 0x0301}, {0x03C9, 0x0301}, {0x03D2, 0x0301}, + /* 0x0FC */ {0x03D2, 0x0308}, {0x0415, 0x0300}, {0x0415, 0x0308}, {0x0413, 0x0301}, {0x0406, 0x0308}, {0x041A, 0x0301}, + /* 0x102 */ {0x0418, 0x0300}, {0x0423, 0x0306}, {0x0418, 0x0306}, {0x0438, 0x0306}, {0x0435, 0x0300}, {0x0435, 0x0308}, + /* 0x108 */ {0x0433, 0x0301}, {0x0456, 0x0308}, {0x043A, 0x0301}, {0x0438, 0x0300}, {0x0443, 0x0306}, {0x0474, 0x030F}, + /* 0x10E */ {0x0475, 0x030F}, {0x0416, 0x0306}, {0x0436, 0x0306}, {0x0410, 0x0306}, {0x0430, 0x0306}, {0x0410, 0x0308}, + /* 0x114 */ {0x0430, 0x0308}, {0x0415, 0x0306}, {0x0435, 0x0306}, {0x04D8, 0x0308}, {0x04D9, 0x0308}, {0x0416, 0x0308}, + /* 0x11A */ {0x0436, 0x0308}, {0x0417, 0x0308}, {0x0437, 0x0308}, {0x0418, 0x0304}, {0x0438, 0x0304}, {0x0418, 0x0308}, + /* 0x120 */ {0x0438, 0x0308}, {0x041E, 0x0308}, {0x043E, 0x0308}, {0x04E8, 0x0308}, {0x04E9, 0x0308}, {0x042D, 0x0308}, + /* 0x126 */ {0x044D, 0x0308}, {0x0423, 0x0304}, {0x0443, 0x0304}, {0x0423, 0x0308}, {0x0443, 0x0308}, {0x0423, 0x030B}, + /* 0x12C */ {0x0443, 0x030B}, {0x0427, 0x0308}, {0x0447, 0x0308}, {0x042B, 0x0308}, {0x044B, 0x0308}, {0x0565, 0x0582}, + /* 0x132 */ {0x0627, 0x0653}, {0x0627, 0x0654}, {0x0648, 0x0654}, {0x0627, 0x0655}, {0x064A, 0x0654}, {0x06D5, 0x0654}, + /* 0x138 */ {0x06C1, 0x0654}, {0x06D2, 0x0654}, {0x0928, 0x093C}, {0x0930, 0x093C}, {0x0933, 0x093C}, {0x0915, 0x093C}, + /* 0x13E */ {0x0916, 0x093C}, {0x0917, 0x093C}, {0x091C, 0x093C}, {0x0921, 0x093C}, {0x0922, 0x093C}, {0x092B, 0x093C}, + /* 0x144 */ {0x092F, 0x093C}, {0x09C7, 0x09BE}, {0x09C7, 0x09D7}, {0x09A1, 0x09BC}, {0x09A2, 0x09BC}, {0x09AF, 0x09BC}, + /* 0x14A */ {0x0A32, 0x0A3C}, {0x0A38, 0x0A3C}, {0x0A16, 0x0A3C}, {0x0A17, 0x0A3C}, {0x0A1C, 0x0A3C}, {0x0A2B, 0x0A3C}, + /* 0x150 */ {0x0B47, 0x0B56}, {0x0B47, 0x0B3E}, {0x0B47, 0x0B57}, {0x0B21, 0x0B3C}, {0x0B22, 0x0B3C}, {0x0B92, 0x0BD7}, + /* 0x156 */ {0x0BC6, 0x0BBE}, {0x0BC7, 0x0BBE}, {0x0BC6, 0x0BD7}, {0x0C46, 0x0C56}, {0x0CBF, 0x0CD5}, {0x0CC6, 0x0CD5}, + /* 0x15C */ {0x0CC6, 0x0CD6}, {0x0CC6, 0x0CC2}, {0x0D46, 0x0D3E}, {0x0D47, 0x0D3E}, {0x0D46, 0x0D57}, {0x0DD9, 0x0DCA}, + /* 0x162 */ {0x0DD9, 0x0DCF}, {0x0DD9, 0x0DDF}, {0x0F42, 0x0FB7}, {0x0F4C, 0x0FB7}, {0x0F51, 0x0FB7}, {0x0F56, 0x0FB7}, + /* 0x168 */ {0x0F5B, 0x0FB7}, {0x0F40, 0x0FB5}, {0x0FB2, 0x0F80}, {0x0FB3, 0x0F80}, {0x0F92, 0x0FB7}, {0x0F9C, 0x0FB7}, + /* 0x16E */ {0x0FA1, 0x0FB7}, {0x0FA6, 0x0FB7}, {0x0FAB, 0x0FB7}, {0x0F90, 0x0FB5}, {0x1025, 0x102E}, {0x1B05, 0x1B35}, + /* 0x174 */ {0x1B07, 0x1B35}, {0x1B09, 0x1B35}, {0x1B0B, 0x1B35}, {0x1B0D, 0x1B35}, {0x1B11, 0x1B35}, {0x1B3A, 0x1B35}, + /* 0x17A */ {0x1B3C, 0x1B35}, {0x1B3E, 0x1B35}, {0x1B3F, 0x1B35}, {0x1B42, 0x1B35}, {0x0041, 0x0325}, {0x0061, 0x0325}, + /* 0x180 */ {0x0042, 0x0307}, {0x0062, 0x0307}, {0x0042, 0x0323}, 
{0x0062, 0x0323}, {0x0042, 0x0331}, {0x0062, 0x0331}, + /* 0x186 */ {0x0044, 0x0307}, {0x0064, 0x0307}, {0x0044, 0x0323}, {0x0064, 0x0323}, {0x0044, 0x0331}, {0x0064, 0x0331}, + /* 0x18C */ {0x0044, 0x0327}, {0x0064, 0x0327}, {0x0044, 0x032D}, {0x0064, 0x032D}, {0x0045, 0x032D}, {0x0065, 0x032D}, + /* 0x192 */ {0x0045, 0x0330}, {0x0065, 0x0330}, {0x0046, 0x0307}, {0x0066, 0x0307}, {0x0047, 0x0304}, {0x0067, 0x0304}, + /* 0x198 */ {0x0048, 0x0307}, {0x0068, 0x0307}, {0x0048, 0x0323}, {0x0068, 0x0323}, {0x0048, 0x0308}, {0x0068, 0x0308}, + /* 0x19E */ {0x0048, 0x0327}, {0x0068, 0x0327}, {0x0048, 0x032E}, {0x0068, 0x032E}, {0x0049, 0x0330}, {0x0069, 0x0330}, + /* 0x1A4 */ {0x004B, 0x0301}, {0x006B, 0x0301}, {0x004B, 0x0323}, {0x006B, 0x0323}, {0x004B, 0x0331}, {0x006B, 0x0331}, + /* 0x1AA */ {0x004C, 0x0323}, {0x006C, 0x0323}, {0x004C, 0x0331}, {0x006C, 0x0331}, {0x004C, 0x032D}, {0x006C, 0x032D}, + /* 0x1B0 */ {0x004D, 0x0301}, {0x006D, 0x0301}, {0x004D, 0x0307}, {0x006D, 0x0307}, {0x004D, 0x0323}, {0x006D, 0x0323}, + /* 0x1B6 */ {0x004E, 0x0307}, {0x006E, 0x0307}, {0x004E, 0x0323}, {0x006E, 0x0323}, {0x004E, 0x0331}, {0x006E, 0x0331}, + /* 0x1BC */ {0x004E, 0x032D}, {0x006E, 0x032D}, {0x0050, 0x0301}, {0x0070, 0x0301}, {0x0050, 0x0307}, {0x0070, 0x0307}, + /* 0x1C2 */ {0x0052, 0x0307}, {0x0072, 0x0307}, {0x0052, 0x0323}, {0x0072, 0x0323}, {0x0052, 0x0331}, {0x0072, 0x0331}, + /* 0x1C8 */ {0x0053, 0x0307}, {0x0073, 0x0307}, {0x0053, 0x0323}, {0x0073, 0x0323}, {0x0054, 0x0307}, {0x0074, 0x0307}, + /* 0x1CE */ {0x0054, 0x0323}, {0x0074, 0x0323}, {0x0054, 0x0331}, {0x0074, 0x0331}, {0x0054, 0x032D}, {0x0074, 0x032D}, + /* 0x1D4 */ {0x0055, 0x0324}, {0x0075, 0x0324}, {0x0055, 0x0330}, {0x0075, 0x0330}, {0x0055, 0x032D}, {0x0075, 0x032D}, + /* 0x1DA */ {0x0056, 0x0303}, {0x0076, 0x0303}, {0x0056, 0x0323}, {0x0076, 0x0323}, {0x0057, 0x0300}, {0x0077, 0x0300}, + /* 0x1E0 */ {0x0057, 0x0301}, {0x0077, 0x0301}, {0x0057, 0x0308}, {0x0077, 0x0308}, {0x0057, 0x0307}, {0x0077, 0x0307}, + /* 0x1E6 */ {0x0057, 0x0323}, {0x0077, 0x0323}, {0x0058, 0x0307}, {0x0078, 0x0307}, {0x0058, 0x0308}, {0x0078, 0x0308}, + /* 0x1EC */ {0x0059, 0x0307}, {0x0079, 0x0307}, {0x005A, 0x0302}, {0x007A, 0x0302}, {0x005A, 0x0323}, {0x007A, 0x0323}, + /* 0x1F2 */ {0x005A, 0x0331}, {0x007A, 0x0331}, {0x0068, 0x0331}, {0x0074, 0x0308}, {0x0077, 0x030A}, {0x0079, 0x030A}, + /* 0x1F8 */ {0x0061, 0x02BE}, {0x017F, 0x0307}, {0x0073, 0x0073}, {0x0041, 0x0323}, {0x0061, 0x0323}, {0x0041, 0x0309}, + /* 0x1FE */ {0x0061, 0x0309}, {0x0045, 0x0323}, {0x0065, 0x0323}, {0x0045, 0x0309}, {0x0065, 0x0309}, {0x0045, 0x0303}, + /* 0x204 */ {0x0065, 0x0303}, {0x0049, 0x0309}, {0x0069, 0x0309}, {0x0049, 0x0323}, {0x0069, 0x0323}, {0x004F, 0x0323}, + /* 0x20A */ {0x006F, 0x0323}, {0x004F, 0x0309}, {0x006F, 0x0309}, {0x0055, 0x0323}, {0x0075, 0x0323}, {0x0055, 0x0309}, + /* 0x210 */ {0x0075, 0x0309}, {0x0059, 0x0300}, {0x0079, 0x0300}, {0x0059, 0x0323}, {0x0079, 0x0323}, {0x0059, 0x0309}, + /* 0x216 */ {0x0079, 0x0309}, {0x0059, 0x0303}, {0x0079, 0x0303}, {0x03B1, 0x0313}, {0x03B1, 0x0314}, {0x0391, 0x0313}, + /* 0x21C */ {0x0391, 0x0314}, {0x03B5, 0x0313}, {0x03B5, 0x0314}, {0x0395, 0x0313}, {0x0395, 0x0314}, {0x03B7, 0x0313}, + /* 0x222 */ {0x03B7, 0x0314}, {0x0397, 0x0313}, {0x0397, 0x0314}, {0x03B9, 0x0313}, {0x03B9, 0x0314}, {0x0399, 0x0313}, + /* 0x228 */ {0x0399, 0x0314}, {0x03BF, 0x0313}, {0x03BF, 0x0314}, {0x039F, 0x0313}, {0x039F, 0x0314}, {0x03C5, 0x0313}, + /* 0x22E */ {0x03C5, 0x0314}, {0x03A5, 0x0314}, {0x03C9, 0x0313}, {0x03C9, 
0x0314}, {0x03A9, 0x0313}, {0x03A9, 0x0314}, + /* 0x234 */ {0x03B1, 0x0300}, {0x03B1, 0x0301}, {0x03B5, 0x0300}, {0x03B5, 0x0301}, {0x03B7, 0x0300}, {0x03B7, 0x0301}, + /* 0x23A */ {0x03B9, 0x0300}, {0x03B9, 0x0301}, {0x03BF, 0x0300}, {0x03BF, 0x0301}, {0x03C5, 0x0300}, {0x03C5, 0x0301}, + /* 0x240 */ {0x03C9, 0x0300}, {0x03C9, 0x0301}, {0x03B1, 0x0306}, {0x03B1, 0x0304}, {0x03B1, 0x0345}, {0x03B1, 0x0342}, + /* 0x246 */ {0x0391, 0x0306}, {0x0391, 0x0304}, {0x0391, 0x0300}, {0x0391, 0x0301}, {0x0391, 0x0345}, {0x00A8, 0x0342}, + /* 0x24C */ {0x03B7, 0x0345}, {0x03B7, 0x0342}, {0x0395, 0x0300}, {0x0395, 0x0301}, {0x0397, 0x0300}, {0x0397, 0x0301}, + /* 0x252 */ {0x0397, 0x0345}, {0x1FBF, 0x0300}, {0x1FBF, 0x0301}, {0x1FBF, 0x0342}, {0x03B9, 0x0306}, {0x03B9, 0x0304}, + /* 0x258 */ {0x03B9, 0x0342}, {0x0399, 0x0306}, {0x0399, 0x0304}, {0x0399, 0x0300}, {0x0399, 0x0301}, {0x1FFE, 0x0300}, + /* 0x25E */ {0x1FFE, 0x0301}, {0x1FFE, 0x0342}, {0x03C5, 0x0306}, {0x03C5, 0x0304}, {0x03C1, 0x0313}, {0x03C1, 0x0314}, + /* 0x264 */ {0x03C5, 0x0342}, {0x03A5, 0x0306}, {0x03A5, 0x0304}, {0x03A5, 0x0300}, {0x03A5, 0x0301}, {0x03A1, 0x0314}, + /* 0x26A */ {0x00A8, 0x0300}, {0x00A8, 0x0301}, {0x03C9, 0x0345}, {0x03C9, 0x0342}, {0x039F, 0x0300}, {0x039F, 0x0301}, + /* 0x270 */ {0x03A9, 0x0300}, {0x03A9, 0x0301}, {0x03A9, 0x0345}, {0x0041, 0x030A}, {0x2190, 0x0338}, {0x2192, 0x0338}, + /* 0x276 */ {0x2194, 0x0338}, {0x21D0, 0x0338}, {0x21D4, 0x0338}, {0x21D2, 0x0338}, {0x2203, 0x0338}, {0x2208, 0x0338}, + /* 0x27C */ {0x220B, 0x0338}, {0x2223, 0x0338}, {0x2225, 0x0338}, {0x223C, 0x0338}, {0x2243, 0x0338}, {0x2245, 0x0338}, + /* 0x282 */ {0x2248, 0x0338}, {0x003D, 0x0338}, {0x2261, 0x0338}, {0x224D, 0x0338}, {0x003C, 0x0338}, {0x003E, 0x0338}, + /* 0x288 */ {0x2264, 0x0338}, {0x2265, 0x0338}, {0x2272, 0x0338}, {0x2273, 0x0338}, {0x2276, 0x0338}, {0x2277, 0x0338}, + /* 0x28E */ {0x227A, 0x0338}, {0x227B, 0x0338}, {0x2282, 0x0338}, {0x2283, 0x0338}, {0x2286, 0x0338}, {0x2287, 0x0338}, + /* 0x294 */ {0x22A2, 0x0338}, {0x22A8, 0x0338}, {0x22A9, 0x0338}, {0x22AB, 0x0338}, {0x227C, 0x0338}, {0x227D, 0x0338}, + /* 0x29A */ {0x2291, 0x0338}, {0x2292, 0x0338}, {0x22B2, 0x0338}, {0x22B3, 0x0338}, {0x22B4, 0x0338}, {0x22B5, 0x0338}, + /* 0x2A0 */ {0x2ADD, 0x0338}, {0x304B, 0x3099}, {0x304D, 0x3099}, {0x304F, 0x3099}, {0x3051, 0x3099}, {0x3053, 0x3099}, + /* 0x2A6 */ {0x3055, 0x3099}, {0x3057, 0x3099}, {0x3059, 0x3099}, {0x305B, 0x3099}, {0x305D, 0x3099}, {0x305F, 0x3099}, + /* 0x2AC */ {0x3061, 0x3099}, {0x3064, 0x3099}, {0x3066, 0x3099}, {0x3068, 0x3099}, {0x306F, 0x3099}, {0x306F, 0x309A}, + /* 0x2B2 */ {0x3072, 0x3099}, {0x3072, 0x309A}, {0x3075, 0x3099}, {0x3075, 0x309A}, {0x3078, 0x3099}, {0x3078, 0x309A}, + /* 0x2B8 */ {0x307B, 0x3099}, {0x307B, 0x309A}, {0x3046, 0x3099}, {0x309D, 0x3099}, {0x30AB, 0x3099}, {0x30AD, 0x3099}, + /* 0x2BE */ {0x30AF, 0x3099}, {0x30B1, 0x3099}, {0x30B3, 0x3099}, {0x30B5, 0x3099}, {0x30B7, 0x3099}, {0x30B9, 0x3099}, + /* 0x2C4 */ {0x30BB, 0x3099}, {0x30BD, 0x3099}, {0x30BF, 0x3099}, {0x30C1, 0x3099}, {0x30C4, 0x3099}, {0x30C6, 0x3099}, + /* 0x2CA */ {0x30C8, 0x3099}, {0x30CF, 0x3099}, {0x30CF, 0x309A}, {0x30D2, 0x3099}, {0x30D2, 0x309A}, {0x30D5, 0x3099}, + /* 0x2D0 */ {0x30D5, 0x309A}, {0x30D8, 0x3099}, {0x30D8, 0x309A}, {0x30DB, 0x3099}, {0x30DB, 0x309A}, {0x30A6, 0x3099}, + /* 0x2D6 */ {0x30EF, 0x3099}, {0x30F0, 0x3099}, {0x30F1, 0x3099}, {0x30F2, 0x3099}, {0x30FD, 0x3099}, {0x0066, 0x0066}, + /* 0x2DC */ {0x0066, 0x0069}, {0x0066, 0x006C}, {0x0073, 0x0074}, {0x0073, 0x0074}, 
{0x0574, 0x0576}, {0x0574, 0x0565}, + /* 0x2E2 */ {0x0574, 0x056B}, {0x057E, 0x0576}, {0x0574, 0x056D}, {0x05D9, 0x05B4}, {0x05F2, 0x05B7}, {0x05E9, 0x05C1}, + /* 0x2E8 */ {0x05E9, 0x05C2}, {0x05D0, 0x05B7}, {0x05D0, 0x05B8}, {0x05D0, 0x05BC}, {0x05D1, 0x05BC}, {0x05D2, 0x05BC}, + /* 0x2EE */ {0x05D3, 0x05BC}, {0x05D4, 0x05BC}, {0x05D5, 0x05BC}, {0x05D6, 0x05BC}, {0x05D8, 0x05BC}, {0x05D9, 0x05BC}, + /* 0x2F4 */ {0x05DA, 0x05BC}, {0x05DB, 0x05BC}, {0x05DC, 0x05BC}, {0x05DE, 0x05BC}, {0x05E0, 0x05BC}, {0x05E1, 0x05BC}, + /* 0x2FA */ {0x05E3, 0x05BC}, {0x05E4, 0x05BC}, {0x05E6, 0x05BC}, {0x05E7, 0x05BC}, {0x05E8, 0x05BC}, {0x05E9, 0x05BC}, + /* 0x300 */ {0x05EA, 0x05BC}, {0x05D5, 0x05B9}, {0x05D1, 0x05BF}, {0x05DB, 0x05BF}, {0x05E4, 0x05BF} +}; +static uint16_t nfU16Seq3[222][3] = { + /* 0x000 */ {0x0055, 0x0308, 0x0304}, {0x0075, 0x0308, 0x0304}, {0x0055, 0x0308, 0x0301}, {0x0075, 0x0308, 0x0301}, + /* 0x004 */ {0x0055, 0x0308, 0x030C}, {0x0075, 0x0308, 0x030C}, {0x0055, 0x0308, 0x0300}, {0x0075, 0x0308, 0x0300}, + /* 0x008 */ {0x0041, 0x0308, 0x0304}, {0x0061, 0x0308, 0x0304}, {0x0041, 0x0307, 0x0304}, {0x0061, 0x0307, 0x0304}, + /* 0x00C */ {0x004F, 0x0328, 0x0304}, {0x006F, 0x0328, 0x0304}, {0x0041, 0x030A, 0x0301}, {0x0061, 0x030A, 0x0301}, + /* 0x010 */ {0x004F, 0x0308, 0x0304}, {0x006F, 0x0308, 0x0304}, {0x004F, 0x0303, 0x0304}, {0x006F, 0x0303, 0x0304}, + /* 0x014 */ {0x004F, 0x0307, 0x0304}, {0x006F, 0x0307, 0x0304}, {0x03B9, 0x0308, 0x0301}, {0x03C5, 0x0308, 0x0301}, + /* 0x018 */ {0x0CC6, 0x0CC2, 0x0CD5}, {0x0DD9, 0x0DCF, 0x0DCA}, {0x0043, 0x0327, 0x0301}, {0x0063, 0x0327, 0x0301}, + /* 0x01C */ {0x0045, 0x0304, 0x0300}, {0x0065, 0x0304, 0x0300}, {0x0045, 0x0304, 0x0301}, {0x0065, 0x0304, 0x0301}, + /* 0x020 */ {0x0045, 0x0327, 0x0306}, {0x0065, 0x0327, 0x0306}, {0x0049, 0x0308, 0x0301}, {0x0069, 0x0308, 0x0301}, + /* 0x024 */ {0x004C, 0x0323, 0x0304}, {0x006C, 0x0323, 0x0304}, {0x004F, 0x0303, 0x0301}, {0x006F, 0x0303, 0x0301}, + /* 0x028 */ {0x004F, 0x0303, 0x0308}, {0x006F, 0x0303, 0x0308}, {0x004F, 0x0304, 0x0300}, {0x006F, 0x0304, 0x0300}, + /* 0x02C */ {0x004F, 0x0304, 0x0301}, {0x006F, 0x0304, 0x0301}, {0x0052, 0x0323, 0x0304}, {0x0072, 0x0323, 0x0304}, + /* 0x030 */ {0x0053, 0x0301, 0x0307}, {0x0073, 0x0301, 0x0307}, {0x0053, 0x030C, 0x0307}, {0x0073, 0x030C, 0x0307}, + /* 0x034 */ {0x0053, 0x0323, 0x0307}, {0x0073, 0x0323, 0x0307}, {0x0055, 0x0303, 0x0301}, {0x0075, 0x0303, 0x0301}, + /* 0x038 */ {0x0055, 0x0304, 0x0308}, {0x0075, 0x0304, 0x0308}, {0x0041, 0x0302, 0x0301}, {0x0061, 0x0302, 0x0301}, + /* 0x03C */ {0x0041, 0x0302, 0x0300}, {0x0061, 0x0302, 0x0300}, {0x0041, 0x0302, 0x0309}, {0x0061, 0x0302, 0x0309}, + /* 0x040 */ {0x0041, 0x0302, 0x0303}, {0x0061, 0x0302, 0x0303}, {0x0041, 0x0323, 0x0302}, {0x0061, 0x0323, 0x0302}, + /* 0x044 */ {0x0041, 0x0306, 0x0301}, {0x0061, 0x0306, 0x0301}, {0x0041, 0x0306, 0x0300}, {0x0061, 0x0306, 0x0300}, + /* 0x048 */ {0x0041, 0x0306, 0x0309}, {0x0061, 0x0306, 0x0309}, {0x0041, 0x0306, 0x0303}, {0x0061, 0x0306, 0x0303}, + /* 0x04C */ {0x0041, 0x0323, 0x0306}, {0x0061, 0x0323, 0x0306}, {0x0045, 0x0302, 0x0301}, {0x0065, 0x0302, 0x0301}, + /* 0x050 */ {0x0045, 0x0302, 0x0300}, {0x0065, 0x0302, 0x0300}, {0x0045, 0x0302, 0x0309}, {0x0065, 0x0302, 0x0309}, + /* 0x054 */ {0x0045, 0x0302, 0x0303}, {0x0065, 0x0302, 0x0303}, {0x0045, 0x0323, 0x0302}, {0x0065, 0x0323, 0x0302}, + /* 0x058 */ {0x004F, 0x0302, 0x0301}, {0x006F, 0x0302, 0x0301}, {0x004F, 0x0302, 0x0300}, {0x006F, 0x0302, 0x0300}, + /* 0x05C */ {0x004F, 0x0302, 0x0309}, {0x006F, 
0x0302, 0x0309}, {0x004F, 0x0302, 0x0303}, {0x006F, 0x0302, 0x0303}, + /* 0x060 */ {0x004F, 0x0323, 0x0302}, {0x006F, 0x0323, 0x0302}, {0x004F, 0x031B, 0x0301}, {0x006F, 0x031B, 0x0301}, + /* 0x064 */ {0x004F, 0x031B, 0x0300}, {0x006F, 0x031B, 0x0300}, {0x004F, 0x031B, 0x0309}, {0x006F, 0x031B, 0x0309}, + /* 0x068 */ {0x004F, 0x031B, 0x0303}, {0x006F, 0x031B, 0x0303}, {0x004F, 0x031B, 0x0323}, {0x006F, 0x031B, 0x0323}, + /* 0x06C */ {0x0055, 0x031B, 0x0301}, {0x0075, 0x031B, 0x0301}, {0x0055, 0x031B, 0x0300}, {0x0075, 0x031B, 0x0300}, + /* 0x070 */ {0x0055, 0x031B, 0x0309}, {0x0075, 0x031B, 0x0309}, {0x0055, 0x031B, 0x0303}, {0x0075, 0x031B, 0x0303}, + /* 0x074 */ {0x0055, 0x031B, 0x0323}, {0x0075, 0x031B, 0x0323}, {0x03B1, 0x0313, 0x0300}, {0x03B1, 0x0314, 0x0300}, + /* 0x078 */ {0x03B1, 0x0313, 0x0301}, {0x03B1, 0x0314, 0x0301}, {0x03B1, 0x0313, 0x0342}, {0x03B1, 0x0314, 0x0342}, + /* 0x07C */ {0x0391, 0x0313, 0x0300}, {0x0391, 0x0314, 0x0300}, {0x0391, 0x0313, 0x0301}, {0x0391, 0x0314, 0x0301}, + /* 0x080 */ {0x0391, 0x0313, 0x0342}, {0x0391, 0x0314, 0x0342}, {0x03B5, 0x0313, 0x0300}, {0x03B5, 0x0314, 0x0300}, + /* 0x084 */ {0x03B5, 0x0313, 0x0301}, {0x03B5, 0x0314, 0x0301}, {0x0395, 0x0313, 0x0300}, {0x0395, 0x0314, 0x0300}, + /* 0x088 */ {0x0395, 0x0313, 0x0301}, {0x0395, 0x0314, 0x0301}, {0x03B7, 0x0313, 0x0300}, {0x03B7, 0x0314, 0x0300}, + /* 0x08C */ {0x03B7, 0x0313, 0x0301}, {0x03B7, 0x0314, 0x0301}, {0x03B7, 0x0313, 0x0342}, {0x03B7, 0x0314, 0x0342}, + /* 0x090 */ {0x0397, 0x0313, 0x0300}, {0x0397, 0x0314, 0x0300}, {0x0397, 0x0313, 0x0301}, {0x0397, 0x0314, 0x0301}, + /* 0x094 */ {0x0397, 0x0313, 0x0342}, {0x0397, 0x0314, 0x0342}, {0x03B9, 0x0313, 0x0300}, {0x03B9, 0x0314, 0x0300}, + /* 0x098 */ {0x03B9, 0x0313, 0x0301}, {0x03B9, 0x0314, 0x0301}, {0x03B9, 0x0313, 0x0342}, {0x03B9, 0x0314, 0x0342}, + /* 0x09C */ {0x0399, 0x0313, 0x0300}, {0x0399, 0x0314, 0x0300}, {0x0399, 0x0313, 0x0301}, {0x0399, 0x0314, 0x0301}, + /* 0x0A0 */ {0x0399, 0x0313, 0x0342}, {0x0399, 0x0314, 0x0342}, {0x03BF, 0x0313, 0x0300}, {0x03BF, 0x0314, 0x0300}, + /* 0x0A4 */ {0x03BF, 0x0313, 0x0301}, {0x03BF, 0x0314, 0x0301}, {0x039F, 0x0313, 0x0300}, {0x039F, 0x0314, 0x0300}, + /* 0x0A8 */ {0x039F, 0x0313, 0x0301}, {0x039F, 0x0314, 0x0301}, {0x03C5, 0x0313, 0x0300}, {0x03C5, 0x0314, 0x0300}, + /* 0x0AC */ {0x03C5, 0x0313, 0x0301}, {0x03C5, 0x0314, 0x0301}, {0x03C5, 0x0313, 0x0342}, {0x03C5, 0x0314, 0x0342}, + /* 0x0B0 */ {0x03A5, 0x0314, 0x0300}, {0x03A5, 0x0314, 0x0301}, {0x03A5, 0x0314, 0x0342}, {0x03C9, 0x0313, 0x0300}, + /* 0x0B4 */ {0x03C9, 0x0314, 0x0300}, {0x03C9, 0x0313, 0x0301}, {0x03C9, 0x0314, 0x0301}, {0x03C9, 0x0313, 0x0342}, + /* 0x0B8 */ {0x03C9, 0x0314, 0x0342}, {0x03A9, 0x0313, 0x0300}, {0x03A9, 0x0314, 0x0300}, {0x03A9, 0x0313, 0x0301}, + /* 0x0BC */ {0x03A9, 0x0314, 0x0301}, {0x03A9, 0x0313, 0x0342}, {0x03A9, 0x0314, 0x0342}, {0x03B1, 0x0313, 0x0345}, + /* 0x0C0 */ {0x03B1, 0x0314, 0x0345}, {0x0391, 0x0313, 0x0345}, {0x0391, 0x0314, 0x0345}, {0x03B7, 0x0313, 0x0345}, + /* 0x0C4 */ {0x03B7, 0x0314, 0x0345}, {0x0397, 0x0313, 0x0345}, {0x0397, 0x0314, 0x0345}, {0x03C9, 0x0313, 0x0345}, + /* 0x0C8 */ {0x03C9, 0x0314, 0x0345}, {0x03A9, 0x0313, 0x0345}, {0x03A9, 0x0314, 0x0345}, {0x03B1, 0x0300, 0x0345}, + /* 0x0CC */ {0x03B1, 0x0301, 0x0345}, {0x03B1, 0x0342, 0x0345}, {0x03B7, 0x0300, 0x0345}, {0x03B7, 0x0301, 0x0345}, + /* 0x0D0 */ {0x03B7, 0x0342, 0x0345}, {0x03B9, 0x0308, 0x0300}, {0x03B9, 0x0308, 0x0301}, {0x03B9, 0x0308, 0x0342}, + /* 0x0D4 */ {0x03C5, 0x0308, 0x0300}, {0x03C5, 0x0308, 
0x0301}, {0x03C5, 0x0308, 0x0342}, {0x03C9, 0x0300, 0x0345}, + /* 0x0D8 */ {0x03C9, 0x0301, 0x0345}, {0x03C9, 0x0342, 0x0345}, {0x0066, 0x0066, 0x0069}, {0x0066, 0x0066, 0x006C}, + /* 0x0DC */ {0x05E9, 0x05BC, 0x05C1}, {0x05E9, 0x05BC, 0x05C2} +}; +static uint16_t nfU16SeqMisc[198] = { + /* 0x000 */ 0x0E61, 0x0300, 0x0E61, 0x0301, 0x0E61, 0x0313, 0x0E62, 0x0308, 0x0301, 0x0812, 0x0F71, 0x0F72, 0x0812, 0x0F71, 0x0F74, 0x0812, + /* 0x010 */ 0x0F71, 0x0F80, 0x0004, 0x03B1, 0x0313, 0x0300, 0x0345, 0x0004, 0x03B1, 0x0314, 0x0300, 0x0345, 0x0004, 0x03B1, 0x0313, 0x0301, + /* 0x020 */ 0x0345, 0x0004, 0x03B1, 0x0314, 0x0301, 0x0345, 0x0004, 0x03B1, 0x0313, 0x0342, 0x0345, 0x0004, 0x03B1, 0x0314, 0x0342, 0x0345, + /* 0x030 */ 0x0004, 0x0391, 0x0313, 0x0300, 0x0345, 0x0004, 0x0391, 0x0314, 0x0300, 0x0345, 0x0004, 0x0391, 0x0313, 0x0301, 0x0345, 0x0004, + /* 0x040 */ 0x0391, 0x0314, 0x0301, 0x0345, 0x0004, 0x0391, 0x0313, 0x0342, 0x0345, 0x0004, 0x0391, 0x0314, 0x0342, 0x0345, 0x0004, 0x03B7, + /* 0x050 */ 0x0313, 0x0300, 0x0345, 0x0004, 0x03B7, 0x0314, 0x0300, 0x0345, 0x0004, 0x03B7, 0x0313, 0x0301, 0x0345, 0x0004, 0x03B7, 0x0314, + /* 0x060 */ 0x0301, 0x0345, 0x0004, 0x03B7, 0x0313, 0x0342, 0x0345, 0x0004, 0x03B7, 0x0314, 0x0342, 0x0345, 0x0004, 0x0397, 0x0313, 0x0300, + /* 0x070 */ 0x0345, 0x0004, 0x0397, 0x0314, 0x0300, 0x0345, 0x0004, 0x0397, 0x0313, 0x0301, 0x0345, 0x0004, 0x0397, 0x0314, 0x0301, 0x0345, + /* 0x080 */ 0x0004, 0x0397, 0x0313, 0x0342, 0x0345, 0x0004, 0x0397, 0x0314, 0x0342, 0x0345, 0x0004, 0x03C9, 0x0313, 0x0300, 0x0345, 0x0004, + /* 0x090 */ 0x03C9, 0x0314, 0x0300, 0x0345, 0x0004, 0x03C9, 0x0313, 0x0301, 0x0345, 0x0004, 0x03C9, 0x0314, 0x0301, 0x0345, 0x0004, 0x03C9, + /* 0x0A0 */ 0x0313, 0x0342, 0x0345, 0x0004, 0x03C9, 0x0314, 0x0342, 0x0345, 0x0004, 0x03A9, 0x0313, 0x0300, 0x0345, 0x0004, 0x03A9, 0x0314, + /* 0x0B0 */ 0x0300, 0x0345, 0x0004, 0x03A9, 0x0313, 0x0301, 0x0345, 0x0004, 0x03A9, 0x0314, 0x0301, 0x0345, 0x0004, 0x03A9, 0x0313, 0x0342, + /* 0x0C0 */ 0x0345, 0x0004, 0x03A9, 0x0314, 0x0342, 0x0345 +}; +static int32_t nfU32Char[887] = { + /* 0x000 */ 0x00501, 0x00503, 0x00505, 0x00507, 0x00509, 0x0050B, 0x0050D, 0x0050F, + /* 0x008 */ 0x00511, 0x00513, 0x00515, 0x00517, 0x00519, 0x0051B, 0x0051D, 0x0051F, + /* 0x010 */ 0x00521, 0x00523, 0x00525, 0x00527, 0x00529, 0x0052B, 0x0052D, 0x0052F, + /* 0x018 */ 0x00561, 0x00562, 0x00563, 0x00564, 0x00565, 0x00566, 0x00567, 0x00568, + /* 0x020 */ 0x00569, 0x0056A, 0x0056B, 0x0056C, 0x0056D, 0x0056E, 0x0056F, 0x00570, + /* 0x028 */ 0x00571, 0x00572, 0x00573, 0x00574, 0x00575, 0x00576, 0x00577, 0x00578, + /* 0x030 */ 0x00579, 0x0057A, 0x0057B, 0x0057C, 0x0057D, 0x0057E, 0x0057F, 0x00580, + /* 0x038 */ 0x00581, 0x00582, 0x00583, 0x00584, 0x00585, 0x00586, 0x02D00, 0x02D01, + /* 0x040 */ 0x02D02, 0x02D03, 0x02D04, 0x02D05, 0x02D06, 0x02D07, 0x02D08, 0x02D09, + /* 0x048 */ 0x02D0A, 0x02D0B, 0x02D0C, 0x02D0D, 0x02D0E, 0x02D0F, 0x02D10, 0x02D11, + /* 0x050 */ 0x02D12, 0x02D13, 0x02D14, 0x02D15, 0x02D16, 0x02D17, 0x02D18, 0x02D19, + /* 0x058 */ 0x02D1A, 0x02D1B, 0x02D1C, 0x02D1D, 0x02D1E, 0x02D1F, 0x02D20, 0x02D21, + /* 0x060 */ 0x02D22, 0x02D23, 0x02D24, 0x02D25, 0x02D27, 0x02D2D, 0x013F0, 0x013F1, + /* 0x068 */ 0x013F2, 0x013F3, 0x013F4, 0x013F5, 0x00432, 0x00434, 0x0043E, 0x00441, + /* 0x070 */ 0x00442, 0x00442, 0x0044A, 0x00463, 0x0A64B, 0x010D0, 0x010D1, 0x010D2, + /* 0x078 */ 0x010D3, 0x010D4, 0x010D5, 0x010D6, 0x010D7, 0x010D8, 0x010D9, 0x010DA, + /* 0x080 */ 0x010DB, 0x010DC, 0x010DD, 0x010DE, 0x010DF, 0x010E0, 0x010E1, 0x010E2, + 
/* 0x088 */ 0x010E3, 0x010E4, 0x010E5, 0x010E6, 0x010E7, 0x010E8, 0x010E9, 0x010EA, + /* 0x090 */ 0x010EB, 0x010EC, 0x010ED, 0x010EE, 0x010EF, 0x010F0, 0x010F1, 0x010F2, + /* 0x098 */ 0x010F3, 0x010F4, 0x010F5, 0x010F6, 0x010F7, 0x010F8, 0x010F9, 0x010FA, + /* 0x0A0 */ 0x010FD, 0x010FE, 0x010FF, 0x01EFB, 0x01EFD, 0x01EFF, 0x0214E, 0x02170, + /* 0x0A8 */ 0x02171, 0x02172, 0x02173, 0x02174, 0x02175, 0x02176, 0x02177, 0x02178, + /* 0x0B0 */ 0x02179, 0x0217A, 0x0217B, 0x0217C, 0x0217D, 0x0217E, 0x0217F, 0x02184, + /* 0x0B8 */ 0x024D0, 0x024D1, 0x024D2, 0x024D3, 0x024D4, 0x024D5, 0x024D6, 0x024D7, + /* 0x0C0 */ 0x024D8, 0x024D9, 0x024DA, 0x024DB, 0x024DC, 0x024DD, 0x024DE, 0x024DF, + /* 0x0C8 */ 0x024E0, 0x024E1, 0x024E2, 0x024E3, 0x024E4, 0x024E5, 0x024E6, 0x024E7, + /* 0x0D0 */ 0x024E8, 0x024E9, 0x02C30, 0x02C31, 0x02C32, 0x02C33, 0x02C34, 0x02C35, + /* 0x0D8 */ 0x02C36, 0x02C37, 0x02C38, 0x02C39, 0x02C3A, 0x02C3B, 0x02C3C, 0x02C3D, + /* 0x0E0 */ 0x02C3E, 0x02C3F, 0x02C40, 0x02C41, 0x02C42, 0x02C43, 0x02C44, 0x02C45, + /* 0x0E8 */ 0x02C46, 0x02C47, 0x02C48, 0x02C49, 0x02C4A, 0x02C4B, 0x02C4C, 0x02C4D, + /* 0x0F0 */ 0x02C4E, 0x02C4F, 0x02C50, 0x02C51, 0x02C52, 0x02C53, 0x02C54, 0x02C55, + /* 0x0F8 */ 0x02C56, 0x02C57, 0x02C58, 0x02C59, 0x02C5A, 0x02C5B, 0x02C5C, 0x02C5D, + /* 0x100 */ 0x02C5E, 0x02C61, 0x0026B, 0x01D7D, 0x0027D, 0x02C68, 0x02C6A, 0x02C6C, + /* 0x108 */ 0x00251, 0x00271, 0x00250, 0x00252, 0x02C73, 0x02C76, 0x0023F, 0x00240, + /* 0x110 */ 0x02C81, 0x02C83, 0x02C85, 0x02C87, 0x02C89, 0x02C8B, 0x02C8D, 0x02C8F, + /* 0x118 */ 0x02C91, 0x02C93, 0x02C95, 0x02C97, 0x02C99, 0x02C9B, 0x02C9D, 0x02C9F, + /* 0x120 */ 0x02CA1, 0x02CA3, 0x02CA5, 0x02CA7, 0x02CA9, 0x02CAB, 0x02CAD, 0x02CAF, + /* 0x128 */ 0x02CB1, 0x02CB3, 0x02CB5, 0x02CB7, 0x02CB9, 0x02CBB, 0x02CBD, 0x02CBF, + /* 0x130 */ 0x02CC1, 0x02CC3, 0x02CC5, 0x02CC7, 0x02CC9, 0x02CCB, 0x02CCD, 0x02CCF, + /* 0x138 */ 0x02CD1, 0x02CD3, 0x02CD5, 0x02CD7, 0x02CD9, 0x02CDB, 0x02CDD, 0x02CDF, + /* 0x140 */ 0x02CE1, 0x02CE3, 0x02CEC, 0x02CEE, 0x02CF3, 0x0A641, 0x0A643, 0x0A645, + /* 0x148 */ 0x0A647, 0x0A649, 0x0A64B, 0x0A64D, 0x0A64F, 0x0A651, 0x0A653, 0x0A655, + /* 0x150 */ 0x0A657, 0x0A659, 0x0A65B, 0x0A65D, 0x0A65F, 0x0A661, 0x0A663, 0x0A665, + /* 0x158 */ 0x0A667, 0x0A669, 0x0A66B, 0x0A66D, 0x0A681, 0x0A683, 0x0A685, 0x0A687, + /* 0x160 */ 0x0A689, 0x0A68B, 0x0A68D, 0x0A68F, 0x0A691, 0x0A693, 0x0A695, 0x0A697, + /* 0x168 */ 0x0A699, 0x0A69B, 0x0A723, 0x0A725, 0x0A727, 0x0A729, 0x0A72B, 0x0A72D, + /* 0x170 */ 0x0A72F, 0x0A733, 0x0A735, 0x0A737, 0x0A739, 0x0A73B, 0x0A73D, 0x0A73F, + /* 0x178 */ 0x0A741, 0x0A743, 0x0A745, 0x0A747, 0x0A749, 0x0A74B, 0x0A74D, 0x0A74F, + /* 0x180 */ 0x0A751, 0x0A753, 0x0A755, 0x0A757, 0x0A759, 0x0A75B, 0x0A75D, 0x0A75F, + /* 0x188 */ 0x0A761, 0x0A763, 0x0A765, 0x0A767, 0x0A769, 0x0A76B, 0x0A76D, 0x0A76F, + /* 0x190 */ 0x0A77A, 0x0A77C, 0x01D79, 0x0A77F, 0x0A781, 0x0A783, 0x0A785, 0x0A787, + /* 0x198 */ 0x0A78C, 0x00265, 0x0A791, 0x0A793, 0x0A797, 0x0A799, 0x0A79B, 0x0A79D, + /* 0x1A0 */ 0x0A79F, 0x0A7A1, 0x0A7A3, 0x0A7A5, 0x0A7A7, 0x0A7A9, 0x00266, 0x0025C, + /* 0x1A8 */ 0x00261, 0x0026C, 0x0026A, 0x0029E, 0x00287, 0x0029D, 0x0AB53, 0x0A7B5, + /* 0x1B0 */ 0x0A7B7, 0x0A7B9, 0x0A7BB, 0x0A7BD, 0x0A7BF, 0x0A7C3, 0x0A794, 0x00282, + /* 0x1B8 */ 0x01D8E, 0x0A7C8, 0x0A7CA, 0x0A7F6, 0x013A0, 0x013A1, 0x013A2, 0x013A3, + /* 0x1C0 */ 0x013A4, 0x013A5, 0x013A6, 0x013A7, 0x013A8, 0x013A9, 0x013AA, 0x013AB, + /* 0x1C8 */ 0x013AC, 0x013AD, 0x013AE, 0x013AF, 0x013B0, 0x013B1, 0x013B2, 0x013B3, + /* 0x1D0 */ 0x013B4, 
0x013B5, 0x013B6, 0x013B7, 0x013B8, 0x013B9, 0x013BA, 0x013BB, + /* 0x1D8 */ 0x013BC, 0x013BD, 0x013BE, 0x013BF, 0x013C0, 0x013C1, 0x013C2, 0x013C3, + /* 0x1E0 */ 0x013C4, 0x013C5, 0x013C6, 0x013C7, 0x013C8, 0x013C9, 0x013CA, 0x013CB, + /* 0x1E8 */ 0x013CC, 0x013CD, 0x013CE, 0x013CF, 0x013D0, 0x013D1, 0x013D2, 0x013D3, + /* 0x1F0 */ 0x013D4, 0x013D5, 0x013D6, 0x013D7, 0x013D8, 0x013D9, 0x013DA, 0x013DB, + /* 0x1F8 */ 0x013DC, 0x013DD, 0x013DE, 0x013DF, 0x013E0, 0x013E1, 0x013E2, 0x013E3, + /* 0x200 */ 0x013E4, 0x013E5, 0x013E6, 0x013E7, 0x013E8, 0x013E9, 0x013EA, 0x013EB, + /* 0x208 */ 0x013EC, 0x013ED, 0x013EE, 0x013EF, 0x242EE, 0x2284A, 0x22844, 0x233D5, + /* 0x210 */ 0x25249, 0x25CD0, 0x27ED3, 0x0FF41, 0x0FF42, 0x0FF43, 0x0FF44, 0x0FF45, + /* 0x218 */ 0x0FF46, 0x0FF47, 0x0FF48, 0x0FF49, 0x0FF4A, 0x0FF4B, 0x0FF4C, 0x0FF4D, + /* 0x220 */ 0x0FF4E, 0x0FF4F, 0x0FF50, 0x0FF51, 0x0FF52, 0x0FF53, 0x0FF54, 0x0FF55, + /* 0x228 */ 0x0FF56, 0x0FF57, 0x0FF58, 0x0FF59, 0x0FF5A, 0x10428, 0x10429, 0x1042A, + /* 0x230 */ 0x1042B, 0x1042C, 0x1042D, 0x1042E, 0x1042F, 0x10430, 0x10431, 0x10432, + /* 0x238 */ 0x10433, 0x10434, 0x10435, 0x10436, 0x10437, 0x10438, 0x10439, 0x1043A, + /* 0x240 */ 0x1043B, 0x1043C, 0x1043D, 0x1043E, 0x1043F, 0x10440, 0x10441, 0x10442, + /* 0x248 */ 0x10443, 0x10444, 0x10445, 0x10446, 0x10447, 0x10448, 0x10449, 0x1044A, + /* 0x250 */ 0x1044B, 0x1044C, 0x1044D, 0x1044E, 0x1044F, 0x104D8, 0x104D9, 0x104DA, + /* 0x258 */ 0x104DB, 0x104DC, 0x104DD, 0x104DE, 0x104DF, 0x104E0, 0x104E1, 0x104E2, + /* 0x260 */ 0x104E3, 0x104E4, 0x104E5, 0x104E6, 0x104E7, 0x104E8, 0x104E9, 0x104EA, + /* 0x268 */ 0x104EB, 0x104EC, 0x104ED, 0x104EE, 0x104EF, 0x104F0, 0x104F1, 0x104F2, + /* 0x270 */ 0x104F3, 0x104F4, 0x104F5, 0x104F6, 0x104F7, 0x104F8, 0x104F9, 0x104FA, + /* 0x278 */ 0x104FB, 0x10CC0, 0x10CC1, 0x10CC2, 0x10CC3, 0x10CC4, 0x10CC5, 0x10CC6, + /* 0x280 */ 0x10CC7, 0x10CC8, 0x10CC9, 0x10CCA, 0x10CCB, 0x10CCC, 0x10CCD, 0x10CCE, + /* 0x288 */ 0x10CCF, 0x10CD0, 0x10CD1, 0x10CD2, 0x10CD3, 0x10CD4, 0x10CD5, 0x10CD6, + /* 0x290 */ 0x10CD7, 0x10CD8, 0x10CD9, 0x10CDA, 0x10CDB, 0x10CDC, 0x10CDD, 0x10CDE, + /* 0x298 */ 0x10CDF, 0x10CE0, 0x10CE1, 0x10CE2, 0x10CE3, 0x10CE4, 0x10CE5, 0x10CE6, + /* 0x2A0 */ 0x10CE7, 0x10CE8, 0x10CE9, 0x10CEA, 0x10CEB, 0x10CEC, 0x10CED, 0x10CEE, + /* 0x2A8 */ 0x10CEF, 0x10CF0, 0x10CF1, 0x10CF2, 0x118C0, 0x118C1, 0x118C2, 0x118C3, + /* 0x2B0 */ 0x118C4, 0x118C5, 0x118C6, 0x118C7, 0x118C8, 0x118C9, 0x118CA, 0x118CB, + /* 0x2B8 */ 0x118CC, 0x118CD, 0x118CE, 0x118CF, 0x118D0, 0x118D1, 0x118D2, 0x118D3, + /* 0x2C0 */ 0x118D4, 0x118D5, 0x118D6, 0x118D7, 0x118D8, 0x118D9, 0x118DA, 0x118DB, + /* 0x2C8 */ 0x118DC, 0x118DD, 0x118DE, 0x118DF, 0x16E60, 0x16E61, 0x16E62, 0x16E63, + /* 0x2D0 */ 0x16E64, 0x16E65, 0x16E66, 0x16E67, 0x16E68, 0x16E69, 0x16E6A, 0x16E6B, + /* 0x2D8 */ 0x16E6C, 0x16E6D, 0x16E6E, 0x16E6F, 0x16E70, 0x16E71, 0x16E72, 0x16E73, + /* 0x2E0 */ 0x16E74, 0x16E75, 0x16E76, 0x16E77, 0x16E78, 0x16E79, 0x16E7A, 0x16E7B, + /* 0x2E8 */ 0x16E7C, 0x16E7D, 0x16E7E, 0x16E7F, 0x1E922, 0x1E923, 0x1E924, 0x1E925, + /* 0x2F0 */ 0x1E926, 0x1E927, 0x1E928, 0x1E929, 0x1E92A, 0x1E92B, 0x1E92C, 0x1E92D, + /* 0x2F8 */ 0x1E92E, 0x1E92F, 0x1E930, 0x1E931, 0x1E932, 0x1E933, 0x1E934, 0x1E935, + /* 0x300 */ 0x1E936, 0x1E937, 0x1E938, 0x1E939, 0x1E93A, 0x1E93B, 0x1E93C, 0x1E93D, + /* 0x308 */ 0x1E93E, 0x1E93F, 0x1E940, 0x1E941, 0x1E942, 0x1E943, 0x20122, 0x2063A, + /* 0x310 */ 0x2051C, 0x2054B, 0x291DF, 0x20A2C, 0x20B63, 0x214E4, 0x216A8, 0x216EA, + /* 0x318 */ 0x219C8, 0x21B18, 0x21DE4, 0x21DE6, 
0x22183, 0x2A392, 0x22331, 0x22331, + /* 0x320 */ 0x232B8, 0x261DA, 0x226D4, 0x22B0C, 0x22BF1, 0x2300A, 0x233C3, 0x2346D, + /* 0x328 */ 0x236A3, 0x238A7, 0x23A8D, 0x21D0B, 0x23AFA, 0x23CBC, 0x23D1E, 0x23ED1, + /* 0x330 */ 0x23F5E, 0x23F8E, 0x20525, 0x24263, 0x243AB, 0x24608, 0x24735, 0x24814, + /* 0x338 */ 0x24C36, 0x24C92, 0x2219F, 0x24FA1, 0x24FB8, 0x25044, 0x250F3, 0x250F2, + /* 0x340 */ 0x25119, 0x25133, 0x2541D, 0x25626, 0x2569A, 0x256C5, 0x2597C, 0x25AA7, + /* 0x348 */ 0x25AA7, 0x25BAB, 0x25C80, 0x25F86, 0x26228, 0x26247, 0x262D9, 0x2633E, + /* 0x350 */ 0x264DA, 0x26523, 0x265A8, 0x2335F, 0x267A7, 0x267B5, 0x23393, 0x2339C, + /* 0x358 */ 0x26B3C, 0x26C36, 0x26D6B, 0x26CD5, 0x273CA, 0x26F2C, 0x26FB1, 0x270D2, + /* 0x360 */ 0x27667, 0x278AE, 0x27966, 0x27CA8, 0x27F2F, 0x20804, 0x208DE, 0x285D2, + /* 0x368 */ 0x285ED, 0x2872E, 0x28BFA, 0x28D77, 0x29145, 0x2921A, 0x2940A, 0x29496, + /* 0x370 */ 0x295B6, 0x29B30, 0x2A0CE, 0x2A105, 0x2A20E, 0x2A291, 0x2A600 +}; +static int32_t nfU32SeqMisc[87] = { + /* 0x000 */ 0x00002, 0x11099, 0x110BA, 0x00002, 0x1109B, 0x110BA, 0x00002, 0x110A5, + /* 0x008 */ 0x110BA, 0x00002, 0x11131, 0x11127, 0x00002, 0x11132, 0x11127, 0x00002, + /* 0x010 */ 0x11347, 0x1133E, 0x00002, 0x11347, 0x11357, 0x00002, 0x114B9, 0x114BA, + /* 0x018 */ 0x00002, 0x114B9, 0x114B0, 0x00002, 0x114B9, 0x114BD, 0x00002, 0x115B8, + /* 0x020 */ 0x115AF, 0x00002, 0x115B9, 0x115AF, 0x00002, 0x11935, 0x11930, 0x00002, + /* 0x028 */ 0x1D157, 0x1D165, 0x00002, 0x1D158, 0x1D165, 0x00003, 0x1D158, 0x1D165, + /* 0x030 */ 0x1D16E, 0x00003, 0x1D158, 0x1D165, 0x1D16F, 0x00003, 0x1D158, 0x1D165, + /* 0x038 */ 0x1D170, 0x00003, 0x1D158, 0x1D165, 0x1D171, 0x00003, 0x1D158, 0x1D165, + /* 0x040 */ 0x1D172, 0x00002, 0x1D1B9, 0x1D165, 0x00002, 0x1D1BA, 0x1D165, 0x00003, + /* 0x048 */ 0x1D1B9, 0x1D165, 0x1D16E, 0x00003, 0x1D1BA, 0x1D165, 0x1D16E, 0x00003, + /* 0x050 */ 0x1D1B9, 0x1D165, 0x1D16F, 0x00003, 0x1D1BA, 0x1D165, 0x1D16F +}; +static uint16_t nfBasicCF[1280] = { + /* 0x000 */ 0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007, 0x0008, 0x0009, 0x000A, 0x000B, 0x000C, 0x000D, 0x000E, 0x000F, + /* 0x010 */ 0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x0017, 0x0018, 0x0019, 0x001A, 0x001B, 0x001C, 0x001D, 0x001E, 0x001F, + /* 0x020 */ 0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x0027, 0x0028, 0x0029, 0x002A, 0x002B, 0x002C, 0x002D, 0x002E, 0x002F, + /* 0x030 */ 0x0030, 0x0031, 0x0032, 0x0033, 0x0034, 0x0035, 0x0036, 0x0037, 0x0038, 0x0039, 0x003A, 0x003B, 0x003C, 0x003D, 0x003E, 0x003F, + /* 0x040 */ 0x0040, 0x0061, 0x0062, 0x0063, 0x0064, 0x0065, 0x0066, 0x0067, 0x0068, 0x0069, 0x006A, 0x006B, 0x006C, 0x006D, 0x006E, 0x006F, + /* 0x050 */ 0x0070, 0x0071, 0x0072, 0x0073, 0x0074, 0x0075, 0x0076, 0x0077, 0x0078, 0x0079, 0x007A, 0x005B, 0x005C, 0x005D, 0x005E, 0x005F, + /* 0x060 */ 0x0060, 0x0061, 0x0062, 0x0063, 0x0064, 0x0065, 0x0066, 0x0067, 0x0068, 0x0069, 0x006A, 0x006B, 0x006C, 0x006D, 0x006E, 0x006F, + /* 0x070 */ 0x0070, 0x0071, 0x0072, 0x0073, 0x0074, 0x0075, 0x0076, 0x0077, 0x0078, 0x0079, 0x007A, 0x007B, 0x007C, 0x007D, 0x007E, 0x007F, + /* 0x080 */ 0x0080, 0x0081, 0x0082, 0x0083, 0x0084, 0x0085, 0x0086, 0x0087, 0x0088, 0x0089, 0x008A, 0x008B, 0x008C, 0x008D, 0x008E, 0x008F, + /* 0x090 */ 0x0090, 0x0091, 0x0092, 0x0093, 0x0094, 0x0095, 0x0096, 0x0097, 0x0098, 0x0099, 0x009A, 0x009B, 0x009C, 0x009D, 0x009E, 0x009F, + /* 0x0A0 */ 0x00A0, 0x00A1, 0x00A2, 0x00A3, 0x00A4, 0x00A5, 0x00A6, 0x00A7, 0x00A8, 0x00A9, 0x00AA, 0x00AB, 0x00AC, 0x00AD, 0x00AE, 0x00AF, + 
/* 0x0B0 */ 0x00B0, 0x00B1, 0x00B2, 0x00B3, 0x00B4, 0x03BC, 0x00B6, 0x00B7, 0x00B8, 0x00B9, 0x00BA, 0x00BB, 0x00BC, 0x00BD, 0x00BE, 0x00BF, + /* 0x0C0 */ 0x00E0, 0x00E1, 0x00E2, 0x00E3, 0x00E4, 0x00E5, 0x00E6, 0x00E7, 0x00E8, 0x00E9, 0x00EA, 0x00EB, 0x00EC, 0x00ED, 0x00EE, 0x00EF, + /* 0x0D0 */ 0x00F0, 0x00F1, 0x00F2, 0x00F3, 0x00F4, 0x00F5, 0x00F6, 0x00D7, 0x00F8, 0x00F9, 0x00FA, 0x00FB, 0x00FC, 0x00FD, 0x00FE, 0x00DF, + /* 0x0E0 */ 0x00E0, 0x00E1, 0x00E2, 0x00E3, 0x00E4, 0x00E5, 0x00E6, 0x00E7, 0x00E8, 0x00E9, 0x00EA, 0x00EB, 0x00EC, 0x00ED, 0x00EE, 0x00EF, + /* 0x0F0 */ 0x00F0, 0x00F1, 0x00F2, 0x00F3, 0x00F4, 0x00F5, 0x00F6, 0x00F7, 0x00F8, 0x00F9, 0x00FA, 0x00FB, 0x00FC, 0x00FD, 0x00FE, 0x00FF, + /* 0x100 */ 0x0101, 0x0101, 0x0103, 0x0103, 0x0105, 0x0105, 0x0107, 0x0107, 0x0109, 0x0109, 0x010B, 0x010B, 0x010D, 0x010D, 0x010F, 0x010F, + /* 0x110 */ 0x0111, 0x0111, 0x0113, 0x0113, 0x0115, 0x0115, 0x0117, 0x0117, 0x0119, 0x0119, 0x011B, 0x011B, 0x011D, 0x011D, 0x011F, 0x011F, + /* 0x120 */ 0x0121, 0x0121, 0x0123, 0x0123, 0x0125, 0x0125, 0x0127, 0x0127, 0x0129, 0x0129, 0x012B, 0x012B, 0x012D, 0x012D, 0x012F, 0x012F, + /* 0x130 */ 0x0130, 0x0131, 0x0133, 0x0133, 0x0135, 0x0135, 0x0137, 0x0137, 0x0138, 0x013A, 0x013A, 0x013C, 0x013C, 0x013E, 0x013E, 0x0140, + /* 0x140 */ 0x0140, 0x0142, 0x0142, 0x0144, 0x0144, 0x0146, 0x0146, 0x0148, 0x0148, 0x0149, 0x014B, 0x014B, 0x014D, 0x014D, 0x014F, 0x014F, + /* 0x150 */ 0x0151, 0x0151, 0x0153, 0x0153, 0x0155, 0x0155, 0x0157, 0x0157, 0x0159, 0x0159, 0x015B, 0x015B, 0x015D, 0x015D, 0x015F, 0x015F, + /* 0x160 */ 0x0161, 0x0161, 0x0163, 0x0163, 0x0165, 0x0165, 0x0167, 0x0167, 0x0169, 0x0169, 0x016B, 0x016B, 0x016D, 0x016D, 0x016F, 0x016F, + /* 0x170 */ 0x0171, 0x0171, 0x0173, 0x0173, 0x0175, 0x0175, 0x0177, 0x0177, 0x00FF, 0x017A, 0x017A, 0x017C, 0x017C, 0x017E, 0x017E, 0x0073, + /* 0x180 */ 0x0180, 0x0253, 0x0183, 0x0183, 0x0185, 0x0185, 0x0254, 0x0188, 0x0188, 0x0256, 0x0257, 0x018C, 0x018C, 0x018D, 0x01DD, 0x0259, + /* 0x190 */ 0x025B, 0x0192, 0x0192, 0x0260, 0x0263, 0x0195, 0x0269, 0x0268, 0x0199, 0x0199, 0x019A, 0x019B, 0x026F, 0x0272, 0x019E, 0x0275, + /* 0x1A0 */ 0x01A1, 0x01A1, 0x01A3, 0x01A3, 0x01A5, 0x01A5, 0x0280, 0x01A8, 0x01A8, 0x0283, 0x01AA, 0x01AB, 0x01AD, 0x01AD, 0x0288, 0x01B0, + /* 0x1B0 */ 0x01B0, 0x028A, 0x028B, 0x01B4, 0x01B4, 0x01B6, 0x01B6, 0x0292, 0x01B9, 0x01B9, 0x01BA, 0x01BB, 0x01BD, 0x01BD, 0x01BE, 0x01BF, + /* 0x1C0 */ 0x01C0, 0x01C1, 0x01C2, 0x01C3, 0x01C6, 0x01C6, 0x01C6, 0x01C9, 0x01C9, 0x01C9, 0x01CC, 0x01CC, 0x01CC, 0x01CE, 0x01CE, 0x01D0, + /* 0x1D0 */ 0x01D0, 0x01D2, 0x01D2, 0x01D4, 0x01D4, 0x01D6, 0x01D6, 0x01D8, 0x01D8, 0x01DA, 0x01DA, 0x01DC, 0x01DC, 0x01DD, 0x01DF, 0x01DF, + /* 0x1E0 */ 0x01E1, 0x01E1, 0x01E3, 0x01E3, 0x01E5, 0x01E5, 0x01E7, 0x01E7, 0x01E9, 0x01E9, 0x01EB, 0x01EB, 0x01ED, 0x01ED, 0x01EF, 0x01EF, + /* 0x1F0 */ 0x01F0, 0x01F3, 0x01F3, 0x01F3, 0x01F5, 0x01F5, 0x0195, 0x01BF, 0x01F9, 0x01F9, 0x01FB, 0x01FB, 0x01FD, 0x01FD, 0x01FF, 0x01FF, + /* 0x200 */ 0x0201, 0x0201, 0x0203, 0x0203, 0x0205, 0x0205, 0x0207, 0x0207, 0x0209, 0x0209, 0x020B, 0x020B, 0x020D, 0x020D, 0x020F, 0x020F, + /* 0x210 */ 0x0211, 0x0211, 0x0213, 0x0213, 0x0215, 0x0215, 0x0217, 0x0217, 0x0219, 0x0219, 0x021B, 0x021B, 0x021D, 0x021D, 0x021F, 0x021F, + /* 0x220 */ 0x019E, 0x0221, 0x0223, 0x0223, 0x0225, 0x0225, 0x0227, 0x0227, 0x0229, 0x0229, 0x022B, 0x022B, 0x022D, 0x022D, 0x022F, 0x022F, + /* 0x230 */ 0x0231, 0x0231, 0x0233, 0x0233, 0x0234, 0x0235, 0x0236, 0x0237, 0x0238, 0x0239, 0x2C65, 0x023C, 0x023C, 0x019A, 0x2C66, 0x023F, + /* 
0x240 */ 0x0240, 0x0242, 0x0242, 0x0180, 0x0289, 0x028C, 0x0247, 0x0247, 0x0249, 0x0249, 0x024B, 0x024B, 0x024D, 0x024D, 0x024F, 0x024F, + /* 0x250 */ 0x0250, 0x0251, 0x0252, 0x0253, 0x0254, 0x0255, 0x0256, 0x0257, 0x0258, 0x0259, 0x025A, 0x025B, 0x025C, 0x025D, 0x025E, 0x025F, + /* 0x260 */ 0x0260, 0x0261, 0x0262, 0x0263, 0x0264, 0x0265, 0x0266, 0x0267, 0x0268, 0x0269, 0x026A, 0x026B, 0x026C, 0x026D, 0x026E, 0x026F, + /* 0x270 */ 0x0270, 0x0271, 0x0272, 0x0273, 0x0274, 0x0275, 0x0276, 0x0277, 0x0278, 0x0279, 0x027A, 0x027B, 0x027C, 0x027D, 0x027E, 0x027F, + /* 0x280 */ 0x0280, 0x0281, 0x0282, 0x0283, 0x0284, 0x0285, 0x0286, 0x0287, 0x0288, 0x0289, 0x028A, 0x028B, 0x028C, 0x028D, 0x028E, 0x028F, + /* 0x290 */ 0x0290, 0x0291, 0x0292, 0x0293, 0x0294, 0x0295, 0x0296, 0x0297, 0x0298, 0x0299, 0x029A, 0x029B, 0x029C, 0x029D, 0x029E, 0x029F, + /* 0x2A0 */ 0x02A0, 0x02A1, 0x02A2, 0x02A3, 0x02A4, 0x02A5, 0x02A6, 0x02A7, 0x02A8, 0x02A9, 0x02AA, 0x02AB, 0x02AC, 0x02AD, 0x02AE, 0x02AF, + /* 0x2B0 */ 0x02B0, 0x02B1, 0x02B2, 0x02B3, 0x02B4, 0x02B5, 0x02B6, 0x02B7, 0x02B8, 0x02B9, 0x02BA, 0x02BB, 0x02BC, 0x02BD, 0x02BE, 0x02BF, + /* 0x2C0 */ 0x02C0, 0x02C1, 0x02C2, 0x02C3, 0x02C4, 0x02C5, 0x02C6, 0x02C7, 0x02C8, 0x02C9, 0x02CA, 0x02CB, 0x02CC, 0x02CD, 0x02CE, 0x02CF, + /* 0x2D0 */ 0x02D0, 0x02D1, 0x02D2, 0x02D3, 0x02D4, 0x02D5, 0x02D6, 0x02D7, 0x02D8, 0x02D9, 0x02DA, 0x02DB, 0x02DC, 0x02DD, 0x02DE, 0x02DF, + /* 0x2E0 */ 0x02E0, 0x02E1, 0x02E2, 0x02E3, 0x02E4, 0x02E5, 0x02E6, 0x02E7, 0x02E8, 0x02E9, 0x02EA, 0x02EB, 0x02EC, 0x02ED, 0x02EE, 0x02EF, + /* 0x2F0 */ 0x02F0, 0x02F1, 0x02F2, 0x02F3, 0x02F4, 0x02F5, 0x02F6, 0x02F7, 0x02F8, 0x02F9, 0x02FA, 0x02FB, 0x02FC, 0x02FD, 0x02FE, 0x02FF, + /* 0x300 */ 0x0300, 0x0301, 0x0302, 0x0303, 0x0304, 0x0305, 0x0306, 0x0307, 0x0308, 0x0309, 0x030A, 0x030B, 0x030C, 0x030D, 0x030E, 0x030F, + /* 0x310 */ 0x0310, 0x0311, 0x0312, 0x0313, 0x0314, 0x0315, 0x0316, 0x0317, 0x0318, 0x0319, 0x031A, 0x031B, 0x031C, 0x031D, 0x031E, 0x031F, + /* 0x320 */ 0x0320, 0x0321, 0x0322, 0x0323, 0x0324, 0x0325, 0x0326, 0x0327, 0x0328, 0x0329, 0x032A, 0x032B, 0x032C, 0x032D, 0x032E, 0x032F, + /* 0x330 */ 0x0330, 0x0331, 0x0332, 0x0333, 0x0334, 0x0335, 0x0336, 0x0337, 0x0338, 0x0339, 0x033A, 0x033B, 0x033C, 0x033D, 0x033E, 0x033F, + /* 0x340 */ 0x0340, 0x0341, 0x0342, 0x0343, 0x0344, 0x03B9, 0x0346, 0x0347, 0x0348, 0x0349, 0x034A, 0x034B, 0x034C, 0x034D, 0x034E, 0x034F, + /* 0x350 */ 0x0350, 0x0351, 0x0352, 0x0353, 0x0354, 0x0355, 0x0356, 0x0357, 0x0358, 0x0359, 0x035A, 0x035B, 0x035C, 0x035D, 0x035E, 0x035F, + /* 0x360 */ 0x0360, 0x0361, 0x0362, 0x0363, 0x0364, 0x0365, 0x0366, 0x0367, 0x0368, 0x0369, 0x036A, 0x036B, 0x036C, 0x036D, 0x036E, 0x036F, + /* 0x370 */ 0x0371, 0x0371, 0x0373, 0x0373, 0x0374, 0x0375, 0x0377, 0x0377, 0x0378, 0x0379, 0x037A, 0x037B, 0x037C, 0x037D, 0x037E, 0x03F3, + /* 0x380 */ 0x0380, 0x0381, 0x0382, 0x0383, 0x0384, 0x0385, 0x03AC, 0x0387, 0x03AD, 0x03AE, 0x03AF, 0x038B, 0x03CC, 0x038D, 0x03CD, 0x03CE, + /* 0x390 */ 0x0390, 0x03B1, 0x03B2, 0x03B3, 0x03B4, 0x03B5, 0x03B6, 0x03B7, 0x03B8, 0x03B9, 0x03BA, 0x03BB, 0x03BC, 0x03BD, 0x03BE, 0x03BF, + /* 0x3A0 */ 0x03C0, 0x03C1, 0x03A2, 0x03C3, 0x03C4, 0x03C5, 0x03C6, 0x03C7, 0x03C8, 0x03C9, 0x03CA, 0x03CB, 0x03AC, 0x03AD, 0x03AE, 0x03AF, + /* 0x3B0 */ 0x03B0, 0x03B1, 0x03B2, 0x03B3, 0x03B4, 0x03B5, 0x03B6, 0x03B7, 0x03B8, 0x03B9, 0x03BA, 0x03BB, 0x03BC, 0x03BD, 0x03BE, 0x03BF, + /* 0x3C0 */ 0x03C0, 0x03C1, 0x03C3, 0x03C3, 0x03C4, 0x03C5, 0x03C6, 0x03C7, 0x03C8, 0x03C9, 0x03CA, 0x03CB, 0x03CC, 0x03CD, 0x03CE, 0x03D7, + /* 
0x3D0 */ 0x03B2, 0x03B8, 0x03D2, 0x03D3, 0x03D4, 0x03C6, 0x03C0, 0x03D7, 0x03D9, 0x03D9, 0x03DB, 0x03DB, 0x03DD, 0x03DD, 0x03DF, 0x03DF, + /* 0x3E0 */ 0x03E1, 0x03E1, 0x03E3, 0x03E3, 0x03E5, 0x03E5, 0x03E7, 0x03E7, 0x03E9, 0x03E9, 0x03EB, 0x03EB, 0x03ED, 0x03ED, 0x03EF, 0x03EF, + /* 0x3F0 */ 0x03BA, 0x03C1, 0x03F2, 0x03F3, 0x03B8, 0x03B5, 0x03F6, 0x03F8, 0x03F8, 0x03F2, 0x03FB, 0x03FB, 0x03FC, 0x037B, 0x037C, 0x037D, + /* 0x400 */ 0x0450, 0x0451, 0x0452, 0x0453, 0x0454, 0x0455, 0x0456, 0x0457, 0x0458, 0x0459, 0x045A, 0x045B, 0x045C, 0x045D, 0x045E, 0x045F, + /* 0x410 */ 0x0430, 0x0431, 0x0432, 0x0433, 0x0434, 0x0435, 0x0436, 0x0437, 0x0438, 0x0439, 0x043A, 0x043B, 0x043C, 0x043D, 0x043E, 0x043F, + /* 0x420 */ 0x0440, 0x0441, 0x0442, 0x0443, 0x0444, 0x0445, 0x0446, 0x0447, 0x0448, 0x0449, 0x044A, 0x044B, 0x044C, 0x044D, 0x044E, 0x044F, + /* 0x430 */ 0x0430, 0x0431, 0x0432, 0x0433, 0x0434, 0x0435, 0x0436, 0x0437, 0x0438, 0x0439, 0x043A, 0x043B, 0x043C, 0x043D, 0x043E, 0x043F, + /* 0x440 */ 0x0440, 0x0441, 0x0442, 0x0443, 0x0444, 0x0445, 0x0446, 0x0447, 0x0448, 0x0449, 0x044A, 0x044B, 0x044C, 0x044D, 0x044E, 0x044F, + /* 0x450 */ 0x0450, 0x0451, 0x0452, 0x0453, 0x0454, 0x0455, 0x0456, 0x0457, 0x0458, 0x0459, 0x045A, 0x045B, 0x045C, 0x045D, 0x045E, 0x045F, + /* 0x460 */ 0x0461, 0x0461, 0x0463, 0x0463, 0x0465, 0x0465, 0x0467, 0x0467, 0x0469, 0x0469, 0x046B, 0x046B, 0x046D, 0x046D, 0x046F, 0x046F, + /* 0x470 */ 0x0471, 0x0471, 0x0473, 0x0473, 0x0475, 0x0475, 0x0477, 0x0477, 0x0479, 0x0479, 0x047B, 0x047B, 0x047D, 0x047D, 0x047F, 0x047F, + /* 0x480 */ 0x0481, 0x0481, 0x0482, 0x0483, 0x0484, 0x0485, 0x0486, 0x0487, 0x0488, 0x0489, 0x048B, 0x048B, 0x048D, 0x048D, 0x048F, 0x048F, + /* 0x490 */ 0x0491, 0x0491, 0x0493, 0x0493, 0x0495, 0x0495, 0x0497, 0x0497, 0x0499, 0x0499, 0x049B, 0x049B, 0x049D, 0x049D, 0x049F, 0x049F, + /* 0x4A0 */ 0x04A1, 0x04A1, 0x04A3, 0x04A3, 0x04A5, 0x04A5, 0x04A7, 0x04A7, 0x04A9, 0x04A9, 0x04AB, 0x04AB, 0x04AD, 0x04AD, 0x04AF, 0x04AF, + /* 0x4B0 */ 0x04B1, 0x04B1, 0x04B3, 0x04B3, 0x04B5, 0x04B5, 0x04B7, 0x04B7, 0x04B9, 0x04B9, 0x04BB, 0x04BB, 0x04BD, 0x04BD, 0x04BF, 0x04BF, + /* 0x4C0 */ 0x04CF, 0x04C2, 0x04C2, 0x04C4, 0x04C4, 0x04C6, 0x04C6, 0x04C8, 0x04C8, 0x04CA, 0x04CA, 0x04CC, 0x04CC, 0x04CE, 0x04CE, 0x04CF, + /* 0x4D0 */ 0x04D1, 0x04D1, 0x04D3, 0x04D3, 0x04D5, 0x04D5, 0x04D7, 0x04D7, 0x04D9, 0x04D9, 0x04DB, 0x04DB, 0x04DD, 0x04DD, 0x04DF, 0x04DF, + /* 0x4E0 */ 0x04E1, 0x04E1, 0x04E3, 0x04E3, 0x04E5, 0x04E5, 0x04E7, 0x04E7, 0x04E9, 0x04E9, 0x04EB, 0x04EB, 0x04ED, 0x04ED, 0x04EF, 0x04EF, + /* 0x4F0 */ 0x04F1, 0x04F1, 0x04F3, 0x04F3, 0x04F5, 0x04F5, 0x04F7, 0x04F7, 0x04F9, 0x04F9, 0x04FB, 0x04FB, 0x04FD, 0x04FD, 0x04FF, 0x04FF +}; +/* End generated data. */ +#endif // #ifndef vfs_unicode_data_h diff --git a/bsd/vfs/vfs_utfconv.c b/bsd/vfs/vfs_utfconv.c index 48f21532a..a2141312f 100644 --- a/bsd/vfs/vfs_utfconv.c +++ b/bsd/vfs/vfs_utfconv.c @@ -322,13 +322,13 @@ utf8_encodestr(const u_int16_t * ucsp, size_t ucslen, u_int8_t * utf8p, result = ENAMETOOLONG; break; } - *utf8p++ = ucs_ch; + *utf8p++ = (u_int8_t)ucs_ch; } else if (ucs_ch < 0x800) { if ((utf8p + 1) >= bufend) { result = ENAMETOOLONG; break; } - *utf8p++ = 0xc0 | (ucs_ch >> 6); + *utf8p++ = 0xc0 | (u_int8_t)(ucs_ch >> 6); *utf8p++ = 0x80 | (0x3f & ucs_ch); } else { /* These chars never valid Unicode. 
*/ @@ -353,7 +353,7 @@ utf8_encodestr(const u_int16_t * ucsp, size_t ucslen, u_int8_t * utf8p, } --charcnt; ++ucsp; - *utf8p++ = 0xf0 | (pair >> 18); + *utf8p++ = 0xf0 | (u_int8_t)(pair >> 18); *utf8p++ = 0x80 | (0x3f & (pair >> 12)); *utf8p++ = 0x80 | (0x3f & (pair >> 6)); *utf8p++ = 0x80 | (0x3f & pair); @@ -366,7 +366,7 @@ utf8_encodestr(const u_int16_t * ucsp, size_t ucslen, u_int8_t * utf8p, result = ENAMETOOLONG; break; } - *utf8p++ = ucs_ch; + *utf8p++ = (u_int8_t)ucs_ch; continue; } } @@ -464,7 +464,7 @@ utf8_decodestr(const u_int8_t* utf8p, size_t utf8len, u_int16_t* ucsp, /* check for ascii */ if (byte < 0x80) { - ucs_ch = sfmconv ? ucs_to_sfm(byte, utf8len == 0) : byte; + ucs_ch = sfmconv ? ucs_to_sfm((u_int16_t)byte, utf8len == 0) : byte; } else { u_int32_t ch; @@ -536,7 +536,7 @@ utf8_decodestr(const u_int8_t* utf8p, size_t utf8len, u_int16_t* ucsp, if (ucs_ch < SP_HIGH_FIRST || ucs_ch > SP_HIGH_LAST) { goto escape4; } - push(ucs_ch, &combcharcnt, &ucsp); + push((uint16_t)ucs_ch, &combcharcnt, &ucsp); if (ucsp >= bufend) { goto toolong; } @@ -545,18 +545,18 @@ utf8_decodestr(const u_int8_t* utf8p, size_t utf8len, u_int16_t* ucsp, --ucsp; goto escape4; } - *ucsp++ = ucs_ch; + *ucsp++ = (u_int16_t)ucs_ch; continue; default: result = EINVAL; goto exit; } if (decompose) { - if (unicode_decomposeable(ucs_ch)) { + if (unicode_decomposeable((u_int16_t)ucs_ch)) { u_int16_t sequence[8]; int count, i; - count = unicode_decompose(ucs_ch, sequence); + count = unicode_decompose((u_int16_t)ucs_ch, sequence); for (i = 0; i < count; ++i) { if (ucsp >= bufend) { @@ -571,9 +571,9 @@ utf8_decodestr(const u_int8_t* utf8p, size_t utf8len, u_int16_t* ucsp, } else if (precompose && (ucsp != bufstart)) { u_int16_t composite, base; - if (unicode_combinable(ucs_ch)) { + if (unicode_combinable((u_int16_t)ucs_ch)) { base = ucsp[-1]; - composite = unicode_combine(base, ucs_ch); + composite = unicode_combine(base, (u_int16_t)ucs_ch); if (composite) { --ucsp; ucs_ch = composite; @@ -588,7 +588,7 @@ utf8_decodestr(const u_int8_t* utf8p, size_t utf8len, u_int16_t* ucsp, ucs_ch = '/'; } - push(ucs_ch, &combcharcnt, &ucsp); + push((u_int16_t)ucs_ch, &combcharcnt, &ucsp); continue; /* @@ -623,11 +623,11 @@ escape: combcharcnt = 0; ucs_ch = '%'; - *ucsp++ = ucs_ch; + *ucsp++ = (u_int16_t)ucs_ch; ucs_ch = hexdigits[byte >> 4]; - *ucsp++ = ucs_ch; + *ucsp++ = (u_int16_t)ucs_ch; ucs_ch = hexdigits[byte & 0x0F]; - *ucsp++ = ucs_ch; + *ucsp++ = (u_int16_t)ucs_ch; } /* * Make a previous combining sequence canonical @@ -811,7 +811,7 @@ utf8_normalizestr(const u_int8_t* instr, size_t inlen, u_int8_t* outstr, goto nonASCII; } /* ASCII is already normalized. */ - *outstr++ = byte; + *outstr++ = (u_int8_t)byte; } exit: *outlen = outstr - outbufstart; @@ -851,7 +851,7 @@ nonASCII: if (unicode_bytes <= sizeof(unicodebuf)) { unistr = &unicodebuf[0]; } else { - MALLOC(unistr, uint16_t *, unicode_bytes, M_TEMP, M_WAITOK); + unistr = kheap_alloc(KHEAP_DATA_BUFFERS, unicode_bytes, Z_WAITOK); } /* Normalize the string. 
*/ @@ -864,7 +864,7 @@ nonASCII: outstr = outbufstart + uft8_bytes; } if (unistr && unistr != &unicodebuf[0]) { - FREE(unistr, M_TEMP); + kheap_free(KHEAP_DATA_BUFFERS, unistr, unicode_bytes); } goto exit; } @@ -1068,7 +1068,7 @@ unicode_combine(u_int16_t base, u_int16_t combining) ((const u_int32_t *)__CFUniCharBMPPrecompDestinationTable + (value & 0xFFFF)), (value >> 16), base); } - return value; + return (u_int16_t)value; } @@ -1095,7 +1095,7 @@ prioritysort(u_int16_t* characters, int count) p1 = p2; p2 = get_combining_class(*ch2); if (p1 > p2 && p2 != 0) { - u_int32_t tmp; + u_int16_t tmp; tmp = *ch1; *ch1 = *ch2; diff --git a/bsd/vfs/vfs_vnops.c b/bsd/vfs/vfs_vnops.c index cadc0d367..fa8b10370 100644 --- a/bsd/vfs/vfs_vnops.c +++ b/bsd/vfs/vfs_vnops.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2014 Apple Inc. All rights reserved. + * Copyright (c) 2000-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -169,12 +169,19 @@ vn_open(struct nameidata *ndp, int fmode, int cmode) int vn_open_modflags(struct nameidata *ndp, int *fmodep, int cmode) { - struct vnode_attr va; + int error; + struct vnode_attr *vap; - VATTR_INIT(&va); - VATTR_SET(&va, va_mode, cmode); + vap = kheap_alloc(KHEAP_TEMP, sizeof(struct vnode_attr), M_WAITOK); + + VATTR_INIT(vap); + VATTR_SET(vap, va_mode, (mode_t)cmode); + + error = vn_open_auth(ndp, fmodep, vap); + + kheap_free(KHEAP_TEMP, vap, sizeof(struct vnode_attr)); - return vn_open_auth(ndp, fmodep, &va); + return error; } static int @@ -395,6 +402,11 @@ again: fmode |= FENCRYPTED; } + if ((fmode & O_NOFOLLOW_ANY) && (fmode & (O_SYMLINK | O_NOFOLLOW))) { + error = EINVAL; + goto out; + } + /* * O_CREAT */ @@ -418,6 +430,10 @@ again: if ((fmode & O_EXCL) == 0 && (fmode & O_NOFOLLOW) == 0 && (origcnflags & FOLLOW) != 0) { ndp->ni_cnd.cn_flags |= FOLLOW; } + if (fmode & O_NOFOLLOW_ANY) { + /* will return ELOOP on the first symlink to be hit */ + ndp->ni_flag |= NAMEI_NOFOLLOW_ANY; + } continue_create_lookup: if ((error = namei(ndp))) { @@ -534,6 +550,10 @@ continue_create_lookup: if (fmode & O_NOFOLLOW || fmode & O_SYMLINK || (origcnflags & FOLLOW) == 0) { ndp->ni_cnd.cn_flags &= ~FOLLOW; } + if (fmode & O_NOFOLLOW_ANY) { + /* will return ELOOP on the first symlink to be hit */ + ndp->ni_flag |= NAMEI_NOFOLLOW_ANY; + } /* Do a lookup, possibly going directly to filesystem for compound operation */ do { @@ -824,9 +844,7 @@ vn_read_swapfile( while (swap_count > 0) { if (my_swap_page == NULL) { - MALLOC(my_swap_page, char *, PAGE_SIZE, - M_TEMP, M_WAITOK); - memset(my_swap_page, '\0', PAGE_SIZE); + my_swap_page = kheap_alloc(KHEAP_TEMP, PAGE_SIZE, Z_WAITOK | Z_ZERO); /* add an end-of-line to keep line counters happy */ my_swap_page[PAGE_SIZE - 1] = '\n'; } @@ -837,17 +855,14 @@ vn_read_swapfile( prev_resid = uio_resid(uio); error = uiomove((caddr_t) my_swap_page, - this_count, + (int)this_count, uio); if (error) { break; } swap_count -= (prev_resid - uio_resid(uio)); } - if (my_swap_page != NULL) { - FREE(my_swap_page, M_TEMP); - my_swap_page = NULL; - } + kheap_free(KHEAP_TEMP, my_swap_page, PAGE_SIZE); return error; } @@ -870,6 +885,10 @@ vn_rdwr( int64_t resid; int result; + if (len < 0) { + return EINVAL; + } + result = vn_rdwr_64(rw, vp, (uint64_t)(uintptr_t)base, @@ -883,7 +902,7 @@ vn_rdwr( /* "resid" should be bounded above by "len," which is an int */ if (aresid != NULL) { - *aresid = resid; + *aresid = (int)resid; } return result; @@ -917,9 +936,14 @@ vn_rdwr_64( } else { spacetype = UIO_SYSSPACE; } + + if (len < 0) { + 
return EINVAL; + } + auio = uio_createwithbuffer(1, offset, spacetype, rw, &uio_buf[0], sizeof(uio_buf)); - uio_addiov(auio, base, len); + uio_addiov(auio, CAST_USER_ADDR_T(base), (user_size_t)len); #if CONFIG_MACF /* XXXMAC @@ -950,6 +974,7 @@ vn_rdwr_64( if (aresid) { *aresid = uio_resid(auio); + assert(*aresid <= len); } else if (uio_resid(auio) && error == 0) { error = EIO; } @@ -994,10 +1019,21 @@ vn_read(struct fileproc *fp, struct uio *uio, int flags, vfs_context_t ctx) struct vnode *vp; int error; int ioflag; - off_t count; - int offset_locked = 0; + off_t read_offset; + user_ssize_t read_len; + user_ssize_t adjusted_read_len; + user_ssize_t clippedsize; + bool offset_locked; - vp = (struct vnode *)fp->f_fglob->fg_data; + read_len = uio_resid(uio); + if (read_len < 0 || read_len > INT_MAX) { + return EINVAL; + } + adjusted_read_len = read_len; + clippedsize = 0; + offset_locked = false; + + vp = (struct vnode *)fp->fp_glob->fg_data; if ((error = vnode_getwithref(vp))) { return error; } @@ -1013,33 +1049,65 @@ vn_read(struct fileproc *fp, struct uio *uio, int flags, vfs_context_t ctx) /* This signals to VNOP handlers that this read came from a file table read */ ioflag = IO_SYSCALL_DISPATCH; - if (fp->f_fglob->fg_flag & FNONBLOCK) { + if (fp->fp_glob->fg_flag & FNONBLOCK) { ioflag |= IO_NDELAY; } - if ((fp->f_fglob->fg_flag & FNOCACHE) || vnode_isnocache(vp)) { + if ((fp->fp_glob->fg_flag & FNOCACHE) || vnode_isnocache(vp)) { ioflag |= IO_NOCACHE; } - if (fp->f_fglob->fg_flag & FENCRYPTED) { + if (fp->fp_glob->fg_flag & FENCRYPTED) { ioflag |= IO_ENCRYPTED; } - if (fp->f_fglob->fg_flag & FUNENCRYPTED) { + if (fp->fp_glob->fg_flag & FUNENCRYPTED) { ioflag |= IO_SKIP_ENCRYPTION; } - if (fp->f_fglob->fg_flag & O_EVTONLY) { + if (fp->fp_glob->fg_flag & O_EVTONLY) { ioflag |= IO_EVTONLY; } - if (fp->f_fglob->fg_flag & FNORDAHEAD) { + if (fp->fp_glob->fg_flag & FNORDAHEAD) { ioflag |= IO_RAOFF; } if ((flags & FOF_OFFSET) == 0) { if ((vnode_vtype(vp) == VREG) && !vnode_isswap(vp)) { - vn_offset_lock(fp->f_fglob); - offset_locked = 1; + vn_offset_lock(fp->fp_glob); + offset_locked = true; + } + read_offset = fp->fp_glob->fg_offset; + uio_setoffset(uio, read_offset); + } else { + read_offset = uio_offset(uio); + /* POSIX allows negative offsets for character devices. 
*/ + if ((read_offset < 0) && (vnode_vtype(vp) != VCHR)) { + error = EINVAL; + goto error_out; } - uio->uio_offset = fp->f_fglob->fg_offset; } - count = uio_resid(uio); + + if (read_offset == INT64_MAX) { + /* can't read any more */ + error = 0; + goto error_out; + } + + /* + * If offset + len will cause overflow, reduce the len to a value + * (adjusted_read_len) where it won't + */ + if ((read_offset >= 0) && (INT64_MAX - read_offset) < read_len) { + /* + * 0 read_offset INT64_MAX + * |-----------------------------------------------|----------|~~~ + * <--read_len--> + * <-adjusted-> + */ + adjusted_read_len = (user_ssize_t)(INT64_MAX - read_offset); + } + + if (adjusted_read_len < read_len) { + uio_setresid(uio, adjusted_read_len); + clippedsize = read_len - adjusted_read_len; + } if (vnode_isswap(vp) && !(IO_SKIP_ENCRYPTION & ioflag)) { /* special case for swap files */ @@ -1048,12 +1116,18 @@ vn_read(struct fileproc *fp, struct uio *uio, int flags, vfs_context_t ctx) error = VNOP_READ(vp, uio, ioflag, ctx); } + if (clippedsize) { + uio_setresid(uio, (uio_resid(uio) + clippedsize)); + } + if ((flags & FOF_OFFSET) == 0) { - fp->f_fglob->fg_offset += count - uio_resid(uio); - if (offset_locked) { - vn_offset_unlock(fp->f_fglob); - offset_locked = 0; - } + fp->fp_glob->fg_offset += read_len - uio_resid(uio); + } + +error_out: + if (offset_locked) { + vn_offset_unlock(fp->fp_glob); + offset_locked = false; } (void)vnode_put(vp); @@ -1069,15 +1143,24 @@ vn_write(struct fileproc *fp, struct uio *uio, int flags, vfs_context_t ctx) { struct vnode *vp; int error, ioflag; - off_t count; - int clippedsize = 0; - int partialwrite = 0; - int residcount, oldcount; - int offset_locked = 0; + off_t write_offset; + off_t write_end_offset; + user_ssize_t write_len; + user_ssize_t adjusted_write_len; + user_ssize_t clippedsize; + bool offset_locked; proc_t p = vfs_context_proc(ctx); + rlim_t rlim_cur_fsize = p ? proc_limitgetcur(p, RLIMIT_FSIZE, TRUE) : 0; - count = 0; - vp = (struct vnode *)fp->f_fglob->fg_data; + write_len = uio_resid(uio); + if (write_len < 0 || write_len > INT_MAX) { + return EINVAL; + } + adjusted_write_len = write_len; + clippedsize = 0; + offset_locked = false; + + vp = (struct vnode *)fp->fp_glob->fg_data; if ((error = vnode_getwithref(vp))) { return error; } @@ -1096,22 +1179,22 @@ vn_write(struct fileproc *fp, struct uio *uio, int flags, vfs_context_t ctx) */ ioflag = (IO_UNIT | IO_SYSCALL_DISPATCH); - if (vp->v_type == VREG && (fp->f_fglob->fg_flag & O_APPEND)) { + if (vp->v_type == VREG && (fp->fp_glob->fg_flag & O_APPEND)) { ioflag |= IO_APPEND; } - if (fp->f_fglob->fg_flag & FNONBLOCK) { + if (fp->fp_glob->fg_flag & FNONBLOCK) { ioflag |= IO_NDELAY; } - if ((fp->f_fglob->fg_flag & FNOCACHE) || vnode_isnocache(vp)) { + if ((fp->fp_glob->fg_flag & FNOCACHE) || vnode_isnocache(vp)) { ioflag |= IO_NOCACHE; } - if (fp->f_fglob->fg_flag & FNODIRECT) { + if (fp->fp_glob->fg_flag & FNODIRECT) { ioflag |= IO_NODIRECT; } - if (fp->f_fglob->fg_flag & FSINGLE_WRITER) { + if (fp->fp_glob->fg_flag & FSINGLE_WRITER) { ioflag |= IO_SINGLE_WRITER; } - if (fp->f_fglob->fg_flag & O_EVTONLY) { + if (fp->fp_glob->fg_flag & O_EVTONLY) { ioflag |= IO_EVTONLY; } @@ -1122,23 +1205,69 @@ vn_write(struct fileproc *fp, struct uio *uio, int flags, vfs_context_t ctx) * XXX the non-essential metadata without some additional VFS work; * XXX the intent at this point is to plumb the interface for it. 
*/ - if ((fp->f_fglob->fg_flag & (O_FSYNC | O_DSYNC)) || + if ((fp->fp_glob->fg_flag & (O_FSYNC | O_DSYNC)) || (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS))) { ioflag |= IO_SYNC; } if ((flags & FOF_OFFSET) == 0) { if ((vnode_vtype(vp) == VREG) && !vnode_isswap(vp)) { - vn_offset_lock(fp->f_fglob); - offset_locked = 1; + vn_offset_lock(fp->fp_glob); + offset_locked = true; } - uio->uio_offset = fp->f_fglob->fg_offset; - count = uio_resid(uio); + write_offset = fp->fp_glob->fg_offset; + uio_setoffset(uio, write_offset); + } else { + /* for pwrite, append should be ignored */ + ioflag &= ~IO_APPEND; + write_offset = uio_offset(uio); + /* POSIX allows negative offsets for character devices. */ + if ((write_offset < 0) && (vnode_vtype(vp) != VCHR)) { + error = EINVAL; + goto error_out; + } + } + + if (write_offset == INT64_MAX) { + /* writes are not possible */ + error = EFBIG; + goto error_out; + } + + /* + * write_len is the original write length that was requested. + * We may however need to reduce that because of two reasons + * + * 1) If write_offset + write_len will exceed OFF_T_MAX (i.e. INT64_MAX) + * and/or + * 2) If write_offset + write_len will exceed the administrative + * limit for the maximum file size. + * + * In both cases the write will be denied if we can't write even a single + * byte otherwise it will be "clipped" (i.e. a short write). + */ + + /* + * If offset + len will cause overflow, reduce the len + * to a value (adjusted_write_len) where it won't + */ + if ((write_offset >= 0) && (INT64_MAX - write_offset) < write_len) { + /* + * 0 write_offset INT64_MAX + * |-----------------------------------------------|----------|~~~ + * <--write_len--> + * <-adjusted-> + */ + adjusted_write_len = (user_ssize_t)(INT64_MAX - write_offset); } - if (((flags & FOF_OFFSET) == 0) && - vfs_context_proc(ctx) && (vp->v_type == VREG) && - (((rlim_t)(uio->uio_offset + uio_resid(uio)) > p->p_rlimit[RLIMIT_FSIZE].rlim_cur) || - ((rlim_t)uio_resid(uio) > (p->p_rlimit[RLIMIT_FSIZE].rlim_cur - uio->uio_offset)))) { + + /* write_end_offset will always be [0, INT64_MAX] */ + write_end_offset = write_offset + adjusted_write_len; + + if (p && (vp->v_type == VREG) && + (rlim_cur_fsize != RLIM_INFINITY) && + (rlim_cur_fsize <= INT64_MAX) && + (write_end_offset > (off_t)rlim_cur_fsize)) { /* * If the requested residual would cause us to go past the * administrative limit, then we need to adjust the residual @@ -1146,55 +1275,55 @@ vn_write(struct fileproc *fp, struct uio *uio, int flags, vfs_context_t ctx) * we can't do that (e.g. the residual is already 1 byte), * then we fail the write with EFBIG. 
*/ - residcount = uio_resid(uio); - if ((rlim_t)(uio->uio_offset + uio_resid(uio)) > p->p_rlimit[RLIMIT_FSIZE].rlim_cur) { - clippedsize = (uio->uio_offset + uio_resid(uio)) - p->p_rlimit[RLIMIT_FSIZE].rlim_cur; - } else if ((rlim_t)uio_resid(uio) > (p->p_rlimit[RLIMIT_FSIZE].rlim_cur - uio->uio_offset)) { - clippedsize = (p->p_rlimit[RLIMIT_FSIZE].rlim_cur - uio->uio_offset); - } - if (clippedsize >= residcount) { + if (write_offset >= (off_t)rlim_cur_fsize) { + /* + * 0 rlim_fsize write_offset write_end INT64_MAX + * |------------------------|----------|-------------|--------| + * <--write_len--> + * + * write not permitted + */ psignal(p, SIGXFSZ); error = EFBIG; goto error_out; } - partialwrite = 1; - uio_setresid(uio, residcount - clippedsize); + + /* + * 0 write_offset rlim_fsize write_end INT64_MAX + * |------------------------|-----------|---------|------------| + * <------write_len------> + * <-adjusted--> + */ + adjusted_write_len = (user_ssize_t)((off_t)rlim_cur_fsize - write_offset); + assert((adjusted_write_len > 0) && (adjusted_write_len < write_len)); } - if ((flags & FOF_OFFSET) != 0) { - /* for pwrite, append should be ignored */ - ioflag &= ~IO_APPEND; - if (p && (vp->v_type == VREG) && - ((rlim_t)uio->uio_offset >= p->p_rlimit[RLIMIT_FSIZE].rlim_cur)) { - psignal(p, SIGXFSZ); - error = EFBIG; - goto error_out; - } - if (p && (vp->v_type == VREG) && - ((rlim_t)(uio->uio_offset + uio_resid(uio)) > p->p_rlimit[RLIMIT_FSIZE].rlim_cur)) { - //Debugger("vn_bwrite:overstepping the bounds"); - residcount = uio_resid(uio); - clippedsize = (uio->uio_offset + uio_resid(uio)) - p->p_rlimit[RLIMIT_FSIZE].rlim_cur; - partialwrite = 1; - uio_setresid(uio, residcount - clippedsize); - } + + if (adjusted_write_len < write_len) { + uio_setresid(uio, adjusted_write_len); + clippedsize = write_len - adjusted_write_len; } error = VNOP_WRITE(vp, uio, ioflag, ctx); - if (partialwrite) { - oldcount = uio_resid(uio); - uio_setresid(uio, oldcount + clippedsize); + /* + * If we had to reduce the size of write requested either because + * of rlimit or because it would have exceeded + * maximum file size, we have to add that back to the residual so + * it correctly reflects what we did in this function. 
+ */ + if (clippedsize) { + uio_setresid(uio, (uio_resid(uio) + clippedsize)); } if ((flags & FOF_OFFSET) == 0) { if (ioflag & IO_APPEND) { - fp->f_fglob->fg_offset = uio->uio_offset; + fp->fp_glob->fg_offset = uio_offset(uio); } else { - fp->f_fglob->fg_offset += count - uio_resid(uio); + fp->fp_glob->fg_offset += (write_len - uio_resid(uio)); } if (offset_locked) { - vn_offset_unlock(fp->f_fglob); - offset_locked = 0; + vn_offset_unlock(fp->fp_glob); + offset_locked = false; } } @@ -1221,7 +1350,7 @@ vn_write(struct fileproc *fp, struct uio *uio, int flags, vfs_context_t ctx) error_out: if (offset_locked) { - vn_offset_unlock(fp->f_fglob); + vn_offset_unlock(fp->fp_glob); } (void)vnode_put(vp); return error; @@ -1455,7 +1584,7 @@ vn_stat(struct vnode *vp, void *sb, kauth_filesec_t *xsec, int isstat64, int nee static int vn_ioctl(struct fileproc *fp, u_long com, caddr_t data, vfs_context_t ctx) { - struct vnode *vp = ((struct vnode *)fp->f_fglob->fg_data); + struct vnode *vp = ((struct vnode *)fp->fp_glob->fg_data); off_t file_size; int error; struct vnode *ttyvp; @@ -1476,16 +1605,24 @@ vn_ioctl(struct fileproc *fp, u_long com, caddr_t data, vfs_context_t ctx) case VREG: case VDIR: if (com == FIONREAD) { + off_t temp_nbytes; if ((error = vnode_size(vp, &file_size, ctx)) != 0) { goto out; } - *(int *)data = file_size - fp->f_fglob->fg_offset; + temp_nbytes = file_size - fp->fp_glob->fg_offset; + if (temp_nbytes > INT_MAX) { + *(int *)data = INT_MAX; + } else if (temp_nbytes < 0) { + *(int *)data = 0; + } else { + *(int *)data = (int)temp_nbytes; + } goto out; } if (com == FIONBIO || com == FIOASYNC) { /* XXX */ goto out; } - /* fall into ... */ + OS_FALLTHROUGH; default: error = ENOTTY; @@ -1495,7 +1632,7 @@ vn_ioctl(struct fileproc *fp, u_long com, caddr_t data, vfs_context_t ctx) case VCHR: case VBLK: - if (com == TIOCREVOKE) { + if (com == TIOCREVOKE || com == TIOCREVOKECLEAR) { error = ENOTTY; goto out; } @@ -1525,7 +1662,7 @@ vn_ioctl(struct fileproc *fp, u_long com, caddr_t data, vfs_context_t ctx) } goto out; } - error = VNOP_IOCTL(vp, com, data, fp->f_fglob->fg_flag, ctx); + error = VNOP_IOCTL(vp, com, data, fp->fp_glob->fg_flag, ctx); if (error == 0 && com == TIOCSCTTY) { sessp = proc_session(vfs_context_proc(ctx)); @@ -1550,12 +1687,12 @@ static int vn_select(struct fileproc *fp, int which, void *wql, __unused vfs_context_t ctx) { int error; - struct vnode * vp = (struct vnode *)fp->f_fglob->fg_data; + struct vnode * vp = (struct vnode *)fp->fp_glob->fg_data; struct vfs_context context; if ((error = vnode_getwithref(vp)) == 0) { context.vc_thread = current_thread(); - context.vc_ucred = fp->f_fglob->fg_cred; + context.vc_ucred = fp->fp_glob->fg_cred; #if CONFIG_MACF /* @@ -1566,7 +1703,7 @@ vn_select(struct fileproc *fp, int which, void *wql, __unused vfs_context_t ctx) error = mac_vnode_check_select(ctx, vp, which); if (error == 0) #endif - error = VNOP_SELECT(vp, which, fp->f_fglob->fg_flag, wql, ctx); + error = VNOP_SELECT(vp, which, fp->fp_glob->fg_flag, wql, ctx); (void)vnode_put(vp); } @@ -1584,7 +1721,7 @@ vn_closefile(struct fileglob *fg, vfs_context_t ctx) if ((error = vnode_getwithref(vp)) == 0) { if (FILEGLOB_DTYPE(fg) == DTYPE_VNODE && - ((fg->fg_flag & FHASLOCK) != 0 || + ((fg->fg_flag & FWASLOCKED) != 0 || (fg->fg_lflags & FG_HAS_OFDLOCK) != 0)) { struct flock lf = { .l_whence = SEEK_SET, @@ -1593,7 +1730,7 @@ vn_closefile(struct fileglob *fg, vfs_context_t ctx) .l_type = F_UNLCK }; - if ((fg->fg_flag & FHASLOCK) != 0) { + if ((fg->fg_flag & FWASLOCKED) != 0) { 
(void) VNOP_ADVLOCK(vp, (caddr_t)fg, F_UNLCK, &lf, F_FLOCK, ctx, NULL); } @@ -1699,7 +1836,7 @@ vn_kqfilter(struct fileproc *fp, struct knote *kn, struct kevent_qos_s *kev) int error = 0; int result = 0; - vp = (struct vnode *)fp->f_fglob->fg_data; + vp = (struct vnode *)fp->fp_glob->fg_data; /* * Don't attach a knote to a dead vnode. @@ -1733,7 +1870,7 @@ vn_kqfilter(struct fileproc *fp, struct knote *kn, struct kevent_qos_s *kev) if (error == 0) { #if CONFIG_MACF - error = mac_vnode_check_kqfilter(ctx, fp->f_fglob->fg_cred, kn, vp); + error = mac_vnode_check_kqfilter(ctx, fp->fp_glob->fg_cred, kn, vp); if (error) { vnode_put(vp); goto out; @@ -1887,7 +2024,7 @@ filt_vnode_common(struct knote *kn, struct kevent_qos_s *kev, vnode_t vp, long h } else { switch (kn->kn_filter) { case EVFILT_READ: - data = vnode_readable_data_count(vp, kn->kn_fp->f_fglob->fg_offset, (kn->kn_flags & EV_POLL)); + data = vnode_readable_data_count(vp, kn->kn_fp->fp_glob->fg_offset, (kn->kn_flags & EV_POLL)); activate = (data != 0); break; case EVFILT_WRITE: diff --git a/bsd/vfs/vfs_xattr.c b/bsd/vfs/vfs_xattr.c index d649dd232..00ee6c94c 100644 --- a/bsd/vfs/vfs_xattr.c +++ b/bsd/vfs/vfs_xattr.c @@ -38,7 +38,7 @@ #include #include #include -#include +#include #include #include #include @@ -149,7 +149,7 @@ vn_getxattr(vnode_t vp, const char *name, uio_t uio, size_t *size, } /* The offset can only be non-zero for resource forks. */ if (uio != NULL && uio_offset(uio) != 0 && - bcmp(name, XATTR_RESOURCEFORK_NAME, sizeof(XATTR_RESOURCEFORK_NAME)) != 0) { + strncmp(name, XATTR_RESOURCEFORK_NAME, sizeof(XATTR_RESOURCEFORK_NAME)) != 0) { error = EINVAL; goto out; } @@ -157,7 +157,7 @@ vn_getxattr(vnode_t vp, const char *name, uio_t uio, size_t *size, /* The offset can only be non-zero for resource forks. */ if (uio != NULL && uio_offset(uio) != 0 && - bcmp(name, XATTR_RESOURCEFORK_NAME, sizeof(XATTR_RESOURCEFORK_NAME)) != 0) { + strncmp(name, XATTR_RESOURCEFORK_NAME, sizeof(XATTR_RESOURCEFORK_NAME)) != 0) { error = EINVAL; goto out; } @@ -211,7 +211,7 @@ vn_setxattr(vnode_t vp, const char *name, uio_t uio, int options, vfs_context_t } /* The offset can only be non-zero for resource forks. */ if (uio_offset(uio) != 0 && - bcmp(name, XATTR_RESOURCEFORK_NAME, sizeof(XATTR_RESOURCEFORK_NAME)) != 0) { + strncmp(name, XATTR_RESOURCEFORK_NAME, sizeof(XATTR_RESOURCEFORK_NAME)) != 0) { error = EINVAL; goto out; } @@ -381,15 +381,12 @@ out: int xattr_validatename(const char *name) { - int namelen; + size_t namelen; if (name == NULL || name[0] == '\0') { return EINVAL; } namelen = strlen(name); - if (name[namelen] != '\0') { - return ENAMETOOLONG; - } if (utf8_validatestr((const unsigned char *)name, namelen) != 0) { return EINVAL; @@ -438,6 +435,12 @@ vnode_setasnamedstream_internal(vnode_t vp, vnode_t svp) */ vnode_update_identity(svp, vp, NULL, 0, 0, VNODE_UPDATE_NAMEDSTREAM_PARENT); + if (vnode_isdyldsharedcache(vp)) { + vnode_lock_spin(svp); + svp->v_flag |= VSHARED_DYLD; + vnode_unlock(svp); + } + return; } @@ -560,7 +563,7 @@ vnode_relenamedstream(vnode_t vp, vnode_t svp) cn.cn_pnbuf = tmpname; cn.cn_pnlen = sizeof(tmpname); cn.cn_nameptr = cn.cn_pnbuf; - cn.cn_namelen = strlen(tmpname); + cn.cn_namelen = (int)strlen(tmpname); /* * Obtain the vnode for the shadow files directory. 
Make sure to @@ -608,7 +611,10 @@ vnode_flushnamedstream(vnode_t vp, vnode_t svp, vfs_context_t context) !VATTR_IS_SUPPORTED(&va, va_data_size)) { return 0; } - datasize = va.va_data_size; + if (va.va_data_size > UINT32_MAX) { + return EINVAL; + } + datasize = (size_t)va.va_data_size; if (datasize == 0) { (void) default_removexattr(vp, XATTR_RESOURCEFORK_NAME, 0, context); return 0; @@ -708,7 +714,7 @@ vnode_verifynamedstream(vnode_t vp) cn.cn_pnbuf = tmpname; cn.cn_pnlen = sizeof(tmpname); cn.cn_nameptr = cn.cn_pnbuf; - cn.cn_namelen = strlen(tmpname); + cn.cn_namelen = (int)strlen(tmpname); if (VNOP_LOOKUP(shadow_dvp, &shadowfile, &cn, kernelctx) == 0) { /* is the pointer the same? */ @@ -762,7 +768,7 @@ retry_create: cn.cn_pnbuf = tmpname; cn.cn_pnlen = sizeof(tmpname); cn.cn_nameptr = cn.cn_pnbuf; - cn.cn_namelen = strlen(tmpname); + cn.cn_namelen = (int)strlen(tmpname); /* Pick up uid, gid, mode and date from original file. */ VATTR_INIT(&va); @@ -895,7 +901,7 @@ default_getnamedstream(vnode_t vp, vnode_t *svpp, const char *name, enum nsopera /* * Only the "com.apple.ResourceFork" stream is supported here. */ - if (bcmp(name, XATTR_RESOURCEFORK_NAME, sizeof(XATTR_RESOURCEFORK_NAME)) != 0) { + if (strncmp(name, XATTR_RESOURCEFORK_NAME, sizeof(XATTR_RESOURCEFORK_NAME)) != 0) { *svpp = NULLVP; return ENOATTR; } @@ -1047,7 +1053,7 @@ default_makenamedstream(vnode_t vp, vnode_t *svpp, const char *name, vfs_context /* * Only the "com.apple.ResourceFork" stream is supported here. */ - if (bcmp(name, XATTR_RESOURCEFORK_NAME, sizeof(XATTR_RESOURCEFORK_NAME)) != 0) { + if (strncmp(name, XATTR_RESOURCEFORK_NAME, sizeof(XATTR_RESOURCEFORK_NAME)) != 0) { *svpp = NULLVP; return ENOATTR; } @@ -1078,7 +1084,7 @@ default_removenamedstream(vnode_t vp, const char *name, vfs_context_t context) /* * Only the "com.apple.ResourceFork" stream is supported here. */ - if (bcmp(name, XATTR_RESOURCEFORK_NAME, sizeof(XATTR_RESOURCEFORK_NAME)) != 0) { + if (strncmp(name, XATTR_RESOURCEFORK_NAME, sizeof(XATTR_RESOURCEFORK_NAME)) != 0) { return ENOATTR; } /* @@ -1146,7 +1152,7 @@ get_shadow_dir(vnode_t *sdvpp) cn.cn_pnbuf = tmpname; cn.cn_pnlen = sizeof(tmpname); cn.cn_nameptr = cn.cn_pnbuf; - cn.cn_namelen = strlen(tmpname); + cn.cn_namelen = (int)strlen(tmpname); /* * owned by root, only readable by root, hidden @@ -1606,15 +1612,15 @@ default_getxattr(vnode_t vp, const char *name, uio_t uio, size_t *size, attr_header_t *header; attr_entry_t *entry; u_int8_t *attrdata; - size_t datalen; - int namelen; + u_int32_t datalen; + size_t namelen; int isrsrcfork; int fileflags; int i; int error; fileflags = FREAD; - if (strcmp(name, XATTR_RESOURCEFORK_NAME) == 0) { + if (strncmp(name, XATTR_RESOURCEFORK_NAME, sizeof(XATTR_RESOURCEFORK_NAME)) == 0) { isrsrcfork = 1; /* * Open the file locked (shared) since the Carbon @@ -1635,7 +1641,7 @@ default_getxattr(vnode_t vp, const char *name, uio_t uio, size_t *size, } /* Get the Finder Info. 
*/ - if (strcmp(name, XATTR_FINDERINFO_NAME) == 0) { + if (strncmp(name, XATTR_FINDERINFO_NAME, sizeof(XATTR_FINDERINFO_NAME)) == 0) { if (ainfo.finderinfo == NULL || ainfo.emptyfinderinfo) { error = ENOATTR; } else if (uio == NULL) { @@ -1687,7 +1693,7 @@ default_getxattr(vnode_t vp, const char *name, uio_t uio, size_t *size, */ for (i = 0; i < header->num_attrs && ATTR_VALID(entry, ainfo); i++) { if (strncmp((const char *)entry->name, name, namelen) == 0) { - datalen = (size_t)entry->length; + datalen = entry->length; if (uio == NULL) { *size = datalen; error = 0; @@ -1740,7 +1746,13 @@ default_setxattr(vnode_t vp, const char *name, uio_t uio, int options, vfs_conte char finfo[FINDERINFOSIZE]; datalen = uio_resid(uio); - namelen = strlen(name) + 1; + if (datalen > XATTR_MAXSIZE) { + return EINVAL; + } + namelen = (int)strlen(name) + 1; + if (namelen > UINT8_MAX) { + return EINVAL; + } entrylen = ATTR_ENTRY_LENGTH(namelen); /* @@ -1758,7 +1770,7 @@ default_setxattr(vnode_t vp, const char *name, uio_t uio, int options, vfs_conte * * NOTE: this copies the Finder Info data into the "finfo" local. */ - if (bcmp(name, XATTR_FINDERINFO_NAME, sizeof(XATTR_FINDERINFO_NAME)) == 0) { + if (strncmp(name, XATTR_FINDERINFO_NAME, sizeof(XATTR_FINDERINFO_NAME)) == 0) { /* * TODO: check the XATTR_CREATE and XATTR_REPLACE flags. * That means we probably have to open_xattrfile and get_xattrinfo. @@ -1766,7 +1778,7 @@ default_setxattr(vnode_t vp, const char *name, uio_t uio, int options, vfs_conte if (uio_offset(uio) != 0 || datalen != FINDERINFOSIZE) { return EINVAL; } - error = uiomove(finfo, datalen, uio); + error = uiomove(finfo, (int)datalen, uio); if (error) { return error; } @@ -1795,7 +1807,7 @@ start: } /* Set the Finder Info. */ - if (bcmp(name, XATTR_FINDERINFO_NAME, sizeof(XATTR_FINDERINFO_NAME)) == 0) { + if (strncmp(name, XATTR_FINDERINFO_NAME, sizeof(XATTR_FINDERINFO_NAME)) == 0) { if (ainfo.finderinfo && !ainfo.emptyfinderinfo) { /* attr exists and "create" was specified? */ if (options & XATTR_CREATE) { @@ -1842,8 +1854,8 @@ start: } /* Write the Resource Fork. */ - if (bcmp(name, XATTR_RESOURCEFORK_NAME, sizeof(XATTR_RESOURCEFORK_NAME)) == 0) { - u_int32_t endoffset; + if (strncmp(name, XATTR_RESOURCEFORK_NAME, sizeof(XATTR_RESOURCEFORK_NAME)) == 0) { + off_t endoffset; if (!vnode_isreg(vp)) { error = EPERM; @@ -1876,6 +1888,10 @@ start: } endoffset = uio_resid(uio) + uio_offset(uio); /* new size */ + if (endoffset > UINT32_MAX || endoffset < 0) { + error = EINVAL; + goto out; + } uio_setoffset(uio, uio_offset(uio) + ainfo.rsrcfork->offset); error = VNOP_WRITE(xvp, uio, 0, context); if (error) { @@ -1883,7 +1899,7 @@ start: } uio_setoffset(uio, uio_offset(uio) - ainfo.rsrcfork->offset); if (endoffset > ainfo.rsrcfork->length) { - ainfo.rsrcfork->length = endoffset; + ainfo.rsrcfork->length = (u_int32_t)endoffset; ainfo.iosize = sizeof(attr_header_t); error = write_xattrinfo(&ainfo); goto out; @@ -1935,7 +1951,7 @@ start: } } else { attrdata = (u_int8_t *)header + entry->offset; - error = uiomove((caddr_t)attrdata, datalen, uio); + error = uiomove((caddr_t)attrdata, (int)datalen, uio); if (error) { goto out; } @@ -2050,7 +2066,7 @@ start: } else { attrdata = (u_int8_t *)header + header->data_start + header->data_length; - error = uiomove((caddr_t)attrdata, datalen, uio); + error = uiomove((caddr_t)attrdata, (int)datalen, uio); if (error) { printf("setxattr: uiomove error %d\n", error); goto out; @@ -2058,9 +2074,9 @@ start: } /* Create the attribute entry. 
*/ - lastentry->length = datalen; + lastentry->length = (u_int32_t)datalen; lastentry->offset = header->data_start + header->data_length; - lastentry->namelen = namelen; + lastentry->namelen = (u_int8_t)namelen; lastentry->flags = 0; bcopy(name, &lastentry->name[0], namelen); @@ -2211,7 +2227,7 @@ default_removexattr(vnode_t vp, const char *name, __unused int options, vfs_cont error = ENOATTR; goto out; } - namelen = strlen(name) + 1; + namelen = (int)strlen(name) + 1; header = ainfo.attrhdr; entry = ainfo.attr_entry; @@ -2442,8 +2458,8 @@ open_xattrfile(vnode_t vp, int fileflags, vnode_t *xvpp, vfs_context_t context) { vnode_t xvp = NULLVP; vnode_t dvp = NULLVP; - struct vnode_attr va; - struct nameidata nd; + struct vnode_attr *va = NULL; + struct nameidata *nd = NULL; char smallname[64]; char *filename = NULL; const char *basename = NULL; @@ -2480,7 +2496,7 @@ open_xattrfile(vnode_t vp, int fileflags, vnode_t *xvpp, vfs_context_t context) len = snprintf(filename, sizeof(smallname), "%s%s", ATTR_FILE_PREFIX, basename); if (len >= sizeof(smallname)) { len++; /* snprintf result doesn't include '\0' */ - MALLOC(filename, char *, len, M_TEMP, M_WAITOK); + filename = kheap_alloc(KHEAP_TEMP, len, Z_WAITOK); len = snprintf(filename, len, "%s%s", ATTR_FILE_PREFIX, basename); } /* @@ -2492,24 +2508,27 @@ open_xattrfile(vnode_t vp, int fileflags, vnode_t *xvpp, vfs_context_t context) * file security from the EA must always get access */ lookup: - NDINIT(&nd, LOOKUP, OP_OPEN, LOCKLEAF | NOFOLLOW | USEDVP | DONOTAUTH, + nd = kheap_alloc(KHEAP_TEMP, sizeof(struct nameidata), Z_WAITOK); + NDINIT(nd, LOOKUP, OP_OPEN, LOCKLEAF | NOFOLLOW | USEDVP | DONOTAUTH, UIO_SYSSPACE, CAST_USER_ADDR_T(filename), context); - nd.ni_dvp = dvp; + nd->ni_dvp = dvp; + + va = kheap_alloc(KHEAP_TEMP, sizeof(struct vnode_attr), Z_WAITOK); if (fileflags & O_CREAT) { - nd.ni_cnd.cn_nameiop = CREATE; + nd->ni_cnd.cn_nameiop = CREATE; #if CONFIG_TRIGGERS - nd.ni_op = OP_LINK; + nd->ni_op = OP_LINK; #endif if (dvp != vp) { - nd.ni_cnd.cn_flags |= LOCKPARENT; + nd->ni_cnd.cn_flags |= LOCKPARENT; } - if ((error = namei(&nd))) { - nd.ni_dvp = NULLVP; + if ((error = namei(nd))) { + nd->ni_dvp = NULLVP; error = ENOATTR; goto out; } - if ((xvp = nd.ni_vp) == NULLVP) { + if ((xvp = nd->ni_vp) == NULLVP) { uid_t uid; gid_t gid; mode_t umode; @@ -2517,44 +2536,44 @@ lookup: /* * Pick up uid/gid/mode from target file. 
*/ - VATTR_INIT(&va); - VATTR_WANTED(&va, va_uid); - VATTR_WANTED(&va, va_gid); - VATTR_WANTED(&va, va_mode); - if (VNOP_GETATTR(vp, &va, context) == 0 && - VATTR_IS_SUPPORTED(&va, va_uid) && - VATTR_IS_SUPPORTED(&va, va_gid) && - VATTR_IS_SUPPORTED(&va, va_mode)) { - uid = va.va_uid; - gid = va.va_gid; - umode = va.va_mode & (S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH); + VATTR_INIT(va); + VATTR_WANTED(va, va_uid); + VATTR_WANTED(va, va_gid); + VATTR_WANTED(va, va_mode); + if (VNOP_GETATTR(vp, va, context) == 0 && + VATTR_IS_SUPPORTED(va, va_uid) && + VATTR_IS_SUPPORTED(va, va_gid) && + VATTR_IS_SUPPORTED(va, va_mode)) { + uid = va->va_uid; + gid = va->va_gid; + umode = va->va_mode & (S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH); } else { /* fallback values */ uid = KAUTH_UID_NONE; gid = KAUTH_GID_NONE; umode = S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH; } - VATTR_INIT(&va); - VATTR_SET(&va, va_type, VREG); - VATTR_SET(&va, va_mode, umode); + VATTR_INIT(va); + VATTR_SET(va, va_type, VREG); + VATTR_SET(va, va_mode, umode); if (uid != KAUTH_UID_NONE) { - VATTR_SET(&va, va_uid, uid); + VATTR_SET(va, va_uid, uid); } if (gid != KAUTH_GID_NONE) { - VATTR_SET(&va, va_gid, gid); + VATTR_SET(va, va_gid, gid); } - error = vn_create(dvp, &nd.ni_vp, &nd, &va, + error = vn_create(dvp, &nd->ni_vp, nd, va, VN_CREATE_NOAUTH | VN_CREATE_NOINHERIT | VN_CREATE_NOLABEL, 0, NULL, context); if (error) { error = ENOATTR; } else { - xvp = nd.ni_vp; + xvp = nd->ni_vp; } } - nameidone(&nd); + nameidone(nd); if (dvp != vp) { vnode_put(dvp); /* drop iocount from LOCKPARENT request above */ } @@ -2562,15 +2581,15 @@ lookup: goto out; } } else { - if ((error = namei(&nd))) { - nd.ni_dvp = NULLVP; + if ((error = namei(nd))) { + nd->ni_dvp = NULLVP; error = ENOATTR; goto out; } - xvp = nd.ni_vp; - nameidone(&nd); + xvp = nd->ni_vp; + nameidone(nd); } - nd.ni_dvp = NULLVP; + nd->ni_dvp = NULLVP; if (xvp->v_type != VREG) { error = ENOATTR; @@ -2579,14 +2598,14 @@ lookup: /* * Owners must match. */ - VATTR_INIT(&va); - VATTR_WANTED(&va, va_uid); - if (VNOP_GETATTR(vp, &va, context) == 0 && VATTR_IS_SUPPORTED(&va, va_uid)) { - uid_t owner = va.va_uid; - - VATTR_INIT(&va); - VATTR_WANTED(&va, va_uid); - if (VNOP_GETATTR(xvp, &va, context) == 0 && (owner != va.va_uid)) { + VATTR_INIT(va); + VATTR_WANTED(va, va_uid); + if (VNOP_GETATTR(vp, va, context) == 0 && VATTR_IS_SUPPORTED(va, va_uid)) { + uid_t owner = va->va_uid; + + VATTR_INIT(va); + VATTR_WANTED(va, va_uid); + if (VNOP_GETATTR(xvp, va, context) == 0 && (owner != va->va_uid)) { error = ENOATTR; /* don't use this "._" file */ goto out; } @@ -2605,23 +2624,23 @@ lookup: /* If create was requested, make sure file header exists. */ if (fileflags & O_CREAT) { - VATTR_INIT(&va); - VATTR_WANTED(&va, va_data_size); - VATTR_WANTED(&va, va_fileid); - VATTR_WANTED(&va, va_nlink); - if ((error = vnode_getattr(xvp, &va, context)) != 0) { + VATTR_INIT(va); + VATTR_WANTED(va, va_data_size); + VATTR_WANTED(va, va_fileid); + VATTR_WANTED(va, va_nlink); + if ((error = vnode_getattr(xvp, va, context)) != 0) { error = EPERM; goto out; } /* If the file is empty then add a default header. */ - if (va.va_data_size == 0) { + if (va->va_data_size == 0) { /* Don't adopt hard-linked "._" files. 
*/ - if (VATTR_IS_SUPPORTED(&va, va_nlink) && va.va_nlink > 1) { + if (VATTR_IS_SUPPORTED(va, va_nlink) && va->va_nlink > 1) { error = EPERM; goto out; } - if ((error = create_xattrfile(xvp, (u_int32_t)va.va_fileid, context))) { + if ((error = create_xattrfile(xvp, (u_int32_t)va->va_fileid, context))) { goto out; } } @@ -2659,6 +2678,8 @@ out: } } /* Release resources after error-handling */ + kheap_free(KHEAP_TEMP, nd, sizeof(struct nameidata)); + kheap_free(KHEAP_TEMP, va, sizeof(struct vnode_attr)); if (dvp && (dvp != vp)) { vnode_put(dvp); } @@ -2666,7 +2687,7 @@ out: vnode_putname(basename); } if (filename && filename != &smallname[0]) { - FREE(filename, M_TEMP); + kheap_free(KHEAP_TEMP, filename, len); } *xvpp = xvp; /* return a referenced vnode */ @@ -2697,22 +2718,18 @@ remove_xattrfile(vnode_t xvp, vfs_context_t context) int pathlen; int error = 0; - MALLOC_ZONE(path, char *, MAXPATHLEN, M_NAMEI, M_WAITOK); - if (path == NULL) { - return ENOMEM; - } - + path = zalloc(ZV_NAMEI); pathlen = MAXPATHLEN; error = vn_getpath(xvp, path, &pathlen); if (error) { - FREE_ZONE(path, MAXPATHLEN, M_NAMEI); + zfree(ZV_NAMEI, path); return error; } NDINIT(&nd, DELETE, OP_UNLINK, LOCKPARENT | NOFOLLOW | DONOTAUTH, UIO_SYSSPACE, CAST_USER_ADDR_T(path), context); error = namei(&nd); - FREE_ZONE(path, MAXPATHLEN, M_NAMEI); + zfree(ZV_NAMEI, path); if (error) { return error; } @@ -2753,7 +2770,7 @@ get_xattrinfo(vnode_t xvp, int setting, attr_info_t *ainfop, vfs_context_t conte void * buffer = NULL; apple_double_header_t *filehdr; struct vnode_attr va; - size_t iosize; + size_t iosize = 0; int i; int error; @@ -2780,7 +2797,7 @@ get_xattrinfo(vnode_t xvp, int setting, attr_info_t *ainfop, vfs_context_t conte goto bail; } ainfop->iosize = iosize; - MALLOC(buffer, void *, iosize, M_TEMP, M_WAITOK); + buffer = kheap_alloc(KHEAP_DATA_BUFFERS, iosize, Z_WAITOK); if (buffer == NULL) { error = ENOMEM; goto bail; @@ -2990,9 +3007,7 @@ bail: if (auio != NULL) { uio_free(auio); } - if (buffer != NULL) { - FREE(buffer, M_TEMP); - } + kheap_free(KHEAP_DATA_BUFFERS, buffer, iosize); return error; } @@ -3007,7 +3022,7 @@ create_xattrfile(vnode_t xvp, u_int32_t fileid, vfs_context_t context) int rsrcforksize; int error; - MALLOC(buffer, void *, ATTR_BUF_SIZE, M_TEMP, M_WAITOK); + buffer = kheap_alloc(KHEAP_TEMP, ATTR_BUF_SIZE, Z_WAITOK); bzero(buffer, ATTR_BUF_SIZE); xah = (attr_header_t *)buffer; @@ -3046,7 +3061,7 @@ create_xattrfile(vnode_t xvp, u_int32_t fileid, vfs_context_t context) } uio_free(auio); - FREE(buffer, M_TEMP); + kheap_free(KHEAP_TEMP, buffer, ATTR_BUF_SIZE); return error; } @@ -3070,7 +3085,7 @@ init_empty_resource_fork(rsrcfork_header_t * rsrcforkhdr) static void rel_xattrinfo(attr_info_t *ainfop) { - FREE(ainfop->filehdr, M_TEMP); + kheap_free_addr(KHEAP_DATA_BUFFERS, ainfop->filehdr); bzero(ainfop, sizeof(attr_info_t)); } @@ -3210,36 +3225,35 @@ check_and_swap_attrhdr(attr_header_t *ah, attr_info_t *ainfop) } /* Make sure the variable-length name fits (+1 is for NUL terminator) */ - /* TODO: Make sure namelen matches strnlen(name,namelen+1)? */ if (&ae->name[ae->namelen + 1] > buf_end) { return EINVAL; } + /* Make sure that namelen is matching name's real length, namelen included NUL */ + if (strnlen((const char *)ae->name, ae->namelen) != ae->namelen - 1) { + return EINVAL; + } + + /* Swap the attribute entry fields */ ae->offset = SWAP32(ae->offset); ae->length = SWAP32(ae->length); ae->flags = SWAP16(ae->flags); - /* Make sure the attribute content fits. 
*/ + /* Make sure the attribute content fits and points to the data part */ end = ae->offset + ae->length; if (end < ae->offset || end > ah->total_size) { return EINVAL; } + /* Make sure entry points to data section and not header */ + if (ae->offset < ah->data_start) { + return EINVAL; + } + ae = ATTR_NEXT(ae); } - /* - * TODO: Make sure the contents of attributes don't overlap the header - * and don't overlap each other. The hard part is that we don't know - * what the actual header size is until we have looped over all of the - * variable-sized attribute entries. - * - * XXX Is there any guarantee that attribute entries are stored in - * XXX order sorted by the contents' file offset? If so, that would - * XXX make the pairwise overlap check much easier. - */ - return 0; } @@ -3276,14 +3290,14 @@ shift_data_down(vnode_t xvp, off_t start, size_t len, off_t delta, vfs_context_t } for (pos = start + len - chunk; pos >= start; pos -= chunk) { - ret = vn_rdwr(UIO_READ, xvp, buff, chunk, pos, UIO_SYSSPACE, IO_NODELOCKED | IO_NOAUTH, ucred, &iolen, p); + ret = vn_rdwr(UIO_READ, xvp, buff, (int)chunk, pos, UIO_SYSSPACE, IO_NODELOCKED | IO_NOAUTH, ucred, &iolen, p); if (iolen != 0) { printf("xattr:shift_data: error reading data @ %lld (read %d of %lu) (%d)\n", pos, ret, chunk, ret); break; } - ret = vn_rdwr(UIO_WRITE, xvp, buff, chunk, pos + delta, UIO_SYSSPACE, IO_NODELOCKED | IO_NOAUTH, ucred, &iolen, p); + ret = vn_rdwr(UIO_WRITE, xvp, buff, (int)chunk, pos + delta, UIO_SYSSPACE, IO_NODELOCKED | IO_NOAUTH, ucred, &iolen, p); if (iolen != 0) { printf("xattr:shift_data: error writing data @ %lld (wrote %d of %lu) (%d)\n", pos + delta, ret, chunk, ret); @@ -3331,14 +3345,14 @@ shift_data_up(vnode_t xvp, off_t start, size_t len, off_t delta, vfs_context_t c } for (pos = start; pos < end; pos += chunk) { - ret = vn_rdwr(UIO_READ, xvp, buff, chunk, pos, UIO_SYSSPACE, IO_NODELOCKED | IO_NOAUTH, ucred, &iolen, p); + ret = vn_rdwr(UIO_READ, xvp, buff, (int)chunk, pos, UIO_SYSSPACE, IO_NODELOCKED | IO_NOAUTH, ucred, &iolen, p); if (iolen != 0) { printf("xattr:shift_data: error reading data @ %lld (read %d of %lu) (%d)\n", pos, ret, chunk, ret); break; } - ret = vn_rdwr(UIO_WRITE, xvp, buff, chunk, pos - delta, UIO_SYSSPACE, IO_NODELOCKED | IO_NOAUTH, ucred, &iolen, p); + ret = vn_rdwr(UIO_WRITE, xvp, buff, (int)chunk, pos - delta, UIO_SYSSPACE, IO_NODELOCKED | IO_NOAUTH, ucred, &iolen, p); if (iolen != 0) { printf("xattr:shift_data: error writing data @ %lld (wrote %d of %lu) (%d)\n", pos + delta, ret, chunk, ret); diff --git a/bsd/vfs/vnode_if.c b/bsd/vfs/vnode_if.c index a29e14f24..5331056f3 100644 --- a/bsd/vfs/vnode_if.c +++ b/bsd/vfs/vnode_if.c @@ -1193,6 +1193,23 @@ struct vnodeop_desc vnop_bwrite_desc = { NULL }; +int vnop_verify_vp_offsets[] = { + VOPARG_OFFSETOF(struct vnop_verify_args, a_vp), + VDESC_NO_OFFSET +}; +struct vnodeop_desc vnop_verify_desc = { + .vdesc_offset = 0, + .vdesc_name = "vnop_verify", + .vdesc_flags = 0, + .vdesc_vp_offsets = vnop_verify_vp_offsets, + .vdesc_vpp_offset = VDESC_NO_OFFSET, + .vdesc_cred_offset = VDESC_NO_OFFSET, + .vdesc_proc_offset = VDESC_NO_OFFSET, + .vdesc_componentname_offset = VDESC_NO_OFFSET, + .vdesc_context_offset = VOPARG_OFFSETOF(struct vnop_verify_args, a_context), + .vdesc_transports = NULL +}; + /* End of special cases. 
*/ struct vnodeop_desc *vfs_op_descs[] = { @@ -1268,5 +1285,6 @@ struct vnodeop_desc *vfs_op_descs[] = { &vnop_getnamedstream_desc, &vnop_makenamedstream_desc, &vnop_removenamedstream_desc, + &vnop_verify_desc, NULL }; diff --git a/bsd/vm/dp_backing_file.c b/bsd/vm/dp_backing_file.c index d932aa153..7f1c22dc0 100644 --- a/bsd/vm/dp_backing_file.c +++ b/bsd/vm/dp_backing_file.c @@ -60,7 +60,6 @@ #include #include #include -#include #include #include diff --git a/bsd/vm/vm_compressor_backing_file.c b/bsd/vm/vm_compressor_backing_file.c index b908529dc..5f4fb755e 100644 --- a/bsd/vm/vm_compressor_backing_file.c +++ b/bsd/vm/vm_compressor_backing_file.c @@ -193,7 +193,7 @@ int vm_swapfile_io(vnode_t vp, uint64_t offset, uint64_t start, int npages, int flags, void *upl_iodone) { int error = 0; - uint64_t io_size = npages * PAGE_SIZE_64; + upl_size_t io_size = (upl_size_t) (npages * PAGE_SIZE_64); #if 1 kern_return_t kr = KERN_SUCCESS; upl_t upl = NULL; @@ -240,7 +240,7 @@ vm_swapfile_io(vnode_t vp, uint64_t offset, uint64_t start, int npages, int flag &error); if (error) { #if DEBUG - printf("vm_swapfile_io: vnode_pagein failed with %d (vp: %p, offset: 0x%llx, size:%llu)\n", error, vp, offset, io_size); + printf("vm_swapfile_io: vnode_pagein failed with %d (vp: %p, offset: 0x%llx, size:%u)\n", error, vp, offset, io_size); #else /* DEBUG */ printf("vm_swapfile_io: vnode_pagein failed with %d.\n", error); #endif /* DEBUG */ @@ -257,12 +257,13 @@ vm_swapfile_io(vnode_t vp, uint64_t offset, uint64_t start, int npages, int flag &error); if (error) { #if DEBUG - printf("vm_swapfile_io: vnode_pageout failed with %d (vp: %p, offset: 0x%llx, size:%llu)\n", error, vp, offset, io_size); + printf("vm_swapfile_io: vnode_pageout failed with %d (vp: %p, offset: 0x%llx, size:%u)\n", error, vp, offset, io_size); #else /* DEBUG */ printf("vm_swapfile_io: vnode_pageout failed with %d.\n", error); #endif /* DEBUG */ } } + return error; #else /* 1 */ @@ -312,7 +313,8 @@ vnode_trim_list(vnode_t vp, struct trim_list *tl, boolean_t route_only) devvp = vp->v_mount->mnt_devvp; blocksize = vp->v_mount->mnt_devblocksize; - extents = kalloc(sizeof(dk_extent_t) * MAX_BATCH_TO_TRIM); + extents = kheap_alloc(KHEAP_TEMP, + sizeof(dk_extent_t) * MAX_BATCH_TO_TRIM, Z_WAITOK); if (vp->v_mount->mnt_ioflags & MNT_IOFLAGS_CSUNMAP_SUPPORTED) { memset(&cs_unmap, 0, sizeof(_dk_cs_unmap_t)); @@ -390,7 +392,7 @@ vnode_trim_list(vnode_t vp, struct trim_list *tl, boolean_t route_only) } } trim_exit: - kfree(extents, sizeof(dk_extent_t) * MAX_BATCH_TO_TRIM); + kheap_free(KHEAP_TEMP, extents, sizeof(dk_extent_t) * MAX_BATCH_TO_TRIM); return error; } diff --git a/bsd/vm/vm_unix.c b/bsd/vm/vm_unix.c index b9626cc5e..9d8c38209 100644 --- a/bsd/vm/vm_unix.c +++ b/bsd/vm/vm_unix.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2018 Apple Inc. All rights reserved. + * Copyright (c) 2000-2020 Apple Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -53,6 +53,7 @@ #include #include #include +#include #include #include @@ -65,6 +66,7 @@ #include #include #include +#include #include #include #include @@ -79,6 +81,7 @@ #include #include #include +#include #if NECP #include #endif /* NECP */ @@ -99,19 +102,19 @@ #include #include +#include #if CONFIG_MACF #include #endif +#include + #if CONFIG_CSR #include #endif /* CONFIG_CSR */ #include -int _shared_region_map_and_slide(struct proc*, int, unsigned int, struct shared_file_mapping_np*, uint32_t, user_addr_t, user_addr_t); -int shared_region_copyin_mappings(struct proc*, user_addr_t, unsigned int, struct shared_file_mapping_np *); - #if VM_MAP_DEBUG_APPLE_PROTECT SYSCTL_INT(_vm, OID_AUTO, map_debug_apple_protect, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_map_debug_apple_protect, 0, ""); #endif /* VM_MAP_DEBUG_APPLE_PROTECT */ @@ -150,6 +153,9 @@ SYSCTL_PROC(_vm, OID_AUTO, kmem_alloc_contig, CTLTYPE_INT | CTLFLAG_WR | CTLFLAG extern int vm_region_footprint; SYSCTL_INT(_vm, OID_AUTO, region_footprint, CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED, &vm_region_footprint, 0, ""); + +#endif /* DEVELOPMENT || DEBUG */ + static int sysctl_vm_self_region_footprint SYSCTL_HANDLER_ARGS { @@ -176,25 +182,53 @@ sysctl_vm_self_region_footprint SYSCTL_HANDLER_ARGS } SYSCTL_PROC(_vm, OID_AUTO, self_region_footprint, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED | CTLFLAG_MASKED, 0, 0, &sysctl_vm_self_region_footprint, "I", ""); -#endif /* DEVELOPMENT || DEBUG */ +static int +sysctl_vm_self_region_page_size SYSCTL_HANDLER_ARGS +{ +#pragma unused(arg1, arg2, oidp) + int error = 0; + int value; + + value = (1 << thread_self_region_page_shift()); + error = SYSCTL_OUT(req, &value, sizeof(int)); + if (error) { + return error; + } + if (!req->newptr) { + return 0; + } + + error = SYSCTL_IN(req, &value, sizeof(int)); + if (error) { + return error; + } + + if (value != 0 && value != 4096 && value != 16384) { + return EINVAL; + } + +#if !__ARM_MIXED_PAGE_SIZE__ + if (value != vm_map_page_size(current_map())) { + return EINVAL; + } +#endif /* !__ARM_MIXED_PAGE_SIZE__ */ + + thread_self_region_page_shift_set(bit_first(value)); + return 0; +} +SYSCTL_PROC(_vm, OID_AUTO, self_region_page_size, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED | CTLFLAG_MASKED, 0, 0, &sysctl_vm_self_region_page_size, "I", ""); -#if CONFIG_EMBEDDED #if DEVELOPMENT || DEBUG extern int panic_on_unsigned_execute; SYSCTL_INT(_vm, OID_AUTO, panic_on_unsigned_execute, CTLFLAG_RW | CTLFLAG_LOCKED, &panic_on_unsigned_execute, 0, ""); #endif /* DEVELOPMENT || DEBUG */ -extern int log_executable_mem_entry; extern int cs_executable_create_upl; -extern int cs_executable_mem_entry; extern int cs_executable_wire; -SYSCTL_INT(_vm, OID_AUTO, log_executable_mem_entry, CTLFLAG_RD | CTLFLAG_LOCKED, &log_executable_mem_entry, 0, ""); SYSCTL_INT(_vm, OID_AUTO, cs_executable_create_upl, CTLFLAG_RD | CTLFLAG_LOCKED, &cs_executable_create_upl, 0, ""); -SYSCTL_INT(_vm, OID_AUTO, cs_executable_mem_entry, CTLFLAG_RD | CTLFLAG_LOCKED, &cs_executable_mem_entry, 0, ""); SYSCTL_INT(_vm, OID_AUTO, cs_executable_wire, CTLFLAG_RD | CTLFLAG_LOCKED, &cs_executable_wire, 0, ""); -#endif /* CONFIG_EMBEDDED */ #if DEVELOPMENT || DEBUG extern int radar_20146450; @@ -235,6 +269,10 @@ SYSCTL_UINT(_vm, OID_AUTO, kernel_pte_pages, CTLFLAG_RD | CTLFLAG_LOCKED, &inuse #if DEVELOPMENT || DEBUG extern unsigned long pmap_asid_flushes; SYSCTL_ULONG(_vm, OID_AUTO, pmap_asid_flushes, CTLFLAG_RD | CTLFLAG_LOCKED, 
&pmap_asid_flushes, ""); +extern unsigned long pmap_asid_hits; +SYSCTL_ULONG(_vm, OID_AUTO, pmap_asid_hits, CTLFLAG_RD | CTLFLAG_LOCKED, &pmap_asid_hits, ""); +extern unsigned long pmap_asid_misses; +SYSCTL_ULONG(_vm, OID_AUTO, pmap_asid_misses, CTLFLAG_RD | CTLFLAG_LOCKED, &pmap_asid_misses, ""); #endif #endif /* __arm__ || __arm64__ */ @@ -328,13 +366,22 @@ int shared_region_unnest_log_count_threshold = 5; * Shared cache path enforcement. */ -#ifndef CONFIG_EMBEDDED +#if XNU_TARGET_OS_OSX + +#if defined (__x86_64__) static int scdir_enforce = 1; -static char scdir_path[] = "/var/db/dyld/"; -#else +#else /* defined (__x86_64__) */ +static int scdir_enforce = 0; /* AOT caches live elsewhere */ +#endif /* defined (__x86_64__) */ + +static char scdir_path[] = "/System/Library/dyld/"; + +#else /* XNU_TARGET_OS_OSX */ + static int scdir_enforce = 0; static char scdir_path[] = "/System/Library/Caches/com.apple.dyld/"; -#endif + +#endif /* XNU_TARGET_OS_OSX */ #ifndef SECURE_KERNEL static int sysctl_scdir_enforce SYSCTL_HANDLER_ARGS @@ -473,7 +520,7 @@ vsunlock( for (vaddr = vm_map_trunc_page(addr, PAGE_MASK); vaddr < vm_map_round_page(addr + len, PAGE_MASK); vaddr += PAGE_SIZE) { - paddr = pmap_extract(pmap, vaddr); + paddr = pmap_find_phys(pmap, vaddr); pg = PHYS_TO_VM_PAGE(paddr); vm_page_set_modified(pg); } @@ -676,7 +723,7 @@ pid_for_task( AUDIT_MACH_SYSCALL_ENTER(AUE_PIDFORTASK); AUDIT_ARG(mach_port1, t); - t1 = port_name_to_task_inspect(t); + t1 = port_name_to_task_name(t); if (t1 == TASK_NULL) { err = KERN_FAILURE; @@ -827,8 +874,10 @@ task_for_pid( task_t task = TASK_NULL; mach_port_name_t tret = MACH_PORT_NULL; ipc_port_t tfpport = MACH_PORT_NULL; - void * sright; - int error = 0; + void * sright = NULL; + int error = 0; + boolean_t is_current_proc = FALSE; + struct proc_ident pident = {0}; AUDIT_MACH_SYSCALL_ENTER(AUE_TASKFORPID); AUDIT_ARG(pid, pid); @@ -854,6 +903,8 @@ task_for_pid( error = KERN_FAILURE; goto tfpout; } + pident = proc_ident(p); + is_current_proc = (p == current_proc()); #if CONFIG_AUDIT AUDIT_ARG(process, p); @@ -869,21 +920,27 @@ task_for_pid( goto tfpout; } + /* + * Grab a task reference and drop the proc reference as the proc ref + * shouldn't be held accross upcalls. + */ + task = p->task; + task_reference(task); + + proc_rele(p); + p = PROC_NULL; + #if CONFIG_MACF - error = mac_proc_check_get_task(kauth_cred_get(), p); + error = mac_proc_check_get_task(kauth_cred_get(), &pident); if (error) { error = KERN_FAILURE; goto tfpout; } #endif - /* Grab a task reference since the proc ref might be dropped if an upcall to task access server is made */ - task = p->task; - task_reference(task); - /* If we aren't root and target's task access port is set... */ if (!kauth_cred_issuser(kauth_cred_get()) && - p != current_proc() && + !is_current_proc && (task_get_task_access_port(task, &tfpport) == 0) && (tfpport != IPC_PORT_NULL)) { if (tfpport == IPC_PORT_DEAD) { @@ -891,14 +948,6 @@ task_for_pid( goto tfpout; } - /* - * Drop the proc_find proc ref before making an upcall - * to taskgated, since holding a proc_find - * ref while making an upcall can cause deadlock. 
- */ - proc_rele(p); - p = PROC_NULL; - /* Call up to the task access server */ error = __KERNEL_WAITING_ON_TASKGATED_CHECK_ACCESS_UPCALL__(tfpport, proc_selfpid(), kauth_getgid(), pid); @@ -1000,15 +1049,22 @@ task_name_for_pid( || ((kauth_cred_getuid(target_cred) == kauth_cred_getuid(kauth_cred_get())) && ((kauth_cred_getruid(target_cred) == kauth_getruid()))))) { if (p->task != TASK_NULL) { + struct proc_ident pident = proc_ident(p); + + task_t task = p->task; + task_reference(p->task); + proc_rele(p); + p = PROC_NULL; #if CONFIG_MACF - error = mac_proc_check_get_task_name(kauth_cred_get(), p); + error = mac_proc_check_get_task_name(kauth_cred_get(), &pident); if (error) { - task_deallocate(p->task); + task_deallocate(task); goto noperm; } #endif - sright = (void *)convert_task_name_to_port(p->task); + sright = (void *)convert_task_name_to_port(task); + task = NULL; tret = ipc_port_copyout_send(sright, get_task_ipcspace(current_task())); } else { @@ -1041,6 +1097,256 @@ tnfpout: return error; } +/* + * Routine: task_inspect_for_pid + * Purpose: + * Get the task inspect port for another "process", named by its + * process ID on the same host as "target_task". + */ +int +task_inspect_for_pid(struct proc *p __unused, struct task_inspect_for_pid_args *args, int *ret) +{ + mach_port_name_t target_tport = args->target_tport; + int pid = args->pid; + user_addr_t task_addr = args->t; + + proc_t proc = PROC_NULL; + task_t t1 = TASK_NULL; + task_inspect_t task_insp = TASK_INSPECT_NULL; + mach_port_name_t tret = MACH_PORT_NULL; + ipc_port_t tfpport = MACH_PORT_NULL; + int error = 0; + void *sright = NULL; + boolean_t is_current_proc = FALSE; + struct proc_ident pident = {0}; + + /* Disallow inspect port for kernel_task */ + if (pid == 0) { + (void) copyout((char *)&t1, task_addr, sizeof(mach_port_name_t)); + return EPERM; + } + + t1 = port_name_to_task(target_tport); + if (t1 == TASK_NULL) { + (void) copyout((char *) &t1, task_addr, sizeof(mach_port_name_t)); + return EINVAL; + } + + proc = proc_find(pid); + if (proc == PROC_NULL) { + error = ESRCH; + goto tifpout; + } + pident = proc_ident(proc); + is_current_proc = (proc == current_proc()); + + if (!(task_for_pid_posix_check(proc))) { + error = EPERM; + goto tifpout; + } + + task_insp = proc->task; + if (task_insp == TASK_INSPECT_NULL) { + goto tifpout; + } + + /* + * Grab a task reference and drop the proc reference before making any upcalls. + */ + task_reference(task_insp); + + proc_rele(proc); + proc = PROC_NULL; + + /* + * For now, it performs the same set of permission checks as task_for_pid. This + * will be addressed in rdar://problem/53478660 + */ +#if CONFIG_MACF + error = mac_proc_check_get_task(kauth_cred_get(), &pident); + if (error) { + error = EPERM; + goto tifpout; + } +#endif + + /* If we aren't root and target's task access port is set... 
*/ + if (!kauth_cred_issuser(kauth_cred_get()) && + !is_current_proc && + (task_get_task_access_port(task_insp, &tfpport) == 0) && + (tfpport != IPC_PORT_NULL)) { + if (tfpport == IPC_PORT_DEAD) { + error = EACCES; + goto tifpout; + } + + + /* Call up to the task access server */ + error = __KERNEL_WAITING_ON_TASKGATED_CHECK_ACCESS_UPCALL__(tfpport, proc_selfpid(), kauth_getgid(), pid); + + if (error != MACH_MSG_SUCCESS) { + if (error == MACH_RCV_INTERRUPTED) { + error = EINTR; + } else { + error = EPERM; + } + goto tifpout; + } + } + + /* Check if the task has been corpsified */ + if (is_corpsetask(task_insp)) { + error = EACCES; + goto tifpout; + } + + /* could be IP_NULL, consumes a ref */ + sright = (void*) convert_task_inspect_to_port(task_insp); + task_insp = TASK_INSPECT_NULL; + tret = ipc_port_copyout_send(sright, get_task_ipcspace(current_task())); + +tifpout: + task_deallocate(t1); + (void) copyout((char *) &tret, task_addr, sizeof(mach_port_name_t)); + if (proc != PROC_NULL) { + proc_rele(proc); + } + if (tfpport != IPC_PORT_NULL) { + ipc_port_release_send(tfpport); + } + if (task_insp != TASK_INSPECT_NULL) { + task_deallocate(task_insp); + } + + *ret = error; + return error; +} + +/* + * Routine: task_read_for_pid + * Purpose: + * Get the task read port for another "process", named by its + * process ID on the same host as "target_task". + */ +int +task_read_for_pid(struct proc *p __unused, struct task_read_for_pid_args *args, int *ret) +{ + mach_port_name_t target_tport = args->target_tport; + int pid = args->pid; + user_addr_t task_addr = args->t; + + proc_t proc = PROC_NULL; + task_t t1 = TASK_NULL; + task_read_t task_read = TASK_READ_NULL; + mach_port_name_t tret = MACH_PORT_NULL; + ipc_port_t tfpport = MACH_PORT_NULL; + int error = 0; + void *sright = NULL; + boolean_t is_current_proc = FALSE; + struct proc_ident pident = {0}; + + /* Disallow read port for kernel_task */ + if (pid == 0) { + (void) copyout((char *)&t1, task_addr, sizeof(mach_port_name_t)); + return EPERM; + } + + t1 = port_name_to_task(target_tport); + if (t1 == TASK_NULL) { + (void) copyout((char *) &t1, task_addr, sizeof(mach_port_name_t)); + return EINVAL; + } + + proc = proc_find(pid); + if (proc == PROC_NULL) { + error = ESRCH; + goto trfpout; + } + pident = proc_ident(proc); + is_current_proc = (proc == current_proc()); + + if (!(task_for_pid_posix_check(proc))) { + error = EPERM; + goto trfpout; + } + + task_read = proc->task; + if (task_read == TASK_INSPECT_NULL) { + goto trfpout; + } + + /* + * Grab a task reference and drop the proc reference before making any upcalls. + */ + task_reference(task_read); + + proc_rele(proc); + proc = PROC_NULL; + + /* + * For now, it performs the same set of permission checks as task_for_pid. This + * will be addressed in rdar://problem/53478660 + */ +#if CONFIG_MACF + error = mac_proc_check_get_task(kauth_cred_get(), &pident); + if (error) { + error = EPERM; + goto trfpout; + } +#endif + + /* If we aren't root and target's task access port is set... 
*/ + if (!kauth_cred_issuser(kauth_cred_get()) && + !is_current_proc && + (task_get_task_access_port(task_read, &tfpport) == 0) && + (tfpport != IPC_PORT_NULL)) { + if (tfpport == IPC_PORT_DEAD) { + error = EACCES; + goto trfpout; + } + + + /* Call up to the task access server */ + error = __KERNEL_WAITING_ON_TASKGATED_CHECK_ACCESS_UPCALL__(tfpport, proc_selfpid(), kauth_getgid(), pid); + + if (error != MACH_MSG_SUCCESS) { + if (error == MACH_RCV_INTERRUPTED) { + error = EINTR; + } else { + error = EPERM; + } + goto trfpout; + } + } + + /* Check if the task has been corpsified */ + if (is_corpsetask(task_read)) { + error = EACCES; + goto trfpout; + } + + /* could be IP_NULL, consumes a ref */ + sright = (void*) convert_task_read_to_port(task_read); + task_read = TASK_READ_NULL; + tret = ipc_port_copyout_send(sright, get_task_ipcspace(current_task())); + +trfpout: + task_deallocate(t1); + (void) copyout((char *) &tret, task_addr, sizeof(mach_port_name_t)); + if (proc != PROC_NULL) { + proc_rele(proc); + } + if (tfpport != IPC_PORT_NULL) { + ipc_port_release_send(tfpport); + } + if (task_read != TASK_READ_NULL) { + task_deallocate(task_read); + } + + *ret = error; + return error; +} + kern_return_t pid_suspend(struct proc *p __unused, struct pid_suspend_args *args, int *ret) { @@ -1050,14 +1356,6 @@ pid_suspend(struct proc *p __unused, struct pid_suspend_args *args, int *ret) int error = 0; mach_port_t tfpport = MACH_PORT_NULL; -#if CONFIG_MACF - error = mac_proc_check_suspend_resume(p, MAC_PROC_CHECK_SUSPEND); - if (error) { - error = EPERM; - goto out; - } -#endif - if (pid == 0) { error = EPERM; goto out; @@ -1075,6 +1373,14 @@ pid_suspend(struct proc *p __unused, struct pid_suspend_args *args, int *ret) goto out; } +#if CONFIG_MACF + error = mac_proc_check_suspend_resume(targetproc, MAC_PROC_CHECK_SUSPEND); + if (error) { + error = EPERM; + goto out; + } +#endif + target = targetproc->task; #ifndef CONFIG_EMBEDDED if (target != TASK_NULL) { @@ -1145,7 +1451,8 @@ debug_control_port_for_pid(struct debug_control_port_for_pid_args *args) ipc_port_t tfpport = MACH_PORT_NULL; ipc_port_t sright = NULL; int error = 0; - + boolean_t is_current_proc = FALSE; + struct proc_ident pident = {0}; AUDIT_MACH_SYSCALL_ENTER(AUE_DBGPORTFORPID); AUDIT_ARG(pid, pid); @@ -1165,12 +1472,13 @@ debug_control_port_for_pid(struct debug_control_port_for_pid_args *args) return KERN_FAILURE; } - p = proc_find(pid); if (p == PROC_NULL) { error = KERN_FAILURE; goto tfpout; } + pident = proc_ident(p); + is_current_proc = (p == current_proc()); #if CONFIG_AUDIT AUDIT_ARG(process, p); @@ -1186,14 +1494,18 @@ debug_control_port_for_pid(struct debug_control_port_for_pid_args *args) goto tfpout; } - /* Grab a task reference since the proc ref might be dropped if an upcall to task access server is made */ + /* + * Grab a task reference and drop the proc reference before making any upcalls. + */ task = p->task; task_reference(task); + proc_rele(p); + p = PROC_NULL; if (!IOTaskHasEntitlement(current_task(), DEBUG_PORT_ENTITLEMENT)) { #if CONFIG_MACF - error = mac_proc_check_get_task(kauth_cred_get(), p); + error = mac_proc_check_get_task(kauth_cred_get(), &pident); if (error) { error = KERN_FAILURE; goto tfpout; @@ -1202,7 +1514,7 @@ debug_control_port_for_pid(struct debug_control_port_for_pid_args *args) /* If we aren't root and target's task access port is set... 
*/ if (!kauth_cred_issuser(kauth_cred_get()) && - p != current_proc() && + !is_current_proc && (task_get_task_access_port(task, &tfpport) == 0) && (tfpport != IPC_PORT_NULL)) { if (tfpport == IPC_PORT_DEAD) { @@ -1210,13 +1522,6 @@ debug_control_port_for_pid(struct debug_control_port_for_pid_args *args) goto tfpout; } - /* - * Drop the proc_find proc ref before making an upcall - * to taskgated, since holding a proc_find - * ref while making an upcall can cause deadlock. - */ - proc_rele(p); - p = PROC_NULL; /* Call up to the task access server */ error = __KERNEL_WAITING_ON_TASKGATED_CHECK_ACCESS_UPCALL__(tfpport, proc_selfpid(), kauth_getgid(), pid); @@ -1276,14 +1581,6 @@ pid_resume(struct proc *p __unused, struct pid_resume_args *args, int *ret) int error = 0; mach_port_t tfpport = MACH_PORT_NULL; -#if CONFIG_MACF - error = mac_proc_check_suspend_resume(p, MAC_PROC_CHECK_RESUME); - if (error) { - error = EPERM; - goto out; - } -#endif - if (pid == 0) { error = EPERM; goto out; @@ -1301,6 +1598,14 @@ pid_resume(struct proc *p __unused, struct pid_resume_args *args, int *ret) goto out; } +#if CONFIG_MACF + error = mac_proc_check_suspend_resume(targetproc, MAC_PROC_CHECK_RESUME); + if (error) { + error = EPERM; + goto out; + } +#endif + target = targetproc->task; #ifndef CONFIG_EMBEDDED if (target != TASK_NULL) { @@ -1329,11 +1634,11 @@ pid_resume(struct proc *p __unused, struct pid_resume_args *args, int *ret) } #endif -#if CONFIG_EMBEDDED +#if !XNU_TARGET_OS_OSX #if SOCKETS resume_proc_sockets(targetproc); #endif /* SOCKETS */ -#endif /* CONFIG_EMBEDDED */ +#endif /* !XNU_TARGET_OS_OSX */ task_reference(target); @@ -1387,14 +1692,6 @@ pid_hibernate(struct proc *p __unused, struct pid_hibernate_args *args, int *ret #pragma unused(pid) #else -#if CONFIG_MACF - error = mac_proc_check_suspend_resume(p, MAC_PROC_CHECK_HIBERNATE); - if (error) { - error = EPERM; - goto out; - } -#endif - /* * If a pid has been provided, we obtain the process handle and call task_for_pid_posix_check(). 
*/ @@ -1413,6 +1710,15 @@ pid_hibernate(struct proc *p __unused, struct pid_hibernate_args *args, int *ret } } +#if CONFIG_MACF + //Note that targetproc may be null + error = mac_proc_check_suspend_resume(targetproc, MAC_PROC_CHECK_HIBERNATE); + if (error) { + error = EPERM; + goto out; + } +#endif + if (pid == -2) { vm_pageout_anonymous_pages(); } else if (pid == -1) { @@ -1437,8 +1743,7 @@ out: int networking_memstatus_callout(proc_t p, uint32_t status) { - struct filedesc *fdp; - int i; + struct fileproc *fp; /* * proc list lock NOT held @@ -1449,19 +1754,13 @@ networking_memstatus_callout(proc_t p, uint32_t status) LCK_MTX_ASSERT(&p->p_mlock, LCK_MTX_ASSERT_NOTOWNED); proc_fdlock(p); - fdp = p->p_fd; - for (i = 0; i < fdp->fd_nfiles; i++) { - struct fileproc *fp; - fp = fdp->fd_ofiles[i]; - if (fp == NULL || (fdp->fd_ofileflags[i] & UF_RESERVED) != 0) { - continue; - } - switch (FILEGLOB_DTYPE(fp->f_fglob)) { + fdt_foreach(fp, p) { + switch (FILEGLOB_DTYPE(fp->fp_glob)) { #if NECP case DTYPE_NETPOLICY: necp_fd_memstatus(p, status, - (struct necp_fd_data *)fp->f_fglob->fg_data); + (struct necp_fd_data *)fp->fp_glob->fg_data); break; #endif /* NECP */ default: @@ -1480,27 +1779,20 @@ networking_defunct_callout(proc_t p, void *arg) struct pid_shutdown_sockets_args *args = arg; int pid = args->pid; int level = args->level; - struct filedesc *fdp; - int i; + struct fileproc *fp; proc_fdlock(p); - fdp = p->p_fd; - for (i = 0; i < fdp->fd_nfiles; i++) { - struct fileproc *fp = fdp->fd_ofiles[i]; - struct fileglob *fg; - if (fp == NULL || (fdp->fd_ofileflags[i] & UF_RESERVED) != 0) { - continue; - } + fdt_foreach(fp, p) { + struct fileglob *fg = fp->fp_glob; - fg = fp->f_fglob; switch (FILEGLOB_DTYPE(fg)) { case DTYPE_SOCKET: { struct socket *so = (struct socket *)fg->fg_data; if (p->p_pid == pid || so->last_pid == pid || ((so->so_flags & SOF_DELEGATED) && so->e_pid == pid)) { /* Call networking stack with socket and level */ - (void) socket_defunct(p, so, level); + (void)socket_defunct(p, so, level); } break; } @@ -1537,14 +1829,6 @@ pid_shutdown_sockets(struct proc *p __unused, struct pid_shutdown_sockets_args * goto out; } -#if CONFIG_MACF - error = mac_proc_check_suspend_resume(p, MAC_PROC_CHECK_SHUTDOWN_SOCKETS); - if (error) { - error = EPERM; - goto out; - } -#endif - targetproc = proc_find(pid); if (targetproc == PROC_NULL) { error = ESRCH; @@ -1557,6 +1841,14 @@ pid_shutdown_sockets(struct proc *p __unused, struct pid_shutdown_sockets_args * goto out; } +#if CONFIG_MACF + error = mac_proc_check_suspend_resume(targetproc, MAC_PROC_CHECK_SHUTDOWN_SOCKETS); + if (error) { + error = EPERM; + goto out; + } +#endif + proc_iterate(PROC_ALLPROCLIST | PROC_NOWAITTRANS, networking_defunct_callout, args, NULL, NULL); @@ -1652,7 +1944,7 @@ shared_region_check_np( { vm_shared_region_t shared_region; mach_vm_offset_t start_address = 0; - int error; + int error = 0; kern_return_t kr; SHARED_REGION_TRACE_DEBUG( @@ -1665,16 +1957,27 @@ shared_region_check_np( shared_region = vm_shared_region_get(current_task()); if (shared_region != NULL) { /* retrieve address of its first mapping... */ - kr = vm_shared_region_start_address(shared_region, - &start_address); + kr = vm_shared_region_start_address(shared_region, &start_address); if (kr != KERN_SUCCESS) { error = ENOMEM; } else { +#if __has_feature(ptrauth_calls) + /* + * Remap any section of the shared library that + * has authenticated pointers into private memory. 
+ */ + if (vm_shared_region_auth_remap(shared_region) != KERN_SUCCESS) { + error = ENOMEM; + } +#endif /* __has_feature(ptrauth_calls) */ + /* ... and give it to the caller */ - error = copyout(&start_address, - (user_addr_t) uap->start_address, - sizeof(start_address)); - if (error) { + if (error == 0) { + error = copyout(&start_address, + (user_addr_t) uap->start_address, + sizeof(start_address)); + } + if (error != 0) { SHARED_REGION_TRACE_ERROR( ("shared_region: %p [%d(%s)] " "check_np(0x%llx) " @@ -1701,63 +2004,65 @@ shared_region_check_np( } -int -shared_region_copyin_mappings( - struct proc *p, - user_addr_t user_mappings, - unsigned int mappings_count, - struct shared_file_mapping_np *mappings) +static int +shared_region_copyin( + struct proc *p, + user_addr_t user_addr, + unsigned int count, + unsigned int element_size, + void *kernel_data) { int error = 0; - vm_size_t mappings_size = 0; + vm_size_t size = count * element_size; - /* get the list of mappings the caller wants us to establish */ - mappings_size = (vm_size_t) (mappings_count * sizeof(mappings[0])); - error = copyin(user_mappings, - mappings, - mappings_size); + error = copyin(user_addr, kernel_data, size); if (error) { SHARED_REGION_TRACE_ERROR( ("shared_region: %p [%d(%s)] map(): " - "copyin(0x%llx, %d) failed (error=%d)\n", + "copyin(0x%llx, %ld) failed (error=%d)\n", (void *)VM_KERNEL_ADDRPERM(current_thread()), p->p_pid, p->p_comm, - (uint64_t)user_mappings, mappings_count, error)); + (uint64_t)user_addr, (long)size, error)); } return error; } + +#define _SR_FILE_MAPPINGS_MAX_FILES 2 + +/* forward declaration */ +__attribute__((noinline)) +static void shared_region_map_and_slide_cleanup( + struct proc *p, + uint32_t files_count, + struct _sr_file_mappings *sr_file_mappings, + struct vm_shared_region *shared_region, + struct vnode *scdir_vp); + /* - * shared_region_map_np() - * - * This system call is intended for dyld. - * - * dyld uses this to map a shared cache file into a shared region. - * This is usually done only the first time a shared cache is needed. - * Subsequent processes will just use the populated shared region without - * requiring any further setup. + * Setup part of _shared_region_map_and_slide(). + * It had to be broken out of _shared_region_map_and_slide() to + * prevent compiler inlining from blowing out the stack. 
*/ -int -_shared_region_map_and_slide( - struct proc *p, - int fd, - uint32_t mappings_count, - struct shared_file_mapping_np *mappings, - uint32_t slide, - user_addr_t slide_start, - user_addr_t slide_size) +__attribute__((noinline)) +static int +shared_region_map_and_slide_setup( + struct proc *p, + uint32_t files_count, + struct shared_file_np *files, + uint32_t mappings_count, + struct shared_file_mapping_slide_np *mappings, + struct _sr_file_mappings **sr_file_mappings, + struct vm_shared_region **shared_region, + struct vnode **scdir_vp) { - int error; - kern_return_t kr; - struct fileproc *fp; - struct vnode *vp, *root_vp, *scdir_vp; + int error = 0; + struct _sr_file_mappings *srfmp; + uint32_t mappings_next; struct vnode_attr va; off_t fs; - memory_object_size_t file_size; #if CONFIG_MACF vm_prot_t maxprot = VM_PROT_ALL; #endif - memory_object_control_t file_control; - struct vm_shared_region *shared_region; uint32_t i; SHARED_REGION_TRACE_DEBUG( @@ -1765,322 +2070,503 @@ _shared_region_map_and_slide( (void *)VM_KERNEL_ADDRPERM(current_thread()), p->p_pid, p->p_comm)); - shared_region = NULL; - fp = NULL; - vp = NULL; - scdir_vp = NULL; - - /* get file structure from file descriptor */ - error = fp_lookup(p, fd, &fp, 0); - if (error) { - SHARED_REGION_TRACE_ERROR( - ("shared_region: %p [%d(%s)] map: " - "fd=%d lookup failed (error=%d)\n", - (void *)VM_KERNEL_ADDRPERM(current_thread()), - p->p_pid, p->p_comm, fd, error)); + if (files_count > _SR_FILE_MAPPINGS_MAX_FILES) { + error = E2BIG; goto done; } - - /* make sure we're attempting to map a vnode */ - if (FILEGLOB_DTYPE(fp->f_fglob) != DTYPE_VNODE) { - SHARED_REGION_TRACE_ERROR( - ("shared_region: %p [%d(%s)] map: " - "fd=%d not a vnode (type=%d)\n", - (void *)VM_KERNEL_ADDRPERM(current_thread()), - p->p_pid, p->p_comm, - fd, FILEGLOB_DTYPE(fp->f_fglob))); + if (files_count == 0) { error = EINVAL; goto done; } - - /* we need at least read permission on the file */ - if (!(fp->f_fglob->fg_flag & FREAD)) { - SHARED_REGION_TRACE_ERROR( - ("shared_region: %p [%d(%s)] map: " - "fd=%d not readable\n", - (void *)VM_KERNEL_ADDRPERM(current_thread()), - p->p_pid, p->p_comm, fd)); - error = EPERM; + *sr_file_mappings = kheap_alloc(KHEAP_TEMP, files_count * sizeof(struct _sr_file_mappings), Z_WAITOK); + if (*sr_file_mappings == NULL) { + error = ENOMEM; goto done; } + bzero(*sr_file_mappings, files_count * sizeof(struct _sr_file_mappings)); + mappings_next = 0; + for (i = 0; i < files_count; i++) { + srfmp = &(*sr_file_mappings)[i]; + srfmp->fd = files[i].sf_fd; + srfmp->mappings_count = files[i].sf_mappings_count; + srfmp->mappings = &mappings[mappings_next]; + mappings_next += srfmp->mappings_count; + if (mappings_next > mappings_count) { + error = EINVAL; + goto done; + } + srfmp->slide = files[i].sf_slide; + } - /* get vnode from file structure */ - error = vnode_getwithref((vnode_t) fp->f_fglob->fg_data); - if (error) { - SHARED_REGION_TRACE_ERROR( - ("shared_region: %p [%d(%s)] map: " - "fd=%d getwithref failed (error=%d)\n", - (void *)VM_KERNEL_ADDRPERM(current_thread()), - p->p_pid, p->p_comm, fd, error)); - goto done; + if (scdir_enforce) { + /* get vnode for scdir_path */ + error = vnode_lookup(scdir_path, 0, scdir_vp, vfs_context_current()); + if (error) { + SHARED_REGION_TRACE_ERROR( + ("shared_region: %p [%d(%s)]: " + "vnode_lookup(%s) failed (error=%d)\n", + (void *)VM_KERNEL_ADDRPERM(current_thread()), + p->p_pid, p->p_comm, + scdir_path, error)); + goto done; + } } - vp = (struct vnode *) fp->f_fglob->fg_data; - /* make 
sure the vnode is a regular file */ - if (vp->v_type != VREG) { + /* get the process's shared region (setup in vm_map_exec()) */ + *shared_region = vm_shared_region_trim_and_get(current_task()); + if (*shared_region == NULL) { SHARED_REGION_TRACE_ERROR( - ("shared_region: %p [%d(%s)] map(%p:'%s'): " - "not a file (type=%d)\n", + ("shared_region: %p [%d(%s)] map(): " + "no shared region\n", (void *)VM_KERNEL_ADDRPERM(current_thread()), - p->p_pid, p->p_comm, - (void *)VM_KERNEL_ADDRPERM(vp), - vp->v_name, vp->v_type)); + p->p_pid, p->p_comm)); error = EINVAL; goto done; } -#if CONFIG_MACF - /* pass in 0 for the offset argument because AMFI does not need the offset - * of the shared cache */ - error = mac_file_check_mmap(vfs_context_ucred(vfs_context_current()), - fp->f_fglob, VM_PROT_ALL, MAP_FILE, 0, &maxprot); - if (error) { - goto done; - } -#endif /* MAC */ + for (srfmp = &(*sr_file_mappings)[0]; + srfmp < &(*sr_file_mappings)[files_count]; + srfmp++) { + if (srfmp->mappings_count == 0) { + /* no mappings here... */ + continue; + } - /* The calling process cannot be chroot-ed. */ - root_vp = p->p_fd->fd_rdir; - if (root_vp == NULL) { - root_vp = rootvnode; - } else { - SHARED_REGION_TRACE_ERROR( - ("calling process [%d(%s)] is chroot-ed, permission denied\n", - p->p_pid, p->p_comm)); - error = EPERM; - goto done; - } + /* get file structure from file descriptor */ + error = fp_get_ftype(p, srfmp->fd, DTYPE_VNODE, EINVAL, &srfmp->fp); + if (error) { + SHARED_REGION_TRACE_ERROR( + ("shared_region: %p [%d(%s)] map: " + "fd=%d lookup failed (error=%d)\n", + (void *)VM_KERNEL_ADDRPERM(current_thread()), + p->p_pid, p->p_comm, srfmp->fd, error)); + goto done; + } - /* The shared cache file must be owned by root */ - VATTR_INIT(&va); - VATTR_WANTED(&va, va_uid); - VATTR_WANTED(&va, va_flags); - error = vnode_getattr(vp, &va, vfs_context_current()); - if (error) { - SHARED_REGION_TRACE_ERROR( - ("shared_region: %p [%d(%s)] map(%p:'%s'): " - "vnode_getattr(%p) failed (error=%d)\n", - (void *)VM_KERNEL_ADDRPERM(current_thread()), - p->p_pid, p->p_comm, - (void *)VM_KERNEL_ADDRPERM(vp), vp->v_name, - (void *)VM_KERNEL_ADDRPERM(vp), error)); - goto done; - } - if (va.va_uid != 0) { - SHARED_REGION_TRACE_ERROR( - ("shared_region: %p [%d(%s)] map(%p:'%s'): " - "owned by uid=%d instead of 0\n", - (void *)VM_KERNEL_ADDRPERM(current_thread()), - p->p_pid, p->p_comm, - (void *)VM_KERNEL_ADDRPERM(vp), - vp->v_name, va.va_uid)); - error = EPERM; - goto done; - } + /* we need at least read permission on the file */ + if (!(srfmp->fp->fp_glob->fg_flag & FREAD)) { + SHARED_REGION_TRACE_ERROR( + ("shared_region: %p [%d(%s)] map: " + "fd=%d not readable\n", + (void *)VM_KERNEL_ADDRPERM(current_thread()), + p->p_pid, p->p_comm, srfmp->fd)); + error = EPERM; + goto done; + } -#if CONFIG_CSR - if (csr_check(CSR_ALLOW_UNRESTRICTED_FS) != 0 && - !(va.va_flags & SF_RESTRICTED)) { + /* get vnode from file structure */ + error = vnode_getwithref((vnode_t) srfmp->fp->fp_glob->fg_data); + if (error) { + SHARED_REGION_TRACE_ERROR( + ("shared_region: %p [%d(%s)] map: " + "fd=%d getwithref failed (error=%d)\n", + (void *)VM_KERNEL_ADDRPERM(current_thread()), + p->p_pid, p->p_comm, srfmp->fd, error)); + goto done; + } + srfmp->vp = (struct vnode *) srfmp->fp->fp_glob->fg_data; + + /* make sure the vnode is a regular file */ + if (srfmp->vp->v_type != VREG) { + SHARED_REGION_TRACE_ERROR( + ("shared_region: %p [%d(%s)] map(%p:'%s'): " + "not a file (type=%d)\n", + (void *)VM_KERNEL_ADDRPERM(current_thread()), + p->p_pid, 
p->p_comm, + (void *)VM_KERNEL_ADDRPERM(srfmp->vp), + srfmp->vp->v_name, srfmp->vp->v_type)); + error = EINVAL; + goto done; + } + +#if CONFIG_MACF + /* pass in 0 for the offset argument because AMFI does not need the offset + * of the shared cache */ + error = mac_file_check_mmap(vfs_context_ucred(vfs_context_current()), + srfmp->fp->fp_glob, VM_PROT_ALL, MAP_FILE, 0, &maxprot); + if (error) { + goto done; + } +#endif /* MAC */ + +#if XNU_TARGET_OS_OSX && defined(__arm64__) /* - * CSR is not configured in CSR_ALLOW_UNRESTRICTED_FS mode, and - * the shared cache file is NOT SIP-protected, so reject the - * mapping request + * Check if the shared cache is in the trust cache; + * if so, we can skip the root ownership check. */ - SHARED_REGION_TRACE_ERROR( - ("shared_region: %p [%d(%s)] map(%p:'%s'), " - "vnode is not SIP-protected. \n", - (void *)VM_KERNEL_ADDRPERM(current_thread()), - p->p_pid, p->p_comm, (void *)VM_KERNEL_ADDRPERM(vp), - vp->v_name)); - error = EPERM; - goto done; - } -#else - /* Devices without SIP/ROSP need to make sure that the shared cache is on the root volume. */ - if (vp->v_mount != root_vp->v_mount) { - SHARED_REGION_TRACE_ERROR( - ("shared_region: %p [%d(%s)] map(%p:'%s'): " - "not on process's root volume\n", - (void *)VM_KERNEL_ADDRPERM(current_thread()), - p->p_pid, p->p_comm, - (void *)VM_KERNEL_ADDRPERM(vp), vp->v_name)); - error = EPERM; - goto done; - } -#endif /* CONFIG_CSR */ +#if DEVELOPMENT || DEBUG + /* + * Skip both root ownership and trust cache check if + * enforcement is disabled. + */ + if (!cs_system_enforcement()) { + goto after_root_check; + } +#endif /* DEVELOPMENT || DEBUG */ + struct cs_blob *blob = csvnode_get_blob(srfmp->vp, 0); + if (blob == NULL) { + SHARED_REGION_TRACE_ERROR( + ("shared_region: %p [%d(%s)] map(%p:'%s'): " + "missing CS blob\n", + (void *)VM_KERNEL_ADDRPERM(current_thread()), + p->p_pid, p->p_comm, + (void *)VM_KERNEL_ADDRPERM(srfmp->vp), + srfmp->vp->v_name)); + goto root_check; + } + const uint8_t *cdhash = csblob_get_cdhash(blob); + if (cdhash == NULL) { + SHARED_REGION_TRACE_ERROR( + ("shared_region: %p [%d(%s)] map(%p:'%s'): " + "missing cdhash\n", + (void *)VM_KERNEL_ADDRPERM(current_thread()), + p->p_pid, p->p_comm, + (void *)VM_KERNEL_ADDRPERM(srfmp->vp), + srfmp->vp->v_name)); + goto root_check; + } + uint32_t result = pmap_lookup_in_static_trust_cache(cdhash); + boolean_t in_trust_cache = result & (TC_LOOKUP_FOUND << TC_LOOKUP_RESULT_SHIFT); + if (!in_trust_cache) { + SHARED_REGION_TRACE_ERROR( + ("shared_region: %p [%d(%s)] map(%p:'%s'): " + "not in trust cache\n", + (void *)VM_KERNEL_ADDRPERM(current_thread()), + p->p_pid, p->p_comm, + (void *)VM_KERNEL_ADDRPERM(srfmp->vp), + srfmp->vp->v_name)); + goto root_check; + } + goto after_root_check; +root_check: +#endif /* XNU_TARGET_OS_OSX && defined(__arm64__) */ - if (scdir_enforce) { - /* get vnode for scdir_path */ - error = vnode_lookup(scdir_path, 0, &scdir_vp, vfs_context_current()); + /* The shared cache file must be owned by root */ + VATTR_INIT(&va); + VATTR_WANTED(&va, va_uid); + error = vnode_getattr(srfmp->vp, &va, vfs_context_current()); if (error) { SHARED_REGION_TRACE_ERROR( ("shared_region: %p [%d(%s)] map(%p:'%s'): " - "vnode_lookup(%s) failed (error=%d)\n", + "vnode_getattr(%p) failed (error=%d)\n", (void *)VM_KERNEL_ADDRPERM(current_thread()), p->p_pid, p->p_comm, - (void *)VM_KERNEL_ADDRPERM(vp), vp->v_name, - scdir_path, error)); + (void *)VM_KERNEL_ADDRPERM(srfmp->vp), + srfmp->vp->v_name, + (void *)VM_KERNEL_ADDRPERM(srfmp->vp), + error)); 
goto done; } - - /* ensure parent is scdir_vp */ - if (vnode_parent(vp) != scdir_vp) { + if (va.va_uid != 0) { SHARED_REGION_TRACE_ERROR( ("shared_region: %p [%d(%s)] map(%p:'%s'): " - "shared cache file not in %s\n", + "owned by uid=%d instead of 0\n", (void *)VM_KERNEL_ADDRPERM(current_thread()), p->p_pid, p->p_comm, - (void *)VM_KERNEL_ADDRPERM(vp), - vp->v_name, scdir_path)); + (void *)VM_KERNEL_ADDRPERM(srfmp->vp), + srfmp->vp->v_name, va.va_uid)); error = EPERM; goto done; } - } - /* get vnode size */ - error = vnode_size(vp, &fs, vfs_context_current()); - if (error) { - SHARED_REGION_TRACE_ERROR( - ("shared_region: %p [%d(%s)] map(%p:'%s'): " - "vnode_size(%p) failed (error=%d)\n", - (void *)VM_KERNEL_ADDRPERM(current_thread()), - p->p_pid, p->p_comm, - (void *)VM_KERNEL_ADDRPERM(vp), vp->v_name, - (void *)VM_KERNEL_ADDRPERM(vp), error)); - goto done; - } - file_size = fs; +#if XNU_TARGET_OS_OSX && defined(__arm64__) +after_root_check: +#endif /* XNU_TARGET_OS_OSX && defined(__arm64__) */ - /* get the file's memory object handle */ - file_control = ubc_getobject(vp, UBC_HOLDOBJECT); - if (file_control == MEMORY_OBJECT_CONTROL_NULL) { - SHARED_REGION_TRACE_ERROR( - ("shared_region: %p [%d(%s)] map(%p:'%s'): " - "no memory object\n", - (void *)VM_KERNEL_ADDRPERM(current_thread()), - p->p_pid, p->p_comm, - (void *)VM_KERNEL_ADDRPERM(vp), vp->v_name)); - error = EINVAL; - goto done; - } +#if CONFIG_CSR + if (csr_check(CSR_ALLOW_UNRESTRICTED_FS) != 0) { + VATTR_INIT(&va); + VATTR_WANTED(&va, va_flags); + error = vnode_getattr(srfmp->vp, &va, vfs_context_current()); + if (error) { + SHARED_REGION_TRACE_ERROR( + ("shared_region: %p [%d(%s)] map(%p:'%s'): " + "vnode_getattr(%p) failed (error=%d)\n", + (void *)VM_KERNEL_ADDRPERM(current_thread()), + p->p_pid, p->p_comm, + (void *)VM_KERNEL_ADDRPERM(srfmp->vp), + srfmp->vp->v_name, + (void *)VM_KERNEL_ADDRPERM(srfmp->vp), + error)); + goto done; + } - /* check that the mappings are properly covered by code signatures */ - if (!cs_system_enforcement()) { - /* code signing is not enforced: no need to check */ - } else { - for (i = 0; i < mappings_count; i++) { - if (mappings[i].sfm_init_prot & VM_PROT_ZF) { - /* zero-filled mapping: not backed by the file */ - continue; + if (!(va.va_flags & SF_RESTRICTED)) { + /* + * CSR is not configured in CSR_ALLOW_UNRESTRICTED_FS mode, and + * the shared cache file is NOT SIP-protected, so reject the + * mapping request + */ + SHARED_REGION_TRACE_ERROR( + ("shared_region: %p [%d(%s)] map(%p:'%s'), " + "vnode is not SIP-protected. \n", + (void *)VM_KERNEL_ADDRPERM(current_thread()), + p->p_pid, p->p_comm, + (void *)VM_KERNEL_ADDRPERM(srfmp->vp), + srfmp->vp->v_name)); + error = EPERM; + goto done; } - if (ubc_cs_is_range_codesigned(vp, - mappings[i].sfm_file_offset, - mappings[i].sfm_size)) { - /* this mapping is fully covered by code signatures */ - continue; + } +#else /* CONFIG_CSR */ + /* Devices without SIP/ROSP need to make sure that the shared cache is on the root volume. 
*/ + + struct vnode *root_vp = p->p_fd->fd_rdir; + if (root_vp == NULL) { + root_vp = rootvnode; + } + if (srfmp->vp->v_mount != root_vp->v_mount) { + SHARED_REGION_TRACE_ERROR( + ("shared_region: %p [%d(%s)] map(%p:'%s'): " + "not on process's root volume\n", + (void *)VM_KERNEL_ADDRPERM(current_thread()), + p->p_pid, p->p_comm, + (void *)VM_KERNEL_ADDRPERM(srfmp->vp), + srfmp->vp->v_name)); + error = EPERM; + goto done; + } +#endif /* CONFIG_CSR */ + + if (scdir_enforce) { + /* ensure parent is scdir_vp */ + assert(*scdir_vp != NULL); + if (vnode_parent(srfmp->vp) != *scdir_vp) { + SHARED_REGION_TRACE_ERROR( + ("shared_region: %p [%d(%s)] map(%p:'%s'): " + "shared cache file not in %s\n", + (void *)VM_KERNEL_ADDRPERM(current_thread()), + p->p_pid, p->p_comm, + (void *)VM_KERNEL_ADDRPERM(srfmp->vp), + srfmp->vp->v_name, scdir_path)); + error = EPERM; + goto done; } + } + + /* get vnode size */ + error = vnode_size(srfmp->vp, &fs, vfs_context_current()); + if (error) { SHARED_REGION_TRACE_ERROR( ("shared_region: %p [%d(%s)] map(%p:'%s'): " - "mapping #%d/%d [0x%llx:0x%llx:0x%llx:0x%x:0x%x] " - "is not code-signed\n", + "vnode_size(%p) failed (error=%d)\n", (void *)VM_KERNEL_ADDRPERM(current_thread()), p->p_pid, p->p_comm, - (void *)VM_KERNEL_ADDRPERM(vp), vp->v_name, - i, mappings_count, - mappings[i].sfm_address, - mappings[i].sfm_size, - mappings[i].sfm_file_offset, - mappings[i].sfm_max_prot, - mappings[i].sfm_init_prot)); + (void *)VM_KERNEL_ADDRPERM(srfmp->vp), + srfmp->vp->v_name, + (void *)VM_KERNEL_ADDRPERM(srfmp->vp), error)); + goto done; + } + srfmp->file_size = fs; + + /* get the file's memory object handle */ + srfmp->file_control = ubc_getobject(srfmp->vp, UBC_HOLDOBJECT); + if (srfmp->file_control == MEMORY_OBJECT_CONTROL_NULL) { + SHARED_REGION_TRACE_ERROR( + ("shared_region: %p [%d(%s)] map(%p:'%s'): " + "no memory object\n", + (void *)VM_KERNEL_ADDRPERM(current_thread()), + p->p_pid, p->p_comm, + (void *)VM_KERNEL_ADDRPERM(srfmp->vp), + srfmp->vp->v_name)); error = EINVAL; goto done; } + + /* check that the mappings are properly covered by code signatures */ + if (!cs_system_enforcement()) { + /* code signing is not enforced: no need to check */ + } else { + for (i = 0; i < srfmp->mappings_count; i++) { + if (srfmp->mappings[i].sms_init_prot & VM_PROT_ZF) { + /* zero-filled mapping: not backed by the file */ + continue; + } + if (ubc_cs_is_range_codesigned(srfmp->vp, + srfmp->mappings[i].sms_file_offset, + srfmp->mappings[i].sms_size)) { + /* this mapping is fully covered by code signatures */ + continue; + } + SHARED_REGION_TRACE_ERROR( + ("shared_region: %p [%d(%s)] map(%p:'%s'): " + "mapping #%d/%d [0x%llx:0x%llx:0x%llx:0x%x:0x%x] " + "is not code-signed\n", + (void *)VM_KERNEL_ADDRPERM(current_thread()), + p->p_pid, p->p_comm, + (void *)VM_KERNEL_ADDRPERM(srfmp->vp), + srfmp->vp->v_name, + i, srfmp->mappings_count, + srfmp->mappings[i].sms_address, + srfmp->mappings[i].sms_size, + srfmp->mappings[i].sms_file_offset, + srfmp->mappings[i].sms_max_prot, + srfmp->mappings[i].sms_init_prot)); + error = EINVAL; + goto done; + } + } } +done: + if (error != 0) { + shared_region_map_and_slide_cleanup(p, files_count, *sr_file_mappings, *shared_region, *scdir_vp); + *sr_file_mappings = NULL; + *shared_region = NULL; + *scdir_vp = NULL; + } + return error; +} - /* get the process's shared region (setup in vm_map_exec()) */ - shared_region = vm_shared_region_trim_and_get(current_task()); - if (shared_region == NULL) { - SHARED_REGION_TRACE_ERROR( - ("shared_region: %p [%d(%s)] 
map(%p:'%s'): " - "no shared region\n", - (void *)VM_KERNEL_ADDRPERM(current_thread()), - p->p_pid, p->p_comm, - (void *)VM_KERNEL_ADDRPERM(vp), vp->v_name)); - error = EINVAL; - goto done; +/* + * shared_region_map_np() + * + * This system call is intended for dyld. + * + * dyld uses this to map a shared cache file into a shared region. + * This is usually done only the first time a shared cache is needed. + * Subsequent processes will just use the populated shared region without + * requiring any further setup. + */ +static int +_shared_region_map_and_slide( + struct proc *p, + uint32_t files_count, + struct shared_file_np *files, + uint32_t mappings_count, + struct shared_file_mapping_slide_np *mappings) +{ + int error = 0; + kern_return_t kr = KERN_SUCCESS; + struct _sr_file_mappings *sr_file_mappings = NULL; + struct vnode *scdir_vp = NULL; + struct vm_shared_region *shared_region = NULL; + + /* + * Turn files, mappings into sr_file_mappings and other setup. + */ + error = shared_region_map_and_slide_setup(p, files_count, + files, mappings_count, mappings, + &sr_file_mappings, &shared_region, &scdir_vp); + if (error != 0) { + return error; } - /* map the file into that shared region's submap */ + /* map the file(s) into that shared region's submap */ kr = vm_shared_region_map_file(shared_region, - mappings_count, - mappings, - file_control, - file_size, (void *) p->p_fd->fd_rdir, - slide, - slide_start, - slide_size); + files_count, + sr_file_mappings); if (kr != KERN_SUCCESS) { - SHARED_REGION_TRACE_ERROR( - ("shared_region: %p [%d(%s)] map(%p:'%s'): " - "vm_shared_region_map_file() failed kr=0x%x\n", - (void *)VM_KERNEL_ADDRPERM(current_thread()), - p->p_pid, p->p_comm, - (void *)VM_KERNEL_ADDRPERM(vp), vp->v_name, kr)); - switch (kr) { - case KERN_INVALID_ADDRESS: - error = EFAULT; - break; - case KERN_PROTECTION_FAILURE: - error = EPERM; - break; - case KERN_NO_SPACE: - error = ENOMEM; - break; - case KERN_FAILURE: - case KERN_INVALID_ARGUMENT: - default: - error = EINVAL; - break; - } - goto done; + SHARED_REGION_TRACE_ERROR(("shared_region: %p [%d(%s)] map(): " + "vm_shared_region_map_file() failed kr=0x%x\n", + (void *)VM_KERNEL_ADDRPERM(current_thread()), + p->p_pid, p->p_comm, kr)); + } + + /* convert kern_return_t to errno */ + switch (kr) { + case KERN_SUCCESS: + error = 0; + break; + case KERN_INVALID_ADDRESS: + error = EFAULT; + break; + case KERN_PROTECTION_FAILURE: + error = EPERM; + break; + case KERN_NO_SPACE: + error = ENOMEM; + break; + case KERN_FAILURE: + case KERN_INVALID_ARGUMENT: + default: + error = EINVAL; + break; } - error = 0; + /* + * Mark that this process is now using split libraries. + */ + if (error == 0 && (p->p_flag & P_NOSHLIB)) { + OSBitAndAtomic(~((uint32_t)P_NOSHLIB), &p->p_flag); + } - vnode_lock_spin(vp); + shared_region_map_and_slide_cleanup(p, files_count, sr_file_mappings, shared_region, scdir_vp); - vp->v_flag |= VSHARED_DYLD; + SHARED_REGION_TRACE_DEBUG( + ("shared_region: %p [%d(%s)] <- map\n", + (void *)VM_KERNEL_ADDRPERM(current_thread()), + p->p_pid, p->p_comm)); - vnode_unlock(vp); + return error; +} - /* update the vnode's access time */ - if (!(vnode_vfsvisflags(vp) & MNT_NOATIME)) { - VATTR_INIT(&va); - nanotime(&va.va_access_time); - VATTR_SET_ACTIVE(&va, va_access_time); - vnode_setattr(vp, &va, vfs_context_current()); - } +/* + * Clean up part of _shared_region_map_and_slide() + * It had to be broken out of _shared_region_map_and_slide() to + * prevent compiler inlining from blowing out the stack. 
+ */ +__attribute__((noinline)) +static void +shared_region_map_and_slide_cleanup( + struct proc *p, + uint32_t files_count, + struct _sr_file_mappings *sr_file_mappings, + struct vm_shared_region *shared_region, + struct vnode *scdir_vp) +{ + struct _sr_file_mappings *srfmp; + struct vnode_attr va; + + if (sr_file_mappings != NULL) { + for (srfmp = &sr_file_mappings[0]; srfmp < &sr_file_mappings[files_count]; srfmp++) { + if (srfmp->vp != NULL) { + vnode_lock_spin(srfmp->vp); + srfmp->vp->v_flag |= VSHARED_DYLD; + vnode_unlock(srfmp->vp); + + /* update the vnode's access time */ + if (!(vnode_vfsvisflags(srfmp->vp) & MNT_NOATIME)) { + VATTR_INIT(&va); + nanotime(&va.va_access_time); + VATTR_SET_ACTIVE(&va, va_access_time); + vnode_setattr(srfmp->vp, &va, vfs_context_current()); + } - if (p->p_flag & P_NOSHLIB) { - /* signal that this process is now using split libraries */ - OSBitAndAtomic(~((uint32_t)P_NOSHLIB), &p->p_flag); +#if NAMEDSTREAMS + /* + * If the shared cache is compressed, it may + * have a namedstream vnode instantiated for + * for it. That namedstream vnode will also + * have to be marked with VSHARED_DYLD. + */ + if (vnode_hasnamedstreams(srfmp->vp)) { + vnode_t svp; + if (vnode_getnamedstream(srfmp->vp, &svp, XATTR_RESOURCEFORK_NAME, + NS_OPEN, 0, vfs_context_kernel()) == 0) { + vnode_lock_spin(svp); + svp->v_flag |= VSHARED_DYLD; + vnode_unlock(svp); + vnode_put(svp); + } + } +#endif /* NAMEDSTREAMS */ + /* + * release the vnode... + * ubc_map() still holds it for us in the non-error case + */ + (void) vnode_put(srfmp->vp); + srfmp->vp = NULL; + } + if (srfmp->fp != NULL) { + /* release the file descriptor */ + fp_drop(p, srfmp->fd, srfmp->fp, 0); + srfmp->fp = NULL; + } + } + kheap_free(KHEAP_TEMP, sr_file_mappings, files_count * sizeof(*sr_file_mappings)); } -done: - if (vp != NULL) { - /* - * release the vnode... - * ubc_map() still holds it for us in the non-error case - */ - (void) vnode_put(vp); - vp = NULL; - } - if (fp != NULL) { - /* release the file descriptor */ - fp_drop(p, fd, fp, 0); - fp = NULL; - } if (scdir_vp != NULL) { (void)vnode_put(scdir_vp); scdir_vp = NULL; @@ -2089,34 +2575,30 @@ done: if (shared_region != NULL) { vm_shared_region_deallocate(shared_region); } +} - SHARED_REGION_TRACE_DEBUG( - ("shared_region: %p [%d(%s)] <- map\n", - (void *)VM_KERNEL_ADDRPERM(current_thread()), - p->p_pid, p->p_comm)); - return error; -} +#define SFM_MAX 1024 /* max mapping structs allowed to pass in */ +/* + * This interface is used by dyld to map shared caches which are + * for any architecture which doesn't have run time support of pointer + * authentication. Note dyld could also use the new ...map_and_slide_2_np() + * call for this case, however, it just doesn't do that yet. + */ int shared_region_map_and_slide_np( - struct proc *p, - struct shared_region_map_and_slide_np_args *uap, - __unused int *retvalp) + struct proc *p, + struct shared_region_map_and_slide_np_args *uap, + __unused int *retvalp) { - struct shared_file_mapping_np *mappings; - unsigned int mappings_count = uap->count; - kern_return_t kr = KERN_SUCCESS; - uint32_t slide = uap->slide; - -#define SFM_MAX_STACK 8 - struct shared_file_mapping_np stack_mappings[SFM_MAX_STACK]; - - /* Is the process chrooted?? 
*/ - if (p->p_fd->fd_rdir != NULL) { - kr = EINVAL; - goto done; - } + unsigned int mappings_count = uap->count; + unsigned int m; + uint32_t slide = uap->slide; + struct shared_file_np shared_files[1]; + struct shared_file_mapping_np legacy_mapping; + struct shared_file_mapping_slide_np *mappings = NULL; + kern_return_t kr = KERN_SUCCESS; if ((kr = vm_shared_region_sliding_valid(slide)) != KERN_SUCCESS) { if (kr == KERN_INVALID_ARGUMENT) { @@ -2138,32 +2620,199 @@ shared_region_map_and_slide_np( p->p_pid, p->p_comm)); kr = 0; /* no mappings: we're done ! */ goto done; - } else if (mappings_count <= SFM_MAX_STACK) { - mappings = &stack_mappings[0]; + } else if (mappings_count <= SFM_MAX) { + mappings = kheap_alloc(KHEAP_TEMP, + mappings_count * sizeof(mappings[0]), Z_WAITOK); + if (mappings == NULL) { + kr = KERN_RESOURCE_SHORTAGE; + goto done; + } } else { SHARED_REGION_TRACE_ERROR( ("shared_region: %p [%d(%s)] map(): " - "too many mappings (%d)\n", + "too many mappings (%d) max %d\n", (void *)VM_KERNEL_ADDRPERM(current_thread()), p->p_pid, p->p_comm, - mappings_count)); + mappings_count, SFM_MAX)); kr = KERN_FAILURE; goto done; } - if ((kr = shared_region_copyin_mappings(p, uap->mappings, uap->count, mappings))) { + /* + * Read in the mappings and translate to new format. + */ + for (m = 0; m < mappings_count; ++m) { + user_addr_t from_uaddr = uap->mappings + (m * sizeof(struct shared_file_mapping_np)); + kr = shared_region_copyin(p, from_uaddr, 1, sizeof(legacy_mapping), &legacy_mapping); + if (kr != 0) { + goto done; + } + mappings[m].sms_address = legacy_mapping.sfm_address; + mappings[m].sms_size = legacy_mapping.sfm_size; + mappings[m].sms_file_offset = legacy_mapping.sfm_file_offset; + mappings[m].sms_max_prot = legacy_mapping.sfm_max_prot; + mappings[m].sms_init_prot = legacy_mapping.sfm_init_prot; + mappings[m].sms_slide_size = uap->slide_size; + mappings[m].sms_slide_start = uap->slide_start; + } + + bzero(shared_files, sizeof(shared_files)); + shared_files[0].sf_fd = uap->fd; + shared_files[0].sf_mappings_count = mappings_count; + shared_files[0].sf_slide = slide; + + kr = _shared_region_map_and_slide(p, + 1, /* # of files to map */ + &shared_files[0], /* files to map */ + mappings_count, + mappings); + +done: + if (mappings != NULL) { + kheap_free(KHEAP_TEMP, mappings, mappings_count * sizeof(mappings[0])); + mappings = NULL; + } + return kr; +} + +/* + * This interface for setting up shared region mappings is what dyld + * uses for shared caches that have __AUTH sections. All other shared + * caches use the non _2 version. + * + * The slide used for shared regions setup using this interface is done differently + * from the old interface. The slide value passed in the shared_files_np represents + * a max value. The kernel will choose a random value based on that, then use it + * for all shared regions. 
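+ *
+ * (Annotation added for this excerpt, not part of the original diff:
+ * as an illustration, if dyld were to pass sf_slide = 0x200000 as the
+ * maximum, the kernel would compute read_random() % 0x200000, round it
+ * down to a page boundary with SLIDE_AMOUNT_MASK, and apply that one
+ * slide_amount to every file and mapping supplied in this call.)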
+ */ +#define SLIDE_AMOUNT_MASK ~PAGE_MASK + +int +shared_region_map_and_slide_2_np( + struct proc *p, + struct shared_region_map_and_slide_2_np_args *uap, + __unused int *retvalp) +{ + unsigned int files_count; + struct shared_file_np *shared_files = NULL; + unsigned int mappings_count; + struct shared_file_mapping_slide_np *mappings = NULL; + kern_return_t kr = KERN_SUCCESS; + boolean_t should_slide_mappings = TRUE; + + files_count = uap->files_count; + mappings_count = uap->mappings_count; + + + if (files_count == 0) { + SHARED_REGION_TRACE_INFO( + ("shared_region: %p [%d(%s)] map(): " + "no files\n", + (void *)VM_KERNEL_ADDRPERM(current_thread()), + p->p_pid, p->p_comm)); + kr = 0; /* no files to map: we're done ! */ + goto done; + } else if (files_count <= _SR_FILE_MAPPINGS_MAX_FILES) { + shared_files = kheap_alloc(KHEAP_TEMP, + files_count * sizeof(shared_files[0]), Z_WAITOK); + if (shared_files == NULL) { + kr = KERN_RESOURCE_SHORTAGE; + goto done; + } + } else { + SHARED_REGION_TRACE_ERROR( + ("shared_region: %p [%d(%s)] map(): " + "too many files (%d) max %d\n", + (void *)VM_KERNEL_ADDRPERM(current_thread()), + p->p_pid, p->p_comm, + files_count, _SR_FILE_MAPPINGS_MAX_FILES)); + kr = KERN_FAILURE; + goto done; + } + + if (mappings_count == 0) { + SHARED_REGION_TRACE_INFO( + ("shared_region: %p [%d(%s)] map(): " + "no mappings\n", + (void *)VM_KERNEL_ADDRPERM(current_thread()), + p->p_pid, p->p_comm)); + kr = 0; /* no mappings: we're done ! */ + goto done; + } else if (mappings_count <= SFM_MAX) { + mappings = kheap_alloc(KHEAP_TEMP, + mappings_count * sizeof(mappings[0]), Z_WAITOK); + if (mappings == NULL) { + kr = KERN_RESOURCE_SHORTAGE; + goto done; + } + } else { + SHARED_REGION_TRACE_ERROR( + ("shared_region: %p [%d(%s)] map(): " + "too many mappings (%d) max %d\n", + (void *)VM_KERNEL_ADDRPERM(current_thread()), + p->p_pid, p->p_comm, + mappings_count, SFM_MAX)); + kr = KERN_FAILURE; goto done; } + kr = shared_region_copyin(p, uap->files, files_count, sizeof(shared_files[0]), shared_files); + if (kr != KERN_SUCCESS) { + goto done; + } - kr = _shared_region_map_and_slide(p, uap->fd, mappings_count, mappings, - slide, - uap->slide_start, uap->slide_size); + kr = shared_region_copyin(p, uap->mappings, mappings_count, sizeof(mappings[0]), mappings); if (kr != KERN_SUCCESS) { - return kr; + goto done; } + if (should_slide_mappings) { + uint32_t max_slide = shared_files[0].sf_slide; + uint32_t random_val; + uint32_t slide_amount; + + if (max_slide != 0) { + read_random(&random_val, sizeof random_val); + slide_amount = ((random_val % max_slide) & SLIDE_AMOUNT_MASK); + } else { + slide_amount = 0; + } + + /* + * Fix up the mappings to reflect the desired slide. 
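+ *
+ * (Annotation added for this excerpt, not part of the original diff:
+ * each file's sf_slide is set to the chosen slide_amount, every
+ * mapping's sms_address is offset by it, and sms_slide_start is offset
+ * as well when sms_slide_size is non-zero. The slide_start adjustment
+ * indexes mappings[] with the per-file counter i rather than the
+ * running counter m; the two only coincide for the first file.)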
+ */ + unsigned int f; + unsigned int m = 0; + unsigned int i; + for (f = 0; f < files_count; ++f) { + shared_files[f].sf_slide = slide_amount; + for (i = 0; i < shared_files[f].sf_mappings_count; ++i, ++m) { + if (m >= mappings_count) { + SHARED_REGION_TRACE_ERROR( + ("shared_region: %p [%d(%s)] map(): " + "mapping count argument was too small\n", + (void *)VM_KERNEL_ADDRPERM(current_thread()), + p->p_pid, p->p_comm)); + kr = KERN_FAILURE; + goto done; + } + mappings[m].sms_address += slide_amount; + if (mappings[m].sms_slide_size != 0) { + mappings[i].sms_slide_start += slide_amount; + } + } + } + } + kr = _shared_region_map_and_slide(p, files_count, shared_files, mappings_count, mappings); done: + if (shared_files != NULL) { + kheap_free(KHEAP_TEMP, shared_files, files_count * sizeof(shared_files[0])); + shared_files = NULL; + } + if (mappings != NULL) { + kheap_free(KHEAP_TEMP, mappings, mappings_count * sizeof(mappings[0])); + mappings = NULL; + } return kr; } @@ -2208,6 +2857,15 @@ SYSCTL_INT(_vm, OID_AUTO, kern_lpage_count, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_kern_lpage_count, 0, "kernel used large pages"); #if DEVELOPMENT || DEBUG +#if __ARM_MIXED_PAGE_SIZE__ +static int vm_mixed_pagesize_supported = 1; +#else +static int vm_mixed_pagesize_supported = 0; +#endif /*__ARM_MIXED_PAGE_SIZE__ */ +SYSCTL_INT(_debug, OID_AUTO, vm_mixed_pagesize_supported, CTLFLAG_ANYBODY | CTLFLAG_RD | CTLFLAG_LOCKED, + &vm_mixed_pagesize_supported, 0, "kernel support for mixed pagesize"); + + extern uint64_t get_pages_grabbed_count(void); static int @@ -2242,7 +2900,7 @@ SYSCTL_UINT(_vm, OID_AUTO, pageout_cleaned_reference_reactivated, CTLFLAG_RD | C &vm_pageout_debug.vm_pageout_cleaned_reference_reactivated, 0, "Cleaned pages reference reactivated"); SYSCTL_UINT(_vm, OID_AUTO, pageout_enqueued_cleaned, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_debug.vm_pageout_enqueued_cleaned, 0, ""); /* sum of next two */ -#endif +#endif /* DEVELOPMENT || DEBUG */ extern int madvise_free_debug; SYSCTL_INT(_vm, OID_AUTO, madvise_free_debug, CTLFLAG_RW | CTLFLAG_LOCKED, @@ -2430,15 +3088,15 @@ kas_info(struct proc *p, struct kas_info_args *uap, int *retval __unused) { -#ifdef SECURE_KERNEL +#ifndef CONFIG_KAS_INFO (void)p; (void)uap; return ENOTSUP; -#else /* !SECURE_KERNEL */ +#else /* CONFIG_KAS_INFO */ int selector = uap->selector; user_addr_t valuep = uap->value; user_addr_t sizep = uap->size; - user_size_t size; + user_size_t size, rsize; int error; if (!kauth_cred_issuser(kauth_cred_get())) { @@ -2474,18 +3132,47 @@ kas_info(struct proc *p, return EINVAL; } - if (IS_64BIT_PROCESS(p)) { - user64_size_t size64 = (user64_size_t)size; - error = copyout(&size64, sizep, sizeof(size64)); - } else { - user32_size_t size32 = (user32_size_t)size; - error = copyout(&size32, sizep, sizeof(size32)); - } + error = copyout(&slide, valuep, sizeof(slide)); if (error) { return error; } + rsize = size; + } + break; + case KAS_INFO_KERNEL_SEGMENT_VMADDR_SELECTOR: + { + uint32_t i; + kernel_mach_header_t *mh = &_mh_execute_header; + struct load_command *cmd; + cmd = (struct load_command*) &mh[1]; + uint64_t *bases; + rsize = mh->ncmds * sizeof(uint64_t); + + /* + * Return the size if no data was passed + */ + if (valuep == 0) { + break; + } + + if (rsize > size) { + return EINVAL; + } + + bases = kheap_alloc(KHEAP_TEMP, rsize, Z_WAITOK | Z_ZERO); + + for (i = 0; i < mh->ncmds; i++) { + if (cmd->cmd == LC_SEGMENT_KERNEL) { + __IGNORE_WCASTALIGN(kernel_segment_command_t * sg = (kernel_segment_command_t *) cmd); + bases[i] = 
(uint64_t)sg->vmaddr; + } + cmd = (struct load_command *) ((uintptr_t) cmd + cmd->cmdsize); + } + + error = copyout(bases, valuep, rsize); + + kheap_free(KHEAP_TEMP, bases, rsize); - error = copyout(&slide, valuep, sizeof(slide)); if (error) { return error; } @@ -2495,9 +3182,33 @@ kas_info(struct proc *p, return EINVAL; } - return 0; -#endif /* !SECURE_KERNEL */ + if (IS_64BIT_PROCESS(p)) { + user64_size_t size64 = (user64_size_t)rsize; + error = copyout(&size64, sizep, sizeof(size64)); + } else { + user32_size_t size32 = (user32_size_t)rsize; + error = copyout(&size32, sizep, sizeof(size32)); + } + + return error; +#endif /* CONFIG_KAS_INFO */ +} + +#if __has_feature(ptrauth_calls) +/* + * Generate a random pointer signing key that isn't 0. + */ +uint64_t +generate_jop_key(void) +{ + uint64_t key; + + do { + read_random(&key, sizeof key); + } while (key == 0); + return key; } +#endif /* __has_feature(ptrauth_calls) */ #pragma clang diagnostic push @@ -2603,6 +3314,9 @@ SYSCTL_QUAD(_vm, OID_AUTO, shared_region_pager_slid_error, CTLFLAG_RD | CTLFLAG_LOCKED, &shared_region_pager_slid_error, ""); SYSCTL_QUAD(_vm, OID_AUTO, shared_region_pager_reclaimed, CTLFLAG_RD | CTLFLAG_LOCKED, &shared_region_pager_reclaimed, ""); +extern int shared_region_destroy_delay; +SYSCTL_INT(_vm, OID_AUTO, shared_region_destroy_delay, + CTLFLAG_RW | CTLFLAG_LOCKED, &shared_region_destroy_delay, 0, ""); #if MACH_ASSERT extern int pmap_ledgers_panic_leeway; @@ -2615,3 +3329,74 @@ SYSCTL_INT(_vm, OID_AUTO, protect_privileged_from_untrusted, extern uint64_t vm_copied_on_read; SYSCTL_QUAD(_vm, OID_AUTO, copied_on_read, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_copied_on_read, ""); + +extern int vm_shared_region_count; +extern int vm_shared_region_peak; +SYSCTL_INT(_vm, OID_AUTO, shared_region_count, + CTLFLAG_RD | CTLFLAG_LOCKED, &vm_shared_region_count, 0, ""); +SYSCTL_INT(_vm, OID_AUTO, shared_region_peak, + CTLFLAG_RD | CTLFLAG_LOCKED, &vm_shared_region_peak, 0, ""); +#if DEVELOPMENT || DEBUG +extern unsigned int shared_region_pagers_resident_count; +SYSCTL_INT(_vm, OID_AUTO, shared_region_pagers_resident_count, + CTLFLAG_RD | CTLFLAG_LOCKED, &shared_region_pagers_resident_count, 0, ""); +extern unsigned int shared_region_pagers_resident_peak; +SYSCTL_INT(_vm, OID_AUTO, shared_region_pagers_resident_peak, + CTLFLAG_RD | CTLFLAG_LOCKED, &shared_region_pagers_resident_peak, 0, ""); +extern int shared_region_pager_count; +SYSCTL_INT(_vm, OID_AUTO, shared_region_pager_count, + CTLFLAG_RD | CTLFLAG_LOCKED, &shared_region_pager_count, 0, ""); +#if __has_feature(ptrauth_calls) +extern int shared_region_key_count; +SYSCTL_INT(_vm, OID_AUTO, shared_region_key_count, + CTLFLAG_RD | CTLFLAG_LOCKED, &shared_region_key_count, 0, ""); +extern int vm_shared_region_reslide_count; +SYSCTL_INT(_vm, OID_AUTO, shared_region_reslide_count, + CTLFLAG_RD | CTLFLAG_LOCKED, &vm_shared_region_reslide_count, 0, ""); +#endif /* __has_feature(ptrauth_calls) */ +#endif /* DEVELOPMENT || DEBUG */ + +#if MACH_ASSERT +extern int debug4k_filter; +SYSCTL_INT(_vm, OID_AUTO, debug4k_filter, CTLFLAG_RW | CTLFLAG_LOCKED, &debug4k_filter, 0, ""); +extern int debug4k_panic_on_terminate; +SYSCTL_INT(_vm, OID_AUTO, debug4k_panic_on_terminate, CTLFLAG_RW | CTLFLAG_LOCKED, &debug4k_panic_on_terminate, 0, ""); +extern int debug4k_panic_on_exception; +SYSCTL_INT(_vm, OID_AUTO, debug4k_panic_on_exception, CTLFLAG_RW | CTLFLAG_LOCKED, &debug4k_panic_on_exception, 0, ""); +extern int debug4k_panic_on_misaligned_sharing; +SYSCTL_INT(_vm, OID_AUTO, 
debug4k_panic_on_misaligned_sharing, CTLFLAG_RW | CTLFLAG_LOCKED, &debug4k_panic_on_misaligned_sharing, 0, ""); +#endif /* MACH_ASSERT */ + +/* + * A sysctl which causes all existing shared regions to become stale. They + * will no longer be used by anything new and will be torn down as soon as + * the last existing user exits. A write of non-zero value causes that to happen. + * This should only be used by launchd, so we check that this is initproc. + */ +static int +shared_region_pivot(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req) +{ + unsigned int value = 0; + int changed = 0; + int error = sysctl_io_number(req, 0, sizeof(value), &value, &changed); + if (error || !changed) { + return error; + } + if (current_proc() != initproc) { + return EPERM; + } + + vm_shared_region_pivot(); + + return 0; +} + +SYSCTL_PROC(_vm, OID_AUTO, shared_region_pivot, + CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_LOCKED, + 0, 0, shared_region_pivot, "I", ""); + +extern int vm_remap_old_path, vm_remap_new_path; +SYSCTL_INT(_vm, OID_AUTO, remap_old_path, + CTLFLAG_RD | CTLFLAG_LOCKED, &vm_remap_old_path, 0, ""); +SYSCTL_INT(_vm, OID_AUTO, remap_new_path, + CTLFLAG_RD | CTLFLAG_LOCKED, &vm_remap_new_path, 0, ""); diff --git a/bsd/vm/vnode_pager.c b/bsd/vm/vnode_pager.c index 0a9075c05..b4d6d6fc6 100644 --- a/bsd/vm/vnode_pager.c +++ b/bsd/vm/vnode_pager.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -65,7 +65,6 @@ #include #include #include -#include #include #include @@ -83,7 +82,7 @@ #include void -vnode_pager_throttle() +vnode_pager_throttle(void) { struct uthread *ut; @@ -122,7 +121,7 @@ vnode_pager_issue_reprioritize_io(struct vnode *devvp, uint64_t blkno, uint32_t set_tier.extents = &extent; set_tier.extentsCount = 1; - set_tier.tier = priority; + set_tier.tier = (uint8_t)priority; error = VNOP_IOCTL(devvp, DKIOCSETTIER, (caddr_t)&set_tier, 0, vfs_context_kernel()); return; @@ -312,6 +311,26 @@ vnode_pageout(struct vnode *vp, isize = (int)size; + /* + * This call is non-blocking and does not ever fail but it can + * only be made when there is other explicit synchronization + * with reclaiming of the vnode which, in this path, is provided + * by the paging in progress counter. + * + * In addition, this may also be entered via explicit ubc_msync + * calls or vm_swapfile_io where the existing iocount provides + * the necessary synchronization. Ideally we would not take an + * additional iocount here in the cases where an explcit iocount + * has already been taken but this call doesn't cause a deadlock + * as other forms of vnode_get* might if this thread has already + * taken an iocount. + */ + error = vnode_getalways_from_pager(vp); + if (error != 0) { + /* This can't happen */ + panic("vnode_getalways returned %d for vp %p", error, vp); + } + if (isize <= 0) { result = PAGER_ERROR; error_ret = EINVAL; @@ -549,6 +568,8 @@ vnode_pageout(struct vnode *vp, pg_index += num_of_pages; } out: + vnode_put_from_pager(vp); + if (errorp) { *errorp = error_ret; } @@ -586,6 +607,25 @@ vnode_pagein( ignore_valid_page_check = 1; } + /* + * This call is non-blocking and does not ever fail but it can + * only be made when there is other explicit synchronization + * with reclaiming of the vnode which, in this path, is provided + * by the paging in progress counter. 
+ * + * In addition, this may also be entered via vm_swapfile_io + * where the existing iocount provides the necessary synchronization. + * Ideally we would not take an additional iocount here in the cases + * where an explcit iocount has already been taken but this call + * doesn't cause a deadlock as other forms of vnode_get* might if + * this thread has already taken an iocount. + */ + error = vnode_getalways_from_pager(vp); + if (error != 0) { + /* This can't happen */ + panic("vnode_getalways returned %d for vp %p", error, vp); + } + if (UBCINFOEXISTS(vp) == 0) { result = PAGER_ERROR; error = PAGER_ERROR; @@ -770,6 +810,8 @@ vnode_pagein( } } out: + vnode_put_from_pager(vp); + if (errorp) { *errorp = result; } diff --git a/config/BSDKernel.exports b/config/BSDKernel.exports index f2ce580fc..f439a52cd 100644 --- a/config/BSDKernel.exports +++ b/config/BSDKernel.exports @@ -4,10 +4,10 @@ _VNOP_IOCTL _VNOP_READ _VNOP_STRATEGY _VNOP_WRITE -__FREE -__FREE_ZONE +__FREE:__FREE_external +__FREE_ZONE:__FREE_ZONE_external __MALLOC:__MALLOC_external -__MALLOC_ZONE:__MALLOC_ZONE_external +__MALLOC_ZONE:__MALLOC_external _advisory_read _advisory_read_ext _bcd2bin_data @@ -17,6 +17,8 @@ _bdevsw_remove _bpf_attach _bpf_tap_in _bpf_tap_out +_bpf_tap_packet_in +_bpf_tap_packet_out _bpfattach _bsd_timeout _bsd_untimeout @@ -202,6 +204,7 @@ _futimes _fuword _groupmember _hashinit +_hashdestroy _ifaddr_address _ifaddr_address_family _ifaddr_dstaddress @@ -477,6 +480,7 @@ _physio _proc_chrooted _proc_exiting _proc_find +_proc_find_ident _proc_forcequota _proc_is64bit _proc_is64bit_data @@ -644,7 +648,6 @@ _vfs_clearauthcache_ttl _vfs_clearauthopaque _vfs_clearauthopaqueaccess _vfs_clearextendedsecurity -_vfs_clearnoswap _vfs_clearflags _vfs_context_create _vfs_context_is64bit @@ -684,7 +687,6 @@ _vfs_setauthcache_ttl _vfs_setauthopaque _vfs_setauthopaqueaccess _vfs_setextendedsecurity -_vfs_setnoswap _vfs_setflags _vfs_setfsprivate _vfs_setioattr @@ -822,6 +824,10 @@ _vnop_strategy_desc _vnop_symlink_desc _vnop_whiteout_desc _vnop_write_desc +_vsock_add_transport +_vsock_put_message +_vsock_remove_transport +_vsock_reset_transport _vttoif_tab _wakeup _wakeup_one diff --git a/config/IOKit.arm.exports b/config/IOKit.arm.exports index db83ede81..99a73b9dd 100644 --- a/config/IOKit.arm.exports +++ b/config/IOKit.arm.exports @@ -7,8 +7,8 @@ __Z17IODTGetCellCountsP15IORegistryEntryPmS1_ __Z22IODTResolveAddressCellP15IORegistryEntryPmS1_S1_ __Z23IODTFindMatchingEntriesP15IORegistryEntrymPKc __ZN10IOWorkLoop19workLoopWithOptionsEm -__ZN10IOWorkLoop9sleepGateEPvym __ZN10IOWorkLoop9sleepGateEPvm +__ZN10IOWorkLoop9sleepGateEPvym __ZN11IOCatalogue11findDriversEP12OSDictionaryPl __ZN11IOCatalogue11findDriversEP9IOServicePl __ZN11IODataQueue11withEntriesEmm @@ -33,10 +33,10 @@ __ZN12IODMACommand15genIOVMSegmentsEPFbPS_NS_9Segment64EPvmEPyS2_Pm __ZN12IODMACommand15genIOVMSegmentsEPyPvPm __ZN12IODMACommand16createCopyBufferE11IODirectiony __ZN12IODMACommand17withSpecificationEPFbPS_NS_9Segment64EPvmEPKNS_14SegmentOptionsEjP8IOMapperS2_ -__ZN12IODMACommand21initWithSpecificationEPFbPS_NS_9Segment64EPvmEPKNS_14SegmentOptionsEjP8IOMapperS2_ -__ZN12IODMACommand24prepareWithSpecificationEPFbPS_NS_9Segment64EPvmEPKNS_14SegmentOptionsEjP8IOMapperyybb __ZN12IODMACommand17withSpecificationEPFbPS_NS_9Segment64EPvmEhyNS_14MappingOptionsEymP8IOMapperS2_ +__ZN12IODMACommand21initWithSpecificationEPFbPS_NS_9Segment64EPvmEPKNS_14SegmentOptionsEjP8IOMapperS2_ 
__ZN12IODMACommand21initWithSpecificationEPFbPS_NS_9Segment64EPvmEhyNS_14MappingOptionsEymP8IOMapperS2_ +__ZN12IODMACommand24prepareWithSpecificationEPFbPS_NS_9Segment64EPvmEPKNS_14SegmentOptionsEjP8IOMapperyybb __ZN12IODMACommand24prepareWithSpecificationEPFbPS_NS_9Segment64EPvmEhyNS_14MappingOptionsEymP8IOMapperyybb __ZN12IODMACommand8transferEmyPvy __ZN12IOUserClient12initWithTaskEP4taskPvm @@ -46,6 +46,7 @@ __ZN12IOUserClient15sendAsyncResultEPjiPPvm __ZN12IOUserClient17mapClientMemory64EmP4taskmy __ZN12IOUserClient17sendAsyncResult64EPyiS0_m __ZN12IOUserClient19clientMemoryForTypeEmPmPP18IOMemoryDescriptor +__ZN12IOUserClient19clientMemoryForTypeEmPmR11OSSharedPtrI18IOMemoryDescriptorE __ZN12IOUserClient19setAsyncReference64EPyP8ipc_portyy __ZN12IOUserClient19setAsyncReference64EPyP8ipc_portyyP4task __ZN12IOUserClient23getExternalTrapForIndexEm @@ -55,15 +56,16 @@ __ZN12IOUserClient24registerNotificationPortEP8ipc_portmm __ZN12IOUserClient24registerNotificationPortEP8ipc_portmy __ZN12IOUserClient25getExternalMethodForIndexEm __ZN12IOUserClient26getTargetAndMethodForIndexEPP9IOServicem +__ZN12IOUserClient26getTargetAndMethodForIndexER11OSSharedPtrI9IOServiceEm __ZN12IOUserClient28sendAsyncResult64WithOptionsEPyiS0_mm __ZN12IOUserClient30getExternalAsyncMethodForIndexEm __ZN12IOUserClient31getAsyncTargetAndMethodForIndexEPP9IOServicem -__ZN13IOCommandGate12commandSleepEPvym __ZN13IOCommandGate12commandSleepEPvm +__ZN13IOCommandGate12commandSleepEPvym __ZN13IOCommandPool11commandPoolEP9IOServiceP10IOWorkLoopm __ZN13IOCommandPool4initEP9IOServiceP10IOWorkLoopm -__ZN13IOEventSource9sleepGateEPvym __ZN13IOEventSource9sleepGateEPvm +__ZN13IOEventSource9sleepGateEPvym __ZN13_IOServiceJob8startJobEP9IOServiceim __ZN14IODeviceMemory12withSubRangeEPS_mm __ZN14IODeviceMemory13arrayFromListEPNS_11InitElementEm @@ -71,9 +73,12 @@ __ZN14IODeviceMemory9withRangeEmm __ZN14IOMemoryCursor17withSpecificationEPFvNS_15PhysicalSegmentEPvmEmmm __ZN14IOMemoryCursor19genPhysicalSegmentsEP18IOMemoryDescriptormPvmmPm __ZN14IOMemoryCursor21initWithSpecificationEPFvNS_15PhysicalSegmentEPvmEmmm +__ZN14IOPMrootDomain17requestUserActiveEP9IOServicePKc __ZN14IOPMrootDomain17setSleepSupportedEm __ZN14IOPMrootDomain19sysPowerDownHandlerEPvS0_mP9IOServiceS0_j +__ZN14IOPMrootDomain20claimSystemBootEventEP9IOServicemPKcP8OSObject __ZN14IOPMrootDomain20claimSystemWakeEventEP9IOServicemPKcP8OSObject +__ZN14IOPMrootDomain24claimSystemShutdownEventEP9IOServicemPKcP8OSObject __ZN14IOPMrootDomain24receivePowerNotificationEm __ZN14IOPMrootDomain27displayWranglerNotificationEPvS0_mP9IOServiceS0_j __ZN15IODMAController13getControllerEP9IOServicem @@ -138,14 +143,14 @@ __ZN18IOMemoryDescriptor7doUnmapEP7_vm_mapjm __ZN18IOMemoryDescriptor9readBytesEmPvm __ZN18IORegistryIterator11iterateOverEP15IORegistryEntryPK15IORegistryPlanem __ZN18IORegistryIterator11iterateOverEPK15IORegistryPlanem -__ZN18IOTimerEventSource10setTimeoutEjyy -__ZN18IOTimerEventSource10setTimeoutEy __ZN18IOTimerEventSource10setTimeoutE13mach_timespec +__ZN18IOTimerEventSource10setTimeoutEjyy __ZN18IOTimerEventSource10setTimeoutEmm -__ZN18IOTimerEventSource10wakeAtTimeEjyy -__ZN18IOTimerEventSource10wakeAtTimeEy +__ZN18IOTimerEventSource10setTimeoutEy __ZN18IOTimerEventSource10wakeAtTimeE13mach_timespec +__ZN18IOTimerEventSource10wakeAtTimeEjyy __ZN18IOTimerEventSource10wakeAtTimeEmm +__ZN18IOTimerEventSource10wakeAtTimeEy __ZN18IOTimerEventSource12setTimeoutMSEm __ZN18IOTimerEventSource12setTimeoutUSEm __ZN18IOTimerEventSource12wakeAtTimeMSEm @@ -236,6 +241,8 @@ 
__ZN8IOPMprotC2EPK11OSMetaClass __ZN8IOPMprotC2Ev __ZN8IOPMprotD0Ev __ZN8IOPMprotD2Ev +__ZN8IOPMprotdlEPvm +__ZN8IOPMprotnwEm __ZN9IOService10adjustBusyEl __ZN9IOService10handleOpenEPS_mPv __ZN9IOService10systemWakeEv @@ -251,7 +258,9 @@ __ZN9IOService12waitForStateEmmP13mach_timespec __ZN9IOService13getPMworkloopEv __ZN9IOService13messageClientEmP8OSObjectPvj __ZN9IOService13newUserClientEP4taskPvmP12OSDictionaryPP12IOUserClient +__ZN9IOService13newUserClientEP4taskPvmP12OSDictionaryR11OSSharedPtrI12IOUserClientE __ZN9IOService13newUserClientEP4taskPvmPP12IOUserClient +__ZN9IOService13newUserClientEP4taskPvmR11OSSharedPtrI12IOUserClientE __ZN9IOService13startMatchingEm __ZN9IOService13waitMatchIdleEm __ZN9IOService13willTerminateEPS_m @@ -273,7 +282,6 @@ __ZN9IOService16command_receivedEPvS0_S0_S0_ __ZN9IOService16didYouWakeSystemEv __ZN9IOService16registerInterestEPK8OSSymbolPFiPvS3_mPS_S3_jES3_S3_ __ZN9IOService16requestTerminateEPS_m -__ZN9IOService16setCPUSnoopDelayEm __ZN9IOService18doServiceTerminateEm __ZN9IOService18matchPropertyTableEP12OSDictionaryPl __ZN9IOService18requireMaxBusStallEm @@ -284,6 +292,7 @@ __ZN9IOService22PM_Clamp_Timer_ExpiredEv __ZN9IOService22powerDomainDidChangeToEmP17IOPowerConnection __ZN9IOService23acknowledgeNotificationEPvm __ZN9IOService23addMatchingNotificationEPK8OSSymbolP12OSDictionaryPFbPvS5_PS_P10IONotifierES5_S5_l +__ZN9IOService23addMatchingNotificationEPK8OSSymbolP12OSDictionarylU13block_pointerFbPS_P10IONotifierE __ZN9IOService23powerDomainWillChangeToEmP17IOPowerConnection __ZN9IOService23scheduleTerminatePhase2Em __ZN9IOService23tellClientsWithResponseEi @@ -312,5 +321,3 @@ __ZNK8IOPMprot12getMetaClassEv __ZNK8IOPMprot9MetaClass5allocEv __ZTV8IOPMprot __ZTVN8IOPMprot9MetaClassE - -__ZN9IOService23addMatchingNotificationEPK8OSSymbolP12OSDictionarylU13block_pointerFbPS_P10IONotifierE diff --git a/config/IOKit.arm64.MacOSX.exports b/config/IOKit.arm64.MacOSX.exports new file mode 100644 index 000000000..1a2f5c49d --- /dev/null +++ b/config/IOKit.arm64.MacOSX.exports @@ -0,0 +1,258 @@ +__ZN10IOWorkLoop20_RESERVEDIOWorkLoop0Ev +__ZN10IOWorkLoop20_RESERVEDIOWorkLoop1Ev +__ZN10IOWorkLoop20_RESERVEDIOWorkLoop2Ev +__ZN10IOWorkLoop20_RESERVEDIOWorkLoop3Ev +__ZN10IOWorkLoop20_RESERVEDIOWorkLoop4Ev +__ZN10IOWorkLoop20_RESERVEDIOWorkLoop5Ev +__ZN10IOWorkLoop20_RESERVEDIOWorkLoop6Ev +__ZN10IOWorkLoop20_RESERVEDIOWorkLoop7Ev +__ZN11IOMemoryMap21_RESERVEDIOMemoryMap0Ev +__ZN11IOMemoryMap21_RESERVEDIOMemoryMap1Ev +__ZN11IOMemoryMap21_RESERVEDIOMemoryMap2Ev +__ZN11IOMemoryMap21_RESERVEDIOMemoryMap3Ev +__ZN11IOMemoryMap21_RESERVEDIOMemoryMap4Ev +__ZN11IOMemoryMap21_RESERVEDIOMemoryMap5Ev +__ZN11IOMemoryMap21_RESERVEDIOMemoryMap6Ev +__ZN11IOMemoryMap21_RESERVEDIOMemoryMap7Ev +__ZN12IODMACommand22_RESERVEDIODMACommand0Ev +__ZN12IODMACommand22_RESERVEDIODMACommand1Ev +__ZN12IODMACommand22_RESERVEDIODMACommand2Ev +__ZN12IODMACommand22_RESERVEDIODMACommand3Ev +__ZN12IODMACommand22_RESERVEDIODMACommand4Ev +__ZN12IODMACommand22_RESERVEDIODMACommand5Ev +__ZN12IODMACommand22_RESERVEDIODMACommand6Ev +__ZN12IODMACommand22_RESERVEDIODMACommand7Ev +__ZN12IODMACommand22_RESERVEDIODMACommand8Ev +__ZN12IODMACommand22_RESERVEDIODMACommand9Ev +__ZN12IODMACommand23_RESERVEDIODMACommand10Ev +__ZN12IODMACommand23_RESERVEDIODMACommand11Ev +__ZN12IODMACommand23_RESERVEDIODMACommand12Ev +__ZN12IODMACommand23_RESERVEDIODMACommand13Ev +__ZN12IODMACommand23_RESERVEDIODMACommand14Ev +__ZN12IODMACommand23_RESERVEDIODMACommand15Ev +__ZN12IOUserClient22_RESERVEDIOUserClient0Ev 
+__ZN12IOUserClient22_RESERVEDIOUserClient1Ev +__ZN12IOUserClient22_RESERVEDIOUserClient2Ev +__ZN12IOUserClient22_RESERVEDIOUserClient3Ev +__ZN12IOUserClient22_RESERVEDIOUserClient4Ev +__ZN12IOUserClient22_RESERVEDIOUserClient5Ev +__ZN12IOUserClient22_RESERVEDIOUserClient6Ev +__ZN12IOUserClient22_RESERVEDIOUserClient7Ev +__ZN12IOUserClient22_RESERVEDIOUserClient8Ev +__ZN12IOUserClient22_RESERVEDIOUserClient9Ev +__ZN12IOUserClient23_RESERVEDIOUserClient10Ev +__ZN12IOUserClient23_RESERVEDIOUserClient11Ev +__ZN12IOUserClient23_RESERVEDIOUserClient12Ev +__ZN12IOUserClient23_RESERVEDIOUserClient13Ev +__ZN12IOUserClient23_RESERVEDIOUserClient14Ev +__ZN12IOUserClient23_RESERVEDIOUserClient15Ev +__ZN13IOCommandGate23_RESERVEDIOCommandGate0Ev +__ZN13IOCommandGate23_RESERVEDIOCommandGate1Ev +__ZN13IOCommandGate23_RESERVEDIOCommandGate2Ev +__ZN13IOCommandGate23_RESERVEDIOCommandGate3Ev +__ZN13IOCommandGate23_RESERVEDIOCommandGate4Ev +__ZN13IOCommandGate23_RESERVEDIOCommandGate5Ev +__ZN13IOCommandGate23_RESERVEDIOCommandGate6Ev +__ZN13IOCommandGate23_RESERVEDIOCommandGate7Ev +__ZN13IOCommandPool23_RESERVEDIOCommandPool0Ev +__ZN13IOCommandPool23_RESERVEDIOCommandPool1Ev +__ZN13IOCommandPool23_RESERVEDIOCommandPool2Ev +__ZN13IOCommandPool23_RESERVEDIOCommandPool3Ev +__ZN13IOCommandPool23_RESERVEDIOCommandPool4Ev +__ZN13IOCommandPool23_RESERVEDIOCommandPool5Ev +__ZN13IOCommandPool23_RESERVEDIOCommandPool6Ev +__ZN13IOCommandPool23_RESERVEDIOCommandPool7Ev +__ZN13IOEventSource23_RESERVEDIOEventSource0Ev +__ZN13IOEventSource23_RESERVEDIOEventSource1Ev +__ZN13IOEventSource23_RESERVEDIOEventSource2Ev +__ZN13IOEventSource23_RESERVEDIOEventSource3Ev +__ZN13IOEventSource23_RESERVEDIOEventSource4Ev +__ZN13IOEventSource23_RESERVEDIOEventSource5Ev +__ZN13IOEventSource23_RESERVEDIOEventSource6Ev +__ZN13IOEventSource23_RESERVEDIOEventSource7Ev +__ZN15IORegistryEntry25_RESERVEDIORegistryEntry0Ev +__ZN15IORegistryEntry25_RESERVEDIORegistryEntry1Ev +__ZN15IORegistryEntry25_RESERVEDIORegistryEntry2Ev +__ZN15IORegistryEntry25_RESERVEDIORegistryEntry3Ev +__ZN15IORegistryEntry25_RESERVEDIORegistryEntry4Ev +__ZN15IORegistryEntry25_RESERVEDIORegistryEntry5Ev +__ZN15IORegistryEntry25_RESERVEDIORegistryEntry6Ev +__ZN15IORegistryEntry25_RESERVEDIORegistryEntry7Ev +__ZN15IORegistryEntry25_RESERVEDIORegistryEntry8Ev +__ZN15IORegistryEntry25_RESERVEDIORegistryEntry9Ev +__ZN15IORegistryEntry26_RESERVEDIORegistryEntry10Ev +__ZN15IORegistryEntry26_RESERVEDIORegistryEntry11Ev +__ZN15IORegistryEntry26_RESERVEDIORegistryEntry12Ev +__ZN15IORegistryEntry26_RESERVEDIORegistryEntry13Ev +__ZN15IORegistryEntry26_RESERVEDIORegistryEntry14Ev +__ZN15IORegistryEntry26_RESERVEDIORegistryEntry15Ev +__ZN15IORegistryEntry26_RESERVEDIORegistryEntry16Ev +__ZN15IORegistryEntry26_RESERVEDIORegistryEntry17Ev +__ZN15IORegistryEntry26_RESERVEDIORegistryEntry18Ev +__ZN15IORegistryEntry26_RESERVEDIORegistryEntry19Ev +__ZN15IORegistryEntry26_RESERVEDIORegistryEntry20Ev +__ZN15IORegistryEntry26_RESERVEDIORegistryEntry21Ev +__ZN15IORegistryEntry26_RESERVEDIORegistryEntry22Ev +__ZN15IORegistryEntry26_RESERVEDIORegistryEntry23Ev +__ZN15IORegistryEntry26_RESERVEDIORegistryEntry24Ev +__ZN15IORegistryEntry26_RESERVEDIORegistryEntry25Ev +__ZN15IORegistryEntry26_RESERVEDIORegistryEntry26Ev +__ZN15IORegistryEntry26_RESERVEDIORegistryEntry27Ev +__ZN15IORegistryEntry26_RESERVEDIORegistryEntry28Ev +__ZN15IORegistryEntry26_RESERVEDIORegistryEntry29Ev +__ZN15IORegistryEntry26_RESERVEDIORegistryEntry30Ev +__ZN15IORegistryEntry26_RESERVEDIORegistryEntry31Ev 
+__ZN17IOPolledInterface27_RESERVEDIOPolledInterface0Ev +__ZN17IOPolledInterface27_RESERVEDIOPolledInterface1Ev +__ZN17IOPolledInterface27_RESERVEDIOPolledInterface2Ev +__ZN17IOPolledInterface27_RESERVEDIOPolledInterface3Ev +__ZN17IOPolledInterface27_RESERVEDIOPolledInterface4Ev +__ZN17IOPolledInterface27_RESERVEDIOPolledInterface5Ev +__ZN17IOPolledInterface27_RESERVEDIOPolledInterface6Ev +__ZN17IOPolledInterface27_RESERVEDIOPolledInterface7Ev +__ZN17IOPolledInterface27_RESERVEDIOPolledInterface8Ev +__ZN17IOPolledInterface27_RESERVEDIOPolledInterface9Ev +__ZN17IOPolledInterface28_RESERVEDIOPolledInterface10Ev +__ZN17IOPolledInterface28_RESERVEDIOPolledInterface11Ev +__ZN17IOPolledInterface28_RESERVEDIOPolledInterface12Ev +__ZN17IOPolledInterface28_RESERVEDIOPolledInterface13Ev +__ZN17IOPolledInterface28_RESERVEDIOPolledInterface14Ev +__ZN17IOPolledInterface28_RESERVEDIOPolledInterface15Ev +__ZN17IOSharedDataQueue27_RESERVEDIOSharedDataQueue0Ev +__ZN17IOSharedDataQueue27_RESERVEDIOSharedDataQueue1Ev +__ZN17IOSharedDataQueue27_RESERVEDIOSharedDataQueue2Ev +__ZN17IOSharedDataQueue27_RESERVEDIOSharedDataQueue3Ev +__ZN17IOSharedDataQueue27_RESERVEDIOSharedDataQueue4Ev +__ZN17IOSharedDataQueue27_RESERVEDIOSharedDataQueue5Ev +__ZN17IOSharedDataQueue27_RESERVEDIOSharedDataQueue6Ev +__ZN17IOSharedDataQueue27_RESERVEDIOSharedDataQueue7Ev +__ZN18IOMemoryDescriptor28_RESERVEDIOMemoryDescriptor0Ev +__ZN18IOMemoryDescriptor28_RESERVEDIOMemoryDescriptor1Ev +__ZN18IOMemoryDescriptor28_RESERVEDIOMemoryDescriptor2Ev +__ZN18IOMemoryDescriptor28_RESERVEDIOMemoryDescriptor3Ev +__ZN18IOMemoryDescriptor28_RESERVEDIOMemoryDescriptor4Ev +__ZN18IOMemoryDescriptor28_RESERVEDIOMemoryDescriptor5Ev +__ZN18IOMemoryDescriptor28_RESERVEDIOMemoryDescriptor6Ev +__ZN18IOMemoryDescriptor28_RESERVEDIOMemoryDescriptor7Ev +__ZN18IOMemoryDescriptor28_RESERVEDIOMemoryDescriptor8Ev +__ZN18IOMemoryDescriptor28_RESERVEDIOMemoryDescriptor9Ev +__ZN18IOMemoryDescriptor29_RESERVEDIOMemoryDescriptor10Ev +__ZN18IOMemoryDescriptor29_RESERVEDIOMemoryDescriptor11Ev +__ZN18IOMemoryDescriptor29_RESERVEDIOMemoryDescriptor12Ev +__ZN18IOMemoryDescriptor29_RESERVEDIOMemoryDescriptor13Ev +__ZN18IOMemoryDescriptor29_RESERVEDIOMemoryDescriptor14Ev +__ZN18IOMemoryDescriptor29_RESERVEDIOMemoryDescriptor15Ev +__ZN18IOTimerEventSource28_RESERVEDIOTimerEventSource0Ev +__ZN18IOTimerEventSource28_RESERVEDIOTimerEventSource1Ev +__ZN18IOTimerEventSource28_RESERVEDIOTimerEventSource2Ev +__ZN18IOTimerEventSource28_RESERVEDIOTimerEventSource3Ev +__ZN18IOTimerEventSource28_RESERVEDIOTimerEventSource4Ev +__ZN18IOTimerEventSource28_RESERVEDIOTimerEventSource5Ev +__ZN18IOTimerEventSource28_RESERVEDIOTimerEventSource6Ev +__ZN18IOTimerEventSource28_RESERVEDIOTimerEventSource7Ev +__ZN21IOInterruptController31_RESERVEDIOInterruptController0Ev +__ZN21IOInterruptController31_RESERVEDIOInterruptController1Ev +__ZN21IOInterruptController31_RESERVEDIOInterruptController2Ev +__ZN21IOInterruptController31_RESERVEDIOInterruptController3Ev +__ZN21IOInterruptController31_RESERVEDIOInterruptController4Ev +__ZN21IOInterruptController31_RESERVEDIOInterruptController5Ev +__ZN22IOInterruptEventSource32_RESERVEDIOInterruptEventSource0Ev +__ZN22IOInterruptEventSource32_RESERVEDIOInterruptEventSource1Ev +__ZN22IOInterruptEventSource32_RESERVEDIOInterruptEventSource2Ev +__ZN22IOInterruptEventSource32_RESERVEDIOInterruptEventSource3Ev +__ZN22IOInterruptEventSource32_RESERVEDIOInterruptEventSource4Ev +__ZN22IOInterruptEventSource32_RESERVEDIOInterruptEventSource5Ev 
+__ZN22IOInterruptEventSource32_RESERVEDIOInterruptEventSource6Ev +__ZN22IOInterruptEventSource32_RESERVEDIOInterruptEventSource7Ev +__ZN24IOBufferMemoryDescriptor34_RESERVEDIOBufferMemoryDescriptor0Ev +__ZN24IOBufferMemoryDescriptor34_RESERVEDIOBufferMemoryDescriptor1Ev +__ZN24IOBufferMemoryDescriptor34_RESERVEDIOBufferMemoryDescriptor2Ev +__ZN24IOBufferMemoryDescriptor34_RESERVEDIOBufferMemoryDescriptor3Ev +__ZN24IOBufferMemoryDescriptor34_RESERVEDIOBufferMemoryDescriptor4Ev +__ZN24IOBufferMemoryDescriptor34_RESERVEDIOBufferMemoryDescriptor5Ev +__ZN24IOBufferMemoryDescriptor34_RESERVEDIOBufferMemoryDescriptor6Ev +__ZN24IOBufferMemoryDescriptor34_RESERVEDIOBufferMemoryDescriptor7Ev +__ZN24IOBufferMemoryDescriptor34_RESERVEDIOBufferMemoryDescriptor8Ev +__ZN24IOBufferMemoryDescriptor34_RESERVEDIOBufferMemoryDescriptor9Ev +__ZN24IOBufferMemoryDescriptor35_RESERVEDIOBufferMemoryDescriptor10Ev +__ZN24IOBufferMemoryDescriptor35_RESERVEDIOBufferMemoryDescriptor11Ev +__ZN24IOBufferMemoryDescriptor35_RESERVEDIOBufferMemoryDescriptor12Ev +__ZN24IOBufferMemoryDescriptor35_RESERVEDIOBufferMemoryDescriptor13Ev +__ZN24IOBufferMemoryDescriptor35_RESERVEDIOBufferMemoryDescriptor14Ev +__ZN24IOBufferMemoryDescriptor35_RESERVEDIOBufferMemoryDescriptor15Ev +__ZN27IOSharedInterruptController37_RESERVEDIOSharedInterruptController0Ev +__ZN27IOSharedInterruptController37_RESERVEDIOSharedInterruptController1Ev +__ZN27IOSharedInterruptController37_RESERVEDIOSharedInterruptController2Ev +__ZN27IOSharedInterruptController37_RESERVEDIOSharedInterruptController3Ev +__ZN28IOFilterInterruptEventSource38_RESERVEDIOFilterInterruptEventSource0Ev +__ZN28IOFilterInterruptEventSource38_RESERVEDIOFilterInterruptEventSource1Ev +__ZN28IOFilterInterruptEventSource38_RESERVEDIOFilterInterruptEventSource2Ev +__ZN28IOFilterInterruptEventSource38_RESERVEDIOFilterInterruptEventSource3Ev +__ZN28IOFilterInterruptEventSource38_RESERVEDIOFilterInterruptEventSource4Ev +__ZN28IOFilterInterruptEventSource38_RESERVEDIOFilterInterruptEventSource5Ev +__ZN28IOFilterInterruptEventSource38_RESERVEDIOFilterInterruptEventSource6Ev +__ZN28IOFilterInterruptEventSource38_RESERVEDIOFilterInterruptEventSource7Ev +__ZN8IOMapper18_RESERVEDIOMapper0Ev +__ZN8IOMapper18_RESERVEDIOMapper1Ev +__ZN8IOMapper18_RESERVEDIOMapper2Ev +__ZN8IOMapper18_RESERVEDIOMapper3Ev +__ZN8IOMapper18_RESERVEDIOMapper4Ev +__ZN8IOMapper18_RESERVEDIOMapper5Ev +__ZN8IOMapper18_RESERVEDIOMapper6Ev +__ZN8IOMapper18_RESERVEDIOMapper7Ev +__ZN8IOMapper18_RESERVEDIOMapper8Ev +__ZN8IOMapper18_RESERVEDIOMapper9Ev +__ZN8IOMapper19_RESERVEDIOMapper10Ev +__ZN8IOMapper19_RESERVEDIOMapper11Ev +__ZN8IOMapper19_RESERVEDIOMapper12Ev +__ZN8IOMapper19_RESERVEDIOMapper13Ev +__ZN8IOMapper19_RESERVEDIOMapper14Ev +__ZN8IOMapper19_RESERVEDIOMapper15Ev +__ZN9IOService19_RESERVEDIOService0Ev +__ZN9IOService19_RESERVEDIOService1Ev +__ZN9IOService19_RESERVEDIOService2Ev +__ZN9IOService19_RESERVEDIOService3Ev +__ZN9IOService19_RESERVEDIOService4Ev +__ZN9IOService19_RESERVEDIOService5Ev +__ZN9IOService19_RESERVEDIOService6Ev +__ZN9IOService19_RESERVEDIOService7Ev +__ZN9IOService19_RESERVEDIOService8Ev +__ZN9IOService19_RESERVEDIOService9Ev +__ZN9IOService20_RESERVEDIOService10Ev +__ZN9IOService20_RESERVEDIOService11Ev +__ZN9IOService20_RESERVEDIOService12Ev +__ZN9IOService20_RESERVEDIOService13Ev +__ZN9IOService20_RESERVEDIOService14Ev +__ZN9IOService20_RESERVEDIOService15Ev +__ZN9IOService20_RESERVEDIOService16Ev +__ZN9IOService20_RESERVEDIOService17Ev +__ZN9IOService20_RESERVEDIOService18Ev 
+__ZN9IOService20_RESERVEDIOService19Ev +__ZN9IOService20_RESERVEDIOService20Ev +__ZN9IOService20_RESERVEDIOService21Ev +__ZN9IOService20_RESERVEDIOService22Ev +__ZN9IOService20_RESERVEDIOService23Ev +__ZN9IOService20_RESERVEDIOService24Ev +__ZN9IOService20_RESERVEDIOService25Ev +__ZN9IOService20_RESERVEDIOService26Ev +__ZN9IOService20_RESERVEDIOService27Ev +__ZN9IOService20_RESERVEDIOService28Ev +__ZN9IOService20_RESERVEDIOService29Ev +__ZN9IOService20_RESERVEDIOService30Ev +__ZN9IOService20_RESERVEDIOService31Ev +__ZN9IOService20_RESERVEDIOService32Ev +__ZN9IOService20_RESERVEDIOService33Ev +__ZN9IOService20_RESERVEDIOService34Ev +__ZN9IOService20_RESERVEDIOService35Ev +__ZN9IOService20_RESERVEDIOService36Ev +__ZN9IOService20_RESERVEDIOService37Ev +__ZN9IOService20_RESERVEDIOService38Ev +__ZN9IOService20_RESERVEDIOService39Ev +__ZN9IOService20_RESERVEDIOService40Ev +__ZN9IOService20_RESERVEDIOService41Ev +__ZN9IOService20_RESERVEDIOService42Ev +__ZN9IOService20_RESERVEDIOService43Ev +__ZN9IOService20_RESERVEDIOService44Ev +__ZN9IOService20_RESERVEDIOService45Ev +__ZN9IOService20_RESERVEDIOService46Ev +__ZN9IOService20_RESERVEDIOService47Ev diff --git a/config/IOKit.arm64.exports b/config/IOKit.arm64.exports index 691beb390..e52115fd5 100644 --- a/config/IOKit.arm64.exports +++ b/config/IOKit.arm64.exports @@ -41,6 +41,7 @@ __ZN12IOUserClient15sendAsyncResultEPjiPPvj __ZN12IOUserClient17mapClientMemory64EjP4taskjy __ZN12IOUserClient17sendAsyncResult64EPyiS0_j __ZN12IOUserClient19clientMemoryForTypeEjPjPP18IOMemoryDescriptor +__ZN12IOUserClient19clientMemoryForTypeEjPjR11OSSharedPtrI18IOMemoryDescriptorE __ZN12IOUserClient19setAsyncReference64EPyP8ipc_portyy __ZN12IOUserClient19setAsyncReference64EPyP8ipc_portyyP4task __ZN12IOUserClient23getExternalTrapForIndexEj @@ -50,6 +51,7 @@ __ZN12IOUserClient24registerNotificationPortEP8ipc_portjj __ZN12IOUserClient24registerNotificationPortEP8ipc_portjy __ZN12IOUserClient25getExternalMethodForIndexEj __ZN12IOUserClient26getTargetAndMethodForIndexEPP9IOServicej +__ZN12IOUserClient26getTargetAndMethodForIndexER11OSSharedPtrI9IOServiceEj __ZN12IOUserClient28sendAsyncResult64WithOptionsEPyiS0_jj __ZN12IOUserClient30getExternalAsyncMethodForIndexEj __ZN12IOUserClient31getAsyncTargetAndMethodForIndexEPP9IOServicej @@ -66,9 +68,12 @@ __ZN14IODeviceMemory9withRangeEyy __ZN14IOMemoryCursor17withSpecificationEPFvNS_15PhysicalSegmentEPvjEyyy __ZN14IOMemoryCursor19genPhysicalSegmentsEP18IOMemoryDescriptoryPvjjPy __ZN14IOMemoryCursor21initWithSpecificationEPFvNS_15PhysicalSegmentEPvjEyyy +__ZN14IOPMrootDomain17requestUserActiveEP9IOServicePKc __ZN14IOPMrootDomain17setSleepSupportedEj __ZN14IOPMrootDomain19sysPowerDownHandlerEPvS0_jP9IOServiceS0_m +__ZN14IOPMrootDomain20claimSystemBootEventEP9IOServicejPKcP8OSObject __ZN14IOPMrootDomain20claimSystemWakeEventEP9IOServicejPKcP8OSObject +__ZN14IOPMrootDomain24claimSystemShutdownEventEP9IOServicejPKcP8OSObject __ZN14IOPMrootDomain24receivePowerNotificationEj __ZN14IOPMrootDomain27displayWranglerNotificationEPvS0_jP9IOServiceS0_m __ZN15IODMAController13getControllerEP9IOServicej @@ -194,7 +199,9 @@ __ZN9IOService12requestProbeEj __ZN9IOService12updateReportEP19IOReportChannelListjPvS2_ __ZN9IOService13messageClientEjP8OSObjectPvm __ZN9IOService13newUserClientEP4taskPvjP12OSDictionaryPP12IOUserClient +__ZN9IOService13newUserClientEP4taskPvjP12OSDictionaryR11OSSharedPtrI12IOUserClientE __ZN9IOService13newUserClientEP4taskPvjPP12IOUserClient +__ZN9IOService13newUserClientEP4taskPvjR11OSSharedPtrI12IOUserClientE 
__ZN9IOService13startMatchingEj __ZN9IOService13waitMatchIdleEj __ZN9IOService13willTerminateEPS_j @@ -209,7 +216,6 @@ __ZN9IOService15terminatePhase1Ej __ZN9IOService15terminateWorkerEj __ZN9IOService16registerInterestEPK8OSSymbolPFiPvS3_jPS_S3_mES3_S3_ __ZN9IOService16requestTerminateEPS_j -__ZN9IOService16setCPUSnoopDelayEj __ZN9IOService18doServiceTerminateEj __ZN9IOService18matchPropertyTableEP12OSDictionaryPi __ZN9IOService18requireMaxBusStallEj @@ -235,4 +241,3 @@ __ZNK18IOMemoryDescriptor19dmaCommandOperationEjPvj __ZNK25IOGeneralMemoryDescriptor19dmaCommandOperationEjPvj __ZN9IOService23addMatchingNotificationEPK8OSSymbolP12OSDictionaryiU13block_pointerFbPS_P10IONotifierE - diff --git a/config/IOKit.arm64.hibernation.MacOSX.exports b/config/IOKit.arm64.hibernation.MacOSX.exports new file mode 100644 index 000000000..36b9214ee --- /dev/null +++ b/config/IOKit.arm64.hibernation.MacOSX.exports @@ -0,0 +1,8 @@ +__ZN13SEPHibernator23_RESERVEDSEPHibernator0Ev +__ZN13SEPHibernator23_RESERVEDSEPHibernator1Ev +__ZN13SEPHibernator23_RESERVEDSEPHibernator2Ev +__ZN13SEPHibernator23_RESERVEDSEPHibernator3Ev +__ZN13SEPHibernator23_RESERVEDSEPHibernator4Ev +__ZN13SEPHibernator23_RESERVEDSEPHibernator5Ev +__ZN13SEPHibernator23_RESERVEDSEPHibernator6Ev +__ZN13SEPHibernator23_RESERVEDSEPHibernator7Ev diff --git a/config/IOKit.exports b/config/IOKit.exports index 86f00a27d..cdf84e788 100644 --- a/config/IOKit.exports +++ b/config/IOKit.exports @@ -1,14 +1,3 @@ -_IORPCMessageFromMach -__ZN12IOUserClient8DispatchE5IORPC - -__ZN16IODispatchSource23SetEnableWithCompletionEbU13block_pointerFvvEPFiP15OSMetaClassBase5IORPCE -__ZN16IODispatchSource9SetEnableEbPFiP15OSMetaClassBase5IORPCE - -__ZN22IOInterruptEventSource27getPimaryInterruptTimestampEv -__ZN22IOInterruptEventSource31enablePrimaryInterruptTimestampEb - -__ZN14IOPMrootDomain11setWakeTimeEy - __ZN12IODMACommand8DispatchE5IORPC _IOAlignmentToSize @@ -45,11 +34,12 @@ _IOLockUnlock:_lck_mtx_unlock _IOLockWakeup _IOLog _IOLogv -_IOMalloc -_IOMallocAligned +_IOMalloc:_IOMalloc_external +_IOMallocAligned:_IOMallocAligned_external _IOMallocContiguous _IOMallocPageable -_IOMallocZero +_IOMallocPageableZero +_IOMallocZero:_IOMallocZero_external _IOMappedRead16 _IOMappedRead32 _IOMappedRead64 @@ -63,10 +53,13 @@ _IONetworkNamePrefixMatching _IOPageableMapForAddress _IOPause _IOPrintPlane +_IORPCMessageFromMach _IORWLockAlloc _IORWLockFree _IORWLockGetMachLock _IORWLockRead:_lck_rw_lock_shared +_IORWLockTryRead:_lck_rw_try_lock_shared +_IORWLockTryWrite:_lck_rw_try_lock_exclusive _IORWLockUnlock:_lck_rw_done _IORWLockWrite:_lck_rw_lock_exclusive _IORecursiveLockAlloc @@ -82,10 +75,10 @@ _IORecursiveLockUnlock _IORecursiveLockWakeup _IOSetProcessorCacheMode _IOSimpleLockAlloc +_IOSimpleLockDestroy _IOSimpleLockFree _IOSimpleLockGetMachLock _IOSimpleLockInit -_IOSimpleLockDestroy _IOSimpleLockLock:_lck_spin_lock _IOSimpleLockTryLock:_lck_spin_try_lock _IOSimpleLockUnlock:_lck_spin_unlock @@ -100,6 +93,8 @@ _PEGetGMTTimeOfDay _PEGetMachineName _PEGetModelName _PEGetPlatformEpoch +_PEGetProductName +_PEGetTargetName _PEHaltRestart _PESavePanicInfo _PESetGMTTimeOfDay @@ -112,11 +107,11 @@ _PE_cpu_start _PE_enter_debugger _PE_halt_restart _PE_parse_boot_argn -_StartIOKit __Z17IODTMapInterruptsP15IORegistryEntry __Z17IODeviceTreeAllocPv __Z17IOServiceOrderingPK15OSMetaClassBaseS1_Pv __Z18IODTCompareNubNamePK15IORegistryEntryP8OSStringPS3_ +__Z18IODTCompareNubNamePK15IORegistryEntryP8OSStringR11OSSharedPtrIS2_E __Z19printDictionaryKeysP12OSDictionaryPc 
__Z20IODTMatchNubWithKeysP15IORegistryEntryPKc __Z21IODTResolveAddressingP15IORegistryEntryPKcP14IODeviceMemory @@ -128,6 +123,50 @@ __ZN10IONotifier9MetaClassC2Ev __ZN10IONotifier9metaClassE __ZN10IONotifierC2EPK11OSMetaClass __ZN10IONotifierD2Ev +__ZN10IONotifierdlEPvm +__ZN10IONotifiernwEm +__ZN10IOReporter10addChannelEyPKc +__ZN10IOReporter10gMetaClassE +__ZN10IOReporter10legendWithEP7OSArrayS1_19IOReportChannelTypey +__ZN10IOReporter10superClassE +__ZN10IOReporter12createLegendEv +__ZN10IOReporter12lockReporterEv +__ZN10IOReporter12updateReportEP19IOReportChannelListjPvS2_ +__ZN10IOReporter14copyChannelIDsEv +__ZN10IOReporter14unlockReporterEv +__ZN10IOReporter15configureReportEP19IOReportChannelListjPvS2_ +__ZN10IOReporter15getChannelIndexEyPi +__ZN10IOReporter16getElementValuesEi +__ZN10IOReporter16setElementValuesEiP21IOReportElementValuesy +__ZN10IOReporter16updateAllReportsEP5OSSetP19IOReportChannelListjPvS4_ +__ZN10IOReporter17copyElementValuesEiP21IOReportElementValues +__ZN10IOReporter17getChannelIndicesEyPiS0_ +__ZN10IOReporter17handleSwapCleanupEi +__ZN10IOReporter17handleSwapPrepareEi +__ZN10IOReporter18handleCreateLegendEv +__ZN10IOReporter18handleUpdateReportEP19IOReportChannelListjPvS2_ +__ZN10IOReporter18lockReporterConfigEv +__ZN10IOReporter19configureAllReportsEP5OSSetP19IOReportChannelListjPvS4_ +__ZN10IOReporter19updateChannelValuesEi +__ZN10IOReporter19updateReportChannelEiPiP24IOBufferMemoryDescriptor +__ZN10IOReporter20getFirstElementIndexEyPi +__ZN10IOReporter20handleAddChannelSwapEyPK8OSSymbol +__ZN10IOReporter20unlockReporterConfigEv +__ZN10IOReporter21handleConfigureReportEP19IOReportChannelListjPvS2_ +__ZN10IOReporter4freeEv +__ZN10IOReporter4initEP9IOService19IOReportChannelTypey +__ZN10IOReporter9MetaClassC1Ev +__ZN10IOReporter9MetaClassC2Ev +__ZN10IOReporter9metaClassE +__ZN10IOReporterC1EPK11OSMetaClass +__ZN10IOReporterC1Ev +__ZN10IOReporterC2EPK11OSMetaClass +__ZN10IOReporterC2Ev +__ZN10IOReporterD0Ev +__ZN10IOReporterD1Ev +__ZN10IOReporterD2Ev +__ZN10IOReporterdlEPvm +__ZN10IOReporternwEm __ZN10IOWorkLoop10gMetaClassE __ZN10IOWorkLoop10superClassE __ZN10IOWorkLoop10threadMainEv @@ -135,6 +174,7 @@ __ZN10IOWorkLoop10wakeupGateEPvb __ZN10IOWorkLoop12tryCloseGateEv __ZN10IOWorkLoop13_maintRequestEPvS0_S0_S0_ __ZN10IOWorkLoop14addEventSourceEP13IOEventSource +__ZN10IOWorkLoop14runActionBlockEU13block_pointerFivE __ZN10IOWorkLoop15runEventSourcesEv __ZN10IOWorkLoop17removeEventSourceEP13IOEventSource __ZN10IOWorkLoop18setMaximumLockTimeEyj @@ -154,6 +194,10 @@ __ZN10IOWorkLoopC2EPK11OSMetaClass __ZN10IOWorkLoopC2Ev __ZN10IOWorkLoopD0Ev __ZN10IOWorkLoopD2Ev +__ZN10IOWorkLoopdlEPvm +__ZN10IOWorkLoopnwEm +__ZN11IOCatalogue10gMetaClassE +__ZN11IOCatalogue10superClassE __ZN11IODataQueue10gMetaClassE __ZN11IODataQueue10superClassE __ZN11IODataQueue19getMemoryDescriptorEv @@ -169,10 +213,13 @@ __ZN11IODataQueueC2EPK11OSMetaClass __ZN11IODataQueueC2Ev __ZN11IODataQueueD0Ev __ZN11IODataQueueD2Ev +__ZN11IODataQueuedlEPvm +__ZN11IODataQueuenwEm __ZN11IOMemoryMap10gMetaClassE __ZN11IOMemoryMap10superClassE __ZN11IOMemoryMap13getMapOptionsEv __ZN11IOMemoryMap14getAddressTaskEv +__ZN11IOMemoryMap17_CopyState_InvokeE5IORPCP15OSMetaClassBasePFiS2_P24_IOMemoryMapPrivateStateE __ZN11IOMemoryMap17getVirtualAddressEv __ZN11IOMemoryMap18getPhysicalAddressEv __ZN11IOMemoryMap19getMemoryDescriptorEv @@ -187,6 +234,8 @@ __ZN11IOMemoryMapC2EPK11OSMetaClass __ZN11IOMemoryMapC2Ev __ZN11IOMemoryMapD0Ev __ZN11IOMemoryMapD2Ev +__ZN11IOMemoryMapdlEPvm +__ZN11IOMemoryMapnwEm 
__ZN11IOResources10gMetaClassE __ZN11IOResources10superClassE __ZN11IOResources13setPropertiesEP8OSObject @@ -201,6 +250,8 @@ __ZN11IOResourcesC2EPK11OSMetaClass __ZN11IOResourcesC2Ev __ZN11IOResourcesD0Ev __ZN11IOResourcesD2Ev +__ZN11IOResourcesdlEPvm +__ZN11IOResourcesnwEm __ZN12IODMACommand10gMetaClassE __ZN12IODMACommand10superClassE __ZN12IODMACommand10withRefConEPv @@ -212,7 +263,6 @@ __ZN12IODMACommand17getNumAddressBitsEv __ZN12IODMACommand18getAlignmentLengthEv __ZN12IODMACommand19setMemoryDescriptorEPK18IOMemoryDescriptorb __ZN12IODMACommand21clearMemoryDescriptorEb -__ZNK12IODMACommand21getIOMemoryDescriptorEv __ZN12IODMACommand26getPreparedOffsetAndLengthEPyS0_ __ZN12IODMACommand28getAlignmentInternalSegmentsEv __ZN12IODMACommand4freeEv @@ -228,6 +278,8 @@ __ZN12IODMACommandC2EPK11OSMetaClass __ZN12IODMACommandC2Ev __ZN12IODMACommandD0Ev __ZN12IODMACommandD2Ev +__ZN12IODMACommanddlEPvm +__ZN12IODMACommandnwEm __ZN12IOPMinformee10gMetaClassE __ZN12IOPMinformee10initializeEP9IOService __ZN12IOPMinformee10superClassE @@ -241,6 +293,13 @@ __ZN12IOPMinformeeC2EPK11OSMetaClass __ZN12IOPMinformeeC2Ev __ZN12IOPMinformeeD0Ev __ZN12IOPMinformeeD2Ev +__ZN12IOPMinformeedlEPvm +__ZN12IOPMinformeenwEm +__ZN12IOPlatformIO10gMetaClassE +__ZN12IOPlatformIOC2EPK11OSMetaClass +__ZN12IOPlatformIOD2Ev +__ZN12IOPlatformIOdlEPvm +__ZN12IOPlatformIOnwEm __ZN12IORootParent10dozeSystemEv __ZN12IORootParent10gMetaClassE __ZN12IORootParent10superClassE @@ -259,6 +318,8 @@ __ZN12IORootParentC2EPK11OSMetaClass __ZN12IORootParentC2Ev __ZN12IORootParentD0Ev __ZN12IORootParentD2Ev +__ZN12IORootParentdlEPvm +__ZN12IORootParentnwEm __ZN12IOUserClient10clientDiedEv __ZN12IOUserClient10gMetaClassE __ZN12IOUserClient10getServiceEv @@ -270,15 +331,21 @@ __ZN12IOUserClient14externalMethodEjP25IOExternalMethodArgumentsP24IOExternalMet __ZN12IOUserClient17setAsyncReferenceEPjP8ipc_portPvS3_ __ZN12IOUserClient18clientHasPrivilegeEPvPKc __ZN12IOUserClient20exportObjectToClientEP4taskP8OSObjectPS3_ -__ZN12IOUserClient21destroyUserReferencesEP8OSObject __ZN12IOUserClient21copyClientEntitlementEP4taskPKc +__ZN12IOUserClient21destroyUserReferencesEP8OSObject +__ZN12IOUserClient22AsyncCompletion_InvokeE5IORPCP15OSMetaClassBasePFvS2_P8OSActioniPKyjE +__ZN12IOUserClient22AsyncCompletion_InvokeE5IORPCP15OSMetaClassBasePFvS2_P8OSActioniPKyjEPK11OSMetaClass +__ZN12IOUserClient22_ExternalMethod_InvokeE5IORPCP15OSMetaClassBasePFiS2_yPKyjP6OSDataP18IOMemoryDescriptorPyPjyPS6_S8_P8OSActionE __ZN12IOUserClient22clientHasAuthorizationEP4taskP9IOService +__ZN12IOUserClient23registerFilterCallbacksEPK19io_filter_callbacksm __ZN12IOUserClient23releaseAsyncReference64EPy __ZN12IOUserClient23releaseNotificationPortEP8ipc_port __ZN12IOUserClient26removeMappingForDescriptorEP18IOMemoryDescriptor +__ZN12IOUserClient30CopyClientMemoryForType_InvokeE5IORPCP15OSMetaClassBasePFiS2_yPyPP18IOMemoryDescriptorE __ZN12IOUserClient4freeEv __ZN12IOUserClient4initEP12OSDictionary __ZN12IOUserClient4initEv +__ZN12IOUserClient8DispatchE5IORPC __ZN12IOUserClient9MetaClassC1Ev __ZN12IOUserClient9MetaClassC2Ev __ZN12IOUserClient9metaClassE @@ -286,6 +353,11 @@ __ZN12IOUserClientC1EPK11OSMetaClass __ZN12IOUserClientC2EPK11OSMetaClass __ZN12IOUserClientD0Ev __ZN12IOUserClientD2Ev +__ZN12IOUserClientdlEPvm +__ZN12IOUserClientnwEm +__ZN12IOUserServer11Exit_InvokeE5IORPCP15OSMetaClassBasePFiS2_PKcE +__ZN12IOUserServer13Create_InvokeE5IORPCPFiPKcyyPPS_E +__ZN12IOUserServer17LoadModule_InvokeE5IORPCP15OSMetaClassBasePFiS2_PKcE __ZN13IOCommandGate10gMetaClassE 
__ZN13IOCommandGate10runCommandEPvS0_S0_S0_ __ZN13IOCommandGate10superClassE @@ -294,6 +366,7 @@ __ZN13IOCommandGate11setWorkLoopEP10IOWorkLoop __ZN13IOCommandGate13attemptActionEPFiP8OSObjectPvS2_S2_S2_ES2_S2_S2_S2_ __ZN13IOCommandGate13commandWakeupEPvb __ZN13IOCommandGate14attemptCommandEPvS0_S0_S0_ +__ZN13IOCommandGate14runActionBlockEU13block_pointerFivE __ZN13IOCommandGate4freeEv __ZN13IOCommandGate4initEP8OSObjectPFiS1_PvS2_S2_S2_E __ZN13IOCommandGate6enableEv @@ -308,6 +381,8 @@ __ZN13IOCommandGateC2EPK11OSMetaClass __ZN13IOCommandGateC2Ev __ZN13IOCommandGateD0Ev __ZN13IOCommandGateD2Ev +__ZN13IOCommandGatedlEPvm +__ZN13IOCommandGatenwEm __ZN13IOCommandPool10gMetaClassE __ZN13IOCommandPool10getCommandEb __ZN13IOCommandPool10superClassE @@ -326,12 +401,15 @@ __ZN13IOCommandPoolC2EPK11OSMetaClass __ZN13IOCommandPoolC2Ev __ZN13IOCommandPoolD0Ev __ZN13IOCommandPoolD2Ev +__ZN13IOCommandPooldlEPvm +__ZN13IOCommandPoolnwEm __ZN13IOEventSource10gMetaClassE __ZN13IOEventSource10superClassE __ZN13IOEventSource10wakeupGateEPvb __ZN13IOEventSource11setWorkLoopEP10IOWorkLoop __ZN13IOEventSource12checkForWorkEv __ZN13IOEventSource12tryCloseGateEv +__ZN13IOEventSource14setActionBlockEU13block_pointerFivE __ZN13IOEventSource19signalWorkAvailableEv __ZN13IOEventSource4freeEv __ZN13IOEventSource4initEP8OSObjectPFvS1_zE @@ -344,10 +422,13 @@ __ZN13IOEventSource9MetaClassC2Ev __ZN13IOEventSource9closeGateEv __ZN13IOEventSource9metaClassE __ZN13IOEventSource9setActionEPFvP8OSObjectzE +__ZN13IOEventSource9setRefconEPv __ZN13IOEventSourceC1EPK11OSMetaClass __ZN13IOEventSourceC2EPK11OSMetaClass __ZN13IOEventSourceD0Ev __ZN13IOEventSourceD2Ev +__ZN13IOEventSourcedlEPvm +__ZN13IOEventSourcenwEm __ZN13_IOServiceJob10gMetaClassE __ZN13_IOServiceJob10pingConfigEPS_ __ZN13_IOServiceJob10superClassE @@ -360,6 +441,8 @@ __ZN13_IOServiceJobC2EPK11OSMetaClass __ZN13_IOServiceJobC2Ev __ZN13_IOServiceJobD0Ev __ZN13_IOServiceJobD2Ev +__ZN13_IOServiceJobdlEPvm +__ZN13_IOServiceJobnwEm __ZN14IOMemoryCursor10gMetaClassE __ZN14IOMemoryCursor10superClassE __ZN14IOMemoryCursor9MetaClassC1Ev @@ -371,10 +454,14 @@ __ZN14IOMemoryCursorC2EPK11OSMetaClass __ZN14IOMemoryCursorC2Ev __ZN14IOMemoryCursorD0Ev __ZN14IOMemoryCursorD2Ev +__ZN14IOMemoryCursordlEPvm +__ZN14IOMemoryCursornwEm __ZN14IOPMrootDomain10gMetaClassE __ZN14IOPMrootDomain10superClassE +__ZN14IOPMrootDomain11setWakeTimeEy __ZN14IOPMrootDomain11sleepSystemEv __ZN14IOPMrootDomain12tellChangeUpEm +__ZN14IOPMrootDomain12updateReportEP19IOReportChannelListjPvS2_ __ZN14IOPMrootDomain12wakeFromDozeEv __ZN14IOPMrootDomain13askChangeDownEm __ZN14IOPMrootDomain13copyPMSettingEP8OSSymbol @@ -384,6 +471,7 @@ __ZN14IOPMrootDomain14publishFeatureEPKc __ZN14IOPMrootDomain14publishFeatureEPKcjPj __ZN14IOPMrootDomain14shutdownSystemEv __ZN14IOPMrootDomain14tellChangeDownEm +__ZN14IOPMrootDomain15configureReportEP19IOReportChannelListjPvS2_ __ZN14IOPMrootDomain15powerChangeDoneEm __ZN14IOPMrootDomain16tellNoChangeDownEm __ZN14IOPMrootDomain17createPMAssertionEyjP9IOServicePKc @@ -411,6 +499,30 @@ __ZN14IOPMrootDomainC2EPK11OSMetaClass __ZN14IOPMrootDomainC2Ev __ZN14IOPMrootDomainD0Ev __ZN14IOPMrootDomainD2Ev +__ZN14IOPMrootDomaindlEPvm +__ZN14IOPMrootDomainnwEm +__ZN14IOReportLegend10gMetaClassE +__ZN14IOReportLegend10superClassE +__ZN14IOReportLegend14addLegendEntryEP12OSDictionaryPKcS3_ +__ZN14IOReportLegend14organizeLegendEP12OSDictionaryPK8OSSymbolS4_ +__ZN14IOReportLegend17addReporterLegendEP10IOReporterPKcS3_ 
+__ZN14IOReportLegend17addReporterLegendEP9IOServiceP10IOReporterPKcS5_ +__ZN14IOReportLegend4freeEv +__ZN14IOReportLegend4withEP7OSArray +__ZN14IOReportLegend8initWithEP7OSArray +__ZN14IOReportLegend9MetaClassC1Ev +__ZN14IOReportLegend9MetaClassC2Ev +__ZN14IOReportLegend9getLegendEv +__ZN14IOReportLegend9metaClassE +__ZN14IOReportLegendC1EPK11OSMetaClass +__ZN14IOReportLegendC1Ev +__ZN14IOReportLegendC2EPK11OSMetaClass +__ZN14IOReportLegendC2Ev +__ZN14IOReportLegendD0Ev +__ZN14IOReportLegendD1Ev +__ZN14IOReportLegendD2Ev +__ZN14IOReportLegenddlEPvm +__ZN14IOReportLegendnwEm __ZN15IOConditionLock10gMetaClassE __ZN15IOConditionLock10superClassE __ZN15IOConditionLock10unlockWithEi @@ -431,6 +543,10 @@ __ZN15IOConditionLockC2EPK11OSMetaClass __ZN15IOConditionLockC2Ev __ZN15IOConditionLockD0Ev __ZN15IOConditionLockD2Ev +__ZN15IOConditionLockdlEPvm +__ZN15IOConditionLocknwEm +__ZN15IODispatchQueue10gMetaClassE +__ZN15IODispatchQueue10superClassE __ZN15IODMAController10gMetaClassE __ZN15IODMAController10superClassE __ZN15IODMAController18completeDMACommandEP16IODMAEventSourceP12IODMACommand @@ -440,6 +556,11 @@ __ZN15IODMAController9MetaClassC2Ev __ZN15IODMAController9metaClassE __ZN15IODMAControllerC2EPK11OSMetaClass __ZN15IODMAControllerD2Ev +__ZN15IODMAControllerdlEPvm +__ZN15IODMAControllernwEm +__ZN15IODispatchQueue13Create_InvokeE5IORPCPFiPKcyyPPS_E +__ZN15IODispatchQueue14SetPort_InvokeE5IORPCP15OSMetaClassBasePFiS2_P8ipc_portE +__ZN15IODispatchQueue9metaClassE __ZN15IOPMPowerSource10cycleCountEv __ZN15IOPMPowerSource10gMetaClassE __ZN15IOPMPowerSource10isChargingEv @@ -496,6 +617,8 @@ __ZN15IOPMPowerSourceC2EPK11OSMetaClass __ZN15IOPMPowerSourceC2Ev __ZN15IOPMPowerSourceD0Ev __ZN15IOPMPowerSourceD2Ev +__ZN15IOPMPowerSourcedlEPvm +__ZN15IOPMPowerSourcenwEm __ZN15IORegistryEntry10gMetaClassE __ZN15IORegistryEntry10initializeEv __ZN15IORegistryEntry10superClassE @@ -514,12 +637,18 @@ __ZN15IORegistryEntry13attachToChildEPS_PK15IORegistryPlane __ZN15IORegistryEntry13childFromPathEPKcPK15IORegistryPlanePcPi __ZN15IORegistryEntry13setPropertiesEP8OSObject __ZN15IORegistryEntry14attachToParentEPS_PK15IORegistryPlane +__ZN15IORegistryEntry14propertyExistsEPK8OSString +__ZN15IORegistryEntry14propertyExistsEPK8OSSymbol +__ZN15IORegistryEntry14propertyExistsEPKc __ZN15IORegistryEntry14removePropertyEPK8OSString __ZN15IORegistryEntry14removePropertyEPK8OSSymbol __ZN15IORegistryEntry14removePropertyEPKc __ZN15IORegistryEntry15detachFromChildEPS_PK15IORegistryPlane __ZN15IORegistryEntry15getRegistryRootEv __ZN15IORegistryEntry16detachFromParentEPS_PK15IORegistryPlane +__ZN15IORegistryEntry16propertyHasValueEPK8OSStringPK8OSObject +__ZN15IORegistryEntry16propertyHasValueEPK8OSSymbolPK8OSObject +__ZN15IORegistryEntry16propertyHasValueEPKcPK8OSObject __ZN15IORegistryEntry16setPropertyTableEP12OSDictionary __ZN15IORegistryEntry17matchPathLocationEPKcPK15IORegistryPlane __ZN15IORegistryEntry17runPropertyActionEPFiP8OSObjectPvS2_S2_S2_ES1_S2_S2_S2_S2_ @@ -544,6 +673,8 @@ __ZN15IORegistryEntryC2EPK11OSMetaClass __ZN15IORegistryEntryC2Ev __ZN15IORegistryEntryD0Ev __ZN15IORegistryEntryD2Ev +__ZN15IORegistryEntrydlEPvm +__ZN15IORegistryEntrynwEm __ZN15IORegistryPlane10gMetaClassE __ZN15IORegistryPlane10superClassE __ZN15IORegistryPlane9MetaClassC1Ev @@ -555,6 +686,49 @@ __ZN15IORegistryPlaneC2EPK11OSMetaClass __ZN15IORegistryPlaneC2Ev __ZN15IORegistryPlaneD0Ev __ZN15IORegistryPlaneD2Ev +__ZN15IORegistryPlanedlEPvm +__ZN15IORegistryPlanenwEm +__ZN15IOStateReporter10gMetaClassE 
+__ZN15IOStateReporter10setStateIDEyiy +__ZN15IOStateReporter10superClassE +__ZN15IOStateReporter14_getStateValueEyyNS_13valueSelectorE +__ZN15IOStateReporter15setChannelStateEyy +__ZN15IOStateReporter15setChannelStateEyyyy +__ZN15IOStateReporter16_getStateIndicesEyyPiS0_ +__ZN15IOStateReporter16handleSetStateIDEyiy +__ZN15IOStateReporter17handleSwapCleanupEi +__ZN15IOStateReporter17handleSwapPrepareEi +__ZN15IOStateReporter17setStateByIndicesEii +__ZN15IOStateReporter17setStateByIndicesEiiyy +__ZN15IOStateReporter19updateChannelValuesEi +__ZN15IOStateReporter20handleAddChannelSwapEyPK8OSSymbol +__ZN15IOStateReporter20overrideChannelStateEyyyyy +__ZN15IOStateReporter21getStateInTransitionsEyy +__ZN15IOStateReporter21getStateResidencyTimeEyy +__ZN15IOStateReporter21incrementChannelStateEyyyyy +__ZN15IOStateReporter23handleSetStateByIndicesEiiyy +__ZN15IOStateReporter26getStateLastTransitionTimeEyy +__ZN15IOStateReporter29getStateLastChannelUpdateTimeEy +__ZN15IOStateReporter35handleOverrideChannelStateByIndicesEiiyyy +__ZN15IOStateReporter36handleIncrementChannelStateByIndicesEiiyyy +__ZN15IOStateReporter4freeEv +__ZN15IOStateReporter4withEP9IOServicetiy +__ZN15IOStateReporter8initWithEP9IOServicetsy +__ZN15IOStateReporter8setStateEy +__ZN15IOStateReporter8setStateEyyy +__ZN15IOStateReporter9MetaClassC1Ev +__ZN15IOStateReporter9MetaClassC2Ev +__ZN15IOStateReporter9metaClassE +__ZN15IOStateReporterC1EPK11OSMetaClass +__ZN15IOStateReporterC1Ev +__ZN15IOStateReporterC2EPK11OSMetaClass +__ZN15IOStateReporterC2Ev +__ZN15IOStateReporterD0Ev +__ZN15IOStateReporterD1Ev +__ZN15IOStateReporterdlEPvm +__ZN15IOStateReporternwEm +__ZN16IODispatchSource10gMetaClassE +__ZN16IODispatchSource10superClassE __ZN16IODMAEventSource10gMetaClassE __ZN16IODMAEventSource10superClassE __ZN16IODMAEventSource12checkForWorkEv @@ -569,6 +743,15 @@ __ZN16IODMAEventSourceC2EPK11OSMetaClass __ZN16IODMAEventSourceC2Ev __ZN16IODMAEventSourceD0Ev __ZN16IODMAEventSourceD2Ev +__ZN16IODMAEventSourcedlEPvm +__ZN16IODMAEventSourcenwEm +__ZN16IODispatchSource13Cancel_InvokeE5IORPCP15OSMetaClassBasePFiS2_U13block_pointerFvvEE +__ZN16IODispatchSource16SetEnable_InvokeE5IORPCP15OSMetaClassBasePFiS2_bE +__ZN16IODispatchSource19CheckForWork_InvokeE5IORPCP15OSMetaClassBasePFiS2_S0_bE +__ZN16IODispatchSource23SetEnableWithCompletionEbU13block_pointerFvvEPFiP15OSMetaClassBase5IORPCE +__ZN16IODispatchSource30SetEnableWithCompletion_InvokeE5IORPCP15OSMetaClassBasePFiS2_bU13block_pointerFvvEE +__ZN16IODispatchSource9SetEnableEbPFiP15OSMetaClassBase5IORPCE +__ZN16IODispatchSource9metaClassE __ZN16IOPMinformeeList10gMetaClassE __ZN16IOPMinformeeList10initializeEv __ZN16IOPMinformeeList10nextInListEP12IOPMinformee @@ -588,6 +771,8 @@ __ZN16IOPMinformeeListC2EPK11OSMetaClass __ZN16IOPMinformeeListC2Ev __ZN16IOPMinformeeListD0Ev __ZN16IOPMinformeeListD2Ev +__ZN16IOPMinformeeListdlEPvm +__ZN16IOPMinformeeListnwEm __ZN16IORangeAllocator10gMetaClassE __ZN16IORangeAllocator10superClassE __ZN16IORangeAllocator12getFreeCountEv @@ -603,6 +788,27 @@ __ZN16IORangeAllocatorC2EPK11OSMetaClass __ZN16IORangeAllocatorC2Ev __ZN16IORangeAllocatorD0Ev __ZN16IORangeAllocatorD2Ev +__ZN16IORangeAllocatordlEPvm +__ZN16IORangeAllocatornwEm +__ZN16IOSimpleReporter10gMetaClassE +__ZN16IOSimpleReporter10superClassE +__ZN16IOSimpleReporter14incrementValueEyx +__ZN16IOSimpleReporter4withEP9IOServicety +__ZN16IOSimpleReporter8getValueEy +__ZN16IOSimpleReporter8initWithEP9IOServicety +__ZN16IOSimpleReporter8setValueEyx +__ZN16IOSimpleReporter9MetaClassC1Ev 
+__ZN16IOSimpleReporter9MetaClassC2Ev +__ZN16IOSimpleReporter9metaClassE +__ZN16IOSimpleReporterC1EPK11OSMetaClass +__ZN16IOSimpleReporterC1Ev +__ZN16IOSimpleReporterC2EPK11OSMetaClass +__ZN16IOSimpleReporterC2Ev +__ZN16IOSimpleReporterD0Ev +__ZN16IOSimpleReporterD1Ev +__ZN16IOSimpleReporterD2Ev +__ZN16IOSimpleReporterdlEPvm +__ZN16IOSimpleReporternwEm __ZN17IOBigMemoryCursor10gMetaClassE __ZN17IOBigMemoryCursor10superClassE __ZN17IOBigMemoryCursor9MetaClassC1Ev @@ -614,10 +820,15 @@ __ZN17IOBigMemoryCursorC2EPK11OSMetaClass __ZN17IOBigMemoryCursorC2Ev __ZN17IOBigMemoryCursorD0Ev __ZN17IOBigMemoryCursorD2Ev +__ZN17IOBigMemoryCursordlEPvm +__ZN17IOBigMemoryCursornwEm __ZN17IOPolledInterface10gMetaClassE +__ZN17IOPolledInterface10superClassE __ZN17IOPolledInterface16setEncryptionKeyEPKhm __ZN17IOPolledInterfaceC2EPK11OSMetaClass __ZN17IOPolledInterfaceD2Ev +__ZN17IOPolledInterfacedlEPvm +__ZN17IOPolledInterfacenwEm __ZN17IOPowerConnection10gMetaClassE __ZN17IOPowerConnection10superClassE __ZN17IOPowerConnection14getAwaitingAckEv @@ -643,6 +854,8 @@ __ZN17IOPowerConnectionC2EPK11OSMetaClass __ZN17IOPowerConnectionC2Ev __ZN17IOPowerConnectionD0Ev __ZN17IOPowerConnectionD2Ev +__ZN17IOPowerConnectiondlEPvm +__ZN17IOPowerConnectionnwEm __ZN17IOSharedDataQueue10gMetaClassE __ZN17IOSharedDataQueue10superClassE __ZN17IOSharedDataQueue19getMemoryDescriptorEv @@ -657,25 +870,34 @@ __ZN17IOSharedDataQueueC2EPK11OSMetaClass __ZN17IOSharedDataQueueC2Ev __ZN17IOSharedDataQueueD0Ev __ZN17IOSharedDataQueueD2Ev +__ZN17IOSharedDataQueuedlEPvm +__ZN17IOSharedDataQueuenwEm __ZN18IOMemoryDescriptor10addMappingEP11IOMemoryMap __ZN18IOMemoryDescriptor10gMetaClassE __ZN18IOMemoryDescriptor10initializeEv __ZN18IOMemoryDescriptor10superClassE __ZN18IOMemoryDescriptor13removeMappingEP11IOMemoryMap +__ZN18IOMemoryDescriptor15getDMAMapLengthEPy +__ZN18IOMemoryDescriptor15getDescriptorIDEv __ZN18IOMemoryDescriptor16getPreparationIDEv +__ZN18IOMemoryDescriptor17_CopyState_InvokeE5IORPCP15OSMetaClassBasePFiS2_P17_IOMDPrivateStateE __ZN18IOMemoryDescriptor18getPhysicalAddressEv +__ZN18IOMemoryDescriptor20CreateMapping_InvokeE5IORPCP15OSMetaClassBasePFiS2_yyyyyPP11IOMemoryMapE +__ZN18IOMemoryDescriptor26ktraceEmitPhysicalSegmentsEv __ZN18IOMemoryDescriptor30withPersistentMemoryDescriptorEPS_ __ZN18IOMemoryDescriptor4freeEv __ZN18IOMemoryDescriptor6getTagEv __ZN18IOMemoryDescriptor8getFlagsEv +__ZN18IOMemoryDescriptor8getVMTagEP7_vm_map __ZN18IOMemoryDescriptor8redirectEP4taskb __ZN18IOMemoryDescriptor9MetaClassC1Ev __ZN18IOMemoryDescriptor9MetaClassC2Ev __ZN18IOMemoryDescriptor9metaClassE +__ZN18IOMemoryDescriptor9setVMTagsEjj __ZN18IOMemoryDescriptorC2EPK11OSMetaClass __ZN18IOMemoryDescriptorD2Ev -__ZN18IOMemoryDescriptor8getVMTagEP7_vm_map -__ZN18IOMemoryDescriptor9setVMTagsEjj +__ZN18IOMemoryDescriptordlEPvm +__ZN18IOMemoryDescriptornwEm __ZN18IORegistryIterator10enterEntryEPK15IORegistryPlane __ZN18IORegistryIterator10enterEntryEv __ZN18IORegistryIterator10gMetaClassE @@ -698,6 +920,8 @@ __ZN18IORegistryIteratorC2EPK11OSMetaClass __ZN18IORegistryIteratorC2Ev __ZN18IORegistryIteratorD0Ev __ZN18IORegistryIteratorD2Ev +__ZN18IORegistryIteratordlEPvm +__ZN18IORegistryIteratornwEm __ZN18IOTimerEventSource10gMetaClassE __ZN18IOTimerEventSource10superClassE __ZN18IOTimerEventSource11setWorkLoopEP10IOWorkLoop @@ -706,6 +930,7 @@ __ZN18IOTimerEventSource13cancelTimeoutEv __ZN18IOTimerEventSource14setTimeoutFuncEv __ZN18IOTimerEventSource16timerEventSourceEP8OSObjectPFvS1_PS_E 
__ZN18IOTimerEventSource16timerEventSourceEjP8OSObjectPFvS1_PS_E +__ZN18IOTimerEventSource16timerEventSourceEjP8OSObjectU13block_pointerFvPS_E __ZN18IOTimerEventSource4freeEv __ZN18IOTimerEventSource4initEP8OSObjectPFvS1_PS_E __ZN18IOTimerEventSource4initEjP8OSObjectPFvS1_PS_E @@ -721,6 +946,10 @@ __ZN18IOTimerEventSourceC2EPK11OSMetaClass __ZN18IOTimerEventSourceC2Ev __ZN18IOTimerEventSourceD0Ev __ZN18IOTimerEventSourceD2Ev +__ZN18IOTimerEventSourcedlEPvm +__ZN18IOTimerEventSourcenwEm +__ZN18IOUserNotification10gMetaClassE +__ZN18IOUserNotification10superClassE __ZN18_IOServiceNotifier10gMetaClassE __ZN18_IOServiceNotifier10superClassE __ZN18_IOServiceNotifier4freeEv @@ -737,6 +966,28 @@ __ZN18_IOServiceNotifierC2EPK11OSMetaClass __ZN18_IOServiceNotifierC2Ev __ZN18_IOServiceNotifierD0Ev __ZN18_IOServiceNotifierD2Ev +__ZN18_IOServiceNotifierdlEPvm +__ZN18_IOServiceNotifiernwEm +__ZN19IOHistogramReporter10gMetaClassE +__ZN19IOHistogramReporter10superClassE +__ZN19IOHistogramReporter10tallyValueEx +__ZN19IOHistogramReporter18handleCreateLegendEv +__ZN19IOHistogramReporter20overrideBucketValuesEjyxxx +__ZN19IOHistogramReporter4freeEv +__ZN19IOHistogramReporter4withEP9IOServicetyPKcyiP24IOHistogramSegmentConfig +__ZN19IOHistogramReporter8initWithEP9IOServicetyPK8OSSymbolyiP24IOHistogramSegmentConfig +__ZN19IOHistogramReporter9MetaClassC1Ev +__ZN19IOHistogramReporter9MetaClassC2Ev +__ZN19IOHistogramReporter9metaClassE +__ZN19IOHistogramReporterC1EPK11OSMetaClass +__ZN19IOHistogramReporterC1Ev +__ZN19IOHistogramReporterC2EPK11OSMetaClass +__ZN19IOHistogramReporterC2Ev +__ZN19IOHistogramReporterD0Ev +__ZN19IOHistogramReporterD1Ev +__ZN19IOHistogramReporterD2Ev +__ZN19IOHistogramReporterdlEPvm +__ZN19IOHistogramReporternwEm __ZN19IOPMPowerSourceList10gMetaClassE __ZN19IOPMPowerSourceList10initializeEv __ZN19IOPMPowerSourceList10nextInListEP15IOPMPowerSource @@ -755,6 +1006,38 @@ __ZN19IOPMPowerSourceListC2EPK11OSMetaClass __ZN19IOPMPowerSourceListC2Ev __ZN19IOPMPowerSourceListD0Ev __ZN19IOPMPowerSourceListD2Ev +__ZN19IOPMPowerSourceListdlEPvm +__ZN19IOPMPowerSourceListnwEm +__ZN19IOPerfControlClient10copyClientEP9IOServicey +__ZN19IOPerfControlClient10gMetaClassE +__ZN19IOPerfControlClient10superClassE +__ZN19IOPerfControlClient10workSubmitEP9IOServicePNS_14WorkSubmitArgsE +__ZN19IOPerfControlClient14registerDeviceEP9IOServiceS1_ +__ZN19IOPerfControlClient15copyWorkContextEv +__ZN19IOPerfControlClient16unregisterDeviceEP9IOServiceS1_ +__ZN19IOPerfControlClient18workEndWithContextEP9IOServiceP8OSObjectPNS_11WorkEndArgsEb +__ZN19IOPerfControlClient18workSubmitAndBeginEP9IOServicePNS_14WorkSubmitArgsEPNS_13WorkBeginArgsE +__ZN19IOPerfControlClient20workBeginWithContextEP9IOServiceP8OSObjectPNS_13WorkBeginArgsE +__ZN19IOPerfControlClient21workSubmitWithContextEP9IOServiceP8OSObjectPNS_14WorkSubmitArgsE +__ZN19IOPerfControlClient29registerPerformanceControllerENS_23PerfControllerInterfaceE +__ZN19IOPerfControlClient29workSubmitAndBeginWithContextEP9IOServiceP8OSObjectPNS_14WorkSubmitArgsEPNS_13WorkBeginArgsE +__ZN19IOPerfControlClient4initEP9IOServicey +__ZN19IOPerfControlClient7workEndEP9IOServiceyPNS_11WorkEndArgsEb +__ZN19IOPerfControlClient9MetaClassC1Ev +__ZN19IOPerfControlClient9MetaClassC2Ev +__ZN19IOPerfControlClient9workBeginEP9IOServiceyPNS_13WorkBeginArgsE +__ZN19IOPerfControlClientC1EPK11OSMetaClass +__ZN19IOPerfControlClientC1Ev +__ZN19IOPerfControlClientC2EPK11OSMetaClass +__ZN19IOPerfControlClientC2Ev +__ZN19IOPerfControlClientD0Ev +__ZN19IOPerfControlClientD1Ev 
+__ZN19IOPerfControlClientD2Ev +__ZN19IOPerfControlClientdlEPvm +__ZN19IOPerfControlClientnwEm +__ZNK19IOPerfControlClient12getMetaClassEv +__ZNK19IOPerfControlClient9MetaClass5allocEv +__ZTV19IOPerfControlClient __ZN20IOLittleMemoryCursor10gMetaClassE __ZN20IOLittleMemoryCursor10superClassE __ZN20IOLittleMemoryCursor9MetaClassC1Ev @@ -766,6 +1049,8 @@ __ZN20IOLittleMemoryCursorC2EPK11OSMetaClass __ZN20IOLittleMemoryCursorC2Ev __ZN20IOLittleMemoryCursorD0Ev __ZN20IOLittleMemoryCursorD2Ev +__ZN20IOLittleMemoryCursordlEPvm +__ZN20IOLittleMemoryCursornwEm __ZN20RootDomainUserClient10gMetaClassE __ZN20RootDomainUserClient10superClassE __ZN20RootDomainUserClient11clientCloseEv @@ -779,6 +1064,8 @@ __ZN20RootDomainUserClientC2EPK11OSMetaClass __ZN20RootDomainUserClientC2Ev __ZN20RootDomainUserClientD0Ev __ZN20RootDomainUserClientD2Ev +__ZN20RootDomainUserClientdlEPvm +__ZN20RootDomainUserClientnwEm __ZN21IOInterruptController10gMetaClassE __ZN21IOInterruptController10superClassE __ZN21IOInterruptController14causeInterruptEP9IOServicei @@ -786,10 +1073,13 @@ __ZN21IOInterruptController15enableInterruptEP9IOServicei __ZN21IOInterruptController15handleInterruptEPvP9IOServicei __ZN21IOInterruptController16disableInterruptEP9IOServicei __ZN21IOInterruptController16getInterruptTypeEP9IOServiceiPi +__ZN21IOInterruptController17cancelDeferredIPIEj __ZN21IOInterruptController17registerInterruptEP9IOServiceiPvPFvS2_S2_S2_iES2_ __ZN21IOInterruptController19unregisterInterruptEP9IOServicei +__ZN21IOInterruptController25setCPUInterruptPropertiesEP9IOService __ZN21IOInterruptController26getInterruptHandlerAddressEv __ZN21IOInterruptController26timeStampSpuriousInterruptEv +__ZN21IOInterruptController7sendIPIEjb __ZN21IOInterruptController9MetaClassC1Ev __ZN21IOInterruptController9MetaClassC2Ev __ZN21IOInterruptController9metaClassE @@ -797,6 +1087,8 @@ __ZN21IOInterruptControllerC1EPK11OSMetaClass __ZN21IOInterruptControllerC2EPK11OSMetaClass __ZN21IOInterruptControllerD0Ev __ZN21IOInterruptControllerD2Ev +__ZN21IOInterruptControllerdlEPvm +__ZN21IOInterruptControllernwEm __ZN21IONaturalMemoryCursor10gMetaClassE __ZN21IONaturalMemoryCursor10superClassE __ZN21IONaturalMemoryCursor9MetaClassC1Ev @@ -808,6 +1100,8 @@ __ZN21IONaturalMemoryCursorC2EPK11OSMetaClass __ZN21IONaturalMemoryCursorC2Ev __ZN21IONaturalMemoryCursorD0Ev __ZN21IONaturalMemoryCursorD2Ev +__ZN21IONaturalMemoryCursordlEPvm +__ZN21IONaturalMemoryCursornwEm __ZN21IOSubMemoryDescriptor10gMetaClassE __ZN21IOSubMemoryDescriptor10superClassE __ZN21IOSubMemoryDescriptor16getPreparationIDEv @@ -822,14 +1116,20 @@ __ZN21IOSubMemoryDescriptorC2EPK11OSMetaClass __ZN21IOSubMemoryDescriptorC2Ev __ZN21IOSubMemoryDescriptorD0Ev __ZN21IOSubMemoryDescriptorD2Ev +__ZN21IOSubMemoryDescriptordlEPvm +__ZN21IOSubMemoryDescriptornwEm __ZN22IOInterruptEventSource10gMetaClassE __ZN22IOInterruptEventSource10superClassE __ZN22IOInterruptEventSource11setWorkLoopEP10IOWorkLoop __ZN22IOInterruptEventSource12checkForWorkEv __ZN22IOInterruptEventSource17interruptOccurredEPvP9IOServicei +__ZN22IOInterruptEventSource20interruptEventSourceEP8OSObjectP9IOServiceiU13block_pointerFvPS_iE __ZN22IOInterruptEventSource20interruptEventSourceEP8OSObjectPFvS1_PS_iEP9IOServicei __ZN22IOInterruptEventSource23normalInterruptOccurredEPvP9IOServicei __ZN22IOInterruptEventSource24disableInterruptOccurredEPvP9IOServicei +__ZN22IOInterruptEventSource27getPimaryInterruptTimestampEv:__ZN22IOInterruptEventSource28getPrimaryInterruptTimestampEv 
+__ZN22IOInterruptEventSource28getPrimaryInterruptTimestampEv +__ZN22IOInterruptEventSource31enablePrimaryInterruptTimestampEb __ZN22IOInterruptEventSource4freeEv __ZN22IOInterruptEventSource4initEP8OSObjectPFvS1_PS_iEP9IOServicei __ZN22IOInterruptEventSource6enableEv @@ -843,6 +1143,11 @@ __ZN22IOInterruptEventSourceC2EPK11OSMetaClass __ZN22IOInterruptEventSourceC2Ev __ZN22IOInterruptEventSourceD0Ev __ZN22IOInterruptEventSourceD2Ev +__ZN22IOInterruptEventSourcedlEPvm +__ZN22IOInterruptEventSourcenwEm +__ZN22IOServiceCompatibility9metaClassE +__ZN22IOServiceCompatibility10gMetaClassE +__ZN22IOServiceCompatibility10superClassE __ZN22_IOOpenServiceIterator10gMetaClassE __ZN22_IOOpenServiceIterator10superClassE __ZN22_IOOpenServiceIterator13getNextObjectEv @@ -859,6 +1164,8 @@ __ZN22_IOOpenServiceIteratorC2EPK11OSMetaClass __ZN22_IOOpenServiceIteratorC2Ev __ZN22_IOOpenServiceIteratorD0Ev __ZN22_IOOpenServiceIteratorD2Ev +__ZN22_IOOpenServiceIteratordlEPvm +__ZN22_IOOpenServiceIteratornwEm __ZN23IOMultiMemoryDescriptor10gMetaClassE __ZN23IOMultiMemoryDescriptor10superClassE __ZN23IOMultiMemoryDescriptor16getPreparationIDEv @@ -872,9 +1179,13 @@ __ZN23IOMultiMemoryDescriptorC2EPK11OSMetaClass __ZN23IOMultiMemoryDescriptorC2Ev __ZN23IOMultiMemoryDescriptorD0Ev __ZN23IOMultiMemoryDescriptorD2Ev +__ZN23IOMultiMemoryDescriptordlEPvm +__ZN23IOMultiMemoryDescriptornwEm __ZN24IOBufferMemoryDescriptor10gMetaClassE __ZN24IOBufferMemoryDescriptor10superClassE +__ZN24IOBufferMemoryDescriptor13Create_InvokeE5IORPCPFiyyyPPS_E __ZN24IOBufferMemoryDescriptor14getBytesNoCopyEv +__ZN24IOBufferMemoryDescriptor16SetLength_InvokeE5IORPCP15OSMetaClassBasePFiS2_yE __ZN24IOBufferMemoryDescriptor4freeEv __ZN24IOBufferMemoryDescriptor9MetaClassC1Ev __ZN24IOBufferMemoryDescriptor9MetaClassC2Ev @@ -885,6 +1196,53 @@ __ZN24IOBufferMemoryDescriptorC2EPK11OSMetaClass __ZN24IOBufferMemoryDescriptorC2Ev __ZN24IOBufferMemoryDescriptorD0Ev __ZN24IOBufferMemoryDescriptorD2Ev +__ZN24IOBufferMemoryDescriptordlEPvm +__ZN24IOBufferMemoryDescriptornwEm +__ZN25IODataQueueDispatchSource10CopyMemoryEPP18IOMemoryDescriptorPFiP15OSMetaClassBase5IORPCE +__ZN25IODataQueueDispatchSource10gMetaClassE +__ZN25IODataQueueDispatchSource10superClassE +__ZN25IODataQueueDispatchSource12DataServicedEP8OSActionPFiP15OSMetaClassBase5IORPCE +__ZN25IODataQueueDispatchSource13Create_InvokeE5IORPCPFiyP15IODispatchQueuePPS_E +__ZN25IODataQueueDispatchSource13DataAvailableEP8OSActionPFiP15OSMetaClassBase5IORPCE +__ZN25IODataQueueDispatchSource15IsDataAvailableEv +__ZN25IODataQueueDispatchSource16SendDataServicedEv +__ZN25IODataQueueDispatchSource17CopyMemory_InvokeE5IORPCP15OSMetaClassBasePFiS2_PP18IOMemoryDescriptorE +__ZN25IODataQueueDispatchSource17SendDataAvailableEv +__ZN25IODataQueueDispatchSource19DataServiced_InvokeE5IORPCP15OSMetaClassBasePFvS2_P8OSActionE +__ZN25IODataQueueDispatchSource19DataServiced_InvokeE5IORPCP15OSMetaClassBasePFvS2_P8OSActionEPK11OSMetaClass +__ZN25IODataQueueDispatchSource19DequeueWithCoalesceEPbU13block_pointerFvPKvmE +__ZN25IODataQueueDispatchSource19EnqueueWithCoalesceEjPbU13block_pointerFvPvmE +__ZN25IODataQueueDispatchSource20DataAvailable_InvokeE5IORPCP15OSMetaClassBasePFvS2_P8OSActionE +__ZN25IODataQueueDispatchSource20DataAvailable_InvokeE5IORPCP15OSMetaClassBasePFvS2_P8OSActionEPK11OSMetaClass +__ZN25IODataQueueDispatchSource22SetDataServicedHandlerEP8OSActionPFiP15OSMetaClassBase5IORPCE +__ZN25IODataQueueDispatchSource23CopyDataServicedHandlerEPP8OSActionPFiP15OSMetaClassBase5IORPCE 
+__ZN25IODataQueueDispatchSource23SetDataAvailableHandlerEP8OSActionPFiP15OSMetaClassBase5IORPCE +__ZN25IODataQueueDispatchSource24CopyDataAvailableHandlerEPP8OSActionPFiP15OSMetaClassBase5IORPCE +__ZN25IODataQueueDispatchSource29SetDataServicedHandler_InvokeE5IORPCP15OSMetaClassBasePFiS2_P8OSActionE +__ZN25IODataQueueDispatchSource30CopyDataServicedHandler_InvokeE5IORPCP15OSMetaClassBasePFiS2_PP8OSActionE +__ZN25IODataQueueDispatchSource30SetDataAvailableHandler_InvokeE5IORPCP15OSMetaClassBasePFiS2_P8OSActionE +__ZN25IODataQueueDispatchSource31CopyDataAvailableHandler_InvokeE5IORPCP15OSMetaClassBasePFiS2_PP8OSActionE +__ZN25IODataQueueDispatchSource4PeekEU13block_pointerFvPKvmE +__ZN25IODataQueueDispatchSource4freeEv +__ZN25IODataQueueDispatchSource4initEv +__ZN25IODataQueueDispatchSource6CreateEyP15IODispatchQueuePPS_ +__ZN25IODataQueueDispatchSource7DequeueEU13block_pointerFvPKvmE +__ZN25IODataQueueDispatchSource7EnqueueEjU13block_pointerFvPvmE +__ZN25IODataQueueDispatchSource8DispatchE5IORPC +__ZN25IODataQueueDispatchSource9MetaClass8DispatchE5IORPC +__ZN25IODataQueueDispatchSource9MetaClassC1Ev +__ZN25IODataQueueDispatchSource9MetaClassC2Ev +__ZN25IODataQueueDispatchSource9_DispatchEPS_5IORPC +__ZN25IODataQueueDispatchSource9metaClassE +__ZN25IODataQueueDispatchSourceC1EPK11OSMetaClass +__ZN25IODataQueueDispatchSourceC1Ev +__ZN25IODataQueueDispatchSourceC2EPK11OSMetaClass +__ZN25IODataQueueDispatchSourceC2Ev +__ZN25IODataQueueDispatchSourceD0Ev +__ZN25IODataQueueDispatchSourceD1Ev +__ZN25IODataQueueDispatchSourceD2Ev +__ZN25IODataQueueDispatchSourcedlEPvm +__ZN25IODataQueueDispatchSourcenwEm __ZN25IOGeneralMemoryDescriptor10gMetaClassE __ZN25IOGeneralMemoryDescriptor10superClassE __ZN25IOGeneralMemoryDescriptor16getPreparationIDEv @@ -898,6 +1256,15 @@ __ZN25IOGeneralMemoryDescriptorC2EPK11OSMetaClass __ZN25IOGeneralMemoryDescriptorC2Ev __ZN25IOGeneralMemoryDescriptorD0Ev __ZN25IOGeneralMemoryDescriptorD2Ev +__ZN25IOGeneralMemoryDescriptordlEPvm +__ZN25IOGeneralMemoryDescriptornwEm +__ZN25IOInterruptDispatchSource13Create_InvokeE5IORPCPFiP9IOServicejP15IODispatchQueuePPS_E +__ZN25IOInterruptDispatchSource17SetHandler_InvokeE5IORPCP15OSMetaClassBasePFiS2_P8OSActionE +__ZN25IOInterruptDispatchSource24InterruptOccurred_InvokeE5IORPCP15OSMetaClassBasePFvS2_P8OSActionyyE +__ZN25IOInterruptDispatchSource24InterruptOccurred_InvokeE5IORPCP15OSMetaClassBasePFvS2_P8OSActionyyEPK11OSMetaClass +__ZN25IOInterruptDispatchSource9metaClassE +__ZN25IOInterruptDispatchSource10gMetaClassE +__ZN25IOInterruptDispatchSource10superClassE __ZN26_IOServiceInterestNotifier10gMetaClassE __ZN26_IOServiceInterestNotifier10superClassE __ZN26_IOServiceInterestNotifier4freeEv @@ -914,6 +1281,8 @@ __ZN26_IOServiceInterestNotifierC2EPK11OSMetaClass __ZN26_IOServiceInterestNotifierC2Ev __ZN26_IOServiceInterestNotifierD0Ev __ZN26_IOServiceInterestNotifierD2Ev +__ZN26_IOServiceInterestNotifierdlEPvm +__ZN26_IOServiceInterestNotifiernwEm __ZN27IOSharedInterruptController10gMetaClassE __ZN27IOSharedInterruptController10superClassE __ZN27IOSharedInterruptController15enableInterruptEP9IOServicei @@ -933,12 +1302,15 @@ __ZN27IOSharedInterruptControllerC2EPK11OSMetaClass __ZN27IOSharedInterruptControllerC2Ev __ZN27IOSharedInterruptControllerD0Ev __ZN27IOSharedInterruptControllerD2Ev +__ZN27IOSharedInterruptControllerdlEPvm +__ZN27IOSharedInterruptControllernwEm __ZN28IOFilterInterruptEventSource10gMetaClassE __ZN28IOFilterInterruptEventSource10superClassE __ZN28IOFilterInterruptEventSource15signalInterruptEv 
__ZN28IOFilterInterruptEventSource20interruptEventSourceEP8OSObjectPFvS1_P22IOInterruptEventSourceiEP9IOServicei __ZN28IOFilterInterruptEventSource23normalInterruptOccurredEPvP9IOServicei __ZN28IOFilterInterruptEventSource24disableInterruptOccurredEPvP9IOServicei +__ZN28IOFilterInterruptEventSource26filterInterruptEventSourceEP8OSObjectP9IOServiceiU13block_pointerFvP22IOInterruptEventSourceiEU13block_pointerFbPS_E __ZN28IOFilterInterruptEventSource26filterInterruptEventSourceEP8OSObjectPFvS1_P22IOInterruptEventSourceiEPFbS1_PS_EP9IOServicei __ZN28IOFilterInterruptEventSource4freeEv __ZN28IOFilterInterruptEventSource4initEP8OSObjectPFvS1_P22IOInterruptEventSourceiEP9IOServicei @@ -952,6 +1324,8 @@ __ZN28IOFilterInterruptEventSourceC2EPK11OSMetaClass __ZN28IOFilterInterruptEventSourceC2Ev __ZN28IOFilterInterruptEventSourceD0Ev __ZN28IOFilterInterruptEventSourceD2Ev +__ZN28IOFilterInterruptEventSourcedlEPvm +__ZN28IOFilterInterruptEventSourcenwEm __ZN29IOInterleavedMemoryDescriptor10gMetaClassE __ZN29IOInterleavedMemoryDescriptor10superClassE __ZN29IOInterleavedMemoryDescriptor4freeEv @@ -964,12 +1338,19 @@ __ZN29IOInterleavedMemoryDescriptorC2EPK11OSMetaClass __ZN29IOInterleavedMemoryDescriptorC2Ev __ZN29IOInterleavedMemoryDescriptorD0Ev __ZN29IOInterleavedMemoryDescriptorD2Ev +__ZN29IOInterleavedMemoryDescriptordlEPvm +__ZN29IOInterleavedMemoryDescriptornwEm +__ZN6IOPMGR10gMetaClassE +__ZN6IOPMGRC2EPK11OSMetaClass +__ZN6IOPMGRD2Ev +__ZN6IOPMGRdlEPvm +__ZN6IOPMGRnwEm __ZN8IOMapper10gMetaClassE __ZN8IOMapper10superClassE __ZN8IOMapper17setMapperRequiredEb __ZN8IOMapper19copyMapperForDeviceEP9IOService -__ZN8IOMapper28copyMapperForDeviceWithIndexEP9IOServicej __ZN8IOMapper19waitForSystemMapperEv +__ZN8IOMapper28copyMapperForDeviceWithIndexEP9IOServicej __ZN8IOMapper4freeEv __ZN8IOMapper5startEP9IOService __ZN8IOMapper7gSystemE @@ -978,6 +1359,19 @@ __ZN8IOMapper9MetaClassC2Ev __ZN8IOMapper9metaClassE __ZN8IOMapperC2EPK11OSMetaClass __ZN8IOMapperD2Ev +__ZN8IOMapperdlEPvm +__ZN8IOMappernwEm +__ZN8OSAction10gMetaClassE +__ZN8OSAction13Create_InvokeE5IORPCPFiP8OSObjectyymPPS_E +__ZN8OSAction17SetAbortedHandlerEU13block_pointerFvvE +__ZN8OSAction4freeEv +__ZN8OSAction9_DispatchEPS_5IORPC +__ZN8OSActionC2EPK11OSMetaClass +__ZN8OSActionD0Ev +__ZN8OSActionD1Ev +__ZN8OSActionD2Ev +__ZN8OSObject23SetDispatchQueue_InvokeE5IORPCP15OSMetaClassBasePFiS2_PKcP15IODispatchQueueE +__ZN8OSObject24CopyDispatchQueue_InvokeE5IORPCP15OSMetaClassBasePFiS2_PKcPP15IODispatchQueueE __ZN9IOCommand10gMetaClassE __ZN9IOCommand10superClassE __ZN9IOCommand4initEv @@ -988,28 +1382,35 @@ __ZN9IOCommandC1EPK11OSMetaClass __ZN9IOCommandC2EPK11OSMetaClass __ZN9IOCommandD0Ev __ZN9IOCommandD2Ev +__ZN9IOCommanddlEPvm +__ZN9IOCommandnwEm __ZN9IOService10gMetaClassE __ZN9IOService10initializeEv __ZN9IOService10joinPMtreeEPS_ __ZN9IOService10makeUsableEv __ZN9IOService10superClassE +__ZN9IOService11Stop_InvokeE5IORPCP15OSMetaClassBasePFiS2_PS_E __ZN9IOService11addLocationEP12OSDictionary __ZN9IOService11getPlatformEv __ZN9IOService11setPlatformEP16IOPlatformExpert +__ZN9IOService12Start_InvokeE5IORPCP15OSMetaClassBasePFiS2_PS_E __ZN9IOService12getBusyStateEv __ZN9IOService12getResourcesEv __ZN9IOService12nameMatchingEPK8OSStringP12OSDictionary __ZN9IOService12nameMatchingEPKcP12OSDictionary __ZN9IOService12passiveMatchEP12OSDictionaryb __ZN9IOService12tellChangeUpEm +__ZN9IOService13Create_InvokeE5IORPCP15OSMetaClassBasePFiS2_PS_PKcPS3_E __ZN9IOService13addPowerChildEPS_ __ZN9IOService13askChangeDownEm 
__ZN9IOService13checkResourceEP8OSObject __ZN9IOService13getPowerStateEv __ZN9IOService13matchLocationEPS_ __ZN9IOService13setPowerStateEmPS_ +__ZN9IOService14SetName_InvokeE5IORPCP15OSMetaClassBasePFiS2_PKcE __ZN9IOService14activityTickleEmm __ZN9IOService14applyToClientsEPFvPS_PvES1_ +__ZN9IOService14applyToClientsEU13block_pointerFvPS_E __ZN9IOService14causeInterruptEi __ZN9IOService14checkResourcesEv __ZN9IOService14getServiceRootEv @@ -1033,10 +1434,11 @@ __ZN9IOService15setDeviceMemoryEP7OSArray __ZN9IOService15setPMRootDomainEP14IOPMrootDomain __ZN9IOService16allowPowerChangeEm __ZN9IOService16applyToProvidersEPFvPS_PvES1_ +__ZN9IOService16applyToProvidersEU13block_pointerFvPS_E __ZN9IOService16disableInterruptEi -__ZN9IOService16getCPUSnoopDelayEv __ZN9IOService16getInterruptTypeEiPi __ZN9IOService16propertyMatchingEPK8OSSymbolPK8OSObjectP12OSDictionary +__ZN9IOService16registerInterestEPK8OSSymbolU13block_pointerFijPS_PvmE __ZN9IOService16removePowerChildEP17IOPowerConnection __ZN9IOService16resolveInterruptEPS_i __ZN9IOService16resourceMatchingEPK8OSStringP12OSDictionary @@ -1063,28 +1465,41 @@ __ZN9IOService19powerOverrideOnPrivEv __ZN9IOService19registerPowerDriverEPS_P14IOPMPowerStatem __ZN9IOService19start_PM_idle_timerEv __ZN9IOService19unregisterInterruptEi +__ZN9IOService20NewUserClient_InvokeE5IORPCP15OSMetaClassBasePFiS2_jPP12IOUserClientE +__ZN9IOService20SetPowerState_InvokeE5IORPCP15OSMetaClassBasePFiS2_jE +__ZN9IOService20SetProperties_InvokeE5IORPCP15OSMetaClassBasePFiS2_P12OSDictionaryE __ZN9IOService20callPlatformFunctionEPK8OSSymbolbPvS3_S3_S3_ __ZN9IOService20callPlatformFunctionEPKcbPvS2_S2_S2_ __ZN9IOService20getDeviceMemoryCountEv __ZN9IOService20powerOverrideOffPrivEv __ZN9IOService20unlockForArbitrationEv +__ZN9IOService21CopyProperties_InvokeE5IORPCP15OSMetaClassBasePFiS2_PP12OSDictionaryE +__ZN9IOService21SearchProperty_InvokeE5IORPCP15OSMetaClassBasePFiS2_PKcS4_yPP8OSObjectE __ZN9IOService21getClientWithCategoryEPK8OSSymbol __ZN9IOService21powerStateDidChangeToEmmPS_ __ZN9IOService21temporaryPowerClampOnEv __ZN9IOService21unregisterAllInterestEv +__ZN9IOService22RegisterService_InvokeE5IORPCP15OSMetaClassBasePFiS2_E __ZN9IOService22acknowledgePowerChangeEPS_ __ZN9IOService22changePowerStateToPrivEm __ZN9IOService22copyClientWithCategoryEPK8OSSymbol __ZN9IOService22powerStateWillChangeToEmmPS_ +__ZN9IOService22registerInterruptBlockEiP8OSObjectU13block_pointerFvPS_iE __ZN9IOService22waitForMatchingServiceEP12OSDictionaryy +__ZN9IOService23ChangePowerState_InvokeE5IORPCP15OSMetaClassBasePFiS2_jE __ZN9IOService23currentPowerConsumptionEv __ZN9IOService23registryEntryIDMatchingEyP12OSDictionary __ZN9IOService23requestPowerDomainStateEmP17IOPowerConnectionm +__ZN9IOService23updatePowerStatesReportEjPvS0_ +__ZN9IOService23updateSimplePowerReportEjPvS0_ __ZN9IOService24acknowledgeSetPowerStateEv __ZN9IOService24getDeviceMemoryWithIndexEj __ZN9IOService24powerStateForDomainStateEm __ZN9IOService24registerInterestedDriverEPS_ __ZN9IOService24requireMaxInterruptDelayEj +__ZN9IOService25GetRegistryEntryID_InvokeE5IORPCP15OSMetaClassBasePFiS2_PyE +__ZN9IOService26configurePowerStatesReportEjPv +__ZN9IOService26configureSimplePowerReportEjPv __ZN9IOService26deRegisterInterestedDriverEPS_ __ZN9IOService27maxCapabilityForDomainStateEm __ZN9IOService31initialPowerStateForDomainStateEm @@ -1092,6 +1507,7 @@ __ZN9IOService4freeEv __ZN9IOService4initEP12OSDictionary __ZN9IOService4initEP15IORegistryEntryPK15IORegistryPlane __ZN9IOService4stopEPS_ 
+__ZN9IOService5StartEPS_PFiP15OSMetaClassBase5IORPCE __ZN9IOService5startEPS_ __ZN9IOService6PMinitEv __ZN9IOService6PMstopEv @@ -1109,9 +1525,13 @@ __ZN9IOServiceC2EPK11OSMetaClass __ZN9IOServiceC2Ev __ZN9IOServiceD0Ev __ZN9IOServiceD2Ev +__ZN9IOServicedlEPvm +__ZN9IOServicenwEm __ZNK10IONotifier12getMetaClassEv __ZNK10IONotifier9MetaClass5allocEv -__ZNK10IOWorkLoop12getMetaClassEv +__ZNK10IOReporter12getMetaClassEv +__ZNK10IOReporter9MetaClass5allocEv +__ZNK10IOWorkLoop12getMetaClassEv __ZNK10IOWorkLoop19enableAllInterruptsEv __ZNK10IOWorkLoop20disableAllInterruptsEv __ZNK10IOWorkLoop21enableAllEventSourcesEv @@ -1134,6 +1554,7 @@ __ZNK11IOResources12getMetaClassEv __ZNK11IOResources9MetaClass5allocEv __ZNK12IODMACommand12getMetaClassEv __ZNK12IODMACommand19getMemoryDescriptorEv +__ZNK12IODMACommand21getIOMemoryDescriptorEv __ZNK12IODMACommand9MetaClass5allocEv __ZNK12IOPMinformee12getMetaClassEv __ZNK12IOPMinformee9MetaClass5allocEv @@ -1147,10 +1568,12 @@ __ZNK13IOCommandPool12getMetaClassEv __ZNK13IOCommandPool9MetaClass5allocEv __ZNK13IOEventSource11getWorkLoopEv __ZNK13IOEventSource12getMetaClassEv +__ZNK13IOEventSource14getActionBlockEU13block_pointerFivE __ZNK13IOEventSource7getNextEv __ZNK13IOEventSource8onThreadEv __ZNK13IOEventSource9MetaClass5allocEv __ZNK13IOEventSource9getActionEv +__ZNK13IOEventSource9getRefconEv __ZNK13IOEventSource9isEnabledEv __ZNK13_IOServiceJob12getMetaClassEv __ZNK13_IOServiceJob9MetaClass5allocEv @@ -1158,6 +1581,8 @@ __ZNK14IOMemoryCursor12getMetaClassEv __ZNK14IOMemoryCursor9MetaClass5allocEv __ZNK14IOPMrootDomain12getMetaClassEv __ZNK14IOPMrootDomain9MetaClass5allocEv +__ZNK14IOReportLegend12getMetaClassEv +__ZNK14IOReportLegend9MetaClass5allocEv __ZNK15IOConditionLock12getConditionEv __ZNK15IOConditionLock12getMetaClassEv __ZNK15IOConditionLock16getInterruptibleEv @@ -1167,11 +1592,13 @@ __ZNK15IODMAController9MetaClass5allocEv __ZNK15IOPMPowerSource12getMetaClassEv __ZNK15IOPMPowerSource9MetaClass5allocEv __ZNK15IORegistryEntry11compareNameEP8OSStringPS1_ +__ZNK15IORegistryEntry11compareNameEP8OSStringR11OSSharedPtrIS0_E __ZNK15IORegistryEntry11getLocationEPK15IORegistryPlane __ZNK15IORegistryEntry11getPropertyEPK8OSString __ZNK15IORegistryEntry11getPropertyEPK8OSSymbol __ZNK15IORegistryEntry11getPropertyEPKc __ZNK15IORegistryEntry12compareNamesEP8OSObjectPP8OSString +__ZNK15IORegistryEntry12compareNamesEP8OSObjectR11OSSharedPtrI8OSStringE __ZNK15IORegistryEntry12copyLocationEPK15IORegistryPlane __ZNK15IORegistryEntry12copyPropertyEPK8OSString __ZNK15IORegistryEntry12copyPropertyEPK8OSSymbol @@ -1181,11 +1608,17 @@ __ZNK15IORegistryEntry13getChildEntryEPK15IORegistryPlane __ZNK15IORegistryEntry14applyToParentsEPFvPS_PvES1_PK15IORegistryPlane __ZNK15IORegistryEntry14copyChildEntryEPK15IORegistryPlane __ZNK15IORegistryEntry14getParentEntryEPK15IORegistryPlane +__ZNK15IORegistryEntry14propertyExistsEPK8OSStringPK15IORegistryPlanej +__ZNK15IORegistryEntry14propertyExistsEPK8OSSymbolPK15IORegistryPlanej +__ZNK15IORegistryEntry14propertyExistsEPKcPK15IORegistryPlanej __ZNK15IORegistryEntry15applyToChildrenEPFvPS_PvES1_PK15IORegistryPlane __ZNK15IORegistryEntry15copyParentEntryEPK15IORegistryPlane __ZNK15IORegistryEntry16getChildIteratorEPK15IORegistryPlane __ZNK15IORegistryEntry16getPathComponentEPcPiPK15IORegistryPlane __ZNK15IORegistryEntry16getPropertyTableEv +__ZNK15IORegistryEntry16propertyHasValueEPK8OSStringPK8OSObjectPK15IORegistryPlanej +__ZNK15IORegistryEntry16propertyHasValueEPK8OSSymbolPK8OSObjectPK15IORegistryPlanej 
+__ZNK15IORegistryEntry16propertyHasValueEPKcPK8OSObjectPK15IORegistryPlanej __ZNK15IORegistryEntry17getParentIteratorEPK15IORegistryPlane __ZNK15IORegistryEntry19serializePropertiesEP11OSSerialize __ZNK15IORegistryEntry20getChildSetReferenceEPK15IORegistryPlane @@ -1205,6 +1638,8 @@ __ZNK15IORegistryEntry9breakLinkEPS_jPK15IORegistryPlane __ZNK15IORegistryPlane12getMetaClassEv __ZNK15IORegistryPlane9MetaClass5allocEv __ZNK15IORegistryPlane9serializeEP11OSSerialize +__ZNK15IOStateReporter12getMetaClassEv +__ZNK15IOStateReporter9MetaClass5allocEv __ZNK16IODMAEventSource12getMetaClassEv __ZNK16IODMAEventSource9MetaClass5allocEv __ZNK16IOPMinformeeList12getMetaClassEv @@ -1212,6 +1647,8 @@ __ZNK16IOPMinformeeList9MetaClass5allocEv __ZNK16IORangeAllocator12getMetaClassEv __ZNK16IORangeAllocator9MetaClass5allocEv __ZNK16IORangeAllocator9serializeEP11OSSerialize +__ZNK16IOSimpleReporter12getMetaClassEv +__ZNK16IOSimpleReporter9MetaClass5allocEv __ZNK17IOBigMemoryCursor12getMetaClassEv __ZNK17IOBigMemoryCursor9MetaClass5allocEv __ZNK17IOPowerConnection12getMetaClassEv @@ -1230,6 +1667,8 @@ __ZNK18IOUserNotification12getMetaClassEv __ZNK18IOUserNotification9MetaClass5allocEv __ZNK18_IOServiceNotifier12getMetaClassEv __ZNK18_IOServiceNotifier9MetaClass5allocEv +__ZNK19IOHistogramReporter12getMetaClassEv +__ZNK19IOHistogramReporter9MetaClass5allocEv __ZNK19IOPMPowerSourceList12getMetaClassEv __ZNK19IOPMPowerSourceList9MetaClass5allocEv __ZNK20IOLittleMemoryCursor12getMetaClassEv @@ -1254,6 +1693,8 @@ __ZNK23IOMultiMemoryDescriptor9MetaClass5allocEv __ZNK24IOBufferMemoryDescriptor11getCapacityEv __ZNK24IOBufferMemoryDescriptor12getMetaClassEv __ZNK24IOBufferMemoryDescriptor9MetaClass5allocEv +__ZNK25IODataQueueDispatchSource12getMetaClassEv +__ZNK25IODataQueueDispatchSource9MetaClass5allocEv __ZNK25IOGeneralMemoryDescriptor12getMetaClassEv __ZNK25IOGeneralMemoryDescriptor9MetaClass5allocEv __ZNK25IOGeneralMemoryDescriptor9serializeEP11OSSerialize @@ -1285,6 +1726,7 @@ __ZNK9IOService8getStateEv __ZNK9IOService9MetaClass5allocEv __ZNK9IOService9getClientEv __ZTV10IONotifier +__ZTV10IOReporter __ZTV10IOWorkLoop __ZTV11IOCatalogue __ZTV11IODataQueue @@ -1292,6 +1734,7 @@ __ZTV11IOMemoryMap __ZTV11IOResources __ZTV12IODMACommand __ZTV12IOPMinformee +__ZTV12IOPlatformIO __ZTV12IORootParent __ZTV12IOUserClient __ZTV13IOCommandGate @@ -1300,14 +1743,19 @@ __ZTV13IOEventSource __ZTV13_IOServiceJob __ZTV14IOMemoryCursor __ZTV14IOPMrootDomain +__ZTV14IOReportLegend __ZTV15IOConditionLock +__ZTV15IODispatchQueue __ZTV15IODMAController __ZTV15IOPMPowerSource __ZTV15IORegistryEntry __ZTV15IORegistryPlane +__ZTV15IOStateReporter +__ZTV16IODispatchSource __ZTV16IODMAEventSource __ZTV16IOPMinformeeList __ZTV16IORangeAllocator +__ZTV16IOSimpleReporter __ZTV17IOBigMemoryCursor __ZTV17IOPolledInterface __ZTV17IOPowerConnection @@ -1317,6 +1765,7 @@ __ZTV18IORegistryIterator __ZTV18IOTimerEventSource __ZTV18IOUserNotification __ZTV18_IOServiceNotifier +__ZTV19IOHistogramReporter __ZTV19IOPMPowerSourceList __ZTV20IOLittleMemoryCursor __ZTV20RootDomainUserClient @@ -1324,18 +1773,24 @@ __ZTV21IOInterruptController __ZTV21IONaturalMemoryCursor __ZTV21IOSubMemoryDescriptor __ZTV22IOInterruptEventSource +__ZTV22IOServiceCompatibility __ZTV22_IOOpenServiceIterator __ZTV23IOMultiMemoryDescriptor __ZTV24IOBufferMemoryDescriptor +__ZTV25IODataQueueDispatchSource __ZTV25IOGeneralMemoryDescriptor +__ZTV25IOInterruptDispatchSource __ZTV26_IOServiceInterestNotifier __ZTV27IOSharedInterruptController 
__ZTV28IOFilterInterruptEventSource __ZTV29IOInterleavedMemoryDescriptor +__ZTV6IOPMGR __ZTV8IOMapper +__ZTV8OSAction __ZTV9IOCommand __ZTV9IOService __ZTVN10IONotifier9MetaClassE +__ZTVN10IOReporter9MetaClassE __ZTVN10IOWorkLoop9MetaClassE __ZTVN11IOCatalogue9MetaClassE __ZTVN11IODataQueue9MetaClassE @@ -1351,14 +1806,17 @@ __ZTVN13IOEventSource9MetaClassE __ZTVN13_IOServiceJob9MetaClassE __ZTVN14IOMemoryCursor9MetaClassE __ZTVN14IOPMrootDomain9MetaClassE +__ZTVN14IOReportLegend9MetaClassE __ZTVN15IOConditionLock9MetaClassE __ZTVN15IODMAController9MetaClassE __ZTVN15IOPMPowerSource9MetaClassE __ZTVN15IORegistryEntry9MetaClassE __ZTVN15IORegistryPlane9MetaClassE +__ZTVN15IOStateReporter9MetaClassE __ZTVN16IODMAEventSource9MetaClassE __ZTVN16IOPMinformeeList9MetaClassE __ZTVN16IORangeAllocator9MetaClassE +__ZTVN16IOSimpleReporter9MetaClassE __ZTVN17IOBigMemoryCursor9MetaClassE __ZTVN17IOPowerConnection9MetaClassE __ZTVN17IOSharedDataQueue9MetaClassE @@ -1367,6 +1825,7 @@ __ZTVN18IORegistryIterator9MetaClassE __ZTVN18IOTimerEventSource9MetaClassE __ZTVN18IOUserNotification9MetaClassE __ZTVN18_IOServiceNotifier9MetaClassE +__ZTVN19IOHistogramReporter9MetaClassE __ZTVN19IOPMPowerSourceList9MetaClassE __ZTVN20IOLittleMemoryCursor9MetaClassE __ZTVN20RootDomainUserClient9MetaClassE @@ -1377,6 +1836,7 @@ __ZTVN22IOInterruptEventSource9MetaClassE __ZTVN22_IOOpenServiceIterator9MetaClassE __ZTVN23IOMultiMemoryDescriptor9MetaClassE __ZTVN24IOBufferMemoryDescriptor9MetaClassE +__ZTVN25IODataQueueDispatchSource9MetaClassE __ZTVN25IOGeneralMemoryDescriptor9MetaClassE __ZTVN26_IOServiceInterestNotifier9MetaClassE __ZTVN27IOSharedInterruptController9MetaClassE @@ -1398,11 +1858,14 @@ _debug_malloc_size _device_close _device_data_action _di_root_image +_gIOAllCPUInitializedKey _gIOAppPowerStateInterest _gIOBusyInterest _gIOCatalogue _gIOClassKey _gIOCommandPoolSizeKey +_gIOCompatibilityMatchKey +_gIOCompatibilityPropertiesKey _gIODTAAPLInterruptsKey _gIODTAddressCellKey _gIODTCompatibleKey @@ -1438,6 +1901,7 @@ _gIONameKey _gIONameMatchKey _gIONameMatchedKey _gIOParentMatchKey +_gIOPathKey _gIOPathMatchKey _gIOPlatformActiveActionKey _gIOPlatformHaltRestartActionKey @@ -1465,265 +1929,7 @@ _registerPrioritySleepWakeInterest _registerSleepWakeInterest _vetoSleepWakeNotification - -__ZN10IOReporter10addChannelEyPKc -__ZN10IOReporter10gMetaClassE -__ZN10IOReporter10legendWithEP7OSArrayS1_19IOReportChannelTypey -__ZN10IOReporter10superClassE -__ZN10IOReporter12createLegendEv -__ZN10IOReporter12lockReporterEv -__ZN10IOReporter12updateReportEP19IOReportChannelListjPvS2_ -__ZN10IOReporter14copyChannelIDsEv -__ZN10IOReporter14unlockReporterEv -__ZN10IOReporter15configureReportEP19IOReportChannelListjPvS2_ -__ZN10IOReporter15getChannelIndexEyPi -__ZN10IOReporter16getElementValuesEi -__ZN10IOReporter16setElementValuesEiP21IOReportElementValuesy -__ZN10IOReporter16updateAllReportsEP5OSSetP19IOReportChannelListjPvS4_ -__ZN10IOReporter17copyElementValuesEiP21IOReportElementValues -__ZN10IOReporter17getChannelIndicesEyPiS0_ -__ZN10IOReporter17handleSwapCleanupEi -__ZN10IOReporter17handleSwapPrepareEi -__ZN10IOReporter18handleCreateLegendEv -__ZN10IOReporter18handleUpdateReportEP19IOReportChannelListjPvS2_ -__ZN10IOReporter18lockReporterConfigEv -__ZN10IOReporter19configureAllReportsEP5OSSetP19IOReportChannelListjPvS4_ -__ZN10IOReporter19updateChannelValuesEi -__ZN10IOReporter19updateReportChannelEiPiP24IOBufferMemoryDescriptor -__ZN10IOReporter20getFirstElementIndexEyPi 
-__ZN10IOReporter20handleAddChannelSwapEyPK8OSSymbol -__ZN10IOReporter20unlockReporterConfigEv -__ZN10IOReporter21handleConfigureReportEP19IOReportChannelListjPvS2_ -__ZN10IOReporter4freeEv -__ZN10IOReporter4initEP9IOService19IOReportChannelTypey -__ZN10IOReporter9MetaClassC1Ev -__ZN10IOReporter9MetaClassC2Ev -__ZN10IOReporter9metaClassE -__ZN10IOReporterC1EPK11OSMetaClass -__ZN10IOReporterC1Ev -__ZN10IOReporterC2EPK11OSMetaClass -__ZN10IOReporterC2Ev -__ZN10IOReporterD0Ev -__ZN10IOReporterD1Ev -__ZN10IOReporterD2Ev -__ZN14IOPMrootDomain12updateReportEP19IOReportChannelListjPvS2_ -__ZN14IOPMrootDomain15configureReportEP19IOReportChannelListjPvS2_ -__ZN14IOReportLegend10gMetaClassE -__ZN14IOReportLegend10superClassE -__ZN14IOReportLegend14addLegendEntryEP12OSDictionaryPKcS3_ -__ZN14IOReportLegend14organizeLegendEP12OSDictionaryPK8OSSymbolS4_ -__ZN14IOReportLegend17addReporterLegendEP10IOReporterPKcS3_ -__ZN14IOReportLegend17addReporterLegendEP9IOServiceP10IOReporterPKcS5_ -__ZN14IOReportLegend4freeEv -__ZN14IOReportLegend4withEP7OSArray -__ZN14IOReportLegend8initWithEP7OSArray -__ZN14IOReportLegend9MetaClassC1Ev -__ZN14IOReportLegend9MetaClassC2Ev -__ZN14IOReportLegend9getLegendEv -__ZN14IOReportLegend9metaClassE -__ZN14IOReportLegendC1EPK11OSMetaClass -__ZN14IOReportLegendC1Ev -__ZN14IOReportLegendC2EPK11OSMetaClass -__ZN14IOReportLegendC2Ev -__ZN14IOReportLegendD0Ev -__ZN14IOReportLegendD1Ev -__ZN14IOReportLegendD2Ev -__ZN15IOStateReporter10gMetaClassE -__ZN15IOStateReporter10setStateIDEyiy -__ZN15IOStateReporter10superClassE -__ZN15IOStateReporter14_getStateValueEyyNS_13valueSelectorE -__ZN15IOStateReporter15setChannelStateEyy -__ZN15IOStateReporter15setChannelStateEyyyy -__ZN15IOStateReporter16_getStateIndicesEyyPiS0_ -__ZN15IOStateReporter16handleSetStateIDEyiy -__ZN15IOStateReporter17handleSwapCleanupEi -__ZN15IOStateReporter17handleSwapPrepareEi -__ZN15IOStateReporter17setStateByIndicesEii -__ZN15IOStateReporter17setStateByIndicesEiiyy -__ZN15IOStateReporter19updateChannelValuesEi -__ZN15IOStateReporter20handleAddChannelSwapEyPK8OSSymbol -__ZN15IOStateReporter20overrideChannelStateEyyyyy -__ZN15IOStateReporter21getStateInTransitionsEyy -__ZN15IOStateReporter21getStateResidencyTimeEyy -__ZN15IOStateReporter21incrementChannelStateEyyyyy -__ZN15IOStateReporter23handleSetStateByIndicesEiiyy -__ZN15IOStateReporter26getStateLastTransitionTimeEyy -__ZN15IOStateReporter29getStateLastChannelUpdateTimeEy -__ZN15IOStateReporter35handleOverrideChannelStateByIndicesEiiyyy -__ZN15IOStateReporter36handleIncrementChannelStateByIndicesEiiyyy -__ZN15IOStateReporter4freeEv -__ZN15IOStateReporter4withEP9IOServicetiy -__ZN15IOStateReporter8initWithEP9IOServicetsy -__ZN15IOStateReporter8setStateEy -__ZN15IOStateReporter8setStateEyyy -__ZN15IOStateReporter9MetaClassC1Ev -__ZN15IOStateReporter9MetaClassC2Ev -__ZN15IOStateReporter9metaClassE -__ZN15IOStateReporterC1EPK11OSMetaClass -__ZN15IOStateReporterC1Ev -__ZN15IOStateReporterC2EPK11OSMetaClass -__ZN15IOStateReporterC2Ev -__ZN15IOStateReporterD0Ev -__ZN15IOStateReporterD1Ev __ZN15IOStateReporterD2Ev -__ZN16IOSimpleReporter10gMetaClassE -__ZN16IOSimpleReporter10superClassE -__ZN16IOSimpleReporter14incrementValueEyx -__ZN16IOSimpleReporter4withEP9IOServicety -__ZN16IOSimpleReporter8getValueEy -__ZN16IOSimpleReporter8initWithEP9IOServicety -__ZN16IOSimpleReporter8setValueEyx -__ZN16IOSimpleReporter9MetaClassC1Ev -__ZN16IOSimpleReporter9MetaClassC2Ev -__ZN16IOSimpleReporter9metaClassE -__ZN16IOSimpleReporterC1EPK11OSMetaClass -__ZN16IOSimpleReporterC1Ev 
-__ZN16IOSimpleReporterC2EPK11OSMetaClass -__ZN16IOSimpleReporterC2Ev -__ZN16IOSimpleReporterD0Ev -__ZN16IOSimpleReporterD1Ev -__ZN16IOSimpleReporterD2Ev -__ZN19IOHistogramReporter10gMetaClassE -__ZN19IOHistogramReporter10superClassE -__ZN19IOHistogramReporter10tallyValueEx -__ZN19IOHistogramReporter18handleCreateLegendEv -__ZN19IOHistogramReporter20overrideBucketValuesEjyxxx -__ZN19IOHistogramReporter4freeEv -__ZN19IOHistogramReporter4withEP9IOServicetyPKcyiP24IOHistogramSegmentConfig -__ZN19IOHistogramReporter8initWithEP9IOServicetyPK8OSSymbolyiP24IOHistogramSegmentConfig -__ZN19IOHistogramReporter9MetaClassC1Ev -__ZN19IOHistogramReporter9MetaClassC2Ev -__ZN19IOHistogramReporter9metaClassE -__ZN19IOHistogramReporterC1EPK11OSMetaClass -__ZN19IOHistogramReporterC1Ev -__ZN19IOHistogramReporterC2EPK11OSMetaClass -__ZN19IOHistogramReporterC2Ev -__ZN19IOHistogramReporterD0Ev -__ZN19IOHistogramReporterD1Ev -__ZN19IOHistogramReporterD2Ev -__ZN9IOService23updatePowerStatesReportEjPvS0_ -__ZN9IOService23updateSimplePowerReportEjPvS0_ -__ZN9IOService26configurePowerStatesReportEjPv -__ZN9IOService26configureSimplePowerReportEjPv -__ZNK10IOReporter12getMetaClassEv -__ZNK10IOReporter9MetaClass5allocEv -__ZNK14IOReportLegend12getMetaClassEv -__ZNK14IOReportLegend9MetaClass5allocEv -__ZNK15IOStateReporter12getMetaClassEv -__ZNK15IOStateReporter9MetaClass5allocEv -__ZNK16IOSimpleReporter12getMetaClassEv -__ZNK16IOSimpleReporter9MetaClass5allocEv -__ZNK19IOHistogramReporter12getMetaClassEv -__ZNK19IOHistogramReporter9MetaClass5allocEv -__ZTV10IOReporter -__ZTV14IOReportLegend -__ZTV15IOStateReporter -__ZTV16IOSimpleReporter -__ZTV19IOHistogramReporter -__ZTVN10IOReporter9MetaClassE -__ZTVN14IOReportLegend9MetaClassE -__ZTVN15IOStateReporter9MetaClassE -__ZTVN16IOSimpleReporter9MetaClassE -__ZTVN19IOHistogramReporter9MetaClassE -__ZN10IOWorkLoop14runActionBlockEU13block_pointerFivE -__ZN13IOCommandGate14runActionBlockEU13block_pointerFivE -__ZN13IOEventSource14setActionBlockEU13block_pointerFivE -__ZN18IOTimerEventSource16timerEventSourceEjP8OSObjectU13block_pointerFvPS_E -__ZN22IOInterruptEventSource20interruptEventSourceEP8OSObjectP9IOServiceiU13block_pointerFvPS_iE -__ZN28IOFilterInterruptEventSource26filterInterruptEventSourceEP8OSObjectP9IOServiceiU13block_pointerFvP22IOInterruptEventSourceiEU13block_pointerFbPS_E -__ZN9IOService16registerInterestEPK8OSSymbolU13block_pointerFijPS_PvmE -__ZN9IOService22registerInterruptBlockEiP8OSObjectU13block_pointerFvPS_iE -__ZNK13IOEventSource14getActionBlockEU13block_pointerFivE -__ZN13IOEventSource9setRefconEPv -__ZNK13IOEventSource9getRefconEv - -__ZN8OSAction17SetAbortedHandlerEU13block_pointerFvvE - -__ZN15IODispatchQueue9metaClassE -__ZN16IODispatchSource9metaClassE -__ZN25IOInterruptDispatchSource9metaClassE -__ZN9IOService5StartEPS_PFiP15OSMetaClassBase5IORPCE - -__ZN25IODataQueueDispatchSource10CopyMemoryEPP18IOMemoryDescriptorPFiP15OSMetaClassBase5IORPCE -__ZN25IODataQueueDispatchSource10gMetaClassE -__ZN25IODataQueueDispatchSource10superClassE -__ZN25IODataQueueDispatchSource12DataServicedEP8OSActionPFiP15OSMetaClassBase5IORPCE -__ZN25IODataQueueDispatchSource13DataAvailableEP8OSActionPFiP15OSMetaClassBase5IORPCE -__ZN25IODataQueueDispatchSource15IsDataAvailableEv -__ZN25IODataQueueDispatchSource16SendDataServicedEv -__ZN25IODataQueueDispatchSource17SendDataAvailableEv -__ZN25IODataQueueDispatchSource22SetDataServicedHandlerEP8OSActionPFiP15OSMetaClassBase5IORPCE 
-__ZN25IODataQueueDispatchSource23CopyDataServicedHandlerEPP8OSActionPFiP15OSMetaClassBase5IORPCE -__ZN25IODataQueueDispatchSource23SetDataAvailableHandlerEP8OSActionPFiP15OSMetaClassBase5IORPCE -__ZN25IODataQueueDispatchSource24CopyDataAvailableHandlerEPP8OSActionPFiP15OSMetaClassBase5IORPCE -__ZN25IODataQueueDispatchSource4PeekEU13block_pointerFvPKvmE -__ZN25IODataQueueDispatchSource4freeEv -__ZN25IODataQueueDispatchSource4initEv -__ZN25IODataQueueDispatchSource6CreateEyP15IODispatchQueuePPS_ -__ZN25IODataQueueDispatchSource7DequeueEU13block_pointerFvPKvmE -__ZN25IODataQueueDispatchSource7EnqueueEjU13block_pointerFvPvmE -__ZN25IODataQueueDispatchSource8DispatchE5IORPC -__ZN25IODataQueueDispatchSource9MetaClass8DispatchE5IORPC -__ZN25IODataQueueDispatchSource9MetaClassC1Ev -__ZN25IODataQueueDispatchSource9MetaClassC2Ev -__ZN25IODataQueueDispatchSource9_DispatchEPS_5IORPC -__ZN25IODataQueueDispatchSource9metaClassE -__ZN25IODataQueueDispatchSourceC1EPK11OSMetaClass -__ZN25IODataQueueDispatchSourceC1Ev -__ZN25IODataQueueDispatchSourceC2EPK11OSMetaClass -__ZN25IODataQueueDispatchSourceC2Ev -__ZN25IODataQueueDispatchSourceD0Ev -__ZN25IODataQueueDispatchSourceD1Ev -__ZN25IODataQueueDispatchSourceD2Ev -__ZNK25IODataQueueDispatchSource12getMetaClassEv -__ZNK25IODataQueueDispatchSource9MetaClass5allocEv -__ZTV25IODataQueueDispatchSource -__ZTVN25IODataQueueDispatchSource9MetaClassE -__ZN25IODataQueueDispatchSource19DequeueWithCoalesceEPbU13block_pointerFvPKvmE -__ZN25IODataQueueDispatchSource19EnqueueWithCoalesceEjPbU13block_pointerFvPvmE - -__ZN11IOMemoryMap17_CopyState_InvokeE5IORPCP15OSMetaClassBasePFiS2_P24_IOMemoryMapPrivateStateE -__ZN12IOUserClient22AsyncCompletion_InvokeE5IORPCP15OSMetaClassBasePFvS2_P8OSActioniPKyjE -__ZN12IOUserClient22_ExternalMethod_InvokeE5IORPCP15OSMetaClassBasePFiS2_yPKyjP6OSDataP18IOMemoryDescriptorPyPjyPS6_S8_P8OSActionE -__ZN12IOUserClient30CopyClientMemoryForType_InvokeE5IORPCP15OSMetaClassBasePFiS2_yPyPP18IOMemoryDescriptorE -__ZN12IOUserServer11Exit_InvokeE5IORPCP15OSMetaClassBasePFiS2_PKcE -__ZN12IOUserServer13Create_InvokeE5IORPCPFiPKcyyPPS_E -__ZN12IOUserServer17LoadModule_InvokeE5IORPCP15OSMetaClassBasePFiS2_PKcE -__ZN15IODispatchQueue13Create_InvokeE5IORPCPFiPKcyyPPS_E -__ZN15IODispatchQueue14SetPort_InvokeE5IORPCP15OSMetaClassBasePFiS2_P8ipc_portE -__ZN16IODispatchSource13Cancel_InvokeE5IORPCP15OSMetaClassBasePFiS2_U13block_pointerFvvEE -__ZN16IODispatchSource16SetEnable_InvokeE5IORPCP15OSMetaClassBasePFiS2_bE -__ZN16IODispatchSource19CheckForWork_InvokeE5IORPCP15OSMetaClassBasePFiS2_S0_bE -__ZN16IODispatchSource30SetEnableWithCompletion_InvokeE5IORPCP15OSMetaClassBasePFiS2_bU13block_pointerFvvEE -__ZN18IOMemoryDescriptor17_CopyState_InvokeE5IORPCP15OSMetaClassBasePFiS2_P17_IOMDPrivateStateE -__ZN18IOMemoryDescriptor20PrepareForDMA_InvokeE5IORPCP15OSMetaClassBasePFiS2_yP9IOServiceyyPyS5_PjP16IOAddressSegmentE -__ZN24IOBufferMemoryDescriptor13Create_InvokeE5IORPCPFiyyyPPS_E -__ZN24IOBufferMemoryDescriptor16SetLength_InvokeE5IORPCP15OSMetaClassBasePFiS2_yE -__ZN25IODataQueueDispatchSource13Create_InvokeE5IORPCPFiyP15IODispatchQueuePPS_E -__ZN25IODataQueueDispatchSource17CopyMemory_InvokeE5IORPCP15OSMetaClassBasePFiS2_PP18IOMemoryDescriptorE -__ZN25IODataQueueDispatchSource19DataServiced_InvokeE5IORPCP15OSMetaClassBasePFvS2_P8OSActionE -__ZN25IODataQueueDispatchSource20DataAvailable_InvokeE5IORPCP15OSMetaClassBasePFvS2_P8OSActionE -__ZN25IODataQueueDispatchSource29SetDataServicedHandler_InvokeE5IORPCP15OSMetaClassBasePFiS2_P8OSActionE 
-__ZN25IODataQueueDispatchSource30CopyDataServicedHandler_InvokeE5IORPCP15OSMetaClassBasePFiS2_PP8OSActionE -__ZN25IODataQueueDispatchSource30SetDataAvailableHandler_InvokeE5IORPCP15OSMetaClassBasePFiS2_P8OSActionE -__ZN25IODataQueueDispatchSource31CopyDataAvailableHandler_InvokeE5IORPCP15OSMetaClassBasePFiS2_PP8OSActionE -__ZN25IOInterruptDispatchSource13Create_InvokeE5IORPCPFiP9IOServicejP15IODispatchQueuePPS_E -__ZN25IOInterruptDispatchSource17SetHandler_InvokeE5IORPCP15OSMetaClassBasePFiS2_P8OSActionE -__ZN25IOInterruptDispatchSource24InterruptOccurred_InvokeE5IORPCP15OSMetaClassBasePFvS2_P8OSActionyyE -__ZN8OSAction13Create_InvokeE5IORPCPFiP8OSObjectyymPPS_E -__ZN8OSObject23SetDispatchQueue_InvokeE5IORPCP15OSMetaClassBasePFiS2_PKcP15IODispatchQueueE -__ZN8OSObject24CopyDispatchQueue_InvokeE5IORPCP15OSMetaClassBasePFiS2_PKcPP15IODispatchQueueE -__ZN9IOService11Stop_InvokeE5IORPCP15OSMetaClassBasePFiS2_PS_E -__ZN9IOService12Start_InvokeE5IORPCP15OSMetaClassBasePFiS2_PS_E -__ZN9IOService13Create_InvokeE5IORPCP15OSMetaClassBasePFiS2_PS_PKcPS3_E -__ZN9IOService14SetName_InvokeE5IORPCP15OSMetaClassBasePFiS2_PKcE -__ZN9IOService20NewUserClient_InvokeE5IORPCP15OSMetaClassBasePFiS2_jPP12IOUserClientE -__ZN9IOService20SetPowerState_InvokeE5IORPCP15OSMetaClassBasePFiS2_jE -__ZN9IOService20SetProperties_InvokeE5IORPCP15OSMetaClassBasePFiS2_P12OSDictionaryE -__ZN9IOService21CopyProperties_InvokeE5IORPCP15OSMetaClassBasePFiS2_PP12OSDictionaryE -__ZN9IOService21SearchProperty_InvokeE5IORPCP15OSMetaClassBasePFiS2_PKcS4_yPP8OSObjectE -__ZN9IOService22RegisterService_InvokeE5IORPCP15OSMetaClassBasePFiS2_E -__ZN9IOService23ChangePowerState_InvokeE5IORPCP15OSMetaClassBasePFiS2_jE -__ZN9IOService25GetRegistryEntryID_InvokeE5IORPCP15OSMetaClassBasePFiS2_PyE - -__ZN18IOMemoryDescriptor20CreateMapping_InvokeE5IORPCP15OSMetaClassBasePFiS2_yyyyyPP11IOMemoryMapE +__ZN8OSAction10superClassE +__ZN12IOPlatformIO10superClassE +__ZN6IOPMGR10superClassE diff --git a/config/IOKit.x86_64.MacOSX.exports b/config/IOKit.x86_64.MacOSX.exports new file mode 100644 index 000000000..23e9ec845 --- /dev/null +++ b/config/IOKit.x86_64.MacOSX.exports @@ -0,0 +1,241 @@ +__ZN10IOWorkLoop20_RESERVEDIOWorkLoop0Ev +__ZN10IOWorkLoop20_RESERVEDIOWorkLoop1Ev +__ZN10IOWorkLoop20_RESERVEDIOWorkLoop2Ev +__ZN10IOWorkLoop20_RESERVEDIOWorkLoop3Ev +__ZN10IOWorkLoop20_RESERVEDIOWorkLoop4Ev +__ZN10IOWorkLoop20_RESERVEDIOWorkLoop5Ev +__ZN10IOWorkLoop20_RESERVEDIOWorkLoop6Ev +__ZN10IOWorkLoop20_RESERVEDIOWorkLoop7Ev +__ZN11IOMemoryMap21_RESERVEDIOMemoryMap0Ev +__ZN11IOMemoryMap21_RESERVEDIOMemoryMap1Ev +__ZN11IOMemoryMap21_RESERVEDIOMemoryMap2Ev +__ZN11IOMemoryMap21_RESERVEDIOMemoryMap3Ev +__ZN11IOMemoryMap21_RESERVEDIOMemoryMap4Ev +__ZN11IOMemoryMap21_RESERVEDIOMemoryMap5Ev +__ZN11IOMemoryMap21_RESERVEDIOMemoryMap6Ev +__ZN11IOMemoryMap21_RESERVEDIOMemoryMap7Ev +__ZN12IODMACommand22_RESERVEDIODMACommand7Ev +__ZN12IODMACommand22_RESERVEDIODMACommand8Ev +__ZN12IODMACommand22_RESERVEDIODMACommand9Ev +__ZN12IODMACommand23_RESERVEDIODMACommand10Ev +__ZN12IODMACommand23_RESERVEDIODMACommand11Ev +__ZN12IODMACommand23_RESERVEDIODMACommand12Ev +__ZN12IODMACommand23_RESERVEDIODMACommand13Ev +__ZN12IODMACommand23_RESERVEDIODMACommand14Ev +__ZN12IODMACommand23_RESERVEDIODMACommand15Ev +__ZN12IOUserClient22_RESERVEDIOUserClient0Ev +__ZN12IOUserClient22_RESERVEDIOUserClient1Ev +__ZN12IOUserClient22_RESERVEDIOUserClient2Ev +__ZN12IOUserClient22_RESERVEDIOUserClient3Ev +__ZN12IOUserClient22_RESERVEDIOUserClient4Ev +__ZN12IOUserClient22_RESERVEDIOUserClient5Ev 
+__ZN12IOUserClient22_RESERVEDIOUserClient6Ev +__ZN12IOUserClient22_RESERVEDIOUserClient7Ev +__ZN12IOUserClient22_RESERVEDIOUserClient8Ev +__ZN12IOUserClient22_RESERVEDIOUserClient9Ev +__ZN12IOUserClient23_RESERVEDIOUserClient10Ev +__ZN12IOUserClient23_RESERVEDIOUserClient11Ev +__ZN12IOUserClient23_RESERVEDIOUserClient12Ev +__ZN12IOUserClient23_RESERVEDIOUserClient13Ev +__ZN12IOUserClient23_RESERVEDIOUserClient14Ev +__ZN12IOUserClient23_RESERVEDIOUserClient15Ev +__ZN13IOCommandGate23_RESERVEDIOCommandGate0Ev +__ZN13IOCommandGate23_RESERVEDIOCommandGate1Ev +__ZN13IOCommandGate23_RESERVEDIOCommandGate2Ev +__ZN13IOCommandGate23_RESERVEDIOCommandGate3Ev +__ZN13IOCommandGate23_RESERVEDIOCommandGate4Ev +__ZN13IOCommandGate23_RESERVEDIOCommandGate5Ev +__ZN13IOCommandGate23_RESERVEDIOCommandGate6Ev +__ZN13IOCommandGate23_RESERVEDIOCommandGate7Ev +__ZN13IOCommandPool23_RESERVEDIOCommandPool0Ev +__ZN13IOCommandPool23_RESERVEDIOCommandPool1Ev +__ZN13IOCommandPool23_RESERVEDIOCommandPool2Ev +__ZN13IOCommandPool23_RESERVEDIOCommandPool3Ev +__ZN13IOCommandPool23_RESERVEDIOCommandPool4Ev +__ZN13IOCommandPool23_RESERVEDIOCommandPool5Ev +__ZN13IOCommandPool23_RESERVEDIOCommandPool6Ev +__ZN13IOCommandPool23_RESERVEDIOCommandPool7Ev +__ZN13IOEventSource23_RESERVEDIOEventSource0Ev +__ZN13IOEventSource23_RESERVEDIOEventSource1Ev +__ZN13IOEventSource23_RESERVEDIOEventSource2Ev +__ZN13IOEventSource23_RESERVEDIOEventSource3Ev +__ZN13IOEventSource23_RESERVEDIOEventSource4Ev +__ZN13IOEventSource23_RESERVEDIOEventSource5Ev +__ZN13IOEventSource23_RESERVEDIOEventSource6Ev +__ZN13IOEventSource23_RESERVEDIOEventSource7Ev +__ZN15IORegistryEntry25_RESERVEDIORegistryEntry0Ev +__ZN15IORegistryEntry25_RESERVEDIORegistryEntry1Ev +__ZN15IORegistryEntry25_RESERVEDIORegistryEntry2Ev +__ZN15IORegistryEntry25_RESERVEDIORegistryEntry3Ev +__ZN15IORegistryEntry25_RESERVEDIORegistryEntry4Ev +__ZN15IORegistryEntry25_RESERVEDIORegistryEntry5Ev +__ZN15IORegistryEntry25_RESERVEDIORegistryEntry6Ev +__ZN15IORegistryEntry25_RESERVEDIORegistryEntry7Ev +__ZN15IORegistryEntry25_RESERVEDIORegistryEntry8Ev +__ZN15IORegistryEntry25_RESERVEDIORegistryEntry9Ev +__ZN15IORegistryEntry26_RESERVEDIORegistryEntry10Ev +__ZN15IORegistryEntry26_RESERVEDIORegistryEntry11Ev +__ZN15IORegistryEntry26_RESERVEDIORegistryEntry12Ev +__ZN15IORegistryEntry26_RESERVEDIORegistryEntry13Ev +__ZN15IORegistryEntry26_RESERVEDIORegistryEntry14Ev +__ZN15IORegistryEntry26_RESERVEDIORegistryEntry15Ev +__ZN15IORegistryEntry26_RESERVEDIORegistryEntry16Ev +__ZN15IORegistryEntry26_RESERVEDIORegistryEntry17Ev +__ZN15IORegistryEntry26_RESERVEDIORegistryEntry18Ev +__ZN15IORegistryEntry26_RESERVEDIORegistryEntry19Ev +__ZN15IORegistryEntry26_RESERVEDIORegistryEntry20Ev +__ZN15IORegistryEntry26_RESERVEDIORegistryEntry21Ev +__ZN15IORegistryEntry26_RESERVEDIORegistryEntry22Ev +__ZN15IORegistryEntry26_RESERVEDIORegistryEntry23Ev +__ZN15IORegistryEntry26_RESERVEDIORegistryEntry24Ev +__ZN15IORegistryEntry26_RESERVEDIORegistryEntry25Ev +__ZN15IORegistryEntry26_RESERVEDIORegistryEntry26Ev +__ZN15IORegistryEntry26_RESERVEDIORegistryEntry27Ev +__ZN15IORegistryEntry26_RESERVEDIORegistryEntry28Ev +__ZN15IORegistryEntry26_RESERVEDIORegistryEntry29Ev +__ZN15IORegistryEntry26_RESERVEDIORegistryEntry30Ev +__ZN15IORegistryEntry26_RESERVEDIORegistryEntry31Ev +__ZN17IOPolledInterface27_RESERVEDIOPolledInterface1Ev +__ZN17IOPolledInterface27_RESERVEDIOPolledInterface2Ev +__ZN17IOPolledInterface27_RESERVEDIOPolledInterface3Ev +__ZN17IOPolledInterface27_RESERVEDIOPolledInterface4Ev 
+__ZN17IOPolledInterface27_RESERVEDIOPolledInterface5Ev +__ZN17IOPolledInterface27_RESERVEDIOPolledInterface6Ev +__ZN17IOPolledInterface27_RESERVEDIOPolledInterface7Ev +__ZN17IOPolledInterface27_RESERVEDIOPolledInterface8Ev +__ZN17IOPolledInterface27_RESERVEDIOPolledInterface9Ev +__ZN17IOPolledInterface28_RESERVEDIOPolledInterface10Ev +__ZN17IOPolledInterface28_RESERVEDIOPolledInterface11Ev +__ZN17IOPolledInterface28_RESERVEDIOPolledInterface12Ev +__ZN17IOPolledInterface28_RESERVEDIOPolledInterface13Ev +__ZN17IOPolledInterface28_RESERVEDIOPolledInterface14Ev +__ZN17IOPolledInterface28_RESERVEDIOPolledInterface15Ev +__ZN17IOSharedDataQueue27_RESERVEDIOSharedDataQueue0Ev +__ZN17IOSharedDataQueue27_RESERVEDIOSharedDataQueue1Ev +__ZN17IOSharedDataQueue27_RESERVEDIOSharedDataQueue2Ev +__ZN17IOSharedDataQueue27_RESERVEDIOSharedDataQueue3Ev +__ZN17IOSharedDataQueue27_RESERVEDIOSharedDataQueue4Ev +__ZN17IOSharedDataQueue27_RESERVEDIOSharedDataQueue5Ev +__ZN17IOSharedDataQueue27_RESERVEDIOSharedDataQueue6Ev +__ZN17IOSharedDataQueue27_RESERVEDIOSharedDataQueue7Ev +__ZN18IOMemoryDescriptor28_RESERVEDIOMemoryDescriptor1Ev +__ZN18IOMemoryDescriptor28_RESERVEDIOMemoryDescriptor2Ev +__ZN18IOMemoryDescriptor28_RESERVEDIOMemoryDescriptor3Ev +__ZN18IOMemoryDescriptor28_RESERVEDIOMemoryDescriptor4Ev +__ZN18IOMemoryDescriptor28_RESERVEDIOMemoryDescriptor5Ev +__ZN18IOMemoryDescriptor28_RESERVEDIOMemoryDescriptor6Ev +__ZN18IOMemoryDescriptor28_RESERVEDIOMemoryDescriptor7Ev +__ZN18IOMemoryDescriptor28_RESERVEDIOMemoryDescriptor8Ev +__ZN18IOMemoryDescriptor28_RESERVEDIOMemoryDescriptor9Ev +__ZN18IOMemoryDescriptor29_RESERVEDIOMemoryDescriptor10Ev +__ZN18IOMemoryDescriptor29_RESERVEDIOMemoryDescriptor11Ev +__ZN18IOMemoryDescriptor29_RESERVEDIOMemoryDescriptor12Ev +__ZN18IOMemoryDescriptor29_RESERVEDIOMemoryDescriptor13Ev +__ZN18IOMemoryDescriptor29_RESERVEDIOMemoryDescriptor14Ev +__ZN18IOMemoryDescriptor29_RESERVEDIOMemoryDescriptor15Ev +__ZN18IOTimerEventSource28_RESERVEDIOTimerEventSource3Ev +__ZN18IOTimerEventSource28_RESERVEDIOTimerEventSource4Ev +__ZN18IOTimerEventSource28_RESERVEDIOTimerEventSource5Ev +__ZN18IOTimerEventSource28_RESERVEDIOTimerEventSource6Ev +__ZN18IOTimerEventSource28_RESERVEDIOTimerEventSource7Ev +__ZN21IOInterruptController31_RESERVEDIOInterruptController3Ev +__ZN21IOInterruptController31_RESERVEDIOInterruptController4Ev +__ZN21IOInterruptController31_RESERVEDIOInterruptController5Ev +__ZN22IOInterruptEventSource32_RESERVEDIOInterruptEventSource0Ev +__ZN22IOInterruptEventSource32_RESERVEDIOInterruptEventSource1Ev +__ZN22IOInterruptEventSource32_RESERVEDIOInterruptEventSource2Ev +__ZN22IOInterruptEventSource32_RESERVEDIOInterruptEventSource3Ev +__ZN22IOInterruptEventSource32_RESERVEDIOInterruptEventSource4Ev +__ZN22IOInterruptEventSource32_RESERVEDIOInterruptEventSource5Ev +__ZN22IOInterruptEventSource32_RESERVEDIOInterruptEventSource6Ev +__ZN22IOInterruptEventSource32_RESERVEDIOInterruptEventSource7Ev +__ZN24IOBufferMemoryDescriptor34_RESERVEDIOBufferMemoryDescriptor0Ev +__ZN24IOBufferMemoryDescriptor34_RESERVEDIOBufferMemoryDescriptor1Ev +__ZN24IOBufferMemoryDescriptor34_RESERVEDIOBufferMemoryDescriptor2Ev +__ZN24IOBufferMemoryDescriptor34_RESERVEDIOBufferMemoryDescriptor3Ev +__ZN24IOBufferMemoryDescriptor34_RESERVEDIOBufferMemoryDescriptor4Ev +__ZN24IOBufferMemoryDescriptor34_RESERVEDIOBufferMemoryDescriptor5Ev +__ZN24IOBufferMemoryDescriptor34_RESERVEDIOBufferMemoryDescriptor6Ev +__ZN24IOBufferMemoryDescriptor34_RESERVEDIOBufferMemoryDescriptor7Ev 
+__ZN24IOBufferMemoryDescriptor34_RESERVEDIOBufferMemoryDescriptor8Ev +__ZN24IOBufferMemoryDescriptor34_RESERVEDIOBufferMemoryDescriptor9Ev +__ZN24IOBufferMemoryDescriptor35_RESERVEDIOBufferMemoryDescriptor10Ev +__ZN24IOBufferMemoryDescriptor35_RESERVEDIOBufferMemoryDescriptor11Ev +__ZN24IOBufferMemoryDescriptor35_RESERVEDIOBufferMemoryDescriptor12Ev +__ZN24IOBufferMemoryDescriptor35_RESERVEDIOBufferMemoryDescriptor13Ev +__ZN24IOBufferMemoryDescriptor35_RESERVEDIOBufferMemoryDescriptor14Ev +__ZN24IOBufferMemoryDescriptor35_RESERVEDIOBufferMemoryDescriptor15Ev +__ZN27IOSharedInterruptController37_RESERVEDIOSharedInterruptController0Ev +__ZN27IOSharedInterruptController37_RESERVEDIOSharedInterruptController1Ev +__ZN27IOSharedInterruptController37_RESERVEDIOSharedInterruptController2Ev +__ZN27IOSharedInterruptController37_RESERVEDIOSharedInterruptController3Ev +__ZN28IOFilterInterruptEventSource38_RESERVEDIOFilterInterruptEventSource0Ev +__ZN28IOFilterInterruptEventSource38_RESERVEDIOFilterInterruptEventSource1Ev +__ZN28IOFilterInterruptEventSource38_RESERVEDIOFilterInterruptEventSource2Ev +__ZN28IOFilterInterruptEventSource38_RESERVEDIOFilterInterruptEventSource3Ev +__ZN28IOFilterInterruptEventSource38_RESERVEDIOFilterInterruptEventSource4Ev +__ZN28IOFilterInterruptEventSource38_RESERVEDIOFilterInterruptEventSource5Ev +__ZN28IOFilterInterruptEventSource38_RESERVEDIOFilterInterruptEventSource6Ev +__ZN28IOFilterInterruptEventSource38_RESERVEDIOFilterInterruptEventSource7Ev +__ZN8IOMapper18_RESERVEDIOMapper0Ev +__ZN8IOMapper18_RESERVEDIOMapper1Ev +__ZN8IOMapper18_RESERVEDIOMapper2Ev +__ZN8IOMapper18_RESERVEDIOMapper3Ev +__ZN8IOMapper18_RESERVEDIOMapper4Ev +__ZN8IOMapper18_RESERVEDIOMapper5Ev +__ZN8IOMapper18_RESERVEDIOMapper6Ev +__ZN8IOMapper18_RESERVEDIOMapper7Ev +__ZN8IOMapper18_RESERVEDIOMapper8Ev +__ZN8IOMapper18_RESERVEDIOMapper9Ev +__ZN8IOMapper19_RESERVEDIOMapper10Ev +__ZN8IOMapper19_RESERVEDIOMapper11Ev +__ZN8IOMapper19_RESERVEDIOMapper12Ev +__ZN8IOMapper19_RESERVEDIOMapper13Ev +__ZN8IOMapper19_RESERVEDIOMapper14Ev +__ZN8IOMapper19_RESERVEDIOMapper15Ev +__ZN9IOService19_RESERVEDIOService2Ev +__ZN9IOService19_RESERVEDIOService3Ev +__ZN9IOService19_RESERVEDIOService4Ev +__ZN9IOService19_RESERVEDIOService5Ev +__ZN9IOService19_RESERVEDIOService6Ev +__ZN9IOService19_RESERVEDIOService7Ev +__ZN9IOService19_RESERVEDIOService8Ev +__ZN9IOService19_RESERVEDIOService9Ev +__ZN9IOService20_RESERVEDIOService10Ev +__ZN9IOService20_RESERVEDIOService11Ev +__ZN9IOService20_RESERVEDIOService12Ev +__ZN9IOService20_RESERVEDIOService13Ev +__ZN9IOService20_RESERVEDIOService14Ev +__ZN9IOService20_RESERVEDIOService15Ev +__ZN9IOService20_RESERVEDIOService16Ev +__ZN9IOService20_RESERVEDIOService17Ev +__ZN9IOService20_RESERVEDIOService18Ev +__ZN9IOService20_RESERVEDIOService19Ev +__ZN9IOService20_RESERVEDIOService20Ev +__ZN9IOService20_RESERVEDIOService21Ev +__ZN9IOService20_RESERVEDIOService22Ev +__ZN9IOService20_RESERVEDIOService23Ev +__ZN9IOService20_RESERVEDIOService24Ev +__ZN9IOService20_RESERVEDIOService25Ev +__ZN9IOService20_RESERVEDIOService26Ev +__ZN9IOService20_RESERVEDIOService27Ev +__ZN9IOService20_RESERVEDIOService28Ev +__ZN9IOService20_RESERVEDIOService29Ev +__ZN9IOService20_RESERVEDIOService30Ev +__ZN9IOService20_RESERVEDIOService31Ev +__ZN9IOService20_RESERVEDIOService32Ev +__ZN9IOService20_RESERVEDIOService33Ev +__ZN9IOService20_RESERVEDIOService34Ev +__ZN9IOService20_RESERVEDIOService35Ev +__ZN9IOService20_RESERVEDIOService36Ev +__ZN9IOService20_RESERVEDIOService37Ev 
+__ZN9IOService20_RESERVEDIOService38Ev +__ZN9IOService20_RESERVEDIOService39Ev +__ZN9IOService20_RESERVEDIOService40Ev +__ZN9IOService20_RESERVEDIOService41Ev +__ZN9IOService20_RESERVEDIOService42Ev +__ZN9IOService20_RESERVEDIOService43Ev +__ZN9IOService20_RESERVEDIOService44Ev +__ZN9IOService20_RESERVEDIOService45Ev +__ZN9IOService20_RESERVEDIOService46Ev +__ZN9IOService20_RESERVEDIOService47Ev diff --git a/config/IOKit.x86_64.exports b/config/IOKit.x86_64.exports index 91e399200..5d81bf5c7 100644 --- a/config/IOKit.x86_64.exports +++ b/config/IOKit.x86_64.exports @@ -1,31 +1,22 @@ -__ZN11IOCatalogue10addDriversEP7OSArrayb -__ZN11IOCatalogue13removeDriversEP12OSDictionaryb -__ZN11IOCatalogue13startMatchingEP12OSDictionary - -_IOLockSleep_darwin14 _IOLockSleepDeadline_darwin14 +_IOLockSleep_darwin14 _IOLockWakeup_darwin14 _IOOFPathMatching -_IOSpinUnlock -_IOTrySpinLock +_IOSpinUnlock:_OSSpinLockUnlock +_IOTrySpinLock:_OSSpinLockTry __Z16IODTFindSlotNameP15IORegistryEntryj __Z16IODTSetResolvingP15IORegistryEntryPFijPjS1_EPFvS0_PhS4_S4_E __Z17IODTGetCellCountsP15IORegistryEntryPjS1_ __Z22IODTResolveAddressCellP15IORegistryEntryPjPyS2_ __Z23IODTFindMatchingEntriesP15IORegistryEntryjPKc __ZN10IOWorkLoop19workLoopWithOptionsEj -__ZN10IOWorkLoop20_RESERVEDIOWorkLoop0Ev -__ZN10IOWorkLoop20_RESERVEDIOWorkLoop1Ev -__ZN10IOWorkLoop20_RESERVEDIOWorkLoop2Ev -__ZN10IOWorkLoop20_RESERVEDIOWorkLoop3Ev -__ZN10IOWorkLoop20_RESERVEDIOWorkLoop4Ev -__ZN10IOWorkLoop20_RESERVEDIOWorkLoop5Ev -__ZN10IOWorkLoop20_RESERVEDIOWorkLoop6Ev -__ZN10IOWorkLoop20_RESERVEDIOWorkLoop7Ev __ZN10IOWorkLoop9sleepGateEPvj __ZN10IOWorkLoop9sleepGateEPvyj +__ZN11IOCatalogue10addDriversEP7OSArrayb __ZN11IOCatalogue11findDriversEP12OSDictionaryPi __ZN11IOCatalogue11findDriversEP9IOServicePi +__ZN11IOCatalogue13removeDriversEP12OSDictionaryb +__ZN11IOCatalogue13startMatchingEP12OSDictionary __ZN11IODataQueue11withEntriesEjj __ZN11IODataQueue12withCapacityEj __ZN11IODataQueue15initWithEntriesEjj @@ -33,14 +24,6 @@ __ZN11IODataQueue16initWithCapacityEj __ZN11IODataQueue7enqueueEPvj __ZN11IOMemoryMap18getPhysicalSegmentEyPyj __ZN11IOMemoryMap19setMemoryDescriptorEP18IOMemoryDescriptory -__ZN11IOMemoryMap21_RESERVEDIOMemoryMap0Ev -__ZN11IOMemoryMap21_RESERVEDIOMemoryMap1Ev -__ZN11IOMemoryMap21_RESERVEDIOMemoryMap2Ev -__ZN11IOMemoryMap21_RESERVEDIOMemoryMap3Ev -__ZN11IOMemoryMap21_RESERVEDIOMemoryMap4Ev -__ZN11IOMemoryMap21_RESERVEDIOMemoryMap5Ev -__ZN11IOMemoryMap21_RESERVEDIOMemoryMap6Ev -__ZN11IOMemoryMap21_RESERVEDIOMemoryMap7Ev __ZN11IOMemoryMap8redirectEP18IOMemoryDescriptorjy __ZN12IODMACommand11OutputBig32EPS_NS_9Segment64EPvj __ZN12IODMACommand11OutputBig64EPS_NS_9Segment64EPvj @@ -53,19 +36,10 @@ __ZN12IODMACommand15genIOVMSegmentsEPFbPS_NS_9Segment64EPvjEPyS2_Pj __ZN12IODMACommand15genIOVMSegmentsEPyPvPj __ZN12IODMACommand16createCopyBufferEjy __ZN12IODMACommand17withSpecificationEPFbPS_NS_9Segment64EPvjEPKNS_14SegmentOptionsEjP8IOMapperS2_ -__ZN12IODMACommand21initWithSpecificationEPFbPS_NS_9Segment64EPvjEPKNS_14SegmentOptionsEjP8IOMapperS2_ -__ZN12IODMACommand24prepareWithSpecificationEPFbPS_NS_9Segment64EPvjEPKNS_14SegmentOptionsEjP8IOMapperyybb __ZN12IODMACommand17withSpecificationEPFbPS_NS_9Segment64EPvjEhyNS_14MappingOptionsEyjP8IOMapperS2_ +__ZN12IODMACommand21initWithSpecificationEPFbPS_NS_9Segment64EPvjEPKNS_14SegmentOptionsEjP8IOMapperS2_ __ZN12IODMACommand21initWithSpecificationEPFbPS_NS_9Segment64EPvjEhyNS_14MappingOptionsEyjP8IOMapperS2_ -__ZN12IODMACommand22_RESERVEDIODMACommand7Ev 
-__ZN12IODMACommand22_RESERVEDIODMACommand8Ev -__ZN12IODMACommand22_RESERVEDIODMACommand9Ev -__ZN12IODMACommand23_RESERVEDIODMACommand10Ev -__ZN12IODMACommand23_RESERVEDIODMACommand11Ev -__ZN12IODMACommand23_RESERVEDIODMACommand12Ev -__ZN12IODMACommand23_RESERVEDIODMACommand13Ev -__ZN12IODMACommand23_RESERVEDIODMACommand14Ev -__ZN12IODMACommand23_RESERVEDIODMACommand15Ev +__ZN12IODMACommand24prepareWithSpecificationEPFbPS_NS_9Segment64EPvjEPKNS_14SegmentOptionsEjP8IOMapperyybb __ZN12IODMACommand24prepareWithSpecificationEPFbPS_NS_9Segment64EPvjEhyNS_14MappingOptionsEyjP8IOMapperyybb __ZN12IODMACommand8transferEjyPvy __ZN12IOUserClient12initWithTaskEP4taskPvj @@ -74,24 +48,9 @@ __ZN12IOUserClient15sendAsyncResultEPjiPPvj __ZN12IOUserClient17mapClientMemory64EjP4taskjy __ZN12IOUserClient17sendAsyncResult64EPyiS0_j __ZN12IOUserClient19clientMemoryForTypeEjPjPP18IOMemoryDescriptor +__ZN12IOUserClient19clientMemoryForTypeEjPjR11OSSharedPtrI18IOMemoryDescriptorE __ZN12IOUserClient19setAsyncReference64EPyP8ipc_portyy __ZN12IOUserClient19setAsyncReference64EPyP8ipc_portyyP4task -__ZN12IOUserClient22_RESERVEDIOUserClient0Ev -__ZN12IOUserClient22_RESERVEDIOUserClient1Ev -__ZN12IOUserClient22_RESERVEDIOUserClient2Ev -__ZN12IOUserClient22_RESERVEDIOUserClient3Ev -__ZN12IOUserClient22_RESERVEDIOUserClient4Ev -__ZN12IOUserClient22_RESERVEDIOUserClient5Ev -__ZN12IOUserClient22_RESERVEDIOUserClient6Ev -__ZN12IOUserClient22_RESERVEDIOUserClient7Ev -__ZN12IOUserClient22_RESERVEDIOUserClient8Ev -__ZN12IOUserClient22_RESERVEDIOUserClient9Ev -__ZN12IOUserClient23_RESERVEDIOUserClient10Ev -__ZN12IOUserClient23_RESERVEDIOUserClient11Ev -__ZN12IOUserClient23_RESERVEDIOUserClient12Ev -__ZN12IOUserClient23_RESERVEDIOUserClient13Ev -__ZN12IOUserClient23_RESERVEDIOUserClient14Ev -__ZN12IOUserClient23_RESERVEDIOUserClient15Ev __ZN12IOUserClient23getExternalTrapForIndexEj __ZN12IOUserClient24getNotificationSemaphoreEjPP9semaphore __ZN12IOUserClient24getTargetAndTrapForIndexEPP9IOServicej @@ -99,37 +58,14 @@ __ZN12IOUserClient24registerNotificationPortEP8ipc_portjj __ZN12IOUserClient24registerNotificationPortEP8ipc_portjy __ZN12IOUserClient25getExternalMethodForIndexEj __ZN12IOUserClient26getTargetAndMethodForIndexEPP9IOServicej +__ZN12IOUserClient26getTargetAndMethodForIndexER11OSSharedPtrI9IOServiceEj __ZN12IOUserClient28sendAsyncResult64WithOptionsEPyiS0_jj __ZN12IOUserClient30getExternalAsyncMethodForIndexEj __ZN12IOUserClient31getAsyncTargetAndMethodForIndexEPP9IOServicej __ZN13IOCommandGate12commandSleepEPvj __ZN13IOCommandGate12commandSleepEPvyj -__ZN13IOCommandGate23_RESERVEDIOCommandGate0Ev -__ZN13IOCommandGate23_RESERVEDIOCommandGate1Ev -__ZN13IOCommandGate23_RESERVEDIOCommandGate2Ev -__ZN13IOCommandGate23_RESERVEDIOCommandGate3Ev -__ZN13IOCommandGate23_RESERVEDIOCommandGate4Ev -__ZN13IOCommandGate23_RESERVEDIOCommandGate5Ev -__ZN13IOCommandGate23_RESERVEDIOCommandGate6Ev -__ZN13IOCommandGate23_RESERVEDIOCommandGate7Ev __ZN13IOCommandPool11commandPoolEP9IOServiceP10IOWorkLoopj -__ZN13IOCommandPool23_RESERVEDIOCommandPool0Ev -__ZN13IOCommandPool23_RESERVEDIOCommandPool1Ev -__ZN13IOCommandPool23_RESERVEDIOCommandPool2Ev -__ZN13IOCommandPool23_RESERVEDIOCommandPool3Ev -__ZN13IOCommandPool23_RESERVEDIOCommandPool4Ev -__ZN13IOCommandPool23_RESERVEDIOCommandPool5Ev -__ZN13IOCommandPool23_RESERVEDIOCommandPool6Ev -__ZN13IOCommandPool23_RESERVEDIOCommandPool7Ev __ZN13IOCommandPool4initEP9IOServiceP10IOWorkLoopj -__ZN13IOEventSource23_RESERVEDIOEventSource0Ev -__ZN13IOEventSource23_RESERVEDIOEventSource1Ev 
-__ZN13IOEventSource23_RESERVEDIOEventSource2Ev -__ZN13IOEventSource23_RESERVEDIOEventSource3Ev -__ZN13IOEventSource23_RESERVEDIOEventSource4Ev -__ZN13IOEventSource23_RESERVEDIOEventSource5Ev -__ZN13IOEventSource23_RESERVEDIOEventSource6Ev -__ZN13IOEventSource23_RESERVEDIOEventSource7Ev __ZN13IOEventSource9sleepGateEPvj __ZN13IOEventSource9sleepGateEPvyj __ZN13_IOServiceJob8startJobEP9IOServiceij @@ -147,38 +83,6 @@ __ZN15IODMAController13getControllerEP9IOServicej __ZN15IODMAController16notifyDMACommandEP16IODMAEventSourceP12IODMACommandiyy __ZN15IODMAController20createControllerNameEj __ZN15IODMAController21registerDMAControllerEj -__ZN15IORegistryEntry25_RESERVEDIORegistryEntry0Ev -__ZN15IORegistryEntry25_RESERVEDIORegistryEntry1Ev -__ZN15IORegistryEntry25_RESERVEDIORegistryEntry2Ev -__ZN15IORegistryEntry25_RESERVEDIORegistryEntry3Ev -__ZN15IORegistryEntry25_RESERVEDIORegistryEntry4Ev -__ZN15IORegistryEntry25_RESERVEDIORegistryEntry5Ev -__ZN15IORegistryEntry25_RESERVEDIORegistryEntry6Ev -__ZN15IORegistryEntry25_RESERVEDIORegistryEntry7Ev -__ZN15IORegistryEntry25_RESERVEDIORegistryEntry8Ev -__ZN15IORegistryEntry25_RESERVEDIORegistryEntry9Ev -__ZN15IORegistryEntry26_RESERVEDIORegistryEntry10Ev -__ZN15IORegistryEntry26_RESERVEDIORegistryEntry11Ev -__ZN15IORegistryEntry26_RESERVEDIORegistryEntry12Ev -__ZN15IORegistryEntry26_RESERVEDIORegistryEntry13Ev -__ZN15IORegistryEntry26_RESERVEDIORegistryEntry14Ev -__ZN15IORegistryEntry26_RESERVEDIORegistryEntry15Ev -__ZN15IORegistryEntry26_RESERVEDIORegistryEntry16Ev -__ZN15IORegistryEntry26_RESERVEDIORegistryEntry17Ev -__ZN15IORegistryEntry26_RESERVEDIORegistryEntry18Ev -__ZN15IORegistryEntry26_RESERVEDIORegistryEntry19Ev -__ZN15IORegistryEntry26_RESERVEDIORegistryEntry20Ev -__ZN15IORegistryEntry26_RESERVEDIORegistryEntry21Ev -__ZN15IORegistryEntry26_RESERVEDIORegistryEntry22Ev -__ZN15IORegistryEntry26_RESERVEDIORegistryEntry23Ev -__ZN15IORegistryEntry26_RESERVEDIORegistryEntry24Ev -__ZN15IORegistryEntry26_RESERVEDIORegistryEntry25Ev -__ZN15IORegistryEntry26_RESERVEDIORegistryEntry26Ev -__ZN15IORegistryEntry26_RESERVEDIORegistryEntry27Ev -__ZN15IORegistryEntry26_RESERVEDIORegistryEntry28Ev -__ZN15IORegistryEntry26_RESERVEDIORegistryEntry29Ev -__ZN15IORegistryEntry26_RESERVEDIORegistryEntry30Ev -__ZN15IORegistryEntry26_RESERVEDIORegistryEntry31Ev __ZN16IODMAEventSource14dmaEventSourceEP8OSObjectP9IOServicePFvS1_PS_P12IODMACommandiyyES8_j __ZN16IODMAEventSource15startDMACommandEP12IODMACommandjyy __ZN16IODMAEventSource16notifyDMACommandEP12IODMACommandiyy @@ -194,34 +98,11 @@ __ZN16IORangeAllocator9withRangeEyyjj __ZN17IOBigMemoryCursor13outputSegmentEN14IOMemoryCursor15PhysicalSegmentEPvj __ZN17IOBigMemoryCursor17withSpecificationEyyy __ZN17IOBigMemoryCursor21initWithSpecificationEyyy -__ZN17IOPolledInterface27_RESERVEDIOPolledInterface1Ev -__ZN17IOPolledInterface27_RESERVEDIOPolledInterface2Ev -__ZN17IOPolledInterface27_RESERVEDIOPolledInterface3Ev -__ZN17IOPolledInterface27_RESERVEDIOPolledInterface4Ev -__ZN17IOPolledInterface27_RESERVEDIOPolledInterface5Ev -__ZN17IOPolledInterface27_RESERVEDIOPolledInterface6Ev -__ZN17IOPolledInterface27_RESERVEDIOPolledInterface7Ev -__ZN17IOPolledInterface27_RESERVEDIOPolledInterface8Ev -__ZN17IOPolledInterface27_RESERVEDIOPolledInterface9Ev -__ZN17IOPolledInterface28_RESERVEDIOPolledInterface10Ev -__ZN17IOPolledInterface28_RESERVEDIOPolledInterface11Ev -__ZN17IOPolledInterface28_RESERVEDIOPolledInterface12Ev -__ZN17IOPolledInterface28_RESERVEDIOPolledInterface13Ev 
-__ZN17IOPolledInterface28_RESERVEDIOPolledInterface14Ev -__ZN17IOPolledInterface28_RESERVEDIOPolledInterface15Ev __ZN17IOSharedDataQueue11withEntriesEjj __ZN17IOSharedDataQueue12getQueueSizeEv __ZN17IOSharedDataQueue12setQueueSizeEj __ZN17IOSharedDataQueue12withCapacityEj __ZN17IOSharedDataQueue16initWithCapacityEj -__ZN17IOSharedDataQueue27_RESERVEDIOSharedDataQueue0Ev -__ZN17IOSharedDataQueue27_RESERVEDIOSharedDataQueue1Ev -__ZN17IOSharedDataQueue27_RESERVEDIOSharedDataQueue2Ev -__ZN17IOSharedDataQueue27_RESERVEDIOSharedDataQueue3Ev -__ZN17IOSharedDataQueue27_RESERVEDIOSharedDataQueue4Ev -__ZN17IOSharedDataQueue27_RESERVEDIOSharedDataQueue5Ev -__ZN17IOSharedDataQueue27_RESERVEDIOSharedDataQueue6Ev -__ZN17IOSharedDataQueue27_RESERVEDIOSharedDataQueue7Ev __ZN17IOSharedDataQueue7dequeueEPvPj __ZN17IOSharedDataQueue7enqueueEPvj __ZN18IOMemoryDescriptor10setMappingEP4taskyj @@ -238,21 +119,6 @@ __ZN18IOMemoryDescriptor16withAddressRangeEyyjP4task __ZN18IOMemoryDescriptor17withAddressRangesEP14IOVirtualRangejjP4task __ZN18IOMemoryDescriptor19createMappingInTaskEP4taskyjyy __ZN18IOMemoryDescriptor19withPhysicalAddressEyyj -__ZN18IOMemoryDescriptor28_RESERVEDIOMemoryDescriptor1Ev -__ZN18IOMemoryDescriptor28_RESERVEDIOMemoryDescriptor2Ev -__ZN18IOMemoryDescriptor28_RESERVEDIOMemoryDescriptor3Ev -__ZN18IOMemoryDescriptor28_RESERVEDIOMemoryDescriptor4Ev -__ZN18IOMemoryDescriptor28_RESERVEDIOMemoryDescriptor5Ev -__ZN18IOMemoryDescriptor28_RESERVEDIOMemoryDescriptor6Ev -__ZN18IOMemoryDescriptor28_RESERVEDIOMemoryDescriptor7Ev -__ZN18IOMemoryDescriptor28_RESERVEDIOMemoryDescriptor8Ev -__ZN18IOMemoryDescriptor28_RESERVEDIOMemoryDescriptor9Ev -__ZN18IOMemoryDescriptor29_RESERVEDIOMemoryDescriptor10Ev -__ZN18IOMemoryDescriptor29_RESERVEDIOMemoryDescriptor11Ev -__ZN18IOMemoryDescriptor29_RESERVEDIOMemoryDescriptor12Ev -__ZN18IOMemoryDescriptor29_RESERVEDIOMemoryDescriptor13Ev -__ZN18IOMemoryDescriptor29_RESERVEDIOMemoryDescriptor14Ev -__ZN18IOMemoryDescriptor29_RESERVEDIOMemoryDescriptor15Ev __ZN18IOMemoryDescriptor3mapEj __ZN18IOMemoryDescriptor5doMapEP7_vm_mapPyjyy __ZN18IOMemoryDescriptor6setTagEj @@ -272,11 +138,6 @@ __ZN18IOTimerEventSource12wakeAtTimeMSEj __ZN18IOTimerEventSource12wakeAtTimeUSEj __ZN18IOTimerEventSource15setTimeoutTicksEj __ZN18IOTimerEventSource15wakeAtTimeTicksEj -__ZN18IOTimerEventSource28_RESERVEDIOTimerEventSource3Ev -__ZN18IOTimerEventSource28_RESERVEDIOTimerEventSource4Ev -__ZN18IOTimerEventSource28_RESERVEDIOTimerEventSource5Ev -__ZN18IOTimerEventSource28_RESERVEDIOTimerEventSource6Ev -__ZN18IOTimerEventSource28_RESERVEDIOTimerEventSource7Ev __ZN20IOLittleMemoryCursor13outputSegmentEN14IOMemoryCursor15PhysicalSegmentEPvj __ZN20IOLittleMemoryCursor17withSpecificationEyyy __ZN20IOLittleMemoryCursor21initWithSpecificationEyyy @@ -288,12 +149,6 @@ __ZN21IOInterruptController12enableVectorEiP17IOInterruptVector __ZN21IOInterruptController13getVectorTypeEiP17IOInterruptVector __ZN21IOInterruptController17disableVectorHardEiP17IOInterruptVector __ZN21IOInterruptController17vectorCanBeSharedEiP17IOInterruptVector -__ZN21IOInterruptController31_RESERVEDIOInterruptController0Ev -__ZN21IOInterruptController31_RESERVEDIOInterruptController1Ev -__ZN21IOInterruptController31_RESERVEDIOInterruptController2Ev -__ZN21IOInterruptController31_RESERVEDIOInterruptController3Ev -__ZN21IOInterruptController31_RESERVEDIOInterruptController4Ev -__ZN21IOInterruptController31_RESERVEDIOInterruptController5Ev __ZN21IOInterruptController28timeStampInterruptHandlerEndEiP17IOInterruptVector 
__ZN21IOInterruptController30timeStampInterruptHandlerStartEiP17IOInterruptVector __ZN21IONaturalMemoryCursor13outputSegmentEN14IOMemoryCursor15PhysicalSegmentEPvj @@ -307,14 +162,6 @@ __ZN21IOSubMemoryDescriptor12withSubRangeEP18IOMemoryDescriptoryyj __ZN21IOSubMemoryDescriptor18getPhysicalSegmentEyPyj __ZN21IOSubMemoryDescriptor7prepareEj __ZN21IOSubMemoryDescriptor8completeEj -__ZN22IOInterruptEventSource32_RESERVEDIOInterruptEventSource0Ev -__ZN22IOInterruptEventSource32_RESERVEDIOInterruptEventSource1Ev -__ZN22IOInterruptEventSource32_RESERVEDIOInterruptEventSource2Ev -__ZN22IOInterruptEventSource32_RESERVEDIOInterruptEventSource3Ev -__ZN22IOInterruptEventSource32_RESERVEDIOInterruptEventSource4Ev -__ZN22IOInterruptEventSource32_RESERVEDIOInterruptEventSource5Ev -__ZN22IOInterruptEventSource32_RESERVEDIOInterruptEventSource6Ev -__ZN22IOInterruptEventSource32_RESERVEDIOInterruptEventSource7Ev __ZN23IOMultiMemoryDescriptor12setOwnershipEP4taskij __ZN23IOMultiMemoryDescriptor15withDescriptorsEPP18IOMemoryDescriptorjjb __ZN23IOMultiMemoryDescriptor19initWithDescriptorsEPP18IOMemoryDescriptorjjb @@ -329,22 +176,6 @@ __ZN24IOBufferMemoryDescriptor17inTaskWithOptionsEP4taskjmm __ZN24IOBufferMemoryDescriptor17inTaskWithOptionsEP4taskjmmjj __ZN24IOBufferMemoryDescriptor20initWithPhysicalMaskEP4taskjyyy __ZN24IOBufferMemoryDescriptor22inTaskWithPhysicalMaskEP4taskjyy -__ZN24IOBufferMemoryDescriptor34_RESERVEDIOBufferMemoryDescriptor0Ev -__ZN24IOBufferMemoryDescriptor34_RESERVEDIOBufferMemoryDescriptor1Ev -__ZN24IOBufferMemoryDescriptor34_RESERVEDIOBufferMemoryDescriptor2Ev -__ZN24IOBufferMemoryDescriptor34_RESERVEDIOBufferMemoryDescriptor3Ev -__ZN24IOBufferMemoryDescriptor34_RESERVEDIOBufferMemoryDescriptor4Ev -__ZN24IOBufferMemoryDescriptor34_RESERVEDIOBufferMemoryDescriptor5Ev -__ZN24IOBufferMemoryDescriptor34_RESERVEDIOBufferMemoryDescriptor6Ev -__ZN24IOBufferMemoryDescriptor34_RESERVEDIOBufferMemoryDescriptor7Ev -__ZN24IOBufferMemoryDescriptor34_RESERVEDIOBufferMemoryDescriptor8Ev -__ZN24IOBufferMemoryDescriptor34_RESERVEDIOBufferMemoryDescriptor9Ev -__ZN24IOBufferMemoryDescriptor35_RESERVEDIOBufferMemoryDescriptor10Ev -__ZN24IOBufferMemoryDescriptor35_RESERVEDIOBufferMemoryDescriptor11Ev -__ZN24IOBufferMemoryDescriptor35_RESERVEDIOBufferMemoryDescriptor12Ev -__ZN24IOBufferMemoryDescriptor35_RESERVEDIOBufferMemoryDescriptor13Ev -__ZN24IOBufferMemoryDescriptor35_RESERVEDIOBufferMemoryDescriptor14Ev -__ZN24IOBufferMemoryDescriptor35_RESERVEDIOBufferMemoryDescriptor15Ev __ZN24IOBufferMemoryDescriptor9setLengthEm __ZN24IOBufferMemoryDescriptor9withBytesEPKvmjb __ZN25IOGeneralMemoryDescriptor11wireVirtualEj @@ -356,40 +187,12 @@ __ZN25IOGeneralMemoryDescriptor5doMapEP7_vm_mapPyjyy __ZN25IOGeneralMemoryDescriptor7doUnmapEP7_vm_mapyy __ZN25IOGeneralMemoryDescriptor7prepareEj __ZN25IOGeneralMemoryDescriptor8completeEj -__ZN27IOSharedInterruptController37_RESERVEDIOSharedInterruptController0Ev -__ZN27IOSharedInterruptController37_RESERVEDIOSharedInterruptController1Ev -__ZN27IOSharedInterruptController37_RESERVEDIOSharedInterruptController2Ev -__ZN27IOSharedInterruptController37_RESERVEDIOSharedInterruptController3Ev -__ZN28IOFilterInterruptEventSource38_RESERVEDIOFilterInterruptEventSource0Ev -__ZN28IOFilterInterruptEventSource38_RESERVEDIOFilterInterruptEventSource1Ev -__ZN28IOFilterInterruptEventSource38_RESERVEDIOFilterInterruptEventSource2Ev -__ZN28IOFilterInterruptEventSource38_RESERVEDIOFilterInterruptEventSource3Ev -__ZN28IOFilterInterruptEventSource38_RESERVEDIOFilterInterruptEventSource4Ev 
-__ZN28IOFilterInterruptEventSource38_RESERVEDIOFilterInterruptEventSource5Ev -__ZN28IOFilterInterruptEventSource38_RESERVEDIOFilterInterruptEventSource6Ev -__ZN28IOFilterInterruptEventSource38_RESERVEDIOFilterInterruptEventSource7Ev __ZN29IOInterleavedMemoryDescriptor12withCapacityEyj __ZN29IOInterleavedMemoryDescriptor16initWithCapacityEyj __ZN29IOInterleavedMemoryDescriptor19setMemoryDescriptorEP18IOMemoryDescriptoryy __ZN29IOInterleavedMemoryDescriptor22clearMemoryDescriptorsEj __ZN29IOInterleavedMemoryDescriptor7prepareEj __ZN29IOInterleavedMemoryDescriptor8completeEj -__ZN8IOMapper18_RESERVEDIOMapper0Ev -__ZN8IOMapper18_RESERVEDIOMapper1Ev -__ZN8IOMapper18_RESERVEDIOMapper2Ev -__ZN8IOMapper18_RESERVEDIOMapper3Ev -__ZN8IOMapper18_RESERVEDIOMapper4Ev -__ZN8IOMapper18_RESERVEDIOMapper5Ev -__ZN8IOMapper18_RESERVEDIOMapper6Ev -__ZN8IOMapper18_RESERVEDIOMapper7Ev -__ZN8IOMapper18_RESERVEDIOMapper8Ev -__ZN8IOMapper18_RESERVEDIOMapper9Ev -__ZN8IOMapper19_RESERVEDIOMapper10Ev -__ZN8IOMapper19_RESERVEDIOMapper11Ev -__ZN8IOMapper19_RESERVEDIOMapper12Ev -__ZN8IOMapper19_RESERVEDIOMapper13Ev -__ZN8IOMapper19_RESERVEDIOMapper14Ev -__ZN8IOMapper19_RESERVEDIOMapper15Ev __ZN8IOSyncer10gMetaClassE __ZN8IOSyncer10superClassE __ZN8IOSyncer13privateSignalEv @@ -408,6 +211,8 @@ __ZN8IOSyncerC2EPK11OSMetaClass __ZN8IOSyncerC2Ev __ZN8IOSyncerD0Ev __ZN8IOSyncerD2Ev +__ZN8IOSyncerdlEPvm +__ZN8IOSyncernwEm __ZN9IOService10adjustBusyEi __ZN9IOService10handleOpenEPS_jPv __ZN9IOService11_adjustBusyEi @@ -417,7 +222,9 @@ __ZN9IOService12requestProbeEj __ZN9IOService12updateReportEP19IOReportChannelListjPvS2_ __ZN9IOService13messageClientEjP8OSObjectPvm __ZN9IOService13newUserClientEP4taskPvjP12OSDictionaryPP12IOUserClient +__ZN9IOService13newUserClientEP4taskPvjP12OSDictionaryR11OSSharedPtrI12IOUserClientE __ZN9IOService13newUserClientEP4taskPvjPP12IOUserClient +__ZN9IOService13newUserClientEP4taskPvjR11OSSharedPtrI12IOUserClientE __ZN9IOService13startMatchingEj __ZN9IOService13waitMatchIdleEj __ZN9IOService13willTerminateEPS_j @@ -432,60 +239,14 @@ __ZN9IOService15terminatePhase1Ej __ZN9IOService15terminateWorkerEj __ZN9IOService16registerInterestEPK8OSSymbolPFiPvS3_jPS_S3_mES3_S3_ __ZN9IOService16requestTerminateEPS_j -__ZN9IOService16setCPUSnoopDelayEj __ZN9IOService18doServiceTerminateEj __ZN9IOService18matchPropertyTableEP12OSDictionaryPi __ZN9IOService18requireMaxBusStallEj __ZN9IOService18systemWillShutdownEj -__ZN9IOService19_RESERVEDIOService2Ev -__ZN9IOService19_RESERVEDIOService3Ev -__ZN9IOService19_RESERVEDIOService4Ev -__ZN9IOService19_RESERVEDIOService5Ev -__ZN9IOService19_RESERVEDIOService6Ev -__ZN9IOService19_RESERVEDIOService7Ev -__ZN9IOService19_RESERVEDIOService8Ev -__ZN9IOService19_RESERVEDIOService9Ev __ZN9IOService19deliverNotificationEPK8OSSymboljj -__ZN9IOService20_RESERVEDIOService10Ev -__ZN9IOService20_RESERVEDIOService11Ev -__ZN9IOService20_RESERVEDIOService12Ev -__ZN9IOService20_RESERVEDIOService13Ev -__ZN9IOService20_RESERVEDIOService14Ev -__ZN9IOService20_RESERVEDIOService15Ev -__ZN9IOService20_RESERVEDIOService16Ev -__ZN9IOService20_RESERVEDIOService17Ev -__ZN9IOService20_RESERVEDIOService18Ev -__ZN9IOService20_RESERVEDIOService19Ev -__ZN9IOService20_RESERVEDIOService20Ev -__ZN9IOService20_RESERVEDIOService21Ev -__ZN9IOService20_RESERVEDIOService22Ev -__ZN9IOService20_RESERVEDIOService23Ev -__ZN9IOService20_RESERVEDIOService24Ev -__ZN9IOService20_RESERVEDIOService25Ev -__ZN9IOService20_RESERVEDIOService26Ev -__ZN9IOService20_RESERVEDIOService27Ev 
-__ZN9IOService20_RESERVEDIOService28Ev -__ZN9IOService20_RESERVEDIOService29Ev -__ZN9IOService20_RESERVEDIOService30Ev -__ZN9IOService20_RESERVEDIOService31Ev -__ZN9IOService20_RESERVEDIOService32Ev -__ZN9IOService20_RESERVEDIOService33Ev -__ZN9IOService20_RESERVEDIOService34Ev -__ZN9IOService20_RESERVEDIOService35Ev -__ZN9IOService20_RESERVEDIOService36Ev -__ZN9IOService20_RESERVEDIOService37Ev -__ZN9IOService20_RESERVEDIOService38Ev -__ZN9IOService20_RESERVEDIOService39Ev -__ZN9IOService20_RESERVEDIOService40Ev -__ZN9IOService20_RESERVEDIOService41Ev -__ZN9IOService20_RESERVEDIOService42Ev -__ZN9IOService20_RESERVEDIOService43Ev -__ZN9IOService20_RESERVEDIOService44Ev -__ZN9IOService20_RESERVEDIOService45Ev -__ZN9IOService20_RESERVEDIOService46Ev -__ZN9IOService20_RESERVEDIOService47Ev __ZN9IOService23acknowledgeNotificationEPvj __ZN9IOService23addMatchingNotificationEPK8OSSymbolP12OSDictionaryPFbPvS5_PS_P10IONotifierES5_S5_i +__ZN9IOService23addMatchingNotificationEPK8OSSymbolP12OSDictionaryiU13block_pointerFbPS_P10IONotifierE __ZN9IOService23scheduleTerminatePhase2Ej __ZN9IOService24mapDeviceMemoryWithIndexEjj __ZN9IOService4openEPS_jPv @@ -506,7 +267,5 @@ __ZNK8IOSyncer12getMetaClassEv __ZNK8IOSyncer9MetaClass5allocEv __ZTV8IOSyncer __ZTVN8IOSyncer9MetaClassE -_ev_try_lock -_ev_unlock - -__ZN9IOService23addMatchingNotificationEPK8OSSymbolP12OSDictionaryiU13block_pointerFbPS_P10IONotifierE +_ev_try_lock:_OSSpinLockTry +_ev_unlock:_OSSpinLockUnlock diff --git a/config/Libkern.arm.exports b/config/Libkern.arm.exports index 0be20457d..ab47a9396 100644 --- a/config/Libkern.arm.exports +++ b/config/Libkern.arm.exports @@ -1,5 +1,5 @@ _OSAddAtomic64 _OSCompareAndSwap64 -__ZN15OSMetaClassBase9_ptmf2ptfEPKS_MS_FvvEm +__ZN15OSMetaClassBase9_ptmf2ptfEPKS_MS_FvvE __ZN12OSOrderedSet12withCapacityEjPFlPK15OSMetaClassBaseS2_PvES3_ __ZN12OSOrderedSet16initWithCapacityEjPFlPK15OSMetaClassBaseS2_PvES3_ diff --git a/config/Libkern.arm64.MacOSX.exports b/config/Libkern.arm64.MacOSX.exports new file mode 100644 index 000000000..79b08c5a6 --- /dev/null +++ b/config/Libkern.arm64.MacOSX.exports @@ -0,0 +1,132 @@ +__ZN10OSIterator20_RESERVEDOSIterator0Ev +__ZN10OSIterator20_RESERVEDOSIterator1Ev +__ZN10OSIterator20_RESERVEDOSIterator2Ev +__ZN10OSIterator20_RESERVEDOSIterator3Ev +__ZN11OSMetaClass21_RESERVEDOSMetaClass0Ev +__ZN11OSMetaClass21_RESERVEDOSMetaClass1Ev +__ZN11OSMetaClass21_RESERVEDOSMetaClass2Ev +__ZN11OSMetaClass21_RESERVEDOSMetaClass3Ev +__ZN11OSMetaClass21_RESERVEDOSMetaClass4Ev +__ZN11OSMetaClass21_RESERVEDOSMetaClass5Ev +__ZN11OSMetaClass21_RESERVEDOSMetaClass6Ev +__ZN11OSMetaClass21_RESERVEDOSMetaClass7Ev +__ZN11OSSerialize21_RESERVEDOSSerialize0Ev +__ZN11OSSerialize21_RESERVEDOSSerialize1Ev +__ZN11OSSerialize21_RESERVEDOSSerialize2Ev +__ZN11OSSerialize21_RESERVEDOSSerialize3Ev +__ZN11OSSerialize21_RESERVEDOSSerialize4Ev +__ZN11OSSerialize21_RESERVEDOSSerialize5Ev +__ZN11OSSerialize21_RESERVEDOSSerialize6Ev +__ZN11OSSerialize21_RESERVEDOSSerialize7Ev +__ZN12OSCollection22_RESERVEDOSCollection0Ev +__ZN12OSCollection22_RESERVEDOSCollection1Ev +__ZN12OSCollection22_RESERVEDOSCollection2Ev +__ZN12OSCollection22_RESERVEDOSCollection3Ev +__ZN12OSCollection22_RESERVEDOSCollection4Ev +__ZN12OSCollection22_RESERVEDOSCollection5Ev +__ZN12OSCollection22_RESERVEDOSCollection6Ev +__ZN12OSCollection22_RESERVEDOSCollection7Ev +__ZN12OSDictionary22_RESERVEDOSDictionary0Ev +__ZN12OSDictionary22_RESERVEDOSDictionary1Ev +__ZN12OSDictionary22_RESERVEDOSDictionary2Ev 
+__ZN12OSDictionary22_RESERVEDOSDictionary3Ev +__ZN12OSDictionary22_RESERVEDOSDictionary4Ev +__ZN12OSDictionary22_RESERVEDOSDictionary5Ev +__ZN12OSDictionary22_RESERVEDOSDictionary6Ev +__ZN12OSDictionary22_RESERVEDOSDictionary7Ev +__ZN12OSOrderedSet22_RESERVEDOSOrderedSet0Ev +__ZN12OSOrderedSet22_RESERVEDOSOrderedSet1Ev +__ZN12OSOrderedSet22_RESERVEDOSOrderedSet2Ev +__ZN12OSOrderedSet22_RESERVEDOSOrderedSet3Ev +__ZN12OSOrderedSet22_RESERVEDOSOrderedSet4Ev +__ZN12OSOrderedSet22_RESERVEDOSOrderedSet5Ev +__ZN12OSOrderedSet22_RESERVEDOSOrderedSet6Ev +__ZN12OSOrderedSet22_RESERVEDOSOrderedSet7Ev +__ZN15OSMetaClassBase25_RESERVEDOSMetaClassBase0Ev +__ZN15OSMetaClassBase25_RESERVEDOSMetaClassBase1Ev +__ZN15OSMetaClassBase25_RESERVEDOSMetaClassBase2Ev +__ZN15OSMetaClassBase25_RESERVEDOSMetaClassBase3Ev +__ZN15OSMetaClassBase25_RESERVEDOSMetaClassBase4Ev +__ZN15OSMetaClassBase25_RESERVEDOSMetaClassBase5Ev +__ZN15OSMetaClassBase25_RESERVEDOSMetaClassBase6Ev +__ZN15OSMetaClassBase25_RESERVEDOSMetaClassBase7Ev +__ZN5OSSet15_RESERVEDOSSet0Ev +__ZN5OSSet15_RESERVEDOSSet1Ev +__ZN5OSSet15_RESERVEDOSSet2Ev +__ZN5OSSet15_RESERVEDOSSet3Ev +__ZN5OSSet15_RESERVEDOSSet4Ev +__ZN5OSSet15_RESERVEDOSSet5Ev +__ZN5OSSet15_RESERVEDOSSet6Ev +__ZN5OSSet15_RESERVEDOSSet7Ev +__ZN6OSData16_RESERVEDOSData0Ev +__ZN6OSData16_RESERVEDOSData1Ev +__ZN6OSData16_RESERVEDOSData2Ev +__ZN6OSData16_RESERVEDOSData3Ev +__ZN6OSData16_RESERVEDOSData4Ev +__ZN6OSData16_RESERVEDOSData5Ev +__ZN6OSData16_RESERVEDOSData6Ev +__ZN6OSData16_RESERVEDOSData7Ev +__ZN7OSArray17_RESERVEDOSArray0Ev +__ZN7OSArray17_RESERVEDOSArray1Ev +__ZN7OSArray17_RESERVEDOSArray2Ev +__ZN7OSArray17_RESERVEDOSArray3Ev +__ZN7OSArray17_RESERVEDOSArray4Ev +__ZN7OSArray17_RESERVEDOSArray5Ev +__ZN7OSArray17_RESERVEDOSArray6Ev +__ZN7OSArray17_RESERVEDOSArray7Ev +__ZN8OSNumber18_RESERVEDOSNumber0Ev +__ZN8OSNumber18_RESERVEDOSNumber1Ev +__ZN8OSNumber18_RESERVEDOSNumber2Ev +__ZN8OSNumber18_RESERVEDOSNumber3Ev +__ZN8OSNumber18_RESERVEDOSNumber4Ev +__ZN8OSNumber18_RESERVEDOSNumber5Ev +__ZN8OSNumber18_RESERVEDOSNumber6Ev +__ZN8OSNumber18_RESERVEDOSNumber7Ev +__ZN8OSObject18_RESERVEDOSObject0Ev +__ZN8OSObject18_RESERVEDOSObject1Ev +__ZN8OSObject18_RESERVEDOSObject2Ev +__ZN8OSObject18_RESERVEDOSObject3Ev +__ZN8OSObject18_RESERVEDOSObject4Ev +__ZN8OSObject18_RESERVEDOSObject5Ev +__ZN8OSObject18_RESERVEDOSObject6Ev +__ZN8OSObject18_RESERVEDOSObject7Ev +__ZN8OSObject18_RESERVEDOSObject8Ev +__ZN8OSObject18_RESERVEDOSObject9Ev +__ZN8OSObject19_RESERVEDOSObject10Ev +__ZN8OSObject19_RESERVEDOSObject11Ev +__ZN8OSObject19_RESERVEDOSObject12Ev +__ZN8OSObject19_RESERVEDOSObject13Ev +__ZN8OSObject19_RESERVEDOSObject14Ev +__ZN8OSObject19_RESERVEDOSObject15Ev +__ZN8OSString18_RESERVEDOSString0Ev +__ZN8OSString18_RESERVEDOSString1Ev +__ZN8OSString18_RESERVEDOSString2Ev +__ZN8OSString18_RESERVEDOSString3Ev +__ZN8OSString18_RESERVEDOSString4Ev +__ZN8OSString18_RESERVEDOSString5Ev +__ZN8OSString18_RESERVEDOSString6Ev +__ZN8OSString18_RESERVEDOSString7Ev +__ZN8OSString18_RESERVEDOSString8Ev +__ZN8OSString18_RESERVEDOSString9Ev +__ZN8OSString19_RESERVEDOSString10Ev +__ZN8OSString19_RESERVEDOSString11Ev +__ZN8OSString19_RESERVEDOSString12Ev +__ZN8OSString19_RESERVEDOSString13Ev +__ZN8OSString19_RESERVEDOSString14Ev +__ZN8OSString19_RESERVEDOSString15Ev +__ZN8OSSymbol18_RESERVEDOSSymbol0Ev +__ZN8OSSymbol18_RESERVEDOSSymbol1Ev +__ZN8OSSymbol18_RESERVEDOSSymbol2Ev +__ZN8OSSymbol18_RESERVEDOSSymbol3Ev +__ZN8OSSymbol18_RESERVEDOSSymbol4Ev +__ZN8OSSymbol18_RESERVEDOSSymbol5Ev 
+__ZN8OSSymbol18_RESERVEDOSSymbol6Ev +__ZN8OSSymbol18_RESERVEDOSSymbol7Ev +__ZN9OSBoolean19_RESERVEDOSBoolean0Ev +__ZN9OSBoolean19_RESERVEDOSBoolean1Ev +__ZN9OSBoolean19_RESERVEDOSBoolean2Ev +__ZN9OSBoolean19_RESERVEDOSBoolean3Ev +__ZN9OSBoolean19_RESERVEDOSBoolean4Ev +__ZN9OSBoolean19_RESERVEDOSBoolean5Ev +__ZN9OSBoolean19_RESERVEDOSBoolean6Ev +__ZN9OSBoolean19_RESERVEDOSBoolean7Ev diff --git a/config/Libkern.arm64.exports b/config/Libkern.arm64.exports index d2575ff18..40f33219b 100644 --- a/config/Libkern.arm64.exports +++ b/config/Libkern.arm64.exports @@ -1,6 +1,6 @@ _OSAddAtomic64 _OSCompareAndSwap64 _PAGE_SHIFT_CONST -__ZN15OSMetaClassBase9_ptmf2ptfEPKS_MS_FvvEm +__ZN15OSMetaClassBase9_ptmf2ptfEPKS_MS_FvvE __ZN12OSOrderedSet12withCapacityEjPFiPK15OSMetaClassBaseS2_PvES3_ __ZN12OSOrderedSet16initWithCapacityEjPFiPK15OSMetaClassBaseS2_PvES3_ diff --git a/config/Libkern.exports b/config/Libkern.exports index 199aff61c..8e67942f2 100644 --- a/config/Libkern.exports +++ b/config/Libkern.exports @@ -1,5 +1,4 @@ _Assert -_img4_interface_register _MD5Final _MD5Init _MD5Update @@ -21,7 +20,7 @@ _OSCompareAndSwapPtr _OSDecrementAtomic _OSDecrementAtomic16 _OSDecrementAtomic8 -_OSFree +_OSFree:_OSFree_external _OSIncrementAtomic _OSIncrementAtomic16 _OSIncrementAtomic8 @@ -30,11 +29,11 @@ _OSKextLoadKextWithIdentifier _OSKextReleaseKextWithLoadTag _OSKextRequestResource _OSKextRetainKextWithLoadTag -_OSMalloc -_OSMalloc_Tagalloc -_OSMalloc_Tagfree -_OSMalloc_noblock -_OSMalloc_nowait +_OSMalloc:_OSMalloc_external +_OSMalloc_Tagalloc:_OSMalloc_Tagalloc_external +_OSMalloc_Tagfree:_OSMalloc_Tagfree_external +_OSMalloc_noblock:_OSMalloc_noblock_external +_OSMalloc_nowait:_OSMalloc_nowait_external _OSReportWithBacktrace _OSTestAndClear _OSTestAndSet @@ -55,11 +54,19 @@ _SHA512_Final _SHA512_Init _SHA512_Update _STRDUP +__Block_copy +__Block_release +__NSConcreteAutoBlock +__NSConcreteFinalizingBlock +__NSConcreteGlobalBlock +__NSConcreteMallocBlock +__NSConcreteStackBlock +__NSConcreteWeakBlockVariable __Z13OSUnserializePKcPP8OSString -__Z13OSUnserializePKcPN2os9smart_ptrI8OSString15osobject_policyEE -__Z16OSUnserializeXMLPKcPN2os9smart_ptrI8OSString15osobject_policyEE __Z16OSUnserializeXMLPKcPP8OSString +__Z16OSUnserializeXMLPKcR11OSSharedPtrI8OSStringE __Z16OSUnserializeXMLPKcmPP8OSString +__Z16OSUnserializeXMLPKcmR11OSSharedPtrI8OSStringE __ZN10OSIterator10gMetaClassE __ZN10OSIterator10superClassE __ZN10OSIterator9MetaClassC1Ev @@ -67,6 +74,8 @@ __ZN10OSIterator9MetaClassC2Ev __ZN10OSIterator9metaClassE __ZN10OSIteratorC2EPK11OSMetaClass __ZN10OSIteratorD2Ev +__ZN10OSIteratordlEPvm +__ZN10OSIteratornwEm __ZN11OSMetaClass10preModLoadEPKc __ZN11OSMetaClass11postModLoadEPv __ZN11OSMetaClass12checkModLoadEPv @@ -86,6 +95,7 @@ __ZN11OSMetaClass24serializeClassDictionaryEP12OSDictionary __ZN11OSMetaClass8logErrorEi __ZN11OSMetaClass9metaClassE __ZN11OSMetaClassC2EPKcPKS_j +__ZN11OSMetaClassC2EPKcPKS_jPP4zoneS1_19zone_create_flags_t __ZN11OSMetaClassD2Ev __ZN11OSMetaClassdlEPvm __ZN11OSMetaClassnwEm @@ -113,17 +123,23 @@ __ZN11OSSerializeC2EPK11OSMetaClass __ZN11OSSerializeC2Ev __ZN11OSSerializeD0Ev __ZN11OSSerializeD2Ev +__ZN11OSSerializedlEPvm +__ZN11OSSerializenwEm __ZN12OSCollection10gMetaClassE __ZN12OSCollection10setOptionsEjjPv __ZN12OSCollection10superClassE __ZN12OSCollection11haveUpdatedEv __ZN12OSCollection14copyCollectionEP12OSDictionary +__ZN12OSCollection14iterateObjectsEPvPFbS0_P8OSObjectE +__ZN12OSCollection14iterateObjectsEU13block_pointerFbP8OSObjectE __ZN12OSCollection4initEv 
__ZN12OSCollection9MetaClassC1Ev __ZN12OSCollection9MetaClassC2Ev __ZN12OSCollection9metaClassE __ZN12OSCollectionC2EPK11OSMetaClass __ZN12OSCollectionD2Ev +__ZN12OSCollectiondlEPvm +__ZN12OSCollectionnwEm __ZN12OSDictionary10gMetaClassE __ZN12OSDictionary10setOptionsEjjPv __ZN12OSDictionary10superClassE @@ -135,6 +151,8 @@ __ZN12OSDictionary12removeObjectEPKc __ZN12OSDictionary12withCapacityEj __ZN12OSDictionary14copyCollectionEPS_ __ZN12OSDictionary14ensureCapacityEj +__ZN12OSDictionary14iterateObjectsEPvPFbS0_PK8OSSymbolP8OSObjectE +__ZN12OSDictionary14iterateObjectsEU13block_pointerFbPK8OSSymbolP8OSObjectE __ZN12OSDictionary14withDictionaryEPKS_j __ZN12OSDictionary15flushCollectionEv __ZN12OSDictionary15initWithObjectsEPPK8OSObjectPPK8OSStringjj @@ -148,24 +166,32 @@ __ZN12OSDictionary9MetaClassC1Ev __ZN12OSDictionary9MetaClassC2Ev __ZN12OSDictionary9metaClassE __ZN12OSDictionary9setObjectEPK8OSStringPK15OSMetaClassBase +__ZN12OSDictionary9setObjectEPK8OSStringRK11OSSharedPtrIK15OSMetaClassBaseE __ZN12OSDictionary9setObjectEPK8OSSymbolPK15OSMetaClassBase __ZN12OSDictionary9setObjectEPKcPK15OSMetaClassBase +__ZN12OSDictionary9setObjectEPKcRK11OSSharedPtrIK15OSMetaClassBaseE +__ZN12OSDictionary9setObjectERK11OSSharedPtrIK8OSSymbolERKS0_IK15OSMetaClassBaseE __ZN12OSDictionaryC1EPK11OSMetaClass __ZN12OSDictionaryC1Ev __ZN12OSDictionaryC2EPK11OSMetaClass __ZN12OSDictionaryC2Ev __ZN12OSDictionaryD0Ev __ZN12OSDictionaryD2Ev +__ZN12OSDictionarydlEPvm +__ZN12OSDictionarynwEm __ZN12OSOrderedSet10gMetaClassE __ZN12OSOrderedSet10setOptionsEjjPv __ZN12OSOrderedSet10superClassE __ZN12OSOrderedSet11orderObjectEPK15OSMetaClassBase __ZN12OSOrderedSet12removeObjectEPK15OSMetaClassBase +__ZN12OSOrderedSet12removeObjectERK11OSSharedPtrIK15OSMetaClassBaseE __ZN12OSOrderedSet13setLastObjectEPK15OSMetaClassBase +__ZN12OSOrderedSet13setLastObjectERK11OSSharedPtrIK15OSMetaClassBaseE __ZN12OSOrderedSet14copyCollectionEP12OSDictionary __ZN12OSOrderedSet14ensureCapacityEj __ZN12OSOrderedSet14getOrderingRefEv __ZN12OSOrderedSet14setFirstObjectEPK15OSMetaClassBase +__ZN12OSOrderedSet14setFirstObjectERK11OSSharedPtrIK15OSMetaClassBaseE __ZN12OSOrderedSet15flushCollectionEv __ZN12OSOrderedSet20setCapacityIncrementEj __ZN12OSOrderedSet4freeEv @@ -173,31 +199,48 @@ __ZN12OSOrderedSet9MetaClassC1Ev __ZN12OSOrderedSet9MetaClassC2Ev __ZN12OSOrderedSet9metaClassE __ZN12OSOrderedSet9setObjectEPK15OSMetaClassBase +__ZN12OSOrderedSet9setObjectERK11OSSharedPtrIK15OSMetaClassBaseE __ZN12OSOrderedSet9setObjectEjPK15OSMetaClassBase +__ZN12OSOrderedSet9setObjectEjRK11OSSharedPtrIK15OSMetaClassBaseE __ZN12OSOrderedSetC1EPK11OSMetaClass __ZN12OSOrderedSetC1Ev __ZN12OSOrderedSetC2EPK11OSMetaClass __ZN12OSOrderedSetC2Ev __ZN12OSOrderedSetD0Ev __ZN12OSOrderedSetD2Ev +__ZN12OSOrderedSetdlEPvm +__ZN12OSOrderedSetnwEm __ZN12OSSerializer10gMetaClassE __ZN12OSSerializer10superClassE __ZN12OSSerializer9MetaClassC1Ev __ZN12OSSerializer9MetaClassC2Ev __ZN12OSSerializer9forTargetEPvPFbS0_S0_P11OSSerializeES0_ __ZN12OSSerializer9metaClassE +__ZN12OSSerializer9withBlockEU13block_pointerFbP11OSSerializeE __ZN12OSSerializerC1EPK11OSMetaClass __ZN12OSSerializerC1Ev __ZN12OSSerializerC2EPK11OSMetaClass __ZN12OSSerializerC2Ev __ZN12OSSerializerD0Ev __ZN12OSSerializerD2Ev +__ZN12OSSerializerdlEPvm +__ZN12OSSerializernwEm +__ZN15IODispatchQueue8DispatchE5IORPC +__ZN15IODispatchQueue9MetaClass8DispatchE5IORPC __ZN15OSMetaClassBase12safeMetaCastEPKS_PK11OSMetaClass __ZN15OSMetaClassBase13checkTypeInstEPKS_S1_ 
+__ZN15OSMetaClassBase16requiredMetaCastEPKS_PK11OSMetaClass +__ZN15OSMetaClassBase6InvokeE5IORPC +__ZN15OSMetaClassBase8DispatchE5IORPC __ZN15OSMetaClassBaseC2Ev __ZN15OSMetaClassBaseD2Ev __ZN15OSMetaClassMetaC1Ev __ZN15OSMetaClassMetaC2Ev +__ZN15OSUserMetaClass8DispatchE5IORPC +__ZN16IODispatchSource8DispatchE5IORPC +__ZN16IODispatchSource9MetaClass8DispatchE5IORPC +__ZN18IOMemoryDescriptor8DispatchE5IORPC +__ZN18IOMemoryDescriptor9MetaClass8DispatchE5IORPC __ZN20OSCollectionIterator10gMetaClassE __ZN20OSCollectionIterator10superClassE __ZN20OSCollectionIterator13getNextObjectEv @@ -215,12 +258,19 @@ __ZN20OSCollectionIteratorC2EPK11OSMetaClass __ZN20OSCollectionIteratorC2Ev __ZN20OSCollectionIteratorD0Ev __ZN20OSCollectionIteratorD2Ev +__ZN20OSCollectionIteratordlEPvm +__ZN20OSCollectionIteratornwEm +__ZN24IOBufferMemoryDescriptor8DispatchE5IORPC +__ZN24IOBufferMemoryDescriptor9MetaClass8DispatchE5IORPC +__ZN25IOInterruptDispatchSource8DispatchE5IORPC +__ZN25IOInterruptDispatchSource9MetaClass8DispatchE5IORPC __ZN5OSSet10gMetaClassE __ZN5OSSet10setOptionsEjjPv __ZN5OSSet10superClassE __ZN5OSSet11initWithSetEPKS_j __ZN5OSSet11withObjectsEPPK8OSObjectjj __ZN5OSSet12removeObjectEPK15OSMetaClassBase +__ZN5OSSet12removeObjectERK11OSSharedPtrIK15OSMetaClassBaseE __ZN5OSSet12withCapacityEj __ZN5OSSet13initWithArrayEPK7OSArrayj __ZN5OSSet14copyCollectionEP12OSDictionary @@ -237,6 +287,7 @@ __ZN5OSSet9MetaClassC1Ev __ZN5OSSet9MetaClassC2Ev __ZN5OSSet9metaClassE __ZN5OSSet9setObjectEPK15OSMetaClassBase +__ZN5OSSet9setObjectERK11OSSharedPtrIK15OSMetaClassBaseE __ZN5OSSet9withArrayEPK7OSArrayj __ZN5OSSetC1EPK11OSMetaClass __ZN5OSSetC1Ev @@ -244,6 +295,8 @@ __ZN5OSSetC2EPK11OSMetaClass __ZN5OSSetC2Ev __ZN5OSSetD0Ev __ZN5OSSetD2Ev +__ZN5OSSetdlEPvm +__ZN5OSSetnwEm __ZN6OSData10appendByteEhj __ZN6OSData10gMetaClassE __ZN6OSData10superClassE @@ -272,6 +325,8 @@ __ZN6OSDataC2EPK11OSMetaClass __ZN6OSDataC2Ev __ZN6OSDataD0Ev __ZN6OSDataD2Ev +__ZN6OSDatadlEPvm +__ZN6OSDatanwEm __ZN7OSArray10gMetaClassE __ZN7OSArray10setOptionsEjjPv __ZN7OSArray10superClassE @@ -280,6 +335,7 @@ __ZN7OSArray12removeObjectEj __ZN7OSArray12withCapacityEj __ZN7OSArray13initWithArrayEPKS_j __ZN7OSArray13replaceObjectEjPK15OSMetaClassBase +__ZN7OSArray13replaceObjectEjRK11OSSharedPtrIK15OSMetaClassBaseE __ZN7OSArray14copyCollectionEP12OSDictionary __ZN7OSArray14ensureCapacityEj __ZN7OSArray15flushCollectionEv @@ -292,7 +348,9 @@ __ZN7OSArray9MetaClassC1Ev __ZN7OSArray9MetaClassC2Ev __ZN7OSArray9metaClassE __ZN7OSArray9setObjectEPK15OSMetaClassBase +__ZN7OSArray9setObjectERK11OSSharedPtrIK15OSMetaClassBaseE __ZN7OSArray9setObjectEjPK15OSMetaClassBase +__ZN7OSArray9setObjectEjRK11OSSharedPtrIK15OSMetaClassBaseE __ZN7OSArray9withArrayEPKS_j __ZN7OSArrayC1EPK11OSMetaClass __ZN7OSArrayC1Ev @@ -300,6 +358,14 @@ __ZN7OSArrayC2EPK11OSMetaClass __ZN7OSArrayC2Ev __ZN7OSArrayD0Ev __ZN7OSArrayD2Ev +__ZN7OSArraydlEPvm +__ZN7OSArraynwEm +__ZN8OSAction12GetReferenceEv +__ZN8OSAction18CreateWithTypeNameEP8OSObjectyymP8OSStringPPS_ +__ZN8OSAction6CreateEP8OSObjectyymPPS_ +__ZN8OSAction8DispatchE5IORPC +__ZN8OSAction9MetaClass8DispatchE5IORPC +__ZN8OSAction9metaClassE __ZN8OSNumber10gMetaClassE __ZN8OSNumber10superClassE __ZN8OSNumber10withNumberEPKcj @@ -318,12 +384,17 @@ __ZN8OSNumberC2EPK11OSMetaClass __ZN8OSNumberC2Ev __ZN8OSNumberD0Ev __ZN8OSNumberD2Ev +__ZN8OSNumberdlEPvm +__ZN8OSNumbernwEm __ZN8OSObject10gMetaClassE __ZN8OSObject10superClassE __ZN8OSObject4freeEv __ZN8OSObject4initEv +__ZN8OSObject8DispatchE5IORPC 
+__ZN8OSObject9MetaClass8DispatchE5IORPC __ZN8OSObject9MetaClassC1Ev __ZN8OSObject9MetaClassC2Ev +__ZN8OSObject9_DispatchEPS_5IORPC __ZN8OSObject9metaClassE __ZN8OSObjectC1EPK11OSMetaClass __ZN8OSObjectC1Ev @@ -331,8 +402,8 @@ __ZN8OSObjectC2EPK11OSMetaClass __ZN8OSObjectC2Ev __ZN8OSObjectD0Ev __ZN8OSObjectD2Ev -__ZN8OSObjectdlEPvm -__ZN8OSObjectnwEm +__ZN8OSObjectdlEPvm:_OSObject_operator_delete_external +__ZN8OSObjectnwEm:_OSObject_operator_new_external __ZN8OSString10gMetaClassE __ZN8OSString10superClassE __ZN8OSString10withStringEPKS_ @@ -352,6 +423,8 @@ __ZN8OSStringC2EPK11OSMetaClass __ZN8OSStringC2Ev __ZN8OSStringD0Ev __ZN8OSStringD2Ev +__ZN8OSStringdlEPvm +__ZN8OSStringnwEm __ZN8OSSymbol10gMetaClassE __ZN8OSSymbol10initializeEv __ZN8OSSymbol10superClassE @@ -374,6 +447,11 @@ __ZN8OSSymbolC2EPK11OSMetaClass __ZN8OSSymbolC2Ev __ZN8OSSymbolD0Ev __ZN8OSSymbolD2Ev +__ZN8OSSymboldlEPvm +__ZN8OSSymbolnwEm +__ZN9IOService8DispatchE5IORPC +__ZN9IOService9MetaClass8DispatchE5IORPC +__ZN9IOService9_DispatchEPS_5IORPC __ZN9OSBoolean10gMetaClassE __ZN9OSBoolean10initializeEv __ZN9OSBoolean10superClassE @@ -388,6 +466,8 @@ __ZN9OSBooleanC2EPK11OSMetaClass __ZN9OSBooleanC2Ev __ZN9OSBooleanD0Ev __ZN9OSBooleanD2Ev +__ZN9OSBooleandlEPvm +__ZN9OSBooleannwEm __ZNK10OSIterator12getMetaClassEv __ZNK10OSIterator9MetaClass5allocEv __ZNK11OSMetaClass12getClassNameEv @@ -591,12 +671,12 @@ ___memmove_chk ___memset_chk ___stack_chk_fail ___stack_chk_guard -___strlcpy_chk +___strcat_chk +___strcpy_chk ___strlcat_chk -___strncpy_chk +___strlcpy_chk ___strncat_chk -___strcpy_chk -___strcat_chk +___strncpy_chk __os_log_default __os_log_internal _adler32 @@ -624,6 +704,7 @@ _ffs _flush_dcache _flush_dcache64 _gOSKextUnresolved +_img4_interface_register _inet_ntop _inflate _inflateEnd @@ -639,9 +720,9 @@ _itoa _kOSBooleanFalse _kOSBooleanTrue _kdp_lck_spin_is_acquired -_kern_os_free -_kern_os_malloc -_kern_os_realloc +_kern_os_free:_kern_os_free_external +_kern_os_malloc:_kern_os_malloc_external +_kern_os_realloc:_kern_os_realloc_external _kext_assertions_enable _kmod_info:_invalid_kmod_info _kprintf @@ -695,11 +776,11 @@ _memset_s _ml_at_interrupt_context _ml_get_interrupts_enabled _ml_set_interrupts_enabled +_os_log_coprocessor +_os_log_coprocessor_register _os_log_create _os_log_debug_enabled _os_log_info_enabled -_os_release -_os_retain _os_ref_init_count_external:_os_ref_init_count_internal _os_ref_release_barrier_external:_os_ref_release_barrier_internal _os_ref_release_external:_os_ref_release_internal @@ -708,6 +789,8 @@ _os_ref_release_relaxed_external:_os_ref_release_relaxed_internal _os_ref_retain_external:_os_ref_retain_internal _os_ref_retain_locked_external:_os_ref_retain_locked_internal _os_ref_retain_try_external:_os_ref_retain_try_internal +_os_release +_os_retain _osrelease _ostype _page_mask @@ -716,11 +799,11 @@ _page_size _panic _printf _random_buf +_scnprintf _sha1_init:_SHA1Init _sha1_loop:_SHA1Update _sha1_result:_SHA1Final_r _snprintf -_scnprintf _sscanf _strcasecmp _strchr @@ -734,6 +817,7 @@ _strncmp _strncpy _strnlen _strprefix +_strsep _strtol _strtoq _strtoul @@ -758,48 +842,8 @@ _version_revision _version_stage _version_variant _vprintf -_vsnprintf _vscnprintf +_vsnprintf _vsscanf _zError _zlibVersion - -__Block_copy -__Block_release -__NSConcreteAutoBlock -__NSConcreteFinalizingBlock -__NSConcreteGlobalBlock -__NSConcreteMallocBlock -__NSConcreteStackBlock -__NSConcreteWeakBlockVariable -__ZN12OSCollection14iterateObjectsEPvPFbS0_P8OSObjectE 
-__ZN12OSCollection14iterateObjectsEU13block_pointerFbP8OSObjectE -__ZN12OSDictionary14iterateObjectsEPvPFbS0_PK8OSSymbolP8OSObjectE -__ZN12OSDictionary14iterateObjectsEU13block_pointerFbPK8OSSymbolP8OSObjectE -__ZN12OSSerializer9withBlockEU13block_pointerFbP11OSSerializeE - -__ZN15IODispatchQueue8DispatchE5IORPC -__ZN15IODispatchQueue9MetaClass8DispatchE5IORPC -__ZN15OSMetaClassBase8DispatchE5IORPC -__ZN15OSUserMetaClass8DispatchE5IORPC -__ZN16IODispatchSource8DispatchE5IORPC -__ZN16IODispatchSource9MetaClass8DispatchE5IORPC -__ZN18IOMemoryDescriptor8DispatchE5IORPC -__ZN18IOMemoryDescriptor9MetaClass8DispatchE5IORPC -__ZN24IOBufferMemoryDescriptor8DispatchE5IORPC -__ZN24IOBufferMemoryDescriptor9MetaClass8DispatchE5IORPC -__ZN25IOInterruptDispatchSource8DispatchE5IORPC -__ZN25IOInterruptDispatchSource9MetaClass8DispatchE5IORPC -__ZN8OSAction8DispatchE5IORPC -__ZN8OSAction9MetaClass8DispatchE5IORPC -__ZN8OSObject8DispatchE5IORPC -__ZN8OSObject9MetaClass8DispatchE5IORPC -__ZN9IOService8DispatchE5IORPC -__ZN9IOService9MetaClass8DispatchE5IORPC -__ZN8OSAction9metaClassE -__ZN15OSMetaClassBase6InvokeE5IORPC -__ZN8OSObject9_DispatchEPS_5IORPC -__ZN9IOService9_DispatchEPS_5IORPC - -__ZN8OSAction12GetReferenceEv -__ZN8OSAction6CreateEP8OSObjectyymPPS_ diff --git a/config/Libkern.x86_64.MacOSX.exports b/config/Libkern.x86_64.MacOSX.exports new file mode 100644 index 000000000..22fb540f1 --- /dev/null +++ b/config/Libkern.x86_64.MacOSX.exports @@ -0,0 +1,125 @@ +__ZN10OSIterator20_RESERVEDOSIterator0Ev +__ZN10OSIterator20_RESERVEDOSIterator1Ev +__ZN10OSIterator20_RESERVEDOSIterator2Ev +__ZN10OSIterator20_RESERVEDOSIterator3Ev +__ZN11OSMetaClass21_RESERVEDOSMetaClass0Ev +__ZN11OSMetaClass21_RESERVEDOSMetaClass1Ev +__ZN11OSMetaClass21_RESERVEDOSMetaClass2Ev +__ZN11OSMetaClass21_RESERVEDOSMetaClass3Ev +__ZN11OSMetaClass21_RESERVEDOSMetaClass4Ev +__ZN11OSMetaClass21_RESERVEDOSMetaClass5Ev +__ZN11OSMetaClass21_RESERVEDOSMetaClass6Ev +__ZN11OSMetaClass21_RESERVEDOSMetaClass7Ev +__ZN11OSSerialize21_RESERVEDOSSerialize0Ev +__ZN11OSSerialize21_RESERVEDOSSerialize1Ev +__ZN11OSSerialize21_RESERVEDOSSerialize2Ev +__ZN11OSSerialize21_RESERVEDOSSerialize3Ev +__ZN11OSSerialize21_RESERVEDOSSerialize4Ev +__ZN11OSSerialize21_RESERVEDOSSerialize5Ev +__ZN11OSSerialize21_RESERVEDOSSerialize6Ev +__ZN11OSSerialize21_RESERVEDOSSerialize7Ev +__ZN12OSCollection22_RESERVEDOSCollection2Ev +__ZN12OSCollection22_RESERVEDOSCollection3Ev +__ZN12OSCollection22_RESERVEDOSCollection4Ev +__ZN12OSCollection22_RESERVEDOSCollection5Ev +__ZN12OSCollection22_RESERVEDOSCollection6Ev +__ZN12OSCollection22_RESERVEDOSCollection7Ev +__ZN12OSDictionary22_RESERVEDOSDictionary0Ev +__ZN12OSDictionary22_RESERVEDOSDictionary1Ev +__ZN12OSDictionary22_RESERVEDOSDictionary2Ev +__ZN12OSDictionary22_RESERVEDOSDictionary3Ev +__ZN12OSDictionary22_RESERVEDOSDictionary4Ev +__ZN12OSDictionary22_RESERVEDOSDictionary5Ev +__ZN12OSDictionary22_RESERVEDOSDictionary6Ev +__ZN12OSDictionary22_RESERVEDOSDictionary7Ev +__ZN12OSOrderedSet22_RESERVEDOSOrderedSet0Ev +__ZN12OSOrderedSet22_RESERVEDOSOrderedSet1Ev +__ZN12OSOrderedSet22_RESERVEDOSOrderedSet2Ev +__ZN12OSOrderedSet22_RESERVEDOSOrderedSet3Ev +__ZN12OSOrderedSet22_RESERVEDOSOrderedSet4Ev +__ZN12OSOrderedSet22_RESERVEDOSOrderedSet5Ev +__ZN12OSOrderedSet22_RESERVEDOSOrderedSet6Ev +__ZN12OSOrderedSet22_RESERVEDOSOrderedSet7Ev +__ZN15OSMetaClassBase25_RESERVEDOSMetaClassBase4Ev +__ZN15OSMetaClassBase25_RESERVEDOSMetaClassBase5Ev +__ZN15OSMetaClassBase25_RESERVEDOSMetaClassBase6Ev 
+__ZN15OSMetaClassBase25_RESERVEDOSMetaClassBase7Ev +__ZN5OSSet15_RESERVEDOSSet0Ev +__ZN5OSSet15_RESERVEDOSSet1Ev +__ZN5OSSet15_RESERVEDOSSet2Ev +__ZN5OSSet15_RESERVEDOSSet3Ev +__ZN5OSSet15_RESERVEDOSSet4Ev +__ZN5OSSet15_RESERVEDOSSet5Ev +__ZN5OSSet15_RESERVEDOSSet6Ev +__ZN5OSSet15_RESERVEDOSSet7Ev +__ZN6OSData16_RESERVEDOSData1Ev +__ZN6OSData16_RESERVEDOSData2Ev +__ZN6OSData16_RESERVEDOSData3Ev +__ZN6OSData16_RESERVEDOSData4Ev +__ZN6OSData16_RESERVEDOSData5Ev +__ZN6OSData16_RESERVEDOSData6Ev +__ZN6OSData16_RESERVEDOSData7Ev +__ZN7OSArray17_RESERVEDOSArray0Ev +__ZN7OSArray17_RESERVEDOSArray1Ev +__ZN7OSArray17_RESERVEDOSArray2Ev +__ZN7OSArray17_RESERVEDOSArray3Ev +__ZN7OSArray17_RESERVEDOSArray4Ev +__ZN7OSArray17_RESERVEDOSArray5Ev +__ZN7OSArray17_RESERVEDOSArray6Ev +__ZN7OSArray17_RESERVEDOSArray7Ev +__ZN8OSNumber18_RESERVEDOSNumber0Ev +__ZN8OSNumber18_RESERVEDOSNumber1Ev +__ZN8OSNumber18_RESERVEDOSNumber2Ev +__ZN8OSNumber18_RESERVEDOSNumber3Ev +__ZN8OSNumber18_RESERVEDOSNumber4Ev +__ZN8OSNumber18_RESERVEDOSNumber5Ev +__ZN8OSNumber18_RESERVEDOSNumber6Ev +__ZN8OSNumber18_RESERVEDOSNumber7Ev +__ZN8OSObject18_RESERVEDOSObject0Ev +__ZN8OSObject18_RESERVEDOSObject1Ev +__ZN8OSObject18_RESERVEDOSObject2Ev +__ZN8OSObject18_RESERVEDOSObject3Ev +__ZN8OSObject18_RESERVEDOSObject4Ev +__ZN8OSObject18_RESERVEDOSObject5Ev +__ZN8OSObject18_RESERVEDOSObject6Ev +__ZN8OSObject18_RESERVEDOSObject7Ev +__ZN8OSObject18_RESERVEDOSObject8Ev +__ZN8OSObject18_RESERVEDOSObject9Ev +__ZN8OSObject19_RESERVEDOSObject10Ev +__ZN8OSObject19_RESERVEDOSObject11Ev +__ZN8OSObject19_RESERVEDOSObject12Ev +__ZN8OSObject19_RESERVEDOSObject13Ev +__ZN8OSObject19_RESERVEDOSObject14Ev +__ZN8OSObject19_RESERVEDOSObject15Ev +__ZN8OSString18_RESERVEDOSString0Ev +__ZN8OSString18_RESERVEDOSString1Ev +__ZN8OSString18_RESERVEDOSString2Ev +__ZN8OSString18_RESERVEDOSString3Ev +__ZN8OSString18_RESERVEDOSString4Ev +__ZN8OSString18_RESERVEDOSString5Ev +__ZN8OSString18_RESERVEDOSString6Ev +__ZN8OSString18_RESERVEDOSString7Ev +__ZN8OSString18_RESERVEDOSString8Ev +__ZN8OSString18_RESERVEDOSString9Ev +__ZN8OSString19_RESERVEDOSString10Ev +__ZN8OSString19_RESERVEDOSString11Ev +__ZN8OSString19_RESERVEDOSString12Ev +__ZN8OSString19_RESERVEDOSString13Ev +__ZN8OSString19_RESERVEDOSString14Ev +__ZN8OSString19_RESERVEDOSString15Ev +__ZN8OSSymbol18_RESERVEDOSSymbol0Ev +__ZN8OSSymbol18_RESERVEDOSSymbol1Ev +__ZN8OSSymbol18_RESERVEDOSSymbol2Ev +__ZN8OSSymbol18_RESERVEDOSSymbol3Ev +__ZN8OSSymbol18_RESERVEDOSSymbol4Ev +__ZN8OSSymbol18_RESERVEDOSSymbol5Ev +__ZN8OSSymbol18_RESERVEDOSSymbol6Ev +__ZN8OSSymbol18_RESERVEDOSSymbol7Ev +__ZN9OSBoolean19_RESERVEDOSBoolean0Ev +__ZN9OSBoolean19_RESERVEDOSBoolean1Ev +__ZN9OSBoolean19_RESERVEDOSBoolean2Ev +__ZN9OSBoolean19_RESERVEDOSBoolean3Ev +__ZN9OSBoolean19_RESERVEDOSBoolean4Ev +__ZN9OSBoolean19_RESERVEDOSBoolean5Ev +__ZN9OSBoolean19_RESERVEDOSBoolean6Ev +__ZN9OSBoolean19_RESERVEDOSBoolean7Ev diff --git a/config/Libkern.x86_64.exports b/config/Libkern.x86_64.exports index 48690ba94..95b57f43b 100644 --- a/config/Libkern.x86_64.exports +++ b/config/Libkern.x86_64.exports @@ -1,133 +1,10 @@ _OSAddAtomic64 _OSCompareAndSwap64 -__ZN10OSIterator20_RESERVEDOSIterator0Ev -__ZN10OSIterator20_RESERVEDOSIterator1Ev -__ZN10OSIterator20_RESERVEDOSIterator2Ev -__ZN10OSIterator20_RESERVEDOSIterator3Ev -__ZN11OSMetaClass21_RESERVEDOSMetaClass0Ev -__ZN11OSMetaClass21_RESERVEDOSMetaClass1Ev -__ZN11OSMetaClass21_RESERVEDOSMetaClass2Ev -__ZN11OSMetaClass21_RESERVEDOSMetaClass3Ev -__ZN11OSMetaClass21_RESERVEDOSMetaClass4Ev 
-__ZN11OSMetaClass21_RESERVEDOSMetaClass5Ev -__ZN11OSMetaClass21_RESERVEDOSMetaClass6Ev -__ZN11OSMetaClass21_RESERVEDOSMetaClass7Ev -__ZN11OSSerialize21_RESERVEDOSSerialize0Ev -__ZN11OSSerialize21_RESERVEDOSSerialize1Ev -__ZN11OSSerialize21_RESERVEDOSSerialize2Ev -__ZN11OSSerialize21_RESERVEDOSSerialize3Ev -__ZN11OSSerialize21_RESERVEDOSSerialize4Ev -__ZN11OSSerialize21_RESERVEDOSSerialize5Ev -__ZN11OSSerialize21_RESERVEDOSSerialize6Ev -__ZN11OSSerialize21_RESERVEDOSSerialize7Ev -__ZN12OSCollection22_RESERVEDOSCollection2Ev -__ZN12OSCollection22_RESERVEDOSCollection3Ev -__ZN12OSCollection22_RESERVEDOSCollection4Ev -__ZN12OSCollection22_RESERVEDOSCollection5Ev -__ZN12OSCollection22_RESERVEDOSCollection6Ev -__ZN12OSCollection22_RESERVEDOSCollection7Ev -__ZN12OSDictionary22_RESERVEDOSDictionary0Ev -__ZN12OSDictionary22_RESERVEDOSDictionary1Ev -__ZN12OSDictionary22_RESERVEDOSDictionary2Ev -__ZN12OSDictionary22_RESERVEDOSDictionary3Ev -__ZN12OSDictionary22_RESERVEDOSDictionary4Ev -__ZN12OSDictionary22_RESERVEDOSDictionary5Ev -__ZN12OSDictionary22_RESERVEDOSDictionary6Ev -__ZN12OSDictionary22_RESERVEDOSDictionary7Ev __ZN12OSOrderedSet12withCapacityEjPFiPK15OSMetaClassBaseS2_PvES3_ __ZN12OSOrderedSet16initWithCapacityEjPFiPK15OSMetaClassBaseS2_PvES3_ -__ZN12OSOrderedSet22_RESERVEDOSOrderedSet0Ev -__ZN12OSOrderedSet22_RESERVEDOSOrderedSet1Ev -__ZN12OSOrderedSet22_RESERVEDOSOrderedSet2Ev -__ZN12OSOrderedSet22_RESERVEDOSOrderedSet3Ev -__ZN12OSOrderedSet22_RESERVEDOSOrderedSet4Ev -__ZN12OSOrderedSet22_RESERVEDOSOrderedSet5Ev -__ZN12OSOrderedSet22_RESERVEDOSOrderedSet6Ev -__ZN12OSOrderedSet22_RESERVEDOSOrderedSet7Ev -__ZN15OSMetaClassBase25_RESERVEDOSMetaClassBase4Ev -__ZN15OSMetaClassBase25_RESERVEDOSMetaClassBase5Ev -__ZN15OSMetaClassBase25_RESERVEDOSMetaClassBase6Ev -__ZN15OSMetaClassBase25_RESERVEDOSMetaClassBase7Ev -__ZN5OSSet15_RESERVEDOSSet0Ev -__ZN5OSSet15_RESERVEDOSSet1Ev -__ZN5OSSet15_RESERVEDOSSet2Ev -__ZN5OSSet15_RESERVEDOSSet3Ev -__ZN5OSSet15_RESERVEDOSSet4Ev -__ZN5OSSet15_RESERVEDOSSet5Ev -__ZN5OSSet15_RESERVEDOSSet6Ev -__ZN5OSSet15_RESERVEDOSSet7Ev -__ZN6OSData16_RESERVEDOSData1Ev -__ZN6OSData16_RESERVEDOSData2Ev -__ZN6OSData16_RESERVEDOSData3Ev -__ZN6OSData16_RESERVEDOSData4Ev -__ZN6OSData16_RESERVEDOSData5Ev -__ZN6OSData16_RESERVEDOSData6Ev -__ZN6OSData16_RESERVEDOSData7Ev -__ZN7OSArray17_RESERVEDOSArray0Ev -__ZN7OSArray17_RESERVEDOSArray1Ev -__ZN7OSArray17_RESERVEDOSArray2Ev -__ZN7OSArray17_RESERVEDOSArray3Ev -__ZN7OSArray17_RESERVEDOSArray4Ev -__ZN7OSArray17_RESERVEDOSArray5Ev -__ZN7OSArray17_RESERVEDOSArray6Ev -__ZN7OSArray17_RESERVEDOSArray7Ev -__ZN8OSNumber18_RESERVEDOSNumber0Ev -__ZN8OSNumber18_RESERVEDOSNumber1Ev -__ZN8OSNumber18_RESERVEDOSNumber2Ev -__ZN8OSNumber18_RESERVEDOSNumber3Ev -__ZN8OSNumber18_RESERVEDOSNumber4Ev -__ZN8OSNumber18_RESERVEDOSNumber5Ev -__ZN8OSNumber18_RESERVEDOSNumber6Ev -__ZN8OSNumber18_RESERVEDOSNumber7Ev -__ZN8OSObject18_RESERVEDOSObject0Ev -__ZN8OSObject18_RESERVEDOSObject1Ev -__ZN8OSObject18_RESERVEDOSObject2Ev -__ZN8OSObject18_RESERVEDOSObject3Ev -__ZN8OSObject18_RESERVEDOSObject4Ev -__ZN8OSObject18_RESERVEDOSObject5Ev -__ZN8OSObject18_RESERVEDOSObject6Ev -__ZN8OSObject18_RESERVEDOSObject7Ev -__ZN8OSObject18_RESERVEDOSObject8Ev -__ZN8OSObject18_RESERVEDOSObject9Ev -__ZN8OSObject19_RESERVEDOSObject10Ev -__ZN8OSObject19_RESERVEDOSObject11Ev -__ZN8OSObject19_RESERVEDOSObject12Ev -__ZN8OSObject19_RESERVEDOSObject13Ev -__ZN8OSObject19_RESERVEDOSObject14Ev -__ZN8OSObject19_RESERVEDOSObject15Ev -__ZN8OSString18_RESERVEDOSString0Ev 
-__ZN8OSString18_RESERVEDOSString1Ev -__ZN8OSString18_RESERVEDOSString2Ev -__ZN8OSString18_RESERVEDOSString3Ev -__ZN8OSString18_RESERVEDOSString4Ev -__ZN8OSString18_RESERVEDOSString5Ev -__ZN8OSString18_RESERVEDOSString6Ev -__ZN8OSString18_RESERVEDOSString7Ev -__ZN8OSString18_RESERVEDOSString8Ev -__ZN8OSString18_RESERVEDOSString9Ev -__ZN8OSString19_RESERVEDOSString10Ev -__ZN8OSString19_RESERVEDOSString11Ev -__ZN8OSString19_RESERVEDOSString12Ev -__ZN8OSString19_RESERVEDOSString13Ev -__ZN8OSString19_RESERVEDOSString14Ev -__ZN8OSString19_RESERVEDOSString15Ev -__ZN8OSSymbol18_RESERVEDOSSymbol0Ev -__ZN8OSSymbol18_RESERVEDOSSymbol1Ev -__ZN8OSSymbol18_RESERVEDOSSymbol2Ev -__ZN8OSSymbol18_RESERVEDOSSymbol3Ev -__ZN8OSSymbol18_RESERVEDOSSymbol4Ev -__ZN8OSSymbol18_RESERVEDOSSymbol5Ev -__ZN8OSSymbol18_RESERVEDOSSymbol6Ev -__ZN8OSSymbol18_RESERVEDOSSymbol7Ev -__ZN9OSBoolean19_RESERVEDOSBoolean0Ev -__ZN9OSBoolean19_RESERVEDOSBoolean1Ev -__ZN9OSBoolean19_RESERVEDOSBoolean2Ev -__ZN9OSBoolean19_RESERVEDOSBoolean3Ev -__ZN9OSBoolean19_RESERVEDOSBoolean4Ev -__ZN9OSBoolean19_RESERVEDOSBoolean5Ev -__ZN9OSBoolean19_RESERVEDOSBoolean6Ev -__ZN9OSBoolean19_RESERVEDOSBoolean7Ev _sprintf _strcat _strcpy _vsprintf + +__ZN15OSMetaClassBase25_RESERVEDOSMetaClassBase3Ev:__ZN15OSMetaClassBase8DispatchE5IORPC diff --git a/config/MACFramework.exports b/config/MACFramework.exports index afe5c8e1d..ee11bf004 100644 --- a/config/MACFramework.exports +++ b/config/MACFramework.exports @@ -3,32 +3,7 @@ _mac_policy_unregister _mac_vnop_getxattr _mac_vnop_setxattr _mac_vnop_removexattr -_mac_file_getxattr -_mac_file_setxattr -_mac_file_removexattr -_mac_label_get -_mac_label_set - _mac_audit_text - -_mac_iokit_check_hid_control -_mac_mount_check_snapshot_mount -_mac_vnode_check_trigger_resolve - -_sbuf_cat -_sbuf_data -_sbuf_delete -_sbuf_finish -_sbuf_len -_sbuf_new -_sbuf_overflowed -_sbuf_printf -_sbuf_putc -_sbuf_vprintf -_strsep -_sysctl__security_mac_children _VNOP_SETXATTR _VNOP_GETXATTR -_mac_vnode_label_allocate -_mac_vnode_label_get -_mac_vnode_label_set +_mac_schedule_userret diff --git a/config/MACFramework.x86_64.exports b/config/MACFramework.x86_64.exports index b296564fb..06f0b9286 100644 --- a/config/MACFramework.x86_64.exports +++ b/config/MACFramework.x86_64.exports @@ -1,10 +1,2 @@ _kau_will_audit _mac_do_machexc -_mac_kalloc -_mac_kalloc_noblock -_mac_kfree -_mac_mbuf_alloc -_mac_mbuf_free -_mac_schedule_userret -_mac_unwire -_mac_wire diff --git a/config/MASTER b/config/MASTER index f0900b345..d429f82a2 100644 --- a/config/MASTER +++ b/config/MASTER @@ -84,7 +84,6 @@ options DUMMYNET # dummynet support # options TRAFFIC_MGT # traffic management support # options MULTICAST # Internet Protocol Class-D $ options TCPDEBUG # TCP debug # -options TCP_DROP_SYNFIN # Drop TCP packets with SYN+FIN set # options ICMP_BANDLIM # ICMP bandwidth limiting sysctl options IFNET_INPUT_SANITY_CHK # allow dlil/ifnet input sanity check # options MULTIPATH # Multipath domain # @@ -106,11 +105,11 @@ options OLD_SEMWAIT_SIGNAL # old semwait_signal handler # # 4.4 general kernel # -options SOCKETS # socket support # +options SOCKETS # socket support # options DIAGNOSTIC # diagnostics # options PROFILE # kernel profiling # options SENDFILE # sendfile # -options NETWORKING # networking layer # +options NETWORKING # networking layer # options CONFIG_FSE # file system events # options CONFIG_IMAGEBOOT # local image boot # options CONFIG_LOCKERBOOT # locker boot # @@ -130,6 +129,7 @@ options FDESC # fdesc_fs support # options DEVFS # devfs 
support # options ROUTEFS # routefs support # options NULLFS # nullfs support # +options BINDFS # bindfs support # options FS_COMPRESSION # fs compression # options CONFIG_DEV_KMEM # /dev/kmem device for reading KVA # @@ -149,7 +149,9 @@ options CONFIG_MNT_ROOTSNAP # allow rooting from snapshot # options CONFIG_FIRMLINKS # support "firmlinks" # options CONFIG_MOUNT_VM # mount VM volume on startup # +options CONFIG_MOUNT_PREBOOTRECOVERY # mount Preboot and/or Recovery volume on startup # options CONFIG_DATALESS_FILES # support dataless file materialization # +options CONFIG_BASESYSTEMROOT # mount BaseSystem as initial root filesystem on some kinds of startup # # # NFS support @@ -168,13 +170,8 @@ profile # build a profiling kernel # # # IPv6 Support # -options "INET6" # kernel IPv6 Support # -options IPV6SEND # Secure Neighbor Discovery # options IPSEC # IP security # options IPSEC_ESP # IP security # -options "IPV6FIREWALL" # IPv6 Firewall Feature # -options "IPV6FIREWALL_DEFAULT_TO_ACCEPT" #IPv6 Firewall Feature # -#options "IPV6FIREWALL_VERBOSE" #IPv6 Firewall Feature # pseudo-device gif 1 # pseudo-device dummy 2 # @@ -296,10 +293,10 @@ options CONFIG_MFCTBLSIZ=16 # # # configurable kernel message buffer size # -options CONFIG_MSG_BSIZE_REL=4096 # -options CONFIG_MSG_BSIZE_DEV=4096 # -options CONFIG_MSG_BSIZE_REL=16384 # -options CONFIG_MSG_BSIZE_DEV=131072 # +options CONFIG_MSG_BSIZE_REL=4096 # +options CONFIG_MSG_BSIZE_DEV=4096 # +options CONFIG_MSG_BSIZE_REL=16384 # +options CONFIG_MSG_BSIZE_DEV=131072 # options CONFIG_MSG_BSIZE=CONFIG_MSG_BSIZE_REL # options CONFIG_MSG_BSIZE=CONFIG_MSG_BSIZE_DEV # @@ -317,12 +314,17 @@ options CONFIG_IPC_TABLE_ENTRIES_STEPS=256 # 300714 entries # options CONFIG_NO_KPRINTF_STRINGS # +# support vsprintf (deprecated in favor of vsnprintf) +options CONFIG_VSPRINTF # + # # configurable kernel - general switch to say we are building for an # embedded device # options CONFIG_EMBEDDED # +options CONFIG_ARROW # + # support dynamic signing of code # @@ -332,6 +334,14 @@ options CONFIG_DYNAMIC_CODE_SIGNING # # options CONFIG_ENFORCE_LIBRARY_VALIDATION # +# support loading a second static trust cache +# +options CONFIG_SECOND_STATIC_TRUST_CACHE # + +# support supplemental signatures +# +options CONFIG_SUPPLEMENTAL_SIGNATURES # + # # code decryption... 
used on embedded for app protection, DSMOS on desktop # @@ -355,6 +365,10 @@ options CONFIG_VPS_DYNAMIC_PRIO # # options CONFIG_MEMORYSTATUS # +# +# enable per-process dirty-status tracking +# +options CONFIG_DIRTYSTATUS_TRACKING # # # enable jetsam - used on embedded # @@ -380,6 +394,11 @@ options CONFIG_FREEZE # options CHECK_CS_VALIDATION_BITMAP # +# +# enable physical writes accounting +# +options CONFIG_PHYS_WRITE_ACCT # + # # enable detectiion of file cache thrashing - used on platforms with # dynamic VM compression enabled @@ -440,11 +459,11 @@ options CONFIG_VNGUARD # # # Ethernet (ARP) # -pseudo-device ether # +pseudo-device ether # # # Network loopback device # -pseudo-device loop # +pseudo-device loop # # # UCB pseudo terminal service # @@ -472,7 +491,7 @@ pseudo-device mdevdevice 1 init mdevinit # # packet filter device # -pseudo-device bpfilter 4 init bpf_init # +pseudo-device bpfilter 4 init bpf_init # # # fsevents device @@ -498,7 +517,7 @@ options IOKITCPP # C++ implementation # options IOKITSTATS # IOKit statistics # options IOTRACKING # IOKit tracking # options CONFIG_SLEEP # # -options CONFIG_MAX_THREADS=64 # IOConfigThread threads +options CONFIG_MAX_THREADS=500 # IOConfigThread threads options NO_KEXTD # options NO_KERNEL_HID # @@ -536,14 +555,20 @@ options PERSONA_DEBUG # Persona debugging # options CONFIG_MACF # Mandatory Access Control Framework # options CONFIG_MACF_SOCKET_SUBSET # MAC socket subest (no labels) # -#options CONFIG_MACF_SOCKET # MAC socket labels # -#options CONFIG_MACF_NET # mbuf # #options CONFIG_MACF_DEBUG # debug # options CONFIG_AUDIT # Kernel auditing # options CONFIG_ARCADE # Arcade validation support # +options CONFIG_SETUID # setuid/setgid support # + +options CONFIG_SECURE_BSD_ROOT # secure BSD root # + +options CONFIG_KAS_INFO # kas_info support # + +options CONFIG_ZALLOC_SEQUESTER # Sequester VA for zones # + # # MACH configuration options. # @@ -581,12 +606,7 @@ options MACH_VM_DEBUG # # # hardclock device driver. # options MACH_MP_DEBUG # # -# -# ZONE_DEBUG keeps track of all zalloc()ed elements to perform further -# operations on each element. 
-# -options ZONE_DEBUG # # -options CONFIG_ZCACHE #Enable per-cpu caching for zones # +options CONFIG_ZCACHE # Enable per-cpu caching for zones # options CONFIG_ZLEAKS # Live zone leak debugging # # @@ -699,6 +719,7 @@ options CONFIG_SCHED_GRRR_CORE # options CONFIG_SCHED_MULTIQ # options CONFIG_SCHED_TIMESHARE_CORE # options CONFIG_CLUTCH # +options CONFIG_SCHED_AUTO_JOIN # options CONFIG_SCHED_IDLE_IN_PLACE # options CONFIG_SCHED_SFI # @@ -719,6 +740,7 @@ options MACH_KDP # KDP # options CONFIG_SERIAL_KDP # KDP over serial # options CONFIG_KDP_INTERACTIVE_DEBUGGING # +options CONFIG_TASKWATCH # # Kernel Power On Self Tests # @@ -747,6 +769,7 @@ options CONFIG_SYSDIAGNOSE # # Configurable Security Restrictions options CONFIG_CSR # +options CONFIG_CSR_FROM_DT # # # Console options @@ -776,6 +799,7 @@ options CONFIG_MACH_BRIDGE_RECV_TIME # # options CONFIG_32BIT_TELEMETRY # # options CONFIG_QUIESCE_COUNTER # Support for _COMM_PAGE_CPU_QUIESCENT_COUNTER # +options CONFIG_ARM_PFZ # Support for PFZ on ARM # # # Sanitizers @@ -784,4 +808,17 @@ options CONFIG_KASAN # options CONFIG_UBSAN # options CONFIG_KSANCOV # +# dark boot support +options CONFIG_DARKBOOT # + +# support for processes delaying idle sleep for pending IO +options CONFIG_DELAY_IDLE_SLEEP # + +# support for storing a 64-bit user supplied value in the proc structure +options CONFIG_PROC_UDATA_STORAGE # + pseudo-device ksancov 1 init ksancov_init_dev # + +# debug instrumentation to catch code that leaves interrupts masked +# for an excessive period of time +options INTERRUPT_MASKED_DEBUG # diff --git a/config/MASTER.arm b/config/MASTER.arm index 6cc8a1b52..2b7602e1f 100644 --- a/config/MASTER.arm +++ b/config/MASTER.arm @@ -16,7 +16,7 @@ # Standard Apple OS Configurations: # -------- ----- -- --------------- # -# KERNEL_BASE = [ arm xsmall config_embedded config_enforce_signed_code config_zcache ] +# KERNEL_BASE = [ arm xsmall msgb_small config_embedded config_enforce_signed_code config_zcache config_darkboot ] # KERNEL_RELEASE = [ KERNEL_BASE ] # KERNEL_DEV = [ KERNEL_BASE development mach_assert config_xnupost proc_ref_debug os_reason_debug ] # KERNEL_DEBUG = [ KERNEL_BASE debug mach_assert config_xnupost config_ltable_stats config_ltable_debug config_waitq_stats config_workloop_debug config_waitq_debug ] @@ -24,12 +24,12 @@ # BSD_RELEASE = [ BSD_BASE no_printf_str no_kprintf_str secure_kernel ] # BSD_DEV = [ BSD_BASE config_imgsrc_access config_lockerboot config_coredump pgo config_vnguard ] # BSD_DEBUG = [ BSD_BASE config_imgsrc_access config_lockerboot config_coredump pgo config_vnguard ] -# FILESYS_BASE = [ devfs fifo fs_compression config_mnt_rootsnap config_protect config_fse routefs namedstreams ] +# FILESYS_BASE = [ devfs fifo fs_compression config_mnt_rootsnap config_protect config_fse routefs namedstreams bindfs] # FILESYS_RELEASE= [ FILESYS_BASE ] # FILESYS_DEV = [ FILESYS_BASE fdesc ] # FILESYS_DEBUG = [ FILESYS_BASE fdesc ] # NFS_DEV = [ nfsclient nfsserver config_nfs_gss ] -# NETWORKING = [ inet tcpdrop_synfin bpfilter inet6 ipv6send if_bridge traffic_mgt dummynet ah_all_crypto if_fake sixlowpan ] +# NETWORKING = [ inet bpfilter if_bridge traffic_mgt dummynet ah_all_crypto if_fake sixlowpan ] # NETWORKING_RELEASE = [ NETWORKING ] # NETWORKING_DEV = [ NETWORKING_RELEASE if_headless ] # NETWORKING_DEBUG = [ NETWORKING_DEV ] @@ -50,22 +50,25 @@ # PERF_DBG_RELEASE=[ PERF_DBG_BASE ist_kdebug ] # PERF_DBG_DEV = [ PERF_DBG_BASE config_dtrace zleaks kdp_interactive_debugging interrupt_masked_debug ] # PERF_DBG_DEBUG = [ 
PERF_DBG_BASE config_dtrace zleaks kdp_interactive_debugging interrupt_masked_debug ] -# MACH_BASE = [ mach slidable vc_progress_white mdebug ipc_debug importance_inheritance config_atm config_coalitions config_library_validation config_iosched config_telemetry config_sysdiagnose config_quiesce_counter ] +# MACH_BASE = [ mach slidable vc_progress_white mdebug ipc_debug importance_inheritance config_atm config_coalitions config_library_validation config_iosched config_telemetry config_sysdiagnose config_quiesce_counter phys_write_acct ] # MACH_RELEASE = [ MACH_BASE config_skip_precise_user_kernel_time debugger_for_zone_info ] # MACH_DEV = [ MACH_BASE task_zone_info config_io_accounting importance_trace config_ledger_interval_max ] # MACH_DEBUG = [ MACH_BASE task_zone_info config_io_accounting importance_trace config_ledger_interval_max importance_debug ] -# SCHED_BASE = [ config_sched_traditional config_sched_multiq config_clutch ] +# SCHED_BASE = [ config_sched_traditional config_sched_multiq config_clutch config_taskwatch ] # SCHED_RELEASE = [ SCHED_BASE ] # SCHED_DEV = [ SCHED_BASE ] # SCHED_DEBUG = [ SCHED_BASE config_sched_grrr config_sched_proto ] -# VM_BASE = [ vm_pressure_events jetsam memorystatus config_code_decryption config_cs_validation_bitmap ] +# VM_BASE = [ vm_pressure_events jetsam memorystatus config_code_decryption config_cs_validation_bitmap dirtystatus_tracking ] # VM_RELEASE = [ VM_BASE ] # VM_DEV = [ VM_BASE dynamic_codesigning ] # VM_DEBUG = [ VM_BASE dynamic_codesigning ] -# SECURITY = [ config_macf ] -# RELEASE = [ KERNEL_RELEASE BSD_RELEASE FILESYS_RELEASE SKYWALK_RELEASE NETWORKING_RELEASE PF_RELEASE MULTIPATH VPN IOKIT_RELEASE LIBKERN_RELEASE PERF_DBG_RELEASE MACH_RELEASE SCHED_RELEASE VM_RELEASE SECURITY ] -# DEVELOPMENT = [ KERNEL_DEV BSD_DEV FILESYS_DEV NFS_DEV SKYWALK_DEV NETWORKING_DEV PF_DEV MULTIPATH VPN IOKIT_DEV LIBKERN_DEV PERF_DBG_DEV MACH_DEV SCHED_DEV VM_DEV SECURITY ] -# DEBUG = [ KERNEL_DEBUG BSD_DEBUG FILESYS_DEBUG SKYWALK_DEBUG NETWORKING_DEBUG PF_DEBUG MULTIPATH VPN IOKIT_DEBUG LIBKERN_DEBUG PERF_DBG_DEBUG MACH_DEBUG SCHED_DEBUG VM_DEBUG SECURITY ] +# SECURITY_BASE = [ config_macf config_secure_bsd_root ] +# SECURITY_RELEASE = [ SECURITY_BASE ] +# SECURITY_DEV = [ SECURITY_BASE config_setuid config_kas_info ] +# SECURITY_DEBUG = [ SECURITY_BASE config_setuid config_kas_info ] +# RELEASE = [ KERNEL_RELEASE BSD_RELEASE FILESYS_RELEASE SKYWALK_RELEASE NETWORKING_RELEASE PF_RELEASE MULTIPATH VPN IOKIT_RELEASE LIBKERN_RELEASE PERF_DBG_RELEASE MACH_RELEASE SCHED_RELEASE VM_RELEASE SECURITY_RELEASE ] +# DEVELOPMENT = [ KERNEL_DEV BSD_DEV FILESYS_DEV NFS_DEV SKYWALK_DEV NETWORKING_DEV PF_DEV MULTIPATH VPN IOKIT_DEV LIBKERN_DEV PERF_DBG_DEV MACH_DEV SCHED_DEV VM_DEV SECURITY_DEV ] +# DEBUG = [ KERNEL_DEBUG BSD_DEBUG FILESYS_DEBUG SKYWALK_DEBUG NETWORKING_DEBUG PF_DEBUG MULTIPATH VPN IOKIT_DEBUG LIBKERN_DEBUG PERF_DBG_DEBUG MACH_DEBUG SCHED_DEBUG VM_DEBUG SECURITY_DEBUG ] # ###################################################################### # @@ -83,5 +86,3 @@ options CONFIG_VNODES=1024 # options CONFIG_FREEZE_SUSPENDED_MIN=4 # options CONFIG_MACH_APPROXIMATE_TIME - -options INTERRUPT_MASKED_DEBUG=1 # # diff --git a/config/MASTER.arm64 b/config/MASTER.arm64 index 110f6a6d6..bfeb9956f 100644 --- a/config/MASTER.arm64 +++ b/config/MASTER.arm64 @@ -16,7 +16,7 @@ # Standard Apple OS Configurations: # -------- ----- -- --------------- # -# KERNEL_BASE = [ arm64 xsmall config_embedded config_enforce_signed_code config_requires_u32_munging config_zcache ] +# 
KERNEL_BASE = [ arm64 xsmall msgb_small config_embedded config_enforce_signed_code config_requires_u32_munging config_zcache config_darkboot ] # KERNEL_RELEASE = [ KERNEL_BASE ] # KERNEL_DEV = [ KERNEL_BASE development mach_assert config_xnupost proc_ref_debug os_reason_debug pgtrace ] # KERNEL_DEBUG = [ KERNEL_BASE debug mach_assert config_xnupost config_ltable_stats config_ltable_debug config_waitq_stats config_workloop_debug config_waitq_debug pgtrace ] @@ -24,14 +24,14 @@ # BSD_RELEASE = [ BSD_BASE no_printf_str no_kprintf_str secure_kernel ] # BSD_DEV = [ BSD_BASE config_netboot config_imgsrc_access config_lockerboot config_coredump pgo config_vnguard ] # BSD_DEBUG = [ BSD_BASE config_netboot config_imgsrc_access config_lockerboot config_coredump pgo config_vnguard ] -# FILESYS_BASE = [ devfs fifo fs_compression config_protect config_mnt_rootsnap config_triggers config_fse routefs namedstreams config_dataless_files ] +# FILESYS_BASE = [ devfs fifo fs_compression config_protect config_mnt_rootsnap config_triggers config_fse routefs namedstreams config_dataless_files bindfs] # FILESYS_RELEASE= [ FILESYS_BASE ] # FILESYS_DEV = [ FILESYS_BASE fdesc ] # FILESYS_DEBUG = [ FILESYS_BASE fdesc ] # NFS_DEV = [ nfsclient nfsserver config_nfs_gss ] # NFS_RELEASE = [ nfsclient ] # NFS_DEBUG = [ nfsclient config_nfs_gss ] -# NETWORKING = [ inet tcpdrop_synfin bpfilter inet6 ipv6send if_bridge traffic_mgt dummynet ah_all_crypto if_fake sixlowpan ] +# NETWORKING = [ inet bpfilter if_bridge traffic_mgt dummynet ah_all_crypto if_fake sixlowpan ] # NETWORKING_RELEASE = [ NETWORKING ] # NETWORKING_DEV = [ NETWORKING_RELEASE packet_mangler if_headless ] # NETWORKING_DEBUG = [ NETWORKING_DEV ] @@ -52,22 +52,25 @@ # PERF_DBG_RELEASE=[ PERF_DBG_BASE ist_kdebug ] # PERF_DBG_DEV = [ PERF_DBG_BASE config_dtrace lock_stats zleaks kdp_interactive_debugging alternate_debugger interrupt_masked_debug ] # PERF_DBG_DEBUG = [ PERF_DBG_BASE config_dtrace lock_stats zleaks kdp_interactive_debugging alternate_debugger interrupt_masked_debug ] -# MACH_BASE = [ mach slidable config_ecc_logging vc_progress_white mdebug ipc_debug importance_inheritance config_atm config_coalitions config_iosched config_library_validation config_sysdiagnose config_telemetry config_mach_bridge_recv_time config_quiesce_counter ] +# MACH_BASE = [ mach slidable config_ecc_logging vc_progress_white mdebug ipc_debug importance_inheritance config_atm config_coalitions config_iosched config_library_validation config_sysdiagnose config_telemetry config_mach_bridge_recv_time config_quiesce_counter phys_write_acct ] # MACH_RELEASE = [ MACH_BASE config_skip_precise_user_kernel_time debugger_for_zone_info ] # MACH_DEV = [ MACH_BASE task_zone_info config_io_accounting importance_trace config_ledger_interval_max ] # MACH_DEBUG = [ MACH_BASE task_zone_info config_io_accounting importance_trace config_ledger_interval_max importance_debug ] -# SCHED_BASE = [ config_sched_traditional config_sched_multiq config_sched_deferred_ast config_clutch ] +# SCHED_BASE = [ config_sched_traditional config_sched_multiq config_sched_deferred_ast config_clutch config_sched_sfi config_taskwatch ] # SCHED_RELEASE = [ SCHED_BASE ] # SCHED_DEV = [ SCHED_BASE ] # SCHED_DEBUG = [ SCHED_BASE config_sched_grrr config_sched_proto ] -# VM_BASE = [ vps_dynamic_prio vm_pressure_events jetsam freeze memorystatus config_code_decryption phantom_cache config_secluded_memory config_background_queue config_cs_validation_bitmap] +# VM_BASE = [ vps_dynamic_prio vm_pressure_events jetsam 
memorystatus config_code_decryption phantom_cache config_secluded_memory config_background_queue config_cs_validation_bitmap dirtystatus_tracking ] # VM_RELEASE = [ VM_BASE ] # VM_DEV = [ VM_BASE dynamic_codesigning ] # VM_DEBUG = [ VM_BASE dynamic_codesigning ] -# SECURITY = [ config_macf kernel_integrity ] -# RELEASE = [ KERNEL_RELEASE BSD_RELEASE FILESYS_RELEASE NFS_RELEASE SKYWALK_RELEASE NETWORKING_RELEASE PF_RELEASE MULTIPATH VPN IOKIT_RELEASE LIBKERN_RELEASE PERF_DBG_RELEASE MACH_RELEASE SCHED_RELEASE VM_RELEASE SECURITY ] -# DEVELOPMENT = [ KERNEL_DEV BSD_DEV FILESYS_DEV NFS_DEV SKYWALK_DEV NETWORKING_DEV PF_DEV MULTIPATH VPN IOKIT_DEV LIBKERN_DEV PERF_DBG_DEV MACH_DEV SCHED_DEV VM_DEV SECURITY ] -# DEBUG = [ KERNEL_DEBUG BSD_DEBUG FILESYS_DEBUG NFS_DEBUG SKYWALK_DEBUG NETWORKING_DEBUG PF_DEBUG MULTIPATH VPN IOKIT_DEBUG LIBKERN_DEBUG PERF_DBG_DEBUG MACH_DEBUG SCHED_DEBUG VM_DEBUG SECURITY ] +# SECURITY_BASE = [ config_macf kernel_integrity config_secure_bsd_root config_zalloc_sequester ] +# SECURITY_RELEASE = [ SECURITY_BASE ] +# SECURITY_DEV = [ SECURITY_BASE config_setuid config_kas_info ] +# SECURITY_DEBUG = [ SECURITY_BASE config_setuid config_kas_info ] +# RELEASE = [ KERNEL_RELEASE BSD_RELEASE FILESYS_RELEASE NFS_RELEASE SKYWALK_RELEASE NETWORKING_RELEASE PF_RELEASE MULTIPATH VPN IOKIT_RELEASE LIBKERN_RELEASE PERF_DBG_RELEASE MACH_RELEASE SCHED_RELEASE VM_RELEASE SECURITY_RELEASE ] +# DEVELOPMENT = [ KERNEL_DEV BSD_DEV FILESYS_DEV NFS_DEV SKYWALK_DEV NETWORKING_DEV PF_DEV MULTIPATH VPN IOKIT_DEV LIBKERN_DEV PERF_DBG_DEV MACH_DEV SCHED_DEV VM_DEV SECURITY_DEV ] +# DEBUG = [ KERNEL_DEBUG BSD_DEBUG FILESYS_DEBUG NFS_DEBUG SKYWALK_DEBUG NETWORKING_DEBUG PF_DEBUG MULTIPATH VPN IOKIT_DEBUG LIBKERN_DEBUG PERF_DBG_DEBUG MACH_DEBUG SCHED_DEBUG VM_DEBUG SECURITY_DEBUG ] # KASAN = [ DEVELOPMENT config_kasan config_ubsan config_ksancov ] # ###################################################################### @@ -88,8 +91,6 @@ options CONFIG_MACH_APPROXIMATE_TIME options CONFIG_KERNEL_INTEGRITY # -options INTERRUPT_MASKED_DEBUG=1 # # - options CONFIG_PGTRACE # options CONFIG_PGTRACE_NONKEXT # pseudo-device pgtrace 1 init pgtrace_dev_init # diff --git a/config/MASTER.arm64.BridgeOS b/config/MASTER.arm64.BridgeOS new file mode 100644 index 000000000..c4bae4b76 --- /dev/null +++ b/config/MASTER.arm64.BridgeOS @@ -0,0 +1,96 @@ +# +# Mach Operating System +# Copyright (c) 1986 Carnegie-Mellon University +# Copyright 2001-2018 Apple Inc. +# +# All rights reserved. The CMU software License Agreement +# specifies the terms and conditions for use and redistribution. +# +###################################################################### +# +# Master Apple configuration file (see the master machine independent +# configuration file for a description of the file format). 
+# +###################################################################### +# +# Standard Apple OS Configurations: +# -------- ----- -- --------------- +# +# KERNEL_BASE = [ arm64 xsmall msgb_large config_embedded config_enforce_signed_code config_requires_u32_munging config_zcache config_darkboot ] +# KERNEL_RELEASE = [ KERNEL_BASE ] +# KERNEL_DEV = [ KERNEL_BASE development mach_assert config_xnupost proc_ref_debug os_reason_debug pgtrace ] +# KERNEL_DEBUG = [ KERNEL_BASE debug mach_assert config_xnupost config_ltable_stats config_ltable_debug config_waitq_stats config_workloop_debug config_waitq_debug pgtrace ] +# BSD_BASE = [ mach_bsd config_workqueue psynch config_proc_uuid_policy config_personas config_imageboot config_imageboot_img4 ] +# BSD_RELEASE = [ BSD_BASE no_printf_str no_kprintf_str secure_kernel ] +# BSD_DEV = [ BSD_BASE config_netboot config_imgsrc_access config_lockerboot config_coredump pgo config_vnguard ] +# BSD_DEBUG = [ BSD_BASE config_netboot config_imgsrc_access config_lockerboot config_coredump pgo config_vnguard ] +# FILESYS_BASE = [ devfs fifo fs_compression config_protect config_mnt_rootsnap config_triggers config_fse routefs namedstreams config_dataless_files bindfs] +# FILESYS_RELEASE= [ FILESYS_BASE ] +# FILESYS_DEV = [ FILESYS_BASE fdesc ] +# FILESYS_DEBUG = [ FILESYS_BASE fdesc ] +# NFS_DEV = [ nfsclient nfsserver config_nfs_gss ] +# NFS_RELEASE = [ nfsclient ] +# NFS_DEBUG = [ nfsclient config_nfs_gss ] +# NETWORKING = [ inet tcpdrop_synfin bpfilter if_bridge traffic_mgt dummynet ah_all_crypto if_fake sixlowpan ] +# NETWORKING_RELEASE = [ NETWORKING ] +# NETWORKING_DEV = [ NETWORKING_RELEASE packet_mangler if_headless ] +# NETWORKING_DEBUG = [ NETWORKING_DEV ] +# VPN = [ ipsec flow_divert necp content_filter ] +# PF_RELEASE = [ pf ] +# PF_DEV = [ PF_RELEASE pflog ] +# PF_DEBUG = [ PF_DEV ] +# MULTIPATH = [ multipath mptcp ] +# IOKIT_BASE = [ iokit iokitcpp no_kextd no_kernel_hid config_sleep ] +# IOKIT_RELEASE = [ IOKIT_BASE ] +# IOKIT_DEV = [ IOKIT_BASE iokitstats iotracking ] +# IOKIT_DEBUG = [ IOKIT_BASE iokitstats iotracking] +# LIBKERN_BASE = [ libkerncpp config_blocks config_kec_fips zlib crypto_sha2 config_img4 ] +# LIBKERN_RELEASE =[ LIBKERN_BASE ] +# LIBKERN_DEV = [ LIBKERN_BASE iotracking ] +# LIBKERN_DEBUG = [ LIBKERN_BASE iotracking ] +# PERF_DBG_BASE = [ mach_kdp config_serial_kdp MONOTONIC_BASE kperf kpc ] +# PERF_DBG_RELEASE=[ PERF_DBG_BASE ist_kdebug ] +# PERF_DBG_DEV = [ PERF_DBG_BASE config_dtrace lock_stats zleaks kdp_interactive_debugging alternate_debugger interrupt_masked_debug ] +# PERF_DBG_DEBUG = [ PERF_DBG_BASE config_dtrace lock_stats zleaks kdp_interactive_debugging alternate_debugger interrupt_masked_debug ] +# MACH_BASE = [ mach slidable config_ecc_logging vc_progress_white mdebug ipc_debug importance_inheritance config_atm config_coalitions config_iosched config_library_validation config_sysdiagnose config_telemetry config_mach_bridge_recv_time config_quiesce_counter ] +# MACH_RELEASE = [ MACH_BASE config_skip_precise_user_kernel_time debugger_for_zone_info ] +# MACH_DEV = [ MACH_BASE task_zone_info config_io_accounting importance_trace config_ledger_interval_max ] +# MACH_DEBUG = [ MACH_BASE task_zone_info config_io_accounting importance_trace config_ledger_interval_max importance_debug ] +# SCHED_BASE = [ config_sched_traditional config_sched_multiq config_sched_deferred_ast config_clutch config_sched_sfi config_taskwatch ] +# SCHED_RELEASE = [ SCHED_BASE ] +# SCHED_DEV = [ SCHED_BASE ] +# SCHED_DEBUG = [ SCHED_BASE 
config_sched_grrr config_sched_proto ] +# VM_BASE = [ vps_dynamic_prio vm_pressure_events jetsam memorystatus config_code_decryption phantom_cache config_secluded_memory config_background_queue config_cs_validation_bitmap ] +# VM_RELEASE = [ VM_BASE ] +# VM_DEV = [ VM_BASE dynamic_codesigning ] +# VM_DEBUG = [ VM_BASE dynamic_codesigning ] +# SECURITY_BASE = [ config_macf kernel_integrity config_secure_bsd_root config_zalloc_sequester ] +# SECURITY_RELEASE = [ SECURITY_BASE ] +# SECURITY_DEV = [ SECURITY_BASE config_setuid config_kas_info ] +# SECURITY_DEBUG = [ SECURITY_BASE config_setuid config_kas_info ] +# RELEASE = [ KERNEL_RELEASE BSD_RELEASE FILESYS_RELEASE NFS_RELEASE SKYWALK_RELEASE NETWORKING_RELEASE PF_RELEASE MULTIPATH VPN IOKIT_RELEASE LIBKERN_RELEASE PERF_DBG_RELEASE MACH_RELEASE SCHED_RELEASE VM_RELEASE SECURITY_RELEASE ] +# DEVELOPMENT = [ KERNEL_DEV BSD_DEV FILESYS_DEV NFS_DEV SKYWALK_DEV NETWORKING_DEV PF_DEV MULTIPATH VPN IOKIT_DEV LIBKERN_DEV PERF_DBG_DEV MACH_DEV SCHED_DEV VM_DEV SECURITY_DEV ] +# DEBUG = [ KERNEL_DEBUG BSD_DEBUG FILESYS_DEBUG NFS_DEBUG SKYWALK_DEBUG NETWORKING_DEBUG PF_DEBUG MULTIPATH VPN IOKIT_DEBUG LIBKERN_DEBUG PERF_DBG_DEBUG MACH_DEBUG SCHED_DEBUG VM_DEBUG SECURITY_DEBUG ] +# KASAN = [ DEVELOPMENT config_kasan config_ubsan config_ksancov ] +# +###################################################################### +# +machine "arm64" # + +makeoptions OSFMK_MACHINE = "arm64" # + +options COUNT_SYSCALLS # count bsd system calls # +options TRASH_VFP_ON_SAVE # +options ALTERNATE_DEBUGGER # + +options CONFIG_VNODES=1024 # + +options CONFIG_FREEZE_SUSPENDED_MIN=4 # + +options CONFIG_MACH_APPROXIMATE_TIME + +options CONFIG_KERNEL_INTEGRITY # + +options CONFIG_PGTRACE # +options CONFIG_PGTRACE_NONKEXT # +pseudo-device pgtrace 1 init pgtrace_dev_init # diff --git a/config/MASTER.arm64.MacOSX b/config/MASTER.arm64.MacOSX new file mode 100644 index 000000000..fd3ab5e2e --- /dev/null +++ b/config/MASTER.arm64.MacOSX @@ -0,0 +1,102 @@ +# +# Mach Operating System +# Copyright (c) 1986 Carnegie-Mellon University +# Copyright 2001-2018 Apple Inc. +# +# All rights reserved. The CMU software License Agreement +# specifies the terms and conditions for use and redistribution. +# +###################################################################### +# +# Master Apple configuration file (see the master machine independent +# configuration file for a description of the file format). 
+# +###################################################################### +# +# Standard Apple OS Configurations: +# -------- ----- -- --------------- +# +# KERNEL_BASE = [ arm64 medium msgb_large config_arrow config_requires_u32_munging config_zcache config_delay_idle_sleep config_proc_udata_storage ARM_EXTRAS_BASE ] +# KERNEL_RELEASE = [ KERNEL_BASE ] +# KERNEL_DEV = [ KERNEL_BASE development mach_assert config_xnupost proc_ref_debug os_reason_debug pgtrace config_zalloc_sequester ] +# KERNEL_DEBUG = [ KERNEL_BASE debug mach_assert config_xnupost config_ltable_stats config_ltable_debug config_waitq_stats config_workloop_debug config_waitq_debug pgtrace config_zalloc_sequester ] +# BSD_BASE = [ mach_bsd sysv_sem sysv_msg sysv_shm config_netboot config_imageboot config_workqueue psynch config_proc_uuid_policy config_coredump pgo config_personas ] +# BSD_RELEASE = [ BSD_BASE ] +# BSD_DEV = [ BSD_BASE config_vnguard ] +# BSD_DEBUG = [ BSD_BASE config_vnguard ] +# FILESYS_BASE = [ devfs fdesc fifo fs_compression config_protect config_mnt_rootsnap config_rosv_startup config_mount_vm config_mount_prebootrecovery config_basesystemroot config_fse quota namedstreams config_imgsrc_access config_triggers config_ext_resolver config_searchfs config_volfs config_appledouble nullfs config_mnt_suid config_firmlinks config_dataless_files ] +# FILESYS_RELEASE= [ FILESYS_BASE ] +# FILESYS_DEV = [ FILESYS_BASE ] +# FILESYS_DEBUG = [ FILESYS_BASE ] +# NFS = [ nfsclient nfsserver config_nfs4 config_nfs_gss ] +# SKYWALK_BASE = [ skywalk config_nexus_user_pipe config_nexus_kernel_pipe config_nexus_monitor config_nexus_flowswitch config_nexus_netif ] +# SKYWALK_RELEASE = [ SKYWALK_BASE ] +# SKYWALK_DEV = [ SKYWALK_BASE ] +# SKYWALK_DEBUG = [ SKYWALK_BASE ] +# NETWORKING = [ inet bpfilter dummynet traffic_mgt sendfile ah_all_crypto bond vlan gif stf ifnet_input_chk config_mbuf_jumbo if_bridge MULTIPATH if_fake sixlowpan ] +# NETWORKING_RELEASE = [ NETWORKING ] +# NETWORKING_DEV = [ NETWORKING_RELEASE packet_mangler if_headless ] +# NETWORKING_DEBUG = [ NETWORKING_DEV ] +# VPN = [ ipsec flow_divert necp content_filter ] +# PF = [ pf pflog ] +# MULTIPATH = [ multipath mptcp ] +#ifdef SOC_CONFIG_t8020 +# HIBERNATION = [ ] +#else /*!SOC_CONFIG_t8020*/ +# HIBERNATION = [ hibernation ] +#endif /*!SOC_CONFIG_t8020*/ +# IOKIT_BASE = [ iokit iokitcpp no_kernel_hid config_sleep iokitstats HIBERNATION ] +# IOKIT_RELEASE = [ IOKIT_BASE ] +# IOKIT_DEV = [ IOKIT_BASE iotracking ] +# IOKIT_DEBUG = [ IOKIT_BASE iotracking ] +# LIBKERN_BASE = [ libkerncpp config_blocks config_kec_fips crypto_sha2 config_img4 ] +# LIBKERN_RELEASE =[ LIBKERN_BASE zlib ] +# LIBKERN_DEV = [ LIBKERN_BASE zlib iotracking ] +# LIBKERN_DEBUG = [ LIBKERN_BASE zlib iotracking ] +# PERF_DBG_BASE = [ config_dtrace mach_kdp config_serial_kdp kdp_interactive_debugging MONOTONIC_BASE kperf kpc ] +# PERF_DBG_RELEASE=[ PERF_DBG_BASE ist_kdebug ] +# PERF_DBG_DEV = [ PERF_DBG_BASE lock_stats zleaks alternate_debugger interrupt_masked_debug ] +# PERF_DBG_DEBUG = [ PERF_DBG_BASE lock_stats zleaks alternate_debugger interrupt_masked_debug ] +# MACH_BASE = [ mach slidable config_ecc_logging vc_progress_white mdebug ipc_debug importance_inheritance config_atm config_coalitions config_iosched config_sysdiagnose config_telemetry config_mach_bridge_recv_time config_quiesce_counter config_arm_pfz ] +# MACH_RELEASE = [ MACH_BASE debugger_for_zone_info ] +# MACH_DEV = [ MACH_BASE task_zone_info config_io_accounting importance_trace config_ledger_interval_max ] +# MACH_DEBUG = 
[ MACH_BASE task_zone_info config_io_accounting importance_trace config_ledger_interval_max importance_debug ] +# SCHED_BASE = [ config_sched_traditional config_sched_multiq config_sched_deferred_ast config_clutch config_sched_sfi config_sched_auto_join ] +# SCHED_RELEASE = [ SCHED_BASE ] +# SCHED_DEV = [ SCHED_BASE ] +# SCHED_DEBUG = [ SCHED_BASE config_sched_grrr config_sched_proto ] +# VM_BASE = [ vm_pressure_events memorystatus config_code_decryption encrypted_swap config_background_queue] +# VM_RELEASE = [ VM_BASE ] +# VM_DEV = [ VM_BASE dynamic_codesigning ] +# VM_DEBUG = [ VM_BASE dynamic_codesigning ] +# SECURITY = [ config_macf config_audit kernel_integrity config_csr config_csr_from_dt config_setuid config_kas_info config_secure_bsd_root config_arcade config_supplemental_signatures second_static_trust_cache ] +# RELEASE = [ KERNEL_RELEASE BSD_RELEASE FILESYS_RELEASE NFS SKYWALK_RELEASE NETWORKING_RELEASE PF MULTIPATH VPN IOKIT_RELEASE LIBKERN_RELEASE PERF_DBG_RELEASE MACH_RELEASE SCHED_RELEASE VM_RELEASE SECURITY ] +# DEVELOPMENT = [ KERNEL_DEV BSD_DEV FILESYS_DEV NFS SKYWALK_DEV NETWORKING_DEV PF MULTIPATH VPN IOKIT_DEV LIBKERN_DEV PERF_DBG_DEV MACH_DEV SCHED_DEV VM_DEV SECURITY ] +# DEBUG = [ KERNEL_DEBUG BSD_DEBUG FILESYS_DEBUG NFS SKYWALK_DEBUG NETWORKING_DEBUG PF MULTIPATH VPN IOKIT_DEBUG LIBKERN_DEBUG PERF_DBG_DEBUG MACH_DEBUG SCHED_DEBUG VM_DEBUG SECURITY ] +# KASAN = [ DEVELOPMENT config_kasan config_ubsan config_ksancov ] +# +###################################################################### +# +machine "arm64" # + +makeoptions OSFMK_MACHINE = "arm64" # + +options COUNT_SYSCALLS # count bsd system calls # +options TRASH_VFP_ON_SAVE # +options ALTERNATE_DEBUGGER # + +options CONFIG_VNODES=1024 # + +options CONFIG_FREEZE_SUSPENDED_MIN=4 # + +options CONFIG_MACH_APPROXIMATE_TIME + +options CONFIG_KERNEL_INTEGRITY # + +options CONFIG_PGTRACE # +options CONFIG_PGTRACE_NONKEXT # +pseudo-device pgtrace 1 init pgtrace_dev_init # + +options CONFIG_MACF_LAZY_VNODE_LABELS # Turn on labels, don't preallocate + +options CONFIG_HYPERVISOR_PUBLIC # unrestricted entitlement for hypervisor diff --git a/config/MASTER.arm64.bcm2837 b/config/MASTER.arm64.bcm2837 index 73670d3d3..d1c4ef467 100644 --- a/config/MASTER.arm64.bcm2837 +++ b/config/MASTER.arm64.bcm2837 @@ -16,7 +16,7 @@ # Standard Apple OS Configurations: # -------- ----- -- --------------- # -# KERNEL_BASE = [ arm64 xsmall config_embedded config_requires_u32_munging config_zcache ] +# KERNEL_BASE = [ arm64 xsmall msgb_small config_embedded config_requires_u32_munging config_zcache ] # KERNEL_RELEASE = [ KERNEL_BASE ] # KERNEL_DEV = [ KERNEL_BASE development mach_assert config_xnupost proc_ref_debug os_reason_debug pgtrace ] # KERNEL_DEBUG = [ KERNEL_BASE debug mach_assert config_xnupost config_ltable_stats config_ltable_debug config_waitq_stats config_workloop_debug config_waitq_debug pgtrace ] @@ -24,14 +24,14 @@ # BSD_RELEASE = [ BSD_BASE no_printf_str no_kprintf_str secure_kernel ] # BSD_DEV = [ BSD_BASE config_netboot config_imageboot config_coredump pgo config_vnguard ] # BSD_DEBUG = [ BSD_BASE config_netboot config_imageboot config_coredump pgo config_vnguard ] -# FILESYS_BASE = [ devfs fifo fs_compression config_protect config_mnt_rootsnap config_fse routefs namedstreams ] +# FILESYS_BASE = [ devfs fifo fs_compression config_protect config_mnt_rootsnap config_fse routefs namedstreams bindfs] # FILESYS_RELEASE= [ FILESYS_BASE ] # FILESYS_DEV = [ FILESYS_BASE fdesc ] # FILESYS_DEBUG = [ FILESYS_BASE fdesc ] # NFS_DEV = [ 
nfsclient nfsserver config_nfs_gss ] # NFS_RELEASE = [ nfsclient ] # NFS_DEBUG = [ nfsclient config_nfs_gss ] -# NETWORKING = [ inet tcpdrop_synfin bpfilter inet6 ipv6send if_bridge traffic_mgt dummynet ah_all_crypto if_fake ] +# NETWORKING = [ inet bpfilter if_bridge traffic_mgt dummynet ah_all_crypto if_fake ] # NETWORKING_RELEASE = [ NETWORKING ] # NETWORKING_DEV = [ NETWORKING_RELEASE packet_mangler if_headless ] # NETWORKING_DEBUG = [ NETWORKING_DEV ] @@ -54,18 +54,21 @@ # MACH_RELEASE = [ MACH_BASE config_skip_precise_user_kernel_time debugger_for_zone_info ] # MACH_DEV = [ MACH_BASE task_zone_info config_io_accounting importance_trace ] # MACH_DEBUG = [ MACH_BASE task_zone_info config_io_accounting importance_trace importance_debug ] -# SCHED_BASE = [ config_sched_traditional config_sched_multiq config_sched_deferred_ast ] +# SCHED_BASE = [ config_sched_traditional config_sched_multiq config_sched_deferred_ast config_clutch config_taskwatch ] # SCHED_RELEASE = [ SCHED_BASE ] # SCHED_DEV = [ SCHED_BASE ] # SCHED_DEBUG = [ SCHED_BASE config_sched_grrr config_sched_proto ] -# VM_BASE = [ vm_pressure_events jetsam freeze memorystatus config_code_decryption phantom_cache config_secluded_memory config_background_queue config_cs_validation_bitmap] +# VM_BASE = [ vm_pressure_events jetsam freeze memorystatus config_code_decryption phantom_cache config_secluded_memory config_background_queue config_cs_validation_bitmap dirtystatus_tracking ] # VM_RELEASE = [ VM_BASE ] # VM_DEV = [ VM_BASE dynamic_codesigning ] # VM_DEBUG = [ VM_BASE dynamic_codesigning ] -# SECURITY = [ config_macf kernel_integrity ] -# RELEASE = [ KERNEL_RELEASE BSD_RELEASE FILESYS_RELEASE NFS_RELEASE SKYWALK_RELEASE NETWORKING_RELEASE PF MULTIPATH VPN IOKIT_RELEASE LIBKERN_RELEASE PERF_DBG_RELEASE MACH_RELEASE SCHED_RELEASE VM_RELEASE SECURITY ] -# DEVELOPMENT = [ KERNEL_DEV BSD_DEV FILESYS_DEV NFS_DEV SKYWALK_DEV NETWORKING_DEV PF MULTIPATH VPN IOKIT_DEV LIBKERN_DEV PERF_DBG_DEV MACH_DEV SCHED_DEV VM_DEV SECURITY ] -# DEBUG = [ KERNEL_DEBUG BSD_DEBUG FILESYS_DEBUG NFS_DEBUG SKYWALK_DEBUG NETWORKING_DEBUG PF MULTIPATH VPN IOKIT_DEBUG LIBKERN_DEBUG PERF_DBG_DEBUG MACH_DEBUG SCHED_DEBUG VM_DEBUG SECURITY ] +# SECURITY_BASE = [ config_macf kernel_integrity config_secure_bsd_root config_zalloc_sequester ] +# SECURITY_RELEASE = [ SECURITY_BASE ] +# SECURITY_DEV = [ SECURITY_BASE config_setuid config_kas_info ] +# SECURITY_DEBUG = [ SECURITY_BASE config_setuid config_kas_info ] +# RELEASE = [ KERNEL_RELEASE BSD_RELEASE FILESYS_RELEASE NFS_RELEASE SKYWALK_RELEASE NETWORKING_RELEASE PF MULTIPATH VPN IOKIT_RELEASE LIBKERN_RELEASE PERF_DBG_RELEASE MACH_RELEASE SCHED_RELEASE VM_RELEASE SECURITY_RELEASE ] +# DEVELOPMENT = [ KERNEL_DEV BSD_DEV FILESYS_DEV NFS_DEV SKYWALK_DEV NETWORKING_DEV PF MULTIPATH VPN IOKIT_DEV LIBKERN_DEV PERF_DBG_DEV MACH_DEV SCHED_DEV VM_DEV SECURITY_DEV ] +# DEBUG = [ KERNEL_DEBUG BSD_DEBUG FILESYS_DEBUG NFS_DEBUG SKYWALK_DEBUG NETWORKING_DEBUG PF MULTIPATH VPN IOKIT_DEBUG LIBKERN_DEBUG PERF_DBG_DEBUG MACH_DEBUG SCHED_DEBUG VM_DEBUG SECURITY_DEBUG ] # KASAN = [ DEVELOPMENT config_kasan config_ubsan config_ksancov ] # ###################################################################### @@ -86,8 +89,6 @@ options CONFIG_MACH_APPROXIMATE_TIME options CONFIG_KERNEL_INTEGRITY # -options INTERRUPT_MASKED_DEBUG=1 # # - options CONFIG_PGTRACE # options CONFIG_PGTRACE_NONKEXT # pseudo-device pgtrace 1 init pgtrace_dev_init # diff --git a/config/MASTER.arm64.iPhoneOS b/config/MASTER.arm64.iPhoneOS new file mode 100644 
index 000000000..506772eb4 --- /dev/null +++ b/config/MASTER.arm64.iPhoneOS @@ -0,0 +1,96 @@ +# +# Mach Operating System +# Copyright (c) 1986 Carnegie-Mellon University +# Copyright 2001-2018 Apple Inc. +# +# All rights reserved. The CMU software License Agreement +# specifies the terms and conditions for use and redistribution. +# +###################################################################### +# +# Master Apple configuration file (see the master machine independent +# configuration file for a description of the file format). +# +###################################################################### +# +# Standard Apple OS Configurations: +# -------- ----- -- --------------- +# +# KERNEL_BASE = [ arm64 xsmall msgb_large config_embedded config_enforce_signed_code config_requires_u32_munging config_zcache config_darkboot ] +# KERNEL_RELEASE = [ KERNEL_BASE ] +# KERNEL_DEV = [ KERNEL_BASE development mach_assert config_xnupost proc_ref_debug os_reason_debug pgtrace ] +# KERNEL_DEBUG = [ KERNEL_BASE debug mach_assert config_xnupost config_ltable_stats config_ltable_debug config_waitq_stats config_workloop_debug config_waitq_debug pgtrace ] +# BSD_BASE = [ mach_bsd config_workqueue psynch config_proc_uuid_policy config_personas config_imageboot config_imageboot_img4 ] +# BSD_RELEASE = [ BSD_BASE no_printf_str no_kprintf_str secure_kernel ] +# BSD_DEV = [ BSD_BASE config_netboot config_imgsrc_access config_lockerboot config_coredump pgo config_vnguard ] +# BSD_DEBUG = [ BSD_BASE config_netboot config_imgsrc_access config_lockerboot config_coredump pgo config_vnguard ] +# FILESYS_BASE = [ devfs fifo fs_compression config_protect config_mnt_rootsnap config_triggers config_fse routefs namedstreams config_dataless_files bindfs] +# FILESYS_RELEASE= [ FILESYS_BASE ] +# FILESYS_DEV = [ FILESYS_BASE fdesc ] +# FILESYS_DEBUG = [ FILESYS_BASE fdesc ] +# NFS_DEV = [ nfsclient nfsserver config_nfs_gss ] +# NFS_RELEASE = [ nfsclient ] +# NFS_DEBUG = [ nfsclient config_nfs_gss ] +# NETWORKING = [ inet tcpdrop_synfin bpfilter if_bridge traffic_mgt dummynet ah_all_crypto if_fake sixlowpan ] +# NETWORKING_RELEASE = [ NETWORKING ] +# NETWORKING_DEV = [ NETWORKING_RELEASE packet_mangler if_headless ] +# NETWORKING_DEBUG = [ NETWORKING_DEV ] +# VPN = [ ipsec flow_divert necp content_filter ] +# PF_RELEASE = [ pf ] +# PF_DEV = [ PF_RELEASE pflog ] +# PF_DEBUG = [ PF_DEV ] +# MULTIPATH = [ multipath mptcp ] +# IOKIT_BASE = [ iokit iokitcpp no_kextd no_kernel_hid config_sleep ] +# IOKIT_RELEASE = [ IOKIT_BASE ] +# IOKIT_DEV = [ IOKIT_BASE iokitstats iotracking ] +# IOKIT_DEBUG = [ IOKIT_BASE iokitstats iotracking] +# LIBKERN_BASE = [ libkerncpp config_blocks config_kec_fips zlib crypto_sha2 config_img4 ] +# LIBKERN_RELEASE =[ LIBKERN_BASE ] +# LIBKERN_DEV = [ LIBKERN_BASE iotracking ] +# LIBKERN_DEBUG = [ LIBKERN_BASE iotracking ] +# PERF_DBG_BASE = [ mach_kdp config_serial_kdp MONOTONIC_BASE kperf kpc ] +# PERF_DBG_RELEASE=[ PERF_DBG_BASE ist_kdebug ] +# PERF_DBG_DEV = [ PERF_DBG_BASE config_dtrace lock_stats zleaks kdp_interactive_debugging alternate_debugger interrupt_masked_debug ] +# PERF_DBG_DEBUG = [ PERF_DBG_BASE config_dtrace lock_stats zleaks kdp_interactive_debugging alternate_debugger interrupt_masked_debug ] +# MACH_BASE = [ mach slidable config_ecc_logging vc_progress_white mdebug ipc_debug importance_inheritance config_atm config_coalitions config_iosched config_library_validation config_sysdiagnose config_telemetry config_mach_bridge_recv_time config_quiesce_counter ] +# MACH_RELEASE = [ 
MACH_BASE config_skip_precise_user_kernel_time debugger_for_zone_info ] +# MACH_DEV = [ MACH_BASE task_zone_info config_io_accounting importance_trace config_ledger_interval_max ] +# MACH_DEBUG = [ MACH_BASE task_zone_info config_io_accounting importance_trace config_ledger_interval_max importance_debug ] +# SCHED_BASE = [ config_sched_traditional config_sched_multiq config_sched_deferred_ast config_clutch config_sched_sfi config_taskwatch ] +# SCHED_RELEASE = [ SCHED_BASE ] +# SCHED_DEV = [ SCHED_BASE ] +# SCHED_DEBUG = [ SCHED_BASE config_sched_grrr config_sched_proto ] +# VM_BASE = [ vps_dynamic_prio vm_pressure_events jetsam freeze memorystatus config_code_decryption phantom_cache config_secluded_memory config_background_queue config_cs_validation_bitmap dirtystatus_tracking ] +# VM_RELEASE = [ VM_BASE ] +# VM_DEV = [ VM_BASE dynamic_codesigning ] +# VM_DEBUG = [ VM_BASE dynamic_codesigning ] +# SECURITY_BASE = [ config_macf kernel_integrity config_secure_bsd_root config_zalloc_sequester ] +# SECURITY_RELEASE = [ SECURITY_BASE ] +# SECURITY_DEV = [ SECURITY_BASE config_setuid config_kas_info ] +# SECURITY_DEBUG = [ SECURITY_BASE config_setuid config_kas_info ] +# RELEASE = [ KERNEL_RELEASE BSD_RELEASE FILESYS_RELEASE NFS_RELEASE SKYWALK_RELEASE NETWORKING_RELEASE PF_RELEASE MULTIPATH VPN IOKIT_RELEASE LIBKERN_RELEASE PERF_DBG_RELEASE MACH_RELEASE SCHED_RELEASE VM_RELEASE SECURITY_RELEASE ] +# DEVELOPMENT = [ KERNEL_DEV BSD_DEV FILESYS_DEV NFS_DEV SKYWALK_DEV NETWORKING_DEV PF_DEV MULTIPATH VPN IOKIT_DEV LIBKERN_DEV PERF_DBG_DEV MACH_DEV SCHED_DEV VM_DEV SECURITY_DEV ] +# DEBUG = [ KERNEL_DEBUG BSD_DEBUG FILESYS_DEBUG NFS_DEBUG SKYWALK_DEBUG NETWORKING_DEBUG PF_DEBUG MULTIPATH VPN IOKIT_DEBUG LIBKERN_DEBUG PERF_DBG_DEBUG MACH_DEBUG SCHED_DEBUG VM_DEBUG SECURITY_DEBUG ] +# KASAN = [ DEVELOPMENT config_kasan config_ubsan config_ksancov ] +# +###################################################################### +# +machine "arm64" # + +makeoptions OSFMK_MACHINE = "arm64" # + +options COUNT_SYSCALLS # count bsd system calls # +options TRASH_VFP_ON_SAVE # +options ALTERNATE_DEBUGGER # + +options CONFIG_VNODES=1024 # + +options CONFIG_FREEZE_SUSPENDED_MIN=4 # + +options CONFIG_MACH_APPROXIMATE_TIME + +options CONFIG_KERNEL_INTEGRITY # + +options CONFIG_PGTRACE # +options CONFIG_PGTRACE_NONKEXT # +pseudo-device pgtrace 1 init pgtrace_dev_init # diff --git a/config/MASTER.x86_64 b/config/MASTER.x86_64 index 2e72d1d45..5350cb839 100644 --- a/config/MASTER.x86_64 +++ b/config/MASTER.x86_64 @@ -16,20 +16,20 @@ # Standard Apple OS Configurations: # -------- ----- -- --------------- # -# KERNEL_BASE = [ intel medium config_requires_u32_munging config_zcache ] +# KERNEL_BASE = [ intel medium msgb_large config_requires_u32_munging config_zcache config_delay_idle_sleep config_proc_udata_storage vsprintf ] # KERNEL_RELEASE = [ KERNEL_BASE ] -# KERNEL_DEV = [ KERNEL_BASE development mach_assert config_xnupost proc_ref_debug os_reason_debug ] -# KERNEL_DEBUG = [ KERNEL_BASE debug mach_assert config_xnupost config_ltable_stats config_ltable_debug config_waitq_stats config_workloop_debug config_waitq_debug ] +# KERNEL_DEV = [ KERNEL_BASE development mach_assert config_xnupost proc_ref_debug os_reason_debug config_zalloc_sequester ] +# KERNEL_DEBUG = [ KERNEL_BASE debug mach_assert config_xnupost config_ltable_stats config_ltable_debug config_waitq_stats config_workloop_debug config_waitq_debug config_zalloc_sequester ] # BSD_BASE = [ mach_bsd sysv_sem sysv_msg sysv_shm config_netboot config_imageboot 
config_imageboot_chunklist config_workqueue psynch config_proc_uuid_policy config_coredump pgo config_32bit_telemetry config_personas ] # BSD_RELEASE = [ BSD_BASE ] # BSD_DEV = [ BSD_BASE config_vnguard ] # BSD_DEBUG = [ BSD_BASE config_vnguard ] -# FILESYS_BASE = [ devfs fdesc config_dev_kmem config_fse quota namedstreams config_mnt_rootsnap config_rosv_startup config_mount_vm config_keypage_wp config_protect fifo config_volfs fs_compression config_imgsrc_access config_triggers config_ext_resolver config_searchfs config_appledouble nullfs config_mnt_suid config_firmlinks config_dataless_files ] +# FILESYS_BASE = [ devfs fdesc config_dev_kmem config_fse quota namedstreams config_mnt_rootsnap config_rosv_startup config_mount_vm config_mount_prebootrecovery config_basesystemroot config_keypage_wp config_protect fifo config_volfs fs_compression config_imgsrc_access config_triggers config_ext_resolver config_searchfs config_appledouble nullfs config_mnt_suid config_firmlinks config_dataless_files bindfs] # FILESYS_RELEASE= [ FILESYS_BASE ] # FILESYS_DEV = [ FILESYS_BASE ] # FILESYS_DEBUG = [ FILESYS_BASE ] # NFS = [ nfsclient nfsserver config_nfs4 config_nfs_gss ] -# NETWORKING = [ inet inet6 ipv6send tcpdrop_synfin bpfilter dummynet traffic_mgt sendfile ah_all_crypto bond vlan gif stf ifnet_input_chk config_mbuf_jumbo if_bridge MULTIPATH if_fake sixlowpan ] +# NETWORKING = [ inet bpfilter dummynet traffic_mgt sendfile ah_all_crypto bond vlan gif stf ifnet_input_chk config_mbuf_jumbo if_bridge MULTIPATH if_fake sixlowpan ] # NETWORKING_RELEASE = [ NETWORKING ] # NETWORKING_DEV = [ NETWORKING_RELEASE packet_mangler if_headless ] # NETWORKING_DEBUG = [ NETWORKING_DEV ] @@ -40,7 +40,7 @@ # IOKIT_RELEASE = [ IOKIT_BASE ] # IOKIT_DEV = [ IOKIT_BASE iotracking ] # IOKIT_DEBUG = [ IOKIT_BASE iotracking ] -# LIBKERN_BASE = [ libkerncpp config_blocks config_kxld config_kec_fips crypto_sha2 config_img4 ] +# LIBKERN_BASE = [ libkerncpp config_blocks config_kec_fips crypto_sha2 config_img4 ] # LIBKERN_RELEASE =[ LIBKERN_BASE zlib ] # LIBKERN_DEV = [ LIBKERN_BASE zlib iotracking ] # LIBKERN_DEBUG = [ LIBKERN_BASE zlib iotracking ] @@ -48,7 +48,7 @@ # PERF_DBG_RELEASE=[ PERF_DBG_BASE ] # PERF_DBG_DEV =[ PERF_DBG_BASE lock_stats ] # PERF_DBG_DEBUG = [ PERF_DBG_BASE lock_stats ] -# MACH_BASE = [ mach config_kext_basement mdebug ipc_debug config_mca config_vmx config_mtrr config_lapic config_telemetry importance_inheritance config_atm config_coalitions hypervisor config_iosched config_sysdiagnose config_mach_bridge_send_time copyout_shim ] +# MACH_BASE = [ mach config_kext_basement mdebug ipc_debug config_mca config_vmx config_mtrr config_lapic config_telemetry importance_inheritance config_atm config_coalitions hypervisor config_iosched config_sysdiagnose config_mach_bridge_send_time copyout_shim phys_write_acct ] # MACH_RELEASE = [ MACH_BASE ] # MACH_DEV = [ MACH_BASE task_zone_info importance_trace config_ledger_interval_max ] # MACH_DEBUG = [ MACH_BASE task_zone_info importance_trace config_ledger_interval_max importance_debug ] @@ -56,8 +56,8 @@ # SCHED_RELEASE = [ SCHED_BASE ] # SCHED_DEV = [ SCHED_BASE ] # SCHED_DEBUG = [ SCHED_BASE config_sched_grrr config_sched_proto ] -# VM = [ vm_pressure_events memorystatus dynamic_codesigning config_code_decryption encrypted_swap config_background_queue] -# SECURITY = [ config_macf config_audit config_csr config_arcade] +# VM = [ vm_pressure_events memorystatus dynamic_codesigning config_code_decryption encrypted_swap config_background_queue dirtystatus_tracking ] 
+# SECURITY = [ config_macf config_audit config_csr config_arcade config_setuid config_kas_info ] # RELEASE = [ KERNEL_RELEASE BSD_RELEASE FILESYS_RELEASE NFS SKYWALK_RELEASE NETWORKING_RELEASE PF VPN IOKIT_RELEASE LIBKERN_RELEASE PERF_DBG_RELEASE MACH_RELEASE SCHED_RELEASE VM SECURITY ] # DEVELOPMENT = [ KERNEL_DEV BSD_DEV FILESYS_DEV NFS SKYWALK_DEV NETWORKING_DEV PF VPN IOKIT_DEV LIBKERN_DEV PERF_DBG_DEV MACH_DEV SCHED_DEV VM SECURITY ] # DEBUG = [ KERNEL_DEBUG BSD_DEBUG FILESYS_DEBUG NFS SKYWALK_DEBUG NETWORKING_DEBUG PF VPN IOKIT_DEBUG LIBKERN_DEBUG PERF_DBG_DEBUG MACH_DEBUG SCHED_DEBUG VM SECURITY ] diff --git a/config/Mach.exports b/config/Mach.exports index 025f57973..ef61fc999 100644 --- a/config/Mach.exports +++ b/config/Mach.exports @@ -67,3 +67,4 @@ _vm_kernel_addrhash:_vm_kernel_addrhash_external _vm_kernel_addrhide _vm_kernel_addrperm_external _vm_kernel_unslide_or_perm_external +_vm_stats diff --git a/config/Makefile b/config/Makefile index 0f5f3ab63..e5a5fb5c9 100644 --- a/config/Makefile +++ b/config/Makefile @@ -28,6 +28,8 @@ SYMBOL_COMPONENT_LIST = \ Unsupported \ Private +SYMBOL_SET_PLIST_COMPONENT_LIST := $(SYMBOL_COMPONENT_LIST) Kasan + # In general you want it to be possible to have a CPU sub-type's symbol exports # alias to the parent type's exports. This is a special-case way to handle it # for now: @@ -46,22 +48,21 @@ DSTROOT_INSTALL_KEXT_MACHO_FILES = $(addprefix $(INSTALL_KEXT_DIR)/,$(KEXT_MACHO SYMROOT_INSTALL_KEXT_PLISTS = $(addprefix $(SYMROOT)/,$(KEXT_PLIST_LIST)) DSTROOT_INSTALL_KEXT_PLISTS = $(addprefix $(INSTALL_KEXT_DIR)/,$(KEXT_PLIST_LIST)) -EXPORTS_FILES = $(foreach symbolset,$(SYMBOL_COMPONENT_LIST),$(symbolset).exports $(symbolset).$(EXPORT_SOURCE_ARCH_CONFIG_LC).exports) Unused.exports - -SYMBOL_SET_BUILD = $(foreach symbolset, $(SYMBOL_COMPONENT_LIST), $(OBJPATH)/$(symbolset).symbolset) +EXPORTS_FILES = $(foreach symbolset,$(SYMBOL_COMPONENT_LIST),$(symbolset).exports $(symbolset).$(EXPORT_SOURCE_ARCH_CONFIG_LC).exports $(notdir $(wildcard $(SOURCE)/$(symbolset).$(EXPORT_SOURCE_ARCH_CONFIG_LC).$(PLATFORM).exports))) Unused.exports +Kasan_EXPORTS := $(SRCROOT)/san/Kasan.exports ifeq ($(KASAN),1) -KASAN_EXPORTS = $(SRCROOT)/san/Kasan_kasan.exports +Kasan_EXPORTS := $(SRCROOT)/san/Kasan_kasan.exports endif -$(OBJPATH)/allsymbols: $(OBJPATH)/$(KERNEL_FILE_NAME) - $(_v)$(NM) -gj $< > $@ +SYMBOL_SET_BUILD = $(foreach symbolset, $(SYMBOL_COMPONENT_LIST), $(OBJPATH)/$(symbolset).symbolset) +SYMBOL_SET_PLIST_BUILD = $(foreach symbolset, $(SYMBOL_SET_PLIST_COMPONENT_LIST), $(OBJPATH)/$(symbolset).symbolset.plist) define symbol_set_rule -$(OBJPATH)/$(1).symbolset: MY_EXPORTS := $(filter $(1)%,$(EXPORTS_FILES)) -$(OBJPATH)/$(1).symbolset: MY_EXPORTS_ARGS := $$(foreach file,$$(MY_EXPORTS),-export $(SOURCE)/$$(file)) -$(OBJPATH)/$(1).symbolset: $$(MY_EXPORTS) $(OBJPATH)/allsymbols $(KEXT_CREATE_SYMBOL_SET) - $$(call makelog,$(ColorH)SYMBOLSET$(Color0) $(ColorF)$(1)$(Color0) "($(ColorLF)$(CURRENT_ARCH_CONFIG_LC)$(Color0))") +$(1)_EXPORTS := $(addprefix $(SOURCE)/,$(filter $(1)%,$(EXPORTS_FILES))) +$(OBJPATH)/$(1).symbolset: MY_EXPORTS_ARGS := $$(foreach file,$$($(1)_EXPORTS),-export $$(file)) +$(OBJPATH)/$(1).symbolset: $$($(1)_EXPORTS) $(OBJPATH)/allsymbols $(KEXT_CREATE_SYMBOL_SET) + @$$(LOG_SYMBOLSET) "$(ColorF)$(1)$(Color0) ($(ColorLF)$(CURRENT_ARCH_CONFIG_LC)$(Color0))" $(_v)$(KEXT_CREATE_SYMBOL_SET) \ $(ARCH_FLAGS_$(CURRENT_ARCH_CONFIG)) \ -import $(OBJPATH)/allsymbols \ @@ -69,7 +70,23 @@ $(OBJPATH)/$(1).symbolset: $$(MY_EXPORTS) $(OBJPATH)/allsymbols 
$(KEXT_CREATE_SY -output $$@ $(_vstdout) endef +define symbol_set_plist_rule +$(1)_KEXT_PLIST := $$(filter %/$(1).kext/Info.plist,$$(SYMROOT_INSTALL_KEXT_PLISTS)) +$(OBJPATH)/$(1).symbolset.plist: $$($(1)_KEXT_PLIST) $$($(1)_EXPORTS) + @$$(LOG_SYMBOLSETPLIST) "$(ColorF)$(1)$(Color0) ($(ColorLF)$(CURRENT_ARCH_CONFIG_LC)$(Color0))" + $(_v)$(SOURCE)/generate_symbolset_plist.sh $$@ $$+ $(_vstdout) + $(_v)$(PLUTIL) -lint -s $$@ +endef + $(foreach symbolset,$(SYMBOL_COMPONENT_LIST),$(eval $(call symbol_set_rule,$(symbolset)))) +$(foreach symbolset,$(SYMBOL_SET_PLIST_COMPONENT_LIST),$(eval $(call symbol_set_plist_rule,$(symbolset)))) + +$(OBJPATH)/symbolsets.plist: $(SYMBOL_SET_PLIST_BUILD) + $(_v)$(SOURCE)/generate_combined_symbolsets_plist.sh $@ $^ $(_vstdout) + $(_v)$(PLUTIL) -convert binary1 -s $@ + +$(OBJPATH)/allsymbols: $(OBJPATH)/$(KERNEL_FILE_NAME) + $(_v)$(NM) -gj $< | sort -u > $@ .PHONY: check_all_exports @@ -90,11 +107,11 @@ check_all_exports: $(OBJPATH)/allsymbols $(KEXT_CREATE_SYMBOL_SET) -output /dev/null $(_vstdout) $(OBJPATH)/$(MD_SUPPORTED_KPI_FILENAME): $(EXPORTS_FILES) - $(call makelog,$(ColorH)SUPPORTED_KPI$(Color0) "($(ColorLF)$(CURRENT_ARCH_CONFIG_LC)$(Color0))") + @$(LOG_SUPPORTED_KPI) "$(CURRENT_ARCH_CONFIG_LC)" $(_v)$(SRCROOT)/config/list_supported.sh $(SOURCE) $(EXPORT_SOURCE_ARCH_CONFIG_LC) $@ $(OBJPATH)/$(MI_SUPPORTED_KPI_FILENAME): $(EXPORTS_FILES) - $(call makelog,$(ColorH)SUPPORTED_KPI$(Color0) "($(ColorLF)all$(Color0))") + @$(LOG_SUPPORTED_KPI) "all" $(_v)$(SRCROOT)/config/list_supported.sh $(SOURCE) all $@ build_symbol_sets: check_all_exports $(SYMBOL_SET_BUILD) $(OBJPATH)/allsymbols \ @@ -106,7 +123,7 @@ do_config_all:: build_symbol_sets # There's no simple static pattern rule for these paths, so hardcode dependencies in the command list $(SYMROOT_INSTALL_KEXT_MACHO_FILES): ALWAYS $(_v)$(MKDIR) $(dir $@) - $(call makelog,$(ColorH)INSTALLSYM$(Color0) $(ColorF)symbolset $(notdir $@)$(Color0) "($(ColorLF)$(CURRENT_ARCH_CONFIG_LC)$(Color0))") + @$(LOG_INSTALLSYM) "symbolset $(notdir $@)$(Color0) ($(ColorLF)$(CURRENT_ARCH_CONFIG_LC)$(Color0))" $(_v)if [ $(OBJROOT)/.symbolset.timestamp -nt $@ ]; then \ $(INSTALL) $(EXEC_INSTALL_FLAGS) $(OBJPATH)/$(@F).symbolset $@; \ cmdstatus=$$?; \ @@ -118,23 +135,23 @@ $(SYMROOT_INSTALL_KEXT_MACHO_FILES): ALWAYS $(SYMROOT_INSTALL_KEXT_PLISTS): $(SYMROOT)/% : $(SOURCE)/% $(_v)$(MKDIR) $(dir $@) - $(call makelog,$(ColorH)INSTALLSYM$(Coloro) $(ColorLF)kextplist$(Color0) $(ColorF)$*$(Color0)) + @$(LOG_INSTALLSYM) "kextplist$(Color0) $(ColorF)$*" $(_v)$(INSTALL) $(DATA_INSTALL_FLAGS) $< $@ $(_v)$(NEWVERS) $@ $(_vstdout) $(DSTROOT_INSTALL_KEXT_PLISTS): $(INSTALL_KEXT_DIR)/% : $(SYMROOT)/% $(_v)$(MKDIR) $(dir $@) - $(call makelog,$(ColorH)INSTALL$(Color0) $(ColorLF)kextplist$(Color0) $(ColorF)$*$(Color0)) + @$(LOG_INSTALLSYM) "kextplist$(Color0) $(ColorF)$*" $(_v)$(INSTALL) $(DATA_INSTALL_FLAGS) $< $@ $(DSTROOT_INSTALL_KEXT_MACHO_FILES): $(INSTALL_KEXT_DIR)/% : $(SYMROOT)/% ALWAYS $(_v)$(MKDIR) $(dir $@) - $(call makelog,$(ColorF)INSTALL$(Color0) $(ColorF)$(notdir $@)$(Color0) "($(ColorLF)$(CURRENT_ARCH_CONFIG_LC)$(Color0))") + @$(LOG_INSTALL) "$(notdir $@)$(Color0) ($(ColorLF)$(CURRENT_ARCH_CONFIG_LC)$(Color0))" $(_v)$(INSTALL) $(EXEC_INSTALL_FLAGS) $< $@ $(DSTROOT)/$(KRESDIR)/$(MD_SUPPORTED_KPI_FILENAME) $(DSTROOT)/$(KRESDIR)/$(MI_SUPPORTED_KPI_FILENAME): $(DSTROOT)/$(KRESDIR)/% : $(OBJPATH)/% $(_v)$(MKDIR) $(dir $@) - $(call makelog,$(ColorH)INSTALL$(Color0) $(ColorF)$*$(Color0)) + @$(LOG_INSTALL) "$*" $(_v)$(INSTALL) $(INSTALL_FLAGS) 
$< $@ ifneq ($(INSTALL_KASAN_ONLY),1) @@ -146,14 +163,13 @@ do_config_install:: $(SYMROOT_INSTALL_KEXT_MACHO_FILES) \ $(DSTROOT)/$(KRESDIR)/$(MI_SUPPORTED_KPI_FILENAME) endif - $(OBJPATH)/all-kpi.exp: $(EXPORTS_FILES) - $(_v)$(SOURCE)/generate_linker_exports.sh $@ $+ $(KASAN_EXPORTS) + $(_v)$(SOURCE)/generate_linker_exports.sh $@ $+ $(Kasan_EXPORTS) $(OBJPATH)/all-alias.exp: $(EXPORTS_FILES) - $(_v)$(SOURCE)/generate_linker_aliases.sh $@ $+ $(KASAN_EXPORTS) + $(_v)$(SOURCE)/generate_linker_aliases.sh $@ $+ $(Kasan_EXPORTS) -do_build_all:: $(OBJPATH)/all-kpi.exp $(OBJPATH)/all-alias.exp +do_build_all:: $(OBJPATH)/all-kpi.exp $(OBJPATH)/all-alias.exp $(OBJPATH)/symbolsets.plist include $(MakeInc_rule) include $(MakeInc_dir) diff --git a/config/MasterVersion b/config/MasterVersion index 3686593d5..09a96f00e 100644 --- a/config/MasterVersion +++ b/config/MasterVersion @@ -1,4 +1,4 @@ -19.6.0 +20.1.0 # The first line of this file contains the master version number for the kernel. # All other instances of the kernel version in xnu are derived from this file. diff --git a/config/Private.arm64.MacOSX.exports b/config/Private.arm64.MacOSX.exports new file mode 100644 index 000000000..b39330259 --- /dev/null +++ b/config/Private.arm64.MacOSX.exports @@ -0,0 +1,41 @@ +_aes_decrypt +_bufattr_delayidlesleep +_csr_check +_mountroot_post_hook +_unmountroot_pre_hook +_vfs_addtrigger +_vfs_settriggercallback +_vfs_resolver_result +_vnode_trigger_update +_dqfileclose +_dqfileopen +_dqflush +_dqget +_dqhashinit +_dqisinitialized +_dqlock +_dqrele +_dqsync +_dqsync_orphans +_dqunlock +_qf_get +_qf_put +_dqfileinit +_dqreclaim +_cs_vm_supports_4k_translations +_csfg_get_supplement_platform_binary +_csfg_get_supplement_cdhash +_csfg_get_supplement_linkage_cdhash +_csfg_get_supplement_prod_signed +_csfg_get_supplement_signer_type +_csfg_get_supplement_teamid +_xts_decrypt +_xts_done +_xts_encrypt +_xts_start +#macOS only codesigning kpi +_csproc_disable_enforcement +_csproc_mark_invalid_allowed +_csproc_check_invalid_allowed +_csproc_hardened_runtime +_csproc_forced_lv diff --git a/config/Private.arm64.exports b/config/Private.arm64.exports index e3bc84a7d..f64ac3509 100644 --- a/config/Private.arm64.exports +++ b/config/Private.arm64.exports @@ -31,13 +31,29 @@ _ml_set_reset_time _ml_thread_is64bit _pe_shmcon_set_child _proc_getcdhash +_sched_perfcontrol_inherit_recommendation_from_tg _sched_perfcontrol_register_callbacks _sched_perfcontrol_update_recommended_cores _sched_perfcontrol_thread_group_recommend +_sched_perfcontrol_thread_group_preferred_clusters_set +_sched_perfcontrol_edge_matrix_get +_sched_perfcontrol_edge_matrix_set _sched_perfcontrol_update_callback_deadline _thread_group_join_io_storage +_thread_group_join_perf_controller +_ml_cpu_signal +_ml_cpu_signal_deferred +_ml_cpu_signal_retract +_ml_get_cpu_count +_ml_get_boot_cpu_number +_ml_get_cpu_number +_ml_get_cluster_number +_ml_get_max_cpu_number +_ml_get_topology_info +_ml_lockdown_handler_register _ml_static_ptovirt _ml_static_mfree +_ml_update_cluster_wfe_recommendation _ex_cb_register _pgtrace_init _pgtrace_start @@ -50,3 +66,7 @@ _mach_bridge_init_timestamp _mach_bridge_set_params _PE_panic_debugging_enabled _register_additional_panic_data_buffer +_apply_func_phys +_Switch_context +_gT1Sz + diff --git a/config/Private.exports b/config/Private.exports index 2a8547757..28b023dd9 100644 --- a/config/Private.exports +++ b/config/Private.exports @@ -3,6 +3,7 @@ __ZN15IORegistryEntry18setIndexedPropertyEjP8OSObject 
__ZNK15IORegistryEntry18getIndexedPropertyEj __ZN16IOPlatformExpert* __ZNK16IOPlatformExpert* +__ZN18IOMemoryDescriptor16setPreparationIDEv __ZTV16IOPlatformExpert __ZN18IODTPlatformExpert* __ZNK18IODTPlatformExpert* @@ -10,12 +11,115 @@ __ZTV18IODTPlatformExpert __ZN5IOCPU* __ZNK5IOCPU* __ZN12IOUserClient27copyObjectForPortNameInTaskEP4taskjPP8OSObject +__ZN12IOUserClient27copyObjectForPortNameInTaskEP4taskjR11OSSharedPtrI8OSObjectE __ZN12IOUserClient27copyPortNameForObjectInTaskEP4taskP8OSObjectPj __ZN12IOUserClient30adjustPortNameReferencesInTaskEP4taskji __ZTV5IOCPU __ZN24IOCPUInterruptController* __ZNK24IOCPUInterruptController* __ZTV24IOCPUInterruptController +__ZN6OSKext10gMetaClassE +__ZN6OSKext10superClassE +__ZTV6OSKext +__ZN10IOMachPort10gMetaClassE +__ZN10IOMachPort10superClassE +__ZTV10IOMachPort +__ZN11IOPMRequest10gMetaClassE +__ZN11IOPMRequest10superClassE +__ZTV11IOPMRequest +__ZN11IOServicePM10gMetaClassE +__ZN11IOServicePM10superClassE +__ZTV11IOServicePM +__ZN12IOUserServer10gMetaClassE +__ZN12IOUserServer10superClassE +__ZTV12IOUserServer +__ZN12PMHaltWorker10gMetaClassE +__ZN12PMHaltWorker10superClassE +__ZTV12PMHaltWorker +__ZN13IOPMWorkQueue10gMetaClassE +__ZN13IOPMWorkQueue10superClassE +__ZTV13IOPMWorkQueue +__ZN13IOUserService10gMetaClassE +__ZN13IOUserService10superClassE +__ZTV13IOUserService +__ZN13PMTraceWorker10gMetaClassE +__ZN13PMTraceWorker10superClassE +__ZTV13PMTraceWorker +__ZN14IOUserIterator10gMetaClassE +__ZN14IOUserIterator10superClassE +__ZTV14IOUserIterator +__ZN15IOPanicPlatform10gMetaClassE +__ZN15IOPanicPlatform10superClassE +__ZTV15IOPanicPlatform +__ZN15IOUserResources10gMetaClassE +__ZN15IOUserResources10superClassE +__ZTV15IOUserResources +__ZN15OSUserMetaClass10gMetaClassE +__ZN15OSUserMetaClass10superClassE +__ZTV15OSUserMetaClass +__ZN15PMSettingHandle10gMetaClassE +__ZN15PMSettingHandle10superClassE +__ZTV15PMSettingHandle +__ZN15PMSettingObject10gMetaClassE +__ZN15PMSettingObject10superClassE +__ZTV15PMSettingObject +__ZN15_IOConfigThread10gMetaClassE +__ZN15_IOConfigThread10superClassE +__ZTV15_IOConfigThread +__ZN16IOKitDiagnostics10gMetaClassE +__ZN16IOKitDiagnostics10superClassE +__ZTV16IOKitDiagnostics +__ZN16IOPMRequestQueue10gMetaClassE +__ZN16IOPMRequestQueue10superClassE +__ZTV16IOPMRequestQueue +__ZN16IOUserUserClient10gMetaClassE +__ZN16IOUserUserClient10superClassE +__ZTV16IOUserUserClient +__ZN19IOPMCompletionQueue10gMetaClassE +__ZN19IOPMCompletionQueue10superClassE +__ZTV19IOPMCompletionQueue +__ZN19IOPMPowerStateQueue10gMetaClassE +__ZN19IOPMPowerStateQueue10superClassE +__ZTV19IOPMPowerStateQueue +__ZN19IOPolledFilePollers10gMetaClassE +__ZN19IOPolledFilePollers10superClassE +__ZTV19IOPolledFilePollers +__ZN19PMAssertionsTracker10gMetaClassE +__ZN19PMAssertionsTracker10superClassE +__ZTV19PMAssertionsTracker +__ZN22IOKitDiagnosticsClient10gMetaClassE +__ZN22IOKitDiagnosticsClient10superClassE +__ZTV22IOKitDiagnosticsClient +__ZN22IOPlatformExpertDevice10gMetaClassE +__ZN22IOPlatformExpertDevice10superClassE +__ZTV22IOPlatformExpertDevice +__ZN22_IOServiceNullNotifier10gMetaClassE +__ZN22_IOServiceNullNotifier10superClassE +__ZTV22_IOServiceNullNotifier +__ZN24IOPerfControlWorkContext10gMetaClassE +__ZN24IOPerfControlWorkContext10superClassE +__ZTV24IOPerfControlWorkContext +__ZN24IOUserServerCheckInToken10gMetaClassE +__ZN24IOUserServerCheckInToken10superClassE +__ZTV24IOUserServerCheckInToken +__ZN25IOServiceUserNotification10gMetaClassE +__ZN25IOServiceUserNotification10superClassE +__ZTV25IOServiceUserNotification 
+__ZN27IOPMServiceInterestNotifier10gMetaClassE +__ZN27IOPMServiceInterestNotifier10superClassE +__ZTV27IOPMServiceInterestNotifier +__ZN27PassthruInterruptController10gMetaClassE +__ZN27PassthruInterruptController10superClassE +__ZTV27PassthruInterruptController +__ZN32IOServiceMessageUserNotification10gMetaClassE +__ZN32IOServiceMessageUserNotification10superClassE +__ZTV32IOServiceMessageUserNotification +__ZN35IOServiceNotificationDispatchSource10gMetaClassE +__ZN35IOServiceNotificationDispatchSource10superClassE +__ZTV35IOServiceNotificationDispatchSource +__ZN38OSAction_IOUserClient_KernelCompletion10gMetaClassE +__ZN38OSAction_IOUserClient_KernelCompletion10superClassE +__ZTV38OSAction_IOUserClient_KernelCompletion _PE_i_can_has_kernel_configuration _add_fsevent _need_fsevent @@ -65,8 +169,12 @@ _convert_task_to_port _cp_is_valid_class _cp_key_store_action _cp_os_version +_cpu_event_register_callback +_cpu_event_unregister_callback _cpu_to_processor _cpx_alloc +_cpx_alloc_ctx +_cpx_free_ctx _cpx_can_copy _cpx_copy _cpx_flush @@ -145,6 +253,7 @@ _ctl_id_by_name _ctl_name_by_id _escape_str _fd_rdwr +_fp_get_pipe_id _get_aiotask _get_system_inshutdown _gpu_accumulate_time @@ -242,11 +351,146 @@ _kern_allocation_name_allocate _kern_allocation_name_release _thread_set_allocation_name _kern_asl_msg +_kern_buflet_get_data_address +_kern_buflet_get_data_offset +_kern_buflet_get_data_length +_kern_buflet_get_data_limit +_kern_buflet_get_object_address +_kern_buflet_get_object_limit +_kern_buflet_get_object_segment +_kern_buflet_set_data_address +_kern_buflet_set_data_offset +_kern_buflet_set_data_length +_kern_buflet_set_data_limit +_kern_channel_advance_slot +_kern_channel_available_slot_count +_kern_channel_get_context +_kern_channel_get_next_slot +_kern_channel_increment_ring_stats +_kern_channel_increment_ring_net_stats +_kern_channel_notify +_kern_channel_reclaim +_kern_channel_ring_get_container +_kern_channel_ring_get_context +_kern_channel_slot_get_context +_kern_channel_slot_attach_packet +_kern_channel_slot_detach_packet +_kern_channel_slot_get_packet +_kern_channel_tx_refill +_kern_channel_get_service_class +_kern_nexus_attr_create +_kern_nexus_attr_clone +_kern_nexus_attr_destroy +_kern_nexus_attr_set +_kern_nexus_attr_get +_kern_nexus_controller_create +_kern_nexus_controller_destroy +_kern_nexus_controller_alloc_provider_instance +_kern_nexus_controller_alloc_net_provider_instance +_kern_nexus_controller_bind_provider_instance +_kern_nexus_controller_deregister_provider +_kern_nexus_controller_free_provider_instance +_kern_nexus_controller_read_provider_attr +_kern_nexus_controller_register_provider +_kern_nexus_controller_unbind_provider_instance +_kern_nexus_deregister_domain_provider +_kern_nexus_get_default_domain_provider +_kern_nexus_get_context +_kern_nexus_get_pbufpool +_kern_nexus_register_domain_provider +_kern_copy_and_inet_checksum +_kern_inet_checksum +_kern_packet_clear_flow_uuid +_kern_packet_clone +_kern_packet_clone_nosleep +_kern_packet_get_euuid +_kern_packet_finalize +_kern_packet_get_buflet_count +_kern_packet_get_data_length +_kern_packet_get_expire_time +_kern_packet_get_flow_uuid +_kern_packet_get_group_start +_kern_packet_get_group_end +_kern_packet_get_headroom +_kern_packet_get_inet_checksum +_kern_packet_get_packetid +_kern_packet_get_link_broadcast +_kern_packet_get_link_ethfcs +_kern_packet_get_link_header_length +_kern_packet_get_link_header_offset +_kern_packet_get_link_multicast +_kern_packet_get_network_header_offset 
+_kern_packet_get_next_buflet +_kern_packet_get_object_index +_kern_packet_get_policy_id +_kern_packet_get_service_class +_kern_packet_get_service_class_index +_kern_packet_is_high_priority +_kern_packet_get_timestamp +_kern_packet_get_token +_kern_packet_get_traffic_class +_kern_packet_get_transport_header_offset +_kern_packet_get_transport_new_flow +_kern_packet_get_transport_retransmit +_kern_packet_get_transport_last_packet +_kern_packet_get_transport_traffic_background +_kern_packet_get_transport_traffic_realtime +_kern_packet_get_vlan_id +_kern_packet_get_vlan_priority +_kern_packet_get_vlan_tag +_kern_packet_set_expire_time +_kern_packet_set_flow_uuid +_kern_packet_set_group_start +_kern_packet_set_group_end +_kern_packet_set_headroom +_kern_packet_set_inet_checksum +_kern_packet_set_link_broadcast +_kern_packet_set_link_header_length +_kern_packet_set_link_header_offset +_kern_packet_set_link_multicast +_kern_packet_set_link_ethfcs +_kern_packet_set_network_header_offset +_kern_packet_set_policy_id +_kern_packet_set_service_class +_kern_packet_set_timestamp +_kern_packet_set_token +_kern_packet_set_traffic_class +_kern_packet_set_transport_header_offset +_kern_packet_set_vlan_tag +_kern_packet_get_timestamp_requested +_kern_packet_get_tx_completion_status +_kern_packet_set_tx_completion_status +_kern_packet_tx_completion +_kern_packet_add_buflet +_kern_packet_append +_kern_packet_get_next +_kern_packet_set_chain_counts +_kern_packet_get_chain_counts +_kern_pbufpool_alloc +_kern_pbufpool_alloc_batch +_kern_pbufpool_alloc_batch_callback +_kern_pbufpool_alloc_nosleep +_kern_pbufpool_alloc_batch_nosleep +_kern_pbufpool_alloc_batch_nosleep_callback +_kern_pbufpool_create +_kern_pbufpool_destroy +_kern_pbufpool_free +_kern_pbufpool_free_batch +_kern_pbufpool_free_chain +_kern_pbufpool_get_context +_kern_pbufpool_get_memory_info +_kern_pbufpool_alloc_buffer +_kern_pbufpool_alloc_buffer_nosleep +_kern_pbufpool_free_buffer +_kern_pbufpool_alloc_buflet +_kern_pbufpool_alloc_buflet_nosleep +_kern_segment_get_index _kern_coredump_log _kern_register_coredump_helper _kern_config_is_development _kern_stack_snapshot_with_reason _kernel_debug_string +_kext_receipt _kmem_alloc_kobject:_kmem_alloc_kobject_external _kmem_alloc_pageable:_kmem_alloc_pageable_external _kx_qsort @@ -264,6 +508,7 @@ _m_prepend_2 _m_pullup _m_split _m_trailingspace:_mbuf_trailingspace +_mach_msg_filter_register_callback _mach_vm_allocate:_mach_vm_allocate_external _mach_vm_behavior_set _mach_vm_deallocate @@ -318,7 +563,6 @@ _mnl_msg_to_node _mnl_msg_from_node _mnl_set_link_state _mnl_terminate -_mountroot_post_hook _net_add_domain:_net_add_domain_old _net_add_proto:_net_add_proto_old _net_del_domain:_net_del_domain_old @@ -345,26 +589,47 @@ _current_persona_get _persona_put _pffinddomain:_pffinddomain_old _pffindproto:_pffindproto_old +_pmap_claim_reserved_ppl_page +_pmap_free_reserved_ppl_page +_pmap_in_ppl +_pmap_is_trust_cache_loaded +_pmap_load_legacy_trust_cache +_pmap_load_image4_trust_cache +_pmap_lockdown_image4_slab +_pmap_lookup_in_static_trust_cache +_pmap_lookup_in_loaded_trust_caches _port_name_to_task _port_name_to_thread _post_sys_powersource _proc_csflags +_proc_fdlist +_proc_get_filter_message_flag _proc_get_syscall_filter_mask_size _proc_getexecutableoffset _proc_getexecutablevnode +_proc_sdk _proc_selfexecutableargs _proc_issetugid +_proc_parent +_proc_parent_audit_token _proc_pidbackgrounded _proc_pidversion +_proc_platform _proc_set_responsible_pid +_proc_set_filter_message_flag 
+_proc_set_syscall_filter_callbacks +_proc_set_syscall_filter_index _proc_set_syscall_filter_mask _proc_selfcsflags +_proc_starttime _proc_task _proc_uniqueid _proc_puniqueid _proc_gettty _proc_gettty_dev _proc_exitstatus +_proc_is_translated +_proc_isinitproc _priv_check_cred _pru_abort_notsupp _pru_accept_notsupp @@ -472,7 +737,12 @@ _ttymalloc _ttymodem _ttyselect _ttysleep -_unmountroot_pre_hook +_utf8_normalizeOptCaseFoldAndHash +_utf8_normalizeOptCaseFoldAndCompare +_utf8_normalizeOptCaseFold +_utf8_normalizeOptCaseFoldToUTF8 +_utf8_normalizeOptCaseFoldAndMatchSubstring +_utf8_normalizeOptCaseFoldGetUVersion _unputc _unregister_decmpfs_decompressor _untimeout @@ -480,13 +750,16 @@ _utun_ctl_disable_crypto_dtls _utun_ctl_register_dtls _utun_pkt_dtls_input _vfs_context_bind +_vfs_context_can_resolve_triggers _vfs_context_get_special_port _vfs_context_set_special_port _vfs_context_is_dataless_manipulator _vfs_devvp _vfs_getattr _vfs_getbyid +_vfs_is_basesystem _vfs_mntlabel +_vfs_mount_id _vfs_nativexattrs _vfs_set_root_unmounted_cleanly _vfs_setcompoundopen @@ -536,6 +809,7 @@ _write_random # HFS/APFS Kext Requirements _IOBSDMountChange _OSKextUnloadKextWithLoadTag +_bsd_boot_to_recovery _bdwrite_internal _buf_markstatic _count_lock_queue @@ -580,6 +854,9 @@ _vfs_context_issuser _vfs_context_kernel _vfs_ctx_skipatime _vfs_extendedsecurity +_vfs_setmntsystem +_vfs_setmntsystemdata +_vfs_setmntswap _vfs_update_vfsstat _vn_pathconf _vnode_cleardirty @@ -622,15 +899,19 @@ _vfs_context_iskernel _mach_to_bsd_errno _vnode_rele_ext _proc_is_forcing_hfs_case_sensitivity +_proc_ignores_content_protection _is_package_name _sysctl__hw_features_children +_task_update_physical_writes _task_update_logical_writes _zalloc _zalloc_noblock +_zalloc_flags _zdestroy _zfree _zinit -_zone_change +_zone_create +_zone_require _fs_buffer_cache_gc_register _fs_buffer_cache_gc_unregister _cp_key_store_action_for_volume @@ -649,6 +930,31 @@ __Block_object_assign __Block_object_dispose __Block_signature __Block_tryRetain -__Block_use_RR2 __Block_use_stret _IOPMRootDomainGetWillShutdown +_kern_os_zfree +_vfs_clearnoswap +_vfs_setnoswap +_vnop_verify_desc +_mac_file_getxattr +_mac_file_setxattr +_mac_file_removexattr +_mac_label_get +_mac_label_set +_mac_iokit_check_hid_control +_mac_mount_check_snapshot_mount +_mac_vnode_check_trigger_resolve +_sbuf_cat +_sbuf_data +_sbuf_delete +_sbuf_finish +_sbuf_len +_sbuf_new +_sbuf_overflowed +_sbuf_putc +_sbuf_printf +_sbuf_vprintf +_sysctl__security_mac_children +_mac_vnode_label_allocate +_mac_vnode_label_get +_mac_vnode_label_set diff --git a/config/Private.x86_64.exports b/config/Private.x86_64.exports index a24003941..7983266f3 100644 --- a/config/Private.x86_64.exports +++ b/config/Private.x86_64.exports @@ -1,9 +1,16 @@ _IOGetBootKeyStoreData _IOGetAPFSKeyStoreData _IOSetAPFSKeyStoreData +_IOGetARVRootHashData +_IOSetARVRootHashData +_IOGetARVManifestData +_IOSetARVManifestData __Z33IOSKCopyKextIdentifierWithAddressm +__ZN14IOPMrootDomain17requestUserActiveEP9IOServicePKc +__ZN14IOPMrootDomain20claimSystemBootEventEP9IOServicejPKcP8OSObject __ZN14IOPMrootDomain20claimSystemWakeEventEP9IOServicejPKcP8OSObject __ZN14IOPMrootDomain20restartWithStackshotEv +__ZN14IOPMrootDomain24claimSystemShutdownEventEP9IOServicejPKcP8OSObject __ZN22IOInterruptEventSource7warmCPUEy _acpi_install_wake_handler _acpi_sleep_kernel @@ -17,11 +24,6 @@ _cpuid_leaf7_features _cpuid_info _csr_check _csr_get_active_config -_hv_ast_pending -_hv_ept_pmap_create -_hv_get* -_hv_release* -_hv_set* 
_lapic_end_of_interrupt _lapic_get_cmci_vector _lapic_unmask_perfcnt_interrupt @@ -33,6 +35,7 @@ _ml_port_io_write _ml_port_io_write16 _ml_port_io_write32 _ml_port_io_write8 +_mountroot_post_hook _mp_broadcast _mp_cpus_call _mp_cpus_call1 @@ -43,6 +46,7 @@ _semaphore_timedwait _smp_initialized _kext_get_vm_map _pal_machine_sleep +_unmountroot_pre_hook _vfs_addtrigger _vfs_resolver_auxiliary _vfs_resolver_result @@ -50,6 +54,12 @@ _vfs_resolver_sequence _vfs_resolver_status _vfs_settriggercallback _vnode_trigger_update +_xcpm_mbox_lock +_xcpm_mbox_unlock +_xcpm_bios_mbox_cmd_read +_xcpm_bios_mbox_cmd_unsafe_read +_xcpm_bios_mbox_cmd_write +_xcpm_is_hwp_enabled _xts_decrypt _xts_done _xts_encrypt @@ -97,3 +107,14 @@ _csproc_forced_lv #exports for vmware/, virtualbox, ... _mach_vm_map _mach_vm_remap + +#exports for AppleHV +_hv_ast_pending +_hv_disable +_hv_ept_pmap_create +_hv_get* +_hv_release* +_hv_set* +_hv_trace* +_thread_set_no_smt +_vm_map_disable_hole_optimization diff --git a/config/Unsupported.arm.exports b/config/Unsupported.arm.exports index ee0e8db30..f4c3b1189 100644 --- a/config/Unsupported.arm.exports +++ b/config/Unsupported.arm.exports @@ -19,7 +19,9 @@ _vm_map_copyout _ml_get_cpu_count _ml_get_boot_cpu_number _ml_get_cpu_number +_ml_get_cluster_number _ml_get_max_cpu_number +_ml_get_topology_info _ml_dbgwrap_halt_cpu_with_state _vm_map:_vm_map_external __ZN5IORTC15getUTCTimeOfDayEPjS0_ diff --git a/config/Unsupported.arm64.MacOSX.exports b/config/Unsupported.arm64.MacOSX.exports new file mode 100644 index 000000000..dbe250852 --- /dev/null +++ b/config/Unsupported.arm64.MacOSX.exports @@ -0,0 +1,36 @@ +__ZN15IOWatchDogTimer25_RESERVEDIOWatchDogTimer0Ev +__ZN15IOWatchDogTimer25_RESERVEDIOWatchDogTimer1Ev +__ZN15IOWatchDogTimer25_RESERVEDIOWatchDogTimer2Ev +__ZN15IOWatchDogTimer25_RESERVEDIOWatchDogTimer3Ev +__ZN16IOPlatformDevice26_RESERVEDIOPlatformDevice0Ev +__ZN16IOPlatformDevice26_RESERVEDIOPlatformDevice1Ev +__ZN16IOPlatformDevice26_RESERVEDIOPlatformDevice2Ev +__ZN16IOPlatformDevice26_RESERVEDIOPlatformDevice3Ev +__ZN5IORTC15_RESERVEDIORTC0Ev +__ZN5IORTC15_RESERVEDIORTC1Ev +__ZN5IORTC15_RESERVEDIORTC2Ev +__ZN5IORTC15_RESERVEDIORTC3Ev +__ZN5IORTC15_RESERVEDIORTC4Ev +__ZN5IORTC15_RESERVEDIORTC5Ev +__ZN5IORTC15_RESERVEDIORTC6Ev +__ZN5IORTC15_RESERVEDIORTC7Ev +__ZN16IOPlatformExpert26_RESERVEDIOPlatformExpert0Ev +__ZN16IOPlatformExpert26_RESERVEDIOPlatformExpert1Ev +__ZN16IOPlatformExpert26_RESERVEDIOPlatformExpert2Ev +__ZN16IOPlatformExpert26_RESERVEDIOPlatformExpert3Ev +__ZN16IOPlatformExpert26_RESERVEDIOPlatformExpert4Ev +__ZN16IOPlatformExpert26_RESERVEDIOPlatformExpert5Ev +__ZN16IOPlatformExpert26_RESERVEDIOPlatformExpert6Ev +__ZN16IOPlatformExpert26_RESERVEDIOPlatformExpert7Ev +__ZN16IOPlatformExpert26_RESERVEDIOPlatformExpert8Ev +__ZN16IOPlatformExpert26_RESERVEDIOPlatformExpert9Ev +__ZN16IOPlatformExpert27_RESERVEDIOPlatformExpert10Ev +__ZN16IOPlatformExpert27_RESERVEDIOPlatformExpert11Ev +__ZN18IODTPlatformExpert28_RESERVEDIODTPlatformExpert0Ev +__ZN18IODTPlatformExpert28_RESERVEDIODTPlatformExpert1Ev +__ZN18IODTPlatformExpert28_RESERVEDIODTPlatformExpert2Ev +__ZN18IODTPlatformExpert28_RESERVEDIODTPlatformExpert3Ev +__ZN18IODTPlatformExpert28_RESERVEDIODTPlatformExpert4Ev +__ZN18IODTPlatformExpert28_RESERVEDIODTPlatformExpert5Ev +__ZN18IODTPlatformExpert28_RESERVEDIODTPlatformExpert6Ev +__ZN18IODTPlatformExpert28_RESERVEDIODTPlatformExpert7Ev diff --git a/config/Unsupported.arm64.exports b/config/Unsupported.arm64.exports index 3687a2147..41aff1b5f 100644 --- 
a/config/Unsupported.arm64.exports +++ b/config/Unsupported.arm64.exports @@ -28,15 +28,6 @@ _kpc_get_period _kpc_set_period _kpc_get_actionid _kpc_set_actionid -_ml_cpu_signal -_ml_cpu_signal_deferred -_ml_cpu_signal_retract -_ml_get_cpu_count -_ml_get_boot_cpu_number -_ml_get_cpu_number -_ml_get_max_cpu_number -_ml_lockdown_handler_register -_ml_dbgwrap_halt_cpu_with_state _vm_map:_vm_map_external __ZN5IORTC15getUTCTimeOfDayEPmPj __ZN5IORTC15setUTCTimeOfDayEmj diff --git a/config/Unsupported.exports b/config/Unsupported.exports index 07f1387d6..d83a6923b 100644 --- a/config/Unsupported.exports +++ b/config/Unsupported.exports @@ -1,4 +1,3 @@ -_PE_i_can_has_debugger _Debugger _KUNCExecute _KUNCGetNotificationID @@ -6,6 +5,9 @@ _KUNCUserNotificationDisplayAlert _KUNCUserNotificationDisplayFromBundle _KUNCUserNotificationDisplayNotice _NDR_record +_OSSpinLockTry +_OSSpinLockUnlock +_PE_i_can_has_debugger _PE_kputc __ZN11IOMemoryMap9wireRangeEjyy __ZN15IOWatchDogTimer10gMetaClassE @@ -18,15 +20,35 @@ __ZN15IOWatchDogTimer9MetaClassC2Ev __ZN15IOWatchDogTimer9metaClassE __ZN15IOWatchDogTimerC2EPK11OSMetaClass __ZN15IOWatchDogTimerD2Ev +__ZN15IOWatchDogTimerdlEPvm +__ZN15IOWatchDogTimernwEm __ZN16IOPlatformDevice10gMetaClassE +__ZN16IOPlatformDevice10superClassE __ZN16IOPlatformDevice13matchLocationEP9IOService __ZN16IOPlatformDevice9metaClassE __ZN16IOPlatformDeviceC2EPK11OSMetaClass __ZN16IOPlatformDeviceD2Ev +__ZN16IOPlatformDevicedlEPvm +__ZN16IOPlatformDevicenwEm __ZN18IODTPlatformExpert9metaClassE +__ZN5IORTC10gMetaClassE +__ZN5IORTC10superClassE +__ZN5IORTC23getMonotonicClockOffsetEPx +__ZN5IORTC23setMonotonicClockOffsetEx +__ZN5IORTC29getMonotonicClockAndTimestampEPyS0_ +__ZN5IORTC9MetaClassC1Ev +__ZN5IORTC9MetaClassC2Ev +__ZN5IORTC9metaClassE +__ZN5IORTCC2EPK11OSMetaClass +__ZN5IORTCD0Ev +__ZN5IORTCD1Ev +__ZN5IORTCD2Ev +__ZN5IORTCdlEPvm +__ZN5IORTCnwEm __ZN9IODTNVRAM10gMetaClassE +__ZN9IODTNVRAM10superClassE __ZN9IODTNVRAM10safeToSyncEv -__ZN9IODTNVRAM15initOFVariablesEv +__ZN9IODTNVRAM13initVariablesEv __ZN9IODTNVRAM15syncOFVariablesEv __ZN9IODTNVRAM16escapeDataToDataEP6OSData __ZN9IODTNVRAM16updateOWBootArgsEPK8OSSymbolP8OSObject @@ -40,14 +62,22 @@ __ZN9IODTNVRAM26calculatePartitionChecksumEPh __ZN9IODTNVRAM9metaClassE __ZN9IODTNVRAMC2EPK11OSMetaClass __ZN9IODTNVRAMD2Ev +__ZN9IODTNVRAMdlEPvm +__ZN9IODTNVRAMnwEm __ZNK15IOWatchDogTimer12getMetaClassEv __ZNK15IOWatchDogTimer9MetaClass5allocEv +__ZNK5IORTC12getMetaClassEv +__ZNK5IORTC9MetaClass5allocEv __ZNK9IODTNVRAM17getOFVariablePermEPK8OSSymbol +__ZNK9IODTNVRAM17getOFVariablePermEPKc __ZNK9IODTNVRAM17getOFVariableTypeEPK8OSSymbol +__ZNK9IODTNVRAM17getOFVariableTypeEPKc __ZTV15IOWatchDogTimer __ZTV16IOPlatformDevice +__ZTV5IORTC __ZTV9IODTNVRAM __ZTVN15IOWatchDogTimer9MetaClassE +__ZTVN5IORTC9MetaClassE __doprnt __doprnt_log _aes_decrypt_cbc @@ -76,15 +106,15 @@ _get_bsdtask_info _get_task_map _get_task_pmap _getsectdatafromheader -_host_get_special_port _host_get_exception_ports +_host_get_special_port _host_priv_self _hz -_ipc_kernel_map _iflt_attach_internal _ifnet_allocate_internal -_ifnet_set_fastlane_capable _ifnet_get_fastlane_capable +_ifnet_set_fastlane_capable +_ipc_kernel_map _ipf_addv4_internal _ipf_addv6_internal _kalloc:_kalloc_external @@ -107,18 +137,18 @@ _lck_rw_done _ldisc_deregister _ldisc_register _log -_mach_gss_lookup _mach_gss_accept_sec_context _mach_gss_accept_sec_context_v2 _mach_gss_hold_cred _mach_gss_init_sec_context _mach_gss_init_sec_context_v2 +_mach_gss_lookup _mach_gss_unhold_cred 
_mach_make_memory_entry_64 _mach_memory_entry_page_op _mach_memory_entry_range_op -_mach_msg_rpc_from_kernel_proper _mach_msg_destroy_from_kernel_proper +_mach_msg_rpc_from_kernel_proper _mach_vm_region _max_mem _mem_size @@ -144,7 +174,6 @@ _ml_phys_write_word_64 _ml_probe_read _ml_processor_register _ml_thread_policy -_mountroot_post_hook _msleep1 _ovbcopy _pmap_find_phys @@ -154,9 +183,8 @@ _processor_exit _processor_info _processor_start _putc -_rc4_crypt -_rc4_init _securelevel +_set_vm_privilege _sflt_register_internal _sha1_hardware_hook _sleep @@ -179,7 +207,6 @@ _vm_deallocate _vm_map_deallocate _vm_map_unwire _vm_map_wire:_vm_map_wire_external -_set_vm_privilege _vm_protect _vm_region _vm_region_object_create @@ -189,19 +216,3 @@ _vnop_kqfilt_add_desc _vnop_kqfilt_remove_desc _vnop_makenamedstream_desc _vnop_removenamedstream_desc -__ZN5IORTC10gMetaClassE -__ZN5IORTC10superClassE -__ZN5IORTC23getMonotonicClockOffsetEPx -__ZN5IORTC23setMonotonicClockOffsetEx -__ZN5IORTC29getMonotonicClockAndTimestampEPyS0_ -__ZN5IORTC9MetaClassC1Ev -__ZN5IORTC9MetaClassC2Ev -__ZN5IORTC9metaClassE -__ZN5IORTCC2EPK11OSMetaClass -__ZN5IORTCD0Ev -__ZN5IORTCD1Ev -__ZN5IORTCD2Ev -__ZNK5IORTC12getMetaClassEv -__ZNK5IORTC9MetaClass5allocEv -__ZTV5IORTC -__ZTVN5IORTC9MetaClassE diff --git a/config/Unsupported.x86_64.MacOSX.exports b/config/Unsupported.x86_64.MacOSX.exports new file mode 100644 index 000000000..927f16da9 --- /dev/null +++ b/config/Unsupported.x86_64.MacOSX.exports @@ -0,0 +1,16 @@ +__ZN15IOWatchDogTimer25_RESERVEDIOWatchDogTimer0Ev +__ZN15IOWatchDogTimer25_RESERVEDIOWatchDogTimer1Ev +__ZN15IOWatchDogTimer25_RESERVEDIOWatchDogTimer2Ev +__ZN15IOWatchDogTimer25_RESERVEDIOWatchDogTimer3Ev +__ZN16IOPlatformDevice26_RESERVEDIOPlatformDevice0Ev +__ZN16IOPlatformDevice26_RESERVEDIOPlatformDevice1Ev +__ZN16IOPlatformDevice26_RESERVEDIOPlatformDevice2Ev +__ZN16IOPlatformDevice26_RESERVEDIOPlatformDevice3Ev +__ZN5IORTC15_RESERVEDIORTC0Ev +__ZN5IORTC15_RESERVEDIORTC1Ev +__ZN5IORTC15_RESERVEDIORTC2Ev +__ZN5IORTC15_RESERVEDIORTC3Ev +__ZN5IORTC15_RESERVEDIORTC4Ev +__ZN5IORTC15_RESERVEDIORTC5Ev +__ZN5IORTC15_RESERVEDIORTC6Ev +__ZN5IORTC15_RESERVEDIORTC7Ev diff --git a/config/Unsupported.x86_64.exports b/config/Unsupported.x86_64.exports index c2af9c454..fd167dd60 100644 --- a/config/Unsupported.x86_64.exports +++ b/config/Unsupported.x86_64.exports @@ -1,13 +1,3 @@ -_OSSpinLockTry -_OSSpinLockUnlock -__ZN15IOWatchDogTimer25_RESERVEDIOWatchDogTimer0Ev -__ZN15IOWatchDogTimer25_RESERVEDIOWatchDogTimer1Ev -__ZN15IOWatchDogTimer25_RESERVEDIOWatchDogTimer2Ev -__ZN15IOWatchDogTimer25_RESERVEDIOWatchDogTimer3Ev -__ZN16IOPlatformDevice26_RESERVEDIOPlatformDevice0Ev -__ZN16IOPlatformDevice26_RESERVEDIOPlatformDevice1Ev -__ZN16IOPlatformDevice26_RESERVEDIOPlatformDevice2Ev -__ZN16IOPlatformDevice26_RESERVEDIOPlatformDevice3Ev __ZN9IODTNVRAM17getOWVariableInfoEjPPK8OSSymbolPjS4_ __ZN9IODTNVRAM19convertObjectToPropEPhPjPK8OSSymbolP8OSObject __ZN9IODTNVRAM19convertPropToObjectEPhjS0_jPPK8OSSymbolPP8OSObject @@ -28,6 +18,7 @@ _ml_cpu_int_event_time _ml_get_apicid _ml_get_maxbusdelay _ml_get_maxsnoop +_mountroot_post_hook _mp_rendezvous _mp_rendezvous_no_intrs _pmCPUControl @@ -44,13 +35,5 @@ _tmrCvt _tsc_get_info _PE_state _vm_map -__ZN5IORTC15_RESERVEDIORTC0Ev -__ZN5IORTC15_RESERVEDIORTC1Ev -__ZN5IORTC15_RESERVEDIORTC2Ev -__ZN5IORTC15_RESERVEDIORTC3Ev -__ZN5IORTC15_RESERVEDIORTC4Ev -__ZN5IORTC15_RESERVEDIORTC5Ev -__ZN5IORTC15_RESERVEDIORTC6Ev -__ZN5IORTC15_RESERVEDIORTC7Ev __ZN5IORTC15getUTCTimeOfDayEPmPj 
__ZN5IORTC15setUTCTimeOfDayEmj diff --git a/config/generate_combined_symbolsets_plist.sh b/config/generate_combined_symbolsets_plist.sh new file mode 100755 index 000000000..4c84b3d38 --- /dev/null +++ b/config/generate_combined_symbolsets_plist.sh @@ -0,0 +1,48 @@ +#!/bin/sh + +set -e + +if [ $# -lt 2 ]; then + echo "Usage: $0 output.plist input1.plist [input2.plist ... ]" 1>&2 + exit 1 +fi + +OUTPUT="$1" +shift + +printf \ +' + + + + SymbolsSets + +' > "$OUTPUT" + +for f in "$@"; do +awk ' + BEGIN { + print " " + } + /^\t/ { + print " " $0 + next + } + END { + print " " + } +' "$f" >> "$OUTPUT" +done + +printf \ +' + WeakRefFallbackSymbol + + SymbolName + _gOSKextUnresolved + + + +' >> "$OUTPUT" + +exit 0 diff --git a/config/generate_symbolset_plist.sh b/config/generate_symbolset_plist.sh new file mode 100755 index 000000000..a56a15f2f --- /dev/null +++ b/config/generate_symbolset_plist.sh @@ -0,0 +1,69 @@ +#!/bin/sh + +set -e + +if [ $# -lt 3 ]; then + echo "Usage: $0 output.plist Info.plist input1.exports [input2.exports ... ]" 1>&2 + exit 1 +fi + +OUTPUT="$1" +PLIST="$2" +if [ "${OUTPUT##*.}" != "plist" -o "${PLIST##*.}" != "plist" ]; then + echo "Usage: $0 output.plist Info.plist input1.exports [input2.exports ... ]" 1>&2 + exit 1 +fi +shift 2 + +printf \ +' + + + +' > "$OUTPUT" + +awk ' + /CFBundleIdentifier|OSBundleCompatibleVersion|CFBundleVersion/ { + print; getline; print + } +' $PLIST >> "$OUTPUT" + +sort -u "$@" | awk -F: ' + BEGIN { + print " Symbols" + print " " + } + $2 ~ /^_/ { + print " " + print " SymbolName" + print " "$1"" + print " AliasTarget" + print " "$2"" + print " " + next + } + $1 ~ /^_.*\*$/ { + print " " + print " SymbolPrefix" + print " "substr($1, 1, length($1) - 1)"" + print " " + next + } + $1 ~ /^_/ { + print " " + print " SymbolName" + print " "$1"" + print " " + next + } + END { + print " " + } +' >> "$OUTPUT" + +printf \ +' + +' >> "$OUTPUT" + +exit 0 diff --git a/doc/allocators.md b/doc/allocators.md new file mode 100644 index 000000000..55a844ea8 --- /dev/null +++ b/doc/allocators.md @@ -0,0 +1,466 @@ +# XNU General purpose allocators + +## Introduction + +XNU proposes two ways to allocate memory: +- the VM subsystem that provides allocations at the granularity of pages (with + `kernel_memory_allocate` and similar interfaces); +- the zone allocator subsystem (``) which is a slab-allocator of + objects of fixed size. + +This document describes all the allocator variants around the zone allocator, +how to use them and what their security model is. + +In addition to that, `` provides a variable-size general purpose +allocator implemented as a collection of zones of fixed size, and overflowing to +`kernel_memory_allocate` for allocations larger than a few pages (32KB when this +document was being written but this is subject to change/tuning in the future). + + +The Core Kernel allocators rely on the following headers: +- `` and `` for its API surface, which most + clients should find sufficient, +- `` and `` for interfaces that + need to be exported for introspection and implementation purposes, and is not + meant for general consumption. + +## TL;DR + +This section will give a rapid decision tree of which allocation method to use, +and general best practices. The rest of the document goes into more details and +offers more information that can explain the rationale behind these +recommendations. + +### Which allocator to use, and other advices + +1. If you are allocating memory that is never freed, use `zalloc_permanent*`. 
If + the allocation is larger than a page, then it will use + `kernel_memory_allocate` with the `KMA_PERMANENT` flag on your behalf. + The allocation is assumed to always succeed (this is mostly reserved for early + allocations that need to scale with the configuration of the machine and + cannot be decided at compile time), and will be zeroed. + +2. If the memory you are allocating is temporary and will not escape the scope + of the syscall it's used for, use `kheap_alloc` and `kheap_free` with the + `KHEAP_TEMP` heap. Note that temporary paths should use `zalloc(ZV_NAMEI)`. + +3. If the memory you are allocating will not hold pointers, and even more so + when the content of that piece of memory can be directly influenced by + user-space, then use `kheap_alloc` and `kheap_free` with the + `KHEAP_DATA_BUFFERS` heap. + +4. In general we prefer zalloc or kalloc interfaces, and would like to abandon + any legacy MALLOC/FREE interfaces over time. + +For all `kalloc` or `kheap_alloc` variants, these advices apply: + +- If your allocation size is of fixed size, of a sub-page size, and done with + the `Z_WAITOK` semantics (allocation can block), consider adding `Z_NOFAIL`, +- If you `bzero` the memory on allocation, prefer passing `Z_ZERO` which can be + optimized away more often than not. + +### Considerations for zones + +Performance wise, it is problematic to make a zone when the kernel tends to have +less than several pages worth of elements allocated at all times (think commonly +200k+ objects). When a zone is underutilized, then fragmentation becomes a +problem. + +Zones with a really high traffic of allocation and frees should consider using +zone caching, but this comes at a memory usage cost and needs to be evaluated. + +Security wise, the following questions need answering: +- Is this type "interesting" to confuse with another, if yes, having a separate + zone allows for usage of `zone_require()` and will by default sequester the + virtual address space; +- Is this type holding user "bytes", if yes, then it might be interesting to use + a zone view (like the `ZV_NAMEI` one for paths) instead; +- Is the type zeroed on allocation all the time? if yes, enabling + `ZC_ZFREE_CLEARMEM` will likely be a really marginal incremental cost that can + discover write-after-free bugs. + +## Variants + +There are several allocation wrappers in XNU, present for various reasons +ranging from additional accounting features (IOKit's `IONew`), conformance to +langauge requirements (C++ various `new` operators) or organical historical +reasons. + +`zalloc` and `kalloc` are considered the primitive allocation interfaces which +are used to implement all the other ones. The following table documents all +interfaces and their various properties. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+| Interface | Core XNU | Private Export | Public Export | Comments |
+| --------- | -------- | -------------- | ------------- | -------- |
+| **Core primitives** | | | | |
+| `zalloc` | Yes | Yes | No | The number of zones, due to their implementation, is limited. Until this limitation is lifted, general exposition to arbitrary kernel extensions is problematic. |
+| `kheap_alloc` | Yes | No | No | This is the true core implementation of `kalloc`; see the documentation about kalloc heaps. |
+| `kalloc` | Yes | Yes, Redirected | No | In XNU, `kalloc` is equivalent to `kheap_alloc(KHEAP_DEFAULT)`. In kernel extensions, `kalloc` is equivalent to `kheap_alloc(KHEAP_KEXT)`. Due to legacy contracts where allocation and deallocation happen on different sides of the XNU/kext boundary, `kfree` will allow freeing to either heap. New code should consider using the proper `kheap_*` variant instead. |
+| **Popular wrappers** | | | | |
+| `IOMalloc` | Yes | Yes, Redirected | Yes, Redirected | `IOMalloc` is a straight wrapper around `kalloc` and behaves like `kalloc`. It does provide some debugging features integrated with `IOKit` and is the allocator that drivers should use. Only kernel extensions that provide core infrastructure (filesystems, sandbox, ...) and out-of-tree core kernel components should use the primitive `zalloc` or `kalloc` directly. |
+| C++ `new` | Yes | Yes, Redirected | Yes, Redirected | C++'s various operators around `new` and `delete` are implemented by XNU. They redirect to the `KHEAP_KEXT` kalloc heap, as there is no use of the C++ default `operator new` in Core Kernel. When creating a subclass of `OSObject` with the IOKit macros, an `operator new` and `operator delete` are provided for this object that anchor the type to the `KHEAP_DEFAULT` heap when the class is defined in Core XNU, or to the `KHEAP_KEXT` heap when the class is defined in a kernel extension. |
+| `MALLOC` | Yes | Obsolete, Redirected | No | This is a legacy BSD interface that functions mostly like `kalloc`. For kexts, `FREE()` will allow freeing to either `KHEAP_DEFAULT` or `KHEAP_KEXT`, due to legacy interfaces that allocate on one side of the kext/core kernel boundary and free on the other. |
+| **Obsolete wrappers** | | | | |
+| `mcache` | Yes | Kinda | Kinda | The mcache/mbuf subsystem is mostly used by the BSD networking subsystem. Code that is not interacting with these interfaces should not adopt mcaches. |
+| `OSMalloc` | No | Obsolete, Redirected | Obsolete, Redirected | `` is a legacy subsystem that is no longer recommended. It provides extremely slow and non-scalable accounting, and no new code should use it. `IOMalloc` should be used instead. |
+| `MALLOC_ZONE` | No | Obsolete, Redirected | No | `MALLOC_ZONE` used to be a weird wrapper around `zalloc`, but with poorer security guarantees. It has been completely removed from XNU and should not be used. For backward compatibility reasons, it is still exported, but otherwise behaves exactly like `MALLOC`. |
+| `kern_os_*` | No | Obsolete, Redirected | Obsolete, Redirected | These symbols used to back the implementation of C++ `operator new` and are only kept for backward compatibility reasons. They should not be used by anyone directly. |
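+
+To make the TL;DR advice above concrete, here is a minimal, hedged sketch of
+the "user bytes" pattern. It is not code from xnu: the helper names are made
+up, and the `kheap_alloc(heap, size, flags)` / `kheap_free(heap, addr, size)`
+argument order is an assumption based on the interfaces named in this document.
+
+```c
+#include <kern/kalloc.h>
+
+/*
+ * Hypothetical helpers: user-influenced bytes go to KHEAP_DATA_BUFFERS so
+ * they can never alias kernel objects living in the general submap.
+ */
+static void *
+user_payload_alloc(vm_size_t len)
+{
+	void *buf = kheap_alloc(KHEAP_DATA_BUFFERS, len, Z_WAITOK | Z_ZERO);
+
+	if (buf == NULL) {
+		return NULL;
+	}
+	/* ... copyin() and validation elided ... */
+	return buf;
+}
+
+static void
+user_payload_free(void *buf, vm_size_t len)
+{
+	/* The matching free names the same heap and the same size. */
+	kheap_free(KHEAP_DATA_BUFFERS, buf, len);
+}
+```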
+ + +## The Zone allocator: concepts, performance and security + +Zones are created with `zone_create()`, and really meant never to be destroyed. +Destructible zones are here for legacy reasons, and not all features are +available to them. + +Zones allocate their objects from a specific fixed size map called the Zone Map. +This map is subdivided in a few submaps that provide different security +properties: + +- the VA Restricted map: it is used by the VM subsystem only, and allows for + extremely tight packing of pointers used by the VM subsystem. This submap + doesn't use sequestering. +- the general map: it is used by default by zones, and on embedded + defaults to using full VA sequestering (see below). +- the "bag of bytes" map: it is used for zones that provide various buffers + whose content is under the control of user-space. Segregating these + allocations from the other submaps closes attacks using such allocations to + spray kernel objects that live in the general map. + +It is worth noting that use of any allocation function in interrupt context is +never allowed in XNU, as none of our allocators are re-entrant and interrupt +safe. + +### Basic features + +`` defines several flags that can be used to alter the blocking +behavior of `zalloc` and `kalloc`: + +- `Z_NOWAIT` can be used to require a fully non blocking behavior, which can be + used for allocations under spinlock and other preemption disabled contexts; +- `Z_NOPAGEWAIT` allows for the allocator to block (typically on mutexes), + but not to wait for available pages if there are none; +- `Z_WAITOK` means that the zone allocator can wait and block. + +It is worth noting that unless the zone is exhaustible or "special" (which is +mostly the case for VM zones), then `zalloc` will never fail (but might block +for arbitrarily long if the zone map is under a lot of pressure). This is not +true of `kalloc` when the allocation is served by the VM. + +It is worth noting that `Z_ZERO` is provided so that the allocation returned by +the allocator is always zeroed. This should be used instead of manual usage of +`bzero` as the zone allocator is able to optimize it away when certain security +features that already guarantee the zeroing are engaged. + + +### Zone Caching + +Zones that have relatively fast allocation/deallocation patterns can use zone +caching (passing `ZC_CACHING`) to `zone_create()`. This enables per-CPU caches, +which hold onto several allocations per CPU. This should not be done lightly, +especially for zones holding onto large elements. + +### Type confusion (Zone Sequestering and `zone_require()`) + +In order to be slightly more resilient to Use after Free (UaF) bugs, XNU +provides two techniques: + +- using the `ZC_SEQUESTER` flag to `zone_create()`; +- manual use of `zone_require()` or `zone_id_require()`. + +The first form will cause the virtual address ranges that a given zone uses +to never be returned to the system, which essentially pins this address range +for holding allocations of this particular zone forever. When a zone is strongly +typed, it means that only objects of that particular type can ever be located +at this address. + +`zone_require()` is an interface that can be used prior to memory use to assert +that the memory belongs to a given zone. + +Both these techniques can be used to dramatically reduce type confusion bugs. 
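+
+The sketch below shows roughly what this pattern looks like for a hypothetical
+`struct widget` type. It is illustrative rather than lifted from xnu; the exact
+helper names (`zalloc_flags`, `zone_require(zone, addr)`) and the flag
+combination are assumptions based on the interfaces described in this section.
+
+```c
+#include <kern/zalloc.h>
+
+struct widget {
+	uint32_t  w_refcnt;
+	void    (*w_handler)(struct widget *);
+};
+
+static zone_t widget_zone;
+
+static void
+widget_zone_init(void)
+{
+	/* Dedicated, sequestered zone; elements are scrubbed on free. */
+	widget_zone = zone_create("widgets", sizeof(struct widget),
+	    ZC_SEQUESTER | ZC_ZFREE_CLEARMEM);
+}
+
+static struct widget *
+widget_alloc(void)
+{
+	return zalloc_flags(widget_zone, Z_WAITOK | Z_ZERO);
+}
+
+static void
+widget_use(struct widget *w)
+{
+	/* Panics unless `w` really is an element of widget_zone. */
+	zone_require(widget_zone, w);
+	w->w_handler(w);
+}
+```
+
+Whether a dedicated zone is worth it still depends on the utilization
+considerations from the TL;DR section above.
+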
+For example, the task zone uses both sequestering and judicious usage of +`zone_require()` in crucial parts which makes faking a `task_t` and using it +to confuse the kernel extremely difficult. + +When `zone_require()` can be used exhaustively in choking points, then +sequestering is no longer necessary to protect this type. For example, the +`ipc_port_t`, will take the `ip_lock()` or an `ip_reference()` prior to any +interesting use. These primitives have been extended to include a +`zone_id_require()` (the fastest existing form of `zone_require()`) which gives +us an exhaustive protection. As a result, it allows us not to sequester the +ports zone. This is interesting because userspace can cause spikes of +allocations of ports and this protects us from zone map exhaustion or more +generally increase cost to describe the sequestered address space of this zone +due to a high peak usage. + +### Usage of Zones in IOKit + +IOKit is a subsystem that is often used by attackers, and reducing type +confusion attacks against it is desireable. For this purpose, XNU exposes the +ability to create a zone rather than being allocated in a kalloc heap. + +Using the `OSDefineMetaClassAndStructorsWithZone` or any other +`OSDefineMetaClass.*WithZone` interface will cause the object's `operator new` +and `operator delete` to back the storage of these objects with zones. This is +available to first party kexts, and usage should be reserved to types that can +easily be allocated by user-space and in large quantities enough that the +induced fragmentation is acceptable. + +### Auto-zeroing + +A lot of bugs come from partially initialized data, or write-after-free. +To mitigate these issues, zones provide two level of protection: + +- page clearing +- element clear on free (`ZC_ZFREE_CLEARMEM`). + +Page clearing is used when new pages are added to the zone. The original version +of the zone allocator would cram pages into zones without changing their +content. Memory crammed into a zone will be cleared from its content. +This helps mitigate leaking/using uninitialized data. + +Element clear on free is an increased protection that causes `zfree()` to erase +the content of elements when they are returned to the zone. When an element is +allocated from a zone with this property set, then the allocator will check that +the element hasn't been tampered with before it is handed back. This is +particularly interesting when the allocation codepath always clears the returned +element: when using the `Z_ZERO` (resp. `M_ZERO`) with `zalloc` or `kalloc` +(resp. `MALLOC`), then the zone allocator knows not to issue this extraneous +zeroing. + +`ZC_ZFREE_CLEARMEM` at the time this document was written was default for any +zone where elements are smaller than 2 cachelines. This technique is +particularly interesting because things such as locks, refcounts or pointers +valid states can't be all zero. It makes exploitation of a Use-after-free more +difficult when this is engaged. + +### Poisoning + +The zone allocator also does statistical poisoning (see source for details). + +It also always zeroes the first 2 cachelines of any allocation on free, when +`ZC_ZFREE_CLEARMEM` isn't engaged. It sometimes mitigates certain kind of linear +buffer overflows. It also can be leveraged by types that have refcounts or locks +if those are placed "early" in the type definition, as zero is not a valid value +for such concepts. + +### Per-CPU allocations + +The zone allocator provides `ZC_PERCPU` as a way to declare a per-cpu zone. 
+Allocations from this zone are returning NCPU elements with a known stride. + +It is expected that such allocations are not performed in a rapid pattern, and +zone caching is not available for them. (zone caching actually is implemented +on top of a per-cpu zone). + +Usage of per-cpu zone should be limited to extremely performance sensitive +codepaths or global counters due to the enormous amplification factor on +many-core systems. + +### Permanent allocations + +The kernel sometimes needs to provide persistent allocations that depend on +parameters that aren't compile time constants, but will not vary over time (NCPU +is an obvious example here). + +The zone subsystem provides a `zalloc_permanent*` family of functions that help +allocating memory in such a fashion in a very compact way. + +Unlike the typical zone allocators, this allows for arbitrary sizes, in a +similar fashion to `kalloc`. These functions will never fail (if the allocation +fails, the kernel will panic), and always return zeroed memory. Trying to free +these allocations results in a kernel panic. + + +## kalloc: a heap of zones + +Kalloc is a general malloc-like allocator that is backed by zones when the size +of the allocation is sub-page (actually smaller than 32K at the time this +document was written, but under KASAN or other memory debugging techniques, this +limit for the usable payload might actually be lower). Larger allocations use +`kernel_memory_allocate` (KMA). + +The kernel calls the collection of zones that back kalloc a "kalloc heap", and +provides 3 builtin ones: + +- `KHEAP_DEFAULT`, the "default" heap, is the one that serves `kalloc` in Core + Kernel (XNU proper); +- `KHEAP_KEXT`, the kernel extension heap, is the one that serves `kalloc` in + kernel extensions (see "redirected" symbols in the Variants table above); +- `KHEAP_DATA_BUFFERS` which is a special heap, which allocates out of the "User + Data" submap, and is meant for allocation of payloads that hold no pointer and + tend to be under the control of user space (paths, pipe buffers, OSData + backing stores, ...). + +In addition to that, the kernel provides an extra "magical" kalloc heap: +`KHEAP_TEMP`, it is for all purposes an alias of `KHEAP_DEFAULT` but enforces +extra semantics: allocations and deallocations out of this heap must be +performed "in scope". It is meant for allocations that are made to support a +syscall, and that will be freed before that syscall returns to user-space. + +The usage of `KHEAP_TEMP` will ensure that there is no outstanding allocation at +various points (such as return-to-userspace) and will panic the system if this +property is broken. The `kheap_temp_debug=1` boot-arg can be used on development +kernels to debug such issues when the occur. + +As far as security policies are concerned, the default and kext heap are fully +segregated per size-class. The data buffers heap is isolated in the user data +submaps, and hence can never produce adresses aliasing with any other kind of +allocations in the system. + + +## Accounting (Zone Views and Kalloc Heap Aliases) + +The zone subsystem provides several accounting properties that are reported by +the `zprint(1)` command. Historically, some zones have been introduced to help +with accounting, to the cost of increased fragmentation (the more allocations +are issued from the same zone, the lower the fragmentation). It is now possible +to define zone views and kalloc heap aliases, which are two similar concepts for +zones and kalloc heaps respectively. 
+ +Zone views are declared (in headers) and defined (in modules) with +`ZONE_VIEW_DECLARE` and `ZONE_VIEW_DEFINE`, and can be an alias either for +another regular zone, or a specific zone of a kalloc heap. This is for example +used for the `ZV_NAMEI` zone out of which temporary paths are allocated (this is +an alias to the `KHEAP_DATA_BUFFERS` 1024 bytes zone). Extra accounting is +issued for these views and are also reported by `zprint(1)`. + +In a similar fashion, `KALLOC_HEAP_DECLARE` and `KALLOC_HEAP_DEFINE` can be used +to declare a kalloc heap alias that gets its own accounting. It is particularly +useful to track leaks and various other things. + +The accounting of zone and heap views isn't free (and has a per-CPU cost) and +should be used wisely. However, if the alternative is a fully separated zone, +then the memory cost of the accounting would likely be dwarfed by the +fragmentation cost of the new zone. + +At this time, views can only be made by Core Kernel. + diff --git a/doc/atomics.md b/doc/atomics.md index eda4cc2d3..40397dee2 100644 --- a/doc/atomics.md +++ b/doc/atomics.md @@ -14,7 +14,7 @@ as this document builds on it, and explains the liberties XNU takes with said model. All the interfaces discussed in this document are available through -the `` header. +the `` header. Note: Linux has thorough documentation around memory barriers (Documentation/memory-barriers.txt), some of which is Linux specific, @@ -49,8 +49,7 @@ matching *seq_cst* atomic operations on your behalf. The sequentially consistent world is extremely safe from a lot of compiler and hardware reorderings and optimizations, which is great, but comes with -a huge cost in terms of memory barriers. It is also completely wasted when -building for a non SMP configuration. +a huge cost in terms of memory barriers. It seems very tempting to use `atomic_*_explicit()` functions with explicit @@ -109,18 +108,16 @@ How `os_atomic_*` tries to address `` pitfalls or a compiler barrier ordering `compiler_acquire`, `compiler_release`, `compiler_acq_rel`. -4. `os_atomic_*` elides barriers for non SMP configurations - by default, however, it emits the proper compiler barriers - that correspond to the requested memory ordering (using - `atomic_signal_fence()`), even on UP configuration, so that - the compiler cannot possibly reorder code on UP systems. +4. `os_atomic_*` emits the proper compiler barriers that + correspond to the requested memory ordering (using + `atomic_signal_fence()`). Best practices for the use of atomics in XNU -------------------------------------------- For most generic code, the `os_atomic_*` functions from -`` are the perferred interfaces. +`` are the perferred interfaces. `__sync_*`, `__c11_*` and `__atomic_*` compiler builtins should not be used. @@ -129,9 +126,6 @@ For most generic code, the `os_atomic_*` functions from - compiler coalescing / reordering is desired (refcounting implementations may desire this for example). -- defaulting to relaxed atomics for non SMP platforms doesn't make sense - (such as device access which may require memory fences even on UP systems). - Qualifying atomic variables with `_Atomic` or even `_Atomic volatile` is encouraged, however authors must @@ -334,7 +328,7 @@ most compilers, clang included, implement it as an equivalent for `memory_order_acquire`. However, its concept is useful for certain algorithms. 
-As an attempt to provide a replacement for this, `` +As an attempt to provide a replacement for this, `` implements an entirely new *dependency* memory ordering. The purpose of this ordering is to provide a relaxed load followed by an diff --git a/doc/pac.md b/doc/pac.md new file mode 100644 index 000000000..09bd89065 --- /dev/null +++ b/doc/pac.md @@ -0,0 +1,326 @@ +ARMv8.3 Pointer Authentication in xnu +===================================== + +Introduction +------------ + +This document describes xnu's use of the ARMv8.3-PAuth extension. Specifically, +xnu uses ARMv8.3-PAuth to protect against Return-Oriented-Programming (ROP) +and Jump-Oriented-Programming (JOP) attacks, which attempt to gain control flow +over a victim program by overwriting return addresses or function pointers +stored in memory. + +It is assumed the reader is already familar with the basic concepts behind +ARMv8.3-PAuth and what its instructions do. The "ARMv8.3-A Pointer +Authentication" section of Google Project Zero's ["Examining Pointer +Authentication on the iPhone +XS"](https://googleprojectzero.blogspot.com/2019/02/examining-pointer-authentication-on.html) +provides a good introduction to ARMv8.3-PAuth. The reader may find more +comprehensive background material in: + +* The "Pointer authentication in AArch64 state" section of the [ARMv8 + ARM](https://developer.arm.com/docs/ddi0487/latest/arm-architecture-reference-manual-armv8-for-armv8-a-architecture-profile) + describes the new instructions and registers associated with ARMv8.3-PAuth. + +* [LLVM's Pointer Authentication + documentation](https://github.com/apple/llvm-project/blob/apple/master/clang/docs/PointerAuthentication.rst) + outlines how clang uses ARMv8.3-PAuth instructions to harden key C, C++, + Swift, and Objective-C language constructs. + +### Threat model + +Pointer authentication's threat model assumes that an attacker has found a gadget +to read and write arbitrary memory belonging to a victim process, which may +include the kernel. The attacker does *not* have the ability to execute +arbitrary code in that process's context. Pointer authentication aims to +prevent the attacker from gaining control flow over the victim process by +overwriting sensitive pointers in its address space (e.g., return addresses +stored on the stack). + +Following this threat model, xnu takes a two-pronged approach to prevent the +attacker from gaining control flow over the victim process: + +1. Both xnu and first-party binaries are built with LLVM's `-arch arm64e` flag, + which generates pointer-signing and authentication instructions to protect + addresses stored in memory (including ones pushed to the stack). This + process is generally transparent to xnu, with exceptions discussed below. + +2. On exception entry, xnu hashes critical register state before it is spilled + to memory. On exception return, the reloaded state is validated against this + hash. + +The ["xnu PAC infrastructure"](#xnu-pac-infrastructure) section discusses how +these hardening techniques are implemented in xnu in more detail. + + +Key generation on Apple CPUs +---------------------------- + +ARMv8.3-PAuth implementations may use an implementation defined cipher. Apple CPUs implement an +optional custom cipher with two key-generation changes relevant to xnu. + + +### Per-boot diversifier + +Apple's optional cipher adds a per-boot diversifier. In effect, even if xnu +initializes the "ARM key" registers (`APIAKey`, `APGAKey`, etc.) 
with constants, +signing a given value will still produce different signatures from boot to boot. + + +### Kernel/userspace diversifier + +Apple CPUs also contain a second diversifier known as `KERNKey`. `KERNKey` is +automatically mixed into the final signing key (or not) based on the CPU's +exception level. When xnu needs to sign or authenticate userspace-signed +pointers, it uses the `ml_enable_user_jop_key` and `ml_disable_user_jop_key` +routines to manually enable or disable `KERNKey`. `KERNKey` allows the CPU to +effectively use different signing keys for userspace and kernel, without needing +to explicitly reprogram the generic ARM keys on every kernel entry and exit. + + +xnu PAC infrastructure +---------------------- + +For historical reasons, the xnu codebase collectively refers to xnu + iOS's +pointer authentication infrastructure as Pointer Authentication Codes (PAC). The +remainder of this document will follow this terminology for consistency with +xnu. + +### arm64e binary "slice" + +Binaries with PAC instructions are not fully backwards-compatible with non-PAC +CPUs. Hence LLVM/iOS treat PAC-enabled binaries as a distinct ABI "slice" named +arm64e. xnu enforces this distinction by disabling the PAC keys when returning +to non-arm64e userspace, effectively turning ARMv8.3-PAuth auth and sign +instructions into no-ops (see the ["SCTLR_EL1"](#sctlr-el1) heading below for +more details). + +### Kernel pointer signing + +xnu is built with `-arch arm64e`, which causes LLVM to automatically sign and +authenticate function pointers and return addresses spilled onto the stack. This +process is largely transparent to software, with some exceptions: + +- During early boot, xnu rebases and signs the pointers stored in its own + `__thread_starts` section (see `rebase_threaded_starts` in + `osfmk/arm/arm_init.c`). + +- As parts of the userspace shared region are paged in, the page-in handler must + also slide and re-sign any signed pointers stored in it. The ["Signed + pointers in shared regions"](#signed-pointers-in-shared-regions) section + discusses this in further detail. + +- Assembly routines must manually sign the return address with `pacibsp` before + pushing it onto the stack, and use an authenticating `retab` instruction in + place of `ret`. xnu provides assembly macros `ARM64_STACK_PROLOG` and + `ARM64_STACK_EPILOG` which emit the appropriate instructions for both arm64 + and arm64e targets. + + Likewise, branches in assembly to signed C function pointers must use the + authenticating `blraa` instruction in place of `blr`. + +- Signed pointers must be stripped with `ptrauth_strip` before they can be + compared against compile-time constants like `VM_MIN_KERNEL_ADDRESS`. + +### Testing data pointer signing + +xnu contains tests for each manually qualified data pointer that should be +updated as new pointers are qualified. The tests allocate a structure +containing a __ptrauth qualified member, and write a pointer to that member. +We can then compare the stored value, which should be signed, with a manually +constructed signature. See `ALLOC_VALIDATE_DATA_PTR`. + +Tests are triggered by setting the `kern.run_ptrauth_data_tests` sysctl. The +sysctl is implemented, and BSD structures are tested, in `bsd/tests/ptrauth_data_tests_sysctl.c`. +Mach structures are tested in `osfmk/tests/ptrauth_data_tests.c`. 
+ +### Managing PAC register state + +xnu generally tries to avoid reprogramming the CPU's PAC-related registers on +kernel entry and exit, since this could add significant overhead to a hot +codepath. Instead, xnu uses the following strategies to manage the PAC register +state. + +#### A keys + +Userspace processes' A keys (`AP{IA,DA,GA}Key`) are derived from the field +`jop_pid` inside `struct task`. For implementation reasons, an exact duplicate +of this field is cached in the corresponding `struct machine_thread`. + + +A keys are randomly generated at shared region initialization time (see ["Signed +pointers in shared regions"](#signed-pointers-in-shared-regions) below) and +copied into `jop_pid` during process activation. This shared region, and hence +associated A keys, may be shared among arm64e processes under specific +circumstances: + +1. "System processes" (i.e., processes launched from first-party signed binaries + on the iOS system image) generally use a common shared region with a default + `jop_pid` value, separate from non-system processes. + + If a system process wishes to isolate its A keys even from other system + processes, it may opt into a custom shared region using an entitlement in + the form `com.apple.pac.shared_region_id=[...]`. That is, two processes with + the entitlement `com.apple.pac.shared_region_id=foo` would share A keys and + shared regions with each other, but not with other system processes. + +2. Other arm64e processes automatically use the same shared region/A keys if + their respective binaries are signed with the same team-identifier strings. + +3. `posix_spawnattr_set_ptrauth_task_port_np()` allows explicit "inheriting" of + A keys during `posix_spawn()`, using a supplied mach task port. This API is + intended to support debugging tools that may need to auth or sign pointers + using the target process's keys. + +#### B keys + +Each process is assigned a random set of "B keys" (`AP{IB,DB}Key`) on process +creation. As a special exception, processes which inherit their parents' memory +address space (e.g., during `fork`) will also inherit their parents' B keys. +These keys are stored as the field `rop_pid` inside `struct task`, with an exact +duplicate in `struct machine_thread` for implementation reasons. + +xnu reprograms the ARM B-key registers during context switch, via the macro +`set_process_dependent_keys_and_sync_context` in `cswitch.s`. + +xnu uses the B keys internally to sign pointers pushed onto the kernel stack, +such as stashed LR values. Note that xnu does *not* need to explicitly switch +to a dedicated set of "kernel B keys" to do this: + +1. The `KERNKey` diversifier already ensures that the actual signing keys are + different between xnu and userspace. + +2. Although reprogramming the ARM B-key registers will affect xnu's signing keys + as well, pointers pushed onto the stack are inherently short-lived. + Specifically, there will never be a situation where a stack pointer value is + signed with one `current_task()`, but needs to be authed under a different + active `current_task()`. + +#### SCTLR_EL1 + +As discussed above, xnu disables the ARM keys when returning to non-arm64e +userspace processes. This is implemented by manipulating the `EnIA`, `EnIB`, +and `EnDA`, and `EnDB` bits in the ARM `SCTLR_EL1` system register. When +these bits are cleared, auth or sign instruction using the respective keys +will simply pass through their inputs unmodified. 
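+
+For illustration only, the four enable bits and the shape of such a toggle look
+roughly like the sketch below. The bit positions are architectural, but the
+helper itself is hypothetical; as described next, xnu does this work in its
+exception entry/return paths rather than with a C helper like this one.
+
+```c
+#include <stdbool.h>
+#include <stdint.h>
+
+#define SCTLR_EnIA   (1ULL << 31)   /* enable APIAKey instructions */
+#define SCTLR_EnIB   (1ULL << 30)   /* enable APIBKey instructions */
+#define SCTLR_EnDA   (1ULL << 27)   /* enable APDAKey instructions */
+#define SCTLR_EnDB   (1ULL << 13)   /* enable APDBKey instructions */
+
+static inline void
+pac_keys_set_enabled(bool enable)
+{
+	uint64_t sctlr = __builtin_arm_rsr64("SCTLR_EL1");
+	uint64_t mask  = SCTLR_EnIA | SCTLR_EnIB | SCTLR_EnDA | SCTLR_EnDB;
+
+	sctlr = enable ? (sctlr | mask) : (sctlr & ~mask);
+	__builtin_arm_wsr64("SCTLR_EL1", sctlr);
+	__builtin_arm_isb(0xf);      /* ISB SY so the new setting takes effect */
+}
+```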
+ +Initially, xnu cleared these bits during every `exception_return` to a +non-arm64e process. Since xnu itself uses these keys, the exception vector +needs to restore the same bits on every exception entry (implemented in the +`EL0_64_VECTOR` macro). + +Apple A13 CPUs now have controls that allow xnu to keep the PAC keys enabled at +EL1, independent of `SCTLR_EL1` settings. On these CPUs, xnu only needs to +reconfigure `SCTLR_EL1` when context-switching from a "vanilla" arm64 process to +an arm64e process, or vice-versa (`pmap_switch_user_ttb_internal`). + +### Signed pointers in shared regions + +Each userspace process has a *shared region* mapped into its address space, +consisting of code and data shared across all processes of the same processor +type, bitness, root directory, and (for arm64e processes) team ID. Comments at +the top of `osfmk/vm/vm_shared_region.c` discuss this region, and the process of +populating it, in more detail. + +As the VM layer pages in parts of the shared region, any embedded pointers must +be rebased. Although this process is not new, PAC adds a new step: these +embedded pointers may be signed, and must be re-signed after they are rebased. +This process is implemented as `vm_shared_region_slide_page_v3` in +`osfmk/vm/vm_shared_region.c`. + +xnu signs these embedded pointers using a shared-region-specific A key +(`sr_jop_key`), which is randomly generated when the shared region is created. +Since these pointers will be consumed by userspace processes, xnu temporarily +switches to the userspace A keys when re-signing them. + +### Signing spilled register state + +xnu saves register state into kernel memory when taking exceptions, and reloads +this state on exception return. If an attacker has write access to kernel +memory, it can modify this saved state and effectively get control over a +victim thread's control flow. + +xnu hardens against this attack by calling `ml_sign_thread_state` on exception +entry to hash certain registers before they're saved to memory. On exception +return, it calls the complementary `ml_check_signed_state` function to ensure +that the reloaded values still match this hash. `ml_sign_thread_state` hashes a +handful of particularly sensitive registers: + +* `pc, lr`: directly affect control-flow +* `cpsr`: controls process's exception level +* `x16, x17`: used by LLVM to temporarily store unauthenticated addresses + +`ml_sign_thread_state` also uses the address of the thread's `arm_saved_state_t` +as a diversifier. This step keeps attackers from using `ml_sign_thread_state` +as a signing oracle. An attacker may attempt to create a sacrificial thread, +set this thread to some desired state, and use kernel memory access gadgets to +transplant the xnu-signed state onto a victim thread. Because the victim +process has a different `arm_saved_state_t` address as a diversifier, +`ml_check_signed_state` will detect a hash mismatch in the victim thread. + +Apart from exception entry and return, xnu calls `ml_check_signed_state` and +`ml_sign_thread_state` whenever it needs to mutate one of these sensitive +registers (e.g., advancing the PC to the next instruction). This process looks +like: + +1. Disable interrupts +2. Load `pc, lr, cpsr, x16, x17` values and hash from thread's + `arm_saved_state_t` into registers +3. Call `ml_check_signed_state` to ensure values have not been tampered with +4. Mutate one or more of these values using *only* register-to-register + instructions +5. 
Call `ml_sign_thread_state` to re-hash the mutated thread state +6. Store the mutated values and new hash back into thread's `arm_saved_state_t`. +7. Restore old interrupt state + +Critically, none of the sensitive register values can be spilled to memory +between steps 1 and 7. Otherwise an attacker with kernel memory access could +modify one of these values and use step 5 as a signing oracle. xnu implements +these routines entirely in assembly to ensure full control over register use, +using a macro `MANIPULATE_SIGNED_THREAD_STATE()` to generate boilerplate +instructions. + +Interrupts must be disabled whenever `ml_check_signed_state` or +`ml_sign_thread_state` are called, starting *before* their inputs (`x0`--`x5`) +are populated. To understand why, consider what would happen if the CPU could +be interrupted just before step 5 above. xnu's exception handler would spill +the entire register state to memory. If an attacker has kernel memory access, +they could attempt to replace the spilled `x0`--`x5` values. These modified +values would then be reloaded into the CPU during exception return; and +`ml_sign_thread_state` would be called with new, attacker-controlled inputs. + +### thread_set_state + +The `thread_set_state` call lets userspace modify the register state of a target +thread. Signed userspace state adds a wrinkle to this process, since the +incoming FP, LR, SP, and PC values are signed using the *userspace process's* +key. + +xnu handles this in two steps. First, `machine_thread_state_convert_from_user` +converts the userspace thread state representation into an in-kernel +representation. Signed values are authenticated using `pmap_auth_user_ptr`, +which involves temporarily switching to the userspace keys. + +Second, `thread_state64_to_saved_state` applies this converted state to the +target thread. Whenever `thread_state64_to_saved_state` modifies a register +that makes up part of the thread state hash, it uses +`MANIPULATE_SIGNED_THREAD_STATE()` as described above to update this hash. + + +### Signing arbitrary data blobs + +xnu provides `ptrauth_utils_sign_blob_generic` and `ptrauth_utils_auth_blob_generic` +to sign and authenticate arbitrary blobs of data. Callers are responsible for +storing the pointer-sized signature returned. The signature is a rolling MAC +of the data, using the `pacga` instruction, mixed with a provided salt and optionally +further diversified by storage address. + +Use of these functions is inherently racy. The data must be read from memory +before each pointer-sized block can be added to the signature. In normal operation, +standard thread-safety semantics protect from corruption, however in the malicious +case, it may be possible to time overwriting the buffer before signing or after +authentication. + +Callers of these functions must take care to minimise these race windows by +using them immediately preceeding/following a write/read of the blob's data. diff --git a/doc/startup.md b/doc/startup.md new file mode 100644 index 000000000..15e587dfe --- /dev/null +++ b/doc/startup.md @@ -0,0 +1,275 @@ +XNU startup sequence +==================== + +### General Principles + +XNU Startup sequence is driven by the `` module. + +The startup sequence is made of individual subsystems (the `STARTUP_SUB_*` +values of the `startup_subsystem_id_t` type) that get initialized in sequence. + +A subsystem can use ranks to order the various initializers that make up its +initialization sequence. 
Usage of ranks is custom to each subsystem and must be +documented in this file. + +The subsystem module will basically run hooks in that order: + +``` +for (subsystem 0 -> N) { + for (rank 0 -> N) { + // run in no particular order for a given rank in the given subsystem + init(subsystem, rank); + } +} +``` + +### Extending the startup sequence + +When extending the startup sequence: + +1. add a new value to the `startup_subsystem_id_t` enum in the right order +2. document what services this phase provides, and how it uses ranks in this + file. + + +When hooking with a given subsystem, consult this documentation to use the +proper rank for your callback. + +If a new rank needs to be used, update this documentation in the proper section. + +--------------------------------------------------------------------------------- + + +`STARTUP_SUB_TUNABLES` +---------------------- + +### Description + +Initializes various globals that alter the behavior of the kernel, lookup +tables, ... Available hooks are: + +- `TUNABLES`: parses a boot arg into a global that will become read-only at + lockdown time, +- `TUNABLE_WRITEABLE`: same as `TUNABLE` but the global will not be locked down. + +### Rank usage + +- Rank 1: `TUNABLE`, `TUNABLE_WRITEABLE` +- Middle: globals that require complex initialization (e.g. SFI classes). + + +`STARTUP_SUB_LOCKS_EARLY` +------------------------- + +### Description + +Initializes early locks that do not require any memory allocations to be +initialized. Available hooks are: + +- `LCK_GRP_DECLARE*`: automatically initialized lock groups, +- `LCK_GRP_ATTR_DECLARE`: automatically initialized lock group attributes, +- `LCK_ATTR_DECLARE`: automatically initialized lock attributes, +- `LCK_SPIN_DECLARE*`: automatically initialized spinlocks, +- `LCK_RW_DECLARE`: automatically initialized reader/writer lock, +- `LCK_MTX_EARLY_DECLARE*`: automatically initialized mutexes, with statically + allocated buffers for statistics/tracing, +- `SIMPLE_LOCK_DECLARE*`: automatically initialized simple locks. + +### Rank usage + +- Rank 1: Initializes the module (`lck_mod_init`), +- Rank 2: `LCK_GRP_ATTR_DECLARE`, `LCK_ATTR_DECLARE`, +- Rank 3: `LCK_GRP_DECLARE*` +- Rank 4: `LCK_SPIN_DECLARE*`, `LCK_MTX_EARLY_DECLARE*`, + `LCK_RW_DECLARE`, `SIMPLE_LOCK_DECLARE*`. + + +`STARTUP_SUB_KPRINTF` +--------------------- + +### Description + +Initializes the kprintf subsystem. + +### Rank usage + +- Rank 1: calls the module initializer (`PE_init_kprintf`). + + +`STARTUP_SUB_PMAP_STEAL` +------------------------ + +### Description + +Allows for subsystems to steal early memory. + +### Rank usage + +N/A. + + +`STARTUP_SUB_VM_KERNEL` +----------------------- + +### Description + +Denotes that the early kernel VM is initialized. + +### Rank usage + +N/A. + + +`STARTUP_SUB_KMEM` +------------------ + +### Description + +Denotes that `kernel_memory_allocate` is now usable. + +### Rank usage + +N/A. + + +`STARTUP_SUB_KMEM_ALLOC` +------------------------ + +### Description + +Denotes that `kmem_alloc` is now usable. + +### Rank usage + +N/A. + + +`STARTUP_SUB_ZALLOC` +-------------------- + +### Description + +Initializes the zone allocator. + +- `ZONE_DECLARE`, `ZONE_INIT`: automatically initialized permanent zones. +- `ZONE_VIEW_DEFINE`, `KALLOC_HEAP_DEFINE`: zone and kalloc heap views. + + +### Rank usage + +- Rank 1: `zone_init`: setup the zone subsystem, this allows for the already + created VM/pmap zones to become dynamic. + +- Rank 2: `vm_page_module_init`: create the "vm pages" zone. 
+ The `vm_page_zone` must be created prior to `kalloc_init`; that routine can + trigger `zalloc()`s (for e.g. mutex statistic structure initialization). + + The `vm_page_zone` must exist to satisfy fictitious page allocations + (which are used for guard pages by the guard mode zone allocator). + +- Rank 3: Initialize kalloc. + +- Rank 4: Enable zone caching (uses kalloc) + +- Middle: for any initialization that only requires kalloc/zalloc + runs `ZONE_DECLARE` and `ZONE_INIT`. + +- Last: zone and kalloc heaps (`ZONE_VIEW_DEFINE`, `KALLOC_HEAP_DEFINE`). + + +`STARTUP_SUB_PERCPU` +-------------------- + +### Description + +Initializes the percpu subsystem. + +### Rank usage + +Rank 1: allocates the percpu memory, `percpu_foreach_base` and `percpu_foreach` + become usable. + + +`STARTUP_SUB_LOCKS` +------------------- + +### Description + +Initializes kernel locks that might require allocations (due to statistics and +tracing features). Available hooks are: + +- `LCK_MTX_DECLARE`: automatically initialized mutex, + + +### Rank usage + +- Rank 1: `LCK_MTX_DECLARE`. + + +`STARTUP_SUB_CODESIGNING` +------------------------- + +### Description + +Initializes the codesigning subsystem. + +### Rank usage + +- Rank 1: calls the module initializer (`cs_init`). + + +`STARTUP_SUB_OSLOG` +------------------- + +### Description + +Initializes the `os_log` facilities. + +### Rank usage + +- Rank 1: Calls the module initializer (`oslog_init`). + + +`STARTUP_SUB_MACH_IPC` +------------------- + +### Description + +Initializes the Mach IPC subsystem. + +### Rank usage + +- Rank 1: Initializes IPC submodule globals (ipc tables, voucher hashes, ...) +- Rank last: Final IPC initialization. + + +`STARTUP_SUB_EARLY_BOOT` +------------------------ + +### Description + +Denotes that subsystems that expect to operate with +interrupts or preemption enabled may begin enforcement. + +### Rank usage + +N/A. + + +`STARTUP_SUB_LOCKDOWN` +---------------------- + +### Description + +Denotes that the kernel is locking down, this phase should never be hooked. +When the kernel locks down: + +- data marked `__startup_data` and code marked `__startup_func` is unmapped, +- data marked `__security_const_late` or `SECURITY_READ_ONLY_LATE` becomes + read-only. + +### Rank usage + +N/A. + + diff --git a/doc/xnu_build_consolidation.md b/doc/xnu_build_consolidation.md new file mode 100644 index 000000000..40e02a302 --- /dev/null +++ b/doc/xnu_build_consolidation.md @@ -0,0 +1,142 @@ +# XNU Build Consolidation + +## Introduction and motivation + +XNU is supported on approximately 20 different targets. Whilst in some cases the differences between two +given targets are small (e.g. when they both support the same ISA), XNU has traditionally required to have +separate builds in cases where the topology of the targets differ (for example, when they feature different +core/cluster counts or cache sizes). Similarly, SoC-specific fix-ups are usually conditionally compiled +based on the target. + +Given the time it takes to compile all three different variants (release, debug and development) for each +supported SoC, usually several times a day for various teams across Apple, the goal of this project was to +reduce the number of existing builds, as well as to set up a simple framework that makes it easier to share +builds across different SoCs moving forward. + +Although this effort could be extended to KEXTs, and hence lead to shared KernelCaches across devices, the +scope of this document only includes XNU. 
In cases where KEXTs also differ across targets, or perhaps the
+required KEXTs are completely different in the first place, the kernel still needs to be linked
+appropriately with different sets of KEXTs and hence KernelCaches cannot be shared.
+
+
+## Changes required in XNU
+
+The kernel itself is relatively SoC-agnostic, although strongly architecture-dependent; this is because most
+of the SoC-specific aspects of the KernelCache are abstracted by the KEXTs. Things that pertain to the
+kernel include:
+
+* Number of cores/clusters in the system, their physical IDs and type.
+* Addresses of PIO registers that are to be accessed from the XNU side.
+* L1/L2 cache geometry parameters (e.g. size, number of sets/ways).
+* Just like other components, the kernel has its share of responsibility when it comes to setting up HID
+registers and applying fix-ups at various points during boot or elsewhere at runtime.
+* Certain kernel-visible architectural features are optional, which means that two same-generation SoCs may
+still differ in their feature set.
+
+All of these problems can be solved through a mix of relying more heavily on device tree information and
+performing runtime checks. The latter is possible because both the ARM architecture and Apple's
+extensions provide read-only registers that can be checked at runtime to discover supported features as well
+as various CPU-specific parameters.
+
+### Obtaining cache geometry parameters at runtime
+
+Although not often, the kernel may still need to derive parameters such as cache sizes and the number of
+sets/ways. XNU needs most of this information to perform set/way clean/invalidate operations.
+Prior to this work, these values were hardcoded for each supported target in `proc_reg.h`, and used in
+`caches_asm.s`. The ARM architecture provides the `CCSIDR_EL1` register, which can be used in conjunction
+with `CSSELR_EL1` to select the target cache and obtain geometry information.
+
+
+### Performing CPU/Revision-specific checks at runtime
+
+CPU and revision checks may be required at various places, although the focus here has been the application
+of tunables at boot time.
+
+Tunables are often applied:
+
+* On a specific core type of a specific SoC.
+* On a subset of all of the CPU revisions.
+* On all P-cores or all E-cores.
+
+This has led in the past to a number of nested, conditionally-compiled blocks of code that are not easy to
+understand or manage as new tunables are added or SoCs/revisions are deprecated.
+
+The changes applied as part of this work focus mainly on:
+
+1. Decoupling the tunable-application code from `start.s`.
+2. Splitting the tunable-application code across different files, one per supported architecture (e.g.
+`tunables_h7.h`, or `tunables_h11.h`).
+3. Providing "templates" for the most commonly-used combinations of tunables.
+4. Providing a family of assembly macros that can be used to conditionally execute code on a specific core
+type, CPU ID, revision(s), or a combination of these.
+
+All of the macros live in the 64-bit version of `proc_reg.h`, and are SoC-agnostic; they simply check the
+`MIDR_EL1` register against a CPU revision that is passed as a parameter to the macro, where applicable.
+Similarly, where a block of code is to be executed on a core type, rather than a specific core ID, a couple
+of the provided macros can check this against `MPIDR_EL1`.
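+
+Expressed in C for readability (the real macros are assembly in `proc_reg.h`), the kind of check they
+perform looks roughly like the sketch below; the helper names and the placeholder part-number/variant
+values are invented for this example.
+
+```c
+#include <stdbool.h>
+#include <stdint.h>
+
+/* MIDR_EL1 field layout per the ARM ARM; helper names are illustrative only. */
+#define MIDR_IMPLEMENTER(m)  (((m) >> 24) & 0xff)
+#define MIDR_PART_NUMBER(m)  (((m) >> 4)  & 0xfff)
+#define MIDR_VARIANT(m)      (((m) >> 20) & 0xf)   /* major revision (rN) */
+#define MIDR_REVISION(m)     ((m) & 0xf)           /* minor revision (pM) */
+
+/* Sketch: apply a tunable only on a given core type, and only up to a given
+ * major revision, decided at runtime rather than per-board at compile time. */
+static inline bool
+tunable_applies(uint16_t wanted_part, uint8_t last_affected_variant)
+{
+    uint64_t midr = __builtin_arm_rsr64("MIDR_EL1");
+
+    if (MIDR_PART_NUMBER(midr) != wanted_part) {
+        return false;    /* different core type: tunable does not apply */
+    }
+    return MIDR_VARIANT(midr) <= last_affected_variant;
+}
+```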
+
+
+### Checking for feature compatibility at runtime
+
+Some architectural features are optional, which means that disabling them at compile time may cause
+two same-generation SoCs to diverge.
+
+
+Rather than disabling features, and assuming this does not pose security risks or performance regressions,
+the preferred approach is to compile them in, but perform runtime checks to enable/disable them, possibly in
+early boot. The way these checks are performed varies from feature to feature (for example, VHE is an ARM
+feature, and the ARM ARM specifies how it can be discovered). For Apple-specific features, these are all
+advertised through the `AIDR_EL1` register. One of the changes is the addition of a function,
+`ml_feature_supported()`, that may be used to check for the presence of a feature at runtime.
+
+
+### Deriving core/cluster counts from device tree
+
+One of the aspects that until now has been hardcoded in XNU is the system topology: number of cores/clusters
+and their physical IDs. This effort piggybacks on other recent XNU changes which aimed to consolidate
+topology-related information into XNU, by parsing it from the device tree and exporting it to KEXTs through
+well-defined APIs.
+
+Changes applied as part of the XNU consolidation project include:
+
+* Extending the `ml_*` API to extract cluster information from the topology parser. New APIs include the following:
+    * `ml_get_max_cluster_number()`
+    * `ml_get_cluster_count()`
+    * `ml_get_first_cpu_id()`
+* Removing hardcoded core counts (`CPU_COUNT`) and cluster counts (`ARM_CLUSTER_COUNT`) from XNU, and
+replacing them with `ml_*` calls.
+* Similarly, deriving CPU physical IDs from the topology parser.
+
+
+### Allocating memory that is core size/cluster size/cache size aligned
+
+In some cases, certain statically-allocated arrays/structures need to be cache line-aligned, or have one
+element per core or cluster. Whilst this information is not known precisely at compile time anymore, the
+following macros have been added to provide a reasonably close upper bound:
+
+* `MAX_CPUS`
+* `MAX_CPU_CLUSTERS`
+* `MAX_L2_CLINE`
+
+These macros are defined in `board_config.h`, and should be set to the same value for a group of targets
+sharing a single build. Note that these no longer reflect actual counts and sizes, and the real values need
+to be queried at runtime through the `ml_*` API.
+
+The L1 cache line size is still hardcoded, and defined as `MMU_CLINE`. Since this value is always the same
+and very often checked at various places across XNU and elsewhere, it made sense to keep it as a compile-time
+macro rather than relying on runtime checks.
+
+### Restrictions on conditional compilation
+
+Currently, a family of per-SoC macros is defined at build time to enable XNU to conditionally compile code
+for different targets. These are named `ARM[64]_BOARD_CONFIG_[TARGET_NAME]`, and have historically been used
+in different places across the kernel; for example, when applying tunables, various fixes, or enabling or
+disabling features. In order not to create divergences in the future across same-generation SoCs, but also
+to keep the codebase consistent, the recommendation is to avoid the use of these macros whenever possible.
+
+Instead, XNU itself defines yet another family of macros that are defined for all targets of a particular
+generation. These are named after the P-core introduced by each (for example, `APPLEMONSOON`, or
+`APPLEVORTEX`), and are preferred over the SoC-specific ones.
Where a generation macro is not enough to +provide correctness (which happens, for example, when the code block at hand should not be executed on a +given SoC of the same family), appropriate runtime checks can be performed inside the conditionally-compiled +code block. `machine_read_midr()` and `get_arm_cpu_version()` may be used for this purpose. diff --git a/iokit/DriverKit/IOKitKeys.h b/iokit/DriverKit/IOKitKeys.h index 758ac24be..1c5af36ad 100644 --- a/iokit/DriverKit/IOKitKeys.h +++ b/iokit/DriverKit/IOKitKeys.h @@ -110,6 +110,9 @@ // Property is an array of strings containing CFBundleIdentifiers of service being opened #define kIODriverKitUserClientEntitlementsKey "com.apple.developer.driverkit.userclient-access" +// Entitlement of a dext that allows any task to open one of its IOUserClients +#define kIODriverKitUserClientEntitlementAllowAnyKey "com.apple.developer.driverkit.allow-any-userclient-access" + // Other DriverKit entitlements #define kIODriverKitUSBTransportEntitlementKey "com.apple.developer.driverkit.transport.usb" #define kIODriverKitHIDTransportEntitlementKey "com.apple.developer.driverkit.transport.hid" @@ -121,6 +124,10 @@ #define kIONVRAMReadAccessKey "com.apple.private.iokit.nvram-read-access" // Entitlement required to write nvram properties as non-root user #define kIONVRAMWriteAccessKey "com.apple.private.iokit.nvram-write-access" +// Entitlement required to set properties on the IOResources object as non-root user +#define kIOResourcesSetPropertyKey "com.apple.private.iokit.ioresources.setproperty" +// Entitlement required to read/write to the system nvram region +#define kIONVRAMSystemAllowKey "com.apple.private.iokit.system-nvram-allow" // When possible, defer matching of this driver until kextd has started. #define kIOMatchDeferKey "IOMatchDefer" @@ -134,12 +141,18 @@ // key to find IOMappers #define kIOMapperIDKey "IOMapperID" +#ifdef XNU_KERNEL_PRIVATE +// Apple Kext Exclude List +#define kIOExcludeListBundleID "com.apple.driver.KextExcludeList" +#endif + #define kIOUserClientCrossEndianKey "IOUserClientCrossEndian" #define kIOUserClientCrossEndianCompatibleKey "IOUserClientCrossEndianCompatible" #define kIOUserClientSharedInstanceKey "IOUserClientSharedInstance" #if KERNEL_PRIVATE #define kIOUserClientMessageAppSuspendedKey "IOUserClientMessageAppSuspended" #endif +#define kIOUserClientDefaultLockingKey "IOUserClientDefaultLocking" // diagnostic string describing the creating task #define kIOUserClientCreatorKey "IOUserClientCreator" // the expected cdhash value of the userspace driver executable diff --git a/iokit/DriverKit/IOMemoryDescriptor.iig b/iokit/DriverKit/IOMemoryDescriptor.iig index c2c12063b..5b35248b4 100644 --- a/iokit/DriverKit/IOMemoryDescriptor.iig +++ b/iokit/DriverKit/IOMemoryDescriptor.iig @@ -43,10 +43,11 @@ class IOMemoryMap; // IOMemoryDescriptor Create options enum { - kIOMemoryDirectionIn = 0x00000001, - kIOMemoryDirectionOut = 0x00000002, - kIOMemoryDirectionOutIn = kIOMemoryDirectionIn | kIOMemoryDirectionOut, - kIOMemoryDirectionInOut = kIOMemoryDirectionOutIn, + kIOMemoryDirectionIn = 0x00000001, + kIOMemoryDirectionOut = 0x00000002, + kIOMemoryDirectionOutIn = kIOMemoryDirectionIn | kIOMemoryDirectionOut, + kIOMemoryDirectionInOut = kIOMemoryDirectionOutIn, + kIOMemoryDisableCopyOnWrite = 0x00000010 }; // IOMemoryDescriptor CreateMapping options @@ -118,7 +119,7 @@ public: * @param offset Start offset of the mapping in the descriptor. 
* @param length Pass zero to map the entire memory, or a value <= the length of the descriptor. * @param alignment of the memory virtual mapping. Only zero for no alignment is supported. - * @param map Returned IOMemoryMap object with +1 retain count. + * @param map Returned IOMemoryMap object with +1 retain count. * It should be retained until the map is no longer required. * @return kIOReturnSuccess on success. See IOReturn.h for error codes. */ @@ -131,19 +132,48 @@ public: uint64_t alignment, IOMemoryMap ** map); -private: - virtual kern_return_t - PrepareForDMA( - uint64_t options, - IOService * device, - uint64_t offset, - uint64_t length, + /*! + * @brief Create a memory descriptor that is a subrange of another memory + * descriptor + * @param memoryDescriptorCreateOptions + * kIOMemoryDirectionIn memory described will be writable + * kIOMemoryDirectionOut memory described will be readable + * @param offset Start offset of the memory relative to the descriptor ofDescriptor. + * @param length Length of the memory. + * @param ofDescriptor Memory descriptor describing source memory. + * @param memory Returned IOMemoryDescriptor object with +1 retain count. + * @return kIOReturnSuccess on success. See IOReturn.h for error codes. + */ + static kern_return_t + CreateSubMemoryDescriptor( + uint64_t memoryDescriptorCreateOptions, + uint64_t offset, + uint64_t length, + IOMemoryDescriptor * ofDescriptor, + IOMemoryDescriptor ** memory) __attribute__((availability(driverkit,introduced=20.0))); - uint64_t * flags, - uint64_t * returnLength, - uint32_t * segmentsCount, - IOAddressSegment segments[32]); + /*! + * @brief Create a memory descriptor that is a concatenation of a set of memory + * descriptors + * @param memoryDescriptorCreateOptions + * kIOMemoryDirectionIn memory described will be writable. The source + * descriptors must include this direction. + * kIOMemoryDirectionOut memory described will be readable. The source + * descriptors must include this direction. + * @param withDescriptorsCount Number of valid memory descriptors being passed + * in the withDescriptors array. + * @param withDescriptors Source memory descriptor array. + * @param memory Returned IOMemoryDescriptor object with +1 retain count. + * @return kIOReturnSuccess on success. See IOReturn.h for error codes. + */ + static kern_return_t + CreateWithMemoryDescriptors( + uint64_t memoryDescriptorCreateOptions, + uint32_t withDescriptorsCount, + IOMemoryDescriptor * const withDescriptors[32], + IOMemoryDescriptor ** memory) __attribute__((availability(driverkit,introduced=20.0))); +private: kern_return_t Map( uint64_t options, diff --git a/iokit/DriverKit/IOService.iig b/iokit/DriverKit/IOService.iig index 274950e8a..478a721f9 100644 --- a/iokit/DriverKit/IOService.iig +++ b/iokit/DriverKit/IOService.iig @@ -275,6 +275,26 @@ public: OSArray * propertyKeys, OSArray ** properties); + /*! + * @brief Reduce power saving modes in the system in order to provide decreased latency + * to hardware DMA requests. + * @discussion When the system enters lower power states DMA access to memory may be affected. + * The best way by far to handle this is to change how you schedule your time-critical DMA operations in + * your driver such that an occasional delay will not affect the proper functioning of your device. + * However, if this is not possible, your driver can inform power management when a time-critical transfer + * begins and ends so that the system will not enter the lowest power states during that time. 
To do this, + * pass a value to requireMaxBusStall that informs power management of the maximum memory access latency in + * nanoseconds that can be tolerated by the driver. This value is hardware dependent and is related to the + * amount of buffering available in the hardware. + * Supported values are given by the kIOMaxBusStall* enum in IOTypes.h + * Pass the largest value possible that works for your device. This will minimize power + * consumption and maximize battery life by still allowing some level of CPU power management. + * @param maxBusStall A value from the kIOMaxBusStall* enum in IOTypes.h + * @return kIOReturnSuccess on success. See IOReturn.h for error codes. + */ + virtual kern_return_t + RequireMaxBusStall( + uint64_t maxBusStall); /*! @function IOCreatePropertyMatchingDictionary * @abstract Construct a matching dictionary for property matching. @@ -323,6 +343,12 @@ public: */ static OSDictionary * CreateNameMatchingDictionary(const char * serviceName, OSDictionary * matching) LOCALONLY; + + +private: + virtual void + Stop_async( + IOService * provider) LOCAL; }; #endif /* ! _IOKIT_UIOSERVICE_H */ diff --git a/iokit/DriverKit/IOTypes.h b/iokit/DriverKit/IOTypes.h index de2d357f6..5f7bf4306 100644 --- a/iokit/DriverKit/IOTypes.h +++ b/iokit/DriverKit/IOTypes.h @@ -95,8 +95,10 @@ typedef vm_address_t IOVirtualAddress; #if !defined(__arm__) && !defined(__i386__) && !(defined(__x86_64__) && !defined(KERNEL)) && !(defined(__arm64__) && !defined(__LP64__)) typedef IOByteCount64 IOByteCount; +#define PRIIOByteCount PRIu64 #else typedef IOByteCount32 IOByteCount; +#define PRIIOByteCount PRIu32 #endif typedef IOVirtualAddress IOLogicalAddress; @@ -204,36 +206,38 @@ enum { kIOPostedWrite = 6, kIORealTimeCache = 7, kIOPostedReordered = 8, + kIOPostedCombinedReordered = 9, }; // IOMemory mapping options enum { - kIOMapAnywhere = 0x00000001, - - kIOMapCacheMask = 0x00000f00, - kIOMapCacheShift = 8, - kIOMapDefaultCache = kIODefaultCache << kIOMapCacheShift, - kIOMapInhibitCache = kIOInhibitCache << kIOMapCacheShift, - kIOMapWriteThruCache = kIOWriteThruCache << kIOMapCacheShift, - kIOMapCopybackCache = kIOCopybackCache << kIOMapCacheShift, - kIOMapWriteCombineCache = kIOWriteCombineCache << kIOMapCacheShift, - kIOMapCopybackInnerCache = kIOCopybackInnerCache << kIOMapCacheShift, - kIOMapPostedWrite = kIOPostedWrite << kIOMapCacheShift, - kIOMapRealTimeCache = kIORealTimeCache << kIOMapCacheShift, - kIOMapPostedReordered = kIOPostedReordered << kIOMapCacheShift, - - kIOMapUserOptionsMask = 0x00000fff, - - kIOMapReadOnly = 0x00001000, - - kIOMapStatic = 0x01000000, - kIOMapReference = 0x02000000, - kIOMapUnique = 0x04000000, + kIOMapAnywhere = 0x00000001, + + kIOMapCacheMask = 0x00000f00, + kIOMapCacheShift = 8, + kIOMapDefaultCache = kIODefaultCache << kIOMapCacheShift, + kIOMapInhibitCache = kIOInhibitCache << kIOMapCacheShift, + kIOMapWriteThruCache = kIOWriteThruCache << kIOMapCacheShift, + kIOMapCopybackCache = kIOCopybackCache << kIOMapCacheShift, + kIOMapWriteCombineCache = kIOWriteCombineCache << kIOMapCacheShift, + kIOMapCopybackInnerCache = kIOCopybackInnerCache << kIOMapCacheShift, + kIOMapPostedWrite = kIOPostedWrite << kIOMapCacheShift, + kIOMapRealTimeCache = kIORealTimeCache << kIOMapCacheShift, + kIOMapPostedReordered = kIOPostedReordered << kIOMapCacheShift, + kIOMapPostedCombinedReordered = kIOPostedCombinedReordered << kIOMapCacheShift, + + kIOMapUserOptionsMask = 0x00000fff, + + kIOMapReadOnly = 0x00001000, + + kIOMapStatic = 0x01000000, + kIOMapReference = 
0x02000000, + kIOMapUnique = 0x04000000, #ifdef XNU_KERNEL_PRIVATE - kIOMap64Bit = 0x08000000, + kIOMap64Bit = 0x08000000, #endif - kIOMapPrefault = 0x10000000, - kIOMapOverwrite = 0x20000000 + kIOMapPrefault = 0x10000000, + kIOMapOverwrite = 0x20000000 }; /*! @enum Scale Factors @@ -294,4 +298,14 @@ typedef uint64_t IOVirtualAddress; #endif /* PLATFORM_DriverKit */ +enum { + kIOMaxBusStall40usec = 40000, + kIOMaxBusStall30usec = 30000, + kIOMaxBusStall25usec = 25000, + kIOMaxBusStall20usec = 20000, + kIOMaxBusStall10usec = 10000, + kIOMaxBusStall5usec = 5000, + kIOMaxBusStallNone = 0, +}; + #endif /* ! __IOKIT_IOTYPES_H */ diff --git a/iokit/DriverKit/IOUserClient.iig b/iokit/DriverKit/IOUserClient.iig index 5523bb271..10c76c852 100644 --- a/iokit/DriverKit/IOUserClient.iig +++ b/iokit/DriverKit/IOUserClient.iig @@ -37,9 +37,8 @@ #include #include +#include -class IOMemoryDescriptor; -class IOBufferMemoryDescriptor; enum { kIOUserClientScalarArrayCountMax = 16, @@ -243,6 +242,25 @@ public: uint64_t * options, IOMemoryDescriptor ** memory) = 0; + /*! + * @brief Create a memory descriptor that describes a set of virtual ranges in + * the client task of the user client. + * @param memoryDescriptorCreateOptions + * kIOMemoryDirectionIn memory described will be writable + * kIOMemoryDirectionOut memory described will be readable + * @param segmentsCount Number of valid address ranges being passed + * in the segments array. + * @param segments Array of address ranges. + * @param memory Returned IOMemoryDescriptor object with +1 retain count. + * @return kIOReturnSuccess on success. See IOReturn.h for error codes. + */ + virtual kern_return_t + CreateMemoryDescriptorFromClient( + uint64_t memoryDescriptorCreateOptions, + uint32_t segmentsCount, + const IOAddressSegment segments[32], + IOMemoryDescriptor ** memory) __attribute__((availability(driverkit,introduced=20.0))); + private: virtual kern_return_t _ExternalMethod( diff --git a/iokit/DriverKit/Makefile b/iokit/DriverKit/Makefile index e797df2d2..fdbce3660 100644 --- a/iokit/DriverKit/Makefile +++ b/iokit/DriverKit/Makefile @@ -4,7 +4,7 @@ export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir IIG_INCFLAGS = -I$(SRCROOT)/iokit -I$(SRCROOT)/osfmk -I$(SRCROOT)/bsd -I$(OBJROOT)/bsd $(INCFLAGS_EXTERN) -OTHER_IIG_CFLAGS = $(IIG_INCFLAGS) -isysroot $(SDKROOT) -x c++ -std=gnu++1z -D__IIG=1 -DDRIVERKIT_PRIVATE=1 $(DEPLOYMENT_TARGET_DEFINES) $($(addsuffix $(CURRENT_ARCH_CONFIG),ARCH_FLAGS_)) +OTHER_IIG_CFLAGS = $(IIG_INCFLAGS) -isysroot $(SDKROOT) -x c++ -std=gnu++1z -D__IIG=1 -DDRIVERKIT_PRIVATE=1 $(DEPLOYMENT_TARGET_DEFINES) INCDIR = $(FRAMEDIR)/$(DKIT_INCDIR) DRIVERKITINCDIR = $(DRIVERKITFRAMEDIR)/$(DRIVERKIT_DKIT_INCDIR) @@ -40,7 +40,8 @@ COMP_FILES = ${GENERATED_HEADERS} $(GENERATED_IMPL) $(GENERATED_HEADERS) : \ %.h : %.iig - $(IIG) --def $< --header $@ --impl $(patsubst %.h,%.iig.cpp,$@) --framework-name DriverKit ${OTHER_IIG_FLAGS} -- ${OTHER_IIG_CFLAGS} + @$(LOG_IIG) "$@" + $(_v)$(IIG) --def $< --header $@ --impl $(patsubst %.h,%.iig.cpp,$@) --framework-name DriverKit ${OTHER_IIG_FLAGS} -- ${OTHER_IIG_CFLAGS} $(_vstdout) $(GENERATED_IMPL) : $(GENERATED_HEADERS) diff --git a/iokit/DriverKit/OSAction.iig b/iokit/DriverKit/OSAction.iig index ddb3b2e17..2627ef0b0 100644 --- a/iokit/DriverKit/OSAction.iig +++ b/iokit/DriverKit/OSAction.iig @@ -34,6 +34,7 @@ typedef void (^OSActionCancelHandler)(void); typedef void (^OSActionAbortedHandler)(void); struct OSActionWaitToken; +class OSString; /*! 
* @class OSAction @@ -82,6 +83,15 @@ public: uint64_t msgid, size_t referenceSize, OSAction ** action) LOCAL; + + static kern_return_t + CreateWithTypeName( + OSObject * target, + uint64_t targetmsgid, + uint64_t msgid, + size_t referenceSize, + OSString * typeName, + OSAction ** action) LOCAL; #endif virtual void diff --git a/iokit/DriverKit/OSObject.iig b/iokit/DriverKit/OSObject.iig index f97de5aa9..62fab435c 100644 --- a/iokit/DriverKit/OSObject.iig +++ b/iokit/DriverKit/OSObject.iig @@ -41,14 +41,14 @@ #include #if DRIVERKIT_PRIVATE #include -#endif +#endif /* DRIVERKIT_PRIVATE */ #if !__IIG #include #include -#endif +#endif /* !__IIG */ class OSObject; typedef OSObject * OSObjectPtr; -#endif +#endif /* !KERNEL */ #if !__IIG_ATTRIBUTES_DEFINED__ @@ -124,12 +124,12 @@ typedef OSObject * OSObjectPtr; #if !__IIG #if KERNEL typedef OSObject OSContainer; -#else +#else /* KERNEL */ class IIG_SERIALIZABLE OSContainer; -#endif -#else +#endif /* KERNEL */ +#else /* !__IIG */ class IIG_SERIALIZABLE OSContainer; -#endif +#endif /* !__IIG */ class IIG_SERIALIZABLE OSData; class IIG_SERIALIZABLE OSNumber; @@ -196,6 +196,12 @@ public: #define DEFN(classname, name) \ name ## _Impl(classname ## _ ## name ## _Args) +/* + * Use of the IMPL macro is discouraged and should be replaced by a normal c++ + * method implementation (with the all method arguments) and the name of the method + * given a suffix '_Impl' + */ + #define IMPL(classname, name) \ classname :: DEFN(classname, name) diff --git a/iokit/IOKit/IOBSD.h b/iokit/IOKit/IOBSD.h index 0df8690ef..c9986737e 100644 --- a/iokit/IOKit/IOBSD.h +++ b/iokit/IOKit/IOBSD.h @@ -58,8 +58,10 @@ enum{ kIOMountChangeWillResize = 0x00000201, kIOMountChangeDidResize = 0x00000202, }; -extern void IOBSDMountChange(struct mount * mp, uint32_t op); -extern boolean_t IOTaskHasEntitlement(task_t task, const char * entitlement); +extern void IOBSDMountChange(struct mount *mp, uint32_t op); +extern boolean_t IOTaskHasEntitlement(task_t task, const char *entitlement); +extern boolean_t IOVnodeHasEntitlement(struct vnode *vnode, int64_t off, const char *entitlement); +extern char *IOVnodeGetEntitlement(struct vnode *vnode, int64_t offset, const char *entitlement); typedef enum { kIOPolledCoreFileModeNotInitialized, diff --git a/iokit/IOKit/IOBufferMemoryDescriptor.h b/iokit/IOKit/IOBufferMemoryDescriptor.h index 19cb85579..b81d0d158 100644 --- a/iokit/IOKit/IOBufferMemoryDescriptor.h +++ b/iokit/IOKit/IOBufferMemoryDescriptor.h @@ -28,6 +28,7 @@ #ifndef _IOBUFFERMEMORYDESCRIPTOR_H #define _IOBUFFERMEMORYDESCRIPTOR_H +#include #include #include @@ -106,8 +107,8 @@ public: OSMetaClassDeclareReservedUnused(IOBufferMemoryDescriptor, 0); OSMetaClassDeclareReservedUnused(IOBufferMemoryDescriptor, 1); #else /* !__LP64__ */ - OSMetaClassDeclareReservedUsed(IOBufferMemoryDescriptor, 0); - OSMetaClassDeclareReservedUsed(IOBufferMemoryDescriptor, 1); + OSMetaClassDeclareReservedUsedX86(IOBufferMemoryDescriptor, 0); + OSMetaClassDeclareReservedUsedX86(IOBufferMemoryDescriptor, 1); #endif /* !__LP64__ */ OSMetaClassDeclareReservedUnused(IOBufferMemoryDescriptor, 2); OSMetaClassDeclareReservedUnused(IOBufferMemoryDescriptor, 3); @@ -142,14 +143,14 @@ public: vm_offset_t alignment) APPLE_KEXT_DEPRECATED; /* use withOptions() instead */ #endif /* !__LP64__ */ - static IOBufferMemoryDescriptor * withCopy( + static OSPtr withCopy( task_t inTask, IOOptionBits options, vm_map_t sourceMap, mach_vm_address_t source, mach_vm_size_t size); - static IOBufferMemoryDescriptor * withOptions( 
IOOptionBits options, + static OSPtr withOptions( IOOptionBits options, vm_size_t capacity, vm_offset_t alignment = 1); @@ -171,7 +172,7 @@ public: * @param alignment The minimum required alignment of the buffer in bytes - 1 is the default for no required alignment. For example, pass 256 to get memory allocated at an address with bits 0-7 zero. * @result Returns an instance of class IOBufferMemoryDescriptor to be released by the caller, which will free the memory desriptor and associated buffer. */ - static IOBufferMemoryDescriptor * inTaskWithOptions( + static OSPtr inTaskWithOptions( task_t inTask, IOOptionBits options, vm_size_t capacity, @@ -197,7 +198,7 @@ public: * @param userTag The user memory tag * @result Returns an instance of class IOBufferMemoryDescriptor to be released by the caller, which will free the memory desriptor and associated buffer. */ - static IOBufferMemoryDescriptor * inTaskWithOptions( + static OSPtr inTaskWithOptions( task_t inTask, IOOptionBits options, vm_size_t capacity, @@ -221,7 +222,7 @@ public: * @param physicalMask The buffer will be allocated with pages such that physical addresses will only have bits set present in physicalMask. For example, pass 0x00000000FFFFFFFFULL for a buffer to be accessed by hardware that has 32 address bits. * @result Returns an instance of class IOBufferMemoryDescriptor to be released by the caller, which will free the memory desriptor and associated buffer. */ - static IOBufferMemoryDescriptor * inTaskWithPhysicalMask( + static OSPtr inTaskWithPhysicalMask( task_t inTask, IOOptionBits options, mach_vm_size_t capacity, @@ -234,7 +235,7 @@ public: * hold capacity bytes. The descriptor's length is initially set to the * capacity. */ - static IOBufferMemoryDescriptor * withCapacity( + static OSPtr withCapacity( vm_size_t capacity, IODirection withDirection, bool withContiguousMemory = false); @@ -251,7 +252,7 @@ public: * Returns a new IOBufferMemoryDescriptor preloaded with bytes (copied). * The descriptor's length and capacity are set to the input buffer's size. 
*/ - static IOBufferMemoryDescriptor * withBytes( + static OSPtr withBytes( const void * bytes, vm_size_t withLength, IODirection withDirection, diff --git a/iokit/IOKit/IOCPU.h b/iokit/IOKit/IOCPU.h index a52591e4f..c7039f204 100644 --- a/iokit/IOKit/IOCPU.h +++ b/iokit/IOKit/IOCPU.h @@ -42,6 +42,8 @@ extern "C" { #include #include +#include +#include enum { kIOCPUStateUnregistered = 0, @@ -56,7 +58,7 @@ class IOCPU : public IOService OSDeclareAbstractStructors(IOCPU); private: - OSArray *_cpuGroup; + OSPtr _cpuGroup; UInt32 _cpuNumber; UInt32 _cpuState; @@ -107,13 +109,6 @@ public: OSMetaClassDeclareReservedUnused(IOCPU, 7); }; -void IOCPUSleepKernel(void); -extern "C" kern_return_t IOCPURunPlatformQuiesceActions(void); -extern "C" kern_return_t IOCPURunPlatformActiveActions(void); -extern "C" kern_return_t IOCPURunPlatformHaltRestartActions(uint32_t message); -extern "C" kern_return_t IOCPURunPlatformPanicActions(uint32_t message); -extern "C" kern_return_t IOCPURunPlatformPanicSyncAction(void *addr, uint32_t offset, uint32_t len); - class IOCPUInterruptController : public IOInterruptController { OSDeclareDefaultStructors(IOCPUInterruptController); @@ -131,9 +126,9 @@ protected: public: virtual IOReturn initCPUInterruptController(int sources); virtual void registerCPUInterruptController(void); - virtual void setCPUInterruptProperties(IOService *service); virtual void enableCPUInterrupt(IOCPU *cpu); + virtual void setCPUInterruptProperties(IOService *service) APPLE_KEXT_OVERRIDE; virtual IOReturn registerInterrupt(IOService *nub, int source, void *target, IOInterruptHandler handler, diff --git a/iokit/IOKit/IOCatalogue.h b/iokit/IOKit/IOCatalogue.h index 01f8f295f..1dedf566e 100644 --- a/iokit/IOKit/IOCatalogue.h +++ b/iokit/IOKit/IOCatalogue.h @@ -36,6 +36,7 @@ #ifndef _IOKIT_IOCATALOGUE_H #define _IOKIT_IOCATALOGUE_H +#include #include #include #include @@ -47,8 +48,8 @@ class IOService; -extern const OSSymbol * gIOModuleIdentifierKey; -extern const OSSymbol * gIOModuleIdentifierKernelKey; +extern OSPtr gIOModuleIdentifierKey; +extern OSPtr gIOModuleIdentifierKernelKey; /*! @@ -63,7 +64,7 @@ class IOCatalogue : public OSObject private: IORWLock * lock; SInt32 generation; - OSDictionary * personalities; + OSPtr personalities; OSArray * arrayForPersonality(OSDictionary * dict); void addPersonality(OSDictionary * dict); @@ -94,7 +95,7 @@ public: * @param generationCount Returns a reference to the generation count of the database. The generation count increases only when personalities are added to the database *and* IOService matching has been initiated. * @result Returns an ordered set of driver personalities ranked on probe-scores. The ordered set must be released by the receiver. */ - OSOrderedSet * findDrivers( IOService * service, SInt32 * generationCount ); + OSPtr findDrivers( IOService * service, SInt32 * generationCount ); /*! * @function findDrivers @@ -103,7 +104,7 @@ public: * @param generationCount Returns a reference to the current generation of the database. The generation count increases only when personalities are added to the database *and* IOService matching has been initiated. * @result Returns an ordered set of driver personalities ranked on probe-scores. The ordered set must be released by the receiver. */ - OSOrderedSet * findDrivers( OSDictionary * matching, SInt32 * generationCount ); + OSPtr findDrivers( OSDictionary * matching, SInt32 * generationCount ); /*! 
* @function addDrivers @@ -123,6 +124,10 @@ public: */ bool removeDrivers( OSDictionary * matching, bool doNubMatching = true ); +#if XNU_KERNEL_PRIVATE + bool removeDrivers(bool doNubMatching, bool (^shouldRemove)(OSDictionary *personality)); +#endif /* XNU_KERNEL_PRIVATE */ + /*! * @function getGenerationCount * @abstract Get the current generation count of the database. @@ -138,6 +143,8 @@ public: */ bool isModuleLoaded( OSDictionary * driver, OSObject ** kextRef ) const; + bool isModuleLoaded( OSDictionary * driver, OSSharedPtr& kextRef ) const; + /*! * @function moduleHasLoaded * @abstract Callback function called after a IOKit dependent kernel module is loaded. @@ -176,6 +183,10 @@ public: IOReturn terminateDriversForModule( const char * moduleName, bool unload = true); #if XNU_KERNEL_PRIVATE IOReturn terminateDrivers(OSDictionary * matching, io_name_t className); + + IOReturn terminateDriversForUserspaceReboot(); + + IOReturn resetAfterUserspaceReboot(); #endif /* XNU_KERNEL_PRIVATE */ /*! @@ -232,8 +243,8 @@ private: IOReturn _removeDrivers(OSDictionary * matching); }; -extern const OSSymbol * gIOClassKey; -extern const OSSymbol * gIOProbeScoreKey; -extern IOCatalogue * gIOCatalogue; +extern OSPtr gIOClassKey; +extern OSPtr gIOProbeScoreKey; +extern OSPtr gIOCatalogue; #endif /* ! _IOKIT_IOCATALOGUE_H */ diff --git a/iokit/IOKit/IOCommandGate.h b/iokit/IOKit/IOCommandGate.h index 2a1d2f287..6d85a623f 100644 --- a/iokit/IOKit/IOCommandGate.h +++ b/iokit/IOKit/IOCommandGate.h @@ -35,6 +35,7 @@ #define _IOKIT_IOCOMMANDGATE_H #include +#include /*! * @class IOCommandGate : public IOEventSource @@ -99,7 +100,7 @@ public: /*! @function commandGate * @abstract Factory method to create and initialise an IOCommandGate, See $link init. * @result Returns a pointer to the new command gate if sucessful, 0 otherwise. */ - static IOCommandGate *commandGate(OSObject *owner, Action action = NULL); + static OSPtr commandGate(OSObject *owner, Action action = NULL); /*! @function init * @abstract Class initialiser. @@ -240,7 +241,7 @@ private: #if __LP64__ OSMetaClassDeclareReservedUnused(IOCommandGate, 0); #else - OSMetaClassDeclareReservedUsed(IOCommandGate, 0); + OSMetaClassDeclareReservedUsedX86(IOCommandGate, 0); #endif OSMetaClassDeclareReservedUnused(IOCommandGate, 1); OSMetaClassDeclareReservedUnused(IOCommandGate, 2); diff --git a/iokit/IOKit/IOCommandPool.h b/iokit/IOKit/IOCommandPool.h index ee30bb44e..a9bdde4be 100644 --- a/iokit/IOKit/IOCommandPool.h +++ b/iokit/IOKit/IOCommandPool.h @@ -58,6 +58,7 @@ #include #include #include +#include /*! * @class IOCommandPool @@ -81,7 +82,7 @@ protected: queue_head_t fQueueHead; /* head of the queue of elements available */ UInt32 fSleepers; /* Count of threads sleeping on this pool */ - IOCommandGate *fSerializer; /* command gate used for serializing pool access */ + OSPtr fSerializer; /* command gate used for serializing pool access */ /*! @struct ExpansionData * @discussion This structure will be used to expand the capablilties of the IOEventSource in the future. @@ -136,7 +137,7 @@ public: * otherwise NULL. */ - static IOCommandPool *withWorkLoop(IOWorkLoop *inWorkLoop); + static OSPtr withWorkLoop(IOWorkLoop *inWorkLoop); /*! * @function init @@ -150,7 +151,7 @@ public: * @function withWorkLoop * @abstract Should never be used, obsolete. See IOCommandPool::withWorkLoop. 
*/ - static IOCommandPool *commandPool(IOService *inOwner, + static OSPtr commandPool(IOService *inOwner, IOWorkLoop *inWorkLoop, UInt32 inSize = kIOCommandPoolDefaultSize); @@ -168,7 +169,8 @@ public: * pointer was returned. */ - virtual IOCommand *getCommand(bool blockForCommand = true); + virtual OSPtr getCommand( + bool blockForCommand = true); /*! * @function returnCommand @@ -179,7 +181,7 @@ public: * The command to place in the pool. */ - virtual void returnCommand(IOCommand *command); + virtual void returnCommand(LIBKERN_CONSUMED IOCommand *command); protected: diff --git a/iokit/IOKit/IOCommandQueue.h b/iokit/IOKit/IOCommandQueue.h index 5ad86ec5b..5832b41e2 100644 --- a/iokit/IOKit/IOCommandQueue.h +++ b/iokit/IOKit/IOCommandQueue.h @@ -40,6 +40,7 @@ #define _IOKIT_IOCOMMANDQUEUE_H #include +#include class IOCommandQueue; @@ -64,7 +65,7 @@ protected: virtual bool checkForWork() APPLE_KEXT_OVERRIDE; public: - static IOCommandQueue *commandQueue(OSObject *inOwner, + static OSPtr commandQueue(OSObject *inOwner, IOCommandQueueAction inAction = NULL, int inSize = kIOCQDefaultSize) APPLE_KEXT_DEPRECATED; diff --git a/iokit/IOKit/IOConditionLock.h b/iokit/IOKit/IOConditionLock.h index 52dd502d0..60d5b35eb 100644 --- a/iokit/IOKit/IOConditionLock.h +++ b/iokit/IOKit/IOConditionLock.h @@ -32,6 +32,7 @@ #ifndef _IOKIT_IOCONDITIONLOCK_H #define _IOKIT_IOCONDITIONLOCK_H +#include #include #include #include @@ -52,7 +53,7 @@ private: volatile bool waiting; public: - static IOConditionLock *withCondition(int condition, bool inIntr = true); + static OSPtr withCondition(int condition, bool inIntr = true); virtual bool initWithCondition(int condition, bool inIntr = true); virtual void free() APPLE_KEXT_OVERRIDE; diff --git a/iokit/IOKit/IODMACommand.h b/iokit/IOKit/IODMACommand.h index 2188f9577..c1214ef6d 100644 --- a/iokit/IOKit/IODMACommand.h +++ b/iokit/IOKit/IODMACommand.h @@ -215,7 +215,7 @@ public: * @param refCon Reference Constant * @result Returns a new IODMACommand if successfully created and initialized, 0 otherwise. */ - static IODMACommand * + static OSPtr withSpecification(SegmentFunction outSegFunc, UInt8 numAddressBits, UInt64 maxSegmentSize, @@ -255,7 +255,7 @@ public: IOMapper *mapper = NULL, void *refCon = NULL) __attribute__((always_inline)); - static IODMACommand * + static OSPtr withSpecification(SegmentFunction outSegFunc, const SegmentOptions * segmentOptions, uint32_t mappingOptions, @@ -269,7 +269,7 @@ public: * @param refCon Reference Constant * @result Returns a new IODMACommand if successfully created and initialized, 0 otherwise. */ - static IODMACommand * withRefCon(void * refCon); + static OSPtr withRefCon(void * refCon); /*! * @function cloneCommand @@ -277,7 +277,7 @@ public: * @discussion Factory function to create and initialise an IODMACommand in one operation. The current command's specification will be duplicated in the new object, but however none of its state will be duplicated. This means that it is safe to clone a command even if it is currently active and running, however you must be certain that the command to be duplicated does have a valid reference for the duration. * @result Returns a new IODMACommand if successfully created and initialised, 0 otherwise. */ - virtual IODMACommand *cloneCommand(void *refCon = NULL); + virtual OSPtr cloneCommand(void *refCon = NULL); /*! @function initWithSpecification * @abstract Primary initializer for the IODMACommand class. @@ -320,7 +320,7 @@ public: /*! 
@function getMemoryDescriptor * @abstract Get the current memory descriptor */ - virtual const IOMemoryDescriptor *getMemoryDescriptor() const; + virtual const IOMemoryDescriptor * getMemoryDescriptor() const; /*! @function getIOMemoryDescriptor * @abstract Get the memory descriptor to be used for DMA @@ -455,7 +455,7 @@ private: Segment64 segment, void *segments, UInt32 segmentIndex); - IOReturn walkAll(UInt8 op); + IOReturn walkAll(uint32_t op); public: @@ -536,16 +536,16 @@ public: bool synchronize = true); virtual - IOBufferMemoryDescriptor * createCopyBuffer(IODirection direction, UInt64 length); + OSPtr createCopyBuffer(IODirection direction, UInt64 length); private: - OSMetaClassDeclareReservedUsed(IODMACommand, 0); - OSMetaClassDeclareReservedUsed(IODMACommand, 1); - OSMetaClassDeclareReservedUsed(IODMACommand, 2); - OSMetaClassDeclareReservedUsed(IODMACommand, 3); - OSMetaClassDeclareReservedUsed(IODMACommand, 4); - OSMetaClassDeclareReservedUsed(IODMACommand, 5); - OSMetaClassDeclareReservedUsed(IODMACommand, 6); + OSMetaClassDeclareReservedUsedX86(IODMACommand, 0); + OSMetaClassDeclareReservedUsedX86(IODMACommand, 1); + OSMetaClassDeclareReservedUsedX86(IODMACommand, 2); + OSMetaClassDeclareReservedUsedX86(IODMACommand, 3); + OSMetaClassDeclareReservedUsedX86(IODMACommand, 4); + OSMetaClassDeclareReservedUsedX86(IODMACommand, 5); + OSMetaClassDeclareReservedUsedX86(IODMACommand, 6); OSMetaClassDeclareReservedUnused(IODMACommand, 7); OSMetaClassDeclareReservedUnused(IODMACommand, 8); OSMetaClassDeclareReservedUnused(IODMACommand, 9); @@ -574,11 +574,11 @@ protected: /*! @var fMapper * Client defined mapper. */ - IOMapper *fMapper; + OSPtr fMapper; /*! @var fMemory * memory descriptor for current I/O. */ - const IOMemoryDescriptor *fMemory; + OSPtr fMemory; /*! @var fOutSeg The action method called when an event has been delivered */ SegmentFunction fOutSeg; @@ -605,7 +605,7 @@ protected: /*! @var reserved * Reserved for future use. 
(Internal use only) */ - struct IODMACommandInternal * reserved; + struct IODMACommandInternal *reserved; }; IOReturn diff --git a/iokit/IOKit/IODMAController.h b/iokit/IOKit/IODMAController.h index 2d8682cc2..825cc613d 100644 --- a/iokit/IOKit/IODMAController.h +++ b/iokit/IOKit/IODMAController.h @@ -32,6 +32,7 @@ #include #include #include +#include class IODMAEventSource; @@ -43,7 +44,7 @@ class IODMAController : public IOService private: IOService *_provider; - const OSSymbol *_dmaControllerName; + OSPtr _dmaControllerName; protected: virtual void registerDMAController(IOOptionBits options = 0); @@ -62,7 +63,7 @@ protected: virtual bool validDMAConfig(UInt32 dmaIndex, IOService *provider, UInt32 reqIndex) = 0; public: - static const OSSymbol *createControllerName(UInt32 phandle); + static OSPtr createControllerName(UInt32 phandle); static IODMAController *getController(IOService *provider, UInt32 dmaIndex); virtual bool start(IOService *provider) APPLE_KEXT_OVERRIDE; diff --git a/iokit/IOKit/IODMAEventSource.h b/iokit/IOKit/IODMAEventSource.h index 5b26e08b7..29b8a1190 100644 --- a/iokit/IOKit/IODMAEventSource.h +++ b/iokit/IOKit/IODMAEventSource.h @@ -29,6 +29,7 @@ #ifndef _IOKIT_IODMAEVENTSOURCE_H #define _IOKIT_IODMAEVENTSOURCE_H +#include #include #include #include @@ -51,7 +52,7 @@ protected: virtual void notifyDMACommand(IODMACommand *dmaCommand, IOReturn status, IOByteCount actualByteCount, AbsoluteTime timeStamp); public: - static IODMAEventSource *dmaEventSource(OSObject *owner, + static OSPtr dmaEventSource(OSObject *owner, IOService *provider, Action completion = NULL, Action notification = NULL, @@ -73,7 +74,7 @@ public: private: IOService *dmaProvider; - IODMAController *dmaController; + OSPtr dmaController; UInt32 dmaIndex; queue_head_t dmaCommandsCompleted; IOSimpleLock *dmaCommandsCompletedLock; diff --git a/iokit/IOKit/IODataQueue.h b/iokit/IOKit/IODataQueue.h index c16d03fa2..7070268db 100644 --- a/iokit/IOKit/IODataQueue.h +++ b/iokit/IOKit/IODataQueue.h @@ -33,7 +33,7 @@ #warning "IODataQueue is deprecated due to security issues in its interfaces, please use IOSharedDataQueue instead" #endif - +#include #include #include #include @@ -97,7 +97,7 @@ public: * @param size The size of the data queue memory region. * @result Returns the newly allocated IODataQueue instance. Zero is returned on failure. */ - static IODataQueue *withCapacity(UInt32 size); + static OSPtr withCapacity(UInt32 size); /*! * @function withEntries @@ -105,9 +105,9 @@ public: * @discussion This method will create a new IODataQueue instance with enough capacity for numEntries of entrySize. It does account for the IODataQueueEntry overhead for each entry. Note that the numEntries and entrySize are simply used to determine the data region size. They do not actually restrict the size of number of entries that can be added to the queue.
This method allocates a new IODataQueue instance and then calls initWithEntries() with the given numEntries and entrySize parameters. If the initWithEntries() fails, the new instance is released and zero is returned. * @param numEntries Number of entries to allocate space for. * @param entrySize Size of each entry. - * @result Reeturns the newly allocated IODataQueue instance. Zero is returned on failure. + * @result Returns the newly allocated IODataQueue instance. Zero is returned on failure. */ - static IODataQueue *withEntries(UInt32 numEntries, UInt32 entrySize); + static OSPtr withEntries(UInt32 numEntries, UInt32 entrySize); /*! * @function initWithCapacity @@ -124,7 +124,7 @@ public: * @discussion This method will initialize an IODataQueue instance with enough capacity for numEntries of entrySize. It does account for the IODataQueueEntry overhead for each entry. Note that the numEntries and entrySize are simply used to determine the data region size. They do not actually restrict the size of number of entries that can be added to the queue.
This method allocates a new IODataQueue instance and then calls initWithEntries() with the given numEntries and entrySize parameters. * @param numEntries Number of entries to allocate space for. * @param entrySize Size of each entry. - * @result Reeturns true on success and false on failure. + * @result Returns true on success and false on failure. */ virtual Boolean initWithEntries(UInt32 numEntries, UInt32 entrySize); @@ -152,7 +152,7 @@ public: * @discussion The IOMemoryDescriptor instance returned by this method is intended to be mapped into a user process. This is the memory region that the IODataQueueClient code operates on. * @result Returns a newly allocated IOMemoryDescriptor for the IODataQueueMemory region. Returns zero on failure. */ - virtual IOMemoryDescriptor *getMemoryDescriptor(); + virtual OSPtr getMemoryDescriptor(); }; #endif /* _IOKIT_IODATAQUEUE_H */ diff --git a/iokit/IOKit/IODeviceMemory.h b/iokit/IOKit/IODeviceMemory.h index dadc043b0..f48b420d8 100644 --- a/iokit/IOKit/IODeviceMemory.h +++ b/iokit/IOKit/IODeviceMemory.h @@ -36,6 +36,7 @@ #define _IOKIT_IODEVICEMEMORY_H #include +#include /*! @class IODeviceMemory * @abstract An IOMemoryDescriptor used for device physical memory ranges. @@ -66,7 +67,7 @@ public: * @param count The number of elements in the list. * @result Returns a created OSArray of IODeviceMemory objects, to be released by the caller, or zero on failure. */ - static OSArray * arrayFromList( + static OSPtr arrayFromList( InitElement list[], IOItemCount count ); @@ -77,7 +78,7 @@ public: * @param length The length of memory. * @result Returns the created IODeviceMemory on success, to be released by the caller, or zero on failure. */ - static IODeviceMemory * withRange( + static OSPtr withRange( IOPhysicalAddress start, IOPhysicalLength length ); @@ -89,7 +90,7 @@ public: * @param length The length of the subrange. * @result Returns the created IODeviceMemory on success, to be released by the caller, or zero on failure. 
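A minimal sketch, not part of the patch, of how a caller might hold the result of the withRange() factory above once it returns a smart pointer; the physical base and length are made-up values, and OSSharedPtr is the type behind the OSPtr alias.

#include <IOKit/IODeviceMemory.h>
#include <libkern/c++/OSSharedPtr.h>

static void
map_example_registers(void)
{
    // Hypothetical physical range; real drivers obtain this from their provider.
    OSSharedPtr<IODeviceMemory> regs =
        IODeviceMemory::withRange(0xF0000000ULL, 0x1000);
    if (!regs) {
        return;
    }
    OSSharedPtr<IOMemoryMap> map = regs->map();    // map() also returns OSPtr now
    if (map) {
        volatile uint32_t *mmio =
            (volatile uint32_t *)map->getVirtualAddress();
        (void)mmio[0];                             // read one device register
    }
    // No explicit release(): the smart pointers drop their references on scope exit.
}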
*/ - static IODeviceMemory * withSubRange( + static OSPtr withSubRange( IODeviceMemory * of, IOPhysicalAddress offset, IOPhysicalLength length ); diff --git a/iokit/IOKit/IODeviceTreeSupport.h b/iokit/IOKit/IODeviceTreeSupport.h index 51d146aab..d48f58ecf 100644 --- a/iokit/IOKit/IODeviceTreeSupport.h +++ b/iokit/IOKit/IODeviceTreeSupport.h @@ -37,6 +37,7 @@ #include #include +#include class IODeviceMemory; class IOService; @@ -48,6 +49,7 @@ extern const OSSymbol * gIODTPHandleKey; extern const OSSymbol * gIODTCompatibleKey; extern const OSSymbol * gIODTTypeKey; extern const OSSymbol * gIODTModelKey; +extern const OSSymbol * gIODTBridgeModelKey; extern const OSSymbol * gIODTTargetTypeKey; extern const OSSymbol * gIODTAAPLInterruptsKey; @@ -63,6 +65,9 @@ bool IODTMatchNubWithKeys( IORegistryEntry * nub, bool IODTCompareNubName( const IORegistryEntry * regEntry, OSString * name, LIBKERN_RETURNS_RETAINED_ON_NONZERO OSString ** matchingName ); +bool IODTCompareNubName( const IORegistryEntry * regEntry, + OSString * name, + OSSharedPtr& matchingName ); enum { kIODTRecursive = 0x00000001, diff --git a/iokit/IOKit/IOFilterInterruptEventSource.h b/iokit/IOKit/IOFilterInterruptEventSource.h index 263f8ac44..5a05b5d77 100644 --- a/iokit/IOKit/IOFilterInterruptEventSource.h +++ b/iokit/IOKit/IOFilterInterruptEventSource.h @@ -35,7 +35,9 @@ #ifndef _IOKIT_IOFILTERINTERRUPTEVENTSOURCE_H #define _IOKIT_IOFILTERINTERRUPTEVENTSOURCE_H +#include #include +#include class IOService; @@ -77,7 +79,7 @@ private: IOService *inProvider = NULL, int inIntIndex = 0) APPLE_KEXT_OVERRIDE; - static IOInterruptEventSource * + static OSPtr interruptEventSource(OSObject *inOwner, IOInterruptEventSource::Action inAction = NULL, IOService *inProvider = NULL, @@ -112,7 +114,7 @@ public: * @param provider Service that provides interrupts. * @param intIndex Defaults to 0. * @result a new event source if succesful, 0 otherwise. */ - static IOFilterInterruptEventSource * + static OSPtr filterInterruptEventSource(OSObject *owner, IOInterruptEventSource::Action action, Filter filter, @@ -128,7 +130,7 @@ public: * @param action Block for the callout routine of this event source. * @param filter Block to invoke when HW interrupt occurs. * @result a new event source if succesful, 0 otherwise. */ - static IOFilterInterruptEventSource * + static OSPtr filterInterruptEventSource(OSObject *owner, IOService *provider, int intIndex, diff --git a/iokit/IOKit/IOHibernatePrivate.h b/iokit/IOKit/IOHibernatePrivate.h index df8be032a..dd1dd039b 100644 --- a/iokit/IOKit/IOHibernatePrivate.h +++ b/iokit/IOKit/IOHibernatePrivate.h @@ -26,11 +26,29 @@ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ +#ifndef __IOKIT_IOHIBERNATEPRIVATE_H +#define __IOKIT_IOHIBERNATEPRIVATE_H + +#if HIBERNATION + +#if defined(__arm64__) + +#define HIBERNATE_HMAC_IMAGE 1 +#define HIBERNATE_HAVE_MACHINE_HEADER 1 + +// enable the hibernation exception handler on DEBUG and DEVELOPMENT kernels +#define HIBERNATE_TRAP_HANDLER (DEBUG || DEVELOPMENT) + +#endif /* defined(__arm64__) */ + +#endif /* HIBERNATION */ + +#ifndef __ASSEMBLER__ + #include +#include -#ifdef __cplusplus -extern "C" { -#endif +__BEGIN_DECLS #ifdef KERNEL #include @@ -38,10 +56,24 @@ extern "C" { #include extern int kdb_printf(const char *format, ...) 
__printflike(1, 2); -#endif +#endif /* KERNEL */ -#ifndef __IOKIT_IOHIBERNATEPRIVATE_H -#define __IOKIT_IOHIBERNATEPRIVATE_H +#define HIBERNATE_HMAC_SIZE 48 // SHA384 size in bytes + +struct IOHibernateHibSegment { + uint32_t iBootMemoryRegion; + uint32_t physPage; + uint32_t pageCount; + uint32_t protection; +}; +typedef struct IOHibernateHibSegment IOHibernateHibSegment; + +#define NUM_HIBSEGINFO_SEGMENTS 10 +struct IOHibernateHibSegInfo { + struct IOHibernateHibSegment segments[NUM_HIBSEGINFO_SEGMENTS]; + uint8_t hmac[HIBERNATE_HMAC_SIZE]; +}; +typedef struct IOHibernateHibSegInfo IOHibernateHibSegInfo; struct IOPolledFileExtent { uint64_t start; @@ -100,12 +132,19 @@ struct IOHibernateImageHeader { uint32_t debugFlags; uint32_t options; - uint32_t sleepTime; + uint64_t sleepTime __attribute__ ((packed)); uint32_t compression; uint8_t bridgeBootSessionUUID[16]; - uint32_t reserved[54]; // make sizeof == 512 + uint64_t lastHibAbsTime __attribute__ ((packed)); + union { + uint64_t lastHibContTime; + uint64_t hwClockOffset; + } __attribute__ ((packed)); + uint64_t kernVirtSlide __attribute__ ((packed)); + + uint32_t reserved[47]; // make sizeof == 512 uint32_t booterTime0; uint32_t booterTime1; uint32_t booterTime2; @@ -121,6 +160,16 @@ struct IOHibernateImageHeader { uint64_t deviceBase __attribute__ ((packed)); uint32_t deviceBlockSize; +#if defined(__arm64__) + uint32_t segmentsFileOffset; + IOHibernateHibSegInfo hibSegInfo; + uint32_t imageHeaderHMACSize; + uint8_t imageHeaderHMAC[HIBERNATE_HMAC_SIZE]; + uint8_t handoffHMAC[HIBERNATE_HMAC_SIZE]; + uint8_t image1PagesHMAC[HIBERNATE_HMAC_SIZE]; + uint8_t image2PagesHMAC[HIBERNATE_HMAC_SIZE]; +#endif /* defined(__arm64__) */ + uint32_t fileExtentMapSize; IOPolledFileExtent fileExtentMap[2]; }; @@ -250,9 +299,9 @@ struct hibernate_preview_t { uint32_t width; // Width uint32_t height; // Height uint32_t depth; // Pixel Depth - uint32_t lockTime; // Lock time - uint32_t reservedG[8];// reserved - uint32_t reservedK[8];// reserved + uint64_t lockTime; // Lock time + uint32_t reservedG[7]; // reserved + uint32_t reservedK[8]; // reserved }; typedef struct hibernate_preview_t hibernate_preview_t; @@ -278,7 +327,8 @@ struct hibernate_statistics_t { uint32_t hidReadyTime; uint32_t wakeCapability; - uint32_t resvA[15]; + uint32_t hibCount; + uint32_t resvA[14]; }; typedef struct hibernate_statistics_t hibernate_statistics_t; @@ -287,6 +337,10 @@ typedef struct hibernate_statistics_t hibernate_statistics_t; #define kIOSysctlHibernateWakeNotify "kern.hibernatewakenotification" #define kIOSysctlHibernateScreenReady "kern.hibernatelockscreenready" #define kIOSysctlHibernateHIDReady "kern.hibernatehidready" +#define kIOSysctlHibernateCount "kern.hibernatecount" +#define kIOSysctlHibernateSetPreview "kern.hibernatepreview" + +#define kIOHibernateSetPreviewEntitlementKey "com.apple.private.hibernation.set-preview" #ifdef KERNEL @@ -306,6 +360,17 @@ void IOHibernateSystemRestart(void); #endif /* __cplusplus */ +struct hibernate_scratch { + uint8_t *curPage; + size_t curPagePos; + uint64_t curPos; + uint64_t totalLength; + ppnum_t headPage; + hibernate_page_list_t *map; + uint32_t *nextFree; +}; +typedef struct hibernate_scratch hibernate_scratch_t; + void vm_compressor_do_warmup(void); @@ -404,12 +469,30 @@ hibernate_page_bitmap_count(hibernate_bitmap_t * bitmap, uint32_t set, uint32_t uintptr_t hibernate_restore_phys_page(uint64_t src, uint64_t dst, uint32_t len, uint32_t procFlags); +void +hibernate_scratch_init(hibernate_scratch_t * scratch, 
hibernate_page_list_t * map, uint32_t * nextFree); + +void +hibernate_scratch_start_read(hibernate_scratch_t * scratch); + +void +hibernate_scratch_write(hibernate_scratch_t * scratch, const void * buffer, size_t size); + +void +hibernate_scratch_read(hibernate_scratch_t * scratch, void * buffer, size_t size); + void hibernate_machine_init(void); uint32_t hibernate_write_image(void); +ppnum_t +hibernate_page_list_grab(hibernate_page_list_t * list, uint32_t * pNextFree); + +void +hibernate_reserve_restore_pages(uint64_t headerPhys, IOHibernateImageHeader *header, hibernate_page_list_t * map); + long hibernate_machine_entrypoint(uint32_t p1, uint32_t p2, uint32_t p3, uint32_t p4); long @@ -424,8 +507,10 @@ extern uint32_t gIOHibernateMode; extern uint32_t gIOHibernateDebugFlags; extern uint32_t gIOHibernateFreeTime; // max time to spend freeing pages (ms) extern boolean_t gIOHibernateStandbyDisabled; +#if !defined(__arm64__) extern uint8_t gIOHibernateRestoreStack[]; extern uint8_t gIOHibernateRestoreStackEnd[]; +#endif /* !defined(__arm64__) */ extern IOHibernateImageHeader * gIOHibernateCurrentHeader; #define HIBLOGFROMPANIC(fmt, args...) \ @@ -535,8 +620,8 @@ enum{ #define kIOScreenLockStateKey "IOScreenLockState" #define kIOBooterScreenLockStateKey "IOBooterScreenLockState" -#endif /* ! __IOKIT_IOHIBERNATEPRIVATE_H */ +__END_DECLS -#ifdef __cplusplus -} -#endif +#endif /* !__ASSEMBLER__ */ + +#endif /* ! __IOKIT_IOHIBERNATEPRIVATE_H */ diff --git a/iokit/IOKit/IOInterleavedMemoryDescriptor.h b/iokit/IOKit/IOInterleavedMemoryDescriptor.h index 5221ab19f..7b9a28170 100644 --- a/iokit/IOKit/IOInterleavedMemoryDescriptor.h +++ b/iokit/IOKit/IOInterleavedMemoryDescriptor.h @@ -30,6 +30,7 @@ #define _IOINTERLEAVEDMEMORYDESCRIPTOR_H #include +#include /*! @class IOInterleavedMemoryDescriptor : public IOMemoryDescriptor * @abstract The IOInterleavedMemoryDescriptor object describes a memory area made up of portions of several other IOMemoryDescriptors. @@ -59,7 +60,7 @@ public: * @param direction An I/O direction to be associated with the descriptor, which may affect the operation of the prepare and complete methods on some architectures. * @result The created IOInterleavedMemoryDescriptor on success, to be released by the caller, or zero on failure. */ - static IOInterleavedMemoryDescriptor * withCapacity( IOByteCount capacity, + static OSPtr withCapacity( IOByteCount capacity, IODirection direction); /*! 
@function initWithCapacity diff --git a/iokit/IOKit/IOInterruptController.h b/iokit/IOKit/IOInterruptController.h index 53bdcf529..450b457a2 100644 --- a/iokit/IOKit/IOInterruptController.h +++ b/iokit/IOKit/IOInterruptController.h @@ -102,10 +102,14 @@ public: virtual void disableVectorHard(IOInterruptVectorNumber vectorNumber, IOInterruptVector *vector); virtual void enableVector(IOInterruptVectorNumber vectorNumber, IOInterruptVector *vector); virtual void causeVector(IOInterruptVectorNumber vectorNumber, IOInterruptVector *vector); + virtual void setCPUInterruptProperties(IOService *service); - OSMetaClassDeclareReservedUnused(IOInterruptController, 0); - OSMetaClassDeclareReservedUnused(IOInterruptController, 1); - OSMetaClassDeclareReservedUnused(IOInterruptController, 2); + virtual void sendIPI(unsigned int cpu_id, bool deferred); + virtual void cancelDeferredIPI(unsigned int cpu_id); + + OSMetaClassDeclareReservedUsedX86(IOInterruptController, 0); + OSMetaClassDeclareReservedUsedX86(IOInterruptController, 1); + OSMetaClassDeclareReservedUsedX86(IOInterruptController, 2); OSMetaClassDeclareReservedUnused(IOInterruptController, 3); OSMetaClassDeclareReservedUnused(IOInterruptController, 4); OSMetaClassDeclareReservedUnused(IOInterruptController, 5); diff --git a/iokit/IOKit/IOInterruptEventSource.h b/iokit/IOKit/IOInterruptEventSource.h index 6313a87b4..e1f67651c 100644 --- a/iokit/IOKit/IOInterruptEventSource.h +++ b/iokit/IOKit/IOInterruptEventSource.h @@ -38,6 +38,7 @@ #ifndef _IOKIT_IOINTERRUPTEVENTSOURCE_H #define _IOKIT_IOINTERRUPTEVENTSOURCE_H +#include #include class IOService; @@ -135,7 +136,7 @@ public: * @param provider IOService that represents the interrupt source. Defaults to 0. When no provider is defined the event source assumes that the client will in some manner call the interruptOccured method explicitly. This will start the ball rolling for safe delivery of asynchronous event's into the driver. * @param intIndex The index of the interrupt within the provider's interrupt sources. Defaults to 0, i.e. the first interrupt in the provider. * @result A new interrupt event source if successfully created and initialised, 0 otherwise. */ - static IOInterruptEventSource * + static OSPtr interruptEventSource(OSObject *owner, Action action, IOService *provider = NULL, @@ -150,7 +151,7 @@ public: * @param intIndex The index of the interrupt within the provider's interrupt sources. * @param action Block for the callout routine of this event source.. * @result A new interrupt event source if successfully created and initialised, 0 otherwise. */ - static IOInterruptEventSource * + static OSPtr interruptEventSource(OSObject *owner, IOService *provider, int intIndex, @@ -238,12 +239,12 @@ public: void enablePrimaryInterruptTimestamp(bool enable); -/*! @function getPimaryInterruptTimestamp +/*! @function getPrimaryInterruptTimestamp * @abstract Returns mach_absolute_time timestamp of primary interrupt. * @discussion Returns mach_absolute_time timestamp of primary interrupt. * @result Value of the timestamp. Zero if never interrupted, or -1ULL if timestamp collection has not been enabled. 
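In the same vein, a hedged sketch (not from the patch) of the timestamp accessors whose spelling the IOInterruptEventSource.h hunk corrects; src is assumed to be an interrupt event source the driver has already added to its work loop.

#include <IOKit/IOInterruptEventSource.h>
#include <IOKit/IOLib.h>

static void
read_primary_interrupt_timestamp(IOInterruptEventSource *src)
{
    // Collection is off by default; the getter reports -1ULL until it is enabled
    // and 0 until the first interrupt arrives.
    src->enablePrimaryInterruptTimestamp(true);
    uint64_t ts = src->getPrimaryInterruptTimestamp();
    if (ts != 0 && ts != ~0ULL) {
        IOLog("primary interrupt at mach_absolute_time %llu\n", ts);
    }
}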
*/ - uint64_t getPimaryInterruptTimestamp(); + uint64_t getPrimaryInterruptTimestamp(); private: IOReturn registerInterruptHandler(IOService *inProvider, int inIntIndex); diff --git a/iokit/IOKit/IOKernelReporters.h b/iokit/IOKit/IOKernelReporters.h index 2e6ac1643..6d59b479c 100644 --- a/iokit/IOKit/IOKernelReporters.h +++ b/iokit/IOKit/IOKernelReporters.h @@ -39,6 +39,7 @@ #include +#include #include #include #include @@ -47,6 +48,8 @@ #include #include +#include + typedef OSDictionary IOReportLegendEntry; /******************************* @@ -201,7 +204,7 @@ public: * * Locking: same-instance concurrency SAFE, MAY BLOCK */ - IOReportLegendEntry* createLegend(void); + OSPtr createLegend(void); /*! @function IOReporter::configureReport * @abstract track IOService::configureReport(), provide sizing info @@ -279,6 +282,16 @@ public: */ virtual void free(void) APPLE_KEXT_OVERRIDE; +/*! @function IOReporter::init + * @abstract Initialize global state + * + * @discussion + * ::initialize() [called during IOStartIOKit] initializes all global + * state for IOReporter objects. + * + */ + static void initialize(void); + /*********************************/ /*** 2b. Useful Static Methods ***/ @@ -552,7 +565,7 @@ protected: * function, and then drops the lock. Subclasses should not call * this function directly. */ - virtual IOReportLegendEntry* handleCreateLegend(void); + virtual OSPtr handleCreateLegend(void); /*! @function IOReporter::updateChannelValues * @abstract update channel values for IOReporter::updateReport() @@ -712,7 +725,7 @@ private: * * Locking: Caller must ensure that the reporter (data) lock is held. */ - OSArray* copyChannelIDs(void); + OSPtr copyChannelIDs(void); /*! @function IOReporter::legendWith * @abstract Internal method to help create legend entries @@ -733,7 +746,7 @@ private: * * Locking: SAFE to call concurrently (no static globals), MAY BLOCK */ - static IOReportLegendEntry* legendWith(OSArray *channelIDs, + static OSPtr legendWith(OSArray *channelIDs, OSArray *channelNames, IOReportChannelType channelType, IOReportUnit unit); @@ -749,7 +762,7 @@ protected: uint16_t _channelDimension;// Max channel size int _nElements; int _nChannels; // Total Channels in this reporter - OSArray *_channelNames; + OSPtr _channelNames; // MUST be protected because check is a macro! bool _reporterIsLocked; @@ -802,7 +815,7 @@ public: * * Locking: SAFE to call concurrently (no static globals), MAY BLOCK. */ - static IOSimpleReporter* with(IOService *reportingService, + static OSPtr with(IOService *reportingService, IOReportCategories categories, IOReportUnit unit); @@ -897,7 +910,7 @@ public: * * Locking: SAFE to call concurrently (no static globals), MAY BLOCK */ - static IOStateReporter* with(IOService *reportingService, + static OSPtr with(IOService *reportingService, IOReportCategories categories, int nstates, IOReportUnit unit = kIOReportUnitHWTicks); @@ -1475,7 +1488,7 @@ public: * * */ - static IOHistogramReporter* with(IOService *reportingService, + static OSPtr with(IOService *reportingService, IOReportCategories categories, uint64_t channelID, const char *channelName, @@ -1573,7 +1586,7 @@ protected: * * Locking: same-instance concurrency SAFE, MAY BLOCK */ - IOReportLegendEntry* handleCreateLegend(void) APPLE_KEXT_OVERRIDE; + OSPtr handleCreateLegend(void) APPLE_KEXT_OVERRIDE; private: @@ -1629,7 +1642,7 @@ public: * IOReportLegend::addReporterLegend() will handle the above, removing * the need for any direct use of the IOReportLegend class. 
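To ground the reporter factories above, a short, hypothetical driver-side sketch (not from the patch); MyDriver is an invented IOService subclass, _reporter an assumed OSSharedPtr<IOSimpleReporter> member, and the channel ID and group name are arbitrary.

#include <IOKit/IOKernelReporters.h>

bool
MyDriver::setUpReporting(void)
{
    _reporter = IOSimpleReporter::with(this, kIOReportCategoryPower,
        kIOReportUnitNone);
    if (!_reporter) {
        return false;
    }
    _reporter->addChannel(0x77616b65 /* invented channel ID */, "wake-count");
    // addReporterLegend() builds and publishes the legend entry, so the driver
    // never touches IOReportLegend directly.
    IOReportLegend::addReporterLegend(this, _reporter.get(), "MyDriver", NULL);
    return true;
}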
*/ - static IOReportLegend* with(OSArray *legend); + static OSPtr with(OSArray *legend); /*! @function IOReportLegend::addLegendEntry * @abstract Add a new legend entry @@ -1746,7 +1759,7 @@ protected: private: - OSArray *_reportLegend; + OSPtr _reportLegend; IOReturn initWith(OSArray *legend); diff --git a/iokit/IOKit/IOKitDebug.h b/iokit/IOKit/IOKitDebug.h index 50a811610..cc01ecc02 100644 --- a/iokit/IOKit/IOKitDebug.h +++ b/iokit/IOKit/IOKitDebug.h @@ -140,6 +140,7 @@ enum { kIODKDisableDextTag = 0x00002000ULL, kIODKDisableCDHashChecking = 0x00004000ULL, kIODKDisableEntitlementChecking = 0x00008000ULL, + kIODKDisableCheckInTokenVerification = 0x00010000ULL, }; #if XNU_KERNEL_PRIVATE @@ -254,6 +255,7 @@ IOTrackingQueue * IOTrackingQueueAlloc(const char * name, uintptr_t btEntry, size_t allocSize, size_t minCaptureSize, uint32_t type, uint32_t numSiteQs); void IOTrackingQueueFree(IOTrackingQueue * head); +void IOTrackingQueueCollectUser(IOTrackingQueue * queue); void IOTrackingAdd(IOTrackingQueue * head, IOTracking * mem, size_t size, bool address, vm_tag_t tag); void IOTrackingRemove(IOTrackingQueue * head, IOTracking * mem, size_t size); void IOTrackingAddUser(IOTrackingQueue * queue, IOTrackingUser * mem, vm_size_t size); diff --git a/iokit/IOKit/IOKitKeys.h b/iokit/IOKit/IOKitKeys.h index 2e1a2fe04..1c5af36ad 100644 --- a/iokit/IOKit/IOKitKeys.h +++ b/iokit/IOKit/IOKitKeys.h @@ -120,22 +120,39 @@ #define kIODriverKitHIDFamilyEventServiceEntitlementKey "com.apple.developer.driverkit.family.hid.eventservice" #define kIODriverKitTransportBuiltinEntitlementKey "com.apple.developer.driverkit.builtin" +// Entitlement required to read nvram root-only properties as non-root user +#define kIONVRAMReadAccessKey "com.apple.private.iokit.nvram-read-access" +// Entitlement required to write nvram properties as non-root user +#define kIONVRAMWriteAccessKey "com.apple.private.iokit.nvram-write-access" +// Entitlement required to set properties on the IOResources object as non-root user +#define kIOResourcesSetPropertyKey "com.apple.private.iokit.ioresources.setproperty" +// Entitlement required to read/write to the system nvram region +#define kIONVRAMSystemAllowKey "com.apple.private.iokit.system-nvram-allow" // When possible, defer matching of this driver until kextd has started. #define kIOMatchDeferKey "IOMatchDefer" +// Published after processor_start() has been called on all CPUs at boot time. 
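Purely as an illustration of how entitlement keys like the NVRAM ones above are usually consumed (the patch itself does not show the checking side), a sketch using the long-standing IOUserClient::copyClientEntitlement() helper:

#include <IOKit/IOUserClient.h>
#include <IOKit/IOKitKeys.h>

static bool
caller_may_write_nvram(task_t task)
{
    // copyClientEntitlement() returns a retained object; a plain boolean
    // entitlement comes back as kOSBooleanTrue.
    OSObject *ent = IOUserClient::copyClientEntitlement(task,
        kIONVRAMWriteAccessKey);
    bool allowed = (ent == kOSBooleanTrue);
    OSSafeReleaseNULL(ent);
    return allowed;
}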
+#define kIOAllCPUInitializedKey "IOAllCPUInitialized" + // IOService default user client class, for loadable user clients #define kIOUserClientClassKey "IOUserClientClass" // key to find IOMappers #define kIOMapperIDKey "IOMapperID" +#ifdef XNU_KERNEL_PRIVATE +// Apple Kext Exclude List +#define kIOExcludeListBundleID "com.apple.driver.KextExcludeList" +#endif + #define kIOUserClientCrossEndianKey "IOUserClientCrossEndian" #define kIOUserClientCrossEndianCompatibleKey "IOUserClientCrossEndianCompatible" #define kIOUserClientSharedInstanceKey "IOUserClientSharedInstance" #if KERNEL_PRIVATE #define kIOUserClientMessageAppSuspendedKey "IOUserClientMessageAppSuspended" #endif +#define kIOUserClientDefaultLockingKey "IOUserClientDefaultLocking" // diagnostic string describing the creating task #define kIOUserClientCreatorKey "IOUserClientCreator" // the expected cdhash value of the userspace driver executable @@ -163,6 +180,11 @@ // IOService interest notification types #define kIOCFPlugInTypesKey "IOCFPlugInTypes" +#define kIOCompatibilityMatchKey "IOCompatibilityMatch" +#define kIOCompatibilityPropertiesKey "IOCompatibilityProperties" +#define kIOPathKey "IOPath" + + // properties found in services that implement command pooling #define kIOCommandPoolSizeKey "IOCommandPoolSize" // (OSNumber) diff --git a/iokit/IOKit/IOKitKeysPrivate.h b/iokit/IOKit/IOKitKeysPrivate.h index 4751aba6e..992e3a9fe 100644 --- a/iokit/IOKit/IOKitKeysPrivate.h +++ b/iokit/IOKit/IOKitKeysPrivate.h @@ -88,11 +88,13 @@ typedef struct _IOUCProcessToken { #define kIOPlatformPanicActionKey "IOPlatformPanicAction" /* value is OSNumber (priority) */ #define kIOPlatformFunctionHandlerSet "IOPlatformFunctionHandlerSet" -#if defined(__i386__) || defined(__x86_64__) -#define kIOPlatformFunctionHandlerMaxBusDelay "IOPlatformFunctionHandlerMaxBusDelay" -#define kIOPlatformFunctionHandlerMaxInterruptDelay "IOPlatformFunctionHandlerMaxInterruptDelay" +#define kIOPlatformFunctionHandlerMaxBusDelay "IOPlatformFunctionHandlerMaxBusDelay" #define kIOPlatformMaxBusDelay "IOPlatformMaxBusDelay" + +#if defined(__i386__) || defined(__x86_64__) + +#define kIOPlatformFunctionHandlerMaxInterruptDelay "IOPlatformFunctionHandlerMaxInterruptDelay" #define kIOPlatformMaxInterruptDelay "IOPlatformMaxInterruptDelay" #endif /* defined(__i386__) || defined(__x86_64__) */ diff --git a/iokit/IOKit/IOLib.h b/iokit/IOKit/IOLib.h index df5036123..4c2444df1 100644 --- a/iokit/IOKit/IOLib.h +++ b/iokit/IOKit/IOLib.h @@ -81,9 +81,42 @@ typedef void (*IOThreadFunc)(void *argument); * @param size Size of the memory requested. * @result Pointer to the allocated memory, or zero on failure. */ +#if defined(XNU_KERNEL_PRIVATE) + +/* + * IOMalloc_internal allocates memory from the specifed kalloc heap, which can be: + * - KHEAP_DATA_BUFFERS: Should be used for data buffers + * - KHEAP_KEXT: Should be used for non core kernel allocations + * - KHEAP_DEFAULT: Should be used for all core kernel allocations that + * aren't data buffers. + * + * The kalloc heap used by IOMalloc calls from core kernel is KHEAP_DEFAULT + * and that used by kexts (accessed via IOMalloc_external) is KHEAP_KEXT. 
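The practical effect for core-kernel callers, sketched under the XNU_KERNEL_PRIVATE macros declared just below (sizes are arbitrary):

#include <IOKit/IOLib.h>

static void
kheap_example(void)
{
    // Inside XNU this expands to IOMalloc_internal(KHEAP_DEFAULT, 128);
    // kexts keep calling the plain IOMalloc() export and land in KHEAP_KEXT.
    void *buf = IOMalloc(128);
    if (buf) {
        // ... use buf ...
        IOFree(buf, 128);    // IOFree still requires the original size
    }
}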
+ * + * For more details on kalloc_heaps see kalloc.h + */ + +extern void * +IOMalloc_internal( + struct kalloc_heap * kalloc_heap_cfg, + vm_size_t size) __attribute__((alloc_size(2))); + +extern void * +IOMallocZero_internal( + struct kalloc_heap * kalloc_heap_cfg, + vm_size_t size) __attribute__((alloc_size(2))); + +#define IOMalloc(size) IOMalloc_internal(KHEAP_DEFAULT, size) + +#define IOMallocZero(size) IOMallocZero_internal(KHEAP_DEFAULT, size) + +#else/* defined(XNU_KERNEL_PRIVATE) */ + void * IOMalloc(vm_size_t size) __attribute__((alloc_size(1))); void * IOMallocZero(vm_size_t size) __attribute__((alloc_size(1))); +#endif /* !defined(XNU_KERNEL_PRIVATE) */ + /*! @function IOFree * @abstract Frees memory allocated with IOMalloc. * @discussion This function frees memory allocated with IOMalloc, it may block and so should not be called from interrupt level or while a simple lock is held. @@ -101,8 +134,22 @@ void IOFree(void * address, vm_size_t size); * @param alignment Byte count of the alignment for the memory. For example, pass 256 to get memory allocated at an address with bit 0-7 zero. * @result Pointer to the allocated memory, or zero on failure. */ +#if defined(XNU_KERNEL_PRIVATE) + +extern void * +IOMallocAligned_internal( + struct kalloc_heap * kalloc_heap_cfg, + vm_size_t size, + vm_size_t alignment) __attribute__((alloc_size(2))); + +#define IOMallocAligned(size, alignment) IOMallocAligned_internal(KHEAP_DEFAULT, size, alignment) + +#else/* defined(XNU_KERNEL_PRIVATE) */ + void * IOMallocAligned(vm_size_t size, vm_offset_t alignment) __attribute__((alloc_size(1))); +#endif /* !defined(XNU_KERNEL_PRIVATE) */ + /*! @function IOFreeAligned * @abstract Frees memory allocated with IOMallocAligned. * @discussion This function frees memory allocated with IOMallocAligned, it may block and so should not be called from interrupt level or while a simple lock is held. @@ -140,6 +187,15 @@ void IOFreeContiguous(void * address, vm_size_t size) __attribute__((deprecate void * IOMallocPageable(vm_size_t size, vm_size_t alignment) __attribute__((alloc_size(1))); +/*! @function IOMallocPageableZero + * @abstract Allocates pageable, zeroed memory in the kernel map. + * @discussion Same as IOMallocPageable but guarantees the returned memory will be zeroed. + * @param size Size of the memory requested. + * @param alignment Byte count of the alignment for the memory. For example, pass 256 to get memory allocated at an address with bits 0-7 zero. + * @result Pointer to the allocated memory, or zero on failure. */ + +void * IOMallocPageableZero(vm_size_t size, vm_size_t alignment) __attribute__((alloc_size(1))); + /*! @function IOFreePageable * @abstract Frees memory allocated with IOMallocPageable. * @discussion This function frees memory allocated with IOMallocPageable, it may block and so should not be called from interrupt level or while a simple lock is held. @@ -342,7 +398,7 @@ __attribute__((format(printf, 1, 0))); #ifndef _FN_KPRINTF #define _FN_KPRINTF -void kprintf(const char *format, ...); +void kprintf(const char *format, ...) __printflike(1, 2); #endif #ifndef _FN_KPRINTF_DECLARED #define _FN_KPRINTF_DECLARED diff --git a/iokit/IOKit/IOLocks.h b/iokit/IOKit/IOLocks.h index 88242c940..c585840f0 100644 --- a/iokit/IOKit/IOLocks.h +++ b/iokit/IOKit/IOLocks.h @@ -298,6 +298,17 @@ lck_rw_t * IORWLockGetMachLock( IORWLock * lock); void IORWLockRead(IORWLock * lock); #endif /* !IOLOCKS_INLINE */ +/*! @function IORWLockTryRead + * @abstract Attempt to lock a read/write lock for read. 
+ * @discussion Lock the lock for read, allowing multiple readers when there are no writers. If the lock is held for write, return false. Return true otherwise. + * @param lock Pointer to the allocated lock. */ + +#ifdef IOLOCKS_INLINE +#define IORWLockTryRead(l) lck_rw_try_lock_shared(l) +#else +void IORWLockTryRead( IORWLock * lock); +#endif /* !IOLOCKS_INLINE */ + /*! @function IORWLockWrite * @abstract Lock a read/write lock for write. * @discussion Lock the lock for write, allowing one writer exlusive access. If the lock is held for read or write, block waiting for its unlock. This function may block and so should not be called from interrupt level or while a spin lock is held. Locking the lock recursively from one thread, for read or write, can result in deadlock. @@ -309,6 +320,17 @@ void IORWLockRead(IORWLock * lock); void IORWLockWrite( IORWLock * lock); #endif /* !IOLOCKS_INLINE */ +/*! @function IORWLockTryWrite + * @abstract Attempt to lock a read/write lock for write. + * @discussion Lock the lock for write, allowing one writer exlusive access. If the lock is held for read or write, return false. Return true otherwise. + * @param lock Pointer to the allocated lock. */ + +#ifdef IOLOCKS_INLINE +#define IORWLockTryWrite(l) lck_rw_try_lock_exclusive(l) +#else +void IORWLockTryWrite( IORWLock * lock); +#endif /* !IOLOCKS_INLINE */ + /*! @function IORWLockUnlock * @abstract Unlock a read/write lock. * @discussion Undo one call to IORWLockRead or IORWLockWrite. Results are undefined if the caller has not locked the lock. This function may block and so should not be called from interrupt level or while a spin lock is held. diff --git a/iokit/IOKit/IOMapper.h b/iokit/IOKit/IOMapper.h index df5214478..34ca4a408 100644 --- a/iokit/IOKit/IOMapper.h +++ b/iokit/IOKit/IOMapper.h @@ -47,6 +47,7 @@ __END_DECLS #include #include #include +#include class OSData; @@ -99,8 +100,8 @@ public: } } - static IOMapper * copyMapperForDevice(IOService * device); - static IOMapper * copyMapperForDeviceWithIndex(IOService * device, unsigned int index); + static OSPtr copyMapperForDevice(IOService * device); + static OSPtr copyMapperForDeviceWithIndex(IOService * device, unsigned int index); // { subclasses diff --git a/iokit/IOKit/IOMemoryCursor.h b/iokit/IOKit/IOMemoryCursor.h index 3b81e6d3d..22eea326e 100644 --- a/iokit/IOKit/IOMemoryCursor.h +++ b/iokit/IOKit/IOMemoryCursor.h @@ -29,6 +29,7 @@ #define _IOMEMORYCURSOR_H #include +#include #include class IOMemoryDescriptor; @@ -111,7 +112,7 @@ public: * @param alignment Alignment restrictions on output physical addresses. Not currently implemented. Defaults to single byte alignment. * @result Returns a new memory cursor if successfully created and initialized, 0 otherwise. */ - static IOMemoryCursor * + static OSPtr withSpecification(SegmentFunction outSegFunc, IOPhysicalLength maxSegmentSize = 0, IOPhysicalLength maxTransferSize = 0, @@ -187,7 +188,7 @@ public: * @param alignment Alignment restrictions on output physical addresses. Not currently implemented. Defaults to single byte alignment. * @result Returns a new memory cursor if successfully created and initialized, 0 otherwise. */ - static IONaturalMemoryCursor * + static OSPtr withSpecification(IOPhysicalLength maxSegmentSize, IOPhysicalLength maxTransferSize, IOPhysicalLength alignment = 1); @@ -263,7 +264,7 @@ public: * @param alignment Alignment restrictions on output physical addresses. Not currently implemented. Defaults to single byte alignment. 
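A brief sketch, not part of the patch, of the try variants added in the IOLocks.h hunk above; with IOLOCKS_INLINE they forward to lck_rw_try_lock_shared()/lck_rw_try_lock_exclusive() and return false instead of blocking.

#include <IOKit/IOLocks.h>

static void
try_lock_example(IORWLock *lock)
{
    if (IORWLockTryRead(lock)) {
        // ... read shared state without risking a sleep on a writer ...
        IORWLockUnlock(lock);
    }
    if (IORWLockTryWrite(lock)) {
        // ... mutate state ...
        IORWLockUnlock(lock);
    }
}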
* @result Returns a new memory cursor if successfully created and initialized, 0 otherwise. */ - static IOBigMemoryCursor * + static OSPtr withSpecification(IOPhysicalLength maxSegmentSize, IOPhysicalLength maxTransferSize, IOPhysicalLength alignment = 1); @@ -339,7 +340,7 @@ public: * @param alignment Alignment restrictions on output physical addresses. Not currently implemented. Defaults to single byte alignment. * @result Returns a new memory cursor if successfully created and initialized, 0 otherwise. */ - static IOLittleMemoryCursor * + static OSPtr withSpecification(IOPhysicalLength maxSegmentSize, IOPhysicalLength maxTransferSize, IOPhysicalLength alignment = 1); diff --git a/iokit/IOKit/IOMemoryDescriptor.h b/iokit/IOKit/IOMemoryDescriptor.h index 7b6beff04..dc944236e 100644 --- a/iokit/IOKit/IOMemoryDescriptor.h +++ b/iokit/IOKit/IOMemoryDescriptor.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998-2019 Apple Inc. All rights reserved. + * Copyright (c) 1998-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -32,6 +32,7 @@ #include #include +#include #include #include #include @@ -41,6 +42,7 @@ #include +class IOMemoryDescriptor; class IOMemoryMap; class IOMapper; class IOService; @@ -237,7 +239,6 @@ struct IOMDDMAMapArgs { uint64_t fLength; uint64_t fAlloc; uint64_t fAllocLength; - uint8_t fMapContig; }; struct IOMDDMAWalkSegmentArgs { @@ -247,10 +248,6 @@ struct IOMDDMAWalkSegmentArgs { UInt64 fMappedBase; // Input base of mapping }; typedef UInt8 IOMDDMAWalkSegmentState[128]; -// fMapped: -enum{ - kIOMDDMAWalkMappedLocal = 2 -}; #endif /* KERNEL_PRIVATE */ @@ -260,6 +257,10 @@ enum{ kIOPreparationIDAlwaysPrepared = 2, }; +#ifdef KERNEL_PRIVATE +#define kIODescriptorIDInvalid (0) +#endif + #ifdef XNU_KERNEL_PRIVATE struct IOMemoryReference; #endif @@ -283,7 +284,7 @@ protected: struct IOMemoryDescriptorReserved * reserved; protected: - OSSet * _mappings; + OSPtr _mappings; IOOptionBits _flags; @@ -412,11 +413,19 @@ public: void setVMTags(uint32_t kernelTag, uint32_t userTag); uint32_t getVMTag(vm_map_t map); +#ifdef KERNEL_PRIVATE + uint64_t getDescriptorID( void ); + void setDescriptorID( void ); + + IOReturn ktraceEmitPhysicalSegments( void ); +#endif + #ifdef XNU_KERNEL_PRIVATE IOMemoryDescriptorReserved * getKernelReserved( void ); void cleanKernelReserved(IOMemoryDescriptorReserved * reserved); IOReturn dmaMap( IOMapper * mapper, + IOMemoryDescriptor * memory, IODMACommand * command, const IODMAMapSpecification * mapSpec, uint64_t offset, @@ -436,7 +445,7 @@ public: #endif private: - OSMetaClassDeclareReservedUsed(IOMemoryDescriptor, 0); + OSMetaClassDeclareReservedUsedX86(IOMemoryDescriptor, 0); #ifdef __LP64__ OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 1); OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 2); @@ -446,13 +455,13 @@ private: OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 6); OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 7); #else /* !__LP64__ */ - OSMetaClassDeclareReservedUsed(IOMemoryDescriptor, 1); - OSMetaClassDeclareReservedUsed(IOMemoryDescriptor, 2); - OSMetaClassDeclareReservedUsed(IOMemoryDescriptor, 3); - OSMetaClassDeclareReservedUsed(IOMemoryDescriptor, 4); - OSMetaClassDeclareReservedUsed(IOMemoryDescriptor, 5); - OSMetaClassDeclareReservedUsed(IOMemoryDescriptor, 6); - OSMetaClassDeclareReservedUsed(IOMemoryDescriptor, 7); + OSMetaClassDeclareReservedUsedX86(IOMemoryDescriptor, 1); + OSMetaClassDeclareReservedUsedX86(IOMemoryDescriptor, 2); + 
OSMetaClassDeclareReservedUsedX86(IOMemoryDescriptor, 3); + OSMetaClassDeclareReservedUsedX86(IOMemoryDescriptor, 4); + OSMetaClassDeclareReservedUsedX86(IOMemoryDescriptor, 5); + OSMetaClassDeclareReservedUsedX86(IOMemoryDescriptor, 6); + OSMetaClassDeclareReservedUsedX86(IOMemoryDescriptor, 7); #endif /* !__LP64__ */ OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 8); OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 9); @@ -477,12 +486,12 @@ public: * @param withDirection An I/O direction to be associated with the descriptor, which may affect the operation of the prepare and complete methods on some architectures. * @result The created IOMemoryDescriptor on success, to be released by the caller, or zero on failure. */ - static IOMemoryDescriptor * withAddress(void * address, + static OSPtr withAddress(void * address, IOByteCount withLength, IODirection withDirection); #ifndef __LP64__ - static IOMemoryDescriptor * withAddress(IOVirtualAddress address, + static OSPtr withAddress(IOVirtualAddress address, IOByteCount withLength, IODirection withDirection, task_t withTask) APPLE_KEXT_DEPRECATED; /* use withAddressRange() and prepare() instead */ @@ -496,13 +505,13 @@ public: * @param withDirection An I/O direction to be associated with the descriptor, which may affect the operation of the prepare and complete methods on some architectures. * @result The created IOMemoryDescriptor on success, to be released by the caller, or zero on failure. */ - static IOMemoryDescriptor * withPhysicalAddress( + static OSPtr withPhysicalAddress( IOPhysicalAddress address, IOByteCount withLength, IODirection withDirection ); #ifndef __LP64__ - static IOMemoryDescriptor * withRanges(IOVirtualRange * ranges, + static OSPtr withRanges(IOVirtualRange * ranges, UInt32 withCount, IODirection withDirection, task_t withTask, @@ -519,7 +528,7 @@ public: * @param task The task the virtual ranges are mapped into. Note that unlike IOMemoryDescriptor::withAddress(), kernel_task memory must be explicitly prepared when passed to this api. The task argument may be NULL to specify memory by physical address. * @result The created IOMemoryDescriptor on success, to be released by the caller, or zero on failure. */ - static IOMemoryDescriptor * withAddressRange( + static OSPtr withAddressRange( mach_vm_address_t address, mach_vm_size_t length, IOOptionBits options, @@ -536,7 +545,7 @@ public: * @param task The task each of the virtual ranges are mapped into. Note that unlike IOMemoryDescriptor::withAddress(), kernel_task memory must be explicitly prepared when passed to this api. The task argument may be NULL to specify memory by physical address. * @result The created IOMemoryDescriptor on success, to be released by the caller, or zero on failure. */ - static IOMemoryDescriptor * withAddressRanges( + static OSPtr withAddressRanges( IOAddressRange * ranges, UInt32 rangeCount, IOOptionBits options, @@ -565,7 +574,7 @@ public: * * @result The created IOMemoryDescriptor on success, to be released by the caller, or zero on failure. 
*/ - static IOMemoryDescriptor *withOptions(void * buffers, + static OSPtr withOptions(void * buffers, UInt32 count, UInt32 offset, task_t task, @@ -573,7 +582,7 @@ public: IOMapper * mapper = kIOMapperSystem); #ifndef __LP64__ - static IOMemoryDescriptor * withPhysicalRanges( + static OSPtr withPhysicalRanges( IOPhysicalRange * ranges, UInt32 withCount, IODirection withDirection, @@ -581,7 +590,7 @@ public: #endif /* !__LP64__ */ #ifndef __LP64__ - static IOMemoryDescriptor * withSubRange(IOMemoryDescriptor *of, + static OSPtr withSubRange(IOMemoryDescriptor *of, IOByteCount offset, IOByteCount length, IODirection withDirection) APPLE_KEXT_DEPRECATED; /* use IOSubMemoryDescriptor::withSubRange() and kIOMemoryThreadSafe instead */ @@ -592,7 +601,7 @@ public: * @discussion If the original memory descriptor's address and length is still backed by the same real memory, i.e. the user hasn't deallocated and the reallocated memory at the same address then the original memory descriptor is returned with a additional reference. Otherwise we build a totally new memory descriptor with the same characteristics as the previous one but with a new view of the vm. Note not legal to call this function with anything except an IOGeneralMemoryDescriptor that was created with the kIOMemoryPersistent option. * @param originalMD The memory descriptor to be duplicated. * @result Either the original memory descriptor with an additional retain or a new memory descriptor, 0 for a bad original memory descriptor or some other resource shortage. */ - static IOMemoryDescriptor * + static OSPtr withPersistentMemoryDescriptor(IOMemoryDescriptor *originalMD); #ifndef __LP64__ @@ -634,6 +643,9 @@ public: virtual IOByteCount getLength() const; +#define IOMEMORYDESCRIPTOR_SUPPORTS_GETDMAMAPLENGTH + uint64_t getDMAMapLength(uint64_t * offset = NULL); + /*! @function setTag * @abstract Set the tag for the memory descriptor. * @discussion This method sets the tag for the memory descriptor. Tag bits are not interpreted by IOMemoryDescriptor. @@ -731,7 +743,7 @@ public: * @param length Is the length of the mapping requested for a subset of the IOMemoryDescriptor. Zero is the default to map all the memory. * @result A reference to an IOMemoryMap object representing the mapping, which can supply the virtual address of the mapping and other information. The mapping may be shared with multiple callers - multiple maps are avoided if a compatible one exists. The IOMemoryMap object returned should be released only when the caller has finished accessing the mapping, as freeing the object destroys the mapping. The IOMemoryMap instance also retains the IOMemoryDescriptor it maps while it exists. */ - IOMemoryMap * createMappingInTask( + OSPtr createMappingInTask( task_t intoTask, mach_vm_address_t atAddress, IOOptionBits options, @@ -739,7 +751,7 @@ public: mach_vm_size_t length = 0 ); #ifndef __LP64__ - virtual IOMemoryMap * map( + virtual OSPtr map( task_t intoTask, IOVirtualAddress atAddress, IOOptionBits options, @@ -753,7 +765,7 @@ public: * @param options Mapping options as in the full version of the createMappingInTask method, with kIOMapAnywhere assumed. * @result See the full version of the createMappingInTask method. */ - virtual IOMemoryMap * map( + virtual OSPtr map( IOOptionBits options = 0 ); /*! @function setMapping @@ -764,7 +776,7 @@ public: * @param options Caching and read-only attributes of the mapping. * @result A IOMemoryMap object created to represent the mapping. 
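Pulling the IOMemoryDescriptor.h changes above together, a hedged sketch of describing, wiring, and mapping a user buffer with the OSPtr-returning factories; userTask, userAddr, and userLen are assumed inputs.

#include <IOKit/IOMemoryDescriptor.h>

static IOReturn
wire_and_map_user_buffer(task_t userTask, mach_vm_address_t userAddr,
    mach_vm_size_t userLen)
{
    OSSharedPtr<IOMemoryDescriptor> md = IOMemoryDescriptor::withAddressRange(
        userAddr, userLen, kIODirectionOutIn, userTask);
    if (!md) {
        return kIOReturnNoMemory;
    }
    IOReturn ret = md->prepare();                 // wire the pages
    if (ret != kIOReturnSuccess) {
        return ret;
    }
    OSSharedPtr<IOMemoryMap> map = md->createMappingInTask(kernel_task,
        0, kIOMapAnywhere);
    if (map) {
        // ... access map->getVirtualAddress() for userLen bytes ...
    }
    md->complete();                               // un-wire when done
    return map ? kIOReturnSuccess : kIOReturnVMError;
}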
*/ - virtual IOMemoryMap * setMapping( + virtual OSPtr setMapping( task_t task, IOVirtualAddress mapAddress, IOOptionBits options = 0 ); @@ -789,7 +801,7 @@ public: mach_vm_size_t length, IOOptionBits options ); - virtual IOMemoryMap * makeMapping( + virtual IOMemoryMap * makeMapping( IOMemoryDescriptor * owner, task_t intoTask, IOVirtualAddress atAddress, @@ -828,8 +840,8 @@ class IOMemoryMap : public OSObject OSDeclareDefaultStructorsWithDispatch(IOMemoryMap); #ifdef XNU_KERNEL_PRIVATE public: - IOMemoryDescriptor * fMemory; - IOMemoryMap * fSuperMap; + OSPtr fMemory; + OSPtr fSuperMap; mach_vm_size_t fOffset; mach_vm_address_t fAddress; mach_vm_size_t fLength; @@ -962,7 +974,7 @@ public: #ifdef XNU_KERNEL_PRIVATE // for IOMemoryDescriptor use - IOMemoryMap * copyCompatible( IOMemoryMap * newMapping ); + IOMemoryMap * copyCompatible( IOMemoryMap * newMapping ); bool init( task_t intoTask, @@ -1056,6 +1068,7 @@ public: IOReturn wireVirtual(IODirection forDirection); IOReturn dmaMap( IOMapper * mapper, + IOMemoryDescriptor * memory, IODMACommand * command, const IODMAMapSpecification * mapSpec, uint64_t offset, @@ -1080,6 +1093,13 @@ public: IOOptionBits options, mach_vm_address_t * inaddr); + IOReturn memoryReferenceMapNew(IOMemoryReference * ref, + vm_map_t map, + mach_vm_size_t inoffset, + mach_vm_size_t size, + IOOptionBits options, + mach_vm_address_t * inaddr); + static IOReturn memoryReferenceSetPurgeable( IOMemoryReference * ref, IOOptionBits newState, @@ -1093,6 +1113,11 @@ public: IOMemoryReference * ref, IOByteCount * residentPageCount, IOByteCount * dirtyPageCount); + + static uint64_t memoryReferenceGetDMAMapLength( + IOMemoryReference * ref, + uint64_t * offset); + #endif private: @@ -1104,7 +1129,7 @@ private: #endif /* !__LP64__ */ // Internal - OSData * _memoryEntries; + OSPtr _memoryEntries; unsigned int _pages; ppnum_t _highestPage; uint32_t __iomd_reservedA; @@ -1199,7 +1224,7 @@ public: virtual bool serialize(OSSerialize *s) const APPLE_KEXT_OVERRIDE; // Factory method for cloning a persistent IOMD, see IOMemoryDescriptor - static IOMemoryDescriptor * + static OSPtr withPersistentMemoryDescriptor(IOGeneralMemoryDescriptor *originalMD); }; @@ -1223,6 +1248,6 @@ IOMemoryMap::getSize() /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -extern boolean_t iokit_iomd_setownership_enabled; +extern bool iokit_iomd_setownership_enabled; #endif /* !_IOMEMORYDESCRIPTOR_H */ diff --git a/iokit/IOKit/IOMultiMemoryDescriptor.h b/iokit/IOKit/IOMultiMemoryDescriptor.h index f7ede4a1b..4a56dbe8a 100644 --- a/iokit/IOKit/IOMultiMemoryDescriptor.h +++ b/iokit/IOKit/IOMultiMemoryDescriptor.h @@ -30,6 +30,7 @@ #define _IOMULTIMEMORYDESCRIPTOR_H #include +#include /*! @class IOMultiMemoryDescriptor : public IOMemoryDescriptor * @abstract The IOMultiMemoryDescriptor object describes a memory area made up of several other IOMemoryDescriptors. @@ -58,7 +59,7 @@ public: * @param asReference If false, the IOMultiMemoryDescriptor object will make a copy of the descriptors array, otherwise, the array will be used in situ, avoiding an extra allocation. * @result The created IOMultiMemoryDescriptor on success, to be released by the caller, or zero on failure. 
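One more illustrative fragment, not from the patch, for the IOMultiMemoryDescriptor factory documented above; mdA and mdB are assumed, already-created descriptors with compatible directions.

#include <IOKit/IOMultiMemoryDescriptor.h>

static OSSharedPtr<IOMultiMemoryDescriptor>
combine_descriptors(IOMemoryDescriptor *mdA, IOMemoryDescriptor *mdB)
{
    IOMemoryDescriptor *parts[2] = { mdA, mdB };
    // asReference == false: the array is copied, so 'parts' may live on the stack.
    return IOMultiMemoryDescriptor::withDescriptors(parts, 2,
               kIODirectionOutIn, false);
}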
*/ - static IOMultiMemoryDescriptor * withDescriptors( + static OSPtr withDescriptors( IOMemoryDescriptor ** descriptors, UInt32 withCount, IODirection withDirection, diff --git a/iokit/IOKit/IONVRAM.h b/iokit/IOKit/IONVRAM.h index ef2e533e8..91336d2a7 100644 --- a/iokit/IOKit/IONVRAM.h +++ b/iokit/IOKit/IONVRAM.h @@ -31,33 +31,39 @@ #define _IOKIT_IONVRAM_H #ifdef __cplusplus +#include #include #include #include #include #endif /* __cplusplus */ +#include #define kIODTNVRAMOFPartitionName "common" #define kIODTNVRAMXPRAMPartitionName "APL,MacOS75" #define kIODTNVRAMPanicInfoPartitonName "APL,OSXPanic" #define kIODTNVRAMFreePartitionName "wwwwwwwwwwww" +#define kIODTNVRAMSystemPartitionName "secure" #define MIN_SYNC_NOW_INTERVAL 15*60 /* Minimum 15 Minutes interval mandated */ -enum { - kIODTNVRAMImageSize = 0x2000, - kIODTNVRAMXPRAMSize = 0x0100, - kIODTNVRAMNameRegistrySize = 0x0400 -}; - -enum { +enum IONVRAMVariableType { kOFVariableTypeBoolean = 1, kOFVariableTypeNumber, kOFVariableTypeString, kOFVariableTypeData }; +enum IONVRAMOperation { + kIONVRAMOperationRead, + kIONVRAMOperationWrite, + kIONVRAMOperationDelete, + kIONVRAMOperationObliterate, + kIONVRAMOperationReset +}; + enum { + // Deprecated but still used in AppleEFIRuntime for now kOFVariablePermRootOnly = 0, kOFVariablePermUserRead, kOFVariablePermUserWrite, @@ -66,42 +72,40 @@ enum { #ifdef __cplusplus +class IODTNVRAMVariables; + class IODTNVRAM : public IOService { OSDeclareDefaultStructors(IODTNVRAM); private: - IONVRAMController *_nvramController; - const OSSymbol *_registryPropertiesKey; - UInt8 *_nvramImage; - __unused bool _nvramImageDirty; - UInt32 _ofPartitionOffset; - UInt32 _ofPartitionSize; - UInt8 *_ofImage; - __unused bool _ofImageDirty; - OSDictionary *_ofDict; - OSDictionary *_nvramPartitionOffsets; - OSDictionary *_nvramPartitionLengths; - UInt32 _resv0 __unused; - UInt32 _resv1 __unused; - IOLock *_ofLock; - UInt32 _resv2 __unused; - UInt32 _resv3 __unused; - UInt8 *_resv4 __unused; - UInt32 _piPartitionOffset; - UInt32 _piPartitionSize; - UInt8 *_piImage; - bool _systemPaniced; - SInt32 _lastDeviceSync; - bool _freshInterval; - bool _isProxied; + IONVRAMController *_nvramController; + OSPtr _registryPropertiesKey; + UInt8 *_nvramImage; + IOLock *_variableLock; + UInt32 _commonPartitionOffset; + UInt32 _commonPartitionSize; + UInt8 *_commonImage; + IODTNVRAMVariables *_commonService; + OSPtr _commonDict; + UInt32 _systemPartitionOffset; + UInt32 _systemPartitionSize; + UInt8 *_systemImage; + IODTNVRAMVariables *_systemService; + OSPtr _systemDict; + OSPtr _nvramPartitionOffsets; + OSPtr _nvramPartitionLengths; + bool _systemPanicked; + SInt32 _lastDeviceSync; + bool _freshInterval; + bool _isProxied; + UInt32 _nvramSize; virtual UInt8 calculatePartitionChecksum(UInt8 *partitionHeader); - virtual IOReturn initOFVariables(void); -public: - virtual IOReturn syncOFVariables(void); -private: + virtual IOReturn initVariables(void); + virtual UInt32 getOFVariableType(const char *propName) const; virtual UInt32 getOFVariableType(const OSSymbol *propSymbol) const; + virtual UInt32 getOFVariablePerm(const char *propName) const; virtual UInt32 getOFVariablePerm(const OSSymbol *propSymbol) const; virtual bool getOWVariableInfo(UInt32 variableNumber, const OSSymbol **propSymbol, UInt32 *propType, UInt32 *propOffset); @@ -109,6 +113,10 @@ private: UInt8 *propData, UInt32 propDataLength, LIBKERN_RETURNS_RETAINED const OSSymbol **propSymbol, LIBKERN_RETURNS_RETAINED OSObject **propObject); + bool 
convertPropToObject(UInt8 *propName, UInt32 propNameLength, + UInt8 *propData, UInt32 propDataLength, + OSSharedPtr& propSymbol, + OSSharedPtr& propObject); virtual bool convertObjectToProp(UInt8 *buffer, UInt32 *length, const OSSymbol *propSymbol, OSObject *propObject); virtual UInt16 generateOWChecksum(UInt8 *buffer); @@ -124,8 +132,8 @@ private: const OSSymbol *name, OSData * value); - virtual OSData *unescapeBytesToData(const UInt8 *bytes, UInt32 length); - virtual OSData *escapeDataToData(OSData * value); + virtual OSPtr unescapeBytesToData(const UInt8 *bytes, UInt32 length); + virtual OSPtr escapeDataToData(OSData * value); virtual IOReturn readNVRAMPropertyType1(IORegistryEntry *entry, const OSSymbol **name, @@ -134,11 +142,15 @@ private: const OSSymbol *name, OSData *value); + UInt32 getNVRAMSize(void); void initNVRAMImage(void); void initProxyData(void); IOReturn syncVariables(void); IOReturn setPropertyInternal(const OSSymbol *aKey, OSObject *anObject); - + IOReturn removePropertyInternal(const OSSymbol *aKey); + IOReturn chooseDictionary(IONVRAMOperation operation, const uuid_t *varGuid, + const char *variableName, OSDictionary **dict) const; + bool handleSpecialVariables(const char *name, uuid_t *guid, OSObject *obj, IOReturn *error); public: virtual bool init(IORegistryEntry *old, const IORegistryPlane *plane) APPLE_KEXT_OVERRIDE; @@ -146,10 +158,11 @@ public: virtual void registerNVRAMController(IONVRAMController *nvram); virtual void sync(void); + virtual IOReturn syncOFVariables(void); virtual bool serializeProperties(OSSerialize *s) const APPLE_KEXT_OVERRIDE; - virtual OSObject *copyProperty(const OSSymbol *aKey) const APPLE_KEXT_OVERRIDE; - virtual OSObject *copyProperty(const char *aKey) const APPLE_KEXT_OVERRIDE; + virtual OSPtr copyProperty(const OSSymbol *aKey) const APPLE_KEXT_OVERRIDE; + virtual OSPtr copyProperty(const char *aKey) const APPLE_KEXT_OVERRIDE; virtual OSObject *getProperty(const OSSymbol *aKey) const APPLE_KEXT_OVERRIDE; virtual OSObject *getProperty(const char *aKey) const APPLE_KEXT_OVERRIDE; virtual bool setProperty(const OSSymbol *aKey, OSObject *anObject) APPLE_KEXT_OVERRIDE; diff --git a/iokit/IOKit/IOPMGR.h b/iokit/IOKit/IOPMGR.h new file mode 100644 index 000000000..66c3a0341 --- /dev/null +++ b/iokit/IOKit/IOPMGR.h @@ -0,0 +1,126 @@ +/* + * Copyright (c) 2019 Apple Computer, Inc. All rights reserved. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. + * + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. 
+ * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ + */ + +#pragma once + +extern "C" { +#include +}; + +#include + +/*! + * @class IOPMGR + * @abstract The base class for power managers, such as ApplePMGR. + */ +class IOPMGR : public IOService +{ + OSDeclareAbstractStructors(IOPMGR); + +public: + /*! + * @function enableCPUCore + * @abstract Enable a single CPU core. + * @discussion Release a secondary CPU core from reset, and enable + * external IRQ delivery to the core. XNU will not + * invoke this method on the boot CPU's cpu_id. + * @param cpu_id Logical CPU ID of the core. + */ + virtual void enableCPUCore(unsigned int cpu_id) = 0; + + /*! + * @function disableCPUCore + * @abstract Disable a single CPU core. + * @discussion Prepare a secondary CPU core for power down, and + * disable external IRQ delivery to the core. XNU + * will not invoke this method on the boot CPU's cpu_id. + * Note that the enable and disable operations are not + * symmetric, as disableCPUCore doesn't actually cut + * power to the core. + * @param cpu_id Logical CPU ID of the core. + */ + virtual void disableCPUCore(unsigned int cpu_id) = 0; + + /*! + * @function enableCPUCluster + * @abstract Enable power to a cluster of CPUs. + * @discussion Called to power up a CPU cluster if the cluster-wide + * voltage rails are disabled (i.e. PIO to the cluster + * isn't even working). + * @param cluster_id Cluster ID. + */ + virtual void enableCPUCluster(unsigned int cluster_id) = 0; + + /*! + * @function disableCPUCluster + * @abstract Disable power to a cluster of CPUs. + * @discussion Called to disable the voltage rails on a CPU + * cluster. This will only be invoked if all CPUs + * in the cluster are already disabled. It is + * presumed that after this operation completes, + * PIO operations to the cluster will cause a + * fatal bus error. + * @param cluster_id Cluster ID. + */ + virtual void disableCPUCluster(unsigned int cluster_id) = 0; + + /*! + * @function initCPUIdle + * @abstract Initialize idle-related parameters. + * @param info Pointer to the ml_processor_info_t struct that is + * being initialized (and hasn't been registered yet). + */ + virtual void initCPUIdle(ml_processor_info_t *info) = 0; + + /*! + * @function enterCPUIdle + * @abstract Called from cpu_idle() prior to entering the idle state on + * the current CPU. + * @param newIdleTimeoutTicks If non-NULL, will be overwritten with a new idle timeout value, + * in ticks. If the value is 0, XNU will disable the idle timer. + */ + virtual void enterCPUIdle(UInt64 *newIdleTimeoutTicks) = 0; + + /*! + * @function exitCPUIdle + * @abstract Called from cpu_idle_exit() after leaving the idle state on + * the current CPU. + * @param newIdleTimeoutTicks If non-NULL, will be overwritten with a new idle timeout value, + * in ticks. If the value is 0, XNU will disable the idle timer. + */ + virtual void exitCPUIdle(UInt64 *newIdleTimeoutTicks) = 0; + + /*! + * @function updateCPUIdle + * @abstract Called from timer_intr() to ask when to schedule the next idle + * timeout on the current CPU. + * @param newIdleTimeoutTicks If non-NULL, will be overwritten with a new idle timeout value, + * in ticks. If the value is 0, XNU will disable the idle timer. 
+ */ + virtual void updateCPUIdle(UInt64 *newIdleTimeoutTicks) = 0; +}; diff --git a/iokit/IOKit/IOPlatformActions.h b/iokit/IOKit/IOPlatformActions.h new file mode 100644 index 000000000..f3db82c03 --- /dev/null +++ b/iokit/IOKit/IOPlatformActions.h @@ -0,0 +1,40 @@ +/* + * Copyright (c) 2019 Apple Computer, Inc. All rights reserved. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. + * + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ + */ + +#pragma once + +#include + +extern "C" kern_return_t IOCPURunPlatformQuiesceActions(void); +extern "C" kern_return_t IOCPURunPlatformActiveActions(void); +extern "C" kern_return_t IOCPURunPlatformHaltRestartActions(uint32_t message); +extern "C" kern_return_t IOCPURunPlatformPanicActions(uint32_t message, uint32_t details); +extern "C" kern_return_t IOCPURunPlatformPanicSyncAction(void *addr, uint32_t offset, uint32_t len); + +void IOPlatformActionsPreSleep(void); +void IOPlatformActionsPostResume(void); diff --git a/iokit/IOKit/IOPlatformExpert.h b/iokit/IOKit/IOPlatformExpert.h index 8e4d78a94..6e3e852a3 100644 --- a/iokit/IOKit/IOPlatformExpert.h +++ b/iokit/IOKit/IOPlatformExpert.h @@ -41,8 +41,11 @@ #include #include #include +#include extern "C" { +#else +#include #endif #include @@ -54,8 +57,16 @@ typedef enum { } coprocessor_type_t; +/* + * PEGetMachineName() and PEGetModelName() are inconsistent across + * architectures, and considered deprecated. + * PEGetTargetName() and PEGetProductName() instead. 
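A tiny usage sketch, not from the patch, of the replacement accessors the comment above recommends:

#include <IOKit/IOPlatformExpert.h>
#include <IOKit/IOLib.h>

static void
log_target_and_product(void)
{
    char target[64];
    char product[64];
    if (PEGetTargetName(target, (int)sizeof(target)) &&
        PEGetProductName(product, (int)sizeof(product))) {
        IOLog("target=%s product=%s\n", target, product);
    }
}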
+ */ extern boolean_t PEGetMachineName( char * name, int maxLength ); extern boolean_t PEGetModelName( char * name, int maxLength ); + +extern boolean_t PEGetTargetName( char * name, int maxLength ); +extern boolean_t PEGetProductName( char * name, int maxLength ); extern int PEGetPlatformEpoch( void ); enum { @@ -68,14 +79,19 @@ enum { kPEPagingOff, kPEPanicBegin, kPEPanicEnd, - kPEPanicDiskShutdown, - kPEPanicRestartCPUNoPanicEndCallouts, kPEPanicRestartCPUNoCallouts }; + +/* Bitmask of details related to panic callouts */ +#define kPanicDetailsForcePowerOff 0x1 + extern int (*PE_halt_restart)(unsigned int type); extern int PEHaltRestart(unsigned int type); #ifdef XNU_KERNEL_PRIVATE + +extern int PEHaltRestartInternal(unsigned int type, uint32_t details); + enum { kIOSystemShutdownNotificationStageProcessExit = 0, kIOSystemShutdownNotificationStageRootUnmount = 1, @@ -87,6 +103,7 @@ extern uint32_t gEnforceQuiesceSafety; #ifdef KERNEL_PRIVATE extern boolean_t IOPMRootDomainGetWillShutdown(void); +extern void PEInitiatePanic(void); #endif /* KERNEL_PRIVATE */ // Save the Panic Info. Returns the number of bytes saved. @@ -235,12 +252,18 @@ public: virtual bool compareNubName( const IOService * nub, OSString * name, OSString ** matched = NULL ) const; + bool compareNubName( const IOService * nub, OSString * name, + OSSharedPtr& matched ) const; virtual IOReturn getNubResources( IOService * nub ); virtual long getBootROMType(void); virtual long getChipSetType(void); virtual long getMachineType(void); + /* + * getModelName() and getMachineName() are deprecated for direct + * use. Use getTargetName() and getProductName() instead. + */ virtual bool getModelName( char * name, int maxLength ); virtual bool getMachineName( char * name, int maxLength ); @@ -286,14 +309,20 @@ public: virtual void getUTCTimeOfDay( clock_sec_t * secs, clock_nsec_t * nsecs ); virtual void setUTCTimeOfDay( clock_sec_t secs, clock_nsec_t nsecs ); + void publishPlatformUUIDAndSerial( void ); + void publishNVRAM( void ); + + virtual bool getTargetName( char * name, int maxLength ); + virtual bool getProductName( char * name, int maxLength ); + + OSMetaClassDeclareReservedUsedX86(IOPlatformExpert, 0); + OSMetaClassDeclareReservedUsedX86(IOPlatformExpert, 1); + OSMetaClassDeclareReservedUsedX86(IOPlatformExpert, 2); + OSMetaClassDeclareReservedUsedX86(IOPlatformExpert, 3); + OSMetaClassDeclareReservedUsedX86(IOPlatformExpert, 4); + OSMetaClassDeclareReservedUsedX86(IOPlatformExpert, 5); + OSMetaClassDeclareReservedUsedX86(IOPlatformExpert, 6); - OSMetaClassDeclareReservedUsed(IOPlatformExpert, 0); - OSMetaClassDeclareReservedUsed(IOPlatformExpert, 1); - OSMetaClassDeclareReservedUsed(IOPlatformExpert, 2); - OSMetaClassDeclareReservedUsed(IOPlatformExpert, 3); - OSMetaClassDeclareReservedUsed(IOPlatformExpert, 4); - OSMetaClassDeclareReservedUnused(IOPlatformExpert, 5); - OSMetaClassDeclareReservedUnused(IOPlatformExpert, 6); OSMetaClassDeclareReservedUnused(IOPlatformExpert, 7); OSMetaClassDeclareReservedUnused(IOPlatformExpert, 8); OSMetaClassDeclareReservedUnused(IOPlatformExpert, 9); @@ -331,9 +360,16 @@ public: virtual IOReturn getNubResources( IOService * nub ) APPLE_KEXT_OVERRIDE; + /* + * getModelName() and getMachineName() are deprecated. Use + * getTargetName() and getProductName() instead. 
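The PEGetTargetName()/PEGetProductName() externs declared above follow the same calling convention as the PEGetMachineName()/PEGetModelName() pair they supersede: pass a buffer and its length and check the boolean result. A minimal sketch; the helper name and the 64-byte buffers are arbitrary choices for the example.

#include <IOKit/IOLib.h>
#include <IOKit/IOPlatformExpert.h>

static void
logPlatformIdentity(void)
{
	char target[64];
	char product[64];

	// Each call returns true only when a name was copied into the buffer.
	if (PEGetTargetName(target, sizeof(target)) &&
	    PEGetProductName(product, sizeof(product))) {
		IOLog("target=%s product=%s\n", target, product);
	}
}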
+ */ virtual bool getModelName( char * name, int maxLength ) APPLE_KEXT_OVERRIDE; virtual bool getMachineName( char * name, int maxLength ) APPLE_KEXT_OVERRIDE; + virtual bool getTargetName( char * name, int maxLength ) APPLE_KEXT_OVERRIDE; + virtual bool getProductName( char * name, int maxLength ) APPLE_KEXT_OVERRIDE; + virtual void registerNVRAMController( IONVRAMController * nvram ) APPLE_KEXT_OVERRIDE; virtual int haltRestart(unsigned int type) APPLE_KEXT_OVERRIDE; @@ -348,6 +384,10 @@ public: IORegistryEntry * entry, const OSSymbol ** name, OSData ** value ); + IOReturn readNVRAMProperty( + IORegistryEntry * entry, + OSSharedPtr& name, OSSharedPtr& value ); + virtual IOReturn writeNVRAMProperty( IORegistryEntry * entry, const OSSymbol * name, OSData * value ); @@ -393,8 +433,7 @@ private: ExpansionData *ioped_reserved __unused; public: - virtual bool initWithArgs( void * p1, void * p2, - void * p3, void *p4 ); + virtual bool init(void *dtRoot); virtual bool compareName( OSString * name, OSString ** matched = NULL ) const APPLE_KEXT_OVERRIDE; virtual IOWorkLoop *getWorkLoop() const APPLE_KEXT_OVERRIDE; @@ -406,6 +445,10 @@ public: UInt32 type, OSDictionary * properties, IOUserClient ** handler) APPLE_KEXT_OVERRIDE; + bool startIOServiceMatching(void); + void createNVRAM(void); + void generatePlatformUUID(void); + void configureDefaults(void); OSMetaClassDeclareReservedUnused(IOPlatformExpertDevice, 0); OSMetaClassDeclareReservedUnused(IOPlatformExpertDevice, 1); diff --git a/iokit/IOKit/IOPolledInterface.h b/iokit/IOKit/IOPolledInterface.h index 94e41c2c6..bdba85a67 100644 --- a/iokit/IOKit/IOPolledInterface.h +++ b/iokit/IOKit/IOPolledInterface.h @@ -44,6 +44,7 @@ enum{ #if defined(__cplusplus) #include +#include #include #define kIOPolledInterfaceSupportKey "IOPolledInterface" @@ -90,7 +91,7 @@ public: virtual IOReturn setEncryptionKey(const uint8_t * key, size_t keySize); - OSMetaClassDeclareReservedUsed(IOPolledInterface, 0); + OSMetaClassDeclareReservedUsedX86(IOPolledInterface, 0); OSMetaClassDeclareReservedUnused(IOPolledInterface, 1); OSMetaClassDeclareReservedUnused(IOPolledInterface, 2); OSMetaClassDeclareReservedUnused(IOPolledInterface, 3); @@ -142,16 +143,16 @@ struct IOPolledFileIOVars { struct kern_direct_file_io_ref_t * fileRef; OSData * fileExtents; uint64_t block0; - IOByteCount blockSize; + uint32_t blockSize; uint64_t maxiobytes; - IOByteCount bufferLimit; + uint32_t bufferLimit; uint8_t * buffer; - IOByteCount bufferSize; - IOByteCount bufferOffset; - IOByteCount bufferHalf; - IOByteCount extentRemaining; - IOByteCount lastRead; - IOByteCount readEnd; + uint32_t bufferSize; + uint32_t bufferOffset; + uint32_t bufferHalf; + uint64_t extentRemaining; + uint32_t lastRead; + uint64_t readEnd; uint32_t flags; uint64_t fileSize; uint64_t position; @@ -183,6 +184,14 @@ IOReturn IOPolledFileOpen(const char * filename, LIBKERN_RETURNS_RETAINED OSData ** imagePath, uint8_t * volumeCryptKey, size_t * keySize); +IOReturn IOPolledFileOpen(const char * filename, + uint32_t flags, + uint64_t setFileSize, uint64_t fsFreeSize, + void * write_file_addr, size_t write_file_len, + IOPolledFileIOVars ** fileVars, + OSSharedPtr& imagePath, + uint8_t * volumeCryptKey, size_t * keySize); + IOReturn IOPolledFileClose(IOPolledFileIOVars ** pVars, off_t write_offset, void * addr, size_t write_length, off_t discard_offset, off_t discard_end); diff --git a/iokit/IOKit/IORangeAllocator.h b/iokit/IOKit/IORangeAllocator.h index e7b0472dc..b6791c777 100644 --- a/iokit/IOKit/IORangeAllocator.h +++ 
b/iokit/IOKit/IORangeAllocator.h @@ -37,6 +37,7 @@ #ifndef _IOKIT_IORANGEALLOCATOR_H #define _IOKIT_IORANGEALLOCATOR_H +#include #include #include @@ -92,7 +93,7 @@ public: * @param options Pass kLocking if the instance can be used by multiple threads. * @result Returns the new IORangeAllocator instance, to be released by the caller, or zero on failure. */ - static IORangeAllocator * withRange( IORangeScalar endOfRange, + static OSPtr withRange( IORangeScalar endOfRange, IORangeScalar defaultAlignment = 0, UInt32 capacity = 0, IOOptionBits options = 0 ); diff --git a/iokit/IOKit/IORegistryEntry.h b/iokit/IOKit/IORegistryEntry.h index af3b946ea..441023fd3 100644 --- a/iokit/IOKit/IORegistryEntry.h +++ b/iokit/IOKit/IORegistryEntry.h @@ -38,6 +38,15 @@ #include #include +#include + +#if defined(IOKIT_ENABLE_SHARED_PTR) +/*! @parseOnly */ +#define APPLE_KEXT_DEPRECATED_WITH_SHARED_PTR __attribute__((deprecated)) +#else +/*! @parseOnly */ +#define APPLE_KEXT_DEPRECATED_WITH_SHARED_PTR +#endif /* IOKIT_ENABLE_SHARED_PTR */ extern const OSSymbol * gIONameKey; @@ -101,7 +110,7 @@ public: * @param options kIORegistryIterateRecursively may be set to recurse automatically into the registry hierarchy. Without this option, this method degenerates into the standard getProperty() call. kIORegistryIterateParents may be set to iterate the parents of the entry, in place of the children. * @result The property value found, or zero. A reference on any found property is returned to caller, which should be released. */ - virtual OSObject * copyProperty( const char * aKey, + virtual OSPtr copyProperty( const char * aKey, const IORegistryPlane * plane, IOOptionBits options = kIORegistryIterateRecursively | @@ -115,7 +124,7 @@ public: * @param options kIORegistryIterateRecursively may be set to recurse automatically into the registry hierarchy. Without this option, this method degenerates into the standard getProperty() call. kIORegistryIterateParents may be set to iterate the parents of the entry, in place of the children. * @result The property value found, or zero. A reference on any found property is returned to caller, which should be released. */ - virtual OSObject * copyProperty( const OSString * aKey, + virtual OSPtr copyProperty( const OSString * aKey, const IORegistryPlane * plane, IOOptionBits options = kIORegistryIterateRecursively | @@ -129,7 +138,7 @@ public: * @param options kIORegistryIterateRecursively may be set to recurse automatically into the registry hierarchy. Without this option, this method degenerates into the standard getProperty() call. kIORegistryIterateParents may be set to iterate the parents of the entry, in place of the children. * @result The property value found, or zero. A reference on any found property is returned to caller, which should be released. */ - virtual OSObject * copyProperty( const OSSymbol * aKey, + virtual OSPtr copyProperty( const OSSymbol * aKey, const IORegistryPlane * plane, IOOptionBits options = kIORegistryIterateRecursively | @@ -141,7 +150,7 @@ public: * @param plane The plane object. * @result Returns the first parent of the registry entry, or zero if the entry is not attached into the registry in that plane. A reference on the entry is returned to caller, which should be released. */ - virtual IORegistryEntry * copyParentEntry( const IORegistryPlane * plane ) const; + virtual OSPtr copyParentEntry( const IORegistryPlane * plane ) const; /*! @function copyChildEntry * @abstract Returns an registry entry's first child entry in a plane. 
Available in Mac OS X 10.1 or later. @@ -149,7 +158,7 @@ public: * @param plane The plane object. * @result Returns the first child of the registry entry, or zero if the entry is not attached into the registry in that plane. A reference on the entry is returned to caller, which should be released. */ - virtual IORegistryEntry * copyChildEntry( const IORegistryPlane * plane ) const; + virtual OSPtr copyChildEntry( const IORegistryPlane * plane ) const; /* method available in Mac OS X 10.4 or later */ /*! @@ -198,12 +207,12 @@ private: OSMetaClassDeclareReservedUnused(IORegistryEntry, 4); OSMetaClassDeclareReservedUnused(IORegistryEntry, 5); #else - OSMetaClassDeclareReservedUsed(IORegistryEntry, 0); - OSMetaClassDeclareReservedUsed(IORegistryEntry, 1); - OSMetaClassDeclareReservedUsed(IORegistryEntry, 2); - OSMetaClassDeclareReservedUsed(IORegistryEntry, 3); - OSMetaClassDeclareReservedUsed(IORegistryEntry, 4); - OSMetaClassDeclareReservedUsed(IORegistryEntry, 5); + OSMetaClassDeclareReservedUsedX86(IORegistryEntry, 0); + OSMetaClassDeclareReservedUsedX86(IORegistryEntry, 1); + OSMetaClassDeclareReservedUsedX86(IORegistryEntry, 2); + OSMetaClassDeclareReservedUsedX86(IORegistryEntry, 3); + OSMetaClassDeclareReservedUsedX86(IORegistryEntry, 4); + OSMetaClassDeclareReservedUsedX86(IORegistryEntry, 5); #endif OSMetaClassDeclareReservedUnused(IORegistryEntry, 6); OSMetaClassDeclareReservedUnused(IORegistryEntry, 7); @@ -385,7 +394,7 @@ public: * @param aKey The property's name as an OSSymbol. * @result The property value found, or zero. */ - virtual OSObject * getProperty( const OSSymbol * aKey) const; + virtual OSObject * getProperty( const OSSymbol * aKey) const APPLE_KEXT_DEPRECATED_WITH_SHARED_PTR; /*! @function getProperty * @abstract Synchronized method to obtain a property from a registry entry's property table. @@ -393,7 +402,7 @@ public: * @param aKey The property's name as an OSString. * @result The property value found, or zero. */ - virtual OSObject * getProperty( const OSString * aKey) const; + virtual OSObject * getProperty( const OSString * aKey) const APPLE_KEXT_DEPRECATED_WITH_SHARED_PTR; /*! @function getProperty * @abstract Synchronized method to obtain a property from a registry entry's property table. @@ -401,7 +410,61 @@ public: * @param aKey The property's name as a C-string. * @result The property value found, or zero. */ - virtual OSObject * getProperty( const char * aKey) const; + virtual OSObject * getProperty( const char * aKey) const APPLE_KEXT_DEPRECATED_WITH_SHARED_PTR; + +/*! @function propertyExists + * @abstract Synchronized method to check if a property exists in a registry entry's property table. + * @discussion This method will check if a property exists in a registry entry's property table. This method is synchronized with other IORegistryEntry accesses to the property table. + * @param aKey The property's name as an OSSymbol. + * @result True if the property value was found. */ + + bool propertyExists(const OSSymbol * aKey); + +/*! @function propertyExists + * @abstract Synchronized method to check if a property exists in a registry entry's property table. + * @discussion This method will check if a property exists in a registry entry's property table. This method is synchronized with other IORegistryEntry accesses to the property table. + * @param aKey The property's name as an OSString. + * @result True if the property value was found. */ + + bool propertyExists(const OSString * aKey); + +/*!
@function propertyExists + * @abstract Synchronized method to check if a property exists in a registry entry's property table. + * @discussion This method will check if a property exists in a registry entry's property table. This method is synchronized with other IORegistryEntry accesses to the property table. + * @param aKey The property's name as a C-string. + * @result True if the property value was found. */ + + bool propertyExists(const char * aKey); + +/*! @function propertyHasValue + * @abstract Synchronized method to check if a property in a registry entry's property table has a given value. + * @discussion This method will check if a property exists in a registry entry's property table and compares with isEqualTo() the supplied value. This method is synchronized with other IORegistryEntry accesses to the property table. + * @param aKey The property's name as an OSSymbol. + * @param value The property's value to be compared. + * @result True if the property value was found and isEqualTo() the supplied value. */ + + bool propertyHasValue(const OSSymbol * aKey, + const OSObject * value); + +/*! @function propertyHasValue + * @abstract Synchronized method to check if a property in a registry entry's property table has a given value. + * @discussion This method will check if a property exists in a registry entry's property table and compares with isEqualTo() the supplied value. This method is synchronized with other IORegistryEntry accesses to the property table. + * @param aKey The property's name as an OSString. + * @param value The property's value to be compared. + * @result True if the property value was found and isEqualTo() the supplied value. */ + + bool propertyHasValue(const OSString * aKey, + const OSObject * value); + +/*! @function propertyHasValue + * @abstract Synchronized method to check if a property in a registry entry's property table has a given value. + * @discussion This method will check if a property exists in a registry entry's property table and compares with isEqualTo() the supplied value. This method is synchronized with other IORegistryEntry accesses to the property table. + * @param aKey The property's name as a C-string. + * @param value The property's value to be compared. + * @result True if the property value was found and isEqualTo() the supplied value. */ + + bool propertyHasValue(const char * aKey, + const OSObject * value); /*! @function getProperty * @abstract Synchronized method to obtain a property from a registry entry or one of its parents (or children) in the hierarchy. @@ -415,7 +478,7 @@ public: const IORegistryPlane * plane, IOOptionBits options = kIORegistryIterateRecursively | - kIORegistryIterateParents) const; + kIORegistryIterateParents) const APPLE_KEXT_DEPRECATED_WITH_SHARED_PTR; /*! @function getProperty * @abstract Synchronized method to obtain a property from a registry entry or one of its parents (or children) in the hierarchy. @@ -429,7 +492,7 @@ public: const IORegistryPlane * plane, IOOptionBits options = kIORegistryIterateRecursively | - kIORegistryIterateParents) const; + kIORegistryIterateParents) const APPLE_KEXT_DEPRECATED_WITH_SHARED_PTR; /*! @function getProperty * @abstract Synchronized method to obtain a property from a registry entry or one of its parents (or children) in the hierarchy. @@ -443,15 +506,106 @@ public: const IORegistryPlane * plane, IOOptionBits options = kIORegistryIterateRecursively | + kIORegistryIterateParents) const APPLE_KEXT_DEPRECATED_WITH_SHARED_PTR; + +/*!
@function propertyExists + * @abstract Synchronized method to check if a property exists from a registry entry or one of its parents (or children) in the hierarchy. + * @discussion This method will search for a property, starting first with this registry entry's property table, then iterating recursively through either the parent registry entries or the child registry entries of this entry. Once the first occurrence is found, it will return true. The iteration keeps track of entries that have been recursed into previously to avoid loops. This method is synchronized with other IORegistryEntry accesses to the property table(s). + * @param aKey The property's name as an OSSymbol. + * @param plane The plane to iterate over, eg. gIOServicePlane. + * @param options kIORegistryIterateRecursively may be set to recurse automatically into the registry hierarchy. Without this option, this method degenerates into the standard propertyExists() call. kIORegistryIterateParents may be set to iterate the parents of the entry, in place of the children. + * @result True if the property was found. */ + + bool propertyExists( const OSSymbol * aKey, + const IORegistryPlane * plane, + uint32_t options = + kIORegistryIterateRecursively | + kIORegistryIterateParents) const; + +/*! @function propertyExists + * @abstract Synchronized method to check if a property exists from a registry entry or one of its parents (or children) in the hierarchy. + * @discussion This method will search for a property, starting first with this registry entry's property table, then iterating recursively through either the parent registry entries or the child registry entries of this entry. Once the first occurrence is found, it will return true. The iteration keeps track of entries that have been recursed into previously to avoid loops. This method is synchronized with other IORegistryEntry accesses to the property table(s). + * @param aKey The property's name as an OSString. + * @param plane The plane to iterate over, eg. gIOServicePlane. + * @param options kIORegistryIterateRecursively may be set to recurse automatically into the registry hierarchy. Without this option, this method degenerates into the standard propertyExists() call. kIORegistryIterateParents may be set to iterate the parents of the entry, in place of the children. + * @result True if the property was found. */ + + bool propertyExists( const OSString * aKey, + const IORegistryPlane * plane, + uint32_t options = + kIORegistryIterateRecursively | + kIORegistryIterateParents) const; + +/*! @function propertyExists + * @abstract Synchronized method to check if a property exists from a registry entry or one of its parents (or children) in the hierarchy. + * @discussion This method will search for a property, starting first with this registry entry's property table, then iterating recursively through either the parent registry entries or the child registry entries of this entry. Once the first occurrence is found, it will return true. The iteration keeps track of entries that have been recursed into previously to avoid loops. This method is synchronized with other IORegistryEntry accesses to the property table(s). + * @param aKey The property's name as a C-string. + * @param plane The plane to iterate over, eg. gIOServicePlane. + * @param options kIORegistryIterateRecursively may be set to recurse automatically into the registry hierarchy. Without this option, this method degenerates into the standard propertyExists() call.
kIORegistryIterateParents may be set to iterate the parents of the entry, in place of the children. + * @result True if the property was found. */ + + bool propertyExists( const char * aKey, + const IORegistryPlane * plane, + uint32_t options = + kIORegistryIterateRecursively | kIORegistryIterateParents) const; +/*! @function propertyHasValue + * @abstract Synchronized method to check if a property has a given value from a registry entry or one of its parents (or children) in the hierarchy. + * @discussion This method will search for a property, starting first with this registry entry's property table, then iterating recursively through either the parent registry entries or the child registry entries of this entry. Once the first occurrence is found, it will return true if the property isEqualTo() the supplied value. The iteration keeps track of entries that have been recursed into previously to avoid loops. This method is synchronized with other IORegistryEntry accesses to the property table(s). + * @param aKey The property's name as an OSSymbol. + * @param value The property value to be compared. + * @param plane The plane to iterate over, eg. gIOServicePlane. + * @param options kIORegistryIterateRecursively may be set to recurse automatically into the registry hierarchy. Without this option, this method degenerates into the standard propertyExists() call. kIORegistryIterateParents may be set to iterate the parents of the entry, in place of the children. + * @result True if the property was found and isEqualTo() the supplied value. */ + + bool propertyHasValue( const OSSymbol * aKey, + const OSObject * value, + const IORegistryPlane * plane, + uint32_t options = + kIORegistryIterateRecursively | + kIORegistryIterateParents) const; + +/*! @function propertyHasValue + * @abstract Synchronized method to check if a property has a given value from a registry entry or one of its parents (or children) in the hierarchy. + * @discussion This method will search for a property, starting first with this registry entry's property table, then iterating recursively through either the parent registry entries or the child registry entries of this entry. Once the first occurrence is found, it will return true if the property isEqualTo() the supplied value. The iteration keeps track of entries that have been recursed into previously to avoid loops. This method is synchronized with other IORegistryEntry accesses to the property table(s). + * @param aKey The property's name as an OSString. + * @param value The property value to be compared. + * @param plane The plane to iterate over, eg. gIOServicePlane. + * @param options kIORegistryIterateRecursively may be set to recurse automatically into the registry hierarchy. Without this option, this method degenerates into the standard propertyExists() call. kIORegistryIterateParents may be set to iterate the parents of the entry, in place of the children. + * @result True if the property was found and isEqualTo() the supplied value. */ + + bool propertyHasValue( const OSString * aKey, + const OSObject * value, + const IORegistryPlane * plane, + uint32_t options = + kIORegistryIterateRecursively | + kIORegistryIterateParents) const; + +/*! @function propertyHasValue + * @abstract Synchronized method to check if a property has a given value from a registry entry or one of its parents (or children) in the hierarchy.
+ * @discussion This method will search for a property, starting first with this registry entry's property table, then iterating recursively through either the parent registry entries or the child registry entries of this entry. Once the first occurrence is found, it will return true if the property isEqualTo() the supplied value. The iteration keeps track of entries that have been recursed into previously to avoid loops. This method is synchronized with other IORegistryEntry accesses to the property table(s). + * @param aKey The property's name as a C-string. + * @param value The property value to be compared. + * @param plane The plane to iterate over, eg. gIOServicePlane. + * @param options kIORegistryIterateRecursively may be set to recurse automatically into the registry hierarchy. Without this option, this method degenerates into the standard propertyExists() call. kIORegistryIterateParents may be set to iterate the parents of the entry, in place of the children. + * @result True if the property was found and isEqualTo() the supplied value. */ + + bool propertyHasValue( const char * aKey, + const OSObject * value, + const IORegistryPlane * plane, + uint32_t options = + kIORegistryIterateRecursively | + kIORegistryIterateParents) const; + + /*! @function copyProperty * @abstract Synchronized method to obtain a property from a registry entry's property table. * @discussion This method will lookup a property in a registry entry's property table, using the OSDictionary::getObject semantics, and return a reference to the caller. This method is synchronized with other IORegistryEntry accesses to the property table. * @param aKey The property's name as an OSSymbol. * @result The property value found, or zero. It should be released by the caller. */ - virtual OSObject * copyProperty( const OSSymbol * aKey) const; + virtual OSPtr copyProperty( const OSSymbol * aKey) const; /*! @function copyProperty * @abstract Synchronized method to obtain a property from a registry entry's property table. @@ -459,7 +613,7 @@ public: * @param aKey The property's name as an OSString. * @result The property value found, or zero. It should be released by the caller. */ - virtual OSObject * copyProperty( const OSString * aKey) const; + virtual OSPtr copyProperty( const OSString * aKey) const; /*! @function copyProperty * @abstract Synchronized method to obtain a property from a registry entry's property table. @@ -467,14 +621,14 @@ public: * @param aKey The property's name as a C-string. * @result The property value found, or zero. It should be released by the caller. */ - virtual OSObject * copyProperty( const char * aKey) const; + virtual OSPtr copyProperty( const char * aKey) const; /*! @function dictionaryWithProperties * @abstract Synchronized method to obtain copy a registry entry's property table. * @discussion This method will copy a registry entry's property table, using the OSDictionary::withDictionary semantics. This method is synchronized with other IORegistryEntry accesses to the property table. Since OSDictionary will only copy property values by reference, synchronization is not guaranteed to any collection values. * @result The created dictionary, or zero on a resource value. It should be released by the caller. */ - virtual OSDictionary * dictionaryWithProperties( void ) const; + virtual OSPtr dictionaryWithProperties( void ) const; /*! @function serializeProperties * @abstract Synchronized method to serialize a registry entry's property table.
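The propertyExists()/propertyHasValue() methods added above let a caller test a property without taking a reference on its value, which getProperty()/copyProperty() would otherwise hand back. A small sketch of the non-plane overloads; the key strings, the helper name, and the expectedValue argument are illustrative only.

static bool
nubLooksCompatible(IORegistryEntry * nub, const OSObject * expectedValue)
{
	// Existence check: no reference to the property value is returned,
	// so there is nothing for the caller to release.
	if (!nub->propertyExists("compatible")) {
		return false;
	}
	// Value check: the comparison is performed with isEqualTo(), under the
	// same property-table synchronization.
	return nub->propertyHasValue("model", expectedValue);
}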
@@ -511,7 +665,7 @@ public: * @param plane The plane object. * @result Returns an iterator over the parents of the registry entry, or zero if there is a resource failure. The iterator must be released when the iteration is finished. All objects returned by the iteration are retained while the iterator is valid, though they may no longer be attached during the iteration. */ - virtual OSIterator * getParentIterator( const IORegistryPlane * plane ) + virtual OSPtr getParentIterator( const IORegistryPlane * plane ) const; virtual void applyToParents( IORegistryEntryApplierFunction applier, void * context, @@ -531,12 +685,12 @@ public: * @param plane The plane object. * @result Returns an iterator over the children of the entry, or zero if there is a resource failure. The iterator must be released when the iteration is finished. All objects returned by the iteration are retained while the iterator is valid, though they may no longer be attached during the iteration. */ - virtual OSIterator * getChildIterator( const IORegistryPlane * plane ) + virtual OSPtr getChildIterator( const IORegistryPlane * plane ) const; #if XNU_KERNEL_PRIVATE uint32_t getChildCount( const IORegistryPlane * plane ) const; - OSArray * copyPropertyKeys(void) const; + OSPtr copyPropertyKeys(void) const; #endif virtual void applyToChildren( IORegistryEntryApplierFunction applier, @@ -661,7 +815,7 @@ public: * @param plane The plane object, or zero for the global name. * @result A reference to an OSSymbol for the name, which should be released by the caller. */ - virtual const OSSymbol * copyName( + virtual OSPtr copyName( const IORegistryPlane * plane = NULL ) const; /*! @function compareNames @@ -673,6 +827,8 @@ public: virtual bool compareNames( OSObject * name, OSString ** matched = NULL ) const; + bool compareNames( OSObject * name, OSSharedPtr& matched) const; + /*! @function compareName * @abstract Compares the name of the entry with one name, and optionally returns the matching name. * @discussion This method is called during IOService name matching and elsewhere from the compareNames method. It should be overridden to provide non-standard name matching. @@ -682,6 +838,8 @@ public: virtual bool compareName( OSString * name, OSString ** matched = NULL ) const; + bool compareName( OSString * name, OSSharedPtr& matched) const; + /*! @function setName * @abstract Sets a name for the registry entry, in a particular plane, or globally. * @discussion Entries can be named in a particular plane, or globally. If the plane is specified the name applies only to that plane, otherwise the global name is set. The global name defaults to the entry's meta class name if it has not been named. @@ -714,7 +872,7 @@ public: * @param plane The plane object, or zero for the global name. * @result A reference to an OSSymbol for the location if one exists, which should be released by the caller, or zero. */ - virtual const OSSymbol * copyLocation( + virtual OSPtr copyLocation( const IORegistryPlane * plane = NULL ) const; /*! @function setLocation @@ -760,7 +918,7 @@ public: * @param fromEntry The lookup will proceed rooted at this entry if non-zero, otherwise it proceeds from the root of the plane. * @result A retained registry entry is returned on success, or zero on failure. The caller should release the entry. 
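With the OSPtr return types introduced above, references handed back by getChildIterator() and copyName() are dropped automatically when the smart pointer leaves scope. The sketch below assumes a kernel built with IOKIT_ENABLE_SHARED_PTR (on raw-pointer builds OSPtr is a plain pointer and the usual release rules still apply); the helper name is invented for the example.

#include <IOKit/IOLib.h>
#include <IOKit/IORegistryEntry.h>

static void
logChildNames(IORegistryEntry * parent, const IORegistryPlane * plane)
{
	OSPtr<OSIterator> iter = parent->getChildIterator(plane);
	if (!iter) {
		return;
	}
	OSObject * obj;
	while ((obj = iter->getNextObject())) {
		IORegistryEntry * child = OSDynamicCast(IORegistryEntry, obj);
		if (!child) {
			continue;
		}
		// copyName() returns a retained name; the OSPtr releases it at the
		// end of each loop iteration.
		OSPtr<const OSSymbol> name = child->copyName(plane);
		IOLog("child: %s\n", name ? name->getCStringNoCopy() : "?");
	}
	// iter is released automatically when it goes out of scope.
}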
*/ - static IORegistryEntry * fromPath( const char * path, + static OSPtr fromPath( const char * path, const IORegistryPlane * plane = NULL, char * residualPath = NULL, int * residualLength = NULL, @@ -775,7 +933,7 @@ public: * @param residualLength See IORegistryEntry::fromPath. * @result See IORegistryEntry::fromPath. */ - virtual IORegistryEntry * childFromPath( const char * path, + virtual OSPtr childFromPath( const char * path, const IORegistryPlane * plane = NULL, char * residualPath = NULL, int * residualLength = NULL ); @@ -796,7 +954,7 @@ public: * @param name A C-string name for the new plane, to be copied. * @result A new instance of an IORegistryPlane, or zero on failure. */ - static const IORegistryPlane * makePlane( const char * name ); + static OSPtr makePlane( const char * name ); /*! @abstract Returns an ID for the registry entry that is global to all tasks. * @discussion The entry ID returned by getRegistryEntryID can be used to identify a registry entry across all tasks. A registry entry may be looked up by its entry ID by creating a matching dictionary with IORegistryEntryIDMatching() in user space, or IOService::registryEntryIDMatching() in the kernel, to be used with the IOKit matching functions. The ID is valid only until the machine reboots. @@ -892,7 +1050,7 @@ public: * @param options kIORegistryIterateRecursively may be set to recurse automatically into each entry as it is returned. This option affects the behaviour of the getNextObject method, which is defined in the OSIterator superclass. Other methods will override this behaviour. kIORegistryIterateParents may be set to iterate the parents of each entry, by default the children are iterated. * @result A created IORegistryIterator instance, to be released by the caller when it has finished with it. */ - static IORegistryIterator * iterateOver( IORegistryEntry * start, + static OSPtr iterateOver( IORegistryEntry * start, const IORegistryPlane * plane, IOOptionBits options = 0 ); @@ -903,7 +1061,7 @@ public: * @param options kIORegistryIterateRecursively may be set to recurse automatically into each entry as it is returned. This option affects the behaviour of the getNextObject method, which is defined in the OSIterator superclass. Other methods will override this behaviour. kIORegistryIterateParents may be set to iterate the parents of each entry, by default the children are iterated. * @result A created IORegistryIterator instance, to be released by the caller when it has finished with it. */ - static IORegistryIterator * iterateOver( const IORegistryPlane * plane, + static OSPtr iterateOver( const IORegistryPlane * plane, IOOptionBits options = 0 ); /*! @function getNextObject @@ -972,7 +1130,7 @@ public: * @discussion This method will reset, then iterate all entries in the iteration (with getNextObject). * @result A set of entries returned by the iteration. The caller should release the set when it has finished with it. Zero is returned on a resource failure. */ - virtual OSOrderedSet * iterateAll( void ); + virtual OSPtr iterateAll( void ); }; #endif /* _IOKIT_IOREGISTRYENTRY_H */ diff --git a/iokit/IOKit/IOReportMacros.h b/iokit/IOKit/IOReportMacros.h index db8e8aed9..7dfb9c1c6 100644 --- a/iokit/IOKit/IOReportMacros.h +++ b/iokit/IOKit/IOReportMacros.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012-2014 Apple Computer, Inc. All Rights Reserved. + * Copyright (c) 2012-2020 Apple Computer, Inc. All Rights Reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -218,7 +218,7 @@ typedef struct { /* * Initialize a StateReport buffer. * - * int nstates - number of states to be reported + * uint16_t nstates - number of states to be reported * void* buffer - ptr to STATEREPORT_BUFSIZE(nstates) bytes * size_t bufSize - sanity check of buffer's size * uint64_t providerID - registry Entry ID of the reporting service @@ -235,7 +235,7 @@ do { \ IOStateReportValues *__rep; \ IOReportElement *__elem; \ if ((bufSize) >= STATEREPORT_BUFSIZE(nstates)) { \ - for (unsigned __no = 0; __no < (nstates); __no++) { \ + for (uint16_t __no = 0; __no < (nstates); __no++) { \ __elem = &(__info->elem[__no]); \ __rep = (IOStateReportValues *) &(__elem->values); \ __elem->provider_id = (providerID); \ @@ -577,8 +577,8 @@ typedef struct { /* * Initialize a HistogramReport buffer. Supports only linear scale histogram. * - * int nbuckets - number of buckets data is combined into - * uint32_t bucketWidth - size of each bucket + * uint16_t nbuckets - number of buckets data is combined into + * uint32_t bucketWidth - size of each bucket * void* buffer - ptr to HISTREPORT_BUFSIZE(nbuckets) bytes * size_t bufSize - sanity check of buffer's size * uint64_t providerID - registry Entry ID of the reporting service @@ -596,7 +596,7 @@ do { \ IOHistogramReportValues *__rep; \ if ((bufSize) >= HISTREPORT_BUFSIZE(nbuckets)) { \ __info->bucketWidth = (bktSize); \ - for (unsigned __no = 0; __no < (nbuckets); __no++) { \ + for (uint16_t __no = 0; __no < (nbuckets); __no++) { \ __elem = &(__info->elem[__no]); \ __rep = (IOHistogramReportValues *) &(__elem->values); \ __elem->provider_id = (providerID); \ diff --git a/iokit/IOKit/IOService.h b/iokit/IOKit/IOService.h index b99bf60b0..c5d489127 100644 --- a/iokit/IOKit/IOService.h +++ b/iokit/IOKit/IOService.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998-2019 Apple Inc. All rights reserved. + * Copyright (c) 1998-2020 Apple Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -54,6 +54,7 @@ #include #include #include +#include extern "C" { #include @@ -136,7 +137,6 @@ extern const OSSymbol * gIOUserClassKey; extern const OSSymbol * gIOUserServerClassKey; extern const OSSymbol * gIOUserServerNameKey; extern const OSSymbol * gIOUserServerTagKey; -extern const OSSymbol * gIOUserServerCDHashKey; extern const OSSymbol * gIOUserUserClientKey; extern const OSSymbol * gIOKitDebugKey; @@ -164,6 +164,9 @@ extern const OSSymbol * gIOInterruptSpecifiersKey; extern const OSSymbol * gIOSupportedPropertiesKey; extern const OSSymbol * gIOUserServicePropertiesKey; +extern const OSSymbol * gIOCompatibilityMatchKey; +extern const OSSymbol * gIOCompatibilityPropertiesKey; +extern const OSSymbol * gIOPathKey; extern const OSSymbol * gIOBSDKey; extern const OSSymbol * gIOBSDNameKey; @@ -176,6 +179,27 @@ extern const OSSymbol * gIOServiceDEXTEntitlementsKey; extern const OSSymbol * gIODriverKitUserClientEntitlementsKey; extern const OSSymbol * gIODriverKitUserClientEntitlementAllowAnyKey; extern const OSSymbol * gIOMatchDeferKey; +extern const OSSymbol * gIOAllCPUInitializedKey; + +#if XNU_KERNEL_PRIVATE && !defined(IOServiceTrace) + +#include +#if (KDEBUG_LEVEL >= KDEBUG_LEVEL_STANDARD) +#define IOServiceTrace(csc, a, b, c, d) do { \ + if(kIOTraceIOService & gIOKitTrace) { \ + KERNEL_DEBUG_CONSTANT(IODBG_IOSERVICE(csc), a, b, c, d, 0); \ + } \ +} while(0) +#else /* (KDEBUG_LEVEL >= KDEBUG_LEVEL_STANDARD) */ +#define IOServiceTrace(csc, a, b, c, d) do { \ + (void)a; \ + (void)b; \ + (void)c; \ + (void)d; \ +} while (0) +#endif /* (KDEBUG_LEVEL >= KDEBUG_LEVEL_STANDARD) */ + +#endif // XNU_KERNEL_PRIVATE && !IOServiceTrace extern SInt32 IOServiceOrdering( const OSMetaClassBase * inObj1, const OSMetaClassBase * inObj2, void * ref ); @@ -223,9 +247,15 @@ typedef IOReturn (^IOServiceInterestHandlerBlock)( uint32_t messageType, IOServi typedef void (*IOServiceApplierFunction)(IOService * service, void * context); typedef void (*OSObjectApplierFunction)(OSObject * object, void * context); +#ifdef __BLOCKS__ +typedef void (^IOServiceApplierBlock)(IOService * service); +typedef void (^OSObjectApplierBlock)(OSObject * object); +#endif /* __BLOCKS__ */ + class IOUserClient; class IOPlatformExpert; +class IOUserServerCheckInToken; /*! @class IOService * @abstract The base class for most I/O Kit families, devices, and drivers. 
@@ -482,8 +512,8 @@ public: private: #if __LP64__ - OSMetaClassDeclareReservedUsed(IOService, 0); - OSMetaClassDeclareReservedUsed(IOService, 1); + OSMetaClassDeclareReservedUsedX86(IOService, 0); + OSMetaClassDeclareReservedUsedX86(IOService, 1); OSMetaClassDeclareReservedUnused(IOService, 2); OSMetaClassDeclareReservedUnused(IOService, 3); OSMetaClassDeclareReservedUnused(IOService, 4); @@ -491,14 +521,14 @@ private: OSMetaClassDeclareReservedUnused(IOService, 6); OSMetaClassDeclareReservedUnused(IOService, 7); #else - OSMetaClassDeclareReservedUsed(IOService, 0); - OSMetaClassDeclareReservedUsed(IOService, 1); - OSMetaClassDeclareReservedUsed(IOService, 2); - OSMetaClassDeclareReservedUsed(IOService, 3); - OSMetaClassDeclareReservedUsed(IOService, 4); - OSMetaClassDeclareReservedUsed(IOService, 5); - OSMetaClassDeclareReservedUsed(IOService, 6); - OSMetaClassDeclareReservedUsed(IOService, 7); + OSMetaClassDeclareReservedUsedX86(IOService, 0); + OSMetaClassDeclareReservedUsedX86(IOService, 1); + OSMetaClassDeclareReservedUsedX86(IOService, 2); + OSMetaClassDeclareReservedUsedX86(IOService, 3); + OSMetaClassDeclareReservedUsedX86(IOService, 4); + OSMetaClassDeclareReservedUsedX86(IOService, 5); + OSMetaClassDeclareReservedUsedX86(IOService, 6); + OSMetaClassDeclareReservedUsedX86(IOService, 7); #endif OSMetaClassDeclareReservedUnused(IOService, 8); @@ -794,7 +824,7 @@ public: * @param priority A constant ordering all notifications of a each type. * @result An instance of an IONotifier object that can be used to control or destroy the notification request. */ - static IONotifier * addNotification( + static OSPtr addNotification( const OSSymbol * type, OSDictionary * matching, IOServiceNotificationHandler handler, void * target, void * ref = NULL, @@ -850,16 +880,21 @@ public: * @param timeout The maximum time to wait in nanoseconds. Default is to wait forever. * @result A published IOService object matching the supplied dictionary. waitForMatchingService returns a reference to the IOService which should be released by the caller. (Differs from waitForService() which does not retain the returned object.) */ - static IOService * waitForMatchingService( OSDictionary * matching, + static OSPtr waitForMatchingService( OSDictionary * matching, uint64_t timeout = UINT64_MAX); +#ifdef XNU_KERNEL_PRIVATE + static IOService * waitForMatchingServiceWithToken( OSDictionary * matching, + uint64_t timeout, IOUserServerCheckInToken * token ); +#endif + /*! @function getMatchingServices * @abstract Finds the set of current published IOService objects matching a matching dictionary. * @discussion Provides a method of finding the current set of published IOService objects matching the supplied matching dictionary. * @param matching The matching dictionary describing the desired IOService objects. * @result An instance of an iterator over a set of IOService objects. To be released by the caller. */ - static OSIterator * getMatchingServices( OSDictionary * matching ); + static OSPtr getMatchingServices( OSDictionary * matching ); /*! @function copyMatchingService * @abstract Finds one of the current published IOService objects matching a matching dictionary. @@ -867,7 +902,7 @@ public: * @param matching The matching dictionary describing the desired IOService object. * @result The IOService object or NULL. To be released by the caller. 
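waitForMatchingService() still blocks until a match is published or the timeout expires, but it now hands back its retained reference through an OSPtr. A sketch, assuming a build with IOKIT_ENABLE_SHARED_PTR (so .get() passes the raw dictionary pointer); the "IOMedia" class name and the ten-second timeout are arbitrary example values.

static OSPtr<IOService>
waitBrieflyForMedia(void)
{
	OSPtr<IOService> service;
	OSPtr<OSDictionary> matching = IOService::serviceMatching("IOMedia");

	if (matching) {
		// The timeout is expressed in nanoseconds; omitting it waits forever.
		service = IOService::waitForMatchingService(matching.get(),
		    10ULL * 1000 * 1000 * 1000 /* 10 s */);
	}
	return service;   // retained; dropped when the caller's OSPtr goes away
}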
*/ - static IOService * copyMatchingService( OSDictionary * matching ); + static OSPtr copyMatchingService( OSDictionary * matching ); public: /* Helpers to make matching dictionaries for simple cases, @@ -880,7 +915,7 @@ public: * @param table If zero, serviceMatching creates a matching dictionary and returns a reference to it, otherwise the matching properties are added to the specified dictionary. * @result The matching dictionary created, or passed in, is returned on success, or zero on failure. */ - static OSDictionary * serviceMatching( const char * className, + static OSPtr serviceMatching( const char * className, OSDictionary * table = NULL ); /*! @function serviceMatching @@ -890,7 +925,7 @@ public: * @param table If zero, serviceMatching creates a matching dictionary and returns a reference to it, otherwise the matching properties are added to the specified dictionary. * @result The matching dictionary created, or passed in, is returned on success, or zero on failure. */ - static OSDictionary * serviceMatching( const OSString * className, + static OSPtr serviceMatching( const OSString * className, OSDictionary * table = NULL ); /*! @function nameMatching @@ -900,7 +935,7 @@ public: * @param table If zero, nameMatching creates a matching dictionary and returns a reference to it, otherwise the matching properties are added to the specified dictionary. * @result The matching dictionary created, or passed in, is returned on success, or zero on failure. */ - static OSDictionary * nameMatching( const char * name, + static OSPtr nameMatching( const char * name, OSDictionary * table = NULL ); /*! @function nameMatching @@ -910,7 +945,7 @@ public: * @param table If zero, nameMatching creates a matching dictionary and returns a reference to it, otherwise the matching properties are added to the specified dictionary. * @result The matching dictionary created, or passed in, is returned on success, or zero on failure. */ - static OSDictionary * nameMatching( const OSString* name, + static OSPtr nameMatching( const OSString* name, OSDictionary * table = NULL ); /*! @function resourceMatching @@ -920,7 +955,7 @@ public: * @param table If zero, resourceMatching creates a matching dictionary and returns a reference to it, otherwise the matching properties are added to the specified dictionary. * @result The matching dictionary created, or passed in, is returned on success, or zero on failure. */ - static OSDictionary * resourceMatching( const char * name, + static OSPtr resourceMatching( const char * name, OSDictionary * table = NULL ); /*! @function resourceMatching @@ -930,7 +965,7 @@ public: * @param table If zero, resourceMatching creates a matching dictionary and returns a reference to it, otherwise the matching properties are added to the specified dictionary. * @result The matching dictionary created, or passed in, is returned on success, or zero on failure. */ - static OSDictionary * resourceMatching( const OSString * name, + static OSPtr resourceMatching( const OSString * name, OSDictionary * table = NULL ); @@ -942,7 +977,7 @@ public: * @param table If zero, nameMatching will create a matching dictionary and return a reference to it, otherwise the matching properties are added to the specified dictionary. * @result The matching dictionary created, or passed in, is returned on success, or zero on failure. 
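The same pattern applies to the non-blocking lookup helpers above; here nameMatching() is combined with copyMatchingService(), again assuming a shared-pointer build. The nub name passed in is whatever the caller wants to look up.

static OSPtr<IOService>
findNamedNub(const char * nubName)
{
	OSPtr<OSDictionary> matching = IOService::nameMatching(nubName);

	if (!matching) {
		return OSPtr<IOService>();
	}
	// copyMatchingService() returns a retained reference (or NULL); the
	// returned OSPtr owns it and releases it when it goes out of scope.
	return IOService::copyMatchingService(matching.get());
}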
*/ - static OSDictionary * propertyMatching( const OSSymbol * key, const OSObject * value, + static OSPtr propertyMatching( const OSSymbol * key, const OSObject * value, OSDictionary * table = NULL ); /*! @function registryEntryIDMatching @@ -962,7 +997,7 @@ public: * @param table The matching properties are added to the specified dictionary, which must be non-zero. * @result The location matching dictionary created is returned on success, or zero on failure. */ - static OSDictionary * addLocation( OSDictionary * table ); + static OSPtr addLocation( OSDictionary * table ); /* Helpers for matching dictionaries. */ @@ -1031,14 +1066,14 @@ public: * @discussion For those few IOService objects that obtain service from multiple providers, this method supplies an iterator over a client's providers. * @result An iterator over the providers of the client, or zero if there is a resource failure. The iterator must be released when the iteration is finished. All objects returned by the iteration are retained while the iterator is valid, though they may no longer be attached during the iteration. */ - virtual OSIterator * getProviderIterator( void ) const; + virtual OSPtr getProviderIterator( void ) const; /*! @function getOpenProviderIterator * @abstract Returns an iterator over an client's providers that are currently opened by the client. * @discussion For those few IOService objects that obtain service from multiple providers, this method supplies an iterator over a client's providers, locking each in turn with @link lockForArbitration lockForArbitration@/link and returning those that have been opened by the client. * @result An iterator over the providers the client has open, or zero if there is a resource failure. The iterator must be released when the iteration is finished. All objects returned by the iteration are retained while the iterator is valid, and the current entry in the iteration is locked with lockForArbitration, protecting it from state changes. */ - virtual OSIterator * getOpenProviderIterator( void ) const; + virtual OSPtr getOpenProviderIterator( void ) const; /*! @function getClient * @abstract Returns an IOService object's primary client. @@ -1052,14 +1087,14 @@ public: * @discussion For IOService objects that may have multiple clients, this method supplies an iterator over a provider's clients. * @result An iterator over the clients of the provider, or zero if there is a resource failure. The iterator must be released when the iteration is finished. All objects returned by the iteration are retained while the iterator is valid, though they may no longer be attached during the iteration. */ - virtual OSIterator * getClientIterator( void ) const; + virtual OSPtr getClientIterator( void ) const; /*! @function getOpenClientIterator * @abstract Returns an iterator over a provider's clients that currently have opened the provider. * @discussion For IOService objects that may have multiple clients, this method supplies an iterator over a provider's clients, locking each in turn with @link lockForArbitration lockForArbitration@/link and returning those that have opened the provider. * @result An iterator over the clients that have opened the provider, or zero if there is a resource failure. The iterator must be released when the iteration is finished. All objects returned by the iteration are retained while the iterator is valid, and the current entry in the iteration is locked with lockForArbitration, protecting it from state changes. 
*/ - virtual OSIterator * getOpenClientIterator( void ) const; + virtual OSPtr getOpenClientIterator( void ) const; /*! @function callPlatformFunction * @abstract Calls the platform function with the given name. @@ -1283,12 +1318,12 @@ public: virtual IOReturn messageClients( UInt32 type, void * argument = NULL, vm_size_t argSize = 0 ); - virtual IONotifier * registerInterest( const OSSymbol * typeOfInterest, + virtual OSPtr registerInterest( const OSSymbol * typeOfInterest, IOServiceInterestHandler handler, void * target, void * ref = NULL ); #ifdef __BLOCKS__ - IONotifier * registerInterest(const OSSymbol * typeOfInterest, + OSPtr registerInterest(const OSSymbol * typeOfInterest, IOServiceInterestHandlerBlock handler); #endif /* __BLOCKS__ */ @@ -1298,6 +1333,11 @@ public: virtual void applyToClients( IOServiceApplierFunction applier, void * context ); +#ifdef __BLOCKS__ + void applyToProviders(IOServiceApplierBlock applier); + void applyToClients(IOServiceApplierBlock applier); +#endif /* __BLOCKS__ */ + virtual void applyToInterested( const OSSymbol * typeOfInterest, OSObjectApplierFunction applier, void * context ); @@ -1325,6 +1365,14 @@ public: UInt32 type, LIBKERN_RETURNS_RETAINED IOUserClient ** handler ); + IOReturn newUserClient( task_t owningTask, void * securityID, + UInt32 type, OSDictionary * properties, + OSSharedPtr& handler ); + + IOReturn newUserClient( task_t owningTask, void * securityID, + UInt32 type, + OSSharedPtr& handler ); + /* Return code utilities */ /*! @function stringFromReturn @@ -1355,13 +1403,8 @@ public: /* overrides */ virtual bool serializeProperties( OSSerialize * s ) const APPLE_KEXT_OVERRIDE; -#ifdef KERNEL_PRIVATE -/* Apple only SPI to control CPU low power modes */ - void setCPUSnoopDelay(UInt32 ns); - UInt32 getCPUSnoopDelay(); -#endif - void requireMaxBusStall(UInt32 ns); - void requireMaxInterruptDelay(uint32_t ns); + IOReturn requireMaxBusStall(UInt32 ns); + IOReturn requireMaxInterruptDelay(uint32_t ns); /* * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* * * * * * * * * * * * Internals * * * * * * * * * * * */ @@ -1375,7 +1418,8 @@ public: static void setPMRootDomain( class IOPMrootDomain * rootDomain ); static IOReturn catalogNewDrivers( OSOrderedSet * newTables ); uint64_t getAccumulatedBusyTime( void ); - static void updateConsoleUsers(OSArray * consoleUsers, IOMessage systemMessage); + static void updateConsoleUsers(OSArray * consoleUsers, IOMessage systemMessage, + bool afterUserspaceReboot = false); static void consoleLockTimer(thread_call_param_t p0, thread_call_param_t p1); void setTerminateDefer(IOService * provider, bool defer); uint64_t getAuthorizationID( void ); @@ -1384,18 +1428,22 @@ public: void scheduleFinalize(bool now); static void willShutdown(); static void startDeferredMatches(); - static void kextdLaunched(); + static void iokitDaemonLaunched(); + void resetRematchProperties(); + bool hasUserServer() const; + static void userSpaceWillReboot(); + static void userSpaceDidReboot(); private: static IOReturn waitMatchIdle( UInt32 ms ); - static IONotifier * installNotification( + static OSPtr installNotification( const OSSymbol * type, OSDictionary * matching, IOServiceMatchingNotificationHandler handler, void * target, void * ref, SInt32 priority, - LIBKERN_RETURNS_RETAINED OSIterator ** existing ); + LIBKERN_RETURNS_RETAINED OSIterator ** existing); #if !defined(__LP64__) - static IONotifier * installNotification( + static OSPtr installNotification( const OSSymbol * type, OSDictionary * matching, 
IOServiceNotificationHandler handler, void * target, void * ref, @@ -1411,7 +1459,7 @@ private: bool checkResource( OSObject * matching ); APPLE_KEXT_COMPATIBILITY_VIRTUAL - void probeCandidates( OSOrderedSet * matches ); + void probeCandidates( LIBKERN_CONSUMED OSOrderedSet * matches ); APPLE_KEXT_COMPATIBILITY_VIRTUAL bool startCandidate( IOService * candidate ); @@ -1442,16 +1490,16 @@ private: bool matchInternal(OSDictionary * table, uint32_t options, unsigned int * did); static bool instanceMatch(const OSObject * entry, void * context); - static OSObject * copyExistingServices( OSDictionary * matching, + static OSPtr copyExistingServices( OSDictionary * matching, IOOptionBits inState, IOOptionBits options = 0 ); - static IONotifier * setNotification( + static OSPtr setNotification( const OSSymbol * type, OSDictionary * matching, IOServiceMatchingNotificationHandler handler, void * target, void * ref, SInt32 priority = 0 ); - static IONotifier * doInstallNotification( + static OSPtr doInstallNotification( const OSSymbol * type, OSDictionary * matching, IOServiceMatchingNotificationHandler handler, void * target, void * ref, @@ -1460,11 +1508,15 @@ private: static bool syncNotificationHandler( void * target, void * ref, IOService * newService, IONotifier * notifier ); + static void userServerCheckInTokenNotificationHandler( + IOUserServerCheckInToken * token, + void * ref); + APPLE_KEXT_COMPATIBILITY_VIRTUAL void deliverNotification( const OSSymbol * type, IOOptionBits orNewState, IOOptionBits andNewState ); - OSArray * copyNotifiers(const OSSymbol * type, + OSPtr copyNotifiers(const OSSymbol * type, IOOptionBits orNewState, IOOptionBits andNewState); bool invokeNotifiers(OSArray * willSend[]); @@ -1921,10 +1973,12 @@ public: bool assertPMDriverCall( IOPMDriverCallEntry * callEntry, IOOptionBits method, const IOPMinformee * inform = NULL, IOOptionBits options = 0 ); void deassertPMDriverCall( IOPMDriverCallEntry * callEntry ); IOReturn changePowerStateWithOverrideTo( IOPMPowerStateIndex ordinal, IOPMRequestTag tag ); + IOReturn changePowerStateWithTagToPriv( IOPMPowerStateIndex ordinal, IOPMRequestTag tag ); + IOReturn changePowerStateWithTagTo( IOPMPowerStateIndex ordinal, IOPMRequestTag tag ); IOReturn changePowerStateForRootDomain( IOPMPowerStateIndex ordinal ); IOReturn setIgnoreIdleTimer( bool ignore ); IOReturn quiescePowerTree( void * target, IOPMCompletionAction action, void * param ); - uint32_t getPowerStateForClient( const OSSymbol * client ); + IOPMPowerStateIndex getPowerStateForClient( const OSSymbol * client ); static const char * getIOMessageString( uint32_t msg ); static void setAdvisoryTickleEnable( bool enable ); void reset_watchdog_timer(IOService *obj, int timeout); @@ -2020,7 +2074,7 @@ private: void addPowerChild1( IOPMRequest * request ); void addPowerChild2( IOPMRequest * request ); void addPowerChild3( IOPMRequest * request ); - void adjustPowerState( uint32_t clamp = 0 ); + void adjustPowerState( IOPMPowerStateIndex clamp = 0 ); void handlePMstop( IOPMRequest * request ); void handleRegisterPowerDriver( IOPMRequest * request ); bool handleAcknowledgePowerChange( IOPMRequest * request ); @@ -2036,7 +2090,7 @@ private: bool actionPMWorkQueueRetire( IOPMRequest * request, IOPMWorkQueue * queue ); bool actionPMRequestQueue( IOPMRequest * request, IOPMRequestQueue * queue ); bool actionPMReplyQueue( IOPMRequest * request, IOPMRequestQueue * queue ); - bool actionPMCompletionQueue( IOPMRequest * request, IOPMCompletionQueue * queue ); + bool actionPMCompletionQueue( 
LIBKERN_CONSUMED IOPMRequest * request, IOPMCompletionQueue * queue ); bool notifyInterestedDrivers( void ); void notifyInterestedDriversDone( void ); bool notifyControllingDriver( void ); @@ -2050,9 +2104,9 @@ private: void notifyRootDomain( void ); void notifyRootDomainDone( void ); void cleanClientResponses( bool logErrors ); - void updatePowerClient( const OSSymbol * client, uint32_t powerState ); + void updatePowerClient( const OSSymbol * client, IOPMPowerStateIndex powerState ); void removePowerClient( const OSSymbol * client ); - IOReturn requestPowerState( const OSSymbol * client, uint32_t state ); + IOReturn requestPowerState( const OSSymbol * client, IOPMPowerStateIndex state, IOPMRequestTag tag = 0 ); IOReturn requestDomainPower( IOPMPowerStateIndex ourPowerState, IOOptionBits options = 0 ); IOReturn configurePowerStatesReport( IOReportConfigureAction action, void *result ); IOReturn updatePowerStatesReport( IOReportConfigureAction action, void *result, void *destination ); @@ -2062,4 +2116,13 @@ private: #endif /* XNU_KERNEL_PRIVATE */ }; +#ifdef PRIVATE + +class IOServiceCompatibility : public IOService +{ + OSDeclareDefaultStructors(IOServiceCompatibility); +}; + +#endif /* PRIVATE */ + #endif /* ! _IOKIT_IOSERVICE_H */ diff --git a/iokit/IOKit/IOSharedDataQueue.h b/iokit/IOKit/IOSharedDataQueue.h index c956ce774..e634da8f8 100644 --- a/iokit/IOKit/IOSharedDataQueue.h +++ b/iokit/IOKit/IOSharedDataQueue.h @@ -32,6 +32,7 @@ #define DISABLE_DATAQUEUE_WARNING /* IODataQueue is deprecated, please use IOSharedDataQueue instead */ #include +#include #undef DISABLE_DATAQUEUE_WARNING @@ -87,7 +88,7 @@ public: * @param size The size of the data queue memory region. * @result Returns the newly allocated IOSharedDataQueue instance. Zero is returned on failure. */ - static IOSharedDataQueue *withCapacity(UInt32 size); + static OSPtr withCapacity(UInt32 size); /*! * @function withEntries @@ -97,7 +98,7 @@ public: * @param entrySize Size of each entry. * @result Reeturns the newly allocated IOSharedDataQueue instance. Zero is returned on failure. */ - static IOSharedDataQueue *withEntries(UInt32 numEntries, UInt32 entrySize); + static OSPtr withEntries(UInt32 numEntries, UInt32 entrySize); /*! * @function initWithCapacity @@ -114,7 +115,7 @@ public: * @discussion The IOMemoryDescriptor instance returned by this method is intended to be mapped into a user process. This is the memory region that the IODataQueueClient code operates on. * @result Returns a newly allocated IOMemoryDescriptor for the IODataQueueMemory region. Returns zero on failure. */ - virtual IOMemoryDescriptor *getMemoryDescriptor() APPLE_KEXT_OVERRIDE; + virtual OSPtr getMemoryDescriptor() APPLE_KEXT_OVERRIDE; /*! * @function peek diff --git a/iokit/IOKit/IOSubMemoryDescriptor.h b/iokit/IOKit/IOSubMemoryDescriptor.h index 42b10d913..fae29ba4e 100644 --- a/iokit/IOKit/IOSubMemoryDescriptor.h +++ b/iokit/IOKit/IOSubMemoryDescriptor.h @@ -30,6 +30,7 @@ #define _IOSUBMEMORYDESCRIPTOR_H #include +#include /*! @class IOSubMemoryDescriptor : public IOMemoryDescriptor * @abstract The IOSubMemoryDescriptor object describes a memory area made up of a portion of another IOMemoryDescriptor. @@ -56,7 +57,7 @@ public: * kIOMemoryDirectionMask (options:direction) This nibble indicates the I/O direction to be associated with the descriptor, which may affect the operation of the prepare and complete methods on some architectures. * @result The created IOMemoryDescriptor on success, to be released by the caller, or zero on failure. 
*/ - static IOSubMemoryDescriptor * withSubRange(IOMemoryDescriptor *of, + static OSPtr withSubRange(IOMemoryDescriptor *of, IOByteCount offset, IOByteCount length, IOOptionBits options); diff --git a/iokit/IOKit/IOTimeStamp.h b/iokit/IOKit/IOTimeStamp.h index 95d08197e..2040caaf6 100644 --- a/iokit/IOKit/IOTimeStamp.h +++ b/iokit/IOKit/IOTimeStamp.h @@ -35,6 +35,11 @@ IOTimeStampStartConstant(unsigned int csc, uintptr_t a = 0, uintptr_t b = 0, uintptr_t c = 0, uintptr_t d = 0) { + (void)csc; + (void)a; + (void)b; + (void)c; + (void)d; KERNEL_DEBUG_CONSTANT(((uint32_t)csc) | DBG_FUNC_START, a, b, c, d, 0); } @@ -43,6 +48,11 @@ IOTimeStampEndConstant(uintptr_t csc, uintptr_t a = 0, uintptr_t b = 0, uintptr_t c = 0, uintptr_t d = 0) { + (void)csc; + (void)a; + (void)b; + (void)c; + (void)d; KERNEL_DEBUG_CONSTANT(((uint32_t)csc) | DBG_FUNC_END, a, b, c, d, 0); } @@ -51,9 +61,95 @@ IOTimeStampConstant(uintptr_t csc, uintptr_t a = 0, uintptr_t b = 0, uintptr_t c = 0, uintptr_t d = 0) { + (void)csc; + (void)a; + (void)b; + (void)c; + (void)d; KERNEL_DEBUG_CONSTANT(((uint32_t)csc) | DBG_FUNC_NONE, a, b, c, d, 0); } +static inline void +IOTimeStampConstantFiltered(uintptr_t csc, + uintptr_t a = 0, uintptr_t b = 0, + uintptr_t c = 0, uintptr_t d = 0) +{ + (void)csc; + (void)a; + (void)b; + (void)c; + (void)d; + KERNEL_DEBUG_CONSTANT_FILTERED(((uint32_t)csc) | DBG_FUNC_NONE, a, b, c, d, 0); +} + +/* + * Objects of this class will trace a filtered interval for their lifetime. + * The constructor takes in the start tracepoint's arguments. + * By default, the end tracepoint emits no additional arguments, + * but you can set them with setEndCodes. + * Alternatively, you can set them individually with setA...setD + */ +class IOTimeStampIntervalConstantFiltered +{ +public: + IOTimeStampIntervalConstantFiltered(unsigned int csc, + uintptr_t arg1 = 0, uintptr_t arg2 = 0, + uintptr_t arg3 = 0, uintptr_t arg4 = 0) + : _csc(csc), _endArg1(0), _endArg2(0), _endArg3(0), _endArg4(0) + { + (void)csc; + (void)arg1; + (void)arg2; + (void)arg3; + (void)arg4; + KDBG_FILTERED(((uint32_t)csc) | DBG_FUNC_START, arg1, arg2, arg3, arg4); + } + // Setters for the end debug codes + void + setEndCodes(uintptr_t arg1 = 0, uintptr_t arg2 = 0, + uintptr_t arg3 = 0, uintptr_t arg4 = 0) + { + _endArg1 = arg1; + _endArg2 = arg2; + _endArg3 = arg3; + _endArg4 = arg4; + } + void + setEndArg1(uintptr_t arg) + { + _endArg1 = arg; + } + void + setEndArg2(uintptr_t arg) + { + _endArg2 = arg; + } + void + setEndArg3(uintptr_t arg) + { + _endArg3 = arg; + } + void + setEndArg4(uintptr_t arg) + { + _endArg4 = arg; + } + ~IOTimeStampIntervalConstantFiltered() + { + KDBG_FILTERED(((uint32_t)_csc) | DBG_FUNC_END, _endArg1, _endArg2, _endArg3, _endArg4); + } +private: +#if (KDEBUG_LEVEL < KDEBUG_LEVEL_STANDARD) + __unused +#endif /* KDEBUG_LEVEL < KDEBUG_LEVEL_STANDARD */ + unsigned int _csc; + // End debug codes +#if (KDEBUG_LEVEL < KDEBUG_LEVEL_STANDARD) + __unused +#endif /* KDEBUG_LEVEL < KDEBUG_LEVEL_STANDARD */ + uintptr_t _endArg1, _endArg2, _endArg3, _endArg4; +}; + #if KDEBUG static inline void @@ -61,6 +157,11 @@ IOTimeStampStart(uintptr_t csc, uintptr_t a = 0, uintptr_t b = 0, uintptr_t c = 0, uintptr_t d = 0) { + (void)csc; + (void)a; + (void)b; + (void)c; + (void)d; KERNEL_DEBUG(((uint32_t)csc) | DBG_FUNC_START, a, b, c, d, 0); } @@ -69,6 +170,11 @@ IOTimeStampEnd(uintptr_t csc, uintptr_t a = 0, uintptr_t b = 0, uintptr_t c = 0, uintptr_t d = 0) { + (void)csc; + (void)a; + (void)b; + (void)c; + (void)d; KERNEL_DEBUG(((uint32_t)csc) 
| DBG_FUNC_END, a, b, c, d, 0); } @@ -77,6 +183,11 @@ IOTimeStamp(uintptr_t csc, uintptr_t a = 0, uintptr_t b = 0, uintptr_t c = 0, uintptr_t d = 0) { + (void)csc; + (void)a; + (void)b; + (void)c; + (void)d; KERNEL_DEBUG(((uint32_t)csc) | DBG_FUNC_NONE, a, b, c, d, 0); } @@ -117,6 +228,7 @@ IOTimeStamp(uintptr_t csc, #define IODBG_POWER(code) (KDBG_CODE(DBG_IOKIT, DBG_IOPOWER, code)) #define IODBG_IOSERVICE(code) (KDBG_CODE(DBG_IOKIT, DBG_IOSERVICE, code)) #define IODBG_IOREGISTRY(code) (KDBG_CODE(DBG_IOKIT, DBG_IOREGISTRY, code)) +#define IODBG_IOMDPA(code) (KDBG_CODE(DBG_IOKIT, DBG_IOMDPA, code)) /* IOKit specific codes - within each subclass */ @@ -168,6 +280,19 @@ IOTimeStamp(uintptr_t csc, /* DBG_IOKIT/DBG_IOMCURS codes */ /* DBG_IOKIT/DBG_IOMDESC codes */ +#define IOMDESC_WIRE 1 /* 0x05060004 */ +#define IOMDESC_PREPARE 2 /* 0x05060008 */ +#define IOMDESC_MAP 3 /* 0x0506000C */ +#define IOMDESC_UNMAP 4 /* 0x05060010 */ +#define IOMDESC_DMA_MAP 5 /* 0x05060014 */ +#define IOMDESC_DMA_UNMAP 6 /* 0x05060018 */ +#define IOMDESC_COMPLETE 7 /* 0x0506001C */ + +/* DBG_IOKIT/DBG_IOMDPA */ +#define IOMDPA_MAPPED 1 /* 0x05410004 */ +#define IOMDPA_UNMAPPED 2 /* 0x05410008 */ +#define IOMDPA_SEGMENTS_PAGE 3 /* 0x0541000C */ +#define IOMDPA_SEGMENTS_LONG 4 /* 0x05410010 */ /* DBG_IOKIT/DBG_IOPOWER codes */ // See IOKit/pwr_mgt/IOPMlog.h for the power management codes diff --git a/iokit/IOKit/IOTimerEventSource.h b/iokit/IOKit/IOTimerEventSource.h index f4987b69b..4b907bb86 100644 --- a/iokit/IOKit/IOTimerEventSource.h +++ b/iokit/IOKit/IOTimerEventSource.h @@ -44,6 +44,7 @@ __BEGIN_DECLS #include __END_DECLS +#include #include #include @@ -160,7 +161,7 @@ public: typedef void (^ActionBlock)(IOTimerEventSource *sender); #endif /* __BLOCKS__ */ - static IOTimerEventSource * + static OSPtr timerEventSource(OSObject *owner, Action action = NULL); /*! @function timerEventSource @@ -169,7 +170,7 @@ public: * @param owner The object that that will be passed to the Action callback. * @param action 'C' Function pointer for the callout routine of this event source. */ - static IOTimerEventSource * + static OSPtr timerEventSource(uint32_t options, OSObject *owner, Action action = NULL); #ifdef __BLOCKS__ @@ -179,12 +180,12 @@ public: * @param inOwner The object that that will be passed to the Action callback. * @param action Block for the callout routine of this event source. 
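A short sketch of the scoped filtered-interval tracer added in the IOTimeStamp.h hunk above; the IOMDPA debug code comes from this patch, while bytesRequested and bytesMapped are illustrative locals:

    {
        // Constructor emits the DBG_FUNC_START tracepoint via KDBG_FILTERED.
        IOTimeStampIntervalConstantFiltered trace(IODBG_IOMDPA(IOMDPA_MAPPED), bytesRequested);
        // ... perform the mapping work being measured ...
        trace.setEndCodes(bytesMapped);
    }   // Destructor emits the matching DBG_FUNC_END tracepoint with the end codes.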
*/ - static IOTimerEventSource * + static OSPtr timerEventSource(uint32_t options, OSObject *inOwner, ActionBlock action); #endif /* __BLOCKS__ */ #if XNU_KERNEL_PRIVATE - __inline__ void invokeAction(IOTimerEventSource::Action action, IOTimerEventSource * ts, + __inline__ void invokeAction(IOEventSource::Action action, IOTimerEventSource * ts, OSObject * owner, IOWorkLoop * workLoop); #endif /* XNU_KERNEL_PRIVATE */ @@ -316,9 +317,9 @@ private: static void timeoutSignaled(void *self, void *c); private: - OSMetaClassDeclareReservedUsed(IOTimerEventSource, 0); - OSMetaClassDeclareReservedUsed(IOTimerEventSource, 1); - OSMetaClassDeclareReservedUsed(IOTimerEventSource, 2); + OSMetaClassDeclareReservedUsedX86(IOTimerEventSource, 0); + OSMetaClassDeclareReservedUsedX86(IOTimerEventSource, 1); + OSMetaClassDeclareReservedUsedX86(IOTimerEventSource, 2); OSMetaClassDeclareReservedUnused(IOTimerEventSource, 3); OSMetaClassDeclareReservedUnused(IOTimerEventSource, 4); OSMetaClassDeclareReservedUnused(IOTimerEventSource, 5); diff --git a/iokit/IOKit/IOTypes.h b/iokit/IOKit/IOTypes.h index d07f14f1b..5f7bf4306 100644 --- a/iokit/IOKit/IOTypes.h +++ b/iokit/IOKit/IOTypes.h @@ -49,11 +49,15 @@ extern "C" { #ifndef NULL #if defined (__cplusplus) -#if __cplusplus >= 201103L +#ifdef XNU_KERNEL_PRIVATE +#define NULL nullptr +#else +#if __cplusplus >= 201103L && (defined(__arm__) || defined(__arm64__)) #define NULL nullptr #else #define NULL 0 #endif +#endif #else #define NULL ((void *)0) #endif @@ -91,8 +95,10 @@ typedef vm_address_t IOVirtualAddress; #if !defined(__arm__) && !defined(__i386__) && !(defined(__x86_64__) && !defined(KERNEL)) && !(defined(__arm64__) && !defined(__LP64__)) typedef IOByteCount64 IOByteCount; +#define PRIIOByteCount PRIu64 #else typedef IOByteCount32 IOByteCount; +#define PRIIOByteCount PRIu32 #endif typedef IOVirtualAddress IOLogicalAddress; @@ -292,4 +298,14 @@ typedef uint64_t IOVirtualAddress; #endif /* PLATFORM_DriverKit */ +enum { + kIOMaxBusStall40usec = 40000, + kIOMaxBusStall30usec = 30000, + kIOMaxBusStall25usec = 25000, + kIOMaxBusStall20usec = 20000, + kIOMaxBusStall10usec = 10000, + kIOMaxBusStall5usec = 5000, + kIOMaxBusStallNone = 0, +}; + #endif /* ! __IOKIT_IOTYPES_H */ diff --git a/iokit/IOKit/IOUserClient.h b/iokit/IOKit/IOUserClient.h index eba181d07..fd45bf3b0 100644 --- a/iokit/IOKit/IOUserClient.h +++ b/iokit/IOKit/IOUserClient.h @@ -37,6 +37,7 @@ #include #include #include +#include #if IOKITSTATS #include @@ -166,6 +167,24 @@ enum { kIOExternalMethodArgumentsCurrentVersion = IO_EXTERNAL_METHOD_ARGUMENTS_CURRENT_VERSION }; +#if PRIVATE +typedef uintptr_t io_filter_policy_t; +enum io_filter_type_t { + io_filter_type_external_method = 1, + io_filter_type_external_async_method = 2, + io_filter_type_trap = 3, +}; + +typedef IOReturn (*io_filter_resolver_t) (task_t task, IOUserClient * client, uint32_t type, io_filter_policy_t *filterp); +typedef IOReturn (*io_filter_applier_t) (io_filter_policy_t filter, io_filter_type_t type, uint32_t selector); +typedef void (*io_filter_release_t) (io_filter_policy_t filter); +struct io_filter_callbacks { + const io_filter_resolver_t io_filter_resolver; + const io_filter_applier_t io_filter_applier; + const io_filter_release_t io_filter_release; +}; +struct IOUCFilterPolicy; +#endif /* PRIVATE */ /*! 
* @class IOUserClient @@ -179,7 +198,11 @@ class IOUserClient : public IOService friend class IOStatistics; #endif +#if XNU_KERNEL_PRIVATE +public: +#else /* XNU_KERNEL_PRIVATE */ protected: +#endif /* !XNU_KERNEL_PRIVATE */ /*! @struct ExpansionData * @discussion This structure will be used to expand the capablilties of this class in the future. */ @@ -188,6 +211,11 @@ protected: IOUserClientCounter *counter; #else void *iokitstatsReserved; +#endif +#if PRIVATE + IOUCFilterPolicy * filterPolicies; +#else + void *iokitFilterReserved; #endif }; @@ -207,14 +235,17 @@ public: UInt8 sharedInstance; UInt8 closed; UInt8 __ipcFinal; - UInt8 messageAppSuspended; + UInt8 messageAppSuspended:1, + defaultLocking:1, + __reservedA:6; volatile SInt32 __ipc; queue_head_t owners; - IOLock * lock; + IORWLock * lock; + IOLock * filterLock; #if __LP64__ - void * __reserved[4]; -#else void * __reserved[3]; +#else + void * __reserved[2]; #endif #else /* XNU_KERNEL_PRIVATE */ @@ -236,8 +267,8 @@ private: OSMetaClassDeclareReservedUnused(IOUserClient, 0); OSMetaClassDeclareReservedUnused(IOUserClient, 1); #else - OSMetaClassDeclareReservedUsed(IOUserClient, 0); - OSMetaClassDeclareReservedUsed(IOUserClient, 1); + OSMetaClassDeclareReservedUsedX86(IOUserClient, 0); + OSMetaClassDeclareReservedUsedX86(IOUserClient, 1); #endif OSMetaClassDeclareReservedUnused(IOUserClient, 2); OSMetaClassDeclareReservedUnused(IOUserClient, 3); @@ -261,15 +292,20 @@ public: static void initialize( void ); static void destroyUserReferences( OSObject * obj ); static bool finalizeUserReferences( OSObject * obj ); - IOMemoryMap * mapClientMemory64( IOOptionBits type, + OSPtr mapClientMemory64( IOOptionBits type, task_t task, IOOptionBits mapFlags = kIOMapAnywhere, mach_vm_address_t atAddress = 0 ); IOReturn registerOwner(task_t task); void noMoreSenders(void); - + io_filter_policy_t filterForTask(task_t task, io_filter_policy_t addFilterPolicy); #endif /* XNU_KERNEL_PRIVATE */ +#if PRIVATE +public: + static IOReturn registerFilterCallbacks(const struct io_filter_callbacks *callbacks, size_t size); +#endif /* PRIVATE */ + protected: static IOReturn sendAsyncResult(OSAsyncReference reference, IOReturn result, void *args[], UInt32 numArgs); @@ -308,10 +344,12 @@ public: static IOReturn clientHasPrivilege( void * securityToken, const char * privilegeName ); - static OSObject * copyClientEntitlement( task_t task, - const char * entitlement ); + static OSPtr copyClientEntitlement(task_t task, const char *entitlement); + static OSPtr copyClientEntitlementVnode(struct vnode *vnode, off_t offset, const char *entitlement); - static OSDictionary * copyClientEntitlements(task_t task); + static OSPtr copyClientEntitlements(task_t task); + static OSPtr copyClientEntitlementsVnode(struct vnode *vnode, off_t offset); + static OSPtr copyEntitlementsFromBlob(void *blob, size_t len); /*! * @function releaseAsyncReference64 @@ -360,10 +398,14 @@ public: IOOptionBits * options, IOMemoryDescriptor ** memory ); + IOReturn clientMemoryForType( UInt32 type, + IOOptionBits * options, + OSSharedPtr& memory ); + #if !__LP64__ private: APPLE_KEXT_COMPATIBILITY_VIRTUAL - IOMemoryMap * mapClientMemory( IOOptionBits type, + OSPtr mapClientMemory( IOOptionBits type, task_t task, IOOptionBits mapFlags = kIOMapAnywhere, IOVirtualAddress atAddress = 0 ); @@ -379,7 +421,7 @@ public: * @param memory The memory descriptor instance previously returned by the implementation of clientMemoryForType(). 
* @result A reference to the first IOMemoryMap instance found in the list of mappings created by IOUserClient from that passed memory descriptor is returned, or zero if none exist. The caller should release this reference. */ - IOMemoryMap * removeMappingForDescriptor(IOMemoryDescriptor * memory); + OSPtr removeMappingForDescriptor(IOMemoryDescriptor * memory); /*! * @function exportObjectToClient @@ -389,7 +431,7 @@ public: * @param clientObj Returned value is the client's port name. */ virtual IOReturn exportObjectToClient(task_t task, - OSObject *obj, io_object_t *clientObj); + LIBKERN_CONSUMED OSObject *obj, io_object_t *clientObj); #if KERNEL_PRIVATE @@ -417,6 +459,9 @@ public: static IOReturn copyObjectForPortNameInTask(task_t task, mach_port_name_t port_name, OSObject **object); + static IOReturn copyObjectForPortNameInTask(task_t task, mach_port_name_t port_name, + OSSharedPtr& object); + /*! * @function adjustPortNameReferencesInTask * Adjust the send rights for a port name created with copyPortNameForObjectInTask(). @@ -446,6 +491,12 @@ public: virtual IOExternalAsyncMethod * getAsyncTargetAndMethodForIndex( LIBKERN_RETURNS_NOT_RETAINED IOService ** targetP, UInt32 index ); + IOExternalMethod * + getTargetAndMethodForIndex( + OSSharedPtr& targetP, UInt32 index ); + IOExternalAsyncMethod * + getAsyncTargetAndMethodForIndex( + OSSharedPtr& targetP, UInt32 index ); // Methods for accessing trap vector - old and new style virtual IOExternalTrap * diff --git a/iokit/IOKit/IOUserServer.h b/iokit/IOKit/IOUserServer.h index 0741ed4cb..a750d3fec 100644 --- a/iokit/IOKit/IOUserServer.h +++ b/iokit/IOKit/IOUserServer.h @@ -73,7 +73,7 @@ struct OSObject_Instantiate_Rpl_Content { kern_return_t __result; uint32_t __pad; uint64_t flags; - char classname[64]; + char classname[128]; uint64_t methods[0]; }; @@ -101,6 +101,7 @@ typedef uint64_t IOTrapMessageBuffer[256]; #include #include #include +#include #include class IOUserServer; @@ -109,6 +110,7 @@ class IODispatchQueue; class IODispatchSource; class IOInterruptDispatchSource; class IOTimerDispatchSource; +class IOUserServerCheckInToken; struct IOPStrings; struct OSObjectUserVars { @@ -116,6 +118,8 @@ struct OSObjectUserVars { IODispatchQueue ** queueArray; OSUserMetaClass * userMeta; OSArray * openProviders; + IOService * controllingDriver; + unsigned long willPowerState; bool willTerminate; bool didTerminate; bool serverDied; @@ -137,6 +141,7 @@ namespace IOServicePH void serverAdd(IOUserServer * server); void serverRemove(IOUserServer * server); void serverAck(IOUserServer * server); +bool serverSlept(void); }; /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ @@ -157,6 +162,7 @@ class IOUserServer : public IOUserClient uint8_t fRootNotifier; uint8_t fSystemPowerAck; uint8_t fSystemOff; + IOUserServerCheckInToken * fCheckInToken; public: @@ -182,17 +188,21 @@ public: static void serviceDidStop(IOService * client, IOService * provider); IOReturn serviceOpen(IOService * provider, IOService * client); IOReturn serviceClose(IOService * provider, IOService * client); + IOReturn serviceSetPowerState(IOService * controllingDriver, IOService * service, IOPMPowerFlags flags, IOPMPowerStateIndex powerState); IOReturn serviceNewUserClient(IOService * service, task_t owningTask, void * securityID, uint32_t type, OSDictionary * properties, IOUserClient ** handler); + IOReturn serviceNewUserClient(IOService * service, task_t owningTask, void * securityID, + uint32_t type, OSDictionary * properties, OSSharedPtr& handler); 
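A sketch of gating a user client on an entitlement with the copyClientEntitlement() variant shown above, assuming IOKIT_ENABLE_SHARED_PTR; MyUserClient and the entitlement string are illustrative:

    bool
    MyUserClient::initWithTask(task_t owningTask, void *securityID, UInt32 type)
    {
        OSSharedPtr<OSObject> entitled =
            IOUserClient::copyClientEntitlement(owningTask, "com.example.driver.allow");
        if (entitled.get() != kOSBooleanTrue) {
            return false;   // calling task lacks the entitlement
        }
        return IOUserClient::initWithTask(owningTask, securityID, type);
    }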
IOReturn exit(const char * reason); - bool serviceMatchesCDHash(IOService *service); + bool serviceMatchesCheckInToken(IOUserServerCheckInToken *token); bool checkEntitlements(IOService * provider, IOService * dext); bool checkEntitlements(OSDictionary * entitlements, OSObject * prop, IOService * provider, IOService * dext); void setTaskLoadTag(OSKext *kext); void setDriverKitUUID(OSKext *kext); + void setCheckInToken(IOUserServerCheckInToken *token); void systemPower(bool powerOff); IOReturn setPowerState(unsigned long state, IOService * service) APPLE_KEXT_OVERRIDE; IOReturn powerStateWillChangeTo(IOPMPowerFlags flags, unsigned long state, IOService * service) APPLE_KEXT_OVERRIDE; @@ -201,6 +211,7 @@ public: IOPStrings * copyInStringArray(const char * string, uint32_t userSize); uint32_t stringArrayIndex(IOPStrings * array, const char * look); IOReturn registerClass(OSClassDescription * desc, uint32_t size, OSUserMetaClass ** cls); + IOReturn registerClass(OSClassDescription * desc, uint32_t size, OSSharedPtr& cls); IOReturn setRootQueue(IODispatchQueue * queue); OSObjectUserVars * varsForObject(OSObject * obj); @@ -225,6 +236,21 @@ public: kern_return_t waitInterruptTrap(void * p1, void * p2, void * p3, void * p4, void * p5, void * p6); }; +typedef void (*IOUserServerCheckInNotificationHandler)(class IOUserServerCheckInToken*, void*); + +class IOUserServerCheckInToken : public OSObject +{ + OSDeclareDefaultStructors(IOUserServerCheckInToken); +public: + static IOUserServerCheckInToken * create(); + void setNoSendersNotification(IOUserServerCheckInNotificationHandler handler, void *handlerArgs); + void clearNotification(); + static void notifyNoSenders(IOUserServerCheckInToken * token); +private: + IOUserServerCheckInNotificationHandler handler; + void *handlerArgs; +}; + extern "C" kern_return_t IOUserServerUEXTTrap(OSObject * object, void * p1, void * p2, void * p3, void * p4, void * p5, void * p6); diff --git a/iokit/IOKit/IOWorkLoop.h b/iokit/IOKit/IOWorkLoop.h index 1d5fa916b..511dfacfc 100644 --- a/iokit/IOKit/IOWorkLoop.h +++ b/iokit/IOKit/IOWorkLoop.h @@ -33,6 +33,7 @@ #include #include #include +#include #include @@ -97,6 +98,12 @@ private: */ bool eventSourcePerformsWork(IOEventSource *inEventSource); +/*! @function releaseEventChain + * @abstract Static function that releases the events in a chain and sets + * their work loops to NULL. + */ + static void releaseEventChain(LIBKERN_CONSUMED IOEventSource *eventChain); + protected: /*! @typedef maintCommandEnum @@ -193,14 +200,14 @@ public: * @abstract Factory member function to construct and intialize a work loop. * @result Returns a workLoop instance if constructed successfully, 0 otherwise. */ - static IOWorkLoop *workLoop(); + static OSPtr workLoop(); /*! @function workLoopWithOptions(IOOptionBits options) * @abstract Factory member function to constuct and intialize a work loop. * @param options Options - kPreciousStack to avoid stack deallocation on paging path. * @result Returns a workLoop instance if constructed successfully, 0 otherwise. */ - static IOWorkLoop *workLoopWithOptions(IOOptionBits options); + static OSPtr workLoopWithOptions(IOOptionBits options); /*! @function init * @discussion Initializes an instance of the workloop. This method creates and initializes the signaling semaphore, the controller gate lock, and spawns the thread that will continue executing. 
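A sketch of the OSPtr-returning workLoop() and timerEventSource() factories from the hunks above, assuming IOKIT_ENABLE_SHARED_PTR; MyDriver::timerFired is an illustrative callback:

    OSSharedPtr<IOWorkLoop> wl = IOWorkLoop::workLoop();
    OSSharedPtr<IOTimerEventSource> timer = IOTimerEventSource::timerEventSource(this,
        OSMemberFunctionCast(IOTimerEventSource::Action, this, &MyDriver::timerFired));
    if (!wl || !timer || (wl->addEventSource(timer.get()) != kIOReturnSuccess)) {
        return false;
    }
    timer->setTimeoutMS(100);
    // The work loop retains the event source; the OSSharedPtrs drop their own
    // references automatically when they go out of scope.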
@@ -349,9 +356,9 @@ protected: OSMetaClassDeclareReservedUnused(IOWorkLoop, 1); OSMetaClassDeclareReservedUnused(IOWorkLoop, 2); #else - OSMetaClassDeclareReservedUsed(IOWorkLoop, 0); - OSMetaClassDeclareReservedUsed(IOWorkLoop, 1); - OSMetaClassDeclareReservedUsed(IOWorkLoop, 2); + OSMetaClassDeclareReservedUsedX86(IOWorkLoop, 0); + OSMetaClassDeclareReservedUsedX86(IOWorkLoop, 1); + OSMetaClassDeclareReservedUsedX86(IOWorkLoop, 2); #endif OSMetaClassDeclareReservedUnused(IOWorkLoop, 3); OSMetaClassDeclareReservedUnused(IOWorkLoop, 4); diff --git a/iokit/IOKit/Makefile b/iokit/IOKit/Makefile index e898d7e4b..02ece6b65 100644 --- a/iokit/IOKit/Makefile +++ b/iokit/IOKit/Makefile @@ -42,7 +42,9 @@ NOT_KF_MI_HEADERS = $(NOT_EXPORT_HEADERS) \ IOSyncer.h AppleKeyStoreInterface.h \ IOStatistics.h IOStatisticsPrivate.h \ IOKernelReporters.h \ - IOInterruptAccounting.h + IOInterruptAccounting.h \ + IOPlatformIO.h \ + IOPMGR.h IOPlatformActions.h # These should be additionally installed in IOKit.framework's public Headers diff --git a/iokit/IOKit/PassthruInterruptController.h b/iokit/IOKit/PassthruInterruptController.h new file mode 100644 index 000000000..80abec844 --- /dev/null +++ b/iokit/IOKit/PassthruInterruptController.h @@ -0,0 +1,79 @@ +/* + * Copyright (c) 2019 Apple Computer, Inc. All rights reserved. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. + * + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ + */ + +#pragma once + +extern "C" { +#include +}; + +#include + +/*! + * @class PassthruInterruptController + * @abstract Trivial IOInterruptController class that passes all IRQs through to a + * "child" driver. + * @discussion Waits for a "child" driver (typically loaded in a kext) to register itself, + * then passes the child driver's IOService pointer back via + * waitForChildController() so that XNU can operate on it directly. 
+ */ +class PassthruInterruptController : public IOInterruptController +{ + OSDeclareDefaultStructors(PassthruInterruptController); + +public: + virtual bool init(void) APPLE_KEXT_OVERRIDE; + + virtual void *waitForChildController(void); + + virtual void setCPUInterruptProperties(IOService *service) APPLE_KEXT_OVERRIDE; + + virtual IOReturn registerInterrupt(IOService *nub, int source, + void *target, + IOInterruptHandler handler, + void *refCon) APPLE_KEXT_OVERRIDE; + + virtual IOReturn getInterruptType(IOService *nub, int source, + int *interruptType) APPLE_KEXT_OVERRIDE; + + virtual IOReturn enableInterrupt(IOService *nub, int source) APPLE_KEXT_OVERRIDE; + virtual IOReturn disableInterrupt(IOService *nub, int source) APPLE_KEXT_OVERRIDE; + virtual IOReturn causeInterrupt(IOService *nub, int source) APPLE_KEXT_OVERRIDE; + + virtual IOReturn handleInterrupt(void *refCon, IOService *nub, + int source) APPLE_KEXT_OVERRIDE; + + virtual void externalInterrupt(void); + +protected: + IOInterruptHandler child_handler; + void *child_target; + void *child_refCon; + IOService *child_nub; + semaphore_t child_sentinel; +}; diff --git a/iokit/IOKit/perfcontrol/IOPerfControl.h b/iokit/IOKit/perfcontrol/IOPerfControl.h index 27e776ee3..dfe90cd2a 100644 --- a/iokit/IOKit/perfcontrol/IOPerfControl.h +++ b/iokit/IOKit/perfcontrol/IOPerfControl.h @@ -10,6 +10,7 @@ #include #include #include +#include struct thread_group; @@ -146,10 +147,71 @@ public: * specific subclass of IOService. * @param args Optional device-specific arguments related to the end of this work item. * @param done Optional Set to false if the work has not yet completed. Drivers are then responsible for - * calling workBegin when the work resumes and workEnd with done set to True when it has completed. + * calling workBegin when the work resumes and workEnd with done set to True when it has completed. A workEnd() call + * without a corresponding workBegin() call is a way to cancel a work item and return token to IOPerfControl. */ virtual void workEnd(IOService *device, uint64_t token, WorkEndArgs *args = nullptr, bool done = true); +/*! + * @function copyWorkContext + * @abstract Return a retained reference to an opaque OSObject, to be released by the driver. This object can + * be used by IOPerfControl to track a work item. This may perform dynamic memory allocation. + * @returns A pointer to an OSObject + */ + OSPtr copyWorkContext(); + +/*! + * @function workSubmitAndBeginWithContext + * @abstract Tell the performance controller that work was submitted and immediately began executing + * @param device The device that is executing the work. Some platforms require device to be a + * specific subclass of IOService. + * @param context An OSObject returned by copyWorkContext(). The context object will be used by IOPerfControl to track + * this work item. + * @param submitArgs Optional device-specific arguments related to the submission of this work item. + * @param beginArgs Optional device-specific arguments related to the start of this work item. + * @returns true if IOPerfControl is tracking this work item, else false. + * @note The workEndWithContext() call is optional if the corresponding workSubmitWithContext() call returned false. + */ + bool workSubmitAndBeginWithContext(IOService *device, OSObject *context, WorkSubmitArgs *submitArgs = nullptr, + WorkBeginArgs *beginArgs = nullptr); + +/*! + * @function workSubmitWithContext + * @abstract Tell the performance controller that work was submitted. 
+ * @param device The device that will execute the work. Some platforms require device to be a + * specific subclass of IOService. + * @param context An OSObject returned by copyWorkContext(). The context object will be used by IOPerfControl to track + * this work item. + * @param args Optional device-specific arguments related to the submission of this work item. + * @returns true if IOPerfControl is tracking this work item, else false. + */ + bool workSubmitWithContext(IOService *device, OSObject *context, WorkSubmitArgs *args = nullptr); + +/*! + * @function workBeginWithContext + * @abstract Tell the performance controller that previously submitted work began executing. + * @param device The device that is executing the work. Some platforms require device to be a + * specific subclass of IOService. + * @param context An OSObject returned by copyWorkContext() and provided to the previous call to workSubmitWithContext(). + * @param args Optional device-specific arguments related to the start of this work item. + * @note The workBeginWithContext() and workEndWithContext() calls are optional if the corresponding workSubmitWithContext() call returned false. + */ + void workBeginWithContext(IOService *device, OSObject *context, WorkBeginArgs *args = nullptr); + +/*! + * @function workEndWithContext + * @abstract Tell the performance controller that previously started work finished executing. + * @param device The device that executed the work. Some platforms require device to be a + * specific subclass of IOService. + * @param context An OSObject returned by copyWorkContext() and provided to the previous call to workSubmitWithContext(). + * @param args Optional device-specific arguments related to the end of this work item. + * @param done Optional Set to false if the work has not yet completed. Drivers are then responsible for + * calling workBegin when the work resumes and workEnd with done set to True when it has completed. + * @note The workEndWithContext() call is optional if the corresponding workSubmitWithContext() call returned false. A workEndWithContext() + * call without a corresponding workBeginWithContext() call is a way to cancel a work item. + */ + void workEndWithContext(IOService *device, OSObject *context, WorkEndArgs *args = nullptr, bool done = true); + /*! * @struct PerfControllerInterface * @discussion Function pointers necessary to register a performance controller. Not for general driver use. 
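A sketch tying together the context-based calls documented above; client is assumed to be an IOPerfControlClient the driver obtained earlier, and device is its IOService:

    OSSharedPtr<OSObject> ctx = client->copyWorkContext();
    if (client->workSubmitWithContext(device, ctx.get())) {
        // ... queue the work item on the hardware ...
        client->workBeginWithContext(device, ctx.get());
        // ... the work item executes ...
        client->workEndWithContext(device, ctx.get());
    }
    // If workSubmitWithContext() returned false, the begin/end calls may be skipped.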
@@ -160,6 +222,8 @@ public: void *thread_group_data; void *work_data; uint32_t work_data_size; + uint32_t started : 1; + uint32_t reserved : 31; }; using RegisterDeviceFunction = IOReturn (*)(IOService *); @@ -203,11 +267,11 @@ private: static constexpr size_t kWorkTableIndexBits = 24; static constexpr size_t kWorkTableMaxSize = (1 << kWorkTableIndexBits) - 1; // - 1 since // kIOPerfControlClientWorkUntracked takes number 0 - static constexpr size_t kWorkTableIndexMask = mask(kWorkTableIndexBits); + static constexpr size_t kWorkTableIndexMask = (const size_t)mask(kWorkTableIndexBits); uint64_t allocateToken(thread_group *thread_group); void deallocateToken(uint64_t token); - bool getEntryForToken(uint64_t token, WorkTableEntry &entry); + WorkTableEntry *getEntryForToken(uint64_t token); void markEntryStarted(uint64_t token, bool started); inline uint64_t tokenToGlobalUniqueToken(uint64_t token); diff --git a/iokit/IOKit/platform/IOPlatformIO.h b/iokit/IOKit/platform/IOPlatformIO.h new file mode 100644 index 000000000..6cf73052e --- /dev/null +++ b/iokit/IOKit/platform/IOPlatformIO.h @@ -0,0 +1,62 @@ +/* + * Copyright (c) 2019 Apple Computer, Inc. All rights reserved. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. + * + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ + */ + +#ifndef _IOKIT_PLATFORM_IOPLATFORMIO_H +#define _IOKIT_PLATFORM_IOPLATFORMIO_H + +extern "C" { +#include +} + +#include + +/*! + * @class IOPlatformIO + * @abstract The base class for platform I/O drivers, such as AppleARMIO. + */ +class IOPlatformIO : public IOService +{ + OSDeclareAbstractStructors(IOPlatformIO); + +public: + virtual bool start(IOService * provider) APPLE_KEXT_OVERRIDE; + + /*! + * @function handlePlatformError + * @abstract Handler for platform-defined errors. + * @discussion If the CPU reports an error that XNU does not know how + * to handle, such as a parity error or SError, XNU will + * invoke this method if there is an IOPlatformIO + * driver loaded. + * @param far Fault address provided by the CPU, if any. + * @result true if the exception was handled, false if not. + */ + virtual bool handlePlatformError(vm_offset_t far) = 0; +}; + +#endif /* ! 
_IOKIT_PLATFORM_IOPLATFORMIO_H */ diff --git a/iokit/IOKit/pwr_mgt/IOPM.h b/iokit/IOKit/pwr_mgt/IOPM.h index 6db286316..1f2b651ad 100644 --- a/iokit/IOKit/pwr_mgt/IOPM.h +++ b/iokit/IOKit/pwr_mgt/IOPM.h @@ -671,6 +671,7 @@ enum { kIOPSFamilyCodeExternal3 = iokit_family_err(sub_iokit_pmu, 3), kIOPSFamilyCodeExternal4 = iokit_family_err(sub_iokit_pmu, 4), kIOPSFamilyCodeExternal5 = iokit_family_err(sub_iokit_pmu, 5), + kIOPSFamilyCodeExternal6 = iokit_family_err(sub_iokit_pmu, 6), }; // values for kIOPMPSAdapterDetailsErrorFlagsKey diff --git a/iokit/IOKit/pwr_mgt/IOPMLibDefs.h b/iokit/IOKit/pwr_mgt/IOPMLibDefs.h index 7caa52528..f0c92aaf8 100644 --- a/iokit/IOKit/pwr_mgt/IOPMLibDefs.h +++ b/iokit/IOKit/pwr_mgt/IOPMLibDefs.h @@ -43,5 +43,6 @@ #define kPMSleepWakeWatchdogEnable 13 #define kPMSleepWakeDebugTrig 14 #define kPMSetDisplayPowerOn 15 +#define kPMSetDisplayState 16 -#define kNumPMMethods 16 +#define kNumPMMethods 17 diff --git a/iokit/IOKit/pwr_mgt/IOPMPrivate.h b/iokit/IOKit/pwr_mgt/IOPMPrivate.h index fd91d71e9..4c1eb9eec 100644 --- a/iokit/IOKit/pwr_mgt/IOPMPrivate.h +++ b/iokit/IOKit/pwr_mgt/IOPMPrivate.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2002-2020 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -109,6 +109,12 @@ enum { #define kIOPMMessageProModeStateChange \ iokit_family_msg(sub_iokit_powermanagement, 0x450) +#define kIOPMMessageRequestUserActive \ + iokit_family_msg(sub_iokit_powermanagement, 0x460) + +#define kIOPMMessageRequestSystemShutdown \ + iokit_family_msg(sub_iokit_powermanagement, 0x470) + /* @enum SystemSleepReasons * @abstract The potential causes for system sleep as logged in the system event record. */ @@ -122,7 +128,8 @@ enum { kIOPMSleepReasonThermalEmergency = 107, kIOPMSleepReasonMaintenance = 108, kIOPMSleepReasonSleepServiceExit = 109, - kIOPMSleepReasonDarkWakeThermalEmergency = 110 + kIOPMSleepReasonDarkWakeThermalEmergency = 110, + kIOPMSleepReasonNotificationWakeExit = 111 }; /* @@ -137,6 +144,7 @@ enum { #define kIOPMThermalEmergencySleepKey "Thermal Emergency Sleep" #define kIOPMSleepServiceExitKey "Sleep Service Back to Sleep" #define kIOPMDarkWakeThermalEmergencyKey "Dark Wake Thermal Emergency" +#define kIOPMNotificationWakeExitKey "Notification Wake Back to Sleep" /*! kIOPMPSRestrictedModeKey * An IOPMPowerSource property key @@ -147,6 +155,9 @@ enum { */ #define kIOPMPSRestrictedModeKey "RestrictedMode" +// Private keys for kIOPMPSAdapterDetailsKey dictionary +#define kIOPMPSAdapterDetailsIsWirelessKey "IsWireless" + #pragma mark Stray Bitfields // Private power commands issued to root domain // bits 0-7 in IOPM.h @@ -246,6 +257,13 @@ enum { kIOPMSilentRunningModeOn = 0x00000001 }; +/* @constant kIOPMSettingLowLatencyAudioModeKey + * @abstract Notification about low latency activity in the system available to kexts. + * @discussion This type can be passed as arguments to registerPMSettingController() + * to receive callbacks. 
+ */ +#define kIOPMSettingLowLatencyAudioModeKey "LowLatencyAudioMode" + /*****************************************************************************/ /*****************************************************************************/ @@ -737,6 +755,7 @@ enum { | kIOPMWakeEventAOTConfirmedPossibleExit) enum { + kIOPMAOTModeMask = 0x000000ff, kIOPMAOTModeEnable = 0x00000001, kIOPMAOTModeCycle = 0x00000002, kIOPMAOTModeAddEventFlags = 0x00000004, @@ -768,6 +787,18 @@ struct IOPMAOTMetrics #define kIOPMAOTPowerKey "aot-power" +/***************************************************************************** + * + * Dark Wake + * + *****************************************************************************/ + +/* An OSNumber property set on a power managed driver that the root domain + * will use as the driver's max power state while system is in dark wake. + * This property should be set prior to the driver joining the PM tree. + */ +#define kIOPMDarkWakeMaxPowerStateKey "IOPMDarkWakeMaxPowerState" + /***************************************************************************** * * System Sleep Policy diff --git a/iokit/IOKit/pwr_mgt/IOPMinformeeList.h b/iokit/IOKit/pwr_mgt/IOPMinformeeList.h index ae4b12110..dc2b53624 100644 --- a/iokit/IOKit/pwr_mgt/IOPMinformeeList.h +++ b/iokit/IOKit/pwr_mgt/IOPMinformeeList.h @@ -50,7 +50,7 @@ public: unsigned long numberOfItems( void ); - IOPMinformee *appendNewInformee( IOService * newObject ); + LIBKERN_RETURNS_NOT_RETAINED IOPMinformee *appendNewInformee( IOService * newObject ); // OBSOLETE // do not use addToList(); Use appendNewInformee() instead diff --git a/iokit/IOKit/pwr_mgt/RootDomain.h b/iokit/IOKit/pwr_mgt/RootDomain.h index 22e38474e..79210790e 100644 --- a/iokit/IOKit/pwr_mgt/RootDomain.h +++ b/iokit/IOKit/pwr_mgt/RootDomain.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998-2019 Apple Inc. All rights reserved. + * Copyright (c) 1998-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -28,6 +28,7 @@ #ifndef _IOKIT_ROOTDOMAIN_H #define _IOKIT_ROOTDOMAIN_H +#include #include #include #include @@ -163,7 +164,7 @@ public: virtual IOReturn setProperties( OSObject * ) APPLE_KEXT_OVERRIDE; virtual bool serializeProperties( OSSerialize * s ) const APPLE_KEXT_OVERRIDE; - virtual OSObject * copyProperty( const char * aKey ) const APPLE_KEXT_OVERRIDE; + virtual OSPtr copyProperty( const char * aKey ) const APPLE_KEXT_OVERRIDE; /*! @function systemPowerEventOccurred * @abstract Other drivers may inform IOPMrootDomain of system PM events @@ -244,10 +245,23 @@ public: * Please pass an OSString defining the event. */ #endif - void claimSystemWakeEvent( IOService *device, - IOOptionBits flags, - const char *reason, - OSObject *details = NULL ); + void claimSystemWakeEvent( + IOService *device, + IOOptionBits flags, + const char *reason, + OSObject *details = NULL ); + + void claimSystemBootEvent( + IOService *device, + IOOptionBits flags, + const char *reason, + OSObject *details = NULL ); + + void claimSystemShutdownEvent( + IOService *device, + IOOptionBits flags, + const char *reason, + OSObject *details = NULL ); virtual IOReturn receivePowerNotification( UInt32 msg ); @@ -257,6 +271,8 @@ public: void wakeFromDoze( void ); + void requestUserActive(IOService *driver, const char *reason); + // KEXT driver announces support of power management feature void publishFeature( const char *feature ); @@ -282,7 +298,7 @@ public: * @param whichSetting Name of the desired setting. * @result OSObject value if valid, NULL otherwise. 
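A sketch of publishing the kIOPMDarkWakeMaxPowerStateKey cap introduced earlier in this IOPMPrivate.h hunk, set before the driver joins the PM tree; the state index 1 and the power-state table are illustrative:

    PMinit();
    // Cap this driver at power state 1 while the system is in dark wake.
    setProperty(kIOPMDarkWakeMaxPowerStateKey, (unsigned long long)1, 32);
    provider->joinPMtree(this);
    registerPowerDriver(this, myPowerStates, kMyPowerStateCount);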
*/ - OSObject * copyPMSetting( OSSymbol *whichSetting ); + OSPtr copyPMSetting( OSSymbol *whichSetting ); /*! @function registerPMSettingController * @abstract Register for callbacks on changes to certain PM settings. @@ -325,7 +341,7 @@ public: uintptr_t refcon, OSObject **handle); // out param - virtual IONotifier * registerInterest( + virtual OSPtr registerInterest( const OSSymbol * typeOfInterest, IOServiceInterestHandler handler, void * target, void * ref = NULL ) APPLE_KEXT_OVERRIDE; @@ -423,33 +439,35 @@ private: /* Root Domain internals */ public: void tagPowerPlaneService( - IOService * service, - IOPMActions * actions ); + IOService * service, + IOPMActions * actions, + IOPMPowerStateIndex maxPowerState ); void overrideOurPowerChange( IOService * service, IOPMActions * actions, + const IOPMRequest * request, IOPMPowerStateIndex * inOutPowerState, - IOPMPowerChangeFlags * inOutChangeFlags, - IOPMRequestTag requestTag ); + IOPMPowerChangeFlags * inOutChangeFlags ); void handleOurPowerChangeStart( IOService * service, IOPMActions * actions, + const IOPMRequest * request, IOPMPowerStateIndex powerState, - IOPMPowerChangeFlags * inOutChangeFlags, - IOPMRequestTag requestTag ); + IOPMPowerChangeFlags * inOutChangeFlags ); void handleOurPowerChangeDone( IOService * service, IOPMActions * actions, + const IOPMRequest * request, IOPMPowerStateIndex powerState, - IOPMPowerChangeFlags changeFlags, - IOPMRequestTag requestTag ); + IOPMPowerChangeFlags changeFlags ); - void overridePowerChangeForUIService( + void overridePowerChangeForService( IOService * service, IOPMActions * actions, + const IOPMRequest * request, IOPMPowerStateIndex * inOutPowerState, IOPMPowerChangeFlags * inOutChangeFlags ); @@ -470,12 +488,14 @@ public: void handlePowerChangeStartForPCIDevice( IOService * service, IOPMActions * actions, + const IOPMRequest * request, IOPMPowerStateIndex powerState, IOPMPowerChangeFlags * inOutChangeFlags ); void handlePowerChangeDoneForPCIDevice( IOService * service, IOPMActions * actions, + const IOPMRequest * request, IOPMPowerStateIndex powerState, IOPMPowerChangeFlags changeFlags ); @@ -489,7 +509,9 @@ public: void handleQueueSleepWakeUUID( OSObject *obj); - void handleDisplayPowerOn(); + void willTellSystemCapabilityDidChange(void); + + void handleSetDisplayPowerOn(bool powerOn); void willNotifyPowerChildren( IOPMPowerStateIndex newPowerState ); @@ -519,8 +541,11 @@ public: uintptr_t param1, uintptr_t param2, uintptr_t param3 = 0); void tracePoint(uint8_t point); void traceDetail(uint32_t msgType, uint32_t msgIndex, uint32_t delay); - void traceDetail(OSObject *notifier, bool start); - void traceAckDelay(OSObject *notifier, uint32_t response, uint32_t delay_ms); + void traceNotification(OSObject *notifier, bool start, uint64_t ts = 0, uint32_t msgIndex = UINT_MAX); + void traceNotificationAck(OSObject *notifier, uint32_t delay_ms); + void traceNotificationResponse(OSObject *object, uint32_t delay_ms, uint32_t ack_time_us); + void traceFilteredNotification(OSObject *notifier); + const char * getNotificationClientName(OSObject *notifier); void startSpinDump(uint32_t spindumpKind); @@ -550,9 +575,12 @@ public: uint32_t delay_ms, uint64_t id, OSObject *object, - IOPMPowerStateIndex ps = 0); + IOPMPowerStateIndex ps = 0, + bool async = false); void copyWakeReasonString( char * outBuf, size_t bufSize ); + void copyShutdownReasonString( char * outBuf, size_t bufSize ); + void lowLatencyAudioNotify(uint64_t time, boolean_t state); #if HIBERNATION bool getHibernateSettings( @@ -588,51 
+616,40 @@ private: UInt32 messageType, IOService * service, void * messageArgument, vm_size_t argSize ); - static bool displayWranglerMatchPublished( void * target, void * refCon, - IOService * newService, - IONotifier * notifier); - - static bool batteryPublished( void * target, void * refCon, - IOService * resourceService, - IONotifier * notifier); - void initializeBootSessionUUID( void ); void fullWakeDelayedWork( void ); - IOService * wrangler; - OSDictionary * wranglerIdleSettings; + OSPtr wrangler; + OSPtr wranglerIdleSettings; IOLock *featuresDictLock;// guards supportedFeatures IOLock *wakeEventLock; IOPMPowerStateQueue *pmPowerStateQueue; - OSArray *allowedPMSettings; - OSArray *noPublishPMSettings; - PMTraceWorker *pmTracer; + OSPtr allowedPMSettings; + OSPtr noPublishPMSettings; + OSPtr pmTracer; PMAssertionsTracker *pmAssertions; // Settings controller info IOLock *settingsCtrlLock; - OSDictionary *settingsCallbacks; - OSDictionary *fPMSettingsDict; - - IONotifier *_batteryPublishNotifier; - IONotifier *_displayWranglerNotifier; + OSPtr settingsCallbacks; + OSPtr fPMSettingsDict; // Statistics - const OSSymbol *_statsNameKey; - const OSSymbol *_statsPIDKey; - const OSSymbol *_statsTimeMSKey; - const OSSymbol *_statsResponseTypeKey; - const OSSymbol *_statsMessageTypeKey; - const OSSymbol *_statsPowerCapsKey; + OSPtr _statsNameKey; + OSPtr _statsPIDKey; + OSPtr _statsTimeMSKey; + OSPtr _statsResponseTypeKey; + OSPtr _statsMessageTypeKey; + OSPtr _statsPowerCapsKey; uint32_t sleepCnt; uint32_t darkWakeCnt; uint32_t displayWakeCnt; - OSString *queuedSleepWakeUUIDString; - OSArray *pmStatsAppResponses; + OSPtr queuedSleepWakeUUIDString; + OSPtr pmStatsAppResponses; IOLock *pmStatsLock;// guards pmStatsAppResponses void *sleepDelaysReport; // report to track time taken to go to sleep @@ -640,7 +657,6 @@ private: uint64_t ts_sleepStart; uint64_t wake2DarkwakeDelay; // Time taken to change from full wake -> Dark wake - void *assertOnWakeReport;// report to track time spent without any assertions held after wake uint32_t assertOnWakeClientCnt;// Number of clients interested in assertOnWakeReport clock_sec_t assertOnWakeSecs; // Num of secs after wake for first assertion @@ -649,13 +665,11 @@ private: // Pref: idle time before idle sleep bool idleSleepEnabled; - unsigned long sleepSlider; - unsigned long idleSeconds; - uint64_t autoWakeStart; - uint64_t autoWakeEnd; + uint32_t sleepSlider; + uint32_t idleSeconds; // Difference between sleepSlider and longestNonSleepSlider - unsigned long extraSleepDelay; + uint32_t extraSleepDelay; // Used to wait between say display idle and system idle thread_call_t extraSleepTimer; @@ -670,7 +684,7 @@ private: uint32_t _currentCapability; uint32_t _pendingCapability; uint32_t _highestCapability; - OSSet * _joinedCapabilityClients; + OSPtr _joinedCapabilityClients; uint32_t _systemStateGeneration; // Type of clients that can receive system messages. 
@@ -700,20 +714,19 @@ private: unsigned int desktopMode :1; unsigned int acAdaptorConnected :1; - unsigned int clamshellSleepDisabled :1; + unsigned int clamshellIgnoreClose :1; unsigned int idleSleepTimerPending :1; unsigned int userDisabledAllSleep :1; unsigned int ignoreTellChangeDown :1; unsigned int wranglerAsleep :1; - unsigned int wranglerTickled :1; + unsigned int darkWakeExit :1; unsigned int _preventUserActive :1; - unsigned int graphicsSuppressed :1; - unsigned int isRTCAlarmWake :1; + unsigned int darkWakePowerClamped :1; unsigned int capabilityLoss :1; unsigned int pciCantSleepFlag :1; unsigned int pciCantSleepValid :1; - unsigned int logGraphicsClamp :1; + unsigned int darkWakeLogClamp :1; unsigned int darkWakeToSleepASAP :1; unsigned int darkWakeMaintenance :1; unsigned int darkWakeSleepService :1; @@ -724,15 +737,18 @@ private: unsigned int lowBatteryCondition :1; unsigned int hibernateDisabled :1; unsigned int hibernateRetry :1; - unsigned int wranglerTickleLatched :1; + unsigned int wranglerTickled :1; unsigned int userIsActive :1; unsigned int userWasActive :1; unsigned int displayIdleForDemandSleep :1; unsigned int darkWakeHibernateError :1; - unsigned int thermalWarningState:1; + unsigned int thermalWarningState :1; unsigned int toldPowerdCapWillChange :1; - unsigned int displayPowerOnRequested:1; + unsigned int displayPowerOnRequested :1; + unsigned int isRTCAlarmWake :1; + unsigned int wranglerPowerOff :1; + unsigned int thermalEmergencyState :1; uint8_t tasksSuspended; uint8_t tasksSuspendState; @@ -755,25 +771,30 @@ private: }; uint32_t fullWakeReason; + enum { + kClamshellSleepDisableInternal = 0x01, + kClamshellSleepDisablePowerd = 0x02 + }; + uint32_t clamshellSleepDisableMask; + // Info for communicating system state changes to PMCPU int32_t idxPMCPUClamshell; int32_t idxPMCPULimitedPower; IOOptionBits platformSleepSupport; uint32_t _debugWakeSeconds; - uint32_t _lastDebugWakeSeconds; queue_head_t aggressivesQueue; thread_call_t aggressivesThreadCall; - OSData * aggressivesData; + OSPtr aggressivesData; AbsoluteTime userBecameInactiveTime; // PCI top-level PM trace - IOService * pciHostBridgeDevice; - IOService * pciHostBridgeDriver; + OSPtr pciHostBridgeDevice; + OSPtr pciHostBridgeDriver; - IONotifier * systemCapabilityNotifier; + OSPtr systemCapabilityNotifier; typedef struct { uint32_t pid; @@ -784,12 +805,14 @@ private: uint32_t pmSuspendedSize; PMNotifySuspendedStruct *pmSuspendedPIDS; - OSSet * preventIdleSleepList; - OSSet * preventSystemSleepList; + OSPtr preventIdleSleepList; + OSPtr preventSystemSleepList; - UInt32 _scheduledAlarms; - UInt32 _userScheduledAlarm; - clock_sec_t _scheduledAlarmUTC; + OSPtr _nextScheduledAlarmType; + clock_sec_t _nextScheduledAlarmUTC; + clock_sec_t _calendarWakeAlarmUTC; + UInt32 _scheduledAlarmMask; + UInt32 _userScheduledAlarmMask; #if HIBERNATION clock_sec_t _standbyTimerResetSeconds; @@ -800,17 +823,18 @@ private: void * swd_compressed_buffer; void * swd_spindump_buffer; thread_t notifierThread; - OSObject *notifierObject; + OSPtr notifierObject; - IOBufferMemoryDescriptor *swd_memDesc; + OSPtr swd_spindump_memDesc; + OSPtr swd_memDesc; // Wake Event Reporting - OSArray * _systemWakeEventsArray; - bool _acceptSystemWakeEvents; + OSPtr _systemWakeEventsArray; + bool _acceptSystemWakeEvents; // AOT -- IOPMCalendarStruct _aotWakeTimeCalendar; - IOTimerEventSource * _aotTimerES; + OSPtr _aotTimerES; clock_sec_t _aotWakeTimeUTC; uint64_t _aotTestTime; uint64_t _aotTestInterval; @@ -844,7 +868,9 @@ private: // 
IOPMrootDomain internal sleep call IOReturn privateSleepSystem( uint32_t sleepReason ); void reportUserInput( void ); + void updateUserActivity( void ); void setDisableClamShellSleep( bool ); + void setClamShellSleepDisable(bool disable, uint32_t bitmask); bool checkSystemSleepAllowed( IOOptionBits options, uint32_t sleepReason ); bool checkSystemSleepEnabled( void ); @@ -907,6 +933,9 @@ private: void sleepWakeDebugSpinDumpMemAlloc(); errno_t sleepWakeDebugSaveFile(const char *name, char *buf, int len); + IOReturn changePowerStateWithOverrideTo( IOPMPowerStateIndex ordinal, IOPMRequestTag tag ); + IOReturn changePowerStateWithTagToPriv( IOPMPowerStateIndex ordinal, IOPMRequestTag tag ); + IOReturn changePowerStateWithTagTo( IOPMPowerStateIndex ordinal, IOPMRequestTag tag ); #if HIBERNATION bool getSleepOption( const char * key, uint32_t * option ); @@ -919,12 +948,20 @@ private: bool latchDisplayWranglerTickle( bool latch ); void setDisplayPowerOn( uint32_t options ); - void acceptSystemWakeEvents( bool accept ); + void acceptSystemWakeEvents( uint32_t control ); void systemDidNotSleep( void ); void preventTransitionToUserActive( bool prevent ); void setThermalState(OSObject *value); void copySleepPreventersList(OSArray **idleSleepList, OSArray **systemSleepList); void copySleepPreventersListWithID(OSArray **idleSleepList, OSArray **systemSleepList); + void recordRTCAlarm(const OSSymbol *type, OSObject *object); + + // Used to inform interested clients about low latency audio activity in the system + OSPtr lowLatencyAudioNotifierDict; + OSPtr lowLatencyAudioNotifyStateVal; + OSPtr lowLatencyAudioNotifyTimestampVal; + OSPtr lowLatencyAudioNotifyStateSym; + OSPtr lowLatencyAudioNotifyTimestampSym; #endif /* XNU_KERNEL_PRIVATE */ }; @@ -935,7 +972,7 @@ class IORootParent : public IOService public: static void initialize( void ); - virtual OSObject * copyProperty( const char * aKey ) const APPLE_KEXT_OVERRIDE; + virtual OSPtr copyProperty( const char * aKey ) const APPLE_KEXT_OVERRIDE; bool start( IOService * nub ) APPLE_KEXT_OVERRIDE; void shutDownSystem( void ); void restartSystem( void ); diff --git a/iokit/Kernel/IOBufferMemoryDescriptor.cpp b/iokit/Kernel/IOBufferMemoryDescriptor.cpp index 29437390a..7b670bf53 100644 --- a/iokit/Kernel/IOBufferMemoryDescriptor.cpp +++ b/iokit/Kernel/IOBufferMemoryDescriptor.cpp @@ -25,6 +25,7 @@ * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ +#define IOKIT_ENABLE_SHARED_PTR #define _IOMEMORYDESCRIPTOR_INTERNAL_ @@ -72,8 +73,8 @@ enum{ /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ #define super IOGeneralMemoryDescriptor -OSDefineMetaClassAndStructors(IOBufferMemoryDescriptor, - IOGeneralMemoryDescriptor); +OSDefineMetaClassAndStructorsWithZone(IOBufferMemoryDescriptor, + IOGeneralMemoryDescriptor, ZC_ZFREE_CLEARMEM); /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ @@ -111,7 +112,7 @@ IOBufferMemoryDescriptor::initWithOptions( } #endif /* !__LP64__ */ -IOBufferMemoryDescriptor * +OSSharedPtr IOBufferMemoryDescriptor::withCopy( task_t inTask, IOOptionBits options, @@ -119,7 +120,7 @@ IOBufferMemoryDescriptor::withCopy( mach_vm_address_t source, mach_vm_size_t size) { - IOBufferMemoryDescriptor * inst; + OSSharedPtr inst; kern_return_t err; vm_map_copy_t copy; vm_map_address_t address; @@ -127,7 +128,7 @@ IOBufferMemoryDescriptor::withCopy( copy = NULL; do { err = kIOReturnNoMemory; - inst = new IOBufferMemoryDescriptor; + inst = OSMakeShared(); if (!inst) { break; } @@ -163,8 +164,8 @@ 
IOBufferMemoryDescriptor::withCopy( if (copy) { vm_map_copy_discard(copy); } - OSSafeReleaseNULL(inst); - return NULL; + + return nullptr; } @@ -183,7 +184,7 @@ IOBufferMemoryDescriptor::initWithPhysicalMask( IODMAMapSpecification mapSpec; bool mapped = false; bool withCopy = false; - bool needZero; + bool mappedOrShared = false; if (!capacity) { return false; @@ -225,7 +226,6 @@ IOBufferMemoryDescriptor::initWithPhysicalMask( IOMapper::checkForSystemMapper(); mapped = (NULL != IOMapper::gSystem); } - needZero = (mapped || (0 != (kIOMemorySharingTypeMask & options))); if (physicalMask && (alignment <= 1)) { alignment = ((physicalMask ^ (-1ULL)) & (physicalMask - 1)); @@ -241,7 +241,9 @@ IOBufferMemoryDescriptor::initWithPhysicalMask( } if (alignment >= page_size) { - capacity = round_page(capacity); + if (round_page_overflow(capacity, &capacity)) { + return false; + } } if (alignment > page_size) { @@ -263,9 +265,9 @@ IOBufferMemoryDescriptor::initWithPhysicalMask( mapSpec.numAddressBits = 64; if (highestMask && mapped) { if (highestMask <= 0xFFFFFFFF) { - mapSpec.numAddressBits = (32 - __builtin_clz((unsigned int) highestMask)); + mapSpec.numAddressBits = (uint8_t)(32 - __builtin_clz((unsigned int) highestMask)); } else { - mapSpec.numAddressBits = (64 - __builtin_clz((unsigned int) (highestMask >> 32))); + mapSpec.numAddressBits = (uint8_t)(64 - __builtin_clz((unsigned int) (highestMask >> 32))); } highestMask = 0; } @@ -297,23 +299,25 @@ IOBufferMemoryDescriptor::initWithPhysicalMask( #endif } + mappedOrShared = (mapped || (0 != (kIOMemorySharingTypeMask & options))); if (contig || highestMask || (alignment > page_size)) { _internalFlags |= kInternalFlagPhysical; if (highestMask) { _internalFlags |= kInternalFlagPageSized; - capacity = round_page(capacity); + if (round_page_overflow(capacity, &capacity)) { + return false; + } } _buffer = (void *) IOKernelAllocateWithPhysicalRestrict( capacity, highestMask, alignment, contig); - } else if (needZero - && ((capacity + alignment) <= (page_size - gIOPageAllocChunkBytes))) { + } else if (mappedOrShared + && (capacity + alignment) <= (page_size - gIOPageAllocChunkBytes)) { _internalFlags |= kInternalFlagPageAllocated; - needZero = false; _buffer = (void *) iopa_alloc(&gIOBMDPageAllocator, &IOBMDPageProc, capacity, alignment); if (_buffer) { IOStatisticsAlloc(kIOStatisticsMallocAligned, capacity); #if IOALLOCDEBUG - OSAddAtomic(capacity, &debug_iomalloc_size); + OSAddAtomicLong(capacity, &debug_iomalloc_size); #endif } } else if (alignment > 1) { @@ -324,9 +328,7 @@ IOBufferMemoryDescriptor::initWithPhysicalMask( if (!_buffer) { return false; } - if (needZero) { - bzero(_buffer, capacity); - } + bzero(_buffer, capacity); } if ((options & (kIOMemoryPageable | kIOMapCacheMask))) { @@ -389,7 +391,7 @@ IOBufferMemoryDescriptor::initWithPhysicalMask( } } reserved->map = createMappingInTask(mapTask, 0, - kIOMapAnywhere | (options & kIOMapPrefault) | (options & kIOMapCacheMask), 0, 0); + kIOMapAnywhere | (options & kIOMapPrefault) | (options & kIOMapCacheMask), 0, 0).detach(); if (!reserved->map) { _buffer = NULL; return false; @@ -409,23 +411,22 @@ IOBufferMemoryDescriptor::initWithPhysicalMask( return true; } -IOBufferMemoryDescriptor * +OSSharedPtr IOBufferMemoryDescriptor::inTaskWithOptions( task_t inTask, IOOptionBits options, vm_size_t capacity, vm_offset_t alignment) { - IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor; + OSSharedPtr me = OSMakeShared(); if (me && !me->initWithPhysicalMask(inTask, options, capacity, alignment, 0)) { 
- me->release(); - me = NULL; + me.reset(); } return me; } -IOBufferMemoryDescriptor * +OSSharedPtr IOBufferMemoryDescriptor::inTaskWithOptions( task_t inTask, IOOptionBits options, @@ -434,31 +435,29 @@ IOBufferMemoryDescriptor::inTaskWithOptions( uint32_t kernTag, uint32_t userTag) { - IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor; + OSSharedPtr me = OSMakeShared(); if (me) { me->setVMTags(kernTag, userTag); if (!me->initWithPhysicalMask(inTask, options, capacity, alignment, 0)) { - me->release(); - me = NULL; + me.reset(); } } return me; } -IOBufferMemoryDescriptor * +OSSharedPtr IOBufferMemoryDescriptor::inTaskWithPhysicalMask( task_t inTask, IOOptionBits options, mach_vm_size_t capacity, mach_vm_address_t physicalMask) { - IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor; + OSSharedPtr me = OSMakeShared(); if (me && !me->initWithPhysicalMask(inTask, options, capacity, 1, physicalMask)) { - me->release(); - me = NULL; + me.reset(); } return me; } @@ -474,17 +473,16 @@ IOBufferMemoryDescriptor::initWithOptions( } #endif /* !__LP64__ */ -IOBufferMemoryDescriptor * +OSSharedPtr IOBufferMemoryDescriptor::withOptions( IOOptionBits options, vm_size_t capacity, vm_offset_t alignment) { - IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor; + OSSharedPtr me = OSMakeShared(); if (me && !me->initWithPhysicalMask(kernel_task, options, capacity, alignment, 0)) { - me->release(); - me = NULL; + me.reset(); } return me; } @@ -496,7 +494,7 @@ IOBufferMemoryDescriptor::withOptions( * Returns a new IOBufferMemoryDescriptor with a buffer large enough to * hold capacity bytes. The descriptor's length is initially set to the capacity. */ -IOBufferMemoryDescriptor * +OSSharedPtr IOBufferMemoryDescriptor::withCapacity(vm_size_t inCapacity, IODirection inDirection, bool inContiguous) @@ -543,20 +541,19 @@ IOBufferMemoryDescriptor::initWithBytes(const void * inBytes, * Returns a new IOBufferMemoryDescriptor preloaded with bytes (copied). * The descriptor's length and capacity are set to the input buffer's size. */ -IOBufferMemoryDescriptor * +OSSharedPtr IOBufferMemoryDescriptor::withBytes(const void * inBytes, vm_size_t inLength, IODirection inDirection, bool inContiguous) { - IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor; + OSSharedPtr me = OSMakeShared(); if (me && !me->initWithPhysicalMask( kernel_task, inDirection | kIOMemoryUnshared | (inContiguous ? 
kIOMemoryPhysicallyContiguous : 0), inLength, inLength, 0 )) { - me->release(); - me = NULL; + me.reset(); } if (me) { @@ -564,8 +561,7 @@ IOBufferMemoryDescriptor::withBytes(const void * inBytes, me->setLength(0); if (!me->appendBytes(inBytes, inLength)) { - me->release(); - me = NULL; + me.reset(); } } return me; @@ -632,7 +628,7 @@ IOBufferMemoryDescriptor::free() kmem_free(kernel_map, page, page_size); } #if IOALLOCDEBUG - OSAddAtomic(-size, &debug_iomalloc_size); + OSAddAtomicLong(-size, &debug_iomalloc_size); #endif IOStatisticsAlloc(kIOStatisticsFreeAligned, size); } else if (alignment > 1) { @@ -785,8 +781,8 @@ IOBufferMemoryDescriptor::getVirtualSegment(IOByteCount offset, OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 0); OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 1); #else /* !__LP64__ */ -OSMetaClassDefineReservedUsed(IOBufferMemoryDescriptor, 0); -OSMetaClassDefineReservedUsed(IOBufferMemoryDescriptor, 1); +OSMetaClassDefineReservedUsedX86(IOBufferMemoryDescriptor, 0); +OSMetaClassDefineReservedUsedX86(IOBufferMemoryDescriptor, 1); #endif /* !__LP64__ */ OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 2); OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 3); diff --git a/iokit/Kernel/IOCPU.cpp b/iokit/Kernel/IOCPU.cpp index 84b9cedec..02077e43c 100644 --- a/iokit/Kernel/IOCPU.cpp +++ b/iokit/Kernel/IOCPU.cpp @@ -26,6 +26,8 @@ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ +#define IOKIT_ENABLE_SHARED_PTR + extern "C" { #include #include @@ -37,12 +39,14 @@ extern void kperf_kernel_configure(char *); #include #include #include +#include #include #include #include #include "IOKitKernelInternal.h" /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + #include #include @@ -51,230 +55,16 @@ extern "C" void console_resume(); extern "C" void sched_override_recommended_cores_for_sleep(void); extern "C" void sched_restore_recommended_cores_after_sleep(void); -typedef kern_return_t (*iocpu_platform_action_t)(void * refcon0, void * refcon1, uint32_t priority, - void * param1, void * param2, void * param3, - const char * name); - -struct iocpu_platform_action_entry { - queue_chain_t link; - iocpu_platform_action_t action; - int32_t priority; - const char * name; - void * refcon0; - void * refcon1; - boolean_t callout_in_progress; - struct iocpu_platform_action_entry * alloc_list; -}; -typedef struct iocpu_platform_action_entry iocpu_platform_action_entry_t; - /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ static IOLock *gIOCPUsLock; -static OSArray *gIOCPUs; -static const OSSymbol *gIOCPUStateKey; -static OSString *gIOCPUStateNames[kIOCPUStateCount]; - -enum{ - kQueueSleep = 0, - kQueueWake = 1, - kQueueQuiesce = 2, - kQueueActive = 3, - kQueueHaltRestart = 4, - kQueuePanic = 5, - kQueueCount = 6 -}; - -const OSSymbol * gIOPlatformSleepActionKey; -const OSSymbol * gIOPlatformWakeActionKey; -const OSSymbol * gIOPlatformQuiesceActionKey; -const OSSymbol * gIOPlatformActiveActionKey; -const OSSymbol * gIOPlatformHaltRestartActionKey; -const OSSymbol * gIOPlatformPanicActionKey; - -static queue_head_t gActionQueues[kQueueCount]; -static const OSSymbol * gActionSymbols[kQueueCount]; - -/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ - -static void -iocpu_add_platform_action(queue_head_t * queue, iocpu_platform_action_entry_t * entry) -{ - iocpu_platform_action_entry_t * next; - - queue_iterate(queue, next, iocpu_platform_action_entry_t *, link) - { - if 
(next->priority > entry->priority) { - queue_insert_before(queue, entry, next, iocpu_platform_action_entry_t *, link); - return; - } - } - queue_enter(queue, entry, iocpu_platform_action_entry_t *, link); // at tail -} - -static void -iocpu_remove_platform_action(iocpu_platform_action_entry_t * entry) -{ - remque(&entry->link); -} - -static kern_return_t -iocpu_run_platform_actions(queue_head_t * queue, uint32_t first_priority, uint32_t last_priority, - void * param1, void * param2, void * param3, boolean_t allow_nested_callouts) -{ - kern_return_t ret = KERN_SUCCESS; - kern_return_t result = KERN_SUCCESS; - iocpu_platform_action_entry_t * next; - - queue_iterate(queue, next, iocpu_platform_action_entry_t *, link) - { - uint32_t pri = (next->priority < 0) ? -next->priority : next->priority; - if ((pri >= first_priority) && (pri <= last_priority)) { - //kprintf("[%p]", next->action); - if (!allow_nested_callouts && !next->callout_in_progress) { - next->callout_in_progress = TRUE; - ret = (*next->action)(next->refcon0, next->refcon1, pri, param1, param2, param3, next->name); - next->callout_in_progress = FALSE; - } else if (allow_nested_callouts) { - ret = (*next->action)(next->refcon0, next->refcon1, pri, param1, param2, param3, next->name); - } - } - if (KERN_SUCCESS == result) { - result = ret; - } - } - return result; -} - -/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ - -extern "C" kern_return_t -IOCPURunPlatformQuiesceActions(void) -{ - assert(preemption_enabled() == false); - return iocpu_run_platform_actions(&gActionQueues[kQueueQuiesce], 0, 0U - 1, - NULL, NULL, NULL, TRUE); -} - -extern "C" kern_return_t -IOCPURunPlatformActiveActions(void) -{ - assert(preemption_enabled() == false); - return iocpu_run_platform_actions(&gActionQueues[kQueueActive], 0, 0U - 1, - NULL, NULL, NULL, TRUE); -} - -extern "C" kern_return_t -IOCPURunPlatformHaltRestartActions(uint32_t message) -{ - if (!gActionQueues[kQueueHaltRestart].next) { - return kIOReturnNotReady; - } - return iocpu_run_platform_actions(&gActionQueues[kQueueHaltRestart], 0, 0U - 1, - (void *)(uintptr_t) message, NULL, NULL, TRUE); -} - -extern "C" kern_return_t -IOCPURunPlatformPanicActions(uint32_t message) -{ - // Don't allow nested calls of panic actions - if (!gActionQueues[kQueuePanic].next) { - return kIOReturnNotReady; - } - return iocpu_run_platform_actions(&gActionQueues[kQueuePanic], 0, 0U - 1, - (void *)(uintptr_t) message, NULL, NULL, FALSE); -} - - -extern "C" kern_return_t -IOCPURunPlatformPanicSyncAction(void *addr, uint32_t offset, uint32_t len) -{ - PE_panic_save_context_t context = { - .psc_buffer = addr, - .psc_offset = offset, - .psc_length = len - }; - - // Don't allow nested calls of panic actions - if (!gActionQueues[kQueuePanic].next) { - return kIOReturnNotReady; - } - return iocpu_run_platform_actions(&gActionQueues[kQueuePanic], 0, 0U - 1, - (void *)(uintptr_t)(kPEPanicSync), &context, NULL, FALSE); -} +static OSSharedPtr gIOCPUs; +static OSSharedPtr gIOCPUStateKey; +static OSSharedPtr gIOCPUStateNames[kIOCPUStateCount]; /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -static kern_return_t -IOServicePlatformAction(void * refcon0, void * refcon1, uint32_t priority, - void * param1, void * param2, void * param3, - const char * service_name) -{ - IOReturn ret; - IOService * service = (IOService *) refcon0; - const OSSymbol * function = (const OSSymbol *) refcon1; - - kprintf("%s -> %s\n", function->getCStringNoCopy(), 
service_name); - - ret = service->callPlatformFunction(function, false, - (void *)(uintptr_t) priority, param1, param2, param3); - - return ret; -} - -static void -IOInstallServicePlatformAction(IOService * service, uint32_t qidx) -{ - iocpu_platform_action_entry_t * entry; - OSNumber * num; - uint32_t priority; - const OSSymbol * key = gActionSymbols[qidx]; - queue_head_t * queue = &gActionQueues[qidx]; - bool reverse; - bool uniq; - - num = OSDynamicCast(OSNumber, service->getProperty(key)); - if (!num) { - return; - } - - reverse = false; - uniq = false; - switch (qidx) { - case kQueueWake: - case kQueueActive: - reverse = true; - break; - case kQueueHaltRestart: - case kQueuePanic: - uniq = true; - break; - } - if (uniq) { - queue_iterate(queue, entry, iocpu_platform_action_entry_t *, link) - { - if (service == entry->refcon0) { - return; - } - } - } - - entry = IONew(iocpu_platform_action_entry_t, 1); - entry->action = &IOServicePlatformAction; - entry->name = service->getName(); - priority = num->unsigned32BitValue(); - if (reverse) { - entry->priority = -priority; - } else { - entry->priority = priority; - } - entry->refcon0 = service; - entry->refcon1 = (void *) key; - entry->callout_in_progress = FALSE; - - iocpu_add_platform_action(queue, entry); -} - -/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ +#if !USE_APPLEARMSMP void IOCPUInitialize(void) @@ -282,10 +72,6 @@ IOCPUInitialize(void) gIOCPUsLock = IOLockAlloc(); gIOCPUs = OSArray::withCapacity(1); - for (uint32_t qidx = kQueueSleep; qidx < kQueueCount; qidx++) { - queue_init(&gActionQueues[qidx]); - } - gIOCPUStateKey = OSSymbol::withCStringNoCopy("IOCPUState"); gIOCPUStateNames[kIOCPUStateUnregistered] = @@ -296,60 +82,8 @@ IOCPUInitialize(void) OSString::withCStringNoCopy("Stopped"); gIOCPUStateNames[kIOCPUStateRunning] = OSString::withCStringNoCopy("Running"); - - gIOPlatformSleepActionKey = gActionSymbols[kQueueSleep] - = OSSymbol::withCStringNoCopy(kIOPlatformSleepActionKey); - gIOPlatformWakeActionKey = gActionSymbols[kQueueWake] - = OSSymbol::withCStringNoCopy(kIOPlatformWakeActionKey); - gIOPlatformQuiesceActionKey = gActionSymbols[kQueueQuiesce] - = OSSymbol::withCStringNoCopy(kIOPlatformQuiesceActionKey); - gIOPlatformActiveActionKey = gActionSymbols[kQueueActive] - = OSSymbol::withCStringNoCopy(kIOPlatformActiveActionKey); - gIOPlatformHaltRestartActionKey = gActionSymbols[kQueueHaltRestart] - = OSSymbol::withCStringNoCopy(kIOPlatformHaltRestartActionKey); - gIOPlatformPanicActionKey = gActionSymbols[kQueuePanic] - = OSSymbol::withCStringNoCopy(kIOPlatformPanicActionKey); -} - -IOReturn -IOInstallServicePlatformActions(IOService * service) -{ - IOLockLock(gIOCPUsLock); - - IOInstallServicePlatformAction(service, kQueueHaltRestart); - IOInstallServicePlatformAction(service, kQueuePanic); - - IOLockUnlock(gIOCPUsLock); - - return kIOReturnSuccess; } -IOReturn -IORemoveServicePlatformActions(IOService * service) -{ - iocpu_platform_action_entry_t * entry; - iocpu_platform_action_entry_t * next; - - IOLockLock(gIOCPUsLock); - - for (uint32_t qidx = kQueueSleep; qidx < kQueueCount; qidx++) { - next = (typeof(entry))queue_first(&gActionQueues[qidx]); - while (!queue_end(&gActionQueues[qidx], &next->link)) { - entry = next; - next = (typeof(entry))queue_next(&entry->link); - if (service == entry->refcon0) { - iocpu_remove_platform_action(entry); - IODelete(entry, iocpu_platform_action_entry_t, 1); - } - } - } - - IOLockUnlock(gIOCPUsLock); - - return kIOReturnSuccess; -} - - /* * * 
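The helpers deleted above kept one queue per platform event (sleep, wake, quiesce, active, halt/restart, panic), inserted each entry in ascending priority order, and ran a priority range at event time; that registration and dispatch now happens behind the IOPlatformActionsPreSleep()/IOPlatformActionsPostResume() calls that appear later in IOCPUSleepKernel(). For reference, a standalone user-space sketch of the ordering invariant the removed iocpu_add_platform_action()/iocpu_run_platform_actions() pair maintained (types and values are illustrative only):

    #include <cstdint>
    #include <cstdio>
    #include <list>

    struct Action {
        int32_t priority;
        const char *name;
    };

    // Like iocpu_add_platform_action(): insert before the first entry with a
    // higher priority, otherwise append, so the queue stays sorted.
    static void addAction(std::list<Action> &queue, Action entry)
    {
        for (auto it = queue.begin(); it != queue.end(); ++it) {
            if (it->priority > entry.priority) {
                queue.insert(it, entry);
                return;
            }
        }
        queue.push_back(entry);
    }

    // Like iocpu_run_platform_actions(): run entries whose (absolute) priority
    // falls inside [first, last], in queue order.
    static void runActions(const std::list<Action> &queue, uint32_t first, uint32_t last)
    {
        for (const Action &a : queue) {
            uint32_t pri = (a.priority < 0) ? (uint32_t) -a.priority : (uint32_t) a.priority;
            if (pri >= first && pri <= last) {
                printf("run %s (priority %d)\n", a.name, a.priority);
            }
        }
    }

    int main()
    {
        std::list<Action> sleepQueue;
        addAction(sleepQueue, {100, "disk"});
        addAction(sleepQueue, {50,  "network"});
        addAction(sleepQueue, {75,  "graphics"});
        runActions(sleepQueue, 0, ~0u);   // network, graphics, disk
        return 0;
    }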
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ kern_return_t @@ -457,6 +191,8 @@ PE_cpu_perfmon_interrupt_enable(cpu_id_t target, boolean_t enable) } #endif +#endif /* !USE_APPLEARMSMP */ + /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ #define super IOService @@ -473,54 +209,25 @@ OSMetaClassDefineReservedUnused(IOCPU, 7); /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ +#if !USE_APPLEARMSMP void IOCPUSleepKernel(void) { #if defined(__x86_64__) extern IOCPU *currentShutdownTarget; #endif - long cnt, numCPUs; + unsigned int cnt, numCPUs; IOCPU *target; IOCPU *bootCPU = NULL; IOPMrootDomain *rootDomain = IOService::getPMRootDomain(); - kprintf("IOCPUSleepKernel\n"); + printf("IOCPUSleepKernel enter\n"); #if defined(__arm64__) sched_override_recommended_cores_for_sleep(); #endif - IORegistryIterator * iter; - OSOrderedSet * all; - IOService * service; - rootDomain->tracePoint( kIOPMTracePointSleepPlatformActions ); - - iter = IORegistryIterator::iterateOver( gIOServicePlane, - kIORegistryIterateRecursively ); - if (iter) { - all = NULL; - do{ - if (all) { - all->release(); - } - all = iter->iterateAll(); - }while (!iter->isValid()); - iter->release(); - - if (all) { - while ((service = (IOService *) all->getFirstObject())) { - for (uint32_t qidx = kQueueSleep; qidx <= kQueueActive; qidx++) { - IOInstallServicePlatformAction(service, qidx); - } - all->removeObject(service); - } - all->release(); - } - } - - iocpu_run_platform_actions(&gActionQueues[kQueueSleep], 0, 0U - 1, - NULL, NULL, NULL, TRUE); - + IOPlatformActionsPreSleep(); rootDomain->tracePoint( kIOPMTracePointSleepCPUs ); numCPUs = gIOCPUs->getCount(); @@ -541,6 +248,7 @@ IOCPUSleepKernel(void) thread_kern_set_pri(self, thread_kern_get_kernel_maxpri()); // Sleep the CPUs. + ml_set_is_quiescing(true); cnt = numCPUs; while (cnt--) { target = OSDynamicCast(IOCPU, gIOCPUs->getObject(cnt)); @@ -572,6 +280,7 @@ IOCPUSleepKernel(void) */ bootCPU->haltCPU(); + ml_set_is_quiescing(false); /* * The system is now coming back from sleep on the boot CPU. @@ -583,18 +292,7 @@ IOCPUSleepKernel(void) console_resume(); - iocpu_run_platform_actions(&gActionQueues[kQueueWake], 0, 0U - 1, - NULL, NULL, NULL, TRUE); - - iocpu_platform_action_entry_t * entry; - for (uint32_t qidx = kQueueSleep; qidx <= kQueueActive; qidx++) { - while (!(queue_empty(&gActionQueues[qidx]))) { - entry = (typeof(entry))queue_first(&gActionQueues[qidx]); - iocpu_remove_platform_action(entry); - IODelete(entry, iocpu_platform_action_entry_t, 1); - } - } - + IOPlatformActionsPostResume(); rootDomain->tracePoint( kIOPMTracePointWakeCPUs ); // Wake the other CPUs. @@ -618,12 +316,28 @@ IOCPUSleepKernel(void) #endif thread_kern_set_pri(self, old_pri); + printf("IOCPUSleepKernel exit\n"); +} + +static bool +is_IOCPU_disabled(void) +{ + return false; +} +#else /* !USE_APPLEARMSMP */ +static bool +is_IOCPU_disabled(void) +{ + return true; } +#endif /* !USE_APPLEARMSMP */ bool IOCPU::start(IOService *provider) { - OSData *busFrequency, *cpuFrequency, *timebaseFrequency; + if (is_IOCPU_disabled()) { + return false; + } if (!super::start(provider)) { return false; @@ -638,24 +352,23 @@ IOCPU::start(IOService *provider) // Correct the bus, cpu and timebase frequencies in the device tree. 
if (gPEClockFrequencyInfo.bus_frequency_hz < 0x100000000ULL) { - busFrequency = OSData::withBytesNoCopy((void *)&gPEClockFrequencyInfo.bus_clock_rate_hz, 4); + OSSharedPtr busFrequency = OSData::withBytesNoCopy((void *)&gPEClockFrequencyInfo.bus_clock_rate_hz, 4); + provider->setProperty("bus-frequency", busFrequency.get()); } else { - busFrequency = OSData::withBytesNoCopy((void *)&gPEClockFrequencyInfo.bus_frequency_hz, 8); + OSSharedPtr busFrequency = OSData::withBytesNoCopy((void *)&gPEClockFrequencyInfo.bus_frequency_hz, 8); + provider->setProperty("bus-frequency", busFrequency.get()); } - provider->setProperty("bus-frequency", busFrequency); - busFrequency->release(); if (gPEClockFrequencyInfo.cpu_frequency_hz < 0x100000000ULL) { - cpuFrequency = OSData::withBytesNoCopy((void *)&gPEClockFrequencyInfo.cpu_clock_rate_hz, 4); + OSSharedPtr cpuFrequency = OSData::withBytesNoCopy((void *)&gPEClockFrequencyInfo.cpu_clock_rate_hz, 4); + provider->setProperty("clock-frequency", cpuFrequency.get()); } else { - cpuFrequency = OSData::withBytesNoCopy((void *)&gPEClockFrequencyInfo.cpu_frequency_hz, 8); + OSSharedPtr cpuFrequency = OSData::withBytesNoCopy((void *)&gPEClockFrequencyInfo.cpu_frequency_hz, 8); + provider->setProperty("clock-frequency", cpuFrequency.get()); } - provider->setProperty("clock-frequency", cpuFrequency); - cpuFrequency->release(); - timebaseFrequency = OSData::withBytesNoCopy((void *)&gPEClockFrequencyInfo.timebase_frequency_hz, 4); - provider->setProperty("timebase-frequency", timebaseFrequency); - timebaseFrequency->release(); + OSSharedPtr timebaseFrequency = OSData::withBytesNoCopy((void *)&gPEClockFrequencyInfo.timebase_frequency_hz, 4); + provider->setProperty("timebase-frequency", timebaseFrequency.get()); super::setProperty("IOCPUID", getRegistryEntryID(), sizeof(uint64_t) * 8); @@ -668,6 +381,10 @@ IOCPU::start(IOService *provider) void IOCPU::detach(IOService *provider) { + if (is_IOCPU_disabled()) { + return; + } + super::detach(provider); IOLockLock(gIOCPUsLock); unsigned int index = gIOCPUs->getNextIndexOfObject(this, 0); @@ -681,10 +398,12 @@ OSObject * IOCPU::getProperty(const OSSymbol *aKey) const { if (aKey == gIOCPUStateKey) { - return gIOCPUStateNames[_cpuState]; + return gIOCPUStateNames[_cpuState].get(); } - +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wdeprecated-declarations" return super::getProperty(aKey); +#pragma clang diagnostic pop } bool @@ -701,13 +420,12 @@ bool IOCPU::serializeProperties(OSSerialize *serialize) const { bool result; - OSDictionary *dict = dictionaryWithProperties(); + OSSharedPtr dict = dictionaryWithProperties(); if (!dict) { return false; } - dict->setObject(gIOCPUStateKey, gIOCPUStateNames[_cpuState]); + dict->setObject(gIOCPUStateKey.get(), gIOCPUStateNames[_cpuState].get()); result = dict->serialize(serialize); - dict->release(); return result; } @@ -722,14 +440,14 @@ IOCPU::setProperties(OSObject *properties) return kIOReturnUnsupported; } - stateStr = OSDynamicCast(OSString, dict->getObject(gIOCPUStateKey)); + stateStr = OSDynamicCast(OSString, dict->getObject(gIOCPUStateKey.get())); if (stateStr != NULL) { result = IOUserClient::clientHasPrivilege(current_task(), kIOClientPrivilegeAdministrator); if (result != kIOReturnSuccess) { return result; } - if (setProperty(gIOCPUStateKey, stateStr)) { + if (setProperty(gIOCPUStateKey.get(), stateStr)) { return kIOReturnSuccess; } @@ -795,7 +513,7 @@ IOCPU::setCPUState(UInt32 cpuState) OSArray * IOCPU::getCPUGroup(void) { - return _cpuGroup; + return 
_cpuGroup.get(); } UInt32 @@ -865,27 +583,14 @@ IOCPUInterruptController::initCPUInterruptController(int sources, int cpus) } } - ml_init_max_cpus(numSources); - -#if KPERF - /* - * kperf allocates based on the number of CPUs and requires them to all be - * accounted for. - */ - boolean_t found_kperf = FALSE; - char kperf_config_str[64]; - found_kperf = PE_parse_boot_arg_str("kperf", kperf_config_str, sizeof(kperf_config_str)); - if (found_kperf && kperf_config_str[0] != '\0') { - kperf_kernel_configure(kperf_config_str); - } -#endif /* KPERF */ - + ml_set_max_cpus(numSources); return kIOReturnSuccess; } void IOCPUInterruptController::registerCPUInterruptController(void) { + setProperty(gPlatformInterruptControllerName, kOSBooleanTrue); registerService(); getPlatform()->registerInterruptController(gPlatformInterruptControllerName, @@ -896,13 +601,12 @@ void IOCPUInterruptController::setCPUInterruptProperties(IOService *service) { int cnt; - OSArray *controller; - OSArray *specifier; - OSData *tmpData; + OSSharedPtr specifier; + OSSharedPtr controller; long tmpLong; - if ((service->getProperty(gIOInterruptControllersKey) != NULL) && - (service->getProperty(gIOInterruptSpecifiersKey) != NULL)) { + if ((service->propertyExists(gIOInterruptControllersKey)) && + (service->propertyExists(gIOInterruptSpecifiersKey))) { return; } @@ -910,11 +614,9 @@ IOCPUInterruptController::setCPUInterruptProperties(IOService *service) specifier = OSArray::withCapacity(numSources); for (cnt = 0; cnt < numSources; cnt++) { tmpLong = cnt; - tmpData = OSData::withBytes(&tmpLong, sizeof(tmpLong)); - specifier->setObject(tmpData); - tmpData->release(); + OSSharedPtr tmpData = OSData::withBytes(&tmpLong, sizeof(tmpLong)); + specifier->setObject(tmpData.get()); } - ; // Create the interrupt controller array. controller = OSArray::withCapacity(numSources); @@ -923,10 +625,8 @@ IOCPUInterruptController::setCPUInterruptProperties(IOService *service) } // Put the two arrays into the property table. - service->setProperty(gIOInterruptControllersKey, controller); - service->setProperty(gIOInterruptSpecifiersKey, specifier); - controller->release(); - specifier->release(); + service->setProperty(gIOInterruptControllersKey, controller.get()); + service->setProperty(gIOInterruptSpecifiersKey, specifier.get()); } void diff --git a/iokit/Kernel/IOCatalogue.cpp b/iokit/Kernel/IOCatalogue.cpp index 6f8f07890..958022743 100644 --- a/iokit/Kernel/IOCatalogue.cpp +++ b/iokit/Kernel/IOCatalogue.cpp @@ -38,6 +38,8 @@ * Version 2.0. 
*/ +#define IOKIT_ENABLE_SHARED_PTR + extern "C" { #include #include @@ -48,6 +50,7 @@ extern "C" { #include #include #include +#include #include #include @@ -57,6 +60,7 @@ extern "C" { #include #include +#include #if PRAGMA_MARK #pragma mark Internal Declarations @@ -64,11 +68,11 @@ extern "C" { /********************************************************************* *********************************************************************/ -IOCatalogue * gIOCatalogue; -const OSSymbol * gIOClassKey; -const OSSymbol * gIOProbeScoreKey; -const OSSymbol * gIOModuleIdentifierKey; -const OSSymbol * gIOModuleIdentifierKernelKey; +OSSharedPtr gIOCatalogue; +OSSharedPtr gIOClassKey; +OSSharedPtr gIOProbeScoreKey; +OSSharedPtr gIOModuleIdentifierKey; +OSSharedPtr gIOModuleIdentifierKernelKey; IORWLock * gIOCatalogLock; #if PRAGMA_MARK @@ -93,17 +97,16 @@ static bool isModuleLoadedNoOSKextLock(OSDictionary *theKexts, void IOCatalogue::initialize(void) { - OSArray * array; - OSString * errorString; + OSSharedPtr array; + OSSharedPtr errorString; bool rc; extern const char * gIOKernelConfigTables; - array = OSDynamicCast(OSArray, OSUnserialize(gIOKernelConfigTables, &errorString)); + array = OSDynamicPtrCast(OSUnserialize(gIOKernelConfigTables, errorString)); if (!array && errorString) { IOLog("KernelConfigTables syntax error: %s\n", errorString->getCStringNoCopy()); - errorString->release(); } gIOClassKey = OSSymbol::withCStringNoCopy( kIOClassKey ); @@ -115,11 +118,10 @@ IOCatalogue::initialize(void) assert( array && gIOClassKey && gIOProbeScoreKey && gIOModuleIdentifierKey); - gIOCatalogue = new IOCatalogue; + gIOCatalogue = OSMakeShared(); assert(gIOCatalogue); - rc = gIOCatalogue->init(array); + rc = gIOCatalogue->init(array.get()); assert(rc); - array->release(); } /********************************************************************* @@ -152,9 +154,8 @@ IOCatalogue::addPersonality(OSDictionary * dict) if (arr) { arr->setObject(dict); } else { - arr = OSArray::withObjects((const OSObject **)&dict, 1, 2); - personalities->setObject(sym, arr); - arr->release(); + OSSharedPtr sharedArr = OSArray::withObjects((const OSObject **)&dict, 1, 2); + personalities->setObject(sym, sharedArr.get()); } } @@ -181,7 +182,7 @@ IOCatalogue::init(OSArray * initArray) continue; } OSKext::uniquePersonalityProperties(dict); - if (NULL == dict->getObject( gIOClassKey )) { + if (NULL == dict->getObject( gIOClassKey.get())) { IOLog("Missing or bad \"%s\" key\n", gIOClassKey->getCStringNoCopy()); continue; @@ -208,19 +209,19 @@ IOCatalogue::free( void ) /********************************************************************* *********************************************************************/ -OSOrderedSet * +OSPtr IOCatalogue::findDrivers( IOService * service, SInt32 * generationCount) { OSDictionary * nextTable; - OSOrderedSet * set; + OSSharedPtr set; OSArray * array; const OSMetaClass * meta; unsigned int idx; set = OSOrderedSet::withCapacity( 1, IOServiceOrdering, - (void *)gIOProbeScoreKey ); + (void *)(gIOProbeScoreKey.get())); if (!set) { return NULL; } @@ -251,14 +252,14 @@ IOCatalogue::findDrivers( /********************************************************************* * Is personality already in the catalog? 
*********************************************************************/ -OSOrderedSet * +OSPtr IOCatalogue::findDrivers( OSDictionary * matching, SInt32 * generationCount) { - OSCollectionIterator * iter; + OSSharedPtr iter; OSDictionary * dict; - OSOrderedSet * set; + OSSharedPtr set; OSArray * array; const OSSymbol * key; unsigned int idx; @@ -266,14 +267,13 @@ IOCatalogue::findDrivers( OSKext::uniquePersonalityProperties(matching); set = OSOrderedSet::withCapacity( 1, IOServiceOrdering, - (void *)gIOProbeScoreKey ); + (void *)(gIOProbeScoreKey.get())); if (!set) { return NULL; } - iter = OSCollectionIterator::withCollection(personalities); + iter = OSCollectionIterator::withCollection(personalities.get()); if (!iter) { - set->release(); - return NULL; + return nullptr; } IORWLockRead(lock); @@ -293,7 +293,6 @@ IOCatalogue::findDrivers( *generationCount = getGenerationCount(); IORWLockUnlock(lock); - iter->release(); return set; } @@ -315,8 +314,8 @@ IOCatalogue::addDrivers( bool doNubMatching) { bool result = false; - OSCollectionIterator * iter = NULL; // must release - OSOrderedSet * set = NULL; // must release + OSSharedPtr set; + OSSharedPtr iter; OSObject * object = NULL; // do not release OSArray * persons = NULL;// do not release @@ -326,7 +325,7 @@ IOCatalogue::addDrivers( } set = OSOrderedSet::withCapacity( 10, IOServiceOrdering, - (void *)gIOProbeScoreKey ); + (void *)(gIOProbeScoreKey.get())); if (!set) { goto finish; } @@ -393,51 +392,34 @@ IOCatalogue::addDrivers( } // Start device matching. if (result && doNubMatching && (set->getCount() > 0)) { - IOService::catalogNewDrivers(set); + IOService::catalogNewDrivers(set.get()); generation++; } IORWLockUnlock(lock); finish: - if (set) { - set->release(); - } - if (iter) { - iter->release(); - } return result; } -/********************************************************************* -* Remove drivers from the catalog which match the -* properties in the matching dictionary. -*********************************************************************/ bool -IOCatalogue::removeDrivers( - OSDictionary * matching, - bool doNubMatching) +IOCatalogue::removeDrivers(bool doNubMatching, bool (^shouldRemove)(OSDictionary *personality)) { - OSOrderedSet * set; - OSCollectionIterator * iter; + OSSharedPtr set; + OSSharedPtr iter; OSDictionary * dict; OSArray * array; const OSSymbol * key; unsigned int idx; - if (!matching) { - return false; - } - set = OSOrderedSet::withCapacity(10, IOServiceOrdering, - (void *)gIOProbeScoreKey); + (void *)(gIOProbeScoreKey.get())); if (!set) { return false; } - iter = OSCollectionIterator::withCollection(personalities); + iter = OSCollectionIterator::withCollection(personalities.get()); if (!iter) { - set->release(); return false; } @@ -446,10 +428,7 @@ IOCatalogue::removeDrivers( array = (OSArray *) personalities->getObject(key); if (array) { for (idx = 0; (dict = (OSDictionary *) array->getObject(idx)); idx++) { - /* This comparison must be done with only the keys in the - * "matching" dict to enable general searches. - */ - if (dict->isEqualTo(matching, matching)) { + if (shouldRemove(dict)) { set->setObject(dict); array->removeObject(idx); idx--; @@ -458,18 +437,35 @@ IOCatalogue::removeDrivers( } // Start device matching. 
if (doNubMatching && (set->getCount() > 0)) { - IOService::catalogNewDrivers(set); + IOService::catalogNewDrivers(set.get()); generation++; } } IORWLockUnlock(lock); - set->release(); - iter->release(); - return true; } +/********************************************************************* +* Remove drivers from the catalog which match the +* properties in the matching dictionary. +*********************************************************************/ +bool +IOCatalogue::removeDrivers( + OSDictionary * matching, + bool doNubMatching) +{ + if (!matching) { + return false; + } + return removeDrivers(doNubMatching, ^(OSDictionary *dict) { + /* This comparison must be done with only the keys in the + * "matching" dict to enable general searches. + */ + return dict->isEqualTo(matching, matching); + }); +} + // Return the generation count. SInt32 IOCatalogue::getGenerationCount(void) const @@ -502,7 +498,7 @@ IOCatalogue::isModuleLoaded(OSDictionary * driver, OSObject ** kextRef) const driver->getObject(kIOPersonalityPublisherKey)); OSKext::recordIdentifierRequest(publisherName); - moduleName = OSDynamicCast(OSString, driver->getObject(gIOModuleIdentifierKernelKey)); + moduleName = OSDynamicCast(OSString, driver->getObject(gIOModuleIdentifierKernelKey.get())); if (moduleName) { ret = OSKext::loadKextWithIdentifier(moduleName, kextRef); if (kOSKextReturnDeferred == ret) { @@ -510,11 +506,10 @@ IOCatalogue::isModuleLoaded(OSDictionary * driver, OSObject ** kextRef) const // loaded yet, so stall. return false; } - OSString *moduleDextName = OSDynamicCast(OSString, driver->getObject(gIOModuleIdentifierKey)); + OSString *moduleDextName = OSDynamicCast(OSString, driver->getObject(gIOModuleIdentifierKey.get())); if (moduleDextName && !(moduleName->isEqualTo(moduleDextName))) { - OSObject *dextRef = NULL; - ret = OSKext::loadKextWithIdentifier(moduleDextName, &dextRef); - OSSafeReleaseNULL(dextRef); + OSSharedPtr dextRef; + ret = OSKext::loadKextWithIdentifier(moduleDextName, dextRef); } // module is present or never will be return true; @@ -526,6 +521,15 @@ IOCatalogue::isModuleLoaded(OSDictionary * driver, OSObject ** kextRef) const return true; } +bool +IOCatalogue::isModuleLoaded(OSDictionary * driver, OSSharedPtr& kextRef) const +{ + OSObject* kextRefRaw = NULL; + bool result = isModuleLoaded(driver, &kextRefRaw); + kextRef.reset(kextRefRaw, OSNoRetain); + return result; +} + /* This function is called after a module has been loaded. * Is invoked from user client call, ultimately from IOKitLib's * IOCatalogueModuleLoaded(). Sent from kextd. @@ -542,11 +546,10 @@ IOCatalogue::moduleHasLoaded(const OSSymbol * moduleName) void IOCatalogue::moduleHasLoaded(const char * moduleName) { - const OSSymbol * name; + OSSharedPtr name; name = OSSymbol::withCString(moduleName); - moduleHasLoaded(name); - name->release(); + moduleHasLoaded(name.get()); } // xxx - return is really OSReturn/kern_return_t @@ -560,7 +563,7 @@ IOReturn IOCatalogue::terminateDrivers(OSDictionary * matching, io_name_t className) { OSDictionary * dict; - OSIterator * iter; + OSSharedPtr iter; IOService * service; IOReturn ret; @@ -573,7 +576,7 @@ IOCatalogue::terminateDrivers(OSDictionary * matching, io_name_t className) } if (matching) { - OSKext::uniquePersonalityProperties( matching ); + OSKext::uniquePersonalityProperties( matching, false ); } // terminate instances. 
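removeDrivers() is now split in two: a primitive that takes a block deciding, per personality, whether to drop it, and the original dictionary-matching entry point reduced to a wrapper that supplies an isEqualTo() predicate. Later in this patch, resetAfterUserspaceReboot() reuses the same primitive to strip every dext personality. A hedged sketch of expressing another predicate against this API follows; the helper function, its arguments, and the policy it encodes are invented for the example.

    // Drop every personality published under one bundle identifier, without
    // triggering re-matching. Illustrative only: bundleID is a caller-supplied
    // OSString, and gIOModuleIdentifierKey is the "CFBundleIdentifier" symbol
    // already used elsewhere in IOCatalogue.cpp.
    bool
    removePersonalitiesForBundle(IOCatalogue *catalogue, OSString *bundleID)
    {
        return catalogue->removeDrivers(false, ^(OSDictionary *personality) {
            OSString *id = OSDynamicCast(OSString,
                personality->getObject(gIOModuleIdentifierKey.get()));
            return id != NULL && id->isEqualTo(bundleID);
        });
    }

Keeping the lock handling, ordered-set plumbing, and generation bump in one place and varying only the predicate is the design point of this refactor.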
@@ -598,23 +601,33 @@ IOCatalogue::terminateDrivers(OSDictionary * matching, io_name_t className) } OSKext * kext; + OSSharedPtr dextBundleID; const char * bundleIDStr; OSObject * prop; bool okToTerminate; + bool isDext = service->hasUserServer(); for (okToTerminate = true;;) { - kext = service->getMetaClass()->getKext(); - if (!kext) { - break; + if (isDext) { + dextBundleID = OSDynamicPtrCast(service->copyProperty(gIOModuleIdentifierKey.get())); + if (!dextBundleID) { + break; + } + bundleIDStr = dextBundleID->getCStringNoCopy(); + } else { + kext = service->getMetaClass()->getKext(); + if (!kext) { + break; + } + bundleIDStr = kext->getIdentifierCString(); + prop = kext->getPropertyForHostArch(kOSBundleAllowUserTerminateKey); + if (prop) { + okToTerminate = (kOSBooleanTrue == prop); + break; + } } - bundleIDStr = kext->getIdentifierCString(); if (!bundleIDStr) { break; } - prop = kext->getPropertyForHostArch(kOSBundleAllowUserTerminateKey); - if (prop) { - okToTerminate = (kOSBooleanTrue == prop); - break; - } if (!strcmp(kOSKextKernelIdentifier, bundleIDStr)) { okToTerminate = false; break; @@ -636,13 +649,16 @@ IOCatalogue::terminateDrivers(OSDictionary * matching, io_name_t className) break; } } - if (!service->terminate(kIOServiceRequired | kIOServiceSynchronous)) { + IOOptionBits terminateOptions = kIOServiceRequired | kIOServiceSynchronous; + if (isDext) { + terminateOptions |= kIOServiceTerminateNeedWillTerminate; + } + if (!service->terminate(terminateOptions)) { ret = kIOReturnUnsupported; break; } } } while (!service && !iter->isValid()); - iter->release(); return ret; } @@ -651,7 +667,7 @@ IOReturn IOCatalogue::_removeDrivers(OSDictionary * matching) { IOReturn ret = kIOReturnSuccess; - OSCollectionIterator * iter; + OSSharedPtr iter; OSDictionary * dict; OSArray * array; const OSSymbol * key; @@ -659,7 +675,7 @@ IOCatalogue::_removeDrivers(OSDictionary * matching) // remove configs from catalog. - iter = OSCollectionIterator::withCollection(personalities); + iter = OSCollectionIterator::withCollection(personalities.get()); if (!iter) { return kIOReturnNoMemory; } @@ -680,7 +696,6 @@ IOCatalogue::_removeDrivers(OSDictionary * matching) } } } - iter->release(); return ret; } @@ -703,13 +718,96 @@ IOCatalogue::terminateDrivers(OSDictionary * matching) return ret; } +IOReturn +IOCatalogue::terminateDriversForUserspaceReboot() +{ + IOReturn ret = kIOReturnSuccess; + +#if !NO_KEXTD + OSSharedPtr iter; + IOService * service; + bool isDeferredMatch; + bool isDext; + IOOptionBits terminateOptions; + + iter = IORegistryIterator::iterateOver(gIOServicePlane, + kIORegistryIterateRecursively); + if (!iter) { + return kIOReturnNoMemory; + } + + do { + iter->reset(); + while ((service = (IOService *)iter->getNextObject())) { + isDeferredMatch = service->propertyHasValue(gIOMatchDeferKey, kOSBooleanTrue); + isDext = service->hasUserServer(); + if (isDeferredMatch || isDext) { + if (isDext) { + OSSharedPtr name = OSDynamicPtrCast(service->copyProperty(gIOUserServerNameKey)); + const char *userServerName = NULL; + if (name) { + userServerName = name->getCStringNoCopy(); + } + IOLog("terminating service %s-0x%llx [dext %s]\n", service->getName(), service->getRegistryEntryID(), userServerName ? userServerName : "(null)"); + } else { + OSKext *kext = service->getMetaClass()->getKext(); + const char *bundleID = NULL; + if (kext) { + bundleID = kext->getIdentifierCString(); + } + IOLog("terminating service %s-0x%llx [kext %s]\n", service->getName(), service->getRegistryEntryID(), bundleID ? 
bundleID : "(null)"); + } + terminateOptions = kIOServiceRequired | kIOServiceSynchronous; + if (isDext) { + terminateOptions |= kIOServiceTerminateNeedWillTerminate; + } + if (!service->terminate(terminateOptions)) { + IOLog("failed to terminate service %s-0x%llx\n", service->getName(), service->getRegistryEntryID()); + ret = kIOReturnUnsupported; + break; + } + } + } + } while (!service && !iter->isValid()); +#endif + + return ret; +} + +IOReturn +IOCatalogue::resetAfterUserspaceReboot(void) +{ + OSSharedPtr iter; + IOService * service; + + iter = IORegistryIterator::iterateOver(gIOServicePlane, + kIORegistryIterateRecursively); + if (!iter) { + return kIOReturnNoMemory; + } + + do { + iter->reset(); + while ((service = (IOService *)iter->getNextObject())) { + service->resetRematchProperties(); + } + } while (!service && !iter->isValid()); + + /* Remove all dext personalities */ + removeDrivers(false, ^(OSDictionary *dict) { + return dict->getObject(gIOUserServerNameKey) != NULL; + }); + + return kIOReturnSuccess; +} + IOReturn IOCatalogue::terminateDriversForModule( OSString * moduleName, bool unload) { IOReturn ret; - OSDictionary * dict; + OSSharedPtr dict; bool isLoaded = false; /* Check first if the kext currently has any linkage dependents; @@ -737,15 +835,15 @@ IOCatalogue::terminateDriversForModule( goto finish; } - dict->setObject(gIOModuleIdentifierKey, moduleName); + dict->setObject(gIOModuleIdentifierKey.get(), moduleName); - ret = terminateDrivers(dict, NULL); + ret = terminateDrivers(dict.get(), NULL); /* No goto between IOLock calls! */ IORWLockWrite(lock); if (kIOReturnSuccess == ret) { - ret = _removeDrivers(dict); + ret = _removeDrivers(dict.get()); } // Unload the module itself. @@ -755,8 +853,6 @@ IOCatalogue::terminateDriversForModule( IORWLockUnlock(lock); - dict->release(); - finish: return ret; } @@ -766,7 +862,7 @@ IOCatalogue::terminateDriversForModule( const char * moduleName, bool unload) { - OSString * name; + OSSharedPtr name; IOReturn ret; name = OSString::withCString(moduleName); @@ -774,8 +870,7 @@ IOCatalogue::terminateDriversForModule( return kIOReturnNoMemory; } - ret = terminateDriversForModule(name, unload); - name->release(); + ret = terminateDriversForModule(name.get(), unload); return ret; } @@ -784,14 +879,14 @@ IOCatalogue::terminateDriversForModule( bool IOCatalogue::startMatching( OSDictionary * matching ) { - OSOrderedSet * set; + OSSharedPtr set; if (!matching) { return false; } set = OSOrderedSet::withCapacity(10, IOServiceOrdering, - (void *)gIOProbeScoreKey); + (void *)(gIOProbeScoreKey.get())); if (!set) { return false; } @@ -817,14 +912,12 @@ IOCatalogue::startMatching( OSDictionary * matching ) // Start device matching. 
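For dexts there is no kext metaclass to query, so these termination paths read the bundle identifier or user-server name straight out of the service's property table with OSDynamicPtrCast, and they add kIOServiceTerminateNeedWillTerminate so the user-space server gets its willTerminate pass. A hypothetical fragment in the same style, assuming the patched shared-pointer-returning copyProperty():

    // Log the user server backing a dext service, if any. Illustrative only.
    OSSharedPtr<OSString> name =
        OSDynamicPtrCast<OSString>(service->copyProperty(gIOUserServerNameKey));
    if (name) {
        IOLog("dext user server: %s\n", name->getCStringNoCopy());
    }

OSDynamicPtrCast is the smart-pointer analogue of OSDynamicCast: it yields a typed OSSharedPtr, or NULL when the runtime type does not match.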
if (set->getCount() > 0) { - IOService::catalogNewDrivers(set); + IOService::catalogNewDrivers(set.get()); generation++; } IORWLockUnlock(lock); - set->release(); - return true; } #endif /* defined(__i386__) || defined(__x86_64__) */ @@ -832,14 +925,14 @@ IOCatalogue::startMatching( OSDictionary * matching ) bool IOCatalogue::startMatching( const OSSymbol * moduleName ) { - OSOrderedSet * set; + OSSharedPtr set; if (!moduleName) { return false; } set = OSOrderedSet::withCapacity(10, IOServiceOrdering, - (void *)gIOProbeScoreKey); + (void *)(gIOProbeScoreKey.get())); if (!set) { return false; } @@ -849,13 +942,16 @@ IOCatalogue::startMatching( const OSSymbol * moduleName ) personalities->iterateObjects(^bool (const OSSymbol * key, OSObject * value) { OSArray * array; OSDictionary * dict; - OSObject * obj; + OSObject * moduleIdentifierKernel; + OSObject * moduleIdentifier; unsigned int idx; array = (OSArray *) value; for (idx = 0; (dict = (OSDictionary *) array->getObject(idx)); idx++) { - obj = dict->getObject(gIOModuleIdentifierKernelKey); - if (obj && moduleName->isEqualTo(obj)) { + moduleIdentifierKernel = dict->getObject(gIOModuleIdentifierKernelKey.get()); + moduleIdentifier = dict->getObject(gIOModuleIdentifierKey.get()); + if ((moduleIdentifierKernel && moduleName->isEqualTo(moduleIdentifierKernel)) || + (moduleIdentifier && moduleName->isEqualTo(moduleIdentifier))) { set->setObject(dict); } } @@ -864,14 +960,12 @@ IOCatalogue::startMatching( const OSSymbol * moduleName ) // Start device matching. if (set->getCount() > 0) { - IOService::catalogNewDrivers(set); + IOService::catalogNewDrivers(set.get()); generation++; } IORWLockUnlock(lock); - set->release(); - return true; } @@ -888,13 +982,13 @@ IOCatalogue::resetAndAddDrivers(OSArray * drivers, bool doNubMatching) { bool result = false; OSArray * newPersonalities = NULL;// do not release - OSCollectionIterator * iter = NULL;// must release - OSOrderedSet * matchSet = NULL;// must release const OSSymbol * key; OSArray * array; OSDictionary * thisNewPersonality = NULL;// do not release OSDictionary * thisOldPersonality = NULL;// do not release - OSDictionary * myKexts = NULL;// must release + OSSharedPtr myKexts; + OSSharedPtr iter; + OSSharedPtr matchSet; signed int idx, newIdx; if (drivers) { @@ -904,11 +998,11 @@ IOCatalogue::resetAndAddDrivers(OSArray * drivers, bool doNubMatching) } } matchSet = OSOrderedSet::withCapacity(10, IOServiceOrdering, - (void *)gIOProbeScoreKey); + (void *)(gIOProbeScoreKey.get())); if (!matchSet) { goto finish; } - iter = OSCollectionIterator::withCollection(personalities); + iter = OSCollectionIterator::withCollection(personalities.get()); if (!iter) { goto finish; } @@ -966,7 +1060,7 @@ IOCatalogue::resetAndAddDrivers(OSArray * drivers, bool doNubMatching) } else { // not in new set - remove // only remove dictionary if this module in not loaded - 9953845 - if (isModuleLoadedNoOSKextLock(myKexts, thisOldPersonality) == false) { + if (isModuleLoadedNoOSKextLock(myKexts.get(), thisOldPersonality) == false) { if (matchSet) { matchSet->setObject(thisOldPersonality); } @@ -996,22 +1090,13 @@ IOCatalogue::resetAndAddDrivers(OSArray * drivers, bool doNubMatching) /* Finally, start device matching on all new & removed personalities. 
*/ if (result && doNubMatching && (matchSet->getCount() > 0)) { - IOService::catalogNewDrivers(matchSet); + IOService::catalogNewDrivers(matchSet.get()); generation++; } IORWLockUnlock(lock); finish: - if (matchSet) { - matchSet->release(); - } - if (iter) { - iter->release(); - } - if (myKexts) { - myKexts->release(); - } return result; } @@ -1079,7 +1164,7 @@ isModuleLoadedNoOSKextLock(OSDictionary *theKexts, // gIOModuleIdentifierKey is "CFBundleIdentifier" myBundleID = OSDynamicCast(OSString, - theModuleDict->getObject(gIOModuleIdentifierKey)); + theModuleDict->getObject(gIOModuleIdentifierKey.get())); if (myBundleID == NULL) { return myResult; } diff --git a/iokit/Kernel/IOCommandGate.cpp b/iokit/Kernel/IOCommandGate.cpp index 1ae9bcf08..1404aac5a 100644 --- a/iokit/Kernel/IOCommandGate.cpp +++ b/iokit/Kernel/IOCommandGate.cpp @@ -25,7 +25,11 @@ * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ + +#define IOKIT_ENABLE_SHARED_PTR + #include +#include #include #include @@ -35,11 +39,11 @@ #define super IOEventSource -OSDefineMetaClassAndStructors(IOCommandGate, IOEventSource) +OSDefineMetaClassAndStructorsWithZone(IOCommandGate, IOEventSource, ZC_NONE) #if __LP64__ OSMetaClassDefineReservedUnused(IOCommandGate, 0); #else -OSMetaClassDefineReservedUsed(IOCommandGate, 0); +OSMetaClassDefineReservedUsedX86(IOCommandGate, 0); #endif OSMetaClassDefineReservedUnused(IOCommandGate, 1); OSMetaClassDefineReservedUnused(IOCommandGate, 2); @@ -79,14 +83,13 @@ IOCommandGate::init(OSObject *inOwner, Action inAction) return res; } -IOCommandGate * +OSSharedPtr IOCommandGate::commandGate(OSObject *inOwner, Action inAction) { - IOCommandGate *me = new IOCommandGate; + OSSharedPtr me = OSMakeShared(); if (me && !me->init(inOwner, inAction)) { - me->release(); - return NULL; + return nullptr; } return me; diff --git a/iokit/Kernel/IOCommandPool.cpp b/iokit/Kernel/IOCommandPool.cpp index 550d9aac5..0dcb24e0f 100644 --- a/iokit/Kernel/IOCommandPool.cpp +++ b/iokit/Kernel/IOCommandPool.cpp @@ -37,7 +37,10 @@ * */ +#define IOKIT_ENABLE_SHARED_PTR + #include +#include #define super OSObject OSDefineMetaClassAndStructors(IOCommandPool, OSObject); @@ -54,15 +57,14 @@ OSMetaClassDefineReservedUnused(IOCommandPool, 7); // withWorkLoop - primary initializer and factory method //-------------------------------------------------------------------------- -IOCommandPool * +OSSharedPtr IOCommandPool:: withWorkLoop(IOWorkLoop *inWorkLoop) { - IOCommandPool * me = new IOCommandPool; + OSSharedPtr me = OSMakeShared(); if (me && !me->initWithWorkLoop(inWorkLoop)) { - me->release(); - return NULL; + return nullptr; } return me; @@ -87,7 +89,7 @@ initWithWorkLoop(IOWorkLoop *inWorkLoop) return false; } - if (kIOReturnSuccess != inWorkLoop->addEventSource(fSerializer)) { + if (kIOReturnSuccess != inWorkLoop->addEventSource(fSerializer.get())) { return false; } @@ -98,15 +100,14 @@ initWithWorkLoop(IOWorkLoop *inWorkLoop) // commandPool & init - obsolete initializer and factory method //-------------------------------------------------------------------------- -IOCommandPool * +OSSharedPtr IOCommandPool:: commandPool(IOService * inOwner, IOWorkLoop *inWorkLoop, UInt32 inSize) { - IOCommandPool * me = new IOCommandPool; + OSSharedPtr me = OSMakeShared(); if (me && !me->init(inOwner, inWorkLoop, inSize)) { - me->release(); - return NULL; + return nullptr; } return me; @@ -131,11 +132,10 @@ IOCommandPool::free(void) // remove our event source from owner's workloop IOWorkLoop *wl = fSerializer->getWorkLoop(); if (wl) { - 
wl->removeEventSource(fSerializer); + wl->removeEventSource(fSerializer.get()); } - fSerializer->release(); - fSerializer = NULL; + fSerializer.reset(); } // Tell our superclass to cleanup too @@ -149,7 +149,7 @@ IOCommandPool::free(void) // waiting for resources //-------------------------------------------------------------------------- -IOCommand * +OSSharedPtr IOCommandPool::getCommand(bool blockForCommand) { IOReturn result = kIOReturnSuccess; @@ -160,7 +160,7 @@ IOCommandPool::getCommand(bool blockForCommand) result = fSerializer-> runAction(func, (void *) &command, (void *) blockForCommand); if (kIOReturnSuccess == result) { - return command; + return OSSharedPtr(command, OSNoRetain); } else { return NULL; } diff --git a/iokit/Kernel/IOCommandQueue.cpp b/iokit/Kernel/IOCommandQueue.cpp index 2623d063d..9278db935 100644 --- a/iokit/Kernel/IOCommandQueue.cpp +++ b/iokit/Kernel/IOCommandQueue.cpp @@ -26,12 +26,15 @@ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ +#define IOKIT_ENABLE_SHARED_PTR + #if !defined(__LP64__) #include #include #include #include +#include #include @@ -111,16 +114,16 @@ IOCommandQueue::init(OSObject *inOwner, return true; } -IOCommandQueue * +OSSharedPtr IOCommandQueue::commandQueue(OSObject *inOwner, IOCommandQueueAction inAction, int inSize) { - IOCommandQueue *me = new IOCommandQueue; + OSSharedPtr me = OSMakeShared(); if (me && !me->init(inOwner, inAction, inSize)) { - me->free(); - return NULL; + me.reset(); + return nullptr; } return me; diff --git a/iokit/Kernel/IODMACommand.cpp b/iokit/Kernel/IODMACommand.cpp index 24047b542..484656a0d 100644 --- a/iokit/Kernel/IODMACommand.cpp +++ b/iokit/Kernel/IODMACommand.cpp @@ -26,6 +26,8 @@ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ +#define IOKIT_ENABLE_SHARED_PTR + #include #include @@ -79,19 +81,23 @@ enum{ #define DEBG(fmt, args...) 
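Two different hand-over modes show up in these conversions: IOCommandPool::getCommand() wraps the dequeued command with OSNoRetain because the pool already owns the reference it is returning, while IODMACommand::setSpecification() below uses reset(mapper, OSRetain) because the caller keeps its own reference to the mapper. A short fragment contrasting the two tags; rawCommand and rawMapper are placeholder raw pointers, not variables from this patch.

    // Adopt a reference the callee already owns (what getCommand() does with the
    // dequeued command): no extra retain, and the OSSharedPtr destructor supplies
    // the matching release.
    OSSharedPtr<IOCommand> owned(rawCommand, OSNoRetain);

    // Share an object the caller still holds (what setSpecification() does with
    // the mapper): retain now, release when the smart pointer is reset or destroyed,
    // leaving the caller's reference untouched.
    OSSharedPtr<IOMapper> shared;
    shared.reset(rawMapper, OSRetain);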
{} #endif +#if 0 +#define LOGTAG 0x87654321 +#endif + /**************************** class IODMACommand ***************************/ #undef super #define super IOCommand -OSDefineMetaClassAndStructors(IODMACommand, IOCommand); - -OSMetaClassDefineReservedUsed(IODMACommand, 0); -OSMetaClassDefineReservedUsed(IODMACommand, 1); -OSMetaClassDefineReservedUsed(IODMACommand, 2); -OSMetaClassDefineReservedUsed(IODMACommand, 3); -OSMetaClassDefineReservedUsed(IODMACommand, 4); -OSMetaClassDefineReservedUsed(IODMACommand, 5); -OSMetaClassDefineReservedUsed(IODMACommand, 6); +OSDefineMetaClassAndStructorsWithZone(IODMACommand, IOCommand, ZC_NONE); + +OSMetaClassDefineReservedUsedX86(IODMACommand, 0); +OSMetaClassDefineReservedUsedX86(IODMACommand, 1); +OSMetaClassDefineReservedUsedX86(IODMACommand, 2); +OSMetaClassDefineReservedUsedX86(IODMACommand, 3); +OSMetaClassDefineReservedUsedX86(IODMACommand, 4); +OSMetaClassDefineReservedUsedX86(IODMACommand, 5); +OSMetaClassDefineReservedUsedX86(IODMACommand, 6); OSMetaClassDefineReservedUnused(IODMACommand, 7); OSMetaClassDefineReservedUnused(IODMACommand, 8); OSMetaClassDefineReservedUnused(IODMACommand, 9); @@ -102,38 +108,37 @@ OSMetaClassDefineReservedUnused(IODMACommand, 13); OSMetaClassDefineReservedUnused(IODMACommand, 14); OSMetaClassDefineReservedUnused(IODMACommand, 15); -IODMACommand * + +OSSharedPtr IODMACommand::withRefCon(void * refCon) { - IODMACommand * me = new IODMACommand; + OSSharedPtr me = OSMakeShared(); if (me && !me->initWithRefCon(refCon)) { - me->release(); - return NULL; + return nullptr; } return me; } -IODMACommand * +OSSharedPtr IODMACommand::withSpecification(SegmentFunction outSegFunc, const SegmentOptions * segmentOptions, uint32_t mappingOptions, IOMapper * mapper, void * refCon) { - IODMACommand * me = new IODMACommand; + OSSharedPtr me = OSMakeShared(); if (me && !me->initWithSpecification(outSegFunc, segmentOptions, mappingOptions, mapper, refCon)) { - me->release(); - return NULL; + return nullptr; } return me; } -IODMACommand * +OSSharedPtr IODMACommand::withSpecification(SegmentFunction outSegFunc, UInt8 numAddressBits, UInt64 maxSegmentSize, @@ -143,20 +148,19 @@ IODMACommand::withSpecification(SegmentFunction outSegFunc, IOMapper *mapper, void *refCon) { - IODMACommand * me = new IODMACommand; + OSSharedPtr me = OSMakeShared(); if (me && !me->initWithSpecification(outSegFunc, numAddressBits, maxSegmentSize, mappingOptions, maxTransferSize, alignment, mapper, refCon)) { - me->release(); - return NULL; + return nullptr; } return me; } -IODMACommand * +OSSharedPtr IODMACommand::cloneCommand(void *refCon) { SegmentOptions segmentOptions = @@ -171,7 +175,7 @@ IODMACommand::cloneCommand(void *refCon) }; return IODMACommand::withSpecification(fOutSeg, &segmentOptions, - fMappingOptions, fMapper, refCon); + fMappingOptions, fMapper.get(), refCon); } #define kLastOutputFunction ((SegmentFunction) kLastOutputFunction) @@ -331,13 +335,7 @@ IODMACommand::setSpecification(SegmentFunction outSegFunc, ; if (mapper != fMapper) { - if (mapper) { - mapper->retain(); - } - if (fMapper) { - fMapper->release(); - } - fMapper = mapper; + fMapper.reset(mapper, OSRetain); } fInternalState->fIterateOnly = (0 != (kIterateOnly & mappingOptions)); @@ -353,9 +351,35 @@ IODMACommand::free() IODelete(reserved, IODMACommandInternal, 1); } - if (fMapper) { - fMapper->release(); - } + fMapper.reset(); + + // Correct use of this class when setting an IOMemoryDescriptor + // in fMemory via setMemoryDescriptor(desc) is, for the caller, to + // have a 
matching call to clearMemoryDescriptor() before releasing + // the object. The matching call has also the effect of releasing + // the ref taken on the IOMemoryDescriptor in setMemoryDescriptor(). + // + // A number of "misbehaving" drivers has been found during testing, + // whereby a matching call to clearMemoryDescriptor() is missing: + // + // rdar://59947343 + // rdar://59946968 + // + // Both the approaches taken in said drivers are wrong, but have gone + // basically silent with fMemory being a regular pointer. With fMemory + // becoming a OSSharedPtr, the IODMACommand destructor expects to find + // either fMemory reset (through the call to clearMemoryDescriptor()) or + // a reference hold for the release. + // + // For this reason, this workaround of detaching fMemory is put in + // place here, choosing the leak over the panic for misbehaving + // drivers. Once all instances are fixed, this workaround will be + // removed. + // + // Note: all well behaving drivers that have matching calls for + // setMemoryDescriptor() and clearMemoryDescriptor() are unaffected + // since fMemory will be null at this point. + fMemory.detach(); super::free(); } @@ -402,8 +426,7 @@ IODMACommand::setMemoryDescriptor(const IOMemoryDescriptor *mem, bool autoPrepar } fInternalState->fNewMD = true; - mem->retain(); - fMemory = mem; + fMemory.reset(const_cast(mem), OSRetain); fInternalState->fSetActiveNoMapper = (!fMapper); if (fInternalState->fSetActiveNoMapper) { mem->dmaCommandOperation(kIOMDSetDMAActive, this, 0); @@ -433,8 +456,7 @@ IODMACommand::clearMemoryDescriptor(bool autoComplete) if (fInternalState->fSetActiveNoMapper) { fMemory->dmaCommandOperation(kIOMDSetDMAInactive, this, 0); } - fMemory->release(); - fMemory = NULL; + fMemory.reset(); } return kIOReturnSuccess; @@ -443,20 +465,20 @@ IODMACommand::clearMemoryDescriptor(bool autoComplete) const IOMemoryDescriptor * IODMACommand::getMemoryDescriptor() const { - return fMemory; + return fMemory.get(); } IOMemoryDescriptor * IODMACommand::getIOMemoryDescriptor() const { - IOMemoryDescriptor * mem; + OSSharedPtr mem; mem = reserved->fCopyMD; if (!mem) { - mem = __IODEQUALIFY(IOMemoryDescriptor *, fMemory); + mem = fMemory; } - return mem; + return mem.get(); } IOReturn @@ -467,7 +489,7 @@ IODMACommand::segmentOp( void *segments, UInt32 segmentIndex) { - IOOptionBits op = (uintptr_t) reference; + IOOptionBits op = (IOOptionBits)(uintptr_t) reference; addr64_t maxPhys, address; uint64_t length; uint32_t numPages; @@ -521,7 +543,11 @@ IODMACommand::segmentOp( return kIOReturnSuccess; } - numPages = atop_64(round_page_64((address & PAGE_MASK) + length)); + uint64_t numPages64 = atop_64(round_page_64((address & PAGE_MASK) + length)); + if (numPages64 > UINT_MAX) { + return kIOReturnVMError; + } + numPages = (typeof(numPages))numPages64; if (kWalkPreflight & op) { state->fCopyPageCount += numPages; @@ -557,16 +583,19 @@ IODMACommand::segmentOp( if (chunk > length) { chunk = length; } + if (chunk > (UINT_MAX - PAGE_SIZE + 1)) { + chunk = (UINT_MAX - PAGE_SIZE + 1); + } DEBG("cpv: 0x%qx %s 0x%qx, 0x%qx, 0x%02lx\n", remapAddr, (kWalkSyncIn & op) ? 
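The long comment above spells out the contract this workaround papers over: a driver that calls setMemoryDescriptor() owes a matching clearMemoryDescriptor() before it drops the command, otherwise the reference the command took on the descriptor is leaked (now deliberately, via fMemory.detach(), instead of panicking). A hypothetical well-behaved fragment; doDMA and its arguments are invented for illustration.

    IOReturn
    doDMA(IODMACommand *cmd, IOMemoryDescriptor *md)
    {
        IOReturn ret = cmd->setMemoryDescriptor(md);    // command retains md
        if (ret != kIOReturnSuccess) {
            return ret;
        }
        // ... genIOVMSegments() / hardware programming / completion here ...
        cmd->clearMemoryDescriptor();                   // balances the retain taken above
        return kIOReturnSuccess;
    }

Separately, segmentOp() now clamps each bounce-buffer copy to UINT_MAX - PAGE_SIZE + 1 bytes because copypv() takes an unsigned int length (transferSegment() later gets the same clamp), which keeps 64-bit byte counts from being truncated at the 32-bit interface. A standalone user-space sketch of that chunking, with copy32 standing in for the 32-bit-limited primitive and a 4 KiB page size assumed:

    #include <cstdint>
    #include <cstdio>
    #include <climits>

    static const uint64_t kPageSize = 4096;                       // assumed for the sketch

    // Stand-in for a primitive that only accepts a 32-bit length.
    static void copy32(uint64_t src, uint64_t dst, unsigned int len)
    {
        printf("copy 0x%x bytes from 0x%llx to 0x%llx\n",
            len, (unsigned long long) src, (unsigned long long) dst);
    }

    // Copy an arbitrarily large range by feeding the primitive chunks that are
    // always expressible as unsigned int (and page-multiple sized here).
    static void copyLarge(uint64_t src, uint64_t dst, uint64_t length)
    {
        while (length > 0) {
            uint64_t chunk = length;
            if (chunk > (UINT_MAX - kPageSize + 1)) {
                chunk = (UINT_MAX - kPageSize + 1);               // same clamp as the patch
            }
            copy32(src, dst, (unsigned int) chunk);
            src += chunk;
            dst += chunk;
            length -= chunk;
        }
    }

    int main()
    {
        copyLarge(0x1000, 0x8000, (5ull << 32) + 12345);          // several clamped chunks
        return 0;
    }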
"->" : "<-", address, chunk, op); if (kWalkSyncIn & op) { // cppvNoModSnk - copypv(remapAddr, cpuAddr, chunk, + copypv(remapAddr, cpuAddr, (unsigned int) chunk, cppvPsnk | cppvFsnk | cppvPsrc | cppvNoRefSrc ); } else { - copypv(cpuAddr, remapAddr, chunk, + copypv(cpuAddr, remapAddr, (unsigned int) chunk, cppvPsnk | cppvFsnk | cppvPsrc | cppvNoRefSrc ); } address += chunk; @@ -581,7 +610,7 @@ IODMACommand::segmentOp( return kIOReturnSuccess; } -IOBufferMemoryDescriptor * +OSSharedPtr IODMACommand::createCopyBuffer(IODirection direction, UInt64 length) { mach_vm_address_t mask = 0xFFFFF000; //state->fSourceAlignMask @@ -590,7 +619,7 @@ IODMACommand::createCopyBuffer(IODirection direction, UInt64 length) } IOReturn -IODMACommand::walkAll(UInt8 op) +IODMACommand::walkAll(uint32_t op) { IODMACommandInternal * state = fInternalState; @@ -619,7 +648,7 @@ IODMACommand::walkAll(UInt8 op) state->fDoubleBuffer = (state->fMisaligned || state->fForceDoubleBuffer); state->fForceDoubleBuffer = false; if (state->fDoubleBuffer) { - state->fCopyPageCount = atop_64(round_page(state->fPreparedLength)); + state->fCopyPageCount = (typeof(state->fCopyPageCount))(atop_64(round_page(state->fPreparedLength))); } if (state->fCopyPageCount) { @@ -678,7 +707,7 @@ IODMACommand::walkAll(UInt8 op) DEBG("sync IOBMD\n"); if (SHOULD_COPY_DIR(op, fMDSummary.fDirection)) { - IOMemoryDescriptor *poMD = const_cast(fMemory); + OSSharedPtr poMD = fMemory; IOByteCount bytes; @@ -707,8 +736,7 @@ IODMACommand::walkAll(UInt8 op) state->fCopyPageCount = 0; } if (state->fCopyMD) { - state->fCopyMD->release(); - state->fCopyMD = NULL; + state->fCopyMD.reset(); } state->fPrepared = false; @@ -719,7 +747,7 @@ IODMACommand::walkAll(UInt8 op) UInt8 IODMACommand::getNumAddressBits(void) { - return fNumAddressBits; + return (UInt8) fNumAddressBits; } UInt32 @@ -825,10 +853,12 @@ IODMACommand::prepare(UInt64 offset, UInt64 length, bool flushCache, bool synchr return kIOReturnNotAligned; } + if (atop_64(state->fPreparedLength) > UINT_MAX) { + return kIOReturnVMError; + } state->fPreparedOffset = offset; state->fPreparedLength = length; - state->fMapContig = false; state->fMisaligned = false; state->fDoubleBuffer = false; state->fPrepared = false; @@ -863,38 +893,148 @@ IODMACommand::prepare(UInt64 offset, UInt64 length, bool flushCache, bool synchr if (state->fCopyMD) { state->fCopyMD->performOperation(kIOMemoryIncoherentIOStore, 0, length); } else { - IOMemoryDescriptor * md = const_cast(fMemory); - md->performOperation(kIOMemoryIncoherentIOStore, offset, length); + fMemory->performOperation(kIOMemoryIncoherentIOStore, offset, length); } } if (fMapper) { IOMDDMAMapArgs mapArgs; bzero(&mapArgs, sizeof(mapArgs)); - mapArgs.fMapper = fMapper; + mapArgs.fMapper = fMapper.get(); mapArgs.fCommand = this; mapArgs.fMapSpec.device = state->fDevice; mapArgs.fMapSpec.alignment = fAlignMask + 1; - mapArgs.fMapSpec.numAddressBits = fNumAddressBits ? fNumAddressBits : 64; + mapArgs.fMapSpec.numAddressBits = fNumAddressBits ? 
((UInt8) fNumAddressBits) : 64; mapArgs.fLength = state->fPreparedLength; - const IOMemoryDescriptor * md = state->fCopyMD; + OSSharedPtr md = state->fCopyMD; if (md) { mapArgs.fOffset = 0; } else { md = fMemory; mapArgs.fOffset = state->fPreparedOffset; } - ret = md->dmaCommandOperation(kIOMDDMAMap | state->fIterateOnly, &mapArgs, sizeof(mapArgs)); -//IOLog("dma %p 0x%x 0x%qx-0x%qx 0x%qx-0x%qx\n", this, ret, state->fPreparedOffset, state->fPreparedLength, mapArgs.fAlloc, mapArgs.fAllocLength); + ret = md->dmaCommandOperation(kIOMDDMAMap, &mapArgs, sizeof(mapArgs)); + + if ((kIOReturnSuccess == ret) + && mapArgs.fAllocLength + && (mapArgs.fAllocLength != mapArgs.fLength)) { + do { + // multisegment case + IOMDDMAWalkSegmentState walkState; + IOMDDMAWalkSegmentArgs * walkArgs = (IOMDDMAWalkSegmentArgs *) (void *)&walkState; + IOOptionBits mdOp; + uint64_t index; + IOPhysicalLength segLen; + uint32_t segCount; + uint64_t phys, align; + uint64_t mapperPageMask; + uint64_t mapperPageShift; + uint64_t insertOffset; + uint32_t mapOptions; + uint64_t length; + + assert(mapArgs.fAllocLength > mapArgs.fLength); + + mapperPageMask = fMapper->getPageSize(); + assert(mapperPageMask); + mapperPageMask -= 1; + mapperPageShift = (64 - __builtin_clzll(mapperPageMask)); + walkArgs->fMapped = false; + length = state->fPreparedLength; + mdOp = kIOMDFirstSegment; + segCount = 0; + for (index = 0; index < length; segCount++) { + walkArgs->fOffset = state->fPreparedOffset + index; + + ret = md->dmaCommandOperation(mdOp, &walkState, sizeof(walkState)); + mdOp = kIOMDWalkSegments; + assert(kIOReturnSuccess == ret); + if (ret != kIOReturnSuccess) { + panic("dmaCommandOperation"); + } + segLen = walkArgs->fLength; + index += segLen; + } + if (ret != kIOReturnSuccess) { + break; + } + +#if defined(LOGTAG) + if (LOGTAG == fMemory->getTag()) { + IOLog("DMA[%p] alloc 0x%qx, 0x%qx\n", this, mapArgs.fAlloc, mapArgs.fAllocLength); + } +#endif /* defined(LOGTAG) */ + + state->fMapSegments = IONewZero(IODMACommandMapSegment, segCount); + if (!state->fMapSegments) { + ret = kIOReturnNoMemory; + break; + } + state->fMapSegmentsCount = segCount; + + switch (kIODirectionOutIn & fMDSummary.fDirection) { + case kIODirectionOut: + mapOptions = kIODMAMapReadAccess; + break; + case kIODirectionIn: + mapOptions = kIODMAMapWriteAccess; + break; + default: + mapOptions = kIODMAMapReadAccess | kIODMAMapWriteAccess; + break; + } + + mdOp = kIOMDFirstSegment; + segCount = 0; + for (insertOffset = 0, index = 0; index < length; segCount++) { + walkArgs->fOffset = state->fPreparedOffset + index; + ret = md->dmaCommandOperation(mdOp, &walkState, sizeof(walkState)); + mdOp = kIOMDWalkSegments; + if (ret != kIOReturnSuccess) { + panic("dmaCommandOperation 0x%x", ret); + } + phys = walkArgs->fIOVMAddr; + segLen = walkArgs->fLength; + +#if defined(LOGTAG) + if (LOGTAG == fMemory->getTag()) { + IOLog("DMA[%p] phys[%d] 0x%qx, 0x%qx\n", this, segCount, (uint64_t) phys, (uint64_t) segLen); + } +#endif /* defined(LOGTAG) */ + + align = (phys & mapperPageMask); + +#if defined(LOGTAG) + if (LOGTAG == fMemory->getTag()) { + IOLog("DMA[%p] runs[%d] dmaoff 0x%qx, mapoff 0x%qx, align 0x%qx\n", this, segCount, index, insertOffset, align); + } +#endif /* defined(LOGTAG) */ + + assert(segCount < state->fMapSegmentsCount); + state->fMapSegments[segCount].fDMAOffset = state->fPreparedOffset + index; + state->fMapSegments[segCount].fMapOffset = insertOffset; + state->fMapSegments[segCount].fPageOffset = align; + index += segLen; + + // segment page align + segLen 
= ((phys + segLen + mapperPageMask) & ~mapperPageMask); + phys -= align; + segLen -= phys; + insertOffset += segLen; + } + state->fLocalMapperAllocBase = (mapArgs.fAlloc & ~mapperPageMask); +#if defined(LOGTAG) + if (LOGTAG == fMemory->getTag()) { + IOLog("IODMACommand fMapSegmentsCount %d\n", state->fMapSegmentsCount); + } +#endif /* defined(LOGTAG) */ + } while (false); + } if (kIOReturnSuccess == ret) { state->fLocalMapperAlloc = mapArgs.fAlloc; state->fLocalMapperAllocValid = true; state->fLocalMapperAllocLength = mapArgs.fAllocLength; - state->fMapContig = mapArgs.fMapContig; - } - if (NULL != IOMapper::gSystem) { - ret = kIOReturnSuccess; } } if (kIOReturnSuccess == ret) { @@ -909,7 +1049,7 @@ IODMACommand::complete(bool invalidateCache, bool synchronize) { IODMACommandInternal * state = fInternalState; IOReturn ret = kIOReturnSuccess; - IOMemoryDescriptor * copyMD; + OSSharedPtr copyMD; if (fActive < 1) { return kIOReturnNotReady; @@ -917,15 +1057,12 @@ IODMACommand::complete(bool invalidateCache, bool synchronize) if (!--fActive) { copyMD = state->fCopyMD; - if (copyMD) { - copyMD->retain(); - } if (IS_NONCOHERENT(fMappingOptions) && invalidateCache) { if (copyMD) { copyMD->performOperation(kIOMemoryIncoherentIOFlush, 0, state->fPreparedLength); } else { - IOMemoryDescriptor * md = const_cast(fMemory); + OSSharedPtr md = fMemory; md->performOperation(kIOMemoryIncoherentIOFlush, state->fPreparedOffset, state->fPreparedLength); } } @@ -941,11 +1078,11 @@ IODMACommand::complete(bool invalidateCache, bool synchronize) if (state->fLocalMapperAllocValid) { IOMDDMAMapArgs mapArgs; bzero(&mapArgs, sizeof(mapArgs)); - mapArgs.fMapper = fMapper; + mapArgs.fMapper = fMapper.get(); mapArgs.fCommand = this; mapArgs.fAlloc = state->fLocalMapperAlloc; mapArgs.fAllocLength = state->fLocalMapperAllocLength; - const IOMemoryDescriptor * md = copyMD; + OSSharedPtr md = copyMD; if (md) { mapArgs.fOffset = 0; } else { @@ -958,10 +1095,13 @@ IODMACommand::complete(bool invalidateCache, bool synchronize) state->fLocalMapperAlloc = 0; state->fLocalMapperAllocValid = false; state->fLocalMapperAllocLength = 0; + if (state->fMapSegments) { + IODelete(state->fMapSegments, IODMACommandMapSegment, state->fMapSegmentsCount); + state->fMapSegments = NULL; + state->fMapSegmentsCount = 0; + } } - if (copyMD) { - copyMD->release(); - } + state->fPrepared = false; } @@ -1060,14 +1200,17 @@ IODMACommand::transferSegment(void *reference, copyLen = min(copyLen, page_size - (ioAddr & (page_size - 1))); ioAddr += copyLen; } + if (copyLen > (UINT_MAX - PAGE_SIZE + 1)) { + copyLen = (UINT_MAX - PAGE_SIZE + 1); + } switch (context->op) { case kIODMACommandTransferOpReadBytes: - copypv(cpuAddr, context->bufferOffset + (addr64_t) context->buffer, copyLen, + copypv(cpuAddr, context->bufferOffset + (addr64_t) context->buffer, (unsigned int) copyLen, cppvPsrc | cppvNoRefSrc | cppvFsnk | cppvKmap); break; case kIODMACommandTransferOpWriteBytes: - copypv(context->bufferOffset + (addr64_t) context->buffer, cpuAddr, copyLen, + copypv(context->bufferOffset + (addr64_t) context->buffer, cpuAddr, (unsigned int) copyLen, cppvPsnk | cppvFsnk | cppvNoRefSrc | cppvNoModSnk | cppvKmap); break; } @@ -1163,14 +1306,13 @@ IODMACommand::genIOVMSegments(uint32_t op, mdOp = kIOMDFirstSegment; if (fMapper) { if (internalState->fLocalMapperAllocValid) { - state->fMapped = kIOMDDMAWalkMappedLocal; + state->fMapped = true; state->fMappedBase = internalState->fLocalMapperAlloc; } else { - state->fMapped = true; + state->fMapped = false; } } } - ; 
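The hunks above (and most of the IOKit files changed later in this patch) replace manual retain()/release() pairs with OSSharedPtr. As a quick orientation, here is a minimal, userspace-style sketch of the ownership idioms the patch leans on: make-shared construction, get() at C-style call sites, reset(raw, retain/no-retain) to share or adopt a reference, and detach() to hand one out. Ref and SharedRef are stand-ins rather than the libkern types; only the idiom mirrors the OSMakeShared/get/reset/detach calls seen in the diff.

#include <cassert>

struct Ref {                                   // minimal intrusive refcount, in the spirit of OSObject
    int refcount = 1;
    void retain()  { ++refcount; }
    void release() { if (--refcount == 0) { delete this; } }
};

struct SharedRef {                             // minimal OSSharedPtr-like wrapper (copies omitted for brevity)
    Ref *ptr = nullptr;
    SharedRef() = default;
    explicit SharedRef(Ref *p) : ptr(p) {}     // adopts an already-owned (+1) reference
    SharedRef(const SharedRef &) = delete;
    SharedRef &operator=(const SharedRef &) = delete;
    ~SharedRef() { if (ptr) { ptr->release(); } }
    Ref *get() const { return ptr; }           // borrow for C-style APIs, no ownership transfer
    Ref *detach() { Ref *p = ptr; ptr = nullptr; return p; }  // hand the +1 out to the caller
    void reset(Ref *p, bool retainIt) {        // retainIt == true ~ OSRetain, false ~ OSNoRetain
        if (p && retainIt) { p->retain(); }
        if (ptr) { ptr->release(); }
        ptr = p;
    }
};

int main()
{
    SharedRef a(new Ref);                      // like OSMakeShared<T>(): one owning reference
    SharedRef b;
    b.reset(a.get(), /*retain*/ true);         // share ownership; refcount is now 2
    assert(a.get()->refcount == 2);

    Ref *raw = b.detach();                     // like detach(): caller receives the +1 ...
    raw->release();                            // ... and must balance it exactly once
    assert(a.get()->refcount == 1);
    return 0;                                  // 'a' drops the last reference on destruction
}

With this model in mind, the removal of the explicit copyMD->retain()/release() pair in complete() above is just the destructor of the local OSSharedPtr doing the balancing automatically.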
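The new multisegment path above records one IODMACommandMapSegment per physical run when the mapper allocation is larger than the transfer, and genIOVMSegments() below resolves client offsets against that table. A self-contained sketch of that translation follows; the field names are taken from the diff, while the container, function name, and parameters are illustrative only. The first entry is assumed to start at offset 0, which the prepare() path above arranges.

#include <cassert>
#include <cstdint>
#include <cstddef>
#include <vector>

// Field names follow the diff; everything else in this sketch is illustrative.
struct MapSegment {
    uint64_t fDMAOffset;    // offset of this physical run within the prepared buffer
    uint64_t fMapOffset;    // page-aligned offset of the run inside the mapper allocation
    uint64_t fPageOffset;   // sub-page offset of the run's first byte
};

// Translate a buffer offset into a device-visible (IOVM) address, the way the
// new genIOVMSegments() code resolves offsets against fMapSegments and
// fLocalMapperAllocBase. 'segs' must be non-empty, sorted by fDMAOffset, and
// begin at offset 0.
uint64_t
mapOffsetToDeviceAddress(const std::vector<MapSegment> &segs,
    uint64_t allocBase, uint64_t preparedLength,
    uint64_t offset, uint64_t *runRemaining)
{
    assert(!segs.empty() && offset < preparedLength);

    // Scan for the first run starting beyond 'offset' ...
    size_t ind = 0;
    while (ind < segs.size() && offset >= segs[ind].fDMAOffset) {
        ind++;
    }
    uint64_t nextStart = (ind < segs.size()) ? segs[ind].fDMAOffset : preparedLength;
    ind--;                                                // ... then step back to the run containing it

    uint64_t runOffset = offset - segs[ind].fDMAOffset;   // offset within that run
    *runRemaining = nextStart - offset;                   // contiguous bytes left in the run
    return allocBase + segs[ind].fMapOffset + segs[ind].fPageOffset + runOffset;
}

The kernel code additionally caches fIndex, fOffset2Index, and fNextOffset between calls so that sequential walks do not rescan the table from the start each time.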
UInt32 segIndex = 0; UInt32 numSegments = *numSegmentsP; @@ -1193,31 +1335,107 @@ IODMACommand::genIOVMSegments(uint32_t op, state->fOffset = offset; state->fLength = memLength - offset; - if (internalState->fMapContig && internalState->fLocalMapperAllocValid) { - state->fIOVMAddr = internalState->fLocalMapperAlloc + offset - internalState->fPreparedOffset; - rtn = kIOReturnSuccess; -#if 0 - { - uint64_t checkOffset; - IOPhysicalLength segLen; - for (checkOffset = 0; checkOffset < state->fLength;) { - addr64_t phys = const_cast(fMemory)->getPhysicalSegment(checkOffset + offset, &segLen, kIOMemoryMapperNone); - if (fMapper->mapAddr(state->fIOVMAddr + checkOffset) != phys) { - panic("%llx != %llx:%llx, %llx phys: %llx %llx\n", offset, - state->fIOVMAddr + checkOffset, fMapper->mapAddr(state->fIOVMAddr + checkOffset), state->fLength, - phys, checkOffset); - } - checkOffset += page_size - (phys & page_mask); + bool done = false; + bool check = false; + + if (internalState->fLocalMapperAllocValid) { + if (!internalState->fMapSegmentsCount) { + state->fIOVMAddr = internalState->fLocalMapperAlloc + offset - internalState->fPreparedOffset; + rtn = kIOReturnSuccess; + done = true; + check = true; + } else { + uint64_t address; + uint64_t length; + uint64_t runOffset; + uint64_t ind; + uint64_t off2Ind = internalState->fOffset2Index; + + // Validate the previous offset + if (offset + && (offset == internalState->fNextOffset || off2Ind <= offset)) { + ind = internalState->fIndex; + } else { + ind = off2Ind = 0; // Start from beginning } +#if defined(LOGTAG) + if (LOGTAG == fMemory->getTag()) { + IOLog("DMA[%p] offsets 0x%qx, 0x%qx, 0x%qx ind %qd\n", this, offset, internalState->fPreparedOffset, internalState->fNextOffset, ind); + } +#endif /* defined(LOGTAG) */ + + // Scan through iopl info blocks looking for block containing offset + while (ind < internalState->fMapSegmentsCount && offset >= internalState->fMapSegments[ind].fDMAOffset) { + ind++; + } + if (ind < internalState->fMapSegmentsCount) { + length = internalState->fMapSegments[ind].fDMAOffset; + } else { + length = memLength; + } + length -= offset; // Remainder within iopl + + // Go back to actual range as search goes past it + ind--; + off2Ind = internalState->fMapSegments[ind].fDMAOffset; + + // Subtract offset till this iopl in total list + runOffset = offset - off2Ind; + + // Compute an offset relative to the mapped base + + runOffset += internalState->fMapSegments[ind].fPageOffset; + address = internalState->fLocalMapperAllocBase + internalState->fMapSegments[ind].fMapOffset + runOffset; +#if defined(LOGTAG) + if (LOGTAG == fMemory->getTag()) { + IOLog("DMA[%p] addrlen 0x%qx, 0x%qx\n", this, address, length); + } +#endif /* defined(LOGTAG) */ + + state->fIOVMAddr = address; + state->fLength = length; + + internalState->fIndex = ind; + internalState->fOffset2Index = off2Ind; + internalState->fNextOffset = state->fOffset + length; + + rtn = kIOReturnSuccess; + done = true; + check = true; } -#endif - } else { - const IOMemoryDescriptor * memory = - internalState->fCopyMD ? internalState->fCopyMD : fMemory; + } + + if (!done) { + IOMemoryDescriptor * memory = + internalState->fCopyMD ? 
internalState->fCopyMD.get() : fMemory.get(); rtn = memory->dmaCommandOperation(mdOp, fState, sizeof(fState)); mdOp = kIOMDWalkSegments; } - +#if 0 + if (check + && !ml_at_interrupt_context() + && (rtn == kIOReturnSuccess) + && fMapper + && strcmp("AppleNVMeMMU", fMapper->getName())) { + uint64_t checkOffset; + IOPhysicalLength segLen; + IOMemoryDescriptor * memory = + internalState->fCopyMD ? internalState->fCopyMD.get() : fMemory.get(); + for (checkOffset = 0; checkOffset < state->fLength;) { + addr64_t phys = memory->getPhysicalSegment(offset + checkOffset, &segLen, kIOMemoryMapperNone); + addr64_t mapperPhys; + + mapperPhys = fMapper->mapToPhysicalAddress(state->fIOVMAddr + checkOffset); + mapperPhys |= (phys & (fMapper->getPageSize() - 1)); + if (mapperPhys != phys) { + panic("DMA[%p] mismatch at offset %llx + %llx, dma %llx mapperPhys %llx != %llx, len %llx\n", + this, offset, checkOffset, + state->fIOVMAddr + checkOffset, mapperPhys, phys, state->fLength); + } + checkOffset += page_size - (phys & page_mask); + } + } +#endif if (rtn == kIOReturnSuccess) { internalState->fIOVMAddrValid = true; assert(state->fLength); @@ -1264,7 +1482,7 @@ IODMACommand::genIOVMSegments(uint32_t op, offset -= remain; } else { UInt64 addr = curSeg.fIOVMAddr; - ppnum_t addrPage = atop_64(addr); + ppnum_t addrPage = (ppnum_t) atop_64(addr); vm_page_t remap = NULL; UInt64 remain, newLength; @@ -1374,7 +1592,11 @@ IODMACommand::genIOVMSegments(uint32_t op, if ((segIndex + 1 == numSegments)) { break; } - +#if defined(LOGTAG) + if ((LOGTAG == fMemory->getTag()) && (kWalkClient == op)) { + IOLog("DMA[%p] outseg 0x%qx, 0x%qx\n", this, curSeg.fIOVMAddr, curSeg.fLength); + } +#endif /* defined(LOGTAG) */ ret = (*outSegFunc)(reference, this, curSeg, segmentsP, segIndex++); curSegValid = curSeg.fIOVMAddr = 0; if (kIOReturnSuccess != ret) { @@ -1384,6 +1606,11 @@ IODMACommand::genIOVMSegments(uint32_t op, } if (curSegValid) { +#if defined(LOGTAG) + if ((LOGTAG == fMemory->getTag()) && (kWalkClient == op)) { + IOLog("DMA[%p] outseg 0x%qx, 0x%qx\n", this, curSeg.fIOVMAddr, curSeg.fLength); + } +#endif /* defined(LOGTAG) */ ret = (*outSegFunc)(reference, this, curSeg, segmentsP, segIndex++); } diff --git a/iokit/Kernel/IODMAController.cpp b/iokit/Kernel/IODMAController.cpp index 8650d6189..50ee85a01 100644 --- a/iokit/Kernel/IODMAController.cpp +++ b/iokit/Kernel/IODMAController.cpp @@ -26,13 +26,16 @@ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ +#define IOKIT_ENABLE_SHARED_PTR + #include +#include #define super IOService OSDefineMetaClassAndAbstractStructors(IODMAController, IOService); -const OSSymbol * +OSSharedPtr IODMAController::createControllerName(UInt32 phandle) { #define CREATE_BUF_LEN 48 @@ -47,11 +50,12 @@ IODMAController * IODMAController::getController(IOService *provider, UInt32 dmaIndex) { OSData *dmaParentData; - const OSSymbol *dmaParentName; + OSSharedPtr dmaParentName; IODMAController *dmaController; // Find the name of the parent dma controller - dmaParentData = OSDynamicCast(OSData, provider->getProperty("dma-parent")); + OSSharedPtr prop = provider->copyProperty("dma-parent"); + dmaParentData = OSDynamicCast(OSData, prop.get()); if (dmaParentData == NULL) { return NULL; } @@ -69,7 +73,7 @@ IODMAController::getController(IOService *provider, UInt32 dmaIndex) } // Wait for the parent dma controller - dmaController = OSDynamicCast(IODMAController, IOService::waitForService(IOService::nameMatching(dmaParentName))); + dmaController = OSDynamicCast(IODMAController, IOService::waitForService( 
IOService::nameMatching(dmaParentName.get()).detach())); return dmaController; } @@ -95,11 +99,12 @@ IODMAController::registerDMAController(IOOptionBits options) { OSData *phandleData; - phandleData = OSDynamicCast(OSData, _provider->getProperty("AAPL,phandle")); + OSSharedPtr prop = _provider->copyProperty("AAPL,phandle"); + phandleData = OSDynamicCast(OSData, prop.get()); _dmaControllerName = createControllerName(*(UInt32 *)phandleData->getBytesNoCopy()); - setName(_dmaControllerName); + setName(_dmaControllerName.get()); registerService(options | ((options & kIOServiceAsynchronous) ? 0 : kIOServiceSynchronous)); } diff --git a/iokit/Kernel/IODMAEventSource.cpp b/iokit/Kernel/IODMAEventSource.cpp index dd4d186f0..e41d7cb35 100644 --- a/iokit/Kernel/IODMAEventSource.cpp +++ b/iokit/Kernel/IODMAEventSource.cpp @@ -26,6 +26,8 @@ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ +#define IOKIT_ENABLE_SHARED_PTR + #include #include @@ -57,11 +59,10 @@ IODMAEventSource::init(OSObject *inOwner, dmaCompletionAction = inCompletion; dmaNotificationAction = inNotification; - dmaController = IODMAController::getController(dmaProvider, inDMAIndex); + dmaController.reset(IODMAController::getController(dmaProvider, inDMAIndex), OSRetain); if (dmaController == NULL) { return false; } - dmaController->retain(); result = dmaController->initDMAChannel(dmaProvider, this, &dmaIndex, inDMAIndex); if (result != kIOReturnSuccess) { @@ -83,18 +84,17 @@ IODMAEventSource::free() super::free(); } -IODMAEventSource * +OSSharedPtr IODMAEventSource::dmaEventSource(OSObject *inOwner, IOService *inProvider, Action inCompletion, Action inNotification, UInt32 inDMAIndex) { - IODMAEventSource *dmaES = new IODMAEventSource; + OSSharedPtr dmaES = OSMakeShared(); if (dmaES && !dmaES->init(inOwner, inProvider, inCompletion, inNotification, inDMAIndex)) { - dmaES->release(); - return NULL; + return nullptr; } return dmaES; diff --git a/iokit/Kernel/IODataQueue.cpp b/iokit/Kernel/IODataQueue.cpp index dde414b6a..35d6e09c0 100644 --- a/iokit/Kernel/IODataQueue.cpp +++ b/iokit/Kernel/IODataQueue.cpp @@ -26,6 +26,8 @@ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ +#define IOKIT_ENABLE_SHARED_PTR + #define DISABLE_DATAQUEUE_WARNING #include @@ -36,6 +38,7 @@ #include #include #include +#include struct IODataQueueInternal { mach_msg_header_t msg; @@ -54,29 +57,28 @@ struct IODataQueueInternal { OSDefineMetaClassAndStructors(IODataQueue, OSObject) -IODataQueue *IODataQueue::withCapacity(UInt32 size) +OSSharedPtr +IODataQueue::withCapacity(UInt32 size) { - IODataQueue *dataQueue = new IODataQueue; + OSSharedPtr dataQueue = OSMakeShared(); if (dataQueue) { if (!dataQueue->initWithCapacity(size)) { - dataQueue->release(); - dataQueue = NULL; + return nullptr; } } return dataQueue; } -IODataQueue * +OSSharedPtr IODataQueue::withEntries(UInt32 numEntries, UInt32 entrySize) { - IODataQueue *dataQueue = new IODataQueue; + OSSharedPtr dataQueue = OSMakeShared(); if (dataQueue) { if (!dataQueue->initWithEntries(numEntries, entrySize)) { - dataQueue->release(); - dataQueue = NULL; + return nullptr; } } @@ -288,10 +290,10 @@ IODataQueue::sendDataAvailableNotification() } } -IOMemoryDescriptor * +OSSharedPtr IODataQueue::getMemoryDescriptor() { - IOMemoryDescriptor *descriptor = NULL; + OSSharedPtr descriptor; UInt32 queueSize; queueSize = ((IODataQueueInternal *) notifyMsg)->queueSize; diff --git a/iokit/Kernel/IODeviceTreeSupport.cpp b/iokit/Kernel/IODeviceTreeSupport.cpp index 1077c7920..f0553da60 100644 --- a/iokit/Kernel/IODeviceTreeSupport.cpp +++ 
b/iokit/Kernel/IODeviceTreeSupport.cpp @@ -28,6 +28,7 @@ #include #include +#include #include #include #include @@ -66,6 +67,7 @@ const OSSymbol * gIODTUnitKey; const OSSymbol * gIODTCompatibleKey; const OSSymbol * gIODTTypeKey; const OSSymbol * gIODTModelKey; +const OSSymbol * gIODTBridgeModelKey; const OSSymbol * gIODTTargetTypeKey; const OSSymbol * gIODTSizeCellKey; @@ -90,6 +92,11 @@ static void AddPHandle( IORegistryEntry * regEntry ); static void FreePhysicalMemory( vm_offset_t * range ); static bool IODTMapInterruptsSharing( IORegistryEntry * regEntry, OSDictionary * allInts ); +// FIXME: Implementation of this function is hidden from the static analyzer. +// The analyzer doesn't know that the registry holds retains, and gets confused +// about releases after calls to 'attachToParent'. +// Feel free to remove the #ifndef and address the warning! +#ifndef __clang_analyzer__ IORegistryEntry * IODeviceTreeAlloc( void * dtTop ) { @@ -105,6 +112,7 @@ IODeviceTreeAlloc( void * dtTop ) vm_offset_t * dtMap; unsigned int propSize; bool intMap; + bool foundDTNode; bool freeDT; gIODTPlane = IORegistryEntry::makePlane( kIODeviceTreePlane ); @@ -114,6 +122,7 @@ IODeviceTreeAlloc( void * dtTop ) gIODTCompatibleKey = OSSymbol::withCStringNoCopy( "compatible" ); gIODTTypeKey = OSSymbol::withCStringNoCopy( "device_type" ); gIODTModelKey = OSSymbol::withCStringNoCopy( "model" ); + gIODTBridgeModelKey = OSSymbol::withCStringNoCopy( "bridge-model" ); gIODTTargetTypeKey = OSSymbol::withCStringNoCopy( "target-type" ); gIODTSizeCellKey = OSSymbol::withCStringNoCopy( "#size-cells" ); gIODTAddressCellKey = OSSymbol::withCStringNoCopy( "#address-cells" ); @@ -154,28 +163,30 @@ IODeviceTreeAlloc( void * dtTop ) && gIODTInterruptCellKey ); - freeDT = (kSuccess == DTLookupEntry( NULL, "/chosen/memory-map", &mapEntry )) - && (kSuccess == DTGetProperty( mapEntry, - "DeviceTree", (void **) &dtMap, &propSize )) + foundDTNode = (kSuccess == SecureDTLookupEntry( NULL, "/chosen/memory-map", &mapEntry )) + && (kSuccess == SecureDTGetProperty( mapEntry, + "DeviceTree", (void const **) &dtMap, &propSize )) && ((2 * sizeof(uint32_t)) == propSize); + freeDT = foundDTNode && !SecureDTIsLockedDown(); + parent = MakeReferenceTable((DTEntry)dtTop, freeDT ); stack = OSArray::withObjects((const OSObject **) &parent, 1, 10 ); - DTInitEntryIterator((DTEntry)dtTop, &iter ); + SecureDTInitEntryIterator((DTEntry)dtTop, &iter ); do { parent = (IORegistryEntry *)stack->getObject( stack->getCount() - 1); //parent->release(); stack->removeObject( stack->getCount() - 1); - while (kSuccess == DTIterateEntries( &iter, &dtChild)) { + while (kSuccess == SecureDTIterateEntries( &iter, &dtChild)) { child = MakeReferenceTable( dtChild, freeDT ); child->attachToParent( parent, gIODTPlane); AddPHandle( child ); - if (kSuccess == DTEnterEntry( &iter, dtChild)) { + if (kSuccess == SecureDTEnterEntry( &iter, dtChild)) { stack->setObject( parent); parent = child; } @@ -183,10 +194,10 @@ IODeviceTreeAlloc( void * dtTop ) child->release(); } } while (stack->getCount() - && (kSuccess == DTExitEntry( &iter, &dtChild))); + && (kSuccess == SecureDTExitEntry( &iter, &dtChild))); stack->release(); - assert(kSuccess != DTExitEntry(&iter, &dtChild)); + assert(kSuccess != SecureDTExitEntry(&iter, &dtChild)); // parent is now root of the created tree @@ -202,7 +213,7 @@ IODeviceTreeAlloc( void * dtTop ) if (freeDT) { // free original device tree - DTInit(NULL); + SecureDTInit(NULL, 0); IODTFreeLoaderInfo( "DeviceTree", (void *)dtMap[0], (int) 
round_page(dtMap[1])); } @@ -263,6 +274,7 @@ IODeviceTreeAlloc( void * dtTop ) return parent; } +#endif int IODTGetLoaderInfo( const char *key, void **infoAddr, int *infoSize ) @@ -370,9 +382,9 @@ MakeReferenceTable( DTEntry dtEntry, bool copy ) OSData *data; const OSSymbol *sym; OpaqueDTPropertyIterator dtIter; - void *prop; + void const *prop; unsigned int propSize; - char *name; + char const *name; char location[32]; bool noLocation = true; bool kernelOnly; @@ -385,12 +397,12 @@ MakeReferenceTable( DTEntry dtEntry, bool copy ) } if (regEntry && - (kSuccess == DTInitPropertyIterator( dtEntry, &dtIter))) { - kernelOnly = (kSuccess == DTGetProperty(dtEntry, "kernel-only", &prop, &propSize)); + (kSuccess == SecureDTInitPropertyIterator( dtEntry, &dtIter))) { + kernelOnly = (kSuccess == SecureDTGetProperty(dtEntry, "kernel-only", &prop, &propSize)); propTable = regEntry->getPropertyTable(); - while (kSuccess == DTIterateProperties( &dtIter, &name)) { - if (kSuccess != DTGetProperty( dtEntry, name, &prop, &propSize )) { + while (kSuccess == SecureDTIterateProperties( &dtIter, &name)) { + if (kSuccess != SecureDTGetProperty( dtEntry, name, &prop, &propSize )) { continue; } @@ -399,7 +411,10 @@ MakeReferenceTable( DTEntry dtEntry, bool copy ) data = OSData::withBytes(prop, propSize); } else { nameKey = OSSymbol::withCStringNoCopy(name); - data = OSData::withBytesNoCopy(prop, propSize); + /* There is no OSDataConst or other way to indicate + * that the OSData is actually immutable. But CTRR + * will catch any write attempts. */ + data = OSData::withBytesNoCopy((void**)(uintptr_t)prop, propSize); } assert( nameKey && data ); @@ -885,7 +900,7 @@ CompareKey( OSString * key, do { // for each name in the property - nlen = strnlen(names, lastName - names); + nlen = (unsigned int) strnlen(names, lastName - names); if (wild) { matched = ((nlen >= (keyLen - 1)) && (0 == strncmp(ckey, names, keyLen - 1))); } else { @@ -926,6 +941,16 @@ IODTCompareNubName( const IORegistryEntry * regEntry, return matched; } +bool +IODTCompareNubName( const IORegistryEntry * regEntry, + OSString * name, OSSharedPtr& matchingName ) +{ + OSString* matchingNameRaw = NULL; + bool result = IODTCompareNubName(regEntry, name, &matchingNameRaw); + matchingName.reset(matchingNameRaw, OSNoRetain); + return result; +} + bool IODTMatchNubWithKeys( IORegistryEntry * regEntry, const char * keys ) @@ -1354,7 +1379,7 @@ IODTFindSlotName( IORegistryEntry * regEntry, UInt32 deviceNumber ) OSData *ret = NULL; UInt32 *bits; UInt32 i; - size_t nlen; + UInt32 nlen; char *names; char *lastName; UInt32 mask; @@ -1389,7 +1414,7 @@ IODTFindSlotName( IORegistryEntry * regEntry, UInt32 deviceNumber ) for (i = 0; (i <= deviceNumber) && (names < lastName); i++) { if (mask & (1 << i)) { - nlen = 1 + strnlen(names, lastName - names); + nlen = 1 + ((unsigned int) strnlen(names, lastName - names)); if (i == deviceNumber) { data = OSData::withBytesNoCopy(names, nlen); if (data) { diff --git a/iokit/Kernel/IOEventSource.cpp b/iokit/Kernel/IOEventSource.cpp index 3415fd34a..527921130 100644 --- a/iokit/Kernel/IOEventSource.cpp +++ b/iokit/Kernel/IOEventSource.cpp @@ -32,6 +32,9 @@ * 1998-7-13 Godfrey van der Linden(gvdl) * Created. 
* ]*/ + +#define IOKIT_ENABLE_SHARED_PTR + #include #include @@ -230,6 +233,7 @@ IOEventSource::setAction(Action inAction) Block_release(actionBlock); } action = inAction; + flags &= ~kActionBlock; } void diff --git a/iokit/Kernel/IOFilterInterruptEventSource.cpp b/iokit/Kernel/IOFilterInterruptEventSource.cpp index 5e3371a10..a5d36fe37 100644 --- a/iokit/Kernel/IOFilterInterruptEventSource.cpp +++ b/iokit/Kernel/IOFilterInterruptEventSource.cpp @@ -26,6 +26,8 @@ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ +#define IOKIT_ENABLE_SHARED_PTR + #include #include #include @@ -78,7 +80,7 @@ IOFilterInterruptEventSource::init(OSObject *inOwner, return false; } -IOInterruptEventSource * +OSSharedPtr IOFilterInterruptEventSource::interruptEventSource(OSObject *inOwner, Action inAction, IOService *inProvider, @@ -109,7 +111,7 @@ IOFilterInterruptEventSource::init(OSObject *inOwner, return true; } -IOFilterInterruptEventSource * +OSSharedPtr IOFilterInterruptEventSource ::filterInterruptEventSource(OSObject *inOwner, Action inAction, @@ -117,19 +119,18 @@ IOFilterInterruptEventSource IOService *inProvider, int inIntIndex) { - IOFilterInterruptEventSource *me = new IOFilterInterruptEventSource; + OSSharedPtr me = OSMakeShared(); if (me && !me->init(inOwner, inAction, inFilterAction, inProvider, inIntIndex)) { - me->release(); - return NULL; + return nullptr; } return me; } -IOFilterInterruptEventSource * +OSSharedPtr IOFilterInterruptEventSource ::filterInterruptEventSource(OSObject *inOwner, IOService *inProvider, @@ -137,19 +138,17 @@ IOFilterInterruptEventSource ActionBlock inAction, FilterBlock inFilterAction) { - IOFilterInterruptEventSource *me = new IOFilterInterruptEventSource; + OSSharedPtr me = OSMakeShared(); FilterBlock filter = Block_copy(inFilterAction); if (!filter) { - OSSafeReleaseNULL(me); - return NULL; + return nullptr; } if (me && !me->init(inOwner, (Action) NULL, (Filter) filter, inProvider, inIntIndex)) { - me->release(); Block_release(filter); - return NULL; + return nullptr; } me->flags |= kFilterBlock; me->setActionBlock((IOEventSource::ActionBlock) inAction); diff --git a/iokit/Kernel/IOHibernateIO.cpp b/iokit/Kernel/IOHibernateIO.cpp index 3b696609a..bb6e1e85c 100644 --- a/iokit/Kernel/IOHibernateIO.cpp +++ b/iokit/Kernel/IOHibernateIO.cpp @@ -166,17 +166,25 @@ #include #include #include "IOHibernateInternal.h" -#include #include #include "IOKitKernelInternal.h" #include #include #include +#if defined(__i386__) || defined(__x86_64__) #include #include +#include +#elif defined(__arm64__) +#include +#endif /* defined(__i386__) || defined(__x86_64__) */ #include +#if HIBERNATE_HMAC_IMAGE +#include +#endif /* HIBERNATE_HMAC_IMAGE */ + extern "C" addr64_t kvtophys(vm_offset_t va); extern "C" ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va); @@ -190,6 +198,7 @@ extern uint32_t gIOHibernateState; uint32_t gIOHibernateMode; static char gIOHibernateBootSignature[256 + 1]; static char gIOHibernateFilename[MAXPATHLEN + 1]; +uint32_t gIOHibernateCount; static uuid_string_t gIOHibernateBridgeBootSessionUUIDString; @@ -249,6 +258,7 @@ enum { kVideoMapSize = 80 * 1024 * 1024 }; // copy from phys addr to MD +#if !HIBERNATE_HMAC_IMAGE static IOReturn IOMemoryDescriptorWriteFromPhysical(IOMemoryDescriptor * md, IOByteCount offset, addr64_t bytes, IOByteCount length) @@ -286,6 +296,7 @@ IOMemoryDescriptorWriteFromPhysical(IOMemoryDescriptor * md, return remaining ? 
kIOReturnUnderrun : kIOReturnSuccess; } +#endif /* !HIBERNATE_HMAC_IMAGE */ // copy from MD to phys addr @@ -334,26 +345,31 @@ hibernate_set_page_state(hibernate_page_list_t * page_list, hibernate_page_list_ vm_offset_t ppnum, vm_offset_t count, uint32_t kind) { count += ppnum; + + if (count > UINT_MAX) { + panic("hibernate_set_page_state ppnum"); + } + switch (kind) { case kIOHibernatePageStateUnwiredSave: // unwired save for (; ppnum < count; ppnum++) { - hibernate_page_bitset(page_list, FALSE, ppnum); - hibernate_page_bitset(page_list_wired, TRUE, ppnum); + hibernate_page_bitset(page_list, FALSE, (uint32_t) ppnum); + hibernate_page_bitset(page_list_wired, TRUE, (uint32_t) ppnum); } break; case kIOHibernatePageStateWiredSave: // wired save for (; ppnum < count; ppnum++) { - hibernate_page_bitset(page_list, FALSE, ppnum); - hibernate_page_bitset(page_list_wired, FALSE, ppnum); + hibernate_page_bitset(page_list, FALSE, (uint32_t) ppnum); + hibernate_page_bitset(page_list_wired, FALSE, (uint32_t) ppnum); } break; case kIOHibernatePageStateFree: // free page for (; ppnum < count; ppnum++) { - hibernate_page_bitset(page_list, TRUE, ppnum); - hibernate_page_bitset(page_list_wired, TRUE, ppnum); + hibernate_page_bitset(page_list, TRUE, (uint32_t) ppnum); + hibernate_page_bitset(page_list_wired, TRUE, (uint32_t) ppnum); } break; default: @@ -361,10 +377,31 @@ hibernate_set_page_state(hibernate_page_list_t * page_list, hibernate_page_list_ } } +static void +hibernate_set_descriptor_page_state(IOHibernateVars *vars, + IOMemoryDescriptor *descriptor, + uint32_t kind, + uint32_t *pageCount) +{ + IOItemCount count; + addr64_t phys64; + IOByteCount segLen; + if (descriptor) { + for (count = 0; + (phys64 = descriptor->getPhysicalSegment(count, &segLen, kIOMemoryMapperNone)); + count += segLen) { + hibernate_set_page_state(vars->page_list, vars->page_list_wired, + atop_64(phys64), atop_32(segLen), + kind); + *pageCount -= atop_32(segLen); + } + } +} + static vm_offset_t -hibernate_page_list_iterate(hibernate_page_list_t * list, vm_offset_t * pPage) +hibernate_page_list_iterate(hibernate_page_list_t * list, ppnum_t * pPage) { - uint32_t page = *pPage; + uint32_t page = ((typeof(page)) * pPage); uint32_t count; hibernate_bitmap_t * bitmap; @@ -457,7 +494,7 @@ IOHibernateSystemSleep(void) swapPinned = false; do{ vars->srcBuffer = IOBufferMemoryDescriptor::withOptions(kIODirectionOutIn, - 2 * page_size + WKdm_SCRATCH_BUF_SIZE_INTERNAL, page_size); + HIBERNATION_SRC_BUFFER_SIZE, page_size); vars->handoffBuffer = IOBufferMemoryDescriptor::withOptions(kIODirectionOutIn, ptoa_64(gIOHibernateHandoffPageCount), page_size); @@ -594,6 +631,10 @@ IOHibernateSystemSleep(void) gIOHibernateCurrentHeader->options |= kIOHibernateOptionProgress; } +#if HIBERNATE_HMAC_IMAGE + // inform HMAC driver that we're going to hibernate + ppl_hmac_hibernate_begin(); +#endif /* HIBERNATE_HMAC_IMAGE */ #if defined(__i386__) || defined(__x86_64__) if (vars->volumeCryptKeySize && @@ -621,10 +662,10 @@ IOHibernateSystemSleep(void) // generate crypt keys for (uint32_t i = 0; i < sizeof(vars->wiredCryptKey); i++) { - vars->wiredCryptKey[i] = random(); + vars->wiredCryptKey[i] = ((uint8_t) random()); } for (uint32_t i = 0; i < sizeof(vars->cryptKey); i++) { - vars->cryptKey[i] = random(); + vars->cryptKey[i] = ((uint8_t) random()); } // set nvram @@ -673,7 +714,7 @@ IOHibernateSystemSleep(void) } continue; } - value = (value << 4) | c; + value = ((uint8_t) ((value << 4) | c)); if (digits & 1) { rtcVars.booterSignature[out++] = value; if 
(out >= sizeof(rtcVars.booterSignature)) { @@ -709,7 +750,7 @@ IOHibernateSystemSleep(void) if (data && data->getLength() >= 4) { fileData = OSDynamicCast(OSData, gIOChosenEntry->getProperty("boot-file-path")); } - if (data) { + if (data && (data->getLength() <= UINT16_MAX)) { // AppleNVRAM_EFI_LOAD_OPTION struct { uint32_t Attributes; @@ -717,7 +758,7 @@ IOHibernateSystemSleep(void) uint16_t Desc; } loadOptionHeader; loadOptionHeader.Attributes = 1; - loadOptionHeader.FilePathLength = data->getLength(); + loadOptionHeader.FilePathLength = ((uint16_t) data->getLength()); loadOptionHeader.Desc = 0; if (fileData) { loadOptionHeader.FilePathLength -= 4; @@ -773,6 +814,7 @@ IOHibernateSystemSleep(void) gFileVars.allocated = false; gIOHibernateVars.fileVars = &gFileVars; gIOHibernateCurrentHeader->signature = kIOHibernateHeaderSignature; + gIOHibernateCurrentHeader->kernVirtSlide = vm_kernel_slide; gIOHibernateState = kIOHibernateStateHibernating; #if DEBUG || DEVELOPMENT @@ -831,7 +873,14 @@ IOSetBootImageNVRAM(OSData * data) #endif /* DEBUG || DEVELOPMENT */ } else { gIOOptionsEntry->removeProperty(gIOHibernateBootImageKey); +#if __x86_64__ gIOOptionsEntry->sync(); +#else + if (gIOHibernateState == kIOHibernateStateWakingFromHibernate) { + // if we woke from hibernation, the booter may have changed the state of NVRAM, so force a sync + gIOOptionsEntry->sync(); + } +#endif } } } @@ -899,8 +948,8 @@ ProgressInit(hibernate_graphics_t * display, uint8_t * screen, uint8_t * saveund uint32_t rowBytes, pixelShift; uint32_t x, y; int32_t blob; - uint32_t alpha, in, color, result; - uint8_t * out; + uint32_t alpha, color, result; + uint8_t * out, in; uint32_t saveindex[kIOHibernateProgressCount] = { 0 }; rowBytes = display->rowBytes; @@ -924,7 +973,7 @@ ProgressInit(hibernate_graphics_t * display, uint8_t * screen, uint8_t * saveund if (0xff != alpha) { if (1 == pixelShift) { in = *((uint16_t *)out) & 0x1f; // 16 - in = (in << 3) | (in >> 2); + in = ((uint8_t)(in << 3)) | ((uint8_t)(in >> 2)); } else { in = *((uint32_t *)out) & 0xff; // 32 } @@ -933,7 +982,7 @@ ProgressInit(hibernate_graphics_t * display, uint8_t * screen, uint8_t * saveund } if (1 == pixelShift) { result >>= 3; - *((uint16_t *)out) = (result << 10) | (result << 5) | result; // 16 + *((uint16_t *)out) = ((uint16_t)((result << 10) | (result << 5) | result)); // 16 } else { *((uint32_t *)out) = (result << 16) | (result << 8) | result; // 32 } @@ -985,7 +1034,7 @@ ProgressUpdate(hibernate_graphics_t * display, uint8_t * screen, int32_t firstBl } if (1 == pixelShift) { result >>= 3; - *((uint16_t *)out) = (result << 10) | (result << 5) | result; // 16 + *((uint16_t *)out) = ((uint16_t)((result << 10) | (result << 5) | result)); // 16 } else { *((uint32_t *)out) = (result << 16) | (result << 8) | result; // 32 } @@ -1039,6 +1088,9 @@ IOHibernateSystemHasSlept(void) if (obj && !vars->previewBuffer) { obj->release(); } + if (vars->previewBuffer && (vars->previewBuffer->getLength() > UINT_MAX)) { + OSSafeReleaseNULL(vars->previewBuffer); + } vars->consoleMapping = NULL; if (vars->previewBuffer && (kIOReturnSuccess != vars->previewBuffer->prepare())) { @@ -1061,10 +1113,10 @@ IOHibernateSystemHasSlept(void) IOService::getPlatform()->getConsoleInfo(&consoleInfo); - graphicsInfo->width = consoleInfo.v_width; - graphicsInfo->height = consoleInfo.v_height; - graphicsInfo->rowBytes = consoleInfo.v_rowBytes; - graphicsInfo->depth = consoleInfo.v_depth; + graphicsInfo->width = (uint32_t) consoleInfo.v_width; + graphicsInfo->height = (uint32_t) 
consoleInfo.v_height; + graphicsInfo->rowBytes = (uint32_t) consoleInfo.v_rowBytes; + graphicsInfo->depth = (uint32_t) consoleInfo.v_depth; vars->consoleMapping = (uint8_t *) consoleInfo.v_baseAddr; HIBPRINT("video %p %d %d %d\n", @@ -1078,7 +1130,13 @@ IOHibernateSystemHasSlept(void) } if (gIOOptionsEntry) { +#if __x86_64__ gIOOptionsEntry->sync(); +#else + if (gIOHibernateMode) { + gIOOptionsEntry->sync(); + } +#endif } return ret; @@ -1086,8 +1144,9 @@ IOHibernateSystemHasSlept(void) /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ +#if defined(__i386__) || defined(__x86_64__) static DeviceTreeNode * -MergeDeviceTree(DeviceTreeNode * entry, IORegistryEntry * regEntry) +MergeDeviceTree(DeviceTreeNode * entry, IORegistryEntry * regEntry, vm_offset_t region_start, vm_size_t region_size) { DeviceTreeNodeProperty * prop; DeviceTreeNode * child; @@ -1106,15 +1165,18 @@ MergeDeviceTree(DeviceTreeNode * entry, IORegistryEntry * regEntry) child = (DeviceTreeNode *) prop; for (idx = 0; idx < entry->nChildren; idx++) { - if (kSuccess != DTGetProperty(child, "name", (void **) &nameProp, &propLen)) { + if (kSuccess != SecureDTGetPropertyRegion(child, "name", (void const **) &nameProp, &propLen, + region_start, region_size)) { panic("no name"); } childRegEntry = regEntry ? regEntry->childFromPath(nameProp, gIODTPlane) : NULL; // HIBPRINT("%s == %p\n", nameProp, childRegEntry); - child = MergeDeviceTree(child, childRegEntry); + child = MergeDeviceTree(child, childRegEntry, region_start, region_size); } return child; } +#endif + IOReturn IOHibernateSystemWake(void) @@ -1126,6 +1188,15 @@ IOHibernateSystemWake(void) IOService::getPMRootDomain()->removeProperty(kIOHibernateOptionsKey); IOService::getPMRootDomain()->removeProperty(kIOHibernateGfxStatusKey); } + + if (gIOOptionsEntry && gIOHibernateBootImageKey) { + // if we got this far, clear boot-image + // we don't need to sync immediately; the booter should have already removed this entry + // we just want to make sure that if anyone syncs nvram after this point, we don't re-write + // a stale boot-image value + gIOOptionsEntry->removeProperty(gIOHibernateBootImageKey); + } + return kIOReturnSuccess; } @@ -1204,6 +1275,12 @@ IOHibernateDone(IOHibernateVars * vars) if (vars->srcBuffer) { vars->srcBuffer->release(); } + +#if HIBERNATE_HMAC_IMAGE + // inform HMAC driver that we're done hibernating + ppl_hmac_hibernate_end(); +#endif /* HIBERNATE_HMAC_IMAGE */ + bzero(&gIOHibernateHandoffPages[0], gIOHibernateHandoffPageCount * sizeof(gIOHibernateHandoffPages[0])); if (vars->handoffBuffer) { if (kIOHibernateStateWakingFromHibernate == gIOHibernateState) { @@ -1213,14 +1290,20 @@ IOHibernateDone(IOHibernateVars * vars) !done; handoff = (IOHibernateHandoff *) &handoff->data[handoff->bytecount]) { HIBPRINT("handoff %p, %x, %x\n", handoff, handoff->type, handoff->bytecount); - uint8_t * data = &handoff->data[0]; + uint8_t * __unused data = &handoff->data[0]; switch (handoff->type) { case kIOHibernateHandoffTypeEnd: done = true; break; case kIOHibernateHandoffTypeDeviceTree: - MergeDeviceTree((DeviceTreeNode *) data, IOService::getServiceRoot()); +#if defined(__i386__) || defined(__x86_64__) + MergeDeviceTree((DeviceTreeNode *) data, IOService::getServiceRoot(), + (vm_offset_t)data, (vm_size_t)handoff->bytecount); +#else + // On ARM, the device tree is confined to its region covered by CTRR, so effectively immutable. 
+ panic("kIOHibernateHandoffTypeDeviceTree not supported on this platform."); +#endif break; case kIOHibernateHandoffTypeKeyStore: @@ -1271,6 +1354,7 @@ IOHibernateDone(IOHibernateVars * vars) bzero(vars, sizeof(*vars)); // gIOHibernateState = kIOHibernateStateInactive; // leave it for post wake code to see + gIOHibernateCount++; return kIOReturnSuccess; } @@ -1375,6 +1459,51 @@ SYSCTL_UINT(_kern, OID_AUTO, hibernatehidready, CTLFLAG_RW | CTLFLAG_NOAUTO | CTLFLAG_KERN | CTLFLAG_ANYBODY, &_hibernateStats.hidReadyTime, 0, ""); +SYSCTL_UINT(_kern, OID_AUTO, hibernatecount, + CTLFLAG_RD | CTLFLAG_NOAUTO | CTLFLAG_KERN | CTLFLAG_ANYBODY, + &gIOHibernateCount, 0, ""); + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +static int +hibernate_set_preview SYSCTL_HANDLER_ARGS +{ +#pragma unused(oidp, arg1, arg2) + + if (!IOTaskHasEntitlement(current_task(), kIOHibernateSetPreviewEntitlementKey)) { + return EPERM; + } + + if ((req->newptr == USER_ADDR_NULL) || (!req->newlen)) { + IOService::getPMRootDomain()->removeProperty(kIOHibernatePreviewBufferKey); + return 0; + } + + size_t rounded_size = round_page(req->newlen); + IOBufferMemoryDescriptor *md = IOBufferMemoryDescriptor::withOptions(kIODirectionOutIn, rounded_size, page_size); + if (!md) { + return ENOMEM; + } + + uint8_t *bytes = (uint8_t *)md->getBytesNoCopy(); + int error = SYSCTL_IN(req, bytes, req->newlen); + if (error) { + md->release(); + return error; + } + + IOService::getPMRootDomain()->setProperty(kIOHibernatePreviewBufferKey, md); + md->release(); + + return 0; +} + +SYSCTL_PROC(_kern, OID_AUTO, hibernatepreview, + CTLTYPE_OPAQUE | CTLFLAG_WR | CTLFLAG_LOCKED | CTLFLAG_ANYBODY, NULL, 0, + hibernate_set_preview, "S", ""); + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + void IOHibernateSystemInit(IOPMrootDomain * rootDomain) { @@ -1409,6 +1538,7 @@ IOHibernateSystemInit(IOPMrootDomain * rootDomain) sysctl_register_oid(&sysctl__kern_hibernatewakenotification); sysctl_register_oid(&sysctl__kern_hibernatelockscreenready); sysctl_register_oid(&sysctl__kern_hibernatehidready); + sysctl_register_oid(&sysctl__kern_hibernatecount); gIOChosenEntry = IORegistryEntry::fromPath("/chosen", gIODTPlane); @@ -1420,22 +1550,51 @@ IOHibernateSystemInit(IOPMrootDomain * rootDomain) } gFSLock = IOLockAlloc(); + gIOHibernateCount = 0; } /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ static IOReturn -IOHibernatePolledFileWrite(IOPolledFileIOVars * vars, +IOHibernatePolledFileWrite(IOHibernateVars * vars, const uint8_t * bytes, IOByteCount size, IOPolledFileCryptVars * cryptvars) { IOReturn err; - err = IOPolledFileWrite(vars, bytes, size, cryptvars); +#if HIBERNATE_HMAC_IMAGE + uint64_t originalPosition = 0; + if (!bytes && !size) { + originalPosition = vars->fileVars->position; + } +#endif /* HIBERNATE_HMAC_IMAGE */ + + err = IOPolledFileWrite(vars->fileVars, bytes, size, cryptvars); if ((kIOReturnSuccess == err) && hibernate_should_abort()) { err = kIOReturnAborted; } +#if HIBERNATE_HMAC_IMAGE + if ((kIOReturnSuccess == err) && (vars->imageShaCtx)) { + if (!bytes && !size) { + // determine how many bytes were written + size = vars->fileVars->position - originalPosition; + } + if (bytes) { + SHA256_Update(vars->imageShaCtx, bytes, size); + } else { + // update with zeroes + uint8_t zeroes[512] = {}; + size_t len = size; + while (len) { + IOByteCount toHash = min(len, sizeof(zeroes)); + SHA256_Update(vars->imageShaCtx, zeroes, toHash); + len -= toHash; + } + } + } 
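The IOHibernatePolledFileWrite() wrapper above folds everything it writes into the image digest, including zero-fill padding for which there is no source buffer; since the padding length is only known after the underlying write, it is recovered from the file position delta and hashed as zeroes in fixed-size chunks. A standalone sketch of that pattern, with HashCtx and PolledFile as toy stand-ins for SHA256_CTX and the polled-file state:

#include <cstdint>
#include <cstddef>

// Stand-ins, purely illustrative: a streaming hash and a polled-file position.
struct HashCtx { uint64_t acc = 0; };
static void hash_update(HashCtx *ctx, const uint8_t *bytes, size_t len)
{
    for (size_t i = 0; i < len; i++) { ctx->acc = ctx->acc * 131 + bytes[i]; }  // toy mix
}

struct PolledFile { uint64_t position = 0; };
static int polled_write(PolledFile *f, const uint8_t *bytes, size_t size)
{
    // A real implementation queues I/O; a zero-fill call (bytes == NULL, size == 0)
    // pads the file out to the next block boundary and advances 'position'.
    f->position += size ? size : 512;           // toy padding size
    return 0;
}

// Write-and-hash wrapper in the spirit of the new IOHibernatePolledFileWrite():
// whatever reaches the file is also folded into the digest, including padding
// that has no source buffer.
static int write_hashed(PolledFile *f, HashCtx *ctx, const uint8_t *bytes, size_t size)
{
    uint64_t before = f->position;              // remember where a zero-fill started
    int err = polled_write(f, bytes, size);
    if (err) { return err; }

    if (!bytes && !size) {
        size = (size_t)(f->position - before);  // padding length, discovered after the fact
    }
    if (bytes) {
        hash_update(ctx, bytes, size);
    } else {
        uint8_t zeroes[512] = {};                // hash the padding in fixed-size chunks
        while (size) {
            size_t chunk = size < sizeof(zeroes) ? size : sizeof(zeroes);
            hash_update(ctx, zeroes, chunk);
            size -= chunk;
        }
    }
    return 0;
}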
+#endif /* HIBERNATE_HMAC_IMAGE */ + return err; } @@ -1448,12 +1607,14 @@ hibernate_write_image(void) IOHibernateVars * vars = &gIOHibernateVars; IOPolledFileExtent * fileExtents; +#if !defined(__arm64__) _static_assert_1_arg(sizeof(IOHibernateImageHeader) == 512); +#endif /* !defined(__arm64__) */ uint32_t pageCount, pagesDone; IOReturn err; - vm_offset_t ppnum, page; - IOItemCount count; + ppnum_t ppnum, page; + vm_offset_t count; uint8_t * src; uint8_t * data; uint8_t * compressed; @@ -1463,22 +1624,24 @@ hibernate_write_image(void) uint64_t image1Size = 0; uint32_t bitmap_size; bool iterDone, pollerOpen, needEncrypt; - uint32_t restore1Sum, sum, sum1, sum2; int wkresult; uint32_t tag; uint32_t pageType; uint32_t pageAndCount[2]; addr64_t phys64; IOByteCount segLen; +#if !HIBERNATE_HMAC_IMAGE + uint32_t restore1Sum = 0, sum = 0, sum1 = 0, sum2 = 0; uintptr_t hibernateBase; uintptr_t hibernateEnd; +#endif /* HIBERNATE_HMAC_IMAGE */ AbsoluteTime startTime, endTime; AbsoluteTime allTime, compTime; uint64_t compBytes; uint64_t nsec; - uint32_t lastProgressStamp = 0; - uint32_t progressStamp; + uint64_t lastProgressStamp = 0; + uint64_t progressStamp; uint32_t blob, lastBlob = (uint32_t) -1L; uint32_t wiredPagesEncrypted; @@ -1502,15 +1665,25 @@ hibernate_write_image(void) return kIOHibernatePostWriteSleep; } +#if HIBERNATE_HMAC_IMAGE + // set up SHA and HMAC context to hash image1 (wired pages) + SHA256_CTX imageShaCtx; + vars->imageShaCtx = &imageShaCtx; + SHA256_Init(vars->imageShaCtx); + ppl_hmac_reset(true); +#endif /* HIBERNATE_HMAC_IMAGE */ + +#if !defined(__arm64__) if (kIOHibernateModeSleep & gIOHibernateMode) { kdebug_enable = save_kdebug_enable; } +#endif /* !defined(__arm64__) */ + + pal_hib_write_hook(); KDBG(IOKDBG_CODE(DBG_HIBERNATE, 1) | DBG_FUNC_START); IOService::getPMRootDomain()->tracePoint(kIOPMTracePointHibernate); - restore1Sum = sum1 = sum2 = 0; - #if CRYPTO // encryption data. "iv" is the "initial vector". if (kIOHibernateModeEncrypt & gIOHibernateMode) { @@ -1605,33 +1778,93 @@ hibernate_write_image(void) count = vars->fileVars->fileExtents->getLength(); if (count > sizeof(header->fileExtentMap)) { count -= sizeof(header->fileExtentMap); - err = IOHibernatePolledFileWrite(vars->fileVars, + err = IOHibernatePolledFileWrite(vars, ((uint8_t *) &fileExtents[0]) + sizeof(header->fileExtentMap), count, cryptvars); if (kIOReturnSuccess != err) { break; } } - hibernateBase = HIB_BASE; /* Defined in PAL headers */ - hibernateEnd = (segHIBB + segSizeHIB); - // copy out restore1 code for (count = 0; (phys64 = vars->handoffBuffer->getPhysicalSegment(count, &segLen, kIOMemoryMapperNone)); count += segLen) { for (pagesDone = 0; pagesDone < atop_32(segLen); pagesDone++) { - gIOHibernateHandoffPages[atop_32(count) + pagesDone] = atop_64(phys64) + pagesDone; + gIOHibernateHandoffPages[atop_32(count) + pagesDone] = atop_64_ppnum(phys64) + pagesDone; } } +#if HIBERNATE_HMAC_IMAGE + if (vars->fileVars->position > UINT32_MAX) { + err = kIOReturnNoSpace; + break; + } + header->segmentsFileOffset = (uint32_t)vars->fileVars->position; + + // fetch the IOHibernateHibSegInfo and the actual pages to write + // we use srcBuffer as scratch space + IOHibernateHibSegInfo *segInfo = &header->hibSegInfo; + void *segInfoScratch = vars->srcBuffer->getBytesNoCopy(); + + // This call also enables PMAP hibernation asserts which will prevent modification + // of PMAP data structures. This needs to occur before pages start getting written + // into the image. 
+ ppl_hmac_fetch_hibseg_and_info(segInfoScratch, vars->srcBuffer->getCapacity(), segInfo); + + // write each segment to the file + size_t segInfoScratchPos = 0; + int hibSectIdx = -1; + uint32_t hibSegPageCount = 0; + for (int i = 0; i < NUM_HIBSEGINFO_SEGMENTS; i++) { + hibSegPageCount += segInfo->segments[i].pageCount; + size_t size = ptoa_64(segInfo->segments[i].pageCount); + if (size) { + err = IOHibernatePolledFileWrite(vars, + (uint8_t*)segInfoScratch + segInfoScratchPos, size, cryptvars); + if (kIOReturnSuccess != err) { + break; + } + segInfoScratchPos += size; + + // is this sectHIBTEXTB? + if (ptoa_64(segInfo->segments[i].physPage) == trunc_page(kvtophys(sectHIBTEXTB))) { + // remember which segment is sectHIBTEXTB because we'll need it later + hibSectIdx = i; + } + } + } + + if (hibSectIdx == -1) { + panic("couldn't find sectHIBTEXTB in segInfo"); + } + + // set the header fields corresponding to the HIB segments + header->restore1CodePhysPage = segInfo->segments[hibSectIdx].physPage; + header->restore1CodeVirt = trunc_page(sectHIBTEXTB); + header->restore1PageCount = hibSegPageCount; + header->restore1CodeOffset = (uint32_t)(((uintptr_t) &hibernate_machine_entrypoint) - header->restore1CodeVirt); + + // set restore1StackOffset to the physical page of the top of the stack to simplify the restore code + vm_offset_t stackFirstPage, stackPageSize; + pal_hib_get_stack_pages(&stackFirstPage, &stackPageSize); + header->restore1StackOffset = (uint32_t)(stackFirstPage + stackPageSize); +#else /* !HIBERNATE_HMAC_IMAGE */ + hibernateBase = HIB_BASE; /* Defined in PAL headers */ + hibernateEnd = (segHIBB + segSizeHIB); + page = atop_32(kvtophys(hibernateBase)); count = atop_32(round_page(hibernateEnd) - hibernateBase); - header->restore1CodePhysPage = page; + uintptr_t entrypoint = ((uintptr_t) &hibernate_machine_entrypoint) - hibernateBase; + uintptr_t stack = ((uintptr_t) &gIOHibernateRestoreStackEnd[0]) - 64 - hibernateBase; + if ((count > UINT_MAX) || (entrypoint > UINT_MAX) || (stack > UINT_MAX)) { + panic("malformed kernel layout"); + } + header->restore1CodePhysPage = (ppnum_t) page; header->restore1CodeVirt = hibernateBase; - header->restore1PageCount = count; - header->restore1CodeOffset = ((uintptr_t) &hibernate_machine_entrypoint) - hibernateBase; - header->restore1StackOffset = ((uintptr_t) &gIOHibernateRestoreStackEnd[0]) - 64 - hibernateBase; + header->restore1PageCount = (uint32_t) count; + header->restore1CodeOffset = (uint32_t) entrypoint; + header->restore1StackOffset = (uint32_t) stack; if (uuid_parse(&gIOHibernateBridgeBootSessionUUIDString[0], &header->bridgeBootSessionUUID[0])) { bzero(&header->bridgeBootSessionUUID[0], sizeof(header->bridgeBootSessionUUID)); @@ -1641,7 +1874,7 @@ hibernate_write_image(void) src = (uint8_t *) trunc_page(hibernateBase); for (page = 0; page < count; page++) { if ((src < &gIOHibernateRestoreStack[0]) || (src >= &gIOHibernateRestoreStackEnd[0])) { - restore1Sum += hibernate_sum_page(src, header->restore1CodeVirt + page); + restore1Sum += hibernate_sum_page(src, (uint32_t) (header->restore1CodeVirt + page)); } else { restore1Sum += 0x00000000; } @@ -1654,12 +1887,12 @@ hibernate_write_image(void) src = (uint8_t *) trunc_page(hibernateBase); count = ((uintptr_t) &gIOHibernateRestoreStack[0]) - trunc_page(hibernateBase); if (count) { - err = IOHibernatePolledFileWrite(vars->fileVars, src, count, cryptvars); + err = IOHibernatePolledFileWrite(vars, src, count, cryptvars); if (kIOReturnSuccess != err) { break; } } - err = 
IOHibernatePolledFileWrite(vars->fileVars, + err = IOHibernatePolledFileWrite(vars, (uint8_t *) NULL, &gIOHibernateRestoreStackEnd[0] - &gIOHibernateRestoreStack[0], cryptvars); @@ -1669,11 +1902,12 @@ hibernate_write_image(void) src = &gIOHibernateRestoreStackEnd[0]; count = round_page(hibernateEnd) - ((uintptr_t) src); if (count) { - err = IOHibernatePolledFileWrite(vars->fileVars, src, count, cryptvars); + err = IOHibernatePolledFileWrite(vars, src, count, cryptvars); if (kIOReturnSuccess != err) { break; } } +#endif /* !HIBERNATE_HMAC_IMAGE */ if (!vars->hwEncrypt && (kIOHibernateModeEncrypt & gIOHibernateMode)) { vars->fileVars->encryptStart = (vars->fileVars->position & ~(AES_BLOCK_SIZE - 1)); @@ -1688,9 +1922,9 @@ hibernate_write_image(void) count = 0; do{ phys64 = vars->previewBuffer->getPhysicalSegment(count, &segLen, kIOMemoryMapperNone); - pageAndCount[0] = atop_64(phys64); - pageAndCount[1] = atop_32(segLen); - err = IOHibernatePolledFileWrite(vars->fileVars, + pageAndCount[0] = atop_64_ppnum(phys64); + pageAndCount[1] = atop_64_ppnum(segLen); + err = IOHibernatePolledFileWrite(vars, (const uint8_t *) &pageAndCount, sizeof(pageAndCount), cryptvars); if (kIOReturnSuccess != err) { @@ -1707,47 +1941,42 @@ hibernate_write_image(void) ((hibernate_preview_t *)src)->lockTime = gIOConsoleLockTime; - count = vars->previewBuffer->getLength(); + count = (uint32_t) vars->previewBuffer->getLength(); - header->previewPageListSize = ppnum; - header->previewSize = count + ppnum; + header->previewPageListSize = ((uint32_t) ppnum); + header->previewSize = ((uint32_t) (count + ppnum)); for (page = 0; page < count; page += page_size) { phys64 = vars->previewBuffer->getPhysicalSegment(page, NULL, kIOMemoryMapperNone); - sum1 += hibernate_sum_page(src + page, atop_64(phys64)); +#if HIBERNATE_HMAC_IMAGE + err = ppl_hmac_update_and_compress_page(atop_64_ppnum(phys64), NULL, NULL); + if (kIOReturnSuccess != err) { + break; + } +#else /* !HIBERNATE_HMAC_IMAGE */ + sum1 += hibernate_sum_page(src + page, atop_64_ppnum(phys64)); +#endif /* !HIBERNATE_HMAC_IMAGE */ + } + if (kIOReturnSuccess != err) { + break; } - err = IOHibernatePolledFileWrite(vars->fileVars, src, count, cryptvars); + err = IOHibernatePolledFileWrite(vars, src, count, cryptvars); if (kIOReturnSuccess != err) { break; } } // mark areas for no save - IOMemoryDescriptor * ioBuffer; - ioBuffer = IOPolledFileGetIOBuffer(vars->fileVars); - for (count = 0; - (phys64 = ioBuffer->getPhysicalSegment(count, &segLen, kIOMemoryMapperNone)); - count += segLen) { - hibernate_set_page_state(vars->page_list, vars->page_list_wired, - atop_64(phys64), atop_32(segLen), - kIOHibernatePageStateFree); - pageCount -= atop_32(segLen); - } - - for (count = 0; - (phys64 = vars->srcBuffer->getPhysicalSegment(count, &segLen, kIOMemoryMapperNone)); - count += segLen) { - hibernate_set_page_state(vars->page_list, vars->page_list_wired, - atop_64(phys64), atop_32(segLen), - kIOHibernatePageStateFree); - pageCount -= atop_32(segLen); - } + hibernate_set_descriptor_page_state(vars, IOPolledFileGetIOBuffer(vars->fileVars), + kIOHibernatePageStateFree, &pageCount); + hibernate_set_descriptor_page_state(vars, vars->srcBuffer, + kIOHibernatePageStateFree, &pageCount); // copy out bitmap of pages available for trashing during restore bitmap_size = vars->page_list_wired->list_size; src = (uint8_t *) vars->page_list_wired; - err = IOHibernatePolledFileWrite(vars->fileVars, src, bitmap_size, cryptvars); + err = IOHibernatePolledFileWrite(vars, src, bitmap_size, cryptvars); if 
(kIOReturnSuccess != err) { break; } @@ -1757,33 +1986,40 @@ hibernate_write_image(void) hibernate_page_list_set_volatile(vars->page_list, vars->page_list_wired, &pageCount); - +#if defined(__i386__) || defined(__x86_64__) + // __HIB is explicitly saved above so we don't have to save it again page = atop_32(KERNEL_IMAGE_TO_PHYS(hibernateBase)); count = atop_32(round_page(KERNEL_IMAGE_TO_PHYS(hibernateEnd))) - page; hibernate_set_page_state(vars->page_list, vars->page_list_wired, page, count, kIOHibernatePageStateFree); pageCount -= count; - - if (vars->previewBuffer) { - for (count = 0; - (phys64 = vars->previewBuffer->getPhysicalSegment(count, &segLen, kIOMemoryMapperNone)); - count += segLen) { +#elif defined(__arm64__) + // the segments described in IOHibernateHibSegInfo are stored directly in the + // hibernation file, so they don't need to be saved again + extern unsigned long gPhysBase, gPhysSize; + for (size_t i = 0; i < NUM_HIBSEGINFO_SEGMENTS; i++) { + page = segInfo->segments[i].physPage; + count = segInfo->segments[i].pageCount; + uint64_t physAddr = ptoa_64(page); + uint64_t size = ptoa_64(count); + if (size && + (physAddr >= gPhysBase) && + (physAddr + size <= gPhysBase + gPhysSize)) { hibernate_set_page_state(vars->page_list, vars->page_list_wired, - atop_64(phys64), atop_32(segLen), + page, count, kIOHibernatePageStateFree); - pageCount -= atop_32(segLen); + pageCount -= count; } } +#else +#error unimplemented +#endif - for (count = 0; - (phys64 = vars->handoffBuffer->getPhysicalSegment(count, &segLen, kIOMemoryMapperNone)); - count += segLen) { - hibernate_set_page_state(vars->page_list, vars->page_list_wired, - atop_64(phys64), atop_32(segLen), - kIOHibernatePageStateFree); - pageCount -= atop_32(segLen); - } + hibernate_set_descriptor_page_state(vars, vars->previewBuffer, + kIOHibernatePageStateFree, &pageCount); + hibernate_set_descriptor_page_state(vars, vars->handoffBuffer, + kIOHibernatePageStateFree, &pageCount); #if KASAN vm_size_t shadow_pages_free = atop_64(shadow_ptop) - atop_64(shadow_pnext); @@ -1806,6 +2042,13 @@ hibernate_write_image(void) bitmap_size, header->previewSize, pageCount, vars->fileVars->position); +#if HIBERNATE_HMAC_IMAGE + // we don't need to sign the page data into imageHeaderHMAC because it's + // already signed into image1PagesHMAC/image2PagesHMAC + vars->imageShaCtx = NULL; + header->imageHeaderHMACSize = (uint32_t)vars->fileVars->position; +#endif /* HIBERNATE_HMAC_IMAGE */ + enum // pageType { @@ -1816,7 +2059,11 @@ hibernate_write_image(void) kUnwiredEncrypt = kEncrypt }; +#if defined(__i386__) || defined(__x86_64__) bool cpuAES = (0 != (CPUID_FEATURE_AES & cpuid_features())); +#else /* defined(__i386__) || defined(__x86_64__) */ + static const bool cpuAES = true; +#endif /* defined(__i386__) || defined(__x86_64__) */ for (pageType = kWiredEncrypt; pageType >= kUnwiredEncrypt; pageType--) { if (kUnwiredEncrypt == pageType) { @@ -1837,6 +2084,9 @@ hibernate_write_image(void) } else { count = hibernate_page_list_iterate((kWired & pageType) ? 
vars->page_list_wired : vars->page_list, &ppnum); + if (count > UINT_MAX) { + count = UINT_MAX; + } } // kprintf("[%d](%x : %x)\n", pageType, ppnum, count); iterDone = !count; @@ -1846,7 +2096,7 @@ hibernate_write_image(void) uint32_t checkIndex; for (checkIndex = 0; (checkIndex < count) - && (((kEncrypt & pageType) == 0) == pmap_is_noencrypt(ppnum + checkIndex)); + && (((kEncrypt & pageType) == 0) == pmap_is_noencrypt(((ppnum_t)(ppnum + checkIndex)))); checkIndex++) { } if (!checkIndex) { @@ -1865,9 +2115,9 @@ hibernate_write_image(void) if (iterDone && (kWiredEncrypt == pageType)) {/* not yet end of wired list */ } else { - pageAndCount[0] = ppnum; - pageAndCount[1] = count; - err = IOHibernatePolledFileWrite(vars->fileVars, + pageAndCount[0] = (uint32_t) ppnum; + pageAndCount[1] = (uint32_t) count; + err = IOHibernatePolledFileWrite(vars, (const uint8_t *) &pageAndCount, sizeof(pageAndCount), cryptvars); if (kIOReturnSuccess != err) { @@ -1876,13 +2126,16 @@ hibernate_write_image(void) } for (page = ppnum; page < (ppnum + count); page++) { +#if HIBERNATE_HMAC_IMAGE + wkresult = ppl_hmac_update_and_compress_page(page, (void **)&src, compressed); +#else /* !HIBERNATE_HMAC_IMAGE */ err = IOMemoryDescriptorWriteFromPhysical(vars->srcBuffer, 0, ptoa_64(page), page_size); if (err) { HIBLOG("IOMemoryDescriptorWriteFromPhysical %d [%ld] %x\n", __LINE__, (long)page, err); break; } - sum = hibernate_sum_page(src, page); + sum = hibernate_sum_page(src, (uint32_t) page); if (kWired & pageType) { sum1 += sum; } else { @@ -1893,7 +2146,8 @@ hibernate_write_image(void) wkresult = WKdm_compress_new((const WK_word*) src, (WK_word*) compressed, (WK_word*) scratch, - page_size - 4); + (uint32_t) (page_size - 4)); +#endif /* !HIBERNATE_HMAC_IMAGE */ clock_get_uptime(&endTime); ADD_ABSOLUTETIME(&compTime, &endTime); @@ -1919,13 +2173,14 @@ hibernate_write_image(void) } } - tag = pageCompressedSize | kIOHibernateTagSignature; - err = IOHibernatePolledFileWrite(vars->fileVars, (const uint8_t *) &tag, sizeof(tag), cryptvars); + assert(pageCompressedSize <= page_size); + tag = ((uint32_t) pageCompressedSize) | kIOHibernateTagSignature; + err = IOHibernatePolledFileWrite(vars, (const uint8_t *) &tag, sizeof(tag), cryptvars); if (kIOReturnSuccess != err) { break; } - err = IOHibernatePolledFileWrite(vars->fileVars, data, (pageCompressedSize + 3) & ~3, cryptvars); + err = IOHibernatePolledFileWrite(vars, data, (pageCompressedSize + 3) & ~3, cryptvars); if (kIOReturnSuccess != err) { break; } @@ -1969,14 +2224,14 @@ hibernate_write_image(void) if (kWiredEncrypt != pageType) { // end of image1/2 - fill to next block - err = IOHibernatePolledFileWrite(vars->fileVars, NULL, 0, cryptvars); + err = IOHibernatePolledFileWrite(vars, NULL, 0, cryptvars); if (kIOReturnSuccess != err) { break; } } if (kWiredClear == pageType) { // enlarge wired image for test -// err = IOHibernatePolledFileWrite(vars->fileVars, 0, 0x60000000, cryptvars); +// err = IOHibernatePolledFileWrite(vars, 0, 0x60000000, cryptvars); // end wired image header->encryptStart = vars->fileVars->encryptStart; @@ -1984,6 +2239,12 @@ hibernate_write_image(void) image1Size = vars->fileVars->position; HIBLOG("image1Size 0x%qx, encryptStart1 0x%qx, End1 0x%qx\n", image1Size, header->encryptStart, header->encryptEnd); +#if HIBERNATE_HMAC_IMAGE + // compute the image1 HMAC + ppl_hmac_final(header->image1PagesHMAC, sizeof(header->image1PagesHMAC)); + // reset the PPL context so we can compute the image2 (non-wired pages) HMAC + ppl_hmac_reset(false); +#endif /* 
HIBERNATE_HMAC_IMAGE */ } } if (kIOReturnSuccess != err) { @@ -1997,6 +2258,11 @@ hibernate_write_image(void) break; } +#if HIBERNATE_HMAC_IMAGE + // compute the image2 HMAC + ppl_hmac_final(header->image2PagesHMAC, sizeof(header->image2PagesHMAC)); +#endif /* HIBERNATE_HMAC_IMAGE */ + // Header: header->imageSize = vars->fileVars->position; @@ -2004,17 +2270,19 @@ hibernate_write_image(void) header->bitmapSize = bitmap_size; header->pageCount = pageCount; +#if !HIBERNATE_HMAC_IMAGE header->restore1Sum = restore1Sum; header->image1Sum = sum1; header->image2Sum = sum2; +#endif /* !HIBERNATE_HMAC_IMAGE */ header->sleepTime = gIOLastSleepTime.tv_sec; - header->compression = (compressedSize << 8) / uncompressedSize; + header->compression = ((uint32_t)((compressedSize << 8) / uncompressedSize)); gIOHibernateCompression = header->compression; count = vars->fileVars->fileExtents->getLength(); if (count > sizeof(header->fileExtentMap)) { - header->fileExtentMapSize = count; + header->fileExtentMapSize = ((uint32_t) count); count = sizeof(header->fileExtentMap); } else { header->fileExtentMapSize = sizeof(header->fileExtentMap); @@ -2023,15 +2291,30 @@ hibernate_write_image(void) header->deviceBase = vars->fileVars->block0; header->deviceBlockSize = vars->fileVars->blockSize; + header->lastHibAbsTime = mach_absolute_time(); + header->lastHibContTime = mach_continuous_time(); + +#if HIBERNATE_HMAC_IMAGE + // include the headers in the SHA calculation + SHA256_Update(&imageShaCtx, header, sizeof(*header)); + + // finalize the image header SHA + uint8_t imageHash[CCSHA256_OUTPUT_SIZE]; + SHA256_Final(imageHash, &imageShaCtx); + + // compute the header HMAC + ppl_hmac_finalize_image(imageHash, sizeof(imageHash), header->imageHeaderHMAC, sizeof(header->imageHeaderHMAC)); +#endif /* HIBERNATE_HMAC_IMAGE */ IOPolledFileSeek(vars->fileVars, 0); - err = IOHibernatePolledFileWrite(vars->fileVars, + err = IOHibernatePolledFileWrite(vars, (uint8_t *) header, sizeof(IOHibernateImageHeader), cryptvars); if (kIOReturnSuccess != err) { break; } - err = IOHibernatePolledFileWrite(vars->fileVars, NULL, 0, cryptvars); + + err = IOHibernatePolledFileWrite(vars, NULL, 0, cryptvars); }while (false); clock_get_uptime(&endTime); @@ -2055,11 +2338,14 @@ hibernate_write_image(void) nsec / 1000000ULL, nsec ? (((vars->fileVars->cryptBytes * 1000000000ULL) / 1024 / 1024) / nsec) : 0); - HIBLOG("\nimage %qd (%lld%%), uncompressed %qd (%d), compressed %qd (%d%%), sum1 %x, sum2 %x\n", + HIBLOG("\nimage %qd (%lld%%), uncompressed %qd (%d), compressed %qd (%d%%)\n", header->imageSize, (header->imageSize * 100) / vars->fileVars->fileSize, uncompressedSize, atop_32(uncompressedSize), compressedSize, - uncompressedSize ? ((int) ((compressedSize * 100ULL) / uncompressedSize)) : 0, - sum1, sum2); + uncompressedSize ? 
((int) ((compressedSize * 100ULL) / uncompressedSize)) : 0); + +#if !HIBERNATE_HMAC_IMAGE + HIBLOG("\nsum1 %x, sum2 %x\n", sum1, sum2); +#endif /* !HIBERNATE_HMAC_IMAGE */ HIBLOG("svPageCount %d, zvPageCount %d, wiredPagesEncrypted %d, wiredPagesClear %d, dirtyPagesEncrypted %d\n", svPageCount, zvPageCount, wiredPagesEncrypted, wiredPagesClear, dirtyPagesEncrypted); @@ -2081,6 +2367,15 @@ hibernate_write_image(void) KDBG(IOKDBG_CODE(DBG_HIBERNATE, 1) | DBG_FUNC_END, wiredPagesEncrypted, wiredPagesClear, dirtyPagesEncrypted); +#if defined(__arm64__) + if (kIOReturnSuccess == err) { + return kIOHibernatePostWriteHalt; + } else { + // on ARM, once ApplePMGR decides we're hibernating, we can't turn back + // see: Tonga ApplePMGR diff quiesce path support + panic("hibernate_write_image encountered error 0x%x", err); + } +#else if (kIOReturnSuccess == err) { if (kIOHibernateModeSleep & gIOHibernateMode) { return kIOHibernatePostWriteSleep; @@ -2096,6 +2391,7 @@ hibernate_write_image(void) /* on error, sleep */ return kIOHibernatePostWriteSleep; } +#endif } /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ @@ -2112,8 +2408,8 @@ hibernate_machine_init(void) AbsoluteTime startIOTime, endIOTime; uint64_t nsec, nsecIO; uint64_t compBytes; - uint32_t lastProgressStamp = 0; - uint32_t progressStamp; + uint64_t lastProgressStamp = 0; + uint64_t progressStamp; IOPolledFileCryptVars * cryptvars = NULL; IOHibernateVars * vars = &gIOHibernateVars; @@ -2135,7 +2431,11 @@ hibernate_machine_init(void) gIOHibernateCurrentHeader->diag[0], gIOHibernateCurrentHeader->diag[1], gIOHibernateCurrentHeader->diag[2], gIOHibernateCurrentHeader->diag[3]); -#define t40ms(x) (tmrCvt((((uint64_t)(x)) << 8), tscFCvtt2n) / 1000000) +#if defined(__i386__) || defined(__x86_64__) +#define t40ms(x) ((uint32_t)((tmrCvt((((uint64_t)(x)) << 8), tscFCvtt2n) / 1000000))) +#else /* defined(__i386__) || defined(__x86_64__) */ +#define t40ms(x) x +#endif /* defined(__i386__) || defined(__x86_64__) */ #define tStat(x, y) gIOHibernateStats->x = t40ms(gIOHibernateCurrentHeader->y); tStat(booterStart, booterStart); gIOHibernateStats->smcStart = gIOHibernateCurrentHeader->smcStart; @@ -2176,7 +2476,13 @@ hibernate_machine_init(void) hibernate_page_list_discard(vars->page_list); } - cryptvars = (kIOHibernateModeEncrypt & gIOHibernateMode) ? &gIOHibernateCryptWakeContext : NULL; + if (vars->hwEncrypt) { + // if vars->hwEncrypt is true, we don't need cryptvars since we supply the + // decryption key via IOPolledFilePollersSetEncryptionKey + cryptvars = NULL; + } else { + cryptvars = (kIOHibernateModeEncrypt & gIOHibernateMode) ? 
&gIOHibernateCryptWakeContext : NULL; + } if (gIOHibernateCurrentHeader->handoffPageCount > gIOHibernateHandoffPageCount) { panic("handoff overflow"); @@ -2186,10 +2492,16 @@ hibernate_machine_init(void) bool done = false; bool foundCryptData = false; bool foundVolumeEncryptData = false; + const uint8_t * handoffStart = (const uint8_t*)vars->handoffBuffer->getBytesNoCopy(); + const uint8_t * handoffEnd = handoffStart + vars->handoffBuffer->getLength(); for (handoff = (IOHibernateHandoff *) vars->handoffBuffer->getBytesNoCopy(); !done; handoff = (IOHibernateHandoff *) &handoff->data[handoff->bytecount]) { + if (((uint8_t*)handoff < handoffStart) || + (&handoff->data[handoff->bytecount] > handoffEnd)) { + panic("handoff out of range"); + } // HIBPRINT("handoff %p, %x, %x\n", handoff, handoff->type, handoff->bytecount); uint8_t * data = &handoff->data[0]; switch (handoff->type) { @@ -2207,7 +2519,11 @@ hibernate_machine_init(void) if (cryptvars) { hibernate_cryptwakevars_t * wakevars = (hibernate_cryptwakevars_t *) &handoff->data[0]; - bcopy(&wakevars->aes_iv[0], &cryptvars->aes_iv[0], sizeof(cryptvars->aes_iv)); + if (handoff->bytecount == sizeof(*wakevars)) { + bcopy(&wakevars->aes_iv[0], &cryptvars->aes_iv[0], sizeof(cryptvars->aes_iv)); + } else { + panic("kIOHibernateHandoffTypeCryptVars(%d)", handoff->bytecount); + } } foundCryptData = true; bzero(data, handoff->bytecount); @@ -2222,6 +2538,7 @@ hibernate_machine_init(void) } break; +#if defined(__i386__) || defined(__x86_64__) case kIOHibernateHandoffTypeMemoryMap: clock_get_uptime(&allTime); @@ -2241,9 +2558,10 @@ hibernate_machine_init(void) case kIOHibernateHandoffTypeDeviceTree: { // DTEntry chosen = NULL; -// HIBPRINT("DTLookupEntry %d\n", DTLookupEntry((const DTEntry) data, "/chosen", &chosen)); +// HIBPRINT("SecureDTLookupEntry %d\n", SecureDTLookupEntry((const DTEntry) data, "/chosen", &chosen)); } break; +#endif /* defined(__i386__) || defined(__x86_64__) */ default: done = (kIOHibernateHandoffType != (handoff->type & 0xFFFF0000)); @@ -2317,11 +2635,19 @@ hibernate_machine_init(void) AbsoluteTime_to_scalar(&vars->fileVars->cryptTime) = 0; err = IOPolledFileRead(vars->fileVars, NULL, 0, cryptvars); + if (kIOReturnSuccess != err) { + panic("Hibernate restore error %x", err); + } vars->fileVars->bufferOffset = vars->fileVars->bufferLimit; // -- HIBLOG("hibernate_machine_init reading\n"); +#if HIBERNATE_HMAC_IMAGE + // Reset SHA context to verify image2 hash (non-wired pages). 
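The read loop below consumes the same per-page framing that hibernate_write_image emits earlier in this patch: an 8-byte { ppnum, count } run header, then, for each page, a 4-byte tag (kIOHibernateTagSignature ORed with the compressed length and masked by kIOHibernateTagLength) followed by the compressed bytes padded to a 4-byte boundary. A length equal to the page size means the page was stored raw, and a length of 4 means a repeated 32-bit fill pattern. A minimal user-space sketch of that decoding, assuming a flat in-memory buffer instead of the kernel's polled-file reads (the sketch_ names are illustrative, not kernel API):

#include <stdint.h>
#include <stddef.h>
#include <stdbool.h>
#include <string.h>

#define SKETCH_PAGE_SIZE        16384u        /* assumption: 16K pages, matching the wider 0x7fff mask */
#define SKETCH_TAG_SIGNATURE    0x53000000u   /* kIOHibernateTagSignature in this patch */
#define SKETCH_TAG_LENGTH       0x00007fffu   /* kIOHibernateTagLength in this patch */

/* Walk one { ppnum, count } run and hand each page's compressed extent to "emit".
 * Returns false if a tag does not carry the expected signature or the buffer runs short. */
static bool
sketch_walk_run(const uint8_t *buf, size_t len, size_t *cursor,
    void (*emit)(uint32_t ppnum, const uint8_t *data, uint32_t compressedSize))
{
	uint32_t hdr[2];
	if (*cursor + sizeof(hdr) > len) {
		return false;
	}
	memcpy(hdr, buf + *cursor, sizeof(hdr));
	*cursor += sizeof(hdr);
	uint32_t ppnum = hdr[0];
	uint32_t count = hdr[1];

	for (uint32_t page = 0; page < count; page++, ppnum++) {
		uint32_t tag;
		if (*cursor + sizeof(tag) > len) {
			return false;
		}
		memcpy(&tag, buf + *cursor, sizeof(tag));
		*cursor += sizeof(tag);
		if ((tag & ~SKETCH_TAG_LENGTH) != SKETCH_TAG_SIGNATURE) {
			return false;                          /* corrupt tag: the restore path panics here */
		}
		uint32_t compressedSize = tag & SKETCH_TAG_LENGTH;
		uint32_t padded = (compressedSize + 3) & ~3u;  /* payload is padded to 4 bytes */
		if (compressedSize > SKETCH_PAGE_SIZE || *cursor + padded > len) {
			return false;
		}
		/* compressedSize == SKETCH_PAGE_SIZE: stored raw; == 4: 32-bit fill pattern */
		emit(ppnum, buf + *cursor, compressedSize);
		*cursor += padded;
	}
	return true;
}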
+ ppl_hmac_reset(false); +#endif /* HIBERNATE_HMAC_IMAGE */ + uint32_t * header = (uint32_t *) src; sum = 0; @@ -2329,11 +2655,12 @@ hibernate_machine_init(void) unsigned int count; unsigned int page; uint32_t tag; - vm_offset_t ppnum, compressedSize; + vm_offset_t compressedSize; + ppnum_t ppnum; err = IOPolledFileRead(vars->fileVars, src, 8, cryptvars); if (kIOReturnSuccess != err) { - break; + panic("Hibernate restore error %x", err); } ppnum = header[0]; @@ -2348,22 +2675,22 @@ hibernate_machine_init(void) for (page = 0; page < count; page++) { err = IOPolledFileRead(vars->fileVars, (uint8_t *) &tag, 4, cryptvars); if (kIOReturnSuccess != err) { - break; + panic("Hibernate restore error %x", err); } compressedSize = kIOHibernateTagLength & tag; if (kIOHibernateTagSignature != (tag & ~kIOHibernateTagLength)) { err = kIOReturnIPCError; - break; + panic("Hibernate restore error %x", err); } err = IOPolledFileRead(vars->fileVars, src, (compressedSize + 3) & ~3, cryptvars); if (kIOReturnSuccess != err) { - break; + panic("Hibernate restore error %x", err); } if (compressedSize < page_size) { - decoOffset = page_size; + decoOffset = ((uint32_t) page_size); clock_get_uptime(&startTime); if (compressedSize == 4) { @@ -2377,7 +2704,7 @@ hibernate_machine_init(void) *d++ = *s; } } else { - WKdm_decompress_new((WK_word*) src, (WK_word*) compressed, (WK_word*) scratch, compressedSize); + pal_hib_decompress_page(src, compressed, scratch, ((unsigned int) compressedSize)); } clock_get_uptime(&endTime); ADD_ABSOLUTETIME(&compTime, &endTime); @@ -2387,13 +2714,20 @@ hibernate_machine_init(void) decoOffset = 0; } - sum += hibernate_sum_page((src + decoOffset), ppnum); + sum += hibernate_sum_page((src + decoOffset), ((uint32_t) ppnum)); err = IOMemoryDescriptorReadToPhysical(vars->srcBuffer, decoOffset, ptoa_64(ppnum), page_size); if (err) { HIBLOG("IOMemoryDescriptorReadToPhysical [%ld] %x\n", (long)ppnum, err); - break; + panic("Hibernate restore error %x", err); } +#if HIBERNATE_HMAC_IMAGE + err = ppl_hmac_update_and_compress_page(ppnum, NULL, NULL); + if (err) { + panic("Hibernate restore error %x", err); + } +#endif /* HIBERNATE_HMAC_IMAGE */ + ppnum++; pagesDone++; pagesRead++; @@ -2419,6 +2753,14 @@ hibernate_machine_init(void) panic("Hibernate restore error %x", err); } +#if HIBERNATE_HMAC_IMAGE + uint8_t image2PagesHMAC[HIBERNATE_HMAC_SIZE]; + ppl_hmac_final(image2PagesHMAC, sizeof(image2PagesHMAC)); + if (memcmp(image2PagesHMAC, gIOHibernateCurrentHeader->image2PagesHMAC, sizeof(image2PagesHMAC)) != 0) { + panic("image2 pages corrupted"); + } +#endif /* HIBERNATE_HMAC_IMAGE */ + gIOHibernateCurrentHeader->actualImage2Sum = sum; gIOHibernateCompression = gIOHibernateCurrentHeader->compression; @@ -2439,7 +2781,7 @@ hibernate_machine_init(void) SUB_ABSOLUTETIME(&endIOTime, &startIOTime); absolutetime_to_nanoseconds(endIOTime, &nsecIO); - gIOHibernateStats->kernelImageReadDuration = nsec / 1000000ULL; + gIOHibernateStats->kernelImageReadDuration = ((uint32_t) (nsec / 1000000ULL)); gIOHibernateStats->imagePages = pagesDone; HIBLOG("hibernate_machine_init pagesDone %d sum2 %x, time: %d ms, disk(0x%x) %qd Mb/s, ", diff --git a/iokit/Kernel/IOHibernateInternal.h b/iokit/Kernel/IOHibernateInternal.h index a96d843bd..b28a270a3 100644 --- a/iokit/Kernel/IOHibernateInternal.h +++ b/iokit/Kernel/IOHibernateInternal.h @@ -27,10 +27,26 @@ */ #include +#include #ifdef __cplusplus -enum { kIOHibernateAESKeySize = 128 }; /* bits */ +#if HIBERNATE_HMAC_IMAGE +#include +#endif /* HIBERNATE_HMAC_IMAGE */ + +enum { 
kIOHibernateAESKeySize = 16 }; /* bytes */ + +#if HIBERNATE_HMAC_IMAGE +// when we call out to PPL to compute IOHibernateHibSegInfo, we use +// srcBuffer as a temporary buffer, to copy out all of the required +// HIB segments, so it should be big enough to contain those segments +#define HIBERNATION_SRC_BUFFER_SIZE (16 * 1024 * 1024) +#else +// srcBuffer has to be big enough for a source page, the WKDM +// compressed output, and a scratch page needed by WKDM +#define HIBERNATION_SRC_BUFFER_SIZE (2 * page_size + WKdm_SCRATCH_BUF_SIZE_INTERNAL) +#endif struct IOHibernateVars { hibernate_page_list_t * page_list; @@ -53,10 +69,13 @@ struct IOHibernateVars { uint8_t haveFastBoot; uint8_t saveBootAudioVolume; uint8_t hwEncrypt; - uint8_t wiredCryptKey[kIOHibernateAESKeySize / 8]; - uint8_t cryptKey[kIOHibernateAESKeySize / 8]; + uint8_t wiredCryptKey[kIOHibernateAESKeySize]; + uint8_t cryptKey[kIOHibernateAESKeySize]; size_t volumeCryptKeySize; uint8_t volumeCryptKey[64]; +#if HIBERNATE_HMAC_IMAGE + SHA256_CTX * imageShaCtx; +#endif /* HIBERNATE_HMAC_IMAGE */ }; typedef struct IOHibernateVars IOHibernateVars; @@ -64,7 +83,7 @@ typedef struct IOHibernateVars IOHibernateVars; enum{ kIOHibernateTagSignature = 0x53000000, - kIOHibernateTagLength = 0x00001fff, + kIOHibernateTagLength = 0x00007fff, }; #ifdef __cplusplus @@ -73,10 +92,18 @@ extern "C" uint32_t hibernate_sum_page(uint8_t *buf, uint32_t ppnum); +#if defined(__i386__) || defined(__x86_64__) extern vm_offset_t segHIBB; extern unsigned long segSizeHIB; -extern vm_offset_t segDATAB; -extern unsigned long segSizeDATA; +#elif defined(__arm64__) +extern vm_offset_t sectHIBTEXTB; +extern unsigned long sectSizeHIBTEXT; +#endif extern ppnum_t gIOHibernateHandoffPages[]; -extern uint32_t gIOHibernateHandoffPageCount; +extern const uint32_t gIOHibernateHandoffPageCount; + +// max address that can fit in a ppnum_t +#define IO_MAX_PAGE_ADDR (((uint64_t) UINT_MAX) << PAGE_SHIFT) +// atop() returning ppnum_t +#define atop_64_ppnum(x) ((ppnum_t)((uint64_t)(x) >> PAGE_SHIFT)) diff --git a/iokit/Kernel/IOHibernateRestoreKernel.c b/iokit/Kernel/IOHibernateRestoreKernel.c index e330b5467..f0ec6fc8d 100644 --- a/iokit/Kernel/IOHibernateRestoreKernel.c +++ b/iokit/Kernel/IOHibernateRestoreKernel.c @@ -27,6 +27,7 @@ */ #include +#include #include #include #include @@ -34,7 +35,6 @@ #include #include -#include #include "IOHibernateInternal.h" #include @@ -46,6 +46,8 @@ * it calls or references needs to be careful to only touch memory also in the "__HIB" section. 
*/ +#define HIB_ROUND_PAGE(x) (((x) + PAGE_MASK) & ~PAGE_MASK) + uint32_t gIOHibernateState; uint32_t gIOHibernateDebugFlags; @@ -54,7 +56,7 @@ static IOHibernateImageHeader _hibernateHeader; IOHibernateImageHeader * gIOHibernateCurrentHeader = &_hibernateHeader; ppnum_t gIOHibernateHandoffPages[64]; -uint32_t gIOHibernateHandoffPageCount = sizeof(gIOHibernateHandoffPages) +const uint32_t gIOHibernateHandoffPageCount = sizeof(gIOHibernateHandoffPages) / sizeof(gIOHibernateHandoffPages[0]); #if CONFIG_DEBUG @@ -134,7 +136,7 @@ enum { }; static void -uart_putc(char c) +hib_uart_putc(char c) { while (!(inb(COM1_PORT_ADDR + UART_LSR) & UART_LSR_THRE)) { } @@ -153,10 +155,70 @@ debug_probe( void ) if (inb(COM1_PORT_ADDR + UART_SCR) != 0xa5) { return false; } - uart_putc('\n'); + hib_uart_putc('\n'); return true; } +#elif defined(__arm64__) + +#define DBGLOG 1 + +#include +#include +#define dockchannel_uart_base gHibernateGlobals.dockChannelRegBase +#define uart_base gHibernateGlobals.hibUartRegBase + +static void +hib_uart_putc(char c) +{ + if (dockchannel_uart_base) { + while ((rDOCKCHANNELS_DEV_WSTAT(DOCKCHANNEL_UART_CHANNEL) & gHibernateGlobals.dockChannelWstatMask) == 0) { + } + rDOCKCHANNELS_DEV_WDATA1(DOCKCHANNEL_UART_CHANNEL) = c; + } + if (uart_base) { + while ((rUTRSTAT0 & 0x04) == 0) { + // wait for space in the uart + } + rUTXH0 = c; + } +} + +static int +debug_probe( void ) +{ + // todo + return false; +} + +#endif /* defined(__arm64__) */ + +#if defined(__i386__) || defined(__x86_64__) || defined(__arm64__) + +static void +uart_putstring(const char *str) +{ + while (*str) { + hib_uart_putc(*str++); + } +} + +static void +uart_putdec(uint64_t num) +{ + bool leading = true; + for (uint64_t pos = 10000000000000000000ull; pos != 0; pos /= 10) { + char c = (char) (num / pos); + if (c) { + leading = false; + num -= c * pos; + } else if (leading && (pos != 1)) { + continue; + } + hib_uart_putc(c + '0'); + } +} + static void uart_puthex(uint64_t num) { @@ -176,7 +238,7 @@ uart_puthex(uint64_t num) } else { c += 'a' - 10; } - uart_putc(c); + hib_uart_putc(c); } } @@ -193,16 +255,16 @@ debug_code(uint32_t code, uint64_t value) for (bit = 24; bit >= 0; bit -= 8) { c = 0xFF & (code >> bit); if (c) { - uart_putc(c); + hib_uart_putc(c); } } - uart_putc('='); + hib_uart_putc('='); uart_puthex(value); - uart_putc('\n'); - uart_putc('\r'); + hib_uart_putc('\n'); + hib_uart_putc('\r'); } -#endif /* defined(__i386__) || defined(__x86_64__) */ +#endif /* defined(__i386__) || defined(__x86_64__) || defined(__arm64__) */ #if !defined(DBGLOG) #define debug_probe() (false) @@ -230,15 +292,20 @@ enum{ /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -static void -fatal(void) +void +__hib_assert(const char *file, int line, const char *expression) { + uart_putstring(file); + hib_uart_putc(':'); + uart_putdec(line); + uart_putstring(" Assertion failed: "); + uart_putstring(expression); + hib_uart_putc('\n'); #if defined(__i386__) || defined(__x86_64__) outb(0xcf9, 6); -#else +#endif /* defined(__i386__) || defined(__x86_64__) */ while (true) { } -#endif } /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ @@ -367,7 +434,7 @@ hibernate_page_bitmap_count(hibernate_bitmap_t * bitmap, uint32_t set, uint32_t return count; } -static ppnum_t +ppnum_t hibernate_page_list_grab(hibernate_page_list_t * list, uint32_t * pNextFree) { uint32_t nextFree = *pNextFree; @@ -385,24 +452,97 @@ hibernate_page_list_grab(hibernate_page_list_t * list, uint32_t * pNextFree) if 
(!bitmap) { debug_code(kIOHibernateRestoreCodeNoMemory, nextFree); - fatal(); - nextFree = 0; + HIB_ASSERT(0); } return nextFree; } +#pragma mark - +#pragma mark hibernate_scratch + +void +hibernate_scratch_init(hibernate_scratch_t * scratch, hibernate_page_list_t * map, uint32_t * nextFree) +{ + // initialize "scratch" so we can start writing into it + __nosan_bzero(scratch, sizeof(*scratch)); + scratch->map = map; + scratch->nextFree = nextFree; + scratch->headPage = hibernate_page_list_grab(scratch->map, scratch->nextFree); + scratch->curPage = (uint8_t *)pal_hib_map(SCRATCH_AREA, ptoa_64(scratch->headPage)); +} + +void +hibernate_scratch_start_read(hibernate_scratch_t * scratch) +{ + // re-initialize "scratch" so we can start reading from it it + hibernate_scratch_t result; + __nosan_bzero(&result, sizeof(result)); + result.headPage = scratch->headPage; + result.curPage = (uint8_t *)pal_hib_map(SCRATCH_AREA, ptoa_64(result.headPage)); + result.totalLength = scratch->curPos; + *scratch = result; +} + +static void +hibernate_scratch_io(hibernate_scratch_t * scratch, void * buffer, size_t size, bool write) +{ + // copy data to or from "scratch" based on the value of "write" + if (!write) { + // check that we are in bounds + HIB_ASSERT(scratch->curPos + size <= scratch->totalLength); + } + while (size) { + // if we got to the end of a page (leaving room for our chain pointer), advance to the next page + if (scratch->curPagePos == PAGE_SIZE - sizeof(ppnum_t)) { + ppnum_t *nextPage = (ppnum_t *)(scratch->curPage + scratch->curPagePos); + if (write) { + // allocate the next page and store the page number + *nextPage = hibernate_page_list_grab(scratch->map, scratch->nextFree); + } + scratch->curPage = (uint8_t *)pal_hib_map(SCRATCH_AREA, ptoa_64(*nextPage)); + scratch->curPagePos = 0; + } + size_t curPageRemaining = PAGE_SIZE - sizeof(ppnum_t) - scratch->curPagePos; + size_t toCopy = MIN(size, curPageRemaining); + if (write) { + // copy from "buffer" into "scratch" + __nosan_memcpy(scratch->curPage + scratch->curPagePos, buffer, toCopy); + } else { + // copy from "scratch" into "buffer" + __nosan_memcpy(buffer, scratch->curPage + scratch->curPagePos, toCopy); + } + scratch->curPos += toCopy; + scratch->curPagePos += toCopy; + buffer += toCopy; + size -= toCopy; + } +} + +void +hibernate_scratch_write(hibernate_scratch_t * scratch, const void * buffer, size_t size) +{ + hibernate_scratch_io(scratch, (void *)(uintptr_t)buffer, size, true); +} + +void +hibernate_scratch_read(hibernate_scratch_t * scratch, void * buffer, size_t size) +{ + hibernate_scratch_io(scratch, buffer, size, false); +} + +#pragma mark - + static uint32_t store_one_page(uint32_t procFlags, uint32_t * src, uint32_t compressedSize, - uint32_t * buffer, uint32_t ppnum) + uint8_t * scratch, uint32_t ppnum) { uint64_t dst = ptoa_64(ppnum); - uint8_t scratch[WKdm_SCRATCH_BUF_SIZE_INTERNAL] __attribute__ ((aligned(16))); if (compressedSize != PAGE_SIZE) { dst = pal_hib_map(DEST_COPY_AREA, dst); if (compressedSize != 4) { - WKdm_decompress_new((WK_word*) src, (WK_word*)(uintptr_t)dst, (WK_word*) &scratch[0], compressedSize); + pal_hib_decompress_page(src, (void *)dst, scratch, compressedSize); } else { size_t i; uint32_t s, *d; @@ -424,6 +564,24 @@ store_one_page(uint32_t procFlags, uint32_t * src, uint32_t compressedSize, return hibernate_sum_page((uint8_t *)(uintptr_t)dst, ppnum); } +void +hibernate_reserve_restore_pages(uint64_t headerPhys, IOHibernateImageHeader *header, hibernate_page_list_t * map) +{ + uint32_t lastImagePage = 
atop_64_ppnum(HIB_ROUND_PAGE(headerPhys + header->image1Size)); + uint32_t handoffPages = header->handoffPages; + uint32_t handoffPageCount = header->handoffPageCount; + uint32_t ppnum; + + // knock all the image pages to be used out of free map + for (ppnum = atop_64_ppnum(headerPhys); ppnum <= lastImagePage; ppnum++) { + hibernate_page_bitset(map, FALSE, ppnum); + } + // knock all the handoff pages to be used out of free map + for (ppnum = handoffPages; ppnum < (handoffPages + handoffPageCount); ppnum++) { + hibernate_page_bitset(map, FALSE, ppnum); + } +} + long hibernate_kernel_entrypoint(uint32_t p1, uint32_t p2, uint32_t p3, uint32_t p4) @@ -435,18 +593,14 @@ hibernate_kernel_entrypoint(uint32_t p1, uint64_t pageIndexPhys; uint32_t * pageIndexSource; hibernate_page_list_t * map; - uint32_t stage; + pal_hib_restore_stage_t stage; uint32_t count; uint32_t ppnum; uint32_t page; uint32_t conflictCount; uint32_t compressedSize; uint32_t uncompressedPages; - uint32_t copyPageListHeadPage; - uint32_t pageListPage; - uint32_t * copyPageList; uint32_t * src; - uint32_t copyPageIndex; uint32_t sum; uint32_t pageSum; uint32_t nextFree; @@ -455,11 +609,16 @@ hibernate_kernel_entrypoint(uint32_t p1, uint32_t lastPageIndexPage; uint32_t handoffPages; uint32_t handoffPageCount; + uint8_t * wkdmScratch; + hibernate_scratch_t conflictList; + pal_hib_ctx_t palHibCtx; uint64_t timeStart; timeStart = rdtsc64(); +#if !defined(__arm64__) static_assert(sizeof(IOHibernateImageHeader) == 512); +#endif /* !defined(__arm64__) */ headerPhys = ptoa_64(p1); @@ -483,8 +642,15 @@ hibernate_kernel_entrypoint(uint32_t p1, map = (hibernate_page_list_t *) pal_hib_map(BITMAP_AREA, mapPhys); - lastImagePage = atop_64(headerPhys + gIOHibernateCurrentHeader->image1Size); - lastMapPage = atop_64(mapPhys + gIOHibernateCurrentHeader->bitmapSize); + + // make the rest of the image is safe for atop() + uint64_t imageEnd; + if (os_add_overflow(headerPhys, gIOHibernateCurrentHeader->image1Size, &imageEnd) || (imageEnd > IO_MAX_PAGE_ADDR)) { + HIB_ASSERT(0); + } + + lastImagePage = atop_64_ppnum(HIB_ROUND_PAGE(headerPhys + gIOHibernateCurrentHeader->image1Size)); + lastMapPage = atop_64_ppnum(HIB_ROUND_PAGE(mapPhys + gIOHibernateCurrentHeader->bitmapSize)); handoffPages = gIOHibernateCurrentHeader->handoffPages; handoffPageCount = gIOHibernateCurrentHeader->handoffPageCount; @@ -497,31 +663,30 @@ hibernate_kernel_entrypoint(uint32_t p1, debug_code(kIOHibernateRestoreCodeHandoffPages, ptoa_64(handoffPages)); debug_code(kIOHibernateRestoreCodeHandoffCount, handoffPageCount); - // knock all the image pages to be used out of free map - for (ppnum = atop_64(headerPhys); ppnum <= lastImagePage; ppnum++) { - hibernate_page_bitset(map, FALSE, ppnum); - } - // knock all the handoff pages to be used out of free map - for (ppnum = handoffPages; ppnum < (handoffPages + handoffPageCount); ppnum++) { - hibernate_page_bitset(map, FALSE, ppnum); - } +#if defined(__arm64__) + // on arm64 we've already done this in pal_hib_resume_tramp +#else /* !defined(__arm64__) */ + hibernate_reserve_restore_pages(headerPhys, gIOHibernateCurrentHeader, map); +#endif /* !defined(__arm64__) */ nextFree = 0; hibernate_page_list_grab(map, &nextFree); + pal_hib_resume_init(&palHibCtx, map, &nextFree); + + // allocate scratch space for wkdm + wkdmScratch = (uint8_t *)pal_hib_map(WKDM_AREA, ptoa_64(hibernate_page_list_grab(map, &nextFree))); + sum = gIOHibernateCurrentHeader->actualRestore1Sum; - gIOHibernateCurrentHeader->diag[0] = atop_64(headerPhys); + 
gIOHibernateCurrentHeader->diag[0] = atop_64_ppnum(headerPhys); gIOHibernateCurrentHeader->diag[1] = sum; gIOHibernateCurrentHeader->trampolineTime = 0; uncompressedPages = 0; conflictCount = 0; - copyPageListHeadPage = 0; - copyPageList = 0; - copyPageIndex = PAGE_SIZE >> 2; compressedSize = PAGE_SIZE; - stage = 2; + stage = pal_hib_restore_stage_handoff_data; count = 0; srcPhys = 0; @@ -531,7 +696,7 @@ hibernate_kernel_entrypoint(uint32_t p1, + gIOHibernateCurrentHeader->fileExtentMapSize + ptoa_32(gIOHibernateCurrentHeader->restore1PageCount)); imageReadPhys = (pageIndexPhys + gIOHibernateCurrentHeader->previewPageListSize); - lastPageIndexPage = atop_64(imageReadPhys); + lastPageIndexPage = atop_64_ppnum(HIB_ROUND_PAGE(imageReadPhys)); pageIndexSource = (uint32_t *) pal_hib_map(IMAGE2_AREA, pageIndexPhys); } else { pageIndexPhys = 0; @@ -544,7 +709,7 @@ hibernate_kernel_entrypoint(uint32_t p1, while (1) { switch (stage) { - case 2: + case pal_hib_restore_stage_handoff_data: // copy handoff data count = srcPhys ? 0 : handoffPageCount; if (!count) { @@ -556,7 +721,7 @@ hibernate_kernel_entrypoint(uint32_t p1, srcPhys = ptoa_64(handoffPages); break; - case 1: + case pal_hib_restore_stage_preview_pages: // copy pageIndexSource pages == preview image data if (!srcPhys) { if (!pageIndexPhys) { @@ -571,7 +736,7 @@ hibernate_kernel_entrypoint(uint32_t p1, imageReadPhys = srcPhys; break; - case 0: + case pal_hib_restore_stage_dram_pages: // copy pages if (!srcPhys) { srcPhys = (mapPhys + gIOHibernateCurrentHeader->bitmapSize); @@ -586,7 +751,7 @@ hibernate_kernel_entrypoint(uint32_t p1, if (!count) { - if (!stage) { + if (stage == pal_hib_restore_stage_dram_pages) { break; } stage--; @@ -600,97 +765,78 @@ hibernate_kernel_entrypoint(uint32_t p1, src = (uint32_t *) pal_hib_map(IMAGE_AREA, srcPhys); - if (2 == stage) { + if (stage == pal_hib_restore_stage_handoff_data) { ppnum = gIOHibernateHandoffPages[page]; - } else if (!stage) { + } else if (stage == pal_hib_restore_stage_dram_pages) { tag = *src++; + HIB_ASSERT((tag & ~kIOHibernateTagLength) == kIOHibernateTagSignature); // debug_code(kIOHibernateRestoreCodeTag, (uintptr_t) tag); srcPhys += sizeof(*src); compressedSize = kIOHibernateTagLength & tag; + HIB_ASSERT(compressedSize <= PAGE_SIZE); } - conflicts = (ppnum >= atop_64(mapPhys)) && (ppnum <= lastMapPage); + conflicts = (ppnum >= atop_64_ppnum(mapPhys)) && (ppnum <= lastMapPage); - conflicts |= ((ppnum >= atop_64(imageReadPhys)) && (ppnum <= lastImagePage)); + conflicts |= ((ppnum >= atop_64_ppnum(imageReadPhys)) && (ppnum <= lastImagePage)); - if (stage >= 2) { - conflicts |= ((ppnum >= atop_64(srcPhys)) && (ppnum <= (handoffPages + handoffPageCount - 1))); + if (stage >= pal_hib_restore_stage_handoff_data) { + conflicts |= ((ppnum >= atop_64_ppnum(srcPhys)) && (ppnum <= (handoffPages + handoffPageCount - 1))); } - if (stage >= 1) { - conflicts |= ((ppnum >= atop_64(pageIndexPhys)) && (ppnum <= lastPageIndexPage)); + if (stage >= pal_hib_restore_stage_preview_pages) { + conflicts |= ((ppnum >= atop_64_ppnum(pageIndexPhys)) && (ppnum <= lastPageIndexPage)); } if (!conflicts) { pageSum = store_one_page(gIOHibernateCurrentHeader->processorFlags, - src, compressedSize, 0, ppnum); - if (stage != 2) { + src, compressedSize, wkdmScratch, ppnum); + if (stage != pal_hib_restore_stage_handoff_data) { sum += pageSum; } uncompressedPages++; } else { - uint32_t bufferPage = 0; - uint32_t * dst; - // debug_code(kIOHibernateRestoreCodeConflictPage, ppnum); // 
debug_code(kIOHibernateRestoreCodeConflictSource, (uintptr_t) src); conflictCount++; - if (compressedSize) { - // alloc new buffer page - bufferPage = hibernate_page_list_grab(map, &nextFree); - dst = (uint32_t *)pal_hib_map(DEST_COPY_AREA, ptoa_64(bufferPage)); - __nosan_memcpy(dst, src, compressedSize); + if (!conflictList.headPage) { + hibernate_scratch_init(&conflictList, map, &nextFree); } - if (copyPageIndex > ((PAGE_SIZE >> 2) - 3)) { - // alloc new copy list page - pageListPage = hibernate_page_list_grab(map, &nextFree); - // link to current - if (copyPageList) { - copyPageList[1] = pageListPage; - } else { - copyPageListHeadPage = pageListPage; - } - copyPageList = (uint32_t *)pal_hib_map(SRC_COPY_AREA, - ptoa_64(pageListPage)); - copyPageList[1] = 0; - copyPageIndex = 2; - } - copyPageList[copyPageIndex++] = ppnum; - copyPageList[copyPageIndex++] = bufferPage; - copyPageList[copyPageIndex++] = (compressedSize | (stage << 24)); - copyPageList[0] = copyPageIndex; + hibernate_scratch_write(&conflictList, &ppnum, sizeof(ppnum)); + hibernate_scratch_write(&conflictList, &compressedSize, sizeof(compressedSize)); + hibernate_scratch_write(&conflictList, &stage, sizeof(stage)); + hibernate_scratch_write(&conflictList, src, compressedSize); } srcPhys += ((compressedSize + 3) & ~3); src += ((compressedSize + 3) >> 2); + pal_hib_restored_page(&palHibCtx, stage, ppnum); } } /* src points to the last page restored, so we need to skip over that */ - hibernateRestorePALState(src); + pal_hib_restore_pal_state(src); // -- copy back conflicts - pageListPage = copyPageListHeadPage; - while (pageListPage) { - copyPageList = (uint32_t *)pal_hib_map(COPY_PAGE_AREA, ptoa_64(pageListPage)); - for (copyPageIndex = 2; copyPageIndex < copyPageList[0]; copyPageIndex += 3) { - ppnum = copyPageList[copyPageIndex + 0]; - srcPhys = ptoa_64(copyPageList[copyPageIndex + 1]); - src = (uint32_t *) pal_hib_map(SRC_COPY_AREA, srcPhys); - compressedSize = copyPageList[copyPageIndex + 2]; - stage = compressedSize >> 24; - compressedSize &= 0x1FFF; + if (conflictCount) { + src = (uint32_t *)pal_hib_map(COPY_PAGE_AREA, ptoa_64(hibernate_page_list_grab(map, &nextFree))); + hibernate_scratch_start_read(&conflictList); + for (uint32_t i = 0; i < conflictCount; i++) { + hibernate_scratch_read(&conflictList, &ppnum, sizeof(ppnum)); + hibernate_scratch_read(&conflictList, &compressedSize, sizeof(compressedSize)); + hibernate_scratch_read(&conflictList, &stage, sizeof(stage)); + HIB_ASSERT(compressedSize <= PAGE_SIZE); + hibernate_scratch_read(&conflictList, src, compressedSize); pageSum = store_one_page(gIOHibernateCurrentHeader->processorFlags, - src, compressedSize, 0, ppnum); - if (stage != 2) { + src, compressedSize, wkdmScratch, ppnum); + if (stage != pal_hib_restore_stage_handoff_data) { sum += pageSum; } uncompressedPages++; } - pageListPage = copyPageList[1]; } - pal_hib_patchup(); + pal_hib_patchup(&palHibCtx); // -- image has been destroyed... 
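The conflict path above defers any page whose destination overlaps the image, bitmap, or handoff ranges: a variable-length record of { ppnum, compressedSize, stage, payload } is appended to the scratch stream, and once the main pass finishes the records are replayed through store_one_page. A compressed sketch of that record round-trip, assuming a flat in-memory stream in place of the chained scratch pages the kernel grabs from the free map (types and helpers are illustrative):

#include <stdint.h>
#include <stddef.h>
#include <string.h>
#include <assert.h>

/* Illustrative stand-in for hibernate_scratch_t: a flat append/consume buffer. */
typedef struct {
	uint8_t *base;
	size_t   cap;
	size_t   pos;        /* write cursor; reset to 0 before reading */
} sketch_stream_t;

static void
sketch_put(sketch_stream_t *s, const void *p, size_t n)
{
	assert(s->pos + n <= s->cap);
	memcpy(s->base + s->pos, p, n);
	s->pos += n;
}

static void
sketch_get(sketch_stream_t *s, void *p, size_t n)
{
	memcpy(p, s->base + s->pos, n);
	s->pos += n;
}

/* Defer one conflicting page; the field order mirrors the writes above:
 * destination page number, compressed length, restore stage, then the payload. */
static void
sketch_defer_page(sketch_stream_t *s, uint32_t ppnum, uint32_t compressedSize,
    uint32_t stage, const void *src)
{
	sketch_put(s, &ppnum, sizeof(ppnum));
	sketch_put(s, &compressedSize, sizeof(compressedSize));
	sketch_put(s, &stage, sizeof(stage));
	sketch_put(s, src, compressedSize);
}

/* Replay every deferred page once its destination is no longer needed as a source. */
static void
sketch_replay(sketch_stream_t *s, uint32_t conflictCount, uint8_t *pageBuf, size_t pageSize,
    void (*store)(uint32_t ppnum, const uint8_t *data, uint32_t compressedSize, uint32_t stage))
{
	s->pos = 0;                                   /* analogous to hibernate_scratch_start_read() */
	for (uint32_t i = 0; i < conflictCount; i++) {
		uint32_t ppnum, compressedSize, stage;
		sketch_get(s, &ppnum, sizeof(ppnum));
		sketch_get(s, &compressedSize, sizeof(compressedSize));
		sketch_get(s, &stage, sizeof(stage));
		assert(compressedSize <= pageSize);
		sketch_get(s, pageBuf, compressedSize);
		store(ppnum, pageBuf, compressedSize, stage);
	}
}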
@@ -701,7 +847,7 @@ hibernate_kernel_entrypoint(uint32_t p1, gIOHibernateState = kIOHibernateStateWakingFromHibernate; - gIOHibernateCurrentHeader->trampolineTime = (((rdtsc64() - timeStart)) >> 8); + gIOHibernateCurrentHeader->trampolineTime = ((uint32_t) (((rdtsc64() - timeStart)) >> 8)); // debug_code('done', 0); @@ -713,12 +859,14 @@ hibernate_kernel_entrypoint(uint32_t p1, // flush caches __asm__("wbinvd"); proc(); + return -1; +#elif defined(__arm64__) + // return control to hibernate_machine_entrypoint + return 0; #else // implement me #endif #endif - - return -1; } #if CONFIG_DEBUG @@ -814,19 +962,19 @@ ksprintn(char *nbuf, uintmax_t num, int base, int *lenp, int upper) * The format %b is supported to decode error registers. * Its usage is: * - * printf("reg=%b\n", regval, "*"); + * printf("reg=%b\n", regval, "*"); * - * where is the output base expressed as a control character, e.g. + * where is the output base expressed as a control character, e.g. * \10 gives octal; \20 gives hex. Each arg is a sequence of characters, * the first of which gives the bit number to be inspected (origin 1), and * the next characters (up to a control character, i.e. a character <= 32), * give the name of the register. Thus: * - * kvprintf("reg=%b\n", 3, "\10\2BITTWO\1BITONE\n"); + * kvprintf("reg=%b\n", 3, "\10\2BITTWO\1BITONE"); * * would produce output: * - * reg=3 + * reg=3 * * XXX: %D -- Hexdump, takes pointer and separator string: * ("%6D", ptr, ":") -> XX:XX:XX:XX:XX:XX @@ -835,7 +983,7 @@ ksprintn(char *nbuf, uintmax_t num, int base, int *lenp, int upper) static int hibkvprintf(char const *fmt, void (*func)(int, void*), void *arg, int radix, va_list ap) { -#define PCHAR(c) {int cc=(c); if (func) (*func)(cc,arg); else *d++ = cc; retval++; } +#define PCHAR(c) {int cc=(c); if (func) (*func)(cc,arg); else *d++ = (char)cc; retval++; } char nbuf[MAXNBUF]; char *d; const char *p, *percent, *q; @@ -1006,9 +1154,9 @@ reswitch: switch (ch = (u_char) * fmt++) { } else if (zflag) { *(va_arg(ap, size_t *)) = retval; } else if (hflag) { - *(va_arg(ap, short *)) = retval; + *(va_arg(ap, short *)) = (short)retval; } else if (cflag) { - *(va_arg(ap, char *)) = retval; + *(va_arg(ap, char *)) = (char)retval; } else { *(va_arg(ap, int *)) = retval; } @@ -1172,7 +1320,7 @@ number: PCHAR(*percent++); } /* - * Since we ignore an formatting argument it is no + * Since we ignore a formatting argument it is no * longer safe to obey the remaining formatting * arguments as the arguments will no longer match * the format specs. @@ -1189,7 +1337,7 @@ static void putchar(int c, void *arg) { (void)arg; - uart_putc(c); + hib_uart_putc((char)c); } void @@ -1203,3 +1351,37 @@ hibprintf(const char *fmt, ...) 
va_end(ap); } #endif /* CONFIG_DEBUG */ + +#if __arm64__ && HIBERNATE_TRAP_HANDLER +void +hibernate_trap(__unused arm_context_t *context, __unused uint64_t trap_addr) +__attribute__((optnone)) +{ + // enable logging + gIOHibernateDebugFlags |= kIOHibernateDebugRestoreLogs; + + // dump some interesting registers + for (int i = 0; i < 29; i++) { + debug_code(' r00' + (i / 10 * 256) + (i % 10), context->ss.ss_64.x[i]); + } + debug_code(' fp', context->ss.ss_64.fp); + debug_code(' lr', context->ss.ss_64.lr); + debug_code(' sp', context->ss.ss_64.sp); + debug_code(' pc', context->ss.ss_64.pc); + debug_code('cpsr', context->ss.ss_64.cpsr); + debug_code('asps', context->ss.ss_64.aspsr); + debug_code(' far', context->ss.ss_64.far); + debug_code(' esr', context->ss.ss_64.esr); + + // dump the trap_addr + debug_code('trap', trap_addr); + + // dump the kernel slide + debug_code('slid', _hibernateHeader.kernVirtSlide); + + // loop forever + while (true) { + ; + } +} +#endif /* __arm64__ && HIBERNATE_TRAP_HANDLER */ diff --git a/iokit/Kernel/IOHistogramReporter.cpp b/iokit/Kernel/IOHistogramReporter.cpp index b5e9176b4..5f29356c0 100644 --- a/iokit/Kernel/IOHistogramReporter.cpp +++ b/iokit/Kernel/IOHistogramReporter.cpp @@ -26,6 +26,8 @@ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ +#define IOKIT_ENABLE_SHARED_PTR + #define __STDC_LIMIT_MACROS // what are the C++ equivalents? #include @@ -38,7 +40,7 @@ OSDefineMetaClassAndStructors(IOHistogramReporter, IOReporter); /* static */ -IOHistogramReporter* +OSSharedPtr IOHistogramReporter::with(IOService *reportingService, IOReportCategories categories, uint64_t channelID, @@ -47,9 +49,8 @@ IOHistogramReporter::with(IOService *reportingService, int nSegments, IOHistogramSegmentConfig *config) { - IOHistogramReporter *reporter = new IOHistogramReporter; - - const OSSymbol *tmpChannelName = NULL; + OSSharedPtr reporter = OSMakeShared(); + OSSharedPtr tmpChannelName; if (reporter) { if (channelName) { @@ -57,15 +58,13 @@ IOHistogramReporter::with(IOService *reportingService, } if (reporter->initWith(reportingService, categories, - channelID, tmpChannelName, + channelID, tmpChannelName.get(), unit, nSegments, config)) { return reporter; } } - OSSafeReleaseNULL(reporter); - OSSafeReleaseNULL(tmpChannelName); - return NULL; + return nullptr; } @@ -192,7 +191,7 @@ IOHistogramReporter::initWith(IOService *reportingService, // Setup IOReporter's channel type _elements[cnt2].channel_type = _channelType; - _elements[cnt2].channel_type.element_idx = cnt2; + _elements[cnt2].channel_type.element_idx = ((int16_t) cnt2); //IOREPORTER_DEBUG_ELEMENT(cnt2); } @@ -273,16 +272,16 @@ IOHistogramReporter::free(void) } -IOReportLegendEntry* +OSSharedPtr IOHistogramReporter::handleCreateLegend(void) { - IOReportLegendEntry *rval = NULL, *legendEntry = NULL; - OSData *tmpConfigData = NULL; - OSDictionary *tmpDict; // no refcount + OSSharedPtr legendEntry; + OSSharedPtr tmpConfigData; + OSDictionary *tmpDict; // no refcount legendEntry = super::handleCreateLegend(); if (!legendEntry) { - goto finish; + return nullptr; } PREFL_MEMOP_PANIC(_segmentCount, IOHistogramSegmentConfig); @@ -290,29 +289,18 @@ IOHistogramReporter::handleCreateLegend(void) (unsigned)_segmentCount * sizeof(IOHistogramSegmentConfig)); if (!tmpConfigData) { - goto finish; + return nullptr; } tmpDict = OSDynamicCast(OSDictionary, legendEntry->getObject(kIOReportLegendInfoKey)); if (!tmpDict) { - goto finish; + return nullptr; } - tmpDict->setObject(kIOReportLegendConfigKey, tmpConfigData); - - // success - rval = 
legendEntry; - -finish: - if (tmpConfigData) { - tmpConfigData->release(); - } - if (!rval && legendEntry) { - legendEntry->release(); - } + tmpDict->setObject(kIOReportLegendConfigKey, tmpConfigData.get()); - return rval; + return legendEntry; } IOReturn diff --git a/iokit/Kernel/IOInterruptAccounting.cpp b/iokit/Kernel/IOInterruptAccounting.cpp index 3d7f57cdb..25d654449 100644 --- a/iokit/Kernel/IOInterruptAccounting.cpp +++ b/iokit/Kernel/IOInterruptAccounting.cpp @@ -26,6 +26,8 @@ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ +#define IOKIT_ENABLE_SHARED_PTR + #include #include diff --git a/iokit/Kernel/IOInterruptController.cpp b/iokit/Kernel/IOInterruptController.cpp index 664890442..6dc591963 100644 --- a/iokit/Kernel/IOInterruptController.cpp +++ b/iokit/Kernel/IOInterruptController.cpp @@ -43,9 +43,9 @@ OSDefineMetaClassAndAbstractStructors(IOInterruptController, IOService); -OSMetaClassDefineReservedUnused(IOInterruptController, 0); -OSMetaClassDefineReservedUnused(IOInterruptController, 1); -OSMetaClassDefineReservedUnused(IOInterruptController, 2); +OSMetaClassDefineReservedUsedX86(IOInterruptController, 0); +OSMetaClassDefineReservedUsedX86(IOInterruptController, 1); +OSMetaClassDefineReservedUsedX86(IOInterruptController, 2); OSMetaClassDefineReservedUnused(IOInterruptController, 3); OSMetaClassDefineReservedUnused(IOInterruptController, 4); OSMetaClassDefineReservedUnused(IOInterruptController, 5); @@ -317,9 +317,15 @@ IOInterruptController::enableInterrupt(IOService *nub, int source) } if (vector->interruptDisabledHard) { vector->interruptDisabledHard = 0; -#if !defined(__i386__) && !defined(__x86_64__) - OSMemoryBarrier(); -#endif + + // A DSB ISH on ARM is needed to make sure the vector data are + // properly initialized before the MMIO enabling the interrupts + // in hardware. OSMemoryBarrier(), which maps to DMB, is not + // sufficient here as the CPUs are not consumers of the device + // write. Hence, the DMB does not guarantee the CPUs won't see an + // interrupt before it initalizes the vector data properly. 
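To make the ordering requirement described in the comment above concrete before the OSSynchronizeIO() call that follows, here is a minimal sketch of the publish-then-unmask pattern, with illustrative types and an explicit barrier standing in for OSSynchronizeIO(); this sketches the idea stated in the comment, not the IOKit implementation:

#include <stdint.h>

/* Illustrative vector/device state; not the IOKit structures. */
typedef struct {
	void     (*handler)(void *);
	void      *target;
	volatile uint32_t *unmaskReg;   /* MMIO register that unmasks the source */
	uint32_t   unmaskBit;
} sketch_vector_t;

#if defined(__arm64__)
#define sketch_publish_barrier()  __asm__ volatile ("dsb ish" ::: "memory")
#else
#define sketch_publish_barrier()  __asm__ volatile ("" ::: "memory")   /* placeholder on other archs */
#endif

static void
sketch_enable_vector(sketch_vector_t *v, void (*handler)(void *), void *target)
{
	/* 1. Fully initialize the data the interrupt handler will consume. */
	v->handler = handler;
	v->target  = target;

	/* 2. Per the comment above, a DMB only orders these stores for CPU observers;
	 *    the interrupt raised by the device write can still race a not-yet-complete
	 *    initialization, so a DSB-class barrier is used before touching hardware. */
	sketch_publish_barrier();

	/* 3. Only now unmask the source in hardware. */
	*v->unmaskReg = v->unmaskBit;
}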
+ OSSynchronizeIO(); + enableVector(vectorNumber, vector); } } @@ -425,6 +431,21 @@ IOInterruptController::causeVector(IOInterruptVectorNumber /*vectorNumber*/, { } +void +IOInterruptController::setCPUInterruptProperties(IOService */*service*/) +{ +} + +void +IOInterruptController::sendIPI(unsigned int /*cpu_id*/, bool /*deferred*/) +{ +} + +void +IOInterruptController::cancelDeferredIPI(unsigned int /*cpu_id*/) +{ +} + void IOInterruptController::timeStampSpuriousInterrupt(void) { @@ -458,11 +479,17 @@ IOInterruptController::timeStampInterruptHandlerInternal(bool isStart, IOInterru if (isStart) { +#if INTERRUPT_MASKED_DEBUG + ml_irq_debug_start((uintptr_t)vector->handler, (uintptr_t)vector); +#endif IOTimeStampStartConstant(IODBG_INTC(IOINTC_HANDLER), (uintptr_t)vectorNumber, (uintptr_t)unslidHandler, (uintptr_t)unslidTarget, (uintptr_t)providerID); } else { IOTimeStampEndConstant(IODBG_INTC(IOINTC_HANDLER), (uintptr_t)vectorNumber, (uintptr_t)unslidHandler, (uintptr_t)unslidTarget, (uintptr_t)providerID); +#if INTERRUPT_MASKED_DEBUG + ml_irq_debug_end(); +#endif } } diff --git a/iokit/Kernel/IOInterruptEventSource.cpp b/iokit/Kernel/IOInterruptEventSource.cpp index 5decae5c6..a720e9a30 100644 --- a/iokit/Kernel/IOInterruptEventSource.cpp +++ b/iokit/Kernel/IOInterruptEventSource.cpp @@ -26,6 +26,9 @@ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ +#define IOKIT_ENABLE_SHARED_PTR + +#include #include #include #include @@ -34,6 +37,7 @@ #include #include #include +#include #if IOKITSTATS @@ -208,29 +212,28 @@ IOInterruptEventSource::unregisterInterruptHandler(IOService *inProvider, } -IOInterruptEventSource * +OSSharedPtr IOInterruptEventSource::interruptEventSource(OSObject *inOwner, Action inAction, IOService *inProvider, int inIntIndex) { - IOInterruptEventSource *me = new IOInterruptEventSource; + OSSharedPtr me = OSMakeShared(); if (me && !me->init(inOwner, inAction, inProvider, inIntIndex)) { - me->release(); - return NULL; + return nullptr; } return me; } -IOInterruptEventSource * +OSSharedPtr IOInterruptEventSource::interruptEventSource(OSObject *inOwner, IOService *inProvider, int inIntIndex, ActionBlock inAction) { - IOInterruptEventSource * ies; + OSSharedPtr ies; ies = IOInterruptEventSource::interruptEventSource(inOwner, (Action) NULL, inProvider, inIntIndex); if (ies) { ies->setActionBlock((IOEventSource::ActionBlock) inAction); @@ -331,16 +334,24 @@ IOInterruptEventSource::checkForWork() uint64_t endCPUTime = 0; unsigned int cacheProdCount = producerCount; int numInts = cacheProdCount - consumerCount; - IOInterruptEventAction intAction = (IOInterruptEventAction) action; + IOEventSource::Action intAction = action; ActionBlock intActionBlock = (ActionBlock) actionBlock; + void *address; bool trace = (gIOKitTrace & kIOTraceIntEventSource) ? 
true : false; + if (kActionBlock & flags) { + address = ptrauth_nop_cast(void *, _Block_get_invoke_fn((struct Block_layout *)intActionBlock)); + } else { + address = ptrauth_nop_cast(void *, intAction); + } + IOStatisticsCheckForWork(); if (numInts > 0) { if (trace) { IOTimeStampStartConstant(IODBG_INTES(IOINTES_ACTION), - VM_KERNEL_ADDRHIDE(intAction), VM_KERNEL_ADDRHIDE(owner), + VM_KERNEL_ADDRHIDE(address), + VM_KERNEL_ADDRHIDE(owner), VM_KERNEL_ADDRHIDE(this), VM_KERNEL_ADDRHIDE(workLoop)); } @@ -358,7 +369,7 @@ IOInterruptEventSource::checkForWork() if (kActionBlock & flags) { (intActionBlock)(this, numInts); } else { - (*intAction)(owner, this, numInts); + ((IOInterruptEventAction)intAction)(owner, this, numInts); } if (reserved->statistics) { @@ -379,7 +390,8 @@ IOInterruptEventSource::checkForWork() if (trace) { IOTimeStampEndConstant(IODBG_INTES(IOINTES_ACTION), - VM_KERNEL_ADDRHIDE(intAction), VM_KERNEL_ADDRHIDE(owner), + VM_KERNEL_ADDRHIDE(address), + VM_KERNEL_ADDRHIDE(owner), VM_KERNEL_ADDRHIDE(this), VM_KERNEL_ADDRHIDE(workLoop)); } @@ -390,7 +402,8 @@ IOInterruptEventSource::checkForWork() } else if (numInts < 0) { if (trace) { IOTimeStampStartConstant(IODBG_INTES(IOINTES_ACTION), - VM_KERNEL_ADDRHIDE(intAction), VM_KERNEL_ADDRHIDE(owner), + VM_KERNEL_ADDRHIDE(address), + VM_KERNEL_ADDRHIDE(owner), VM_KERNEL_ADDRHIDE(this), VM_KERNEL_ADDRHIDE(workLoop)); } @@ -408,7 +421,7 @@ IOInterruptEventSource::checkForWork() if (kActionBlock & flags) { (intActionBlock)(this, numInts); } else { - (*intAction)(owner, this, numInts); + ((IOInterruptEventAction)intAction)(owner, this, numInts); } if (reserved->statistics) { @@ -429,7 +442,8 @@ IOInterruptEventSource::checkForWork() if (trace) { IOTimeStampEndConstant(IODBG_INTES(IOINTES_ACTION), - VM_KERNEL_ADDRHIDE(intAction), VM_KERNEL_ADDRHIDE(owner), + VM_KERNEL_ADDRHIDE(address), + VM_KERNEL_ADDRHIDE(owner), VM_KERNEL_ADDRHIDE(this), VM_KERNEL_ADDRHIDE(workLoop)); } @@ -529,7 +543,7 @@ IOInterruptEventSource::enablePrimaryInterruptTimestamp(bool enable) } uint64_t -IOInterruptEventSource::getPimaryInterruptTimestamp() +IOInterruptEventSource::getPrimaryInterruptTimestamp() { if (reserved->statistics && reserved->statistics->enablePrimaryTimestamp) { return reserved->statistics->primaryTimestamp; diff --git a/iokit/Kernel/IOKitDebug.cpp b/iokit/Kernel/IOKitDebug.cpp index 7c1a6a141..0a4213b38 100644 --- a/iokit/Kernel/IOKitDebug.cpp +++ b/iokit/Kernel/IOKitDebug.cpp @@ -80,11 +80,11 @@ SYSCTL_PROC(_debug, OID_AUTO, iokit, CTLTYPE_QUAD | IODEBUG_CTLFLAGS | CTLFLAG_NOAUTO | CTLFLAG_KERN | CTLFLAG_LOCKED, &gIOKitDebug, 0, sysctl_debug_iokit, "Q", "boot_arg io"); -int debug_malloc_size; -int debug_iomalloc_size; +size_t debug_malloc_size; +size_t debug_iomalloc_size; vm_size_t debug_iomallocpageable_size; -int debug_container_malloc_size; +size_t debug_container_malloc_size; // int debug_ivars_size; // in OSObject.cpp extern "C" { @@ -147,10 +147,10 @@ IOPrintMemory( void ) // OSMetaClass::printInstanceCounts(); IOLog("\n" - "ivar kalloc() 0x%08x\n" - "malloc() 0x%08x\n" - "containers kalloc() 0x%08x\n" - "IOMalloc() 0x%08x\n" + "ivar kalloc() 0x%08lx\n" + "malloc() 0x%08lx\n" + "containers kalloc() 0x%08lx\n" + "IOMalloc() 0x%08lx\n" "----------------------------------------\n", debug_ivars_size, debug_malloc_size, @@ -259,20 +259,33 @@ struct IOTrackingQueue { queue_head_t sites[]; }; + +struct IOTrackingCallSiteUser { + pid_t pid; + uint8_t user32; + uint8_t userCount; + uintptr_t bt[kIOTrackingCallSiteBTs]; +}; + struct 
IOTrackingCallSite { queue_chain_t link; + queue_head_t instances; IOTrackingQueue * queue; + IOTracking * addresses; + size_t size[2]; uint32_t crc; + uint32_t count; vm_tag_t tag; - uint32_t count; - size_t size[2]; - uintptr_t bt[kIOTrackingCallSiteBTs]; + uint8_t user32; + uint8_t userCount; + pid_t btPID; - queue_head_t instances; - IOTracking * addresses; + uintptr_t bt[kIOTrackingCallSiteBTs]; + IOTrackingCallSiteUser user[0]; }; + struct IOTrackingLeaksRef { uintptr_t * instances; uint32_t zoneSize; @@ -368,6 +381,13 @@ IOTrackingQueueAlloc(const char * name, uintptr_t btEntry, return queue; }; +void +IOTrackingQueueCollectUser(IOTrackingQueue * queue) +{ + assert(0 == queue->siteCount); + queue->type |= kIOTrackingQueueTypeUser; +} + /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ void @@ -468,7 +488,7 @@ fasthash32(const void *buf, size_t len, uint32_t seed) // residue, which shall retain information from both the higher // and lower parts of hashcode. uint64_t h = fasthash64(buf, len, seed); - return h - (h >> 32); + return (uint32_t) (h - (h >> 32)); } /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ @@ -477,7 +497,7 @@ void IOTrackingAddUser(IOTrackingQueue * queue, IOTrackingUser * mem, vm_size_t size) { uint32_t num; - proc_t self; + int pid; if (!queue->captureOn) { return; @@ -490,16 +510,16 @@ IOTrackingAddUser(IOTrackingQueue * queue, IOTrackingUser * mem, vm_size_t size) num = backtrace(&mem->bt[0], kIOTrackingCallSiteBTs, NULL); num = 0; - if ((kernel_task != current_task()) && (self = proc_self())) { + if ((kernel_task != current_task()) && (pid = proc_selfpid())) { bool user_64 = false; - mem->btPID = proc_pid(self); + mem->btPID = pid; num = backtrace_user(&mem->btUser[0], kIOTrackingCallSiteBTs - 1, NULL, &user_64, NULL); mem->user32 = !user_64; - proc_rele(self); } assert(num <= kIOTrackingCallSiteBTs); - mem->userCount = num; + static_assert(kIOTrackingCallSiteBTs <= UINT8_MAX); + mem->userCount = ((uint8_t) num); IOTRecursiveLockLock(&queue->lock); queue_enter/*last*/ (&queue->sites[0], mem, IOTrackingUser *, link); @@ -531,7 +551,12 @@ IOTrackingAdd(IOTrackingQueue * queue, IOTracking * mem, size_t size, bool addre IOTrackingCallSite * site; uint32_t crc, num; uintptr_t bt[kIOTrackingCallSiteBTs + 1]; + uintptr_t btUser[kIOTrackingCallSiteBTs]; queue_head_t * que; + bool user; + int pid; + int userCount; + bool user64; if (mem->site) { return; @@ -543,6 +568,8 @@ IOTrackingAdd(IOTrackingQueue * queue, IOTracking * mem, size_t size, bool addre return; } + user = (0 != (kIOTrackingQueueTypeUser & queue->type)); + assert(!mem->link.next); num = backtrace(&bt[0], kIOTrackingCallSiteBTs + 1, NULL); @@ -552,6 +579,17 @@ IOTrackingAdd(IOTrackingQueue * queue, IOTracking * mem, size_t size, bool addre num--; crc = fasthash32(&bt[1], num * sizeof(bt[0]), 0x04C11DB7); + userCount = 0; + user64 = false; + pid = 0; + if (user) { + if ((kernel_task != current_task()) && (pid = proc_selfpid())) { + userCount = backtrace_user(&btUser[0], kIOTrackingCallSiteBTs, NULL, &user64, NULL); + assert(userCount <= kIOTrackingCallSiteBTs); + crc = fasthash32(&btUser[0], userCount * sizeof(bt[0]), crc); + } + } + IOTRecursiveLockLock(&queue->lock); que = &queue->sites[crc % queue->numSiteQs]; queue_iterate(que, site, IOTrackingCallSite *, link) @@ -559,13 +597,20 @@ IOTrackingAdd(IOTrackingQueue * queue, IOTracking * mem, size_t size, bool addre if (tag != site->tag) { continue; } + if (user && (pid != 
site->user[0].pid)) { + continue; + } if (crc == site->crc) { break; } } if (queue_end(que, (queue_entry_t) site)) { - site = (typeof(site))kalloc(sizeof(IOTrackingCallSite)); + size_t siteSize = sizeof(IOTrackingCallSite); + if (user) { + siteSize += sizeof(IOTrackingCallSiteUser); + } + site = (typeof(site))kalloc(siteSize); queue_init(&site->instances); site->addresses = (IOTracking *) &site->instances; @@ -577,7 +622,15 @@ IOTrackingAdd(IOTrackingQueue * queue, IOTracking * mem, size_t size, bool addre bcopy(&bt[1], &site->bt[0], num * sizeof(site->bt[0])); assert(num <= kIOTrackingCallSiteBTs); bzero(&site->bt[num], (kIOTrackingCallSiteBTs - num) * sizeof(site->bt[0])); - + if (user) { + bcopy(&btUser[0], &site->user[0].bt[0], userCount * sizeof(site->user[0].bt[0])); + assert(userCount <= kIOTrackingCallSiteBTs); + bzero(&site->user[0].bt[userCount], (kIOTrackingCallSiteBTs - userCount) * sizeof(site->user[0].bt[0])); + site->user[0].pid = pid; + site->user[0].user32 = !user64; + static_assert(kIOTrackingCallSiteBTs <= UINT8_MAX); + site->user[0].userCount = ((uint8_t) userCount); + } queue_enter_first(que, site, IOTrackingCallSite *, link); queue->siteCount++; } @@ -628,7 +681,11 @@ IOTrackingRemove(IOTrackingQueue * queue, IOTracking * mem, size_t size) remque(&mem->site->link); assert(queue->siteCount); queue->siteCount--; - kfree(mem->site, sizeof(IOTrackingCallSite)); + size_t siteSize = sizeof(IOTrackingCallSite); + if (kIOTrackingQueueTypeUser & queue->type) { + siteSize += sizeof(IOTrackingCallSiteUser); + } + kfree(mem->site, siteSize); } mem->site = NULL; } @@ -744,7 +801,11 @@ IOTrackingReset(IOTrackingQueue * queue) } } } - kfree(site, sizeof(IOTrackingCallSite)); + size_t siteSize = sizeof(IOTrackingCallSite); + if (kIOTrackingQueueTypeUser & queue->type) { + siteSize += sizeof(IOTrackingCallSiteUser); + } + kfree(site, siteSize); } } } @@ -811,7 +872,7 @@ IOTrackingZoneElementCompare(const void * left, const void * right) /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ static void -CopyOutKernelBacktrace(IOTrackingCallSite * site, IOTrackingCallSiteInfo * siteInfo) +CopyOutBacktraces(IOTrackingCallSite * site, IOTrackingCallSiteInfo * siteInfo) { uint32_t j; mach_vm_address_t bt, btEntry; @@ -826,6 +887,22 @@ CopyOutKernelBacktrace(IOTrackingCallSite * site, IOTrackingCallSiteInfo * siteI } siteInfo->bt[0][j] = VM_KERNEL_UNSLIDE(bt); } + + siteInfo->btPID = 0; + if (kIOTrackingQueueTypeUser & site->queue->type) { + siteInfo->btPID = site->user[0].pid; + uint32_t * bt32 = (typeof(bt32))((void *) &site->user[0].bt[0]); + uint64_t * bt64 = (typeof(bt64))((void *) &site->user[0].bt[0]); + for (uint32_t j = 0; j < kIOTrackingCallSiteBTs; j++) { + if (j >= site->user[0].userCount) { + siteInfo->bt[1][j] = 0; + } else if (site->user[0].user32) { + siteInfo->bt[1][j] = bt32[j]; + } else { + siteInfo->bt[1][j] = bt64[j]; + } + } + } } /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ @@ -1033,7 +1110,7 @@ IOTrackingLeaks(LIBKERN_CONSUMED OSData * data) siteInfo.count = siteCount; siteInfo.size[0] = (site->size[0] * site->count) / siteCount; siteInfo.size[1] = (site->size[1] * site->count) / siteCount;; - CopyOutKernelBacktrace(site, &siteInfo); + CopyOutBacktraces(site, &siteInfo); leakData->appendBytes(&siteInfo, sizeof(siteInfo)); } data->release(); @@ -1112,7 +1189,7 @@ IOTrackingDebug(uint32_t selector, uint32_t options, uint64_t value, proc = NULL; if (kIOTrackingGetMappings == selector) { if 
(value != -1ULL) { - proc = proc_find(value); + proc = proc_find((pid_t) value); if (!proc) { return kIOReturnNotFound; } @@ -1261,12 +1338,10 @@ IOTrackingDebug(uint32_t selector, uint32_t options, uint64_t value, if (size && ((tsize[0] + tsize[1]) < size)) { continue; } - siteInfo.count = count; siteInfo.size[0] = tsize[0]; siteInfo.size[1] = tsize[1]; - - CopyOutKernelBacktrace(site, &siteInfo); + CopyOutBacktraces(site, &siteInfo); data->appendBytes(&siteInfo, sizeof(siteInfo)); } } @@ -1282,7 +1357,7 @@ IOTrackingDebug(uint32_t selector, uint32_t options, uint64_t value, break; } if (!data) { - data = OSData::withCapacity(page_size); + data = OSData::withCapacity((unsigned int) page_size); } IOTRecursiveLockLock(&queue->lock); @@ -1359,7 +1434,7 @@ IOTrackingDebug(uint32_t selector, uint32_t options, uint64_t value, if ((kIOTrackingLeaks == selector) && namesLen && names) { const char * scan; const char * next; - size_t sLen; + uint8_t sLen; if (!data) { data = OSData::withCapacity(4096 * sizeof(uintptr_t)); @@ -1368,7 +1443,7 @@ IOTrackingDebug(uint32_t selector, uint32_t options, uint64_t value, // ...<0> scan = names; do{ - sLen = scan[0]; + sLen = ((uint8_t) scan[0]); scan++; next = scan + sLen; if (next >= (names + namesLen)) { diff --git a/iokit/Kernel/IOKitKernelInternal.h b/iokit/Kernel/IOKitKernelInternal.h index f1a0d882d..1cbb0485d 100644 --- a/iokit/Kernel/IOKitKernelInternal.h +++ b/iokit/Kernel/IOKitKernelInternal.h @@ -42,27 +42,6 @@ __BEGIN_DECLS /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -#if (KDEBUG_LEVEL >= KDEBUG_LEVEL_STANDARD) - -#define IOServiceTrace(csc, a, b, c, d) do { \ - if(kIOTraceIOService & gIOKitTrace) { \ - KERNEL_DEBUG_CONSTANT(IODBG_IOSERVICE(csc), a, b, c, d, 0); \ - } \ -} while(0) - -#else /* (KDEBUG_LEVEL >= KDEBUG_LEVEL_STANDARD) */ - -#define IOServiceTrace(csc, a, b, c, d) do { \ - (void)a; \ - (void)b; \ - (void)c; \ - (void)d; \ -} while (0) - -#endif /* (KDEBUG_LEVEL >= KDEBUG_LEVEL_STANDARD) */ - -/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ - typedef kern_return_t (*IOIteratePageableMapsCallback)(vm_map_t map, void * ref); void IOLibInit(void); @@ -111,6 +90,11 @@ __END_DECLS ({ typeof(expr) expr_ = (type)(uintptr_t)(expr); \ (type)(uintptr_t)(expr_); }) +struct IODMACommandMapSegment { + uint64_t fDMAOffset; // The offset of this segment in DMA + uint64_t fMapOffset; // Offset of segment in mapping + uint64_t fPageOffset; // Offset within first page of segment +}; struct IODMACommandInternal { IOMDDMAWalkSegmentState fState; @@ -125,7 +109,6 @@ struct IODMACommandInternal { UInt8 fCheckAddressing; UInt8 fIterateOnly; UInt8 fMisaligned; - UInt8 fMapContig; UInt8 fPrepared; UInt8 fDoubleBuffer; UInt8 fNewMD; @@ -143,7 +126,7 @@ struct IODMACommandInternal { uint64_t fLocalMapperAlloc; uint64_t fLocalMapperAllocLength; - class IOBufferMemoryDescriptor * fCopyMD; + OSPtr fCopyMD; IOService * fDevice; @@ -151,6 +134,14 @@ struct IODMACommandInternal { IOReturn fStatus; UInt64 fActualByteCount; AbsoluteTime fTimeStamp; + + // Multisegment vars + IODMACommandMapSegment * fMapSegments; + uint32_t fMapSegmentsCount; + uint64_t fLocalMapperAllocBase; + uint64_t fOffset2Index; + uint64_t fNextOffset; + uint64_t fIndex; }; struct IOMemoryDescriptorDevicePager { @@ -162,6 +153,7 @@ struct IOMemoryDescriptorDevicePager { struct IOMemoryDescriptorReserved { IOMemoryDescriptorDevicePager dp; + uint64_t descriptorID; uint64_t preparationID; // for kernel IOMD subclasses... 
they have no expansion uint64_t kernReserved[4]; @@ -191,7 +183,7 @@ enum{ }; extern "C" void iopa_init(iopa_t * a); -extern "C" uintptr_t iopa_alloc(iopa_t * a, iopa_proc_t alloc, vm_size_t bytes, uint32_t balign); +extern "C" uintptr_t iopa_alloc(iopa_t * a, iopa_proc_t alloc, vm_size_t bytes, vm_size_t balign); extern "C" uintptr_t iopa_free(iopa_t * a, uintptr_t addr, vm_size_t bytes); extern "C" uint32_t gIOPageAllocChunkBytes; @@ -222,6 +214,8 @@ extern "C" void IOKitKernelLogBuffer(const char * title, const void * buffer, si extern const OSSymbol * gIOCreateEFIDevicePathSymbol; extern "C" void IOSetKeyStoreData(LIBKERN_CONSUMED IOMemoryDescriptor * data); extern "C" void IOSetAPFSKeyStoreData(LIBKERN_CONSUMED IOMemoryDescriptor* data); +extern "C" void IOSetARVRootHashData(LIBKERN_CONSUMED IOMemoryDescriptor* arvData); +extern "C" void IOSetARVManifestData(LIBKERN_CONSUMED IOMemoryDescriptor* arvData); #endif extern const OSSymbol * gAKSGetKey; @@ -229,6 +223,9 @@ void IOScreenLockTimeUpdate(clock_sec_t secs); void IOCPUInitialize(void); IOReturn IOInstallServicePlatformActions(IOService * service); +IOReturn IOInstallServiceSleepPlatformActions(IOService * service); IOReturn IORemoveServicePlatformActions(IOService * service); +void IOCPUSleepKernel(void); +void IOPlatformActionsInitialize(void); #endif /* ! _IOKIT_KERNELINTERNAL_H */ diff --git a/iokit/Kernel/IOLib.cpp b/iokit/Kernel/IOLib.cpp index 16459d585..6d3312369 100644 --- a/iokit/Kernel/IOLib.cpp +++ b/iokit/Kernel/IOLib.cpp @@ -95,7 +95,7 @@ __doprnt( int is_log); extern void cons_putc_locked(char); -extern void bsd_log_lock(void); +extern bool bsd_log_lock(bool); extern void bsd_log_unlock(void); @@ -218,13 +218,17 @@ IOLibInit(void) /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -static uint32_t -log2up(uint32_t size) +static vm_size_t +log2up(vm_size_t size) { if (size <= 1) { size = 0; } else { - size = 32 - __builtin_clz(size - 1); +#if __LP64__ + size = 64 - __builtin_clzl(size - 1); +#else + size = 32 - __builtin_clzl(size - 1); +#endif } return size; } @@ -254,13 +258,33 @@ IOExitThread(void) (void) thread_terminate(current_thread()); } +void * +IOMalloc_external( + vm_size_t size); +void * +IOMalloc_external( + vm_size_t size) +{ + return IOMalloc_internal(KHEAP_KEXT, size); +} + +void * +IOMallocZero_external( + vm_size_t size); +void * +IOMallocZero_external( + vm_size_t size) +{ + return IOMallocZero_internal(KHEAP_KEXT, size); +} + /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ void * -IOMallocZero(vm_size_t size) +IOMallocZero_internal(struct kalloc_heap *kalloc_heap_cfg, vm_size_t size) { void * result; - result = IOMalloc(size); + result = IOMalloc_internal(kalloc_heap_cfg, size); if (result) { bzero(result, size); } @@ -284,7 +308,7 @@ struct IOLibMallocHeader { /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ void * -IOMalloc(vm_size_t size) +IOMalloc_internal(struct kalloc_heap *kheap, vm_size_t size) { void * address; vm_size_t allocSize; @@ -295,7 +319,7 @@ IOMalloc(vm_size_t size) return NULL; // overflow } #endif - address = kalloc_tag_bt(allocSize, VM_KERN_MEMORY_IOKIT); + address = kheap_alloc_tag_bt(kheap, allocSize, Z_WAITOK, VM_KERN_MEMORY_IOKIT); if (address) { #if IOTRACKING @@ -311,7 +335,7 @@ IOMalloc(vm_size_t size) address = (typeof(address))(((uintptr_t) address) + sizeofIOLibMallocHeader); #if IOALLOCDEBUG - OSAddAtomic(size, &debug_iomalloc_size); + OSAddAtomicLong(size, &debug_iomalloc_size); #endif 
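IOMalloc_internal() in this hunk keeps the existing prefix-header scheme: over-allocate by sizeofIOLibMallocHeader from the chosen kalloc heap, keep bookkeeping (and, with IOTRACKING, the tracking link) in that prefix, and return the address just past it, with IOFree() stepping back by the same amount before kfree(). A stripped-down sketch of that pattern, assuming plain malloc/free in place of the kalloc heaps and an illustrative header type (the kernel variant takes the size from the caller on free rather than storing it):

#include <stdlib.h>
#include <stddef.h>

/* Illustrative stand-in for IOLibMallocHeader; the real one carries IOTracking state. */
typedef struct {
	size_t requestedSize;
} sketch_alloc_header_t;

static void *
sketch_io_malloc(size_t size)
{
	size_t allocSize;
	/* Guard the header addition against overflow, mirroring the os_add_overflow() check above. */
	if (__builtin_add_overflow(size, sizeof(sketch_alloc_header_t), &allocSize)) {
		return NULL;
	}
	sketch_alloc_header_t *hdr = malloc(allocSize);
	if (!hdr) {
		return NULL;
	}
	hdr->requestedSize = size;          /* bookkeeping lives in the prefix */
	return hdr + 1;                     /* caller only ever sees the payload */
}

static void
sketch_io_free(void *address)
{
	if (!address) {
		return;
	}
	/* Step back over the prefix to recover the original allocation. */
	sketch_alloc_header_t *hdr = (sketch_alloc_header_t *)address - 1;
	free(hdr);
}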
IOStatisticsAlloc(kIOStatisticsMalloc, size); } @@ -349,7 +373,7 @@ IOFree(void * inAddress, vm_size_t size) kfree(address, size + sizeofIOLibMallocHeader); #if IOALLOCDEBUG - OSAddAtomic(-size, &debug_iomalloc_size); + OSAddAtomicLong(-size, &debug_iomalloc_size); #endif IOStatisticsAlloc(kIOStatisticsFree, size); } @@ -391,9 +415,19 @@ struct IOLibPageMallocHeader { #endif /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ +void * +IOMallocAligned_external( + vm_size_t size, vm_size_t alignment); +void * +IOMallocAligned_external( + vm_size_t size, vm_size_t alignment) +{ + return IOMallocAligned_internal(KHEAP_KEXT, size, alignment); +} void * -IOMallocAligned(vm_size_t size, vm_size_t alignment) +IOMallocAligned_internal(struct kalloc_heap *kheap, vm_size_t size, + vm_size_t alignment) { kern_return_t kr; vm_offset_t address; @@ -405,8 +439,11 @@ IOMallocAligned(vm_size_t size, vm_size_t alignment) if (size == 0) { return NULL; } + if (((uint32_t) alignment) != alignment) { + return NULL; + } - alignment = (1UL << log2up(alignment)); + alignment = (1UL << log2up((uint32_t) alignment)); alignMask = alignment - 1; adjustedSize = size + sizeofIOLibPageMallocHeader; @@ -433,7 +470,8 @@ IOMallocAligned(vm_size_t size, vm_size_t alignment) allocationAddress = 0; } } else { - allocationAddress = (vm_address_t) kalloc_tag_bt(adjustedSize, VM_KERN_MEMORY_IOKIT); + allocationAddress = (vm_address_t) kheap_alloc_tag_bt(kheap, + adjustedSize, Z_WAITOK, VM_KERN_MEMORY_IOKIT); } if (allocationAddress) { @@ -460,7 +498,7 @@ IOMallocAligned(vm_size_t size, vm_size_t alignment) if (address) { #if IOALLOCDEBUG - OSAddAtomic(size, &debug_iomalloc_size); + OSAddAtomicLong(size, &debug_iomalloc_size); #endif IOStatisticsAlloc(kIOStatisticsMallocAligned, size); } @@ -511,7 +549,7 @@ IOFreeAligned(void * address, vm_size_t size) } #if IOALLOCDEBUG - OSAddAtomic(-size, &debug_iomalloc_size); + OSAddAtomicLong(-size, &debug_iomalloc_size); #endif IOStatisticsAlloc(kIOStatisticsFreeAligned, size); @@ -522,8 +560,8 @@ IOFreeAligned(void * address, vm_size_t size) void IOKernelFreePhysical(mach_vm_address_t address, mach_vm_size_t size) { - mach_vm_address_t allocationAddress; - mach_vm_size_t adjustedSize; + vm_address_t allocationAddress; + vm_size_t adjustedSize; IOLibPageMallocHeader * hdr; if (!address) { @@ -554,7 +592,7 @@ IOKernelFreePhysical(mach_vm_address_t address, mach_vm_size_t size) IOStatisticsAlloc(kIOStatisticsFreeContiguous, size); #if IOALLOCDEBUG - OSAddAtomic(-size, &debug_iomalloc_size); + OSAddAtomicLong(-size, &debug_iomalloc_size); #endif } @@ -612,7 +650,7 @@ IOKernelAllocateWithPhysicalRestrict(mach_vm_size_t size, mach_vm_address_t maxP } if (contiguous || maxPhys) { kr = kmem_alloc_contig(kernel_map, &virt, size, - alignMask, atop(maxPhys), atop(alignMask), 0, IOMemoryTag(kernel_map)); + alignMask, (ppnum_t) atop(maxPhys), (ppnum_t) atop(alignMask), 0, IOMemoryTag(kernel_map)); } else { kr = kernel_memory_allocate(kernel_map, &virt, size, alignMask, options, IOMemoryTag(kernel_map)); @@ -632,7 +670,8 @@ IOKernelAllocateWithPhysicalRestrict(mach_vm_size_t size, mach_vm_address_t maxP if (adjustedSize < size) { return 0; } - allocationAddress = (mach_vm_address_t) kalloc_tag_bt(adjustedSize, VM_KERN_MEMORY_IOKIT); + allocationAddress = (mach_vm_address_t) kheap_alloc_tag_bt(KHEAP_KEXT, + adjustedSize, Z_WAITOK, VM_KERN_MEMORY_IOKIT); if (allocationAddress) { address = (allocationAddress + alignMask + sizeofIOLibPageMallocHeader) @@ -661,7 +700,7 @@ 
IOKernelAllocateWithPhysicalRestrict(mach_vm_size_t size, mach_vm_address_t maxP if (address) { IOStatisticsAlloc(kIOStatisticsMallocContiguous, size); #if IOALLOCDEBUG - OSAddAtomic(size, &debug_iomalloc_size); + OSAddAtomicLong(size, &debug_iomalloc_size); #endif } @@ -918,15 +957,23 @@ IOMallocOnePageablePage(iopa_t * a) return (uintptr_t) IOMallocPageablePages(page_size, page_size, VM_KERN_MEMORY_IOKIT); } -void * -IOMallocPageable(vm_size_t size, vm_size_t alignment) +static void * +IOMallocPageableInternal(vm_size_t size, vm_size_t alignment, bool zeroed) { void * addr; - if (size >= (page_size - 4 * gIOPageAllocChunkBytes)) { + if (((uint32_t) alignment) != alignment) { + return NULL; + } + if (size >= (page_size - 4 * gIOPageAllocChunkBytes) || + alignment > page_size) { addr = IOMallocPageablePages(size, alignment, IOMemoryTag(kernel_map)); + /* Memory allocated this way will already be zeroed. */ } else { - addr = ((void *) iopa_alloc(&gIOPageablePageAllocator, &IOMallocOnePageablePage, size, alignment)); + addr = ((void *) iopa_alloc(&gIOPageablePageAllocator, &IOMallocOnePageablePage, size, (uint32_t) alignment)); + if (zeroed) { + bzero(addr, size); + } } if (addr) { @@ -939,6 +986,18 @@ IOMallocPageable(vm_size_t size, vm_size_t alignment) return addr; } +void * +IOMallocPageable(vm_size_t size, vm_size_t alignment) +{ + return IOMallocPageableInternal(size, alignment, /*zeroed*/ false); +} + +void * +IOMallocPageableZero(vm_size_t size, vm_size_t alignment) +{ + return IOMallocPageableInternal(size, alignment, /*zeroed*/ true); +} + void IOFreePageable(void * address, vm_size_t size) { @@ -996,7 +1055,7 @@ iopa_allocinpage(iopa_page_t * pa, uint32_t count, uint64_t align) } uintptr_t -iopa_alloc(iopa_t * a, iopa_proc_t alloc, vm_size_t bytes, uint32_t balign) +iopa_alloc(iopa_t * a, iopa_proc_t alloc, vm_size_t bytes, vm_size_t balign) { static const uint64_t align_masks[] = { 0xFFFFFFFFFFFFFFFF, @@ -1011,12 +1070,19 @@ iopa_alloc(iopa_t * a, iopa_proc_t alloc, vm_size_t bytes, uint32_t balign) uintptr_t addr = 0; uint32_t count; uint64_t align; + vm_size_t align_masks_idx; + if (((uint32_t) bytes) != bytes) { + return 0; + } if (!bytes) { bytes = 1; } - count = (bytes + gIOPageAllocChunkBytes - 1) / gIOPageAllocChunkBytes; - align = align_masks[log2up((balign + gIOPageAllocChunkBytes - 1) / gIOPageAllocChunkBytes)]; + count = (((uint32_t) bytes) + gIOPageAllocChunkBytes - 1) / gIOPageAllocChunkBytes; + + align_masks_idx = log2up((balign + gIOPageAllocChunkBytes - 1) / gIOPageAllocChunkBytes); + assert(align_masks_idx < sizeof(align_masks) / sizeof(*align_masks)); + align = align_masks[align_masks_idx]; IOLockLock(a->lock); __IGNORE_WCASTALIGN(pa = (typeof(pa))queue_first(&a->list)); @@ -1061,6 +1127,9 @@ iopa_free(iopa_t * a, uintptr_t addr, vm_size_t bytes) uint32_t count; uintptr_t chunk; + if (((uint32_t) bytes) != bytes) { + return 0; + } if (!bytes) { bytes = 1; } @@ -1071,7 +1140,7 @@ iopa_free(iopa_t * a, uintptr_t addr, vm_size_t bytes) pa = (typeof(pa))(addr | (page_size - gIOPageAllocChunkBytes)); assert(kIOPageAllocSignature == pa->signature); - count = (bytes + gIOPageAllocChunkBytes - 1) / gIOPageAllocChunkBytes; + count = (((uint32_t) bytes) + gIOPageAllocChunkBytes - 1) / gIOPageAllocChunkBytes; chunk /= gIOPageAllocChunkBytes; IOLockLock(a->lock); @@ -1318,7 +1387,7 @@ IOCopyLogNameForPID(int pid) size_t len; snprintf(buf, sizeof(buf), "pid %d, ", pid); len = strlen(buf); - proc_name(pid, buf + len, sizeof(buf) - len); + proc_name(pid, buf + len, (int) 
(sizeof(buf) - len)); return OSString::withCString(buf); } diff --git a/iokit/Kernel/IOMapper.cpp b/iokit/Kernel/IOMapper.cpp index cbf31f3d4..c2f764834 100644 --- a/iokit/Kernel/IOMapper.cpp +++ b/iokit/Kernel/IOMapper.cpp @@ -25,6 +25,9 @@ * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ + +#define IOKIT_ENABLE_SHARED_PTR + #include #include #include @@ -98,7 +101,7 @@ static IOMapperLock sMapperLock; bool IOMapper::start(IOService *provider) { - OSObject * obj; + OSSharedPtr obj; if (!super::start(provider)) { return false; } @@ -107,7 +110,9 @@ IOMapper::start(IOService *provider) return false; } - fPageSize = getPageSize(); + uint64_t pageSize = getPageSize(); + assert(pageSize <= UINT_MAX); + fPageSize = (uint32_t) pageSize; if (fIsSystem) { sMapperLock.lock(); @@ -117,12 +122,12 @@ IOMapper::start(IOService *provider) } if (provider) { - obj = provider->getProperty("iommu-id"); + obj = provider->copyProperty("iommu-id"); if (!obj) { - obj = provider->getProperty("AAPL,phandle"); + obj = provider->copyProperty("AAPL,phandle"); } if (obj) { - setProperty(gIOMapperIDKey, obj); + setProperty(gIOMapperIDKey, obj.get()); } } return true; @@ -158,54 +163,48 @@ IOMapper::waitForSystemMapper() sMapperLock.unlock(); } -IOMapper * +OSSharedPtr IOMapper::copyMapperForDevice(IOService * device) { return copyMapperForDeviceWithIndex(device, 0); } -IOMapper * +OSSharedPtr IOMapper::copyMapperForDeviceWithIndex(IOService * device, unsigned int index) { - OSData *data; - OSObject * obj; - IOMapper * mapper = NULL; - OSDictionary * matching; + OSSharedPtr data; + OSSharedPtr obj; + OSSharedPtr mapper; + OSSharedPtr matching; obj = device->copyProperty("iommu-parent"); if (!obj) { return NULL; } - if ((mapper = OSDynamicCast(IOMapper, obj))) { + if ((mapper = OSDynamicPtrCast(obj))) { goto found; } - if ((data = OSDynamicCast(OSData, obj))) { + if ((data = OSDynamicPtrCast(obj))) { if (index >= data->getLength() / sizeof(UInt32)) { - goto done; + goto found; } data = OSData::withBytesNoCopy((UInt32 *)data->getBytesNoCopy() + index, sizeof(UInt32)); if (!data) { - goto done; + goto found; } - matching = IOService::propertyMatching(gIOMapperIDKey, data); - data->release(); + matching = IOService::propertyMatching(gIOMapperIDKey, data.get()); } else { - matching = IOService::propertyMatching(gIOMapperIDKey, obj); + matching = IOService::propertyMatching(gIOMapperIDKey, obj.get()); } if (matching) { - mapper = OSDynamicCast(IOMapper, IOService::waitForMatchingService(matching)); - matching->release(); + mapper = OSDynamicPtrCast(IOService::waitForMatchingService(matching.get())); } -done: - if (obj) { - obj->release(); - } found: if (mapper) { if (!mapper->fAllocName) { @@ -247,7 +246,12 @@ IOMapperIOVMAlloc(unsigned pages) } if (kIOReturnSuccess == ret) { - return atop_64(dmaAddress); + uint64_t dmaAddressPage64; + dmaAddressPage64 = atop_64(dmaAddress); + if (dmaAddressPage64 > UINT_MAX) { + return 0; + } + return (ppnum_t) atop_64(dmaAddress); } return 0; } diff --git a/iokit/Kernel/IOMemoryCursor.cpp b/iokit/Kernel/IOMemoryCursor.cpp index 82b139267..7f3166350 100644 --- a/iokit/Kernel/IOMemoryCursor.cpp +++ b/iokit/Kernel/IOMemoryCursor.cpp @@ -27,6 +27,8 @@ */ /* IOMemoryCursor.cpp created by wgulland on 1999-3-02 */ +#define IOKIT_ENABLE_SHARED_PTR + #include #include #include @@ -39,20 +41,19 @@ #define super OSObject OSDefineMetaClassAndStructors(IOMemoryCursor, OSObject) -IOMemoryCursor * +OSSharedPtr IOMemoryCursor::withSpecification(SegmentFunction inSegFunc, IOPhysicalLength inMaxSegmentSize, 
IOPhysicalLength inMaxTransferSize, IOPhysicalLength inAlignment) { - IOMemoryCursor * me = new IOMemoryCursor; + OSSharedPtr me = OSMakeShared(); if (me && !me->initWithSpecification(inSegFunc, inMaxSegmentSize, inMaxTransferSize, inAlignment)) { - me->release(); - return NULL; + return nullptr; } return me; @@ -88,6 +89,9 @@ IOMemoryCursor::initWithSpecification(SegmentFunction inSegFunc, if (!inSegFunc) { return false; } + if (inMaxTransferSize > UINT_MAX) { + return false; + } outSeg = inSegFunc; maxSegmentSize = inMaxSegmentSize; @@ -121,7 +125,7 @@ IOMemoryCursor::genPhysicalSegments(IOMemoryDescriptor *inDescriptor, } if (!inMaxTransferSize) { - inMaxTransferSize = maxTransferSize; + inMaxTransferSize = (typeof(inMaxTransferSize))maxTransferSize; } /* @@ -204,18 +208,17 @@ IONaturalMemoryCursor::outputSegment(PhysicalSegment segment, ((PhysicalSegment *) outSegments)[outSegmentIndex] = segment; } -IONaturalMemoryCursor * +OSSharedPtr IONaturalMemoryCursor::withSpecification(IOPhysicalLength inMaxSegmentSize, IOPhysicalLength inMaxTransferSize, IOPhysicalLength inAlignment) { - IONaturalMemoryCursor *me = new IONaturalMemoryCursor; + OSSharedPtr me = OSMakeShared(); if (me && !me->initWithSpecification(inMaxSegmentSize, inMaxTransferSize, inAlignment)) { - me->release(); - return NULL; + return nullptr; } return me; @@ -255,18 +258,17 @@ IOBigMemoryCursor::outputSegment(PhysicalSegment inSegment, #endif } -IOBigMemoryCursor * +OSSharedPtr IOBigMemoryCursor::withSpecification(IOPhysicalLength inMaxSegmentSize, IOPhysicalLength inMaxTransferSize, IOPhysicalLength inAlignment) { - IOBigMemoryCursor * me = new IOBigMemoryCursor; + OSSharedPtr me = OSMakeShared(); if (me && !me->initWithSpecification(inMaxSegmentSize, inMaxTransferSize, inAlignment)) { - me->release(); - return NULL; + return nullptr; } return me; @@ -306,18 +308,17 @@ IOLittleMemoryCursor::outputSegment(PhysicalSegment inSegment, #endif } -IOLittleMemoryCursor * +OSSharedPtr IOLittleMemoryCursor::withSpecification(IOPhysicalLength inMaxSegmentSize, IOPhysicalLength inMaxTransferSize, IOPhysicalLength inAlignment) { - IOLittleMemoryCursor * me = new IOLittleMemoryCursor; + OSSharedPtr me = OSMakeShared(); if (me && !me->initWithSpecification(inMaxSegmentSize, inMaxTransferSize, inAlignment)) { - me->release(); - return NULL; + return nullptr; } return me; diff --git a/iokit/Kernel/IOMemoryDescriptor.cpp b/iokit/Kernel/IOMemoryDescriptor.cpp index a4e7d0536..e4accd208 100644 --- a/iokit/Kernel/IOMemoryDescriptor.cpp +++ b/iokit/Kernel/IOMemoryDescriptor.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998-2016 Apple Inc. All rights reserved. + * Copyright (c) 1998-2020 Apple Inc. All rights reserved. 
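As an illustrative aside: the IOMemoryCursor hunks above follow the IOKIT_ENABLE_SHARED_PTR pattern used throughout this patch: factory methods return OSSharedPtr<T>, objects are created with OSMakeShared<T>(), and the manual me->release() on a failed init disappears because dropping the smart pointer releases the object. A rough stand-alone sketch of that shape, using std::shared_ptr in place of the IOKit types (CursorSketch and withSpecificationSketch are invented names, not the real API):

#include <memory>

struct CursorSketch {
        bool initWithSpecification(unsigned maxSegmentSize, unsigned maxTransferSize) {
                return maxSegmentSize != 0 && maxTransferSize != 0;
        }
};

static std::shared_ptr<CursorSketch>
withSpecificationSketch(unsigned maxSegmentSize, unsigned maxTransferSize)
{
        auto me = std::make_shared<CursorSketch>();
        if (!me->initWithSpecification(maxSegmentSize, maxTransferSize)) {
                /* Returning nullptr destroys the freshly created object; no explicit
                 * release() call is needed, unlike the raw-pointer version. */
                return nullptr;
        }
        return me;
}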
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -25,7 +25,7 @@ * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ - +#define IOKIT_ENABLE_SHARED_PTR #include @@ -41,6 +41,7 @@ #include #include +#include #include #include @@ -52,6 +53,8 @@ #include #include #include +#include +#include #include @@ -84,7 +87,8 @@ OSDefineMetaClassAndAbstractStructors( IOMemoryDescriptor, OSObject ) #define super IOMemoryDescriptor -OSDefineMetaClassAndStructors(IOGeneralMemoryDescriptor, IOMemoryDescriptor) +OSDefineMetaClassAndStructorsWithZone(IOGeneralMemoryDescriptor, + IOMemoryDescriptor, ZC_ZFREE_CLEARMEM) /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ @@ -120,7 +124,7 @@ struct IOMDPersistentInitData { struct ioPLBlock { upl_t fIOPL; vm_address_t fPageInfo; // Pointer to page list or index into it - uint32_t fIOMDOffset; // The offset of this iopl in descriptor + uint64_t fIOMDOffset; // The offset of this iopl in descriptor ppnum_t fMappedPage; // Page number of first page in this iopl unsigned int fPageOffset; // Offset within first page of iopl unsigned int fFlags; // Flags @@ -139,10 +143,9 @@ struct ioGMDData { #endif /* IOTRACKING */ unsigned int fPageCnt; uint8_t fDMAMapNumAddressBits; - unsigned char fDiscontig:1; unsigned char fCompletionError:1; unsigned char fMappedBaseValid:1; - unsigned char _resv:3; + unsigned char _resv:4; unsigned char fDMAAccess:2; /* variable length arrays */ @@ -158,7 +161,7 @@ struct ioGMDData { #define getDataP(osd) ((ioGMDData *) (osd)->getBytesNoCopy()) #define getIOPLList(d) ((ioPLBlock *) (void *)&(d->fPageList[d->fPageCnt])) #define getNumIOPL(osd, d) \ - (((osd)->getLength() - ((char *) getIOPLList(d) - (char *) d)) / sizeof(ioPLBlock)) + ((UInt)(((osd)->getLength() - ((char *) getIOPLList(d) - (char *) d)) / sizeof(ioPLBlock))) #define getPageList(d) (&(d->fPageList[0])) #define computeDataSize(p, u) \ (offsetof(ioGMDData, fPageList) + p * sizeof(upl_page_info_t) + u * sizeof(ioPLBlock)) @@ -167,8 +170,6 @@ enum { kIOMemoryHostOrRemote = kIOMemoryHostOnly | kIOMemoryRemote }; /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -#define next_page(a) ( trunc_page(a) + PAGE_SIZE ) - extern "C" { kern_return_t device_data_action( @@ -180,14 +181,13 @@ device_data_action( { kern_return_t kr; IOMemoryDescriptorReserved * ref = (IOMemoryDescriptorReserved *) device_handle; - IOMemoryDescriptor * memDesc; + OSSharedPtr memDesc; LOCK; - memDesc = ref->dp.memory; - if (memDesc) { - memDesc->retain(); + if (ref->dp.memory) { + memDesc.reset(ref->dp.memory, OSRetain); kr = memDesc->handleFault(device_pager, offset, size); - memDesc->release(); + memDesc.reset(); } else { kr = KERN_ABORTED; } @@ -360,6 +360,7 @@ struct IOMemoryEntry { ipc_port_t entry; int64_t offset; uint64_t size; + uint64_t start; }; struct IOMemoryReference { @@ -467,6 +468,8 @@ IOGeneralMemoryDescriptor::memoryReferenceCreate( mach_vm_size_t srcAddr, srcLen; mach_vm_size_t nextAddr, nextLen; mach_vm_size_t offset, remain; + vm_map_offset_t overmap_start = 0, overmap_end = 0; + int misaligned_start = 0, misaligned_end = 0; IOByteCount physLen; IOOptionBits type = (_flags & kIOMemoryTypeMask); IOOptionBits cacheMode; @@ -479,7 +482,7 @@ IOGeneralMemoryDescriptor::memoryReferenceCreate( return kIOReturnNoMemory; } - tag = getVMTag(kernel_map); + tag = (vm_tag_t) getVMTag(kernel_map); vmne_kflags = VM_NAMED_ENTRY_KERNEL_FLAGS_NONE; entries = &ref->entries[0]; count = 0; @@ -568,8 +571,10 @@ IOGeneralMemoryDescriptor::memoryReferenceCreate( prot |= 
VM_PROT_WRITE; map = NULL; } else { + prot |= MAP_MEM_USE_DATA_ADDR; map = get_task_map(_task); } + DEBUG4K_IOKIT("map %p _length 0x%llx prot 0x%x\n", map, (uint64_t)_length, prot); remain = _length; while (remain) { @@ -585,8 +590,18 @@ IOGeneralMemoryDescriptor::memoryReferenceCreate( } srcLen += nextLen; } - entryAddr = trunc_page_64(srcAddr); - endAddr = round_page_64(srcAddr + srcLen); + + if (MAP_MEM_USE_DATA_ADDR & prot) { + entryAddr = srcAddr; + endAddr = srcAddr + srcLen; + } else { + entryAddr = trunc_page_64(srcAddr); + endAddr = round_page_64(srcAddr + srcLen); + } + if (vm_map_page_mask(get_task_map(_task)) < PAGE_MASK) { + DEBUG4K_IOKIT("IOMemRef %p _flags 0x%x prot 0x%x _ranges[%d]: 0x%llx 0x%llx\n", ref, (uint32_t)_flags, prot, rangeIdx - 1, srcAddr, srcLen); + } + do{ entrySize = (endAddr - entryAddr); if (!entrySize) { @@ -607,12 +622,47 @@ IOGeneralMemoryDescriptor::memoryReferenceCreate( &actualSize, entryAddr, prot, vmne_kflags, &entry, cloneEntry); if (KERN_SUCCESS != err) { + DEBUG4K_ERROR("make_memory_entry(map %p, addr 0x%llx, size 0x%llx, prot 0x%x) err 0x%x\n", map, entryAddr, actualSize, prot, err); break; } - if (actualSize > entrySize) { + if (MAP_MEM_USE_DATA_ADDR & prot) { + if (actualSize > entrySize) { + actualSize = entrySize; + } + } else if (actualSize > entrySize) { panic("mach_make_memory_entry_64 actualSize"); } + memory_entry_check_for_adjustment(map, entry, &overmap_start, &overmap_end); + + if (count && overmap_start) { + /* + * Track misaligned start for all + * except the first entry. + */ + misaligned_start++; + } + + if (overmap_end) { + /* + * Ignore misaligned end for the + * last entry. + */ + if ((entryAddr + actualSize) != endAddr) { + misaligned_end++; + } + } + + if (count) { + /* Middle entries */ + if (misaligned_start || misaligned_end) { + DEBUG4K_IOKIT("stopped at entryAddr 0x%llx\n", entryAddr); + ipc_port_release_send(entry); + err = KERN_NOT_SUPPORTED; + break; + } + } + if (count >= ref->capacity) { ref = memoryReferenceAlloc(ref->capacity + kCapacityInc, ref); entries = &ref->entries[count]; @@ -620,6 +670,7 @@ IOGeneralMemoryDescriptor::memoryReferenceCreate( entries->entry = entry; entries->size = actualSize; entries->offset = offset + (entryAddr - srcAddr); + entries->start = entryAddr; entryAddr += actualSize; if (MAP_MEM_NAMED_REUSE & prot) { if ((cloneEntries->entry == entries->entry) @@ -660,6 +711,7 @@ IOGeneralMemoryDescriptor::memoryReferenceCreate( size, pagerFlags); assert(pager); if (!pager) { + DEBUG4K_ERROR("pager setup failed size 0x%llx flags 0x%x\n", (uint64_t)size, pagerFlags); err = kIOReturnVMError; } else { srcAddr = nextAddr; @@ -687,6 +739,9 @@ IOGeneralMemoryDescriptor::memoryReferenceCreate( && (kIOMemoryMapCopyOnWrite & _flags) && !(kIOMemoryReferenceCOW & options)) { err = memoryReferenceCreate(options | kIOMemoryReferenceCOW, &ref->mapRef); + if (KERN_SUCCESS != err) { + DEBUG4K_ERROR("ref %p options 0x%x err 0x%x\n", ref, (unsigned int)options, err); + } } if (KERN_SUCCESS == err) { @@ -696,6 +751,7 @@ IOGeneralMemoryDescriptor::memoryReferenceCreate( ref = _memRef; } } else { + DEBUG4K_ERROR("ref %p err 0x%x\n", ref, err); memoryReferenceFree(ref); ref = NULL; } @@ -715,7 +771,12 @@ IOMemoryDescriptorMapAlloc(vm_map_t map, void * _ref) addr = ref->mapped; err = vm_map_enter_mem_object(map, &addr, ref->size, +#if __ARM_MIXED_PAGE_SIZE__ + // TODO4K this should not be necessary... + (vm_map_offset_t)((ref->options & kIOMapAnywhere) ? 
max(PAGE_MASK, vm_map_page_mask(map)) : 0), +#else /* __ARM_MIXED_PAGE_SIZE__ */ (vm_map_offset_t) 0, +#endif /* __ARM_MIXED_PAGE_SIZE__ */ (((ref->options & kIOMapAnywhere) ? VM_FLAGS_ANYWHERE : VM_FLAGS_FIXED)), @@ -763,11 +824,18 @@ IOGeneralMemoryDescriptor::memoryReferenceMap( UInt currentPageIndex = 0; bool didAlloc; + DEBUG4K_IOKIT("ref %p map %p inoffset 0x%llx size 0x%llx options 0x%x *inaddr 0x%llx\n", ref, map, inoffset, size, (uint32_t)options, *inaddr); + if (ref->mapRef) { err = memoryReferenceMap(ref->mapRef, map, inoffset, size, options, inaddr); return err; } + if (MAP_MEM_USE_DATA_ADDR & ref->prot) { + err = memoryReferenceMapNew(ref, map, inoffset, size, options, inaddr); + return err; + } + type = _flags & kIOMemoryTypeMask; prot = VM_PROT_READ; @@ -782,7 +850,7 @@ IOGeneralMemoryDescriptor::memoryReferenceMap( memEntryCacheMode = (MAP_MEM_ONLY | VM_PROT_WRITE | prot | vmProtForCacheMode(cacheMode)); } - tag = getVMTag(map); + tag = (typeof(tag))getVMTag(map); if (_task) { // Find first range for offset @@ -805,19 +873,24 @@ IOGeneralMemoryDescriptor::memoryReferenceMap( assert(remain < nextLen); if (remain >= nextLen) { + DEBUG4K_ERROR("map %p inoffset 0x%llx size 0x%llx options 0x%x inaddr 0x%llx remain 0x%llx nextLen 0x%llx\n", map, inoffset, size, (uint32_t)options, *inaddr, (uint64_t)remain, nextLen); return kIOReturnBadArgument; } nextAddr += remain; nextLen -= remain; +#if __ARM_MIXED_PAGE_SIZE__ + pageOffset = (vm_map_page_mask(map) & nextAddr); +#else /* __ARM_MIXED_PAGE_SIZE__ */ pageOffset = (page_mask & nextAddr); +#endif /* __ARM_MIXED_PAGE_SIZE__ */ addr = 0; didAlloc = false; if (!(options & kIOMapAnywhere)) { addr = *inaddr; - if (pageOffset != (page_mask & addr)) { - return kIOReturnNotAligned; + if (pageOffset != (vm_map_page_mask(map) & addr)) { + DEBUG4K_ERROR("map %p inoffset 0x%llx size 0x%llx options 0x%x inaddr 0x%llx addr 0x%llx page_mask 0x%llx pageOffset 0x%llx\n", map, inoffset, size, (uint32_t)options, *inaddr, (uint64_t)addr, (uint64_t)page_mask, (uint64_t)pageOffset); } addr -= pageOffset; } @@ -885,6 +958,7 @@ IOGeneralMemoryDescriptor::memoryReferenceMap( assert(_memoryEntries != NULL); if ((_wireCount == 0) || (_memoryEntries == NULL)) { + DEBUG4K_ERROR("map %p inoffset 0x%llx size 0x%llx options 0x%x inaddr 0x%llx\n", map, inoffset, size, (uint32_t)options, *inaddr); return kIOReturnBadArgument; } @@ -902,7 +976,7 @@ IOGeneralMemoryDescriptor::memoryReferenceMap( * right range at the end. 
*/ UInt ioplIndex = 0; - while (ioplIndex < numIOPLs && offset >= ioplList[ioplIndex].fIOMDOffset) { + while ((ioplIndex < numIOPLs) && (((uint64_t) offset) >= ioplList[ioplIndex].fIOMDOffset)) { ioplIndex++; } ioplIndex--; @@ -934,8 +1008,9 @@ IOGeneralMemoryDescriptor::memoryReferenceMap( while (remain && (KERN_SUCCESS == err)) { entryOffset = offset - entry->offset; - if ((page_mask & entryOffset) != pageOffset) { + if ((min(vm_map_page_mask(map), page_mask) & entryOffset) != pageOffset) { err = kIOReturnNotAligned; + DEBUG4K_ERROR("map %p inoffset 0x%llx size 0x%llx options 0x%x inaddr 0x%llx entryOffset 0x%llx pageOffset 0x%llx\n", map, inoffset, size, (uint32_t)options, *inaddr, (uint64_t)entryOffset, (uint64_t)pageOffset); break; } @@ -961,7 +1036,7 @@ IOGeneralMemoryDescriptor::memoryReferenceMap( chunk = remain; } if (options & kIOMapPrefault) { - UInt nb_pages = round_page(chunk) / PAGE_SIZE; + UInt nb_pages = (typeof(nb_pages))round_page(chunk) / PAGE_SIZE; err = vm_map_enter_mem_object_prefault(map, &mapAddr, @@ -977,15 +1052,335 @@ IOGeneralMemoryDescriptor::memoryReferenceMap( &pageList[currentPageIndex], nb_pages); + if (err || vm_map_page_mask(map) < PAGE_MASK) { + DEBUG4K_IOKIT("IOMemRef %p mapped in map %p (pgshift %d) at 0x%llx size 0x%llx err 0x%x\n", ref, map, vm_map_page_shift(map), (uint64_t)mapAddr, (uint64_t)chunk, err); + } + // Compute the next index in the page list. + currentPageIndex += nb_pages; + assert(currentPageIndex <= _pages); + } else { + err = vm_map_enter_mem_object(map, + &mapAddr, + chunk, 0 /* mask */, + (VM_FLAGS_FIXED + | VM_FLAGS_OVERWRITE), + vmk_flags, + tag, + entry->entry, + entryOffset, + false, // copy + prot, // cur + prot, // max + VM_INHERIT_NONE); + } + if (KERN_SUCCESS != err) { + DEBUG4K_ERROR("IOMemRef %p mapped in map %p (pgshift %d) at 0x%llx size 0x%llx err 0x%x\n", ref, map, vm_map_page_shift(map), (uint64_t)mapAddr, (uint64_t)chunk, err); + break; + } + remain -= chunk; + if (!remain) { + break; + } + mapAddr += chunk; + offset += chunk - pageOffset; + } + pageOffset = 0; + entry++; + entryIdx++; + if (entryIdx >= ref->count) { + err = kIOReturnOverrun; + DEBUG4K_ERROR("map %p inoffset 0x%llx size 0x%llx options 0x%x inaddr 0x%llx entryIdx %d ref->count %d\n", map, inoffset, size, (uint32_t)options, *inaddr, entryIdx, ref->count); + break; + } + } + + if ((KERN_SUCCESS != err) && didAlloc) { + (void) mach_vm_deallocate(map, trunc_page_64(addr), size); + addr = 0; + } + *inaddr = addr; + + if (err /* || vm_map_page_mask(map) < PAGE_MASK */) { + DEBUG4K_ERROR("map %p (%d) inoffset 0x%llx size 0x%llx options 0x%x inaddr 0x%llx err 0x%x\n", map, vm_map_page_shift(map), inoffset, size, (uint32_t)options, *inaddr, err); + } + return err; +} + +#define LOGUNALIGN 0 +IOReturn +IOGeneralMemoryDescriptor::memoryReferenceMapNew( + IOMemoryReference * ref, + vm_map_t map, + mach_vm_size_t inoffset, + mach_vm_size_t size, + IOOptionBits options, + mach_vm_address_t * inaddr) +{ + IOReturn err; + int64_t offset = inoffset; + uint32_t entryIdx, firstEntryIdx; + vm_map_offset_t addr, mapAddr, mapAddrOut; + vm_map_offset_t entryOffset, remain, chunk; + + IOMemoryEntry * entry; + vm_prot_t prot, memEntryCacheMode; + IOOptionBits type; + IOOptionBits cacheMode; + vm_tag_t tag; + // for the kIOMapPrefault option. 
+ upl_page_info_t * pageList = NULL; + UInt currentPageIndex = 0; + bool didAlloc; + + DEBUG4K_IOKIT("ref %p map %p inoffset 0x%llx size 0x%llx options 0x%x *inaddr 0x%llx\n", ref, map, inoffset, size, (uint32_t)options, *inaddr); + + if (ref->mapRef) { + err = memoryReferenceMap(ref->mapRef, map, inoffset, size, options, inaddr); + return err; + } + +#if LOGUNALIGN + printf("MAP offset %qx, %qx\n", inoffset, size); +#endif + + type = _flags & kIOMemoryTypeMask; + + prot = VM_PROT_READ; + if (!(kIOMapReadOnly & options)) { + prot |= VM_PROT_WRITE; + } + prot &= ref->prot; + + cacheMode = ((options & kIOMapCacheMask) >> kIOMapCacheShift); + if (kIODefaultCache != cacheMode) { + // VM system requires write access to update named entry cache mode + memEntryCacheMode = (MAP_MEM_ONLY | VM_PROT_WRITE | prot | vmProtForCacheMode(cacheMode)); + } + + tag = (vm_tag_t) getVMTag(map); + + addr = 0; + didAlloc = false; + + if (!(options & kIOMapAnywhere)) { + addr = *inaddr; + } + + // find first entry for offset + for (firstEntryIdx = 0; + (firstEntryIdx < ref->count) && (offset >= ref->entries[firstEntryIdx].offset); + firstEntryIdx++) { + } + firstEntryIdx--; + + // calculate required VM space + + entryIdx = firstEntryIdx; + entry = &ref->entries[entryIdx]; + + remain = size; + int64_t iteroffset = offset; + uint64_t mapSize = 0; + while (remain) { + entryOffset = iteroffset - entry->offset; + if (entryOffset >= entry->size) { + panic("entryOffset"); + } + +#if LOGUNALIGN + printf("[%d] size %qx offset %qx start %qx iter %qx\n", + entryIdx, entry->size, entry->offset, entry->start, iteroffset); +#endif + + chunk = entry->size - entryOffset; + if (chunk) { + if (chunk > remain) { + chunk = remain; + } + mach_vm_size_t entrySize; + err = mach_memory_entry_map_size(entry->entry, map, entryOffset, chunk, &entrySize); + assert(KERN_SUCCESS == err); + mapSize += entrySize; + + remain -= chunk; + if (!remain) { + break; + } + iteroffset += chunk; // - pageOffset; + } + entry++; + entryIdx++; + if (entryIdx >= ref->count) { + panic("overrun"); + err = kIOReturnOverrun; + break; + } + } + + if (kIOMapOverwrite & options) { + if ((map == kernel_map) && (kIOMemoryBufferPageable & _flags)) { + map = IOPageableMapForAddress(addr); + } + err = KERN_SUCCESS; + } else { + IOMemoryDescriptorMapAllocRef ref; + ref.map = map; + ref.tag = tag; + ref.options = options; + ref.size = mapSize; + ref.prot = prot; + if (options & kIOMapAnywhere) { + // vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE + ref.mapped = 0; + } else { + ref.mapped = addr; + } + if ((ref.map == kernel_map) && (kIOMemoryBufferPageable & _flags)) { + err = IOIteratePageableMaps( ref.size, &IOMemoryDescriptorMapAlloc, &ref ); + } else { + err = IOMemoryDescriptorMapAlloc(ref.map, &ref); + } + + if (KERN_SUCCESS == err) { + addr = ref.mapped; + map = ref.map; + didAlloc = true; + } +#if LOGUNALIGN + IOLog("map err %x size %qx addr %qx\n", err, mapSize, addr); +#endif + } + + /* + * If the memory is associated with a device pager but doesn't have a UPL, + * it will be immediately faulted in through the pager via populateDevicePager(). + * kIOMapPrefault is redundant in that case, so don't try to use it for UPL + * operations. + */ + if ((reserved != NULL) && (reserved->dp.devicePager) && (_wireCount != 0)) { + options &= ~kIOMapPrefault; + } + + /* + * Prefaulting is only possible if we wired the memory earlier. Check the + * memory type, and the underlying data. 
+ */ + if (options & kIOMapPrefault) { + /* + * The memory must have been wired by calling ::prepare(), otherwise + * we don't have the UPL. Without UPLs, pages cannot be pre-faulted + */ + assert(_wireCount != 0); + assert(_memoryEntries != NULL); + if ((_wireCount == 0) || + (_memoryEntries == NULL)) { + return kIOReturnBadArgument; + } + + // Get the page list. + ioGMDData* dataP = getDataP(_memoryEntries); + ioPLBlock const* ioplList = getIOPLList(dataP); + pageList = getPageList(dataP); + + // Get the number of IOPLs. + UInt numIOPLs = getNumIOPL(_memoryEntries, dataP); + + /* + * Scan through the IOPL Info Blocks, looking for the first block containing + * the offset. The research will go past it, so we'll need to go back to the + * right range at the end. + */ + UInt ioplIndex = 0; + while ((ioplIndex < numIOPLs) && (((uint64_t) offset) >= ioplList[ioplIndex].fIOMDOffset)) { + ioplIndex++; + } + ioplIndex--; + + // Retrieve the IOPL info block. + ioPLBlock ioplInfo = ioplList[ioplIndex]; + + /* + * For external UPLs, the fPageInfo points directly to the UPL's page_info_t + * array. + */ + if (ioplInfo.fFlags & kIOPLExternUPL) { + pageList = (upl_page_info_t*) ioplInfo.fPageInfo; + } else { + pageList = &pageList[ioplInfo.fPageInfo]; + } + + // Rebase [offset] into the IOPL in order to looks for the first page index. + mach_vm_size_t offsetInIOPL = offset - ioplInfo.fIOMDOffset + ioplInfo.fPageOffset; + + // Retrieve the index of the first page corresponding to the offset. + currentPageIndex = atop_32(offsetInIOPL); + } + + // enter mappings + remain = size; + mapAddr = addr; + entryIdx = firstEntryIdx; + entry = &ref->entries[entryIdx]; + + while (remain && (KERN_SUCCESS == err)) { +#if LOGUNALIGN + printf("offset %qx, %qx\n", offset, entry->offset); +#endif + if (kIODefaultCache != cacheMode) { + vm_size_t unused = 0; + err = mach_make_memory_entry(NULL /*unused*/, &unused, 0 /*unused*/, + memEntryCacheMode, NULL, entry->entry); + assert(KERN_SUCCESS == err); + } + entryOffset = offset - entry->offset; + if (entryOffset >= entry->size) { + panic("entryOffset"); + } + chunk = entry->size - entryOffset; +#if LOGUNALIGN + printf("entryIdx %d, chunk %qx\n", entryIdx, chunk); +#endif + if (chunk) { + vm_map_kernel_flags_t vmk_flags; + + vmk_flags = VM_MAP_KERNEL_FLAGS_NONE; + vmk_flags.vmkf_iokit_acct = TRUE; /* iokit accounting */ + + if (chunk > remain) { + chunk = remain; + } + mapAddrOut = mapAddr; + if (options & kIOMapPrefault) { + UInt nb_pages = (typeof(nb_pages))round_page(chunk) / PAGE_SIZE; + + err = vm_map_enter_mem_object_prefault(map, + &mapAddrOut, + chunk, 0 /* mask */, + (VM_FLAGS_FIXED + | VM_FLAGS_OVERWRITE + | VM_FLAGS_RETURN_DATA_ADDR), + vmk_flags, + tag, + entry->entry, + entryOffset, + prot, // cur + prot, // max + &pageList[currentPageIndex], + nb_pages); + // Compute the next index in the page list. 
currentPageIndex += nb_pages; assert(currentPageIndex <= _pages); } else { +#if LOGUNALIGN + printf("mapAddr i %qx chunk %qx\n", mapAddr, chunk); +#endif err = vm_map_enter_mem_object(map, - &mapAddr, + &mapAddrOut, chunk, 0 /* mask */, (VM_FLAGS_FIXED - | VM_FLAGS_OVERWRITE), + | VM_FLAGS_OVERWRITE + | VM_FLAGS_RETURN_DATA_ADDR), vmk_flags, tag, entry->entry, @@ -996,16 +1391,26 @@ IOGeneralMemoryDescriptor::memoryReferenceMap( VM_INHERIT_NONE); } if (KERN_SUCCESS != err) { + panic("map enter err %x", err); break; } +#if LOGUNALIGN + printf("mapAddr o %qx\n", mapAddrOut); +#endif + if (entryIdx == firstEntryIdx) { + addr = mapAddrOut; + } remain -= chunk; if (!remain) { break; } - mapAddr += chunk; - offset += chunk - pageOffset; + mach_vm_size_t entrySize; + err = mach_memory_entry_map_size(entry->entry, map, entryOffset, chunk, &entrySize); + assert(KERN_SUCCESS == err); + mapAddr += entrySize; + offset += chunk; } - pageOffset = 0; + entry++; entryIdx++; if (entryIdx >= ref->count) { @@ -1014,6 +1419,10 @@ IOGeneralMemoryDescriptor::memoryReferenceMap( } } + if (KERN_SUCCESS != err) { + DEBUG4K_ERROR("size 0x%llx err 0x%x\n", size, err); + } + if ((KERN_SUCCESS != err) && didAlloc) { (void) mach_vm_deallocate(map, trunc_page_64(addr), size); addr = 0; @@ -1023,6 +1432,42 @@ IOGeneralMemoryDescriptor::memoryReferenceMap( return err; } +uint64_t +IOGeneralMemoryDescriptor::memoryReferenceGetDMAMapLength( + IOMemoryReference * ref, + uint64_t * offset) +{ + kern_return_t kr; + vm_object_offset_t data_offset = 0; + uint64_t total; + uint32_t idx; + + assert(ref->count); + if (offset) { + *offset = (uint64_t) data_offset; + } + total = 0; + for (idx = 0; idx < ref->count; idx++) { + kr = mach_memory_entry_phys_page_offset(ref->entries[idx].entry, + &data_offset); + if (KERN_SUCCESS != kr) { + DEBUG4K_ERROR("ref %p entry %p kr 0x%x\n", ref, ref->entries[idx].entry, kr); + } else if (0 != data_offset) { + DEBUG4K_IOKIT("ref %p entry %p offset 0x%llx kr 0x%x\n", ref, ref->entries[0].entry, data_offset, kr); + } + if (offset && !idx) { + *offset = (uint64_t) data_offset; + } + total += round_page(data_offset + ref->entries[idx].size); + } + + DEBUG4K_IOKIT("ref %p offset 0x%llx total 0x%llx\n", ref, + (offset ? 
*offset : (vm_object_offset_t)-1), total); + + return total; +} + + IOReturn IOGeneralMemoryDescriptor::memoryReferenceGetPageCounts( IOMemoryReference * ref, @@ -1131,7 +1576,7 @@ IOGeneralMemoryDescriptor::memoryReferenceSetOwnership( /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -IOMemoryDescriptor * +OSSharedPtr IOMemoryDescriptor::withAddress(void * address, IOByteCount length, IODirection direction) @@ -1141,25 +1586,23 @@ IOMemoryDescriptor::withAddress(void * address, } #ifndef __LP64__ -IOMemoryDescriptor * +OSSharedPtr IOMemoryDescriptor::withAddress(IOVirtualAddress address, IOByteCount length, IODirection direction, task_t task) { - IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor; + OSSharedPtr that = OSMakeShared(); if (that) { if (that->initWithAddress(address, length, direction, task)) { - return that; + return os::move(that); } - - that->release(); } - return NULL; + return nullptr; } #endif /* !__LP64__ */ -IOMemoryDescriptor * +OSSharedPtr IOMemoryDescriptor::withPhysicalAddress( IOPhysicalAddress address, IOByteCount length, @@ -1169,26 +1612,24 @@ IOMemoryDescriptor::withPhysicalAddress( } #ifndef __LP64__ -IOMemoryDescriptor * +OSSharedPtr IOMemoryDescriptor::withRanges( IOVirtualRange * ranges, UInt32 withCount, IODirection direction, task_t task, bool asReference) { - IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor; + OSSharedPtr that = OSMakeShared(); if (that) { if (that->initWithRanges(ranges, withCount, direction, task, asReference)) { - return that; + return os::move(that); } - - that->release(); } - return NULL; + return nullptr; } #endif /* !__LP64__ */ -IOMemoryDescriptor * +OSSharedPtr IOMemoryDescriptor::withAddressRange(mach_vm_address_t address, mach_vm_size_t length, IOOptionBits options, @@ -1198,13 +1639,13 @@ IOMemoryDescriptor::withAddressRange(mach_vm_address_t address, return IOMemoryDescriptor::withAddressRanges(&range, 1, options, task); } -IOMemoryDescriptor * +OSSharedPtr IOMemoryDescriptor::withAddressRanges(IOAddressRange * ranges, UInt32 rangeCount, IOOptionBits options, task_t task) { - IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor; + OSSharedPtr that = OSMakeShared(); if (that) { if (task) { options |= kIOMemoryTypeVirtual64; @@ -1213,13 +1654,11 @@ IOMemoryDescriptor::withAddressRanges(IOAddressRange * ranges, } if (that->initWithOptions(ranges, rangeCount, 0, task, options, /* mapper */ NULL)) { - return that; + return os::move(that); } - - that->release(); } - return NULL; + return nullptr; } @@ -1231,7 +1670,7 @@ IOMemoryDescriptor::withAddressRanges(IOAddressRange * ranges, * * Passing the ranges as a reference will avoid an extra allocation. 
*/ -IOMemoryDescriptor * +OSSharedPtr IOMemoryDescriptor::withOptions(void * buffers, UInt32 count, UInt32 offset, @@ -1239,15 +1678,14 @@ IOMemoryDescriptor::withOptions(void * buffers, IOOptionBits opts, IOMapper * mapper) { - IOGeneralMemoryDescriptor *self = new IOGeneralMemoryDescriptor; + OSSharedPtr self = OSMakeShared(); if (self && !self->initWithOptions(buffers, count, offset, task, opts, mapper)) { - self->release(); - return NULL; + return nullptr; } - return self; + return os::move(self); } bool @@ -1262,24 +1700,22 @@ IOMemoryDescriptor::initWithOptions(void * buffers, } #ifndef __LP64__ -IOMemoryDescriptor * +OSSharedPtr IOMemoryDescriptor::withPhysicalRanges( IOPhysicalRange * ranges, UInt32 withCount, IODirection direction, bool asReference) { - IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor; + OSSharedPtr that = OSMakeShared(); if (that) { if (that->initWithPhysicalRanges(ranges, withCount, direction, asReference)) { - return that; + return os::move(that); } - - that->release(); } - return NULL; + return nullptr; } -IOMemoryDescriptor * +OSSharedPtr IOMemoryDescriptor::withSubRange(IOMemoryDescriptor * of, IOByteCount offset, IOByteCount length, @@ -1289,7 +1725,7 @@ IOMemoryDescriptor::withSubRange(IOMemoryDescriptor * of, } #endif /* !__LP64__ */ -IOMemoryDescriptor * +OSSharedPtr IOMemoryDescriptor::withPersistentMemoryDescriptor(IOMemoryDescriptor *originalMD) { IOGeneralMemoryDescriptor *origGenMD = @@ -1299,34 +1735,34 @@ IOMemoryDescriptor::withPersistentMemoryDescriptor(IOMemoryDescriptor *originalM return IOGeneralMemoryDescriptor:: withPersistentMemoryDescriptor(origGenMD); } else { - return NULL; + return nullptr; } } -IOMemoryDescriptor * +OSSharedPtr IOGeneralMemoryDescriptor::withPersistentMemoryDescriptor(IOGeneralMemoryDescriptor *originalMD) { IOMemoryReference * memRef; + OSSharedPtr self; if (kIOReturnSuccess != originalMD->memoryReferenceCreate(kIOMemoryReferenceReuse, &memRef)) { - return NULL; + return nullptr; } if (memRef == originalMD->_memRef) { - originalMD->retain(); // Add a new reference to ourselves + self.reset(originalMD, OSRetain); originalMD->memoryReferenceRelease(memRef); - return originalMD; + return os::move(self); } - IOGeneralMemoryDescriptor * self = new IOGeneralMemoryDescriptor; + self = OSMakeShared(); IOMDPersistentInitData initData = { originalMD, memRef }; if (self && !self->initWithOptions(&initData, 1, 0, NULL, kIOMemoryTypePersistentMD, NULL)) { - self->release(); - self = NULL; + return nullptr; } - return self; + return os::move(self); } #ifndef __LP64__ @@ -1617,6 +2053,7 @@ IOGeneralMemoryDescriptor::initWithOptions(void * buffers, } _highestPage = upl_get_highest_page(iopl.fIOPL); + DEBUG4K_IOKIT("offset 0x%x task %p options 0x%x -> _highestPage 0x%x\n", (uint32_t)offset, task, (uint32_t)options, _highestPage); // Set the flag kIOPLOnDevice convieniently equal to 1 iopl.fFlags = pageList->device | kIOPLExternUPL; @@ -1712,19 +2149,35 @@ IOGeneralMemoryDescriptor::initWithOptions(void * buffers, // addr & len are returned by this function getAddrLenForInd(addr, len, type, vec, ind); - if (os_add3_overflow(addr, len, PAGE_MASK, &endAddr)) { - break; - } - if (os_add_overflow(pages, (atop_64(endAddr) - atop_64(addr)), &pages)) { - break; + if (_task) { + mach_vm_size_t phys_size; + kern_return_t kret; + kret = vm_map_range_physical_size(get_task_map(_task), addr, len, &phys_size); + if (KERN_SUCCESS != kret) { + break; + } + if (os_add_overflow(pages, atop_64(phys_size), &pages)) { + break; + } + } 
else { + if (os_add3_overflow(addr, len, PAGE_MASK, &endAddr)) { + break; + } + if (!(kIOMemoryRemote & options) && (atop_64(endAddr) > UINT_MAX)) { + break; + } + if (os_add_overflow(pages, (atop_64(endAddr) - atop_64(addr)), &pages)) { + break; + } } if (os_add_overflow(totalLength, len, &totalLength)) { break; } if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) { - ppnum_t highPage = atop_64(addr + len - 1); - if (highPage > _highestPage) { - _highestPage = highPage; + uint64_t highPage = atop_64(addr + len - 1); + if ((highPage > _highestPage) && (highPage <= UINT_MAX)) { + _highestPage = (ppnum_t) highPage; + DEBUG4K_IOKIT("offset 0x%x task %p options 0x%x -> _highestPage 0x%x\n", (uint32_t)offset, task, (uint32_t)options, _highestPage); } } } @@ -1792,7 +2245,7 @@ IOGeneralMemoryDescriptor::free() { IOOptionBits type = _flags & kIOMemoryTypeMask; - if (reserved) { + if (reserved && reserved->dp.memory) { LOCK; reserved->dp.memory = NULL; UNLOCK; @@ -1810,7 +2263,7 @@ IOGeneralMemoryDescriptor::free() } if (_memoryEntries) { - _memoryEntries->release(); + _memoryEntries.reset(); } if (_ranges.v && !(kIOMemoryAsReference & _flags)) { @@ -1937,13 +2390,18 @@ IOMemoryDescriptor::readBytes (IOByteCount offset, void *bytes, IOByteCount length) { addr64_t dstAddr = CAST_DOWN(addr64_t, bytes); + IOByteCount endoffset; IOByteCount remaining; - // Assert that this entire I/O is withing the available range - assert(offset <= _length); - assert(offset + length <= _length); - if ((offset >= _length) - || ((offset + length) > _length)) { + + // Check that this entire I/O is within the available range + if ((offset > _length) + || os_add_overflow(length, offset, &endoffset) + || (endoffset > _length)) { + assertf(false, "readBytes exceeds length (0x%lx, 0x%lx) > 0x%lx", (long) offset, (long) length, (long) _length); + return 0; + } + if (offset >= _length) { return 0; } @@ -1971,7 +2429,10 @@ IOMemoryDescriptor::readBytes srcLen = remaining; } - copypv(srcAddr64, dstAddr, srcLen, + if (srcLen > (UINT_MAX - PAGE_SIZE + 1)) { + srcLen = (UINT_MAX - PAGE_SIZE + 1); + } + copypv(srcAddr64, dstAddr, (unsigned int) srcLen, cppvPsrc | cppvNoRefSrc | cppvFsnk | cppvKmap); dstAddr += srcLen; @@ -1994,17 +2455,22 @@ IOMemoryDescriptor::writeBytes { addr64_t srcAddr = CAST_DOWN(addr64_t, bytes); IOByteCount remaining; + IOByteCount endoffset; IOByteCount offset = inoffset; - // Assert that this entire I/O is withing the available range - assert(offset <= _length); - assert(offset + length <= _length); - assert( !(kIOMemoryPreparedReadOnly & _flags)); - if ((kIOMemoryPreparedReadOnly & _flags) - || (offset >= _length) - || ((offset + length) > _length)) { + // Check that this entire I/O is within the available range + if ((offset > _length) + || os_add_overflow(length, offset, &endoffset) + || (endoffset > _length)) { + assertf(false, "writeBytes exceeds length (0x%lx, 0x%lx) > 0x%lx", (long) inoffset, (long) length, (long) _length); + return 0; + } + if (kIOMemoryPreparedReadOnly & _flags) { + return 0; + } + if (offset >= _length) { return 0; } @@ -2032,10 +2498,13 @@ IOMemoryDescriptor::writeBytes dstLen = remaining; } + if (dstLen > (UINT_MAX - PAGE_SIZE + 1)) { + dstLen = (UINT_MAX - PAGE_SIZE + 1); + } if (!srcAddr) { - bzero_phys(dstAddr64, dstLen); + bzero_phys(dstAddr64, (unsigned int) dstLen); } else { - copypv(srcAddr, (addr64_t) dstAddr64, dstLen, + copypv(srcAddr, (addr64_t) dstAddr64, (unsigned int) dstLen, cppvPsnk | cppvFsnk | cppvNoRefSrc | cppvNoModSnk | cppvKmap); 
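As an illustrative aside: readBytes() and writeBytes() above replace the old asserts with explicit overflow-checked bounds tests before copying, and clamp each copypv()/bzero_phys() chunk so its length still fits the routines' unsigned int parameter. A small stand-alone sketch of that validation pattern (the 4 KB page size and the helper names are assumptions for illustration):

#include <limits.h>
#include <stdbool.h>
#include <stdint.h>

/* True when [offset, offset + length) lies inside a buffer of total_len bytes,
 * rejecting arithmetic overflow instead of relying on an assert. */
static bool
range_ok(uint64_t offset, uint64_t length, uint64_t total_len)
{
        uint64_t end;
        if (__builtin_add_overflow(offset, length, &end)) {
                return false;
        }
        return (offset <= total_len) && (end <= total_len);
}

/* Clamp a chunk so it fits a 32-bit length parameter, mirroring the
 * (UINT_MAX - PAGE_SIZE + 1) cap in the hunks above. */
static uint64_t
clamp_chunk(uint64_t len)
{
        const uint64_t cap = (uint64_t)UINT_MAX - 4096 + 1;
        return (len > cap) ? cap : len;
}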
srcAddr += dstLen; } @@ -2069,6 +2538,7 @@ IOGeneralMemoryDescriptor::setPosition(IOByteCount position) #endif /* !__LP64__ */ static volatile SInt64 gIOMDPreparationID __attribute__((aligned(8))) = (1ULL << 32); +static volatile SInt64 gIOMDDescriptorID __attribute__((aligned(8))) = (kIODescriptorIDInvalid + 1ULL); uint64_t IOGeneralMemoryDescriptor::getPreparationID( void ) @@ -2133,6 +2603,189 @@ IOMemoryDescriptor::getPreparationID( void ) } } +void +IOMemoryDescriptor::setDescriptorID( void ) +{ + if (getKernelReserved() && (kIODescriptorIDInvalid == reserved->descriptorID)) { + SInt64 newID = OSIncrementAtomic64(&gIOMDDescriptorID); + OSCompareAndSwap64(kIODescriptorIDInvalid, newID, &reserved->descriptorID); + } +} + +uint64_t +IOMemoryDescriptor::getDescriptorID( void ) +{ + setDescriptorID(); + + if (reserved) { + return reserved->descriptorID; + } else { + return kIODescriptorIDInvalid; + } +} + +IOReturn +IOMemoryDescriptor::ktraceEmitPhysicalSegments( void ) +{ + if (!kdebug_debugid_explicitly_enabled(IODBG_IOMDPA(IOMDPA_MAPPED))) { + return kIOReturnSuccess; + } + + assert(getPreparationID() >= kIOPreparationIDAlwaysPrepared); + if (getPreparationID() < kIOPreparationIDAlwaysPrepared) { + return kIOReturnBadArgument; + } + + uint64_t descriptorID = getDescriptorID(); + assert(descriptorID != kIODescriptorIDInvalid); + if (getDescriptorID() == kIODescriptorIDInvalid) { + return kIOReturnBadArgument; + } + + IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_MAPPED), descriptorID, VM_KERNEL_ADDRHIDE(this), getLength()); + +#if __LP64__ + static const uint8_t num_segments_page = 8; +#else + static const uint8_t num_segments_page = 4; +#endif + static const uint8_t num_segments_long = 2; + + IOPhysicalAddress segments_page[num_segments_page]; + IOPhysicalRange segments_long[num_segments_long]; + memset(segments_page, UINT32_MAX, sizeof(segments_page)); + memset(segments_long, 0, sizeof(segments_long)); + + uint8_t segment_page_idx = 0; + uint8_t segment_long_idx = 0; + + IOPhysicalRange physical_segment; + for (IOByteCount offset = 0; offset < getLength(); offset += physical_segment.length) { + physical_segment.address = getPhysicalSegment(offset, &physical_segment.length); + + if (physical_segment.length == 0) { + break; + } + + /** + * Most IOMemoryDescriptors are made up of many individual physically discontiguous pages. To optimize for trace + * buffer memory, pack segment events according to the following. + * + * Mappings must be emitted in ascending order starting from offset 0. Mappings can be associated with the previous + * IOMDPA_MAPPED event emitted on by the current thread_id. + * + * IOMDPA_SEGMENTS_PAGE = up to 8 virtually contiguous page aligned mappings of PAGE_SIZE length + * - (ppn_0 << 32 | ppn_1), ..., (ppn_6 << 32 | ppn_7) + * - unmapped pages will have a ppn of MAX_INT_32 + * IOMDPA_SEGMENTS_LONG = up to 2 virtually contiguous mappings of variable length + * - address_0, length_0, address_0, length_1 + * - unmapped pages will have an address of 0 + * + * During each iteration do the following depending on the length of the mapping: + * 1. add the current segment to the appropriate queue of pending segments + * 1. check if we are operating on the same type of segment (PAGE/LONG) as the previous pass + * 1a. if FALSE emit and reset all events in the previous queue + * 2. check if we have filled up the current queue of pending events + * 2a. if TRUE emit and reset all events in the pending queue + * 3. 
after completing all iterations emit events in the current queue + */ + + bool emit_page = false; + bool emit_long = false; + if ((physical_segment.address & PAGE_MASK) == 0 && physical_segment.length == PAGE_SIZE) { + segments_page[segment_page_idx] = physical_segment.address; + segment_page_idx++; + + emit_long = segment_long_idx != 0; + emit_page = segment_page_idx == num_segments_page; + + if (os_unlikely(emit_long)) { + IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_LONG), + segments_long[0].address, segments_long[0].length, + segments_long[1].address, segments_long[1].length); + } + + if (os_unlikely(emit_page)) { +#if __LP64__ + IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE), + ((uintptr_t) atop_64(segments_page[0]) << 32) | (ppnum_t) atop_64(segments_page[1]), + ((uintptr_t) atop_64(segments_page[2]) << 32) | (ppnum_t) atop_64(segments_page[3]), + ((uintptr_t) atop_64(segments_page[4]) << 32) | (ppnum_t) atop_64(segments_page[5]), + ((uintptr_t) atop_64(segments_page[6]) << 32) | (ppnum_t) atop_64(segments_page[7])); +#else + IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE), + (ppnum_t) atop_32(segments_page[1]), + (ppnum_t) atop_32(segments_page[2]), + (ppnum_t) atop_32(segments_page[3]), + (ppnum_t) atop_32(segments_page[4])); +#endif + } + } else { + segments_long[segment_long_idx] = physical_segment; + segment_long_idx++; + + emit_page = segment_page_idx != 0; + emit_long = segment_long_idx == num_segments_long; + + if (os_unlikely(emit_page)) { +#if __LP64__ + IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE), + ((uintptr_t) atop_64(segments_page[0]) << 32) | (ppnum_t) atop_64(segments_page[1]), + ((uintptr_t) atop_64(segments_page[2]) << 32) | (ppnum_t) atop_64(segments_page[3]), + ((uintptr_t) atop_64(segments_page[4]) << 32) | (ppnum_t) atop_64(segments_page[5]), + ((uintptr_t) atop_64(segments_page[6]) << 32) | (ppnum_t) atop_64(segments_page[7])); +#else + IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE), + (ppnum_t) atop_32(segments_page[1]), + (ppnum_t) atop_32(segments_page[2]), + (ppnum_t) atop_32(segments_page[3]), + (ppnum_t) atop_32(segments_page[4])); +#endif + } + + if (emit_long) { + IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_LONG), + segments_long[0].address, segments_long[0].length, + segments_long[1].address, segments_long[1].length); + } + } + + if (os_unlikely(emit_page)) { + memset(segments_page, UINT32_MAX, sizeof(segments_page)); + segment_page_idx = 0; + } + + if (os_unlikely(emit_long)) { + memset(segments_long, 0, sizeof(segments_long)); + segment_long_idx = 0; + } + } + + if (segment_page_idx != 0) { + assert(segment_long_idx == 0); +#if __LP64__ + IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE), + ((uintptr_t) atop_64(segments_page[0]) << 32) | (ppnum_t) atop_64(segments_page[1]), + ((uintptr_t) atop_64(segments_page[2]) << 32) | (ppnum_t) atop_64(segments_page[3]), + ((uintptr_t) atop_64(segments_page[4]) << 32) | (ppnum_t) atop_64(segments_page[5]), + ((uintptr_t) atop_64(segments_page[6]) << 32) | (ppnum_t) atop_64(segments_page[7])); +#else + IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE), + (ppnum_t) atop_32(segments_page[1]), + (ppnum_t) atop_32(segments_page[2]), + (ppnum_t) atop_32(segments_page[3]), + (ppnum_t) atop_32(segments_page[4])); +#endif + } else if (segment_long_idx != 0) { + assert(segment_page_idx == 0); + IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_LONG), + segments_long[0].address, segments_long[0].length, + segments_long[1].address, segments_long[1].length); + } + + return 
kIOReturnSuccess; +} + void IOMemoryDescriptor::setVMTags(uint32_t kernelTag, uint32_t userTag) { @@ -2202,8 +2855,7 @@ IOGeneralMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UI remap |= (dataP->fDMAMapAlignment > page_size); if (remap || !dataP->fMappedBaseValid) { -// if (dataP->fMappedBaseValid) OSReportWithBacktrace("kIOMDDMAMap whole %d remap %d params %d\n", whole, remap, params); - err = md->dmaMap(data->fMapper, data->fCommand, &data->fMapSpec, data->fOffset, data->fLength, &data->fAlloc, &data->fAllocLength); + err = md->dmaMap(data->fMapper, md, data->fCommand, &data->fMapSpec, data->fOffset, data->fLength, &data->fAlloc, &data->fAllocLength); if (keepMap && (kIOReturnSuccess == err) && !dataP->fMappedBaseValid) { dataP->fMappedBase = data->fAlloc; dataP->fMappedBaseValid = true; @@ -2215,7 +2867,6 @@ IOGeneralMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UI data->fAllocLength = 0; // give out IOMD map md->dmaMapRecord(data->fMapper, data->fCommand, dataP->fMappedLength); } - data->fMapContig = !dataP->fDiscontig; if ((data->fMapper == gIOSystemMapper) && _prepareLock) { IOLockUnlock(_prepareLock); @@ -2316,7 +2967,7 @@ IOGeneralMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UI } isP = (InternalState *) vData; - mach_vm_size_t offset = isP->fIO.fOffset; + uint64_t offset = isP->fIO.fOffset; uint8_t mapped = isP->fIO.fMapped; uint64_t mappedBase; @@ -2339,7 +2990,7 @@ IOGeneralMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UI bzero(&mapSpec, sizeof(mapSpec)); mapSpec.numAddressBits = dataP->fDMAMapNumAddressBits; mapSpec.alignment = dataP->fDMAMapAlignment; - err = md->dmaMap(dataP->fMapper, NULL, &mapSpec, 0, _length, &dataP->fMappedBase, &dataP->fMappedLength); + err = md->dmaMap(dataP->fMapper, md, NULL, &mapSpec, 0, _length, &dataP->fMappedBase, &dataP->fMappedLength); if (kIOReturnSuccess != err) { return err; } @@ -2347,9 +2998,7 @@ IOGeneralMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UI } } - if (kIOMDDMAWalkMappedLocal == mapped) { - mappedBase = isP->fIO.fMappedBase; - } else if (mapped) { + if (mapped) { if (IOMapper::gSystem && (!(kIOMemoryHostOnly & _flags)) && _memoryEntries @@ -2792,14 +3441,24 @@ IOMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt data IOMDDMAWalkSegmentArgs *data = (IOMDDMAWalkSegmentArgs *) vData; IOByteCount offset = (IOByteCount) data->fOffset; + IOPhysicalLength length, nextLength; + addr64_t addr, nextAddr; - IOPhysicalLength length; - if (data->fMapped && IOMapper::gSystem) { - data->fIOVMAddr = md->getPhysicalSegment(offset, &length); - } else { - data->fIOVMAddr = md->getPhysicalSegment(offset, &length, kIOMemoryMapperNone); + if (data->fMapped) { + panic("fMapped %p %s %qx\n", this, getMetaClass()->getClassName(), (uint64_t) getLength()); } - data->fLength = length; + addr = md->getPhysicalSegment(offset, &length, kIOMemoryMapperNone); + offset += length; + while (offset < getLength()) { + nextAddr = md->getPhysicalSegment(offset, &nextLength, kIOMemoryMapperNone); + if ((addr + length) != nextAddr) { + break; + } + length += nextLength; + offset += nextLength; + } + data->fIOVMAddr = addr; + data->fLength = length; } else if (kIOMDAddDMAMapSpec == op) { return kIOReturnUnsupported; } else if (kIOMDDMAMap == op) { @@ -2808,12 +3467,7 @@ IOMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt data } IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData; - if (params) { - panic("class %s 
does not support IODMACommand::kIterateOnly", getMetaClass()->getClassName()); - } - - data->fMapContig = true; - err = md->dmaMap(data->fMapper, data->fCommand, &data->fMapSpec, data->fOffset, data->fLength, &data->fAlloc, &data->fAllocLength); + err = md->dmaMap(data->fMapper, md, data->fCommand, &data->fMapSpec, data->fOffset, data->fLength, &data->fAlloc, &data->fAllocLength); return err; } else if (kIOMDDMAUnmap == op) { @@ -2977,6 +3631,40 @@ IOMemoryDescriptor::setOwnership( task_t newOwner, return err; } + +uint64_t +IOMemoryDescriptor::getDMAMapLength(uint64_t * offset) +{ + uint64_t length; + + if (_memRef) { + length = IOGeneralMemoryDescriptor::memoryReferenceGetDMAMapLength(_memRef, offset); + } else { + IOByteCount iterate, segLen; + IOPhysicalAddress sourceAddr, sourceAlign; + + if (kIOMemoryThreadSafe & _flags) { + LOCK; + } + length = 0; + iterate = 0; + while ((sourceAddr = getPhysicalSegment(iterate, &segLen, _kIOMemorySourceSegment))) { + sourceAlign = (sourceAddr & page_mask); + if (offset && !iterate) { + *offset = sourceAlign; + } + length += round_page(sourceAddr + segLen) - trunc_page(sourceAddr); + iterate += segLen; + } + if (kIOMemoryThreadSafe & _flags) { + UNLOCK; + } + } + + return length; +} + + IOReturn IOMemoryDescriptor::getPageCounts( IOByteCount * residentPageCount, IOByteCount * dirtyPageCount ) @@ -3023,8 +3711,8 @@ SetEncryptOp(addr64_t pa, unsigned int count) { ppnum_t page, end; - page = atop_64(round_page_64(pa)); - end = atop_64(trunc_page_64(pa + count)); + page = (ppnum_t) atop_64(round_page_64(pa)); + end = (ppnum_t) atop_64(trunc_page_64(pa + count)); for (; page < end; page++) { pmap_clear_noencrypt(page); } @@ -3035,8 +3723,8 @@ ClearEncryptOp(addr64_t pa, unsigned int count) { ppnum_t page, end; - page = atop_64(round_page_64(pa)); - end = atop_64(trunc_page_64(pa + count)); + page = (ppnum_t) atop_64(round_page_64(pa)); + end = (ppnum_t) atop_64(trunc_page_64(pa + count)); for (; page < end; page++) { pmap_set_noencrypt(page); } @@ -3124,20 +3812,26 @@ IOMemoryDescriptor::performOperation( IOOptionBits options, if (dstLen > remaining) { dstLen = remaining; } + if (dstLen > (UINT_MAX - PAGE_SIZE + 1)) { + dstLen = (UINT_MAX - PAGE_SIZE + 1); + } + if (remaining > UINT_MAX) { + remaining = UINT_MAX; + } #if defined(__arm__) || defined(__arm64__) if (func) { - (*func)(dstAddr64, dstLen); + (*func)(dstAddr64, (unsigned int) dstLen); } if (func_ext) { - (*func_ext)(dstAddr64, dstLen, remaining, &res); + (*func_ext)(dstAddr64, (unsigned int) dstLen, (unsigned int) remaining, &res); if (res != 0x0UL) { remaining = 0; break; } } #else /* defined(__arm__) || defined(__arm64__) */ - (*func)(dstAddr64, dstLen); + (*func)(dstAddr64, (unsigned int) dstLen); #endif /* defined(__arm__) || defined(__arm64__) */ offset += dstLen; @@ -3157,8 +3851,14 @@ IOMemoryDescriptor::performOperation( IOOptionBits options, #if defined(__i386__) || defined(__x86_64__) +extern vm_offset_t kc_highest_nonlinkedit_vmaddr; + +/* XXX: By extending io_kernel_static_end to the highest virtual address in the KC, + * we're opening up this path to IOMemoryDescriptor consumers who can now create UPLs to + * kernel non-text data -- should we just add another range instead? + */ #define io_kernel_static_start vm_kernel_stext -#define io_kernel_static_end vm_kernel_etext +#define io_kernel_static_end (kc_highest_nonlinkedit_vmaddr ? 
kc_highest_nonlinkedit_vmaddr : vm_kernel_etext) #elif defined(__arm__) || defined(__arm64__) @@ -3181,6 +3881,7 @@ io_get_kernel_static_upl( vm_map_t /* map */, uintptr_t offset, upl_size_t *upl_size, + unsigned int *page_offset, upl_t *upl, upl_page_info_array_t page_list, unsigned int *count, @@ -3190,12 +3891,14 @@ io_get_kernel_static_upl( ppnum_t phys; ppnum_t highestPage = 0; - pageCount = atop_32(*upl_size); + pageCount = atop_32(round_page(*upl_size + (page_mask & offset))); if (pageCount > *count) { pageCount = *count; } + *upl_size = (upl_size_t) ptoa_64(pageCount); *upl = NULL; + *page_offset = ((unsigned int) page_mask & offset); for (page = 0; page < pageCount; page++) { phys = pmap_find_phys(kernel_pmap, ((addr64_t)offset) + ptoa_64(page)); @@ -3227,6 +3930,7 @@ IOGeneralMemoryDescriptor::wireVirtual(IODirection forDirection) upl_page_info_array_t pageInfo; ppnum_t mapBase; vm_tag_t tag = VM_KERN_MEMORY_NONE; + mach_vm_size_t numBytesWired = 0; assert(kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type); @@ -3260,6 +3964,7 @@ IOGeneralMemoryDescriptor::wireVirtual(IODirection forDirection) error = kIOReturnNotWritable; } } else { + IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_WIRE), VM_KERNEL_ADDRHIDE(this), forDirection); IOMapper *mapper; mapper = dataP->fMapper; @@ -3295,10 +4000,14 @@ IOGeneralMemoryDescriptor::wireVirtual(IODirection forDirection) // and the length parameter is an unsigned int size_t uplPageSize = dataP->fPageCnt * sizeof(upl_page_info_t); if (uplPageSize > ((unsigned int)uplPageSize)) { - return kIOReturnNoMemory; + error = kIOReturnNoMemory; + traceInterval.setEndArg2(error); + return error; } - if (!_memoryEntries->appendBytes(NULL, uplPageSize)) { - return kIOReturnNoMemory; + if (!_memoryEntries->appendBytes(NULL, (unsigned int) uplPageSize)) { + error = kIOReturnNoMemory; + traceInterval.setEndArg2(error); + return error; } dataP = NULL; @@ -3315,24 +4024,46 @@ IOGeneralMemoryDescriptor::wireVirtual(IODirection forDirection) unsigned int pageIndex = 0; IOByteCount mdOffset = 0; ppnum_t highestPage = 0; + bool byteAlignUPL; IOMemoryEntry * memRefEntry = NULL; if (_memRef) { memRefEntry = &_memRef->entries[0]; + byteAlignUPL = (0 != (MAP_MEM_USE_DATA_ADDR & _memRef->prot)); + } else { + byteAlignUPL = true; } - for (UInt range = 0; range < _rangesCount; range++) { + for (UInt range = 0; mdOffset < _length; range++) { ioPLBlock iopl; mach_vm_address_t startPage, startPageOffset; mach_vm_size_t numBytes; ppnum_t highPage = 0; - // Get the startPage address and length of vec[range] - getAddrLenForInd(startPage, numBytes, type, vec, range); - startPageOffset = startPage & PAGE_MASK; - iopl.fPageOffset = startPageOffset; + if (_memRef) { + if (range >= _memRef->count) { + panic("memRefEntry"); + } + memRefEntry = &_memRef->entries[range]; + numBytes = memRefEntry->size; + startPage = -1ULL; + if (byteAlignUPL) { + startPageOffset = 0; + } else { + startPageOffset = (memRefEntry->start & PAGE_MASK); + } + } else { + // Get the startPage address and length of vec[range] + getAddrLenForInd(startPage, numBytes, type, vec, range); + if (byteAlignUPL) { + startPageOffset = 0; + } else { + startPageOffset = startPage & PAGE_MASK; + startPage = trunc_page_64(startPage); + } + } + iopl.fPageOffset = (typeof(iopl.fPageOffset))startPageOffset; numBytes += startPageOffset; - startPage = trunc_page_64(startPage); if (mapper) { iopl.fMappedPage = mapBase + pageIndex; @@ -3359,34 +4090,49 @@ 
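/*
 * Illustrative sketch (not part of the patch): the page-rounding arithmetic used above by
 * getDMAMapLength() and by the reworked io_get_kernel_static_upl() -- derive the sub-page
 * offset, the page-rounded byte length and the page count from a (address, length) extent.
 * kPageSizeSketch stands in for the kernel's page_size/page_mask globals; a 16K page is
 * assumed here purely for the demonstration.
 */
#include <cstdint>
#include <cstdio>

static const uint64_t kPageSizeSketch = 16384;                 // assumed page size
static const uint64_t kPageMaskSketch = kPageSizeSketch - 1;

// Round a byte extent [addr, addr + len) out to whole pages, the way the getDMAMapLength()
// loop accumulates round_page(addr + len) - trunc_page(addr).
static uint64_t
pageRoundedLength(uint64_t addr, uint64_t len)
{
	uint64_t first = addr & ~kPageMaskSketch;                            // trunc_page
	uint64_t last  = (addr + len + kPageMaskSketch) & ~kPageMaskSketch;  // round_page
	return last - first;
}

int
main(void)
{
	uint64_t addr = 0x12345678, len = 0x6000;
	uint64_t offsetInPage = addr & kPageMaskSketch;    // what *page_offset now reports
	uint64_t rounded      = pageRoundedLength(addr, len);
	uint64_t pageCount    = rounded / kPageSizeSketch; // atop() of the rounded size
	printf("offset-in-page 0x%llx, rounded length 0x%llx, %llu pages\n",
	    (unsigned long long)offsetInPage, (unsigned long long)rounded,
	    (unsigned long long)pageCount);
	return 0;
}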
IOGeneralMemoryDescriptor::wireVirtual(IODirection forDirection) pageInfo = getPageList(dataP); upl_page_list_ptr_t baseInfo = &pageInfo[pageIndex]; - mach_vm_size_t _ioplSize = round_page(numBytes); - upl_size_t ioplSize = (_ioplSize <= MAX_UPL_SIZE_BYTES) ? _ioplSize : MAX_UPL_SIZE_BYTES; - unsigned int numPageInfo = atop_32(ioplSize); + mach_vm_size_t ioplPhysSize; + upl_size_t ioplSize; + unsigned int numPageInfo; - if ((theMap == kernel_map) - && (kernelStart >= io_kernel_static_start) - && (kernelStart < io_kernel_static_end)) { - error = io_get_kernel_static_upl(theMap, - kernelStart, - &ioplSize, - &iopl.fIOPL, - baseInfo, - &numPageInfo, - &highPage); - } else if (_memRef) { + if (_memRef) { + error = mach_memory_entry_map_size(memRefEntry->entry, NULL /*physical*/, 0, memRefEntry->size, &ioplPhysSize); + DEBUG4K_IOKIT("_memRef %p memRefEntry %p entry %p startPage 0x%llx numBytes 0x%llx ioplPhysSize 0x%llx\n", _memRef, memRefEntry, memRefEntry->entry, startPage, numBytes, ioplPhysSize); + } else { + error = vm_map_range_physical_size(theMap, startPage, numBytes, &ioplPhysSize); + DEBUG4K_IOKIT("_memRef %p theMap %p startPage 0x%llx numBytes 0x%llx ioplPhysSize 0x%llx\n", _memRef, theMap, startPage, numBytes, ioplPhysSize); + } + if (error != KERN_SUCCESS) { + if (_memRef) { + DEBUG4K_ERROR("_memRef %p memRefEntry %p entry %p theMap %p startPage 0x%llx numBytes 0x%llx error 0x%x\n", _memRef, memRefEntry, memRefEntry->entry, theMap, startPage, numBytes, error); + } else { + DEBUG4K_ERROR("_memRef %p theMap %p startPage 0x%llx numBytes 0x%llx error 0x%x\n", _memRef, theMap, startPage, numBytes, error); + } + printf("entry size error %d\n", error); + goto abortExit; + } + ioplPhysSize = (ioplPhysSize <= MAX_UPL_SIZE_BYTES) ? ioplPhysSize : MAX_UPL_SIZE_BYTES; + numPageInfo = atop_32(ioplPhysSize); + if (byteAlignUPL) { + if (numBytes > ioplPhysSize) { + ioplSize = ((typeof(ioplSize))ioplPhysSize); + } else { + ioplSize = ((typeof(ioplSize))numBytes); + } + } else { + ioplSize = ((typeof(ioplSize))ioplPhysSize); + } + + if (_memRef) { memory_object_offset_t entryOffset; entryOffset = mdOffset; - entryOffset = (entryOffset - iopl.fPageOffset - memRefEntry->offset); - if (entryOffset >= memRefEntry->size) { - memRefEntry++; - if (memRefEntry >= &_memRef->entries[_memRef->count]) { - panic("memRefEntry"); - } - entryOffset = 0; + if (byteAlignUPL) { + entryOffset = (entryOffset - memRefEntry->offset); + } else { + entryOffset = (entryOffset - iopl.fPageOffset - memRefEntry->offset); } if (ioplSize > (memRefEntry->size - entryOffset)) { - ioplSize = (memRefEntry->size - entryOffset); + ioplSize = ((typeof(ioplSize))(memRefEntry->size - entryOffset)); } error = memory_object_iopl_request(memRefEntry->entry, entryOffset, @@ -3396,6 +4142,17 @@ IOGeneralMemoryDescriptor::wireVirtual(IODirection forDirection) &numPageInfo, &ioplFlags, tag); + } else if ((theMap == kernel_map) + && (kernelStart >= io_kernel_static_start) + && (kernelStart < io_kernel_static_end)) { + error = io_get_kernel_static_upl(theMap, + kernelStart, + &ioplSize, + &iopl.fPageOffset, + &iopl.fIOPL, + baseInfo, + &numPageInfo, + &highPage); } else { assert(theMap); error = vm_map_create_upl(theMap, @@ -3409,6 +4166,8 @@ IOGeneralMemoryDescriptor::wireVirtual(IODirection forDirection) } if (error != KERN_SUCCESS) { + traceInterval.setEndArg2(error); + DEBUG4K_ERROR("UPL create error 0x%x theMap %p (kernel:%d) _memRef %p startPage 0x%llx ioplSize 0x%x\n", error, theMap, (theMap == kernel_map), _memRef, startPage, ioplSize); 
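/*
 * Illustrative sketch (not part of the patch): the coalescing idea behind the walk-segments
 * change to IOMemoryDescriptor::dmaCommandOperation() earlier in this diff (the hunk using
 * IOMDDMAWalkSegmentArgs) -- keep extending a segment while the next physical range starts
 * exactly where the previous one ended. The Segment type and the sample data are assumptions.
 */
#include <cstdint>
#include <cstdio>
#include <vector>

struct Segment {
	uint64_t addr;
	uint64_t len;
};

// Return the longest physically contiguous run starting at segs[first].
static Segment
coalesceFrom(const std::vector<Segment> &segs, size_t first)
{
	Segment out = segs[first];
	for (size_t i = first + 1; i < segs.size(); i++) {
		if (out.addr + out.len != segs[i].addr) {
			break;                  // hole: stop, as the kernel loop does
		}
		out.len += segs[i].len;     // adjacent: fold it into the current run
	}
	return out;
}

int
main(void)
{
	std::vector<Segment> segs = {
		{0x80000000, 0x1000}, {0x80001000, 0x2000}, {0x90000000, 0x1000}
	};
	Segment run = coalesceFrom(segs, 0);
	printf("contiguous run: 0x%llx len 0x%llx\n",
	    (unsigned long long)run.addr, (unsigned long long)run.len);
	return 0;
}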
goto abortExit; } @@ -3428,11 +4187,21 @@ IOGeneralMemoryDescriptor::wireVirtual(IODirection forDirection) iopl.fFlags = 0; } + if (byteAlignUPL) { + if (iopl.fIOPL) { + DEBUG4K_UPL("startPage 0x%llx numBytes 0x%llx iopl.fPageOffset 0x%x upl_get_data_offset(%p) 0x%llx\n", startPage, numBytes, iopl.fPageOffset, iopl.fIOPL, upl_get_data_offset(iopl.fIOPL)); + iopl.fPageOffset = (typeof(iopl.fPageOffset))upl_get_data_offset(iopl.fIOPL); + } + if (startPage != (mach_vm_address_t)-1) { + // assert(iopl.fPageOffset == (startPage & PAGE_MASK)); + startPage -= iopl.fPageOffset; + } + ioplSize = ((typeof(ioplSize))ptoa_64(numPageInfo)); + numBytes += iopl.fPageOffset; + } + iopl.fIOMDOffset = mdOffset; iopl.fPageInfo = pageIndex; - if (mapper && pageIndex && (page_mask & (mdOffset + startPageOffset))) { - dataP->fDiscontig = true; - } if (!_memoryEntries->appendBytes(&iopl, sizeof(iopl))) { // Clean up partial created and unsaved iopl @@ -3441,6 +4210,7 @@ IOGeneralMemoryDescriptor::wireVirtual(IODirection forDirection) upl_deallocate(iopl.fIOPL); } error = kIOReturnNoMemory; + traceInterval.setEndArg2(error); goto abortExit; } dataP = NULL; @@ -3448,9 +4218,12 @@ IOGeneralMemoryDescriptor::wireVirtual(IODirection forDirection) // Check for a multiple iopl's in one virtual range pageIndex += numPageInfo; mdOffset -= iopl.fPageOffset; + numBytesWired += ioplSize; if (ioplSize < numBytes) { numBytes -= ioplSize; - startPage += ioplSize; + if (startPage != (mach_vm_address_t)-1) { + startPage += ioplSize; + } mdOffset += ioplSize; iopl.fPageOffset = 0; if (mapper) { @@ -3464,10 +4237,12 @@ IOGeneralMemoryDescriptor::wireVirtual(IODirection forDirection) } _highestPage = highestPage; + DEBUG4K_IOKIT("-> _highestPage 0x%x\n", _highestPage); if (UPL_COPYOUT_FROM & uplFlags) { _flags |= kIOMemoryPreparedReadOnly; } + traceInterval.setEndCodes(numBytesWired, error); } #if IOTRACKING @@ -3487,10 +4262,10 @@ abortExit: UInt done = getNumIOPL(_memoryEntries, dataP); ioPLBlock *ioplList = getIOPLList(dataP); - for (UInt range = 0; range < done; range++) { - if (ioplList[range].fIOPL) { - upl_abort(ioplList[range].fIOPL, 0); - upl_deallocate(ioplList[range].fIOPL); + for (UInt ioplIdx = 0; ioplIdx < done; ioplIdx++) { + if (ioplList[ioplIdx].fIOPL) { + upl_abort(ioplList[ioplIdx].fIOPL, 0); + upl_deallocate(ioplList[ioplIdx].fIOPL); } } (void) _memoryEntries->initWithBytes(dataP, computeDataSize(0, 0)); // == setLength() @@ -3509,8 +4284,12 @@ bool IOGeneralMemoryDescriptor::initMemoryEntries(size_t size, IOMapper * mapper) { ioGMDData * dataP; - unsigned dataSize = size; + unsigned dataSize; + if (size > UINT_MAX) { + return false; + } + dataSize = (unsigned int) size; if (!_memoryEntries) { _memoryEntries = OSData::withCapacity(dataSize); if (!_memoryEntries) { @@ -3533,7 +4312,6 @@ IOGeneralMemoryDescriptor::initMemoryEntries(size_t size, IOMapper * mapper) dataP->fDMAMapNumAddressBits = 64; dataP->fDMAMapAlignment = 0; dataP->fPreparationID = kIOPreparationIDUnprepared; - dataP->fDiscontig = false; dataP->fCompletionError = false; dataP->fMappedBaseValid = false; @@ -3543,6 +4321,7 @@ IOGeneralMemoryDescriptor::initMemoryEntries(size_t size, IOMapper * mapper) IOReturn IOMemoryDescriptor::dmaMap( IOMapper * mapper, + IOMemoryDescriptor * memory, IODMACommand * command, const IODMAMapSpecification * mapSpec, uint64_t offset, @@ -3559,7 +4338,7 @@ IOMemoryDescriptor::dmaMap( mapOptions |= kIODMAMapWriteAccess; } - err = mapper->iovmMapMemory(this, offset, length, mapOptions, + err = 
mapper->iovmMapMemory(memory, offset, length, mapOptions, mapSpec, command, NULL, mapAddress, mapLength); if (kIOReturnSuccess == err) { @@ -3575,6 +4354,7 @@ IOMemoryDescriptor::dmaMapRecord( IODMACommand * command, uint64_t mapLength) { + IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_DMA_MAP), VM_KERNEL_ADDRHIDE(this)); kern_allocation_name_t alloc; int16_t prior; @@ -3605,6 +4385,7 @@ IOMemoryDescriptor::dmaUnmap( uint64_t mapAddress, uint64_t mapLength) { + IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_DMA_UNMAP), VM_KERNEL_ADDRHIDE(this)); IOReturn ret; kern_allocation_name_t alloc; kern_allocation_name_t mapName; @@ -3622,6 +4403,7 @@ IOMemoryDescriptor::dmaUnmap( } if (!mapLength) { + traceInterval.setEndArg1(kIOReturnSuccess); return kIOReturnSuccess; } @@ -3635,12 +4417,14 @@ IOMemoryDescriptor::dmaUnmap( } } + traceInterval.setEndArg1(ret); return ret; } IOReturn IOGeneralMemoryDescriptor::dmaMap( IOMapper * mapper, + IOMemoryDescriptor * memory, IODMACommand * command, const IODMAMapSpecification * mapSpec, uint64_t offset, @@ -3662,7 +4446,7 @@ IOGeneralMemoryDescriptor::dmaMap( if ((type == kIOMemoryTypePhysical) || (type == kIOMemoryTypePhysical64) || offset || (length != _length)) { - err = super::dmaMap(mapper, command, mapSpec, offset, length, mapAddress, mapLength); + err = super::dmaMap(mapper, memory, command, mapSpec, offset, length, mapAddress, mapLength); } else if (_memoryEntries && _pages && (dataP = getDataP(_memoryEntries))) { const ioPLBlock * ioplList = getIOPLList(dataP); upl_page_info_t * pageList; @@ -3700,7 +4484,7 @@ IOGeneralMemoryDescriptor::dmaMap( .pageListCount = _pages, .pageList = &pageList[0] }; - err = mapper->iovmMapMemory(this, offset, length, mapOptions, &mapSpec, + err = mapper->iovmMapMemory(memory, offset, length, mapOptions, &mapSpec, command, &dmaPageList, mapAddress, mapLength); if (kIOReturnSuccess == err) { @@ -3726,13 +4510,16 @@ IOGeneralMemoryDescriptor::prepare(IODirection forDirection) { IOReturn error = kIOReturnSuccess; IOOptionBits type = _flags & kIOMemoryTypeMask; + IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_PREPARE), VM_KERNEL_ADDRHIDE(this), forDirection); if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) { + traceInterval.setEndArg1(kIOReturnSuccess); return kIOReturnSuccess; } assert(!(kIOMemoryRemote & _flags)); if (kIOMemoryRemote & _flags) { + traceInterval.setEndArg1(kIOReturnNotAttached); return kIOReturnNotAttached; } @@ -3753,6 +4540,8 @@ IOGeneralMemoryDescriptor::prepare(IODirection forDirection) if (kIOMemoryClearEncrypt & _flags) { performOperation(kIOMemoryClearEncrypted, 0, _length); } + + ktraceEmitPhysicalSegments(); } } @@ -3761,6 +4550,7 @@ finish: if (_prepareLock) { IOLockUnlock(_prepareLock); } + traceInterval.setEndArg1(error); return error; } @@ -3779,13 +4569,16 @@ IOGeneralMemoryDescriptor::complete(IODirection forDirection) { IOOptionBits type = _flags & kIOMemoryTypeMask; ioGMDData * dataP; + IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_COMPLETE), VM_KERNEL_ADDRHIDE(this), forDirection); if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) { + traceInterval.setEndArg1(kIOReturnSuccess); return kIOReturnSuccess; } assert(!(kIOMemoryRemote & _flags)); if (kIOMemoryRemote & _flags) { + traceInterval.setEndArg1(kIOReturnNotAttached); return kIOReturnNotAttached; } @@ -3819,7 +4612,7 @@ IOGeneralMemoryDescriptor::complete(IODirection forDirection) // 
kIODirectionCompleteWithDataValid & forDirection if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) { vm_tag_t tag; - tag = getVMTag(kernel_map); + tag = (typeof(tag))getVMTag(kernel_map); for (ind = 0; ind < count; ind++) { if (ioplList[ind].fIOPL) { iopl_valid_data(ioplList[ind].fIOPL, tag); @@ -3860,6 +4653,10 @@ IOGeneralMemoryDescriptor::complete(IODirection forDirection) dataP->fPreparationID = kIOPreparationIDUnprepared; _flags &= ~kIOMemoryPreparedReadOnly; + + if (kdebug_debugid_explicitly_enabled(IODBG_IOMDPA(IOMDPA_UNMAPPED))) { + IOTimeStampConstantFiltered(IODBG_IOMDPA(IOMDPA_UNMAPPED), getDescriptorID(), VM_KERNEL_ADDRHIDE(this)); + } } } }while (false); @@ -3868,6 +4665,7 @@ IOGeneralMemoryDescriptor::complete(IODirection forDirection) IOLockUnlock(_prepareLock); } + traceInterval.setEndArg1(kIOReturnSuccess); return kIOReturnSuccess; } @@ -3879,6 +4677,8 @@ IOGeneralMemoryDescriptor::doMap( IOByteCount __offset, IOByteCount __length ) { + IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_MAP), VM_KERNEL_ADDRHIDE(this), VM_KERNEL_ADDRHIDE(*__address), __length); + traceInterval.setEndArg1(kIOReturnSuccess); #ifndef __LP64__ if (!(kIOMap64Bit & options)) { panic("IOGeneralMemoryDescriptor::doMap !64bit"); @@ -3898,6 +4698,9 @@ IOGeneralMemoryDescriptor::doMap( mach_vm_size_t range0Len = 0; if ((offset >= _length) || ((offset + length) > _length)) { + traceInterval.setEndArg1(kIOReturnBadArgument); + DEBUG4K_ERROR("map %p offset 0x%llx length 0x%llx _length 0x%llx kIOReturnBadArgument\n", __addressMap, offset, length, (uint64_t)_length); + // assert(offset == 0 && _length == 0 && length == 0); return kIOReturnBadArgument; } @@ -3939,6 +4742,8 @@ IOGeneralMemoryDescriptor::doMap( } err = memoryReferenceCreate(createOptions, &_memRef); if (kIOReturnSuccess != err) { + traceInterval.setEndArg1(err); + DEBUG4K_ERROR("map %p err 0x%x\n", __addressMap, err); return err; } } @@ -3956,16 +4761,17 @@ IOGeneralMemoryDescriptor::doMap( if (!_memRef || (1 != _memRef->count)) { err = kIOReturnNotReadable; + DEBUG4K_ERROR("map %p err 0x%x\n", __addressMap, err); break; } - size = round_page(mapping->fLength); + size = (upl_size_t) round_page(mapping->fLength); flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS; if (KERN_SUCCESS != memory_object_iopl_request(_memRef->entries[0].entry, 0, &size, &redirUPL2, NULL, NULL, - &flags, getVMTag(kernel_map))) { + &flags, (vm_tag_t) getVMTag(kernel_map))) { redirUPL2 = NULL; } @@ -4005,6 +4811,9 @@ IOGeneralMemoryDescriptor::doMap( // upl_transpose> // else { err = memoryReferenceMap(_memRef, mapping->fAddressMap, offset, length, options, &mapping->fAddress); + if (err) { + DEBUG4K_ERROR("map %p err 0x%x\n", mapping->fAddressMap, err); + } #if IOTRACKING if ((err == KERN_SUCCESS) && ((kIOTracking & gIOKitDebug) || _task)) { // only dram maps in the default on developement case @@ -4022,6 +4831,10 @@ IOGeneralMemoryDescriptor::doMap( } } + traceInterval.setEndArg1(err); + if (err) { + DEBUG4K_ERROR("map %p err 0x%x\n", __addressMap, err); + } return err; } @@ -4052,7 +4865,11 @@ IOGeneralMemoryDescriptor::doUnmap( IOVirtualAddress __address, IOByteCount __length ) { - return super::doUnmap(addressMap, __address, __length); + IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_UNMAP), VM_KERNEL_ADDRHIDE(this), VM_KERNEL_ADDRHIDE(__address), __length); + IOReturn ret; + ret = super::doUnmap(addressMap, __address, __length); + 
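/*
 * Illustrative sketch (not part of the patch): the guarded-narrowing pattern the hunks above
 * apply in initMemoryEntries() and performOperation() -- reject or clamp a 64-bit size before
 * handing it to an interface that only takes 32 bits. The helper names are assumptions.
 */
#include <climits>
#include <cstdint>
#include <cstdio>

// Reject sizes that do not fit, as initMemoryEntries() now does.
static bool
narrowOrFail(size_t size, unsigned int *out)
{
	if (size > UINT_MAX) {
		return false;               // caller treats this as an allocation failure
	}
	*out = (unsigned int)size;
	return true;
}

// Clamp instead of failing, in the spirit of the per-call length clamps in performOperation().
static unsigned int
clampToUInt(uint64_t value)
{
	return (value > UINT_MAX) ? UINT_MAX : (unsigned int)value;
}

int
main(void)
{
	unsigned int narrowed = 0;
	printf("fits: %d\n", narrowOrFail(123456, &narrowed));
	printf("clamped: %u\n", clampToUInt(0x100000005ULL));
	return 0;
}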
traceInterval.setEndArg1(ret); + return ret; } /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ @@ -4060,7 +4877,7 @@ IOGeneralMemoryDescriptor::doUnmap( #undef super #define super OSObject -OSDefineMetaClassAndStructors( IOMemoryMap, OSObject ) +OSDefineMetaClassAndStructorsWithZone( IOMemoryMap, OSObject, ZC_NONE ) OSMetaClassDefineReservedUnused(IOMemoryMap, 0); OSMetaClassDefineReservedUnused(IOMemoryMap, 1); @@ -4125,14 +4942,14 @@ IOMemoryMap::setMemoryDescriptor(IOMemoryDescriptor * _memory, mach_vm_size_t _o fOffset = _offset; } - _memory->retain(); + + OSSharedPtr tempval(_memory, OSRetain); if (fMemory) { if (fMemory != _memory) { fMemory->removeMapping(this); } - fMemory->release(); } - fMemory = _memory; + fMemory = os::move(tempval); return true; } @@ -4219,7 +5036,7 @@ IOMemoryDescriptor::populateDevicePager( #if DEBUG || DEVELOPMENT if ((kIOMemoryTypeUPL != type) - && pmap_has_managed_page(atop_64(physAddr), atop_64(physAddr + segLen - 1))) { + && pmap_has_managed_page((ppnum_t) atop_64(physAddr), (ppnum_t) atop_64(physAddr + segLen - 1))) { OSReportWithBacktrace("IOMemoryDescriptor physical with managed page 0x%qx:0x%qx", physAddr, segLen); } #endif /* DEBUG || DEVELOPMENT */ @@ -4300,6 +5117,9 @@ IOMemoryDescriptor::doUnmap( } #endif err = mach_vm_deallocate( addressMap, address, length ); + if (vm_map_page_mask(addressMap) < PAGE_MASK) { + DEBUG4K_IOKIT("map %p address 0x%llx length 0x%llx err 0x%x\n", addressMap, address, length, err); + } } #if IOTRACKING @@ -4314,7 +5134,7 @@ IOMemoryDescriptor::redirect( task_t safeTask, bool doRedirect ) { IOReturn err = kIOReturnSuccess; IOMemoryMap * mapping = NULL; - OSIterator * iter; + OSSharedPtr iter; LOCK; @@ -4325,7 +5145,7 @@ IOMemoryDescriptor::redirect( task_t safeTask, bool doRedirect ) } do { - if ((iter = OSCollectionIterator::withCollection( _mappings))) { + if ((iter = OSCollectionIterator::withCollection( _mappings.get()))) { memory_object_t pager; if (reserved) { @@ -4341,7 +5161,7 @@ IOMemoryDescriptor::redirect( task_t safeTask, bool doRedirect ) } } - iter->release(); + iter.reset(); } } while (false); @@ -4484,7 +5304,7 @@ IOMemoryMap::free() LOCK; fMemory->removeMapping(this); UNLOCK; - fMemory->release(); + fMemory.reset(); } if (fOwner && (fOwner != fMemory)) { @@ -4494,7 +5314,7 @@ IOMemoryMap::free() } if (fSuperMap) { - fSuperMap->release(); + fSuperMap.reset(); } if (fRedirUPL) { @@ -4561,7 +5381,7 @@ IOMemoryMap::getMapOptions() IOMemoryDescriptor * IOMemoryMap::getMemoryDescriptor() { - return fMemory; + return fMemory.get(); } IOMemoryMap * @@ -4599,11 +5419,11 @@ IOMemoryMap::copyCompatible( return NULL; } - retain(); if ((fLength == _length) && (!_offset)) { + retain(); newMapping = this; } else { - newMapping->fSuperMap = this; + newMapping->fSuperMap.reset(this, OSRetain); newMapping->fOffset = fOffset + _offset; newMapping->fAddress = fAddress + _offset; } @@ -4624,7 +5444,7 @@ IOMemoryMap::wireRange( prot = (kIODirectionOutIn & options); if (prot) { - kr = vm_map_wire_kernel(fAddressMap, start, end, prot, fMemory->getVMTag(kernel_map), FALSE); + kr = vm_map_wire_kernel(fAddressMap, start, end, prot, (vm_tag_t) fMemory->getVMTag(kernel_map), FALSE); } else { kr = vm_map_unwire(fAddressMap, start, end, FALSE); } @@ -4674,7 +5494,7 @@ void IOMemoryDescriptor::free( void ) { if (_mappings) { - _mappings->release(); + _mappings.reset(); } if (reserved) { @@ -4685,7 +5505,7 @@ IOMemoryDescriptor::free( void ) super::free(); } -IOMemoryMap * +OSSharedPtr 
IOMemoryDescriptor::setMapping( task_t intoTask, IOVirtualAddress mapAddress, @@ -4696,7 +5516,7 @@ IOMemoryDescriptor::setMapping( 0, getLength()); } -IOMemoryMap * +OSSharedPtr IOMemoryDescriptor::map( IOOptionBits options ) { @@ -4706,7 +5526,7 @@ IOMemoryDescriptor::map( } #ifndef __LP64__ -IOMemoryMap * +OSSharedPtr IOMemoryDescriptor::map( task_t intoTask, IOVirtualAddress atAddress, @@ -4724,7 +5544,7 @@ IOMemoryDescriptor::map( } #endif /* !__LP64__ */ -IOMemoryMap * +OSSharedPtr IOMemoryDescriptor::createMappingInTask( task_t intoTask, mach_vm_address_t atAddress, @@ -4751,7 +5571,7 @@ IOMemoryDescriptor::createMappingInTask( if (mapping) { result = makeMapping(this, intoTask, (IOVirtualAddress) mapping, options | kIOMap64Bit, 0, 0); } else { - result = NULL; + result = nullptr; } #if DEBUG @@ -4761,7 +5581,10 @@ IOMemoryDescriptor::createMappingInTask( } #endif - return result; + // already retained through makeMapping + OSSharedPtr retval(result, OSNoRetain); + + return retval; } #ifndef __LP64__ // there is only a 64 bit version for LP64 @@ -4780,7 +5603,7 @@ IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory, mach_vm_size_t offset) { IOReturn err = kIOReturnSuccess; - IOMemoryDescriptor * physMem = NULL; + OSSharedPtr physMem; LOCK; @@ -4789,16 +5612,15 @@ IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory, if (((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical) || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64)) { physMem = fMemory; - physMem->retain(); } if (!fRedirUPL && fMemory->_memRef && (1 == fMemory->_memRef->count)) { - upl_size_t size = round_page(fLength); + upl_size_t size = (typeof(size))round_page(fLength); upl_control_flags_t flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS; if (KERN_SUCCESS != memory_object_iopl_request(fMemory->_memRef->entries[0].entry, 0, &size, &fRedirUPL, NULL, NULL, - &flags, fMemory->getVMTag(kernel_map))) { + &flags, (vm_tag_t) fMemory->getVMTag(kernel_map))) { fRedirUPL = NULL; } @@ -4833,10 +5655,6 @@ IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory, UNLOCK; - if (physMem) { - physMem->release(); - } - return err; } @@ -4855,7 +5673,7 @@ IOMemoryDescriptor::makeMapping( } #endif /* !__LP64__ */ - IOMemoryDescriptor * mapDesc = NULL; + OSSharedPtr mapDesc; __block IOMemoryMap * result = NULL; IOMemoryMap * mapping = (IOMemoryMap *) __address; @@ -4919,15 +5737,14 @@ IOMemoryDescriptor::makeMapping( } if (!mapDesc) { - mapDesc = this; - mapDesc->retain(); + mapDesc.reset(this, OSRetain); } IOReturn kr = mapDesc->doMap( NULL, (IOVirtualAddress *) &mapping, options, 0, 0 ); if (kIOReturnSuccess == kr) { result = mapping; mapDesc->addMapping(result); - result->setMemoryDescriptor(mapDesc, offset); + result->setMemoryDescriptor(mapDesc.get(), offset); } else { mapping->release(); mapping = NULL; @@ -4936,10 +5753,6 @@ IOMemoryDescriptor::makeMapping( UNLOCK; - if (mapDesc) { - mapDesc->release(); - } - return result; } @@ -5028,15 +5841,17 @@ IOMemoryDescriptor::getVirtualSegment(IOByteCount offset, bool IOGeneralMemoryDescriptor::serialize(OSSerialize * s) const { - OSSymbol const *keys[2] = {NULL}; - OSObject *values[2] = {NULL}; - OSArray * array; + OSSharedPtr keys[2] = {NULL}; + OSSharedPtr values[2] = {NULL}; + OSSharedPtr array; + vm_size_t vcopy_size; struct SerData { user_addr_t address; user_size_t length; } *vcopy = NULL; + unsigned int index, nRanges; bool result = false; @@ -5097,46 +5912,29 @@ 
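/*
 * Illustrative sketch (not part of the patch): the two OSSharedPtr constructions the hunks
 * above rely on. OSSharedPtr(ptr, OSRetain) takes an extra reference (used in
 * setMemoryDescriptor, where the raw pointer is still owned elsewhere), while
 * OSSharedPtr(ptr, OSNoRetain) adopts a reference the callee already produced (used for the
 * result of makeMapping in createMappingInTask). The toy RefCounted/Handle types below are
 * stand-ins for OSObject/OSSharedPtr, purely to show the reference counting.
 */
#include <cstdio>

struct RefCounted {
	int refs = 1;                             // born with one reference, like OSObject
	void retain()  { refs++; }
	void release() { if (--refs == 0) { printf("freed\n"); delete this; } }
};

struct Handle {                               // minimal OSSharedPtr-like wrapper
	RefCounted *p;
	Handle(RefCounted *obj, bool retain) : p(obj) { if (retain) { p->retain(); } }
	~Handle() { if (p) { p->release(); } }
};

int
main(void)
{
	RefCounted *raw = new RefCounted;         // caller-owned reference (refs == 1)
	{
		Handle shared(raw, /*retain*/ true);  // "OSRetain": refs -> 2
	}                                         // Handle drops its reference: refs -> 1
	{
		Handle adopted(raw, /*retain*/ false); // "OSNoRetain": adopts the remaining reference
	}                                         // refs -> 0, object freed
	return 0;
}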
IOGeneralMemoryDescriptor::serialize(OSSerialize * s) const result = false; goto bail; } - OSDictionary *dict = OSDictionary::withObjects((const OSObject **)values, (const OSSymbol **)keys, 2); + OSSharedPtr dict = OSDictionary::withObjects((const OSObject **)values, (const OSSymbol **)keys, 2); if (dict == NULL) { result = false; goto bail; } - array->setObject(dict); - dict->release(); - values[0]->release(); - values[1]->release(); - values[0] = values[1] = NULL; + array->setObject(dict.get()); + dict.reset(); + values[0].reset(); + values[1].reset(); } result = array->serialize(s); bail: - if (array) { - array->release(); - } - if (values[0]) { - values[0]->release(); - } - if (values[1]) { - values[1]->release(); - } - if (keys[0]) { - keys[0]->release(); - } - if (keys[1]) { - keys[1]->release(); - } if (vcopy) { IOFree(vcopy, vcopy_size); } return result; } - /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 0); +OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 0); #ifdef __LP64__ OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 1); OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 2); @@ -5146,13 +5944,13 @@ OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 5); OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 6); OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 7); #else /* !__LP64__ */ -OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 1); -OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 2); -OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 3); -OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 4); -OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 5); -OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 6); -OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 7); +OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 1); +OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 2); +OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 3); +OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 4); +OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 5); +OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 6); +OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 7); #endif /* !__LP64__ */ OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 8); OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 9); diff --git a/iokit/Kernel/IONVRAM.cpp b/iokit/Kernel/IONVRAM.cpp index 4b0c315a9..daa6cf7cb 100644 --- a/iokit/Kernel/IONVRAM.cpp +++ b/iokit/Kernel/IONVRAM.cpp @@ -27,57 +27,675 @@ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ +#define IOKIT_ENABLE_SHARED_PTR + +#include #include #include #include #include #include #include +#include #include #include #include - #define super IOService +#define ARRAY_SIZE(x) (sizeof(x)/sizeof(x[0])) + +// Internal values +#define NVRAM_CHRP_SIG_APPLE 0x5A +#define NVRAM_CHRP_APPLE_HEADER_NAME "nvram" + +// From Apple CHRP Spec +#define NVRAM_CHRP_SIG_SYSTEM 0x70 +#define NVRAM_CHRP_SIG_CONFIG 0x71 +#define NVRAM_CHRP_SIG_FREESPACE 0x7F + +#define NVRAM_CHRP_PARTITION_NAME_COMMON "common" +#define NVRAM_CHRP_PARTITION_NAME_SYSTEM "system" +#define NVRAM_CHRP_PARTITION_NAME_SYSTEM_LEGACY "secure" +#define NVRAM_CHRP_PARTITION_NAME_FREESPACE "\x77\x77\x77\x77\x77\x77\x77\x77\x77\x77\x77\x77" + +#define NVRAM_CHRP_LENGTH_BLOCK_SIZE 0x10 // CHRP length field is in 16 byte blocks + +typedef struct chrp_nvram_header { //16 bytes + uint8_t sig; + uint8_t cksum; // checksum on sig, len, and name + uint16_t len; // total length of the partition in 16 byte blocks starting with 
the signature + // and ending with the last byte of data area, ie len includes its own header size + char name[12]; + uint8_t data[0]; +} chrp_nvram_header_t; + +typedef struct apple_nvram_header { // 16 + 16 bytes + struct chrp_nvram_header chrp; + uint32_t adler; + uint32_t generation; + uint8_t padding[8]; +} apple_nvram_header_t; + + #define kIONVRAMPrivilege kIOClientPrivilegeAdministrator -//#define kIONVRAMPrivilege kIOClientPrivilegeLocalUser OSDefineMetaClassAndStructors(IODTNVRAM, IOService); +#if defined(DEBUG) || defined(DEVELOPMENT) +#define DEBUG_INFO(fmt, args...) \ +({ \ + if (gNVRAMLogging) \ + IOLog("IONVRAM::%s:%u - " fmt, __FUNCTION__, __LINE__, ##args); \ +}) + +#define DEBUG_ALWAYS(fmt, args...) \ +({ \ + IOLog("IONVRAM::%s:%u - " fmt, __FUNCTION__, __LINE__, ##args); \ +}) +#else +#define DEBUG_INFO(fmt, args...) +#define DEBUG_ALWAYS(fmt, args...) +#endif + +#define DEBUG_ERROR DEBUG_ALWAYS + +#define NVRAMLOCK() \ +({ \ + if (preemption_enabled() && !panic_active()) \ + IOLockLock(_variableLock); \ +}) + +#define NVRAMUNLOCK() \ +({ \ + if (preemption_enabled() && !panic_active()) \ + IOLockUnlock(_variableLock); \ +}) + +#define NVRAMLOCKASSERT() \ +({ \ + if (preemption_enabled() && !panic_active()) \ + IOLockAssert(_variableLock, kIOLockAssertOwned); \ +}) + +typedef struct { + const char *name; + UInt32 offset; + UInt32 size; + OSSharedPtr &dict; + UInt8 *image; +} NVRAMRegionInfo; + +// Guid for Apple System Boot variables +// 40A0DDD2-77F8-4392-B4A3-1E7304206516 +UUID_DEFINE(gAppleSystemVariableGuid, 0x40, 0xA0, 0xDD, 0xD2, 0x77, 0xF8, 0x43, 0x92, 0xB4, 0xA3, 0x1E, 0x73, 0x04, 0x20, 0x65, 0x16); + +// Apple NVRAM Variable namespace (APPLE_VENDOR_OS_VARIABLE_GUID) +// 7C436110-AB2A-4BBB-A880-FE41995C9F82 +UUID_DEFINE(gAppleNVRAMGuid, 0x7C, 0x43, 0x61, 0x10, 0xAB, 0x2A, 0x4B, 0xBB, 0xA8, 0x80, 0xFE, 0x41, 0x99, 0x5C, 0x9F, 0x82); + +static bool gNVRAMLogging = false; + +// allowlist variables from macboot that need to be set/get from system region if present +static const char * const gNVRAMSystemList[] = { + "adbe-tunable", + "adbe-tunables", + "adfe-tunables", + "alamo-path", + "alt-boot-volume", + "ASMB", + "atc0", + "atc1", + "auto-boot", + "auto-boot-halt-stage", + "auto-boot-once", + "auto-boot-usb", + "auxkc-path", + "backlight-level", + "backlight-nits", + "base-system-path", + "boot-args", + "boot-breadcrumbs", + "boot-command", + "boot-device", + "boot-image", + "boot-partition", + "boot-path", + "boot-ramdisk", + "boot-script", + "boot-volume", + "bootdelay", + "bt1addr", + "btaddr", + "cam-use-ext-ldo", + "CLCG_override", + "com.apple.System.boot-nonce", + "com.apple.System.rtc-offset", + "com.apple.System.tz0-size", + "core-bin-offset", + "cpu-bin-offset", + "darkboot", + "DClr_override", + "dcp-auto-boot", + "debug-gg", + "debug-soc", + "debug-uarts", + "diags-path", + "disable-boot-wdt", + "display-color-space", + "display-timing", + "display-vsh-comp", + "dpcd-max-brightness", + "dtdump", + "dtdump-path", + "e75", + "emu", + "enable-auth-debug", + "enable-jop", + "enable-marconi", + "enable-upgrade-fallback", + "enforce-iuob", + "eth1addr", + "ethaddr", + "failboot-breadcrumbs", + "fixed-lcm-boost", + "force-ctrr-lock", + "force-upgrade-fail", + "fuos-path", + "hib-ui-force", + "hibhack-test-hmac", + "iboot-data", + "iboot-failure-reason", + "iboot-failure-reason-str", + "iboot-failure-volume", + "iboot1-precommitted", + "idle-off", + "is-tethered", + "kaslr-off", + "kaslr-slide", + "kis-rsm", + "knobs", + "loadaddr", + "memmapdump", + 
"mipi-bridge-cmd-verify", + "mipi-bridge-poll-cmd-fifo", + "no-ctrr", + "one-time-boot-command", + "osenvironment", + "ota-breadcrumbs", + "ota-outcome", + "panicmedic", + "panicmedic-threshold", + "panicmedic-timestamps", + "phleet-path", + "pinot-panel-id", + "pintoaddr", + "policy-nonce-digests", + "preserve-debuggability", + "prevent-restores", // Keep for factory + "prev-lang:kbd", + "ramrod-kickstart-aces", + "rbdaddr0", + "rbm-path", + "reconfig-behavior", + "reconfig-breakpoints", + "recovery-boot-mode", + "recovery-breadcrumbs", + "restored-host-timeout", + "root-live-fs", + "rtos-path", + "soc-bin-offset", + "StartupMute", + "StartupMuteAccessibility", + "storage-prev-assert", + "storage-prev-assert-stored", + "summit-panel-id", + "SystemAudioVolume", + "SystemAudioVolumeExtension", + "SystemAudioVolumeSaved", + "tz0-size-override", + "upgrade-fallback-boot-command", + "upgrade-retry", + "usb-enabled", + "wifi1addr", + "wifiaddr", + nullptr +}; + +typedef struct { + const char *name; + IONVRAMVariableType type; +} VariableTypeEntry; + +static const +VariableTypeEntry gVariableTypes[] = { + {"auto-boot?", kOFVariableTypeBoolean}, + {"boot-args", kOFVariableTypeString}, + {"boot-command", kOFVariableTypeString}, + {"boot-device", kOFVariableTypeString}, + {"boot-file", kOFVariableTypeString}, + {"boot-screen", kOFVariableTypeString}, + {"boot-script", kOFVariableTypeString}, + {"console-screen", kOFVariableTypeString}, + {"default-client-ip", kOFVariableTypeString}, + {"default-gateway-ip", kOFVariableTypeString}, + {"default-mac-address?", kOFVariableTypeBoolean}, + {"default-router-ip", kOFVariableTypeString}, + {"default-server-ip", kOFVariableTypeString}, + {"default-subnet-mask", kOFVariableTypeString}, + {"diag-device", kOFVariableTypeString}, + {"diag-file", kOFVariableTypeString}, + {"diag-switch?", kOFVariableTypeBoolean}, + {"fcode-debug?", kOFVariableTypeBoolean}, + {"input-device", kOFVariableTypeString}, + {"input-device-1", kOFVariableTypeString}, + {"little-endian?", kOFVariableTypeBoolean}, + {"load-base", kOFVariableTypeNumber}, + {"mouse-device", kOFVariableTypeString}, + {"nvramrc", kOFVariableTypeString}, + {"oem-banner", kOFVariableTypeString}, + {"oem-banner?", kOFVariableTypeBoolean}, + {"oem-logo", kOFVariableTypeString}, + {"oem-logo?", kOFVariableTypeBoolean}, + {"output-device", kOFVariableTypeString}, + {"output-device-1", kOFVariableTypeString}, + {"pci-probe-list", kOFVariableTypeNumber}, + {"pci-probe-mask", kOFVariableTypeNumber}, + {"real-base", kOFVariableTypeNumber}, + {"real-mode?", kOFVariableTypeBoolean}, + {"real-size", kOFVariableTypeNumber}, + {"screen-#columns", kOFVariableTypeNumber}, + {"screen-#rows", kOFVariableTypeNumber}, + {"security-mode", kOFVariableTypeString}, + {"selftest-#megs", kOFVariableTypeNumber}, + {"use-generic?", kOFVariableTypeBoolean}, + {"use-nvramrc?", kOFVariableTypeBoolean}, + {"virt-base", kOFVariableTypeNumber}, + {"virt-size", kOFVariableTypeNumber}, + +#if !defined(__x86_64__) + {"acc-cm-override-charger-count", kOFVariableTypeNumber}, + {"acc-cm-override-count", kOFVariableTypeNumber}, + {"acc-mb-ld-lifetime", kOFVariableTypeNumber}, + {"com.apple.System.boot-nonce", kOFVariableTypeString}, + {"darkboot", kOFVariableTypeBoolean}, + {"enter-tdm-mode", kOFVariableTypeBoolean}, +#endif /* !defined(__x86_64__) */ + {nullptr, kOFVariableTypeData} // Default type to return +}; + +union VariablePermission { + struct { + uint64_t UserWrite :1; + uint64_t RootRequired :1; + uint64_t KernelOnly :1; + uint64_t 
ResetNVRAMOnlyDelete :1; + uint64_t NeverAllowedToDelete :1; + uint64_t FullAccess :1; + uint64_t Reserved:58; + } Bits; + uint64_t Uint64; +}; + +typedef struct { + const char *name; + VariablePermission p; +} VariablePermissionEntry; + +static const +VariablePermissionEntry gVariablePermissions[] = { + {"aapl,pci", .p.Bits.RootRequired = 1}, + {"battery-health", .p.Bits.RootRequired = 1, + .p.Bits.NeverAllowedToDelete = 1}, + {"boot-image", .p.Bits.UserWrite = 1}, + {"com.apple.System.fp-state", .p.Bits.KernelOnly = 1}, + {"policy-nonce-digests", .p.Bits.ResetNVRAMOnlyDelete = 1}, + {"security-password", .p.Bits.RootRequired = 1}, + +#if !defined(__x86_64__) + {"acc-cm-override-charger-count", .p.Bits.KernelOnly = 1}, + {"acc-cm-override-count", .p.Bits.KernelOnly = 1}, + {"acc-mb-ld-lifetime", .p.Bits.KernelOnly = 1}, + {"backlight-level", .p.Bits.UserWrite = 1}, + {"com.apple.System.boot-nonce", .p.Bits.KernelOnly = 1}, + {"com.apple.System.sep.art", .p.Bits.KernelOnly = 1}, + {"darkboot", .p.Bits.UserWrite = 1}, + {"nonce-seeds", .p.Bits.KernelOnly = 1}, +#endif /* !defined(__x86_64__) */ + + {nullptr, {.Bits.FullAccess = 1}} // Default access +}; + +static IONVRAMVariableType +getVariableType(const char *propName) +{ + const VariableTypeEntry *entry; + + entry = gVariableTypes; + while (entry->name != nullptr) { + if (strcmp(entry->name, propName) == 0) { + break; + } + entry++; + } + + return entry->type; +} + +static IONVRAMVariableType +getVariableType(const OSSymbol *propSymbol) +{ + return getVariableType(propSymbol->getCStringNoCopy()); +} + +static VariablePermission +getVariablePermission(const char *propName) +{ + const VariablePermissionEntry *entry; + + entry = gVariablePermissions; + while (entry->name != nullptr) { + if (strcmp(entry->name, propName) == 0) { + break; + } + entry++; + } + + return entry->p; +} + +static bool +variableInAllowList(const char *varName) +{ + unsigned int i = 0; + + while (gNVRAMSystemList[i] != nullptr) { + if (strcmp(varName, gNVRAMSystemList[i]) == 0) { + return true; + } + i++; + } + + return false; +} + +static bool +verifyWriteSizeLimit(const uuid_t *varGuid, const char *variableName, size_t propDataSize) +{ + if (variableInAllowList(variableName)) { + if (strnstr(variableName, "breadcrumbs", strlen(variableName)) != NULL) { + return propDataSize <= 1024; + } else { + return propDataSize <= 768; + } + } + + return true; +} + +static bool +verifyPermission(IONVRAMOperation op, const uuid_t *varGuid, const char *varName) +{ + VariablePermission perm; + bool kernel, admin, writeEntitled, readEntitled, allowList, systemGuid, systemEntitled; + + perm = getVariablePermission(varName); + + kernel = current_task() == kernel_task; + + if (perm.Bits.KernelOnly) { + DEBUG_INFO("KernelOnly access for %s, kernel=%d\n", varName, kernel); + return kernel; + } + + allowList = variableInAllowList(varName); + systemGuid = uuid_compare(*varGuid, gAppleSystemVariableGuid) == 0; + admin = IOUserClient::clientHasPrivilege(current_task(), kIONVRAMPrivilege) == kIOReturnSuccess; + writeEntitled = IOTaskHasEntitlement(current_task(), kIONVRAMWriteAccessKey); + readEntitled = IOTaskHasEntitlement(current_task(), kIONVRAMReadAccessKey); + systemEntitled = IOTaskHasEntitlement(current_task(), kIONVRAMSystemAllowKey) || kernel; + + switch (op) { + case kIONVRAMOperationRead: + if (kernel || admin || readEntitled || perm.Bits.FullAccess) { + return true; + } + break; + + case kIONVRAMOperationWrite: + if (kernel || perm.Bits.UserWrite || admin || writeEntitled) { + 
if (systemGuid) { + if (allowList) { + if (!systemEntitled) { + DEBUG_ERROR("Allowed write to system region when NOT entitled for %s\n", varName); + } + } else if (!systemEntitled) { + DEBUG_ERROR("Not entitled for system region writes for %s\n", varName); + break; + } + } + return true; + } + break; + + case kIONVRAMOperationDelete: + case kIONVRAMOperationObliterate: + case kIONVRAMOperationReset: + if (perm.Bits.NeverAllowedToDelete) { + DEBUG_INFO("Never allowed to delete %s\n", varName); + break; + } else if ((op == kIONVRAMOperationObliterate) && perm.Bits.ResetNVRAMOnlyDelete) { + DEBUG_INFO("Not allowed to obliterate %s\n", varName); + break; + } + + if (kernel || perm.Bits.UserWrite || admin || writeEntitled) { + if (systemGuid) { + if (allowList) { + if (!systemEntitled) { + DEBUG_ERROR("Allowed delete to system region when NOT entitled for %s\n", varName); + } + } else if (!systemEntitled) { + DEBUG_ERROR("Not entitled for system region deletes for %s\n", varName); + break; + } + } + return true; + } + break; + } + + DEBUG_INFO("Permission for %s denied, kernel=%d, admin=%d, writeEntitled=%d, readEntitled=%d, systemGuid=%d, systemEntitled=%d\n", + varName, kernel, admin, writeEntitled, readEntitled, systemGuid, systemEntitled); + return false; +} + +static bool +verifyPermission(IONVRAMOperation op, const uuid_t *varGuid, const OSSymbol *varName) +{ + return verifyPermission(op, varGuid, varName->getCStringNoCopy()); +} + +/* + * Parse a variable name of the form "GUID:name". + * If the name cannot be parsed, substitute the Apple global variable GUID. + * Returns TRUE if a GUID was found in the name, FALSE otherwise. + * The guidResult and nameResult arguments may be nullptr if you just want + * to check the format of the string. + */ +static bool +parseVariableName(const char *key, uuid_t *guidResult, const char **nameResult) +{ + uuid_string_t temp = {0}; + size_t keyLen = strlen(key); + bool result = false; + const char *name = key; + uuid_t guid; + + if (keyLen > sizeof(temp)) { + // check for at least UUID + ":" + more + memcpy(temp, key, sizeof(temp) - 1); + + if ((uuid_parse(temp, guid) == 0) && + (key[sizeof(temp) - 1] == ':')) { + name = key + sizeof(temp); + result = true; + } + } + + if (guidResult) { + result ? 
uuid_copy(*guidResult, guid) : uuid_copy(*guidResult, gAppleNVRAMGuid); + } + if (nameResult) { + *nameResult = name; + } + + return false; +} + +// private IOService based class for publishing distinct dictionary properties on +// for easy ioreg access since the serializeProperties call is overloaded and is used +// as variable access +class IODTNVRAMVariables : public IOService +{ + OSDeclareDefaultStructors(IODTNVRAMVariables) +private: + IODTNVRAM *_provider; + OSDictionary *_properties; + uuid_t _guid; + +public: + bool init(const uuid_t *guid); + virtual bool start(IOService * provider) APPLE_KEXT_OVERRIDE; + virtual IOReturn setProperties(OSObject * properties) APPLE_KEXT_OVERRIDE; + virtual bool serializeProperties(OSSerialize *s) const APPLE_KEXT_OVERRIDE; +}; + +OSDefineMetaClassAndStructors(IODTNVRAMVariables, IOService) + +bool +IODTNVRAMVariables::init(const uuid_t *guid) +{ + require(super::init(), error); + require(guid, error); + + uuid_copy(_guid, *guid); + + return true; + +error: + return false; +} + +bool +IODTNVRAMVariables::start(IOService * provider) +{ + require(IOService::start(provider), error); + + require(_provider = OSDynamicCast(IODTNVRAM, provider), error); + + registerService(); + + return true; + +error: + stop(provider); + + return false; +} + +IOReturn +IODTNVRAMVariables::setProperties(OSObject * properties) +{ + if (OSDynamicCast(OSDictionary, properties)) { + OSSafeReleaseNULL(_properties); + _properties = OSDynamicCast(OSDictionary, properties); + properties->retain(); + } + + return IOService::setProperties(properties); +} + +bool +IODTNVRAMVariables::serializeProperties(OSSerialize *s) const +{ + const OSSymbol *key; + OSSharedPtr dict; + OSSharedPtr iter; + OSSharedPtr localProperties(_properties, OSRetain); + bool result = false; + + require(localProperties != nullptr, exit); + + dict = OSDictionary::withCapacity(localProperties->getCount()); + require_action(dict, exit, DEBUG_ERROR("No dictionary\n")); + + iter = OSCollectionIterator::withCollection(localProperties.get()); + require_action(iter, exit, DEBUG_ERROR("failed to create iterator\n")); + + while ((key = OSDynamicCast(OSSymbol, iter->getNextObject()))) { + if (verifyPermission(kIONVRAMOperationRead, &_guid, key)) { + dict->setObject(key, localProperties->getObject(key)); + } + } + + result = dict->serialize(s); + +exit: + DEBUG_INFO("result=%d\n", result); + return result; +} + bool IODTNVRAM::init(IORegistryEntry *old, const IORegistryPlane *plane) { - OSDictionary *dict; + OSSharedPtr dict; if (!super::init(old, plane)) { return false; } + _variableLock = IOLockAlloc(); + if (!_variableLock) { + return false; + } + + PE_parse_boot_argn("nvram-log", &gNVRAMLogging, sizeof(gNVRAMLogging)); + dict = OSDictionary::withCapacity(1); - if (dict == NULL) { + if (dict == nullptr) { return false; } - setPropertyTable(dict); - dict->release(); + setPropertyTable(dict.get()); + dict.reset(); - _nvramImage = IONew(UInt8, kIODTNVRAMImageSize); - if (_nvramImage == NULL) { + _nvramSize = getNVRAMSize(); + if (_nvramSize == 0) { + DEBUG_ERROR("NVRAM : Error - default size not specified in DT\n"); + return false; + } + // partition offsets are UInt16 (bytes / 0x10) + 1 + if (_nvramSize > 0xFFFF * 0x10) { + DEBUG_ERROR("NVRAM : truncating _nvramSize from %ld\n", (long) _nvramSize); + _nvramSize = 0xFFFF * 0x10; + } + _nvramImage = IONew(UInt8, _nvramSize); + if (_nvramImage == nullptr) { return false; } _nvramPartitionOffsets = OSDictionary::withCapacity(1); - if (_nvramPartitionOffsets == NULL) { + if 
(_nvramPartitionOffsets == nullptr) { return false; } _nvramPartitionLengths = OSDictionary::withCapacity(1); - if (_nvramPartitionLengths == NULL) { + if (_nvramPartitionLengths == nullptr) { return false; } _registryPropertiesKey = OSSymbol::withCStringNoCopy("aapl,pci"); - if (_registryPropertiesKey == NULL) { + if (_registryPropertiesKey == nullptr) { return false; } @@ -85,26 +703,30 @@ IODTNVRAM::init(IORegistryEntry *old, const IORegistryPlane *plane) // IODTNVRAM and IONVRAMController (restore loses boot-args) initProxyData(); + // Require at least the common partition to be present and error free + if (_commonDict == nullptr) { + return false; + } + return true; } void IODTNVRAM::initProxyData(void) { - IORegistryEntry *entry; - const char *key = "nvram-proxy-data"; - OSObject *prop; - OSData *data; - const void *bytes; + OSSharedPtr entry; + const char *key = "nvram-proxy-data"; + OSData *data; + const void *bytes; entry = IORegistryEntry::fromPath("/chosen", gIODTPlane); - if (entry != NULL) { - prop = entry->getProperty(key); - if (prop != NULL) { - data = OSDynamicCast(OSData, prop); - if (data != NULL) { + if (entry != nullptr) { + OSSharedPtr prop = entry->copyProperty(key); + if (prop != nullptr) { + data = OSDynamicCast(OSData, prop.get()); + if (data != nullptr) { bytes = data->getBytesNoCopy(); - if ((bytes != NULL) && (data->getLength() <= kIODTNVRAMImageSize)) { + if ((bytes != nullptr) && (data->getLength() <= _nvramSize)) { bcopy(bytes, _nvramImage, data->getLength()); initNVRAMImage(); _isProxied = true; @@ -112,29 +734,105 @@ IODTNVRAM::initProxyData(void) } } entry->removeProperty(key); - entry->release(); } } +UInt32 +IODTNVRAM::getNVRAMSize(void) +{ + OSSharedPtr entry; + const char *key = "nvram-total-size"; + OSData *data; + UInt32 size = 0; + + entry = IORegistryEntry::fromPath("/chosen", gIODTPlane); + if (entry != nullptr) { + OSSharedPtr prop = entry->copyProperty(key); + if (prop != nullptr) { + data = OSDynamicCast(OSData, prop.get()); + if (data != nullptr) { + size = *((UInt32*)data->getBytesNoCopy()); + DEBUG_ALWAYS("NVRAM size is %u bytes\n", (unsigned int) size); + } + } + } + return size; +} + + void IODTNVRAM::registerNVRAMController(IONVRAMController *nvram) { - if (_nvramController != NULL) { + if (_nvramController != nullptr) { + DEBUG_ERROR("Duplicate controller set\n"); return; } + DEBUG_INFO("setting controller\n"); + _nvramController = nvram; // race condition possible between // IODTNVRAM and IONVRAMController (restore loses boot-args) if (!_isProxied) { - _nvramController->read(0, _nvramImage, kIODTNVRAMImageSize); + DEBUG_INFO("Proxied NVRAM data\n"); + _nvramController->read(0, _nvramImage, _nvramSize); initNVRAMImage(); - } else if (_ofLock) { - IOLockLock(_ofLock); - (void) syncVariables(); - IOLockUnlock(_ofLock); } + + if (_systemPartitionSize) { + _systemService = new IODTNVRAMVariables; + + if (!_systemService || !_systemService->init(&gAppleSystemVariableGuid)) { + DEBUG_ERROR("Unable to start the system service!\n"); + goto no_system; + } + + _systemService->setName("options-system"); + + if (!_systemService->attach(this)) { + DEBUG_ERROR("Unable to attach the system service!\n"); + OSSafeReleaseNULL(_systemService); + goto no_system; + } + + if (!_systemService->start(this)) { + DEBUG_ERROR("Unable to start the system service!\n"); + _systemService->detach(this); + OSSafeReleaseNULL(_systemService); + goto no_system; + } + } + +no_system: + if (_commonPartitionSize) { + _commonService = new IODTNVRAMVariables; + + if 
(!_commonService || !_commonService->init(&gAppleNVRAMGuid)) { + DEBUG_ERROR("Unable to start the common service!\n"); + goto no_common; + } + + _commonService->setName("options-common"); + + if (!_commonService->attach(this)) { + DEBUG_ERROR("Unable to attach the common service!\n"); + OSSafeReleaseNULL(_commonService); + goto no_common; + } + + if (!_commonService->start(this)) { + DEBUG_ERROR("Unable to start the common service!\n"); + _systemService->detach(this); + OSSafeReleaseNULL(_commonService); + goto no_common; + } + } + +no_common: + NVRAMLOCK(); + (void) syncVariables(); + NVRAMUNLOCK(); } void @@ -142,135 +840,83 @@ IODTNVRAM::initNVRAMImage(void) { char partitionID[18]; UInt32 partitionOffset, partitionLength; - UInt32 freePartitionOffset, freePartitionSize; UInt32 currentLength, currentOffset = 0; - OSNumber *partitionOffsetNumber, *partitionLengthNumber; - // Find the offsets for the OF, XPRAM, NameRegistry and PanicInfo partitions. - _ofPartitionOffset = 0xFFFFFFFF; - _piPartitionOffset = 0xFFFFFFFF; - freePartitionOffset = 0xFFFFFFFF; - freePartitionSize = 0; + _commonPartitionOffset = 0xFFFFFFFF; + _systemPartitionOffset = 0xFFFFFFFF; + + // Look through the partitions to find the OF and System partitions. + while (currentOffset < _nvramSize) { + bool common_partition; + bool system_partition; - // Look through the partitions to find the OF, MacOS partitions. - while (currentOffset < kIODTNVRAMImageSize) { - currentLength = ((UInt16 *)(_nvramImage + currentOffset))[1] * 16; + chrp_nvram_header_t * header = (chrp_nvram_header_t *)(_nvramImage + currentOffset); - if (currentLength < 16) { + currentLength = header->len * NVRAM_CHRP_LENGTH_BLOCK_SIZE; + + if (currentLength < sizeof(chrp_nvram_header_t)) { break; } - partitionOffset = currentOffset + 16; - partitionLength = currentLength - 16; - if ((partitionOffset + partitionLength) > kIODTNVRAMImageSize) { + + partitionOffset = currentOffset + sizeof(chrp_nvram_header_t); + partitionLength = currentLength - sizeof(chrp_nvram_header_t); + + if ((partitionOffset + partitionLength) > _nvramSize) { break; } - if (strncmp((const char *)_nvramImage + currentOffset + 4, - kIODTNVRAMOFPartitionName, 12) == 0) { - _ofPartitionOffset = partitionOffset; - _ofPartitionSize = partitionLength; - } else if (strncmp((const char *)_nvramImage + currentOffset + 4, - kIODTNVRAMXPRAMPartitionName, 12) == 0) { - } else if (strncmp((const char *)_nvramImage + currentOffset + 4, - kIODTNVRAMPanicInfoPartitonName, 12) == 0) { - _piPartitionOffset = partitionOffset; - _piPartitionSize = partitionLength; - } else if (strncmp((const char *)_nvramImage + currentOffset + 4, - kIODTNVRAMFreePartitionName, 12) == 0) { - freePartitionOffset = currentOffset; - freePartitionSize = currentLength; + common_partition = memcmp(header->name, NVRAM_CHRP_PARTITION_NAME_COMMON, strlen(NVRAM_CHRP_PARTITION_NAME_COMMON)) == 0; + system_partition = (memcmp(header->name, NVRAM_CHRP_PARTITION_NAME_SYSTEM, strlen(NVRAM_CHRP_PARTITION_NAME_SYSTEM)) == 0) || + (memcmp(header->name, NVRAM_CHRP_PARTITION_NAME_SYSTEM_LEGACY, strlen(NVRAM_CHRP_PARTITION_NAME_SYSTEM_LEGACY)) == 0); + + if (common_partition) { + _commonPartitionOffset = partitionOffset; + _commonPartitionSize = partitionLength; + } else if (system_partition) { + _systemPartitionOffset = partitionOffset; + _systemPartitionSize = partitionLength; } else { + OSSharedPtr partitionOffsetNumber, partitionLengthNumber; + // Construct the partition ID from the signature and name. 
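/*
 * Illustrative sketch (not part of the patch): walking a CHRP-style NVRAM image with the
 * chrp_nvram_header_t layout introduced above (16-byte header, length counted in 16-byte
 * blocks and including the header itself), in the spirit of the partition scan in
 * initNVRAMImage(). The sample image built in main() is an assumption; only the walk mirrors
 * the loop above, and no checksum verification is attempted here.
 */
#include <cstdint>
#include <cstring>
#include <cstdio>

struct ChrpHeader {                   // mirrors chrp_nvram_header_t from the patch
	uint8_t  sig;
	uint8_t  cksum;
	uint16_t len;                     // total partition length / 16, header included
	char     name[12];
};

static void
walkPartitions(const uint8_t *image, uint32_t imageSize)
{
	uint32_t offset = 0;
	while (offset + sizeof(ChrpHeader) <= imageSize) {
		ChrpHeader h;
		memcpy(&h, image + offset, sizeof(h));   // avoid alignment assumptions
		uint32_t bytes = (uint32_t)h.len * 16;   // NVRAM_CHRP_LENGTH_BLOCK_SIZE
		if (bytes < sizeof(ChrpHeader) || offset + bytes > imageSize) {
			break;                               // malformed or truncated entry
		}
		printf("sig 0x%02x name %.12s data bytes %u\n",
		    h.sig, h.name, bytes - (uint32_t)sizeof(ChrpHeader));
		offset += bytes;
	}
}

int
main(void)
{
	uint8_t image[64] = {};
	ChrpHeader h = {};
	h.sig = 0x5A;                     // NVRAM_CHRP_SIG_APPLE
	h.len = sizeof(image) / 16;       // one partition spanning the whole sample image
	memcpy(h.name, "common", 6);
	memcpy(image, &h, sizeof(h));
	walkPartitions(image, sizeof(image));
	return 0;
}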
- snprintf(partitionID, sizeof(partitionID), "0x%02x,", - *(UInt8 *)(_nvramImage + currentOffset)); - strncpy(partitionID + 5, - (const char *)(_nvramImage + currentOffset + 4), 12); + snprintf(partitionID, sizeof(partitionID), "0x%02x,", header->sig); + strncpy(partitionID + 5, header->name, sizeof(header->name)); partitionID[17] = '\0'; partitionOffsetNumber = OSNumber::withNumber(partitionOffset, 32); partitionLengthNumber = OSNumber::withNumber(partitionLength, 32); // Save the partition offset and length - _nvramPartitionOffsets->setObject(partitionID, partitionOffsetNumber); - _nvramPartitionLengths->setObject(partitionID, partitionLengthNumber); - - partitionOffsetNumber->release(); - partitionLengthNumber->release(); + _nvramPartitionOffsets->setObject(partitionID, partitionOffsetNumber.get()); + _nvramPartitionLengths->setObject(partitionID, partitionLengthNumber.get()); } currentOffset += currentLength; } - if (_ofPartitionOffset != 0xFFFFFFFF) { - _ofImage = _nvramImage + _ofPartitionOffset; + if (_commonPartitionOffset != 0xFFFFFFFF) { + _commonImage = _nvramImage + _commonPartitionOffset; } - if (_piPartitionOffset == 0xFFFFFFFF) { - if (freePartitionSize > 0x20) { - // Set the signature to 0xa1. - _nvramImage[freePartitionOffset] = 0xa1; - // Set the checksum to 0. - _nvramImage[freePartitionOffset + 1] = 0; - // Set the name for the Panic Info partition. - strncpy((char *)(_nvramImage + freePartitionOffset + 4), - kIODTNVRAMPanicInfoPartitonName, 12); - - // Calculate the partition offset and size. - _piPartitionOffset = freePartitionOffset + 0x10; - _piPartitionSize = 0x800; - if (_piPartitionSize + 0x20 > freePartitionSize) { - _piPartitionSize = freePartitionSize - 0x20; - } - - _piImage = _nvramImage + _piPartitionOffset; - - // Zero the new partition. - bzero(_piImage, _piPartitionSize); - - // Set the partition size. - *(UInt16 *)(_nvramImage + freePartitionOffset + 2) = - (_piPartitionSize / 0x10) + 1; - - // Set the partition checksum. - _nvramImage[freePartitionOffset + 1] = - calculatePartitionChecksum(_nvramImage + freePartitionOffset); - - // Calculate the free partition offset and size. - freePartitionOffset += _piPartitionSize + 0x10; - freePartitionSize -= _piPartitionSize + 0x10; - - // Set the signature to 0x7f. - _nvramImage[freePartitionOffset] = 0x7f; - // Set the checksum to 0. - _nvramImage[freePartitionOffset + 1] = 0; - // Set the name for the free partition. - strncpy((char *)(_nvramImage + freePartitionOffset + 4), - kIODTNVRAMFreePartitionName, 12); - // Set the partition size. - *(UInt16 *)(_nvramImage + freePartitionOffset + 2) = - freePartitionSize / 0x10; - // Set the partition checksum. - _nvramImage[freePartitionOffset + 1] = - calculatePartitionChecksum(_nvramImage + freePartitionOffset); - - if (_nvramController != NULL) { - _nvramController->write(0, _nvramImage, kIODTNVRAMImageSize); - } - } - } else { - _piImage = _nvramImage + _piPartitionOffset; + if (_systemPartitionOffset != 0xFFFFFFFF) { + _systemImage = _nvramImage + _systemPartitionOffset; } + DEBUG_ALWAYS("NVRAM : ofPartitionOffset - 0x%x, ofPartitionSize - 0x%x, systemPartitionOffset - 0x%x, systemPartitionSize - 0x%x\n", + (unsigned int) _commonPartitionOffset, (unsigned int) _commonPartitionSize, (unsigned int) _systemPartitionOffset, (unsigned int) _systemPartitionSize); + _lastDeviceSync = 0; _freshInterval = TRUE; // we will allow sync() even before the first 15 minutes have passed. 
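/*
 * Illustrative sketch (not part of the patch): how the allow list (gNVRAMSystemList) and the
 * system GUID introduced earlier in this diff combine to pick a backing region for a
 * variable, mirroring the routing the driver applies between its system and common
 * dictionaries. The tiny allow list and the chooseRegion() helper are assumptions used only
 * for the demonstration.
 */
#include <cstring>
#include <cstdio>

static const char * const kAllowListSketch[] = { "boot-args", "auto-boot", nullptr };

static bool
inAllowList(const char *name)
{
	for (unsigned i = 0; kAllowListSketch[i] != nullptr; i++) {
		if (strcmp(name, kAllowListSketch[i]) == 0) {
			return true;
		}
	}
	return false;
}

// Pick "system" when a system region exists and either the caller used the system GUID or
// the variable is allow-listed; otherwise fall back to "common".
static const char *
chooseRegion(bool haveSystemRegion, bool callerUsedSystemGuid, const char *name)
{
	if (haveSystemRegion && (callerUsedSystemGuid || inAllowList(name))) {
		return "system";
	}
	return "common";
}

int
main(void)
{
	printf("%s\n", chooseRegion(true, false, "boot-args"));   // system (allow list)
	printf("%s\n", chooseRegion(true, false, "my-variable")); // common
	printf("%s\n", chooseRegion(false, true, "boot-args"));   // common (no system region)
	return 0;
}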
- initOFVariables(); + initVariables(); } void IODTNVRAM::syncInternal(bool rateLimit) { + DEBUG_INFO("rateLimit=%d\n", rateLimit); + // Don't try to perform controller operations if none has been registered. - if (_nvramController == NULL) { + if (_nvramController == nullptr) { return; } @@ -280,7 +926,10 @@ IODTNVRAM::syncInternal(bool rateLimit) return; } + DEBUG_INFO("Calling sync()\n"); + NVRAMLOCK(); _nvramController->sync(); + NVRAMUNLOCK(); } void @@ -292,104 +941,232 @@ IODTNVRAM::sync(void) bool IODTNVRAM::serializeProperties(OSSerialize *s) const { - bool result, hasPrivilege; - UInt32 variablePerm; - const OSSymbol *key; - OSDictionary *dict; - OSCollectionIterator *iter = NULL; + const OSSymbol *key; + OSSharedPtr dict; + OSSharedPtr iter; + bool result = false; + unsigned int totalCapacity = 0; + + NVRAMLOCK(); + if (_commonDict) { + totalCapacity += _commonDict->getCapacity(); + } - // Verify permissions. - hasPrivilege = (kIOReturnSuccess == IOUserClient::clientHasPrivilege(current_task(), kIONVRAMPrivilege)); + if (_systemDict) { + totalCapacity += _systemDict->getCapacity(); + } + + dict = OSDictionary::withCapacity(totalCapacity); + + if (dict == nullptr) { + DEBUG_ERROR("No dictionary\n"); + goto unlock; + } - if (_ofDict == NULL) { - /* No nvram. Return an empty dictionary. */ - dict = OSDictionary::withCapacity(1); - if (dict == NULL) { - return false; + // Copy system entries first if present then copy unique common entries + if (_systemDict != nullptr) { + iter = OSCollectionIterator::withCollection(_systemDict.get()); + if (iter == nullptr) { + DEBUG_ERROR("failed to create iterator\n"); + goto unlock; } - } else { - IOLockLock(_ofLock); - dict = OSDictionary::withDictionary(_ofDict); - IOLockUnlock(_ofLock); - if (dict == NULL) { - return false; - } - - /* Copy properties with client privilege. 
*/ - iter = OSCollectionIterator::withCollection(dict); - if (iter == NULL) { - dict->release(); - return false; - } - while (1) { - key = OSDynamicCast(OSSymbol, iter->getNextObject()); - if (key == NULL) { - break; + + while ((key = OSDynamicCast(OSSymbol, iter->getNextObject()))) { + if (verifyPermission(kIONVRAMOperationRead, &gAppleSystemVariableGuid, key)) { + dict->setObject(key, _systemDict->getObject(key)); } + } - variablePerm = getOFVariablePerm(key); - if ((hasPrivilege || (variablePerm != kOFVariablePermRootOnly)) && - (!(variablePerm == kOFVariablePermKernelOnly && current_task() != kernel_task))) { - } else { - dict->removeObject(key); - iter->reset(); + iter.reset(); + } + + if (_commonDict != nullptr) { + iter = OSCollectionIterator::withCollection(_commonDict.get()); + if (iter == nullptr) { + DEBUG_ERROR("failed to create common iterator\n"); + goto unlock; + } + + while ((key = OSDynamicCast(OSSymbol, iter->getNextObject()))) { + if (dict->getObject(key) != nullptr) { + // Skip non uniques + continue; + } + if (verifyPermission(kIONVRAMOperationRead, &gAppleNVRAMGuid, key)) { + dict->setObject(key, _commonDict->getObject(key)); } } } result = dict->serialize(s); - dict->release(); - if (iter != NULL) { - iter->release(); - } +unlock: + NVRAMUNLOCK(); + + DEBUG_INFO("result=%d\n", result); return result; } -OSObject * -IODTNVRAM::copyProperty(const OSSymbol *aKey) const +IOReturn +IODTNVRAM::chooseDictionary(IONVRAMOperation operation, const uuid_t *varGuid, const char *variableName, OSDictionary **dict) const { - IOReturn result; - UInt32 variablePerm; - OSObject *theObject; + if (_systemDict != nullptr) { + bool systemGuid = uuid_compare(*varGuid, gAppleSystemVariableGuid) == 0; - if (_ofDict == NULL) { - return NULL; + if (variableInAllowList(variableName)) { + DEBUG_INFO("Using system dictionary due to allow list\n"); + if (!systemGuid) { + DEBUG_ERROR("System GUID NOT used for %s\n", variableName); + } + *dict = _systemDict.get(); + } else if (systemGuid) { + DEBUG_INFO("Using system dictionary via GUID\n"); + *dict = _systemDict.get(); + } else { + DEBUG_INFO("Using common dictionary\n"); + *dict = _commonDict.get(); + } + } else { + DEBUG_INFO("Defaulting to common dictionary\n"); + *dict = _commonDict.get(); } - // Verify permissions. 
- variablePerm = getOFVariablePerm(aKey); - result = IOUserClient::clientHasPrivilege(current_task(), kIONVRAMPrivilege); - if (result != kIOReturnSuccess) { - if (variablePerm == kOFVariablePermRootOnly) { - return NULL; + return kIOReturnSuccess; +} + +bool +IODTNVRAM::handleSpecialVariables(const char *name, uuid_t *guid, OSObject *obj, IOReturn *error) +{ + IOReturn err = kIOReturnSuccess; + bool special = false; + + NVRAMLOCKASSERT(); + + if (strcmp(name, "ResetNVRam") == 0) { + DEBUG_INFO("%s requested\n", name); + + if (uuid_compare(*guid, gAppleSystemVariableGuid) == 0) { + if (_systemDict != nullptr) { + _systemDict->flushCollection(); + } + + _commonDict->flushCollection(); + DEBUG_INFO("system & common dictionary flushed\n"); + + err = syncVariables(); + } + + special = true; + } else if (strcmp(name, "ObliterateNVRam") == 0) { + DEBUG_INFO("%s requested\n", name); + + if ((_systemDict != nullptr) && (uuid_compare(*guid, gAppleSystemVariableGuid) == 0)) { + const OSSymbol *key; + OSSharedPtr newDict; + OSSharedPtr iter; + + newDict = OSDictionary::withCapacity(_systemDict->getCapacity()); + iter = OSCollectionIterator::withCollection(newDict.get()); + if ((newDict == nullptr) || (iter == nullptr)) { + err = kIOReturnNoMemory; + goto exit; + } + + while ((key = OSDynamicCast(OSSymbol, iter->getNextObject()))) { + const OSSymbol *key = OSDynamicCast(OSSymbol, iter->getNextObject()); + if (key == nullptr) { + err = kIOReturnNoMemory; + goto exit; + } + + if (!verifyPermission(kIONVRAMOperationObliterate, &gAppleSystemVariableGuid, key)) { + newDict->setObject(key, _systemDict->getObject(key)); + } + } + + _systemDict = newDict; + + DEBUG_INFO("system dictionary flushed\n"); + } else if (_commonDict != nullptr) { + const OSSymbol *key; + OSSharedPtr newDict; + OSSharedPtr iter; + + newDict = OSDictionary::withCapacity(_commonDict->getCapacity()); + iter = OSCollectionIterator::withCollection(newDict.get()); + if ((newDict == nullptr) || (iter == nullptr)) { + err = kIOReturnNoMemory; + goto exit; + } + + while ((key = OSDynamicCast(OSSymbol, iter->getNextObject()))) { + if (!verifyPermission(kIONVRAMOperationObliterate, &gAppleNVRAMGuid, key)) { + newDict->setObject(key, _commonDict->getObject(key)); + } + } + + _commonDict = newDict; + + DEBUG_INFO("common dictionary flushed\n"); } + + special = true; + err = syncVariables(); } - if (variablePerm == kOFVariablePermKernelOnly && current_task() != kernel_task) { - return NULL; + +exit: + if (error) { + *error = err; } - IOLockLock(_ofLock); - theObject = _ofDict->getObject(aKey); - if (theObject) { - theObject->retain(); + return special; +} + +OSSharedPtr +IODTNVRAM::copyProperty(const OSSymbol *aKey) const +{ + IOReturn result; + const char *variableName; + uuid_t varGuid; + OSDictionary *dict; + OSSharedPtr theObject = nullptr; + + DEBUG_INFO("aKey=%s\n", aKey->getCStringNoCopy()); + + parseVariableName(aKey->getCStringNoCopy(), &varGuid, &variableName); + + result = chooseDictionary(kIONVRAMOperationRead, &varGuid, variableName, &dict); + if (result != kIOReturnSuccess) { + goto exit; + } + + if (!verifyPermission(kIONVRAMOperationRead, &varGuid, variableName)) { + DEBUG_INFO("Not privileged\n"); + goto exit; + } + + NVRAMLOCK(); + theObject.reset(dict->getObject(variableName), OSRetain); + NVRAMUNLOCK(); + + if (theObject != nullptr) { + DEBUG_INFO("found data\n"); } - IOLockUnlock(_ofLock); +exit: return theObject; } -OSObject * +OSSharedPtr IODTNVRAM::copyProperty(const char *aKey) const { - const OSSymbol *keySymbol; - 
OSObject *theObject = NULL; + OSSharedPtr keySymbol; + OSSharedPtr theObject; keySymbol = OSSymbol::withCString(aKey); - if (keySymbol != NULL) { - theObject = copyProperty(keySymbol); - keySymbol->release(); + if (keySymbol != nullptr) { + theObject = copyProperty(keySymbol.get()); } return theObject; @@ -398,107 +1175,168 @@ IODTNVRAM::copyProperty(const char *aKey) const OSObject * IODTNVRAM::getProperty(const OSSymbol *aKey) const { - OSObject *theObject; - - theObject = copyProperty(aKey); - if (theObject) { - theObject->release(); - } + // The shared pointer gets released at the end of the function, + // and returns a view into theObject. + OSSharedPtr theObject = copyProperty(aKey); - return theObject; + return theObject.get(); } OSObject * IODTNVRAM::getProperty(const char *aKey) const { - OSObject *theObject; - - theObject = copyProperty(aKey); - if (theObject) { - theObject->release(); - } + // The shared pointer gets released at the end of the function, + // and returns a view into theObject. + OSSharedPtr theObject = copyProperty(aKey); - return theObject; + return theObject.get(); } IOReturn IODTNVRAM::setPropertyInternal(const OSSymbol *aKey, OSObject *anObject) { - IOReturn result = kIOReturnSuccess; - UInt32 propType, propPerm; - OSString *tmpString = NULL; - OSObject *propObject = NULL, *oldObject; - - if (_ofDict == NULL) { - return false; - } - - // Verify permissions. - propPerm = getOFVariablePerm(aKey); - if (IOUserClient::clientHasPrivilege(current_task(), kIONVRAMPrivilege) != kIOReturnSuccess) { - if (propPerm != kOFVariablePermUserWrite) { - return kIOReturnNotPrivileged; + IOReturn result = kIOReturnSuccess; + bool remove = false; + OSString *tmpString = nullptr; + OSSharedPtr propObject, oldObject; + OSSharedPtr sharedObject(anObject, OSRetain); + const char *variableName; + uuid_t varGuid; + OSDictionary *dict; + bool deletePropertyKey, syncNowPropertyKey, forceSyncNowPropertyKey; + size_t propDataSize = 0; + + DEBUG_INFO("aKey=%s\n", aKey->getCStringNoCopy()); + + parseVariableName(aKey->getCStringNoCopy(), &varGuid, &variableName); + deletePropertyKey = strncmp(variableName, kIONVRAMDeletePropertyKey, sizeof(kIONVRAMDeletePropertyKey)) == 0; + syncNowPropertyKey = strncmp(variableName, kIONVRAMSyncNowPropertyKey, sizeof(kIONVRAMSyncNowPropertyKey)) == 0; + forceSyncNowPropertyKey = strncmp(variableName, kIONVRAMForceSyncNowPropertyKey, sizeof(kIONVRAMForceSyncNowPropertyKey)) == 0; + + if (deletePropertyKey) { + tmpString = OSDynamicCast(OSString, anObject); + if (tmpString != nullptr) { + DEBUG_INFO("kIONVRAMDeletePropertyKey found\n"); + OSSharedPtr sharedKey = OSSymbol::withString(tmpString); + removeProperty(sharedKey.get()); + } else { + DEBUG_INFO("kIONVRAMDeletePropertyKey value needs to be an OSString\n"); + result = kIOReturnError; + } + goto exit; + } else if (syncNowPropertyKey || forceSyncNowPropertyKey) { + tmpString = OSDynamicCast(OSString, anObject); + DEBUG_INFO("NVRAM sync key %s found\n", aKey->getCStringNoCopy()); + if (tmpString != nullptr) { + // We still want to throttle NVRAM commit rate for SyncNow. ForceSyncNow is provided as a really big hammer. 
+			syncInternal(syncNowPropertyKey);
+		} else {
+			DEBUG_INFO("%s value needs to be an OSString\n", variableName);
+			result = kIOReturnError;
 		}
+		goto exit;
 	}
-	if (propPerm == kOFVariablePermKernelOnly && current_task() != kernel_task) {
-		return kIOReturnNotPrivileged;
+
+	result = chooseDictionary(kIONVRAMOperationWrite, &varGuid, variableName, &dict);
+	if (result != kIOReturnSuccess) {
+		goto exit;
 	}
-	// Don't allow change of 'aapl,panic-info'.
-	if (aKey->isEqualTo(kIODTNVRAMPanicInfoKey)) {
-		return kIOReturnUnsupported;
+	if (!verifyPermission(kIONVRAMOperationWrite, &varGuid, variableName)) {
+		DEBUG_INFO("Not privileged\n");
+		result = kIOReturnNotPrivileged;
+		goto exit;
 	}
 	// Make sure the object is of the correct type.
-	propType = getOFVariableType(aKey);
-	switch (propType) {
+	switch (getVariableType(variableName)) {
 	case kOFVariableTypeBoolean:
-		propObject = OSDynamicCast(OSBoolean, anObject);
+		propObject = OSDynamicPtrCast<OSBoolean>(sharedObject);
 		break;
 	case kOFVariableTypeNumber:
-		propObject = OSDynamicCast(OSNumber, anObject);
+		propObject = OSDynamicPtrCast<OSNumber>(sharedObject);
 		break;
 	case kOFVariableTypeString:
-		propObject = OSDynamicCast(OSString, anObject);
-		if (propObject != NULL && aKey->isEqualTo(kIONVRAMBootArgsKey) && ((OSString*)propObject)->getLength() >= BOOT_LINE_LENGTH) {
-			return kIOReturnNoSpace;
+		propObject = OSDynamicPtrCast<OSString>(sharedObject);
+		if (propObject != nullptr) {
+			propDataSize = (OSDynamicPtrCast<OSString>(propObject))->getLength();
+
+			if (aKey->isEqualTo(kIONVRAMBootArgsKey) && (propDataSize >= BOOT_LINE_LENGTH)) {
+				DEBUG_ERROR("boot-args size too large for BOOT_LINE_LENGTH, propDataSize=%zu\n", propDataSize);
+				result = kIOReturnNoSpace;
+				goto exit;
+			}
 		}
 		break;
 	case kOFVariableTypeData:
-		propObject = OSDynamicCast(OSData, anObject);
-		if (propObject == NULL) {
-			tmpString = OSDynamicCast(OSString, anObject);
-			if (tmpString != NULL) {
+		propObject = OSDynamicPtrCast<OSData>(sharedObject);
+		if (propObject == nullptr) {
+			tmpString = OSDynamicCast(OSString, sharedObject.get());
+			if (tmpString != nullptr) {
 				propObject = OSData::withBytes(tmpString->getCStringNoCopy(), tmpString->getLength());
 			}
 		}
+
+		if (propObject != nullptr) {
+			propDataSize = (OSDynamicPtrCast<OSData>(propObject))->getLength();
+		}
+
+#if defined(XNU_TARGET_OS_OSX)
+		if ((propObject != nullptr) && ((OSDynamicPtrCast<OSData>(propObject))->getLength() == 0)) {
+			remove = true;
+		}
+#endif /* defined(XNU_TARGET_OS_OSX) */
+		break;
+
+	default:
 		break;
 	}
-	if (propObject == NULL) {
-		return kIOReturnBadArgument;
+	if (propObject == nullptr) {
+		DEBUG_INFO("No property object\n");
+		result = kIOReturnBadArgument;
+		goto exit;
 	}
-	IOLockLock(_ofLock);
+	if (!verifyWriteSizeLimit(&varGuid, variableName, propDataSize)) {
+		DEBUG_ERROR("Property data size of %zu too long for %s\n", propDataSize, variableName);
+		result = kIOReturnNoSpace;
+		goto exit;
+	}
-	oldObject = _ofDict->getObject(aKey);
-	if (oldObject) {
-		oldObject->retain();
+	NVRAMLOCK();
+
+	if (handleSpecialVariables(variableName, &varGuid, propObject.get(), &result)) {
+		goto unlock;
 	}
-	if (!_ofDict->setObject(aKey, propObject)) {
-		result = kIOReturnBadArgument;
+
+	oldObject.reset(dict->getObject(variableName), OSRetain);
+	if (remove == false) {
+		DEBUG_INFO("Adding object\n");
+		if (!dict->setObject(variableName, propObject.get())) {
+			result = kIOReturnBadArgument;
+		}
+	} else {
+		DEBUG_INFO("Removing object\n");
+		// Check for existence so we can decide whether we need to sync variables
+		if (oldObject) {
+			result = removePropertyInternal(aKey);
+		}
else { + result = kIOReturnNotFound; + } } if (result == kIOReturnSuccess) { - if (syncVariables() != kIOReturnSuccess) { + result = syncVariables(); + if (result != kIOReturnSuccess) { + DEBUG_ERROR("syncVariables failed, result=0x%08x\n", result); if (oldObject) { - _ofDict->setObject(aKey, oldObject); + dict->setObject(variableName, oldObject.get()); } else { - _ofDict->removeObject(aKey); + dict->removeObject(variableName); } (void) syncVariables(); result = kIOReturnNoMemory; @@ -506,13 +1344,17 @@ IODTNVRAM::setPropertyInternal(const OSSymbol *aKey, OSObject *anObject) } if (oldObject) { - oldObject->release(); + oldObject.reset(); } if (tmpString) { - propObject->release(); + propObject.reset(); } - IOLockUnlock(_ofLock); +unlock: + NVRAMUNLOCK(); + +exit: + DEBUG_INFO("result=0x%08x\n", result); return result; } @@ -526,103 +1368,92 @@ IODTNVRAM::setProperty(const OSSymbol *aKey, OSObject *anObject) void IODTNVRAM::removeProperty(const OSSymbol *aKey) { - bool result; - UInt32 propPerm; + IOReturn ret; - if (_ofDict == NULL) { - return; + NVRAMLOCK(); + + ret = removePropertyInternal(aKey); + + NVRAMUNLOCK(); + + if (ret != kIOReturnSuccess) { + DEBUG_INFO("removePropertyInternal failed, ret=0x%08x\n", ret); } +} - // Verify permissions. - propPerm = getOFVariablePerm(aKey); - result = IOUserClient::clientHasPrivilege(current_task(), kIOClientPrivilegeAdministrator); +IOReturn +IODTNVRAM::removePropertyInternal(const OSSymbol *aKey) +{ + IOReturn result; + const char *variableName; + uuid_t varGuid; + OSDictionary *dict; + + DEBUG_INFO("aKey=%s\n", aKey->getCStringNoCopy()); + + NVRAMLOCKASSERT(); + + parseVariableName(aKey->getCStringNoCopy(), &varGuid, &variableName); + + result = chooseDictionary(kIONVRAMOperationDelete, &varGuid, variableName, &dict); if (result != kIOReturnSuccess) { - if (propPerm != kOFVariablePermUserWrite) { - return; - } - } - if (propPerm == kOFVariablePermKernelOnly && current_task() != kernel_task) { - return; + goto exit; } - // Don't allow change of 'aapl,panic-info'. - if (aKey->isEqualTo(kIODTNVRAMPanicInfoKey)) { - return; + if (!verifyPermission(kIONVRAMOperationDelete, &varGuid, variableName)) { + DEBUG_INFO("Not priveleged\n"); + result = kIOReturnNotPrivileged; + goto exit; } // If the object exists, remove it from the dictionary. 
- - IOLockLock(_ofLock); - result = _ofDict->getObject(aKey) != NULL; - if (result) { - _ofDict->removeObject(aKey); + if (dict->getObject(variableName) != nullptr) { + dict->removeObject(variableName); + result = syncVariables(); } - if (result) { - (void) syncVariables(); - } - - IOLockUnlock(_ofLock); +exit: + return result; } IOReturn IODTNVRAM::setProperties(OSObject *properties) { - IOReturn res = kIOReturnSuccess; - OSObject *object; - const OSSymbol *key; - const OSString *tmpStr; - OSDictionary *dict; - OSCollectionIterator *iter; + IOReturn result = kIOReturnSuccess; + OSObject *object; + const OSSymbol *key; + OSDictionary *dict; + OSSharedPtr iter; dict = OSDynamicCast(OSDictionary, properties); - if (dict == NULL) { + if (dict == nullptr) { + DEBUG_ERROR("Not a dictionary\n"); return kIOReturnBadArgument; } iter = OSCollectionIterator::withCollection(dict); - if (iter == NULL) { + if (iter == nullptr) { + DEBUG_ERROR("Couldn't create iterator\n"); return kIOReturnBadArgument; } - while (res == kIOReturnSuccess) { + while (result == kIOReturnSuccess) { key = OSDynamicCast(OSSymbol, iter->getNextObject()); - if (key == NULL) { + if (key == nullptr) { break; } object = dict->getObject(key); - if (object == NULL) { + if (object == nullptr) { continue; } - if (key->isEqualTo(kIONVRAMDeletePropertyKey)) { - tmpStr = OSDynamicCast(OSString, object); - if (tmpStr != NULL) { - key = OSSymbol::withString(tmpStr); - removeProperty(key); - key->release(); - } else { - res = kIOReturnError; - } - } else if (key->isEqualTo(kIONVRAMSyncNowPropertyKey) || key->isEqualTo(kIONVRAMForceSyncNowPropertyKey)) { - tmpStr = OSDynamicCast(OSString, object); - if (tmpStr != NULL) { - // We still want to throttle NVRAM commit rate for SyncNow. ForceSyncNow is provided as a really big hammer. 
- syncInternal(key->isEqualTo(kIONVRAMSyncNowPropertyKey)); - } else { - res = kIOReturnError; - } - } else { - if (!setProperty(key, object)) { - res = kIOReturnNoSpace; - } - } + result = setPropertyInternal(key, object); } - iter->release(); + DEBUG_INFO("result=0x%08x\n", result); - return res; + return result; } IOReturn @@ -666,7 +1497,7 @@ IODTNVRAM::writeNVRAMProperty(IORegistryEntry *entry, OSDictionary * IODTNVRAM::getNVRAMPartitions(void) { - return _nvramPartitionLengths; + return _nvramPartitionLengths.get(); } IOReturn @@ -682,7 +1513,7 @@ IODTNVRAM::readNVRAMPartition(const OSSymbol *partitionID, partitionLengthNumber = (OSNumber *)_nvramPartitionLengths->getObject(partitionID); - if ((partitionOffsetNumber == NULL) || (partitionLengthNumber == NULL)) { + if ((partitionOffsetNumber == nullptr) || (partitionLengthNumber == nullptr)) { return kIOReturnNotFound; } @@ -692,7 +1523,7 @@ IODTNVRAM::readNVRAMPartition(const OSSymbol *partitionID, if (os_add_overflow(offset, length, &end)) { return kIOReturnBadArgument; } - if ((buffer == NULL) || (length == 0) || (end > partitionLength)) { + if ((buffer == nullptr) || (length == 0) || (end > partitionLength)) { return kIOReturnBadArgument; } @@ -714,7 +1545,7 @@ IODTNVRAM::writeNVRAMPartition(const OSSymbol *partitionID, partitionLengthNumber = (OSNumber *)_nvramPartitionLengths->getObject(partitionID); - if ((partitionOffsetNumber == NULL) || (partitionLengthNumber == NULL)) { + if ((partitionOffsetNumber == nullptr) || (partitionLengthNumber == nullptr)) { return kIOReturnNotFound; } @@ -724,14 +1555,14 @@ IODTNVRAM::writeNVRAMPartition(const OSSymbol *partitionID, if (os_add_overflow(offset, length, &end)) { return kIOReturnBadArgument; } - if ((buffer == NULL) || (length == 0) || (end > partitionLength)) { + if ((buffer == nullptr) || (length == 0) || (end > partitionLength)) { return kIOReturnBadArgument; } bcopy(buffer, _nvramImage + partitionOffset + offset, length); - if (_nvramController != NULL) { - _nvramController->write(0, _nvramImage, kIODTNVRAMImageSize); + if (_nvramController != nullptr) { + _nvramController->write(0, _nvramImage, _nvramSize); } return kIOReturnSuccess; @@ -740,33 +1571,7 @@ IODTNVRAM::writeNVRAMPartition(const OSSymbol *partitionID, IOByteCount IODTNVRAM::savePanicInfo(UInt8 *buffer, IOByteCount length) { - if ((_piImage == NULL) || (length <= 0)) { - return 0; - } - - if (length > (_piPartitionSize - 4)) { - length = _piPartitionSize - 4; - } - - // Save the Panic Info. - bcopy(buffer, _piImage + 4, length); - - // Save the Panic Info length. - *(UInt32 *)_piImage = length; - - if (_nvramController != NULL) { - _nvramController->write(0, _nvramImage, kIODTNVRAMImageSize); - } - /* - * This prevents OF variables from being committed if the system has panicked - */ - _systemPaniced = true; - /* The call to sync() forces the NVRAM controller to write the panic info - * partition to NVRAM. 
- */ - sync(); - - return length; + return 0; } // Private methods @@ -788,92 +1593,84 @@ IODTNVRAM::calculatePartitionChecksum(UInt8 *partitionHeader) } IOReturn -IODTNVRAM::initOFVariables(void) +IODTNVRAM::initVariables(void) { - UInt32 cnt; - UInt8 *propName, *propData; - UInt32 propNameLength, propDataLength; - const OSSymbol *propSymbol; - OSObject *propObject; + UInt32 cnt; + UInt8 *propName, *propData; + UInt32 propNameLength, propDataLength, regionIndex; + OSSharedPtr propSymbol; + OSSharedPtr propObject; + NVRAMRegionInfo *currentRegion; - if (_ofImage == NULL) { - return kIOReturnNotReady; - } + NVRAMRegionInfo variableRegions[] = { { NVRAM_CHRP_PARTITION_NAME_COMMON, _commonPartitionOffset, _commonPartitionSize, _commonDict, _commonImage}, + { NVRAM_CHRP_PARTITION_NAME_SYSTEM, _systemPartitionOffset, _systemPartitionSize, _systemDict, _systemImage} }; - _ofDict = OSDictionary::withCapacity(1); - _ofLock = IOLockAlloc(); - if (!_ofDict || !_ofLock) { - return kIOReturnNoMemory; - } + DEBUG_INFO("...\n"); - cnt = 0; - while (cnt < _ofPartitionSize) { - // Break if there is no name. - if (_ofImage[cnt] == '\0') { - break; + for (regionIndex = 0; regionIndex < ARRAY_SIZE(variableRegions); regionIndex++) { + currentRegion = &variableRegions[regionIndex]; + + if (currentRegion->size == 0) { + continue; } - // Find the length of the name. - propName = _ofImage + cnt; - for (propNameLength = 0; (cnt + propNameLength) < _ofPartitionSize; - propNameLength++) { - if (_ofImage[cnt + propNameLength] == '=') { + currentRegion->dict = OSDictionary::withCapacity(1); + + DEBUG_INFO("region = %s\n", currentRegion->name); + cnt = 0; + while (cnt < currentRegion->size) { + // Break if there is no name. + if (currentRegion->image[cnt] == '\0') { break; } - } - // Break if the name goes past the end of the partition. - if ((cnt + propNameLength) >= _ofPartitionSize) { - break; - } - cnt += propNameLength + 1; + // Find the length of the name. + propName = currentRegion->image + cnt; + for (propNameLength = 0; (cnt + propNameLength) < currentRegion->size; + propNameLength++) { + if (currentRegion->image[cnt + propNameLength] == '=') { + break; + } + } - propData = _ofImage + cnt; - for (propDataLength = 0; (cnt + propDataLength) < _ofPartitionSize; - propDataLength++) { - if (_ofImage[cnt + propDataLength] == '\0') { + // Break if the name goes past the end of the partition. + if ((cnt + propNameLength) >= currentRegion->size) { break; } - } + cnt += propNameLength + 1; - // Break if the data goes past the end of the partition. - if ((cnt + propDataLength) >= _ofPartitionSize) { - break; - } - cnt += propDataLength + 1; + propData = currentRegion->image + cnt; + for (propDataLength = 0; (cnt + propDataLength) < currentRegion->size; + propDataLength++) { + if (currentRegion->image[cnt + propDataLength] == '\0') { + break; + } + } + + // Break if the data goes past the end of the partition. 
+ if ((cnt + propDataLength) >= currentRegion->size) { + break; + } + cnt += propDataLength + 1; - if (convertPropToObject(propName, propNameLength, - propData, propDataLength, - &propSymbol, &propObject)) { - _ofDict->setObject(propSymbol, propObject); - propSymbol->release(); - propObject->release(); + if (convertPropToObject(propName, propNameLength, + propData, propDataLength, + propSymbol, propObject)) { + DEBUG_INFO("adding %s, dataLength=%u\n", propSymbol.get()->getCStringNoCopy(), (unsigned int)propDataLength); + currentRegion->dict.get()->setObject(propSymbol.get(), propObject.get()); + } } } // Create the boot-args property if it is not in the dictionary. - if (_ofDict->getObject(kIONVRAMBootArgsKey) == NULL) { + if (_commonDict->getObject(kIONVRAMBootArgsKey) == nullptr) { propObject = OSString::withCStringNoCopy(""); - if (propObject != NULL) { - _ofDict->setObject(kIONVRAMBootArgsKey, propObject); - propObject->release(); + if (propObject != nullptr) { + _commonDict->setObject(kIONVRAMBootArgsKey, propObject.get()); } } - if (_piImage != NULL) { - propDataLength = *(UInt32 *)_piImage; - if ((propDataLength != 0) && (propDataLength <= (_piPartitionSize - 4))) { - propObject = OSData::withBytes(_piImage + 4, propDataLength); - _ofDict->setObject(kIODTNVRAMPanicInfoKey, propObject); - propObject->release(); - - // Clear the length from _piImage and mark dirty. - *(UInt32 *)_piImage = 0; - if (_nvramController != NULL) { - _nvramController->write(0, _nvramImage, kIODTNVRAMImageSize); - } - } - } + DEBUG_INFO("%s _commonDict=%p _systemDict=%p\n", __FUNCTION__, _commonDict.get(), _systemDict.get()); return kIOReturnSuccess; } @@ -887,180 +1684,122 @@ IODTNVRAM::syncOFVariables(void) IOReturn IODTNVRAM::syncVariables(void) { - bool ok; - UInt32 length, maxLength; - UInt8 *buffer, *tmpBuffer; - const OSSymbol *tmpSymbol; - OSObject *tmpObject; - OSCollectionIterator *iter; + bool ok; + UInt32 length, maxLength, regionIndex; + UInt8 *buffer, *tmpBuffer; + const OSSymbol *tmpSymbol; + OSObject *tmpObject; + OSSharedPtr iter; + NVRAMRegionInfo *currentRegion; - IOLockAssert(_ofLock, kIOLockAssertOwned); + NVRAMRegionInfo variableRegions[] = { { NVRAM_CHRP_PARTITION_NAME_COMMON, _commonPartitionOffset, _commonPartitionSize, _commonDict, _commonImage}, + { NVRAM_CHRP_PARTITION_NAME_SYSTEM, _systemPartitionOffset, _systemPartitionSize, _systemDict, _systemImage} }; - if ((_ofImage == NULL) || (_ofDict == NULL) || _systemPaniced) { + NVRAMLOCKASSERT(); + + if (_systemPanicked) { return kIOReturnNotReady; } - buffer = tmpBuffer = IONew(UInt8, _ofPartitionSize); - if (buffer == NULL) { - return kIOReturnNoMemory; + if (_nvramController == nullptr) { + DEBUG_ERROR("No _nvramController\n"); + return kIOReturnNotReady; } - bzero(buffer, _ofPartitionSize); - ok = true; - maxLength = _ofPartitionSize; + DEBUG_INFO("...\n"); - iter = OSCollectionIterator::withCollection(_ofDict); - if (iter == NULL) { - ok = false; - } + for (regionIndex = 0; regionIndex < ARRAY_SIZE(variableRegions); regionIndex++) { + OSSharedPtr sizeUsed; + currentRegion = &variableRegions[regionIndex]; - while (ok) { - tmpSymbol = OSDynamicCast(OSSymbol, iter->getNextObject()); - if (tmpSymbol == NULL) { - break; + if (currentRegion->size == 0) { + continue; } - // Don't save 'aapl,panic-info'. 
- if (tmpSymbol->isEqualTo(kIODTNVRAMPanicInfoKey)) { - continue; + DEBUG_INFO("region = %s\n", currentRegion->name); + buffer = tmpBuffer = IONew(UInt8, currentRegion->size); + if (buffer == nullptr) { + return kIOReturnNoMemory; } + bzero(buffer, currentRegion->size); - tmpObject = _ofDict->getObject(tmpSymbol); + ok = true; + maxLength = currentRegion->size; + + iter = OSCollectionIterator::withCollection(currentRegion->dict.get()); + if (iter == nullptr) { + ok = false; + } + + while (ok) { + tmpSymbol = OSDynamicCast(OSSymbol, iter->getNextObject()); + if (tmpSymbol == nullptr) { + break; + } + + DEBUG_INFO("adding variable %s\n", tmpSymbol->getCStringNoCopy()); + + tmpObject = currentRegion->dict->getObject(tmpSymbol); + + length = maxLength; + ok = convertObjectToProp(tmpBuffer, &length, tmpSymbol, tmpObject); + if (ok) { + tmpBuffer += length; + maxLength -= length; + } + } - length = maxLength; - ok = convertObjectToProp(tmpBuffer, &length, tmpSymbol, tmpObject); if (ok) { - tmpBuffer += length; - maxLength -= length; + bcopy(buffer, currentRegion->image, currentRegion->size); } - } - iter->release(); - if (ok) { - bcopy(buffer, _ofImage, _ofPartitionSize); - } + IODelete(buffer, UInt8, currentRegion->size); - IODelete(buffer, UInt8, _ofPartitionSize); + sizeUsed = OSNumber::withNumber(maxLength, 32); + _nvramController->setProperty(currentRegion->name, sizeUsed.get()); + sizeUsed.reset(); - if (!ok) { - return kIOReturnBadArgument; - } + if ((strncmp(currentRegion->name, NVRAM_CHRP_PARTITION_NAME_SYSTEM, strlen(NVRAM_CHRP_PARTITION_NAME_SYSTEM)) == 0) && + (_systemService != nullptr)) { + _systemService->setProperties(_systemDict.get()); + } else if ((strncmp(currentRegion->name, NVRAM_CHRP_PARTITION_NAME_COMMON, strlen(NVRAM_CHRP_PARTITION_NAME_COMMON)) == 0) && + (_commonService != nullptr)) { + _commonService->setProperties(_commonDict.get()); + } - if (_nvramController != NULL) { - return _nvramController->write(0, _nvramImage, kIODTNVRAMImageSize); + if (!ok) { + return kIOReturnBadArgument; + } } - return kIOReturnNotReady; -} - -struct OFVariable { - const char *variableName; - UInt32 variableType; - UInt32 variablePerm; - SInt32 variableOffset; -}; -typedef struct OFVariable OFVariable; + DEBUG_INFO("ok=%d\n", ok); -enum { - kOWVariableOffsetNumber = 8, - kOWVariableOffsetString = 17 -}; + return _nvramController->write(0, _nvramImage, _nvramSize); +} -static const -OFVariable gOFVariables[] = { - {"little-endian?", kOFVariableTypeBoolean, kOFVariablePermUserRead, 0}, - {"real-mode?", kOFVariableTypeBoolean, kOFVariablePermUserRead, 1}, - {"auto-boot?", kOFVariableTypeBoolean, kOFVariablePermUserRead, 2}, - {"diag-switch?", kOFVariableTypeBoolean, kOFVariablePermUserRead, 3}, - {"fcode-debug?", kOFVariableTypeBoolean, kOFVariablePermUserRead, 4}, - {"oem-banner?", kOFVariableTypeBoolean, kOFVariablePermUserRead, 5}, - {"oem-logo?", kOFVariableTypeBoolean, kOFVariablePermUserRead, 6}, - {"use-nvramrc?", kOFVariableTypeBoolean, kOFVariablePermUserRead, 7}, - {"use-generic?", kOFVariableTypeBoolean, kOFVariablePermUserRead, -1}, - {"default-mac-address?", kOFVariableTypeBoolean, kOFVariablePermUserRead, -1}, - {"real-base", kOFVariableTypeNumber, kOFVariablePermUserRead, 8}, - {"real-size", kOFVariableTypeNumber, kOFVariablePermUserRead, 9}, - {"virt-base", kOFVariableTypeNumber, kOFVariablePermUserRead, 10}, - {"virt-size", kOFVariableTypeNumber, kOFVariablePermUserRead, 11}, - {"load-base", kOFVariableTypeNumber, kOFVariablePermUserRead, 12}, - {"pci-probe-list", 
kOFVariableTypeNumber, kOFVariablePermUserRead, 13}, - {"pci-probe-mask", kOFVariableTypeNumber, kOFVariablePermUserRead, -1}, - {"screen-#columns", kOFVariableTypeNumber, kOFVariablePermUserRead, 14}, - {"screen-#rows", kOFVariableTypeNumber, kOFVariablePermUserRead, 15}, - {"selftest-#megs", kOFVariableTypeNumber, kOFVariablePermUserRead, 16}, - {"boot-device", kOFVariableTypeString, kOFVariablePermUserRead, 17}, - {"boot-file", kOFVariableTypeString, kOFVariablePermUserRead, 18}, - {"boot-screen", kOFVariableTypeString, kOFVariablePermUserRead, -1}, - {"console-screen", kOFVariableTypeString, kOFVariablePermUserRead, -1}, - {"diag-device", kOFVariableTypeString, kOFVariablePermUserRead, 19}, - {"diag-file", kOFVariableTypeString, kOFVariablePermUserRead, 20}, - {"input-device", kOFVariableTypeString, kOFVariablePermUserRead, 21}, - {"output-device", kOFVariableTypeString, kOFVariablePermUserRead, 22}, - {"input-device-1", kOFVariableTypeString, kOFVariablePermUserRead, -1}, - {"output-device-1", kOFVariableTypeString, kOFVariablePermUserRead, -1}, - {"mouse-device", kOFVariableTypeString, kOFVariablePermUserRead, -1}, - {"oem-banner", kOFVariableTypeString, kOFVariablePermUserRead, 23}, - {"oem-logo", kOFVariableTypeString, kOFVariablePermUserRead, 24}, - {"nvramrc", kOFVariableTypeString, kOFVariablePermUserRead, 25}, - {"boot-command", kOFVariableTypeString, kOFVariablePermUserRead, 26}, - {"default-client-ip", kOFVariableTypeString, kOFVariablePermUserRead, -1}, - {"default-server-ip", kOFVariableTypeString, kOFVariablePermUserRead, -1}, - {"default-gateway-ip", kOFVariableTypeString, kOFVariablePermUserRead, -1}, - {"default-subnet-mask", kOFVariableTypeString, kOFVariablePermUserRead, -1}, - {"default-router-ip", kOFVariableTypeString, kOFVariablePermUserRead, -1}, - {"boot-script", kOFVariableTypeString, kOFVariablePermUserRead, -1}, - {"boot-args", kOFVariableTypeString, kOFVariablePermUserRead, -1}, - {"aapl,pci", kOFVariableTypeData, kOFVariablePermRootOnly, -1}, - {"security-mode", kOFVariableTypeString, kOFVariablePermUserRead, -1}, - {"security-password", kOFVariableTypeData, kOFVariablePermRootOnly, -1}, - {"boot-image", kOFVariableTypeData, kOFVariablePermUserWrite, -1}, - {"com.apple.System.fp-state", kOFVariableTypeData, kOFVariablePermKernelOnly, -1}, -#if CONFIG_EMBEDDED - {"backlight-level", kOFVariableTypeData, kOFVariablePermUserWrite, -1}, - {"com.apple.System.sep.art", kOFVariableTypeData, kOFVariablePermKernelOnly, -1}, - {"com.apple.System.boot-nonce", kOFVariableTypeString, kOFVariablePermKernelOnly, -1}, - {"darkboot", kOFVariableTypeBoolean, kOFVariablePermUserWrite, -1}, - {"acc-mb-ld-lifetime", kOFVariableTypeNumber, kOFVariablePermKernelOnly, -1}, - {"acc-cm-override-charger-count", kOFVariableTypeNumber, kOFVariablePermKernelOnly, -1}, - {"acc-cm-override-count", kOFVariableTypeNumber, kOFVariablePermKernelOnly, -1}, - {"enter-tdm-mode", kOFVariableTypeBoolean, kOFVariablePermUserWrite, -1}, - {"nonce-seeds", kOFVariableTypeData, kOFVariablePermKernelOnly, -1}, -#endif - {NULL, kOFVariableTypeData, kOFVariablePermUserRead, -1} -}; +UInt32 +IODTNVRAM::getOFVariableType(const char *propName) const +{ + return 0; +} UInt32 IODTNVRAM::getOFVariableType(const OSSymbol *propSymbol) const { - const OFVariable *ofVar; + return 0; +} - ofVar = gOFVariables; - while (1) { - if ((ofVar->variableName == NULL) || - propSymbol->isEqualTo(ofVar->variableName)) { - break; - } - ofVar++; - } - return ofVar->variableType; +UInt32 +IODTNVRAM::getOFVariablePerm(const char 
*propName) const
+{
+	return 0;
 }
 UInt32
 IODTNVRAM::getOFVariablePerm(const OSSymbol *propSymbol) const
 {
-	const OFVariable *ofVar;
-
-	ofVar = gOFVariables;
-	while (1) {
-		if ((ofVar->variableName == NULL) ||
-		    propSymbol->isEqualTo(ofVar->variableName)) {
-			break;
-		}
-		ofVar++;
-	}
-
-	return ofVar->variablePerm;
+	return 0;
 }
 bool
@@ -1070,50 +1809,43 @@ IODTNVRAM::getOWVariableInfo(UInt32 variableNumber, const OSSymbol **propSymbol,
 	/* UNSUPPORTED */
 	return false;
 }
-
 bool
 IODTNVRAM::convertPropToObject(UInt8 *propName, UInt32 propNameLength,
     UInt8 *propData, UInt32 propDataLength,
     const OSSymbol **propSymbol, OSObject **propObject)
 {
-	UInt32 propType;
-	const OSSymbol *tmpSymbol;
-	OSObject *tmpObject;
-	OSNumber *tmpNumber;
-	OSString *tmpString;
+	OSSharedPtr<const OSSymbol> tmpSymbol;
+	OSSharedPtr<OSNumber> tmpNumber;
+	OSSharedPtr<OSString> tmpString;
+	OSSharedPtr<OSObject> tmpObject = nullptr;
-	// Create the symbol.
 	propName[propNameLength] = '\0';
 	tmpSymbol = OSSymbol::withCString((const char *)propName);
 	propName[propNameLength] = '=';
-	if (tmpSymbol == NULL) {
+	if (tmpSymbol == nullptr) {
 		return false;
 	}
-	propType = getOFVariableType(tmpSymbol);
-
-	// Create the object.
-	tmpObject = NULL;
-	switch (propType) {
+	switch (getVariableType(tmpSymbol.get())) {
 	case kOFVariableTypeBoolean:
 		if (!strncmp("true", (const char *)propData, propDataLength)) {
-			tmpObject = kOSBooleanTrue;
+			tmpObject.reset(kOSBooleanTrue, OSRetain);
 		} else if (!strncmp("false", (const char *)propData, propDataLength)) {
-			tmpObject = kOSBooleanFalse;
+			tmpObject.reset(kOSBooleanFalse, OSRetain);
 		}
 		break;
 	case kOFVariableTypeNumber:
-		tmpNumber = OSNumber::withNumber(strtol((const char *)propData, NULL, 0), 32);
-		if (tmpNumber != NULL) {
+		tmpNumber = OSNumber::withNumber(strtol((const char *)propData, nullptr, 0), 32);
+		if (tmpNumber != nullptr) {
 			tmpObject = tmpNumber;
 		}
 		break;
 	case kOFVariableTypeString:
 		tmpString = OSString::withCString((const char *)propData);
-		if (tmpString != NULL) {
+		if (tmpString != nullptr) {
 			tmpObject = tmpString;
 		}
 		break;
@@ -1121,66 +1853,87 @@ IODTNVRAM::convertPropToObject(UInt8 *propName, UInt32 propNameLength,
 	case kOFVariableTypeData:
 		tmpObject = unescapeBytesToData(propData, propDataLength);
 		break;
+
+	default:
+		break;
 	}
-	if (tmpObject == NULL) {
-		tmpSymbol->release();
+	if (tmpObject == nullptr) {
+		tmpSymbol.reset();
 		return false;
 	}
-	*propSymbol = tmpSymbol;
-	*propObject = tmpObject;
+	*propSymbol = tmpSymbol.detach();
+	*propObject = tmpObject.detach();
 	return true;
 }
+bool
+IODTNVRAM::convertPropToObject(UInt8 *propName, UInt32 propNameLength,
+    UInt8 *propData, UInt32 propDataLength,
+    OSSharedPtr<const OSSymbol>& propSymbol,
+    OSSharedPtr<OSObject>& propObject)
+{
+	const OSSymbol* propSymbolRaw = nullptr;
+	OSObject* propObjectRaw = nullptr;
+	bool result = convertPropToObject(propName, propNameLength, propData, propDataLength,
+	    &propSymbolRaw, &propObjectRaw);
+	propSymbol.reset(propSymbolRaw, OSNoRetain);
+	propObject.reset(propObjectRaw, OSNoRetain);
+	return result;
+}
+
 bool
 IODTNVRAM::convertObjectToProp(UInt8 *buffer, UInt32 *length,
     const OSSymbol *propSymbol, OSObject *propObject)
 {
-	const UInt8 *propName;
-	UInt32 propNameLength, propDataLength, remaining;
-	UInt32 propType, tmpValue;
-	OSBoolean *tmpBoolean = NULL;
-	OSNumber *tmpNumber = NULL;
-	OSString *tmpString = NULL;
-	OSData *tmpData = NULL;
+	const UInt8 *propName;
+	UInt32 propNameLength, propDataLength, remaining;
+	IONVRAMVariableType propType;
+	OSBoolean *tmpBoolean = nullptr;
+	OSNumber *tmpNumber = nullptr;
+	OSString *tmpString =
nullptr; + OSSharedPtr tmpData; propName = (const UInt8 *)propSymbol->getCStringNoCopy(); propNameLength = propSymbol->getLength(); - propType = getOFVariableType(propSymbol); + propType = getVariableType(propSymbol); // Get the size of the data. propDataLength = 0xFFFFFFFF; switch (propType) { case kOFVariableTypeBoolean: tmpBoolean = OSDynamicCast(OSBoolean, propObject); - if (tmpBoolean != NULL) { + if (tmpBoolean != nullptr) { propDataLength = 5; } break; case kOFVariableTypeNumber: tmpNumber = OSDynamicCast(OSNumber, propObject); - if (tmpNumber != NULL) { + if (tmpNumber != nullptr) { propDataLength = 10; } break; case kOFVariableTypeString: tmpString = OSDynamicCast(OSString, propObject); - if (tmpString != NULL) { + if (tmpString != nullptr) { propDataLength = tmpString->getLength(); } break; case kOFVariableTypeData: - tmpData = OSDynamicCast(OSData, propObject); - if (tmpData != NULL) { - tmpData = escapeDataToData(tmpData); + tmpData.reset(OSDynamicCast(OSData, propObject), OSNoRetain); + if (tmpData != nullptr) { + tmpData = escapeDataToData(tmpData.detach()); propDataLength = tmpData->getLength(); } break; + + default: + break; } // Make sure the propertySize is known and will fit. @@ -1205,7 +1958,8 @@ IODTNVRAM::convertObjectToProp(UInt8 *buffer, UInt32 *length, break; case kOFVariableTypeNumber: - tmpValue = tmpNumber->unsigned32BitValue(); + { + uint32_t tmpValue = tmpNumber->unsigned32BitValue(); if (tmpValue == 0xFFFFFFFF) { strlcpy((char *)buffer, "-1", remaining); } else if (tmpValue < 1000) { @@ -1213,7 +1967,8 @@ IODTNVRAM::convertObjectToProp(UInt8 *buffer, UInt32 *length, } else { snprintf((char *)buffer, remaining, "0x%x", (uint32_t)tmpValue); } - break; + } + break; case kOFVariableTypeString: strlcpy((char *)buffer, tmpString->getCStringNoCopy(), remaining); @@ -1221,11 +1976,14 @@ IODTNVRAM::convertObjectToProp(UInt8 *buffer, UInt32 *length, case kOFVariableTypeData: bcopy(tmpData->getBytesNoCopy(), buffer, propDataLength); - tmpData->release(); + tmpData.reset(); + break; + + default: break; } - propDataLength = strlen((const char *)buffer); + propDataLength = ((UInt32) strlen((const char *)buffer)); *length = propNameLength + propDataLength + 2; @@ -1239,7 +1997,7 @@ IODTNVRAM::generateOWChecksum(UInt8 *buffer) UInt32 cnt, checksum = 0; UInt16 *tmpBuffer = (UInt16 *)buffer; - for (cnt = 0; cnt < _ofPartitionSize / 2; cnt++) { + for (cnt = 0; cnt < _commonPartitionSize / 2; cnt++) { checksum += tmpBuffer[cnt]; } @@ -1252,7 +2010,7 @@ IODTNVRAM::validateOWChecksum(UInt8 *buffer) UInt32 cnt, checksum, sum = 0; UInt16 *tmpBuffer = (UInt16 *)buffer; - for (cnt = 0; cnt < _ofPartitionSize / 2; cnt++) { + for (cnt = 0; cnt < _commonPartitionSize / 2; cnt++) { sum += tmpBuffer[cnt]; } @@ -1295,14 +2053,14 @@ IODTNVRAM::writeNVRAMPropertyType0(IORegistryEntry *entry, } -OSData * +OSSharedPtr IODTNVRAM::unescapeBytesToData(const UInt8 *bytes, UInt32 length) { - OSData *data = NULL; - UInt32 totalLength = 0; - UInt32 cnt, cnt2; - UInt8 byte; - bool ok; + OSSharedPtr data; + UInt32 totalLength = 0; + UInt32 cnt, cnt2; + UInt8 byte; + bool ok; // Calculate the actual length of the data. ok = true; @@ -1325,7 +2083,7 @@ IODTNVRAM::unescapeBytesToData(const UInt8 *bytes, UInt32 length) if (ok) { // Create an empty OSData of the correct size. 
data = OSData::withCapacity(totalLength); - if (data != NULL) { + if (data != nullptr) { for (cnt = 0; cnt < length;) { byte = bytes[cnt++]; if (byte == 0xFF) { @@ -1343,20 +2101,20 @@ IODTNVRAM::unescapeBytesToData(const UInt8 *bytes, UInt32 length) return data; } -OSData * +OSSharedPtr IODTNVRAM::escapeDataToData(OSData * value) { - OSData * result; - const UInt8 * startPtr; - const UInt8 * endPtr; - const UInt8 * wherePtr; - UInt8 byte; - bool ok = true; + OSSharedPtr result; + const UInt8 *startPtr; + const UInt8 *endPtr; + const UInt8 *wherePtr; + UInt8 byte; + bool ok = true; wherePtr = (const UInt8 *) value->getBytesNoCopy(); endPtr = wherePtr + value->getLength(); - result = OSData::withCapacity(endPtr - wherePtr); + result = OSData::withCapacity((unsigned int) (endPtr - wherePtr)); if (!result) { return result; } @@ -1370,15 +2128,14 @@ IODTNVRAM::escapeDataToData(OSData * value) wherePtr++) { } ok &= result->appendByte(0xff, 1); - byte = (byte & 0x80) | (wherePtr - startPtr); + byte = (byte & 0x80) | ((UInt8)(wherePtr - startPtr)); } ok &= result->appendByte(byte, 1); } ok &= result->appendByte(0, 1); if (!ok) { - result->release(); - result = NULL; + result.reset(); } return result; @@ -1407,22 +2164,18 @@ IODTNVRAM::readNVRAMPropertyType1(IORegistryEntry *entry, const UInt8 *startPtr; const UInt8 *endPtr; const UInt8 *wherePtr; - const UInt8 *nvPath = NULL; - const char *nvName = NULL; - const char *resultName = NULL; - const UInt8 *resultValue = NULL; + const UInt8 *nvPath = nullptr; + const char *nvName = nullptr; + const char *resultName = nullptr; + const UInt8 *resultValue = nullptr; UInt32 resultValueLen = 0; UInt8 byte; - if (_ofDict == NULL) { - return err; - } - - IOLockLock(_ofLock); - data = OSDynamicCast(OSData, _ofDict->getObject(_registryPropertiesKey)); - IOLockUnlock(_ofLock); + NVRAMLOCK(); + data = OSDynamicCast(OSData, _commonDict->getObject(_registryPropertiesKey.get())); + NVRAMUNLOCK(); - if (data == NULL) { + if (data == nullptr) { return err; } @@ -1436,35 +2189,32 @@ IODTNVRAM::readNVRAMPropertyType1(IORegistryEntry *entry, continue; } - if (nvPath == NULL) { + if (nvPath == nullptr) { nvPath = startPtr; - } else if (nvName == NULL) { + } else if (nvName == nullptr) { nvName = (const char *) startPtr; } else { - IORegistryEntry * compareEntry = IORegistryEntry::fromPath((const char *) nvPath, gIODTPlane); - if (compareEntry) { - compareEntry->release(); - } + OSSharedPtr compareEntry = IORegistryEntry::fromPath((const char *) nvPath, gIODTPlane); if (entry == compareEntry) { bool appleProp = IsApplePropertyName(nvName); if (!appleProp || !resultName) { resultName = nvName; resultValue = startPtr; - resultValueLen = wherePtr - startPtr - 1; + resultValueLen = (UInt32) (wherePtr - startPtr - 1); // OSData getLength() is 32b } if (!appleProp) { break; } } - nvPath = NULL; - nvName = NULL; + nvPath = nullptr; + nvName = nullptr; } startPtr = wherePtr; } if (resultName) { - *name = OSSymbol::withCString(resultName); - *value = unescapeBytesToData(resultValue, resultValueLen); - if ((*name != NULL) && (*value != NULL)) { + *name = OSSymbol::withCString(resultName).detach(); + *value = unescapeBytesToData(resultValue, resultValueLen).detach(); + if ((*name != nullptr) && (*value != nullptr)) { err = kIOReturnSuccess; } else { err = kIOReturnNoMemory; @@ -1478,31 +2228,26 @@ IODTNVRAM::writeNVRAMPropertyType1(IORegistryEntry *entry, const OSSymbol *propName, OSData *value) { - OSData *oldData, *escapedData; - OSData *data = NULL; - const UInt8 *startPtr; - 
const UInt8 *propStart; - const UInt8 *endPtr; - const UInt8 *wherePtr; - const UInt8 *nvPath = NULL; - const char *nvName = NULL; - const char * comp; - const char * name; - UInt8 byte; - bool ok = true; - bool settingAppleProp; - - if (_ofDict == NULL) { - return kIOReturnNoResources; - } + OSSharedPtr data, oldData; + const UInt8 *startPtr; + const UInt8 *propStart; + const UInt8 *endPtr; + const UInt8 *wherePtr; + const UInt8 *nvPath = nullptr; + const char *nvName = nullptr; + const char *comp; + const char *name; + UInt8 byte; + bool ok = true; + bool settingAppleProp; settingAppleProp = IsApplePropertyName(propName->getCStringNoCopy()); // copy over existing properties for other entries - IOLockLock(_ofLock); + NVRAMLOCK(); - oldData = OSDynamicCast(OSData, _ofDict->getObject(_registryPropertiesKey)); + oldData.reset(OSDynamicCast(OSData, _commonDict->getObject(_registryPropertiesKey.get())), OSRetain); if (oldData) { startPtr = (const UInt8 *) oldData->getBytesNoCopy(); endPtr = startPtr + oldData->getLength(); @@ -1514,28 +2259,26 @@ IODTNVRAM::writeNVRAMPropertyType1(IORegistryEntry *entry, if (byte) { continue; } - if (nvPath == NULL) { + if (nvPath == nullptr) { nvPath = startPtr; - } else if (nvName == NULL) { + } else if (nvName == nullptr) { nvName = (const char *) startPtr; } else { - IORegistryEntry * compareEntry = IORegistryEntry::fromPath((const char *) nvPath, gIODTPlane); - if (compareEntry) { - compareEntry->release(); - } + OSSharedPtr compareEntry = IORegistryEntry::fromPath((const char *) nvPath, gIODTPlane); + if (entry == compareEntry) { if ((settingAppleProp && propName->isEqualTo(nvName)) || (!settingAppleProp && !IsApplePropertyName(nvName))) { - // delete old property (nvPath -> wherePtr) - data = OSData::withBytes(propStart, nvPath - propStart); + // delete old property (nvPath -> wherePtr) source OSData len is 32b + data = OSData::withBytes(propStart, (UInt32)(nvPath - propStart)); if (data) { - ok &= data->appendBytes(wherePtr, endPtr - wherePtr); + ok &= data->appendBytes(wherePtr, (UInt32)(endPtr - wherePtr)); } break; } } - nvPath = NULL; - nvName = NULL; + nvPath = nullptr; + nvName = nullptr; } startPtr = wherePtr; @@ -1546,7 +2289,7 @@ IODTNVRAM::writeNVRAMPropertyType1(IORegistryEntry *entry, if (!data) { if (oldData) { - data = OSData::withData(oldData); + data = OSData::withData(oldData.get()); } else { data = OSData::withCapacity(16); } @@ -1558,7 +2301,7 @@ IODTNVRAM::writeNVRAMPropertyType1(IORegistryEntry *entry, if (ok && value && value->getLength()) { do { // get entries in path - OSArray *array = OSArray::withCapacity(5); + OSSharedPtr array = OSArray::withCapacity(5); if (!array) { ok = false; break; @@ -1582,49 +2325,40 @@ IODTNVRAM::writeNVRAMPropertyType1(IORegistryEntry *entry, ok &= data->appendByte('/', 1); comp = name; } - ok &= data->appendBytes(comp, strlen(comp)); + ok &= data->appendBytes(comp, (unsigned int) strnlen(comp, UINT16_MAX)); } ok &= data->appendByte(0, 1); - array->release(); - // append prop name ok &= data->appendBytes(propName->getCStringNoCopy(), propName->getLength() + 1); // append escaped data - escapedData = escapeDataToData(value); - ok &= (escapedData != NULL); + OSSharedPtr escapedData = escapeDataToData(value); + ok &= (escapedData != nullptr); if (ok) { - ok &= data->appendBytes(escapedData); + ok &= data->appendBytes(escapedData.get()); } } while (false); } - oldData->retain(); if (ok) { - ok = _ofDict->setObject(_registryPropertiesKey, data); - } - - if (data) { - data->release(); + ok = 
_commonDict->setObject(_registryPropertiesKey.get(), data.get()); } if (ok) { if (syncVariables() != kIOReturnSuccess) { if (oldData) { - _ofDict->setObject(_registryPropertiesKey, oldData); + _commonDict->setObject(_registryPropertiesKey.get(), oldData.get()); } else { - _ofDict->removeObject(_registryPropertiesKey); + _commonDict->removeObject(_registryPropertiesKey.get()); } (void) syncVariables(); ok = false; } } - if (oldData) { - oldData->release(); - } + oldData.reset(); - IOLockUnlock(_ofLock); + NVRAMUNLOCK(); return ok ? kIOReturnSuccess : kIOReturnNoMemory; } diff --git a/iokit/Kernel/IOPMGR.cpp b/iokit/Kernel/IOPMGR.cpp new file mode 100644 index 000000000..4fd29c336 --- /dev/null +++ b/iokit/Kernel/IOPMGR.cpp @@ -0,0 +1,32 @@ +/* + * Copyright (c) 2019 Apple Computer, Inc. All rights reserved. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. + * + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ + */ + +#include + +#define super IOService +OSDefineMetaClassAndAbstractStructors(IOPMGR, IOService); diff --git a/iokit/Kernel/IOPMinformeeList.cpp b/iokit/Kernel/IOPMinformeeList.cpp index 18cf810ed..56b6ca76e 100644 --- a/iokit/Kernel/IOPMinformeeList.cpp +++ b/iokit/Kernel/IOPMinformeeList.cpp @@ -91,6 +91,7 @@ IOPMinformeeList::appendNewInformee( IOService * newObject ) if (IOPMNoErr == addToList(newInformee)) { return newInformee; } else { + newInformee->release(); return NULL; } } @@ -104,33 +105,30 @@ IOPMinformeeList::appendNewInformee( IOService * newObject ) IOReturn IOPMinformeeList::addToList( IOPMinformee * newInformee ) { - IOPMinformee * nextInformee; - IORecursiveLock *listLock = getSharedRecursiveLock(); + IORecursiveLock *listLock = getSharedRecursiveLock(); + IOReturn ret = kIOReturnError; if (!listLock) { - return kIOReturnError; + return ret; } IORecursiveLockLock(listLock); - nextInformee = firstItem; // Is new object already in the list? 
- while (nextInformee != NULL) { - if (nextInformee->whatObject == newInformee->whatObject) { - // object is present; just exit - goto unlock_and_exit; - } - nextInformee = nextInList(nextInformee); + if (findItem(newInformee->whatObject) != NULL) { + // object is present; just exit + goto unlock_and_exit; } // add it to the front of the list newInformee->nextInList = firstItem; firstItem = newInformee; length++; + ret = IOPMNoErr; unlock_and_exit: IORecursiveLockUnlock(listLock); - return IOPMNoErr; + return ret; } diff --git a/iokit/Kernel/IOPMrootDomain.cpp b/iokit/Kernel/IOPMrootDomain.cpp index d72b90091..b4faa6330 100644 --- a/iokit/Kernel/IOPMrootDomain.cpp +++ b/iokit/Kernel/IOPMrootDomain.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998-2019 Apple Inc. All rights reserved. + * Copyright (c) 1998-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -25,6 +25,9 @@ * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ + +#define IOKIT_ENABLE_SHARED_PTR + #include #include #include @@ -34,6 +37,7 @@ #include #include #include +#include #include #include #include @@ -53,7 +57,10 @@ #include "IOKitKernelInternal.h" #if HIBERNATION #include -#endif +#if __arm64__ +#include +#endif /* __arm64__ */ +#endif /* HIBERNATION */ #include #include #include @@ -69,6 +76,8 @@ #include "IOServicePMPrivate.h" #include +#include +#include __BEGIN_DECLS #include @@ -148,43 +157,68 @@ do { \ #define CAP_HIGHEST(c) \ ((_highestCapability & (c)) != 0) +#define CAP_PENDING(c) \ + ((_pendingCapability & (c)) != 0) + +// rdar://problem/9157444 #if defined(__i386__) || defined(__x86_64__) -#define DARK_TO_FULL_EVALUATE_CLAMSHELL 1 +#define DARK_TO_FULL_EVALUATE_CLAMSHELL_DELAY 20 #endif // Event types for IOPMPowerStateQueue::submitPowerEvent() enum { - kPowerEventFeatureChanged = 1, // 1 - kPowerEventReceivedPowerNotification, // 2 - kPowerEventSystemBootCompleted, // 3 - kPowerEventSystemShutdown, // 4 - kPowerEventUserDisabledSleep, // 5 + kPowerEventFeatureChanged = 1, // 1 + kPowerEventReceivedPowerNotification, // 2 + kPowerEventSystemBootCompleted, // 3 + kPowerEventSystemShutdown, // 4 + kPowerEventUserDisabledSleep, // 5 kPowerEventRegisterSystemCapabilityClient, // 6 kPowerEventRegisterKernelCapabilityClient, // 7 - kPowerEventPolicyStimulus, // 8 - kPowerEventAssertionCreate, // 9 - kPowerEventAssertionRelease, // 10 - kPowerEventAssertionSetLevel, // 11 - kPowerEventQueueSleepWakeUUID, // 12 - kPowerEventPublishSleepWakeUUID, // 13 - kPowerEventSetDisplayPowerOn // 14 + kPowerEventPolicyStimulus, // 8 + kPowerEventAssertionCreate, // 9 + kPowerEventAssertionRelease, // 10 + kPowerEventAssertionSetLevel, // 11 + kPowerEventQueueSleepWakeUUID, // 12 + kPowerEventPublishSleepWakeUUID, // 13 + kPowerEventSetDisplayPowerOn, // 14 + kPowerEventPublishWakeType, // 15 + kPowerEventAOTEvaluate // 16 }; // For evaluatePolicy() // List of stimuli that affects the root domain policy. 
enum { - kStimulusDisplayWranglerSleep, // 0 - kStimulusDisplayWranglerWake, // 1 - kStimulusAggressivenessChanged, // 2 - kStimulusDemandSystemSleep, // 3 - kStimulusAllowSystemSleepChanged, // 4 - kStimulusDarkWakeActivityTickle, // 5 - kStimulusDarkWakeEntry, // 6 - kStimulusDarkWakeReentry, // 7 - kStimulusDarkWakeEvaluate, // 8 - kStimulusNoIdleSleepPreventers, // 9 - kStimulusEnterUserActiveState, // 10 - kStimulusLeaveUserActiveState // 11 + kStimulusDisplayWranglerSleep, // 0 + kStimulusDisplayWranglerWake, // 1 + kStimulusAggressivenessChanged, // 2 + kStimulusDemandSystemSleep, // 3 + kStimulusAllowSystemSleepChanged, // 4 + kStimulusDarkWakeActivityTickle, // 5 + kStimulusDarkWakeEntry, // 6 + kStimulusDarkWakeReentry, // 7 + kStimulusDarkWakeEvaluate, // 8 + kStimulusNoIdleSleepPreventers, // 9 + kStimulusEnterUserActiveState, // 10 + kStimulusLeaveUserActiveState // 11 +}; + +// Internal power state change reasons +// Must be less than kIOPMSleepReasonClamshell=101 +enum { + kCPSReasonNone = 0, // 0 + kCPSReasonInit, // 1 + kCPSReasonWake, // 2 + kCPSReasonIdleSleepPrevent, // 3 + kCPSReasonIdleSleepAllow, // 4 + kCPSReasonPowerOverride, // 5 + kCPSReasonPowerDownCancel, // 6 + kCPSReasonAOTExit, // 7 + kCPSReasonAdjustPowerState, // 8 + kCPSReasonDarkWakeCannotSleep, // 9 + kCPSReasonIdleSleepEnabled, // 10 + kCPSReasonEvaluatePolicy, // 11 + kCPSReasonSustainFullWake, // 12 + kCPSReasonPMInternals = (kIOPMSleepReasonClamshell - 1) }; extern "C" { @@ -200,63 +234,66 @@ static void handleAggressivesFunction( thread_call_param_t, thread_call_param_t static void pmEventTimeStamp(uint64_t *recordTS); static void powerButtonUpCallout( thread_call_param_t, thread_call_param_t ); static void powerButtonDownCallout( thread_call_param_t, thread_call_param_t ); +static OSPtr copyKextIdentifierWithAddress(vm_address_t address); -static int IOPMConvertSecondsToCalendar(long secs, IOPMCalendarStruct * dt); -static long IOPMConvertCalendarToSeconds(const IOPMCalendarStruct * dt); +static int IOPMConvertSecondsToCalendar(clock_sec_t secs, IOPMCalendarStruct * dt); +static clock_sec_t IOPMConvertCalendarToSeconds(const IOPMCalendarStruct * dt); #define YMDTF "%04d/%02d/%d %02d:%02d:%02d" #define YMDT(cal) ((int)(cal)->year), (cal)->month, (cal)->day, (cal)->hour, (cal)->minute, (cal)->second // "IOPMSetSleepSupported" callPlatformFunction name -static const OSSymbol *sleepSupportedPEFunction = NULL; -static const OSSymbol *sleepMessagePEFunction = NULL; - -static const OSSymbol * gIOPMPSExternalConnectedKey; -static const OSSymbol * gIOPMPSExternalChargeCapableKey; -static const OSSymbol * gIOPMPSBatteryInstalledKey; -static const OSSymbol * gIOPMPSIsChargingKey; -static const OSSymbol * gIOPMPSAtWarnLevelKey; -static const OSSymbol * gIOPMPSAtCriticalLevelKey; -static const OSSymbol * gIOPMPSCurrentCapacityKey; -static const OSSymbol * gIOPMPSMaxCapacityKey; -static const OSSymbol * gIOPMPSDesignCapacityKey; -static const OSSymbol * gIOPMPSTimeRemainingKey; -static const OSSymbol * gIOPMPSAmperageKey; -static const OSSymbol * gIOPMPSVoltageKey; -static const OSSymbol * gIOPMPSCycleCountKey; -static const OSSymbol * gIOPMPSMaxErrKey; -static const OSSymbol * gIOPMPSAdapterInfoKey; -static const OSSymbol * gIOPMPSLocationKey; -static const OSSymbol * gIOPMPSErrorConditionKey; -static const OSSymbol * gIOPMPSManufacturerKey; -static const OSSymbol * gIOPMPSManufactureDateKey; -static const OSSymbol * gIOPMPSModelKey; -static const OSSymbol * gIOPMPSSerialKey; -static const OSSymbol * 
gIOPMPSLegacyBatteryInfoKey;
-static const OSSymbol * gIOPMPSBatteryHealthKey;
-static const OSSymbol * gIOPMPSHealthConfidenceKey;
-static const OSSymbol * gIOPMPSCapacityEstimatedKey;
-static const OSSymbol * gIOPMPSBatteryChargeStatusKey;
-static const OSSymbol * gIOPMPSBatteryTemperatureKey;
-static const OSSymbol * gIOPMPSAdapterDetailsKey;
-static const OSSymbol * gIOPMPSChargerConfigurationKey;
-static const OSSymbol * gIOPMPSAdapterDetailsIDKey;
-static const OSSymbol * gIOPMPSAdapterDetailsWattsKey;
-static const OSSymbol * gIOPMPSAdapterDetailsRevisionKey;
-static const OSSymbol * gIOPMPSAdapterDetailsSerialNumberKey;
-static const OSSymbol * gIOPMPSAdapterDetailsFamilyKey;
-static const OSSymbol * gIOPMPSAdapterDetailsAmperageKey;
-static const OSSymbol * gIOPMPSAdapterDetailsDescriptionKey;
-static const OSSymbol * gIOPMPSAdapterDetailsPMUConfigurationKey;
-static const OSSymbol * gIOPMPSAdapterDetailsSourceIDKey;
-static const OSSymbol * gIOPMPSAdapterDetailsErrorFlagsKey;
-static const OSSymbol * gIOPMPSAdapterDetailsSharedSourceKey;
-static const OSSymbol * gIOPMPSAdapterDetailsCloakedKey;
-static const OSSymbol * gIOPMPSInvalidWakeSecondsKey;
-static const OSSymbol * gIOPMPSPostChargeWaitSecondsKey;
-static const OSSymbol * gIOPMPSPostDishargeWaitSecondsKey;
+static OSSharedPtr<const OSSymbol> sleepSupportedPEFunction;
+static OSSharedPtr<const OSSymbol> sleepMessagePEFunction;
+static OSSharedPtr<const OSSymbol> gIOPMWakeTypeUserKey;
+
+static OSSharedPtr<const OSSymbol> gIOPMPSExternalConnectedKey;
+static OSSharedPtr<const OSSymbol> gIOPMPSExternalChargeCapableKey;
+static OSSharedPtr<const OSSymbol> gIOPMPSBatteryInstalledKey;
+static OSSharedPtr<const OSSymbol> gIOPMPSIsChargingKey;
+static OSSharedPtr<const OSSymbol> gIOPMPSAtWarnLevelKey;
+static OSSharedPtr<const OSSymbol> gIOPMPSAtCriticalLevelKey;
+static OSSharedPtr<const OSSymbol> gIOPMPSCurrentCapacityKey;
+static OSSharedPtr<const OSSymbol> gIOPMPSMaxCapacityKey;
+static OSSharedPtr<const OSSymbol> gIOPMPSDesignCapacityKey;
+static OSSharedPtr<const OSSymbol> gIOPMPSTimeRemainingKey;
+static OSSharedPtr<const OSSymbol> gIOPMPSAmperageKey;
+static OSSharedPtr<const OSSymbol> gIOPMPSVoltageKey;
+static OSSharedPtr<const OSSymbol> gIOPMPSCycleCountKey;
+static OSSharedPtr<const OSSymbol> gIOPMPSMaxErrKey;
+static OSSharedPtr<const OSSymbol> gIOPMPSAdapterInfoKey;
+static OSSharedPtr<const OSSymbol> gIOPMPSLocationKey;
+static OSSharedPtr<const OSSymbol> gIOPMPSErrorConditionKey;
+static OSSharedPtr<const OSSymbol> gIOPMPSManufacturerKey;
+static OSSharedPtr<const OSSymbol> gIOPMPSManufactureDateKey;
+static OSSharedPtr<const OSSymbol> gIOPMPSModelKey;
+static OSSharedPtr<const OSSymbol> gIOPMPSSerialKey;
+static OSSharedPtr<const OSSymbol> gIOPMPSLegacyBatteryInfoKey;
+static OSSharedPtr<const OSSymbol> gIOPMPSBatteryHealthKey;
+static OSSharedPtr<const OSSymbol> gIOPMPSHealthConfidenceKey;
+static OSSharedPtr<const OSSymbol> gIOPMPSCapacityEstimatedKey;
+static OSSharedPtr<const OSSymbol> gIOPMPSBatteryChargeStatusKey;
+static OSSharedPtr<const OSSymbol> gIOPMPSBatteryTemperatureKey;
+static OSSharedPtr<const OSSymbol> gIOPMPSAdapterDetailsKey;
+static OSSharedPtr<const OSSymbol> gIOPMPSChargerConfigurationKey;
+static OSSharedPtr<const OSSymbol> gIOPMPSAdapterDetailsIDKey;
+static OSSharedPtr<const OSSymbol> gIOPMPSAdapterDetailsWattsKey;
+static OSSharedPtr<const OSSymbol> gIOPMPSAdapterDetailsRevisionKey;
+static OSSharedPtr<const OSSymbol> gIOPMPSAdapterDetailsSerialNumberKey;
+static OSSharedPtr<const OSSymbol> gIOPMPSAdapterDetailsFamilyKey;
+static OSSharedPtr<const OSSymbol> gIOPMPSAdapterDetailsAmperageKey;
+static OSSharedPtr<const OSSymbol> gIOPMPSAdapterDetailsDescriptionKey;
+static OSSharedPtr<const OSSymbol> gIOPMPSAdapterDetailsPMUConfigurationKey;
+static OSSharedPtr<const OSSymbol> gIOPMPSAdapterDetailsSourceIDKey;
+static OSSharedPtr<const OSSymbol> gIOPMPSAdapterDetailsErrorFlagsKey;
+static OSSharedPtr<const OSSymbol> gIOPMPSAdapterDetailsSharedSourceKey;
+static OSSharedPtr<const OSSymbol> gIOPMPSAdapterDetailsCloakedKey;
+static OSSharedPtr<const OSSymbol> gIOPMPSInvalidWakeSecondsKey;
+static OSSharedPtr<const OSSymbol> gIOPMPSPostChargeWaitSecondsKey;
+static OSSharedPtr<const OSSymbol> gIOPMPSPostDishargeWaitSecondsKey;
#define kIOSleepSupportedKey "IOSleepSupported" #define kIOPMSystemCapabilitiesKey "System Capabilities" +#define kIOPMSystemDefaultOverrideKey "SystemPowerProfileOverrideDict" #define kIORequestWranglerIdleKey "IORequestIdle" #define kDefaultWranglerIdlePeriod 1000 // in milliseconds @@ -272,6 +309,8 @@ static const OSSymbol * gIOPMPSPostDishargeWaitSecondsKey; #define kLocalEvalClamshellCommand (1 << 15) #define kIdleSleepRetryInterval (3 * 60) +#define DISPLAY_WRANGLER_PRESENT (!NO_KERNEL_HID) + enum { kWranglerPowerStateMin = 0, kWranglerPowerStateSleep = 2, @@ -399,8 +438,8 @@ struct AggressivesRequest { uint32_t options; uint32_t dataType; union { - IOService * service; - AggressivesRecord record; + OSSharedPtr service; + AggressivesRecord record; } data; }; @@ -450,16 +489,33 @@ getSystemSleepPreventerString( uint32_t preventer ) // gDarkWakeFlags enum { - kDarkWakeFlagHIDTickleEarly = 0x01,// hid tickle before gfx suppression - kDarkWakeFlagHIDTickleLate = 0x02,// hid tickle after gfx suppression - kDarkWakeFlagHIDTickleNone = 0x03,// hid tickle is not posted - kDarkWakeFlagHIDTickleMask = 0x03, + kDarkWakeFlagPromotionNone = 0x0000, + kDarkWakeFlagPromotionEarly = 0x0001, // promote before gfx clamp + kDarkWakeFlagPromotionLate = 0x0002, // promote after gfx clamp + kDarkWakeFlagPromotionMask = 0x0003, kDarkWakeFlagAlarmIsDark = 0x0100, - kDarkWakeFlagGraphicsPowerState1 = 0x0200, - kDarkWakeFlagAudioNotSuppressed = 0x0400 + kDarkWakeFlagAudioNotSuppressed = 0x0200, + kDarkWakeFlagUserWakeWorkaround = 0x1000 +}; + +// gClamshellFlags +// The workaround for 9157444 is enabled at compile time using the +// DARK_TO_FULL_EVALUATE_CLAMSHELL_DELAY macro and is not represented below. +enum { + kClamshell_WAR_38378787 = 0x00000001, + kClamshell_WAR_47715679 = 0x00000002, + kClamshell_WAR_58009435 = 0x00000004 +}; + +// acceptSystemWakeEvents() +enum { + kAcceptSystemWakeEvents_Disable = 0, + kAcceptSystemWakeEvents_Enable, + kAcceptSystemWakeEvents_Reenable }; static IOPMrootDomain * gRootDomain; +static IORootParent * gPatriarch; static IONotifier * gSysPowerDownNotifier = NULL; static UInt32 gSleepOrShutdownPending = 0; static UInt32 gWillShutdown = 0; @@ -473,11 +529,26 @@ static char * gHaltLog; enum { kHaltLogSize = 2048 }; static size_t gHaltLogPos; static uint64_t gHaltStartTime; - +static char gKextNameBuf[64]; +static size_t gKextNamePos; +static bool gKextNameEnd; uuid_string_t bootsessionuuid_string; -static uint32_t gDarkWakeFlags = kDarkWakeFlagHIDTickleNone; +#if defined(XNU_TARGET_OS_OSX) +#if DISPLAY_WRANGLER_PRESENT +static uint32_t gDarkWakeFlags = kDarkWakeFlagPromotionNone; +#elif CONFIG_ARROW +// Enable temporary full wake promotion workarounds +static uint32_t gDarkWakeFlags = kDarkWakeFlagUserWakeWorkaround; +#else +// Enable full wake promotion workarounds +static uint32_t gDarkWakeFlags = kDarkWakeFlagUserWakeWorkaround; +#endif +#else /* !defined(XNU_TARGET_OS_OSX) */ +static uint32_t gDarkWakeFlags = kDarkWakeFlagPromotionEarly; +#endif /* !defined(XNU_TARGET_OS_OSX) */ + static uint32_t gNoIdleFlag = 0; static uint32_t gSwdPanic = 1; static uint32_t gSwdSleepTimeout = 0; @@ -488,26 +559,60 @@ static PMStatsStruct gPMStats; static uint32_t swd_panic_phase; #endif +static uint32_t gClamshellFlags = 0 +#if defined(__i386__) || defined(__x86_64__) + | kClamshell_WAR_58009435 +#endif +; #if HIBERNATION + +#if defined(__arm64__) +static IOReturn +defaultSleepPolicyHandler(void *ctx, const IOPMSystemSleepPolicyVariables *vars, IOPMSystemSleepParameters *params) +{ + 
uint32_t sleepType = kIOPMSleepTypeDeepIdle; + + assert(vars->signature == kIOPMSystemSleepPolicySignature); + assert(vars->version == kIOPMSystemSleepPolicyVersion); + + // Hibernation enabled and either user forced hibernate or low battery sleep + if ((vars->hibernateMode & kIOHibernateModeOn) && + ppl_hib_hibernation_supported() && + (((vars->hibernateMode & kIOHibernateModeSleep) == 0) || + (vars->sleepFactors & kIOPMSleepFactorBatteryLow))) { + sleepType = kIOPMSleepTypeHibernate; + } + params->version = kIOPMSystemSleepParametersVersion; + params->sleepType = sleepType; + return kIOReturnSuccess; +} +static IOPMSystemSleepPolicyHandler gSleepPolicyHandler = &defaultSleepPolicyHandler; +#else /* defined(__arm64__) */ static IOPMSystemSleepPolicyHandler gSleepPolicyHandler = NULL; +#endif /* defined(__arm64__) */ + static IOPMSystemSleepPolicyVariables * gSleepPolicyVars = NULL; static void * gSleepPolicyTarget; #endif struct timeval gIOLastSleepTime; struct timeval gIOLastWakeTime; +AbsoluteTime gIOLastWakeAbsTime; +AbsoluteTime gIOLastSleepAbsTime; struct timeval gIOLastUserSleepTime; static char gWakeReasonString[128]; +static char gBootReasonString[80]; +static char gShutdownReasonString[80]; static bool gWakeReasonSysctlRegistered = false; -static AbsoluteTime gIOLastWakeAbsTime; -static AbsoluteTime gIOLastSleepAbsTime; +static bool gBootReasonSysctlRegistered = false; +static bool gShutdownReasonSysctlRegistered = false; static AbsoluteTime gUserActiveAbsTime; static AbsoluteTime gUserInactiveAbsTime; -#if defined(__i386__) || defined(__x86_64__) +#if defined(__i386__) || defined(__x86_64__) || (defined(__arm64__) && HIBERNATION) static bool gSpinDumpBufferFull = false; #endif @@ -524,8 +629,8 @@ static unsigned int gPMHaltIdleCount; static int gPMHaltDepth; static uint32_t gPMHaltMessageType; static IOLock * gPMHaltLock = NULL; -static OSArray * gPMHaltArray = NULL; -static const OSSymbol * gPMHaltClientAcknowledgeKey = NULL; +static OSSharedPtr gPMHaltArray; +static OSSharedPtr gPMHaltClientAcknowledgeKey; static bool gPMQuiesced; // Constants used as arguments to IOPMrootDomain::informCPUStateChange @@ -536,11 +641,11 @@ enum { kInformableCount = 2 }; -const OSSymbol *gIOPMStatsResponseTimedOut; -const OSSymbol *gIOPMStatsResponseCancel; -const OSSymbol *gIOPMStatsResponseSlow; -const OSSymbol *gIOPMStatsResponsePrompt; -const OSSymbol *gIOPMStatsDriverPSChangeSlow; +OSSharedPtr gIOPMStatsResponseTimedOut; +OSSharedPtr gIOPMStatsResponseCancel; +OSSharedPtr gIOPMStatsResponseSlow; +OSSharedPtr gIOPMStatsResponsePrompt; +OSSharedPtr gIOPMStatsDriverPSChangeSlow; #define kBadPMFeatureID 0 @@ -591,7 +696,7 @@ public: const OSSymbol *settings[], OSObject **handle_obj); - void dispatchPMSetting(const OSSymbol *type, OSObject *object); + IOReturn dispatchPMSetting(const OSSymbol *type, OSObject *object); void clientHandleFreed(void); }; @@ -621,7 +726,7 @@ class PMTraceWorker : public OSObject public: typedef enum { kPowerChangeStart, kPowerChangeCompleted } change_t; - static PMTraceWorker *tracer( IOPMrootDomain * ); + static OSPtr tracer( IOPMrootDomain * ); void tracePCIPowerChange(change_t, IOService *, uint32_t, uint32_t); void tracePoint(uint8_t phase); void traceDetail(uint32_t detail); @@ -638,7 +743,7 @@ public: private: IOPMrootDomain *owner; IOLock *pmTraceWorkerLock; - OSArray *pciDeviceBitMappings; + OSSharedPtr pciDeviceBitMappings; uint8_t addedToRegistry; uint8_t tracePhase; @@ -663,7 +768,7 @@ public: IOReturn setAssertionLevel(IOPMDriverAssertionID, 
IOPMDriverAssertionLevel); IOReturn setUserAssertionLevels(IOPMDriverAssertionType); - OSArray *copyAssertionsArray(void); + OSSharedPtr copyAssertionsArray(void); IOPMDriverAssertionType getActivatedAssertions(void); IOPMDriverAssertionLevel getAssertionLevel(IOPMDriverAssertionType); @@ -672,8 +777,13 @@ public: IOReturn handleSetAssertionLevel(IOPMDriverAssertionID, IOPMDriverAssertionLevel); IOReturn handleSetUserAssertionLevels(void * arg0); void publishProperties(void); + void reportCPUBitAccounting(void); private: + /* + * this should be treated as POD, as it's byte-copied around + * and we cannot rely on d'tor firing at the right time + */ typedef struct { IOPMDriverAssertionID id; IOPMDriverAssertionType assertionBits; @@ -683,16 +793,22 @@ private: IOService *ownerService; uint64_t registryEntryID; IOPMDriverAssertionLevel level; + uint64_t assertCPUStartTime; + uint64_t assertCPUDuration; } PMAssertStruct; uint32_t tabulateProducerCount; uint32_t tabulateConsumerCount; + uint64_t maxAssertCPUDuration; + uint64_t maxAssertCPUEntryId; + PMAssertStruct *detailsForID(IOPMDriverAssertionID, int *); void tabulate(void); + void updateCPUBitAccounting(PMAssertStruct * assertStruct); IOPMrootDomain *owner; - OSArray *assertionsArray; + OSSharedPtr assertionsArray; IOLock *assertionsArrayLock; IOPMDriverAssertionID issuingUniqueID __attribute__((aligned(8)));/* aligned for atomic access */ IOPMDriverAssertionType assertionsKernel; @@ -757,13 +873,13 @@ IOPMRootDomainWillShutdown(void) extern "C" IONotifier * registerSleepWakeInterest(IOServiceInterestHandler handler, void * self, void * ref) { - return gRootDomain->registerInterest( gIOGeneralInterest, handler, self, ref ); + return gRootDomain->registerInterest( gIOGeneralInterest, handler, self, ref ).detach(); } extern "C" IONotifier * registerPrioritySleepWakeInterest(IOServiceInterestHandler handler, void * self, void * ref) { - return gRootDomain->registerInterest( gIOPriorityPowerStateInterest, handler, self, ref ); + return gRootDomain->registerInterest( gIOPriorityPowerStateInterest, handler, self, ref ).detach(); } extern "C" IOReturn @@ -852,7 +968,7 @@ IOSystemShutdownNotification(int stage) uint64_t startTime; if (kIOSystemShutdownNotificationStageRootUnmount == stage) { -#if !CONFIG_EMBEDDED +#if defined(XNU_TARGET_OS_OSX) uint64_t nano, millis; startTime = mach_absolute_time(); IOService::getPlatform()->waitQuiet(30 * NSEC_PER_SEC); @@ -861,7 +977,7 @@ IOSystemShutdownNotification(int stage) if (gHaltTimeMaxLog && (millis >= gHaltTimeMaxLog)) { printf("waitQuiet() for unmount %qd ms\n", millis); } -#endif +#endif /* defined(XNU_TARGET_OS_OSX) */ return; } @@ -990,7 +1106,7 @@ IOPMrootDomain::updateTasksSuspend(void) static void disk_sync_callout( thread_call_param_t p0, thread_call_param_t p1 ) { - IOService * rootDomain = (IOService *) p0; + IOPMrootDomain * rootDomain = (IOPMrootDomain *) p0; uint32_t notifyRef = (uint32_t)(uintptr_t) p1; uint32_t powerState = rootDomain->getPowerState(); @@ -1008,9 +1124,7 @@ disk_sync_callout( thread_call_param_t p0, thread_call_param_t p1 ) else { IOHibernateSystemPostWake(false); - if (gRootDomain) { - gRootDomain->sleepWakeDebugSaveSpinDumpFile(); - } + rootDomain->sleepWakeDebugSaveSpinDumpFile(); } #endif @@ -1054,7 +1168,7 @@ sysctl_sleepwaketime SYSCTL_HANDLER_ARGS return sysctl_io_opaque(req, &t, sizeof(t), NULL); } else { struct user32_timeval t = {}; - t.tv_sec = swt->tv_sec; + t.tv_sec = (typeof(t.tv_sec))swt->tv_sec; t.tv_usec = swt->tv_usec; return sysctl_io_opaque(req, &t, 
sizeof(t), NULL); } @@ -1096,7 +1210,7 @@ static SYSCTL_PROC(_kern, OID_AUTO, willshutdown, extern struct sysctl_oid sysctl__kern_iokittest; extern struct sysctl_oid sysctl__debug_iokit; -#if !CONFIG_EMBEDDED +#if defined(XNU_TARGET_OS_OSX) static int sysctl_progressmeterenable @@ -1138,7 +1252,7 @@ static SYSCTL_PROC(_kern, OID_AUTO, progressmeter, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NOAUTO | CTLFLAG_KERN | CTLFLAG_LOCKED, NULL, 0, sysctl_progressmeter, "I", ""); -#endif /* !CONFIG_EMBEDDED */ +#endif /* defined(XNU_TARGET_OS_OSX) */ @@ -1191,21 +1305,41 @@ SYSCTL_PROC(_kern, OID_AUTO, wakereason, CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NOAUTO | CTLFLAG_KERN | CTLFLAG_LOCKED, NULL, 0, sysctl_wakereason, "A", "wakereason"); +SYSCTL_STRING(_kern, OID_AUTO, bootreason, + CTLFLAG_RD | CTLFLAG_NOAUTO | CTLFLAG_KERN | CTLFLAG_LOCKED, + gBootReasonString, sizeof(gBootReasonString), ""); + +static int +sysctl_shutdownreason SYSCTL_HANDLER_ARGS +{ + char sr[sizeof(gShutdownReasonString)]; + + sr[0] = '\0'; + if (gRootDomain) { + gRootDomain->copyShutdownReasonString(sr, sizeof(sr)); + } + + return sysctl_io_string(req, sr, 0, 0, NULL); +} + +SYSCTL_PROC(_kern, OID_AUTO, shutdownreason, + CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NOAUTO | CTLFLAG_KERN | CTLFLAG_LOCKED, + NULL, 0, sysctl_shutdownreason, "A", "shutdownreason"); + static int sysctl_targettype SYSCTL_HANDLER_ARGS { IOService * root; - OSObject * obj; + OSSharedPtr obj; OSData * data; char tt[32]; tt[0] = '\0'; root = IOService::getServiceRoot(); if (root && (obj = root->copyProperty(gIODTTargetTypeKey))) { - if ((data = OSDynamicCast(OSData, obj))) { + if ((data = OSDynamicCast(OSData, obj.get()))) { strlcpy(tt, (const char *) data->getBytesNoCopy(), sizeof(tt)); } - obj->release(); } return sysctl_io_string(req, tt, 0, 0, NULL); } @@ -1214,7 +1348,6 @@ SYSCTL_PROC(_hw, OID_AUTO, targettype, CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NOAUTO | CTLFLAG_KERN | CTLFLAG_LOCKED, NULL, 0, sysctl_targettype, "A", "targettype"); -static SYSCTL_INT(_debug, OID_AUTO, darkwake, CTLFLAG_RW, &gDarkWakeFlags, 0, ""); static SYSCTL_INT(_debug, OID_AUTO, noidle, CTLFLAG_RW, &gNoIdleFlag, 0, ""); static SYSCTL_INT(_debug, OID_AUTO, swd_sleep_timeout, CTLFLAG_RW, &gSwdSleepTimeout, 0, ""); static SYSCTL_INT(_debug, OID_AUTO, swd_wake_timeout, CTLFLAG_RW, &gSwdWakeTimeout, 0, ""); @@ -1222,7 +1355,11 @@ static SYSCTL_INT(_debug, OID_AUTO, swd_timeout, CTLFLAG_RW, &gSwdSleepWakeTimeo static SYSCTL_INT(_debug, OID_AUTO, swd_panic, CTLFLAG_RW, &gSwdPanic, 0, ""); #if DEVELOPMENT || DEBUG static SYSCTL_INT(_debug, OID_AUTO, swd_panic_phase, CTLFLAG_RW, &swd_panic_phase, 0, ""); -#endif +#if defined(XNU_TARGET_OS_OSX) +static SYSCTL_INT(_debug, OID_AUTO, clamshell, CTLFLAG_RW, &gClamshellFlags, 0, ""); +static SYSCTL_INT(_debug, OID_AUTO, darkwake, CTLFLAG_RW, &gDarkWakeFlags, 0, ""); +#endif /* defined(XNU_TARGET_OS_OSX) */ +#endif /* DEVELOPMENT || DEBUG */ //****************************************************************************** // AOT @@ -1263,7 +1400,7 @@ update_aotmode(uint32_t mode) } oldCount = gRootDomain->idleSleepPreventersCount(); - gRootDomain->_aotMode = mode; + gRootDomain->_aotMode = (mode & kIOPMAOTModeMask); gRootDomain->updatePreventIdleSleepListInternal(NULL, false, oldCount); return 0; }); @@ -1319,32 +1456,32 @@ static SYSCTL_PROC(_kern, OID_AUTO, aotmode, //****************************************************************************** -static const OSSymbol * gIOPMSettingAutoWakeCalendarKey; -static const OSSymbol * 
gIOPMSettingAutoWakeSecondsKey; -static const OSSymbol * gIOPMSettingAutoPowerCalendarKey; -static const OSSymbol * gIOPMSettingAutoPowerSecondsKey; -static const OSSymbol * gIOPMSettingDebugWakeRelativeKey; -static const OSSymbol * gIOPMSettingDebugPowerRelativeKey; -static const OSSymbol * gIOPMSettingMaintenanceWakeCalendarKey; -static const OSSymbol * gIOPMSettingSleepServiceWakeCalendarKey; -static const OSSymbol * gIOPMSettingSilentRunningKey; -static const OSSymbol * gIOPMUserTriggeredFullWakeKey; -static const OSSymbol * gIOPMUserIsActiveKey; +static OSSharedPtr gIOPMSettingAutoWakeCalendarKey; +static OSSharedPtr gIOPMSettingAutoWakeSecondsKey; +static OSSharedPtr gIOPMSettingAutoPowerCalendarKey; +static OSSharedPtr gIOPMSettingAutoPowerSecondsKey; +static OSSharedPtr gIOPMSettingDebugWakeRelativeKey; +static OSSharedPtr gIOPMSettingDebugPowerRelativeKey; +static OSSharedPtr gIOPMSettingMaintenanceWakeCalendarKey; +static OSSharedPtr gIOPMSettingSleepServiceWakeCalendarKey; +static OSSharedPtr gIOPMSettingSilentRunningKey; +static OSSharedPtr gIOPMUserTriggeredFullWakeKey; +static OSSharedPtr gIOPMUserIsActiveKey; +static OSSharedPtr gIOPMSettingLowLatencyAudioModeKey; //****************************************************************************** // start // //****************************************************************************** -#define kRootDomainSettingsCount 19 -#define kRootDomainNoPublishSettingsCount 3 +#define kRootDomainSettingsCount 20 +#define kRootDomainNoPublishSettingsCount 4 bool IOPMrootDomain::start( IOService * nub ) { - OSIterator *psIterator; - OSDictionary *tmpDict; - IORootParent * patriarch; + OSSharedPtr psIterator; + OSSharedPtr tmpDict; super::start(nub); @@ -1360,6 +1497,7 @@ IOPMrootDomain::start( IOService * nub ) gIOPMSettingSilentRunningKey = OSSymbol::withCStringNoCopy(kIOPMSettingSilentRunningKey); gIOPMUserTriggeredFullWakeKey = OSSymbol::withCStringNoCopy(kIOPMUserTriggeredFullWakeKey); gIOPMUserIsActiveKey = OSSymbol::withCStringNoCopy(kIOPMUserIsActiveKey); + gIOPMSettingLowLatencyAudioModeKey = OSSymbol::withCStringNoCopy(kIOPMSettingLowLatencyAudioModeKey); gIOPMStatsResponseTimedOut = OSSymbol::withCString(kIOPMStatsResponseTimedOut); gIOPMStatsResponseCancel = OSSymbol::withCString(kIOPMStatsResponseCancel); @@ -1369,8 +1507,9 @@ IOPMrootDomain::start( IOService * nub ) sleepSupportedPEFunction = OSSymbol::withCString("IOPMSetSleepSupported"); sleepMessagePEFunction = OSSymbol::withCString("IOPMSystemSleepMessage"); + gIOPMWakeTypeUserKey = OSSymbol::withCStringNoCopy(kIOPMRootDomainWakeTypeUser); - const OSSymbol *settingsArr[kRootDomainSettingsCount] = + OSSharedPtr settingsArr[kRootDomainSettingsCount] = { OSSymbol::withCString(kIOPMSettingSleepOnPowerButtonKey), gIOPMSettingAutoWakeSecondsKey, @@ -1391,16 +1530,24 @@ IOPMrootDomain::start( IOService * nub ) OSSymbol::withCString(kIOPMSettingProModeControl), OSSymbol::withCString(kIOPMSettingProModeDefer), gIOPMSettingSilentRunningKey, + gIOPMSettingLowLatencyAudioModeKey, }; - const OSSymbol *noPublishSettingsArr[kRootDomainNoPublishSettingsCount] = + OSSharedPtr noPublishSettingsArr[kRootDomainNoPublishSettingsCount] = { OSSymbol::withCString(kIOPMSettingProModeControl), OSSymbol::withCString(kIOPMSettingProModeDefer), gIOPMSettingSilentRunningKey, + gIOPMSettingLowLatencyAudioModeKey, }; +#if DEVELOPMENT || DEBUG +#if defined(XNU_TARGET_OS_OSX) PE_parse_boot_argn("darkwake", &gDarkWakeFlags, sizeof(gDarkWakeFlags)); + PE_parse_boot_argn("clamshell", &gClamshellFlags, 
sizeof(gClamshellFlags)); +#endif /* defined(XNU_TARGET_OS_OSX) */ +#endif /* DEVELOPMENT || DEBUG */ + PE_parse_boot_argn("noidle", &gNoIdleFlag, sizeof(gNoIdleFlag)); PE_parse_boot_argn("swd_sleeptimeout", &gSwdSleepTimeout, sizeof(gSwdSleepTimeout)); PE_parse_boot_argn("swd_waketimeout", &gSwdWakeTimeout, sizeof(gSwdWakeTimeout)); @@ -1438,11 +1585,12 @@ IOPMrootDomain::start( IOService * nub ) &updateConsoleUsersCallout, (thread_call_param_t) this); -#if DARK_TO_FULL_EVALUATE_CLAMSHELL - fullWakeThreadCall = thread_call_allocate( +#if DARK_TO_FULL_EVALUATE_CLAMSHELL_DELAY + fullWakeThreadCall = thread_call_allocate_with_options( OSMemberFunctionCast(thread_call_func_t, this, &IOPMrootDomain::fullWakeDelayedWork), - (thread_call_param_t) this); + (thread_call_param_t) this, THREAD_CALL_PRIORITY_KERNEL, + THREAD_CALL_OPTIONS_ONCE); #endif setProperty(kIOSleepSupportedKey, true); @@ -1459,11 +1607,16 @@ IOPMrootDomain::start( IOService * nub ) sleepSlider = 0; idleSleepTimerPending = false; wrangler = NULL; - clamshellClosed = false; - clamshellExists = false; - clamshellDisabled = true; + clamshellClosed = false; + clamshellExists = false; +#if DISPLAY_WRANGLER_PRESENT + clamshellDisabled = true; +#else + clamshellDisabled = false; +#endif + clamshellIgnoreClose = false; acAdaptorConnected = true; - clamshellSleepDisabled = false; + clamshellSleepDisableMask = 0; gWakeReasonString[0] = '\0'; // Initialize to user active. @@ -1471,7 +1624,7 @@ IOPMrootDomain::start( IOService * nub ) fullWakeReason = kFullWakeReasonLocalUser; userIsActive = userWasActive = true; clock_get_uptime(&gUserActiveAbsTime); - setProperty(gIOPMUserIsActiveKey, kOSBooleanTrue); + setProperty(gIOPMUserIsActiveKey.get(), kOSBooleanTrue); // Set the default system capabilities at boot. _currentCapability = kIOPMSystemCapabilityCPU | @@ -1500,8 +1653,14 @@ IOPMrootDomain::start( IOService * nub ) idxPMCPULimitedPower = kCPUUnknownIndex; tmpDict = OSDictionary::withCapacity(1); - setProperty(kRootDomainSupportedFeatures, tmpDict); - tmpDict->release(); + setProperty(kRootDomainSupportedFeatures, tmpDict.get()); + + // Set a default "SystemPowerProfileOverrideDict" for platform + // drivers without any overrides. 
+ if (!propertyExists(kIOPMSystemDefaultOverrideKey)) { + tmpDict = OSDictionary::withCapacity(1); + setProperty(kIOPMSystemDefaultOverrideKey, tmpDict.get()); + } settingsCallbacks = OSDictionary::withCapacity(1); @@ -1537,70 +1696,59 @@ IOPMrootDomain::start( IOService * nub ) _aotTimerES = IOTimerEventSource::timerEventSource(this, OSMemberFunctionCast(IOTimerEventSource::Action, this, &IOPMrootDomain::aotEvaluate)); - gIOPMWorkLoop->addEventSource(_aotTimerES); + gIOPMWorkLoop->addEventSource(_aotTimerES.get()); // create our power parent - patriarch = new IORootParent; - patriarch->init(); - patriarch->attach(this); - patriarch->start(this); - patriarch->addPowerChild(this); + gPatriarch = new IORootParent; + gPatriarch->init(); + gPatriarch->attach(this); + gPatriarch->start(this); + gPatriarch->addPowerChild(this); registerPowerDriver(this, ourPowerStates, NUM_POWER_STATES); - changePowerStateToPriv(ON_STATE); + changePowerStateWithTagToPriv(ON_STATE, kCPSReasonInit); // install power change handler gSysPowerDownNotifier = registerPrioritySleepWakeInterest( &sysPowerDownHandler, this, NULL); -#if !NO_KERNEL_HID - // Register for a notification when IODisplayWrangler is published - if ((tmpDict = serviceMatching("IODisplayWrangler"))) { - _displayWranglerNotifier = addMatchingNotification( - gIOPublishNotification, tmpDict, - (IOServiceMatchingNotificationHandler) & displayWranglerMatchPublished, - this, NULL); - tmpDict->release(); - } -#endif - -#if defined(__i386__) || defined(__x86_64__) - - wranglerIdleSettings = NULL; - OSNumber * wranglerIdlePeriod = NULL; +#if DISPLAY_WRANGLER_PRESENT wranglerIdleSettings = OSDictionary::withCapacity(1); - wranglerIdlePeriod = OSNumber::withNumber(kDefaultWranglerIdlePeriod, 32); + OSSharedPtr wranglerIdlePeriod = OSNumber::withNumber(kDefaultWranglerIdlePeriod, 32); if (wranglerIdleSettings && wranglerIdlePeriod) { wranglerIdleSettings->setObject(kIORequestWranglerIdleKey, - wranglerIdlePeriod); + wranglerIdlePeriod.get()); } - if (wranglerIdlePeriod) { - wranglerIdlePeriod->release(); +#endif /* DISPLAY_WRANGLER_PRESENT */ + + lowLatencyAudioNotifierDict = OSDictionary::withCapacity(2); + lowLatencyAudioNotifyStateSym = OSSymbol::withCString("LowLatencyAudioNotifyState"); + lowLatencyAudioNotifyTimestampSym = OSSymbol::withCString("LowLatencyAudioNotifyTimestamp"); + lowLatencyAudioNotifyStateVal = OSNumber::withNumber(0ull, 32); + lowLatencyAudioNotifyTimestampVal = OSNumber::withNumber(0ull, 64); + + if (lowLatencyAudioNotifierDict && lowLatencyAudioNotifyStateSym && lowLatencyAudioNotifyTimestampSym && + lowLatencyAudioNotifyStateVal && lowLatencyAudioNotifyTimestampVal) { + lowLatencyAudioNotifierDict->setObject(lowLatencyAudioNotifyStateSym.get(), lowLatencyAudioNotifyStateVal.get()); + lowLatencyAudioNotifierDict->setObject(lowLatencyAudioNotifyTimestampSym.get(), lowLatencyAudioNotifyTimestampVal.get()); } -#endif - const OSSymbol *ucClassName = OSSymbol::withCStringNoCopy("RootDomainUserClient"); - setProperty(gIOUserClientClassKey, (OSObject *) ucClassName); - ucClassName->release(); + OSSharedPtr ucClassName = OSSymbol::withCStringNoCopy("RootDomainUserClient"); + setProperty(gIOUserClientClassKey, const_cast(static_cast(ucClassName.get()))); // IOBacklightDisplay can take a long time to load at boot, or it may // not load at all if you're booting with clamshell closed. We publish // 'DisplayDims' here redundantly to get it published early and at all. 
- OSDictionary * matching; + OSSharedPtr matching; matching = serviceMatching("IOPMPowerSource"); - psIterator = getMatchingServices( matching ); - if (matching) { - matching->release(); - } + psIterator = getMatchingServices(matching.get()); + if (psIterator && psIterator->getNextObject()) { // There's at least one battery on the system, so we publish // 'DisplayDims' support for the LCD. publishFeature("DisplayDims"); } - if (psIterator) { - psIterator->release(); - } // read swd_panic boot-arg PE_parse_boot_argn("swd_panic", &gSwdPanic, sizeof(gSwdPanic)); @@ -1611,11 +1759,11 @@ IOPMrootDomain::start( IOService * nub ) sysctl_register_oid(&sysctl__debug_iokit); sysctl_register_oid(&sysctl__hw_targettype); -#if !CONFIG_EMBEDDED +#if defined(XNU_TARGET_OS_OSX) sysctl_register_oid(&sysctl__kern_progressmeterenable); sysctl_register_oid(&sysctl__kern_progressmeter); sysctl_register_oid(&sysctl__kern_wakereason); -#endif /* !CONFIG_EMBEDDED */ +#endif /* defined(XNU_TARGET_OS_OSX) */ sysctl_register_oid(&sysctl__kern_consoleoptions); sysctl_register_oid(&sysctl__kern_progressoptions); @@ -1624,6 +1772,11 @@ IOPMrootDomain::start( IOService * nub ) sysctl_register_oid(&sysctl__kern_aotmetrics); #if HIBERNATION +#if defined(__arm64__) + if (ppl_hib_hibernation_supported()) { + publishFeature(kIOHibernateFeatureKey); + } +#endif /* defined(__arm64__) */ IOHibernateSystemInit(this); #endif @@ -1644,20 +1797,21 @@ IOPMrootDomain::setProperties( OSObject * props_obj ) { IOReturn return_value = kIOReturnSuccess; OSDictionary *dict = OSDynamicCast(OSDictionary, props_obj); - OSBoolean *b; - OSNumber *n; - const OSSymbol *key; - OSObject *obj; - OSCollectionIterator * iter = NULL; + OSBoolean *b = NULL; + OSNumber *n = NULL; + const OSSymbol *key = NULL; + OSObject *obj = NULL; + OSSharedPtr iter; if (!dict) { return kIOReturnBadArgument; } bool clientEntitled = false; - obj = IOUserClient::copyClientEntitlement(current_task(), kRootDomainEntitlementSetProperty); - clientEntitled = (obj == kOSBooleanTrue); - OSSafeReleaseNULL(obj); + { + OSSharedPtr obj = IOUserClient::copyClientEntitlement(current_task(), kRootDomainEntitlementSetProperty); + clientEntitled = (obj == kOSBooleanTrue); + } if (!clientEntitled) { const char * errorSuffix = NULL; @@ -1665,12 +1819,12 @@ IOPMrootDomain::setProperties( OSObject * props_obj ) // IOPMSchedulePowerEvent() clients may not be entitled, but must be root. // That API can set 6 possible keys that are checked below. 
if ((dict->getCount() == 1) && - (dict->getObject(gIOPMSettingAutoWakeSecondsKey) || - dict->getObject(gIOPMSettingAutoPowerSecondsKey) || - dict->getObject(gIOPMSettingAutoWakeCalendarKey) || - dict->getObject(gIOPMSettingAutoPowerCalendarKey) || - dict->getObject(gIOPMSettingDebugWakeRelativeKey) || - dict->getObject(gIOPMSettingDebugPowerRelativeKey))) { + (dict->getObject(gIOPMSettingAutoWakeSecondsKey.get()) || + dict->getObject(gIOPMSettingAutoPowerSecondsKey.get()) || + dict->getObject(gIOPMSettingAutoWakeCalendarKey.get()) || + dict->getObject(gIOPMSettingAutoPowerCalendarKey.get()) || + dict->getObject(gIOPMSettingDebugWakeRelativeKey.get()) || + dict->getObject(gIOPMSettingDebugPowerRelativeKey.get()))) { return_value = IOUserClient::clientHasPrivilege(current_task(), kIOClientPrivilegeAdministrator); if (return_value != kIOReturnSuccess) { errorSuffix = "privileged"; @@ -1681,32 +1835,40 @@ IOPMrootDomain::setProperties( OSObject * props_obj ) } if (return_value != kIOReturnSuccess) { - OSString * procName = IOCopyLogNameForPID(proc_selfpid()); + OSSharedPtr procName(IOCopyLogNameForPID(proc_selfpid()), OSNoRetain); DLOG("%s failed, process %s is not %s\n", __func__, procName ? procName->getCStringNoCopy() : "", errorSuffix); - OSSafeReleaseNULL(procName); return return_value; } } - const OSSymbol *publish_simulated_battery_string = OSSymbol::withCString("SoftwareSimulatedBatteries"); - const OSSymbol *boot_complete_string = OSSymbol::withCString("System Boot Complete"); - const OSSymbol *sys_shutdown_string = OSSymbol::withCString("System Shutdown"); - const OSSymbol *stall_halt_string = OSSymbol::withCString("StallSystemAtHalt"); - const OSSymbol *battery_warning_disabled_string = OSSymbol::withCString("BatteryWarningsDisabled"); - const OSSymbol *idle_seconds_string = OSSymbol::withCString("System Idle Seconds"); - const OSSymbol *sleepdisabled_string = OSSymbol::withCString("SleepDisabled"); - const OSSymbol *ondeck_sleepwake_uuid_string = OSSymbol::withCString(kIOPMSleepWakeUUIDKey); - const OSSymbol *loginwindow_progress_string = OSSymbol::withCString(kIOPMLoginWindowProgressKey); - const OSSymbol *coredisplay_progress_string = OSSymbol::withCString(kIOPMCoreDisplayProgressKey); - const OSSymbol *coregraphics_progress_string = OSSymbol::withCString(kIOPMCoreGraphicsProgressKey); + OSSharedPtr publish_simulated_battery_string = OSSymbol::withCString("SoftwareSimulatedBatteries"); + OSSharedPtr boot_complete_string = OSSymbol::withCString("System Boot Complete"); + OSSharedPtr sys_shutdown_string = OSSymbol::withCString("System Shutdown"); + OSSharedPtr stall_halt_string = OSSymbol::withCString("StallSystemAtHalt"); + OSSharedPtr battery_warning_disabled_string = OSSymbol::withCString("BatteryWarningsDisabled"); + OSSharedPtr idle_seconds_string = OSSymbol::withCString("System Idle Seconds"); + OSSharedPtr sleepdisabled_string = OSSymbol::withCString("SleepDisabled"); + OSSharedPtr ondeck_sleepwake_uuid_string = OSSymbol::withCString(kIOPMSleepWakeUUIDKey); + OSSharedPtr loginwindow_progress_string = OSSymbol::withCString(kIOPMLoginWindowProgressKey); + OSSharedPtr coredisplay_progress_string = OSSymbol::withCString(kIOPMCoreDisplayProgressKey); + OSSharedPtr coregraphics_progress_string = OSSymbol::withCString(kIOPMCoreGraphicsProgressKey); +#if DEBUG || DEVELOPMENT + OSSharedPtr clamshell_close_string = OSSymbol::withCString("IOPMTestClamshellClose"); + OSSharedPtr clamshell_open_string = OSSymbol::withCString("IOPMTestClamshellOpen"); + OSSharedPtr ac_detach_string = 
OSSymbol::withCString("IOPMTestACDetach"); + OSSharedPtr ac_attach_string = OSSymbol::withCString("IOPMTestACAttach"); + OSSharedPtr desktopmode_set_string = OSSymbol::withCString("IOPMTestDesktopModeSet"); + OSSharedPtr desktopmode_remove_string = OSSymbol::withCString("IOPMTestDesktopModeRemove"); +#endif + #if HIBERNATION - const OSSymbol *hibernatemode_string = OSSymbol::withCString(kIOHibernateModeKey); - const OSSymbol *hibernatefile_string = OSSymbol::withCString(kIOHibernateFileKey); - const OSSymbol *hibernatefilemin_string = OSSymbol::withCString(kIOHibernateFileMinSizeKey); - const OSSymbol *hibernatefilemax_string = OSSymbol::withCString(kIOHibernateFileMaxSizeKey); - const OSSymbol *hibernatefreeratio_string = OSSymbol::withCString(kIOHibernateFreeRatioKey); - const OSSymbol *hibernatefreetime_string = OSSymbol::withCString(kIOHibernateFreeTimeKey); + OSSharedPtr hibernatemode_string = OSSymbol::withCString(kIOHibernateModeKey); + OSSharedPtr hibernatefile_string = OSSymbol::withCString(kIOHibernateFileKey); + OSSharedPtr hibernatefilemin_string = OSSymbol::withCString(kIOHibernateFileMinSizeKey); + OSSharedPtr hibernatefilemax_string = OSSymbol::withCString(kIOHibernateFileMaxSizeKey); + OSSharedPtr hibernatefreeratio_string = OSSymbol::withCString(kIOHibernateFreeRatioKey); + OSSharedPtr hibernatefreetime_string = OSSymbol::withCString(kIOHibernateFreeTimeKey); #endif iter = OSCollectionIterator::withCollection(dict); @@ -1717,61 +1879,61 @@ IOPMrootDomain::setProperties( OSObject * props_obj ) while ((key = (const OSSymbol *) iter->getNextObject()) && (obj = dict->getObject(key))) { - if (key->isEqualTo(publish_simulated_battery_string)) { + if (key->isEqualTo(publish_simulated_battery_string.get())) { if (OSDynamicCast(OSBoolean, obj)) { publishResource(key, kOSBooleanTrue); } - } else if (key->isEqualTo(idle_seconds_string)) { + } else if (key->isEqualTo(idle_seconds_string.get())) { if ((n = OSDynamicCast(OSNumber, obj))) { setProperty(key, n); idleSeconds = n->unsigned32BitValue(); } - } else if (key->isEqualTo(boot_complete_string)) { + } else if (key->isEqualTo(boot_complete_string.get())) { pmPowerStateQueue->submitPowerEvent(kPowerEventSystemBootCompleted); - } else if (key->isEqualTo(sys_shutdown_string)) { + } else if (key->isEqualTo(sys_shutdown_string.get())) { if ((b = OSDynamicCast(OSBoolean, obj))) { pmPowerStateQueue->submitPowerEvent(kPowerEventSystemShutdown, (void *) b); } - } else if (key->isEqualTo(battery_warning_disabled_string)) { + } else if (key->isEqualTo(battery_warning_disabled_string.get())) { setProperty(key, obj); } #if HIBERNATION - else if (key->isEqualTo(hibernatemode_string) || - key->isEqualTo(hibernatefilemin_string) || - key->isEqualTo(hibernatefilemax_string) || - key->isEqualTo(hibernatefreeratio_string) || - key->isEqualTo(hibernatefreetime_string)) { + else if (key->isEqualTo(hibernatemode_string.get()) || + key->isEqualTo(hibernatefilemin_string.get()) || + key->isEqualTo(hibernatefilemax_string.get()) || + key->isEqualTo(hibernatefreeratio_string.get()) || + key->isEqualTo(hibernatefreetime_string.get())) { if ((n = OSDynamicCast(OSNumber, obj))) { setProperty(key, n); } - } else if (key->isEqualTo(hibernatefile_string)) { + } else if (key->isEqualTo(hibernatefile_string.get())) { OSString * str = OSDynamicCast(OSString, obj); if (str) { setProperty(key, str); } } #endif - else if (key->isEqualTo(sleepdisabled_string)) { + else if (key->isEqualTo(sleepdisabled_string.get())) { if ((b = OSDynamicCast(OSBoolean, obj))) { 
setProperty(key, b); pmPowerStateQueue->submitPowerEvent(kPowerEventUserDisabledSleep, (void *) b); } - } else if (key->isEqualTo(ondeck_sleepwake_uuid_string)) { + } else if (key->isEqualTo(ondeck_sleepwake_uuid_string.get())) { obj->retain(); pmPowerStateQueue->submitPowerEvent(kPowerEventQueueSleepWakeUUID, (void *)obj); - } else if (key->isEqualTo(loginwindow_progress_string)) { + } else if (key->isEqualTo(loginwindow_progress_string.get())) { if (pmTracer && (n = OSDynamicCast(OSNumber, obj))) { uint32_t data = n->unsigned32BitValue(); pmTracer->traceComponentWakeProgress(kIOPMLoginWindowProgress, data); kdebugTrace(kPMLogComponentWakeProgress, 0, kIOPMLoginWindowProgress, data); } - } else if (key->isEqualTo(coredisplay_progress_string)) { + } else if (key->isEqualTo(coredisplay_progress_string.get())) { if (pmTracer && (n = OSDynamicCast(OSNumber, obj))) { uint32_t data = n->unsigned32BitValue(); pmTracer->traceComponentWakeProgress(kIOPMCoreDisplayProgress, data); kdebugTrace(kPMLogComponentWakeProgress, 0, kIOPMCoreDisplayProgress, data); } - } else if (key->isEqualTo(coregraphics_progress_string)) { + } else if (key->isEqualTo(coregraphics_progress_string.get())) { if (pmTracer && (n = OSDynamicCast(OSNumber, obj))) { uint32_t data = n->unsigned32BitValue(); pmTracer->traceComponentWakeProgress(kIOPMCoreGraphicsProgress, data); @@ -1780,7 +1942,7 @@ IOPMrootDomain::setProperties( OSObject * props_obj ) } else if (key->isEqualTo(kIOPMDeepSleepEnabledKey) || key->isEqualTo(kIOPMDestroyFVKeyOnStandbyKey) || key->isEqualTo(kIOPMAutoPowerOffEnabledKey) || - key->isEqualTo(stall_halt_string)) { + key->isEqualTo(stall_halt_string.get())) { if ((b = OSDynamicCast(OSBoolean, obj))) { setProperty(key, b); } @@ -1793,101 +1955,51 @@ IOPMrootDomain::setProperties( OSObject * props_obj ) } } else if (key->isEqualTo(kIOPMUserWakeAlarmScheduledKey)) { if (kOSBooleanTrue == obj) { - OSBitOrAtomic(kIOPMAlarmBitCalendarWake, &_userScheduledAlarm); + OSBitOrAtomic(kIOPMAlarmBitCalendarWake, &_userScheduledAlarmMask); } else { - OSBitAndAtomic(~kIOPMAlarmBitCalendarWake, &_userScheduledAlarm); - } - DLOG("_userScheduledAlarm = 0x%x\n", (uint32_t) _userScheduledAlarm); + OSBitAndAtomic(~kIOPMAlarmBitCalendarWake, &_userScheduledAlarmMask); + } + DLOG("_userScheduledAlarmMask 0x%x\n", (uint32_t) _userScheduledAlarmMask); + } +#if DEBUG || DEVELOPMENT + else if (key->isEqualTo(clamshell_close_string.get())) { + DLOG("SetProperties: setting clamshell close\n"); + UInt32 msg = kIOPMClamshellClosed; + pmPowerStateQueue->submitPowerEvent(kPowerEventReceivedPowerNotification, (void *)(uintptr_t)msg); + } else if (key->isEqualTo(clamshell_open_string.get())) { + DLOG("SetProperties: setting clamshell open\n"); + UInt32 msg = kIOPMClamshellOpened; + pmPowerStateQueue->submitPowerEvent(kPowerEventReceivedPowerNotification, (void *)(uintptr_t)msg); + } else if (key->isEqualTo(ac_detach_string.get())) { + DLOG("SetProperties: setting ac detach\n"); + UInt32 msg = kIOPMSetACAdaptorConnected; + pmPowerStateQueue->submitPowerEvent(kPowerEventReceivedPowerNotification, (void *)(uintptr_t)msg); + } else if (key->isEqualTo(ac_attach_string.get())) { + DLOG("SetProperties: setting ac attach\n"); + UInt32 msg = kIOPMSetACAdaptorConnected | kIOPMSetValue; + pmPowerStateQueue->submitPowerEvent(kPowerEventReceivedPowerNotification, (void *)(uintptr_t)msg); + } else if (key->isEqualTo(desktopmode_set_string.get())) { + DLOG("SetProperties: setting desktopmode"); + UInt32 msg = kIOPMSetDesktopMode | kIOPMSetValue; + 
pmPowerStateQueue->submitPowerEvent(kPowerEventReceivedPowerNotification, (void *)(uintptr_t)msg); + } else if (key->isEqualTo(desktopmode_remove_string.get())) { + DLOG("SetProperties: removing desktopmode\n"); + UInt32 msg = kIOPMSetDesktopMode; + pmPowerStateQueue->submitPowerEvent(kPowerEventReceivedPowerNotification, (void *)(uintptr_t)msg); } +#endif // Relay our allowed PM settings onto our registered PM clients else if ((allowedPMSettings->getNextIndexOfObject(key, 0) != (unsigned int) -1)) { return_value = setPMSetting(key, obj); if (kIOReturnSuccess != return_value) { break; } - - if (gIOPMSettingDebugWakeRelativeKey == key) { - if ((n = OSDynamicCast(OSNumber, obj)) && - (_debugWakeSeconds = n->unsigned32BitValue())) { - OSBitOrAtomic(kIOPMAlarmBitDebugWake, &_scheduledAlarms); - } else { - _debugWakeSeconds = 0; - OSBitAndAtomic(~kIOPMAlarmBitDebugWake, &_scheduledAlarms); - } - DLOG("_scheduledAlarms = 0x%x\n", (uint32_t) _scheduledAlarms); - } else if (gIOPMSettingAutoWakeCalendarKey == key) { - OSData * data; - if ((data = OSDynamicCast(OSData, obj)) && - (data->getLength() == sizeof(IOPMCalendarStruct))) { - const IOPMCalendarStruct * cs = - (const IOPMCalendarStruct *) data->getBytesNoCopy(); - IOLog("gIOPMSettingAutoWakeCalendarKey " YMDTF "\n", YMDT(cs)); - if (cs->year) { - _scheduledAlarmUTC = IOPMConvertCalendarToSeconds(cs); - OSBitOrAtomic(kIOPMAlarmBitCalendarWake, &_scheduledAlarms); - } else { - _scheduledAlarmUTC = 0; - OSBitAndAtomic(~kIOPMAlarmBitCalendarWake, &_scheduledAlarms); - } - DLOG("_scheduledAlarms = 0x%x\n", (uint32_t) _scheduledAlarms); - } - } } else { DLOG("setProperties(%s) not handled\n", key->getCStringNoCopy()); } } exit: - if (publish_simulated_battery_string) { - publish_simulated_battery_string->release(); - } - if (boot_complete_string) { - boot_complete_string->release(); - } - if (sys_shutdown_string) { - sys_shutdown_string->release(); - } - if (stall_halt_string) { - stall_halt_string->release(); - } - if (battery_warning_disabled_string) { - battery_warning_disabled_string->release(); - } - if (idle_seconds_string) { - idle_seconds_string->release(); - } - if (sleepdisabled_string) { - sleepdisabled_string->release(); - } - if (ondeck_sleepwake_uuid_string) { - ondeck_sleepwake_uuid_string->release(); - } - if (loginwindow_progress_string) { - loginwindow_progress_string->release(); - } - if (coredisplay_progress_string) { - coredisplay_progress_string->release(); - } - if (coregraphics_progress_string) { - coregraphics_progress_string->release(); - } -#if HIBERNATION - if (hibernatemode_string) { - hibernatemode_string->release(); - } - if (hibernatefile_string) { - hibernatefile_string->release(); - } - if (hibernatefreeratio_string) { - hibernatefreeratio_string->release(); - } - if (hibernatefreetime_string) { - hibernatefreetime_string->release(); - } -#endif - if (iter) { - iter->release(); - } return return_value; } @@ -1921,6 +2033,10 @@ IOPMrootDomain::setAggressiveness( AggressivesRequest * request; bool found = false; + if ((type > UINT_MAX) || (value > UINT_MAX)) { + return kIOReturnBadArgument; + } + if (type == kPMMinutesToDim || type == kPMMinutesToSleep) { DLOG("setAggressiveness(%x) %s = %u\n", (uint32_t) options, getAggressivenessTypeString((uint32_t) type), (uint32_t) value); @@ -1958,7 +2074,7 @@ IOPMrootDomain::setAggressiveness( if ((entry->dataType == kAggressivesRequestTypeRecord) && (entry->data.record.type == type) && ((entry->options & kAggressivesOptionQuickSpindownMask) == 0)) { - entry->data.record.value 
= value; + entry->data.record.value = (uint32_t) value; found = true; break; } @@ -1999,7 +2115,7 @@ IOPMrootDomain::getAggressiveness( uint32_t value = 0; int source = 0; - if (!outLevel) { + if (!outLevel || (type > UINT_MAX)) { return kIOReturnBadArgument; } @@ -2083,11 +2199,9 @@ IOPMrootDomain::joinAggressiveness( return kIOReturnNoMemory; } - service->retain(); // released by synchronizeAggressives() - memset(request, 0, sizeof(*request)); request->dataType = kAggressivesRequestTypeService; - request->data.service = service; + request->data.service.reset(service, OSRetain); // released by synchronizeAggressives() AGGRESSIVES_LOCK(); queue_enter(&aggressivesQueue, request, AggressivesRequest *, chain); @@ -2269,7 +2383,7 @@ IOPMrootDomain::synchronizeAggressives( const AggressivesRecord * array, int count ) { - IOService * service; + OSSharedPtr service; AggressivesRequest * request; const AggressivesRecord * record; IOPMDriverCallEntry callEntry; @@ -2279,9 +2393,10 @@ IOPMrootDomain::synchronizeAggressives( while (!queue_empty(joinedQueue)) { queue_remove_first(joinedQueue, request, AggressivesRequest *, chain); if (request->dataType == kAggressivesRequestTypeService) { - service = request->data.service; + // retained by joinAggressiveness(), so take ownership + service = os::move(request->data.service); } else { - service = NULL; + service.reset(); } IODelete(request, AggressivesRequest, 1); @@ -2301,7 +2416,6 @@ IOPMrootDomain::synchronizeAggressives( } service->deassertPMDriverCall(&callEntry); } - service->release(); // retained by joinAggressiveness() } } } @@ -2317,19 +2431,21 @@ IOPMrootDomain::broadcastAggressives( const AggressivesRecord * array, int count ) { - IORegistryIterator * iter; - IORegistryEntry * entry; - IOPowerConnection * connect; - IOService * service; - const AggressivesRecord * record; - IOPMDriverCallEntry callEntry; - uint32_t value; - int i; + OSSharedPtr iter; + IORegistryEntry *entry; + OSSharedPtr child; + IOPowerConnection *connect; + IOService *service; + const AggressivesRecord *record; + IOPMDriverCallEntry callEntry; + uint32_t value; + int i; iter = IORegistryIterator::iterateOver( this, gIOPowerPlane, kIORegistryIterateRecursively); if (iter) { do{ + // !! 
reset the iterator iter->reset(); while ((entry = iter->getNextObject())) { connect = OSDynamicCast(IOPowerConnection, entry); @@ -2337,26 +2453,27 @@ IOPMrootDomain::broadcastAggressives( continue; } - if ((service = OSDynamicCast(IOService, connect->copyChildEntry(gIOPowerPlane)))) { - if (service->assertPMDriverCall(&callEntry, kIOPMDriverCallMethodSetAggressive)) { - for (i = 0, record = array; i < count; i++, record++) { - if (record->flags & kAggressivesRecordFlagModified) { - value = record->value; - if (record->flags & kAggressivesRecordFlagMinValue) { - value = kAggressivesMinValue; + child = connect->copyChildEntry(gIOPowerPlane); + if (child) { + if ((service = OSDynamicCast(IOService, child.get()))) { + if (service->assertPMDriverCall(&callEntry, kIOPMDriverCallMethodSetAggressive)) { + for (i = 0, record = array; i < count; i++, record++) { + if (record->flags & kAggressivesRecordFlagModified) { + value = record->value; + if (record->flags & kAggressivesRecordFlagMinValue) { + value = kAggressivesMinValue; + } + _LOG("broadcastAggressives %x = %u to %s\n", + record->type, value, service->getName()); + service->setAggressiveness(record->type, value); } - _LOG("broadcastAggressives %x = %u to %s\n", - record->type, value, service->getName()); - service->setAggressiveness(record->type, value); } + service->deassertPMDriverCall(&callEntry); } - service->deassertPMDriverCall(&callEntry); } - service->release(); } } }while (!entry && !iter->isValid()); - iter->release(); } } @@ -2471,14 +2588,10 @@ IOPMrootDomain::handleSleepTimerExpiration( void ) return; } - AbsoluteTime time; - DLOG("sleep timer expired\n"); ASSERT_GATED(); idleSleepTimerPending = false; - - clock_get_uptime(&time); setQuickSpinDownTimeout(); adjustPowerState(true); } @@ -2497,7 +2610,7 @@ IOPMrootDomain::getTimeToIdleSleep( void ) AbsoluteTime now, lastActivityTime; uint64_t nanos; uint32_t minutesSinceUserInactive = 0; - uint32_t sleepDelay = 0; + uint32_t sleepDelay = 0; if (!idleSleepEnabled) { return 0xffffffff; @@ -2509,8 +2622,14 @@ IOPMrootDomain::getTimeToIdleSleep( void ) lastActivityTime = userBecameInactiveTime; } + // Ignore any lastActivityTime that predates the last system wake. + // The goal is to avoid a sudden idle sleep right after a dark wake + // due to sleepDelay=0 computed below. The alternative 60s minimum + // timeout should be large enough to allow dark wake to complete, + // at which point the idle timer will be promptly cancelled. 
clock_get_uptime(&now); - if (CMP_ABSOLUTETIME(&now, &lastActivityTime) > 0) { + if ((CMP_ABSOLUTETIME(&lastActivityTime, &gIOLastWakeAbsTime) >= 0) && + (CMP_ABSOLUTETIME(&now, &lastActivityTime) > 0)) { SUB_ABSOLUTETIME(&now, &lastActivityTime); absolutetime_to_nanoseconds(now, &nanos); minutesSinceUserInactive = nanos / (60000000000ULL); @@ -2521,6 +2640,8 @@ IOPMrootDomain::getTimeToIdleSleep( void ) sleepDelay = sleepSlider - minutesSinceUserInactive; } } else { + DLOG("ignoring lastActivityTime 0x%qx, now 0x%qx, wake 0x%qx\n", + lastActivityTime, now, gIOLastWakeAbsTime); sleepDelay = sleepSlider; } @@ -2592,6 +2713,9 @@ IOPMrootDomain::sleepSystemOptions( OSDictionary *options ) if (reason && reason->isEqualTo(kIOPMDarkWakeThermalEmergencyKey)) { return privateSleepSystem(kIOPMSleepReasonDarkWakeThermalEmergency); } + if (reason && reason->isEqualTo(kIOPMNotificationWakeExitKey)) { + return privateSleepSystem(kIOPMSleepReasonNotificationWakeExit); + } } return privateSleepSystem( kIOPMSleepReasonSoftware); @@ -2626,10 +2750,11 @@ IOPMrootDomain::powerChangeDone( unsigned long previousPowerState ) #if !__i386__ && !__x86_64__ uint64_t timeSinceReset = 0; #endif - uint64_t now; - unsigned long newState; + uint64_t now; + unsigned long newState; clock_sec_t secs; clock_usec_t microsecs; + uint32_t lastDebugWakeSeconds; clock_sec_t adjWakeTime; IOPMCalendarStruct nowCalendar; @@ -2651,9 +2776,9 @@ IOPMrootDomain::powerChangeDone( unsigned long previousPowerState ) PEGetUTCTimeOfDay(&secs, µsecs); adjWakeTime = 0; - if ((kIOPMAOTModeRespectTimers & _aotMode) && (_scheduledAlarmUTC < _aotWakeTimeUTC)) { - IOLog("use _scheduledAlarmUTC\n"); - adjWakeTime = _scheduledAlarmUTC; + if ((kIOPMAOTModeRespectTimers & _aotMode) && (_calendarWakeAlarmUTC < _aotWakeTimeUTC)) { + IOLog("use _calendarWakeAlarmUTC\n"); + adjWakeTime = _calendarWakeAlarmUTC; } else if (_aotExit || (kIOPMWakeEventAOTExitFlags & _aotPendingFlags)) { IOLog("accelerate _aotWakeTime for exit\n"); adjWakeTime = secs; @@ -2680,11 +2805,32 @@ IOPMrootDomain::powerChangeDone( unsigned long previousPowerState ) } } _aotPendingFlags &= ~kIOPMWakeEventAOTPerCycleFlags; - acceptSystemWakeEvents(true); + if (_aotTimerScheduled) { + _aotTimerES->cancelTimeout(); + _aotTimerScheduled = false; + } + acceptSystemWakeEvents(kAcceptSystemWakeEvents_Enable); // re-enable this timer for next sleep cancelIdleSleepTimer(); + if (clamshellExists) { +#if DARK_TO_FULL_EVALUATE_CLAMSHELL_DELAY + if (gClamshellFlags & kClamshell_WAR_58009435) { + // Disable clamshell sleep until system has completed full wake. + // This prevents a system sleep request (due to a clamshell close) + // from being queued until the end of system full wake - even if + // other clamshell disable bits outside of our control is wrong. 
+ setClamShellSleepDisable(true, kClamshellSleepDisableInternal); + } +#endif + + // Log the last known clamshell state before system sleep + DLOG("clamshell closed %d, disabled %d/%x, desktopMode %d, ac %d\n", + clamshellClosed, clamshellDisabled, clamshellSleepDisableMask, + desktopMode, acAdaptorConnected); + } + clock_get_calendar_absolute_and_microtime(&secs, µsecs, &now); logtime(secs); gIOLastSleepTime.tv_sec = secs; @@ -2720,14 +2866,14 @@ IOPMrootDomain::powerChangeDone( unsigned long previousPowerState ) LOG("System Sleep\n"); #endif if (thermalWarningState) { - const OSSymbol *event = OSSymbol::withCString(kIOPMThermalLevelWarningKey); + OSSharedPtr event = OSSymbol::withCString(kIOPMThermalLevelWarningKey); if (event) { - systemPowerEventOccurred(event, kIOPMThermalLevelUnknown); - event->release(); + systemPowerEventOccurred(event.get(), kIOPMThermalLevelUnknown); } } assertOnWakeSecs = 0; lowBatteryCondition = false; + thermalEmergencyState = false; #if DEVELOPMENT || DEBUG extern int g_should_log_clock_adjustments; @@ -2799,48 +2945,53 @@ IOPMrootDomain::powerChangeDone( unsigned long previousPowerState ) lastSleepReason = 0; - _lastDebugWakeSeconds = _debugWakeSeconds; - _debugWakeSeconds = 0; - _scheduledAlarms = 0; + lastDebugWakeSeconds = _debugWakeSeconds; + _debugWakeSeconds = 0; + _scheduledAlarmMask = 0; + _nextScheduledAlarmType = NULL; -#if defined(__i386__) || defined(__x86_64__) - kdebugTrace(kPMLogSystemWake, 0, 0, 0); - wranglerTickled = false; - graphicsSuppressed = false; + darkWakeExit = false; + darkWakePowerClamped = false; darkWakePostTickle = false; darkWakeHibernateError = false; darkWakeToSleepASAP = true; - logGraphicsClamp = true; + darkWakeLogClamp = true; sleepTimerMaintenance = false; sleepToStandby = false; - wranglerTickleLatched = false; + wranglerTickled = false; userWasActive = false; isRTCAlarmWake = false; + clamshellIgnoreClose = false; fullWakeReason = kFullWakeReasonNone; - OSString * wakeType = OSDynamicCast( - OSString, getProperty(kIOPMRootDomainWakeTypeKey)); - OSString * wakeReason = OSDynamicCast( - OSString, getProperty(kIOPMRootDomainWakeReasonKey)); +#if defined(__i386__) || defined(__x86_64__) + kdebugTrace(kPMLogSystemWake, 0, 0, 0); + + OSSharedPtr wakeTypeProp = copyProperty(kIOPMRootDomainWakeTypeKey); + OSSharedPtr wakeReasonProp = copyProperty(kIOPMRootDomainWakeReasonKey); + OSString * wakeType = OSDynamicCast(OSString, wakeTypeProp.get()); + OSString * wakeReason = OSDynamicCast(OSString, wakeReasonProp.get()); if (wakeReason && (wakeReason->getLength() >= 2) && gWakeReasonString[0] == '\0') { + WAKEEVENT_LOCK(); // Until the platform driver can claim its wake reasons strlcat(gWakeReasonString, wakeReason->getCStringNoCopy(), sizeof(gWakeReasonString)); + WAKEEVENT_UNLOCK(); } if (wakeType && wakeType->isEqualTo(kIOPMrootDomainWakeTypeLowBattery)) { lowBatteryCondition = true; darkWakeMaintenance = true; - } else if ((gDarkWakeFlags & kDarkWakeFlagHIDTickleMask) != 0) { + } else { #if HIBERNATION - OSNumber * hibOptions = OSDynamicCast( - OSNumber, getProperty(kIOHibernateOptionsKey)); + OSSharedPtr hibOptionsProp = copyProperty(kIOHibernateOptionsKey); + OSNumber * hibOptions = OSDynamicCast( OSNumber, hibOptionsProp.get()); if (hibernateAborted || ((hibOptions && !(hibOptions->unsigned32BitValue() & kIOHibernateOptionDarkWake)))) { // Hibernate aborted, or EFI brought up graphics - wranglerTickled = true; + darkWakeExit = true; if (hibernateAborted) { DLOG("Hibernation aborted\n"); } else { @@ -2852,7 +3003,7 @@ 
IOPMrootDomain::powerChangeDone( unsigned long previousPowerState ) wakeType->isEqualTo(kIOPMRootDomainWakeTypeUser) || wakeType->isEqualTo(kIOPMRootDomainWakeTypeAlarm))) { // User wake or RTC alarm - wranglerTickled = true; + darkWakeExit = true; if (wakeType->isEqualTo(kIOPMRootDomainWakeTypeAlarm)) { isRTCAlarmWake = true; } @@ -2861,10 +3012,10 @@ IOPMrootDomain::powerChangeDone( unsigned long previousPowerState ) // SMC standby timer trumps SleepX darkWakeMaintenance = true; sleepTimerMaintenance = true; - } else if ((_lastDebugWakeSeconds != 0) && + } else if ((lastDebugWakeSeconds != 0) && ((gDarkWakeFlags & kDarkWakeFlagAlarmIsDark) == 0)) { // SleepX before maintenance - wranglerTickled = true; + darkWakeExit = true; } else if (wakeType && wakeType->isEqualTo(kIOPMRootDomainWakeTypeMaintenance)) { darkWakeMaintenance = true; @@ -2885,55 +3036,47 @@ IOPMrootDomain::powerChangeDone( unsigned long previousPowerState ) // Unidentified wake source, resume to full wake if debug // alarm is pending. - if (_lastDebugWakeSeconds && + if (lastDebugWakeSeconds && (!wakeReason || wakeReason->isEqualTo(""))) { - wranglerTickled = true; + darkWakeExit = true; } } - } else { - if (wakeType && - wakeType->isEqualTo(kIOPMRootDomainWakeTypeSleepTimer)) { - darkWakeMaintenance = true; - sleepTimerMaintenance = true; - } else if (hibernateAborted || !wakeType || - !wakeType->isEqualTo(kIOPMRootDomainWakeTypeMaintenance) || - !wakeReason || !wakeReason->isEqualTo("RTC")) { - // Post a HID tickle immediately - except for RTC maintenance wake. - wranglerTickled = true; - } else { - darkWakeMaintenance = true; - } } - if (wranglerTickled) { + if (darkWakeExit) { darkWakeToSleepASAP = false; fullWakeReason = kFullWakeReasonLocalUser; reportUserInput(); } else if (displayPowerOnRequested && checkSystemCanSustainFullWake()) { - handleDisplayPowerOn(); + handleSetDisplayPowerOn(true); } else if (!darkWakeMaintenance) { // Early/late tickle for non-maintenance wake. - if (((gDarkWakeFlags & kDarkWakeFlagHIDTickleMask) == - kDarkWakeFlagHIDTickleEarly) || - ((gDarkWakeFlags & kDarkWakeFlagHIDTickleMask) == - kDarkWakeFlagHIDTickleLate)) { + if ((gDarkWakeFlags & kDarkWakeFlagPromotionMask) != kDarkWakeFlagPromotionNone) { darkWakePostTickle = true; } } #else /* !__i386__ && !__x86_64__ */ timeSinceReset = ml_get_time_since_reset(); + kdebugTrace(kPMLogSystemWake, 0, (uintptr_t)(timeSinceReset >> 32), (uintptr_t) timeSinceReset); + + if ((gDarkWakeFlags & kDarkWakeFlagPromotionMask) == kDarkWakeFlagPromotionEarly) { + wranglerTickled = true; + fullWakeReason = kFullWakeReasonLocalUser; + requestUserActive(this, "Full wake on dark wake promotion boot-arg"); + } else if ((lastDebugWakeSeconds != 0) && !(gDarkWakeFlags & kDarkWakeFlagAlarmIsDark)) { + isRTCAlarmWake = true; + fullWakeReason = kFullWakeReasonLocalUser; + requestUserActive(this, "RTC debug alarm"); + } - kdebugTrace(kPMLogSystemWake, 0, timeSinceReset >> 32, timeSinceReset); // stay awake for at least 30 seconds - wranglerTickled = true; - fullWakeReason = kFullWakeReasonLocalUser; startIdleSleepTimer(30); #endif sleepCnt++; thread_call_enter(updateConsoleUsersEntry); - changePowerStateToPriv(getRUN_STATE()); + changePowerStateWithTagToPriv(getRUN_STATE(), kCPSReasonWake); break; } #if !__i386__ && !__x86_64__ @@ -2952,7 +3095,7 @@ IOPMrootDomain::powerChangeDone( unsigned long previousPowerState ) // state since the changePowerStateToPriv() issued at the tail // end of SLEEP_STATE case should take care of that. 
if (getPowerState() == ON_STATE) { - changePowerStateToPriv(ON_STATE); + changePowerStateWithTagToPriv(ON_STATE, kCPSReasonWake); } break; } @@ -2980,6 +3123,30 @@ IOPMrootDomain::requestPowerDomainState( } +static void +makeSleepPreventersListLog(const OSSharedPtr &preventers, char *buf, size_t buf_size) +{ + if (!preventers->getCount()) { + return; + } + + char *buf_iter = buf + strlen(buf); + char *buf_end = buf + buf_size; + + OSSharedPtr iterator = OSCollectionIterator::withCollection(preventers.get()); + OSObject *obj = NULL; + + while ((obj = iterator->getNextObject())) { + IOService *srv = OSDynamicCast(IOService, obj); + if (buf_iter < buf_end) { + buf_iter += snprintf(buf_iter, buf_end - buf_iter, " %s", srv->getName()); + } else { + DLOG("Print buffer exhausted for sleep preventers list\n"); + break; + } + } +} + //****************************************************************************** // updatePreventIdleSleepList // @@ -3006,9 +3173,10 @@ IOPMrootDomain::updatePreventIdleSleepListInternal( ASSERT_GATED(); -#if defined(__i386__) || defined(__x86_64__) - // Disregard disk I/O (besides the display wrangler) as a factor preventing - // idle sleep, except in the case of legacy disk I/O +#if defined(XNU_TARGET_OS_OSX) + // Only the display wrangler and no-idle-sleep kernel assertions + // can prevent idle sleep. The kIOPMPreventIdleSleep capability flag + // reported by drivers in their power state table is ignored. if (service && (service != wrangler) && (service != this)) { return false; } @@ -3017,14 +3185,21 @@ IOPMrootDomain::updatePreventIdleSleepListInternal( if (service) { if (addNotRemove) { preventIdleSleepList->setObject(service); - DLOG("prevent idle sleep list: %s+ (%u)\n", + DLOG("Added %s to idle sleep preventers list (Total %u)\n", service->getName(), preventIdleSleepList->getCount()); } else if (preventIdleSleepList->member(service)) { preventIdleSleepList->removeObject(service); - DLOG("prevent idle sleep list: %s- (%u)\n", + DLOG("Removed %s from idle sleep preventers list (Total %u)\n", service->getName(), preventIdleSleepList->getCount()); } + + if (preventIdleSleepList->getCount()) { + char buf[256] = "Idle Sleep Preventers:"; + makeSleepPreventersListLog(preventIdleSleepList, buf, sizeof(buf)); + DLOG("%s\n", buf); + } } + newCount = idleSleepPreventersCount(); if ((oldCount == 0) && (newCount != 0)) { @@ -3032,18 +3207,18 @@ IOPMrootDomain::updatePreventIdleSleepListInternal( // Update the driver desire to prevent idle sleep. // Driver desire does not prevent demand sleep. - changePowerStateTo(getRUN_STATE()); + changePowerStateWithTagTo(getRUN_STATE(), kCPSReasonIdleSleepPrevent); } else if ((oldCount != 0) && (newCount == 0)) { // Last driver removed from prevent list. // Drop the driver clamp to allow idle sleep. 
- changePowerStateTo(SLEEP_STATE); + changePowerStateWithTagTo(SLEEP_STATE, kCPSReasonIdleSleepAllow); evaluatePolicy( kStimulusNoIdleSleepPreventers ); } - messageClient(kIOPMMessageIdleSleepPreventers, systemCapabilityNotifier, + messageClient(kIOPMMessageIdleSleepPreventers, systemCapabilityNotifier.get(), &newCount, sizeof(newCount)); -#if defined(__i386__) || defined(__x86_64__) +#if defined(XNU_TARGET_OS_OSX) if (addNotRemove && (service == wrangler) && !checkSystemCanSustainFullWake()) { DLOG("Cannot cancel idle sleep\n"); return false; // do not idle-cancel @@ -3083,7 +3258,7 @@ IOPMrootDomain::updatePreventSystemSleepList( oldCount = preventSystemSleepList->getCount(); if (addNotRemove) { preventSystemSleepList->setObject(service); - DLOG("prevent system sleep list: %s+ (%u)\n", + DLOG("Added %s to system sleep preventers list (Total %u)\n", service->getName(), preventSystemSleepList->getCount()); if (!assertOnWakeSecs && gIOLastWakeAbsTime) { AbsoluteTime now; @@ -3098,7 +3273,7 @@ IOPMrootDomain::updatePreventSystemSleepList( } } else if (preventSystemSleepList->member(service)) { preventSystemSleepList->removeObject(service); - DLOG("prevent system sleep list: %s- (%u)\n", + DLOG("Removed %s from system sleep preventers list (Total %u)\n", service->getName(), preventSystemSleepList->getCount()); if ((oldCount != 0) && (preventSystemSleepList->getCount() == 0)) { @@ -3107,17 +3282,24 @@ IOPMrootDomain::updatePreventSystemSleepList( evaluatePolicy( kStimulusDarkWakeEvaluate ); } } + newCount = preventSystemSleepList->getCount(); - messageClient(kIOPMMessageSystemSleepPreventers, systemCapabilityNotifier, + if (newCount) { + char buf[256] = "System Sleep Preventers:"; + makeSleepPreventersListLog(preventSystemSleepList, buf, sizeof(buf)); + DLOG("%s\n", buf); + } + + messageClient(kIOPMMessageSystemSleepPreventers, systemCapabilityNotifier.get(), &newCount, sizeof(newCount)); } void IOPMrootDomain::copySleepPreventersList(OSArray **idleSleepList, OSArray **systemSleepList) { - OSCollectionIterator *iterator = NULL; + OSSharedPtr iterator; OSObject *object = NULL; - OSArray *array = NULL; + OSSharedPtr array; if (!gIOPMWorkLoop->inGate()) { gIOPMWorkLoop->runAction( @@ -3128,42 +3310,48 @@ IOPMrootDomain::copySleepPreventersList(OSArray **idleSleepList, OSArray **syste } if (idleSleepList && preventIdleSleepList && (preventIdleSleepList->getCount() != 0)) { - iterator = OSCollectionIterator::withCollection(preventIdleSleepList); + iterator = OSCollectionIterator::withCollection(preventIdleSleepList.get()); array = OSArray::withCapacity(5); - while ((object = iterator->getNextObject())) { - IOService *service = OSDynamicCast(IOService, object); - if (object) { - array->setObject(OSSymbol::withCString(service->getName())); + if (iterator && array) { + while ((object = iterator->getNextObject())) { + IOService *service = OSDynamicCast(IOService, object); + if (service) { + OSSharedPtr name = service->copyName(); + if (name) { + array->setObject(name.get()); + } + } } } - - iterator->release(); - *idleSleepList = array; + *idleSleepList = array.detach(); } if (systemSleepList && preventSystemSleepList && (preventSystemSleepList->getCount() != 0)) { - iterator = OSCollectionIterator::withCollection(preventSystemSleepList); + iterator = OSCollectionIterator::withCollection(preventSystemSleepList.get()); array = OSArray::withCapacity(5); - while ((object = iterator->getNextObject())) { - IOService *service = OSDynamicCast(IOService, object); - if (object) { - 
array->setObject(OSSymbol::withCString(service->getName())); + if (iterator && array) { + while ((object = iterator->getNextObject())) { + IOService *service = OSDynamicCast(IOService, object); + if (service) { + OSSharedPtr name = service->copyName(); + if (name) { + array->setObject(name.get()); + } + } } } - - iterator->release(); - *systemSleepList = array; + *systemSleepList = array.detach(); } } void IOPMrootDomain::copySleepPreventersListWithID(OSArray **idleSleepList, OSArray **systemSleepList) { - OSCollectionIterator *iterator = NULL; + OSSharedPtr iterator; OSObject *object = NULL; - OSArray *array = NULL; + OSSharedPtr array; if (!gIOPMWorkLoop->inGate()) { gIOPMWorkLoop->runAction( @@ -3174,49 +3362,47 @@ IOPMrootDomain::copySleepPreventersListWithID(OSArray **idleSleepList, OSArray * } if (idleSleepList && preventIdleSleepList && (preventIdleSleepList->getCount() != 0)) { - iterator = OSCollectionIterator::withCollection(preventIdleSleepList); + iterator = OSCollectionIterator::withCollection(preventIdleSleepList.get()); array = OSArray::withCapacity(5); - while ((object = iterator->getNextObject())) { - IOService *service = OSDynamicCast(IOService, object); - if (object) { - OSDictionary *dict = OSDictionary::withCapacity(2); - if (dict) { - OSNumber *id = OSNumber::withNumber(service->getRegistryEntryID(), 64); - dict->setObject(kIOPMDriverAssertionRegistryEntryIDKey, id); - dict->setObject(kIOPMDriverAssertionOwnerStringKey, OSSymbol::withCString(service->getName())); - array->setObject(dict); - id->release(); - dict->release(); + if (iterator && array) { + while ((object = iterator->getNextObject())) { + IOService *service = OSDynamicCast(IOService, object); + if (service) { + OSSharedPtr dict = OSDictionary::withCapacity(2); + OSSharedPtr name = service->copyName(); + OSSharedPtr id = OSNumber::withNumber(service->getRegistryEntryID(), 64); + if (dict && name && id) { + dict->setObject(kIOPMDriverAssertionRegistryEntryIDKey, id.get()); + dict->setObject(kIOPMDriverAssertionOwnerStringKey, name.get()); + array->setObject(dict.get()); + } } } } - - iterator->release(); - *idleSleepList = array; + *idleSleepList = array.detach(); } if (systemSleepList && preventSystemSleepList && (preventSystemSleepList->getCount() != 0)) { - iterator = OSCollectionIterator::withCollection(preventSystemSleepList); + iterator = OSCollectionIterator::withCollection(preventSystemSleepList.get()); array = OSArray::withCapacity(5); - while ((object = iterator->getNextObject())) { - IOService *service = OSDynamicCast(IOService, object); - if (object) { - OSDictionary *dict = OSDictionary::withCapacity(2); - if (dict) { - OSNumber *id = OSNumber::withNumber(service->getRegistryEntryID(), 64); - dict->setObject(kIOPMDriverAssertionRegistryEntryIDKey, id); - dict->setObject(kIOPMDriverAssertionOwnerStringKey, OSSymbol::withCString(service->getName())); - array->setObject(dict); - id->release(); - dict->release(); + if (iterator && array) { + while ((object = iterator->getNextObject())) { + IOService *service = OSDynamicCast(IOService, object); + if (service) { + OSSharedPtr dict = OSDictionary::withCapacity(2); + OSSharedPtr name = service->copyName(); + OSSharedPtr id = OSNumber::withNumber(service->getRegistryEntryID(), 64); + if (dict && name && id) { + dict->setObject(kIOPMDriverAssertionRegistryEntryIDKey, id.get()); + dict->setObject(kIOPMDriverAssertionOwnerStringKey, name.get()); + array->setObject(dict.get()); + } } } } - - iterator->release(); - *systemSleepList = array; + *systemSleepList 
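Both copySleepPreventersList variants above now build the result inside an OSSharedPtr and hand ownership to the caller with detach() instead of pairing manual retain/release calls. The following is only a shape-level analogy using std::unique_ptr, since OSSharedPtr wraps IOKit's intrusive reference counting; unique_ptr::release() plays the role of detach() here:

#include <cstdio>
#include <memory>
#include <string>
#include <vector>

using NameList = std::vector<std::string>;

// Build the list locally, then transfer ownership through the out-parameter,
// as array.detach() does for the OSArray in the patch.
static void copyNames(NameList **outList)
{
    auto list = std::make_unique<NameList>();
    list->push_back("ExampleDriverA");
    list->push_back("ExampleDriverB");
    *outList = list.release();   // the caller now owns the allocation
}

int main()
{
    NameList *names = nullptr;
    copyNames(&names);
    for (const std::string &n : *names) {
        std::printf("%s\n", n.c_str());
    }
    delete names;                // caller releases, mirroring OSArray->release()
    return 0;
}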
= array.detach(); } } @@ -3352,16 +3538,17 @@ IOPMrootDomain::systemDidNotSleep( void ) // reset console lock state thread_call_enter(updateConsoleUsersEntry); - if (!wrangler) { - if (idleSleepEnabled) { - // stay awake for at least idleSeconds + if (idleSleepEnabled) { + if (!wrangler) { +#if defined(XNU_TARGET_OS_OSX) && !DISPLAY_WRANGLER_PRESENT + startIdleSleepTimer(kIdleSleepRetryInterval); +#else startIdleSleepTimer(idleSeconds); - } - } else { - if (idleSleepEnabled && !userIsActive) { +#endif + } else if (!userIsActive) { // Manually start the idle sleep timer besides waiting for // the user to become inactive. - startIdleSleepTimer( kIdleSleepRetryInterval ); + startIdleSleepTimer(kIdleSleepRetryInterval); } } @@ -3387,7 +3574,7 @@ IOPMrootDomain::systemDidNotSleep( void ) DLOG("MESG cap %x->%x did change\n", params.fromCapabilities, params.toCapabilities); - messageClient(kIOMessageSystemCapabilityChange, systemCapabilityNotifier, + messageClient(kIOMessageSystemCapabilityChange, systemCapabilityNotifier.get(), ¶ms, sizeof(params)); } } @@ -3441,7 +3628,7 @@ IOPMrootDomain::tellChangeUp( unsigned long stateNum ) // Notify platform that sleep was cancelled or resumed. getPlatform()->callPlatformFunction( - sleepMessagePEFunction, false, + sleepMessagePEFunction.get(), false, (void *)(uintptr_t) kIOMessageSystemHasPoweredOn, NULL, NULL, NULL); @@ -3562,7 +3749,7 @@ IOPMrootDomain::sysPowerDownHandler( // Notify platform that sleep has begun, after the early // sleep policy evaluation. getPlatform()->callPlatformFunction( - sleepMessagePEFunction, false, + sleepMessagePEFunction.get(), false, (void *)(uintptr_t) kIOMessageSystemWillSleep, NULL, NULL, NULL); @@ -3613,26 +3800,18 @@ IOPMrootDomain::sysPowerDownHandler( void IOPMrootDomain::handleQueueSleepWakeUUID(OSObject *obj) { - OSString *str = NULL; + OSSharedPtr str; if (kOSBooleanFalse == obj) { - handlePublishSleepWakeUUID(NULL); - } else if ((str = OSDynamicCast(OSString, obj))) { - // This branch caches the UUID for an upcoming sleep/wake - if (queuedSleepWakeUUIDString) { - queuedSleepWakeUUIDString->release(); - queuedSleepWakeUUIDString = NULL; + handlePublishSleepWakeUUID(false); + } else { + str.reset(OSDynamicCast(OSString, obj), OSNoRetain); + if (str) { + // This branch caches the UUID for an upcoming sleep/wake + queuedSleepWakeUUIDString = str; + DLOG("SleepWake UUID queued: %s\n", queuedSleepWakeUUIDString->getCStringNoCopy()); } - queuedSleepWakeUUIDString = str; - queuedSleepWakeUUIDString->retain(); - - DLOG("SleepWake UUID queued: %s\n", queuedSleepWakeUUIDString->getCStringNoCopy()); } - - if (obj) { - obj->release(); - } - return; } //****************************************************************************** // handlePublishSleepWakeUUID @@ -3665,21 +3844,18 @@ IOPMrootDomain::handlePublishSleepWakeUUID( bool shouldPublish ) * Optionally, publish a new UUID */ if (queuedSleepWakeUUIDString && shouldPublish) { - OSString *publishThisUUID = NULL; + OSSharedPtr publishThisUUID; publishThisUUID = queuedSleepWakeUUIDString; - publishThisUUID->retain(); if (publishThisUUID) { - setProperty(kIOPMSleepWakeUUIDKey, publishThisUUID); - publishThisUUID->release(); + setProperty(kIOPMSleepWakeUUIDKey, publishThisUUID.get()); } gSleepWakeUUIDIsSet = true; messageClients(kIOPMMessageSleepWakeUUIDChange, kIOPMMessageSleepWakeUUIDSet); - queuedSleepWakeUUIDString->release(); - queuedSleepWakeUUIDString = NULL; + queuedSleepWakeUUIDString.reset(); } } @@ -3700,23 +3876,55 @@ IOPMCopySleepWakeUUIDKey(char *buffer, size_t 
buf_len) } if (buffer != NULL) { - OSString *string; + OSSharedPtr string = + OSDynamicPtrCast(gRootDomain->copyProperty(kIOPMSleepWakeUUIDKey)); - string = (OSString *) - gRootDomain->copyProperty(kIOPMSleepWakeUUIDKey); - - if (string == NULL) { + if (!string) { *buffer = '\0'; } else { strlcpy(buffer, string->getCStringNoCopy(), buf_len); - - string->release(); } } return true; } +//****************************************************************************** +// lowLatencyAudioNotify +// +// Used to send an update about low latency audio activity to interested +// clients. To keep the overhead minimal the OSDictionary used here +// is initialized at boot. +//****************************************************************************** + +void +IOPMrootDomain::lowLatencyAudioNotify(uint64_t time, boolean_t state) +{ + if (lowLatencyAudioNotifierDict && lowLatencyAudioNotifyStateSym && lowLatencyAudioNotifyTimestampSym && + lowLatencyAudioNotifyStateVal && lowLatencyAudioNotifyTimestampVal) { + lowLatencyAudioNotifyTimestampVal->setValue(time); + lowLatencyAudioNotifyStateVal->setValue(state); + setPMSetting(gIOPMSettingLowLatencyAudioModeKey.get(), lowLatencyAudioNotifierDict.get()); + } else { + DLOG("LowLatencyAudioNotify error\n"); + } + return; +} + +//****************************************************************************** +// IOPMrootDomainRTNotifier +// +// Used by performance controller to update the timestamp and state associated +// with low latency audio activity in the system. +//****************************************************************************** + +extern "C" void +IOPMrootDomainRTNotifier(uint64_t time, boolean_t state) +{ + gRootDomain->lowLatencyAudioNotify(time, state); + return; +} + //****************************************************************************** // initializeBootSessionUUID // @@ -3737,33 +3945,74 @@ IOPMrootDomain::initializeBootSessionUUID(void) } //****************************************************************************** -// changePowerStateTo & changePowerStateToPriv -// -// Override of these methods for logging purposes. +// Root domain uses the private and tagged changePowerState methods for +// tracking and logging purposes. 
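lowLatencyAudioNotify() above deliberately reuses a dictionary and value objects created at boot so the audio hot path performs no allocation. Below is a user-space sketch of that allocate-once, mutate-per-event pattern; the payload type and the printf sink are stand-ins, not the kernel interfaces:

#include <cstdint>
#include <cstdio>

struct AudioNotifyPayload {
    uint64_t timestamp;
    bool     active;
};

static AudioNotifyPayload gPayload;   // allocated once, "at boot" in the patch

static void publish(const AudioNotifyPayload &p)
{
    std::printf("low-latency audio: ts=%llu active=%d\n",
        static_cast<unsigned long long>(p.timestamp), p.active);
}

static void lowLatencyAudioNotify(uint64_t time, bool state)
{
    gPayload.timestamp = time;        // mutate in place: no allocation on the hot path
    gPayload.active    = state;
    publish(gPayload);
}

int main()
{
    lowLatencyAudioNotify(123456789ULL, true);
    lowLatencyAudioNotify(123999999ULL, false);
    return 0;
}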
//****************************************************************************** +#define REQUEST_TAG_TO_REASON(x) ((uint16_t)x) + +static uint32_t +nextRequestTag( IOPMRequestTag tag ) +{ + static SInt16 msb16 = 1; + uint16_t id = OSAddAtomic16(1, &msb16); + return ((uint32_t)id << 16) | REQUEST_TAG_TO_REASON(tag); +} + +// TODO: remove this shim function and exported symbol IOReturn IOPMrootDomain::changePowerStateTo( unsigned long ordinal ) { - DLOG("changePowerStateTo(%u)\n", (uint32_t) ordinal); + return changePowerStateWithTagTo(ordinal, kCPSReasonNone); +} + +// TODO: remove this shim function and exported symbol +IOReturn +IOPMrootDomain::changePowerStateToPriv( unsigned long ordinal ) +{ + return changePowerStateWithTagToPriv(ordinal, kCPSReasonNone); +} + +IOReturn +IOPMrootDomain::changePowerStateWithOverrideTo( + IOPMPowerStateIndex ordinal, IOPMRequestTag reason ) +{ + uint32_t tag = nextRequestTag(reason); + DLOG("%s(%s, %x)\n", __FUNCTION__, getPowerStateString((uint32_t) ordinal), tag); if ((ordinal != ON_STATE) && (ordinal != AOT_STATE) && (ordinal != SLEEP_STATE)) { return kIOReturnUnsupported; } - return super::changePowerStateTo(ordinal); + return super::changePowerStateWithOverrideTo(ordinal, tag); } IOReturn -IOPMrootDomain::changePowerStateToPriv( unsigned long ordinal ) +IOPMrootDomain::changePowerStateWithTagTo( + IOPMPowerStateIndex ordinal, IOPMRequestTag reason ) +{ + uint32_t tag = nextRequestTag(reason); + DLOG("%s(%s, %x)\n", __FUNCTION__, getPowerStateString((uint32_t) ordinal), tag); + + if ((ordinal != ON_STATE) && (ordinal != AOT_STATE) && (ordinal != SLEEP_STATE)) { + return kIOReturnUnsupported; + } + + return super::changePowerStateWithTagTo(ordinal, tag); +} + +IOReturn +IOPMrootDomain::changePowerStateWithTagToPriv( + IOPMPowerStateIndex ordinal, IOPMRequestTag reason ) { - DLOG("changePowerStateToPriv(%u)\n", (uint32_t) ordinal); + uint32_t tag = nextRequestTag(reason); + DLOG("%s(%s, %x)\n", __FUNCTION__, getPowerStateString((uint32_t) ordinal), tag); if ((ordinal != ON_STATE) && (ordinal != AOT_STATE) && (ordinal != SLEEP_STATE)) { return kIOReturnUnsupported; } - return super::changePowerStateToPriv(ordinal); + return super::changePowerStateWithTagToPriv(ordinal, tag); } //****************************************************************************** @@ -3780,6 +4029,12 @@ IOPMrootDomain::activitySinceSleep(void) bool IOPMrootDomain::abortHibernation(void) { +#if __arm64__ + // don't allow hibernation to be aborted on ARM due to user activity + // since once ApplePMGR decides we're hibernating, we can't turn back + // see: Tonga ApplePMGR diff quiesce path support + return false; +#else bool ret = activitySinceSleep(); if (ret && !hibernateAborted && checkSystemCanSustainFullWake()) { @@ -3787,6 +4042,7 @@ IOPMrootDomain::abortHibernation(void) hibernateAborted = true; } return ret; +#endif } extern "C" int @@ -3811,8 +4067,8 @@ hibernate_should_abort(void) void IOPMrootDomain::willNotifyPowerChildren( IOPMPowerStateIndex newPowerState ) { - OSDictionary *dict; - OSNumber *secs; + OSSharedPtr dict; + OSSharedPtr secs; if (SLEEP_STATE == newPowerState) { notifierThread = current_thread(); @@ -3822,9 +4078,9 @@ IOPMrootDomain::willNotifyPowerChildren( IOPMPowerStateIndex newPowerState ) updateTasksSuspend(); clock_interval_to_deadline(10, kSecondScale, &deadline); -#if !CONFIG_EMBEDDED +#if defined(XNU_TARGET_OS_OSX) vm_pageout_wait(AbsoluteTime_to_scalar(&deadline)); -#endif /* !CONFIG_EMBEDDED */ +#endif /* defined(XNU_TARGET_OS_OSX) */ } 
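nextRequestTag() packs a rolling 16-bit sequence number into the high half of the tag and the change reason into the low half, which REQUEST_TAG_TO_REASON() later strips back out for logging and sleep-reason attribution. A small standalone model of that encoding (the kCPSReason value is a placeholder; the real enumerators are kernel-private):

#include <atomic>
#include <cstdint>
#include <cstdio>

enum : uint16_t { kCPSReasonNone = 0, kCPSReasonWake = 7 };   // placeholder values

static std::atomic<uint16_t> gSequence{1};

static uint32_t nextRequestTag(uint16_t reason)
{
    // fetch_add returns the previous value, the same convention the patch
    // relies on with OSAddAtomic16.
    uint16_t id = gSequence.fetch_add(1);
    return (static_cast<uint32_t>(id) << 16) | reason;
}

static uint16_t tagToReason(uint32_t tag)
{
    return static_cast<uint16_t>(tag);     // analogue of REQUEST_TAG_TO_REASON()
}

int main()
{
    uint32_t tag = nextRequestTag(kCPSReasonWake);
    std::printf("tag 0x%08x -> seq %u, reason %u\n",
        tag, tag >> 16, static_cast<unsigned>(tagToReason(tag)));
    return 0;
}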
_aotReadyToFullWake = false; @@ -3872,22 +4128,56 @@ IOPMrootDomain::willNotifyPowerChildren( IOPMPowerStateIndex newPowerState ) secs = OSNumber::withNumber(1, 32); if (dict && secs) { - dict->setObject(gIOPMSettingDebugWakeRelativeKey, secs); - gRootDomain->setProperties(dict); + dict->setObject(gIOPMSettingDebugWakeRelativeKey.get(), secs.get()); + gRootDomain->setProperties(dict.get()); MSG("Reverting sleep with relative wake\n"); } - if (dict) { - dict->release(); - } - if (secs) { - secs->release(); - } } notifierThread = NULL; } } +//****************************************************************************** +// willTellSystemCapabilityDidChange +// +// IOServicePM calls this from OurChangeTellCapabilityDidChange() when root +// domain is raising its power state, immediately after notifying interested +// drivers and power children. +//****************************************************************************** + +void +IOPMrootDomain::willTellSystemCapabilityDidChange( void ) +{ + if ((_systemTransitionType == kSystemTransitionWake) && + !CAP_GAIN(kIOPMSystemCapabilityGraphics)) { + // After powering up drivers, dark->full promotion on the current wake + // transition is no longer possible. That is because the next machine + // state will issue the system capability change messages. + // The darkWakePowerClamped flag may already be set if the system has + // at least one driver that was power clamped due to dark wake. + // This function sets the darkWakePowerClamped flag in case there + // is no power-clamped driver in the system. + // + // Last opportunity to exit dark wake using: + // requestFullWake( kFullWakeReasonLocalUser ); + + if (!darkWakePowerClamped) { + if (darkWakeLogClamp) { + AbsoluteTime now; + uint64_t nsec; + + clock_get_uptime(&now); + SUB_ABSOLUTETIME(&now, &gIOLastWakeAbsTime); + absolutetime_to_nanoseconds(now, &nsec); + DLOG("dark wake promotion disabled at %u ms\n", + ((int)((nsec) / NSEC_PER_MSEC))); + } + darkWakePowerClamped = true; + } + } +} + //****************************************************************************** // sleepOnClamshellClosed // @@ -3902,10 +4192,10 @@ IOPMrootDomain::shouldSleepOnClamshellClosed( void ) return false; } - DLOG("clamshell closed %d, disabled %d, desktopMode %d, ac %d sleepDisabled %d\n", - clamshellClosed, clamshellDisabled, desktopMode, acAdaptorConnected, clamshellSleepDisabled); + DLOG("clamshell closed %d, disabled %d/%x, desktopMode %d, ac %d\n", + clamshellClosed, clamshellDisabled, clamshellSleepDisableMask, desktopMode, acAdaptorConnected); - return !clamshellDisabled && !(desktopMode && acAdaptorConnected) && !clamshellSleepDisabled; + return !clamshellDisabled && !(desktopMode && acAdaptorConnected) && !clamshellSleepDisableMask; } bool @@ -3917,10 +4207,10 @@ IOPMrootDomain::shouldSleepOnRTCAlarmWake( void ) return false; } - DLOG("shouldSleepOnRTCAlarmWake: clamshell closed %d, disabled %d, desktopMode %d, ac %d sleepDisabled %d\n", - clamshellClosed, clamshellDisabled, desktopMode, acAdaptorConnected, clamshellSleepDisabled); + DLOG("shouldSleepOnRTCAlarmWake: clamshell closed %d, disabled %d/%x, desktopMode %d, ac %d\n", + clamshellClosed, clamshellDisabled, clamshellSleepDisableMask, desktopMode, acAdaptorConnected); - return !acAdaptorConnected && !clamshellSleepDisabled; + return !acAdaptorConnected && !clamshellSleepDisableMask; } void @@ -3971,30 +4261,37 @@ IOPMrootDomain::setSleepSupported( IOOptionBits flags ) } 
//****************************************************************************** -// setDisableClamShellSleep +// setClamShellSleepDisable // //****************************************************************************** void -IOPMrootDomain::setDisableClamShellSleep( bool val ) +IOPMrootDomain::setClamShellSleepDisable( bool disable, uint32_t bitmask ) { + uint32_t oldMask; + + // User client calls this in non-gated context if (gIOPMWorkLoop->inGate() == false) { gIOPMWorkLoop->runAction( - OSMemberFunctionCast(IOWorkLoop::Action, this, &IOPMrootDomain::setDisableClamShellSleep), - (OSObject *)this, - (void *)val); - + OSMemberFunctionCast(IOWorkLoop::Action, this, + &IOPMrootDomain::setClamShellSleepDisable), + (OSObject *) this, + (void *) disable, (void *)(uintptr_t) bitmask); return; + } + + oldMask = clamshellSleepDisableMask; + if (disable) { + clamshellSleepDisableMask |= bitmask; } else { - DLOG("setDisableClamShellSleep(%x)\n", (uint32_t) val); - if (clamshellSleepDisabled != val) { - clamshellSleepDisabled = val; - // If clamshellSleepDisabled is reset to 0, reevaluate if - // system need to go to sleep due to clamshell state - if (!clamshellSleepDisabled && clamshellClosed) { - handlePowerNotification(kLocalEvalClamshellCommand); - } - } + clamshellSleepDisableMask &= ~bitmask; + } + DLOG("setClamShellSleepDisable(%x->%x)\n", oldMask, clamshellSleepDisableMask); + + if (clamshellExists && clamshellClosed && + (clamshellSleepDisableMask != oldMask) && + (clamshellSleepDisableMask == 0)) { + handlePowerNotification(kLocalEvalClamshellCommand); } } @@ -4010,6 +4307,89 @@ IOPMrootDomain::wakeFromDoze( void ) // Preserve symbol for familes (IOUSBFamily and IOGraphics) } +//****************************************************************************** +// recordRTCAlarm +// +// Record the earliest scheduled RTC alarm to determine whether a RTC wake +// should be a dark wake or a full wake. Both Maintenance and SleepService +// alarms are dark wake, while AutoWake (WakeByCalendarDate) and DebugWake +// (WakeRelativeToSleep) should trigger a full wake. Scheduled power-on +// PMSettings are ignored. +// +// Caller serialized using settingsCtrlLock. +//****************************************************************************** + +void +IOPMrootDomain::recordRTCAlarm( + const OSSymbol *type, + OSObject *object ) +{ + uint32_t previousAlarmMask = _scheduledAlarmMask; + + if (type == gIOPMSettingDebugWakeRelativeKey) { + OSNumber * n = OSDynamicCast(OSNumber, object); + if (n) { + // Debug wake has highest scheduling priority so it overrides any + // pre-existing alarm. 
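setClamShellSleepDisable() replaces the old boolean with a per-client bitmask, and clamshell sleep is only re-evaluated when the mask drops back to zero. A simplified model of that transition check, leaving out the clamshellExists/clamshellClosed preconditions; the bit assignments are invented for the example:

#include <cstdint>
#include <cstdio>

enum : uint32_t {
    kVetoSMC         = 0x01,   // invented bit assignments
    kVetoExternalGPU = 0x02,
};

static uint32_t gClamshellSleepDisableMask;

static void evaluateClamshellSleep()
{
    std::puts("re-evaluating clamshell sleep");
}

static void setClamshellSleepDisable(bool disable, uint32_t bit)
{
    uint32_t oldMask = gClamshellSleepDisableMask;
    if (disable) {
        gClamshellSleepDisableMask |= bit;
    } else {
        gClamshellSleepDisableMask &= ~bit;
    }
    std::printf("clamshell veto mask %x -> %x\n", oldMask, gClamshellSleepDisableMask);

    // Only the non-zero -> zero transition can unblock a pending clamshell sleep.
    if ((gClamshellSleepDisableMask != oldMask) && (gClamshellSleepDisableMask == 0)) {
        evaluateClamshellSleep();
    }
}

int main()
{
    setClamshellSleepDisable(true,  kVetoSMC);
    setClamshellSleepDisable(true,  kVetoExternalGPU);
    setClamshellSleepDisable(false, kVetoSMC);          // still vetoed by the GPU bit
    setClamshellSleepDisable(false, kVetoExternalGPU);  // mask hits zero: re-evaluate
    return 0;
}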
+ uint32_t debugSecs = n->unsigned32BitValue(); + _nextScheduledAlarmType.reset(type, OSRetain); + _nextScheduledAlarmUTC = debugSecs; + + _debugWakeSeconds = debugSecs; + OSBitOrAtomic(kIOPMAlarmBitDebugWake, &_scheduledAlarmMask); + DLOG("next alarm (%s) in %u secs\n", + type->getCStringNoCopy(), debugSecs); + } + } else if ((type == gIOPMSettingAutoWakeCalendarKey.get()) || + (type == gIOPMSettingMaintenanceWakeCalendarKey.get()) || + (type == gIOPMSettingSleepServiceWakeCalendarKey.get())) { + OSData * data = OSDynamicCast(OSData, object); + if (data && (data->getLength() == sizeof(IOPMCalendarStruct))) { + const IOPMCalendarStruct * cs; + bool replaceNextAlarm = false; + clock_sec_t secs; + + cs = (const IOPMCalendarStruct *) data->getBytesNoCopy(); + secs = IOPMConvertCalendarToSeconds(cs); + DLOG("%s " YMDTF "\n", type->getCStringNoCopy(), YMDT(cs)); + + // Update the next scheduled alarm type + if ((_nextScheduledAlarmType == NULL) || + ((_nextScheduledAlarmType != gIOPMSettingDebugWakeRelativeKey) && + (secs < _nextScheduledAlarmUTC))) { + replaceNextAlarm = true; + } + + if (type == gIOPMSettingAutoWakeCalendarKey.get()) { + if (cs->year) { + _calendarWakeAlarmUTC = IOPMConvertCalendarToSeconds(cs); + OSBitOrAtomic(kIOPMAlarmBitCalendarWake, &_scheduledAlarmMask); + } else { + // TODO: can this else-block be removed? + _calendarWakeAlarmUTC = 0; + OSBitAndAtomic(~kIOPMAlarmBitCalendarWake, &_scheduledAlarmMask); + } + } + if (type == gIOPMSettingMaintenanceWakeCalendarKey.get()) { + OSBitOrAtomic(kIOPMAlarmBitMaintenanceWake, &_scheduledAlarmMask); + } + if (type == gIOPMSettingSleepServiceWakeCalendarKey.get()) { + OSBitOrAtomic(kIOPMAlarmBitSleepServiceWake, &_scheduledAlarmMask); + } + + if (replaceNextAlarm) { + _nextScheduledAlarmType.reset(type, OSRetain); + _nextScheduledAlarmUTC = secs; + DLOG("next alarm (%s) " YMDTF "\n", type->getCStringNoCopy(), YMDT(cs)); + } + } + } + + if (_scheduledAlarmMask != previousAlarmMask) { + DLOG("scheduled alarm mask 0x%x\n", (uint32_t) _scheduledAlarmMask); + } +} + // MARK: - // MARK: Features @@ -4037,13 +4417,14 @@ IOPMrootDomain::publishFeature( uint32_t supportedWhere, uint32_t *uniqueFeatureID) { - static uint16_t next_feature_id = 500; + static uint16_t next_feature_id = 500; - OSNumber *new_feature_data = NULL; - OSNumber *existing_feature = NULL; - OSArray *existing_feature_arr = NULL; - OSObject *osObj = NULL; - uint32_t feature_value = 0; + OSSharedPtr new_feature_data; + OSNumber *existing_feature = NULL; + OSArray *existing_feature_arr_raw = NULL; + OSSharedPtr existing_feature_arr; + OSObject *osObj = NULL; + uint32_t feature_value = 0; supportedWhere &= kRD_AllPowerSources; // mask off any craziness! @@ -4061,12 +4442,13 @@ IOPMrootDomain::publishFeature( IOLockLock(featuresDictLock); } - OSDictionary *features = - (OSDictionary *) getProperty(kRootDomainSupportedFeatures); + OSSharedPtr origFeaturesProp = copyProperty(kRootDomainSupportedFeatures); + OSDictionary *origFeatures = OSDynamicCast(OSDictionary, origFeaturesProp.get()); + OSSharedPtr features; // Create new features dict if necessary - if (features && OSDynamicCast(OSDictionary, features)) { - features = OSDictionary::withDictionary(features); + if (origFeatures) { + features = OSDictionary::withDictionary(origFeatures); } else { features = OSDictionary::withCapacity(1); } @@ -4094,30 +4476,24 @@ IOPMrootDomain::publishFeature( // We need to create an OSArray to hold the now 2 elements. 
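recordRTCAlarm() above keeps a bitmask of every pending alarm type while tracking the single earliest alarm, with a pending debug (relative) wake always taking priority. A compact sketch of that selection logic with made-up alarm types and times:

#include <cstdint>
#include <cstdio>
#include <string>

enum : uint32_t {
    kAlarmBitDebugWake       = 0x01,
    kAlarmBitCalendarWake    = 0x02,
    kAlarmBitMaintenanceWake = 0x04,
};

struct AlarmState {
    uint32_t    mask = 0;
    uint64_t    nextUTC = 0;
    std::string nextType;
};

static void recordAlarm(AlarmState &s, const std::string &type, uint32_t bit, uint64_t utc)
{
    s.mask |= bit;

    bool debugPending = (s.nextType == "DebugWake");
    if (s.nextType.empty() || bit == kAlarmBitDebugWake ||
        (!debugPending && utc < s.nextUTC)) {
        s.nextType = type;
        s.nextUTC  = utc;
    }
}

int main()
{
    AlarmState s;
    recordAlarm(s, "MaintenanceWake", kAlarmBitMaintenanceWake, 5000);
    recordAlarm(s, "CalendarWake",    kAlarmBitCalendarWake,    3000); // earlier: becomes next
    recordAlarm(s, "DebugWake",       kAlarmBitDebugWake,       9000); // always wins
    std::printf("alarm mask 0x%x, next %s at %llu\n", s.mask, s.nextType.c_str(),
        static_cast<unsigned long long>(s.nextUTC));
    return 0;
}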
existing_feature_arr = OSArray::withObjects( (const OSObject **)&existing_feature, 1, 2); - } else if ((existing_feature_arr = OSDynamicCast(OSArray, osObj))) { + } else if ((existing_feature_arr_raw = OSDynamicCast(OSArray, osObj))) { // Add object to existing array existing_feature_arr = OSArray::withArray( - existing_feature_arr, - existing_feature_arr->getCount() + 1); + existing_feature_arr_raw, + existing_feature_arr_raw->getCount() + 1); } if (existing_feature_arr) { - existing_feature_arr->setObject(new_feature_data); - features->setObject(feature, existing_feature_arr); - existing_feature_arr->release(); - existing_feature_arr = NULL; + existing_feature_arr->setObject(new_feature_data.get()); + features->setObject(feature, existing_feature_arr.get()); } } else { // The easy case: no previously existing features listed. We simply // set the OSNumber at key 'feature' and we're on our way. - features->setObject(feature, new_feature_data); + features->setObject(feature, new_feature_data.get()); } - new_feature_data->release(); - - setProperty(kRootDomainSupportedFeatures, features); - - features->release(); + setProperty(kRootDomainSupportedFeatures, features.get()); if (featuresDictLock) { IOLockUnlock(featuresDictLock); @@ -4145,12 +4521,12 @@ IOPMrootDomain::removePublishedFeature( uint32_t removeFeatureID ) bool madeAChange = false; OSSymbol *dictKey = NULL; - OSCollectionIterator *dictIterator = NULL; + OSSharedPtr dictIterator; OSArray *arrayMember = NULL; OSNumber *numberMember = NULL; OSObject *osObj = NULL; OSNumber *osNum = NULL; - OSArray *arrayMemberCopy; + OSSharedPtr arrayMemberCopy; if (kBadPMFeatureID == removeFeatureID) { return kIOReturnNotFound; @@ -4160,14 +4536,15 @@ IOPMrootDomain::removePublishedFeature( uint32_t removeFeatureID ) IOLockLock(featuresDictLock); } - OSDictionary *features = - (OSDictionary *) getProperty(kRootDomainSupportedFeatures); + OSSharedPtr origFeaturesProp = copyProperty(kRootDomainSupportedFeatures); + OSDictionary *origFeatures = OSDynamicCast(OSDictionary, origFeaturesProp.get()); + OSSharedPtr features; - if (features && OSDynamicCast(OSDictionary, features)) { + if (origFeatures) { // Any modifications to the dictionary are made to the copy to prevent // races & crashes with userland clients. Dictionary updated // automically later. - features = OSDictionary::withDictionary(features); + features = OSDictionary::withDictionary(origFeatures); } else { features = NULL; ret = kIOReturnNotFound; @@ -4178,7 +4555,7 @@ IOPMrootDomain::removePublishedFeature( uint32_t removeFeatureID ) // with 'removeFeatureID'. If found, we remove it from our tracking // structures and notify the OS via a general interest message. 
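publishFeature() stores the first registration for a feature as a single value and promotes the entry to an array when a second publisher arrives. The sketch below models that bookkeeping with a vector per key; the id<<16 | supported-power-sources packing is an assumption based on the surrounding code and is not shown in this hunk:

#include <cstdint>
#include <cstdio>
#include <map>
#include <string>
#include <vector>

static std::map<std::string, std::vector<uint32_t>> gFeatures;

static void publishFeature(const std::string &feature, uint16_t uniqueID, uint16_t supportedWhere)
{
    uint32_t packed = (static_cast<uint32_t>(uniqueID) << 16) | supportedWhere;
    gFeatures[feature].push_back(packed);   // first call creates the entry, later calls append
}

int main()
{
    publishFeature("DisplaySleepUsesDim", 500, 0x3);
    publishFeature("DisplaySleepUsesDim", 501, 0x1);   // second publisher: the entry grows
    for (const auto &kv : gFeatures) {
        std::printf("%s:", kv.first.c_str());
        for (uint32_t v : kv.second) {
            std::printf(" 0x%08x", v);
        }
        std::printf("\n");
    }
    return 0;
}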
- dictIterator = OSCollectionIterator::withCollection(features); + dictIterator = OSCollectionIterator::withCollection(features.get()); if (!dictIterator) { goto exit; } @@ -4222,8 +4599,7 @@ IOPMrootDomain::removePublishedFeature( uint32_t removeFeatureID ) arrayMemberCopy = OSArray::withArray(arrayMember); if (arrayMemberCopy) { arrayMemberCopy->removeObject(i); - features->setObject(dictKey, arrayMemberCopy); - arrayMemberCopy->release(); + features->setObject(dictKey, arrayMemberCopy.get()); } } @@ -4234,12 +4610,10 @@ IOPMrootDomain::removePublishedFeature( uint32_t removeFeatureID ) } } - dictIterator->release(); - if (madeAChange) { ret = kIOReturnSuccess; - setProperty(kRootDomainSupportedFeatures, features); + setProperty(kRootDomainSupportedFeatures, features.get()); // Notify EnergySaver and all those in user space so they might // re-populate their feature specific UI @@ -4251,9 +4625,6 @@ IOPMrootDomain::removePublishedFeature( uint32_t removeFeatureID ) } exit: - if (features) { - features->release(); - } if (featuresDictLock) { IOLockUnlock(featuresDictLock); } @@ -4295,11 +4666,13 @@ IOPMrootDomain::setPMSetting( OSObject *object ) { PMSettingCallEntry *entries = NULL; - OSArray *chosen = NULL; + OSSharedPtr chosen; const OSArray *array; PMSettingObject *pmso; thread_t thisThread; int i, j, count, capacity; + bool ok = false; + IOReturn ret; if (NULL == type) { return kIOReturnBadArgument; @@ -4349,7 +4722,20 @@ IOPMrootDomain::setPMSetting( // Call each pmso in the chosen array. for (i = 0; i < count; i++) { pmso = (PMSettingObject *) chosen->getObject(i); - pmso->dispatchPMSetting(type, object); + ret = pmso->dispatchPMSetting(type, object); + if (ret == kIOReturnSuccess) { + // At least one setting handler was successful + ok = true; +#if DEVELOPMENT || DEBUG + } else { + // Log the handler and kext that failed + OSSharedPtr kextName = copyKextIdentifierWithAddress((vm_address_t) pmso->func); + if (kextName) { + DLOG("PMSetting(%s) error 0x%x from %s\n", + type->getCStringNoCopy(), ret, kextName->getCStringNoCopy()); + } +#endif + } } PMSETTING_LOCK(); @@ -4360,12 +4746,13 @@ IOPMrootDomain::setPMSetting( PMSETTING_WAKEUP(pmso); } } + + if (ok) { + recordRTCAlarm(type, object); + } unlock_exit: PMSETTING_UNLOCK(); - if (chosen) { - chosen->release(); - } if (entries) { IODelete(entries, PMSettingCallEntry, capacity); } @@ -4380,21 +4767,18 @@ unlock_exit: // notifications. 
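The reworked setPMSetting() above treats the set as successful if any one handler returns kIOReturnSuccess, and on DEVELOPMENT/DEBUG builds logs the kext whose handler failed. A user-space sketch of that dispatch-and-report loop; the handler owners and the error value are illustrative:

#include <cstdio>
#include <functional>
#include <string>
#include <vector>

struct Handler {
    std::string owner;
    std::function<int(int)> fn;   // returns 0 on success
};

static bool dispatchSetting(const std::vector<Handler> &handlers, int value)
{
    bool ok = false;
    for (const Handler &h : handlers) {
        int ret = h.fn(value);
        if (ret == 0) {
            ok = true;            // at least one handler accepted the setting
        } else {
            std::printf("PMSetting error 0x%x from %s\n", ret, h.owner.c_str());
        }
    }
    return ok;                    // the caller records the RTC alarm only if ok
}

int main()
{
    std::vector<Handler> handlers = {
        { "com.example.driverA", [](int) { return 0; } },
        { "com.example.driverB", [](int) { return 0x2bc; } },   // arbitrary error code
    };
    bool ok = dispatchSetting(handlers, 1);
    std::printf("setting %s\n", ok ? "applied" : "rejected");
    return 0;
}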
//****************************************************************************** -OSObject * +OSSharedPtr IOPMrootDomain::copyPMSetting( OSSymbol *whichSetting) { - OSObject *obj = NULL; + OSSharedPtr obj; if (!whichSetting) { return NULL; } PMSETTING_LOCK(); - obj = fPMSettingsDict->getObject(whichSetting); - if (obj) { - obj->retain(); - } + obj.reset(fPMSettingsDict->getObject(whichSetting), OSRetain); PMSETTING_UNLOCK(); return obj; @@ -4451,7 +4835,6 @@ IOPMrootDomain::registerPMSettingController( { PMSettingObject *pmso = NULL; OSObject *pmsh = NULL; - OSArray *list = NULL; int i; if (NULL == settings || @@ -4471,12 +4854,13 @@ IOPMrootDomain::registerPMSettingController( PMSETTING_LOCK(); for (i = 0; settings[i]; i++) { - list = OSDynamicCast(OSArray, settingsCallbacks->getObject(settings[i])); + OSSharedPtr newList; + OSArray *list = OSDynamicCast(OSArray, settingsCallbacks->getObject(settings[i])); if (!list) { // New array of callbacks for this setting - list = OSArray::withCapacity(1); - settingsCallbacks->setObject(settings[i], list); - list->release(); + newList = OSArray::withCapacity(1); + settingsCallbacks->setObject(settings[i], newList.get()); + list = newList.get(); } // Add caller to the callback list @@ -4501,7 +4885,7 @@ IOPMrootDomain::deregisterPMSettingObject( PMSettingObject * pmso ) { thread_t thisThread = current_thread(); PMSettingCallEntry *callEntry; - OSCollectionIterator *iter; + OSSharedPtr iter; OSSymbol *sym; OSArray *array; int index; @@ -4530,7 +4914,7 @@ IOPMrootDomain::deregisterPMSettingObject( PMSettingObject * pmso ) } while (wait); // Search each PM settings array in the kernel. - iter = OSCollectionIterator::withCollection(settingsCallbacks); + iter = OSCollectionIterator::withCollection(settingsCallbacks.get()); if (iter) { while ((sym = OSDynamicCast(OSSymbol, iter->getNextObject()))) { array = OSDynamicCast(OSArray, settingsCallbacks->getObject(sym)); @@ -4539,7 +4923,6 @@ IOPMrootDomain::deregisterPMSettingObject( PMSettingObject * pmso ) array->removeObject(index); } } - iter->release(); } PMSETTING_UNLOCK(); @@ -4673,8 +5056,36 @@ bool IOPMrootDomain::evaluateSystemSleepPolicy( IOPMSystemSleepParameters * params, int sleepPhase, uint32_t * hibMode ) { +#define SLEEP_FACTOR(x) {(uint32_t) kIOPMSleepFactor ## x, #x} + + static const IONamedValue factorValues[] = { + SLEEP_FACTOR( SleepTimerWake ), + SLEEP_FACTOR( LidOpen ), + SLEEP_FACTOR( ACPower ), + SLEEP_FACTOR( BatteryLow ), + SLEEP_FACTOR( StandbyNoDelay ), + SLEEP_FACTOR( StandbyForced ), + SLEEP_FACTOR( StandbyDisabled ), + SLEEP_FACTOR( USBExternalDevice ), + SLEEP_FACTOR( BluetoothHIDDevice ), + SLEEP_FACTOR( ExternalMediaMounted ), + SLEEP_FACTOR( ThunderboltDevice ), + SLEEP_FACTOR( RTCAlarmScheduled ), + SLEEP_FACTOR( MagicPacketWakeEnabled ), + SLEEP_FACTOR( HibernateForced ), + SLEEP_FACTOR( AutoPowerOffDisabled ), + SLEEP_FACTOR( AutoPowerOffForced ), + SLEEP_FACTOR( ExternalDisplay ), + SLEEP_FACTOR( NetworkKeepAliveActive ), + SLEEP_FACTOR( LocalUserActivity ), + SLEEP_FACTOR( HibernateFailed ), + SLEEP_FACTOR( ThermalWarning ), + SLEEP_FACTOR( DisplayCaptured ), + { 0, NULL } + }; + const IOPMSystemSleepPolicyTable * pt; - OSObject * prop = NULL; + OSSharedPtr prop; OSData * policyData; uint64_t currentFactors = 0; char currentFactorsBuf[512]; @@ -4697,9 +5108,9 @@ IOPMrootDomain::evaluateSystemSleepPolicy( // Fetch additional settings standbyEnabled = (getSleepOption(kIOPMDeepSleepDelayKey, &standbyDelay) - && (getProperty(kIOPMDeepSleepEnabledKey) == kOSBooleanTrue)); + 
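The SLEEP_FACTOR macro builds the IONamedValue table by token-pasting the kIOPMSleepFactor constant and stringizing its suffix. The same preprocessor pattern in plain C++, with made-up flag values:

#include <cstdio>

enum : unsigned {
    kSleepFactorLidOpen    = 0x01,   // made-up values
    kSleepFactorACPower    = 0x02,
    kSleepFactorBatteryLow = 0x04,
};

struct NamedValue {
    unsigned    value;
    const char *name;
};

#define SLEEP_FACTOR(x) { kSleepFactor ## x, #x }

static const NamedValue factorValues[] = {
    SLEEP_FACTOR(LidOpen),
    SLEEP_FACTOR(ACPower),
    SLEEP_FACTOR(BatteryLow),
    { 0, nullptr }
};

int main()
{
    for (const NamedValue *nv = factorValues; nv->name; nv++) {
        std::printf("0x%02x %s\n", nv->value, nv->name);
    }
    return 0;
}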
&& propertyHasValue(kIOPMDeepSleepEnabledKey, kOSBooleanTrue)); powerOffEnabled = (getSleepOption(kIOPMAutoPowerOffDelayKey, &powerOffDelay) - && (getProperty(kIOPMAutoPowerOffEnabledKey) == kOSBooleanTrue)); + && propertyHasValue(kIOPMAutoPowerOffEnabledKey, kOSBooleanTrue)); if (!getSleepOption(kIOPMAutoPowerOffTimerKey, &powerOffTimer)) { powerOffTimer = powerOffDelay; } @@ -4724,109 +5135,99 @@ IOPMrootDomain::evaluateSystemSleepPolicy( // If poweroff is enabled, force poweroff. if (standbyEnabled) { currentFactors |= kIOPMSleepFactorStandbyForced; - snprintf(currentFactorsBuf, sizeof(currentFactorsBuf), "%s, %s", currentFactorsBuf, "StandbyForced"); } else if (powerOffEnabled) { currentFactors |= kIOPMSleepFactorAutoPowerOffForced; - snprintf(currentFactorsBuf, sizeof(currentFactorsBuf), "%s, %s", currentFactorsBuf, "AutoPowerOffForced"); } else { currentFactors |= kIOPMSleepFactorHibernateForced; - snprintf(currentFactorsBuf, sizeof(currentFactorsBuf), "%s, %s", currentFactorsBuf, "HibernateForced"); } } // Current factors based on environment and assertions if (sleepTimerMaintenance) { currentFactors |= kIOPMSleepFactorSleepTimerWake; - snprintf(currentFactorsBuf, sizeof(currentFactorsBuf), "%s, %s", currentFactorsBuf, "SleepTimerWake"); } if (standbyEnabled && sleepToStandby && !gSleepPolicyHandler) { currentFactors |= kIOPMSleepFactorSleepTimerWake; - snprintf(currentFactorsBuf, sizeof(currentFactorsBuf), "%s, %s", currentFactorsBuf, "SleepTimerWake"); } if (!clamshellClosed) { currentFactors |= kIOPMSleepFactorLidOpen; - snprintf(currentFactorsBuf, sizeof(currentFactorsBuf), "%s, %s", currentFactorsBuf, "LidOpen"); } if (acAdaptorConnected) { currentFactors |= kIOPMSleepFactorACPower; - snprintf(currentFactorsBuf, sizeof(currentFactorsBuf), "%s, %s", currentFactorsBuf, "ACPower"); } if (lowBatteryCondition) { - currentFactors |= kIOPMSleepFactorBatteryLow; - snprintf(currentFactorsBuf, sizeof(currentFactorsBuf), "%s, %s", currentFactorsBuf, "BatteryLow"); + hibernateMode = 0; + getSleepOption(kIOHibernateModeKey, &hibernateMode); + if ((hibernateMode & kIOHibernateModeOn) == 0) { + DLOG("HibernateMode is 0. 
Not sending LowBattery factor to IOPPF\n"); + } else { + currentFactors |= kIOPMSleepFactorBatteryLow; + } } if (!standbyDelay || !standbyTimer) { currentFactors |= kIOPMSleepFactorStandbyNoDelay; - snprintf(currentFactorsBuf, sizeof(currentFactorsBuf), "%s, %s", currentFactorsBuf, "StandbyNoDelay"); } if (standbyNixed || !standbyEnabled) { currentFactors |= kIOPMSleepFactorStandbyDisabled; - snprintf(currentFactorsBuf, sizeof(currentFactorsBuf), "%s, %s", currentFactorsBuf, "StandbyDisabled"); } if (resetTimers) { currentFactors |= kIOPMSleepFactorLocalUserActivity; currentFactors &= ~kIOPMSleepFactorSleepTimerWake; - snprintf(currentFactorsBuf, sizeof(currentFactorsBuf), "%s, %s", currentFactorsBuf, "LocalUserActivity, !SleepTimerWake"); } if (getPMAssertionLevel(kIOPMDriverAssertionUSBExternalDeviceBit) != kIOPMDriverAssertionLevelOff) { currentFactors |= kIOPMSleepFactorUSBExternalDevice; - snprintf(currentFactorsBuf, sizeof(currentFactorsBuf), "%s, %s", currentFactorsBuf, "USBExternalDevice"); } if (getPMAssertionLevel(kIOPMDriverAssertionBluetoothHIDDevicePairedBit) != kIOPMDriverAssertionLevelOff) { currentFactors |= kIOPMSleepFactorBluetoothHIDDevice; - snprintf(currentFactorsBuf, sizeof(currentFactorsBuf), "%s, %s", currentFactorsBuf, "BluetoothHIDDevice"); } if (getPMAssertionLevel(kIOPMDriverAssertionExternalMediaMountedBit) != kIOPMDriverAssertionLevelOff) { currentFactors |= kIOPMSleepFactorExternalMediaMounted; - snprintf(currentFactorsBuf, sizeof(currentFactorsBuf), "%s, %s", currentFactorsBuf, "ExternalMediaMounted"); } if (getPMAssertionLevel(kIOPMDriverAssertionReservedBit5) != kIOPMDriverAssertionLevelOff) { currentFactors |= kIOPMSleepFactorThunderboltDevice; - snprintf(currentFactorsBuf, sizeof(currentFactorsBuf), "%s, %s", currentFactorsBuf, "ThunderboltDevice"); } - if (_scheduledAlarms != 0) { + if (_scheduledAlarmMask != 0) { currentFactors |= kIOPMSleepFactorRTCAlarmScheduled; - snprintf(currentFactorsBuf, sizeof(currentFactorsBuf), "%s, %s", currentFactorsBuf, "RTCAlaramScheduled"); } if (getPMAssertionLevel(kIOPMDriverAssertionMagicPacketWakeEnabledBit) != kIOPMDriverAssertionLevelOff) { currentFactors |= kIOPMSleepFactorMagicPacketWakeEnabled; - snprintf(currentFactorsBuf, sizeof(currentFactorsBuf), "%s, %s", currentFactorsBuf, "MagicPacketWakeEnabled"); } #define TCPKEEPALIVE 1 #if TCPKEEPALIVE if (getPMAssertionLevel(kIOPMDriverAssertionNetworkKeepAliveActiveBit) != kIOPMDriverAssertionLevelOff) { currentFactors |= kIOPMSleepFactorNetworkKeepAliveActive; - snprintf(currentFactorsBuf, sizeof(currentFactorsBuf), "%s, %s", currentFactorsBuf, "NetworkKeepAliveActive"); } #endif if (!powerOffEnabled) { currentFactors |= kIOPMSleepFactorAutoPowerOffDisabled; - snprintf(currentFactorsBuf, sizeof(currentFactorsBuf), "%s, %s", currentFactorsBuf, "AutoPowerOffDisabled"); } if (desktopMode) { currentFactors |= kIOPMSleepFactorExternalDisplay; - snprintf(currentFactorsBuf, sizeof(currentFactorsBuf), "%s, %s", currentFactorsBuf, "ExternalDisplay"); } if (userWasActive) { currentFactors |= kIOPMSleepFactorLocalUserActivity; - snprintf(currentFactorsBuf, sizeof(currentFactorsBuf), "%s, %s", currentFactorsBuf, "LocalUserActivity"); } if (darkWakeHibernateError && !CAP_HIGHEST(kIOPMSystemCapabilityGraphics)) { currentFactors |= kIOPMSleepFactorHibernateFailed; - snprintf(currentFactorsBuf, sizeof(currentFactorsBuf), "%s, %s", currentFactorsBuf, "HibernateFailed"); } if (thermalWarningState) { currentFactors |= kIOPMSleepFactorThermalWarning; - snprintf(currentFactorsBuf, 
sizeof(currentFactorsBuf), "%s, %s", currentFactorsBuf, "ThermalWarning"); } - DLOG("sleep factors 0x%llx %s\n", currentFactors, currentFactorsBuf); + for (int factorBit = 0; factorBit < (8 * sizeof(uint32_t)); factorBit++) { + uint32_t factor = 1 << factorBit; + if (factor & currentFactors) { + strlcat(currentFactorsBuf, ", ", sizeof(currentFactorsBuf)); + strlcat(currentFactorsBuf, IOFindNameForValue(factor, factorValues), sizeof(currentFactorsBuf)); + } + } + DLOG("sleep factors 0x%llx%s\n", currentFactors, currentFactorsBuf); if (gSleepPolicyHandler) { uint32_t savedHibernateMode; @@ -4849,7 +5250,7 @@ IOPMrootDomain::evaluateSystemSleepPolicy( gSleepPolicyVars->standbyDelay = standbyDelay; gSleepPolicyVars->standbyTimer = standbyTimer; gSleepPolicyVars->poweroffDelay = powerOffDelay; - gSleepPolicyVars->scheduledAlarms = _scheduledAlarms | _userScheduledAlarm; + gSleepPolicyVars->scheduledAlarms = _scheduledAlarmMask | _userScheduledAlarmMask; gSleepPolicyVars->poweroffTimer = powerOffTimer; if (kIOPMSleepPhase0 == sleepPhase) { @@ -4895,7 +5296,7 @@ IOPMrootDomain::evaluateSystemSleepPolicy( } // Validate the sleep policy table - policyData = OSDynamicCast(OSData, prop); + policyData = OSDynamicCast(OSData, prop.get()); if (!policyData || (policyData->getLength() <= sizeof(IOPMSystemSleepPolicyTable))) { goto done; } @@ -4966,10 +5367,6 @@ IOPMrootDomain::evaluateSystemSleepPolicy( } done: - if (prop) { - prop->release(); - } - return found; } @@ -5027,7 +5424,7 @@ void IOPMrootDomain::evaluateSystemSleepPolicyFinal( void ) { IOPMSystemSleepParameters params; - OSData * paramsData; + OSSharedPtr paramsData; bool wakeNow; // Evaluate sleep policy after sleeping drivers but before platform sleep. @@ -5072,8 +5469,7 @@ IOPMrootDomain::evaluateSystemSleepPolicyFinal( void ) paramsData = OSData::withBytes(¶ms, sizeof(params)); if (paramsData) { - setProperty(kIOPMSystemSleepParametersKey, paramsData); - paramsData->release(); + setProperty(kIOPMSystemSleepParametersKey, paramsData.get()); } if (getSleepTypeAttributes(params.sleepType) & @@ -5108,41 +5504,31 @@ IOPMrootDomain::getHibernateSettings( bool IOPMrootDomain::getSleepOption( const char * key, uint32_t * option ) { - OSObject * optionsProp; - OSDictionary * optionsDict; - OSObject * obj = NULL; - OSNumber * num; - bool ok = false; + OSSharedPtr optionsProp; + OSDictionary * optionsDict; + OSSharedPtr obj; + OSNumber * num; + bool ok = false; optionsProp = copyProperty(kRootDomainSleepOptionsKey); - optionsDict = OSDynamicCast(OSDictionary, optionsProp); + optionsDict = OSDynamicCast(OSDictionary, optionsProp.get()); if (optionsDict) { - obj = optionsDict->getObject(key); - if (obj) { - obj->retain(); - } + obj.reset(optionsDict->getObject(key), OSRetain); } if (!obj) { obj = copyProperty(key); } if (obj) { - if ((num = OSDynamicCast(OSNumber, obj))) { + if ((num = OSDynamicCast(OSNumber, obj.get()))) { *option = num->unsigned32BitValue(); ok = true; - } else if (OSDynamicCast(OSBoolean, obj)) { + } else if (OSDynamicCast(OSBoolean, obj.get())) { *option = (obj == kOSBooleanTrue) ? 1 : 0; ok = true; } } - if (obj) { - obj->release(); - } - if (optionsProp) { - optionsProp->release(); - } - return ok; } #endif /* HIBERNATION */ @@ -5320,16 +5706,13 @@ IOPMrootDomain::handlePlatformHaltRestart( UInt32 pe_type ) // For normal shutdown, turn off File Server Mode. 
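The replacement logging loop above walks each set bit of the factor mask and appends its name via IOFindNameForValue/strlcat instead of the old per-factor snprintf calls. A standalone model of that loop, with its own small name table and strncat standing in for the kernel's strlcat:

#include <cstdint>
#include <cstdio>
#include <cstring>

struct NamedValue {
    uint32_t    value;
    const char *name;
};

static const NamedValue factorValues[] = {
    { 0x01, "LidOpen" },      // example table, not the kernel's
    { 0x02, "ACPower" },
    { 0x04, "BatteryLow" },
    { 0, nullptr }
};

static const char *findName(uint32_t value)
{
    for (const NamedValue *nv = factorValues; nv->name; nv++) {
        if (nv->value == value) {
            return nv->name;
        }
    }
    return "?";
}

int main()
{
    uint32_t factors = 0x05;                  // LidOpen | BatteryLow
    char buf[128] = "";

    for (int bit = 0; bit < 32; bit++) {
        uint32_t factor = 1u << bit;
        if (factors & factor) {
            std::strncat(buf, ", ", sizeof(buf) - std::strlen(buf) - 1);
            std::strncat(buf, findName(factor), sizeof(buf) - std::strlen(buf) - 1);
        }
    }
    std::printf("sleep factors 0x%x%s\n", factors, buf);
    return 0;
}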
if (kPEHaltCPU == pe_type) { - const OSSymbol * setting = OSSymbol::withCString(kIOPMSettingRestartOnPowerLossKey); - OSNumber * num = OSNumber::withNumber((unsigned long long) 0, 32); + OSSharedPtr setting = OSSymbol::withCString(kIOPMSettingRestartOnPowerLossKey); + OSSharedPtr num = OSNumber::withNumber((unsigned long long) 0, 32); if (setting && num) { - setPMSetting(setting, num); - setting->release(); - num->release(); + setPMSetting(setting.get(), num.get()); } } - if (kPEPagingOff != pe_type) { gHaltRestartCtx.phase = kNotifyPowerPlaneDrivers; // Notify in power tree order @@ -5337,13 +5720,13 @@ IOPMrootDomain::handlePlatformHaltRestart( UInt32 pe_type ) } gHaltRestartCtx.phase = kNotifyHaltRestartAction; -#if !CONFIG_EMBEDDED +#if defined(XNU_TARGET_OS_OSX) IOCPURunPlatformHaltRestartActions(pe_type); -#else +#else /* !defined(XNU_TARGET_OS_OSX) */ if (kPEPagingOff != pe_type) { IOCPURunPlatformHaltRestartActions(pe_type); } -#endif +#endif /* !defined(XNU_TARGET_OS_OSX) */ // Wait for PM to quiesce if ((kPEPagingOff != pe_type) && gPMHaltLock) { @@ -5440,11 +5823,11 @@ IOPMrootDomain::restartSystem( void ) void IOPMrootDomain::tagPowerPlaneService( - IOService * service, - IOPMActions * actions ) + IOService * service, + IOPMActions * actions, + IOPMPowerStateIndex maxPowerState ) { uint32_t flags = 0; - bool isDisplayWrangler; memset(actions, 0, sizeof(*actions)); actions->target = this; @@ -5467,31 +5850,39 @@ IOPMrootDomain::tagPowerPlaneService( return; } -#if !NO_KERNEL_HID - isDisplayWrangler = (NULL != service->metaCast("IODisplayWrangler")); - if (isDisplayWrangler) { - wrangler = service; +#if DISPLAY_WRANGLER_PRESENT + if (NULL != service->metaCast("IODisplayWrangler")) { + // XXX should this really retain? + wrangler.reset(service, OSRetain); + wrangler->registerInterest(gIOGeneralInterest, + &displayWranglerNotification, this, NULL); + // found the display wrangler, check for any display assertions already created if (pmAssertions->getActivatedAssertions() & kIOPMDriverAssertionPreventDisplaySleepBit) { DLOG("wrangler setIgnoreIdleTimer\(1) due to pre-existing assertion\n"); wrangler->setIgnoreIdleTimer( true ); } + flags |= kPMActionsFlagIsDisplayWrangler; } -#else - isDisplayWrangler = false; -#endif +#endif /* DISPLAY_WRANGLER_PRESENT */ -#if defined(__i386__) || defined(__x86_64__) - if (isDisplayWrangler) { - flags |= kPMActionsFlagIsDisplayWrangler; + if (service->propertyExists("IOPMStrictTreeOrder")) { + flags |= kPMActionsFlagIsGraphicsDriver; } - if (service->getProperty("IOPMStrictTreeOrder")) { - flags |= kPMActionsFlagIsGraphicsDevice; + if (service->propertyExists("IOPMUnattendedWakePowerState")) { + flags |= kPMActionsFlagIsAudioDriver; } - if (service->getProperty("IOPMUnattendedWakePowerState")) { - flags |= kPMActionsFlagIsAudioDevice; + + OSSharedPtr prop = service->copyProperty(kIOPMDarkWakeMaxPowerStateKey); + if (prop) { + OSNumber * num = OSDynamicCast(OSNumber, prop.get()); + if (num) { + actions->darkWakePowerState = num->unsigned32BitValue(); + if (actions->darkWakePowerState < maxPowerState) { + flags |= kPMActionsFlagHasDarkWakePowerState; + } + } } -#endif // Find the power connection object that is a child of the PCI host // bridge, and has a graphics/audio device attached below. 
Mark the @@ -5502,7 +5893,7 @@ IOPMrootDomain::tagPowerPlaneService( IORegistryEntry * parent = child->getParentEntry(gIOPowerPlane); while (child != this) { - if (child->getProperty("IOPCITunnelled") == kOSBooleanTrue) { + if (child->propertyHasValue("IOPCITunnelled", kOSBooleanTrue)) { // Skip delaying notifications and clamping power on external graphics and audio devices. DLOG("Avoiding delayChildNotification on object 0x%llx. flags: 0x%x\n", service->getRegistryEntryID(), flags); flags = 0; @@ -5524,11 +5915,11 @@ IOPMrootDomain::tagPowerPlaneService( if (flags) { DLOG("%s tag flags %x\n", service->getName(), flags); - actions->parameter |= flags; + actions->flags |= flags; actions->actionPowerChangeOverride = OSMemberFunctionCast( IOPMActionPowerChangeOverride, this, - &IOPMrootDomain::overridePowerChangeForUIService); + &IOPMrootDomain::overridePowerChangeForService); if (flags & kPMActionsFlagIsDisplayWrangler) { actions->actionActivityTickle = @@ -5549,8 +5940,8 @@ IOPMrootDomain::tagPowerPlaneService( IOService * provider = service->getProvider(); if (OSDynamicCast(IOPlatformDevice, provider) && provider->inPlane(gIODTPlane)) { - pciHostBridgeDevice = provider; - pciHostBridgeDriver = service; + pciHostBridgeDevice.reset(provider, OSNoRetain); + pciHostBridgeDriver.reset(service, OSNoRetain); DLOG("PMTrace found PCI host bridge %s->%s\n", provider->getName(), service->getName()); } @@ -5562,11 +5953,11 @@ IOPMrootDomain::tagPowerPlaneService( // Would prefer to check built-in property, but tagPowerPlaneService() // is called before pciDevice->registerService(). IORegistryEntry * parent = service->getParentEntry(gIODTPlane); - if ((parent == pciHostBridgeDevice) && service->getProperty("acpi-device")) { + if ((parent == pciHostBridgeDevice) && service->propertyExists("acpi-device")) { int bit = pmTracer->recordTopLevelPCIDevice( service ); if (bit >= 0) { // Save the assigned bit for fast lookup. 
- actions->parameter |= (bit & kPMActionsPCIBitNumberMask); + actions->flags |= (bit & kPMActionsPCIBitNumberMask); actions->actionPowerChangeStart = OSMemberFunctionCast( @@ -5590,15 +5981,26 @@ void IOPMrootDomain::overrideOurPowerChange( IOService * service, IOPMActions * actions, + const IOPMRequest * request, IOPMPowerStateIndex * inOutPowerState, - IOPMPowerChangeFlags * inOutChangeFlags, - IOPMRequestTag requestTag ) + IOPMPowerChangeFlags * inOutChangeFlags ) { - uint32_t powerState = (uint32_t) *inOutPowerState; uint32_t changeFlags = *inOutChangeFlags; + uint32_t desiredPowerState = (uint32_t) *inOutPowerState; uint32_t currentPowerState = (uint32_t) getPowerState(); - if ((AOT_STATE == powerState) && (ON_STATE == currentPowerState)) { + if (request->getTag() == 0) { + // Set a tag for any request that originates from IOServicePM + (const_cast(request))->fTag = nextRequestTag(kCPSReasonPMInternals); + } + + DLOG("PowerChangeOverride (%s->%s, %x, 0x%x) tag 0x%x\n", + getPowerStateString(currentPowerState), + getPowerStateString(desiredPowerState), + _currentCapability, changeFlags, + request->getTag()); + + if ((AOT_STATE == desiredPowerState) && (ON_STATE == currentPowerState)) { // Assertion may have been taken in AOT leading to changePowerStateTo(AOT) *inOutChangeFlags |= kIOPMNotDone; return; @@ -5611,9 +6013,18 @@ IOPMrootDomain::overrideOurPowerChange( return; } - if (powerState < currentPowerState) { +#if defined(XNU_TARGET_OS_OSX) && !DISPLAY_WRANGLER_PRESENT + if (lowBatteryCondition && (desiredPowerState < currentPowerState)) { + // Reject sleep requests when lowBatteryCondition is TRUE to + // avoid racing with the impending system shutdown. + *inOutChangeFlags |= kIOPMNotDone; + return; + } +#endif + + if (desiredPowerState < currentPowerState) { if (CAP_CURRENT(kIOPMSystemCapabilityGraphics)) { - // Root domain is dropping power state ON->SLEEP. + // Root domain is dropping power state from ON->SLEEP. // If system is in full wake, first enter dark wake by // converting the power drop to a capability change. // Once in dark wake, transition to sleep state ASAP. @@ -5630,13 +6041,13 @@ IOPMrootDomain::overrideOurPowerChange( *inOutChangeFlags |= kIOPMSynchronize; // Revert device desire from SLEEP to ON - changePowerStateToPriv(getRUN_STATE()); + changePowerStateWithTagToPriv(getRUN_STATE(), kCPSReasonPowerOverride); } else { - // System is in dark wake, ok to drop power state. - // Broadcast root powering down to entire tree. + // System is already in dark wake, ok to drop power state. + // Broadcast root power down to entire tree. *inOutChangeFlags |= kIOPMRootChangeDown; } - } else if (powerState > currentPowerState) { + } else if (desiredPowerState > currentPowerState) { if ((_currentCapability & kIOPMSystemCapabilityCPU) == 0) { // Broadcast power up when waking from sleep, but not for the // initial power change at boot by checking for cpu capability. @@ -5649,31 +6060,40 @@ void IOPMrootDomain::handleOurPowerChangeStart( IOService * service, IOPMActions * actions, - IOPMPowerStateIndex powerState, - IOPMPowerChangeFlags * inOutChangeFlags, - IOPMRequestTag requestTag ) + const IOPMRequest * request, + IOPMPowerStateIndex newPowerState, + IOPMPowerChangeFlags * inOutChangeFlags ) { + IOPMRequestTag requestTag = request->getTag(); + IOPMRequestTag sleepReason; + uint32_t changeFlags = *inOutChangeFlags; uint32_t currentPowerState = (uint32_t) getPowerState(); - uint32_t sleepReason = requestTag ? 
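tagPowerPlaneService() stores the trace bit assigned to a top-level PCI device in the low bits of actions->flags (masked by kPMActionsPCIBitNumberMask) so later power-change callbacks can update the PM trace word without another registry lookup. A sketch of that packing; the 8-bit mask width and the flag values are assumptions for the example:

#include <cstdint>
#include <cstdio>

enum : uint32_t {
    kPCIBitNumberMask = 0x000000ff,   // assumed 8-bit field
    kFlagExample      = 0x00000100,   // example high-order action flag
};

static uint32_t tagDevice(uint32_t flags, int traceBit)
{
    return flags | (static_cast<uint32_t>(traceBit) & kPCIBitNumberMask);
}

int main()
{
    uint32_t flags = tagDevice(kFlagExample, 5);
    uint32_t bit   = flags & kPCIBitNumberMask;   // recovered in the power-change callback
    uint32_t traceWord = 0;
    traceWord |= (1u << bit);                     // mark this device busy in the trace word
    std::printf("flags 0x%x, trace bit %u, trace word 0x%x\n", flags, bit, traceWord);
    return 0;
}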
requestTag : kIOPMSleepReasonIdle; bool publishSleepReason = false; + // Check if request has a valid sleep reason + sleepReason = REQUEST_TAG_TO_REASON(requestTag); + if (sleepReason < kIOPMSleepReasonClamshell) { + sleepReason = kIOPMSleepReasonIdle; + } + _systemTransitionType = kSystemTransitionNone; _systemMessageClientMask = 0; capabilityLoss = false; toldPowerdCapWillChange = false; + // Emergency notifications may arrive after the initial sleep request + // has been queued. Override the sleep reason so powerd and others can + // treat this as an emergency sleep. if (lowBatteryCondition) { - // Low battery notification may arrive after the initial sleep request - // has been queued. Override the sleep reason so powerd and others can - // treat this as an emergency sleep. sleepReason = kIOPMSleepReasonLowPower; + } else if (thermalEmergencyState) { + sleepReason = kIOPMSleepReasonThermalEmergency; } // 1. Explicit capability change. - if (changeFlags & kIOPMSynchronize) { - if (powerState == ON_STATE) { + if (newPowerState == ON_STATE) { if (changeFlags & kIOPMSyncNoChildNotify) { _systemTransitionType = kSystemTransitionNewCapClient; } else { @@ -5682,22 +6102,30 @@ IOPMrootDomain::handleOurPowerChangeStart( } } // 2. Going to sleep (cancellation still possible). - else if (powerState < currentPowerState) { + else if (newPowerState < currentPowerState) { _systemTransitionType = kSystemTransitionSleep; } // 3. Woke from (idle or demand) sleep. else if (!systemBooting && (changeFlags & kIOPMSelfInitiated) && - (powerState > currentPowerState)) { + (newPowerState > currentPowerState)) { _systemTransitionType = kSystemTransitionWake; - _desiredCapability = kIOPMSystemCapabilityCPU | - kIOPMSystemCapabilityNetwork; + _desiredCapability = kIOPMSystemCapabilityCPU | kIOPMSystemCapabilityNetwork; // Early exit from dark wake to full (e.g. LID open) if (kFullWakeReasonNone != fullWakeReason) { _desiredCapability |= ( kIOPMSystemCapabilityGraphics | kIOPMSystemCapabilityAudio); + +#if defined(XNU_TARGET_OS_OSX) && !DISPLAY_WRANGLER_PRESENT + if (fullWakeReason == kFullWakeReasonLocalUser) { + darkWakeExit = true; + darkWakeToSleepASAP = false; + setProperty(kIOPMRootDomainWakeTypeKey, isRTCAlarmWake ? + kIOPMRootDomainWakeTypeAlarm : kIOPMRootDomainWakeTypeUser); + } +#endif } #if HIBERNATION IOHibernateSetWakeCapabilities(_desiredCapability); @@ -5733,12 +6161,23 @@ IOPMrootDomain::handleOurPowerChangeStart( } // 1. Capability change. - if (kSystemTransitionCapability == _systemTransitionType) { // Dark to Full transition. if (CAP_GAIN(kIOPMSystemCapabilityGraphics)) { tracePoint( kIOPMTracePointDarkWakeExit ); +#if defined(XNU_TARGET_OS_OSX) + // rdar://problem/65627936 + // When a dark->full wake promotion is scheduled before an ON->SLEEP + // power state drop, invalidate any request to drop power state already + // in the queue, including the override variant, unless full wake cannot + // be sustained. Any power state drop queued after this SustainFullWake + // request will not be affected. 
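handleOurPowerChangeStart() now derives the sleep reason from the request tag, falls back to idle when the tag carries no recognizable reason, and lets emergency conditions (low battery, thermal) override it. A minimal model of that priority order with placeholder enumerator values:

#include <cstdio>

enum SleepReason {                 // placeholder values, not the kernel enumerators
    kReasonIdle = 1,
    kReasonClamshell,
    kReasonLowPower,
    kReasonThermalEmergency,
};

static SleepReason pickSleepReason(int taggedReason, bool lowBattery, bool thermalEmergency)
{
    // Fall back to idle when the tag does not carry a recognizable reason.
    SleepReason reason = (taggedReason >= kReasonClamshell)
        ? static_cast<SleepReason>(taggedReason) : kReasonIdle;

    // Emergency conditions override whatever the request was tagged with.
    if (lowBattery) {
        reason = kReasonLowPower;
    } else if (thermalEmergency) {
        reason = kReasonThermalEmergency;
    }
    return reason;
}

int main()
{
    std::printf("%d\n", pickSleepReason(kReasonClamshell, false, false));   // clamshell
    std::printf("%d\n", pickSleepReason(0, false, false));                  // idle fallback
    std::printf("%d\n", pickSleepReason(kReasonClamshell, true, false));    // low power wins
    return 0;
}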
+ if (checkSystemCanSustainFullWake()) { + changePowerStateWithOverrideTo(getRUN_STATE(), kCPSReasonSustainFullWake); + } +#endif + willEnterFullWake(); } @@ -5747,18 +6186,15 @@ IOPMrootDomain::handleOurPowerChangeStart( // Clear previous stats IOLockLock(pmStatsLock); if (pmStatsAppResponses) { - pmStatsAppResponses->release(); pmStatsAppResponses = OSArray::withCapacity(5); } IOLockUnlock(pmStatsLock); - tracePoint( kIOPMTracePointDarkWakeEntry ); *inOutChangeFlags |= kIOPMSyncTellPowerDown; _systemMessageClientMask = kSystemMessageClientPowerd | kSystemMessageClientLegacyApp; - // rdar://15971327 // Prevent user active transitions before notifying clients // that system will sleep. @@ -5777,7 +6213,7 @@ IOPMrootDomain::handleOurPowerChangeStart( DLOG("sleepDelaysReport f->9 start at 0x%llx\n", ts_sleepStart); } - wranglerTickled = false; + darkWakeExit = false; } } // 2. System sleep. @@ -5795,6 +6231,14 @@ IOPMrootDomain::handleOurPowerChangeStart( // transition to full wake, so don't notify them unless system // has gained graphics capability since the last system wake. _systemMessageClientMask &= ~kSystemMessageClientKernel; + } else { + // System was in full wake, but the downwards power transition is driven + // by a request that originates from IOServicePM, so it isn't tagged with + // a valid system sleep reason. + if (REQUEST_TAG_TO_REASON(requestTag) == kCPSReasonPMInternals) { + // Publish the same reason for full to dark + sleepReason = fullToDarkReason; + } } #if HIBERNATION gIOHibernateState = 0; @@ -5815,16 +6259,19 @@ IOPMrootDomain::handleOurPowerChangeStart( tracePoint( kIOPMTracePointWakeWillPowerOnClients ); // Clear stats about sleep - if (AOT_STATE == powerState) { + if (AOT_STATE == newPowerState) { _pendingCapability = 0; } + if (AOT_STATE == currentPowerState) { + // Wake events are no longer accepted after waking to AOT_STATE. + // Re-enable wake event acceptance to append wake events claimed + // during the AOT to ON_STATE transition. 
+ acceptSystemWakeEvents(kAcceptSystemWakeEvents_Reenable); + } + if (_pendingCapability & kIOPMSystemCapabilityGraphics) { willEnterFullWake(); - } else { - // Message powerd only - _systemMessageClientMask = kSystemMessageClientPowerd; - tellClients(kIOMessageSystemWillPowerOn); } } @@ -5844,7 +6291,8 @@ IOPMrootDomain::handleOurPowerChangeStart( kIOPMThermalEmergencySleepKey, kIOPMMaintenanceSleepKey, kIOPMSleepServiceExitKey, - kIOPMDarkWakeThermalEmergencyKey + kIOPMDarkWakeThermalEmergencyKey, + kIOPMNotificationWakeExitKey }; // Record sleep cause in IORegistry @@ -5860,19 +6308,21 @@ IOPMrootDomain::handleOurPowerChangeStart( _systemStateGeneration++; systemDarkWake = false; - DLOG("=== START (%s->%s, 0x%x) type %u, gen %u, msg %x, " - "dcp %x:%x:%x\n", - getPowerStateString(currentPowerState), getPowerStateString((uint32_t) powerState), *inOutChangeFlags, - _systemTransitionType, _systemStateGeneration, - _systemMessageClientMask, - _desiredCapability, _currentCapability, _pendingCapability); + DLOG("=== START (%s->%s, %x->%x, 0x%x) gen %u, msg %x, tag %x\n", + getPowerStateString(currentPowerState), + getPowerStateString((uint32_t) newPowerState), + _currentCapability, _pendingCapability, + *inOutChangeFlags, _systemStateGeneration, _systemMessageClientMask, + requestTag); } - if ((AOT_STATE == powerState) && (SLEEP_STATE != currentPowerState)) { + if ((AOT_STATE == newPowerState) && (SLEEP_STATE != currentPowerState)) { panic("illegal AOT entry from %s", getPowerStateString(currentPowerState)); } - if (_aotNow && (ON_STATE == powerState)) { + if (_aotNow && (ON_STATE == newPowerState)) { + WAKEEVENT_LOCK(); aotShouldExit(false, true); + WAKEEVENT_UNLOCK(); aotExit(false); } } @@ -5881,9 +6331,9 @@ void IOPMrootDomain::handleOurPowerChangeDone( IOService * service, IOPMActions * actions, - IOPMPowerStateIndex powerState, - IOPMPowerChangeFlags changeFlags, - IOPMRequestTag requestTag __unused ) + const IOPMRequest * request, + IOPMPowerStateIndex oldPowerState, + IOPMPowerChangeFlags changeFlags ) { if (kSystemTransitionNewCapClient == _systemTransitionType) { _systemTransitionType = kSystemTransitionNone; @@ -5898,14 +6348,18 @@ IOPMrootDomain::handleOurPowerChangeDone( _pendingCapability = _currentCapability; lastSleepReason = 0; + // When sleep is cancelled or reverted, don't report + // the target (lower) power state as the previous state. + oldPowerState = currentPowerState; + if (!CAP_CURRENT(kIOPMSystemCapabilityGraphics) && CAP_CURRENT(kIOPMSystemCapabilityCPU)) { -#if !CONFIG_EMBEDDED +#if defined(XNU_TARGET_OS_OSX) pmPowerStateQueue->submitPowerEvent( kPowerEventPolicyStimulus, (void *) kStimulusDarkWakeReentry, _systemStateGeneration ); -#else +#else /* !defined(XNU_TARGET_OS_OSX) */ // On embedded, there are no factors that can prolong a // "darkWake" when a power down is vetoed. We need to // promote to "fullWake" at least once so that factors @@ -5913,11 +6367,11 @@ IOPMrootDomain::handleOurPowerChangeDone( pmPowerStateQueue->submitPowerEvent( kPowerEventPolicyStimulus, (void *) kStimulusDarkWakeActivityTickle); -#endif +#endif /* !defined(XNU_TARGET_OS_OSX) */ } // Revert device desire to max. - changePowerStateToPriv(getRUN_STATE()); + changePowerStateWithTagToPriv(getRUN_STATE(), kCPSReasonPowerDownCancel); } else { // Send message on dark wake to full wake promotion. // tellChangeUp() handles the normal SLEEP->ON case. 
@@ -5957,7 +6411,7 @@ IOPMrootDomain::handleOurPowerChangeDone( wrangler->changePowerStateForRootDomain( kWranglerPowerStateMin ); } - removeProperty(gIOPMUserTriggeredFullWakeKey); + removeProperty(gIOPMUserTriggeredFullWakeKey.get()); } } @@ -5976,25 +6430,22 @@ IOPMrootDomain::handleOurPowerChangeDone( } } - DLOG("=== FINISH (%s->%s, 0x%x) type %u, gen %u, msg %x, " - "dcp %x:%x:%x, dbgtimer %u\n", - getPowerStateString(currentPowerState), getPowerStateString((uint32_t) powerState), changeFlags, - _systemTransitionType, _systemStateGeneration, - _systemMessageClientMask, - _desiredCapability, _currentCapability, _pendingCapability, - _lastDebugWakeSeconds); + DLOG("=== FINISH (%s->%s, %x->%x, 0x%x) gen %u, msg %x, tag %x\n", + getPowerStateString((uint32_t) oldPowerState), getPowerStateString(currentPowerState), + _currentCapability, _pendingCapability, + changeFlags, _systemStateGeneration, _systemMessageClientMask, + request->getTag()); + + if ((currentPowerState == ON_STATE) && pmAssertions) { + pmAssertions->reportCPUBitAccounting(); + } if (_pendingCapability & kIOPMSystemCapabilityGraphics) { displayWakeCnt++; -#if DARK_TO_FULL_EVALUATE_CLAMSHELL - if (clamshellExists && fullWakeThreadCall && - CAP_HIGHEST(kIOPMSystemCapabilityGraphics)) { - // Not the initial graphics full power, graphics won't - // send a power notification to trigger a lid state - // evaluation. - +#if DARK_TO_FULL_EVALUATE_CLAMSHELL_DELAY + if (clamshellExists && fullWakeThreadCall) { AbsoluteTime deadline; - clock_interval_to_deadline(45, kSecondScale, &deadline); + clock_interval_to_deadline(DARK_TO_FULL_EVALUATE_CLAMSHELL_DELAY, kSecondScale, &deadline); thread_call_enter_delayed(fullWakeThreadCall, deadline); } #endif @@ -6013,11 +6464,11 @@ IOPMrootDomain::handleOurPowerChangeDone( if (darkWakePostTickle && (kSystemTransitionWake == _systemTransitionType) && - (gDarkWakeFlags & kDarkWakeFlagHIDTickleMask) == - kDarkWakeFlagHIDTickleLate) { + (gDarkWakeFlags & kDarkWakeFlagPromotionMask) == + kDarkWakeFlagPromotionLate) { darkWakePostTickle = false; reportUserInput(); - } else if (wranglerTickled) { + } else if (darkWakeExit) { requestFullWake( kFullWakeReasonLocalUser ); } @@ -6036,17 +6487,19 @@ IOPMrootDomain::handleOurPowerChangeDone( _systemMessageClientMask = 0; toldPowerdCapWillChange = false; - logGraphicsClamp = false; + darkWakeLogClamp = false; if (lowBatteryCondition) { privateSleepSystem(kIOPMSleepReasonLowPower); - } else if ((fullWakeReason == kFullWakeReasonDisplayOn) && (!displayPowerOnRequested)) { + } else if (thermalEmergencyState) { + privateSleepSystem(kIOPMSleepReasonThermalEmergency); + } else if ((fullWakeReason == kFullWakeReasonDisplayOn) && !displayPowerOnRequested) { // Request for full wake is removed while system is waking up to full wake DLOG("DisplayOn fullwake request is removed\n"); - handleDisplayPowerOn(); + handleSetDisplayPowerOn(false); } - if (isRTCAlarmWake) { + if ((gClamshellFlags & kClamshell_WAR_47715679) && isRTCAlarmWake) { pmPowerStateQueue->submitPowerEvent( kPowerEventReceivedPowerNotification, (void *)(uintptr_t) kLocalEvalClamshellCommand ); } @@ -6058,69 +6511,83 @@ IOPMrootDomain::handleOurPowerChangeDone( //****************************************************************************** void -IOPMrootDomain::overridePowerChangeForUIService( +IOPMrootDomain::overridePowerChangeForService( IOService * service, IOPMActions * actions, + const IOPMRequest * request, IOPMPowerStateIndex * inOutPowerState, IOPMPowerChangeFlags * inOutChangeFlags ) { uint32_t 
powerState = (uint32_t) *inOutPowerState; uint32_t changeFlags = (uint32_t) *inOutChangeFlags; + const uint32_t actionFlags = actions->flags; if (kSystemTransitionNone == _systemTransitionType) { // Not in midst of a system transition. - // Do not modify power limit enable state. - } else if ((actions->parameter & kPMActionsFlagLimitPower) == 0) { - // Activate power limiter. + // Do not set kPMActionsStatePowerClamped. + } else if ((actions->state & kPMActionsStatePowerClamped) == 0) { + bool enableClamp = false; - if ((actions->parameter & kPMActionsFlagIsDisplayWrangler) && + // For most drivers, enable the clamp during ON->Dark transition + // which has the kIOPMSynchronize flag set in changeFlags. + if ((actionFlags & kPMActionsFlagIsDisplayWrangler) && ((_pendingCapability & kIOPMSystemCapabilityGraphics) == 0) && (changeFlags & kIOPMSynchronize)) { - actions->parameter |= kPMActionsFlagLimitPower; - } else if ((actions->parameter & kPMActionsFlagIsAudioDevice) && + enableClamp = true; + } else if ((actionFlags & kPMActionsFlagIsAudioDriver) && ((gDarkWakeFlags & kDarkWakeFlagAudioNotSuppressed) == 0) && ((_pendingCapability & kIOPMSystemCapabilityAudio) == 0) && (changeFlags & kIOPMSynchronize)) { - actions->parameter |= kPMActionsFlagLimitPower; - } else if ((actions->parameter & kPMActionsFlagIsGraphicsDevice) && + enableClamp = true; + } else if ((actionFlags & kPMActionsFlagHasDarkWakePowerState) && + ((_pendingCapability & kIOPMSystemCapabilityGraphics) == 0) && + (changeFlags & kIOPMSynchronize)) { + enableClamp = true; + } else if ((actionFlags & kPMActionsFlagIsGraphicsDriver) && (_systemTransitionType == kSystemTransitionSleep)) { - // For graphics devices, arm the limiter when entering + // For graphics drivers, clamp power when entering // system sleep. Not when dropping to dark wake. - actions->parameter |= kPMActionsFlagLimitPower; + enableClamp = true; } - if (actions->parameter & kPMActionsFlagLimitPower) { - DLOG("+ plimit %s %p\n", - service->getName(), OBFUSCATE(service)); + if (enableClamp) { + actions->state |= kPMActionsStatePowerClamped; + DLOG("power clamp enabled %s %qx, pendingCap 0x%x, ps %d, cflags 0x%x\n", + service->getName(), service->getRegistryEntryID(), + _pendingCapability, powerState, changeFlags); } - } else { - // Remove power limit. 
+ } else if ((actions->state & kPMActionsStatePowerClamped) != 0) { + bool disableClamp = false; - if ((actions->parameter & ( + if ((actionFlags & ( kPMActionsFlagIsDisplayWrangler | - kPMActionsFlagIsGraphicsDevice)) && + kPMActionsFlagIsGraphicsDriver)) && (_pendingCapability & kIOPMSystemCapabilityGraphics)) { - actions->parameter &= ~kPMActionsFlagLimitPower; - } else if ((actions->parameter & kPMActionsFlagIsAudioDevice) && + disableClamp = true; + } else if ((actionFlags & kPMActionsFlagIsAudioDriver) && (_pendingCapability & kIOPMSystemCapabilityAudio)) { - actions->parameter &= ~kPMActionsFlagLimitPower; + disableClamp = true; + } else if ((actionFlags & kPMActionsFlagHasDarkWakePowerState) && + (_pendingCapability & kIOPMSystemCapabilityGraphics)) { + disableClamp = true; } - if ((actions->parameter & kPMActionsFlagLimitPower) == 0) { - DLOG("- plimit %s %p\n", - service->getName(), OBFUSCATE(service)); + if (disableClamp) { + actions->state &= ~kPMActionsStatePowerClamped; + DLOG("power clamp removed %s %qx, pendingCap 0x%x, ps %d, cflags 0x%x\n", + service->getName(), service->getRegistryEntryID(), + _pendingCapability, powerState, changeFlags); } } - if (actions->parameter & kPMActionsFlagLimitPower) { - uint32_t maxPowerState = (uint32_t)(-1); + if (actions->state & kPMActionsStatePowerClamped) { + uint32_t maxPowerState = 0; + // Determine the max power state allowed when clamp is enabled if (changeFlags & (kIOPMDomainDidChange | kIOPMDomainWillChange)) { - // Enforce limit for system power/cap transitions. - - maxPowerState = 0; + // Parent intiated power state changes if ((service->getPowerState() > maxPowerState) && - (actions->parameter & kPMActionsFlagIsDisplayWrangler)) { + (actionFlags & kPMActionsFlagIsDisplayWrangler)) { maxPowerState++; // Remove lingering effects of any tickle before entering @@ -6130,46 +6597,45 @@ IOPMrootDomain::overridePowerChangeForUIService( if (changeFlags & kIOPMDomainDidChange) { *inOutChangeFlags |= kIOPMExpireIdleTimer; } - } else if (actions->parameter & kPMActionsFlagIsGraphicsDevice) { + } else if (actionFlags & kPMActionsFlagIsGraphicsDriver) { maxPowerState++; + } else if (actionFlags & kPMActionsFlagHasDarkWakePowerState) { + maxPowerState = actions->darkWakePowerState; } } else { // Deny all self-initiated changes when power is limited. // Wrangler tickle should never defeat the limiter. 
- maxPowerState = service->getPowerState(); } if (powerState > maxPowerState) { - DLOG("> plimit %s %p (%u->%u, 0x%x)\n", - service->getName(), OBFUSCATE(service), powerState, maxPowerState, - changeFlags); + DLOG("power clamped %s %qx, ps %u->%u, cflags 0x%x)\n", + service->getName(), service->getRegistryEntryID(), + powerState, maxPowerState, changeFlags); *inOutPowerState = maxPowerState; if (darkWakePostTickle && - (actions->parameter & kPMActionsFlagIsDisplayWrangler) && + (actionFlags & kPMActionsFlagIsDisplayWrangler) && (changeFlags & kIOPMDomainWillChange) && - ((gDarkWakeFlags & kDarkWakeFlagHIDTickleMask) == - kDarkWakeFlagHIDTickleEarly)) { + ((gDarkWakeFlags & kDarkWakeFlagPromotionMask) == + kDarkWakeFlagPromotionEarly)) { darkWakePostTickle = false; reportUserInput(); } } - if (!graphicsSuppressed && (changeFlags & kIOPMDomainDidChange)) { - if (logGraphicsClamp) { + if (!darkWakePowerClamped && (changeFlags & kIOPMDomainDidChange)) { + if (darkWakeLogClamp) { AbsoluteTime now; uint64_t nsec; clock_get_uptime(&now); SUB_ABSOLUTETIME(&now, &gIOLastWakeAbsTime); absolutetime_to_nanoseconds(now, &nsec); - if (kIOLogPMRootDomain & gIOKitDebug) { - MSG("Graphics suppressed %u ms\n", - ((int)((nsec) / NSEC_PER_MSEC))); - } + DLOG("dark wake power clamped after %u ms\n", + ((int)((nsec) / NSEC_PER_MSEC))); } - graphicsSuppressed = true; + darkWakePowerClamped = true; } } } @@ -6179,7 +6645,7 @@ IOPMrootDomain::handleActivityTickleForDisplayWrangler( IOService * service, IOPMActions * actions ) { -#if !NO_KERNEL_HID +#if DISPLAY_WRANGLER_PRESENT // Warning: Not running in PM work loop context - don't modify state !!! // Trap tickle directed to IODisplayWrangler while running with graphics // capability suppressed. @@ -6196,8 +6662,7 @@ IOPMrootDomain::handleActivityTickleForDisplayWrangler( userActivityCount, lastSleepReason); } - if (!wranglerTickled && - ((_pendingCapability & kIOPMSystemCapabilityGraphics) == 0)) { + if (!darkWakeExit && ((_pendingCapability & kIOPMSystemCapabilityGraphics) == 0)) { DLOG("display wrangler tickled\n"); if (kIOLogPMRootDomain & gIOKitDebug) { OSReportWithBacktrace("Dark wake display tickle"); @@ -6209,7 +6674,7 @@ IOPMrootDomain::handleActivityTickleForDisplayWrangler( true /* set wake type */ ); } } -#endif +#endif /* DISPLAY_WRANGLER_PRESENT */ } void @@ -6220,7 +6685,7 @@ IOPMrootDomain::handleUpdatePowerClientForDisplayWrangler( IOPMPowerStateIndex oldPowerState, IOPMPowerStateIndex newPowerState ) { -#if !NO_KERNEL_HID +#if DISPLAY_WRANGLER_PRESENT assert(service == wrangler); // This function implements half of the user active detection @@ -6258,7 +6723,7 @@ IOPMrootDomain::handleUpdatePowerClientForDisplayWrangler( } else if (newPowerState == kWranglerPowerStateMax) { evaluatePolicy( kStimulusDisplayWranglerWake ); } -#endif +#endif /* DISPLAY_WRANGLER_PRESENT */ } //****************************************************************************** @@ -6268,7 +6733,7 @@ IOPMrootDomain::handleUpdatePowerClientForDisplayWrangler( void IOPMrootDomain::preventTransitionToUserActive( bool prevent ) { -#if !NO_KERNEL_HID +#if DISPLAY_WRANGLER_PRESENT _preventUserActive = prevent; if (wrangler && !_preventUserActive) { // Allowing transition to user active, but the wrangler may have @@ -6282,7 +6747,7 @@ IOPMrootDomain::preventTransitionToUserActive( bool prevent ) evaluatePolicy( kStimulusEnterUserActiveState ); } } -#endif +#endif /* DISPLAY_WRANGLER_PRESENT */ } //****************************************************************************** @@ 
-6293,8 +6758,7 @@ bool IOPMrootDomain::shouldDelayChildNotification( IOService * service ) { - if (((gDarkWakeFlags & kDarkWakeFlagHIDTickleMask) != 0) && - (kFullWakeReasonNone == fullWakeReason) && + if ((kFullWakeReasonNone == fullWakeReason) && (kSystemTransitionWake == _systemTransitionType)) { DLOG("%s: delay child notify\n", service->getName()); return true; @@ -6310,32 +6774,34 @@ void IOPMrootDomain::handlePowerChangeStartForPCIDevice( IOService * service, IOPMActions * actions, + const IOPMRequest * request, IOPMPowerStateIndex powerState, IOPMPowerChangeFlags * inOutChangeFlags ) { pmTracer->tracePCIPowerChange( PMTraceWorker::kPowerChangeStart, service, *inOutChangeFlags, - (actions->parameter & kPMActionsPCIBitNumberMask)); + (actions->flags & kPMActionsPCIBitNumberMask)); } void IOPMrootDomain::handlePowerChangeDoneForPCIDevice( IOService * service, IOPMActions * actions, + const IOPMRequest * request, IOPMPowerStateIndex powerState, IOPMPowerChangeFlags changeFlags ) { pmTracer->tracePCIPowerChange( PMTraceWorker::kPowerChangeCompleted, service, changeFlags, - (actions->parameter & kPMActionsPCIBitNumberMask)); + (actions->flags & kPMActionsPCIBitNumberMask)); } //****************************************************************************** // registerInterest // -// Override IOService::registerInterest() to intercept special clients. +// Override IOService::registerInterest() for root domain clients. //****************************************************************************** class IOPMServiceInterestNotifier : public _IOServiceInterestNotifier @@ -6344,32 +6810,35 @@ class IOPMServiceInterestNotifier : public _IOServiceInterestNotifier OSDeclareDefaultStructors(IOPMServiceInterestNotifier); protected: - uint32_t ackTimeoutCnt; - uint32_t msgType;// Message pending ack - - uint64_t uuid0; - uint64_t uuid1; - const OSSymbol *identifier; + uint32_t ackTimeoutCnt; + uint32_t msgType; // Message pending ack + uint32_t msgIndex; + uint32_t maxMsgDelayMS; + uint32_t maxAckDelayMS; + uint64_t msgAbsTime; + uint64_t uuid0; + uint64_t uuid1; + OSSharedPtr identifier; + OSSharedPtr clientName; }; OSDefineMetaClassAndStructors(IOPMServiceInterestNotifier, _IOServiceInterestNotifier) -IONotifier * IOPMrootDomain::registerInterest( +OSSharedPtr +IOPMrootDomain::registerInterest( const OSSymbol * typeOfInterest, IOServiceInterestHandler handler, void * target, void * ref ) { - IOPMServiceInterestNotifier *notifier = NULL; + IOPMServiceInterestNotifier* notifier; bool isSystemCapabilityClient; bool isKernelCapabilityClient; - IOReturn rc = kIOReturnError;; + IOReturn rc = kIOReturnError; - isSystemCapabilityClient = - typeOfInterest && + isSystemCapabilityClient = typeOfInterest && typeOfInterest->isEqualTo(kIOPMSystemCapabilityInterest); - isKernelCapabilityClient = - typeOfInterest && + isKernelCapabilityClient = typeOfInterest && typeOfInterest->isEqualTo(gIOPriorityPowerStateInterest); if (isSystemCapabilityClient) { @@ -6385,13 +6854,12 @@ IONotifier * IOPMrootDomain::registerInterest( rc = super::registerInterestForNotifier(notifier, typeOfInterest, handler, target, ref); } if (rc != kIOReturnSuccess) { - notifier->release(); - notifier = NULL; - return NULL; } + + notifier->ackTimeoutCnt = 0; + if (pmPowerStateQueue) { - notifier->ackTimeoutCnt = 0; if (isSystemCapabilityClient) { notifier->retain(); if (pmPowerStateQueue->submitPowerEvent( @@ -6409,9 +6877,9 @@ IONotifier * IOPMrootDomain::registerInterest( } } - OSData *data = NULL; + OSSharedPtr data; uint8_t *uuid = NULL; 
- OSKext *kext = OSKext::lookupKextWithAddress((vm_address_t)handler); + OSSharedPtr kext = OSKext::lookupKextWithAddress((vm_address_t)handler); if (kext) { data = kext->copyUUID(); } @@ -6425,16 +6893,9 @@ IONotifier * IOPMrootDomain::registerInterest( ((uint64_t)(uuid[11]) << 32) | ((uint64_t)(uuid[12]) << 24) | ((uint64_t)(uuid[13]) << 16) | ((uint64_t)(uuid[14]) << 8) | (uuid[15]); - notifier->identifier = kext->getIdentifier(); + notifier->identifier = copyKextIdentifierWithAddress((vm_address_t) handler); } - if (kext) { - kext->release(); - } - if (data) { - data->release(); - } - - return notifier; + return OSSharedPtr(notifier, OSNoRetain); } //****************************************************************************** @@ -6464,13 +6925,15 @@ IOPMrootDomain::systemMessageFilter( // Capability change message for app and kernel clients. if (isCapMsg) { + // Kernel clients if ((context->notifyType == kNotifyPriority) || (context->notifyType == kNotifyCapabilityChangePriority)) { isCapClient = true; } + // powerd's systemCapabilityNotifier if ((context->notifyType == kNotifyCapabilityChangeApps) && - (object == (void *) systemCapabilityNotifier)) { + (object == (void *) systemCapabilityNotifier.get())) { isCapClient = true; } } @@ -6493,7 +6956,7 @@ IOPMrootDomain::systemMessageFilter( capArgs->changeFlags = kIOPMSystemCapabilityDidChange; } - if ((object == (void *) systemCapabilityNotifier) && + if ((object == (void *) systemCapabilityNotifier.get()) && context->isPreChange) { toldPowerdCapWillChange = true; } @@ -6521,7 +6984,7 @@ IOPMrootDomain::systemMessageFilter( if ((kIOMessageCanSystemSleep == context->messageType) || (kIOMessageSystemWillNotSleep == context->messageType)) { - if (object == (OSObject *) systemCapabilityNotifier) { + if (object == (OSObject *) systemCapabilityNotifier.get()) { allow = true; break; } @@ -6533,7 +6996,7 @@ IOPMrootDomain::systemMessageFilter( } if (kIOPMMessageLastCallBeforeSleep == context->messageType) { - if ((object == (OSObject *) systemCapabilityNotifier) && + if ((object == (OSObject *) systemCapabilityNotifier.get()) && CAP_HIGHEST(kIOPMSystemCapabilityGraphics) && (fullToDarkReason == kIOPMSleepReasonIdle)) { allow = true; @@ -6544,7 +7007,7 @@ IOPMrootDomain::systemMessageFilter( // Reject capability change messages for legacy clients. // Reject legacy system sleep messages for capability client. 
- if (isCapMsg || (object == (OSObject *) systemCapabilityNotifier)) { + if (isCapMsg || (object == (OSObject *) systemCapabilityNotifier.get())) { break; } @@ -6573,9 +7036,8 @@ IOPMrootDomain::systemMessageFilter( _joinedCapabilityClients->removeObject((OSObject *) object); if (_joinedCapabilityClients->getCount() == 0) { DLOG("destroyed capability client set %p\n", - OBFUSCATE(_joinedCapabilityClients)); - _joinedCapabilityClients->release(); - _joinedCapabilityClients = NULL; + OBFUSCATE(_joinedCapabilityClients.get())); + _joinedCapabilityClients.reset(); } } if (notifier) { @@ -6594,7 +7056,7 @@ IOReturn IOPMrootDomain::setMaintenanceWakeCalendar( const IOPMCalendarStruct * calendar ) { - OSData * data; + OSSharedPtr data; IOReturn ret = 0; if (!calendar) { @@ -6607,19 +7069,11 @@ IOPMrootDomain::setMaintenanceWakeCalendar( } if (kPMCalendarTypeMaintenance == calendar->selector) { - ret = setPMSetting(gIOPMSettingMaintenanceWakeCalendarKey, data); - if (kIOReturnSuccess == ret) { - OSBitOrAtomic(kIOPMAlarmBitMaintenanceWake, &_scheduledAlarms); - } + ret = setPMSetting(gIOPMSettingMaintenanceWakeCalendarKey.get(), data.get()); } else if (kPMCalendarTypeSleepService == calendar->selector) { - ret = setPMSetting(gIOPMSettingSleepServiceWakeCalendarKey, data); - if (kIOReturnSuccess == ret) { - OSBitOrAtomic(kIOPMAlarmBitSleepServiceWake, &_scheduledAlarms); - } + ret = setPMSetting(gIOPMSettingSleepServiceWakeCalendarKey.get(), data.get()); } - DLOG("_scheduledAlarms = 0x%x\n", (uint32_t) _scheduledAlarms); - data->release(); return ret; } @@ -6638,8 +7092,8 @@ IOPMrootDomain::displayWranglerNotification( UInt32 messageType, IOService * service, void * messageArgument, vm_size_t argSize ) { -#if !NO_KERNEL_HID - int displayPowerState; +#if DISPLAY_WRANGLER_PRESENT + IOPMPowerStateIndex displayPowerState; IOPowerStateChangeNotification * params = (IOPowerStateChangeNotification *) messageArgument; @@ -6655,7 +7109,7 @@ IOPMrootDomain::displayWranglerNotification( displayPowerState = params->stateNumber; DLOG("wrangler %s ps %d\n", - getIOMessageString(messageType), displayPowerState); + getIOMessageString(messageType), (uint32_t) displayPowerState); switch (messageType) { case kIOMessageDeviceWillPowerOff: @@ -6688,61 +7142,74 @@ IOPMrootDomain::displayWranglerNotification( } break; } -#endif +#endif /* DISPLAY_WRANGLER_PRESENT */ return kIOReturnUnsupported; } //****************************************************************************** -// displayWranglerMatchPublished +// reportUserInput // -// Receives a notification when the IODisplayWrangler is published. -// When it's published we install a power state change handler. 
//****************************************************************************** -bool -IOPMrootDomain::displayWranglerMatchPublished( - void * target, - void * refCon, - IOService * newService, - IONotifier * notifier __unused) -{ -#if !NO_KERNEL_HID - // install a handler - if (!newService->registerInterest( gIOGeneralInterest, - &displayWranglerNotification, target, NULL)) { - return false; +void +IOPMrootDomain::updateUserActivity( void ) +{ +#if defined(XNU_TARGET_OS_OSX) && !DISPLAY_WRANGLER_PRESENT + clock_get_uptime(&userActivityTime); + bool aborting = ((lastSleepReason == kIOPMSleepReasonSoftware) + || (lastSleepReason == kIOPMSleepReasonIdle) + || (lastSleepReason == kIOPMSleepReasonMaintenance)); + if (aborting) { + userActivityCount++; + DLOG("user activity reported %d lastSleepReason %d\n", userActivityCount, lastSleepReason); } #endif - return true; } - -//****************************************************************************** -// reportUserInput -// -//****************************************************************************** - void IOPMrootDomain::reportUserInput( void ) { -#if !NO_KERNEL_HID - OSIterator * iter; - OSDictionary * matching; + if (wrangler) { + wrangler->activityTickle(0, 0); + } +#if defined(XNU_TARGET_OS_OSX) && !DISPLAY_WRANGLER_PRESENT + // Update user activity + updateUserActivity(); - if (!wrangler) { - matching = serviceMatching("IODisplayWrangler"); - iter = getMatchingServices(matching); - if (matching) { - matching->release(); - } - if (iter) { - wrangler = OSDynamicCast(IOService, iter->getNextObject()); - iter->release(); - } + if (!darkWakeExit && ((_pendingCapability & kIOPMSystemCapabilityGraphics) == 0)) { + // update user active abs time + clock_get_uptime(&gUserActiveAbsTime); + pmPowerStateQueue->submitPowerEvent( + kPowerEventPolicyStimulus, + (void *) kStimulusDarkWakeActivityTickle, + true /* set wake type */ ); } +#endif +} +void +IOPMrootDomain::requestUserActive(IOService *device, const char *reason) +{ +#if DISPLAY_WRANGLER_PRESENT if (wrangler) { wrangler->activityTickle(0, 0); } +#else + if (!device) { + DLOG("requestUserActive: device is null\n"); + return; + } + OSSharedPtr deviceName = device->copyName(); + uint64_t registryID = device->getRegistryEntryID(); + + if (!deviceName || !registryID) { + DLOG("requestUserActive: no device name or registry entry\n"); + return; + } + const char *name = deviceName->getCStringNoCopy(); + char payload[128]; + snprintf(payload, sizeof(payload), "%s:%s", name, reason); + DLOG("requestUserActive from %s (0x%llx) for %s\n", name, registryID, reason); + messageClient(kIOPMMessageRequestUserActive, systemCapabilityNotifier.get(), (void *)payload, sizeof(payload)); #endif } @@ -6753,7 +7220,7 @@ IOPMrootDomain::reportUserInput( void ) bool IOPMrootDomain::latchDisplayWranglerTickle( bool latch ) { -#if !NO_KERNEL_HID +#if DISPLAY_WRANGLER_PRESENT if (latch) { if (!(_currentCapability & kIOPMSystemCapabilityGraphics) && !(_pendingCapability & kIOPMSystemCapabilityGraphics) && @@ -6761,22 +7228,22 @@ IOPMrootDomain::latchDisplayWranglerTickle( bool latch ) // Currently in dark wake, and not transitioning to full wake. // Full wake is unsustainable, so latch the tickle to prevent // the display from lighting up momentarily. 
- wranglerTickleLatched = true; + wranglerTickled = true; } else { - wranglerTickleLatched = false; + wranglerTickled = false; } - } else if (wranglerTickleLatched && checkSystemCanSustainFullWake()) { - wranglerTickleLatched = false; + } else if (wranglerTickled && checkSystemCanSustainFullWake()) { + wranglerTickled = false; pmPowerStateQueue->submitPowerEvent( kPowerEventPolicyStimulus, (void *) kStimulusDarkWakeActivityTickle ); } - return wranglerTickleLatched; -#else + return wranglerTickled; +#else /* ! DISPLAY_WRANGLER_PRESENT */ return false; -#endif +#endif /* ! DISPLAY_WRANGLER_PRESENT */ } //****************************************************************************** @@ -6792,32 +7259,6 @@ IOPMrootDomain::setDisplayPowerOn( uint32_t options ) (void *) NULL, options ); } -// MARK: - -// MARK: Battery - -//****************************************************************************** -// batteryPublished -// -// Notification on battery class IOPowerSource appearance -//****************************************************************************** - -bool -IOPMrootDomain::batteryPublished( - void * target, - void * root_domain, - IOService * resourceService, - IONotifier * notifier __unused ) -{ - // rdar://2936060&4435589 - // All laptops have dimmable LCD displays - // All laptops have batteries - // So if this machine has a battery, publish the fact that the backlight - // supports dimming. - ((IOPMrootDomain *)root_domain)->publishFeature("DisplayDims"); - - return true; -} - // MARK: - // MARK: System PM Policy @@ -6858,8 +7299,8 @@ IOPMrootDomain::checkSystemSleepAllowed( IOOptionBits options, break; #endif - if (lowBatteryCondition || thermalWarningState) { - break; // always sleep on low battery or when in thermal warning state + if (lowBatteryCondition || thermalWarningState || thermalEmergencyState) { + break; // always sleep on low battery or when in thermal warning/emergency state } if (sleepReason == kIOPMSleepReasonDarkWakeThermalEmergency) { @@ -6887,7 +7328,7 @@ IOPMrootDomain::checkSystemSleepAllowed( IOOptionBits options, IOReturn ret; OSBitAndAtomic(~kPCICantSleep, &platformSleepSupport); ret = getPlatform()->callPlatformFunction( - sleepSupportedPEFunction, false, + sleepSupportedPEFunction.get(), false, NULL, NULL, NULL, NULL); pciCantSleepValid = true; pciCantSleepFlag = false; @@ -6927,15 +7368,14 @@ IOPMrootDomain::checkSystemCanSleep( uint32_t sleepReason ) bool IOPMrootDomain::checkSystemCanSustainFullWake( void ) { -#if !NO_KERNEL_HID - if (lowBatteryCondition || thermalWarningState) { + if (lowBatteryCondition || thermalWarningState || thermalEmergencyState) { // Low battery wake, or received a low battery notification // while system is awake. This condition will persist until // the following wake. return false; } - if (clamshellExists && clamshellClosed && !clamshellSleepDisabled) { + if (clamshellExists && clamshellClosed && !clamshellSleepDisableMask) { // Graphics state is unknown and external display might not be probed. // Do not incorporate state that requires graphics to be in max power // such as desktopMode or clamshellDisabled. 
@@ -6945,7 +7385,6 @@ IOPMrootDomain::checkSystemCanSustainFullWake( void ) return false; } } -#endif return true; } @@ -6969,21 +7408,17 @@ IOPMrootDomain::mustHibernate( void ) // Tables for accumulated days in year by month, latter used for leap years -static const int daysbymonth[] = +static const unsigned int daysbymonth[] = { 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365 }; -static const int lydaysbymonth[] = +static const unsigned int lydaysbymonth[] = { 0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366 }; static int __unused -IOPMConvertSecondsToCalendar(long secs, IOPMCalendarStruct * dt) +IOPMConvertSecondsToCalendar(clock_sec_t secs, IOPMCalendarStruct * dt) { - const int * dbm = daysbymonth; - long n, x, y, z; - - if (secs < 0) { - return 0; - } + const unsigned int * dbm = daysbymonth; + clock_sec_t n, x, y, z; // Calculate seconds, minutes and hours @@ -6991,7 +7426,7 @@ IOPMConvertSecondsToCalendar(long secs, IOPMCalendarStruct * dt) dt->second = n % 60; n /= 60; dt->minute = n % 60; - dt->hour = n / 60; + dt->hour = (typeof(dt->hour))(n / 60); // Calculate day of week @@ -7026,7 +7461,7 @@ IOPMConvertSecondsToCalendar(long secs, IOPMCalendarStruct * dt) return 0; } - dt->year = z; + dt->year = (typeof(dt->year))z; // Adjust remaining days value to start at 1 @@ -7034,25 +7469,25 @@ IOPMConvertSecondsToCalendar(long secs, IOPMCalendarStruct * dt) // Calculate month - for (x = 1; n > dbm[x]; x++) { + for (x = 1; (n > dbm[x]) && (x < 12); x++) { continue; } - dt->month = x; + dt->month = (typeof(dt->month))x; // Calculate day of month - dt->day = n - dbm[x - 1]; + dt->day = (typeof(dt->day))(n - dbm[x - 1]); return 1; } -static long +static clock_sec_t IOPMConvertCalendarToSeconds(const IOPMCalendarStruct * dt) { - const int * dbm = daysbymonth; + const unsigned int * dbm = daysbymonth; long y, secs, days; - if (dt->year < 1970) { + if (dt->year < 1970 || dt->month > 12) { return 0; } @@ -7151,11 +7586,11 @@ IOPMrootDomain::aotShouldExit(bool checkTimeSet, bool software) _aotExit = true; _aotMetrics->noTimeSetCount++; reason = "flipbook expired"; - } else if ((kIOPMAOTModeRespectTimers & _aotMode) && _scheduledAlarmUTC) { + } else if ((kIOPMAOTModeRespectTimers & _aotMode) && _calendarWakeAlarmUTC) { clock_sec_t sec; clock_usec_t usec; clock_get_calendar_microtime(&sec, &usec); - if (_scheduledAlarmUTC <= sec) { + if (_calendarWakeAlarmUTC <= sec) { _aotExit = true; _aotMetrics->rtcAlarmsCount++; reason = "user alarm"; @@ -7180,6 +7615,9 @@ IOPMrootDomain::aotShouldExit(bool checkTimeSet, bool software) void IOPMrootDomain::aotExit(bool cps) { + uint32_t savedMessageMask; + + ASSERT_GATED(); _aotTasksSuspended = false; _aotReadyToFullWake = false; if (_aotTimerScheduled) { @@ -7191,18 +7629,24 @@ IOPMrootDomain::aotExit(bool cps) _aotMetrics->totalTime += mach_absolute_time() - _aotLastWakeTime; _aotLastWakeTime = 0; if (_aotMetrics->sleepCount && (_aotMetrics->sleepCount <= kIOPMAOTMetricsKernelWakeCountMax)) { + WAKEEVENT_LOCK(); strlcpy(&_aotMetrics->kernelWakeReason[_aotMetrics->sleepCount - 1][0], gWakeReasonString, sizeof(_aotMetrics->kernelWakeReason[_aotMetrics->sleepCount])); + WAKEEVENT_UNLOCK(); } _aotWakeTimeCalendar.selector = kPMCalendarTypeInvalid; + // Preserve the message mask since a system wake transition + // may have already started and initialized the mask. 
+ savedMessageMask = _systemMessageClientMask; _systemMessageClientMask = kSystemMessageClientLegacyApp; tellClients(kIOMessageSystemWillPowerOn); + _systemMessageClientMask = savedMessageMask | kSystemMessageClientLegacyApp; if (cps) { - changePowerStateToPriv(getRUN_STATE()); + changePowerStateWithTagToPriv(getRUN_STATE(), kCPSReasonAOTExit); } } @@ -7247,7 +7691,7 @@ IOPMrootDomain::aotEvaluate(IOTimerEventSource * timer) void IOPMrootDomain::adjustPowerState( bool sleepASAP ) { - DEBUG_LOG("adjustPowerState ps %s, asap %d, idleSleepEnabled %d\n", + DEBUG_LOG("adjustPowerState %s, asap %d, idleSleepEnabled %d\n", getPowerStateString((uint32_t) getPowerState()), sleepASAP, idleSleepEnabled); ASSERT_GATED(); @@ -7283,27 +7727,26 @@ IOPMrootDomain::adjustPowerState( bool sleepASAP ) } if ((!idleSleepEnabled) || !checkSystemSleepEnabled()) { - changePowerStateToPriv(getRUN_STATE()); + changePowerStateWithTagToPriv(getRUN_STATE(), kCPSReasonAdjustPowerState); } else if (sleepASAP) { - changePowerStateToPriv(SLEEP_STATE); + changePowerStateWithTagToPriv(SLEEP_STATE, kCPSReasonAdjustPowerState); } } void -IOPMrootDomain::handleDisplayPowerOn() +IOPMrootDomain::handleSetDisplayPowerOn(bool powerOn) { - if (!wrangler) { - return; - } - if (displayPowerOnRequested) { + if (powerOn) { if (!checkSystemCanSustainFullWake()) { + DLOG("System cannot sustain full wake\n"); return; } // Force wrangler to max power state. If system is in dark wake // this alone won't raise the wrangler's power state. - - wrangler->changePowerStateForRootDomain(kWranglerPowerStateMax); + if (wrangler) { + wrangler->changePowerStateForRootDomain(kWranglerPowerStateMax); + } // System in dark wake, always requesting full wake should // not have any bad side-effects, even if the request fails. @@ -7317,9 +7760,10 @@ IOPMrootDomain::handleDisplayPowerOn() // Must first transition to state 1 since wrangler doesn't // power off the displays at state 0. At state 0 the root // domain is removed from the wrangler's power client list. - - wrangler->changePowerStateForRootDomain(kWranglerPowerStateMin + 1); - wrangler->changePowerStateForRootDomain(kWranglerPowerStateMin); + if (wrangler) { + wrangler->changePowerStateForRootDomain(kWranglerPowerStateMin + 1); + wrangler->changePowerStateForRootDomain(kWranglerPowerStateMin); + } } } @@ -7352,18 +7796,21 @@ IOPMrootDomain::dispatchPowerEvent( systemBooting = false; // read noidle setting from Device Tree - IORegistryEntry *defaults = IORegistryEntry::fromPath("IODeviceTree:/defaults"); + OSSharedPtr defaults = IORegistryEntry::fromPath("IODeviceTree:/defaults"); if (defaults != NULL) { - OSData *data = OSDynamicCast(OSData, defaults->getProperty("no-idle")); + OSSharedPtr noIdleProp = defaults->copyProperty("no-idle"); + OSData *data = OSDynamicCast(OSData, noIdleProp.get()); if ((data != NULL) && (data->getLength() == 4)) { gNoIdleFlag = *(uint32_t*)data->getBytesNoCopy(); DLOG("Setting gNoIdleFlag to %u from device tree\n", gNoIdleFlag); } - defaults->release(); } - if (lowBatteryCondition) { - privateSleepSystem(kIOPMSleepReasonLowPower); - + if (lowBatteryCondition || thermalEmergencyState) { + if (lowBatteryCondition) { + privateSleepSystem(kIOPMSleepReasonLowPower); + } else { + privateSleepSystem(kIOPMSleepReasonThermalEmergency); + } // The rest is unnecessary since the system is expected // to sleep immediately. The following wake will update // everything. 
@@ -7411,14 +7858,9 @@ IOPMrootDomain::dispatchPowerEvent( case kPowerEventRegisterSystemCapabilityClient: DLOG("power event %u args %p 0x%llx\n", event, OBFUSCATE(arg0), arg1); - if (systemCapabilityNotifier) { - systemCapabilityNotifier->release(); - systemCapabilityNotifier = NULL; - } - if (arg0) { - systemCapabilityNotifier = (IONotifier *) arg0; - systemCapabilityNotifier->retain(); - } + + // reset() handles the arg0 == nullptr case for us + systemCapabilityNotifier.reset((IONotifier *) arg0, OSRetain); /* intentional fall-through */ [[clang::fallthrough]]; @@ -7428,20 +7870,19 @@ IOPMrootDomain::dispatchPowerEvent( _joinedCapabilityClients = OSSet::withCapacity(8); } if (arg0) { - IONotifier * notify = (IONotifier *) arg0; + OSSharedPtr notify((IONotifier *) arg0, OSNoRetain); if (_joinedCapabilityClients) { - _joinedCapabilityClients->setObject(notify); + _joinedCapabilityClients->setObject(notify.get()); synchronizePowerTree( kIOPMSyncNoChildNotify ); } - notify->release(); } break; case kPowerEventPolicyStimulus: DMSG("power event %u args %p 0x%llx\n", event, OBFUSCATE(arg0), arg1); if (arg0) { - int stimulus = (uintptr_t) arg0; - evaluatePolicy( stimulus, (uint32_t) arg1 ); + int stimulus = (int)(uintptr_t) arg0; + evaluatePolicy(stimulus, (uint32_t) arg1); } break; @@ -7478,15 +7919,46 @@ IOPMrootDomain::dispatchPowerEvent( case kPowerEventSetDisplayPowerOn: DLOG("power event %u args %p 0x%llx\n", event, OBFUSCATE(arg0), arg1); - if (!wrangler) { - break; - } if (arg1 != 0) { displayPowerOnRequested = true; } else { displayPowerOnRequested = false; } - handleDisplayPowerOn(); + handleSetDisplayPowerOn(displayPowerOnRequested); + break; + + case kPowerEventPublishWakeType: + DLOG("power event %u args %p 0x%llx\n", event, OBFUSCATE(arg0), arg1); + + // Don't replace wake type property if already set + if ((arg0 == gIOPMWakeTypeUserKey) || + !propertyExists(kIOPMRootDomainWakeTypeKey)) { + const char * wakeType = NULL; + + if (arg0 == gIOPMWakeTypeUserKey) { + requestUserActive(this, "WakeTypeUser"); + wakeType = kIOPMRootDomainWakeTypeUser; + } else if (arg0 == gIOPMSettingDebugWakeRelativeKey) { + requestUserActive(this, "WakeTypeAlarm"); + wakeType = kIOPMRootDomainWakeTypeAlarm; + } else if (arg0 == gIOPMSettingSleepServiceWakeCalendarKey) { + darkWakeSleepService = true; + wakeType = kIOPMRootDomainWakeTypeSleepService; + } else if (arg0 == gIOPMSettingMaintenanceWakeCalendarKey) { + wakeType = kIOPMRootDomainWakeTypeMaintenance; + } + + if (wakeType) { + setProperty(kIOPMRootDomainWakeTypeKey, wakeType); + } + } + break; + + case kPowerEventAOTEvaluate: + DLOG("power event %u args %p 0x%llx\n", event, OBFUSCATE(arg0), arg1); + if (_aotReadyToFullWake) { + aotEvaluate(NULL); + } break; } } @@ -7508,7 +7980,7 @@ IOPMrootDomain::systemPowerEventOccurred( uint32_t intValue) { IOReturn attempt = kIOReturnSuccess; - OSNumber *newNumber = NULL; + OSSharedPtr newNumber; if (!event) { return kIOReturnBadArgument; @@ -7519,9 +7991,7 @@ IOPMrootDomain::systemPowerEventOccurred( return kIOReturnInternalError; } - attempt = systemPowerEventOccurred(event, (OSObject *)newNumber); - - newNumber->release(); + attempt = systemPowerEventOccurred(event, static_cast(newNumber.get())); return attempt; } @@ -7550,7 +8020,7 @@ IOPMrootDomain::systemPowerEventOccurred( const OSSymbol *event, OSObject *value) { - OSDictionary *thermalsDict = NULL; + OSSharedPtr thermalsDict; bool shouldUpdate = true; if (!event || !value) { @@ -7565,10 +8035,11 @@ IOPMrootDomain::systemPowerEventOccurred( 
IOLockLock(featuresDictLock); } - thermalsDict = (OSDictionary *)getProperty(kIOPMRootDomainPowerStatusKey); + OSSharedPtr origThermalsProp = copyProperty(kIOPMRootDomainPowerStatusKey); + OSDictionary * origThermalsDict = OSDynamicCast(OSDictionary, origThermalsProp.get()); - if (thermalsDict && OSDynamicCast(OSDictionary, thermalsDict)) { - thermalsDict = OSDictionary::withDictionary(thermalsDict); + if (origThermalsDict) { + thermalsDict = OSDictionary::withDictionary(origThermalsDict); } else { thermalsDict = OSDictionary::withCapacity(1); } @@ -7580,9 +8051,7 @@ IOPMrootDomain::systemPowerEventOccurred( thermalsDict->setObject(event, value); - setProperty(kIOPMRootDomainPowerStatusKey, thermalsDict); - - thermalsDict->release(); + setProperty(kIOPMRootDomainPowerStatusKey, thermalsDict.get()); exit: // UNLOCK @@ -7645,7 +8114,7 @@ IOPMrootDomain::handlePowerNotification( UInt32 msg ) * Local (IOPMrootDomain only) eval clamshell command */ if (msg & kLocalEvalClamshellCommand) { - if (isRTCAlarmWake) { + if ((gClamshellFlags & kClamshell_WAR_47715679) && isRTCAlarmWake) { eval_clamshell_alarm = true; // reset isRTCAlarmWake. This evaluation should happen only once @@ -7661,7 +8130,8 @@ IOPMrootDomain::handlePowerNotification( UInt32 msg ) * Overtemp */ if (msg & kIOPMOverTemp) { - MSG("PowerManagement emergency overtemp signal. Going to sleep!"); + DLOG("Thermal overtemp message received!\n"); + thermalEmergencyState = true; privateSleepSystem(kIOPMSleepReasonThermalEmergency); } @@ -7670,7 +8140,6 @@ IOPMrootDomain::handlePowerNotification( UInt32 msg ) */ if ((msg & kIOPMDWOverTemp) && (_systemTransitionType != kSystemTransitionSleep)) { DLOG("DarkWake thermal limits message received!\n"); - messageClients(kIOPMMessageDarkWakeThermalEmergency); } @@ -7685,8 +8154,23 @@ IOPMrootDomain::handlePowerNotification( UInt32 msg ) * Power Emergency */ if (msg & kIOPMPowerEmergency) { + DLOG("Low battery notification received\n"); +#if defined(XNU_TARGET_OS_OSX) && !DISPLAY_WRANGLER_PRESENT + // Wait for the next low battery notification if the system state is + // in transition. + if ((_systemTransitionType == kSystemTransitionNone) && + CAP_CURRENT(kIOPMSystemCapabilityCPU) && + !systemBooting && !systemShutdown && !gWillShutdown) { + // Setting lowBatteryCondition will prevent system sleep + lowBatteryCondition = true; + + // Notify userspace to initiate system shutdown + messageClients(kIOPMMessageRequestSystemShutdown); + } +#else lowBatteryCondition = true; privateSleepSystem(kIOPMSleepReasonLowPower); +#endif } /* @@ -7725,7 +8209,8 @@ IOPMrootDomain::handlePowerNotification( UInt32 msg ) * Send the clamshell interest notification since the lid is closing. */ if (msg & kIOPMClamshellClosed) { - if (clamshellClosed && clamshellExists) { + if ((clamshellIgnoreClose || (gClamshellFlags & kClamshell_WAR_38378787)) && + clamshellClosed && clamshellExists) { DLOG("Ignoring redundant Clamshell close event\n"); } else { DLOG("Clamshell closed\n"); @@ -7734,6 +8219,13 @@ IOPMrootDomain::handlePowerNotification( UInt32 msg ) clamshellClosed = true; clamshellExists = true; + // Ignore all following clamshell close events until the clamshell + // is opened or the system sleeps. When a clamshell close triggers + // a system wake, the lid driver may send us two clamshell close + // events, one for the clamshell close event itself, and a second + // close event when the driver polls the lid state on wake. 
+ clamshellIgnoreClose = true; + // Tell PMCPU informCPUStateChange(kInformLid, 1); @@ -7751,9 +8243,9 @@ IOPMrootDomain::handlePowerNotification( UInt32 msg ) * -> reevaluate lid state */ if (msg & kIOPMSetDesktopMode) { - DLOG("Desktop mode\n"); desktopMode = (0 != (msg & kIOPMSetValue)); msg &= ~(kIOPMSetDesktopMode | kIOPMSetValue); + DLOG("Desktop mode %d\n", desktopMode); sendClientClamshellNotification(); @@ -7804,11 +8296,28 @@ IOPMrootDomain::handlePowerNotification( UInt32 msg ) */ if (msg & kIOPMEnableClamshell) { DLOG("Clamshell enabled\n"); + // Re-evaluate the lid state // System should sleep on external display disappearance // in lid closed operation. if (true == clamshellDisabled) { eval_clamshell = true; + +#if DARK_TO_FULL_EVALUATE_CLAMSHELL_DELAY + // Also clear kClamshellSleepDisableInternal when graphics enables + // the clamshell during a full wake. When graphics is behaving as + // expected, this will allow clamshell close to be honored earlier + // rather than waiting for the delayed evaluation. + if ((clamshellSleepDisableMask & kClamshellSleepDisableInternal) && + (CAP_PENDING(kIOPMSystemCapabilityGraphics) || + CAP_CURRENT(kIOPMSystemCapabilityGraphics))) { + setClamShellSleepDisable(false, kClamshellSleepDisableInternal); + + // Cancel the TC to avoid an extra kLocalEvalClamshellCommand + // when timer expires which is harmless but useless. + thread_call_cancel(fullWakeThreadCall); + } +#endif } clamshellDisabled = false; @@ -7827,7 +8336,7 @@ IOPMrootDomain::handlePowerNotification( UInt32 msg ) } /* - * Evaluate clamshell and SLEEP if appropiate + * Evaluate clamshell and SLEEP if appropriate */ if (eval_clamshell_alarm && clamshellClosed) { if (shouldSleepOnRTCAlarmWake()) { @@ -7844,13 +8353,13 @@ IOPMrootDomain::handlePowerNotification( UInt32 msg ) if (msg & kIOPMProModeEngaged) { int newState = 1; DLOG("ProModeEngaged\n"); - messageClient(kIOPMMessageProModeStateChange, systemCapabilityNotifier, &newState, sizeof(newState)); + messageClient(kIOPMMessageProModeStateChange, systemCapabilityNotifier.get(), &newState, sizeof(newState)); } if (msg & kIOPMProModeDisengaged) { int newState = 0; DLOG("ProModeDisengaged\n"); - messageClient(kIOPMMessageProModeStateChange, systemCapabilityNotifier, &newState, sizeof(newState)); + messageClient(kIOPMMessageProModeStateChange, systemCapabilityNotifier.get(), &newState, sizeof(newState)); } } @@ -7872,6 +8381,7 @@ IOPMrootDomain::evaluatePolicy( int stimulus, uint32_t arg ) int evaluateDarkWake : 1; int adjustPowerState : 1; int userBecameInactive : 1; + int displaySleepEntry : 1; } bit; uint32_t u32; } flags; @@ -7883,15 +8393,21 @@ IOPMrootDomain::evaluatePolicy( int stimulus, uint32_t arg ) switch (stimulus) { case kStimulusDisplayWranglerSleep: DLOG("evaluatePolicy( %d, 0x%x )\n", stimulus, arg); - if (!wranglerAsleep) { - // first transition to wrangler sleep or lower + if (!wranglerPowerOff) { + // wrangler is in sleep state or lower flags.bit.displaySleep = true; } + if (!wranglerAsleep) { + // transition from wrangler wake to wrangler sleep + flags.bit.displaySleepEntry = true; + wranglerAsleep = true; + } break; case kStimulusDisplayWranglerWake: DLOG("evaluatePolicy( %d, 0x%x )\n", stimulus, arg); displayIdleForDemandSleep = false; + wranglerPowerOff = false; wranglerAsleep = false; break; @@ -7914,7 +8430,7 @@ IOPMrootDomain::evaluatePolicy( int stimulus, uint32_t arg ) } kdebugTrace(kPMLogUserActiveState, 0, 1, 0); - setProperty(gIOPMUserIsActiveKey, kOSBooleanTrue); + 
setProperty(gIOPMUserIsActiveKey.get(), kOSBooleanTrue); messageClients(kIOPMMessageUserIsActiveChanged); } flags.bit.idleSleepDisabled = true; @@ -7929,7 +8445,7 @@ IOPMrootDomain::evaluatePolicy( int stimulus, uint32_t arg ) flags.bit.userBecameInactive = true; kdebugTrace(kPMLogUserActiveState, 0, 0, 0); - setProperty(gIOPMUserIsActiveKey, kOSBooleanFalse); + setProperty(gIOPMUserIsActiveKey.get(), kOSBooleanFalse); messageClients(kIOPMMessageUserIsActiveChanged); } break; @@ -7937,22 +8453,25 @@ IOPMrootDomain::evaluatePolicy( int stimulus, uint32_t arg ) case kStimulusAggressivenessChanged: { DMSG("evaluatePolicy( %d, 0x%x )\n", stimulus, arg); - unsigned long minutesToIdleSleep = 0; - unsigned long minutesToDisplayDim = 0; - unsigned long minutesDelta = 0; + unsigned long aggressiveValue; + uint32_t minutesToIdleSleep = 0; + uint32_t minutesToDisplayDim = 0; + uint32_t minutesDelta = 0; // Fetch latest display and system sleep slider values. - getAggressiveness(kPMMinutesToSleep, &minutesToIdleSleep); - getAggressiveness(kPMMinutesToDim, &minutesToDisplayDim); + aggressiveValue = 0; + getAggressiveness(kPMMinutesToSleep, &aggressiveValue); + minutesToIdleSleep = (uint32_t) aggressiveValue; + + aggressiveValue = 0; + getAggressiveness(kPMMinutesToDim, &aggressiveValue); + minutesToDisplayDim = (uint32_t) aggressiveValue; DLOG("aggressiveness changed: system %u->%u, display %u\n", - (uint32_t) sleepSlider, - (uint32_t) minutesToIdleSleep, - (uint32_t) minutesToDisplayDim); + sleepSlider, minutesToIdleSleep, minutesToDisplayDim); - DLOG("idle time -> %ld secs (ena %d)\n", + DLOG("idle time -> %d secs (ena %d)\n", idleSeconds, (minutesToIdleSleep != 0)); - // How long to wait before sleeping the system once // the displays turns off is indicated by 'extraSleepDelay'. @@ -7970,9 +8489,11 @@ IOPMrootDomain::evaluatePolicy( int stimulus, uint32_t arg ) flags.bit.idleSleepDisabled = true; idleSleepEnabled = false; } +#if !defined(XNU_TARGET_OS_OSX) if (0x7fffffff == minutesToIdleSleep) { minutesToIdleSleep = idleSeconds; } +#endif /* !defined(XNU_TARGET_OS_OSX) */ if (((minutesDelta != extraSleepDelay) || (userActivityTime != userActivityTime_prev)) && @@ -7998,7 +8519,7 @@ IOPMrootDomain::evaluatePolicy( int stimulus, uint32_t arg ) // Request wrangler idle only when demand sleep is triggered // from full wake. 
if (CAP_CURRENT(kIOPMSystemCapabilityGraphics)) { - wrangler->setProperties(wranglerIdleSettings); + wrangler->setProperties(wranglerIdleSettings.get()); DLOG("Requested wrangler idle\n"); } } @@ -8021,14 +8542,14 @@ IOPMrootDomain::evaluatePolicy( int stimulus, uint32_t arg ) setProperty(kIOPMRootDomainWakeTypeKey, kIOPMRootDomainWakeTypeHIDActivity); } - if (false == wranglerTickled) { + if (!darkWakeExit) { if (latchDisplayWranglerTickle(true)) { DLOG("latched tickle\n"); break; } - wranglerTickled = true; - DLOG("Requesting full wake after dark wake activity tickle\n"); + darkWakeExit = true; + DLOG("Requesting full wake due to dark wake activity tickle\n"); requestFullWake( kFullWakeReasonLocalUser ); } break; @@ -8043,9 +8564,9 @@ IOPMrootDomain::evaluatePolicy( int stimulus, uint32_t arg ) DLOG("dark wake entry\n"); systemDarkWake = true; - // Keep wranglerAsleep an invariant when wrangler is absent + // Keep wranglerPowerOff an invariant when wrangler is absent if (wrangler) { - wranglerAsleep = true; + wranglerPowerOff = true; } if (kStimulusDarkWakeEntry == stimulus) { @@ -8079,6 +8600,8 @@ IOPMrootDomain::evaluatePolicy( int stimulus, uint32_t arg ) } /* switch(stimulus) */ if (flags.bit.evaluateDarkWake && (kFullWakeReasonNone == fullWakeReason)) { + DLOG("DarkWake: sleepASAP %d, clamshell closed %d, disabled %d/%x, desktopMode %d, ac %d\n", + darkWakeToSleepASAP, clamshellClosed, clamshellDisabled, clamshellSleepDisableMask, desktopMode, acAdaptorConnected); if (darkWakeToSleepASAP || (clamshellClosed && !(desktopMode && acAdaptorConnected))) { uint32_t newSleepReason; @@ -8089,6 +8612,8 @@ IOPMrootDomain::evaluatePolicy( int stimulus, uint32_t arg ) if (lowBatteryCondition) { newSleepReason = kIOPMSleepReasonLowPower; + } else if (thermalEmergencyState) { + newSleepReason = kIOPMSleepReasonThermalEmergency; } else { newSleepReason = fullToDarkReason; } @@ -8110,7 +8635,7 @@ IOPMrootDomain::evaluatePolicy( int stimulus, uint32_t arg ) // Release power clamp, and wait for children idle. adjustPowerState(true); } else { - changePowerStateToPriv(getRUN_STATE()); + changePowerStateWithTagToPriv(getRUN_STATE(), kCPSReasonDarkWakeCannotSleep); } } } @@ -8120,11 +8645,11 @@ IOPMrootDomain::evaluatePolicy( int stimulus, uint32_t arg ) flags.u32 = 0; } - if ((flags.bit.displaySleep) && + if ((flags.bit.displaySleepEntry) && (kFullWakeReasonDisplayOn == fullWakeReason)) { - // kIOPMSleepReasonMaintenance? + // kIOPMSleepReasonNotificationWakeExit DLOG("Display sleep while in notification wake\n"); - changePowerStateWithOverrideTo( SLEEP_STATE, kIOPMSleepReasonMaintenance ); + changePowerStateWithOverrideTo(SLEEP_STATE, kIOPMSleepReasonNotificationWakeExit); } if (flags.bit.userBecameInactive || flags.bit.sleepDelayChanged) { @@ -8154,8 +8679,12 @@ IOPMrootDomain::evaluatePolicy( int stimulus, uint32_t arg ) if (flags.bit.idleSleepEnabled) { DLOG("idle sleep timer enabled\n"); if (!wrangler) { - changePowerStateToPriv(getRUN_STATE()); +#if defined(XNU_TARGET_OS_OSX) && !DISPLAY_WRANGLER_PRESENT + startIdleSleepTimer(getTimeToIdleSleep()); +#else + changePowerStateWithTagToPriv(getRUN_STATE(), kCPSReasonIdleSleepEnabled); startIdleSleepTimer( idleSeconds ); +#endif } else { // Start idle timer if prefs now allow system sleep // and user is already inactive. 
Disk spindown is @@ -8179,10 +8708,16 @@ IOPMrootDomain::evaluatePolicy( int stimulus, uint32_t arg ) if (!systemBooting && (0 == idleSleepPreventersCount())) { if (!wrangler) { - changePowerStateToPriv(getRUN_STATE()); + changePowerStateWithTagToPriv(getRUN_STATE(), kCPSReasonEvaluatePolicy); if (idleSleepEnabled) { +#if defined(XNU_TARGET_OS_OSX) && !DISPLAY_WRANGLER_PRESENT + if (!extraSleepDelay && !idleSleepTimerPending) { + sleepASAP = true; + } +#else // stay awake for at least idleSeconds startIdleSleepTimer(idleSeconds); +#endif } } else if (!extraSleepDelay && !idleSleepTimerPending && !systemDarkWake) { sleepASAP = true; @@ -8241,21 +8776,20 @@ IOPMrootDomain::requestFullWake( FullWakeReason reason ) if ((kSystemTransitionWake == _systemTransitionType) && !(_pendingCapability & kIOPMSystemCapabilityGraphics) && - !graphicsSuppressed) { + !darkWakePowerClamped) { // Promote to full wake while waking up to dark wake due to tickle. // PM will hold off notifying the graphics subsystem about system wake // as late as possible, so if a HID tickle does arrive, graphics can - // power up on this same wake cycle. The latency to power up graphics - // on the next cycle can be huge on some systems. However, once any - // graphics suppression has taken effect, it is too late. All other - // graphics devices must be similarly suppressed. But the delay till - // the following cycle should be short. - + // power up from this same wake transition. Otherwise, the latency to + // power up graphics on the following transition can be huge on certain + // systems. However, once any power clamping has taken effect, it is + // too late to promote the current dark wake transition to a full wake. _pendingCapability |= (kIOPMSystemCapabilityGraphics | kIOPMSystemCapabilityAudio); - // Immediately bring up audio and graphics - pciRoot = pciHostBridgeDriver; + // Tell the PCI parent of audio and graphics drivers to stop + // delaying the child notifications. Same for root domain. + pciRoot = pciHostBridgeDriver.get(); willEnterFullWake(); promotion = true; } @@ -8263,7 +8797,7 @@ IOPMrootDomain::requestFullWake( FullWakeReason reason ) // Unsafe to cancel once graphics was powered. // If system woke from dark wake, the return to sleep can // be cancelled. "awake -> dark -> sleep" transition - // can be canceled also, during the "dark --> sleep" phase + // can be cancelled also, during the "dark -> sleep" phase // *prior* to driver power down. if (!CAP_HIGHEST(kIOPMSystemCapabilityGraphics) || _pendingCapability == 0) { @@ -8271,6 +8805,7 @@ IOPMrootDomain::requestFullWake( FullWakeReason reason ) } synchronizePowerTree(options, pciRoot); + if (kFullWakeReasonLocalUser == fullWakeReason) { // IOGraphics doesn't light the display even though graphics is // enabled in kIOMessageSystemCapabilityChange message(radar 9502104) @@ -8314,15 +8849,17 @@ IOPMrootDomain::willEnterFullWake( void ) resetTimers = false; sleepTimerMaintenance = false; + assert(!CAP_CURRENT(kIOPMSystemCapabilityGraphics)); + _systemMessageClientMask = kSystemMessageClientPowerd | kSystemMessageClientLegacyApp; if ((_highestCapability & kIOPMSystemCapabilityGraphics) == 0) { - // Initial graphics full power + // First time to attain full wake capability since the last wake _systemMessageClientMask |= kSystemMessageClientKernel; // Set kIOPMUserTriggeredFullWakeKey before full wake for IOGraphics - setProperty(gIOPMUserTriggeredFullWakeKey, + setProperty(gIOPMUserTriggeredFullWakeKey.get(), (kFullWakeReasonLocalUser == fullWakeReason) ? 
kOSBooleanTrue : kOSBooleanFalse); } @@ -8344,11 +8881,28 @@ IOPMrootDomain::willEnterFullWake( void ) void IOPMrootDomain::fullWakeDelayedWork( void ) { -#if DARK_TO_FULL_EVALUATE_CLAMSHELL - // Not gated, don't modify state - if ((kSystemTransitionNone == _systemTransitionType) && - CAP_CURRENT(kIOPMSystemCapabilityGraphics)) { - receivePowerNotification( kLocalEvalClamshellCommand ); +#if DARK_TO_FULL_EVALUATE_CLAMSHELL_DELAY + if (!gIOPMWorkLoop->inGate()) { + gIOPMWorkLoop->runAction( + OSMemberFunctionCast(IOWorkLoop::Action, this, + &IOPMrootDomain::fullWakeDelayedWork), this); + return; + } + + DLOG("fullWakeDelayedWork cap cur %x pend %x high %x, clamshell disable %x/%x\n", + _currentCapability, _pendingCapability, _highestCapability, + clamshellDisabled, clamshellSleepDisableMask); + + if (clamshellExists && + CAP_CURRENT(kIOPMSystemCapabilityGraphics) && + !CAP_CHANGE(kIOPMSystemCapabilityGraphics)) { + if (clamshellSleepDisableMask & kClamshellSleepDisableInternal) { + setClamShellSleepDisable(false, kClamshellSleepDisableInternal); + } else { + // Not the initial full wake after waking from sleep. + // Evaluate the clamshell for rdar://problem/9157444. + receivePowerNotification(kLocalEvalClamshellCommand); + } } #endif } @@ -8432,7 +8986,7 @@ IOPMrootDomain::pmStatsRecordEvent( bool stopping = eventIndex & kIOPMStatsEventStopFlag ? true:false; uint64_t delta; uint64_t nsec; - OSData *publishPMStats = NULL; + OSSharedPtr publishPMStats; eventIndex &= ~(kIOPMStatsEventStartFlag | kIOPMStatsEventStopFlag); @@ -8463,8 +9017,7 @@ IOPMrootDomain::pmStatsRecordEvent( IOLog("PMStats: Hibernate read took %qd ms\n", delta / NSEC_PER_MSEC); publishPMStats = OSData::withBytes(&gPMStats, sizeof(gPMStats)); - setProperty(kIOPMSleepStatisticsKey, publishPMStats); - publishPMStats->release(); + setProperty(kIOPMSleepStatisticsKey, publishPMStats.get()); bzero(&gPMStats, sizeof(gPMStats)); } break; @@ -8483,32 +9036,34 @@ IOPMrootDomain::pmStatsRecordApplicationResponse( uint32_t delay_ms, uint64_t id, OSObject *object, - IOPMPowerStateIndex powerState) -{ - OSDictionary *responseDescription = NULL; - OSNumber *delayNum = NULL; - OSNumber *powerCaps = NULL; - OSNumber *pidNum = NULL; - OSNumber *msgNum = NULL; - const OSSymbol *appname; - const OSSymbol *sleep = NULL, *wake = NULL; + IOPMPowerStateIndex powerState, + bool async) +{ + OSSharedPtr responseDescription; + OSSharedPtr delayNum; + OSSharedPtr powerCaps; + OSSharedPtr pidNum; + OSSharedPtr msgNum; + OSSharedPtr appname; + OSSharedPtr sleep; + OSSharedPtr wake; IOPMServiceInterestNotifier *notify = NULL; if (object && (notify = OSDynamicCast(IOPMServiceInterestNotifier, object))) { - if (response->isEqualTo(gIOPMStatsResponseTimedOut)) { + if (response->isEqualTo(gIOPMStatsResponseTimedOut.get())) { notify->ackTimeoutCnt++; } else { notify->ackTimeoutCnt = 0; } } - if (response->isEqualTo(gIOPMStatsResponsePrompt) || + if (response->isEqualTo(gIOPMStatsResponsePrompt.get()) || (_systemTransitionType == kSystemTransitionNone) || (_systemTransitionType == kSystemTransitionNewCapClient)) { return; } - if (response->isEqualTo(gIOPMStatsDriverPSChangeSlow)) { + if (response->isEqualTo(gIOPMStatsDriverPSChangeSlow.get())) { kdebugTrace(kPMLogDrvPSChangeDelay, id, messageType, delay_ms); } else if (notify) { // User space app or kernel capability client @@ -8523,13 +9078,12 @@ IOPMrootDomain::pmStatsRecordApplicationResponse( responseDescription = OSDictionary::withCapacity(5); if (responseDescription) { if (response) { - 
responseDescription->setObject(_statsResponseTypeKey, response); + responseDescription->setObject(_statsResponseTypeKey.get(), response); } msgNum = OSNumber::withNumber(messageType, 32); if (msgNum) { - responseDescription->setObject(_statsMessageTypeKey, msgNum); - msgNum->release(); + responseDescription->setObject(_statsMessageTypeKey.get(), msgNum.get()); } if (!name && notify && notify->identifier) { @@ -8539,8 +9093,7 @@ IOPMrootDomain::pmStatsRecordApplicationResponse( if (name && (strlen(name) > 0)) { appname = OSSymbol::withCString(name); if (appname) { - responseDescription->setObject(_statsNameKey, appname); - appname->release(); + responseDescription->setObject(_statsNameKey.get(), appname.get()); } } @@ -8550,62 +9103,57 @@ IOPMrootDomain::pmStatsRecordApplicationResponse( if (id != 0) { pidNum = OSNumber::withNumber(id, 64); if (pidNum) { - responseDescription->setObject(_statsPIDKey, pidNum); - pidNum->release(); + responseDescription->setObject(_statsPIDKey.get(), pidNum.get()); } } delayNum = OSNumber::withNumber(delay_ms, 32); if (delayNum) { - responseDescription->setObject(_statsTimeMSKey, delayNum); - delayNum->release(); + responseDescription->setObject(_statsTimeMSKey.get(), delayNum.get()); } - if (response->isEqualTo(gIOPMStatsDriverPSChangeSlow)) { + if (response->isEqualTo(gIOPMStatsDriverPSChangeSlow.get())) { powerCaps = OSNumber::withNumber(powerState, 32); #if !defined(__i386__) && !defined(__x86_64__) && (DEVELOPMENT || DEBUG) - IOLog("%s::powerStateChange type(%d) to(%lu) async took %d ms\n", - name, messageType, - powerState, delay_ms); + static const char * driverCallTypes[] = { + [kDriverCallInformPreChange] = "powerStateWillChangeTo", + [kDriverCallInformPostChange] = "powerStateDidChangeTo", + [kDriverCallSetPowerState] = "setPowerState" + }; + + if (messageType < (sizeof(driverCallTypes) / sizeof(driverCallTypes[0]))) { + DLOG("%s[0x%qx]::%s(%u) %stook %d ms\n", + name, id, driverCallTypes[messageType], (uint32_t) powerState, + async ? 
"async " : "", delay_ms); + } #endif } else { powerCaps = OSNumber::withNumber(_pendingCapability, 32); } if (powerCaps) { - responseDescription->setObject(_statsPowerCapsKey, powerCaps); - powerCaps->release(); + responseDescription->setObject(_statsPowerCapsKey.get(), powerCaps.get()); } sleep = OSSymbol::withCString("Sleep"); wake = OSSymbol::withCString("Wake"); if (_systemTransitionType == kSystemTransitionSleep) { - responseDescription->setObject(kIOPMStatsSystemTransitionKey, sleep); + responseDescription->setObject(kIOPMStatsSystemTransitionKey, sleep.get()); } else if (_systemTransitionType == kSystemTransitionWake) { - responseDescription->setObject(kIOPMStatsSystemTransitionKey, wake); + responseDescription->setObject(kIOPMStatsSystemTransitionKey, wake.get()); } else if (_systemTransitionType == kSystemTransitionCapability) { if (CAP_LOSS(kIOPMSystemCapabilityGraphics)) { - responseDescription->setObject(kIOPMStatsSystemTransitionKey, sleep); + responseDescription->setObject(kIOPMStatsSystemTransitionKey, sleep.get()); } else if (CAP_GAIN(kIOPMSystemCapabilityGraphics)) { - responseDescription->setObject(kIOPMStatsSystemTransitionKey, wake); + responseDescription->setObject(kIOPMStatsSystemTransitionKey, wake.get()); } } - if (sleep) { - sleep->release(); - } - if (wake) { - wake->release(); - } - - IOLockLock(pmStatsLock); if (pmStatsAppResponses && pmStatsAppResponses->getCount() < 50) { - pmStatsAppResponses->setObject(responseDescription); + pmStatsAppResponses->setObject(responseDescription.get()); } IOLockUnlock(pmStatsLock); - - responseDescription->release(); } return; @@ -8629,7 +9177,6 @@ IOPMrootDomain::callPlatformFunction( void * param1, void * param2, void * param3, void * param4 ) { - uint32_t bootFailureCode = 0xffffffff; if (pmTracer && functionName && functionName->isEqualTo(kIOPMRegisterNVRAMTracePointHandlerKey) && !pmTracer->tracePointHandler && !pmTracer->tracePointTarget) { @@ -8641,16 +9188,18 @@ IOPMrootDomain::callPlatformFunction( tracePointPCI = (uint32_t)(uintptr_t) param3; tracePointPhases = (uint32_t)(uintptr_t) param4; if ((tracePointPhases & 0xff) == kIOPMTracePointSystemSleep) { - IORegistryEntry *node = IORegistryEntry::fromPath( "/chosen", gIODTPlane ); + OSSharedPtr node = IORegistryEntry::fromPath( "/chosen", gIODTPlane ); if (node) { - OSData *data = OSDynamicCast( OSData, node->getProperty(kIOEFIBootRomFailureKey)); + OSSharedPtr bootRomFailureProp; + bootRomFailureProp = node->copyProperty(kIOEFIBootRomFailureKey); + OSData *data = OSDynamicCast(OSData, bootRomFailureProp.get()); + uint32_t bootFailureCode; if (data && data->getLength() == sizeof(bootFailureCode)) { + // Failure code from EFI/BootRom is a four byte structure memcpy(&bootFailureCode, data->getBytesNoCopy(), sizeof(bootFailureCode)); + tracePointPCI = OSSwapBigToHostInt32(bootFailureCode); } - node->release(); } - // Failure code from EFI/BootRom is a four byte structure - tracePointPCI = OSSwapBigToHostInt32(bootFailureCode); } statusCode = (((uint64_t)tracePointPCI) << 32) | tracePointPhases; if ((tracePointPhases & 0xff) != kIOPMTracePointSystemUp) { @@ -8691,10 +9240,9 @@ IOPMrootDomain::kdebugTrace(uint32_t event, uint64_t id, if (regId == 0) { regId = getRegistryEntryID(); } - IOTimeStampConstant(code, (uintptr_t) regId, param1, param2, param3); + KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, code, (uintptr_t) regId, param1, param2, param3, 0); } - void IOPMrootDomain::tracePoint( uint8_t point ) { @@ -8703,68 +9251,201 @@ IOPMrootDomain::tracePoint( uint8_t point ) } if 
(kIOPMTracePointWakeCapabilityClients == point) { - acceptSystemWakeEvents(false); + acceptSystemWakeEvents(kAcceptSystemWakeEvents_Disable); } kdebugTrace(kPMLogSleepWakeTracePoint, 0, point, 0); pmTracer->tracePoint(point); } +static void +kext_log_putc(char c) +{ + if (gKextNameEnd || gKextNamePos >= (sizeof(gKextNameBuf) - 1)) { + return; + } + if (c == '(' || c == '[' || c == ' ') { + c = 0; + gKextNameEnd = true; + } + + gKextNameBuf[gKextNamePos++] = c; +} + +static int +kext_log(const char *fmt, ...) +{ + va_list listp; + + va_start(listp, fmt); + _doprnt(fmt, &listp, &kext_log_putc, 16); + va_end(listp); + + return 0; +} + +static OSPtr +copyKextIdentifierWithAddress(vm_address_t address) +{ + OSSharedPtr identifer; + + IOLockLock(gHaltLogLock); + + gKextNameEnd = false; + gKextNamePos = 0; + gKextNameBuf[0] = 0; + + OSKext::printKextsInBacktrace(&address, 1, kext_log, OSKext::kPrintKextsLock | OSKext::kPrintKextsTerse); + gKextNameBuf[sizeof(gKextNameBuf) - 1] = 0; + identifer = OSSymbol::withCString((gKextNameBuf[0] != 0) ? gKextNameBuf : kOSKextKernelIdentifier); + + IOLockUnlock(gHaltLogLock); + + return identifer; +} + +// Caller serialized using PM workloop +const char * +IOPMrootDomain::getNotificationClientName(OSObject *object) +{ + IOPMServiceInterestNotifier *notifier = (typeof(notifier))object; + const char *clientName = "UNKNOWN"; + + if (!notifier->clientName) { + // Check for user client + if (systemCapabilityNotifier && (((IOPMServiceInterestNotifier *) systemCapabilityNotifier.get())->handler == notifier->handler)) { + OSNumber *clientID = NULL; + messageClient(kIOMessageCopyClientID, object, &clientID); + if (clientID) { + OSSharedPtr string(IOCopyLogNameForPID(clientID->unsigned32BitValue()), OSNoRetain); + if (string) { + notifier->clientName = OSSymbol::withString(string.get()); + } + clientID->release(); + } + } else if (notifier->identifier) { + notifier->clientName.reset(notifier->identifier.get(), OSRetain); + } + } + + if (notifier->clientName) { + clientName = notifier->clientName->getCStringNoCopy(); + } + + return clientName; +} + void -IOPMrootDomain::traceDetail(OSObject *object, bool start) +IOPMrootDomain::traceNotification(OSObject *object, bool start, uint64_t timestamp, uint32_t msgIndex) { IOPMServiceInterestNotifier *notifier; if (systemBooting) { return; } - notifier = OSDynamicCast(IOPMServiceInterestNotifier, object); if (!notifier) { return; } if (start) { - pmTracer->traceDetail( notifier->uuid0 >> 32 ); - kdebugTrace(kPMLogSleepWakeMessage, pmTracer->getTracePhase(), notifier->msgType, notifier->uuid0, notifier->uuid1); - if (notifier->identifier) { - DLOG("trace point 0x%02x msg 0x%x to %s\n", pmTracer->getTracePhase(), notifier->msgType, - notifier->identifier->getCStringNoCopy()); + pmTracer->traceDetail(notifier->uuid0 >> 32); + kdebugTrace(kPMLogSleepWakeMessage, pmTracer->getTracePhase(), + (uintptr_t) notifier->msgType, (uintptr_t) notifier->uuid0, (uintptr_t) notifier->uuid1); + + // Update notifier state used for response/ack logging + notifier->msgIndex = msgIndex; + notifier->msgAbsTime = timestamp; + + if (msgIndex != UINT_MAX) { + DLOG("%s[%u] to %s\n", getIOMessageString(notifier->msgType), msgIndex, getNotificationClientName(notifier)); } else { - DLOG("trace point 0x%02x msg 0x%x\n", pmTracer->getTracePhase(), notifier->msgType); + DLOG("%s to %s\n", getIOMessageString(notifier->msgType), getNotificationClientName(notifier)); } + + assert(notifierObject == NULL); notifierThread = current_thread(); - notifierObject = 
notifier; - notifier->retain(); + notifierObject.reset(notifier, OSRetain); } else { + uint64_t nsec; + uint32_t delayMS; +
+ SUB_ABSOLUTETIME(&timestamp, &notifier->msgAbsTime); + absolutetime_to_nanoseconds(timestamp, &nsec); + delayMS = (uint32_t)(nsec / 1000000ULL); + if (delayMS > notifier->maxMsgDelayMS) { + notifier->maxMsgDelayMS = delayMS; + } +
+ assert(notifierObject == notifier); + notifierObject.reset(); notifierThread = NULL; - notifierObject = NULL; - notifier->release(); } } +void +IOPMrootDomain::traceNotificationAck(OSObject *object, uint32_t delay_ms) +{ + if (systemBooting) { + return; + } + IOPMServiceInterestNotifier *notifier = OSDynamicCast(IOPMServiceInterestNotifier, object); + if (!notifier) { + return; + } +
+ kdebugTrace(kPMLogDrvResponseDelay, notifier->uuid0, + (uintptr_t) notifier->uuid1, (uintptr_t) 0, (uintptr_t) delay_ms); +
+ DLOG("%s[%u] ack from %s took %d ms\n", + getIOMessageString(notifier->msgType), notifier->msgIndex, getNotificationClientName(notifier), delay_ms); + if (delay_ms > notifier->maxAckDelayMS) { + notifier->maxAckDelayMS = delay_ms; + } +} void -IOPMrootDomain::traceAckDelay(OSObject *object, uint32_t response, uint32_t delay_ms) +IOPMrootDomain::traceNotificationResponse(OSObject *object, uint32_t delay_ms, uint32_t ack_time_us) { + if (systemBooting) { + return; + } IOPMServiceInterestNotifier *notifier = OSDynamicCast(IOPMServiceInterestNotifier, object); if (!notifier) { - DLOG("Unknown notifier\n"); return; } - if (!systemBooting) { - kdebugTrace(kPMLogDrvResponseDelay, notifier->uuid0, notifier->uuid1, response, delay_ms); - if (notifier->identifier) { - DLOG("Response from %s took %d ms(response:%d)\n", - notifier->identifier->getCStringNoCopy(), delay_ms, response); - } else { - DLOG("Response from kext UUID %llx-%llx took %d ms(response:%d)\n", - notifier->uuid0, notifier->uuid1, delay_ms, response); - } + kdebugTrace(kPMLogDrvResponseDelay, notifier->uuid0, + (uintptr_t) notifier->uuid1, (uintptr_t)(ack_time_us / 1000), (uintptr_t) delay_ms); +
+ if (ack_time_us == 0) { + // Client work is done and ack will not be forthcoming + DLOG("%s[%u] response from %s took %d ms\n", + getIOMessageString(notifier->msgType), notifier->msgIndex, getNotificationClientName(notifier), delay_ms); + } else { + // Client needs more time and it must ack within ack_time_us + DLOG("%s[%u] response from %s took %d ms (ack in %d us)\n", + getIOMessageString(notifier->msgType), notifier->msgIndex, getNotificationClientName(notifier), delay_ms, ack_time_us); } } +void +IOPMrootDomain::traceFilteredNotification(OSObject *object) +{ + if ((kIOLogDebugPower & gIOKitDebug) == 0) { + return; + } + if (systemBooting) { + return; + } + IOPMServiceInterestNotifier *notifier = OSDynamicCast(IOPMServiceInterestNotifier, object); + if (!notifier) { + return; + } +
+ DLOG("%s to %s dropped\n", getIOMessageString(notifier->msgType), getNotificationClientName(notifier)); +} + void IOPMrootDomain::traceDetail(uint32_t msgType, uint32_t msgIndex, uint32_t delay) { @@ -8776,7 +9457,6 @@ IOPMrootDomain::traceDetail(uint32_t msgType, uint32_t msgIndex, uint32_t delay) } } - void IOPMrootDomain::configureReportGated(uint64_t channel_id, uint64_t action, void *result) { @@ -8799,6 +9479,9 @@ IOPMrootDomain::configureReportGated(uint64_t channel_id, uint64_t action, void bktCnt = kSleepDelaysBcktCnt; bktSize = kSleepDelaysBcktSize; clientCnt = &sleepDelaysClientCnt; + } else { + assert(false); + return; + } switch (action) { @@ -8815,7 +9498,7 @@
IOPMrootDomain::configureReportGated(uint64_t channel_id, uint64_t action, void break; } bzero(*report, reportSize); - HISTREPORT_INIT(bktCnt, bktSize, *report, reportSize, + HISTREPORT_INIT((uint16_t)bktCnt, bktSize, *report, reportSize, getRegistryEntryID(), channel_id, kIOReportCategoryPower); if (channel_id == kAssertDelayChID) { @@ -8892,6 +9575,9 @@ IOPMrootDomain::updateReportGated(uint64_t ch_id, void *result, IOBufferMemoryDe report = &assertOnWakeReport; } else if (ch_id == kSleepDelaysChID) { report = &sleepDelaysReport; + } else { + assert(false); + return kIOReturnBadArgument; } if (*report == NULL) { @@ -8972,16 +9658,15 @@ OSDefineMetaClassAndStructors(PMTraceWorker, OSObject) #define kPMBestGuessPCIDevicesCount 25 #define kPMMaxRTCBitfieldSize 32 -PMTraceWorker * PMTraceWorker::tracer(IOPMrootDomain * owner) +OSPtr +PMTraceWorker::tracer(IOPMrootDomain * owner) { - PMTraceWorker *me; - - me = OSTypeAlloc( PMTraceWorker ); + OSSharedPtr me = OSMakeShared(); if (!me || !me->init()) { return NULL; } - DLOG("PMTraceWorker %p\n", OBFUSCATE(me)); + DLOG("PMTraceWorker %p\n", OBFUSCATE(me.get())); // Note that we cannot instantiate the PCI device -> bit mappings here, since // the IODeviceTree has not yet been created by IOPlatformExpert. We create @@ -9024,7 +9709,7 @@ PMTraceWorker::RTC_TRACE(void) int PMTraceWorker::recordTopLevelPCIDevice(IOService * pciDevice) { - const OSSymbol * deviceName; + OSSharedPtr deviceName; int index = -1; IOLockLock(pmTraceWorkerLock); @@ -9042,15 +9727,13 @@ PMTraceWorker::recordTopLevelPCIDevice(IOService * pciDevice) } if ((deviceName = pciDevice->copyName()) && - (pciDeviceBitMappings->getNextIndexOfObject(deviceName, 0) == (unsigned int)-1) && - pciDeviceBitMappings->setObject(deviceName)) { + (pciDeviceBitMappings->getNextIndexOfObject(deviceName.get(), 0) == (unsigned int)-1) && + pciDeviceBitMappings->setObject(deviceName.get())) { index = pciDeviceBitMappings->getCount() - 1; _LOG("PMTrace PCI array: set object %s => %d\n", deviceName->getCStringNoCopy(), index); } - if (deviceName) { - deviceName->release(); - } + if (!addedToRegistry && (index >= 0)) { addedToRegistry = owner->setProperty("PCITopLevel", this); } @@ -9271,24 +9954,22 @@ PMHaltWorker::main( void * arg, wait_result_t waitResult ) void PMHaltWorker::work( PMHaltWorker * me ) { - IOService * service; + OSSharedPtr service; OSSet * inner; AbsoluteTime startTime, elapsedTime; UInt32 deltaTime; bool timeout; while (true) { - service = NULL; timeout = false; // Claim an unit of work from the shared pool IOLockLock( gPMHaltLock ); inner = (OSSet *)gPMHaltArray->getObject(me->depth); if (inner) { - service = OSDynamicCast(IOService, inner->getAnyObject()); + service.reset(OSDynamicCast(IOService, inner->getAnyObject()), OSRetain); if (service) { - service->retain(); - inner->removeObject(service); + inner->removeObject(service.get()); } } IOLockUnlock( gPMHaltLock ); @@ -9298,18 +9979,18 @@ PMHaltWorker::work( PMHaltWorker * me ) clock_get_uptime(&startTime); if (!service->isInactive() && - service->setProperty(gPMHaltClientAcknowledgeKey, me)) { + service->setProperty(gPMHaltClientAcknowledgeKey.get(), me)) { IOLockLock(me->lock); me->startTime = startTime; - me->service = service; + me->service = service.get(); me->timeout = false; IOLockUnlock(me->lock); - service->systemWillShutdown( gPMHaltMessageType ); + service->systemWillShutdown( gPMHaltMessageType); // Wait for driver acknowledgement IOLockLock(me->lock); - while (service->getProperty(gPMHaltClientAcknowledgeKey)) { + 
while (service->propertyExists(gPMHaltClientAcknowledgeKey.get())) { IOLockSleep(me->lock, me, THREAD_UNINT); } me->service = NULL; @@ -9325,11 +10006,10 @@ PMHaltWorker::work( PMHaltWorker * me ) service->getName(), service->getRegistryEntryID(), (uint32_t) deltaTime ); halt_log_enter("PowerOff/Restart handler completed", - OSMemberFunctionCast(const void *, service, &IOService::systemWillShutdown), + OSMemberFunctionCast(const void *, service.get(), &IOService::systemWillShutdown), elapsedTime); } - service->release(); me->visits++; } } @@ -9374,22 +10054,21 @@ PMHaltWorker::checkTimeout( PMHaltWorker * me, AbsoluteTime * now ) void IOPMrootDomain::acknowledgeSystemWillShutdown( IOService * from ) { - PMHaltWorker * worker; - OSObject * prop; + PMHaltWorker * worker; + OSSharedPtr prop; if (!from) { return; } //DLOG("%s acknowledged\n", from->getName()); - prop = from->copyProperty( gPMHaltClientAcknowledgeKey ); + prop = from->copyProperty( gPMHaltClientAcknowledgeKey.get()); if (prop) { - worker = (PMHaltWorker *) prop; + worker = (PMHaltWorker *) prop.get(); IOLockLock(worker->lock); - from->removeProperty( gPMHaltClientAcknowledgeKey ); + from->removeProperty( gPMHaltClientAcknowledgeKey.get()); thread_wakeup((event_t) worker); IOLockUnlock(worker->lock); - worker->release(); } else { DLOG("%s acknowledged without worker property\n", from->getName()); @@ -9406,21 +10085,22 @@ IOPMrootDomain::acknowledgeSystemWillShutdown( IOService * from ) static void notifySystemShutdown( IOService * root, uint32_t messageType ) { -#define PLACEHOLDER ((OSSet *)gPMHaltArray) - IORegistryIterator * iter; - IORegistryEntry * entry; - IOService * node; - OSSet * inner; - PMHaltWorker * workers[kPMHaltMaxWorkers]; - AbsoluteTime deadline; - unsigned int totalNodes = 0; - unsigned int depth; - unsigned int rootDepth; - unsigned int numWorkers; - unsigned int count; - int waitResult; - void * baseFunc; - bool ok; +#define PLACEHOLDER ((OSSet *)gPMHaltArray.get()) + OSSharedPtr iter; + IORegistryEntry * entry; + IOService * node; + OSSet * inner; + OSSharedPtr newInner; + PMHaltWorker * workers[kPMHaltMaxWorkers]; + AbsoluteTime deadline; + unsigned int totalNodes = 0; + unsigned int depth; + unsigned int rootDepth; + unsigned int numWorkers; + unsigned int count; + int waitResult; + void * baseFunc; + bool ok; DLOG("%s msgType = 0x%x\n", __FUNCTION__, messageType); @@ -9504,10 +10184,10 @@ notifySystemShutdown( IOService * root, uint32_t messageType ) if (depth < count) { inner = (OSSet *)gPMHaltArray->getObject(depth); if (inner == PLACEHOLDER) { - inner = OSSet::withCapacity(40); - if (inner) { - gPMHaltArray->replaceObject(depth, inner); - inner->release(); + newInner = OSSet::withCapacity(40); + if (newInner) { + gPMHaltArray->replaceObject(depth, newInner.get()); + inner = newInner.get(); } } @@ -9521,7 +10201,6 @@ notifySystemShutdown( IOService * root, uint32_t messageType ) DLOG("Skipped PM node %s\n", node->getName()); } } - iter->release(); } // debug only @@ -9692,10 +10371,10 @@ IOPMrootDomain::serializeProperties( OSSerialize * s ) const return IOService::serializeProperties(s); } -OSObject * +OSSharedPtr IOPMrootDomain::copyProperty( const char * aKey) const { - OSObject *obj = NULL; + OSSharedPtr obj; obj = IOService::copyProperty(aKey); if (obj) { @@ -9705,18 +10384,18 @@ IOPMrootDomain::copyProperty( const char * aKey) const if (!strncmp(aKey, kIOPMSleepWakeWdogRebootKey, sizeof(kIOPMSleepWakeWdogRebootKey))) { if (swd_flags & SWD_BOOT_BY_SW_WDOG) { - return kOSBooleanTrue; + return 
OSSharedPtr(kOSBooleanTrue, OSNoRetain); } else { - return kOSBooleanFalse; + return OSSharedPtr(kOSBooleanFalse, OSNoRetain); } } if (!strncmp(aKey, kIOPMSleepWakeWdogLogsValidKey, sizeof(kIOPMSleepWakeWdogLogsValidKey))) { if (swd_flags & SWD_VALID_LOGS) { - return kOSBooleanTrue; + return OSSharedPtr(kOSBooleanTrue, OSNoRetain); } else { - return kOSBooleanFalse; + return OSSharedPtr(kOSBooleanFalse, OSNoRetain); } } @@ -9727,68 +10406,67 @@ IOPMrootDomain::copyProperty( const char * aKey) const */ if (!strcmp(aKey, "DesktopMode")) { if (desktopMode) { - return kOSBooleanTrue; + return OSSharedPtr(kOSBooleanTrue, OSNoRetain); } else { - return kOSBooleanFalse; + return OSSharedPtr(kOSBooleanFalse, OSNoRetain); } } if (!strcmp(aKey, "DisplayIdleForDemandSleep")) { if (displayIdleForDemandSleep) { - return kOSBooleanTrue; + return OSSharedPtr(kOSBooleanTrue, OSNoRetain); } else { - return kOSBooleanFalse; + return OSSharedPtr(kOSBooleanFalse, OSNoRetain); } } if (!strcmp(aKey, kIOPMDriverWakeEventsKey)) { - OSArray * array = NULL; + OSSharedPtr array; WAKEEVENT_LOCK(); if (_systemWakeEventsArray && _systemWakeEventsArray->getCount()) { - OSCollection *collection = _systemWakeEventsArray->copyCollection(); - if (collection && !(array = OSDynamicCast(OSArray, collection))) { - collection->release(); + OSSharedPtr collection = _systemWakeEventsArray->copyCollection(); + if (collection) { + array = OSDynamicPtrCast(collection); } } WAKEEVENT_UNLOCK(); - return array; + return os::move(array); } if (!strcmp(aKey, kIOPMSleepStatisticsAppsKey)) { - OSArray * array = NULL; + OSSharedPtr array; IOLockLock(pmStatsLock); if (pmStatsAppResponses && pmStatsAppResponses->getCount()) { - OSCollection *collection = pmStatsAppResponses->copyCollection(); - if (collection && !(array = OSDynamicCast(OSArray, collection))) { - collection->release(); + OSSharedPtr collection = pmStatsAppResponses->copyCollection(); + if (collection) { + array = OSDynamicPtrCast(collection); } - pmStatsAppResponses->flushCollection(); } IOLockUnlock(pmStatsLock); - return array; + return os::move(array); } if (!strcmp(aKey, kIOPMIdleSleepPreventersKey)) { OSArray *idleSleepList = NULL; gRootDomain->copySleepPreventersList(&idleSleepList, NULL); - return idleSleepList; + return OSSharedPtr(idleSleepList, OSNoRetain); } if (!strcmp(aKey, kIOPMSystemSleepPreventersKey)) { OSArray *systemSleepList = NULL; gRootDomain->copySleepPreventersList(NULL, &systemSleepList); - return systemSleepList; + return OSSharedPtr(systemSleepList, OSNoRetain); } if (!strcmp(aKey, kIOPMIdleSleepPreventersWithIDKey)) { OSArray *idleSleepList = NULL; gRootDomain->copySleepPreventersListWithID(&idleSleepList, NULL); - return idleSleepList; + return OSSharedPtr(idleSleepList, OSNoRetain); } if (!strcmp(aKey, kIOPMSystemSleepPreventersWithIDKey)) { OSArray *systemSleepList = NULL; gRootDomain->copySleepPreventersListWithID(NULL, &systemSleepList); - return systemSleepList; + return OSSharedPtr(systemSleepList, OSNoRetain); } return NULL; } @@ -9804,6 +10482,14 @@ IOPMrootDomain::copyWakeReasonString( char * outBuf, size_t bufSize ) WAKEEVENT_UNLOCK(); } +void +IOPMrootDomain::copyShutdownReasonString( char * outBuf, size_t bufSize ) +{ + WAKEEVENT_LOCK(); + strlcpy(outBuf, gShutdownReasonString, bufSize); + WAKEEVENT_UNLOCK(); +} + //****************************************************************************** // acceptSystemWakeEvents // @@ -9811,12 +10497,14 @@ IOPMrootDomain::copyWakeReasonString( char * outBuf, size_t bufSize ) 
//****************************************************************************** void -IOPMrootDomain::acceptSystemWakeEvents( bool accept ) +IOPMrootDomain::acceptSystemWakeEvents( uint32_t control ) { bool logWakeReason = false; WAKEEVENT_LOCK(); - if (accept) { + switch (control) { + case kAcceptSystemWakeEvents_Enable: + assert(_acceptSystemWakeEvents == false); if (!_systemWakeEventsArray) { _systemWakeEventsArray = OSArray::withCapacity(4); } @@ -9827,9 +10515,17 @@ IOPMrootDomain::acceptSystemWakeEvents( bool accept ) _systemWakeEventsArray->flushCollection(); } } - } else { +
+ // Remove stale WakeType property before system sleep + removeProperty(kIOPMRootDomainWakeTypeKey); + removeProperty(kIOPMRootDomainWakeReasonKey); + break; +
+ case kAcceptSystemWakeEvents_Disable: _acceptSystemWakeEvents = false; -#if CONFIG_EMBEDDED +#if defined(XNU_TARGET_OS_OSX) + logWakeReason = (gWakeReasonString[0] != '\0'); +#else /* !defined(XNU_TARGET_OS_OSX) */ logWakeReason = gWakeReasonSysctlRegistered; #if DEVELOPMENT static int panic_allowed = -1; @@ -9851,13 +10547,25 @@ IOPMrootDomain::acceptSystemWakeEvents( bool accept ) panic("Wake reason is empty\n"); } } -#endif -#endif +#endif /* DEVELOPMENT */ +#endif /* !defined(XNU_TARGET_OS_OSX) */ +
+ // publish kIOPMRootDomainWakeReasonKey if not already set + if (!propertyExists(kIOPMRootDomainWakeReasonKey)) { + setProperty(kIOPMRootDomainWakeReasonKey, gWakeReasonString); + } + break; +
+ case kAcceptSystemWakeEvents_Reenable: + assert(_acceptSystemWakeEvents == false); + _acceptSystemWakeEvents = (_systemWakeEventsArray != NULL); + removeProperty(kIOPMRootDomainWakeReasonKey); + break; } WAKEEVENT_UNLOCK(); if (logWakeReason) { - MSG("system wake events:%s\n", gWakeReasonString); + MSG("system wake events: %s\n", gWakeReasonString); } } @@ -9874,22 +10582,21 @@ IOPMrootDomain::claimSystemWakeEvent( const char * reason, OSObject * details ) { - const OSSymbol * deviceName = NULL; - OSNumber * deviceRegId = NULL; - OSNumber * claimTime = NULL; - OSData * flagsData = NULL; - OSString * reasonString = NULL; - OSDictionary * d = NULL; - uint64_t timestamp; - bool ok = false; - bool addWakeReason; -
- pmEventTimeStamp(&timestamp); + OSSharedPtr<const OSSymbol> deviceName; + OSSharedPtr<OSNumber> deviceRegId; + OSSharedPtr<OSNumber> claimTime; + OSSharedPtr<OSData> flagsData; + OSSharedPtr<OSString> reasonString; + OSSharedPtr<OSDictionary> dict; + uint64_t timestamp; + bool addWakeReason; if (!device || !reason) { return; } + pmEventTimeStamp(&timestamp); + IOOptionBits aotFlags = 0; bool needAOTEvaluate = FALSE; @@ -9914,23 +10621,39 @@ IOPMrootDomain::claimSystemWakeEvent( } #endif /* DEVELOPMENT || DEBUG */ +#if defined(XNU_TARGET_OS_OSX) && !DISPLAY_WRANGLER_PRESENT + // Publishing the WakeType is serialized by the PM work loop + if (!strcmp("rtc", reason) && (_nextScheduledAlarmType != NULL)) { + pmPowerStateQueue->submitPowerEvent(kPowerEventPublishWakeType, + (void *) _nextScheduledAlarmType.get()); + } +
+ // Workaround for the missing wake HID event + if (gDarkWakeFlags & kDarkWakeFlagUserWakeWorkaround) { + if (!strcmp("trackpadkeyboard", reason)) { + pmPowerStateQueue->submitPowerEvent(kPowerEventPublishWakeType, + (void *) gIOPMWakeTypeUserKey.get()); + } + } +#endif + deviceName = device->copyName(gIOServicePlane); deviceRegId = OSNumber::withNumber(device->getRegistryEntryID(), 64); claimTime = OSNumber::withNumber(timestamp, 64); flagsData = OSData::withBytes(&flags, sizeof(flags)); reasonString = OSString::withCString(reason); - d = OSDictionary::withCapacity(5 + (details ?
1 : 0)); - if (!deviceName || !deviceRegId || !claimTime || !flagsData || !reasonString) { + dict = OSDictionary::withCapacity(5 + (details ? 1 : 0)); + if (!dict || !deviceName || !deviceRegId || !claimTime || !flagsData || !reasonString) { goto done; } - d->setObject(gIONameKey, deviceName); - d->setObject(gIORegistryEntryIDKey, deviceRegId); - d->setObject(kIOPMWakeEventTimeKey, claimTime); - d->setObject(kIOPMWakeEventFlagsKey, flagsData); - d->setObject(kIOPMWakeEventReasonKey, reasonString); + dict->setObject(gIONameKey, deviceName.get()); + dict->setObject(gIORegistryEntryIDKey, deviceRegId.get()); + dict->setObject(kIOPMWakeEventTimeKey, claimTime.get()); + dict->setObject(kIOPMWakeEventFlagsKey, flagsData.get()); + dict->setObject(kIOPMWakeEventReasonKey, reasonString.get()); if (details) { - d->setObject(kIOPMWakeEventDetailsKey, details); + dict->setObject(kIOPMWakeEventDetailsKey, details); } WAKEEVENT_LOCK(); @@ -9959,17 +10682,20 @@ IOPMrootDomain::claimSystemWakeEvent( addWakeReason = _aotNow && _systemWakeEventsArray && ((kIOPMWakeEventAOTExitFlags & aotFlags)); needAOTEvaluate = _aotReadyToFullWake; } + DMSG("claimSystemWakeEvent(%s, 0x%x, %s, 0x%llx) aot %d phase 0x%x add %d\n", + reason, (int)flags, deviceName->getCStringNoCopy(), device->getRegistryEntryID(), + _aotNow, pmTracer->getTracePhase(), addWakeReason); if (!gWakeReasonSysctlRegistered) { // Lazy registration until the platform driver stops registering // the same name. gWakeReasonSysctlRegistered = true; -#if CONFIG_EMBEDDED +#if !defined(XNU_TARGET_OS_OSX) sysctl_register_oid(&sysctl__kern_wakereason); -#endif +#endif /* !defined(XNU_TARGET_OS_OSX) */ } if (addWakeReason) { - ok = _systemWakeEventsArray->setObject(d); + _systemWakeEventsArray->setObject(dict.get()); if (gWakeReasonString[0] != '\0') { strlcat(gWakeReasonString, " ", sizeof(gWakeReasonString)); } @@ -9978,28 +10704,72 @@ IOPMrootDomain::claimSystemWakeEvent( WAKEEVENT_UNLOCK(); if (needAOTEvaluate) { - aotEvaluate(NULL); + // Call aotEvaluate() on PM work loop since it may call + // aotExit() which accesses PM state. + pmPowerStateQueue->submitPowerEvent(kPowerEventAOTEvaluate); } done: - if (deviceName) { - deviceName->release(); - } - if (deviceRegId) { - deviceRegId->release(); + return; +} + +//****************************************************************************** +// claimSystemBootEvent +// +// For a driver to claim a device is the source/conduit of a system boot event. +//****************************************************************************** + +void +IOPMrootDomain::claimSystemBootEvent( + IOService * device, + IOOptionBits flags, + const char * reason, + __unused OSObject * details ) +{ + if (!device || !reason) { + return; } - if (claimTime) { - claimTime->release(); + + DEBUG_LOG("claimSystemBootEvent(%s, %s, 0x%x)\n", reason, device->getName(), (uint32_t) flags); + WAKEEVENT_LOCK(); + if (!gBootReasonSysctlRegistered) { + // Lazy sysctl registration after setting gBootReasonString + strlcat(gBootReasonString, reason, sizeof(gBootReasonString)); + sysctl_register_oid(&sysctl__kern_bootreason); + gBootReasonSysctlRegistered = true; } - if (flagsData) { - flagsData->release(); + WAKEEVENT_UNLOCK(); +} + +//****************************************************************************** +// claimSystemShutdownEvent +// +// For drivers to claim a system shutdown event on the ensuing boot. 
+//****************************************************************************** + +void +IOPMrootDomain::claimSystemShutdownEvent( + IOService * device, + IOOptionBits flags, + const char * reason, + __unused OSObject * details ) +{ + if (!device || !reason) { + return; } - if (reasonString) { - reasonString->release(); + + DEBUG_LOG("claimSystemShutdownEvent(%s, %s, 0x%x)\n", reason, device->getName(), (uint32_t) flags); + WAKEEVENT_LOCK(); + if (gShutdownReasonString[0] != '\0') { + strlcat(gShutdownReasonString, " ", sizeof(gShutdownReasonString)); } - if (d) { - d->release(); + strlcat(gShutdownReasonString, reason, sizeof(gShutdownReasonString)); + + if (!gShutdownReasonSysctlRegistered) { + sysctl_register_oid(&sysctl__kern_shutdownreason); + gShutdownReasonSysctlRegistered = true; } + WAKEEVENT_UNLOCK(); } /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ @@ -10116,10 +10886,10 @@ PMSettingObject::free( void ) super::free(); } -void +IOReturn PMSettingObject::dispatchPMSetting( const OSSymbol * type, OSObject * object ) { - (*func)(target, type, object, refcon); + return (*func)(target, type, object, refcon); } void @@ -10141,27 +10911,29 @@ PMSettingObject::clientHandleFreed( void ) PMAssertionsTracker * PMAssertionsTracker::pmAssertionsTracker( IOPMrootDomain *rootDomain ) { - PMAssertionsTracker *myself; - - myself = new PMAssertionsTracker; - - if (myself) { - myself->init(); - myself->owner = rootDomain; - myself->issuingUniqueID = kAssertUniqueIDStart; - myself->assertionsArray = OSArray::withCapacity(5); - myself->assertionsKernel = 0; - myself->assertionsUser = 0; - myself->assertionsCombined = 0; - myself->assertionsArrayLock = IOLockAlloc(); - myself->tabulateProducerCount = myself->tabulateConsumerCount = 0; + PMAssertionsTracker *me; - if (!myself->assertionsArray || !myself->assertionsArrayLock) { - myself = NULL; + me = new PMAssertionsTracker; + if (!me || !me->init()) { + if (me) { + me->release(); } + return NULL; } - return myself; + me->owner = rootDomain; + me->issuingUniqueID = kAssertUniqueIDStart; + me->assertionsArray = OSArray::withCapacity(5); + me->assertionsKernel = 0; + me->assertionsUser = 0; + me->assertionsCombined = 0; + me->assertionsArrayLock = IOLockAlloc(); + me->tabulateProducerCount = me->tabulateConsumerCount = 0; + + assert(me->assertionsArray); + assert(me->assertionsArrayLock); + + return me; } /* tabulate @@ -10210,10 +10982,79 @@ PMAssertionsTracker::tabulate(void) } } +void +PMAssertionsTracker::updateCPUBitAccounting( PMAssertStruct *assertStruct ) +{ + AbsoluteTime now; + uint64_t nsec; + + if (((assertStruct->assertionBits & kIOPMDriverAssertionCPUBit) == 0) || + (assertStruct->assertCPUStartTime == 0)) { + return; + } + + now = mach_absolute_time(); + SUB_ABSOLUTETIME(&now, &assertStruct->assertCPUStartTime); + absolutetime_to_nanoseconds(now, &nsec); + assertStruct->assertCPUDuration += nsec; + assertStruct->assertCPUStartTime = 0; + + if (assertStruct->assertCPUDuration > maxAssertCPUDuration) { + maxAssertCPUDuration = assertStruct->assertCPUDuration; + maxAssertCPUEntryId = assertStruct->registryEntryID; + } +} + +void +PMAssertionsTracker::reportCPUBitAccounting( void ) +{ + PMAssertStruct *_a; + OSData *_d; + int i, count; + AbsoluteTime now; + uint64_t nsec; + + ASSERT_GATED(); + + // Account for drivers that are still holding the CPU assertion + if (assertionsKernel & kIOPMDriverAssertionCPUBit) { + now = mach_absolute_time(); + if ((count = assertionsArray->getCount())) { + for (i = 0; i < count; 
i++) { + _d = OSDynamicCast(OSData, assertionsArray->getObject(i)); + if (_d) { + _a = (PMAssertStruct *)_d->getBytesNoCopy(); + if ((_a->assertionBits & kIOPMDriverAssertionCPUBit) && + (_a->level == kIOPMDriverAssertionLevelOn) && + (_a->assertCPUStartTime != 0)) { + // Don't modify PMAssertStruct, leave that + // for updateCPUBitAccounting() + SUB_ABSOLUTETIME(&now, &_a->assertCPUStartTime); + absolutetime_to_nanoseconds(now, &nsec); + nsec += _a->assertCPUDuration; + if (nsec > maxAssertCPUDuration) { + maxAssertCPUDuration = nsec; + maxAssertCPUEntryId = _a->registryEntryID; + } + } + } + } + } + } + + if (maxAssertCPUDuration) { + DLOG("cpu assertion held for %llu ms by 0x%llx\n", + (maxAssertCPUDuration / NSEC_PER_MSEC), maxAssertCPUEntryId); + } + + maxAssertCPUDuration = 0; + maxAssertCPUEntryId = 0; +} + void PMAssertionsTracker::publishProperties( void ) { - OSArray *assertionsSummary = NULL; + OSSharedPtr assertionsSummary; if (tabulateConsumerCount != tabulateProducerCount) { IOLockLock(assertionsArrayLock); @@ -10224,8 +11065,7 @@ PMAssertionsTracker::publishProperties( void ) */ assertionsSummary = copyAssertionsArray(); if (assertionsSummary) { - owner->setProperty(kIOPMAssertionsDriverDetailedKey, assertionsSummary); - assertionsSummary->release(); + owner->setProperty(kIOPMAssertionsDriverDetailedKey, assertionsSummary.get()); } else { owner->removeProperty(kIOPMAssertionsDriverDetailedKey); } @@ -10277,10 +11117,17 @@ PMAssertionsTracker::detailsForID(IOPMDriverAssertionID _id, int *index) IOReturn PMAssertionsTracker::handleCreateAssertion(OSData *newAssertion) { + PMAssertStruct *assertStruct; + ASSERT_GATED(); if (newAssertion) { IOLockLock(assertionsArrayLock); + assertStruct = (PMAssertStruct *) newAssertion->getBytesNoCopy(); + if ((assertStruct->assertionBits & kIOPMDriverAssertionCPUBit) && + (assertStruct->level == kIOPMDriverAssertionLevelOn)) { + assertStruct->assertCPUStartTime = mach_absolute_time(); + } assertionsArray->setObject(newAssertion); IOLockUnlock(assertionsArrayLock); newAssertion->release(); @@ -10302,23 +11149,29 @@ PMAssertionsTracker::createAssertion( const char *whoItIs, IOPMDriverAssertionID *outID) { - OSData *dataStore = NULL; + OSSharedPtr dataStore; PMAssertStruct track; // Warning: trillions and trillions of created assertions may overflow the unique ID. track.id = OSIncrementAtomic64((SInt64*) &issuingUniqueID); track.level = level; track.assertionBits = which; - track.ownerString = whoItIs ? OSSymbol::withCString(whoItIs):NULL; + + // NB: ownerString is explicitly managed by PMAssertStruct + // it will be released in `handleReleaseAssertion' below + track.ownerString = whoItIs ? OSSymbol::withCString(whoItIs).detach():nullptr; track.ownerService = serviceID; track.registryEntryID = serviceID ? 
serviceID->getRegistryEntryID():0; track.modifiedTime = 0; pmEventTimeStamp(&track.createdTime); + track.assertCPUStartTime = 0; + track.assertCPUDuration = 0; dataStore = OSData::withBytes(&track, sizeof(PMAssertStruct)); if (!dataStore) { if (track.ownerString) { track.ownerString->release(); + track.ownerString = NULL; } return kIOReturnNoMemory; } @@ -10326,7 +11179,8 @@ PMAssertionsTracker::createAssertion( *outID = track.id; if (owner && owner->pmPowerStateQueue) { - owner->pmPowerStateQueue->submitPowerEvent(kPowerEventAssertionCreate, (void *)dataStore); + // queue action is responsible for releasing dataStore + owner->pmPowerStateQueue->submitPowerEvent(kPowerEventAssertionCreate, (void *)dataStore.detach()); } return kIOReturnSuccess; @@ -10349,8 +11203,15 @@ PMAssertionsTracker::handleReleaseAssertion( } IOLockLock(assertionsArrayLock); + + if ((assertStruct->assertionBits & kIOPMDriverAssertionCPUBit) && + (assertStruct->level == kIOPMDriverAssertionLevelOn)) { + updateCPUBitAccounting(assertStruct); + } + if (assertStruct->ownerString) { assertStruct->ownerString->release(); + assertStruct->ownerString = NULL; } assertionsArray->removeObject(index); @@ -10392,6 +11253,14 @@ PMAssertionsTracker::handleSetAssertionLevel( IOLockLock(assertionsArrayLock); pmEventTimeStamp(&assertStruct->modifiedTime); + if ((assertStruct->assertionBits & kIOPMDriverAssertionCPUBit) && + (assertStruct->level != _level)) { + if (_level == kIOPMDriverAssertionLevelOn) { + assertStruct->assertCPUStartTime = mach_absolute_time(); + } else { + updateCPUBitAccounting(assertStruct); + } + } assertStruct->level = _level; IOLockUnlock(assertionsArrayLock); @@ -10422,8 +11291,8 @@ PMAssertionsTracker::handleSetUserAssertionLevels(void * arg0) ASSERT_GATED(); if (new_user_levels != assertionsUser) { + DLOG("assertionsUser 0x%llx->0x%llx\n", assertionsUser, new_user_levels); assertionsUser = new_user_levels; - DLOG("assertionsUser 0x%llx\n", assertionsUser); } tabulate(); @@ -10448,65 +11317,60 @@ PMAssertionsTracker::setUserAssertionLevels( } -OSArray * +OSSharedPtr PMAssertionsTracker::copyAssertionsArray(void) { int count; int i; - OSArray *outArray = NULL; + OSSharedPtr outArray = NULL; - if (!assertionsArray || - (0 == (count = assertionsArray->getCount())) || - (NULL == (outArray = OSArray::withCapacity(count)))) { + if (!assertionsArray || (0 == (count = assertionsArray->getCount()))) { + goto exit; + } + outArray = OSArray::withCapacity(count); + if (!outArray) { goto exit; } for (i = 0; i < count; i++) { PMAssertStruct *_a = NULL; OSData *_d = NULL; - OSDictionary *details = NULL; + OSSharedPtr details; _d = OSDynamicCast(OSData, assertionsArray->getObject(i)); if (_d && (_a = (PMAssertStruct *)_d->getBytesNoCopy())) { - OSNumber *_n = NULL; + OSSharedPtr _n; details = OSDictionary::withCapacity(7); if (!details) { continue; } - outArray->setObject(details); - details->release(); + outArray->setObject(details.get()); _n = OSNumber::withNumber(_a->id, 64); if (_n) { - details->setObject(kIOPMDriverAssertionIDKey, _n); - _n->release(); + details->setObject(kIOPMDriverAssertionIDKey, _n.get()); } _n = OSNumber::withNumber(_a->createdTime, 64); if (_n) { - details->setObject(kIOPMDriverAssertionCreatedTimeKey, _n); - _n->release(); + details->setObject(kIOPMDriverAssertionCreatedTimeKey, _n.get()); } _n = OSNumber::withNumber(_a->modifiedTime, 64); if (_n) { - details->setObject(kIOPMDriverAssertionModifiedTimeKey, _n); - _n->release(); + details->setObject(kIOPMDriverAssertionModifiedTimeKey, _n.get()); 
} _n = OSNumber::withNumber((uintptr_t)_a->registryEntryID, 64); if (_n) { - details->setObject(kIOPMDriverAssertionRegistryEntryIDKey, _n); - _n->release(); + details->setObject(kIOPMDriverAssertionRegistryEntryIDKey, _n.get()); } _n = OSNumber::withNumber(_a->level, 64); if (_n) { - details->setObject(kIOPMDriverAssertionLevelKey, _n); - _n->release(); + details->setObject(kIOPMDriverAssertionLevelKey, _n.get()); } _n = OSNumber::withNumber(_a->assertionBits, 64); if (_n) { - details->setObject(kIOPMDriverAssertionAssertedKey, _n); - _n->release(); + details->setObject(kIOPMDriverAssertionAssertedKey, _n.get()); } if (_a->ownerString) { @@ -10516,7 +11380,7 @@ PMAssertionsTracker::copyAssertionsArray(void) } exit: - return outArray; + return os::move(outArray); } IOPMDriverAssertionType @@ -10529,6 +11393,7 @@ IOPMDriverAssertionLevel PMAssertionsTracker::getAssertionLevel( IOPMDriverAssertionType type) { + // FIXME: unused and also wrong if (type && ((type & assertionsKernel) == assertionsKernel)) { return kIOPMDriverAssertionLevelOn; } else { @@ -10676,7 +11541,7 @@ IORootParent::wakeSystem( void ) { } -OSObject * +OSSharedPtr IORootParent::copyProperty( const char * aKey) const { return IOService::copyProperty(aKey); @@ -10697,7 +11562,7 @@ IOPMrootDomain::getWatchdogTimeout() } -#if defined(__i386__) || defined(__x86_64__) +#if defined(__i386__) || defined(__x86_64__) || (defined(__arm64__) && HIBERNATION) IOReturn IOPMrootDomain::restartWithStackshot() { @@ -10872,7 +11737,8 @@ IOPMrootDomain::saveFailureData2File() uint32_t phaseDetail = 0; bool efiFailure = false; - statusCode = OSDynamicCast(OSNumber, getProperty(kIOPMSleepWakeFailureCodeKey)); + OSSharedPtr statusCodeProp = copyProperty(kIOPMSleepWakeFailureCodeKey); + statusCode = OSDynamicCast(OSNumber, statusCodeProp.get()); if (statusCode) { pmStatusCode = statusCode->unsigned64BitValue(); phaseData = pmStatusCode & 0xFFFFFFFF; @@ -10882,7 +11748,7 @@ IOPMrootDomain::saveFailureData2File() efiFailure = true; failureStr[0] = 0; snprintf(failureStr, sizeof(failureStr), "Sleep Wake failure in EFI\n\nFailure code:: 0x%08x 0x%08x\n\nPlease IGNORE the below stackshot\n", phaseDetail, phaseData); - len = strlen(failureStr); + len = (typeof(len))strnlen(failureStr, sizeof(failureStr)); } } @@ -10948,14 +11814,15 @@ IOPMrootDomain::saveFailureData2File() hdr = (swd_hdr *)swd_buffer; outbuf = (char *)hdr + hdr->spindump_offset; + OSBoundedArrayRef boundedOutBuf(outbuf, hdr->alloc_size - hdr->spindump_offset); for (int i = 0; i < 8; i++) { - snprintf(nvram_var_name_buffer, 20, "%s%02d", SWD_STACKSHOT_VAR_PREFIX, i + 1); + snprintf(nvram_var_name_buffer, sizeof(nvram_var_name_buffer), "%s%02d", SWD_STACKSHOT_VAR_PREFIX, i + 1); if (!PEReadNVRAMProperty(nvram_var_name_buffer, NULL, &len)) { LOG("No SleepWake blob to read beyond chunk %d\n", i); break; } - if (PEReadNVRAMProperty(nvram_var_name_buffer, outbuf + concat_len, &len) == FALSE) { + if (PEReadNVRAMProperty(nvram_var_name_buffer, boundedOutBuf.slice(concat_len, len).data(), &len) == FALSE) { PERemoveNVRAMProperty(nvram_var_name_buffer); LOG("Could not read the property :-(\n"); break; @@ -10998,7 +11865,8 @@ exit: void IOPMrootDomain::getFailureData(thread_t *thread, char *failureStr, size_t strLen) { - IORegistryIterator * iter; + OSSharedPtr iter; + OSSharedPtr kextName = NULL; IORegistryEntry * entry; IOService * node; bool nodeFound = false; @@ -11009,14 +11877,14 @@ IOPMrootDomain::getFailureData(thread_t *thread, char *failureStr, size_t strLen const char * phaseString = 
NULL; const char * phaseDescription = NULL; - IOPMServiceInterestNotifier *notifier = OSDynamicCast(IOPMServiceInterestNotifier, notifierObject); + IOPMServiceInterestNotifier *notifier = OSDynamicCast(IOPMServiceInterestNotifier, notifierObject.get()); uint32_t tracePhase = pmTracer->getTracePhase(); *thread = NULL; if ((tracePhase < kIOPMTracePointSystemSleep) || (tracePhase == kIOPMTracePointDarkWakeEntry)) { - snprintf(failureStr, strLen, "%sSleep transition timed out after %d seconds", failureStr, timeout); + snprintf(failureStr, strLen, "Sleep transition timed out after %d seconds", timeout); } else { - snprintf(failureStr, strLen, "%sWake transition timed out after %d seconds", failureStr, timeout); + snprintf(failureStr, strLen, "Wake transition timed out after %d seconds", timeout); } tracePhase2String(tracePhase, &phaseString, &phaseDescription); @@ -11044,24 +11912,28 @@ IOPMrootDomain::getFailureData(thread_t *thread, char *failureStr, size_t strLen break; } } - iter->release(); } if (nodeFound) { - OSKext *kext = OSKext::lookupKextWithAddress((vm_address_t)callMethod); - if (kext) { - objectName = kext->getIdentifierCString(); - kext->release(); + kextName = copyKextIdentifierWithAddress((vm_address_t) callMethod); + if (kextName) { + objectName = kextName->getCStringNoCopy(); } } } if (phaseDescription) { - snprintf(failureStr, strLen, "%s while %s.", failureStr, phaseDescription); + strlcat(failureStr, " while ", strLen); + strlcat(failureStr, phaseDescription, strLen); + strlcat(failureStr, ".", strLen); } if (objectName) { - snprintf(failureStr, strLen, "%s Suspected bundle: %s.", failureStr, objectName); + strlcat(failureStr, " Suspected bundle: ", strLen); + strlcat(failureStr, objectName, strLen); + strlcat(failureStr, ".", strLen); } if (*thread) { - snprintf(failureStr, strLen, "%s Thread 0x%llx.", failureStr, thread_tid(*thread)); + char threadName[40]; + snprintf(threadName, sizeof(threadName), " Thread 0x%llx.", thread_tid(*thread)); + strlcat(failureStr, threadName, strLen); } DLOG("%s\n", failureStr); @@ -11126,6 +11998,7 @@ swd_zoutput(z_streamp strm, Bytef *buf, unsigned len) { unsigned int i = 0; // if outlen > max size don't add to the buffer + assert(buf != NULL); if (strm && buf) { if (swd_zip_var.outlen + len > SWD_COMPRESSED_BUFSIZE) { LOG("No space to GZIP... not writing to NVRAM\n"); @@ -11138,6 +12011,7 @@ swd_zoutput(z_streamp strm, Bytef *buf, unsigned len) swd_zip_var.outlen += len; return len; } + static void swd_zs_free(void * __unused ref, void * __unused ptr) { @@ -11149,6 +12023,10 @@ swd_compress(char *inPtr, char *outPtr, size_t numBytes) int wbits = 12; int memlevel = 3; + if (((unsigned int) numBytes) != numBytes) { + return 0; + } + if (!swd_zs.zalloc) { swd_zs.zalloc = swd_zs_alloc; swd_zs.zfree = swd_zs_free; @@ -11161,8 +12039,6 @@ swd_compress(char *inPtr, char *outPtr, size_t numBytes) } } - - swd_zip_var.zipped = 0; swd_zip_var.totalbytes = 0; // should this be the max that we have? swd_zip_var.lastpercent = 0; @@ -11185,12 +12061,10 @@ swd_compress(char *inPtr, char *outPtr, size_t numBytes) int zr; zs = &swd_zs; - zr = Z_OK; - while (swd_zip_var.error >= 0) { if (!zs->avail_in) { zs->next_in = (unsigned char *)inPtr ? (Bytef *)inPtr : (Bytef *)zs; /* zero marker? 
*/ - zs->avail_in = numBytes; + zs->avail_in = (unsigned int) numBytes; } if (!zs->avail_out) { zs->next_out = (Bytef *)zs; @@ -11209,7 +12083,7 @@ swd_compress(char *inPtr, char *outPtr, size_t numBytes) } } } - zr = Z_OK; + //now flush the stream while (swd_zip_var.error >= 0) { if (!zs->avail_out) { @@ -11246,17 +12120,18 @@ IOPMrootDomain::deleteStackshot() PERemoveNVRAMProperty(kIOSleepWakeFailureString); char nvram_var_name_buf[20]; for (int i = 0; i < 8; i++) { - snprintf(nvram_var_name_buf, 20, "%s%02d", SWD_STACKSHOT_VAR_PREFIX, i + 1); + snprintf(nvram_var_name_buf, sizeof(nvram_var_name_buf), "%s%02d", SWD_STACKSHOT_VAR_PREFIX, i + 1); if (PERemoveNVRAMProperty(nvram_var_name_buf) == false) { LOG("Removing %s returned false\n", nvram_var_name_buf); } } // force NVRAM sync - if (PEWriteNVRAMProperty(kIONVRAMSyncNowPropertyKey, kIONVRAMSyncNowPropertyKey, strlen(kIONVRAMSyncNowPropertyKey)) == false) { + if (PEWriteNVRAMProperty(kIONVRAMSyncNowPropertyKey, kIONVRAMSyncNowPropertyKey, (unsigned int) strlen(kIONVRAMSyncNowPropertyKey)) == false) { DLOG("Failed to force nvram sync\n"); } gRootDomain->swd_lock = 0; } + void IOPMrootDomain::takeStackshot(bool wdogTrigger) { @@ -11265,7 +12140,7 @@ IOPMrootDomain::takeStackshot(bool wdogTrigger) int max_cnt = 2; pid_t pid = 0; kern_return_t kr = KERN_SUCCESS; - uint32_t flags; + uint64_t flags; char * dstAddr; uint32_t size; @@ -11276,9 +12151,14 @@ IOPMrootDomain::takeStackshot(bool wdogTrigger) thread_t thread = NULL; const char * swfPanic = "swfPanic"; + uint32_t bufSize; + int success = 0; - uint32_t bufSize; - int success = 0; +#if defined(__i386__) || defined(__x86_64__) + const bool concise = false; +#else + const bool concise = true; +#endif if (!OSCompareAndSwap(0, 1, &gRootDomain->swd_lock)) { return; @@ -11292,7 +12172,7 @@ IOPMrootDomain::takeStackshot(bool wdogTrigger) if (wdogTrigger) { getFailureData(&thread, failureStr, sizeof(failureStr)); - if (PEGetCoprocessorVersion() >= kCoprocessorVersion2) { + if (concise || (PEGetCoprocessorVersion() >= kCoprocessorVersion2)) { goto skip_stackshot; } } else { @@ -11301,7 +12181,7 @@ IOPMrootDomain::takeStackshot(bool wdogTrigger) clock_get_uptime(&now); SUB_ABSOLUTETIME(&now, &gIOLastWakeAbsTime); absolutetime_to_nanoseconds(now, &nsec); - snprintf(failureStr, sizeof(failureStr), "%sPower button pressed during wake transition after %u ms.\n", failureStr, ((int)((nsec) / NSEC_PER_MSEC))); + snprintf(failureStr, sizeof(failureStr), "Power button pressed during wake transition after %u ms.\n", ((int)((nsec) / NSEC_PER_MSEC))); } if (swd_buffer == NULL) { @@ -11311,10 +12191,7 @@ IOPMrootDomain::takeStackshot(bool wdogTrigger) } } hdr = (swd_hdr *)swd_buffer; - bufSize = hdr->alloc_size;; - - - + bufSize = hdr->alloc_size; dstAddr = (char*)hdr + hdr->spindump_offset; flags = STACKSHOT_KCDATA_FORMAT | STACKSHOT_NO_IO_STATS | STACKSHOT_SAVE_KEXT_LOADINFO | STACKSHOT_ACTIVE_KERNEL_THREADS_ONLY | STACKSHOT_THREAD_WAITINFO; @@ -11335,8 +12212,8 @@ IOPMrootDomain::takeStackshot(bool wdogTrigger) DLOG("Taking snapshot. bytesRemaining: %d\n", bytesRemaining); size = bytesRemaining; - kr = stack_snapshot_from_kernel(pid, dstAddr, size, flags, 0, &bytesWritten); - DLOG("stack_snapshot_from_kernel returned 0x%x. pid: %d bufsize:0x%x flags:0x%x bytesWritten: %d\n", + kr = stack_snapshot_from_kernel(pid, dstAddr, size, flags, 0, 0, &bytesWritten); + DLOG("stack_snapshot_from_kernel returned 0x%x. 
pid: %d bufsize:0x%x flags:0x%llx bytesWritten: %d\n", kr, pid, size, flags, bytesWritten); if (kr == KERN_INSUFFICIENT_BUFFER_SIZE) { if (pid == -1) { @@ -11348,7 +12225,7 @@ IOPMrootDomain::takeStackshot(bool wdogTrigger) } if (kr == KERN_SUCCESS) { if (bytesWritten == 0) { - MSG("Failed to get stackshot(0x%x) bufsize:0x%x flags:0x%x\n", kr, size, flags); + MSG("Failed to get stackshot(0x%x) bufsize:0x%x flags:0x%llx\n", kr, size, flags); continue; } bytesRemaining -= bytesWritten; @@ -11373,14 +12250,14 @@ IOPMrootDomain::takeStackshot(bool wdogTrigger) if (max_chunks < 8) { for (num_chunks = 0; num_chunks < max_chunks; num_chunks++) { - snprintf(nvram_var_name_buffer, 20, "%s%02d", SWD_STACKSHOT_VAR_PREFIX, num_chunks + 1); + snprintf(nvram_var_name_buffer, sizeof(nvram_var_name_buffer), "%s%02d", SWD_STACKSHOT_VAR_PREFIX, num_chunks + 1); if (PEWriteNVRAMPropertyWithCopy(nvram_var_name_buffer, (outbuf + (num_chunks * (2096 - 200))), (2096 - 200)) == FALSE) { LOG("Failed to update NVRAM %d\n", num_chunks); break; } } if (leftover) { - snprintf(nvram_var_name_buffer, 20, "%s%02d", SWD_STACKSHOT_VAR_PREFIX, num_chunks + 1); + snprintf(nvram_var_name_buffer, sizeof(nvram_var_name_buffer), "%s%02d", SWD_STACKSHOT_VAR_PREFIX, num_chunks + 1); if (PEWriteNVRAMPropertyWithCopy(nvram_var_name_buffer, (outbuf + (num_chunks * (2096 - 200))), leftover) == FALSE) { LOG("Failed to update NVRAM with leftovers\n"); } @@ -11403,15 +12280,17 @@ IOPMrootDomain::takeStackshot(bool wdogTrigger) if (failureStr[0]) { // append sleep-wake failure code - snprintf(failureStr, sizeof(failureStr), "%s\nFailure code:: 0x%08x %08x\n", - failureStr, pmTracer->getTraceData(), pmTracer->getTracePhase()); - if (PEWriteNVRAMProperty(kIOSleepWakeFailureString, failureStr, strlen(failureStr)) == false) { + char traceCode[80]; + snprintf(traceCode, sizeof(traceCode), "\nFailure code:: 0x%08x %08x\n", + pmTracer->getTraceData(), pmTracer->getTracePhase()); + strlcat(failureStr, traceCode, sizeof(failureStr)); + if (PEWriteNVRAMProperty(kIOSleepWakeFailureString, failureStr, (unsigned int) strnlen(failureStr, sizeof(failureStr))) == false) { DLOG("Failed to write SleepWake failure string\n"); } } // force NVRAM sync - if (PEWriteNVRAMProperty(kIONVRAMSyncNowPropertyKey, kIONVRAMSyncNowPropertyKey, strlen(kIONVRAMSyncNowPropertyKey)) == false) { + if (PEWriteNVRAMProperty(kIONVRAMSyncNowPropertyKey, kIONVRAMSyncNowPropertyKey, (unsigned int) strlen(kIONVRAMSyncNowPropertyKey)) == false) { DLOG("Failed to force nvram sync\n"); } @@ -11440,12 +12319,15 @@ skip_stackshot: PEHaltRestart(kPERestartCPU); } } - if (PEWriteNVRAMProperty(kIOSleepWakeFailurePanic, swfPanic, strlen(swfPanic)) == false) { + if (!concise && (PEWriteNVRAMProperty(kIOSleepWakeFailurePanic, swfPanic, (unsigned int) strlen(swfPanic)) == false)) { DLOG("Failed to write SleepWake failure panic key\n"); } +#if defined(__x86_64__) if (thread) { panic_with_thread_context(0, NULL, DEBUGGER_OPTION_ATTEMPTCOREDUMPANDREBOOT, thread, "%s", failureStr); - } else { + } else +#endif /* defined(__x86_64__) */ + { panic_with_options(0, NULL, DEBUGGER_OPTION_ATTEMPTCOREDUMPANDREBOOT, "%s", failureStr); } } else { @@ -11462,7 +12344,7 @@ IOPMrootDomain::sleepWakeDebugMemAlloc() swd_hdr *hdr = NULL; void *bufPtr = NULL; - IOBufferMemoryDescriptor *memDesc = NULL; + OSSharedPtr memDesc; if (kIOSleepWakeWdogOff & gIOKitDebug) { @@ -11500,7 +12382,7 @@ IOPMrootDomain::sleepWakeDebugMemAlloc() hdr->spindump_offset = sizeof(swd_hdr); swd_buffer = (void *)hdr; - swd_memDesc = 
memDesc; + swd_memDesc = os::move(memDesc); DLOG("SleepWake debug buffer size:0x%x spindump offset:0x%x\n", hdr->alloc_size, hdr->spindump_offset); exit: @@ -11510,11 +12392,12 @@ exit: void IOPMrootDomain::sleepWakeDebugSpinDumpMemAlloc() { +#if UNUSED vm_size_t size = SWD_SPINDUMP_SIZE; swd_hdr *hdr = NULL; - IOBufferMemoryDescriptor *memDesc = NULL; + OSSharedPtr memDesc; if (!OSCompareAndSwap(0, 1, &gRootDomain->swd_lock)) { return; @@ -11538,9 +12421,11 @@ IOPMrootDomain::sleepWakeDebugSpinDumpMemAlloc() hdr->spindump_offset = sizeof(swd_hdr); swd_spindump_buffer = (void *)hdr; + swd_spindump_memDesc = os::move(memDesc); exit: gRootDomain->swd_lock = 0; +#endif /* UNUSED */ } void @@ -11629,8 +12514,7 @@ exit: return error; } - -#else +#else /* defined(__i386__) || defined(__x86_64__) */ void IOPMrootDomain::sleepWakeDebugTrig(bool restart) @@ -11649,14 +12533,17 @@ IOPMrootDomain::takeStackshot(bool restart) { #pragma unused(restart) } + void IOPMrootDomain::deleteStackshot() { } + void IOPMrootDomain::sleepWakeDebugMemAlloc() { } + void IOPMrootDomain::saveFailureData2File() { @@ -11673,11 +12560,16 @@ IOPMrootDomain::sleepWakeDebugIsWdogEnabled() return false; } +void +IOPMrootDomain::sleepWakeDebugSaveSpinDumpFile() +{ +} + errno_t IOPMrootDomain::sleepWakeDebugSaveFile(const char *name, char *buf, int len) { return 0; } -#endif +#endif /* defined(__i386__) || defined(__x86_64__) */ diff --git a/iokit/Kernel/IOPerfControl.cpp b/iokit/Kernel/IOPerfControl.cpp index b3d1a5aac..eddd2b3e7 100644 --- a/iokit/Kernel/IOPerfControl.cpp +++ b/iokit/Kernel/IOPerfControl.cpp @@ -80,8 +80,8 @@ IOPerfControlClient::init(IOService *driver, uint64_t maxWorkCapacity) } } + // Note: driverIndex is not guaranteed to be unique if maxDriverIndex wraps around. It is intended for debugging only. driverIndex = atomic_fetch_add_explicit(&shared->maxDriverIndex, 1, memory_order_relaxed) + 1; - assertf(driverIndex != 0, "Overflow in driverIndex. Too many IOPerfControlClients created.\n"); // + 1 since index 0 is unused for kIOPerfControlClientWorkUntracked workTableLength = maxWorkCapacity + 1; @@ -159,6 +159,41 @@ IOPerfControlClient::allocateToken(thread_group *thread_group) { uint64_t token = kIOPerfControlClientWorkUntracked; +#if CONFIG_THREAD_GROUPS + auto s = IOSimpleLockLockDisableInterrupt(workTableLock); + + uint64_t num_tries = 0; + size_t index = workTableNextIndex; + // - 1 since entry 0 is for kIOPerfControlClientWorkUntracked + while (num_tries < workTableLength - 1) { + if (workTable[index].thread_group == nullptr) { + thread_group_retain(thread_group); + workTable[index].thread_group = thread_group; + token = index; + // next integer between 1 and workTableLength - 1 + workTableNextIndex = (index % (workTableLength - 1)) + 1; + break; + } + // next integer between 1 and workTableLength - 1 + index = (index % (workTableLength - 1)) + 1; + num_tries += 1; + } +#if (DEVELOPMENT || DEBUG) + if (token == kIOPerfControlClientWorkUntracked) { + /* When investigating a panic here, first check that the driver is not leaking tokens. + * If the driver is not leaking tokens and maximum is less than kMaxWorkTableNumEntries, + * the driver should be modified to pass a larger value to copyClient. + * If the driver is not leaking tokens and maximum is equal to kMaxWorkTableNumEntries, + * this code will have to be modified to support dynamic table growth to support larger + * numbers of tokens. 
+ */ + panic("Tokens allocated for this device exceeded maximum of %zu.\n", + workTableLength - 1); // - 1 since entry 0 is for kIOPerfControlClientWorkUntracked + } +#endif + + IOSimpleLockUnlockEnableInterrupt(workTableLock, s); +#endif return token; } @@ -166,23 +201,38 @@ IOPerfControlClient::allocateToken(thread_group *thread_group) void IOPerfControlClient::deallocateToken(uint64_t token) { +#if CONFIG_THREAD_GROUPS + assertf(token != kIOPerfControlClientWorkUntracked, "Attempt to deallocate token kIOPerfControlClientWorkUntracked\n"); + assertf(token <= workTableLength, "Attempt to deallocate token %llu which is greater than the table size of %zu\n", token, workTableLength); + auto s = IOSimpleLockLockDisableInterrupt(workTableLock); + + auto &entry = workTable[token]; + auto *thread_group = entry.thread_group; + bzero(&entry, sizeof(entry)); + workTableNextIndex = token; + + IOSimpleLockUnlockEnableInterrupt(workTableLock, s); + + // This can call into the performance controller if the last reference is dropped here. Are we sure + // the driver isn't holding any locks? If not, we may want to async this to another context. + thread_group_release(thread_group); +#endif } -bool -IOPerfControlClient::getEntryForToken(uint64_t token, IOPerfControlClient::WorkTableEntry &entry) +IOPerfControlClient::WorkTableEntry * +IOPerfControlClient::getEntryForToken(uint64_t token) { if (token == kIOPerfControlClientWorkUntracked) { - return false; + return nullptr; } if (token >= workTableLength) { panic("Invalid work token (%llu): index out of bounds.", token); } - entry = workTable[token]; - auto *thread_group = entry.thread_group; - assertf(thread_group, "Invalid work token: %llu", token); - return thread_group != nullptr; + WorkTableEntry *entry = &workTable[token]; + assertf(entry->thread_group, "Invalid work token: %llu", token); + return entry; } void @@ -234,23 +284,316 @@ IOPerfControlClient::unregisterDevice(__unused IOService *driver, IOService *dev uint64_t IOPerfControlClient::workSubmit(IOService *device, WorkSubmitArgs *args) { +#if CONFIG_THREAD_GROUPS + auto *thread_group = thread_group_get(current_thread()); + if (!thread_group) { + return kIOPerfControlClientWorkUntracked; + } + + PerfControllerInterface::WorkState state{ + .thread_group_id = thread_group_get_id(thread_group), + .thread_group_data = thread_group_get_machine_data(thread_group), + .work_data = nullptr, + .work_data_size = 0, + .started = false, + }; + if (!shared->interface.workCanSubmit(device, &state, args)) { + return kIOPerfControlClientWorkUntracked; + } + + uint64_t token = allocateToken(thread_group); + if (token != kIOPerfControlClientWorkUntracked) { + state.work_data = &workTable[token].perfcontrol_data; + state.work_data_size = sizeof(workTable[token].perfcontrol_data); + shared->interface.workSubmit(device, tokenToGlobalUniqueToken(token), &state, args); + } + return token; +#else return kIOPerfControlClientWorkUntracked; +#endif } uint64_t IOPerfControlClient::workSubmitAndBegin(IOService *device, WorkSubmitArgs *submitArgs, WorkBeginArgs *beginArgs) { +#if CONFIG_THREAD_GROUPS + auto *thread_group = thread_group_get(current_thread()); + if (!thread_group) { + return kIOPerfControlClientWorkUntracked; + } + + PerfControllerInterface::WorkState state{ + .thread_group_id = thread_group_get_id(thread_group), + .thread_group_data = thread_group_get_machine_data(thread_group), + .work_data = nullptr, + .work_data_size = 0, + .started = false, + }; + if (!shared->interface.workCanSubmit(device, &state, 
submitArgs)) { + return kIOPerfControlClientWorkUntracked; + } + + uint64_t token = allocateToken(thread_group); + if (token != kIOPerfControlClientWorkUntracked) { + auto &entry = workTable[token]; + state.work_data = &entry.perfcontrol_data; + state.work_data_size = sizeof(workTable[token].perfcontrol_data); + shared->interface.workSubmit(device, tokenToGlobalUniqueToken(token), &state, submitArgs); + state.started = true; + shared->interface.workBegin(device, tokenToGlobalUniqueToken(token), &state, beginArgs); + markEntryStarted(token, true); + } + return token; +#else return kIOPerfControlClientWorkUntracked; +#endif } void IOPerfControlClient::workBegin(IOService *device, uint64_t token, WorkBeginArgs *args) { +#if CONFIG_THREAD_GROUPS + WorkTableEntry *entry = getEntryForToken(token); + if (entry == nullptr) { + return; + } + + assertf(!entry->started, "Work for token %llu was already started", token); + + PerfControllerInterface::WorkState state{ + .thread_group_id = thread_group_get_id(entry->thread_group), + .thread_group_data = thread_group_get_machine_data(entry->thread_group), + .work_data = &entry->perfcontrol_data, + .work_data_size = sizeof(entry->perfcontrol_data), + .started = true, + }; + shared->interface.workBegin(device, tokenToGlobalUniqueToken(token), &state, args); + markEntryStarted(token, true); +#endif } void IOPerfControlClient::workEnd(IOService *device, uint64_t token, WorkEndArgs *args, bool done) { +#if CONFIG_THREAD_GROUPS + WorkTableEntry *entry = getEntryForToken(token); + if (entry == nullptr) { + return; + } + + PerfControllerInterface::WorkState state{ + .thread_group_id = thread_group_get_id(entry->thread_group), + .thread_group_data = thread_group_get_machine_data(entry->thread_group), + .work_data = &entry->perfcontrol_data, + .work_data_size = sizeof(entry->perfcontrol_data), + .started = entry->started, + }; + shared->interface.workEnd(device, tokenToGlobalUniqueToken(token), &state, args, done); + + if (done) { + deallocateToken(token); + } else { + markEntryStarted(token, false); + } +#endif +} + +static _Atomic uint64_t unique_work_context_id = 1ull; + +class IOPerfControlWorkContext : public OSObject +{ + OSDeclareDefaultStructors(IOPerfControlWorkContext); + +public: + uint64_t id; + struct thread_group *thread_group; + bool started; + uint8_t perfcontrol_data[32]; + + bool init() override; + void reset(); + void free() override; +}; + +OSDefineMetaClassAndStructors(IOPerfControlWorkContext, OSObject); + +bool +IOPerfControlWorkContext::init() +{ + if (!super::init()) { + return false; + } + id = atomic_fetch_add_explicit(&unique_work_context_id, 1, memory_order_relaxed) + 1; + reset(); + return true; +} + +void +IOPerfControlWorkContext::reset() +{ + thread_group = nullptr; + started = false; + bzero(perfcontrol_data, sizeof(perfcontrol_data)); +} + +void +IOPerfControlWorkContext::free() +{ + assertf(thread_group == nullptr, "IOPerfControlWorkContext ID %llu being released without calling workEnd!\n", id); + super::free(); +} + +OSObject * +IOPerfControlClient::copyWorkContext() +{ + IOPerfControlWorkContext *context = new IOPerfControlWorkContext; + + if (context == nullptr) { + return nullptr; + } + + if (!context->init()) { + context->free(); + return nullptr; + } + + return OSDynamicCast(OSObject, context); +} + +bool +IOPerfControlClient::workSubmitAndBeginWithContext(IOService *device, OSObject *context, WorkSubmitArgs *submitArgs, WorkBeginArgs *beginArgs) +{ +#if CONFIG_THREAD_GROUPS + + if (workSubmitWithContext(device, context, 
submitArgs) == false) { + return false; + } + + IOPerfControlWorkContext *work_context = OSDynamicCast(IOPerfControlWorkContext, context); + + PerfControllerInterface::WorkState state{ + .thread_group_id = thread_group_get_id(work_context->thread_group), + .thread_group_data = thread_group_get_machine_data(work_context->thread_group), + .work_data = &work_context->perfcontrol_data, + .work_data_size = sizeof(work_context->perfcontrol_data), + .started = true, + }; + + shared->interface.workBegin(device, work_context->id, &state, beginArgs); + + work_context->started = true; + + return true; +#else + return false; +#endif +} + +bool +IOPerfControlClient::workSubmitWithContext(IOService *device, OSObject *context, WorkSubmitArgs *args) +{ +#if CONFIG_THREAD_GROUPS + IOPerfControlWorkContext *work_context = OSDynamicCast(IOPerfControlWorkContext, context); + + if (work_context == nullptr) { + return false; + } + + auto *thread_group = thread_group_get(current_thread()); + assert(thread_group != nullptr); + + assertf(!work_context->started, "IOPerfControlWorkContext ID %llu was already started", work_context->id); + assertf(work_context->thread_group == nullptr, "IOPerfControlWorkContext ID %llu has already taken a refcount on TG 0x%p \n", work_context->id, (void *)(work_context->thread_group)); + + PerfControllerInterface::WorkState state{ + .thread_group_id = thread_group_get_id(thread_group), + .thread_group_data = thread_group_get_machine_data(thread_group), + .work_data = nullptr, + .work_data_size = 0, + .started = false, + }; + if (!shared->interface.workCanSubmit(device, &state, args)) { + return false; + } + + work_context->thread_group = thread_group_retain(thread_group); + + state.work_data = &work_context->perfcontrol_data; + state.work_data_size = sizeof(work_context->perfcontrol_data); + + shared->interface.workSubmit(device, work_context->id, &state, args); + + return true; +#else + return false; +#endif +} + +void +IOPerfControlClient::workBeginWithContext(IOService *device, OSObject *context, WorkBeginArgs *args) +{ +#if CONFIG_THREAD_GROUPS + IOPerfControlWorkContext *work_context = OSDynamicCast(IOPerfControlWorkContext, context); + + if (work_context == nullptr) { + return; + } + + if (work_context->thread_group == nullptr) { + // This Work Context has not taken a refcount on a TG + return; + } + + assertf(!work_context->started, "IOPerfControlWorkContext %llu was already started", work_context->id); + + PerfControllerInterface::WorkState state{ + .thread_group_id = thread_group_get_id(work_context->thread_group), + .thread_group_data = thread_group_get_machine_data(work_context->thread_group), + .work_data = &work_context->perfcontrol_data, + .work_data_size = sizeof(work_context->perfcontrol_data), + .started = true, + }; + shared->interface.workBegin(device, work_context->id, &state, args); + + work_context->started = true; +#endif +} + +void +IOPerfControlClient::workEndWithContext(IOService *device, OSObject *context, WorkEndArgs *args, bool done) +{ +#if CONFIG_THREAD_GROUPS + IOPerfControlWorkContext *work_context = OSDynamicCast(IOPerfControlWorkContext, context); + + if (work_context == nullptr) { + return; + } + + if (work_context->thread_group == nullptr) { + return; + } + + PerfControllerInterface::WorkState state{ + .thread_group_id = thread_group_get_id(work_context->thread_group), + .thread_group_data = thread_group_get_machine_data(work_context->thread_group), + .work_data = &work_context->perfcontrol_data, + .work_data_size = 
sizeof(work_context->perfcontrol_data), + .started = work_context->started, + }; + + shared->interface.workEnd(device, work_context->id, &state, args, done); + + if (done) { + thread_group_release(work_context->thread_group); + work_context->reset(); + } else { + work_context->started = false; + } + + return; +#else + return; +#endif } IOReturn diff --git a/iokit/Kernel/IOPlatformActions.cpp b/iokit/Kernel/IOPlatformActions.cpp new file mode 100644 index 000000000..3574de9c1 --- /dev/null +++ b/iokit/Kernel/IOPlatformActions.cpp @@ -0,0 +1,339 @@ +/* + * Copyright (c) 2019 Apple Computer, Inc. All rights reserved. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. + * + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. 
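The context-based IOPerfControl entry points above (copyWorkContext, workSubmitWithContext, workBeginWithContext, workEndWithContext) replace the fixed table with a caller-owned IOPerfControlWorkContext that holds the thread-group reference and a started flag; workEnd with done == true releases that reference and reset() returns the object to its initial state so it can be reused, and free() asserts that this actually happened. The sketch below is a rough user-space model of that lifecycle with a plain counter standing in for the retained thread group; the names are illustrative only.

// User-space model of the IOPerfControlWorkContext state machine; illustrative only.
#include <cassert>
#include <cstdint>
#include <cstdio>

struct WorkContext {
    uint64_t id;
    int refHeld = 0;        // stands in for the retained thread_group pointer
    bool started = false;

    explicit WorkContext(uint64_t i) : id(i) {}
    ~WorkContext() { assert(refHeld == 0 && "destroyed without workEnd(done=true)"); }

    void submit() {                 // workSubmitWithContext
        assert(!started && refHeld == 0);
        refHeld = 1;                // thread_group_retain()
    }
    void begin() {                  // workBeginWithContext
        if (refHeld == 0) return;   // nothing was submitted
        assert(!started);
        started = true;
    }
    void end(bool done) {           // workEndWithContext
        if (refHeld == 0) return;
        if (done) { refHeld = 0; started = false; }   // release + reset()
        else      { started = false; }                // more work may begin later
    }
};

int main() {
    WorkContext ctx(1);
    ctx.submit(); ctx.begin(); ctx.end(false);  // paused, still holds the reference
    ctx.begin();  ctx.end(true);                // finished, reference dropped
    printf("context %llu idle again\n", (unsigned long long)ctx.id);
    return 0;
}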
+ * + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ + */ + +extern "C" { +#include +#include +#include +} + +#include +#include +#include +#include +#include "IOKitKernelInternal.h" + +static IOLock *gIOPlatformActionsLock; + +typedef kern_return_t (*iocpu_platform_action_t)(void * refcon0, void * refcon1, uint32_t priority, + void * param1, void * param2, void * param3, + const char * name); + +struct iocpu_platform_action_entry { + queue_chain_t link; + iocpu_platform_action_t action; + int32_t priority; + const char * name; + void * refcon0; + void * refcon1; + boolean_t callout_in_progress; + struct iocpu_platform_action_entry * alloc_list; +}; +typedef struct iocpu_platform_action_entry iocpu_platform_action_entry_t; + +enum { + kQueueSleep = 0, + kQueueWake = 1, + kQueueQuiesce = 2, + kQueueActive = 3, + kQueueHaltRestart = 4, + kQueuePanic = 5, + kQueueCount = 6 +}; + +const OSSymbol * gIOPlatformSleepActionKey; +const OSSymbol * gIOPlatformWakeActionKey; +const OSSymbol * gIOPlatformQuiesceActionKey; +const OSSymbol * gIOPlatformActiveActionKey; +const OSSymbol * gIOPlatformHaltRestartActionKey; +const OSSymbol * gIOPlatformPanicActionKey; + +static queue_head_t gActionQueues[kQueueCount]; +static const OSSymbol * gActionSymbols[kQueueCount]; + +static bool +IOInstallServicePlatformAction(IOService * service, uint32_t qidx); + +static void +iocpu_add_platform_action(queue_head_t * queue, iocpu_platform_action_entry_t * entry) +{ + iocpu_platform_action_entry_t * next; + + queue_iterate(queue, next, iocpu_platform_action_entry_t *, link) + { + if (next->priority > entry->priority) { + queue_insert_before(queue, entry, next, iocpu_platform_action_entry_t *, link); + return; + } + } + queue_enter(queue, entry, iocpu_platform_action_entry_t *, link); // at tail +} + +static void +iocpu_remove_platform_action(iocpu_platform_action_entry_t * entry) +{ + remque(&entry->link); +} + +static kern_return_t +iocpu_run_platform_actions(queue_head_t * queue, uint32_t first_priority, uint32_t last_priority, + void * param1, void * param2, void * param3, boolean_t allow_nested_callouts) +{ + kern_return_t ret = KERN_SUCCESS; + kern_return_t result = KERN_SUCCESS; + iocpu_platform_action_entry_t * next; + + queue_iterate(queue, next, iocpu_platform_action_entry_t *, link) + { + uint32_t pri = (next->priority < 0) ? 
-next->priority : next->priority; + if ((pri >= first_priority) && (pri <= last_priority)) { + //kprintf("[%p]", next->action); + if (!allow_nested_callouts && !next->callout_in_progress) { + next->callout_in_progress = TRUE; + ret = (*next->action)(next->refcon0, next->refcon1, pri, param1, param2, param3, next->name); + next->callout_in_progress = FALSE; + } else if (allow_nested_callouts) { + ret = (*next->action)(next->refcon0, next->refcon1, pri, param1, param2, param3, next->name); + } + } + if (KERN_SUCCESS == result) { + result = ret; + } + } + return result; +} + +extern "C" kern_return_t +IOCPURunPlatformQuiesceActions(void) +{ + assert(preemption_enabled() == false); + return iocpu_run_platform_actions(&gActionQueues[kQueueQuiesce], 0, 0U - 1, + NULL, NULL, NULL, TRUE); +} + +extern "C" kern_return_t +IOCPURunPlatformActiveActions(void) +{ + assert(preemption_enabled() == false); + ml_hibernate_active_pre(); + kern_return_t result = iocpu_run_platform_actions(&gActionQueues[kQueueActive], 0, 0U - 1, + NULL, NULL, NULL, TRUE); + ml_hibernate_active_post(); + return result; +} + +extern "C" kern_return_t +IOCPURunPlatformHaltRestartActions(uint32_t message) +{ + if (!gActionQueues[kQueueHaltRestart].next) { + return kIOReturnNotReady; + } + return iocpu_run_platform_actions(&gActionQueues[kQueueHaltRestart], 0, 0U - 1, + (void *)(uintptr_t) message, NULL, NULL, TRUE); +} + +extern "C" kern_return_t +IOCPURunPlatformPanicActions(uint32_t message, uint32_t details) +{ + // Don't allow nested calls of panic actions + if (!gActionQueues[kQueuePanic].next) { + return kIOReturnNotReady; + } + return iocpu_run_platform_actions(&gActionQueues[kQueuePanic], 0, 0U - 1, + (void *)(uintptr_t) message, (void *)(uintptr_t) details, NULL, FALSE); +} + +extern "C" kern_return_t +IOCPURunPlatformPanicSyncAction(void *addr, uint32_t offset, uint32_t len) +{ + PE_panic_save_context_t context = { + .psc_buffer = addr, + .psc_offset = offset, + .psc_length = len + }; + + // Don't allow nested calls of panic actions + if (!gActionQueues[kQueuePanic].next) { + return kIOReturnNotReady; + } + return iocpu_run_platform_actions(&gActionQueues[kQueuePanic], 0, 0U - 1, + (void *)(uintptr_t)(kPEPanicSync), &context, NULL, FALSE); +} + +void +IOPlatformActionsPreSleep(void) +{ + iocpu_run_platform_actions(&gActionQueues[kQueueSleep], 0, 0U - 1, + NULL, NULL, NULL, TRUE); +} + +void +IOPlatformActionsPostResume(void) +{ + iocpu_run_platform_actions(&gActionQueues[kQueueWake], 0, 0U - 1, + NULL, NULL, NULL, TRUE); +} + +void +IOPlatformActionsInitialize(void) +{ + gIOPlatformActionsLock = IOLockAlloc(); + + for (uint32_t qidx = kQueueSleep; qidx < kQueueCount; qidx++) { + queue_init(&gActionQueues[qidx]); + } + + gIOPlatformSleepActionKey = gActionSymbols[kQueueSleep] + = OSSymbol::withCStringNoCopy(kIOPlatformSleepActionKey); + gIOPlatformWakeActionKey = gActionSymbols[kQueueWake] + = OSSymbol::withCStringNoCopy(kIOPlatformWakeActionKey); + gIOPlatformQuiesceActionKey = gActionSymbols[kQueueQuiesce] + = OSSymbol::withCStringNoCopy(kIOPlatformQuiesceActionKey); + gIOPlatformActiveActionKey = gActionSymbols[kQueueActive] + = OSSymbol::withCStringNoCopy(kIOPlatformActiveActionKey); + gIOPlatformHaltRestartActionKey = gActionSymbols[kQueueHaltRestart] + = OSSymbol::withCStringNoCopy(kIOPlatformHaltRestartActionKey); + gIOPlatformPanicActionKey = gActionSymbols[kQueuePanic] + = OSSymbol::withCStringNoCopy(kIOPlatformPanicActionKey); +} + +static kern_return_t +IOServicePlatformAction(void * refcon0, void * refcon1, 
uint32_t priority, + void * param1, void * param2, void * param3, + const char * service_name) +{ + IOReturn ret; + IOService * service = (IOService *) refcon0; + const OSSymbol * function = (const OSSymbol *) refcon1; + + IOLog("%s -> %s\n", function->getCStringNoCopy(), service_name); + + ret = service->callPlatformFunction(function, false, + (void *)(uintptr_t) priority, param1, param2, param3); + + return ret; +} + +static bool +IOInstallServicePlatformAction(IOService * service, uint32_t qidx) +{ + iocpu_platform_action_entry_t * entry; + OSNumber * num; + uint32_t priority; + const OSSymbol * key = gActionSymbols[qidx]; + queue_head_t * queue = &gActionQueues[qidx]; + bool reverse; + + num = OSDynamicCast(OSNumber, service->getProperty(key)); + if (!num) { + return true; + } + + reverse = false; + switch (qidx) { + case kQueueWake: + case kQueueActive: + reverse = true; + break; + } + queue_iterate(queue, entry, iocpu_platform_action_entry_t *, link) + { + if (service == entry->refcon0) { + return true; + } + } + + entry = IONew(iocpu_platform_action_entry_t, 1); + entry->action = &IOServicePlatformAction; + entry->name = service->getName(); + priority = num->unsigned32BitValue(); + if (reverse) { + entry->priority = -priority; + } else { + entry->priority = priority; + } + entry->refcon0 = service; + entry->refcon1 = (void *) key; + entry->callout_in_progress = FALSE; + + iocpu_add_platform_action(queue, entry); + return false; +} + + +IOReturn +IOInstallServicePlatformActions(IOService * service) +{ + IOLockLock(gIOPlatformActionsLock); + + IOInstallServicePlatformAction(service, kQueueHaltRestart); + IOInstallServicePlatformAction(service, kQueuePanic); + + IOLockUnlock(gIOPlatformActionsLock); + + return kIOReturnSuccess; +} + +IOReturn +IOInstallServiceSleepPlatformActions(IOService * service) +{ + IOLockLock(gIOPlatformActionsLock); + + for (uint32_t qidx = kQueueSleep; qidx <= kQueueActive; qidx++) { + IOInstallServicePlatformAction(service, qidx); + } + + IOLockUnlock(gIOPlatformActionsLock); + + return kIOReturnSuccess; +} + +IOReturn +IORemoveServicePlatformActions(IOService * service) +{ + iocpu_platform_action_entry_t * entry; + iocpu_platform_action_entry_t * next; + + IOLockLock(gIOPlatformActionsLock); + + for (uint32_t qidx = kQueueSleep; qidx < kQueueCount; qidx++) { + next = (typeof(entry))queue_first(&gActionQueues[qidx]); + while (!queue_end(&gActionQueues[qidx], &next->link)) { + entry = next; + next = (typeof(entry))queue_next(&entry->link); + if (service == entry->refcon0) { + iocpu_remove_platform_action(entry); + IODelete(entry, iocpu_platform_action_entry_t, 1); + } + } + } + + IOLockUnlock(gIOPlatformActionsLock); + + return kIOReturnSuccess; +} diff --git a/iokit/Kernel/IOPlatformExpert.cpp b/iokit/Kernel/IOPlatformExpert.cpp index 1fb74c642..407dd5b02 100644 --- a/iokit/Kernel/IOPlatformExpert.cpp +++ b/iokit/Kernel/IOPlatformExpert.cpp @@ -27,6 +27,7 @@ */ #include +#include #include #include #include @@ -42,17 +43,25 @@ #include #include +#include "IOKitKernelInternal.h" + #include #include #include +#include #include #include +#if defined(__arm64__) +#include +#endif + extern "C" { #include #include #include +#include } #define kShutdownTimeout 30 //in secs @@ -83,14 +92,14 @@ uint32_t gEnforceQuiesceSafety = 0; OSDefineMetaClassAndStructors(IOPlatformExpert, IOService) -OSMetaClassDefineReservedUsed(IOPlatformExpert, 0); -OSMetaClassDefineReservedUsed(IOPlatformExpert, 1); -OSMetaClassDefineReservedUsed(IOPlatformExpert, 2); 
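iocpu_add_platform_action keeps each queue sorted by ascending priority (new entries are inserted before the first entry with a larger value), and IOInstallServicePlatformAction negates the priority for the wake and active queues so they run in the reverse order of the sleep and quiesce queues; iocpu_run_platform_actions then compares the absolute value against the requested [first_priority, last_priority] window. Below is a small stand-alone sketch of that ordering scheme, with std::list in place of the kernel queue_head_t and placeholder names; it is not the kernel code.

// Sketch of the priority-sorted action queues; user-space stand-in for queue_head_t.
#include <cstdint>
#include <cstdio>
#include <list>

struct Action {
    int32_t priority;            // negative == installed in "reverse" order (wake/active)
    const char *name;
};

static void addAction(std::list<Action> &queue, Action entry) {
    for (auto it = queue.begin(); it != queue.end(); ++it) {
        if (it->priority > entry.priority) {
            queue.insert(it, entry);   // keep ascending priority order
            return;
        }
    }
    queue.push_back(entry);            // at tail
}

static void runActions(const std::list<Action> &queue, uint32_t first, uint32_t last) {
    for (const auto &a : queue) {
        uint32_t pri = (a.priority < 0) ? (uint32_t)(-a.priority) : (uint32_t)a.priority;
        if (pri >= first && pri <= last) {
            printf("run %s (pri %u)\n", a.name, pri);
        }
    }
}

int main() {
    std::list<Action> wakeQueue;
    // Wake/active entries are installed with negated priorities, so a driver that
    // sleeps late (high priority) wakes early.
    addAction(wakeQueue, {-100, "storage"});
    addAction(wakeQueue, {-10,  "display"});
    runActions(wakeQueue, 0, ~0u);     // storage first, then display
    return 0;
}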
-OSMetaClassDefineReservedUsed(IOPlatformExpert, 3); -OSMetaClassDefineReservedUsed(IOPlatformExpert, 4); +OSMetaClassDefineReservedUsedX86(IOPlatformExpert, 0); +OSMetaClassDefineReservedUsedX86(IOPlatformExpert, 1); +OSMetaClassDefineReservedUsedX86(IOPlatformExpert, 2); +OSMetaClassDefineReservedUsedX86(IOPlatformExpert, 3); +OSMetaClassDefineReservedUsedX86(IOPlatformExpert, 4); +OSMetaClassDefineReservedUsedX86(IOPlatformExpert, 5); +OSMetaClassDefineReservedUsedX86(IOPlatformExpert, 6); -OSMetaClassDefineReservedUnused(IOPlatformExpert, 5); -OSMetaClassDefineReservedUnused(IOPlatformExpert, 6); OSMetaClassDefineReservedUnused(IOPlatformExpert, 7); OSMetaClassDefineReservedUnused(IOPlatformExpert, 8); OSMetaClassDefineReservedUnused(IOPlatformExpert, 9); @@ -167,17 +176,11 @@ IOPlatformExpert::start( IOService * provider ) PMInstantiatePowerDomains(); - // Parse the serial-number data and publish a user-readable string - OSData* mydata = (OSData*) (provider->getProperty("serial-number")); - if (mydata != NULL) { - OSString *serNoString = createSystemSerialNumberString(mydata); - if (serNoString != NULL) { - provider->setProperty(kIOPlatformSerialNumberKey, serNoString); - serNoString->release(); - } - } +#if !defined(__x86_64__) + publishPlatformUUIDAndSerial(); +#endif /* !defined(__x86_64__) */ -#if !CONFIG_EMBEDDED +#if defined (__x86_64__) if (PEGetCoprocessorVersion() >= kCoprocessorVersion2) { coprocessor_paniclog_flush = TRUE; extended_debug_log_init(); @@ -239,6 +242,16 @@ IOPlatformExpert::compareNubName( const IOService * nub, return nub->IORegistryEntry::compareName( name, matched ); } +bool +IOPlatformExpert::compareNubName( const IOService * nub, + OSString * name, OSSharedPtr& matched ) const +{ + OSString* matchedRaw = NULL; + bool result = compareNubName(nub, name, &matchedRaw); + matched.reset(matchedRaw, OSNoRetain); + return result; +} + IOReturn IOPlatformExpert::getNubResources( IOService * nub ) { @@ -293,6 +306,18 @@ IOPlatformExpert::getModelName( char * /*name*/, int /*maxLength*/) return false; } +bool +IOPlatformExpert::getTargetName( char * /*name*/, int /*maxLength*/) +{ + return false; +} + +bool +IOPlatformExpert::getProductName( char * /*name*/, int /*maxLength*/) +{ + return false; +} + OSString* IOPlatformExpert::createSystemSerialNumberString(OSData* myProperty) { @@ -325,7 +350,7 @@ IOPlatformExpert::haltRestart(unsigned int type) type = kPEHaltCPU; } -#if !CONFIG_EMBEDDED +#if defined (__x86_64__) // On ARM kPEPanicRestartCPU is supported in the drivers if (type == kPEPanicRestartCPU) { type = kPERestartCPU; @@ -439,9 +464,12 @@ IOPlatformExpert::lookUpInterruptController(OSSymbol *name) void IOPlatformExpert::setCPUInterruptProperties(IOService *service) { - IOCPUInterruptController *controller; + IOInterruptController *controller; + + OSDictionary *matching = serviceMatching("IOInterruptController"); + matching = propertyMatching(gPlatformInterruptControllerName, kOSBooleanTrue, matching); - controller = OSDynamicCast(IOCPUInterruptController, waitForService(serviceMatching("IOCPUInterruptController"))); + controller = OSDynamicCast(IOInterruptController, waitForService(matching)); if (controller) { controller->setCPUInterruptProperties(service); } @@ -840,7 +868,7 @@ getCStringForObject(OSObject *inObj, char *outStr, size_t outStrLen) /* IOShutdownNotificationsTimedOut * - Called from a timer installed by PEHaltRestart */ -#ifdef CONFIG_EMBEDDED +#if !defined(__x86_64) __abortlike #endif static void @@ -848,11 +876,11 @@ 
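Several overloads added in this patch (compareNubName here, and later readNVRAMProperty, compareName/compareNames, IOPolledFileOpen) bridge the older raw-pointer out-parameter APIs to OSSharedPtr by calling the legacy method and adopting the already-retained result with reset(raw, OSNoRetain), so no extra retain/release pair is introduced. The sketch below reproduces that adopt-versus-retain distinction with a minimal reference-counted type; nothing here is the libkern implementation, it only models the ownership transfer.

// Minimal model of adopting an already-retained pointer (OSNoRetain-style reset).
#include <cassert>
#include <cstdio>

struct Refed {
    int refs = 1;                       // object is created retained
    void retain()  { refs++; }
    void release() { if (--refs == 0) { printf("freed\n"); delete this; } }
};

class SharedPtr {
    Refed *p = nullptr;
public:
    enum Adopt { NoRetain, Retain };
    ~SharedPtr() { reset(nullptr, NoRetain); }
    void reset(Refed *raw, Adopt how) {
        if (raw && how == Retain) raw->retain();   // take our own reference
        if (p) p->release();
        p = raw;                                   // NoRetain: adopt the caller's +1
    }
    Refed *get() const { return p; }
};

// Legacy-style API: returns a +1 reference through an out-parameter.
static bool legacyCopyValue(Refed **out) { *out = new Refed; return true; }

// New-style wrapper, mirroring the pattern used throughout this patch.
static bool copyValue(SharedPtr &out) {
    Refed *raw = nullptr;
    bool ok = legacyCopyValue(&raw);
    out.reset(raw, SharedPtr::NoRetain);   // adopt; do not add a second reference
    return ok;
}

int main() {
    { SharedPtr v; copyValue(v); assert(v.get()->refs == 1); }   // exactly one reference
    return 0;
}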
IOShutdownNotificationsTimedOut( thread_call_param_t p0, thread_call_param_t p1) { -#ifdef CONFIG_EMBEDDED +#if !defined(__x86_64__) /* 30 seconds has elapsed - panic */ panic("Halt/Restart Timed Out"); -#else /* ! CONFIG_EMBEDDED */ +#else /* !defined(__x86_64__) */ int type = (int)(long)p0; uint32_t timeout = (uint32_t)(uintptr_t)p1; @@ -867,7 +895,7 @@ IOShutdownNotificationsTimedOut( if (gIOPlatform) { gIOPlatform->haltRestart(type); } -#endif /* CONFIG_EMBEDDED */ +#endif /* defined(__x86_64__) */ } @@ -876,6 +904,11 @@ extern "C" { * Callouts from BSD for machine name & model */ +/* + * PEGetMachineName() and PEGetModelName() are inconsistent across + * architectures, and considered deprecated. Use PEGetTargetName() and + * PEGetProductName() instead. + */ boolean_t PEGetMachineName( char * name, int maxLength ) { @@ -886,6 +919,11 @@ PEGetMachineName( char * name, int maxLength ) } } +/* + * PEGetMachineName() and PEGetModelName() are inconsistent across + * architectures, and considered deprecated. Use PEGetTargetName() and + * PEGetProductName() instead. + */ boolean_t PEGetModelName( char * name, int maxLength ) { @@ -896,18 +934,51 @@ PEGetModelName( char * name, int maxLength ) } } +boolean_t +PEGetTargetName( char * name, int maxLength ) +{ + if (gIOPlatform) { + return gIOPlatform->getTargetName( name, maxLength ); + } else { + return false; + } +} + +boolean_t +PEGetProductName( char * name, int maxLength ) +{ + if (gIOPlatform) { + return gIOPlatform->getProductName( name, maxLength ); + } else { + return false; + } +} + int PEGetPlatformEpoch(void) { if (gIOPlatform) { - return gIOPlatform->getBootROMType(); + return (int) gIOPlatform->getBootROMType(); } else { return -1; } } +/* Handle necessary platform specific actions prior to panic */ +void +PEInitiatePanic(void) +{ +#if defined(__arm64__) + /* + * Trigger a TLB flush so any hard hangs exercise the SoC diagnostic + * collection flow rather than hanging late in panic (see rdar://58062030) + */ + flush_mmu_tlb_entry(0); +#endif +} + int -PEHaltRestart(unsigned int type) +PEHaltRestartInternal(unsigned int type, uint32_t details) { IOPMrootDomain *pmRootDomain; AbsoluteTime deadline; @@ -936,7 +1007,7 @@ PEHaltRestart(unsigned int type) * the timer expires. If the device wants a different * timeout, use that value instead of 30 seconds. */ -#if CONFIG_EMBEDDED +#if defined(__arm__) || defined(__arm64__) #define RESTART_NODE_PATH "/defaults" #else #define RESTART_NODE_PATH "/chosen" @@ -973,50 +1044,42 @@ PEHaltRestart(unsigned int type) * later. PM internals make it very hard to wait for asynchronous * replies. */ - } else if (type == kPEPanicRestartCPU || type == kPEPanicSync || type == kPEPanicRestartCPUNoPanicEndCallouts || - type == kPEPanicRestartCPUNoCallouts) { + } else if (type == kPEPanicRestartCPU || type == kPEPanicSync || type == kPEPanicRestartCPUNoCallouts) { if (type == kPEPanicRestartCPU) { // Notify any listeners that we're done collecting // panic data before we call through to do the restart -#if !CONFIG_EMBEDDED +#if defined(__x86_64__) if (coprocessor_cross_panic_enabled) #endif - IOCPURunPlatformPanicActions(kPEPanicEnd); - } - - if ((type == kPEPanicRestartCPU) || (type == kPEPanicRestartCPUNoPanicEndCallouts)) { - // Callout to shutdown the disk driver once we've returned from the - // kPEPanicEnd callbacks (if appropriate) and we know all coredumps - // on this system are complete). 
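PEHaltRestart arms IOShutdownNotificationsTimedOut before delivering shutdown or restart notifications: on non-x86 targets the callout simply panics once the deadline passes, and the 30-second default can be overridden by a property under RESTART_NODE_PATH in the device tree. The sketch below models that watchdog in user space with a condition variable; the names are placeholders and the kernel uses a thread_call rather than std::thread, so treat this only as an illustration of the timeout logic.

// User-space model of the halt/restart notification watchdog; illustrative only.
#include <chrono>
#include <condition_variable>
#include <cstdio>
#include <cstdlib>
#include <mutex>
#include <optional>
#include <thread>

static constexpr unsigned kDefaultTimeoutSecs = 30;   // kShutdownTimeout analogue

// A device-tree node may override the default (RESTART_NODE_PATH property).
static unsigned effectiveTimeout(std::optional<unsigned> dtOverride) {
    return dtOverride.value_or(kDefaultTimeoutSecs);
}

int main() {
    std::mutex m;
    std::condition_variable cv;
    bool notificationsDone = false;

    std::thread watchdog([&] {
        std::unique_lock<std::mutex> lk(m);
        if (!cv.wait_for(lk, std::chrono::seconds(effectiveTimeout(std::nullopt)),
            [&] { return notificationsDone; })) {
            // IOShutdownNotificationsTimedOut analogue: shutdown took too long
            fprintf(stderr, "Halt/Restart Timed Out\n");
            std::abort();
        }
    });

    // ... deliver shutdown notifications to clients here ...
    { std::lock_guard<std::mutex> lk(m); notificationsDone = true; }
    cv.notify_one();
    watchdog.join();
    printf("shutdown notifications completed in time\n");
    return 0;
}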
- IOCPURunPlatformPanicActions(kPEPanicDiskShutdown); - } - - if (type == kPEPanicRestartCPUNoPanicEndCallouts || type == kPEPanicRestartCPUNoCallouts) { - // Replace the wrapper type with the type drivers handle + IOCPURunPlatformPanicActions(kPEPanicEnd, details); + } else if (type == kPEPanicRestartCPUNoCallouts) { + // We skipped the callouts so now set the type to + // the variant that the platform uses for panic restarts. type = kPEPanicRestartCPU; } + // Do an initial sync to flush as much panic data as possible, // in case we have a problem in one of the platorm panic handlers. // After running the platform handlers, do a final sync w/ // platform hardware quiesced for the panic. PE_sync_panic_buffers(); - IOCPURunPlatformPanicActions(type); + IOCPURunPlatformPanicActions(type, details); PE_sync_panic_buffers(); } else if (type == kPEPanicEnd) { -#if !CONFIG_EMBEDDED +#if defined(__x86_64__) if (coprocessor_cross_panic_enabled) #endif - IOCPURunPlatformPanicActions(type); + IOCPURunPlatformPanicActions(type, details); } else if (type == kPEPanicBegin) { -#if !CONFIG_EMBEDDED +#if defined(__x86_64__) if (coprocessor_cross_panic_enabled) #endif { // Only call the kPEPanicBegin callout once if (!panic_begin_called) { panic_begin_called = TRUE; - IOCPURunPlatformPanicActions(type); + IOCPURunPlatformPanicActions(type, details); } } } @@ -1029,11 +1092,17 @@ skip_to_haltRestart: } } +int +PEHaltRestart(unsigned int type) +{ + return PEHaltRestartInternal(type, 0); +} + UInt32 PESavePanicInfo(UInt8 *buffer, UInt32 length) { if (gIOPlatform != NULL) { - return gIOPlatform->savePanicInfo(buffer, length); + return (UInt32) gIOPlatform->savePanicInfo(buffer, length); } else { return 0; } @@ -1047,6 +1116,12 @@ PESavePanicInfoAction(void *buffer, UInt32 offset, UInt32 length) } +/* + * Depending on the platform, the /options node may not be created + * until after IOKit matching has started, by an externally-supplied + * platform expert subclass. Therefore, we must check for its presence + * here and update gIOOptionsEntry for the platform code as necessary. 
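In the panic path above, the new details argument is threaded from PEHaltRestartInternal into IOCPURunPlatformPanicActions, the panic buffers are flushed with PE_sync_panic_buffers() both before and after the platform handlers run so that partial data survives a faulting handler, and a panic_begin_called flag keeps the kPEPanicBegin callout from firing more than once. The sketch below shows only that ordering with stub functions; the coprocessor_cross_panic_enabled gating and the real callout plumbing are deliberately omitted, and the names are stand-ins.

// Sketch of the panic callout ordering; stubs stand in for the kernel primitives.
#include <cstdint>
#include <cstdio>

enum PanicPhase { kPanicBegin, kPanicRestartCPU, kPanicEnd };

static void syncPanicBuffers() { printf("  PE_sync_panic_buffers()\n"); }
static void runPanicActions(PanicPhase phase, uint32_t details) {
    printf("  run panic actions(phase=%d, details=0x%x)\n", phase, details);
}

static bool panicBeginCalled = false;

static void handlePanicPhase(PanicPhase phase, uint32_t details) {
    switch (phase) {
    case kPanicBegin:
        if (!panicBeginCalled) {               // only ever delivered once
            panicBeginCalled = true;
            runPanicActions(kPanicBegin, details);
        }
        break;
    case kPanicRestartCPU:
        runPanicActions(kPanicEnd, details);   // tell listeners data collection is done
        syncPanicBuffers();                    // flush before the handlers run
        runPanicActions(kPanicRestartCPU, details);
        syncPanicBuffers();                    // final flush with hardware quiesced
        break;
    case kPanicEnd:
        runPanicActions(kPanicEnd, details);
        break;
    }
}

int main() {
    handlePanicPhase(kPanicBegin, 0x1);
    handlePanicPhase(kPanicBegin, 0x1);        // ignored: begin already ran
    handlePanicPhase(kPanicRestartCPU, 0x2);
    return 0;
}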
+ */ inline static int init_gIOOptionsEntry(void) { @@ -1290,7 +1365,7 @@ coprocessor_type_t PEGetCoprocessorVersion( void ) { coprocessor_type_t coprocessor_version = kCoprocessorVersionNone; -#if !CONFIG_EMBEDDED +#if defined(__x86_64__) IORegistryEntry *platform_entry = NULL; OSData *coprocessor_version_obj = NULL; @@ -1307,50 +1382,55 @@ PEGetCoprocessorVersion( void ) } } /* extern "C" */ -void -IOPlatformExpert::registerNVRAMController(IONVRAMController * caller) -{ - OSData * data; - IORegistryEntry * entry; - OSString * string = NULL; - uuid_string_t uuid; - -#if CONFIG_EMBEDDED - entry = IORegistryEntry::fromPath( "/chosen", gIODTPlane ); - if (entry) { - OSData * data1; - - data1 = OSDynamicCast( OSData, entry->getProperty( "unique-chip-id" )); - if (data1 && data1->getLength() == 8) { - OSData * data2; +bool gIOPlatformUUIDAndSerialDone = false; - data2 = OSDynamicCast( OSData, entry->getProperty( "chip-id" )); - if (data2 && data2->getLength() == 4) { - SHA1_CTX context; - uint8_t digest[SHA_DIGEST_LENGTH]; - const uuid_t space = { 0xA6, 0xDD, 0x4C, 0xCB, 0xB5, 0xE8, 0x4A, 0xF5, 0xAC, 0xDD, 0xB6, 0xDC, 0x6A, 0x05, 0x42, 0xB8 }; - - SHA1Init( &context ); - SHA1Update( &context, space, sizeof(space)); - SHA1Update( &context, data1->getBytesNoCopy(), data1->getLength()); - SHA1Update( &context, data2->getBytesNoCopy(), data2->getLength()); - SHA1Final( digest, &context ); - - digest[6] = (digest[6] & 0x0F) | 0x50; - digest[8] = (digest[8] & 0x3F) | 0x80; - - uuid_unparse( digest, uuid ); - string = OSString::withCString( uuid ); +void +IOPlatformExpert::publishPlatformUUIDAndSerial( void ) +{ + if (!gIOPlatformUUIDAndSerialDone) { + // Parse the serial-number data and publish a user-readable string + if (NULL == getProvider()->getProperty(kIOPlatformSerialNumberKey)) { + OSData* mydata = (OSData*) (getProvider()->getProperty("serial-number")); + if (mydata != NULL) { + OSString *serNoString = createSystemSerialNumberString(mydata); + if (serNoString != NULL) { + getProvider()->setProperty(kIOPlatformSerialNumberKey, serNoString); + serNoString->release(); + } } } + IOPlatformExpertDevice *provider = OSDynamicCast(IOPlatformExpertDevice, getProvider()); + assert(provider != NULL); + provider->generatePlatformUUID(); + } - entry->release(); + if (gIOPlatformUUIDAndSerialDone) { + publishResource(kIOPlatformUUIDKey, getProvider()->getProperty(kIOPlatformUUIDKey)); } -#endif /* CONFIG_EMBEDDED */ +} + +void +IOPlatformExpert::publishNVRAM( void ) +{ + if (init_gIOOptionsEntry() < 0) { + IOPlatformExpertDevice *provider = OSDynamicCast(IOPlatformExpertDevice, getProvider()); + assert(provider != NULL); + provider->createNVRAM(); + } + if (gIOOptionsEntry != NULL) { + gIOOptionsEntry->registerService(); + } +} + +void +IOPlatformExpert::registerNVRAMController(IONVRAMController * caller) +{ +#if defined(__x86_64__) + OSData * data; + IORegistryEntry * entry; -#if defined(XNU_TARGET_OS_OSX) /* - * If we have panic debugging enabled and the bridgeOS panic SoC watchdog is enabled, + * If we have panic debugging enabled and a prod-fused coprocessor, * disable cross panics so that the co-processor doesn't cause the system * to reset when we enter the debugger or hit a panic on the x86 side. 
*/ @@ -1370,49 +1450,26 @@ IOPlatformExpert::registerNVRAMController(IONVRAMController * caller) } } - entry = IORegistryEntry::fromPath( "/efi/platform", gIODTPlane ); +#if (DEVELOPMENT || DEBUG) + entry = IORegistryEntry::fromPath( "/options", gIODTPlane ); if (entry) { - data = OSDynamicCast( OSData, entry->getProperty( "system-id" )); - if (data && data->getLength() == 16) { - SHA1_CTX context; - uint8_t digest[SHA_DIGEST_LENGTH]; - const uuid_t space = { 0x2A, 0x06, 0x19, 0x90, 0xD3, 0x8D, 0x44, 0x40, 0xA1, 0x39, 0xC4, 0x97, 0x70, 0x37, 0x65, 0xAC }; - - SHA1Init( &context ); - SHA1Update( &context, space, sizeof(space)); - SHA1Update( &context, data->getBytesNoCopy(), data->getLength()); - SHA1Final( digest, &context ); - - digest[6] = (digest[6] & 0x0F) | 0x50; - digest[8] = (digest[8] & 0x3F) | 0x80; - - uuid_unparse( digest, uuid ); - string = OSString::withCString( uuid ); - } - - entry->release(); - } -#endif /* defined(XNU_TARGET_OS_OSX) */ - - if (string == NULL) { - entry = IORegistryEntry::fromPath( "/options", gIODTPlane ); - if (entry) { - data = OSDynamicCast( OSData, entry->getProperty( "platform-uuid" )); - if (data && data->getLength() == sizeof(uuid_t)) { - uuid_unparse((uint8_t *) data->getBytesNoCopy(), uuid ); - string = OSString::withCString( uuid ); + data = OSDynamicCast( OSData, entry->getProperty(nvram_osenvironment)); + if (data) { + sysctl_set_osenvironment(data->getLength(), data->getBytesNoCopy()); + entry->removeProperty(nvram_osenvironment); + IODTNVRAM * nvramOptionsEntry = OSDynamicCast(IODTNVRAM, entry); + if (nvramOptionsEntry) { + nvramOptionsEntry->sync(); } - - entry->release(); } + entry->release(); } + sysctl_unblock_osenvironment(); +#endif + /* on intel the UUID must be published after nvram is available */ + publishPlatformUUIDAndSerial(); - if (string) { - getProvider()->setProperty( kIOPlatformUUIDKey, string ); - publishResource( kIOPlatformUUIDKey, string ); - - string->release(); - } +#endif /* defined(__x86_64__) */ publishResource("IONVRAM"); } @@ -1536,6 +1593,45 @@ IODTPlatformExpert::createNubs( IOService * parent, OSIterator * iter ) } nub->attach( parent ); +#if !defined(__x86_64__) + OSData *tmpData = (OSData *)next->getProperty("device_type"); + if (tmpData == NULL) { + nub->registerService(); + continue; + } + + char *device_type = (char *)tmpData->getBytesNoCopy(); + if (strcmp(device_type, "cpu") != 0) { + nub->registerService(); + continue; + } + + tmpData = (OSData *)next->getProperty("reg"); + assert(tmpData != NULL); + assert(tmpData->getLength() >= sizeof(UInt32)); + + uint32_t phys_id = *(UInt32 *)tmpData->getBytesNoCopy(); + int logical_cpu_id = ml_get_cpu_number(phys_id); + int logical_cluster_id = ml_get_cluster_number(phys_id); + + /* + * If the following condition triggers, it means that a CPU that was present in the DT + * was ignored by XNU at topology parsing time. This can happen currently when using the + * cpus=N boot-arg; for example, cpus=1 will cause XNU to parse and enable a single CPU. + * + * Note that this condition will not trigger for harvested cores because these do not show up + * in the DT/IORegistry in the first place. 
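The createNubs changes above read the physical CPU id from each cpu node's "reg" property, translate it with ml_get_cpu_number()/ml_get_cluster_number(), and only publish logical-cpu-id and logical-cluster-id properties when the translation succeeds; a negative result means the CPU was present in the device tree but ignored at topology-parsing time (for example with a cpus=N boot-arg), so the nub is registered without those properties. A small stand-alone illustration of that filtering follows, using a fixed lookup table in place of the machine-layer calls; the names are placeholders.

// Sketch: mapping physical CPU ids to logical ids, skipping CPUs the kernel ignored.
#include <cstdint>
#include <cstdio>
#include <map>

// Stand-in for ml_get_cpu_number(): returns -1 for a CPU that was present in the
// device tree but not brought into the kernel topology (e.g. cpus=1 boot-arg).
static int logicalCpuNumber(uint32_t physId, const std::map<uint32_t, int> &topology) {
    auto it = topology.find(physId);
    return (it == topology.end()) ? -1 : it->second;
}

int main() {
    // Assume the DT lists two CPUs but only phys id 0x0 was enabled.
    const std::map<uint32_t, int> topology = { {0x0, 0} };

    for (uint32_t physId : {0x0u, 0x1u}) {
        int logical = logicalCpuNumber(physId, topology);
        if (logical < 0) {
            printf("cpu phys 0x%x: not in topology, register nub without ids\n", physId);
            continue;   // nub->registerService() without logical-cpu-id
        }
        printf("cpu phys 0x%x: logical-cpu-id=%d\n", physId, logical);
    }
    return 0;
}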
+ */ + if (logical_cpu_id < 0) { + nub->registerService(); + continue; + } + + __assert_only bool logical_id_added_to_ioreg = nub->setProperty("logical-cpu-id", logical_cpu_id, 32U); + assert(logical_id_added_to_ioreg == true); + logical_id_added_to_ioreg = nub->setProperty("logical-cluster-id", logical_cluster_id, 32U); + assert(logical_id_added_to_ioreg == true); +#endif nub->registerService(); } iter->release(); @@ -1550,7 +1646,6 @@ IODTPlatformExpert::processTopLevel( IORegistryEntry * rootEntry ) OSIterator * kids; IORegistryEntry * next; IORegistryEntry * cpus; - IORegistryEntry * options; // infanticide kids = IODTFindMatchingEntries( rootEntry, 0, deleteList()); @@ -1561,21 +1656,9 @@ IODTPlatformExpert::processTopLevel( IORegistryEntry * rootEntry ) kids->release(); } - // Publish an IODTNVRAM class on /options. - options = rootEntry->childFromPath("options", gIODTPlane); - if (options) { - dtNVRAM = new IODTNVRAM; - if (dtNVRAM) { - if (!dtNVRAM->init(options, gIODTPlane)) { - dtNVRAM->release(); - dtNVRAM = NULL; - } else { - dtNVRAM->attach(this); - dtNVRAM->registerService(); - options->release(); - } - } - } + publishNVRAM(); + assert(gIOOptionsEntry != NULL); // subclasses that do their own NVRAM initialization shouldn't be calling this + dtNVRAM = gIOOptionsEntry; // Publish the cpus. cpus = rootEntry->childFromPath( "cpus", gIODTPlane); @@ -1608,6 +1691,20 @@ IODTPlatformExpert::compareNubName( const IOService * nub, || super::compareNubName( nub, name, matched); } + +/* + * Do not use this method directly, it returns inconsistent results + * across architectures and is considered deprecated. + * + * Use getTargetName and getProductName respectively. For example: + * + * targetName: J137AP + * productName: iMacPro1,1 + * + * targetName: D331pAP + * productName: iPhone11,6 + */ + bool IODTPlatformExpert::getModelName( char * name, int maxLength ) { @@ -1645,6 +1742,19 @@ IODTPlatformExpert::getModelName( char * name, int maxLength ) return ok; } +/* + * Do not use this method directly, it returns inconsistent results + * across architectures and is considered deprecated. + * + * Use getTargetName and getProductName respectively. For example: + * + * targetName: J137AP + * productName: iMacPro1,1 + * + * targetName: D331pAP + * productName: iPhone11,6 + */ + bool IODTPlatformExpert::getMachineName( char * name, int maxLength ) { @@ -1662,6 +1772,46 @@ IODTPlatformExpert::getMachineName( char * name, int maxLength ) return ok; } +/* Examples: J137AP, D331pAP... */ + +bool +IODTPlatformExpert::getTargetName( char * name, int maxLength ) +{ +#if __x86_64__ + OSData * prop; + + const OSSymbol * key = gIODTBridgeModelKey; + + maxLength--; + prop = (OSData *) getProvider()->getProperty( key ); + + if (prop == NULL) { + // This happens if there is no bridge. + char const * const unknown = ""; + + strlcpy( name, unknown, maxLength ); + } else { + strlcpy( name, (const char *)prop->getBytesNoCopy(), maxLength ); + } + + return true; +#else + return getModelName( name, maxLength ); +#endif +} + +/* Examples: iMacPro1,1, iPhone11,6... 
*/ + +bool +IODTPlatformExpert::getProductName( char * name, int maxLength ) +{ +#if __x86_64__ + return getModelName( name, maxLength ); +#else + return getMachineName( name, maxLength ); +#endif +} + /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ void @@ -1718,6 +1868,22 @@ IODTPlatformExpert::readNVRAMProperty( } } +IOReturn +IODTPlatformExpert::readNVRAMProperty( + IORegistryEntry * entry, + OSSharedPtr& name, OSSharedPtr& value ) +{ + const OSSymbol* nameRaw = NULL; + OSData* valueRaw = NULL; + + IOReturn result = readNVRAMProperty(entry, &nameRaw, &valueRaw); + + name.reset(nameRaw, OSNoRetain); + value.reset(valueRaw, OSNoRetain); + + return result; +} + IOReturn IODTPlatformExpert::writeNVRAMProperty( IORegistryEntry * entry, @@ -1841,14 +2007,12 @@ IOPlatformExpertDevice::compareName( OSString * name, } bool -IOPlatformExpertDevice::initWithArgs( - void * dtTop, void * p2, void * p3, void * p4 ) +IOPlatformExpertDevice::init(void *dtRoot) { IORegistryEntry * dt = NULL; bool ok; - // dtTop may be zero on non- device tree systems - if (dtTop && (dt = IODeviceTreeAlloc( dtTop ))) { + if ((dtRoot != NULL) && (dt = IODeviceTreeAlloc(dtRoot))) { ok = super::init( dt, gIODTPlane ); } else { ok = super::init(); @@ -1858,11 +2022,19 @@ IOPlatformExpertDevice::initWithArgs( return false; } + return true; +} + +bool +IOPlatformExpertDevice::startIOServiceMatching(void) +{ workLoop = IOWorkLoop::workLoop(); if (!workLoop) { return false; } + registerService(); + return true; } @@ -1927,6 +2099,131 @@ IOPlatformExpertDevice::free() } } +void +IOPlatformExpertDevice::configureDefaults( void ) +{ + createNVRAM(); + // Parse the serial-number data and publish a user-readable string + OSData* mydata = (OSData*) (getProperty("serial-number")); + if (mydata != NULL) { + OSString *serNoString = OSString::withCString((const char *)mydata->getBytesNoCopy()); + if (serNoString != NULL) { + setProperty(kIOPlatformSerialNumberKey, serNoString); + serNoString->release(); + } + } + generatePlatformUUID(); +} + +void +IOPlatformExpertDevice::createNVRAM( void ) +{ + /* + * Publish an IODTNVRAM class on /options, if present. + * DT-based platforms may need NVRAM access prior to the start + * of IOKit matching, to support security-related operations + * that must happen before machine_lockdown(). 
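As the comment above explains, DT-based platforms may need NVRAM before IOKit matching begins, so the bring-up is split in two: createNVRAM() instantiates and attaches the /options object early (before machine_lockdown()), and publishNVRAM() later makes sure the object exists and registers it for matching. The sketch below is a rough, simplified user-space model of that create-early/register-later split; the real code goes through init_gIOOptionsEntry() and IODTNVRAM, and the names here are stand-ins.

// Sketch of the two-phase NVRAM bring-up: create early, register for matching later.
#include <cassert>
#include <cstdio>

struct NVRAM {                           // stands in for IODTNVRAM
    bool registered = false;
    void registerService() { registered = true; }
};

static NVRAM *gOptionsEntry = nullptr;   // analogue of gIOOptionsEntry

// Runs before IOKit matching starts, so security-sensitive consumers can use
// NVRAM before lockdown.
static void createNVRAM() {
    if (gOptionsEntry != nullptr) {
        return;                          // a platform subclass may already have created it
    }
    gOptionsEntry = new NVRAM;
    printf("options entry created early\n");
}

// Runs later, when the platform expert publishes its services.
static void publishNVRAM() {
    if (gOptionsEntry == nullptr) {
        createNVRAM();                   // fall back if early creation did not happen
    }
    gOptionsEntry->registerService();
    printf("options entry registered\n");
}

int main() {
    createNVRAM();                       // early boot
    publishNVRAM();                      // IOKit matching time
    assert(gOptionsEntry && gOptionsEntry->registered);
    delete gOptionsEntry;
    return 0;
}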
+ */ + IORegistryEntry *options = IORegistryEntry::fromPath("/options", gIODTPlane); + if (options == NULL) { + return; // /options may not be present + } + + assert(gIOOptionsEntry == NULL); + gIOOptionsEntry = new IODTNVRAM; + + assert(gIOOptionsEntry != NULL); + + gIOOptionsEntry->init(options, gIODTPlane); + + gIOOptionsEntry->attach(this); + options->release(); +} + +void +IOPlatformExpertDevice::generatePlatformUUID( void ) +{ + IORegistryEntry * entry; + OSString * string = NULL; + uuid_string_t uuid; + +#if !defined(__x86_64__) + entry = IORegistryEntry::fromPath( "/chosen", gIODTPlane ); + if (entry) { + OSData * data1; + + data1 = OSDynamicCast( OSData, entry->getProperty( "unique-chip-id" )); + if (data1 && data1->getLength() == 8) { + OSData * data2; + + data2 = OSDynamicCast( OSData, entry->getProperty( "chip-id" )); + if (data2 && data2->getLength() == 4) { + SHA1_CTX context; + uint8_t digest[SHA_DIGEST_LENGTH]; + const uuid_t space = { 0xA6, 0xDD, 0x4C, 0xCB, 0xB5, 0xE8, 0x4A, 0xF5, 0xAC, 0xDD, 0xB6, 0xDC, 0x6A, 0x05, 0x42, 0xB8 }; + + SHA1Init( &context ); + SHA1Update( &context, space, sizeof(space)); + SHA1Update( &context, data1->getBytesNoCopy(), data1->getLength()); + SHA1Update( &context, data2->getBytesNoCopy(), data2->getLength()); + SHA1Final( digest, &context ); + + digest[6] = (digest[6] & 0x0F) | 0x50; + digest[8] = (digest[8] & 0x3F) | 0x80; + + uuid_unparse( digest, uuid ); + string = OSString::withCString( uuid ); + } + } + + entry->release(); + } +#else /* !defined(__x86_64__) */ + OSData * data; + + entry = IORegistryEntry::fromPath( "/efi/platform", gIODTPlane ); + if (entry) { + data = OSDynamicCast( OSData, entry->getProperty( "system-id" )); + if (data && data->getLength() == 16) { + SHA1_CTX context; + uint8_t digest[SHA_DIGEST_LENGTH]; + const uuid_t space = { 0x2A, 0x06, 0x19, 0x90, 0xD3, 0x8D, 0x44, 0x40, 0xA1, 0x39, 0xC4, 0x97, 0x70, 0x37, 0x65, 0xAC }; + + SHA1Init( &context ); + SHA1Update( &context, space, sizeof(space)); + SHA1Update( &context, data->getBytesNoCopy(), data->getLength()); + SHA1Final( digest, &context ); + + digest[6] = (digest[6] & 0x0F) | 0x50; + digest[8] = (digest[8] & 0x3F) | 0x80; + + uuid_unparse( digest, uuid ); + string = OSString::withCString( uuid ); + } + + entry->release(); + } + if (!string) { + /* vmware still runs this path */ + entry = IORegistryEntry::fromPath( "/options", gIODTPlane ); + if (entry) { + data = OSDynamicCast( OSData, entry->getProperty( "platform-uuid" )); + if (data && data->getLength() == sizeof(uuid_t)) { + uuid_unparse((uint8_t *) data->getBytesNoCopy(), uuid ); + string = OSString::withCString( uuid ); + } + entry->release(); + } + } +#endif /* defined(__x86_64__) */ + + if (string) { + setProperty( kIOPlatformUUIDKey, string ); + gIOPlatformUUIDAndSerialDone = true; + + string->release(); + } +} /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ #undef super diff --git a/libkern/c++/OSCompat.cpp b/iokit/Kernel/IOPlatformIO.cpp similarity index 64% rename from libkern/c++/OSCompat.cpp rename to iokit/Kernel/IOPlatformIO.cpp index b0fd9156f..5c7c8a20b 100644 --- a/libkern/c++/OSCompat.cpp +++ b/iokit/Kernel/IOPlatformIO.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2019 Apple Computer, Inc. All rights reserved. 
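generatePlatformUUID builds a name-based UUID: it hashes a fixed namespace UUID plus the platform identifiers (unique-chip-id and chip-id on device-tree platforms, the EFI system-id on x86) with SHA-1, then forces the version nibble to 5 (digest[6] = (digest[6] & 0x0F) | 0x50) and the RFC 4122 variant bits (digest[8] = (digest[8] & 0x3F) | 0x80) before formatting the first 16 bytes as a UUID string. The sketch below applies only the fix-up and formatting steps to a caller-supplied digest; the SHA-1 step is assumed rather than reimplemented, and the digest bytes in main are placeholders.

// Sketch: turning the first 16 bytes of a SHA-1 digest into an RFC 4122 v5 UUID string.
#include <cstdint>
#include <cstdio>

static void digestToUUID(uint8_t digest[20], char out[37]) {
    digest[6] = (digest[6] & 0x0F) | 0x50;   // version 5 (name-based, SHA-1)
    digest[8] = (digest[8] & 0x3F) | 0x80;   // RFC 4122 variant
    snprintf(out, 37,
        "%02X%02X%02X%02X-%02X%02X-%02X%02X-%02X%02X-%02X%02X%02X%02X%02X%02X",
        digest[0], digest[1], digest[2], digest[3],
        digest[4], digest[5], digest[6], digest[7],
        digest[8], digest[9], digest[10], digest[11],
        digest[12], digest[13], digest[14], digest[15]);
}

int main() {
    // Placeholder bytes; in the kernel these come from SHA1(namespace ++ chip identifiers).
    uint8_t digest[20] = { 0xDE, 0xAD, 0xBE, 0xEF, 0x00, 0x01, 0x23, 0x45,
                           0x67, 0x89, 0xAB, 0xCD, 0xEF, 0x10, 0x32, 0x54,
                           0x76, 0x98, 0xBA, 0xDC };
    char uuid[37];
    digestToUUID(digest, uuid);
    printf("%s\n", uuid);   // third group starts with 5, fourth with 8, 9, A, or B
    return 0;
}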
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -26,29 +26,37 @@ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -/* - * Compatibility definitions for I/O Kit smart pointers - */ +extern "C" { +#include +} -#define LIBKERN_SMART_POINTERS +#include -#include -#include +#define super IOService +OSDefineMetaClassAndAbstractStructors(IOPlatformIO, IOService); -extern OSObjectPtr -OSUnserialize(const char *buffer, LIBKERN_RETURNS_RETAINED_ON_ZERO OSString **errorString); +static IOPlatformIO * platformIOProvider; -OSObjectPtr -OSUnserialize(const char *buffer, OSStringPtr *errorString) +bool +IOPlatformIO::start(IOService * provider) { - return OSUnserialize(buffer, OSOutPtr(errorString)); -} + if (!super::start(provider)) { + return false; + } + + if (platformIOProvider == NULL) { + platformIOProvider = this; + } -extern OSObjectPtr -OSUnserializeXML(const char *buffer, LIBKERN_RETURNS_RETAINED_ON_ZERO OSString **errorString); + return true; +} -OSObjectPtr -OSUnserializeXML(const char *buffer, OSStringPtr *errorString) +bool +PE_handle_platform_error(vm_offset_t far) { - return OSUnserializeXML(buffer, OSOutPtr(errorString)); + if (platformIOProvider == NULL) { + return false; + } else { + return platformIOProvider->handlePlatformError(far); + } } diff --git a/iokit/Kernel/IOPolledInterface.cpp b/iokit/Kernel/IOPolledInterface.cpp index d36c0c6db..ccbea3d90 100644 --- a/iokit/Kernel/IOPolledInterface.cpp +++ b/iokit/Kernel/IOPolledInterface.cpp @@ -37,14 +37,21 @@ #include #include #include +#include #include "IOKitKernelInternal.h" +#if defined(__arm64__) +#include +#if XNU_MONITOR_PPL_HIB +#include +#endif /* XNU_MONITOR_PPL_HIB */ +#endif /* defined(__arm64__) */ /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ OSDefineMetaClassAndAbstractStructors(IOPolledInterface, OSObject); -OSMetaClassDefineReservedUsed(IOPolledInterface, 0); +OSMetaClassDefineReservedUsedX86(IOPolledInterface, 0); OSMetaClassDefineReservedUnused(IOPolledInterface, 1); OSMetaClassDefineReservedUnused(IOPolledInterface, 2); OSMetaClassDefineReservedUnused(IOPolledInterface, 3); @@ -486,6 +493,9 @@ IOCopyMediaForDev(dev_t device) return result; } +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +#if defined(__i386__) || defined(__x86_64__) #define APFSMEDIA_GETHIBERKEY "getHiberKey" static IOReturn @@ -570,6 +580,32 @@ IOGetVolumeCryptKey(dev_t block_dev, part->release(); return err; } +#endif /* defined(__i386__) || defined(__x86_64__) */ + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +#if defined(__arm64__) +static IOReturn +IOGetHibernationCryptKey(uint8_t * hibernationKey, + size_t * keySize, + uint32_t *swSeed + ) +{ +#if XNU_MONITOR_PPL_HIB + SEPHibernator *hibernator = SEPHibernator::sepHibernator(); + sephib_wrapped_key_t wrappedKey = {}; + sephib_seprom_hib_payload_t sepromPayload = {}; + hibernator->prepareToHibernate(&wrappedKey, &sepromPayload); + *swSeed = sepromPayload.sw_seed; + assert(*keySize >= sizeof(wrappedKey.data)); + *keySize = sizeof(wrappedKey.data); + memcpy(hibernationKey, wrappedKey.data, *keySize); + return kIOReturnSuccess; +#else + return kIOReturnNotFound; +#endif +} +#endif /* defined(__arm64__) */ /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ @@ -671,15 +707,36 @@ IOPolledFileOpen(const char * filename, vars->pollers->pollers->getCount()); OSString * keyUUID = NULL; +#if defined(__i386__) || defined(__x86_64__) if (volumeCryptKey) { err = IOGetVolumeCryptKey(block_dev, 
&keyUUID, volumeCryptKey, keySize); } +#elif defined(__arm64__) + uint32_t swSeed = 0; + if (volumeCryptKey) { + if (flags & kIOPolledFileHibernate) { + err = IOGetHibernationCryptKey(volumeCryptKey, keySize, &swSeed); + if (kIOReturnSuccess != err) { + HIBLOG("error 0x%x from IOGetHibernationCryptKey\n", err); + break; + } + } else { + *keySize = 0; + } + } +#else + if (volumeCryptKey) { + HIBLOG("IOPolledFileOpen: unable to get volumeCryptKey\n"); + err = kIOReturnNotFound; + break; + } +#endif *fileVars = vars; vars->fileExtents = extentsData; // make imagePath - OSData * data; + OSData * data = NULL; if (imagePath) { #if defined(__i386__) || defined(__x86_64__) char str2[24 + sizeof(uuid_string_t) + 2]; @@ -695,9 +752,13 @@ IOPolledFileOpen(const char * filename, gIOCreateEFIDevicePathSymbol, false, (void *) part, (void *) str2, (void *) (uintptr_t) true, (void *) &data); -#else - data = NULL; +#elif defined(__arm64__) + char str2[26]; + snprintf(str2, sizeof(str2), "%qx:%x", vars->extentMap[0].start, swSeed); + data = OSData::withBytes(str2, (unsigned int) strlen(str2)); err = kIOReturnSuccess; +#else + err = kIOReturnNotFound; #endif if (kIOReturnSuccess != err) { HIBLOG("error 0x%x getting path\n", err); @@ -728,6 +789,22 @@ IOPolledFileOpen(const char * filename, return err; } +IOReturn +IOPolledFileOpen(const char * filename, + uint32_t flags, + uint64_t setFileSize, uint64_t fsFreeSize, + void * write_file_addr, size_t write_file_len, + IOPolledFileIOVars ** fileVars, + OSSharedPtr& imagePath, + uint8_t * volumeCryptKey, size_t * keySize) +{ + OSData* imagePathRaw = NULL; + IOReturn result = IOPolledFileOpen(filename, flags, setFileSize, fsFreeSize, write_file_addr, write_file_len, + fileVars, &imagePathRaw, volumeCryptKey, keySize); + imagePath.reset(imagePathRaw, OSNoRetain); + return result; +} + /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ IOReturn @@ -793,10 +870,11 @@ IOPolledFilePollersSetup(IOPolledFileIOVars * vars, vars->buffer = (uint8_t *) vars->pollers->ioBuffer->getBytesNoCopy(); vars->bufferHalf = 0; vars->bufferOffset = 0; - vars->bufferSize = (vars->pollers->ioBuffer->getLength() >> 1); + assert(vars->pollers->ioBuffer->getLength() <= UINT_MAX); + vars->bufferSize = (typeof(vars->bufferSize))(vars->pollers->ioBuffer->getLength() >> 1); if (vars->maxiobytes < vars->bufferSize) { - vars->bufferSize = vars->maxiobytes; + vars->bufferSize = (typeof(vars->bufferSize))vars->maxiobytes; } }while (false); @@ -836,7 +914,7 @@ IOPolledFileSeek(IOPolledFileIOVars * vars, uint64_t position) if (vars->bufferSize <= vars->extentRemaining) { vars->bufferLimit = vars->bufferSize; } else { - vars->bufferLimit = vars->extentRemaining; + vars->bufferLimit = ((uint32_t) vars->extentRemaining); } return kIOReturnSuccess; @@ -861,8 +939,6 @@ IOPolledFileWrite(IOPolledFileIOVars * vars, size = vars->blockSize - size; } flush = true; - // use some garbage for the fill - bytes = vars->buffer + vars->bufferOffset; } copy = vars->bufferLimit - vars->bufferOffset; @@ -914,10 +990,11 @@ IOPolledFileWrite(IOPolledFileIOVars * vars, clock_get_uptime(&startTime); + assert(encryptLen <= UINT_MAX); // encrypt the buffer aes_encrypt_cbc(vars->buffer + vars->bufferHalf + encryptStart, &cryptvars->aes_iv[0], - encryptLen / AES_BLOCK_SIZE, + (unsigned int) (encryptLen / AES_BLOCK_SIZE), vars->buffer + vars->bufferHalf + encryptStart, &cryptvars->ctx.encrypt); @@ -963,7 +1040,7 @@ IOPolledFileWrite(IOPolledFileIOVars * vars, if (vars->bufferSize <= vars->extentRemaining) 
{ vars->bufferLimit = vars->bufferSize; } else { - vars->bufferLimit = vars->extentRemaining; + vars->bufferLimit = ((uint32_t) vars->extentRemaining); } if (!vars->extentRemaining) { @@ -1059,17 +1136,17 @@ IOPolledFileRead(IOPolledFileIOVars * vars, } } - uint64_t length; - uint64_t lastReadLength = vars->lastRead; + uint32_t length; + uint32_t lastReadLength = vars->lastRead; uint64_t offset = (vars->position - vars->extentPosition + vars->currentExtent->start); if (vars->extentRemaining <= vars->bufferSize) { - length = vars->extentRemaining; + length = ((uint32_t) vars->extentRemaining); } else { length = vars->bufferSize; } if ((length + vars->position) > vars->readEnd) { - length = vars->readEnd - vars->position; + length = ((uint32_t) (vars->readEnd - vars->position)); } vars->lastRead = length; @@ -1098,9 +1175,10 @@ IOPolledFileRead(IOPolledFileIOVars * vars, // decrypt the buffer clock_get_uptime(&startTime); + assert(lastReadLength <= UINT_MAX); aes_decrypt_cbc(vars->buffer + vars->bufferHalf, &thisVector[0], - lastReadLength / AES_BLOCK_SIZE, + (unsigned int) (lastReadLength / AES_BLOCK_SIZE), vars->buffer + vars->bufferHalf, &cryptvars->ctx.decrypt); diff --git a/iokit/Kernel/IORegistryEntry.cpp b/iokit/Kernel/IORegistryEntry.cpp index 456f93729..4f53c6d76 100644 --- a/iokit/Kernel/IORegistryEntry.cpp +++ b/iokit/Kernel/IORegistryEntry.cpp @@ -31,10 +31,12 @@ #include #include #include +#include #include #include #include +#include #include "IOKitKernelInternal.h" @@ -430,13 +432,14 @@ IORegistryEntry::free( void ) #endif /* IOREGSPLITTABLES */ if (reserved) { - if (reserved->fIndexedProperties) { + OSObject ** array = os_atomic_load(&reserved->fIndexedProperties, acquire); + if (array) { for (int idx = 0; idx < kIORegistryEntryIndexedPropertyCount; idx++) { - if (reserved->fIndexedProperties[idx]) { - reserved->fIndexedProperties[idx]->release(); + if (array[idx]) { + array[idx]->release(); } } - IODelete(reserved->fIndexedProperties, OSObject *, kIORegistryEntryIndexedPropertyCount); + IODelete(array, OSObject *, kIORegistryEntryIndexedPropertyCount); } if (reserved->fLock) { IORecursiveLockFree(reserved->fLock); @@ -589,6 +592,135 @@ wrap5(const OSString, const) // copyProperty() w/plane definition wrap5(const char, const) // copyProperty() w/plane definition +bool +IORegistryEntry::propertyExists(const OSSymbol * aKey) +{ + return NULL != getProperty(aKey); +} + +bool +IORegistryEntry::propertyExists(const OSString * aKey) +{ + return NULL != getProperty(aKey); +} + +bool +IORegistryEntry::propertyExists(const char * aKey) +{ + return NULL != getProperty(aKey); +} + + +bool +IORegistryEntry::propertyHasValue(const OSSymbol * aKey, + const OSObject * value) +{ + const OSObject * found; + bool result; + + found = copyProperty(aKey); + result = (!found && !value) || (found && value && value->isEqualTo(found)); + OSSafeReleaseNULL(found); + return result; +} + +bool +IORegistryEntry::propertyHasValue(const OSString * aKey, + const OSObject * value) +{ + const OSObject * found; + bool result; + + found = copyProperty(aKey); + result = (!found && !value) || (found && value && value->isEqualTo(found)); + OSSafeReleaseNULL(found); + return result; +} + +bool +IORegistryEntry::propertyHasValue(const char * aKey, + const OSObject * value) +{ + const OSObject * found; + bool result; + + found = copyProperty(aKey); + result = (!found && !value) || (found && value && value->isEqualTo(found)); + OSSafeReleaseNULL(found); + return result; +} + + +bool 
+IORegistryEntry::propertyExists(const OSSymbol * aKey, + const IORegistryPlane * plane, + uint32_t options) const +{ + return NULL != getProperty(aKey, plane, options); +} + +bool +IORegistryEntry::propertyExists(const OSString * aKey, + const IORegistryPlane * plane, + uint32_t options) const +{ + return NULL != getProperty(aKey, plane, options); +} +bool +IORegistryEntry::propertyExists(const char * aKey, + const IORegistryPlane * plane, + uint32_t options) const +{ + return NULL != getProperty(aKey, plane, options); +} + + +bool +IORegistryEntry::propertyHasValue(const OSSymbol * aKey, + const OSObject * value, + const IORegistryPlane * plane, + uint32_t options) const +{ + const OSObject * found; + bool result; + + found = copyProperty(aKey, plane, options); + result = (!found && !value) || (found && value && value->isEqualTo(found)); + OSSafeReleaseNULL(found); + return result; +} + +bool +IORegistryEntry::propertyHasValue(const OSString * aKey, + const OSObject * value, + const IORegistryPlane * plane, + uint32_t options) const +{ + const OSObject * found; + bool result; + + found = copyProperty(aKey, plane, options); + result = (!found && !value) || (found && value && value->isEqualTo(found)); + OSSafeReleaseNULL(found); + return result; +} + +bool +IORegistryEntry::propertyHasValue(const char * aKey, + const OSObject * value, + const IORegistryPlane * plane, + uint32_t options) const +{ + const OSObject * found; + bool result; + + found = copyProperty(aKey, plane, options); + result = (!found && !value) || (found && value && value->isEqualTo(found)); + OSSafeReleaseNULL(found); + return result; +} + + OSObject * IORegistryEntry::getProperty( const OSSymbol * aKey) const { @@ -820,26 +952,28 @@ IORegistryEntry::setIndexedProperty(uint32_t index, OSObject * anObject) return NULL; } - array = atomic_load_explicit(&reserved->fIndexedProperties, memory_order_acquire); + array = os_atomic_load(&reserved->fIndexedProperties, acquire); if (!array) { array = IONew(OSObject *, kIORegistryEntryIndexedPropertyCount); if (!array) { return NULL; } bzero(array, kIORegistryEntryIndexedPropertyCount * sizeof(array[0])); - if (!OSCompareAndSwapPtr(NULL, array, &reserved->fIndexedProperties)) { + if (!os_atomic_cmpxchg(&reserved->fIndexedProperties, NULL, array, release)) { IODelete(array, OSObject *, kIORegistryEntryIndexedPropertyCount); + array = os_atomic_load(&reserved->fIndexedProperties, acquire); } } - if (!reserved->fIndexedProperties) { + + if (!array) { return NULL; } - prior = reserved->fIndexedProperties[index]; + prior = array[index]; if (anObject) { anObject->retain(); } - reserved->fIndexedProperties[index] = anObject; + array[index] = anObject; return prior; } @@ -850,11 +984,13 @@ IORegistryEntry::getIndexedProperty(uint32_t index) const if (index >= kIORegistryEntryIndexedPropertyCount) { return NULL; } - if (!reserved->fIndexedProperties) { + + OSObject ** array = os_atomic_load(&reserved->fIndexedProperties, acquire); + if (!array) { return NULL; } - return reserved->fIndexedProperties[index]; + return array[index]; } /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ @@ -1025,6 +1161,7 @@ IORegistryEntry::setLocation( const char * location, } } + bool IORegistryEntry::compareName( OSString * name, OSString ** matched ) const { @@ -1074,6 +1211,23 @@ IORegistryEntry::compareNames( OSObject * names, OSString ** matched ) const return result; } +bool +IORegistryEntry::compareName( OSString * name, OSSharedPtr& matched ) const +{ + OSString* matchedRaw = 
NULL; + bool result = compareName(name, &matchedRaw); + matched.reset(matchedRaw, OSNoRetain); + return result; +} + +bool +IORegistryEntry::compareNames( OSObject * names, OSSharedPtr& matched ) const +{ + OSString* matchedRaw = NULL; + bool result = compareNames(names, &matchedRaw); + matched.reset(matchedRaw, OSNoRetain); + return result; +} bool IORegistryEntry::getPath( char * path, int * length, @@ -1189,9 +1343,9 @@ IORegistryEntry::getPathComponent( char * path, int * length, maxLength = *length; compName = getName( plane ); - len = strlen( compName ); + len = (int) strnlen( compName, sizeof(io_name_t)); if ((loc = getLocation( plane ))) { - locLen = 1 + strlen( loc ); + locLen = 1 + ((int) strnlen( loc, sizeof(io_name_t))); } else { locLen = 0; } @@ -1480,7 +1634,7 @@ IORegistryEntry::fromPath( if (opath && length) { // copy out residual path - len2 = strlen( path ); + len2 = (int) strnlen(path, 65536); if ((len + len2) < *length) { strlcpy( opath + len, path, len2 + 1 ); } @@ -1734,6 +1888,11 @@ IORegistryEntry::copyChildEntry( return entry; } +// FIXME: Implementation of this function is hidden from the static analyzer. +// The analyzer is worried that this release might as well be the last release. +// Feel free to remove the #ifndef and address the warning! +// See also rdar://problem/63023165. +#ifndef __clang_analyzer__ IORegistryEntry * IORegistryEntry::getChildEntry( const IORegistryPlane * plane ) const @@ -1747,6 +1906,7 @@ IORegistryEntry::getChildEntry( return entry; } +#endif // __clang_analyzer__ void IORegistryEntry::applyToChildren( IORegistryEntryApplierFunction applier, @@ -2395,12 +2555,12 @@ OSMetaClassDefineReservedUnused(IORegistryEntry, 3); OSMetaClassDefineReservedUnused(IORegistryEntry, 4); OSMetaClassDefineReservedUnused(IORegistryEntry, 5); #else -OSMetaClassDefineReservedUsed(IORegistryEntry, 0); -OSMetaClassDefineReservedUsed(IORegistryEntry, 1); -OSMetaClassDefineReservedUsed(IORegistryEntry, 2); -OSMetaClassDefineReservedUsed(IORegistryEntry, 3); -OSMetaClassDefineReservedUsed(IORegistryEntry, 4); -OSMetaClassDefineReservedUsed(IORegistryEntry, 5); +OSMetaClassDefineReservedUsedX86(IORegistryEntry, 0); +OSMetaClassDefineReservedUsedX86(IORegistryEntry, 1); +OSMetaClassDefineReservedUsedX86(IORegistryEntry, 2); +OSMetaClassDefineReservedUsedX86(IORegistryEntry, 3); +OSMetaClassDefineReservedUsedX86(IORegistryEntry, 4); +OSMetaClassDefineReservedUsedX86(IORegistryEntry, 5); #endif OSMetaClassDefineReservedUnused(IORegistryEntry, 6); OSMetaClassDefineReservedUnused(IORegistryEntry, 7); diff --git a/iokit/Kernel/IOReportLegend.cpp b/iokit/Kernel/IOReportLegend.cpp index 9c0eeb9c5..8316f6198 100644 --- a/iokit/Kernel/IOReportLegend.cpp +++ b/iokit/Kernel/IOReportLegend.cpp @@ -26,6 +26,8 @@ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ +#define IOKIT_ENABLE_SHARED_PTR + #include #include @@ -47,22 +49,21 @@ #define super OSObject OSDefineMetaClassAndStructors(IOReportLegend, OSObject); -IOReportLegend* +OSSharedPtr IOReportLegend::with(OSArray *legend) { - IOReportLegend *iorLegend = new IOReportLegend; + OSSharedPtr iorLegend = OSMakeShared(); if (iorLegend) { if (legend != NULL) { if (iorLegend->initWith(legend) != kIOReturnSuccess) { - OSSafeReleaseNULL(iorLegend); - return NULL; + return nullptr; } } return iorLegend; } else { - return NULL; + return nullptr; } } @@ -85,9 +86,6 @@ IOReportLegend::initWith(OSArray *legend) void IOReportLegend::free(void) { - if (_reportLegend) { - _reportLegend->release(); - } super::free(); } @@ -95,7 +93,7 @@ 
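/*
 * Illustrative sketch, not part of this change: typical driver-side use of
 * the static helper above, e.g. from a hypothetical IOService subclass's
 * start() method. 'fReporter' and the group names are made-up examples; the
 * helper folds the reporter's legend entry into any legend already published
 * under the service's kIOReportLegendKey property.
 */
IOReturn legendRet = IOReportLegend::addReporterLegend(this, fReporter,
    "Sample Group", "Sample Subgroup");
if (kIOReturnSuccess != legendRet) {
	IOLog("could not publish reporter legend (0x%x)\n", legendRet);
}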
IOReportLegend::free(void) OSArray* IOReportLegend::getLegend(void) { - return _reportLegend; + return _reportLegend.get(); } IOReturn @@ -105,8 +103,8 @@ IOReportLegend::addReporterLegend(IOService *reportingService, const char *subGroupName) { IOReturn res = kIOReturnError; - IOReportLegend *legend = NULL; - OSObject *curLegend = NULL; + OSSharedPtr legend; + OSSharedPtr curLegend; // No need to check groupName and subGroupName because optional params if (!reportingService || !reporter) { @@ -117,7 +115,7 @@ IOReportLegend::addReporterLegend(IOService *reportingService, // is how you make an empty legend). If it's not an array, then // we're just going to replace it. curLegend = reportingService->copyProperty(kIOReportLegendKey); - legend = IOReportLegend::with(OSDynamicCast(OSArray, curLegend)); + legend = IOReportLegend::with(OSDynamicCast(OSArray, curLegend.get())); if (!legend) { goto finish; } @@ -131,13 +129,6 @@ IOReportLegend::addReporterLegend(IOService *reportingService, res = kIOReturnSuccess; finish: - if (legend) { - legend->release(); - } - if (curLegend) { - curLegend->release(); - } - return res; } @@ -148,11 +139,11 @@ IOReportLegend::addLegendEntry(IOReportLegendEntry *legendEntry, const char *subGroupName) { kern_return_t res = kIOReturnError; - const OSSymbol *tmpGroupName = NULL; - const OSSymbol *tmpSubGroupName = NULL; + OSSharedPtr tmpGroupName; + OSSharedPtr tmpSubGroupName; if (!legendEntry) { - goto finish; + return res; } if (groupName) { @@ -164,18 +155,8 @@ IOReportLegend::addLegendEntry(IOReportLegendEntry *legendEntry, } // It is ok to call appendLegendWith() if tmpGroups are NULL - if (legendEntry) { - res = organizeLegend(legendEntry, tmpGroupName, tmpSubGroupName); + res = organizeLegend(legendEntry, tmpGroupName.get(), tmpSubGroupName.get()); - if (tmpGroupName) { - tmpGroupName->release(); - } - if (tmpSubGroupName) { - tmpSubGroupName->release(); - } - } - -finish: return res; } @@ -186,14 +167,13 @@ IOReportLegend::addReporterLegend(IOReporter *reporter, const char *subGroupName) { IOReturn res = kIOReturnError; - IOReportLegendEntry *legendEntry = NULL; + OSSharedPtr legendEntry; if (reporter) { legendEntry = reporter->createLegend(); if (legendEntry) { - res = addLegendEntry(legendEntry, groupName, subGroupName); - legendEntry->release(); + res = addLegendEntry(legendEntry.get(), groupName, subGroupName); } } @@ -206,14 +186,12 @@ IOReportLegend::organizeLegend(IOReportLegendEntry *legendEntry, const OSSymbol *groupName, const OSSymbol *subGroupName) { - IOReturn res = kIOReturnError; - if (!legendEntry) { - return res = kIOReturnBadArgument; + return kIOReturnBadArgument; } if (!groupName && subGroupName) { - return res = kIOReturnBadArgument; + return kIOReturnBadArgument; } IORLEGENDLOG("IOReportLegend::organizeLegend"); @@ -239,5 +217,5 @@ IOReportLegend::organizeLegend(IOReportLegendEntry *legendEntry, // callers can now safely release legendEntry (it is part of _reportLegend) - return res = kIOReturnSuccess; + return kIOReturnSuccess; } diff --git a/iokit/Kernel/IOReporter.cpp b/iokit/Kernel/IOReporter.cpp index 7d2bf3268..ddc91b89e 100644 --- a/iokit/Kernel/IOReporter.cpp +++ b/iokit/Kernel/IOReporter.cpp @@ -26,6 +26,8 @@ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ +#define IOKIT_ENABLE_SHARED_PTR + #include #include #include "IOReporterDefs.h" @@ -36,8 +38,7 @@ #define super OSObject OSDefineMetaClassAndStructors(IOReporter, OSObject); -// be careful to retain and release as necessary -static const OSSymbol *gIOReportNoChannelName = 
OSSymbol::withCString("_NO_NAME_4"); +static OSSharedPtr gIOReportNoChannelName; // * We might someday want an IOReportManager (vs. these static funcs) @@ -52,7 +53,7 @@ IOReporter::configureAllReports(OSSet *reporters, void *destination) { IOReturn rval = kIOReturnError; - OSCollectionIterator *iterator = NULL; + OSSharedPtr iterator; if (reporters == NULL || channelList == NULL || result == NULL) { rval = kIOReturnBadArgument; @@ -91,10 +92,6 @@ IOReporter::configureAllReports(OSSet *reporters, rval = kIOReturnSuccess; finish: - if (iterator) { - iterator->release(); - } - return rval; } @@ -107,7 +104,7 @@ IOReporter::updateAllReports(OSSet *reporters, void *destination) { IOReturn rval = kIOReturnError; - OSCollectionIterator *iterator = NULL; + OSSharedPtr iterator; if (reporters == NULL || channelList == NULL || @@ -146,10 +143,6 @@ IOReporter::updateAllReports(OSSet *reporters, rval = kIOReturnSuccess; finish: - if (iterator) { - iterator->release(); - } - return rval; } @@ -170,7 +163,7 @@ IOReporter::init(IOService *reportingService, _configLock = NULL; _elements = NULL; _enableCounts = NULL; - _channelNames = NULL; + _channelNames = nullptr; if (channelType.report_format == kIOReportInvalidFormat) { IORLOG("init ERROR: Channel Type ill-defined"); @@ -187,6 +180,9 @@ IOReporter::init(IOService *reportingService, return false; } + if (channelType.nelements > INT16_MAX) { + return false; + } _channelDimension = channelType.nelements; _channelType = channelType; // FIXME: need to look up dynamically @@ -234,13 +230,17 @@ finish: /*** PUBLIC METHODS ***/ /*******************************/ +void +IOReporter::initialize(void) +{ + gIOReportNoChannelName = OSSymbol::withCString("_NO_NAME_4"); +} + // init() [possibly via init*()] must be called before free() // to ensure that _ = NULL void IOReporter::free(void) { - OSSafeReleaseNULL(_channelNames); - if (_configLock) { IOLockFree(_configLock); } @@ -274,7 +274,7 @@ IOReporter::addChannel(uint64_t channelID, const char *channelName /* = NULL */) { IOReturn res = kIOReturnError, kerr; - const OSSymbol *symChannelName = NULL; + OSSharedPtr symChannelName; int oldNChannels, newNChannels = 0, freeNChannels = 0; IORLOG("IOReporter::addChannel %llx", channelID); @@ -306,7 +306,6 @@ IOReporter::addChannel(uint64_t channelID, } else { // grab a reference to our shared global symChannelName = gIOReportNoChannelName; - symChannelName->retain(); } // allocate new buffers into _swap* variables @@ -318,7 +317,7 @@ IOReporter::addChannel(uint64_t channelID, // exchange main and _swap* buffers with buffer contents protected // IOReporter::handleAddChannelSwap() also increments _nElements, etc lockReporter(); - res = handleAddChannelSwap(channelID, symChannelName); + res = handleAddChannelSwap(channelID, symChannelName.get()); unlockReporter(); // On failure, handleAddChannelSwap() leaves *new* buffers in _swap*. // On success, it's the old buffers, so we put the right size in here. 
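/*
 * Illustrative sketch, not part of this change: with the conversion above,
 * the shared "_NO_NAME_4" symbol is created once in IOReporter::initialize()
 * and handed around through OSSharedPtr (the element type, const OSSymbol,
 * is assumed here), so addChannel() no longer pairs a manual retain() with a
 * later release() for unnamed channels.
 */
static void
unnamedChannelNameSketch(void)
{
	OSSharedPtr<const OSSymbol> name;        // empty, owns no reference
	name = gIOReportNoChannelName;           // copy: takes its own reference on the symbol
	const OSSymbol * borrowed = name.get();  // valid only while 'name' lives
	(void) borrowed;
}                                                // reference dropped when 'name' is destroyed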
@@ -329,19 +328,17 @@ IOReporter::addChannel(uint64_t channelID, finish: // free up not-in-use buffers (tracked by _swap*) handleSwapCleanup(freeNChannels); - if (symChannelName) { - symChannelName->release(); - } + unlockReporterConfig(); return res; } -IOReportLegendEntry* +OSSharedPtr IOReporter::createLegend(void) { - IOReportLegendEntry *legendEntry = NULL; + OSSharedPtr legendEntry; lockReporterConfig(); @@ -543,7 +540,7 @@ IOReporter::handleAddChannelSwap(uint64_t channel_id, _elements[_nElements + cnt].channel_id = channel_id; _elements[_nElements + cnt].provider_id = _driver_id; _elements[_nElements + cnt].channel_type = _channelType; - _elements[_nElements + cnt].channel_type.element_idx = cnt; + _elements[_nElements + cnt].channel_type.element_idx = ((int16_t) cnt); //IOREPORTER_DEBUG_ELEMENT(_swapNElements + cnt); } @@ -731,17 +728,16 @@ finish: } -IOReportLegendEntry* +OSSharedPtr IOReporter::handleCreateLegend(void) { - IOReportLegendEntry *legendEntry = NULL; - OSArray *channelIDs; + OSSharedPtr legendEntry = nullptr; + OSSharedPtr channelIDs; channelIDs = copyChannelIDs(); if (channelIDs) { - legendEntry = IOReporter::legendWith(channelIDs, _channelNames, _channelType, _unit); - channelIDs->release(); + legendEntry = IOReporter::legendWith(channelIDs.get(), _channelNames.get(), _channelType, _unit); } return legendEntry; @@ -970,17 +966,17 @@ finish: // copyChannelIDs relies on the caller to take lock -OSArray* +OSSharedPtr IOReporter::copyChannelIDs() { int cnt, cnt2; - OSArray *channelIDs = NULL; - OSNumber *tmpNum; + OSSharedPtr channelIDs; + OSSharedPtr tmpNum; channelIDs = OSArray::withCapacity((unsigned)_nChannels); if (!channelIDs) { - goto finish; + return nullptr; } for (cnt = 0; cnt < _nChannels; cnt++) { @@ -990,22 +986,19 @@ IOReporter::copyChannelIDs() tmpNum = OSNumber::withNumber(_elements[cnt2].channel_id, 64); if (!tmpNum) { IORLOG("ERROR: Could not create array of channelIDs"); - channelIDs->release(); - channelIDs = NULL; - goto finish; + return nullptr; } - channelIDs->setObject((unsigned)cnt, tmpNum); - tmpNum->release(); + channelIDs->setObject((unsigned)cnt, tmpNum.get()); + tmpNum.reset(); } -finish: return channelIDs; } // DO NOT REMOVE THIS METHOD WHICH IS THE MAIN LEGEND CREATION FUNCTION -/*static */ IOReportLegendEntry* +/*static */ OSSharedPtr IOReporter::legendWith(OSArray *channelIDs, OSArray *channelNames, IOReportChannelType channelType, @@ -1013,11 +1006,12 @@ IOReporter::legendWith(OSArray *channelIDs, { unsigned int cnt, chCnt; uint64_t type64; - OSNumber *tmpNum; + OSSharedPtr tmpNum; const OSSymbol *tmpSymbol; - OSArray *channelLegendArray = NULL, *tmpChannelArray = NULL; - OSDictionary *channelInfoDict = NULL; - IOReportLegendEntry *legendEntry = NULL; + OSSharedPtr channelLegendArray; + OSSharedPtr tmpChannelArray; + OSSharedPtr channelInfoDict; + OSSharedPtr legendEntry = nullptr; // No need to check validity of channelNames because param is optional if (!channelIDs) { @@ -1039,8 +1033,8 @@ IOReporter::legendWith(OSArray *channelIDs, if (!tmpNum) { goto finish; } - tmpChannelArray->setObject(kIOReportChannelTypeIdx, tmpNum); - tmpNum->release(); + tmpChannelArray->setObject(kIOReportChannelTypeIdx, tmpNum.get()); + tmpNum.reset(); // Encapsulate the Channel Name in OSSymbol // Use channelNames if provided @@ -1051,9 +1045,8 @@ IOReporter::legendWith(OSArray *channelIDs, } // Else, skip and leave name field empty } - channelLegendArray->setObject(cnt, tmpChannelArray); - tmpChannelArray->release(); - tmpChannelArray = NULL; + 
channelLegendArray->setObject(cnt, tmpChannelArray.get()); + tmpChannelArray.reset(); } // Stuff the legend entry only if we have channels... @@ -1066,28 +1059,17 @@ IOReporter::legendWith(OSArray *channelIDs, tmpNum = OSNumber::withNumber(unit, 64); if (tmpNum) { - channelInfoDict->setObject(kIOReportLegendUnitKey, tmpNum); - tmpNum->release(); + channelInfoDict->setObject(kIOReportLegendUnitKey, tmpNum.get()); } legendEntry = OSDictionary::withCapacity(1); if (legendEntry) { - legendEntry->setObject(kIOReportLegendChannelsKey, channelLegendArray); - legendEntry->setObject(kIOReportLegendInfoKey, channelInfoDict); + legendEntry->setObject(kIOReportLegendChannelsKey, channelLegendArray.get()); + legendEntry->setObject(kIOReportLegendInfoKey, channelInfoDict.get()); } } finish: - if (tmpChannelArray) { - tmpChannelArray->release(); - } - if (channelInfoDict) { - channelInfoDict->release(); - } - if (channelLegendArray) { - channelLegendArray->release(); - } - return legendEntry; } diff --git a/iokit/Kernel/IOService.cpp b/iokit/Kernel/IOService.cpp index 3189e590c..c99de8858 100644 --- a/iokit/Kernel/IOService.cpp +++ b/iokit/Kernel/IOService.cpp @@ -33,6 +33,7 @@ #include #include #include +#include #include #include #include @@ -103,6 +104,8 @@ OSDefineMetaClassAndStructors(_IOOpenServiceIterator, OSIterator) OSDefineMetaClassAndAbstractStructors(IONotifier, OSObject) +OSDefineMetaClassAndStructors(IOServiceCompatibility, IOService) + /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ static IOPlatformExpert * gIOPlatform; @@ -136,9 +139,13 @@ const OSSymbol * gIORematchCountKey; const OSSymbol * gIODEXTMatchCountKey; const OSSymbol * gIOSupportedPropertiesKey; const OSSymbol * gIOUserServicePropertiesKey; -#if !CONFIG_EMBEDDED +#if defined(XNU_TARGET_OS_OSX) const OSSymbol * gIOServiceLegacyMatchingRegistryIDKey; -#endif +#endif /* defined(XNU_TARGET_OS_OSX) */ + +const OSSymbol * gIOCompatibilityMatchKey; +const OSSymbol * gIOCompatibilityPropertiesKey; +const OSSymbol * gIOPathKey; const OSSymbol * gIOMapperIDKey; const OSSymbol * gIOUserClientClassKey; @@ -147,7 +154,6 @@ const OSSymbol * gIOUserClassKey; const OSSymbol * gIOUserServerClassKey; const OSSymbol * gIOUserServerNameKey; const OSSymbol * gIOUserServerTagKey; -const OSSymbol * gIOUserServerCDHashKey; const OSSymbol * gIOUserUserClientKey; const OSSymbol * gIOKitDebugKey; @@ -187,6 +193,7 @@ const OSSymbol * gIODriverKitEntitlementKey; const OSSymbol * gIODriverKitUserClientEntitlementsKey; const OSSymbol * gIODriverKitUserClientEntitlementAllowAnyKey; const OSSymbol * gIOMatchDeferKey; +const OSSymbol * gIOAllCPUInitializedKey; const OSSymbol * gIOGeneralInterest; const OSSymbol * gIOBusyInterest; @@ -217,10 +224,13 @@ static semaphore_port_t gJobsSemaphore; static IOLock * gJobsLock; static int gOutstandingJobs; static int gNumConfigThreads; +static int gHighNumConfigThreads; +static int gMaxConfigThreads = kMaxConfigThreads; static int gNumWaitingThreads; static IOLock * gIOServiceBusyLock; bool gCPUsRunning; -bool gKextdWillTerminate; +bool gIOKitWillTerminate; +bool gInUserspaceReboot; static thread_t gIOTerminateThread; static thread_t gIOTerminateWorkerThread; @@ -248,6 +258,10 @@ static IONotifier * gIOServiceNullNotifier; static uint32_t gIODextRelaunchMax = 1000; +#if DEVELOPMENT || DEBUG +uint64_t driverkit_checkin_timed_out = 0; +#endif + /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ #define LOCKREADNOTIFY() \ @@ -311,8 +325,6 @@ IOService::isInactive( void ) 
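/*
 * Illustrative sketch, not part of this change: a matching dictionary that
 * opts into the compatibility keys declared above (and created in
 * IOService::initialize()). When a table carries gIOCompatibilityMatchKey,
 * copyExistingServices() also scans IOServiceCompatibility instances and
 * matchInternal() compares against a service's kIOCompatibilityPropertiesKey
 * dictionary, both shown later in this file. "AppleSampleDriver" is a
 * hypothetical class name.
 */
OSDictionary * compatMatching = IOService::serviceMatching("AppleSampleDriver");
if (compatMatching) {
	compatMatching->setObject(gIOCompatibilityMatchKey, kOSBooleanTrue);
	IOService * found = IOService::copyMatchingService(compatMatching);
	compatMatching->release();
	if (found) {
		found->release();               // copyMatchingService() returns a retained service
	}
}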
const /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -#if defined(__i386__) || defined(__x86_64__) - // Only used by the intel implementation of // IOService::requireMaxBusStall(UInt32 ns) // IOService::requireMaxInterruptDelay(uint32_t ns) @@ -323,7 +335,10 @@ struct CpuDelayEntry { }; enum { - kCpuDelayBusStall, kCpuDelayInterrupt, + kCpuDelayBusStall, +#if defined(__x86_64__) + kCpuDelayInterrupt, +#endif /* defined(__x86_64__) */ kCpuNumDelayTypes }; @@ -340,10 +355,10 @@ requireMaxCpuDelay(IOService * service, UInt32 ns, UInt32 delayType); static IOReturn setLatencyHandler(UInt32 delayType, IOService * target, bool enable); -#endif /* defined(__i386__) || defined(__x86_64__) */ - /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ +static IOMessage sSystemPower; + namespace IOServicePH { IONotifier * fRootNotifier; @@ -404,12 +419,13 @@ IOService::initialize( void ) gIODEXTMatchCountKey = OSSymbol::withCStringNoCopy( kIODEXTMatchCountKey ); -#if !CONFIG_EMBEDDED +#if defined(XNU_TARGET_OS_OSX) gIOServiceLegacyMatchingRegistryIDKey = OSSymbol::withCStringNoCopy( kIOServiceLegacyMatchingRegistryIDKey ); -#endif +#endif /* defined(XNU_TARGET_OS_OSX) */ PE_parse_boot_argn("dextrelaunch", &gIODextRelaunchMax, sizeof(gIODextRelaunchMax)); + PE_parse_boot_argn("iocthreads", &gMaxConfigThreads, sizeof(gMaxConfigThreads)); gIOUserClientClassKey = OSSymbol::withCStringNoCopy( kIOUserClientClassKey ); @@ -418,7 +434,6 @@ IOService::initialize( void ) gIOUserServerClassKey = OSSymbol::withCStringNoCopy(kIOUserServerClassKey); gIOUserServerNameKey = OSSymbol::withCStringNoCopy(kIOUserServerNameKey); gIOUserServerTagKey = OSSymbol::withCStringNoCopy(kIOUserServerTagKey); - gIOUserServerCDHashKey = OSSymbol::withCStringNoCopy(kIOUserServerCDHashKey); gIOUserUserClientKey = OSSymbol::withCStringNoCopy(kIOUserUserClientKey); gIOResourcesKey = OSSymbol::withCStringNoCopy( kIOResourcesClass ); @@ -432,6 +447,9 @@ IOService::initialize( void ) gIOInterruptSpecifiersKey = OSSymbol::withCStringNoCopy("IOInterruptSpecifiers"); + gIOCompatibilityMatchKey = OSSymbol::withCStringNoCopy(kIOCompatibilityMatchKey); + gIOCompatibilityPropertiesKey = OSSymbol::withCStringNoCopy(kIOCompatibilityPropertiesKey); + gIOPathKey = OSSymbol::withCStringNoCopy(kIOPathKey); gIOSupportedPropertiesKey = OSSymbol::withCStringNoCopy(kIOSupportedPropertiesKey); gIOUserServicePropertiesKey = OSSymbol::withCStringNoCopy(kIOUserServicePropertiesKey); @@ -488,19 +506,24 @@ IOService::initialize( void ) gIODriverKitUserClientEntitlementsKey = OSSymbol::withCStringNoCopy( kIODriverKitUserClientEntitlementsKey ); gIODriverKitUserClientEntitlementAllowAnyKey = OSSymbol::withCStringNoCopy( kIODriverKitUserClientEntitlementAllowAnyKey ); gIOMatchDeferKey = OSSymbol::withCStringNoCopy( kIOMatchDeferKey ); + gIOAllCPUInitializedKey = OSSymbol::withCStringNoCopy( kIOAllCPUInitializedKey ); gIOPlatformFunctionHandlerSet = OSSymbol::withCStringNoCopy(kIOPlatformFunctionHandlerSet); -#if defined(__i386__) || defined(__x86_64__) sCPULatencyFunctionName[kCpuDelayBusStall] = OSSymbol::withCStringNoCopy(kIOPlatformFunctionHandlerMaxBusDelay); +#if defined(__x86_64__) sCPULatencyFunctionName[kCpuDelayInterrupt] = OSSymbol::withCStringNoCopy(kIOPlatformFunctionHandlerMaxInterruptDelay); +#endif /* defined(__x86_64__) */ uint32_t idx; for (idx = 0; idx < kCpuNumDelayTypes; idx++) { - sCPULatencySet[idx] = OSNumber::withNumber(-1U, 32); + sCPULatencySet[idx] = OSNumber::withNumber(UINT_MAX, 32); 
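/*
 * Illustrative sketch, not part of this change: with kCpuDelayInterrupt now
 * compiled only on x86_64, kCpuNumDelayTypes, and therefore the static
 * sCPULatency* tables sized by it, carry a single bus-stall slot on other
 * architectures.
 */
#if defined(__x86_64__)
static_assert(kCpuNumDelayTypes == 2, "bus-stall and interrupt-delay slots");
#else
static_assert(kCpuNumDelayTypes == 1, "bus-stall slot only");
#endif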
sCPULatencyHolder[idx] = OSNumber::withNumber(0ULL, 64); assert(sCPULatencySet[idx] && sCPULatencyHolder[idx]); } + +#if defined(__x86_64__) gIOCreateEFIDevicePathSymbol = OSSymbol::withCString("CreateEFIDevicePath"); -#endif +#endif /* defined(__x86_64__) */ + gNotificationLock = IORecursiveLockAlloc(); gAKSGetKey = OSSymbol::withCStringNoCopy(AKS_PLATFORM_FUNCTION_GETKEY); @@ -563,7 +586,7 @@ IOService::initialize( void ) /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -#if defined(__i386__) || defined(__x86_64__) +#if defined(__x86_64__) extern "C" { const char *getCpuDelayBusStallHolderName(void); const char * @@ -579,7 +602,9 @@ getCpuInterruptDelayHolderName(void) return sCPULatencyHolderName[kCpuDelayInterrupt]; } } -#endif +#endif /* defined(__x86_64__) */ + + #if IOMATCHDEBUG static UInt64 @@ -727,7 +752,9 @@ IOService::free( void ) { int i = 0; requireMaxBusStall(0); +#if defined(__x86_64__) requireMaxInterruptDelay(0); +#endif /* defined(__x86_64__) */ if (getPropertyTable()) { unregisterAllInterest(); } @@ -952,6 +979,7 @@ IOService::registerService( IOOptionBits options ) } IOInstallServicePlatformActions(this); + IOInstallServiceSleepPlatformActions(this); if ((this != gIOResources) && (kIOLogRegister & gIOKitDebug)) { @@ -1096,7 +1124,7 @@ IOService::startDeferredMatches(void) } void -IOService::kextdLaunched(void) +IOService::iokitDaemonLaunched(void) { #if !NO_KEXTD IOServiceTrace(IOSERVICE_KEXTD_READY, 0, 0, 0, 0); @@ -1379,17 +1407,18 @@ IOService::callPlatformFunction( const OSSymbol * functionName, } if (gIOPlatformFunctionHandlerSet == functionName) { -#if defined(__i386__) || defined(__x86_64__) const OSSymbol * functionHandlerName = (const OSSymbol *) param1; IOService * target = (IOService *) param2; bool enable = (param3 != NULL); if (sCPULatencyFunctionName[kCpuDelayBusStall] == functionHandlerName) { result = setLatencyHandler(kCpuDelayBusStall, target, enable); - } else if (sCPULatencyFunctionName[kCpuDelayInterrupt] == param1) { + } +#if defined(__x86_64__) + else if (sCPULatencyFunctionName[kCpuDelayInterrupt] == param1) { result = setLatencyHandler(kCpuDelayInterrupt, target, enable); } -#endif /* defined(__i386__) || defined(__x86_64__) */ +#endif /* defined(__x86_64__) */ } if ((kIOReturnUnsupported == result) && (provider = getProvider())) { @@ -1448,10 +1477,12 @@ IOService::setPlatform( IOPlatformExpert * platform) gIOResources->attachToParent( gIOServiceRoot, gIOServicePlane ); gIOUserResources->attachToParent( gIOServiceRoot, gIOServicePlane ); -#if defined(__i386__) || defined(__x86_64__) - static const char * keys[kCpuNumDelayTypes] = { - kIOPlatformMaxBusDelay, kIOPlatformMaxInterruptDelay }; + kIOPlatformMaxBusDelay, +#if defined(__x86_64__) + kIOPlatformMaxInterruptDelay +#endif /* defined(__x86_64__) */ + }; const OSObject * objs[2]; OSArray * array; uint32_t idx; @@ -1466,7 +1497,6 @@ IOService::setPlatform( IOPlatformExpert * platform) platform->setProperty(keys[idx], array); array->release(); } -#endif /* defined(__i386__) || defined(__x86_64__) */ } void @@ -1881,6 +1911,25 @@ IOService::applyToClients( IOServiceApplierFunction applier, } +static void +IOServiceApplierToBlock(IOService * next, void * context) +{ + IOServiceApplierBlock block = (IOServiceApplierBlock) context; + block(next); +} + +void +IOService::applyToProviders(IOServiceApplierBlock applier) +{ + applyToProviders(&IOServiceApplierToBlock, applier); +} + +void +IOService::applyToClients(IOServiceApplierBlock applier) +{ + 
applyToClients(&IOServiceApplierToBlock, applier); +} + /* * Client messages */ @@ -2670,7 +2719,7 @@ IOService::terminateThread( void * arg, wait_result_t waitResult ) } while (gIOTerminateWork) { - terminateWorker((uintptr_t)arg ); + terminateWorker((IOOptionBits)(uintptr_t)arg ); } gIOTerminateThread = NULL; @@ -3428,16 +3477,19 @@ IONotifyOrdering( const OSMetaClassBase * inObj1, const OSMetaClassBase * inObj2 val1 = 0; val2 = 0; - if (obj1) { val1 = obj1->priority; } - if (obj2) { val2 = obj2->priority; } - - return val1 - val2; + if (val1 > val2) { + return 1; + } + if (val1 < val2) { + return -1; + } + return 0; } static SInt32 @@ -3596,7 +3648,6 @@ IOService::invokeNotifiers(OSArray * willSend[]) return ret; } - /* * Alloc and probe matching classes, * called on the provider instance @@ -3918,7 +3969,7 @@ IOService::probeCandidates( OSOrderedSet * matches ) #if !NO_KEXTD IOLockLock(gJobsLock); matchDeferred = (gIOMatchDeferList - && (kOSBooleanTrue == inst->getProperty(gIOMatchDeferKey))); + && (kOSBooleanTrue == inst->getProperty(gIOMatchDeferKey) || gInUserspaceReboot)); if (matchDeferred && (-1U == gIOMatchDeferList->getNextIndexOfObject(this, 0))) { gIOMatchDeferList->setObject(this); } @@ -3928,7 +3979,7 @@ IOService::probeCandidates( OSOrderedSet * matches ) IOLog("%s(0x%qx): matching deferred by %s\n", getName(), getRegistryEntryID(), symbol ? symbol->getCStringNoCopy() : ""); - // rematching will occur after kextd loads all plists + // rematching will occur after the IOKit daemon loads all plists } #endif if (!matchDeferred) { @@ -4009,23 +4060,45 @@ IOService::probeCandidates( OSOrderedSet * matches ) static __attribute__((noinline, not_tail_called)) IOService * -__WAITING_FOR_USER_SERVER__(OSDictionary * matching) +__WAITING_FOR_USER_SERVER__(OSDictionary * matching, IOUserServerCheckInToken * token) { IOService * server; - server = IOService::waitForMatchingService(matching, kIOUserServerCheckInTimeoutSecs * NSEC_PER_SEC); + server = IOService::waitForMatchingServiceWithToken(matching, kIOUserServerCheckInTimeoutSecs * NSEC_PER_SEC, token); return server; } void IOService::willShutdown() { - gKextdWillTerminate = true; + gIOKitWillTerminate = true; #if !NO_KEXTD getPlatform()->waitQuiet(30 * NSEC_PER_SEC); #endif OSKext::willShutdown(); } +void +IOService::userSpaceWillReboot() +{ + IOLockLock(gJobsLock); +#if !NO_KEXTD + // Recreate the defer list if it does not exist + if (!gIOMatchDeferList) { + gIOMatchDeferList = OSArray::withCapacity( 16 ); + } +#endif + gInUserspaceReboot = true; + IOLockUnlock(gJobsLock); +} + +void +IOService::userSpaceDidReboot() +{ + IOLockLock(gJobsLock); + gInUserspaceReboot = false; + IOLockUnlock(gJobsLock); +} + /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ void @@ -4202,6 +4275,19 @@ IOServicePH::matchingEnd(IOService * service) serverAck(NULL); } +bool +IOServicePH::serverSlept(void) +{ + bool ret; + + lock(); + ret = (kIOMessageSystemWillSleep == sSystemPower) + || (kIOMessageSystemPagingOff == sSystemPower); + unlock(); + + return ret; +} + IOReturn IOServicePH::systemPowerChange( void * target, @@ -4310,14 +4396,16 @@ IOService::startCandidate( IOService * service ) IOService * server; OSNumber * serverTag; uint64_t entryID; + IOUserServerCheckInToken * token; if ((serverName = OSDynamicCast(OSString, obj))) { obj = service->copyProperty(gIOModuleIdentifierKey); bundleID = OSDynamicCast(OSString, obj); entryID = service->getRegistryEntryID(); serverTag = OSNumber::withNumber(entryID, 64); + token = NULL; 
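/*
 * Illustrative sketch, not part of this change: expected call pattern for the
 * userspace-reboot hooks above; the wrapper below and its caller are
 * hypothetical. While gInUserspaceReboot is set, probeCandidates() parks
 * matched services on gIOMatchDeferList instead of launching their dexts
 * right away.
 */
static void
userspaceRebootWindowSketch(bool entering)
{
	if (entering) {
		IOService::userSpaceWillReboot();    // sets the flag, ensures the defer list exists
	} else {
		IOService::userSpaceDidReboot();     // clears the flag again
	}
}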
- if (gKextdWillTerminate) { + if (gIOKitWillTerminate) { DKLOG("%s disabled in shutdown\n", serverName->getCStringNoCopy()); service->detach(this); OSSafeReleaseNULL(obj); @@ -4340,7 +4428,13 @@ IOService::startCandidate( IOService * service ) OSSafeReleaseNULL(prop); if (!(kIODKDisableDextLaunch & gIODKDebug)) { - OSKext::requestDaemonLaunch(bundleID, serverName, serverTag); + OSKext::requestDaemonLaunch(bundleID, serverName, serverTag, &token); + } + if (!token) { + DKLOG("%s failed to create check in token\n", serverName->getCStringNoCopy()); + service->detach(this); + OSSafeReleaseNULL(obj); + return false; } sym = OSSymbol::withString(serverName); matching = serviceMatching(gIOUserServerClassKey); @@ -4349,28 +4443,37 @@ IOService::startCandidate( IOService * service ) propertyMatching(gIOUserServerTagKey, serverTag, matching); } - server = __WAITING_FOR_USER_SERVER__(matching); + server = __WAITING_FOR_USER_SERVER__(matching, token); matching->release(); OSSafeReleaseNULL(serverTag); OSSafeReleaseNULL(serverName); userServer = OSDynamicCast(IOUserServer, server); if (!userServer) { + token->release(); service->detach(this); IOServicePH::matchingEnd(this); + OSSafeReleaseNULL(obj); DKLOG(DKS " user server timeout\n", DKN(service)); +#if DEVELOPMENT || DEBUG + driverkit_checkin_timed_out = mach_absolute_time(); +#endif return false; } - if (!(kIODKDisableCDHashChecking & gIODKDebug)) { - if (!userServer->serviceMatchesCDHash(service)) { + if (!(kIODKDisableCheckInTokenVerification & gIODKDebug)) { + if (!userServer->serviceMatchesCheckInToken(token)) { + token->release(); service->detach(this); IOServicePH::matchingEnd(this); - userServer->exit("CDHash check failed"); + OSSafeReleaseNULL(obj); + userServer->exit("Check In Token verification failed"); userServer->release(); return false; } } + token->release(); + OSKext *kext = OSKext::lookupKextWithIdentifier(bundleID); if (!kext) { const char *name = bundleID->getCStringNoCopy(); @@ -4427,6 +4530,11 @@ skip_log: userServer->serviceStarted(service, this, ok); userServer->release(); } + + if (ok) { + IOInstallServiceSleepPlatformActions(service); + } + if (!ok) { service->detach( this ); } @@ -4562,6 +4670,7 @@ IOService::checkResources( void ) { OSObject * resourcesProp; OSSet * set; + OSObject * obj; OSIterator * iter; bool ok; @@ -4573,8 +4682,8 @@ IOService::checkResources( void ) if ((set = OSDynamicCast( OSSet, resourcesProp ))) { iter = OSCollectionIterator::withCollection( set ); ok = (NULL != iter); - while (ok && (resourcesProp = iter->getNextObject())) { - ok = checkResource( resourcesProp ); + while (ok && (obj = iter->getNextObject())) { + ok = checkResource( obj ); } if (iter) { iter->release(); @@ -4590,7 +4699,7 @@ IOService::checkResources( void ) void -_IOConfigThread::configThread( int configThreadId ) +_IOConfigThread::configThread( const char * name ) { _IOConfigThread * inst; @@ -4607,7 +4716,7 @@ _IOConfigThread::configThread( int configThreadId ) } char threadName[MAXTHREADNAMESIZE]; - snprintf(threadName, sizeof(threadName), "IOConfigThread_%d", configThreadId); + snprintf(threadName, sizeof(threadName), "IOConfigThread_'%s'", name); thread_set_thread_name(thread, threadName); thread_deallocate(thread); @@ -4904,8 +5013,8 @@ IOService::waitQuiet( uint64_t timeout ) size_t panicStringLen; uint64_t time; uint64_t nano; - bool kextdWait; - bool dopanic; + bool pendingRequests; + bool dopanic = false; #if KASAN /* @@ -4915,12 +5024,17 @@ IOService::waitQuiet( uint64_t timeout ) * kasan kexts loaded and started. 
*/ enum { kTimeoutExtensions = 8 }; +#define WITH_IOWAITQUIET_EXTENSIONS 1 +#elif XNU_TARGET_OS_OSX && defined(__arm64__) + enum { kTimeoutExtensions = 1 }; +#define WITH_IOWAITQUIET_EXTENSIONS 0 #else enum { kTimeoutExtensions = 4 }; +#define WITH_IOWAITQUIET_EXTENSIONS 1 #endif time = mach_absolute_time(); - kextdWait = false; + pendingRequests = false; for (loops = 0; loops < kTimeoutExtensions; loops++) { ret = waitForState( kIOServiceBusyStateMask, 0, timeout ); @@ -4932,7 +5046,7 @@ IOService::waitQuiet( uint64_t timeout ) break; } else if (kIOReturnTimeout != ret) { break; - } else if (timeout < (4100ull * NSEC_PER_SEC)) { + } else if (timeout < (41ull * NSEC_PER_SEC)) { break; } @@ -4954,7 +5068,7 @@ IOService::waitQuiet( uint64_t timeout ) panicString = IONew(char, panicStringLen); } set = NULL; - kextdWait = OSKext::isWaitingKextd(); + pendingRequests = OSKext::pendingIOKitDaemonRequests(); iter = IORegistryIterator::iterateOver(this, gIOServicePlane, kIORegistryIterateRecursively); leaves = OSOrderedSet::withCapacity(4); if (iter) { @@ -4966,7 +5080,7 @@ IOService::waitQuiet( uint64_t timeout ) while ((next = (IOService *) set->getLastObject())) { if (next->getBusyState()) { if (kIOServiceModuleStallState & next->__state[1]) { - kextdWait = true; + pendingRequests = true; } leaves->setObject(next); nextParent = next; @@ -4993,10 +5107,13 @@ IOService::waitQuiet( uint64_t timeout ) OSSafeReleaseNULL(iter); } - dopanic = ((loops >= (kTimeoutExtensions - 1)) && (kIOWaitQuietPanics & gIOKitDebug)); + dopanic = (kIOWaitQuietPanics & gIOKitDebug); +#if WITH_IOWAITQUIET_EXTENSIONS + dopanic = (dopanic && (loops >= (kTimeoutExtensions - 1))); +#endif snprintf(panicString, panicStringLen, "%s[%d], (%llds): %s", - kextdWait ? "kextd stall" : "busy timeout", + pendingRequests ? "IOKit Daemon (" kIOKitDaemonName ") stall" : "busy timeout", loops, timeout / 1000000000ULL, string ? 
string : ""); IOLog("%s\n", panicString); @@ -5043,6 +5160,13 @@ IOService::serializeProperties( OSSerialize * s ) const return super::serializeProperties(s); } +void +IOService::resetRematchProperties() +{ + removeProperty(gIORematchCountKey); + removeProperty(gIORematchPersonalityKey); +} + void _IOConfigThread::main(void * arg, wait_result_t result) @@ -5169,24 +5293,33 @@ _IOServiceJob::pingConfig( _IOServiceJob * job ) { int count; bool create; + IOService * nub; assert( job ); + nub = job->nub; IOTakeLock( gJobsLock ); gOutstandingJobs++; - gJobs->setLastObject( job ); + if (nub == gIOResources) { + gJobs->setFirstObject( job ); + } else { + gJobs->setLastObject( job ); + } count = gNumWaitingThreads; // if( gNumConfigThreads) count++;// assume we're called from a config thread create = ((gOutstandingJobs > count) - && ((gNumConfigThreads < kMaxConfigThreads) - || (job->nub == gIOResources) + && ((gNumConfigThreads < gMaxConfigThreads) + || (nub == gIOResources) || !gCPUsRunning)); if (create) { gNumConfigThreads++; gNumWaitingThreads++; + if (gNumConfigThreads > gHighNumConfigThreads) { + gHighNumConfigThreads = gNumConfigThreads; + } } IOUnlock( gJobsLock ); @@ -5197,7 +5330,7 @@ _IOServiceJob::pingConfig( _IOServiceJob * job ) if (gIOKitDebug & kIOLogConfig) { LOG("config(%d): creating\n", gNumConfigThreads - 1); } - _IOConfigThread::configThread(gNumConfigThreads - 1); + _IOConfigThread::configThread(nub->getName()); } semaphore_signal( gJobsSemaphore ); @@ -5230,9 +5363,11 @@ IOService::instanceMatch(const OSObject * entry, void * context) if (!match) { break; } - ctx->count += table->getCount(); match = service->matchInternal(table, options, &done); - ctx->done += done; + if (match) { + ctx->count += table->getCount(); + ctx->done += done; + } }while (false); if (!match) { return false; @@ -5286,6 +5421,8 @@ IOService::copyExistingServices( OSDictionary * matching, } } else { IOServiceMatchContext ctx; + + options |= kIOServiceClassDone; ctx.table = matching; ctx.state = inState; ctx.count = 0; @@ -5301,13 +5438,15 @@ IOService::copyExistingServices( OSDictionary * matching, IOService::gMetaClass.applyToInstances(instanceMatch, &ctx); } + if (((!(options & kIONotifyOnce) || !ctx.result)) + && matching->getObject(gIOCompatibilityMatchKey)) { + IOServiceCompatibility::gMetaClass.applyToInstances(instanceMatch, &ctx); + } current = ctx.result; - - options |= kIOServiceInternalDone | kIOServiceClassDone; + options |= kIOServiceInternalDone; if (current && (ctx.done != ctx.count)) { - OSSet * - source = OSDynamicCast(OSSet, current); + OSSet * source = OSDynamicCast(OSSet, current); current = NULL; while ((service = (IOService *) source->getAnyObject())) { if (service->matchPassive(matching, options)) { @@ -5554,6 +5693,7 @@ IOService::installNotification(const OSSymbol * type, OSDictionary * matching, return result; } + #endif /* !defined(__LP64__) */ @@ -5675,6 +5815,15 @@ IOService::addMatchingNotification( return notify; } +void +IOService::userServerCheckInTokenNotificationHandler( + __unused IOUserServerCheckInToken *token, + void *ref) +{ + LOCKWRITENOTIFY(); + WAKEUPNOTIFY(ref); + UNLOCKNOTIFY(); +} bool IOService::syncNotificationHandler( @@ -5694,8 +5843,9 @@ IOService::syncNotificationHandler( } IOService * -IOService::waitForMatchingService( OSDictionary * matching, - uint64_t timeout) +IOService::waitForMatchingServiceWithToken( OSDictionary * matching, + uint64_t timeout, + IOUserServerCheckInToken * checkInToken) { IONotifier * notify = NULL; // priority doesn't help 
us much since we need a thread wakeup @@ -5708,8 +5858,48 @@ IOService::waitForMatchingService( OSDictionary * matching, result = NULL; +#if DEBUG || DEVELOPMENT + char currentName[MAXTHREADNAMESIZE]; + char newName[MAXTHREADNAMESIZE]; + OSObject * obj; + OSString * str; + OSDictionary * dict; + + currentName[0] = '\0'; + if (thread_has_thread_name(current_thread())) { + dict = matching; + obj = matching->getObject(gIOPropertyMatchKey); + if ((dict = OSDynamicCast(OSDictionary, obj))) { + OSObject * result __block = NULL; + dict->iterateObjects(^bool (const OSSymbol * sym, OSObject * value) { + result = __DECONST(OSObject *, sym); + return true; + }); + obj = result; + } + if (!obj) { + obj = matching->getObject(gIOResourceMatchKey); + } + if (!obj) { + obj = matching->getObject(gIONameMatchKey); + } + if (!obj) { + obj = matching->getObject(gIOProviderClassKey); + } + if ((str = OSDynamicCast(OSString, obj))) { + thread_get_thread_name(current_thread(), currentName); + snprintf(newName, sizeof(newName), "Waiting_'%s'", str->getCStringNoCopy()); + thread_set_thread_name(current_thread(), newName); + } + } +#endif /* DEBUG || DEVELOPMENT */ + LOCKWRITENOTIFY(); do{ + if (checkInToken) { + checkInToken->setNoSendersNotification(&IOService::userServerCheckInTokenNotificationHandler, + &result); + } result = (IOService *) copyExistingServices( matching, kIOServiceMatchedState, kIONotifyOnce ); if (result) { @@ -5733,12 +5923,30 @@ IOService::waitForMatchingService( OSDictionary * matching, UNLOCKNOTIFY(); +#if DEBUG || DEVELOPMENT + if (currentName[0]) { + thread_set_thread_name(current_thread(), currentName); + } +#endif /* DEBUG || DEVELOPMENT */ + if (notify) { notify->remove(); // dequeues } + + if (checkInToken) { + checkInToken->clearNotification(); + } + return result; } +IOService * +IOService::waitForMatchingService( OSDictionary * matching, + uint64_t timeout) +{ + return IOService::waitForMatchingServiceWithToken(matching, timeout, NULL); +} + IOService * IOService::waitForService( OSDictionary * matching, mach_timespec_t * timeout ) @@ -6269,14 +6477,13 @@ IOService::consoleLockTimer(thread_call_param_t p0, thread_call_param_t p1) } void -IOService::updateConsoleUsers(OSArray * consoleUsers, IOMessage systemMessage) +IOService::updateConsoleUsers(OSArray * consoleUsers, IOMessage systemMessage, bool afterUserspaceReboot) { IORegistryEntry * regEntry; OSObject * locked = kOSBooleanFalse; uint32_t idx; bool publish; OSDictionary * user; - static IOMessage sSystemPower; clock_sec_t now = 0; clock_usec_t microsecs; @@ -6329,7 +6536,7 @@ IOService::updateConsoleUsers(OSArray * consoleUsers, IOMessage systemMessage) } } #if HIBERNATION - if (!loginLocked) { + if (!loginLocked || afterUserspaceReboot) { gIOConsoleBooterLockState = NULL; } IOLog("IOConsoleUsers: time(%d) %ld->%d, lin %d, llk %d, \n", @@ -6342,7 +6549,14 @@ IOService::updateConsoleUsers(OSArray * consoleUsers, IOMessage systemMessage) if (!gIOConsoleLoggedIn || (kIOMessageSystemWillSleep == sSystemPower) || (kIOMessageSystemPagingOff == sSystemPower)) { - locked = kOSBooleanTrue; + if (afterUserspaceReboot) { + // set "locked" to false after a user space reboot + // because the reboot happens directly after a user + // logs into the machine via fvunlock mode. 
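/*
 * Illustrative sketch, not part of this change: existing callers are
 * unaffected by the check-in token plumbing above because
 * waitForMatchingService() now simply forwards with a NULL token. The
 * matching dictionary below is an arbitrary example; the returned service
 * carries a reference the caller must release.
 */
static void
waitForResourcesSketch(void)
{
	OSDictionary * matching = IOService::serviceMatching("IOResources");
	if (!matching) {
		return;
	}
	IOService * svc = IOService::waitForMatchingService(matching, 30 * NSEC_PER_SEC);
	matching->release();                     // not consumed by waitForMatchingService()
	if (svc) {
		// the service is known to be in the matched state here
		svc->release();
	}
}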
+ locked = kOSBooleanFalse; + } else { + locked = kOSBooleanTrue; + } } #if HIBERNATION else if (gIOConsoleBooterLockState) { @@ -6353,7 +6567,15 @@ IOService::updateConsoleUsers(OSArray * consoleUsers, IOMessage systemMessage) clock_get_calendar_microtime(&now, µsecs); if (gIOConsoleLockTime > now) { AbsoluteTime deadline; - clock_interval_to_deadline(gIOConsoleLockTime - now, kSecondScale, &deadline); + clock_sec_t interval; + uint32_t interval32; + + interval = (gIOConsoleLockTime - now); + interval32 = (uint32_t) interval; + if (interval32 != interval) { + interval32 = UINT_MAX; + } + clock_interval_to_deadline(interval32, kSecondScale, &deadline); thread_call_enter_delayed(gIOConsoleLockCallout, deadline); } else { locked = kOSBooleanTrue; @@ -6410,9 +6632,11 @@ IOResources::setProperties( OSObject * properties ) OSDictionary * dict; OSCollectionIterator * iter; - err = IOUserClient::clientHasPrivilege(current_task(), kIOClientPrivilegeAdministrator); - if (kIOReturnSuccess != err) { - return err; + if (!IOTaskHasEntitlement(current_task(), kIOResourcesSetPropertyKey)) { + err = IOUserClient::clientHasPrivilege(current_task(), kIOClientPrivilegeAdministrator); + if (kIOReturnSuccess != err) { + return err; + } } dict = OSDynamicCast(OSDictionary, properties); @@ -6496,6 +6720,12 @@ IOService::compareProperty( OSDictionary * matching, return ok; } +#ifndef __clang_analyzer__ +// Implementation of this function is hidden from the static analyzer. +// The analyzer was worried about this function's confusing contract over +// the 'keys' parameter. The contract is to either release it or not release it +// depending on whether 'matching' is non-null. Such contracts are discouraged +// but changing it now would break compatibility. bool IOService::compareProperties( OSDictionary * matching, OSCollection * keys ) @@ -6521,6 +6751,7 @@ IOService::compareProperties( OSDictionary * matching, return ok; } +#endif // __clang_analyzer__ /* Helper to add a location matching dict to the table */ @@ -6569,6 +6800,7 @@ IOService::matchInternal(OSDictionary * table, uint32_t options, uint32_t * did) OSString * matched; OSObject * obj; OSString * str; + OSDictionary * matchProps; IORegistryEntry * entry; OSNumber * num; bool match = true; @@ -6579,11 +6811,26 @@ IOService::matchInternal(OSDictionary * table, uint32_t options, uint32_t * did) do{ count = table->getCount(); done = 0; + matchProps = NULL; + + if (table->getObject(gIOCompatibilityMatchKey)) { + done++; + obj = copyProperty(gIOCompatibilityPropertiesKey); + matchProps = OSDynamicCast(OSDictionary, obj); + if (!matchProps) { + OSSafeReleaseNULL(obj); + } + } str = OSDynamicCast(OSString, table->getObject(gIOProviderClassKey)); if (str) { done++; - match = ((kIOServiceClassDone & options) || (NULL != metaCast(str))); + if (matchProps && (obj = matchProps->getObject(gIOClassKey))) { + match = str->isEqualTo(obj); + } else { + match = ((kIOServiceClassDone & options) || (NULL != metaCast(str))); + } + #if MATCH_DEBUG match = (0 != metaCast( str )); if ((kIOServiceClassDone & options) && !match) { @@ -6629,13 +6876,14 @@ IOService::matchInternal(OSDictionary * table, uint32_t options, uint32_t * did) obj = table->getObject( gIOPropertyMatchKey ); if (obj) { - OSDictionary * dict; OSDictionary * nextDict; OSIterator * iter; done++; match = false; - dict = dictionaryWithProperties(); - if (dict) { + if (!matchProps) { + matchProps = dictionaryWithProperties(); + } + if (matchProps) { nextDict = OSDynamicCast( OSDictionary, obj); if (nextDict) { 
iter = NULL; @@ -6647,13 +6895,12 @@ IOService::matchInternal(OSDictionary * table, uint32_t options, uint32_t * did) while (nextDict || (iter && (NULL != (nextDict = OSDynamicCast(OSDictionary, iter->getNextObject()))))) { - match = dict->isEqualTo( nextDict, nextDict); + match = matchProps->isEqualTo( nextDict, nextDict); if (match) { break; } nextDict = NULL; } - dict->release(); if (iter) { iter->release(); } @@ -6665,13 +6912,14 @@ IOService::matchInternal(OSDictionary * table, uint32_t options, uint32_t * did) obj = table->getObject( gIOPropertyExistsMatchKey ); if (obj) { - OSDictionary * dict; OSString * nextKey; OSIterator * iter; done++; match = false; - dict = dictionaryWithProperties(); - if (dict) { + if (!matchProps) { + matchProps = dictionaryWithProperties(); + } + if (matchProps) { nextKey = OSDynamicCast( OSString, obj); if (nextKey) { iter = NULL; @@ -6683,13 +6931,12 @@ IOService::matchInternal(OSDictionary * table, uint32_t options, uint32_t * did) while (nextKey || (iter && (NULL != (nextKey = OSDynamicCast(OSString, iter->getNextObject()))))) { - match = (NULL != dict->getObject(nextKey)); + match = (NULL != matchProps->getObject(nextKey)); if (match) { break; } nextKey = NULL; } - dict->release(); if (iter) { iter->release(); } @@ -6707,6 +6954,9 @@ IOService::matchInternal(OSDictionary * table, uint32_t options, uint32_t * did) if (entry) { entry->release(); } + if (!match && matchProps && (obj = matchProps->getObject(gIOPathKey))) { + match = str->isEqualTo(obj); + } if ((!match) || (done == count)) { break; } @@ -6765,6 +7015,8 @@ IOService::matchInternal(OSDictionary * table, uint32_t options, uint32_t * did) #undef propMatch }while (false); + OSSafeReleaseNULL(matchProps); + if (did) { *did = done; } @@ -6791,10 +7043,10 @@ IOService::matchPassive(OSDictionary * table, uint32_t options) assert( table ); -#if !CONFIG_EMBEDDED +#if defined(XNU_TARGET_OS_OSX) OSArray* aliasServiceRegIds = NULL; IOService* foundAlternateService = NULL; -#endif +#endif /* defined(XNU_TARGET_OS_OSX) */ #if MATCH_DEBUG OSDictionary * root = table; @@ -6867,7 +7119,7 @@ IOService::matchPassive(OSDictionary * table, uint32_t options) } if (matchParent == true) { -#if !CONFIG_EMBEDDED +#if defined(XNU_TARGET_OS_OSX) // check if service has an alias to search its other "parents" if a parent match isn't found OSObject * prop = where->copyProperty(gIOServiceLegacyMatchingRegistryIDKey); OSNumber * alternateRegistryID = OSDynamicCast(OSNumber, prop); @@ -6878,13 +7130,13 @@ IOService::matchPassive(OSDictionary * table, uint32_t options) aliasServiceRegIds->setObject(alternateRegistryID); } OSSafeReleaseNULL(prop); -#endif +#endif /* defined(XNU_TARGET_OS_OSX) */ } else { break; } where = where->getProvider(); -#if !CONFIG_EMBEDDED +#if defined(XNU_TARGET_OS_OSX) if (where == NULL) { // there were no matching parent services, check to see if there are aliased services that have a matching parent if (aliasServiceRegIds != NULL) { @@ -6906,13 +7158,13 @@ IOService::matchPassive(OSDictionary * table, uint32_t options) } } } -#endif +#endif /* defined(XNU_TARGET_OS_OSX) */ }while (where != NULL); -#if !CONFIG_EMBEDDED +#if defined(XNU_TARGET_OS_OSX) OSSafeReleaseNULL(foundAlternateService); OSSafeReleaseNULL(aliasServiceRegIds); -#endif +#endif /* defined(XNU_TARGET_OS_OSX) */ #if MATCH_DEBUG if (where != this) { @@ -6999,6 +7251,17 @@ IOService::newUserClient( task_t owningTask, void * securityID, return kIOReturnSuccess; } +IOReturn +IOService::newUserClient( task_t owningTask, void * 
securityID, + UInt32 type, OSDictionary * properties, + OSSharedPtr& handler ) +{ + IOUserClient* handlerRaw = NULL; + IOReturn result = newUserClient(owningTask, securityID, type, properties, &handlerRaw); + handler.reset(handlerRaw, OSNoRetain); + return result; +} + IOReturn IOService::newUserClient( task_t owningTask, void * securityID, UInt32 type, IOUserClient ** handler ) @@ -7006,12 +7269,29 @@ IOService::newUserClient( task_t owningTask, void * securityID, return kIOReturnUnsupported; } +IOReturn +IOService::newUserClient( task_t owningTask, void * securityID, + UInt32 type, OSSharedPtr& handler ) +{ + IOUserClient* handlerRaw = nullptr; + IOReturn result = IOService::newUserClient(owningTask, securityID, type, &handlerRaw); + handler.reset(handlerRaw, OSNoRetain); + return result; +} + + IOReturn IOService::requestProbe( IOOptionBits options ) { return kIOReturnUnsupported; } +bool +IOService::hasUserServer() const +{ + return reserved && reserved->uvars && reserved->uvars->userServer; +} + /* * Convert an IOReturn to text. Subclasses which add additional * IOReturn's should override this method and call @@ -7254,32 +7534,6 @@ IOService::setDeviceMemory( OSArray * array ) setProperty( gIODeviceMemoryKey, array); } -/* - * For machines where the transfers on an I/O bus can stall because - * the CPU is in an idle mode, These APIs allow a driver to specify - * the maximum bus stall that they can handle. 0 indicates no limit. - */ -void -IOService:: -setCPUSnoopDelay(UInt32 __unused ns) -{ -#if defined(__i386__) || defined(__x86_64__) - ml_set_maxsnoop(ns); -#endif /* defined(__i386__) || defined(__x86_64__) */ -} - -UInt32 -IOService:: -getCPUSnoopDelay() -{ -#if defined(__i386__) || defined(__x86_64__) - return ml_get_maxsnoop(); -#else - return 0; -#endif /* defined(__i386__) || defined(__x86_64__) */ -} - -#if defined(__i386__) || defined(__x86_64__) static void requireMaxCpuDelay(IOService * service, UInt32 ns, UInt32 delayType) { @@ -7352,10 +7606,15 @@ requireMaxCpuDelay(IOService * service, UInt32 ns, UInt32 delayType) // Must be safe to call from locked context if (delayType == kCpuDelayBusStall) { +#if defined(__x86_64__) ml_set_maxbusdelay(ns); - } else if (delayType == kCpuDelayInterrupt) { +#endif /* defined(__x86_64__) */ + } +#if defined(__x86_64__) + else if (delayType == kCpuDelayInterrupt) { ml_set_maxintdelay(ns); } +#endif /* defined(__x86_64__) */ sCPULatencyHolder[delayType]->setValue(holder ? 
holder->getRegistryEntryID() : 0); sCPULatencySet[delayType]->setValue(ns); @@ -7430,24 +7689,36 @@ setLatencyHandler(UInt32 delayType, IOService * target, bool enable) return result; } -#endif /* defined(__i386__) || defined(__x86_64__) */ - -void -IOService:: -requireMaxBusStall(UInt32 __unused ns) -{ -#if defined(__i386__) || defined(__x86_64__) +IOReturn +IOService::requireMaxBusStall(UInt32 ns) +{ +#if !defined(__x86_64__) + switch (ns) { + case kIOMaxBusStall40usec: + case kIOMaxBusStall30usec: + case kIOMaxBusStall25usec: + case kIOMaxBusStall20usec: + case kIOMaxBusStall10usec: + case kIOMaxBusStall5usec: + case kIOMaxBusStallNone: + break; + default: + return kIOReturnBadArgument; + } +#endif /* !defined(__x86_64__) */ requireMaxCpuDelay(this, ns, kCpuDelayBusStall); -#endif + return kIOReturnSuccess; } -void -IOService:: -requireMaxInterruptDelay(uint32_t __unused ns) +IOReturn +IOService::requireMaxInterruptDelay(uint32_t ns) { -#if defined(__i386__) || defined(__x86_64__) +#if defined(__x86_64__) requireMaxCpuDelay(this, ns, kCpuDelayInterrupt); -#endif + return kIOReturnSuccess; +#else /* defined(__x86_64__) */ + return kIOReturnUnsupported; +#endif /* defined(__x86_64__) */ } /* @@ -7461,7 +7732,7 @@ IOService::resolveInterrupt(IOService *nub, int source) OSArray *array; OSData *data; OSSymbol *interruptControllerName; - long numSources; + unsigned int numSources; IOInterruptSource *interruptSources; // Get the parents list from the nub. @@ -8014,8 +8285,8 @@ IOService::setAuthorizationID( uint64_t authorizationID ) #if __LP64__ -OSMetaClassDefineReservedUsed(IOService, 0); -OSMetaClassDefineReservedUsed(IOService, 1); +OSMetaClassDefineReservedUsedX86(IOService, 0); +OSMetaClassDefineReservedUsedX86(IOService, 1); OSMetaClassDefineReservedUnused(IOService, 2); OSMetaClassDefineReservedUnused(IOService, 3); OSMetaClassDefineReservedUnused(IOService, 4); @@ -8023,14 +8294,14 @@ OSMetaClassDefineReservedUnused(IOService, 5); OSMetaClassDefineReservedUnused(IOService, 6); OSMetaClassDefineReservedUnused(IOService, 7); #else -OSMetaClassDefineReservedUsed(IOService, 0); -OSMetaClassDefineReservedUsed(IOService, 1); -OSMetaClassDefineReservedUsed(IOService, 2); -OSMetaClassDefineReservedUsed(IOService, 3); -OSMetaClassDefineReservedUsed(IOService, 4); -OSMetaClassDefineReservedUsed(IOService, 5); -OSMetaClassDefineReservedUsed(IOService, 6); -OSMetaClassDefineReservedUsed(IOService, 7); +OSMetaClassDefineReservedUsedX86(IOService, 0); +OSMetaClassDefineReservedUsedX86(IOService, 1); +OSMetaClassDefineReservedUsedX86(IOService, 2); +OSMetaClassDefineReservedUsedX86(IOService, 3); +OSMetaClassDefineReservedUsedX86(IOService, 4); +OSMetaClassDefineReservedUsedX86(IOService, 5); +OSMetaClassDefineReservedUsedX86(IOService, 6); +OSMetaClassDefineReservedUsedX86(IOService, 7); #endif OSMetaClassDefineReservedUnused(IOService, 8); OSMetaClassDefineReservedUnused(IOService, 9); diff --git a/iokit/Kernel/IOServicePM.cpp b/iokit/Kernel/IOServicePM.cpp index 5ef79da20..7736e09cd 100644 --- a/iokit/Kernel/IOServicePM.cpp +++ b/iokit/Kernel/IOServicePM.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998-2016 Apple Inc. All rights reserved. + * Copyright (c) 1998-2020 Apple Inc. All rights reserved. 
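/*
 * Illustrative sketch, not part of this change: requireMaxBusStall() now
 * returns an IOReturn, and outside x86_64 only the predefined
 * kIOMaxBusStall* constants are accepted. The calls below would sit in a
 * hypothetical driver's own methods (e.g. start() and stop()).
 */
IOReturn stallRet = requireMaxBusStall(kIOMaxBusStall25usec);   // request a 25 usec ceiling
assert(kIOReturnSuccess == stallRet);
// and when the constraint is no longer needed:
stallRet = requireMaxBusStall(kIOMaxBusStallNone);              // drop the requirement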
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -32,6 +32,7 @@ #include #include #include +#include #include #include #include @@ -57,11 +58,13 @@ #include "IOServicePMPrivate.h" #include "IOKitKernelInternal.h" - +#if USE_SETTLE_TIMER static void settle_timer_expired(thread_call_param_t, thread_call_param_t); +#endif static void idle_timer_expired(thread_call_param_t, thread_call_param_t); static void tellKernelClientApplier(OSObject * object, void * arg); static void tellAppClientApplier(OSObject * object, void * arg); +static const char * getNotificationPhaseString(uint32_t phase); static uint64_t computeTimeDeltaNS( const AbsoluteTime * start ) @@ -96,6 +99,14 @@ static IOPMRequest * gIOPMRequest = NULL; static IOService * gIOPMRootNode = NULL; static IOPlatformExpert * gPlatform = NULL; +// log setPowerStates and powerStateChange longer than (ns): +static uint64_t gIOPMSetPowerStateLogNS = +#if defined(__i386__) || defined(__x86_64__) + (300ULL * 1000ULL * 1000ULL) +#else + (50ULL * 1000ULL * 1000ULL) +#endif +; const OSSymbol * gIOPMPowerClientDevice = NULL; const OSSymbol * gIOPMPowerClientDriver = NULL; @@ -118,17 +129,6 @@ getPMRequestType( void ) return type; } -static IOPMRequestTag -getPMRequestTag( void ) -{ - IOPMRequestTag tag = 0; - if (gIOPMRequest && - (gIOPMRequest->getType() == kIOPMRequestTypeRequestPowerStateOverride)) { - tag = gIOPMRequest->fRequestTag; - } - return tag; -} - SYSCTL_UINT(_kern, OID_AUTO, pmtimeout, CTLFLAG_RW | CTLFLAG_LOCKED, &gCanSleepTimeout, 0, "Power Management Timeout"); //****************************************************************************** @@ -172,11 +172,11 @@ do { \ #define ns_per_us 1000 #define k30Seconds (30*us_per_s) #define k5Seconds ( 5*us_per_s) -#if CONFIG_EMBEDDED +#if !defined(XNU_TARGET_OS_OSX) #define kCanSleepMaxTimeReq k5Seconds -#else +#else /* defined(XNU_TARGET_OS_OSX) */ #define kCanSleepMaxTimeReq k30Seconds -#endif +#endif /* defined(XNU_TARGET_OS_OSX) */ #define kMaxTimeRequested k30Seconds #define kMinAckTimeoutTicks (10*1000000) #define kIOPMTardyAckSPSKey "IOPMTardyAckSetPowerState" @@ -225,12 +225,6 @@ do { \ #define IS_POWER_DROP (StateOrder(fHeadNotePowerState) < StateOrder(fCurrentPowerState)) #define IS_POWER_RISE (StateOrder(fHeadNotePowerState) > StateOrder(fCurrentPowerState)) -// log setPowerStates longer than (ns): -#if defined(__i386__) || defined(__x86_64__) -#define LOG_SETPOWER_TIMES (300ULL * 1000ULL * 1000ULL) -#else -#define LOG_SETPOWER_TIMES (50ULL * 1000ULL * 1000ULL) -#endif // log app responses longer than (ns): #define LOG_APP_RESPONSE_TIMES (100ULL * 1000ULL * 1000ULL) // use message tracer to log messages longer than (ns): @@ -253,18 +247,17 @@ enum { fMachineState = fSavedMachineState; \ fSavedMachineState = kIOPM_BadMachineState; } while (false) -#define PM_ACTION_0(a) \ +#define PM_ACTION_TICKLE(a) \ do { if (fPMActions.a) { \ (fPMActions.a)(fPMActions.target, this, &fPMActions); } \ } while (false) -#define PM_ACTION_2(a, x, y) \ +#define PM_ACTION_CHANGE(a, x, y) \ do { if (fPMActions.a) { \ - (fPMActions.a)(fPMActions.target, this, &fPMActions, x, y, \ - getPMRequestTag()); } \ + (fPMActions.a)(fPMActions.target, this, &fPMActions, gIOPMRequest, x, y); } \ } while (false) -#define PM_ACTION_3(a, x, y, z) \ +#define PM_ACTION_CLIENT(a, x, y, z) \ do { if (fPMActions.a) { \ (fPMActions.a)(fPMActions.target, this, &fPMActions, x, y, z); } \ } while (false) @@ -400,6 +393,13 @@ IOService::PMinit( void ) if (gIOPMRequestQueue && gIOPMReplyQueue && gIOPMCompletionQueue) { 
gIOPMInitialized = true; } + +#if (DEVELOPMENT || DEBUG) + uint32_t setPowerStateLogMS = 0; + if (PE_parse_boot_argn("setpowerstate_log", &setPowerStateLogMS, sizeof(setPowerStateLogMS))) { + gIOPMSetPowerStateLogNS = setPowerStateLogMS * 1000000ULL; + } +#endif } if (!gIOPMInitialized) { return; @@ -461,8 +461,10 @@ IOService::PMinit( void ) fAckTimer = thread_call_allocate( &IOService::ack_timer_expired, (thread_call_param_t)this); +#if USE_SETTLE_TIMER fSettleTimer = thread_call_allocate( &settle_timer_expired, (thread_call_param_t)this); +#endif fIdleTimer = thread_call_allocate( &idle_timer_expired, (thread_call_param_t)this); fDriverCallEntry = thread_call_allocate( @@ -529,12 +531,13 @@ IOService::PMfree( void ) fBlockedArray->release(); fBlockedArray = NULL; } - +#if USE_SETTLE_TIMER if (fSettleTimer) { thread_call_cancel(fSettleTimer); thread_call_free(fSettleTimer); fSettleTimer = NULL; } +#endif if (fAckTimer) { thread_call_cancel(fAckTimer); thread_call_free(fAckTimer); @@ -574,6 +577,10 @@ IOService::PMfree( void ) fNotifyClientArray->release(); fNotifyClientArray = NULL; } + if (fReportBuf && fNumberOfPowerStates) { + IOFree(fReportBuf, STATEREPORT_BUFSIZE(fNumberOfPowerStates)); + fReportBuf = NULL; + } if (fPowerStates && fNumberOfPowerStates) { IODelete(fPowerStates, IOPMPSEntry, fNumberOfPowerStates); fNumberOfPowerStates = 0; @@ -929,8 +936,8 @@ IOService::addPowerChild2( IOPMRequest * request ) IOService * parent; IOPMPowerFlags powerFlags; bool knowsState; - unsigned long powerState; - unsigned long tempDesire; + IOPMPowerStateIndex powerState; + IOPMPowerStateIndex tempDesire; PM_ASSERT_IN_GATE(); parent = (IOService *) connection->getParentEntry(gIOPowerPlane); @@ -967,8 +974,6 @@ IOService::addPowerChild2( IOPMRequest * request ) fPreviousRequestPowerFlags = (IOPMPowerFlags)(-1); adjustPowerState(tempDesire); } - - getPMRootDomain()->tagPowerPlaneService(this, &fPMActions); } //********************************************************************************* @@ -1142,8 +1147,10 @@ IOService::registerPowerDriver( powerStatesCopy[i].outputPowerFlags = powerStates[i].outputPowerCharacter; powerStatesCopy[i].inputPowerFlags = powerStates[i].inputPowerRequirement; powerStatesCopy[i].staticPower = powerStates[i].staticPower; +#if USE_SETTLE_TIMER powerStatesCopy[i].settleUpTime = powerStates[i].settleUpTime; powerStatesCopy[i].settleDownTime = powerStates[i].settleDownTime; +#endif if (powerStates[i].version >= kIOPMPowerStateVersion2) { stateOrder = powerStates[i].stateOrder; } else { @@ -1196,13 +1203,13 @@ IOService::registerPowerDriver( void IOService::handleRegisterPowerDriver( IOPMRequest * request ) { - IOService * powerDriver = (IOService *) request->fArg0; - IOPMPSEntry * powerStates = (IOPMPSEntry *) request->fArg1; - unsigned long numberOfStates = (unsigned long) request->fArg2; - unsigned long i, stateIndex; - unsigned long lowestPowerState; - IOService * root; - OSIterator * iter; + IOService * powerDriver = (IOService *) request->fArg0; + IOPMPSEntry * powerStates = (IOPMPSEntry *) request->fArg1; + IOPMPowerStateIndex numberOfStates = (IOPMPowerStateIndex) request->fArg2; + IOPMPowerStateIndex i, stateIndex; + IOPMPowerStateIndex lowestPowerState; + IOService * root; + OSIterator * iter; PM_ASSERT_IN_GATE(); assert(powerStates); @@ -1210,9 +1217,7 @@ IOService::handleRegisterPowerDriver( IOPMRequest * request ) assert(numberOfStates > 1); if (!fNumberOfPowerStates) { - OUR_PMLog(kPMLogControllingDriver, - (unsigned long) numberOfStates, - (unsigned long) 
kIOPMPowerStateVersion1); + OUR_PMLog(kPMLogControllingDriver, numberOfStates, kIOPMPowerStateVersion1); fPowerStates = powerStates; fNumberOfPowerStates = numberOfStates; @@ -1306,6 +1311,9 @@ IOService::handleRegisterPowerDriver( IOPMRequest * request ) iter->release(); } + // Populate IOPMActions for a few special services + getPMRootDomain()->tagPowerPlaneService(this, &fPMActions, fNumberOfPowerStates - 1); + if (inPlane(gIOPowerPlane) && fParentsKnowState) { IOPMPowerStateIndex tempDesire; fMaxPowerState = fControllingDriver->maxCapabilityForDomainState(fParentsCurrentPowerFlags); @@ -1437,7 +1445,7 @@ IOService::handleInterestChanged( IOPMRequest * request ) if (fInsertInterestSet) { while ((driver = (IOService *) fInsertInterestSet->getAnyObject())) { if (list->findItem(driver) == NULL) { - informee = list->appendNewInformee(driver); + list->appendNewInformee(driver); } fInsertInterestSet->removeObject(driver); } @@ -1511,7 +1519,7 @@ bool IOService::handleAcknowledgePowerChange( IOPMRequest * request ) { IOPMinformee * informee; - unsigned long childPower = kIOPMUnknown; + IOPMPowerStateIndex childPower = kIOPMUnknown; IOService * theChild; IOService * whichObject; bool all_acked = false; @@ -1543,11 +1551,11 @@ IOService::handleAcknowledgePowerChange( IOPMRequest * request ) if (informee->timer != 0) { if (informee->timer > 0) { uint64_t nsec = computeTimeDeltaNS(&informee->startTime); - if (nsec > LOG_SETPOWER_TIMES) { + if (nsec > gIOPMSetPowerStateLogNS) { getPMRootDomain()->pmStatsRecordApplicationResponse( gIOPMStatsDriverPSChangeSlow, informee->whatObject->getName(), fDriverCallReason, NS_TO_MS(nsec), informee->whatObject->getRegistryEntryID(), - NULL, fHeadNotePowerState); + NULL, fHeadNotePowerState, true); } } @@ -1631,7 +1639,7 @@ IOService::acknowledgeSetPowerState( void ) //********************************************************************************* void -IOService::adjustPowerState( uint32_t clamp ) +IOService::adjustPowerState( IOPMPowerStateIndex clamp ) { PM_ASSERT_IN_GATE(); computeDesiredState(clamp, false); @@ -1684,6 +1692,9 @@ IOService::synchronizePowerTree( if (nr) { submitPMRequest(nr); } + + // For display wrangler or any other delay-eligible (dark wake clamped) + // drivers attached to root domain in the power plane. nr = acquirePMRequest(getPMRootDomain(), kIOPMRequestTypeChildNotifyDelayCancel); if (nr) { submitPMRequest(nr); @@ -1728,7 +1739,7 @@ IOService::handleSynchronizePowerTree( IOPMRequest * request ) PM_ASSERT_IN_GATE(); if (fControllingDriver && fParentsKnowState && inPlane(gIOPowerPlane) && (fCurrentPowerState == fHighestPowerState)) { - IOOptionBits options = (uintptr_t) request->fArg0; + IOPMPowerChangeFlags options = (IOPMPowerChangeFlags)(uintptr_t) request->fArg0; startPowerChange( /* flags */ kIOPMSelfInitiated | kIOPMSynchronize | @@ -1777,7 +1788,6 @@ IOService::handlePowerDomainWillChangeTo( IOPMRequest * request ) IOPowerConnection * connection; IOPMPowerStateIndex maxPowerState; IOPMPowerFlags combinedPowerFlags; - bool savedParentsKnowState; IOReturn result = IOPMAckImplied; PM_ASSERT_IN_GATE(); @@ -1788,8 +1798,6 @@ IOService::handlePowerDomainWillChangeTo( IOPMRequest * request ) goto exit_no_ack; } - savedParentsKnowState = fParentsKnowState; - // Combine parents' output power flags. 
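A brief illustration of the tunable introduced in PMinit() above: on DEVELOPMENT/DEBUG kernels the setpowerstate_log boot-arg, given in milliseconds, overrides what used to be the fixed LOG_SETPOWER_TIMES threshold. The snippet below is a condensed sketch of that parse-and-convert step, not additional patch content; the 100 ms value is only an example.

    uint32_t setPowerStateLogMS = 0;   // e.g. boot-args="setpowerstate_log=100"
    if (PE_parse_boot_argn("setpowerstate_log", &setPowerStateLogMS,
            sizeof(setPowerStateLogMS))) {
        // 100 ms -> 100,000,000 ns; otherwise the default remains
        // 300 ms on Intel and 50 ms everywhere else.
        gIOPMSetPowerStateLogNS = setPowerStateLogMS * 1000000ULL;
    }
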
combinedPowerFlags = 0; @@ -2246,8 +2254,8 @@ IOService::requestPowerDomainState( //********************************************************************************* // [public] temporaryPowerClampOn // -// A power domain wants to clamp its power on till it has children which -// will thendetermine the power domain state. +// A power domain wants to be clamped to max power until it has children which +// will then determine the power domain state. // // We enter the highest state until addPowerChild is called. //********************************************************************************* @@ -2348,7 +2356,7 @@ IOService::changePowerStateWithOverrideTo( IOPMPowerStateIndex ordinal, } gIOPMPowerClientDevice->retain(); - request->fRequestTag = tag; + request->fTag = tag; request->fArg0 = (void *) ordinal; request->fArg1 = (void *) gIOPMPowerClientDevice; request->fArg2 = NULL; @@ -2360,18 +2368,45 @@ IOService::changePowerStateWithOverrideTo( IOPMPowerStateIndex ordinal, // Prevent needless downwards power transitions by clamping power // until the scheduled request is executed. + // + // TODO: review fOverrideMaxPowerState if (gIOPMWorkLoop->inGate() && (ordinal < fNumberOfPowerStates)) { fTempClampPowerState = StateMax(fTempClampPowerState, ordinal); fTempClampCount++; + request->fArg2 = (void *)(uintptr_t) true; + + // Place a power state ceiling to prevent any transition to a + // power state higher than fOverrideMaxPowerState. fOverrideMaxPowerState = ordinal; - request->fArg2 = (void *) (uintptr_t) true; } submitPMRequest( request ); return IOPMNoErr; } +//********************************************************************************* +// Tagged form of changePowerStateTo() +//********************************************************************************* + +IOReturn +IOService::changePowerStateWithTagTo( IOPMPowerStateIndex ordinal, IOPMRequestTag tag ) +{ + OUR_PMLog(kPMLogChangeStateTo, ordinal, tag); + return requestPowerState(gIOPMPowerClientDriver, ordinal, tag); +} + +//********************************************************************************* +// Tagged form of changePowerStateToPriv() +//********************************************************************************* + +IOReturn +IOService::changePowerStateWithTagToPriv( unsigned long ordinal, IOPMRequestTag tag ) +{ + OUR_PMLog(kPMLogChangeStateToPriv, ordinal, tag); + return requestPowerState(gIOPMPowerClientDevice, ordinal, tag); +} + //********************************************************************************* // [public] changePowerStateForRootDomain // @@ -2434,11 +2469,12 @@ IOService::quiescePowerTree( IOReturn IOService::requestPowerState( const OSSymbol * client, - uint32_t state ) + IOPMPowerStateIndex state, + IOPMRequestTag tag ) { IOPMRequest * request; - if (!client) { + if (!client || (state > UINT_MAX)) { return kIOReturnBadArgument; } if (!initialized) { @@ -2451,6 +2487,7 @@ IOService::requestPowerState( } client->retain(); + request->fTag = tag; request->fArg0 = (void *)(uintptr_t) state; request->fArg1 = (void *) client; request->fArg2 = NULL; @@ -2466,7 +2503,7 @@ IOService::requestPowerState( if (gIOPMWorkLoop->inGate() && (state < fNumberOfPowerStates)) { fTempClampPowerState = StateMax(fTempClampPowerState, state); fTempClampCount++; - request->fArg2 = (void *) (uintptr_t) true; + request->fArg2 = (void *)(uintptr_t) true; } submitPMRequest( request ); @@ -2480,8 +2517,8 @@ IOService::requestPowerState( void IOService::handleRequestPowerState( IOPMRequest * request ) { - const 
OSSymbol * client = (const OSSymbol *) request->fArg1; - uint32_t state = (uint32_t)(uintptr_t) request->fArg0; + const OSSymbol * client = (const OSSymbol *) request->fArg1; + IOPMPowerStateIndex state = (IOPMPowerStateIndex) request->fArg0; PM_ASSERT_IN_GATE(); if (request->fArg2) { @@ -2524,10 +2561,15 @@ IOService::handleRequestPowerState( IOPMRequest * request ) //********************************************************************************* void -IOService::updatePowerClient( const OSSymbol * client, uint32_t powerState ) +IOService::updatePowerClient( const OSSymbol * client, IOPMPowerStateIndex powerState ) { IOPMPowerStateIndex oldPowerState = kPowerStateZero; + if (powerState > UINT_MAX) { + assert(false); + return; + } + if (!fPowerClients) { fPowerClients = OSDictionary::withCapacity(4); } @@ -2544,7 +2586,7 @@ IOService::updatePowerClient( const OSSymbol * client, uint32_t powerState ) } } - PM_ACTION_3(actionUpdatePowerClient, client, oldPowerState, powerState); + PM_ACTION_CLIENT(actionUpdatePowerClient, client, oldPowerState, powerState); } } @@ -2556,10 +2598,10 @@ IOService::removePowerClient( const OSSymbol * client ) } } -uint32_t +IOPMPowerStateIndex IOService::getPowerStateForClient( const OSSymbol * client ) { - uint32_t powerState = kPowerStateZero; + IOPMPowerStateIndex powerState = kPowerStateZero; if (fPowerClients && client) { OSNumber * num = (OSNumber *) fPowerClients->getObject(client); @@ -2653,8 +2695,8 @@ IOService::computeDesiredState( unsigned long localClamp, bool computeOnly ) OSIterator * iter; OSObject * next; IOPowerConnection * connection; - uint32_t desiredState = kPowerStateZero; - uint32_t newPowerState = kPowerStateZero; + IOPMPowerStateIndex desiredState = kPowerStateZero; + IOPMPowerStateIndex newPowerState = kPowerStateZero; bool hasChildren = false; // Desired power state is always 0 without a controlling driver. @@ -2717,7 +2759,7 @@ IOService::computeDesiredState( unsigned long localClamp, bool computeOnly ) desiredState = getPowerStateForClient(client); assert(desiredState < fNumberOfPowerStates); PM_LOG1(" %u %s\n", - desiredState, client->getCStringNoCopy()); + (uint32_t) desiredState, client->getCStringNoCopy()); newPowerState = StateMax(newPowerState, desiredState); @@ -2754,7 +2796,7 @@ IOService::computeDesiredState( unsigned long localClamp, bool computeOnly ) PM_LOG1(" temp %u, clamp %u, current %u, new %u\n", (uint32_t) localClamp, (uint32_t) fTempClampPowerState, - (uint32_t) fCurrentPowerState, newPowerState); + (uint32_t) fCurrentPowerState, (uint32_t) newPowerState); if (!computeOnly) { // Restart idle timer if possible when device desire has increased. @@ -2914,7 +2956,7 @@ IOService::activityTickle( unsigned long type, unsigned long stateNumber ) fActivityTickleCount++; clock_get_uptime(&fDeviceActiveTimestamp); - PM_ACTION_0(actionActivityTickle); + PM_ACTION_TICKLE(actionActivityTickle); // Record the last tickle power state. 
// This helps to filter out redundant tickles as @@ -2972,10 +3014,10 @@ IOService::activityTickle( unsigned long type, unsigned long stateNumber ) void IOService::handleActivityTickle( IOPMRequest * request ) { - uint32_t ticklePowerState = (uint32_t)(uintptr_t) request->fArg0; - uint32_t tickleFlags = (uint32_t)(uintptr_t) request->fArg1; - uint32_t tickleGeneration = (uint32_t)(uintptr_t) request->fArg2; - bool adjustPower = false; + IOPMPowerStateIndex ticklePowerState = (IOPMPowerStateIndex) request->fArg0; + IOPMPowerStateIndex tickleFlags = (IOPMPowerStateIndex) request->fArg1; + uint32_t tickleGeneration = (uint32_t)(uintptr_t) request->fArg2; + bool adjustPower = false; PM_ASSERT_IN_GATE(); if (fResetPowerStateOnWake && (tickleGeneration != gIOPMTickleGeneration)) { @@ -2991,7 +3033,7 @@ IOService::handleActivityTickle( IOPMRequest * request ) if (tickleFlags & kTickleTypeActivity) { IOPMPowerStateIndex deviceDesireOrder = StateOrder(fDeviceDesire); - uint32_t idleTimerGeneration = ticklePowerState; // kTickleTypePowerDrop + IOPMPowerStateIndex idleTimerGeneration = ticklePowerState; // kTickleTypePowerDrop if (tickleFlags & kTickleTypePowerRise) { if ((StateOrder(ticklePowerState) > deviceDesireOrder) && @@ -3057,6 +3099,10 @@ IOService::setIdleTimerPeriod( unsigned long period ) OUR_PMLog(kPMLogSetIdleTimerPeriod, period, fIdleTimerPeriod); + if (period > INT_MAX) { + return kIOReturnBadArgument; + } + IOPMRequest * request = acquirePMRequest( this, kIOPMRequestTypeSetIdleTimerPeriod ); if (!request) { @@ -3139,7 +3185,8 @@ IOService::start_PM_idle_timer( void ) SInt32 idle_in = 0; boolean_t pending; - if (!initialized || !fIdleTimerPeriod) { + if (!initialized || !fIdleTimerPeriod || + ((unsigned int) fCurrentPowerState != fCurrentPowerState)) { return; } @@ -3148,14 +3195,14 @@ IOService::start_PM_idle_timer( void ) clock_get_uptime(&uptime); // Subclasses may modify idle sleep algorithm - idle_in = nextIdleTimeout(uptime, fDeviceActiveTimestamp, fCurrentPowerState); + idle_in = nextIdleTimeout(uptime, fDeviceActiveTimestamp, (unsigned int) fCurrentPowerState); // Check for out-of range responses if (idle_in > maxTimeout) { // use standard implementation idle_in = IOService::nextIdleTimeout(uptime, fDeviceActiveTimestamp, - fCurrentPowerState); + (unsigned int) fCurrentPowerState); } else if (idle_in < minTimeout) { idle_in = fIdleTimerPeriod; } @@ -3359,7 +3406,7 @@ IOService::getPowerState( void ) return kPowerStateZero; } - return fCurrentPowerState; + return (UInt32) fCurrentPowerState; } #ifndef __LP64__ @@ -3451,7 +3498,7 @@ IOService::startPowerChange( IOPowerConnection * parentConnection, IOPMPowerFlags parentFlags ) { - uint32_t savedPMActionsParam; + uint32_t savedPMActionsState; PM_ASSERT_IN_GATE(); assert( fMachineState == kIOPM_Finished ); @@ -3462,8 +3509,8 @@ IOService::startPowerChange( } fIsPreChange = true; - savedPMActionsParam = fPMActions.parameter; - PM_ACTION_2(actionPowerChangeOverride, &powerState, &changeFlags); + savedPMActionsState = fPMActions.state; + PM_ACTION_CHANGE(actionPowerChangeOverride, &powerState, &changeFlags); // rdar://problem/55040032 // Schedule a power adjustment after removing the power clamp @@ -3472,8 +3519,8 @@ IOService::startPowerChange( // automatically request parent power when necessary. 
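For context on the idle-timer and tickle plumbing above, here is a minimal sketch of the driver-side API it serves; MyDriver, myPowerStates, and handleIO are hypothetical names, super is assumed to be IOService, and the 30 second period is only an example (setIdleTimerPeriod() now rejects periods above INT_MAX seconds).

    enum { kMyOffState = 0, kMyOnState = 1 };

    bool
    MyDriver::start(IOService * provider)
    {
        if (!super::start(provider)) {
            return false;
        }
        PMinit();
        provider->joinPMtree(this);
        registerPowerDriver(this, myPowerStates, 2);  // hypothetical off/on table
        setIdleTimerPeriod(30);                       // drop to idle after 30 s of no tickles
        return true;
    }

    void
    MyDriver::handleIO(void)
    {
        // Each tickle restarts the idle timer and records the tickled state.
        activityTickle(kIOPMSuperclassPolicy1, kMyOnState);
    }
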
if (!fAdjustPowerScheduled && ((changeFlags & kIOPMSelfInitiated) == 0) && - ((fPMActions.parameter & kPMActionsFlagLimitPower) == 0) && - ((savedPMActionsParam & kPMActionsFlagLimitPower) != 0)) { + ((fPMActions.state & kPMActionsStatePowerClamped) == 0) && + ((savedPMActionsState & kPMActionsStatePowerClamped) != 0)) { IOPMRequest * request = acquirePMRequest(this, kIOPMRequestTypeAdjustPowerState); if (request) { submitPMRequest(request); @@ -3538,8 +3585,9 @@ IOService::notifyInterestedDrivers( void ) IOPMinformee * informee; IOPMinformeeList * list = fInterestedDrivers; DriverCallParam * param; - IOItemCount count; - IOItemCount skipCnt = 0; + unsigned long numItems; + uint32_t count; + uint32_t skipCnt = 0; PM_ASSERT_IN_GATE(); assert( fDriverCallParamCount == 0 ); @@ -3547,10 +3595,12 @@ IOService::notifyInterestedDrivers( void ) fHeadNotePendingAcks = 0; - count = list->numberOfItems(); - if (!count) { - goto done; // no interested drivers + numItems = list->numberOfItems(); + if (!numItems || ((uint32_t) numItems != numItems)) { + goto done; // interested drivers count out of range } + count = (uint32_t) numItems; + // Allocate an array of interested drivers and their return values // for the callout thread. Everything else is still "owned" by the // PM work loop, which can run to process acknowledgePowerChange() @@ -3705,7 +3755,9 @@ IOService::notifyRootDomain( void ) { assert( fDriverCallBusy == false ); - // Only for root domain in the will-change phase + // Only for root domain in the will-change phase. + // On a power up, don't notify children right after the interested drivers. + // Perform setPowerState() first, then notify the children. if (!IS_ROOT_DOMAIN || (fMachineState != kIOPM_OurChangeSetPowerState)) { notifyChildren(); return; @@ -3995,7 +4047,12 @@ IOService::driverSetPowerState( void ) if (assertPMDriverCall(&callEntry, kIOPMDriverCallMethodSetPowerState)) { OUR_PMLogFuncStart(kPMLogProgramHardware, (uintptr_t) this, powerState); clock_get_uptime(&fDriverCallStartTime); - result = fControllingDriver->setPowerState( powerState, this ); + + if (reserved && reserved->uvars && reserved->uvars->userServer) { + result = reserved->uvars->userServer->serviceSetPowerState(fControllingDriver, this, fHeadNotePowerArrayEntry->capabilityFlags, powerState); + } else { + result = fControllingDriver->setPowerState( powerState, this ); + } clock_get_uptime(&end); OUR_PMLogFuncEnd(kPMLogProgramHardware, (uintptr_t) this, (UInt32) result); @@ -4020,7 +4077,7 @@ IOService::driverSetPowerState( void ) SUB_ABSOLUTETIME(&end, &fDriverCallStartTime); absolutetime_to_nanoseconds(end, &nsec); - if (nsec > LOG_SETPOWER_TIMES) { + if (nsec > gIOPMSetPowerStateLogNS) { getPMRootDomain()->pmStatsRecordApplicationResponse( gIOPMStatsDriverPSChangeSlow, fName, kDriverCallSetPowerState, NS_TO_MS(nsec), getRegistryEntryID(), @@ -4092,7 +4149,7 @@ IOService::driverInformPowerChange( void ) SUB_ABSOLUTETIME(&end, &informee->startTime); absolutetime_to_nanoseconds(end, &nsec); - if (nsec > LOG_SETPOWER_TIMES) { + if (nsec > gIOPMSetPowerStateLogNS) { getPMRootDomain()->pmStatsRecordApplicationResponse( gIOPMStatsDriverPSChangeSlow, driver->getName(), fDriverCallReason, NS_TO_MS(nsec), driver->getRegistryEntryID(), @@ -4296,7 +4353,7 @@ IOService::all_done( void ) // Do not inform driver and clients about this request completion, // except for the originator (root domain). 
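The driverSetPowerState() and driverInformPowerChange() hunks above share one timing pattern; the following condensed sketch shows it in isolation, with IOLog standing in for the pmStatsRecordApplicationResponse() bookkeeping the real code performs, and driver/powerState standing in for fControllingDriver/fHeadNotePowerState.

    AbsoluteTime callStart, callEnd;
    uint64_t     nsec;

    clock_get_uptime(&callStart);
    IOReturn result = driver->setPowerState(powerState, this);  // driver callout
    clock_get_uptime(&callEnd);

    SUB_ABSOLUTETIME(&callEnd, &callStart);
    absolutetime_to_nanoseconds(callEnd, &nsec);
    if (nsec > gIOPMSetPowerStateLogNS) {
        // Threshold is the tunable that replaced LOG_SETPOWER_TIMES.
        IOLog("%s: setPowerState took %llu ms\n", driver->getName(),
            (unsigned long long) NS_TO_MS(nsec));
    }
    (void) result;
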
- PM_ACTION_2(actionPowerChangeDone, + PM_ACTION_CHANGE(actionPowerChangeDone, fHeadNotePowerState, fHeadNoteChangeFlags); if (getPMRequestType() == kIOPMRequestTypeSynchronizePowerTree) { @@ -4344,15 +4401,15 @@ IOService::all_done( void ) PM_LOCK(); if (fReportBuf) { ts = mach_absolute_time(); - STATEREPORT_SETSTATE(fReportBuf, fCurrentPowerState, ts); + STATEREPORT_SETSTATE(fReportBuf, (uint16_t) fCurrentPowerState, ts); } PM_UNLOCK(); #if PM_VARS_SUPPORT fPMVars->myCurrentState = fCurrentPowerState; #endif OUR_PMLog(kPMLogChangeDone, fCurrentPowerState, prevPowerState); - PM_ACTION_2(actionPowerChangeDone, - fHeadNotePowerState, fHeadNoteChangeFlags); + PM_ACTION_CHANGE(actionPowerChangeDone, + prevPowerState, fHeadNoteChangeFlags); actionCalled = true; powerStatePtr = &fPowerStates[fCurrentPowerState]; @@ -4400,7 +4457,7 @@ IOService::all_done( void ) PM_LOCK(); if (fReportBuf) { ts = mach_absolute_time(); - STATEREPORT_SETSTATE(fReportBuf, fCurrentPowerState, ts); + STATEREPORT_SETSTATE(fReportBuf, (uint16_t) fCurrentPowerState, ts); } PM_UNLOCK(); #if PM_VARS_SUPPORT @@ -4408,8 +4465,8 @@ IOService::all_done( void ) #endif OUR_PMLog(kPMLogChangeDone, fCurrentPowerState, prevPowerState); - PM_ACTION_2(actionPowerChangeDone, - fHeadNotePowerState, fHeadNoteChangeFlags); + PM_ACTION_CHANGE(actionPowerChangeDone, + prevPowerState, fHeadNoteChangeFlags); actionCalled = true; powerStatePtr = &fPowerStates[fCurrentPowerState]; @@ -4435,7 +4492,7 @@ IOService::all_done( void ) } if (!actionCalled) { - PM_ACTION_2(actionPowerChangeDone, + PM_ACTION_CHANGE(actionPowerChangeDone, fHeadNotePowerState, fHeadNoteChangeFlags); } } @@ -4477,7 +4534,7 @@ IOService::OurChangeStart( void ) // Change started, but may not complete... // Can be canceled (power drop) or deferred (power rise). - PM_ACTION_2(actionPowerChangeStart, fHeadNotePowerState, &fHeadNoteChangeFlags); + PM_ACTION_CHANGE(actionPowerChangeStart, fHeadNotePowerState, &fHeadNoteChangeFlags); // Two separate paths, depending if power is being raised or lowered. // Lowering power is subject to approval by clients of this service. @@ -4634,7 +4691,7 @@ IOService::OurSyncStart( void ) return; } - PM_ACTION_2(actionPowerChangeStart, fHeadNotePowerState, &fHeadNoteChangeFlags); + PM_ACTION_CHANGE(actionPowerChangeStart, fHeadNotePowerState, &fHeadNoteChangeFlags); if (fHeadNoteChangeFlags & kIOPMNotDone) { OurChangeFinish(); @@ -4807,6 +4864,12 @@ IOService::OurChangeTellCapabilityDidChange( void ) return OurChangeFinish(); } + if (!IS_POWER_DROP) { + // Notify root domain immediately after notifying interested + // drivers and power children. + getPMRootDomain()->willTellSystemCapabilityDidChange(); + } + getPMRootDomain()->tracePoint( IS_POWER_DROP ? 
kIOPMTracePointSleepCapabilityClients : kIOPMTracePointWakeCapabilityClients ); @@ -4852,7 +4915,7 @@ IOService::ParentChangeStart( void ) // Power domain is forcing us to lower power if (StateOrder(fHeadNotePowerState) < StateOrder(fCurrentPowerState)) { - PM_ACTION_2(actionPowerChangeStart, fHeadNotePowerState, &fHeadNoteChangeFlags); + PM_ACTION_CHANGE(actionPowerChangeStart, fHeadNotePowerState, &fHeadNoteChangeFlags); // Tell apps and kernel clients fInitialPowerChange = false; @@ -4880,7 +4943,7 @@ IOService::ParentChangeStart( void ) if (fHeadNoteChangeFlags & kIOPMDomainDidChange) { if (StateOrder(fHeadNotePowerState) > StateOrder(fCurrentPowerState)) { - PM_ACTION_2(actionPowerChangeStart, + PM_ACTION_CHANGE(actionPowerChangeStart, fHeadNotePowerState, &fHeadNoteChangeFlags); // Parent did change up - start our change up @@ -5135,8 +5198,10 @@ IOService::ParentChangeAcknowledgePowerChange( void ) void IOService::settleTimerExpired( void ) { +#if USE_SETTLE_TIMER fSettleTimeUS = 0; gIOPMWorkQueue->signalWorkAvailable(); +#endif } //********************************************************************************* @@ -5145,6 +5210,7 @@ IOService::settleTimerExpired( void ) // Holds a retain while the settle timer callout is in flight. //********************************************************************************* +#if USE_SETTLE_TIMER static void settle_timer_expired( thread_call_param_t arg0, thread_call_param_t arg1 ) { @@ -5157,6 +5223,7 @@ settle_timer_expired( thread_call_param_t arg0, thread_call_param_t arg1 ) } me->release(); } +#endif //********************************************************************************* // [private] startSettleTimer @@ -5167,7 +5234,7 @@ settle_timer_expired( thread_call_param_t arg0, thread_call_param_t arg1 ) void IOService::startSettleTimer( void ) { -#if NOT_USEFUL +#if USE_SETTLE_TIMER // This function is broken and serves no useful purpose since it never // updates fSettleTimeUS to a non-zero value to stall the state machine, // yet it starts a delay timer. 
It appears no driver relies on a delay @@ -5263,10 +5330,19 @@ IOService::ackTimerTick( void ) PM_ERROR("%s::setPowerState(%p, %lu -> %lu) timed out after %d ms\n", fName, OBFUSCATE(this), fCurrentPowerState, fHeadNotePowerState, NS_TO_MS(nsec)); -#if DEBUG || DEVELOPMENT || CONFIG_EMBEDDED - uint32_t panic_allowed = -1; - PE_parse_boot_argn("setpowerstate_panic", &panic_allowed, sizeof(panic_allowed)); - if (panic_allowed != 0) { +#if DEBUG || DEVELOPMENT || !defined(XNU_TARGET_OS_OSX) + bool panic_allowed = false; + uint32_t setpowerstate_panic = -1; + PE_parse_boot_argn("setpowerstate_panic", &setpowerstate_panic, sizeof(setpowerstate_panic)); + panic_allowed = setpowerstate_panic != 0; +#ifdef CONFIG_XNUPOST + uint64_t kernel_post_args = 0; + PE_parse_boot_argn("kernPOST", &kernel_post_args, sizeof(kernel_post_args)); + if (kernel_post_args != 0) { + panic_allowed = false; + } +#endif /* CONFIG_XNUPOST */ + if (panic_allowed) { // rdar://problem/48743340 - excluding AppleSEPManager from panic const char *whitelist = "AppleSEPManager"; if (strncmp(fName, whitelist, strlen(whitelist))) { @@ -5274,9 +5350,16 @@ IOService::ackTimerTick( void ) fName, this, fCurrentPowerState, fHeadNotePowerState, NS_TO_MS(nsec)); } } else { - PM_ERROR("setPowerState panic disabled by setpowerstate_panic boot-arg\n"); +#ifdef CONFIG_XNUPOST + if (kernel_post_args != 0) { + PM_ERROR("setPowerState panic disabled by kernPOST boot-arg\n"); + } +#endif /* CONFIG_XNUPOST */ + if (setpowerstate_panic != 0) { + PM_ERROR("setPowerState panic disabled by setpowerstate_panic boot-arg\n"); + } } -#else +#else /* !(DEBUG || DEVELOPMENT || !defined(XNU_TARGET_OS_OSX)) */ if (gIOKitDebug & kIOLogDebugPower) { panic("%s::setPowerState(%p, %lu -> %lu) timed out after %d ms", fName, this, fCurrentPowerState, fHeadNotePowerState, NS_TO_MS(nsec)); @@ -5287,6 +5370,9 @@ IOService::ackTimerTick( void ) function_addr = OSMemberFunctionCast(const void *, fControllingDriver, &IOService::setPowerState); kext = OSKext::lookupKextWithAddress((vm_address_t)function_addr); if (kext) { +#if __has_feature(ptrauth_calls) + function_addr = (const void*)VM_KERNEL_STRIP_PTR(function_addr); +#endif /* __has_feature(ptrauth_calls) */ const char *bundleID = kext->getIdentifierCString(); const char *apple_prefix = "com.apple"; const char *kernel_prefix = "__kernel__"; @@ -5297,10 +5383,10 @@ IOService::ackTimerTick( void ) } kext->release(); } - // Unblock state machine and pretend driver has acked. - done = true; } -#endif +#endif /* !(DEBUG || DEVELOPMENT || !defined(XNU_TARGET_OS_OSX)) */ + // Unblock state machine and pretend driver has acked. 
+ done = true; getPMRootDomain()->reset_watchdog_timer(this, 0); } else { // still waiting, set timer again @@ -5626,6 +5712,8 @@ IOService::ack_timer_expired( thread_call_param_t arg0, thread_call_param_t arg1 void IOService::tellSystemCapabilityChange( uint32_t nextMS ) { + assert(IS_ROOT_DOMAIN); + MS_PUSH( nextMS ); fMachineState = kIOPM_TellCapabilityChangeDone; fOutOfBandMessage = kIOMessageSystemCapabilityChange; @@ -5823,13 +5911,8 @@ IOService::tellClientsWithResponse( int messageType ) assert( fResponseArray == NULL ); assert( fNotifyClientArray == NULL ); - if (messageType == (int)kIOPMMessageLastCallBeforeSleep) { - RD_LOG("tellClientsWithResponse( kIOPMMessageLastCallBeforeSleep, %d )\n", - fOutOfBandParameter); - } else { - RD_LOG("tellClientsWithResponse( %s, %d )\n", - getIOMessageString(messageType), fOutOfBandParameter); - } + RD_LOG("tellClientsWithResponse( %s, %s )\n", getIOMessageString(messageType), + getNotificationPhaseString(fOutOfBandParameter)); fResponseArray = OSArray::withCapacity( 1 ); if (!fResponseArray) { @@ -5858,7 +5941,7 @@ IOService::tellClientsWithResponse( int messageType ) context.messageFilter = (isRootDomain) ? OSMemberFunctionCast( IOPMMessageFilter, - this, + (IOPMrootDomain *)this, &IOPMrootDomain::systemMessageFilter) : NULL; switch (fOutOfBandParameter) { @@ -5907,6 +5990,7 @@ IOService::tellClientsWithResponse( int messageType ) break; case kNotifyCapabilityChangeApps: + context.enableTracing = isRootDomain; applyToInterested( gIOAppPowerStateInterest, pmTellCapabilityAppWithResponse, (void *) &context ); if (context.messageType == kIOMessageCanSystemSleep) { @@ -5927,8 +6011,8 @@ IOService::tellClientsWithResponse( int messageType ) fNotifyClientArray = context.notifyClients; if (context.skippedInDark) { - IOLog("tellClientsWithResponse(%s, %d) %d of %d skipped in dark\n", - getIOMessageString(messageType), fOutOfBandParameter, + IOLog("tellClientsWithResponse(%s, %s) %d of %d skipped in dark\n", + getIOMessageString(messageType), getNotificationPhaseString(fOutOfBandParameter), context.skippedInDark, context.skippedInDark + context.notSkippedInDark); } @@ -6070,24 +6154,23 @@ IOService::pmTellClientWithResponse( OSObject * object, void * arg ) IOReturn retCode; AbsoluteTime start, end; uint64_t nsec; + bool enableTracing; if (context->messageFilter && !context->messageFilter(context->us, object, context, NULL, NULL)) { - if ((kIOLogDebugPower & gIOKitDebug) && - (OSDynamicCast(_IOServiceInterestNotifier, object))) { - _IOServiceInterestNotifier *n = (_IOServiceInterestNotifier *) object; - PM_LOG("%s DROP Client %s, notifier %p, handler %p\n", - context->us->getName(), - getIOMessageString(context->messageType), - OBFUSCATE(object), OBFUSCATE(n->handler)); - } + getPMRootDomain()->traceFilteredNotification(object); return; } + // Besides interest notifiers this applier function can also be invoked against + // IOService clients of context->us, so notifier can be NULL. But for tracing + // purposes the IOService clients can be ignored but each will still consume + // an entry in the responseArray and also advance msgIndex. 
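tellClientsWithResponse() now logs the notification phase by name through getNotificationPhaseString(), which is added near the end of this file. Below is a minimal standalone sketch of the IONamedValue/IOFindNameForValue idiom it relies on; phaseName() is a hypothetical wrapper used only for illustration.

    #include <IOKit/IOTypes.h>
    #include <IOKit/IOLib.h>

    static const char *
    phaseName(uint32_t phase)
    {
        static const IONamedValue names[] = {
            { kNotifyApps,                     "kNotifyApps" },
            { kNotifyPriority,                 "kNotifyPriority" },
            { kNotifyCapabilityChangeApps,     "kNotifyCapabilityChangeApps" },
            { kNotifyCapabilityChangePriority, "kNotifyCapabilityChangePriority" },
            { 0, NULL }   // terminator
        };
        return IOFindNameForValue((int) phase, names);
    }
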
notifier = OSDynamicCast(_IOServiceInterestNotifier, object); msgType = context->messageType; msgIndex = context->responseArray->getCount(); msgRef = ((context->serialNumber & 0xFFFF) << 16) + (msgIndex & 0xFFFF); + enableTracing = context->enableTracing && (notifier != NULL); IOServicePM * pwrMgt = context->us->pwrMgt; if (gIOKitDebug & kIOLogPower) { @@ -6099,15 +6182,10 @@ IOService::pmTellClientWithResponse( OSObject * object, void * arg ) OUR_PMLog(kPMLogClientNotify, (uintptr_t) notifier->handler, 0); } } - if ((kIOLogDebugPower & gIOKitDebug) && notifier) { - PM_LOG("%s MESG Client %s, notifier %p, handler %p\n", - context->us->getName(), - getIOMessageString(msgType), - OBFUSCATE(object), OBFUSCATE(notifier->handler)); - } if (NULL == context->notifyClients) { - context->notifyClients = OSArray::withCapacity( 32 ); + context->notifyClients = OSArray::withCapacity(32); + assert(context->notifyClients != NULL); } notify.powerRef = (void *)(uintptr_t) msgRef; @@ -6115,19 +6193,18 @@ IOService::pmTellClientWithResponse( OSObject * object, void * arg ) notify.stateNumber = context->stateNumber; notify.stateFlags = context->stateFlags; - if (context->enableTracing && (notifier != NULL)) { - getPMRootDomain()->traceDetail(notifier, true); + clock_get_uptime(&start); + if (enableTracing) { + getPMRootDomain()->traceNotification(notifier, true, start, msgIndex); } - clock_get_uptime(&start); retCode = context->us->messageClient(msgType, object, (void *) ¬ify, sizeof(notify)); - clock_get_uptime(&end); - if (context->enableTracing && (notifier != NULL)) { - getPMRootDomain()->traceDetail(notifier, false); + clock_get_uptime(&end); + if (enableTracing) { + getPMRootDomain()->traceNotification(notifier, false, end); } - if (kIOReturnSuccess == retCode) { if (0 == notify.returnValue) { OUR_PMLog(kPMLogClientAcknowledge, msgRef, (uintptr_t) object); @@ -6143,7 +6220,7 @@ IOService::pmTellClientWithResponse( OSObject * object, void * arg ) (uint64_t) notify.returnValue, getIOMessageString(msgType)); } else { - context->maxTimeRequested = notify.returnValue; + context->maxTimeRequested = (typeof(context->maxTimeRequested))notify.returnValue; } } // @@ -6159,12 +6236,12 @@ IOService::pmTellClientWithResponse( OSObject * object, void * arg ) } } - if (context->enableTracing) { + if (enableTracing) { SUB_ABSOLUTETIME(&end, &start); absolutetime_to_nanoseconds(end, &nsec); if ((nsec > LOG_KEXT_RESPONSE_TIMES) || (notify.returnValue != 0)) { - getPMRootDomain()->traceAckDelay(notifier, notify.returnValue / 1000, NS_TO_MS(nsec)); + getPMRootDomain()->traceNotificationResponse(notifier, NS_TO_MS(nsec), (uint32_t) notify.returnValue); } } } else { @@ -6234,7 +6311,8 @@ IOService::pmTellCapabilityAppWithResponse( OSObject * object, void * arg ) // Create client array (for tracking purposes) only if the service // has app clients. Usually only root domain does. 
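Both pmTellClientWithResponse() and pmTellCapabilityClientWithResponse() build the reply reference the same way: the low 16 bits index fResponseArray and the high 16 bits carry the serial number of the current notification pass, which responseValid() later checks. A small sketch of that packing; makeMsgRef and splitMsgRef are hypothetical helper names, not symbols from the patch.

    static inline uint32_t
    makeMsgRef(uint16_t serialNumber, uint32_t msgIndex)
    {
        return ((uint32_t)(serialNumber & 0xFFFF) << 16) | (msgIndex & 0xFFFF);
    }

    static inline void
    splitMsgRef(uint32_t refcon, uint16_t * serial, uint16_t * index)
    {
        *serial = (uint16_t)(refcon >> 16);
        *index  = (uint16_t)(refcon & 0xFFFF);
    }
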
if (NULL == context->notifyClients) { - context->notifyClients = OSArray::withCapacity( 32 ); + context->notifyClients = OSArray::withCapacity(32); + assert(context->notifyClients != NULL); } msgType = context->messageType; @@ -6308,28 +6386,25 @@ IOService::pmTellCapabilityClientWithResponse( IOReturn retCode; AbsoluteTime start, end; uint64_t nsec; + bool enableTracing; memset(&msgArg, 0, sizeof(msgArg)); if (context->messageFilter && !context->messageFilter(context->us, object, context, &msgArg, NULL)) { - if ((kIOLogDebugPower & gIOKitDebug) && - (OSDynamicCast(_IOServiceInterestNotifier, object))) { - _IOServiceInterestNotifier *n = (_IOServiceInterestNotifier *) object; - PM_LOG("%s DROP Client %s, notifier %p, handler %p\n", - context->us->getName(), - getIOMessageString(context->messageType), - OBFUSCATE(object), OBFUSCATE(n->handler)); - } + getPMRootDomain()->traceFilteredNotification(object); return; } if (NULL == context->notifyClients) { - context->notifyClients = OSArray::withCapacity( 32 ); + context->notifyClients = OSArray::withCapacity(32); + assert(context->notifyClients != NULL); } + notifier = OSDynamicCast(_IOServiceInterestNotifier, object); msgType = context->messageType; msgIndex = context->responseArray->getCount(); msgRef = ((context->serialNumber & 0xFFFF) << 16) + (msgIndex & 0xFFFF); + enableTracing = context->enableTracing && (notifier != NULL); IOServicePM * pwrMgt = context->us->pwrMgt; if (gIOKitDebug & kIOLogPower) { @@ -6341,26 +6416,20 @@ IOService::pmTellCapabilityClientWithResponse( OUR_PMLog(kPMLogClientNotify, (uintptr_t) notifier->handler, 0); } } - if ((kIOLogDebugPower & gIOKitDebug) && notifier) { - PM_LOG("%s MESG Client %s, notifier %p, handler %p\n", - context->us->getName(), - getIOMessageString(msgType), - OBFUSCATE(object), OBFUSCATE(notifier->handler)); - } msgArg.notifyRef = msgRef; msgArg.maxWaitForReply = 0; - if (context->enableTracing && (notifier != NULL)) { - getPMRootDomain()->traceDetail(notifier, true); + clock_get_uptime(&start); + if (enableTracing) { + getPMRootDomain()->traceNotification(notifier, true, start, msgIndex); } - clock_get_uptime(&start); - retCode = context->us->messageClient( - msgType, object, (void *) &msgArg, sizeof(msgArg)); + retCode = context->us->messageClient(msgType, object, (void *) &msgArg, sizeof(msgArg)); + clock_get_uptime(&end); - if (context->enableTracing && (notifier != NULL)) { - getPMRootDomain()->traceDetail(notifier, false); + if (enableTracing) { + getPMRootDomain()->traceNotification(notifier, false, end, msgIndex); } if (kIOReturnSuccess == retCode) { @@ -6395,12 +6464,12 @@ IOService::pmTellCapabilityClientWithResponse( } } - if (context->enableTracing) { + if (enableTracing) { SUB_ABSOLUTETIME(&end, &start); absolutetime_to_nanoseconds(end, &nsec); if ((nsec > LOG_KEXT_RESPONSE_TIMES) || (msgArg.maxWaitForReply != 0)) { - getPMRootDomain()->traceAckDelay(notifier, msgArg.maxWaitForReply / 1000, NS_TO_MS(nsec)); + getPMRootDomain()->traceNotificationResponse(notifier, NS_TO_MS(nsec), msgArg.maxWaitForReply); } } } else { @@ -6469,14 +6538,14 @@ IOService::tellClients( int messageType ) context.messageFilter = (IS_ROOT_DOMAIN) ? 
OSMemberFunctionCast( IOPMMessageFilter, - this, + (IOPMrootDomain *)this, &IOPMrootDomain::systemMessageFilter) : NULL; - context.notifyType = kNotifyPriority; + context.notifyType = kNotifyPriority; applyToInterested( gIOPriorityPowerStateInterest, tellKernelClientApplier, (void *) &context ); - context.notifyType = kNotifyApps; + context.notifyType = kNotifyApps; applyToInterested( gIOAppPowerStateInterest, tellAppClientApplier, (void *) &context ); @@ -6495,17 +6564,11 @@ tellKernelClientApplier( OSObject * object, void * arg ) { IOPowerStateChangeNotification notify; IOPMInterestContext * context = (IOPMInterestContext *) arg; + bool enableTracing = context->enableTracing; if (context->messageFilter && !context->messageFilter(context->us, object, context, NULL, NULL)) { - if ((kIOLogDebugPower & gIOKitDebug) && - (OSDynamicCast(_IOServiceInterestNotifier, object))) { - _IOServiceInterestNotifier *n = (_IOServiceInterestNotifier *) object; - PM_LOG("%s DROP Client %s, notifier %p, handler %p\n", - context->us->getName(), - IOService::getIOMessageString(context->messageType), - OBFUSCATE(object), OBFUSCATE(n->handler)); - } + IOService::getPMRootDomain()->traceFilteredNotification(object); return; } @@ -6514,23 +6577,14 @@ tellKernelClientApplier( OSObject * object, void * arg ) notify.stateNumber = context->stateNumber; notify.stateFlags = context->stateFlags; - if (context->enableTracing && object) { - IOService::getPMRootDomain()->traceDetail(object, true); - } - context->us->messageClient(context->messageType, object, ¬ify, sizeof(notify)); - if (context->enableTracing && object) { - IOService::getPMRootDomain()->traceDetail(object, false); + if (enableTracing) { + IOService::getPMRootDomain()->traceNotification(object, true); } + context->us->messageClient(context->messageType, object, ¬ify, sizeof(notify)); - - if ((kIOLogDebugPower & gIOKitDebug) && - (OSDynamicCast(_IOServiceInterestNotifier, object))) { - _IOServiceInterestNotifier *n = (_IOServiceInterestNotifier *) object; - PM_LOG("%s MESG Client %s, notifier %p, handler %p\n", - context->us->getName(), - IOService::getIOMessageString(context->messageType), - OBFUSCATE(object), OBFUSCATE(n->handler)); + if (enableTracing) { + IOService::getPMRootDomain()->traceNotification(object, false); } } @@ -6715,7 +6769,7 @@ IOService::responseValid( uint32_t refcon, int pid ) name, 0, NS_TO_MS(nsec), pid, object); } } else { - getPMRootDomain()->traceAckDelay(object, 0, NS_TO_MS(nsec)); + getPMRootDomain()->traceNotificationAck(object, NS_TO_MS(nsec)); } if (kIOLogDebugPower & gIOKitDebug) { @@ -6873,7 +6927,7 @@ IOService::configurePowerStatesReport( IOReportConfigureAction action, void *res IOReturn rc = kIOReturnSuccess; size_t reportSize; unsigned long i; - uint64_t ts; + uint64_t ts; if (!pwrMgt) { return kIOReturnUnsupported; @@ -6882,6 +6936,10 @@ IOService::configurePowerStatesReport( IOReportConfigureAction action, void *res if (!fNumberOfPowerStates) { return kIOReturnSuccess; // For drivers which are in power plane, but haven't called registerPowerDriver() } + + if (fNumberOfPowerStates > INT16_MAX) { + return kIOReturnOverrun; + } PM_LOCK(); switch (action) { @@ -6898,7 +6956,7 @@ IOService::configurePowerStatesReport( IOReportConfigureAction action, void *res } memset(fReportBuf, 0, reportSize); - STATEREPORT_INIT(fNumberOfPowerStates, fReportBuf, reportSize, + STATEREPORT_INIT((uint16_t) fNumberOfPowerStates, fReportBuf, reportSize, getRegistryEntryID(), kPMPowerStatesChID, kIOReportCategoryPower); for (i = 0; i < 
fNumberOfPowerStates; i++) { @@ -6918,7 +6976,7 @@ IOService::configurePowerStatesReport( IOReportConfigureAction action, void *res ((StateOrder(fMaxPowerState) & 0xf) << 4) | (StateOrder(i) & 0xf)); } ts = mach_absolute_time(); - STATEREPORT_SETSTATE(fReportBuf, fCurrentPowerState, ts); + STATEREPORT_SETSTATE(fReportBuf, (uint16_t) fCurrentPowerState, ts); break; case kIOReportDisable: @@ -7094,7 +7152,7 @@ IOService::updateSimplePowerReport( IOReportConfigureAction action, void *result PM_UNLOCK(); - return kIOReturnSuccess; + return rc; } @@ -7132,7 +7190,7 @@ IOService::getPowerStateForDomainFlags( IOPMPowerFlags flags ) return kPowerStateZero; } - for (int order = fNumberOfPowerStates - 1; order >= 0; order--) { + for (long order = fNumberOfPowerStates - 1; order >= 0; order--) { stateIndex = fPowerStates[order].stateOrderToIndex; if ((flags & fPowerStates[stateIndex].inputPowerFlags) == @@ -7483,10 +7541,12 @@ IOService::isPMBlocked( IOPMRequest * request, int count ) reason = 3; break; } +#if USE_SETTLE_TIMER // Waiting on settle timer expiration. if (fSettleTimeUS) { reason = 4; break; } +#endif } while (false); fWaitReason = reason; @@ -7765,13 +7825,13 @@ IOService::actionPMWorkQueueInvoke( IOPMRequest * request, IOPMWorkQueue * queue case kIOPM_TellCapabilityChangeDone: if (fIsPreChange) { if (fOutOfBandParameter == kNotifyCapabilityChangePriority) { - MS_POP(); // tellSystemCapabilityChange() + MS_POP(); // MS passed to tellSystemCapabilityChange() continue; } fOutOfBandParameter = kNotifyCapabilityChangePriority; } else { if (fOutOfBandParameter == kNotifyCapabilityChangeApps) { - MS_POP(); // tellSystemCapabilityChange() + MS_POP(); // MS passed to tellSystemCapabilityChange() continue; } fOutOfBandParameter = kNotifyCapabilityChangeApps; @@ -7859,7 +7919,7 @@ IOService::executePMRequest( IOPMRequest * request ) case kIOPMRequestTypeSetIdleTimerPeriod: { - fIdleTimerPeriod = (uintptr_t) request->fArg0; + fIdleTimerPeriod = (typeof(fIdleTimerPeriod))(uintptr_t) request->fArg0; fNextIdleTimerPeriod = fIdleTimerPeriod; if ((false == fLockedFlags.PMStop) && (fIdleTimerPeriod > 0)) { restartIdleTimer(); @@ -7958,11 +8018,11 @@ IOService::actionPMReplyQueue( IOPMRequest * request, IOPMRequestQueue * queue ) getPMRootDomain()->reset_watchdog_timer(this, 0); uint64_t nsec = computeTimeDeltaNS(&fDriverCallStartTime); - if (nsec > LOG_SETPOWER_TIMES) { + if (nsec > gIOPMSetPowerStateLogNS) { getPMRootDomain()->pmStatsRecordApplicationResponse( gIOPMStatsDriverPSChangeSlow, fName, kDriverCallSetPowerState, NS_TO_MS(nsec), getRegistryEntryID(), - NULL, fHeadNotePowerState); + NULL, fHeadNotePowerState, true); } OUR_PMLog(kPMLogDriverAcknowledgeSet, (uintptr_t) this, fDriverTimer); @@ -8213,6 +8273,21 @@ IOService::getIOMessageString( uint32_t msg ) return IOFindNameForValue(msg, msgNames); } +static const char * +getNotificationPhaseString( uint32_t phase ) +{ +#define PHASE_ENTRY(x) {(int) x, #x} + + static const IONamedValue phaseNames[] = { + PHASE_ENTRY( kNotifyApps ), + PHASE_ENTRY( kNotifyPriority ), + PHASE_ENTRY( kNotifyCapabilityChangeApps ), + PHASE_ENTRY( kNotifyCapabilityChangePriority ), + { 0, NULL } + }; + + return IOFindNameForValue(phase, phaseNames); +} // MARK: - // MARK: IOPMRequest @@ -8419,7 +8494,10 @@ IOPMRequestQueue::free( void ) void IOPMRequestQueue::queuePMRequest( IOPMRequest * request ) { + uint64_t now = mach_continuous_time(); + assert(request); + request->setTimestamp(now); IOLockLock(fLock); queue_enter(&fQueue, request, typeof(request), fCommandChain); 
IOLockUnlock(fLock); @@ -8432,11 +8510,13 @@ void IOPMRequestQueue::queuePMRequestChain( IOPMRequest ** requests, IOItemCount count ) { IOPMRequest * next; + uint64_t now = mach_continuous_time(); assert(requests && count); IOLockLock(fLock); while (count--) { next = *requests; + next->setTimestamp(now); requests++; queue_enter(&fQueue, next, typeof(next), fCommandChain); } @@ -8826,11 +8906,11 @@ IOServicePM::gatedSerialize( OSSerialize * s ) const dictSize += 4; } - if (PMActions.parameter & kPMActionsFlagLimitPower) { + if (PMActions.state & kPMActionsStatePowerClamped) { dictSize += 1; powerClamp = 0; - if (PMActions.parameter & - (kPMActionsFlagIsDisplayWrangler | kPMActionsFlagIsGraphicsDevice)) { + if (PMActions.flags & + (kPMActionsFlagIsDisplayWrangler | kPMActionsFlagIsGraphicsDriver)) { powerClamp++; } } @@ -8965,5 +9045,11 @@ IOServicePM::pmTrace( // NULL termination is not required. strncpy((char*)&nameAsArg, Name, sizeof(nameAsArg)); +#if defined(XNU_TARGET_OS_OSX) + KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, IODBG_POWER(event) | eventFunc, nameAsArg, + (uintptr_t)Owner->getRegistryEntryID(), (uintptr_t)(OBFUSCATE(param1)), + (uintptr_t)(OBFUSCATE(param2)), 0); +#else IOTimeStampConstant(IODBG_POWER(event) | eventFunc, nameAsArg, (uintptr_t)Owner->getRegistryEntryID(), (uintptr_t)(OBFUSCATE(param1)), (uintptr_t)(OBFUSCATE(param2))); +#endif } diff --git a/iokit/Kernel/IOServicePMPrivate.h b/iokit/Kernel/IOServicePMPrivate.h index 7d5a2bc54..31e42e28c 100644 --- a/iokit/Kernel/IOServicePMPrivate.h +++ b/iokit/Kernel/IOServicePMPrivate.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019 Apple Inc. All rights reserved. + * Copyright (c) 2019-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -32,6 +32,8 @@ #include #include +#define USE_SETTLE_TIMER 0 + //****************************************************************************** // PM command types //****************************************************************************** @@ -79,27 +81,27 @@ typedef void void * target, IOService * service, IOPMActions * actions, + const IOPMRequest * request, IOPMPowerStateIndex powerState, - IOPMPowerChangeFlags * changeFlags, - IOPMRequestTag requestTag ); + IOPMPowerChangeFlags * changeFlagsPtr ); typedef void (*IOPMActionPowerChangeDone)( void * target, IOService * service, IOPMActions * actions, + const IOPMRequest * request, IOPMPowerStateIndex powerState, - IOPMPowerChangeFlags changeFlags, - IOPMRequestTag requestTag ); + IOPMPowerChangeFlags changeFlags ); typedef void (*IOPMActionPowerChangeOverride)( void * target, IOService * service, IOPMActions * actions, - IOPMPowerStateIndex * powerState, - IOPMPowerChangeFlags * changeFlags, - IOPMRequestTag requestTag ); + const IOPMRequest * request, + IOPMPowerStateIndex * powerStatePtr, + IOPMPowerChangeFlags * changeFlagsPtr ); typedef void (*IOPMActionActivityTickle)( @@ -114,26 +116,32 @@ typedef void IOPMActions * actions, const OSSymbol * powerClient, IOPMPowerStateIndex oldPowerState, - IOPMPowerStateIndex newPowerState - ); + IOPMPowerStateIndex newPowerState ); struct IOPMActions { void * target; - uint32_t parameter; IOPMActionPowerChangeStart actionPowerChangeStart; IOPMActionPowerChangeDone actionPowerChangeDone; IOPMActionPowerChangeOverride actionPowerChangeOverride; IOPMActionActivityTickle actionActivityTickle; IOPMActionUpdatePowerClient actionUpdatePowerClient; + uint32_t darkWakePowerState; + uint16_t flags; + uint16_t state; }; -// IOPMActions parameter flags +// IOPMActions flags enum { 
- kPMActionsFlagIsDisplayWrangler = 0x00000100, - kPMActionsFlagIsGraphicsDevice = 0x00000200, - kPMActionsFlagIsAudioDevice = 0x00000400, - kPMActionsFlagLimitPower = 0x00000800, - kPMActionsPCIBitNumberMask = 0x000000ff + kPMActionsPCIBitNumberMask = 0x00ff, + kPMActionsFlagIsDisplayWrangler = 0x0100, + kPMActionsFlagIsGraphicsDriver = 0x0200, + kPMActionsFlagIsAudioDriver = 0x0400, + kPMActionsFlagHasDarkWakePowerState = 0x0800 +}; + +// IOPMActions state +enum { + kPMActionsStatePowerClamped = 0x0001 }; //****************************************************************************** @@ -142,9 +150,11 @@ struct IOPMPSEntry { IOPMPowerFlags capabilityFlags; IOPMPowerFlags outputPowerFlags; IOPMPowerFlags inputPowerFlags; - uint32_t staticPower; + unsigned long staticPower; +#if USE_SETTLE_TIMER uint32_t settleUpTime; uint32_t settleDownTime; +#endif IOPMPowerStateIndex stateOrder; IOPMPowerStateIndex stateOrderToIndex; }; @@ -180,7 +190,9 @@ private: uint32_t MachineState; thread_call_t AckTimer; +#if USE_SETTLE_TIMER thread_call_t SettleTimer; +#endif thread_call_t IdleTimer; thread_call_t WatchdogTimer; thread_call_t SpinDumpTimer; @@ -191,8 +203,10 @@ private: uint64_t WatchdogDeadline; // Settle time after changing power state. +#if USE_SETTLE_TIMER uint32_t SettleTimeUS; - uint32_t IdleTimerGeneration; +#endif + IOPMPowerStateIndex IdleTimerGeneration; // The flags describing current change note. IOPMPowerChangeFlags HeadNoteChangeFlags; @@ -247,9 +261,9 @@ private: IOLock * ActivityLock; // Idle timer's period in seconds. - unsigned long IdleTimerPeriod; - unsigned long IdleTimerMinPowerState; - unsigned long NextIdleTimerPeriod; + int IdleTimerPeriod; + int NextIdleTimerPeriod; + IOPMPowerStateIndex IdleTimerMinPowerState; AbsoluteTime IdleTimerStartTime; // Power state desired by a subclassed device object. @@ -293,7 +307,7 @@ private: OSArray * NotifyClientArray; // Used to uniquely identify power management notification to apps and clients. - UInt16 SerialNumber; + uint16_t SerialNumber; // Used to communicate desired function to tellClientsWithResponse(). // This is used because it avoids changing the signatures of the affected virtual methods. 
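The old 32-bit IOPMActions.parameter is split here into a 16-bit flags word for fixed attributes (display wrangler, graphics or audio driver, PCI bit number) and a 16-bit state word for mutable bits such as kPMActionsStatePowerClamped, alongside the new darkWakePowerState field. A minimal sketch of how the two words might be consulted; isGraphicsRelated and isPowerClamped are illustrative helper names only.

    static bool
    isGraphicsRelated(const IOPMActions * actions)
    {
        return (actions->flags &
               (kPMActionsFlagIsDisplayWrangler | kPMActionsFlagIsGraphicsDriver)) != 0;
    }

    static bool
    isPowerClamped(const IOPMActions * actions)
    {
        return (actions->state & kPMActionsStatePowerClamped) != 0;
    }
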
@@ -312,8 +326,8 @@ private: uint32_t DriverCallReason; uint32_t OutOfBandMessage; uint32_t TempClampCount; - uint32_t OverrideMaxPowerState; - uint32_t DeviceUsablePowerState; + IOPMPowerStateIndex OverrideMaxPowerState; + IOPMPowerStateIndex DeviceUsablePowerState; // Protected by ActivityLock - BEGIN IOPMPowerStateIndex ActivityTicklePowerState; @@ -469,8 +483,8 @@ private: #define WATCHDOG_SLEEP_TIMEOUT (180) // 180 secs #define WATCHDOG_WAKE_TIMEOUT (180) // 180 secs #else -#define WATCHDOG_SLEEP_TIMEOUT (180) // 180 secs -#define WATCHDOG_WAKE_TIMEOUT (180) // 180 secs +#define WATCHDOG_SLEEP_TIMEOUT (35) // 35 secs (kMaxTimeRequested + 5s) +#define WATCHDOG_WAKE_TIMEOUT (35) // 35 secs (kMaxTimeRequested + 5s) #endif // Max wait time in microseconds for kernel priority and capability clients @@ -570,11 +584,11 @@ enum { // PM Statistics & Diagnostics //****************************************************************************** -extern const OSSymbol *gIOPMStatsResponseTimedOut; -extern const OSSymbol *gIOPMStatsResponseCancel; -extern const OSSymbol *gIOPMStatsResponseSlow; -extern const OSSymbol *gIOPMStatsResponsePrompt; -extern const OSSymbol *gIOPMStatsDriverPSChangeSlow; +extern OSPtr gIOPMStatsResponseTimedOut; +extern OSPtr gIOPMStatsResponseCancel; +extern OSPtr gIOPMStatsResponseSlow; +extern OSPtr gIOPMStatsResponsePrompt; +extern OSPtr gIOPMStatsDriverPSChangeSlow; //****************************************************************************** // IOPMRequest @@ -585,12 +599,13 @@ class IOPMRequest : public IOCommand OSDeclareDefaultStructors( IOPMRequest ); protected: - IOService * fTarget; // request target - IOPMRequest * fRequestNext; // the next request in the chain - IOPMRequest * fRequestRoot; // the root request in the call tree - IOItemCount fWorkWaitCount;// execution blocked if non-zero - IOItemCount fFreeWaitCount;// completion blocked if non-zero - uint32_t fRequestType; // request type + IOService * fTarget; // request target + IOPMRequest * fRequestNext; // the next request in the chain + IOPMRequest * fRequestRoot; // the root request in the call tree + uint32_t fWorkWaitCount; // execution blocked if non-zero + uint32_t fFreeWaitCount; // completion blocked if non-zero + uint64_t fTimestamp; // MCTU + uint32_t fRequestType; // request type bool fIsQuiesceBlocker; IOPMCompletionAction fCompletionAction; @@ -598,7 +613,7 @@ protected: void * fCompletionParam; public: - uint32_t fRequestTag; + uint32_t fTag; void * fArg0; void * fArg1; void * fArg2; @@ -641,6 +656,12 @@ public: return fRequestType; } + inline uint32_t + getTag( void ) const + { + return fTag; + } + inline bool isReplyType( void ) const { @@ -677,6 +698,18 @@ public: fCompletionParam = param; } + inline void + setTimestamp( uint64_t time ) + { + fTimestamp = time; + } + + inline uint64_t + getTimestamp( void ) const + { + return fTimestamp; + } + static IOPMRequest * create( void ); bool init( IOService * owner, IOOptionBits type ); void reset( void ); @@ -709,7 +742,7 @@ protected: public: static IOPMRequestQueue * create( IOService * inOwner, Action inAction ); - void queuePMRequest( IOPMRequest * request ); + void queuePMRequest( LIBKERN_CONSUMED IOPMRequest * request ); void queuePMRequestChain( IOPMRequest ** requests, IOItemCount count ); }; diff --git a/iokit/Kernel/IOServicePrivate.h b/iokit/Kernel/IOServicePrivate.h index 4ae23be1b..85501621e 100644 --- a/iokit/Kernel/IOServicePrivate.h +++ b/iokit/Kernel/IOServicePrivate.h @@ -150,7 +150,7 @@ class _IOConfigThread : public 
OSObject OSDeclareDefaultStructors(_IOConfigThread); public: - static void configThread( int configThreadId ); + static void configThread( const char * name ); static void main( void * arg, wait_result_t result ); }; @@ -175,7 +175,7 @@ public: static _IOServiceJob * startJob( IOService * nub, int type, IOOptionBits options = 0 ); - static void pingConfig( class _IOServiceJob * job ); + static void pingConfig( LIBKERN_CONSUMED class _IOServiceJob * job ); }; class IOResources : public IOService diff --git a/iokit/Kernel/IOSharedDataQueue.cpp b/iokit/Kernel/IOSharedDataQueue.cpp index 71d3c6817..4443e5fa7 100644 --- a/iokit/Kernel/IOSharedDataQueue.cpp +++ b/iokit/Kernel/IOSharedDataQueue.cpp @@ -26,10 +26,13 @@ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ +#define IOKIT_ENABLE_SHARED_PTR + #include #include #include #include +#include #ifdef enqueue #undef enqueue @@ -43,29 +46,28 @@ OSDefineMetaClassAndStructors(IOSharedDataQueue, IODataQueue) -IOSharedDataQueue *IOSharedDataQueue::withCapacity(UInt32 size) +OSSharedPtr +IOSharedDataQueue::withCapacity(UInt32 size) { - IOSharedDataQueue *dataQueue = new IOSharedDataQueue; + OSSharedPtr dataQueue = OSMakeShared(); if (dataQueue) { if (!dataQueue->initWithCapacity(size)) { - dataQueue->release(); - dataQueue = NULL; + return nullptr; } } return dataQueue; } -IOSharedDataQueue * +OSSharedPtr IOSharedDataQueue::withEntries(UInt32 numEntries, UInt32 entrySize) { - IOSharedDataQueue *dataQueue = new IOSharedDataQueue; + OSSharedPtr dataQueue = OSMakeShared(); if (dataQueue) { if (!dataQueue->initWithEntries(numEntries, entrySize)) { - dataQueue->release(); - dataQueue = NULL; + return nullptr; } } @@ -147,10 +149,10 @@ IOSharedDataQueue::free() super::free(); } -IOMemoryDescriptor * +OSSharedPtr IOSharedDataQueue::getMemoryDescriptor() { - IOMemoryDescriptor *descriptor = NULL; + OSSharedPtr descriptor; if (dataQueue != NULL) { descriptor = IOMemoryDescriptor::withAddress(dataQueue, getQueueSize() + DATA_QUEUE_MEMORY_HEADER_SIZE + DATA_QUEUE_MEMORY_APPENDIX_SIZE, kIODirectionOutIn); @@ -182,7 +184,7 @@ IOSharedDataQueue::peek() UInt32 headOffset = dataQueue->head; UInt32 queueSize = getQueueSize(); - if (headOffset >= queueSize) { + if (headOffset > queueSize) { return NULL; } diff --git a/iokit/Kernel/IOSimpleReporter.cpp b/iokit/Kernel/IOSimpleReporter.cpp index 7807763a9..fc29f24c3 100644 --- a/iokit/Kernel/IOSimpleReporter.cpp +++ b/iokit/Kernel/IOSimpleReporter.cpp @@ -26,6 +26,9 @@ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ +#define IOKIT_ENABLE_SHARED_PTR + +#include #include #include #include "IOReporterDefs.h" @@ -34,34 +37,23 @@ OSDefineMetaClassAndStructors(IOSimpleReporter, IOReporter); /* static */ -IOSimpleReporter* +OSSharedPtr IOSimpleReporter::with(IOService *reportingService, IOReportCategories categories, IOReportUnit unit) { - IOSimpleReporter *reporter, *rval = NULL; - - // kprintf("%s\n", __func__); // can't IORLOG() from static + OSSharedPtr reporter; - reporter = new IOSimpleReporter; + reporter = OSMakeShared(); if (!reporter) { - goto finish; + return nullptr; } - if (!reporter->initWith(reportingService, categories, unit)) { - goto finish; - } - - // success - rval = reporter; - -finish: - if (!rval) { - OSSafeReleaseNULL(reporter); + return nullptr; } - return rval; + return reporter; } bool diff --git a/iokit/Kernel/IOStartIOKit.cpp b/iokit/Kernel/IOStartIOKit.cpp index e64a84a45..c022807f2 100644 --- a/iokit/Kernel/IOStartIOKit.cpp +++ b/iokit/Kernel/IOStartIOKit.cpp @@ -28,6 +28,7 @@ #include #include +#include 
#include #include #include @@ -35,6 +36,7 @@ #include #include #include +#include #include #include #include @@ -52,7 +54,9 @@ const OSSymbol * gIOProgressBackbufferKey; OSSet * gIORemoveOnReadProperties; extern "C" { -void StartIOKit( void * p1, void * p2, void * p3, void * p4 ); +void InitIOKit(void *dtTop); +void ConfigureIOKit(void); +void StartIOKitMatching(void); void IORegistrySetOSBuildVersion(char * build_version); void IORecordProgressBackbuffer(void * buffer, size_t size, uint32_t theme); @@ -60,6 +64,8 @@ extern void OSlibkernInit(void); void iokit_post_constructor_init(void); +SECURITY_READ_ONLY_LATE(static IOPlatformExpertDevice*) gRootNub; + #include #include @@ -91,6 +97,7 @@ iokit_post_constructor_init(void) OSObject * obj; IOCPUInitialize(); + IOPlatformActionsInitialize(); root = IORegistryEntry::initialize(); assert( root ); IOService::initialize(); @@ -100,6 +107,7 @@ iokit_post_constructor_init(void) IOUserClient::initialize(); IOMemoryDescriptor::initialize(); IORootParent::initialize(); + IOReporter::initialize(); // Initializes IOPMinformeeList class-wide shared lock IOPMinformeeList::getSharedRecursiveLock(); @@ -123,19 +131,21 @@ iokit_post_constructor_init(void) void (*record_startup_extensions_function)(void) = NULL; void -StartIOKit( void * p1, void * p2, void * p3, void * p4 ) +InitIOKit(void *dtTop) { - IOPlatformExpertDevice * rootNub; - int debugFlags; + int debugFlags = 0; if (PE_parse_boot_argn( "io", &debugFlags, sizeof(debugFlags))) { gIOKitDebug = debugFlags; } -#if DEVELOPMENT || DEBUG + // Enable IOWaitQuiet panics on arm64 macOS except on KASAN. + // existing 3rd party KEXTs may hold the registry busy on x86 RELEASE kernels. + // Enabling this on other platforms is tracked in rdar://66364108 +#if XNU_TARGET_OS_OSX && defined(__arm64__) && !KASAN else { gIOKitDebug |= kIOWaitQuietPanics; } -#endif /* DEVELOPMENT || DEBUG */ +#endif if (PE_parse_boot_argn( "iotrace", &debugFlags, sizeof(debugFlags))) { gIOKitTrace = debugFlags; @@ -170,29 +180,48 @@ StartIOKit( void * p1, void * p2, void * p3, void * p4 ) interruptAccountingInit(); - rootNub = new IOPlatformExpertDevice; + gRootNub = new IOPlatformExpertDevice; + if (__improbable(gRootNub == NULL)) { + panic("Failed to allocate IOKit root nub"); + } + bool ok = gRootNub->init(dtTop); + if (__improbable(!ok)) { + panic("Failed to initialize IOKit root nub"); + } + gRootNub->attach(NULL); - if (rootNub && rootNub->initWithArgs( p1, p2, p3, p4)) { - rootNub->attach( NULL ); + /* If the bootstrap segment set up a function to record startup + * extensions, call it now. + */ + if (record_startup_extensions_function) { + record_startup_extensions_function(); + } +} - /* If the bootstrap segment set up a function to record startup - * extensions, call it now. - */ - if (record_startup_extensions_function) { - record_startup_extensions_function(); - } +void +ConfigureIOKit(void) +{ + assert(gRootNub != NULL); + gRootNub->configureDefaults(); +} - rootNub->registerService(); +void +StartIOKitMatching(void) +{ + assert(gRootNub != NULL); + bool ok = gRootNub->startIOServiceMatching(); + if (__improbable(!ok)) { + panic("Failed to start IOService matching"); + } #if !NO_KEXTD - /* Add a busy count to keep the registry busy until kextd has - * completely finished launching. This is decremented when kextd - * messages the kernel after the in-kernel linker has been - * removed and personalities have been sent. 
- */ - IOService::getServiceRoot()->adjustBusy(1); + /* Add a busy count to keep the registry busy until kextd has + * completely finished launching. This is decremented when kextd + * messages the kernel after the in-kernel linker has been + * removed and personalities have been sent. + */ + IOService::getServiceRoot()->adjustBusy(1); #endif - } } void @@ -215,8 +244,12 @@ void IORecordProgressBackbuffer(void * buffer, size_t size, uint32_t theme) { IORegistryEntry * chosen; + + if (((unsigned int) size) != size) { + return; + } if ((chosen = IORegistryEntry::fromPath(kIODeviceTreePlane ":/chosen"))) { - chosen->setProperty(kIOProgressBackbufferKey, buffer, size); + chosen->setProperty(kIOProgressBackbufferKey, buffer, (unsigned int) size); chosen->setProperty(kIOProgressColorThemeKey, theme, 32); chosen->release(); diff --git a/iokit/Kernel/IOStateReporter.cpp b/iokit/Kernel/IOStateReporter.cpp index 4380c6cf2..44dd5c18d 100644 --- a/iokit/Kernel/IOStateReporter.cpp +++ b/iokit/Kernel/IOStateReporter.cpp @@ -26,6 +26,9 @@ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ +#define IOKIT_ENABLE_SHARED_PTR + +#include #include #include #include "IOReporterDefs.h" @@ -36,34 +39,28 @@ OSDefineMetaClassAndStructors(IOStateReporter, IOReporter); /* static */ -IOStateReporter* +OSSharedPtr IOStateReporter::with(IOService *reportingService, IOReportCategories categories, int nstates, IOReportUnit unit /* = kIOReportUnitHWTicks*/) { - IOStateReporter *reporter, *rval = NULL; - - // kprintf("%s\n", __func__); // can't IORLOG() from static + OSSharedPtr reporter; - reporter = new IOStateReporter; - if (!reporter) { - goto finish; + if (nstates > INT16_MAX) { + return nullptr; } - if (!reporter->initWith(reportingService, categories, nstates, unit)) { - goto finish; + reporter = OSMakeShared(); + if (!reporter) { + return nullptr; } - // success - rval = reporter; - -finish: - if (!rval) { - OSSafeReleaseNULL(reporter); + if (!reporter->initWith(reportingService, categories, (int16_t) nstates, unit)) { + return nullptr; } - return rval; + return reporter; } bool diff --git a/iokit/Kernel/IOStatistics.cpp b/iokit/Kernel/IOStatistics.cpp index 6bac5ad69..20d9cc3ef 100644 --- a/iokit/Kernel/IOStatistics.cpp +++ b/iokit/Kernel/IOStatistics.cpp @@ -679,14 +679,13 @@ IOStatistics::getStatistics(sysctl_req *req) goto exit; } - buffer = (char*)kalloc(calculatedSize); + buffer = (char*)kheap_alloc(KHEAP_TEMP, calculatedSize, + (zalloc_flags_t)(Z_WAITOK | Z_ZERO)); if (!buffer) { error = ENOMEM; goto exit; } - memset(buffer, 0, calculatedSize); - ptr = buffer; header = (IOStatisticsHeader*)((void*)ptr); @@ -740,7 +739,7 @@ IOStatistics::getStatistics(sysctl_req *req) error = SYSCTL_OUT(req, buffer, calculatedSize); - kfree(buffer, calculatedSize); + kheap_free(KHEAP_TEMP, buffer, calculatedSize); exit: IORWLockUnlock(IOStatistics::lock); @@ -775,12 +774,12 @@ IOStatistics::getWorkLoopStatistics(sysctl_req *req) goto exit; } - buffer = (char*)kalloc(calculatedSize); + buffer = (char*)kheap_alloc(KHEAP_TEMP, calculatedSize, + (zalloc_flags_t)(Z_WAITOK | Z_ZERO)); if (!buffer) { error = ENOMEM; goto exit; } - memset(buffer, 0, calculatedSize); header = (IOStatisticsWorkLoopHeader*)((void*)buffer); header->sig = IOSTATISTICS_SIG_WORKLOOP; @@ -798,7 +797,7 @@ IOStatistics::getWorkLoopStatistics(sysctl_req *req) error = SYSCTL_OUT(req, buffer, size); - kfree(buffer, calculatedSize); + kheap_free(KHEAP_TEMP, buffer, calculatedSize); exit: IORWLockUnlock(IOStatistics::lock); @@ -841,12 +840,12 @@ 
IOStatistics::getUserClientStatistics(sysctl_req *req) LOG(2, "IOStatistics::getUserClientStatistics - requesting kext w/load tag: %d\n", requestedLoadTag); - buffer = (char*)kalloc(calculatedSize); + buffer = (char*)kheap_alloc(KHEAP_TEMP, calculatedSize, + (zalloc_flags_t)(Z_WAITOK | Z_ZERO)); if (!buffer) { error = ENOMEM; goto exit; } - memset(buffer, 0, calculatedSize); header = (IOStatisticsUserClientHeader*)((void*)buffer); header->sig = IOSTATISTICS_SIG_USERCLIENT; @@ -866,7 +865,7 @@ IOStatistics::getUserClientStatistics(sysctl_req *req) error = EINVAL; } - kfree(buffer, calculatedSize); + kheap_free(KHEAP_TEMP, buffer, calculatedSize); exit: IORWLockUnlock(IOStatistics::lock); @@ -1312,10 +1311,13 @@ IOStatistics::countAlloc(uint32_t index, vm_size_t size) if (!enabled) { return; } + if (size > INT_MAX) { + return; + } ke = getKextNodeFromBacktrace(FALSE); if (ke) { - OSAddAtomic(size, &ke->memoryCounters[index]); + OSAddAtomic((SInt32) size, &ke->memoryCounters[index]); releaseKextNode(ke); } } diff --git a/iokit/Kernel/IOStringFuncs.c b/iokit/Kernel/IOStringFuncs.c index fc3a8f825..48f8239c3 100644 --- a/iokit/Kernel/IOStringFuncs.c +++ b/iokit/Kernel/IOStringFuncs.c @@ -178,7 +178,7 @@ strtol(const char *nptr, char **endptr, int base) * overflow. */ cutoff = neg ? -(unsigned long)LONG_MIN : LONG_MAX; - cutlim = cutoff % (unsigned long)base; + cutlim = ((int)(cutoff % (unsigned long)base)); cutoff /= (unsigned long)base; for (acc = 0, any = 0;; c = *s++) { if (isdigit(c)) { @@ -251,7 +251,7 @@ strtoul(const char *nptr, char **endptr, int base) base = c == '0' ? 8 : 10; } cutoff = (unsigned long)ULONG_MAX / (unsigned long)base; - cutlim = (unsigned long)ULONG_MAX % (unsigned long)base; + cutlim = ((int)((unsigned long)ULONG_MAX % (unsigned long)base)); for (acc = 0, any = 0;; c = *s++) { if (isdigit(c)) { c -= '0'; @@ -351,7 +351,7 @@ strtoq(const char *nptr, char **endptr, int base) */ qbase = (unsigned)base; cutoff = neg ? 
-(u_quad_t)QUAD_MIN : QUAD_MAX; - cutlim = cutoff % qbase; + cutlim = ((int)(cutoff % qbase)); cutoff /= qbase; for (acc = 0, any = 0;; c = *s++) { if (isdigit(c)) { @@ -434,7 +434,7 @@ strtouq(const char *nptr, } qbase = (unsigned)base; cutoff = (u_quad_t)UQUAD_MAX / qbase; - cutlim = (u_quad_t)UQUAD_MAX % qbase; + cutlim = ((int)((u_quad_t)UQUAD_MAX % qbase)); for (acc = 0, any = 0;; c = *s++) { if (isdigit(c)) { c -= '0'; diff --git a/iokit/Kernel/IOSubMemoryDescriptor.cpp b/iokit/Kernel/IOSubMemoryDescriptor.cpp index c65c7c486..07ef3782b 100644 --- a/iokit/Kernel/IOSubMemoryDescriptor.cpp +++ b/iokit/Kernel/IOSubMemoryDescriptor.cpp @@ -27,6 +27,7 @@ */ #include +#include #include "IOKitKernelInternal.h" diff --git a/iokit/Kernel/IOTimerEventSource.cpp b/iokit/Kernel/IOTimerEventSource.cpp index 59eb6a0a2..644d840c3 100644 --- a/iokit/Kernel/IOTimerEventSource.cpp +++ b/iokit/Kernel/IOTimerEventSource.cpp @@ -26,6 +26,7 @@ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ +#include #include __BEGIN_DECLS @@ -46,13 +47,14 @@ __END_DECLS #endif #include +#include #define super IOEventSource OSDefineMetaClassAndStructors(IOTimerEventSource, IOEventSource) -OSMetaClassDefineReservedUsed(IOTimerEventSource, 0); -OSMetaClassDefineReservedUsed(IOTimerEventSource, 1); -OSMetaClassDefineReservedUsed(IOTimerEventSource, 2); +OSMetaClassDefineReservedUsedX86(IOTimerEventSource, 0); +OSMetaClassDefineReservedUsedX86(IOTimerEventSource, 1); +OSMetaClassDefineReservedUsedX86(IOTimerEventSource, 2); OSMetaClassDefineReservedUnused(IOTimerEventSource, 3); OSMetaClassDefineReservedUnused(IOTimerEventSource, 4); OSMetaClassDefineReservedUnused(IOTimerEventSource, 5); @@ -100,29 +102,38 @@ do { \ // __inline__ void -IOTimerEventSource::invokeAction(IOTimerEventSource::Action _action, IOTimerEventSource * ts, +IOTimerEventSource::invokeAction(IOEventSource::Action _action, IOTimerEventSource * ts, OSObject * _owner, IOWorkLoop * _workLoop) { bool trace = (gIOKitTrace & kIOTraceTimers) ? 
true : false; + void * address; + + if (kActionBlock & flags) { + address = ptrauth_nop_cast(void *, _Block_get_invoke_fn((struct Block_layout *) actionBlock)); + } else { + address = ptrauth_nop_cast(void *, _action); + } if (trace) { IOTimeStampStartConstant(IODBG_TIMES(IOTIMES_ACTION), - VM_KERNEL_ADDRHIDE(_action), VM_KERNEL_ADDRHIDE(_owner)); + VM_KERNEL_ADDRHIDE(address), + VM_KERNEL_ADDRHIDE(_owner)); } if (kActionBlock & flags) { ((IOTimerEventSource::ActionBlock) actionBlock)(ts); } else { - (*_action)(_owner, ts); + ((IOTimerEventSource::Action)_action)(_owner, ts); } #if CONFIG_DTRACE - DTRACE_TMR3(iotescallout__expire, Action, _action, OSObject, _owner, void, _workLoop); + DTRACE_TMR3(iotescallout__expire, Action, address, OSObject, _owner, void, _workLoop); #endif if (trace) { IOTimeStampEndConstant(IODBG_TIMES(IOTIMES_ACTION), - VM_KERNEL_UNSLIDE(_action), VM_KERNEL_ADDRHIDE(_owner)); + VM_KERNEL_UNSLIDE(address), + VM_KERNEL_ADDRHIDE(_owner)); } } @@ -137,10 +148,10 @@ IOTimerEventSource::timeout(void *self) IOWorkLoop * wl = me->workLoop; if (wl) { - Action doit; + IOEventSource::Action doit; wl->closeGate(); IOStatisticsCloseGate(); - doit = (Action) me->action; + doit = me->action; if (doit && me->enabled && AbsoluteTime_to_scalar(&me->abstime)) { me->invokeAction(doit, me, me->owner, me->workLoop); } @@ -164,11 +175,12 @@ IOTimerEventSource::timeoutAndRelease(void * self, void * c) IOWorkLoop * wl = me->reserved->workLoop; if (wl) { - Action doit; + IOEventSource::Action doit; wl->closeGate(); IOStatisticsCloseGate(); - doit = (Action) me->action; + doit = me->action; if (doit && (me->reserved->calloutGeneration == count)) { + thread_call_start_iotes_invocation((thread_call_t)me->calloutEntry); me->invokeAction(doit, me, me->owner, me->workLoop); } IOStatisticsOpenGate(); @@ -185,11 +197,11 @@ IOTimerEventSource::timeoutAndRelease(void * self, void * c) bool IOTimerEventSource::checkForWork() { - Action doit; + IOEventSource::Action doit; if (reserved && (reserved->calloutGenerationSignaled == reserved->calloutGeneration) - && enabled && (doit = (Action) action)) { + && enabled && (doit = action)) { reserved->calloutGenerationSignaled = ~reserved->calloutGeneration; invokeAction(doit, this, owner, workLoop); } @@ -222,9 +234,11 @@ IOTimerEventSource::setTimeoutFunc() // reserved != 0 means IOTimerEventSource::timeoutAndRelease is being used, // not a subclassed implementation - reserved = IONew(ExpansionData, 1); + reserved = IONewZero(ExpansionData, 1); + reserved->calloutGenerationSignaled = ~reserved->calloutGeneration; - options = abstime; + // make use of an existing ivar for parameter passing + options = (uint32_t) abstime; abstime = 0; thread_call_options_t tcoptions = 0; @@ -308,6 +322,7 @@ IOTimerEventSource::init(OSObject *inOwner, Action inAction) bool IOTimerEventSource::init(uint32_t options, OSObject *inOwner, Action inAction) { + // make use of an existing ivar for parameter passing abstime = options; return init(inOwner, inAction); } diff --git a/iokit/Kernel/IOUserClient.cpp b/iokit/Kernel/IOUserClient.cpp index e98f41f36..12bc47e46 100644 --- a/iokit/Kernel/IOUserClient.cpp +++ b/iokit/Kernel/IOUserClient.cpp @@ -28,6 +28,7 @@ #include +#include #include #include #include @@ -173,35 +174,37 @@ public: }; #define super OSObject -OSDefineMetaClassAndStructors(IOMachPort, OSObject) +OSDefineMetaClassAndStructorsWithZone(IOMachPort, OSObject, ZC_ZFREE_CLEARMEM) static IOLock * gIOObjectPortLock; IOLock * gIOUserServerLock; +SECURITY_READ_ONLY_LATE(const 
struct io_filter_callbacks *) gIOUCFilterCallbacks; + /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ SLIST_HEAD(IOMachPortHashList, IOMachPort); -#if CONFIG_EMBEDDED -#define PORT_HASH_SIZE 256 -#else +#if defined(XNU_TARGET_OS_OSX) #define PORT_HASH_SIZE 4096 -#endif /* CONFIG_EMBEDDED */ +#else /* defined(!XNU_TARGET_OS_OSX) */ +#define PORT_HASH_SIZE 256 +#endif /* !defined(!XNU_TARGET_OS_OSX) */ -IOMachPortHashList ports[PORT_HASH_SIZE]; +IOMachPortHashList gIOMachPortHash[PORT_HASH_SIZE]; void IOMachPortInitialize(void) { for (size_t i = 0; i < PORT_HASH_SIZE; i++) { - SLIST_INIT(&ports[i]); + SLIST_INIT(&gIOMachPortHash[i]); } } IOMachPortHashList* IOMachPort::bucketForObject(OSObject *obj, ipc_kobject_type_t type ) { - return &ports[os_hash_kernel_pointer(obj) % PORT_HASH_SIZE]; + return &gIOMachPortHash[os_hash_kernel_pointer(obj) % PORT_HASH_SIZE]; } IOMachPort* @@ -409,6 +412,23 @@ IOMachPort::free( void ) /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ +static bool +IOTaskRegistryCompatibility(task_t task) +{ + return false; +} + +static void +IOTaskRegistryCompatibilityMatching(task_t task, OSDictionary * matching) +{ + if (!IOTaskRegistryCompatibility(task)) { + return; + } + matching->setObject(gIOCompatibilityMatchKey, kOSBooleanTrue); +} + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + class IOUserIterator : public OSIterator { OSDeclareDefaultStructors(IOUserIterator); @@ -560,7 +580,7 @@ iokit_port_object_description(io_object_t obj, kobject_description_t desc) #if DEVELOPMENT || DEBUG } else if ((noti = OSDynamicCast(IOUserNotification, obj)) && ((serviceNoti = OSDynamicCast(_IOServiceNotifier, noti->holdNotify)))) { - s = OSSerialize::withCapacity(page_size); + s = OSSerialize::withCapacity((unsigned int) page_size); if (s && serviceNoti->matching->serialize(s)) { snprintf(desc, KOBJECT_DESCRIPTION_LENGTH, "%s(%s)", obj->getMetaClass()->getClassName(), s->text()); } @@ -571,6 +591,10 @@ iokit_port_object_description(io_object_t obj, kobject_description_t desc) } } +// FIXME: Implementation of these functions are hidden from the static analyzer. +// As for now, the analyzer doesn't consistently support wrapper functions +// for retain and release. 
+#ifndef __clang_analyzer__ void iokit_add_reference( io_object_t obj, natural_t type ) { @@ -595,6 +619,7 @@ iokit_remove_reference( io_object_t obj ) obj->release(); } } +#endif // __clang_analyzer__ void iokit_remove_connect_reference( io_object_t obj ) @@ -676,24 +701,33 @@ iokit_client_died( io_object_t obj, ipc_port_t /* port */, IOUserClient * client; IOMemoryMap * map; IOUserNotification * notify; + IOUserServerCheckInToken * token; if (!IOMachPort::noMoreSendersForObject( obj, type, mscount )) { return kIOReturnNotReady; } - if (IKOT_IOKIT_CONNECT == type) { + switch (type) { + case IKOT_IOKIT_CONNECT: if ((client = OSDynamicCast( IOUserClient, obj ))) { IOStatisticsClientCall(); - IOLockLock(client->lock); + IORWLockWrite(client->lock); client->clientDied(); - IOLockUnlock(client->lock); + IORWLockUnlock(client->lock); } - } else if (IKOT_IOKIT_OBJECT == type) { + break; + case IKOT_IOKIT_OBJECT: if ((map = OSDynamicCast( IOMemoryMap, obj ))) { map->taskDied(); } else if ((notify = OSDynamicCast( IOUserNotification, obj ))) { notify->setNotification( NULL ); } + break; + case IKOT_IOKIT_IDENT: + if ((token = OSDynamicCast( IOUserServerCheckInToken, obj ))) { + IOUserServerCheckInToken::notifyNoSenders( token ); + } + break; } return kIOReturnSuccess; @@ -714,7 +748,7 @@ class IOServiceUserNotification : public IOUserNotification enum { kMaxOutstanding = 1024 }; PingMsg * pingMsg; - vm_size_t msgSize; + mach_msg_size_t msgSize; OSArray * newSet; bool armed; bool ipcLogged; @@ -747,7 +781,7 @@ class IOServiceMessageUserNotification : public IOUserNotification }; PingMsg * pingMsg; - vm_size_t msgSize; + mach_msg_size_t msgSize; uint8_t clientIs64; int owningPID; bool ipcLogged; @@ -756,7 +790,7 @@ public: virtual bool init( mach_port_t port, natural_t type, void * reference, vm_size_t referenceSize, - vm_size_t extraSize, + mach_msg_size_t extraSize, bool clientIs64 ); virtual void free() APPLE_KEXT_OVERRIDE; @@ -852,7 +886,8 @@ IOServiceUserNotification::init( mach_port_t port, natural_t type, return false; } - msgSize = sizeof(PingMsg) - sizeof(OSAsyncReference64) + referenceSize; + msgSize = (mach_msg_size_t) (sizeof(PingMsg) - sizeof(OSAsyncReference64) + referenceSize); + pingMsg = (PingMsg *) IOMalloc( msgSize); if (!pingMsg) { return false; @@ -1000,7 +1035,7 @@ OSDefineMetaClassAndStructors(IOServiceMessageUserNotification, IOUserNotificati bool IOServiceMessageUserNotification::init( mach_port_t port, natural_t type, - void * reference, vm_size_t referenceSize, vm_size_t extraSize, + void * reference, vm_size_t referenceSize, mach_msg_size_t extraSize, bool client64 ) { if (!super::init()) { @@ -1016,7 +1051,7 @@ IOServiceMessageUserNotification::init( mach_port_t port, natural_t type, owningPID = proc_selfpid(); extraSize += sizeof(IOServiceInterestContent64); - msgSize = sizeof(PingMsg) - sizeof(OSAsyncReference64) + referenceSize; + msgSize = (mach_msg_size_t) (sizeof(PingMsg) - sizeof(OSAsyncReference64) + referenceSize); pingMsg = (PingMsg *) IOMalloc( msgSize); if (!pingMsg) { return false; @@ -1091,7 +1126,7 @@ IOServiceMessageUserNotification::handler( void * ref, void * allocMsg; kern_return_t kr; vm_size_t argSize; - vm_size_t thisMsgSize; + mach_msg_size_t thisMsgSize; ipc_port_t thisPort, providerPort; struct PingMsg * thisMsg; IOServiceInterestContent64 * data; @@ -1121,10 +1156,9 @@ IOServiceMessageUserNotification::handler( void * ref, type |= ((argSize & kIOKitNoticationMsgSizeMask) << kIOKitNoticationTypeSizeAdjShift); argSize = (argSize + 
kIOKitNoticationMsgSizeMask) & ~kIOKitNoticationMsgSizeMask; - thisMsgSize = msgSize - + sizeof(IOServiceInterestContent64) - - sizeof(data->messageArgument) - + argSize; + if (os_add3_overflow(msgSize, sizeof(IOServiceInterestContent64) - sizeof(data->messageArgument), argSize, &thisMsgSize)) { + return kIOReturnBadArgument; + } if (thisMsgSize > sizeof(stackMsg)) { allocMsg = IOMalloc(thisMsgSize); @@ -1211,17 +1245,32 @@ IOUserClient::initialize( void ) gIOUserClientOwnersLock = IOLockAlloc(); gIOUserServerLock = IOLockAlloc(); assert(gIOObjectPortLock && gIOUserClientOwnersLock); + +#if IOTRACKING + IOTrackingQueueCollectUser(IOUserIterator::gMetaClass.getTracking()); + IOTrackingQueueCollectUser(IOServiceMessageUserNotification::gMetaClass.getTracking()); + IOTrackingQueueCollectUser(IOServiceUserNotification::gMetaClass.getTracking()); + IOTrackingQueueCollectUser(IOUserClient::gMetaClass.getTracking()); + IOTrackingQueueCollectUser(IOMachPort::gMetaClass.getTracking()); +#endif /* IOTRACKING */ } void +#if __LP64__ +__attribute__((__noreturn__)) +#endif IOUserClient::setAsyncReference(OSAsyncReference asyncRef, mach_port_t wakePort, void *callback, void *refcon) { +#if __LP64__ + panic("setAsyncReference not valid for 64b"); +#else asyncRef[kIOAsyncReservedIndex] = ((uintptr_t) wakePort) | (kIOUCAsync0Flags & asyncRef[kIOAsyncReservedIndex]); asyncRef[kIOAsyncCalloutFuncIndex] = (uintptr_t) callback; asyncRef[kIOAsyncCalloutRefconIndex] = (uintptr_t) refcon; +#endif } void @@ -1406,24 +1455,20 @@ IOUserClient::clientHasPrivilege( void * securityToken, return kr; } +#define MAX_ENTITLEMENTS_LEN (128 * 1024) OSDictionary * IOUserClient::copyClientEntitlements(task_t task) { -#define MAX_ENTITLEMENTS_LEN (128 * 1024) - proc_t p = NULL; pid_t pid = 0; size_t len = 0; void *entitlements_blob = NULL; - char *entitlements_data = NULL; - OSObject *entitlements_obj = NULL; OSDictionary *entitlements = NULL; - OSString *errorString = NULL; p = (proc_t)get_bsdtask_info(task); if (p == NULL) { - goto fail; + return NULL; } pid = proc_pid(p); @@ -1434,8 +1479,18 @@ IOUserClient::copyClientEntitlements(task_t task) } if (cs_entitlements_blob_get(p, &entitlements_blob, &len) != 0) { - goto fail; + return NULL; } + return IOUserClient::copyEntitlementsFromBlob(entitlements_blob, len); +} + +OSDictionary * +IOUserClient::copyEntitlementsFromBlob(void *entitlements_blob, size_t len) +{ + char *entitlements_data = NULL; + OSObject *entitlements_obj = NULL; + OSString *errorString = NULL; + OSDictionary *entitlements = NULL; if (len <= offsetof(CS_GenericBlob, data)) { goto fail; @@ -1447,8 +1502,8 @@ IOUserClient::copyClientEntitlements(task_t task) */ len -= offsetof(CS_GenericBlob, data); if (len > MAX_ENTITLEMENTS_LEN) { - IOLog("failed to parse entitlements for %s[%u]: %lu bytes of entitlements exceeds maximum of %u\n", - proc_best_name(p), pid, len, MAX_ENTITLEMENTS_LEN); + IOLog("failed to parse entitlements: %lu bytes of entitlements exceeds maximum of %u\n", + len, MAX_ENTITLEMENTS_LEN); goto fail; } @@ -1466,8 +1521,7 @@ IOUserClient::copyClientEntitlements(task_t task) entitlements_obj = OSUnserializeXML(entitlements_data, len + 1, &errorString); if (errorString != NULL) { - IOLog("failed to parse entitlements for %s[%u]: %s\n", - proc_best_name(p), pid, errorString->getCStringNoCopy()); + IOLog("failed to parse entitlements: %s\n", errorString->getCStringNoCopy()); goto fail; } if (entitlements_obj == NULL) { @@ -1493,6 +1547,18 @@ fail: return entitlements; } +OSDictionary * 
+IOUserClient::copyClientEntitlementsVnode(vnode_t vnode, off_t offset) +{ + size_t len = 0; + void *entitlements_blob = NULL; + + if (cs_entitlements_blob_get_vnode(vnode, offset, &entitlements_blob, &len) != 0) { + return NULL; + } + return IOUserClient::copyEntitlementsFromBlob(entitlements_blob, len); +} + OSObject * IOUserClient::copyClientEntitlement( task_t task, const char * entitlement ) @@ -1515,6 +1581,30 @@ IOUserClient::copyClientEntitlement( task_t task, return value; } +OSObject * +IOUserClient::copyClientEntitlementVnode( + struct vnode *vnode, + off_t offset, + const char *entitlement) +{ + OSDictionary *entitlements; + OSObject *value; + + entitlements = copyClientEntitlementsVnode(vnode, offset); + if (entitlements == NULL) { + return NULL; + } + + /* Fetch the entitlement value from the dictionary. */ + value = entitlements->getObject(entitlement); + if (value != NULL) { + value->retain(); + } + + entitlements->release(); + return value; +} + bool IOUserClient::init() { @@ -1565,7 +1655,7 @@ bool IOUserClient::reserve() { if (!reserved) { - reserved = IONew(ExpansionData, 1); + reserved = IONewZero(ExpansionData, 1); if (!reserved) { return false; } @@ -1752,6 +1842,44 @@ iokit_task_terminate(task_t task) return KERN_SUCCESS; } +struct IOUCFilterPolicy { + task_t task; + io_filter_policy_t filterPolicy; + IOUCFilterPolicy * next; +}; + +io_filter_policy_t +IOUserClient::filterForTask(task_t task, io_filter_policy_t addFilterPolicy) +{ + IOUCFilterPolicy * elem; + io_filter_policy_t filterPolicy; + + filterPolicy = 0; + IOLockLock(filterLock); + + for (elem = reserved->filterPolicies; elem && (elem->task != task); elem = elem->next) { + } + + if (elem) { + if (addFilterPolicy) { + assert(addFilterPolicy == elem->filterPolicy); + } + filterPolicy = elem->filterPolicy; + } else if (addFilterPolicy) { + elem = IONewZero(IOUCFilterPolicy, 1); + if (elem) { + elem->task = task; + elem->filterPolicy = addFilterPolicy; + elem->next = reserved->filterPolicies; + reserved->filterPolicies = elem; + filterPolicy = addFilterPolicy; + } + } + + IOLockUnlock(filterLock); + return filterPolicy; +} + void IOUserClient::free() { @@ -1759,7 +1887,10 @@ IOUserClient::free() mappings->release(); } if (lock) { - IOLockFree(lock); + IORWLockFree(lock); + } + if (filterLock) { + IOLockFree(filterLock); } IOStatisticsUnregisterCounter(); @@ -1768,6 +1899,15 @@ IOUserClient::free() assert(!owners.prev); if (reserved) { + IOUCFilterPolicy * elem; + IOUCFilterPolicy * nextElem; + for (elem = reserved->filterPolicies; elem; elem = nextElem) { + nextElem = elem->next; + if (elem->filterPolicy && gIOUCFilterCallbacks->io_filter_release) { + gIOUCFilterCallbacks->io_filter_release(elem->filterPolicy); + } + IODelete(elem, IOUCFilterPolicy, 1); + } IODelete(reserved, ExpansionData, 1); } @@ -1837,6 +1977,17 @@ IOUserClient::clientMemoryForType( UInt32 type, return kIOReturnUnsupported; } +IOReturn +IOUserClient::clientMemoryForType( UInt32 type, + IOOptionBits * options, + OSSharedPtr& memory ) +{ + IOMemoryDescriptor* memoryRaw = nullptr; + IOReturn result = clientMemoryForType(type, options, &memoryRaw); + memory.reset(memoryRaw, OSNoRetain); + return result; +} + #if !__LP64__ IOMemoryMap * IOUserClient::mapClientMemory( @@ -1920,6 +2071,16 @@ IOUserClient::copyObjectForPortNameInTask(task_t task, mach_port_name_t port_nam return object ? 
kIOReturnSuccess : kIOReturnIPCError; } +IOReturn +IOUserClient::copyObjectForPortNameInTask(task_t task, mach_port_name_t port_name, + OSSharedPtr& obj) +{ + OSObject* objRaw = NULL; + IOReturn result = copyObjectForPortNameInTask(task, port_name, &objRaw); + obj.reset(objRaw, OSNoRetain); + return result; +} + IOReturn IOUserClient::adjustPortNameReferencesInTask(task_t task, mach_port_name_t port_name, mach_port_delta_t delta) { @@ -1963,6 +2124,16 @@ getTargetAndMethodForIndex(IOService **targetP, UInt32 index) return method; } +IOExternalMethod * +IOUserClient:: +getTargetAndMethodForIndex(OSSharedPtr& targetP, UInt32 index) +{ + IOService* targetPRaw = NULL; + IOExternalMethod* result = getTargetAndMethodForIndex(&targetPRaw, index); + targetP.reset(targetPRaw, OSRetain); + return result; +} + IOExternalAsyncMethod * IOUserClient:: getAsyncTargetAndMethodForIndex(IOService ** targetP, UInt32 index) @@ -1976,6 +2147,16 @@ getAsyncTargetAndMethodForIndex(IOService ** targetP, UInt32 index) return method; } +IOExternalAsyncMethod * +IOUserClient:: +getAsyncTargetAndMethodForIndex(OSSharedPtr& targetP, UInt32 index) +{ + IOService* targetPRaw = NULL; + IOExternalAsyncMethod* result = getAsyncTargetAndMethodForIndex(&targetPRaw, index); + targetP.reset(targetPRaw, OSRetain); + return result; +} + IOExternalTrap * IOUserClient:: getTargetAndTrapForIndex(IOService ** targetP, UInt32 index) @@ -2404,7 +2585,6 @@ is_io_iterator_is_valid( return kIOReturnSuccess; } - static kern_return_t internal_io_service_match_property_table( io_service_t _service, @@ -2419,9 +2599,12 @@ internal_io_service_match_property_table( OSDictionary * dict; assert(matching_size); + + obj = OSUnserializeXML(matching, matching_size); if ((dict = OSDynamicCast( OSDictionary, obj))) { + IOTaskRegistryCompatibilityMatching(current_task(), dict); *matches = service->passiveMatch( dict ); kr = kIOReturnSuccess; } else { @@ -2502,6 +2685,7 @@ internal_io_service_get_matching_services( obj = OSUnserializeXML(matching, matching_size); if ((dict = OSDynamicCast( OSDictionary, obj))) { + IOTaskRegistryCompatibilityMatching(current_task(), dict); *existing = IOUserIterator::withIterator(IOService::getMatchingServices( dict )); kr = kIOReturnSuccess; } else { @@ -2584,6 +2768,7 @@ internal_io_service_get_matching_service( obj = OSUnserializeXML(matching, matching_size); if ((dict = OSDynamicCast( OSDictionary, obj))) { + IOTaskRegistryCompatibilityMatching(current_task(), dict); *service = IOService::copyMatchingService( dict ); kr = *service ? 
kIOReturnSuccess : kIOReturnNotFound; } else { @@ -2661,9 +2846,10 @@ internal_io_service_add_notification( IOServiceUserNotification * userNotify = NULL; IONotifier * notify = NULL; const OSSymbol * sym; + OSObject * obj; OSDictionary * dict; IOReturn err; - unsigned long int userMsgType; + natural_t userMsgType; if (master_port != master_device_port) { return kIOReturnNotPrivileged; @@ -2681,11 +2867,13 @@ internal_io_service_add_notification( } assert(matching_size); - dict = OSDynamicCast(OSDictionary, OSUnserializeXML(matching, matching_size)); + obj = OSUnserializeXML(matching, matching_size); + dict = OSDynamicCast(OSDictionary, obj); if (!dict) { err = kIOReturnBadArgument; continue; } + IOTaskRegistryCompatibilityMatching(current_task(), dict); if ((sym == gIOPublishNotification) || (sym == gIOFirstPublishNotification)) { @@ -2731,8 +2919,8 @@ internal_io_service_add_notification( if (sym) { sym->release(); } - if (dict) { - dict->release(); + if (obj) { + obj->release(); } return err; @@ -3040,11 +3228,16 @@ is_io_connect_get_notification_semaphore( natural_t notification_type, semaphore_t *semaphore ) { + IOReturn ret; CHECK( IOUserClient, connection, client ); IOStatisticsClientCall(); - return client->getNotificationSemaphore((UInt32) notification_type, - semaphore ); + IORWLockWrite(client->lock); + ret = client->getNotificationSemaphore((UInt32) notification_type, + semaphore ); + IORWLockUnlock(client->lock); + + return ret; } /* Routine io_registry_get_root_entry */ @@ -3149,6 +3342,20 @@ is_io_registry_entry_from_path( entry = IORegistryEntry::fromPath( path ); + if (!entry && IOTaskRegistryCompatibility(current_task())) { + OSDictionary * matching; + const OSObject * objects[2] = { kOSBooleanTrue, NULL }; + const OSSymbol * keys[2] = { gIOCompatibilityMatchKey, gIOPathMatchKey }; + + objects[1] = OSString::withCStringNoCopy(path); + matching = OSDictionary::withObjects(objects, keys, 2, 2); + if (matching) { + entry = IOService::copyMatchingService(matching); + } + OSSafeReleaseNULL(matching); + OSSafeReleaseNULL(objects[1]); + } + *registry_entry = entry; return kIOReturnSuccess; @@ -3364,6 +3571,31 @@ is_io_registry_entry_get_registry_entry_id( return kIOReturnSuccess; } + +static OSObject * +IOCopyPropertyCompatible(IORegistryEntry * regEntry, const char * name) +{ + OSObject * obj; + OSObject * compatProps; + OSDictionary * props; + + obj = regEntry->copyProperty(name); + if (!obj + && IOTaskRegistryCompatibility(current_task()) + && (compatProps = regEntry->copyProperty(gIOCompatibilityPropertiesKey))) { + props = OSDynamicCast(OSDictionary, compatProps); + if (props) { + obj = props->getObject(name); + if (obj) { + obj->retain(); + } + } + compatProps->release(); + } + + return obj; +} + /* Routine io_registry_entry_get_property */ kern_return_t is_io_registry_entry_get_property_bytes( @@ -3390,7 +3622,7 @@ is_io_registry_entry_get_property_bytes( } #endif - obj = entry->copyProperty(property_name); + obj = IOCopyPropertyCompatible(entry, property_name); if (!obj) { return kIOReturnNoResources; } @@ -3447,7 +3679,7 @@ is_io_registry_entry_get_property( mach_msg_type_number_t *propertiesCnt ) { kern_return_t err; - vm_size_t len; + unsigned int len; OSObject * obj; CHECK( IORegistryEntry, registry_entry, entry ); @@ -3458,7 +3690,7 @@ is_io_registry_entry_get_property( } #endif - obj = entry->copyProperty(property_name); + obj = IOCopyPropertyCompatible(entry, property_name); if (!obj) { return kIOReturnNotFound; } @@ -3494,7 +3726,7 @@ 
is_io_registry_entry_get_property_recursively( mach_msg_type_number_t *propertiesCnt ) { kern_return_t err; - vm_size_t len; + unsigned int len; OSObject * obj; CHECK( IORegistryEntry, registry_entry, entry ); @@ -3583,8 +3815,9 @@ is_io_registry_entry_get_properties_bin_buf( io_buf_ptr_t *properties, mach_msg_type_number_t *propertiesCnt) { - kern_return_t err = kIOReturnSuccess; - vm_size_t len; + kern_return_t err = kIOReturnSuccess; + unsigned int len; + OSObject * compatProperties; OSSerialize * s; OSSerialize::Editor editor = NULL; void * editRef = NULL; @@ -3607,7 +3840,23 @@ is_io_registry_entry_get_properties_bin_buf( return kIOReturnNoMemory; } - if (!entry->serializeProperties(s)) { + if (IOTaskRegistryCompatibility(current_task()) + && (compatProperties = entry->copyProperty(gIOCompatibilityPropertiesKey))) { + OSDictionary * dict; + + dict = entry->dictionaryWithProperties(); + if (!dict) { + err = kIOReturnNoMemory; + } else { + dict->removeObject(gIOCompatibilityPropertiesKey); + dict->merge(OSDynamicCast(OSDictionary, compatProperties)); + if (!dict->serialize(s)) { + err = kIOReturnUnsupported; + } + dict->release(); + } + compatProperties->release(); + } else if (!entry->serializeProperties(s)) { err = kIOReturnUnsupported; } @@ -3659,7 +3908,7 @@ is_io_registry_entry_get_property_bin_buf( mach_msg_type_number_t *propertiesCnt ) { kern_return_t err; - vm_size_t len; + unsigned int len; OSObject * obj; const OSSymbol * sym; @@ -3680,10 +3929,24 @@ is_io_registry_entry_get_property_bin_buf( obj = entry->copyPropertyKeys(); } else { if ((kIORegistryIterateRecursively & options) && plane[0]) { - obj = entry->copyProperty(property_name, - IORegistryEntry::getPlane(plane), options ); + if (!IOTaskRegistryCompatibility(current_task())) { + obj = entry->copyProperty(property_name, + IORegistryEntry::getPlane(plane), options); + } else { + obj = IOCopyPropertyCompatible(entry, property_name); + if ((NULL == obj) && plane && (options & kIORegistryIterateRecursively)) { + IORegistryIterator * iter; + iter = IORegistryIterator::iterateOver(entry, IORegistryEntry::getPlane(plane), options); + if (iter) { + while ((NULL == obj) && (entry = iter->getNextObject())) { + obj = IOCopyPropertyCompatible(entry, property_name); + } + iter->release(); + } + } + } } else { - obj = entry->copyProperty(property_name); + obj = IOCopyPropertyCompatible(entry, property_name); } if (obj && gIORemoveOnReadProperties->containsObject(sym)) { entry->removeProperty(sym); @@ -4007,11 +4270,46 @@ is_io_service_open_extended( if (res == kIOReturnSuccess) { assert( OSDynamicCast(IOUserClient, client)); + if (!client->reserved) { + if (!client->reserve()) { + client->clientClose(); + OSSafeReleaseNULL(client); + res = kIOReturnNoMemory; + } + } + } + if (res == kIOReturnSuccess) { client->sharedInstance = (NULL != client->getProperty(kIOUserClientSharedInstanceKey)); - client->messageAppSuspended = (NULL != client->getProperty(kIOUserClientMessageAppSuspendedKey)); - client->closed = false; - client->lock = IOLockAlloc(); + if (client->sharedInstance) { + IOLockLock(gIOUserClientOwnersLock); + } + if (!client->lock) { + client->lock = IORWLockAlloc(); + client->filterLock = IOLockAlloc(); + + client->messageAppSuspended = (NULL != client->getProperty(kIOUserClientMessageAppSuspendedKey)); + { + OSObject * obj; + extern const OSSymbol * gIOSurfaceIdentifier; + obj = client->getProperty(kIOUserClientDefaultLockingKey); + if (obj) { + client->defaultLocking = (kOSBooleanFalse != 
client->getProperty(kIOUserClientDefaultLockingKey)); + } else { + const OSMetaClass * meta; + OSKext * kext; + meta = client->getMetaClass(); + kext = meta->getKext(); + if (!kext || !kext->hasDependency(gIOSurfaceIdentifier)) { + client->defaultLocking = true; + client->setProperty(kIOUserClientDefaultLockingKey, kOSBooleanTrue); + } + } + } + } + if (client->sharedInstance) { + IOLockUnlock(gIOUserClientOwnersLock); + } disallowAccess = (crossEndian && (kOSBooleanTrue != service->getProperty(kIOUserClientCrossEndianCompatibleKey)) @@ -4025,6 +4323,21 @@ is_io_service_open_extended( } #endif + if ((kIOReturnSuccess == res) + && gIOUCFilterCallbacks + && gIOUCFilterCallbacks->io_filter_resolver) { + io_filter_policy_t filterPolicy; + filterPolicy = client->filterForTask(owningTask, 0); + if (!filterPolicy) { + res = gIOUCFilterCallbacks->io_filter_resolver(owningTask, client, connect_type, &filterPolicy); + if (kIOReturnUnsupported == res) { + res = kIOReturnSuccess; + } else if (kIOReturnSuccess == res) { + client->filterForTask(owningTask, filterPolicy); + } + } + } + if (kIOReturnSuccess == res) { res = client->registerOwner(owningTask); } @@ -4066,9 +4379,9 @@ is_io_service_close( IOStatisticsClientCall(); if (client->sharedInstance || OSCompareAndSwap8(0, 1, &client->closed)) { - IOLockLock(client->lock); + IORWLockWrite(client->lock); client->clientClose(); - IOLockUnlock(client->lock); + IORWLockUnlock(client->lock); } else { IOLog("ignored is_io_service_close(0x%qx,%s)\n", client->getRegistryEntryID(), client->getName()); @@ -4109,10 +4422,10 @@ is_io_connect_set_notification_port( CHECK( IOUserClient, connection, client ); IOStatisticsClientCall(); - IOLockLock(client->lock); + IORWLockWrite(client->lock); ret = client->registerNotificationPort( port, notification_type, (io_user_reference_t) reference ); - IOLockUnlock(client->lock); + IORWLockUnlock(client->lock); return ret; } @@ -4128,10 +4441,10 @@ is_io_connect_set_notification_port_64( CHECK( IOUserClient, connection, client ); IOStatisticsClientCall(); - IOLockLock(client->lock); + IORWLockWrite(client->lock); ret = client->registerNotificationPort( port, notification_type, reference ); - IOLockUnlock(client->lock); + IORWLockUnlock(client->lock); return ret; } @@ -4157,7 +4470,13 @@ is_io_connect_map_memory_into_task } IOStatisticsClientCall(); + if (client->defaultLocking) { + IORWLockWrite(client->lock); + } map = client->mapClientMemory64( memory_type, into_task, flags, *address ); + if (client->defaultLocking) { + IORWLockUnlock(client->lock); + } if (map) { *address = map->getAddress(); @@ -4266,7 +4585,13 @@ is_io_connect_unmap_memory_from_task } IOStatisticsClientCall(); + if (client->defaultLocking) { + IORWLockWrite(client->lock); + } err = client->clientMemoryForType((UInt32) memory_type, &options, &memory ); + if (client->defaultLocking) { + IORWLockUnlock(client->lock); + } if (memory && (kIOReturnSuccess == err)) { options = (options & ~kIOMapUserOptionsMask) @@ -4282,7 +4607,8 @@ is_io_connect_unmap_memory_from_task IOLockUnlock( gIOObjectPortLock); mach_port_name_t name = 0; - if (from_task != current_task()) { + bool is_shared_instance_or_from_current_task = from_task != current_task() || client->sharedInstance; + if (is_shared_instance_or_from_current_task) { name = IOMachPort::makeSendRightForTask( from_task, map, IKOT_IOKIT_OBJECT ); map->release(); } @@ -4294,7 +4620,7 @@ is_io_connect_unmap_memory_from_task } else { IOMachPort::releasePortForObject( map, IKOT_IOKIT_OBJECT ); } - if (from_task == 
current_task()) { + if (!is_shared_instance_or_from_current_task) { map->release(); } } else { @@ -4332,8 +4658,17 @@ is_io_connect_add_client( CHECK( IOUserClient, connection, client ); CHECK( IOUserClient, connect_to, to ); + IOReturn ret; + IOStatisticsClientCall(); - return client->connectClient( to ); + if (client->defaultLocking) { + IORWLockWrite(client->lock); + } + ret = client->connectClient( to ); + if (client->defaultLocking) { + IORWLockUnlock(client->lock); + } + return ret; } @@ -4412,7 +4747,21 @@ is_io_connect_method_var_output args.structureOutputDescriptorSize = 0; IOStatisticsClientCall(); - ret = client->externalMethod( selector, &args ); + ret = kIOReturnSuccess; + + io_filter_policy_t filterPolicy = client->filterForTask(current_task(), 0); + if (filterPolicy && gIOUCFilterCallbacks->io_filter_applier) { + ret = gIOUCFilterCallbacks->io_filter_applier(filterPolicy, io_filter_type_external_method, selector); + } + if (kIOReturnSuccess == ret) { + if (client->defaultLocking) { + IORWLockRead(client->lock); + } + ret = client->externalMethod( selector, &args ); + if (client->defaultLocking) { + IORWLockUnlock(client->lock); + } + } *scalar_outputCnt = args.scalarOutputCount; *inband_outputCnt = args.structureOutputSize; @@ -4420,7 +4769,7 @@ is_io_connect_method_var_output if (var_outputCnt && var_output && (kIOReturnSuccess == ret)) { OSSerialize * serialize; OSData * data; - vm_size_t len; + unsigned int len; if ((serialize = OSDynamicCast(OSSerialize, structureVariableOutputData))) { len = serialize->getLength(); @@ -4491,8 +4840,13 @@ is_io_connect_method if (ool_input && (ool_input_size <= sizeof(io_struct_inband_t))) { return kIOReturnIPCError; } - if (ool_output && (*ool_output_size <= sizeof(io_struct_inband_t))) { - return kIOReturnIPCError; + if (ool_output) { + if (*ool_output_size <= sizeof(io_struct_inband_t)) { + return kIOReturnIPCError; + } + if (*ool_output_size > UINT_MAX) { + return kIOReturnIPCError; + } } if (ool_input) { @@ -4515,10 +4869,25 @@ is_io_connect_method } args.structureOutputDescriptor = outputMD; - args.structureOutputDescriptorSize = ool_output_size ? *ool_output_size : 0; + args.structureOutputDescriptorSize = ool_output_size + ? 
((typeof(args.structureOutputDescriptorSize)) * ool_output_size) + : 0; IOStatisticsClientCall(); - ret = client->externalMethod( selector, &args ); + ret = kIOReturnSuccess; + io_filter_policy_t filterPolicy = client->filterForTask(current_task(), 0); + if (filterPolicy && gIOUCFilterCallbacks->io_filter_applier) { + ret = gIOUCFilterCallbacks->io_filter_applier(filterPolicy, io_filter_type_external_method, selector); + } + if (kIOReturnSuccess == ret) { + if (client->defaultLocking) { + IORWLockRead(client->lock); + } + ret = client->externalMethod( selector, &args ); + if (client->defaultLocking) { + IORWLockUnlock(client->lock); + } + } *scalar_outputCnt = args.scalarOutputCount; *inband_outputCnt = args.structureOutputSize; @@ -4564,6 +4933,10 @@ is_io_connect_async_method IOMemoryDescriptor * inputMD = NULL; IOMemoryDescriptor * outputMD = NULL; + if (referenceCnt < 1) { + return kIOReturnBadArgument; + } + bzero(&args.__reserved[0], sizeof(args.__reserved)); args.__reservedA = 0; args.version = kIOExternalMethodArgumentsCurrentVersion; @@ -4589,8 +4962,13 @@ is_io_connect_async_method if (ool_input && (ool_input_size <= sizeof(io_struct_inband_t))) { return kIOReturnIPCError; } - if (ool_output && (*ool_output_size <= sizeof(io_struct_inband_t))) { - return kIOReturnIPCError; + if (ool_output) { + if (*ool_output_size <= sizeof(io_struct_inband_t)) { + return kIOReturnIPCError; + } + if (*ool_output_size > UINT_MAX) { + return kIOReturnIPCError; + } } if (ool_input) { @@ -4613,10 +4991,23 @@ is_io_connect_async_method } args.structureOutputDescriptor = outputMD; - args.structureOutputDescriptorSize = *ool_output_size; + args.structureOutputDescriptorSize = ((typeof(args.structureOutputDescriptorSize)) * ool_output_size); IOStatisticsClientCall(); - ret = client->externalMethod( selector, &args ); + ret = kIOReturnSuccess; + io_filter_policy_t filterPolicy = client->filterForTask(current_task(), 0); + if (filterPolicy && gIOUCFilterCallbacks->io_filter_applier) { + ret = gIOUCFilterCallbacks->io_filter_applier(filterPolicy, io_filter_type_external_async_method, selector); + } + if (kIOReturnSuccess == ret) { + if (client->defaultLocking) { + IORWLockRead(client->lock); + } + ret = client->externalMethod( selector, &args ); + if (client->defaultLocking) { + IORWLockUnlock(client->lock); + } + } *scalar_outputCnt = args.scalarOutputCount; *inband_outputCnt = args.structureOutputSize; @@ -5487,10 +5878,6 @@ shim_io_async_method_structureI_structureO( return err; } -#if !NO_KEXTD -bool gIOKextdClearedBusy = false; -#endif - /* Routine io_catalog_send_data */ kern_return_t is_io_catalog_send_data( @@ -5520,7 +5907,7 @@ is_io_catalog_send_data( return kIOReturnBadArgument; } - if (!IOTaskHasEntitlement(current_task(), kOSKextManagementEntitlement)) { + if (!IOTaskHasEntitlement(current_task(), kIOCatalogManagementEntitlement)) { OSString * taskName = IOCopyLogNameForPID(proc_selfpid()); IOLog("IOCatalogueSendData(%s): Not entitled\n", taskName ? taskName->getCStringNoCopy() : ""); OSSafeReleaseNULL(taskName); @@ -5606,33 +5993,11 @@ is_io_catalog_send_data( case kIOCatalogStartMatching__Removed: case kIOCatalogRemoveKernelLinker__Removed: - kr = KERN_NOT_SUPPORTED; - break; - case kIOCatalogKextdActive: -#if !NO_KEXTD - IOServiceTrace(IOSERVICE_KEXTD_ALIVE, 0, 0, 0, 0); - OSKext::setKextdActive(); - - /* Dump all nonloaded startup extensions; kextd will now send them - * down on request. 
- */ - OSKext::flushNonloadedKexts( /* flushPrelinkedKexts */ false); -#endif - kr = kIOReturnSuccess; + case kIOCatalogKextdFinishedLaunching: + kr = KERN_NOT_SUPPORTED; break; - case kIOCatalogKextdFinishedLaunching: { -#if !NO_KEXTD - if (!gIOKextdClearedBusy) { - IOService::kextdLaunched(); - gIOKextdClearedBusy = true; - } -#endif - kr = kIOReturnSuccess; - } - break; - default: kr = kIOReturnBadArgument; break; @@ -5714,14 +6079,14 @@ is_io_catalog_get_data( if (kr == kIOReturnSuccess) { vm_offset_t data; vm_map_copy_t copy; - vm_size_t size; + unsigned int size; size = s->getLength(); kr = vm_allocate_kernel(kernel_map, &data, size, VM_FLAGS_ANYWHERE, VM_KERN_MEMORY_IOKIT); if (kr == kIOReturnSuccess) { bcopy(s->text(), (void *)data, size); kr = vm_map_copyin(kernel_map, (vm_map_address_t)data, - (vm_map_size_t)size, true, ©); + size, true, ©); *outData = (char *)copy; *outDataCount = size; } @@ -5813,11 +6178,17 @@ iokit_user_client_trap(struct iokit_user_client_trap_args *args) } OSSafeReleaseNULL(object); } else if ((userClient = OSDynamicCast(IOUserClient, iokit_lookup_connect_ref_current_task((mach_port_name_t) ref)))) { - IOExternalTrap *trap; + IOExternalTrap *trap = NULL; IOService *target = NULL; - trap = userClient->getTargetAndTrapForIndex(&target, args->index); - + result = kIOReturnSuccess; + io_filter_policy_t filterPolicy = userClient->filterForTask(current_task(), 0); + if (filterPolicy && gIOUCFilterCallbacks->io_filter_applier) { + result = gIOUCFilterCallbacks->io_filter_applier(filterPolicy, io_filter_type_trap, args->index); + } + if (kIOReturnSuccess == result) { + trap = userClient->getTargetAndTrapForIndex(&target, args->index); + } if (trap && target) { IOTrap func; @@ -5998,11 +6369,28 @@ IOUserClient::externalMethod( uint32_t selector, IOExternalMethodArguments * arg } } - args->structureOutputSize = structureOutputSize; + if (structureOutputSize > UINT_MAX) { + structureOutputSize = 0; + err = kIOReturnBadArgument; + } + + args->structureOutputSize = ((typeof(args->structureOutputSize))structureOutputSize); return err; } +IOReturn +IOUserClient::registerFilterCallbacks(const struct io_filter_callbacks *callbacks, size_t size) +{ + if (size < sizeof(*callbacks)) { + return kIOReturnBadArgument; + } + if (!OSCompareAndSwapPtr(NULL, __DECONST(void *, callbacks), &gIOUCFilterCallbacks)) { + return kIOReturnBusy; + } + return kIOReturnSuccess; +} + #if __LP64__ OSMetaClassDefineReservedUnused(IOUserClient, 0); OSMetaClassDefineReservedUnused(IOUserClient, 1); diff --git a/iokit/Kernel/IOUserServer.cpp b/iokit/Kernel/IOUserServer.cpp index df4172f3c..88fd179e5 100644 --- a/iokit/Kernel/IOUserServer.cpp +++ b/iokit/Kernel/IOUserServer.cpp @@ -35,6 +35,8 @@ #include #include #include +#include +#include #include #include #include @@ -44,12 +46,13 @@ #include #include #include +#include #include #include #include #include "IOKitKernelInternal.h" -/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ #include #include @@ -64,15 +67,15 @@ #include #include -/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ #include -/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ +/* * * * * * * * * 
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -SInt64 gIODKDebug = kIODKEnable; +SECURITY_READ_ONLY_LATE(SInt64) gIODKDebug = kIODKEnable; -/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ struct IOPStrings; @@ -129,6 +132,7 @@ public: IOMemoryDescriptor ** memory) APPLE_KEXT_OVERRIDE; }; +OSDefineMetaClassAndStructors(IOUserServerCheckInToken, OSObject); /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ @@ -147,11 +151,11 @@ IOUserService::start(IOService * provider) return ok; } -/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ #undef super -/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ struct IODispatchQueue_IVars { IOUserServer * userServer; @@ -168,13 +172,15 @@ struct OSAction_IVars { uint64_t msgid; OSActionAbortedHandler abortedHandler; size_t referenceSize; + OSString * typeName; void * reference[0]; }; -/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ kern_return_t -IMPL(IOService, GetRegistryEntryID) +IOService::GetRegistryEntryID_Impl( + uint64_t * registryEntryID) { IOReturn ret = kIOReturnSuccess; @@ -184,7 +190,8 @@ IMPL(IOService, GetRegistryEntryID) } kern_return_t -IMPL(IOService, SetName) +IOService::SetName_Impl( + const char * name) { IOReturn ret = kIOReturnSuccess; @@ -194,14 +201,15 @@ IMPL(IOService, SetName) } kern_return_t -IMPL(IOService, Start) +IOService::Start_Impl( + IOService * provider) { IOReturn ret = kIOReturnSuccess; return ret; } kern_return_t -IMPL(IOService, RegisterService) +IOService::RegisterService_Impl() { IOReturn ret = kIOReturnSuccess; @@ -211,13 +219,19 @@ IMPL(IOService, RegisterService) } kern_return_t -IMPL(IOService, CopyDispatchQueue) +IOService::CopyDispatchQueue_Impl( + const char * name, + IODispatchQueue ** queue) { IODispatchQueue * result; IOService * service; IOReturn ret; uint32_t index; + if (!reserved->uvars) { + return kIOReturnError; + } + ret = kIOReturnNotFound; index = -1U; if (!strcmp("Default", name)) { @@ -246,11 +260,17 @@ IMPL(IOService, CopyDispatchQueue) } kern_return_t -IMPL(IOService, SetDispatchQueue) +IOService::SetDispatchQueue_Impl( + const char * name, + IODispatchQueue * queue) { IOReturn ret = kIOReturnSuccess; uint32_t index; + if (!reserved->uvars) { + return kIOReturnError; + } + if (kIODKLogSetup & gIODKDebug) { DKLOG(DKS "::SetDispatchQueue(%s)\n", DKN(this), name); } @@ -276,7 +296,8 @@ IMPL(IOService, SetDispatchQueue) } kern_return_t -IMPL(IOService, SetProperties) +IOService::SetProperties_Impl( + OSDictionary * properties) { IOUserServer * us; OSDictionary * dict; @@ -317,17 +338,34 @@ IMPL(IOService, SetProperties) } kern_return_t -IMPL(IOService, CopyProperties) +IOService::CopyProperties_Impl( + OSDictionary ** properties) { IOReturn ret = kIOReturnSuccess; *properties = dictionaryWithProperties(); return ret; } -/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * 
* * * * * * * * * * * * * * * * * * * */ +kern_return_t +IOService::RequireMaxBusStall_Impl( + uint64_t u64ns) +{ + IOReturn ret; + UInt32 ns; + + if (os_convert_overflow(u64ns, &ns)) { + return kIOReturnBadArgument; + } + ret = requireMaxBusStall(ns); + + return kIOReturnSuccess; +} + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ kern_return_t -IMPL(IOMemoryDescriptor, _CopyState) +IOMemoryDescriptor::_CopyState_Impl( + _IOMDPrivateState * state) { IOReturn ret; @@ -348,7 +386,13 @@ IOMemoryDescriptor::GetLength(uint64_t * returnLength) } kern_return_t -IMPL(IOMemoryDescriptor, CreateMapping) +IOMemoryDescriptor::CreateMapping_Impl( + uint64_t options, + uint64_t address, + uint64_t offset, + uint64_t length, + uint64_t alignment, + IOMemoryMap ** map) { IOReturn ret; IOMemoryMap * resultMap; @@ -403,53 +447,148 @@ IMPL(IOMemoryDescriptor, CreateMapping) } kern_return_t -IMPL(IOMemoryDescriptor, PrepareForDMA) +IOMemoryDescriptor::CreateSubMemoryDescriptor_Impl( + uint64_t memoryDescriptorCreateOptions, + uint64_t offset, + uint64_t length, + IOMemoryDescriptor * ofDescriptor, + IOMemoryDescriptor ** memory) +{ + IOReturn ret; + IOMemoryDescriptor * iomd; + IOByteCount mdOffset; + IOByteCount mdLength; + IOByteCount mdEnd; + + if (!ofDescriptor) { + return kIOReturnBadArgument; + } + if (memoryDescriptorCreateOptions & ~kIOMemoryDirectionOutIn) { + return kIOReturnBadArgument; + } + if (os_convert_overflow(offset, &mdOffset)) { + return kIOReturnBadArgument; + } + if (os_convert_overflow(length, &mdLength)) { + return kIOReturnBadArgument; + } + if (os_add_overflow(mdOffset, mdLength, &mdEnd)) { + return kIOReturnBadArgument; + } + if (mdEnd > ofDescriptor->getLength()) { + return kIOReturnBadArgument; + } + + iomd = IOSubMemoryDescriptor::withSubRange( + ofDescriptor, mdOffset, mdLength, (IOOptionBits) memoryDescriptorCreateOptions); + + if (iomd) { + ret = kIOReturnSuccess; + *memory = iomd; + } else { + ret = kIOReturnNoMemory; + *memory = NULL; + } + + return ret; +} + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +kern_return_t +IOMemoryDescriptor::CreateWithMemoryDescriptors_Impl( + uint64_t memoryDescriptorCreateOptions, + uint32_t withDescriptorsCount, + IOMemoryDescriptor ** const withDescriptors, + IOMemoryDescriptor ** memory) { - IOReturn ret; - uint32_t idx, count; - uint64_t sumLength; - uint64_t lflags; + IOReturn ret; + IOMemoryDescriptor * iomd; - if (!device) { + if (!withDescriptors) { + return kIOReturnBadArgument; + } + if (!withDescriptorsCount) { + return kIOReturnBadArgument; + } + if (memoryDescriptorCreateOptions & ~kIOMemoryDirectionOutIn) { return kIOReturnBadArgument; } - count = *segmentsCount; - sumLength = 0; - for (idx = 0; idx < count; idx++) { -#ifdef __LP64__ - segments[idx].address = getPhysicalSegment(offset, &segments[idx].length); -#else - segments[idx].address = 0; -#endif - if (!segments[idx].address) { - break; + for (unsigned int idx = 0; idx < withDescriptorsCount; idx++) { + if (NULL == withDescriptors[idx]) { + return kIOReturnBadArgument; } - sumLength += segments[idx].length; - offset += segments[idx].length; } - *returnLength = sumLength; - *segmentsCount = idx; - // !!translate flags - lflags = 0; - if (kIODirectionOut & _flags) { - lflags |= kIOMemoryDirectionOut; + iomd = IOMultiMemoryDescriptor::withDescriptors(withDescriptors, withDescriptorsCount, + (IODirection) memoryDescriptorCreateOptions, false); + + if (iomd) { + ret = kIOReturnSuccess; 
+ *memory = iomd; + } else { + ret = kIOReturnNoMemory; + *memory = NULL; } - if (kIODirectionIn & _flags) { - lflags |= kIOMemoryDirectionIn; + + return ret; +} + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +kern_return_t +IOUserClient::CreateMemoryDescriptorFromClient_Impl( + uint64_t memoryDescriptorCreateOptions, + uint32_t segmentsCount, + const IOAddressSegment segments[32], + IOMemoryDescriptor ** memory) +{ + IOReturn ret; + IOMemoryDescriptor * iomd; + IOOptionBits mdOptions; + IOUserUserClient * me; + IOAddressRange * ranges; + + me = OSDynamicCast(IOUserUserClient, this); + if (!me) { + return kIOReturnBadArgument; } - *flags = lflags; - ret = kIOReturnSuccess; + mdOptions = 0; + if (kIOMemoryDirectionOut & memoryDescriptorCreateOptions) { + mdOptions |= kIODirectionOut; + } + if (kIOMemoryDirectionIn & memoryDescriptorCreateOptions) { + mdOptions |= kIODirectionIn; + } + if (!(kIOMemoryDisableCopyOnWrite & memoryDescriptorCreateOptions)) { + mdOptions |= kIOMemoryMapCopyOnWrite; + } + + static_assert(sizeof(IOAddressRange) == sizeof(IOAddressSegment)); + ranges = __DECONST(IOAddressRange *, &segments[0]); + + iomd = IOMemoryDescriptor::withAddressRanges( + ranges, segmentsCount, + mdOptions, me->fTask); + + if (iomd) { + ret = kIOReturnSuccess; + *memory = iomd; + } else { + ret = kIOReturnNoMemory; + *memory = NULL; + } return ret; } -/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ kern_return_t -IMPL(IOMemoryMap, _CopyState) +IOMemoryMap::_CopyState_Impl( + _IOMemoryMapPrivateState * state) { IOReturn ret; @@ -463,12 +602,17 @@ IMPL(IOMemoryMap, _CopyState) return ret; } -/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ kern_return_t -IMPL(IOBufferMemoryDescriptor, Create) +IOBufferMemoryDescriptor::Create_Impl( + uint64_t options, + uint64_t capacity, + uint64_t alignment, + IOBufferMemoryDescriptor ** memory) { IOReturn ret; + IOOptionBits bmdOptions; IOBufferMemoryDescriptor * bmd; IOMemoryDescriptorReserved * reserved; @@ -476,10 +620,9 @@ IMPL(IOBufferMemoryDescriptor, Create) // no other options currently defined return kIOReturnBadArgument; } - options &= kIOMemoryDirectionOutIn; - options |= kIOMemoryKernelUserShared; + bmdOptions = (options & kIOMemoryDirectionOutIn) | kIOMemoryKernelUserShared; bmd = IOBufferMemoryDescriptor::inTaskWithOptions( - kernel_task, options, capacity, alignment); + kernel_task, bmdOptions, capacity, alignment); *memory = bmd; @@ -497,17 +640,22 @@ IMPL(IOBufferMemoryDescriptor, Create) } kern_return_t -IMPL(IOBufferMemoryDescriptor, SetLength) +IOBufferMemoryDescriptor::SetLength_Impl( + uint64_t length) { setLength(length); return kIOReturnSuccess; } -/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ kern_return_t -IMPL(IODMACommand, Create) +IODMACommand::Create_Impl( + IOService * device, + uint64_t options, + const IODMACommandSpecification * specification, + IODMACommand ** command) { IOReturn ret; IODMACommand * dma; @@ -550,7 +698,14 @@ IMPL(IODMACommand, Create) } kern_return_t 
-IMPL(IODMACommand, PrepareForDMA) +IODMACommand::PrepareForDMA_Impl( + uint64_t options, + IOMemoryDescriptor * memory, + uint64_t offset, + uint64_t length, + uint64_t * flags, + uint32_t * segmentsCount, + IOAddressSegment * segments) { IOReturn ret; uint64_t lflags, mdFlags; @@ -562,14 +717,22 @@ IMPL(IODMACommand, PrepareForDMA) return kIOReturnBadArgument; } + // uses IOMD direction + ret = memory->prepare(); + if (kIOReturnSuccess != ret) { + return ret; + } + ret = setMemoryDescriptor(memory, false); if (kIOReturnSuccess != ret) { + memory->complete(); return ret; } ret = prepare(offset, length); if (kIOReturnSuccess != ret) { clearMemoryDescriptor(false); + memory->complete(); return ret; } @@ -580,9 +743,7 @@ IMPL(IODMACommand, PrepareForDMA) ret = genIOVMSegments(&genOffset, segments, &numSegments); if (kIOReturnSuccess == ret) { - IOMemoryDescriptor * mem; - mem = __IODEQUALIFY(IOMemoryDescriptor *, fMemory); - mdFlags = mem->getFlags(); + mdFlags = fMemory->getFlags(); lflags = 0; if (kIODirectionOut & mdFlags) { lflags |= kIOMemoryDirectionOut; @@ -598,22 +759,43 @@ IMPL(IODMACommand, PrepareForDMA) } kern_return_t -IMPL(IODMACommand, CompleteDMA) +IODMACommand::CompleteDMA_Impl( + uint64_t options) { - IOReturn ret; + IOReturn ret, completeRet; + IOMemoryDescriptor * md; if (options & ~((uint64_t) kIODMACommandCompleteDMANoOptions)) { // no other options currently defined return kIOReturnBadArgument; } + if (!fActive) { + return kIOReturnNotReady; + } + + md = __DECONST(IOMemoryDescriptor *, fMemory); + if (md) { + md->retain(); + } ret = clearMemoryDescriptor(true); + if (md) { + completeRet = md->complete(); + OSSafeReleaseNULL(md); + if (kIOReturnSuccess == ret) { + ret = completeRet; + } + } + return ret; } kern_return_t -IMPL(IODMACommand, GetPreparation) +IODMACommand::GetPreparation_Impl( + uint64_t * offset, + uint64_t * length, + IOMemoryDescriptor ** memory) { IOReturn ret; IOMemoryDescriptor * md; @@ -640,7 +822,12 @@ IMPL(IODMACommand, GetPreparation) } kern_return_t -IMPL(IODMACommand, PerformOperation) +IODMACommand::PerformOperation_Impl( + uint64_t options, + uint64_t dmaOffset, + uint64_t length, + uint64_t dataOffset, + IOMemoryDescriptor * data) { IOReturn ret; void * buffer; @@ -730,43 +917,109 @@ IMPL(IODMACommand, PerformOperation) } -/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ - -kern_return_t -OSAction::Create(OSAction_Create_Args) -{ - kern_return_t ret; - ret = OSAction::Create_Call(target, targetmsgid, msgid, referenceSize, action); - return ret; -} +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -kern_return_t -IMPL(OSAction, Create) +static kern_return_t +OSActionCreateWithTypeNameInternal(OSObject * target, uint64_t targetmsgid, uint64_t msgid, size_t referenceSize, OSString * typeName, bool fromKernel, OSAction ** action) { - OSAction * inst; + OSAction * inst = NULL; vm_size_t allocsize; + const OSSymbol *sym = NULL; // must release + OSObject *obj = NULL; // must release + const OSMetaClass *actionMetaClass = NULL; // do not release + kern_return_t ret; if (os_add_overflow(referenceSize, sizeof(OSAction_IVars), &allocsize)) { - return kIOReturnBadArgument; + ret = kIOReturnBadArgument; + goto finish; } - inst = OSTypeAlloc(OSAction); - if (!inst) { - return kIOReturnNoMemory; + + if (fromKernel && typeName) { + /* The action is being constructed in the kernel with a type name */ + sym = OSSymbol::withString(typeName); + 
actionMetaClass = OSMetaClass::getMetaClassWithName(sym); + if (actionMetaClass && actionMetaClass->getSuperClass() == OSTypeID(OSAction)) { + obj = actionMetaClass->alloc(); + if (!obj) { + ret = kIOReturnNoMemory; + goto finish; + } + inst = OSDynamicCast(OSAction, obj); + obj = NULL; // prevent release + assert(inst); // obj is a subclass of OSAction so the dynamic cast should always work + } else { + DKLOG("Attempted to create action object with type \"%s\" which does not inherit from OSAction\n", typeName->getCStringNoCopy()); + ret = kIOReturnBadArgument; + goto finish; + } + } else { + inst = OSTypeAlloc(OSAction); + if (!inst) { + ret = kIOReturnNoMemory; + goto finish; + } } + inst->ivars = (typeof(inst->ivars))(uintptr_t) IONewZero(uint8_t, allocsize); if (!inst->ivars) { - inst->release(); - return kIOReturnNoMemory; + ret = kIOReturnNoMemory; + goto finish; } target->retain(); inst->ivars->target = target; inst->ivars->targetmsgid = targetmsgid; inst->ivars->msgid = msgid; inst->ivars->referenceSize = referenceSize; + if (typeName) { + typeName->retain(); + } + inst->ivars->typeName = typeName; *action = inst; + inst = NULL; // prevent release + ret = kIOReturnSuccess; - return kIOReturnSuccess; +finish: + OSSafeReleaseNULL(obj); + OSSafeReleaseNULL(sym); + OSSafeReleaseNULL(inst); + + return ret; +} + +kern_return_t +OSAction::Create(OSAction_Create_Args) +{ + return OSAction::CreateWithTypeName(target, targetmsgid, msgid, referenceSize, NULL, action); +} + +kern_return_t +OSAction::CreateWithTypeName(OSAction_CreateWithTypeName_Args) +{ + return OSActionCreateWithTypeNameInternal(target, targetmsgid, msgid, referenceSize, typeName, true, action); +} + +kern_return_t +OSAction::Create_Impl( + OSObject * target, + uint64_t targetmsgid, + uint64_t msgid, + size_t referenceSize, + OSAction ** action) +{ + return OSAction::CreateWithTypeName_Impl(target, targetmsgid, msgid, referenceSize, NULL, action); +} + +kern_return_t +OSAction::CreateWithTypeName_Impl( + OSObject * target, + uint64_t targetmsgid, + uint64_t msgid, + size_t referenceSize, + OSString * typeName, + OSAction ** action) +{ + return OSActionCreateWithTypeNameInternal(target, targetmsgid, msgid, referenceSize, typeName, false, action); } void @@ -778,6 +1031,7 @@ OSAction::free() ivars->abortedHandler = NULL; } OSSafeReleaseNULL(ivars->target); + OSSafeReleaseNULL(ivars->typeName); IOSafeDeleteNULL(ivars, uint8_t, ivars->referenceSize + sizeof(OSAction_IVars)); } return super::free(); @@ -805,7 +1059,7 @@ OSAction::Aborted_Impl(void) } } -/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ struct IODispatchSource_IVars { queue_chain_t link; @@ -837,16 +1091,18 @@ IODispatchSource::free() } kern_return_t -IMPL(IODispatchSource, SetEnable) +IODispatchSource::SetEnable_Impl( + bool enable) { return SetEnableWithCompletion(enable, NULL); } -/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ struct IOInterruptDispatchSource_IVars { IOService * provider; uint32_t intIndex; + int interruptType; IOSimpleLock * lock; thread_t waiter; uint64_t count; @@ -869,11 +1125,18 @@ IOInterruptDispatchSourceInterrupt(OSObject * target, void * refCon, thread_wakeup_thread((event_t) ivars, ivars->waiter); ivars->waiter = 
NULL; } + if (kIOInterruptTypeLevel & ivars->interruptType) { + ivars->provider->disableInterrupt(ivars->intIndex); + } IOSimpleLockUnlockEnableInterrupt(ivars->lock, is); } kern_return_t -IMPL(IOInterruptDispatchSource, Create) +IOInterruptDispatchSource::Create_Impl( + IOService * provider, + uint32_t index, + IODispatchQueue * queue, + IOInterruptDispatchSource ** source) { IOReturn ret; IOInterruptDispatchSource * inst; @@ -886,17 +1149,26 @@ IMPL(IOInterruptDispatchSource, Create) inst->ivars->lock = IOSimpleLockAlloc(); + ret = provider->getInterruptType(index, &inst->ivars->interruptType); + if (kIOReturnSuccess != ret) { + OSSafeReleaseNULL(inst); + return ret; + } ret = provider->registerInterrupt(index, inst, IOInterruptDispatchSourceInterrupt, inst->ivars); if (kIOReturnSuccess == ret) { inst->ivars->intIndex = index; inst->ivars->provider = provider; + inst->ivars->provider->retain(); *source = inst; } return ret; } kern_return_t -IMPL(IOInterruptDispatchSource, GetInterruptType) +IOInterruptDispatchSource::GetInterruptType_Impl( + IOService * provider, + uint32_t index, + uint64_t * interruptType) { IOReturn ret; int type; @@ -932,6 +1204,7 @@ IOInterruptDispatchSource::free() if (ivars && ivars->provider) { ret = ivars->provider->unregisterInterrupt(ivars->intIndex); assert(kIOReturnSuccess == ret); + ivars->provider->release(); } if (ivars && ivars->lock) { @@ -944,7 +1217,8 @@ IOInterruptDispatchSource::free() } kern_return_t -IMPL(IOInterruptDispatchSource, SetHandler) +IOInterruptDispatchSource::SetHandler_Impl( + OSAction * action) { IOReturn ret; OSAction * oldAction; @@ -962,7 +1236,9 @@ IMPL(IOInterruptDispatchSource, SetHandler) } kern_return_t -IMPL(IOInterruptDispatchSource, SetEnableWithCompletion) +IOInterruptDispatchSource::SetEnableWithCompletion_Impl( + bool enable, + IODispatchSourceCancelHandler handler) { IOReturn ret; IOInterruptState is; @@ -987,16 +1263,20 @@ IMPL(IOInterruptDispatchSource, SetEnableWithCompletion) } kern_return_t -IMPL(IOInterruptDispatchSource, Cancel) +IOInterruptDispatchSource::Cancel_Impl( + IODispatchSourceCancelHandler handler) { return kIOReturnUnsupported; } kern_return_t -IMPL(IOInterruptDispatchSource, CheckForWork) +IOInterruptDispatchSource::CheckForWork_Impl( + const IORPC rpc, + bool synchronous) { IOReturn ret = kIOReturnNotReady; IOInterruptState is; + bool willWait; wait_result_t waitResult; uint64_t icount; uint64_t itime; @@ -1015,10 +1295,17 @@ IMPL(IOInterruptDispatchSource, CheckForWork) ivars->waiter = self; waitResult = assert_wait((event_t) ivars, THREAD_INTERRUPTIBLE); } + willWait = (synchronous && (waitResult == THREAD_WAITING)); + if (willWait && (kIOInterruptTypeLevel & ivars->interruptType) && ivars->enable) { + ivars->provider->enableInterrupt(ivars->intIndex); + } IOSimpleLockUnlockEnableInterrupt(ivars->lock, is); - if (synchronous && (waitResult == THREAD_WAITING)) { + if (willWait) { waitResult = thread_block(THREAD_CONTINUE_NULL); if (THREAD_INTERRUPTED == waitResult) { + is = IOSimpleLockLockDisableInterrupt(ivars->lock); + ivars->waiter = NULL; + IOSimpleLockUnlockEnableInterrupt(ivars->lock, is); break; } } @@ -1032,11 +1319,14 @@ IMPL(IOInterruptDispatchSource, CheckForWork) } void -IMPL(IOInterruptDispatchSource, InterruptOccurred) +IOInterruptDispatchSource::InterruptOccurred_Impl( + OSAction * action, + uint64_t count, + uint64_t time) { } -/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ +/* * * * * * * * * * * * * * * * * 
* * * * * * * * * * * * * * * * * * * * * * * * * * */ enum { kIOServiceNotificationTypeCount = kIOServiceNotificationTypeLast + 1, @@ -1053,7 +1343,11 @@ struct IOServiceNotificationDispatchSource_IVars { }; kern_return_t -IMPL(IOServiceNotificationDispatchSource, Create) +IOServiceNotificationDispatchSource::Create_Impl( + OSDictionary * matching, + uint64_t options, + IODispatchQueue * queue, + IOServiceNotificationDispatchSource ** notification) { IOUserServer * us; IOReturn ret; @@ -1164,7 +1458,10 @@ IMPL(IOServiceNotificationDispatchSource, Create) } kern_return_t -IMPL(IOServiceNotificationDispatchSource, CopyNextNotification) +IOServiceNotificationDispatchSource::CopyNextNotification_Impl( + uint64_t * type, + IOService ** service, + uint64_t * options) { IOService * next; uint32_t idx; @@ -1235,7 +1532,8 @@ IOServiceNotificationDispatchSource::free() } kern_return_t -IMPL(IOServiceNotificationDispatchSource, SetHandler) +IOServiceNotificationDispatchSource::SetHandler_Impl( + OSAction * action) { IOReturn ret; bool notifyReady; @@ -1265,7 +1563,9 @@ IMPL(IOServiceNotificationDispatchSource, SetHandler) } kern_return_t -IMPL(IOServiceNotificationDispatchSource, SetEnableWithCompletion) +IOServiceNotificationDispatchSource::SetEnableWithCompletion_Impl( + bool enable, + IODispatchSourceCancelHandler handler) { if (enable == ivars->enable) { return kIOReturnSuccess; @@ -1279,13 +1579,16 @@ IMPL(IOServiceNotificationDispatchSource, SetEnableWithCompletion) } kern_return_t -IMPL(IOServiceNotificationDispatchSource, Cancel) +IOServiceNotificationDispatchSource::Cancel_Impl( + IODispatchSourceCancelHandler handler) { return kIOReturnUnsupported; } kern_return_t -IMPL(IOServiceNotificationDispatchSource, CheckForWork) +IOServiceNotificationDispatchSource::CheckForWork_Impl( + const IORPC rpc, + bool synchronous) { return kIOReturnNotReady; } @@ -1296,7 +1599,7 @@ IOServiceNotificationDispatchSource::DeliverNotifications(IOServiceNotificationB return kIOReturnUnsupported; } -/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ kern_return_t IOUserServer::waitInterruptTrap(void * p1, void * p2, void * p3, void * p4, void * p5, void * p6) @@ -1307,6 +1610,7 @@ IOUserServer::waitInterruptTrap(void * p1, void * p2, void * p3, void * p4, void IOInterruptDispatchSource_IVars * ivars; IOInterruptDispatchSourcePayload payload; + bool willWait; wait_result_t waitResult; thread_t self; @@ -1334,10 +1638,17 @@ IOUserServer::waitInterruptTrap(void * p1, void * p2, void * p3, void * p4, void ivars->waiter = self; waitResult = assert_wait((event_t) ivars, THREAD_INTERRUPTIBLE); } + willWait = (waitResult == THREAD_WAITING); + if (willWait && (kIOInterruptTypeLevel & ivars->interruptType) && ivars->enable) { + ivars->provider->enableInterrupt(ivars->intIndex); + } IOSimpleLockUnlockEnableInterrupt(ivars->lock, is); - if (waitResult == THREAD_WAITING) { + if (willWait) { waitResult = thread_block(THREAD_CONTINUE_NULL); if (THREAD_INTERRUPTED == waitResult) { + is = IOSimpleLockLockDisableInterrupt(ivars->lock); + ivars->waiter = NULL; + IOSimpleLockUnlockEnableInterrupt(ivars->lock, is); break; } } @@ -1357,10 +1668,14 @@ IOUserServer::waitInterruptTrap(void * p1, void * p2, void * p3, void * p4, void return ret; } -/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ +/* * * * * * * 
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ kern_return_t -IMPL(IOUserServer, Create) +IOUserServer::Create_Impl( + const char * name, + uint64_t tag, + uint64_t options, + IOUserServer ** server) { IOReturn ret; IOUserServer * us; @@ -1397,22 +1712,28 @@ IMPL(IOUserServer, Create) } kern_return_t -IMPL(IOUserServer, Exit) +IOUserServer::Exit_Impl( + const char * reason) { return kIOReturnUnsupported; } kern_return_t -IMPL(IOUserServer, LoadModule) +IOUserServer::LoadModule_Impl( + const char * path) { return kIOReturnUnsupported; } -/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ kern_return_t -IMPL(IODispatchQueue, Create) +IODispatchQueue::Create_Impl( + const char * name, + uint64_t options, + uint64_t priority, + IODispatchQueue ** queue) { IODispatchQueue * result; IOUserServer * us; @@ -1441,7 +1762,8 @@ IMPL(IODispatchQueue, Create) } kern_return_t -IMPL(IODispatchQueue, SetPort) +IODispatchQueue::SetPort_Impl( + mach_port_t port) { if (MACH_PORT_NULL != ivars->serverPort) { return kIOReturnNotReady; @@ -1648,68 +1970,26 @@ IOUserServer::setDriverKitUUID(OSKext *kext) kext->setDriverKitUUID(new_uuid); } -bool -IOUserServer::serviceMatchesCDHash(IOService *service) +void +IOUserServer::setCheckInToken(IOUserServerCheckInToken *token) { - OSObject *obj = NULL; - bool result = false; - OSString *requiredCDHashStr = NULL; - const char *requiredCDHash = NULL; - char taskCDHash[CS_CDHASH_LEN]; - - task_t owningTask = this->fOwningTask; - if (!owningTask) { - printf("%s: fOwningTask not found\n", __FUNCTION__); - goto out; - } - - obj = service->copyProperty(gIOUserServerCDHashKey); - requiredCDHashStr = OSDynamicCast(OSString, obj); - if (!requiredCDHashStr) { - printf("%s: required cdhash not found as property of personality\n", __FUNCTION__); - goto out; - } - - requiredCDHash = requiredCDHashStr->getCStringNoCopy(); - if (!requiredCDHash) { - printf("%s: required cdhash unable to be read as string\n", __FUNCTION__); - goto out; - } - - if (strlen(requiredCDHash) != CS_CDHASH_LEN * 2) { - printf("%s: required cdhash string has incorrect length\n", __FUNCTION__); - goto out; + if (token != NULL && fCheckInToken == NULL) { + token->retain(); + fCheckInToken = token; + } else { + printf("%s: failed to set check in token. token=%p, fCheckInToken=%p\n", __FUNCTION__, token, fCheckInToken); } +} - get_task_cdhash(owningTask, taskCDHash); - for (int i = 0; i < (int)CS_CDHASH_LEN * 2; i++) { - uint8_t which = (i + 1) & 0x1; /* 1 for upper nibble, 0 for lower */ - uint8_t nibble = requiredCDHash[i]; - uint8_t byte = taskCDHash[i / 2]; - if ('0' <= nibble && nibble <= '9') { - nibble -= '0'; - } else if ('a' <= nibble && nibble <= 'f') { - nibble -= 'a' - 10; - } else if ('A' <= nibble && nibble <= 'F') { - nibble -= 'A' - 10; - } else { - printf("%s: required cdhash contains invalid token '%c'\n", __FUNCTION__, nibble); - goto out; - } - - /* - * Decide which half of the byte to compare - */ - if (nibble != (which ? 
(byte >> 4) : (byte & 0x0f))) { - printf("%s: required cdhash %s in personality does not match service\n", __FUNCTION__, requiredCDHash); - goto out; - } +bool +IOUserServer::serviceMatchesCheckInToken(IOUserServerCheckInToken *token) +{ + if (token != NULL) { + return token == fCheckInToken; + } else { + printf("%s: null check in token\n", __FUNCTION__); + return false; } - - result = true; -out: - OSSafeReleaseNULL(obj); - return result; } bool @@ -1940,7 +2220,7 @@ IOUserServer::objectInstantiate(OSObject * obj, IORPC rpc, IORPCMessage * messag const char * resultClassName; uint64_t resultFlags; - size_t replySize; + mach_msg_size_t replySize; uint32_t methodCount; const uint64_t * methods; IODispatchQueue * queue; @@ -1962,6 +2242,7 @@ IOUserServer::objectInstantiate(OSObject * obj, IORPC rpc, IORPCMessage * messag ret = kIOReturnUnsupportedMode; service = OSDynamicCast(IOService, obj); + action = OSDynamicCast(OSAction, obj); if (!service) { // xxx other classes hosted resultFlags |= kOSObjectRPCKernel; @@ -1990,6 +2271,12 @@ IOUserServer::objectInstantiate(OSObject * obj, IORPC rpc, IORPCMessage * messag const OSMetaClass * meta; meta = obj->getMetaClass(); IOLockLock(fLock); + if (action) { + str = action->ivars->typeName; + if (str) { + userMeta = (typeof(userMeta))fClasses->getObject(str); + } + } while (meta && !userMeta) { str = (OSString *) meta->getClassNameSymbol(); userMeta = (typeof(userMeta))fClasses->getObject(str); @@ -2010,8 +2297,11 @@ IOUserServer::objectInstantiate(OSObject * obj, IORPC rpc, IORPCMessage * messag } if (userMeta) { if (kOSObjectRPCRemote & resultFlags) { - while (userMeta && !(kOSClassCanRemote & userMeta->description->flags)) { - userMeta = userMeta->superMeta; + if (!action) { + /* Special case: For OSAction subclasses, do not use the superclass */ + while (userMeta && !(kOSClassCanRemote & userMeta->description->flags)) { + userMeta = userMeta->superMeta; + } } if (userMeta) { resultClassName = userMeta->description->name; @@ -2028,6 +2318,14 @@ IOUserServer::objectInstantiate(OSObject * obj, IORPC rpc, IORPCMessage * messag resultClassName = str->getCStringNoCopy(); ret = kIOReturnSuccess; } + } else if (kIODKLogSetup & gIODKDebug) { + DKLOG("userMeta %s was not found in fClasses\n", str->getCStringNoCopy()); + IOLockLock(fLock); + fClasses->iterateObjects(^bool (const OSSymbol * key, OSObject * val) { + DKLOG(" fClasses[\"%s\"] => %p\n", key->getCStringNoCopy(), val); + return false; + }); + IOLockUnlock(fLock); } } OSSafeReleaseNULL(prop); @@ -2037,7 +2335,7 @@ IOUserServer::objectInstantiate(OSObject * obj, IORPC rpc, IORPCMessage * messag if ((kIOReturnSuccess == ret) && (kOSObjectRPCRemote & resultFlags)) { target = obj; - if ((action = OSDynamicCast(OSAction, obj))) { + if (action) { if (action->ivars->referenceSize) { resultFlags |= kOSObjectRPCKernel; } else { @@ -2097,7 +2395,7 @@ IOUserServer::objectInstantiate(OSObject * obj, IORPC rpc, IORPCMessage * messag } if (kIODKLogIPC & gIODKDebug) { - DKLOG("instantiate %s\n", obj->getMetaClass()->getClassName()); + DKLOG("instantiate object %s with user class %s\n", obj->getMetaClass()->getClassName(), str ? 
str->getCStringNoCopy() : "(null)"); } if (kIOReturnSuccess != ret) { @@ -2129,7 +2427,7 @@ IOUserServer::objectInstantiate(OSObject * obj, IORPC rpc, IORPCMessage * messag return ret; } -/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ IOReturn IOUserServer::kernelDispatch(OSObject * obj, IORPC rpc) @@ -2161,7 +2459,7 @@ IOUserServer::kernelDispatch(OSObject * obj, IORPC rpc) } -/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ OSObject * IOUserServer::target(OSAction * action, IORPCMessage * message) @@ -2176,7 +2474,13 @@ IOUserServer::target(OSAction * action, IORPCMessage * message) message->objects[0] = (OSObjectRef) object; if (kIORPCMessageRemote & message->flags) { object->retain(); +#ifndef __clang_analyzer__ + // Hide the release of 'action' from the clang static analyzer to suppress + // an overrelease diagnostic. The analyzer doesn't have a way to express the + // non-standard contract of this method, which is that it releases 'action' when + // the message flags have kIORPCMessageRemote set. action->release(); +#endif } if (kIODKLogIPC & gIODKDebug) { DKLOG("TARGET %s msg 0x%qx from 0x%qx\n", object->getMetaClass()->getClassName(), message->msgid, action->ivars->msgid); @@ -2185,7 +2489,7 @@ IOUserServer::target(OSAction * action, IORPCMessage * message) return object; } -/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ kern_return_t uext_server(ipc_kmsg_t requestkmsg, ipc_kmsg_t * pReply) @@ -2349,10 +2653,10 @@ IOUserServer::server(ipc_kmsg_t requestkmsg, ipc_kmsg_t * pReply) return oneway ? 
MIG_NO_REPLY : KERN_SUCCESS;
 }
 
-/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
 
 #define MAX_OBJECT_COUNT(mach, size, message) \
-	((((size) + ((uintptr_t) (mach))) - ((uintptr_t) (&message->objects[0]))) / sizeof(OSObjectRef))
+	((uint32_t)(((((size) + ((uintptr_t) (mach))) - ((uintptr_t) (&message->objects[0]))) / sizeof(OSObjectRef))))
 
 kern_return_t
 IOUserServerUEXTTrap(OSObject * object, void * p1, void * p2, void * p3, void * p4, void * p5, void * p6)
@@ -2361,7 +2665,7 @@ IOUserServerUEXTTrap(OSObject * object, void * p1, void * p2, void * p3, void *
 	size_t inSize = (uintptr_t) p2;
 	user_addr_t out = (uintptr_t) p3;
 	size_t outSize = (uintptr_t) p4;
-	mach_port_name_t objectName1 = (uintptr_t) p5;
+	mach_port_name_t objectName1 = (mach_port_name_t)(uintptr_t) p5;
 	size_t totalSize;
 	OSObject * objectArg1;
 
@@ -2419,13 +2723,13 @@ IOUserServerUEXTTrap(OSObject * object, void * p1, void * p2, void * p3, void *
 	mach = (typeof(mach))(p - refs * sizeof(*descs) - sizeof(*mach));
 
 	mach->msgh.msgh_id = kIORPCVersionCurrent;
-	mach->msgh.msgh_size = sizeof(IORPCMessageMach) + refs * sizeof(*descs) + inSize;
-	mach->msgh_body.msgh_descriptor_count = refs;
+	mach->msgh.msgh_size = (mach_msg_size_t) (sizeof(IORPCMessageMach) + refs * sizeof(*descs) + inSize); // totalSize was checked
+	mach->msgh_body.msgh_descriptor_count = ((mach_msg_size_t) refs);
 	rpc.message = mach;
 	rpc.sendSize = mach->msgh.msgh_size;
 	rpc.reply = (IORPCMessageMach *) (p + inSize);
-	rpc.replySize = sizeof(buffer.buffer) - inSize;
+	rpc.replySize = ((uint32_t) (sizeof(buffer.buffer) - inSize)); // inSize was checked
 
 	message->objects[0] = 0;
 	if ((action = OSDynamicCast(OSAction, object))) {
@@ -2484,7 +2788,7 @@ IOUserServerUEXTTrap(OSObject * object, void * p1, void * p2, void * p3, void *
 	return ret;
 }
 
-/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
 
 IOReturn
 IOUserServer::rpc(IORPC rpc)
@@ -2517,7 +2821,7 @@ IOUserServer::rpc(IORPC rpc)
 	message = IORPCMessageFromMach(mach, false);
 	if (!message) {
-		ret = kIOReturnIPCError;
+		return kIOReturnIPCError;
 	}
 	msgid = message->msgid;
 	machid = (msgid >> 32);
@@ -3097,6 +3401,7 @@ IOUserServer::free()
 		IOLockFree(fLock);
 	}
 	OSSafeReleaseNULL(fServices);
+	OSSafeReleaseNULL(fCheckInToken);
 	IOUserClient::free();
 }
 
@@ -3219,6 +3524,17 @@ IOUserServer::registerClass(OSClassDescription * desc, uint32_t size, OSUserMeta
 	return ret;
 }
 
+IOReturn
+IOUserServer::registerClass(OSClassDescription * desc, uint32_t size, OSSharedPtr<OSUserMetaClass>& pCls)
+{
+	OSUserMetaClass* pClsRaw = NULL;
+	IOReturn result = registerClass(desc, size, &pClsRaw);
+	if (result == kIOReturnSuccess) {
+		pCls.reset(pClsRaw, OSRetain);
+	}
+	return result;
+}
+
 IOReturn
 IOUserServer::setRootQueue(IODispatchQueue * queue)
 {
@@ -3263,6 +3579,21 @@ IOUserServer::externalMethod(uint32_t selector, IOExternalMethodArguments * args
 		if (args->scalarOutputCount != 1) {
 			return kIOReturnBadArgument;
 		}
+		if (!(kIODKDisableCheckInTokenVerification & gIODKDebug)) {
+			if (args->scalarInputCount != 1) {
+				return kIOReturnBadArgument;
+			}
+			mach_port_name_t checkInPortName = ((typeof(checkInPortName))args->scalarInput[0]);
+			OSObject * obj = iokit_lookup_object_with_port_name(checkInPortName, IKOT_IOKIT_IDENT, fOwningTask);
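+			// The caller's scalar argument names a Mach port that must resolve to an
+			// IOUserServerCheckInToken; any other object fails the cast below and the
+			// check-in is rejected with kIOReturnBadArgument.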
+			IOUserServerCheckInToken * retrievedToken = OSDynamicCast(IOUserServerCheckInToken, obj);
+			if (retrievedToken != NULL) {
+				setCheckInToken(retrievedToken);
+			} else {
+				OSSafeReleaseNULL(obj);
+				return kIOReturnBadArgument;
+			}
+			OSSafeReleaseNULL(obj);
+		}
 		portname = iokit_make_send_right(fOwningTask, this, IKOT_UEXT_OBJECT);
 		assert(portname);
 		args->scalarOutput[0] = portname;
@@ -3298,7 +3629,7 @@ IOUserServer::serviceAttach(IOService * service, IOService * provider)
 	OSObjectUserVars * vars;
 	OSObject * prop;
 	OSString * str;
-	OSSymbolConstPtr bundleID;
+	OSSymbol const* bundleID;
 	char execPath[1024];
 
 	vars = IONewZero(OSObjectUserVars, 1);
@@ -3406,6 +3737,16 @@ IOUserServer::serviceNewUserClient(IOService * service, task_t owningTask, void
 	return ret;
 }
 
+IOReturn
+IOUserServer::serviceNewUserClient(IOService * service, task_t owningTask, void * securityID,
+    uint32_t type, OSDictionary * properties, OSSharedPtr<IOUserClient>& handler)
+{
+	IOUserClient* handlerRaw = NULL;
+	IOReturn result = serviceNewUserClient(service, owningTask, securityID, type, properties, &handlerRaw);
+	handler.reset(handlerRaw, OSNoRetain);
+	return result;
+}
+
 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
 
 static IOPMPowerState
@@ -3434,17 +3775,19 @@ IOUserServer::setPowerState(unsigned long state, IOService * service)
 }
 
 IOReturn
-IOUserServer::powerStateWillChangeTo(IOPMPowerFlags flags, unsigned long state, IOService * service)
+IOUserServer::serviceSetPowerState(IOService * controllingDriver, IOService * service, IOPMPowerFlags flags, unsigned long state)
 {
 	IOReturn ret;
 	if (service->reserved->uvars) {
 		if (!fSystemOff && !(kIODKDisablePM & gIODKDebug)) {
 			service->reserved->uvars->willPower = true;
+			service->reserved->uvars->willPowerState = state;
+			service->reserved->uvars->controllingDriver = controllingDriver;
 			if (kIODKLogPM & gIODKDebug) {
-				DKLOG(DKS "::powerStateWillChangeTo(%ld) 0x%qx, %d\n", DKN(service), state, fPowerStates, fSystemPowerAck);
+				DKLOG(DKS "::serviceSetPowerState(%ld) 0x%qx, %d\n", DKN(service), state, fPowerStates, fSystemPowerAck);
 			}
-			ret = service->SetPowerState(flags);
+			ret = service->SetPowerState((uint32_t) flags);
 			if (kIOReturnSuccess == ret) {
 				return 20 * 1000 * 1000;
 			}
@@ -3455,6 +3798,12 @@ IOUserServer::powerStateWillChangeTo(IOPMPowerFlags flags, unsigned long state,
 	return kIOPMAckImplied;
 }
 
+IOReturn
+IOUserServer::powerStateWillChangeTo(IOPMPowerFlags flags, unsigned long state, IOService * service)
+{
+	return kIOPMAckImplied;
+}
+
 IOReturn
 IOUserServer::powerStateDidChangeTo(IOPMPowerFlags flags, unsigned long state, IOService * service)
 {
@@ -3492,7 +3841,8 @@ IOUserServer::powerStateDidChangeTo(IOPMPowerFlags flags, unsigned long state, I
 }
 
 kern_return_t
-IMPL(IOService, SetPowerState)
+IOService::SetPowerState_Impl(
+	uint32_t powerFlags)
 {
 	if (kIODKLogPM & gIODKDebug) {
 		DKLOG(DKS "::SetPowerState(%d), %d\n", DKN(this), powerFlags, reserved->uvars->willPower);
@@ -3500,15 +3850,20 @@ IMPL(IOService, SetPowerState)
 	if (reserved->uvars && reserved->uvars->userServer && reserved->uvars->willPower) {
+		IOReturn ret;
 		reserved->uvars->willPower = false;
-		acknowledgePowerChange(reserved->uvars->userServer);
+		ret = reserved->uvars->controllingDriver->setPowerState(reserved->uvars->willPowerState, this);
+		if (kIOPMAckImplied == ret) {
+			acknowledgeSetPowerState();
+		}
 		return kIOReturnSuccess;
 	}
 	return kIOReturnNotReady;
 }
 
 kern_return_t
-IMPL(IOService, ChangePowerState)
+IOService::ChangePowerState_Impl(
+	uint32_t powerFlags)
 {
 	switch
(powerFlags) { case kIOServicePowerCapabilityOff: @@ -3528,7 +3883,10 @@ IMPL(IOService, ChangePowerState) } kern_return_t -IMPL(IOService, Create) +IOService::Create_Impl( + IOService * provider, + const char * propertiesKey, + IOService ** result) { OSObject * inst; IOService * service; @@ -3575,7 +3933,8 @@ IMPL(IOService, Create) } kern_return_t -IMPL(IOService, Terminate) +IOService::Terminate_Impl( + uint64_t options) { IOUserServer * us; @@ -3594,30 +3953,39 @@ IMPL(IOService, Terminate) } kern_return_t -IMPL(IOService, NewUserClient) +IOService::NewUserClient_Impl( + uint32_t type, + IOUserClient ** userClient) { return kIOReturnError; } kern_return_t -IMPL(IOService, SearchProperty) +IOService::SearchProperty_Impl( + const char * name, + const char * plane, + uint64_t options, + OSContainer ** property) { - OSObject * object; + OSObject * object; + IOOptionBits regOptions; if (kIOServiceSearchPropertyParents & options) { - options = kIORegistryIterateParents | kIORegistryIterateRecursively; + regOptions = kIORegistryIterateParents | kIORegistryIterateRecursively; } else { - options = 0; + regOptions = 0; } - object = copyProperty(name, IORegistryEntry::getPlane(plane), options); + object = copyProperty(name, IORegistryEntry::getPlane(plane), regOptions); *property = object; return object ? kIOReturnSuccess : kIOReturnNotFound; } kern_return_t -IMPL(IOService, CopyProviderProperties) +IOService::CopyProviderProperties_Impl( + OSArray * propertyKeys, + OSArray ** properties) { IOReturn ret; OSArray * result; @@ -3753,6 +4121,7 @@ IOUserServer::serviceStarted(IOService * service, IOService * provider, bool res { IOReturn ret; IOService * pmProvider; + bool joinTree; DKLOG(DKS "::start(" DKS ") %s\n", DKN(service), DKN(provider), result ? "ok" : "fail"); @@ -3768,34 +4137,39 @@ IOUserServer::serviceStarted(IOService * service, IOService * provider, bool res fRootNotifier = true; } + joinTree = false; if (!(kIODKDisablePM & gIODKDebug) && !service->pm_vars) { service->PMinit(); ret = service->registerPowerDriver(this, sPowerStates, sizeof(sPowerStates) / sizeof(sPowerStates[0])); assert(kIOReturnSuccess == ret); + joinTree = true; + } - pmProvider = service; - while (pmProvider && !pmProvider->inPlane(gIOPowerPlane)) { - pmProvider = pmProvider->getProvider(); - } - if (pmProvider) { - OSObject * prop; - OSString * str; - prop = pmProvider->copyProperty("non-removable"); - if (prop) { - str = OSDynamicCast(OSString, prop); - if (str && str->isEqualTo("yes")) { - pmProvider = NULL; - } - prop->release(); + pmProvider = service; + while (pmProvider && !pmProvider->inPlane(gIOPowerPlane)) { + pmProvider = pmProvider->getProvider(); + } + if (pmProvider) { + OSObject * prop; + OSString * str; + prop = pmProvider->copyProperty("non-removable"); + if (prop) { + str = OSDynamicCast(OSString, prop); + if (str && str->isEqualTo("yes")) { + pmProvider = NULL; } + prop->release(); } - if (pmProvider) { - IOLockLock(fLock); - unsigned int idx = fServices->getNextIndexOfObject(service, 0); - assert(idx <= 63); - fPowerStates |= (1ULL << idx); - IOLockUnlock(fLock); + } + if (!(kIODKDisablePM & gIODKDebug) && pmProvider) { + IOLockLock(fLock); + unsigned int idx = fServices->getNextIndexOfObject(service, 0); + assert(idx <= 63); + fPowerStates |= (1ULL << idx); + IOLockUnlock(fLock); + + if (joinTree) { pmProvider->joinPMtree(service); service->reserved->uvars->userServerPM = true; } @@ -3913,9 +4287,15 @@ IOUserServer::serviceWillTerminate(IOService * client, IOService * provider, IOO } if 
(willTerminate) { - ret = client->Stop(provider); + if (IOServicePH::serverSlept()) { + client->Stop_async(provider); + ret = kIOReturnOffline; + } else { + ret = client->Stop(provider); + } if (kIOReturnSuccess != ret) { - ret = client->IOService::Stop(provider); + IOUserServer::serviceDidStop(client, provider); + ret = kIOReturnSuccess; } } } @@ -3975,13 +4355,20 @@ IOUserServer::serviceDidStop(IOService * client, IOService * provider) } kern_return_t -IMPL(IOService, Stop) +IOService::Stop_Impl( + IOService * provider) { IOUserServer::serviceDidStop(this, provider); return kIOReturnSuccess; } +void +IOService::Stop_async_Impl( + IOService * provider) +{ +} + /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ #undef super @@ -4029,7 +4416,11 @@ struct IOUserUserClientActionRef { }; void -IMPL(IOUserClient, KernelCompletion) +IOUserClient::KernelCompletion_Impl( + OSAction * action, + IOReturn status, + const unsigned long long * asyncData, + uint32_t asyncDataCount) { IOUserUserClientActionRef * ref; @@ -4039,7 +4430,18 @@ IMPL(IOUserClient, KernelCompletion) } kern_return_t -IMPL(IOUserClient, _ExternalMethod) +IOUserClient::_ExternalMethod_Impl( + uint64_t selector, + const unsigned long long * scalarInput, + uint32_t scalarInputCount, + OSData * structureInput, + IOMemoryDescriptor * structureInputDescriptor, + unsigned long long * scalarOutput, + uint32_t * scalarOutputCount, + uint64_t structureOutputMaximumSize, + OSData ** structureOutput, + IOMemoryDescriptor * structureOutputDescriptor, + OSAction * completion) { return kIOReturnUnsupported; } @@ -4084,6 +4486,7 @@ IOUserUserClient::externalMethod(uint32_t selector, IOExternalMethodArguments * kr = kIOReturnUnsupported; structureInput = NULL; action = NULL; + ref = NULL; if (args->structureInputSize) { structureInput = OSData::withBytesNoCopy((void *) args->structureInput, args->structureInputSize); @@ -4094,6 +4497,17 @@ IOUserUserClient::externalMethod(uint32_t selector, IOExternalMethodArguments * assert(KERN_SUCCESS == kr); ref = (typeof(ref))action->GetReference(); bcopy(args->asyncReference, &ref->asyncRef[0], args->asyncReferenceCount * sizeof(ref->asyncRef[0])); + + kr = action->SetAbortedHandler(^(void) { + IOUserUserClientActionRef * ref; + IOReturn ret; + + ref = (typeof(ref))action->GetReference(); + ret = releaseAsyncReference64(ref->asyncRef); + assert(kIOReturnSuccess == ret); + bzero(&ref->asyncRef[0], sizeof(ref->asyncRef)); + }); + assert(KERN_SUCCESS == kr); } if (args->structureVariableOutputData) { @@ -4114,6 +4528,10 @@ IOUserUserClient::externalMethod(uint32_t selector, IOExternalMethodArguments * OSSafeReleaseNULL(action); if (kIOReturnSuccess != kr) { + if (ref) { + // mig will destroy any async port, remove our pointer to it + bzero(&ref->asyncRef[0], sizeof(ref->asyncRef)); + } return kr; } if (structureOutput) { @@ -4134,3 +4552,40 @@ IOUserUserClient::externalMethod(uint32_t selector, IOExternalMethodArguments * } /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +void +IOUserServerCheckInToken::setNoSendersNotification(IOUserServerCheckInNotificationHandler handler, + void* handlerArgs) +{ + this->handler = handler; + this->handlerArgs = handlerArgs; +} + +void +IOUserServerCheckInToken::notifyNoSenders(IOUserServerCheckInToken *token) +{ + if (token->handler) { + token->handler(token, token->handlerArgs); + } +} + +void +IOUserServerCheckInToken::clearNotification() +{ + this->handler = NULL; + this->handlerArgs = 
NULL; +} + +IOUserServerCheckInToken * +IOUserServerCheckInToken::create() +{ + IOUserServerCheckInToken *me = new IOUserServerCheckInToken; + if (me && !me->init()) { + me->release(); + return NULL; + } + me->clearNotification(); + return me; +} + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ diff --git a/iokit/Kernel/IOWorkLoop.cpp b/iokit/Kernel/IOWorkLoop.cpp index d046938d2..4fba3bdfa 100644 --- a/iokit/Kernel/IOWorkLoop.cpp +++ b/iokit/Kernel/IOWorkLoop.cpp @@ -47,9 +47,9 @@ OSMetaClassDefineReservedUnused(IOWorkLoop, 0); OSMetaClassDefineReservedUnused(IOWorkLoop, 1); OSMetaClassDefineReservedUnused(IOWorkLoop, 2); #else -OSMetaClassDefineReservedUsed(IOWorkLoop, 0); -OSMetaClassDefineReservedUsed(IOWorkLoop, 1); -OSMetaClassDefineReservedUsed(IOWorkLoop, 2); +OSMetaClassDefineReservedUsedX86(IOWorkLoop, 0); +OSMetaClassDefineReservedUsedX86(IOWorkLoop, 1); +OSMetaClassDefineReservedUsedX86(IOWorkLoop, 2); #endif OSMetaClassDefineReservedUnused(IOWorkLoop, 3); OSMetaClassDefineReservedUnused(IOWorkLoop, 4); @@ -220,6 +220,29 @@ IOWorkLoop::workLoopWithOptions(IOOptionBits options) return me; } +void +IOWorkLoop::releaseEventChain(LIBKERN_CONSUMED IOEventSource *eventChain) +{ + IOEventSource *event, *next; + for (event = eventChain; event; event = next) { + next = event->getNext(); +#ifdef __clang_analyzer__ + // Unlike the usual IOKit memory management convention, IOWorkLoop + // manages the retain count for the IOEventSource instances in the + // the chain rather than have IOEventSource do that itself. This means + // it is safe to call release() on the result of getNext() while the + // chain is being torn down. However, the analyzer doesn't + // realize this. We add an extra retain under analysis to suppress + // an analyzer diagnostic about violations of the memory management rules. + if (next) { + next->retain(); + } +#endif + event->setWorkLoop(NULL); + event->setNext(NULL); + event->release(); + } +} // Free is called twice: // First when the atomic retainCount transitions from 1 -> 0 // Secondly when the work loop itself is commiting hari kari @@ -246,22 +269,10 @@ IOWorkLoop::free() openGate(); } else { /* !workThread */ - IOEventSource *event, *next; - - for (event = eventChain; event; event = next) { - next = event->getNext(); - event->setWorkLoop(NULL); - event->setNext(NULL); - event->release(); - } + releaseEventChain(eventChain); eventChain = NULL; - for (event = passiveEventChain; event; event = next) { - next = event->getNext(); - event->setWorkLoop(NULL); - event->setNext(NULL); - event->release(); - } + releaseEventChain(passiveEventChain); passiveEventChain = NULL; // Either we have a partial initialization to clean up diff --git a/iokit/Kernel/PassthruInterruptController.cpp b/iokit/Kernel/PassthruInterruptController.cpp new file mode 100644 index 000000000..362fb36d2 --- /dev/null +++ b/iokit/Kernel/PassthruInterruptController.cpp @@ -0,0 +1,158 @@ +/* + * Copyright (c) 2019 Apple Computer, Inc. All rights reserved. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. 
The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. + * + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ + */ + +extern "C" { +#include +#include +#include +}; + +#include +#include +#include + +#define super IOInterruptController +OSDefineMetaClassAndStructors(PassthruInterruptController, IOInterruptController); + +bool +PassthruInterruptController::init(void) +{ + if (!super::init() || + !this->setProperty(gPlatformInterruptControllerName, kOSBooleanTrue) || + !this->attach(getPlatform())) { + return false; + } + registerService(); + if (getPlatform()->registerInterruptController(gPlatformInterruptControllerName, this) != kIOReturnSuccess) { + return false; + } + if (semaphore_create(kernel_task, &child_sentinel, SYNC_POLICY_FIFO, 0) != KERN_SUCCESS) { + return false; + } + return true; +} + +void +PassthruInterruptController::setCPUInterruptProperties(IOService *service) +{ + if ((service->getProperty(gIOInterruptControllersKey) != NULL) && + (service->getProperty(gIOInterruptSpecifiersKey) != NULL)) { + return; + } + + long zero = 0; + OSArray *specifier = OSArray::withCapacity(1); + OSData *tmpData = OSData::withBytes(&zero, sizeof(zero)); + specifier->setObject(tmpData); + tmpData->release(); + service->setProperty(gIOInterruptSpecifiersKey, specifier); + specifier->release(); + + OSArray *controller = OSArray::withCapacity(1); + controller->setObject(gPlatformInterruptControllerName); + service->setProperty(gIOInterruptControllersKey, controller); + controller->release(); +} + +IOReturn +PassthruInterruptController::registerInterrupt(IOService *nub, + int source, + void *target, + IOInterruptHandler handler, + void *refCon) +{ + child_handler = handler; + child_nub = nub; + child_target = target; + child_refCon = refCon; + + // Wake up waitForChildController() to tell it that AIC is registered + semaphore_signal(child_sentinel); + return kIOReturnSuccess; +} + +void * +PassthruInterruptController::waitForChildController(void) +{ + // Block if child controller isn't registered yet. Assumes that this + // is only called from one place. + semaphore_wait(child_sentinel); + + // NOTE: Assumes that AppleInterruptController passes |this| as the target argument. 
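+	// registerInterrupt() records the child's target pointer and signals
+	// child_sentinel, so once the wait above returns, child_target identifies
+	// the child interrupt controller instance.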
+ return child_target; +} + +IOReturn +PassthruInterruptController::getInterruptType(IOService */*nub*/, + int /*source*/, + int *interruptType) +{ + if (interruptType == NULL) { + return kIOReturnBadArgument; + } + + *interruptType = kIOInterruptTypeLevel; + + return kIOReturnSuccess; +} + +IOReturn +PassthruInterruptController::enableInterrupt(IOService */*nub*/, + int /*source*/) +{ + return kIOReturnSuccess; +} + +IOReturn +PassthruInterruptController::disableInterrupt(IOService */*nub*/, + int /*source*/) +{ + return kIOReturnSuccess; +} + +IOReturn +PassthruInterruptController::causeInterrupt(IOService */*nub*/, + int /*source*/) +{ + ml_cause_interrupt(); + return kIOReturnSuccess; +} + +IOReturn +PassthruInterruptController::handleInterrupt(void */*refCon*/, + IOService */*nub*/, + int source) +{ + panic("handleInterrupt shouldn't be invoked directly"); +} + +void +PassthruInterruptController::externalInterrupt(void) +{ + child_handler(child_target, child_refCon, child_nub, 0); +} diff --git a/iokit/Kernel/RootDomainUserClient.cpp b/iokit/Kernel/RootDomainUserClient.cpp index 632b97ae5..3fce5d5b0 100644 --- a/iokit/Kernel/RootDomainUserClient.cpp +++ b/iokit/Kernel/RootDomainUserClient.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998-2012 Apple Computer, Inc. All rights reserved. + * Copyright (c) 1998-2020 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -328,7 +328,8 @@ RootDomainUserClient::externalMethod( break; case kPMSetClamshellSleepState: - fOwner->setDisableClamShellSleep(arguments->scalarInput[0] ? true : false); + fOwner->setClamShellSleepDisable(arguments->scalarInput[0] ? true : false, + IOPMrootDomain::kClamshellSleepDisablePowerd); ret = kIOReturnSuccess; break; diff --git a/iokit/Kernel/arm/AppleARMSMP.cpp b/iokit/Kernel/arm/AppleARMSMP.cpp new file mode 100644 index 000000000..9415e0226 --- /dev/null +++ b/iokit/Kernel/arm/AppleARMSMP.cpp @@ -0,0 +1,399 @@ +/* + * Copyright (c) 2019 Apple Computer, Inc. All rights reserved. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. + * + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. 
+ * + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ + */ + +extern "C" { +#include +#include +#include +#include +#include +#include +}; + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#if USE_APPLEARMSMP + +// FIXME: These are in but that file has other deps that aren't being resolved +extern "C" void console_suspend(); +extern "C" void console_resume(); + +static PassthruInterruptController *gCPUIC; +static IOPMGR *gPMGR; +static IOInterruptController *gAIC; +static bool aic_ipis = false; +static const ml_topology_info *topology_info; + +// cpu_id of the boot processor +static unsigned int boot_cpu; + +// array index is a cpu_id (so some elements may be NULL) +static processor_t *machProcessors; + +static void +processor_idle_wrapper(cpu_id_t /*cpu_id*/, boolean_t enter, uint64_t *new_timeout_ticks) +{ + if (enter) { + gPMGR->enterCPUIdle(new_timeout_ticks); + } else { + gPMGR->exitCPUIdle(new_timeout_ticks); + } +} + +static void +idle_timer_wrapper(void */*refCon*/, uint64_t *new_timeout_ticks) +{ + gPMGR->updateCPUIdle(new_timeout_ticks); +} + +static void +register_aic_handlers(const ml_topology_cpu *cpu_info, + ipi_handler_t ipi_handler, + perfmon_interrupt_handler_func pmi_handler) +{ + const int n_irqs = 3; + int i; + IOInterruptVectorNumber irqlist[n_irqs] = { + cpu_info->self_ipi_irq, + cpu_info->other_ipi_irq, + cpu_info->pmi_irq }; + + IOService *fakeCPU = new IOService(); + if (!fakeCPU || !fakeCPU->init()) { + panic("Can't initialize fakeCPU"); + } + + IOInterruptSource source[n_irqs]; + for (i = 0; i < n_irqs; i++) { + source[i].vectorData = OSData::withBytes(&irqlist[i], sizeof(irqlist[0])); + } + fakeCPU->_interruptSources = source; + + if (cpu_info->self_ipi_irq && cpu_info->other_ipi_irq) { + // Legacy configuration, for !HAS_IPI chips (pre-Skye). + if (gAIC->registerInterrupt(fakeCPU, 0, NULL, (IOInterruptHandler)ipi_handler, NULL) != kIOReturnSuccess || + gAIC->enableInterrupt(fakeCPU, 0) != kIOReturnSuccess || + gAIC->registerInterrupt(fakeCPU, 1, NULL, (IOInterruptHandler)ipi_handler, NULL) != kIOReturnSuccess || + gAIC->enableInterrupt(fakeCPU, 1) != kIOReturnSuccess) { + panic("Error registering IPIs"); + } +#if !defined(HAS_IPI) + // Ideally this should be decided by EDT, but first we need to update EDT + // to default to fast IPIs on modern platforms. + aic_ipis = true; +#endif + } + // Conditional, because on Skye and later, we use an FIQ instead of an external IRQ. 
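+	// irqlist[2] above is the PMI vector, so vector index 2 is used when
+	// registering the PMI handler with the child controller.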
+	if (pmi_handler && cpu_info->pmi_irq) {
+		if (gAIC->registerInterrupt(fakeCPU, 2, NULL, (IOInterruptHandler)pmi_handler, NULL) != kIOReturnSuccess ||
+		    gAIC->enableInterrupt(fakeCPU, 2) != kIOReturnSuccess) {
+			panic("Error registering PMI");
+		}
+	}
+
+	for (i = 0; i < n_irqs; i++) {
+		source[i].vectorData->release();
+	}
+}
+
+static void
+cpu_boot_thread(void */*unused0*/, wait_result_t /*unused1*/)
+{
+	OSDictionary *matching = IOService::serviceMatching("IOPlatformExpert");
+	IOService::waitForMatchingService(matching, UINT64_MAX);
+	matching->release();
+
+	gCPUIC = new PassthruInterruptController;
+	if (!gCPUIC || !gCPUIC->init()) {
+		panic("Can't initialize PassthruInterruptController");
+	}
+	gAIC = static_cast<IOInterruptController *>(gCPUIC->waitForChildController());
+
+	ml_set_max_cpus(topology_info->max_cpu_id + 1);
+
+	matching = IOService::serviceMatching("IOPMGR");
+	gPMGR = OSDynamicCast(IOPMGR,
+	    IOService::waitForMatchingService(matching, UINT64_MAX));
+	matching->release();
+
+	const size_t array_size = (topology_info->max_cpu_id + 1) * sizeof(*machProcessors);
+	machProcessors = static_cast<processor_t *>(IOMalloc(array_size));
+	if (!machProcessors) {
+		panic("Can't allocate machProcessors array");
+	}
+	memset(machProcessors, 0, array_size);
+
+	ml_cpu_init_state();
+	for (unsigned int cpu = 0; cpu < topology_info->num_cpus; cpu++) {
+		const ml_topology_cpu *cpu_info = &topology_info->cpus[cpu];
+		const unsigned int cpu_id = cpu_info->cpu_id;
+		ml_processor_info_t this_processor_info;
+		ipi_handler_t ipi_handler;
+		perfmon_interrupt_handler_func pmi_handler;
+
+		memset(&this_processor_info, 0, sizeof(this_processor_info));
+		this_processor_info.cpu_id = reinterpret_cast<cpu_id_t>(cpu_id);
+		this_processor_info.phys_id = cpu_info->phys_id;
+		this_processor_info.log_id = cpu_id;
+		this_processor_info.cluster_id = cpu_info->cluster_id;
+		this_processor_info.cluster_type = cpu_info->cluster_type;
+		this_processor_info.l2_cache_size = cpu_info->l2_cache_size;
+		this_processor_info.l2_cache_id = cpu_info->l2_cache_id;
+		this_processor_info.l3_cache_size = cpu_info->l3_cache_size;
+		this_processor_info.l3_cache_id = cpu_info->l3_cache_id;
+
+		gPMGR->initCPUIdle(&this_processor_info);
+		this_processor_info.processor_idle = &processor_idle_wrapper;
+		this_processor_info.idle_timer = &idle_timer_wrapper;
+
+		kern_return_t result = ml_processor_register(&this_processor_info,
+		    &machProcessors[cpu_id], &ipi_handler, &pmi_handler);
+		if (result == KERN_FAILURE) {
+			panic("ml_processor_register failed: %d", result);
+		}
+		register_aic_handlers(cpu_info, ipi_handler, pmi_handler);
+
+		if (processor_start(machProcessors[cpu_id]) != KERN_SUCCESS) {
+			panic("processor_start failed");
+		}
+	}
+	IOService::publishResource(gIOAllCPUInitializedKey, kOSBooleanTrue);
+}
+
+void
+IOCPUInitialize(void)
+{
+	topology_info = ml_get_topology_info();
+	boot_cpu = topology_info->boot_cpu->cpu_id;
+
+	thread_t thread;
+	kernel_thread_start(&cpu_boot_thread, NULL, &thread);
+	thread_set_thread_name(thread, "cpu_boot_thread");
+	thread_deallocate(thread);
+}
+
+static unsigned int
+target_to_cpu_id(cpu_id_t in)
+{
+	return (unsigned int)(uintptr_t)in;
+}
+
+// Release a secondary CPU from reset. Runs from a different CPU (obviously).
+kern_return_t
+PE_cpu_start(cpu_id_t target,
+    vm_offset_t /*start_paddr*/, vm_offset_t /*arg_paddr*/)
+{
+	unsigned int cpu_id = target_to_cpu_id(target);
+
+	if (cpu_id != boot_cpu) {
+		gPMGR->enableCPUCore(cpu_id);
+	}
+	return KERN_SUCCESS;
+}
+
+// Initialize a CPU when it first comes up.
Runs on the target CPU. +// |bootb| is true on the initial boot, false on S2R resume. +void +PE_cpu_machine_init(cpu_id_t target, boolean_t bootb) +{ + unsigned int cpu_id = target_to_cpu_id(target); + + if (!bootb && cpu_id == boot_cpu && ml_is_quiescing()) { + IOCPURunPlatformActiveActions(); + } + + ml_broadcast_cpu_event(CPU_BOOTED, cpu_id); + + // Send myself an IPI to clear SIGPdisabled. Hang here if IPIs are broken. + // (Probably only works on the boot CPU.) + PE_cpu_signal(target, target); + while (ml_get_interrupts_enabled() && !ml_cpu_signal_is_enabled()) { + OSMemoryBarrier(); + } +} + +void +PE_cpu_halt(cpu_id_t target) +{ + unsigned int cpu_id = target_to_cpu_id(target); + processor_exit(machProcessors[cpu_id]); +} + +void +PE_cpu_signal(cpu_id_t /*source*/, cpu_id_t target) +{ + struct ml_topology_cpu *cpu = &topology_info->cpus[target_to_cpu_id(target)]; + if (aic_ipis) { + gAIC->sendIPI(cpu->cpu_id, false); + } else { + ml_cpu_signal(cpu->phys_id); + } +} + +void +PE_cpu_signal_deferred(cpu_id_t /*source*/, cpu_id_t target) +{ + struct ml_topology_cpu *cpu = &topology_info->cpus[target_to_cpu_id(target)]; + if (aic_ipis) { + gAIC->sendIPI(cpu->cpu_id, true); + } else { + ml_cpu_signal_deferred(cpu->phys_id); + } +} + +void +PE_cpu_signal_cancel(cpu_id_t /*source*/, cpu_id_t target) +{ + struct ml_topology_cpu *cpu = &topology_info->cpus[target_to_cpu_id(target)]; + if (aic_ipis) { + gAIC->cancelDeferredIPI(cpu->cpu_id); + } else { + ml_cpu_signal_retract(cpu->phys_id); + } +} + +// Brings down one CPU core for S2R. Runs on the target CPU. +void +PE_cpu_machine_quiesce(cpu_id_t target) +{ + unsigned int cpu_id = target_to_cpu_id(target); + + if (cpu_id == boot_cpu) { + IOCPURunPlatformQuiesceActions(); + } else { + gPMGR->disableCPUCore(cpu_id); + } + + ml_broadcast_cpu_event(CPU_DOWN, cpu_id); + ml_arm_sleep(); +} + +// Takes one secondary CPU core offline at runtime. Runs on the target CPU. +// Returns true if the platform code should go into deep sleep WFI, false otherwise. +bool +PE_cpu_down(cpu_id_t target) +{ + unsigned int cpu_id = target_to_cpu_id(target); + assert(cpu_id != boot_cpu); + gPMGR->disableCPUCore(cpu_id); + return false; +} + +void +PE_handle_ext_interrupt(void) +{ + gCPUIC->externalInterrupt(); +} + +void +IOCPUSleepKernel(void) +{ + IOPMrootDomain *rootDomain = IOService::getPMRootDomain(); + unsigned int i; + + printf("IOCPUSleepKernel enter\n"); +#if defined(__arm64__) + sched_override_recommended_cores_for_sleep(); +#endif + + rootDomain->tracePoint( kIOPMTracePointSleepPlatformActions ); + IOPlatformActionsPreSleep(); + rootDomain->tracePoint( kIOPMTracePointSleepCPUs ); + + integer_t old_pri; + thread_t self = current_thread(); + + /* + * We need to boost this thread's priority to the maximum kernel priority to + * ensure we can urgently preempt ANY thread currently executing on the + * target CPU. Note that realtime threads have their own mechanism to eventually + * demote their priority below MAXPRI_KERNEL if they hog the CPU for too long. + */ + old_pri = thread_kern_get_pri(self); + thread_kern_set_pri(self, thread_kern_get_kernel_maxpri()); + + // Sleep the non-boot CPUs. 
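+	// Mark the system as quiescing, then take each secondary CPU offline with
+	// processor_exit(); the boot CPU is slept last, below, once the console and
+	// watchdog timer have been suspended.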
+ ml_set_is_quiescing(true); + for (i = 0; i < topology_info->num_cpus; i++) { + unsigned int cpu_id = topology_info->cpus[i].cpu_id; + if (cpu_id != boot_cpu) { + processor_exit(machProcessors[cpu_id]); + } + } + + console_suspend(); + + rootDomain->tracePoint( kIOPMTracePointSleepPlatformDriver ); + rootDomain->stop_watchdog_timer(); + + /* + * Now sleep the boot CPU, including calling the kQueueQuiesce actions. + * The system sleeps here. + */ + processor_exit(machProcessors[boot_cpu]); + + /* + * The system is now coming back from sleep on the boot CPU. + * The kQueueActive actions have already been called. + */ + + ml_set_is_quiescing(false); + rootDomain->start_watchdog_timer(); + rootDomain->tracePoint( kIOPMTracePointWakePlatformActions ); + + console_resume(); + + IOPlatformActionsPostResume(); + rootDomain->tracePoint( kIOPMTracePointWakeCPUs ); + + for (i = 0; i < topology_info->num_cpus; i++) { + unsigned int cpu_id = topology_info->cpus[i].cpu_id; + if (cpu_id != boot_cpu) { + processor_start(machProcessors[cpu_id]); + } + } + +#if defined(__arm64__) + sched_restore_recommended_cores_after_sleep(); +#endif + + thread_kern_set_pri(self, old_pri); + printf("IOCPUSleepKernel exit\n"); +} + +#endif /* USE_APPLEARMSMP */ diff --git a/iokit/Kernel/i386/IOKeyStoreHelper.cpp b/iokit/Kernel/i386/IOKeyStoreHelper.cpp index 88b077a25..ca0e3b895 100644 --- a/iokit/Kernel/i386/IOKeyStoreHelper.cpp +++ b/iokit/Kernel/i386/IOKeyStoreHelper.cpp @@ -54,6 +54,25 @@ static IOMemoryDescriptor* apfsKeyData = NULL; IOMemoryDescriptor* IOGetAPFSKeyStoreData(); void IOSetAPFSKeyStoreData(IOMemoryDescriptor* data); +static volatile UInt32 arvRootHashFetched = 0; +static volatile UInt32 bsARVRootHashFetched = 0; +static IOMemoryDescriptor* arvRootHashData = NULL; +static IOMemoryDescriptor* bsARVRootHashData = NULL; + +IOMemoryDescriptor* IOGetARVRootHashData(void); +void IOSetARVRootHashData(IOMemoryDescriptor* arvData); + +IOMemoryDescriptor* IOGetBaseSystemARVRootHashData(void); +bool IOBaseSystemARVRootHashAvailable(void); +void IOSetBaseSystemARVRootHashData(IOMemoryDescriptor* arvData); + + +static volatile UInt32 arvManifestFetched = 0; +static IOMemoryDescriptor* arvManifestData = NULL; + +IOMemoryDescriptor* IOGetARVManifestData(void); +void IOSetARVManifestData(IOMemoryDescriptor* arvData); + __END_DECLS #if 1 @@ -159,3 +178,133 @@ IOGetAPFSKeyStoreData() DEBG("%s: memory descriptor %p\n", __func__, memoryDescriptor); return memoryDescriptor; } + +// ARV Root Hash fetcher + +// Store in-memory Root Hash +void +IOSetARVRootHashData(IOMemoryDescriptor* arvData) +{ + // Do not allow re-fetching of the boot_args root hash by passing NULL here. + if (arvData) { + arvRootHashData = arvData; + arvRootHashFetched = 0; + } +} + +// Retrieve any root hash we may have (stored in boot_args or in-memory) +IOMemoryDescriptor* +IOGetARVRootHashData(void) +{ + // Check if someone got the root hash before us + if (!OSCompareAndSwap(0, 1, &arvRootHashFetched)) { + return NULL; + } + + // Do we have in-memory root hash? 
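+	// An in-memory root hash is handed out exactly once: the cached pointer is
+	// cleared here, and later callers return NULL above because arvRootHashFetched
+	// has already been swapped.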
+ if (arvRootHashData) { + IOMemoryDescriptor* arvData = arvRootHashData; + arvRootHashData = NULL; + return arvData; + } + + // Looks like there was no in-memory root hash and it's the first call - try boot_args + boot_args* args = (boot_args*)PE_state.bootArgs; + + DEBG("%s: data at address %llu size %llu\n", __func__, args->arvRootHashStart, args->arvRootHashSize); + if (args->arvRootHashStart == 0) { + return NULL; + } + + // We have the root hash in the boot_args, create IOMemoryDescriptor for the blob + IOAddressRange ranges; + ranges.address = args->arvRootHashStart; + ranges.length = args->arvRootHashSize; + + const IOOptionBits options = kIODirectionInOut | kIOMemoryTypePhysical64 | kIOMemoryMapperNone; + + IOMemoryDescriptor* memoryDescriptor = IOMemoryDescriptor::withOptions(&ranges, 1, 0, NULL, options); + DEBG("%s: memory descriptor %p\n", __func__, memoryDescriptor); + return memoryDescriptor; +} + +// Base System Analogues + +IOMemoryDescriptor* +IOGetBaseSystemARVRootHashData(void) +{ + //TBD! + return NULL; +} + +bool +IOBaseSystemARVRootHashAvailable(void) +{ + // Check if someone got the root hash before us + if (!OSCompareAndSwap(0, 1, &bsARVRootHashFetched)) { + return false; + } + + // Do we have in-memory root hash? + if (bsARVRootHashData) { + return true; + } + return false; +} + + +void +IOSetBaseSystemARVRootHashData(IOMemoryDescriptor* arvData) +{ + return; +} + + +// ARV Manifest fetcher + +// Store in-memory Manifest +void +IOSetARVManifestData(IOMemoryDescriptor* arvData) +{ + // Do not allow re-fetching of the boot_args manifest by passing NULL here. + if (arvData) { + arvManifestData = arvData; + arvManifestFetched = 0; + } +} + +// Retrieve any manifest we may have (stored in boot_args or in-memory) +IOMemoryDescriptor* +IOGetARVManifestData(void) +{ + // Check if someone got the manifest before us + if (!OSCompareAndSwap(0, 1, &arvManifestFetched)) { + return NULL; + } + + // Do we have in-memory manifest? 
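+	// Same one-shot pattern as the root hash: hand out any in-memory manifest once,
+	// otherwise fall back to the copy described by boot_args below.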
+ if (arvManifestData) { + IOMemoryDescriptor* arvData = arvManifestData; + arvManifestData = NULL; + return arvData; + } + + // Looks like there was no in-memory manifest and it's the first call - try boot_args + boot_args* args = (boot_args*)PE_state.bootArgs; + + DEBG("%s: data at address %llu size %llu\n", __func__, args->arvManifestStart, args->arvManifestSize); + if (args->arvManifestStart == 0) { + return NULL; + } + + // We have the manifest in the boot_args, create IOMemoryDescriptor for the blob + IOAddressRange ranges; + ranges.address = args->arvManifestStart; + ranges.length = args->arvManifestSize; + + const IOOptionBits options = kIODirectionInOut | kIOMemoryTypePhysical64 | kIOMemoryMapperNone; + + IOMemoryDescriptor* memoryDescriptor = IOMemoryDescriptor::withOptions(&ranges, 1, 0, NULL, options); + DEBG("%s: memory descriptor %p\n", __func__, memoryDescriptor); + return memoryDescriptor; +} diff --git a/iokit/System/IODataQueueDispatchSourceShared.h b/iokit/System/IODataQueueDispatchSourceShared.h index bee716add..7ee450913 100644 --- a/iokit/System/IODataQueueDispatchSourceShared.h +++ b/iokit/System/IODataQueueDispatchSourceShared.h @@ -58,7 +58,9 @@ IODataQueueDispatchSource::init() } kern_return_t -IMPL(IODataQueueDispatchSource, CheckForWork) +IODataQueueDispatchSource::CheckForWork_Impl( + const IORPC rpc, + bool synchronous) { IOReturn ret = kIOReturnNotReady; @@ -68,7 +70,10 @@ IMPL(IODataQueueDispatchSource, CheckForWork) #if KERNEL kern_return_t -IMPL(IODataQueueDispatchSource, Create) +IODataQueueDispatchSource::Create_Impl( + uint64_t queueByteCount, + IODispatchQueue * queue, + IODataQueueDispatchSource ** source) { IODataQueueDispatchSource * inst; IOBufferMemoryDescriptor * bmd; @@ -76,6 +81,9 @@ IMPL(IODataQueueDispatchSource, Create) if (3 & queueByteCount) { return kIOReturnBadArgument; } + if (queueByteCount > UINT_MAX) { + return kIOReturnBadArgument; + } inst = OSTypeAlloc(IODataQueueDispatchSource); if (!inst) { return kIOReturnNoMemory; @@ -93,7 +101,7 @@ IMPL(IODataQueueDispatchSource, Create) return kIOReturnNoMemory; } inst->ivars->memory = bmd; - inst->ivars->queueByteCount = queueByteCount; + inst->ivars->queueByteCount = ((uint32_t) queueByteCount); inst->ivars->options = 0; inst->ivars->dataQueue = (typeof(inst->ivars->dataQueue))bmd->getBytesNoCopy(); @@ -103,7 +111,8 @@ IMPL(IODataQueueDispatchSource, Create) } kern_return_t -IMPL(IODataQueueDispatchSource, CopyMemory) +IODataQueueDispatchSource::CopyMemory_Impl( + IOMemoryDescriptor ** memory) { kern_return_t ret; IOMemoryDescriptor * result; @@ -121,7 +130,8 @@ IMPL(IODataQueueDispatchSource, CopyMemory) } kern_return_t -IMPL(IODataQueueDispatchSource, CopyDataAvailableHandler) +IODataQueueDispatchSource::CopyDataAvailableHandler_Impl( + OSAction ** action) { kern_return_t ret; OSAction * result; @@ -139,7 +149,8 @@ IMPL(IODataQueueDispatchSource, CopyDataAvailableHandler) } kern_return_t -IMPL(IODataQueueDispatchSource, CopyDataServicedHandler) +IODataQueueDispatchSource::CopyDataServicedHandler_Impl( + OSAction ** action) { kern_return_t ret; OSAction * result; @@ -156,7 +167,8 @@ IMPL(IODataQueueDispatchSource, CopyDataServicedHandler) } kern_return_t -IMPL(IODataQueueDispatchSource, SetDataAvailableHandler) +IODataQueueDispatchSource::SetDataAvailableHandler_Impl( + OSAction * action) { IOReturn ret; OSAction * oldAction; @@ -178,7 +190,8 @@ IMPL(IODataQueueDispatchSource, SetDataAvailableHandler) } kern_return_t -IMPL(IODataQueueDispatchSource, SetDataServicedHandler) 
+IODataQueueDispatchSource::SetDataServicedHandler_Impl( + OSAction * action) { IOReturn ret; OSAction * oldAction; @@ -232,7 +245,9 @@ IODataQueueDispatchSource::SendDataServiced(void) } kern_return_t -IMPL(IODataQueueDispatchSource, SetEnableWithCompletion) +IODataQueueDispatchSource::SetEnableWithCompletion_Impl( + bool enable, + IODispatchSourceCancelHandler handler) { IOReturn ret; @@ -255,7 +270,8 @@ IODataQueueDispatchSource::free() } kern_return_t -IMPL(IODataQueueDispatchSource, Cancel) +IODataQueueDispatchSource::Cancel_Impl( + IODispatchSourceCancelHandler handler) { return kIOReturnSuccess; } diff --git a/iokit/Tests/TestIOMemoryDescriptor.cpp b/iokit/Tests/TestIOMemoryDescriptor.cpp index 9e3220369..98c42d28b 100644 --- a/iokit/Tests/TestIOMemoryDescriptor.cpp +++ b/iokit/Tests/TestIOMemoryDescriptor.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014-2016 Apple Inc. All rights reserved. + * Copyright (c) 2014-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -80,7 +80,7 @@ IOMultMemoryDescriptorTest(int newValue) data = (typeof(data))IOMallocAligned(ptoa(8), page_size); for (i = 0; i < ptoa(8); i++) { - data[i] = atop(i) | 0xD0; + data[i] = ((uint8_t) atop(i)) | 0xD0; } ranges[0].address = (IOVirtualAddress)(data + ptoa(4)); @@ -88,12 +88,48 @@ IOMultMemoryDescriptorTest(int newValue) ranges[1].address = (IOVirtualAddress)(data + ptoa(0)); ranges[1].length = ptoa(4); - mds[0] = IOMemoryDescriptor::withAddressRanges(&ranges[0], 2, kIODirectionOutIn, kernel_task); + mds[0] = IOMemoryDescriptor::withAddressRange((mach_vm_address_t) data, 2, kIODirectionOutIn, kernel_task); + assert(mds[0]); + { + uint64_t dmaLen, dmaOffset; + dmaLen = mds[0]->getDMAMapLength(&dmaOffset); + assert(0 == dmaOffset); + assert(ptoa(1) == dmaLen); + } + mds[0]->release(); + mds[0] = IOMemoryDescriptor::withAddressRange((mach_vm_address_t) (data + page_size - 2), 4, kIODirectionOutIn, kernel_task); + assert(mds[0]); + { + uint64_t dmaLen, dmaOffset; + dmaLen = mds[0]->getDMAMapLength(&dmaOffset); + assert((page_size - 2) == dmaOffset); + assert(ptoa(2) == dmaLen); + } + mds[0]->release(); + mds[0] = IOMemoryDescriptor::withAddressRanges(&ranges[0], 2, kIODirectionOutIn, kernel_task); + { + uint64_t dmaLen, dmaOffset; + dmaLen = mds[0]->getDMAMapLength(&dmaOffset); + assert(0 == dmaOffset); + assert(ptoa(8) == dmaLen); + } mds[1] = IOSubMemoryDescriptor::withSubRange(mds[0], ptoa(3), ptoa(2), kIODirectionOutIn); + { + uint64_t dmaLen, dmaOffset; + dmaLen = mds[1]->getDMAMapLength(&dmaOffset); + assert(0 == dmaOffset); + assert(ptoa(2) == dmaLen); + } mds[2] = IOSubMemoryDescriptor::withSubRange(mds[0], ptoa(7), ptoa(1), kIODirectionOutIn); mmd = IOMultiMemoryDescriptor::withDescriptors(&mds[0], sizeof(mds) / sizeof(mds[0]), kIODirectionOutIn, false); + { + uint64_t dmaLen, dmaOffset; + dmaLen = mmd->getDMAMapLength(&dmaOffset); + assert(0 == dmaOffset); + assert(ptoa(11) == dmaLen); + } mds[2]->release(); mds[1]->release(); mds[0]->release(); @@ -142,6 +178,12 @@ IODMACommandForceDoubleBufferTest(int newValue) bmd = IOBufferMemoryDescriptor::inTaskWithOptions(kernel_task, dir | kIOMemoryPageable, ptoa(8)); assert(bmd); + { + uint64_t dmaLen, dmaOffset; + dmaLen = bmd->getDMAMapLength(&dmaOffset); + assert(0 == dmaOffset); + assert(ptoa(8) == dmaLen); + } ((uint32_t*) bmd->getBytesNoCopy())[0] = 0x53535300 | dir; @@ -393,6 +435,17 @@ IOMemoryPrefaultTest(uint32_t options) return kIOReturnSuccess; } +static IOReturn +IOBMDOverflowTest(uint32_t options) +{ + 
IOBufferMemoryDescriptor * bmd; + + bmd = IOBufferMemoryDescriptor::inTaskWithPhysicalMask(kernel_task, kIOMemoryKernelUserShared | kIODirectionOut, + 0xffffffffffffffff, 0xfffffffffffff000); + assert(NULL == bmd); + + return kIOReturnSuccess; +} // static IOReturn @@ -793,6 +846,11 @@ IOMemoryDescriptorTest(int newValue) return result; } + result = IOBMDOverflowTest(newValue); + if (result) { + return result; + } + result = ZeroLengthTest(newValue); if (result) { return result; @@ -902,7 +960,7 @@ IOMemoryDescriptorTest(int newValue) break; } for (idx = 0; idx < size; idx += sizeof(uint32_t)) { - offidx = (idx + mapoffset + srcoffset); + offidx = (typeof(offidx))(idx + mapoffset + srcoffset); if ((srcsize <= ptoa(5)) && (srcsize > ptoa(2)) && !(page_mask & srcoffset)) { if (offidx < ptoa(2)) { offidx ^= ptoa(1); diff --git a/iokit/Tests/Tests.cpp b/iokit/Tests/Tests.cpp index 47be23b23..ac67b43b5 100644 --- a/iokit/Tests/Tests.cpp +++ b/iokit/Tests/Tests.cpp @@ -35,7 +35,11 @@ #include #include +#include #include +#include +#include +#include #include #include #include @@ -52,6 +56,7 @@ #include #include #include +#include #include #include #include @@ -74,7 +79,6 @@ #include #include #include -#include #include #include #include @@ -185,8 +189,16 @@ #include #include #include +#include +#include #include #include +#include +#include +#include +#include +#include +#include static uint64_t gIOWorkLoopTestDeadline; @@ -298,6 +310,139 @@ OSCollectionTest(int newValue) return 0; } +static int +OSAllocationTests(int) +{ + OSAllocation ints(100, OSAllocateMemory); + assert(ints); + + { + int counter = 0; + for (int& i : ints) { + i = counter++; + } + } + + { + int counter = 0; + for (int& i : ints) { + assert(i == counter); + ++counter; + } + } + + // Make sure we can have two-level OSAllocations + { + OSAllocation > testArray(10, OSAllocateMemory); + for (int i = 0; i < 10; i++) { + testArray[i] = OSAllocation(10, OSAllocateMemory); + for (int j = 0; j < 10; ++j) { + testArray[i][j] = i + j; + } + } + + for (int i = 0; i < 10; i++) { + for (int j = 0; j < 10; ++j) { + assert(testArray[i][j] == i + j); + } + } + } + + return 0; +} + +static int +OSBoundedArrayTests(int) +{ + OSBoundedArray ints = {0, 1, 2, 3, 4}; + assert(ints.size() == 5); + + { + int counter = 0; + for (int& i : ints) { + i = counter++; + } + } + + { + int counter = 0; + for (int& i : ints) { + assert(i == counter); + ++counter; + } + } + + return 0; +} + +static int +OSBoundedArrayRefTests(int) +{ + OSBoundedArray storage = {0, 1, 2, 3, 4}; + OSBoundedArrayRef ints(storage); + assert(ints); + + { + int counter = 0; + for (int& i : ints) { + i = counter++; + } + } + + { + int counter = 0; + for (int& i : ints) { + assert(i == counter); + ++counter; + } + } + + return 0; +} + +static int +OSBoundedPtrTests(int) +{ + int array[5] = {55, 66, 77, 88, 99}; + OSBoundedPtr begin(&array[0], &array[0], &array[5]); + OSBoundedPtr end(&array[5], &array[0], &array[5]); + + { + int counter = 0; + for (OSBoundedPtr b = begin; b != end; ++b) { + *b = counter++; + } + } + + { + int counter = 0; + for (OSBoundedPtr b = begin; b != end; ++b) { + assert(*b == counter); + ++counter; + } + } + + return 0; +} + +static int +IOSharedDataQueue_44636964(__unused int newValue) +{ + IOSharedDataQueue* sd = IOSharedDataQueue::withCapacity(DATA_QUEUE_ENTRY_HEADER_SIZE + sizeof(UInt64)); + UInt64 data = 0x11223344aa55aa55; + UInt32 data2 = 0x44332211; + UInt32 size = sizeof(UInt32); + /* enqueue moves tail to end */ + sd->enqueue(&data, sizeof(UInt64)); + 
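OSBoundedPtrTests above exercises a pointer that carries its valid range and traps on out-of-bounds dereference. The class below is a minimal user-space sketch of that idea, shaped after the test's begin/end usage; it is not the libkern OSBoundedPtr implementation.

#include <cassert>

template <typename T>
class bounded_ptr {
    T *ptr_, *begin_, *end_;
public:
    bounded_ptr(T *p, T *begin, T *end) : ptr_(p), begin_(begin), end_(end) {}
    T &operator*() const { assert(ptr_ >= begin_ && ptr_ < end_); return *ptr_; }  // bounds check on every dereference
    bounded_ptr &operator++() { ++ptr_; return *this; }
    bool operator!=(const bounded_ptr &o) const { return ptr_ != o.ptr_; }
};

int main() {
    int array[5] = {55, 66, 77, 88, 99};
    bounded_ptr<int> begin(&array[0], &array[0], &array[5]);
    bounded_ptr<int> end(&array[5], &array[0], &array[5]);

    int counter = 0;
    for (bounded_ptr<int> b = begin; b != end; ++b) {
        *b = counter++;              // in-bounds accesses succeed
    }
    // Dereferencing `end` itself would trip the assert, which is the point.
    return 0;
}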
/* dequeue moves head to end */ + sd->dequeue(&data, &size); + /* Tail wraps around, head is still at end */ + sd->enqueue(&data2, sizeof(UInt32)); + /* something in the queue so peek() should return non-null */ + assert(sd->peek() != NULL); + return KERN_SUCCESS; +} + #if 0 #include class TestUserClient : public IOUserClient @@ -383,8 +528,125 @@ IOServiceTest(int newValue) return 0; } +static void +OSStaticPtrCastTests() +{ + // const& overload + { + OSSharedPtr const dict = OSMakeShared(); + OSSharedPtr collection = OSStaticPtrCast(dict); + assert(collection == dict); + } + { + OSSharedPtr const dict = nullptr; + OSSharedPtr collection = OSStaticPtrCast(dict); + assert(collection == nullptr); + } + // && overload + { + OSSharedPtr dict = OSMakeShared(); + OSDictionary* oldDict = dict.get(); + OSSharedPtr collection = OSStaticPtrCast(os::move(dict)); + assert(collection.get() == oldDict); + assert(dict == nullptr); + } + { + OSSharedPtr dict = nullptr; + OSSharedPtr collection = OSStaticPtrCast(os::move(dict)); + assert(collection == nullptr); + assert(dict == nullptr); + } +} + +static void +OSConstPtrCastTests() +{ + // const& overload + { + OSSharedPtr const dict = OSMakeShared(); + OSSharedPtr dict2 = OSConstPtrCast(dict); + assert(dict2 == dict); + } + { + OSSharedPtr const dict = OSMakeShared(); + OSSharedPtr dict2 = OSConstPtrCast(dict); + assert(dict2 == dict); + } + { + OSSharedPtr const dict = nullptr; + OSSharedPtr dict2 = OSConstPtrCast(dict); + assert(dict2 == nullptr); + } + { + OSSharedPtr const dict = nullptr; + OSSharedPtr dict2 = OSConstPtrCast(dict); + assert(dict2 == nullptr); + } + + // && overload + { + OSSharedPtr dict = OSMakeShared(); + OSDictionary const* oldDict = dict.get(); + OSSharedPtr dict2 = OSConstPtrCast(os::move(dict)); + assert(dict == nullptr); + assert(dict2 == oldDict); + } + { + OSSharedPtr dict = nullptr; + OSSharedPtr dict2 = OSConstPtrCast(os::move(dict)); + assert(dict == nullptr); + assert(dict2 == nullptr); + } +} + +static void +OSDynamicPtrCastTests() +{ + OSSharedPtr const dict = OSMakeShared(); + { + OSSharedPtr collection = OSDynamicPtrCast(dict); + assert(collection != nullptr); + } + { + OSSharedPtr array = OSDynamicPtrCast(dict); + assert(array == nullptr); + assert(dict != nullptr); + } + { + OSTaggedSharedPtr taggedDict(dict.get(), OSRetain); + OSTaggedSharedPtr collection = OSDynamicPtrCast(taggedDict); + assert(collection != nullptr); + } + { + OSTaggedSharedPtr taggedDict(dict.get(), OSRetain); + OSTaggedSharedPtr array = OSDynamicPtrCast(taggedDict); + assert(array == nullptr); + assert(dict != nullptr); + } + { + OSSharedPtr collection = OSDynamicPtrCast(dict); + assert(collection.get() == OSDynamicCast(OSDictionary, dict.get())); + OSSharedPtr newDict = OSDynamicPtrCast(os::move(collection)); + assert(collection == nullptr); + assert(newDict != nullptr); + assert(newDict.get() == dict.get()); + } +} + +static int +OSSharedPtrTests(int) +{ + OSDynamicPtrCastTests(); + OSConstPtrCastTests(); + OSStaticPtrCastTests(); + return 0; +} + #endif /* DEVELOPMENT || DEBUG */ +#ifndef __clang_analyzer__ +// All the scary things that this function is doing, such as the intentional +// overrelease of an OSData, are hidden from the static analyzer. 
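The OSStaticPtrCast, OSConstPtrCast and OSDynamicPtrCast tests above behave much like the standard shared_ptr cast helpers. The following user-space analogue, using std::shared_ptr with placeholder Collection/Dictionary/Array types, shows the same three cast shapes; it is an illustration of the semantics, not the libkern code.

#include <cassert>
#include <memory>

struct Collection { virtual ~Collection() = default; };
struct Dictionary : Collection {};
struct Array      : Collection {};

int main() {
    std::shared_ptr<Dictionary> dict = std::make_shared<Dictionary>();

    // OSStaticPtrCast analogue: unchecked (static) conversion.
    std::shared_ptr<Collection> coll = std::static_pointer_cast<Collection>(dict);
    assert(coll == dict);

    // OSConstPtrCast analogue: add or remove constness, same object.
    std::shared_ptr<const Dictionary> cdict = dict;
    std::shared_ptr<Dictionary> back = std::const_pointer_cast<Dictionary>(cdict);
    assert(back == dict);

    // OSDynamicPtrCast analogue: checked downcast, null on type mismatch.
    assert(std::dynamic_pointer_cast<Dictionary>(coll) != nullptr);
    assert(std::dynamic_pointer_cast<Array>(coll) == nullptr);
    return 0;
}

One difference worth noting: the OSDynamicPtrCast overload that takes an rvalue nulls the source pointer on success, whereas std::dynamic_pointer_cast always leaves the source untouched.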
static int sysctl_iokittest(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req) { @@ -455,8 +717,20 @@ sysctl_iokittest(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused assert(KERN_SUCCESS == error); error = OSCollectionTest(newValue); assert(KERN_SUCCESS == error); + error = OSAllocationTests(newValue); + assert(KERN_SUCCESS == error); + error = OSBoundedArrayTests(newValue); + assert(KERN_SUCCESS == error); + error = OSBoundedArrayRefTests(newValue); + assert(KERN_SUCCESS == error); + error = OSBoundedPtrTests(newValue); + assert(KERN_SUCCESS == error); error = IOMemoryDescriptorTest(newValue); assert(KERN_SUCCESS == error); + error = OSSharedPtrTests(newValue); + assert(KERN_SUCCESS == error); + error = IOSharedDataQueue_44636964(newValue); + assert(KERN_SUCCESS == error); } #endif /* DEVELOPMENT || DEBUG */ @@ -466,3 +740,4 @@ sysctl_iokittest(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused SYSCTL_PROC(_kern, OID_AUTO, iokittest, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NOAUTO | CTLFLAG_KERN | CTLFLAG_LOCKED, NULL, 0, sysctl_iokittest, "I", ""); +#endif // __clang_analyzer__ diff --git a/iokit/bsddev/DINetBootHook.cpp b/iokit/bsddev/DINetBootHook.cpp index 6f4eb396d..263386ad3 100644 --- a/iokit/bsddev/DINetBootHook.cpp +++ b/iokit/bsddev/DINetBootHook.cpp @@ -87,17 +87,21 @@ #endif #include +#include #include #include #include "DINetBootHook.h" #define kIOHDIXControllerClassName "IOHDIXController" #define kDIRootImageKey "di-root-image" +#define kDIRootImageRemovableKey "di-root-removable" #define kDIRootImageResultKey "di-root-image-result" #define kDIRootImageDevNameKey "di-root-image-devname" #define kDIRootImageDevTKey "di-root-image-devt" #define kDIRootRamFileKey "di-root-ram-file" +#define kDIMatchQuiesceTimeout 30ull + static IOService * di_load_controller( void ) { @@ -139,16 +143,24 @@ di_load_controller( void ) } extern "C" { -/* - * Name: di_root_image - * Function: mount the disk image returning the dev node - * Parameters: path -> path/url to disk image - * devname <- dev node used to set the rootdevice global variable - * dev_p <- device number generated from major/minor numbers - * Comments: +/* FIXME: removable should be replaced with a struct (so it could be easily + * extensible in the future). However, since there is no common header file + * between imageboot and NetBoot, we opt for a simple bool for now. + * Refactor this into a common header file. */ +static int +di_add_properties(IOService *controller, bool removable) +{ + if (!controller->setProperty(kDIRootImageRemovableKey, removable ? kOSBooleanTrue : kOSBooleanFalse)) { + IOLog("IOHDIXController::setProperty(%s, %d) failed.\n", kDIRootImageRemovableKey, !!removable); + return kIOReturnBadArgument; + } + + return kIOReturnSuccess; +} + int -di_root_image(const char *path, char *devname, size_t devsz, dev_t *dev_p) +di_root_image_ext(const char *path, char *devname, size_t devsz, dev_t *dev_p, bool removable) { IOReturn res = 0; IOService * controller = NULL; @@ -188,6 +200,17 @@ di_root_image(const char *path, char *devname, size_t devsz, dev_t *dev_p) goto CannotCreatePathOSString; } + /* + * This is a bit racy, as two concurrent attached could have + * different properties. However, since we query the result and dev + * below locklessly, the existing code is already racy, so we + * keep the status quo. 
+ */ + res = di_add_properties(controller, removable); + if (res) { + goto error_add_properties; + } + // do it if (!controller->setProperty(kDIRootImageKey, pathString)) { IOLog("IOHDIXController::setProperty(%s, %s) failed.\n", kDIRootImageKey, pathString->getCStringNoCopy()); @@ -223,10 +246,25 @@ di_root_image(const char *path, char *devname, size_t devsz, dev_t *dev_p) goto di_root_image_FAILED; } + /* + * NOTE: The attached disk image may trigger IOKit matching. At the very least, an IOMedia + * must claim it. More complex scenarios might include a GPT containing a partition mapping + * to an APFS container, both of which need to probe and claim their respective media devices. + * + * After the attach is complete, we should quiesce the disk image controller before returning + * from this function successfully. If we failed to quiesce, then we should treat it as a hard + * failure, to make it more obvious to triage. + */ + res = controller->waitQuiet((NSEC_PER_SEC * kDIMatchQuiesceTimeout)); + if (res) { + IOLog("failed to quiesce attached disk image (%s)! \n", devname); + goto di_root_image_FAILED; + } di_root_image_FAILED: CannotCreatePathOSString: NoIOHDIXController: +error_add_properties: // clean up memory allocations if (pathString) { @@ -239,6 +277,21 @@ NoIOHDIXController: return res; } +/* + * Name: di_root_image + * Function: mount the disk image returning the dev node + * Parameters: path -> path/url to disk image + * devname <- dev node used to set the rootdevice global variable + * dev_p <- device number generated from major/minor numbers + * Comments: + * This is an exported function. Changing this will break API. + */ +int +di_root_image(const char *path, char *devname, size_t devsz, dev_t *dev_p) +{ + return di_root_image_ext(path, devname, devsz, dev_p, false); +} + int di_root_ramfile_buf(void *buf, size_t bufsz, char *devname, size_t devsz, dev_t *dev_p) { diff --git a/iokit/bsddev/DINetBootHook.h b/iokit/bsddev/DINetBootHook.h index 3f3a0e331..930f5ece6 100644 --- a/iokit/bsddev/DINetBootHook.h +++ b/iokit/bsddev/DINetBootHook.h @@ -95,6 +95,7 @@ extern "C" { * Comments: */ int di_root_image(const char *path, char *devname, size_t devsz, dev_t *dev_p); +int di_root_image_ext(const char *path, char *devname, size_t devsz, dev_t *dev_p, bool removable); void di_root_ramfile( IORegistryEntry * entry ); int di_root_ramfile_buf(void *buf, size_t bufsz, char *devname, size_t devsz, dev_t *dev_p); diff --git a/iokit/bsddev/IOKitBSDInit.cpp b/iokit/bsddev/IOKitBSDInit.cpp index 11514d895..903cda795 100644 --- a/iokit/bsddev/IOKitBSDInit.cpp +++ b/iokit/bsddev/IOKitBSDInit.cpp @@ -31,6 +31,7 @@ #include #include #include +#include #include #include @@ -61,10 +62,14 @@ extern void di_root_ramfile(IORegistryEntry * entry); #if defined(XNU_TARGET_OS_BRIDGE) #define kIOCoreDumpPath "/private/var/internal/kernelcore" +#elif defined(XNU_TARGET_OS_OSX) +#define kIOCoreDumpPath "/System/Volumes/VM/kernelcore" #else #define kIOCoreDumpPath "/private/var/vm/kernelcore" #endif +#define SYSTEM_NVRAM_PREFIX "40A0DDD2-77F8-4392-B4A3-1E7304206516:" + #if CONFIG_KDP_INTERACTIVE_DEBUGGING /* * Touched by IOFindBSDRoot() if a RAMDisk is used for the root device. 
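The DINetBootHook change above keeps the exported di_root_image() intact and routes it through the new di_root_image_ext(), which takes the extra removable flag. The snippet below is a generic sketch of that compatibility-shim pattern with hypothetical names; it only mirrors the shape of the change, not the actual mounting code.

#include <cstdio>

// New, extensible entry point (extra `removable` argument).
static int mount_image_ext(const char *path, bool removable) {
    std::printf("mount %s (removable=%d)\n", path, removable ? 1 : 0);
    return 0;
}

// Original exported symbol preserved for existing callers: same signature,
// same behavior, now just a forwarding shim (like di_root_image forwarding
// to di_root_image_ext with removable == false).
int mount_image(const char *path) {
    return mount_image_ext(path, false);
}

int main() {
    return mount_image("/System/Library/image.dmg");
}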
@@ -340,7 +345,7 @@ IOOFPathMatching( const char * path, char * buf, int maxLen ) int len; do { - len = strlen( kIODeviceTreePlane ":" ); + len = ((int) strlen( kIODeviceTreePlane ":" )); maxLen -= len; if (maxLen <= 0) { continue; @@ -349,7 +354,7 @@ IOOFPathMatching( const char * path, char * buf, int maxLen ) strlcpy( buf, kIODeviceTreePlane ":", len + 1 ); comp = buf + len; - len = strlen( path ); + len = ((int) strnlen( path, INT_MAX )); maxLen -= len; if (maxLen <= 0) { continue; @@ -381,6 +386,168 @@ IOOFPathMatching( const char * path, char * buf, int maxLen ) static int didRam = 0; enum { kMaxPathBuf = 512, kMaxBootVar = 128 }; +const char* +IOGetBootUUID(void) +{ + IORegistryEntry *entry; + + if ((entry = IORegistryEntry::fromPath("/chosen", gIODTPlane))) { + OSData *uuid_data = (OSData *)entry->getProperty("boot-uuid"); + if (uuid_data) { + return (const char*)uuid_data->getBytesNoCopy(); + } + } + + return NULL; +} + +const char * +IOGetApfsPrebootUUID(void) +{ + IORegistryEntry *entry; + + if ((entry = IORegistryEntry::fromPath("/chosen", gIODTPlane))) { + OSData *uuid_data = (OSData *)entry->getProperty("apfs-preboot-uuid"); + if (uuid_data) { + return (const char*)uuid_data->getBytesNoCopy(); + } + } + + return NULL; +} + +const char * +IOGetAssociatedApfsVolgroupUUID(void) +{ + IORegistryEntry *entry; + + if ((entry = IORegistryEntry::fromPath("/chosen", gIODTPlane))) { + OSData *uuid_data = (OSData *)entry->getProperty("associated-volume-group"); + if (uuid_data) { + return (const char*)uuid_data->getBytesNoCopy(); + } + } + + return NULL; +} + +const char * +IOGetBootObjectsPath(void) +{ + IORegistryEntry *entry; + + if ((entry = IORegistryEntry::fromPath("/chosen", gIODTPlane))) { + OSData *path_prefix_data = (OSData *)entry->getProperty("boot-objects-path"); + if (path_prefix_data) { + return (const char *)path_prefix_data->getBytesNoCopy(); + } + } + + return NULL; +} + +/* + * Set NVRAM to boot into the right flavor of Recovery, + * optionally passing a UUID of a volume that failed to boot. + * If `reboot` is true, reboot immediately. + * + * Returns true if `mode` was understood, false otherwise. + * (Does not return if `reboot` is true.) + */ +boolean_t +IOSetRecoveryBoot(bsd_bootfail_mode_t mode, uuid_t volume_uuid, boolean_t reboot) +{ + IODTNVRAM *nvram = NULL; + const OSSymbol *boot_command_sym = NULL; + OSString *boot_command_recover = NULL; + + if (mode == BSD_BOOTFAIL_SEAL_BROKEN) { + const char *boot_mode = "ssv-seal-broken"; + uuid_string_t volume_uuid_str; + + // Set `recovery-broken-seal-uuid = `. + if (volume_uuid) { + uuid_unparse_upper(volume_uuid, volume_uuid_str); + + if (!PEWriteNVRAMProperty(SYSTEM_NVRAM_PREFIX "recovery-broken-seal-uuid", + volume_uuid_str, sizeof(uuid_string_t))) { + IOLog("Failed to write recovery-broken-seal-uuid to NVRAM.\n"); + } + } + + // Set `recovery-boot-mode = ssv-seal-broken`. + if (!PEWriteNVRAMProperty(SYSTEM_NVRAM_PREFIX "recovery-boot-mode", boot_mode, + (const unsigned int) strlen(boot_mode))) { + IOLog("Failed to write recovery-boot-mode to NVRAM.\n"); + } + } else if (mode == BSD_BOOTFAIL_MEDIA_MISSING) { + const char *boot_picker_reason = "missing-boot-media"; + + // Set `boot-picker-bringup-reason = missing-boot-media`. + if (!PEWriteNVRAMProperty(SYSTEM_NVRAM_PREFIX "boot-picker-bringup-reason", + boot_picker_reason, (const unsigned int) strlen(boot_picker_reason))) { + IOLog("Failed to write boot-picker-bringup-reason to NVRAM.\n"); + } + + // Set `boot-command = recover`. 
+ + // Construct an OSSymbol and an OSString to be the (key, value) pair + // we write to NVRAM. Unfortunately, since our value must be an OSString + // instead of an OSData, we cannot use PEWriteNVRAMProperty() here. + boot_command_sym = OSSymbol::withCStringNoCopy(SYSTEM_NVRAM_PREFIX "boot-command"); + boot_command_recover = OSString::withCStringNoCopy("recover"); + if (boot_command_sym == NULL || boot_command_recover == NULL) { + IOLog("Failed to create boot-command strings.\n"); + goto do_reboot; + } + + // Wait for NVRAM to be readable... + nvram = OSDynamicCast(IODTNVRAM, IOService::waitForService( + IOService::serviceMatching("IODTNVRAM"))); + if (nvram == NULL) { + IOLog("Failed to acquire IODTNVRAM object.\n"); + goto do_reboot; + } + + // Wait for NVRAM to be writable... + if (!IOServiceWaitForMatchingResource("IONVRAM", UINT64_MAX)) { + IOLog("Failed to wait for IONVRAM service.\n"); + // attempt the work anyway... + } + + // Write the new boot-command to NVRAM, and sync if successful. + if (!nvram->setProperty(boot_command_sym, boot_command_recover)) { + IOLog("Failed to save new boot-command to NVRAM.\n"); + } else { + nvram->sync(); + } + } else { + IOLog("Unknown mode: %d\n", mode); + return false; + } + + // Clean up and reboot! +do_reboot: + if (nvram != NULL) { + nvram->release(); + } + + if (boot_command_recover != NULL) { + boot_command_recover->release(); + } + + if (boot_command_sym != NULL) { + boot_command_sym->release(); + } + + if (reboot) { + IOLog("\nAbout to reboot into Recovery!\n"); + (void)PEHaltRestart(kPERestartCPU); + } + + return true; +} + kern_return_t IOFindBSDRoot( char * rootName, unsigned int rootNameSize, dev_t * root, u_int32_t * oflags ) @@ -482,7 +649,13 @@ IOFindBSDRoot( char * rootName, unsigned int rootNameSize, if (data) { /* We found one */ uintptr_t *ramdParms; ramdParms = (uintptr_t *)data->getBytesNoCopy(); /* Point to the ram disk base and size */ - (void)mdevadd(-1, ml_static_ptovirt(ramdParms[0]) >> 12, ramdParms[1] >> 12, 0); /* Initialize it and pass back the device number */ +#if __LP64__ +#define MAX_PHYS_RAM (((uint64_t)UINT_MAX) << 12) + if (ramdParms[1] > MAX_PHYS_RAM) { + panic("ramdisk params"); + } +#endif /* __LP64__ */ + (void)mdevadd(-1, ml_static_ptovirt(ramdParms[0]) >> 12, (unsigned int) (ramdParms[1] >> 12), 0); /* Initialize it and pass back the device number */ } regEntry->release(); /* Toss the entry */ } @@ -510,7 +683,7 @@ IOFindBSDRoot( char * rootName, unsigned int rootNameSize, if (*root >= 0) { /* Did we find one? */ rootName[0] = 'm'; /* Build root name */ rootName[1] = 'd'; /* Build root name */ - rootName[2] = dchar; /* Build root name */ + rootName[2] = (char) dchar; /* Build root name */ rootName[3] = 0; /* Build root name */ IOLog("BSD root: %s, major %d, minor %d\n", rootName, major(*root), minor(*root)); *oflags = 0; /* Show that this is not network */ @@ -603,8 +776,10 @@ IOFindBSDRoot( char * rootName, unsigned int rootNameSize, matching->retain(); service = IOService::waitForService( matching, &t ); if ((!service) || (mountAttempts == 10)) { +#if !XNU_TARGET_OS_OSX || !defined(__arm64__) PE_display_icon( 0, "noroot"); IOLog( "Still waiting for root device\n" ); +#endif if (!debugInfoPrintedOnce) { debugInfoPrintedOnce = true; @@ -620,6 +795,11 @@ IOFindBSDRoot( char * rootName, unsigned int rootNameSize, IOPrintMemory(); } } + +#if XNU_TARGET_OS_OSX && defined(__arm64__) + // The disk isn't found - have the user pick from recoveryOS+. 
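Condensing the IOSetRecoveryBoot() logic above, each failure mode maps to a small set of system-scoped NVRAM variables. The table in this sketch lists exactly the variable names and values written by the code in this hunk; the table-driven form itself is only an illustration, not how the kernel structures it.

#include <cstdio>

struct recovery_action {
    const char *mode;          // bsd_bootfail_mode_t value, by name
    const char *variable;      // system-scoped (GUID-prefixed) NVRAM variable
    const char *value;
};

static const recovery_action kActions[] = {
    { "BSD_BOOTFAIL_SEAL_BROKEN",   "recovery-boot-mode",         "ssv-seal-broken"    },
    { "BSD_BOOTFAIL_SEAL_BROKEN",   "recovery-broken-seal-uuid",  "<volume UUID>"      },
    { "BSD_BOOTFAIL_MEDIA_MISSING", "boot-picker-bringup-reason", "missing-boot-media" },
    { "BSD_BOOTFAIL_MEDIA_MISSING", "boot-command",               "recover"            },
};

int main() {
    for (const recovery_action &a : kActions) {
        std::printf("%-28s %s=%s\n", a.mode, a.variable, a.value);
    }
    return 0;
}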
+ (void)IOSetRecoveryBoot(BSD_BOOTFAIL_MEDIA_MISSING, NULL, true); +#endif } } while (!service); matching->release(); @@ -715,7 +895,7 @@ IORamDiskBSDRoot(void) void IOSecureBSDRoot(const char * rootName) { -#if CONFIG_EMBEDDED +#if CONFIG_SECURE_BSD_ROOT IOReturn result; IOPlatformExpert *pe; OSDictionary *matching; @@ -736,7 +916,7 @@ IOSecureBSDRoot(const char * rootName) mdevremoveall(); } -#endif // CONFIG_EMBEDDED +#endif // CONFIG_SECURE_BSD_ROOT } void * @@ -813,7 +993,7 @@ IOPolledCoreFileMode_t gIOPolledCoreFileMode = kIOPolledCoreFileModeNotInitializ #define kIOCoreDumpSize 150ULL*1024ULL*1024ULL #define kIOCoreDumpFreeSize 150ULL*1024ULL*1024ULL -#elif CONFIG_EMBEDDED /* defined(XNU_TARGET_OS_BRIDGE) */ +#elif !defined(XNU_TARGET_OS_OSX) /* defined(XNU_TARGET_OS_BRIDGE) */ // On embedded devices with >3GB DRAM we allocate a 500MB corefile // otherwise allocate a 350MB corefile. Leave 350 MB free @@ -859,7 +1039,7 @@ IOCoreFileGetSize(uint64_t *ideal_size, uint64_t *fallback_size, uint64_t *free_ #pragma unused(mode) *ideal_size = *fallback_size = kIOCoreDumpSize; *free_space_to_leave = kIOCoreDumpFreeSize; -#elif CONFIG_EMBEDDED /* defined(XNU_TARGET_OS_BRIDGE) */ +#elif !defined(XNU_TARGET_OS_OSX) /* defined(XNU_TARGET_OS_BRIDGE) */ #pragma unused(mode) *ideal_size = *fallback_size = kIOCoreDumpMinSize; @@ -1046,3 +1226,39 @@ IOTaskHasEntitlement(task_t task, const char * entitlement) obj->release(); return obj != kOSBooleanFalse; } + +extern "C" boolean_t +IOVnodeHasEntitlement(vnode_t vnode, int64_t off, const char *entitlement) +{ + OSObject * obj; + off_t offset = (off_t)off; + + obj = IOUserClient::copyClientEntitlementVnode(vnode, offset, entitlement); + if (!obj) { + return false; + } + obj->release(); + return obj != kOSBooleanFalse; +} + +extern "C" char * +IOVnodeGetEntitlement(vnode_t vnode, int64_t off, const char *entitlement) +{ + OSObject *obj = NULL; + OSString *str = NULL; + size_t len; + char *value = NULL; + off_t offset = (off_t)off; + + obj = IOUserClient::copyClientEntitlementVnode(vnode, offset, entitlement); + if (obj != NULL) { + str = OSDynamicCast(OSString, obj); + if (str != NULL) { + len = str->getLength() + 1; + value = (char *)kheap_alloc(KHEAP_DATA_BUFFERS, len, Z_WAITOK); + strlcpy(value, str->getCStringNoCopy(), len); + } + obj->release(); + } + return value; +} diff --git a/iokit/conf/Makefile b/iokit/conf/Makefile index 05c4b79cf..51eddb889 100644 --- a/iokit/conf/Makefile +++ b/iokit/conf/Makefile @@ -23,7 +23,7 @@ endif $(TARGET)/$(CURRENT_KERNEL_CONFIG)/Makefile: $(SRCROOT)/SETUP/config/doconf $(OBJROOT)/SETUP/config $(DOCONFDEPS) $(_v)$(MKDIR) $(TARGET)/$(CURRENT_KERNEL_CONFIG) - $(_v)$(SRCROOT)/SETUP/config/doconf -c -cpu $(DOCONF_ARCH_CONFIG_LC) -soc $(CURRENT_MACHINE_CONFIG_LC) -d $(TARGET)/$(CURRENT_KERNEL_CONFIG) -s $(SOURCE) -m $(MASTERCONFDIR) $(CURRENT_KERNEL_CONFIG) + $(_v)$(SRCROOT)/SETUP/config/doconf -c -cpu $(DOCONF_ARCH_CONFIG_LC) -soc $(CURRENT_MACHINE_CONFIG_LC) -platform $(PLATFORM) -d $(TARGET)/$(CURRENT_KERNEL_CONFIG) -s $(SOURCE) -m $(MASTERCONFDIR) $(CURRENT_KERNEL_CONFIG) do_all: $(TARGET)/$(CURRENT_KERNEL_CONFIG)/Makefile $(_v)${MAKE} \ diff --git a/iokit/conf/Makefile.arm b/iokit/conf/Makefile.arm index 184148ea4..d57017bde 100644 --- a/iokit/conf/Makefile.arm +++ b/iokit/conf/Makefile.arm @@ -1,18 +1,67 @@ ###################################################################### #BEGIN Machine dependent Makefile fragment for arm ###################################################################### - 
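IOVnodeGetEntitlement() above returns NULL when the entitlement is absent or not a string, and otherwise a freshly allocated, NUL-terminated copy of the value. The function below is a user-space sketch of that contract only, with malloc/memcpy standing in for kheap_alloc and strlcpy; it is an assumption-labeled analogue, not the kernel routine.

#include <cstdlib>
#include <cstring>

// Returns NULL if no string value is present, otherwise a heap copy the caller must free.
char *copy_entitlement_value(const char *value_or_null) {
    if (value_or_null == nullptr) {
        return nullptr;
    }
    size_t len = std::strlen(value_or_null) + 1;         // kernel uses getLength() + 1
    char *copy = static_cast<char *>(std::malloc(len));  // kernel uses kheap_alloc(KHEAP_DATA_BUFFERS, len, Z_WAITOK)
    if (copy != nullptr) {
        std::memcpy(copy, value_or_null, len);
    }
    return copy;
}

int main() {
    char *v = copy_entitlement_value("com.example.some-entitlement-value");
    std::free(v);
    return 0;
}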
-IODMACommand.cpo_CXXWARNFLAGS_ADD += -Wno-cast-align -IODataQueue.cpo_CXXWARNFLAGS_ADD += -Wno-cast-align -IOMemoryDescriptor.cpo_CXXWARNFLAGS_ADD += -Wno-cast-align -IONVRAM.cpo_CXXWARNFLAGS_ADD += -Wno-cast-align -IOPMrootDomain.cpo_CXXWARNFLAGS_ADD += -Wno-cast-align -IOSharedDataQueue.cpo_CXXWARNFLAGS_ADD += -Wno-cast-align -IOUserClient.cpo_CXXWARNFLAGS_ADD += -Wno-cast-align +# +# XXX: CFLAGS +# +CWARNFLAGS = $(CWARNFLAGS_STD) -Wno-unused-parameter +CXXWARNFLAGS = $(CXXWARNFLAGS_STD) -Wno-unused-parameter -Wno-cast-qual -Wno-shadow # Files that must go in the __HIB segment: HIB_FILES= +# +# Diagnostic opt-outs. We need to make this list empty. +# +# DO NOT ADD MORE HERE. +# +# -Wno-implicit-int-conversion +IOBufferMemoryDescriptor.cpo_CXXWARNFLAGS_ADD += -Wno-implicit-int-conversion +IODMACommand.cpo_CXXWARNFLAGS_ADD += -Wno-implicit-int-conversion +IOHibernateIO.cpo_CXXWARNFLAGS_ADD += -Wno-implicit-int-conversion +IOHibernateRestoreKernel.o_CFLAGS_ADD += -Wno-implicit-int-conversion +IOHistogramReporter.cpo_CXXWARNFLAGS_ADD += -Wno-implicit-int-conversion +IOKitBSDInit.cpo_CXXWARNFLAGS_ADD += -Wno-implicit-int-conversion +IOKitDebug.cpo_CXXWARNFLAGS_ADD += -Wno-implicit-int-conversion +IOMemoryDescriptor.cpo_CXXWARNFLAGS_ADD += -Wno-implicit-int-conversion +IONVRAM.cpo_CXXWARNFLAGS_ADD += -Wno-implicit-int-conversion +IOReporter.cpo_CXXWARNFLAGS_ADD += -Wno-implicit-int-conversion +IOService.cpo_CXXWARNFLAGS_ADD += -Wno-implicit-int-conversion +IOStateReporter.cpo_CXXWARNFLAGS_ADD += -Wno-implicit-int-conversion +TestIOMemoryDescriptor.cpo_CXXWARNFLAGS_ADD += -Wno-implicit-int-conversion +# -Wno-shorten-64-to-32 +IOBufferMemoryDescriptor.cpo_CXXWARNFLAGS_ADD += -Wno-shorten-64-to-32 +IOCPU.cpo_CXXWARNFLAGS_ADD += -Wno-shorten-64-to-32 +IODMACommand.cpo_CXXWARNFLAGS_ADD += -Wno-shorten-64-to-32 +IODMAEventSource.cpo_CXXWARNFLAGS_ADD += -Wno-shorten-64-to-32 +IODeviceTreeSupport.cpo_CXXWARNFLAGS_ADD += -Wno-shorten-64-to-32 +IOHibernateIO.cpo_CXXWARNFLAGS_ADD += -Wno-shorten-64-to-32 +IOHibernateRestoreKernel.o_CFLAGS_ADD += -Wno-shorten-64-to-32 +IOInterruptController.cpo_CXXWARNFLAGS_ADD += -Wno-shorten-64-to-32 +IOKitBSDInit.cpo_CXXWARNFLAGS_ADD += -Wno-shorten-64-to-32 +IOKitDebug.cpo_CXXWARNFLAGS_ADD += -Wno-shorten-64-to-32 +IOLib.cpo_CXXWARNFLAGS_ADD += -Wno-shorten-64-to-32 +IOMapper.cpo_CXXWARNFLAGS_ADD += -Wno-shorten-64-to-32 +IOMemoryCursor.cpo_CXXWARNFLAGS_ADD += -Wno-shorten-64-to-32 +IOMemoryDescriptor.cpo_CXXWARNFLAGS_ADD += -Wno-shorten-64-to-32 +IOMultiMemoryDescriptor.cpo_CXXWARNFLAGS_ADD += -Wno-shorten-64-to-32 +IONVRAM.cpo_CXXWARNFLAGS_ADD += -Wno-shorten-64-to-32 +IOPerfControl.cpo_CXXWARNFLAGS_ADD += -Wno-shorten-64-to-32 +IOPlatformExpert.cpo_CXXWARNFLAGS_ADD += -Wno-shorten-64-to-32 +IOPolledInterface.cpo_CXXWARNFLAGS_ADD += -Wno-shorten-64-to-32 +IORegistryEntry.cpo_CXXWARNFLAGS_ADD += -Wno-shorten-64-to-32 +IOService.cpo_CXXWARNFLAGS_ADD += -Wno-shorten-64-to-32 +IOSkywalkSupport.cpo_CXXWARNFLAGS_ADD += -Wno-shorten-64-to-32 +IOStartIOKit.cpo_CXXWARNFLAGS_ADD += -Wno-shorten-64-to-32 +IOStatistics.cpo_CXXWARNFLAGS_ADD += -Wno-shorten-64-to-32 +IOStringFuncs.o_CFLAGS_ADD += -Wno-shorten-64-to-32 +IOSubMemoryDescriptor.cpo_CXXWARNFLAGS_ADD += -Wno-shorten-64-to-32 +IOTimerEventSource.cpo_CXXWARNFLAGS_ADD += -Wno-shorten-64-to-32 +IOUserClient.cpo_CXXWARNFLAGS_ADD += -Wno-shorten-64-to-32 +IOUserServer.cpo_CXXWARNFLAGS_ADD += -Wno-shorten-64-to-32 +RootDomainUserClient.cpo_CXXWARNFLAGS_ADD += -Wno-shorten-64-to-32 +TestIOMemoryDescriptor.cpo_CXXWARNFLAGS_ADD 
+= -Wno-shorten-64-to-32 + ###################################################################### #END Machine dependent Makefile fragment for arm ###################################################################### diff --git a/iokit/conf/Makefile.arm64 b/iokit/conf/Makefile.arm64 index 184148ea4..f40ee0aaf 100644 --- a/iokit/conf/Makefile.arm64 +++ b/iokit/conf/Makefile.arm64 @@ -1,17 +1,29 @@ ###################################################################### #BEGIN Machine dependent Makefile fragment for arm ###################################################################### - -IODMACommand.cpo_CXXWARNFLAGS_ADD += -Wno-cast-align -IODataQueue.cpo_CXXWARNFLAGS_ADD += -Wno-cast-align -IOMemoryDescriptor.cpo_CXXWARNFLAGS_ADD += -Wno-cast-align -IONVRAM.cpo_CXXWARNFLAGS_ADD += -Wno-cast-align -IOPMrootDomain.cpo_CXXWARNFLAGS_ADD += -Wno-cast-align -IOSharedDataQueue.cpo_CXXWARNFLAGS_ADD += -Wno-cast-align -IOUserClient.cpo_CXXWARNFLAGS_ADD += -Wno-cast-align +# +# XXX: CFLAGS +# +CWARNFLAGS = $(CWARNFLAGS_STD) -Wno-unused-parameter -Wshorten-64-to-32 -Wimplicit-int-conversion +CXXWARNFLAGS = $(CXXWARNFLAGS_STD) -Wno-unused-parameter -Wno-cast-qual -Wno-shadow -Wshorten-64-to-32 -Wimplicit-int-conversion # Files that must go in the __HIB segment: -HIB_FILES= +UNCONFIGURED_HIB_FILES= \ + IOHibernateRestoreKernel.o + +HIB_FILES=$(filter $(UNCONFIGURED_HIB_FILES),$(OBJS)) + +# Unconfigured __HIB files must be Mach-O for "setsegname" +# KASAN must be disabled for unconfigured __HIB files +# because the kasan runtime isn't available during hibernation resume +IOHibernateRestoreKernel.o_CFLAGS_ADD += $(CFLAGS_NOLTO_FLAG) -fno-sanitize=address -UKASAN +# Stack protector and stack check must be disabled because the stack protector runtime isn't available +IOHibernateRestoreKernel.o_CFLAGS_ADD += -fno-stack-protector -fno-stack-check +# signing keys aren't set up yet, so ptrauth must be disabled +IOHibernateRestoreKernel.o_CFLAGS_ADD += -fno-ptrauth-calls + +IOHibernateIO.cpo_CFLAGS_ADD += -I$(SRCROOT)/osfmk +IOHibernateRestoreKernel.o_CFLAGS_ADD += -I$(SRCROOT)/osfmk ###################################################################### #END Machine dependent Makefile fragment for arm diff --git a/iokit/conf/Makefile.template b/iokit/conf/Makefile.template index b58cd7ee8..64215c70d 100644 --- a/iokit/conf/Makefile.template +++ b/iokit/conf/Makefile.template @@ -21,32 +21,10 @@ CFLAGS+= -include meta_features.h -DDRIVER_PRIVATE \ SFLAGS+= -include meta_features.h #-DIOKITDEBUG=-1 -CWARNFLAGS = $(CWARNFLAGS_STD) -Wno-unused-parameter -CXXWARNFLAGS = $(CXXWARNFLAGS_STD) -Wno-unused-parameter -Wno-cast-qual -Wno-shadow - -# Objects that don't want -Wcast-align warning (8474835) -IOHibernateRestoreKernel.o_CWARNFLAGS_ADD = -Wno-cast-align -CXXOBJS_NO_CAST_ALIGN = \ - IODMACommand.cpo \ - IODataQueue.cpo \ - IOHibernateIO.cpo \ - IOMemoryDescriptor.cpo \ - IONVRAM.cpo \ - IOPMrootDomain.cpo \ - IOSharedDataQueue.cpo \ - IOUserClient.cpo - -$(foreach file,$(CXXOBJS_NO_CAST_ALIGN),$(eval $(call add_perfile_cxxflags,$(file),-Wno-cast-align))) - CFLAGS_RELEASE += -DIOASSERT=0 CFLAGS_DEVELOPMENT += -DIOASSERT=1 CFLAGS_DEBUG += -DIOASSERT=1 -IOUserClient.cpo_CXXWARNFLAGS_ADD += -Wno-missing-prototypes -IOKitDebug.cpo_CXXWARNFLAGS_ADD += -Wno-missing-prototypes -IOKitBSDInit.cpo_CXXWARNFLAGS_ADD += -Wno-missing-prototypes -Wno-documentation -IOPMrootDomain.cpo_CXXWARNFLAGS_ADD += -Wno-missing-prototypes - # # Directories for generated files # @@ -77,6 +55,148 @@ COMP_SUBDIRS = \ %MACHDEP +# +# 
Diagnostic opt-outs. We need to make this list empty. +# +# DO NOT ADD MORE HERE. +# +# -Wno-cast-align +IODataQueue.cpo_CXXWARNFLAGS_ADD += -Wno-cast-align +IOHibernateIO.cpo_CXXWARNFLAGS_ADD += -Wno-cast-align +IOHibernateRestoreKernel.o_CFLAGS_ADD += -Wno-cast-align +IONVRAM.cpo_CXXWARNFLAGS_ADD += -Wno-cast-align +IOSharedDataQueue.cpo_CXXWARNFLAGS_ADD += -Wno-cast-align +IOUserClient.cpo_CXXWARNFLAGS_ADD += -Wno-cast-align +# -Wno-cast-qual +DINetBootHook.cpo_CXXWARNFLAGS_ADD += -Wno-cast-qual +IOCatalogue.cpo_CXXWARNFLAGS_ADD += -Wno-cast-qual +IODMAController.cpo_CXXWARNFLAGS_ADD += -Wno-cast-qual +IODataQueue.cpo_CXXWARNFLAGS_ADD += -Wno-cast-qual +IODeviceTreeSupport.cpo_CXXWARNFLAGS_ADD += -Wno-cast-qual +IOHibernateIO.cpo_CXXWARNFLAGS_ADD += -Wno-cast-qual +IOInterruptController.cpo_CXXWARNFLAGS_ADD += -Wno-cast-qual +IOKitBSDInit.cpo_CXXWARNFLAGS_ADD += -Wno-cast-qual +IOKitDebug.cpo_CXXWARNFLAGS_ADD += -Wno-cast-qual +IOLib.cpo_CXXWARNFLAGS_ADD += -Wno-cast-qual +IOLocks.cpo_CXXWARNFLAGS_ADD += -Wno-cast-qual +IOMapper.cpo_CXXWARNFLAGS_ADD += -Wno-cast-qual +IOMemoryDescriptor.cpo_CXXWARNFLAGS_ADD += -Wno-cast-qual +IONVRAM.cpo_CXXWARNFLAGS_ADD += -Wno-cast-qual +IOPMrootDomain.cpo_CXXWARNFLAGS_ADD += -Wno-cast-qual +IOPlatformExpert.cpo_CXXWARNFLAGS_ADD += -Wno-cast-qual +IOPolledInterface.cpo_CXXWARNFLAGS_ADD += -Wno-cast-qual +IORegistryEntry.cpo_CXXWARNFLAGS_ADD += -Wno-cast-qual +IOService.cpo_CXXWARNFLAGS_ADD += -Wno-cast-qual +IOServicePM.cpo_CXXWARNFLAGS_ADD += -Wno-cast-qual +IOSharedDataQueue.cpo_CXXWARNFLAGS_ADD += -Wno-cast-qual +IOSimpleReporter.cpo_CXXWARNFLAGS_ADD += -Wno-cast-qual +IOSkywalkSupport.cpo_CXXWARNFLAGS_ADD += -Wno-cast-qual +IOStateReporter.cpo_CXXWARNFLAGS_ADD += -Wno-cast-qual +IOUserClient.cpo_CXXWARNFLAGS_ADD += -Wno-cast-qual +IOUserServer.cpo_CXXWARNFLAGS_ADD += -Wno-cast-qual +RootDomainUserClient.cpo_CXXWARNFLAGS_ADD += -Wno-cast-qual +# -Wno-missing-prototypes +IOKitBSDInit.cpo_CXXWARNFLAGS_ADD += -Wno-missing-prototypes +IOKitDebug.cpo_CXXWARNFLAGS_ADD += -Wno-missing-prototypes +IOPMrootDomain.cpo_CXXWARNFLAGS_ADD += -Wno-missing-prototypes +IOUserClient.cpo_CXXWARNFLAGS_ADD += -Wno-missing-prototypes +# -Wno-shadow +IOHibernateIO.cpo_CXXWARNFLAGS_ADD += -Wno-shadow +IOKitDebug.cpo_CXXWARNFLAGS_ADD += -Wno-shadow +IOMemoryDescriptor.cpo_CXXWARNFLAGS_ADD += -Wno-shadow +IOMultiMemoryDescriptor.cpo_CXXWARNFLAGS_ADD += -Wno-shadow +IOPMrootDomain.cpo_CXXWARNFLAGS_ADD += -Wno-shadow +IOPolledInterface.cpo_CXXWARNFLAGS_ADD += -Wno-shadow +IOService.cpo_CXXWARNFLAGS_ADD += -Wno-shadow +IOServicePM.cpo_CXXWARNFLAGS_ADD += -Wno-shadow +IOSharedDataQueue.cpo_CXXWARNFLAGS_ADD += -Wno-shadow +IOTimerEventSource.cpo_CXXWARNFLAGS_ADD += -Wno-shadow +IOUserServer.cpo_CXXWARNFLAGS_ADD += -Wno-shadow +TestIOMemoryDescriptor.cpo_CXXWARNFLAGS_ADD += -Wno-shadow +Tests.cpo_CXXWARNFLAGS_ADD += -Wno-shadow +# -Wno-shadow-field-in-constructor +IOUserServer.cpo_CXXWARNFLAGS_ADD += -Wno-shadow-field-in-constructor +# -Wno-sign-conversion +DINetBootHook.cpo_CXXWARNFLAGS_ADD += -Wno-sign-conversion +IOBufferMemoryDescriptor.cpo_CXXWARNFLAGS_ADD += -Wno-sign-conversion +IOCPU.cpo_CXXWARNFLAGS_ADD += -Wno-sign-conversion +IOCatalogue.cpo_CXXWARNFLAGS_ADD += -Wno-sign-conversion +IOCommandQueue.cpo_CXXWARNFLAGS_ADD += -Wno-sign-conversion +IODMACommand.cpo_CXXWARNFLAGS_ADD += -Wno-sign-conversion +IODMAEventSource.cpo_CXXWARNFLAGS_ADD += -Wno-sign-conversion +IODeviceTreeSupport.cpo_CXXWARNFLAGS_ADD += -Wno-sign-conversion 
+IOFilterInterruptEventSource.cpo_CXXWARNFLAGS_ADD += -Wno-sign-conversion +IOHibernateIO.cpo_CXXWARNFLAGS_ADD += -Wno-sign-conversion +IOHibernateRestoreKernel.o_CFLAGS_ADD += -Wno-sign-conversion +IOHistogramReporter.cpo_CXXWARNFLAGS_ADD += -Wno-sign-conversion +IOInterleavedMemoryDescriptor.cpo_CXXWARNFLAGS_ADD += -Wno-sign-conversion +IOInterruptAccounting.cpo_CXXWARNFLAGS_ADD += -Wno-sign-conversion +IOInterruptController.cpo_CXXWARNFLAGS_ADD += -Wno-sign-conversion +IOInterruptEventSource.cpo_CXXWARNFLAGS_ADD += -Wno-sign-conversion +IOKitBSDInit.cpo_CXXWARNFLAGS_ADD += -Wno-sign-conversion +IOKitDebug.cpo_CXXWARNFLAGS_ADD += -Wno-sign-conversion +IOLib.cpo_CXXWARNFLAGS_ADD += -Wno-sign-conversion +IOMemoryDescriptor.cpo_CXXWARNFLAGS_ADD += -Wno-sign-conversion +IOMultiMemoryDescriptor.cpo_CXXWARNFLAGS_ADD += -Wno-sign-conversion +IONVRAM.cpo_CXXWARNFLAGS_ADD += -Wno-sign-conversion +IOPMPowerSource.cpo_CXXWARNFLAGS_ADD += -Wno-sign-conversion +IOPMrootDomain.cpo_CXXWARNFLAGS_ADD += -Wno-sign-conversion +IOPlatformActions.cpo_CXXWARNFLAGS_ADD += -Wno-sign-conversion +IOPlatformExpert.cpo_CXXWARNFLAGS_ADD += -Wno-sign-conversion +IOPolledInterface.cpo_CXXWARNFLAGS_ADD += -Wno-sign-conversion +IORTC.cpo_CXXWARNFLAGS_ADD += -Wno-sign-conversion +IORegistryEntry.cpo_CXXWARNFLAGS_ADD += -Wno-sign-conversion +IOService.cpo_CXXWARNFLAGS_ADD += -Wno-sign-conversion +IOServicePM.cpo_CXXWARNFLAGS_ADD += -Wno-sign-conversion +IOSkywalkSupport.cpo_CXXWARNFLAGS_ADD += -Wno-sign-conversion +IOStartIOKit.cpo_CXXWARNFLAGS_ADD += -Wno-sign-conversion +IOStatistics.cpo_CXXWARNFLAGS_ADD += -Wno-sign-conversion +IOStringFuncs.o_CFLAGS_ADD += -Wno-sign-conversion +IOTimerEventSource.cpo_CXXWARNFLAGS_ADD += -Wno-sign-conversion +IOUserClient.cpo_CXXWARNFLAGS_ADD += -Wno-sign-conversion +IOUserServer.cpo_CXXWARNFLAGS_ADD += -Wno-sign-conversion +IOWorkLoop.cpo_CXXWARNFLAGS_ADD += -Wno-sign-conversion +RootDomainUserClient.cpo_CXXWARNFLAGS_ADD += -Wno-sign-conversion +TestIOMemoryDescriptor.cpo_CXXWARNFLAGS_ADD += -Wno-sign-conversion +# -Wno-unused-parameter +IOBufferMemoryDescriptor.cpo_CXXWARNFLAGS_ADD += -Wno-unused-parameter +IOCatalogue.cpo_CXXWARNFLAGS_ADD += -Wno-unused-parameter +IOCommandGate.cpo_CXXWARNFLAGS_ADD += -Wno-unused-parameter +IODMACommand.cpo_CXXWARNFLAGS_ADD += -Wno-unused-parameter +IODeviceTreeSupport.cpo_CXXWARNFLAGS_ADD += -Wno-unused-parameter +IOFilterInterruptEventSource.cpo_CXXWARNFLAGS_ADD += -Wno-unused-parameter +IOHibernateIO.cpo_CXXWARNFLAGS_ADD += -Wno-unused-parameter +IOHibernateRestoreKernel.o_CFLAGS_ADD += -Wno-unused-parameter +IOInterruptController.cpo_CXXWARNFLAGS_ADD += -Wno-unused-parameter +IOInterruptEventSource.cpo_CXXWARNFLAGS_ADD += -Wno-unused-parameter +IOKitBSDInit.cpo_CXXWARNFLAGS_ADD += -Wno-unused-parameter +IOKitDebug.cpo_CXXWARNFLAGS_ADD += -Wno-unused-parameter +IOLib.cpo_CXXWARNFLAGS_ADD += -Wno-unused-parameter +IOLocks.cpo_CXXWARNFLAGS_ADD += -Wno-unused-parameter +IOMemoryDescriptor.cpo_CXXWARNFLAGS_ADD += -Wno-unused-parameter +IOMultiMemoryDescriptor.cpo_CXXWARNFLAGS_ADD += -Wno-unused-parameter +IONVRAM.cpo_CXXWARNFLAGS_ADD += -Wno-unused-parameter +IOPMrootDomain.cpo_CXXWARNFLAGS_ADD += -Wno-unused-parameter +IOPerfControl.cpo_CXXWARNFLAGS_ADD += -Wno-unused-parameter +IOPlatformExpert.cpo_CXXWARNFLAGS_ADD += -Wno-unused-parameter +IOPolledInterface.cpo_CXXWARNFLAGS_ADD += -Wno-unused-parameter +IORTC.cpo_CXXWARNFLAGS_ADD += -Wno-unused-parameter +IORegistryEntry.cpo_CXXWARNFLAGS_ADD += -Wno-unused-parameter 
+IOReporter.cpo_CXXWARNFLAGS_ADD += -Wno-unused-parameter +IOService.cpo_CXXWARNFLAGS_ADD += -Wno-unused-parameter +IOServicePM.cpo_CXXWARNFLAGS_ADD += -Wno-unused-parameter +IOSkywalkSupport.cpo_CXXWARNFLAGS_ADD += -Wno-unused-parameter +IOStateReporter.cpo_CXXWARNFLAGS_ADD += -Wno-unused-parameter +IOUserClient.cpo_CXXWARNFLAGS_ADD += -Wno-unused-parameter +IOUserServer.cpo_CXXWARNFLAGS_ADD += -Wno-unused-parameter +IOWatchDogTimer.cpo_CXXWARNFLAGS_ADD += -Wno-unused-parameter +IOWorkLoop.cpo_CXXWARNFLAGS_ADD += -Wno-unused-parameter +PassthruInterruptController.cpo_CXXWARNFLAGS_ADD += -Wno-unused-parameter +RootDomainUserClient.cpo_CXXWARNFLAGS_ADD += -Wno-unused-parameter +TestIOMemoryDescriptor.cpo_CXXWARNFLAGS_ADD += -Wno-unused-parameter +Tests.cpo_CXXWARNFLAGS_ADD += -Wno-unused-parameter +IODMACommand.iig.cpo_CXXWARNFLAGS_ADD += -Wno-duplicate-decl-specifier + # Rebuild if per-file overrides change ${OBJS}: $(firstword $(MAKEFILE_LIST)) @@ -97,7 +217,7 @@ $(COMPONENT).filelist: $(OBJS) $(SEG_HACK) -n __HIB -o $${hib_file}__ $${hib_file} || exit 1; \ mv $${hib_file}__ $${hib_file} || exit 1; \ done - $(call makelog,$(ColorL)LDFILELIST$(Color0) $(ColorLF)$(COMPONENT)$(Color0)) + @$(LOG_LDFILELIST) "$(COMPONENT)" $(_v)for obj in ${OBJS}; do \ $(ECHO) $(TARGET)/$(CURRENT_KERNEL_CONFIG)/$${obj}; \ done > $(COMPONENT).filelist diff --git a/iokit/conf/Makefile.x86_64 b/iokit/conf/Makefile.x86_64 index 587fe4264..25cc81936 100644 --- a/iokit/conf/Makefile.x86_64 +++ b/iokit/conf/Makefile.x86_64 @@ -1,6 +1,11 @@ ###################################################################### #BEGIN Machine dependent Makefile fragment for x86_64 ###################################################################### +# +# XXX: CFLAGS +# +CWARNFLAGS = $(CWARNFLAGS_STD) -Wno-unused-parameter -Wshorten-64-to-32 -Wimplicit-int-conversion +CXXWARNFLAGS = $(CXXWARNFLAGS_STD) -Wno-unused-parameter -Wno-cast-qual -Wno-shadow -Wshorten-64-to-32 -Wimplicit-int-conversion # Files that must go in the __HIB segment: UNCONFIGURED_HIB_FILES= \ diff --git a/iokit/conf/files b/iokit/conf/files index 48db7bedf..dd1286996 100644 --- a/iokit/conf/files +++ b/iokit/conf/files @@ -46,8 +46,9 @@ iokit/Kernel/IOPMinformee.cpp optional iokitcpp iokit/Kernel/IOPMinformeeList.cpp optional iokitcpp iokit/Kernel/IOPMPowerStateQueue.cpp optional iokitcpp iokit/Kernel/IOCatalogue.cpp optional iokitcpp -iokit/Kernel/IOPMPowerSource.cpp optional iokitcpp -iokit/Kernel/IOPMPowerSourceList.cpp optional iokitcpp +iokit/Kernel/IOPMPowerSource.cpp optional iokitcpp +iokit/Kernel/IOPMPowerSourceList.cpp optional iokitcpp +iokit/Kernel/IOPMGR.cpp optional iokitcpp iokit/Kernel/IOPolledInterface.cpp optional iokitcpp iokit/Kernel/IOWorkLoop.cpp optional iokitcpp @@ -74,13 +75,16 @@ iokit/Kernel/IORangeAllocator.cpp optional iokitcpp iokit/Kernel/IOSubMemoryDescriptor.cpp optional iokitcpp iokit/Kernel/IOPlatformExpert.cpp optional iokitcpp +iokit/Kernel/IOPlatformIO.cpp optional iokitcpp iokit/Kernel/IOCPU.cpp optional iokitcpp +iokit/Kernel/IOPlatformActions.cpp optional iokitcpp iokit/Kernel/IONVRAM.cpp optional iokitcpp iokit/Kernel/IODMAController.cpp optional iokitcpp iokit/Kernel/IOInterruptController.cpp optional iokitcpp +iokit/Kernel/PassthruInterruptController.cpp optional iokitcpp iokit/Kernel/IOUserClient.cpp optional iokitcpp diff --git a/iokit/conf/files.arm64 b/iokit/conf/files.arm64 index 7269e46f8..f731d43a3 100644 --- a/iokit/conf/files.arm64 +++ b/iokit/conf/files.arm64 @@ -2,3 +2,6 @@ 
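The per-file -Wno-shorten-64-to-32, -Wno-implicit-int-conversion and -Wno-sign-conversion opt-outs above are explicitly marked as a list to burn down. The usual remediation has the same shape as the UINT_MAX guard added to IODataQueueDispatchSource::Create_Impl earlier in this patch: range-check the wide value, then narrow with an explicit cast. A small self-contained sketch of that helper:

#include <cassert>
#include <cstdint>

// Reject out-of-range values, then cast explicitly so the narrowing is intentional
// and -Wshorten-64-to-32 stays quiet for the right reason.
bool narrow_to_u32(uint64_t byte_count, uint32_t *out) {
    if (byte_count > UINT32_MAX) {
        return false;                          // caller sees kIOReturnBadArgument-style failure
    }
    *out = static_cast<uint32_t>(byte_count);
    return true;
}

int main() {
    uint32_t v = 0;
    assert(narrow_to_u32(4096, &v) && v == 4096);
    assert(!narrow_to_u32(UINT64_MAX, &v));
    return 0;
}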
iokit/Families/IONVRAM/IONVRAMController.cpp optional iokitcpp
# Power Domains
iokit/Kernel/IOPMrootDomain.cpp optional iokitcpp
+
+# SMP
+iokit/Kernel/arm/AppleARMSMP.cpp optional iokitcpp
diff --git a/iokit/conf/files.x86_64 b/iokit/conf/files.x86_64
index 457354b48..33e7bcb76 100644
--- a/iokit/conf/files.x86_64
+++ b/iokit/conf/files.x86_64
@@ -3,9 +3,6 @@
 iokit/Kernel/IOSyncer.cpp optional iokitcpp
-# Shared lock
-
-iokit/Kernel/x86_64/IOSharedLock.s standard
 iokit/Kernel/x86_64/IOAsmSupport.s standard
 # Power Domains
diff --git a/libkdd/kcdata.h b/libkdd/kcdata.h
index f00a3be8f..f2eaf624c 100644
--- a/libkdd/kcdata.h
+++ b/libkdd/kcdata.h
@@ -190,6 +190,52 @@
 * kcdata_add_type_definition(kcdata_p, KCTYPE_SAMPLE_DISK_IO_STATS, "sample_disk_io_stats",
 * &disk_io_stats_def[0], sizeof(disk_io_stats_def)/sizeof(struct kcdata_subtype_descriptor));
 *
+ * Feature description: Compression
+ * --------------------
+ * In order to avoid keeping a large amount of memory reserved for a panic stackshot, kcdata has support
+ * for compressing the buffer in a streaming fashion. New data pushed to the kcdata buffer will be
+ * automatically compressed using an algorithm selected by the API user (currently, we only support
+ * pass-through and zlib, in the future we plan to add WKDM support, see: 57913859).
+ *
+ * To start using compression, call:
+ * kcdata_init_compress(kcdata_p, hdr_tag, memcpy_f, comp_type);
+ * where:
+ * `kcdata_p` is the kcdata buffer that will be used
+ * `hdr_tag` is the usual header tag denoting what type of kcdata buffer this will be
+ * `memcpy_f` a memcpy(3) function to use to copy into the buffer, optional.
+ * `comp_type` is the compression type, see KCDCT_ZLIB for an example.
+ *
+ * Once compression is initialized:
+ * (1) all self-describing APIs will automatically compress
+ * (2) you can now use the following APIs to compress data into the buffer:
+ * (None of the following will compress unless kcdata_init_compress() has been called)
+ *
+ * - kcdata_push_data(kcdata_descriptor_t data, uint32_t type, uint32_t size, const void *input_data)
+ * Pushes the buffer of kctype @type at [@input_data, @input_data + @size]
+ * into the kcdata buffer @data, compressing if needed.
+ *
+ * - kcdata_push_array(kcdata_descriptor_t data, uint32_t type_of_element,
+ * uint32_t size_of_element, uint32_t count, const void *input_data)
+ * Pushes the array found at @input_data, with element type @type_of_element, where
+ * each element is of size @size_of_element and there are @count elements into the kcdata buffer
+ * at @data.
+ *
+ * - kcdata_compression_window_open/close(kcdata_descriptor_t data)
+ * In case the data you are trying to push to the kcdata buffer @data is difficult to predict,
+ * you can open a "compression window". Between an open and a close, no compression will be done.
+ * Once you close the window, the underlying compression algorithm will compress the data into the buffer
+ * and automatically rewind the current end marker of the kcdata buffer.
+ * There is ASCII art in kern_cdata.c to aid the reader in understanding
+ * this.
+ *
+ * - kcdata_finish_compression(kcdata_descriptor_t data)
+ * Must be called at the end to flush any underlying buffers used by the compression algorithms.
+ * This function will also add some statistics about the compression to the buffer which helps with
+ * decompressing later.
+ * + * Once you are done with the kcdata buffer, call kcdata_deinit_compress to + * free any buffers that may have been allocated internal to the compression + * algorithm. */ @@ -401,6 +447,7 @@ struct kcdata_type_definition { #define KCDATA_TYPE_PID 0x36u /* int32_t */ #define KCDATA_TYPE_PROCNAME 0x37u /* char * */ #define KCDATA_TYPE_NESTED_KCDATA 0x38u /* nested kcdata buffer */ +#define KCDATA_TYPE_LIBRARY_AOTINFO 0x39u /* struct user64_dyld_aot_info */ #define KCDATA_TYPE_BUFFER_END 0xF19158EDu @@ -410,16 +457,18 @@ struct kcdata_type_definition { * numbers are byteswaps of each other */ -#define KCDATA_BUFFER_BEGIN_CRASHINFO 0xDEADF157u /* owner: corpses/task_corpse.h */ - /* type-range: 0x800 - 0x8ff */ -#define KCDATA_BUFFER_BEGIN_STACKSHOT 0x59a25807u /* owner: sys/stackshot.h */ - /* type-range: 0x900 - 0x93f */ -#define KCDATA_BUFFER_BEGIN_DELTA_STACKSHOT 0xDE17A59Au /* owner: sys/stackshot.h */ - /* type-range: 0x940 - 0x9ff */ -#define KCDATA_BUFFER_BEGIN_OS_REASON 0x53A20900u /* owner: sys/reason.h */ - /* type-range: 0x1000-0x103f */ -#define KCDATA_BUFFER_BEGIN_XNUPOST_CONFIG 0x1e21c09fu /* owner: osfmk/tests/kernel_tests.c */ - /* type-range: 0x1040-0x105f */ +#define KCDATA_BUFFER_BEGIN_CRASHINFO 0xDEADF157u /* owner: corpses/task_corpse.h */ + /* type-range: 0x800 - 0x8ff */ +#define KCDATA_BUFFER_BEGIN_STACKSHOT 0x59a25807u /* owner: sys/stackshot.h */ + /* type-range: 0x900 - 0x93f */ +#define KCDATA_BUFFER_BEGIN_COMPRESSED 0x434f4d50u /* owner: sys/stackshot.h */ + /* type-range: 0x900 - 0x93f */ +#define KCDATA_BUFFER_BEGIN_DELTA_STACKSHOT 0xDE17A59Au /* owner: sys/stackshot.h */ + /* type-range: 0x940 - 0x9ff */ +#define KCDATA_BUFFER_BEGIN_OS_REASON 0x53A20900u /* owner: sys/reason.h */ + /* type-range: 0x1000-0x103f */ +#define KCDATA_BUFFER_BEGIN_XNUPOST_CONFIG 0x1e21c09fu /* owner: osfmk/tests/kernel_tests.c */ + /* type-range: 0x1040-0x105f */ /* next type range number available 0x1060 */ /**************** definitions for XNUPOST *********************/ @@ -477,6 +526,12 @@ struct kcdata_type_definition { #define STACKSHOT_KCTYPE_SYS_SHAREDCACHE_LAYOUT 0x927u /* same as KCDATA_TYPE_LIBRARY_LOADINFO64 */ #define STACKSHOT_KCTYPE_THREAD_DISPATCH_QUEUE_LABEL 0x928u /* dispatch queue label */ #define STACKSHOT_KCTYPE_THREAD_TURNSTILEINFO 0x929u /* struct stackshot_thread_turnstileinfo */ +#define STACKSHOT_KCTYPE_TASK_CPU_ARCHITECTURE 0x92au /* struct stackshot_cpu_architecture */ +#define STACKSHOT_KCTYPE_LATENCY_INFO 0x92bu /* struct stackshot_latency_collection */ +#define STACKSHOT_KCTYPE_LATENCY_INFO_TASK 0x92cu /* struct stackshot_latency_task */ +#define STACKSHOT_KCTYPE_LATENCY_INFO_THREAD 0x92du /* struct stackshot_latency_thread */ +#define STACKSHOT_KCTYPE_LOADINFO64_TEXT_EXEC 0x92eu /* TEXT_EXEC load info -- same as KCDATA_TYPE_LIBRARY_LOADINFO64 */ +#define STACKSHOT_KCTYPE_AOTCACHE_LOADINFO 0x92fu /* struct dyld_aot_cache_uuid_info */ #define STACKSHOT_KCTYPE_TASK_DELTA_SNAPSHOT 0x940u /* task_delta_snapshot_v2 */ #define STACKSHOT_KCTYPE_THREAD_DELTA_SNAPSHOT 0x941u /* thread_delta_snapshot_v* */ @@ -508,6 +563,13 @@ struct dyld_uuid_info_64_v2 { uint64_t imageSlidBaseAddress; /* slid base address of image */ }; +struct dyld_aot_cache_uuid_info { + uint64_t x86SlidBaseAddress; /* slid base address of x86 shared cache */ + uuid_t x86UUID; /* UUID of x86 shared cache */ + uint64_t aotSlidBaseAddress; /* slide base address of aot cache */ + uuid_t aotUUID; /* UUID of aot shared cache */ +}; + struct user32_dyld_uuid_info { uint32_t imageLoadAddress; 
/* base address image is mapped into */ uuid_t imageUUID; /* UUID of image */ @@ -518,6 +580,15 @@ struct user64_dyld_uuid_info { uuid_t imageUUID; /* UUID of image */ }; +#define DYLD_AOT_IMAGE_KEY_SIZE 32 + +struct user64_dyld_aot_info { + uint64_t x86LoadAddress; + uint64_t aotLoadAddress; + uint64_t aotImageSize; + uint8_t aotImageKey[DYLD_AOT_IMAGE_KEY_SIZE]; +}; + enum task_snapshot_flags { /* k{User,Kernel}64_p (values 0x1 and 0x2) are defined in generic_snapshot_flags */ kTaskRsrcFlagged = 0x4, // In the EXC_RESOURCE danger zone? @@ -546,6 +617,7 @@ enum task_snapshot_flags { /* 0x2000000 unused */ kTaskIsDirtyTracked = 0x4000000, kTaskAllowIdleExit = 0x8000000, + kTaskIsTranslated = 0x10000000, }; enum thread_snapshot_flags { @@ -823,10 +895,12 @@ typedef struct stackshot_thread_turnstileinfo { uint64_t turnstile_context; /* Associated data (either thread id, or workq addr) */ uint8_t turnstile_priority; uint8_t number_of_hops; -#define STACKSHOT_TURNSTILE_STATUS_UNKNOWN (1 << 0) /* The final inheritor is unknown (bug?) */ -#define STACKSHOT_TURNSTILE_STATUS_LOCKED_WAITQ (1 << 1) /* A waitq was found to be locked */ -#define STACKSHOT_TURNSTILE_STATUS_WORKQUEUE (1 << 2) /* The final inheritor is a workqueue */ -#define STACKSHOT_TURNSTILE_STATUS_THREAD (1 << 3) /* The final inheritor is a thread */ +#define STACKSHOT_TURNSTILE_STATUS_UNKNOWN 0x01 /* The final inheritor is unknown (bug?) */ +#define STACKSHOT_TURNSTILE_STATUS_LOCKED_WAITQ 0x02 /* A waitq was found to be locked */ +#define STACKSHOT_TURNSTILE_STATUS_WORKQUEUE 0x04 /* The final inheritor is a workqueue */ +#define STACKSHOT_TURNSTILE_STATUS_THREAD 0x08 /* The final inheritor is a thread */ +#define STACKSHOT_TURNSTILE_STATUS_BLOCKED_ON_TASK 0x10 /* blocked on task, dind't find thread */ +#define STACKSHOT_TURNSTILE_STATUS_HELD_IPLOCK 0x20 /* the ip_lock was held */ uint64_t turnstile_flags; } __attribute__((packed)) thread_turnstileinfo_t; @@ -838,12 +912,52 @@ typedef struct stackshot_thread_turnstileinfo { #define STACKSHOT_WAITOWNER_THREQUESTED (UINT64_MAX - 6) /* workloop waiting for a new worker thread */ #define STACKSHOT_WAITOWNER_SUSPENDED (UINT64_MAX - 7) /* workloop is suspended */ +struct stackshot_cpu_architecture { + int32_t cputype; + int32_t cpusubtype; +} __attribute__((packed)); struct stack_snapshot_stacktop { uint64_t sp; uint8_t stack_contents[8]; }; +/* only collected if STACKSHOT_COLLECTS_LATENCY_INFO is set to !0 */ +struct stackshot_latency_collection { + uint64_t latency_version; + uint64_t setup_latency; + uint64_t total_task_iteration_latency; + uint64_t total_terminated_task_iteration_latency; +} __attribute__((packed)); + +/* only collected if STACKSHOT_COLLECTS_LATENCY_INFO is set to !0 */ +struct stackshot_latency_task { + uint64_t task_uniqueid; + uint64_t setup_latency; + uint64_t task_thread_count_loop_latency; + uint64_t task_thread_data_loop_latency; + uint64_t cur_tsnap_latency; + uint64_t pmap_latency; + uint64_t bsd_proc_ids_latency; + uint64_t misc_latency; + uint64_t misc2_latency; + uint64_t end_latency; +} __attribute__((packed)); + +/* only collected if STACKSHOT_COLLECTS_LATENCY_INFO is set to !0 */ +struct stackshot_latency_thread { + uint64_t thread_id; + uint64_t cur_thsnap1_latency; + uint64_t dispatch_serial_latency; + uint64_t dispatch_label_latency; + uint64_t cur_thsnap2_latency; + uint64_t thread_name_latency; + uint64_t sur_times_latency; + uint64_t user_stack_latency; + uint64_t kernel_stack_latency; + uint64_t misc_latency; +} __attribute__((packed)); + 
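Pulling together the streaming-compression API documented near the top of this header's additions, the sketch below shows the call order a kernel client would follow. It is kernel-only code and not buildable stand-alone; struct my_record and MY_RECORD_TYPE are hypothetical placeholders, kcdata_deinit_compress is assumed to take the descriptor, and only the calls and argument shapes spelled out in the documentation comment are used.

/* Call-order sketch, assumptions as noted in the lead-in above. */
void example_compressed_buffer(kcdata_descriptor_t kcdata_p)
{
	struct my_record rec;                 /* hypothetical payload */

	/* 1. Switch the descriptor into streaming compression (zlib). */
	kcdata_init_compress(kcdata_p, KCDATA_BUFFER_BEGIN_STACKSHOT, NULL, KCDCT_ZLIB);

	/* 2. Push fixed-size data; it is compressed on the way in. */
	kcdata_push_data(kcdata_p, MY_RECORD_TYPE, sizeof(rec), &rec);

	/* 3. For data that is hard to size up front, bracket it in a window:
	 *    nothing is compressed until the window closes. */
	kcdata_compression_window_open(kcdata_p);
	/* ... emit variable-length items here ... */
	kcdata_compression_window_close(kcdata_p);

	/* 4. Flush the compressor and append compression statistics. */
	kcdata_finish_compression(kcdata_p);

	/* 5. Release any buffers the compressor allocated internally. */
	kcdata_deinit_compress(kcdata_p);
}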
/**************** definitions for crashinfo *********************/ @@ -913,8 +1027,15 @@ struct crashinfo_proc_uniqidentifierinfo { #define TASK_CRASHINFO_LEDGER_WIRED_MEM 0x82A /* uint64_t */ #define TASK_CRASHINFO_PROC_PERSONA_ID 0x82B /* uid_t */ #define TASK_CRASHINFO_MEMORY_LIMIT_INCREASE 0x82C /* uint32_t */ - - +#define TASK_CRASHINFO_LEDGER_TAGGED_FOOTPRINT 0x82D /* uint64_t */ +#define TASK_CRASHINFO_LEDGER_TAGGED_FOOTPRINT_COMPRESSED 0x82E /* uint64_t */ +#define TASK_CRASHINFO_LEDGER_MEDIA_FOOTPRINT 0x82F /* uint64_t */ +#define TASK_CRASHINFO_LEDGER_MEDIA_FOOTPRINT_COMPRESSED 0x830 /* uint64_t */ +#define TASK_CRASHINFO_LEDGER_GRAPHICS_FOOTPRINT 0x831 /* uint64_t */ +#define TASK_CRASHINFO_LEDGER_GRAPHICS_FOOTPRINT_COMPRESSED 0x832 /* uint64_t */ +#define TASK_CRASHINFO_LEDGER_NEURAL_FOOTPRINT 0x833 /* uint64_t */ +#define TASK_CRASHINFO_LEDGER_NEURAL_FOOTPRINT_COMPRESSED 0x834 /* uint64_t */ +#define TASK_CRASHINFO_MEMORYSTATUS_EFFECTIVE_PRIORITY 0x835 /* int32_t */ #define TASK_CRASHINFO_END KCDATA_TYPE_BUFFER_END diff --git a/libkdd/kcdtypes.c b/libkdd/kcdtypes.c index cafecb641..cca45ba6c 100644 --- a/libkdd/kcdtypes.c +++ b/libkdd/kcdtypes.c @@ -850,6 +850,14 @@ kcdata_get_typedescription(unsigned type_id, uint8_t * buffer, uint32_t buffer_s break; } + case STACKSHOT_KCTYPE_TASK_CPU_ARCHITECTURE: { + i = 0; + _SUBTYPE(KC_ST_INT32, struct stackshot_cpu_architecture, cputype); + _SUBTYPE(KC_ST_INT32, struct stackshot_cpu_architecture, cpusubtype); + setup_type_definition(retval, type_id, i, "task_cpu_architecture"); + break; + } + default: retval = NULL; break; diff --git a/libkdd/kdd.xcodeproj/project.pbxproj b/libkdd/kdd.xcodeproj/project.pbxproj index 33575c38d..10b6503c1 100644 --- a/libkdd/kdd.xcodeproj/project.pbxproj +++ b/libkdd/kdd.xcodeproj/project.pbxproj @@ -800,6 +800,7 @@ SWIFT_OBJC_BRIDGING_HEADER = tests/kdd_bridge.h; SWIFT_OPTIMIZATION_LEVEL = "-Onone"; SWIFT_VERSION = 4.0; + USE_HEADERMAP = NO; }; name = Debug; }; @@ -817,6 +818,7 @@ SDKROOT = macosx; SWIFT_OBJC_BRIDGING_HEADER = tests/kdd_bridge.h; SWIFT_VERSION = 4.0; + USE_HEADERMAP = NO; }; name = Release; }; @@ -1024,6 +1026,7 @@ SDKROOT = macosx; SWIFT_OBJC_BRIDGING_HEADER = tests/kdd_bridge.h; SWIFT_VERSION = 4.0; + USE_HEADERMAP = NO; }; name = ReleaseHost; }; diff --git a/libkdd/tests/kdd_bridge.h b/libkdd/tests/kdd_bridge.h index fb2f48487..dda49def3 100644 --- a/libkdd/tests/kdd_bridge.h +++ b/libkdd/tests/kdd_bridge.h @@ -9,8 +9,8 @@ #ifndef kdd_bridge_h #define kdd_bridge_h -#include -#include +#include +#include #include #endif /* kdd_bridge_h */ diff --git a/libkern/OSKextLib.cpp b/libkern/OSKextLib.cpp index 5a887bc79..4a2cc377b 100644 --- a/libkern/OSKextLib.cpp +++ b/libkern/OSKextLib.cpp @@ -74,6 +74,12 @@ finish: /********************************************************************* *********************************************************************/ + +// FIXME: Implementation of this function is hidden from the static analyzer. +// The analyzer is worried about the lack of release and suggests +// refactoring the code into the typical non-owning container pattern. +// Feel free to remove the #ifndef and address the warning! 
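[Editor's illustrative aside, not part of the patch] __clang_analyzer__ is a macro the clang static analyzer predefines while it runs, so guarding a definition with it keeps the code in every normal build while hiding it from analysis. The general shape of the suppression used below (the function name here is hypothetical):

#ifndef __clang_analyzer__
/* Compiles normally, but is invisible to the static analyzer, so ownership
 * transfers it cannot model do not produce false positives. */
void function_with_ownership_the_analyzer_cannot_model(void)
{
	/* ... */
}
#endif /* !__clang_analyzer__ */

The trade-off is that the guarded code receives no analysis coverage at all, which is why the FIXME asks for an eventual refactor rather than leaving the guard in place.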
+#ifndef __clang_analyzer__ OSReturn OSKextRetainKextWithLoadTag(uint32_t loadTag) { @@ -108,9 +114,16 @@ OSKextRetainKextWithLoadTag(uint32_t loadTag) finish: return result; } +#endif // __clang_analyzer__ /********************************************************************* *********************************************************************/ + +// FIXME: Implementation of this function is hidden from the static analyzer. +// The analyzer is worried about the double release and suggests +// refactoring the code into the typical non-owning container pattern. +// Feel free to remove the #ifndef and address the warning! +#ifndef __clang_analyzer__ OSReturn OSKextReleaseKextWithLoadTag(uint32_t loadTag) { @@ -146,6 +159,7 @@ OSKextReleaseKextWithLoadTag(uint32_t loadTag) finish: return result; } +#endif // __clang_analyzer__ /********************************************************************* * Not to be called by the kext being unloaded! @@ -270,7 +284,7 @@ kext_request( } if (isMkext) { -#ifdef SECURE_KERNEL +#if defined(SECURE_KERNEL) || !CONFIG_KXLD // xxx - something tells me if we have a secure kernel we don't even // xxx - want to log a message here. :-) *op_result = KERN_NOT_SUPPORTED; @@ -363,21 +377,32 @@ finish: * Gets the vm_map for the current kext *********************************************************************/ extern vm_offset_t segPRELINKTEXTB; +extern vm_offset_t segLINKB; extern unsigned long segSizePRELINKTEXT; -extern int kth_started; extern vm_map_t g_kext_map; vm_map_t kext_get_vm_map(kmod_info_t *info) { vm_map_t kext_map = NULL; - - /* Set the vm map */ - if ((info->address >= segPRELINKTEXTB) && - (info->address < (segPRELINKTEXTB + segSizePRELINKTEXT))) { - kext_map = kernel_map; + kc_format_t kcformat; + + if (PE_get_primary_kc_format(&kcformat) && kcformat == KCFormatFileset) { + /* Check if the kext is from the boot KC */ + assert(segLINKB >= (segPRELINKTEXTB + segSizePRELINKTEXT)); + if ((info->address >= segPRELINKTEXTB) && + (info->address < segLINKB)) { + kext_map = kernel_map; + } else { + kext_map = g_kext_map; + } } else { - kext_map = g_kext_map; + if ((info->address >= segPRELINKTEXTB) && + (info->address < (segPRELINKTEXTB + segSizePRELINKTEXT))) { + kext_map = kernel_map; + } else { + kext_map = g_kext_map; + } } return kext_map; diff --git a/libkern/OSKextVersion.c b/libkern/OSKextVersion.c index 5a761d48b..35f361d88 100644 --- a/libkern/OSKextVersion.c +++ b/libkern/OSKextVersion.c @@ -169,9 +169,11 @@ __OSKextVersionStageForString(const char ** string_p) /********************************************************************* *********************************************************************/ static const char * -__OSKextVersionStringForStage(OSKextVersionStage stage) +__OSKextVersionStringForStage(OSKextVersion stage) { switch (stage) { + default: + OS_FALLTHROUGH; case kOSKextVersionStageInvalid: return NULL; case kOSKextVersionStageDevelopment: return "d"; case kOSKextVersionStageAlpha: return "a"; @@ -192,7 +194,7 @@ OSKextParseVersionString(const char * versionString) OSKextVersion vers_major = 0; OSKextVersion vers_minor = 0; OSKextVersion vers_revision = 0; - OSKextVersion vers_stage = 0; + OSKextVersionStage vers_stage = 0; OSKextVersion vers_stage_level = 0; const char * current_char_p; diff --git a/libkern/c++/OSArray.cpp b/libkern/c++/OSArray.cpp index cd04323f3..92d41271d 100644 --- a/libkern/c++/OSArray.cpp +++ b/libkern/c++/OSArray.cpp @@ -29,15 +29,20 @@ /* IOArray.cpp converted to C++ by gvdl on Fri 1998-10-30 */ +#define 
IOKIT_ENABLE_SHARED_PTR + #include #include -#include #include +#include +#include #include +#include #define super OSCollection -OSDefineMetaClassAndStructors(OSArray, OSCollection) +OSDefineMetaClassAndStructorsWithZone(OSArray, OSCollection, + (zone_create_flags_t) (ZC_CACHING | ZC_ZFREE_CLEARMEM)) OSMetaClassDefineReservedUnused(OSArray, 0); OSMetaClassDefineReservedUnused(OSArray, 1); OSMetaClassDefineReservedUnused(OSArray, 2); @@ -47,10 +52,6 @@ OSMetaClassDefineReservedUnused(OSArray, 5); OSMetaClassDefineReservedUnused(OSArray, 6); OSMetaClassDefineReservedUnused(OSArray, 7); - -#define EXT_CAST(obj) \ - reinterpret_cast(const_cast(obj)) - bool OSArray::initWithCapacity(unsigned int inCapacity) { @@ -61,12 +62,12 @@ OSArray::initWithCapacity(unsigned int inCapacity) } // integer overflow check - if (inCapacity > (UINT_MAX / sizeof(const OSMetaClassBase*))) { + if (inCapacity > (UINT_MAX / sizeof(*array))) { return false; } - size = sizeof(const OSMetaClassBase *) * inCapacity; - array = (const OSMetaClassBase **) kalloc_container(size); + size = sizeof(*array) * inCapacity; + array = (ArraySharedPtrType *)kalloc_container(size); if (!array) { return false; } @@ -75,7 +76,7 @@ OSArray::initWithCapacity(unsigned int inCapacity) capacity = inCapacity; capacityIncrement = (inCapacity)? inCapacity : 16; - bzero(array, size); + os::uninitialized_value_construct(array, array + capacity); OSCONTAINER_ACCUMSIZE(size); return true; @@ -107,8 +108,7 @@ OSArray::initWithObjects(const OSObject *objects[], return false; } - array[count++] = newObject; - newObject->taggedRetain(OSTypeID(OSCollection)); + array[count++].reset(newObject, OSRetain); } return true; @@ -126,43 +126,40 @@ OSArray::initWithArray(const OSArray *anArray, anArray->count, theCapacity); } -OSArray * +OSSharedPtr OSArray::withCapacity(unsigned int capacity) { - OSArray *me = new OSArray; + OSSharedPtr me = OSMakeShared(); if (me && !me->initWithCapacity(capacity)) { - me->release(); - return NULL; + return nullptr; } return me; } -OSArray * +OSSharedPtr OSArray::withObjects(const OSObject *objects[], unsigned int count, unsigned int capacity) { - OSArray *me = new OSArray; + OSSharedPtr me = OSMakeShared(); if (me && !me->initWithObjects(objects, count, capacity)) { - me->release(); - return NULL; + return nullptr; } return me; } -OSArray * +OSSharedPtr OSArray::withArray(const OSArray *array, unsigned int capacity) { - OSArray *me = new OSArray; + OSSharedPtr me = OSMakeShared(); if (me && !me->initWithArray(array, capacity)) { - me->release(); - return NULL; + return nullptr; } return me; @@ -177,8 +174,9 @@ OSArray::free() flushCollection(); if (array) { - kfree(array, sizeof(const OSMetaClassBase *) * capacity); - OSCONTAINER_ACCUMSIZE( -(sizeof(const OSMetaClassBase *) * capacity)); + os::destroy(array, array + capacity); + kfree(array, sizeof(*array) * capacity); + OSCONTAINER_ACCUMSIZE( -(sizeof(*array) * capacity)); } super::free(); @@ -211,8 +209,8 @@ OSArray::setCapacityIncrement(unsigned int increment) unsigned int OSArray::ensureCapacity(unsigned int newCapacity) { - const OSMetaClassBase **newArray; - unsigned int finalCapacity; + ArraySharedPtrType *newArray; + vm_size_t finalCapacity; vm_size_t oldSize, newSize; if (newCapacity <= capacity) { @@ -224,26 +222,32 @@ OSArray::ensureCapacity(unsigned int newCapacity) * capacityIncrement; // integer overflow check - if ((finalCapacity < newCapacity) || (finalCapacity > (UINT_MAX / sizeof(const OSMetaClassBase*)))) { + if (finalCapacity < newCapacity) { return 
capacity; } - newSize = sizeof(const OSMetaClassBase *) * finalCapacity; + newSize = sizeof(*newArray) * finalCapacity; - newArray = (const OSMetaClassBase **) kallocp_container(&newSize); + newArray = (decltype(newArray))kallocp_container(&newSize); if (newArray) { // use all of the actual allocation size - finalCapacity = newSize / sizeof(const OSMetaClassBase *); + finalCapacity = (newSize / sizeof(*newArray)); + if (finalCapacity > UINT_MAX) { + // failure, too large + kfree(newArray, newSize); + return capacity; + } - oldSize = sizeof(const OSMetaClassBase *) * capacity; + oldSize = sizeof(*array) * capacity; OSCONTAINER_ACCUMSIZE(((size_t)newSize) - ((size_t)oldSize)); - bcopy(array, newArray, oldSize); - bzero(&newArray[capacity], newSize - oldSize); + os::uninitialized_move(array, array + capacity, newArray); + os::uninitialized_value_construct(newArray + capacity, newArray + finalCapacity); + os::destroy(array, array + capacity); kfree(array, oldSize); array = newArray; - capacity = finalCapacity; + capacity = (unsigned int) finalCapacity; } return capacity; @@ -256,7 +260,7 @@ OSArray::flushCollection() haveUpdated(); for (i = 0; i < count; i++) { - array[i]->taggedRelease(OSTypeID(OSCollection)); + array[i].reset(); } count = 0; } @@ -267,6 +271,12 @@ OSArray::setObject(const OSMetaClassBase *anObject) return setObject(count, anObject); } +bool +OSArray::setObject(OSSharedPtr const& anObject) +{ + return setObject(count, anObject); +} + bool OSArray::setObject(unsigned int index, const OSMetaClassBase *anObject) { @@ -285,16 +295,21 @@ OSArray::setObject(unsigned int index, const OSMetaClassBase *anObject) haveUpdated(); if (index != count) { for (i = count; i > index; i--) { - array[i] = array[i - 1]; + array[i] = os::move(array[i - 1]); } } - array[index] = anObject; - anObject->taggedRetain(OSTypeID(OSCollection)); + array[index].reset(anObject, OSRetain); count++; return true; } +bool +OSArray::setObject(unsigned int index, OSSharedPtr const& anObject) +{ + return setObject(index, anObject.get()); +} + bool OSArray::merge(const OSArray * otherArray) { @@ -318,8 +333,7 @@ OSArray::merge(const OSArray * otherArray) for (unsigned int i = 0; i < otherCount; i++) { const OSMetaClassBase *newObject = otherArray->getObject(i); - array[count++] = newObject; - newObject->taggedRetain(OSTypeID(OSCollection)); + array[count++].reset(newObject, OSRetain); } return true; @@ -329,39 +343,38 @@ void OSArray:: replaceObject(unsigned int index, const OSMetaClassBase *anObject) { - const OSMetaClassBase *oldObject; - if ((index >= count) || !anObject) { return; } haveUpdated(); - oldObject = array[index]; - array[index] = anObject; - anObject->taggedRetain(OSTypeID(OSCollection)); - oldObject->taggedRelease(OSTypeID(OSCollection)); + array[index].reset(anObject, OSRetain); +} + +void +OSArray::replaceObject(unsigned int index, OSSharedPtr const& anObject) +{ + return replaceObject(index, anObject.get()); } void OSArray::removeObject(unsigned int index) { unsigned int i; - const OSMetaClassBase *oldObject; + ArraySharedPtrType oldObject; if (index >= count) { return; } haveUpdated(); - oldObject = array[index]; + oldObject = os::move(array[index]); count--; for (i = index; i < count; i++) { - array[i] = array[i + 1]; + array[i] = os::move(array[i + 1]); } - - oldObject->taggedRelease(OSTypeID(OSCollection)); } bool @@ -405,7 +418,7 @@ OSArray::getObject(unsigned int index) const if (index >= count) { return NULL; } else { - return (OSObject *) (const_cast(array[index])); + return 
static_cast(const_cast(array[index].get())); } } @@ -415,7 +428,7 @@ OSArray::getLastObject() const if (count == 0) { return NULL; } else { - return (OSObject *) (const_cast(array[count - 1])); + return static_cast(const_cast(array[count - 1].get())); } } @@ -454,7 +467,7 @@ OSArray::getNextObjectForIterator(void *inIterator, OSObject **ret) const unsigned int index = (*iteratorP)++; if (index < count) { - *ret = (OSObject *)(const_cast (array[index])); + *ret = static_cast(const_cast(array[index].get())); return true; } else { *ret = NULL; @@ -489,7 +502,7 @@ OSArray::setOptions(unsigned options, unsigned mask, void *) if ((old ^ options) & mask) { // Value changed need to recurse over all of the child collections for (unsigned i = 0; i < count; i++) { - OSCollection *coll = OSDynamicCast(OSCollection, array[i]); + OSCollection *coll = OSDynamicCast(OSCollection, array[i].get()); if (coll) { coll->setOptions(options, mask); } @@ -499,18 +512,19 @@ OSArray::setOptions(unsigned options, unsigned mask, void *) return old; } -OSCollection * +OSSharedPtr OSArray::copyCollection(OSDictionary *cycleDict) { - bool allocDict = !cycleDict; - OSCollection *ret = NULL; - OSArray *newArray = NULL; - - if (allocDict) { - cycleDict = OSDictionary::withCapacity(16); - if (!cycleDict) { - return NULL; + OSSharedPtr ourCycleDict; + OSSharedPtr ret; + OSSharedPtr newArray; + + if (!cycleDict) { + ourCycleDict = OSDictionary::withCapacity(16); + if (!ourCycleDict) { + return nullptr; } + cycleDict = ourCycleDict.get(); } do { @@ -526,37 +540,26 @@ OSArray::copyCollection(OSDictionary *cycleDict) } // Insert object into cycle Dictionary - cycleDict->setObject((const OSSymbol *) this, newArray); + cycleDict->setObject((const OSSymbol *) this, newArray.get()); for (unsigned int i = 0; i < count; i++) { OSCollection *coll = - OSDynamicCast(OSCollection, EXT_CAST(newArray->array[i])); + OSDynamicCast(OSCollection, static_cast( + const_cast( + newArray->array[i].get()))); if (coll) { - OSCollection *newColl = coll->copyCollection(cycleDict); + OSSharedPtr newColl = coll->copyCollection(cycleDict); if (!newColl) { - goto abortCopy; + return ret; } - newArray->replaceObject(i, newColl); - newColl->release(); + newArray->replaceObject(i, newColl.get()); } - ; } - ; - ret = newArray; - newArray = NULL; + ret = os::move(newArray); } while (false); -abortCopy: - if (newArray) { - newArray->release(); - } - - if (allocDict) { - cycleDict->release(); - } - return ret; } diff --git a/libkern/c++/OSCollection.cpp b/libkern/c++/OSCollection.cpp index 459824188..f7a360943 100644 --- a/libkern/c++/OSCollection.cpp +++ b/libkern/c++/OSCollection.cpp @@ -27,6 +27,8 @@ */ /* IOArray.h created by rsulack on Thu 11-Sep-1997 */ +#define IOKIT_ENABLE_SHARED_PTR + #include #include @@ -39,8 +41,8 @@ OSDefineMetaClassAndAbstractStructors(OSCollection, OSObject) -OSMetaClassDefineReservedUsed(OSCollection, 0); -OSMetaClassDefineReservedUsed(OSCollection, 1); +OSMetaClassDefineReservedUsedX86(OSCollection, 0); +OSMetaClassDefineReservedUsedX86(OSCollection, 1); OSMetaClassDefineReservedUnused(OSCollection, 2); OSMetaClassDefineReservedUnused(OSCollection, 3); OSMetaClassDefineReservedUnused(OSCollection, 4); @@ -85,23 +87,19 @@ OSCollection::setOptions(unsigned options, unsigned mask, void *) return old; } -OSCollection * +OSSharedPtr OSCollection::copyCollection(OSDictionary *cycleDict) { if (cycleDict) { OSObject *obj = cycleDict->getObject((const OSSymbol *) this); - if (obj) { - obj->retain(); - } - return reinterpret_cast(obj); + 
return OSSharedPtr(reinterpret_cast(obj), OSRetain); } else { // If we are here it means that there is a collection subclass that // hasn't overridden the copyCollection method. In which case just // return a reference to ourselves. // Hopefully this collection will not be inserted into the registry - retain(); - return this; + return OSSharedPtr(this, OSRetain); } } diff --git a/libkern/c++/OSCollectionIterator.cpp b/libkern/c++/OSCollectionIterator.cpp index cc60901c8..dbbe7fc44 100644 --- a/libkern/c++/OSCollectionIterator.cpp +++ b/libkern/c++/OSCollectionIterator.cpp @@ -27,10 +27,13 @@ */ /* IOArray.h created by rsulack on Thu 11-Sep-1997 */ -#include -#include +#define IOKIT_ENABLE_SHARED_PTR + #include +#include +#include #include +#include #define super OSIterator @@ -43,8 +46,7 @@ OSCollectionIterator::initWithCollection(const OSCollection *inColl) return false; } - inColl->retain(); - collection = inColl; + collection.reset(inColl, OSRetain); collIterator = NULL; initialUpdateStamp = 0; valid = false; @@ -52,14 +54,13 @@ OSCollectionIterator::initWithCollection(const OSCollection *inColl) return true; } -OSCollectionIterator * +OSSharedPtr OSCollectionIterator::withCollection(const OSCollection *inColl) { - OSCollectionIterator *me = new OSCollectionIterator; + OSSharedPtr me = OSMakeShared(); if (me && !me->initWithCollection(inColl)) { - me->release(); - return NULL; + return nullptr; } return me; @@ -74,10 +75,7 @@ OSCollectionIterator::free() collIterator = NULL; } - if (collection) { - collection->release(); - collection = NULL; - } + collection.reset(); super::free(); } diff --git a/libkern/c++/OSData.cpp b/libkern/c++/OSData.cpp index 082711312..dd7836908 100644 --- a/libkern/c++/OSData.cpp +++ b/libkern/c++/OSData.cpp @@ -33,7 +33,7 @@ __BEGIN_DECLS #include __END_DECLS -#define LIBKERN_SMART_POINTERS +#define IOKIT_ENABLE_SHARED_PTR #include #include @@ -43,8 +43,8 @@ __END_DECLS #define super OSObject -OSDefineMetaClassAndStructors(OSData, OSObject) -OSMetaClassDefineReservedUsed(OSData, 0); // setDeallocFunction +OSDefineMetaClassAndStructorsWithZone(OSData, OSObject, ZC_ZFREE_CLEARMEM) +OSMetaClassDefineReservedUsedX86(OSData, 0); // setDeallocFunction OSMetaClassDefineReservedUnused(OSData, 1); OSMetaClassDefineReservedUnused(OSData, 2); OSMetaClassDefineReservedUnused(OSData, 3); @@ -58,12 +58,14 @@ OSMetaClassDefineReservedUnused(OSData, 7); bool OSData::initWithCapacity(unsigned int inCapacity) { + void *_data = NULL; + if (data) { OSCONTAINER_ACCUMSIZE(-((size_t)capacity)); if (!inCapacity || (capacity < inCapacity)) { // clean out old data's storage if it isn't big enough if (capacity < page_size) { - kfree(data, capacity); + kfree_data_container(data, capacity); } else { kmem_free(kernel_map, (vm_offset_t)data, capacity); } @@ -78,13 +80,14 @@ OSData::initWithCapacity(unsigned int inCapacity) if (inCapacity && !data) { if (inCapacity < page_size) { - data = (void *) kalloc_container(inCapacity); + data = (void *)kalloc_data_container(inCapacity, Z_WAITOK); } else { kern_return_t kr; if (round_page_overflow(inCapacity, &inCapacity)) { kr = KERN_RESOURCE_SHORTAGE; } else { - kr = kmem_alloc(kernel_map, (vm_offset_t *)&data, inCapacity, IOMemoryTag(kernel_map)); + kr = kmem_alloc(kernel_map, (vm_offset_t *)&_data, inCapacity, IOMemoryTag(kernel_map)); + data = _data; } if (KERN_SUCCESS != kr) { data = NULL; @@ -155,10 +158,10 @@ OSData::initWithData(const OSData *inData, } } -OSDataPtr +OSSharedPtr OSData::withCapacity(unsigned int inCapacity) { - OSDataPtr me = 
OSDataPtr::alloc(); + OSSharedPtr me = OSMakeShared(); if (me && !me->initWithCapacity(inCapacity)) { return nullptr; @@ -167,10 +170,10 @@ OSData::withCapacity(unsigned int inCapacity) return me; } -OSDataPtr +OSSharedPtr OSData::withBytes(const void *bytes, unsigned int inLength) { - OSDataPtr me = OSDataPtr::alloc(); + OSSharedPtr me = OSMakeShared(); if (me && !me->initWithBytes(bytes, inLength)) { return nullptr; @@ -178,10 +181,10 @@ OSData::withBytes(const void *bytes, unsigned int inLength) return me; } -OSDataPtr +OSSharedPtr OSData::withBytesNoCopy(void *bytes, unsigned int inLength) { - OSDataPtr me = OSDataPtr::alloc(); + OSSharedPtr me = OSMakeShared(); if (me && !me->initWithBytesNoCopy(bytes, inLength)) { return nullptr; @@ -190,10 +193,10 @@ OSData::withBytesNoCopy(void *bytes, unsigned int inLength) return me; } -OSDataPtr +OSSharedPtr OSData::withData(const OSData *inData) { - OSDataPtr me = OSDataPtr::alloc(); + OSSharedPtr me = OSMakeShared(); if (me && !me->initWithData(inData)) { return nullptr; @@ -202,11 +205,11 @@ OSData::withData(const OSData *inData) return me; } -OSDataPtr +OSSharedPtr OSData::withData(const OSData *inData, unsigned int start, unsigned int inLength) { - OSDataPtr me = OSDataPtr::alloc(); + OSSharedPtr me = OSMakeShared(); if (me && !me->initWithData(inData, start, inLength)) { return nullptr; @@ -220,7 +223,7 @@ OSData::free() { if ((capacity != EXTERNAL) && data && capacity) { if (capacity < page_size) { - kfree(data, capacity); + kfree_data_container(data, capacity); } else { kmem_free(kernel_map, (vm_offset_t)data, capacity); } @@ -306,7 +309,7 @@ OSData::ensureCapacity(unsigned int newCapacity) newData = NULL; } } else { - newData = (unsigned char *) kalloc_container(finalCapacity); + newData = (unsigned char *)kalloc_data_container(finalCapacity, Z_WAITOK); } if (newData) { @@ -316,7 +319,7 @@ OSData::ensureCapacity(unsigned int newCapacity) } if (data) { if (capacity < page_size) { - kfree(data, capacity); + kfree_data_container(data, capacity); } else { kmem_free(kernel_map, (vm_offset_t)data, capacity); } diff --git a/libkern/c++/OSDictionary.cpp b/libkern/c++/OSDictionary.cpp index c928cdd15..c88f0ecfd 100644 --- a/libkern/c++/OSDictionary.cpp +++ b/libkern/c++/OSDictionary.cpp @@ -29,17 +29,21 @@ /* OSDictionary.cpp converted to C++ by gvdl on Fri 1998-10-30 */ /* OSDictionary.cpp rewritten by gvdl on Fri 1998-10-30 */ +#define IOKIT_ENABLE_SHARED_PTR -#include #include -#include -#include -#include #include +#include +#include +#include +#include +#include +#include #define super OSCollection -OSDefineMetaClassAndStructors(OSDictionary, OSCollection) +OSDefineMetaClassAndStructorsWithZone(OSDictionary, OSCollection, + (zone_create_flags_t) (ZC_CACHING | ZC_ZFREE_CLEARMEM)) OSMetaClassDefineReservedUnused(OSDictionary, 0); OSMetaClassDefineReservedUnused(OSDictionary, 1); OSMetaClassDefineReservedUnused(OSDictionary, 2); @@ -62,11 +66,11 @@ OSDictionary::dictEntry::compare(const void *_e1, const void *_e2) const OSDictionary::dictEntry *e1 = (const OSDictionary::dictEntry *)_e1; const OSDictionary::dictEntry *e2 = (const OSDictionary::dictEntry *)_e2; - if ((uintptr_t)e1->key == (uintptr_t)e2->key) { + if ((uintptr_t)e1->key.get() == (uintptr_t)e2->key.get()) { return 0; } - return (uintptr_t)e1->key > (uintptr_t)e2->key ? 1 : -1; + return (uintptr_t)e1->key.get() > (uintptr_t)e2->key.get() ? 
1 : -1; } void @@ -95,7 +99,7 @@ OSDictionary::initWithCapacity(unsigned int inCapacity) return false; } - bzero(dictionary, size); + os::uninitialized_value_construct(dictionary, dictionary + inCapacity); OSCONTAINER_ACCUMSIZE(size); count = 0; @@ -165,19 +169,16 @@ OSDictionary::initWithObjects(const OSObject *objects[], } for (unsigned int i = 0; i < theCount; i++) { - const OSSymbol *key = OSSymbol::withString(*keys++); + OSSharedPtr key = OSSymbol::withString(*keys++); const OSMetaClassBase *newObject = *objects++; if (!key) { return false; } - if (!newObject || !setObject(key, newObject)) { - key->release(); + if (!newObject || !setObject(key.get(), newObject)) { return false; } - - key->release(); } return true; @@ -208,10 +209,9 @@ OSDictionary::initWithDictionary(const OSDictionary *dict, } count = dict->count; - bcopy(dict->dictionary, dictionary, count * sizeof(dictEntry)); for (unsigned int i = 0; i < count; i++) { - dictionary[i].key->taggedRetain(OSTypeID(OSCollection)); - dictionary[i].value->taggedRetain(OSTypeID(OSCollection)); + dictionary[i].key = dict->dictionary[i].key; + dictionary[i].value = dict->dictionary[i].value; } if ((kSort & fOptions) && !(kSort & dict->fOptions)) { @@ -221,60 +221,56 @@ OSDictionary::initWithDictionary(const OSDictionary *dict, return true; } -OSDictionary * +OSSharedPtr OSDictionary::withCapacity(unsigned int capacity) { - OSDictionary *me = new OSDictionary; + OSSharedPtr me = OSMakeShared(); if (me && !me->initWithCapacity(capacity)) { - me->release(); - return NULL; + return nullptr; } return me; } -OSDictionary * +OSSharedPtr OSDictionary::withObjects(const OSObject *objects[], const OSSymbol *keys[], unsigned int count, unsigned int capacity) { - OSDictionary *me = new OSDictionary; + OSSharedPtr me = OSMakeShared(); if (me && !me->initWithObjects(objects, keys, count, capacity)) { - me->release(); - return NULL; + return nullptr; } return me; } -OSDictionary * +OSSharedPtr OSDictionary::withObjects(const OSObject *objects[], const OSString *keys[], unsigned int count, unsigned int capacity) { - OSDictionary *me = new OSDictionary; + OSSharedPtr me = OSMakeShared(); if (me && !me->initWithObjects(objects, keys, count, capacity)) { - me->release(); - return NULL; + return nullptr; } return me; } -OSDictionary * +OSSharedPtr OSDictionary::withDictionary(const OSDictionary *dict, unsigned int capacity) { - OSDictionary *me = new OSDictionary; + OSSharedPtr me = OSMakeShared(); if (me && !me->initWithDictionary(dict, capacity)) { - me->release(); - return NULL; + return nullptr; } return me; @@ -322,7 +318,7 @@ unsigned int OSDictionary::ensureCapacity(unsigned int newCapacity) { dictEntry *newDict; - unsigned int finalCapacity; + vm_size_t finalCapacity; vm_size_t oldSize, newSize; if (newCapacity <= capacity) { @@ -334,7 +330,7 @@ OSDictionary::ensureCapacity(unsigned int newCapacity) * capacityIncrement; // integer overflow check - if (finalCapacity < newCapacity || (finalCapacity > (UINT_MAX / sizeof(dictEntry)))) { + if (finalCapacity < newCapacity) { return capacity; } @@ -343,18 +339,24 @@ OSDictionary::ensureCapacity(unsigned int newCapacity) newDict = (dictEntry *) kallocp_container(&newSize); if (newDict) { // use all of the actual allocation size - finalCapacity = newSize / sizeof(dictEntry); + finalCapacity = (newSize / sizeof(dictEntry)); + if (finalCapacity > UINT_MAX) { + // failure, too large + kfree(newDict, newSize); + return capacity; + } oldSize = sizeof(dictEntry) * capacity; - bcopy(dictionary, newDict, oldSize); - 
bzero(&newDict[capacity], newSize - oldSize); + os::uninitialized_move(dictionary, dictionary + capacity, newDict); + os::uninitialized_value_construct(newDict + capacity, newDict + finalCapacity); + os::destroy(dictionary, dictionary + capacity); OSCONTAINER_ACCUMSIZE(((size_t)newSize) - ((size_t)oldSize)); kfree(dictionary, oldSize); dictionary = newDict; - capacity = finalCapacity; + capacity = (unsigned int) finalCapacity; } return capacity; @@ -401,14 +403,11 @@ setObject(const OSSymbol *aKey, const OSMetaClassBase *anObject, bool onlyAdd) return false; } - const OSMetaClassBase *oldObject = dictionary[i].value; + OSTaggedSharedPtr oldObject; haveUpdated(); - anObject->taggedRetain(OSTypeID(OSCollection)); - dictionary[i].value = anObject; - - oldObject->taggedRelease(OSTypeID(OSCollection)); + dictionary[i].value.reset(anObject, OSRetain); return true; } @@ -419,12 +418,11 @@ setObject(const OSSymbol *aKey, const OSMetaClassBase *anObject, bool onlyAdd) haveUpdated(); - bcopy(&dictionary[i], &dictionary[i + 1], (count - i) * sizeof(dictionary[0])); + new (&dictionary[count]) dictEntry(); + os::move_backward(&dictionary[i], &dictionary[count], &dictionary[count + 1]); - aKey->taggedRetain(OSTypeID(OSCollection)); - anObject->taggedRetain(OSTypeID(OSCollection)); - dictionary[i].key = aKey; - dictionary[i].value = anObject; + dictionary[i].key.reset(aKey, OSRetain); + dictionary[i].value.reset(anObject, OSRetain); count++; return true; @@ -437,6 +435,24 @@ setObject(const OSSymbol *aKey, const OSMetaClassBase *anObject) return setObject(aKey, anObject, false); } +bool +OSDictionary::setObject(OSSharedPtr const& aKey, OSSharedPtr const& anObject) +{ + return setObject(aKey.get(), anObject.get()); +} + +bool +OSDictionary::setObject(const OSString* aKey, OSSharedPtr const& anObject) +{ + return setObject(aKey, anObject.get()); +} + +bool +OSDictionary::setObject(const char* aKey, OSSharedPtr const& anObject) +{ + return setObject(aKey, anObject.get()); +} + void OSDictionary::removeObject(const OSSymbol *aKey) { @@ -480,7 +496,7 @@ bool OSDictionary::merge(const OSDictionary *srcDict) { const OSSymbol * sym; - OSCollectionIterator * iter; + OSSharedPtr iter; if (!OSDynamicCast(OSDictionary, srcDict)) { return false; @@ -496,11 +512,9 @@ OSDictionary::merge(const OSDictionary *srcDict) obj = srcDict->getObject(sym); if (!setObject(sym, obj)) { - iter->release(); return false; } } - iter->release(); return true; } @@ -525,10 +539,10 @@ OSDictionary::getObject(const OSSymbol *aKey) const while (l < r) { i = (l + r) / 2; if (aKey == dictionary[i].key) { - return const_cast ((const OSObject *)dictionary[i].value); + return const_cast ((const OSObject *)dictionary[i].value.get()); } - if ((uintptr_t)aKey < (uintptr_t)dictionary[i].key) { + if ((uintptr_t)aKey < (uintptr_t)dictionary[i].key.get()) { r = i; } else { l = i + 1; @@ -537,7 +551,7 @@ OSDictionary::getObject(const OSSymbol *aKey) const } else { for (i = l; i < r; i++) { if (aKey == dictionary[i].key) { - return const_cast ((const OSObject *)dictionary[i].value); + return const_cast ((const OSObject *)dictionary[i].value.get()); } } } @@ -548,30 +562,27 @@ OSDictionary::getObject(const OSSymbol *aKey) const // Wrapper macros #define OBJECT_WRAP_1(cmd, k) \ { \ - const OSSymbol *tmpKey = k; \ + OSSharedPtr tmpKey = k; \ OSObject *retObj = NULL; \ if (tmpKey) { \ - retObj = cmd(tmpKey); \ - tmpKey->release(); \ + retObj = cmd(tmpKey.get()); \ } \ return retObj; \ } #define OBJECT_WRAP_2(cmd, k, o) \ { \ - const OSSymbol *tmpKey = k; \ - 
bool ret = cmd(tmpKey, o); \ + OSSharedPtr tmpKey = k; \ + bool ret = cmd(tmpKey.get(), o); \ \ - tmpKey->release(); \ return ret; \ } #define OBJECT_WRAP_3(cmd, k) \ { \ - const OSSymbol *tmpKey = k; \ + OSSharedPtr tmpKey = k; \ if (tmpKey) { \ - cmd(tmpKey); \ - tmpKey->release(); \ + cmd(tmpKey.get()); \ } \ } @@ -598,7 +609,7 @@ OBJECT_WRAP_3(removeObject, OSSymbol::existingSymbolForCString(aKey)) bool OSDictionary::isEqualTo(const OSDictionary *srcDict, const OSCollection *keys) const { - OSCollectionIterator * iter; + OSSharedPtr iter; unsigned int keysCount; const OSMetaClassBase * obj1; const OSMetaClassBase * obj2; @@ -633,7 +644,6 @@ OSDictionary::isEqualTo(const OSDictionary *srcDict, const OSCollection *keys) c break; } } - iter->release(); return ret; } @@ -653,7 +663,7 @@ OSDictionary::isEqualTo(const OSDictionary *srcDict) const } for (i = 0; i < count; i++) { - obj = srcDict->getObject(dictionary[i].key); + obj = srcDict->getObject(dictionary[i].key.get()); if (!obj) { return false; } @@ -701,7 +711,7 @@ OSDictionary::getNextObjectForIterator(void *inIterator, OSObject **ret) const unsigned int index = (*iteratorP)++; if (index < count) { - *ret = (OSObject *) dictionary[index].key; + *ret = const_cast(dictionary[index].key.get()); } else { *ret = NULL; } @@ -721,7 +731,7 @@ OSDictionary::serialize(OSSerialize *s) const } for (unsigned i = 0; i < count; i++) { - const OSSymbol *key = dictionary[i].key; + const OSSymbol *key = dictionary[i].key.get(); // due the nature of the XML syntax, this must be a symbol if (!key->metaCast("OSSymbol")) { @@ -770,7 +780,7 @@ OSDictionary::setOptions(unsigned options, unsigned mask, void *) if ((old ^ options) & mask) { // Value changed need to recurse over all of the child collections for (unsigned i = 0; i < count; i++) { - OSCollection *v = OSDynamicCast(OSCollection, dictionary[i].value); + OSCollection *v = OSDynamicCast(OSCollection, dictionary[i].value.get()); if (v) { v->setOptions(options, mask); } @@ -784,18 +794,19 @@ OSDictionary::setOptions(unsigned options, unsigned mask, void *) return old; } -OSCollection * +OSSharedPtr OSDictionary::copyCollection(OSDictionary *cycleDict) { - bool allocDict = !cycleDict; - OSCollection *ret = NULL; - OSDictionary *newDict = NULL; + OSSharedPtr ourCycleDict; + OSSharedPtr ret; + OSSharedPtr newDict; - if (allocDict) { - cycleDict = OSDictionary::withCapacity(16); - if (!cycleDict) { - return NULL; + if (!cycleDict) { + ourCycleDict = OSDictionary::withCapacity(16); + if (!ourCycleDict) { + return nullptr; } + cycleDict = ourCycleDict.get(); } do { @@ -811,58 +822,41 @@ OSDictionary::copyCollection(OSDictionary *cycleDict) } // Insert object into cycle Dictionary - cycleDict->setObject((const OSSymbol *) this, newDict); + cycleDict->setObject((const OSSymbol *) this, newDict.get()); for (unsigned int i = 0; i < count; i++) { - const OSMetaClassBase *obj = dictionary[i].value; - OSCollection *coll = OSDynamicCast(OSCollection, EXT_CAST(obj)); + const OSMetaClassBase *obj = dictionary[i].value.get(); + OSTaggedSharedPtr coll(OSDynamicCast(OSCollection, EXT_CAST(obj)), OSNoRetain); if (coll) { - OSCollection *newColl = coll->copyCollection(cycleDict); + OSSharedPtr newColl = coll->copyCollection(cycleDict); if (!newColl) { - goto abortCopy; + return ret; } - - newDict->dictionary[i].value = newColl; - - coll->taggedRelease(OSTypeID(OSCollection)); - newColl->taggedRetain(OSTypeID(OSCollection)); - newColl->release(); + newDict->dictionary[i].value.detach(); + 
newDict->dictionary[i].value.reset(newColl.get(), OSRetain); } - ; } - ret = newDict; - newDict = NULL; + ret = os::move(newDict); } while (false); -abortCopy: - if (newDict) { - newDict->release(); - } - - if (allocDict) { - cycleDict->release(); - } - return ret; } -OSArray * +OSSharedPtr OSDictionary::copyKeys(void) { - OSArray * array; + OSSharedPtr array; array = OSArray::withCapacity(count); if (!array) { - return NULL; + return nullptr; } for (unsigned int i = 0; i < count; i++) { - if (!array->setObject(i, dictionary[i].key)) { - array->release(); - array = NULL; - break; + if (!array->setObject(i, dictionary[i].key.get())) { + return nullptr; } } return array; @@ -877,7 +871,7 @@ OSDictionary::iterateObjects(void * refcon, bool (*callback)(void * refcon, cons initialUpdateStamp = updateStamp; done = false; for (unsigned int i = 0; i < count; i++) { - done = callback(refcon, dictionary[i].key, EXT_CAST(dictionary[i].value)); + done = callback(refcon, dictionary[i].key.get(), EXT_CAST(dictionary[i].value.get())); if (done) { break; } diff --git a/libkern/c++/OSKext.cpp b/libkern/c++/OSKext.cpp index 6de614778..aa1c83d5b 100644 --- a/libkern/c++/OSKext.cpp +++ b/libkern/c++/OSKext.cpp @@ -26,6 +26,8 @@ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ +#define IOKIT_ENABLE_SHARED_PTR + extern "C" { #include #include @@ -35,6 +37,7 @@ extern "C" { #include #include #include +#include #include #include #include @@ -49,6 +52,7 @@ extern "C" { #include #include #include +#include #include @@ -56,8 +60,16 @@ extern "C" { #include #include #endif + +#if CONFIG_CSR +#include +#include +#include +#endif /* CONFIG_CSR */ }; +#include + #include #include #include @@ -66,9 +78,11 @@ extern "C" { #include #include #include +#include #include #include +#include #include @@ -83,17 +97,41 @@ extern void IODTFreeLoaderInfo(const char * key, void * infoAddr, int infoSize); extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va); /* osfmk/machine/pmap.h */ extern int dtrace_keep_kernel_symbols(void); + +#if defined(__x86_64__) || defined(__i386__) +extern kern_return_t i386_slide_individual_kext(kernel_mach_header_t *mh, uintptr_t slide); +extern kern_return_t i386_slide_kext_collection_mh_addrs(kernel_mach_header_t *mh, uintptr_t slide, bool adjust_mach_headers); +extern void *ubc_getobject_from_filename(const char *filename, struct vnode **, off_t *file_size); +static void *allocate_kcfileset_map_entry_list(void); +static void add_kcfileset_map_entry(void *map_entry_list, vm_map_offset_t start, vm_map_offset_t size); +static void deallocate_kcfileset_map_entry_list_and_unmap_entries(void *map_entry_list, boolean_t unmap_entries, bool pageable); +int vnode_put(struct vnode *vp); +kern_return_t vm_map_kcfileset_segment(vm_map_offset_t *start, vm_map_offset_t size, + void *control, vm_object_offset_t fileoffset, vm_prot_t max_prot); +kern_return_t vm_unmap_kcfileset_segment(vm_map_offset_t *start, vm_map_offset_t size); +void * ubc_getobject(struct vnode *vp, __unused int flags); +#endif //(__x86_64__) || defined(__i386__) } extern unsigned long gVirtBase; extern unsigned long gPhysBase; -#if CONFIG_EMBEDDED -extern vm_offset_t segLOWESTTEXT; -#endif /* CONFIG_EMBEDDED */ +extern vm_map_t g_kext_map; + +bool pageableKCloaded = false; +bool auxKCloaded = false; +bool resetAuxKCSegmentOnUnload = false; + +extern boolean_t pageablekc_uuid_valid; +extern uuid_t pageablekc_uuid; +extern uuid_string_t pageablekc_uuid_string; + +extern boolean_t auxkc_uuid_valid; +extern uuid_t auxkc_uuid; +extern uuid_string_t 
auxkc_uuid_string; static OSReturn _OSKextCreateRequest( const char * predicate, - OSDictionary ** requestP); + OSSharedPtr & requestP); static OSString * _OSKextGetRequestPredicate(OSDictionary * requestDict); static OSObject * _OSKextGetRequestArgument( OSDictionary * requestDict, @@ -103,12 +141,15 @@ static bool _OSKextSetRequestArgument( const char * argName, OSObject * value); static void * _OSKextExtractPointer(OSData * wrapper); +static OSKextRequestResourceCallback _OSKextExtractCallbackPointer(OSData * wrapper); static OSReturn _OSDictionarySetCStringValue( OSDictionary * dict, const char * key, const char * value); -static bool _OSKextInPrelinkRebuildWindow(void); static bool _OSKextInUnloadedPrelinkedKexts(const OSSymbol * theBundleID); +#if CONFIG_KXLD +static bool _OSKextInPrelinkRebuildWindow(void); +#endif // We really should add containsObject() & containsCString to OSCollection & subclasses. // So few pad slots, though.... @@ -243,6 +284,8 @@ typedef struct MkextEntryRef { static bool sPrelinkBoot = false; static bool sSafeBoot = false; static bool sKeepSymbols = false; +static bool sPanicOnKCMismatch = false; +static bool sOSKextWasResetAfterUserspaceReboot = false; /********************************************************************* * sKextLock is the principal lock for OSKext, and guards all static @@ -265,27 +308,30 @@ static bool sKeepSymbols = false; */ static IORecursiveLock * sKextLock = NULL; -static OSDictionary * sKextsByID = NULL; -static OSDictionary * sExcludeListByID = NULL; -static OSKextVersion sExcludeListVersion = 0; -static OSArray * sLoadedKexts = NULL; -static OSArray * sUnloadedPrelinkedKexts = NULL; -static OSArray * sLoadedDriverKitKexts = NULL; +static OSSharedPtr sKextsByID; +static OSSharedPtr sExcludeListByID; +static OSKextVersion sExcludeListVersion = 0; +static OSSharedPtr sLoadedKexts; +static OSSharedPtr sNonLoadableKextsByID; +static OSSharedPtr sUnloadedPrelinkedKexts; +static OSSharedPtr sLoadedDriverKitKexts; -// Requests to kextd waiting to be picked up. -static OSArray * sKernelRequests = NULL; +// Requests to the IOKit daemon waiting to be picked up. 
+static OSSharedPtr sKernelRequests; // Identifier of kext load requests in sKernelRequests -static OSSet * sPostedKextLoadIdentifiers = NULL; -static OSArray * sRequestCallbackRecords = NULL; +static OSSharedPtr sPostedKextLoadIdentifiers; +static OSSharedPtr sRequestCallbackRecords; // Identifiers of all kexts ever requested in kernel; used for prelinked kernel -static OSSet * sAllKextLoadIdentifiers = NULL; +static OSSharedPtr sAllKextLoadIdentifiers; +#if CONFIG_KXLD static KXLDContext * sKxldContext = NULL; +#endif static uint32_t sNextLoadTag = 0; static uint32_t sNextRequestTag = 0; static bool sUserLoadsActive = false; -static bool sKextdActive = false; +static bool sIOKitDaemonActive = false; static bool sDeferredLoadSucceeded = false; static bool sConsiderUnloadsExecuted = false; @@ -387,7 +433,7 @@ static uint32_t gBuiltinKmodsCount; static kernel_section_t * gBuiltinKmodsSectionInfo; static kernel_section_t * gBuiltinKmodsSectionStart; -static const OSSymbol * gIOSurfaceIdentifier; +const OSSymbol * gIOSurfaceIdentifier; vm_tag_t gIOSurfaceTag; /********************************************************************* @@ -410,7 +456,9 @@ static bool sConsiderUnloadsPending = false; static unsigned int sConsiderUnloadDelay = 60; // seconds static thread_call_t sUnloadCallout = NULL; +#if CONFIG_KXLD static thread_call_t sDestroyLinkContextThread = NULL; // one-shot, one-at-a-time thread +#endif // CONFIG_KXLD static bool sSystemSleep = false; // true when system going to sleep static AbsoluteTime sLastWakeTime; // last time we woke up @@ -436,25 +484,25 @@ OSKextLoadedKextSummaryHeader * gLoadedKextSummaries __attribute__((used)) = NUL uint64_t gLoadedKextSummariesTimestamp __attribute__((used)) = 0; static size_t sLoadedKextSummariesAllocSize = 0; -static OSKextActiveAccount * sKextAccounts; +static OSKextActiveAccount * sKextAccounts; static uint32_t sKextAccountsCount; }; /********************************************************************* * sKextLoggingLock protects the logging variables declared immediately below. **********/ -static IOLock * sKextLoggingLock = NULL; +static IOLock * sKextLoggingLock = NULL; -static const OSKextLogSpec kDefaultKernelLogFilter = kOSKextLogBasicLevel | +static const OSKextLogSpec kDefaultKernelLogFilter = kOSKextLogBasicLevel | kOSKextLogVerboseFlagsMask; -static OSKextLogSpec sKernelLogFilter = kDefaultKernelLogFilter; -static bool sBootArgLogFilterFound = false; +static OSKextLogSpec sKernelLogFilter = kDefaultKernelLogFilter; +static bool sBootArgLogFilterFound = false; SYSCTL_UINT(_debug, OID_AUTO, kextlog, CTLFLAG_RW | CTLFLAG_LOCKED, &sKernelLogFilter, 0, "kernel kext logging"); -static OSKextLogSpec sUserSpaceKextLogFilter = kOSKextLogSilentFilter; -static OSArray * sUserSpaceLogSpecArray = NULL; -static OSArray * sUserSpaceLogMessageArray = NULL; +static OSKextLogSpec sUserSpaceKextLogFilter = kOSKextLogSilentFilter; +static OSSharedPtr sUserSpaceLogSpecArray; +static OSSharedPtr sUserSpaceLogMessageArray; /********* * End scope for sKextInnerLock-protected variables. 
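[Editor's illustrative aside, not part of the patch] The recurring conversion throughout these libkern files replaces manual retain/release bookkeeping on raw pointers with OSSharedPtr, whose destructor drops the reference automatically. A minimal sketch of the before/after idiom, assuming a hypothetical OSObject subclass Foo with a withCapacity-style factory (consume() is also hypothetical):

// Old idiom: raw pointer, caller must balance every retain with a release.
Foo *rawObj = Foo::withCapacity(16);            // returns a +1 reference
if (rawObj) {
	consume(rawObj);
	rawObj->release();                      // easy to miss on error paths
}

// New idiom: the smart pointer owns the reference for its whole scope.
OSSharedPtr<Foo> obj = Foo::withCapacity(16);   // factory now returns OSSharedPtr<Foo>
if (obj) {
	consume(obj.get());                     // .get() borrows the raw pointer
	// no explicit release; the reference is dropped when obj goes out of scope
}
// Adopting an existing raw pointer with an extra retain uses
// obj.reset(rawPointer, OSRetain), as in the setObject() conversions above.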
@@ -514,6 +562,7 @@ osdata_kext_free(void * ptr, unsigned int length) #if PRAGMA_MARK #pragma mark KXLD Allocation Callback #endif +#if CONFIG_KXLD /********************************************************************* * KXLD Allocation Callback *********************************************************************/ @@ -527,8 +576,19 @@ kern_allocate( kern_return_t mach_result = KERN_FAILURE; bool success = false; OSKext * theKext = (OSKext *)user_data; - u_long roundSize = round_page(size); - OSData * linkBuffer = NULL;// must release + unsigned int roundSize = 0; + OSSharedPtr linkBuffer; + + if (round_page(size) > UINT_MAX) { + OSKextLog(theKext, + kOSKextLogErrorLevel | + kOSKextLogGeneralFlag, + "%s: Requested memory size is greater than UINT_MAX.", + theKext->getIdentifierCString()); + goto finish; + } + + roundSize = (unsigned int)round_page(size); mach_result = kext_alloc(&result, roundSize, /* fixed */ FALSE); if (mach_result != KERN_SUCCESS) { @@ -559,7 +619,7 @@ kern_allocate( theKext->getIdentifierCString(), (void *)result, (unsigned long)roundSize); - theKext->setLinkedExecutable(linkBuffer); + theKext->setLinkedExecutable(linkBuffer.get()); *flags = kKxldAllocateWritable; success = true; @@ -570,8 +630,6 @@ finish: result = 0; } - OSSafeReleaseNULL(linkBuffer); - return (kxld_addr_t)result; } @@ -620,6 +678,7 @@ kxld_log_callback( OSKextVLog(theKext, logSpec, format, argList); } +#endif // CONFIG_KXLD #if PRAGMA_MARK #pragma mark IOStatistics defines @@ -665,23 +724,24 @@ do { \ #define super OSObject OSDefineMetaClassAndStructors(OSKext, OSObject) +OSDefineMetaClassAndStructors(OSKextSavedMutableSegment, OSObject); + /********************************************************************* *********************************************************************/ /* static */ void OSKext::initialize(void) { - OSData * kernelExecutable = NULL;// do not release + OSSharedPtr kernelExecutable = NULL;// do not release u_char * kernelStart = NULL;// do not free size_t kernelLength = 0; - OSString * scratchString = NULL;// must release IORegistryEntry * registryRoot = NULL;// do not release - OSNumber * kernelCPUType = NULL;// must release - OSNumber * kernelCPUSubtype = NULL;// must release + OSSharedPtr kernelCPUType; + OSSharedPtr kernelCPUSubtype; OSKextLogSpec bootLogFilter = kOSKextLogSilentFilter; bool setResult = false; uint64_t * timestamp = NULL; - char bootArgBuffer[16];// for PE_parse_boot_argn w/strings + __unused char bootArgBuffer[16];// for PE_parse_boot_argn w/strings /* This must be the first thing allocated. Everything else grabs this lock. */ @@ -719,8 +779,17 @@ OSKext::initialize(void) (unsigned)sKernelLogFilter); } +#if !defined(__arm__) && !defined(__arm64__) + /* + * On our ARM targets, the kernelcache/boot kernel collection contains + * the set of kexts required to boot, as specified by KCB. Safeboot is + * either unsupported, or is supported by the bootloader only loading + * the boot kernel collection; as a result OSKext has no role to play + * in safeboot policy on ARM. + */ sSafeBoot = PE_parse_boot_argn("-x", bootArgBuffer, sizeof(bootArgBuffer)) ? true : false; +#endif /* defined(__arm__) && defined(__arm64__) */ if (sSafeBoot) { OSKextLog(/* kext */ NULL, @@ -741,6 +810,15 @@ OSKext::initialize(void) sKeepSymbols = true; #endif + /* + * Should we panic when the SystemKC is not linked against the + * BootKC that was loaded by the booter? 
By default: yes, if the + * "-nokcmismatchpanic" boot-arg is passed, then we _don't_ panic + * on mis-match and instead just print an error and continue. + */ + sPanicOnKCMismatch = PE_parse_boot_argn("-nokcmismatchpanic", bootArgBuffer, + sizeof(bootArgBuffer)) ? false : true; + /* Set up an OSKext instance to represent the kernel itself. */ sKernelKext = new OSKext; @@ -748,8 +826,9 @@ OSKext::initialize(void) kernelStart = (u_char *)&_mh_execute_header; kernelLength = getlastaddr() - (vm_offset_t)kernelStart; + assert(kernelLength <= UINT_MAX); kernelExecutable = OSData::withBytesNoCopy( - kernelStart, kernelLength); + kernelStart, (unsigned int)kernelLength); assert(kernelExecutable); #if KASLR_KEXT_DEBUG @@ -766,7 +845,7 @@ OSKext::initialize(void) sKernelKext->version = OSKextParseVersionString(osrelease); sKernelKext->compatibleVersion = sKernelKext->version; - sKernelKext->linkedExecutable = kernelExecutable; + sKernelKext->linkedExecutable = os::move(kernelExecutable); sKernelKext->interfaceUUID = sKernelKext->copyUUID(); sKernelKext->flags.hasAllDependencies = 1; @@ -789,34 +868,39 @@ OSKext::initialize(void) sKernelKext->infoDict = OSDictionary::withCapacity(5); assert(sKernelKext->infoDict); setResult = sKernelKext->infoDict->setObject(kCFBundleIdentifierKey, - sKernelKext->bundleID); + sKernelKext->bundleID.get()); assert(setResult); setResult = sKernelKext->infoDict->setObject(kOSKernelResourceKey, kOSBooleanTrue); assert(setResult); - scratchString = OSString::withCStringNoCopy(osrelease); - assert(scratchString); - setResult = sKernelKext->infoDict->setObject(kCFBundleVersionKey, - scratchString); - assert(setResult); - OSSafeReleaseNULL(scratchString); + { + OSSharedPtr scratchString(OSString::withCStringNoCopy(osrelease)); + assert(scratchString); + setResult = sKernelKext->infoDict->setObject(kCFBundleVersionKey, + scratchString.get()); + assert(setResult); + } - scratchString = OSString::withCStringNoCopy("mach_kernel"); - assert(scratchString); - setResult = sKernelKext->infoDict->setObject(kCFBundleNameKey, - scratchString); - assert(setResult); - OSSafeReleaseNULL(scratchString); + { + OSSharedPtr scratchString(OSString::withCStringNoCopy("mach_kernel")); + assert(scratchString); + setResult = sKernelKext->infoDict->setObject(kCFBundleNameKey, + scratchString.get()); + assert(setResult); + } /* Add the kernel kext to the bookkeeping dictionaries. Note that * the kernel kext doesn't have a kmod_info struct. copyInfo() * gathers info from other places anyhow. */ - setResult = sKextsByID->setObject(sKernelKext->bundleID, sKernelKext); + setResult = sKextsByID->setObject(sKernelKext->bundleID.get(), sKernelKext); assert(setResult); setResult = sLoadedKexts->setObject(sKernelKext); assert(setResult); + + // XXX: better way with OSSharedPtr? 
+ // sKernelKext remains a valid pointer even after the decref sKernelKext->release(); registryRoot = IORegistryEntry::getRegistryRoot(); @@ -828,11 +912,8 @@ OSKext::initialize(void) 8 * sizeof(_mh_execute_header.cpusubtype)); assert(registryRoot && kernelCPUSubtype && kernelCPUType); - registryRoot->setProperty(kOSKernelCPUTypeKey, kernelCPUType); - registryRoot->setProperty(kOSKernelCPUSubtypeKey, kernelCPUSubtype); - - OSSafeReleaseNULL(kernelCPUType); - OSSafeReleaseNULL(kernelCPUSubtype); + registryRoot->setProperty(kOSKernelCPUTypeKey, kernelCPUType.get()); + registryRoot->setProperty(kOSKernelCPUSubtypeKey, kernelCPUSubtype.get()); gBuiltinKmodsSectionInfo = getsectbyname(kPrelinkInfoSegment, kBuiltinInfoSection); if (gBuiltinKmodsSectionInfo) { @@ -840,20 +921,24 @@ OSKext::initialize(void) assert(gBuiltinKmodsSectionInfo->addr); assert(gBuiltinKmodsSectionInfo->size); - gBuiltinKmodsCount = (gBuiltinKmodsSectionInfo->size / sizeof(kmod_info_t *)); + assert(gBuiltinKmodsSectionInfo->size / sizeof(kmod_info_t *) <= UINT_MAX); + gBuiltinKmodsCount = (unsigned int)(gBuiltinKmodsSectionInfo->size / sizeof(kmod_info_t *)); gBuiltinKmodsSectionStart = getsectbyname(kPrelinkInfoSegment, kBuiltinStartSection); assert(gBuiltinKmodsSectionStart); assert(gBuiltinKmodsSectionStart->addr); assert(gBuiltinKmodsSectionStart->size); - count = (gBuiltinKmodsSectionStart->size / sizeof(uintptr_t)); + assert(gBuiltinKmodsSectionStart->size / sizeof(uintptr_t) <= UINT_MAX); + count = (unsigned int)(gBuiltinKmodsSectionStart->size / sizeof(uintptr_t)); // one extra pointer for the end of last kmod assert(count == (gBuiltinKmodsCount + 1)); vm_kernel_builtinkmod_text = ((uintptr_t *)gBuiltinKmodsSectionStart->addr)[0]; vm_kernel_builtinkmod_text_end = ((uintptr_t *)gBuiltinKmodsSectionStart->addr)[count - 1]; } - gIOSurfaceIdentifier = OSSymbol::withCStringNoCopy("com.apple.iokit.IOSurface"); + + // Don't track this object -- it's never released + gIOSurfaceIdentifier = OSSymbol::withCStringNoCopy("com.apple.iokit.IOSurface").detach(); timestamp = __OSAbsoluteTimePtr(&last_loaded_timestamp); *timestamp = 0; @@ -892,17 +977,27 @@ OSKext::removeKextBootstrap(void) kernel_segment_command_t * seg_to_remove = NULL; -#if __arm__ || __arm64__ - const char * dt_segment_name = NULL; - void * segment_paddress = NULL; - int segment_size = 0; -#endif + const char __unused * dt_segment_name = NULL; + void __unused * segment_paddress = NULL; + int __unused segment_size = 0; OSKextLog(/* kext */ NULL, kOSKextLogProgressLevel | kOSKextLogGeneralFlag, "Jettisoning kext bootstrap segments."); + /* + * keep the linkedit segment around when booted from a new MH_FILESET + * KC because all the kexts shared a linkedit segment. + */ + kc_format_t kc_format; + if (!PE_get_primary_kc_format(&kc_format)) { + OSKextLog(/* kext */ NULL, + kOSKextLogErrorLevel | + kOSKextLogGeneralFlag, + "Unable to determine primary KC format"); + } + /***** * Dispose of unnecessary stuff that the booter didn't need to load. */ @@ -947,10 +1042,7 @@ OSKext::removeKextBootstrap(void) * defining the lower bound for valid physical addresses. */ if (seg_to_remove && seg_to_remove->vmaddr && seg_to_remove->vmsize) { - // 04/18/11 - gab: - // overwrite memory occupied by KLD segment with random data before - // releasing it. 
- read_frandom((void *) seg_to_remove->vmaddr, seg_to_remove->vmsize); + bzero((void *)seg_to_remove->vmaddr, seg_to_remove->vmsize); ml_static_mfree(seg_to_remove->vmaddr, seg_to_remove->vmsize); } #else @@ -962,10 +1054,12 @@ OSKext::removeKextBootstrap(void) /***** * Prelinked kernel's symtab (if there is one). */ - kernel_section_t * sect; - sect = getsectbyname("__PRELINK", "__symtab"); - if (sect && sect->addr && sect->size) { - ml_static_mfree(sect->addr, sect->size); + if (kc_format != KCFormatFileset) { + kernel_section_t * sect; + sect = getsectbyname("__PRELINK", "__symtab"); + if (sect && sect->addr && sect->size) { + ml_static_mfree(sect->addr, sect->size); + } } seg_to_remove = (kernel_segment_command_t *)getsegbyname("__LINKEDIT"); @@ -974,12 +1068,15 @@ OSKext::removeKextBootstrap(void) * pageable, unless keepsyms is set. To do that, we have to copy it from * its booter-allocated memory, free the booter memory, reallocate proper * managed memory, then copy the segment back in. + * + * NOTE: This optimization is not valid for fileset KCs because each + * fileset entry (kext or xnu) in an MH_FILESET has a LINKEDIT segment + * that points to one fileset-global LINKEDIT segment. This + * optimization is also only valid for platforms that support vm + * mapped kexts or mapped kext collections (pageable KCs) */ -#if CONFIG_KXLD -#if (__arm__ || __arm64__) -#error CONFIG_KXLD not expected for this arch -#endif - if (!sKeepSymbols) { +#if VM_MAPPED_KEXTS + if (!sKeepSymbols && kc_format != KCFormatFileset) { kern_return_t mem_result; void *seg_copy = NULL; void *seg_data = NULL; @@ -1044,16 +1141,16 @@ OSKext::removeKextBootstrap(void) /* Free the copy. */ kmem_free(kernel_map, seg_copy_offset, seg_length); + } else if (!sKeepSymbols && kc_format == KCFormatFileset) { + /* Remove the linkedit segment of the Boot KC */ + kernel_mach_header_t *mh = (kernel_mach_header_t *)PE_get_kc_header(KCKindPrimary); + OSKext::jettisonFileSetLinkeditSegment(mh); } -#else /* we are not CONFIG_KXLD */ -#if !(__arm__ || __arm64__) -#error CONFIG_KXLD is expected for this arch -#endif - +#else // !VM_MAPPED_KEXTS /***** * Dump the LINKEDIT segment, unless keepsyms is set. */ - if (!sKeepSymbols) { + if (!sKeepSymbols && kc_format != KCFormatFileset) { dt_segment_name = "Kernel-__LINKEDIT"; if (0 == IODTGetLoaderInfo(dt_segment_name, &segment_paddress, &segment_size)) { @@ -1070,7 +1167,7 @@ OSKext::removeKextBootstrap(void) kOSKextLogGeneralFlag, "keepsyms boot arg specified; keeping linkedit segment for symbols."); } -#endif /* CONFIG_KXLD */ +#endif // VM_MAPPED_KEXTS seg_to_remove = NULL; @@ -1079,13 +1176,22 @@ OSKext::removeKextBootstrap(void) return result; } +#if CONFIG_KXLD /********************************************************************* *********************************************************************/ void OSKext::flushNonloadedKexts( Boolean flushPrelinkedKexts) { - OSSet * keepKexts = NULL;// must release + OSSharedPtr keepKexts; + + /* TODO: make this more efficient with MH_FILESET kexts */ + + // Do not unload prelinked kexts on arm because the kernelcache is not + // structured in a way that allows them to be unmapped +#if !defined(__x86_64__) + flushPrelinkedKexts = false; +#endif /* defined(__x86_64__) */ IORecursiveLockLock(sKextLock); @@ -1108,28 +1214,31 @@ OSKext::flushNonloadedKexts( * any lingering inter-kext references for nonloaded kexts * so they have min. retain counts. 
*/ - sKextsByID->iterateObjects(^bool (const OSSymbol * thisID __unused, OSObject * obj) { - OSKext * thisKext = OSDynamicCast(OSKext, obj); - if (!thisKext) { - return false; - } - if (!flushPrelinkedKexts && thisKext->isPrelinked()) { - keepKexts->setObject(thisKext); - } - if (!thisKext->declaresExecutable()) { - /* - * Don't unload codeless kexts, because they never appear in the loadedKexts array. - * Requesting one from kextd will load it and then immediately remove it by calling - * flushNonloadedKexts(). - * And adding one to loadedKexts breaks code assuming they have kmod_info etc. - */ - keepKexts->setObject(thisKext); - } - - thisKext->flushDependencies(/* forceIfLoaded */ false); - return false; - }); + { + sKextsByID->iterateObjects(^bool (const OSSymbol * thisID __unused, OSObject * obj) { + OSKext * thisKext = OSDynamicCast(OSKext, obj); + if (!thisKext) { + return false; + } + if (!flushPrelinkedKexts && thisKext->isPrelinked()) { + keepKexts->setObject(thisKext); + } else if (!thisKext->declaresExecutable()) { + /* + * Don't unload codeless kexts, because they never appear in the loadedKexts array. + * Requesting one from the IOKit daemon will load it and then immediately remove it by calling + * flushNonloadedKexts(). + * And adding one to loadedKexts breaks code assuming they have kmod_info etc. + */ + keepKexts->setObject(thisKext); + } else if (thisKext->isInFileset()) { + /* keep all kexts in the new MH_FILESET KC */ + keepKexts->setObject(thisKext); + } + thisKext->flushDependencies(/* forceIfLoaded */ false); + return false; + }); + } /* Dump all the kexts in the ID dictionary; we'll repopulate it shortly. */ sKextsByID->flushCollection(); @@ -1158,22 +1267,55 @@ OSKext::flushNonloadedKexts( finish: IORecursiveLockUnlock(sKextLock); + return; +} +#else /* !CONFIG_KXLD */ + +void +OSKext::flushNonloadedKexts( + Boolean flushPrelinkedKexts __unused) +{ + IORecursiveLockLock(sKextLock); + + OSKextLog(/* kext */ NULL, + kOSKextLogProgressLevel | + kOSKextLogKextBookkeepingFlag, + "Flushing dependency info for non-loaded kexts."); - OSSafeReleaseNULL(keepKexts); + /* + * In a world where we don't dynamically link kexts, they all come + * from a kext collection that's either in wired memory, or + * wire-on-demand. We don't need to mess around with moving kexts in + * and out of the sKextsByID array - they can all just stay there. + * Here we just flush the dependency list for kexts that are not + * loaded. 
+ */ + sKextsByID->iterateObjects(^bool (const OSSymbol * thisID __unused, OSObject * obj) { + OSKext * thisKext = OSDynamicCast(OSKext, obj); + if (!thisKext) { + return false; + } + thisKext->flushDependencies(/* forceIfLoaded */ false); + return false; + }); + IORecursiveLockUnlock(sKextLock); return; } +#endif /* CONFIG_KXLD */ + /********************************************************************* *********************************************************************/ /* static */ void -OSKext::setKextdActive(Boolean active) +OSKext::setIOKitDaemonActive(bool active) { + IOServiceTrace(IOSERVICE_KEXTD_ALIVE, 0, 0, 0, 0); IORecursiveLockLock(sKextLock); - sKextdActive = active; + sIOKitDaemonActive = active; if (sKernelRequests->getCount()) { - OSKext::pingKextd(); + OSKext::pingIOKitDaemon(); } IORecursiveLockUnlock(sKextLock); @@ -1190,13 +1332,13 @@ extern void ipc_port_release_send(ipc_port_t); /* static */ OSReturn -OSKext::pingKextd(void) +OSKext::pingIOKitDaemon(void) { OSReturn result = kOSReturnError; #if !NO_KEXTD mach_port_t kextd_port = IPC_PORT_NULL; - if (!sKextdActive) { + if (!sIOKitDaemonActive) { result = kOSKextReturnDisabled; // basically unavailable goto finish; } @@ -1206,7 +1348,7 @@ OSKext::pingKextd(void) OSKextLog(/* kext */ NULL, kOSKextLogErrorLevel | kOSKextLogIPCFlag, - "Can't get kextd port."); + "Can't get " kIOKitDaemonName " port."); goto finish; } @@ -1215,7 +1357,7 @@ OSKext::pingKextd(void) OSKextLog(/* kext */ NULL, kOSKextLogErrorLevel | kOSKextLogIPCFlag, - "kextd ping failed (0x%x).", (int)result); + kIOKitDaemonName " ping failed (0x%x).", (int)result); goto finish; } @@ -1251,7 +1393,7 @@ OSKext::willShutdown(void) #if !NO_KEXTD OSReturn checkResult = kOSReturnError; #endif - OSDictionary * exitRequest = NULL; // must release + OSSharedPtr exitRequest; IORecursiveLockLock(sKextLock); @@ -1260,32 +1402,67 @@ OSKext::willShutdown(void) OSKext::setAutounloadsEnabled(false); OSKext::setKernelRequestsEnabled(false); +#if defined(__x86_64__) || defined(__i386__) + if (IOPMRootDomainGetWillShutdown()) { + OSKext::freeKCFileSetcontrol(); + } +#endif // (__x86_64__) || defined(__i386__) + #if !NO_KEXTD OSKextLog(/* kext */ NULL, kOSKextLogProgressLevel | kOSKextLogGeneralFlag, - "System shutdown; requesting immediate kextd exit."); + "System shutdown; requesting immediate " kIOKitDaemonName " exit."); - checkResult = _OSKextCreateRequest(kKextRequestPredicateRequestKextdExit, - &exitRequest); + checkResult = _OSKextCreateRequest(kKextRequestPredicateRequestDaemonExit, + exitRequest); if (checkResult != kOSReturnSuccess) { goto finish; } - if (!sKernelRequests->setObject(exitRequest)) { + if (!sKernelRequests->setObject(exitRequest.get())) { goto finish; } - OSKext::pingKextd(); + OSKext::pingIOKitDaemon(); finish: #endif IORecursiveLockUnlock(sKextLock); - - OSSafeReleaseNULL(exitRequest); return; } +void +OSKext::willUserspaceReboot(void) +{ + OSKext::willShutdown(); + IOService::userSpaceWillReboot(); + gIOCatalogue->terminateDriversForUserspaceReboot(); +} + +void +OSKext::resetAfterUserspaceReboot(void) +{ + OSSharedPtr arr = OSArray::withCapacity(1); + IOService::updateConsoleUsers(arr.get(), 0, true /* after_userspace_reboot */); + + IORecursiveLockLock(sKextLock); + gIOCatalogue->resetAfterUserspaceReboot(); + IOService::userSpaceDidReboot(); + OSKext::setLoadEnabled(true); + OSKext::setUnloadEnabled(true); + OSKext::setAutounloadsEnabled(true); + OSKext::setKernelRequestsEnabled(true); + sOSKextWasResetAfterUserspaceReboot = true; + 
IORecursiveLockUnlock(sKextLock); +} + +extern "C" void +OSKextResetAfterUserspaceReboot(void) +{ + OSKext::resetAfterUserspaceReboot(); +} + /********************************************************************* *********************************************************************/ /* static */ @@ -1463,20 +1640,38 @@ OSKext::getKernelRequestsEnabled(void) return result; } +static bool +segmentIsMutable(kernel_segment_command_t *seg) +{ + /* Mutable segments have to have VM_PROT_WRITE */ + if ((seg->maxprot & VM_PROT_WRITE) == 0) { + return false; + } + /* Exclude the __DATA_CONST segment */ + if (strncmp(seg->segname, "__DATA_CONST", sizeof(seg->segname)) == 0) { + return false; + } + /* Exclude __LINKEDIT */ + if (strncmp(seg->segname, "__LINKEDIT", sizeof(seg->segname)) == 0) { + return false; + } + return true; +} + #if PRAGMA_MARK #pragma mark Kext Life Cycle #endif /********************************************************************* *********************************************************************/ -OSKext * +OSSharedPtr OSKext::withPrelinkedInfoDict( OSDictionary * anInfoDict, - bool doCoalesedSlides) + bool doCoalescedSlides, + kc_kind_t type) { - OSKext * newKext = new OSKext; + OSSharedPtr newKext(OSMakeShared()); - if (newKext && !newKext->initWithPrelinkedInfoDict(anInfoDict, doCoalesedSlides)) { - newKext->release(); + if (newKext && !newKext->initWithPrelinkedInfoDict(anInfoDict, doCoalescedSlides, type)) { return NULL; } @@ -1488,16 +1683,20 @@ OSKext::withPrelinkedInfoDict( bool OSKext::initWithPrelinkedInfoDict( OSDictionary * anInfoDict, - bool doCoalesedSlides) + bool doCoalescedSlides, + kc_kind_t type) { bool result = false; - OSString * kextPath = NULL;// do not release - OSNumber * addressNum = NULL;// reused; do not release - OSNumber * lengthNum = NULL;// reused; do not release - void * data = NULL;// do not free - void * srcData = NULL;// do not free - OSData * prelinkedExecutable = NULL;// must release - uint32_t length = 0; // reused + OSString * kextPath = NULL; // do not release + OSNumber * addressNum = NULL; // reused; do not release + OSNumber * lengthNum = NULL; // reused; do not release + OSBoolean * scratchBool = NULL; // do not release + void * data = NULL; // do not free + void * srcData = NULL; // do not free + OSSharedPtr prelinkedExecutable; + uint32_t length = 0; // reused + uintptr_t kext_slide = PE_get_kc_slide(type); + bool shouldSaveSegments = false; if (!super::init()) { goto finish; @@ -1511,35 +1710,35 @@ OSKext::initWithPrelinkedInfoDict( if (!setInfoDictionaryAndPath(anInfoDict, kextPath)) { goto finish; } + #if KASLR_KEXT_DEBUG - IOLog("kaslr: doCoalesedSlides %d kext %s \n", doCoalesedSlides, getIdentifierCString()); + IOLog("kaslr: doCoalescedSlides %d kext %s \n", doCoalescedSlides, getIdentifierCString()); #endif /* Also get the executable's bundle-relative path if present. * Don't look for an arch-specific path property. 
*/ - executableRelPath = OSDynamicCast(OSString, - anInfoDict->getObject(kPrelinkExecutableRelativePathKey)); - if (executableRelPath) { - executableRelPath->retain(); - } - - userExecutableRelPath = OSDynamicCast(OSString, - anInfoDict->getObject("CFBundleUEXTExecutable")); - if (userExecutableRelPath) { - userExecutableRelPath->retain(); - } + executableRelPath.reset(OSDynamicCast(OSString, + anInfoDict->getObject(kPrelinkExecutableRelativePathKey)), OSRetain); + userExecutableRelPath.reset(OSDynamicCast(OSString, + anInfoDict->getObject(kCFBundleDriverKitExecutableKey)), OSRetain); /* Don't need the paths to be in the info dictionary any more. */ anInfoDict->removeObject(kPrelinkBundlePathKey); anInfoDict->removeObject(kPrelinkExecutableRelativePathKey); + scratchBool = OSDynamicCast(OSBoolean, + getPropertyForHostArch(kOSBundleRequireExplicitLoadKey)); + if (scratchBool == kOSBooleanTrue) { + flags.requireExplicitLoad = 1; + } + /* Create an OSData wrapper around the linked executable. */ addressNum = OSDynamicCast(OSNumber, anInfoDict->getObject(kPrelinkExecutableLoadKey)); - if (addressNum) { + if (addressNum && addressNum->unsigned64BitValue() != kOSKextCodelessKextLoadAddr) { lengthNum = OSDynamicCast(OSNumber, anInfoDict->getObject(kPrelinkExecutableSizeKey)); if (!lengthNum) { @@ -1548,10 +1747,10 @@ OSKext::initWithPrelinkedInfoDict( kOSKextLogArchiveFlag, "Kext %s can't find prelinked kext executable size.", getIdentifierCString()); - goto finish; + return result; } - data = (void *) ml_static_slide((intptr_t) (addressNum->unsigned64BitValue())); + data = (void *) (((uintptr_t) (addressNum->unsigned64BitValue())) + kext_slide); length = (uint32_t) (lengthNum->unsigned32BitValue()); #if KASLR_KEXT_DEBUG @@ -1569,7 +1768,7 @@ OSKext::initWithPrelinkedInfoDict( */ addressNum = OSDynamicCast(OSNumber, anInfoDict->getObject(kPrelinkExecutableSourceKey)); if (addressNum) { - srcData = (void *) ml_static_slide((intptr_t) (addressNum->unsigned64BitValue())); + srcData = (void *) (((uintptr_t) (addressNum->unsigned64BitValue())) + kext_slide); #if KASLR_KEXT_DEBUG IOLog("kaslr: unslid 0x%lx slid 0x%lx - prelink executable source \n", @@ -1618,7 +1817,7 @@ OSKext::initWithPrelinkedInfoDict( #else prelinkedExecutable->setDeallocFunction(osdata_phys_free); #endif - setLinkedExecutable(prelinkedExecutable); + setLinkedExecutable(prelinkedExecutable.get()); addressNum = OSDynamicCast(OSNumber, anInfoDict->getObject(kPrelinkKmodInfoKey)); if (!addressNum) { @@ -1631,14 +1830,19 @@ OSKext::initWithPrelinkedInfoDict( } if (addressNum->unsigned64BitValue() != 0) { - kmod_info = (kmod_info_t *) ml_static_slide((intptr_t) (addressNum->unsigned64BitValue())); - kmod_info->address = ml_static_slide(kmod_info->address); + kmod_info = (kmod_info_t *) (((uintptr_t) (addressNum->unsigned64BitValue())) + kext_slide); + if (kmod_info->address) { + kmod_info->address = (((uintptr_t)(kmod_info->address)) + kext_slide); + } else { + kmod_info->address = (uintptr_t)data; + kmod_info->size = length; + } #if KASLR_KEXT_DEBUG IOLog("kaslr: unslid 0x%lx slid 0x%lx - kmod_info \n", - (unsigned long)ml_static_unslide((vm_offset_t)kmod_info), + (unsigned long)((vm_offset_t)kmod_info) - kext_slide, (unsigned long)kmod_info); IOLog("kaslr: unslid 0x%lx slid 0x%lx - kmod_info->address \n", - (unsigned long)ml_static_unslide(kmod_info->address), + (unsigned long)((vm_offset_t)kmod_info->address) - kext_slide, (unsigned long)kmod_info->address); #endif } @@ -1665,23 +1869,80 @@ OSKext::initWithPrelinkedInfoDict( /* If 
the plist has a UUID for an interface, save that off. */ if (isInterface()) { - interfaceUUID = OSDynamicCast(OSData, - anInfoDict->getObject(kPrelinkInterfaceUUIDKey)); + interfaceUUID.reset(OSDynamicCast(OSData, + anInfoDict->getObject(kPrelinkInterfaceUUIDKey)), OSRetain); if (interfaceUUID) { - interfaceUUID->retain(); anInfoDict->removeObject(kPrelinkInterfaceUUIDKey); } } - result = slidePrelinkedExecutable(doCoalesedSlides); - if (result != kOSReturnSuccess) { + result = (kOSReturnSuccess == slidePrelinkedExecutable(doCoalescedSlides)); + if (!result) { goto finish; } - if (doCoalesedSlides == false) { - /* set VM protections now, wire later at kext load */ - result = setVMAttributes(true, false); - if (result != KERN_SUCCESS) { + kc_type = type; + /* Exclude builtin and codeless kexts */ + if (prelinkedExecutable && kmod_info) { + switch (kc_type) { + case KCKindPrimary: + shouldSaveSegments = ( + getPropertyForHostArch(kOSMutableSegmentCopy) == kOSBooleanTrue || + getPropertyForHostArch(kOSBundleAllowUserLoadKey) == kOSBooleanTrue); + if (shouldSaveSegments) { + flags.resetSegmentsFromImmutableCopy = 1; + } + break; + case KCKindPageable: + flags.resetSegmentsFromVnode = 1; + break; + case KCKindAuxiliary: + if (!pageableKCloaded) { + flags.resetSegmentsFromImmutableCopy = 1; + } else if (resetAuxKCSegmentOnUnload) { + flags.resetSegmentsFromVnode = 1; + } + break; + default: + break; + } + } + + if (flags.resetSegmentsFromImmutableCopy) { + /* Save a pristine copy of the mutable segments */ + kernel_segment_command_t *seg = NULL; + kernel_mach_header_t *k_mh = (kernel_mach_header_t *)kmod_info->address; + + savedMutableSegments = OSArray::withCapacity(0); + + for (seg = firstsegfromheader(k_mh); seg; seg = nextsegfromheader(k_mh, seg)) { + if (!segmentIsMutable(seg)) { + continue; + } + uint64_t unslid_vmaddr = seg->vmaddr - kext_slide; + uint64_t vmsize = seg->vmsize; + OSKextLog(this, kOSKextLogDebugLevel | kOSKextLogLoadFlag, + "Saving kext %s mutable segment %.*s %llx->%llx.", getIdentifierCString(), (int)strnlen(seg->segname, sizeof(seg->segname)), seg->segname, unslid_vmaddr, unslid_vmaddr + vmsize - 1); + OSSharedPtr savedSegment = OSKextSavedMutableSegment::withSegment(seg); + if (!savedSegment) { + OSKextLog(this, + kOSKextLogErrorLevel | + kOSKextLogGeneralFlag, + "Kext %s failed to save mutable segment %llx->%llx.", getIdentifierCString(), unslid_vmaddr, unslid_vmaddr + vmsize - 1); + result = kOSKextReturnInternalError; + goto finish; + } + savedMutableSegments->setObject(savedSegment); + } + } + + if (doCoalescedSlides == false && !flags.resetSegmentsFromVnode) { + /* + * set VM protections now, wire pages for the old style Aux KC now, + * wire pages for the rest of the KC types at load time. + */ + result = (kOSReturnSuccess == setVMAttributes(true, (type == KCKindAuxiliary) ? 
true : false)); + if (!result) { goto finish; } } @@ -1696,8 +1957,98 @@ OSKext::initWithPrelinkedInfoDict( result = registerIdentifier(); finish: - OSSafeReleaseNULL(prelinkedExecutable); + return result; +} + +/********************************************************************* +*********************************************************************/ +/* static */ +OSSharedPtr +OSKext::withCodelessInfo(OSDictionary * anInfoDict) +{ + OSSharedPtr newKext = OSMakeShared(); + + if (newKext && !newKext->initWithCodelessInfo(anInfoDict)) { + return NULL; + } + + return newKext; +} + +/********************************************************************* +*********************************************************************/ +bool +OSKext::initWithCodelessInfo(OSDictionary * anInfoDict) +{ + bool result = false; + OSString * kextPath = NULL; // do not release + OSBoolean * scratchBool = NULL; // do not release + + if (anInfoDict == NULL || !super::init()) { + goto finish; + } + + /* + * Get the path. Don't look for an arch-specific path property. + */ + kextPath = OSDynamicCast(OSString, + anInfoDict->getObject(kKextRequestArgumentCodelessInfoBundlePathKey)); + if (!kextPath) { + OSKextLog(NULL, + kOSKextLogErrorLevel | kOSKextLogLoadFlag, + "Requested codeless kext dictionary does not contain the '%s' key", + kKextRequestArgumentCodelessInfoBundlePathKey); + goto finish; + } + + uniquePersonalityProperties(anInfoDict); + + if (!setInfoDictionaryAndPath(anInfoDict, kextPath)) { + goto finish; + } + + /* + * This path is meant to initialize codeless kexts only. Refuse + * anything that looks like it has an executable and/or declares + * itself as a kernel component. + */ + if (declaresExecutable() || isKernelComponent()) { + OSKextLog(NULL, + kOSKextLogErrorLevel | kOSKextLogLoadFlag, + "Refusing to register codeless kext that declares an executable/kernel component: %s", + getIdentifierCString()); + goto finish; + } + + if (strcmp(getIdentifierCString(), kIOExcludeListBundleID) == 0) { + boolean_t updated = updateExcludeList(infoDict.get()); + if (updated) { + OSKextLog(this, + kOSKextLogDebugLevel | kOSKextLogLoadFlag, + "KextExcludeList was updated to version: %lld", sExcludeListVersion); + } + } + + kc_type = KCKindNone; + + scratchBool = OSDynamicCast(OSBoolean, + getPropertyForHostArch(kOSBundleRequireExplicitLoadKey)); + if (scratchBool == kOSBooleanTrue) { + flags.requireExplicitLoad = 1; + } + + /* Also get the executable's bundle-relative path if present. + * Don't look for an arch-specific path property. 
+ */ + userExecutableRelPath.reset(OSDynamicCast(OSString, + anInfoDict->getObject(kCFBundleDriverKitExecutableKey)), OSRetain); + + /* remove unnecessary paths from the info dict */ + anInfoDict->removeObject(kKextRequestArgumentCodelessInfoBundlePathKey); + result = registerIdentifier(); + +finish: return result; } @@ -1707,46 +2058,49 @@ finish: void OSKext::setAllVMAttributes(void) { - OSCollectionIterator * kextIterator = NULL;// must release - const OSSymbol * thisID = NULL;// do not release + OSSharedPtr kextIterator; + const OSSymbol * thisID = NULL; // do not release IORecursiveLockLock(sKextLock); - kextIterator = OSCollectionIterator::withCollection(sKextsByID); + kextIterator = OSCollectionIterator::withCollection(sKextsByID.get()); if (!kextIterator) { goto finish; } while ((thisID = OSDynamicCast(OSSymbol, kextIterator->getNextObject()))) { - OSKext * thisKext;// do not release + OSKext * thisKext; // do not release thisKext = OSDynamicCast(OSKext, sKextsByID->getObject(thisID)); if (!thisKext || thisKext->isInterface() || !thisKext->declaresExecutable()) { continue; } - /* set VM protections now, wire later at kext load */ - thisKext->setVMAttributes(true, false); + if (!thisKext->flags.resetSegmentsFromVnode) { + /* + * set VM protections now, wire pages for the old style Aux KC now, + * wire pages for the rest of the KC types at load time. + */ + thisKext->setVMAttributes(true, (thisKext->kc_type == KCKindAuxiliary) ? true : false); + } } finish: IORecursiveLockUnlock(sKextLock); - OSSafeReleaseNULL(kextIterator); return; } /********************************************************************* *********************************************************************/ -OSKext * +OSSharedPtr OSKext::withBooterData( OSString * deviceTreeName, OSData * booterData) { - OSKext * newKext = new OSKext; + OSSharedPtr newKext(OSMakeShared()); if (newKext && !newKext->initWithBooterData(deviceTreeName, booterData)) { - newKext->release(); return NULL; } @@ -1770,16 +2124,17 @@ OSKext::initWithBooterData( OSData * booterData) { bool result = false; - _BooterKextFileInfo * kextFileInfo = NULL;// do not free - char * infoDictAddr = NULL;// do not free - void * executableAddr = NULL;// do not free - char * bundlePathAddr = NULL;// do not free + _BooterKextFileInfo * kextFileInfo = NULL; // do not free + char * infoDictAddr = NULL; // do not free + void * executableAddr = NULL; // do not free + char * bundlePathAddr = NULL; // do not free + + OSDictionary * theInfoDict = NULL; // do not release + OSSharedPtr parsedXML; + OSSharedPtr kextPath; - OSObject * parsedXML = NULL;// must release - OSDictionary * theInfoDict = NULL;// do not release - OSString * kextPath = NULL;// must release - OSString * errorString = NULL;// must release - OSData * executable = NULL;// must release + OSSharedPtr errorString; + OSSharedPtr executable; if (!super::init()) { goto finish; @@ -1818,9 +2173,9 @@ OSKext::initWithBooterData( goto finish; } - parsedXML = OSUnserializeXML(infoDictAddr, &errorString); + parsedXML = OSUnserializeXML(infoDictAddr, errorString); if (parsedXML) { - theInfoDict = OSDynamicCast(OSDictionary, parsedXML); + theInfoDict = OSDynamicCast(OSDictionary, parsedXML.get()); } if (!theInfoDict) { const char * errorCString = "(unknown error)"; @@ -1852,7 +2207,7 @@ OSKext::initWithBooterData( deviceTreeName->getCStringNoCopy()); goto finish; } - bundlePathAddr[kextFileInfo->bundlePathLength - 1] = '\0'; // just in case! 
+ bundlePathAddr[kextFileInfo->bundlePathLength - 1] = '\0'; // just in case! kextPath = OSString::withCString(bundlePathAddr); if (!kextPath) { @@ -1865,7 +2220,7 @@ OSKext::initWithBooterData( } } - if (!setInfoDictionaryAndPath(theInfoDict, kextPath)) { + if (!setInfoDictionaryAndPath(theInfoDict, kextPath.get())) { goto finish; } @@ -1898,7 +2253,7 @@ OSKext::initWithBooterData( /* A kext with an executable needs to retain the whole booterData * object to keep the executable in memory. */ - if (!setExecutable(executable, booterData)) { + if (!setExecutable(executable.get(), booterData)) { OSKextLog(this, kOSKextLogErrorLevel | kOSKextLogGeneralFlag, @@ -1911,11 +2266,6 @@ OSKext::initWithBooterData( result = registerIdentifier(); finish: - OSSafeReleaseNULL(parsedXML); - OSSafeReleaseNULL(kextPath); - OSSafeReleaseNULL(errorString); - OSSafeReleaseNULL(executable); - return result; } @@ -1925,15 +2275,17 @@ bool OSKext::registerIdentifier(void) { bool result = false; - OSKext * existingKext = NULL;// do not release + OSKext * existingKext = NULL; // do not release bool existingIsLoaded = false; bool existingIsPrelinked = false; + bool existingIsCodeless = false; + bool existingIsDext = false; OSKextVersion newVersion = -1; OSKextVersion existingVersion = -1; char newVersionCString[kOSKextVersionMaxLength]; char existingVersionCString[kOSKextVersionMaxLength]; - OSData * newUUID = NULL;// must release - OSData * existingUUID = NULL;// must release + OSSharedPtr newUUID; + OSSharedPtr existingUUID; IORecursiveLockLock(sKextLock); @@ -1946,9 +2298,9 @@ OSKext::registerIdentifier(void) /* If we don't have an existing kext with this identifier, * just record the new kext and we're done! */ - existingKext = OSDynamicCast(OSKext, sKextsByID->getObject(bundleID)); + existingKext = OSDynamicCast(OSKext, sKextsByID->getObject(bundleID.get())); if (!existingKext) { - sKextsByID->setObject(bundleID, this); + sKextsByID->setObject(bundleID.get(), this); result = true; goto finish; } @@ -1961,27 +2313,44 @@ OSKext::registerIdentifier(void) existingIsLoaded = existingKext->isLoaded(); existingIsPrelinked = existingKext->isPrelinked(); + existingIsDext = existingKext->isDriverKit(); + existingIsCodeless = !existingKext->declaresExecutable() && !existingIsDext; - /* If we have a kext with this identifier that's already loaded/prelinked, - * we can't use the new one, but let's be really thorough and check how - * the two are related for a precise diagnostic log message. + /* If we have a non-codeless kext with this identifier that's already + * loaded/prelinked, we can't use the new one, but let's be really + * thorough and check how the two are related for a precise diagnostic + * log message. * - * Note that user space can't find out about nonloaded prelinked kexts, - * so in this case we log a message when new & existing are equivalent - * at the step rather than warning level, because we are always going - * be getting a copy of the kext in the user load request mkext. + * This check is valid for kexts that declare an executable and for + * dexts, but not for codeless kexts - we can just replace those. */ - if (existingIsLoaded || existingIsPrelinked) { + if ((!existingIsCodeless || existingIsDext) && + (existingIsLoaded || existingIsPrelinked)) { bool sameVersion = (newVersion == existingVersion); - bool sameExecutable = true; // assume true unless we have UUIDs + bool sameExecutable = true; // assume true unless we have UUIDs /* Only get the UUID if the existing kext is loaded. 
Doing so * might have to uncompress an mkext executable and we shouldn't * take that hit when neither kext is loaded. + * + * Note: there is no decompression that happens when all kexts + * are loaded from kext collecitons. */ newUUID = copyUUID(); existingUUID = existingKext->copyUUID(); + if (existingIsDext && !isDriverKit()) { + OSKextLog(this, + kOSKextLogWarningLevel | + kOSKextLogKextBookkeepingFlag, + "Notice - new kext %s, v%s matches a %s dext" + "with the same bundle ID, v%s.", + getIdentifierCString(), newVersionCString, + (existingIsLoaded ? "loaded" : "prelinked"), + existingVersionCString); + goto finish; + } + /* I'm entirely too paranoid about checking equivalence of executables, * but I remember nasty problems with it in the past. * @@ -1989,7 +2358,7 @@ OSKext::registerIdentifier(void) * - If only one kext has a UUID, they're definitely different. */ if (newUUID && existingUUID) { - sameExecutable = newUUID->isEqualTo(existingUUID); + sameExecutable = newUUID->isEqualTo(existingUUID.get()); } else if (newUUID || existingUUID) { sameExecutable = false; } @@ -2001,9 +2370,6 @@ OSKext::registerIdentifier(void) * in-kernel for a prelinked kext). We certainly don't want to do * a whole fake link for the new kext just to compare, either. */ - - OSKextVersionGetString(version, newVersionCString, - sizeof(newVersionCString)); OSKextLog(this, kOSKextLogWarningLevel | kOSKextLogKextBookkeepingFlag, @@ -2046,7 +2412,30 @@ OSKext::registerIdentifier(void) } } goto finish; - } /* if (existingIsLoaded || existingIsPrelinked) */ + } /* if ((!existingIsCodeless || existingIsDext) && (existingIsLoaded || existingIsPrelinked)) */ + + /* Refuse to allow an existing loaded codeless kext be replaced by a + * normal kext with the same bundle ID. + */ + if (existingIsCodeless && declaresExecutable()) { + OSKextLog(this, + kOSKextLogWarningLevel | kOSKextLogKextBookkeepingFlag, + "Refusing new kext %s, v%s: a codeless copy is already %s", + getIdentifierCString(), newVersionCString, + (existingIsLoaded ? "loaded" : "prelinked")); + goto finish; + } + + /* Dexts packaged in the BootKC will be protected against replacement + * by non-dexts by the logic above which checks if they are prelinked. + * Dexts which are prelinked into the System KC will be registered + * before any other kexts in the AuxKC are registered, and we never + * put dexts in the AuxKC. Therefore, there is no need to check if an + * existing object is a dext and is being replaced by a non-dext. + * The scenario cannot happen by construction. + * + * See: OSKext::loadFileSetKexts() + */ /* We have two nonloaded/nonprelinked kexts, so our decision depends on whether * user loads are happening or if we're still in early boot. User agents are @@ -2055,7 +2444,7 @@ OSKext::registerIdentifier(void) * see an older unloaded copy hanging around). */ if (sUserLoadsActive) { - sKextsByID->setObject(bundleID, this); + sKextsByID->setObject(bundleID.get(), this); result = true; OSKextLog(this, @@ -2074,7 +2463,7 @@ OSKext::registerIdentifier(void) * kexts might have duplicates. 
*/ if (newVersion > existingVersion) { - sKextsByID->setObject(bundleID, this); + sKextsByID->setObject(bundleID.get(), this); result = true; OSKextLog(this, @@ -2109,9 +2498,6 @@ finish: getIdentifierCString(), newVersionCString); } - OSSafeReleaseNULL(newUUID); - OSSafeReleaseNULL(existingUUID); - return result; } @@ -2125,13 +2511,13 @@ OSKext::setInfoDictionaryAndPath( OSString * aPath) { bool result = false; - OSString * bundleIDString = NULL;// do not release - OSString * versionString = NULL;// do not release - OSString * compatibleVersionString = NULL;// do not release - const char * versionCString = NULL;// do not free - const char * compatibleVersionCString = NULL;// do not free - OSBoolean * scratchBool = NULL;// do not release - OSDictionary * scratchDict = NULL;// do not release + OSString * bundleIDString = NULL; // do not release + OSString * versionString = NULL; // do not release + OSString * compatibleVersionString = NULL; // do not release + const char * versionCString = NULL; // do not free + const char * compatibleVersionCString = NULL; // do not free + OSBoolean * scratchBool = NULL; // do not release + OSDictionary * scratchDict = NULL; // do not release if (infoDict) { panic("Attempt to set info dictionary on a kext " @@ -2143,8 +2529,7 @@ OSKext::setInfoDictionaryAndPath( goto finish; } - infoDict = aDictionary; - infoDict->retain(); + infoDict.reset(aDictionary, OSRetain); /* Check right away if the info dictionary has any log flags. */ @@ -2182,8 +2567,7 @@ OSKext::setInfoDictionaryAndPath( * just something nice to have for bookkeeping). */ if (aPath) { - path = aPath; - path->retain(); + path.reset(aPath, OSRetain); } /***** @@ -2221,7 +2605,7 @@ OSKext::setInfoDictionaryAndPath( goto finish; } - compatibleVersion = -1; // set to illegal value for kexts that don't have + compatibleVersion = -1; // set to illegal value for kexts that don't have compatibleVersionString = OSDynamicCast(OSString, getPropertyForHostArch(kOSBundleCompatibleVersionKey)); @@ -2271,7 +2655,7 @@ OSKext::setInfoDictionaryAndPath( getPropertyForHostArch(kOSKernelResourceKey)); if (scratchBool == kOSBooleanTrue) { flags.kernelComponent = 1; - flags.interface = 1; // xxx - hm. the kernel itself isn't an interface... + flags.interface = 1; // xxx - hm. the kernel itself isn't an interface... flags.started = 1; /* A kernel component has one implicit dependency on the kernel. 
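The duplicate-identifier path in OSKext::registerIdentifier() above decides whether two registrations share an executable purely from their UUIDs: matching UUIDs mean the same binary, a UUID on only one side means different binaries, and no UUID on either side is treated as equivalent. A minimal sketch of that rule, with an illustrative helper name and OSData arguments standing in for the values returned by copyUUID():

    /* Sketch only; sketchSameExecutable is an illustrative name, not part of OSKext. */
    static bool
    sketchSameExecutable(OSData * newUUID, OSData * existingUUID)
    {
        if (newUUID && existingUUID) {
            /* Both registrations carry a UUID: same executable only if they match. */
            return newUUID->isEqualTo(existingUUID);
        }
        if (newUUID || existingUUID) {
            /* Only one side carries a UUID: definitely different executables. */
            return false;
        }
        /* Neither carries a UUID: assumed equivalent, as in registerIdentifier(). */
        return true;
    }

registerIdentifier() only reaches this comparison when the existing kext is loaded or prelinked, since copying a UUID out of an mkext could otherwise force a needless decompression.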
@@ -2305,7 +2689,7 @@ OSKext::setExecutable( bool externalDataIsMkext) { bool result = false; - const char * executableKey = NULL; // do not free + const char * executableKey = NULL; // do not free if (!anExecutable) { infoDict->removeObject(_kOSKextExecutableKey); @@ -2347,10 +2731,20 @@ finish: static void uniqueStringPlistProperty(OSDictionary * dict, const char * key) { - OSString * stringValue = NULL;// do not release - const OSSymbol * symbolValue = NULL; // must release + OSObject * value = NULL; // do not release + OSString * stringValue = NULL; // do not release + OSSharedPtr symbolValue; + + value = dict->getObject(key); + if (!value) { + goto finish; + } + if (OSDynamicCast(OSSymbol, value)) { + /* this is already an OSSymbol: we're good */ + goto finish; + } - stringValue = OSDynamicCast(OSString, dict->getObject(key)); + stringValue = OSDynamicCast(OSString, value); if (!stringValue) { goto finish; } @@ -2360,13 +2754,9 @@ uniqueStringPlistProperty(OSDictionary * dict, const char * key) goto finish; } - dict->setObject(key, symbolValue); + dict->setObject(key, symbolValue.get()); finish: - if (symbolValue) { - symbolValue->release(); - } - return; } @@ -2375,45 +2765,57 @@ finish: static void uniqueStringPlistProperty(OSDictionary * dict, const OSString * key) { - OSString * stringValue = NULL;// do not release - const OSSymbol * symbolValue = NULL; // must release + OSObject * value = NULL; // do not release + OSString * stringValue = NULL; // do not release + OSSharedPtr symbolValue; - stringValue = OSDynamicCast(OSString, dict->getObject(key)); - if (!stringValue) { + value = dict->getObject(key); + if (!value) { goto finish; } - - symbolValue = OSSymbol::withString(stringValue); - if (!symbolValue) { + if (OSDynamicCast(OSSymbol, value)) { + /* this is already an OSSymbol: we're good */ goto finish; } - dict->setObject(key, symbolValue); + stringValue = OSDynamicCast(OSString, value); + if (!stringValue) { + goto finish; + } -finish: - if (symbolValue) { - symbolValue->release(); + symbolValue = OSSymbol::withString(stringValue); + if (!symbolValue) { + goto finish; } + dict->setObject(key, symbolValue.get()); + +finish: return; } +void +OSKext::uniquePersonalityProperties(OSDictionary * personalityDict) +{ + OSKext::uniquePersonalityProperties(personalityDict, true); +} + /********************************************************************* * Replace common personality property values with uniqued instances * to save on wired memory. *********************************************************************/ /* static */ void -OSKext::uniquePersonalityProperties(OSDictionary * personalityDict) +OSKext::uniquePersonalityProperties(OSDictionary * personalityDict, bool defaultAddKernelBundleIdentifier) { /* Properties every personality has. 
*/ uniqueStringPlistProperty(personalityDict, kCFBundleIdentifierKey); uniqueStringPlistProperty(personalityDict, kIOProviderClassKey); - uniqueStringPlistProperty(personalityDict, gIOClassKey); + uniqueStringPlistProperty(personalityDict, gIOClassKey.get()); if (personalityDict->getObject(kCFBundleIdentifierKernelKey)) { uniqueStringPlistProperty(personalityDict, kCFBundleIdentifierKernelKey); - } else { + } else if (defaultAddKernelBundleIdentifier) { personalityDict->setObject(kCFBundleIdentifierKernelKey, personalityDict->getObject(kCFBundleIdentifierKey)); } @@ -2449,16 +2851,16 @@ OSKext::free(void) panic("Attempt to free loaded kext %s.", getIdentifierCString()); } - OSSafeReleaseNULL(infoDict); - OSSafeReleaseNULL(bundleID); - OSSafeReleaseNULL(path); - OSSafeReleaseNULL(executableRelPath); - OSSafeReleaseNULL(userExecutableRelPath); - OSSafeReleaseNULL(dependencies); - OSSafeReleaseNULL(linkedExecutable); - OSSafeReleaseNULL(metaClasses); - OSSafeReleaseNULL(interfaceUUID); - OSSafeReleaseNULL(driverKitUUID); + infoDict.reset(); + bundleID.reset(); + path.reset(); + executableRelPath.reset(); + userExecutableRelPath.reset(); + dependencies.reset(); + linkedExecutable.reset(); + metaClasses.reset(); + interfaceUUID.reset(); + driverKitUUID.reset(); if (isInterface() && kmod_info) { kfree(kmod_info, sizeof(kmod_info_t)); @@ -2471,6 +2873,13 @@ OSKext::free(void) #if PRAGMA_MARK #pragma mark Mkext files #endif + +#if CONFIG_KXLD +/* + * mkext archives are really only relevant on kxld-enabled kernels. + * Without a dynamic kernel linker, we don't need to support any mkexts. + */ + /********************************************************************* *********************************************************************/ OSReturn @@ -2479,7 +2888,7 @@ OSKext::readMkextArchive(OSData * mkextData, { OSReturn result = kOSKextReturnBadData; uint32_t mkextLength = 0; - mkext_header * mkextHeader = NULL;// do not free + mkext_header * mkextHeader = NULL; // do not free uint32_t mkextVersion = 0; /* Note default return of kOSKextReturnBadData above. 
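uniqueStringPlistProperty() above keeps wired memory down by swapping an OSString property value for the interned OSSymbol with the same contents, so personality dictionaries that repeat a value such as IOProviderClass end up sharing one object rather than holding separate string copies. A small sketch of that effect for a single key; the two personality dictionaries are illustrative stand-ins, and the replacement is written out inline here rather than calling into OSKext:

    /* Illustrative only: mirrors what uniqueStringPlistProperty() does for one key. */
    static void
    sketchPersonalityUniquing(OSDictionary * a, OSDictionary * b)
    {
        OSString * sa = OSDynamicCast(OSString, a->getObject(kIOProviderClassKey));
        OSString * sb = OSDynamicCast(OSString, b->getObject(kIOProviderClassKey));
        if (!sa || !sb) {
            return;
        }

        OSSharedPtr<const OSSymbol> syma = OSSymbol::withString(sa);
        OSSharedPtr<const OSSymbol> symb = OSSymbol::withString(sb);
        a->setObject(kIOProviderClassKey, syma.get());
        b->setObject(kIOProviderClassKey, symb.get());

        /* Equal strings intern to the same OSSymbol, so the two personalities
         * now reference a single shared value instead of two wired copies. */
        assert(a->getObject(kIOProviderClassKey) == b->getObject(kIOProviderClassKey));
    }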
@@ -2546,22 +2955,30 @@ OSKext::readMkext2Archive( { OSReturn result = kOSReturnError; uint32_t mkextLength; - mkext2_header * mkextHeader = NULL;// do not free - void * mkextEnd = NULL;// do not free + mkext2_header * mkextHeader = NULL; // do not free + void * mkextEnd = NULL; // do not free uint32_t mkextVersion; uint8_t * crc_address = NULL; + size_t crc_buffer_size = 0; uint32_t checksum; uint32_t mkextPlistOffset; uint32_t mkextPlistCompressedSize; - char * mkextPlistEnd = NULL;// do not free + char * mkextPlistEnd = NULL; // do not free uint32_t mkextPlistFullSize; - OSString * errorString = NULL;// must release - OSData * mkextPlistUncompressedData = NULL;// must release - const char * mkextPlistDataBuffer = NULL;// do not free - OSObject * parsedXML = NULL;// must release - OSDictionary * mkextPlist = NULL;// do not release - OSArray * mkextInfoDictArray = NULL;// do not release + OSSharedPtr errorString; + OSSharedPtr mkextPlistUncompressedData; + const char * mkextPlistDataBuffer = NULL; // do not free + OSSharedPtr parsedXML; + OSDictionary * mkextPlist = NULL; // do not release + OSArray * mkextInfoDictArray = NULL; // do not release uint32_t count, i; + kc_format_t kc_format; + + if (!PE_get_primary_kc_format(&kc_format)) { + OSKextLog(/* kext */ NULL, kOSKextLogErrorLevel | kOSKextLogGeneralFlag, + "Unable to determine primary KC format"); + goto finish; + } mkextLength = mkextData->getLength(); mkextHeader = (mkext2_header *)mkextData->getBytesNoCopy(); @@ -2569,9 +2986,18 @@ OSKext::readMkext2Archive( mkextVersion = MKEXT_GET_VERSION(mkextHeader); crc_address = (u_int8_t *)&mkextHeader->version; - checksum = mkext_adler32(crc_address, - (uintptr_t)mkextHeader + - MKEXT_GET_LENGTH(mkextHeader) - (uintptr_t)crc_address); + crc_buffer_size = (uintptr_t)mkextHeader + + MKEXT_GET_LENGTH(mkextHeader) - (uintptr_t)crc_address; + if (crc_buffer_size > INT32_MAX) { + OSKextLog(/* kext */ NULL, + kOSKextLogErrorLevel | + kOSKextLogArchiveFlag, + "Mkext archive size is too large (%lu > INT32_MAX).", + crc_buffer_size); + result = kOSKextReturnBadData; + goto finish; + } + checksum = mkext_adler32(crc_address, (int32_t)crc_buffer_size); if (MKEXT_GET_CHECKSUM(mkextHeader) != checksum) { OSKextLog(/* kext */ NULL, @@ -2635,9 +3061,9 @@ OSKext::readMkext2Archive( /* IOCFSerialize added a nul byte to the end of the string. Very nice of it. */ - parsedXML = OSUnserializeXML(mkextPlistDataBuffer, &errorString); + parsedXML = OSUnserializeXML(mkextPlistDataBuffer, errorString); if (parsedXML) { - mkextPlist = OSDynamicCast(OSDictionary, parsedXML); + mkextPlist = OSDynamicCast(OSDictionary, parsedXML.get()); } if (!mkextPlist) { const char * errorCString = "(unknown error)"; @@ -2654,14 +3080,6 @@ OSKext::readMkext2Archive( goto finish; } - /* If the caller needs the plist, hand it back and retain it. - * (This function releases it at the end.) - */ - if (mkextPlistOut) { - *mkextPlistOut = mkextPlist; - (*mkextPlistOut)->retain(); - } - mkextInfoDictArray = OSDynamicCast(OSArray, mkextPlist->getObject(kMKEXTInfoDictionariesKey)); if (!mkextInfoDictArray) { @@ -2685,37 +3103,70 @@ OSKext::readMkext2Archive( * Any creation/registration failures are already logged for us. 
*/ if (infoDict) { - OSKext * newKext = OSKext::withMkext2Info(infoDict, mkextData); - OSSafeReleaseNULL(newKext); + OSSharedPtr newKext = OSKext::withMkext2Info(infoDict, mkextData); + + /* Fail dynamic loading of a kext when booted from MH_FILESET */ + if (kc_format == KCFormatFileset && + newKext && + !(newKext->isPrelinked()) && + newKext->declaresExecutable()) { + result = kOSReturnError; + printf("Kext LOG: Dynamic loading of kext denied for kext %s\n", + newKext->getIdentifier() ? newKext->getIdentifierCString() : "unknown kext"); + + OSKextLog(/* kext */ NULL, kOSKextLogErrorLevel | kOSKextLogArchiveFlag, + "Dynamic loading of kext denied for kext %s\n", + newKext->getIdentifier() ? newKext->getIdentifierCString() : "unknown kext"); + goto finish; + } } } + /* If the caller needs the plist, hand them back our copy + */ + if (mkextPlistOut) { + *mkextPlistOut = mkextPlist; + parsedXML.detach(); + } + /* Even if we didn't keep any kexts from the mkext, we may have a load * request to process, so we are successful (no errors occurred). */ result = kOSReturnSuccess; finish: + return result; +} - OSSafeReleaseNULL(parsedXML); - OSSafeReleaseNULL(mkextPlistUncompressedData); - OSSafeReleaseNULL(errorString); +/* static */ +OSReturn +OSKext::readMkext2Archive( + OSData * mkextData, + OSSharedPtr &mkextPlistOut, + uint32_t * checksumPtr) +{ + OSDictionary * mkextPlist = NULL; + OSReturn ret; - return result; + if (kOSReturnSuccess == (ret = readMkext2Archive(mkextData, + &mkextPlist, + checksumPtr))) { + mkextPlistOut.reset(mkextPlist, OSNoRetain); + } + return ret; } /********************************************************************* *********************************************************************/ /* static */ -OSKext * +OSSharedPtr OSKext::withMkext2Info( OSDictionary * anInfoDict, OSData * mkextData) { - OSKext * newKext = new OSKext; + OSSharedPtr newKext = OSMakeShared(); if (newKext && !newKext->initWithMkext2Info(anInfoDict, mkextData)) { - newKext->release(); return NULL; } @@ -2730,10 +3181,9 @@ OSKext::initWithMkext2Info( OSData * mkextData) { bool result = false; - OSString * kextPath = NULL;// do not release - OSNumber * executableOffsetNum = NULL;// do not release - OSCollectionIterator * iterator = NULL;// must release - OSData * executable = NULL;// must release + OSString * kextPath = NULL; // do not release + OSNumber * executableOffsetNum = NULL; // do not release + OSSharedPtr executable; if (anInfoDict == NULL || !super::init()) { goto finish; @@ -2750,11 +3200,8 @@ OSKext::initWithMkext2Info( /* If we have a path to the executable, save it. */ - executableRelPath = OSDynamicCast(OSString, - anInfoDict->getObject(kMKEXTExecutableRelativePathKey)); - if (executableRelPath) { - executableRelPath->retain(); - } + executableRelPath.reset(OSDynamicCast(OSString, + anInfoDict->getObject(kMKEXTExecutableRelativePathKey)), OSRetain); /* Don't need the paths to be in the info dictionary any more. 
*/ @@ -2770,7 +3217,7 @@ OSKext::initWithMkext2Info( if (!executable) { goto finish; } - if (!setExecutable(executable, mkextData, true)) { + if (!setExecutable(executable.get(), mkextData, true)) { goto finish; } } @@ -2778,21 +3225,18 @@ OSKext::initWithMkext2Info( result = registerIdentifier(); finish: - - OSSafeReleaseNULL(executable); - OSSafeReleaseNULL(iterator); return result; } /********************************************************************* *********************************************************************/ -OSData * +OSSharedPtr OSKext::createMkext2FileEntry( OSData * mkextData, OSNumber * offsetNum, const char * name) { - OSData * result = NULL; + OSSharedPtr result; MkextEntryRef entryRef; uint8_t * mkextBuffer = (uint8_t *)mkextData->getBytesNoCopy(); uint32_t entryOffset = offsetNum->unsigned32BitValue(); @@ -2805,7 +3249,7 @@ OSKext::createMkext2FileEntry( entryRef.mkext = (mkext_basic_header *)mkextBuffer; entryRef.fileinfo = mkextBuffer + entryOffset; if (!result->appendBytes(&entryRef, sizeof(entryRef))) { - OSSafeReleaseNULL(result); + result.reset(); goto finish; } @@ -2855,7 +3299,8 @@ z_alloc(void * notused __unused, u_int num_items, u_int size) } uint32_t allocSize = (uint32_t)allocSize64; - zmem = (z_mem *)kalloc_tag(allocSize, VM_KERN_MEMORY_OSKEXT); + zmem = (z_mem *)kheap_alloc_tag(KHEAP_DATA_BUFFERS, allocSize, + Z_WAITOK, VM_KERN_MEMORY_OSKEXT); if (!zmem) { goto finish; } @@ -2870,23 +3315,22 @@ z_free(void * notused __unused, void * ptr) { uint32_t * skipper = (uint32_t *)ptr - 1; z_mem * zmem = (z_mem *)skipper; - kfree(zmem, zmem->alloc_size); + kheap_free(KHEAP_DATA_BUFFERS, zmem, zmem->alloc_size); return; } }; -OSData * +OSSharedPtr OSKext::extractMkext2FileData( UInt8 * data, const char * name, uint32_t compressedSize, uint32_t fullSize) { - OSData * result = NULL; + OSSharedPtr result; + OSSharedPtr uncompressedData; // release on error - OSData * uncompressedData = NULL;// release on error - - uint8_t * uncompressedDataBuffer = NULL;// do not free + uint8_t * uncompressedDataBuffer = NULL; // do not free unsigned long uncompressedSize; z_stream zstream; bool zstream_inited = false; @@ -3029,7 +3473,7 @@ OSKext::extractMkext2FileData( goto finish; } - result = uncompressedData; + result = os::move(uncompressedData); finish: /* Don't bother checking return, nothing we can do on fail. 
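The z_alloc()/z_free() callbacks above exist because zlib hands its free routine only a pointer, while kheap_free() also needs the allocation's size; z_alloc() therefore records the total size in a small header placed just before the bytes zlib sees. A stripped-down sketch of the same layout, written against plain malloc()/free() so it stands alone (the kernel version uses kheap_alloc_tag() and kheap_free() with KHEAP_DATA_BUFFERS, as shown above, and the struct name below is illustrative):

    #include <stdint.h>
    #include <stdlib.h>

    /* Illustrative header: the allocation's total size sits immediately
     * before the payload handed back to zlib. */
    struct sized_hdr {
        uint32_t alloc_size;
    };

    static void *
    sketch_z_alloc(void *opaque, unsigned int items, unsigned int size)
    {
        (void)opaque;
        uint64_t total = (uint64_t)items * size + sizeof(struct sized_hdr);
        if (total > UINT32_MAX) {
            return NULL;
        }
        struct sized_hdr *h = (struct sized_hdr *)malloc((size_t)total);
        if (h == NULL) {
            return NULL;
        }
        h->alloc_size = (uint32_t)total;
        return h + 1;           /* zlib only ever sees the payload pointer */
    }

    static void
    sketch_z_free(void *opaque, void *ptr)
    {
        (void)opaque;
        struct sized_hdr *h = (struct sized_hdr *)ptr - 1;
        /* A sized allocator such as kheap_free() would consume h->alloc_size here. */
        free(h);
    }

Installed as the zalloc/zfree members of the z_stream, this is what lets extractMkext2FileData() return every inflate-time buffer to the heap with the correct size.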
@@ -3038,10 +3482,6 @@ finish: inflateEnd(&zstream); } - if (!result) { - OSSafeReleaseNULL(uncompressedData); - } - return result; } @@ -3059,20 +3499,20 @@ OSKext::loadFromMkext( OSReturn result = kOSReturnError; OSReturn tempResult = kOSReturnError; - OSData * mkextData = NULL;// must release - OSDictionary * mkextPlist = NULL;// must release + OSSharedPtr mkextData; + OSSharedPtr mkextPlist; - OSArray * logInfoArray = NULL;// must release - OSSerialize * serializer = NULL;// must release + OSSharedPtr logInfoArray; + OSSharedPtr serializer; - OSString * predicate = NULL;// do not release - OSDictionary * requestArgs = NULL;// do not release + OSString * predicate = NULL; // do not release + OSDictionary * requestArgs = NULL; // do not release - OSString * kextIdentifier = NULL;// do not release - OSNumber * startKextExcludeNum = NULL;// do not release - OSNumber * startMatchingExcludeNum = NULL;// do not release - OSBoolean * delayAutounloadBool = NULL;// do not release - OSArray * personalityNames = NULL;// do not release + OSString * kextIdentifier = NULL; // do not release + OSNumber * startKextExcludeNum = NULL; // do not release + OSNumber * startMatchingExcludeNum = NULL; // do not release + OSBoolean * delayAutounloadBool = NULL; // do not release + OSArray * personalityNames = NULL; // do not release /* Default values for these two options: regular autounload behavior, * load all kexts, send no personalities. @@ -3133,7 +3573,7 @@ OSKext::loadFromMkext( goto finish; } - result = readMkext2Archive(mkextData, &mkextPlist, NULL); + result = readMkext2Archive(mkextData.get(), mkextPlist, NULL); if (result != kOSReturnSuccess) { OSKextLog(/* kext */ NULL, kOSKextLogErrorLevel | @@ -3142,7 +3582,7 @@ OSKext::loadFromMkext( goto finish; } - predicate = _OSKextGetRequestPredicate(mkextPlist); + predicate = _OSKextGetRequestPredicate(mkextPlist.get()); if (!predicate || !predicate->isEqualTo(kKextRequestPredicateLoad)) { OSKextLog(/* kext */ NULL, kOSKextLogErrorLevel | @@ -3216,7 +3656,7 @@ OSKext::loadFromMkext( if (result != kOSReturnSuccess) { goto finish; } - /* If the load came down from kextd, it will shortly inform IOCatalogue + /* If the load came down from the IOKit daemon, it will shortly inform IOCatalogue * for matching via a separate IOKit calldown. */ @@ -3229,7 +3669,7 @@ finish: logInfoArray = OSKext::clearUserSpaceLogFilter(); if (logInfoArray && logInfoOut && logInfoLengthOut) { - tempResult = OSKext::serializeLogInfo(logInfoArray, + tempResult = OSKext::serializeLogInfo(logInfoArray.get(), logInfoOut, logInfoLengthOut); if (tempResult != kOSReturnSuccess) { result = tempResult; @@ -3238,6 +3678,8 @@ finish: OSKext::flushNonloadedKexts(/* flushPrelinkedKexts */ false); + IORecursiveLockUnlock(sKextLock); + /* Note: mkextDataObject will have been retained by every kext w/an * executable in it. That should all have been flushed out at the * and of the load operation, but you never know.... 
@@ -3250,16 +3692,11 @@ finish: "probable memory leak."); } - IORecursiveLockUnlock(sKextLock); - - OSSafeReleaseNULL(mkextData); - OSSafeReleaseNULL(mkextPlist); - OSSafeReleaseNULL(serializer); - OSSafeReleaseNULL(logInfoArray); - return result; } +#endif // CONFIG_KXLD + /********************************************************************* *********************************************************************/ /* static */ @@ -3272,8 +3709,8 @@ OSKext::serializeLogInfo( OSReturn result = kOSReturnError; char * buffer = NULL; kern_return_t kmem_result = KERN_FAILURE; - OSSerialize * serializer = NULL;// must release; reused - char * logInfo = NULL;// returned by reference + OSSharedPtr serializer; + char * logInfo = NULL; // returned by reference uint32_t logInfoLength = 0; if (!logInfoArray || !logInfoOut || !logInfoLengthOut) { @@ -3296,7 +3733,7 @@ OSKext::serializeLogInfo( * itself to succeed. */ } - if (!logInfoArray->serialize(serializer)) { + if (!logInfoArray->serialize(serializer.get())) { OSKextLog(/* kext */ NULL, kOSKextLogErrorLevel | kOSKextLogIPCFlag, @@ -3327,7 +3764,6 @@ OSKext::serializeLogInfo( result = kOSReturnSuccess; finish: - OSSafeReleaseNULL(serializer); return result; } @@ -3336,16 +3772,13 @@ finish: #endif /********************************************************************* *********************************************************************/ -OSKext * +OSSharedPtr OSKext::lookupKextWithIdentifier(const char * kextIdentifier) { - OSKext * foundKext = NULL; + OSSharedPtr foundKext; IORecursiveLockLock(sKextLock); - foundKext = OSDynamicCast(OSKext, sKextsByID->getObject(kextIdentifier)); - if (foundKext) { - foundKext->retain(); - } + foundKext.reset(OSDynamicCast(OSKext, sKextsByID->getObject(kextIdentifier)), OSRetain); IORecursiveLockUnlock(sKextLock); return foundKext; @@ -3353,7 +3786,7 @@ OSKext::lookupKextWithIdentifier(const char * kextIdentifier) /********************************************************************* *********************************************************************/ -OSKext * +OSSharedPtr OSKext::lookupKextWithIdentifier(OSString * kextIdentifier) { return OSKext::lookupKextWithIdentifier(kextIdentifier->getCStringNoCopy()); @@ -3361,12 +3794,12 @@ OSKext::lookupKextWithIdentifier(OSString * kextIdentifier) /********************************************************************* *********************************************************************/ -OSKext * +OSSharedPtr OSKext::lookupKextWithLoadTag(uint32_t aTag) { - OSKext * foundKext = NULL; // returned + OSSharedPtr foundKext; // returned uint32_t i, j; - OSArray *list[2] = {sLoadedKexts, sLoadedDriverKitKexts}; + OSArray *list[2] = {sLoadedKexts.get(), sLoadedDriverKitKexts.get()}; uint32_t count[2] = {sLoadedKexts->getCount(), sLoadedDriverKitKexts->getCount()}; IORecursiveLockLock(sKextLock); @@ -3375,8 +3808,7 @@ OSKext::lookupKextWithLoadTag(uint32_t aTag) for (i = 0; i < count[j]; i++) { OSKext * thisKext = OSDynamicCast(OSKext, list[j]->getObject(i)); if (thisKext->getLoadTag() == aTag) { - foundKext = thisKext; - foundKext->retain(); + foundKext.reset(thisKext, OSRetain); goto finish; } } @@ -3390,40 +3822,66 @@ finish: /********************************************************************* *********************************************************************/ -OSKext * +OSSharedPtr OSKext::lookupKextWithAddress(vm_address_t address) { - OSKext * foundKext = NULL; // returned + OSSharedPtr foundKext; // returned uint32_t count, i; + kmod_info_t *kmod_info; +#if 
defined(__arm64__) + uint64_t textExecBase; + size_t textExecSize; +#endif /* defined(__arm64__) */ + +#if __has_feature(ptrauth_calls) + address = (vm_address_t)VM_KERNEL_STRIP_PTR(address); +#endif /* __has_feature(ptrauth_calls) */ IORecursiveLockLock(sKextLock); count = sLoadedKexts->getCount(); for (i = 0; i < count; i++) { OSKext * thisKext = OSDynamicCast(OSKext, sLoadedKexts->getObject(i)); - if (thisKext->linkedExecutable) { - vm_address_t kext_start = - (vm_address_t)thisKext->linkedExecutable->getBytesNoCopy(); - vm_address_t kext_end = kext_start + - thisKext->linkedExecutable->getLength(); + if (thisKext == sKernelKext) { + continue; + } + if (thisKext->kmod_info && thisKext->kmod_info->address) { + kmod_info = thisKext->kmod_info; + vm_address_t kext_start = kmod_info->address; + vm_address_t kext_end = kext_start + kmod_info->size; if ((kext_start <= address) && (address < kext_end)) { - foundKext = thisKext; - foundKext->retain(); + foundKext.reset(thisKext, OSRetain); goto finish; } +#if defined(__arm64__) + textExecBase = (uintptr_t) getsegdatafromheader((kernel_mach_header_t *)kmod_info->address, "__TEXT_EXEC", &textExecSize); + if ((textExecBase <= address) && (address < textExecBase + textExecSize)) { + foundKext.reset(thisKext, OSRetain); + goto finish; + } +#endif /* defined (__arm64__) */ } } + if ((address >= vm_kernel_stext) && (address < vm_kernel_etext)) { + foundKext.reset(sKernelKext, OSRetain); + goto finish; + } + /* + * DriverKit userspace executables do not have a kernel linkedExecutable, + * so we "fake" their address range with the LoadTag. + * + * This is supposed to be used for logging reasons only. When logd + * calls this function it ors the address with FIREHOSE_TRACEPOINT_PC_KERNEL_MASK, so we + * remove it here before checking it against the LoadTag. + * Also we need to remove FIREHOSE_TRACEPOINT_PC_DYNAMIC_BIT set when emitting the log line. + */ + address = address & ~(FIREHOSE_TRACEPOINT_PC_KERNEL_MASK | FIREHOSE_TRACEPOINT_PC_DYNAMIC_BIT); count = sLoadedDriverKitKexts->getCount(); for (i = 0; i < count; i++) { OSKext * thisKext = OSDynamicCast(OSKext, sLoadedDriverKitKexts->getObject(i)); - /* - * DriverKitKexts do not have a linkedExecutable, - * so we "fake" their address with the LoadTag - */ if (thisKext->getLoadTag() == address) { - foundKext = thisKext; - foundKext->retain(); + foundKext.reset(thisKext, OSRetain); } } @@ -3433,21 +3891,23 @@ finish: return foundKext; } -OSData * +OSSharedPtr OSKext::copyKextUUIDForAddress(OSNumber *address) { - OSData * uuid = NULL; - OSKextActiveAccount * active; - OSKext * kext = NULL; - uint32_t baseIdx; - uint32_t lim; - uint32_t count, i; + OSSharedPtr uuid; + OSSharedPtr kext; if (!address) { return NULL; } uintptr_t addr = ml_static_slide((uintptr_t)address->unsigned64BitValue()); + if (addr == 0) { + return NULL; + } +#if __has_feature(ptrauth_calls) + addr = (uintptr_t)VM_KERNEL_STRIP_PTR(addr); +#endif /* __has_feature(ptrauth_calls) */ #if CONFIG_MACF /* Is the calling process allowed to query kext info? 
*/ @@ -3468,74 +3928,21 @@ OSKext::copyKextUUIDForAddress(OSNumber *address) } } #endif - - IOSimpleLockLock(sKextAccountsLock); - // bsearch sKextAccounts list - for (baseIdx = 0, lim = sKextAccountsCount; lim; lim >>= 1) { - active = &sKextAccounts[baseIdx + (lim >> 1)]; - if ((addr >= active->address) && (addr < active->address_end)) { - kext = active->account->kext; - if (kext) { - kext->retain(); - } - break; - } else if (addr > active->address) { - // move right - baseIdx += (lim >> 1) + 1; - lim--; - } - // else move left - } - IOSimpleLockUnlock(sKextAccountsLock); - - if (!kext) { - /* - * Maybe it is a Dext. - * DriverKit userspace executables do not have a kernel linkedExecutable, - * so we "fake" their address range with the LoadTag. - * - * This is supposed to be used for logging reasons only. When logd - * calls this function it ors the address with FIREHOSE_TRACEPOINT_PC_KERNEL_MASK, so we - * remove it here before checking it against the LoadTag. - * Also we need to remove FIREHOSE_TRACEPOINT_PC_DYNAMIC_BIT set when emitting the log line. - */ - addr = (uintptr_t)address->unsigned64BitValue() & ~(FIREHOSE_TRACEPOINT_PC_KERNEL_MASK | FIREHOSE_TRACEPOINT_PC_DYNAMIC_BIT); - IORecursiveLockLock(sKextLock); - count = sLoadedDriverKitKexts->getCount(); - for (i = 0; i < count; i++) { - OSKext * thisKext = NULL; - - thisKext = OSDynamicCast(OSKext, sLoadedDriverKitKexts->getObject(i)); - if (!thisKext) { - continue; - } - if (thisKext->getLoadTag() == addr) { - kext = thisKext; - kext->retain(); - break; - } - } - IORecursiveLockUnlock(sKextLock); - } - + kext = lookupKextWithAddress(addr); if (kext) { uuid = kext->copyTextUUID(); - kext->release(); - } else if (((vm_offset_t)addr >= vm_kernel_stext) && ((vm_offset_t)addr < vm_kernel_etext)) { - uuid = sKernelKext->copyTextUUID(); } - return uuid; } /********************************************************************* *********************************************************************/ -OSKext * +OSSharedPtr OSKext::lookupKextWithUUID(uuid_t wanted) { - OSKext * foundKext = NULL; // returned + OSSharedPtr foundKext; // returned uint32_t j, i; - OSArray *list[2] = {sLoadedKexts, sLoadedDriverKitKexts}; + OSArray *list[2] = {sLoadedKexts.get(), sLoadedDriverKitKexts.get()}; uint32_t count[2] = {sLoadedKexts->getCount(), sLoadedDriverKitKexts->getCount()}; @@ -3550,18 +3957,16 @@ OSKext::lookupKextWithUUID(uuid_t wanted) continue; } - OSData *uuid_data = thisKext->copyUUID(); + OSSharedPtr uuid_data = thisKext->copyUUID(); if (!uuid_data) { continue; } uuid_t uuid; memcpy(&uuid, uuid_data->getBytesNoCopy(), sizeof(uuid)); - uuid_data->release(); if (0 == uuid_compare(wanted, uuid)) { - foundKext = thisKext; - foundKext->retain(); + foundKext.reset(thisKext, OSRetain); goto finish; } } @@ -3620,7 +4025,7 @@ OSKext::removeKext( #else /* CONFIG_EMBEDDED */ OSReturn result = kOSKextReturnInUse; - OSKext * checkKext = NULL; // do not release + OSKext * checkKext = NULL; // do not release #if CONFIG_MACF int macCheckResult = 0; kauth_cred_t cred = NULL; @@ -3701,13 +4106,21 @@ OSKext::removeKext( aKext->removePersonalitiesFromCatalog(); } - OSKextLog(aKext, - kOSKextLogProgressLevel | - kOSKextLogKextBookkeepingFlag, - "Removing kext %s.", - aKext->getIdentifierCString()); + if (aKext->isInFileset()) { + OSKextLog(aKext, + kOSKextLogProgressLevel | + kOSKextLogKextBookkeepingFlag, + "Fileset kext %s unloaded.", + aKext->getIdentifierCString()); + } else { + OSKextLog(aKext, + kOSKextLogProgressLevel | + kOSKextLogKextBookkeepingFlag, + 
"Removing kext %s.", + aKext->getIdentifierCString()); - sKextsByID->removeObject(aKext->getIdentifier()); + sKextsByID->removeObject(aKext->getIdentifier()); + } result = kOSReturnSuccess; finish: @@ -3760,7 +4173,7 @@ OSKext::removeKextWithLoadTag( OSReturn result = kOSReturnError; OSKext * foundKext = NULL; uint32_t i, j; - OSArray *list[2] = {sLoadedKexts, sLoadedDriverKitKexts}; + OSArray *list[2] = {sLoadedKexts.get(), sLoadedDriverKitKexts.get()}; uint32_t count[2] = {sLoadedKexts->getCount(), sLoadedDriverKitKexts->getCount()}; @@ -3797,13 +4210,13 @@ finish: /********************************************************************* *********************************************************************/ -OSDictionary * +OSSharedPtr OSKext::copyKexts(void) { - OSDictionary * result; + OSSharedPtr result; IORecursiveLockLock(sKextLock); - result = OSDynamicCast(OSDictionary, sKextsByID->copyCollection()); + result = OSDynamicPtrCast(sKextsByID->copyCollection()); IORecursiveLockUnlock(sKextLock); return result; @@ -3827,13 +4240,13 @@ OSKext::createExcludeListFromBooterData( OSDictionary * theDictionary, OSCollectionIterator * theIterator ) { - OSString * deviceTreeName = NULL;// do not release - const _DeviceTreeBuffer * deviceTreeBuffer = NULL;// do not release - char * booterDataPtr = NULL;// do not release - _BooterKextFileInfo * kextFileInfo = NULL;// do not release - char * infoDictAddr = NULL;// do not release - OSObject * parsedXML = NULL;// must release - OSDictionary * theInfoDict = NULL;// do not release + OSString * deviceTreeName = NULL; // do not release + const _DeviceTreeBuffer * deviceTreeBuffer = NULL; // do not release + char * booterDataPtr = NULL; // do not release + _BooterKextFileInfo * kextFileInfo = NULL; // do not release + char * infoDictAddr = NULL; // do not release + OSSharedPtr parsedXML; + OSDictionary * theInfoDict = NULL; // do not release theIterator->reset(); @@ -3841,10 +4254,8 @@ OSKext::createExcludeListFromBooterData( while ((deviceTreeName = OSDynamicCast(OSString, theIterator->getNextObject()))) { const char * devTreeNameCString; - OSData * deviceTreeEntry; - OSString * myBundleID;// do not release - - OSSafeReleaseNULL(parsedXML); + OSData * deviceTreeEntry; // do not release + OSString * myBundleID; // do not release deviceTreeEntry = OSDynamicCast(OSData, theDictionary->getObject(deviceTreeName)); @@ -3891,7 +4302,7 @@ OSKext::createExcludeListFromBooterData( continue; } - theInfoDict = OSDynamicCast(OSDictionary, parsedXML); + theInfoDict = OSDynamicCast(OSDictionary, parsedXML.get()); if (!theInfoDict) { continue; } @@ -3900,7 +4311,7 @@ OSKext::createExcludeListFromBooterData( OSDynamicCast(OSString, theInfoDict->getObject(kCFBundleIdentifierKey)); if (myBundleID && - strcmp( myBundleID->getCStringNoCopy(), "com.apple.driver.KextExcludeList" ) == 0) { + strcmp( myBundleID->getCStringNoCopy(), kIOExcludeListBundleID ) == 0) { boolean_t updated = updateExcludeList(theInfoDict); if (!updated) { /* 25322874 */ @@ -3908,9 +4319,8 @@ OSKext::createExcludeListFromBooterData( } break; } - } // while ( (deviceTreeName = ...) ) + } // while ( (deviceTreeName = ...) 
) - OSSafeReleaseNULL(parsedXML); return; } @@ -3922,11 +4332,11 @@ OSKext::createExcludeListFromBooterData( void OSKext::createExcludeListFromPrelinkInfo( OSArray * theInfoArray ) { - OSDictionary * myInfoDict = NULL;// do not release - OSString * myBundleID; // do not release + OSDictionary * myInfoDict = NULL; // do not release + OSString * myBundleID; // do not release u_int i; - /* Find com.apple.driver.KextExcludeList. */ + /* Find the Apple Kext Exclude List. */ for (i = 0; i < theInfoArray->getCount(); i++) { myInfoDict = OSDynamicCast(OSDictionary, theInfoArray->getObject(i)); if (!myInfoDict) { @@ -3936,7 +4346,7 @@ OSKext::createExcludeListFromPrelinkInfo( OSArray * theInfoArray ) OSDynamicCast(OSString, myInfoDict->getObject(kCFBundleIdentifierKey)); if (myBundleID && - strcmp( myBundleID->getCStringNoCopy(), "com.apple.driver.KextExcludeList" ) == 0) { + strcmp( myBundleID->getCStringNoCopy(), kIOExcludeListBundleID ) == 0) { boolean_t updated = updateExcludeList(myInfoDict); if (!updated) { /* 25322874 */ @@ -3944,7 +4354,7 @@ OSKext::createExcludeListFromPrelinkInfo( OSArray * theInfoArray ) } break; } - } // for (i = 0; i < theInfoArray->getCount()... + } // for (i = 0; i < theInfoArray->getCount()... return; } @@ -3953,8 +4363,8 @@ OSKext::createExcludeListFromPrelinkInfo( OSArray * theInfoArray ) boolean_t OSKext::updateExcludeList(OSDictionary *infoDict) { - OSDictionary *myTempDict = NULL; // do not free - OSString *myTempString = NULL;// do not free + OSDictionary *myTempDict = NULL; // do not free + OSString *myTempString = NULL; // do not free OSKextVersion newVersion = 0; boolean_t updated = false; @@ -3980,7 +4390,6 @@ OSKext::updateExcludeList(OSDictionary *infoDict) IORecursiveLockLock(sKextLock); if (newVersion > sExcludeListVersion) { - OSSafeReleaseNULL(sExcludeListByID); sExcludeListByID = OSDictionary::withDictionary(myTempDict, 0); sExcludeListVersion = newVersion; updated = true; @@ -3998,7 +4407,7 @@ OSKext::updateExcludeList(OSDictionary *infoDict) const OSSymbol * OSKext::getIdentifier(void) { - return bundleID; + return bundleID.get(); } /********************************************************************* @@ -4064,18 +4473,19 @@ OSData * OSKext::getExecutable(void) { OSData * result = NULL; - OSData * extractedExecutable = NULL; // must release - OSData * mkextExecutableRef = NULL;// do not release + OSSharedPtr extractedExecutable; if (flags.builtin) { - return sKernelKext->linkedExecutable; + return sKernelKext->linkedExecutable.get(); } result = OSDynamicCast(OSData, infoDict->getObject(_kOSKextExecutableKey)); if (result) { - goto finish; + return result; } +#if CONFIG_KXLD + OSData * mkextExecutableRef = NULL; // do not release mkextExecutableRef = OSDynamicCast(OSData, getPropertyForHostArch(_kOSKextMkextExecutableReferenceKey)); @@ -4106,19 +4516,17 @@ OSKext::getExecutable(void) infoDict->removeObject(_kOSKextExecutableExternalDataKey); if (extractedExecutable && extractedExecutable->getLength()) { - if (!setExecutable(extractedExecutable)) { + if (!setExecutable(extractedExecutable.get())) { goto finish; } - result = extractedExecutable; + result = extractedExecutable.get(); } else { goto finish; } } finish: - - OSSafeReleaseNULL(extractedExecutable); - +#endif // CONFIG_KXLD return result; } @@ -4170,13 +4578,18 @@ bool OSKext::isLoadableInSafeBoot(void) { bool result = false; - OSString * required = NULL; // do not release + OSString * required = NULL; // do not release if (isKernel()) { result = true; goto finish; } + if (isDriverKit()) { 
+ result = true; + goto finish; + } + required = OSDynamicCast(OSString, getPropertyForHostArch(kOSBundleRequiredKey)); if (!required) { @@ -4254,7 +4667,7 @@ OSKext::getSizeInfo(uint32_t *loadSize, uint32_t *wiredSize) * from that. Otherwise it's the full load size. */ if (kmod_info) { - *wiredSize = *loadSize - kmod_info->hdr_size; + *wiredSize = *loadSize - (uint32_t)kmod_info->hdr_size; } else { *wiredSize = *loadSize; } @@ -4266,11 +4679,11 @@ OSKext::getSizeInfo(uint32_t *loadSize, uint32_t *wiredSize) /********************************************************************* *********************************************************************/ -OSData * +OSSharedPtr OSKext::copyUUID(void) { - OSData * result = NULL; - OSData * theExecutable = NULL;// do not release + OSSharedPtr result; + OSData * theExecutable = NULL; // do not release const kernel_mach_header_t * header; /* An interface kext doesn't have a linked executable with an LC_UUID, @@ -4278,7 +4691,6 @@ OSKext::copyUUID(void) */ if (interfaceUUID) { result = interfaceUUID; - result->retain(); goto finish; } @@ -4287,18 +4699,13 @@ OSKext::copyUUID(void) } if (isDriverKit() && infoDict) { - if (driverKitUUID) { - driverKitUUID->retain(); - return driverKitUUID; - } else { - return NULL; - } + return driverKitUUID; } /* For real kexts, try to get the UUID from the linked executable, * or if is hasn't been linked yet, the unrelocated executable. */ - theExecutable = linkedExecutable; + theExecutable = linkedExecutable.get(); if (!theExecutable) { theExecutable = getExecutable(); } @@ -4316,7 +4723,7 @@ finish: /********************************************************************* *********************************************************************/ -OSData * +OSSharedPtr OSKext::copyTextUUID(void) { if (flags.builtin) { @@ -4327,10 +4734,10 @@ OSKext::copyTextUUID(void) /********************************************************************* *********************************************************************/ -OSData * +OSSharedPtr OSKext::copyMachoUUID(const kernel_mach_header_t * header) { - OSData * result = NULL; + OSSharedPtr result; const struct load_command * load_cmd = NULL; const struct uuid_command * uuid_cmd = NULL; uint32_t i; @@ -4400,16 +4807,18 @@ OSKext::setDriverKitUUID(OSData *uuid) #define ARCH_SEPARATOR_CHAR '_' static char * -makeHostArchKey(const char * key, uint32_t * keySizeOut) +makeHostArchKey(const char * key, size_t * keySizeOut) { char * result = NULL; - uint32_t keyLength = strlen(key); - uint32_t keySize; + size_t keyLength = strlen(key); + size_t keySize; /* Add 1 for the ARCH_SEPARATOR_CHAR, and 1 for the '\0'. 
 */
-	keySize = 1 + 1 + strlen(key) + strlen(ARCHNAME);
-	result = (char *)kalloc_tag(keySize, VM_KERN_MEMORY_OSKEXT);
+	keySize = 1 + 1 + keyLength + strlen(ARCHNAME);
+	result = (char *)kheap_alloc_tag(KHEAP_TEMP, keySize,
+	    Z_WAITOK, VM_KERN_MEMORY_OSKEXT);
+
 	if (!result) {
 		goto finish;
 	}
@@ -4429,7 +4838,7 @@ OSObject *
 OSKext::getPropertyForHostArch(const char * key)
 {
 	OSObject * result = NULL;// do not release
-	uint32_t hostArchKeySize = 0;
+	size_t hostArchKeySize = 0;
 	char * hostArchKey = NULL;// must kfree
 
 	if (!key || !infoDict) {
@@ -4459,7 +4868,7 @@ OSKext::getPropertyForHostArch(const char * key)
 
 finish:
 	if (hostArchKey) {
-		kfree(hostArchKey, hostArchKeySize);
+		kheap_free(KHEAP_TEMP, hostArchKey, hostArchKeySize);
 	}
 	return result;
 }
@@ -4491,8 +4900,8 @@ finish:
 bool
 OSKext::isInExcludeList(void)
 {
-	OSString * versionString = NULL;// do not release
-	char * versionCString = NULL;// do not free
+	OSString * versionString = NULL;        // do not release
+	char * versionCString = NULL;           // do not free
 	size_t i;
 	boolean_t wantLessThan = false;
 	boolean_t wantLessThanEqualTo = false;
@@ -4507,7 +4916,7 @@ OSKext::isInExcludeList(void)
 	/* look up by bundleID in our exclude list and if found get version
 	 * string (or strings) that we will not allow to load
 	 */
-	versionString = OSDynamicCast(OSString, sExcludeListByID->getObject(bundleID));
+	versionString = OSDynamicCast(OSString, sExcludeListByID->getObject(bundleID.get()));
 	if (versionString == NULL || versionString->getLength() > (sizeof(myBuffer) - 1)) {
 		isInExcludeList = false;
 	}
@@ -4578,6 +4987,52 @@ OSKext::isInExcludeList(void)
 	return false;
 }
 
+/*********************************************************************
+* sNonLoadableKextsByID is a dictionary with keys / values of:
+* key = bundleID string of kext we will not allow to load
+* value = boolean (true == loadable, false == not loadable)
+*
+* Only kexts which are in the AuxKC will be marked as "not loadable,"
+* i.e., the value for the kext's bundleID will be false. All kexts in
+* the primary and system KCs will always be marked as "loadable."
+*
+* This list ultimately comes from kexts which have been uninstalled
+* in user space by deleting the kext from disk, but which have not
+* yet been removed from the AuxKC. Because the user could choose to
+* re-install the exact same version of the kext, we need to keep
+* a dictionary of boolean values so that user space only needs to
+* keep a simple list of "uninstalled" or "missing" bundles. When
+* a bundle is re-installed, the iokit daemon can use the
+* AuxKCBundleAvailable predicate to set the individual kext's
+* availability to true.
+*********************************************************************/ +bool +OSKext::isLoadable(void) +{ + bool isLoadable = true; + + if (kc_type != KCKindAuxiliary) { + /* this filtering only applies to kexts in the auxkc */ + return true; + } + + IORecursiveLockLock(sKextLock); + + if (sNonLoadableKextsByID) { + /* look up by bundleID in our exclude list and if found get version + * string (or strings) that we will not allow to load + */ + OSBoolean *loadableVal; + loadableVal = OSDynamicCast(OSBoolean, sNonLoadableKextsByID->getObject(bundleID.get())); + if (loadableVal && !loadableVal->getValue()) { + isLoadable = false; + } + } + IORecursiveLockUnlock(sKextLock); + + return isLoadable; +} + /********************************************************************* *********************************************************************/ /* static */ @@ -4591,20 +5046,45 @@ OSKext::loadKextWithIdentifier( OSArray * personalityNames) { OSReturn result = kOSReturnError; - OSString * kextIdentifier = NULL; // must release + OSSharedPtr kextIdentifier; kextIdentifier = OSString::withCString(kextIdentifierCString); if (!kextIdentifier) { result = kOSKextReturnNoMemory; goto finish; } - result = OSKext::loadKextWithIdentifier(kextIdentifier, + result = OSKext::loadKextWithIdentifier(kextIdentifier.get(), NULL /* kextRef */, allowDeferFlag, delayAutounloadFlag, startOpt, startMatchingOpt, personalityNames); finish: - OSSafeReleaseNULL(kextIdentifier); + return result; +} + +OSReturn +OSKext::loadKextWithIdentifier( + OSString * kextIdentifier, + OSSharedPtr &kextRef, + Boolean allowDeferFlag, + Boolean delayAutounloadFlag, + OSKextExcludeLevel startOpt, + OSKextExcludeLevel startMatchingOpt, + OSArray * personalityNames) +{ + OSObject * kextRefRaw = NULL; + OSReturn result; + + result = loadKextWithIdentifier(kextIdentifier, + &kextRefRaw, + allowDeferFlag, + delayAutounloadFlag, + startOpt, + startMatchingOpt, + personalityNames); + if ((kOSReturnSuccess == result) && kextRefRaw) { + kextRef.reset(kextRefRaw, OSNoRetain); + } return result; } @@ -4622,9 +5102,9 @@ OSKext::loadKextWithIdentifier( { OSReturn result = kOSReturnError; OSReturn pingResult = kOSReturnError; - OSKext * theKext = NULL;// do not release - OSDictionary * loadRequest = NULL;// must release - const OSSymbol * kextIdentifierSymbol = NULL;// must release + OSKext * theKext = NULL; // do not release + OSSharedPtr loadRequest; + OSSharedPtr kextIdentifierSymbol; if (kextRef) { *kextRef = NULL; @@ -4664,23 +5144,23 @@ OSKext::loadKextWithIdentifier( * in sKernelRequests for this bundle identifier */ kextIdentifierSymbol = OSSymbol::withString(kextIdentifier); - if (!sPostedKextLoadIdentifiers->containsObject(kextIdentifierSymbol)) { + if (!sPostedKextLoadIdentifiers->containsObject(kextIdentifierSymbol.get())) { result = _OSKextCreateRequest(kKextRequestPredicateRequestLoad, - &loadRequest); + loadRequest); if (result != kOSReturnSuccess) { goto finish; } - if (!_OSKextSetRequestArgument(loadRequest, + if (!_OSKextSetRequestArgument(loadRequest.get(), kKextRequestArgumentBundleIdentifierKey, kextIdentifier)) { result = kOSKextReturnNoMemory; goto finish; } - if (!sKernelRequests->setObject(loadRequest)) { + if (!sKernelRequests->setObject(loadRequest.get())) { result = kOSKextReturnNoMemory; goto finish; } - if (!sPostedKextLoadIdentifiers->setObject(kextIdentifierSymbol)) { + if (!sPostedKextLoadIdentifiers->setObject(kextIdentifierSymbol.get())) { result = kOSKextReturnNoMemory; goto finish; } @@ -4692,12 +5172,12 @@ 
OSKext::loadKextWithIdentifier( kextIdentifier->getCStringNoCopy()); } - pingResult = OSKext::pingKextd(); + pingResult = OSKext::pingIOKitDaemon(); if (pingResult == kOSKextReturnDisabled) { OSKextLog(/* kext */ NULL, ((sPrelinkBoot) ? kOSKextLogDebugLevel : kOSKextLogErrorLevel) | kOSKextLogLoadFlag, - "Kext %s might not load - kextd is currently unavailable.", + "Kext %s might not load - " kIOKitDaemonName " is currently unavailable.", kextIdentifier->getCStringNoCopy()); } @@ -4714,8 +5194,10 @@ OSKext::loadKextWithIdentifier( "Failed to load kext %s (error 0x%x).", kextIdentifier->getCStringNoCopy(), (int)result); - OSKext::removeKext(theKext, - /* terminateService/removePersonalities */ true); + if (theKext->kc_type == KCKindUnknown) { + OSKext::removeKext(theKext, + /* terminateService/removePersonalities */ true); + } goto finish; } @@ -4729,85 +5211,264 @@ OSKext::loadKextWithIdentifier( } finish: - OSSafeReleaseNULL(loadRequest); - OSSafeReleaseNULL(kextIdentifierSymbol); - if ((kOSReturnSuccess == result) && kextRef) { - theKext->retain(); - theKext->matchingRefCount++; *kextRef = theKext; + theKext->matchingRefCount++; + theKext->retain(); } IORecursiveLockUnlock(sKextLock); return result; } -/********************************************************************* -*********************************************************************/ -/* static */ -void -OSKext::dropMatchingReferences( - OSSet * kexts) -{ - IORecursiveLockLock(sKextLock); - kexts->iterateObjects(^bool (OSObject * obj) { - OSKext * thisKext = OSDynamicCast(OSKext, obj); - if (!thisKext) { - return false; - } - thisKext->matchingRefCount--; - return false; - }); - IORecursiveLockUnlock(sKextLock); -} /********************************************************************* *********************************************************************/ /* static */ -void -OSKext::recordIdentifierRequest( - OSString * kextIdentifier) +OSReturn +OSKext::loadKextFromKC(OSKext *theKext, OSDictionary *requestDict) { - const OSSymbol * kextIdentifierSymbol = NULL; // must release - bool fail = false; + OSReturn result = kOSReturnError; - if (!sAllKextLoadIdentifiers || !kextIdentifier) { - goto finish; - } + OSBoolean *delayAutounloadBool = NULL; // do not release + OSNumber *startKextExcludeNum = NULL; // do not release + OSNumber *startMatchingExcludeNum = NULL; // do not release + OSArray *personalityNames = NULL; // do not release - kextIdentifierSymbol = OSSymbol::withString(kextIdentifier); - if (!kextIdentifierSymbol) { - // xxx - this is really a basic alloc failure - fail = true; - goto finish; - } + /* + * Default values for these options: + * regular autounload behavior + * start the kext + * send all personalities to the catalog + */ + Boolean delayAutounload = false; + OSKextExcludeLevel startKextExcludeLevel = kOSKextExcludeNone; + OSKextExcludeLevel startMatchingExcludeLevel = kOSKextExcludeNone; IORecursiveLockLock(sKextLock); - if (!sAllKextLoadIdentifiers->containsObject(kextIdentifierSymbol)) { - if (!sAllKextLoadIdentifiers->setObject(kextIdentifierSymbol)) { - fail = true; - } else { - // xxx - need to find a way to associate this whole func w/the kext - OSKextLog(/* kext */ NULL, - // xxx - check level - kOSKextLogStepLevel | - kOSKextLogArchiveFlag, - "Recorded kext %s as a candidate for inclusion in prelinked kernel.", - kextIdentifier->getCStringNoCopy()); - } - } - IORecursiveLockUnlock(sKextLock); -finish: + OSKextLog(/* kext */ NULL, + kOSKextLogDebugLevel | + kOSKextLogIPCFlag, + "Received kext KC load 
request from user space."); - if (fail) { + /* Regardless of processing, the fact that we have gotten here means some + * user-space program is up and talking to us, so we'll switch our kext + * registration to reflect that. + */ + if (!sUserLoadsActive) { + OSKextLog(/* kext */ NULL, + kOSKextLogProgressLevel | + kOSKextLogGeneralFlag | kOSKextLogLoadFlag, + "Switching to late startup (user-space) kext loading policy."); + sUserLoadsActive = true; + } + + delayAutounloadBool = OSDynamicCast(OSBoolean, + _OSKextGetRequestArgument(requestDict, + kKextRequestArgumentDelayAutounloadKey)); + startKextExcludeNum = OSDynamicCast(OSNumber, + _OSKextGetRequestArgument(requestDict, + kKextRequestArgumentStartExcludeKey)); + startMatchingExcludeNum = OSDynamicCast(OSNumber, + _OSKextGetRequestArgument(requestDict, + kKextRequestArgumentStartMatchingExcludeKey)); + personalityNames = OSDynamicCast(OSArray, + _OSKextGetRequestArgument(requestDict, + kKextRequestArgumentPersonalityNamesKey)); + + if (delayAutounloadBool) { + delayAutounload = delayAutounloadBool->getValue(); + } + if (startKextExcludeNum) { + startKextExcludeLevel = startKextExcludeNum->unsigned8BitValue(); + } + if (startMatchingExcludeNum) { + startMatchingExcludeLevel = startMatchingExcludeNum->unsigned8BitValue(); + } + + OSKextLog(/* kext */ NULL, + kOSKextLogProgressLevel | + kOSKextLogIPCFlag, + "Received request from user space to load KC kext %s.", + theKext->getIdentifierCString()); + + /* this could be in the Auxiliary KC, so record the load request */ + OSKext::recordIdentifierRequest(OSDynamicCast(OSString, theKext->getIdentifier())); + + /* + * Load the kext + */ + result = theKext->load(startKextExcludeLevel, + startMatchingExcludeLevel, personalityNames); + + if (result != kOSReturnSuccess) { + OSKextLog(theKext, + kOSKextLogErrorLevel | + kOSKextLogLoadFlag, + "Failed to load kext %s (error 0x%x).", + theKext->getIdentifierCString(), (int)result); + + OSKext::removeKext(theKext, + /* terminateService/removePersonalities */ true); + goto finish; + } else { + OSKextLog(theKext, + kOSKextLogProgressLevel | + kOSKextLogLoadFlag, + "Kext %s Loaded successfully from %s KC", + theKext->getIdentifierCString(), theKext->getKCTypeString()); + } + + if (delayAutounload) { + OSKextLog(theKext, + kOSKextLogProgressLevel | + kOSKextLogLoadFlag | kOSKextLogKextBookkeepingFlag, + "Setting delayed autounload for %s.", + theKext->getIdentifierCString()); + theKext->flags.delayAutounload = 1; + } + +finish: + IORecursiveLockUnlock(sKextLock); + + return result; +} + +/********************************************************************* +*********************************************************************/ +/* static */ +OSReturn +OSKext::loadCodelessKext(OSString *kextIdentifier, OSDictionary *requestDict) +{ + OSReturn result = kOSReturnError; + OSDictionary *anInfoDict = NULL; // do not release + + anInfoDict = OSDynamicCast(OSDictionary, + _OSKextGetRequestArgument(requestDict, + kKextRequestArgumentCodelessInfoKey)); + if (anInfoDict == NULL) { + OSKextLog(/* kext */ NULL, + kOSKextLogErrorLevel | + kOSKextLogGeneralFlag | kOSKextLogLoadFlag, + "Missing 'Codeless Kext Info' dictionary in codeless kext load request of %s.", + kextIdentifier->getCStringNoCopy()); + return kOSKextReturnInvalidArgument; + } + + IORecursiveLockLock(sKextLock); + + OSKextLog(/* kext */ NULL, + kOSKextLogProgressLevel | + kOSKextLogIPCFlag, + "Received request from user space to load codeless kext %s.", + kextIdentifier->getCStringNoCopy()); + + { + // 
instantiate a new kext, and don't hold a reference + // (the kext subsystem will hold one implicitly) + OSSharedPtr newKext = OSKext::withCodelessInfo(anInfoDict); + if (!newKext) { + OSKextLog(/* kext */ NULL, + kOSKextLogErrorLevel | + kOSKextLogGeneralFlag | kOSKextLogLoadFlag, + "Could not instantiate codeless kext."); + result = kOSKextReturnNotLoadable; + goto finish; + } + if (!kextIdentifier->isEqualTo(newKext->getIdentifierCString())) { + OSKextLog(/* kext */ NULL, + kOSKextLogErrorLevel | + kOSKextLogGeneralFlag | kOSKextLogLoadFlag, + "Codeless kext identifiers don't match '%s' != '%s'", + kextIdentifier->getCStringNoCopy(), newKext->getIdentifierCString()); + + OSKext::removeKext(newKext.get(), false); + result = kOSKextReturnInvalidArgument; + goto finish; + } + + /* Record the request for the codeless kext */ + OSKext::recordIdentifierRequest(OSDynamicCast(OSString, newKext->getIdentifier())); + + result = kOSReturnSuccess; + /* send the kext's personalities to the IOCatalog */ + if (!newKext->flags.requireExplicitLoad) { + result = newKext->sendPersonalitiesToCatalog(true, NULL); + } + } + +finish: + IORecursiveLockUnlock(sKextLock); + + return result; +} + +/********************************************************************* +*********************************************************************/ +/* static */ +void +OSKext::dropMatchingReferences( + OSSet * kexts) +{ + IORecursiveLockLock(sKextLock); + kexts->iterateObjects(^bool (OSObject * obj) { + OSKext * thisKext = OSDynamicCast(OSKext, obj); + if (!thisKext) { + return false; + } + thisKext->matchingRefCount--; + return false; + }); + IORecursiveLockUnlock(sKextLock); +} + +/********************************************************************* +*********************************************************************/ +/* static */ +void +OSKext::recordIdentifierRequest( + OSString * kextIdentifier) +{ + OSSharedPtr kextIdentifierSymbol; + bool fail = false; + + if (!sAllKextLoadIdentifiers || !kextIdentifier) { + goto finish; + } + + kextIdentifierSymbol = OSSymbol::withString(kextIdentifier); + if (!kextIdentifierSymbol) { + // xxx - this is really a basic alloc failure + fail = true; + goto finish; + } + + IORecursiveLockLock(sKextLock); + if (!sAllKextLoadIdentifiers->containsObject(kextIdentifierSymbol.get())) { + if (!sAllKextLoadIdentifiers->setObject(kextIdentifierSymbol.get())) { + fail = true; + } else { + // xxx - need to find a way to associate this whole func w/the kext + OSKextLog(/* kext */ NULL, + // xxx - check level + kOSKextLogStepLevel | + kOSKextLogArchiveFlag, + "Recorded kext %s as a candidate for inclusion in prelinked kernel.", + kextIdentifier->getCStringNoCopy()); + } + } + IORecursiveLockUnlock(sKextLock); + +finish: + + if (fail) { OSKextLog(/* kext */ NULL, kOSKextLogErrorLevel | kOSKextLogArchiveFlag, "Failed to record kext %s as a candidate for inclusion in prelinked kernel.", kextIdentifier->getCStringNoCopy()); } - OSSafeReleaseNULL(kextIdentifierSymbol); return; } @@ -4820,12 +5481,11 @@ OSKext::load( OSArray * personalityNames) { OSReturn result = kOSReturnError; - kern_return_t kxldResult; OSKextExcludeLevel dependenciesStartOpt = startOpt; OSKextExcludeLevel dependenciesStartMatchingOpt = startMatchingOpt; unsigned int i, count; Boolean alreadyLoaded = false; - OSKext * lastLoadedKext = NULL; + OSKext * lastLoadedKext = NULL; // do not release if (isInExcludeList()) { OSKextLog(this, @@ -4837,6 +5497,16 @@ OSKext::load( result = kOSKextReturnNotLoadable; goto finish; } + if 
(!isLoadable()) { + OSKextLog(this, + kOSKextLogErrorLevel | kOSKextLogGeneralFlag | + kOSKextLogLoadFlag, + "Kext %s is not loadable", + getIdentifierCString()); + + result = kOSKextReturnNotLoadable; + goto finish; + } if (isLoaded()) { alreadyLoaded = true; @@ -4850,8 +5520,22 @@ OSKext::load( goto loaded; } -#if CONFIG_MACF +#if CONFIG_MACF && XNU_TARGET_OS_OSX +#if CONFIG_KXLD if (current_task() != kernel_task) { +#else + /* + * On non-kxld systems, only check the mac-hook for kexts in the + * Pageable and Aux KCs. This means on Apple silicon devices that + * the mac hook will only be useful to block 3rd party kexts. + * + * Note that this should _not_ be called on kexts loaded from the + * kernel bootstrap thread as the kernel proc's cred struct is not + * yet initialized! This won't happen on macOS because all the kexts + * in the BootKC are self-contained and their kc_type = KCKindPrimary. + */ + if (kc_type != KCKindPrimary && kc_type != KCKindUnknown) { +#endif /* CONFIG_KXLD */ int macCheckResult = 0; kauth_cred_t cred = NULL; @@ -4901,8 +5585,8 @@ OSKext::load( * AppleKextExcludeList. Detect that special kext by bundle identifier and * load its metadata into the global data structures, if appropriate */ - if (strcmp(getIdentifierCString(), "com.apple.driver.KextExcludeList") == 0) { - boolean_t updated = updateExcludeList(infoDict); + if (strcmp(getIdentifierCString(), kIOExcludeListBundleID) == 0) { + boolean_t updated = updateExcludeList(infoDict.get()); if (updated) { OSKextLog(this, kOSKextLogDebugLevel | kOSKextLogLoadFlag, @@ -4937,7 +5621,21 @@ OSKext::load( "Loading kext %s.", getIdentifierCString()); +#if !VM_MAPPED_KEXTS + if (isPrelinked() == false) { + OSKextLog(this, + kOSKextLogErrorLevel | + kOSKextLogLoadFlag, + "Can't load kext %s - not in a kext collection.", + getIdentifierCString()); + result = kOSKextReturnDisabled; + goto finish; + } +#endif /* defined(__x86_64__) */ + +#if CONFIG_KXLD if (!sKxldContext) { + kern_return_t kxldResult; kxldResult = kxld_create_context(&sKxldContext, &kern_allocate, &kxld_log_callback, /* Flags */ (KXLDFlags) 0, /* cputype */ 0, /* cpusubtype */ 0, /* page size */ 0); @@ -4951,6 +5649,7 @@ OSKext::load( goto finish; } } +#endif // CONFIG_KXLD /* We only need to resolve dependencies once for the whole graph, but * resolveDependencies will just return if there's no work to do, so it's @@ -5137,11 +5836,12 @@ finish: getIdentifierCString()); queueKextNotification(kKextRequestPredicateLoadNotification, - OSDynamicCast(OSString, bundleID)); + OSDynamicCast(OSString, bundleID.get())); } return result; } +#if CONFIG_KXLD /********************************************************************* * *********************************************************************/ @@ -5156,7 +5856,8 @@ strdup(const char * string) } size = 1 + strlen(string); - result = (char *)kalloc_tag(size, VM_KERN_MEMORY_OSKEXT); + result = (char *)kheap_alloc_tag(KHEAP_DATA_BUFFERS, size, + Z_WAITOK, VM_KERN_MEMORY_OSKEXT); if (!result) { goto finish; } @@ -5166,6 +5867,7 @@ strdup(const char * string) finish: return result; } +#endif // CONFIG_KXLD /********************************************************************* * @@ -5207,7 +5909,7 @@ out: *********************************************************************/ OSReturn -OSKext::slidePrelinkedExecutable(bool doCoalesedSlides) +OSKext::slidePrelinkedExecutable(bool doCoalescedSlides) { OSReturn result = kOSKextReturnBadData; kernel_mach_header_t * mh = NULL; @@ -5233,6 +5935,12 @@ 
OSKext::slidePrelinkedExecutable(bool doCoalesedSlides) } mh = (kernel_mach_header_t *)linkedExecutable->getBytesNoCopy(); + if (kernel_mach_header_is_in_fileset(mh)) { + // kexts in filesets are slid as part of collection sliding + result = kOSReturnSuccess; + goto finish; + } + segmentSplitInfo = (struct linkedit_data_command *) getcommandfromheader(mh, LC_SEGMENT_SPLIT_INFO); for (seg = firstsegfromheader(mh); seg != NULL; seg = nextsegfromheader(mh, seg)) { @@ -5274,7 +5982,7 @@ OSKext::slidePrelinkedExecutable(bool doCoalesedSlides) symtab = (struct symtab_command *) getcommandfromheader(mh, LC_SYMTAB); - if (symtab != NULL && doCoalesedSlides == false) { + if (symtab != NULL && doCoalescedSlides == false) { /* Some pseudo-kexts have symbol tables without segments. * Ignore them. */ if (symtab->nsyms > 0 && haveLinkeditBase) { @@ -5297,7 +6005,7 @@ OSKext::slidePrelinkedExecutable(bool doCoalesedSlides) } } - if (dysymtab != NULL && doCoalesedSlides == false) { + if (dysymtab != NULL && doCoalescedSlides == false) { if (dysymtab->nextrel > 0) { OSKextLog(this, kOSKextLogErrorLevel | kOSKextLogLoadFlag | @@ -5367,12 +6075,20 @@ OSKext::slidePrelinkedExecutable(bool doCoalesedSlides) * For now, we do not free LINKEDIT for kexts with split segments. */ new_kextsize = round_page(kmod_info->size - reloc_size); + if (new_kextsize > UINT_MAX) { + OSKextLog(this, + kOSKextLogErrorLevel | kOSKextLogLoadFlag | + kOSKextLogLinkFlag, + "Kext %s: new kext size is too large.", + getIdentifierCString()); + goto finish; + } if (((kmod_info->size - new_kextsize) > PAGE_SIZE) && (!segmentSplitInfo)) { vm_offset_t endofkext = kmod_info->address + kmod_info->size; vm_offset_t new_endofkext = kmod_info->address + new_kextsize; vm_offset_t endofrelocInfo = (vm_offset_t) (((uint8_t *)reloc) + reloc_size); - int bytes_remaining = endofkext - endofrelocInfo; - OSData * new_osdata = NULL; + size_t bytes_remaining = endofkext - endofrelocInfo; + OSSharedPtr new_osdata; /* fix up symbol offsets if they are after the dsymtab local relocs */ if (symtab) { @@ -5398,7 +6114,7 @@ OSKext::slidePrelinkedExecutable(bool doCoalesedSlides) linkeditSeg->vmsize = round_page(linkeditSeg->vmsize - reloc_size); linkeditSeg->filesize = linkeditSeg->vmsize; - new_osdata = OSData::withBytesNoCopy((void *)kmod_info->address, new_kextsize); + new_osdata = OSData::withBytesNoCopy((void *)kmod_info->address, (unsigned int)new_kextsize); if (new_osdata) { /* Fix up kmod info and linkedExecutable. */ @@ -5409,8 +6125,7 @@ OSKext::slidePrelinkedExecutable(bool doCoalesedSlides) new_osdata->setDeallocFunction(osdata_phys_free); #endif linkedExecutable->setDeallocFunction(NULL); - linkedExecutable->release(); - linkedExecutable = new_osdata; + linkedExecutable = os::move(new_osdata); #if VM_MAPPED_KEXTS kext_free(new_endofkext, (endofkext - new_endofkext)); @@ -5436,19 +6151,22 @@ OSReturn OSKext::loadExecutable() { OSReturn result = kOSReturnError; - kern_return_t kxldResult; - KXLDDependency * kxlddeps = NULL;// must kfree - uint32_t num_kxlddeps = 0; - OSArray * linkDependencies = NULL;// must release - uint32_t numDirectDependencies = 0; + OSSharedPtr linkDependencies; uint32_t num_kmod_refs = 0; - struct mach_header ** kxldHeaderPtr = NULL;// do not free - struct mach_header * kxld_header = NULL;// xxx - need to free here? 
- OSData * theExecutable = NULL;// do not release - OSString * versString = NULL;// do not release - const char * versCString = NULL;// do not free - const char * string = NULL;// do not free + OSData * theExecutable = NULL; // do not release + OSString * versString = NULL; // do not release + const char * versCString = NULL; // do not free + const char * string = NULL; // do not free + +#if CONFIG_KXLD unsigned int i; + uint32_t numDirectDependencies = 0; + kern_return_t kxldResult; + KXLDDependency * kxlddeps = NULL; // must kfree + uint32_t num_kxlddeps = 0; + struct mach_header ** kxldHeaderPtr = NULL; // do not free + struct mach_header * kxld_header = NULL; // xxx - need to free here? +#endif // CONFIG_KXLD /* We need the version string for a variety of bits below. */ @@ -5485,12 +6203,24 @@ OSKext::loadExecutable() } } +#if defined(__x86_64__) || defined(__i386__) + if (flags.resetSegmentsFromVnode) { + /* Fixup the chains and slide the mach headers */ + kernel_mach_header_t *mh = (kernel_mach_header_t *)kmod_info->address; + + if (i386_slide_individual_kext(mh, PE_get_kc_slide(kc_type)) != KERN_SUCCESS) { + result = kOSKextReturnValidation; + goto finish; + } + } +#endif //(__x86_64__) || defined(__i386__) + if (isPrelinked()) { goto register_kmod; } /* all callers must be entitled */ - if (FALSE == IOTaskHasEntitlement(current_task(), kOSKextManagementEntitlement)) { + if (FALSE == IOTaskHasEntitlement(current_task(), kOSKextCollectionManagementEntitlement)) { OSKextLog(this, kOSKextLogErrorLevel | kOSKextLogLoadFlag, "Not entitled to link kext '%s'", @@ -5514,19 +6244,20 @@ OSKext::loadExecutable() } if (isInterface()) { - OSData *executableCopy = OSData::withData(theExecutable); - setLinkedExecutable(executableCopy); - executableCopy->release(); + OSSharedPtr executableCopy = OSData::withData(theExecutable); + if (executableCopy) { + setLinkedExecutable(executableCopy.get()); + } goto register_kmod; } +#if CONFIG_KXLD numDirectDependencies = getNumDependencies(); if (flags.hasBleedthrough) { linkDependencies = dependencies; - linkDependencies->retain(); } else { - linkDependencies = OSArray::withArray(dependencies); + linkDependencies = OSArray::withArray(dependencies.get()); if (!linkDependencies) { OSKextLog(this, kOSKextLogErrorLevel | @@ -5539,7 +6270,7 @@ OSKext::loadExecutable() for (i = 0; i < numDirectDependencies; ++i) { OSKext * dependencyKext = OSDynamicCast(OSKext, dependencies->getObject(i)); - dependencyKext->addBleedthroughDependencies(linkDependencies); + dependencyKext->addBleedthroughDependencies(linkDependencies.get()); } } @@ -5568,17 +6299,17 @@ OSKext::loadExecutable() OSKext * dependency = OSDynamicCast(OSKext, linkDependencies->getObject(i)); if (dependency->isInterface()) { - OSKext *interfaceTargetKext = NULL; - OSData * interfaceTarget = NULL; + OSKext *interfaceTargetKext = NULL; //do not release + OSData * interfaceTarget = NULL; //do not release if (dependency->isKernelComponent()) { interfaceTargetKext = sKernelKext; - interfaceTarget = sKernelKext->linkedExecutable; + interfaceTarget = sKernelKext->linkedExecutable.get(); } else { interfaceTargetKext = OSDynamicCast(OSKext, dependency->dependencies->getObject(0)); - interfaceTarget = interfaceTargetKext->linkedExecutable; + interfaceTarget = interfaceTargetKext->linkedExecutable.get(); } if (!interfaceTarget) { @@ -5594,8 +6325,13 @@ OSKext::loadExecutable() kxlddeps[i].kext_size = interfaceTarget->getLength(); kxlddeps[i].kext_name = strdup(interfaceTargetKext->getIdentifierCString()); - 
kxlddeps[i].interface = (u_char *) dependency->linkedExecutable->getBytesNoCopy(); - kxlddeps[i].interface_size = dependency->linkedExecutable->getLength(); + if (dependency->linkedExecutable != NULL) { + kxlddeps[i].interface = (u_char *) dependency->linkedExecutable->getBytesNoCopy(); + kxlddeps[i].interface_size = dependency->linkedExecutable->getLength(); + } else { + kxlddeps[i].interface = (u_char *) NULL; + kxlddeps[i].interface_size = 0; + } kxlddeps[i].interface_name = strdup(dependency->getIdentifierCString()); } else { kxlddeps[i].kext = (u_char *) dependency->linkedExecutable->getBytesNoCopy(); @@ -5652,6 +6388,14 @@ OSKext::loadExecutable() flush_dcache(kmod_info->address, kmod_info->size, false); invalidate_icache(kmod_info->address, kmod_info->size, false); #endif + +#else // !CONFIG_KXLD + OSKextLog(this, kOSKextLogErrorLevel | kOSKextLogLoadFlag, + "Refusing to link non-prelinked kext: %s (no kxld support)", getIdentifierCString()); + result = kOSKextReturnLinkError; + goto finish; +#endif // CONFIG_KXLD + register_kmod: if (isInterface()) { @@ -5677,7 +6421,7 @@ register_kmod: } kmod_info->id = loadTag = sNextLoadTag++; - kmod_info->reference_count = 0; // KMOD_DECL... sets it to -1 (invalid). + kmod_info->reference_count = 0; // KMOD_DECL... sets it to -1 (invalid). /* Stamp the bundle ID and version from the OSKext over anything * resident inside the kmod_info. @@ -5712,6 +6456,36 @@ register_kmod: } } + if (kmod_info->hdr_size > UINT32_MAX) { + OSKextLog(this, + kOSKextLogErrorLevel | + kOSKextLogLoadFlag, +#if __LP64__ + "Kext %s header size is too large (%lu > UINT32_MAX).", +#else + "Kext %s header size is too large (%u > UINT32_MAX).", +#endif + kmod_info->name, + kmod_info->hdr_size); + result = KERN_FAILURE; + goto finish; + } + + if (kmod_info->size > UINT32_MAX) { + OSKextLog(this, + kOSKextLogErrorLevel | + kOSKextLogLoadFlag, +#if __LP64__ + "Kext %s size is too large (%lu > UINT32_MAX).", +#else + "Kext %s size is too large (%u > UINT32_MAX).", +#endif + kmod_info->name, + kmod_info->size); + result = KERN_FAILURE; + goto finish; + } + if (!isInterface() && linkedExecutable) { OSKextLog(this, kOSKextLogProgressLevel | @@ -5723,10 +6497,13 @@ register_kmod: (unsigned)kmod_info->id); } - /* if prelinked, VM protections are already set */ - result = setVMAttributes(!isPrelinked(), true); - if (result != KERN_SUCCESS) { - goto finish; + /* VM protections and wiring for the Aux KC are done at collection loading time */ + if (kc_type != KCKindAuxiliary || flags.resetSegmentsFromVnode) { + /* if prelinked and primary KC, VM protections are already set */ + result = setVMAttributes(!isPrelinked() || flags.resetSegmentsFromVnode, true); + if (result != KERN_SUCCESS) { + goto finish; + } } #if KASAN @@ -5749,8 +6526,8 @@ register_kmod: result = kOSReturnSuccess; finish: - OSSafeReleaseNULL(linkDependencies); +#if CONFIG_KXLD /* Clear up locally allocated dependency info. 
*/ for (i = 0; i < num_kxlddeps; ++i) { @@ -5758,16 +6535,17 @@ finish: if (kxlddeps[i].kext_name) { size = 1 + strlen(kxlddeps[i].kext_name); - kfree(kxlddeps[i].kext_name, size); + kheap_free(KHEAP_DATA_BUFFERS, kxlddeps[i].kext_name, size); } if (kxlddeps[i].interface_name) { size = 1 + strlen(kxlddeps[i].interface_name); - kfree(kxlddeps[i].interface_name, size); + kheap_free(KHEAP_DATA_BUFFERS, kxlddeps[i].interface_name, size); } } if (kxlddeps) { kfree(kxlddeps, (num_kxlddeps * sizeof(*kxlddeps))); } +#endif // CONFIG_KXLD /* We no longer need the unrelocated executable (which the linker * has altered anyhow). @@ -5787,17 +6565,40 @@ finish: } if (isInterface()) { kfree(kmod_info, sizeof(kmod_info_t)); + kmod_info = NULL; } - kmod_info = NULL; - if (linkedExecutable) { - linkedExecutable->release(); - linkedExecutable = NULL; + if (kc_type == KCKindUnknown) { + kmod_info = NULL; + if (linkedExecutable) { + linkedExecutable.reset(); + } } } return result; } +#if VM_MAPPED_KEXTS +/* static */ +void +OSKext::jettisonFileSetLinkeditSegment(kernel_mach_header_t *mh) +{ + kernel_segment_command_t *linkeditseg = NULL; + + linkeditseg = getsegbynamefromheader(mh, SEG_LINKEDIT); + assert(linkeditseg != NULL); + + /* BootKC on x86_64 is not vm mapped */ + ml_static_mfree(linkeditseg->vmaddr, linkeditseg->vmsize); + + OSKextLog(/* kext */ NULL, + kOSKextLogProgressLevel | + kOSKextLogGeneralFlag, + "Jettisoning fileset Linkedit segments from vmaddr %llx with size %llu", + linkeditseg->vmaddr, linkeditseg->vmsize); +} +#endif /* VM_MAPPED_KEXTS */ + /********************************************************************* * The linkedit segment is used by the kext linker for dependency * resolution, and by dtrace for probe initialization. We can free it @@ -5811,7 +6612,11 @@ OSKext::jettisonLinkeditSegment(void) kernel_segment_command_t * linkedit = NULL; vm_offset_t start; vm_size_t linkeditsize, kextsize; - OSData * data = NULL; + OSSharedPtr data; + + if (isInFileset()) { + return; + } #if NO_KEXTD /* We can free symbol tables for all embedded kexts because we don't @@ -5844,8 +6649,11 @@ OSKext::jettisonLinkeditSegment(void) kextsize = kmod_info->size - linkeditsize; start = linkedit->vmaddr; - data = OSData::withBytesNoCopy((void *)kmod_info->address, kextsize); - if (!data) { + if (kextsize > UINT_MAX) { + goto finish; + } + data = OSData::withBytesNoCopy((void *)kmod_info->address, (unsigned int)kextsize); + if (!data) { goto finish; } @@ -5859,8 +6667,7 @@ OSKext::jettisonLinkeditSegment(void) data->setDeallocFunction(osdata_phys_free); #endif linkedExecutable->setDeallocFunction(NULL); - linkedExecutable->release(); - linkedExecutable = data; + linkedExecutable = os::move(data); flags.jettisonLinkeditSeg = 1; /* Free the linkedit segment. 
@@ -5894,6 +6701,10 @@ OSKext::jettisonDATASegmentPadding(void) } mh = (kernel_mach_header_t *)kmod_info->address; + if (isInFileset()) { + return; + } + dataSeg = getsegbynamefromheader(mh, SEG_DATA); if (dataSeg == NULL) { return; @@ -5943,8 +6754,7 @@ OSKext::setLinkedExecutable(OSData * anExecutable) "that already has one (%s).\n", getIdentifierCString()); } - linkedExecutable = anExecutable; - linkedExecutable->retain(); + linkedExecutable.reset(anExecutable, OSRetain); return; } @@ -5963,7 +6773,7 @@ OSKext::registerKextsWithDTrace(void) IORecursiveLockLock(sKextLock); for (i = 0; i < count; i++) { - OSKext * thisKext = NULL;// do not release + OSKext * thisKext = NULL; // do not release thisKext = OSDynamicCast(OSKext, sLoadedKexts->getObject(i)); if (!thisKext || !thisKext->isExecutable()) { @@ -5994,6 +6804,18 @@ OSKext::registerWithDTrace(void) if (!flags.dtraceInitialized && (dtrace_modload != NULL)) { uint32_t modflag = 0; OSObject * forceInit = getPropertyForHostArch("OSBundleForceDTraceInit"); + +#if VM_MAPPED_KEXTS + if (!sKeepSymbols && kc_type == KCKindPrimary) { + if (forceInit == kOSBooleanTrue) { + /* Make sure the kext is not from the Boot KC */ + panic("OSBundleForceDTraceInit key specified for the Boot KC kext : %s", getIdentifierCString()); + } else { + /* Linkedit segment of the Boot KC is gone, make sure fbt_provide_module don't use kernel symbols */ + modflag |= KMOD_DTRACE_NO_KERNEL_SYMS; + } + } +#endif /* VM_MAPPED_KEXTS */ if (forceInit == kOSBooleanTrue) { modflag |= KMOD_DTRACE_FORCE_INIT; } @@ -6031,19 +6853,21 @@ OSKext::unregisterWithDTrace(void) #if defined(__arm__) || defined(__arm64__) static inline kern_return_t OSKext_protect( + kernel_mach_header_t *kext_mh, vm_map_t map, vm_map_offset_t start, vm_map_offset_t end, vm_prot_t new_prot, - boolean_t set_max) + boolean_t set_max, + kc_kind_t kc_type) { -#pragma unused(map) - assert(map == kernel_map); // we can handle KEXTs arising from the PRELINK segment and no others +#pragma unused(kext_mh,map,kc_type) + assert(map == kernel_map); // we can handle KEXTs arising from the PRELINK segment and no others assert(start <= end); if (start >= end) { - return KERN_SUCCESS; // Punt segments of length zero (e.g., headers) or less (i.e., blunders) + return KERN_SUCCESS; // Punt segments of length zero (e.g., headers) or less (i.e., blunders) } else if (set_max) { - return KERN_SUCCESS; // Punt set_max, as there's no mechanism to record that state + return KERN_SUCCESS; // Punt set_max, as there's no mechanism to record that state } else { return ml_static_protect(start, end - start, new_prot); } @@ -6051,14 +6875,16 @@ OSKext_protect( static inline kern_return_t OSKext_wire( + kernel_mach_header_t *kext_mh, vm_map_t map, vm_map_offset_t start, vm_map_offset_t end, vm_prot_t access_type, - boolean_t user_wire) + boolean_t user_wire, + kc_kind_t kc_type) { -#pragma unused(map,start,end,access_type,user_wire) - return KERN_SUCCESS; // No-op as PRELINK kexts are cemented into physical memory at boot +#pragma unused(kext_mh,map,start,end,access_type,user_wire,kc_type) + return KERN_SUCCESS; // No-op as PRELINK kexts are cemented into physical memory at boot } #else #error Unrecognized architecture @@ -6066,26 +6892,41 @@ OSKext_wire( #else static inline kern_return_t OSKext_protect( + kernel_mach_header_t *kext_mh, vm_map_t map, vm_map_offset_t start, vm_map_offset_t end, vm_prot_t new_prot, - boolean_t set_max) + boolean_t set_max, + kc_kind_t kc_type) { - if (start == end) { // 10538581 + if (start == end) { // 
10538581 return KERN_SUCCESS; } + if (kernel_mach_header_is_in_fileset(kext_mh) && kc_type == KCKindPrimary) { + /* + * XXX: This will probably need to be different for AuxKC and + * pageableKC! + */ + return ml_static_protect(start, end - start, new_prot); + } return vm_map_protect(map, start, end, new_prot, set_max); } static inline kern_return_t OSKext_wire( + kernel_mach_header_t *kext_mh, vm_map_t map, vm_map_offset_t start, vm_map_offset_t end, vm_prot_t access_type, - boolean_t user_wire) + boolean_t user_wire, + kc_kind_t kc_type) { + if (kernel_mach_header_is_in_fileset(kext_mh) && kc_type == KCKindPrimary) { + /* TODO: we may need to hook this for the pageableKC */ + return KERN_SUCCESS; + } return vm_map_wire_kernel(map, start, end, access_type, VM_KERN_MEMORY_KEXT, user_wire); } #endif @@ -6095,8 +6936,10 @@ OSKext::setVMAttributes(bool protect, bool wire) { vm_map_t kext_map = NULL; kernel_segment_command_t * seg = NULL; - vm_map_offset_t start = 0; - vm_map_offset_t end = 0; + vm_map_offset_t start_protect = 0; + vm_map_offset_t start_wire = 0; + vm_map_offset_t end_protect = 0; + vm_map_offset_t end_wire = 0; OSReturn result = kOSReturnError; if (isInterface() || !declaresExecutable() || flags.builtin) { @@ -6123,11 +6966,18 @@ OSKext::setVMAttributes(bool protect, bool wire) result = KERN_SUCCESS; goto finish; } + + if (isInFileset() && kc_type != KCKindPageable) { + // kexts in filesets have protections setup as part of collection loading + result = KERN_SUCCESS; + goto finish; + } #endif /* Protect the headers as read-only; they do not need to be wired */ - result = (protect) ? OSKext_protect(kext_map, kmod_info->address, - kmod_info->address + kmod_info->hdr_size, VM_PROT_READ, TRUE) + result = (protect) ? OSKext_protect((kernel_mach_header_t *)kmod_info->address, + kext_map, kmod_info->address, + kmod_info->address + kmod_info->hdr_size, VM_PROT_READ, TRUE, kc_type) : KERN_SUCCESS; if (result != KERN_SUCCESS) { goto finish; @@ -6142,11 +6992,35 @@ OSKext::setVMAttributes(bool protect, bool wire) assert((seg->vmsize & PAGE_MASK) == 0); #endif - start = round_page(seg->vmaddr); - end = trunc_page(seg->vmaddr + seg->vmsize); + /* + * For the non page aligned segments, the range calculation for protection + * and wiring differ as follows: + * + * Protection: The non page aligned data at the start or at the end of the + * segment is excluded from the protection. This exclusion is needed to make + * sure OSKext_protect is not called twice on same page, if the page is shared + * between two segments. + * + * Wiring: The non page aligned data at the start or at the end of the + * segment is included in the wiring range, this inclusion is needed to make sure + * all the data of the segment is wired. 
+ */ + start_protect = round_page(seg->vmaddr); + end_protect = trunc_page(seg->vmaddr + seg->vmsize); + + start_wire = trunc_page(seg->vmaddr); + end_wire = round_page(seg->vmaddr + seg->vmsize); - if (protect) { - result = OSKext_protect(kext_map, start, end, seg->maxprot, TRUE); + /* + * Linkedit and Linkinfo for the Pageable KC and the Aux KC are shared + * across kexts and data from kexts is not page aligned + */ + if (protect && (end_protect > start_protect) && + ((strncmp(seg->segname, SEG_LINKEDIT, sizeof(seg->segname)) != 0 && + strncmp(seg->segname, SEG_LINKINFO, sizeof(seg->segname)) != 0) || + (kc_type != KCKindPageable && kc_type != KCKindAuxiliary))) { + result = OSKext_protect((kernel_mach_header_t *)kmod_info->address, + kext_map, start_protect, end_protect, seg->maxprot, TRUE, kc_type); if (result != KERN_SUCCESS) { OSKextLog(this, kOSKextLogErrorLevel | @@ -6157,7 +7031,8 @@ OSKext::setVMAttributes(bool protect, bool wire) goto finish; } - result = OSKext_protect(kext_map, start, end, seg->initprot, FALSE); + result = OSKext_protect((kernel_mach_header_t *)kmod_info->address, + kext_map, start_protect, end_protect, seg->initprot, FALSE, kc_type); if (result != KERN_SUCCESS) { OSKextLog(this, kOSKextLogErrorLevel | @@ -6170,7 +7045,8 @@ OSKext::setVMAttributes(bool protect, bool wire) } if (segmentShouldBeWired(seg) && wire) { - result = OSKext_wire(kext_map, start, end, seg->initprot, FALSE); + result = OSKext_wire((kernel_mach_header_t *)kmod_info->address, + kext_map, start_wire, end_wire, seg->initprot, FALSE, kc_type); if (result != KERN_SUCCESS) { goto finish; } @@ -6188,7 +7064,8 @@ finish: boolean_t OSKext::segmentShouldBeWired(kernel_segment_command_t *seg) { - return sKeepSymbols || strncmp(seg->segname, SEG_LINKEDIT, sizeof(seg->segname)); + return sKeepSymbols || (strncmp(seg->segname, SEG_LINKEDIT, sizeof(seg->segname)) && + strncmp(seg->segname, SEG_LINKINFO, sizeof(seg->segname))); } /********************************************************************* @@ -6204,8 +7081,11 @@ OSKext::validateKextMapping(bool startFlag) mach_vm_address_t address = 0; mach_vm_size_t size = 0; uint32_t depth = 0; + uint64_t kext_segbase = 0; + uint64_t kext_segsize = 0; mach_msg_type_number_t count; vm_region_submap_short_info_data_64_t info; + uintptr_t kext_slide = PE_get_kc_slide(kc_type); if (flags.builtin) { return kOSReturnSuccess; @@ -6247,10 +7127,16 @@ OSKext::validateKextMapping(bool startFlag) kext_map = kext_get_vm_map(kmod_info); depth = (kernel_map == kext_map) ? 1 : 2; + if (isInFileset()) { +#if defined(HAS_APPLE_PAC) + address = (mach_vm_address_t)ptrauth_auth_data((void*)address, ptrauth_key_function_pointer, 0); +#endif /* defined(HAS_APPLE_PAC) */ + } /* Verify that the start/stop function lies within the kext's address range. */ - if (getcommandfromheader((kernel_mach_header_t *)kmod_info->address, LC_SEGMENT_SPLIT_INFO)) { + if (getcommandfromheader((kernel_mach_header_t *)kmod_info->address, LC_SEGMENT_SPLIT_INFO) || + isInFileset()) { /* This will likely be how we deal with split kexts; walk the segments to * check that the function lies inside one of the segments of this kext. 
*/ @@ -6258,6 +7144,8 @@ OSKext::validateKextMapping(bool startFlag) seg != NULL; seg = nextsegfromheader((kernel_mach_header_t *)kmod_info->address, seg)) { if ((address >= seg->vmaddr) && address < (seg->vmaddr + seg->vmsize)) { + kext_segbase = seg->vmaddr; + kext_segsize = seg->vmsize; break; } } @@ -6271,8 +7159,8 @@ OSKext::validateKextMapping(bool startFlag) getIdentifierCString(), whichOp, whichOp, - (void *)ml_static_unslide(address), - (void *)ml_static_unslide(kmod_info->address)); + (void *)(((uintptr_t)address) - kext_slide), + (void *)(((uintptr_t)kmod_info->address) - kext_slide)); result = kOSKextReturnBadData; goto finish; } @@ -6289,9 +7177,9 @@ OSKext::validateKextMapping(bool startFlag) getIdentifierCString(), whichOp, whichOp, - (void *)ml_static_unslide(address), - (void *)ml_static_unslide(kmod_info->address), - (void *)(ml_static_unslide(kmod_info->address) + kmod_info->size)); + (void *)(((uintptr_t)address) - kext_slide), + (void *)(((uintptr_t)kmod_info->address) - kext_slide), + (void *)((((uintptr_t)kmod_info->address) - kext_slide) + kmod_info->size)); result = kOSKextReturnBadData; goto finish; } @@ -6302,23 +7190,44 @@ OSKext::validateKextMapping(bool startFlag) * we'll likely have panicked well before any attempt to stop the kext. */ if (startFlag) { - /* Verify that the start/stop function is executable. - */ - kern_result = mach_vm_region_recurse(kernel_map, &address, &size, &depth, - (vm_region_recurse_info_t)&info, &count); - if (kern_result != KERN_SUCCESS) { - OSKextLog(this, - kOSKextLogErrorLevel | - kOSKextLogLoadFlag, - "Kext %s - bad %s pointer %p.", - getIdentifierCString(), - whichOp, (void *)ml_static_unslide(address)); - result = kOSKextReturnBadData; - goto finish; + if (!isInFileset() || kc_type != KCKindPrimary) { + /* + * Verify that the start/stop function is executable. + */ + kern_result = mach_vm_region_recurse(kernel_map, &address, &size, &depth, + (vm_region_recurse_info_t)&info, &count); + if (kern_result != KERN_SUCCESS) { + OSKextLog(this, + kOSKextLogErrorLevel | + kOSKextLogLoadFlag, + "Kext %s - bad %s pointer %p.", + getIdentifierCString(), + whichOp, (void *)ml_static_unslide(address)); + result = kOSKextReturnBadData; + goto finish; + } + } else { + /* + * Since kexts loaded from the primary KC are held in memory + * allocated by efiboot, we cannot use mach_vm_region_recurse() to + * discover that memory's protection flags. Instead, we need to + * get that information from the kernel pmap itself. Above, we + * (potentially) saved the size of the segment in which the address + * in question was located. If we have a non-zero size, verify + * that all pages in the (address, address + kext_segsize) range + * are marked executable. If we somehow did not record the size + * (or the base) just verify the single page that includes the address. 
+ */ + if (kext_segbase == 0 || kext_segsize == 0) { + kext_segbase = address & ~(uint64_t)PAGE_MASK; + kext_segsize = PAGE_SIZE; + } } #if VM_MAPPED_KEXTS - if (!(info.protection & VM_PROT_EXECUTE)) { + if (((!isInFileset() || kc_type != KCKindPrimary) && !(info.protection & VM_PROT_EXECUTE)) || + ((isInFileset() && kc_type == KCKindPrimary) && + ml_static_verify_page_protections(kext_segbase, kext_segsize, VM_PROT_EXECUTE) != KERN_SUCCESS)) { OSKextLog(this, kOSKextLogErrorLevel | kOSKextLogLoadFlag, @@ -6355,6 +7264,10 @@ OSKext::verifySegmentMapping(kernel_segment_command_t *seg) { mach_vm_address_t address = 0; + if (seg->vmsize > UINT32_MAX) { + return false; + } + if (!segmentShouldBeWired(seg)) { return true; } @@ -6386,7 +7299,7 @@ OSKextLogKextInfo(OSKext *aKext, uint64_t address, uint64_t size, firehose_trace struct firehose_trace_uuid_info_s uuid_info_s; firehose_trace_uuid_info_t uuid_info = &uuid_info_s; size_t uuid_info_len = sizeof(struct firehose_trace_uuid_info_s); - OSData *uuid_data; + OSSharedPtr uuid_data; stamp = firehose_tracepoint_time(firehose_activity_flags_default); trace_id.ftid_value = FIREHOSE_TRACE_ID_MAKE(firehose_tracepoint_namespace_metadata, _firehose_tracepoint_type_metadata_kext, (firehose_tracepoint_flags_t)0, code); @@ -6394,7 +7307,6 @@ OSKextLogKextInfo(OSKext *aKext, uint64_t address, uint64_t size, firehose_trace uuid_data = aKext->copyTextUUID(); if (uuid_data) { memcpy(uuid_info->ftui_uuid, uuid_data->getBytesNoCopy(), sizeof(uuid_info->ftui_uuid)); - OSSafeReleaseNULL(uuid_data); } uuid_info->ftui_size = size; @@ -6487,7 +7399,7 @@ OSKext::start(bool startDependenciesFlag) "Not starting %s - dependency %s not started yet.", getIdentifierCString(), dependency->getIdentifierCString()); - result = kOSKextReturnStartStopError; // xxx - make new return? + result = kOSKextReturnStartStopError; // xxx - make new return? goto finish; } } @@ -6527,7 +7439,7 @@ OSKext::start(bool startDependenciesFlag) /* result not actually used */ kOSKextReturnStartStopError, /* invokeFlag */ false); OSKextLog(this, - kOSKextLogProgressLevel | + kOSKextLogWarningLevel | kOSKextLogLoadFlag, "Kext %s did not start (return code 0x%x).", getIdentifierCString(), result); @@ -6546,14 +7458,14 @@ OSKext::canUnloadKextWithIdentifier( bool checkClassesFlag) { bool result = false; - OSKext * aKext = NULL;// do not release + OSKext * aKext = NULL; // do not release IORecursiveLockLock(sKextLock); aKext = OSDynamicCast(OSKext, sKextsByID->getObject(kextIdentifier)); if (!aKext) { - goto finish; // can't unload what's not loaded + goto finish; // can't unload what's not loaded } if (aKext->isLoaded()) { @@ -6678,6 +7590,7 @@ OSKext::unload(void) unsigned int index; uint32_t num_kmod_refs = 0; OSKextAccount * freeAccount; + bool in_fileset = false; if (!sUnloadEnabled) { OSKextLog(this, @@ -6690,6 +7603,10 @@ OSKext::unload(void) goto finish; } + // cache this result so we don't need to access the kmod_info after + // it's been potentially free'd + in_fileset = isInFileset(); + /* Refuse to unload if we have clients or instances. It is up to * the caller to make sure those aren't true. 
*/ @@ -6723,7 +7640,7 @@ OSKext::unload(void) goto finish; } - if (metaClasses && !OSMetaClass::removeClasses(metaClasses)) { + if (metaClasses && !OSMetaClass::removeClasses(metaClasses.get())) { OSKextLog(this, kOSKextLogErrorLevel | kOSKextLogLoadFlag | kOSKextLogKextBookkeepingFlag, @@ -6810,7 +7727,7 @@ OSKext::unload(void) sLoadedKexts->getObject(index - 1)); nextKext->kmod_info->next = gapKext->kmod_info; - } else { /* index == 0 */ + } else { /* index == 0 */ nextKext->kmod_info->next = NULL; } } @@ -6819,7 +7736,7 @@ OSKext::unload(void) if (lastKext && !lastKext->isKernel()) { kmod = lastKext->kmod_info; } else { - kmod = NULL; // clear the global kmod variable + kmod = NULL; // clear the global kmod variable } } @@ -6864,7 +7781,7 @@ OSKext::unload(void) #endif #if VM_MAPPED_KEXTS - if (!isInterface()) { + if (!isInterface() && (!in_fileset || flags.resetSegmentsFromVnode)) { kernel_segment_command_t *seg = NULL; vm_map_t kext_map = kext_get_vm_map(kmod_info); @@ -6887,8 +7804,11 @@ OSKext::unload(void) seg = firstsegfromheader((kernel_mach_header_t *)kmod_info->address); while (seg) { if (segmentShouldBeWired(seg)) { - result = vm_map_unwire(kext_map, seg->vmaddr, - seg->vmaddr + seg->vmsize, FALSE); + vm_map_offset_t start_wire = trunc_page(seg->vmaddr); + vm_map_offset_t end_wire = round_page(seg->vmaddr + seg->vmsize); + + result = vm_map_unwire(kext_map, start_wire, + end_wire, FALSE); if (result != KERN_SUCCESS) { OSKextLog(this, kOSKextLogErrorLevel | @@ -6902,9 +7822,30 @@ OSKext::unload(void) seg = nextsegfromheader((kernel_mach_header_t *) kmod_info->address, seg); } +#if defined(__x86_64__) || defined(__i386__) + if (in_fileset && flags.resetSegmentsFromVnode) { + IORecursiveLockLock(sKextLock); + resetKCFileSetSegments(); + IORecursiveLockUnlock(sKextLock); + } +#endif // (__x86_64__) || defined(__i386__) + } +#endif /* VM_MAPPED_KEXTS */ + if (flags.resetSegmentsFromImmutableCopy) { + result = resetMutableSegments(); + if (result != kOSReturnSuccess) { + OSKextLog(this, + kOSKextLogErrorLevel | + kOSKextLogLoadFlag, + "Failed to reset kext %s.", + getIdentifierCString()); + result = kOSKextReturnInternalError; + goto finish; + } + } + if (kc_type == KCKindUnknown) { + linkedExecutable.reset(); } -#endif - OSSafeReleaseNULL(linkedExecutable); } /* An interface kext has a fake kmod_info that was allocated, @@ -6912,9 +7853,12 @@ OSKext::unload(void) */ if (isInterface()) { kfree(kmod_info, sizeof(kmod_info_t)); + kmod_info = NULL; } - kmod_info = NULL; + if (!in_fileset) { + kmod_info = NULL; + } flags.loaded = false; flushDependencies(); @@ -6925,10 +7869,10 @@ OSKext::unload(void) * kernel cache. 
9055303 */ if (isPrelinked()) { - if (!_OSKextInUnloadedPrelinkedKexts(bundleID)) { + if (!_OSKextInUnloadedPrelinkedKexts(bundleID.get())) { IORecursiveLockLock(sKextLock); if (sUnloadedPrelinkedKexts) { - sUnloadedPrelinkedKexts->setObject(bundleID); + sUnloadedPrelinkedKexts->setObject(bundleID.get()); } IORecursiveLockUnlock(sKextLock); } @@ -6939,7 +7883,7 @@ OSKext::unload(void) "Kext %s unloaded.", getIdentifierCString()); queueKextNotification(kKextRequestPredicateUnloadNotification, - OSDynamicCast(OSString, bundleID)); + OSDynamicCast(OSString, bundleID.get())); finish: OSKext::saveLoadedKextPanicList(); @@ -6959,7 +7903,7 @@ OSKext::queueKextNotification( OSString * kextIdentifier) { OSReturn result = kOSReturnError; - OSDictionary * loadRequest = NULL;// must release + OSSharedPtr loadRequest; if (!kextIdentifier) { result = kOSKextReturnInvalidArgument; @@ -6969,35 +7913,35 @@ OSKext::queueKextNotification( /* Create a new request unless one is already sitting * in sKernelRequests for this bundle identifier */ - result = _OSKextCreateRequest(notificationName, &loadRequest); + result = _OSKextCreateRequest(notificationName, loadRequest); if (result != kOSReturnSuccess) { goto finish; } - if (!_OSKextSetRequestArgument(loadRequest, + if (!_OSKextSetRequestArgument(loadRequest.get(), kKextRequestArgumentBundleIdentifierKey, kextIdentifier)) { result = kOSKextReturnNoMemory; goto finish; } - if (!sKernelRequests->setObject(loadRequest)) { + if (!sKernelRequests->setObject(loadRequest.get())) { result = kOSKextReturnNoMemory; goto finish; } - /* We might want to only queue the notification if kextd is active, + /* We might want to only queue the notification if the IOKit daemon is active, * but that wouldn't work for embedded. Note that we don't care if * the ping immediately succeeds here so don't do anything with the * result of this call. */ - OSKext::pingKextd(); + OSKext::pingIOKitDaemon(); result = kOSReturnSuccess; finish: - OSSafeReleaseNULL(loadRequest); - return result; } + +#if CONFIG_KXLD /********************************************************************* *********************************************************************/ static void @@ -7079,6 +8023,17 @@ finish: return; } +#else // !CONFIG_KXLD + +/* static */ +void +OSKext::considerDestroyingLinkContext(void) +{ + return; +} + +#endif // CONFIG_KXLD + #if PRAGMA_MARK #pragma mark Autounload #endif @@ -7092,6 +8047,17 @@ OSKext::autounloadKext(OSKext * aKext) { OSReturn result = kOSKextReturnInUse; +#if NO_KEXTD + /* + * Do not unload prelinked kexts on platforms that do not have an + * IOKit daemon as there is no way to reload the kext or restart + * matching. + */ + if (aKext->isPrelinked()) { + goto finish; + } +#endif /* defined(__x86_64__) */ + /* Check for external references to this kext (usu. dependents), * instances of defined classes (or classes derived from them), * outstanding requests. @@ -7233,7 +8199,11 @@ OSKext::considerUnloads(Boolean rescheduleOnlyFlag) } thread_call_cancel(sUnloadCallout); - if (OSKext::getAutounloadEnabled() && !sSystemSleep) { + if (OSKext::getAutounloadEnabled() && !sSystemSleep +#if !NO_KEXTD + && sIOKitDaemonActive +#endif + ) { clock_interval_to_deadline(sConsiderUnloadDelay, 1000 * 1000 * 1000, &when); @@ -7300,6 +8270,8 @@ OSKextSystemSleepOrWake(UInt32 messageType) #if PRAGMA_MARK #pragma mark Prelinked Kernel #endif + +#ifdef CONFIG_KXLD /********************************************************************* * Do not access sConsiderUnloads... 
variables other than * sConsiderUnloadsExecuted in this function. They are guarded by a @@ -7311,9 +8283,9 @@ OSKext::considerRebuildOfPrelinkedKernel(void) { static bool requestedPrelink = false; OSReturn checkResult = kOSReturnError; - OSDictionary * prelinkRequest = NULL;// must release - OSCollectionIterator * kextIterator = NULL;// must release - const OSSymbol * thisID = NULL;// do not release + OSSharedPtr prelinkRequest; + OSSharedPtr kextIterator; + const OSSymbol * thisID = NULL; // do not release bool doRebuild = false; AbsoluteTime my_abstime; UInt64 my_ns; @@ -7327,7 +8299,7 @@ OSKext::considerRebuildOfPrelinkedKernel(void) /* no direct return from this point */ IORecursiveLockLock(sKextLock); - /* We need to wait for kextd to get up and running with unloads already done + /* We need to wait for the IOKit daemon to get up and running with unloads already done * and any new startup kexts loaded. */ if (!sConsiderUnloadsExecuted || @@ -7372,20 +8344,20 @@ OSKext::considerRebuildOfPrelinkedKernel(void) * We will rebuild if any kext is not marked prelinked AND is not in our * list of prelinked kexts that got unloaded. (see radar 9055303) */ - kextIterator = OSCollectionIterator::withCollection(sKextsByID); + kextIterator = OSCollectionIterator::withCollection(sKextsByID.get()); if (!kextIterator) { goto finish; } while ((thisID = OSDynamicCast(OSSymbol, kextIterator->getNextObject()))) { - OSKext * thisKext;// do not release + OSKext * thisKext; // do not release thisKext = OSDynamicCast(OSKext, sKextsByID->getObject(thisID)); if (!thisKext || thisKext->isPrelinked() || thisKext->isKernel()) { continue; } - if (_OSKextInUnloadedPrelinkedKexts(thisKext->bundleID)) { + if (_OSKextInUnloadedPrelinkedKexts(thisKext->bundleID.get())) { continue; } /* kext is loaded and was not in current kernel cache so let's rebuild @@ -7404,25 +8376,34 @@ OSKext::considerRebuildOfPrelinkedKernel(void) } checkResult = _OSKextCreateRequest(kKextRequestPredicateRequestPrelink, - &prelinkRequest); + prelinkRequest); if (checkResult != kOSReturnSuccess) { goto finish; } - if (!sKernelRequests->setObject(prelinkRequest)) { + if (!sKernelRequests->setObject(prelinkRequest.get())) { goto finish; } - OSKext::pingKextd(); + OSKext::pingIOKitDaemon(); finish: IORecursiveLockUnlock(sKextLock); - OSSafeReleaseNULL(prelinkRequest); - OSSafeReleaseNULL(kextIterator); return; } +#else /* !CONFIG_KXLD */ + +void +OSKext::considerRebuildOfPrelinkedKernel(void) +{ + /* in a non-dynamic kext loading world, there is never a reason to rebuild */ + return; +} + +#endif /* CONFIG_KXLD */ + #if PRAGMA_MARK #pragma mark Dependencies #endif @@ -7433,20 +8414,23 @@ OSKext::resolveDependencies( OSArray * loopStack) { bool result = false; - OSArray * localLoopStack = NULL;// must release + OSSharedPtr localLoopStack; bool addedToLoopStack = false; - OSDictionary * libraries = NULL;// do not release - OSCollectionIterator * libraryIterator = NULL;// must release - OSString * libraryID = NULL;// do not release - OSString * infoString = NULL;// do not release - OSString * readableString = NULL;// do not release - OSKext * libraryKext = NULL;// do not release + OSDictionary * libraries = NULL; // do not release + OSSharedPtr libraryIterator; + OSString * libraryID = NULL; // do not release + OSKext * libraryKext = NULL; // do not release bool hasRawKernelDependency = false; bool hasKernelDependency = false; bool hasKPIDependency = false; bool hasPrivateKPIDependency = false; unsigned int count; +#if CONFIG_KXLD + OSString * 
infoString = NULL; // do not release + OSString * readableString = NULL; // do not release +#endif // CONFIG_KXLD + /* A kernel component will automatically have this flag set, * and a loaded kext should also have it set (as should all its * loaded dependencies). @@ -7474,8 +8458,8 @@ OSKext::resolveDependencies( "Kext %s resolving dependencies.", getIdentifierCString()); - loopStack = OSArray::withCapacity(6); // any small capacity will do - if (!loopStack) { + localLoopStack = OSArray::withCapacity(6); // any small capacity will do + if (!localLoopStack) { OSKextLog(this, kOSKextLogErrorLevel | kOSKextLogDependenciesFlag, @@ -7483,7 +8467,7 @@ OSKext::resolveDependencies( getIdentifierCString()); goto finish; } - localLoopStack = loopStack; + loopStack = localLoopStack.get(); } if (!loopStack->setObject(this)) { OSKextLog(this, @@ -7750,11 +8734,16 @@ OSKext::resolveDependencies( for (i = 0; i < count; i++) { OSKext * dependencyKext = OSDynamicCast(OSKext, dependencies->getObject(i)); - dependencyKext->addBleedthroughDependencies(dependencies); + dependencyKext->addBleedthroughDependencies(dependencies.get()); } } #endif /* __LP64__ */ +#if CONFIG_KXLD + /* + * If we're not dynamically linking kexts, then we don't need to check + * copyright strings. The linker in user space has already done this. + */ if (hasPrivateKPIDependency) { bool hasApplePrefix = false; bool infoCopyrightIsValid = false; @@ -7787,6 +8776,7 @@ OSKext::resolveDependencies( goto finish; } } +#endif // CONFIG_KXLD result = true; flags.hasAllDependencies = 1; @@ -7814,9 +8804,6 @@ finish: getIdentifierCString()); } - OSSafeReleaseNULL(localLoopStack); - OSSafeReleaseNULL(libraryIterator); - return result; } @@ -7873,7 +8860,7 @@ OSKext::flushDependencies(bool forceFlag) kOSKextLogDependenciesFlag, "Kext %s flushing dependencies.", getIdentifierCString()); - OSSafeReleaseNULL(dependencies); + dependencies.reset(); } if (!isKernelComponent()) { flags.hasAllDependencies = 0; @@ -7900,7 +8887,31 @@ OSKext::getNumDependencies(void) OSArray * OSKext::getDependencies(void) { - return dependencies; + return dependencies.get(); +} + +bool +OSKext::hasDependency(const OSSymbol * depID) +{ + bool result __block; + + if (depID == getIdentifier()) { + return true; + } + if (!dependencies) { + return false; + } + result = false; + dependencies->iterateObjects(^bool (OSObject * obj) { + OSKext * kext; + kext = OSDynamicCast(OSKext, obj); + if (!kext) { + return false; + } + result = (depID == kext->getIdentifier()); + return result; + }); + return result; } #if PRAGMA_MARK @@ -7945,7 +8956,7 @@ OSKext::addClass( } if (!flags.autounloadEnabled) { - const OSMetaClass * metaScan = NULL;// do not release + const OSMetaClass * metaScan = NULL; // do not release for (metaScan = aClass; metaScan; metaScan = metaScan->getSuperClass()) { if (metaScan == OSTypeID(IOService)) { @@ -8032,7 +9043,7 @@ finish: OSSet * OSKext::getMetaClasses(void) { - return metaClasses; + return metaClasses.get(); } /********************************************************************* @@ -8041,14 +9052,14 @@ bool OSKext::hasOSMetaClassInstances(void) { bool result = false; - OSCollectionIterator * classIterator = NULL; // must release - OSMetaClass * checkClass = NULL;// do not release + OSSharedPtr classIterator; + OSMetaClass * checkClass = NULL; // do not release if (!metaClasses) { goto finish; } - classIterator = OSCollectionIterator::withCollection(metaClasses); + classIterator = OSCollectionIterator::withCollection(metaClasses.get()); if (!classIterator) 
{ // xxx - log alloc failure? goto finish; @@ -8061,8 +9072,6 @@ OSKext::hasOSMetaClassInstances(void) } finish: - - OSSafeReleaseNULL(classIterator); return result; } @@ -8074,7 +9083,7 @@ OSKext::reportOSMetaClassInstances( const char * kextIdentifier, OSKextLogSpec msgLogSpec) { - OSKext * theKext = NULL; // must release + OSSharedPtr theKext; theKext = OSKext::lookupKextWithIdentifier(kextIdentifier); if (!theKext) { @@ -8083,7 +9092,6 @@ OSKext::reportOSMetaClassInstances( theKext->reportOSMetaClassInstances(msgLogSpec); finish: - OSSafeReleaseNULL(theKext); return; } @@ -8092,14 +9100,14 @@ finish: void OSKext::reportOSMetaClassInstances(OSKextLogSpec msgLogSpec) { - OSCollectionIterator * classIterator = NULL; // must release - OSMetaClass * checkClass = NULL;// do not release + OSSharedPtr classIterator; + OSMetaClass * checkClass = NULL; // do not release if (!metaClasses) { goto finish; } - classIterator = OSCollectionIterator::withCollection(metaClasses); + classIterator = OSCollectionIterator::withCollection(metaClasses.get()); if (!classIterator) { goto finish; } @@ -8116,13 +9124,82 @@ OSKext::reportOSMetaClassInstances(OSKextLogSpec msgLogSpec) } finish: - OSSafeReleaseNULL(classIterator); return; } #if PRAGMA_MARK #pragma mark User-Space Requests #endif + +static kern_return_t +patchDextLaunchRequests(task_t calling_task, OSArray *requests) +{ + OSReturn result = kOSReturnSuccess; + for (uint32_t requestIndex = 0; requestIndex < requests->getCount(); requestIndex++) { + OSDictionary * request = NULL; //do not release + IOUserServerCheckInToken * token = NULL; //do not release + OSString * requestPredicate = NULL; //do not release + OSSharedPtr portNameNumber; + mach_port_name_t portName = 0; + request = OSDynamicCast(OSDictionary, requests->getObject(requestIndex)); + if (!request) { + OSKextLog(/* kext */ NULL, + kOSKextLogGeneralFlag | kOSKextLogErrorLevel, + "Elements of request should be of type OSDictionary"); + result = kOSKextReturnInternalError; + goto finish; + } + requestPredicate = _OSKextGetRequestPredicate(request); + if (!requestPredicate) { + OSKextLog(/* kext */ NULL, + kOSKextLogGeneralFlag | kOSKextLogErrorLevel, + "Failed to get request predicate"); + result = kOSKextReturnInternalError; + goto finish; + } + // is this a dext launch? 
+ if (requestPredicate->isEqualTo(kKextRequestPredicateRequestDaemonLaunch)) { + token = OSDynamicCast(IOUserServerCheckInToken, _OSKextGetRequestArgument(request, kKextRequestArgumentCheckInToken)); + if (!token) { + OSKextLog(/* kext */ NULL, + kOSKextLogGeneralFlag | kOSKextLogErrorLevel, + "Could not find a IOUserServerCheckInToken in daemon launch request."); + result = kOSKextReturnInternalError; + goto finish; + } + portName = iokit_make_send_right(calling_task, token, IKOT_IOKIT_IDENT); + if (portName == 0 || portName == MACH_PORT_DEAD) { + OSKextLog(/* kext */ NULL, + kOSKextLogGeneralFlag | kOSKextLogErrorLevel, + "Could not create send right for object."); + result = kOSKextReturnInternalError; + goto finish; + } + // Store the mach port name as a OSNumber + portNameNumber = OSNumber::withNumber(portName, CHAR_BIT * sizeof(portName)); + if (!portNameNumber) { + OSKextLog(/* kext */ NULL, + kOSKextLogGeneralFlag | kOSKextLogErrorLevel, + "Could not create OSNumber object."); + result = kOSKextReturnNoMemory; + goto finish; + } + if (!_OSKextSetRequestArgument(request, kKextRequestArgumentCheckInToken, portNameNumber.get())) { + OSKextLog(/* kext */ NULL, + kOSKextLogGeneralFlag | kOSKextLogErrorLevel, + "Could not set OSNumber object as request " kKextRequestArgumentCheckInToken); + result = kOSKextReturnNoMemory; + goto finish; + } + } +finish: + if (result != kOSReturnSuccess) { + break; + } + } + return result; +} + /********************************************************************* * XXX - this function is a big ugly mess *********************************************************************/ @@ -8141,24 +9218,27 @@ OSKext::handleRequest( OSReturn result = kOSReturnError; kern_return_t kmem_result = KERN_FAILURE; - char * response = NULL;// returned by reference + char * response = NULL; // returned by reference uint32_t responseLength = 0; - OSObject * parsedXML = NULL;// must release - OSDictionary * requestDict = NULL;// do not release - OSString * errorString = NULL;// must release + bool taskCanManageAllKCs = false; + bool taskOnlyManagesBootKC = false; - OSObject * responseObject = NULL;// must release + OSSharedPtr parsedXML; + OSDictionary * requestDict = NULL; // do not release + OSSharedPtr errorString; - OSSerialize * serializer = NULL;// must release + OSSharedPtr responseObject; - OSArray * logInfoArray = NULL;// must release + OSSharedPtr serializer; - OSString * predicate = NULL;// do not release - OSString * kextIdentifier = NULL;// do not release - OSArray * kextIdentifiers = NULL;// do not release - OSKext * theKext = NULL;// do not release - OSBoolean * boolArg = NULL;// do not release + OSSharedPtr logInfoArray; + + OSString * predicate = NULL; // do not release + OSString * kextIdentifier = NULL; // do not release + OSArray * kextIdentifiers = NULL; // do not release + OSKext * theKext = NULL; // do not release + OSBoolean * boolArg = NULL; // do not release IORecursiveLockLock(sKextLock); @@ -8183,9 +9263,9 @@ OSKext::handleRequest( result = kOSKextReturnBadData; goto finish; } - parsedXML = OSUnserializeXML((const char *)requestBuffer, &errorString); + parsedXML = OSUnserializeXML((const char *)requestBuffer, errorString); if (parsedXML) { - requestDict = OSDynamicCast(OSDictionary, parsedXML); + requestDict = OSDynamicCast(OSDictionary, parsedXML.get()); } if (!requestDict) { const char * errorCString = "(unknown error)"; @@ -8220,20 +9300,65 @@ OSKext::handleRequest( "Received '%s' request from user space.", predicate->getCStringNoCopy()); + /* + * 
All management of file sets requires an entitlement + */ result = kOSKextReturnNotPrivileged; - if (hostPriv == HOST_PRIV_NULL) { - /* must be root to use these kext requests */ - if (predicate->isEqualTo(kKextRequestPredicateUnload) || - predicate->isEqualTo(kKextRequestPredicateStart) || - predicate->isEqualTo(kKextRequestPredicateStop) || - predicate->isEqualTo(kKextRequestPredicateGetKernelRequests) || - predicate->isEqualTo(kKextRequestPredicateSendResource)) { + if (predicate->isEqualTo(kKextRequestPredicateUnload) || + predicate->isEqualTo(kKextRequestPredicateStart) || + predicate->isEqualTo(kKextRequestPredicateStop) || + predicate->isEqualTo(kKextRequestPredicateGetKernelRequests) || + predicate->isEqualTo(kKextRequestPredicateSendResource) || + predicate->isEqualTo(kKextRequestPredicateLoadFileSetKC) || + predicate->isEqualTo(kKextRequestPredicateLoadCodeless) || + predicate->isEqualTo(kKextRequestPredicateLoadFromKC) || + predicate->isEqualTo(kKextRequestPredicateMissingAuxKCBundles) || + predicate->isEqualTo(kKextRequestPredicateAuxKCBundleAvailable) || + predicate->isEqualTo(kKextRequestPredicateDaemonReady)) { + if (hostPriv == HOST_PRIV_NULL) { OSKextLog(/* kext */ NULL, kOSKextLogErrorLevel | kOSKextLogIPCFlag, "Access Failure - must be root user."); goto finish; } + taskCanManageAllKCs = IOTaskHasEntitlement(current_task(), kOSKextCollectionManagementEntitlement) == TRUE; + taskOnlyManagesBootKC = IOTaskHasEntitlement(current_task(), kOSKextOnlyBootKCManagementEntitlement) == TRUE; + + if (!taskCanManageAllKCs && !taskOnlyManagesBootKC) { + OSKextLog(/* kext */ NULL, + kOSKextLogErrorLevel | + kOSKextLogIPCFlag, + "Access Failure - client not entitled to manage file sets."); + goto finish; + } + + /* + * The OnlyBootKC entitlement restricts the + * collection-management entitlement to only managing kexts in + * the BootKC. All other predicates that alter global state or + * add new KCs are disallowed. + */ + if (taskOnlyManagesBootKC && + (predicate->isEqualTo(kKextRequestPredicateGetKernelRequests) || + predicate->isEqualTo(kKextRequestPredicateSendResource) || + predicate->isEqualTo(kKextRequestPredicateLoadFileSetKC) || + predicate->isEqualTo(kKextRequestPredicateLoadCodeless) || + predicate->isEqualTo(kKextRequestPredicateMissingAuxKCBundles) || + predicate->isEqualTo(kKextRequestPredicateAuxKCBundleAvailable) || + predicate->isEqualTo(kKextRequestPredicateDaemonReady))) { + OSKextLog(/* kext */ NULL, + kOSKextLogErrorLevel | + kOSKextLogIPCFlag, + "Access Failure - client not entitled to manage non-primary KCs"); + goto finish; + } + + /* + * If we get here, then the process either has the full KC + * management entitlement, or it has the BootKC-only + * entitlement and the request is about the BootKC. + */ } /* Get common args in anticipation of use. 
@@ -8248,6 +9373,18 @@ OSKext::handleRequest( boolArg = OSDynamicCast(OSBoolean, _OSKextGetRequestArgument( requestDict, kKextRequestArgumentValueKey)); + if (taskOnlyManagesBootKC && + theKext && + theKext->isInFileset() && + theKext->kc_type != KCKindPrimary) { + OSKextLog(/* kext */ NULL, + kOSKextLogErrorLevel | + kOSKextLogIPCFlag, + "Access Failure - client not entitled to manage kext in non-primary KC"); + result = kOSKextReturnNotPrivileged; + goto finish; + } + result = kOSKextReturnInvalidArgument; if (predicate->isEqualTo(kKextRequestPredicateStart)) { @@ -8282,6 +9419,49 @@ OSKext::handleRequest( } else { result = theKext->stop(); } + } else if (predicate->isEqualTo(kKextRequestPredicateMissingAuxKCBundles)) { + result = OSKext::setMissingAuxKCBundles(requestDict); + } else if (predicate->isEqualTo(kKextRequestPredicateAuxKCBundleAvailable)) { + if (!kextIdentifier) { + OSKextLog(/* kext */ NULL, + kOSKextLogErrorLevel | + kOSKextLogIPCFlag, + "Invalid arguments to AuxKC Bundle Available request."); + } else { + result = OSKext::setAuxKCBundleAvailable(kextIdentifier, requestDict); + } + } else if (predicate->isEqualTo(kKextRequestPredicateLoadFromKC)) { + if (!kextIdentifier) { + OSKextLog(/* kext */ NULL, + kOSKextLogErrorLevel | + kOSKextLogIPCFlag, + "Invalid arguments to kext load from KC request."); + } else if (!theKext) { + OSKextLog(/* kext */ NULL, + kOSKextLogErrorLevel | + kOSKextLogIPCFlag, + "Kext %s not found for load from KC request.", + kextIdentifier->getCStringNoCopy()); + result = kOSKextReturnNotFound; + } else if (!theKext->isInFileset()) { + OSKextLog(/* kext */ NULL, + kOSKextLogErrorLevel | + kOSKextLogIPCFlag, + "Kext %s does not exist in a KC: refusing to load.", + kextIdentifier->getCStringNoCopy()); + result = kOSKextReturnNotLoadable; + } else { + result = OSKext::loadKextFromKC(theKext, requestDict); + } + } else if (predicate->isEqualTo(kKextRequestPredicateLoadCodeless)) { + if (!kextIdentifier) { + OSKextLog(/* kext */ NULL, + kOSKextLogErrorLevel | + kOSKextLogIPCFlag, + "Invalid arguments to codeless kext load interface (missing identifier)."); + } else { + result = OSKext::loadCodelessKext(kextIdentifier, requestDict); + } } else if (predicate->isEqualTo(kKextRequestPredicateUnload)) { if (!kextIdentifier) { OSKextLog(/* kext */ NULL, @@ -8316,7 +9496,8 @@ OSKext::handleRequest( goto finish; } } else if (predicate->isEqualTo(kKextRequestPredicateGetLoaded) || - predicate->isEqualTo(kKextRequestPredicateGetLoadedByUUID)) { + predicate->isEqualTo(kKextRequestPredicateGetLoadedByUUID) || + predicate->isEqualTo(kKextRequestPredicateGetKextsInCollection)) { OSBoolean * delayAutounloadBool = NULL; OSObject * infoKeysRaw = NULL; OSArray * infoKeys = NULL; @@ -8361,7 +9542,10 @@ OSKext::handleRequest( responseObject = OSKext::copyLoadedKextInfo(kextIdentifiers, infoKeys); } else if (predicate->isEqualTo(kKextRequestPredicateGetLoadedByUUID)) { responseObject = OSKext::copyLoadedKextInfoByUUID(kextIdentifiers, infoKeys); + } else if (predicate->isEqualTo(kKextRequestPredicateGetKextsInCollection)) { + responseObject = OSKext::copyKextCollectionInfo(requestDict, infoKeys); } + if (!responseObject) { result = kOSKextReturnInternalError; } else { @@ -8375,7 +9559,7 @@ OSKext::handleRequest( /* Hand the current sKernelRequests array to the caller * (who must release it), and make a new one. 
*/ - responseObject = sKernelRequests; + responseObject = os::move(sKernelRequests); sKernelRequests = OSArray::withCapacity(0); sPostedKextLoadIdentifiers->flushCollection(); OSKextLog(/* kext */ NULL, @@ -8386,12 +9570,17 @@ OSKext::handleRequest( } else if (predicate->isEqualTo(kKextRequestPredicateGetAllLoadRequests)) { /* Return the set of all requested bundle identifiers */ responseObject = sAllKextLoadIdentifiers; - responseObject->retain(); OSKextLog(/* kext */ NULL, kOSKextLogDebugLevel | kOSKextLogIPCFlag, "Returning load requests."); result = kOSReturnSuccess; + } else if (predicate->isEqualTo(kKextRequestPredicateLoadFileSetKC)) { + printf("KextLog: Loading FileSet KC(s)\n"); + result = OSKext::loadFileSetKexts(requestDict); + } else if (predicate->isEqualTo(kKextRequestPredicateDaemonReady)) { + printf("KextLog: " kIOKitDaemonName " is %s\n", sIOKitDaemonActive ? "active" : "not active"); + result = (sIOKitDaemonActive && !sOSKextWasResetAfterUserspaceReboot) ? kOSReturnSuccess : kIOReturnNotReady; } else { OSKextLog(/* kext */ NULL, kOSKextLogDebugLevel | @@ -8423,8 +9612,31 @@ OSKext::handleRequest( result = kOSKextReturnNoMemory; goto finish; } + /* + * Before serializing the kernel requests, patch the dext launch requests so + * that the value for kKextRequestArgumentCheckInToken is a mach port name for the + * IOUserServerCheckInToken kernel object. + */ + if (predicate->isEqualTo(kKextRequestPredicateGetKernelRequests)) { + OSArray * requests = OSDynamicCast(OSArray, responseObject.get()); + task_t calling_task = current_task(); + if (!requests) { + OSKextLog(/* kext */ NULL, + kOSKextLogGeneralFlag | kOSKextLogErrorLevel, + "responseObject should be an OSArray if predicate is " kKextRequestPredicateGetKernelRequests); + result = kOSKextReturnInternalError; + goto finish; + } + result = patchDextLaunchRequests(calling_task, requests); + if (result != kOSReturnSuccess) { + OSKextLog(/* kext */ NULL, + kOSKextLogGeneralFlag | kOSKextLogErrorLevel, + "Failed to patch dext launch requests."); + goto finish; + } + } - if (!responseObject->serialize(serializer)) { + if (!responseObject->serialize(serializer.get())) { OSKextLog(/* kext */ NULL, kOSKextLogGeneralFlag | kOSKextLogErrorLevel, "Failed to serialize response to request from user space."); @@ -8471,47 +9683,338 @@ finish: logInfoArray = OSKext::clearUserSpaceLogFilter(); if (logInfoArray && logInfoOut && logInfoLengthOut) { - (void)OSKext::serializeLogInfo(logInfoArray, + (void)OSKext::serializeLogInfo(logInfoArray.get(), logInfoOut, logInfoLengthOut); } IORecursiveLockUnlock(sKextLock); - OSSafeReleaseNULL(parsedXML); - OSSafeReleaseNULL(errorString); - OSSafeReleaseNULL(responseObject); - OSSafeReleaseNULL(serializer); - OSSafeReleaseNULL(logInfoArray); - return result; } +#if PRAGMA_MARK +#pragma mark Linked Kext Collection Support +#endif -// #include -extern "C" { -uint64_t __llvm_profile_get_size_for_buffer_internal(const char *DataBegin, - const char *DataEnd, - const char *CountersBegin, - const char *CountersEnd, - const char *NamesBegin, - const char *NamesEnd); -int __llvm_profile_write_buffer_internal(char *Buffer, - const char *DataBegin, - const char *DataEnd, - const char *CountersBegin, - const char *CountersEnd, - const char *NamesBegin, - const char *NamesEnd); +static int +__whereIsAddr(vm_offset_t theAddr, unsigned long *segSizes, vm_offset_t *segAddrs, int segCount) +{ + for (int i = 0; i < segCount; i++) { + vm_offset_t segStart = segAddrs[i]; + vm_offset_t segEnd = segStart + 
(vm_offset_t)segSizes[i]; + + if (theAddr >= segStart && theAddr < segEnd) { + return i; + } + } + return -1; } +static void +__slideOldKaslrOffsets(kernel_mach_header_t *mh, + kernel_segment_command_t *kextTextSeg, + OSData *kaslrOffsets) +{ + static const char *plk_segNames[] = { + "__TEXT", + "__TEXT_EXEC", + "__DATA", + "__DATA_CONST", + "__LINKEDIT", + "__PRELINK_TEXT", + "__PLK_TEXT_EXEC", + "__PRELINK_DATA", + "__PLK_DATA_CONST", + "__PLK_LLVM_COV", + "__PLK_LINKEDIT", + "__PRELINK_INFO" + }; + static const size_t num_plk_seg = (size_t)(sizeof(plk_segNames) / sizeof(plk_segNames[0])); + + unsigned long plk_segSizes[num_plk_seg]; + vm_offset_t plk_segAddrs[num_plk_seg]; + + for (size_t i = 0; i < num_plk_seg; i++) { + plk_segSizes[i] = 0; + plk_segAddrs[i] = (vm_offset_t)getsegdatafromheader(mh, plk_segNames[i], &plk_segSizes[i]); + } + + uint64_t kextTextStart = (uint64_t)kextTextSeg->vmaddr; + + int slidKextAddrCount = 0; + int badSlideAddr = 0; + int badSlideTarget = 0; + + struct kaslrPackedOffsets { + uint32_t count; /* number of offsets */ + uint32_t offsetsArray[]; /* offsets to slide */ + }; + const struct kaslrPackedOffsets *myOffsets = NULL; + myOffsets = (const struct kaslrPackedOffsets *)kaslrOffsets->getBytesNoCopy(); + + for (uint32_t j = 0; j < myOffsets->count; j++) { + uint64_t slideOffset = (uint64_t)myOffsets->offsetsArray[j]; + vm_offset_t *slideAddr = (vm_offset_t *)((uint64_t)kextTextStart + slideOffset); + int slideAddrSegIndex = -1; + int addrToSlideSegIndex = -1; + + slideAddrSegIndex = __whereIsAddr((vm_offset_t)slideAddr, &plk_segSizes[0], &plk_segAddrs[0], num_plk_seg); + if (slideAddrSegIndex >= 0) { + addrToSlideSegIndex = __whereIsAddr(ml_static_slide(*slideAddr), &plk_segSizes[0], &plk_segAddrs[0], num_plk_seg); + if (addrToSlideSegIndex < 0) { + badSlideTarget++; + continue; + } + } else { + badSlideAddr++; + continue; + } -static -void -OSKextPgoMetadataPut(char *pBuffer, - size_t *position, - size_t bufferSize, - uint32_t *num_pairs, - const char *key, + slidKextAddrCount++; + *slideAddr = ml_static_slide(*slideAddr); + } // for ... +} + + + +/******************************************************************** +* addKextsFromKextCollection +* +* Input: MachO header of kext collection. The MachO is assumed to +* have a section named 'info_seg_name,info_sect_name' that +* contains a serialized XML info dictionary. This dictionary +* contains a UUID, possibly a set of relocations (for older +* kxld-built binaries), and an array of kext personalities. 
+* +********************************************************************/ +bool +OSKext::addKextsFromKextCollection(kernel_mach_header_t *mh, + OSDictionary *infoDict, const char *text_seg_name, + OSData **kcUUID, kc_kind_t type) +{ + bool result = false; + + OSArray *kextArray = NULL; // do not release + OSData *infoDictKCUUID = NULL; // do not release + OSData *kaslrOffsets = NULL; // do not release + + IORegistryEntry *registryRoot = NULL; // do not release + OSSharedPtr kcKextCount; + + /* extract the KC UUID from the dictionary */ + infoDictKCUUID = OSDynamicCast(OSData, infoDict->getObject(kPrelinkInfoKCIDKey)); + if (infoDictKCUUID) { + if (infoDictKCUUID->getLength() != sizeof(uuid_t)) { + panic("kcUUID length is %d, expected %lu", + infoDictKCUUID->getLength(), sizeof(uuid_t)); + } + } + + /* locate the array of kext dictionaries */ + kextArray = OSDynamicCast(OSArray, infoDict->getObject(kPrelinkInfoDictionaryKey)); + if (!kextArray) { + OSKextLog(/* kext */ NULL, kOSKextLogErrorLevel | kOSKextLogArchiveFlag, + "The given KC has no kext info dictionaries"); + goto finish; + } + + /* + * old-style KASLR offsets may be present in the info dictionary. If + * we find them, use them and eventually slide them. + */ + kaslrOffsets = OSDynamicCast(OSData, infoDict->getObject(kPrelinkLinkKASLROffsetsKey)); + + /* + * Before processing any kexts, locate the special kext bundle which + * contains a list of kexts that we are to prevent from loading. + */ + createExcludeListFromPrelinkInfo(kextArray); + + /* + * Create OSKext objects for each kext we find in the array of kext + * info plist dictionaries. + */ + for (int i = 0; i < (int)kextArray->getCount(); ++i) { + OSDictionary *kextDict = NULL; + kextDict = OSDynamicCast(OSDictionary, kextArray->getObject(i)); + if (!kextDict) { + OSKextLog(/* kext */ NULL, + kOSKextLogErrorLevel | + kOSKextLogDirectoryScanFlag | kOSKextLogArchiveFlag, + "Kext info dictionary for kext #%d isn't a dictionary?", i); + continue; + } + + /* + * Create the kext for the entry, then release it, because the + * kext system keeps a reference around until the kext is + * explicitly removed. Any creation/registration failures are + * already logged for us. + */ + withPrelinkedInfoDict(kextDict, (kaslrOffsets ? TRUE : FALSE), type); + } + + /* + * slide old-style kxld relocations + * NOTE: this is still used on embedded KCs built with kcgen + * TODO: Remove this once we use the new kext linker everywhere! + */ + if (kaslrOffsets && vm_kernel_slide > 0) { + kernel_segment_command_t *text_segment = NULL; + text_segment = getsegbynamefromheader(mh, text_seg_name); + if (!text_segment) { + OSKextLog(/* kext */ NULL, kOSKextLogErrorLevel | kOSKextLogArchiveFlag, + "Can't find a TEXT segment named '%s' in macho header", text_seg_name); + goto finish; + } + + __slideOldKaslrOffsets(mh, text_segment, kaslrOffsets); + /* All kexts covered by the old-style kaslr relocation list are now slid, set VM protections for them */ + setAllVMAttributes(); + } + + /* Store the number of prelinked kexts in the registry so we can tell + * when the system has been started from a prelinked kernel. 
+ */ + registryRoot = IORegistryEntry::getRegistryRoot(); + assert(registryRoot); + + kcKextCount = OSNumber::withNumber((unsigned long long)infoDict->getCount(), 8 * sizeof(uint32_t)); + assert(kcKextCount); + if (kcKextCount) { + OSSharedPtr prop = registryRoot->copyProperty(kOSPrelinkKextCountKey); + OSNumber *num; + num = OSDynamicCast(OSNumber, prop.get()); + if (num) { + kcKextCount->addValue(num->unsigned64BitValue()); + } + registryRoot->setProperty(kOSPrelinkKextCountKey, kcKextCount.get()); + } + + OSKextLog(/* kext */ NULL, + kOSKextLogProgressLevel | + kOSKextLogGeneralFlag | kOSKextLogKextBookkeepingFlag | + kOSKextLogDirectoryScanFlag | kOSKextLogArchiveFlag, + "%u prelinked kexts", infoDict->getCount()); + + + if (kcUUID && infoDictKCUUID) { + *kcUUID = OSData::withData(infoDictKCUUID).detach(); + } + + result = true; + +finish: + return result; +} + +bool +OSKext::addKextsFromKextCollection(kernel_mach_header_t *mh, + OSDictionary *infoDict, const char *text_seg_name, + OSSharedPtr &kcUUID, kc_kind_t type) +{ + OSData *result = NULL; + bool success = addKextsFromKextCollection(mh, + infoDict, + text_seg_name, + &result, + type); + if (success) { + kcUUID.reset(result, OSNoRetain); + } + return success; +} + +static OSSharedPtr deferredAuxKCXML; +bool +OSKext::registerDeferredKextCollection(kernel_mach_header_t *mh, + OSSharedPtr &parsedXML, kc_kind_t type) +{ + if (type != KCKindAuxiliary) { + return false; + } + + kernel_mach_header_t *_mh; + _mh = (kernel_mach_header_t*)PE_get_kc_header(type); + if (!_mh || _mh != mh) { + return false; + } + + if (deferredAuxKCXML) { + /* only allow this to be called once */ + OSKextLog(/* kext */ NULL, kOSKextLogErrorLevel | kOSKextLogArchiveFlag, + "An Aux KC has already been registered for deferred processing."); + return false; + } + + OSDictionary *infoDict = OSDynamicCast(OSDictionary, parsedXML.get()); + if (!infoDict) { + OSKextLog(/* kext */ NULL, kOSKextLogErrorLevel | kOSKextLogArchiveFlag, + "The Aux KC has info dictionary"); + return false; + } + + OSData *kcUUID = OSDynamicCast(OSData, infoDict->getObject(kPrelinkInfoKCIDKey)); + if (!kcUUID || kcUUID->getLength() != sizeof(uuid_t)) { + OSKextLog(/* kext */ NULL, kOSKextLogErrorLevel | kOSKextLogArchiveFlag, + "The Aux KC has no UUID in %s", kPrelinkInfoKCIDKey); + return false; + } + + /* + * Copy the AuxKC UUID to make sure that the kern.auxiliaryfilesetuuid + * sysctl can return the UUID to user space which will check this + * value for errors. 
+ */ + memcpy((void *)&auxkc_uuid, (const void *)kcUUID->getBytesNoCopy(), + kcUUID->getLength()); + uuid_unparse_upper(auxkc_uuid, auxkc_uuid_string); + auxkc_uuid_valid = TRUE; + + deferredAuxKCXML = parsedXML; + + return true; +} + +OSSharedPtr +OSKext::consumeDeferredKextCollection(kc_kind_t type) +{ + if (type != KCKindAuxiliary || !deferredAuxKCXML) { + return NULL; + } + + return os::move(deferredAuxKCXML); +} + +#if PRAGMA_MARK +#pragma mark Profile-Guided-Optimization Support +#endif + +// #include +extern "C" { +uint64_t __llvm_profile_get_size_for_buffer_internal(const char *DataBegin, + const char *DataEnd, + const char *CountersBegin, + const char *CountersEnd, + const char *NamesBegin, + const char *NamesEnd); +int __llvm_profile_write_buffer_internal(char *Buffer, + const char *DataBegin, + const char *DataEnd, + const char *CountersBegin, + const char *CountersEnd, + const char *NamesBegin, + const char *NamesEnd); +} + + +static +void +OSKextPgoMetadataPut(char *pBuffer, + size_t *position, + size_t bufferSize, + uint32_t *num_pairs, + const char *key, const char *value) { size_t strlen_key = strlen(key); @@ -8563,13 +10066,12 @@ OSKextPgoMetadataPutAll(OSKext *kext, OSKextPgoMetadataPut(pBuffer, position, bufferSize, num_pairs, "INSTANCE", instance_uuid_string); - OSData *uuid_data; + OSSharedPtr uuid_data; uuid_t uuid; uuid_string_t uuid_string; uuid_data = kext->copyUUID(); if (uuid_data) { memcpy(uuid, uuid_data->getBytesNoCopy(), sizeof(uuid)); - OSSafeReleaseNULL(uuid_data); uuid_unparse(uuid, uuid_string); OSKextPgoMetadataPut(pBuffer, position, bufferSize, num_pairs, "UUID", uuid_string); @@ -8620,6 +10122,7 @@ OSKextGrabPgoDataLocked(OSKext *kext, kernel_section_t *sect_prf_cnts = NULL; uint64_t size; size_t metadata_size = 0; + size_t offset_to_pairs = 0; sect_prf_data = kext->lookupSection("__DATA", "__llvm_prf_data"); sect_prf_name = kext->lookupSection("__DATA", "__llvm_prf_names"); @@ -8669,6 +10172,12 @@ OSKextGrabPgoDataLocked(OSKext *kext, } if (metadata) { + offset_to_pairs = sizeof(struct pgo_metadata_footer) + metadata_size; + if (offset_to_pairs > UINT32_MAX) { + err = E2BIG; + goto out; + } + char *end_of_buffer = pBuffer + size; struct pgo_metadata_footer *footerp = (struct pgo_metadata_footer *) (end_of_buffer - sizeof(struct pgo_metadata_footer)); char *metadata_buffer = end_of_buffer - (sizeof(struct pgo_metadata_footer) + metadata_size); @@ -8683,7 +10192,7 @@ OSKextGrabPgoDataLocked(OSKext *kext, struct pgo_metadata_footer footer; footer.magic = htonl(0x6d657461); footer.number_of_pairs = htonl( num_pairs ); - footer.offset_to_pairs = htonl( sizeof(struct pgo_metadata_footer) + metadata_size ); + footer.offset_to_pairs = htonl((uint32_t)offset_to_pairs ); memcpy(footerp, &footer, sizeof(footer)); } } @@ -8702,7 +10211,7 @@ OSKextGrabPgoData(uuid_t uuid, int metadata) { int err = 0; - OSKext *kext = NULL; + OSSharedPtr kext; IORecursiveLockLock(sKextLock); @@ -8731,8 +10240,7 @@ OSKextGrabPgoData(uuid_t uuid, prev->next = &s.list_head; next->prev = &s.list_head; - kext->release(); - kext = NULL; + kext.reset(); IORecursiveLockSleep(sKextLock, &s, THREAD_ABORTSAFE); @@ -8744,13 +10252,10 @@ OSKextGrabPgoData(uuid_t uuid, err = s.err; } else { - err = OSKextGrabPgoDataLocked(kext, metadata, kext->instance_uuid, pSize, pBuffer, bufferSize); + err = OSKextGrabPgoDataLocked(kext.get(), metadata, kext->instance_uuid, pSize, pBuffer, bufferSize); } out: - if (kext) { - kext->release(); - } IORecursiveLockUnlock(sKextLock); @@ -8787,18 +10292,18 @@ 
OSKextResetPgoCounters() } } -OSDictionary * +OSSharedPtr OSKext::copyLoadedKextInfoByUUID( OSArray * kextIdentifiers, OSArray * infoKeys) { - OSDictionary * result = NULL; - OSDictionary * kextInfo = NULL; // must release + OSSharedPtr result; + OSSharedPtr kextInfo; uint32_t max_count, i, j; uint32_t idCount = 0; uint32_t idIndex = 0; IORecursiveLockLock(sKextLock); - OSArray *list[2] = {sLoadedKexts, sLoadedDriverKitKexts}; + OSArray *list[2] = {sLoadedKexts.get(), sLoadedDriverKitKexts.get()}; uint32_t count[2] = {sLoadedKexts->getCount(), sLoadedDriverKitKexts->getCount()}; #if CONFIG_MACF @@ -8843,11 +10348,11 @@ OSKext::copyLoadedKextInfoByUUID( for (j = 0; j < (sizeof(list) / sizeof(list[0])); j++) { for (i = 0; i < count[j]; i++) { - OSKext *thisKext = NULL;// do not release + OSKext *thisKext = NULL; // do not release Boolean includeThis = true; uuid_t thisKextUUID; uuid_t thisKextTextUUID; - OSData *uuid_data; + OSSharedPtr uuid_data; uuid_string_t uuid_key; thisKext = OSDynamicCast(OSKext, list[j]->getObject(i)); @@ -8861,7 +10366,6 @@ OSKext::copyLoadedKextInfoByUUID( } memcpy(&thisKextUUID, uuid_data->getBytesNoCopy(), sizeof(thisKextUUID)); - OSSafeReleaseNULL(uuid_data); uuid_unparse(thisKextUUID, uuid_key); @@ -8870,7 +10374,6 @@ OSKext::copyLoadedKextInfoByUUID( continue; } memcpy(&thisKextTextUUID, uuid_data->getBytesNoCopy(), sizeof(thisKextTextUUID)); - OSSafeReleaseNULL(uuid_data); /* Skip current kext if we have a list of UUIDs and * it isn't in the list. @@ -8905,8 +10408,7 @@ OSKext::copyLoadedKextInfoByUUID( kextInfo = thisKext->copyInfo(infoKeys); if (kextInfo) { - result->setObject(uuid_key, kextInfo); - kextInfo->release(); + result->setObject(uuid_key, kextInfo.get()); } if (kextIdentifiers && !kextIdentifiers->getCount()) { @@ -8924,12 +10426,162 @@ finish: /********************************************************************* *********************************************************************/ /* static */ -OSDictionary * +OSSharedPtr +OSKext::copyKextCollectionInfo( + OSDictionary *requestDict, + OSArray *infoKeys) +{ + OSSharedPtr result; + OSString *collectionType = NULL; + OSObject *rawLoadedState = NULL; + OSString *loadedState = NULL; + + kc_kind_t kc_request_kind = KCKindUnknown; + bool onlyLoaded = false; + bool onlyUnloaded = false; + +#if CONFIG_MACF + /* Is the calling process allowed to query kext info? 
*/ + if (current_task() != kernel_task) { + int macCheckResult = 0; + kauth_cred_t cred = NULL; + + cred = kauth_cred_get_with_ref(); + macCheckResult = mac_kext_check_query(cred); + kauth_cred_unref(&cred); + + if (macCheckResult != 0) { + OSKextLog(/* kext */ NULL, + kOSKextLogErrorLevel | kOSKextLogLoadFlag, + "Failed to query kext info (MAC policy error 0x%x).", + macCheckResult); + goto finish; + } + } +#endif + + if (infoKeys && !infoKeys->getCount()) { + infoKeys = NULL; + } + + collectionType = OSDynamicCast(OSString, + _OSKextGetRequestArgument(requestDict, + kKextRequestArgumentCollectionTypeKey)); + if (!collectionType) { + OSKextLog(/* kext */ NULL, + kOSKextLogErrorLevel | + kOSKextLogIPCFlag, + "Invalid '%s' argument to kext collection info request.", + kKextRequestArgumentCollectionTypeKey); + goto finish; + } + if (collectionType->isEqualTo(kKCTypePrimary)) { + kc_request_kind = KCKindPrimary; + } else if (collectionType->isEqualTo(kKCTypeSystem)) { + kc_request_kind = KCKindPageable; + } else if (collectionType->isEqualTo(kKCTypeAuxiliary)) { + kc_request_kind = KCKindAuxiliary; + } else if (collectionType->isEqualTo(kKCTypeCodeless)) { + kc_request_kind = KCKindNone; + } else if (!collectionType->isEqualTo(kKCTypeAny)) { + OSKextLog(/* kext */ NULL, + kOSKextLogErrorLevel | + kOSKextLogIPCFlag, + "Invalid '%s' argument value '%s' to kext collection info request.", + kKextRequestArgumentCollectionTypeKey, + collectionType->getCStringNoCopy()); + goto finish; + } + + rawLoadedState = _OSKextGetRequestArgument(requestDict, + kKextRequestArgumentLoadedStateKey); + if (rawLoadedState) { + loadedState = OSDynamicCast(OSString, rawLoadedState); + if (!loadedState) { + OSKextLog(/* kext */ NULL, + kOSKextLogErrorLevel | + kOSKextLogIPCFlag, + "Invalid '%s' argument to kext collection info request.", + kKextRequestArgumentLoadedStateKey); + goto finish; + } + } + if (loadedState) { + if (loadedState->isEqualTo("Loaded")) { + onlyLoaded = true; + } else if (loadedState->isEqualTo("Unloaded")) { + onlyUnloaded = true; + } else if (!loadedState->isEqualTo("Any")) { + OSKextLog(/* kext */ NULL, + kOSKextLogErrorLevel | kOSKextLogLoadFlag, + "Invalid '%s' argument value '%s' for '%s' collection info", + kKextRequestArgumentLoadedStateKey, + loadedState->getCStringNoCopy(), + collectionType->getCStringNoCopy()); + goto finish; + } + } + + result = OSDictionary::withCapacity(sKextsByID->getCount()); + if (!result) { + goto finish; + } + + IORecursiveLockLock(sKextLock); + { // start block scope + sKextsByID->iterateObjects(^bool (const OSSymbol *thisKextID, OSObject *obj) + { + OSKext *thisKext = NULL; // do not release + OSSharedPtr kextInfo; + + (void)thisKextID; + + thisKext = OSDynamicCast(OSKext, obj); + if (!thisKext) { + return false;; + } + + /* + * skip the kext if it came from the wrong collection type + * (and the caller requested a specific type) + */ + if ((kc_request_kind != KCKindUnknown) && (thisKext->kc_type != kc_request_kind)) { + return false; + } + + /* + * respect the caller's desire to find only loaded or + * unloaded kexts + */ + if (onlyLoaded && (-1U == sLoadedKexts->getNextIndexOfObject(thisKext, 0))) { + return false; + } + if (onlyUnloaded && (-1U != sLoadedKexts->getNextIndexOfObject(thisKext, 0))) { + return false; + } + + kextInfo = thisKext->copyInfo(infoKeys); + if (kextInfo) { + result->setObject(thisKext->getIdentifier(), kextInfo.get()); + } + return false; + }); + } // end block scope + IORecursiveLockUnlock(sKextLock); + +finish: + return result; +} 
+ +/********************************************************************* +*********************************************************************/ +/* static */ +OSSharedPtr OSKext::copyLoadedKextInfo( OSArray * kextIdentifiers, OSArray * infoKeys) { - OSDictionary * result = NULL; + OSSharedPtr result; uint32_t idCount = 0; bool onlyLoaded; @@ -9013,51 +10665,51 @@ OSKext::copyLoadedKextInfo( "kaslr: vm_slinkedit 0x%lx vm_elinkedit 0x%lx \n", vm_slinkedit, vm_elinkedit); #endif + { // start block scope + sKextsByID->iterateObjects(^bool (const OSSymbol * thisKextID, OSObject * obj) + { + OSKext * thisKext = NULL; // do not release + Boolean includeThis = true; + OSSharedPtr kextInfo; - sKextsByID->iterateObjects(^bool (const OSSymbol * thisKextID, OSObject * obj) - { - OSKext * thisKext = NULL;// do not release - Boolean includeThis = true; - OSDictionary * kextInfo = NULL;// must release - - thisKext = OSDynamicCast(OSKext, obj); - if (!thisKext) { - return false;; - } + thisKext = OSDynamicCast(OSKext, obj); + if (!thisKext) { + return false;; + } - /* Skip current kext if not yet started and caller didn't request all. - */ - if (onlyLoaded && (-1U == sLoadedKexts->getNextIndexOfObject(thisKext, 0))) { - return false;; - } + /* Skip current kext if not yet started and caller didn't request all. + */ + if (onlyLoaded && (-1U == sLoadedKexts->getNextIndexOfObject(thisKext, 0))) { + return false;; + } - /* Skip current kext if we have a list of bundle IDs and - * it isn't in the list. - */ - if (kextIdentifiers) { - includeThis = false; - - for (uint32_t idIndex = 0; idIndex < idCount; idIndex++) { - const OSString * thisRequestID = OSDynamicCast(OSString, - kextIdentifiers->getObject(idIndex)); - if (thisKextID->isEqualTo(thisRequestID)) { - includeThis = true; - break; + /* Skip current kext if we have a list of bundle IDs and + * it isn't in the list. 
+ */ + if (kextIdentifiers) { + includeThis = false; + + for (uint32_t idIndex = 0; idIndex < idCount; idIndex++) { + const OSString * thisRequestID = OSDynamicCast(OSString, + kextIdentifiers->getObject(idIndex)); + if (thisKextID->isEqualTo(thisRequestID)) { + includeThis = true; + break; + } } } - } - if (!includeThis) { - return false; - } + if (!includeThis) { + return false; + } - kextInfo = thisKext->copyInfo(infoKeys); - if (kextInfo) { - result->setObject(thisKext->getIdentifier(), kextInfo); - kextInfo->release(); - } - return false; - }); + kextInfo = thisKext->copyInfo(infoKeys); + if (kextInfo) { + result->setObject(thisKext->getIdentifier(), kextInfo.get()); + } + return false; + }); + } // end block scope finish: IORecursiveLockUnlock(sKextLock); @@ -9072,29 +10724,30 @@ finish: *********************************************************************/ #define _OSKextLoadInfoDictCapacity (12) -OSDictionary * +OSSharedPtr OSKext::copyInfo(OSArray * infoKeys) { - OSDictionary * result = NULL; - bool success = false; - OSData * headerData = NULL;// must release - OSData * logData = NULL;// must release - OSNumber * cpuTypeNumber = NULL;// must release - OSNumber * cpuSubtypeNumber = NULL;// must release - OSString * versionString = NULL;// do not release - uint32_t executablePathCStringSize = 0; - char * executablePathCString = NULL;// must release - OSString * executablePathString = NULL;// must release - OSData * uuid = NULL;// must release - OSNumber * scratchNumber = NULL;// must release - OSArray * dependencyLoadTags = NULL;// must release - OSCollectionIterator * metaClassIterator = NULL;// must release - OSArray * metaClassInfo = NULL;// must release - OSDictionary * metaClassDict = NULL;// must release - OSMetaClass * thisMetaClass = NULL;// do not release - OSString * metaClassName = NULL;// must release - OSString * superclassName = NULL;// must release - uint32_t count, i; + OSSharedPtr result; + bool success = false; + OSSharedPtr headerData; + OSSharedPtr logData; + OSSharedPtr cpuTypeNumber; + OSSharedPtr cpuSubtypeNumber; + OSString * versionString = NULL; // do not release + OSString * bundleType = NULL; // do not release + uint32_t executablePathCStringSize = 0; + char * executablePathCString = NULL; // must kfree + OSSharedPtr executablePathString; + OSSharedPtr uuid; + OSSharedPtr dependencyLoadTags; + OSSharedPtr metaClassIterator; + OSSharedPtr metaClassInfo; + OSSharedPtr metaClassDict; + OSMetaClass * thisMetaClass = NULL; // do not release + OSSharedPtr metaClassName; + OSSharedPtr superclassName; + kc_format_t kcformat; + uint32_t count, i; result = OSDictionary::withCapacity(_OSKextLoadInfoDictCapacity); if (!result) { @@ -9108,6 +10761,10 @@ OSKext::copyInfo(OSArray * infoKeys) infoKeys = NULL; } + if (!PE_get_primary_kc_format(&kcformat)) { + goto finish; + } + /* Headers, CPU type, and CPU subtype. 
*/ if (!infoKeys || @@ -9119,8 +10776,8 @@ OSKext::copyInfo(OSArray * infoKeys) kernel_mach_header_t *kext_mach_hdr = (kernel_mach_header_t *) linkedExecutable->getBytesNoCopy(); -#if !SECURE_KERNEL - // do not return macho header info on shipping iOS - 19095897 +#if !SECURE_KERNEL || XNU_TARGET_OS_OSX + // do not return macho header info on shipping embedded - 19095897 if (!infoKeys || _OSArrayContainsCString(infoKeys, kOSBundleMachOHeadersKey)) { kernel_mach_header_t * temp_kext_mach_hdr; struct load_command * lcp; @@ -9154,6 +10811,18 @@ OSKext::copyInfo(OSArray * infoKeys) } } +#if __arm__ || __arm64__ + // iBoot disregards zero-size segments, just set their addresses to gVirtBase + // and unslide them to avoid vm assertion failures / kernel logging breakage. + if (segp->vmsize == 0 && segp->vmaddr < gVirtBase) { + segp->vmaddr = gVirtBase; + for (secp = firstsect(segp); secp != NULL; secp = nextsect(segp, secp)) { + secp->size = 0; // paranoia :) + secp->addr = gVirtBase; + } + } +#endif + #if 0 OSKextLog(/* kext */ NULL, kOSKextLogErrorLevel | @@ -9182,9 +10851,9 @@ OSKext::copyInfo(OSArray * infoKeys) } lcp = (struct load_command *)((caddr_t)lcp + lcp->cmdsize); } - result->setObject(kOSBundleMachOHeadersKey, headerData); + result->setObject(kOSBundleMachOHeadersKey, headerData.get()); } -#endif // SECURE_KERNEL +#endif // !SECURE_KERNEL || XNU_TARGET_OS_OSX if (_OSArrayContainsCString(infoKeys, kOSBundleLogStringsKey)) { osLogDataHeaderRef *header; @@ -9199,9 +10868,9 @@ OSKext::copyInfo(OSArray * infoKeys) bool res; os_log_data = getsectdatafromheader(kext_mach_hdr, "__TEXT", "__os_log", &os_log_size); - os_log_offset = getsectoffsetfromheader(kext_mach_hdr, "__TEXT", "__os_log"); + os_log_offset = (uintptr_t)os_log_data - (uintptr_t)kext_mach_hdr; cstring_data = getsectdatafromheader(kext_mach_hdr, "__TEXT", "__cstring", &cstring_size); - cstring_offset = getsectoffsetfromheader(kext_mach_hdr, "__TEXT", "__cstring"); + cstring_offset = (uintptr_t)cstring_data - (uintptr_t)kext_mach_hdr; header = (osLogDataHeaderRef *) headerBytes; header->version = OS_LOG_HDR_VERSION; @@ -9232,7 +10901,7 @@ OSKext::copyInfo(OSArray * infoKeys) goto finish; } } - result->setObject(kOSBundleLogStringsKey, logData); + result->setObject(kOSBundleLogStringsKey, logData.get()); } if (!infoKeys || _OSArrayContainsCString(infoKeys, kOSBundleCPUTypeKey)) { @@ -9242,7 +10911,7 @@ OSKext::copyInfo(OSArray * infoKeys) if (!cpuTypeNumber) { goto finish; } - result->setObject(kOSBundleCPUTypeKey, cpuTypeNumber); + result->setObject(kOSBundleCPUTypeKey, cpuTypeNumber.get()); } if (!infoKeys || _OSArrayContainsCString(infoKeys, kOSBundleCPUSubtypeKey)) { @@ -9252,7 +10921,7 @@ OSKext::copyInfo(OSArray * infoKeys) if (!cpuSubtypeNumber) { goto finish; } - result->setObject(kOSBundleCPUSubtypeKey, cpuSubtypeNumber); + result->setObject(kOSBundleCPUSubtypeKey, cpuSubtypeNumber.get()); } } else { if (isDriverKit() && _OSArrayContainsCString(infoKeys, kOSBundleLogStringsKey)) { @@ -9276,14 +10945,21 @@ OSKext::copyInfo(OSArray * infoKeys) if (!res) { goto finish; } - result->setObject(kOSBundleLogStringsKey, logData); + result->setObject(kOSBundleLogStringsKey, logData.get()); } } } /* CFBundleIdentifier. We set this regardless because it's just stupid not to. */ - result->setObject(kCFBundleIdentifierKey, bundleID); + result->setObject(kCFBundleIdentifierKey, bundleID.get()); + + /* CFBundlePackageType + */ + bundleType = infoDict ? 
OSDynamicCast(OSString, infoDict->getObject(kCFBundlePackageTypeKey)): NULL; + if (bundleType) { + result->setObject(kCFBundlePackageTypeKey, bundleType); + } /* CFBundleVersion. */ @@ -9309,7 +10985,7 @@ OSKext::copyInfo(OSArray * infoKeys) */ if (!infoKeys || _OSArrayContainsCString(infoKeys, kOSBundlePathKey)) { if (path) { - result->setObject(kOSBundlePathKey, path); + result->setObject(kOSBundlePathKey, path.get()); } } @@ -9318,13 +10994,13 @@ OSKext::copyInfo(OSArray * infoKeys) */ if (!infoKeys || _OSArrayContainsCString(infoKeys, kOSBundleExecutablePathKey)) { if (path && executableRelPath) { - uint32_t pathLength = path->getLength(); // gets incremented below + uint32_t pathLength = path->getLength(); // gets incremented below // +1 for slash, +1 for \0 executablePathCStringSize = pathLength + executableRelPath->getLength() + 2; - executablePathCString = (char *)kalloc_tag((executablePathCStringSize) * - sizeof(char), VM_KERN_MEMORY_OSKEXT); // +1 for \0 + executablePathCString = (char *)kheap_alloc_tag(KHEAP_TEMP, + executablePathCStringSize, Z_WAITOK, VM_KERN_MEMORY_OSKEXT); // +1 for \0 if (!executablePathCString) { goto finish; } @@ -9341,17 +11017,17 @@ OSKext::copyInfo(OSArray * infoKeys) goto finish; } - result->setObject(kOSBundleExecutablePathKey, executablePathString); + result->setObject(kOSBundleExecutablePathKey, executablePathString.get()); } else if (flags.builtin) { - result->setObject(kOSBundleExecutablePathKey, bundleID); + result->setObject(kOSBundleExecutablePathKey, bundleID.get()); } else if (isDriverKit()) { if (path) { // +1 for slash, +1 for \0 uint32_t pathLength = path->getLength(); executablePathCStringSize = pathLength + 2; - executablePathCString = (char *)kalloc_tag((executablePathCStringSize) * - sizeof(char), VM_KERN_MEMORY_OSKEXT); + executablePathCString = (char *)kheap_alloc_tag(KHEAP_TEMP, + executablePathCStringSize, Z_WAITOK, VM_KERN_MEMORY_OSKEXT); if (!executablePathCString) { goto finish; } @@ -9365,7 +11041,7 @@ OSKext::copyInfo(OSArray * infoKeys) goto finish; } - result->setObject(kOSBundleExecutablePathKey, executablePathString); + result->setObject(kOSBundleExecutablePathKey, executablePathString.get()); } } } @@ -9375,33 +11051,78 @@ OSKext::copyInfo(OSArray * infoKeys) if (!infoKeys || _OSArrayContainsCString(infoKeys, kOSBundleUUIDKey)) { uuid = copyUUID(); if (uuid) { - result->setObject(kOSBundleUUIDKey, uuid); - uuid->release(); + result->setObject(kOSBundleUUIDKey, uuid.get()); } } if (!infoKeys || _OSArrayContainsCString(infoKeys, kOSBundleTextUUIDKey)) { uuid = copyTextUUID(); if (uuid) { - result->setObject(kOSBundleTextUUIDKey, uuid); uuid->release(); + result->setObject(kOSBundleTextUUIDKey, uuid.get()); } } - /***** - * OSKernelResource, OSBundleIsInterface, OSBundlePrelinked, OSBundleStarted. + /* + * Info.plist digest */ - if (!infoKeys || _OSArrayContainsCString(infoKeys, kOSKernelResourceKey)) { - result->setObject(kOSKernelResourceKey, - isKernelComponent() ? kOSBooleanTrue : kOSBooleanFalse); + if (!infoKeys || _OSArrayContainsCString(infoKeys, kOSKextInfoPlistDigestKey)) { + OSData *digest; + digest = infoDict ? OSDynamicCast(OSData, infoDict->getObject(kOSKextInfoPlistDigestKey)) : NULL; + if (digest) { + result->setObject(kOSKextInfoPlistDigestKey, digest); + } } - if (!infoKeys || _OSArrayContainsCString(infoKeys, kOSBundleIsInterfaceKey)) { - result->setObject(kOSBundleIsInterfaceKey, - isInterface() ? 
kOSBooleanTrue : kOSBooleanFalse); + /* + * Collection type + */ + if (!infoKeys || _OSArrayContainsCString(infoKeys, kOSKextBundleCollectionTypeKey)) { + result->setObject(kOSKextBundleCollectionTypeKey, OSString::withCString(getKCTypeString())); } - if (!infoKeys || _OSArrayContainsCString(infoKeys, kOSBundlePrelinkedKey)) { - result->setObject(kOSBundlePrelinkedKey, - isPrelinked() ? kOSBooleanTrue : kOSBooleanFalse); + /* + * Collection availability + */ + if (!infoKeys || _OSArrayContainsCString(infoKeys, kOSKextAuxKCAvailabilityKey)) { + result->setObject(kOSKextAuxKCAvailabilityKey, + isLoadable() ? kOSBooleanTrue : kOSBooleanFalse); + } + + /* + * Allows user load + */ + if (!infoKeys || _OSArrayContainsCString(infoKeys, kOSBundleAllowUserLoadKey)) { + OSBoolean *allowUserLoad = OSDynamicCast(OSBoolean, getPropertyForHostArch(kOSBundleAllowUserLoadKey)); + if (allowUserLoad) { + result->setObject(kOSBundleAllowUserLoadKey, allowUserLoad); + } + } + + /* + * Bundle Dependencies (OSBundleLibraries) + */ + if (!infoKeys || _OSArrayContainsCString(infoKeys, kOSBundleLibrariesKey)) { + OSDictionary *libraries = OSDynamicCast(OSDictionary, getPropertyForHostArch(kOSBundleLibrariesKey)); + if (libraries) { + result->setObject(kOSBundleLibrariesKey, libraries); + } + } + + /***** + * OSKernelResource, OSBundleIsInterface, OSBundlePrelinked, OSBundleStarted. + */ + if (!infoKeys || _OSArrayContainsCString(infoKeys, kOSKernelResourceKey)) { + result->setObject(kOSKernelResourceKey, + isKernelComponent() ? kOSBooleanTrue : kOSBooleanFalse); + } + + if (!infoKeys || _OSArrayContainsCString(infoKeys, kOSBundleIsInterfaceKey)) { + result->setObject(kOSBundleIsInterfaceKey, + isInterface() ? kOSBooleanTrue : kOSBooleanFalse); + } + + if (!infoKeys || _OSArrayContainsCString(infoKeys, kOSBundlePrelinkedKey)) { + result->setObject(kOSBundlePrelinkedKey, + isPrelinked() ? kOSBooleanTrue : kOSBooleanFalse); } if (!infoKeys || _OSArrayContainsCString(infoKeys, kOSBundleStartedKey)) { @@ -9412,13 +11133,12 @@ OSKext::copyInfo(OSArray * infoKeys) /* LoadTag (Index). */ if (!infoKeys || _OSArrayContainsCString(infoKeys, kOSBundleLoadTagKey)) { - scratchNumber = OSNumber::withNumber((unsigned long long)loadTag, + OSSharedPtr scratchNumber = OSNumber::withNumber((unsigned long long)loadTag, /* numBits */ 8 * sizeof(loadTag)); if (!scratchNumber) { goto finish; } - result->setObject(kOSBundleLoadTagKey, scratchNumber); - OSSafeReleaseNULL(scratchNumber); + result->setObject(kOSBundleLoadTagKey, scratchNumber.get()); } /* LoadAddress, LoadSize. @@ -9453,7 +11173,7 @@ OSKext::copyInfo(OSArray * infoKeys) if (flags.builtin) { loadAddress = kmod_info->address; - loadSize = kmod_info->size; + loadSize = (uint32_t)kmod_info->size; } else { loadAddress = (uint64_t)linkedExecutable->getBytesNoCopy(); loadSize = linkedExecutable->getLength(); @@ -9467,7 +11187,7 @@ OSKext::copyInfo(OSArray * infoKeys) for (seg = firstsegfromheader(mh); seg != NULL; seg = nextsegfromheader(mh, seg)) { if (seg->initprot & VM_PROT_EXECUTE) { execLoadAddress = ml_static_unslide(seg->vmaddr); - execLoadSize = seg->vmsize; + execLoadSize = (uint32_t)seg->vmsize; break; } } @@ -9476,7 +11196,7 @@ OSKext::copyInfo(OSArray * infoKeys) * from that. Otherwise it's the full load size. 
*/ if (kmod_info) { - wiredSize = loadSize - kmod_info->hdr_size; + wiredSize = loadSize - (uint32_t)kmod_info->hdr_size; } else { wiredSize = loadSize; } @@ -9492,71 +11212,71 @@ OSKext::copyInfo(OSArray * infoKeys) } if (!infoKeys || _OSArrayContainsCString(infoKeys, kOSBundleLoadAddressKey)) { - scratchNumber = OSNumber::withNumber( + OSSharedPtr scratchNumber = OSNumber::withNumber( (unsigned long long)(loadAddress), /* numBits */ 8 * sizeof(loadAddress)); if (!scratchNumber) { goto finish; } - result->setObject(kOSBundleLoadAddressKey, scratchNumber); - OSSafeReleaseNULL(scratchNumber); + result->setObject(kOSBundleLoadAddressKey, scratchNumber.get()); } -#if CONFIG_EMBEDDED - if ((!infoKeys || _OSArrayContainsCString(infoKeys, kOSBundleCacheLoadAddressKey)) - && loadAddress && loadSize) { - scratchNumber = OSNumber::withNumber( - (unsigned long long)ml_static_unslide((uintptr_t)segLOWESTTEXT), - /* numBits */ 8 * sizeof(loadAddress)); - if (!scratchNumber) { - goto finish; + if (kcformat == KCFormatStatic || kcformat == KCFormatKCGEN) { + if ((!infoKeys || _OSArrayContainsCString(infoKeys, kOSBundleCacheLoadAddressKey)) + && loadAddress && loadSize) { + void *baseAddress = PE_get_kc_baseaddress(KCKindPrimary); + if (!baseAddress) { + goto finish; + } + + OSSharedPtr scratchNumber = OSNumber::withNumber( + (unsigned long long)ml_static_unslide((vm_offset_t)baseAddress), + /* numBits */ 8 * sizeof(loadAddress)); + if (!scratchNumber) { + goto finish; + } + result->setObject(kOSBundleCacheLoadAddressKey, scratchNumber.get()); + } + if ((!infoKeys || _OSArrayContainsCString(infoKeys, kOSBundleKextsInKernelTextKey)) + && (this == sKernelKext) && gBuiltinKmodsCount) { + result->setObject(kOSBundleKextsInKernelTextKey, kOSBooleanTrue); } - result->setObject(kOSBundleCacheLoadAddressKey, scratchNumber); - OSSafeReleaseNULL(scratchNumber); - } - if ((!infoKeys || _OSArrayContainsCString(infoKeys, kOSBundleKextsInKernelTextKey)) - && (this == sKernelKext) && gBuiltinKmodsCount) { - result->setObject(kOSBundleKextsInKernelTextKey, kOSBooleanTrue); } -#endif /* CONFIG_EMBEDDED */ + if (!infoKeys || _OSArrayContainsCString(infoKeys, kOSBundleExecLoadAddressKey)) { - scratchNumber = OSNumber::withNumber( + OSSharedPtr scratchNumber = OSNumber::withNumber( (unsigned long long)(execLoadAddress), /* numBits */ 8 * sizeof(execLoadAddress)); if (!scratchNumber) { goto finish; } - result->setObject(kOSBundleExecLoadAddressKey, scratchNumber); - OSSafeReleaseNULL(scratchNumber); + result->setObject(kOSBundleExecLoadAddressKey, scratchNumber.get()); } if (!infoKeys || _OSArrayContainsCString(infoKeys, kOSBundleLoadSizeKey)) { - scratchNumber = OSNumber::withNumber( + OSSharedPtr scratchNumber = OSNumber::withNumber( (unsigned long long)(loadSize), /* numBits */ 8 * sizeof(loadSize)); if (!scratchNumber) { goto finish; } - result->setObject(kOSBundleLoadSizeKey, scratchNumber); - OSSafeReleaseNULL(scratchNumber); + result->setObject(kOSBundleLoadSizeKey, scratchNumber.get()); } if (!infoKeys || _OSArrayContainsCString(infoKeys, kOSBundleExecLoadSizeKey)) { - scratchNumber = OSNumber::withNumber( + OSSharedPtr scratchNumber = OSNumber::withNumber( (unsigned long long)(execLoadSize), /* numBits */ 8 * sizeof(execLoadSize)); if (!scratchNumber) { goto finish; } - result->setObject(kOSBundleExecLoadSizeKey, scratchNumber); - OSSafeReleaseNULL(scratchNumber); + result->setObject(kOSBundleExecLoadSizeKey, scratchNumber.get()); } if (!infoKeys || _OSArrayContainsCString(infoKeys, kOSBundleWiredSizeKey)) { - 
scratchNumber = OSNumber::withNumber( + OSSharedPtr scratchNumber = OSNumber::withNumber( (unsigned long long)(wiredSize), /* numBits */ 8 * sizeof(wiredSize)); if (!scratchNumber) { goto finish; } - result->setObject(kOSBundleWiredSizeKey, scratchNumber); - OSSafeReleaseNULL(scratchNumber); + result->setObject(kOSBundleWiredSizeKey, scratchNumber.get()); } } } @@ -9567,49 +11287,40 @@ OSKext::copyInfo(OSArray * infoKeys) if (!infoKeys || _OSArrayContainsCString(infoKeys, kOSBundleDependenciesKey)) { if ((count = getNumDependencies())) { dependencyLoadTags = OSArray::withCapacity(count); - result->setObject(kOSBundleDependenciesKey, dependencyLoadTags); + result->setObject(kOSBundleDependenciesKey, dependencyLoadTags.get()); i = count - 1; do { OSKext * dependency = OSDynamicCast(OSKext, dependencies->getObject(i)); - OSSafeReleaseNULL(scratchNumber); - if (!dependency) { continue; } - scratchNumber = OSNumber::withNumber( + OSSharedPtr scratchNumber = OSNumber::withNumber( (unsigned long long)dependency->getLoadTag(), /* numBits*/ 8 * sizeof(loadTag)); if (!scratchNumber) { goto finish; } - dependencyLoadTags->setObject(scratchNumber); + dependencyLoadTags->setObject(scratchNumber.get()); } while (i--); } } - OSSafeReleaseNULL(scratchNumber); - /* OSBundleMetaClasses. */ if (!infoKeys || _OSArrayContainsCString(infoKeys, kOSBundleClassesKey)) { if (metaClasses && metaClasses->getCount()) { - metaClassIterator = OSCollectionIterator::withCollection(metaClasses); + metaClassIterator = OSCollectionIterator::withCollection(metaClasses.get()); metaClassInfo = OSArray::withCapacity(metaClasses->getCount()); if (!metaClassIterator || !metaClassInfo) { goto finish; } - result->setObject(kOSBundleClassesKey, metaClassInfo); + result->setObject(kOSBundleClassesKey, metaClassInfo.get()); while ((thisMetaClass = OSDynamicCast(OSMetaClass, metaClassIterator->getNextObject()))) { - OSSafeReleaseNULL(metaClassDict); - OSSafeReleaseNULL(scratchNumber); - OSSafeReleaseNULL(metaClassName); - OSSafeReleaseNULL(superclassName); - metaClassDict = OSDictionary::withCapacity(3); if (!metaClassDict) { goto finish; @@ -9620,7 +11331,7 @@ OSKext::copyInfo(OSArray * infoKeys) superclassName = OSString::withCString( thisMetaClass->getSuperClass()->getClassName()); } - scratchNumber = OSNumber::withNumber(thisMetaClass->getInstanceCount(), + OSSharedPtr scratchNumber = OSNumber::withNumber(thisMetaClass->getInstanceCount(), 8 * sizeof(unsigned int)); /* Bail if any of the essentials is missing. The root class lacks a superclass, @@ -9630,12 +11341,12 @@ OSKext::copyInfo(OSArray * infoKeys) goto finish; } - metaClassInfo->setObject(metaClassDict); - metaClassDict->setObject(kOSMetaClassNameKey, metaClassName); + metaClassInfo->setObject(metaClassDict.get()); + metaClassDict->setObject(kOSMetaClassNameKey, metaClassName.get()); if (superclassName) { - metaClassDict->setObject(kOSMetaClassSuperclassNameKey, superclassName); + metaClassDict->setObject(kOSMetaClassSuperclassNameKey, superclassName.get()); } - metaClassDict->setObject(kOSMetaClassTrackingCountKey, scratchNumber); + metaClassDict->setObject(kOSMetaClassTrackingCountKey, scratchNumber.get()); } } } @@ -9643,17 +11354,16 @@ OSKext::copyInfo(OSArray * infoKeys) /* OSBundleRetainCount. 
*/ if (!infoKeys || _OSArrayContainsCString(infoKeys, kOSBundleRetainCountKey)) { - OSSafeReleaseNULL(scratchNumber); { int kextRetainCount = getRetainCount() - 1; if (isLoaded()) { kextRetainCount--; } - scratchNumber = OSNumber::withNumber( + OSSharedPtr scratchNumber = OSNumber::withNumber( (int)kextRetainCount, /* numBits*/ 8 * sizeof(int)); if (scratchNumber) { - result->setObject(kOSBundleRetainCountKey, scratchNumber); + result->setObject(kOSBundleRetainCountKey, scratchNumber.get()); } } } @@ -9661,23 +11371,11 @@ OSKext::copyInfo(OSArray * infoKeys) success = true; finish: - OSSafeReleaseNULL(headerData); - OSSafeReleaseNULL(logData); - OSSafeReleaseNULL(cpuTypeNumber); - OSSafeReleaseNULL(cpuSubtypeNumber); - OSSafeReleaseNULL(executablePathString); if (executablePathCString) { - kfree(executablePathCString, executablePathCStringSize); - } - OSSafeReleaseNULL(scratchNumber); - OSSafeReleaseNULL(dependencyLoadTags); - OSSafeReleaseNULL(metaClassIterator); - OSSafeReleaseNULL(metaClassInfo); - OSSafeReleaseNULL(metaClassDict); - OSSafeReleaseNULL(metaClassName); - OSSafeReleaseNULL(superclassName); + kheap_free(KHEAP_TEMP, executablePathCString, executablePathCStringSize); + } if (!success) { - OSSafeReleaseNULL(result); + result.reset(); } return result; } @@ -9689,24 +11387,19 @@ bool OSKext::copyUserExecutablePath(const OSSymbol * bundleID, char * pathResult, size_t pathSize) { bool ok; - OSKext * kext; + OSSharedPtr kext; IORecursiveLockLock(sKextLock); - kext = OSDynamicCast(OSKext, sKextsByID->getObject(bundleID)); - if (kext) { - kext->retain(); - } + kext.reset(OSDynamicCast(OSKext, sKextsByID->getObject(bundleID)), OSRetain); IORecursiveLockUnlock(sKextLock); if (!kext || !kext->path || !kext->userExecutableRelPath) { - OSSafeReleaseNULL(kext); return false; } snprintf(pathResult, pathSize, "%s/Contents/MacOS/%s", kext->path->getCStringNoCopy(), kext->userExecutableRelPath->getCStringNoCopy()); ok = true; - kext->release(); return ok; } @@ -9722,20 +11415,19 @@ OSKext::requestResource( void * context, OSKextRequestTag * requestTagOut) { - OSReturn result = kOSReturnError; - OSKext * callbackKext = NULL;// must release (looked up) + OSReturn result = kOSReturnError; + OSSharedPtr callbackKext; // looked up OSKextRequestTag requestTag = -1; - OSNumber * requestTagNum = NULL;// must release - - OSDictionary * requestDict = NULL;// must release - OSString * kextIdentifier = NULL;// must release - OSString * resourceName = NULL;// must release + OSSharedPtr requestTagNum; + OSSharedPtr requestDict; + OSSharedPtr kextIdentifier; + OSSharedPtr resourceName; - OSDictionary * callbackRecord = NULL;// must release - OSData * callbackWrapper = NULL;// must release + OSSharedPtr callbackRecord; + OSSharedPtr callbackWrapper; - OSData * contextWrapper = NULL;// must release + OSSharedPtr contextWrapper; IORecursiveLockLock(sKextLock); @@ -9795,7 +11487,7 @@ OSKext::requestResource( requestTag = sNextRequestTag++; result = _OSKextCreateRequest(kKextRequestPredicateRequestResource, - &requestDict); + requestDict); if (result != kOSReturnSuccess) { goto finish; } @@ -9807,17 +11499,17 @@ OSKext::requestResource( if (!kextIdentifier || !resourceName || !requestTagNum || - !_OSKextSetRequestArgument(requestDict, - kKextRequestArgumentBundleIdentifierKey, kextIdentifier) || - !_OSKextSetRequestArgument(requestDict, - kKextRequestArgumentNameKey, resourceName) || - !_OSKextSetRequestArgument(requestDict, - kKextRequestArgumentRequestTagKey, requestTagNum)) { + 
!_OSKextSetRequestArgument(requestDict.get(), + kKextRequestArgumentBundleIdentifierKey, kextIdentifier.get()) || + !_OSKextSetRequestArgument(requestDict.get(), + kKextRequestArgumentNameKey, resourceName.get()) || + !_OSKextSetRequestArgument(requestDict.get(), + kKextRequestArgumentRequestTagKey, requestTagNum.get())) { result = kOSKextReturnNoMemory; goto finish; } - callbackRecord = OSDynamicCast(OSDictionary, requestDict->copyCollection()); + callbackRecord = OSDynamicPtrCast(requestDict->copyCollection()); if (!callbackRecord) { result = kOSKextReturnNoMemory; goto finish; @@ -9827,15 +11519,15 @@ OSKext::requestResource( if (context) { contextWrapper = OSData::withBytes((void *)&context, sizeof(void *)); } - if (!callbackWrapper || !_OSKextSetRequestArgument(callbackRecord, - kKextRequestArgumentCallbackKey, callbackWrapper)) { + if (!callbackWrapper || !_OSKextSetRequestArgument(callbackRecord.get(), + kKextRequestArgumentCallbackKey, callbackWrapper.get())) { result = kOSKextReturnNoMemory; goto finish; } if (context) { - if (!contextWrapper || !_OSKextSetRequestArgument(callbackRecord, - kKextRequestArgumentContextKey, contextWrapper)) { + if (!contextWrapper || !_OSKextSetRequestArgument(callbackRecord.get(), + kKextRequestArgumentContextKey, contextWrapper.get())) { result = kOSKextReturnNoMemory; goto finish; } @@ -9844,13 +11536,13 @@ OSKext::requestResource( /* Only post the requests after all the other potential failure points * have been passed. */ - if (!sKernelRequests->setObject(requestDict) || - !sRequestCallbackRecords->setObject(callbackRecord)) { + if (!sKernelRequests->setObject(requestDict.get()) || + !sRequestCallbackRecords->setObject(callbackRecord.get())) { result = kOSKextReturnNoMemory; goto finish; } - OSKext::pingKextd(); + OSKext::pingIOKitDaemon(); result = kOSReturnSuccess; if (requestTagOut) { @@ -9865,11 +11557,11 @@ finish: if (result != kOSReturnSuccess) { unsigned int index; - index = sKernelRequests->getNextIndexOfObject(requestDict, 0); + index = sKernelRequests->getNextIndexOfObject(requestDict.get(), 0); if (index != (unsigned int)-1) { sKernelRequests->removeObject(index); } - index = sRequestCallbackRecords->getNextIndexOfObject(callbackRecord, 0); + index = sRequestCallbackRecords->getNextIndexOfObject(callbackRecord.get(), 0); if (index != (unsigned int)-1) { sRequestCallbackRecords->removeObject(index); } @@ -9879,31 +11571,24 @@ finish: IORecursiveLockUnlock(sKextLock); - if (callbackKext) { - callbackKext->release(); - } - if (requestTagNum) { - requestTagNum->release(); - } + return result; +} - if (requestDict) { - requestDict->release(); - } - if (kextIdentifier) { - kextIdentifier->release(); - } - if (resourceName) { - resourceName->release(); - } +OSReturn +OSKext::requestDaemonLaunch( + OSString *kextIdentifier, + OSString *serverName, + OSNumber *serverTag, + OSSharedPtr &checkInToken) +{ + OSReturn result; + IOUserServerCheckInToken * checkInTokenRaw = NULL; - if (callbackRecord) { - callbackRecord->release(); - } - if (callbackWrapper) { - callbackWrapper->release(); - } - if (contextWrapper) { - contextWrapper->release(); + result = requestDaemonLaunch(kextIdentifier, serverName, + serverTag, &checkInTokenRaw); + + if (kOSReturnSuccess == result) { + checkInToken.reset(checkInTokenRaw, OSNoRetain); } return result; @@ -9913,10 +11598,12 @@ OSReturn OSKext::requestDaemonLaunch( OSString *kextIdentifier, OSString *serverName, - OSNumber *serverTag) + OSNumber *serverTag, + IOUserServerCheckInToken ** checkInToken) { OSReturn 
result = kOSReturnError; - OSDictionary * requestDict = NULL; // must release + OSSharedPtr requestDict; + OSSharedPtr token; if (!kextIdentifier || !serverName || !serverTag) { result = kOSKextReturnInvalidArgument; @@ -9934,17 +11621,25 @@ OSKext::requestDaemonLaunch( serverTag->unsigned64BitValue() ); - result = _OSKextCreateRequest(kKextRequestPredicateRequestDaemonLaunch, &requestDict); + result = _OSKextCreateRequest(kKextRequestPredicateRequestDaemonLaunch, requestDict); if (result != kOSReturnSuccess) { goto finish; } - if (!_OSKextSetRequestArgument(requestDict, + token.reset(IOUserServerCheckInToken::create(), OSNoRetain); + if (!token) { + result = kOSKextReturnNoMemory; + goto finish; + } + + if (!_OSKextSetRequestArgument(requestDict.get(), kKextRequestArgumentBundleIdentifierKey, kextIdentifier) || - !_OSKextSetRequestArgument(requestDict, + !_OSKextSetRequestArgument(requestDict.get(), kKextRequestArgumentDriverExtensionServerName, serverName) || - !_OSKextSetRequestArgument(requestDict, - kKextRequestArgumentDriverExtensionServerTag, serverTag)) { + !_OSKextSetRequestArgument(requestDict.get(), + kKextRequestArgumentDriverExtensionServerTag, serverTag) || + !_OSKextSetRequestArgument(requestDict.get(), + kKextRequestArgumentCheckInToken, token.get())) { result = kOSKextReturnNoMemory; goto finish; } @@ -9952,108 +11647,1233 @@ OSKext::requestDaemonLaunch( /* Only post the requests after all the other potential failure points * have been passed. */ - if (!sKernelRequests->setObject(requestDict)) { + if (!sKernelRequests->setObject(requestDict.get())) { result = kOSKextReturnNoMemory; goto finish; } - OSKext::pingKextd(); + *checkInToken = token.detach(); + OSKext::pingIOKitDaemon(); result = kOSReturnSuccess; finish: IORecursiveLockUnlock(sKextLock); - if (requestDict) { - requestDict->release(); + return result; +} + +/********************************************************************* +* Assumes sKextLock is held. +*********************************************************************/ +/* static */ +OSReturn +OSKext::dequeueCallbackForRequestTag( + OSKextRequestTag requestTag, + OSSharedPtr &callbackRecordOut) +{ + OSDictionary * callbackRecordOutRaw = NULL; + OSReturn result; + + result = dequeueCallbackForRequestTag(requestTag, + &callbackRecordOutRaw); + + if (kOSReturnSuccess == result) { + callbackRecordOut.reset(callbackRecordOutRaw, OSNoRetain); + } + + return result; +} +OSReturn +OSKext::dequeueCallbackForRequestTag( + OSKextRequestTag requestTag, + OSDictionary ** callbackRecordOut) +{ + OSReturn result = kOSReturnError; + OSSharedPtr requestTagNum; + + requestTagNum = OSNumber::withNumber((long long unsigned int)requestTag, + 8 * sizeof(requestTag)); + if (!requestTagNum) { + goto finish; + } + + result = OSKext::dequeueCallbackForRequestTag(requestTagNum.get(), + callbackRecordOut); + +finish: + return result; +} + +/********************************************************************* +* Assumes sKextLock is held. 
+*********************************************************************/ +/* static */ +OSReturn +OSKext::dequeueCallbackForRequestTag( + OSNumber * requestTagNum, + OSSharedPtr &callbackRecordOut) +{ + OSDictionary * callbackRecordOutRaw = NULL; + OSReturn result; + + result = dequeueCallbackForRequestTag(requestTagNum, + &callbackRecordOutRaw); + + if (kOSReturnSuccess == result) { + callbackRecordOut.reset(callbackRecordOutRaw, OSNoRetain); + } + + return result; +} +OSReturn +OSKext::dequeueCallbackForRequestTag( + OSNumber * requestTagNum, + OSDictionary ** callbackRecordOut) +{ + OSReturn result = kOSKextReturnInvalidArgument; + OSDictionary * callbackRecord = NULL; // retain if matched! + OSNumber * callbackTagNum = NULL; // do not release + unsigned int count, i; + + result = kOSReturnError; + count = sRequestCallbackRecords->getCount(); + for (i = 0; i < count; i++) { + callbackRecord = OSDynamicCast(OSDictionary, + sRequestCallbackRecords->getObject(i)); + if (!callbackRecord) { + goto finish; + } + + /* If we don't find a tag, we basically have a leak here. Maybe + * we should just remove it. + */ + callbackTagNum = OSDynamicCast(OSNumber, _OSKextGetRequestArgument( + callbackRecord, kKextRequestArgumentRequestTagKey)); + if (!callbackTagNum) { + goto finish; + } + + /* We could be even more paranoid and check that all the incoming + * args match what's in the callback record. + */ + if (callbackTagNum->isEqualTo(requestTagNum)) { + if (callbackRecordOut) { + *callbackRecordOut = callbackRecord; + callbackRecord->retain(); + } + sRequestCallbackRecords->removeObject(i); + result = kOSReturnSuccess; + goto finish; + } + } + result = kOSKextReturnNotFound; + +finish: + return result; +} + + +/********************************************************************* +* Busy timeout triage +*********************************************************************/ +/* static */ +bool +OSKext::pendingIOKitDaemonRequests(void) +{ + return sRequestCallbackRecords && sRequestCallbackRecords->getCount(); +} + +/********************************************************************* +* Acquires and releases sKextLock +* +* This function is designed to be called exactly once on boot by +* the IOKit management daemon, kernelmanagerd. It gathers all codeless +* kext and dext personalities, and then attempts to map a System +* (pageable) KC and an Auxiliary (aux) KC. +* +* Even if the pageable or aux KC fail to load - this function will +* not allow a second call. This avoids security issues where +* kernelmanagerd has been compromised or the pageable kc has been +* tampered with and the attacker attempts to re-load a malicious +* variant. +* +* Return: if a KC fails to load the return value will contain: +* kOSKextReturnKCLoadFailure. If the pageable KC fails, +* the return value will contain kOSKextReturnKCLoadFailureSystemKC. +* Similarly, if the aux kc load fails, the return value will +* contain kOSKextReturnKCLoadFailureAuxKC. The two values +* compose with each other and with kOSKextReturnKCLoadFailure. 
+*********************************************************************/ +/* static */ +OSReturn +OSKext::loadFileSetKexts(OSDictionary * requestDict __unused) +{ + static bool daemon_ready = false; + + OSReturn ret = kOSKextReturnInvalidArgument; + OSReturn kcerr = 0; + bool start_matching = false; + + bool allow_fileset_load = !daemon_ready; +#if !(defined(__x86_64__) || defined(__i386__)) + /* never allow KCs full of kexts on non-x86 machines */ + allow_fileset_load = false; +#endif + + /* + * Get the args from the request. Right now we need the file + * name for the pageable and the aux kext collection file sets. + */ + OSDictionary * requestArgs = NULL; // do not release + OSString * pageable_filepath = NULL; // do not release + OSString * aux_filepath = NULL; // do not release + OSArray * codeless_kexts = NULL; // do not release + + kernel_mach_header_t *akc_mh = NULL; + + requestArgs = OSDynamicCast(OSDictionary, + requestDict->getObject(kKextRequestArgumentsKey)); + + if (requestArgs == NULL) { + OSKextLog(/* kext */ NULL, kOSKextLogDebugLevel | kOSKextLogIPCFlag, + "KextLog: No arguments in plist for loading fileset kext\n"); + printf("KextLog: No arguments in plist for loading fileset kext\n"); + return ret; + } + + ret = kOSKextReturnDisabled; + + IORecursiveLockLock(sKextLock); + + pageable_filepath = OSDynamicCast(OSString, + requestArgs->getObject(kKextRequestArgumentPageableKCFilename)); + + if (allow_fileset_load && pageable_filepath != NULL) { + printf("KextLog: Loading Pageable KC from file %s\n", pageable_filepath->getCStringNoCopy()); + + ret = OSKext::loadKCFileSet(pageable_filepath->getCStringNoCopy(), KCKindPageable); + if (ret) { + OSKextLog(/* kext */ NULL, kOSKextLogDebugLevel | kOSKextLogIPCFlag, + "KextLog: loadKCFileSet for Pageable KC returned %d\n", ret); + + printf("KextLog: loadKCFileSet for Pageable KC returned %d\n", ret); + ret = kOSKextReturnKCLoadFailure; + kcerr |= kOSKextReturnKCLoadFailureSystemKC; + goto try_auxkc; + } + /* + * Even if the AuxKC fails to load, we still want to send + * the System KC personalities to the catalog for matching + */ + start_matching = true; + } else if (pageable_filepath != NULL) { + OSKextLog(/* kext */ NULL, kOSKextLogBasicLevel | kOSKextLogIPCFlag, + "KextLog: ignoring Pageable KC load from %s\n", pageable_filepath->getCStringNoCopy()); + ret = kOSKextReturnUnsupported; + } + +try_auxkc: + akc_mh = (kernel_mach_header_t*)PE_get_kc_header(KCKindAuxiliary); + if (akc_mh) { + /* + * If we try to load a deferred AuxKC, then don't ever attempt + * a filesystem map of a file + */ + allow_fileset_load = false; + + /* + * This function is only called once per boot, so we haven't + * yet loaded an AuxKC. If we have registered the AuxKC mach + * header, that means that the kext collection has been placed + * in memory for us by the booter, and is waiting for us to + * process it. Grab the deferred XML plist of info + * dictionaries and add all the kexts. 
+ */ + OSSharedPtr parsedXML; + OSSharedPtr loaded_kcUUID; + OSDictionary *infoDict; + parsedXML = consumeDeferredKextCollection(KCKindAuxiliary); + infoDict = OSDynamicCast(OSDictionary, parsedXML.get()); + if (infoDict) { + bool added; + printf("KextLog: Adding kexts from in-memory AuxKC\n"); + added = OSKext::addKextsFromKextCollection(akc_mh, infoDict, + kPrelinkTextSegment, loaded_kcUUID, KCKindAuxiliary); + if (!loaded_kcUUID) { + OSKextLog(/* kext */ NULL, kOSKextLogErrorLevel | kOSKextLogArchiveFlag, + "KextLog: WARNING: did not find UUID in deferred Aux KC!"); + } else if (!added) { + OSKextLog(/* kext */ NULL, kOSKextLogErrorLevel | kOSKextLogArchiveFlag, + "KextLog: WARNING: Failed to load AuxKC from memory."); + } + /* only return success if the pageable load (above) was successful */ + if (ret != kOSKextReturnKCLoadFailure) { + ret = kOSReturnSuccess; + } + /* the registration of the AuxKC parsed out the KC's UUID already */ + } else { + if (daemon_ready) { + /* + * Complain, but don't return an error if this isn't the first time the + * IOKit daemon is checking in. If the daemon ever restarts, we will + * hit this case because we've already consumed the deferred personalities. + * We return success here so that a call to this function from a restarted + * daemon with no codeless kexts will succeed. + */ + OSKextLog(/* kext */ NULL, kOSKextLogBasicLevel | kOSKextLogIPCFlag, + "KextLog: can't re-parse deferred AuxKC personalities on IOKit daemon restart"); + if (ret != kOSKextReturnKCLoadFailure) { + ret = kOSReturnSuccess; + } + } else { + /* this is a real error case */ + OSKextLog(/* kext */ NULL, kOSKextLogErrorLevel | kOSKextLogIPCFlag, + "KextLog: ERROR loading deferred AuxKC: PRELINK_INFO wasn't an OSDictionary"); + printf("KextLog: ERROR loading deferred AuxKC: PRELINK_INFO wasn't an OSDictionary\n"); + ret = kOSKextReturnKCLoadFailure; + kcerr |= kOSKextReturnKCLoadFailureAuxKC; + } + } + } + + aux_filepath = OSDynamicCast(OSString, + requestArgs->getObject(kKextRequestArgumentAuxKCFilename)); + if (allow_fileset_load && aux_filepath != NULL) { + printf("KextLog: Loading Aux KC from file %s\n", aux_filepath->getCStringNoCopy()); + + ret = OSKext::loadKCFileSet(aux_filepath->getCStringNoCopy(), KCKindAuxiliary); + if (ret) { + OSKextLog(/* kext */ NULL, kOSKextLogDebugLevel | kOSKextLogIPCFlag, + "KextLog: loadKCFileSet for Aux KC returned %d\n", ret); + + printf("KextLog: loadKCFileSet for Aux KC returned %d\n", ret); + ret = kOSKextReturnKCLoadFailure; + kcerr |= kOSKextReturnKCLoadFailureAuxKC; + goto try_codeless; + } + start_matching = true; + } else if (aux_filepath != NULL) { + OSKextLog(/* kext */ NULL, kOSKextLogBasicLevel | kOSKextLogIPCFlag, + "KextLog: Ignoring AuxKC load from %s\n", aux_filepath->getCStringNoCopy()); + if (ret != kOSKextReturnKCLoadFailure) { + ret = kOSKextReturnUnsupported; + } + } + +try_codeless: + /* + * Load codeless kexts last so that there is no possibilty of a + * codeless kext bundle ID preventing a kext in the system KC from + * loading + */ + codeless_kexts = OSDynamicCast(OSArray, + requestArgs->getObject(kKextRequestArgumentCodelessPersonalities)); + if (codeless_kexts != NULL) { + uint32_t count = codeless_kexts->getCount(); + OSKextLog(NULL, kOSKextLogDebugLevel | kOSKextLogIPCFlag, + "KextLog: loading %d codeless kexts/dexts", count); + for (uint32_t i = 0; i < count; i++) { + OSDictionary *infoDict; + infoDict = OSDynamicCast(OSDictionary, + codeless_kexts->getObject(i)); + if (!infoDict) { + continue; + } + // instantiate 
a new kext, and don't hold a reference + // (the kext subsystem will hold one implicitly) + OSKext::withCodelessInfo(infoDict); + } + /* ignore errors that are not KC load failures */ + if (ret != kOSKextReturnKCLoadFailure) { + ret = kOSReturnSuccess; + } + start_matching = true; + } + + /* send personalities to the IOCatalog once */ + if (ret == kOSReturnSuccess || start_matching || sOSKextWasResetAfterUserspaceReboot) { + OSKext::sendAllKextPersonalitiesToCatalog(true); + /* + * This request necessarily came from the IOKit daemon (kernelmanagerd), so mark + * things as active and start all the delayed matching: the + * dext and codeless kext personalities should have all been + * delivered via this one call. + */ + if (!daemon_ready) { + OSKext::setIOKitDaemonActive(); + OSKext::setDeferredLoadSucceeded(TRUE); + IOService::iokitDaemonLaunched(); + } + if (sOSKextWasResetAfterUserspaceReboot) { + sOSKextWasResetAfterUserspaceReboot = false; + OSKext::setIOKitDaemonActive(); + IOService::startDeferredMatches(); + } + } + + if (ret == kOSKextReturnKCLoadFailure) { + ret |= kcerr; + } + + /* + * Only allow this function to attempt to load the pageable and + * aux KCs once per boot. + */ + daemon_ready = true; + + IORecursiveLockUnlock(sKextLock); + + return ret; +} + +OSReturn +OSKext::resetMutableSegments(void) +{ + kernel_segment_command_t *seg = NULL; + kernel_mach_header_t *k_mh = (kernel_mach_header_t *)kmod_info->address; + u_int index = 0; + OSKextSavedMutableSegment *savedSegment = NULL; + uintptr_t kext_slide = PE_get_kc_slide(kc_type); + OSReturn err; + + if (!savedMutableSegments) { + OSKextLog(this, kOSKextLogErrorLevel | kOSKextLogLoadFlag, + "Kext %s cannot be reset, mutable segments were not saved.", getIdentifierCString()); + err = kOSKextReturnInternalError; + goto finish; + } + + for (seg = firstsegfromheader(k_mh), index = 0; seg; seg = nextsegfromheader(k_mh, seg)) { + if (!segmentIsMutable(seg)) { + continue; + } + uint64_t unslid_vmaddr = seg->vmaddr - kext_slide; + uint64_t vmsize = seg->vmsize; + err = kOSKextReturnInternalError; + for (index = 0; index < savedMutableSegments->getCount(); index++) { + savedSegment = OSDynamicCast(OSKextSavedMutableSegment, savedMutableSegments->getObject(index)); + assert(savedSegment); + if (savedSegment->getVMAddr() == seg->vmaddr && savedSegment->getVMSize() == seg->vmsize) { + OSKextLog(this, kOSKextLogDebugLevel | kOSKextLogLoadFlag, + "Resetting kext %s, mutable segment %.*s %llx->%llx.", getIdentifierCString(), (int)strnlen(seg->segname, sizeof(seg->segname)), seg->segname, unslid_vmaddr, unslid_vmaddr + vmsize - 1); + err = savedSegment->restoreContents(seg); + if (err != kOSReturnSuccess) { + panic("Kext %s cannot be reset, mutable segment %llx->%llx could not be restored.", getIdentifierCString(), unslid_vmaddr, unslid_vmaddr + vmsize - 1); + } + } + } + if (err != kOSReturnSuccess) { + panic("Kext %s cannot be reset, could not find saved mutable segment for %llx->%llx.", getIdentifierCString(), unslid_vmaddr, unslid_vmaddr + vmsize - 1); + } + } + err = kOSReturnSuccess; +finish: + return err; +} + + +/********************************************************************* +* Assumes sKextLock is held. 
+*********************************************************************/ +/* static */ +OSReturn +OSKext::loadKCFileSet( + const char *filepath, + kc_kind_t type) +{ +#if VM_MAPPED_KEXTS + /* we only need to load filesets on systems that support VM_MAPPED kexts */ + OSReturn err; + struct vnode *vp = NULL; + void *fileset_control; + off_t fsize; + bool pageable = (type == KCKindPageable); + + if ((pageable && pageableKCloaded) || + (!pageable && auxKCloaded)) { + OSKextLog(/* kext */ NULL, kOSKextLogDebugLevel | kOSKextLogIPCFlag, + "KC FileSet of type %s is already loaded", (pageable ? "Pageable" : "Aux")); + + return kOSKextReturnInvalidArgument; + } + + /* Do not allow AuxKC to load if Pageable KC is not loaded */ + if (!pageable && !pageableKCloaded) { + OSKextLog(/* kext */ NULL, kOSKextLogDebugLevel | kOSKextLogIPCFlag, + "Trying to load the Aux KC without loading the Pageable KC"); + return kOSKextReturnInvalidArgument; + } + + fileset_control = ubc_getobject_from_filename(filepath, &vp, &fsize); + + if (fileset_control == NULL) { + printf("Could not get memory control object for file %s", filepath); + + OSKextLog(/* kext */ NULL, kOSKextLogDebugLevel | kOSKextLogIPCFlag, + "Could not get memory control object for file %s", filepath); + return kOSKextReturnInvalidArgument; + } + if (vp == NULL) { + OSKextLog(/* kext */ NULL, kOSKextLogDebugLevel | kOSKextLogIPCFlag, + "Could not find vnode for file %s", filepath); + return kOSKextReturnInvalidArgument; + } + + kernel_mach_header_t *mh = NULL; + uintptr_t slide = 0; + +#if CONFIG_CSR + /* + * When SIP is enabled, the KC we map must be SIP-protected + */ + if (csr_check(CSR_ALLOW_UNRESTRICTED_FS) != 0) { + struct vnode_attr va; + int error; + VATTR_INIT(&va); + VATTR_WANTED(&va, va_flags); + error = vnode_getattr(vp, &va, vfs_context_current()); + if (error) { + OSKextLog(/* kext */ NULL, kOSKextLogDebugLevel | kOSKextLogIPCFlag, + "vnode_getattr(%s) failed (error=%d)", filepath, error); + err = kOSKextReturnInternalError; + goto finish; + } + if (!(va.va_flags & SF_RESTRICTED)) { + OSKextLog(/* kext */ NULL, kOSKextLogDebugLevel | kOSKextLogIPCFlag, + "Path to KC '%s' is not SIP-protected", filepath); + err = kOSKextReturnInvalidArgument; + goto finish; + } + } +#endif + + err = OSKext::mapKCFileSet(fileset_control, (vm_size_t)fsize, &mh, 0, &slide, pageable, NULL); + if (err) { + printf("KextLog: mapKCFileSet returned %d\n", err); + + OSKextLog(/* kext */ NULL, kOSKextLogDebugLevel | kOSKextLogIPCFlag, + "mapKCFileSet returned %d\n", err); + + err = kOSKextReturnInvalidArgument; + } + +#if CONFIG_CSR +finish: +#endif + /* Drop the vnode ref returned by ubc_getobject_from_filename if mapKCFileSet failed */ + assert(vp != NULL); + if (err == kOSReturnSuccess) { + PE_set_kc_vp(type, vp); + if (pageable) { + pageableKCloaded = true; + } else { + auxKCloaded = true; + } + } else { + vnode_put(vp); + } + + return err; +#else + (void)filepath; + (void)type; + return kOSKextReturnUnsupported; +#endif // VM_MAPPED_KEXTS +} + +#if defined(__x86_64__) || defined(__i386__) +/********************************************************************* +* Assumes sKextLock is held. 
+*********************************************************************/ +/* static */ +OSReturn +OSKext::mapKCFileSet( + void *control, + vm_size_t fsize, + kernel_mach_header_t **mhp, + off_t file_offset, + uintptr_t *slidep, + bool pageable, + void *map_entry_list) +{ + bool fileset_load = false; + kern_return_t ret; + OSReturn err; + kernel_section_t *infoPlistSection = NULL; + OSDictionary *infoDict = NULL; + + OSSharedPtr parsedXML; + OSSharedPtr errorString; + OSSharedPtr loaded_kcUUID; + + /* Check if initial load for file set */ + if (*mhp == NULL) { + fileset_load = true; + + /* Get a page aligned address from kext map to map the file */ + vm_map_offset_t pagealigned_addr = get_address_from_kext_map(fsize); + if (pagealigned_addr == 0) { + return kOSKextReturnNoMemory; + } + + *mhp = (kernel_mach_header_t *)pagealigned_addr; + + /* Allocate memory for bailout mechanism */ + map_entry_list = allocate_kcfileset_map_entry_list(); + if (map_entry_list == NULL) { + return kOSKextReturnNoMemory; + } + } + + uintptr_t *slideptr = fileset_load ? slidep : NULL; + err = mapKCTextSegment(control, mhp, file_offset, slideptr, map_entry_list); + /* mhp and slideptr are updated by mapKCTextSegment */ + if (err) { + if (fileset_load) { + deallocate_kcfileset_map_entry_list_and_unmap_entries(map_entry_list, TRUE, pageable); + } + return err; + } + + /* Initialize the kc header globals */ + if (fileset_load) { + if (pageable) { + PE_set_kc_header(KCKindPageable, *mhp, *slidep); + } else { + PE_set_kc_header(KCKindAuxiliary, *mhp, *slidep); + } + } + + /* Iterate through all the segments and map necessary segments */ + struct load_command *lcp = (struct load_command *) (*mhp + 1); + for (unsigned int i = 0; i < (*mhp)->ncmds; i++, lcp = (struct load_command *)((uintptr_t)lcp + lcp->cmdsize)) { + vm_map_offset_t start; + kernel_mach_header_t *k_mh = NULL; + kernel_segment_command_t * seg = NULL; + struct fileset_entry_command *fse = NULL; + + if (lcp->cmd == LC_SEGMENT_KERNEL) { + seg = (kernel_segment_command_t *)lcp; + start = ((uintptr_t)(seg->vmaddr)) + *slidep; + } else if (lcp->cmd == LC_FILESET_ENTRY) { + fse = (struct fileset_entry_command *)lcp; + k_mh = (kernel_mach_header_t *)(((uintptr_t)(fse->vmaddr)) + *slidep); + + /* Map the segments of the mach-o binary */ + err = OSKext::mapKCFileSet(control, 0, &k_mh, fse->fileoff, slidep, pageable, map_entry_list); + if (err) { + deallocate_kcfileset_map_entry_list_and_unmap_entries(map_entry_list, TRUE, pageable); + return kOSKextReturnInvalidArgument; + } + continue; + } else if (lcp->cmd == LC_DYLD_CHAINED_FIXUPS) { + /* Check if the Aux KC is built pageable style */ + if (!pageable && !fileset_load && !auxKCloaded) { + resetAuxKCSegmentOnUnload = true; + } + continue; + } else { + continue; + } + + if (fileset_load) { + if (seg->vmsize == 0) { + continue; + } + + /* Only map __PRELINK_INFO, __BRANCH_STUBS, __BRANCH_GOTS and __LINKEDIT sections */ + if (strncmp(seg->segname, kPrelinkInfoSegment, sizeof(seg->segname)) != 0 && + strncmp(seg->segname, kKCBranchStubs, sizeof(seg->segname)) != 0 && + strncmp(seg->segname, kKCBranchGots, sizeof(seg->segname)) != 0 && + strncmp(seg->segname, SEG_LINKEDIT, sizeof(seg->segname)) != 0) { + continue; + } + } else { + if (seg->vmsize == 0) { + continue; + } + + /* Skip the __LINKEDIT, __LINKINFO and __TEXT segments */ + if (strncmp(seg->segname, SEG_LINKEDIT, sizeof(seg->segname)) == 0 || + strncmp(seg->segname, SEG_LINKINFO, sizeof(seg->segname)) == 0 || + strncmp(seg->segname, SEG_TEXT, 
sizeof(seg->segname)) == 0) { + continue; + } + } + + ret = vm_map_kcfileset_segment( + &start, seg->vmsize, + (memory_object_control_t)control, seg->fileoff, seg->maxprot); + + if (ret != KERN_SUCCESS) { + if (fileset_load) { + deallocate_kcfileset_map_entry_list_and_unmap_entries(map_entry_list, TRUE, pageable); + } + return kOSKextReturnInvalidArgument; + } + add_kcfileset_map_entry(map_entry_list, start, seg->vmsize); + } + + /* Return if regular mach-o */ + if (!fileset_load) { + return 0; + } + + /* + * Fixup for the Pageable KC and the Aux KC is done by + * i386_slide_kext_collection_mh_addrs, but it differs in + * following ways: + * + * PageableKC: Fixup only __BRANCH_STUBS segment and top level load commands. + * The fixup of kext segments and kext load commands are done at kext + * load time by calling i386_slide_individual_kext. + * + * AuxKC old style: Fixup all the segments and all the load commands. + * + * AuxKC pageable style: Same as the Pageable KC. + */ + bool adjust_mach_header = (pageable ? true : ((resetAuxKCSegmentOnUnload) ? true : false)); + ret = i386_slide_kext_collection_mh_addrs(*mhp, *slidep, adjust_mach_header); + if (ret != KERN_SUCCESS) { + deallocate_kcfileset_map_entry_list_and_unmap_entries(map_entry_list, TRUE, pageable); + return kOSKextReturnInvalidArgument; + } + + /* Get the prelink info dictionary */ + infoPlistSection = getsectbynamefromheader(*mhp, kPrelinkInfoSegment, kPrelinkInfoSection); + parsedXML = OSUnserializeXML((const char *)infoPlistSection->addr, errorString); + if (parsedXML) { + infoDict = OSDynamicCast(OSDictionary, parsedXML.get()); + } + + if (!infoDict) { + const char *errorCString = "(unknown error)"; + + if (errorString && errorString->getCStringNoCopy()) { + errorCString = errorString->getCStringNoCopy(); + } else if (parsedXML) { + errorCString = "not a dictionary"; + } + OSKextLog(/* kext */ NULL, kOSKextLogErrorLevel | kOSKextLogArchiveFlag, + "Error unserializing kext info plist section: %s.", errorCString); + deallocate_kcfileset_map_entry_list_and_unmap_entries(map_entry_list, TRUE, pageable); + return kOSKextReturnInvalidArgument; + } + + /* Validate that the Kext Collection is prelinked to the loaded KC */ + err = OSKext::validateKCFileSetUUID(infoDict, pageable ? KCKindPageable : KCKindAuxiliary); + if (err) { + deallocate_kcfileset_map_entry_list_and_unmap_entries(map_entry_list, TRUE, pageable); + return kOSKextReturnInvalidArgument; + } + + /* Set Protection of Segments */ + OSKext::protectKCFileSet(*mhp, pageable ? KCKindPageable : KCKindAuxiliary); + + OSKext::addKextsFromKextCollection(*mhp, + infoDict, kPrelinkTextSegment, + loaded_kcUUID, pageable ? KCKindPageable : KCKindAuxiliary); + + /* Copy in the KC UUID */ + if (!loaded_kcUUID) { + OSKextLog(/* kext */ NULL, kOSKextLogErrorLevel | kOSKextLogArchiveFlag, + "WARNING: did not find UUID in prelinked %s KC!", pageable ? 
"Pageable" : "Aux"); + } else if (pageable) { + pageablekc_uuid_valid = TRUE; + memcpy((void *)&pageablekc_uuid, (const void *)loaded_kcUUID->getBytesNoCopy(), loaded_kcUUID->getLength()); + uuid_unparse_upper(pageablekc_uuid, pageablekc_uuid_string); + } else { + auxkc_uuid_valid = TRUE; + memcpy((void *)&auxkc_uuid, (const void *)loaded_kcUUID->getBytesNoCopy(), loaded_kcUUID->getLength()); + uuid_unparse_upper(auxkc_uuid, auxkc_uuid_string); + } + + deallocate_kcfileset_map_entry_list_and_unmap_entries(map_entry_list, FALSE, pageable); + + return 0; +} + +/********************************************************************* +* Assumes sKextLock is held. +*********************************************************************/ +/* static */ +OSReturn +OSKext::mapKCTextSegment( + void *control, + kernel_mach_header_t **mhp, + off_t file_offset, + uintptr_t *slidep, + void *map_entry_list) +{ + kern_return_t ret; + vm_map_offset_t mach_header_map_size = vm_map_round_page(sizeof(kernel_mach_header_t), + PAGE_MASK); + vm_map_offset_t load_command_map_size = 0; + kernel_mach_header_t *base_mh = *mhp; + + /* Map the mach header at start of fileset for now (vmaddr = 0) */ + ret = vm_map_kcfileset_segment( + (vm_map_offset_t *)&base_mh, mach_header_map_size, + (memory_object_control_t)control, file_offset, (VM_PROT_READ | VM_PROT_WRITE)); + + if (ret != KERN_SUCCESS) { + printf("Kext Log: mapKCTextSegment failed to map mach header of fileset %x", ret); + + OSKextLog(/* kext */ NULL, kOSKextLogDebugLevel | kOSKextLogIPCFlag, + "Failed to map mach header of kc fileset with error %d", ret); + return kOSKextReturnInvalidArgument; + } + + if (slidep) { + /* Verify that it's an MH_FILESET */ + if (base_mh->filetype != MH_FILESET) { + printf("Kext Log: mapKCTextSegment mach header filetype" + " is not an MH_FILESET, it is %x", base_mh->filetype); + + OSKextLog(/* kext */ NULL, kOSKextLogDebugLevel | kOSKextLogIPCFlag, + "mapKCTextSegment mach header filetype is not an MH_FILESET, it is %x", base_mh->filetype); + + /* Unmap the mach header */ + vm_unmap_kcfileset_segment((vm_map_offset_t *)&base_mh, mach_header_map_size); + return kOSKextReturnInvalidArgument; + } + } + + /* Map the remaining pages of load commands */ + if (base_mh->sizeofcmds > mach_header_map_size) { + vm_map_offset_t load_command_addr = ((vm_map_offset_t)base_mh) + mach_header_map_size; + load_command_map_size = base_mh->sizeofcmds - mach_header_map_size; + + /* Map the load commands */ + ret = vm_map_kcfileset_segment( + &load_command_addr, load_command_map_size, + (memory_object_control_t)control, file_offset + mach_header_map_size, + (VM_PROT_READ | VM_PROT_WRITE)); + + if (ret != KERN_SUCCESS) { + printf("KextLog: mapKCTextSegment failed to map load commands of fileset %x", ret); + OSKextLog(/* kext */ NULL, kOSKextLogDebugLevel | kOSKextLogIPCFlag, + "Failed to map load commands of kc fileset with error %d", ret); + + /* Unmap the mach header */ + vm_unmap_kcfileset_segment((vm_map_offset_t *)&base_mh, mach_header_map_size); + return kOSKextReturnInvalidArgument; + } + } + + kernel_segment_command_t *text_seg; + text_seg = getsegbynamefromheader((kernel_mach_header_t *)base_mh, SEG_TEXT); + + /* Calculate the slide and vm addr of mach header */ + if (slidep) { + *mhp = (kernel_mach_header_t *)((uintptr_t)base_mh + text_seg->vmaddr); + *slidep = ((uintptr_t)*mhp) - text_seg->vmaddr; + } + + /* Cache the text segment size and file offset before unmapping */ + vm_map_offset_t text_segment_size = text_seg->vmsize; + 
vm_object_offset_t text_segment_fileoff = text_seg->fileoff; + vm_prot_t text_maxprot = text_seg->maxprot; + + /* Unmap the first page and loadcommands and map the text segment */ + ret = vm_unmap_kcfileset_segment((vm_map_offset_t *)&base_mh, mach_header_map_size); + assert(ret == KERN_SUCCESS); + + if (load_command_map_size) { + vm_map_offset_t load_command_addr = ((vm_map_offset_t)base_mh) + mach_header_map_size; + ret = vm_unmap_kcfileset_segment(&load_command_addr, load_command_map_size); + assert(ret == KERN_SUCCESS); + } + + /* Map the text segment at actual vm addr specified in fileset */ + ret = vm_map_kcfileset_segment((vm_map_offset_t *)mhp, text_segment_size, + (memory_object_control_t)control, text_segment_fileoff, text_maxprot); + if (ret != KERN_SUCCESS) { + OSKextLog(/* kext */ NULL, kOSKextLogDebugLevel | kOSKextLogIPCFlag, + "Failed to map Text segment of kc fileset with error %d", ret); + return kOSKextReturnInvalidArgument; + } + + add_kcfileset_map_entry(map_entry_list, (vm_map_offset_t)*mhp, text_segment_size); + return 0; +} + +/********************************************************************* +* Assumes sKextLock is held. +*********************************************************************/ +/* static */ +OSReturn +OSKext::protectKCFileSet( + kernel_mach_header_t *mh, + kc_kind_t type) +{ + vm_map_t kext_map = g_kext_map; + kernel_segment_command_t * seg = NULL; + vm_map_offset_t start = 0; + vm_map_offset_t end = 0; + OSReturn ret = 0; + + /* Set VM permissions */ + seg = firstsegfromheader((kernel_mach_header_t *)mh); + while (seg) { + start = round_page(seg->vmaddr); + end = trunc_page(seg->vmaddr + seg->vmsize); + + /* + * Wire down and protect __TEXT, __BRANCH_STUBS and __BRANCH_GOTS + * for the Pageable KC and the Aux KC, wire down and protect __LINKEDIT + * for the Aux KC as well. + */ + if (strncmp(seg->segname, kKCBranchGots, sizeof(seg->segname)) == 0 || + strncmp(seg->segname, kKCBranchStubs, sizeof(seg->segname)) == 0 || + strncmp(seg->segname, SEG_TEXT, sizeof(seg->segname)) == 0 || + (type == KCKindAuxiliary && !resetAuxKCSegmentOnUnload && + strncmp(seg->segname, SEG_LINKEDIT, sizeof(seg->segname)) == 0)) { + ret = OSKext_protect((kernel_mach_header_t *)mh, + kext_map, start, end, seg->maxprot, TRUE, type); + if (ret != KERN_SUCCESS) { + printf("OSKext protect failed with error %d", ret); + return kOSKextReturnInvalidArgument; + } + + ret = OSKext_protect((kernel_mach_header_t *)mh, + kext_map, start, end, seg->initprot, FALSE, type); + if (ret != KERN_SUCCESS) { + printf("OSKext protect failed with error %d", ret); + return kOSKextReturnInvalidArgument; + } + + ret = OSKext_wire((kernel_mach_header_t *)mh, + kext_map, start, end, seg->initprot, FALSE, type); + if (ret != KERN_SUCCESS) { + printf("OSKext wire failed with error %d", ret); + return kOSKextReturnInvalidArgument; + } + } + + seg = nextsegfromheader((kernel_mach_header_t *) mh, seg); + } + + return 0; +} + +/********************************************************************* +* Assumes sKextLock is held. +*********************************************************************/ +/* static */ +void +OSKext::freeKCFileSetcontrol(void) +{ + PE_reset_all_kc_vp(); +} + +/********************************************************************* +* Assumes sKextLock is held. +* +* resetKCFileSetSegments: Kext start function expects data segment to +* be pristine on every load, unmap the dirty segments on unload and +* remap them from FileSet on disk. 
Remap all segments of kext since +* fixups are done per kext and not per segment. +*********************************************************************/ +OSReturn +OSKext::resetKCFileSetSegments(void) +{ + kernel_segment_command_t *seg = NULL; + kernel_segment_command_t *text_seg; + uint32_t text_fileoff; + kernel_mach_header_t *k_mh = NULL; + uintptr_t slide; + struct vnode *vp = NULL; + void *fileset_control = NULL; + bool pageable = (kc_type == KCKindPageable); + OSReturn err; + kern_return_t kr; + + /* Check the vnode reference is still available */ + vp = (struct vnode *)PE_get_kc_vp(kc_type); + if (vp == NULL) { + OSKextLog(this, kOSKextLogProgressLevel | kOSKextLogLoadFlag, + "Kext %s could not be reset, since reboot released the vnode ref", getIdentifierCString()); + return kOSKextReturnInternalError; + } + + fileset_control = ubc_getobject(vp, 0); + assert(fileset_control != NULL); + + OSKextLog(this, kOSKextLogProgressLevel | kOSKextLogLoadFlag, + "Kext %s resetting all segments", getIdentifierCString()); + + k_mh = (kernel_mach_header_t *)kmod_info->address; + text_seg = getsegbynamefromheader((kernel_mach_header_t *)kmod_info->address, SEG_TEXT); + text_fileoff = text_seg->fileoff; + slide = PE_get_kc_slide(kc_type); + + seg = firstsegfromheader((kernel_mach_header_t *)k_mh); + while (seg) { + if (seg->vmsize == 0) { + seg = nextsegfromheader((kernel_mach_header_t *) k_mh, seg); + continue; + } + + /* Skip the __LINKEDIT, __LINKINFO and __TEXT segments */ + if (strncmp(seg->segname, SEG_LINKEDIT, sizeof(seg->segname)) == 0 || + strncmp(seg->segname, SEG_LINKINFO, sizeof(seg->segname)) == 0 || + strncmp(seg->segname, SEG_TEXT, sizeof(seg->segname)) == 0) { + seg = nextsegfromheader((kernel_mach_header_t *) k_mh, seg); + continue; + } + + kr = vm_unmap_kcfileset_segment(&seg->vmaddr, seg->vmsize); + assert(kr == KERN_SUCCESS); + seg = nextsegfromheader((kernel_mach_header_t *) k_mh, seg); + } + + /* Unmap the text segment */ + kr = vm_unmap_kcfileset_segment(&text_seg->vmaddr, text_seg->vmsize); + assert(kr == KERN_SUCCESS); + + /* Map all the segments of the kext */ + err = OSKext::mapKCFileSet(fileset_control, 0, &k_mh, text_fileoff, &slide, pageable, NULL); + if (err) { + panic("Could not reset segments of a mapped kext, error %x", err); + } + + /* Update address in kmod_info, since it has been reset */ + if (kmod_info->address) { + kmod_info->address = (((uintptr_t)(kmod_info->address)) + slide); + } + + return 0; +} + +/********************************************************************* +* Mechanism to track all segment mapping while mapping KC fileset. 
+*********************************************************************/ + +struct kcfileset_map_entry { + vm_map_offset_t me_start; + vm_map_offset_t me_size; +}; + +struct kcfileset_map_entry_list { + int kme_list_count; + int kme_list_index; + struct kcfileset_map_entry kme_list[]; +}; + +#define KCFILESET_MAP_ENTRY_MAX (16380) + +static void * +allocate_kcfileset_map_entry_list(void) +{ + struct kcfileset_map_entry_list *entry_list; + + entry_list = (struct kcfileset_map_entry_list *)kalloc(sizeof(struct kcfileset_map_entry_list) + + (sizeof(struct kcfileset_map_entry) * KCFILESET_MAP_ENTRY_MAX)); + + entry_list->kme_list_count = KCFILESET_MAP_ENTRY_MAX; + entry_list->kme_list_index = 0; + return entry_list; +} + +static void +add_kcfileset_map_entry( + void *map_entry_list, + vm_map_offset_t start, + vm_map_offset_t size) +{ + if (map_entry_list == NULL) { + return; + } + + struct kcfileset_map_entry_list *entry_list = (struct kcfileset_map_entry_list *)map_entry_list; + + if (entry_list->kme_list_index >= entry_list->kme_list_count) { + panic("Ran out of map kc fileset list\n"); + } + + entry_list->kme_list[entry_list->kme_list_index].me_start = start; + entry_list->kme_list[entry_list->kme_list_index].me_size = size; + + entry_list->kme_list_index++; +} + +static void +deallocate_kcfileset_map_entry_list_and_unmap_entries( + void *map_entry_list, + boolean_t unmap_entries, + bool pageable) +{ + struct kcfileset_map_entry_list *entry_list = (struct kcfileset_map_entry_list *)map_entry_list; + + if (unmap_entries) { + for (int i = 0; i < entry_list->kme_list_index; i++) { + kern_return_t ret; + ret = vm_unmap_kcfileset_segment( + &(entry_list->kme_list[i].me_start), + entry_list->kme_list[i].me_size); + assert(ret == KERN_SUCCESS); + } + + PE_reset_kc_header(pageable ? KCKindPageable : KCKindAuxiliary); } - return result; + + kfree(entry_list, sizeof(struct kcfileset_map_entry_list) + + (sizeof(struct kcfileset_map_entry) * KCFILESET_MAP_ENTRY_MAX)); } /********************************************************************* -* Assumes sKextLock is held. +* Mechanism to map kext segment. *********************************************************************/ -/* static */ -OSReturn -OSKext::dequeueCallbackForRequestTag( - OSKextRequestTag requestTag, - OSDictionary ** callbackRecordOut) + +kern_return_t +vm_map_kcfileset_segment( + vm_map_offset_t *start, + vm_map_offset_t size, + void *control, + vm_object_offset_t fileoffset, + vm_prot_t max_prot) { - OSReturn result = kOSReturnError; - OSNumber * requestTagNum = NULL;// must release + vm_map_kernel_flags_t vmk_flags; + vmk_flags.vmkf_no_copy_on_read = 1; + vmk_flags.vmkf_cs_enforcement = 0; + vmk_flags.vmkf_cs_enforcement_override = 1; + kern_return_t ret; - requestTagNum = OSNumber::withNumber((long long unsigned int)requestTag, - 8 * sizeof(requestTag)); - if (!requestTagNum) { - goto finish; - } + /* Add Write to max prot to allow fixups */ + max_prot = max_prot | VM_PROT_WRITE; - result = OSKext::dequeueCallbackForRequestTag(requestTagNum, - callbackRecordOut); + /* + * Map the segments from file as COPY mappings to + * make sure changes on disk to the file does not affect + * mapped segments. 
+ */ + ret = vm_map_enter_mem_object_control( + g_kext_map, + start, + size, + (mach_vm_offset_t)0, + VM_FLAGS_FIXED, + vmk_flags, + VM_KERN_MEMORY_OSKEXT, + (memory_object_control_t)control, + fileoffset, + TRUE, /* copy */ + (VM_PROT_READ | VM_PROT_WRITE), max_prot, + VM_INHERIT_NONE); -finish: - OSSafeReleaseNULL(requestTagNum); + return ret; +} - return result; +kern_return_t +vm_unmap_kcfileset_segment( + vm_map_offset_t *start, + vm_map_offset_t size) +{ + return mach_vm_deallocate(g_kext_map, *start, size); } +#endif //(__x86_64__) || defined(__i386__) + /********************************************************************* * Assumes sKextLock is held. *********************************************************************/ /* static */ OSReturn -OSKext::dequeueCallbackForRequestTag( - OSNumber * requestTagNum, - OSDictionary ** callbackRecordOut) +OSKext::validateKCFileSetUUID( + OSDictionary *infoDict, + kc_kind_t type) { - OSReturn result = kOSKextReturnInvalidArgument; - OSDictionary * callbackRecord = NULL;// retain if matched! - OSNumber * callbackTagNum = NULL;// do not release - unsigned int count, i; + OSReturn ret = kOSReturnSuccess; - result = kOSReturnError; - count = sRequestCallbackRecords->getCount(); - for (i = 0; i < count; i++) { - callbackRecord = OSDynamicCast(OSDictionary, - sRequestCallbackRecords->getObject(i)); - if (!callbackRecord) { - goto finish; - } + if (!kernelcache_uuid_valid) { + OSKextLog(/* kext */ NULL, kOSKextLogErrorLevel | kOSKextLogArchiveFlag, + "validateKCFileSetUUID Boot KC UUID was not set at boot."); + ret = kOSKextReturnInvalidArgument; + goto finish; + } + ret = OSKext::validateKCUUIDfromPrelinkInfo(&kernelcache_uuid, type, infoDict, kPrelinkInfoBootKCIDKey); + if (ret != 0) { + goto finish; + } - /* If we don't find a tag, we basically have a leak here. Maybe - * we should just remove it. - */ - callbackTagNum = OSDynamicCast(OSNumber, _OSKextGetRequestArgument( - callbackRecord, kKextRequestArgumentRequestTagKey)); - if (!callbackTagNum) { +#if defined(__x86_64__) || defined(__i386__) + /* Check if the Aux KC is prelinked to correct Pageable KC */ + if (type == KCKindAuxiliary) { + if (!pageablekc_uuid_valid) { + OSKextLog(/* kext */ NULL, kOSKextLogErrorLevel | kOSKextLogArchiveFlag, + "validateKCFileSetUUID Pageable KC UUID was not set while loading Pageable KC."); + ret = kOSKextReturnInvalidArgument; goto finish; } - - /* We could be even more paranoid and check that all the incoming - * args match what's in the callback record. - */ - if (callbackTagNum->isEqualTo(requestTagNum)) { - if (callbackRecordOut) { - *callbackRecordOut = callbackRecord; - callbackRecord->retain(); - } - sRequestCallbackRecords->removeObject(i); - result = kOSReturnSuccess; + ret = OSKext::validateKCUUIDfromPrelinkInfo(&pageablekc_uuid, type, infoDict, kPrelinkInfoPageableKCIDKey); + if (ret != 0) { goto finish; } } - result = kOSKextReturnNotFound; +#endif //(__x86_64__) || defined(__i386__) + printf("KextLog: Collection UUID matches with loaded KCs.\n"); finish: - return result; + return ret; } - /********************************************************************* -* Busy timeout triage +* Assumes sKextLock is held. 
*********************************************************************/ /* static */ -bool -OSKext::isWaitingKextd(void) -{ - return sRequestCallbackRecords && sRequestCallbackRecords->getCount(); +OSReturn +OSKext::validateKCUUIDfromPrelinkInfo( + uuid_t *loaded_kcuuid, + kc_kind_t type, + OSDictionary *infoDict, + const char *uuid_key) +{ + /* extract the UUID from the dictionary */ + OSData *prelinkinfoKCUUID = OSDynamicCast(OSData, infoDict->getObject(uuid_key)); + if (!prelinkinfoKCUUID) { + OSKextLog(/* kext */ NULL, kOSKextLogErrorLevel | kOSKextLogArchiveFlag, + "validateKCUUID Info plist does not contain %s KC UUID key.", uuid_key); + return kOSKextReturnInvalidArgument; + } + + if (prelinkinfoKCUUID->getLength() != sizeof(uuid_t)) { + OSKextLog(/* kext */ NULL, kOSKextLogErrorLevel | kOSKextLogArchiveFlag, + "validateKCUUID %s KC UUID has wrong length: %d.", uuid_key, prelinkinfoKCUUID->getLength()); + return kOSKextReturnInvalidArgument; + } + + if (memcmp((void *)loaded_kcuuid, (const void *)prelinkinfoKCUUID->getBytesNoCopy(), + prelinkinfoKCUUID->getLength())) { + OSData *info_dict_uuid; + uuid_string_t info_dict_uuid_str = {}; + uuid_string_t expected_uuid_str = {}; + uuid_string_t given_uuid_str = {}; + uuid_t given_uuid; + + /* extract the KC UUID from the dictionary */ + info_dict_uuid = OSDynamicCast(OSData, infoDict->getObject(kPrelinkInfoKCIDKey)); + if (info_dict_uuid && info_dict_uuid->getLength() == sizeof(uuid_t)) { + uuid_t tmp_uuid; + memcpy(tmp_uuid, (const void *)info_dict_uuid->getBytesNoCopy(), sizeof(tmp_uuid)); + uuid_unparse(tmp_uuid, info_dict_uuid_str); + } + + uuid_unparse(*loaded_kcuuid, expected_uuid_str); + memcpy(given_uuid, (const void *)prelinkinfoKCUUID->getBytesNoCopy(), sizeof(given_uuid)); + uuid_unparse(given_uuid, given_uuid_str); + + printf("KextLog: ERROR: UUID from key:%s %s != expected %s (KC UUID: %s)\n", uuid_key, + given_uuid_str, expected_uuid_str, info_dict_uuid_str); + OSKextLog(/* kext */ NULL, kOSKextLogErrorLevel | kOSKextLogArchiveFlag, + "KextLog: ERROR: UUID from key:%s %s != expected %s (KC UUID: %s)\n", uuid_key, + given_uuid_str, expected_uuid_str, info_dict_uuid_str); + if (type == KCKindPageable && sPanicOnKCMismatch) { + panic("System KC UUID %s linked against %s, but %s is loaded", + info_dict_uuid_str, given_uuid_str, expected_uuid_str); + } + return kOSKextReturnInvalidArgument; + } + + return 0; } /********************************************************************* @@ -10064,17 +12884,17 @@ OSReturn OSKext::dispatchResource(OSDictionary * requestDict) { OSReturn result = kOSReturnError; - OSDictionary * callbackRecord = NULL;// must release - OSNumber * requestTag = NULL;// do not release - OSNumber * requestResult = NULL;// do not release - OSData * dataObj = NULL;// do not release + OSSharedPtr callbackRecord; + OSNumber * requestTag = NULL; // do not release + OSNumber * requestResult = NULL; // do not release + OSData * dataObj = NULL; // do not release uint32_t dataLength = 0; - const void * dataPtr = NULL;// do not free - OSData * callbackWrapper = NULL;// do not release + const void * dataPtr = NULL; // do not free + OSData * callbackWrapper = NULL; // do not release OSKextRequestResourceCallback callback = NULL; - OSData * contextWrapper = NULL;// do not release - void * context = NULL;// do not free - OSKext * callbackKext = NULL;// must release (looked up) + OSData * contextWrapper = NULL; // do not release + void * context = NULL; // do not free + OSSharedPtr callbackKext; /* Get the args from the 
request. Right now we need the tag * to look up the callback record, and the result for invoking the callback. @@ -10090,7 +12910,7 @@ OSKext::dispatchResource(OSDictionary * requestDict) /* Look for a callback record matching this request's tag. */ - result = dequeueCallbackForRequestTag(requestTag, &callbackRecord); + result = dequeueCallbackForRequestTag(requestTag, callbackRecord); if (result != kOSReturnSuccess) { goto finish; } @@ -10098,7 +12918,7 @@ OSKext::dispatchResource(OSDictionary * requestDict) /***** * Get the context pointer of the callback record (if there is one). */ - contextWrapper = OSDynamicCast(OSData, _OSKextGetRequestArgument(callbackRecord, + contextWrapper = OSDynamicCast(OSData, _OSKextGetRequestArgument(callbackRecord.get(), kKextRequestArgumentContextKey)); context = _OSKextExtractPointer(contextWrapper); if (contextWrapper && !context) { @@ -10106,10 +12926,9 @@ OSKext::dispatchResource(OSDictionary * requestDict) } callbackWrapper = OSDynamicCast(OSData, - _OSKextGetRequestArgument(callbackRecord, + _OSKextGetRequestArgument(callbackRecord.get(), kKextRequestArgumentCallbackKey)); - callback = (OSKextRequestResourceCallback) - _OSKextExtractPointer(callbackWrapper); + callback = _OSKextExtractCallbackPointer(callbackWrapper); if (!callback) { goto finish; } @@ -10146,14 +12965,81 @@ OSKext::dispatchResource(OSDictionary * requestDict) result = kOSReturnSuccess; finish: - if (callbackKext) { - callbackKext->release(); + return result; +} + +/********************************************************************* +* Assumes sKextLock is held. +*********************************************************************/ +/* static */ +OSReturn +OSKext::setMissingAuxKCBundles(OSDictionary * requestDict) +{ + OSSharedPtr missingIDs; + OSArray *bundleIDList = NULL; // do not release + + bundleIDList = OSDynamicCast(OSArray, _OSKextGetRequestArgument( + requestDict, kKextRequestArgumentMissingBundleIDs)); + if (!bundleIDList) { + return kOSKextReturnInvalidArgument; + } + + missingIDs = OSDictionary::withCapacity(bundleIDList->getCount()); + if (!missingIDs) { + return kOSKextReturnNoMemory; } - if (callbackRecord) { - callbackRecord->release(); + + uint32_t count, i; + count = bundleIDList->getCount(); + for (i = 0; i < count; i++) { + OSString *thisID = OSDynamicCast(OSString, bundleIDList->getObject(i)); + if (thisID) { + missingIDs->setObject(thisID, kOSBooleanFalse); + } } - return result; + sNonLoadableKextsByID.reset(missingIDs.get(), OSRetain); + + return kOSReturnSuccess; +} + +/********************************************************************* +* Assumes sKextLock is held. 
+*********************************************************************/ +/* static */ +OSReturn +OSKext::setAuxKCBundleAvailable(OSString *kextIdentifier, OSDictionary *requestDict) +{ + bool loadable = true; + if (!kextIdentifier) { + return kOSKextReturnInvalidArgument; + } + + if (requestDict) { + OSBoolean *loadableArg; + loadableArg = OSDynamicCast(OSBoolean, _OSKextGetRequestArgument( + requestDict, kKextRequestArgumentBundleAvailability)); + /* If we find the "Bundle Available" arg, and it's false, then + * mark the bundle ID as _not_ loadable + */ + if (loadableArg && !loadableArg->getValue()) { + loadable = false; + } + } + + if (!sNonLoadableKextsByID) { + sNonLoadableKextsByID = OSDictionary::withCapacity(1); + } + + sNonLoadableKextsByID->setObject(kextIdentifier, OSBoolean::withBoolean(loadable)); + + OSKextLog(/* kext */ NULL, + kOSKextLogBasicLevel | kOSKextLogIPCFlag, + "KextLog: AuxKC bundle %s marked as %s", + kextIdentifier->getCStringNoCopy(), + (loadable ? "loadable" : "NOT loadable")); + + return kOSReturnSuccess; } /********************************************************************* @@ -10165,7 +13051,7 @@ OSKext::invokeRequestCallback( OSReturn callbackResult) { OSString * predicate = _OSKextGetRequestPredicate(callbackRecord); - OSNumber * resultNum = NULL;// must release + OSSharedPtr resultNum; if (!predicate) { goto finish; @@ -10181,7 +13067,7 @@ OSKext::invokeRequestCallback( * were the reply coming down from user space. */ _OSKextSetRequestArgument(callbackRecord, kKextRequestArgumentResultKey, - resultNum); + resultNum.get()); if (predicate->isEqualTo(kKextRequestPredicateRequestResource)) { /* This removes the pending callback record. @@ -10190,9 +13076,6 @@ OSKext::invokeRequestCallback( } finish: - if (resultNum) { - resultNum->release(); - } return; } @@ -10206,25 +13089,21 @@ OSKext::cancelRequest( void ** contextOut) { OSReturn result = kOSKextReturnNoMemory; - OSDictionary * callbackRecord = NULL; // must release - OSData * contextWrapper = NULL;// do not release + OSSharedPtr callbackRecord; + OSData * contextWrapper = NULL; // do not release IORecursiveLockLock(sKextLock); result = OSKext::dequeueCallbackForRequestTag(requestTag, - &callbackRecord); + callbackRecord); IORecursiveLockUnlock(sKextLock); if (result == kOSReturnSuccess && contextOut) { contextWrapper = OSDynamicCast(OSData, - _OSKextGetRequestArgument(callbackRecord, + _OSKextGetRequestArgument(callbackRecord.get(), kKextRequestArgumentContextKey)); *contextOut = _OSKextExtractPointer(contextWrapper); } - if (callbackRecord) { - callbackRecord->release(); - } - return result; } @@ -10261,7 +13140,7 @@ OSKext::invokeOrCancelRequestCallbacks( } vm_address_t callbackAddress = (vm_address_t) - _OSKextExtractPointer(callbackWrapper); + ptrauth_strip(_OSKextExtractPointer(callbackWrapper), ptrauth_key_function_pointer); if ((kmod_info->address <= callbackAddress) && (callbackAddress < (kmod_info->address + kmod_info->size))) { @@ -10310,7 +13189,7 @@ OSKext::countRequestCallbacks(void) } vm_address_t callbackAddress = (vm_address_t) - _OSKextExtractPointer(callbackWrapper); + ptrauth_strip(_OSKextExtractPointer(callbackWrapper), ptrauth_key_function_pointer); if ((kmod_info->address <= callbackAddress) && (callbackAddress < (kmod_info->address + kmod_info->size))) { @@ -10327,16 +13206,16 @@ finish: static OSReturn _OSKextCreateRequest( const char * predicate, - OSDictionary ** requestP) + OSSharedPtr & requestR) { OSReturn result = kOSKextReturnNoMemory; - OSDictionary * request = NULL; // 
must release on error + OSSharedPtr request; request = OSDictionary::withCapacity(2); if (!request) { goto finish; } - result = _OSDictionarySetCStringValue(request, + result = _OSDictionarySetCStringValue(request.get(), kKextRequestPredicateKey, predicate); if (result != kOSReturnSuccess) { goto finish; @@ -10344,12 +13223,8 @@ _OSKextCreateRequest( result = kOSReturnSuccess; finish: - if (result != kOSReturnSuccess) { - if (request) { - request->release(); - } - } else { - *requestP = request; + if (result == kOSReturnSuccess) { + requestR = os::move(request); } return result; @@ -10389,13 +13264,14 @@ _OSKextSetRequestArgument( { OSDictionary * args = OSDynamicCast(OSDictionary, requestDict->getObject(kKextRequestArgumentsKey)); + OSSharedPtr newArgs; if (!args) { - args = OSDictionary::withCapacity(2); + newArgs = OSDictionary::withCapacity(2); + args = newArgs.get(); if (!args) { goto finish; } requestDict->setObject(kKextRequestArgumentsKey, args); - args->release(); } if (args) { return args->setObject(argName, value); @@ -10421,6 +13297,24 @@ finish: return result; } +/********************************************************************* +*********************************************************************/ +static OSKextRequestResourceCallback +_OSKextExtractCallbackPointer(OSData * wrapper) +{ + OSKextRequestResourceCallback result = NULL; + const void * resultPtr = NULL; + + if (!wrapper) { + goto finish; + } + resultPtr = wrapper->getBytesNoCopy(); + result = *(OSKextRequestResourceCallback *)resultPtr; +finish: + return result; +} + + /********************************************************************* *********************************************************************/ static OSReturn @@ -10430,26 +13324,19 @@ _OSDictionarySetCStringValue( const char * cValue) { OSReturn result = kOSKextReturnNoMemory; - const OSSymbol * key = NULL; // must release - OSString * value = NULL; // must release + OSSharedPtr key; + OSSharedPtr value; key = OSSymbol::withCString(cKey); value = OSString::withCString(cValue); if (!key || !value) { goto finish; } - if (dict->setObject(key, value)) { + if (dict->setObject(key.get(), value.get())) { result = kOSReturnSuccess; } finish: - if (key) { - key->release(); - } - if (value) { - value->release(); - } - return result; } @@ -10461,7 +13348,7 @@ _OSArrayContainsCString( const char * cString) { bool result = false; - const OSSymbol * symbol = NULL; + OSSharedPtr symbol; uint32_t count, i; if (!array || !cString) { @@ -10483,12 +13370,10 @@ _OSArrayContainsCString( } finish: - if (symbol) { - symbol->release(); - } return result; } +#if CONFIG_KXLD /********************************************************************* * We really only care about boot / system start up related kexts. 
* We return true if we're less than REBUILD_MAX_TIME since start up, @@ -10514,6 +13399,7 @@ _OSKextInPrelinkRebuildWindow(void) } return true; } +#endif /* CONFIG_KXLD */ /********************************************************************* *********************************************************************/ @@ -10534,7 +13420,7 @@ _OSKextInUnloadedPrelinkedKexts( const OSSymbol * theBundleID ) } for (i = 0; i < unLoadedCount; i++) { - const OSSymbol * myBundleID;// do not release + const OSSymbol * myBundleID; // do not release myBundleID = OSDynamicCast(OSSymbol, sUnloadedPrelinkedKexts->getObject(i)); if (!myBundleID) { @@ -10556,16 +13442,15 @@ finish: /********************************************************************* *********************************************************************/ /* static */ -OSArray * +OSSharedPtr OSKext::copyAllKextPersonalities(bool filterSafeBootFlag) { - OSArray * result = NULL;// returned - OSCollectionIterator * kextIterator = NULL;// must release - OSArray * personalities = NULL;// must release - OSCollectionIterator * personalitiesIterator = NULL; // must release + OSSharedPtr result; + OSSharedPtr kextIterator; + OSSharedPtr personalities; - OSString * kextID = NULL;// do not release - OSKext * theKext = NULL;// do not release + OSString * kextID = NULL; // do not release + OSKext * theKext = NULL; // do not release IORecursiveLockLock(sKextLock); @@ -10577,28 +13462,26 @@ OSKext::copyAllKextPersonalities(bool filterSafeBootFlag) goto finish; } - kextIterator = OSCollectionIterator::withCollection(sKextsByID); + kextIterator = OSCollectionIterator::withCollection(sKextsByID.get()); if (!kextIterator) { goto finish; } while ((kextID = OSDynamicCast(OSString, kextIterator->getNextObject()))) { - if (personalitiesIterator) { - personalitiesIterator->release(); - personalitiesIterator = NULL; - } - if (personalities) { - personalities->release(); - personalities = NULL; - } - theKext = OSDynamicCast(OSKext, sKextsByID->getObject(kextID)); - if (!sSafeBoot || !filterSafeBootFlag || theKext->isLoadableInSafeBoot()) { + if (theKext->flags.requireExplicitLoad) { + OSKextLog(theKext, + kOSKextLogDebugLevel | + kOSKextLogLoadFlag, + "Kext %s requires an explicit kextload; " + "omitting its personalities.", + theKext->getIdentifierCString()); + } else if (!sSafeBoot || !filterSafeBootFlag || theKext->isLoadableInSafeBoot()) { personalities = theKext->copyPersonalitiesArray(); if (!personalities) { continue; } - result->merge(personalities); + result->merge(personalities.get()); } else { // xxx - check for better place to put this log msg OSKextLog(theKext, @@ -10613,16 +13496,6 @@ OSKext::copyAllKextPersonalities(bool filterSafeBootFlag) finish: IORecursiveLockUnlock(sKextLock); - if (kextIterator) { - kextIterator->release(); - } - if (personalitiesIterator) { - personalitiesIterator->release(); - } - if (personalities) { - personalities->release(); - } - return result; } @@ -10641,13 +13514,12 @@ OSKext::sendAllKextPersonalitiesToCatalog(bool startMatching) "to the IOCatalogue %s.", startMatching ? 
"and starting matching" : "but not starting matching"); - OSArray * personalities = OSKext::copyAllKextPersonalities( + OSSharedPtr personalities = OSKext::copyAllKextPersonalities( /* filterSafeBootFlag */ true); if (personalities) { - gIOCatalogue->addDrivers(personalities, startMatching); + gIOCatalogue->addDrivers(personalities.get(), startMatching); numPersonalities = personalities->getCount(); - personalities->release(); } OSKextLog(/* kext */ NULL, @@ -10663,15 +13535,15 @@ OSKext::sendAllKextPersonalitiesToCatalog(bool startMatching) * Do not make a deep copy, just convert the IOKitPersonalities dict * to an array for sending to the IOCatalogue. *********************************************************************/ -OSArray * +OSSharedPtr OSKext::copyPersonalitiesArray(void) { - OSArray * result = NULL; - OSDictionary * personalities = NULL;// do not release - OSCollectionIterator * personalitiesIterator = NULL;// must release + OSSharedPtr result; + OSDictionary * personalities = NULL; // do not release + OSSharedPtr personalitiesIterator; - OSString * personalityName = NULL;// do not release - OSString * personalityBundleIdentifier = NULL;// do not release + OSString * personalityName = NULL; // do not release + OSString * personalityBundleIdentifier = NULL; // do not release personalities = OSDynamicCast(OSDictionary, getPropertyForHostArch(kIOKitPersonalitiesKey)); @@ -10704,19 +13576,15 @@ OSKext::copyPersonalitiesArray(void) personality->getObject(kCFBundleIdentifierKey)); if (!personalityBundleIdentifier) { - personality->setObject(kCFBundleIdentifierKey, bundleID); - } else if (!personalityBundleIdentifier->isEqualTo(bundleID)) { - personality->setObject(kIOPersonalityPublisherKey, bundleID); + personality->setObject(kCFBundleIdentifierKey, bundleID.get()); + } else if (!personalityBundleIdentifier->isEqualTo(bundleID.get())) { + personality->setObject(kIOPersonalityPublisherKey, bundleID.get()); } result->setObject(personality); } finish: - if (personalitiesIterator) { - personalitiesIterator->release(); - } - return result; } @@ -10729,8 +13597,8 @@ OSKext::sendPersonalitiesToCatalog( OSArray * personalityNames) { OSReturn result = kOSReturnSuccess; - OSArray * personalitiesToSend = NULL;// must release - OSDictionary * kextPersonalities = NULL;// do not release + OSSharedPtr personalitiesToSend; + OSDictionary * kextPersonalities = NULL; // do not release int count, i; if (!sLoadEnabled) { @@ -10792,12 +13660,9 @@ OSKext::sendPersonalitiesToCatalog( numPersonalities, numPersonalities > 1 ? "ies" : "y", startMatching ? " and starting matching" : " but not starting matching"); - gIOCatalogue->addDrivers(personalitiesToSend, startMatching); + gIOCatalogue->addDrivers(personalitiesToSend.get(), startMatching); } finish: - if (personalitiesToSend) { - personalitiesToSend->release(); - } return result; } @@ -10808,7 +13673,7 @@ finish: void OSKext::removePersonalitiesFromCatalog(void) { - OSDictionary * personality = NULL; // do not release + OSSharedPtr personality; personality = OSDictionary::withCapacity(1); if (!personality) { @@ -10825,13 +13690,9 @@ OSKext::removePersonalitiesFromCatalog(void) /* Have the IOCatalog remove all personalities matching this kext's * bundle ID and trigger matching anew. 
*/ - gIOCatalogue->removeDrivers(personality, /* startMatching */ true); + gIOCatalogue->removeDrivers(personality.get(), /* startMatching */ true); finish: - if (personality) { - personality->release(); - } - return; } @@ -10866,8 +13727,6 @@ OSKext::setUserSpaceLogFilter( sUserSpaceLogMessageArray = OSArray::withCapacity(0); if (!sUserSpaceLogSpecArray || !sUserSpaceLogMessageArray) { - OSSafeReleaseNULL(sUserSpaceLogSpecArray); - OSSafeReleaseNULL(sUserSpaceLogMessageArray); allocError = true; } } @@ -10899,10 +13758,10 @@ OSKext::setUserSpaceLogFilter( * Do not call any function that takes sKextLock here! *********************************************************************/ /* static */ -OSArray * +OSSharedPtr OSKext::clearUserSpaceLogFilter(void) { - OSArray * result = NULL; + OSSharedPtr result; OSKextLogSpec oldLogFilter; OSKextLogSpec newLogFilter = kOSKextLogSilentFilter; @@ -10913,11 +13772,11 @@ OSKext::clearUserSpaceLogFilter(void) result = OSArray::withCapacity(2); if (result) { - result->setObject(sUserSpaceLogSpecArray); - result->setObject(sUserSpaceLogMessageArray); + result->setObject(sUserSpaceLogSpecArray.get()); + result->setObject(sUserSpaceLogMessageArray.get()); } - OSSafeReleaseNULL(sUserSpaceLogSpecArray); - OSSafeReleaseNULL(sUserSpaceLogMessageArray); + sUserSpaceLogSpecArray.reset(); + sUserSpaceLogMessageArray.reset(); oldLogFilter = sUserSpaceKextLogFilter; sUserSpaceKextLogFilter = newLogFilter; @@ -10996,7 +13855,7 @@ colorForFlags(OSKextLogSpec flags) case kOSKextLogDebugLevel: return VTMAGENTA; default: - return ""; // white + return ""; // white } } @@ -11071,10 +13930,10 @@ OSKextVLog( va_list argList; char stackBuffer[120]; uint32_t length = 0; - char * allocBuffer = NULL; // must kfree - OSNumber * logSpecNum = NULL; // must release - OSString * logString = NULL; // must release - char * buffer = stackBuffer;// do not free + char * allocBuffer = NULL; // must kfree + OSSharedPtr logSpecNum; + OSSharedPtr logString; + char * buffer = stackBuffer; // do not free IOLockLock(sKextLoggingLock); @@ -11101,7 +13960,8 @@ OSKextVLog( va_end(argList); if (length + 1 >= sizeof(stackBuffer)) { - allocBuffer = (char *)kalloc_tag((length + 1) * sizeof(char), VM_KERN_MEMORY_OSKEXT); + allocBuffer = (char *)kheap_alloc_tag(KHEAP_TEMP, + length + 1, Z_WAITOK, VM_KERN_MEMORY_OSKEXT); if (!allocBuffer) { goto finish; } @@ -11121,8 +13981,8 @@ OSKextVLog( logSpecNum = OSNumber::withNumber(msgLogSpec, 8 * sizeof(msgLogSpec)); logString = OSString::withCString(buffer); if (logSpecNum && logString) { - sUserSpaceLogSpecArray->setObject(logSpecNum); - sUserSpaceLogMessageArray->setObject(logString); + sUserSpaceLogSpecArray->setObject(logSpecNum.get()); + sUserSpaceLogMessageArray->setObject(logString.get()); } } @@ -11134,7 +13994,7 @@ OSKextVLog( * colorize the log message. */ if (!disableConsoleOutput && sBootArgLogFilterFound) { - const char * color = ""; // do not free + const char * color = ""; // do not free color = colorForFlags(msgLogSpec); printf("%s%s%s\n", colorForFlags(msgLogSpec), buffer, color[0] ? 
VTRESET : ""); @@ -11147,10 +14007,8 @@ finish: IOLockUnlock(sKextLoggingLock); if (allocBuffer) { - kfree(allocBuffer, (length + 1) * sizeof(char)); + kheap_free(KHEAP_TEMP, allocBuffer, (length + 1) * sizeof(char)); } - OSSafeReleaseNULL(logString); - OSSafeReleaseNULL(logSpecNum); return; } @@ -11177,7 +14035,7 @@ ScanForAddrInObject(OSObject * theObject, int indent) { const OSMetaClass * myTypeID; - OSCollectionIterator * myIter; + OSSharedPtr myIter; OSSymbol * myKey; OSObject * myValue; bool myResult = false; @@ -11198,6 +14056,8 @@ ScanForAddrInObject(OSObject * theObject, if (myIter == NULL) { return myResult; } + + // !! reset the iterator myIter->reset(); while ((myKey = OSDynamicCast(OSSymbol, myIter->getNextObject()))) { @@ -11212,7 +14072,9 @@ ScanForAddrInObject(OSObject * theObject, IOLog("OSDictionary key \"%s\" \n", myKey->getCStringNoCopy()); } } - myIter->release(); + + // !! release the iterator + myIter.reset(); } else if (myTypeID == OSTypeID(OSArray)) { OSArray * myArray; @@ -11221,6 +14083,7 @@ ScanForAddrInObject(OSObject * theObject, if (myIter == NULL) { return myResult; } + // !! reset the iterator myIter->reset(); while ((myValue = myIter->getNextObject())) { @@ -11233,7 +14096,8 @@ ScanForAddrInObject(OSObject * theObject, IOLog("OSArray: \n"); } } - myIter->release(); + // !! release the iterator + myIter.reset(); } else if (myTypeID == OSTypeID(OSString) || myTypeID == OSTypeID(OSSymbol)) { // should we look for addresses in strings? } else if (myTypeID == OSTypeID(OSData)) { @@ -11255,18 +14119,17 @@ ScanForAddrInObject(OSObject * theObject, if (kext_alloc_max != 0 && numberValue >= kext_alloc_base && numberValue < kext_alloc_max) { - OSKext * myKext = NULL;// must release (looked up) - // IOLog("found OSData %p in kext map %p to %p \n", - // *(myPtrPtr), - // (void *) kext_alloc_base, - // (void *) kext_alloc_max); + OSSharedPtr myKext; + // IOLog("found OSData %p in kext map %p to %p \n", + // *(myPtrPtr), + // (void *) kext_alloc_base, + // (void *) kext_alloc_max); myKext = OSKext::lookupKextWithAddress((vm_address_t) *(myPtrPtr)); if (myKext) { IOLog("found addr %p from an OSData obj within kext \"%s\" \n", *(myPtrPtr), myKext->getIdentifierCString()); - myKext->release(); } myResult = true; } @@ -11292,7 +14155,7 @@ ScanForAddrInObject(OSObject * theObject, if (kext_alloc_max != 0 && numberValue >= kext_alloc_base && numberValue < kext_alloc_max) { - OSKext * myKext = NULL;// must release (looked up) + OSSharedPtr myKext; IOLog("found OSNumber in kext map %p to %p \n", (void *) kext_alloc_base, (void *) kext_alloc_max); @@ -11302,7 +14165,6 @@ ScanForAddrInObject(OSObject * theObject, if (myKext) { IOLog("found in kext \"%s\" \n", myKext->getIdentifierCString()); - myKext->release(); } myResult = true; @@ -11333,7 +14195,7 @@ ScanForAddrInObject(OSObject * theObject, return myResult; } #endif // KASLR_KEXT_DEBUG -}; /* extern "C" */ +}; /* extern "C" */ #if PRAGMA_MARK #pragma mark Backtrace Dump & kmod_get_info() support @@ -11420,8 +14282,11 @@ OSKext::summaryIsInBacktrace( for (i = 0; i < cnt; i++) { vm_offset_t kscan_addr = addr[i]; - if ((kscan_addr >= summary->address) && - (kscan_addr < (summary->address + summary->size))) { +#if __has_feature(ptrauth_calls) + kscan_addr = (vm_offset_t)VM_KERNEL_STRIP_PTR(kscan_addr); +#endif /* __has_feature(ptrauth_calls) */ + if ((kscan_addr >= summary->text_exec_address) && + (kscan_addr < (summary->text_exec_address + summary->text_exec_size))) { return TRUE; } } @@ -11434,8 +14299,11 @@ 
OSKext::summaryIsInBacktrace( * sKextSummariesLock held. */ OSKextLoadedKextSummary * -OSKext::summaryForAddress(const uintptr_t addr) +OSKext::summaryForAddress(uintptr_t addr) { +#if __has_feature(ptrauth_calls) + addr = (uintptr_t)VM_KERNEL_STRIP_PTR(addr); +#endif /* __has_feature(ptrauth_calls) */ for (unsigned i = 0; i < gLoadedKextSummaries->numSummaries; ++i) { OSKextLoadedKextSummary *summary = &gLoadedKextSummaries->summaries[i]; if (!summary->address) { @@ -11477,10 +14345,14 @@ OSKext::kextForAddress(const void *address) uint32_t baseIdx; uint32_t lim; uintptr_t addr = (uintptr_t) address; + size_t i; if (!addr) { return NULL; } +#if __has_feature(ptrauth_calls) + addr = (uintptr_t)VM_KERNEL_STRIP_PTR(addr); +#endif /* __has_feature(ptrauth_calls) */ if (sKextAccountsCount) { IOSimpleLockLock(sKextAccountsLock); @@ -11505,38 +14377,35 @@ OSKext::kextForAddress(const void *address) if (!image && (addr >= vm_kernel_stext) && (addr < vm_kernel_etext)) { image = (void *) &_mh_execute_header; } + if (!image && gLoadedKextSummaries) { + IOLockLock(sKextSummariesLock); + for (i = 0; i < gLoadedKextSummaries->numSummaries; i++) { + OSKextLoadedKextSummary *summary = gLoadedKextSummaries->summaries + i; + if (addr >= summary->address && addr < summary->address + summary->size) { + image = (void *)summary->address; + } + } + IOLockUnlock(sKextSummariesLock); + } return image; } -/********************************************************************* -* scan list of loaded kext summaries looking for a load address match and if -* found return the UUID C string. If not found then set empty string. -*********************************************************************/ -static void findSummaryUUID( - uint32_t tag_ID, - uuid_string_t uuid); - -static void -findSummaryUUID( - uint32_t tag_ID, - uuid_string_t uuid) +/* + * Find a OSKextLoadedKextSummary given the ID from a kmod_info_t * + * Safe to call in panic context. + */ +static OSKextLoadedKextSummary * +findSummary(uint32_t tagID) { - u_int i; - - uuid[0] = 0x00; // default to no UUID - - for (i = 0; i < gLoadedKextSummaries->numSummaries; ++i) { - OSKextLoadedKextSummary * summary; - + OSKextLoadedKextSummary * summary; + for (size_t i = 0; i < gLoadedKextSummaries->numSummaries; ++i) { summary = gLoadedKextSummaries->summaries + i; - - if (summary->loadTag == tag_ID) { - (void) uuid_unparse(summary->uuid, uuid); - break; + if (summary->loadTag == tagID) { + return summary; } } - return; + return NULL; } /********************************************************************* @@ -11552,21 +14421,28 @@ OSKext::printSummary( uuid_string_t uuid; char version[kOSKextVersionMaxLength]; uint64_t tmpAddr; + uint64_t tmpSize; + OSKextLoadedKextSummary *dependencySummary; if (!OSKextVersionGetString(summary->version, version, sizeof(version))) { strlcpy(version, "unknown version", sizeof(version)); } (void) uuid_unparse(summary->uuid, uuid); +#if defined(__arm__) || defined(__arm64__) + tmpAddr = summary->text_exec_address; + tmpSize = summary->text_exec_size; +#else + tmpAddr = summary->address; + tmpSize = summary->size; +#endif if (kPrintKextsUnslide & flags) { - tmpAddr = ml_static_unslide(summary->address); - } else { - tmpAddr = summary->address; + tmpAddr = ml_static_unslide(tmpAddr); } (*printf_func)("%s%s(%s)[%s]@0x%llx->0x%llx\n", (kPrintKextsTerse & flags) ? 
"" : " ", summary->name, version, uuid, - tmpAddr, tmpAddr + summary->size - 1); + tmpAddr, tmpAddr + tmpSize - 1); if (kPrintKextsTerse & flags) { return; @@ -11594,24 +14470,32 @@ OSKext::printSummary( } if (!rinfo->address) { - continue; // skip fake entries for built-ins + continue; // skip fake entries for built-ins } - /* locate UUID in gLoadedKextSummaries */ - findSummaryUUID(rinfo->id, uuid); + dependencySummary = findSummary(rinfo->id); + uuid[0] = 0x00; + tmpAddr = rinfo->address; + tmpSize = rinfo->size; + if (dependencySummary) { + (void) uuid_unparse(dependencySummary->uuid, uuid); +#if defined(__arm__) || defined(__arm64__) + tmpAddr = dependencySummary->text_exec_address; + tmpSize = dependencySummary->text_exec_size; +#endif + } if (kPrintKextsUnslide & flags) { - tmpAddr = ml_static_unslide(rinfo->address); - } else { - tmpAddr = rinfo->address; + tmpAddr = ml_static_unslide(tmpAddr); } - (*printf_func)(" dependency: %s(%s)[%s]@%p\n", - rinfo->name, rinfo->version, uuid, tmpAddr); + (*printf_func)(" dependency: %s(%s)[%s]@%p->%p\n", + rinfo->name, rinfo->version, uuid, tmpAddr, tmpAddr + tmpSize - 1); } return; } +#if !defined(__arm__) && !defined(__arm64__) /******************************************************************************* * substitute() looks at an input string (a pointer within a larger buffer) * for a match to a substring, and on match it writes the marker & substitution @@ -11639,7 +14523,7 @@ substitute( char marker, char substitution) { - uint32_t substring_length = strnlen(substring, KMOD_MAX_NAME - 1); + size_t substring_length = strnlen(substring, KMOD_MAX_NAME - 1); /* On a substring match, append the marker (if there is one) and then * the substitution character, updating the output (to) index accordingly. @@ -11728,6 +14612,7 @@ compactIdentifier( return; } +#endif /* !defined(__arm__) && !defined(__arm64__) */ /******************************************************************************* * assemble_identifier_and_version() adds to a string buffer a compacted @@ -11736,23 +14621,27 @@ compactIdentifier( /* identPlusVers must be at least 2*KMOD_MAX_NAME in length. 
*/ -static int assemble_identifier_and_version( +static size_t assemble_identifier_and_version( kmod_info_t * kmod_info, char * identPlusVers, - int bufSize); + size_t bufSize); -static int +static size_t assemble_identifier_and_version( kmod_info_t * kmod_info, char * identPlusVers, - int bufSize) + size_t bufSize) { - int result = 0; + size_t result = 0; +#if defined(__arm__) || defined(__arm64__) + result = strlcpy(identPlusVers, kmod_info->name, KMOD_MAX_NAME); +#else compactIdentifier(kmod_info->name, identPlusVers, NULL); result = strnlen(identPlusVers, KMOD_MAX_NAME - 1); - identPlusVers[result++] = '\t'; // increment for real char - identPlusVers[result] = '\0'; // don't increment for nul char +#endif + identPlusVers[result++] = '\t'; // increment for real char + identPlusVers[result] = '\0'; // don't increment for nul char result = strlcat(identPlusVers, kmod_info->version, bufSize); if (result >= bufSize) { identPlusVers[bufSize - 1] = '\0'; @@ -11787,8 +14676,8 @@ OSKext::saveLoadedKextPanicListTyped( OSObject * rawKext = sLoadedKexts->getObject(i); OSKext * theKext = OSDynamicCast(OSKext, rawKext); int match; - uint32_t identPlusVersLength; - uint32_t tempLen; + size_t identPlusVersLength; + size_t tempLen; char identPlusVers[2 * KMOD_MAX_NAME]; if (!rawKext) { @@ -11874,7 +14763,8 @@ OSKext::saveLoadedKextPanicList(void) uint32_t newlist_size = 0; newlist_size = KEXT_PANICLIST_SIZE; - newlist = (char *)kalloc_tag(newlist_size, VM_KERN_MEMORY_OSKEXT); + newlist = (char *)kheap_alloc_tag(KHEAP_DATA_BUFFERS, newlist_size, + Z_WAITOK, VM_KERN_MEMORY_OSKEXT); if (!newlist) { OSKextLog(/* kext */ NULL, @@ -11902,7 +14792,8 @@ OSKext::saveLoadedKextPanicList(void) } if (loaded_kext_paniclist) { - kfree(loaded_kext_paniclist, loaded_kext_paniclist_size); + kheap_free(KHEAP_DATA_BUFFERS, loaded_kext_paniclist, + loaded_kext_paniclist_size); } loaded_kext_paniclist = newlist; newlist = NULL; @@ -11910,7 +14801,7 @@ OSKext::saveLoadedKextPanicList(void) finish: if (newlist) { - kfree(newlist, newlist_size); + kheap_free(KHEAP_TEMP, newlist, newlist_size); } return; } @@ -11924,7 +14815,7 @@ OSKext::savePanicString(bool isLoading) u_long len; if (!kmod_info) { - return; // do not goto finish here b/c of lock + return; // do not goto finish here b/c of lock } len = assemble_identifier_and_version( kmod_info, @@ -11958,14 +14849,14 @@ void OSKext::printKextPanicLists(int (*printf_func)(const char *fmt, ...)) { if (last_loaded_strlen) { - printf_func("last loaded kext at %llu: %.*s (addr %p, size %lu)\n", + printf_func("last started kext at %llu: %.*s (addr %p, size %lu)\n", AbsoluteTime_to_scalar(&last_loaded_timestamp), last_loaded_strlen, last_loaded_str_buf, last_loaded_address, last_loaded_size); } if (last_unloaded_strlen) { - printf_func("last unloaded kext at %llu: %.*s (addr %p, size %lu)\n", + printf_func("last stopped kext at %llu: %.*s (addr %p, size %lu)\n", AbsoluteTime_to_scalar(&last_unloaded_timestamp), last_unloaded_strlen, last_unloaded_str_buf, last_unloaded_address, last_unloaded_size); @@ -12160,7 +15051,7 @@ finish: void OSKext::updateLoadedKextSummary(OSKextLoadedKextSummary *summary) { - OSData *uuid; + OSSharedPtr uuid; strlcpy(summary->name, getIdentifierCString(), sizeof(summary->name)); @@ -12168,7 +15059,6 @@ OSKext::updateLoadedKextSummary(OSKextLoadedKextSummary *summary) uuid = copyUUID(); if (uuid) { memcpy(summary->uuid, uuid->getBytesNoCopy(), sizeof(summary->uuid)); - OSSafeReleaseNULL(uuid); } if (flags.builtin) { @@ -12186,6 +15076,11 @@ 
OSKext::updateLoadedKextSummary(OSKextLoadedKextSummary *summary) summary->flags = 0; summary->reference_list = (uint64_t) kmod_info->reference_list; + summary->text_exec_address = (uint64_t) getsegdatafromheader((kernel_mach_header_t *)summary->address, "__TEXT_EXEC", &summary->text_exec_size); + if (summary->text_exec_address == 0) { + // Fallback to __TEXT + summary->text_exec_address = (uint64_t) getsegdatafromheader((kernel_mach_header_t *)summary->address, "__TEXT", &summary->text_exec_size); + } return; } @@ -12201,8 +15096,10 @@ OSKext::updateActiveAccount(OSKextActiveAccount *accountp) bzero(accountp, sizeof(*accountp)); hdr = (kernel_mach_header_t *)kmod_info->address; - if (getcommandfromheader(hdr, LC_SEGMENT_SPLIT_INFO)) { - /* If this kext supports split segments, use the first + if (getcommandfromheader(hdr, LC_SEGMENT_SPLIT_INFO) || isInFileset()) { + /* + * If this kext supports split segments (or is in a new + * MH_FILESET kext collection), use the first * executable segment as the range for instructions * (and thus for backtracing. */ @@ -12246,6 +15143,96 @@ OSKext::isDriverKit(void) return FALSE; } +bool +OSKext::isInFileset(void) +{ + if (!kmod_info) { + goto check_prelinked; + } + + if (kmod_info->address && kernel_mach_header_is_in_fileset((kernel_mach_header_t *)kmod_info->address)) { + return true; + } + +check_prelinked: + if (isPrelinked()) { + /* + * If we haven't setup kmod_info yet, but we know + * we're loading a prelinked kext in an MH_FILESET KC, + * then return true + */ + kc_format_t kc_format; + if (PE_get_primary_kc_format(&kc_format) && kc_format == KCFormatFileset) { + return true; + } + } + return false; +} + +bool +OSKextSavedMutableSegment::initWithSegment(kernel_segment_command_t *seg) +{ + kern_return_t result; + if (!super::init()) { + return false; + } + if (seg == nullptr) { + return false; + } + result = kmem_alloc_pageable(kernel_map, (vm_offset_t *)&data, seg->vmsize, VM_KERN_MEMORY_KEXT); + if (result != KERN_SUCCESS) { + return false; + } + memcpy((void *)data, (const void *)seg->vmaddr, seg->vmsize); + savedSegment = seg; + vmsize = seg->vmsize; + vmaddr = seg->vmaddr; + return true; +} + +OSSharedPtr +OSKextSavedMutableSegment::withSegment(kernel_segment_command_t *seg) +{ + OSSharedPtr me = OSMakeShared(); + if (me && !me->initWithSegment(seg)) { + return nullptr; + } + return me; +} + +void +OSKextSavedMutableSegment::free(void) +{ + if (data) { + kmem_free(kernel_map, (vm_offset_t)data, vmsize); + } +} + +vm_offset_t +OSKextSavedMutableSegment::getVMAddr() const +{ + return vmaddr; +} + +vm_offset_t +OSKextSavedMutableSegment::getVMSize() const +{ + return vmsize; +} + +OSReturn +OSKextSavedMutableSegment::restoreContents(kernel_segment_command_t *seg) +{ + if (seg != savedSegment) { + return kOSKextReturnInvalidArgument; + } + if (seg->vmaddr != vmaddr || seg->vmsize != vmsize) { + return kOSKextReturnInvalidArgument; + } + memcpy((void *)seg->vmaddr, data, vmsize); + return kOSReturnSuccess; +} + extern "C" const vm_allocation_site_t * OSKextGetAllocationSiteForCaller(uintptr_t address) { @@ -12255,6 +15242,9 @@ OSKextGetAllocationSiteForCaller(uintptr_t address) uint32_t baseIdx; uint32_t lim; +#if __has_feature(ptrauth_calls) + address = (uintptr_t)VM_KERNEL_STRIP_PTR(address); +#endif /* __has_feature(ptrauth_calls) */ IOSimpleLockLock(sKextAccountsLock); site = releasesite = NULL; @@ -12315,18 +15305,36 @@ OSKextFreeSite(vm_allocation_site_t * site) int OSKextGetUUIDForName(const char *name, uuid_t uuid) { - OSKext *kext = 
OSKext::lookupKextWithIdentifier(name); + OSSharedPtr kext = OSKext::lookupKextWithIdentifier(name); if (!kext) { return 1; } - OSData *uuid_data = kext->copyUUID(); + OSSharedPtr uuid_data = kext->copyUUID(); if (uuid_data) { memcpy(uuid, uuid_data->getBytesNoCopy(), sizeof(uuid_t)); - OSSafeReleaseNULL(uuid_data); return 0; } return 1; } #endif + +static int +sysctl_willuserspacereboot +(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req) +{ + int new_value = 0, old_value = 0, changed = 0; + int error = sysctl_io_number(req, old_value, sizeof(int), &new_value, &changed); + if (error) { + return error; + } + if (changed) { + OSKext::willUserspaceReboot(); + } + return 0; +} + +static SYSCTL_PROC(_kern, OID_AUTO, willuserspacereboot, + CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, + NULL, 0, sysctl_willuserspacereboot, "I", ""); diff --git a/libkern/c++/OSMetaClass.cpp b/libkern/c++/OSMetaClass.cpp index 6015d4683..46380c336 100644 --- a/libkern/c++/OSMetaClass.cpp +++ b/libkern/c++/OSMetaClass.cpp @@ -129,7 +129,7 @@ struct ExpansionData { /********************************************************************* * Reserved vtable functions. *********************************************************************/ -#if SLOT_USED +#if defined(__arm64__) || defined(__arm__) void OSMetaClassBase::_RESERVEDOSMetaClassBase0() { @@ -150,7 +150,7 @@ OSMetaClassBase::_RESERVEDOSMetaClassBase3() { panic("OSMetaClassBase::_RESERVEDOSMetaClassBase%d called.", 3); } -#endif /* SLOT_USED */ +#endif /* defined(__arm64__) || defined(__arm__) */ // As these slots are used move them up inside the #if above void @@ -196,14 +196,13 @@ OSMetaClassBase::_RESERVEDOSMetaClassBase6() */ OSMetaClassBase::_ptf_t -#if defined(HAS_APPLE_PAC) && __has_feature(ptrauth_type_discriminator) +#if defined(HAS_APPLE_PAC) && \ + __has_feature(ptrauth_member_function_pointer_type_discrimination) OSMetaClassBase::_ptmf2ptf(const OSMetaClassBase *self __attribute__((unused)), - void (OSMetaClassBase::*func)(void), uintptr_t typeDisc) + void (OSMetaClassBase::*func)(void)) #else OSMetaClassBase::_ptmf2ptf(const OSMetaClassBase *self, - void (OSMetaClassBase::*func)(void), - uintptr_t typeDisc - __attribute__((unused))) + void (OSMetaClassBase::*func)(void)) #endif { struct ptmf_t { @@ -219,11 +218,18 @@ OSMetaClassBase::_ptmf2ptf(const OSMetaClassBase *self, map.fIn = func; pfn = map.pTMF.fPFN; -#if defined(HAS_APPLE_PAC) && __has_feature(ptrauth_type_discriminator) +#if defined(HAS_APPLE_PAC) && \ + __has_feature(ptrauth_member_function_pointer_type_discrimination) // Authenticate 'pfn' using the member function pointer type discriminator // and resign it as a C function pointer. 'pfn' can point to either a // non-virtual function or a virtual member function thunk. - pfn = ptrauth_auth_function(pfn, ptrauth_key_function_pointer, typeDisc); + // It can also be NULL. + if (pfn) { + pfn = ptrauth_auth_and_resign(pfn, ptrauth_key_function_pointer, + ptrauth_type_discriminator(__typeof__(func)), + ptrauth_key_function_pointer, + ptrauth_function_pointer_type_discriminator(_ptf_t)); + } return pfn; #else if (map.pTMF.delta & 1) { @@ -241,9 +247,14 @@ OSMetaClassBase::_ptmf2ptf(const OSMetaClassBase *self, uint32_t entity_hash = ((uintptr_t)pfn) >> 32; pfn = (_ptf_t)(((uintptr_t) pfn) & 0xFFFFFFFF); +#if __has_builtin(__builtin_get_vtable_pointer) + const _ptf_t *vtablep = + (const _ptf_t *)__builtin_get_vtable_pointer(u.fObj); +#else // Authenticate the vtable pointer. 
- _ptf_t *vtablep = ptrauth_auth_data(*u.vtablep, + const _ptf_t *vtablep = ptrauth_auth_data(*u.vtablep, ptrauth_key_cxx_vtable_pointer, 0); +#endif // Calculate the address of the vtable entry. _ptf_t *vtentryp = (_ptf_t *)(((uintptr_t)vtablep) + (uintptr_t)pfn); // Load the pointer from the vtable entry. @@ -252,7 +263,8 @@ OSMetaClassBase::_ptmf2ptf(const OSMetaClassBase *self, // Finally, resign the vtable entry as a function pointer. uintptr_t auth_data = ptrauth_blend_discriminator(vtentryp, entity_hash); pfn = ptrauth_auth_and_resign(pfn, ptrauth_key_function_pointer, - auth_data, ptrauth_key_function_pointer, 0); + auth_data, ptrauth_key_function_pointer, + ptrauth_function_pointer_type_discriminator(_ptf_t)); #else /* defined(HAS_APPLE_PAC) */ pfn = *(_ptf_t *)(((uintptr_t)*u.vtablep) + (uintptr_t)pfn); #endif /* !defined(HAS_APPLE_PAC) */ @@ -627,11 +639,26 @@ OSMetaClass::OSMetaClass( } } +OSMetaClass::OSMetaClass( + const char * inClassName, + const OSMetaClass * inSuperClass, + unsigned int inClassSize, + zone_t * inZone, + const char * zone_name, + zone_create_flags_t zflags) : OSMetaClass(inClassName, inSuperClass, + inClassSize) +{ + if (!(kIOTracking & gIOKitDebug)) { + *inZone = zone_create(zone_name, inClassSize, + (zone_create_flags_t) (ZC_ZFREE_CLEARMEM | zflags)); + } +} + /********************************************************************* *********************************************************************/ OSMetaClass::~OSMetaClass() { - OSKext * myKext = reserved ? reserved->kext : NULL; // do not release + OSKext * myKext = reserved->kext; // do not release /* Hack alert: 'className' is a C string during early C++ init, and * is converted to a real OSSymbol only when we record the OSKext in @@ -860,15 +887,15 @@ OSMetaClass::postModLoad(void * loadHandle) /* Log this error here so we can include the class name. 
* xxx - we should look up the other kext that defines the class */ -#if CONFIG_EMBEDDED - panic( -#else +#if defined(XNU_TARGET_OS_OSX) OSKextLog(myKext, kOSMetaClassLogSpec, -#endif /* CONFIG_EMBEDDED */ - "OSMetaClass: Kext %s class %s is a duplicate;" - "kext %s already has a class by that name.", - sStalled->kextIdentifier, (const char *)me->className, - ((OSKext *)orig->reserved->kext)->getIdentifierCString()); +#else + panic( +#endif /* defined(XNU_TARGET_OS_OSX) */ + "OSMetaClass: Kext %s class %s is a duplicate;" + "kext %s already has a class by that name.", + sStalled->kextIdentifier, (const char *)me->className, + ((OSKext *)orig->reserved->kext)->getIdentifierCString()); result = kOSMetaClassDuplicateClass; break; } diff --git a/libkern/c++/OSNumber.cpp b/libkern/c++/OSNumber.cpp index 6b6a6caae..26cbde47c 100644 --- a/libkern/c++/OSNumber.cpp +++ b/libkern/c++/OSNumber.cpp @@ -27,18 +27,22 @@ */ /* IOOffset.m created by rsulack on Wed 17-Sep-1997 */ +#define IOKIT_ENABLE_SHARED_PTR + #include #include #include #include +#include #include #define sizeMask (~0ULL >> (64 - size)) #define super OSObject -OSDefineMetaClassAndStructors(OSNumber, OSObject) +OSDefineMetaClassAndStructorsWithZone(OSNumber, OSObject, + (zone_create_flags_t) (ZC_CACHING | ZC_ZFREE_CLEARMEM)) OSMetaClassDefineReservedUnused(OSNumber, 0); OSMetaClassDefineReservedUnused(OSNumber, 1); @@ -77,28 +81,26 @@ OSNumber::free() super::free(); } -OSNumber * +OSSharedPtr OSNumber::withNumber(unsigned long long value, unsigned int newNumberOfBits) { - OSNumber *me = new OSNumber; + OSSharedPtr me = OSMakeShared(); if (me && !me->init(value, newNumberOfBits)) { - me->release(); - return NULL; + return nullptr; } return me; } -OSNumber * +OSSharedPtr OSNumber::withNumber(const char *value, unsigned int newNumberOfBits) { - OSNumber *me = new OSNumber; + OSSharedPtr me = OSMakeShared(); if (me && !me->init(value, newNumberOfBits)) { - me->release(); - return NULL; + return nullptr; } return me; diff --git a/libkern/c++/OSObject.cpp b/libkern/c++/OSObject.cpp index 61168bf48..a72fff4ea 100644 --- a/libkern/c++/OSObject.cpp +++ b/libkern/c++/OSObject.cpp @@ -42,7 +42,7 @@ #include __BEGIN_DECLS -int debug_ivars_size; +size_t debug_ivars_size; __END_DECLS @@ -289,9 +289,9 @@ OSObject::operator new(size_t size) } #endif - void * mem = kalloc_tag_bt(size, VM_KERN_MEMORY_LIBKERN); + void *mem = kheap_alloc_tag_bt(KHEAP_DEFAULT, size, + (zalloc_flags_t) (Z_WAITOK | Z_ZERO), VM_KERN_MEMORY_LIBKERN); assert(mem); - bzero(mem, size); OSIVAR_ACCUMSIZE(size); return (void *) mem; @@ -310,10 +310,47 @@ OSObject::operator delete(void * mem, size_t size) } #endif - kfree(mem, size); + kern_os_kfree(mem, size); OSIVAR_ACCUMSIZE(-size); } +__BEGIN_DECLS +void *OSObject_operator_new_external(size_t size); +void * +OSObject_operator_new_external(size_t size) +{ + #if IOTRACKING + if (kIOTracking & gIOKitDebug) { + return OSMetaClass::trackedNew(size); + } +#endif + + void * mem = kheap_alloc_tag_bt(KHEAP_KEXT, size, + (zalloc_flags_t) (Z_WAITOK | Z_ZERO), VM_KERN_MEMORY_LIBKERN); + assert(mem); + OSIVAR_ACCUMSIZE(size); + + return (void *) mem; +} + +void OSObject_operator_delete_external(void * mem, size_t size); +void +OSObject_operator_delete_external(void * mem, size_t size) +{ + if (!mem) { + return; + } + +#if IOTRACKING + if (kIOTracking & gIOKitDebug) { + return OSMetaClass::trackedDelete(mem, size); + } +#endif + + kheap_free(KHEAP_KEXT, mem, size); + OSIVAR_ACCUMSIZE(-size); +} +__END_DECLS bool OSObject::init() { diff --git 
a/libkern/c++/OSOrderedSet.cpp b/libkern/c++/OSOrderedSet.cpp index 88230f659..6f95621b3 100644 --- a/libkern/c++/OSOrderedSet.cpp +++ b/libkern/c++/OSOrderedSet.cpp @@ -26,9 +26,13 @@ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ +#define IOKIT_ENABLE_SHARED_PTR + #include -#include #include +#include +#include +#include #define super OSCollection @@ -44,8 +48,7 @@ OSMetaClassDefineReservedUnused(OSOrderedSet, 7); struct _Element { - const OSMetaClassBase * obj; -// unsigned int pri; + OSTaggedPtr obj; }; #define EXT_CAST(obj) \ @@ -84,16 +87,15 @@ initWithCapacity(unsigned int inCapacity, return true; } -OSOrderedSet * +OSSharedPtr OSOrderedSet:: withCapacity(unsigned int capacity, OSOrderFunction ordering, void * orderingRef) { - OSOrderedSet *me = new OSOrderedSet; + auto me = OSMakeShared(); if (me && !me->initWithCapacity(capacity, ordering, orderingRef)) { - me->release(); - me = NULL; + return nullptr; } return me; @@ -139,8 +141,8 @@ unsigned int OSOrderedSet::ensureCapacity(unsigned int newCapacity) { _Element *newArray; - unsigned int finalCapacity; - vm_size_t oldSize, newSize; + vm_size_t finalCapacity; + vm_size_t oldSize, newSize; if (newCapacity <= capacity) { return capacity; @@ -149,8 +151,7 @@ OSOrderedSet::ensureCapacity(unsigned int newCapacity) // round up finalCapacity = (((newCapacity - 1) / capacityIncrement) + 1) * capacityIncrement; - if ((finalCapacity < newCapacity) || - (finalCapacity > (UINT_MAX / sizeof(_Element)))) { + if (finalCapacity < newCapacity) { return capacity; } newSize = sizeof(_Element) * finalCapacity; @@ -158,7 +159,12 @@ OSOrderedSet::ensureCapacity(unsigned int newCapacity) newArray = (_Element *) kallocp_container(&newSize); if (newArray) { // use all of the actual allocation size - finalCapacity = newSize / sizeof(_Element); + finalCapacity = (newSize / sizeof(_Element)); + if (finalCapacity > UINT_MAX) { + // failure, too large + kfree(newArray, newSize); + return capacity; + } oldSize = sizeof(_Element) * capacity; @@ -168,7 +174,7 @@ OSOrderedSet::ensureCapacity(unsigned int newCapacity) bzero(&newArray[capacity], newSize - oldSize); kfree(array, oldSize); array = newArray; - capacity = finalCapacity; + capacity = (unsigned int) finalCapacity; } return capacity; @@ -182,7 +188,7 @@ OSOrderedSet::flushCollection() haveUpdated(); for (i = 0; i < count; i++) { - array[i].obj->taggedRelease(OSTypeID(OSCollection)); + array[i].obj.reset(); } count = 0; @@ -211,17 +217,20 @@ OSOrderedSet::setObject(unsigned int index, const OSMetaClassBase *anObject) haveUpdated(); if (index != count) { for (i = count; i > index; i--) { - array[i] = array[i - 1]; + array[i] = os::move(array[i - 1]); } } - array[index].obj = anObject; -// array[index].pri = pri; - anObject->taggedRetain(OSTypeID(OSCollection)); + array[index].obj.reset(anObject, OSRetain); count++; return true; } +bool +OSOrderedSet::setObject(unsigned int index, OSSharedPtr const& anObject) +{ + return setObject(index, anObject.get()); +} bool OSOrderedSet::setFirstObject(const OSMetaClassBase *anObject) @@ -229,12 +238,24 @@ OSOrderedSet::setFirstObject(const OSMetaClassBase *anObject) return setObject(0, anObject); } +bool +OSOrderedSet::setFirstObject(OSSharedPtr const& anObject) +{ + return setFirstObject(anObject.get()); +} + bool OSOrderedSet::setLastObject(const OSMetaClassBase *anObject) { return setObject( count, anObject); } +bool +OSOrderedSet::setLastObject(OSSharedPtr const& anObject) +{ + return setLastObject(anObject.get()); +} + #define ORDER(obj1, obj2) \ (ordering ? 
((*ordering)( (const OSObject *) obj1, (const OSObject *) obj2, orderingRef)) : 0) @@ -246,13 +267,19 @@ OSOrderedSet::setObject(const OSMetaClassBase *anObject ) // queue it behind those with same priority for (i = 0; - (i < count) && (ORDER(array[i].obj, anObject) >= 0); + (i < count) && (ORDER(array[i].obj.get(), anObject) >= 0); i++) { } return setObject(i, anObject); } +bool +OSOrderedSet::setObject(OSSharedPtr const& anObject) +{ + return setObject(anObject.get()); +} + void OSOrderedSet::removeObject(const OSMetaClassBase *anObject) { @@ -261,11 +288,11 @@ OSOrderedSet::removeObject(const OSMetaClassBase *anObject) for (i = 0; i < count; i++) { if (deleted) { - array[i - 1] = array[i]; + array[i - 1] = os::move(array[i]); } else if (array[i].obj == anObject) { deleted = true; haveUpdated(); // Pity we can't flush the log - array[i].obj->taggedRelease(OSTypeID(OSCollection)); + array[i].obj.reset(); } } @@ -274,6 +301,12 @@ OSOrderedSet::removeObject(const OSMetaClassBase *anObject) } } +void +OSOrderedSet::removeObject(OSSharedPtr const& anObject) +{ + return removeObject(anObject.get()); +} + bool OSOrderedSet::containsObject(const OSMetaClassBase *anObject) const { @@ -301,17 +334,14 @@ OSOrderedSet::getObject( unsigned int index ) const return NULL; } -// if( pri) -// *pri = array[index].pri; - - return const_cast((const OSObject *) array[index].obj); + return const_cast((const OSObject *) array[index].obj.get()); } OSObject * OSOrderedSet::getFirstObject() const { if (count) { - return const_cast((const OSObject *) array[0].obj); + return const_cast((const OSObject *) array[0].obj.get()); } else { return NULL; } @@ -321,7 +351,7 @@ OSObject * OSOrderedSet::getLastObject() const { if (count) { - return const_cast((const OSObject *) array[count - 1].obj); + return const_cast((const OSObject *) array[count - 1].obj.get()); } else { return NULL; } @@ -397,7 +427,7 @@ getNextObjectForIterator(void *inIterator, OSObject **ret) const unsigned int index = (*iteratorP)++; if (index < count) { - *ret = const_cast((const OSObject *) array[index].obj); + *ret = const_cast((const OSObject *) array[index].obj.get()); } else { *ret = NULL; } @@ -413,7 +443,7 @@ OSOrderedSet::setOptions(unsigned options, unsigned mask, void *) if ((old ^ options) & mask) { // Value changed need to recurse over all of the child collections for (unsigned i = 0; i < count; i++) { - OSCollection *coll = OSDynamicCast(OSCollection, array[i].obj); + OSCollection *coll = OSDynamicCast(OSCollection, array[i].obj.get()); if (coll) { coll->setOptions(options, mask); } @@ -423,18 +453,19 @@ OSOrderedSet::setOptions(unsigned options, unsigned mask, void *) return old; } -OSCollection * +OSSharedPtr OSOrderedSet::copyCollection(OSDictionary *cycleDict) { - bool allocDict = !cycleDict; - OSCollection *ret = NULL; - OSOrderedSet *newSet = NULL; - - if (allocDict) { - cycleDict = OSDictionary::withCapacity(16); - if (!cycleDict) { - return NULL; + OSSharedPtr ourCycleDict; + OSSharedPtr ret; + OSSharedPtr newSet; + + if (!cycleDict) { + ourCycleDict = OSDictionary::withCapacity(16); + if (!ourCycleDict) { + return nullptr; } + cycleDict = ourCycleDict.get(); } do { @@ -451,40 +482,28 @@ OSOrderedSet::copyCollection(OSDictionary *cycleDict) } // Insert object into cycle Dictionary - cycleDict->setObject((const OSSymbol *) this, newSet); + cycleDict->setObject((const OSSymbol *) this, newSet.get()); newSet->capacityIncrement = capacityIncrement; // Now copy over the contents to the new duplicate for (unsigned int i = 0; i < 
count; i++) { - OSObject *obj = EXT_CAST(array[i].obj); + OSObject *obj = EXT_CAST(array[i].obj.get()); OSCollection *coll = OSDynamicCast(OSCollection, obj); if (coll) { - OSCollection *newColl = coll->copyCollection(cycleDict); + OSSharedPtr newColl = coll->copyCollection(cycleDict); if (newColl) { - obj = newColl; // Rely on cycleDict ref for a bit - newColl->release(); + obj = newColl.get(); // Rely on cycleDict ref for a bit } else { - goto abortCopy; + return ret; } } - ; + newSet->setLastObject(obj); } - ; - ret = newSet; - newSet = NULL; + ret = os::move(newSet); } while (false); -abortCopy: - if (newSet) { - newSet->release(); - } - - if (allocDict) { - cycleDict->release(); - } - return ret; } diff --git a/libkern/c++/OSRuntime.cpp b/libkern/c++/OSRuntime.cpp index ba1dd30b1..40bd13b6f 100644 --- a/libkern/c++/OSRuntime.cpp +++ b/libkern/c++/OSRuntime.cpp @@ -36,6 +36,12 @@ #include #include +#if defined(HAS_APPLE_PAC) +#include +#define PTRAUTH_STRIP_STRUCTOR(x) ((uintptr_t) ptrauth_strip(ptrauth_nop_cast(void *, (x)), ptrauth_key_function_pointer)) +#else /* defined(HAS_APPLE_PAC) */ +#define PTRAUTH_STRIP_STRUCTOR(x) ((uintptr_t) (x)) +#endif /* !defined(HAS_APPLE_PAC) */ __BEGIN_DECLS @@ -70,7 +76,7 @@ OSKextLogSpec kOSRuntimeLogSpec = *********************************************************************/ static bool gKernelCPPInitialized = false; -#define OSRuntimeLog(kext, flags, format, args...) \ +#define OSRuntimeLog(kext, flags, format, args ...) \ do { \ if (gKernelCPPInitialized) { \ OSKextLog((kext), (flags), (format), ## args); \ @@ -79,100 +85,6 @@ static bool gKernelCPPInitialized = false; } \ } while (0) -#if PRAGMA_MARK -#pragma mark kern_os Allocator Package -#endif /* PRAGMA_MARK */ -/********************************************************************* -* kern_os Allocator Package -*********************************************************************/ - -/********************************************************************* -*********************************************************************/ -#if OSALLOCDEBUG -extern int debug_iomalloc_size; -#endif - -/********************************************************************* -*********************************************************************/ -void * -kern_os_malloc(size_t size) -{ - void *mem; - if (size == 0) { - return NULL; - } - - mem = kallocp_tag_bt((vm_size_t *)&size, VM_KERN_MEMORY_LIBKERN); - if (!mem) { - return NULL; - } - -#if OSALLOCDEBUG - OSAddAtomic(size, &debug_iomalloc_size); -#endif - - bzero(mem, size); - - return mem; -} - -/********************************************************************* -*********************************************************************/ -void -kern_os_free(void * addr) -{ - size_t size; - size = kalloc_size(addr); -#if OSALLOCDEBUG - OSAddAtomic(-size, &debug_iomalloc_size); -#endif - - kfree_addr(addr); -} - -/********************************************************************* -*********************************************************************/ -void * -kern_os_realloc( - void * addr, - size_t nsize) -{ - void *nmem; - size_t osize; - - if (!addr) { - return kern_os_malloc(nsize); - } - - osize = kalloc_size(addr); - if (nsize == osize) { - return addr; - } - - if (nsize == 0) { - kfree_addr(addr); - return NULL; - } - - nmem = kallocp_tag_bt((vm_size_t *)&nsize, VM_KERN_MEMORY_LIBKERN); - if (!nmem) { - kfree_addr(addr); - return NULL; - } - -#if OSALLOCDEBUG - OSAddAtomic((nsize - osize), &debug_iomalloc_size); -#endif - - if (nsize > osize) { - 
(void)memset((char *)nmem + osize, 0, nsize - osize); - } - (void)memcpy(nmem, addr, (nsize > osize) ? osize : nsize); - kfree_addr(addr); - - return nmem; -} - #if PRAGMA_MARK #pragma mark Libkern Init #endif /* PRAGMA_MARK */ @@ -240,10 +152,6 @@ __END_DECLS * kern_os C++ Runtime Load/Unload *********************************************************************/ -#if defined(HAS_APPLE_PAC) -#include -#endif /* defined(HAS_APPLE_PAC) */ - typedef void (*structor_t)(void); static bool @@ -265,6 +173,9 @@ OSRuntimeCallStructorsInSection( if (strncmp(section->sectname, sectionName, sizeof(section->sectname) - 1)) { continue; } + if (section->size == 0) { + continue; + } structor_t * structors = (structor_t *)section->addr; if (!structors) { @@ -272,26 +183,28 @@ OSRuntimeCallStructorsInSection( } structor_t structor; - unsigned int num_structors = section->size / sizeof(structor_t); + uintptr_t value; + unsigned long num_structors = section->size / sizeof(structor_t); unsigned int hit_null_structor = 0; - unsigned int firstIndex = 0; + unsigned long firstIndex = 0; if (textStart) { // bsearch for any in range - unsigned int baseIdx; - unsigned int lim; - uintptr_t value; + unsigned long baseIdx; + unsigned long lim; firstIndex = num_structors; for (lim = num_structors, baseIdx = 0; lim; lim >>= 1) { - value = (uintptr_t) structors[baseIdx + (lim >> 1)]; - if (!value) { + structor = structors[baseIdx + (lim >> 1)]; + if (!structor) { panic("%s: null structor", kmodInfo->name); } + value = PTRAUTH_STRIP_STRUCTOR(structor); if ((value >= textStart) && (value < textEnd)) { firstIndex = (baseIdx + (lim >> 1)); // scan back for the first in range for (; firstIndex; firstIndex--) { - value = (uintptr_t) structors[firstIndex - 1]; + structor = structors[firstIndex - 1]; + value = PTRAUTH_STRIP_STRUCTOR(structor); if ((value < textStart) || (value >= textEnd)) { break; } @@ -312,15 +225,11 @@ OSRuntimeCallStructorsInSection( && (!metaHandle || OSMetaClass::checkModLoad(metaHandle)); firstIndex++) { if ((structor = structors[firstIndex])) { - if ((textStart && ((uintptr_t) structor < textStart)) - || (textEnd && ((uintptr_t) structor >= textEnd))) { + value = PTRAUTH_STRIP_STRUCTOR(structor); + if ((textStart && (value < textStart)) + || (textEnd && (value >= textEnd))) { break; } - -#if !defined(XXX) && defined(HAS_APPLE_PAC) - structor = __builtin_ptrauth_strip(structor, ptrauth_key_function_pointer); - structor = __builtin_ptrauth_sign_unauthenticated(structor, ptrauth_key_function_pointer, 0); -#endif (*structor)(); } else if (!hit_null_structor) { hit_null_structor = 1; @@ -420,6 +329,91 @@ finish: return result; } +#if defined(HAS_APPLE_PAC) +static inline void +OSRuntimeSignStructorsInSegment(kernel_segment_command_t *segment) +{ + kernel_section_t * section; + structor_t * structors; + volatile structor_t structor; + size_t idx, num_structors; + + for (section = firstsect(segment); + section != NULL; + section = nextsect(segment, section)) { + if ((S_MOD_INIT_FUNC_POINTERS != (SECTION_TYPE & section->flags)) + && (S_MOD_TERM_FUNC_POINTERS != (SECTION_TYPE & section->flags))) { + continue; + } + structors = (structor_t *)section->addr; + if (!structors) { + continue; + } + num_structors = section->size / sizeof(structor_t); + for (idx = 0; idx < num_structors; idx++) { + structor = structors[idx]; + if (NULL == structor) { + continue; + } + structor = ptrauth_strip(structor, ptrauth_key_function_pointer); + structor = ptrauth_sign_unauthenticated(structor, ptrauth_key_function_pointer, 
ptrauth_function_pointer_type_discriminator(void (*)(void))); + structors[idx] = structor; + } + } /* for (section...) */ +} +#endif + +/********************************************************************* +*********************************************************************/ +void +OSRuntimeSignStructors( + kernel_mach_header_t * header __unused) +{ +#if defined(HAS_APPLE_PAC) + + kernel_segment_command_t * segment; + + for (segment = firstsegfromheader(header); + segment != NULL; + segment = nextsegfromheader(header, segment)) { + OSRuntimeSignStructorsInSegment(segment); + } /* for (segment...) */ +#endif /* !defined(XXX) && defined(HAS_APPLE_PAC) */ +} + +/********************************************************************* +*********************************************************************/ +void +OSRuntimeSignStructorsInFileset( + kernel_mach_header_t * fileset_header __unused) +{ +#if defined(HAS_APPLE_PAC) + struct load_command *lc; + + lc = (struct load_command *)((uintptr_t)fileset_header + sizeof(*fileset_header)); + for (uint32_t i = 0; i < fileset_header->ncmds; i++, + lc = (struct load_command *)((uintptr_t)lc + lc->cmdsize)) { + if (lc->cmd == LC_FILESET_ENTRY) { + struct fileset_entry_command *fse; + kernel_mach_header_t *mh; + + fse = (struct fileset_entry_command *)(uintptr_t)lc; + mh = (kernel_mach_header_t *)((uintptr_t)fse->vmaddr); + OSRuntimeSignStructors(mh); + } else if (lc->cmd == LC_SEGMENT_64) { + /* + * Slide/adjust all LC_SEGMENT_64 commands in the fileset + * (and any sections in those segments) + */ + kernel_segment_command_t *seg; + seg = (kernel_segment_command_t *)(uintptr_t)lc; + OSRuntimeSignStructorsInSegment(seg); + } + } + +#endif /* defined(HAS_APPLE_PAC) */ +} + /********************************************************************* *********************************************************************/ kern_return_t @@ -551,10 +545,9 @@ OSRuntimeUnloadCPPForSegment( void * operator new(size_t size) { - void * result; - - result = (void *) kern_os_malloc(size); - return result; + assert(size); + return kheap_alloc_tag_bt(KERN_OS_MALLOC, size, + (zalloc_flags_t) (Z_WAITOK | Z_ZERO), VM_KERN_MEMORY_LIBKERN); } void @@ -563,17 +556,15 @@ operator delete(void * addr) noexcept #endif { - kern_os_free(addr); + kheap_free_addr(KERN_OS_MALLOC, addr); return; } void * operator new[](unsigned long sz) { - if (sz == 0) { - sz = 1; - } - return kern_os_malloc(sz); + return kheap_alloc_tag_bt(KERN_OS_MALLOC, sz, + (zalloc_flags_t) (Z_WAITOK | Z_ZERO), VM_KERN_MEMORY_LIBKERN); } void @@ -590,7 +581,7 @@ noexcept */ kasan_unpoison_cxx_array_cookie(ptr); #endif - kern_os_free(ptr); + kheap_free_addr(KERN_OS_MALLOC, ptr); } return; } @@ -600,20 +591,14 @@ noexcept void operator delete(void * addr, size_t sz) noexcept { -#if OSALLOCDEBUG - OSAddAtomic(-sz, &debug_iomalloc_size); -#endif /* OSALLOCDEBUG */ - kfree(addr, sz); + kheap_free(KERN_OS_MALLOC, addr, sz); } void operator delete[](void * addr, size_t sz) noexcept { if (addr) { -#if OSALLOCDEBUG - OSAddAtomic(-sz, &debug_iomalloc_size); -#endif /* OSALLOCDEBUG */ - kfree(addr, sz); + kheap_free(KERN_OS_MALLOC, addr, sz); } } diff --git a/libkern/c++/OSSerialize.cpp b/libkern/c++/OSSerialize.cpp index d015efe9e..0b9c52020 100644 --- a/libkern/c++/OSSerialize.cpp +++ b/libkern/c++/OSSerialize.cpp @@ -27,6 +27,8 @@ */ /* OSSerialize.cpp created by rsulack on Wen 25-Nov-1998 */ +#define IOKIT_ENABLE_SHARED_PTR + #include __BEGIN_DECLS @@ -36,6 +38,7 @@ __END_DECLS #include #include #include +#include #include 
#include #include @@ -204,8 +207,7 @@ OSSerialize::initWithCapacity(unsigned int inCapacity) inCapacity = 1; } if (round_page_overflow(inCapacity, &capacity)) { - tags->release(); - tags = NULL; + tags.reset(); return false; } @@ -216,8 +218,6 @@ OSSerialize::initWithCapacity(unsigned int inCapacity) kern_return_t rc = kmem_alloc(kernel_map, (vm_offset_t *)&data, capacity, IOMemoryTag(kernel_map)); if (rc) { - tags->release(); - tags = NULL; return false; } bzero((void *)data, capacity); @@ -228,14 +228,13 @@ OSSerialize::initWithCapacity(unsigned int inCapacity) return true; } -OSSerialize * +OSSharedPtr OSSerialize::withCapacity(unsigned int inCapacity) { - OSSerialize *me = new OSSerialize; + OSSharedPtr me = OSMakeShared(); if (me && !me->initWithCapacity(inCapacity)) { - me->release(); - return NULL; + return nullptr; } return me; @@ -303,9 +302,6 @@ OSSerialize::ensureCapacity(unsigned int newCapacity) void OSSerialize::free() { - OSSafeReleaseNULL(tags); - OSSafeReleaseNULL(indexData); - if (data) { kmem_free(kernel_map, (vm_offset_t)data, capacity); OSCONTAINER_ACCUMSIZE( -((size_t)capacity)); @@ -316,15 +312,14 @@ OSSerialize::free() OSDefineMetaClassAndStructors(OSSerializer, OSObject) -OSSerializer * OSSerializer::forTarget( void * target, +OSSharedPtr +OSSerializer::forTarget( void * target, OSSerializerCallback callback, void * ref ) { - OSSerializer * thing; + OSSharedPtr thing = OSMakeShared(); - thing = new OSSerializer; if (thing && !thing->init()) { - thing->release(); - thing = NULL; + thing.reset(); } if (thing) { @@ -342,11 +337,11 @@ OSSerializer::callbackToBlock(void * target __unused, void * ref, return ((OSSerializerBlock)ref)(serializer); } -OSSerializer * +OSSharedPtr OSSerializer::withBlock( OSSerializerBlock callback) { - OSSerializer * serializer; + OSSharedPtr serializer; OSSerializerBlock block; block = Block_copy(callback); diff --git a/libkern/c++/OSSerializeBinary.cpp b/libkern/c++/OSSerializeBinary.cpp index a9d9ed61c..56a4c7b82 100644 --- a/libkern/c++/OSSerializeBinary.cpp +++ b/libkern/c++/OSSerializeBinary.cpp @@ -27,10 +27,13 @@ */ +#include +#include #include #include #include #include +#include #include @@ -108,7 +111,7 @@ OSSerialize::setIndexed(bool index __unused) bool OSSerialize::addBinaryObject(const OSMetaClassBase * o, uint32_t key, - const void * bits, size_t size, + const void * bits, uint32_t size, uint32_t * startCollection) { unsigned int newCapacity; @@ -216,7 +219,7 @@ OSSerialize::binarySerializeInternal(const OSMetaClassBase *o) unsigned int tagIdx; uint32_t i, key, startCollection; - size_t len; + uint32_t len; bool ok; tagIdx = tags->getNextIndexOfObject(o, 0); @@ -519,6 +522,7 @@ OSUnserializeBinary(const char *buffer, size_t bufferSize, OSString **errorStrin if (hasLength) { bufferPos += sizeof(*next); if (!(ok = (bufferPos <= bufferSize))) { + o->release(); break; } length = *next++; @@ -627,3 +631,44 @@ OSUnserializeBinary(const char *buffer, size_t bufferSize, OSString **errorStrin return result; } + +OSObject* +OSUnserializeXML( + const char * buffer, + OSSharedPtr& errorString) +{ + OSString* errorStringRaw = NULL; + OSObject* result = OSUnserializeXML(buffer, &errorStringRaw); + errorString.reset(errorStringRaw, OSNoRetain); + return result; +} + +OSObject* +OSUnserializeXML( + const char * buffer, + size_t bufferSize, + OSSharedPtr &errorString) +{ + OSString* errorStringRaw = NULL; + OSObject* result = OSUnserializeXML(buffer, bufferSize, &errorStringRaw); + errorString.reset(errorStringRaw, OSNoRetain); + return 
result; +} + +OSObject* +OSUnserializeBinary(const char *buffer, size_t bufferSize, OSSharedPtr& errorString) +{ + OSString* errorStringRaw = NULL; + OSObject* result = OSUnserializeBinary(buffer, bufferSize, &errorStringRaw); + errorString.reset(errorStringRaw, OSNoRetain); + return result; +} + +OSObject* +OSUnserialize(const char *buffer, OSSharedPtr& errorString) +{ + OSString* errorStringRaw = NULL; + OSObject* result = OSUnserialize(buffer, &errorStringRaw); + errorString.reset(errorStringRaw, OSNoRetain); + return result; +} diff --git a/libkern/c++/OSSet.cpp b/libkern/c++/OSSet.cpp index ed8b2762c..7aaf85cc3 100644 --- a/libkern/c++/OSSet.cpp +++ b/libkern/c++/OSSet.cpp @@ -27,14 +27,20 @@ */ /* IOSet.m created by rsulack on Thu 11-Jun-1998 */ -#include +#define IOKIT_ENABLE_SHARED_PTR + #include +#include #include #include +#include +#include +#include #define super OSCollection -OSDefineMetaClassAndStructors(OSSet, OSCollection) +OSDefineMetaClassAndStructorsWithZone(OSSet, OSCollection, + ZC_ZFREE_CLEARMEM) OSMetaClassDefineReservedUnused(OSSet, 0); OSMetaClassDefineReservedUnused(OSSet, 1); OSMetaClassDefineReservedUnused(OSSet, 2); @@ -109,60 +115,56 @@ bool OSSet::initWithSet(const OSSet *inSet, unsigned int inCapacity) { - return initWithArray(inSet->members, inCapacity); + return initWithArray(inSet->members.get(), inCapacity); } -OSSet * +OSSharedPtr OSSet::withCapacity(unsigned int capacity) { - OSSet *me = new OSSet; + OSSharedPtr me = OSMakeShared(); if (me && !me->initWithCapacity(capacity)) { - me->release(); - return NULL; + return nullptr; } return me; } -OSSet * +OSSharedPtr OSSet::withObjects(const OSObject *objects[], unsigned int count, unsigned int capacity) { - OSSet *me = new OSSet; + OSSharedPtr me = OSMakeShared(); if (me && !me->initWithObjects(objects, count, capacity)) { - me->release(); - return NULL; + return nullptr; } return me; } -OSSet * +OSSharedPtr OSSet::withArray(const OSArray *array, unsigned int capacity) { - OSSet *me = new OSSet; + OSSharedPtr me = OSMakeShared(); if (me && !me->initWithArray(array, capacity)) { - me->release(); - return NULL; + return nullptr; } return me; } -OSSet * +OSSharedPtr OSSet::withSet(const OSSet *set, unsigned int capacity) { - OSSet *me = new OSSet; + OSSharedPtr me = OSMakeShared();; if (me && !me->initWithSet(set, capacity)) { - me->release(); - return NULL; + return nullptr; } return me; @@ -173,7 +175,6 @@ OSSet::free() { if (members) { (void) members->super::setOptions(0, kImmutable); - members->release(); } super::free(); @@ -227,6 +228,12 @@ OSSet::setObject(const OSMetaClassBase *anObject) } } +bool +OSSet::setObject(OSSharedPtr const& anObject) +{ + return setObject(anObject.get()); +} + bool OSSet::merge(const OSArray * array) { @@ -251,7 +258,7 @@ OSSet::merge(const OSArray * array) bool OSSet::merge(const OSSet * set) { - return merge(set->members); + return merge(set->members.get()); } void @@ -268,6 +275,12 @@ OSSet::removeObject(const OSMetaClassBase *anObject) } } +void +OSSet::removeObject(OSSharedPtr const& anObject) +{ + removeObject(anObject.get()); +} + bool OSSet::containsObject(const OSMetaClassBase *anObject) const @@ -406,18 +419,19 @@ OSSet::setOptions(unsigned options, unsigned mask, void *) return old; } -OSCollection * +OSSharedPtr OSSet::copyCollection(OSDictionary *cycleDict) { - bool allocDict = !cycleDict; - OSCollection *ret = NULL; - OSSet *newSet = NULL; - - if (allocDict) { - cycleDict = OSDictionary::withCapacity(16); - if (!cycleDict) { - return NULL; + OSSharedPtr 
ourCycleDict; + OSSharedPtr ret; + OSSharedPtr newSet; + + if (!cycleDict) { + ourCycleDict = OSDictionary::withCapacity(16); + if (!ourCycleDict) { + return nullptr; } + cycleDict = ourCycleDict.get(); } do { @@ -431,41 +445,28 @@ OSSet::copyCollection(OSDictionary *cycleDict) continue; // Couldn't create new set abort } // Insert object into cycle Dictionary - cycleDict->setObject((const OSSymbol *) this, newSet); + cycleDict->setObject((const OSSymbol *) this, newSet.get()); - OSArray *newMembers = newSet->members; + OSArray *newMembers = newSet->members.get(); newMembers->capacityIncrement = members->capacityIncrement; // Now copy over the contents into the new duplicate for (unsigned int i = 0; i < members->count; i++) { - OSObject *obj = EXT_CAST(members->array[i]); + OSObject *obj = EXT_CAST(members->array[i].get()); OSCollection *coll = OSDynamicCast(OSCollection, obj); if (coll) { - OSCollection *newColl = coll->copyCollection(cycleDict); + OSSharedPtr newColl = coll->copyCollection(cycleDict); if (newColl) { - obj = newColl; // Rely on cycleDict ref for a bit - newColl->release(); + obj = newColl.get(); // Rely on cycleDict ref for a bit } else { - goto abortCopy; + return ret; } } - ; newMembers->setObject(obj); } - ; - ret = newSet; - newSet = NULL; + ret = os::move(newSet); } while (false); -abortCopy: - if (newSet) { - newSet->release(); - } - - if (allocDict) { - cycleDict->release(); - } - return ret; } diff --git a/libkern/c++/OSString.cpp b/libkern/c++/OSString.cpp index c5196917c..1639debf1 100644 --- a/libkern/c++/OSString.cpp +++ b/libkern/c++/OSString.cpp @@ -28,17 +28,21 @@ /* IOString.m created by rsulack on Wed 17-Sep-1997 */ /* IOString.cpp converted to C++ on Tue 1998-9-22 */ +#define IOKIT_ENABLE_SHARED_PTR + #include #include #include +#include #include #include #include #define super OSObject -OSDefineMetaClassAndStructors(OSString, OSObject) +OSDefineMetaClassAndStructorsWithZone(OSString, OSObject, + (zone_create_flags_t) (ZC_CACHING | ZC_ZFREE_CLEARMEM)) OSMetaClassDefineReservedUnused(OSString, 0); OSMetaClassDefineReservedUnused(OSString, 1); OSMetaClassDefineReservedUnused(OSString, 2); @@ -72,13 +76,13 @@ OSString::initWithCString(const char *cString) return false; } - newLength = strnlen(cString, kMaxStringLength); + newLength = (unsigned int) strnlen(cString, kMaxStringLength); if (newLength >= kMaxStringLength) { return false; } newLength++; - newString = (char *) kalloc_container(newLength); + newString = (char *)kalloc_data_container(newLength, Z_WAITOK); if (!newString) { return false; } @@ -86,7 +90,7 @@ OSString::initWithCString(const char *cString) bcopy(cString, newString, newLength); if (!(flags & kOSStringNoCopy) && string) { - kfree(string, (vm_size_t)length); + kfree_data_container(string, length); OSCONTAINER_ACCUMSIZE(-((size_t)length)); } string = newString; @@ -116,8 +120,8 @@ OSString::initWithStringOfLength(const char *cString, size_t inlength) return false; } - newLength = inlength + 1; - newString = (char *) kalloc_container(newLength); + newLength = (unsigned int) (inlength + 1); + newString = (char *)kalloc_data_container(newLength, Z_WAITOK); if (!newString) { return false; } @@ -126,7 +130,7 @@ OSString::initWithStringOfLength(const char *cString, size_t inlength) newString[inlength] = 0; if (!(flags & kOSStringNoCopy) && string) { - kfree(string, (vm_size_t)length); + kfree_data_container(string, length); OSCONTAINER_ACCUMSIZE(-((size_t)length)); } @@ -146,7 +150,7 @@ OSString::initWithCStringNoCopy(const char *cString) return 
false; } - length = strnlen(cString, kMaxStringLength); + length = (unsigned int) strnlen(cString, kMaxStringLength); if (length >= kMaxStringLength) { return false; } @@ -158,53 +162,49 @@ OSString::initWithCStringNoCopy(const char *cString) return true; } -OSString * +OSSharedPtr OSString::withString(const OSString *aString) { - OSString *me = new OSString; + OSSharedPtr me = OSMakeShared(); if (me && !me->initWithString(aString)) { - me->release(); - return NULL; + return nullptr; } return me; } -OSString * +OSSharedPtr OSString::withCString(const char *cString) { - OSString *me = new OSString; + OSSharedPtr me = OSMakeShared(); if (me && !me->initWithCString(cString)) { - me->release(); - return NULL; + return nullptr; } return me; } -OSString * +OSSharedPtr OSString::withCStringNoCopy(const char *cString) { - OSString *me = new OSString; + OSSharedPtr me = OSMakeShared(); if (me && !me->initWithCStringNoCopy(cString)) { - me->release(); - return NULL; + return nullptr; } return me; } -OSString * +OSSharedPtr OSString::withStringOfLength(const char *cString, size_t length) { - OSString *me = new OSString; + OSSharedPtr me = OSMakeShared(); if (me && !me->initWithStringOfLength(cString, length)) { - me->release(); - return NULL; + return nullptr; } return me; @@ -245,7 +245,7 @@ void OSString::free() { if (!(flags & kOSStringNoCopy) && string) { - kfree(string, (vm_size_t)length); + kfree_data_container(string, length); OSCONTAINER_ACCUMSIZE(-((size_t)length)); } diff --git a/libkern/c++/OSSymbol.cpp b/libkern/c++/OSSymbol.cpp index 695fda33a..14380608a 100644 --- a/libkern/c++/OSSymbol.cpp +++ b/libkern/c++/OSSymbol.cpp @@ -27,20 +27,24 @@ */ /* IOSymbol.cpp created by gvdl on Fri 1998-11-17 */ +#define IOKIT_ENABLE_SHARED_PTR + #include #include #include #include +#include #include +#include #include #define super OSString typedef struct { unsigned int i, j; } OSSymbolPoolState; -#define INITIAL_POOL_SIZE (exp2ml(1 + log2(kInitBucketCount))) +#define INITIAL_POOL_SIZE ((unsigned int)((exp2ml(1 + log2(kInitBucketCount))))) #define GROW_FACTOR (1) #define SHRINK_FACTOR (3) @@ -83,26 +87,26 @@ private: if (!*s) { break; } - len++; hash ^= *s++; + len++; hash ^= (unsigned int)(unsigned char) *s++; if (!*s) { break; } - len++; hash ^= *s++ << 8; + len++; hash ^= ((unsigned int)(unsigned char) *s++) << 8; if (!*s) { break; } - len++; hash ^= *s++ << 16; + len++; hash ^= ((unsigned int)(unsigned char) *s++) << 16; if (!*s) { break; } - len++; hash ^= *s++ << 24; + len++; hash ^= ((unsigned int)(unsigned char) *s++) << 24; } *lenP = len; *hashP = hash; } static unsigned long log2(unsigned int x); - static unsigned long exp2ml(unsigned int x); + static unsigned long exp2ml(unsigned long x); void reconstructSymbols(void); void reconstructSymbols(bool grow); @@ -145,12 +149,12 @@ public: lck_rw_unlock(poolGate, LCK_RW_TYPE_EXCLUSIVE); } - LIBKERN_RETURNS_RETAINED OSSymbol *findSymbol(const char *cString) const; - LIBKERN_RETURNS_RETAINED OSSymbol *insertSymbol(OSSymbol *sym); + OSSharedPtr findSymbol(const char *cString) const; + OSSharedPtr insertSymbol(OSSymbol *sym); void removeSymbol(OSSymbol *sym); OSSymbolPoolState initHashState(); - LIBKERN_RETURNS_NOT_RETAINED OSSymbol *nextHashState(OSSymbolPoolState *stateP); + LIBKERN_RETURNS_NOT_RETAINED OSSymbol * nextHashState(OSSymbolPoolState *stateP); }; void * @@ -183,7 +187,6 @@ OSSymbolPool::init() if (!buckets) { return false; } - bzero(buckets, nBuckets * sizeof(Bucket)); poolGate = lck_rw_alloc_init(IOLockGroup, LCK_ATTR_NULL); @@ -231,7 
+234,7 @@ OSSymbolPool::log2(unsigned int x) } unsigned long -OSSymbolPool::exp2ml(unsigned int x) +OSSymbolPool::exp2ml(unsigned long x) { return (1 << x) - 1; } @@ -307,12 +310,13 @@ OSSymbolPool::reconstructSymbols(bool grow) } } -OSSymbol * +OSSharedPtr OSSymbolPool::findSymbol(const char *cString) const { Bucket *thisBucket; unsigned int j, inLen, hash; OSSymbol *probeSymbol, **list; + OSSharedPtr ret; hashSymbol(cString, &hash, &inLen); inLen++; thisBucket = &buckets[hash % nBuckets]; @@ -328,7 +332,8 @@ OSSymbolPool::findSymbol(const char *cString) const if (inLen == probeSymbol->length && strncmp(probeSymbol->string, cString, probeSymbol->length) == 0 && probeSymbol->taggedTryRetain(nullptr)) { - return probeSymbol; + ret.reset(probeSymbol, OSNoRetain); + return ret; } return NULL; } @@ -338,20 +343,22 @@ OSSymbolPool::findSymbol(const char *cString) const if (inLen == probeSymbol->length && strncmp(probeSymbol->string, cString, probeSymbol->length) == 0 && probeSymbol->taggedTryRetain(nullptr)) { - return probeSymbol; + ret.reset(probeSymbol, OSNoRetain); + return ret; } } return NULL; } -OSSymbol * +OSSharedPtr OSSymbolPool::insertSymbol(OSSymbol *sym) { const char *cString = sym->string; Bucket *thisBucket; unsigned int j, inLen, hash; OSSymbol *probeSymbol, **list; + OSSharedPtr ret; hashSymbol(cString, &hash, &inLen); inLen++; thisBucket = &buckets[hash % nBuckets]; @@ -370,7 +377,8 @@ OSSymbolPool::insertSymbol(OSSymbol *sym) if (inLen == probeSymbol->length && strncmp(probeSymbol->string, cString, probeSymbol->length) == 0 && probeSymbol->taggedTryRetain(nullptr)) { - return probeSymbol; + ret.reset(probeSymbol, OSNoRetain); + return ret; } list = (OSSymbol **) kalloc_tag(2 * sizeof(OSSymbol *), VM_KERN_MEMORY_LIBKERN); @@ -391,7 +399,8 @@ OSSymbolPool::insertSymbol(OSSymbol *sym) if (inLen == probeSymbol->length && strncmp(probeSymbol->string, cString, probeSymbol->length) == 0 && probeSymbol->taggedTryRetain(nullptr)) { - return probeSymbol; + ret.reset(probeSymbol, OSNoRetain); + return ret; } } @@ -502,8 +511,8 @@ OSSymbolPool::removeSymbol(OSSymbol *sym) * From here on we are actually implementing the OSSymbol class ********************************************************************* */ -OSDefineMetaClassAndStructorsWithInit(OSSymbol, OSString, - OSSymbol::initialize()) +OSDefineMetaClassAndStructorsWithInitAndZone(OSSymbol, OSString, + OSSymbol::initialize(), ZC_ZFREE_CLEARMEM) OSMetaClassDefineReservedUnused(OSSymbol, 0); OSMetaClassDefineReservedUnused(OSSymbol, 1); OSMetaClassDefineReservedUnused(OSSymbol, 2); @@ -544,13 +553,13 @@ OSSymbol::initWithString(const OSString *) return false; } -const OSSymbol * +OSSharedPtr OSSymbol::withString(const OSString *aString) { // This string may be a OSSymbol already, cheap check. if (OSDynamicCast(OSSymbol, aString)) { - aString->retain(); - return (const OSSymbol *) aString; + OSSharedPtr aStringNew((const OSSymbol *)aString, OSRetain); + return aStringNew; } else if (((const OSSymbol *) aString)->flags & kOSStringNoCopy) { return OSSymbol::withCStringNoCopy(aString->getCStringNoCopy()); } else { @@ -558,10 +567,10 @@ OSSymbol::withString(const OSString *aString) } } -const OSSymbol * +OSSharedPtr OSSymbol::withCString(const char *cString) { - const OSSymbol *symbol; + OSSharedPtr symbol; // Check if the symbol exists already, we don't need to take a lock here, // since existingSymbolForCString will take the shared lock. 
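
[Editorial aside, not part of the patch] The OSSymbol pool changes above turn raw retained returns into OSSharedPtr, and the key detail is whether the smart pointer adopts a reference that was already taken or takes a new one: findSymbol()/insertSymbol() only return a symbol after taggedTryRetain() has succeeded, so they hand that +1 reference over with reset(probeSymbol, OSNoRetain), while withString()/existingSymbolForString() start from a borrowed OSString and therefore construct the OSSharedPtr with OSRetain. The caller-side sketch below shows what the reworked factory buys a client; it assumes an in-kernel caller, and the function name and the "IOClass" literal are illustrative only:

static void
example_symbol_use(void)
{
	// OSSymbol::withCString() now returns OSSharedPtr<const OSSymbol>, so the
	// +1 reference it used to hand back as a raw pointer is owned by the
	// smart pointer instead.
	OSSharedPtr<const OSSymbol> key = OSSymbol::withCString("IOClass");
	if (!key) {
		return;             // allocation or pool insertion failed
	}
	printf("symbol: %s\n", key->getCStringNoCopy());
	// No explicit key->release(): ~OSSharedPtr() drops the reference when the
	// pointer goes out of scope.
}

The same adopt-versus-retain distinction explains the withCString()/withCStringNoCopy() hunks that follow: when insertSymbol() reports that another thread raced in first, the freshly built symbol is torn down with detach()->OSString::free() and the pool's already-retained copy is returned instead.
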
@@ -570,31 +579,31 @@ OSSymbol::withCString(const char *cString) return symbol; } - OSSymbol *newSymb = new OSSymbol; + OSSharedPtr newSymb = OSMakeShared(); if (!newSymb) { - return newSymb; + return os::move(newSymb); } if (newSymb->OSString::initWithCString(cString)) { pool->closeWriteGate(); - symbol = pool->insertSymbol(newSymb); + symbol = pool->insertSymbol(newSymb.get()); pool->openWriteGate(); if (symbol) { // Somebody must have inserted the new symbol so free our copy - newSymb->OSString::free(); + newSymb.detach()->OSString::free(); return symbol; } } - return newSymb; // return the newly created & inserted symbol. + return os::move(newSymb); // return the newly created & inserted symbol. } -const OSSymbol * +OSSharedPtr OSSymbol::withCStringNoCopy(const char *cString) { - const OSSymbol *symbol; - OSSymbol *newSymb; + OSSharedPtr symbol; + OSSharedPtr newSymb; // Check if the symbol exists already, we don't need to take a lock here, // since existingSymbolForCString will take the shared lock. @@ -603,47 +612,47 @@ OSSymbol::withCStringNoCopy(const char *cString) return symbol; } - newSymb = new OSSymbol; + newSymb = OSMakeShared(); if (!newSymb) { - return newSymb; + return os::move(newSymb); } if (newSymb->OSString::initWithCStringNoCopy(cString)) { pool->closeWriteGate(); - symbol = pool->insertSymbol(newSymb); + symbol = pool->insertSymbol(newSymb.get()); pool->openWriteGate(); if (symbol) { + newSymb.detach()->OSString::free(); // Somebody must have inserted the new symbol so free our copy - newSymb->OSString::free(); return symbol; } } - return newSymb; // return the newly created & inserted symbol. + return os::move(newSymb); // return the newly created & inserted symbol. } -const OSSymbol * +OSSharedPtr OSSymbol::existingSymbolForString(const OSString *aString) { if (OSDynamicCast(OSSymbol, aString)) { - aString->retain(); - return (const OSSymbol *) aString; + OSSharedPtr aStringNew((const OSSymbol *)aString, OSRetain); + return aStringNew; } return OSSymbol::existingSymbolForCString(aString->getCStringNoCopy()); } -const OSSymbol * +OSSharedPtr OSSymbol::existingSymbolForCString(const char *cString) { - OSSymbol *symbol; + OSSharedPtr symbol; pool->closeReadGate(); symbol = pool->findSymbol(cString); pool->openReadGate(); - return symbol; + return os::move(symbol); } void diff --git a/libkern/c++/OSUnserialize.cpp b/libkern/c++/OSUnserialize.cpp index adcccb339..927cd1c04 100644 --- a/libkern/c++/OSUnserialize.cpp +++ b/libkern/c++/OSUnserialize.cpp @@ -184,11 +184,57 @@ static OSObject *parsedObject; #define YYSTYPE object_t * -#include +__BEGIN_DECLS +#include +__END_DECLS -#define malloc(s) kern_os_malloc(s) -#define realloc(a, s) kern_os_realloc(a, s) -#define free(a) kern_os_free(a) +#define malloc(size) malloc_impl(size) +static inline void * +malloc_impl(size_t size) +{ + if (size == 0) { + return NULL; + } + return kheap_alloc_tag_bt(KHEAP_DEFAULT, size, + (zalloc_flags_t) (Z_WAITOK | Z_ZERO), + VM_KERN_MEMORY_LIBKERN); +} + +#define free(addr) free_impl(addr) +static inline void +free_impl(void *addr) +{ + kheap_free_addr(KHEAP_DEFAULT, addr); +} +static inline void +safe_free(void *addr, size_t size) +{ + if (addr) { + assert(size != 0); + kheap_free(KHEAP_DEFAULT, addr, size); + } +} + +#define realloc(addr, osize, nsize) realloc_impl(addr, osize, nsize) +static inline void * +realloc_impl(void *addr, size_t osize, size_t nsize) +{ + if (!addr) { + return malloc(nsize); + } + if (nsize == osize) { + return addr; + } + void *nmem = malloc(nsize); + if (!nmem) 
{ + safe_free(addr, osize); + return NULL; + } + (void)memcpy(nmem, addr, (nsize > osize) ? osize : nsize); + safe_free(addr, osize); + + return nmem; +} @@ -223,7 +269,7 @@ typedef int YYSTYPE; /* Line 216 of yacc.c. */ -#line 182 "OSUnserialize.tab.c" +#line 224 "OSUnserialize.tab.c" #ifdef short # undef short @@ -513,9 +559,9 @@ static const yytype_int8 yyrhs[] = /* YYRLINE[YYN] -- source line where rule number YYN was defined. */ static const yytype_uint8 yyrline[] = { - 0, 121, 121, 122, 123, 126, 127, 128, 129, 130, - 131, 132, 133, 142, 150, 151, 154, 155, 158, 168, - 169, 172, 173, 176, 181, 192, 200, 205, 210 + 0, 163, 163, 164, 165, 168, 169, 170, 171, 172, + 173, 174, 175, 184, 192, 193, 196, 197, 200, 210, + 211, 214, 215, 218, 223, 234, 242, 247, 252 }; #endif @@ -732,7 +778,7 @@ while (YYID (0)) #if YYDEBUG # ifndef YYFPRINTF -# include /* INFRINGES ON USER NAME SPACE */ +# include /* INFRINGES ON USER NAME SPACE */ # define YYFPRINTF fprintf # endif @@ -1454,57 +1500,57 @@ yyreduce: YY_REDUCE_PRINT(yyn); switch (yyn) { case 2: -#line 121 "OSUnserialize.y" +#line 163 "OSUnserialize.y" { parsedObject = (OSObject *)NULL; YYACCEPT;;} break; case 3: -#line 122 "OSUnserialize.y" +#line 164 "OSUnserialize.y" { parsedObject = (OSObject *)(yyvsp[(1) - (1)]); YYACCEPT;;} break; case 4: -#line 123 "OSUnserialize.y" +#line 165 "OSUnserialize.y" { yyerror("syntax error"); YYERROR;;} break; case 5: -#line 126 "OSUnserialize.y" +#line 168 "OSUnserialize.y" { (yyval) = (object_t *)buildOSDictionary((yyvsp[(1) - (1)]));;} break; case 6: -#line 127 "OSUnserialize.y" +#line 169 "OSUnserialize.y" { (yyval) = (object_t *)buildOSArray((yyvsp[(1) - (1)]));;} break; case 7: -#line 128 "OSUnserialize.y" +#line 170 "OSUnserialize.y" { (yyval) = (object_t *)buildOSSet((yyvsp[(1) - (1)]));;} break; case 8: -#line 129 "OSUnserialize.y" +#line 171 "OSUnserialize.y" { (yyval) = (object_t *)buildOSString((yyvsp[(1) - (1)]));;} break; case 9: -#line 130 "OSUnserialize.y" +#line 172 "OSUnserialize.y" { (yyval) = (object_t *)buildOSData((yyvsp[(1) - (1)]));;} break; case 10: -#line 131 "OSUnserialize.y" +#line 173 "OSUnserialize.y" { (yyval) = (object_t *)buildOSOffset((yyvsp[(1) - (1)]));;} break; case 11: -#line 132 "OSUnserialize.y" +#line 174 "OSUnserialize.y" { (yyval) = (object_t *)buildOSBoolean((yyvsp[(1) - (1)]));;} break; case 12: -#line 133 "OSUnserialize.y" +#line 175 "OSUnserialize.y" { (yyval) = (object_t *)retrieveObject((yyvsp[(2) - (2)])->u.offset); if ((yyval)) { ((OSObject *)(yyval))->retain(); @@ -1517,7 +1563,7 @@ yyreduce: break; case 13: -#line 142 "OSUnserialize.y" +#line 184 "OSUnserialize.y" { (yyval) = (yyvsp[(1) - (3)]); rememberObject((yyvsp[(3) - (3)])->u.offset, (yyvsp[(1) - (3)])); freeObject((yyvsp[(3) - (3)])); @@ -1525,22 +1571,22 @@ yyreduce: break; case 14: -#line 150 "OSUnserialize.y" +#line 192 "OSUnserialize.y" { (yyval) = NULL;;} break; case 15: -#line 151 "OSUnserialize.y" +#line 193 "OSUnserialize.y" { (yyval) = (yyvsp[(2) - (3)]);;} break; case 17: -#line 155 "OSUnserialize.y" +#line 197 "OSUnserialize.y" { (yyvsp[(2) - (2)])->next = (yyvsp[(1) - (2)]); (yyvsp[(1) - (2)])->prev = (yyvsp[(2) - (2)]); (yyval) = (yyvsp[(2) - (2)]);;} break; case 18: -#line 158 "OSUnserialize.y" +#line 200 "OSUnserialize.y" { (yyval) = newObject(); (yyval)->next = NULL; (yyval)->prev = NULL; @@ -1550,27 +1596,27 @@ yyreduce: break; case 19: -#line 168 "OSUnserialize.y" +#line 210 "OSUnserialize.y" { (yyval) = NULL;;} break; case 20: -#line 169 "OSUnserialize.y" +#line 211 
"OSUnserialize.y" { (yyval) = (yyvsp[(2) - (3)]);;} break; case 21: -#line 172 "OSUnserialize.y" +#line 214 "OSUnserialize.y" { (yyval) = NULL;;} break; case 22: -#line 173 "OSUnserialize.y" +#line 215 "OSUnserialize.y" { (yyval) = (yyvsp[(2) - (3)]);;} break; case 23: -#line 176 "OSUnserialize.y" +#line 218 "OSUnserialize.y" { (yyval) = newObject(); (yyval)->object = (yyvsp[(1) - (1)]); (yyval)->next = NULL; @@ -1579,7 +1625,7 @@ yyreduce: break; case 24: -#line 181 "OSUnserialize.y" +#line 223 "OSUnserialize.y" { oo = newObject(); oo->object = (yyvsp[(3) - (3)]); oo->next = (yyvsp[(1) - (3)]); @@ -1590,7 +1636,7 @@ yyreduce: break; case 25: -#line 192 "OSUnserialize.y" +#line 234 "OSUnserialize.y" { (yyval) = (yyvsp[(1) - (3)]); (yyval)->size = (yyvsp[(3) - (3)])->u.offset; freeObject((yyvsp[(3) - (3)])); @@ -1599,7 +1645,7 @@ yyreduce: /* Line 1267 of yacc.c. */ -#line 1555 "OSUnserialize.tab.c" +#line 1597 "OSUnserialize.tab.c" default: break; } YY_SYMBOL_PRINT("-> $$ =", yyr1[yyn], &yyval, &yyloc); @@ -1811,7 +1857,7 @@ yyreturn: } -#line 213 "OSUnserialize.y" +#line 255 "OSUnserialize.y" static int lineNumber = 0; @@ -1926,7 +1972,7 @@ top: /* copy to null terminated buffer */ tempString = (char *)malloc(length + 1); - if (tempString == 0) { + if (tempString == NULL) { printf("OSUnserialize: can't alloc temp memory\n"); return 0; } @@ -1960,7 +2006,7 @@ top: (void)nextChar(); /* copy to null terminated buffer */ tempString = (char *)malloc(length + 1); - if (tempString == 0) { + if (tempString == NULL) { printf("OSUnserialize: can't alloc temp memory\n"); return 0; } @@ -2020,7 +2066,8 @@ top: if (c == '<') { unsigned char *d, *start, *lastStart; - start = lastStart = d = (unsigned char *)malloc(OSDATA_ALLOC_SIZE); + size_t buflen = OSDATA_ALLOC_SIZE; + start = lastStart = d = (unsigned char *)malloc(buflen); c = nextChar(); // skip over '<' while (c != 0 && c != '>') { if (isSpace(c)) { @@ -2063,13 +2110,14 @@ top: d++; if ((d - lastStart) >= OSDATA_ALLOC_SIZE) { int oldsize = d - start; - start = (unsigned char *)realloc(start, oldsize + OSDATA_ALLOC_SIZE); + assert(buflen == oldsize); + start = (unsigned char *)realloc(start, oldsize, buflen); d = lastStart = start + oldsize; } c = nextChar(); } if (c != '>') { - free(start); + safe_free(start, buflen); return SYNTAX_ERROR; } @@ -2111,7 +2159,7 @@ freeObject(object_t *o) #if DEBUG debugUnserializeAllocCount--; #endif - free(o); + safe_free(o, sizeof(object_t)); } static OSDictionary *tags; @@ -2212,7 +2260,7 @@ buildOSString(object_t *o) { OSString *s = OSString::withCString((char *)o); - free(o); + safe_free(o, strlen((char *)o) + 1); return s; }; @@ -2227,7 +2275,7 @@ buildOSData(object_t *o) } else { d = OSData::withCapacity(0); } - free(o->object); + safe_free(o->object, o->size); freeObject(o); return d; }; @@ -2276,10 +2324,10 @@ OSUnserialize(const char *buffer, OSString **errorString) if (yyparse() == 0) { object = parsedObject; if (errorString) { - *errorString = 0; + *errorString = NULL; } } else { - object = 0; + object = NULL; if (errorString) { *errorString = OSString::withCString(yyerror_message); } diff --git a/libkern/c++/OSUnserialize.y b/libkern/c++/OSUnserialize.y index d3189324e..fbc5ca2f2 100644 --- a/libkern/c++/OSUnserialize.y +++ b/libkern/c++/OSUnserialize.y @@ -99,11 +99,57 @@ static OSObject *parsedObject; #define YYSTYPE object_t * -#include +__BEGIN_DECLS +#include +__END_DECLS -#define malloc(s) kern_os_malloc(s) -#define realloc(a, s) kern_os_realloc(a, s) -#define free(a) kern_os_free(a) +#define 
malloc(size) malloc_impl(size) +static inline void * +malloc_impl(size_t size) +{ + if (size == 0) { + return NULL; + } + return kheap_alloc_tag_bt(KHEAP_DEFAULT, size, + (zalloc_flags_t) (Z_WAITOK | Z_ZERO), + VM_KERN_MEMORY_LIBKERN); +} + +#define free(addr) free_impl(addr) +static inline void +free_impl(void *addr) +{ + kheap_free_addr(KHEAP_DEFAULT, addr); +} +static inline void +safe_free(void *addr, size_t size) +{ + if(addr) { + assert(size != 0); + kheap_free(KHEAP_DEFAULT, addr, size); + } +} + +#define realloc(addr, osize, nsize) realloc_impl(addr, osize, nsize) +static inline void * +realloc_impl(void *addr, size_t osize, size_t nsize) +{ + if (!addr) { + return malloc(nsize); + } + if (nsize == osize) { + return addr; + } + void *nmem = malloc(nsize); + if (!nmem) { + safe_free(addr, osize); + return NULL; + } + (void)memcpy(nmem, addr, (nsize > osize) ? osize : nsize); + safe_free(addr, osize); + + return nmem; +} %} %token NUMBER @@ -381,7 +427,8 @@ yylex() if (c == '<') { unsigned char *d, *start, *lastStart; - start = lastStart = d = (unsigned char *)malloc(OSDATA_ALLOC_SIZE); + size_t buflen = OSDATA_ALLOC_SIZE; + start = lastStart = d = (unsigned char *)malloc(buflen); c = nextChar(); // skip over '<' while (c != 0 && c != '>') { @@ -413,13 +460,14 @@ yylex() d++; if ((d - lastStart) >= OSDATA_ALLOC_SIZE) { int oldsize = d - start; - start = (unsigned char *)realloc(start, oldsize + OSDATA_ALLOC_SIZE); + assert(buflen == oldsize); + start = (unsigned char *)realloc(start, oldsize, buflen); d = lastStart = start + oldsize; } c = nextChar(); } if (c != '>' ) { - free(start); + safe_free(start, buflen); return SYNTAX_ERROR; } @@ -461,7 +509,7 @@ freeObject(object_t *o) #if DEBUG debugUnserializeAllocCount--; #endif - free(o); + safe_free(o, sizeof(object_t)); } static OSDictionary *tags; @@ -562,7 +610,7 @@ buildOSString(object_t *o) { OSString *s = OSString::withCString((char *)o); - free(o); + safe_free(o, strlen((char *)o) + 1); return s; }; @@ -577,7 +625,7 @@ buildOSData(object_t *o) } else { d = OSData::withCapacity(0); } - free(o->object); + safe_free(o->object, o->size); freeObject(o); return d; }; diff --git a/libkern/c++/OSUnserializeXML.cpp b/libkern/c++/OSUnserializeXML.cpp index 338246042..4ff840d1d 100644 --- a/libkern/c++/OSUnserializeXML.cpp +++ b/libkern/c++/OSUnserializeXML.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999-2013 Apple Inc. All rights reserved. + * Copyright (c) 1999-2019 Apple Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -177,6 +177,7 @@ typedef struct object { int size; void *data; // for data char *string; // for string & symbol + int string_alloc_length; long long number; // for number int idref; } object_t; @@ -219,11 +220,57 @@ static object_t *buildData(parser_state_t *state, object_t *o); static object_t *buildNumber(parser_state_t *state, object_t *o); static object_t *buildBoolean(parser_state_t *state, object_t *o); -#include +__BEGIN_DECLS +#include +__END_DECLS -#define malloc(s) kern_os_malloc(s) -#define realloc(a, s) kern_os_realloc(a, s) -#define free(a) kern_os_free((void *)a) +#define malloc(size) malloc_impl(size) +static inline void * +malloc_impl(size_t size) +{ + if (size == 0) { + return NULL; + } + return kheap_alloc_tag_bt(KHEAP_DEFAULT, size, + (zalloc_flags_t) (Z_WAITOK | Z_ZERO), + VM_KERN_MEMORY_LIBKERN); +} + +#define free(addr) free_impl(addr) +static inline void +free_impl(void *addr) +{ + kheap_free_addr(KHEAP_DEFAULT, addr); +} +static inline void +safe_free(void *addr, size_t size) +{ + if (addr) { + assert(size != 0); + kheap_free(KHEAP_DEFAULT, addr, size); + } +} + +#define realloc(addr, osize, nsize) realloc_impl(addr, osize, nsize) +static inline void * +realloc_impl(void *addr, size_t osize, size_t nsize) +{ + if (!addr) { + return malloc(nsize); + } + if (nsize == osize) { + return addr; + } + void *nmem = malloc(nsize); + if (!nmem) { + safe_free(addr, osize); + return NULL; + } + (void)memcpy(nmem, addr, (nsize > osize) ? osize : nsize); + safe_free(addr, osize); + + return nmem; +} @@ -258,7 +305,7 @@ typedef int YYSTYPE; /* Line 216 of yacc.c. */ -#line 212 "OSUnserializeXML.tab.c" +#line 258 "OSUnserializeXML.tab.c" #ifdef short # undef short @@ -549,10 +596,10 @@ static const yytype_int8 yyrhs[] = /* YYRLINE[YYN] -- source line where rule number YYN was defined. 
*/ static const yytype_uint16 yyrline[] = { - 0, 146, 146, 149, 154, 159, 171, 183, 195, 207, - 219, 231, 243, 267, 270, 273, 276, 277, 292, 301, - 313, 316, 319, 322, 325, 328, 331, 334, 341, 344, - 347, 350, 353 + 0, 192, 192, 195, 200, 205, 217, 229, 241, 253, + 265, 277, 289, 313, 316, 319, 322, 323, 338, 347, + 359, 362, 365, 368, 371, 374, 377, 380, 387, 390, + 393, 396, 399 }; #endif @@ -1495,14 +1542,14 @@ yyreduce: YY_REDUCE_PRINT(yyn); switch (yyn) { case 2: -#line 146 "OSUnserializeXML.y" +#line 192 "OSUnserializeXML.y" { yyerror("unexpected end of buffer"); YYERROR; ;} break; case 3: -#line 149 "OSUnserializeXML.y" +#line 195 "OSUnserializeXML.y" { STATE->parsedObject = (yyvsp[(1) - (1)])->object; (yyvsp[(1) - (1)])->object = 0; freeObject(STATE, (yyvsp[(1) - (1)])); @@ -1511,14 +1558,14 @@ yyreduce: break; case 4: -#line 154 "OSUnserializeXML.y" +#line 200 "OSUnserializeXML.y" { yyerror("syntax error"); YYERROR; ;} break; case 5: -#line 159 "OSUnserializeXML.y" +#line 205 "OSUnserializeXML.y" { (yyval) = buildDictionary(STATE, (yyvsp[(1) - (1)])); if (!yyval->object) { @@ -1534,7 +1581,7 @@ yyreduce: break; case 6: -#line 171 "OSUnserializeXML.y" +#line 217 "OSUnserializeXML.y" { (yyval) = buildArray(STATE, (yyvsp[(1) - (1)])); if (!yyval->object) { @@ -1550,7 +1597,7 @@ yyreduce: break; case 7: -#line 183 "OSUnserializeXML.y" +#line 229 "OSUnserializeXML.y" { (yyval) = buildSet(STATE, (yyvsp[(1) - (1)])); if (!yyval->object) { @@ -1566,7 +1613,7 @@ yyreduce: break; case 8: -#line 195 "OSUnserializeXML.y" +#line 241 "OSUnserializeXML.y" { (yyval) = buildString(STATE, (yyvsp[(1) - (1)])); if (!yyval->object) { @@ -1582,7 +1629,7 @@ yyreduce: break; case 9: -#line 207 "OSUnserializeXML.y" +#line 253 "OSUnserializeXML.y" { (yyval) = buildData(STATE, (yyvsp[(1) - (1)])); if (!yyval->object) { @@ -1598,7 +1645,7 @@ yyreduce: break; case 10: -#line 219 "OSUnserializeXML.y" +#line 265 "OSUnserializeXML.y" { (yyval) = buildNumber(STATE, (yyvsp[(1) - (1)])); if (!yyval->object) { @@ -1614,7 +1661,7 @@ yyreduce: break; case 11: -#line 231 "OSUnserializeXML.y" +#line 277 "OSUnserializeXML.y" { (yyval) = buildBoolean(STATE, (yyvsp[(1) - (1)])); if (!yyval->object) { @@ -1630,7 +1677,7 @@ yyreduce: break; case 12: -#line 243 "OSUnserializeXML.y" +#line 289 "OSUnserializeXML.y" { (yyval) = retrieveObject(STATE, (yyvsp[(1) - (1)])->idref); if ((yyval)) { STATE->retrievedObjectCount++; @@ -1654,21 +1701,21 @@ yyreduce: break; case 13: -#line 267 "OSUnserializeXML.y" +#line 313 "OSUnserializeXML.y" { (yyval) = (yyvsp[(1) - (2)]); (yyval)->elements = NULL; ;} break; case 14: -#line 270 "OSUnserializeXML.y" +#line 316 "OSUnserializeXML.y" { (yyval) = (yyvsp[(1) - (3)]); (yyval)->elements = (yyvsp[(2) - (3)]); ;} break; case 17: -#line 277 "OSUnserializeXML.y" +#line 323 "OSUnserializeXML.y" { (yyval) = (yyvsp[(2) - (2)]); (yyval)->next = (yyvsp[(1) - (2)]); @@ -1685,7 +1732,7 @@ yyreduce: break; case 18: -#line 292 "OSUnserializeXML.y" +#line 338 "OSUnserializeXML.y" { (yyval) = (yyvsp[(1) - (2)]); (yyval)->key = (OSSymbol *)(yyval)->object; (yyval)->object = (yyvsp[(2) - (2)])->object; @@ -1696,7 +1743,7 @@ yyreduce: break; case 19: -#line 301 "OSUnserializeXML.y" +#line 347 "OSUnserializeXML.y" { (yyval) = buildSymbol(STATE, (yyvsp[(1) - (1)])); // STATE->parsedObjectCount++; @@ -1708,42 +1755,42 @@ yyreduce: break; case 20: -#line 313 "OSUnserializeXML.y" +#line 359 "OSUnserializeXML.y" { (yyval) = (yyvsp[(1) - (2)]); (yyval)->elements = NULL; ;} break; case 21: -#line 316 
"OSUnserializeXML.y" +#line 362 "OSUnserializeXML.y" { (yyval) = (yyvsp[(1) - (3)]); (yyval)->elements = (yyvsp[(2) - (3)]); ;} break; case 23: -#line 322 "OSUnserializeXML.y" +#line 368 "OSUnserializeXML.y" { (yyval) = (yyvsp[(1) - (2)]); (yyval)->elements = NULL; ;} break; case 24: -#line 325 "OSUnserializeXML.y" +#line 371 "OSUnserializeXML.y" { (yyval) = (yyvsp[(1) - (3)]); (yyval)->elements = (yyvsp[(2) - (3)]); ;} break; case 26: -#line 331 "OSUnserializeXML.y" +#line 377 "OSUnserializeXML.y" { (yyval) = (yyvsp[(1) - (1)]); (yyval)->next = NULL; ;} break; case 27: -#line 334 "OSUnserializeXML.y" +#line 380 "OSUnserializeXML.y" { (yyval) = (yyvsp[(2) - (2)]); (yyval)->next = (yyvsp[(1) - (2)]); ;} @@ -1751,7 +1798,7 @@ yyreduce: /* Line 1267 of yacc.c. */ -#line 1701 "OSUnserializeXML.tab.c" +#line 1747 "OSUnserializeXML.tab.c" default: break; } YY_SYMBOL_PRINT("-> $$ =", yyr1[yyn], &yyval, &yyloc); @@ -1963,7 +2010,7 @@ yyreturn: } -#line 356 "OSUnserializeXML.y" +#line 402 "OSUnserializeXML.y" int @@ -2160,7 +2207,7 @@ getTag(parser_state_t *state, } static char * -getString(parser_state_t *state) +getString(parser_state_t *state, int *alloc_lengthp) { int c = currentChar(); int start, length, i, j; @@ -2191,6 +2238,9 @@ getString(parser_state_t *state) printf("OSUnserializeXML: can't alloc temp memory\n"); goto error; } + if (alloc_lengthp) { + *alloc_lengthp = length + 1; + } // copy out string in tempString // "&" -> '&', "<" -> '<', ">" -> '>' @@ -2252,7 +2302,10 @@ getString(parser_state_t *state) error: if (tempString) { - free(tempString); + safe_free(tempString, length + 1); + if (alloc_lengthp) { + *alloc_lengthp = 0; + } } return 0; } @@ -2326,8 +2379,9 @@ getCFEncodedData(parser_state_t *state, unsigned int *size) { int numeq = 0, cntr = 0; unsigned int acc = 0; - int tmpbufpos = 0, tmpbuflen = 0; - unsigned char *tmpbuf = (unsigned char *)malloc(DATA_ALLOC_SIZE); + int tmpbufpos = 0; + size_t tmpbuflen = DATA_ALLOC_SIZE; + unsigned char *tmpbuf = (unsigned char *)malloc(tmpbuflen); int c = currentChar(); *size = 0; @@ -2335,7 +2389,7 @@ getCFEncodedData(parser_state_t *state, unsigned int *size) while (c != '<') { c &= 0x7f; if (c == 0) { - free(tmpbuf); + safe_free(tmpbuf, tmpbuflen); return 0; } if (c == '=') { @@ -2355,8 +2409,9 @@ getCFEncodedData(parser_state_t *state, unsigned int *size) acc += __CFPLDataDecodeTable[c]; if (0 == (cntr & 0x3)) { if (tmpbuflen <= tmpbufpos + 2) { + size_t oldsize = tmpbuflen; tmpbuflen += DATA_ALLOC_SIZE; - tmpbuf = (unsigned char *)realloc(tmpbuf, tmpbuflen); + tmpbuf = (unsigned char *)realloc(tmpbuf, oldsize, tmpbuflen); } tmpbuf[tmpbufpos++] = (acc >> 16) & 0xff; if (numeq < 2) { @@ -2370,7 +2425,7 @@ getCFEncodedData(parser_state_t *state, unsigned int *size) } *size = tmpbufpos; if (*size == 0) { - free(tmpbuf); + safe_free(tmpbuf, tmpbuflen); return 0; } return tmpbuf; @@ -2382,7 +2437,8 @@ getHexData(parser_state_t *state, unsigned int *size) int c; unsigned char *d, *start, *lastStart; - start = lastStart = d = (unsigned char *)malloc(DATA_ALLOC_SIZE); + size_t buflen = DATA_ALLOC_SIZE; + start = lastStart = d = (unsigned char *)malloc(buflen); c = currentChar(); while (c != '<') { @@ -2419,7 +2475,9 @@ getHexData(parser_state_t *state, unsigned int *size) d++; if ((d - lastStart) >= DATA_ALLOC_SIZE) { int oldsize = d - start; - start = (unsigned char *)realloc(start, oldsize + DATA_ALLOC_SIZE); + assert(oldsize == buflen); + buflen += DATA_ALLOC_SIZE; + start = (unsigned char *)realloc(start, oldsize, buflen); d = lastStart 
= start + oldsize; } c = nextChar(); @@ -2431,7 +2489,7 @@ getHexData(parser_state_t *state, unsigned int *size) error: *size = 0; - free(start); + safe_free(start, buflen); return 0; } @@ -2445,7 +2503,7 @@ yylex(YYSTYPE *lvalp, parser_state_t *state) char attributes[TAG_MAX_ATTRIBUTES][TAG_MAX_LENGTH]; char values[TAG_MAX_ATTRIBUTES][TAG_MAX_LENGTH]; object_t *object; - + int alloc_length; top: c = currentChar(); @@ -2579,10 +2637,11 @@ top: if (tagType == TAG_EMPTY) { return SYNTAX_ERROR; } - object->string = getString(STATE); + object->string = getString(STATE, &alloc_length); if (!object->string) { return SYNTAX_ERROR; } + object->string_alloc_length = alloc_length; if ((getTag(STATE, tag, &attributeCount, attributes, values) != TAG_END) || strcmp(tag, "key")) { return SYNTAX_ERROR; @@ -2600,13 +2659,15 @@ top: if (!strcmp(tag, "string")) { if (tagType == TAG_EMPTY) { object->string = (char *)malloc(1); + object->string_alloc_length = 1; object->string[0] = 0; return STRING; } - object->string = getString(STATE); + object->string = getString(STATE, &alloc_length); if (!object->string) { return SYNTAX_ERROR; } + object->string_alloc_length = alloc_length; if ((getTag(STATE, tag, &attributeCount, attributes, values) != TAG_END) || strcmp(tag, "string")) { return SYNTAX_ERROR; @@ -2659,7 +2720,6 @@ newObject(parser_state_t *state) } else { o = (object_t *)malloc(sizeof(object_t)); // object_count++; - bzero(o, sizeof(object_t)); o->free = state->objects; state->objects = o; } @@ -2699,7 +2759,7 @@ cleanupObjects(parser_state_t *state) t = o; o = o->free; - free(t); + safe_free(t, sizeof(object_t)); // object_count--; } // printf("object_count = %d\n", object_count); @@ -2871,7 +2931,7 @@ buildSymbol(parser_state_t *state, object_t *o) rememberObject(state, o->idref, symbol); } - free(o->string); + safe_free(o->string, o->string_alloc_length); o->string = 0; o->object = symbol; @@ -2956,7 +3016,7 @@ OSUnserializeXML(const char *buffer, OSString **errorString) cleanupObjects(state); state->tags->release(); - free(state); + safe_free(state, sizeof(parser_state_t)); return object; } diff --git a/libkern/c++/OSUnserializeXML.y b/libkern/c++/OSUnserializeXML.y index 1769fb631..f4138fe38 100644 --- a/libkern/c++/OSUnserializeXML.y +++ b/libkern/c++/OSUnserializeXML.y @@ -124,11 +124,57 @@ static object_t *buildData(parser_state_t *state, object_t *o); static object_t *buildNumber(parser_state_t *state, object_t *o); static object_t *buildBoolean(parser_state_t *state, object_t *o); -#include +__BEGIN_DECLS +#include +__END_DECLS -#define malloc(s) kern_os_malloc(s) -#define realloc(a, s) kern_os_realloc(a, s) -#define free(a) kern_os_free((void *)a) +#define malloc(size) malloc_impl(size) +static inline void * +malloc_impl(size_t size) +{ + if (size == 0) { + return NULL; + } + return kheap_alloc_tag_bt(KHEAP_DEFAULT, size, + (zalloc_flags_t) (Z_WAITOK | Z_ZERO), + VM_KERN_MEMORY_LIBKERN); +} + +#define free(addr) free_impl(addr) +static inline void +free_impl(void *addr) +{ + kheap_free_addr(KHEAP_DEFAULT, addr); +} +static inline void +safe_free(void *addr, size_t size) +{ + if(addr) { + assert(size != 0); + kheap_free(KHEAP_DEFAULT, addr, size); + } +} + +#define realloc(addr, osize, nsize) realloc_impl(addr, osize, nsize) +static inline void * +realloc_impl(void *addr, size_t osize, size_t nsize) +{ + if (!addr) { + return malloc(nsize); + } + if (nsize == osize) { + return addr; + } + void *nmem = malloc(nsize); + if (!nmem) { + safe_free(addr, osize); + return NULL; + } + 
(void)memcpy(nmem, addr, (nsize > osize) ? osize : nsize); + safe_free(addr, osize); + + return nmem; +} %} %token ARRAY @@ -549,7 +595,7 @@ getTag(parser_state_t *state, } static char * -getString(parser_state_t *state) +getString(parser_state_t *state, int *alloc_lengthp) { int c = currentChar(); int start, length, i, j; @@ -580,6 +626,9 @@ getString(parser_state_t *state) printf("OSUnserializeXML: can't alloc temp memory\n"); goto error; } + if (alloc_lengthp != NULL) { + *alloc_lengthp = length + 1; + } // copy out string in tempString // "&" -> '&', "<" -> '<', ">" -> '>' @@ -641,7 +690,10 @@ getString(parser_state_t *state) error: if (tempString) { - free(tempString); + safe_free(tempString, length + 1); + if (alloc_lengthp != NULL) { + *alloc_lengthp = 0; + } } return 0; } @@ -715,8 +767,9 @@ getCFEncodedData(parser_state_t *state, unsigned int *size) { int numeq = 0, cntr = 0; unsigned int acc = 0; - int tmpbufpos = 0, tmpbuflen = 0; - unsigned char *tmpbuf = (unsigned char *)malloc(DATA_ALLOC_SIZE); + int tmpbufpos = 0; + size_t tmpbuflen = DATA_ALLOC_SIZE; + unsigned char *tmpbuf = (unsigned char *)malloc(tmpbuflen); int c = currentChar(); *size = 0; @@ -724,7 +777,7 @@ getCFEncodedData(parser_state_t *state, unsigned int *size) while (c != '<') { c &= 0x7f; if (c == 0) { - free(tmpbuf); + safe_free(tmpbuf, tmpbuflen); return 0; } if (c == '=') { @@ -744,8 +797,9 @@ getCFEncodedData(parser_state_t *state, unsigned int *size) acc += __CFPLDataDecodeTable[c]; if (0 == (cntr & 0x3)) { if (tmpbuflen <= tmpbufpos + 2) { + size_t oldsize = tmpbuflen; tmpbuflen += DATA_ALLOC_SIZE; - tmpbuf = (unsigned char *)realloc(tmpbuf, tmpbuflen); + tmpbuf = (unsigned char *)realloc(tmpbuf, oldsize, tmpbuflen); } tmpbuf[tmpbufpos++] = (acc >> 16) & 0xff; if (numeq < 2) { @@ -759,7 +813,7 @@ getCFEncodedData(parser_state_t *state, unsigned int *size) } *size = tmpbufpos; if (*size == 0) { - free(tmpbuf); + safe_free(tmpbuf, tmpbuflen); return 0; } return tmpbuf; @@ -771,7 +825,8 @@ getHexData(parser_state_t *state, unsigned int *size) int c; unsigned char *d, *start, *lastStart; - start = lastStart = d = (unsigned char *)malloc(DATA_ALLOC_SIZE); + size_t buflen = DATA_ALLOC_SIZE; + start = lastStart = d = (unsigned char *)malloc(buflen); c = currentChar(); while (c != '<') { @@ -808,7 +863,9 @@ getHexData(parser_state_t *state, unsigned int *size) d++; if ((d - lastStart) >= DATA_ALLOC_SIZE) { int oldsize = d - start; - start = (unsigned char *)realloc(start, oldsize + DATA_ALLOC_SIZE); + assert(oldsize == buflen); + buflen += DATA_ALLOC_SIZE; + start = (unsigned char *)realloc(start, oldsize, buflen); d = lastStart = start + oldsize; } c = nextChar(); @@ -820,7 +877,7 @@ getHexData(parser_state_t *state, unsigned int *size) error: *size = 0; - free(start); + safe_free(start, buflen); return 0; } @@ -834,6 +891,7 @@ yylex(YYSTYPE *lvalp, parser_state_t *state) char attributes[TAG_MAX_ATTRIBUTES][TAG_MAX_LENGTH]; char values[TAG_MAX_ATTRIBUTES][TAG_MAX_LENGTH]; object_t *object; + int alloc_length; top: c = currentChar(); @@ -968,10 +1026,11 @@ top: if (tagType == TAG_EMPTY) { return SYNTAX_ERROR; } - object->string = getString(STATE); + object->string = getString(STATE, &alloc_length); if (!object->string) { return SYNTAX_ERROR; } + object->string_alloc_length = alloc_length; if ((getTag(STATE, tag, &attributeCount, attributes, values) != TAG_END) || strcmp(tag, "key")) { return SYNTAX_ERROR; @@ -990,12 +1049,14 @@ top: if (tagType == TAG_EMPTY) { object->string = (char *)malloc(1); 
object->string[0] = 0; + object->string_alloc_length = 1; return STRING; } - object->string = getString(STATE); + object->string = getString(STATE, &alloc_length); if (!object->string) { return SYNTAX_ERROR; } + object->string_alloc_length = alloc_length; if ((getTag(STATE, tag, &attributeCount, attributes, values) != TAG_END) || strcmp(tag, "string")) { return SYNTAX_ERROR; @@ -1048,7 +1109,6 @@ newObject(parser_state_t *state) } else { o = (object_t *)malloc(sizeof(object_t)); // object_count++; - bzero(o, sizeof(object_t)); o->free = state->objects; state->objects = o; } @@ -1088,7 +1148,7 @@ cleanupObjects(parser_state_t *state) t = o; o = o->free; - free(t); + safe_free(t, sizeof(object_t)); // object_count--; } // printf("object_count = %d\n", object_count); @@ -1260,7 +1320,7 @@ buildSymbol(parser_state_t *state, object_t *o) rememberObject(state, o->idref, symbol); } - free(o->string); + safe_free(o->string, strlen(o->string) + 1); o->string = 0; o->object = symbol; @@ -1345,7 +1405,7 @@ OSUnserializeXML(const char *buffer, OSString **errorString) cleanupObjects(state); state->tags->release(); - free(state); + safe_free(state, sizeof(parser_state_t)); return object; } diff --git a/libkern/c++/priority_queue.cpp b/libkern/c++/priority_queue.cpp new file mode 100644 index 000000000..ba7998939 --- /dev/null +++ b/libkern/c++/priority_queue.cpp @@ -0,0 +1,437 @@ +/* + * Copyright (c) 2018 Apple Inc. All rights reserved. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. + * + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ + */ + +#if KERNEL +#include +#include + +#ifdef __LP64__ +static_assert(PRIORITY_QUEUE_ENTRY_CHILD_BITS >= VM_KERNEL_POINTER_SIGNIFICANT_BITS, + "Priority Queue child pointer packing failed"); +#endif +#endif // KERNEL + +#pragma mark priority queue helpers + +/* + * These traits allow to parametrize `struct pqueue` below. + */ + +template +struct pqueue_entry_traits { + /* + * Explain how to compare two elements in the natural order. 
+ */ + static inline int + compare(queue_t que, entry_t a, entry_t b); +}; + +template +struct pqueue_entry_traits { + static inline int + compare(queue_t que, priority_queue_entry_t e1, priority_queue_entry_t e2) + { + return que->pq_cmp_fn(e1, e2); + } +}; + +template +struct pqueue_entry_traits { + static inline int + compare(queue_t que __unused, + priority_queue_entry_deadline_t e1, priority_queue_entry_deadline_t e2) + { + return priority_heap_compare_ints(e1->deadline, e2->deadline); + } +}; + +template +struct pqueue_entry_traits { + static inline int + compare(queue_t que __unused, + priority_queue_entry_sched_t e1, priority_queue_entry_sched_t e2) + { + return (int)e2->key - (int)e1->key; + } +}; + +template +struct pqueue_entry_traits { + static inline int + compare(queue_t que __unused, + priority_queue_entry_stable_t e1, priority_queue_entry_stable_t e2) + { + /* + * the key is (2 * pri + preempted) so preempted entries + * sort "higher" than non preempted entries at the same priority. + */ + if (e1->key != e2->key) { + return (int)e2->key - (int)e1->key; + } + if (e1->stamp != e2->stamp) { + /* + * preempted entries: younger (bigger timestamp) is "higher" + * non preempted entries: older (smaller timestamp) is "higher" + */ + if (e1->key & PRIORITY_QUEUE_ENTRY_PREEMPTED) { + return e1->stamp < e2->stamp ? 1 : -1; + } else { + return e1->stamp > e2->stamp ? 1 : -1; + } + } + return 0; + } +}; + +#pragma mark main template + +/* + * Template for our priority queue. + * + * It is parametrized with: + * - `queue_t`: the queue type + * - `entry_t`: the element type + * + * It will use: + * - priority_queue_is_min_heap() to determine if it is a min/max heap + * - pqueue_entry_traits::compare for the ordering + */ +template +struct pqueue { + using entry_traits = pqueue_entry_traits; + + static inline void + pack_child(entry_t e, const entry_t child) + { + e->child = (long)child; + } + + static inline entry_t + unpack_child(entry_t e) + { + return (entry_t)e->child; + } + +private: + static inline bool + merge_parent_is_subtree_b(queue_t que, entry_t subtree_a, entry_t subtree_b) + { + if (priority_queue_is_max_heap((queue_t)nullptr)) { + return entry_traits::compare(que, subtree_a, subtree_b) > 0; + } + return entry_traits::compare(que, subtree_a, subtree_b) < 0; + } + + static inline entry_t + merge_pair_inline(queue_t que, entry_t subtree_a, entry_t subtree_b) + { + entry_t merge_result = NULL; + if (subtree_a == NULL) { + merge_result = subtree_b; + } else if (subtree_b == NULL || (subtree_a == subtree_b)) { + merge_result = subtree_a; + } else { + entry_t parent = subtree_a; + entry_t child = subtree_b; + if (merge_parent_is_subtree_b(que, subtree_a, subtree_b)) { + parent = subtree_b; + child = subtree_a; + } + /* Insert the child as the first element in the parent's child list */ + child->next = unpack_child(parent); + child->prev = parent; + if (unpack_child(parent) != NULL) { + unpack_child(parent)->prev = child; + } + /* Create the parent child relationship */ + pack_child(parent, child); + parent->next = NULL; + parent->prev = NULL; + merge_result = parent; + } + return merge_result; + } + + OS_NOINLINE + static entry_t + merge_pair(queue_t que, entry_t subtree_a, entry_t subtree_b) + { + return merge_pair_inline(que, subtree_a, subtree_b); + } + + OS_NOINLINE + static entry_t + meld_pair(queue_t que, entry_t elt) + { + entry_t pq_meld_result = NULL; + entry_t pair_list = NULL; + + assert(elt); // caller needs to check this. 
+ + /* Phase 1: */ + /* Split the list into a set of pairs going front to back. */ + /* Hook these pairs onto an intermediary list in reverse order of traversal.*/ + + do { + /* Consider two elements at a time for pairing */ + entry_t pair_item_a = elt; + entry_t pair_item_b = elt->next; + if (pair_item_b == NULL) { + /* Odd number of elements in the list; link the odd element */ + /* as it is on the intermediate list. */ + pair_item_a->prev = pair_list; + pair_list = pair_item_a; + break; + } + /* Found two elements to pair up */ + elt = pair_item_b->next; + entry_t pair = merge_pair_inline(que, pair_item_a, pair_item_b); + /* Link the pair onto the intermediary list */ + pair->prev = pair_list; + pair_list = pair; + } while (elt != NULL); + + /* Phase 2: Merge all the pairs in the pair_list */ + do { + elt = pair_list->prev; + pq_meld_result = merge_pair_inline(que, pq_meld_result, pair_list); + pair_list = elt; + } while (pair_list != NULL); + + return pq_meld_result; + } + + static inline void + list_remove(entry_t elt) + { + assert(elt->prev != NULL); + /* Check if elt is head of list at its level; */ + /* If yes, make the next node the head at that level */ + /* Else, remove elt from the list at that level */ + if (unpack_child(elt->prev) == elt) { + pack_child(elt->prev, elt->next); + } else { + elt->prev->next = elt->next; + } + /* Update prev for next element in list */ + if (elt->next != NULL) { + elt->next->prev = elt->prev; + } + } + + static inline bool + sift_down(queue_t que, entry_t elt) + { + bool was_root = remove(que, elt); + insert(que, elt); + return was_root; + } + + static inline bool + sift_up(queue_t que, entry_t elt) + { + if (elt == que->pq_root) { + return true; + } + + /* Remove the element from its current level list */ + list_remove(elt); + /* Re-insert the element into the heap with a merge */ + return insert(que, elt); + } + + static inline entry_t + remove_non_root(queue_t que, entry_t elt) + { + entry_t child, new_root; + + /* To remove a non-root element with children levels, */ + /* - Remove element from its current level list */ + /* - Pairwise split all the elements in the child level list */ + /* - Meld all these splits (right-to-left) to form new subtree */ + /* - Merge the root subtree with the newly formed subtree */ + list_remove(elt); + + child = unpack_child(elt); + if (child) { + child = meld_pair(que, child); + new_root = merge_pair(que, que->pq_root, child); + que->pq_root = new_root; + } + + return elt; + } + +public: + + /* + * exposed interfaces + */ + + OS_NOINLINE + static void + destroy(queue_t que, uintptr_t offset, void (^callback)(void *e)) + { + assert(callback != NULL); + entry_t head = que->pq_root; + entry_t tail = head; + + while (head != NULL) { + entry_t child_list = unpack_child(head); + if (child_list) { + tail->next = child_list; + while (tail->next) { + tail = tail->next; + } + } + + entry_t elt = head; + head = head->next; + callback((void *)((char *)elt - offset)); + } + + /* poison the queue now that it's destroyed */ + que->pq_root = (entry_t)(~0ul); + } + + static inline bool + insert(queue_t que, entry_t elt) + { + return (que->pq_root = merge_pair(que, que->pq_root, elt)) == elt; + } + + static inline entry_t + remove_root(queue_t que, entry_t old_root) + { + entry_t new_root = unpack_child(old_root); + que->pq_root = new_root ? 
meld_pair(que, new_root) : NULL; + return old_root; + } + + static inline bool + remove(queue_t que, entry_t elt) + { + if (elt == que->pq_root) { + remove_root(que, elt); + elt->next = elt->prev = NULL; + elt->child = 0; + return true; + } else { + remove_non_root(que, elt); + elt->next = elt->prev = NULL; + elt->child = 0; + return false; + } + } + + static inline bool + entry_increased(queue_t que, entry_t elt) + { + if (priority_queue_is_max_heap(que)) { + return sift_up(que, elt); + } else { + return sift_down(que, elt); + } + } + + static inline bool + entry_decreased(queue_t que, entry_t elt) + { + if (priority_queue_is_min_heap(que)) { + return sift_up(que, elt); + } else { + return sift_down(que, elt); + } + } +}; + +#pragma mark instantiation + +#define PRIORITY_QUEUE_MAKE_IMPL(pqueue_t, queue_t, entry_t) \ + \ +using pqueue_t = pqueue; \ + \ +extern "C" { \ + \ +__pqueue_overloadable void \ +_priority_queue_destroy(queue_t que, uintptr_t offset, void (^cb)(void *e)) \ +{ \ + pqueue_t::destroy(que, offset, cb); \ +} \ + \ +__pqueue_overloadable extern bool \ +priority_queue_insert(queue_t que, entry_t elt) \ +{ \ + return pqueue_t::insert(que, elt); \ +} \ + \ +__pqueue_overloadable extern entry_t \ +_priority_queue_remove_root(queue_t que) \ +{ \ + return pqueue_t::remove_root(que, que->pq_root); \ +} \ + \ +__pqueue_overloadable extern bool \ +priority_queue_remove(queue_t que, entry_t elt) \ +{ \ + return pqueue_t::remove(que, elt); \ +} \ + \ +__pqueue_overloadable extern bool \ +priority_queue_entry_decreased(queue_t que, entry_t elt) \ +{ \ + return pqueue_t::entry_decreased(que, elt); \ +} \ + \ +__pqueue_overloadable extern bool \ +priority_queue_entry_increased(queue_t que, entry_t elt) \ +{ \ + return pqueue_t::entry_increased(que, elt); \ +} \ + \ +} + +PRIORITY_QUEUE_MAKE_IMPL(pqueue_min_t, + struct priority_queue_min *, priority_queue_entry_t); +PRIORITY_QUEUE_MAKE_IMPL(pqueue_max_t, + struct priority_queue_max *, priority_queue_entry_t); + +PRIORITY_QUEUE_MAKE_IMPL(pqueue_sched_min_t, + struct priority_queue_sched_min *, priority_queue_entry_sched_t); +PRIORITY_QUEUE_MAKE_IMPL(pqueue_sched_max_t, + struct priority_queue_sched_max *, priority_queue_entry_sched_t); + +PRIORITY_QUEUE_MAKE_IMPL(pqueue_deadline_min_t, + struct priority_queue_deadline_min *, priority_queue_entry_deadline_t); +PRIORITY_QUEUE_MAKE_IMPL(pqueue_deadline_max_t, + struct priority_queue_deadline_max *, priority_queue_entry_deadline_t); + +PRIORITY_QUEUE_MAKE_IMPL(pqueue_sched_stable_min_t, + struct priority_queue_sched_stable_min *, priority_queue_entry_stable_t); +PRIORITY_QUEUE_MAKE_IMPL(pqueue_sched_stable_max_t, + struct priority_queue_sched_stable_max *, priority_queue_entry_stable_t); diff --git a/libkern/conf/Makefile b/libkern/conf/Makefile index 05c4b79cf..51eddb889 100644 --- a/libkern/conf/Makefile +++ b/libkern/conf/Makefile @@ -23,7 +23,7 @@ endif $(TARGET)/$(CURRENT_KERNEL_CONFIG)/Makefile: $(SRCROOT)/SETUP/config/doconf $(OBJROOT)/SETUP/config $(DOCONFDEPS) $(_v)$(MKDIR) $(TARGET)/$(CURRENT_KERNEL_CONFIG) - $(_v)$(SRCROOT)/SETUP/config/doconf -c -cpu $(DOCONF_ARCH_CONFIG_LC) -soc $(CURRENT_MACHINE_CONFIG_LC) -d $(TARGET)/$(CURRENT_KERNEL_CONFIG) -s $(SOURCE) -m $(MASTERCONFDIR) $(CURRENT_KERNEL_CONFIG) + $(_v)$(SRCROOT)/SETUP/config/doconf -c -cpu $(DOCONF_ARCH_CONFIG_LC) -soc $(CURRENT_MACHINE_CONFIG_LC) -platform $(PLATFORM) -d $(TARGET)/$(CURRENT_KERNEL_CONFIG) -s $(SOURCE) -m $(MASTERCONFDIR) $(CURRENT_KERNEL_CONFIG) do_all: $(TARGET)/$(CURRENT_KERNEL_CONFIG)/Makefile 
$(_v)${MAKE} \ diff --git a/libkern/conf/Makefile.arm b/libkern/conf/Makefile.arm index 04938ae54..b9ac39535 100644 --- a/libkern/conf/Makefile.arm +++ b/libkern/conf/Makefile.arm @@ -2,18 +2,22 @@ #BEGIN Machine dependent Makefile fragment for arm ###################################################################### -# The following files cast opaque pointers to more specific -# structures -OBJS_NO_CAST_ALIGN = kxld_kext.o kxld_reloc.o kxld_sect.o kxld_seg.o \ - kxld_state.o kxld_sym.o kxld_symtab.o kxld_util.o \ - kxld_srcversion.o kxld_splitinfolc.o kxld_uuid.o kxld_vtable.o uuid.o - -$(foreach file,$(OBJS_NO_CAST_ALIGN),$(eval $(call add_perfile_cflags,$(file),-Wno-cast-align))) - -OSKext.cpo_CXXWARNFLAGS_ADD += -Wno-cast-align -Wno-error=shadow -OSMetaClass.cpo_CXXWARNFLAGS_ADD += -Wno-cast-align -OSKextLib.cpo_CXXWARNFLAGS_ADD += -Wno-cast-align -OSUnserialize.cpo_CXXWARNFLAGS_ADD += -Wno-cast-align +# +# Diagnostic opt-outs. We need to make this list empty. +# +# DO NOT ADD MORE HERE. +# +# -Wno-implicit-int-conversion +OSSpinLock.o_CFLAGS_ADD += -Wno-implicit-int-conversion +# -Wno-shorten-64-to-32 +OSArray.cpo_CXXWARNFLAGS_ADD += -Wno-shorten-64-to-32 +OSDictionary.cpo_CXXWARNFLAGS_ADD += -Wno-shorten-64-to-32 +OSKextVersion.o_CFLAGS_ADD += -Wno-shorten-64-to-32 +OSOrderedSet.cpo_CXXWARNFLAGS_ADD += -Wno-shorten-64-to-32 +OSRuntime.cpo_CXXWARNFLAGS_ADD += -Wno-shorten-64-to-32 +OSSerializeBinary.cpo_CXXWARNFLAGS_ADD += -Wno-shorten-64-to-32 +OSString.cpo_CXXWARNFLAGS_ADD += -Wno-shorten-64-to-32 +OSSymbol.cpo_CXXWARNFLAGS_ADD += -Wno-shorten-64-to-32 ###################################################################### #END Machine dependent Makefile fragment for arm diff --git a/libkern/conf/Makefile.arm64 b/libkern/conf/Makefile.arm64 index 04938ae54..d6d0d935e 100644 --- a/libkern/conf/Makefile.arm64 +++ b/libkern/conf/Makefile.arm64 @@ -1,19 +1,11 @@ ###################################################################### #BEGIN Machine dependent Makefile fragment for arm ###################################################################### - -# The following files cast opaque pointers to more specific -# structures -OBJS_NO_CAST_ALIGN = kxld_kext.o kxld_reloc.o kxld_sect.o kxld_seg.o \ - kxld_state.o kxld_sym.o kxld_symtab.o kxld_util.o \ - kxld_srcversion.o kxld_splitinfolc.o kxld_uuid.o kxld_vtable.o uuid.o - -$(foreach file,$(OBJS_NO_CAST_ALIGN),$(eval $(call add_perfile_cflags,$(file),-Wno-cast-align))) - -OSKext.cpo_CXXWARNFLAGS_ADD += -Wno-cast-align -Wno-error=shadow -OSMetaClass.cpo_CXXWARNFLAGS_ADD += -Wno-cast-align -OSKextLib.cpo_CXXWARNFLAGS_ADD += -Wno-cast-align -OSUnserialize.cpo_CXXWARNFLAGS_ADD += -Wno-cast-align +# +# XXX: CFLAGS +# +CWARNFLAGS = $(CWARNFLAGS_STD) -Wshorten-64-to-32 -Wimplicit-int-conversion +CXXWARNFLAGS = $(CXXWARNFLAGS_STD) -Wshorten-64-to-32 -Wimplicit-int-conversion ###################################################################### #END Machine dependent Makefile fragment for arm diff --git a/libkern/conf/Makefile.template b/libkern/conf/Makefile.template index fa45a7f1f..077b90f9b 100644 --- a/libkern/conf/Makefile.template +++ b/libkern/conf/Makefile.template @@ -19,32 +19,6 @@ include $(MakeInc_def) CFLAGS+= -include meta_features.h -DLIBKERN_KERNEL_PRIVATE -DOSALLOCDEBUG=1 SFLAGS+= -include meta_features.h -# Objects that don't want -Wcast-align warning (8474835) -OSKextLib.cpo_CXXWARNFLAGS_ADD = -Wno-cast-align -OSKext.cpo_CXXWARNFLAGS_ADD = -Wno-cast-align -Wno-cast-qual -OSMetaClass.cpo_CXXWARNFLAGS_ADD = -Wno-cast-align 
-OSRuntime.cpo_CXXWARNFLAGS_ADD += -Wno-missing-prototypes -OSUnserialize.cpo_CXXWARNFLAGS_ADD = -Wno-cast-align -Wno-unreachable-code-break -corecrypto_md5.o_CWARNFLAGS_ADD = -Wno-cast-align -corecrypto_sha1.o_CWARNFLAGS_ADD = -Wno-cast-align - -# zlib is 3rd party source -compress.o_CWARNFLAGS_ADD = -Wno-cast-qual -deflate.o_CWARNFLAGS_ADD = -Wno-cast-qual -infback.o_CWARNFLAGS_ADD = -Wno-cast-qual -inffast.o_CWARNFLAGS_ADD = -Wno-cast-qual -inflate.o_CWARNFLAGS_ADD = -Wno-cast-qual -trees.o_CWARNFLAGS_ADD = -Wno-cast-qual -uncompr.o_CWARNFLAGS_ADD = -Wno-cast-qual - -# libclosure -runtime.cpo_CXXWARNFLAGS_ADD = -Wno-cast-qual - - -# warnings in bison-generated code -OSUnserializeXML.cpo_CXXWARNFLAGS_ADD += -Wno-uninitialized -Wno-unreachable-code -Wno-unreachable-code-break -Wno-zero-as-null-pointer-constant -OSUnserialize.cpo_CXXWARNFLAGS_ADD += -Wno-unreachable-code -Wno-zero-as-null-pointer-constant - # Runtime support functions don't interact well with LTO (9294679) stack_protector.o_CFLAGS_ADD += $(CFLAGS_NOLTO_FLAG) @@ -77,6 +51,87 @@ COMP_SUBDIRS = %MACHDEP +# +# Machine-independent per-file flags +# + +# +# Diagnostic opt-outs. We need to make this list empty. +# +# DO NOT ADD MORE HERE. +# +# -Wno-cast-align +OSKext.cpo_CXXWARNFLAGS_ADD += -Wno-cast-align +OSKextLib.cpo_CXXWARNFLAGS_ADD += -Wno-cast-align +OSMetaClass.cpo_CXXWARNFLAGS_ADD += -Wno-cast-align +OSUnserialize.cpo_CXXWARNFLAGS_ADD += -Wno-cast-align +corecrypto_md5.o_CFLAGS_ADD += -Wno-cast-align +corecrypto_sha1.o_CFLAGS_ADD += -Wno-cast-align +# -Wno-cast-qual +OSKext.cpo_CXXWARNFLAGS_ADD += -Wno-cast-qual +compress.o_CFLAGS_ADD += -Wno-cast-qual +deflate.o_CFLAGS_ADD += -Wno-cast-qual +runtime.cpo_CXXWARNFLAGS_ADD += -Wno-cast-qual +trees.o_CFLAGS_ADD += -Wno-cast-qual +uncompr.o_CFLAGS_ADD += -Wno-cast-qual +# -Wno-implicit-int-conversion +OSUnserialize.cpo_CXXWARNFLAGS_ADD += -Wno-implicit-int-conversion +OSUnserializeXML.cpo_CXXWARNFLAGS_ADD += -Wno-implicit-int-conversion +kxld_sym.o_CFLAGS_ADD += -Wno-implicit-int-conversion +log.o_CFLAGS_ADD += -Wno-implicit-int-conversion +scanf.o_CFLAGS_ADD += -Wno-implicit-int-conversion +# -Wno-missing-prototypes +OSRuntime.cpo_CXXWARNFLAGS_ADD += -Wno-missing-prototypes +# -Wno-shorten-64-to-32 +OSKext.cpo_CXXWARNFLAGS_ADD += -Wno-shorten-64-to-32 +OSUnserialize.cpo_CXXWARNFLAGS_ADD += -Wno-shorten-64-to-32 +OSUnserializeXML.cpo_CXXWARNFLAGS_ADD += -Wno-shorten-64-to-32 +log.o_CFLAGS_ADD += -Wno-shorten-64-to-32 +scanf.o_CFLAGS_ADD += -Wno-shorten-64-to-32 +# -Wno-sign-conversion +OSCollection.cpo_CXXWARNFLAGS_ADD += -Wno-sign-conversion +OSData.cpo_CXXWARNFLAGS_ADD += -Wno-sign-conversion +OSDebug.cpo_CXXWARNFLAGS_ADD += -Wno-sign-conversion +OSKext.cpo_CXXWARNFLAGS_ADD += -Wno-sign-conversion +OSKextVersion.o_CFLAGS_ADD += -Wno-sign-conversion +OSMetaClass.cpo_CXXWARNFLAGS_ADD += -Wno-sign-conversion +OSNumber.cpo_CXXWARNFLAGS_ADD += -Wno-sign-conversion +OSObject.cpo_CXXWARNFLAGS_ADD += -Wno-sign-conversion +OSRuntime.cpo_CXXWARNFLAGS_ADD += -Wno-sign-conversion +OSSerializeBinary.cpo_CXXWARNFLAGS_ADD += -Wno-sign-conversion +OSSet.cpo_CXXWARNFLAGS_ADD += -Wno-sign-conversion +OSString.cpo_CXXWARNFLAGS_ADD += -Wno-sign-conversion +OSUnserialize.cpo_CXXWARNFLAGS_ADD += -Wno-sign-conversion +OSUnserializeXML.cpo_CXXWARNFLAGS_ADD += -Wno-sign-conversion +adler32.o_CFLAGS_ADD += -Wno-sign-conversion +corecrypto_aes.o_CFLAGS_ADD += -Wno-sign-conversion +corecrypto_aesxts.o_CFLAGS_ADD += -Wno-sign-conversion +deflate.o_CFLAGS_ADD += -Wno-sign-conversion 
+inet_aton.o_CFLAGS_ADD += -Wno-sign-conversion +infback.o_CFLAGS_ADD += -Wno-sign-conversion +inflate.o_CFLAGS_ADD += -Wno-sign-conversion +kxld_array.o_CFLAGS_ADD += -Wno-sign-conversion +kxld_copyright.o_CFLAGS_ADD += -Wno-sign-conversion +kxld_dict.o_CFLAGS_ADD += -Wno-sign-conversion +kxld_object.o_CFLAGS_ADD += -Wno-sign-conversion +kxld_reloc.o_CFLAGS_ADD += -Wno-sign-conversion +kxld_sym.o_CFLAGS_ADD += -Wno-sign-conversion +kxld_symtab.o_CFLAGS_ADD += -Wno-sign-conversion +kxld_util.o_CFLAGS_ADD += -Wno-sign-conversion +log.o_CFLAGS_ADD += -Wno-sign-conversion +refcnt.o_CFLAGS_ADD += -Wno-sign-conversion +scanf.o_CFLAGS_ADD += -Wno-sign-conversion +trees.o_CFLAGS_ADD += -Wno-sign-conversion +uuid.o_CFLAGS_ADD += -Wno-sign-conversion +# -Wno-unreachable-code +OSUnserialize.cpo_CXXWARNFLAGS_ADD += -Wno-unreachable-code +OSUnserializeXML.cpo_CXXWARNFLAGS_ADD += -Wno-unreachable-code +OSUnserialize.cpo_CXXWARNFLAGS_ADD += -Wno-unreachable-code-break +OSUnserializeXML.cpo_CXXWARNFLAGS_ADD += -Wno-unreachable-code-break +# -Wno-zero-as-null-pointer-constant +OSUnserialize.cpo_CXXWARNFLAGS_ADD += -Wno-zero-as-null-pointer-constant +OSUnserializeXML.cpo_CXXWARNFLAGS_ADD += -Wno-zero-as-null-pointer-constant + # Rebuild if per-file overrides change ${OBJS}: $(firstword $(MAKEFILE_LIST)) @@ -97,7 +152,7 @@ $(COMPONENT).filelist: $(OBJS) $(SEG_HACK) -n __HIB -o $${hib_file}__ $${hib_file} || exit 1; \ mv $${hib_file}__ $${hib_file} || exit 1; \ done - $(call makelog,$(ColorL)LDFILELIST$(Color0) $(ColorLF)$(COMPONENT)$(Color0)) + @$(LOG_LDFILELIST) "$(COMPONENT)" $(_v)for obj in ${OBJS}; do \ $(ECHO) $(TARGET)/$(CURRENT_KERNEL_CONFIG)/$${obj}; \ done > $(COMPONENT).filelist diff --git a/libkern/conf/Makefile.x86_64 b/libkern/conf/Makefile.x86_64 index 7b0de925d..959309959 100644 --- a/libkern/conf/Makefile.x86_64 +++ b/libkern/conf/Makefile.x86_64 @@ -1,6 +1,26 @@ ###################################################################### #BEGIN Machine dependent Makefile fragment for x86_64 ###################################################################### +# +# XXX: CFLAGS +# +CWARNFLAGS = $(CWARNFLAGS_STD) -Wshorten-64-to-32 -Wimplicit-int-conversion +CXXWARNFLAGS = $(CXXWARNFLAGS_STD) -Wshorten-64-to-32 -Wimplicit-int-conversion + +mkext.o_CFLAGS_ADD += -Wno-shorten-64-to-32 -Wno-implicit-int-conversion +inet_ntop.o_CFLAGS_ADD += -Wno-shorten-64-to-32 -Wno-implicit-int-conversion +inet_pton.o_CFLAGS_ADD += -Wno-shorten-64-to-32 -Wno-implicit-int-conversion +log.o_CFLAGS_ADD += -Wno-shorten-64-to-32 -Wno-implicit-int-conversion +z_crc32.o_CFLAGS_ADD += -Wno-shorten-64-to-32 -Wno-implicit-int-conversion +deflate.o_CFLAGS_ADD += -Wno-shorten-64-to-32 -Wno-implicit-int-conversion +trees.o_CFLAGS_ADD += -Wno-shorten-64-to-32 -Wno-implicit-int-conversion +trees.o_CFLAGS_ADD += -Wno-shorten-64-to-32 -Wno-implicit-int-conversion +corecrypto_sha1.o_CFLAGS_ADD += -Wno-shorten-64-to-32 -Wno-implicit-int-conversion +corecrypto_md5.o_CFLAGS_ADD += -Wno-shorten-64-to-32 -Wno-implicit-int-conversion +corecrypto_aes.o_CFLAGS_ADD += -Wno-shorten-64-to-32 -Wno-implicit-int-conversion +kxld_sym.o_CFLAGS_ADD += -Wno-shorten-64-to-32 -Wno-implicit-int-conversion +OSUnserializeXML.cpo_CFLAGS_ADD += -Wno-shorten-64-to-32 -Wno-implicit-int-conversion +OSUnserialize.cpo_CFLAGS_ADD += -Wno-shorten-64-to-32 -Wno-implicit-int-conversion ###################################################################### #END Machine dependent Makefile fragment for x86_64 diff --git a/libkern/conf/files b/libkern/conf/files index 
95674f7f4..e5b444617 100644 --- a/libkern/conf/files +++ b/libkern/conf/files @@ -13,6 +13,7 @@ OPTIONS/zlibc optional zlibc # libkern libkern/gen/OSAtomicOperations.c standard +libkern/gen/OSSpinLock.c standard libkern/gen/OSDebug.cpp standard libkern/c++/OSMetaClass.cpp optional libkerncpp @@ -37,7 +38,8 @@ libkern/c++/OSSymbol.cpp optional libkerncpp libkern/c++/OSUnserialize.cpp optional libkerncpp libkern/c++/OSUnserializeXML.cpp optional libkerncpp libkern/c++/OSSerializeBinary.cpp optional libkerncpp -libkern/c++/OSCompat.cpp optional libkerncpp + +libkern/c++/priority_queue.cpp standard libkern/OSKextLib.cpp optional libkerncpp libkern/mkext.c standard @@ -97,6 +99,7 @@ libkern/crypto/corecrypto_chacha20poly1305.c optional crypto libkern/img4/interface.c standard libkern/stack_protector.c standard +libkern/ptrauth_utils.c standard libkern/kxld/kxld.c optional config_kxld libkern/kxld/kxld_array.c optional config_kxld @@ -116,7 +119,6 @@ libkern/kxld/kxld_util.c optional config_kxld libkern/kxld/kxld_uuid.c optional config_kxld libkern/kxld/kxld_versionmin.c optional config_kxld libkern/kxld/kxld_vtable.c optional config_kxld -libkern/kxld/kxld_stubs.c standard libkern/libclosure/runtime.cpp optional config_blocks libkern/libclosure/libclosuredata.c optional config_blocks diff --git a/libkern/crypto/corecrypto_aes.c b/libkern/crypto/corecrypto_aes.c index 0105da4e7..20ff4f1ca 100644 --- a/libkern/crypto/corecrypto_aes.c +++ b/libkern/crypto/corecrypto_aes.c @@ -210,7 +210,7 @@ aes_encrypt_gcm(const unsigned char *in_blk, unsigned int num_bytes, } aes_rval -aes_encrypt_finalize_gcm(unsigned char *tag, unsigned int tag_bytes, ccgcm_ctx *ctx) +aes_encrypt_finalize_gcm(unsigned char *tag, size_t tag_bytes, ccgcm_ctx *ctx) { const struct ccmode_gcm *gcm = g_crypto_funcs->ccaes_gcm_encrypt; if (!gcm) { @@ -248,7 +248,7 @@ aes_decrypt_key_with_iv_gcm(const unsigned char *key, int key_len, const unsigne } aes_rval -aes_decrypt_set_iv_gcm(const unsigned char *in_iv, unsigned int len, ccgcm_ctx *ctx) +aes_decrypt_set_iv_gcm(const unsigned char *in_iv, size_t len, ccgcm_ctx *ctx) { const struct ccmode_gcm *gcm = g_crypto_funcs->ccaes_gcm_decrypt; if (!gcm) { @@ -309,7 +309,7 @@ aes_decrypt_gcm(const unsigned char *in_blk, unsigned int num_bytes, } aes_rval -aes_decrypt_finalize_gcm(unsigned char *tag, unsigned int tag_bytes, ccgcm_ctx *ctx) +aes_decrypt_finalize_gcm(unsigned char *tag, size_t tag_bytes, ccgcm_ctx *ctx) { const struct ccmode_gcm *gcm = g_crypto_funcs->ccaes_gcm_decrypt; if (!gcm) { @@ -324,7 +324,7 @@ aes_decrypt_finalize_gcm(unsigned char *tag, unsigned int tag_bytes, ccgcm_ctx * return ccgcm_reset(gcm, ctx); } -unsigned +size_t aes_encrypt_get_ctx_size_gcm(void) { const struct ccmode_gcm *gcm = g_crypto_funcs->ccaes_gcm_encrypt; @@ -334,7 +334,7 @@ aes_encrypt_get_ctx_size_gcm(void) return cc_ctx_sizeof(ccgcm_ctx, gcm->size); } -unsigned +size_t aes_decrypt_get_ctx_size_gcm(void) { const struct ccmode_gcm *gcm = g_crypto_funcs->ccaes_gcm_decrypt; diff --git a/libkern/crypto/corecrypto_aesxts.c b/libkern/crypto/corecrypto_aesxts.c index 61c2d0d63..533e9d29c 100644 --- a/libkern/crypto/corecrypto_aesxts.c +++ b/libkern/crypto/corecrypto_aesxts.c @@ -55,7 +55,7 @@ xts_start(uint32_t cipher __unused, // ignored - we're doing this for xts-aes on enc = g_crypto_funcs->ccaes_xts_encrypt; dec = g_crypto_funcs->ccaes_xts_decrypt; - if (!enc && !dec) { + if (!enc || !dec) { panic("%s: xts mode not registered? 
enc=%p, dec=%p\n", __FUNCTION__, enc, dec); } diff --git a/libkern/crypto/corecrypto_md5.c b/libkern/crypto/corecrypto_md5.c index 63906e576..242dcdbac 100644 --- a/libkern/crypto/corecrypto_md5.c +++ b/libkern/crypto/corecrypto_md5.c @@ -31,7 +31,7 @@ MD5ToDi(const struct ccdigest_info *di, MD5_CTX *md5_ctx, struct ccdigest_ctx *d { uint64_t count = getCount(md5_ctx); - ccdigest_num(di, di_ctx) = count % di->block_size; + ccdigest_num(di, di_ctx) = (unsigned)(count % di->block_size); ccdigest_nbits(di, di_ctx) = (count - ccdigest_num(di, di_ctx)) * 8; memcpy(ccdigest_data(di, di_ctx), md5_ctx->buffer, di->block_size); memcpy(ccdigest_state_ccn(di, di_ctx), md5_ctx->state, di->state_size); diff --git a/libkern/crypto/corecrypto_sha1.c b/libkern/crypto/corecrypto_sha1.c index 3b2b57cca..a6badffc0 100644 --- a/libkern/crypto/corecrypto_sha1.c +++ b/libkern/crypto/corecrypto_sha1.c @@ -31,7 +31,7 @@ SHA1ToDi(const struct ccdigest_info *di, SHA1_CTX *sha1_ctx, struct ccdigest_ctx { uint64_t count = getCount(sha1_ctx); - ccdigest_num(di, di_ctx) = count % di->block_size; + ccdigest_num(di, di_ctx) = (unsigned)(count % di->block_size); ccdigest_nbits(di, di_ctx) = (count - ccdigest_num(di, di_ctx)) * 8; memcpy(ccdigest_data(di, di_ctx), sha1_ctx->m.b8, di->block_size); memcpy(ccdigest_state_ccn(di, di_ctx), sha1_ctx->h.b8, di->state_size); diff --git a/libkern/firehose/Makefile b/libkern/firehose/Makefile index 41be8924a..7861dcfae 100644 --- a/libkern/firehose/Makefile +++ b/libkern/firehose/Makefile @@ -3,13 +3,14 @@ export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir +# PRIVATE_DATAFILES go to /usr/local/include +LIBKERN_USE_USR_LOCAL_INCLUDE := 1 + include $(MakeInc_cmd) include $(MakeInc_def) INSTALLHDRS_SKIP_HOST = NO -LCLDIR = $(SDKHEADERSROOT)/usr/local/include - KERNELFILES = DATAFILES = diff --git a/libkern/firehose/chunk_private.h b/libkern/firehose/chunk_private.h index dece91a37..ac3fbe92e 100644 --- a/libkern/firehose/chunk_private.h +++ b/libkern/firehose/chunk_private.h @@ -21,12 +21,7 @@ #ifndef __FIREHOSE_CHUNK_PRIVATE__ #define __FIREHOSE_CHUNK_PRIVATE__ -#if KERNEL -#include -#endif -#include #include -#include #include "firehose_types_private.h" #include "tracepoint_private.h" @@ -42,7 +37,7 @@ __BEGIN_DECLS ((((pos).fcp_pos >> 48) & 0x1ff) == (uint16_t)stream) typedef union { - _Atomic(uint64_t) fcp_atomic_pos; + os_atomic(uint64_t) fcp_atomic_pos; uint64_t fcp_pos; struct { uint16_t fcp_next_entry_offs; @@ -73,14 +68,15 @@ typedef struct firehose_chunk_range_s { OS_ALWAYS_INLINE static inline bool -firehose_chunk_pos_fits(firehose_chunk_pos_u pos, uint16_t size) +firehose_chunk_pos_fits(firehose_chunk_pos_u *pos, uint16_t size) { - return pos.fcp_next_entry_offs + size <= pos.fcp_private_offs; + return pos->fcp_next_entry_offs + size <= pos->fcp_private_offs; } #define FIREHOSE_CHUNK_TRY_RESERVE_FAIL_ENQUEUE (-1) #define FIREHOSE_CHUNK_TRY_RESERVE_FAIL ( 0) +#if OS_ATOMIC_HAS_STARVATION_FREE_RMW || !OS_ATOMIC_CONFIG_STARVATION_FREE_ONLY OS_ALWAYS_INLINE static inline long firehose_chunk_tracepoint_try_reserve(firehose_chunk_t fc, uint64_t stamp, @@ -109,8 +105,8 @@ firehose_chunk_tracepoint_try_reserve(firehose_chunk_t fc, uint64_t stamp, // - read the chunk to find a very old thing os_atomic_rmw_loop_give_up(return FIREHOSE_CHUNK_TRY_RESERVE_FAIL); } - pos = orig; - if (!firehose_chunk_pos_fits(orig, + pos.fcp_pos = orig.fcp_pos; + if (!firehose_chunk_pos_fits(&orig, 
ft_size + pubsize + privsize) || !stamp_delta_fits) { pos.fcp_flag_full = true; reservation_failed = true; @@ -126,7 +122,7 @@ firehose_chunk_tracepoint_try_reserve(firehose_chunk_t fc, uint64_t stamp, pos.fcp_pos -= privsize * FIREHOSE_CHUNK_POS_PRIVATE_OFFS_INC; pos.fcp_pos += FIREHOSE_CHUNK_POS_REFCNT_INC; const uint16_t minimum_payload_size = 16; - if (!firehose_chunk_pos_fits(pos, + if (!firehose_chunk_pos_fits(&pos, roundup(ft_size + minimum_payload_size, 8))) { // if we can't even have minimum_payload_size bytes of payload // for the next tracepoint, just flush right away @@ -162,8 +158,8 @@ firehose_chunk_tracepoint_begin(firehose_chunk_t fc, uint64_t stamp, stamp |= (uint64_t)pubsize << 48; // The compiler barrier is needed for userland process death handling, see // (tracepoint-begin) in libdispatch's firehose_buffer_stream_chunk_install. - atomic_store_explicit(&ft->ft_atomic_stamp_and_length, stamp, - memory_order_relaxed); + os_atomic_std(atomic_store_explicit)(&ft->ft_atomic_stamp_and_length, stamp, + os_atomic_std(memory_order_relaxed)); __asm__ __volatile__ ("" ::: "memory"); ft->ft_thread = thread_id; return ft; @@ -176,12 +172,13 @@ firehose_chunk_tracepoint_end(firehose_chunk_t fc, { firehose_chunk_pos_u pos; - atomic_store_explicit(&ft->ft_id.ftid_atomic_value, - ftid.ftid_value, memory_order_release); - pos.fcp_pos = atomic_fetch_sub_explicit(&fc->fc_pos.fcp_atomic_pos, - FIREHOSE_CHUNK_POS_REFCNT_INC, memory_order_relaxed); + os_atomic_std(atomic_store_explicit)(&ft->ft_id.ftid_atomic_value, + ftid.ftid_value, os_atomic_std(memory_order_release)); + pos.fcp_pos = os_atomic_std(atomic_fetch_sub_explicit)(&fc->fc_pos.fcp_atomic_pos, + FIREHOSE_CHUNK_POS_REFCNT_INC, os_atomic_std(memory_order_relaxed)); return pos.fcp_refcnt == 1 && pos.fcp_flag_full; } +#endif // OS_ATOMIC_HAS_STARVATION_FREE_RMW || !OS_ATOMIC_CONFIG_STARVATION_FREE_ONLY #endif // defined(KERNEL) || defined(OS_FIREHOSE_SPI) diff --git a/libkern/firehose/firehose_types_private.h b/libkern/firehose/firehose_types_private.h index 22e3def40..27fef1448 100644 --- a/libkern/firehose/firehose_types_private.h +++ b/libkern/firehose/firehose_types_private.h @@ -104,6 +104,8 @@ OS_ENUM(firehose_tracepoint_namespace, uint8_t, OS_ENUM(firehose_tracepoint_code, uint32_t, firehose_tracepoint_code_load = 0x01, firehose_tracepoint_code_unload = 0x02, + firehose_tracepoint_code_load_filesystem = 0x04, + firehose_tracepoint_code_load_memory = 0x08, ); /*! @@ -236,6 +238,7 @@ OS_ENUM(_firehose_tracepoint_type_metadata, firehose_tracepoint_type_t, _firehose_tracepoint_type_metadata_dyld = 0x01, _firehose_tracepoint_type_metadata_subsystem = 0x02, _firehose_tracepoint_type_metadata_kext = 0x03, + _firehose_tracepoint_type_metadata_coprocessor = 0x04, ); /*! 
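Note: the firehose position helper in the hunk above now takes its packed position word by pointer instead of by value. A minimal sketch of the intended calling pattern, using only names visible in these hunks; the os_atomic_load/relaxed read is an illustrative assumption, since the real reservation path reads the word inside os_atomic_rmw_loop as shown:

    firehose_chunk_pos_u pos;
    /* snapshot the packed position word once, then test the local copy */
    pos.fcp_pos = os_atomic_load(&fc->fc_pos.fcp_atomic_pos, relaxed);
    if (firehose_chunk_pos_fits(&pos, ft_size + pubsize + privsize)) {
            /* room remains between the next-entry offset and the private area */
    }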
diff --git a/libkern/firehose/tracepoint_private.h b/libkern/firehose/tracepoint_private.h index f5ad06986..69c04c982 100644 --- a/libkern/firehose/tracepoint_private.h +++ b/libkern/firehose/tracepoint_private.h @@ -27,6 +27,7 @@ #if KERNEL #include #endif +#include #include "firehose_types_private.h" OS_ASSUME_NONNULL_BEGIN @@ -45,7 +46,7 @@ typedef union { uint32_t _code; } ftid; firehose_tracepoint_id_t ftid_value; - _Atomic(firehose_tracepoint_id_t) ftid_atomic_value; + os_atomic(firehose_tracepoint_id_t) ftid_atomic_value; } firehose_tracepoint_id_u; #define FIREHOSE_STAMP_SLOP (1ULL << 36) // ~1minute @@ -76,7 +77,7 @@ typedef struct firehose_tracepoint_s { uint64_t ft_length : 16; }; uint64_t ft_stamp_and_length; - _Atomic(uint64_t) ft_atomic_stamp_and_length; + os_atomic(uint64_t) ft_atomic_stamp_and_length; }; uint8_t ft_data[]; } *firehose_tracepoint_t; diff --git a/libkern/gen/OSAtomicOperations.c b/libkern/gen/OSAtomicOperations.c index 8408c83a4..ba66fc09b 100644 --- a/libkern/gen/OSAtomicOperations.c +++ b/libkern/gen/OSAtomicOperations.c @@ -59,14 +59,14 @@ enum { Boolean OSCompareAndSwap8(UInt8 oldValue, UInt8 newValue, volatile UInt8 *address) { - return os_atomic_cmpxchg(address, oldValue, newValue, acq_rel); + return (Boolean)os_atomic_cmpxchg(address, oldValue, newValue, acq_rel); } #undef OSCompareAndSwap16 Boolean OSCompareAndSwap16(UInt16 oldValue, UInt16 newValue, volatile UInt16 *address) { - return os_atomic_cmpxchg(address, oldValue, newValue, acq_rel); + return (Boolean)os_atomic_cmpxchg(address, oldValue, newValue, acq_rel); } #undef OSCompareAndSwap @@ -74,7 +74,7 @@ Boolean OSCompareAndSwap(UInt32 oldValue, UInt32 newValue, volatile UInt32 *address) { ALIGN_TEST(address, UInt32); - return os_atomic_cmpxchg(address, oldValue, newValue, acq_rel); + return (Boolean)os_atomic_cmpxchg(address, oldValue, newValue, acq_rel); } #undef OSCompareAndSwap64 @@ -89,26 +89,26 @@ OSCompareAndSwap64(UInt64 oldValue, UInt64 newValue, volatile UInt64 *address) _Atomic UInt64 *aligned_addr = (_Atomic UInt64 *)(uintptr_t)address; ALIGN_TEST(address, UInt64); - return os_atomic_cmpxchg(aligned_addr, oldValue, newValue, acq_rel); + return (Boolean)os_atomic_cmpxchg(aligned_addr, oldValue, newValue, acq_rel); } #undef OSCompareAndSwapPtr Boolean OSCompareAndSwapPtr(void *oldValue, void *newValue, void * volatile *address) { - return os_atomic_cmpxchg(address, oldValue, newValue, acq_rel); + return (Boolean)os_atomic_cmpxchg(address, oldValue, newValue, acq_rel); } SInt8 OSAddAtomic8(SInt32 amount, volatile SInt8 *address) { - return os_atomic_add_orig(address, amount, relaxed); + return os_atomic_add_orig(address, (SInt8)amount, relaxed); } SInt16 OSAddAtomic16(SInt32 amount, volatile SInt16 *address) { - return os_atomic_add_orig(address, amount, relaxed); + return os_atomic_add_orig(address, (SInt16)amount, relaxed); } #undef OSAddAtomic @@ -222,19 +222,19 @@ OSDecrementAtomic8(volatile SInt8 * value) UInt8 OSBitAndAtomic8(UInt32 mask, volatile UInt8 * value) { - return os_atomic_and_orig(value, mask, relaxed); + return os_atomic_and_orig(value, (UInt8)mask, relaxed); } UInt8 OSBitOrAtomic8(UInt32 mask, volatile UInt8 * value) { - return os_atomic_or_orig(value, mask, relaxed); + return os_atomic_or_orig(value, (UInt8)mask, relaxed); } UInt8 OSBitXorAtomic8(UInt32 mask, volatile UInt8 * value) { - return os_atomic_xor_orig(value, mask, relaxed); + return os_atomic_xor_orig(value, (UInt8)mask, relaxed); } SInt16 @@ -252,17 +252,17 @@ OSDecrementAtomic16(volatile SInt16 * value) 
UInt16 OSBitAndAtomic16(UInt32 mask, volatile UInt16 * value) { - return os_atomic_and_orig(value, mask, relaxed); + return os_atomic_and_orig(value, (UInt16)mask, relaxed); } UInt16 OSBitOrAtomic16(UInt32 mask, volatile UInt16 * value) { - return os_atomic_or_orig(value, mask, relaxed); + return os_atomic_or_orig(value, (UInt16)mask, relaxed); } UInt16 OSBitXorAtomic16(UInt32 mask, volatile UInt16 * value) { - return os_atomic_xor_orig(value, mask, relaxed); + return os_atomic_xor_orig(value, (UInt16)mask, relaxed); } diff --git a/osfmk/corecrypto/ccdigest/src/ccdigest_init.c b/libkern/gen/OSSpinLock.c similarity index 76% rename from osfmk/corecrypto/ccdigest/src/ccdigest_init.c rename to libkern/gen/OSSpinLock.c index 0ba754841..641953397 100644 --- a/osfmk/corecrypto/ccdigest/src/ccdigest_init.c +++ b/libkern/gen/OSSpinLock.c @@ -1,11 +1,5 @@ /* - * ccdigest_init.c - * corecrypto - * - * Created on 11/30/2010 - * - * Copyright (c) 2010,2011,2015 Apple Inc. All rights reserved. - * + * Copyright (c) 2020 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -32,13 +26,19 @@ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -#include -#include +#include +#include + +_Static_assert(sizeof(int) == sizeof(OSSpinLock), "OSSpinLock size mismatch"); void -ccdigest_init(const struct ccdigest_info *di, ccdigest_ctx_t ctx) +OSSpinLockUnlock(volatile OSSpinLock *lock) +{ + os_atomic_store(lock, 0, release); +} + +Boolean +OSSpinLockTry(volatile OSSpinLock *lock) { - ccdigest_copy_state(di, ccdigest_state_ccn(di, ctx), di->initial_state); - ccdigest_nbits(di, ctx) = 0; - ccdigest_num(di, ctx) = 0; + return os_atomic_cmpxchg(lock, 0, 1, acquire); } diff --git a/libkern/kxld/Makefile b/libkern/kxld/Makefile index bebe89829..68f8f6e7d 100644 --- a/libkern/kxld/Makefile +++ b/libkern/kxld/Makefile @@ -38,7 +38,7 @@ ifdef INSTALL_LOCATION override DSTROOT := $(DSTROOT)/$(INSTALL_LOCATION) endif -PRODUCT_TYPE ?= DYLIB +PRODUCT_TYPE ?= ALL HDRDST=$(DSTROOT)/usr/local/include DYLIBDST=$(DSTROOT)/usr/lib/system @@ -57,6 +57,9 @@ LIBKXLDDST_ARCHIVE=$(ARCHIVEDST)/$(LIBKXLD_ARCHIVE) TESTSRC=$(SRCROOT)/tests TESTDST=./BUILD/tests +# Build both the dylib and archive +LIBKXLDDST_ALL=$(LIBKXLDDST_DYLIB) $(LIBKXLDDST_ARCHIVE) + # Flags ifdef SDKROOT SDK_DIR := $(shell xcodebuild -version -sdk $(SDKROOT) Path) @@ -85,7 +88,6 @@ endif # Tools CC = xcrun -sdk $(SDK_DIR) clang -CLANG_ANALYZER = clang --analyze LIBTOOL = xcrun -sdk $(SDK_DIR) libtool STRIP = xcrun -sdk $(SDK_DIR) strip DSYMUTIL = xcrun -sdk $(SDK_DIR) dsymutil @@ -194,11 +196,6 @@ $(TESTDST)/copyrighttest: $(COPYTESTOBJS) $(CC) $(ARCHS) $(COPYTESTOBJS) -framework CoreFoundation -framework IOKit -o $(OBJROOT)/copyrighttest install -c -m 755 $(OBJROOT)/copyrighttest $@ -analyze: - @$(CLANG_ANALYZER) $(CFLAGS) $(INCLUDES) $(filter-out WKdm%.c,$(wildcard *.c)) - @$(CLANG_ANALYZER) $(CFLAGS) $(INCLUDES) -I$(SRCROOT) tests/*.c - @rm -f *.plist - clean: @rm -rf $(OBJROOT)/* diff --git a/libkern/kxld/kxld_copyright.c b/libkern/kxld/kxld_copyright.c index ca66c9ab9..e42757fc1 100644 --- a/libkern/kxld/kxld_copyright.c +++ b/libkern/kxld/kxld_copyright.c @@ -265,10 +265,11 @@ kxld_validate_copyright_string(const char *str) const char *copyright = NULL; const char *rights = NULL; char *date_str = NULL; - u_long len = 0; + size_t len = 0; - copyright = kxld_strstr(str, kCopyrightToken); - rights = kxld_strstr(str, kRightsToken); + len = strlen(str); + copyright = strnstr(str, kCopyrightToken, len); + rights = strnstr(str, 
kRightsToken, len); if (!copyright || !rights || copyright > rights) { goto finish; diff --git a/libkern/kxld/kxld_reloc.c b/libkern/kxld/kxld_reloc.c index ac4a8b319..f9a9b850d 100644 --- a/libkern/kxld/kxld_reloc.c +++ b/libkern/kxld/kxld_reloc.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2007-2008 Apple Inc. All rights reserved. + * Copyright (c) 2007-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -28,6 +28,7 @@ #include #include #include +#include #if KERNEL #include @@ -1381,25 +1382,25 @@ x86_64_process_reloc(const KXLDRelocator *relocator __unused, u_char *instructio adjustment = 0; break; } - /* Fall through */ + OS_FALLTHROUGH; case X86_64_RELOC_SIGNED_1: if (pair_target) { adjustment = 1; break; } - /* Fall through */ + OS_FALLTHROUGH; case X86_64_RELOC_SIGNED_2: if (pair_target) { adjustment = 2; break; } - /* Fall through */ + OS_FALLTHROUGH; case X86_64_RELOC_SIGNED_4: if (pair_target) { adjustment = 4; break; } - /* Fall through */ + OS_FALLTHROUGH; case X86_64_RELOC_BRANCH: case X86_64_RELOC_GOT: case X86_64_RELOC_GOT_LOAD: diff --git a/libkern/kxld/kxld_stubs.c b/libkern/kxld/kxld_stubs.c deleted file mode 100644 index e2201eb0c..000000000 --- a/libkern/kxld/kxld_stubs.c +++ /dev/null @@ -1,82 +0,0 @@ -/* - * Copyright (c) 2008 Apple Inc. All rights reserved. - * - * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * - * This file contains Original Code and/or Modifications of Original Code - * as defined in and that are subject to the Apple Public Source License - * Version 2.0 (the 'License'). You may not use this file except in - * compliance with the License. The rights granted to you under the License - * may not be used to create, or enable the creation or redistribution of, - * unlawful or unlicensed copies of an Apple operating system, or to - * circumvent, violate, or enable the circumvention or violation of, any - * terms of an Apple operating system software license agreement. - * - * Please obtain a copy of the License at - * http://www.opensource.apple.com/apsl/ and read it before using this file. - * - * The Original Code and all software distributed under the License are - * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER - * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, - * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. - * Please see the License for the specific language governing rights and - * limitations under the License. - * - * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ - */ - -/* - * These kxld stubs panic if the kernel is built without kxld support but - * something tries to use it anyway. 
- */ -#if KERNEL - -#if !CONFIG_KXLD - -#include -#include - -#include - -kern_return_t -kxld_create_context(KXLDContext **_context __unused, - KXLDAllocateCallback allocate_callback __unused, - KXLDLoggingCallback logging_callback __unused, - KXLDFlags flags __unused, cpu_type_t cputype __unused, - cpu_subtype_t cpusubtype __unused, vm_size_t pagesize __unused) -{ - return KERN_SUCCESS; -} - -void -kxld_destroy_context(KXLDContext *context __unused) -{ - /* Do nothing */ -} - -kern_return_t -kxld_link_file( - KXLDContext * context __unused, - u_char * file __unused, - u_long size __unused, - const char * name __unused, - void * callback_data __unused, - KXLDDependency * dependencies __unused, - u_int ndependencies __unused, - u_char ** linked_object_out __unused, - kxld_addr_t * kmod_info_kern __unused) -{ - panic("%s (%s) called in kernel without kxld support", __PRETTY_FUNCTION__, name); - return KERN_SUCCESS; -} - -boolean_t -kxld_validate_copyright_string(const char *str __unused) -{ - return TRUE; -} - -#endif - -#endif /* KERNEL */ diff --git a/libkern/kxld/kxld_sym.c b/libkern/kxld/kxld_sym.c index 0072c4197..9060b04da 100644 --- a/libkern/kxld/kxld_sym.c +++ b/libkern/kxld/kxld_sym.c @@ -273,11 +273,11 @@ init_predicates(KXLDSym *sym, u_char n_type, u_short n_desc) } else if (streq_safe(sym->name, VTABLE_PREFIX, const_strlen(VTABLE_PREFIX))) { sym->is_class_vtable = 1; - } else if (kxld_strstr(sym->name, RESERVED_TOKEN)) { + } else if (strnstr(sym->name, RESERVED_TOKEN, strlen(sym->name))) { sym->is_padslot = 1; - } else if (kxld_strstr(sym->name, METACLASS_TOKEN)) { + } else if (strnstr(sym->name, METACLASS_TOKEN, strlen(sym->name))) { sym->is_metaclass = 1; - } else if (kxld_strstr(sym->name, SUPER_METACLASS_POINTER_TOKEN)) { + } else if (strnstr(sym->name, SUPER_METACLASS_POINTER_TOKEN, strlen(sym->name))) { sym->is_super_metaclass_pointer = 1; } } else if (kxld_sym_name_is_pure_virtual(sym->name)) { @@ -577,7 +577,7 @@ kxld_sym_name_is_padslot(const char *name) { check(name); - return kxld_strstr(name, RESERVED_TOKEN) != 0; + return strnstr(name, RESERVED_TOKEN, strlen(name)) != 0; } /******************************************************************************* diff --git a/libkern/kxld/kxld_util.c b/libkern/kxld/kxld_util.c index 47df25866..44fdd6068 100644 --- a/libkern/kxld/kxld_util.c +++ b/libkern/kxld/kxld_util.c @@ -67,7 +67,13 @@ static void *s_callback_data = NULL; #if !KERNEL static boolean_t s_cross_link_enabled = FALSE; -static kxld_size_t s_cross_link_page_size = PAGE_SIZE; +/* Can't use PAGE_SIZE here because it is not a compile-time constant. + * However from inspection below, s_cross_link_page_size is not used + * unless s_cross_link_enabled is TRUE, and s_cross_link_enabled is + * only set to TRUE when a client specifies the value. So the + * default should never be used in practice, + */ +static kxld_size_t s_cross_link_page_size; #endif @@ -835,37 +841,6 @@ kxld_is_32_bit(cpu_type_t cputype) return !(cputype & CPU_ARCH_ABI64); } -/******************************************************************************* -* Borrowed (and slightly modified) the libc implementation for the kernel -* until the kernel has a supported strstr(). -* Find the first occurrence of find in s. 
-*******************************************************************************/ -const char * -kxld_strstr(const char *s, const char *find) -{ -#if KERNEL - char c, sc; - size_t len; - if (!s || !find) { - return s; - } - if ((c = *find++) != 0) { - len = strlen(find); - do { - do { - if ((sc = *s++) == 0) { - return NULL; - } - } while (sc != c); - } while (strncmp(s, find, len) != 0); - s--; - } - return s; -#else - return strstr(s, find); -#endif /* KERNEL */ -} - /******************************************************************************* *******************************************************************************/ void diff --git a/libkern/kxld/kxld_util.h b/libkern/kxld/kxld_util.h index d11d2dc8b..3cefd7c28 100644 --- a/libkern/kxld/kxld_util.h +++ b/libkern/kxld/kxld_util.h @@ -199,9 +199,6 @@ __attribute__((const, visibility("hidden"))); boolean_t kxld_is_32_bit(cpu_type_t) __attribute__((const, visibility("hidden"))); -const char * kxld_strstr(const char *s, const char *find) -__attribute__((pure, visibility("hidden"))); - /******************************************************************************* * Debugging *******************************************************************************/ diff --git a/libkern/libclosure/runtime.cpp b/libkern/libclosure/runtime.cpp index 4ae5cd977..d4c10bae7 100644 --- a/libkern/libclosure/runtime.cpp +++ b/libkern/libclosure/runtime.cpp @@ -17,12 +17,28 @@ #include #else /* !KERNEL */ +#define TARGET_OS_WIN32 0 #include -#include +__BEGIN_DECLS +#include +__END_DECLS -#define malloc(s) kern_os_malloc((s)) -#define free(a) kern_os_free((a)) +static inline void * +malloc(size_t size) +{ + if (size == 0) { + return NULL; + } + return kheap_alloc_tag_bt(KHEAP_DEFAULT, size, + (zalloc_flags_t) (Z_WAITOK | Z_ZERO), VM_KERN_MEMORY_LIBKERN); +} + +static inline void +free(void *addr) +{ + kheap_free_addr(KHEAP_DEFAULT, addr); +} #endif /* KERNEL */ @@ -130,6 +146,23 @@ latching_decr_int_should_deallocate(volatile int32_t *where) #if !TARGET_OS_WIN32 #pragma mark Framework Callback Routines #endif +#if KERNEL +static inline void +_Block_retain_object(const void *ptr __unused) +{ +} + +static inline void +_Block_release_object(const void *ptr __unused) +{ +} + +static inline void +_Block_destructInstance(const void *aBlock __unused) +{ +} + +#else static void _Block_retain_object_default(const void *ptr __unused) @@ -162,36 +195,40 @@ _Block_use_RR2(const Block_callbacks_RR *callbacks) _Block_release_object = callbacks->release; _Block_destructInstance = callbacks->destructInstance; } +#endif // !KERNEL /**************************************************************************** * Accessors for block descriptor fields *****************************************************************************/ -#if 0 -static struct Block_descriptor_1 * -_Block_descriptor_1(struct Block_layout *aBlock) + +template +static T * +unwrap_relative_pointer(int32_t &offset) { - return aBlock->descriptor; + if (offset == 0) { + return nullptr; + } + + uintptr_t base = (uintptr_t)&offset; + uintptr_t extendedOffset = (uintptr_t)(intptr_t)offset; + uintptr_t pointer = base + extendedOffset; + return (T *)pointer; } -#endif +#if 0 static struct Block_descriptor_2 * _Block_descriptor_2(struct Block_layout *aBlock) { - if (!(aBlock->flags & BLOCK_HAS_COPY_DISPOSE)) { - return NULL; - } - uint8_t *desc = (uint8_t *)aBlock->descriptor; + uint8_t *desc = (uint8_t *)_Block_get_descriptor(aBlock); desc += sizeof(struct Block_descriptor_1); return __IGNORE_WCASTALIGN((struct 
Block_descriptor_2 *)desc); } +#endif static struct Block_descriptor_3 * _Block_descriptor_3(struct Block_layout *aBlock) { - if (!(aBlock->flags & BLOCK_HAS_SIGNATURE)) { - return NULL; - } - uint8_t *desc = (uint8_t *)aBlock->descriptor; + uint8_t *desc = (uint8_t *)_Block_get_descriptor(aBlock); desc += sizeof(struct Block_descriptor_1); if (aBlock->flags & BLOCK_HAS_COPY_DISPOSE) { desc += sizeof(struct Block_descriptor_2); @@ -202,23 +239,17 @@ _Block_descriptor_3(struct Block_layout *aBlock) static void _Block_call_copy_helper(void *result, struct Block_layout *aBlock) { - struct Block_descriptor_2 *desc = _Block_descriptor_2(aBlock); - if (!desc) { - return; + if (auto *pFn = _Block_get_copy_function(aBlock)) { + pFn(result, aBlock); } - - (*desc->copy)(result, aBlock); // do fixup } static void _Block_call_dispose_helper(struct Block_layout *aBlock) { - struct Block_descriptor_2 *desc = _Block_descriptor_2(aBlock); - if (!desc) { - return; + if (auto *pFn = _Block_get_dispose_function(aBlock)) { + pFn(aBlock); } - - (*desc->dispose)(aBlock); } /******************************************************************************* @@ -249,15 +280,30 @@ _Block_copy(const void *arg) return aBlock; } else { // Its a stack block. Make a copy. - struct Block_layout *result = (typeof(result))malloc(aBlock->descriptor->size); + size_t size = Block_size(aBlock); + struct Block_layout *result = (struct Block_layout *)malloc(size); if (!result) { return NULL; } - memmove(result, aBlock, aBlock->descriptor->size); // bitcopy first + memmove(result, aBlock, size); // bitcopy first #if __has_feature(ptrauth_calls) // Resign the invoke pointer as it uses address authentication. result->invoke = aBlock->invoke; + +#if __has_feature(ptrauth_signed_block_descriptors) + uintptr_t oldDesc = + ptrauth_blend_discriminator( + &aBlock->descriptor, _Block_descriptor_ptrauth_discriminator); + uintptr_t newDesc = + ptrauth_blend_discriminator( + &result->descriptor, _Block_descriptor_ptrauth_discriminator); + + result->descriptor = + ptrauth_auth_and_resign(aBlock->descriptor, ptrauth_key_asda, oldDesc, + ptrauth_key_asda, newDesc); #endif +#endif + // reset refcount result->flags &= ~(BLOCK_REFCOUNT_MASK | BLOCK_DEALLOCATING); // XXX not needed result->flags |= BLOCK_NEEDS_FREE | 2; // logical refcount 1 @@ -399,7 +445,12 @@ _Block_isDeallocating(const void *arg) size_t Block_size(void *aBlock) { - return ((struct Block_layout *)aBlock)->descriptor->size; + auto *layout = (Block_layout *)aBlock; + void *desc = _Block_get_descriptor(layout); + if (layout->flags & BLOCK_SMALL_DESCRIPTOR) { + return ((Block_descriptor_small *)desc)->size; + } + return ((Block_descriptor_1 *)desc)->size; } bool @@ -421,11 +472,17 @@ _Block_has_signature(void *aBlock) const char * _Block_signature(void *aBlock) { - struct Block_descriptor_3 *desc3 = _Block_descriptor_3((struct Block_layout *)aBlock); - if (!desc3) { - return NULL; + struct Block_layout *layout = (struct Block_layout *)aBlock; + if (!(layout->flags & BLOCK_HAS_SIGNATURE)) { + return nullptr; } + if (layout->flags & BLOCK_SMALL_DESCRIPTOR) { + auto *bds = (Block_descriptor_small *)_Block_get_descriptor(layout); + return unwrap_relative_pointer(bds->signature); + } + + struct Block_descriptor_3 *desc3 = _Block_descriptor_3(layout); return desc3->signature; } @@ -433,40 +490,50 @@ const char * _Block_layout(void *aBlock) { // Don't return extended layout to callers expecting old GC layout - struct Block_layout *layout = (struct Block_layout *)aBlock; - if (layout->flags & 
BLOCK_HAS_EXTENDED_LAYOUT) { - return NULL; + Block_layout *layout = (Block_layout *)aBlock; + if ((layout->flags & BLOCK_HAS_EXTENDED_LAYOUT) || + !(layout->flags & BLOCK_HAS_SIGNATURE)) { + return nullptr; } - struct Block_descriptor_3 *desc3 = _Block_descriptor_3((struct Block_layout *)aBlock); - if (!desc3) { - return NULL; + if (layout->flags & BLOCK_SMALL_DESCRIPTOR) { + auto *bds = (Block_descriptor_small *)_Block_get_descriptor(layout); + return unwrap_relative_pointer(bds->layout); } - return desc3->layout; + Block_descriptor_3 *desc = _Block_descriptor_3(layout); + return desc->layout; } const char * _Block_extended_layout(void *aBlock) { // Don't return old GC layout to callers expecting extended layout - struct Block_layout *layout = (struct Block_layout *)aBlock; - if (!(layout->flags & BLOCK_HAS_EXTENDED_LAYOUT)) { - return NULL; + Block_layout *layout = (Block_layout *)aBlock; + if (!(layout->flags & BLOCK_HAS_EXTENDED_LAYOUT) || + !(layout->flags & BLOCK_HAS_SIGNATURE)) { + return nullptr; } - struct Block_descriptor_3 *desc3 = _Block_descriptor_3((struct Block_layout *)aBlock); - if (!desc3) { - return NULL; + const char *extLayout; + if (layout->flags & BLOCK_SMALL_DESCRIPTOR) { + auto *bds = (Block_descriptor_small *)_Block_get_descriptor(layout); + if (layout->flags & BLOCK_INLINE_LAYOUT_STRING) { + extLayout = (const char *)(uintptr_t)bds->layout; + } else { + extLayout = unwrap_relative_pointer(bds->layout); + } + } else { + Block_descriptor_3 *desc3 = _Block_descriptor_3(layout); + extLayout = desc3->layout; } // Return empty string (all non-object bytes) instead of NULL // so callers can distinguish "empty layout" from "no layout". - if (!desc3->layout) { - return ""; - } else { - return desc3->layout; + if (!extLayout) { + extLayout = ""; } + return extLayout; } #if !TARGET_OS_WIN32 @@ -612,6 +679,3 @@ _Block_object_dispose(const void *object, const int flags) // Workaround for dylib with no __DATA segment fails to rebase __attribute__((used)) static int let_there_be_data = 42; - -#undef malloc -#undef free diff --git a/libkern/libkern/Block_private.h b/libkern/libkern/Block_private.h index 4c24211dc..c2c2bb349 100644 --- a/libkern/libkern/Block_private.h +++ b/libkern/libkern/Block_private.h @@ -93,7 +93,7 @@ public: return 0; } else { return (uintptr_t) - ptrauth_auth_and_resign(fn, ptrauth_key_function_pointer, 0, + ptrauth_auth_and_resign(fn, ptrauth_key_function_pointer, ptrauth_function_pointer_type_discriminator(Fn), Key, &bits); } } @@ -122,7 +122,7 @@ public: if (ptr == 0) { return nullptr; } else { - return ptrauth_auth_function((Fn)ptr, Key, &bits); + return (Fn)ptrauth_auth_function((void *)ptr, Key, &bits); } } @@ -225,11 +225,25 @@ typedef uintptr_t BlockByrefDestroyFunction; #endif +#if __has_feature(ptrauth_calls) +#define _Block_get_relative_function_pointer(field, type) \ + ((type)ptrauth_sign_unauthenticated( \ + (void *)((uintptr_t)(intptr_t)(field) + (uintptr_t)&(field)), \ + ptrauth_key_function_pointer, 0)) +#else +#define _Block_get_relative_function_pointer(field, type) \ + ((type)((uintptr_t)(intptr_t)(field) + (uintptr_t)&(field))) +#endif + +#define _Block_descriptor_ptrauth_discriminator 0xC0BB // Values for Block_layout->flags to describe block objects enum { BLOCK_DEALLOCATING = (0x0001),// runtime BLOCK_REFCOUNT_MASK = (0xfffe),// runtime + BLOCK_INLINE_LAYOUT_STRING = (1 << 21), // compiler + BLOCK_SMALL_DESCRIPTOR = (1 << 22), // compiler + BLOCK_IS_NOESCAPE = (1 << 23), // compiler BLOCK_NEEDS_FREE = (1 << 24),// runtime 
BLOCK_HAS_COPY_DISPOSE = (1 << 25),// compiler BLOCK_HAS_CTOR = (1 << 26),// compiler: helpers have C++ code @@ -260,6 +274,19 @@ struct Block_descriptor_3 { const char *layout; // contents depend on BLOCK_HAS_EXTENDED_LAYOUT }; +struct Block_descriptor_small { + uint32_t size; + + int32_t signature; + int32_t layout; + + /* copy & dispose are optional, only access them if + * Block_layout->flags & BLOCK_HAS_COPY_DIPOSE */ + int32_t copy; + int32_t dispose; +}; + + struct Block_layout { void *isa; volatile int32_t flags; // contains ref count @@ -375,6 +402,17 @@ _Block_set_invoke_fn(struct Block_layout *block, void (*fn)(void *, ...)) _Block_set_function_pointer(block->invoke, fn); } +static inline void * +_Block_get_descriptor(struct Block_layout *aBlock) +{ +#if __has_feature(ptrauth_signed_block_descriptors) + uintptr_t disc = ptrauth_blend_discriminator( + &aBlock->descriptor, _Block_descriptor_ptrauth_discriminator); + return ptrauth_auth_data(aBlock->descriptor, ptrauth_key_asda, disc); +#else + return aBlock->descriptor; +#endif +} static inline __typeof__(void (*)(void *, const void *)) _Block_get_copy_fn(struct Block_descriptor_2 *desc) @@ -403,6 +441,52 @@ _Block_set_dispose_fn(struct Block_descriptor_2 *desc, _Block_set_function_pointer(desc->dispose, fn); } +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wcast-align" + +static inline __typeof__(void (*)(void *, const void *)) +_Block_get_copy_function(struct Block_layout *aBlock) +{ + if (!(aBlock->flags & BLOCK_HAS_COPY_DISPOSE)) { + return NULL; + } + + void *desc = _Block_get_descriptor(aBlock); + if (aBlock->flags & BLOCK_SMALL_DESCRIPTOR) { + struct Block_descriptor_small *bds = + (struct Block_descriptor_small *)desc; + return _Block_get_relative_function_pointer( + bds->copy, void (*)(void *, const void *)); + } + + struct Block_descriptor_2 *bd2 = + (struct Block_descriptor_2 *)((unsigned char *)desc + + sizeof(struct Block_descriptor_1)); + return _Block_get_copy_fn(bd2); +} + +static inline __typeof__(void (*)(const void *)) +_Block_get_dispose_function(struct Block_layout *aBlock) +{ + if (!(aBlock->flags & BLOCK_HAS_COPY_DISPOSE)) { + return NULL; + } + + void *desc = _Block_get_descriptor(aBlock); + if (aBlock->flags & BLOCK_SMALL_DESCRIPTOR) { + struct Block_descriptor_small *bds = + (struct Block_descriptor_small *)desc; + return _Block_get_relative_function_pointer( + bds->dispose, void (*)(const void *)); + } + + struct Block_descriptor_2 *bd2 = + (struct Block_descriptor_2 *)((unsigned char *)desc + + sizeof(struct Block_descriptor_1)); + return _Block_get_dispose_fn(bd2); +} + +#pragma clang diagnostic pop // Other support functions @@ -460,6 +544,7 @@ __OSX_AVAILABLE_STARTING(__MAC_10_6, __IPHONE_3_2); // BLOCK_EXPORT void * _NSConcreteStackBlock[32]; +#if !KERNEL struct Block_callbacks_RR { size_t size; // size == sizeof(struct Block_callbacks_RR) void (*retain)(const void *); @@ -469,6 +554,6 @@ struct Block_callbacks_RR { typedef struct Block_callbacks_RR Block_callbacks_RR; BLOCK_EXPORT void _Block_use_RR2(const Block_callbacks_RR *callbacks); - +#endif // !KERNEL #endif diff --git a/libkern/libkern/Makefile b/libkern/libkern/Makefile index f40ad2170..3f78b8f25 100644 --- a/libkern/libkern/Makefile +++ b/libkern/libkern/Makefile @@ -54,11 +54,13 @@ KERNELFILES = \ PRIVATE_KERNELFILES = \ OSKextLibPrivate.h \ OSSerializeBinary.h \ + kernel_mach_header.h \ kext_request_keys.h \ mkext.h \ prelink.h \ section_keywords.h \ - Block_private.h + Block_private.h \ + ptrauth_utils.h 
PRIVATE_DATAFILES = \ ${PRIVATE_KERNELFILES} \ @@ -83,7 +85,6 @@ INSTALL_KF_MI_LCL_LIST = ${KERNELFILES} ${PRIVATE_KERNELFILES} EXPORT_MI_LIST = \ $(sort ${KERNELFILES} ${PRIVATE_DATAFILES}) \ - kernel_mach_header.h \ kxld.h \ kxld_types.h \ stack_protector.h @@ -93,7 +94,7 @@ EXPORT_MI_GEN_LIST = version.h EXPORT_MI_DIR = libkern version.h: version.h.template $(SRCROOT)/config/MasterVersion - $(call makelog,[$(CMD_MC)] $(ColorH)GENERATING$(Color0) $(ColorLF)libkern/$@$(Color0) from $(ColorF)$<$(Color0)) + @$(LOG_GENERATE) "libkern/$@$(Color0) from $(ColorF)$<" $(_v)install $(DATA_INSTALL_FLAGS) $< $@ $(_v)$(NEWVERS) $@ > /dev/null diff --git a/libkern/libkern/OSByteOrder.h b/libkern/libkern/OSByteOrder.h index eed25a420..63d67d449 100644 --- a/libkern/libkern/OSByteOrder.h +++ b/libkern/libkern/OSByteOrder.h @@ -37,6 +37,16 @@ #define OSSwapConstInt32(x) __DARWIN_OSSwapConstInt32(x) #define OSSwapConstInt64(x) __DARWIN_OSSwapConstInt64(x) +#if !defined(__DARWIN_OS_INLINE) +# if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L +# define __DARWIN_OS_INLINE static inline +# elif defined(__MWERKS__) || defined(__cplusplus) +# define __DARWIN_OS_INLINE static inline +# else +# define __DARWIN_OS_INLINE static __inline__ +# endif +#endif + #if defined(__GNUC__) #if (defined(__i386__) || defined(__x86_64__)) @@ -63,7 +73,7 @@ enum { OSBigEndian }; -OS_INLINE +__DARWIN_OS_INLINE int32_t OSHostByteOrder(void) { @@ -87,7 +97,7 @@ OSHostByteOrder(void) /* Functions for loading native endian values. */ -OS_INLINE +__DARWIN_OS_INLINE uint16_t _OSReadInt16( const volatile void * base, @@ -97,7 +107,7 @@ _OSReadInt16( return *(volatile uint16_t *)((uintptr_t)base + byteOffset); } -OS_INLINE +__DARWIN_OS_INLINE uint32_t _OSReadInt32( const volatile void * base, @@ -107,7 +117,7 @@ _OSReadInt32( return *(volatile uint32_t *)((uintptr_t)base + byteOffset); } -OS_INLINE +__DARWIN_OS_INLINE uint64_t _OSReadInt64( const volatile void * base, @@ -119,7 +129,7 @@ _OSReadInt64( /* Functions for storing native endian values. */ -OS_INLINE +__DARWIN_OS_INLINE void _OSWriteInt16( volatile void * base, @@ -130,7 +140,7 @@ _OSWriteInt16( *(volatile uint16_t *)((uintptr_t)base + byteOffset) = data; } -OS_INLINE +__DARWIN_OS_INLINE void _OSWriteInt32( volatile void * base, @@ -141,7 +151,7 @@ _OSWriteInt32( *(volatile uint32_t *)((uintptr_t)base + byteOffset) = data; } -OS_INLINE +__DARWIN_OS_INLINE void _OSWriteInt64( volatile void * base, diff --git a/libkern/libkern/OSDebug.h b/libkern/libkern/OSDebug.h index 14d65743c..db3c1c776 100644 --- a/libkern/libkern/OSDebug.h +++ b/libkern/libkern/OSDebug.h @@ -62,7 +62,7 @@ __END_DECLS #define TRACE_MACHLEAKS(a, b, c, d) \ do { \ - if (log_leaks) \ + if (__builtin_expect(!!log_leaks, 0)) \ trace_backtrace(a,b,c,d); \ } while(0) diff --git a/libkern/libkern/OSKextLib.h b/libkern/libkern/OSKextLib.h index f602dfd7d..f18b6e206 100644 --- a/libkern/libkern/OSKextLib.h +++ b/libkern/libkern/OSKextLib.h @@ -244,6 +244,38 @@ __BEGIN_DECLS */ #define kOSKextReturnSystemPolicy libkern_kext_err(0x1b) +/*! + * @define kOSKextReturnKCLoadFailure + * @abstract Loading of the System KC failed + */ +#define kOSKextReturnKCLoadFailure libkern_kext_err(0x1c) + +/*! + * @define kOSKextReturnKCLoadFailureSystemKC + * @abstract Loading of the System KC failed + * + * This a sub-code of kOSKextReturnKCLoadFailure. 
It can be OR'd together + * with: kOSKextReturnKCLoadFailureAuxKC + * + * If both the System and Aux KCs fail to load, then the error code will be: + * libkern_kext_err(0x1f) + */ +#define kOSKextReturnKCLoadFailureSystemKC libkern_kext_err(0x1d) + +/*! + * @define kOSKextReturnKCLoadFailureAuxKC + * @abstract Loading of the Aux KC failed + * + * This a sub-code of kOSKextReturnKCLoadFailure. It can be OR'd together + * with: kOSKextReturnKCLoadFailureSystemKC + * + * If both the System and Aux KCs fail to load, then the error code will be: + * libkern_kext_err(0x1f) + */ +#define kOSKextReturnKCLoadFailureAuxKC libkern_kext_err(0x1e) + +/* next available error is: libkern_kext_err(0x20) */ + #if PRAGMA_MARK #pragma mark - /********************************************************************/ @@ -267,6 +299,7 @@ __BEGIN_DECLS #define kCFBundleExecutableKey "CFBundleExecutable" #define kCFBundlePackageTypeKey "CFBundlePackageType" #define kCFBundleDriverKitUUIDKey "CFBundleDriverKitUUID" +#define kCFBundleDriverKitExecutableKey "CFBundleUEXTExecutable" #endif /* KERNEL */ /*! @@ -322,6 +355,13 @@ __BEGIN_DECLS */ #define kOSBundleRequiredKey "OSBundleRequired" +/*! + * @define kOSBundleRequireExplicitLoadKey + * @abstract A boolean value indicating whether the kext requires an + * explicit kextload in order to start/match. + */ +#define kOSBundleRequireExplicitLoadKey "OSBundleRequireExplicitLoad" + /*! * @define kOSBundleAllowUserLoadKey * @abstract A boolean value indicating whether @@ -380,6 +420,14 @@ __BEGIN_DECLS #define kAppleTextHashesKey "AppleTextHashes" #endif +/*! + * @define kOSMutableSegmentCopy + * @abstract A boolean value indicating whether the kext requires a copy of + * its mutable segments to be kept in memory, and then reset when the kext + * unloads. This should be used with caution as it will increase the + * amount of memory used by the kext. + */ +#define kOSMutableSegmentCopy "OSMutableSegmentCopy" #if PRAGMA_MARK diff --git a/libkern/libkern/OSKextLibPrivate.h b/libkern/libkern/OSKextLibPrivate.h index a3971be07..6297be81c 100644 --- a/libkern/libkern/OSKextLibPrivate.h +++ b/libkern/libkern/OSKextLibPrivate.h @@ -60,7 +60,13 @@ typedef uint8_t OSKextExcludeLevel; #define kOSKextExcludeKext (1) #define kOSKextExcludeAll (2) -#define kOSKextManagementEntitlement "com.apple.private.security.kext-management" +#define kIOCatalogManagementEntitlement "com.apple.private.security.iocatalog-management" +#define kOSKextCollectionManagementEntitlement "com.apple.private.security.kext-collection-management" +#define kOSKextOnlyBootKCManagementEntitlement "com.apple.private.security.only-bootkc-management" + +#define kOSKextCodelessKextLoadAddr (0x7FFFFFFFFFFFFFFFULL) + +#define kIOKitDaemonName "kernelmanagerd" #if PRAGMA_MARK #pragma mark - @@ -104,6 +110,28 @@ typedef uint8_t OSKextExcludeLevel; */ #define kAppleKernelExternalComponentKey "AppleKernelExternalComponent" +/*! + * @define kOSKextInfoPlistDigestKey + * @abstract SHA-256 data of the kext's Info.plist + */ +#define kOSKextInfoPlistDigestKey "_InfoPlistDigest" + +/*! + * @define kOSKextBundleCollectionTypeKey + * @abstract The type of collection in which a kext is linked. Possible + * values: kKCTypePrimary, kKCTypeSystem, kKCTypeAuxiliary, + * kKCTypeCodeless + */ +#define kOSKextBundleCollectionTypeKey "_BundleCollectionType" + +/*! + * @define kOSKextAuxKCAvailabilityKey + * @abstract boolean value: false if the kext is in the AuxKC and + * is not loadable; true otherwise. 
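The two KC sub-codes are laid out so that each contains all of the bits of kOSKextReturnKCLoadFailure (0x1c) and the pair OR together to 0x1f, which is what makes the combination described above work. A worked example, assuming libkern_kext_err() simply ORs the code into fixed system/sub-system bits:

/* 0x1c = base failure, 0x1d = System KC, 0x1e = Aux KC, 0x1d | 0x1e = 0x1f */
OSReturn err = kOSKextReturnKCLoadFailureSystemKC    /* ...1_1101 */
             | kOSKextReturnKCLoadFailureAuxKC;      /* ...1_1110 */
/* err == libkern_kext_err(0x1f): both collections failed to load   ...1_1111 */

bool anyKCLoadFailed =
    (err & kOSKextReturnKCLoadFailure) == kOSKextReturnKCLoadFailure;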
+ */ +#define kOSKextAuxKCAvailabilityKey "_AuxKCAvailability" + + // properties found in the registry root #define kOSKernelCPUTypeKey "OSKernelCPUType" #define kOSKernelCPUSubtypeKey "OSKernelCPUSubtype" @@ -160,6 +188,18 @@ typedef uint8_t OSKextExcludeLevel; #define kOSMetaClassSuperclassNameKey "OSMetaClassSuperclassName" #define kOSMetaClassTrackingCountKey "OSMetaClassTrackingCount" +#if PRAGMA_MARK +#pragma mark - +/********************************************************************/ +#pragma mark Kext Collection Type Keys +/********************************************************************/ +#endif +#define kKCTypePrimary "Primary" +#define kKCTypeSystem "System" +#define kKCTypeAuxiliary "Auxiliary" +#define kKCTypeCodeless "Codeless" +#define kKCTypeAny "Any" + #if PRAGMA_MARK #pragma mark - /********************************************************************/ @@ -864,6 +904,8 @@ OSReturn OSKextUnloadKextWithLoadTag(uint32_t loadTag); * @field loadTag The kext's load tag. * @field flags Internal tracking flags. * @field reference_list who this refs (links on). + * @field text_exec_address The address of the __TEXT_EXEC segment (if it exists), otherwise __TEXT + * @field text_exec_size The size of the segment pointed to by text_address * * @discussion * The OSKextLoadedKextSummary structure contains a basic set of information @@ -878,6 +920,8 @@ typedef struct _loaded_kext_summary { uint32_t loadTag; uint32_t flags; uint64_t reference_list; + uint64_t text_exec_address; + size_t text_exec_size; } OSKextLoadedKextSummary; /*! diff --git a/libkern/libkern/OSMalloc.h b/libkern/libkern/OSMalloc.h index 200b99265..b7aecdd52 100644 --- a/libkern/libkern/OSMalloc.h +++ b/libkern/libkern/OSMalloc.h @@ -37,6 +37,12 @@ __BEGIN_DECLS #ifdef MACH_KERNEL_PRIVATE #include #endif +#if defined(XNU_KERNEL_PRIVATE) +#include +#ifndef OSMallocDeprecated +#define OSMallocDeprecated __deprecated_msg("Use kalloc heaps") +#endif +#endif /* XNU_KERNEL_PRIVATE */ /*! * @header @@ -154,6 +160,9 @@ typedef struct __OSMallocTag__ * OSMallocTag_t; * allocations smaller than a page are wired. * */ +#if XNU_KERNEL_PRIVATE +OSMallocDeprecated +#endif extern OSMallocTag OSMalloc_Tagalloc( const char * name, uint32_t flags); @@ -174,6 +183,9 @@ extern OSMallocTag OSMalloc_Tagalloc( * Any OSMalloc function called on those blocks * will result in a panic. */ +#if XNU_KERNEL_PRIVATE +OSMallocDeprecated +#endif extern void OSMalloc_Tagfree(OSMallocTag tag); @@ -198,22 +210,26 @@ extern void OSMalloc_Tagfree(OSMallocTag tag); * is a full page or larger, the allocated memory is pageable; * otherwise it is wired. */ +#if XNU_KERNEL_PRIVATE +OSMallocDeprecated +#endif extern void * OSMalloc( uint32_t size, OSMallocTag tag) __attribute__((alloc_size(1))); - /*! * @function OSMalloc_nowait * * @abstract * Equivalent to @link OSMalloc_noblock OSMalloc_noblock@/link. */ +#if XNU_KERNEL_PRIVATE +OSMallocDeprecated +#endif extern void * OSMalloc_nowait( uint32_t size, OSMallocTag tag) __attribute__((alloc_size(1))); - /*! * @function OSMalloc_noblock * @@ -239,11 +255,13 @@ extern void * OSMalloc_nowait( * * This function is guaranteed not to block. */ +#if XNU_KERNEL_PRIVATE +OSMallocDeprecated +#endif extern void * OSMalloc_noblock( uint32_t size, OSMallocTag tag) __attribute__((alloc_size(1))); - /*! * @function OSFree * @@ -255,24 +273,14 @@ extern void * OSMalloc_noblock( * @param tag The @link OSMallocTag OSMallocTag@/link * with which addr was originally allocated. 
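The OSMallocDeprecated annotation applied throughout this header steers in-tree callers toward kalloc heaps. A hedged sketch of what that replacement typically looks like follows; KHEAP_DEFAULT and the Z_* flags are assumed from the kalloc heap API rather than taken from this hunk (the OSLib.h change further down uses KHEAP_DATA_BUFFERS in the same way).

#include <kern/kalloc.h>

static void *
example_alloc(size_t size)
{
	/* wired, zero-filled allocation from the default kalloc heap */
	return kheap_alloc(KHEAP_DEFAULT, size, Z_WAITOK | Z_ZERO);
}

static void
example_free(void *ptr, size_t size)
{
	kheap_free(KHEAP_DEFAULT, ptr, size);
}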
*/ +#if XNU_KERNEL_PRIVATE +OSMallocDeprecated +#endif extern void OSFree( void * addr, uint32_t size, OSMallocTag tag); -#ifdef XNU_KERNEL_PRIVATE -/*! - * @function OSMalloc_size - * - * @abstract - * Returns the size of a block of memory allocated by @link OSMalloc OSMalloc@/link. - * - * @param addr A pointer to the memory block allocated via OSMalloc. - */ -extern uint32_t OSMalloc_size( - void * addr); -#endif /* XNU_KERNEL_PRIVATE */ - __END_DECLS #endif /* LIBKERN_OSMALLOC_h */ diff --git a/libkern/libkern/_OSByteOrder.h b/libkern/libkern/_OSByteOrder.h index 0b399b97b..db7419df0 100644 --- a/libkern/libkern/_OSByteOrder.h +++ b/libkern/libkern/_OSByteOrder.h @@ -41,14 +41,14 @@ /* Macros for swapping constant values in the preprocessing stage. */ #define __DARWIN_OSSwapConstInt16(x) \ - ((__uint16_t)((((__uint16_t)(x) & 0xff00) >> 8) | \ - (((__uint16_t)(x) & 0x00ff) << 8))) + ((__uint16_t)((((__uint16_t)(x) & 0xff00U) >> 8) | \ + (((__uint16_t)(x) & 0x00ffU) << 8))) #define __DARWIN_OSSwapConstInt32(x) \ - ((__uint32_t)((((__uint32_t)(x) & 0xff000000) >> 24) | \ - (((__uint32_t)(x) & 0x00ff0000) >> 8) | \ - (((__uint32_t)(x) & 0x0000ff00) << 8) | \ - (((__uint32_t)(x) & 0x000000ff) << 24))) + ((__uint32_t)((((__uint32_t)(x) & 0xff000000U) >> 24) | \ + (((__uint32_t)(x) & 0x00ff0000U) >> 8) | \ + (((__uint32_t)(x) & 0x0000ff00U) << 8) | \ + (((__uint32_t)(x) & 0x000000ffU) << 24))) #define __DARWIN_OSSwapConstInt64(x) \ ((__uint64_t)((((__uint64_t)(x) & 0xff00000000000000ULL) >> 56) | \ @@ -62,6 +62,16 @@ #if defined(__GNUC__) +#if !defined(__DARWIN_OS_INLINE) +# if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L +# define __DARWIN_OS_INLINE static inline +# elif defined(__MWERKS__) || defined(__cplusplus) +# define __DARWIN_OS_INLINE static inline +# else +# define __DARWIN_OS_INLINE static __inline__ +# endif +#endif + #if defined(__i386__) || defined(__x86_64__) #include #endif @@ -84,16 +94,6 @@ #if defined(__i386__) || defined(__x86_64__) -#if !defined(__DARWIN_OS_INLINE) -# if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L -# define __DARWIN_OS_INLINE static inline -# elif defined(__MWERKS__) || defined(__cplusplus) -# define __DARWIN_OS_INLINE static inline -# else -# define __DARWIN_OS_INLINE static __inline__ -# endif -#endif - __DARWIN_OS_INLINE uint16_t _OSSwapInt16( diff --git a/libkern/libkern/arm/Makefile b/libkern/libkern/arm/Makefile index acfa028c8..c7d51e36d 100644 --- a/libkern/libkern/arm/Makefile +++ b/libkern/libkern/arm/Makefile @@ -9,8 +9,15 @@ include $(MakeInc_def) DATAFILES = \ OSByteOrder.h +DRIVERKIT_DATAFILES = \ + OSByteOrder.h + INSTALL_MD_LIST = ${DATAFILES} +INSTALL_DRIVERKIT_MD_LIST = ${DRIVERKIT_DATAFILES} + +DRIVERKITINCDIR = $(DRIVERKITSDKHEADERSROOT)/usr/local/include + INSTALL_MD_DIR = libkern/arm EXPORT_MD_LIST = ${DATAFILES} diff --git a/libkern/libkern/arm/OSByteOrder.h b/libkern/libkern/arm/OSByteOrder.h index e35ea88b6..0cd44a579 100644 --- a/libkern/libkern/arm/OSByteOrder.h +++ b/libkern/libkern/arm/OSByteOrder.h @@ -7,141 +7,210 @@ #include #include /* for _ARM_ARCH_6 */ -#include /* Generic byte swapping functions. 
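The U suffixes added to the constant-swap macros keep every intermediate term unsigned under the usual arithmetic conversions; the expansion value itself is unchanged. A worked expansion on a sample constant, using a local name so the sketch stands alone:

#include <stdint.h>

#define EXAMPLE_SWAP32(x) \
	((uint32_t)((((uint32_t)(x) & 0xff000000U) >> 24) | \
	            (((uint32_t)(x) & 0x00ff0000U) >>  8) | \
	            (((uint32_t)(x) & 0x0000ff00U) <<  8) | \
	            (((uint32_t)(x) & 0x000000ffU) << 24)))

/* EXAMPLE_SWAP32(0x11223344)
 *   == 0x00000011 | 0x00002200 | 0x00330000 | 0x44000000
 *   == 0x44332211 */
_Static_assert(EXAMPLE_SWAP32(0x11223344) == 0x44332211, "constant byte swap");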
*/ -OS_INLINE +__DARWIN_OS_INLINE uint16_t _OSSwapInt16( - uint16_t data + uint16_t _data ) { /* Reduces to 'rev16' with clang */ - return (uint16_t)(data << 8 | data >> 8); + return (uint16_t)(_data << 8 | _data >> 8); } -OS_INLINE +__DARWIN_OS_INLINE uint32_t _OSSwapInt32( - uint32_t data + uint32_t _data ) { #if defined(__llvm__) - data = __builtin_bswap32(data); + _data = __builtin_bswap32(_data); #else /* This actually generates the best code */ - data = (((data ^ (data >> 16 | (data << 16))) & 0xFF00FFFF) >> 8) ^ (data >> 8 | data << 24); + _data = (((_data ^ (_data >> 16 | (_data << 16))) & 0xFF00FFFF) >> 8) ^ (_data >> 8 | _data << 24); #endif - return data; + return _data; } -OS_INLINE +__DARWIN_OS_INLINE uint64_t _OSSwapInt64( - uint64_t data + uint64_t _data ) { #if defined(__llvm__) - return __builtin_bswap64(data); + return __builtin_bswap64(_data); #else union { - uint64_t ull; - uint32_t ul[2]; - } u; + uint64_t _ull; + uint32_t _ul[2]; + } _u; /* This actually generates the best code */ - u.ul[0] = (uint32_t)(data >> 32); - u.ul[1] = (uint32_t)(data & 0xffffffff); - u.ul[0] = _OSSwapInt32(u.ul[0]); - u.ul[1] = _OSSwapInt32(u.ul[1]); - return u.ull; + _u._ul[0] = (uint32_t)(_data >> 32); + _u._ul[1] = (uint32_t)(_data & 0xffffffff); + _u._ul[0] = _OSSwapInt32(_u._ul[0]); + _u._ul[1] = _OSSwapInt32(_u._ul[1]); + return _u._ull; #endif } /* Functions for byte reversed loads. */ -OS_INLINE +struct _OSUnalignedU16 { + volatile uint16_t __val; +} __attribute__((__packed__)); + +struct _OSUnalignedU32 { + volatile uint32_t __val; +} __attribute__((__packed__)); + +struct _OSUnalignedU64 { + volatile uint64_t __val; +} __attribute__((__packed__)); + +#if defined(_POSIX_C_SOURCE) || defined(_XOPEN_SOURCE) +__DARWIN_OS_INLINE +uint16_t +_OSReadSwapInt16( + const volatile void * _base, + uintptr_t _offset + ) +{ + return _OSSwapInt16(((struct _OSUnalignedU16 *)((uintptr_t)_base + _offset))->__val); +} +#else +__DARWIN_OS_INLINE uint16_t OSReadSwapInt16( - const volatile void * base, - uintptr_t offset + const volatile void * _base, + uintptr_t _offset ) { - uint16_t result; - - result = *(volatile uint16_t *)((volatile uintptr_t)base + offset); - return _OSSwapInt16(result); + return _OSSwapInt16(((struct _OSUnalignedU16 *)((uintptr_t)_base + _offset))->__val); } +#endif -OS_INLINE +#if defined(_POSIX_C_SOURCE) || defined(_XOPEN_SOURCE) +__DARWIN_OS_INLINE +uint32_t +_OSReadSwapInt32( + const volatile void * _base, + uintptr_t _offset + ) +{ + return _OSSwapInt32(((struct _OSUnalignedU32 *)((uintptr_t)_base + _offset))->__val); +} +#else +__DARWIN_OS_INLINE uint32_t OSReadSwapInt32( - const volatile void * base, - uintptr_t offset + const volatile void * _base, + uintptr_t _offset ) { - uint32_t result; - - result = *(volatile uint32_t *)((volatile uintptr_t)base + offset); - return _OSSwapInt32(result); + return _OSSwapInt32(((struct _OSUnalignedU32 *)((uintptr_t)_base + _offset))->__val); } +#endif -OS_INLINE +#if defined(_POSIX_C_SOURCE) || defined(_XOPEN_SOURCE) +__DARWIN_OS_INLINE +uint64_t +_OSReadSwapInt64( + const volatile void * _base, + uintptr_t _offset + ) +{ + return _OSSwapInt64(((struct _OSUnalignedU64 *)((uintptr_t)_base + _offset))->__val); +} +#else +__DARWIN_OS_INLINE uint64_t OSReadSwapInt64( - const volatile void * base, - uintptr_t offset + const volatile void * _base, + uintptr_t _offset ) { - volatile uint32_t * inp; - union ullc { - uint64_t ull; - uint32_t ul[2]; - } outv; - - inp = (volatile uint32_t *)((volatile uintptr_t)base + offset); - outv.ul[0] = 
inp[1]; - outv.ul[1] = inp[0]; - outv.ul[0] = _OSSwapInt32(outv.ul[0]); - outv.ul[1] = _OSSwapInt32(outv.ul[1]); - return outv.ull; + return _OSSwapInt64(((struct _OSUnalignedU64 *)((uintptr_t)_base + _offset))->__val); } +#endif /* Functions for byte reversed stores. */ -OS_INLINE +#if defined(_POSIX_C_SOURCE) || defined(_XOPEN_SOURCE) +__DARWIN_OS_INLINE +void +_OSWriteSwapInt16( + volatile void * _base, + uintptr_t _offset, + uint16_t _data + ) +{ + ((struct _OSUnalignedU16 *)((uintptr_t)_base + _offset))->__val = _OSSwapInt16(_data); +} +#else +__DARWIN_OS_INLINE void OSWriteSwapInt16( - volatile void * base, - uintptr_t offset, - uint16_t data + volatile void * _base, + uintptr_t _offset, + uint16_t _data ) { - *(volatile uint16_t *)((volatile uintptr_t)base + offset) = _OSSwapInt16(data); + ((struct _OSUnalignedU16 *)((uintptr_t)_base + _offset))->__val = _OSSwapInt16(_data); } +#endif -OS_INLINE +#if defined(_POSIX_C_SOURCE) || defined(_XOPEN_SOURCE) +__DARWIN_OS_INLINE +void +_OSWriteSwapInt32( + volatile void * _base, + uintptr_t _offset, + uint32_t _data + ) +{ + ((struct _OSUnalignedU32 *)((uintptr_t)_base + _offset))->__val = _OSSwapInt32(_data); +} +#else +__DARWIN_OS_INLINE void OSWriteSwapInt32( - volatile void * base, - uintptr_t offset, - uint32_t data + volatile void * _base, + uintptr_t _offset, + uint32_t _data ) { - *(volatile uint32_t *)((volatile uintptr_t)base + offset) = _OSSwapInt32(data); + ((struct _OSUnalignedU32 *)((uintptr_t)_base + _offset))->__val = _OSSwapInt32(_data); } +#endif -OS_INLINE +#if defined(_POSIX_C_SOURCE) || defined(_XOPEN_SOURCE) +__DARWIN_OS_INLINE +void +_OSWriteSwapInt64( + volatile void * _base, + uintptr_t _offset, + uint64_t _data + ) +{ + ((struct _OSUnalignedU64 *)((uintptr_t)_base + _offset))->__val = _OSSwapInt64(_data); +} +#else +__DARWIN_OS_INLINE void OSWriteSwapInt64( - volatile void * base, - uintptr_t offset, - uint64_t data + volatile void * _base, + uintptr_t _offset, + uint64_t _data ) { - *(volatile uint64_t *)((volatile uintptr_t)base + offset) = _OSSwapInt64(data); + ((struct _OSUnalignedU64 *)((uintptr_t)_base + _offset))->__val = _OSSwapInt64(_data); } +#endif #endif /* ! _OS_OSBYTEORDERARM_H */ diff --git a/libkern/libkern/c++/Makefile b/libkern/libkern/c++/Makefile index f9e09b101..fb5fcd4c1 100644 --- a/libkern/libkern/c++/Makefile +++ b/libkern/libkern/c++/Makefile @@ -7,8 +7,13 @@ include $(MakeInc_cmd) include $(MakeInc_def) DATAFILES = \ + OSAllocation.h \ OSArray.h \ OSBoolean.h \ + OSBoundedArray.h \ + OSBoundedArrayRef.h \ + OSBoundedPtr.h \ + OSBoundedPtrFwd.h \ OSCollection.h \ OSCollectionIterator.h \ OSContainers.h \ @@ -24,11 +29,18 @@ DATAFILES = \ OSObject.h \ OSOrderedSet.h \ OSPtr.h \ + OSSharedPtr.h \ + intrusive_shared_ptr.h \ OSSerialize.h \ OSSet.h \ OSString.h \ OSSymbol.h \ - OSUnserialize.h + OSUnserialize.h \ + bounded_array.h \ + bounded_array_ref.h \ + bounded_ptr.h \ + bounded_ptr_fwd.h \ + safe_allocation.h INSTALL_MI_LIST = diff --git a/libkern/libkern/c++/OSAllocation.h b/libkern/libkern/c++/OSAllocation.h new file mode 100644 index 000000000..050f05fcf --- /dev/null +++ b/libkern/libkern/c++/OSAllocation.h @@ -0,0 +1,60 @@ +/* + * Copyright (c) 2019 Apple Inc. All rights reserved. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. 
The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. + * + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ + */ + +#ifndef XNU_LIBKERN_LIBKERN_CXX_OS_ALLOCATION_H +#define XNU_LIBKERN_LIBKERN_CXX_OS_ALLOCATION_H + +#include +#include +#include +#include // IOMalloc/IOFree +#include + +namespace os_detail { +struct IOKit_allocator { + static void* + allocate(size_t bytes) + { + return IOMalloc(bytes); + } + + static void + deallocate(void* p, size_t bytes) + { + IOFree(p, bytes); + } +}; +} // end namespace os_detail + +template +using OSAllocation = libkern::safe_allocation; + +inline constexpr auto OSAllocateMemory = libkern::allocate_memory; +inline constexpr auto OSAdoptMemory = libkern::adopt_memory; + +#endif /* !XNU_LIBKERN_LIBKERN_CXX_OS_ALLOCATION_H */ diff --git a/libkern/libkern/c++/OSArray.h b/libkern/libkern/c++/OSArray.h index 73edeb601..4e0eb72aa 100644 --- a/libkern/libkern/c++/OSArray.h +++ b/libkern/libkern/c++/OSArray.h @@ -33,11 +33,12 @@ #include #include +#include class OSSerialize; class OSArray; -typedef OSPtr OSArrayPtr; +typedef OSArray* OSArrayPtr; /*! * @header @@ -96,18 +97,21 @@ class OSArray : public OSCollection OSDeclareDefaultStructors(OSArray); + typedef OSTaggedPtr ArrayPtrType; + typedef OSTaggedSharedPtr ArraySharedPtrType; + #if APPLE_KEXT_ALIGN_CONTAINERS protected: unsigned int count; unsigned int capacity; unsigned int capacityIncrement; - OSCollectionTaggedPtr *array; + ArrayPtrType * OS_PTRAUTH_SIGNED_PTR("OSArray.array") array; #else /* APPLE_KEXT_ALIGN_CONTAINERS */ protected: - OSCollectionTaggedPtr *array; + ArrayPtrType * OS_PTRAUTH_SIGNED_PTR("OSArray.array") array; unsigned int count; unsigned int capacity; unsigned int capacityIncrement; @@ -144,7 +148,7 @@ public: * (unlike @link //apple_ref/doc/uid/20001502 CFMutableArray@/link, * for which the initial capacity is a hard limit). */ - static OSArrayPtr withCapacity(unsigned int capacity); + static OSPtr withCapacity(unsigned int capacity); /*! @@ -172,7 +176,7 @@ public: * (unlike @link //apple_ref/doc/uid/20001502 CFMutableArray@/link, * for which the initial capacity is a hard limit). */ - static OSArrayPtr withObjects( + static OSPtr withObjects( const OSObject * objects[], unsigned int count, unsigned int capacity = 0); @@ -210,7 +214,7 @@ public: * for storage in the new OSArray, * not copied. */ - static OSArrayPtr withArray( + static OSPtr withArray( const OSArray * array, unsigned int capacity = 0); @@ -460,6 +464,8 @@ public: */ virtual bool setObject(const OSMetaClassBase * anObject); + bool setObject(OSSharedPtr const& anObject); + /*! 
* @function setObject @@ -498,6 +504,10 @@ public: unsigned int index, const OSMetaClassBase * anObject); + bool setObject( + unsigned int index, + OSSharedPtr const& anObject); + /*! * @function merge @@ -535,6 +545,10 @@ public: unsigned int index, const OSMetaClassBase * anObject); + void replaceObject( + unsigned int index, + OSSharedPtr const& anObject); + /*! * @function removeObject @@ -727,7 +741,7 @@ public: * Objects that are not derived from OSCollection are retained * rather than copied. */ - OSCollectionPtr copyCollection(OSDictionary * cycleDict = NULL) APPLE_KEXT_OVERRIDE; + OSPtr copyCollection(OSDictionary * cycleDict = NULL) APPLE_KEXT_OVERRIDE; OSMetaClassDeclareReservedUnused(OSArray, 0); OSMetaClassDeclareReservedUnused(OSArray, 1); diff --git a/libkern/libkern/c++/OSBoolean.h b/libkern/libkern/c++/OSBoolean.h index 67e3b840b..4c4d21a71 100644 --- a/libkern/libkern/c++/OSBoolean.h +++ b/libkern/libkern/c++/OSBoolean.h @@ -36,7 +36,7 @@ class OSString; class OSBoolean; -typedef OSPtr OSBooleanPtr; +typedef OSBoolean* OSBooleanPtr; /*! * @header @@ -106,7 +106,7 @@ public: * @link kOSBooleanFalse kOSBooleanFalse@/link, * so that you can always use pointer comparison with OSBoolean objects. */ - static OSBooleanPtr withBoolean(bool value); + static OSPtr withBoolean(bool value); /*! * @function free diff --git a/osfmk/corecrypto/ccsha2/src/ccdigest_internal.h b/libkern/libkern/c++/OSBoundedArray.h similarity index 72% rename from osfmk/corecrypto/ccsha2/src/ccdigest_internal.h rename to libkern/libkern/c++/OSBoundedArray.h index f055084b0..ecbf08b6e 100644 --- a/osfmk/corecrypto/ccsha2/src/ccdigest_internal.h +++ b/libkern/libkern/c++/OSBoundedArray.h @@ -1,11 +1,5 @@ /* - * ccdigest_internal.h - * corecrypto - * - * Created on 12/20/2017 - * - * Copyright (c) 2017 Apple Inc. All rights reserved. - * + * Copyright (c) 2019 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -32,14 +26,14 @@ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -#ifndef _CORECRYPTO_CCDIGEST_INTERNAL_H_ -#define _CORECRYPTO_CCDIGEST_INTERNAL_H_ +#ifndef XNU_LIBKERN_LIBKERN_CXX_OS_BOUNDED_ARRAY_H +#define XNU_LIBKERN_LIBKERN_CXX_OS_BOUNDED_ARRAY_H -#include +#include +#include +#include -void ccdigest_final_64be(const struct ccdigest_info *di, ccdigest_ctx_t, - unsigned char *digest); -void ccdigest_final_64le(const struct ccdigest_info *di, ccdigest_ctx_t, - unsigned char *digest); +template +using OSBoundedArray = libkern::bounded_array; -#endif /* _CORECRYPTO_CCDIGEST_INTERNAL_H_ */ +#endif /* !XNU_LIBKERN_LIBKERN_CXX_OS_BOUNDED_ARRAY_H */ diff --git a/libkern/libkern/c++/OSBoundedArrayRef.h b/libkern/libkern/c++/OSBoundedArrayRef.h new file mode 100644 index 000000000..7886193a2 --- /dev/null +++ b/libkern/libkern/c++/OSBoundedArrayRef.h @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2019 Apple Inc. All rights reserved. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. 
+ * + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ + */ + +#ifndef XNU_LIBKERN_LIBKERN_CXX_OS_BOUNDED_ARRAY_REF_H +#define XNU_LIBKERN_LIBKERN_CXX_OS_BOUNDED_ARRAY_REF_H + +#include +#include + +template +using OSBoundedArrayRef = libkern::bounded_array_ref; + +#endif /* !XNU_LIBKERN_LIBKERN_CXX_OS_BOUNDED_ARRAY_REF_H */ diff --git a/osfmk/kern/processor_data.c b/libkern/libkern/c++/OSBoundedPtr.h similarity index 64% rename from osfmk/kern/processor_data.c rename to libkern/libkern/c++/OSBoundedPtr.h index b658db17f..263087459 100644 --- a/osfmk/kern/processor_data.c +++ b/libkern/libkern/c++/OSBoundedPtr.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003-2008 Apple Inc. All rights reserved. + * Copyright (c) 2019 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -25,32 +25,23 @@ * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -/* - * Machine independent per processor data. - */ -#include +#ifndef XNU_LIBKERN_LIBKERN_CXX_OS_BOUNDED_PTR_H +#define XNU_LIBKERN_LIBKERN_CXX_OS_BOUNDED_PTR_H -#include -#include #include +#include -void -processor_data_init( - processor_t processor) -{ - (void)memset(&processor->processor_data, 0, sizeof(processor_data_t)); - - timer_init(&PROCESSOR_DATA(processor, idle_state)); - timer_init(&PROCESSOR_DATA(processor, system_state)); - timer_init(&PROCESSOR_DATA(processor, user_state)); - - PROCESSOR_DATA(processor, debugger_state).db_current_op = DBOP_NONE; +namespace os_detail { +struct panic_trapping_policy { + [[noreturn]] static void + trap(char const* message) + { + panic("%s", message); + } +}; } -boolean_t -processor_in_panic_context( - processor_t processor) -{ - return PROCESSOR_DATA(processor, debugger_state).db_entry_count > 0; -} +// OSBoundedPtr alias is defined in the fwd decl header + +#endif /* !XNU_LIBKERN_LIBKERN_CXX_OS_BOUNDED_PTR_H */ diff --git a/libkern/libkern/c++/OSBoundedPtrFwd.h b/libkern/libkern/c++/OSBoundedPtrFwd.h new file mode 100644 index 000000000..f11d14f76 --- /dev/null +++ b/libkern/libkern/c++/OSBoundedPtrFwd.h @@ -0,0 +1,42 @@ +/* + * Copyright (c) 2019 Apple Inc. All rights reserved. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. + * + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. 
+ * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ + */ + +#ifndef XNU_LIBKERN_LIBKERN_CXX_OS_BOUNDED_PTR_FWD_H +#define XNU_LIBKERN_LIBKERN_CXX_OS_BOUNDED_PTR_FWD_H + +#include +#include + +namespace os_detail { +struct panic_trapping_policy; +} + +template +using OSBoundedPtr = libkern::bounded_ptr; + +#endif /* !XNU_LIBKERN_LIBKERN_CXX_OS_BOUNDED_PTR_FWD_H */ diff --git a/libkern/libkern/c++/OSCPPDebug.h b/libkern/libkern/c++/OSCPPDebug.h index 11e45a375..79dd42a15 100644 --- a/libkern/libkern/c++/OSCPPDebug.h +++ b/libkern/libkern/c++/OSCPPDebug.h @@ -37,21 +37,15 @@ __BEGIN_DECLS -// xx-review: Do we want to document these? +extern size_t debug_malloc_size; +extern size_t debug_iomalloc_size; +extern size_t debug_container_malloc_size; +extern size_t debug_ivars_size; -// xx-review: exported in IOKit.kext -extern int debug_malloc_size; -extern int debug_iomalloc_size; -extern int debug_container_malloc_size; - -// xx-review: exported in Libkern.kext -extern int debug_ivars_size; - -// xx-review: exported in IOKit.kext void OSPrintMemory(void); __END_DECLS -#endif +#endif /* OSCPP_DEBUG */ #endif /* _OSCPPDEBUG_H */ diff --git a/libkern/libkern/c++/OSCollection.h b/libkern/libkern/c++/OSCollection.h index 67ec1f771..a4bb58def 100644 --- a/libkern/libkern/c++/OSCollection.h +++ b/libkern/libkern/c++/OSCollection.h @@ -36,10 +36,17 @@ class OSDictionary; class OSCollection; -typedef OSPtr OSCollectionPtr; +typedef OSCollection* OSCollectionPtr; + +// We're not necessarily in C++11 mode, so we need to disable warnings +// for C++11 extensions +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wc++11-extensions" template -using OSCollectionTaggedPtr = OSTaggedPtr; +using OSCollectionTaggedPtr = T *; + +#pragma clang diagnostic pop /*! @@ -426,7 +433,6 @@ public: unsigned options, unsigned mask, void * context = NULL); - OSMetaClassDeclareReservedUsed(OSCollection, 0); /*! * @function copyCollection @@ -452,8 +458,7 @@ public: * Subclasses of OSCollection must override this function * to properly support deep copies. */ - virtual OSCollectionPtr copyCollection(OSDictionary * cycleDict = NULL); - OSMetaClassDeclareReservedUsed(OSCollection, 1); + virtual OSPtr copyCollection(OSDictionary * cycleDict = NULL); /*! * @function iterateObjects @@ -496,6 +501,8 @@ public: #endif /* __BLOCKS__ */ + OSMetaClassDeclareReservedUsedX86(OSCollection, 0); + OSMetaClassDeclareReservedUsedX86(OSCollection, 1); OSMetaClassDeclareReservedUnused(OSCollection, 2); OSMetaClassDeclareReservedUnused(OSCollection, 3); OSMetaClassDeclareReservedUnused(OSCollection, 4); diff --git a/libkern/libkern/c++/OSCollectionIterator.h b/libkern/libkern/c++/OSCollectionIterator.h index eb57231d9..d5f203154 100644 --- a/libkern/libkern/c++/OSCollectionIterator.h +++ b/libkern/libkern/c++/OSCollectionIterator.h @@ -36,7 +36,7 @@ class OSCollectionIterator; -typedef OSPtr OSCollectionIteratorPtr; +typedef OSCollectionIterator* OSCollectionIteratorPtr; /*! * @header @@ -63,7 +63,7 @@ typedef OSPtr OSCollectionIteratorPtr; * * @textblock *
- *     OSCollectionIterator * iterator =
+ *     OSPtr<OSCollectionIterator> iterator =
  *         OSCollectionIterator::withCollection(myCollection);
  *     OSObject * object;
  *     while (object = iterator->getNextObject()) {
@@ -73,7 +73,7 @@ typedef OSPtr<OSCollectionIterator> OSCollectionIteratorPtr;
  *     if (!iterator->isValid()) {
  *         // report that collection changed during iteration
  *     }
- *     iterator->release();
+ *     iterator = nullptr;
 * </pre>
* @/textblock * @@ -116,7 +116,7 @@ public: * @result * A new instance of OSCollectionIterator, or NULL on failure. */ - static OSCollectionIteratorPtr withCollection(const OSCollection * inColl); + static OSPtr withCollection(const OSCollection * inColl); /*! diff --git a/libkern/libkern/c++/OSData.h b/libkern/libkern/c++/OSData.h index ed473487a..6e7ce8799 100644 --- a/libkern/libkern/c++/OSData.h +++ b/libkern/libkern/c++/OSData.h @@ -33,12 +33,13 @@ #include #include +#include class OSData; class OSString; -typedef OSPtr OSDataPtr; -typedef OSPtr OSDataConstPtr; +typedef OSData* OSDataPtr; +typedef OSData const* OSDataConstPtr; /*! * @header @@ -88,12 +89,12 @@ protected: unsigned int length; unsigned int capacity; unsigned int capacityIncrement; - void * data; + void * OS_PTRAUTH_SIGNED_PTR("OSData.data") data; #else /* APPLE_KEXT_ALIGN_CONTAINERS */ protected: - void * data; + void * OS_PTRAUTH_SIGNED_PTR("OSData.data") data; unsigned int length; unsigned int capacity; unsigned int capacityIncrement; @@ -140,7 +141,7 @@ public: * (unlike @link //apple_ref/doc/uid/20001498 CFMutableData@/link, * for which a nonzero initial capacity is a hard limit). */ - static OSDataPtr withCapacity(unsigned int capacity); + static OSPtr withCapacity(unsigned int capacity); /*! @@ -163,7 +164,7 @@ public: * (unlike @link //apple_ref/doc/uid/20001498 CFMutableData@/link, * for which a nonzero initial capacity is a hard limit). */ - static OSDataPtr withBytes( + static OSPtr withBytes( const void * bytes, unsigned int numBytes); @@ -196,7 +197,7 @@ public: * but you can get the byte pointer and * modify bytes within the shared buffer. */ - static OSDataPtr withBytesNoCopy( + static OSPtr withBytesNoCopy( void * bytes, unsigned int numBytes); @@ -220,7 +221,7 @@ public: * (unlike @link //apple_ref/doc/uid/20001498 CFMutableData@/link, * for which a nonzero initial capacity is a hard limit). */ - static OSDataPtr withData(const OSData * inData); + static OSPtr withData(const OSData * inData); /*! @@ -245,7 +246,7 @@ public: * (unlike @link //apple_ref/doc/uid/20001498 CFMutableData@/link, * for which a nonzero initial capacity is a hard limit). */ - static OSDataPtr withData( + static OSPtr withData( const OSData * inData, unsigned int start, unsigned int numBytes); @@ -754,10 +755,10 @@ public: private: #endif virtual void setDeallocFunction(DeallocFunction func); - OSMetaClassDeclareReservedUsed(OSData, 0); bool isSerializable(void); private: + OSMetaClassDeclareReservedUsedX86(OSData, 0); OSMetaClassDeclareReservedUnused(OSData, 1); OSMetaClassDeclareReservedUnused(OSData, 2); OSMetaClassDeclareReservedUnused(OSData, 3); diff --git a/libkern/libkern/c++/OSDictionary.h b/libkern/libkern/c++/OSDictionary.h index 98d258153..61c8b79b1 100644 --- a/libkern/libkern/c++/OSDictionary.h +++ b/libkern/libkern/c++/OSDictionary.h @@ -40,13 +40,14 @@ #include #include #include +#include class OSArray; class OSSymbol; class OSString; class OSDictionary; -typedef OSPtr OSDictionaryPtr; +typedef OSDictionary* OSDictionaryPtr; /*! 
* @header @@ -128,25 +129,25 @@ protected: unsigned int capacity; unsigned int capacityIncrement; struct dictEntry { - OSCollectionTaggedPtr key; - OSCollectionTaggedPtr value; + OSTaggedPtr key; + OSTaggedPtr value; #if XNU_KERNEL_PRIVATE static int compare(const void *, const void *); #endif }; - dictEntry * dictionary; + dictEntry * OS_PTRAUTH_SIGNED_PTR("OSDictionary.dictionary") dictionary; #else /* APPLE_KEXT_ALIGN_CONTAINERS */ protected: struct dictEntry { - OSCollectionTaggedPtr key; - OSCollectionTaggedPtr value; + OSTaggedPtr key; + OSTaggedPtr value; #if XNU_KERNEL_PRIVATE static int compare(const void *, const void *); #endif }; - dictEntry * dictionary; + dictEntry * OS_PTRAUTH_SIGNED_PTR("OSDictionary.dictionary") dictionary; unsigned int count; unsigned int capacity; unsigned int capacityIncrement; @@ -184,7 +185,7 @@ public: * (unlike @link //apple_ref/doc/uid/20001497 CFMutableDictionary@/link, * for which the initial capacity is a hard limit). */ - static OSDictionaryPtr withCapacity(unsigned int capacity); + static OSPtr withCapacity(unsigned int capacity); /*! @@ -219,7 +220,7 @@ public: * @link //apple_ref/doc/uid/20001497 CFMutableDictionary@/link, * for which the initial capacity is a hard limit). */ - static OSDictionaryPtr withObjects( + static OSPtr withObjects( const OSObject * objects[], const OSSymbol * keys[], unsigned int count, @@ -257,7 +258,7 @@ public: * @link //apple_ref/doc/uid/20001497 CFMutableDictionary@/link, * for which the initial capacity is a hard limit). */ - static OSDictionaryPtr withObjects( + static OSPtr withObjects( const OSObject * objects[], const OSString * keys[], unsigned int count, @@ -298,7 +299,7 @@ public: * in the new OSDictionary, * not copied. */ - static OSDictionaryPtr withDictionary( + static OSPtr withDictionary( const OSDictionary * dict, unsigned int capacity = 0); @@ -603,6 +604,10 @@ public: const OSSymbol * aKey, const OSMetaClassBase * anObject); + bool setObject( + OSSharedPtr const& aKey, + OSSharedPtr const& anObject); + /*! * @function setObject @@ -626,6 +631,10 @@ public: const OSString * aKey, const OSMetaClassBase * anObject); + bool setObject( + const OSString * aKey, + OSSharedPtr const& anObject); + /*! * @function setObject @@ -650,6 +659,10 @@ public: const char * aKey, const OSMetaClassBase * anObject); + bool setObject( + const char * aKey, + OSSharedPtr const& anObject); + /*! * @function removeObject @@ -929,12 +942,12 @@ public: * Objects that are not derived from OSCollection are retained * rather than copied. 
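A minimal usage sketch of the smart-pointer signatures above; it assumes an xnu-internal build where OSPtr is the shared-pointer alias, that OSString's factories return OSPtr like the containers shown here, and that OSSharedPtr<OSString> converts implicitly to the OSSharedPtr<const OSMetaClassBase> parameter of setObject.

OSPtr<OSDictionary> dict = OSDictionary::withCapacity(4);
OSPtr<OSString>     val  = OSString::withCString("example");
if (dict && val) {
	dict->setObject("key", val);      /* OSSharedPtr overload declared above */
}
/* no explicit release(): both references drop when dict and val go out of scope */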
*/ - OSCollectionPtr copyCollection(OSDictionary * cycleDict = NULL) APPLE_KEXT_OVERRIDE; + OSPtr copyCollection(OSDictionary * cycleDict = NULL) APPLE_KEXT_OVERRIDE; #if XNU_KERNEL_PRIVATE bool setObject(const OSSymbol *aKey, const OSMetaClassBase *anObject, bool onlyAdd); void sortBySymbol(void); - OSArrayPtr copyKeys(void); + OSPtr copyKeys(void); #endif /* XNU_KERNEL_PRIVATE */ diff --git a/libkern/libkern/c++/OSKext.h b/libkern/libkern/c++/OSKext.h index 942788f0c..aff038533 100644 --- a/libkern/libkern/c++/OSKext.h +++ b/libkern/libkern/c++/OSKext.h @@ -41,10 +41,12 @@ extern "C" { #endif /* XNU_KERNEL_PRIVATE */ } + #include #include #include #include + #include #include @@ -71,8 +73,6 @@ void kxld_log_callback( *********************************************************************/ class OSKext; -typedef OSPtr OSKextPtr; - extern "C" { void OSKextLog( OSKext * aKext, @@ -95,6 +95,12 @@ kern_return_t OSRuntimeFinalizeCPP( OSKext * kext); void OSRuntimeUnloadCPPForSegment( kernel_segment_command_t * segment); +void +OSRuntimeSignStructors( + kernel_mach_header_t * header); +void +OSRuntimeSignStructorsInFileset( + kernel_mach_header_t * fileset_header); kern_return_t is_io_catalog_send_data( mach_port_t masterPort, @@ -148,6 +154,22 @@ struct OSKextActiveAccount { }; typedef struct OSKextActiveAccount OSKextActiveAccount; +class OSKextSavedMutableSegment : public OSObject { + OSDeclareDefaultStructors(OSKextSavedMutableSegment); +public: + static OSPtr withSegment(kernel_segment_command_t *seg); + OSReturn restoreContents(kernel_segment_command_t *seg); + vm_offset_t getVMAddr() const; + vm_size_t getVMSize() const; + virtual void free(void) APPLE_KEXT_OVERRIDE; +private: + bool initWithSegment(kernel_segment_command_t *seg); + kernel_segment_command_t *savedSegment; + vm_offset_t vmaddr; + vm_size_t vmsize; + void * data; +}; + #endif /* XNU_KERNEL_PRIVATE */ /* @@ -237,12 +259,12 @@ private: /************************* * Instance variables *************************/ - OSDictionaryPtr infoDict; + OSPtr infoDict; - OSSymbolConstPtr bundleID; - OSStringPtr path; // not necessarily correct :-/ - OSStringPtr executableRelPath;// relative to bundle - OSStringPtr userExecutableRelPath;// relative to bundle + OSPtr bundleID; + OSPtr path; // not necessarily correct :-/ + OSPtr executableRelPath;// relative to bundle + OSPtr userExecutableRelPath;// relative to bundle OSKextVersion version; // parsed OSKextVersion compatibleVersion;// parsed @@ -254,19 +276,19 @@ private: // kOSKextInvalidLoadTag invalid kmod_info_t * kmod_info; // address into linkedExec./alloced for interface - OSArrayPtr dependencies; // kernel resource does not have any; - // links directly to kernel + OSPtr dependencies; // kernel resource does not have any; + // links directly to kernel /* Only real kexts have these; interface kexts do not. */ - OSDataPtr linkedExecutable; - OSSetPtr metaClasses; // for C++/OSMetaClass kexts + OSPtr linkedExecutable; + OSPtr metaClasses; // for C++/OSMetaClass kexts /* Only interface kexts have these; non-interface kexts can get at them * in the linked Executable. 
*/ - OSDataPtr interfaceUUID; - OSDataPtr driverKitUUID; + OSPtr interfaceUUID; + OSPtr driverKitUUID; struct { unsigned int loggingEnabled:1; @@ -284,20 +306,25 @@ private: unsigned int started:1; unsigned int stopping:1; unsigned int unloading:1; + unsigned int resetSegmentsFromVnode:1; + unsigned int requireExplicitLoad:1; unsigned int autounloadEnabled:1; unsigned int delayAutounload:1; // for development unsigned int CPPInitialized:1; unsigned int jettisonLinkeditSeg:1; + unsigned int resetSegmentsFromImmutableCopy:1; } flags; uint32_t matchingRefCount; + kc_kind_t kc_type; struct list_head pendingPgoHead; uuid_t instance_uuid; OSKextAccount * account; uint32_t builtinKmodIdx; + OSPtr savedMutableSegments; #if PRAGMA_MARK /**************************************/ @@ -310,9 +337,11 @@ private: */ public: static void initialize(void); - static OSDictionaryPtr copyKexts(void); + static OSPtr copyKexts(void); static OSReturn removeKextBootstrap(void); static void willShutdown(void);// called by IOPMrootDomain on shutdown + static void willUserspaceReboot(void); + static void resetAfterUserspaceReboot(void); static void reportOSMetaClassInstances( const char * kextIdentifier, OSKextLogSpec msgLogSpec); @@ -335,28 +364,26 @@ private: /* Instance life cycle. */ - static OSKextPtr withBooterData( + static OSPtr withBooterData( OSString * deviceTreeName, OSData * booterData); virtual bool initWithBooterData( OSString * deviceTreeName, OSData * booterData); - static OSKextPtr withPrelinkedInfoDict( + static OSPtr withPrelinkedInfoDict( OSDictionary * infoDict, - bool doCoalesedSlides); + bool doCoalesedSlides, kc_kind_t type); virtual bool initWithPrelinkedInfoDict( OSDictionary * infoDict, - bool doCoalesedSlides); + bool doCoalesedSlides, kc_kind_t type); - static void setAllVMAttributes(void); + static OSSharedPtr withCodelessInfo( + OSDictionary * infoDict); + virtual bool initWithCodelessInfo( + OSDictionary * infoDict); - static OSKextPtr withMkext2Info( - OSDictionary * anInfoDict, - OSData * mkextData); - virtual bool initWithMkext2Info( - OSDictionary * anInfoDict, - OSData * mkextData); + static void setAllVMAttributes(void); virtual bool setInfoDictionaryAndPath( OSDictionary * aDictionary, @@ -374,9 +401,18 @@ private: bool terminateServicesAndRemovePersonalitiesFlag = false); virtual bool isInExcludeList(void); + virtual bool isLoadable(void); /* Mkexts. */ +#if CONFIG_KXLD + static OSPtr withMkext2Info( + OSDictionary * anInfoDict, + OSData * mkextData); + virtual bool initWithMkext2Info( + OSDictionary * anInfoDict, + OSData * mkextData); + static OSReturn readMkextArchive( OSData * mkextData, uint32_t * checksumPtr = NULL); @@ -384,15 +420,22 @@ private: OSData * mkextData, OSDictionary ** mkextPlistOut, uint32_t * checksumPtr = NULL); - virtual OSData * createMkext2FileEntry( + + static OSReturn readMkext2Archive( + OSData * mkextData, + OSSharedPtr &mkextPlistOut, + uint32_t * checksumPtr = NULL); + + virtual OSPtr createMkext2FileEntry( OSData * mkextData, OSNumber * offsetNum, const char * entryName); - virtual OSDataPtr extractMkext2FileData( + virtual OSPtr extractMkext2FileData( UInt8 * data, const char * name, uint32_t compressedSize, uint32_t fullSize); +#endif // CONFIG_KXLD /* Dependencies. 
*/ @@ -420,6 +463,9 @@ private: uint32_t * responseLengthOut, char ** logInfoOut, uint32_t * logInfoLengthOut); + static OSReturn loadCodelessKext( + OSString * kextIdentifier, + OSDictionary * requestDict); static OSReturn serializeLogInfo( OSArray * logInfoArray, char ** logInfoOut, @@ -427,6 +473,18 @@ private: /* Loading. */ + static bool addKextsFromKextCollection(kernel_mach_header_t *mh, + OSDictionary *infoDict, const char *text_seg_name, + OSData **kcUUID, kc_kind_t type); + + static bool addKextsFromKextCollection(kernel_mach_header_t *mh, + OSDictionary *infoDict, const char *text_seg_name, + OSSharedPtr &kcUUID, kc_kind_t type); + + static bool registerDeferredKextCollection(kernel_mach_header_t *mh, + OSSharedPtr &parsedXML, kc_kind_t type); + static OSSharedPtr consumeDeferredKextCollection(kc_kind_t type); + virtual OSReturn load( OSKextExcludeLevel startOpt = kOSKextExcludeNone, OSKextExcludeLevel startMatchingOpt = kOSKextExcludeAll, @@ -461,7 +519,7 @@ private: virtual OSReturn validateKextMapping(bool startFlag); virtual boolean_t verifySegmentMapping(kernel_segment_command_t *seg); - static OSArrayPtr copyAllKextPersonalities( + static OSPtr copyAllKextPersonalities( bool filterSafeBootFlag = false); static void setPrelinkedPersonalities(OSArray * personalitiesArray); @@ -480,25 +538,28 @@ private: /* Sync with user space. */ - static OSReturn pingKextd(void); + static OSReturn pingIOKitDaemon(void); /* Getting info about loaded kexts (kextstat). */ - static OSDictionaryPtr copyLoadedKextInfo( + static OSPtr copyLoadedKextInfo( OSArray * kextIdentifiers = NULL, OSArray * keys = NULL); - static OSDictionaryPtr copyLoadedKextInfoByUUID( + static OSPtr copyLoadedKextInfoByUUID( OSArray * kextIdentifiers = NULL, OSArray * keys = NULL); - static OSDataPtr copyKextUUIDForAddress(OSNumber *address = NULL); - virtual OSDictionaryPtr copyInfo(OSArray * keys = NULL); + static OSPtr copyKextCollectionInfo( + OSDictionary *requestDict, + OSArray *infoKeys = NULL); + static OSPtr copyKextUUIDForAddress(OSNumber *address = NULL); + virtual OSPtr copyInfo(OSArray * keys = NULL); /* Logging to user space. */ static OSKextLogSpec setUserSpaceLogFilter( OSKextLogSpec userLogSpec, bool captureFlag = false); - static OSArrayPtr clearUserSpaceLogFilter(void); + static OSPtr clearUserSpaceLogFilter(void); static OSKextLogSpec getUserSpaceLogFilter(void); /* OSMetaClasses defined by kext. @@ -516,14 +577,64 @@ private: /* Resource requests and other callback stuff. 
*/ + static OSReturn loadFileSetKexts(OSDictionary * requestDict); + + static OSReturn loadKCFileSet(const char *filepath, kc_kind_t type); + +#if defined(__x86_64__) || defined(__i386__) + static OSReturn mapKCFileSet( + void *control, + vm_size_t fsize, + kernel_mach_header_t **mh, + off_t file_offset, + uintptr_t *slide, + bool pageable, + void *map_entry_buffer); + static OSReturn protectKCFileSet( + kernel_mach_header_t *mh, + kc_kind_t type); + static OSReturn mapKCTextSegment( + void *control, + kernel_mach_header_t **mhp, + off_t file_offset, + uintptr_t *slide, + void *map_entry_list); + static void freeKCFileSetcontrol(void); + OSReturn resetKCFileSetSegments(void); + static void jettisonFileSetLinkeditSegment(kernel_mach_header_t *mh); +#endif //(__x86_64__) || defined(__i386__) + + static OSReturn validateKCFileSetUUID( + OSDictionary *infoDict, + kc_kind_t type); + + static OSReturn validateKCUUIDfromPrelinkInfo( + uuid_t *loaded_kcuuid, + kc_kind_t type, + OSDictionary *infoDict, + const char *uuid_key); + static OSReturn dispatchResource(OSDictionary * requestDict); + static OSReturn setMissingAuxKCBundles(OSDictionary * requestDict); + + static OSReturn setAuxKCBundleAvailable(OSString *kextIdentifier, + OSDictionary *requestDict); + static OSReturn dequeueCallbackForRequestTag( OSKextRequestTag requestTag, LIBKERN_RETURNS_RETAINED OSDictionary ** callbackRecordOut); static OSReturn dequeueCallbackForRequestTag( OSNumber * requestTagNum, LIBKERN_RETURNS_RETAINED OSDictionary ** callbackRecordOut); + + static OSReturn dequeueCallbackForRequestTag( + OSKextRequestTag requestTag, + OSSharedPtr &callbackRecordOut); + static OSReturn dequeueCallbackForRequestTag( + OSNumber * requestTagNum, + OSSharedPtr &callbackRecordOut); + static void invokeRequestCallback( OSDictionary * callbackRecord, OSReturn requestResult); @@ -531,6 +642,7 @@ private: OSReturn callbackResult, bool invokeFlag = true); virtual uint32_t countRequestCallbacks(void); + OSReturn resetMutableSegments(void); /* panic() support. 
*/ @@ -546,6 +658,7 @@ public: int (* printf_func)(const char *fmt, ...), uint32_t flags); bool isDriverKit(void); + bool isInFileset(void); private: static OSKextLoadedKextSummary *summaryForAddress(const uintptr_t addr); static void *kextForAddress(const void *addr); @@ -589,11 +702,11 @@ public: #endif public: // caller must release - static OSKextPtr lookupKextWithIdentifier(const char * kextIdentifier); - static OSKextPtr lookupKextWithIdentifier(OSString * kextIdentifier); - static OSKextPtr lookupKextWithLoadTag(OSKextLoadTag aTag); - static OSKextPtr lookupKextWithAddress(vm_address_t address); - static OSKextPtr lookupKextWithUUID(uuid_t uuid); + static OSPtr lookupKextWithIdentifier(const char * kextIdentifier); + static OSPtr lookupKextWithIdentifier(OSString * kextIdentifier); + static OSPtr lookupKextWithLoadTag(OSKextLoadTag aTag); + static OSPtr lookupKextWithAddress(vm_address_t address); + static OSPtr lookupKextWithUUID(uuid_t uuid); kernel_section_t *lookupSection(const char *segname, const char*secname); @@ -616,9 +729,22 @@ public: OSKextExcludeLevel startMatchingOpt = kOSKextExcludeAll, OSArray * personalityNames = NULL); + static OSReturn loadKextWithIdentifier( + OSString * kextIdentifier, + OSSharedPtr &kextRef, + Boolean allowDeferFlag = true, + Boolean delayAutounloadFlag = false, + OSKextExcludeLevel startOpt = kOSKextExcludeNone, + OSKextExcludeLevel startMatchingOpt = kOSKextExcludeAll, + OSArray * personalityNames = NULL); + + static OSReturn loadKextFromKC(OSKext *theKext, OSDictionary *requestDict); + static void dropMatchingReferences( OSSet * kexts); + bool hasDependency(const OSSymbol * depID); + static OSReturn removeKextWithIdentifier( const char * kextIdentifier, bool terminateServicesAndRemovePersonalitiesFlag = false); @@ -628,7 +754,15 @@ public: static OSReturn requestDaemonLaunch( OSString * kextIdentifier, OSString * serverName, - OSNumber * serverTag); + OSNumber * serverTag, + class IOUserServerCheckInToken ** checkInToken); + + static OSReturn requestDaemonLaunch( + OSString * kextIdentifier, + OSString * serverName, + OSNumber * serverTag, + OSSharedPtr &checkInToken); + static OSReturn requestResource( const char * kextIdentifier, const char * resourceName, @@ -641,7 +775,7 @@ public: static void considerUnloads(Boolean rescheduleOnlyFlag = false); static void flushNonloadedKexts(Boolean flushPrelinkedKexts); - static void setKextdActive(Boolean active = true); + static void setIOKitDaemonActive(bool active = true); static void setDeferredLoadSucceeded(Boolean succeeded = true); static void considerRebuildOfPrelinkedKernel(void); static void createExcludeListFromBooterData( @@ -650,7 +784,7 @@ public: static void createExcludeListFromPrelinkInfo(OSArray * theInfoArray); static boolean_t updateExcludeList(OSDictionary * infoDict); - static bool isWaitingKextd(void); + static bool pendingIOKitDaemonRequests(void); virtual bool setAutounloadEnabled(bool flag); @@ -664,10 +798,10 @@ public: virtual OSKextLoadTag getLoadTag(void); virtual void getSizeInfo(uint32_t *loadSize, uint32_t *wiredSize); - virtual OSDataPtr copyUUID(void); - OSDataPtr copyTextUUID(void); - OSDataPtr copyMachoUUID(const kernel_mach_header_t * header); - virtual OSArrayPtr copyPersonalitiesArray(void); + virtual OSPtr copyUUID(void); + OSPtr copyTextUUID(void); + OSPtr copyMachoUUID(const kernel_mach_header_t * header); + virtual OSPtr copyPersonalitiesArray(void); static bool copyUserExecutablePath(const OSSymbol * bundleID, char * pathResult, size_t pathSize); virtual 
void setDriverKitUUID(OSData *uuid); /* This removes personalities naming the kext (by CFBundleIdentifier), @@ -678,6 +812,9 @@ public: /* Converts common string-valued properties to OSSymbols for lower memory consumption. */ static void uniquePersonalityProperties(OSDictionary * personalityDict); +#ifdef XNU_KERNEL_PRIVATE + static void uniquePersonalityProperties(OSDictionary * personalityDict, bool defaultAddKernelBundleIdentifier); +#endif virtual bool declaresExecutable(void); // might be missing virtual bool isInterface(void); @@ -689,7 +826,25 @@ public: virtual bool isLoaded(void); virtual bool isStarted(void); virtual bool isCPPInitialized(void); + + const char * + getKCTypeString(void) + { + switch (kc_type) { + case KCKindPrimary: + return kKCTypePrimary; + case KCKindPageable: + return kKCTypeSystem; + case KCKindAuxiliary: + return kKCTypeAuxiliary; + case KCKindNone: + return kKCTypeCodeless; + default: + return "??"; + } + } }; +extern "C" void OSKextResetAfterUserspaceReboot(void); #endif /* !_LIBKERN_OSKEXT_H */ diff --git a/libkern/libkern/c++/OSLib.h b/libkern/libkern/c++/OSLib.h index 358b2ad53..b5ea4371b 100644 --- a/libkern/libkern/c++/OSLib.h +++ b/libkern/libkern/c++/OSLib.h @@ -49,23 +49,29 @@ __END_DECLS #if XNU_KERNEL_PRIVATE #include +#include #define kalloc_container(size) \ - ({ kalloc_tag_bt(size, VM_KERN_MEMORY_LIBKERN); }) + kalloc_tag_bt(size, VM_KERN_MEMORY_LIBKERN) + +#define kalloc_data_container(size, flags) \ + kheap_alloc_tag_bt(KHEAP_DATA_BUFFERS, size, flags, VM_KERN_MEMORY_LIBKERN) + +#define kfree_data_container(buffer, size) \ + kheap_free(KHEAP_DATA_BUFFERS, buffer, size) #define kallocp_container(size) \ - ({ kallocp_tag_bt(size, VM_KERN_MEMORY_LIBKERN); }) + kallocp_tag_bt(size, VM_KERN_MEMORY_LIBKERN) #if OSALLOCDEBUG -extern "C" int debug_container_malloc_size; -extern "C" int debug_ivars_size; + #if IOTRACKING -#define OSCONTAINER_ACCUMSIZE(s) do { OSAddAtomic((SInt32)(s), &debug_container_malloc_size); trackingAccumSize(s); } while(0) +#define OSCONTAINER_ACCUMSIZE(s) do { OSAddAtomicLong((s), &debug_container_malloc_size); trackingAccumSize(s); } while(0) #else -#define OSCONTAINER_ACCUMSIZE(s) do { OSAddAtomic((SInt32)(s), &debug_container_malloc_size); } while(0) +#define OSCONTAINER_ACCUMSIZE(s) do { OSAddAtomicLong((s), &debug_container_malloc_size); } while(0) #endif -#define OSMETA_ACCUMSIZE(s) do { OSAddAtomic((SInt32)(s), &debug_container_malloc_size); } while(0) -#define OSIVAR_ACCUMSIZE(s) do { OSAddAtomic((SInt32)(s), &debug_ivars_size); } while(0) +#define OSMETA_ACCUMSIZE(s) do { OSAddAtomicLong((s), &debug_container_malloc_size); } while(0) +#define OSIVAR_ACCUMSIZE(s) do { OSAddAtomicLong((s), &debug_ivars_size); } while(0) #else /* OSALLOCDEBUG */ diff --git a/libkern/libkern/c++/OSMetaClass.h b/libkern/libkern/c++/OSMetaClass.h index b9688fd51..e83f4bac0 100644 --- a/libkern/libkern/c++/OSMetaClass.h +++ b/libkern/libkern/c++/OSMetaClass.h @@ -33,6 +33,10 @@ #include #include #include +#ifdef KERNEL_PRIVATE +#include +#include +#endif /* KERNEL_PRIVATE */ /* * LIBKERN_ macros below can be used to describe the ownership semantics @@ -158,26 +162,41 @@ class OSInterface #ifdef XNU_KERNEL_PRIVATE -#ifdef CONFIG_EMBEDDED +#if !XNU_TARGET_OS_OSX #define APPLE_KEXT_VTABLE_PADDING 0 -#else /* CONFIG_EMBEDDED */ -/*! 
@parseOnly */ +#else /* !XNU_TARGET_OS_OSX */ #define APPLE_KEXT_VTABLE_PADDING 1 -#endif /* CONFIG_EMBEDDED */ +#endif /* !XNU_TARGET_OS_OSX */ #else /* XNU_KERNEL_PRIVATE */ -#include -#if (TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR) +/* No xnu-private defines outside of xnu */ + +#include +#if TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR #define APPLE_KEXT_VTABLE_PADDING 0 -#else /* (TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR) */ -/*! @parseOnly */ +#else /* TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR */ #define APPLE_KEXT_VTABLE_PADDING 1 -#endif /* (TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR) */ +#endif /* TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR */ #endif /* XNU_KERNEL_PRIVATE */ +#ifdef XNU_KERNEL_PRIVATE +#if XNU_TARGET_OS_OSX && defined(__arm64__) +#define APPLE_KEXT_ALIGN_CONTAINERS 1 +#else /* XNU_TARGET_OS_OSX && defined(__arm64__) */ #define APPLE_KEXT_ALIGN_CONTAINERS (0 == APPLE_KEXT_VTABLE_PADDING) +#endif /* XNU_TARGET_OS_OSX && defined(__arm64__) */ + +#else /* XNU_KERNEL_PRIVATE */ + +#if TARGET_OS_OSX && defined(__arm64__) +#define APPLE_KEXT_ALIGN_CONTAINERS 1 +#else /* TARGET_OS_OSX && defined(__arm64__) */ +#define APPLE_KEXT_ALIGN_CONTAINERS (0 == APPLE_KEXT_VTABLE_PADDING) +#endif /* TARGET_OS_OSX && defined(__arm64__) */ + +#endif /* XNU_KERNEL_PRIVATE */ #if defined(__LP64__) /*! @parseOnly */ @@ -332,7 +351,7 @@ public: * @link //apple_ref/cpp/macro/OSCheckTypeInst OSCheckTypeInst@/link. */ #define OSTypeID(type) (type::metaClass) -#define OSMTypeID(type) ((OSMetaClass *) type::metaClass) +#define OSMTypeID(type) (const_cast(type::metaClass)) /*! @@ -448,7 +467,7 @@ public: #if defined(__arm__) || defined(__arm64__) - static _ptf_t _ptmf2ptf(const OSMetaClassBase * self, void (OSMetaClassBase::*func)(void), uintptr_t typeDisc); + static _ptf_t _ptmf2ptf(const OSMetaClassBase * self, void (OSMetaClassBase::*func)(void)); #elif defined(__i386__) || defined(__x86_64__) @@ -457,8 +476,7 @@ public: // ABI static inline _ptf_t - _ptmf2ptf(const OSMetaClassBase *self, void (OSMetaClassBase::*func)(void), - uintptr_t typeDisc __attribute__((unused))) + _ptmf2ptf(const OSMetaClassBase *self, void (OSMetaClassBase::*func)(void)) { union { void (OSMetaClassBase::*fIn)(void); @@ -503,6 +521,10 @@ public: * you wish to cache. * @param func The pointer to the member function itself, * something like &Class::function. + * It should be an explicit member function pointer constant, + * rather than a variable. + * Don't pass a NULL member function pointer. + * Instead, directly use a NULL function pointer. * * @result * A pointer to a function of the given type referencing self. @@ -516,10 +538,16 @@ public: * This function will panic if an attempt is made to call it * with a multiply-inheriting class. */ +#if __has_builtin(__builtin_load_member_function_pointer) +#define OSMemberFunctionCast(cptrtype, self, func) \ + ((cptrtype) __builtin_load_member_function_pointer(*self, func) ? 
: \ + (cptrtype) OSMetaClassBase:: \ + _ptmf2ptf(self, (void (OSMetaClassBase::*)(void)) func)) +#else #define OSMemberFunctionCast(cptrtype, self, func) \ (cptrtype) OSMetaClassBase:: \ - _ptmf2ptf(self, (void (OSMetaClassBase::*)(void)) func, \ - ptrauth_type_discriminator(__typeof__(func))) + _ptmf2ptf(self, (void (OSMetaClassBase::*)(void)) func) +#endif protected: OSMetaClassBase(); @@ -906,11 +934,17 @@ public: private: #if APPLE_KEXT_VTABLE_PADDING // Virtual Padding +#if defined(__arm64__) || defined(__arm__) + virtual void _RESERVEDOSMetaClassBase0(); + virtual void _RESERVEDOSMetaClassBase1(); + virtual void _RESERVEDOSMetaClassBase2(); + virtual void _RESERVEDOSMetaClassBase3(); +#endif /* defined(__arm64__) || defined(__arm__) */ virtual void _RESERVEDOSMetaClassBase4(); virtual void _RESERVEDOSMetaClassBase5(); virtual void _RESERVEDOSMetaClassBase6(); virtual void _RESERVEDOSMetaClassBase7(); -#endif +#endif /* APPLE_KEXT_VTABLE_PADDING */ } APPLE_KEXT_COMPATIBILITY; @@ -1255,6 +1289,37 @@ protected: const OSMetaClass * superclass, unsigned int classSize); +#ifdef KERNEL_PRIVATE +/*! + * @function OSMetaClass + * + * @abstract + * Constructor for OSMetaClass objects. + * + * @param className A C string naming the C++ class + * that this OSMetaClass represents. + * @param superclass The OSMetaClass object representing the superclass + * of this metaclass's class. + * @param classSize The allocation size of the represented C++ class. + * @param zone Pointer to return the created zone. + * @param zone_name Name of zone to create + * @param zflags Zone creation flags + * + * @discussion + * This constructor is protected and cannot be used + * to instantiate OSMetaClass directly, as OSMetaClass is an abstract class. + * This function is called during kext loading + * to queue C++ classes for registration. + * See @link preModLoad preModLoad@/link and + * @link postModLoad postModLoad@/link. + */ + OSMetaClass(const char * className, + const OSMetaClass * superclass, + unsigned int classSize, + zone_t * zone, + const char * zone_name, + zone_create_flags_t zflags); +#endif /*! * @function ~OSMetaClass @@ -1777,19 +1842,24 @@ public: static const OSMetaClass * const superClass; \ public: \ static const OSMetaClass * const metaClass; \ - static class MetaClass : public OSMetaClass { \ - public: \ - MetaClass(); \ - virtual OSObject *alloc() const APPLE_KEXT_OVERRIDE;\ - _OS_ADD_METAMETHODS(dispatch); \ - } gMetaClass; \ - friend class className ::MetaClass; \ + static class MetaClass : public OSMetaClass { \ + public: \ + MetaClass(); \ + virtual OSObject *alloc() const APPLE_KEXT_OVERRIDE; \ + _OS_ADD_METAMETHODS(dispatch); \ + } gMetaClass; \ + friend class className ::MetaClass; \ virtual const OSMetaClass * getMetaClass() const APPLE_KEXT_OVERRIDE; \ protected: \ className (const OSMetaClass *); \ virtual ~ className () APPLE_KEXT_OVERRIDE; \ _OS_ADD_METHODS(className, dispatch) +#define _OS_ADD_OPERATOR_PROTO \ + public: \ + static void *operator new(size_t size); \ + protected: \ + static void operator delete(void *mem, size_t size); /*! * @define OSDeclareDefaultStructors @@ -1807,18 +1877,18 @@ public: * immediately after the opening brace in a class declaration. * It leaves the current privacy state as protected:. 
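 *
 * For illustration only (MyDriverData is a hypothetical class name, not
 * part of this header), the macro is used like this:
 *
 *   class MyDriverData : public OSObject
 *   {
 *       OSDeclareDefaultStructors(MyDriverData);
 *
 *   public:
 *       bool init() APPLE_KEXT_OVERRIDE;
 *   };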
*/ -#define _OSDeclareDefaultStructors(className, dispatch) \ - OSDeclareCommonStructors(className, dispatch); \ - public: \ - className (void); \ +#define _OSDeclareDefaultStructors(className, dispatch) \ + OSDeclareCommonStructors(className, dispatch); \ + public: \ + className (void); \ + _OS_ADD_OPERATOR_PROTO \ protected: +#define OSDeclareDefaultStructors(className) \ + _OSDeclareDefaultStructors(className, ) -#define OSDeclareDefaultStructors(className) \ -_OSDeclareDefaultStructors(className, ) - -#define OSDeclareDefaultStructorsWithDispatch(className) \ -_OSDeclareDefaultStructors(className, dispatch) +#define OSDeclareDefaultStructorsWithDispatch(className) \ + _OSDeclareDefaultStructors(className, dispatch) /*! @@ -1838,17 +1908,25 @@ _OSDeclareDefaultStructors(className, dispatch) * immediately after the opening brace in a class declaration. * It leaves the current privacy state as protected:. */ -#define _OSDeclareAbstractStructors(className, dispatch) \ - OSDeclareCommonStructors(className, dispatch); \ - private: \ - className (void); /* Make primary constructor private in abstract */ \ - protected: +#define _OSDeclareAbstractStructors(className, dispatch) \ + OSDeclareCommonStructors(className, dispatch) \ + private: \ + /* Make primary constructor private in abstract */ \ + className (void); \ + protected: \ + +#define OSDeclareAbstractStructors(className) \ + _OSDeclareAbstractStructors(className, ) \ + _OS_ADD_OPERATOR_PROTO -#define OSDeclareAbstractStructors(className) \ -_OSDeclareAbstractStructors(className, ) +#define OSDeclareAbstractStructorsWithDispatch(className) \ + _OSDeclareAbstractStructors(className, dispatch) \ + _OS_ADD_OPERATOR_PROTO + +#define OSDeclareAbstractStructorsWithDispatchAndNoOperators( \ + className) \ + _OSDeclareAbstractStructors(className, dispatch) -#define OSDeclareAbstractStructorsWithDispatch(className) \ -_OSDeclareAbstractStructors(className, dispatch) /*! * @define OSDeclareFinalStructors @@ -1875,18 +1953,17 @@ _OSDeclareAbstractStructors(className, dispatch) * Warning: Changing a class from "Default" to "Final" will break * binary compatibility. */ -#define _OSDeclareFinalStructors(className, dispatch) \ - _OSDeclareDefaultStructors(className, dispatch) \ - private: \ - void __OSFinalClass(void); \ +#define _OSDeclareFinalStructors(className, dispatch) \ + _OSDeclareDefaultStructors(className, dispatch) \ + private: \ + void __OSFinalClass(void); \ protected: +#define OSDeclareFinalStructors(className) \ + _OSDeclareFinalStructors(className, ) -#define OSDeclareFinalStructors(className) \ -_OSDeclareFinalStructors(className, ) - -#define OSDeclareFinalStructorsWithDispatch(className) \ -_OSDeclareFinalStructors(className, dispatch) +#define OSDeclareFinalStructorsWithDispatch(className) \ + _OSDeclareFinalStructors(className, dispatch) /* Not to be included in headerdoc. @@ -1905,24 +1982,56 @@ _OSDeclareFinalStructors(className, dispatch) * not a string or macro. * @param init A function to call in the constructor * of the class's OSMetaClass. 
- */ -#define OSDefineMetaClassWithInit(className, superclassName, init) \ - /* Class global data */ \ - className ::MetaClass className ::gMetaClass; \ - const OSMetaClass * const className ::metaClass = \ - & className ::gMetaClass; \ - const OSMetaClass * const className ::superClass = \ - & superclassName ::gMetaClass; \ - /* Class member functions */ \ - className :: className(const OSMetaClass *meta) \ - : superclassName (meta) { } \ - className ::~ className() { } \ - const OSMetaClass * className ::getMetaClass() const \ - { return &gMetaClass; } \ - /* The ::MetaClass constructor */ \ - className ::MetaClass::MetaClass() \ - : OSMetaClass(#className, className::superClass, sizeof(className)) \ + * + * @discussion + * Note: Needs to be followed by + * OSMetaClassConstructorInit or + * OSMetaClassConstructorInitWithZone for initialization + * of class's OSMetaClass constructor. + */ +#define OSMetaClassConstructorInit(className, superclassName, \ + init) \ + /* The ::MetaClass constructor */ \ + className ::MetaClass::MetaClass() \ + : OSMetaClass(#className, className::superClass, \ + sizeof(className)) \ + { init; } + +#ifdef XNU_KERNEL_PRIVATE +#define declareZone(className) \ + static SECURITY_READ_ONLY_LATE(zone_t) className ## _zone; +#elif KERNEL_PRIVATE /* XNU_KERNEL_PRIVATE */ +#define declareZone(className) \ + static zone_t className ## _zone; +#endif /* KERNEL_PRIVATE */ + +#ifdef KERNEL_PRIVATE +#define OSMetaClassConstructorInitWithZone(className, \ + superclassName, init, zflags) \ + declareZone(className) \ + /* The ::MetaClass constructor */ \ + className ::MetaClass::MetaClass() \ + : OSMetaClass(#className, className::superClass, \ + sizeof(className), \ + &(className ## _zone), \ + "iokit." #className, zflags) \ { init; } +#endif /* KERNEL_PRIVATE */ + +#define OSDefineMetaClassWithInit(className, superclassName, \ + init) \ + /* Class global data */ \ + className ::MetaClass className ::gMetaClass; \ + const OSMetaClass * const className ::metaClass = \ + & className ::gMetaClass; \ + const OSMetaClass * const className ::superClass = \ + & superclassName ::gMetaClass; \ + /* Class member functions */ \ + className :: className(const OSMetaClass *meta) \ + : superclassName (meta) { } \ + className ::~ className() { } \ + const OSMetaClass * className ::getMetaClass() const \ + { return &gMetaClass; } /* Not to be included in headerdoc. @@ -1940,7 +2049,7 @@ _OSDeclareFinalStructors(className, dispatch) * as a raw token, * not a string or macro. */ -#define OSDefineAbstractStructors(className, superclassName) \ +#define OSDefineAbstractStructors(className, superclassName) \ OSObject * className ::MetaClass::alloc() const { return NULL; } @@ -1959,11 +2068,49 @@ _OSDeclareFinalStructors(className, dispatch) * as a raw token, * not a string or macro. 
*/ +#define OSDefineBasicStructors(className, superclassName) \ + OSObject * className ::MetaClass::alloc() const \ + { return new className; } \ + className :: className () : superclassName (&gMetaClass) \ + { gMetaClass.instanceConstructed(); } + +#define OSDefineOperatorMethods(className) \ + void * className::operator new(size_t size) \ + { return OSObject::operator new(size); } \ + void className::operator delete(void *mem, size_t size) \ + { return OSObject::operator delete(mem, size); } + +#ifdef KERNEL_PRIVATE +#define OSDefineOperatorMethodsWithZone(className) \ + void * className :: operator new(size_t size) { \ + if(className ## _zone) { \ + return zalloc_flags(className ## _zone, \ + (zalloc_flags_t) (Z_WAITOK | Z_ZERO));\ + } else { \ + /* + * kIOTracking is on, disabling zones + * for iokit objects + */ \ + return OSObject::operator new(size); \ + } \ + } \ + void className :: operator delete(void *mem, size_t size) { \ + if(className ## _zone) { \ + kern_os_zfree(className ## _zone, mem, size); \ + } else { \ + /* + * kIOTracking is on, disabling zones + * for iokit objects + */ \ + return OSObject::operator delete(mem, size); \ + } \ + } +#endif /* KERNEL_PRIVATE */ + #define OSDefineDefaultStructors(className, superclassName) \ - OSObject * className ::MetaClass::alloc() const \ - { return new className; } \ - className :: className () : superclassName (&gMetaClass) \ - { gMetaClass.instanceConstructed(); } + OSDefineBasicStructors(className, superclassName) \ + OSDefineOperatorMethods(className) + /* Not to be included in headerdoc. * @@ -1980,8 +2127,8 @@ _OSDeclareFinalStructors(className, dispatch) * as a raw token, * not a string or macro. */ -#define OSDefineFinalStructors(className, superclassName) \ - OSDefineDefaultStructors(className, superclassName) \ +#define OSDefineFinalStructors(className, superclassName) \ + OSDefineBasicStructors(className, superclassName) \ void className ::__OSFinalClass(void) { } @@ -2002,10 +2149,46 @@ _OSDeclareFinalStructors(className, dispatch) * @param init A function to call in the constructor * of the class's OSMetaClass. */ -#define OSDefineMetaClassAndStructorsWithInit(className, superclassName, init) \ - OSDefineMetaClassWithInit(className, superclassName, init) \ - OSDefineDefaultStructors(className, superclassName) +#define OSDefineMetaClassAndStructorsWithInit(className, \ + superclassName, init) \ + OSDefineMetaClassWithInit(className, superclassName, init) \ + OSMetaClassConstructorInit(className, superclassName, init) \ + OSDefineDefaultStructors(className, superclassName) +#ifdef KERNEL_PRIVATE +/* Not to be included in headerdoc. + * + * @define OSDefineMetaClassAndStructorsWithInitWithZone + * @hidecontents + * + * @abstract + * Helper macro for for the standard metaclass-registration macros. + * DO NOT USE. + * + * @param className The name of the C++ class, as a raw token, + * not a string or macro. + * @param superclassName The name of the superclass of the C++ class, + * as a raw token, + * not a string or macro. + * @param init A function to call in the constructor + * of the class's OSMetaClass. + * @param zflags Zone creation flags. + * + * @discussion + * In addition to what + * OSDefineMetaClassAndStructorsWithInit does this + * macro implements operator new and delete to use zalloc rather + * than kalloc. Objects of this class get will reside in their + * own zone rather than share VA with other objects. 
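+ *
+ * For illustration only (MyZonedService is a hypothetical class name, and
+ * ZC_ZFREE_CLEARMEM stands in for whatever zone_create_flags_t value the
+ * caller needs): kexts are expected to reach this helper through the
+ * public wrappers defined further below, e.g.
+ *
+ *   OSDefineMetaClassAndStructorsWithZone(MyZonedService, OSObject,
+ *       ZC_ZFREE_CLEARMEM)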
+ */ +#define OSDefineMetaClassAndStructorsWithInitAndZone(className, \ + superclassName, init, zflags) \ + OSDefineMetaClassWithInit(className, superclassName, init) \ + OSMetaClassConstructorInitWithZone(className, \ + superclassName, init, zflags) \ + OSDefineBasicStructors(className, superclassName) \ + OSDefineOperatorMethodsWithZone(className) +#endif /* KERNEL_PRIVATE */ /* Not to be included in headerdoc. * @@ -2024,9 +2207,12 @@ _OSDeclareFinalStructors(className, dispatch) * @param init A function to call in the constructor * of the class's OSMetaClass. */ -#define OSDefineMetaClassAndAbstractStructorsWithInit(className, superclassName, init) \ - OSDefineMetaClassWithInit(className, superclassName, init) \ - OSDefineAbstractStructors(className, superclassName) +#define OSDefineMetaClassAndAbstractStructorsWithInit( \ + className, superclassName, init) \ + OSDefineMetaClassWithInit(className, superclassName, init) \ + OSMetaClassConstructorInit(className, superclassName, init) \ + OSDefineAbstractStructors(className, superclassName) \ + OSDefineOperatorMethods(className) /* Not to be included in headerdoc. @@ -2046,10 +2232,47 @@ _OSDeclareFinalStructors(className, dispatch) * @param init A function to call in the constructor * of the class's OSMetaClass. */ -#define OSDefineMetaClassAndFinalStructorsWithInit(className, superclassName, init) \ - OSDefineMetaClassWithInit(className, superclassName, init) \ - OSDefineFinalStructors(className, superclassName) +#define OSDefineMetaClassAndFinalStructorsWithInit(className, \ + superclassName, init) \ + OSDefineMetaClassWithInit(className, superclassName, init) \ + OSMetaClassConstructorInit(className, superclassName, init) \ + OSDefineFinalStructors(className, superclassName) \ + OSDefineOperatorMethods(className) +#ifdef KERNEL_PRIVATE +/* Not to be included in headerdoc. + * + * @define OSDefineMetaClassAndFinalStructorsWithInitAndZone + * @hidecontents + * + * @abstract + * Helper macro for for the standard metaclass-registration macros. + * DO NOT USE. + * + * @param className The name of the C++ class, as a raw token, + * not a string or macro. + * @param superclassName The name of the superclass of the C++ class, + * as a raw token, + * not a string or macro. + * @param init A function to call in the constructor + * of the class's OSMetaClass. + * @param zflags Zone creation flags. + * + * @discussion + * In addition to what + * does this + * macro implements operator new and delete to use zalloc rather + * than kalloc. Objects of this class get will reside in their + * own zone rather than share VA with other objects. + */ +#define OSDefineMetaClassAndFinalStructorsWithInitAndZone( \ + className, superclassName, init, zflags) \ + OSDefineMetaClassWithInit(className, superclassName, init) \ + OSMetaClassConstructorInitWithZone(className, \ + superclassName, init, zflags) \ + OSDefineFinalStructors(className, superclassName) \ + OSDefineOperatorMethodsWithZone(className) +#endif /* Helpers */ @@ -2071,7 +2294,9 @@ _OSDeclareFinalStructors(className, dispatch) * of the class's OSMetaClass. */ #define OSDefineMetaClass(className, superclassName) \ - OSDefineMetaClassWithInit(className, superclassName, ) + OSDefineMetaClassWithInit(className, superclassName, ) \ + OSMetaClassConstructorInit(className, superclassName, ) \ + OSDefineOperatorMethods(className) /*! @@ -2093,9 +2318,38 @@ _OSDeclareFinalStructors(className, dispatch) * at the beginning of their implementation files, * before any function implementations for the class. 
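 *
 * A minimal sketch, using the hypothetical MyDriverData class from the
 * declaration example above, of the matching implementation file:
 *
 *   // MyDriverData.cpp
 *   #include "MyDriverData.h"
 *
 *   OSDefineMetaClassAndStructors(MyDriverData, OSObject)
 *
 *   bool
 *   MyDriverData::init()
 *   {
 *       return OSObject::init();
 *   }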
*/ -#define OSDefineMetaClassAndStructors(className, superclassName) \ - OSDefineMetaClassAndStructorsWithInit(className, superclassName, ) +#define OSDefineMetaClassAndStructors(className, superclassName) \ + OSDefineMetaClassAndStructorsWithInit(className, \ + superclassName, ) +#ifdef KERNEL_PRIVATE +/*! + * @define OSDefineMetaClassAndStructorsWithZone + * @hidecontents + * + * @abstract + * Defines an OSMetaClass and associated routines + * for a concrete Libkern C++ class. + * + * @param className The name of the C++ class, as a raw token, + * not a string or macro. + * @param superclassName The name of the superclass of the C++ class, + * as a raw token, + * not a string or macro. + * @param zflags Zone creation flags. + * + * @discussion + * In addition to what + * does this + * macro implements operator new and delete to use zalloc rather + * than kalloc. Objects of this class get will reside in their + * own zone rather than share VA with other objects. + */ +#define OSDefineMetaClassAndStructorsWithZone(className, \ + superclassName, zflags) \ + OSDefineMetaClassAndStructorsWithInitAndZone(className, \ + superclassName, , zflags) +#endif /*! * @define OSDefineMetaClassAndAbstractStructors @@ -2117,8 +2371,10 @@ _OSDeclareFinalStructors(className, dispatch) * at the beginning of their implementation files, * before any function implementations for the class. */ -#define OSDefineMetaClassAndAbstractStructors(className, superclassName) \ - OSDefineMetaClassAndAbstractStructorsWithInit (className, superclassName, ) +#define OSDefineMetaClassAndAbstractStructors(className, \ + superclassName) \ + OSDefineMetaClassAndAbstractStructorsWithInit (className, \ + superclassName, ) /*! @@ -2150,8 +2406,39 @@ _OSDeclareFinalStructors(className, dispatch) * Warning: Changing a class from "Default" to "Final" will break * binary compatibility. */ -#define OSDefineMetaClassAndFinalStructors(className, superclassName) \ - OSDefineMetaClassAndFinalStructorsWithInit(className, superclassName, ) +#define OSDefineMetaClassAndFinalStructors(className, \ + superclassName) \ + OSDefineMetaClassAndFinalStructorsWithInit(className, \ + superclassName, ) + +#ifdef KERNEL_PRIVATE +/*! + * @define OSDefineMetaClassAndFinalStructorsWithZone + * @hidecontents + * + * @abstract + * Defines an OSMetaClass and associated routines + * for concrete Libkern C++ class. + * + * @param className The name of the C++ class, as a raw token, + * not a string or macro. + * @param superclassName The name of the superclass of the C++ class, + * as a raw token, + * not a string or macro. + * @param zflags Zone creation flags. + * + * @discussion + * In addition to what + * OSDefineMetaClassAndFinalStructors does this + * macro implements operator new and delete to use zalloc rather + * than kalloc. Objects of this class get will reside in their + * own zone rather than share VA with other objects. + */ +#define OSDefineMetaClassAndFinalStructorsWithZone(className, \ + superclassName, zflags) \ + OSDefineMetaClassAndFinalStructorsWithInitAndZone( \ + className, superclassName, , zflags) +#endif /* KERNEL_PRIVATE */ // Dynamic vtable patchup support routines and types @@ -2190,8 +2477,7 @@ _OSDeclareFinalStructors(className, dispatch) * OSMetaClassDeclareReservedUsed@/link. 
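 *
 * For illustration only (hypothetical class name), padding slots are
 * reserved at the end of the class declaration:
 *
 *   OSMetaClassDeclareReservedUnused(MyDriverData, 0);
 *   OSMetaClassDeclareReservedUnused(MyDriverData, 1);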
*/ #if APPLE_KEXT_VTABLE_PADDING -#define OSMetaClassDeclareReservedUnused(className, index) \ - private: \ +#define OSMetaClassDeclareReservedUnused(className, index) \ virtual void _RESERVED ## className ## index () #else #define OSMetaClassDeclareReservedUnused(className, index) @@ -2219,6 +2505,7 @@ _OSDeclareFinalStructors(className, dispatch) * OSMetaClassDeclareReservedUnused@/link. */ #define OSMetaClassDeclareReservedUsed(className, index) +#define OSMetaClassDeclareReservedUsedARM(className, x86index, armindex) /*! @@ -2284,6 +2571,20 @@ void className ::_RESERVED ## className ## index () \ * OSMetaClassDefineReservedUnused@/link. */ #define OSMetaClassDefineReservedUsed(className, index) +#define OSMetaClassDefineReservedUsedARM(className, x86index, armindex) + +/* + * OSMetaClassDeclareReservedUsedX86 needs to be placed with the unused vtable + * slots since it will unused on arm targets. + */ +#if defined(__arm64__) || defined(__arm__) +#define OSMetaClassDeclareReservedUsedX86 OSMetaClassDeclareReservedUnused +#define OSMetaClassDefineReservedUsedX86 OSMetaClassDefineReservedUnused +#else +#define OSMetaClassDeclareReservedUsedX86 OSMetaClassDeclareReservedUsed +#define OSMetaClassDefineReservedUsedX86 OSMetaClassDefineReservedUsed + +#endif // I/O Kit debug internal routines. static void printInstanceCounts(); diff --git a/libkern/libkern/c++/OSNumber.h b/libkern/libkern/c++/OSNumber.h index 34a9472d5..3ac99810f 100644 --- a/libkern/libkern/c++/OSNumber.h +++ b/libkern/libkern/c++/OSNumber.h @@ -31,8 +31,8 @@ #ifndef _OS_OSNUMBER_H #define _OS_OSNUMBER_H -#include #include +#include /*! * @header @@ -43,7 +43,7 @@ class OSNumber; -typedef OSPtr OSNumberPtr; +typedef OSNumber* OSNumberPtr; /*! * @class OSNumber @@ -122,7 +122,7 @@ public: * and @link addValue addValue@/link, * but you can't change the bit size. */ - static OSNumberPtr withNumber( + static OSPtr withNumber( unsigned long long value, unsigned int numberOfBits); @@ -157,7 +157,7 @@ public: * and @link addValue addValue@/link, * but you can't change the bit size. */ - static OSNumberPtr withNumber( + static OSPtr withNumber( const char * valueString, unsigned int numberOfBits); diff --git a/libkern/libkern/c++/OSObject.h b/libkern/libkern/c++/OSObject.h index d75fad273..145ae8559 100644 --- a/libkern/libkern/c++/OSObject.h +++ b/libkern/libkern/c++/OSObject.h @@ -35,9 +35,9 @@ #define _LIBKERN_OSOBJECT_H #include +#include #include #include -#include #if defined(__clang__) #pragma clang diagnostic ignored "-Woverloaded-virtual" @@ -47,7 +47,7 @@ class OSSymbol; class OSString; class OSObject; -typedef OSPtr OSObjectPtr; +typedef OSObject* OSObjectPtr; /*! @@ -173,17 +173,12 @@ typedef OSPtr OSObjectPtr; */ class OSObject : public OSMetaClassBase { - OSDeclareAbstractStructorsWithDispatch(OSObject); + OSDeclareAbstractStructorsWithDispatchAndNoOperators(OSObject); #if IOKITSTATS friend class IOStatistics; #endif -#ifdef LIBKERN_SMART_POINTERS - template - friend class os::smart_ptr; -#endif - private: /* Not to be included in headerdoc. * @@ -309,7 +304,13 @@ protected: * release@/link * instead. */ +#ifdef XNU_KERNEL_PRIVATE + static void operator delete(void * mem, size_t size) + __XNU_INTERNAL(OSObject_operator_delete); +#else static void operator delete(void * mem, size_t size); +#endif + // XXX: eventually we can flip this switch //#ifdef LIBKERN_SMART_POINTERS @@ -330,7 +331,12 @@ public: * @result * A pointer to block of memory if available, NULL otherwise. 
*/ +#ifdef XNU_KERNEL_PRIVATE + static void * operator new(size_t size) + __XNU_INTERNAL(OSObject_operator_new); +#else static void * operator new(size_t size); +#endif public: diff --git a/libkern/libkern/c++/OSOrderedSet.h b/libkern/libkern/c++/OSOrderedSet.h index dc1a61d20..cb70a4276 100644 --- a/libkern/libkern/c++/OSOrderedSet.h +++ b/libkern/libkern/c++/OSOrderedSet.h @@ -36,7 +36,7 @@ class OSOffset; class OSOrderedSet; -typedef OSPtr OSOrderedSetPtr; +typedef OSOrderedSet* OSOrderedSetPtr; /*! * @header @@ -184,7 +184,7 @@ public: * See * @link getOrderingRef getOrderingRef@/link. */ - static OSOrderedSetPtr withCapacity( + static OSPtr withCapacity( unsigned int capacity, OSOrderFunction orderFunc = NULL, void * orderingContext = NULL); @@ -409,6 +409,8 @@ public: */ virtual bool setObject(const OSMetaClassBase * anObject); + bool setObject(OSSharedPtr const& anObject); + /*! * @function setFirstObject @@ -443,6 +445,8 @@ public: */ virtual bool setFirstObject(const OSMetaClassBase * anObject); + bool setFirstObject(OSSharedPtr const& anObject); + /*! * @function setLastObject @@ -477,6 +481,8 @@ public: */ virtual bool setLastObject(const OSMetaClassBase * anObject); + bool setLastObject(OSSharedPtr const& anObject); + /*! * @function removeObject @@ -492,6 +498,8 @@ public: */ virtual void removeObject(const OSMetaClassBase * anObject); + void removeObject(OSSharedPtr const& anObject); + /*! * @function containsObject @@ -632,6 +640,10 @@ public: unsigned int index, const OSMetaClassBase * anObject); + bool setObject( + unsigned int index, + OSSharedPtr const& anObject); + /*! * @function getObject @@ -757,7 +769,7 @@ public: * Objects that are not derived from OSCollection are retained * rather than copied. */ - OSCollectionPtr copyCollection(OSDictionary * cycleDict = NULL) APPLE_KEXT_OVERRIDE; + OSPtr copyCollection(OSDictionary * cycleDict = NULL) APPLE_KEXT_OVERRIDE; OSMetaClassDeclareReservedUnused(OSOrderedSet, 0); OSMetaClassDeclareReservedUnused(OSOrderedSet, 1); diff --git a/libkern/libkern/c++/OSPtr.h b/libkern/libkern/c++/OSPtr.h index fb2dc9704..a3c645ee3 100644 --- a/libkern/libkern/c++/OSPtr.h +++ b/libkern/libkern/c++/OSPtr.h @@ -1,145 +1,116 @@ -#ifndef _OS_OBJECT_PTR_H -#define _OS_OBJECT_PTR_H +// +// Copyright (c) 2019 Apple, Inc. All rights reserved. +// +// @APPLE_OSREFERENCE_LICENSE_HEADER_START@ +// +// This file contains Original Code and/or Modifications of Original Code +// as defined in and that are subject to the Apple Public Source License +// Version 2.0 (the 'License'). You may not use this file except in +// compliance with the License. The rights granted to you under the License +// may not be used to create, or enable the creation or redistribution of, +// unlawful or unlicensed copies of an Apple operating system, or to +// circumvent, violate, or enable the circumvention or violation of, any +// terms of an Apple operating system software license agreement. +// +// Please obtain a copy of the License at +// http://www.opensource.apple.com/apsl/ and read it before using this file. +// +// The Original Code and all software distributed under the License are +// distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER +// EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, +// INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. 
+// Please see the License for the specific language governing rights and +// limitations under the License. +// +// @APPLE_OSREFERENCE_LICENSE_HEADER_END@ +// + +#ifndef XNU_LIBKERN_LIBKERN_CXX_OS_PTR_H +#define XNU_LIBKERN_LIBKERN_CXX_OS_PTR_H + +// +// The declarations in this file are a transition tool from raw pointers to +// the new OSSharedPtr class. +// +// Basically, code in headers that wants to be able to vend both a raw pointer +// and a shared pointer interface should use `OSPtr` instead of `T*`. +// Then, users that want to opt into using `OSSharedPtr` can define the +// `IOKIT_ENABLE_SHARED_PTR` macro in their translation unit (.cpp file), +// and `OSPtr` will suddenly be `OSSharedPtr`. +// +// When the `IOKIT_ENABLE_SHARED_PTR` macro is not enabled, however, `OSPtr` +// will simply be `T*`, so that clients that do not wish to migrate to smart +// pointers don't need to. +// +// Note that defining `IOKIT_ENABLE_SHARED_PTR` requires C++17, because the +// implementation of `OSSharedPtr` requires that. +// + +#if !defined(PRIVATE) // only ask to opt-in explicitly for third-party developers +# if defined(IOKIT_ENABLE_SHARED_PTR) +# if !defined(IOKIT_ENABLE_EXPERIMENTAL_SHARED_PTR_IN_API) +# error It seems that you have defined IOKIT_ENABLE_SHARED_PTR to \ + ask IOKit to return shared pointers from many of its API \ + functions. This is great! However, please note that we may \ + transition more IOKit APIs to shared pointers in the future, \ + so if you enable IOKIT_ENABLE_SHARED_PTR right now, your \ + code may fail to compile with future versions of IOKit \ + (which would return shared pointers where you expect raw \ + pointers). If you are OK with that, please define the \ + IOKIT_ENABLE_EXPERIMENTAL_SHARED_PTR_IN_API macro to \ + silence this error. If that is not acceptable, please hold \ + off on enabling shared pointers in IOKit APIs until we have \ + committed to API stability for it. +# endif +# endif +#endif -#include -#include +#if defined(IOKIT_ENABLE_SHARED_PTR) -#if KERNEL -# include +#if __cplusplus < 201703L +#error "Your code must compile with C++17 or later to adopt shared pointers. 
Use Xcode's 'C++ Language Dialect' setting, or on clang's command-line use -std=gnu++17" #endif -#ifdef LIBKERN_SMART_POINTERS - -/* - * OSObject pointers (OSPtr) - */ - -struct osobject_policy { - static void - retain(const OSMetaClassBase *obj) - { - obj->retain(); - } - static void - release(const OSMetaClassBase *obj) - { - obj->release(); - } - template static T * - alloc() - { - return OSTypeAlloc(T); - } - template static To * - dyn_cast(From *p) - { - return OSDynamicCast(To, p); - } -}; - -template -using OSPtr = os::smart_ptr; - -/* - * Tagged OSObject pointers (OSTaggedPtr) - */ - -template -struct osobject_tagged_policy { - static void - retain(const OSMetaClassBase *obj) - { - obj->taggedRetain(OSTypeID(Tag)); - } - static void - release(const OSMetaClassBase *obj) - { - obj->taggedRelease(OSTypeID(Tag)); - } - template static T * - alloc() - { - return OSTypeAlloc(T); - } - template static To * - dyn_cast(From *p) - { - return OSDynamicCast(To, p); - } -}; - -template -using OSTaggedPtr = os::smart_ptr >; - -/* - * Dynamic cast - */ - -template -os::smart_ptr -OSDynamicCastPtr(os::smart_ptr const &from) -{ - return from.template dynamic_pointer_cast(); -} - -template -os::smart_ptr -OSDynamicCastPtr(os::smart_ptr &&from) -{ - return os::move(from).template dynamic_pointer_cast(); -} - -/* - * Creation helpers - */ - -template -os::smart_ptr -OSNewObject() -{ - return os::smart_ptr::alloc(); -} - -template -os::smart_ptr -OSMakePtr(T *&p) -{ - return os::smart_ptr(p); -} - -template -os::smart_ptr -OSMakePtr(T *&&p) -{ - return os::smart_ptr(os::move(p)); -} - -template -os::smart_ptr -OSMakePtr(T *&&p, bool retain) -{ - return os::smart_ptr(os::move(p), retain); -} - -template -static inline T ** -OSOutPtr(os::smart_ptr *p) -{ - if (p == nullptr) { - return nullptr; - } else { - return p->get_for_out_param(); - } -} - -#else /* LIBKERN_SMART_POINTERS */ - -/* Fall back to the smart pointer types just being a simple pointer */ -template +#include + +template +using OSPtr = OSSharedPtr; + +class OSCollection; // Forward declare only because OSCollection.h needs OSPtr.h + +template +using OSTaggedPtr = OSTaggedSharedPtr; + +#else + +template +class __attribute__((trivial_abi)) OSSharedPtr; + +template +class __attribute__((trivial_abi)) OSTaggedSharedPtr; + +// We're not necessarily in C++11 mode, so we need to disable warnings +// for C++11 extensions +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wc++11-extensions" + +template using OSPtr = T *; -template +template using OSTaggedPtr = T *; -#endif /* LIBKERN_SMART_POINTERS */ -#endif /* _OS_OBJECT_PTR_H */ +#pragma clang diagnostic pop + +#endif + +// Allow C++98 code to use nullptr. +// +// This isn't the right place to put this, however the old OSPtr.h header +// had it and some code has now started relying on nullptr being defined. +#if !__has_feature(cxx_nullptr) && !defined(nullptr) +# define nullptr NULL +#endif + +#endif // !XNU_LIBKERN_LIBKERN_CXX_OS_PTR_H diff --git a/libkern/libkern/c++/OSSerialize.h b/libkern/libkern/c++/OSSerialize.h index 758162584..b9ba18bf9 100644 --- a/libkern/libkern/c++/OSSerialize.h +++ b/libkern/libkern/c++/OSSerialize.h @@ -31,6 +31,7 @@ #define _OS_OSSERIALIZE_H #include +#include class OSCollection; class OSSet; @@ -39,10 +40,10 @@ class OSArray; class OSData; class OSSerializer; -typedef OSPtr OSSerializerPtr; +typedef OSSerializer* OSSerializerPtr; class OSSerialize; -typedef OSPtr OSSerializePtr; +typedef OSSerialize* OSSerializePtr; /*! 
* @header @@ -99,11 +100,11 @@ private: unsigned int capacity; // of container unsigned int capacityIncrement;// of container - OSArray * tags; // tags for all objects seen + OSPtr tags; // tags for all objects seen #ifdef XNU_KERNEL_PRIVATE public: - typedef const OSMetaClassBase * (*Editor)(void * reference, + typedef OSPtr (*Editor)(void * reference, OSSerialize * s, OSCollection * container, const OSSymbol * name, @@ -116,12 +117,12 @@ public: bool endCollection; Editor editor; void * editRef; - OSData * indexData; + OSPtr indexData; bool binarySerialize(const OSMetaClassBase *o); bool binarySerializeInternal(const OSMetaClassBase *o); bool addBinary(const void * data, size_t size); - bool addBinaryObject(const OSMetaClassBase * o, uint32_t key, const void * _bits, size_t size, + bool addBinaryObject(const OSMetaClassBase * o, uint32_t key, const void * _bits, uint32_t size, uint32_t * startCollection); void endBinaryCollection(uint32_t startCollection); @@ -143,9 +144,9 @@ public: * @discussion * The serializer will grow as needed to accommodate more data. */ - static OSSerializePtr withCapacity(unsigned int capacity); + static OSPtr withCapacity(unsigned int capacity); - static OSSerializePtr binaryWithCapacity(unsigned int inCapacity, Editor editor = NULL, void * reference = NULL); + static OSPtr binaryWithCapacity(unsigned int inCapacity, Editor editor = NULL, void * reference = NULL); void setIndexed(bool index); /*! @@ -341,13 +342,13 @@ class OSSerializer : public OSObject public: - static OSSerializerPtr forTarget( + static OSPtr forTarget( void * target, OSSerializerCallback callback, void * ref = NULL); #ifdef __BLOCKS__ - static OSSerializerPtr withBlock( + static OSPtr withBlock( OSSerializerBlock callback); #endif diff --git a/libkern/libkern/c++/OSSet.h b/libkern/libkern/c++/OSSet.h index 9c7718807..ff4c457a2 100644 --- a/libkern/libkern/c++/OSSet.h +++ b/libkern/libkern/c++/OSSet.h @@ -37,8 +37,8 @@ class OSArray; class OSSet; -typedef OSPtr OSSetPtr; -typedef OSPtr OSArrayPtr; +typedef OSSet* OSSetPtr; +typedef OSArray* OSArrayPtr; /*! * @header @@ -96,12 +96,12 @@ class OSSet : public OSCollection #if APPLE_KEXT_ALIGN_CONTAINERS private: - OSArrayPtr members; + OSPtr members; #else /* APPLE_KEXT_ALIGN_CONTAINERS */ private: - OSArrayPtr members; + OSPtr members; protected: struct ExpansionData { }; @@ -140,7 +140,7 @@ public: * (unlike @link //apple_ref/doc/uid/20001503 CFMutableSet@/link, * for which the initial capacity is a hard limit). */ - static OSSetPtr withCapacity(unsigned int capacity); + static OSPtr withCapacity(unsigned int capacity); /*! @@ -174,7 +174,7 @@ public: * The objects in objects are retained for storage in the new set, * not copied. */ - static OSSetPtr withObjects( + static OSPtr withObjects( const OSObject * objects[], unsigned int count, unsigned int capacity = 0); @@ -212,7 +212,7 @@ public: * The objects in array are retained for storage in the new set, * not copied. */ - static OSSetPtr withArray( + static OSPtr withArray( const OSArray * array, unsigned int capacity = 0); @@ -248,7 +248,7 @@ public: * The objects in set are retained for storage in the new set, * not copied. */ - static OSSetPtr withSet(const OSSet * set, + static OSPtr withSet(const OSSet * set, unsigned int capacity = 0); @@ -543,6 +543,8 @@ public: */ virtual bool setObject(const OSMetaClassBase * anObject); + bool setObject(OSSharedPtr const& anObject); + /*! 
* @function merge @@ -614,6 +616,8 @@ public: */ virtual void removeObject(const OSMetaClassBase * anObject); + void removeObject(OSSharedPtr const& anObject); + /*! * @function containsObject @@ -779,7 +783,7 @@ public: * Objects that are not derived from OSCollection are retained * rather than copied. */ - OSCollectionPtr copyCollection(OSDictionary *cycleDict = NULL) APPLE_KEXT_OVERRIDE; + OSPtr copyCollection(OSDictionary *cycleDict = NULL) APPLE_KEXT_OVERRIDE; OSMetaClassDeclareReservedUnused(OSSet, 0); OSMetaClassDeclareReservedUnused(OSSet, 1); diff --git a/libkern/libkern/c++/OSSharedPtr.h b/libkern/libkern/c++/OSSharedPtr.h new file mode 100644 index 000000000..f293a6d80 --- /dev/null +++ b/libkern/libkern/c++/OSSharedPtr.h @@ -0,0 +1,153 @@ +// +// Copyright (c) 2019 Apple, Inc. All rights reserved. +// +// @APPLE_OSREFERENCE_LICENSE_HEADER_START@ +// +// This file contains Original Code and/or Modifications of Original Code +// as defined in and that are subject to the Apple Public Source License +// Version 2.0 (the 'License'). You may not use this file except in +// compliance with the License. The rights granted to you under the License +// may not be used to create, or enable the creation or redistribution of, +// unlawful or unlicensed copies of an Apple operating system, or to +// circumvent, violate, or enable the circumvention or violation of, any +// terms of an Apple operating system software license agreement. +// +// Please obtain a copy of the License at +// http://www.opensource.apple.com/apsl/ and read it before using this file. +// +// The Original Code and all software distributed under the License are +// distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER +// EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, +// INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. +// Please see the License for the specific language governing rights and +// limitations under the License. +// +// @APPLE_OSREFERENCE_LICENSE_HEADER_END@ +// + +#ifndef XNU_LIBKERN_LIBKERN_CXX_OS_SHARED_PTR_H +#define XNU_LIBKERN_LIBKERN_CXX_OS_SHARED_PTR_H + +#include +#include + +struct intrusive_osobject_retainer { + static void + retain(OSMetaClassBase const& obj) + { + obj.retain(); + } + static void + release(OSMetaClassBase const& obj) + { + obj.release(); + } +}; + +template +struct intrusive_tagged_osobject_retainer { + static void + retain(OSMetaClassBase const& obj) + { + obj.taggedRetain(OSTypeID(Tag)); + } + static void + release(OSMetaClassBase const& obj) + { + obj.taggedRelease(OSTypeID(Tag)); + } +}; + +inline constexpr auto OSNoRetain = libkern::no_retain; +inline constexpr auto OSRetain = libkern::retain; + +template +class __attribute__((trivial_abi)) OSSharedPtr: public libkern::intrusive_shared_ptr { + using libkern::intrusive_shared_ptr::intrusive_shared_ptr; +}; + +template +class __attribute__((trivial_abi)) OSTaggedSharedPtr: public libkern::intrusive_shared_ptr > { + using libkern::intrusive_shared_ptr >::intrusive_shared_ptr; +}; + +template +OSSharedPtr +OSMakeShared() +{ + T* memory = OSTypeAlloc(T); + // OSTypeAlloc returns an object with a refcount of 1, so we must not + // retain when constructing the shared pointer. 
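+	// For illustration only (MyDriverData is a hypothetical OSObject
+	// subclass), callers typically write:
+	//     OSSharedPtr<MyDriverData> obj = OSMakeShared<MyDriverData>();
+	//     if (!obj || !obj->init()) { /* handle allocation failure */ }
+	// and let the last OSSharedPtr referencing the object release it.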
+ return OSSharedPtr(memory, OSNoRetain); +} + +template +OSSharedPtr +OSDynamicPtrCast(OSSharedPtr const& source) +{ + Destination* raw = OSDynamicCast(Destination, source.get()); + if (raw == nullptr) { + return nullptr; + } else { + OSSharedPtr dest(raw, OSRetain); + return dest; + } +} + +template +OSSharedPtr +OSDynamicPtrCast(OSSharedPtr && source) +{ + Destination* raw = OSDynamicCast(Destination, source.get()); + if (raw == nullptr) { + return nullptr; + } else { + OSSharedPtr dest(raw, OSNoRetain); + source.detach(); // we stole the retain! + return dest; + } +} + +template +OSTaggedSharedPtr +OSDynamicPtrCast(OSTaggedSharedPtr const& source) +{ + Destination* raw = OSDynamicCast(Destination, source.get()); + if (raw == nullptr) { + return nullptr; + } else { + OSTaggedSharedPtr dest(raw, OSRetain); + return dest; + } +} + +template +OSSharedPtr +OSStaticPtrCast(OSSharedPtr const& ptr) noexcept +{ + return OSSharedPtr(static_cast(ptr.get()), OSRetain); +} + +template +OSSharedPtr +OSStaticPtrCast(OSSharedPtr&& ptr) noexcept +{ + return OSSharedPtr(static_cast(ptr.detach()), OSNoRetain); +} + +template +OSSharedPtr +OSConstPtrCast(OSSharedPtr const& ptr) noexcept +{ + return OSSharedPtr(const_cast(ptr.get()), OSRetain); +} + +template +OSSharedPtr +OSConstPtrCast(OSSharedPtr&& ptr) noexcept +{ + return OSSharedPtr(const_cast(ptr.detach()), OSNoRetain); +} + +#endif // !XNU_LIBKERN_LIBKERN_CXX_OS_SHARED_PTR_H diff --git a/libkern/libkern/c++/OSString.h b/libkern/libkern/c++/OSString.h index 925d5a3a4..c8cd5025f 100644 --- a/libkern/libkern/c++/OSString.h +++ b/libkern/libkern/c++/OSString.h @@ -33,13 +33,13 @@ #include #include +#include class OSData; class OSString; -typedef OSPtr OSStringPtr; -typedef OSPtr OSStringConstPtr; - +typedef OSString* OSStringPtr; +typedef OSString const* OSStringConstPtr; /*! * @header @@ -117,12 +117,12 @@ protected: unsigned int flags:14, length:18; - char * string; + char * OS_PTRAUTH_SIGNED_PTR("OSString.string") string;; #else /* APPLE_KEXT_ALIGN_CONTAINERS */ protected: - char * string; + char * OS_PTRAUTH_SIGNED_PTR("OSString.string") string;; unsigned int flags; unsigned int length; @@ -150,7 +150,7 @@ public: * with the reference count incremented. * Changes to one will not be reflected in the other. */ - static OSStringPtr withString(const OSString * aString); + static OSPtr withString(const OSString * aString); /*! @@ -167,7 +167,7 @@ public: * and with a reference count of 1; * NULL on failure. */ - static OSStringPtr withCString(const char * cString); + static OSPtr withCString(const char * cString); /*! @@ -196,10 +196,10 @@ public: * An OSString object created with this function does not * allow changing the string via @link setChar setChar@/link. */ - static OSStringPtr withCStringNoCopy(const char * cString); + static OSPtr withCStringNoCopy(const char * cString); #if XNU_KERNEL_PRIVATE - static OSStringPtr withStringOfLength(const char *cString, size_t length); + static OSPtr withStringOfLength(const char *cString, size_t length); #endif /* XNU_KERNEL_PRIVATE */ /*! diff --git a/libkern/libkern/c++/OSSymbol.h b/libkern/libkern/c++/OSSymbol.h index 1ee9792b6..b55cdda4f 100644 --- a/libkern/libkern/c++/OSSymbol.h +++ b/libkern/libkern/c++/OSSymbol.h @@ -36,8 +36,8 @@ class OSSymbol; -typedef OSPtr OSSymbolPtr; -typedef OSPtr OSSymbolConstPtr; +typedef OSSymbol* OSSymbolPtr; +typedef OSSymbol const* OSSymbolConstPtr; /*! 
* @header @@ -251,7 +251,7 @@ public: * new OSSymbol with a retain count of 1, * or increments the retain count of the existing instance. */ - static OSSymbolConstPtr withString(const OSString * aString); + static OSPtr withString(const OSString * aString); /*! @@ -278,7 +278,7 @@ public: * new OSSymbol with a retain count of 1, * or increments the retain count of the existing instance. */ - static OSSymbolConstPtr withCString(const char * cString); + static OSPtr withCString(const char * cString); /*! @@ -308,7 +308,7 @@ public: * new OSSymbol with a retain count of 1, * or increments the retain count of the existing instance. */ - static OSSymbolConstPtr withCStringNoCopy(const char * cString); + static OSPtr withCStringNoCopy(const char * cString); /*! * @function existingSymbolForString @@ -327,7 +327,7 @@ public: * The returned OSSymbol object is returned with an incremented refcount * that needs to be released. */ - static OSSymbolConstPtr existingSymbolForString(const OSString *aString); + static OSPtr existingSymbolForString(const OSString *aString); /*! * @function existingSymbolForCString @@ -346,7 +346,7 @@ public: * The returned OSSymbol object is returned with an incremented refcount * that needs to be released. */ - static OSSymbolConstPtr existingSymbolForCString(const char *aCString); + static OSPtr existingSymbolForCString(const char *aCString); /*! * @function isEqualTo diff --git a/libkern/libkern/c++/OSUnserialize.h b/libkern/libkern/c++/OSUnserialize.h index 678e48828..a6cded44e 100644 --- a/libkern/libkern/c++/OSUnserialize.h +++ b/libkern/libkern/c++/OSUnserialize.h @@ -32,6 +32,7 @@ #include #include +#include #include #include @@ -67,9 +68,13 @@ class OSString; * @discussion * Not safe to call in a primary interrupt handler. */ -extern "C++" OSObjectPtr OSUnserializeXML( +extern "C++" OSPtr OSUnserializeXML( const char * buffer, - OSStringPtr * errorString = NULL); + OSString * * errorString = NULL); + +extern "C++" OSPtr OSUnserializeXML( + const char * buffer, + OSSharedPtr& errorString); /*! * @function OSUnserializeXML @@ -93,16 +98,27 @@ extern "C++" OSObjectPtr OSUnserializeXML( * @discussion * Not safe to call in a primary interrupt handler. */ -extern "C++" OSObjectPtr OSUnserializeXML( +extern "C++" OSPtr OSUnserializeXML( + const char * buffer, + size_t bufferSize, + OSString * *errorString = NULL); + +extern "C++" OSPtr OSUnserializeXML( const char * buffer, size_t bufferSize, - OSStringPtr *errorString = NULL); + OSSharedPtr &errorString); + +extern "C++" OSPtr +OSUnserializeBinary(const char *buffer, size_t bufferSize, OSString * *errorString); -extern "C++" OSObjectPtr -OSUnserializeBinary(const char *buffer, size_t bufferSize, OSStringPtr *errorString); +extern "C++" OSPtr +OSUnserializeBinary(const char *buffer, size_t bufferSize, OSSharedPtr& errorString); #ifdef __APPLE_API_OBSOLETE -extern OSObjectPtr OSUnserialize(const char *buffer, OSStringPtr *errorString = NULL); +extern OSPtr OSUnserialize(const char *buffer, OSString * *errorString = NULL); + +extern OSPtr OSUnserialize(const char *buffer, OSSharedPtr& errorString); + #endif /* __APPLE_API_OBSOLETE */ #endif /* _OS_OSUNSERIALIZE_H */ diff --git a/libkern/libkern/c++/bounded_array.h b/libkern/libkern/c++/bounded_array.h new file mode 100644 index 000000000..3b8363429 --- /dev/null +++ b/libkern/libkern/c++/bounded_array.h @@ -0,0 +1,104 @@ +// +// Copyright (c) 2019 Apple, Inc. All rights reserved. 
+// +// @APPLE_OSREFERENCE_LICENSE_HEADER_START@ +// +// This file contains Original Code and/or Modifications of Original Code +// as defined in and that are subject to the Apple Public Source License +// Version 2.0 (the 'License'). You may not use this file except in +// compliance with the License. The rights granted to you under the License +// may not be used to create, or enable the creation or redistribution of, +// unlawful or unlicensed copies of an Apple operating system, or to +// circumvent, violate, or enable the circumvention or violation of, any +// terms of an Apple operating system software license agreement. +// +// Please obtain a copy of the License at +// http://www.opensource.apple.com/apsl/ and read it before using this file. +// +// The Original Code and all software distributed under the License are +// distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER +// EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, +// INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. +// Please see the License for the specific language governing rights and +// limitations under the License. +// +// @APPLE_OSREFERENCE_LICENSE_HEADER_END@ +// + +#ifndef XNU_LIBKERN_LIBKERN_CXX_BOUNDED_ARRAY_H +#define XNU_LIBKERN_LIBKERN_CXX_BOUNDED_ARRAY_H + +#include +#include +#include + +namespace libkern { +// `bounded_array` is a simple abstraction for a C-style array. +// +// Unlike C-style arrays, however, it ensures that the array is not accessed +// outside of its bounds. Furthermore, the iterators of the `bounded_array` +// are `bounded_ptr`, which track the range they're allowed to access. +// +// TODO: +// - Should we provide deep comparison operators? +// - Document individual methods +template +struct bounded_array { + // DO NOT USE THIS MEMBER DIRECTLY OR WE WILL BREAK YOUR CODE IN THE FUTURE. + // THIS HAS TO BE PUBLIC FOR THIS TYPE TO SUPPORT AGGREGATE-INITIALIZATION. + T data_[N]; + + using iterator = bounded_ptr; + using const_iterator = bounded_ptr; + + iterator + begin() noexcept + { + return iterator(data_, data_, data_ + N); + } + const_iterator + begin() const noexcept + { + return const_iterator(data_, data_, data_ + N); + } + iterator + end() noexcept + { + return iterator(data_ + N, data_, data_ + N); + } + const_iterator + end() const noexcept + { + return const_iterator(data_ + N, data_, data_ + N); + } + + constexpr size_t + size() const + { + return N; + } + constexpr T* + data() noexcept + { + return data_; + } + constexpr T const* + data() const noexcept + { + return data_; + } + OS_ALWAYS_INLINE T& + operator[](ptrdiff_t n) + { + return begin()[n]; + } + OS_ALWAYS_INLINE T const& + operator[](ptrdiff_t n) const + { + return begin()[n]; + } +}; +} // end namespace libkern + +#endif // !XNU_LIBKERN_LIBKERN_CXX_BOUNDED_ARRAY_H diff --git a/libkern/libkern/c++/bounded_array_ref.h b/libkern/libkern/c++/bounded_array_ref.h new file mode 100644 index 000000000..48bf4f70e --- /dev/null +++ b/libkern/libkern/c++/bounded_array_ref.h @@ -0,0 +1,283 @@ +// +// Copyright (c) 2019 Apple, Inc. All rights reserved. +// +// @APPLE_OSREFERENCE_LICENSE_HEADER_START@ +// +// This file contains Original Code and/or Modifications of Original Code +// as defined in and that are subject to the Apple Public Source License +// Version 2.0 (the 'License'). You may not use this file except in +// compliance with the License. 
The rights granted to you under the License +// may not be used to create, or enable the creation or redistribution of, +// unlawful or unlicensed copies of an Apple operating system, or to +// circumvent, violate, or enable the circumvention or violation of, any +// terms of an Apple operating system software license agreement. +// +// Please obtain a copy of the License at +// http://www.opensource.apple.com/apsl/ and read it before using this file. +// +// The Original Code and all software distributed under the License are +// distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER +// EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, +// INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. +// Please see the License for the specific language governing rights and +// limitations under the License. +// +// @APPLE_OSREFERENCE_LICENSE_HEADER_END@ +// + +#ifndef XNU_LIBKERN_LIBKERN_CXX_BOUNDED_ARRAY_REF_H +#define XNU_LIBKERN_LIBKERN_CXX_BOUNDED_ARRAY_REF_H + +#include +#include +#include +#include + +namespace libkern { +namespace bar_detail { +using nullptr_t = decltype(nullptr); +} + +// Represents a reference to a sequence of 0 or more elements consecutively in +// memory, i.e. a start pointer and a length. +// +// When elements of the sequence are accessed, `bounded_array_ref` ensures +// that those elements are in the bounds of the sequence (which are provided +// when the `bounded_array_ref` is constructed). +// +// This class does not own the underlying data, it is expected to be used in +// situations where the data resides in some other buffer, whose lifetime +// extends past that of the `bounded_array_ref`. For this reason, it is not +// in general safe to store a `bounded_array_ref`. +// +// `bounded_array_ref` is trivially copyable and it should be passed by value. +template +struct bounded_array_ref { + // Creates an empty `bounded_array_ref`. + // + // An empty `bounded_array_ref` does not reference anything, so its + // `data()` is null and its `size()` is 0. + explicit constexpr bounded_array_ref() noexcept : data_(nullptr), size_(0) + { + } + + // Creates a `bounded_array_ref` from a bounded pointer and a size. + // + // The resulting `bounded_array_ref` starts at the location where the + // pointer points, and has the given number of elements. All the elements + // must be in the bounds of the `bounded_ptr`, otherwise this constructor + // will trap. + explicit constexpr bounded_array_ref(bounded_ptr data, size_t n) + : data_(data.unsafe_discard_bounds()), size_(static_cast(n)) + { + if (n != 0) { + data[n - 1]; // make sure the bounds are valid + // TODO: find a better way to do that + } + if (__improbable(n > UINT32_MAX)) { + TrappingPolicy::trap("bounded_array_ref: Can't construct from a size greater than UINT32_MAX"); + } + } + + // Creates a `bounded_array_ref` from a raw pointer and a size. + // + // The resulting `bounded_array_ref` starts at the location where the + // pointer points, and has the given number of elements. This constructor + // trusts that `n` elements are reachable from the given pointer. + explicit constexpr bounded_array_ref(T* data, size_t n) : data_(data), size_(static_cast(n)) + { + if (__improbable(n > UINT32_MAX)) { + TrappingPolicy::trap("bounded_array_ref: Can't construct from a size greater than UINT32_MAX"); + } + } + + // Creates a `bounded_array_ref` from a `[first, last)` half-open range. 
+ // + // The resulting `bounded_array_ref` starts at the location pointed-to by + // `first`, and contains `last - first` elements. The `[first, last)` + // half-open range must be a valid range, i.e. it must be the case that + // `first <= last`, otherwise the constructor traps. + explicit constexpr bounded_array_ref(T* first, T* last) : data_(first), size_(static_cast(last - first)) + { + if (__improbable(first > last)) { + TrappingPolicy::trap("bounded_array_ref: The [first, last) constructor requires a valid range."); + } + if (__improbable(last - first > UINT32_MAX)) { + TrappingPolicy::trap("bounded_array_ref: Can't construct from a size greater than UINT32_MAX"); + } + } + + // Creates a `bounded_array_ref` from a `bounded_array`. + // + // The resulting `bounded_array_ref` starts at the first element of the + // `bounded_array`, and has the number of elements in the `bounded_array`. + template + constexpr bounded_array_ref(bounded_array& data) : data_(data.data()), size_(static_cast(data.size())) + { + if (__improbable(data.size() > UINT32_MAX)) { + TrappingPolicy::trap("bounded_array_ref: Can't construct from a size greater than UINT32_MAX"); + } + } + + // Creates a `bounded_array_ref` from a C-style array. + // + // The resulting `bounded_array_ref` starts at the first element of the + // C-style array, and has the number of elements in that array. + template + constexpr bounded_array_ref(T (&array)[N]) : data_(array), size_(static_cast(N)) + { + if (__improbable(N > UINT32_MAX)) { + TrappingPolicy::trap("bounded_array_ref: Can't construct from a size greater than UINT32_MAX"); + } + } + + constexpr + bounded_array_ref(bounded_array_ref const&) = default; + constexpr + bounded_array_ref(bounded_array_ref&& other) noexcept = default; + + constexpr bounded_array_ref& operator=(bounded_array_ref const&) = default; + constexpr bounded_array_ref& operator=(bounded_array_ref&& other) = default; + ~bounded_array_ref() = default; + + // Returns whether the `bounded_array_ref` points to a sequence or not. + // + // Note that pointing to a sequence at all is different from pointing to + // a valid sequence, or having a size of 0. If a `bounded_array_ref` + // points to a sequence (regardless of whether it is valid or whether + // the size of that sequence is 0), this operator will return true. + explicit + operator bool() const noexcept + { + return data_ != nullptr; + } + + using iterator = bounded_ptr; + + // The following methods allow obtaining iterators (i.e. cursors) to + // objects inside a `bounded_array_ref`. + // + // The iterators of a `bounded_array_ref` are `bounded_ptr`s, which know + // the bounds of the sequence and will trap when dereferenced outside + // of those bounds. + // + // `begin()` returns an iterator to the first element in the range, and + // `end()` returns an iterator to one-past-the-last element in the range. + // The `end()` iterator can't be dereferenced, since it is out of bounds. + // + // If the `bounded_array_ref` is empty, these methods will return null + // `bounded_ptr`s, which can be checked for equality but can't be + // dereferenced. + iterator + begin() const noexcept + { + return iterator(data_, data_, data_ + size_); + } + iterator + end() const noexcept + { + return iterator(data_ + size_, data_, data_ + size_); + } + + // Returns the number of elements in the range referenced by the + // `bounded_array_ref`. 
+ // + // This method returns `0` if the `bounded_array_ref` is null, since + // such an array ref behaves the same as an empty range. + constexpr size_t + size() const + { + return size_; + } + + // Returns a non-owning pointer to the underlying memory referenced by a + // `bounded_array_ref`. + // + // This method can be called even if the `bounded_array_ref` is null, in + // which case the returned pointer will be null. + constexpr T* + data() const noexcept + { + return data_; + } + + // Access the n-th element of a `bounded_array_ref`. + // + // If `n` is out of the bounds of the sequence, this operation will + // trap. If the array ref is null, this operation will trap too. + // + // Design note: + // We voluntarily use a signed type to represent the index even though a + // negative index will always cause a trap. If we used an unsigned type, + // we could get an implicit conversion from signed to unsigned, which + // could silently wrap around. We think trapping early is more likely + // to be helpful in this situation. + OS_ALWAYS_INLINE T& + operator[](ptrdiff_t n) const + { + return begin()[n]; + } + + // Chop off the first `n` elements of the array, and keep `m` elements + // in the array. + // + // The resulting range can be described by `[beg + n, beg + n + m)`, where + // `beg` is the `begin()` of the range being sliced. This operation traps + // if `n + m` is larger than the number of elements in the array. + // + // Since `bounded_array_ref` checks (or assumes) that the range it is + // given on construction is within bounds and `slice()` checks that the + // produced slice is within the original range, it is impossible to create + // a `bounded_array_ref` that isn't a subset of a valid range using this + // function. + bounded_array_ref + slice(size_t n, size_t m) const + { + uint32_t total; + if (__improbable(os_add_overflow(n, m, &total))) { + TrappingPolicy::trap("bounded_array_ref: n + m is larger than the size of any bounded_array_ref"); + } + if (__improbable(total > size())) { + TrappingPolicy::trap("bounded_array_ref: invalid slice provided, the indices are of bounds for the bounded_array_ref"); + } + return bounded_array_ref(data_ + n, m); + } + +private: + T* data_; + uint32_t size_; +}; + +// The comparison functions against `nullptr` all return whether the +// `bounded_array_ref` references a sequence or not. +template +bool +operator==(bounded_array_ref const& x, bar_detail::nullptr_t) +{ + return !static_cast(x); +} + +template +bool +operator!=(bounded_array_ref const& x, bar_detail::nullptr_t) +{ + return !(x == nullptr); +} + +template +bool +operator==(bar_detail::nullptr_t, bounded_array_ref const& x) +{ + return x == nullptr; +} + +template +bool +operator!=(bar_detail::nullptr_t, bounded_array_ref const& x) +{ + return x != nullptr; +} +} // end namespace libkern + +#endif // !XNU_LIBKERN_LIBKERN_CXX_BOUNDED_ARRAY_REF_H diff --git a/libkern/libkern/c++/bounded_ptr.h b/libkern/libkern/c++/bounded_ptr.h new file mode 100644 index 000000000..5e8f3df37 --- /dev/null +++ b/libkern/libkern/c++/bounded_ptr.h @@ -0,0 +1,706 @@ +// +// Copyright (c) 2019 Apple, Inc. All rights reserved. +// +// @APPLE_OSREFERENCE_LICENSE_HEADER_START@ +// +// This file contains Original Code and/or Modifications of Original Code +// as defined in and that are subject to the Apple Public Source License +// Version 2.0 (the 'License'). You may not use this file except in +// compliance with the License. 
The rights granted to you under the License +// may not be used to create, or enable the creation or redistribution of, +// unlawful or unlicensed copies of an Apple operating system, or to +// circumvent, violate, or enable the circumvention or violation of, any +// terms of an Apple operating system software license agreement. +// +// Please obtain a copy of the License at +// http://www.opensource.apple.com/apsl/ and read it before using this file. +// +// The Original Code and all software distributed under the License are +// distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER +// EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, +// INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. +// Please see the License for the specific language governing rights and +// limitations under the License. +// +// @APPLE_OSREFERENCE_LICENSE_HEADER_END@ +// + +#ifndef XNU_LIBKERN_LIBKERN_CXX_BOUNDED_PTR_H +#define XNU_LIBKERN_LIBKERN_CXX_BOUNDED_PTR_H + +#include +#include +#include +#include + +#if !defined(__improbable) +# define __improbable(...) __builtin_expect((__VA_ARGS__), 0) +#endif + +namespace libkern { +namespace detail { +// Reimplementation of things in because we don't seem +// to have the right to rely on the C++ Standard Library (based on +// attempts to compile IOHIDFamily). +// TODO: Do we really need to re-implement this here? +template using void_t = void; +template T && declval() noexcept; +using nullptr_t = decltype(nullptr); +template struct enable_if; +template struct enable_if { using type = T; }; +template using enable_if_t = typename enable_if::type; +template +constexpr bool is_convertible_v = __is_convertible_to(T1, T2); + +template inline constexpr bool is_void_v = false; +template <> inline constexpr bool is_void_v = true; +template <> inline constexpr bool is_void_v = true; + +template struct copy_const { using type = U; }; +template struct copy_const { using type = U const; }; +template using copy_const_t = typename copy_const::type; + +template struct copy_cv { using type = U; }; +template struct copy_cv { using type = U const; }; +template struct copy_cv { using type = U volatile; }; +template struct copy_cv { using type = U const volatile; }; +template using copy_cv_t = typename copy_cv::type; + +template +using WhenComparable = void_t< + decltype(declval() == declval()), + decltype(declval() != declval()) + >; + +template +using WhenOrderable = void_t < + decltype(declval() < declval()), +decltype(declval() > declval()), +decltype(declval() >= declval()), +decltype(declval() <= declval()) +>; + +// Pretend that sizeof(void) is 1, otherwise the in-bounds check doesn't +// make sense for `bounded_ptr`. +template constexpr size_t sizeof_v = sizeof(T); +template <> inline constexpr size_t sizeof_v = 1; +template <> inline constexpr size_t sizeof_v = 1; +template <> inline constexpr size_t sizeof_v = 1; +template <> inline constexpr size_t sizeof_v = 1; +} // end namespace detail + +// Non-owning pointer to an object (or a range of objects) of type `T` +// that validates that the address is within some specified bounds on +// dereference-like operations. +// +// Conceptually, a `bounded_ptr` points within a range of memory `[begin, end)`. +// If accessing any part of the result of dereferencing the pointer would +// lead to an access outside of the `[begin, end)` range, the pointer is +// said to be out-of-bounds. 
Due to representational constraints, the range +// of in-bounds memory must be no larger than 4GB. +// +// Dereference-like operations (dereference, subscript, pointer member access) +// validate that the pointer is not out-of-bounds. If an out-of-bounds pointer +// is dereferenced, the `TrappingPolicy` is called as +// `TrappingPolicy::trap(some-message)`, and the operation is said to "trap". +// This terminology is used below to describe the behavior of the `TrappingPolicy`. +// +// Pointer arithmetic is allowed (and the bounds are not validated), so it is +// entirely possible to make a `bounded_ptr` point outside of its range. +// However, overflow checking is performed on arithmetic operations, and +// any operation resulting in an overflow will also "trap". +// +// The behavior of the `TrappingPolicy` can be customized as desired, however +// a trap should never return, causing the current `bounded_ptr` operation to +// be aborted. This is important since the trap could signify an integer +// overflow, a null-pointer dereference or something else that would lead to +// undefined behavior (UB) if `TrappingPolicy::trap` were to return. +// +// Creation of `bounded_ptr`s +// ========================== +// `bounded_ptr` provides a single constructor allowing the bounds of the +// pointer to be specified. When integrating `bounded_ptr` into an existing +// code base, it is recommended to use `bounded_ptr` as an iterator obtained +// from other container-like abstractions, instead of manually using the +// constructor that allows specifying a range. Specifying the range manually +// on construction is error-prone, and `bounded_ptr` can't help reduce +// out-of-bounds accesses if the bounds are specified incorrectly. +// +// Furthermore, it is a design choice to not provide a constructor that uses +// relative offsets from the pointer itself to determine the range, because +// such a constructor is deemed more confusing than helpful. For example, is +// the offset a number of bytes or a number of objects? Is the offset inclusive +// or exclusive? Instead, factory functions should be used to create `bounded_ptr`s. +// +// Remark on const-ness +// ==================== +// Like for raw pointers, the const-ness of a `bounded_ptr` has no bearing on +// whether the pointee is const. Hence, it is possible to obtain a non-const +// reference to an object from a const `bounded_ptr`. To encode a +// pointer-to-const, simply create a `bounded_ptr`. +template +struct __attribute__((trivial_abi)) bounded_ptr { +private: + using CharType = detail::copy_cv_t; + +public: + // Creates a null `bounded_ptr`. + // + // A null `bounded_ptr` does not point to any object and is conceptually + // out of bounds, so dereferencing it will trap. "Observing" operations + // like comparison and check-for-null, along with assignment, are valid + // operations on a null `bounded_ptr`. + OS_ALWAYS_INLINE constexpr + bounded_ptr(detail::nullptr_t) + : base_(nullptr), count_(0), offset_(0) + { + } + + OS_ALWAYS_INLINE constexpr + explicit + bounded_ptr() + : bounded_ptr(nullptr) + { + } + + // Creates a `bounded_ptr` pointing to the given object, and whose bounds + // are described by the provided `[begin, end)` range. + // + // This constructor does not check whether the constructed pointer is + // within its bounds. However, it does check that the provided `[begin, end)` + // range is a valid range (that is, `begin <= end`). 
+ // + // Furthermore, the number of bytes in the range of in-bounds memory must be + // representable by a uint32_t, which means that there can be no more than + // 2^32 bytes (i.e. 4GB) in that range. Otherwise, the constructor will trap. + OS_ALWAYS_INLINE explicit + bounded_ptr(T* pointer, T const* begin, T const* end) + { + base_ = reinterpret_cast(const_cast(begin)); + + // Store (end - begin) into count_, making sure we don't overflow + if (__improbable(os_sub_overflow(reinterpret_cast(end), + reinterpret_cast(begin), + &count_))) { + TrappingPolicy::trap("The range of valid memory is too large to be represented " + "by this type, or [begin, end) is not a well-formed range"); + } + + // Store (pointer - begin) into offset_, making sure we don't overflow. + // Note that offset_ can be negative if `pointer` is outside of the + // range delimited by [begin, end), which can be valid if it represents + // e.g. a subrange of an array. + if (__improbable(os_sub_overflow(reinterpret_cast(pointer), + reinterpret_cast(begin), + &offset_))) { + TrappingPolicy::trap("The offset of the pointer inside its valid memory " + "range can't be represented using int32_t"); + } + } + + // Creates a `bounded_ptr` to a type `T` from a `bounded_ptr` to a type `U`. + // + // This converting constructor is enabled whenever `U*` is implicitly + // convertible to `T*`. This allows the usual implicit conversions + // between base-and-derived types, and also from any type `U*` to a + // `void*`. If other casts (like between unrelated pointer types) are + // desired, `libkern::reinterpret_pointer_cast` can be used instead. + // + // The bounds on the resulting `bounded_ptr` are inherited from the + // original `bounded_ptr`. + template > > + OS_ALWAYS_INLINE + bounded_ptr(bounded_ptr const & other) + : base_(other.base_) + , count_(other.count_) + , offset_(static_cast(reinterpret_cast(static_cast(other.get_ptr_())) - other.base_)) + { + } + + // Assigns a `bounded_ptr` to a type `U` to a `bounded_ptr` to a type `T`, + // as long as `U*` is convertible to `T*`. + // + // This is a rebinding operation, like assignment between raw pointers, + // and the destination `bounded_ptr` will inherit the bounds of the + // source `bounded_ptr`. + template > > + OS_ALWAYS_INLINE bounded_ptr& + operator=(bounded_ptr const& other) + { + base_ = other.base_; + count_ = other.count_; + offset_ = static_cast(reinterpret_cast(static_cast(other.get_ptr_())) - other.base_); + return *this; + } + + // Sets a `bounded_ptr` to null. + // + // This is effectively equivalent to assigning a default-constructed + // `bounded_ptr` to the target. As a result, the original bounds of + // the `bounded_ptr` are discarded, and the resulting `bounded_ptr` + // is both out-of-bounds and also has no bounds assigned to it (like + // a default-constructed `bounded_ptr`). + OS_ALWAYS_INLINE bounded_ptr& + operator=(detail::nullptr_t) + { + *this = bounded_ptr(); + return *this; + } + + // Returns a reference to the object pointed-to by the `bounded_ptr`. + // + // Traps if the pointer is pointing outside of its bounds. + // + // Also note that this function will trap when dereferencing a null + // `bounded_ptr`, unless the bounds of the pointer have been set and + // include address 0, in which case there's effectively nothing to + // diagnose. 
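+ // [Editor's note -- illustrative sketch, not part of the original header]
+ // Rough usage of the (pointer, begin, end) constructor and of dereferencing,
+ // assuming a hypothetical `Policy` type that provides the non-returning
+ // `static void trap(char const*)` described above:
+ //
+ //     int array[10] = {0};
+ //     libkern::bounded_ptr<int, Policy> p(array + 2, array, array + 10);
+ //     int a = *p;   // OK: array + 2 is within [begin, end)
+ //     p += 8;       // arithmetic alone does not trap (no overflow here)
+ //     int b = *p;   // traps: array + 10 is out of bounds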
+ template // delay instantiation to avoid forming invalid ref for bounded_ptr + OS_ALWAYS_INLINE T_& + operator*() const + { + if (__improbable(!in_bounds_())) { + TrappingPolicy::trap("bounded_ptr::operator*: Dereferencing this pointer " + "would access memory outside of the bounds set originally"); + } + return *get_ptr_(); + } + + OS_ALWAYS_INLINE T* + operator->() const + { + if (__improbable(!in_bounds_())) { + TrappingPolicy::trap("bounded_ptr::operator->: Accessing a member through this pointer " + "would access memory outside of the bounds set originally"); + } + return get_ptr_(); + } + + // Provides access to the n-th element past the given pointer. + // + // The `bounded_ptr` validates whether the provided index is within the + // bounds of the `bounded_ptr`. Like for raw pointers, a negative index + // may be passed, in which case the pointer is accessed at a negative + // offset (which must still be in bounds). + template // delay instantiation to avoid forming invalid ref for bounded_ptr + OS_ALWAYS_INLINE T_& + operator[](ptrdiff_t n) const + { + return *(*this + n); + } + + // Converts a `bounded_ptr` to a raw pointer, after checking it is within + // its bounds. + // + // The primary intended usage of this function is to aid bridging between + // code that uses `bounded_ptr`s and code that does not. + OS_ALWAYS_INLINE T* + discard_bounds() const + { + if (__improbable(!in_bounds_())) { + TrappingPolicy::trap("bounded_ptr::discard_bounds: Discarding the bounds on " + "this pointer would lose the fact that it is outside of the " + "bounds set originally"); + } + return get_ptr_(); + } + + // Converts a `bounded_ptr` to a raw pointer, without checking whether the + // pointer is within its bounds. + // + // Like `discard_bounds()`, the primary intended usage of this function + // is to aid bridging between code that uses `bounded_ptr`s and code that + // does not. However, unlike `discard_bounds()`, this function does not + // validate that the returned pointer is in bounds. This functionality is + // necessary when the pointer represents something that can't be + // dereferenced (hence it's OK for it to be out-of-bounds), but that + // is still useful for other purposes like comparing against other + // pointers. An example of that is the `end` pointer in a half-open + // interval `[begin, end)`, where the `end` pointer is out-of-bounds and + // can't be dereferenced, yet it's still useful to delimit the range. + OS_ALWAYS_INLINE T* + unsafe_discard_bounds() const + { + return get_ptr_(); + } + + // Implicit conversion to bool, returning whether the pointer is null. + // + // This operation does not perform any validation of the bounds. + OS_ALWAYS_INLINE explicit + operator bool() const + { + return get_ptr_() != nullptr; + } + + // Increment/decrement a `bounded_ptr`. + // + // Like for other arithmetic operations, this does not check whether the + // increment or decrement operation results in an out-of-bounds pointer. + OS_ALWAYS_INLINE bounded_ptr& + operator++() + { + *this += 1; + return *this; + } + OS_ALWAYS_INLINE bounded_ptr + operator++(int) + { + bounded_ptr old = *this; + ++*this; + return old; + } + OS_ALWAYS_INLINE bounded_ptr& + operator--() + { + *this -= 1; + return *this; + } + OS_ALWAYS_INLINE bounded_ptr + operator--(int) + { + bounded_ptr old = *this; + --*this; + return old; + } + + // Increment or decrement a `bounded_ptr` by a given offset. + // + // This is equivalent to adding the given offset to the underlying raw + // pointer. 
In particular, the bounds of the `bounded_ptr` are left + // untouched by this operation. Furthermore, like for raw pointers, it + // is possible to provide a negative offset, which will have the effect + // of decrementing the `bounded_ptr` instead of incrementing it. + // + // Also note that the offset is NOT a number of bytes -- just like for + // raw pointers, it is a number of "positions" to move the pointer from, + // which essentially means `n * sizeof(T)` bytes. Again, this works exactly + // the same as a raw pointer to an object of type `T`. + // + // Like other arithmetic operations, this does not check whether the + // increment or decrement operation results in an out-of-bounds pointer. + // However, this does check whether the arithmetic operation would result + // in an overflow, in which case the operation will trap. + template + OS_ALWAYS_INLINE bounded_ptr& + operator+=(ptrdiff_t n) + { + static_assert(!detail::is_void_v, "Arithmetic on bounded_ptr is not allowed."); + + ptrdiff_t bytes; + if (__improbable(os_mul_overflow(n, sizeof(T), &bytes))) { + TrappingPolicy::trap( + "bounded_ptr::operator+=(n): Calculating the number of bytes to " + "add to the offset (n * sizeof(T)) would trigger an overflow"); + } + if (__improbable(os_add_overflow(offset_, bytes, &offset_))) { + TrappingPolicy::trap( + "bounded_ptr::operator+=(n): Adding the specified number of bytes " + "to the offset representing the current position would overflow."); + } + return *this; + } + + template + OS_ALWAYS_INLINE bounded_ptr& + operator-=(ptrdiff_t n) + { + static_assert(!detail::is_void_v, "Arithmetic on bounded_ptr is not allowed."); + + ptrdiff_t bytes; + if (__improbable(os_mul_overflow(n, sizeof(T), &bytes))) { + TrappingPolicy::trap( + "bounded_ptr::operator-=(n): Calculating the number of bytes to " + "subtract from the offset (n * sizeof(T)) would trigger an overflow"); + } + if (__improbable(os_sub_overflow(offset_, bytes, &offset_))) { + TrappingPolicy::trap( + "bounded_ptr::operator-=(n): Subtracting the specified number of bytes " + "from the offset representing the current position would overflow."); + } + return *this; + } + + friend OS_ALWAYS_INLINE bounded_ptr + operator+(bounded_ptr p, ptrdiff_t n) + { + p += n; + return p; + } + friend OS_ALWAYS_INLINE bounded_ptr + operator+(ptrdiff_t n, bounded_ptr p) + { + p += n; + return p; + } + friend OS_ALWAYS_INLINE bounded_ptr + operator-(bounded_ptr p, ptrdiff_t n) + { + p -= n; + return p; + } + + // Returns the difference between two `bounded_ptr`s. + // + // This is semantically equivalent to subtracting the two underlying + // pointers. The bounds of the pointers are not validated by this + // operation. + friend OS_ALWAYS_INLINE ptrdiff_t + operator-(bounded_ptr const& a, bounded_ptr const& b) + { + return a.get_ptr_() - b.get_ptr_(); + } + + friend OS_ALWAYS_INLINE ptrdiff_t + operator-(bounded_ptr const& a, T const* b) + { + return a.get_ptr_() - b; + } + + friend OS_ALWAYS_INLINE ptrdiff_t + operator-(T const* a, bounded_ptr const& b) + { + return a - b.get_ptr_(); + } + +private: + OS_ALWAYS_INLINE bool + in_bounds_() const + { + static_assert(detail::sizeof_v <= UINT32_MAX - INT32_MAX, + "The type pointed-to by bounded_ptr is too large, which would defeat " + "our optimization to check for inboundedness using arithmetic on unsigned"); + return offset_ >= 0 && static_cast(offset_) + static_cast(detail::sizeof_v) <= count_; + } + + OS_ALWAYS_INLINE T* + get_ptr_() const + { + // Compute `base_ + offset_`, catching overflows. 
+ uintptr_t ptr; + if (__improbable(os_add_overflow(reinterpret_cast(base_), offset_, &ptr))) { + TrappingPolicy::trap("This bounded_ptr is pointing to memory outside of what can " + "be represented by a native pointer."); + } + return reinterpret_cast(ptr); + } + + template + friend bounded_ptr reinterpret_pointer_cast(bounded_ptr const&) noexcept; + + template friend struct bounded_ptr; // for cross-type operations and conversions + + CharType* base_; // pointer to the beginning of the valid address range + uint32_t count_; // number of bytes considered in-bounds (non-negative) + int32_t offset_; // current offset into the range, in bytes +}; + +// Returns whether two `bounded_ptr`s point to the same object. +// +// This comparison is semantically equivalent to comparing the underlying +// raw pointers. In particular, it doesn't validate the bounds of either +// `bounded_ptr`, nor does it compare whether the two `bounded_ptr`s have +// the same bounds. +// +// This comparison is enabled between `bounded_ptr`s whenever the two +// corresponding raw pointer types are comparable. Comparison between a +// raw pointer and a `bounded_ptr` is also allowed, so long as the +// two corresponding raw pointer types are comparable. +template > +OS_ALWAYS_INLINE bool +operator==(bounded_ptr const& a, bounded_ptr const& b) +{ + return a.unsafe_discard_bounds() == b.unsafe_discard_bounds(); +} + +template > +OS_ALWAYS_INLINE bool +operator!=(bounded_ptr const& a, bounded_ptr const& b) +{ + return !(a == b); +} + +template > +OS_ALWAYS_INLINE bool +operator==(bounded_ptr const& a, U* b) +{ + return a.unsafe_discard_bounds() == b; +} + +template > +OS_ALWAYS_INLINE bool +operator==(U* a, bounded_ptr const& b) +{ + return a == b.unsafe_discard_bounds(); +} + +template > +OS_ALWAYS_INLINE bool +operator!=(bounded_ptr const& a, U* b) +{ + return !(a == b); +} + +template > +OS_ALWAYS_INLINE bool +operator!=(U* a, bounded_ptr const& b) +{ + return !(a == b); +} + +template +OS_ALWAYS_INLINE bool +operator==(detail::nullptr_t, bounded_ptr const& p) +{ + return p.unsafe_discard_bounds() == nullptr; +} + +template +OS_ALWAYS_INLINE bool +operator!=(detail::nullptr_t, bounded_ptr const& p) +{ + return p.unsafe_discard_bounds() != nullptr; +} + +template +OS_ALWAYS_INLINE bool +operator==(bounded_ptr const& p, detail::nullptr_t) +{ + return p.unsafe_discard_bounds() == nullptr; +} + +template +OS_ALWAYS_INLINE bool +operator!=(bounded_ptr const& p, detail::nullptr_t) +{ + return p.unsafe_discard_bounds() != nullptr; +} + +// Returns whether a `bounded_ptr` points to an address that is {less-than, +// less-than-or-equal-to, greater-than, greater-than-or-equal-to} the address +// held in another `bounded_ptr`. +// +// This doesn't validate the bounds of either `bounded_ptr`, nor does it +// compare those bounds to determine the ordering result. This ordering is +// semantically equivalent to ordering the result of calling `get()` on both +// `bounded_ptr`s. +// +// This ordering is enabled between `bounded_ptr`s whenever the two +// corresponding raw pointer types are orderable. Ordering between a +// raw pointer and a `bounded_ptr` is also allowed, so long as the +// two corresponding raw pointer types are orderable. 
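+// [Editor's note -- illustrative sketch, not part of the original header]
+// The mixed comparisons below allow a `bounded_ptr` to be used directly as a
+// loop cursor against a raw end pointer, e.g. (with a hypothetical `Policy`):
+//
+//     void zero_fill(libkern::bounded_ptr<int, Policy> first, int* last)
+//     {
+//         for (; first < last; ++first) { // bounded/raw ordering comparison
+//             *first = 0;                 // dereference is bounds-checked
+//         }
+//     }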
+// + +template > +OS_ALWAYS_INLINE bool +operator<(bounded_ptr const& a, bounded_ptr const& b) +{ + return a.unsafe_discard_bounds() < b.unsafe_discard_bounds(); +} + +template > +OS_ALWAYS_INLINE bool +operator<=(bounded_ptr const& a, bounded_ptr const& b) +{ + return a.unsafe_discard_bounds() <= b.unsafe_discard_bounds(); +} + +template > +OS_ALWAYS_INLINE bool +operator>(bounded_ptr const& a, bounded_ptr const& b) +{ + return a.unsafe_discard_bounds() > b.unsafe_discard_bounds(); +} + +template > +OS_ALWAYS_INLINE bool +operator>=(bounded_ptr const& a, bounded_ptr const& b) +{ + return a.unsafe_discard_bounds() >= b.unsafe_discard_bounds(); +} + +template > +OS_ALWAYS_INLINE bool +operator<(T* a, bounded_ptr const& b) +{ + return a < b.unsafe_discard_bounds(); +} + +template > +OS_ALWAYS_INLINE bool +operator<(bounded_ptr const& a, U* b) +{ + return a.unsafe_discard_bounds() < b; +} + +template > +OS_ALWAYS_INLINE bool +operator<=(T* a, bounded_ptr const& b) +{ + return a <= b.unsafe_discard_bounds(); +} + +template > +OS_ALWAYS_INLINE bool +operator<=(bounded_ptr const& a, U* b) +{ + return a.unsafe_discard_bounds() <= b; +} + +template > +OS_ALWAYS_INLINE bool +operator>(T* a, bounded_ptr const& b) +{ + return a > b.unsafe_discard_bounds(); +} + +template > +OS_ALWAYS_INLINE bool +operator>(bounded_ptr const& a, U* b) +{ + return a.unsafe_discard_bounds() > b; +} + +template > +OS_ALWAYS_INLINE bool +operator>=(T* a, bounded_ptr const& b) +{ + return a >= b.unsafe_discard_bounds(); +} + +template > +OS_ALWAYS_INLINE bool +operator>=(bounded_ptr const& a, U* b) +{ + return a.unsafe_discard_bounds() >= b; +} + +template +OS_ALWAYS_INLINE T* +reinterpret_pointer_cast(U* p) noexcept +{ + return reinterpret_cast(p); +} + +// Reinterprets a `bounded_ptr` to a type `T` to a `bounded_ptr` to a type `U`. +// +// This is equivalent to `reinterpret_cast`ing the underlying pointer as well +// as the bounds of the original pointer. Like for a raw `reinterpret_cast`, +// no offset adjustment is performed (even if needed, e.g. for derived-to-base +// casts with multiple inheritance). Because this is extremely unsafe, it should +// be used extremely sparingly. +template +OS_ALWAYS_INLINE bounded_ptr +reinterpret_pointer_cast(bounded_ptr const& p) noexcept +{ + using CharType = detail::copy_cv_t; + CharType* new_begin = reinterpret_cast(p.base_); + CharType* new_end = new_begin + p.count_; + return bounded_ptr(reinterpret_cast(p.get_ptr_()), + reinterpret_cast(new_begin), + reinterpret_cast(new_end)); +} +} // end namespace libkern + +#endif // !XNU_LIBKERN_LIBKERN_CXX_BOUNDED_PTR_H diff --git a/libkern/libkern/c++/bounded_ptr_fwd.h b/libkern/libkern/c++/bounded_ptr_fwd.h new file mode 100644 index 000000000..b2fc2f279 --- /dev/null +++ b/libkern/libkern/c++/bounded_ptr_fwd.h @@ -0,0 +1,37 @@ +// +// Copyright (c) 2019 Apple, Inc. All rights reserved. +// +// @APPLE_OSREFERENCE_LICENSE_HEADER_START@ +// +// This file contains Original Code and/or Modifications of Original Code +// as defined in and that are subject to the Apple Public Source License +// Version 2.0 (the 'License'). You may not use this file except in +// compliance with the License. The rights granted to you under the License +// may not be used to create, or enable the creation or redistribution of, +// unlawful or unlicensed copies of an Apple operating system, or to +// circumvent, violate, or enable the circumvention or violation of, any +// terms of an Apple operating system software license agreement. 
+// +// Please obtain a copy of the License at +// http://www.opensource.apple.com/apsl/ and read it before using this file. +// +// The Original Code and all software distributed under the License are +// distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER +// EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, +// INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. +// Please see the License for the specific language governing rights and +// limitations under the License. +// +// @APPLE_OSREFERENCE_LICENSE_HEADER_END@ +// + +#ifndef XNU_LIBKERN_LIBKERN_CXX_BOUNDED_PTR_FWD_H +#define XNU_LIBKERN_LIBKERN_CXX_BOUNDED_PTR_FWD_H + +namespace libkern { +template +struct __attribute__((trivial_abi)) bounded_ptr; +} // end namespace libkern + +#endif // !XNU_LIBKERN_LIBKERN_CXX_BOUNDED_PTR_FWD_H diff --git a/libkern/libkern/c++/intrusive_shared_ptr.h b/libkern/libkern/c++/intrusive_shared_ptr.h new file mode 100644 index 000000000..5c2f9dd89 --- /dev/null +++ b/libkern/libkern/c++/intrusive_shared_ptr.h @@ -0,0 +1,619 @@ +// +// Copyright (c) 2019 Apple, Inc. All rights reserved. +// +// @APPLE_OSREFERENCE_LICENSE_HEADER_START@ +// +// This file contains Original Code and/or Modifications of Original Code +// as defined in and that are subject to the Apple Public Source License +// Version 2.0 (the 'License'). You may not use this file except in +// compliance with the License. The rights granted to you under the License +// may not be used to create, or enable the creation or redistribution of, +// unlawful or unlicensed copies of an Apple operating system, or to +// circumvent, violate, or enable the circumvention or violation of, any +// terms of an Apple operating system software license agreement. +// +// Please obtain a copy of the License at +// http://www.opensource.apple.com/apsl/ and read it before using this file. +// +// The Original Code and all software distributed under the License are +// distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER +// EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, +// INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. +// Please see the License for the specific language governing rights and +// limitations under the License. +// +// @APPLE_OSREFERENCE_LICENSE_HEADER_END@ +// + +#ifndef XNU_LIBKERN_LIBKERN_CXX_INTRUSIVE_SHARED_PTR_H +#define XNU_LIBKERN_LIBKERN_CXX_INTRUSIVE_SHARED_PTR_H + +namespace libkern { +namespace isp_detail { +// TODO: Consolidate these utilities with the ones used in other similar places. 
+using nullptr_t = decltype(nullptr); + +template T && declval() noexcept; + +template using void_t = void; + +template struct is_lvalue_reference { static constexpr bool value = false; }; +template struct is_lvalue_reference { static constexpr bool value = true; }; +template constexpr bool is_lvalue_reference_v = is_lvalue_reference::value; + +template constexpr bool is_empty_v = __is_empty(T); + +template struct remove_reference { using type = T; }; +template struct remove_reference { using type = T; }; +template struct remove_reference { using type = T; }; +template using remove_reference_t = typename remove_reference::type; + +template struct enable_if; +template struct enable_if { using type = T; }; +template using enable_if_t = typename enable_if::type; + +template constexpr bool is_convertible_v = __is_convertible_to(From, To); + +template +constexpr T && forward(remove_reference_t&t) noexcept { + return static_cast(t); +} + +template +constexpr T && forward(remove_reference_t&& t) noexcept { + static_assert(!is_lvalue_reference_v, + "can not forward an rvalue as an lvalue"); + return static_cast(t); +} + +template +constexpr remove_reference_t&& move(T && t) noexcept { + using RvalueRef = remove_reference_t&&; + return static_cast(t); +} + +template +using WhenComparable = void_t< + decltype(declval() == declval()), + decltype(declval() != declval()) + >; +} // end namespace isp_detail + +struct no_retain_t { + explicit constexpr no_retain_t() + { + } +}; +struct retain_t { + explicit constexpr retain_t() + { + } +}; +inline constexpr no_retain_t no_retain{}; +inline constexpr retain_t retain{}; + +// Smart pointer representing a shared resource. +// +// This shared pointer class implements a refcounted resource that uses +// a policy to manage the refcount. This allows various refcount +// implementations, notably ones where the refcount is contained +// in the pointed-to object. +// +// The refcounting policy must consist of the following two static functions: +// +// static void RefcountPolicy::retain(T&); +// static void RefcountPolicy::release(T&); +// +// The `retain` function is called whenever a new reference to the pointed-to +// object is created, and should increase the refcount. The `release` function +// is called whenever a reference to the pointed-to object is removed, and +// should decrease the refcount. These functions are always called with a +// reference to a valid object, i.e. there is no need to check whether the +// reference is null in `retain()` and `release()` (since this is already +// handled by the shared pointer). +// +// One notable difference between this shared pointer and most other shared +// pointer classes is that this shared pointer never destroys the pointed-to +// object. It relies on the `release()` function to do it whenever the refcount +// hits 0. +// +// Since this class represents a pointer to an object (as opposed to a range +// of objects), pointer arithmetic is not allowed on `intrusive_shared_ptr`s. +template +struct __attribute__((trivial_abi)) intrusive_shared_ptr { + static_assert(isp_detail::is_empty_v, + "intrusive_shared_ptr only allows a stateless RefcountPolicy " + "because it must be ABI compatible with raw pointers."); + + // TODO: Add a check that `T` can be used with the `RefcountPolicy` + + using pointer = T *; + using element_type = T; + + // Constructs a null shared pointer. + // + // A null shared pointer can't be dereferenced, but it can be checked + // for nullness, assigned to, reset, etc. 
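+ // [Editor's note -- illustrative sketch, not part of the original header]
+ // For context, a refcounting policy satisfying the requirements described in
+ // the class comment could look roughly like this, for a hypothetical
+ // intrusively refcounted type `Counted`:
+ //
+ //     struct CountedPolicy {
+ //         static void retain(Counted& obj)  { obj.incrementRefcount(); }
+ //         static void release(Counted& obj) { obj.decrementRefcount(); } // frees the object at 0
+ //     };
+ //     using CountedPtr = libkern::intrusive_shared_ptr<Counted, CountedPolicy>;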
+ constexpr intrusive_shared_ptr() noexcept : ptr_(nullptr) { + } + constexpr intrusive_shared_ptr(isp_detail::nullptr_t) noexcept : ptr_(nullptr) { + } + + // Constructs a shared pointer to the given object, incrementing the + // refcount for that object. + // + // This constructor is adequate when transforming a raw pointer with + // shared ownership into a shared pointer, when the raw pointer is at + // +1. This can be done by replacing the raw pointer and the manual call + // to `retain()` by a shared pointer constructed with this constructor, + // which will retain the pointed-to object. + // + // If the original code did not contain a manual retain and you use this + // constructor, you will create a leak. + explicit + intrusive_shared_ptr(pointer p, retain_t) noexcept : ptr_(p) { + if (ptr_ != nullptr) { + RefcountPolicy::retain(*ptr_); + } + } + + // Constructs a shared pointer to the given object, without incrementing + // the refcount for that object. + // + // This constructor is adequate when transforming a raw pointer with + // shared ownership into a shared pointer, when the raw pointer is at + // +0. This can be done by replacing the raw pointer by a shared + // pointer constructed with this constructor, which does not retain + // the pointed-to object. + // + // If the original code contained a manual retain that you removed and + // you use this constructor, you will cause a use-after-free bug. + explicit constexpr + intrusive_shared_ptr(pointer p, no_retain_t) noexcept : ptr_(p) { + } + + // Makes a copy of a shared pointer, incrementing the refcount. + // + // Since this creates a new reference to the pointed-to object, the + // refcount is increased. Unlike for move operations, the source + // pointer is left untouched. + intrusive_shared_ptr(intrusive_shared_ptr const & other) : ptr_(other.ptr_) { + if (ptr_ != nullptr) { + RefcountPolicy::retain(*ptr_); + } + } + + // Makes a copy of a shared pointer from another compatible shared pointer, + // increasing the refcount. + // + // This converting constructor is enabled whenever `U*` is implicitly + // convertible to `T*`. This allows the usual implicit conversions + // between base-and-derived types. + // + // Since this creates a new reference to the pointed-to object, the + // refcount is increased. Unlike for move operations, the source + // pointer is left untouched. + template > > + intrusive_shared_ptr(intrusive_shared_ptr const & other) : ptr_(other.ptr_) { + if (ptr_ != nullptr) { + RefcountPolicy::retain(*ptr_); + } + } + + // Moves a shared pointer into another one, nulling the source. + // + // Since this moves the ownership from one pointer to another, no + // refcount increment or decrement is required. The moved-from pointer + // becomes a null pointer, as if it had been default-constructed. + constexpr intrusive_shared_ptr(intrusive_shared_ptr && other) noexcept : ptr_(other.ptr_) { + other.ptr_ = nullptr; + } + + // Moves a shared pointer to a type `U` into a shared pointer + // to a type `T`. + // + // This converting constructor is enabled whenever `U*` is implicitly + // convertible to `T*`. This allows the usual implicit conversions + // between base-and-derived types. + // + // Since this moves the ownership from one pointer to another, no + // refcount increment or decrement is required. The moved-from pointer + // becomes a null pointer, as if it had been default-constructed. 
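+ // [Editor's note -- illustrative sketch, not part of the original header]
+ // The two tagged constructors above differ only in whether they retain; a
+ // rough sketch, reusing the hypothetical `Counted`/`CountedPtr` names from
+ // the earlier note:
+ //
+ //     Counted* raw = make_counted_at_plus_one();  // hypothetical, returns +1
+ //     CountedPtr a(raw, libkern::no_retain);      // adopts the existing +1
+ //     CountedPtr b(raw, libkern::retain);         // takes an additional +1
+ //     // when `a` and `b` are destroyed, each releases one reference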
+ template > > + constexpr intrusive_shared_ptr(intrusive_shared_ptr&& other) noexcept : ptr_(other.ptr_) { + other.ptr_ = nullptr; + } + + // Destroys a shared pointer. + // + // The destruction of the shared pointer implies that one fewer reference + // to the pointed-to object exist, which means that the refcount of the + // pointed-to object is decremented. + // + // If that decrement causes the refcount to reach 0, the refcounting + // policy must destroy the pointed-to object and perform any cleanup + // associated to it (such as freeing the allocated memory). + ~intrusive_shared_ptr() { + reset(); + } + + // Copy-assigns a shared pointer. + // + // Since this creates a new reference to the pointed-to object, the + // refcount is increased. Unlike for move operations, the source + // pointer is left untouched. + // + // If the destination shared pointer is pointing to an object before + // the assignment, the refcount is decremented on that object after + // the assignment is performed. + intrusive_shared_ptr& + operator=(intrusive_shared_ptr const& other) + { + reset(other.get(), retain); + return *this; + } + + // Copy-assigns a shared pointer, enabling implicit conversions. + // + // This converting copy-assignment is enabled whenever `U*` is implicitly + // convertible to `T*`. This allows the usual implicit conversions + // between base-and-derived types. + // + // Since this creates a new reference to the pointed-to object, the + // refcount is increased. Unlike for move operations, the source + // pointer is left untouched. + // + // If the destination shared pointer is pointing to an object before + // the assignment, the refcount is decremented on that object after + // the assignment is performed. + template > > + intrusive_shared_ptr& + operator=(intrusive_shared_ptr const& other) + { + reset(other.get(), retain); + return *this; + } + + // Move-assigns a shared pointer. + // + // Since this moves the ownership from one pointer to another, no + // refcount increment or decrement is required. The moved-from pointer + // becomes a null pointer, as if it had been default-constructed. + // + // If the destination shared pointer is pointing to an object before + // the assignment, the refcount is decremented on that object after + // the assignment is performed. + intrusive_shared_ptr& + operator=(intrusive_shared_ptr&& other) + { + reset(other.get(), no_retain); + other.ptr_ = nullptr; + return *this; + } + + // Move-assigns a shared pointer, enabling implicit conversions. + // + // This converting move-assignment is enabled whenever `U*` is implicitly + // convertible to `T*`. This allows the usual implicit conversions + // between base-and-derived types. + // + // Since this moves the ownership from one pointer to another, no + // refcount increment or decrement is required. The moved-from pointer + // becomes a null pointer, as if it had been default-constructed. + // + // If the destination shared pointer is pointing to an object before + // the assignment, the refcount is decremented on that object after + // the assignment is performed. + template > > + intrusive_shared_ptr& + operator=(intrusive_shared_ptr&& other) + { + reset(other.get(), no_retain); + other.ptr_ = nullptr; + return *this; + } + + // Resets a shared pointer to a null pointer, as if calling `reset()`. + // + // If the destination shared pointer is pointing to an object before + // the assignment, the refcount is decremented on that object after + // the assignment is performed. 
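+ // [Editor's note -- illustrative sketch, not part of the original header]
+ // Assignment follows the usual shared-pointer rules documented above:
+ //
+ //     CountedPtr a(raw, libkern::retain);
+ //     CountedPtr b;
+ //     b = a;        // copy-assignment: retains, both now reference the object
+ //     b = nullptr;  // releases b's reference; `a` still holds one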
+ intrusive_shared_ptr& + operator=(isp_detail::nullptr_t) noexcept + { + reset(); + return *this; + } + + // Returns a reference to the object pointed-to by the shared pointer. + constexpr T& + operator*() const noexcept + { + return *ptr_; + } + constexpr pointer + operator->() const noexcept + { + return ptr_; + } + + // Implicit conversion to bool, returning whether the shared pointer is null. + explicit constexpr + operator bool() const noexcept + { + return ptr_ != nullptr; + } + + // Sets a shared pointer to null. + // + // If the shared pointer is pointing to an object, the refcount is + // decremented on that object. + intrusive_shared_ptr& + reset() noexcept + { + if (ptr_ != nullptr) { + RefcountPolicy::release(*ptr_); + } + ptr_ = nullptr; + return *this; + } + + // Sets the object pointed-to by the shared pointer to the given object. + // + // This variant of `reset()` does not increment the refcount on the object + // assigned to the shared pointer. + // + // If the shared pointer is pointing to an object before calling `reset`, + // the refcount is decremented on that object. + intrusive_shared_ptr& + reset(pointer p, no_retain_t) noexcept + { + if (ptr_ != nullptr) { + RefcountPolicy::release(*ptr_); + } + ptr_ = p; + return *this; + } + + // Sets the object pointed-to by the shared pointer to the given object. + // + // This variant of `reset()` increments the refcount on the object + // assigned to the shared pointer. + // + // If the shared pointer is pointing to an object before calling `reset`, + // the refcount is decremented on that object. + intrusive_shared_ptr& + reset(pointer p, retain_t) noexcept + { + // Make sure we don't release-before-we-retain in case of self-reset + pointer old = ptr_; + ptr_ = p; + if (ptr_ != nullptr) { + RefcountPolicy::retain(*ptr_); + } + if (old != nullptr) { + RefcountPolicy::release(*old); + } + return *this; + } + + // Retrieves the raw pointer held by a shared pointer. + // + // The primary intended usage of this function is to aid bridging between + // code that uses shared pointers and code that does not, or simply to + // obtain a non-owning reference to the object managed by the shared pointer. + // + // After this operation, the shared pointer still manages the object it + // points to (unlike for `detach()`). + // + // One must not hold on to the pointer returned by `.get()` after the + // last shared pointer pointing to that object goes out of scope, since + // it will then be a dangling pointer. To try and catch frequent cases of + // misuse, calling `.get()` on a temporary shared pointer is not allowed. + constexpr pointer + get() const & noexcept + { + return ptr_; + } + + constexpr pointer + get() const&& noexcept = delete; + + // Returns the raw pointer contained in a shared pointer, detaching + // ownership management from the shared pointer. + // + // This operation returns a pointer to the object pointed-to by the + // shared pointer, and severes the link between the shared pointer and + // that object. After this operation, the shared pointer is no longer + // responsible for managing the object, and instead whoever called + // `detach()` has that responsibility. + // + // `detach()` does _not_ decrement the refcount of the pointee, since + // the caller of `detach()` is responsible for managing the lifetime of + // that object. + // + // After a call to `detach()`, the shared pointer is null since it has + // no more object to manage. 
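+ // [Editor's note -- illustrative sketch, not part of the original header]
+ // `get()` versus `detach()` when bridging to raw-pointer interfaces (the
+ // callee names below are hypothetical):
+ //
+ //     CountedPtr sp(raw, libkern::retain);
+ //     borrow_object(sp.get());      // sp still owns its reference afterwards
+ //     consume_object(sp.detach());  // callee must release; sp is left null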
+ constexpr pointer + detach() noexcept + { + pointer tmp = ptr_; + ptr_ = nullptr; + return tmp; + } + +private: + friend constexpr void + swap(intrusive_shared_ptr& a, intrusive_shared_ptr& b) noexcept + { + pointer tmp = a.ptr_; + a.ptr_ = b.ptr_; + b.ptr_ = tmp; + } + + // For access to other.ptr_ in converting operations + template + friend struct intrusive_shared_ptr; + + pointer ptr_; +}; + +// Casts a shared pointer to a type `T` to a shared pointer to a type `U` +// using `static_cast` on the underlying pointer type. +// +// The version of this function that takes a const reference to the source +// shared pointer makes a copy, and as such it increments the refcount of the +// pointed-to object (since a new reference is created). It leaves the source +// shared pointer untouched. +// +// The version of this function that takes a rvalue-reference moves the +// ownership from the source shared pointer to the destination shared pointer. +// It does not increment the refcount, and the source shared pointer is in a +// moved-from state (i.e. null). +template +intrusive_shared_ptr +static_pointer_cast(intrusive_shared_ptr const& ptr) +{ + return intrusive_shared_ptr(static_cast(ptr.get()), retain); +} +template +intrusive_shared_ptr +static_pointer_cast(intrusive_shared_ptr&& ptr) +{ + return intrusive_shared_ptr(static_cast(ptr.detach()), no_retain); +} + +// Const-casts a shared pointer to a type `cv-T` to a shared pointer to a +// type `T` (without cv-qualifiers) using `const_cast` on the underlying +// pointer type. +// +// The version of this function that takes a const reference to the source +// shared pointer makes a copy, and as such it increments the refcount of the +// pointed-to object (since a new reference is created). It leaves the source +// shared pointer untouched. +// +// The version of this function that takes a rvalue-reference moves the +// ownership from the source shared pointer to the destination shared pointer. +// It does not increment the refcount, and the source shared pointer is in a +// moved-from state (i.e. null). +template +intrusive_shared_ptr +const_pointer_cast(intrusive_shared_ptr const& ptr) noexcept +{ + return intrusive_shared_ptr(const_cast(ptr.get()), retain); +} +template +intrusive_shared_ptr +const_pointer_cast(intrusive_shared_ptr&& ptr) noexcept +{ + return intrusive_shared_ptr(const_cast(ptr.detach()), no_retain); +} + +// Casts a shared pointer to a type `T` to a shared pointer to a type `U` +// using `reinterpret_cast` on the underlying pointer type. +// +// The version of this function that takes a const reference to the source +// shared pointer makes a copy, and as such it increments the refcount of the +// pointed-to object (since a new reference is created). It leaves the source +// shared pointer untouched. +// +// The version of this function that takes a rvalue-reference moves the +// ownership from the source shared pointer to the destination shared pointer. +// It does not increment the refcount, and the source shared pointer is in a +// moved-from state (i.e. null). +// +// WARNING: +// This function makes it possible to cast pointers between unrelated types. +// This rarely makes sense, and when it does, it can often point to a design +// problem. You should have red lights turning on when you're about to use +// this function. 
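+// [Editor's note -- illustrative sketch, not part of the original header]
+// Typical use of the copying cast overloads below, assuming hypothetical
+// `Base` and `Derived` types managed with the same policy:
+//
+//     libkern::intrusive_shared_ptr<Derived, CountedPolicy> d = get_derived(); // hypothetical
+//     auto b = libkern::static_pointer_cast<Base>(d);  // copies: refcount +1, `d` unchanged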
+template +intrusive_shared_ptr +reinterpret_pointer_cast(intrusive_shared_ptr const& ptr) noexcept +{ + return intrusive_shared_ptr(reinterpret_cast(ptr.get()), retain); +} +template +intrusive_shared_ptr +reinterpret_pointer_cast(intrusive_shared_ptr&& ptr) noexcept +{ + return intrusive_shared_ptr(reinterpret_cast(ptr.detach()), no_retain); +} + +// Comparison operations between: +// - two shared pointers +// - a shared pointer and nullptr_t +// - a shared pointer and a raw pointer +template > +bool +operator==(intrusive_shared_ptr const& x, intrusive_shared_ptr const& y) +{ + return x.get() == y.get(); +} + +template > +bool +operator!=(intrusive_shared_ptr const& x, intrusive_shared_ptr const& y) +{ + return x.get() != y.get(); +} + +template > +bool +operator==(intrusive_shared_ptr const& x, U* y) +{ + return x.get() == y; +} + +template > +bool +operator!=(intrusive_shared_ptr const& x, U* y) +{ + return x.get() != y; +} + +template > +bool +operator==(T* x, intrusive_shared_ptr const& y) +{ + return x == y.get(); +} + +template > +bool +operator!=(T* x, intrusive_shared_ptr const& y) +{ + return x != y.get(); +} + +template +bool +operator==(intrusive_shared_ptr const& x, isp_detail::nullptr_t) noexcept +{ + return x.get() == nullptr; +} + +template +bool +operator==(isp_detail::nullptr_t, intrusive_shared_ptr const& x) noexcept +{ + return nullptr == x.get(); +} + +template +bool +operator!=(intrusive_shared_ptr const& x, isp_detail::nullptr_t) noexcept +{ + return x.get() != nullptr; +} + +template +bool +operator!=(isp_detail::nullptr_t, intrusive_shared_ptr const& x) noexcept +{ + return nullptr != x.get(); +} +} // end namespace libkern + +#endif // !XNU_LIBKERN_LIBKERN_CXX_INTRUSIVE_SHARED_PTR_H diff --git a/libkern/libkern/c++/safe_allocation.h b/libkern/libkern/c++/safe_allocation.h new file mode 100644 index 000000000..4b91ce904 --- /dev/null +++ b/libkern/libkern/c++/safe_allocation.h @@ -0,0 +1,457 @@ +// +// Copyright (c) 2019 Apple, Inc. All rights reserved. +// +// @APPLE_OSREFERENCE_LICENSE_HEADER_START@ +// +// This file contains Original Code and/or Modifications of Original Code +// as defined in and that are subject to the Apple Public Source License +// Version 2.0 (the 'License'). You may not use this file except in +// compliance with the License. The rights granted to you under the License +// may not be used to create, or enable the creation or redistribution of, +// unlawful or unlicensed copies of an Apple operating system, or to +// circumvent, violate, or enable the circumvention or violation of, any +// terms of an Apple operating system software license agreement. +// +// Please obtain a copy of the License at +// http://www.opensource.apple.com/apsl/ and read it before using this file. +// +// The Original Code and all software distributed under the License are +// distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER +// EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, +// INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. +// Please see the License for the specific language governing rights and +// limitations under the License. 
+// +// @APPLE_OSREFERENCE_LICENSE_HEADER_END@ +// + +#ifndef XNU_LIBKERN_LIBKERN_CXX_SAFE_ALLOCATION_H +#define XNU_LIBKERN_LIBKERN_CXX_SAFE_ALLOCATION_H + +#include +#include +#include +#include + +void* operator new(size_t, void*); // forward declaration needed for placement-new + +namespace libkern { +namespace sa_detail { +// TODO: Deduplicate these utilities with other smart pointer utilities +using nullptr_t = decltype(nullptr); +template +constexpr bool is_trivially_destructible_v = __is_trivially_destructible(T); +template +constexpr bool is_empty_v = __is_empty(T); +template +constexpr bool is_nothrow_default_constructible_v = __is_nothrow_constructible(T); + +template struct enable_if; +template struct enable_if { using type = T; }; +template using enable_if_t = typename enable_if::type; + +template struct remove_const { using type = T; }; +template struct remove_const { using type = T; }; +template using remove_const_t = typename remove_const::type; + +template +void +generic_swap(T& a, T& b) +{ + T tmp = a; + a = b; + b = tmp; +} + +template >* = nullptr> +void +destroy(T* first, T* last) +{ + for (; first != last; ++first) { + first->~T(); + } +} + +template >* = nullptr> +void +destroy(T*, T*) +{ + // Nothing to do, the elements are trivially destructible +} + +template +void +uninitialized_value_construct(T* first, T* last) +{ + for (; first != last; ++first) { + ::new (static_cast(first)) T(); + } +} +} // end namespace sa_detail + +struct adopt_memory_t { + explicit constexpr + adopt_memory_t() = default; +}; +inline constexpr adopt_memory_t adopt_memory{}; + +struct allocate_memory_t { + explicit constexpr + allocate_memory_t() = default; +}; +inline constexpr allocate_memory_t allocate_memory{}; + +// Lightweight utility class representing a dynamically allocated slab of +// memory, with contiguous objects in it. +// +// The main purpose `safe_allocation` is to: +// 1. Manage a uniquely-owned allocation of memory containing multiple objects +// 2. Check that the allocation is accessed within its bounds on indexing operations +// 3. Act as a source for obtaining (non-owning) `bounded_ptr`s to the underlying memory +// +// In fact, `safe_allocation` should be the primary source of `bounded_ptr`s to +// heap-allocated memory, via its `.begin()` and `.end()` methods. `safe_allocation` +// is optimized for use cases where simple scratch space is needed for calculation +// and deallocated once the calculation is done. As such, it is not a full-blown +// container class, which drives many design choices behind `safe_allocation`: +// +// 1. It can't be copied or compared for equality -- `safe_allocation` is not a proper value type +// 2. It can't be resized -- this keeps the design extremely simple and free of overhead +// 3. You can transfer ownership of `safe_allocation` by using std::move +// +// Design decision: stateless allocators +// ===================================== +// Only allow stateless allocators. While we could technically handle stateful +// allocators (as the C++ Standard Library) does, the benefit of doing so +// compared to the added complexity is absolutely not worth it. Supporting +// stateful allocators everywhere in C++ is regarded (at least in the +// Standardization Committee) as one of the worst design mistakes we've made, +// and so we won't repeat it here. 
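+// [Editor's note -- illustrative sketch, not part of the original header]
+// A conforming allocator is an empty (stateless) type exposing static
+// allocate/deallocate functions along the lines of:
+//
+//     struct my_allocator {
+//         static void* allocate(size_t bytes);             // hypothetical: returns `bytes` bytes of memory
+//         static void  deallocate(void* p, size_t bytes);  // hypothetical: frees memory obtained above
+//     };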
+// +// Design decision: size() is 0 when allocation is null +// ==================================================== +// When the `safe_allocation` is null (because it's been moved-from, or because +// allocation failed, or whatever), we could technically leave the `size_` +// undefined (as long as we make `data_` null). However, this would mean +// that querying the size of the allocation in that case is undefined behavior +// (UB), which is seen as something bad in the context of a type that vends +// itself as safe. So instead, we "overimplement" the type to provide stronger +// guarantees than would be strictly required if performance were the main goal. +template +struct safe_allocation { + static_assert(sa_detail::is_empty_v, + "safe_allocation requires the Allocator to be stateless"); + + // Create a null allocation, pointing to no memory. + // + // A null allocation can be destroyed, assigned-to, checked for nullness, + // and otherwise queries for length, but trying to access an element of + // the allocation will fail. + // + // A null allocation basically behaves as an empty array, i.e. `begin()` + // and `end()` will return iterators that are equal and `size()` will + // return `0`. + explicit constexpr safe_allocation() noexcept : data_(nullptr), size_(0) + { + } + + constexpr safe_allocation(sa_detail::nullptr_t) noexcept : safe_allocation() + { + } + + // Create an allocation pointing to already-allocated and initialized memory. + // + // This constructor attaches existing memory to a `safe_allocation`, such + // that it will be released automatically when the `safe_allocation` goes + // out of scope. The objects in that memory must already have been + // initialized, or they must be initialized before the `safe_allocation` + // goes out of scope. + // + // The `n` argument is the number of objects of type `T` in the allocation, + // i.e. `n * sizeof(T)` bytes should have been allocated. + // + // Note that the memory MUST have been allocated with an allocator compatible + // with the `safe_allocation`'s `Allocator`, since the memory will be + // deallocated using that `Allocator`. Bad things will happen if, for + // example, `adopt_memory` is used with memory allocated on the stack: + // the destructor will try to deallocate that memory and will fail to do so. + explicit safe_allocation(T* data, size_t n, adopt_memory_t) : data_(data) + { + if (__improbable(n > UINT32_MAX)) { + TrappingPolicy::trap("safe_allocation size exceeds UINT32_MAX"); + } + + size_ = static_cast(n); + } + + // Allocate memory for `n` objects of type `T`, and manage it. + // + // This constructor allocates enough memory for `n` objects of type `T` + // using the `Allocator`, and manages that. Each object in the allocation + // is value-initialized (either set to 0 or the default-constructor called). + // + // If either `n * sizeof(T)` overflows or the allocation fails, the + // resulting `safe_allocation` will be null. It is therefore necessary + // to check whether the allocation is null after using this constructor. 
+ explicit safe_allocation(size_t n, allocate_memory_t) + { + size_t bytes; + if (__improbable(os_mul_overflow(n, sizeof(T), &bytes) || (n > UINT32_MAX))) { + data_ = nullptr; + size_ = 0; + } else { + data_ = reinterpret_cast(Allocator::allocate(bytes)); + size_ = static_cast(n); + using RawT = sa_detail::remove_const_t; + RawT* data = const_cast(data_); + sa_detail::uninitialized_value_construct(data, data + size_); + } + } + + // A `safe_allocation` can't be copied, because it is not a proper value + // type and it doesn't assume that the elements of the allocation can be + // copied. + safe_allocation(safe_allocation const&) = delete; + safe_allocation& operator=(safe_allocation const&) = delete; + + // Moves the ownership of an allocation from one `safe_allocation` to + // another one. + // + // After this operation, the moved-from `safe_allocation` is null, and + // any iterator into the moved-from `safe_allocation` are now tied to + // the `safe_allocation` that's the target of the assignment, in the + // sense that the iterators will be invalidated when the target of the + // assignment goes out of scope, not when the moved-from allocation + // goes out of scope. + safe_allocation(safe_allocation&& other) noexcept : data_(other.data_), size_(other.size_) + { + other.data_ = nullptr; + other.size_ = 0; + } + + // Clears a `safe_allocation`, making it a null allocation. + // + // If the `safe_allocation` was pointing to valid memory, the objects + // in that memory are destroyed and that memory is freed. + safe_allocation& + operator=(sa_detail::nullptr_t) + { + if (data_ != nullptr) { + destroy_dealloc_(data_, size_); + } + data_ = nullptr; + size_ = 0; + return *this; + } + + // Moves the ownership of an allocation from one `safe_allocation` to + // another one. + // + // After this operation, the moved-from `safe_allocation` is null, and + // any iterator to the moved-from `safe_allocation` obtained before the + // move operation are invalidated. + // + // If the destination `safe_allocation` was pointing to memory before the + // move-assignment, the objects in that memory are destroyed and the + // memory itself is freed. + // + // In case of self-move-assignment, nothing is done. + safe_allocation& + operator=(safe_allocation&& other) + { + if (&other == this) { + return *this; + } + + T* old_data = data_; + size_t old_size = size_; + + data_ = other.data_; + size_ = other.size_; + other.data_ = nullptr; + other.size_ = 0; + + if (old_data != nullptr) { + destroy_dealloc_(old_data, old_size); + } + + return *this; + } + + // Destroys a `safe_allocation`, destroying the objects in it and + // deallocating the underlying memory with the `Allocator`. + // + // If the `safe_allocation` is null, this destructor does nothing. + ~safe_allocation() + { + if (data_ != nullptr) { + destroy_dealloc_(data_, size_); + } + } + + // Returns whether a `safe_allocation` is non-null, i.e. whether it is + // pointing to some memory. + explicit + operator bool() const noexcept + { + return data_ != nullptr; + } + + using iterator = bounded_ptr; + using const_iterator = bounded_ptr; + + // The following methods allow obtaining iterators (i.e. cursors) to + // objects inside a `safe_allocation`. + // + // The iterators of a `safe_allocation` are `bounded_ptr`s, which know + // the bounds of the allocation and will trap when dereferenced outside + // of those bounds. 
+ // + // `begin()` returns a (const) iterator to the first element in the + // allocation, and `end()` returns a (const) iterator to one-past-the-last + // element in the allocation. The `end()` iterator can't be dereferenced, + // since it is out of bounds. + // + // If the allocation is null, these methods will return null `bounded_ptr`s, + // which can be checked for equality but can't be dereferenced. + OS_ALWAYS_INLINE iterator + begin() noexcept + { + if (data_ == nullptr) { + return iterator(); + } else { + return iterator(data_, data_, data_ + size_); + } + } + OS_ALWAYS_INLINE const_iterator + begin() const noexcept + { + if (data_ == nullptr) { + return const_iterator(); + } else { + return const_iterator(data_, data_, data_ + size_); + } + } + iterator + end() noexcept + { + if (data_ == nullptr) { + return iterator(); + } else { + return iterator(data_ + size_, data_, data_ + size_); + } + } + const_iterator + end() const noexcept + { + if (data_ == nullptr) { + return const_iterator(); + } else { + return const_iterator(data_ + size_, data_, data_ + size_); + } + } + + // Returns the number of objects in the allocation. + // + // This method returns `0` if the allocation is null, since such an + // allocation behaves the same as an empty range. + size_t + size() const + { + return size_; + } + + // Returns a non-owning pointer to the underlying memory managed by a + // `safe_allocation`. + // + // This method can be called even if the `safe_allocation` is null, in + // which case the returned pointer will be null. + T* + data() noexcept + { + return data_; + } + T const* + data() const noexcept + { + return data_; + } + + // Access the n-th element of an allocation. + // + // If `n` is out of the bounds of the allocation, this operation will + // trap. If the allocation is null, this operation will trap too. + // + // Design note: + // We voluntarily use a signed type to represent the index even though a + // negative index will always cause a trap. If we used an unsigned type, + // we could get an implicit conversion from signed to unsigned, which + // could silently wrap around. We think trapping early is more likely + // to be helpful in this situation. + OS_ALWAYS_INLINE T& + operator[](ptrdiff_t n) + { + return begin()[n]; // trap happens in `bounded_ptr` if null or OOB + } + OS_ALWAYS_INLINE T const& + operator[](ptrdiff_t n) const + { + return begin()[n]; // trap happens in `bounded_ptr` if null or OOB + } + +private: + // Swap support + friend void + swap(safe_allocation& a, safe_allocation& b) noexcept + { + sa_detail::generic_swap(a.data_, b.data_); + sa_detail::generic_swap(a.size_, b.size_); + } + + static void + destroy_dealloc_(T* ptr, size_t size) + { + sa_detail::destroy(ptr, ptr + size); + // `size * sizeof(T)` can't overflow, because it would have + // overflowed when the allocation was performed otherwise. + using RawT = sa_detail::remove_const_t; + Allocator::deallocate(const_cast(ptr), size * sizeof(T)); + } + + T* data_; + uint32_t size_; +}; + +// The comparison functions against `nullptr` all return whether the allocation +// is null or not. 
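Before the nullptr comparison helpers that follow, here is a short sketch of how the iterator and indexing surface above is meant to be used; the element type and the placeholder allocator/policy names are the same illustrative ones as in the earlier sketch:

static void
zero_fill(libkern::safe_allocation<uint32_t, kalloc_data_allocator, panic_trapping_policy> &a)
{
	/* begin()/end() return bounded_ptr iterators; a null allocation yields an
	 * empty range, so the loop needs no explicit null check */
	for (auto it = a.begin(); it != a.end(); ++it) {
		*it = 0;        /* dereferencing outside the bounds would trap */
	}
	if (a.size() > 0) {
		a[(ptrdiff_t)a.size() - 1] = 0;   /* operator[] traps on OOB or null */
	}
}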
+template +bool +operator==(safe_allocation const& x, sa_detail::nullptr_t) +{ + return !static_cast(x); +} + +template +bool +operator!=(safe_allocation const& x, sa_detail::nullptr_t) +{ + return !(x == nullptr); +} + +template +bool +operator==(sa_detail::nullptr_t, safe_allocation const& x) +{ + return x == nullptr; +} + +template +bool +operator!=(sa_detail::nullptr_t, safe_allocation const& x) +{ + return !(x == nullptr); +} +} // end namespace libkern + +#endif // !XNU_LIBKERN_LIBKERN_CXX_SAFE_ALLOCATION_H diff --git a/libkern/libkern/crypto/aes.h b/libkern/libkern/crypto/aes.h index bd8c84c63..9e962a873 100644 --- a/libkern/libkern/crypto/aes.h +++ b/libkern/libkern/crypto/aes.h @@ -102,18 +102,18 @@ aes_rval aes_encrypt_reset_gcm(ccgcm_ctx *ctx); aes_rval aes_encrypt_inc_iv_gcm(unsigned char *out_iv, ccgcm_ctx *ctx); aes_rval aes_encrypt_aad_gcm(const unsigned char *aad, unsigned int aad_bytes, ccgcm_ctx *ctx); aes_rval aes_encrypt_gcm(const unsigned char *in_blk, unsigned int num_bytes, unsigned char *out_blk, ccgcm_ctx *ctx); -aes_rval aes_encrypt_finalize_gcm(unsigned char *tag, unsigned int tag_bytes, ccgcm_ctx *ctx); -unsigned aes_encrypt_get_ctx_size_gcm(void); +aes_rval aes_encrypt_finalize_gcm(unsigned char *tag, size_t tag_bytes, ccgcm_ctx *ctx); +size_t aes_encrypt_get_ctx_size_gcm(void); aes_rval aes_decrypt_key_gcm(const unsigned char *key, int key_len, ccgcm_ctx *ctx); aes_rval aes_decrypt_key_with_iv_gcm(const unsigned char *key, int key_len, const unsigned char *in_iv, ccgcm_ctx *ctx); -aes_rval aes_decrypt_set_iv_gcm(const unsigned char *in_iv, unsigned int len, ccgcm_ctx *ctx); +aes_rval aes_decrypt_set_iv_gcm(const unsigned char *in_iv, size_t len, ccgcm_ctx *ctx); aes_rval aes_decrypt_reset_gcm(ccgcm_ctx *ctx); aes_rval aes_decrypt_inc_iv_gcm(unsigned char *out_iv, ccgcm_ctx *ctx); aes_rval aes_decrypt_aad_gcm(const unsigned char *aad, unsigned int aad_bytes, ccgcm_ctx *ctx); aes_rval aes_decrypt_gcm(const unsigned char *in_blk, unsigned int num_bytes, unsigned char *out_blk, ccgcm_ctx *ctx); -aes_rval aes_decrypt_finalize_gcm(unsigned char *tag, unsigned int tag_bytes, ccgcm_ctx *ctx); -unsigned aes_decrypt_get_ctx_size_gcm(void); +aes_rval aes_decrypt_finalize_gcm(unsigned char *tag, size_t tag_bytes, ccgcm_ctx *ctx); +size_t aes_decrypt_get_ctx_size_gcm(void); #if defined(__cplusplus) } diff --git a/libkern/libkern/crypto/register_crypto.h b/libkern/libkern/crypto/register_crypto.h index d804f53ac..d2a8219f2 100644 --- a/libkern/libkern/crypto/register_crypto.h +++ b/libkern/libkern/crypto/register_crypto.h @@ -36,7 +36,6 @@ extern "C" { #include #include #include -#include #include #include #include @@ -94,13 +93,6 @@ typedef void (*ccpbkdf2_hmac_fn_t)(const struct ccdigest_info *di, typedef int (*ccdes_key_is_weak_fn_t)(void *key, unsigned long length); typedef void (*ccdes_key_set_odd_parity_fn_t)(void *key, unsigned long length); -/* XTS padding */ -typedef void (*ccpad_xts_decrypt_fn_t)(const struct ccmode_xts *xts, ccxts_ctx *ctx, - unsigned long nbytes, const void *in, void *out); - -typedef void (*ccpad_xts_encrypt_fn_t)(const struct ccmode_xts *xts, ccxts_ctx *ctx, - unsigned long nbytes, const void *in, void *out); - /* CBC padding (such as PKCS7 or CTSx per NIST standard) */ typedef size_t (*ccpad_cts3_crypt_fn_t)(const struct ccmode_cbc *cbc, cccbc_ctx *cbc_key, cccbc_iv *iv, size_t nbytes, const void *in, void *out); @@ -164,20 +156,9 @@ typedef struct crypto_functions { const struct ccmode_ecb *cctdes_ecb_decrypt; const struct ccmode_cbc 
*cctdes_cbc_encrypt; const struct ccmode_cbc *cctdes_cbc_decrypt; - /* RC4 */ - const struct ccrc4_info *ccrc4_info; - /* Blowfish - ECB only */ - const struct ccmode_ecb *ccblowfish_ecb_encrypt; - const struct ccmode_ecb *ccblowfish_ecb_decrypt; - /* CAST - ECB only */ - const struct ccmode_ecb *cccast_ecb_encrypt; - const struct ccmode_ecb *cccast_ecb_decrypt; /* DES key helper functions */ ccdes_key_is_weak_fn_t ccdes_key_is_weak_fn; ccdes_key_set_odd_parity_fn_t ccdes_key_set_odd_parity_fn; - /* XTS padding+encrypt functions */ - ccpad_xts_encrypt_fn_t ccpad_xts_encrypt_fn; - ccpad_xts_decrypt_fn_t ccpad_xts_decrypt_fn; /* CTS3 padding+encrypt functions */ ccpad_cts3_crypt_fn_t ccpad_cts3_encrypt_fn; ccpad_cts3_crypt_fn_t ccpad_cts3_decrypt_fn; diff --git a/libkern/libkern/img4/interface.h b/libkern/libkern/img4/interface.h index 195a2e6d3..c6714bb21 100644 --- a/libkern/libkern/img4/interface.h +++ b/libkern/libkern/img4/interface.h @@ -38,13 +38,13 @@ #include /* - * We rely on img4.h's logic for either including sys/types.h or declaring - * errno_t ourselves. So when building the kernel, include img4.h from our + * We rely on firmware.h's logic for either including sys/types.h or declaring + * errno_t ourselves. So when building the kernel, include firmware.h from our * external headers. Avoid this inclusion if we're building AppleImage4, which * will have included its own internal version of the header. */ #if MACH_KERNEL_PRIVATE || !_DARWIN_BUILDING_PROJECT_APPLEIMAGE4 -#include +#include #endif /*! @@ -54,227 +54,215 @@ * it can be tested at build-time and not require rev-locked submissions of xnu * and AppleImage4. */ -#define IMG4_INTERFACE_VERSION (4u) +#define IMG4_INTERFACE_VERSION (10u) /*! - * @typedef img4_init_t - * A type describing a pointer to the {@link img4_init} function. + * @typegroup + * Type definitions for all exported functions and constants in the AppleImage4 + * kext. */ -typedef errno_t (*const img4_init_t)( - img4_t *i4, - img4_flags_t flags, - const uint8_t *bytes, - size_t len, - img4_destructor_t destructor +typedef const void *img4_retired_t; + +typedef errno_t (*const img4_nonce_domain_copy_nonce_t)( + const img4_nonce_domain_t *nd, + img4_nonce_t *n ); -/*! - * @typedef img4_get_trusted_payload_t - * A type describing a pointer to the {@link img4_get_trusted_payload} function. - */ -typedef errno_t (*const img4_get_trusted_payload_t)( - img4_t *i4, - img4_tag_t tag, - const img4_environment_t *env, - const uint8_t **bytes, - size_t *len +typedef errno_t (*const img4_nonce_domain_roll_nonce_t)( + const img4_nonce_domain_t *nd ); -/*! - * @typedef img4_get_trusted_external_payload_t - * A type describing a pointer to the {@link img4_get_trusted_external_payload} - * function. - */ -typedef errno_t (*const img4_get_trusted_external_payload_t)( - img4_t *img4, - img4_payload_t *payload, - const img4_environment_t *env, - const uint8_t **bytes, - size_t *len +typedef img4_chip_t *(*img4_chip_init_from_buff_t)( + void *buff, + size_t len ); -/*! - * @typedef img4_set_nonce_t - * A type describing a pointer to the {@link img4_set_nonce} function. - */ -typedef void (*const img4_set_nonce_t)(img4_t *i4, - const void *bytes, - size_t len - ); +typedef const img4_chip_t *(*img4_chip_select_personalized_ap_t)( + void + ); -/*! - * @typedef img4_destroy_t - * A type describing the {@link img4_destroy} function. - */ -typedef void (*const img4_destroy_t)( - img4_t *i4 +typedef const img4_chip_t *(*img4_chip_select_effective_ap_t)( + void ); -/*! 
- * @typedef img4_payload_init_t - * A type describing the {@link img4_payload_init} function. - */ -typedef errno_t (*const img4_payload_init_t)( - img4_payload_t *i4p, - img4_tag_t tag, - img4_payload_flags_t flags, - const uint8_t *bytes, - size_t len, - img4_destructor_t destructor +typedef errno_t (*img4_chip_instantiate_t)( + const img4_chip_t *chip, + img4_chip_instance_t *chip_instance ); -/*! - * @typedef img4_payload_destroy_t - * A type describing the {@link img4_payload_destroy} function. - */ -typedef void (*const img4_payload_destroy_t)( - img4_payload_t *i4 +typedef const img4_chip_t *(*img4_chip_custom_t)( + const img4_chip_instance_t *chip_instance, + img4_chip_t *chip ); -/*! - * @typedef img4_payload_destroy_t - * A type describing the {@link img4_set_nonce_domain} function. - */ -typedef void (*const img4_set_nonce_domain_t)( - img4_t *i4, - const img4_nonce_domain_t *nd +typedef img4_firmware_t (*img4_firmware_new_t)( + const img4_runtime_t *rt, + const img4_firmware_execution_context_t *exec, + img4_4cc_t _4cc, + img4_buff_t *buff, + img4_firmware_flags_t flags ); -/*! - * @typedef img4_nonce_domain_copy_nonce_t - * A type describing the {@link img4_nonce_domain_copy_nonce} function. - */ -typedef errno_t (*const img4_nonce_domain_copy_nonce_t)( - const img4_nonce_domain_t *nd, - img4_nonce_t *n +typedef img4_firmware_t (*img4_firmware_new_from_vnode_4xnu_t)( + const img4_runtime_t *rt, + const img4_firmware_execution_context_t *exec, + img4_4cc_t _4cc, + vnode_t vn, + img4_firmware_flags_t flags ); -/*! - * @typedef img4_nonce_domain_roll_nonce_t - * A type describing the {@link img4_nonce_domain_roll_nonce} function. - */ -typedef errno_t (*const img4_nonce_domain_roll_nonce_t)( - const img4_nonce_domain_t *nd +typedef img4_firmware_t (*img4_firmware_init_from_buff_t)( + void *buff, + size_t len ); -/*! - * @typedef img4_payload_init_with_vnode_4xnu_t - * A type describing the {@link img4_payload_init_with_vnode_4xnu} function. - */ -typedef errno_t (*const img4_payload_init_with_vnode_4xnu_t)( - img4_payload_t *i4p, - img4_tag_t tag, - vnode_t vn, - img4_payload_flags_t flags +typedef void (*img4_firmware_init_t)( + img4_firmware_t fw, + const img4_runtime_t *rt, + const img4_firmware_execution_context_t *exec, + img4_4cc_t _4cc, + img4_buff_t *buff, + img4_firmware_flags_t flags ); -/*! - * @typedef img4_environment_init_identity_t - * A type describing the {@link img4_environment_init_identity} function. - */ -typedef errno_t (*const img4_environment_init_identity_t)( - img4_environment_t *i4e, - size_t len, - const img4_identity_t *i4id +typedef void (*img4_firmware_attach_manifest_t)( + img4_firmware_t fw, + img4_buff_t *buff ); -/*! - * @typedef img4_interface_t - * A structure describing the interface to the AppleImage4 kext. - * - * @field i4if_version - * The version of the structure supported by the implementation. - * - * @field i4if_init - * A pointer to the {@link img4_init} function. - * - * @field i4if_get_trusted_payload - * A pointer to the {@link img4_get_trusted_payload} function. - * - * @field i4if_get_trusted_external_payload - * A pointer to the {@link img4_get_trusted_external_payload} function. - * - * @field i4if_destroy - * A pointer to the {@link img4_destroy} function. - * - * @field i4if_payload_init - * A pointer to the {@link img4_payload_init} function. - * - * @field i4if_destroy - * A pointer to the {@link img4_payload_destroy} function. - * - * @field i4if_environment_platform - * The {@link IMG4_ENVIRONMENT_PLATFORM} global. 
- * - * @field i4if_environment_reserved - * Reserved for use by the implementation. - * - * @field i4if_environment_trust_cache - * The {@link IMG4_ENVIRONMENT_TRUST_CACHE} global. - * - * @field i4if_v1 - * All fields added in version 1 of the structure. - * - * @field i4if_v1.set_nonce_domain - * A pointer to the @{link img4_set_nonce_domain} function. - * - * @field i4if_v1.nonce_domain_copy_nonce - * A pointer to the {@link img4_nonce_domain_copy_nonce} function. - * - * @field i4if_v1.nonce_domain_roll_nonce - * A pointer to the {@link img4_nonce_domain_roll_nonce} function. - * - * @field i4if_v1.nonce_domain_trust_cache - * The {@link IMG4_NONCE_DOMAIN_TRUST_CACHE} global. - * - * @field i4if_v2 - * All fields added in version 2 of the structure. - * - * @field i4if_v2.payload_init_with_vnode_4xnu - * A pointer to the {@link img4_payload_init_with_vnode_4xnu} function. - * - * @field i4if_v3 - * All fields added in version 3 of the structure. - * - * @field i4if_v3.nonce_domain_pdi - * The {@link IMG4_NONCE_DOMAIN_PDI} global. - * - * @field i4if_v3.nonce_domain_cryptex - * The {@link IMG4_NONCE_DOMAIN_CRYPTEX} global. - * - * @field i4if_v4.environment_init_identity - * A pointer to the {@link img4_environment_init_identity} function. - */ +typedef void (*img4_firmware_execute_t)( + img4_firmware_t fw, + const img4_chip_t *chip, + const img4_nonce_t *nonce + ); + +typedef void (*img4_firmware_destroy_t)( + img4_firmware_t *fw + ); + +typedef const img4_buff_t *(*img4_image_get_bytes_t)( + img4_image_t image + ); + +typedef const bool *(*img4_image_get_property_bool_t)( + img4_image_t image, + img4_4cc_t _4cc, + bool *storage + ); + +typedef const uint32_t *(*img4_image_get_property_uint32_t)( + img4_image_t image, + img4_4cc_t _4cc, + uint32_t *storage + ); + +typedef const uint64_t *(*img4_image_get_property_uint64_t)( + img4_image_t image, + img4_4cc_t _4cc, + uint64_t *storage + ); + +typedef const img4_buff_t *(*img4_image_get_property_data_t)( + img4_image_t image, + img4_4cc_t _4cc, + img4_buff_t *storage + ); + +typedef void (*img4_buff_dealloc_t)( + img4_buff_t *buff + ); + +typedef errno_t (*img4_firmware_evaluate_t)( + img4_firmware_t fw, + const img4_chip_t *chip, + const img4_nonce_t *nonce + ); + +typedef const img4_chip_t *(*img4_firmware_select_chip_t)( + const img4_firmware_t fw, + const img4_chip_select_array_t acceptable_chips, + size_t acceptable_chips_cnt + ); typedef struct _img4_interface { const uint32_t i4if_version; - img4_init_t i4if_init; - img4_set_nonce_t i4if_set_nonce; - img4_get_trusted_payload_t i4if_get_trusted_payload; - img4_get_trusted_external_payload_t i4if_get_trusted_external_payload; - img4_destroy_t i4if_destroy; - img4_payload_init_t i4if_payload_init; - img4_payload_destroy_t i4if_payload_destroy; - const img4_environment_t *i4if_environment_platform; - const img4_environment_t *i4if_environment_reserved; - const img4_environment_t *i4if_environment_trust_cache; + img4_retired_t i4if_init; + img4_retired_t i4if_set_nonce; + img4_retired_t i4if_get_trusted_payload; + img4_retired_t i4if_get_trusted_external_payload; + img4_retired_t i4if_destroy; + img4_retired_t i4if_payload_init; + img4_retired_t i4if_payload_destroy; + img4_retired_t i4if_environment_platform; + img4_retired_t i4if_environment_reserved; + img4_retired_t i4if_environment_trust_cache; struct { - img4_set_nonce_domain_t set_nonce_domain; + img4_retired_t set_nonce_domain; img4_nonce_domain_copy_nonce_t nonce_domain_copy_nonce; img4_nonce_domain_roll_nonce_t 
nonce_domain_roll_nonce; const img4_nonce_domain_t *nonce_domain_trust_cache; } i4if_v1; struct { - img4_payload_init_with_vnode_4xnu_t payload_init_with_vnode_4xnu; + img4_retired_t payload_init_with_vnode_4xnu; } i4if_v2; struct { const img4_nonce_domain_t *nonce_domain_pdi; const img4_nonce_domain_t *nonce_domain_cryptex; } i4if_v3; struct { - const img4_environment_init_identity_t environment_init_identity; + img4_retired_t environment_init_identity; } i4if_v4; - void *__reserved[14]; + struct { + img4_retired_t environment_t2; + img4_retired_t environment_init_from_identity; + img4_retired_t identity_init_from_environment; + } i4if_v5; + struct { + img4_retired_t environment_x86; + } i4if_v6; + struct { + const img4_chip_t *chip_ap_sha1; + const img4_chip_t *chip_ap_sha2_384; + const img4_chip_t *chip_ap_hybrid; + const img4_chip_t *chip_ap_reduced; + const img4_chip_t *chip_ap_software_ff00; + const img4_chip_t *chip_ap_software_ff01; + const img4_chip_t *chip_x86; + const img4_chip_t *chip_x86_software_8012; + img4_chip_init_from_buff_t chip_init_from_buff; + img4_chip_select_personalized_ap_t chip_select_personalized_ap; + img4_chip_select_effective_ap_t chip_select_effective_ap; + img4_chip_instantiate_t chip_instantiate; + img4_chip_custom_t chip_custom; + img4_firmware_new_t firmware_new; + img4_firmware_new_from_vnode_4xnu_t firmware_new_from_vnode_4xnu; + img4_firmware_init_from_buff_t firmware_init_from_buff; + img4_firmware_init_t firmware_init; + img4_firmware_attach_manifest_t firmware_attach_manifest; + img4_firmware_execute_t firmware_execute; + img4_firmware_destroy_t firmware_destroy; + img4_image_get_bytes_t image_get_bytes; + img4_image_get_property_bool_t image_get_property_bool; + img4_image_get_property_uint32_t image_get_property_uint32; + img4_image_get_property_uint64_t image_get_property_uint64; + img4_image_get_property_data_t image_get_property_data; + const img4_object_spec_t *firmware_spec; + const img4_object_spec_t *chip_spec; + const img4_runtime_t *runtime_default; + const img4_runtime_t *runtime_pmap_cs; + img4_buff_dealloc_t buff_dealloc; + } i4if_v7; + struct { + const img4_chip_t *chip_ap_permissive; + const img4_chip_t *chip_ap_hybrid_medium; + const img4_chip_t *chip_ap_hybrid_relaxed; + } i4if_v8; + struct { + img4_firmware_evaluate_t firmware_evaluate; + } i4if_v9; + struct { + img4_firmware_select_chip_t firmware_select_chip; + } i4if_v10; } img4_interface_t; __BEGIN_DECLS diff --git a/libkern/libkern/kernel_mach_header.h b/libkern/libkern/kernel_mach_header.h index ba418a09c..6a171db21 100644 --- a/libkern/libkern/kernel_mach_header.h +++ b/libkern/libkern/kernel_mach_header.h @@ -81,6 +81,15 @@ typedef struct nlist kernel_nlist_t; extern kernel_mach_header_t _mh_execute_header; +/* + * If the 'MH_DYLIB_IN_CACHE' bit is set in a kernel or kext mach-o header flag, + * then that mach-o has been linked by the new KernelCollectionBuilder into + * an MH_FILESET kernel collection. This bit is typically reserved for dylibs + * that are part of the dyld-shared-cache, but when applied to constituents of + * a kernel collection, it has this special meaning. 
+ */ +#define kernel_mach_header_is_in_fileset(_mh) ((_mh)->flags & MH_DYLIB_IN_CACHE) + vm_offset_t getlastaddr(void); kernel_segment_command_t *firstseg(void); diff --git a/libkern/libkern/kext_request_keys.h b/libkern/libkern/kext_request_keys.h index fa1697ac0..5956a6f9d 100644 --- a/libkern/libkern/kext_request_keys.h +++ b/libkern/libkern/kext_request_keys.h @@ -119,6 +119,19 @@ extern "C" { */ #define kKextRequestPredicateGetAllLoadRequests "Get All Load Requests" +/* Predicate: Get Kexts in Collection + * Arguments: Name of the collection: All, Primary, System, Auxiliary + * Boolean - RequestLoadedOnly + * Response: An array of information about the kexts in the given collection + * (see OSKextLib.h). + * Op result: OSReturn indicating any errors in processing (see OSKextLib.h) + * + * Retrieves an array of dictionaries whose properties describe every kext + * present in the given kext collection type + * loaded at the time of the call. + */ +#define kKextRequestPredicateGetKextsInCollection "Get Kexts in Collection" + /********************************************************************* * Privileged requests from user -> kernel @@ -150,6 +163,30 @@ extern "C" { */ #define kKextRequestPredicateLoad "Load" +/* Predicate: LoadFromKC + * Argument: kKextRequestPredicateLoadFromKC + * Response: None (yet, may become an array of log message strings) + * Op result: OSReturn indicating processing/load+start result (see OSKextLib.h) + * + * Load one kexts which already exists in the kernel's address space as part + * of a kext collection. By default, the kext will start and have all of its + * personalities sent to the IOCatalogue for matching. + */ +#define kKextRequestPredicateLoadFromKC "LoadFromKC" + +/* Predicate: LoadCodelessKext + * Argument: kKextRequestPredicateLoadCodeless + * Response: None (yet, may become an array of log message strings) + * Op result: OSReturn indicating processing/load+start result (see OSKextLib.h) + * + * Load one codeless kext. The input to this request is a single kext + * Info.plist dictionary contained in the kKextRequestArgumentCodelessInfoKey + * key. The personalities will be sent to the IOCatalogue for matching. + * + * See kKextRequestArgumentCodelessInfoKey for more info. + */ +#define kKextRequestPredicateLoadCodeless "LoadCodelessKext" + /* Predicate: Start * Argument: kKextRequestArgumentBundleIdentifierKey (CFBundleIdentifier) * Response: None (yet, may become an array of log message strings) @@ -181,6 +218,45 @@ extern "C" { */ #define kKextRequestPredicateUnload "Unload" +/* Predicate: LoadFileSetKC + * Argument: kKextRequestArgument + * Response: None (yet, may become an array of log message strings) + * Op result: OSReturn indicating load result of kext collections + * + * Load Pageable and Aux kext collection. + */ +#define kKextRequestPredicateLoadFileSetKC "loadfilesetkc" + +/* Predicate: MissingAuxKCBundles + * Argument: kKextRequestArgumentMissingBundleIDs + * Response: None + * Op result: OSReturn indicating success or failure + * + * Set the list of bundle IDs which may exist in the AuxKC, but + * which are missing from disk. This list represents kexts whose + * code exists in the AuxKC, but should not be loadable. 
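Referring back to the kernel_mach_header_is_in_fileset() macro above, a typical use is to gate logic that only applies when the running mach-o is a constituent of an MH_FILESET kernel collection; the branch body here is illustrative:

	if (kernel_mach_header_is_in_fileset(&_mh_execute_header)) {
		/* this mach-o was linked into a kernel collection by the
		 * KernelCollectionBuilder: take the fileset-specific path */
	}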
+ */ +#define kKextRequestPredicateMissingAuxKCBundles "MissingAuxKCBundles" + +/* Predicate: AuxKCBundleAvailable + * Arguments: kKextRequestArgumentBundleIdentifierKey (CFBundleIdentifier) + * Boolean - kKextRequestArgumentBundleAvailability (optional) + * Response: None + * Op result: OSReturn indicating success or failure + * + * Set the availability of an individual kext in the AuxKC. + */ +#define kKextRequestPredicateAuxKCBundleAvailable "AuxKCBundleAvailable" + +/* Predicate: DaemonReady + * Arguments: None + * Response: None + * Op result: OSReturn indicating whether daemon has already checked in + * + * Check whether the daemon has previously checked into the kernel. + */ +#define kKextRequestPredicateDaemonReady "DaemonReady" + #if PRAGMA_MARK /********************************************************************/ #pragma mark Requests Predicates - Kernel to User Space (kextd) @@ -260,14 +336,20 @@ extern "C" { */ #define kKextRequestPredicateRequestResource "Kext Resource Request" -/* Predicate: Kext Kextd Exit Request + +/* Predicate: IOKit Daemon Exit Request * Argument: None * Response: None * Op result: OSReturn indicating result (see OSKextLib.h) * - * Requests kextd exit for system shutdown. + * Requests that the IOKit daemon (kernelmanagerd) exit for system shutdown. + */ +#define kKextRequestPredicateRequestDaemonExit "IOKit Daemon Exit" + +/* For source compatibility */ -#define kKextRequestPredicateRequestKextdExit "Kextd Exit" +#define kKextRequestPredicateRequestKextdExit kKextRequestPredicateRequestDaemonExit + /* Predicate: Dext Daemon Launch * Argument: kKextRequestArgumentBundleIdentifierKey @@ -293,7 +375,7 @@ extern "C" { * be performed with its options. A kext load request is effectively a * nested series requests. Currently only one load request is embedded * in a user-space Load request, so the result is unambiguous. We might - * change this, specifically for kextd, to allow all pending kernel + * change this, specifically for kernelmanagerd, to allow all pending kernel * load requests to be rolled up into one blob. Might not be much win * in that, however. The nested logic makes the code difficult to read. */ @@ -432,6 +514,30 @@ extern "C" { */ #define kKextRequestArgumentPersonalityNamesKey "Personality Names" +/* Argument: Codeless Kext Info + * Type: Dictionary (Info.plist of codeless kext) + * Default: (required) + * + * When loading a codeless kext, this request argument's value should be set + * to the entire contents of the Info.plist of the codeless kext. + * + * NOTE: One additional key should be injected into the codeless kext's + * plist: kKextRequestArgumentCodelessInfoBundlePathKey + */ +#define kKextRequestArgumentCodelessInfoKey "Codeless Kext Info" + + +/* Argument: _CodelessKextBundlePath + * Type: String + * Default: (required) + * + * This argument is a plist key that must be injected into the dictionary sent + * as the kKextRequestArgumentCodelessInfoKey value. It specifies the + * filesystem path to the codeless kext bundle, and will be used in kext + * diagnostic information. 
+ */ +#define kKextRequestArgumentCodelessInfoBundlePathKey "_CodelessKextBundlePath" + #if PRAGMA_MARK #pragma mark Unload Request Arguments #endif @@ -470,6 +576,30 @@ extern "C" { */ #define kKextRequestArgumentDriverExtensionServerName "Driver Extension Server Name" +#if PRAGMA_MARK +#pragma mark Missing AuxKC Bundles Arguments +#endif + +/* Argument: Missing Bundle IDs + * Type: Array + * Default: N/A + * Used by: kKextRequestPredicateMissingAuxKCBundles + * + * This array of bundle IDs represents the list of kexts which have been + * removed from disk, but still exist in the AuxKC. + */ +#define kKextRequestArgumentMissingBundleIDs "Missing Bundle IDs" + +/* Argument: Bundle Availability + * Type: Boolean + * Default: true + * Used by: kKextRequestPredicateAuxKCBundleAvailable + * + * If present, this argument can indicate that the specified bundle ID + * is no longer available for loading from the AuxKC + */ +#define kKextRequestArgumentBundleAvailability "Bundle Availability" + #if PRAGMA_MARK #pragma mark Internal Tracking Properties #endif @@ -508,6 +638,65 @@ extern "C" { */ #define kKextRequestStaleKey "Request Stale" +/* Argument: Check In Token + * Type: Mach Send Right + * Used by: DriverKit daemon launch + */ +#define kKextRequestArgumentCheckInToken "Check In Token" + +#if PRAGMA_MARK +#pragma mark fileset load request arguments +#endif + +/* Argument: PageableKCName + * Type: String (path) + * Used by: kKextRequestPredicateLoadFileSetKC + * + * Name of the Pageable fileset kext collection + */ +#define kKextRequestArgumentPageableKCFilename "PageableKCName" + +/* Argument: AuxKCName + * Type: String (path) + * Used by: kKextRequestPredicateLoadFileSetKC + * + * Name of the Aux fileset kext collection + */ +#define kKextRequestArgumentAuxKCFilename "AuxKCName" + +/* Argument: Codeless Personalities + * Type: Array of Dictionaries + * Used by: kKextRequestPredicateLoadFileSetKC + * + * Any array of DriverKit driver (and codeless kext) personalities + */ +#define kKextRequestArgumentCodelessPersonalities "Codeless Personalities" + +#if PRAGMAA_MARK +#pragma mark kext collection request arguments +#endif + +/* Argument: Collection + * Type: String + * Used by: kKextRequestPredicateGetKextsInCollection + * + * Contains a string describing the type of kext collection + */ +#define kKextRequestArgumentCollectionTypeKey "Collection Type" + +/* Argument: LoadedState + * Type: String + * Values: Any, Loaded, Unloaded + * Default: Any + * Used by: kKextRequestPredicateGetKextsInCollection + * + * If present, this argument limits the GetKextsInCollection output to: + * Loaded -- only kexts which have been loaded + * Unloaded -- only kexts which have been unloaded + * Any -- return all kexts in a collection + */ +#define kKextRequestArgumentLoadedStateKey "Loaded State" + #ifdef __cplusplus }; #endif /* __cplusplus */ diff --git a/libkern/libkern/prelink.h b/libkern/libkern/prelink.h index eb27f88ac..39bf39ee3 100644 --- a/libkern/libkern/prelink.h +++ b/libkern/libkern/prelink.h @@ -40,6 +40,9 @@ #define kBuiltinInfoSection "__kmod_info" #define kBuiltinStartSection "__kmod_start" +#define kReceiptInfoSegment "__RECEIPT_INFO" +#define kAuxKCReceiptSection "__aux_kc_receipt" + // __DATA segment #define kBuiltinInitSection "__kmod_init" #define kBuiltinTermSection "__kmod_term" @@ -56,5 +59,9 @@ #define kPrelinkLinkStateSizeKey "_PrelinkLinkStateSize" #define kPrelinkLinkKASLROffsetsKey "_PrelinkLinkKASLROffsets" #define kPrelinkInfoKCIDKey "_PrelinkKCID" +#define 
kPrelinkInfoBootKCIDKey "_BootKCID" +#define kPrelinkInfoPageableKCIDKey "_PageableKCID" +#define kKCBranchStubs "__BRANCH_STUBS" +#define kKCBranchGots "__BRANCH_GOTS" #endif /* _PRELINK_H_ */ diff --git a/libkern/libkern/ptrauth_utils.h b/libkern/libkern/ptrauth_utils.h new file mode 100644 index 000000000..765b93320 --- /dev/null +++ b/libkern/libkern/ptrauth_utils.h @@ -0,0 +1,102 @@ +/* + * Copyright (c) 2020 Apple Inc. All rights reserved. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. + * + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ + */ + +#ifndef __PTRAUTH_UTILS_H +#define __PTRAUTH_UTILS_H + +#include + +/* ptrauth_utils flags */ +#define PTRAUTH_ADDR_DIVERSIFY 0x0001 /* Mix storage address in to signature */ +#define PTRAUTH_NON_NULL 0x0002 /* ptr must not be NULL */ + +/* ptrauth_utils_sign_blob_generic + * + * Description: Sign a blob of data with the GA key and extra data, optionally + * diversified by its storage address. + * + * Caveat: A race window exists between the blob being written to memory and its signature being + * calculated by this function. In normal operation, standard thread safety semantics prevent this being + * an issue, however in the malicious case it should be acknowledged that an attacker may be able to accurately + * time overwriting parts/all of the blob and we would generate a signature for that modified data. It is + * therefore important that users of this API minimise that window by calculating signatures immediately + * after modification to the blob. 
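A minimal sketch of the sign-immediately-after-write / authenticate-immediately-before-read pattern that the two helpers declared in this header are designed for; the structure, field names, and salt value are purely illustrative, and offsetof() is assumed to be available from the kernel's stddef support:

struct signed_blob {
	uint64_t a;
	uint64_t b;
	ptrauth_generic_signature_t sig;   /* covers the bytes that precede it */
};

#define SIGNED_BLOB_SALT 0x53424c42ULL     /* arbitrary per-structure salt */

static void
signed_blob_update(struct signed_blob *p, uint64_t a, uint64_t b)
{
	p->a = a;
	p->b = b;
	/* sign right after the write to keep the race window small */
	p->sig = ptrauth_utils_sign_blob_generic(p,
	    offsetof(struct signed_blob, sig), SIGNED_BLOB_SALT,
	    PTRAUTH_ADDR_DIVERSIFY);
}

static uint64_t
signed_blob_read_a(struct signed_blob *p)
{
	/* authenticate right before the read; panics if the blob was modified */
	ptrauth_utils_auth_blob_generic(p,
	    offsetof(struct signed_blob, sig), SIGNED_BLOB_SALT,
	    PTRAUTH_ADDR_DIVERSIFY, p->sig);
	return p->a;
}

On targets built without ptrauth_calls, both calls collapse to the inline no-op stubs declared in this header, so the pattern costs nothing there.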
+ * + * + * Parameters: ptr Address of data to sign + * len_bytes Length in bytes of data to sign + * data Salt to mix in signature when signing + * flags Signing options + * + * Returns: ptrauth_generic_signature_t Signature of blob + * + */ +#if __has_feature(ptrauth_calls) +ptrauth_generic_signature_t +ptrauth_utils_sign_blob_generic(void * ptr, size_t len_bytes, uint64_t data, int flags); +#else +static inline ptrauth_generic_signature_t +ptrauth_utils_sign_blob_generic(__unused void * ptr, __unused size_t len_bytes, __unused uint64_t data, __unused int flags) +{ + return 0; +} +#endif // __has_feature(ptrauth_calls) + + +/* ptrauth_utils_auth_blob_generic + * + * Description: Authenticates a signature for a blob of data + * + * Caveat: As with ptrauth_utils_sign_blob_generic, an attacker who is able to accurately time access between + * authenticating blobs and its use may be able to modify its contents. Failure to time this correctly will + * result in a panic. Care should be taken to authenticate immediately before reading data from the blob to + * minimise this window. + * + * Parameters: ptr Address of data being authenticated + * len_bytes Length of data being authenticated + * data Salt to mix with digest when authenticating + * flags Signing options + * signature The signature to verify + * + * Returns: void If the function returns, the authentication succeeded, + * else we panic as something's gone awry + * + */ +#if __has_feature(ptrauth_calls) +void +ptrauth_utils_auth_blob_generic(void * ptr, size_t len_bytes, uint64_t data, int flags, ptrauth_generic_signature_t signature); +#else +static inline void +ptrauth_utils_auth_blob_generic(__unused void * ptr, __unused size_t len_bytes, __unused uint64_t data, __unused int flags, __unused ptrauth_generic_signature_t signature) +{ + return; +} +#endif // __has_feature(ptrauth_calls) + + +#endif // __PTRAUTH_UTILS_H diff --git a/libkern/libkern/section_keywords.h b/libkern/libkern/section_keywords.h index 02d71ee0c..90382aa4d 100644 --- a/libkern/libkern/section_keywords.h +++ b/libkern/libkern/section_keywords.h @@ -29,21 +29,37 @@ #ifndef _SECTION_KEYWORDS_H #define _SECTION_KEYWORDS_H - -/* Default behaviour */ -#ifndef SECURITY_READ_ONLY_EARLY #define __PLACE_IN_SECTION(__segment__section) \ __attribute__((used, section(__segment__section))) -#define SECURITY_READ_ONLY_SPECIAL_SECTION(_t, __segment__section) \ - const _t __PLACE_IN_SECTION(__segment__section) +#define __SEGMENT_START_SYM(seg) asm("segment$start$" seg) +#define __SEGMENT_END_SYM(seg) asm("segment$end$" seg) -#define SECURITY_READ_ONLY_EARLY(_t) const _t +#define __SECTION_START_SYM(seg, sect) asm("section$start$" seg "$" sect) +#define __SECTION_END_SYM(seg, sect) asm("section$end$" seg "$" sect) -#define SECURITY_READ_ONLY_LATE(_t) _t -#define SECURITY_READ_WRITE(_t) _t __attribute__((used)) -#endif /* SECURITY_READ_ONLY_EARLY */ +#ifndef __security_const_early +#define __security_const_early const +#endif +#ifndef __security_const_late +#define __security_const_late +#endif +#ifndef __security_read_write +#define __security_read_write +#endif +#ifndef MARK_AS_HIBERNATE_TEXT +#define MARK_AS_HIBERNATE_TEXT +#endif +#ifndef MARK_AS_HIBERNATE_DATA +#define MARK_AS_HIBERNATE_DATA +#endif + +#define SECURITY_READ_ONLY_SPECIAL_SECTION(_t, __segment__section) \ + __security_const_early _t __PLACE_IN_SECTION(__segment__section) +#define SECURITY_READ_ONLY_EARLY(_t) _t __security_const_early __attribute__((used)) +#define SECURITY_READ_ONLY_LATE(_t) _t 
__security_const_late __attribute__((used)) +#define SECURITY_READ_WRITE(_t) _t __security_read_write __attribute__((used)) #endif /* _SECTION_KEYWORDS_H_ */ diff --git a/libkern/libkern/tree.h b/libkern/libkern/tree.h index 5cf38cbc0..75c432b76 100644 --- a/libkern/libkern/tree.h +++ b/libkern/libkern/tree.h @@ -263,6 +263,26 @@ name##_SPLAY(struct name *head, struct type *elm) \ SPLAY_ASSEMBLE(head, &__node, __left, __right, field); \ } \ \ +/* Searches for a matching entry without splaying */ \ +static __inline struct type * \ +name##_SPLAY_SEARCH(struct name *head, struct type *elm) \ +{ \ + struct type *__tmp; \ + int __comp; \ + \ + __tmp = (head)->sph_root; \ + while ((__tmp != NULL) && ((__comp = (cmp)(elm, __tmp)) != 0)) { \ + if (__comp < 0) { \ + __tmp = SPLAY_LEFT(__tmp, field); \ + } else if (__comp > 0) { \ + __tmp = SPLAY_RIGHT(__tmp, field); \ + } else { \ + return __tmp; \ + } \ + } \ + return (NULL); \ +} \ + \ /* Splay with either the minimum or the maximum element \ * Used to find minimum or maximum element in tree. \ */ \ @@ -305,6 +325,7 @@ void name##_SPLAY_MINMAX(struct name *head, int __comp) \ #define SPLAY_INSERT(name, x, y) name##_SPLAY_INSERT(x, y) #define SPLAY_REMOVE(name, x, y) name##_SPLAY_REMOVE(x, y) #define SPLAY_FIND(name, x, y) name##_SPLAY_FIND(x, y) +#define SPLAY_SEARCH(name, x, y) name##_SPLAY_SEARCH(x, y) #define SPLAY_NEXT(name, x, y) name##_SPLAY_NEXT(x, y) #define SPLAY_MIN(name, x) (SPLAY_EMPTY(x) ? NULL \ : name##_SPLAY_MIN_MAX(x, SPLAY_NEGINF)) diff --git a/libkern/mkext.c b/libkern/mkext.c index 59634832a..9a1136cd3 100644 --- a/libkern/mkext.c +++ b/libkern/mkext.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2016 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #include // For uintptr_t. 
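The SPLAY_SEARCH macro added to tree.h above is a read-only counterpart to SPLAY_FIND: it walks the tree but never splays the matched node to the root. A sketch of how it might be used, with hypothetical type and field names:

struct knode {
	int id;
	SPLAY_ENTRY(knode) link;
};

static int
knode_cmp(struct knode *a, struct knode *b)
{
	return (a->id < b->id) ? -1 : (a->id > b->id);
}

SPLAY_HEAD(knode_tree, knode);
SPLAY_PROTOTYPE(knode_tree, knode, link, knode_cmp);
SPLAY_GENERATE(knode_tree, knode, link, knode_cmp);

/* Unlike SPLAY_FIND, this never restructures the tree, which suits
 * read-mostly lookups or callers that must not mutate shared state. */
static struct knode *
knode_lookup(struct knode_tree *t, int id)
{
	struct knode key;
	key.id = id;
	return SPLAY_SEARCH(knode_tree, t, &key);
}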
@@ -33,124 +33,144 @@ #define BASE 65521L /* largest prime smaller than 65536 */ #define NMAX 5552 // the largest n such that 255n(n+1)/2 + (n+1)(BASE-1) <= 2^32-1 -#define DO1(buf,i) {s1 += buf[i]; s2 += s1;} -#define DO2(buf,i) DO1(buf,i); DO1(buf,i+1); -#define DO4(buf,i) DO2(buf,i); DO2(buf,i+2); -#define DO8(buf,i) DO4(buf,i); DO4(buf,i+4); +#define DO1(buf, i) {s1 += buf[i]; s2 += s1;} +#define DO2(buf, i) DO1(buf,i); DO1(buf,i+1); +#define DO4(buf, i) DO2(buf,i); DO2(buf,i+2); +#define DO8(buf, i) DO4(buf,i); DO4(buf,i+4); #define DO16(buf) DO8(buf,0); DO8(buf,8); u_int32_t mkext_adler32(uint8_t *buf, int32_t len) { - unsigned long s1 = 1; // adler & 0xffff; - unsigned long s2 = 0; // (adler >> 16) & 0xffff; - int k; + unsigned long s1 = 1; // adler & 0xffff; + unsigned long s2 = 0; // (adler >> 16) & 0xffff; + int k; - while (len > 0) { - k = len < NMAX ? len : NMAX; - len -= k; - while (k >= 16) { - DO16(buf); - buf += 16; - k -= 16; - } - if (k != 0) do { - s1 += *buf++; - s2 += s1; - } while (--k); - s1 %= BASE; - s2 %= BASE; - } - return (s2 << 16) | s1; + while (len > 0) { + k = len < NMAX ? len : NMAX; + len -= k; + while (k >= 16) { + DO16(buf); + buf += 16; + k -= 16; + } + if (k != 0) { + do { + s1 += *buf++; + s2 += s1; + } while (--k); + } + s1 %= BASE; + s2 %= BASE; + } + return (u_int32_t)((s2 << 16) | s1); } /************************************************************** - LZSS.C -- A Data Compression Program +* LZSS.C -- A Data Compression Program *************************************************************** - 4/6/1989 Haruhiko Okumura - Use, distribute, and modify this program freely. - Please send me your improved versions. - PC-VAN SCIENCE - NIFTY-Serve PAF01022 - CompuServe 74050,1022 - +* 4/6/1989 Haruhiko Okumura +* Use, distribute, and modify this program freely. +* Please send me your improved versions. +* PC-VAN SCIENCE +* NIFTY-Serve PAF01022 +* CompuServe 74050,1022 +* **************************************************************/ #define N 4096 /* size of ring buffer - must be power of 2 */ #define F 18 /* upper limit for match_length */ #define THRESHOLD 2 /* encode string into position and length - if match_length is greater than this */ + * if match_length is greater than this */ #if !KERNEL #define NIL N /* index for root of binary search trees */ #endif struct encode_state { - /* - * left & right children & parent. These constitute binary search trees. - */ - int lchild[N + 1], rchild[N + 257], parent[N + 1]; + /* + * left & right children & parent. These constitute binary search trees. + */ + int lchild[N + 1], rchild[N + 257], parent[N + 1]; - /* ring buffer of size N, with extra F-1 bytes to aid string comparison */ - u_int8_t text_buf[N + F - 1]; + /* ring buffer of size N, with extra F-1 bytes to aid string comparison */ + u_int8_t text_buf[N + F - 1]; - /* - * match_length of longest match. - * These are set by the insert_node() procedure. - */ - int match_position, match_length; + /* + * match_length of longest match. + * These are set by the insert_node() procedure. 
+ */ + int match_position, match_length; }; int decompress_lzss(u_int8_t *dst, u_int32_t dstlen, u_int8_t *src, u_int32_t srclen) { - /* ring buffer of size N, with extra F-1 bytes to aid string comparison */ - u_int8_t text_buf[N + F - 1]; - u_int8_t *dststart = dst; + /* ring buffer of size N, with extra F-1 bytes to aid string comparison */ + u_int8_t text_buf[N + F - 1]; + u_int8_t *dststart = dst; u_int8_t *dstend = dst + dstlen; - u_int8_t *srcend = src + srclen; - int i, j, k, r, c; - unsigned int flags; - - dst = dststart; - srcend = src + srclen; - for (i = 0; i < N - F; i++) - text_buf[i] = ' '; - r = N - F; - flags = 0; - for ( ; ; ) { - if (((flags >>= 1) & 0x100) == 0) { - if (src < srcend) c = *src++; else break; - flags = c | 0xFF00; /* uses higher byte cleverly */ - } /* to count eight */ - if (flags & 1) { - if (src < srcend) c = *src++; else break; - *dst++ = c; + u_int8_t *srcend = src + srclen; + int i, j, k, r; + u_int8_t c; + unsigned int flags; + + dst = dststart; + srcend = src + srclen; + for (i = 0; i < N - F; i++) { + text_buf[i] = ' '; + } + r = N - F; + flags = 0; + for (;;) { + if (((flags >>= 1) & 0x100) == 0) { + if (src < srcend) { + c = *src++; + } else { + break; + } + flags = c | 0xFF00; /* uses higher byte cleverly */ + } /* to count eight */ + if (flags & 1) { + if (src < srcend) { + c = *src++; + } else { + break; + } + *dst++ = c; if (dst >= dstend) { goto finish; } - text_buf[r++] = c; - r &= (N - 1); - } else { - if (src < srcend) i = *src++; else break; - if (src < srcend) j = *src++; else break; - i |= ((j & 0xF0) << 4); - j = (j & 0x0F) + THRESHOLD; - for (k = 0; k <= j; k++) { - c = text_buf[(i + k) & (N - 1)]; - *dst++ = c; + text_buf[r++] = c; + r &= (N - 1); + } else { + if (src < srcend) { + i = *src++; + } else { + break; + } + if (src < srcend) { + j = *src++; + } else { + break; + } + i |= ((j & 0xF0) << 4); + j = (j & 0x0F) + THRESHOLD; + for (k = 0; k <= j; k++) { + c = text_buf[(i + k) & (N - 1)]; + *dst++ = c; if (dst >= dstend) { goto finish; } - text_buf[r++] = c; - r &= (N - 1); - } - } - } + text_buf[r++] = c; + r &= (N - 1); + } + } + } finish: - return dst - dststart; + return (int)(dst - dststart); } #if !KERNEL @@ -158,24 +178,28 @@ finish: /* * initialize state, mostly the trees * - * For i = 0 to N - 1, rchild[i] and lchild[i] will be the right and left - * children of node i. These nodes need not be initialized. Also, parent[i] - * is the parent of node i. These are initialized to NIL (= N), which stands - * for 'not used.' For i = 0 to 255, rchild[N + i + 1] is the root of the - * tree for strings that begin with character i. These are initialized to NIL. + * For i = 0 to N - 1, rchild[i] and lchild[i] will be the right and left + * children of node i. These nodes need not be initialized. Also, parent[i] + * is the parent of node i. These are initialized to NIL (= N), which stands + * for 'not used.' For i = 0 to 255, rchild[N + i + 1] is the root of the + * tree for strings that begin with character i. These are initialized to NIL. * Note there are 256 trees. 
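For reference, a caller of the decompress_lzss() routine reformatted above looks roughly like this; the wrapper, its error convention, and the expected_len check are illustrative:

static int
unpack_payload(u_int8_t *dst, u_int32_t dst_capacity,
    u_int8_t *src, u_int32_t src_len, u_int32_t expected_len)
{
	int produced = decompress_lzss(dst, dst_capacity, src, src_len);
	/* the decoder stops when either the source is exhausted or the
	 * destination fills up, so a short result means truncation/corruption */
	return ((u_int32_t)produced == expected_len) ? 0 : -1;
}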
*/ -static void init_state(struct encode_state *sp) +static void +init_state(struct encode_state *sp) { - int i; + int i; - bzero(sp, sizeof(*sp)); + bzero(sp, sizeof(*sp)); - for (i = 0; i < N - F; i++) - sp->text_buf[i] = ' '; - for (i = N + 1; i <= N + 256; i++) - sp->rchild[i] = NIL; - for (i = 0; i < N; i++) - sp->parent[i] = NIL; + for (i = 0; i < N - F; i++) { + sp->text_buf[i] = ' '; + } + for (i = N + 1; i <= N + 256; i++) { + sp->rchild[i] = NIL; + } + for (i = 0; i < N; i++) { + sp->parent[i] = NIL; + } } /* @@ -186,88 +210,94 @@ static void init_state(struct encode_state *sp) * because the old one will be deleted sooner. Note r plays double role, * as tree node and position in buffer. */ -static void insert_node(struct encode_state *sp, int r) +static void +insert_node(struct encode_state *sp, int r) { - int i, p, cmp; - u_int8_t *key; + int i, p, cmp; + u_int8_t *key; - cmp = 1; - key = &sp->text_buf[r]; - p = N + 1 + key[0]; - sp->rchild[r] = sp->lchild[r] = NIL; - sp->match_length = 0; - for ( ; ; ) { - if (cmp >= 0) { - if (sp->rchild[p] != NIL) - p = sp->rchild[p]; - else { - sp->rchild[p] = r; - sp->parent[r] = p; - return; - } - } else { - if (sp->lchild[p] != NIL) - p = sp->lchild[p]; - else { - sp->lchild[p] = r; - sp->parent[r] = p; - return; - } - } - for (i = 1; i < F; i++) { - if ((cmp = key[i] - sp->text_buf[p + i]) != 0) - break; - } - if (i > sp->match_length) { - sp->match_position = p; - if ((sp->match_length = i) >= F) - break; - } - } - sp->parent[r] = sp->parent[p]; - sp->lchild[r] = sp->lchild[p]; - sp->rchild[r] = sp->rchild[p]; - sp->parent[sp->lchild[p]] = r; - sp->parent[sp->rchild[p]] = r; - if (sp->rchild[sp->parent[p]] == p) - sp->rchild[sp->parent[p]] = r; - else - sp->lchild[sp->parent[p]] = r; - sp->parent[p] = NIL; /* remove p */ + cmp = 1; + key = &sp->text_buf[r]; + p = N + 1 + key[0]; + sp->rchild[r] = sp->lchild[r] = NIL; + sp->match_length = 0; + for (;;) { + if (cmp >= 0) { + if (sp->rchild[p] != NIL) { + p = sp->rchild[p]; + } else { + sp->rchild[p] = r; + sp->parent[r] = p; + return; + } + } else { + if (sp->lchild[p] != NIL) { + p = sp->lchild[p]; + } else { + sp->lchild[p] = r; + sp->parent[r] = p; + return; + } + } + for (i = 1; i < F; i++) { + if ((cmp = key[i] - sp->text_buf[p + i]) != 0) { + break; + } + } + if (i > sp->match_length) { + sp->match_position = p; + if ((sp->match_length = i) >= F) { + break; + } + } + } + sp->parent[r] = sp->parent[p]; + sp->lchild[r] = sp->lchild[p]; + sp->rchild[r] = sp->rchild[p]; + sp->parent[sp->lchild[p]] = r; + sp->parent[sp->rchild[p]] = r; + if (sp->rchild[sp->parent[p]] == p) { + sp->rchild[sp->parent[p]] = r; + } else { + sp->lchild[sp->parent[p]] = r; + } + sp->parent[p] = NIL; /* remove p */ } /* deletes node p from tree */ -static void delete_node(struct encode_state *sp, int p) +static void +delete_node(struct encode_state *sp, int p) { - int q; - - if (sp->parent[p] == NIL) - return; /* not in tree */ - if (sp->rchild[p] == NIL) - q = sp->lchild[p]; - else if (sp->lchild[p] == NIL) - q = sp->rchild[p]; - else { - q = sp->lchild[p]; - if (sp->rchild[q] != NIL) { - do { - q = sp->rchild[q]; - } while (sp->rchild[q] != NIL); - sp->rchild[sp->parent[q]] = sp->lchild[q]; - sp->parent[sp->lchild[q]] = sp->parent[q]; - sp->lchild[q] = sp->lchild[p]; - sp->parent[sp->lchild[p]] = q; - } - sp->rchild[q] = sp->rchild[p]; - sp->parent[sp->rchild[p]] = q; - } - sp->parent[q] = sp->parent[p]; - if (sp->rchild[sp->parent[p]] == p) - sp->rchild[sp->parent[p]] = q; - else - sp->lchild[sp->parent[p]] 
= q; - sp->parent[p] = NIL; + int q; + + if (sp->parent[p] == NIL) { + return; /* not in tree */ + } + if (sp->rchild[p] == NIL) { + q = sp->lchild[p]; + } else if (sp->lchild[p] == NIL) { + q = sp->rchild[p]; + } else { + q = sp->lchild[p]; + if (sp->rchild[q] != NIL) { + do { + q = sp->rchild[q]; + } while (sp->rchild[q] != NIL); + sp->rchild[sp->parent[q]] = sp->lchild[q]; + sp->parent[sp->lchild[q]] = sp->parent[q]; + sp->lchild[q] = sp->lchild[p]; + sp->parent[sp->lchild[p]] = q; + } + sp->rchild[q] = sp->rchild[p]; + sp->parent[sp->rchild[p]] = q; + } + sp->parent[q] = sp->parent[p]; + if (sp->rchild[sp->parent[p]] == p) { + sp->rchild[sp->parent[p]] = q; + } else { + sp->lchild[sp->parent[p]] = q; + } + sp->parent[p] = NIL; } #endif /* !KERNEL */ - diff --git a/libkern/net/inet_ntop.c b/libkern/net/inet_ntop.c index 2d898d0d1..f0711c150 100644 --- a/libkern/net/inet_ntop.c +++ b/libkern/net/inet_ntop.c @@ -164,7 +164,8 @@ inet_ntop6(const u_char *src, char *dst, socklen_t size) if (i == 6 && best.base == 0 && (best.len == 6 || (best.len == 7 && words[7] != 0x0001) || (best.len == 5 && words[5] == 0xffff))) { - if (!inet_ntop4(src + 12, tp, sizeof tmp - (tp - tmp))) { + if (!inet_ntop4(src + 12, tp, + (socklen_t)(sizeof tmp - (tp - tmp)))) { return NULL; } tp += strlen(tp); diff --git a/libkern/net/inet_pton.c b/libkern/net/inet_pton.c index 19543ced5..2af1d384a 100644 --- a/libkern/net/inet_pton.c +++ b/libkern/net/inet_pton.c @@ -84,7 +84,7 @@ inet_pton4(const char *src, u_char *dst) const char *pch; if ((pch = strchr(digits, ch)) != NULL) { - u_int new = *tp * 10 + (pch - digits); + u_int new = *tp * 10 + (u_int)(pch - digits); if (saw_digit && *tp == 0) { return 0; @@ -92,7 +92,7 @@ inet_pton4(const char *src, u_char *dst) if (new > 255) { return 0; } - *tp = new; + *tp = (u_char)new; if (!saw_digit) { if (++octets > 4) { return 0; @@ -207,7 +207,7 @@ inet_pton6(const char *src, u_char *dst) * Since some memmove()'s erroneously fail to handle * overlapping regions, we'll do the shift by hand. */ - const int n = tp - colonp; + const long n = tp - colonp; int i; if (tp == endp) { diff --git a/libkern/os/Makefile b/libkern/os/Makefile index 26c29df2c..74fa7b854 100644 --- a/libkern/os/Makefile +++ b/libkern/os/Makefile @@ -3,34 +3,47 @@ export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir +# PRIVATE_DATAFILES go to /usr/local/include +LIBKERN_USE_USR_LOCAL_INCLUDE := 1 + include $(MakeInc_cmd) include $(MakeInc_def) KERNELFILES = \ + atomic.h \ base.h \ - object.h \ + cpp_util.h \ log.h \ - trace.h \ + object.h \ overflow.h \ - smart_ptr.h \ - cpp_util.h + trace.h PRIVATE_KERNELFILES = \ + atomic_private.h \ + atomic_private_arch.h \ + atomic_private_impl.h \ + base_private.h \ hash.h \ - object_private.h \ ptrtools.h \ reason_private.h \ refcnt.h \ refcnt_internal.h DATAFILES = \ + atomic.h \ + base.h \ overflow.h DRIVERKIT_DATAFILES = \ + atomic.h \ base.h \ overflow.h PRIVATE_DATAFILES = \ + atomic_private.h \ + atomic_private_arch.h \ + atomic_private_impl.h \ + base_private.h \ reason_private.h INSTALL_MI_LIST = ${DATAFILES} diff --git a/libkern/os/atomic.h b/libkern/os/atomic.h new file mode 100644 index 000000000..ccd6e439e --- /dev/null +++ b/libkern/os/atomic.h @@ -0,0 +1,135 @@ +/* + * Copyright (c) 2019 Apple Inc. All rights reserved. 
+ * + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. + * + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ + */ + +#ifndef __OS_ATOMIC_H__ +#define __OS_ATOMIC_H__ + +/*! + * @file + * + * @brief + * Small header that helps write code that works with both C11 and C++11, + * or pre-C11 type declarations. + * + * @discussion + * The macros below allow to write code like this, that can be put in a header + * and will work with both C11 and C++11: + * + * + * struct old_type { + * int atomic_field; + * } old_variable; + * + * os_atomic_std(atomic_fetch_add_explicit)( + * os_cast_to_atomic_pointer(&old_variable), 1, + * os_atomic_std(memory_order_relaxed)); + * + */ + +#include + +#ifndef OS_ATOMIC_USES_CXX +#ifdef KERNEL +#define OS_ATOMIC_USES_CXX 0 +#elif defined(__cplusplus) && __cplusplus >= 201103L +#define OS_ATOMIC_USES_CXX 1 +#else +#define OS_ATOMIC_USES_CXX 0 +#endif +#endif + +#if OS_ATOMIC_USES_CXX +#include +#define OS_ATOMIC_STD std:: +#define os_atomic_std(op) std::op +#define os_atomic(type) std::atomic volatile +#define os_cast_to_atomic_pointer(p) os::cast_to_atomic_pointer(p) +#define os_atomic_basetypeof(p) decltype(os_cast_to_atomic_pointer(p)->load()) +#define os_cast_to_nonatomic_pointer(p) os::cast_to_nonatomic_pointer(p) +#else /* !OS_ATOMIC_USES_CXX */ +#include +#define OS_ATOMIC_STD +#define os_atomic_std(op) op +#define os_atomic(type) type volatile _Atomic +#define os_cast_to_atomic_pointer(p) (__typeof__(*(p)) volatile _Atomic *)(uintptr_t)(p) +#define os_atomic_basetypeof(p) __typeof__(atomic_load(os_cast_to_atomic_pointer(p))) +#define os_cast_to_nonatomic_pointer(p) (os_atomic_basetypeof(p) *)(uintptr_t)(p) +#endif /* !OS_ATOMIC_USES_CXX */ + +/*! + * @group Internal implementation details + * + * @discussion The functions below are not intended to be used directly. 
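Concretely, the macros above let a header that declares a plain (pre-C11) field perform atomic operations on it with a single spelling that compiles both as C11 (<stdatomic.h>) and as C++11 (<atomic>); a small sketch:

struct old_counter {
	unsigned long value;    /* declared without _Atomic / std::atomic */
};

static inline unsigned long
old_counter_bump(struct old_counter *c)
{
	/* os_cast_to_atomic_pointer() reinterprets the field as the matching
	 * atomic type for the active language, and os_atomic_std() resolves to
	 * either the plain C identifier or its std:: qualified counterpart */
	return os_atomic_std(atomic_fetch_add_explicit)(
		os_cast_to_atomic_pointer(&c->value), 1UL,
		os_atomic_std(memory_order_relaxed));
}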
+ */ + +#if OS_ATOMIC_USES_CXX +#include + +namespace os { +template using add_volatile_t = typename std::add_volatile::type; +template using remove_volatile_t = typename std::remove_volatile::type; + +template +inline add_volatile_t > > * +cast_to_atomic_pointer(T *v) +{ + return reinterpret_cast > > *>(v); +} + +template +inline add_volatile_t > > * +cast_to_atomic_pointer(std::atomic *v) +{ + return reinterpret_cast > > *>(v); +} + +template +inline remove_volatile_t * +cast_to_nonatomic_pointer(T *v) +{ + return const_cast *>(v); +} + +template +inline remove_volatile_t * +cast_to_nonatomic_pointer(std::atomic *v) +{ + return reinterpret_cast *>(v); +} + +template +inline remove_volatile_t * +cast_to_nonatomic_pointer(volatile std::atomic *v) +{ + auto _v = const_cast *>(v); + return reinterpret_cast *>(_v); +} +}; +#endif /* OS_ATOMIC_USES_CXX */ + +#endif /* __OS_ATOMIC_H__ */ diff --git a/libkern/os/atomic_private.h b/libkern/os/atomic_private.h new file mode 100644 index 000000000..96ab5bb14 --- /dev/null +++ b/libkern/os/atomic_private.h @@ -0,0 +1,925 @@ +/* + * Copyright (c) 2018 Apple Inc. All rights reserved. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. + * + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ + */ + +#ifndef __OS_ATOMIC_PRIVATE_H__ +#define __OS_ATOMIC_PRIVATE_H__ + +/*! + * @file + * + * @brief + * This file defines nicer (terser and safer) wrappers for C11's . + * + * @discussion + * @see xnu.git::doc/atomics.md which provides more extensive documentation + * about this header. + * + * Note that some of the macros defined in this file may be overridden by + * architecture specific headers. + * + * All the os_atomic* functions take an operation ordering argument that can be: + * - C11 memory orders: relaxed, acquire, release, acq_rel or seq_cst which + * imply a memory fence on SMP machines, and always carry the matching + * compiler barrier semantics. + * + * - the os_atomic-specific `dependency` memory ordering that is used to + * document intent to a carry a data or address dependency. + * See doc/atomics.md for more information. + * + * - a compiler barrier: compiler_acquire, compiler_release, compiler_acq_rel + * without a corresponding memory fence. + */ + +#include + +/*! + * @group tunables. + * + * @{ + * + * @brief + * @c OS_ATOMIC_CONFIG_* macros provide tunables for clients. + */ + +/*! 
+ * @macro OS_ATOMIC_CONFIG_SMP + * + * @brief + * Whether this is used on an SMP system, defaults to 1. + */ +#ifndef OS_ATOMIC_CONFIG_SMP +#define OS_ATOMIC_CONFIG_SMP 1 +#endif // OS_ATOMIC_CONFIG_SMP + +/*! + * @macro OS_ATOMIC_CONFIG_STARVATION_FREE_ONLY + * + * @brief + * Hide interfaces that can lead to starvation on certain hardware/build + * configurations. + * + * @discussion + * The following ABIs are currently supported by os_atomic: + * - i386 and x86_64: Intel atomics + * - armv7: load/store exclusive + * - armv8: load/store exclusive + * - armv8.1: armv8.1 style atomics + * + * On armv8 hardware with asymmetric cores, using load/store exclusive based + * atomics can lead to starvation in very hot code or non-preemptible context, + * and code that is sensitive to such must not use these interfaces. + * + * When OS_ATOMIC_CONFIG_STARVATION_FREE_ONLY is set, any os_atomic_* interface + * that may cause starvation will be made unavailable to avoid accidental use. + * + * Defaults: + * - XNU: builds per SoC, already safe + * - Kexts: default to avoid starvable interfaces by default + * - User: default to allow starvable interfaces by default + * + * Note: at this time, on Apple supported platforms, the only configuration + * that is affected by this would be for the "arm64" slices. + * + * Intel, armv7 variants, and the arm64e slice always are unaffected. + */ +#ifndef OS_ATOMIC_CONFIG_STARVATION_FREE_ONLY +#if XNU_KERNEL_PRIVATE +#define OS_ATOMIC_CONFIG_STARVATION_FREE_ONLY 0 +#elif KERNEL +#define OS_ATOMIC_CONFIG_STARVATION_FREE_ONLY 1 +#else +#define OS_ATOMIC_CONFIG_STARVATION_FREE_ONLY 0 +#endif +#endif // OS_ATOMIC_CONFIG_STARVATION_FREE_ONLY + +/*! + * @macro OS_ATOMIC_CONFIG_MEMORY_ORDER_DEPENDENCY + * + * @brief + * Expose the os_atomic-specific fake `dependency` memory ordering. + * + * @discussion + * The dependency ordering can be used to try to "repair" C11's consume ordering + * and should be limited to extremely complex algorithms where every cycle counts. + * + * Due to the inherent risks (no compiler support) for this feature, it is + * reserved for expert and very domain-specific code only and is off by default. + * + * Default: 0 + */ +#ifndef OS_ATOMIC_CONFIG_MEMORY_ORDER_DEPENDENCY +#if XNU_KERNEL_PRIVATE +#define OS_ATOMIC_CONFIG_MEMORY_ORDER_DEPENDENCY 1 +#else +#define OS_ATOMIC_CONFIG_MEMORY_ORDER_DEPENDENCY 0 +#endif +#endif // OS_ATOMIC_CONFIG_MEMORY_ORDER_DEPENDENCY + +/*! @} */ + +/*! + * @group features (arch specific). + * + * @{ + * + * @brief + * The @c OS_ATOMIC_USE_* and @c OS_ATOMIC_HAS_* defines expose some + * specificities of implementation that are relevant to + * certain clients and can be used to conditionalize code. + */ + +/*! + * @const OS_ATOMIC_HAS_LLSC + * + * @brief + * Whether the platform has LL/SC features. + * + * @discussion + * When set, the os_atomic_*_exclusive() macros are defined. + */ +#if defined(__i386__) || defined(__x86_64__) +#define OS_ATOMIC_HAS_LLSC 0 +#elif defined(__arm__) || defined(__arm64__) +#define OS_ATOMIC_HAS_LLSC 1 +#else +#error unsupported architecture +#endif + +/*! + * @const OS_ATOMIC_USE_LLSC + * + * @brief + * Whether os_atomic* use LL/SC internally. + * + * @discussion + * OS_ATOMIC_USE_LLSC implies OS_ATOMIC_HAS_LLSC. + */ +#if defined(__arm64__) && defined(__ARM_ARCH_8_2__) +#define OS_ATOMIC_USE_LLSC 0 +#else +#define OS_ATOMIC_USE_LLSC OS_ATOMIC_HAS_LLSC +#endif + +/*! 
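+ * A sketch of how the feature macros above can be used to conditionalize
+ * code (editor's example, not from the original header):
+ *
+ *     #if OS_ATOMIC_HAS_LLSC
+ *     // take an os_atomic_load_exclusive()/os_atomic_store_exclusive() path
+ *     #else
+ *     // fall back to an os_atomic_cmpxchg() based loop
+ *     #endif
+ */
+
+/*!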
+ * @const OS_ATOMIC_HAS_STARVATION_FREE_RMW + * + * @brief + * Whether os_atomic* Read-Modify-Write operations are starvation free + * in the current configuration. + */ +#define OS_ATOMIC_HAS_STARVATION_FREE_RMW (!OS_ATOMIC_USE_LLSC) + +/*! @} */ + +#include "atomic_private_impl.h" // Internal implementation details + +/*! + * @function os_compiler_barrier + * + * @brief + * Provide a compiler barrier according to the specified ordering. + * + * @param m + * An optional ordering among `acquire`, `release` or `acq_rel` which defaults + * to `acq_rel` when not specified. + * These are equivalent to the `compiler_acquire`, `compiler_release` and + * `compiler_acq_rel` orderings taken by the os_atomic* functions + */ +#undef os_compiler_barrier +#define os_compiler_barrier(b...) \ + os_atomic_std(atomic_signal_fence)(_os_compiler_barrier_##b) + +/*! + * @function os_atomic_thread_fence + * + * @brief + * Memory fence which is elided in non-SMP mode, but always carries the + * corresponding compiler barrier. + * + * @param m + * The ordering for this fence. + */ +#define os_atomic_thread_fence(m) ({ \ + os_atomic_std(atomic_thread_fence)(_os_atomic_mo_##m##_smp); \ + os_atomic_std(atomic_signal_fence)(_os_atomic_mo_##m); \ +}) + +/*! + * @function os_atomic_init + * + * @brief + * Wrapper for C11 atomic_init() + * + * @discussion + * This initialization is not performed atomically, and so must only be used as + * part of object initialization before the object is made visible to other + * threads/cores. + * + * @param p + * A pointer to an atomic variable. + * + * @param v + * The value to initialize the variable with. + * + * @returns + * The value loaded from @a p. + */ +#define os_atomic_init(p, v) \ + os_atomic_std(atomic_init)(os_cast_to_atomic_pointer(p), v) + +/*! + * @function os_atomic_load_is_plain, os_atomic_store_is_plain + * + * @brief + * Return whether a relaxed atomic load (resp. store) to an atomic variable + * is implemented as a single plain load (resp. store) instruction. + * + * @discussion + * Non-relaxed loads/stores may involve additional memory fence instructions + * or more complex atomic instructions. + * + * This is a construct that can safely be used in static asserts. + * + * This doesn't check for alignment and it is assumed that `p` is + * "aligned enough". + * + * @param p + * A pointer to an atomic variable. + * + * @returns + * True when relaxed atomic loads (resp. stores) compile to a plain load + * (resp. store) instruction, false otherwise. + */ +#define os_atomic_load_is_plain(p) (sizeof(*(p)) <= sizeof(void *)) +#define os_atomic_store_is_plain(p) os_atomic_load_is_plain(p) + +/*! + * @function os_atomic_load + * + * @brief + * Wrapper for C11 atomic_load_explicit(), guaranteed to compile to a single + * plain load instruction (when @a m is `relaxed`). + * + * @param p + * A pointer to an atomic variable. + * + * @param m + * The ordering to use. + * + * @returns + * The value loaded from @a p. + */ +#define os_atomic_load(p, m) ({ \ + _Static_assert(os_atomic_load_is_plain(p), "Load is wide"); \ + _os_compiler_barrier_before_atomic(m); \ + __auto_type _r = os_atomic_std(atomic_load_explicit)( \ + os_cast_to_atomic_pointer(p), _os_atomic_mo_##m##_smp); \ + _os_compiler_barrier_after_atomic(m); \ + _r; \ +}) + +/*! + * @function os_atomic_store + * + * @brief + * Wrapper for C11 atomic_store_explicit(), guaranteed to compile to a single + * plain store instruction (when @a m is `relaxed`). + * + * @param p + * A pointer to an atomic variable. 
+ * + * @param v + * The value to store. + * + * @param m + * The ordering to use. + * + * @returns + * The value stored at @a p. + */ +#define os_atomic_store(p, v, m) ({ \ + _Static_assert(os_atomic_store_is_plain(p), "Store is wide"); \ + __auto_type _v = (v); \ + _os_compiler_barrier_before_atomic(m); \ + os_atomic_std(atomic_store_explicit)(os_cast_to_atomic_pointer(p), _v, \ + _os_atomic_mo_##m##_smp); \ + _os_compiler_barrier_after_atomic(m); \ + _v; \ +}) + +#if OS_ATOMIC_HAS_STARVATION_FREE_RMW || !OS_ATOMIC_CONFIG_STARVATION_FREE_ONLY + +/*! + * @function os_atomic_load_wide + * + * @brief + * Wrapper for C11 atomic_load_explicit(), which may be implemented by a + * compare-exchange loop for double-wide variables. + * + * @param p + * A pointer to an atomic variable. + * + * @param m + * The ordering to use. + * + * @returns + * The value loaded from @a p. + */ +#define os_atomic_load_wide(p, m) ({ \ + _os_compiler_barrier_before_atomic(m); \ + __auto_type _r = os_atomic_std(atomic_load_explicit)( \ + os_cast_to_atomic_pointer(p), _os_atomic_mo_##m##_smp); \ + _os_compiler_barrier_after_atomic(m); \ + _r; \ +}) + +/*! + * @function os_atomic_store_wide + * + * @brief + * Wrapper for C11 atomic_store_explicit(), which may be implemented by a + * compare-exchange loop for double-wide variables. + * + * @param p + * A pointer to an atomic variable. + * + * @param v + * The value to store. + * + * @param m + * The ordering to use. + * + * @returns + * The value stored at @a p. + */ +#define os_atomic_store_wide(p, v, m) ({ \ + __auto_type _v = (v); \ + _os_compiler_barrier_before_atomic(m); \ + os_atomic_std(atomic_store_explicit)(os_cast_to_atomic_pointer(p), _v, \ + _os_atomic_mo_##m##_smp); \ + _os_compiler_barrier_after_atomic(m); \ + _v; \ +}) + +/*! + * @function os_atomic_add, os_atomic_add_orig + * + * @brief + * Wrappers for C11 atomic_fetch_add_explicit(). + * + * @param p + * A pointer to an atomic variable. + * + * @param v + * The value to add. + * + * @param m + * The ordering to use. + * + * @returns + * os_atomic_add_orig returns the value of the variable before the atomic add, + * os_atomic_add returns the value of the variable after the atomic add. + */ +#define os_atomic_add_orig(p, v, m) _os_atomic_c11_op_orig(p, v, m, fetch_add) +#define os_atomic_add(p, v, m) _os_atomic_c11_op(p, v, m, fetch_add, +) + +/*! + * @function os_atomic_inc, os_atomic_inc_orig + * + * @brief + * Perform an atomic increment. + * + * @param p + * A pointer to an atomic variable. + * + * @param m + * The ordering to use. + * + * @returns + * os_atomic_inc_orig returns the value of the variable before the atomic increment, + * os_atomic_inc returns the value of the variable after the atomic increment. + */ +#define os_atomic_inc_orig(p, m) _os_atomic_c11_op_orig(p, 1, m, fetch_add) +#define os_atomic_inc(p, m) _os_atomic_c11_op(p, 1, m, fetch_add, +) + +/*! + * @function os_atomic_sub, os_atomic_sub_orig + * + * @brief + * Wrappers for C11 atomic_fetch_sub_explicit(). + * + * @param p + * A pointer to an atomic variable. + * + * @param v + * The value to subtract. + * + * @param m + * The ordering to use. + * + * @returns + * os_atomic_sub_orig returns the value of the variable before the atomic subtract, + * os_atomic_sub returns the value of the variable after the atomic subtract. + */ +#define os_atomic_sub_orig(p, v, m) _os_atomic_c11_op_orig(p, v, m, fetch_sub) +#define os_atomic_sub(p, v, m) _os_atomic_c11_op(p, v, m, fetch_sub, -) + +/*! 
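+ * A typical use of the fetch-and-subtract wrappers above, as an editor's
+ * sketch (`obj`, its `refcnt` field and obj_free() are hypothetical):
+ *
+ *     if (os_atomic_sub(&obj->refcnt, 1, release) == 0) {
+ *             os_atomic_thread_fence(acquire);
+ *             obj_free(obj);
+ *     }
+ *
+ * os_atomic_sub() returns the post-subtraction value, so a result of 0 means
+ * the last reference was just dropped.
+ */
+
+/*!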
+ * @function os_atomic_dec, os_atomic_dec_orig + * + * @brief + * Perform an atomic decrement. + * + * @param p + * A pointer to an atomic variable. + * + * @param m + * The ordering to use. + * + * @returns + * os_atomic_dec_orig returns the value of the variable before the atomic decrement, + * os_atomic_dec returns the value of the variable after the atomic decrement. + */ +#define os_atomic_dec_orig(p, m) _os_atomic_c11_op_orig(p, 1, m, fetch_sub) +#define os_atomic_dec(p, m) _os_atomic_c11_op(p, 1, m, fetch_sub, -) + +/*! + * @function os_atomic_and, os_atomic_and_orig + * + * @brief + * Wrappers for C11 atomic_fetch_and_explicit(). + * + * @param p + * A pointer to an atomic variable. + * + * @param v + * The value to and. + * + * @param m + * The ordering to use. + * + * @returns + * os_atomic_and_orig returns the value of the variable before the atomic and, + * os_atomic_and returns the value of the variable after the atomic and. + */ +#define os_atomic_and_orig(p, v, m) _os_atomic_c11_op_orig(p, v, m, fetch_and) +#define os_atomic_and(p, v, m) _os_atomic_c11_op(p, v, m, fetch_and, &) + +/*! + * @function os_atomic_andnot, os_atomic_andnot_orig + * + * @brief + * Wrappers for C11 atomic_fetch_and_explicit(p, ~value). + * + * @param p + * A pointer to an atomic variable. + * + * @param v + * The value whose complement to and. + * + * @param m + * The ordering to use. + * + * @returns + * os_atomic_andnot_orig returns the value of the variable before the atomic andnot, + * os_atomic_andnot returns the value of the variable after the atomic andnot. + */ +#define os_atomic_andnot_orig(p, v, m) _os_atomic_c11_op_orig(p, (typeof(v))~(v), m, fetch_and) +#define os_atomic_andnot(p, v, m) _os_atomic_c11_op(p, (typeof(v))~(v), m, fetch_and, &) + +/*! + * @function os_atomic_or, os_atomic_or_orig + * + * @brief + * Wrappers for C11 atomic_fetch_or_explicit(). + * + * @param p + * A pointer to an atomic variable. + * + * @param v + * The value to or. + * + * @param m + * The ordering to use. + * + * @returns + * os_atomic_or_orig returns the value of the variable before the atomic or, + * os_atomic_or returns the value of the variable after the atomic or. + */ +#define os_atomic_or_orig(p, v, m) _os_atomic_c11_op_orig(p, v, m, fetch_or) +#define os_atomic_or(p, v, m) _os_atomic_c11_op(p, v, m, fetch_or, |) + +/*! + * @function os_atomic_xor, os_atomic_xor_orig + * + * @brief + * Wrappers for C11 atomic_fetch_xor_explicit(). + * + * @param p + * A pointer to an atomic variable. + * + * @param v + * The value to xor. + * + * @param m + * The ordering to use. + * + * @returns + * os_atomic_xor_orig returns the value of the variable before the atomic xor, + * os_atomic_xor returns the value of the variable after the atomic xor. + */ +#define os_atomic_xor_orig(p, v, m) _os_atomic_c11_op_orig(p, v, m, fetch_xor) +#define os_atomic_xor(p, v, m) _os_atomic_c11_op(p, v, m, fetch_xor, ^) + +/*! + * @function os_atomic_min, os_atomic_min_orig + * + * @brief + * Wrappers for Clang's __atomic_fetch_min() + * + * @param p + * A pointer to an atomic variable. + * + * @param v + * The value to minimize. + * + * @param m + * The ordering to use. + * + * @returns + * os_atomic_min_orig returns the value of the variable before the atomic min, + * os_atomic_min returns the value of the variable after the atomic min. + */ +#define os_atomic_min_orig(p, v, m) _os_atomic_clang_op_orig(p, v, m, fetch_min) +#define os_atomic_min(p, v, m) _os_atomic_clang_op(p, v, m, fetch_min, MIN) + +/*! 
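+ * An illustration of the bitwise wrappers above (editor's sketch; the
+ * `flags` field and the TF_PENDING bit are hypothetical):
+ *
+ *     os_atomic_or(&t->flags, TF_PENDING, relaxed);     // set the bit
+ *     os_atomic_andnot(&t->flags, TF_PENDING, relaxed); // clear the bit
+ */
+
+/*!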
+ * @function os_atomic_max, os_atomic_max_orig + * + * @brief + * Wrappers for Clang's __atomic_fetch_max() + * + * @param p + * A pointer to an atomic variable. + * + * @param v + * The value to maximize. + * + * @param m + * The ordering to use. + * + * @returns + * os_atomic_max_orig returns the value of the variable before the atomic max, + * os_atomic_max returns the value of the variable after the atomic max. + */ +#define os_atomic_max_orig(p, v, m) _os_atomic_clang_op_orig(p, v, m, fetch_max) +#define os_atomic_max(p, v, m) _os_atomic_clang_op(p, v, m, fetch_max, MAX) + +/*! + * @function os_atomic_xchg + * + * @brief + * Wrapper for C11 atomic_exchange_explicit(). + * + * @param p + * A pointer to an atomic variable. + * + * @param v + * The value to exchange with. + * + * @param m + * The ordering to use. + * + * @returns + * The value of the variable before the exchange. + */ +#define os_atomic_xchg(p, v, m) _os_atomic_c11_op_orig(p, v, m, exchange) + +/*! + * @function os_atomic_cmpxchg + * + * @brief + * Wrapper for C11 atomic_compare_exchange_strong_explicit(). + * + * @discussion + * Loops around os_atomic_cmpxchg() may want to consider using the + * os_atomic_rmw_loop() construct instead to take advantage of the C11 weak + * compare-exchange operation. + * + * @param p + * A pointer to an atomic variable. + * + * @param e + * The value expected in the atomic variable. + * + * @param v + * The value to store if the atomic variable has the expected value @a e. + * + * @param m + * The ordering to use in case of success. + * The ordering in case of failure is always `relaxed`. + * + * @returns + * 0 if the compare-exchange failed. + * 1 if the compare-exchange succeeded. + */ +#define os_atomic_cmpxchg(p, e, v, m) ({ \ + os_atomic_basetypeof(p) _r = (e); int _b; \ + _os_compiler_barrier_before_atomic(m); \ + _b = os_atomic_std(atomic_compare_exchange_strong_explicit)( \ + os_cast_to_atomic_pointer(p), &_r, \ + _os_atomic_value_cast(p, v), \ + _os_atomic_mo_##m##_smp, _os_atomic_mo_relaxed); \ + _os_compiler_barrier_after_atomic(m); \ + _b; \ +}) + +/*! + * @function os_atomic_cmpxchgv + * + * @brief + * Wrapper for C11 atomic_compare_exchange_strong_explicit(). + * + * @discussion + * Loops around os_atomic_cmpxchgv() may want to consider using the + * os_atomic_rmw_loop() construct instead to take advantage of the C11 weak + * compare-exchange operation. + * + * @param p + * A pointer to an atomic variable. + * + * @param e + * The value expected in the atomic variable. + * + * @param v + * The value to store if the atomic variable has the expected value @a e. + * + * @param g + * A pointer to a location that is filled with the value that was present in + * the atomic variable before the compare-exchange (whether successful or not). + * This can be used to redrive compare-exchange loops. + * + * @param m + * The ordering to use in case of success. + * The ordering in case of failure is always `relaxed`. + * + * @returns + * 0 if the compare-exchange failed. + * 1 if the compare-exchange succeeded. + */ +#define os_atomic_cmpxchgv(p, e, v, g, m) ({ \ + os_atomic_basetypeof(p) _r = (e); int _b; \ + _os_compiler_barrier_before_atomic(m); \ + _b = os_atomic_std(atomic_compare_exchange_strong_explicit)( \ + os_cast_to_atomic_pointer(p), &_r, \ + _os_atomic_value_cast(p, v), \ + _os_atomic_mo_##m##_smp, _os_atomic_mo_relaxed); \ + _os_compiler_barrier_after_atomic(m); \ + *(g) = _r; _b; \ +}) + +/*! 
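+ * A claim-if-unowned pattern with the compare-exchange wrapper above, as an
+ * editor's sketch (`lck->owner` and `self` are hypothetical):
+ *
+ *     if (os_atomic_cmpxchg(&lck->owner, NULL, self, acquire)) {
+ *             // ownership acquired
+ *     }
+ *
+ * Retry loops around a compare-exchange are usually better written with
+ * os_atomic_rmw_loop(), documented below.
+ */
+
+/*!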
+ * @function os_atomic_rmw_loop + * + * @brief + * Advanced read-modify-write construct to wrap compare-exchange loops. + * + * @param p + * A pointer to an atomic variable to be modified. + * + * @param ov + * The name of the variable that will contain the original value of the atomic + * variable (reloaded every iteration of the loop). + * + * @param nv + * The name of the variable that will contain the new value to compare-exchange + * the atomic variable to (typically computed from @a ov every iteration of the + * loop). + * + * @param m + * The ordering to use in case of success. + * The ordering in case of failure is always `relaxed`. + * + * @param ... + * Code block that validates the value of @p ov and computes the new value of + * @p nv that the atomic variable will be compare-exchanged to in an iteration + * of the loop. + * + * The loop can be aborted using os_atomic_rmw_loop_give_up(), e.g. when the + * value of @p ov is found to be "invalid" for the ovarall operation. + * `continue` cannot be used in this context. + * + * No stores to memory should be performed within the code block as it may cause + * LL/SC transactions used to implement compare-exchange to fail persistently. + * + * @returns + * 0 if the loop was aborted with os_atomic_rmw_loop_give_up(). + * 1 if the loop completed. + */ +#define os_atomic_rmw_loop(p, ov, nv, m, ...) ({ \ + int _result = 0; \ + __auto_type _p = os_cast_to_nonatomic_pointer(p); \ + _os_compiler_barrier_before_atomic(m); \ + ov = *_p; \ + do { \ + __VA_ARGS__; \ + _result = os_atomic_std(atomic_compare_exchange_weak_explicit)( \ + os_cast_to_atomic_pointer(_p), &ov, nv, \ + _os_atomic_mo_##m##_smp, _os_atomic_mo_relaxed); \ + } while (__builtin_expect(!_result, 0)); \ + _os_compiler_barrier_after_atomic(m); \ + _result; \ +}) + +/*! + * @function os_atomic_rmw_loop_give_up + * + * @brief + * Abort an os_atomic_rmw_loop() loop. + * + * @param ... + * Optional code block to execute before the `break` out of the loop. May + * further alter the control flow (e.g. using `return`, `goto`, ...). + */ +#define os_atomic_rmw_loop_give_up(...) 
({ __VA_ARGS__; break; }) + +#else // !OS_ATOMIC_HAS_STARVATION_FREE_RMW && OS_ATOMIC_CONFIG_STARVATION_FREE_ONLY + +#define _os_atomic_error_is_starvable(name) \ + _Static_assert(0, #name " is not starvation-free and isn't available in this configuration") +#define os_atomic_load_wide(p, m) _os_atomic_error_is_starvable(os_atomic_load_wide) +#define os_atomic_store_wide(p, v, m) _os_atomic_error_is_starvable(os_atomic_store_wide) +#define os_atomic_add_orig(p, v, m) _os_atomic_error_is_starvable(os_atomic_add_orig) +#define os_atomic_add(p, v, m) _os_atomic_error_is_starvable(os_atomic_add) +#define os_atomic_inc_orig(p, m) _os_atomic_error_is_starvable(os_atomic_inc_orig) +#define os_atomic_inc(p, m) _os_atomic_error_is_starvable(os_atomic_inc) +#define os_atomic_sub_orig(p, v, m) _os_atomic_error_is_starvable(os_atomic_sub_orig) +#define os_atomic_sub(p, v, m) _os_atomic_error_is_starvable(os_atomic_sub) +#define os_atomic_dec_orig(p, m) _os_atomic_error_is_starvable(os_atomic_dec_orig) +#define os_atomic_dec(p, m) _os_atomic_error_is_starvable(os_atomic_dec) +#define os_atomic_and_orig(p, v, m) _os_atomic_error_is_starvable(os_atomic_and_orig) +#define os_atomic_and(p, v, m) _os_atomic_error_is_starvable(os_atomic_and) +#define os_atomic_andnot_orig(p, v, m) _os_atomic_error_is_starvable(os_atomic_andnot_orig) +#define os_atomic_andnot(p, v, m) _os_atomic_error_is_starvable(os_atomic_andnot) +#define os_atomic_or_orig(p, v, m) _os_atomic_error_is_starvable(os_atomic_or_orig) +#define os_atomic_or(p, v, m) _os_atomic_error_is_starvable(os_atomic_or) +#define os_atomic_xor_orig(p, v, m) _os_atomic_error_is_starvable(os_atomic_xor_orig) +#define os_atomic_xor(p, v, m) _os_atomic_error_is_starvable(os_atomic_xor) +#define os_atomic_min_orig(p, v, m) _os_atomic_error_is_starvable(os_atomic_min_orig) +#define os_atomic_min(p, v, m) _os_atomic_error_is_starvable(os_atomic_min) +#define os_atomic_max_orig(p, v, m) _os_atomic_error_is_starvable(os_atomic_max_orig) +#define os_atomic_max(p, v, m) _os_atomic_error_is_starvable(os_atomic_max) +#define os_atomic_xchg(p, v, m) _os_atomic_error_is_starvable(os_atomic_xchg) +#define os_atomic_cmpxchg(p, e, v, m) _os_atomic_error_is_starvable(os_atomic_cmpxchg) +#define os_atomic_cmpxchgv(p, e, v, g, m) _os_atomic_error_is_starvable(os_atomic_cmpxchgv) +#define os_atomic_rmw_loop(p, ov, nv, m, ...) _os_atomic_error_is_starvable(os_atomic_rmw_loop) +#define os_atomic_rmw_loop_give_up(...) _os_atomic_error_is_starvable(os_atomic_rmw_loop_give_up) + +#endif // !OS_ATOMIC_HAS_STARVATION_FREE_RMW && OS_ATOMIC_CONFIG_STARVATION_FREE_ONLY + +#if OS_ATOMIC_CONFIG_MEMORY_ORDER_DEPENDENCY + +/*! + * @typedef os_atomic_dependency_t + * + * @brief + * Type for dependency tokens that can be derived from loads with dependency + * and injected into various expressions. + * + * @warning + * The implementation of atomic dependencies makes painstakingly sure that the + * compiler doesn't know that os_atomic_dependency_t::__opaque_zero is always 0. + * + * Users of os_atomic_dependency_t MUST NOT test its value (even with an + * assert), as doing so would allow the compiler to reason about the value and + * elide its use to inject hardware dependencies (thwarting the entire purpose + * of the construct). + */ +typedef struct { unsigned long __opaque_zero; } os_atomic_dependency_t; + +/*! + * @const OS_ATOMIC_DEPENDENCY_NONE + * + * @brief + * A value to pass to functions that can carry dependencies, to indicate that + * no dependency should be carried. 
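+ *
+ * For instance (an editor's sketch; the names are hypothetical), a dependency
+ * chain is typically created and prolonged like this:
+ *
+ *     node = os_atomic_load(&head, dependency);
+ *     dep  = os_atomic_make_dependency(node);
+ *     val  = os_atomic_load_with_dependency_on(&node->value, dep);
+ *
+ * Passing OS_ATOMIC_DEPENDENCY_NONE instead of `dep` turns the injection into
+ * a no-op, as described for os_atomic_inject_dependency() below.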
+ */ +#define OS_ATOMIC_DEPENDENCY_NONE \ + ((os_atomic_dependency_t){ 0UL }) + +/*! + * @function os_atomic_make_dependency + * + * @brief + * Create a dependency token that can be injected into expressions to force a + * hardware dependency. + * + * @discussion + * This function is only useful for cases where the dependency needs to be used + * several times. + * + * os_atomic_load_with_dependency_on() and os_atomic_inject_dependency() are + * otherwise capable of automatically creating dependency tokens. + * + * @param v + * The result of: + * - an os_atomic_load(..., dependency), + * - an os_atomic_inject_dependency(), + * - an os_atomic_load_with_dependency_on(). + * + * Note that due to implementation limitations, the type of @p v must be + * register-sized, if necessary an explicit cast is required. + * + * @returns + * An os_atomic_dependency_t token that can be used to prolongate dependency + * chains. + * + * The token value is always 0, but the compiler must never be able to reason + * about that fact (c.f. os_atomic_dependency_t) + */ +#define os_atomic_make_dependency(v) \ + ((void)(v), OS_ATOMIC_DEPENDENCY_NONE) + +/*! + * @function os_atomic_inject_dependency + * + * @brief + * Inject a hardware dependency resulting from a `dependency` load into a + * specified pointer. + * + * @param p + * A pointer to inject the dependency into. + * + * @param e + * - a dependency token returned from os_atomic_make_dependency(), + * + * - OS_ATOMIC_DEPENDENCY_NONE, which turns this operation into a no-op, + * + * - any value accepted by os_atomic_make_dependency(). + * + * @returns + * A value equal to @a p but that prolongates the dependency chain rooted at + * @a e. + */ +#define os_atomic_inject_dependency(p, e) \ + ((typeof(*(p)) *)((p) + _os_atomic_auto_dependency(e).__opaque_zero)) + +/*! + * @function os_atomic_load_with_dependency_on + * + * @brief + * Load that prolongates the dependency chain rooted at `v`. + * + * @discussion + * This is shorthand for: + * + * + * os_atomic_load(os_atomic_inject_dependency(p, e), dependency) + * + * + * @param p + * A pointer to an atomic variable. + * + * @param e + * - a dependency token returned from os_atomic_make_dependency(), + * + * - OS_ATOMIC_DEPENDENCY_NONE, which turns this operation into a no-op, + * + * - any value accepted by os_atomic_make_dependency(). + * + * @returns + * The value loaded from @a p. + */ +#define os_atomic_load_with_dependency_on(p, e) \ + os_atomic_load(os_atomic_inject_dependency(p, e), dependency) + +#endif // OS_ATOMIC_CONFIG_MEMORY_ORDER_DEPENDENCY + +#include "atomic_private_arch.h" // Per architecture overrides + +#endif /* __OS_ATOMIC_PRIVATE_H__ */ diff --git a/libkern/os/atomic_private_arch.h b/libkern/os/atomic_private_arch.h new file mode 100644 index 000000000..52510bd51 --- /dev/null +++ b/libkern/os/atomic_private_arch.h @@ -0,0 +1,219 @@ +/* + * Copyright (c) 2019 Apple Inc. All rights reserved. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. 
The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. + * + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ + */ + +/* + * This header provides some gory details to implement the + * interfaces. Nothing in this header should be called directly, no promise is + * made to keep this interface stable. + * + * Architecture overrides. + */ + +#ifndef __OS_ATOMIC_PRIVATE_H__ +#error "Do not include directly, use " +#endif + +#ifndef __OS_ATOMIC_PRIVATE_ARCH_H__ +#define __OS_ATOMIC_PRIVATE_ARCH_H__ + +#pragma mark - arm v7 + +#if defined(__arm__) + +#if OS_ATOMIC_CONFIG_MEMORY_ORDER_DEPENDENCY +/* + * On armv7, we do provide fine grained dependency injection, so + * memory_order_dependency maps to relaxed as far as thread fences are concerned + */ +#undef _os_atomic_mo_dependency +#define _os_atomic_mo_dependency memory_order_relaxed + +#undef os_atomic_make_dependency +#define os_atomic_make_dependency(v) ({ \ + os_atomic_dependency_t _dep; \ + __asm__ __volatile__("and %[_dep], %[_v], #0" \ + : [_dep] "=r" (_dep.__opaque_zero) \ + : [_v] "r" (v)); \ + os_compiler_barrier(acquire); \ + _dep; \ +}) +#endif // OS_ATOMIC_CONFIG_MEMORY_ORDER_DEPENDENCY + +#define os_atomic_clear_exclusive() __builtin_arm_clrex() + +#define os_atomic_load_exclusive(p, m) ({ \ + __auto_type _r = __builtin_arm_ldrex(os_cast_to_nonatomic_pointer(p)); \ + _os_memory_fence_after_atomic(m); \ + _os_compiler_barrier_after_atomic(m); \ + _r; \ +}) + +#define os_atomic_store_exclusive(p, v, m) ({ \ + _os_compiler_barrier_before_atomic(m); \ + _os_memory_fence_before_atomic(m); \ + !__builtin_arm_strex(v, os_cast_to_nonatomic_pointer(p)); \ +}) + +#if !OS_ATOMIC_HAS_STARVATION_FREE_RMW && !OS_ATOMIC_CONFIG_STARVATION_FREE_ONLY + +/* + * armv7 override of os_atomic_rmw_loop + * documentation for os_atomic_rmw_loop is in + */ +#undef os_atomic_rmw_loop +#define os_atomic_rmw_loop(p, ov, nv, m, ...) ({ \ + int _result = 0; uint32_t _err = 0; \ + __auto_type *_p = os_cast_to_nonatomic_pointer(p); \ + for (;;) { \ + ov = __builtin_arm_ldrex(_p); \ + __VA_ARGS__; \ + if (!_err) { \ + /* release barrier only done for the first loop iteration */ \ + _os_memory_fence_before_atomic(m); \ + } \ + _err = __builtin_arm_strex(nv, _p); \ + if (__builtin_expect(!_err, 1)) { \ + _os_memory_fence_after_atomic(m); \ + _result = 1; \ + break; \ + } \ + } \ + _os_compiler_barrier_after_atomic(m); \ + _result; \ +}) + +/* + * armv7 override of os_atomic_rmw_loop_give_up + * documentation for os_atomic_rmw_loop_give_up is in + */ +#undef os_atomic_rmw_loop_give_up +#define os_atomic_rmw_loop_give_up(...) 
\ + ({ os_atomic_clear_exclusive(); __VA_ARGS__; break; }) + +#endif // !OS_ATOMIC_HAS_STARVATION_FREE_RMW && !OS_ATOMIC_CONFIG_STARVATION_FREE_ONLY + +#endif // __arm__ + +#pragma mark - arm64 + +#if defined(__arm64__) + +#if OS_ATOMIC_CONFIG_MEMORY_ORDER_DEPENDENCY +/* + * On arm64, we do provide fine grained dependency injection, so + * memory_order_dependency maps to relaxed as far as thread fences are concerned + */ +#undef _os_atomic_mo_dependency +#define _os_atomic_mo_dependency memory_order_relaxed + +#undef os_atomic_make_dependency +#if __ARM64_ARCH_8_32__ +#define os_atomic_make_dependency(v) ({ \ + os_atomic_dependency_t _dep; \ + __asm__ __volatile__("and %w[_dep], %w[_v], wzr" \ + : [_dep] "=r" (_dep.__opaque_zero) \ + : [_v] "r" (v)); \ + os_compiler_barrier(acquire); \ + _dep; \ +}) +#else +#define os_atomic_make_dependency(v) ({ \ + os_atomic_dependency_t _dep; \ + __asm__ __volatile__("and %[_dep], %[_v], xzr" \ + : [_dep] "=r" (_dep.__opaque_zero) \ + : [_v] "r" (v)); \ + os_compiler_barrier(acquire); \ + _dep; \ +}) +#endif +#endif // OS_ATOMIC_CONFIG_MEMORY_ORDER_DEPENDENCY + +#if defined(__ARM_ARCH_8_4__) +/* on armv8.4 16-byte aligned load/store pair is atomic */ +#undef os_atomic_load_is_plain +#define os_atomic_load_is_plain(p) (sizeof(*(p)) <= 16) +#endif + +#define os_atomic_clear_exclusive() __builtin_arm_clrex() + +#define os_atomic_load_exclusive(p, m) ({ \ + __auto_type _r = _os_atomic_mo_has_acquire(_os_atomic_mo_##m##_smp) \ + ? __builtin_arm_ldaex(os_cast_to_nonatomic_pointer(p)) \ + : __builtin_arm_ldrex(os_cast_to_nonatomic_pointer(p)); \ + _os_compiler_barrier_after_atomic(m); \ + _r; \ +}) + +#define os_atomic_store_exclusive(p, v, m) ({ \ + _os_compiler_barrier_before_atomic(m); \ + (_os_atomic_mo_has_release(_os_atomic_mo_##m##_smp) \ + ? !__builtin_arm_stlex(v, os_cast_to_nonatomic_pointer(p)) \ + : !__builtin_arm_strex(v, os_cast_to_nonatomic_pointer(p))); \ +}) + +#if !OS_ATOMIC_HAS_STARVATION_FREE_RMW && !OS_ATOMIC_CONFIG_STARVATION_FREE_ONLY + +/* + * arm64 (without armv81 atomics) override of os_atomic_rmw_loop + * documentation for os_atomic_rmw_loop is in + */ +#undef os_atomic_rmw_loop +#define os_atomic_rmw_loop(p, ov, nv, m, ...) ({ \ + int _result = 0; \ + __auto_type *_p = os_cast_to_nonatomic_pointer(p); \ + _os_compiler_barrier_before_atomic(m); \ + do { \ + if (_os_atomic_mo_has_acquire(_os_atomic_mo_##m##_smp)) { \ + ov = __builtin_arm_ldaex(_p); \ + } else { \ + ov = __builtin_arm_ldrex(_p); \ + } \ + __VA_ARGS__; \ + if (_os_atomic_mo_has_release(_os_atomic_mo_##m##_smp)) { \ + _result = !__builtin_arm_stlex(nv, _p); \ + } else { \ + _result = !__builtin_arm_strex(nv, _p); \ + } \ + } while (__builtin_expect(!_result, 0)); \ + _os_compiler_barrier_after_atomic(m); \ + _result; \ +}) + +/* + * arm64 override of os_atomic_rmw_loop_give_up + * documentation for os_atomic_rmw_loop_give_up is in + */ +#undef os_atomic_rmw_loop_give_up +#define os_atomic_rmw_loop_give_up(...) \ + ({ os_atomic_clear_exclusive(); __VA_ARGS__; break; }) + +#endif // !OS_ATOMIC_HAS_STARVATION_FREE_RMW && !OS_ATOMIC_CONFIG_STARVATION_FREE_ONLY + +#endif // __arm64__ + +#endif /* __OS_ATOMIC_PRIVATE_ARCH_H__ */ diff --git a/libkern/os/atomic_private_impl.h b/libkern/os/atomic_private_impl.h new file mode 100644 index 000000000..276a91f5e --- /dev/null +++ b/libkern/os/atomic_private_impl.h @@ -0,0 +1,257 @@ +/* + * Copyright (c) 2018 Apple Inc. All rights reserved. 
+ * + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. + * + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ + */ + +/* + * This header provides some gory details to implement the + * interfaces. Nothing in this header should be called directly, no promise is + * made to keep this interface stable. + */ + +#ifndef __OS_ATOMIC_PRIVATE_H__ +#error "Do not include directly, use " +#endif + +#ifndef __OS_ATOMIC_PRIVATE_IMPL_H__ +#define __OS_ATOMIC_PRIVATE_IMPL_H__ + +#pragma mark - implementation details + +static inline int +_os_atomic_mo_has_acquire(OS_ATOMIC_STD memory_order ord) +{ + switch (ord) { + case os_atomic_std(memory_order_consume): + case os_atomic_std(memory_order_acquire): + case os_atomic_std(memory_order_acq_rel): + case os_atomic_std(memory_order_seq_cst): + return 1; + default: + return 0; + } +} + +static inline int +_os_atomic_mo_has_release(OS_ATOMIC_STD memory_order ord) +{ + switch (ord) { + case os_atomic_std(memory_order_release): + case os_atomic_std(memory_order_acq_rel): + case os_atomic_std(memory_order_seq_cst): + return 1; + default: + return 0; + } +} + +#define _os_atomic_mo_relaxed os_atomic_std(memory_order_relaxed) +#define _os_atomic_mo_compiler_acquire os_atomic_std(memory_order_relaxed) +#define _os_atomic_mo_compiler_release os_atomic_std(memory_order_relaxed) +#define _os_atomic_mo_compiler_acq_rel os_atomic_std(memory_order_relaxed) +#define _os_atomic_mo_consume os_atomic_std(memory_order_consume) +#if OS_ATOMIC_CONFIG_MEMORY_ORDER_DEPENDENCY +#define _os_atomic_mo_dependency os_atomic_std(memory_order_acquire) +#endif // OS_ATOMIC_CONFIG_MEMORY_ORDER_DEPENDENCY +#define _os_atomic_mo_acquire os_atomic_std(memory_order_acquire) +#define _os_atomic_mo_release os_atomic_std(memory_order_release) +#define _os_atomic_mo_acq_rel os_atomic_std(memory_order_acq_rel) +#define _os_atomic_mo_seq_cst os_atomic_std(memory_order_seq_cst) + +/* + * Mapping between symbolic memory orderings and actual ones + * to take SMP into account. 
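+ *
+ * For example, with OS_ATOMIC_CONFIG_SMP set, _os_atomic_mo_acquire_smp maps
+ * to memory_order_acquire, while in the non-SMP configuration it collapses to
+ * memory_order_relaxed and only the compiler barrier remains.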
+ */ +#if OS_ATOMIC_CONFIG_SMP +#define _os_atomic_mo_relaxed_smp _os_atomic_mo_relaxed +#define _os_atomic_mo_compiler_acquire_smp _os_atomic_mo_relaxed +#define _os_atomic_mo_compiler_release_smp _os_atomic_mo_relaxed +#define _os_atomic_mo_compiler_acq_rel_smp _os_atomic_mo_relaxed +#define _os_atomic_mo_consume_smp _os_atomic_mo_consume +#if OS_ATOMIC_CONFIG_MEMORY_ORDER_DEPENDENCY +#define _os_atomic_mo_dependency_smp _os_atomic_mo_dependency +#endif // OS_ATOMIC_CONFIG_MEMORY_ORDER_DEPENDENCY +#define _os_atomic_mo_acquire_smp _os_atomic_mo_acquire +#define _os_atomic_mo_release_smp _os_atomic_mo_release +#define _os_atomic_mo_acq_rel_smp _os_atomic_mo_acq_rel +#define _os_atomic_mo_seq_cst_smp _os_atomic_mo_seq_cst +#else +#define _os_atomic_mo_relaxed_smp _os_atomic_mo_relaxed +#define _os_atomic_mo_compiler_acquire_smp _os_atomic_mo_relaxed +#define _os_atomic_mo_compiler_release_smp _os_atomic_mo_relaxed +#define _os_atomic_mo_compiler_acq_rel_smp _os_atomic_mo_relaxed +#define _os_atomic_mo_consume_smp _os_atomic_mo_relaxed +#if OS_ATOMIC_CONFIG_MEMORY_ORDER_DEPENDENCY +#define _os_atomic_mo_dependency_smp _os_atomic_mo_relaxed +#endif // OS_ATOMIC_CONFIG_MEMORY_ORDER_DEPENDENCY +#define _os_atomic_mo_acquire_smp _os_atomic_mo_relaxed +#define _os_atomic_mo_release_smp _os_atomic_mo_relaxed +#define _os_atomic_mo_acq_rel_smp _os_atomic_mo_relaxed +#define _os_atomic_mo_seq_cst_smp _os_atomic_mo_relaxed +#endif + +#if KERNEL_PRIVATE +#define memory_order_relaxed_smp _os_atomic_mo_relaxed_smp +#define memory_order_compiler_acquire_smp _os_atomic_mo_compiler_acquire_smp +#define memory_order_compiler_release_smp _os_atomic_mo_compiler_release_smp +#define memory_order_compiler_acq_rel_smp _os_atomic_mo_compiler_acq_rel_smp +#define memory_order_consume_smp _os_atomic_mo_consume_smp +#if OS_ATOMIC_CONFIG_MEMORY_ORDER_DEPENDENCY +#define memory_order_dependency _os_atomic_mo_dependency +#define memory_order_dependency_smp _os_atomic_mo_dependency_smp +#endif // OS_ATOMIC_CONFIG_MEMORY_ORDER_DEPENDENCY +#define memory_order_acquire_smp _os_atomic_mo_acquire_smp +#define memory_order_release_smp _os_atomic_mo_release_smp +#define memory_order_acq_rel_smp _os_atomic_mo_acq_rel_smp +#define memory_order_seq_cst_smp _os_atomic_mo_seq_cst_smp +#endif + +/* + * Hack needed for os_compiler_barrier() to work (including with empty argument) + */ +#define _os_compiler_barrier_relaxed _os_atomic_mo_relaxed +#define _os_compiler_barrier_acquire _os_atomic_mo_acquire +#define _os_compiler_barrier_release _os_atomic_mo_release +#define _os_compiler_barrier_acq_rel _os_atomic_mo_acq_rel +#define _os_compiler_barrier_ _os_atomic_mo_acq_rel + +/* + * Mapping between compiler barrier/memory orders and: + * - compiler barriers before atomics ("rel_barrier") + * - compiler barriers after atomics ("acq_barrier") + */ +#define _os_rel_barrier_relaxed _os_atomic_mo_relaxed +#define _os_rel_barrier_compiler_acquire _os_atomic_mo_relaxed +#define _os_rel_barrier_compiler_release _os_atomic_mo_release +#define _os_rel_barrier_compiler_acq_rel _os_atomic_mo_release +#define _os_rel_barrier_consume _os_atomic_mo_relaxed +#if OS_ATOMIC_CONFIG_MEMORY_ORDER_DEPENDENCY +#define _os_rel_barrier_dependency _os_atomic_mo_relaxed +#endif // OS_ATOMIC_CONFIG_MEMORY_ORDER_DEPENDENCY +#define _os_rel_barrier_acquire _os_atomic_mo_relaxed +#define _os_rel_barrier_release _os_atomic_mo_release +#define _os_rel_barrier_acq_rel _os_atomic_mo_release +#define _os_rel_barrier_seq_cst _os_atomic_mo_release + +#define 
_os_acq_barrier_relaxed _os_atomic_mo_relaxed +#define _os_acq_barrier_compiler_acquire _os_atomic_mo_acquire +#define _os_acq_barrier_compiler_release _os_atomic_mo_relaxed +#define _os_acq_barrier_compiler_acq_rel _os_atomic_mo_acquire +#define _os_acq_barrier_consume _os_atomic_mo_acquire +#if OS_ATOMIC_CONFIG_MEMORY_ORDER_DEPENDENCY +#define _os_acq_barrier_dependency _os_atomic_mo_acquire +#endif // OS_ATOMIC_CONFIG_MEMORY_ORDER_DEPENDENCY +#define _os_acq_barrier_acquire _os_atomic_mo_acquire +#define _os_acq_barrier_release _os_atomic_mo_relaxed +#define _os_acq_barrier_acq_rel _os_atomic_mo_acquire +#define _os_acq_barrier_seq_cst _os_atomic_mo_acquire + +#define _os_compiler_barrier_before_atomic(m) \ + os_atomic_std(atomic_signal_fence)(_os_rel_barrier_##m) +#define _os_compiler_barrier_after_atomic(m) \ + os_atomic_std(atomic_signal_fence)(_os_acq_barrier_##m) + +/* + * Mapping between compiler barrier/memmory orders and: + * - memory fences before atomics ("rel_fence") + * - memory fences after atomics ("acq_fence") + */ +#define _os_rel_fence_relaxed _os_atomic_mo_relaxed +#define _os_rel_fence_compiler_acquire _os_atomic_mo_relaxed +#define _os_rel_fence_compiler_release _os_atomic_mo_release +#define _os_rel_fence_compiler_acq_rel _os_atomic_mo_release +#define _os_rel_fence_consume _os_atomic_mo_relaxed_smp +#if OS_ATOMIC_CONFIG_MEMORY_ORDER_DEPENDENCY +#define _os_rel_fence_dependency _os_atomic_mo_relaxed_smp +#endif // OS_ATOMIC_CONFIG_MEMORY_ORDER_DEPENDENCY +#define _os_rel_fence_acquire _os_atomic_mo_relaxed_smp +#define _os_rel_fence_release _os_atomic_mo_release_smp +#define _os_rel_fence_acq_rel _os_atomic_mo_release_smp +#define _os_rel_fence_seq_cst _os_atomic_mo_release_smp + +#define _os_acq_fence_relaxed _os_atomic_mo_relaxed +#define _os_acq_fence_compiler_acquire _os_atomic_mo_relaxed +#define _os_acq_fence_compiler_release _os_atomic_mo_relaxed +#define _os_acq_fence_compiler_acq_rel _os_atomic_mo_relaxed +#define _os_acq_fence_consume _os_atomic_mo_acquire_smp +#if OS_ATOMIC_CONFIG_MEMORY_ORDER_DEPENDENCY +#define _os_acq_fence_dependency _os_atomic_mo_dependency_smp +#endif // OS_ATOMIC_CONFIG_MEMORY_ORDER_DEPENDENCY +#define _os_acq_fence_acquire _os_atomic_mo_acquire_smp +#define _os_acq_fence_release _os_atomic_mo_relaxed_smp +#define _os_acq_fence_acq_rel _os_atomic_mo_acquire_smp +#define _os_acq_fence_seq_cst _os_atomic_mo_acquire_smp + +#define _os_memory_fence_before_atomic(m) \ + os_atomic_std(atomic_thread_fence)(_os_rel_fence_##m) +#define _os_memory_fence_after_atomic(m) \ + os_atomic_std(atomic_thread_fence)(_os_acq_fence_##m) + +/* + * Misc. 
helpers + */ + +#define _os_atomic_value_cast(p, v) \ + ({ typeof(*os_cast_to_nonatomic_pointer(p)) ___v = (v); ___v; }) + +#define _os_atomic_c11_op_orig(p, v, m, o) ({ \ + _os_compiler_barrier_before_atomic(m); \ + __auto_type _r = os_atomic_std(atomic_##o##_explicit)(\ + os_cast_to_atomic_pointer(p), \ + _os_atomic_value_cast(p, v), \ + _os_atomic_mo_##m##_smp); \ + _os_compiler_barrier_after_atomic(m); \ + _r; \ +}) + +#define _os_atomic_c11_op(p, v, m, o, op) ({ \ + __auto_type _v = _os_atomic_value_cast(p, v); \ + _os_atomic_c11_op_orig(p, _v, m, o) op _v; \ +}) + +#define _os_atomic_clang_op_orig(p, v, m, o) ({ \ + _os_compiler_barrier_before_atomic(m); \ + __auto_type _r = __atomic_##o(os_cast_to_nonatomic_pointer(p), \ + _os_atomic_value_cast(p, v), \ + _os_atomic_mo_##m##_smp); \ + _os_compiler_barrier_after_atomic(m); \ + _r; \ +}) + +#define _os_atomic_clang_op(p, v, m, o, op) ({ \ + __auto_type _v = _os_atomic_value_cast(p, v); \ + __auto_type _r = _os_atomic_clang_op_orig(p, _v, m, o); \ + op(_r, _v); \ +}) + +#if OS_ATOMIC_CONFIG_MEMORY_ORDER_DEPENDENCY +#define _os_atomic_auto_dependency(e) \ + _Generic(e, \ + os_atomic_dependency_t: (e), \ + default: os_atomic_make_dependency(e)) +#endif // OS_ATOMIC_CONFIG_MEMORY_ORDER_DEPENDENCY + +#endif /* __OS_ATOMIC_PRIVATE_IMPL_H__ */ diff --git a/libkern/os/base.h b/libkern/os/base.h index bea2772a4..b3e16ce39 100644 --- a/libkern/os/base.h +++ b/libkern/os/base.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2013 Apple Inc. All rights reserved. + * Copyright (c) 2008-2020 Apple Inc. All rights reserved. * * @APPLE_APACHE_LICENSE_HEADER_START@ * @@ -23,6 +23,7 @@ #include + #ifndef __has_builtin #define __has_builtin(x) 0 #endif @@ -124,6 +125,8 @@ #if defined(__cplusplus) && defined(__clang__) #define OS_FALLTHROUGH [[clang::fallthrough]] +#elif __has_attribute(fallthrough) +#define OS_FALLTHROUGH __attribute__((__fallthrough__)) #else #define OS_FALLTHROUGH #endif @@ -172,16 +175,13 @@ #if __has_feature(objc_fixed_enum) || __has_extension(cxx_fixed_enum) || \ __has_extension(cxx_strong_enums) #define OS_ENUM(_name, _type, ...) \ - typedef enum : _type { __VA_ARGS__ } _name##_t + typedef enum : _type { __VA_ARGS__ } _name##_t #define OS_CLOSED_ENUM(_name, _type, ...) \ - typedef enum : _type { __VA_ARGS__ } \ - __OS_ENUM_ATTR_CLOSED _name##_t + typedef enum : _type { __VA_ARGS__ } __OS_ENUM_ATTR_CLOSED _name##_t #define OS_OPTIONS(_name, _type, ...) \ - typedef enum : _type { __VA_ARGS__ } \ - __OS_ENUM_ATTR __OS_OPTIONS_ATTR _name##_t + typedef enum : _type { __VA_ARGS__ } __OS_ENUM_ATTR __OS_OPTIONS_ATTR _name##_t #define OS_CLOSED_OPTIONS(_name, _type, ...) \ - typedef enum : _type { __VA_ARGS__ } \ - __OS_ENUM_ATTR_CLOSED __OS_OPTIONS_ATTR _name##_t + typedef enum : _type { __VA_ARGS__ } __OS_ENUM_ATTR_CLOSED __OS_OPTIONS_ATTR _name##_t #else /*! * There is unfortunately no good way in plain C to have both fixed-type enums @@ -214,25 +214,25 @@ * When compiling in ObjC or C++, both of the above assignments are illegal. */ #define __OS_ENUM_C_FALLBACK(_name, _type, ...) \ - typedef _type _name##_t; enum _name { __VA_ARGS__ } + typedef _type _name##_t; enum _name { __VA_ARGS__ } #define OS_ENUM(_name, _type, ...) \ - typedef _type _name##_t; enum { __VA_ARGS__ } + typedef _type _name##_t; enum { __VA_ARGS__ } #define OS_CLOSED_ENUM(_name, _type, ...) 
\ - __OS_ENUM_C_FALLBACK(_name, _type, ## __VA_ARGS__) \ - __OS_ENUM_ATTR_CLOSED + __OS_ENUM_C_FALLBACK(_name, _type, ## __VA_ARGS__) \ + __OS_ENUM_ATTR_CLOSED #define OS_OPTIONS(_name, _type, ...) \ - __OS_ENUM_C_FALLBACK(_name, _type, ## __VA_ARGS__) \ - __OS_ENUM_ATTR __OS_OPTIONS_ATTR + __OS_ENUM_C_FALLBACK(_name, _type, ## __VA_ARGS__) \ + __OS_ENUM_ATTR __OS_OPTIONS_ATTR #define OS_CLOSED_OPTIONS(_name, _type, ...) \ - __OS_ENUM_C_FALLBACK(_name, _type, ## __VA_ARGS__) \ - __OS_ENUM_ATTR_CLOSED __OS_OPTIONS_ATTR + __OS_ENUM_C_FALLBACK(_name, _type, ## __VA_ARGS__) \ + __OS_ENUM_ATTR_CLOSED __OS_OPTIONS_ATTR #endif // __has_feature(objc_fixed_enum) || __has_extension(cxx_strong_enums) #if __has_feature(attribute_availability_swift) // equivalent to __SWIFT_UNAVAILABLE from Availability.h #define OS_SWIFT_UNAVAILABLE(_msg) \ - __attribute__((__availability__(swift, unavailable, message=_msg))) + __attribute__((__availability__(swift, unavailable, message=_msg))) #else #define OS_SWIFT_UNAVAILABLE(_msg) #endif @@ -256,16 +256,12 @@ #ifdef __GNUC__ #define os_prevent_tail_call_optimization() __asm__("") -#define os_is_compile_time_constant(expr) __builtin_constant_p(expr) -#ifndef KERNEL -#define os_compiler_barrier() __asm__ __volatile__("" ::: "memory") -#endif +#define os_is_compile_time_constant(expr) __builtin_constant_p(expr) +#define os_compiler_barrier() __asm__ __volatile__("" ::: "memory") #else #define os_prevent_tail_call_optimization() do { } while (0) -#define os_is_compile_time_constant(expr) 0 -#ifndef KERNEL -#define os_compiler_barrier() do { } while (0) -#endif +#define os_is_compile_time_constant(expr) 0 +#define os_compiler_barrier() do { } while (0) #endif #if __has_attribute(not_tail_called) @@ -274,6 +270,7 @@ #define OS_NOT_TAIL_CALLED #endif +#if KERNEL /* * LIBKERN_ALWAYS_DESTROY attribute can be applied to global variables with * destructors. It specifies that and object should have its exit-time @@ -281,10 +278,11 @@ * -fno-c++-static-destructors. */ #if __has_attribute(always_destroy) -#define LIBKERN_ALWAYS_DESTROY __attribute__((always_destroy)) +#define LIBKERN_ALWAYS_DESTROY __attribute__((__always_destroy__)) #else #define LIBKERN_ALWAYS_DESTROY #endif +#endif typedef void (*os_function_t)(void *_Nullable); @@ -332,4 +330,21 @@ typedef void (*os_function_t)(void *_Nullable); typedef void (^os_block_t)(void); #endif +#if KERNEL +#if __has_feature(ptrauth_calls) +#include +#define OS_PTRAUTH_SIGNED_PTR(type) __ptrauth(ptrauth_key_process_independent_data, 1, ptrauth_string_discriminator(type)) +#define OS_PTRAUTH_DISCRIMINATOR(str) ptrauth_string_discriminator(str) +#define __ptrauth_only +#else // __has_feature(ptrauth_calls) +#define OS_PTRAUTH_SIGNED_PTR(type) +#define OS_PTRAUTH_DISCRIMINATOR(str) 0 +#define __ptrauth_only __unused +#endif // __has_feature(ptrauth_calls) +#endif // KERNEL + +#if KERNEL_PRIVATE +#define XNU_PTRAUTH_SIGNED_PTR OS_PTRAUTH_SIGNED_PTR +#endif // KERNEL_PRIVATE + #endif // __OS_BASE__ diff --git a/libkern/os/base_private.h b/libkern/os/base_private.h new file mode 100644 index 000000000..9f8b01de2 --- /dev/null +++ b/libkern/os/base_private.h @@ -0,0 +1,43 @@ +/* + * Copyright (c) 2008-2013 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +#ifndef __OS_BASE_PRIVATE__ +#define __OS_BASE_PRIVATE__ + +#ifndef KERNEL +#include +#endif +#include + +#ifndef os_fastpath +#define os_fastpath(x) ((__typeof__(x))OS_EXPECT((long)(x), ~0l)) +#endif +#ifndef os_slowpath +#define os_slowpath(x) ((__typeof__(x))OS_EXPECT((long)(x), 0l)) +#endif +#ifndef os_likely +#define os_likely(x) OS_EXPECT(!!(x), 1) +#endif +#ifndef os_unlikely +#define os_unlikely(x) OS_EXPECT(!!(x), 0) +#endif + + +#endif // __OS_BASE_PRIVATE__ diff --git a/libkern/os/cpp_util.h b/libkern/os/cpp_util.h index dc7236bff..5821f1f08 100644 --- a/libkern/os/cpp_util.h +++ b/libkern/os/cpp_util.h @@ -11,6 +11,8 @@ # define OS_HAS_RVALUE_REFERENCES 1 #endif +void* operator new(size_t, void*); + namespace os { #if OS_HAS_NULLPTR typedef decltype(nullptr) nullptr_t; @@ -33,6 +35,9 @@ template struct remove_const {typedef _T type;}; template struct remove_const {typedef _T type;}; template using remove_const_t = typename remove_const<_T>::type; +template struct is_lvalue_reference { static constexpr bool value = false; }; +template struct is_lvalue_reference { static constexpr bool value = true; }; + /* * Move */ @@ -44,6 +49,69 @@ move(_T && _t) typedef typename os::remove_reference<_T>::type _U; return static_cast<_U &&>(_t); } + +template +T* +move(T* first, T* last, T* d_first) +{ + for (; first != last; ++d_first, (void)++first) { + *d_first = os::move(*first); + } + return d_first; +} + +template +constexpr T && forward(os::remove_reference_t&t) noexcept { + return static_cast(t); +} + +template +constexpr T && forward(os::remove_reference_t&& t) noexcept { + static_assert(!os::is_lvalue_reference::value, + "can not forward an rvalue as an lvalue"); + return static_cast(t); +} + +// Moves [first, last) into the range ending at d_last, +// proceeding backwards (from last to first) +// UB if d_last is within (first, last] +template +T* +move_backward(T* first, T* last, T* d_last) +{ + while (first != last) { + *(--d_last) = os::move(*(--last)); + } + return d_last; +} + +template +T* +uninitialized_move(T* first, T* last, T* d_first) +{ + for (; first != last; ++d_first, (void) ++first) { + ::new (static_cast(d_first)) T(os::move(*first)); + } + return first; +} + +template +void +destroy(T* first, T* last) +{ + for (; first != last; ++first) { + first->~T(); + } +} + +template +void +uninitialized_value_construct(T* first, T* last) +{ + for (; first != last; ++first) { + ::new (static_cast(first)) T(); + } +} } #endif /* _OS_CPP_UTIL_H */ diff --git a/libkern/os/log.c b/libkern/os/log.c index dd4478913..0cd4a9deb 100644 --- a/libkern/os/log.c +++ b/libkern/os/log.c @@ -25,6 +25,7 @@ #include #include #include +#include #include #include @@ -37,15 +38,6 @@ #include "log_encode.h" -/* on embedded, with no kext loading or unloads, - * make the kernel use the libtrace shared cache path for logging - */ -#define FIREHOSE_USES_SHARED_CACHE NO_KEXTD - -#if FIREHOSE_USES_SHARED_CACHE -extern vm_offset_t segLOWESTTEXT; -#endif - struct os_log_s { int a; }; @@ -55,7 +47,7 @@ struct os_log_s _os_log_replay; extern 
vm_offset_t kernel_firehose_addr; extern firehose_chunk_t firehose_boot_chunk; -extern void bsd_log_lock(void); +extern bool bsd_log_lock(bool); extern void bsd_log_unlock(void); extern void logwakeup(struct msgbuf *); @@ -80,14 +72,19 @@ uint32_t oslog_p_error_count = 0; uint32_t oslog_p_saved_msgcount = 0; uint32_t oslog_p_dropped_msgcount = 0; uint32_t oslog_p_boot_dropped_msgcount = 0; +uint32_t oslog_p_coprocessor_total_msgcount = 0; +uint32_t oslog_p_coprocessor_dropped_msgcount = 0; /* Counters for streaming mode */ uint32_t oslog_s_total_msgcount = 0; uint32_t oslog_s_error_count = 0; uint32_t oslog_s_metadata_msgcount = 0; +/* Counters for msgbuf logging */ +uint32_t oslog_msgbuf_msgcount = 0; +uint32_t oslog_msgbuf_dropped_msgcount = 0; + static bool oslog_boot_done = false; -extern boolean_t early_boot_complete; #ifdef XNU_KERNEL_PRIVATE bool startup_serial_logging_active = true; @@ -101,7 +98,7 @@ firehose_debug_trace(firehose_stream_t stream, firehose_tracepoint_id_t trace_id static inline firehose_tracepoint_id_t _firehose_trace(firehose_stream_t stream, firehose_tracepoint_id_u ftid, - uint64_t stamp, const void *pubdata, size_t publen); + uint64_t stamp, const void *pubdata, size_t publen, bool use_streaming); static oslog_stream_buf_entry_t oslog_stream_create_buf_entry(oslog_stream_link_type_t type, firehose_tracepoint_id_u ftid, @@ -238,7 +235,7 @@ _os_log_with_args_internal(os_log_t oslog, os_log_type_t type, } /* early boot can log to dmesg for later replay (27307943) */ - safe = (!early_boot_complete || oslog_is_safe()); + safe = (startup_phase < STARTUP_SUB_EARLY_BOOT || oslog_is_safe()); if (logging_config & ATM_TRACE_DISABLE || logging_config & ATM_TRACE_OFF) { logging = false; @@ -258,16 +255,22 @@ _os_log_with_args_internal(os_log_t oslog, os_log_type_t type, static void _os_log_to_msgbuf_internal(const char *format, va_list args, bool safe, bool logging, bool addcr) { + /* + * The following threshold was determined empirically as the point where + * it would be more advantageous to be able to fit in more log lines than + * to know exactly when a log line was printed out. We don't want to use up + * a large percentage of the log buffer on timestamps in a memory-constricted + * environment. + */ + const int MSGBUF_TIMESTAMP_THRESHOLD = 4096; static int msgbufreplay = -1; + static bool newlogline = true; va_list args_copy; -#if DEVELOPMENT || DEBUG - if (safe) { - bsd_log_lock(); + if (!bsd_log_lock(safe)) { + os_atomic_inc(&oslog_msgbuf_dropped_msgcount, relaxed); + return; } -#else - bsd_log_lock(); -#endif if (!safe) { if (-1 == msgbufreplay) { @@ -288,9 +291,10 @@ _os_log_to_msgbuf_internal(const char *format, va_list args, bool safe, bool log } bsd_log_unlock(); /* Allocate a temporary non-circular buffer */ - if ((localbuff = (char *)kalloc_noblock(localbuff_size))) { + localbuff = kheap_alloc(KHEAP_TEMP, localbuff_size, Z_NOWAIT); + if (localbuff != NULL) { /* in between here, the log could become bigger, but that's fine */ - bsd_log_lock(); + bsd_log_lock(true); /* * The message buffer is circular; start at the replay pointer, and * make one loop up to write pointer - 1. 
@@ -326,32 +330,34 @@ _os_log_to_msgbuf_internal(const char *format, va_list args, bool safe, bool log next[0] = ch; s = next; } - kfree(localbuff, localbuff_size); + kheap_free(KHEAP_TEMP, localbuff, localbuff_size); } - bsd_log_lock(); + bsd_log_lock(true); + } + + /* Do not prepend timestamps when we are memory-constricted */ + if (newlogline && (msgbufp->msg_size > MSGBUF_TIMESTAMP_THRESHOLD)) { + clock_sec_t secs; + clock_usec_t microsecs; + const uint64_t timestamp = firehose_tracepoint_time(firehose_activity_flags_default); + absolutetime_to_microtime(timestamp, &secs, µsecs); + printf_log_locked(FALSE, "[%5lu.%06u]: ", (unsigned long)secs, microsecs); } va_copy(args_copy, args); - vprintf_log_locked(format, args_copy, addcr); + newlogline = vprintf_log_locked(format, args_copy, addcr); va_end(args_copy); -#if DEVELOPMENT || DEBUG - if (safe) { - bsd_log_unlock(); - logwakeup(msgbufp); - } -#else bsd_log_unlock(); - if (safe) { - logwakeup(msgbufp); - } -#endif + logwakeup(msgbufp); + os_atomic_inc(&oslog_msgbuf_msgcount, relaxed); } static void _os_log_to_log_internal(os_log_t oslog, os_log_type_t type, const char *format, va_list args, void *addr, void *dso, bool driverKit) { + kc_format_t kcformat = KCFormatUnknown; struct os_log_buffer_context_s context; unsigned char buffer_data[OS_LOG_BUFFER_MAX_SIZE] __attribute__((aligned(8))); os_log_buffer_t buffer = (os_log_buffer_t)buffer_data; @@ -362,26 +368,33 @@ _os_log_to_log_internal(os_log_t oslog, os_log_type_t type, return; } -#if FIREHOSE_USES_SHARED_CACHE - dso = (void *) segLOWESTTEXT; -#else /* FIREHOSE_USES_SHARED_CACHE */ - if (dso == NULL) { - dso = (void *) OSKextKextForAddress(format); - if (dso == NULL) { - return; - } - } - - if (!_os_trace_addr_in_text_segment(dso, format)) { + if (!PE_get_primary_kc_format(&kcformat)) { return; } - if (!driverKit) { - void *dso_addr = (void *) OSKextKextForAddress(addr); - if (dso != dso_addr) { + + if (kcformat == KCFormatStatic || kcformat == KCFormatKCGEN) { + void *baseAddress = PE_get_kc_baseaddress(KCKindPrimary); + if (!baseAddress) { return; } + dso = baseAddress; + } else if (kcformat == KCFormatDynamic || kcformat == KCFormatFileset) { + if (dso == NULL) { + dso = (void *) OSKextKextForAddress(format); + if (dso == NULL) { + return; + } + } + if (!_os_trace_addr_in_text_segment(dso, format)) { + return; + } + if (!driverKit) { + void *dso_addr = (void *) OSKextKextForAddress(addr); + if (dso != dso_addr) { + return; + } + } } -#endif /* FIREHOSE_USES_SHARED_CACHE */ memset(&context, 0, sizeof(context)); memset(buffer, 0, OS_LOG_BUFFER_MAX_SIZE); @@ -408,42 +421,44 @@ static inline size_t _os_trace_write_location_for_address(uint8_t buf[static sizeof(uint64_t)], void *dso, const void *address, firehose_tracepoint_flags_t *flags, __unused bool driverKit) { - uintptr_t shift_addr = (uintptr_t)address - (uintptr_t)dso; -#if FIREHOSE_USES_SHARED_CACHE - - *flags = _firehose_tracepoint_flags_pc_style_shared_cache; - memcpy(buf, (uint32_t[]){ shift_addr }, - sizeof(uint32_t)); - return sizeof(uint32_t); + uintptr_t shift_addr = (uintptr_t)address - (uintptr_t)dso; -#else /* FIREHOSE_USES_SHARED_CACHE */ - kernel_mach_header_t *mh = dso; + kc_format_t kcformat = KCFormatUnknown; + __assert_only bool result = PE_get_primary_kc_format(&kcformat); + assert(result); - /* - * driverKit will have the dso set as MH_EXECUTE - * (it is logging from a syscall in the kernel) - * but needs logd to parse the address as an - * absolute pc. 
- */ - if (mh->filetype == MH_EXECUTE && !driverKit) { - *flags = _firehose_tracepoint_flags_pc_style_main_exe; - memcpy(buf, (uint32_t[]){ shift_addr}, sizeof(uint32_t)); + if (kcformat == KCFormatStatic || kcformat == KCFormatKCGEN) { + *flags = _firehose_tracepoint_flags_pc_style_shared_cache; + memcpy(buf, (uint32_t[]){ (uint32_t)shift_addr }, sizeof(uint32_t)); return sizeof(uint32_t); } else { - *flags = _firehose_tracepoint_flags_pc_style_absolute; - if (!driverKit) { - shift_addr = VM_KERNEL_UNSLIDE(address); + kernel_mach_header_t *mh = dso; + + /* + * driverKit will have the dso set as MH_EXECUTE + * (it is logging from a syscall in the kernel) + * but needs logd to parse the address as an + * absolute pc. + */ + if (mh->filetype == MH_EXECUTE && !driverKit) { + *flags = _firehose_tracepoint_flags_pc_style_main_exe; + memcpy(buf, (uint32_t[]){ (uint32_t)shift_addr }, sizeof(uint32_t)); + return sizeof(uint32_t); } else { - shift_addr = (uintptr_t) address; - } - memcpy(buf, (uintptr_t[]){ shift_addr }, sizeof(uintptr_t)); + *flags = _firehose_tracepoint_flags_pc_style_absolute; + if (!driverKit) { + shift_addr = VM_KERNEL_UNSLIDE(address); + } else { + shift_addr = (uintptr_t) address; + } + memcpy(buf, (uintptr_t[]){ shift_addr }, sizeof(uintptr_t)); #if __LP64__ - return 6; // 48 bits are enough + return 6; // 48 bits are enough #else - return sizeof(uintptr_t); + return sizeof(uintptr_t); #endif + } } -#endif /* !FIREHOSE_USES_SHARED_CACHE */ } @@ -493,28 +508,71 @@ _os_log_actual(os_log_t oslog __unused, os_log_type_t type, const char *format, // set FIREHOSE_TRACEPOINT_PC_DYNAMIC_BIT so logd will not try to find the format string in // the executable text trace_id.ftid_value = FIREHOSE_TRACE_ID_MAKE(firehose_tracepoint_namespace_log, - type, flags, (uintptr_t) addr | FIREHOSE_TRACEPOINT_PC_DYNAMIC_BIT); + type, flags, (uint32_t)((uintptr_t)addr | FIREHOSE_TRACEPOINT_PC_DYNAMIC_BIT)); } else { // create trace_id after we've set additional flags trace_id.ftid_value = FIREHOSE_TRACE_ID_MAKE(firehose_tracepoint_namespace_log, type, flags, _os_trace_offset(dso, format, (_firehose_tracepoint_flags_activity_t)flags)); } - if (FALSE) { - firehose_debug_trace(stream, trace_id.ftid_value, timestamp, - format, buffdata, buffdata_sz); + if (type == OS_LOG_TYPE_INFO || type == OS_LOG_TYPE_DEBUG) { + stream = firehose_stream_memory; + } else { + stream = firehose_stream_persist; } + _firehose_trace(stream, trace_id, timestamp, buffdata, buffdata_sz, true); +} + +bool +os_log_coprocessor(void *buff, uint64_t buff_len, os_log_type_t type, + const char *uuid, uint64_t timestamp, uint32_t offset, bool stream_log) +{ + firehose_tracepoint_id_u trace_id; + firehose_tracepoint_id_t return_id = 0; + firehose_stream_t stream; + uint8_t pubdata[OS_LOG_BUFFER_MAX_SIZE]; + size_t wr_pos = 0; + + if (buff_len + 16 + sizeof(uint32_t) > OS_LOG_BUFFER_MAX_SIZE) { + return false; + } + + // unlike kext, where pc is used to find uuid, in coprocessor logs the uuid is passed as part of the tracepoint + firehose_tracepoint_flags_t flags = _firehose_tracepoint_flags_pc_style_uuid_relative; + + memcpy(pubdata, &offset, sizeof(uint32_t)); + wr_pos += sizeof(uint32_t); + memcpy(pubdata + wr_pos, uuid, 16); + wr_pos += 16; + + memcpy(pubdata + wr_pos, buff, buff_len); + + // create firehose trace id + trace_id.ftid_value = FIREHOSE_TRACE_ID_MAKE(firehose_tracepoint_namespace_log, + type, flags, offset); + if (type == OS_LOG_TYPE_INFO || type == OS_LOG_TYPE_DEBUG) { stream = firehose_stream_memory; } else { stream 
= firehose_stream_persist; } - _firehose_trace(stream, trace_id, timestamp, buffdata, buffdata_sz); + + os_atomic_inc(&oslog_p_coprocessor_total_msgcount, relaxed); + + // send firehose tracepoint containing os log to firehose buffer + return_id = _firehose_trace(stream, trace_id, timestamp, pubdata, + buff_len + wr_pos, stream_log); + + if (return_id == 0) { + os_atomic_inc(&oslog_p_coprocessor_dropped_msgcount, relaxed); + return false; + } + return true; } static inline firehose_tracepoint_id_t _firehose_trace(firehose_stream_t stream, firehose_tracepoint_id_u ftid, - uint64_t stamp, const void *pubdata, size_t publen) + uint64_t stamp, const void *pubdata, size_t publen, bool use_streaming) { const uint16_t ft_size = offsetof(struct firehose_tracepoint_s, ft_data); const size_t _firehose_chunk_payload_size = @@ -528,7 +586,7 @@ _firehose_trace(firehose_stream_t stream, firehose_tracepoint_id_u ftid, return 0; } - if (oslog_stream_open && (stream != firehose_stream_metadata)) { + if (oslog_stream_open && (stream != firehose_stream_metadata) && use_streaming) { stream_lock(); if (!oslog_stream_open) { stream_unlock(); @@ -559,13 +617,13 @@ out: //only stream available during boot is persist offset = firehose_chunk_tracepoint_try_reserve(fbc, stamp, - firehose_stream_persist, 0, publen, 0, NULL); + firehose_stream_persist, 0, (uint16_t)publen, 0, NULL); if (offset <= 0) { os_atomic_inc(&oslog_p_boot_dropped_msgcount, relaxed); return 0; } - ft = firehose_chunk_tracepoint_begin(fbc, stamp, publen, + ft = firehose_chunk_tracepoint_begin(fbc, stamp, (uint16_t)publen, thread_tid(current_thread()), offset); memcpy(ft->ft_data, pubdata, publen); firehose_chunk_tracepoint_end(fbc, ft, ftid); @@ -587,37 +645,70 @@ out: } static oslog_stream_buf_entry_t -oslog_stream_create_buf_entry(oslog_stream_link_type_t type, firehose_tracepoint_id_u ftid, - uint64_t stamp, const void* pubdata, size_t publen) +oslog_stream_create_buf_entry(oslog_stream_link_type_t type, + firehose_tracepoint_id_u ftid, uint64_t stamp, + const void* pubdata, size_t publen) { - oslog_stream_buf_entry_t m_entry = NULL; - firehose_tracepoint_t ft = NULL; - size_t m_entry_len = 0; + const size_t ft_size = sizeof(struct firehose_tracepoint_s) + publen; + const size_t m_entry_len = sizeof(struct oslog_stream_buf_entry_s) + ft_size; + oslog_stream_buf_entry_t m_entry; - if (!pubdata) { + if (!pubdata || publen > UINT16_MAX || ft_size > UINT16_MAX) { return NULL; } - m_entry_len = sizeof(struct oslog_stream_buf_entry_s) + - sizeof(struct firehose_tracepoint_s) + publen; - m_entry = (oslog_stream_buf_entry_t) kalloc(m_entry_len); + m_entry = kheap_alloc(KHEAP_DEFAULT, m_entry_len, Z_WAITOK); if (!m_entry) { return NULL; } - m_entry->type = type; m_entry->timestamp = stamp; - m_entry->size = sizeof(struct firehose_tracepoint_s) + publen; + m_entry->size = (uint16_t)ft_size; - ft = m_entry->metadata; + firehose_tracepoint_t ft = m_entry->metadata; ft->ft_thread = thread_tid(current_thread()); ft->ft_id.ftid_value = ftid.ftid_value; - ft->ft_length = publen; + ft->ft_length = (uint16_t)publen; memcpy(ft->ft_data, pubdata, publen); return m_entry; } +void +os_log_coprocessor_register(const char *uuid, const char *file_path, bool copy) +{ + uint64_t stamp; + size_t path_size = strlen(file_path) + 1; + firehose_tracepoint_id_u trace_id; + size_t uuid_info_len = sizeof(struct firehose_trace_uuid_info_s) + path_size; + union { + struct firehose_trace_uuid_info_s uuid_info; + char path[PATH_MAX + sizeof(struct firehose_trace_uuid_info_s)]; + } 
buf; + + if (path_size > PATH_MAX) { + return; + } + + // write metadata to uuid_info + memcpy(buf.uuid_info.ftui_uuid, uuid, sizeof(uuid_t)); + buf.uuid_info.ftui_size = 1; + buf.uuid_info.ftui_address = 1; + + stamp = firehose_tracepoint_time(firehose_activity_flags_default); + + // create tracepoint id + trace_id.ftid_value = FIREHOSE_TRACE_ID_MAKE(firehose_tracepoint_namespace_metadata, _firehose_tracepoint_type_metadata_coprocessor, + (firehose_tracepoint_flags_t)0, copy ? firehose_tracepoint_code_load_memory : firehose_tracepoint_code_load_filesystem); + + // write path to buffer + memcpy(buf.uuid_info.ftui_path, file_path, path_size); + + // send metadata tracepoint to firehose for coprocessor registration in logd + firehose_trace_metadata(firehose_stream_metadata, trace_id, stamp, (void *)&buf, uuid_info_len); + return; +} + #ifdef KERNEL void firehose_trace_metadata(firehose_stream_t stream, firehose_tracepoint_id_u ftid, @@ -646,7 +737,8 @@ firehose_trace_metadata(firehose_stream_t stream, firehose_tracepoint_id_u ftid, stream_lock(); if (!oslog_stream_open) { stream_unlock(); - kfree(m_entry, sizeof(struct oslog_stream_buf_entry_s) + + kheap_free(KHEAP_DEFAULT, m_entry, + sizeof(struct oslog_stream_buf_entry_s) + sizeof(struct firehose_tracepoint_s) + publen); goto finish; } @@ -655,35 +747,10 @@ firehose_trace_metadata(firehose_stream_t stream, firehose_tracepoint_id_u ftid, stream_unlock(); finish: - _firehose_trace(stream, ftid, stamp, pubdata, publen); + _firehose_trace(stream, ftid, stamp, pubdata, publen, true); } #endif -firehose_tracepoint_id_t -firehose_debug_trace(firehose_stream_t stream, firehose_tracepoint_id_t trace_id, - uint64_t timestamp, const char *format, const void *pubdata, size_t publen) -{ - kprintf("[os_log stream 0x%x trace_id 0x%llx timestamp %llu format '%s' data %p len %lu]\n", - (unsigned int)stream, (unsigned long long)trace_id, timestamp, - format, pubdata, publen); - size_t i; - const unsigned char *cdata = (const unsigned char *)pubdata; - for (i = 0; i < publen; i += 8) { - kprintf(">oslog 0x%08x: 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x\n", - (unsigned int)i, - (i + 0) < publen ? cdata[i + 0] : 0, - (i + 1) < publen ? cdata[i + 1] : 0, - (i + 2) < publen ? cdata[i + 2] : 0, - (i + 3) < publen ? cdata[i + 3] : 0, - (i + 4) < publen ? cdata[i + 4] : 0, - (i + 5) < publen ? cdata[i + 5] : 0, - (i + 6) < publen ? cdata[i + 6] : 0, - (i + 7) < publen ? 
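Taken together, os_log_coprocessor_register() and os_log_coprocessor() above give a coprocessor driver a two-step flow: register the firmware UUID once, then forward each message with its format-string offset. A hypothetical sketch based only on the signatures declared here; the firmware path, the offset, and the behaviour attributed to the copy flag are illustrative assumptions, not documented behaviour:

/* Hypothetical caller; uuid, path, and offset values are made up. */
static void
forward_iop_message(const char fw_uuid[16], void *msg, uint64_t msg_len,
    uint32_t fmt_offset, uint64_t timestamp)
{
    static bool registered = false;

    if (!registered) {
        /* copy == false appears to let logd resolve uuid metadata from the
         * file on disk (assumption, based on the load_filesystem code above) */
        os_log_coprocessor_register(fw_uuid, "/usr/standalone/firmware/example.img4", false);
        registered = true;
    }

    if (!os_log_coprocessor(msg, msg_len, OS_LOG_TYPE_DEFAULT, fw_uuid,
        timestamp, fmt_offset, /* stream_log */ true)) {
        /* rejected or dropped: the payload plus the 20-byte offset/uuid header must
         * fit in OS_LOG_BUFFER_MAX_SIZE, and the firehose reservation can fail */
    }
}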
cdata[i + 7] : 0 - ); - } - return trace_id; -} - void __firehose_buffer_push_to_logd(firehose_buffer_t fb __unused, bool for_io __unused) { @@ -731,7 +798,7 @@ __firehose_critical_region_leave(void) #define TESTOSLOG(fn_name) TESTOSLOGPFX TESTOSLOGFMT(fn_name "#") extern u_int32_t RandomULong(void); -extern uint32_t find_pattern_in_buffer(char * pattern, uint32_t len, int expected_count); +extern size_t find_pattern_in_buffer(const char *pattern, size_t len, size_t expected_count); void test_oslog_default_helper(uint32_t uniqid, uint64_t count); void test_oslog_info_helper(uint32_t uniqid, uint64_t count); void test_oslog_debug_helper(uint32_t uniqid, uint64_t count); @@ -771,12 +838,12 @@ test_os_log() { char databuffer[256]; uint32_t uniqid = RandomULong(); - uint32_t match_count = 0; + size_t match_count = 0; uint32_t checksum = 0; uint32_t total_msg = 0; uint32_t saved_msg = 0; uint32_t dropped_msg = 0; - int datalen = 0; + size_t datalen = 0; uint64_t a = mach_absolute_time(); uint64_t seqno = 1; uint64_t total_seqno = 2; @@ -808,7 +875,7 @@ test_os_log() datalen = scnprintf(databuffer, sizeof(databuffer), "kernel^0^test^printf_only#mat%llu", a); match_count = find_pattern_in_buffer(databuffer, datalen, total_seqno); - T_EXPECT_EQ_UINT(match_count, 2, "verify printf_only goes to systemlog buffer"); + T_EXPECT_EQ_ULONG(match_count, total_seqno, "verify printf_only goes to systemlog buffer"); uint32_t logging_config = atm_get_diagnostic_config(); T_LOG("checking atm_diagnostic_config 0x%X", logging_config); @@ -834,7 +901,7 @@ test_os_log() datalen = scnprintf(databuffer, sizeof(databuffer), "kernel^0^test^oslog_info#mat%llu", a); match_count = find_pattern_in_buffer(databuffer, datalen, total_seqno); - T_EXPECT_EQ_UINT(match_count, 1, "verify oslog_info does not go to systemlog buffer"); + T_EXPECT_EQ_ULONG(match_count, total_seqno, "verify oslog_info does not go to systemlog buffer"); total_msg = oslog_p_total_msgcount; test_oslog_info_helper(uniqid, 10); diff --git a/libkern/os/log.h b/libkern/os/log.h index 2972daca7..08229dd3a 100644 --- a/libkern/os/log.h +++ b/libkern/os/log.h @@ -493,6 +493,26 @@ os_log_debug_enabled(os_log_t log); __asm__(""); /* avoid tailcall */ \ }) +/*! + * @function os_log_coprocessor + * + * @abstract + * IOP logging function, intended for use by RTBuddy for + * coprocessor os log functionality only. + */ +bool +os_log_coprocessor(void *buff, uint64_t buff_len, os_log_type_t type, + const char *uuid, uint64_t timestamp, uint32_t offset, bool stream_log); + +/*! + * @function os_log_coprocessor_register + * + * @abstract + * IOP metadata registration, intended for use by RTBuddy for + * coprocessor os log functionality only. + */ +void +os_log_coprocessor_register(const char *uuid, const char *file_path, bool copy); /*! 
* @function os_log_sensitive_debug diff --git a/libkern/os/log_encode.h b/libkern/os/log_encode.h index 4591be6dc..82f2ac21d 100644 --- a/libkern/os/log_encode.h +++ b/libkern/os/log_encode.h @@ -38,23 +38,25 @@ extern boolean_t doprnt_hide_pointers; #endif static bool -_encode_data(os_log_buffer_value_t content, const void *arg, uint16_t arg_len, os_log_buffer_context_t context) +_encode_data(os_log_buffer_value_t content, const void *arg, size_t arg_len, os_log_buffer_context_t context) { struct os_log_arginfo_s arginfo; void *databuf; + arg_len = MIN(arg_len, UINT16_MAX); + if (content->flags & OS_LOG_CONTENT_FLAG_PRIVATE) { databuf = context->privdata + context->privdata_off; - arginfo.length = MIN(arg_len, (context->privdata_sz - context->privdata_off)); + arginfo.length = MIN((uint16_t)arg_len, (context->privdata_sz - context->privdata_off)); arginfo.offset = context->privdata_off; } else { databuf = context->pubdata + context->pubdata_off; - arginfo.length = MIN(arg_len, (context->pubdata_sz - context->pubdata_off)); + arginfo.length = MIN((uint16_t)arg_len, (context->pubdata_sz - context->pubdata_off)); arginfo.offset = context->pubdata_off; } if (context->arg_content_sz > 0) { - arginfo.length = MIN(context->arg_content_sz, arginfo.length); + arginfo.length = MIN((uint16_t)context->arg_content_sz, arginfo.length); } memcpy(content->value, &arginfo, sizeof(arginfo)); @@ -144,7 +146,7 @@ _os_log_parse_annotated(char *annotated, const char **visibility, const char **l OS_ALWAYS_INLINE static inline bool -_os_log_encode_arg(void *arg, uint16_t arg_len, os_log_value_type_t ctype, bool is_private, os_log_buffer_context_t context) +_os_log_encode_arg(void *arg, size_t arg_len, os_log_value_type_t ctype, bool is_private, os_log_buffer_context_t context) { os_log_buffer_value_t content = (os_log_buffer_value_t) &context->buffer->content[context->content_off]; size_t content_sz = sizeof(*content) + arg_len; @@ -207,7 +209,7 @@ _os_log_encode_arg(void *arg, uint16_t arg_len, os_log_value_type_t ctype, bool } memcpy(content->value, arg, arg_len); - content->size = arg_len; + content->size = (uint8_t)arg_len; context->content_off += content_sz; } break; @@ -331,12 +333,12 @@ _os_log_encode(const char *format, va_list args, int saved_errno, os_log_buffer_ case 'X': // upper-hex switch (type) { case OST_CHAR: - value.type.ch = va_arg(args, int); + value.type.ch = (char) va_arg(args, int); _os_log_encode_arg(&value.type.ch, sizeof(value.type.ch), OS_LOG_BUFFER_VALUE_TYPE_SCALAR, false, context); break; case OST_SHORT: - value.type.s = va_arg(args, int); + value.type.s = (short) va_arg(args, int); _os_log_encode_arg(&value.type.s, sizeof(value.type.s), OS_LOG_BUFFER_VALUE_TYPE_SCALAR, false, context); break; @@ -440,7 +442,7 @@ _os_log_encode(const char *format, va_list args, int saved_errno, os_log_buffer_ #endif /* !KERNEL */ case 'c': // char - value.type.ch = va_arg(args, int); + value.type.ch = (char) va_arg(args, int); _os_log_encode_arg(&value.type.ch, sizeof(value.type.ch), OS_LOG_BUFFER_VALUE_TYPE_SCALAR, false, context); done = true; break; diff --git a/libkern/os/object.h b/libkern/os/object.h index 16cfea0f0..373b31ead 100644 --- a/libkern/os/object.h +++ b/libkern/os/object.h @@ -18,6 +18,8 @@ * @APPLE_APACHE_LICENSE_HEADER_END@ */ +#if KERNEL + #ifndef __OS_OBJECT__ #define __OS_OBJECT__ @@ -199,4 +201,11 @@ os_release(void *object); __END_DECLS -#endif +#endif /* OS_OBJECT file guard */ + +#else /* KERNEL */ + +/* This should use the libdispatch header */ +#include_next + +#endif /* 
KERNEL */ diff --git a/libkern/os/object_private.h b/libkern/os/object_private.h deleted file mode 100644 index 5908a640b..000000000 --- a/libkern/os/object_private.h +++ /dev/null @@ -1,148 +0,0 @@ -/* - * Copyright (c) 2011-2012 Apple Inc. All rights reserved. - * - * @APPLE_APACHE_LICENSE_HEADER_START@ - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * @APPLE_APACHE_LICENSE_HEADER_END@ - */ - -/* - * IMPORTANT: This header file describes INTERNAL interfaces to libdispatch - * which are subject to change in future releases of Mac OS X. Any applications - * relying on these interfaces WILL break. - */ - -#ifndef __OS_OBJECT_PRIVATE__ -#define __OS_OBJECT_PRIVATE__ - -#include -#include -#include - -#ifndef __OSX_AVAILABLE_STARTING -#define __OSX_AVAILABLE_STARTING(x, y) -#endif - -#if __GNUC__ -#define OS_OBJECT_NOTHROW __attribute__((__nothrow__)) -#define OS_OBJECT_NONNULL __attribute__((__nonnull__)) -#define OS_OBJECT_WARN_RESULT __attribute__((__warn_unused_result__)) -#define OS_OBJECT_MALLOC __attribute__((__malloc__)) -#define OS_OBJECT_EXPORT extern __attribute__((visibility("default"))) -#else -/*! @parseOnly */ -#define OS_OBJECT_NOTHROW -/*! @parseOnly */ -#define OS_OBJECT_NONNULL -/*! @parseOnly */ -#define OS_OBJECT_WARN_RESULT -/*! 
@parseOnly */ -#define OS_OBJECT_MALLOC -#define OS_OBJECT_EXPORT extern -#endif - -#if OS_OBJECT_USE_OBJC && defined(__has_feature) -#if __has_feature(objc_arc) -#define _OS_OBJECT_OBJC_ARC 1 -#else -#define _OS_OBJECT_OBJC_ARC 0 -#endif -#else -#define _OS_OBJECT_OBJC_ARC 0 -#endif - -#define _OS_OBJECT_GLOBAL_REFCNT INT_MAX - -#define _OS_OBJECT_HEADER(isa, ref_cnt, xref_cnt) \ - isa; /* must be pointer-sized */ \ - int volatile ref_cnt; \ - int volatile xref_cnt - -#if OS_OBJECT_HAVE_OBJC_SUPPORT -// Must match size of compiler-generated OBJC_CLASS structure rdar://10640168 -#define _OS_OBJECT_CLASS_HEADER() \ - void *_os_obj_objc_class_t[5] -#else -#define _OS_OBJECT_CLASS_HEADER() \ - void (*_os_obj_xref_dispose)(_os_object_t); \ - void (*_os_obj_dispose)(_os_object_t) -#endif - -#define OS_OBJECT_CLASS(name) OS_##name - -#if OS_OBJECT_USE_OBJC -__OSX_AVAILABLE_STARTING(__MAC_10_8, __IPHONE_6_0) -OS_OBJECT_EXPORT -@interface OS_OBJECT_CLASS(object) : NSObject -- (void)_xref_dispose; -- (void)_dispose; -@end -typedef OS_OBJECT_CLASS(object) *_os_object_t; -#define _OS_OBJECT_DECL_SUBCLASS_INTERFACE(name, super) \ - @interface OS_OBJECT_CLASS(name) : OS_OBJECT_CLASS(super) \ - \ - @end -#else -typedef struct _os_object_s *_os_object_t; -#endif - -__BEGIN_DECLS - -#if !_OS_OBJECT_OBJC_ARC - - __OSX_AVAILABLE_STARTING(__MAC_10_8, __IPHONE_6_0) -OS_OBJECT_EXPORT OS_OBJECT_MALLOC OS_OBJECT_WARN_RESULT OS_OBJECT_NOTHROW -_os_object_t -_os_object_alloc(const void *cls, size_t size); - -__OSX_AVAILABLE_STARTING(__MAC_10_9, __IPHONE_7_0) -OS_OBJECT_EXPORT OS_OBJECT_MALLOC OS_OBJECT_WARN_RESULT OS_OBJECT_NOTHROW -_os_object_t -_os_object_alloc_realized(const void *cls, size_t size); - -__OSX_AVAILABLE_STARTING(__MAC_10_8, __IPHONE_6_0) -OS_OBJECT_EXPORT OS_OBJECT_NONNULL OS_OBJECT_NOTHROW -void _os_object_dealloc(_os_object_t object); - -__OSX_AVAILABLE_STARTING(__MAC_10_8, __IPHONE_6_0) -OS_OBJECT_EXPORT OS_OBJECT_NONNULL OS_OBJECT_NOTHROW -_os_object_t -_os_object_retain(_os_object_t object); - -__OSX_AVAILABLE_STARTING(__MAC_10_11, __IPHONE_9_0) -OS_OBJECT_EXPORT OS_OBJECT_NONNULL OS_OBJECT_NOTHROW -_os_object_t -_os_object_retain_with_resurrect(_os_object_t obj); - -__OSX_AVAILABLE_STARTING(__MAC_10_8, __IPHONE_6_0) -OS_OBJECT_EXPORT OS_OBJECT_NONNULL OS_OBJECT_NOTHROW -void -_os_object_release(_os_object_t object); - -__OSX_AVAILABLE_STARTING(__MAC_10_8, __IPHONE_6_0) -OS_OBJECT_EXPORT OS_OBJECT_NONNULL OS_OBJECT_NOTHROW -_os_object_t -_os_object_retain_internal(_os_object_t object); - -__OSX_AVAILABLE_STARTING(__MAC_10_8, __IPHONE_6_0) -OS_OBJECT_EXPORT OS_OBJECT_NONNULL OS_OBJECT_NOTHROW -void -_os_object_release_internal(_os_object_t object); - -#endif // !_OS_OBJECT_OBJC_ARC - -__END_DECLS - -#endif diff --git a/libkern/os/refcnt.c b/libkern/os/refcnt.c index 0cbcdf745..e3e0f5dac 100644 --- a/libkern/os/refcnt.c +++ b/libkern/os/refcnt.c @@ -6,6 +6,7 @@ #include #include #endif +#include #include "refcnt.h" @@ -20,8 +21,11 @@ bool ref_debug_enable = false; static const size_t ref_log_nrecords = 1000000; #define REFLOG_BTDEPTH 10 -#define REFLOG_RETAIN 1 -#define REFLOG_RELEASE 2 + +__enum_closed_decl(reflog_op_t, uint8_t, { + REFLOG_RETAIN = 1, + REFLOG_RELEASE = 2 +}); #define __debug_only #else @@ -60,9 +64,9 @@ os_ref_panic_overflow(void *rc) } static inline void -os_ref_check_underflow(void *rc, os_ref_count_t count) +os_ref_check_underflow(void *rc, os_ref_count_t count, os_ref_count_t n) { - if (__improbable(count == 0)) { + if (__improbable(count < n)) { 
os_ref_panic_underflow(rc); } } @@ -76,17 +80,11 @@ os_ref_check_overflow(os_ref_atomic_t *rc, os_ref_count_t count) } static inline void -os_ref_assert_referenced(void *rc, os_ref_count_t count) +os_ref_check_retain(os_ref_atomic_t *rc, os_ref_count_t count, os_ref_count_t n) { - if (__improbable(count == 0)) { + if (__improbable(count < n)) { os_ref_panic_resurrection(rc); } -} - -static inline void -os_ref_check_retain(os_ref_atomic_t *rc, os_ref_count_t count) -{ - os_ref_assert_referenced(rc, count); os_ref_check_overflow(rc, count); } @@ -94,7 +92,7 @@ os_ref_check_retain(os_ref_atomic_t *rc, os_ref_count_t count) #if KERNEL __attribute__((cold, noinline)) static void -ref_log_op(struct os_refgrp *grp, void *elem, int op) +ref_log_op(struct os_refgrp *grp, void *elem, reflog_op_t op) { if (grp == NULL) { return; @@ -158,13 +156,22 @@ ref_log_init(struct os_refgrp *grp) #else #ifndef ref_log_init -# define ref_log_init(...) do {} while (0) +static inline void +ref_log_init(struct os_refgrp *grp __unused) +{ +} #endif #ifndef ref_log_op -# define ref_log_op(...) do {} while (0) +static inline void +ref_log_op(struct os_refgrp *grp __unused, void *rc __unused, reflog_op_t op __unused) +{ +} #endif #ifndef ref_log_drop -# define ref_log_drop(...) do {} while (0) +static inline void +ref_log_drop(struct os_refgrp *grp __unused, void *rc __unused) +{ +} #endif #endif /* KERNEL */ @@ -248,7 +255,7 @@ ref_retain_debug(os_ref_atomic_t *rc, struct os_refgrp * __debug_only grp) void os_ref_init_count_internal(os_ref_atomic_t *rc, struct os_refgrp * __debug_only grp, os_ref_count_t count) { - os_ref_check_underflow(rc, count); + os_ref_check_underflow(rc, count, 1); atomic_init(rc, count); #if OS_REFCNT_DEBUG @@ -262,7 +269,7 @@ void os_ref_retain_internal(os_ref_atomic_t *rc, struct os_refgrp * __debug_only grp) { os_ref_count_t old = atomic_fetch_add_explicit(rc, 1, memory_order_relaxed); - os_ref_check_retain(rc, old); + os_ref_check_retain(rc, old, 1); #if OS_REFCNT_DEBUG if (__improbable(grp && ref_debug_enable)) { @@ -274,20 +281,17 @@ os_ref_retain_internal(os_ref_atomic_t *rc, struct os_refgrp * __debug_only grp) bool os_ref_retain_try_internal(os_ref_atomic_t *rc, struct os_refgrp * __debug_only grp) { - os_ref_count_t cur = os_ref_get_count_internal(rc); + os_ref_count_t cur, next; - while (1) { + os_atomic_rmw_loop(rc, cur, next, relaxed, { if (__improbable(cur == 0)) { - return false; + os_atomic_rmw_loop_give_up(return false); } - os_ref_check_retain(rc, cur); + next = cur + 1; + }); - if (atomic_compare_exchange_weak_explicit(rc, &cur, cur + 1, - memory_order_relaxed, memory_order_relaxed)) { - break; - } - } + os_ref_check_overflow(rc, cur); #if OS_REFCNT_DEBUG if (__improbable(grp && ref_debug_enable)) { @@ -300,7 +304,8 @@ os_ref_retain_try_internal(os_ref_atomic_t *rc, struct os_refgrp * __debug_only __attribute__((always_inline)) static inline os_ref_count_t -_os_ref_release_inline(os_ref_atomic_t *rc, struct os_refgrp * __debug_only grp, +_os_ref_release_inline(os_ref_atomic_t *rc, os_ref_count_t n, + struct os_refgrp * __debug_only grp, memory_order release_order, memory_order dealloc_order) { os_ref_count_t val; @@ -315,15 +320,16 @@ _os_ref_release_inline(os_ref_atomic_t *rc, struct os_refgrp * __debug_only grp, } #endif - val = atomic_fetch_sub_explicit(rc, 1, release_order); - os_ref_check_underflow(rc, val); - if (__improbable(--val == 0)) { + val = atomic_fetch_sub_explicit(rc, n, release_order); + os_ref_check_underflow(rc, val, n); + val -= n; + if (__improbable(val 
< n)) { atomic_load_explicit(rc, dealloc_order); } #if OS_REFCNT_DEBUG if (__improbable(grp && ref_debug_enable)) { - if (val == 0) { + if (val < n) { ref_log_drop(grp, (void *)rc); /* rc is only used as an identifier */ } ref_release_group(grp, !val); @@ -333,6 +339,21 @@ _os_ref_release_inline(os_ref_atomic_t *rc, struct os_refgrp * __debug_only grp, return val; } +#if OS_REFCNT_DEBUG +__attribute__((noinline)) +static os_ref_count_t +os_ref_release_n_internal(os_ref_atomic_t *rc, os_ref_count_t n, + struct os_refgrp * __debug_only grp, + memory_order release_order, memory_order dealloc_order) +{ + // Legacy exported interface with bad codegen due to the barriers + // not being immediate + // + // Also serves as the debug function + return _os_ref_release_inline(rc, n, grp, release_order, dealloc_order); +} +#endif + __attribute__((noinline)) os_ref_count_t os_ref_release_internal(os_ref_atomic_t *rc, struct os_refgrp * __debug_only grp, @@ -342,7 +363,7 @@ os_ref_release_internal(os_ref_atomic_t *rc, struct os_refgrp * __debug_only grp // not being immediate // // Also serves as the debug function - return _os_ref_release_inline(rc, grp, release_order, dealloc_order); + return _os_ref_release_inline(rc, 1, grp, release_order, dealloc_order); } os_ref_count_t @@ -355,7 +376,7 @@ os_ref_release_barrier_internal(os_ref_atomic_t *rc, memory_order_release, memory_order_acquire); } #endif - return _os_ref_release_inline(rc, NULL, + return _os_ref_release_inline(rc, 1, NULL, memory_order_release, memory_order_acquire); } @@ -369,7 +390,7 @@ os_ref_release_relaxed_internal(os_ref_atomic_t *rc, memory_order_relaxed, memory_order_relaxed); } #endif - return _os_ref_release_inline(rc, NULL, + return _os_ref_release_inline(rc, 1, NULL, memory_order_relaxed, memory_order_relaxed); } @@ -377,7 +398,7 @@ void os_ref_retain_locked_internal(os_ref_atomic_t *rc, struct os_refgrp * __debug_only grp) { os_ref_count_t val = os_ref_get_count_internal(rc); - os_ref_check_retain(rc, val); + os_ref_check_retain(rc, val, 1); atomic_store_explicit(rc, ++val, memory_order_relaxed); #if OS_REFCNT_DEBUG @@ -391,7 +412,7 @@ os_ref_count_t os_ref_release_locked_internal(os_ref_atomic_t *rc, struct os_refgrp * __debug_only grp) { os_ref_count_t val = os_ref_get_count_internal(rc); - os_ref_check_underflow(rc, val); + os_ref_check_underflow(rc, val, 1); atomic_store_explicit(rc, --val, memory_order_relaxed); #if OS_REFCNT_DEBUG @@ -411,22 +432,15 @@ os_ref_release_locked_internal(os_ref_atomic_t *rc, struct os_refgrp * __debug_o * Bitwise API */ -os_ref_count_t -os_ref_get_count_mask(os_ref_atomic_t *rc, os_ref_count_t bits) -{ - os_ref_count_t ret; - ret = os_ref_get_count_raw(rc); - return ret >> bits; -} - #undef os_ref_init_count_mask void -os_ref_init_count_mask(os_ref_atomic_t *rc, struct os_refgrp * __debug_only grp, - os_ref_count_t init_count, os_ref_count_t init_bits, os_ref_count_t b) +os_ref_init_count_mask(os_ref_atomic_t *rc, uint32_t b, + struct os_refgrp *__debug_only grp, + os_ref_count_t init_count, uint32_t init_bits) { assert(init_bits < (1U << b)); - os_ref_check_underflow(rc, init_count); atomic_init(rc, (init_count << b) | init_bits); + os_ref_check_underflow(rc, (init_count << b), 1u << b); #if OS_REFCNT_DEBUG if (__improbable(ref_debug_enable && grp)) { @@ -435,13 +449,13 @@ os_ref_init_count_mask(os_ref_atomic_t *rc, struct os_refgrp * __debug_only grp, #endif } -#undef os_ref_retain_mask -void -os_ref_retain_mask(os_ref_atomic_t *rc, struct os_refgrp * __debug_only grp, os_ref_count_t bits) 
+__attribute__((always_inline)) +static inline void +os_ref_retain_mask_inline(os_ref_atomic_t *rc, uint32_t n, + struct os_refgrp *__debug_only grp, memory_order mo) { - os_ref_count_t old = atomic_fetch_add_explicit(rc, 1U << bits, memory_order_relaxed); - os_ref_check_overflow(rc, old); - os_ref_assert_referenced(rc, old >> bits); + os_ref_count_t old = atomic_fetch_add_explicit(rc, n, mo); + os_ref_check_retain(rc, old, n); #if OS_REFCNT_DEBUG if (__improbable(grp && ref_debug_enable)) { @@ -450,59 +464,64 @@ os_ref_retain_mask(os_ref_atomic_t *rc, struct os_refgrp * __debug_only grp, os_ #endif } -#undef os_ref_release_mask_internal -os_ref_count_t -os_ref_release_mask_internal(os_ref_atomic_t *rc, struct os_refgrp * __debug_only grp, os_ref_count_t bits, - memory_order release_order, memory_order dealloc_order) +void +os_ref_retain_mask_internal(os_ref_atomic_t *rc, uint32_t n, + struct os_refgrp *__debug_only grp) +{ + os_ref_retain_mask_inline(rc, n, grp, memory_order_relaxed); +} + +void +os_ref_retain_acquire_mask_internal(os_ref_atomic_t *rc, uint32_t n, + struct os_refgrp *__debug_only grp) +{ + os_ref_retain_mask_inline(rc, n, grp, memory_order_acquire); +} + +uint32_t +os_ref_release_barrier_mask_internal(os_ref_atomic_t *rc, uint32_t n, + struct os_refgrp *__debug_only grp) { #if OS_REFCNT_DEBUG if (__improbable(grp && ref_debug_enable)) { - /* - * Care not to use 'rc' after the decrement because it might be deallocated - * under us. - */ - ref_log_op(grp, (void *)rc, REFLOG_RELEASE); + return os_ref_release_n_internal(rc, n, grp, + memory_order_release, memory_order_acquire); } #endif - os_ref_count_t val = atomic_fetch_sub_explicit(rc, 1U << bits, release_order); - val >>= bits; - os_ref_check_underflow(rc, val); - if (__improbable(--val == 0)) { - atomic_load_explicit(rc, dealloc_order); - } + return _os_ref_release_inline(rc, n, NULL, + memory_order_release, memory_order_acquire); +} +uint32_t +os_ref_release_relaxed_mask_internal(os_ref_atomic_t *rc, uint32_t n, + struct os_refgrp *__debug_only grp) +{ #if OS_REFCNT_DEBUG if (__improbable(grp && ref_debug_enable)) { - if (val == 0) { - ref_log_drop(grp, (void *)rc); /* rc is only used as an identifier */ - } - ref_release_group(grp, !val); + return os_ref_release_n_internal(rc, n, grp, + memory_order_relaxed, memory_order_relaxed); } #endif - return val; + return _os_ref_release_inline(rc, n, NULL, + memory_order_relaxed, memory_order_relaxed); } -#undef os_ref_retain_try_mask bool -os_ref_retain_try_mask(os_ref_atomic_t *rc, struct os_refgrp * __debug_only grp, os_ref_count_t bits) +os_ref_retain_try_mask_internal(os_ref_atomic_t *rc, uint32_t n, + uint32_t reject_mask, struct os_refgrp *__debug_only grp) { - os_ref_count_t cur = os_ref_get_count_internal(rc); + os_ref_count_t cur, next; - while (1) { - if (__improbable((cur >> bits) == 0)) { - return false; + os_atomic_rmw_loop(rc, cur, next, relaxed, { + if (__improbable(cur < n || (cur & reject_mask))) { + os_atomic_rmw_loop_give_up(return false); } + next = cur + n; + }); - os_ref_check_overflow(rc, cur); - - os_ref_count_t next = cur + (1U << bits); - if (atomic_compare_exchange_weak_explicit(rc, &cur, next, - memory_order_relaxed, memory_order_relaxed)) { - break; - } - } + os_ref_check_overflow(rc, cur); #if OS_REFCNT_DEBUG if (__improbable(grp && ref_debug_enable)) { @@ -513,45 +532,26 @@ os_ref_retain_try_mask(os_ref_atomic_t *rc, struct os_refgrp * __debug_only grp, return true; } -#undef os_ref_retain_locked_mask -void 
-os_ref_retain_locked_mask(os_ref_atomic_t *rc, struct os_refgrp * __debug_only grp, os_ref_count_t bits) +bool +os_ref_retain_try_acquire_mask_internal(os_ref_atomic_t *rc, uint32_t n, + uint32_t reject_mask, struct os_refgrp *__debug_only grp) { - os_ref_count_t val = os_ref_get_count_internal(rc); + os_ref_count_t cur, next; - os_ref_check_overflow(rc, val); - os_ref_assert_referenced(rc, val >> bits); + os_atomic_rmw_loop(rc, cur, next, acquire, { + if (__improbable(cur < n || (cur & reject_mask))) { + os_atomic_rmw_loop_give_up(return false); + } + next = cur + n; + }); - val += (1U << bits); - atomic_store_explicit(rc, val, memory_order_relaxed); + os_ref_check_overflow(rc, cur); #if OS_REFCNT_DEBUG if (__improbable(grp && ref_debug_enable)) { ref_retain_debug(rc, grp); } #endif -} - -#undef os_ref_release_locked_mask -os_ref_count_t -os_ref_release_locked_mask(os_ref_atomic_t *rc, struct os_refgrp * __debug_only grp, os_ref_count_t bits) -{ - os_ref_count_t val = os_ref_get_count_internal(rc); - os_ref_check_underflow(rc, val >> bits); - val -= (1U << bits); - atomic_store_explicit(rc, val, memory_order_relaxed); - - val >>= bits; - -#if OS_REFCNT_DEBUG - if (__improbable(grp && ref_debug_enable)) { - ref_release_group(grp, !val); - ref_log_op(grp, (void *)rc, REFLOG_RELEASE); - if (val == 0) { - ref_log_drop(grp, (void *)rc); - } - } -#endif - return val; + return true; } diff --git a/libkern/os/refcnt.h b/libkern/os/refcnt.h index bca8fcdf8..e378e558c 100644 --- a/libkern/os/refcnt.h +++ b/libkern/os/refcnt.h @@ -157,6 +157,8 @@ static os_ref_count_t os_ref_get_count(struct os_refcnt *rc); #if XNU_KERNEL_PRIVATE +#pragma GCC visibility push(hidden) + /* * Raw API that uses a plain atomic counter (os_ref_atomic_t) and a separate * refgroup. This can be used in situations where the refcount object must be @@ -169,7 +171,7 @@ static void os_ref_init_count_raw(os_ref_atomic_t *, struct os_refgrp *, os_ref_ os_error_if(count == 0, "Reference count must be non-zero initialized"); static void os_ref_retain_raw(os_ref_atomic_t *, struct os_refgrp *); static os_ref_count_t os_ref_release_raw(os_ref_atomic_t *, struct os_refgrp *) OS_WARN_RESULT; -static os_ref_count_t os_ref_release_relaxed_raw(os_ref_atomic_t *, struct os_refgrp *) OS_WARN_RESULT; +static os_ref_count_t os_ref_release_raw_relaxed(os_ref_atomic_t *, struct os_refgrp *) OS_WARN_RESULT; static void os_ref_release_live_raw(os_ref_atomic_t *, struct os_refgrp *); static bool os_ref_retain_try_raw(os_ref_atomic_t *, struct os_refgrp *) OS_WARN_RESULT; static void os_ref_retain_locked_raw(os_ref_atomic_t *, struct os_refgrp *); @@ -189,25 +191,52 @@ static os_ref_count_t os_ref_get_count_raw(os_ref_atomic_t *rc); * Due to guard bits, the maximum reference count is 2^(28 - 'b') - 1, and the * maximum 'b' is 26 bits. This API can also be used just to limit the max * refcount. + * + * The "*_raw_mask" APIs return the raw bit pattern of the refcount (with a type + * of uint32_t), that the caller is supposed to decode. Other APIs that return + * os_ref_count_t return a normalized refcount where the trailing bits have been + * removed. + * + * "locked" variants aren't provided as the point of these interfaces + * is to combine flags into a refcount and be able to manipulate both + * atomically with respect to each other. 
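The comment block above describes refcounts that share one atomic word with a few flag bits: the count lives in the upper bits, the b reserved bits sit below it, the *_raw_mask calls return the whole bit pattern, and the plain mask calls return the count with the flag bits shifted off. A tiny self-contained model of that layout (plain C arithmetic, not the xnu implementation):

#include <assert.h>
#include <stdint.h>

#define REF_BITS 2                       /* two flag bits reserved below the count */

static uint32_t
ref_pack(uint32_t count, uint32_t flags)
{
    assert(flags < (1u << REF_BITS));
    return (count << REF_BITS) | flags;  /* what os_ref_init_count_mask() stores */
}

int
main(void)
{
    uint32_t raw = ref_pack(1, 0x1);     /* count = 1, low flag bit set */

    assert((raw >> REF_BITS) == 1);              /* normalized count (os_ref_get_count_mask) */
    assert((raw & ((1u << REF_BITS) - 1)) == 1); /* just the flag bits (os_ref_get_bits_mask) */

    raw += 1u << REF_BITS;               /* a retain adds 1 << b, leaving the flags alone */
    assert((raw >> REF_BITS) == 2);

    raw -= 1u << REF_BITS;               /* a release subtracts 1 << b; underflow means count < 1 << b */
    assert((raw >> REF_BITS) == 1);
    return 0;
}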
*/ /* Initialize the reference count and reserved bits */ -#define os_ref_init_mask(rc, grp, b) os_ref_init_count_mask((rc), (grp), 1, 0, (b)) -void os_ref_init_count_mask(os_ref_atomic_t *rc, struct os_refgrp *grp, os_ref_count_t init_count, - os_ref_count_t init_bits, os_ref_count_t b) +#define os_ref_init_mask(rc, b, grp, bits) os_ref_init_count_mask((rc), (b), (grp), 1, bits) +void os_ref_init_count_mask(os_ref_atomic_t *rc, uint32_t b, struct os_refgrp *grp, + os_ref_count_t init_count, uint32_t init_bits) os_error_if(init_count == 0, "Reference count must be non-zero initialized") os_error_if(b > 26, "Bitwise reference count limited to 26 bits") os_error_if(init_bits >= (1U << b), "Bits out of range"); -void os_ref_retain_mask(os_ref_atomic_t *rc, struct os_refgrp *grp, os_ref_count_t b); -static os_ref_count_t os_ref_release_mask(os_ref_atomic_t *rc, struct os_refgrp *grp, os_ref_count_t b) OS_WARN_RESULT; -static os_ref_count_t os_ref_release_relaxed_mask(os_ref_atomic_t *rc, struct os_refgrp *grp, os_ref_count_t b) OS_WARN_RESULT; -static void os_ref_release_live_mask(os_ref_atomic_t *rc, struct os_refgrp *grp, os_ref_count_t b); -bool os_ref_retain_try_mask(os_ref_atomic_t *, struct os_refgrp *grp, os_ref_count_t b) OS_WARN_RESULT; -void os_ref_retain_locked_mask(os_ref_atomic_t *rc, struct os_refgrp *grp, os_ref_count_t b); -os_ref_count_t os_ref_release_locked_mask(os_ref_atomic_t *rc, struct os_refgrp *grp, os_ref_count_t b) OS_WARN_RESULT; -os_ref_count_t os_ref_get_count_mask(os_ref_atomic_t *rc, os_ref_count_t b); - +static uint32_t os_ref_get_raw_mask(os_ref_atomic_t *rc); +static uint32_t os_ref_get_bits_mask(os_ref_atomic_t *rc, uint32_t b); +static os_ref_count_t os_ref_get_count_mask(os_ref_atomic_t *rc, uint32_t b); + +static void +os_ref_retain_mask(os_ref_atomic_t *rc, uint32_t b, struct os_refgrp *grp); +static void +os_ref_retain_acquire_mask(os_ref_atomic_t *rc, uint32_t b, struct os_refgrp *grp); +static bool +os_ref_retain_try_mask(os_ref_atomic_t *, uint32_t b, uint32_t reject_mask, + struct os_refgrp *grp) OS_WARN_RESULT; +static bool +os_ref_retain_try_acquire_mask(os_ref_atomic_t *, uint32_t b, uint32_t reject_mask, + struct os_refgrp *grp) OS_WARN_RESULT; + +static uint32_t +os_ref_release_raw_mask(os_ref_atomic_t *rc, uint32_t b, struct os_refgrp *grp) OS_WARN_RESULT; +static uint32_t +os_ref_release_raw_relaxed_mask(os_ref_atomic_t *rc, uint32_t b, struct os_refgrp *grp) OS_WARN_RESULT; +static os_ref_count_t +os_ref_release_mask(os_ref_atomic_t *rc, uint32_t b, struct os_refgrp *grp) OS_WARN_RESULT; +static os_ref_count_t +os_ref_release_relaxed_mask(os_ref_atomic_t *rc, uint32_t b, struct os_refgrp *grp) OS_WARN_RESULT; +static void +os_ref_release_live_mask(os_ref_atomic_t *rc, uint32_t b, struct os_refgrp *grp); + +#pragma GCC visibility pop #endif /* XNU_KERNEL_PRIVATE */ __END_DECLS diff --git a/libkern/os/refcnt_internal.h b/libkern/os/refcnt_internal.h index fdc26ecd2..a866e215f 100644 --- a/libkern/os/refcnt_internal.h +++ b/libkern/os/refcnt_internal.h @@ -208,7 +208,7 @@ os_ref_release_raw(os_ref_atomic_t *rc, struct os_refgrp *grp) } static inline os_ref_count_t -os_ref_release_relaxed_raw(os_ref_atomic_t *rc, struct os_refgrp *grp) +os_ref_release_raw_relaxed(os_ref_atomic_t *rc, struct os_refgrp *grp) { return os_ref_release_relaxed_internal(rc, grp); } @@ -250,7 +250,7 @@ os_ref_get_count_raw(os_ref_atomic_t *rc) #define os_ref_init_count_raw(rc, grp, count) (os_ref_init_count_raw)((rc), NULL, (count)) #define os_ref_retain_raw(rc, grp) 
(os_ref_retain_raw)((rc), NULL) #define os_ref_release_raw(rc, grp) (os_ref_release_raw)((rc), NULL) -#define os_ref_release_relaxed_raw(rc, grp) (os_ref_release_relaxed_raw)((rc), NULL) +#define os_ref_release_raw_relaxed(rc, grp) (os_ref_release_relaxed_raw)((rc), NULL) #define os_ref_release_live_raw(rc, grp) (os_ref_release_live_raw)((rc), NULL) #define os_ref_retain_try_raw(rc, grp) (os_ref_retain_try_raw)((rc), NULL) #define os_ref_retain_locked_raw(rc, grp) (os_ref_retain_locked_raw)((rc), NULL) @@ -258,42 +258,116 @@ os_ref_get_count_raw(os_ref_atomic_t *rc) #endif #if XNU_KERNEL_PRIVATE -os_ref_count_t os_ref_release_mask_internal(os_ref_atomic_t *rc, struct os_refgrp *grp, - os_ref_count_t b, memory_order release_order, memory_order dealloc_order); +#pragma GCC visibility push(hidden) + +extern void +os_ref_retain_mask_internal(os_ref_atomic_t *rc, uint32_t n, struct os_refgrp *grp); +extern void +os_ref_retain_acquire_mask_internal(os_ref_atomic_t *rc, uint32_t n, struct os_refgrp *grp); +extern bool +os_ref_retain_try_mask_internal(os_ref_atomic_t *, uint32_t n, + uint32_t reject_mask, struct os_refgrp *grp) OS_WARN_RESULT; +extern bool +os_ref_retain_try_acquire_mask_internal(os_ref_atomic_t *, uint32_t n, + uint32_t reject_mask, struct os_refgrp *grp) OS_WARN_RESULT; + +extern uint32_t +os_ref_release_barrier_mask_internal(os_ref_atomic_t *rc, uint32_t n, struct os_refgrp *grp); +extern uint32_t +os_ref_release_relaxed_mask_internal(os_ref_atomic_t *rc, uint32_t n, struct os_refgrp *grp); + +static inline uint32_t +os_ref_get_raw_mask(os_ref_atomic_t *rc) +{ + return os_ref_get_count_internal(rc); +} + +static inline uint32_t +os_ref_get_bits_mask(os_ref_atomic_t *rc, uint32_t b) +{ + return os_ref_get_raw_mask(rc) & ((1u << b) - 1); +} + +static inline os_ref_count_t +os_ref_get_count_mask(os_ref_atomic_t *rc, uint32_t b) +{ + return os_ref_get_raw_mask(rc) >> b; +} + +static inline void +os_ref_retain_mask(os_ref_atomic_t *rc, uint32_t b, struct os_refgrp *grp) +{ + os_ref_retain_mask_internal(rc, 1u << b, grp); +} + +static inline void +os_ref_retain_acquire_mask(os_ref_atomic_t *rc, uint32_t b, struct os_refgrp *grp) +{ + os_ref_retain_acquire_mask_internal(rc, 1u << b, grp); +} + +static inline bool +os_ref_retain_try_mask(os_ref_atomic_t *rc, uint32_t b, + uint32_t reject_mask, struct os_refgrp *grp) +{ + return os_ref_retain_try_mask_internal(rc, 1u << b, reject_mask, grp); +} + +static inline bool +os_ref_retain_try_acquire_mask(os_ref_atomic_t *rc, uint32_t b, + uint32_t reject_mask, struct os_refgrp *grp) +{ + return os_ref_retain_try_acquire_mask_internal(rc, 1u << b, reject_mask, grp); +} + +static inline uint32_t +os_ref_release_raw_mask(os_ref_atomic_t *rc, uint32_t b, struct os_refgrp *grp) +{ + return os_ref_release_barrier_mask_internal(rc, 1u << b, grp); +} + +static inline uint32_t +os_ref_release_raw_relaxed_mask(os_ref_atomic_t *rc, uint32_t b, struct os_refgrp *grp) +{ + return os_ref_release_relaxed_mask_internal(rc, 1u << b, grp); +} static inline os_ref_count_t -os_ref_release_mask(os_ref_atomic_t *rc, struct os_refgrp *grp, os_ref_count_t b) +os_ref_release_mask(os_ref_atomic_t *rc, uint32_t b, struct os_refgrp *grp) { - return os_ref_release_mask_internal(rc, grp, b, memory_order_release, memory_order_acquire); + return os_ref_release_barrier_mask_internal(rc, 1u << b, grp) >> b; } static inline os_ref_count_t -os_ref_release_relaxed_mask(os_ref_atomic_t *rc, struct os_refgrp *grp, os_ref_count_t b) +os_ref_release_relaxed_mask(os_ref_atomic_t *rc, 
uint32_t b, struct os_refgrp *grp) { - return os_ref_release_mask_internal(rc, grp, b, memory_order_relaxed, memory_order_relaxed); + return os_ref_release_relaxed_mask_internal(rc, 1u << b, grp) >> b; } static inline void -os_ref_release_live_mask(os_ref_atomic_t *rc, struct os_refgrp *grp, os_ref_count_t b) +os_ref_release_live_mask(os_ref_atomic_t *rc, uint32_t b, struct os_refgrp *grp) { - if (__improbable(os_ref_release_mask_internal(rc, grp, b, - memory_order_release, memory_order_relaxed) == 0)) { + uint32_t val = os_ref_release_barrier_mask_internal(rc, 1u << b, grp); + if (__improbable(val < 1u << b)) { os_ref_panic_live(rc); } } #if !OS_REFCNT_DEBUG /* remove the group argument for non-debug */ -#define os_ref_init_count_mask(rc, grp, init_c, init_b, b) (os_ref_init_count_mask)(rc, NULL, init_c, init_b, b) -#define os_ref_retain_mask(rc, grp, b) (os_ref_retain_mask)((rc), NULL, (b)) -#define os_ref_release_mask(rc, grp, b) (os_ref_release_mask)((rc), NULL, (b)) -#define os_ref_release_relaxed_mask(rc, grp, b) (os_ref_relaxed_mask)((rc), NULL, (b)) -#define os_ref_release_live_mask(rc, grp, b) (os_ref_release_live_mask)((rc), NULL, (b)) -#define os_ref_retain_try_mask(rc, grp, b) (os_ref_retain_try_mask)((rc), NULL, (b)) -#define os_ref_release_locked_mask(rc, grp, b) (os_ref_release_locked_mask)((rc), NULL, (b)) -#define os_ref_retain_locked_mask(rc, grp, b) (os_ref_retain_locked_mask)((rc), NULL, (b)) +#define os_ref_init_count_mask(rc, b, grp, init_c, init_b) (os_ref_init_count_mask)(rc, b, NULL, init_c, init_b) +#define os_ref_retain_mask(rc, b, grp) (os_ref_retain_mask)((rc), (b), NULL) +#define os_ref_retain_acquire_mask(rc, b, grp) (os_ref_retain_acquire_mask)((rc), (b), NULL) +#define os_ref_retain_try_mask(rc, b, grp) (os_ref_retain_try_mask)((rc), (b), NULL) +#define os_ref_retain_try_acquire_mask(rc, b, grp) (os_ref_retain_try_acquire_mask)((rc), (b), NULL) +#define os_ref_release_mask(rc, b, grp) (os_ref_release_mask)((rc), (b), NULL) +#define os_ref_release_relaxed_mask(rc, b, grp) (os_ref_relaxed_mask)((rc), (b), NULL) +#define os_ref_release_raw_mask(rc, b, grp) (os_ref_release_mask)((rc), (b), NULL) +#define os_ref_release_relaxed_raw_mask(rc, b, grp) (os_ref_relaxed_mask)((rc), (b), NULL) +#define os_ref_release_live_mask(rc, b, grp) (os_ref_release_live_mask)((rc), (b), NULL) #endif +#pragma GCC visibility pop #endif __END_DECLS diff --git a/libkern/os/smart_ptr.h b/libkern/os/smart_ptr.h deleted file mode 100644 index 5f89c7fec..000000000 --- a/libkern/os/smart_ptr.h +++ /dev/null @@ -1,523 +0,0 @@ -#ifndef _OS_SMART_POINTER_H -#define _OS_SMART_POINTER_H - -#include -#include - -#pragma clang diagnostic push -#pragma clang diagnostic ignored "-Wc++11-extensions" - -#if __has_attribute(trivial_abi) -# define OS_TRIVIAL_ABI __attribute__((trivial_abi)) -#else -# error Smart pointers depend on trivial_abi attribute -#endif - -#if !OS_HAS_RVALUE_REFERENCES -# error Smart pointers depend on rvalue references -#endif - -/* C++98 compatibility */ -#if !OS_HAS_NULLPTR && !defined(nullptr) -# define nullptr NULL -#endif - -#ifndef OSPTR_LOG -# define OSPTR_LOG(x, ...) 
do {} while(0) -#endif - -namespace os { -static struct no_retain_t {} no_retain; - -template -class OS_TRIVIAL_ABI smart_ptr -{ - template friend class smart_ptr; - -public: - -/* - * Default constructor, creates a null pointer - */ - smart_ptr() : pointer(nullptr) - { - OSPTR_LOG("Default construct smart_ptr\n"); - } - -#if OS_HAS_NULLPTR -/* - * Construction from a nullptr - */ - smart_ptr(os::nullptr_t) : pointer(nullptr) - { - OSPTR_LOG("Construct smart_ptr from null\n"); - } -#endif - -/* - * Construct from a raw pointer, taking a reference to the object - */ - explicit smart_ptr(T *&p) : pointer(p) - { - OSPTR_LOG("Construct smart_ptr from raw %p\n", pointer); - if (pointer != nullptr) { - _retain(pointer); - } - } - -/* - * Construct from a raw pointer, without bumping the refcount - */ - explicit smart_ptr(T *&p, no_retain_t) : pointer(p) - { - OSPTR_LOG("Construct smart_ptr from raw %p no retain\n", pointer); - } - -/* - * Copy constructor from the same smart_ptr type - */ - smart_ptr(smart_ptr const &rhs) : pointer(rhs.pointer) - { - OSPTR_LOG("Copy construct smart_ptr with %p\n", rhs.pointer); - if (pointer != nullptr) { - _retain(pointer); - } - } - -#if !LIBKERN_NO_MEMBER_TEMPLATES -/* - * Allows copy of a smart_ptr from a smart_ptr - * if U is convertible to T. For example, if T is a base class of U - */ - template - smart_ptr(smart_ptr const &rhs) : pointer(rhs.get()) - { - OSPTR_LOG("Copy construct smart_ptr with compatible %p\n", rhs.pointer); - if (pointer != nullptr) { - _retain(pointer); - } - } -#endif - -/* - * Assign to an OSPointer from a raw pointer - */ - smart_ptr & - operator=(T *&rhs) - { - OSPTR_LOG("Assign smart_ptr with replacing %p with raw %p\n", pointer, rhs); - smart_ptr(rhs).swap(*this); - return *this; - } - -#if OS_HAS_NULLPTR -/* - * Assign to an OSPointer from a null pointer - */ - smart_ptr & - operator=(os::nullptr_t) - { - OSPTR_LOG("Assign smart_ptr to null replacing %p\n", pointer); - smart_ptr().swap(*this); - return *this; - } -#endif - -/* - * Assign to a smart_ptr from a smart_ptr of the same type - */ - smart_ptr & - operator=(smart_ptr &rhs) - { - OSPTR_LOG("Assign smart_ptr replacing %p with %p\n", pointer, rhs.pointer); - smart_ptr(rhs).swap(*this); - return *this; - } - -#if !LIBKERN_NO_MEMBER_TEMPLATES -/* - * Allows assignment of a smart_ptr from a smart_ptr - * if U is convertible to T. For example, if T is a base class of U. 
- */ - template - smart_ptr & - operator=(smart_ptr const &rhs) - { - OSPTR_LOG("Assign smart_ptr to compatible replacing %p with %p\n", pointer, rhs.pointer); - smart_ptr(rhs.get()).swap(*this); - return *this; - } -#endif - -/* - * Move support - */ - -#if OS_HAS_RVALUE_REFERENCES -/* - * Move-construct from a different smart_ptr of the same pointer type - */ - smart_ptr(smart_ptr &&rhs) : pointer(rhs.pointer) - { - OSPTR_LOG("Move construct smart_ptr with %p\n", rhs.pointer); - rhs.pointer = nullptr; - } - -/* - * Move-construct from a raw pointer - */ - smart_ptr(T *&&p) : pointer(p) - { - OSPTR_LOG("Move construct smart_ptr with %p\n", pointer); - if (pointer != nullptr) { - _retain(pointer); - } - p = nullptr; - } - -/* - * Move-construct from a raw pointer without bumping the refcount - */ - smart_ptr(T *&&p, no_retain_t) : pointer(p) - { - OSPTR_LOG("Move construct smart_ptr with %p no retain\n", pointer); - p = nullptr; - } - -/* - * Move-assign to a smart_ptr from a raw pointer - */ - smart_ptr & - operator=(T *&&rhs) - { - OSPTR_LOG("Move assign smart_ptr replacing %p with raw %p\n", pointer, rhs); - smart_ptr(os::move(rhs)).swap(*this); - rhs = nullptr; - return *this; - } - -/* - * Move-assign from a different smart_ptr of the same type - */ - smart_ptr & - operator=(smart_ptr &&rhs) - { - OSPTR_LOG("Move assign smart_ptr replacing %p with %p\n", pointer, rhs.pointer); - smart_ptr(os::move(rhs)).swap(*this); - return *this; - } - -/* - * Move from a different smart_ptr with a compatible pointer type - */ - template - smart_ptr(smart_ptr &&rhs) : pointer(rhs.pointer) - { - OSPTR_LOG("Move construct smart_ptr with compatible %p\n", rhs.pointer); - rhs.pointer = nullptr; - } - - template - smart_ptr & - operator=(smart_ptr &&rhs) - { - OSPTR_LOG("Move assign smart_ptr replacing %p with compatible %p\n", pointer, rhs.pointer); - smart_ptr(os::move(rhs)).swap(*this); - return *this; - } -#endif - -/* - * Destructor - decreases the object's reference count - */ - ~smart_ptr() - { - OSPTR_LOG("Destroy smart_ptr with %p\n", pointer); - if (pointer) { - _release(pointer); - } - } - -/* - * Create a new object of type T and wrap it in a smart_ptr. The object will have - * a reference count of 1, so destruction of the smart_ptr will result in the - * object being freed if the smart_ptr wasn't copied first. 
- */ - static inline smart_ptr - alloc() - { - return smart_ptr(_alloc(), no_retain); - } - - void - reset() - { - smart_ptr().swap(*this); - } - - T * - get() const - { - return pointer; - } - - T ** - get_for_out_param() - { - reset(); - return &pointer; - } - -/* - * Take ownership of object from raw pointer - */ - void - attach(T *&p) - { - OSPTR_LOG("Attach smart_ptr with %p\n", p); - smart_ptr(p, no_retain).swap(*this); - } - - void - attach(T *&&p) - { - OSPTR_LOG("Move attach smart_ptr with %p\n", p); - smart_ptr(os::move(p), no_retain).swap(*this); - } - -/* Return and drop ownership of pointer with NO release() */ - T * - detach() - { - OSPTR_LOG("Detach smart_ptr with %p\n", pointer); - T *ret = pointer; - pointer = nullptr; - return ret; - } - - T * - operator->() const - { - OSPTR_LOG("Dereference smart_ptr with %p\n", pointer); - return pointer; - } - - explicit - operator bool() const - { - return pointer != nullptr; - } - - inline void - swap(smart_ptr &p) - { - T *temp = pointer; - pointer = p.pointer; - p.pointer = temp; - } - -/* swap pointers to the same type but with different policies */ - template - void - swap(smart_ptr &p) - { - if (p.pointer) { - _retain(p.pointer); - } - if (pointer) { - smart_ptr::_retain(pointer); - } - - T *temp = pointer; - pointer = p.pointer; - p.pointer = temp; - - if (p.pointer) { - _release(p.pointer); - } - if (pointer) { - smart_ptr::_release(pointer); - } - } - - template - smart_ptr - const_pointer_cast() const & - { - OSPTR_LOG("const_pointer_cast smart_ptr with %p\n", pointer); - return smart_ptr(const_cast(pointer)); - } - - template - smart_ptr - const_pointer_cast() && - { - OSPTR_LOG("const_pointer_cast move smart_ptr with %p\n", pointer); - U *newPointer = const_cast(detach()); - return smart_ptr(os::move(newPointer), no_retain); - } - - template - smart_ptr - static_pointer_cast() const & - { - OSPTR_LOG("static_pointer_cast smart_ptr with %p\n", pointer); - return smart_ptr(static_cast(pointer)); - } - - template - smart_ptr - static_pointer_cast() && - { - OSPTR_LOG("static_pointer_cast move smart_ptr with %p\n", pointer); - return smart_ptr(static_cast(detach()), no_retain); - } - - template - smart_ptr - dynamic_pointer_cast() const & - { - OSPTR_LOG("dynamic_pointer_cast smart_ptr with %p\n", pointer); - return smart_ptr(Policy::template dyn_cast(pointer)); - } - - template - smart_ptr - dynamic_pointer_cast() && - { - OSPTR_LOG("dynamic_pointer_cast move smart_ptr with %p\n", pointer); - U *newPointer = Policy::template dyn_cast(pointer); - - if (newPointer != nullptr) { - detach(); - } else { - reset(); - } - return smart_ptr(os::move(newPointer), no_retain); - } - -private: - static inline void - _retain(T *obj) - { - OSPTR_LOG(" %s with %p\n", __FUNCTION__, obj); - Policy::retain(obj); - } - - static inline void - _release(T *obj) - { - OSPTR_LOG(" %s with %p\n", __FUNCTION__, obj); - Policy::release(obj); - } - - static inline T * - _alloc() - { - OSPTR_LOG(" %s\n", __FUNCTION__); - return Policy::template alloc(); - } - - T *pointer; -}; - -/* - * Comparison - */ - -template -inline bool -operator==(smart_ptr const &a, smart_ptr const &b) -{ - return a.get() == b.get(); -} - -template -inline bool -operator!=(smart_ptr const &a, smart_ptr const &b) -{ - return a.get() != b.get(); -} - -template -inline bool -operator==(smart_ptr const &a, smart_ptr const &b) -{ - return a.get() == b.get(); -} - -template -inline bool -operator!=(smart_ptr const &a, smart_ptr const &b) -{ - return a.get() != b.get(); -} - -/* - * 
Comparison with nullptr - */ - -#if OS_HAS_NULLPTR -template -inline bool -operator==(smart_ptr const &p, os::nullptr_t) -{ - return p.get() == nullptr; -} - -template inline bool -operator==(os::nullptr_t, smart_ptr const &p) -{ - return p.get() == nullptr; -} - -template -inline bool -operator!=(smart_ptr const &p, os::nullptr_t) -{ - return p.get() != nullptr; -} - -template -inline bool -operator!=(os::nullptr_t, smart_ptr const &p) -{ - return p.get() != nullptr; -} -#endif - -/* - * Comparison with raw pointer - */ - -template -inline bool -operator==(smart_ptr const &p, const os::remove_const_t *other) -{ - return p.get() == other; -} - -template -inline bool -operator==(const os::remove_const_t *other, smart_ptr const &p) -{ - return other == p.get(); -} - -template -inline bool -operator!=(smart_ptr const &p, const os::remove_const_t *other) -{ - return p.get() != other; -} - -template -inline bool -operator!=(const os::remove_const_t *other, smart_ptr const &p) -{ - return other != p.get(); -} -}; - -#pragma clang diagnostic pop -#endif /* _OS_SMART_POINTER_H */ diff --git a/libkern/ptrauth_utils.c b/libkern/ptrauth_utils.c new file mode 100644 index 000000000..8385a23d1 --- /dev/null +++ b/libkern/ptrauth_utils.c @@ -0,0 +1,108 @@ +/* + * Copyright (c) 2020 Apple Inc. All rights reserved. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. + * + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. 
+ * + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ + */ + +#include +#include // for panic() + +#include + + +#if __has_feature(ptrauth_calls) + +/* + * ptrauth_utils_sign_blob_generic + * + * Sign a blob of data with the GA key + * + */ +ptrauth_generic_signature_t +ptrauth_utils_sign_blob_generic(void * ptr, size_t len_bytes, uint64_t data, int flags) +{ + ptrauth_generic_signature_t sig = 0; + + uint64_t rounds = len_bytes / sizeof(uintptr_t); + size_t ntrailing = len_bytes % sizeof(uintptr_t); + uintptr_t trailing = 0; + + if (ptr == NULL) { + return 0; + } + + /* If address diversification is requested, mix the blob address with the salt */ + if (flags & PTRAUTH_ADDR_DIVERSIFY) { + data ^= (uint64_t)ptr; + } + + /* First round adds salt */ + sig = ptrauth_sign_generic_data(sig, data); + + /* Calculate an additive signature of the buffer */ + for (uint64_t i = 0; i < rounds; i++) { + sig = ptrauth_sign_generic_data(*(uintptr_t *)ptr, sig); + ptr += sizeof(uintptr_t); + } + + /* ptrauth_sign_generic_data operates on pointer-sized values only, + * so we need to handle trailing bytes for the non-pointer-aligned case */ + if (ntrailing) { + memcpy(&trailing, ptr, ntrailing); + sig = ptrauth_sign_generic_data(trailing, sig); + } + + return sig; +} + +/* + * ptrauth_utils_auth_blob_generic + * + * Authenticate signature produced by ptrauth_utils_sign_blob_generic + */ +void +ptrauth_utils_auth_blob_generic(void * ptr, size_t len_bytes, uint64_t data, int flags, ptrauth_generic_signature_t signature) +{ + ptrauth_generic_signature_t calculated_signature = 0; + + if (ptr == NULL) { + if (flags & PTRAUTH_NON_NULL) { + panic("ptrauth_utils_auth_blob_generic: ptr must not be NULL"); + } else { + return; + } + } + + if ((calculated_signature = ptrauth_utils_sign_blob_generic(ptr, len_bytes, data, flags)) == signature) { + return; + } else { + panic("signature mismatch for %lu bytes at %p, calculated %lx vs %lx", len_bytes, + ptr, + calculated_signature, + signature); + } +} + +#endif //!ptrauth_calls diff --git a/libkern/stdio/scanf.c b/libkern/stdio/scanf.c index 6c1968cde..0c634abf4 100644 --- a/libkern/stdio/scanf.c +++ b/libkern/stdio/scanf.c @@ -61,21 +61,20 @@ * SUCH DAMAGE. */ +#include +#include +#include #include +#include + +quad_t strtoq(const char *, char **, int); +u_quad_t strtouq(const char *, char **, int); -#if 0 /* XXX coming soon */ -#include -#else static inline int isspace(char c) { return c == ' ' || c == '\t' || c == '\n' || c == '\12'; } -#endif -#include -#include -#include -#include #define BUF 32 /* Maximum length of numeric string. */ @@ -115,6 +114,9 @@ isspace(char c) static const u_char *__sccl(char *, const u_char *); +int sscanf(const char *, const char *, ...); +int vsscanf(const char *, char const *, va_list); + int sscanf(const char *ibuf, const char *fmt, ...) { @@ -130,18 +132,16 @@ sscanf(const char *ibuf, const char *fmt, ...) 
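Stepping back to the ptrauth_utils.c additions above before the vsscanf() hunk continues: a typical consumer would sign a buffer when it is produced and authenticate it before use, letting the helper panic on mismatch. A hypothetical sketch, assuming arm64e (ptrauth_calls) and that the PTRAUTH_* flag macros from the elided ptrauth_utils header are in scope:

#include <ptrauth.h>   /* ptrauth_generic_signature_t */
#include <stddef.h>
#include <stdint.h>

#if __has_feature(ptrauth_calls)

/* Prototypes copied from the new file; normally provided by its header. */
extern ptrauth_generic_signature_t ptrauth_utils_sign_blob_generic(void *, size_t, uint64_t, int);
extern void ptrauth_utils_auth_blob_generic(void *, size_t, uint64_t, int, ptrauth_generic_signature_t);

struct guarded_blob {
    uint8_t payload[64];
    ptrauth_generic_signature_t sig;   /* GA-key signature over payload */
};

static void
blob_seal(struct guarded_blob *b, uint64_t salt)
{
    /* PTRAUTH_ADDR_DIVERSIFY mixes &payload into the salt, so a blob copied
     * to another address no longer authenticates */
    b->sig = ptrauth_utils_sign_blob_generic(b->payload, sizeof(b->payload),
        salt, PTRAUTH_ADDR_DIVERSIFY);
}

static void
blob_check(struct guarded_blob *b, uint64_t salt)
{
    /* returns quietly on success, panics on mismatch or unexpected NULL */
    ptrauth_utils_auth_blob_generic(b->payload, sizeof(b->payload),
        salt, PTRAUTH_ADDR_DIVERSIFY | PTRAUTH_NON_NULL, b->sig);
}

#endif /* ptrauth_calls */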
int vsscanf(const char *inp, char const *fmt0, va_list ap) { - int inr; + ssize_t inr; const u_char *fmt = (const u_char *)fmt0; - int c; /* character from format, or conversion */ - size_t width; /* field width, or 0 */ + ssize_t width; /* field width, or 0 */ char *p; /* points into all kinds of strings */ - int n; /* handy integer */ int flags; /* flags as defined above */ char *p0; /* saves original value of p when necessary */ - int nassigned; /* number of fields assigned */ - int nconversions; /* number of conversions */ - int nread; /* number of characters consumed from fp */ - int base; /* base argument to conversion function */ + int nassigned = 0; /* number of fields assigned */ + int nconversions = 0; /* number of conversions */ + int nread = 0; /* number of characters consumed from fp */ + int base = 0; /* base argument to conversion function */ char ccltab[256]; /* character class table for %[...] */ char buf[BUF]; /* buffer for numeric conversions */ @@ -149,14 +149,10 @@ vsscanf(const char *inp, char const *fmt0, va_list ap) static short basefix[17] = { 10, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16 }; - inr = strlen(inp); + inr = (ssize_t)strlen(inp); - nassigned = 0; - nconversions = 0; - nread = 0; - base = 0; /* XXX just to keep gcc happy */ for (;;) { - c = *fmt++; + char c = (char)*fmt++; /* character from format, or conversion */ if (c == 0) { return nassigned; } @@ -177,7 +173,8 @@ vsscanf(const char *inp, char const *fmt0, va_list ap) * switch on the format. continue if done; * break once format type is derived. */ -again: c = *fmt++; +again: + c = (char)*fmt++; switch (c) { case '%': literal: @@ -281,15 +278,15 @@ literal: continue; } if (flags & SHORTSHORT) { - *va_arg(ap, char *) = nread; + *va_arg(ap, char *) = (char)nread; } else if (flags & SHORT) { - *va_arg(ap, short *) = nread; + *va_arg(ap, short *) = (short)nread; } else if (flags & LONG) { - *va_arg(ap, long *) = nread; + *va_arg(ap, long *) = (long)nread; } else if (flags & LONGLONG) { - *va_arg(ap, long long *) = nread; + *va_arg(ap, long long *) = (long long)nread; } else { - *va_arg(ap, int *) = nread; + *va_arg(ap, int *) = (int)nread; } continue; } @@ -333,8 +330,9 @@ literal: if (flags & SUPPRESS) { size_t sum = 0; for (;;) { - if ((n = inr) < (int)width) { - sum += n; + ssize_t n = inr; + if (n < width) { + sum += (size_t)n; width -= n; inp += n; if (sum == 0) { @@ -342,7 +340,7 @@ literal: } break; } else { - sum += width; + sum += (size_t)width; inr -= width; inp += width; break; @@ -359,12 +357,13 @@ literal: nconversions++; break; - case CT_CCL: + case CT_CCL: { /* scan a (nonempty) character class (sets NOSKIP) */ if (width == 0) { - width = (size_t)~0; /* `infinity' */ + width = SSIZE_MAX; /* `infinity' */ } /* take only those things in the class */ + ptrdiff_t n; if (flags & SUPPRESS) { n = 0; while (ccltab[(unsigned char)*inp]) { @@ -409,14 +408,15 @@ literal: nread += n; nconversions++; break; + } case CT_STRING: /* like CCL, but zero-length string OK, & no NOSKIP */ if (width == 0) { - width = (size_t)~0; + width = SSIZE_MAX; } if (flags & SUPPRESS) { - n = 0; + size_t n = 0; while (!isspace(*inp)) { n++; inr--; @@ -450,17 +450,9 @@ literal: case CT_INT: /* scan an integer as if by the conversion function */ -#ifdef hardway - if (width == 0 || width > sizeof(buf) - 1) { + if (width <= 0 || width > (ssize_t)(sizeof(buf) - 1)) { width = sizeof(buf) - 1; } -#else - /* size_t is unsigned, hence this optimisation */ - if (--width > sizeof(buf) - 2) { - width = sizeof(buf) - 2; - } 
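As a usage sketch for the reworked kernel sscanf()/vsscanf(): the snippet below parses a dotted version string and uses %n to report how many characters were consumed, matching the %n handling and the explicit narrowing casts added above. It assumes the %d conversion behaves as in the FreeBSD-derived implementation this file is based on; the function and format string are invented for the example.

/* Hypothetical caller of the kernel's sscanf(); prototype as declared above. */
static int
parse_version(const char *str, int *major, int *minor, int *patch)
{
    int consumed = 0;

    /* Three assigned conversions expected; %n does not count as one. */
    if (sscanf(str, "%d.%d.%d%n", major, minor, patch, &consumed) != 3) {
        return -1;
    }
    return consumed;   /* characters eaten, e.g. 5 for "1.2.3" */
}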
- width++; -#endif flags |= SIGNOK | NDIGITS | NZDIGITS; for (p = buf; width; width--) { c = *inp; @@ -568,7 +560,7 @@ ok: } goto match_failure; } - c = ((u_char *)p)[-1]; + c = p[-1]; if (c == 'x' || c == 'X') { --p; inp--; @@ -579,7 +571,7 @@ ok: *p = 0; if ((flags & UNSIGNED) == 0) { - res = strtoq(buf, (char **)NULL, base); + res = (u_quad_t)strtoq(buf, (char **)NULL, base); } else { res = strtouq(buf, (char **)NULL, base); } @@ -587,15 +579,15 @@ ok: *va_arg(ap, void **) = (void *)(uintptr_t)res; } else if (flags & SHORTSHORT) { - *va_arg(ap, char *) = res; + *va_arg(ap, char *) = (char)res; } else if (flags & SHORT) { - *va_arg(ap, short *) = res; + *va_arg(ap, short *) = (short)res; } else if (flags & LONG) { - *va_arg(ap, long *) = res; + *va_arg(ap, long *) = (long)res; } else if (flags & LONGLONG) { - *va_arg(ap, long long *) = res; + *va_arg(ap, long long *) = (long long)res; } else { - *va_arg(ap, int *) = res; + *va_arg(ap, int *) = (int)res; } nassigned++; } @@ -619,10 +611,10 @@ match_failure: static const u_char * __sccl(char *tab, const u_char *fmt) { - int c, n, v; + char v; /* first `clear' the whole table */ - c = *fmt++; /* first char hat => negated scanset */ + int c = *fmt++; /* first char hat => negated scanset */ if (c == '^') { v = 1; /* default => accept */ c = *fmt++; /* get new first char */ @@ -644,9 +636,10 @@ __sccl(char *tab, const u_char *fmt) */ v = 1 - v; for (;;) { + int n; tab[c] = v; /* take character c */ doswitch: - n = *fmt++; /* and examine the next */ + n = *fmt++; switch (n) { case 0: /* format ended too soon */ return fmt - 1; diff --git a/libkern/zlib/deflate.c b/libkern/zlib/deflate.c index f902d2c9e..64677feb5 100644 --- a/libkern/zlib/deflate.c +++ b/libkern/zlib/deflate.c @@ -708,7 +708,7 @@ deflate(z_streamp strm, int flush) if (s->status == NAME_STATE) { if (s->gzhead->name != NULL) { uInt beg = s->pending; /* start of bytes to update crc */ - int val; + Bytef val; do { if (s->pending == s->pending_buf_size) { @@ -739,7 +739,7 @@ deflate(z_streamp strm, int flush) if (s->status == COMMENT_STATE) { if (s->gzhead->comment != NULL) { uInt beg = s->pending; /* start of bytes to update crc */ - int val; + Bytef val; do { if (s->pending == s->pending_buf_size) { diff --git a/libkern/zlib/deflate.h b/libkern/zlib/deflate.h index a2c347a31..d3af250ee 100644 --- a/libkern/zlib/deflate.h +++ b/libkern/zlib/deflate.h @@ -342,8 +342,8 @@ void _tr_stored_block OF((deflate_state *s, charf *buf, ulg stored_len, flush = (s->last_lit == s->lit_bufsize-1); \ } # define _tr_tally_dist(s, distance, length, flush) \ - { uch len = (length); \ - ush dist = (distance); \ + { uch len = (uch)(length); \ + ush dist = (ush)(distance); \ s->d_buf[s->last_lit] = dist; \ s->l_buf[s->last_lit++] = len; \ dist--; \ diff --git a/libkern/zlib/infback.c b/libkern/zlib/infback.c index f3151ac8a..143ec6343 100644 --- a/libkern/zlib/infback.c +++ b/libkern/zlib/infback.c @@ -41,6 +41,7 @@ #include "inftrees.h" #include "inflate.h" #include "inffast.h" +#include /* function prototypes */ local void fixedtables OF((struct inflate_state FAR *state)); @@ -484,6 +485,7 @@ inflateBack(z_streamp strm, in_func in, void FAR *in_desc, out_func out, Tracev((stderr, "inflate: codes ok\n")); state->mode = LEN; + OS_FALLTHROUGH; case LEN: /* use inflate_fast() if we have enough input and output */ if (have >= 6 && left >= 258) { diff --git a/libkern/zlib/inffast.c b/libkern/zlib/inffast.c index 133374e40..98e393491 100644 --- a/libkern/zlib/inffast.c +++ b/libkern/zlib/inffast.c @@ -1,5 
+1,5 @@ /* - * Copyright (c) 2008-2016 Apple Inc. All rights reserved. + * Copyright (c) 2008-2016, 2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -38,25 +38,6 @@ #ifndef ASMINF -/* Allow machine dependent optimization for post-increment or pre-increment. - Based on testing to date, - Pre-increment preferred for: - - PowerPC G3 (Adler) - - MIPS R5000 (Randers-Pehrson) - Post-increment preferred for: - - none - No measurable difference: - - Pentium III (Anderson) - - M68060 (Nikl) - */ -#ifdef POSTINC -# define OFF 0 -# define PUP(a) *(a)++ -#else -# define OFF 1 -# define PUP(a) *++(a) -#endif - /* Decode literal, length, and distance codes and write out the resulting literal and match bytes until either not enough input or output is @@ -125,9 +106,9 @@ inflate_fast(z_streamp strm, unsigned start) /* copy state to local variables */ state = (struct inflate_state FAR *)strm->state; - in = strm->next_in - OFF; + in = strm->next_in; last = in + (strm->avail_in - 5); - out = strm->next_out - OFF; + out = strm->next_out; beg = out - (start - strm->avail_out); end = out + (strm->avail_out - 257); #ifdef INFLATE_STRICT @@ -148,9 +129,9 @@ inflate_fast(z_streamp strm, unsigned start) input data or output space */ do { if (bits < 15) { - hold += (unsigned long)(PUP(in)) << bits; + hold += (unsigned long)(*in++) << bits; bits += 8; - hold += (unsigned long)(PUP(in)) << bits; + hold += (unsigned long)(*in++) << bits; bits += 8; } this = lcode[hold & lmask]; @@ -163,14 +144,14 @@ inflate_fast(z_streamp strm, unsigned start) Tracevv((stderr, this.val >= 0x20 && this.val < 0x7f ? "inflate: literal '%c'\n" : "inflate: literal 0x%02x\n", this.val)); - PUP(out) = (unsigned char)(this.val); + *out++ = (unsigned char)(this.val); } else if (op & 16) { /* length base */ len = (unsigned)(this.val); op &= 15; /* number of extra bits */ if (op) { if (bits < op) { - hold += (unsigned long)(PUP(in)) << bits; + hold += (unsigned long)(*in++) << bits; bits += 8; } len += (unsigned)hold & ((1U << op) - 1); @@ -179,9 +160,9 @@ inflate_fast(z_streamp strm, unsigned start) } Tracevv((stderr, "inflate: length %u\n", len)); if (bits < 15) { - hold += (unsigned long)(PUP(in)) << bits; + hold += (unsigned long)(*in++) << bits; bits += 8; - hold += (unsigned long)(PUP(in)) << bits; + hold += (unsigned long)(*in++) << bits; bits += 8; } this = dcode[hold & dmask]; @@ -194,10 +175,10 @@ inflate_fast(z_streamp strm, unsigned start) dist = (unsigned)(this.val); op &= 15; /* number of extra bits */ if (bits < op) { - hold += (unsigned long)(PUP(in)) << bits; + hold += (unsigned long)(*in++) << bits; bits += 8; if (bits < op) { - hold += (unsigned long)(PUP(in)) << bits; + hold += (unsigned long)(*in++) << bits; bits += 8; } } @@ -220,13 +201,13 @@ inflate_fast(z_streamp strm, unsigned start) state->mode = BAD; break; } - from = window - OFF; + from = window; if (write == 0) { /* very common case */ from += wsize - op; if (op < len) { /* some from window */ len -= op; do { - PUP(out) = PUP(from); + *out++ = *from++; } while (--op); from = out - dist; /* rest from output */ } @@ -237,14 +218,14 @@ inflate_fast(z_streamp strm, unsigned start) if (op < len) { /* some from end of window */ len -= op; do { - PUP(out) = PUP(from); + *out++ = *from++; } while (--op); - from = window - OFF; + from = window; if (write < len) { /* some from start of window */ op = write; len -= op; do { - PUP(out) = PUP(from); + *out++ = *from++; } while (--op); from = out - dist; /* rest from output */ } @@ -255,35 
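The deleted OFF/PUP machinery only existed so zlib could pick pre- or post-increment addressing per CPU; with the macros gone, inflate_fast() spells the post-increment form directly. A host-side illustration of why the two spellings read the same bytes (the macros are re-created locally purely for the demonstration):

#include <assert.h>

#define OFF 1            /* the old default: pre-increment flavour */
#define PUP(a) *++(a)

static void
pup_equivalence(const unsigned char *next_in)
{
    const unsigned char *in_old = next_in - OFF;   /* old code biased the pointer */
    const unsigned char *in_new = next_in;         /* new code does not */

    unsigned byte_old = PUP(in_old);   /* pre-increment, then read: next_in[0] */
    unsigned byte_new = *in_new++;     /* read next_in[0], then post-increment */

    assert(byte_old == byte_new);
    assert(in_old == in_new);          /* both now point at next_in + 1 */
}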
+236,35 @@ inflate_fast(z_streamp strm, unsigned start) if (op < len) { /* some from window */ len -= op; do { - PUP(out) = PUP(from); + *out++ = *from++; } while (--op); from = out - dist; /* rest from output */ } } while (len > 2) { - PUP(out) = PUP(from); - PUP(out) = PUP(from); - PUP(out) = PUP(from); + *out++ = *from++; + *out++ = *from++; + *out++ = *from++; len -= 3; } if (len) { - PUP(out) = PUP(from); + *out++ = *from++; if (len > 1) - PUP(out) = PUP(from); + *out++ = *from++; } } else { from = out - dist; /* copy direct from output */ do { /* minimum length is three */ - PUP(out) = PUP(from); - PUP(out) = PUP(from); - PUP(out) = PUP(from); + *out++ = *from++; + *out++ = *from++; + *out++ = *from++; len -= 3; } while (len > 2); if (len) { - PUP(out) = PUP(from); + *out++ = *from++; if (len > 1) - PUP(out) = PUP(from); + *out++ = *from++; } } } @@ -320,8 +301,8 @@ inflate_fast(z_streamp strm, unsigned start) hold &= (1U << bits) - 1; /* update state and return */ - strm->next_in = in + OFF; - strm->next_out = out + OFF; + strm->next_in = in; + strm->next_out = out; strm->avail_in = (unsigned)(in < last ? 5 + (last - in) : 5 - (in - last)); strm->avail_out = (unsigned)(out < end ? 257 + (end - out) : 257 - (out - end)); diff --git a/libkern/zlib/inflate.c b/libkern/zlib/inflate.c index e5d688d3a..31f3bdf9e 100644 --- a/libkern/zlib/inflate.c +++ b/libkern/zlib/inflate.c @@ -1,8 +1,8 @@ /* - * Copyright (c) 2008 Apple Inc. All rights reserved. + * Copyright (c) 2008-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* inflate.c -- zlib decompression @@ -111,6 +111,7 @@ #include "inftrees.h" #include "inflate.h" #include "inffast.h" +#include #ifdef MAKEFIXED # ifndef BUILDFIXED @@ -671,6 +672,7 @@ inflate(z_streamp strm, int flush) if (state->flags & 0x0200) CRC2(state->check, hold); INITBITS(); state->mode = TIME; + OS_FALLTHROUGH; case TIME: NEEDBITS(32); if (state->head != Z_NULL) @@ -678,6 +680,7 @@ inflate(z_streamp strm, int flush) if (state->flags & 0x0200) CRC4(state->check, hold); INITBITS(); state->mode = OS; + OS_FALLTHROUGH; case OS: NEEDBITS(16); if (state->head != Z_NULL) { @@ -687,6 +690,7 @@ inflate(z_streamp strm, int flush) if (state->flags & 0x0200) CRC2(state->check, hold); INITBITS(); state->mode = EXLEN; + OS_FALLTHROUGH; case EXLEN: if (state->flags & 0x0400) { NEEDBITS(16); @@ -699,6 +703,7 @@ inflate(z_streamp strm, int flush) else if (state->head != Z_NULL) state->head->extra = Z_NULL; state->mode = EXTRA; + OS_FALLTHROUGH; case EXTRA: if (state->flags & 0x0400) { copy = state->length; @@ -721,6 +726,7 @@ inflate(z_streamp strm, int flush) } state->length = 0; state->mode = NAME; + OS_FALLTHROUGH; case NAME: if (state->flags & 0x0800) { if (have == 0) goto inf_leave; @@ -730,7 +736,7 @@ inflate(z_streamp strm, int flush) if (state->head != Z_NULL && state->head->name != Z_NULL && state->length < state->head->name_max) - state->head->name[state->length++] = len; + state->head->name[state->length++] = (Bytef)len; } while (len && copy < have); if (state->flags & 0x0200) state->check = z_crc32(state->check, next, copy); @@ -742,6 +748,7 @@ inflate(z_streamp strm, int flush) state->head->name = Z_NULL; state->length = 0; state->mode = COMMENT; + OS_FALLTHROUGH; case COMMENT: if (state->flags & 0x1000) { if (have == 0) goto inf_leave; @@ -751,7 +758,7 @@ inflate(z_streamp strm, int flush) if (state->head != Z_NULL && state->head->comment != Z_NULL && state->length < state->head->comm_max) - state->head->comment[state->length++] = len; + state->head->comment[state->length++] = (Bytef)len; } while (len && copy < have); if (state->flags & 0x0200) state->check = z_crc32(state->check, next, copy); @@ -762,6 +769,7 @@ inflate(z_streamp strm, int flush) else if (state->head != Z_NULL) state->head->comment = Z_NULL; state->mode = HCRC; + OS_FALLTHROUGH; case HCRC: if (state->flags & 0x0200) { NEEDBITS(16); @@ -779,12 +787,15 @@ inflate(z_streamp strm, int flush) strm->adler = state->check = z_crc32(0L, Z_NULL, 0); state->mode = TYPE; break; +#else + OS_FALLTHROUGH; #endif case DICTID: NEEDBITS(32); strm->adler = state->check = REVERSE(hold); INITBITS(); state->mode = DICT; + OS_FALLTHROUGH; case DICT: if (state->havedict == 0) { RESTORE(); @@ -792,8 +803,10 @@ inflate(z_streamp strm, int flush) } strm->adler = state->check = adler32(0L, Z_NULL, 0); state->mode = TYPE; + OS_FALLTHROUGH; case TYPE: if (flush == Z_BLOCK) goto inf_leave; + OS_FALLTHROUGH; case TYPEDO: if (state->last) { BYTEBITS(); @@ -839,6 +852,7 @@ inflate(z_streamp strm, int flush) state->length)); INITBITS(); state->mode = COPY; + OS_FALLTHROUGH; case COPY: copy = state->length; if (copy) { @@ -874,6 +888,7 @@ inflate(z_streamp strm, int flush) Tracev((stderr, "inflate: table sizes ok\n")); state->have = 0; state->mode = LENLENS; + OS_FALLTHROUGH; case LENLENS: while (state->have < state->ncode) { NEEDBITS(3); @@ -895,6 +910,7 @@ inflate(z_streamp strm, int flush) Tracev((stderr, "inflate: code lengths ok\n")); state->have = 0; state->mode = CODELENS; + OS_FALLTHROUGH; 
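Every OS_FALLTHROUGH added above marks an intentional fall-through between adjacent cases of the inflate state machine, so newer compilers' implicit-fallthrough warnings stay quiet without changing behaviour. A minimal sketch of the same annotation pattern; the enum, the function, and the <os/base.h> location of the macro are assumptions here:

#include <os/base.h>   /* assumed home of OS_FALLTHROUGH */

enum demo_state { DEMO_HEADER, DEMO_BODY, DEMO_DONE };

static void
demo_step(enum demo_state *s)
{
    switch (*s) {
    case DEMO_HEADER:
        /* ... consume the header ... */
        *s = DEMO_BODY;
        OS_FALLTHROUGH;        /* deliberate: continue straight into BODY */
    case DEMO_BODY:
        /* ... consume the body ... */
        *s = DEMO_DONE;
        break;
    case DEMO_DONE:
        break;
    }
}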
case CODELENS: while (state->have < state->nlen + state->ndist) { for (;;) { @@ -969,6 +985,7 @@ inflate(z_streamp strm, int flush) } Tracev((stderr, "inflate: codes ok\n")); state->mode = LEN; + OS_FALLTHROUGH; case LEN: if (have >= 6 && left >= 258) { RESTORE(); @@ -1012,6 +1029,7 @@ inflate(z_streamp strm, int flush) } state->extra = (unsigned)(this.op) & 15; state->mode = LENEXT; + OS_FALLTHROUGH; case LENEXT: if (state->extra) { NEEDBITS(state->extra); @@ -1020,6 +1038,7 @@ inflate(z_streamp strm, int flush) } Tracevv((stderr, "inflate: length %u\n", state->length)); state->mode = DIST; + OS_FALLTHROUGH; case DIST: for (;;) { this = state->distcode[BITS(state->distbits)]; @@ -1045,6 +1064,7 @@ inflate(z_streamp strm, int flush) state->offset = (unsigned)this.val; state->extra = (unsigned)(this.op) & 15; state->mode = DISTEXT; + OS_FALLTHROUGH; case DISTEXT: if (state->extra) { NEEDBITS(state->extra); @@ -1065,6 +1085,7 @@ inflate(z_streamp strm, int flush) } Tracevv((stderr, "inflate: distance %u\n", state->offset)); state->mode = MATCH; + OS_FALLTHROUGH; case MATCH: if (left == 0) goto inf_leave; copy = out - left; @@ -1120,6 +1141,7 @@ inflate(z_streamp strm, int flush) } #ifdef GUNZIP state->mode = LENGTH; + OS_FALLTHROUGH; case LENGTH: if (state->wrap && state->flags) { NEEDBITS(32); @@ -1133,6 +1155,7 @@ inflate(z_streamp strm, int flush) } #endif state->mode = DONE; + OS_FALLTHROUGH; case DONE: ret = Z_STREAM_END; goto inf_leave; diff --git a/libkern/zlib/inftrees.c b/libkern/zlib/inftrees.c index 8d4f79594..c049915df 100644 --- a/libkern/zlib/inftrees.c +++ b/libkern/zlib/inftrees.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2016 Apple Inc. All rights reserved. + * Copyright (c) 2008-2016, 2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -78,7 +78,7 @@ inflate_table(codetype type, unsigned short FAR *lens, unsigned codes, code FAR *next; /* next available space in table */ const unsigned short FAR *base; /* base value table to use */ const unsigned short FAR *extra; /* extra bits table to use */ - int end; /* use base and extra for symbol > end */ + unsigned match; /* use base and extra for symbol >= match */ unsigned short count[MAXBITS+1]; /* number of codes of each length */ unsigned short offs[MAXBITS+1]; /* offsets in table for each length */ static const unsigned short lbase[31] = { /* Length codes 257..285 base */ @@ -206,19 +206,17 @@ inflate_table(codetype type, unsigned short FAR *lens, unsigned codes, switch (type) { case CODES: base = extra = work; /* dummy value--not used */ - end = 19; + match = 20; break; case LENS: base = lbase; - base -= 257; extra = lext; - extra -= 257; - end = 256; + match = 257; break; default: /* DISTS */ base = dbase; extra = dext; - end = -1; + match = 0; } /* initialize state for loop */ @@ -240,13 +238,13 @@ inflate_table(codetype type, unsigned short FAR *lens, unsigned codes, for (;;) { /* create table entry */ this.bits = (unsigned char)(len - drop); - if ((int)(work[sym]) < end) { + if (work[sym] + 1 < match) { this.op = (unsigned char)0; this.val = work[sym]; } - else if ((int)(work[sym]) > end) { - this.op = (unsigned char)(extra[work[sym]]); - this.val = base[work[sym]]; + else if (work[sym] >= match) { + this.op = (unsigned char)(extra[work[sym] - match]); + this.val = base[work[sym] - match]; } else { this.op = (unsigned char)(32 + 64); /* end of block */ diff --git a/libkern/zlib/trees.c b/libkern/zlib/trees.c index 21d483a7f..2e4b337cf 100644 --- a/libkern/zlib/trees.c +++ 
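The inftrees.c hunk above swaps the signed `end` sentinel (and the old `base -= 257` negative-offset trick) for an unsigned `match` threshold: symbols below it are literals, the symbol just under it is the end-of-block code, and everything at or above it indexes base[]/extra[] relative to `match`. A small restatement of just that decision, with the tables themselves omitted:

/* Mirrors the new classification in inflate_table(); illustration only. */
static int
classify_symbol(unsigned sym_value, unsigned match)
{
    if (sym_value + 1 < match) {
        return 0;    /* literal: op = 0, val = sym_value */
    } else if (sym_value >= match) {
        return 1;    /* length/distance code: use base[sym_value - match]
                      * and extra[sym_value - match]; no negative offsets */
    } else {
        return 2;    /* sym_value + 1 == match: end-of-block (op = 32 + 64) */
    }
}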
b/libkern/zlib/trees.c @@ -625,7 +625,7 @@ gen_codes(ct_data *tree, int max_code, ushf *bl_count) * without bit reversal. */ for (bits = 1; bits <= MAX_BITS; bits++) { - next_code[bits] = code = (code + bl_count[bits-1]) << 1; + next_code[bits] = code = (ush)((code + bl_count[bits-1]) << 1); } /* Check that the bit counts in bl_count are consistent. The last code * must be all ones. @@ -638,7 +638,7 @@ gen_codes(ct_data *tree, int max_code, ushf *bl_count) int len = tree[n].Len; if (len == 0) continue; /* Now reverse the bits */ - tree[n].Code = bi_reverse(next_code[len]++, len); + tree[n].Code = (ush)bi_reverse(next_code[len]++, len); Tracecv(tree != static_ltree, (stderr,"\nn %3d %c l %2d c %4x (%x) ", n, (isgraph(n) ? n : ' '), len, tree[n].Code, next_code[len]-1)); diff --git a/libkern/zlib/z_crc32.c b/libkern/zlib/z_crc32.c index ac0acac44..458fb87d3 100644 --- a/libkern/zlib/z_crc32.c +++ b/libkern/zlib/z_crc32.c @@ -281,8 +281,8 @@ z_crc32(unsigned long crc, const unsigned char FAR *buf, unsigned len) /* ========================================================================= */ #define DOLIT4 c ^= *buf4++; \ - c = crc_table[3][c & 0xff] ^ crc_table[2][(c >> 8) & 0xff] ^ \ - crc_table[1][(c >> 16) & 0xff] ^ crc_table[0][c >> 24] + c = (u4)(crc_table[3][c & 0xff] ^ crc_table[2][(c >> 8) & 0xff] ^ \ + crc_table[1][(c >> 16) & 0xff] ^ crc_table[0][c >> 24]) #define DOLIT32 DOLIT4; DOLIT4; DOLIT4; DOLIT4; DOLIT4; DOLIT4; DOLIT4; DOLIT4 /* ========================================================================= */ @@ -295,7 +295,7 @@ crc32_little(unsigned long crc, const unsigned char FAR *buf, unsigned len) c = (u4)crc; c = ~c; while (len && ((ptrdiff_t)buf & 3)) { - c = crc_table[0][(c ^ *buf++) & 0xff] ^ (c >> 8); + c = (u4)(crc_table[0][(c ^ *buf++) & 0xff] ^ (c >> 8)); len--; } @@ -311,7 +311,7 @@ crc32_little(unsigned long crc, const unsigned char FAR *buf, unsigned len) buf = (const unsigned char FAR *)buf4; if (len) do { - c = crc_table[0][(c ^ *buf++) & 0xff] ^ (c >> 8); + c = (u4)(crc_table[0][(c ^ *buf++) & 0xff] ^ (c >> 8)); } while (--len); c = ~c; return (unsigned long)c; @@ -319,8 +319,8 @@ crc32_little(unsigned long crc, const unsigned char FAR *buf, unsigned len) /* ========================================================================= */ #define DOBIG4 c ^= *++buf4; \ - c = crc_table[4][c & 0xff] ^ crc_table[5][(c >> 8) & 0xff] ^ \ - crc_table[6][(c >> 16) & 0xff] ^ crc_table[7][c >> 24] + c = (u4)(crc_table[4][c & 0xff] ^ crc_table[5][(c >> 8) & 0xff] ^ \ + crc_table[6][(c >> 16) & 0xff] ^ crc_table[7][c >> 24]) #define DOBIG32 DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4 /* ========================================================================= */ @@ -333,7 +333,7 @@ crc32_big(unsigned long crc, const unsigned char FAR *buf, unsigned len) c = REV((u4)crc); c = ~c; while (len && ((ptrdiff_t)buf & 3)) { - c = crc_table[4][(c >> 24) ^ *buf++] ^ (c << 8); + c = (u4)(crc_table[4][(c >> 24) ^ *buf++] ^ (c << 8)); len--; } @@ -351,7 +351,7 @@ crc32_big(unsigned long crc, const unsigned char FAR *buf, unsigned len) buf = (const unsigned char FAR *)buf4; if (len) do { - c = crc_table[4][(c >> 24) ^ *buf++] ^ (c << 8); + c = (u4)(crc_table[4][(c >> 24) ^ *buf++] ^ (c << 8)); } while (--len); c = ~c; return (unsigned long)(REV(c)); diff --git a/libsa/bootstrap.cpp b/libsa/bootstrap.cpp index f46485a1d..dc1fbf236 100644 --- a/libsa/bootstrap.cpp +++ b/libsa/bootstrap.cpp @@ -29,8 +29,11 @@ extern "C" { #include #include #include +#include } +#define 
IOKIT_ENABLE_SHARED_PTR + #include #include #include @@ -112,26 +115,6 @@ static const char * sKernelComponentNames[] = { NULL }; -static int __whereIsAddr(vm_offset_t theAddr, unsigned long * segSizes, vm_offset_t *segAddrs, int segCount ); - -#define PLK_SEGMENTS 12 - -static const char * plk_segNames[] = { - "__TEXT", - "__TEXT_EXEC", - "__DATA", - "__DATA_CONST", - "__LINKEDIT", - "__PRELINK_TEXT", - "__PLK_TEXT_EXEC", - "__PRELINK_DATA", - "__PLK_DATA_CONST", - "__PLK_LLVM_COV", - "__PLK_LINKEDIT", - "__PRELINK_INFO", - NULL -}; - #if PRAGMA_MARK #pragma mark KLDBootstrap Class #endif @@ -149,8 +132,7 @@ class KLDBootstrap { private: void readStartupExtensions(void); - void readPrelinkedExtensions( - kernel_section_t * prelinkInfoSect); + void readPrelinkedExtensions(kernel_mach_header_t *mh, kc_kind_t type); void readBooterExtensions(void); OSReturn loadKernelComponentKexts(void); @@ -207,147 +189,69 @@ KLDBootstrap::readStartupExtensions(void) kOSKextLogKextBookkeepingFlag, "Reading startup extensions."); + kc_format_t kc_format; + kernel_mach_header_t *mh = &_mh_execute_header; + if (PE_get_primary_kc_format(&kc_format) && kc_format == KCFormatFileset) { + mh = (kernel_mach_header_t *)PE_get_kc_header(KCKindPrimary); + } + /* If the prelink info segment has a nonzero size, we are prelinked * and won't have any individual kexts or mkexts to read. * Otherwise, we need to read kexts or the mkext from what the booter * has handed us. */ - prelinkInfoSect = getsectbyname(kPrelinkInfoSegment, kPrelinkInfoSection); + prelinkInfoSect = getsectbynamefromheader(mh, kPrelinkInfoSegment, kPrelinkInfoSection); if (prelinkInfoSect->size) { - readPrelinkedExtensions(prelinkInfoSect); + readPrelinkedExtensions(mh, KCKindPrimary); } else { readBooterExtensions(); } + kernel_mach_header_t *akc_mh; + akc_mh = (kernel_mach_header_t*)PE_get_kc_header(KCKindAuxiliary); + if (akc_mh) { + readPrelinkedExtensions(akc_mh, KCKindAuxiliary); + } + loadKernelComponentKexts(); loadKernelExternalComponents(); readBuiltinPersonalities(); - OSKext::sendAllKextPersonalitiesToCatalog(); + OSKext::sendAllKextPersonalitiesToCatalog(true); return; } -typedef struct kaslrPackedOffsets { - uint32_t count; /* number of offsets */ - uint32_t offsetsArray[]; /* offsets to slide */ -} kaslrPackedOffsets; - /********************************************************************* *********************************************************************/ void -KLDBootstrap::readPrelinkedExtensions( - kernel_section_t * prelinkInfoSect) +KLDBootstrap::readPrelinkedExtensions(kernel_mach_header_t *mh, kc_kind_t type) { - OSArray * infoDictArray = NULL;// do not release - OSObject * parsedXML = NULL;// must release - OSDictionary * prelinkInfoDict = NULL;// do not release - OSString * errorString = NULL;// must release - OSKext * theKernel = NULL;// must release - OSData * kernelcacheUUID = NULL;// do not release - - kernel_segment_command_t * prelinkTextSegment = NULL;// see code - kernel_segment_command_t * prelinkInfoSegment = NULL;// see code - - /* We make some copies of data, but if anything fails we're basically - * going to fail the boot, so these won't be cleaned up on error. 
- */ - void * prelinkData = NULL;// see code - vm_size_t prelinkLength = 0; - - - OSDictionary * infoDict = NULL;// do not release - - IORegistryEntry * registryRoot = NULL;// do not release - OSNumber * prelinkCountObj = NULL;// must release - - u_int i = 0; -#if NO_KEXTD - bool ramDiskBoot; - bool developerDevice; - bool dontLoad; -#endif - OSData * kaslrOffsets = NULL; - unsigned long plk_segSizes[PLK_SEGMENTS]; - vm_offset_t plk_segAddrs[PLK_SEGMENTS]; + bool ret; + OSSharedPtr loaded_kcUUID; + OSSharedPtr errorString; + OSSharedPtr parsedXML; + kernel_section_t *infoPlistSection = NULL; + OSDictionary *infoDict = NULL; // do not release OSKextLog(/* kext */ NULL, kOSKextLogProgressLevel | kOSKextLogDirectoryScanFlag | kOSKextLogArchiveFlag, "Starting from prelinked kernel."); - prelinkTextSegment = getsegbyname(kPrelinkTextSegment); - if (!prelinkTextSegment) { - OSKextLog(/* kext */ NULL, - kOSKextLogErrorLevel | - kOSKextLogDirectoryScanFlag | kOSKextLogArchiveFlag, - "Can't find prelinked kexts' text segment."); - goto finish; - } - -#if KASLR_KEXT_DEBUG - unsigned long scratchSize; - vm_offset_t scratchAddr; - - IOLog("kaslr: prelinked kernel address info: \n"); - - scratchAddr = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__TEXT", &scratchSize); - IOLog("kaslr: start 0x%lx end 0x%lx length %lu for __TEXT \n", - (unsigned long)scratchAddr, - (unsigned long)(scratchAddr + scratchSize), - scratchSize); - - scratchAddr = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__DATA", &scratchSize); - IOLog("kaslr: start 0x%lx end 0x%lx length %lu for __DATA \n", - (unsigned long)scratchAddr, - (unsigned long)(scratchAddr + scratchSize), - scratchSize); - - scratchAddr = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__LINKEDIT", &scratchSize); - IOLog("kaslr: start 0x%lx end 0x%lx length %lu for __LINKEDIT \n", - (unsigned long)scratchAddr, - (unsigned long)(scratchAddr + scratchSize), - scratchSize); - - scratchAddr = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__KLD", &scratchSize); - IOLog("kaslr: start 0x%lx end 0x%lx length %lu for __KLD \n", - (unsigned long)scratchAddr, - (unsigned long)(scratchAddr + scratchSize), - scratchSize); - - scratchAddr = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__PRELINK_TEXT", &scratchSize); - IOLog("kaslr: start 0x%lx end 0x%lx length %lu for __PRELINK_TEXT \n", - (unsigned long)scratchAddr, - (unsigned long)(scratchAddr + scratchSize), - scratchSize); - - scratchAddr = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__PRELINK_INFO", &scratchSize); - IOLog("kaslr: start 0x%lx end 0x%lx length %lu for __PRELINK_INFO \n", - (unsigned long)scratchAddr, - (unsigned long)(scratchAddr + scratchSize), - scratchSize); -#endif - - prelinkData = (void *) prelinkTextSegment->vmaddr; - prelinkLength = prelinkTextSegment->vmsize; - - /* build arrays of plk info for later use */ - const char ** segNamePtr; - - for (segNamePtr = &plk_segNames[0], i = 0; *segNamePtr && i < PLK_SEGMENTS; segNamePtr++, i++) { - plk_segSizes[i] = 0; - plk_segAddrs[i] = (vm_offset_t)getsegdatafromheader(&_mh_execute_header, *segNamePtr, &plk_segSizes[i]); - } - - - /* Unserialize the info dictionary from the prelink info section. + /* + * The 'infoPlistSection' should contains an XML dictionary that + * contains some meta data about the KC, and also describes each kext + * included in the kext collection. Unserialize this dictionary and + * then iterate over each kext. 
*/ - parsedXML = OSUnserializeXML((const char *)prelinkInfoSect->addr, - &errorString); + infoPlistSection = getsectbynamefromheader(mh, kPrelinkInfoSegment, kPrelinkInfoSection); + parsedXML = OSUnserializeXML((const char *)infoPlistSection->addr, errorString); if (parsedXML) { - prelinkInfoDict = OSDynamicCast(OSDictionary, parsedXML); + infoDict = OSDynamicCast(OSDictionary, parsedXML.get()); } - if (!prelinkInfoDict) { - const char * errorCString = "(unknown error)"; + + if (!infoDict) { + const char *errorCString = "(unknown error)"; if (errorString && errorString->getCStringNoCopy()) { errorCString = errorString->getCStringNoCopy(); @@ -355,218 +259,92 @@ KLDBootstrap::readPrelinkedExtensions( errorCString = "not a dictionary"; } OSKextLog(/* kext */ NULL, kOSKextLogErrorLevel | kOSKextLogArchiveFlag, - "Error unserializing prelink plist: %s.", errorCString); - goto finish; - } - -#if NO_KEXTD - /* Check if we should keep developer kexts around. - * TODO: Check DeviceTree instead of a boot-arg - */ - developerDevice = true; - PE_parse_boot_argn("developer", &developerDevice, sizeof(developerDevice)); - - ramDiskBoot = IORamDiskBSDRoot(); -#endif /* NO_KEXTD */ - - /* Copy in the kernelcache UUID */ - kernelcacheUUID = OSDynamicCast(OSData, - prelinkInfoDict->getObject(kPrelinkInfoKCIDKey)); - if (kernelcacheUUID) { - if (kernelcacheUUID->getLength() != sizeof(kernelcache_uuid)) { - panic("kernelcacheUUID length is %d, expected %lu", kernelcacheUUID->getLength(), - sizeof(kernelcache_uuid)); - } else { - kernelcache_uuid_valid = TRUE; - memcpy((void *)&kernelcache_uuid, (const void *)kernelcacheUUID->getBytesNoCopy(), kernelcacheUUID->getLength()); - uuid_unparse_upper(kernelcache_uuid, kernelcache_uuid_string); - } - } - - infoDictArray = OSDynamicCast(OSArray, - prelinkInfoDict->getObject(kPrelinkInfoDictionaryKey)); - if (!infoDictArray) { - OSKextLog(/* kext */ NULL, kOSKextLogErrorLevel | kOSKextLogArchiveFlag, - "The prelinked kernel has no kext info dictionaries"); - goto finish; + "Error unserializing kext info plist section: %s.", errorCString); + return; } - /* kaslrOffsets are available use them to slide local relocations */ - kaslrOffsets = OSDynamicCast(OSData, - prelinkInfoDict->getObject(kPrelinkLinkKASLROffsetsKey)); - - /* Create dictionary of excluded kexts - */ -#ifndef CONFIG_EMBEDDED - OSKext::createExcludeListFromPrelinkInfo(infoDictArray); -#endif - /* Create OSKext objects for each info dictionary. - */ - for (i = 0; i < infoDictArray->getCount(); ++i) { - infoDict = OSDynamicCast(OSDictionary, infoDictArray->getObject(i)); - if (!infoDict) { - OSKextLog(/* kext */ NULL, - kOSKextLogErrorLevel | - kOSKextLogDirectoryScanFlag | kOSKextLogArchiveFlag, - "Can't find info dictionary for prelinked kext #%d.", i); - continue; + /* Validate that the Kext Collection is prelinked to the loaded KC */ + if (type == KCKindAuxiliary) { + if (OSKext::validateKCFileSetUUID(infoDict, KCKindAuxiliary) != 0) { + OSKextLog(/* kext */ NULL, kOSKextLogErrorLevel | kOSKextLogArchiveFlag, + "Early boot AuxKC doesn't appear to be linked against the loaded BootKC."); + return; } -#if NO_KEXTD - dontLoad = false; - - /* If we're not on a developer device, skip and free developer kexts. + /* + * Defer further processing of the AuxKC, but keep the + * processed info dictionary around so we can ml_static_free + * the segment. 
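The rewritten reader relies on OSSharedPtr (enabled by the IOKIT_ENABLE_SHARED_PTR definition near the top of this file), so whatever OSUnserializeXML hands back is released automatically instead of via OSSafeReleaseNULL at a finish: label. A hedged sketch of that ownership pattern; the helper, its header list, and the OSSharedPtr element types are assumptions, while the OSUnserializeXML/OSDynamicCast call shape follows the code above:

#include <libkern/c++/OSSharedPtr.h>
#include <libkern/c++/OSDictionary.h>
#include <libkern/c++/OSString.h>
/* OSUnserializeXML prototype assumed to be in scope, as it is in bootstrap.cpp. */

static bool
parse_info_plist(const char *xml)
{
    OSSharedPtr<OSString> errorString;
    OSSharedPtr<OSObject> parsed = OSUnserializeXML(xml, errorString);

    /* OSDynamicCast only borrows; ownership stays with 'parsed'. */
    OSDictionary *dict = OSDynamicCast(OSDictionary, parsed.get());
    if (!dict) {
        /* No manual release needed: both smart pointers drop their
         * references as soon as this function returns. */
        return false;
    }
    /* ... inspect 'dict' while 'parsed' is still in scope ... */
    return true;
}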
*/ - if (developerDevice == false) { - OSBoolean *devOnlyBool = OSDynamicCast(OSBoolean, - infoDict->getObject(kOSBundleDeveloperOnlyKey)); - if (devOnlyBool == kOSBooleanTrue) { - dontLoad = true; - } - } - - /* Skip and free kexts that are only needed when booted from a ram disk. - */ - if (ramDiskBoot == false) { - OSBoolean *ramDiskOnlyBool = OSDynamicCast(OSBoolean, - infoDict->getObject(kOSBundleRamDiskOnlyKey)); - if (ramDiskOnlyBool == kOSBooleanTrue) { - dontLoad = true; - } - } - - if (dontLoad == true) { - OSString *bundleID = OSDynamicCast(OSString, - infoDict->getObject(kCFBundleIdentifierKey)); - if (bundleID) { - OSKextLog(NULL, kOSKextLogWarningLevel | kOSKextLogGeneralFlag, - "Kext %s not loading.", bundleID->getCStringNoCopy()); - } - - OSNumber *addressNum = OSDynamicCast(OSNumber, - infoDict->getObject(kPrelinkExecutableLoadKey)); - OSNumber *lengthNum = OSDynamicCast(OSNumber, - infoDict->getObject(kPrelinkExecutableSizeKey)); - if (addressNum && lengthNum) { -#if __arm__ || __arm64__ - vm_offset_t data = ml_static_slide(addressNum->unsigned64BitValue()); - vm_size_t length = (vm_size_t) (lengthNum->unsigned32BitValue()); - ml_static_mfree(data, length); -#else -#error Pick the right way to free prelinked data on this arch -#endif - } - - infoDictArray->removeObject(i--); - continue; + if (!OSKext::registerDeferredKextCollection(mh, parsedXML, KCKindAuxiliary)) { + OSKextLog(/* kext */ NULL, kOSKextLogErrorLevel | kOSKextLogArchiveFlag, + "Error deferring AuxKC kext processing: Kexts in this collection will be unusable."); } -#endif /* NO_KEXTD */ - - /* Create the kext for the entry, then release it, because the - * kext system keeps them around until explicitly removed. - * Any creation/registration failures are already logged for us. - */ - OSKext * newKext = OSKext::withPrelinkedInfoDict(infoDict, (kaslrOffsets ? TRUE : FALSE)); - OSSafeReleaseNULL(newKext); + goto skip_adding_kexts; } - /* slide kxld relocations */ - if (kaslrOffsets && vm_kernel_slide > 0) { - int slidKextAddrCount = 0; - int badSlideAddr = 0; - int badSlideTarget = 0; - - const kaslrPackedOffsets * myOffsets = NULL; - myOffsets = (const kaslrPackedOffsets *) kaslrOffsets->getBytesNoCopy(); - - for (uint32_t j = 0; j < myOffsets->count; j++) { - uint64_t slideOffset = (uint64_t) myOffsets->offsetsArray[j]; - uintptr_t * slideAddr = (uintptr_t *) ((uint64_t)prelinkData + slideOffset); - int slideAddrSegIndex = -1; - int addrToSlideSegIndex = -1; - - slideAddrSegIndex = __whereIsAddr((vm_offset_t)slideAddr, &plk_segSizes[0], &plk_segAddrs[0], PLK_SEGMENTS ); - if (slideAddrSegIndex >= 0) { - addrToSlideSegIndex = __whereIsAddr(ml_static_slide((vm_offset_t)(*slideAddr)), &plk_segSizes[0], &plk_segAddrs[0], PLK_SEGMENTS ); - if (addrToSlideSegIndex < 0) { - badSlideTarget++; - continue; - } - } else { - badSlideAddr++; - continue; - } - - slidKextAddrCount++; - *slideAddr = ml_static_slide(*slideAddr); - } // for ... + /* + * this function does all the heavy lifting of adding OSKext objects + * and potentially sliding them if necessary + */ + ret = OSKext::addKextsFromKextCollection(mh, infoDict, + kPrelinkTextSegment, loaded_kcUUID, (mh->filetype == MH_FILESET) ? 
type : KCKindUnknown); - /* All kexts are now slid, set VM protections for them */ - OSKext::setAllVMAttributes(); + if (!ret) { + OSKextLog(/* kext */ NULL, kOSKextLogErrorLevel | kOSKextLogArchiveFlag, + "Error loading kext info from prelinked primary KC"); + return; } - /* Store the number of prelinked kexts in the registry so we can tell - * when the system has been started from a prelinked kernel. - */ - registryRoot = IORegistryEntry::getRegistryRoot(); - assert(registryRoot); - - prelinkCountObj = OSNumber::withNumber( - (unsigned long long)infoDictArray->getCount(), - 8 * sizeof(uint32_t)); - assert(prelinkCountObj); - if (prelinkCountObj) { - registryRoot->setProperty(kOSPrelinkKextCountKey, prelinkCountObj); + /* Copy in the kernelcache UUID */ + if (!loaded_kcUUID) { + OSKextLog(/* kext */ NULL, kOSKextLogErrorLevel | kOSKextLogArchiveFlag, + "WARNING: did not find UUID in %s KC!", (type == KCKindAuxiliary) ? "Aux" : "Primary"); + } else if (type != KCKindAuxiliary) { + kernelcache_uuid_valid = TRUE; + memcpy((void *)&kernelcache_uuid, (const void *)loaded_kcUUID->getBytesNoCopy(), loaded_kcUUID->getLength()); + uuid_unparse_upper(kernelcache_uuid, kernelcache_uuid_string); + } else { + auxkc_uuid_valid = TRUE; + memcpy((void *)&auxkc_uuid, (const void *)loaded_kcUUID->getBytesNoCopy(), loaded_kcUUID->getLength()); + uuid_unparse_upper(auxkc_uuid, auxkc_uuid_string); } - OSKextLog(/* kext */ NULL, - kOSKextLogProgressLevel | - kOSKextLogGeneralFlag | kOSKextLogKextBookkeepingFlag | - kOSKextLogDirectoryScanFlag | kOSKextLogArchiveFlag, - "%u prelinked kexts", - infoDictArray->getCount()); - +skip_adding_kexts: #if CONFIG_KEXT_BASEMENT - /* On CONFIG_KEXT_BASEMENT systems, kexts are copied to their own - * special VM region during OSKext init time, so we can free the whole - * segment now. - */ - ml_static_mfree((vm_offset_t) prelinkData, prelinkLength); -#endif /* __x86_64__ */ + if (mh->filetype != MH_FILESET) { + /* + * On CONFIG_KEXT_BASEMENT systems which do _not_ boot the new + * MH_FILESET kext collection, kexts are copied to their own + * special VM region during OSKext init time, so we can free + * the whole segment now. + */ + kernel_segment_command_t *prelinkTextSegment = NULL; + prelinkTextSegment = getsegbyname(kPrelinkTextSegment); + if (!prelinkTextSegment) { + OSKextLog(/* kext */ NULL, + kOSKextLogErrorLevel | kOSKextLogArchiveFlag, + "Can't find prelinked kexts' text segment."); + return; + } + + ml_static_mfree((vm_offset_t)prelinkTextSegment->vmaddr, prelinkTextSegment->vmsize); + } +#endif /* CONFIG_KEXT_BASEMENT */ - /* Free the prelink info segment, we're done with it. + /* + * Free the prelink info segment, we're done with it. 
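Taken together, the new readPrelinkedExtensions() boils down to: locate the prelink-info plist in whichever Mach-O header it was handed, unserialize it, defer the collection if it is an AuxKC (after checking it was linked against the loaded BootKC), or otherwise hand it to OSKext::addKextsFromKextCollection and record the kext collection UUID that comes back. A compressed, hedged restatement of that control flow, with logging, cleanup, and the UUID bookkeeping trimmed; the OSSharedPtr element types are inferred from the surrounding calls:

static void
read_kext_collection(kernel_mach_header_t *mh, kc_kind_t type)
{
    kernel_section_t *sect =
        getsectbynamefromheader(mh, kPrelinkInfoSegment, kPrelinkInfoSection);

    OSSharedPtr<OSString> errorString;
    OSSharedPtr<OSObject> parsed =
        OSUnserializeXML((const char *)sect->addr, errorString);
    OSDictionary *infoDict = OSDynamicCast(OSDictionary, parsed.get());
    if (!infoDict) {
        return;                 /* bad plist: give up on this collection */
    }

    if (type == KCKindAuxiliary) {
        /* Only accept an AuxKC that was linked against this BootKC,
         * then park it for later processing. */
        if (OSKext::validateKCFileSetUUID(infoDict, KCKindAuxiliary) != 0) {
            return;
        }
        OSKext::registerDeferredKextCollection(mh, parsed, KCKindAuxiliary);
        return;
    }

    /* Primary KC: create (and, if needed, slide) the OSKext objects. */
    OSSharedPtr<OSData> kcUUID;
    OSKext::addKextsFromKextCollection(mh, infoDict, kPrelinkTextSegment,
        kcUUID, (mh->filetype == MH_FILESET) ? type : KCKindUnknown);
}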
*/ + kernel_segment_command_t *prelinkInfoSegment = NULL; prelinkInfoSegment = getsegbyname(kPrelinkInfoSegment); if (prelinkInfoSegment) { ml_static_mfree((vm_offset_t)prelinkInfoSegment->vmaddr, (vm_size_t)prelinkInfoSegment->vmsize); } -finish: - OSSafeReleaseNULL(errorString); - OSSafeReleaseNULL(parsedXML); - OSSafeReleaseNULL(theKernel); - OSSafeReleaseNULL(prelinkCountObj); return; } -static int -__whereIsAddr(vm_offset_t theAddr, unsigned long * segSizes, vm_offset_t *segAddrs, int segCount) -{ - int i; - - for (i = 0; i < segCount; i++) { - vm_offset_t myAddr = *(segAddrs + i); - unsigned long mySize = *(segSizes + i); - - if (theAddr >= myAddr && theAddr < (myAddr + mySize)) { - return i; - } - } - - return -1; -} - /********************************************************************* *********************************************************************/ @@ -580,16 +358,15 @@ typedef struct _DeviceTreeBuffer { void KLDBootstrap::readBooterExtensions(void) { - IORegistryEntry * booterMemoryMap = NULL;// must release - OSDictionary * propertyDict = NULL;// must release - OSCollectionIterator * keyIterator = NULL;// must release + OSSharedPtr booterMemoryMap; + OSSharedPtr propertyDict; + OSSharedPtr keyIterator; OSString * deviceTreeName = NULL;// do not release const _DeviceTreeBuffer * deviceTreeBuffer = NULL;// do not free char * booterDataPtr = NULL;// do not free - OSData * booterData = NULL;// must release - - OSKext * aKext = NULL;// must release + OSSharedPtr booterData; + OSSharedPtr aKext; OSKextLog(/* kext */ NULL, kOSKextLogProgressLevel | @@ -615,7 +392,7 @@ KLDBootstrap::readBooterExtensions(void) goto finish; } - keyIterator = OSCollectionIterator::withCollection(propertyDict); + keyIterator = OSCollectionIterator::withCollection(propertyDict.get()); if (!keyIterator) { OSKextLog(/* kext */ NULL, kOSKextLogErrorLevel | @@ -627,8 +404,9 @@ KLDBootstrap::readBooterExtensions(void) /* Create dictionary of excluded kexts */ #ifndef CONFIG_EMBEDDED - OSKext::createExcludeListFromBooterData(propertyDict, keyIterator); + OSKext::createExcludeListFromBooterData(propertyDict.get(), keyIterator.get()); #endif + // !! reset the iterator, not the pointer keyIterator->reset(); while ((deviceTreeName = @@ -637,10 +415,6 @@ KLDBootstrap::readBooterExtensions(void) OSData * deviceTreeEntry = OSDynamicCast(OSData, propertyDict->getObject(deviceTreeName)); - /* Clear out the booterData from the prior iteration. - */ - OSSafeReleaseNULL(booterData); - /* If there is no entry for the name, we can't do much with it. */ if (!deviceTreeEntry) { continue; @@ -698,19 +472,12 @@ KLDBootstrap::readBooterExtensions(void) * kext system keeps them around until explicitly removed. * Any creation/registration failures are already logged for us. */ - OSKext * newKext = OSKext::withBooterData(deviceTreeName, booterData); - OSSafeReleaseNULL(newKext); + OSSharedPtr newKext = OSKext::withBooterData(deviceTreeName, booterData.get()); booterMemoryMap->removeProperty(deviceTreeName); } /* while ( (deviceTreeName = OSDynamicCast(OSString, ...) 
) ) */ finish: - - OSSafeReleaseNULL(booterMemoryMap); - OSSafeReleaseNULL(propertyDict); - OSSafeReleaseNULL(keyIterator); - OSSafeReleaseNULL(booterData); - OSSafeReleaseNULL(aKext); return; } @@ -721,8 +488,8 @@ finish: void KLDBootstrap::loadSecurityExtensions(void) { - OSDictionary * extensionsDict = NULL;// must release - OSCollectionIterator * keyIterator = NULL;// must release + OSSharedPtr extensionsDict; + OSSharedPtr keyIterator; OSString * bundleID = NULL;// don't release OSKext * theKext = NULL;// don't release @@ -736,7 +503,7 @@ KLDBootstrap::loadSecurityExtensions(void) return; } - keyIterator = OSCollectionIterator::withCollection(extensionsDict); + keyIterator = OSCollectionIterator::withCollection(extensionsDict.get()); if (!keyIterator) { OSKextLog(/* kext */ NULL, kOSKextLogErrorLevel | @@ -771,9 +538,6 @@ KLDBootstrap::loadSecurityExtensions(void) } finish: - OSSafeReleaseNULL(keyIterator); - OSSafeReleaseNULL(extensionsDict); - return; } @@ -791,12 +555,11 @@ finish: OSReturn KLDBootstrap::loadKernelComponentKexts(void) { - OSReturn result = kOSReturnSuccess;// optimistic - OSKext * theKext = NULL; // must release - const char ** kextIDPtr = NULL; // do not release + OSReturn result = kOSReturnSuccess;// optimistic + OSSharedPtr theKext; + const char ** kextIDPtr = NULL; // do not release for (kextIDPtr = &sKernelComponentNames[0]; *kextIDPtr; kextIDPtr++) { - OSSafeReleaseNULL(theKext); theKext = OSKext::lookupKextWithIdentifier(*kextIDPtr); if (theKext) { @@ -812,7 +575,6 @@ KLDBootstrap::loadKernelComponentKexts(void) } } - OSSafeReleaseNULL(theKext); return result; } @@ -829,8 +591,8 @@ KLDBootstrap::loadKernelComponentKexts(void) void KLDBootstrap::loadKernelExternalComponents(void) { - OSDictionary * extensionsDict = NULL;// must release - OSCollectionIterator * keyIterator = NULL;// must release + OSSharedPtr extensionsDict; + OSSharedPtr keyIterator; OSString * bundleID = NULL;// don't release OSKext * theKext = NULL;// don't release OSBoolean * isKernelExternalComponent = NULL;// don't release @@ -845,7 +607,7 @@ KLDBootstrap::loadKernelExternalComponents(void) return; } - keyIterator = OSCollectionIterator::withCollection(extensionsDict); + keyIterator = OSCollectionIterator::withCollection(extensionsDict.get()); if (!keyIterator) { OSKextLog(/* kext */ NULL, kOSKextLogErrorLevel | @@ -882,9 +644,6 @@ KLDBootstrap::loadKernelExternalComponents(void) } finish: - OSSafeReleaseNULL(keyIterator); - OSSafeReleaseNULL(extensionsDict); - return; } @@ -893,12 +652,12 @@ finish: void KLDBootstrap::readBuiltinPersonalities(void) { - OSObject * parsedXML = NULL;// must release + OSSharedPtr parsedXML; OSArray * builtinExtensions = NULL;// do not release - OSArray * allPersonalities = NULL;// must release - OSString * errorString = NULL;// must release + OSSharedPtr allPersonalities; + OSSharedPtr errorString; kernel_section_t * infosect = NULL;// do not free - OSCollectionIterator * personalitiesIterator = NULL;// must release + OSSharedPtr personalitiesIterator; unsigned int count, i; OSKextLog(/* kext */ NULL, @@ -920,9 +679,9 @@ KLDBootstrap::readBuiltinPersonalities(void) } parsedXML = OSUnserializeXML((const char *) (uintptr_t)infosect->addr, - &errorString); + errorString); if (parsedXML) { - builtinExtensions = OSDynamicCast(OSArray, parsedXML); + builtinExtensions = OSDynamicCast(OSArray, parsedXML.get()); } if (!builtinExtensions) { const char * errorCString = "(unknown error)"; @@ -949,8 +708,6 @@ KLDBootstrap::readBuiltinPersonalities(void) OSDictionary * 
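Several of the loops above share one shape: wrap a dictionary in an OSCollectionIterator, walk its keys, and look each value up by key, with the iterator now held in an OSSharedPtr instead of being released by hand. A minimal sketch of that idiom; it assumes IOKIT_ENABLE_SHARED_PTR is defined before the libkern headers (as at the top of this file) so withCollection() returns a smart pointer, and the dictionary contents are hypothetical:

#include <libkern/c++/OSSharedPtr.h>
#include <libkern/c++/OSCollectionIterator.h>
#include <libkern/c++/OSDictionary.h>
#include <libkern/c++/OSString.h>

static void
walk_extensions(OSDictionary *extensionsDict)
{
    OSSharedPtr<OSCollectionIterator> keyIterator =
        OSCollectionIterator::withCollection(extensionsDict);
    if (!keyIterator) {
        return;
    }

    OSString *bundleID;   /* borrowed from the iterator; do not release */
    while ((bundleID = OSDynamicCast(OSString, keyIterator->getNextObject()))) {
        OSObject *value = extensionsDict->getObject(bundleID);   /* borrowed */
        (void)value;      /* ... decide whether to load this extension ... */
    }
    /* keyIterator releases the iterator when it goes out of scope. */
}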
personalities;// do not release OSString * personalityName;// do not release - OSSafeReleaseNULL(personalitiesIterator); - infoDict = OSDynamicCast(OSDictionary, builtinExtensions->getObject(i)); if (!infoDict) { @@ -998,13 +755,9 @@ KLDBootstrap::readBuiltinPersonalities(void) } } - gIOCatalogue->addDrivers(allPersonalities, false); + gIOCatalogue->addDrivers(allPersonalities.get(), false); finish: - OSSafeReleaseNULL(parsedXML); - OSSafeReleaseNULL(allPersonalities); - OSSafeReleaseNULL(errorString); - OSSafeReleaseNULL(personalitiesIterator); return; } diff --git a/libsa/conf/Makefile b/libsa/conf/Makefile index 05c4b79cf..51eddb889 100644 --- a/libsa/conf/Makefile +++ b/libsa/conf/Makefile @@ -23,7 +23,7 @@ endif $(TARGET)/$(CURRENT_KERNEL_CONFIG)/Makefile: $(SRCROOT)/SETUP/config/doconf $(OBJROOT)/SETUP/config $(DOCONFDEPS) $(_v)$(MKDIR) $(TARGET)/$(CURRENT_KERNEL_CONFIG) - $(_v)$(SRCROOT)/SETUP/config/doconf -c -cpu $(DOCONF_ARCH_CONFIG_LC) -soc $(CURRENT_MACHINE_CONFIG_LC) -d $(TARGET)/$(CURRENT_KERNEL_CONFIG) -s $(SOURCE) -m $(MASTERCONFDIR) $(CURRENT_KERNEL_CONFIG) + $(_v)$(SRCROOT)/SETUP/config/doconf -c -cpu $(DOCONF_ARCH_CONFIG_LC) -soc $(CURRENT_MACHINE_CONFIG_LC) -platform $(PLATFORM) -d $(TARGET)/$(CURRENT_KERNEL_CONFIG) -s $(SOURCE) -m $(MASTERCONFDIR) $(CURRENT_KERNEL_CONFIG) do_all: $(TARGET)/$(CURRENT_KERNEL_CONFIG)/Makefile $(_v)${MAKE} \ diff --git a/libsa/conf/Makefile.arm64 b/libsa/conf/Makefile.arm64 index 1c1cef911..7ed305fdc 100644 --- a/libsa/conf/Makefile.arm64 +++ b/libsa/conf/Makefile.arm64 @@ -2,6 +2,7 @@ #BEGIN Machine dependent Makefile fragment for arm ###################################################################### + # Bootstrap __KLD files must be Mach-O for "setsegname" $(foreach file,$(OBJS),$(eval $(file)_CFLAGS_ADD += $(CFLAGS_NOLTO_FLAG))) diff --git a/libsa/conf/Makefile.template b/libsa/conf/Makefile.template index 628823632..3d68f7aa8 100644 --- a/libsa/conf/Makefile.template +++ b/libsa/conf/Makefile.template @@ -69,7 +69,7 @@ $(COMPONENT).filelist: $(OBJS) $(SEG_HACK) -n __KLD -o $${kld_file}__ $${kld_file} || exit 1; \ mv $${kld_file}__ $${kld_file} || exit 1; \ done - $(call makelog,$(ColorL)LDFILELIST$(Color0) $(ColorLF)$(COMPONENT)$(Color0)) + @$(LOG_LDFILELIST) "$(COMPONENT)" $(_v)for obj in ${OBJS}; do \ $(ECHO) $(TARGET)/$(CURRENT_KERNEL_CONFIG)/$${obj}; \ done > $(COMPONENT).filelist diff --git a/libsyscall/Libsyscall.xcodeproj/project.pbxproj b/libsyscall/Libsyscall.xcodeproj/project.pbxproj index 0fba2db30..05e980948 100644 --- a/libsyscall/Libsyscall.xcodeproj/project.pbxproj +++ b/libsyscall/Libsyscall.xcodeproj/project.pbxproj @@ -59,8 +59,12 @@ /* Begin PBXBuildFile section */ 030B179B135377B400DAD1F0 /* open_dprotected_np.c in Sources */ = {isa = PBXBuildFile; fileRef = 030B179A135377B400DAD1F0 /* open_dprotected_np.c */; }; + 132B8BE6245D02D400F546A7 /* system-version-compat.c in Sources */ = {isa = PBXBuildFile; fileRef = 134D9532245BEB15009D4962 /* system-version-compat.c */; }; + 132B8BEC245D0B2C00F546A7 /* system-version-compat-support.c in Sources */ = {isa = PBXBuildFile; fileRef = 132B8BEA245D0AFA00F546A7 /* system-version-compat-support.c */; }; 139D584B1C7BDE41003D3B17 /* terminate_with_reason.c in Sources */ = {isa = PBXBuildFile; fileRef = 13D932CB1C7B9DE600158FA1 /* terminate_with_reason.c */; }; 13B598941A142F6400DB2D5A /* stackshot.c in Sources */ = {isa = PBXBuildFile; fileRef = 13B598931A142F5900DB2D5A /* stackshot.c */; }; + 13CBF78224575F9F00B26F7D /* open-base.c in Sources */ = {isa = PBXBuildFile; 
fileRef = 13CBF78124575F9F00B26F7D /* open-base.c */; }; + 13CBF784245783A800B26F7D /* open.c in Sources */ = {isa = PBXBuildFile; fileRef = 13CBF783245783A800B26F7D /* open.c */; }; 14FE60EC1B7D3BF400ACB44C /* mach_get_times.c in Sources */ = {isa = PBXBuildFile; fileRef = 14FE60EB1B7D3BED00ACB44C /* mach_get_times.c */; }; 240BAC4C1214770F000A1719 /* memcpy.c in Sources */ = {isa = PBXBuildFile; fileRef = 24B028D511FF4FBB00CA64A9 /* memcpy.c */; }; 2419382B12135FF6003CDE41 /* chmod.c in Sources */ = {isa = PBXBuildFile; fileRef = 2419382A12135FF6003CDE41 /* chmod.c */; }; @@ -86,7 +90,6 @@ 248BA089121DA8E0008C073F /* mprotect.c in Sources */ = {isa = PBXBuildFile; fileRef = 248BA088121DA8E0008C073F /* mprotect.c */; }; 248BA08B121DAC86008C073F /* msync.c in Sources */ = {isa = PBXBuildFile; fileRef = 248BA08A121DAC86008C073F /* msync.c */; }; 248BA08D121DB0E7008C073F /* munmap.c in Sources */ = {isa = PBXBuildFile; fileRef = 248BA08C121DB0E7008C073F /* munmap.c */; }; - 248BA08F121DC545008C073F /* open.c in Sources */ = {isa = PBXBuildFile; fileRef = 248BA08E121DC545008C073F /* open.c */; }; 248BA093121DE369008C073F /* select.c in Sources */ = {isa = PBXBuildFile; fileRef = 248BA092121DE369008C073F /* select.c */; }; 248BA095121DE565008C073F /* select-pre1050.c in Sources */ = {isa = PBXBuildFile; fileRef = 248BA094121DE565008C073F /* select-pre1050.c */; }; 248BA0B3121DE760008C073F /* select-cancel.c in Sources */ = {isa = PBXBuildFile; fileRef = 248BA0B2121DE760008C073F /* select-cancel.c */; }; @@ -297,6 +300,8 @@ E4D7E56216F8776300F92D8D /* strlen.c in Sources */ = {isa = PBXBuildFile; fileRef = E4D7E55A16F8776300F92D8D /* strlen.c */; }; E4D7E56316F8776300F92D8D /* strsep.c in Sources */ = {isa = PBXBuildFile; fileRef = E4D7E55B16F8776300F92D8D /* strsep.c */; }; EE3F605A149A6D66003BAEBA /* getaudit.c in Sources */ = {isa = PBXBuildFile; fileRef = EE3F6059149A6D66003BAEBA /* getaudit.c */; }; + FBE367BF237A540A00B690B7 /* mach_eventlink.c in Sources */ = {isa = PBXBuildFile; fileRef = FBE367BE237A540A00B690B7 /* mach_eventlink.c */; }; + FBE367C1237A58A500B690B7 /* mach_eventlink.defs in Sources */ = {isa = PBXBuildFile; fileRef = FBE367C0237A58A500B690B7 /* mach_eventlink.defs */; }; /* End PBXBuildFile section */ /* Begin PBXContainerItemProxy section */ @@ -465,7 +470,12 @@ /* Begin PBXFileReference section */ 030B179A135377B400DAD1F0 /* open_dprotected_np.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = open_dprotected_np.c; sourceTree = ""; }; + 132B8BEA245D0AFA00F546A7 /* system-version-compat-support.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = "system-version-compat-support.c"; sourceTree = ""; }; + 134D9532245BEB15009D4962 /* system-version-compat.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = "system-version-compat.c"; sourceTree = ""; }; + 138288582527B83800FC8585 /* system-version-compat-support.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = "system-version-compat-support.h"; sourceTree = ""; }; 13B598931A142F5900DB2D5A /* stackshot.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = stackshot.c; sourceTree = ""; }; + 13CBF78124575F9F00B26F7D /* open-base.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = "open-base.c"; sourceTree = ""; }; + 13CBF783245783A800B26F7D /* open.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = open.c; sourceTree = ""; }; 
13D932CB1C7B9DE600158FA1 /* terminate_with_reason.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = terminate_with_reason.c; sourceTree = ""; }; 14FE60EB1B7D3BED00ACB44C /* mach_get_times.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = mach_get_times.c; sourceTree = ""; }; 240D716711933ED300556E97 /* mach_install_mig.sh */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.script.sh; path = mach_install_mig.sh; sourceTree = ""; }; @@ -497,7 +507,6 @@ 248BA088121DA8E0008C073F /* mprotect.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = mprotect.c; sourceTree = ""; }; 248BA08A121DAC86008C073F /* msync.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = msync.c; sourceTree = ""; }; 248BA08C121DB0E7008C073F /* munmap.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = munmap.c; sourceTree = ""; }; - 248BA08E121DC545008C073F /* open.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = open.c; sourceTree = ""; }; 248BA090121DDD7F008C073F /* select-base.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = "select-base.c"; sourceTree = ""; }; 248BA092121DE369008C073F /* select.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = select.c; sourceTree = ""; }; 248BA094121DE565008C073F /* select-pre1050.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = "select-pre1050.c"; sourceTree = ""; }; @@ -697,6 +706,8 @@ E4D7E55B16F8776300F92D8D /* strsep.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = strsep.c; sourceTree = ""; }; EE3F6059149A6D66003BAEBA /* getaudit.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = getaudit.c; sourceTree = ""; }; FB50F1B315AB7DE700F814BA /* carbon_delete.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = carbon_delete.c; sourceTree = ""; }; + FBE367BE237A540A00B690B7 /* mach_eventlink.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = mach_eventlink.c; sourceTree = ""; }; + FBE367C0237A58A500B690B7 /* mach_eventlink.defs */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.mig; path = mach_eventlink.defs; sourceTree = ""; }; /* End PBXFileReference section */ /* Begin PBXFrameworksBuildPhase section */ @@ -802,6 +813,8 @@ C962B16D18DBB43F0031244A /* thread_act.c */, C9D9BD10114B00600000D8B9 /* thread_act.defs */, C9D9BD11114B00600000D8B9 /* vm_map.defs */, + FBE367BE237A540A00B690B7 /* mach_eventlink.c */, + FBE367C0237A58A500B690B7 /* mach_eventlink.defs */, ); path = mach; sourceTree = ""; @@ -875,6 +888,10 @@ 72FB18801B437F7A00181A5B /* mach_continuous_time.c */, 14FE60EB1B7D3BED00ACB44C /* mach_get_times.c */, 929FD46E1C5711CF0087B9C8 /* mach_timebase_info.c */, + 13CBF78124575F9F00B26F7D /* open-base.c */, + 134D9532245BEB15009D4962 /* system-version-compat.c */, + 138288582527B83800FC8585 /* system-version-compat-support.h */, + 132B8BEA245D0AFA00F546A7 /* system-version-compat-support.c */, 030B179A135377B400DAD1F0 /* open_dprotected_np.c */, 3F538F881A659C5600B37EFD /* persona.c */, 9CCF28261E68E993002EE6CD /* pid_shutdown_networking.c */, @@ -915,6 +932,7 @@ 248BA04B121C8EE4008C073F /* fcntl-base.c */, 
248BA04E121C8F06008C073F /* fcntl.c */, 248BA051121C8FE2008C073F /* fcntl-cancel.c */, + 13CBF783245783A800B26F7D /* open.c */, 248BA0BC121DE902008C073F /* select.c */, 248BA0B2121DE760008C073F /* select-cancel.c */, 24B223AF121DFD36007DAEDE /* sigsuspend.c */, @@ -939,7 +957,6 @@ 248BA088121DA8E0008C073F /* mprotect.c */, 248BA08A121DAC86008C073F /* msync.c */, 248BA08C121DB0E7008C073F /* munmap.c */, - 248BA08E121DC545008C073F /* open.c */, 24A7C5B611FF8DA6007669EB /* recvfrom.c */, 24A7C5B711FF8DA6007669EB /* recvmsg.c */, 248BA092121DE369008C073F /* select.c */, @@ -1381,6 +1398,7 @@ E4D7E55F16F8776300F92D8D /* strcmp.c in Sources */, E4D7E55E16F8776300F92D8D /* memset.c in Sources */, 240BAC4C1214770F000A1719 /* memcpy.c in Sources */, + 132B8BE6245D02D400F546A7 /* system-version-compat.c in Sources */, E4D7E56316F8776300F92D8D /* strsep.c in Sources */, E4D7E56016F8776300F92D8D /* strcpy.c in Sources */, E4D7E56116F8776300F92D8D /* strlcpy.c in Sources */, @@ -1471,6 +1489,8 @@ 24A7C5C211FF8DA6007669EB /* lchown.c in Sources */, 24A7C5C311FF8DA6007669EB /* listen.c in Sources */, 24A7C5C411FF8DA6007669EB /* recvfrom.c in Sources */, + 13CBF78224575F9F00B26F7D /* open-base.c in Sources */, + FBE367BF237A540A00B690B7 /* mach_eventlink.c in Sources */, 92197BAF1EAD8F2C003994B9 /* utimensat.c in Sources */, C962B16E18DBB43F0031244A /* thread_act.c in Sources */, 24A7C5C511FF8DA6007669EB /* recvmsg.c in Sources */, @@ -1481,6 +1501,7 @@ 24A7C5C911FF8DA6007669EB /* socketpair.c in Sources */, 928336A11B83ED9100873B90 /* thread_register_state.c in Sources */, 9002401118FC9A7F00D73BFA /* renamex.c in Sources */, + 132B8BEC245D0B2C00F546A7 /* system-version-compat-support.c in Sources */, 2419382B12135FF6003CDE41 /* chmod.c in Sources */, 248BA01D121C56BF008C073F /* connect.c in Sources */, 9C4B507422273E0F00F068C1 /* log_data.c in Sources */, @@ -1499,6 +1520,7 @@ C6BEE9181806840200D25AAB /* posix_sem_obsolete.c in Sources */, 248BA082121DA4F3008C073F /* kill.c in Sources */, 248BA085121DA5E4008C073F /* kill.c in Sources */, + FBE367C1237A58A500B690B7 /* mach_eventlink.defs in Sources */, 9CCF28271E68E993002EE6CD /* pid_shutdown_networking.c in Sources */, 2BA88DCC1810A3CE00EB63F6 /* coalition.c in Sources */, 248BA087121DA72D008C073F /* mmap.c in Sources */, @@ -1506,7 +1528,6 @@ 248BA089121DA8E0008C073F /* mprotect.c in Sources */, 248BA08B121DAC86008C073F /* msync.c in Sources */, 248BA08D121DB0E7008C073F /* munmap.c in Sources */, - 248BA08F121DC545008C073F /* open.c in Sources */, E2A0F3341C3B17D100A11F8A /* fs_snapshot.c in Sources */, 929FD46F1C5711DB0087B9C8 /* mach_timebase_info.c in Sources */, 248BA093121DE369008C073F /* select.c in Sources */, @@ -1522,6 +1543,7 @@ E4216C311822D404006F2632 /* mach_voucher.defs in Sources */, 24B223B5121DFF29007DAEDE /* sigsuspend.c in Sources */, 248AA963122C7B2A0085F5B1 /* unlink.c in Sources */, + 13CBF784245783A800B26F7D /* open.c in Sources */, 248AA965122C7C330085F5B1 /* rmdir.c in Sources */, 435F3CAA1B06B7BA005ED9EF /* work_interval.c in Sources */, 248AA967122C7CDA0085F5B1 /* rename.c in Sources */, diff --git a/libsyscall/Platforms/DriverKit/arm64/syscall.map b/libsyscall/Platforms/DriverKit/arm64/syscall.map new file mode 100644 index 000000000..5eeff07c2 --- /dev/null +++ b/libsyscall/Platforms/DriverKit/arm64/syscall.map @@ -0,0 +1,54 @@ +_accept$NOCANCEL ___accept_nocancel +_aio_suspend$NOCANCEL ___aio_suspend_nocancel +_close$NOCANCEL ___close_nocancel +_connect$NOCANCEL ___connect_nocancel +_fstat ___fstat64 +_fstatat ___fstatat64 
+_fstatfs ___fstatfs64 +_fsync$NOCANCEL ___fsync_nocancel +_getfsstat ___getfsstat64 +_getmntinfo ___getmntinfo64 +_lstat ___lstat64 +_msgrcv$NOCANCEL ___msgrcv_nocancel +_msgsnd$NOCANCEL ___msgsnd_nocancel +_msync$NOCANCEL ___msync_nocancel +_poll$NOCANCEL ___poll_nocancel +_pread$NOCANCEL ___pread_nocancel +_preadv$NOCANCEL ___preadv_nocancel +_pwrite$NOCANCEL ___pwrite_nocancel +_pwritev$NOCANCEL ___pwritev_nocancel +_read$NOCANCEL ___read_nocancel +_readv$NOCANCEL ___readv_nocancel +_recvfrom$NOCANCEL ___recvfrom_nocancel +_recvmsg$NOCANCEL ___recvmsg_nocancel +_select$DARWIN_EXTSN ___select +_select$DARWIN_EXTSN$NOCANCEL ___select_nocancel +_sem_wait$NOCANCEL ___sem_wait_nocancel +_sendmsg$NOCANCEL ___sendmsg_nocancel +_sendto$NOCANCEL ___sendto_nocancel +_stat ___stat64 +_statfs ___statfs64 +_waitid$NOCANCEL ___waitid_nocancel +_write$NOCANCEL ___write_nocancel +_writev$NOCANCEL ___writev_nocancel + +_accept ___accept +_bind ___bind +_connect ___connect +_getattrlist ___getattrlist +_getpeername ___getpeername +_getsockname ___getsockname +_lchown ___lchown +_listen ___listen +_mprotect ___mprotect +_msgctl ___msgctl +_msync ___msync +_recvfrom ___recvfrom +_recvmsg ___recvmsg +_sendmsg ___sendmsg +_sendto ___sendto +_setattrlist ___setattrlist +_setregid ___setregid +_setreuid ___setreuid +_shmctl ___shmctl +_socketpair ___socketpair diff --git a/libsyscall/Platforms/DriverKit/x86_64/syscall.map b/libsyscall/Platforms/DriverKit/x86_64/syscall.map index 9aa2064cb..da638981e 100644 --- a/libsyscall/Platforms/DriverKit/x86_64/syscall.map +++ b/libsyscall/Platforms/DriverKit/x86_64/syscall.map @@ -12,11 +12,11 @@ _msgrcv$NOCANCEL ___msgrcv_nocancel _msgsnd$NOCANCEL ___msgsnd_nocancel _msgsys ___msgsys _msync$NOCANCEL ___msync_nocancel -_open$NOCANCEL ___open_nocancel -_openat$NOCANCEL ___openat_nocancel _poll$NOCANCEL ___poll_nocancel _pread$NOCANCEL ___pread_nocancel +_preadv$NOCANCEL ___preadv_nocancel _pwrite$NOCANCEL ___pwrite_nocancel +_pwritev$NOCANCEL ___pwritev_nocancel _read$NOCANCEL ___read_nocancel _readv$NOCANCEL ___readv_nocancel _recvfrom$NOCANCEL ___recvfrom_nocancel @@ -44,8 +44,6 @@ _listen ___listen _mprotect ___mprotect _msgctl ___msgctl _msync ___msync -_open ___open -_openat ___openat _recvfrom ___recvfrom _recvmsg ___recvmsg _semctl ___semctl diff --git a/libsyscall/Platforms/MacOSX/arm64/syscall.map b/libsyscall/Platforms/MacOSX/arm64/syscall.map new file mode 100644 index 000000000..5eeff07c2 --- /dev/null +++ b/libsyscall/Platforms/MacOSX/arm64/syscall.map @@ -0,0 +1,54 @@ +_accept$NOCANCEL ___accept_nocancel +_aio_suspend$NOCANCEL ___aio_suspend_nocancel +_close$NOCANCEL ___close_nocancel +_connect$NOCANCEL ___connect_nocancel +_fstat ___fstat64 +_fstatat ___fstatat64 +_fstatfs ___fstatfs64 +_fsync$NOCANCEL ___fsync_nocancel +_getfsstat ___getfsstat64 +_getmntinfo ___getmntinfo64 +_lstat ___lstat64 +_msgrcv$NOCANCEL ___msgrcv_nocancel +_msgsnd$NOCANCEL ___msgsnd_nocancel +_msync$NOCANCEL ___msync_nocancel +_poll$NOCANCEL ___poll_nocancel +_pread$NOCANCEL ___pread_nocancel +_preadv$NOCANCEL ___preadv_nocancel +_pwrite$NOCANCEL ___pwrite_nocancel +_pwritev$NOCANCEL ___pwritev_nocancel +_read$NOCANCEL ___read_nocancel +_readv$NOCANCEL ___readv_nocancel +_recvfrom$NOCANCEL ___recvfrom_nocancel +_recvmsg$NOCANCEL ___recvmsg_nocancel +_select$DARWIN_EXTSN ___select +_select$DARWIN_EXTSN$NOCANCEL ___select_nocancel +_sem_wait$NOCANCEL ___sem_wait_nocancel +_sendmsg$NOCANCEL ___sendmsg_nocancel +_sendto$NOCANCEL ___sendto_nocancel +_stat ___stat64 +_statfs ___statfs64 
+_waitid$NOCANCEL ___waitid_nocancel +_write$NOCANCEL ___write_nocancel +_writev$NOCANCEL ___writev_nocancel + +_accept ___accept +_bind ___bind +_connect ___connect +_getattrlist ___getattrlist +_getpeername ___getpeername +_getsockname ___getsockname +_lchown ___lchown +_listen ___listen +_mprotect ___mprotect +_msgctl ___msgctl +_msync ___msync +_recvfrom ___recvfrom +_recvmsg ___recvmsg +_sendmsg ___sendmsg +_sendto ___sendto +_setattrlist ___setattrlist +_setregid ___setregid +_setreuid ___setreuid +_shmctl ___shmctl +_socketpair ___socketpair diff --git a/libsyscall/Platforms/MacOSX/i386/syscall.map b/libsyscall/Platforms/MacOSX/i386/syscall.map index 60976ed6d..6c8e43357 100644 --- a/libsyscall/Platforms/MacOSX/i386/syscall.map +++ b/libsyscall/Platforms/MacOSX/i386/syscall.map @@ -38,6 +38,7 @@ _msgsnd$UNIX2003 ___msgsnd _msgsys ___msgsys _msync$NOCANCEL$UNIX2003 ___msync_nocancel _msync$UNIX2003 ___msync +_open ___open _open$NOCANCEL$UNIX2003 ___open_nocancel _open$UNIX2003 ___open _openat$NOCANCEL ___openat_nocancel @@ -48,9 +49,13 @@ _poll$UNIX2003 ___poll _pread ___pread_nocancel _pread$NOCANCEL$UNIX2003 ___pread_nocancel _pread$UNIX2003 ___pread +_preadv ___preadv_nocancel +_preadv$NOCANCEL ___preadv_nocancel _pwrite ___pwrite_nocancel _pwrite$NOCANCEL$UNIX2003 ___pwrite_nocancel _pwrite$UNIX2003 ___pwrite +_pwritev ___pwritev_nocancel +_pwritev$NOCANCEL ___pwritev_nocancel _read ___read_nocancel _read$NOCANCEL$UNIX2003 ___read_nocancel _read$UNIX2003 ___read diff --git a/libsyscall/Platforms/MacOSX/x86_64/syscall.map b/libsyscall/Platforms/MacOSX/x86_64/syscall.map index f606b2619..ad89a185d 100644 --- a/libsyscall/Platforms/MacOSX/x86_64/syscall.map +++ b/libsyscall/Platforms/MacOSX/x86_64/syscall.map @@ -12,13 +12,13 @@ _msgrcv$NOCANCEL ___msgrcv_nocancel _msgsnd$NOCANCEL ___msgsnd_nocancel _msgsys ___msgsys _msync$NOCANCEL ___msync_nocancel -_open$NOCANCEL ___open_nocancel -_openat$NOCANCEL ___openat_nocancel _poll$NOCANCEL ___poll_nocancel _pread$NOCANCEL ___pread_nocancel _pwrite$NOCANCEL ___pwrite_nocancel +_pwritev$NOCANCEL ___pwritev_nocancel _read$NOCANCEL ___read_nocancel _readv$NOCANCEL ___readv_nocancel +_preadv$NOCANCEL ___preadv_nocancel _recvfrom$NOCANCEL ___recvfrom_nocancel _recvmsg$NOCANCEL ___recvmsg_nocancel _select$DARWIN_EXTSN ___select @@ -44,8 +44,6 @@ _listen ___listen _mprotect ___mprotect _msgctl ___msgctl _msync ___msync -_open ___open -_openat ___openat _recvfrom ___recvfrom _recvmsg ___recvmsg _semctl ___semctl diff --git a/libsyscall/Platforms/iPhoneOS/arm/syscall.map b/libsyscall/Platforms/iPhoneOS/arm/syscall.map index 2466d41b2..22d829344 100644 --- a/libsyscall/Platforms/iPhoneOS/arm/syscall.map +++ b/libsyscall/Platforms/iPhoneOS/arm/syscall.map @@ -23,11 +23,11 @@ _msgrcv$NOCANCEL ___msgrcv_nocancel _msgsnd$NOCANCEL ___msgsnd_nocancel _msync$NOCANCEL ___msync_nocancel _msgsys ___msgsys -_open$NOCANCEL ___open_nocancel -_openat$NOCANCEL ___openat_nocancel _poll$NOCANCEL ___poll_nocancel _pread$NOCANCEL ___pread_nocancel +_preadv$NOCANCEL ___preadv_nocancel _pwrite$NOCANCEL ___pwrite_nocancel +_pwritev$NOCANCEL ___pwritev_nocancel _read$NOCANCEL ___read_nocancel _readv$NOCANCEL ___readv_nocancel _recvfrom$NOCANCEL ___recvfrom_nocancel @@ -65,8 +65,6 @@ _socketpair ___socketpair _mprotect ___mprotect _setregid ___setregid _setreuid ___setreuid -_open ___open -_openat ___openat _connect ___connect _msync ___msync _sem_open ___sem_open diff --git a/libsyscall/Platforms/iPhoneOS/arm64/syscall.map b/libsyscall/Platforms/iPhoneOS/arm64/syscall.map 
index 20eb08fae..25b539382 100644 --- a/libsyscall/Platforms/iPhoneOS/arm64/syscall.map +++ b/libsyscall/Platforms/iPhoneOS/arm64/syscall.map @@ -24,7 +24,9 @@ _msgsnd$NOCANCEL ___msgsnd_nocancel _msync$NOCANCEL ___msync_nocancel _poll$NOCANCEL ___poll_nocancel _pread$NOCANCEL ___pread_nocancel +_preadv$NOCANCEL ___preadv_nocancel _pwrite$NOCANCEL ___pwrite_nocancel +_pwritev$NOCANCEL ___pwritev_nocancel _read$NOCANCEL ___read_nocancel _readv$NOCANCEL ___readv_nocancel _recvfrom$NOCANCEL ___recvfrom_nocancel diff --git a/libsyscall/custom/SYS.h b/libsyscall/custom/SYS.h index ffc6a8f2e..f79354e16 100644 --- a/libsyscall/custom/SYS.h +++ b/libsyscall/custom/SYS.h @@ -334,6 +334,9 @@ name: #elif __SYSCALL_32BIT_ARG_BYTES == 36 #define SYSCALL(name, nargs, cerror) SYSCALL_8(name, cerror) #define SYSCALL_NONAME(name, nargs, cerror) SYSCALL_NONAME_8(name, cerror) +#elif __SYSCALL_32BIT_ARG_BYTES == 40 +#define SYSCALL(name, nargs, cerror) SYSCALL_8(name, cerror) +#define SYSCALL_NONAME(name, nargs, cerror) SYSCALL_NONAME_8(name, cerror) #elif __SYSCALL_32BIT_ARG_BYTES == 44 #define SYSCALL(name, nargs, cerror) SYSCALL_8(name, cerror) #define SYSCALL_NONAME(name, nargs, cerror) SYSCALL_NONAME_8(name, cerror) @@ -443,15 +446,16 @@ pseudo: ;\ * TBD */ -#define DO_SYSCALL(num, cerror) \ - mov x16, #(num) %%\ - svc #SWI_SYSCALL %%\ - b.cc 2f %%\ - PUSH_FRAME %%\ - bl _##cerror %%\ - POP_FRAME %%\ - ret %%\ -2: +#define DO_SYSCALL(num, cerror) \ + mov x16, #(num) %%\ + svc #SWI_SYSCALL %%\ + b.cc 2f %%\ + ARM64_STACK_PROLOG %%\ + PUSH_FRAME %%\ + bl _##cerror %%\ + POP_FRAME %%\ + ARM64_STACK_EPILOG %%\ +2: #define MI_GET_ADDRESS(reg,var) \ adrp reg, var@page %%\ diff --git a/libsyscall/custom/__vfork.s b/libsyscall/custom/__vfork.s index 65a781efd..95a5c1219 100644 --- a/libsyscall/custom/__vfork.s +++ b/libsyscall/custom/__vfork.s @@ -167,10 +167,12 @@ L0: bx lr // return Lbotch: + stmfd sp!, {lr} MI_CALL_EXTERNAL(_cerror) // jump here on error mov r0,#-1 // set the error // reload values clobbered by cerror (so we can treat them as live in Lparent) MI_GET_ADDRESS(r3, __current_pid) // get address of __current_pid + ldmfd sp!, {lr} #ifndef _ARM_ARCH_6 mov r2, #0x80000000 // load "looking" value #endif @@ -196,6 +198,7 @@ Lparent: #elif defined(__arm64__) MI_ENTRY_POINT(___vfork) + ARM64_STACK_PROLOG MI_GET_ADDRESS(x9, __current_pid) Ltry_set_vfork: @@ -214,20 +217,22 @@ Ltry_set_vfork: // Child mov w0, #0 - ret + ARM64_STACK_EPILOG // Error case Lbotch: + PUSH_FRAME bl _cerror // Update errno mov w0, #-1 // Set return value MI_GET_ADDRESS(x9, __current_pid) // Reload current pid address + POP_FRAME // Fall through Lparent: ldxr w10, [x9] // Exclusive load current pid value add w10, w10, #1 // Increment (i.e. 
decrement vfork count) stxr w11, w10, [x9] // Attempt exclusive store of updated vfork count cbnz w11, Lparent // If exclusive store failed, retry - ret // Done, return + ARM64_STACK_EPILOG // Done, return #else #error Unsupported architecture diff --git a/libsyscall/mach/err_libkern.sub b/libsyscall/mach/err_libkern.sub index f865b9d7c..6443231d1 100644 --- a/libsyscall/mach/err_libkern.sub +++ b/libsyscall/mach/err_libkern.sub @@ -90,6 +90,11 @@ static const char * const err_codes_libkern_kext[] = { "(libkern/kext) kext request timed out", /* 0x19 */ "(libkern/kext) kext is stopping and cannot issue requests", /* 0x1a */ "(libkern/kext) system policy prevents loading", /* 0x1b */ + + "(libkern/kext) loading/mapping of a kext collection failed", /* 0x1c */ + "(libkern/kext) mapping of the SystemKC failed", /* 0x1d */ + "(libkern/kext) loading/mapping of the AuxiliaryKC failed", /* 0x1e */ + "(libkern/kext) loading of both Auxiliary and System KCs failed", /* 0x1f */ }; /* libkern is err_system(0x37) */ @@ -115,3 +120,6 @@ static const struct error_subsystem err_libkern_sub[] = { err_codes_libkern_kext, }, }; + + +/* vim: set ft=c ts=8 sw=4: */ diff --git a/libsyscall/mach/mach_eventlink.c b/libsyscall/mach/mach_eventlink.c new file mode 100644 index 000000000..b0f612dde --- /dev/null +++ b/libsyscall/mach/mach_eventlink.c @@ -0,0 +1,105 @@ +/* + * Copyright (c) 2019 Apple Computer, Inc. All rights reserved. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. + * + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ + */ + +#include +#include +#include +#include +#include +#include +#include + +/* + * __mach_eventlink* calls are bsd syscalls instead of mach traps because + * they need to return a 64 bit value in register and mach traps currently + * does not allow 64 bit return values. 
+ */ +uint64_t +__mach_eventlink_signal( + mach_port_t eventlink_port, + uint64_t signal_count); + +uint64_t +__mach_eventlink_wait_until( + mach_port_t eventlink_port, + uint64_t wait_signal_count, + uint64_t deadline, + kern_clock_id_t clock_id, + mach_eventlink_signal_wait_option_t option); + +uint64_t +__mach_eventlink_signal_wait_until( + mach_port_t eventlink_port, + uint64_t wait_count, + uint64_t signal_count, + uint64_t deadline, + kern_clock_id_t clock_id, + mach_eventlink_signal_wait_option_t option); + +kern_return_t +mach_eventlink_signal( + mach_port_t eventlink_port, + uint64_t signal_count) +{ + uint64_t retval = __mach_eventlink_signal(eventlink_port, signal_count); + + return decode_eventlink_error_from_retval(retval); +} + +kern_return_t +mach_eventlink_wait_until( + mach_port_t eventlink_port, + uint64_t *wait_count_ptr, + mach_eventlink_signal_wait_option_t option, + kern_clock_id_t clock_id, + uint64_t deadline) +{ + uint64_t retval; + + retval = __mach_eventlink_wait_until(eventlink_port, *wait_count_ptr, + deadline, clock_id, option); + + *wait_count_ptr = decode_eventlink_count_from_retval(retval); + return decode_eventlink_error_from_retval(retval); +} + +kern_return_t +mach_eventlink_signal_wait_until( + mach_port_t eventlink_port, + uint64_t *wait_count_ptr, + uint64_t signal_count, + mach_eventlink_signal_wait_option_t option, + kern_clock_id_t clock_id, + uint64_t deadline) +{ + uint64_t retval; + retval = __mach_eventlink_signal_wait_until(eventlink_port, *wait_count_ptr, + signal_count, deadline, clock_id, option); + *wait_count_ptr = decode_eventlink_count_from_retval(retval); + return decode_eventlink_error_from_retval(retval); +} diff --git a/libsyscall/mach/mach_eventlink.defs b/libsyscall/mach/mach_eventlink.defs new file mode 100644 index 000000000..6df3f18f5 --- /dev/null +++ b/libsyscall/mach/mach_eventlink.defs @@ -0,0 +1,96 @@ +/* + * Copyright (c) 2019 Apple Computer, Inc. All rights reserved. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. + * + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ + */ + +import ; + +/* + * Libsyscall mach_eventlink.defs should include mach/mach_eventlink.defs exported by + * kernel, putting a copy of the file instead to avoid the build failure due to + * ordering issues. 
Once the mach/mach_eventlink.defs is in build, replace the content + * on defs file with #inlude . + */ +subsystem +#if KERNEL_SERVER + KernelServer +#endif /* KERNEL_SERVER */ + mach_eventlink 716200; + +#include +#include +#include + +#ifndef _MACH_MACH_EVENTLINK_TYPE_DEFS +#define _MACH_MACH_EVENTLINK_TYPE_DEFS + +type eventlink_t = mach_port_t + ctype: mach_port_t +#if KERNEL_SERVER + intran: ipc_eventlink_t convert_port_to_eventlink(mach_port_t) + destructor: ipc_eventlink_deallocate(ipc_eventlink_t) +#endif /* KERNEL_SERVER */ + ; + +type eventlink_consume_ref_t = mach_port_move_send_t + ctype: mach_port_t +#if KERNEL_SERVER + intran: ipc_eventlink_t convert_port_to_eventlink(mach_port_t) + destructor: ipc_eventlink_deallocate(ipc_eventlink_t) +#endif /* KERNEL_SERVER */ + ; + +type eventlink_port_pair_t = array[2] of mach_port_t; +type mach_eventlink_create_option_t = uint32_t; +type mach_eventlink_associate_option_t = uint32_t; +type mach_eventlink_disassociate_option_t = uint32_t; +type mach_eventlink_signal_wait_option_t = uint32_t; + +#endif /* _MACH_MACH_EVENTLINK_TYPE_DEFS */ + +routine mach_eventlink_create( + task : task_t; + option : mach_eventlink_create_option_t; + out eventlink_pair : eventlink_port_pair_t); + +routine mach_eventlink_destroy( + eventlink : eventlink_consume_ref_t); + + +routine mach_eventlink_associate( + eventlink : eventlink_t; + thread : thread_t; + copyin_addr_wait : mach_vm_address_t; + copyin_mask_wait : uint64_t; + copyin_addr_signal : mach_vm_address_t; + copyin_mask_signal : uint64_t; + option : mach_eventlink_associate_option_t); + +routine mach_eventlink_disassociate( + eventlink : eventlink_t; + option : mach_eventlink_disassociate_option_t); + + /* vim: set ft=c : */ diff --git a/libsyscall/mach/mach_init.c b/libsyscall/mach/mach_init.c index 0c832bfbc..4206401a8 100644 --- a/libsyscall/mach/mach_init.c +++ b/libsyscall/mach/mach_init.c @@ -122,6 +122,12 @@ _mach_fork_child(void) #endif #endif +#if defined(__x86_64__) || defined(__i386__) +#define COMM_PAGE_KERNEL_PAGE_SHIFT_MIN_VERSION 14 +#else +#define COMM_PAGE_KERNEL_PAGE_SHIFT_MIN_VERSION 3 +#endif + void mach_init_doit(void) { @@ -130,15 +136,17 @@ mach_init_doit(void) _task_reply_port = mach_reply_port(); if (vm_kernel_page_shift == 0) { -#ifdef _COMM_PAGE_KERNEL_PAGE_SHIFT +#if defined(__x86_64__) || defined(__i386__) + if ((*((uint16_t *)_COMM_PAGE_VERSION) >= COMM_PAGE_KERNEL_PAGE_SHIFT_MIN_VERSION)) { + vm_kernel_page_shift = *(uint8_t*) _COMM_PAGE_KERNEL_PAGE_SHIFT; + } else { + vm_kernel_page_shift = I386_PGSHIFT; + } +#else vm_kernel_page_shift = *(uint8_t*) _COMM_PAGE_KERNEL_PAGE_SHIFT; +#endif vm_kernel_page_size = 1 << vm_kernel_page_shift; vm_kernel_page_mask = vm_kernel_page_size - 1; -#else - vm_kernel_page_size = PAGE_SIZE; - vm_kernel_page_mask = PAGE_MASK; - vm_kernel_page_shift = PAGE_SHIFT; -#endif /* _COMM_PAGE_KERNEL_PAGE_SHIFT */ } if (vm_page_shift == 0) { @@ -147,7 +155,11 @@ mach_init_doit(void) #elif defined(__arm__) vm_page_shift = *(uint8_t*) _COMM_PAGE_USER_PAGE_SHIFT_32; #else - vm_page_shift = vm_kernel_page_shift; + if ((*((uint16_t *)_COMM_PAGE_VERSION) >= COMM_PAGE_KERNEL_PAGE_SHIFT_MIN_VERSION)) { + vm_page_shift = *(uint8_t*) _COMM_PAGE_USER_PAGE_SHIFT_64; + } else { + vm_page_shift = vm_kernel_page_shift; + } #endif vm_page_size = 1 << vm_page_shift; vm_page_mask = vm_page_size - 1; diff --git a/libsyscall/mach/mach_msg.c b/libsyscall/mach/mach_msg.c index 1c1b7af74..6daa05377 100644 --- a/libsyscall/mach/mach_msg.c +++ b/libsyscall/mach/mach_msg.c 
@@ -769,3 +769,38 @@ mach_voucher_deallocate( { return mach_port_deallocate(mach_task_self(), voucher); } + +#undef mach_msg_priority_is_pthread_priority +int +mach_msg_priority_is_pthread_priority(mach_msg_priority_t pri) +{ + return mach_msg_priority_is_pthread_priority_inline(pri); +} + +#undef mach_msg_priority_encode +mach_msg_priority_t +mach_msg_priority_encode(mach_msg_qos_t override_qos, mach_msg_qos_t qos, int relpri) +{ + return mach_msg_priority_encode_inline(override_qos, qos, relpri); +} + +#undef mach_msg_priority_overide_qos +mach_msg_qos_t +mach_msg_priority_overide_qos(mach_msg_priority_t pri) +{ + return mach_msg_priority_overide_qos_inline(pri); +} + +#undef mach_msg_priority_qos +mach_msg_qos_t +mach_msg_priority_qos(mach_msg_priority_t pri) +{ + return mach_msg_priority_qos_inline(pri); +} + +#undef mach_msg_priority_relpri +int +mach_msg_priority_relpri(mach_msg_priority_t pri) +{ + return mach_msg_priority_relpri_inline(pri); +} diff --git a/libsyscall/mach/mach_port.c b/libsyscall/mach/mach_port.c index 62585338d..6a305be02 100644 --- a/libsyscall/mach/mach_port.c +++ b/libsyscall/mach/mach_port.c @@ -117,11 +117,7 @@ mach_port_destroy( { kern_return_t rv; - rv = _kernelrpc_mach_port_destroy_trap(task, name); - - if (rv == MACH_SEND_INVALID_DEST) { - rv = _kernelrpc_mach_port_destroy(task, name); - } + rv = _kernelrpc_mach_port_destroy(task, name); return rv; } diff --git a/libsyscall/mach/mach_right.c b/libsyscall/mach/mach_right.c index f1857fe65..5483125a2 100644 --- a/libsyscall/mach/mach_right.c +++ b/libsyscall/mach/mach_right.c @@ -32,7 +32,7 @@ #pragma mark Utilities -#define _assert_mach(__op, __kr) \ +#define _mach_assert(__op, __kr) \ do { \ if (kr != KERN_SUCCESS) { \ __builtin_trap(); \ @@ -79,8 +79,6 @@ mach_right_recv_destruct(mach_right_recv_t r, mach_right_send_t *s, if (s) { if (r.mrr_name != s->mrs_name) { - _os_set_crash_log_cause_and_message(s->mrs_name, - "api misuse: bad send right"); __builtin_trap(); } diff --git a/libsyscall/mach/mach_vm.c b/libsyscall/mach/mach_vm.c index f8fbf921d..8b8dfa3fa 100644 --- a/libsyscall/mach/mach_vm.c +++ b/libsyscall/mach/mach_vm.c @@ -58,8 +58,8 @@ mach_vm_allocate( rv = _kernelrpc_mach_vm_allocate(target, address, size, flags); } - if (__syscall_logger && rv == KERN_SUCCESS && !(flags & VM_MAKE_TAG(VM_MEMORY_STACK))) { - int userTagFlags = flags & VM_FLAGS_ALIAS_MASK; + int userTagFlags = flags & VM_FLAGS_ALIAS_MASK; + if (__syscall_logger && rv == KERN_SUCCESS && (userTagFlags != VM_MAKE_TAG(VM_MEMORY_STACK))) { __syscall_logger(stack_logging_type_vm_allocate | userTagFlags, (uintptr_t)target, (uintptr_t)size, 0, (uintptr_t)*address, 0); } @@ -184,9 +184,9 @@ mach_vm_map( offset, copy, cur_protection, max_protection, inheritance); } - if (__syscall_logger && rv == KERN_SUCCESS && !(flags & VM_MAKE_TAG(VM_MEMORY_STACK))) { + int userTagFlags = flags & VM_FLAGS_ALIAS_MASK; + if (__syscall_logger && rv == KERN_SUCCESS && (userTagFlags != VM_MAKE_TAG(VM_MEMORY_STACK))) { int eventTypeFlags = stack_logging_type_vm_allocate | stack_logging_type_mapped_file_or_shared_mem; - int userTagFlags = flags & VM_FLAGS_ALIAS_MASK; __syscall_logger(eventTypeFlags | userTagFlags, (uintptr_t)target, (uintptr_t)size, 0, (uintptr_t)*address, 0); } diff --git a/libsyscall/mach/string.h b/libsyscall/mach/string.h index 833c6af05..e558d9c17 100644 --- a/libsyscall/mach/string.h +++ b/libsyscall/mach/string.h @@ -42,8 +42,11 @@ int _mach_snprintf(char *buffer, int length, const char *fmt, ...) 
__printflike(3, 4); int _mach_vsnprintf(char *buffer, int length, const char *fmt, va_list ap) __printflike(3, 0); -// Actually in memcpy.c but MIG likes to include string.h +// These declarations are just for MIG, other users should include string/strings.h +// These symbols are defined in _libc_funcptr.c void *memcpy(void *dst0, const void *src0, size_t length); +void *memset(void *dst0, int c0, size_t length); +void bzero(void *dst0, size_t length); #endif /* _STRING_H_ */ diff --git a/libsyscall/wrappers/__get_cpu_capabilities.s b/libsyscall/wrappers/__get_cpu_capabilities.s index 86b0ee2a7..199ea8ad9 100644 --- a/libsyscall/wrappers/__get_cpu_capabilities.s +++ b/libsyscall/wrappers/__get_cpu_capabilities.s @@ -67,10 +67,10 @@ __get_cpu_capabilities: .globl __get_cpu_capabilities __get_cpu_capabilities: ldr x0, Lcommpage_cc_addr - ldr w0, [x0] + ldr x0, [x0] ret Lcommpage_cc_addr: -.quad _COMM_PAGE_CPU_CAPABILITIES +.quad _COMM_PAGE_CPU_CAPABILITIES64 #else #error Unsupported architecture diff --git a/libsyscall/wrappers/_libc_funcptr.c b/libsyscall/wrappers/_libc_funcptr.c index 8ebcc87c8..bb6083d23 100644 --- a/libsyscall/wrappers/_libc_funcptr.c +++ b/libsyscall/wrappers/_libc_funcptr.c @@ -131,6 +131,13 @@ bzero(void *s, size_t n) return _libkernel_string_functions->bzero(s, n); } +__attribute__((visibility("hidden"))) +void +__bzero(void *s, size_t n) +{ + return _libkernel_string_functions->bzero(s, n); +} + __attribute__((visibility("hidden"))) void * memchr(const void *s, int c, size_t n) diff --git a/libsyscall/wrappers/_libkernel_init.c b/libsyscall/wrappers/_libkernel_init.c index 127d65efd..6440b6098 100644 --- a/libsyscall/wrappers/_libkernel_init.c +++ b/libsyscall/wrappers/_libkernel_init.c @@ -35,10 +35,33 @@ extern int mach_init(void); #if TARGET_OS_OSX + +#if !defined(__i386__) + +#include "system-version-compat-support.h" +#include + +extern bool _system_version_compat_check_path_suffix(const char *orig_path); +extern int _system_version_compat_open_shim(int opened_fd, int openat_fd, const char *orig_path, int oflag, mode_t mode, + int (*close_syscall)(int), int (*open_syscall)(const char *, int, mode_t), + int (*openat_syscall)(int, const char *, int, mode_t), + int (*fcntl_syscall)(int, int, long)); + +extern bool (*system_version_compat_check_path_suffix)(const char *orig_path); +extern int (*system_version_compat_open_shim)(int opened_fd, int openat_fd, const char *orig_path, int oflag, mode_t mode, + int (*close_syscall)(int), int (*open_syscall)(const char *, int, mode_t), + int (*openat_syscall)(int, const char *, int, mode_t), + int (*fcntl_syscall)(int, int, long)); + +extern system_version_compat_mode_t system_version_compat_mode; + +int __sysctlbyname(const char *name, size_t namelen, void *oldp, size_t *oldlenp, void *newp, size_t newlen); +#endif /* !defined(__i386__) */ + __attribute__((visibility("default"))) extern bool _os_xbs_chrooted; bool _os_xbs_chrooted; -#endif +#endif /* TARGET_OS_OSX */ /* dlsym() funcptr is for legacy support in exc_catcher */ void* (*_dlsym)(void*, const char*) __attribute__((visibility("hidden"))); @@ -59,3 +82,42 @@ __libkernel_init(_libkernel_functions_t fns, } mach_init(); } + +void +__libkernel_init_late(_libkernel_late_init_config_t config) +{ + if (config->version >= 1) { +#if TARGET_OS_OSX && !defined(__i386__) + if (config->enable_system_version_compat) { + /* enable the version compatibility shim for this process (macOS only) */ + + /* first hook up the shims we reference from open{at}() */ + 
system_version_compat_check_path_suffix = _system_version_compat_check_path_suffix; + system_version_compat_open_shim = _system_version_compat_open_shim; + + system_version_compat_mode = SYSTEM_VERSION_COMPAT_MODE_MACOSX; + + /* + * tell the kernel the shim is enabled for this process so it can shim any + * necessary sysctls + */ + int enable = 1; + __sysctlbyname("kern.system_version_compat", strlen("kern.system_version_compat"), + NULL, NULL, &enable, sizeof(enable)); + } else if ((config->version >= 2) && config->enable_ios_version_compat) { + /* enable the iOS ProductVersion compatibility shim for this process */ + + /* first hook up the shims we reference from open{at}() */ + system_version_compat_check_path_suffix = _system_version_compat_check_path_suffix; + system_version_compat_open_shim = _system_version_compat_open_shim; + + system_version_compat_mode = SYSTEM_VERSION_COMPAT_MODE_IOS; + + /* + * We don't currently shim any sysctls for iOS apps running on macOS so we + * don't need to inform the kernel that this app has the SystemVersion shim enabled. + */ + } +#endif /* TARGET_OS_OSX && !defined(__i386__) */ + } +} diff --git a/libsyscall/wrappers/_libkernel_init.h b/libsyscall/wrappers/_libkernel_init.h index 42aba7be4..cc95f1b26 100644 --- a/libsyscall/wrappers/_libkernel_init.h +++ b/libsyscall/wrappers/_libkernel_init.h @@ -29,6 +29,7 @@ #ifndef __LIBKERNEL_INIT_H #define __LIBKERNEL_INIT_H +#include #include #include #include @@ -103,6 +104,12 @@ typedef const struct _libkernel_voucher_functions { /* Subsequent versions must only add pointers! */ } *_libkernel_voucher_functions_t; +typedef struct _libkernel_late_init_config { + unsigned long version; + bool enable_system_version_compat; + bool enable_ios_version_compat; +} *_libkernel_late_init_config_t; + struct ProgramVars; /* forward reference */ void __libkernel_init(_libkernel_functions_t fns, const char *envp[], @@ -112,4 +119,6 @@ kern_return_t __libkernel_platform_init(_libkernel_string_functions_t fns); kern_return_t __libkernel_voucher_init(_libkernel_voucher_functions_t fns); +void __libkernel_init_late(_libkernel_late_init_config_t config); + #endif // __LIBKERNEL_INIT_H diff --git a/libsyscall/wrappers/cancelable/fcntl-base.c b/libsyscall/wrappers/cancelable/fcntl-base.c index 2f84dba81..bd27ca3c6 100644 --- a/libsyscall/wrappers/cancelable/fcntl-base.c +++ b/libsyscall/wrappers/cancelable/fcntl-base.c @@ -64,11 +64,14 @@ fcntl(int fd, int cmd, ...) case F_ADDFILESIGS: case F_ADDFILESIGS_FOR_DYLD_SIM: case F_ADDFILESIGS_RETURN: + case F_ADDFILESIGS_INFO: + case F_ADDFILESUPPL: case F_FINDSIGS: case F_TRANSCODEKEY: case F_TRIM_ACTIVE_FILE: case F_SPECULATIVE_READ: case F_CHECK_LV: + case F_GETSIGSINFO: arg = va_arg(ap, void *); break; default: diff --git a/iokit/Kernel/x86_64/IOSharedLock.s b/libsyscall/wrappers/cancelable/open.c similarity index 60% rename from iokit/Kernel/x86_64/IOSharedLock.s rename to libsyscall/wrappers/cancelable/open.c index d5e5ecabc..5bb8d42d9 100644 --- a/iokit/Kernel/x86_64/IOSharedLock.s +++ b/libsyscall/wrappers/cancelable/open.c @@ -1,15 +1,15 @@ /* - * Copyright (c) 1998-2010 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2020 Apple Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in * compliance with the License. 
Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this * file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -17,44 +17,10 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * - * @APPLE_LICENSE_HEADER_END@ - */ - -#include - - TEXT - -/* - * void - * OSSpinLockUnlock(p) - * int *p; * - * Unlock the lock pointed to by p. + * @APPLE_LICENSE_HEADER_END@ */ -LEAF(_OSSpinLockUnlock, 0) -LEAF(_IOSpinUnlock, 0) -LEAF(_ev_unlock, 0) - movl $0, (%rdi) -END(_OSSpinLockUnlock) - - -/* - * int - * OSSpinLockTry(p) - * int *p; - * - * Try to lock p. Return zero if not successful. - */ +#define VARIANT_CANCELABLE -LEAF(_OSSpinLockTry, 0) -LEAF(_IOTrySpinLock, 0) -LEAF(_ev_try_lock, 0) - xorl %eax, %eax - orl $-1, %edx - lock - cmpxchgl %edx, (%rdi) - setz %dl - movzbl %dl, %eax -END(_OSSpinLockTry) +#include "../open-base.c" diff --git a/libsyscall/wrappers/init_cpu_capabilities.c b/libsyscall/wrappers/init_cpu_capabilities.c index 3feb3209d..5f10e21cb 100644 --- a/libsyscall/wrappers/init_cpu_capabilities.c +++ b/libsyscall/wrappers/init_cpu_capabilities.c @@ -40,7 +40,7 @@ _init_cpu_capabilities( void ) #elif defined(__arm__) || defined(__arm64__) -extern int _get_cpu_capabilities(void); +extern uint64_t _get_cpu_capabilities(void); int _cpu_capabilities = 0; int _cpu_has_altivec = 0; // DEPRECATED: use _cpu_capabilities instead diff --git a/libsyscall/wrappers/legacy/open.c b/libsyscall/wrappers/legacy/open.c deleted file mode 100644 index 8635b0c7f..000000000 --- a/libsyscall/wrappers/legacy/open.c +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Copyright (c) 2005, 2009 Apple Inc. All rights reserved. - * - * @APPLE_LICENSE_HEADER_START@ - * - * This file contains Original Code and/or Modifications of Original Code - * as defined in and that are subject to the Apple Public Source License - * Version 2.0 (the 'License'). You may not use this file except in - * compliance with the License. Please obtain a copy of the License at - * http://www.opensource.apple.com/apsl/ and read it before using this - * file. - * - * The Original Code and all software distributed under the License are - * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER - * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, - * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. - * Please see the License for the specific language governing rights and - * limitations under the License. - * - * @APPLE_LICENSE_HEADER_END@ - */ - -#ifndef NO_SYSCALL_LEGACY - -#define _NONSTD_SOURCE -#include - -#include -#include -#include - -int __open_nocancel(const char *path, int flags, mode_t mode); - -/* - * open stub: The legacy interface never automatically associated a controlling - * tty, so we always pass O_NOCTTY. - */ -int -open(const char *path, int flags, ...) 
-{ - mode_t mode = 0; - - if (flags & O_CREAT) { - va_list ap; - va_start(ap, flags); - // compiler warns to pass int (not mode_t) to va_arg - mode = va_arg(ap, int); - va_end(ap); - } - return __open_nocancel(path, flags | O_NOCTTY, mode); -} - -#endif /* NO_SYSCALL_LEGACY */ diff --git a/libsyscall/wrappers/libproc/libproc.c b/libsyscall/wrappers/libproc/libproc.c index 8cf27b6ac..c3ef51b83 100644 --- a/libsyscall/wrappers/libproc/libproc.c +++ b/libsyscall/wrappers/libproc/libproc.c @@ -37,6 +37,7 @@ #include "libproc_internal.h" int __proc_info(int callnum, int pid, int flavor, uint64_t arg, void * buffer, int buffersize); +int __proc_info_extended_id(int32_t callnum, int32_t pid, uint32_t flavor, uint32_t flags, uint64_t ext_id, uint64_t arg, user_addr_t buffer, int32_t buffersize); __private_extern__ int proc_setthreadname(void * buffer, int buffersize); int __process_policy(int scope, int action, int policy, int policy_subtype, proc_policy_attribute_t * attrp, pid_t target_pid, uint64_t target_threadid); int proc_rlimit_control(pid_t pid, int flavor, void *arg); @@ -234,7 +235,7 @@ proc_regionfilename(int pid, uint64_t address, void * buffer, uint32_t buffersiz } retval = proc_pidinfo(pid, PROC_PIDREGIONPATH, (uint64_t)address, &path, sizeof(struct proc_regionpath)); - if (retval != -1) { + if (retval != 0) { return (int)(strlcpy(buffer, path.prpo_path, buffersize)); } return 0; @@ -273,6 +274,31 @@ proc_pidpath(int pid, void * buffer, uint32_t buffersize) return 0; } +int +proc_pidpath_audittoken(audit_token_t *audittoken, void * buffer, uint32_t buffersize) +{ + int retval, len; + + if (buffersize < PROC_PIDPATHINFO_SIZE) { + errno = ENOMEM; + return 0; + } + if (buffersize > PROC_PIDPATHINFO_MAXSIZE) { + errno = EOVERFLOW; + return 0; + } + + int pid = audittoken->val[5]; + int idversion = audittoken->val[7]; + + retval = __proc_info_extended_id(PROC_INFO_CALL_PIDINFO, pid, PROC_PIDPATHINFO, PIF_COMPARE_IDVERSION, (uint64_t)idversion, + (uint64_t)0, buffer, buffersize); + if (retval != -1) { + len = (int)strlen(buffer); + return len; + } + return 0; +} int proc_libversion(int *major, int * minor) @@ -1003,3 +1029,69 @@ proc_suppress(__unused pid_t pid, __unused uint64_t *generation) #endif /* !TARGET_OS_SIMULATOR */ #endif /* !(TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR) */ + +int +proc_set_no_smt(void) +{ + if (__process_policy(PROC_POLICY_SCOPE_PROCESS, PROC_POLICY_ACTION_APPLY, PROC_POLICY_NO_SMT, 0, NULL, getpid(), (uint64_t)0) == -1) { + return errno; + } + return 0; +} + +int +proc_setthread_no_smt(void) +{ + extern uint64_t __thread_selfid(void); + if (__process_policy(PROC_POLICY_SCOPE_THREAD, PROC_POLICY_ACTION_APPLY, PROC_POLICY_NO_SMT, 0, NULL, 0, __thread_selfid()) == -1) { + return errno; + } + return 0; +} + +int +proc_set_csm(uint32_t flags) +{ + const uint32_t mask = PROC_CSM_ALL | PROC_CSM_TECS | PROC_CSM_NOSMT; + if ((flags & ~mask) != 0) { + return EINVAL; + } + + if (flags & (PROC_CSM_NOSMT | PROC_CSM_ALL)) { + if (__process_policy(PROC_POLICY_SCOPE_PROCESS, PROC_POLICY_ACTION_APPLY, PROC_POLICY_NO_SMT, 0, NULL, getpid(), (uint64_t)0) == -1) { + return errno; + } + } + + if (flags & (PROC_CSM_TECS | PROC_CSM_ALL)) { + if (__process_policy(PROC_POLICY_SCOPE_PROCESS, PROC_POLICY_ACTION_APPLY, PROC_POLICY_TECS, 0, NULL, getpid(), (uint64_t)0) == -1) { + return errno; + } + } + + return 0; +} + +int +proc_setthread_csm(uint32_t flags) +{ + extern uint64_t __thread_selfid(void); + const uint32_t mask = PROC_CSM_ALL | PROC_CSM_TECS | PROC_CSM_NOSMT; + if ((flags & 
~mask) != 0) { + return EINVAL; + } + + if (flags & (PROC_CSM_NOSMT | PROC_CSM_ALL)) { + if (__process_policy(PROC_POLICY_SCOPE_THREAD, PROC_POLICY_ACTION_APPLY, PROC_POLICY_NO_SMT, 0, NULL, 0, __thread_selfid()) == -1) { + return errno; + } + } + + if (flags & (PROC_CSM_TECS | PROC_CSM_ALL)) { + if (__process_policy(PROC_POLICY_SCOPE_THREAD, PROC_POLICY_ACTION_APPLY, PROC_POLICY_TECS, 0, NULL, 0, __thread_selfid()) == -1) { + return errno; + } + } + + return 0; +} diff --git a/libsyscall/wrappers/libproc/libproc.h b/libsyscall/wrappers/libproc/libproc.h index 053e039b7..9142dcd57 100644 --- a/libsyscall/wrappers/libproc/libproc.h +++ b/libsyscall/wrappers/libproc/libproc.h @@ -31,10 +31,12 @@ #include #include #include +#include /* for audit_token_t */ #include #include +#include /* * This header file contains private interfaces to obtain process information. @@ -98,6 +100,7 @@ int proc_name(int pid, void * buffer, uint32_t buffersize) __OSX_AVAILABLE_START int proc_regionfilename(int pid, uint64_t address, void * buffer, uint32_t buffersize) __OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0); int proc_kmsgbuf(void * buffer, uint32_t buffersize) __OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0); int proc_pidpath(int pid, void * buffer, uint32_t buffersize) __OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0); +int proc_pidpath_audittoken(audit_token_t *audittoken, void * buffer, uint32_t buffersize) API_AVAILABLE(macos(10.16), ios(14.0), watchos(7.0), tvos(14.0)); int proc_libversion(int *major, int * minor) __OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0); /* @@ -126,6 +129,38 @@ int proc_clear_dirty(pid_t pid, uint32_t flags); int proc_terminate(pid_t pid, int *sig); +/* + * NO_SMT means that on an SMT CPU, this thread must be scheduled alone, + * with the paired CPU idle. + * + * Set NO_SMT on the current proc (all existing and future threads) + * This attribute is inherited on fork and exec + */ +int proc_set_no_smt(void) __API_AVAILABLE(macos(10.16)); + +/* Set NO_SMT on the current thread */ +int proc_setthread_no_smt(void) __API_AVAILABLE(macos(10.16)); + +/* + * CPU Security Mitigation APIs + * + * Set CPU security mitigation on the current proc (all existing and future threads) + * This attribute is inherited on fork and exec + */ +int proc_set_csm(uint32_t flags) __API_AVAILABLE(macos(10.16)); + +/* Set CPU security mitigation on the current thread */ +int proc_setthread_csm(uint32_t flags) __API_AVAILABLE(macos(10.16)); + +/* + * flags for CPU Security Mitigation APIs + * PROC_CSM_ALL should be used in most cases, + * the individual flags are provided only for performance evaluation etc + */ +#define PROC_CSM_ALL 0x0001 /* Set all available mitigations */ +#define PROC_CSM_NOSMT 0x0002 /* Set NO_SMT - see above */ +#define PROC_CSM_TECS 0x0004 /* Execute VERW on every return to user mode */ + #ifdef PRIVATE #include /* diff --git a/libsyscall/wrappers/mach_absolute_time.s b/libsyscall/wrappers/mach_absolute_time.s index 5b3b36541..89c15592a 100644 --- a/libsyscall/wrappers/mach_absolute_time.s +++ b/libsyscall/wrappers/mach_absolute_time.s @@ -153,6 +153,7 @@ _mach_absolute_time: #elif defined(__arm__) #include +#include /* * If userspace access to the timebase is supported (indicated through the commpage), @@ -180,7 +181,7 @@ _mach_absolute_time: ldrb r0, [ip, #((_COMM_PAGE_USER_TIMEBASE) - (_COMM_PAGE_TIMEBASE_OFFSET))] cmp r0, #USER_TIMEBASE_NONE // Are userspace reads supported? 
beq _mach_absolute_time_kernel // If not, go to the kernel - isb // Prevent speculation on CNTPCT across calls + isb // Prevent speculation on CNTVCT across calls // (see ARMV7C.b section B8.1.2, ARMv8 section D6.1.2) push {r4, r5, r7, lr} // Push a frame add r7, sp, #8 @@ -206,7 +207,7 @@ L_mach_absolute_time_user: .align 2 .globl _mach_absolute_time_kernel _mach_absolute_time_kernel: - mov r12, #-3 // Load the magic MAT number + mov r12, #MACH_ARM_TRAP_ABSTIME // Load the magic MAT number swi #SWI_SYSCALL bx lr @@ -214,13 +215,14 @@ _mach_absolute_time_kernel: .align 2 .globl _mach_continuous_time_kernel _mach_continuous_time_kernel: - mov r12, #-4 // Load the magic MCT number + mov r12, #MACH_ARM_TRAP_CONTTIME // Load the magic MCT number swi #SWI_SYSCALL bx lr #elif defined(__arm64__) #include +#include /* * If userspace access to the timebase is supported (indicated through the commpage), @@ -244,11 +246,11 @@ _mach_absolute_time: ldrb w2, [x3, #((_COMM_PAGE_USER_TIMEBASE) - (_COMM_PAGE_TIMEBASE_OFFSET))] cmp x2, #USER_TIMEBASE_NONE // Are userspace reads supported? b.eq _mach_absolute_time_kernel // If not, go to the kernel - isb // Prevent speculation on CNTPCT across calls + isb // Prevent speculation on CNTVCT across calls // (see ARMV7C.b section B8.1.2, ARMv8 section D6.1.2) L_mach_absolute_time_user: ldr x1, [x3] // Load the offset - mrs x0, CNTPCT_EL0 // Read the timebase + mrs x0, CNTVCT_EL0 // Read the timebase ldr x2, [x3] // Load the offset cmp x1, x2 // Compare our offset values... b.ne L_mach_absolute_time_user // If they changed, try again @@ -261,7 +263,7 @@ L_mach_absolute_time_user: .align 2 .globl _mach_absolute_time_kernel _mach_absolute_time_kernel: - mov w16, #-3 // Load the magic MAT number + mov w16, #MACH_ARM_TRAP_ABSTIME // Load the magic MAT number svc #SWI_SYSCALL ret @@ -269,7 +271,7 @@ _mach_absolute_time_kernel: .align 2 .globl _mach_continuous_time_kernel _mach_continuous_time_kernel: - mov w16, #-4 // Load the magic MCT number + mov w16, #MACH_ARM_TRAP_CONTTIME // Load the magic MCT number svc #SWI_SYSCALL ret diff --git a/libsyscall/wrappers/mach_continuous_time.c b/libsyscall/wrappers/mach_continuous_time.c index c128ac1b7..e07aba3bf 100644 --- a/libsyscall/wrappers/mach_continuous_time.c +++ b/libsyscall/wrappers/mach_continuous_time.c @@ -62,8 +62,9 @@ _mach_continuous_hwclock(uint64_t *cont_time __unused) #define ISB_SY 0xf uint8_t cont_hwclock = *((uint8_t*)_COMM_PAGE_CONT_HWCLOCK); if (cont_hwclock) { + volatile uint64_t *base_ptr = (volatile uint64_t*)_COMM_PAGE_CONT_HW_TIMEBASE; __builtin_arm_isb(ISB_SY); - *cont_time = __builtin_arm_rsr64("CNTPCT_EL0"); + *cont_time = __builtin_arm_rsr64("CNTVCT_EL0") + *base_ptr; return KERN_SUCCESS; } #endif diff --git a/libsyscall/wrappers/mach_get_times.c b/libsyscall/wrappers/mach_get_times.c index 4a8a3f19e..daf4e9bb7 100644 --- a/libsyscall/wrappers/mach_get_times.c +++ b/libsyscall/wrappers/mach_get_times.c @@ -64,12 +64,7 @@ mach_get_times(uint64_t* absolute_time, uint64_t* cont_time, struct timespec *tp if (__gettimeofday_with_mach(&tv, NULL, &tbr) < 0) { return KERN_FAILURE; } else if (tbr == 0) { -#if !(TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR) - // On an old kernel, likely chroot'ed. 
(remove next year) - tbr = mach_absolute_time(); -#else __builtin_trap(); -#endif } } diff --git a/libsyscall/wrappers/open-base.c b/libsyscall/wrappers/open-base.c new file mode 100644 index 000000000..7a970c231 --- /dev/null +++ b/libsyscall/wrappers/open-base.c @@ -0,0 +1,164 @@ +/* + * Copyright (c) 2020 Apple Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +#include +#include +#include +#include +#include + +#if !defined(__i386__) + +#if TARGET_OS_OSX +#include + +/* + * On macOS we have support for shimming calls to open the SystemVersion plist. + * This support is enabled for specific (generally older) binaries from + * libSystem_initializer()/__libkernel_init_late() by populating these function pointers + * with the corresponding functions built into libsyscall_dynamic (see open-compat-shim.c). + */ +extern bool (*system_version_compat_check_path_suffix)(const char *orig_path); +extern int (*system_version_compat_open_shim)(int opened_fd, int openat_fd, const char *orig_path, int oflag, mode_t mode, + int (*close_syscall)(int), int (*open_syscall)(const char *, int, mode_t), + int (*openat_syscall)(int, const char *, int, mode_t), + int (*fcntl_syscall)(int, int, long)); +#endif /* TARGET_OS_OSX */ + +#ifdef VARIANT_CANCELABLE +int __open(const char *path, int oflag, mode_t mode); +int __openat(int fd, const char *path, int oflag, mode_t mode); + +#define OPEN_SYSCALL __open +#define OPENAT_SYSCALL __openat + +#if TARGET_OS_OSX +int __fcntl(int fd, int cmd, long arg); +int close(int fd); + +#define FCNTL_SYSCALL __fcntl +#define CLOSE_SYSCALL close +#endif /* TARGET_OS_OSX */ + +#else /* VARIANT_CANCELABLE */ +int __open_nocancel(const char *path, int oflag, mode_t mode); +int __openat_nocancel(int fd, const char *path, int oflag, mode_t mode); + +#define OPEN_SYSCALL __open_nocancel +#define OPENAT_SYSCALL __openat_nocancel + +#if TARGET_OS_OSX +int __fcntl_nocancel(int fd, int cmd, long arg); +int __close_nocancel(int fd); + +#define FCNTL_SYSCALL __fcntl_nocancel +#define CLOSE_SYSCALL __close_nocancel +#endif /* TARGET_OS_OSX */ +#endif /* VARIANT_CANCELABLE */ + +#ifdef VARIANT_CANCELABLE +int +open(const char *path, int oflag, ...) +#else /* VARIANT_CANCELABLE */ +int +open$NOCANCEL(const char *path, int oflag, ...) 
+#endif +{ + int opened_fd = 0; + mode_t mode = 0; + + if (oflag & O_CREAT) { + va_list ap; + va_start(ap, oflag); + /* compiler warns to pass int (not mode_t) to va_arg */ + mode = va_arg(ap, int); + va_end(ap); + } + + opened_fd = OPEN_SYSCALL(path, oflag, mode); +#if !TARGET_OS_OSX + return opened_fd; +#else /* TARGET_OS_OSX */ + if (opened_fd < 0) { + return opened_fd; + } + + /* check to see if system_version_compat is enabled for this process */ + if (system_version_compat_check_path_suffix == NULL) { + return opened_fd; + } + + /* check to see if the suffix of the path we opened matches one we are shimming */ + if (!system_version_compat_check_path_suffix(path)) { + return opened_fd; + } + + /* at this point we call into the version compat open shim and return values from there */ + return system_version_compat_open_shim(opened_fd, -1, path, oflag, mode, CLOSE_SYSCALL, OPEN_SYSCALL, + NULL, FCNTL_SYSCALL); +#endif /* TARGET_OS_OSX */ +} + +#ifdef VARIANT_CANCELABLE +int +openat(int fd, const char *path, int oflag, ...) +#else /* VARIANT_CANCELABLE */ +int +openat$NOCANCEL(int fd, const char *path, int oflag, ...) +#endif +{ + int opened_fd = 0; + mode_t mode = 0; + + if (oflag & O_CREAT) { + va_list ap; + va_start(ap, oflag); + // compiler warns to pass int (not mode_t) to va_arg + mode = va_arg(ap, int); + va_end(ap); + } + + opened_fd = OPENAT_SYSCALL(fd, path, oflag, mode); +#if !TARGET_OS_OSX + return opened_fd; +#else + if (opened_fd < 0) { + return opened_fd; + } + + /* check to see if system_version_compat is enabled for this process */ + if (system_version_compat_check_path_suffix == NULL) { + return opened_fd; + } + + /* check to see if the suffix of the path we opened matches one we are shimming */ + if (!system_version_compat_check_path_suffix(path)) { + return opened_fd; + } + + /* at this point we call into the version compat open shim and return values from there */ + return system_version_compat_open_shim(opened_fd, fd, path, oflag, mode, CLOSE_SYSCALL, NULL, + OPENAT_SYSCALL, FCNTL_SYSCALL); +#endif /* !TARGET_OS_OSX */ +} +#endif /* !defined(__i386__) */ diff --git a/libsyscall/wrappers/proc.c b/libsyscall/wrappers/proc.c index ce95bce97..3beba6aaf 100644 --- a/libsyscall/wrappers/proc.c +++ b/libsyscall/wrappers/proc.c @@ -27,7 +27,6 @@ #include #include -#if !TARGET_OS_OSX extern uint64_t __memorystatus_available_memory(void); size_t @@ -35,4 +34,3 @@ os_proc_available_memory(void) { return (size_t)__memorystatus_available_memory(); } -#endif diff --git a/libsyscall/wrappers/skywalk/os_channel.c b/libsyscall/wrappers/skywalk/os_channel.c index 7d7762110..ac5c65f6b 100644 --- a/libsyscall/wrappers/skywalk/os_channel.c +++ b/libsyscall/wrappers/skywalk/os_channel.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015-2019 Apple Inc. All rights reserved. + * Copyright (c) 2015-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * diff --git a/libsyscall/wrappers/skywalk/os_packet.c b/libsyscall/wrappers/skywalk/os_packet.c index 6eda01c17..ac5c65f6b 100644 --- a/libsyscall/wrappers/skywalk/os_packet.c +++ b/libsyscall/wrappers/skywalk/os_packet.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015-2017 Apple Inc. All rights reserved. + * Copyright (c) 2015-2020 Apple Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * diff --git a/libsyscall/wrappers/spawn/posix_spawn.c b/libsyscall/wrappers/spawn/posix_spawn.c index 73dfc1a3e..42cb5db95 100644 --- a/libsyscall/wrappers/spawn/posix_spawn.c +++ b/libsyscall/wrappers/spawn/posix_spawn.c @@ -97,6 +97,8 @@ posix_spawnattr_init(posix_spawnattr_t *attr) /* Default is no binary preferences, i.e. use normal grading */ memset((*psattrp)->psa_binprefs, 0, sizeof((*psattrp)->psa_binprefs)); + memset((*psattrp)->psa_subcpuprefs, 0xff /* CPU_SUBTYPE_ANY */, + sizeof((*psattrp)->psa_subcpuprefs)); /* Default is no port actions to take */ (*psattrp)->psa_ports = NULL; @@ -157,6 +159,18 @@ posix_spawnattr_init(posix_spawnattr_t *attr) (*psattrp)->psa_darwin_role = POSIX_SPAWN_DARWIN_ROLE_NONE; (*psattrp)->psa_max_addr = 0; + + (*psattrp)->psa_no_smt = false; + (*psattrp)->psa_tecs = false; + + /* Default is no subsystem root path */ + (*psattrp)->psa_subsystem_root_path = NULL; + + /* Default is no platform given */ + (*psattrp)->psa_platform = 0; + + /* Default is no option */ + (*psattrp)->psa_options = PSA_OPTION_NONE; } return err; @@ -188,6 +202,7 @@ static int posix_spawn_destroycoalition_info_np(posix_spawnattr_t *); static int posix_spawn_destroypersona_info_np(posix_spawnattr_t *); static int posix_spawn_destroyposix_cred_info_np(posix_spawnattr_t *); static int posix_spawn_destroymacpolicy_info_np(posix_spawnattr_t *); +static int posix_spawn_destroysubsystem_root_path_np(posix_spawnattr_t *); int posix_spawnattr_destroy(posix_spawnattr_t *attr) @@ -204,6 +219,7 @@ posix_spawnattr_destroy(posix_spawnattr_t *attr) posix_spawn_destroypersona_info_np(attr); posix_spawn_destroyposix_cred_info_np(attr); posix_spawn_destroymacpolicy_info_np(attr); + posix_spawn_destroysubsystem_root_path_np(attr); free(psattr); *attr = NULL; @@ -420,13 +436,60 @@ posix_spawnattr_getbinpref_np(const posix_spawnattr_t * __restrict attr, _posix_spawnattr_t psattr; int i = 0; - if (attr == NULL || *attr == NULL) { + if (attr == NULL || *attr == NULL || pref == NULL) { + return EINVAL; + } + + psattr = *(_posix_spawnattr_t *)attr; + for (i = 0; i < count && i < NBINPREFS; i++) { + pref[i] = psattr->psa_binprefs[i]; + } + + if (ocount) { + *ocount = i; + } + return 0; +} + +/* + * posix_spawnattr_getarchpref_np + * + * Description: Obtain the value of the spawn binary preferences attribute from + * the spawn attributes object referenced by 'attr' and place the + * result into the memory referenced by 'pref' and 'subpref'. + * + * Parameters: attr The spawn attributes object whose + * binary preferences are to be retrieved + * count The size of the cpu_type_t array + * pref An array of cpu types + * subpref An array of subcpu types + * ocount The actual number copied + * + * Returns: 0 No cpu/subcpu preferences found + * > 0 The number of types (less than + * count) copied over from 'attr'. 
+ * + * Implicit Returns: + * *pref (modified) The cpu preferences array + * from the spawn attributes object + * *subpref (modified) The subcpu preferences array + * from the spawn attributes object + */ +int +posix_spawnattr_getarchpref_np(const posix_spawnattr_t * __restrict attr, + size_t count, cpu_type_t *pref, cpu_subtype_t *subpref, size_t * __restrict ocount) +{ + _posix_spawnattr_t psattr; + int i = 0; + + if (attr == NULL || *attr == NULL || pref == NULL || subpref == NULL) { return EINVAL; } psattr = *(_posix_spawnattr_t *)attr; - for (i = 0; i < count && i < 4; i++) { + for (i = 0; i < count && i < NBINPREFS; i++) { pref[i] = psattr->psa_binprefs[i]; + subpref[i] = psattr->psa_subcpuprefs[i]; } if (ocount) { @@ -628,22 +691,82 @@ posix_spawnattr_setbinpref_np(posix_spawnattr_t * __restrict attr, _posix_spawnattr_t psattr; int i = 0; - if (attr == NULL || *attr == NULL) { + if (attr == NULL || *attr == NULL || pref == NULL) { return EINVAL; } psattr = *(_posix_spawnattr_t *)attr; - for (i = 0; i < count && i < 4; i++) { + for (i = 0; i < count && i < NBINPREFS; i++) { psattr->psa_binprefs[i] = pref[i]; + psattr->psa_subcpuprefs[i] = CPU_SUBTYPE_ANY; } /* return number of binprefs copied over */ if (ocount) { *ocount = i; } + + for (; i < NBINPREFS; i++) { + psattr->psa_binprefs[i] = 0; + psattr->psa_subcpuprefs[i] = CPU_SUBTYPE_ANY; + } + return 0; } +/* + * posix_spawnattr_setarchpref_np + * + * Description: Set the universal binary preferences for the spawn attribute + * value referenced by 'attr' from the memory containing the + * cpu_type_t array referenced by 'pref', the cpu_subtype_t array + * referenced by 'subpref' and size of 'count' + * + * Parameters: attr The spawn attributes object whose + * binary preferences are to be set + * count Size of the array pointed to by 'pref' + * pref cpu_type_t array of cpu binary preferences + * subpref cpu_subtype_t array of subcpu binary preferences + * ocount The actual number copied + * + * Returns: 0 No preferences copied + * > 0 Number of preferences copied + * + * Note: The posix_spawnattr_t currently only holds four + * cpu_type_t/cpu_subtype_t pairs. + * If the caller provides more preferences than this limit, they + * will be ignored, as reflected in the return value. 
+ */ +int +posix_spawnattr_setarchpref_np(posix_spawnattr_t * __restrict attr, + size_t count, cpu_type_t *pref, cpu_subtype_t *subpref, + size_t * __restrict ocount) +{ + _posix_spawnattr_t psattr; + int i = 0; + + if (attr == NULL || *attr == NULL || pref == NULL || subpref == NULL) { + return EINVAL; + } + + psattr = *(_posix_spawnattr_t *)attr; + for (i = 0; i < count && i < NBINPREFS; i++) { + psattr->psa_binprefs[i] = pref[i]; + psattr->psa_subcpuprefs[i] = subpref[i]; + } + + /* return number of binprefs copied over */ + if (ocount) { + *ocount = i; + } + + for (; i < NBINPREFS; i++) { + psattr->psa_binprefs[i] = 0; + psattr->psa_subcpuprefs[i] = CPU_SUBTYPE_ANY; + } + + return 0; +} /* * posix_spawnattr_setpcontrol_np @@ -873,6 +996,115 @@ posix_spawn_destroyposix_cred_info_np(posix_spawnattr_t *attr) return 0; } +/* + * posix_spawn_set_subsystem_root_path + * Description: Set path as the subsystem root path for attr; clears if NULL + */ +int +posix_spawnattr_set_subsystem_root_path_np(posix_spawnattr_t *attr, char *path) +{ + _posix_spawnattr_t psattr; + char * buf = NULL; + char * old_buf; + size_t bytes; + + if (attr == NULL || *attr == NULL) { + return EINVAL; + } + + psattr = *(_posix_spawnattr_t *)attr; + + if (path) { + buf = malloc(MAXPATHLEN); + + if (buf == NULL) { + return ENOMEM; + } + + bytes = strlcpy(buf, path, MAXPATHLEN); + + if (bytes >= MAXPATHLEN) { + free(buf); + return ENAMETOOLONG; + } + } + + old_buf = psattr->psa_subsystem_root_path; + psattr->psa_subsystem_root_path = buf; + + free(old_buf); + + return 0; +} + +/* + * posix_spawn_destroy_subsystem_root_path_np + * Description: clean up subsystem_root_path string in posix_spawnattr_t attr + */ +static int +posix_spawn_destroysubsystem_root_path_np(posix_spawnattr_t *attr) +{ + _posix_spawnattr_t psattr; + char * subsystem_root_path; + + if (attr == NULL || *attr == NULL) { + return EINVAL; + } + + psattr = *(_posix_spawnattr_t *)attr; + subsystem_root_path = psattr->psa_subsystem_root_path; + + if (subsystem_root_path == NULL) { + return EINVAL; + } + + psattr->psa_subsystem_root_path = NULL; + free(subsystem_root_path); + return 0; +} + +/* + * posix_spawnattr_set_platform_np + * Description: sets the platform in posix_spawnattr_t attr + * + * To be implemented. 
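
The subsystem-root-path attribute above is a plain setter on the attribute object, like the CPU security mitigation helpers added further down in this file. A hedged sketch of a launcher combining the two (the root path is a made-up example, <spawn_private.h> is assumed to be the install location of the private header edited below, and POSIX_SPAWN_NP_CSM_ALL comes from the spawn.h hunk below):

#include <sys/types.h>
#include <spawn.h>
#include <spawn_private.h>   /* assumed install path of the private header edited below */

/* Spawn a helper against an alternate subsystem root with all CPU security
 * mitigations enabled; "/Library/ExampleSubsystemRoot" is illustrative only. */
static int
spawn_mitigated_helper(pid_t *pid, const char *path, char *const argv[], char *const envp[])
{
    posix_spawnattr_t attr;
    char root[] = "/Library/ExampleSubsystemRoot";
    int err = posix_spawnattr_init(&attr);

    if (err != 0) {
        return err;
    }
    err = posix_spawnattr_set_subsystem_root_path_np(&attr, root);
    if (err == 0) {
        /* POSIX_SPAWN_NP_CSM_ALL implies both the no-SMT and TECS mitigations. */
        err = posix_spawnattr_set_csm_np(&attr, POSIX_SPAWN_NP_CSM_ALL);
    }
    if (err == 0) {
        err = posix_spawn(pid, path, NULL, &attr, argv, envp);
    }
    (void)posix_spawnattr_destroy(&attr);
    return err;
}

The setter copies the string into its own MAXPATHLEN buffer, so the caller's storage does not need to outlive the attribute object.
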
+ */ +int +posix_spawnattr_set_platform_np(posix_spawnattr_t *attr, int platform, uint32_t flags) +{ + _posix_spawnattr_t psattr; + + if (attr == NULL || *attr == NULL) { + return EINVAL; + } + + psattr = *(_posix_spawnattr_t *)attr; + psattr->psa_platform = platform; + + (void)flags; + return 0; +} + +/* + * posix_spawnattr_disable_ptr_auth_a_keys_np + * Description: Set flag to disable A keys for Ptr Auth + */ +int +posix_spawnattr_disable_ptr_auth_a_keys_np(posix_spawnattr_t *attr, uint32_t flags) +{ + _posix_spawnattr_t psattr; + + if (attr == NULL || *attr == NULL) { + return EINVAL; + } + + psattr = *(_posix_spawnattr_t *)attr; + + psattr->psa_options |= PSA_OPTION_PLUGIN_HOST_DISABLE_A_KEYS; + (void)flags; + return 0; +} + /* * posix_spawn_appendportaction_np * Description: append a port action, grow the array if necessary @@ -1777,6 +2009,20 @@ posix_spawnattr_set_registered_ports_np(posix_spawnattr_t * __restrict attr, return err; } +int +posix_spawnattr_set_ptrauth_task_port_np(posix_spawnattr_t * __restrict attr, + mach_port_t port) +{ + int err = 0; + + _ps_port_action_t action = { + .port_type = PSPA_PTRAUTH_TASK_PORT, + .new_port = port, + }; + + err = posix_spawn_appendportaction_np(attr, &action); + return err; +} static _ps_mac_policy_extension_t * @@ -2138,6 +2384,47 @@ posix_spawnattr_set_max_addr_np(const posix_spawnattr_t * __restrict attr, uint6 return 0; } +int +posix_spawnattr_setnosmt_np(const posix_spawnattr_t * __restrict attr) +{ + _posix_spawnattr_t psattr; + + if (attr == NULL || *attr == NULL) { + return EINVAL; + } + + psattr = *(_posix_spawnattr_t *)attr; + psattr->psa_no_smt = true; + + return 0; +} + +int +posix_spawnattr_set_csm_np(const posix_spawnattr_t * __restrict attr, uint32_t flags) +{ + _posix_spawnattr_t psattr; + + if (attr == NULL || *attr == NULL) { + return EINVAL; + } + + const uint32_t mask = POSIX_SPAWN_NP_CSM_ALL | POSIX_SPAWN_NP_CSM_TECS | POSIX_SPAWN_NP_CSM_NOSMT; + if ((flags & ~mask) != 0) { + return EINVAL; + } + + psattr = *(_posix_spawnattr_t *)attr; + + if (flags & (POSIX_SPAWN_NP_CSM_TECS | POSIX_SPAWN_NP_CSM_ALL)) { + psattr->psa_tecs = true; + } + if (flags & (POSIX_SPAWN_NP_CSM_NOSMT | POSIX_SPAWN_NP_CSM_ALL)) { + psattr->psa_no_smt = true; + } + + return 0; +} + static struct _posix_spawn_posix_cred_info * _posix_spawnattr_get_posix_creds_info(_posix_spawnattr_t psattr) { @@ -2426,6 +2713,10 @@ posix_spawn(pid_t * __restrict pid, const char * __restrict path, ad.posix_cred_info_size = sizeof(struct _posix_spawn_posix_cred_info); ad.posix_cred_info = psattr->psa_posix_cred_info; } + if (psattr->psa_subsystem_root_path != NULL) { + ad.subsystem_root_path_size = MAXPATHLEN; + ad.subsystem_root_path = psattr->psa_subsystem_root_path; + } } if (file_actions != NULL && *file_actions != NULL) { _posix_spawn_file_actions_t psactsp = diff --git a/libsyscall/wrappers/spawn/spawn.h b/libsyscall/wrappers/spawn/spawn.h index 1bc1171fd..bf13027fe 100644 --- a/libsyscall/wrappers/spawn/spawn.h +++ b/libsyscall/wrappers/spawn/spawn.h @@ -135,12 +135,18 @@ __BEGIN_DECLS int posix_spawnattr_getbinpref_np(const posix_spawnattr_t * __restrict, size_t, cpu_type_t *__restrict, size_t *__restrict) __API_AVAILABLE(macos(10.5), ios(2.0)) __SPI_AVAILABLE(watchos(2.0), tvos(9.0), bridgeos(1.0)); +int posix_spawnattr_getarchpref_np(const posix_spawnattr_t * __restrict, + size_t, cpu_type_t *__restrict, cpu_subtype_t *__restrict, size_t *__restrict) __API_AVAILABLE(macos(10.16), ios(14.0)) __SPI_AVAILABLE(watchos(7.0), tvos(14.0), bridgeos(5.0)); + int 
posix_spawnattr_setauditsessionport_np(posix_spawnattr_t * __restrict, mach_port_t) __API_AVAILABLE(macos(10.6), ios(3.2)); int posix_spawnattr_setbinpref_np(posix_spawnattr_t * __restrict, size_t, cpu_type_t *__restrict, size_t *__restrict) __API_AVAILABLE(macos(10.5), ios(2.0)) __SPI_AVAILABLE(watchos(2.0), tvos(9.0), bridgeos(1.0)); +int posix_spawnattr_setarchpref_np(posix_spawnattr_t * __restrict, + size_t, cpu_type_t *__restrict, cpu_subtype_t *__restrict, size_t *__restrict) __API_AVAILABLE(macos(10.16), ios(14.0)) __SPI_AVAILABLE(watchos(7.0), tvos(14.0), bridgeos(5.0)); + int posix_spawnattr_setexceptionports_np(posix_spawnattr_t * __restrict, exception_mask_t, mach_port_t, exception_behavior_t, thread_state_flavor_t) __API_AVAILABLE(macos(10.5), ios(2.0)) __SPI_AVAILABLE(watchos(2.0), tvos(9.0), bridgeos(1.0)); @@ -150,6 +156,22 @@ int posix_spawnattr_setspecialport_np(posix_spawnattr_t * __restrict, int posix_spawnattr_setsuidcredport_np(posix_spawnattr_t * __restrict, mach_port_t) __SPI_AVAILABLE(ios(13.0), macos(10.15)); +int posix_spawnattr_setnosmt_np(const posix_spawnattr_t * __restrict attr) __API_AVAILABLE(macos(10.16)); + +/* + * Set CPU Security Mitigation on the spawned process + * This attribute affects all threads and is inherited on fork and exec + */ +int posix_spawnattr_set_csm_np(const posix_spawnattr_t * __restrict attr, uint32_t flags) __API_AVAILABLE(macos(10.16)); +/* + * flags for CPU Security Mitigation attribute + * POSIX_SPAWN_NP_CSM_ALL should be used in most cases, + * the individual flags are provided only for performance evaluation etc + */ +#define POSIX_SPAWN_NP_CSM_ALL 0x0001 +#define POSIX_SPAWN_NP_CSM_NOSMT 0x0002 +#define POSIX_SPAWN_NP_CSM_TECS 0x0004 + int posix_spawn_file_actions_addinherit_np(posix_spawn_file_actions_t *, int) __API_AVAILABLE(macos(10.7), ios(4.3)) __SPI_AVAILABLE(watchos(2.0), tvos(9.0), bridgeos(1.0)); diff --git a/libsyscall/wrappers/spawn/spawn_private.h b/libsyscall/wrappers/spawn/spawn_private.h index aa2897d33..6510b8535 100644 --- a/libsyscall/wrappers/spawn/spawn_private.h +++ b/libsyscall/wrappers/spawn/spawn_private.h @@ -30,6 +30,8 @@ #include #include +__BEGIN_DECLS + int posix_spawnattr_getpcontrol_np(const posix_spawnattr_t * __restrict, int * __restrict) __API_AVAILABLE(macos(10.6), ios(3.2)); int posix_spawnattr_setpcontrol_np(posix_spawnattr_t *, const int) __API_AVAILABLE(macos(10.6), ios(3.2)); @@ -60,6 +62,10 @@ int posix_spawnattr_set_importancewatch_port_np(posix_spawnattr_t * __restri int posix_spawnattr_set_registered_ports_np(posix_spawnattr_t * __restrict attr, mach_port_t portarray[], uint32_t count) __API_AVAILABLE(macos(10.15), ios(13.0), tvos(13.0), watchos(6.0)); +int +posix_spawnattr_set_ptrauth_task_port_np(posix_spawnattr_t * __restrict attr, + mach_port_t port) __API_AVAILABLE(macos(10.16), ios(14.0), tvos(14.0), watchos(7.0)); + #define POSIX_SPAWN_MACPOLICYINFO_WITHSIZE 1 int posix_spawnattr_getmacpolicyinfo_np(const posix_spawnattr_t * __restrict, const char *, void **, size_t *) __API_AVAILABLE(macos(10.9), ios(7.0)); int posix_spawnattr_setmacpolicyinfo_np(posix_spawnattr_t * __restrict, const char *, void *, size_t) __API_AVAILABLE(macos(10.9), ios(7.0)); @@ -84,6 +90,14 @@ int posix_spawnattr_set_gid_np(const posix_spawnattr_t * __restrict, gid_t) int posix_spawnattr_set_groups_np(const posix_spawnattr_t * __restrict, int, gid_t * __restrict, uid_t) __API_AVAILABLE(macos(10.15), ios(13.0), tvos(13.0), watchos(6.0)); int posix_spawnattr_set_login_np(const posix_spawnattr_t * 
__restrict, const char * __restrict) __API_AVAILABLE(macos(10.15), ios(13.0), tvos(13.0), watchos(6.0)); +int posix_spawnattr_set_subsystem_root_path_np(posix_spawnattr_t *attr, char *path); __API_AVAILABLE(macos(11.0), ios(14.0), tvos(14.0), watchos(7.0)); + +int posix_spawnattr_set_platform_np(posix_spawnattr_t *attr, int platform, uint32_t flags); __API_AVAILABLE(macos(11.0), ios(14.0), tvos(14.0), watchos(7.0)); + +int posix_spawnattr_disable_ptr_auth_a_keys_np(posix_spawnattr_t *attr, uint32_t flags); __API_AVAILABLE(macos(11.0), ios(14.0), tvos(14.0), watchos(7.0)); + int posix_spawn_file_actions_add_fileportdup2_np(posix_spawn_file_actions_t * __restrict, mach_port_t, int) __API_AVAILABLE(macos(10.15), ios(13.0), tvos(13.0), watchos(6.0)); +__END_DECLS + #endif /* !defined _SPAWN_PRIVATE_H_*/ diff --git a/libsyscall/wrappers/stackshot.c b/libsyscall/wrappers/stackshot.c index d819d3470..c1e11905d 100644 --- a/libsyscall/wrappers/stackshot.c +++ b/libsyscall/wrappers/stackshot.c @@ -53,6 +53,7 @@ stackshot_config_create(void) s_config->sc_delta_timestamp = 0; s_config->sc_buffer = 0; s_config->sc_size = 0; + s_config->sc_pagetable_mask = 0; return s_config; } @@ -91,7 +92,7 @@ stackshot_config_set_pid(stackshot_config_t *stackshot_config, int pid) * 0 on success */ int -stackshot_config_set_flags(stackshot_config_t *stackshot_config, uint32_t flags) +stackshot_config_set_flags(stackshot_config_t *stackshot_config, uint64_t flags) { stackshot_config_t *s_config; @@ -236,6 +237,35 @@ stackshot_config_set_delta_timestamp(stackshot_config_t *stackshot_config, uint6 return 0; } +/* + * stackshot_config_set_pagetable_mask: set the level mask for pagetable dumping + * + * Each bit of the mask corresponds to a level in the paging structure. Bit 0 + * corresponds to Level 0, bit 1 to level 1, and so on. It is undefined what + * happens when a bit is set that's higher than the current maximum level of + * pagetable structures. + * + * When using this setter, you must also pass STACKSHOT_PAGE_TABLES as a flag + * before invoking stackshot, otherwise this setter is a no-operation. + * + * Inputs: stackshot_config - a pointer to a stackshot_config_t + * level_mask - the pagetable level mask, as described above + * + * Outputs: -1 if the passed stackshot config is NULL or there is existing stackshot buffer set. 
+ * 0 on success + */ +int +stackshot_config_set_pagetable_mask(stackshot_config_t *stackshot_config, uint32_t pagetable_mask) +{ + if (stackshot_config == NULL || (void *)stackshot_config->sc_buffer != NULL) { + return -1; + } + + stackshot_config->sc_pagetable_mask = pagetable_mask; + + return 0; +} + /* * stackshot_config_dealloc_buffer: dealloc the stackshot buffer and reset the size so that a diff --git a/libsyscall/wrappers/string/strings.h b/libsyscall/wrappers/string/strings.h index f2167a542..e2fad0b55 100644 --- a/libsyscall/wrappers/string/strings.h +++ b/libsyscall/wrappers/string/strings.h @@ -64,13 +64,17 @@ void *memmove(void *, const void *, size_t); void *memset(void *, int, size_t); int strcmp(const char *, const char *); +int strncmp(const char *, const char *, size_t); char *strcpy(char *, const char *); size_t strlen(const char *); +size_t strnlen(const char *, size_t); size_t strlcpy(char *, const char *, size_t); +size_t strlcat(char *, const char *, size_t); char *strsep(char **, const char *); void bcopy(const void *, void *, size_t); void bzero(void *, size_t); +void __bzero(void *, size_t); char *index(const char *, int); char *strchr(const char *, int); @@ -80,6 +84,7 @@ void *_libkernel_memmove(void *, const void *, size_t); void *_libkernel_memset(void *, int, size_t); int _libkernel_strcmp(const char *, const char *); char *_libkernel_strcpy(char *, const char *); +size_t _libkernel_strnlen(const char *, size_t); size_t _libkernel_strlen(const char *); size_t _libkernel_strlcpy(char *, const char *, size_t); void _libkernel_bzero(void *, size_t); diff --git a/libsyscall/wrappers/system-version-compat-support.c b/libsyscall/wrappers/system-version-compat-support.c new file mode 100644 index 000000000..7d7dba1d8 --- /dev/null +++ b/libsyscall/wrappers/system-version-compat-support.c @@ -0,0 +1,54 @@ +/* + * Copyright (c) 2020 Apple Inc. All rights reserved. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. + * + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ + */ +#include + +#if TARGET_OS_OSX && !defined(__i386__) + +/* + * Support for the open compatibilty shim for macOS. These NULL + * function pointers need to be built into libsyscall_static. 
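
Returning to the stackshot change above: the new mask only takes effect when the caller also opts into page-table collection, so a typical sequence looks like the following sketch. The header location, stackshot_capture_with_config(), and stackshot_config_dealloc() are the existing stackshot SPI and are assumed here; the mask value is arbitrary:

#include <sys/stackshot.h>   /* assumed SPI header for stackshot_config_* and STACKSHOT_* flags */

/* Capture a stackshot that also dumps the first two page-table levels. */
static int
capture_with_pagetables(void)
{
    stackshot_config_t *cfg = stackshot_config_create();
    int err;

    if (cfg == NULL) {
        return -1;
    }
    /* STACKSHOT_PAGE_TABLES must be set or the mask below is a no-op. */
    stackshot_config_set_flags(cfg, STACKSHOT_PAGE_TABLES);
    stackshot_config_set_pagetable_mask(cfg, (1U << 0) | (1U << 1));   /* levels 0 and 1 */

    err = stackshot_capture_with_config(cfg);
    stackshot_config_dealloc(cfg);
    return err;
}

A real caller would read the result out with the existing buffer accessors before deallocating; the sketch only shows where the new setter fits in the sequence.
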
They + * are hooked up to the actual functions from libsyscall_dynamic + * if/when they are used (generally only for older binaries where we + * need to shim the version information). + */ + +#include "system-version-compat-support.h" +#include +#include +#include + +__attribute__((visibility("hidden"))) +bool (*system_version_compat_check_path_suffix)(const char *orig_path) = NULL; +system_version_compat_mode_t system_version_compat_mode = SYSTEM_VERSION_COMPAT_MODE_DISABLED; + +__attribute__((visibility("hidden"))) +int (*system_version_compat_open_shim)(int opened_fd, int openat_fd, const char *orig_path, int oflag, mode_t mode, + int (*close_syscall)(int), int (*open_syscall)(const char *, int, mode_t), + int (*openat_syscall)(int, const char *, int, mode_t), + int (*fcntl_syscall)(int, int, long)) = NULL; +#endif /* TARGET_OS_OSX && && !defined(__i386__) */ diff --git a/libsyscall/wrappers/system-version-compat-support.h b/libsyscall/wrappers/system-version-compat-support.h new file mode 100644 index 000000000..8d05ffe87 --- /dev/null +++ b/libsyscall/wrappers/system-version-compat-support.h @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2020 Apple Inc. All rights reserved. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. + * + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ + */ +#ifndef __SYSTEM_VERSION_COMPAT_SUPPORT_H +#define __SYSTEM_VERSION_COMPAT_SUPPORT_H + +typedef enum system_version_compat_mode { + SYSTEM_VERSION_COMPAT_MODE_DISABLED = 0, + SYSTEM_VERSION_COMPAT_MODE_MACOSX = 1, + SYSTEM_VERSION_COMPAT_MODE_IOS = 2, +} system_version_compat_mode_t; + +#endif /* __SYSTEM_VERSION_COMPAT_SUPPORT_H */ diff --git a/libsyscall/wrappers/system-version-compat.c b/libsyscall/wrappers/system-version-compat.c new file mode 100644 index 000000000..c9a3321ac --- /dev/null +++ b/libsyscall/wrappers/system-version-compat.c @@ -0,0 +1,211 @@ +/* + * Copyright (c) 2020 Apple Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. 
+ * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +#include + +#if TARGET_OS_OSX && !defined(__i386__) +/* + * Support for shimming calls to open() to the SystemVersion plist on macOS for older + * binaries. This code is only built into libsyscall_dynamic. + */ + +#include +#include +#include +#include +#include +#include + +#include "system-version-compat-support.h" + +#define PLAT_PREFIX_IOS "iOS" +#define PLAT_PREFIX_MACOS "" + +#define COMPAT_SUFFIX_MACOS "Compat" +#define COMPAT_SUFFIX_IOS "" + +#define SYSTEM_VERSION_PLIST_FILENAME "SystemVersion.plist" +#define SYSTEM_VERSION_PLIST_PATH ("/System/Library/CoreServices/" SYSTEM_VERSION_PLIST_FILENAME) + +#define SYSTEM_VERSION_COMPAT_PLIST_FILENAME(platform_prefix, compat_suffix) (platform_prefix "SystemVersion" compat_suffix ".plist") + +#define SYSTEM_VERSION_PLIST_FILENAMELEN strlen(SYSTEM_VERSION_PLIST_FILENAME) + +#define SYSTEM_VERSION_COMPAT_PLIST_FILENAMELEN(platform_prefix, compat_suffix) strlen(SYSTEM_VERSION_COMPAT_PLIST_FILENAME(platform_prefix, compat_suffix)) + +#define SYSTEM_VERSION_PLIST_PATHLEN strlen(SYSTEM_VERSION_PLIST_PATH) + +extern system_version_compat_mode_t system_version_compat_mode; + +/* + * This routine determines whether the path specified matches the path of the SystemVersion plist file + * we are shimming accesses to. If the file name suffix matches, it's expected we'll call into the + * version_compat_open_shim() routine below which will do a full comparison on the expanded path. + * + * Parameters: orig_path The path suffix that was provided to the open{at} call. + * + * Returns: true if the path suffix matches the SystemVersion plist path we're shimming + * false otherwise + */ +__attribute__((visibility("hidden"))) +bool +_system_version_compat_check_path_suffix(const char *orig_path) +{ + size_t path_str_len = strnlen(orig_path, MAXPATHLEN); + /* + * If the length of the filename we're opening is shorter than + * SYSTEM_VERSION_PLIST_FILENAME, bail. + */ + if (path_str_len < SYSTEM_VERSION_PLIST_FILENAMELEN) { + return false; + } + + /* If the path we're accessing doesn't end in SYSTEM_VERSION_PLIST_FILENAME, bail. */ + if (strncmp(&orig_path[path_str_len - SYSTEM_VERSION_PLIST_FILENAMELEN], SYSTEM_VERSION_PLIST_FILENAME, + SYSTEM_VERSION_PLIST_FILENAMELEN) != 0) { + return false; + } + + /* If modifying the path specified would exceed MAXPATHLEN, bail */ + if (path_str_len == MAXPATHLEN) { + return false; + } + + size_t compat_len = (system_version_compat_mode == SYSTEM_VERSION_COMPAT_MODE_IOS) ? SYSTEM_VERSION_COMPAT_PLIST_FILENAMELEN(PLAT_PREFIX_IOS, COMPAT_SUFFIX_IOS) : SYSTEM_VERSION_COMPAT_PLIST_FILENAMELEN(PLAT_PREFIX_MACOS, COMPAT_SUFFIX_MACOS); + if ((compat_len - SYSTEM_VERSION_PLIST_FILENAMELEN) > (MAXPATHLEN - path_str_len - 1)) { + return false; + } + + /* Indicate that we should */ + return true; +} + +/* + * This routine determines whether we are trying to open the SystemVersion plist at SYSTEM_VERSION_PLIST_PATH. + * It's only used on targets that have the compatibility shim enabled (mainly older binaries). 
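
The filename macros above are plain string concatenation, so the shim targets work out per platform as follows; a tiny self-contained check (the defines are repeated verbatim so the snippet stands alone):

#include <assert.h>
#include <string.h>

#define PLAT_PREFIX_IOS      "iOS"
#define PLAT_PREFIX_MACOS    ""
#define COMPAT_SUFFIX_MACOS  "Compat"
#define COMPAT_SUFFIX_IOS    ""
#define SYSTEM_VERSION_COMPAT_PLIST_FILENAME(p, s) (p "SystemVersion" s ".plist")

int
main(void)
{
    /* iOS-mode processes are redirected to e.g. .../CoreServices/iOSSystemVersion.plist */
    assert(strcmp(SYSTEM_VERSION_COMPAT_PLIST_FILENAME(PLAT_PREFIX_IOS, COMPAT_SUFFIX_IOS),
        "iOSSystemVersion.plist") == 0);
    /* macOS-mode processes are redirected to e.g. .../CoreServices/SystemVersionCompat.plist */
    assert(strcmp(SYSTEM_VERSION_COMPAT_PLIST_FILENAME(PLAT_PREFIX_MACOS, COMPAT_SUFFIX_MACOS),
        "SystemVersionCompat.plist") == 0);
    return 0;
}

In other words, the shim keeps the directory of the original path and only swaps the final SystemVersion.plist component for the platform-specific compatibility file.
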
+ * + * Note that this routine should * ABSOLUTELY NOT * be used as a general shim for accesses at all paths. We replace + * what the developer generally expected to be one system call with multiple additional system calls. We're ok + * with doing this here because we only do it for calls to open against files that match this very specific pattern + * (named SystemVersion.plist), but doing so for all calls to open could result in various issues. Specifically it's + * difficult to ensure the same cancellation semantics (around EINTR etc) that developers generally expect when replacing + * a single system call with multiple. + * + * This routine should return with the same semantics as the general open system calls that it is shimming - specifically + * it should leave errno and the return value matching what developers expect. + * + * It's expected that _version_compat_check_path_suffix() above was called prior to this call and returned true. + * + * We take the close, open and fcntl syscalls as parameters to make sure the variant we call matches the original call + * to open{at}. + * + * Parameters: opened_fd The file descriptor that was opened in the original open{at} call + * openat_fd The file descriptor passed to the original openat call (only used when use_openat is true) + * orig_path The original path suffix passed to open{at} + * oflag The original oflag passed to open{at} + * mode The original mode passed to open{at} + * close_syscall The appropriate syscall to use for closing file descriptors + * open_syscall The syscall that should be used for a new call to open. + * fctnl_syscall The appopriate syscall to use for fcntl. + * + * Returns: The original file descriptor if the open{at} access wasn't to SYSTEM_VERSION_PLIST_PATH + * A new file descriptor (with the original closed) if the expanded path matches SYSTEM_VERSION_PLIST_PATH + * The original file descriptor if the full path suffix does not match SYSTEM_VERSION_PLIST_PATH + * -1 (with errno set to EINTR) if the new open or fcntl calls received EINTR (with all new fds closed) + */ +__attribute__((visibility("hidden"))) +int +_system_version_compat_open_shim(int opened_fd, int openat_fd, const char *orig_path, int oflag, mode_t mode, + int (*close_syscall)(int), int (*open_syscall)(const char *, int, mode_t), + int (*openat_syscall)(int, const char *, int, mode_t), + int (*fcntl_syscall)(int, int, long)) +{ + /* stash the errno from the original open{at} call */ + int stashed_errno = errno; + char new_path[MAXPATHLEN]; + size_t path_str_len = strnlen(orig_path, sizeof(new_path)); + + /* Resolve the full path of the file we've opened */ + if (fcntl_syscall(opened_fd, F_GETPATH, new_path)) { + if (errno == EINTR) { + /* If we got EINTR, we close the file that was opened and return -1 & EINTR */ + close_syscall(opened_fd); + errno = EINTR; + return -1; + } else { + /* otherwise we return the original file descriptor that was requested */ + errno = stashed_errno; + return opened_fd; + } + } + + /* Check to see whether the path matches SYSTEM_VERSION_PLIST_PATH */ + size_t newpathlen = strnlen(new_path, MAXPATHLEN); + if (newpathlen != SYSTEM_VERSION_PLIST_PATHLEN) { + errno = stashed_errno; + return opened_fd; + } + + if (strncmp(new_path, SYSTEM_VERSION_PLIST_PATH, SYSTEM_VERSION_PLIST_PATHLEN) != 0) { + errno = stashed_errno; + return opened_fd; + } + + new_path[0] = '\0'; + + /* + * It looks like we're trying to access the SystemVersion plist. Let's try to open + * the compatibility plist and return that instead if it exists. 
+ */ + size_t prefix_str_len = path_str_len - SYSTEM_VERSION_PLIST_FILENAMELEN; + strlcpy(new_path, orig_path, (prefix_str_len + 1)); + if (system_version_compat_mode == SYSTEM_VERSION_COMPAT_MODE_IOS) { + strlcat(new_path, SYSTEM_VERSION_COMPAT_PLIST_FILENAME(PLAT_PREFIX_IOS, COMPAT_SUFFIX_IOS), MAXPATHLEN); + } else { + strlcat(new_path, SYSTEM_VERSION_COMPAT_PLIST_FILENAME(PLAT_PREFIX_MACOS, COMPAT_SUFFIX_MACOS), MAXPATHLEN); + } + + int new_fd = -1; + if (openat_syscall != NULL) { + new_fd = openat_syscall(openat_fd, new_path, oflag, mode); + } else { + new_fd = open_syscall(new_path, oflag, mode); + } + if ((new_fd == -1) && (errno == ENOENT)) { + /* The file doesn't exist, so return the original fd and errno. */ + errno = stashed_errno; + return opened_fd; + } + + /* + * Otherwise we close the first file we opened and populate errno + * with errno from the call to open{at}. (Note this covers the EINTR + * case and other failures). + */ + stashed_errno = errno; + close_syscall(opened_fd); + errno = stashed_errno; + return new_fd; +} + +#endif /* TARGET_OS_OSX && !defined(__i386__) */ diff --git a/libsyscall/wrappers/unix03/mmap.c b/libsyscall/wrappers/unix03/mmap.c index 203301e3f..c2fd55f78 100644 --- a/libsyscall/wrappers/unix03/mmap.c +++ b/libsyscall/wrappers/unix03/mmap.c @@ -46,19 +46,21 @@ mmap(void *addr, size_t len, int prot, int flags, int fildes, off_t off) * Preemptory failures: * * o off is not a multiple of the page size + * [ This is enforced by the kernel with MAP_UNIX03 ] * o flags does not contain either MAP_PRIVATE or MAP_SHARED * o len is zero + * + * Now enforced by the kernel when the MAP_UNIX03 flag is provided. */ extern void cerror_nocancel(int); - if ((off & PAGE_MASK) || - (((flags & MAP_PRIVATE) != MAP_PRIVATE) && + if ((((flags & MAP_PRIVATE) != MAP_PRIVATE) && ((flags & MAP_SHARED) != MAP_SHARED)) || (len == 0)) { cerror_nocancel(EINVAL); return MAP_FAILED; } - void *ptr = __mmap(addr, len, prot, flags, fildes, off); + void *ptr = __mmap(addr, len, prot, flags | MAP_UNIX03, fildes, off); if (__syscall_logger) { int stackLoggingFlags = stack_logging_type_vm_allocate; diff --git a/libsyscall/wrappers/varargs_wrappers.s b/libsyscall/wrappers/varargs_wrappers.s index fae37a483..cf24010b6 100644 --- a/libsyscall/wrappers/varargs_wrappers.s +++ b/libsyscall/wrappers/varargs_wrappers.s @@ -54,70 +54,6 @@ MI_ENTRY_POINT(_sem_open) POP_FRAME ARM64_STACK_EPILOG -/* - * int open(const char *name, int oflag, ...); - * int __open(const char *name, int oflag, int mode, int value); - */ -MI_ENTRY_POINT(_open) - ARM64_STACK_PROLOG - PUSH_FRAME -#if __LP64__ - ldr x2, [fp, #16] -#else - ldr w2, [fp, #16] -#endif - MI_CALL_EXTERNAL(___open) - POP_FRAME - ARM64_STACK_EPILOG - -/* - * int open_nocancel(const char *name, int oflag, ...); - * int __open_nocancel(const char *name, int oflag, int mode); - */ -MI_ENTRY_POINT(_open$NOCANCEL) - ARM64_STACK_PROLOG - PUSH_FRAME -#if __LP64__ - ldr x2, [fp, #16] -#else - ldr w2, [fp, #16] -#endif - MI_CALL_EXTERNAL(___open_nocancel) - POP_FRAME - ARM64_STACK_EPILOG - -/* - * int openat(int fd,const char *name, int oflag, ...); - * int __openat(int fd, const char *name, int oflag, int mode, int value); - */ -MI_ENTRY_POINT(_openat) - ARM64_STACK_PROLOG - PUSH_FRAME -#if __LP64__ - ldr x3, [fp, #16] -#else - ldr w3, [fp, #16] -#endif - MI_CALL_EXTERNAL(___openat) - POP_FRAME - ARM64_STACK_EPILOG - -/* - * int openat_nocancel(int fd, const char *name, int oflag, ...); - * int __openat_nocancel(int fd, const char *name, int oflag, int 
mode); - */ -MI_ENTRY_POINT(_openat$NOCANCEL) - ARM64_STACK_PROLOG - PUSH_FRAME -#if __LP64__ - ldr x3, [fp, #16] -#else - ldr w3, [fp, #16] -#endif - MI_CALL_EXTERNAL(___openat_nocancel) - POP_FRAME - ARM64_STACK_EPILOG - /* * int shm_open(const char *, int, ...); * int __shm_open(const char*, int oflag, int mode); diff --git a/libsyscall/wrappers/work_interval.c b/libsyscall/wrappers/work_interval.c index 643c68173..89d2d0cce 100644 --- a/libsyscall/wrappers/work_interval.c +++ b/libsyscall/wrappers/work_interval.c @@ -78,6 +78,26 @@ work_interval_create(work_interval_t *interval_handle, uint32_t create_flags) return 0; } +int +work_interval_get_flags_from_port(mach_port_t port, uint32_t *flags) +{ + if (!MACH_PORT_VALID(port) || flags == NULL) { + errno = EINVAL; + return -1; + } + + struct work_interval_create_params create_params = { 0 }; + + int ret = __work_interval_ctl(WORK_INTERVAL_OPERATION_GET_FLAGS, port, + &create_params, sizeof(create_params)); + if (ret == -1) { + return ret; + } + + *flags = create_params.wicp_create_flags; + return 0; +} + int work_interval_notify(work_interval_t interval_handle, uint64_t start, uint64_t finish, uint64_t deadline, uint64_t next_start, @@ -98,6 +118,10 @@ work_interval_notify(work_interval_t interval_handle, uint64_t start, return -1; } + if (interval_handle->create_flags & WORK_INTERVAL_FLAG_IGNORED) { + return 0; + } + notification.create_flags = interval_handle->create_flags; work_interval_id = interval_handle->work_interval_id; diff --git a/libsyscall/xcodescripts/create-syscalls.pl b/libsyscall/xcodescripts/create-syscalls.pl index 6bf15db90..d4e7c9452 100755 --- a/libsyscall/xcodescripts/create-syscalls.pl +++ b/libsyscall/xcodescripts/create-syscalls.pl @@ -133,7 +133,7 @@ my @Cancelable = qw/ link linkat lseek lstat msgrcv msgsnd msync open openat - pathconf peeloff poll posix_spawn pread pselect pwrite + pathconf peeloff poll posix_spawn pread preadv pselect pwrite pwritev read readv recvfrom recvmsg rename renameat rename_ext __semwait_signal __sigwait @@ -165,17 +165,24 @@ sub readMaster { die "$MyName: $file: $!\n" unless defined($f); my $line = 0; my $skip = 0; + my $allow_missing = 0; while(<$f>) { $line++; if(/^#\s*endif/) { $skip = 0; + $allow_missing = 0; next; } if(/^#\s*else/) { $skip = -$skip; + $allow_missing = 0; next; } chomp; + if(/^#\s*ifndef\s+(RC_HIDE\S+)$/) { + $skip = 1; + $allow_missing = 1; + } if(/^#\s*if\s+(\S+)$/) { $skip = ($1 eq 'COMPAT_GETFSSTAT') ? -1 : 1; next; @@ -229,6 +236,7 @@ sub readMaster { aliases => {}, mismatch_args => \%mismatch_args, # Arguments that might need to be zero/sign-extended except => [], + allow_missing => $allow_missing, }; } } @@ -324,7 +332,7 @@ sub readAliases { ########################################################################## sub writeStubForSymbol { my ($f, $symbol) = @_; - + my @conditions; my $has_arm64 = 0; for my $subarch (@Architectures) { @@ -343,6 +351,9 @@ sub writeStubForSymbol { print $f "#define __SYSCALL_32BIT_ARG_BYTES $$symbol{bytes}\n"; print $f "#include \"SYS.h\"\n\n"; + if ($$symbol{allow_missing}) { + printf $f "#ifdef SYS_%s\n", $$symbol{syscall}; + } if (scalar(@conditions)) { printf $f "#ifndef SYS_%s\n", $$symbol{syscall}; @@ -379,6 +390,10 @@ sub writeStubForSymbol { # override it we need to honour that. 
} + if ($$symbol{allow_missing}) { + printf $f "#endif\n"; + } + if($has_arm64) { printf $f "#endif\n\n"; } @@ -386,7 +401,11 @@ sub writeStubForSymbol { sub writeAliasesForSymbol { my ($f, $symbol) = @_; - + + if ($$symbol{allow_missing}) { + printf $f "#ifdef SYS_%s\n", $$symbol{syscall}; + } + foreach my $subarch (@Architectures) { (my $arch = $subarch) =~ s/arm(v.*)/arm/; $arch =~ s/x86_64(.*)/x86_64/; @@ -403,6 +422,9 @@ sub writeAliasesForSymbol { } printf $f "#endif\n\n"; } + if ($$symbol{allow_missing}) { + printf $f "#endif\n"; + } } usage() unless scalar(@ARGV) == 5; diff --git a/libsyscall/xcodescripts/mach_install_mig.sh b/libsyscall/xcodescripts/mach_install_mig.sh index 0761c11fe..f51db78e4 100755 --- a/libsyscall/xcodescripts/mach_install_mig.sh +++ b/libsyscall/xcodescripts/mach_install_mig.sh @@ -46,27 +46,19 @@ MIG_PRIVATE_DEFS_INCFLAGS="-I${SDKROOT}/${SDK_INSTALL_HEADERS_ROOT}/System/Libra SRC="$SRCROOT/mach" FILTER_MIG="$SRCROOT/xcodescripts/filter_mig.awk" -# from old Libsystem makefiles -MACHINE_ARCH=`echo $ARCHS | cut -d' ' -f 1` -if [[ ( "$MACHINE_ARCH" =~ ^"arm64" || "$MACHINE_ARCH" =~ ^"x86_64" ) && `echo $ARCHS | wc -w` -gt 1 ]] -then - # MACHINE_ARCH needs to be a 32-bit arch to generate vm_map_internal.h correctly. - MACHINE_ARCH=`echo $ARCHS | cut -d' ' -f 2` - if [[ ( "$MACHINE_ARCH" =~ ^"arm64" || "$MACHINE_ARCH" =~ ^"x86_64" ) && `echo $ARCHS | wc -w` -gt 2 ]] - then - # MACHINE_ARCH needs to be a 32-bit arch to generate vm_map_internal.h correctly. - MACHINE_ARCH=`echo $ARCHS | cut -d' ' -f 3` - fi -fi # MACHINE_ARCH *really* needs to be a 32-bit arch to generate vm_map_internal.h correctly, even if there are no 32-bit targets. -if [[ ( "$MACHINE_ARCH" =~ ^"arm64" ) ]] -then - MACHINE_ARCH="armv7" -fi -if [[ ( "$MACHINE_ARCH" =~ ^"x86_64" ) ]] -then - MACHINE_ARCH="i386" -fi +# thread_state_t *really* needs to pick up arm64 over intel because it has a larger struct type. +case "$ARCHS" in +*arm64*) + MACHINE_ARCH=armv7 + ;; +*x86_64*) + MACHINE_ARCH=i386 + ;; +*) + MACHINE_ARCH=`echo $ARCHS | cut -d' ' -f 1` + ;; +esac ASROOT="" if [ `whoami` = "root" ]; then @@ -80,6 +72,7 @@ MIGS="clock.defs host_priv.defs host_security.defs lock_set.defs + mach_eventlink.defs mach_host.defs mach_port.defs mach_voucher.defs diff --git a/makedefs/MakeInc.cmd b/makedefs/MakeInc.cmd index 0c8420d13..2bbd75c38 100644 --- a/makedefs/MakeInc.cmd +++ b/makedefs/MakeInc.cmd @@ -1,6 +1,6 @@ # -*- mode: makefile;-*- # -# Copyright (C) 1999-2016 Apple Inc. All rights reserved. +# Copyright (C) 1999-2020 Apple Inc. All rights reserved. # # MakeInc.cmd contains command paths for use during # the build, as well as make fragments and text @@ -11,9 +11,9 @@ # Commands for the build environment # -## -# Verbosity -## +# +# Build Logging and Verbosity +# ifeq ($(RC_XBS),YES) VERBOSE = YES @@ -23,31 +23,92 @@ endif ECHO = echo -LOG = echo -makelog = $(info $1) ERR = $(ECHO) > /dev/stderr +PRINTF = printf QUIET ?= 0 ifneq ($(QUIET),0) - LOG = : - makelog = + PRINTF = printf > /dev/null ifeq ($(VERBOSE),YES) override VERBOSE = NO endif endif +# Helper functions for logging operations. +LOG_PFX_LEN = 15 +LOG_PFX_LEN_ADJ = $(LOG_PFX_LEN) +LOG = $(PRINTF) "$2%$4s$(Color0) $3%s$(Color0)\n" "$1" + +CONCISE ?= 0 +ifneq ($(CONCISE),0) + # Concise logging puts all logs on the same line (CSI K to clear and + # carriage return). 
+ LOG = $(PRINTF) "$2%$4s$(Color0) $3%s$(Color0)\033[K\r" "$1" +endif + +_LOG_COMP = $(call LOG,$1,$(ColorC),$(ColorF),$(LOG_PFX_LEN_ADJ)) +_LOG_HOST = $(call LOG,$1,$(ColorH),$(ColorF),$(LOG_PFX_LEN)) +_LOG_HOST_LINK = $(call LOG,$1,$(ColorH),$(ColorLF),$(LOG_PFX_LEN)) + +# Special operations. +LOG_LDFILELIST = $(call LOG,LDFILELIST,$(ColorL),$(ColorLF),$(LOG_PFX_LEN_ADJ)) +LOG_MIG = $(call LOG,MIG,$(ColorM),$(ColorF),$(LOG_PFX_LEN_ADJ)) +LOG_LD = $(call LOG,LD,$(ColorL),$(ColorF),$(LOG_PFX_LEN_ADJ)) + +# Compiling/machine-specific operations. +LOG_CC = $(call _LOG_COMP,CC) +LOG_CXX = $(call _LOG_COMP,C++) +LOG_AS = $(call _LOG_COMP,AS) +LOG_LTO = $(call _LOG_COMP,LTO) +LOG_SYMBOLSET = $(call _LOG_COMP,SYMSET) +LOG_SYMBOLSETPLIST = $(call _LOG_COMP,SYMSETPLIST) + +# Host-side operations. +LOG_IIG = $(call _LOG_HOST,IIG) +LOG_HOST_CC = $(call _LOG_HOST,CC) +LOG_HOST_LD = $(call _LOG_HOST,LD) +LOG_HOST_CODESIGN = $(call _LOG_HOST,CODESIGN) +LOG_HOST_BISON = $(call _LOG_HOST,BISON) +LOG_HOST_FLEX = $(call _LOG_HOST,FLEX) +LOG_INSTALL = $(call _LOG_HOST,INSTALL) +LOG_INSTALLSYM = $(call _LOG_HOST,INSTALLSYM) +LOG_INSTALLHDR = $(call _LOG_HOST,INSTALLHDR) +LOG_INSTALLMACROS = $(call _LOG_HOST,INSTALLMACROS) +LOG_INSTALLPY = $(call _LOG_HOST,INSTALLPY) +LOG_MAN = $(call _LOG_HOST,MAN) +LOG_MANLINK = $(call _LOG_HOST,MANLINK) +LOG_ALIAS = $(call _LOG_HOST,ALIAS) +LOG_STRIP = $(call _LOG_HOST,STRIP) +LOG_DSYMUTIL = $(call _LOG_HOST,DSYMUTIL) +LOG_LIBTOOL = $(call _LOG_HOST,LIBTOOL) + +# Host-side linking operations. +LOG_GENASSYM = $(call _LOG_HOST_LINK,GENASSYM) +LOG_GENERATE= $(call _LOG_HOST_LINK,GENERATE) +LOG_CTFCONVERT = $(call _LOG_HOST_LINK,CTFCONVERT) +LOG_CTFMERGE = $(call _LOG_HOST_LINK,CTFMERGE) +LOG_CTFINSERT = $(call _LOG_HOST_LINK,CTFINSERT) +LOG_DSYMUTIL = $(call _LOG_HOST_LINK,DSYMUTIL) +LOG_SUPPORTED_KPI = $(call _LOG_HOST_LINK,SUPPORTED_KPI) + ifeq ($(VERBOSE),YES) _v = _vstdout = + _vstderr = XCRUN = /usr/bin/xcrun -verbose else _v = @ _vstdout = > /dev/null + _vstderr = 2&> /dev/null XCRUN = /usr/bin/xcrun endif VERBOSE_GENERATED_MAKE_FRAGMENTS = NO +# +# Defaults +# + SDKROOT ?= macosx HOST_SDKROOT ?= macosx @@ -152,12 +213,7 @@ SUPPORTED_SIMULATOR_PLATFORMS := iPhoneSimulator iPhoneNanoSimulator tvSimulator SUPPORTED_PLATFORMS := MacOSX DriverKit $(SUPPORTED_SIMULATOR_PLATFORMS) $(SUPPORTED_EMBEDDED_PLATFORMS) # Platform-specific tools -ifneq ($(filter $(SUPPORTED_EMBEDDED_PLATFORMS),$(PLATFORM)),) -ifeq ($(EMBEDDED_DEVICE_MAP),) - export EMBEDDED_DEVICE_MAP := $(shell $(XCRUN) -sdk $(SDKROOT) -find embedded_device_map) -endif EDM_DBPATH ?= $(PLATFORMPATH)/usr/local/standalone/firmware/device_map.db -endif # Scripts or tools we build ourselves # diff --git a/makedefs/MakeInc.def b/makedefs/MakeInc.def index a1030c34a..d53f3e1a7 100644 --- a/makedefs/MakeInc.def +++ b/makedefs/MakeInc.def @@ -1,6 +1,6 @@ # -*- mode: makefile;-*- # -# Copyright (C) 1999-2019 Apple Inc. All rights reserved. +# Copyright (C) 1999-2020 Apple Inc. All rights reserved. # # MakeInc.def contains global definitions for building, # linking, and installing files. 
@@ -24,9 +24,17 @@ SUPPORTED_KERNEL_CONFIGS = RELEASE DEVELOPMENT DEBUG PROFILE KASAN SUPPORTED_X86_64_MACHINE_CONFIGS = NONE SUPPORTED_X86_64H_MACHINE_CONFIGS = NONE -SUPPORTED_ARM_MACHINE_CONFIGS = S7002 T8002 T8004 +ifneq ($(findstring _Sim,$(RC_ProjectName)),) +SUPPORTED_ARM_MACHINE_CONFIGS = NONE +SUPPORTED_ARM64_MACHINE_CONFIGS = NONE +else ifneq ($(findstring _host,$(RC_ProjectName)),) +SUPPORTED_ARM_MACHINE_CONFIGS = NONE +SUPPORTED_ARM64_MACHINE_CONFIGS = NONE +else +SUPPORTED_ARM_MACHINE_CONFIGS = T8002 T8004 SUPPORTED_ARM64_MACHINE_CONFIGS = T7000 T7001 S8000 S8001 T8010 T8011 BCM2837 +endif # # Setup up *_LC variables during recursive invocations @@ -53,7 +61,6 @@ COMPONENT_IMPORT_LIST = $(filter-out $(COMPONENT),$(COMPONENT_LIST)) MACHINE_FLAGS_ARM64_T7000 = -DARM64_BOARD_CONFIG_T7000 MACHINE_FLAGS_ARM64_T7001 = -DARM64_BOARD_CONFIG_T7001 -MACHINE_FLAGS_ARM_S7002 = -DARM_BOARD_CONFIG_S7002 MACHINE_FLAGS_ARM64_S8000 = -DARM64_BOARD_CONFIG_S8000 MACHINE_FLAGS_ARM64_S8001 = -DARM64_BOARD_CONFIG_S8001 MACHINE_FLAGS_ARM_T8002 = -DARM_BOARD_CONFIG_T8002 @@ -97,20 +104,31 @@ endif DEPLOYMENT_TARGET_DEFINES = -DPLATFORM_$(PLATFORM) +ifneq ($(RC_ENABLE_PRODUCT_INFO_FILTER),) +SEED_DEFINES += -DRC_ENABLE_XNU_PRODUCT_INFO_FILTER +else +SEED_DEFINES += -URC_ENABLE_XNU_PRODUCT_INFO_FILTER +endif + # # Standard defines list # DEFINES = -DAPPLE -DKERNEL -DKERNEL_PRIVATE -DXNU_KERNEL_PRIVATE \ - -DPRIVATE -D__MACHO__=1 -Dvolatile=__volatile $(CONFIG_DEFINES) \ - $(SEED_DEFINES) + -DPRIVATE -D__MACHO__=1 -Dvolatile=__volatile -DXNU_KERN_EVENT_DATA_IS_VLA \ + $(CONFIG_DEFINES) $(SEED_DEFINES) + +# Enable caching with `make CCACHE=ccache` +# This intentionally does not override $(CC) because that will confuse +# utilities like mig. +CCACHE ?= # # Compiler command # -KCC = $(CC) -KC++ = $(CXX) +KCC = $(CCACHE) $(CC) +KC++ = $(CCACHE) $(CXX) -GENASSYM_KCC = $(CC) +GENASSYM_KCC = $(CCACHE) $(CC) # # Compiler warning flags @@ -131,9 +149,6 @@ WARNFLAGS_STD := \ -Weverything \ -Wno-pedantic \ $(WERROR) \ - -Wno-implicit-int-conversion \ - -Wno-sign-conversion \ - -Wno-shorten-64-to-32 \ -Wno-bad-function-cast \ -Wno-c++-compat \ -Wno-c++98-compat \ @@ -159,9 +174,20 @@ WARNFLAGS_STD := \ -Wno-vla \ -Wno-zero-length-array +# When a new clang has new warnings disable them here until the kernel is fixed. WARNFLAGS_STD := $(WARNFLAGS_STD) \ -Wno-unknown-warning-option \ - -Wno-error=atomic-implicit-seq-cst + -Wno-anon-enum-enum-conversion \ + -Wno-error=enum-enum-conversion \ + -Wno-error=c99-designator \ + -Wno-error=reorder-init-list + +# Hand-written sign conversion diagnostics are resolved, but the +# auto-generated ones need mig and iig to be updated to fix. 
Disable the +# diagnostic here until we've completed that: +WARNFLAGS_STD := $(WARNFLAGS_STD) \ + -Wno-sign-compare \ + -Wno-sign-conversion CWARNFLAGS_STD = \ $(WARNFLAGS_STD) @@ -201,40 +227,48 @@ ARCH_FLAGS_X86_64H = -arch x86_64h ifneq ($(filter ARM ARM64,$(CURRENT_ARCH_CONFIG)),) +ifneq ($(findstring _Sim,$(RC_ProjectName)),) +ARCH_FLAGS_ARM64 = -arch arm64e +else ifneq ($(findstring _host,$(RC_ProjectName)),) +ARCH_FLAGS_ARM64 = -arch arm64e +else + ifndef ARCH_STRING_FOR_CURRENT_MACHINE_CONFIG + +ifneq ($(EMBEDDED_DEVICE_MAP),) export ARCH_STRING_FOR_CURRENT_MACHINE_CONFIG := $(shell $(EMBEDDED_DEVICE_MAP) -db $(EDM_DBPATH) -query SELECT DISTINCT KernelMachOArchitecture FROM Targets WHERE KernelPlatform IS \"$(CURRENT_MACHINE_CONFIG_LC)\" LIMIT 1 || echo UNKNOWN ) +else +# Without embdedded device map, use a default arch string +export ARCH_STRING_FOR_CURRENT_MACHINE_CONFIG := $(shell echo $(CURRENT_ARCH_CONFIG) | tr A-Z a-z) +endif endif # # This can have false negatives, and is used to avoid calling CTF when we'll build a static KC # ifndef WILL_BUILD_STATIC_KC +ifneq ($(EMBEDDED_DEVICE_MAP),) export WILL_BUILD_STATIC_KC := $(shell $(EMBEDDED_DEVICE_MAP) -db $(EDM_DBPATH) \ -query 'SELECT COUNT(*) != 0 FROM Targets WHERE KernelPlatform IS "$(CURRENT_MACHINE_CONFIG_LC)" \ AND (KernelMachOArchitecture LIKE "arm64e" OR ProductType LIKE "iphone10,%")') +else +export WILL_BUILD_STATIC_KC := 0 +endif endif BUILD_STATIC_LINK := 1 -endif - ARCH_FLAGS_ARM = -arch $(ARCH_STRING_FOR_CURRENT_MACHINE_CONFIG) ARCH_FLAGS_ARM64 = -arch $(ARCH_STRING_FOR_CURRENT_MACHINE_CONFIG) -# -# Clang static analyzer flags -# -ANALYZER = $(CC) -ANALYZERPP = $(CXX) -ANALYZERFLAGS = --analyze -D__clang_analyzer__ -ifneq ($(ANALYZE_FORMAT),text) -ANALYZERFLAGS += -Xanalyzer -analyzer-output=html -ANALYZERFLAGS += -o $(OBJROOT)/analyzer-html +endif + else -ANALYZERFLAGS += -Xanalyzer -analyzer-output=text +# non arm machine config string +ifndef ARCH_STRING_FOR_CURRENT_MACHINE_CONFIG +export ARCH_STRING_FOR_CURRENT_MACHINE_CONFIG := $(shell echo $(CURRENT_ARCH_CONFIG) | tr A-Z a-z) endif -ifneq ($(ANALYZE_VERBOSE),YES) -ANALYZERFLAGS += -Xclang -analyzer-disable-checker -Xclang deadcode.DeadStores + endif # @@ -284,8 +318,20 @@ CFLAGS_X86_64H = $(CFLAGS_X86_64) CFLAGS_ARM = -Darm -DARM -D__ARM__ -DPAGE_SIZE_FIXED \ -momit-leaf-frame-pointer -fno-strict-aliasing -D__API__=v4 -CFLAGS_ARM64 = -Darm64 -DARM64 -D__ARM64__ -DLP64 -DPAGE_SIZE_FIXED \ - -momit-leaf-frame-pointer -fno-strict-aliasing -D__API__=v4 -mkernel +LARGE_MEMORY_DEFINE=-UARM_LARGE_MEMORY +ARM64_PLKSEG_ADDR =0xfffffff004004000 +ARM64_LINK_ADDR =0xfffffff007004000 + +# Use ARM_LARGE_MEMORY config for all MacOSX targets. +ifneq ($(filter $(PLATFORM),MacOSX),) +LARGE_MEMORY_DEFINE=-DARM_LARGE_MEMORY=1 +ARM64_PLKSEG_ADDR =0xfffffe0004004000 +ARM64_LINK_ADDR =0xfffffe0007004000 +endif + + +CFLAGS_ARM64 = -Darm64 -DARM64 -D__ARM64__ -DLP64 -DPAGE_SIZE_FIXED -DVM_KERNEL_LINK_ADDRESS=$(ARM64_LINK_ADDR) \ + $(LARGE_MEMORY_DEFINE) -momit-leaf-frame-pointer -fno-strict-aliasing -D__API__=v4 -mkernel CFLAGS_RELEASEX86_64 = -O2 CFLAGS_DEVELOPMENTX86_64 = -O2 @@ -319,6 +365,9 @@ CFLAGS_PROFILEARM64 = -O2 SAN=0 +# KASan support +# + ifeq ($(CURRENT_KERNEL_CONFIG),KASAN) # KASan kernel config implicitly enables the KASan instrumentation. # Instrumentation for other sanitizers is enabled explicitly at build time. @@ -327,23 +376,30 @@ endif ifeq ($(KASAN),1) SAN=1 -BUILD_LTO = 0 -KASAN_SHIFT_ARM64=0xe000000000000000 +BUILD_LTO=0 + +# Shadow map scale size. 
With KASAN_SCALE=N, the shadow memory consumes 1/2^N of +# the virtual address space. +KASAN_SCALE=3 + +KASAN_OFFSET_ARM64=0xe000000000000000 # -# To calculate the kasan shift, subtract the lowest KVA to sanitize, shifted right by 3 bits, +# To calculate the kasan offset, subtract the lowest KVA to sanitize, shifted right by 3 bits, # from the base address of the kasan shadow area, (e.g. solve the following equation: -# SHIFT = {VA mapped by the first KASAN PML4 [Currently #494]} - (LOWEST_KVA >> 3) -# SHIFT = (0ULL - (512GiB * (512 - 494))) - (LOWEST_SAN_KVA >> 3) -# SHIFT = FFFFF70000000000 - ((0ULL - (512GiB * (512 - 496))) >> 3) [PML4 #496 is the first possible KVA] -# SHIFT = FFFFF70000000000 - (FFFFF80000000000 >> 3) -# SHIFT = DFFFF80000000000 +# OFFSET = {VA mapped by the first KASAN PML4 [Currently #494]} - (LOWEST_KVA >> 3) +# OFFSET = (0ULL - (512GiB * (512 - 494))) - (LOWEST_SAN_KVA >> 3) +# OFFSET = FFFFF70000000000 - ((0ULL - (512GiB * (512 - 496))) >> 3) [PML4 #496 is the first possible KVA] +# OFFSET = FFFFF70000000000 - (FFFFF80000000000 >> 3) +# OFFSET = DFFFF80000000000 # ). -KASAN_SHIFT_X86_64=0xdffff80000000000 -KASAN_SHIFT_X86_64H=$(KASAN_SHIFT_X86_64) -KASAN_SHIFT=$($(addsuffix $(CURRENT_ARCH_CONFIG),KASAN_SHIFT_)) -CFLAGS_GEN += -DKASAN=1 -DKASAN_SHIFT=$(KASAN_SHIFT) -fsanitize=address \ +KASAN_OFFSET_X86_64=0xdffff80000000000 +KASAN_OFFSET_X86_64H=$(KASAN_OFFSET_X86_64) +KASAN_OFFSET=$($(addsuffix $(CURRENT_ARCH_CONFIG),KASAN_OFFSET_)) +KASAN_BLACKLIST=$(OBJROOT)/san/kasan-blacklist-$(CURRENT_ARCH_CONFIG_LC) +CFLAGS_GEN += -DKASAN=1 -DKASAN_OFFSET=$(KASAN_OFFSET) -DKASAN_SCALE=$(KASAN_SCALE) -fsanitize=address \ -mllvm -asan-globals-live-support \ - -mllvm -asan-mapping-offset=$(KASAN_SHIFT) + -mllvm -asan-mapping-offset=$(KASAN_OFFSET) \ + -fsanitize-blacklist=$(KASAN_BLACKLIST) endif @@ -406,7 +462,7 @@ CXXFLAGS = $(CXXFLAGS_GEN) \ # # Assembler command # -AS = $(CC) +AS = $(CCACHE) $(CC) S_KCC = $(CC) # @@ -451,9 +507,11 @@ LDFLAGS_KERNEL_GEN = \ -fapple-kext \ -Wl,-e,__start \ -Wl,-sectalign,__TEXT,__text,0x1000 \ + -Wl,-sectalign,__DATA,__percpu,0x80 \ -Wl,-sectalign,__DATA,__common,0x1000 \ -Wl,-sectalign,__DATA,__bss,0x1000 \ -Wl,-sectcreate,__PRELINK_TEXT,__text,/dev/null \ + -Wl,-segprot,__PRELINK_TEXT,r-x,r-x \ -Wl,-sectcreate,__PRELINK_INFO,__info,/dev/null \ -Wl,-new_linker \ -Wl,-pagezero_size,0x0 \ @@ -501,7 +559,9 @@ LDFLAGS_KERNEL_RELEASEX86_64 = \ -Wl,-sectalign,__HIB,__llvm_prf_names,0x1000 \ -Wl,-sectalign,__HIB,__llvm_prf_data,0x1000 \ -Wl,-sectalign,__HIB,__textcoal_nt,0x1000 \ + -Wl,-sectalign,__HIB,__cstring,0x1000 \ -Wl,-rename_section,__DATA,__const,__DATA_CONST,__const \ + -Wl,-segprot,__DATA_CONST,r--,r-- \ -Wl,-no_zero_fill_sections \ $(LDFLAGS_NOSTRIP_FLAG) @@ -549,17 +609,17 @@ LDFLAGS_KERNEL_RELEASEARM = \ $(LDFLAGS_KERNEL_GENARM) \ $(LDFLAGS_KERNEL_STRIP_LTO) -LDFLAGS_KERNEL_EXPORTS_RELEASEARM = \ +LDFLAGS_KERNEL_ONLY_CONFIG_RELEASEARM = \ -Wl,-exported_symbols_list,$(TARGET)/all-kpi.exp LDFLAGS_KERNEL_DEVELOPMENTARM = \ $(LDFLAGS_KERNEL_GENARM) \ $(LDFLAGS_NOSTRIP_FLAG) -LDFLAGS_KERNEL_EXPORTS_DEVELOPMENTARM = +LDFLAGS_KERNEL_ONLY_CONFIG_DEVELOPMENTARM = LDFLAGS_KERNEL_DEBUGARM = $(LDFLAGS_KERNEL_DEVELOPMENTARM) -LDFLAGS_KERNEL_EXPORTS_DEBUGARM = $(LDFLAGS_KERNEL_EXPORTS_DEVELOPMENTARM) +LDFLAGS_KERNEL_ONLY_CONFIG_DEBUGARM = $(LDFLAGS_KERNEL_ONLY_CONFIG_DEVELOPMENTARM) # Offset image base by page to have iBoot load kernel TEXT correctly. # First page is used for various purposes : sleep token, reset vector. 
@@ -592,17 +652,28 @@ LDFLAGS_KERNEL_EXPORTS_DEBUGARM = $(LDFLAGS_KERNEL_EXPORTS_DEVELOPMENTARM) LDFLAGS_KERNEL_GENARM64 = \ -Wl,-pie \ -Wl,-static \ - -Wl,-segaddr,__PRELINK_TEXT,0xfffffff004004000 \ - -Wl,-image_base,0xfffffff007004000 \ + -Wl,-segaddr,__PRELINK_TEXT,$(ARM64_PLKSEG_ADDR) \ + -Wl,-image_base,$(ARM64_LINK_ADDR) \ + \ + -Wl,-rename_section,__HIB,__text,__TEXT_EXEC,__hib_text \ + \ + -Wl,-rename_section,__HIB,__const,__DATA_CONST,__hib_const \ + -Wl,-rename_section,__HIB,__cstring,__DATA_CONST,__hib_const \ + -Wl,-rename_section,__HIB,__literal8,__DATA_CONST,__hib_const \ + -Wl,-rename_section,__HIB,__literal16,__DATA_CONST,__hib_const \ + \ + -Wl,-rename_segment,__HIB,__HIBDATA \ + \ -Wl,-sectalign,__DATA,__const,0x4000 \ + -Wl,-sectalign,__DATA,__data,0x4000 \ -Wl,-rename_section,__DATA,__mod_init_func,__DATA_CONST,__mod_init_func \ -Wl,-rename_section,__DATA,__mod_term_func,__DATA_CONST,__mod_term_func \ -Wl,-rename_section,__DATA,__auth_ptr,__DATA_CONST,__auth_ptr \ -Wl,-rename_section,__DATA,__auth_got,__DATA_CONST,__auth_got \ -Wl,-rename_section,__DATA,__const,__DATA_CONST,__const \ + -Wl,-segprot,__DATA_CONST,r--,r-- \ -Wl,-rename_section,__TEXT,__text,__TEXT_EXEC,__text \ -Wl,-rename_section,__TEXT,__stubs,__TEXT_EXEC,__stubs \ - -Wl,-rename_section,__TEXT,initcode,__TEXT_EXEC,initcode \ -Wl,-sectcreate,"__PLK_TEXT_EXEC",__text,/dev/null \ -Wl,-sectcreate,__PRELINK_DATA,__data,/dev/null \ -Wl,-sectcreate,"__PLK_DATA_CONST",__data,/dev/null \ @@ -618,7 +689,7 @@ LDFLAGS_KERNEL_RELEASEARM64 = \ $(LDFLAGS_KERNEL_SEGARM64) \ $(LDFLAGS_KERNEL_STRIP_LTO) -LDFLAGS_KERNEL_EXPORTS_RELEASEARM64 = \ +LDFLAGS_KERNEL_ONLY_CONFIG_RELEASEARM64 = \ -Wl,-exported_symbols_list,$(TARGET)/all-kpi.exp LDFLAGS_KERNEL_DEVELOPMENTARM64 = \ @@ -626,13 +697,26 @@ LDFLAGS_KERNEL_DEVELOPMENTARM64 = \ $(LDFLAGS_KERNEL_SEGARM64) \ $(LDFLAGS_NOSTRIP_FLAG) -LDFLAGS_KERNEL_EXPORTS_DEVELOPMENTARM64 = +LDFLAGS_KERNEL_ONLY_CONFIG_DEVELOPMENTARM64 = LDFLAGS_KERNEL_KASANARM64 = $(LDFLAGS_KERNEL_DEVELOPMENTARM64) LDFLAGS_KERNEL_DEBUGARM64 = $(LDFLAGS_KERNEL_DEVELOPMENTARM64) -LDFLAGS_KERNEL_EXPORTS_KASANARM64 = $(LDFLAGS_KERNEL_EXPORTS_DEVELOPMENTARM64) -LDFLAGS_KERNEL_EXPORTS_DEBUGARM64 = $(LDFLAGS_KERNEL_EXPORTS_DEVELOPMENTARM64) +LDFLAGS_KERNEL_ONLY_CONFIG_KASANARM64 = $(LDFLAGS_KERNEL_ONLY_CONFIG_DEVELOPMENTARM64) +LDFLAGS_KERNEL_ONLY_CONFIG_DEBUGARM64 = $(LDFLAGS_KERNEL_ONLY_CONFIG_DEVELOPMENTARM64) + +# +# arm64e specific linker flags that should be used only when linking the kernel +# (and not the static kernel cache / kcgen) +# +LDFLAGS_KERNEL_ONLY_SUBARCH_arm64e = \ + -Wl,-add_split_seg_info \ + -Wl,-kernel + +LDFLAGS_KERNEL_ONLY_SUBARCH_x86_64 = \ + -Wl,-add_split_seg_info \ + -Wl,-kernel +LDFLAGS_KERNEL_ONLY_SUBARCH_x86_64h = $(LDFLAGS_KERNEL_ONLY_SUBARCH_x86_64) LDFLAGS_KERNEL = $(LDFLAGS_KERNEL_GEN) \ $(LDFLAGS_KERNEL_SDK) \ @@ -642,11 +726,11 @@ LDFLAGS_KERNEL = $(LDFLAGS_KERNEL_GEN) \ $($(addsuffix $(CURRENT_ARCH_CONFIG), $(addsuffix $(CURRENT_KERNEL_CONFIG),LDFLAGS_KERNEL_))) \ $(DEPLOYMENT_TARGET_FLAGS) - -LDFLAGS_KERNEL_EXPORTS = \ - $($(addsuffix $(CURRENT_ARCH_CONFIG), $(addsuffix $(CURRENT_KERNEL_CONFIG),LDFLAGS_KERNEL_EXPORTS_))) \ - -Wl,-alias_list,$(TARGET)/all-alias.exp - +LDFLAGS_KERNEL_ONLY = \ + $($(addsuffix $(CURRENT_ARCH_CONFIG), $(addsuffix $(CURRENT_KERNEL_CONFIG),LDFLAGS_KERNEL_ONLY_CONFIG_))) \ + $($(addsuffix $(ARCH_STRING_FOR_CURRENT_MACHINE_CONFIG),LDFLAGS_KERNEL_ONLY_SUBARCH_)) \ + -Wl,-alias_list,$(TARGET)/all-alias.exp \ + 
-Wl,-sectcreate,__LINKINFO,__symbolsets,$(TARGET)/symbolsets.plist -Wl,-segprot,__LINKINFO,r--,r-- # # Default runtime libraries to be linked with the kernel @@ -660,15 +744,17 @@ LD_KERNEL_ARCHIVES = $(LDFLAGS_KERNEL_SDK) -lfirehose_kernel ifndef DO_CTFMERGE DO_CTFMERGE := 1 ifeq ($(CURRENT_KERNEL_CONFIG),RELEASE) -ifneq ($(filter ARM%,$(CURRENT_ARCH_CONFIG)),) +ifneq ($(PLATFORM),MacOSX) DO_CTFMERGE := 0 endif endif ifneq ($(CURRENT_KERNEL_CONFIG),KASAN) +ifneq ($(PLATFORM),MacOSX) ifeq ($(WILL_BUILD_STATIC_KC),1) DO_CTFMERGE := 0 endif endif +endif endif # DO_CTFMERGE @@ -769,6 +855,7 @@ export VPATH = .:$(SOURCE) # INSTALL_FLAGS = -c -S -m 0444 DATA_INSTALL_FLAGS = -c -S -m 0644 +DATA_INSTALL_FLAGS_RO = -c -S -m 0444 EXEC_INSTALL_FLAGS = -c -S -m 0755 # @@ -797,7 +884,11 @@ ifndef DRIVERKITINCDIR DRIVERKITINCDIR = $(DRIVERKITSDKHEADERSROOT)/usr/include endif ifndef LCLDIR - LCLDIR = $(SPINCDIR) + ifeq ($(LIBKERN_USE_USR_LOCAL_INCLUDE),) + LCLDIR = $(SPINCDIR) + else + LCLDIR = $(SDKHEADERSROOT)/usr/local/include + endif endif ifndef DRIVERKITLCLDIR DRIVERKITLCLDIR = $(DRIVERKITSDKHEADERSROOT)/usr/local/include @@ -929,7 +1020,7 @@ INSTALL_DTRACE_LIBEXEC_DIR = /usr/libexec/dtrace # # Overrides for XBS build aliases # -ifeq ($(RC_ProjectName),xnu_debug) +ifneq ($(filter $(RC_ProjectName),xnu_debug),) INSTALL_KERNEL_DIR := $(DEVELOPER_EXTRAS_DIR) INSTALL_KERNEL_SYM_DIR := $(DEVELOPER_EXTRAS_DIR) INSTALL_KERNEL_SYM_TO_KDK = 1 @@ -945,7 +1036,7 @@ INSTALL_KERNEL_SYM_DIR := $(SYSTEM_LIBRARY_KERNELS_DIR) INSTALL_KERNEL_SYM_TO_KDK = $(if $(filter YES,$(DWARF_DSYM_FILE_SHOULD_ACCOMPANY_PRODUCT)),1,0) endif -ifeq ($(RC_ProjectName),xnu_kasan) +ifneq ($(filter $(RC_ProjectName),xnu_kasan),) INSTALL_KASAN_ONLY = 1 endif diff --git a/makedefs/MakeInc.dir b/makedefs/MakeInc.dir index ec79505e4..4191ad444 100644 --- a/makedefs/MakeInc.dir +++ b/makedefs/MakeInc.dir @@ -96,8 +96,13 @@ $(eval $(call RECURSIVE_BUILD_RULES_template,build_install_non_primary,$(INST_SU $(eval $(call RECURSIVE_BUILD_RULES_template,config_install,$(CONFIG_SUBDIRS),do_config_install,1)) # -# Install text files +# Install machine independent text files # -$(eval $(call RECURSIVE_BUILD_RULES_template,textfiles_install,$(INSTTEXTFILES_SUBDIRS),do_textfiles_install,)) +$(eval $(call RECURSIVE_BUILD_RULES_template,textfiles_install_mi,$(INSTTEXTFILES_SUBDIRS),do_textfiles_install_mi,)) + +# +# Install machine dependent text files +# +$(eval $(call RECURSIVE_BUILD_RULES_template,textfiles_install_md,$(INSTTEXTFILES_SUBDIRS_$(CURRENT_ARCH_CONFIG)),do_textfiles_install_md,)) # vim: set ft=make: diff --git a/makedefs/MakeInc.kernel b/makedefs/MakeInc.kernel index 363709142..f630a4ba0 100644 --- a/makedefs/MakeInc.kernel +++ b/makedefs/MakeInc.kernel @@ -85,37 +85,38 @@ do_build_kernel_dSYM: $(TARGET)/$(KERNEL_FILE_NAME).dSYM @: .LDFLAGS: ALWAYS - $(_v)$(REPLACECONTENTS) $@ $(LD) $(LDFLAGS_KERNEL) $(LDFLAGS_KERNEL_EXPORTS) $(LD_KERNEL_LIBS) + $(_v)$(REPLACECONTENTS) $@ $(LD) $(LDFLAGS_KERNEL) $(LDFLAGS_KERNEL_ONLY) $(LD_KERNEL_LIBS) .CFLAGS: ALWAYS $(_v)$(REPLACECONTENTS) $@ $(KCC) $(CFLAGS) $(INCFLAGS) $(TARGET)/$(KERNEL_FILE_NAME): $(TARGET)/$(KERNEL_FILE_NAME).unstripped $(TARGET)/$(KERNEL_FILE_NAME).dSYM - $(call makelog,$(ColorH)STRIP$(Color0) $(ColorLF)$(@F)$(Color0)) + @$(LOG_STRIP) "$(@F)" $(_v)$(STRIP) $(STRIP_FLAGS) $< -o $@ + @echo "built kernel at $@" $(_v)$(RM) $@.ctfdata ifeq ($(DO_CTFMERGE),1) - $(call makelog,$(ColorH)CTFCONVERT$(Color0) $(ColorLF)$(@F)$(Color0)) - $(_v)$(CTFCONVERT) -c -l xnu -u /xnu -o $@.ctf 
$(TARGET)/$(KERNEL_FILE_NAME).dSYM/Contents/Resources/DWARF/$(KERNEL_FILE_NAME) - $(call makelog,$(ColorH)CTFMERGE$(Color0) $(ColorLF)$(@F)$(Color0)) + @$(LOG_CTFCONVERT) "$(@F)" + $(_v)$(CTFCONVERT) -c -l xnu -u xnu -o $@.ctf $(TARGET)/$(KERNEL_FILE_NAME).dSYM/Contents/Resources/DWARF/$(KERNEL_FILE_NAME) + @$(LOG_CTFMERGE) "$(@F)" $(_v)$(CTFMERGE) -l xnu -o $@ -Z $@.ctfdata $@.ctf - $(_v)if [ -s $@.ctfdata ]; then \ - $(LOG) "$(ColorH)CTFINSERT$(Color0) $(ColorLF)$(@F)$(Color0)"; \ - $(CTFINSERT) $@ $(ARCH_FLAGS_$(CURRENT_ARCH_CONFIG)) \ - $@.ctfdata -o $@; \ + $(_v)if [ -s $@.ctfdata ]; then \ + $(LOG_CTFINSERT) "$(@F)"; \ + $(CTFINSERT) $@ $(ARCH_FLAGS_$(CURRENT_ARCH_CONFIG)) \ + $@.ctfdata -o $@; \ fi; endif $(_v)$(LN) $(call function_convert_build_config_to_objdir,$(CURRENT_BUILD_CONFIG))/$(KERNEL_FILE_NAME) $(OBJROOT)/$(KERNEL_FILE_NAME) $(TARGET)/$(KERNEL_FILE_NAME).dSYM: $(TARGET)/$(KERNEL_FILE_NAME).unstripped - $(call makelog,$(ColorH)DSYMUTIL$(Color0) $(ColorLF)$(@F)$(Color0)) - $(_v)$(DSYMUTIL) $(DSYMUTIL_FLAGS) $< -o $@ + @$(LOG_DSYMUTIL) "$(@F)" + $(_v)bash -c "$(DSYMUTIL) $(DSYMUTIL_FLAGS) $< -o $@ $(_vstdout) 2> >(grep -v '^warning:.*could not find object file symbol for symbol' 1>&2)" $(_v)$(MV) $@/$(DSYMDWARFDIR)/$(KERNEL_FILE_NAME).unstripped $@/$(DSYMDWARFDIR)/$(KERNEL_FILE_NAME) $(_v)$(TOUCH) $@ $(TARGET)/$(KERNEL_FILE_NAME).unstripped: $(addprefix $(TARGET)/,$(foreach component,$(COMPONENT_LIST),$(component)/$(CURRENT_KERNEL_CONFIG)/$(component).filelist)) lastkerneldataconst.o lastkernelconstructor.o nonlto.o $(SRCROOT)/config/version.c $(SRCROOT)/config/MasterVersion .LDFLAGS $(filter %/MakeInc.kernel,$(MAKEFILE_LIST)) $(_v)${MAKE} -f $(firstword $(MAKEFILE_LIST)) version.o ifeq ($(PRE_LTO),1) - $(call makelog,$(ColorL)LTO$(Color0) $(ColorLF)$(@F)$(Color0)) + @$(LOG_LTO) "$(@F)" $(_v)rm -f ltolink.filelist $(_v)rm -f nonltolink.filelist $(_v)files="$$($(CAT) $(filter %.filelist,$+)) version.o $(filter %.o,$+)"; \ @@ -131,22 +132,25 @@ ifeq ($(PRE_LTO),1) done; \ printf "$$lto" >ltolink.filelist; \ printf "$$nonlto" >nonltolink.filelist + @$(LOG_LD) "$(@F)" $(_v)if [ -s ltolink.filelist ]; \ then \ $(LD) $($(addsuffix $(CURRENT_ARCH_CONFIG),ARCH_FLAGS_)) -r nonlto.o -filelist ltolink.filelist $(LDFLAGS_KERNEL_LTO) -Wl,-object_path_lto,$(TARGET)/justlto.o -o $(TARGET)/justlto.tmp.o && \ - $(LD) $(LDFLAGS_KERNEL) $(LDFLAGS_KERNEL_EXPORTS) -filelist nonltolink.filelist $(TARGET)/justlto.o $(LDFLAGS_KERNEL_STRIP_LTO) -o $@ $(LD_KERNEL_LIBS) $(LD_KERNEL_ARCHIVES); \ + $(LD) $(LDFLAGS_KERNEL) $(LDFLAGS_KERNEL_ONLY) -filelist nonltolink.filelist $(TARGET)/justlto.o $(LDFLAGS_KERNEL_STRIP_LTO) -o $@ $(LD_KERNEL_LIBS) $(LD_KERNEL_ARCHIVES); \ else \ - $(LD) $(LDFLAGS_KERNEL) $(LDFLAGS_KERNEL_EXPORTS) -filelist nonltolink.filelist -o $@ $(LD_KERNEL_LIBS) $(LD_KERNEL_ARCHIVES); \ + $(LD) $(LDFLAGS_KERNEL) $(LDFLAGS_KERNEL_ONLY) -filelist nonltolink.filelist -o $@ $(LD_KERNEL_LIBS) $(LD_KERNEL_ARCHIVES); \ fi else - $(call makelog,$(ColorL)LD$(Color0) $(ColorLF)$(@F)$(Color0)) + @$(LOG_LD) "$(@F)" $(_v)$(CAT) $(filter %.filelist,$+) < /dev/null > link.filelist - $(_v)$(LD) $(LDFLAGS_KERNEL) $(LDFLAGS_KERNEL_EXPORTS) -filelist link.filelist version.o $(filter %.o,$+) -o $@ $(LD_KERNEL_LIBS) $(LD_KERNEL_ARCHIVES) + $(_v)$(LD) $(LDFLAGS_KERNEL) $(LDFLAGS_KERNEL_ONLY) -filelist link.filelist version.o $(filter %.o,$+) -o $@ $(LD_KERNEL_LIBS) $(LD_KERNEL_ARCHIVES) endif +# for now, rename LASTDATA_CONST to LAST on static kernel cache builds +EXTRA_KC_LINKARGS = 
-Wl,-rename_segment,__LASTDATA_CONST,__LAST $(TARGET)/$(KERNEL_FILE_NAME).link/$(KERNEL_FILE_NAME).a: $(TARGET)/$(KERNEL_FILE_NAME).unstripped .LDFLAGS $(filter %/MakeInc.kernel,$(MAKEFILE_LIST)) - $(call makelog,$(ColorL)LIBTOOL$(Color0) $(ColorLF)$(@F)$(Color0)) + @$(LOG_LIBTOOL) "$(@F)" $(_v)$(MKDIR) $(dir $@) ifeq ($(PRE_LTO),1) $(_v)$(LIBTOOL) -ca $(TARGET)/justlto.o -filelist nonltolink.filelist -o $@ @@ -156,7 +160,7 @@ endif $(_v)cp $(TARGET)/all-kpi.exp $(TARGET)/$(KERNEL_FILE_NAME).link/$(KERNEL_FILE_NAME).exp $(_v)cp $(TARGET)/all-alias.exp $(TARGET)/$(KERNEL_FILE_NAME).link/$(KERNEL_FILE_NAME).alias.exp $(_v)echo "$(LD_KERNEL_ARCHIVES)" >$(TARGET)/$(KERNEL_FILE_NAME).link/$(KERNEL_FILE_NAME).linkarchives - $(_v)echo "$(LDFLAGS_KERNEL) $(LD_KERNEL_LIBS)" >$(TARGET)/$(KERNEL_FILE_NAME).link/$(KERNEL_FILE_NAME).linkarguments + $(_v)echo "$(LDFLAGS_KERNEL) $(LD_KERNEL_LIBS) $(EXTRA_KC_LINKARGS)" >$(TARGET)/$(KERNEL_FILE_NAME).link/$(KERNEL_FILE_NAME).linkarguments $(_v)$(LN) $(call function_convert_build_config_to_objdir,$(CURRENT_BUILD_CONFIG))/$(KERNEL_FILE_NAME).link $(OBJROOT)/$(KERNEL_FILE_NAME).link nonlto.o: .CFLAGS $(filter %/MakeInc.kernel,$(MAKEFILE_LIST)) @@ -204,9 +208,10 @@ lastkernelconstructor.o: $(SRCROOT)/libsa/lastkernelconstructor.c ${C_RULE_4} $(_v)for last_file in ${LAST_FILES}; \ do \ - $(SEG_HACK) -s __DATA -n __LAST -o $${last_file}__ $${last_file} || exit 1; \ + $(SEG_HACK) -s __DATA -n __LASTDATA_CONST -o $${last_file}__ $${last_file} || exit 1; \ mv $${last_file}__ $${last_file} || exit 1; \ done +EXTRA_KC_LINKARGS = -Wl,-rename_segment,__LASTDATA_CONST,__LAST # # Install rules. Each build config is classified as "primary" (the first @@ -257,7 +262,7 @@ do_install_xnu_debug_files: $(DSTROOT)/$(DEVELOPER_EXTRAS_DIR)/README.DEBUG-kern $(DSTROOT)/$(INSTALL_KERNEL_DIR)/$(KERNEL_FILE_NAME): $(TARGET)/$(KERNEL_FILE_NAME) ALWAYS $(_v)$(MKDIR) $(dir $@) - $(call makelog,$(ColorH)INSTALL$(Color0) $(ColorF)$(@F)$(Color0) "($(ColorLF)$(CURRENT_ARCH_CONFIG_LC)$(Color0) $(ColorLF)$(CURRENT_MACHINE_CONFIG_LC)$(Color0))") + @$(LOG_INSTALL) "$(@F)$(Color0) ($(ColorLF)$(CURRENT_ARCH_CONFIG_LC)$(Color0))" $(_v)if [ $(OBJROOT)/.mach_kernel.timestamp -nt $@ ]; then \ $(INSTALL) $(EXEC_INSTALL_FLAGS) $< $@; \ cmdstatus=$$?; \ @@ -271,27 +276,27 @@ ifeq ($(BUILD_STATIC_LINK),1) $(DSTROOT)/$(INSTALL_KERNEL_DIR)/$(KERNEL_FILE_NAME).link/$(KERNEL_FILE_NAME).a: $(TARGET)/$(KERNEL_FILE_NAME).link/$(KERNEL_FILE_NAME).a ALWAYS $(_v)$(MKDIR) $(dir $@) - $(call makelog,$(ColorH)INSTALL$(Color0) $(ColorF)$(@F)$(Color0)) + @$(LOG_INSTALL) "$(@F)" $(_v)$(INSTALL) $(INSTALL_FLAGS) $< $@ $(DSTROOT)/$(INSTALL_KERNEL_DIR)/$(KERNEL_FILE_NAME).link/$(KERNEL_FILE_NAME).linkarguments: $(TARGET)/$(KERNEL_FILE_NAME).link/$(KERNEL_FILE_NAME).linkarguments ALWAYS $(_v)$(MKDIR) $(dir $@) - $(call makelog,$(ColorH)INSTALL$(Color0) $(ColorF)$(@F)$(Color0)) + @$(LOG_INSTALL) "$(@F)" $(_v)$(INSTALL) $(INSTALL_FLAGS) $< $@ $(DSTROOT)/$(INSTALL_KERNEL_DIR)/$(KERNEL_FILE_NAME).link/$(KERNEL_FILE_NAME).linkarchives: $(TARGET)/$(KERNEL_FILE_NAME).link/$(KERNEL_FILE_NAME).linkarchives ALWAYS $(_v)$(MKDIR) $(dir $@) - $(call makelog,$(ColorH)INSTALL$(Color0) $(ColorF)$(@F)$(Color0)) + @$(LOG_INSTALL) "$(@F)" $(_v)$(INSTALL) $(INSTALL_FLAGS) $< $@ $(DSTROOT)/$(INSTALL_KERNEL_DIR)/$(KERNEL_FILE_NAME).link/$(KERNEL_FILE_NAME).exp: $(TARGET)/$(KERNEL_FILE_NAME).link/$(KERNEL_FILE_NAME).exp ALWAYS $(_v)$(MKDIR) $(dir $@) - $(call makelog,$(ColorH)INSTALL$(Color0) $(ColorF)$(@F)$(Color0)) + @$(LOG_INSTALL) "$(@F)" 
$(_v)$(INSTALL) $(INSTALL_FLAGS) $< $@ $(DSTROOT)/$(INSTALL_KERNEL_DIR)/$(KERNEL_FILE_NAME).link/$(KERNEL_FILE_NAME).alias.exp: $(TARGET)/$(KERNEL_FILE_NAME).link/$(KERNEL_FILE_NAME).alias.exp ALWAYS $(_v)$(MKDIR) $(dir $@) - $(call makelog,$(ColorH)INSTALL$(Color0) $(ColorF)$(@F)$(Color0)) + @$(LOG_INSTALL) "$(@F)" $(_v)$(INSTALL) $(INSTALL_FLAGS) $< $@ # BUILD_STATIC_LINK @@ -299,7 +304,7 @@ endif $(SYMROOT)/$(KERNEL_FILE_NAME): $(TARGET)/$(KERNEL_FILE_NAME).unstripped ALWAYS $(_v)$(MKDIR) $(dir $@) - $(call makelog,$(ColorH)INSTALLSYM$(Color0) $(ColorF)$(@F)$(Color0) "($(ColorLF)$(CURRENT_ARCH_CONFIG_LC)$(Color0))") + @$(LOG_INSTALLSYM) "$(@F)$(Color0) ($(ColorLF)$(CURRENT_ARCH_CONFIG_LC)$(Color0))" $(_v)if [ $(OBJROOT)/.mach_kernel.timestamp -nt $@ ]; then \ $(INSTALL) $(EXEC_INSTALL_FLAGS) $< $@; \ cmdstatus=$$?; \ @@ -315,7 +320,7 @@ $(DSTROOT)/$(INSTALL_KERNEL_DIR)/$(KERNEL_FILE_NAME).link/$(KERNEL_FILE_NAME).dS $(DSTROOT)/$(INSTALL_KERNEL_SYM_DIR)/$(KERNEL_FILE_NAME).dSYM/$(DSYMLLDBMACROSDIR)/lldbmacros: \ $(TARGET)/$(KERNEL_FILE_NAME).dSYM/$(DSYMLLDBMACROSDIR)/lldbmacros $(_v)$(MKDIR) $(dir $@) - $(call makelog,$(ColorH)INSTALLMACROS$(Color0) $(ColorF)$(@F)$(Color0) "($(ColorLF)$(CURRENT_ARCH_CONFIG_LC)$(Color0))") + @$(LOG_INSTALLMACROS) "$(@F)$(Color0) $(ColorLF)$(CURRENT_ARCH_CONFIG_LC)$(Color0))" $(_v)$(CP) -r $< $(dir $@) $(_v)$(TOUCH) $@ @@ -324,22 +329,22 @@ $(DSTROOT)/$(INSTALL_KERNEL_DIR)/$(KERNEL_FILE_NAME).link/$(KERNEL_FILE_NAME).dS $(DSTROOT)/$(INSTALL_KERNEL_SYM_DIR)/$(KERNEL_FILE_NAME).dSYM/$(DSYMLLDBMACROSDIR)/$(KERNEL_LLDBBOOTSTRAP_NAME): \ $(TARGET)/$(KERNEL_FILE_NAME).dSYM/$(DSYMLLDBMACROSDIR)/$(KERNEL_LLDBBOOTSTRAP_NAME) $(_v)$(MKDIR) $(dir $@) - $(call makelog,$(ColorH)INSTALLMACROS$(Color0) $(ColorF)$(@F)$(Color0) "($(ColorLF)$(CURRENT_ARCH_CONFIG_LC)$(Color0))") + @$(LOG_INSTALLMACROS) "$(@F)$(Color0) ($(ColorLF)$(CURRENT_ARCH_CONFIG_LC)$(Color0))" $(_v)$(INSTALL) $(INSTALL_FLAGS) $< $@ $(DSTROOT)/$(DEVELOPER_EXTRAS_DIR)/README.DEBUG-kernel.txt: $(SRCROOT)/config/README.DEBUG-kernel.txt $(_v)$(MKDIR) $(dir $@) - $(call makelog,$(ColorH)INSTALL$(Color0) $(ColorF)$(@F)$(Color0)) + @$(LOG_INSTALL) "$(@F)" $(_v)$(INSTALL) $(INSTALL_FLAGS) $< $@ $(SYMROOT)/$(KERNEL_FILE_NAME).dSYM/$(DSYMINFODIR)/Info.plist $(DSTROOT)/$(INSTALL_KERNEL_SYM_DIR)/$(KERNEL_FILE_NAME).dSYM/$(DSYMINFODIR)/Info.plist: $(TARGET)/$(KERNEL_FILE_NAME).dSYM/$(DSYMINFODIR)/Info.plist $(_v)$(MKDIR) $(dir $@) - $(call makelog,$(ColorH)INSTALLSYM$(Color0) $(ColorL)dSYM$(Color0) $(ColorF)$(@F)$(Color0) "($(ColorLF)$(CURRENT_ARCH_CONFIG_LC)$(Color0))") + @$(LOG_INSTALLSYM) "$(ColorL)dSYM$(Color0) $(ColorF)$(@F)$(Color0) ($(ColorLF)$(CURRENT_ARCH_CONFIG_LC)$(Color0))" $(_v)$(INSTALL) $(INSTALL_FLAGS) $< $@ $(SYMROOT)/$(KERNEL_FILE_NAME).dSYM/$(DSYMDWARFDIR)/$(KERNEL_FILE_NAME) $(DSTROOT)/$(INSTALL_KERNEL_SYM_DIR)/$(KERNEL_FILE_NAME).dSYM/$(DSYMDWARFDIR)/$(KERNEL_FILE_NAME): $(TARGET)/$(KERNEL_FILE_NAME).dSYM/$(DSYMDWARFDIR)/$(KERNEL_FILE_NAME) ALWAYS $(_v)$(MKDIR) $(dir $@) - $(call makelog,$(ColorH)INSTALLSYM$(Color0) $(ColorL)dSYM$(Color0) $(ColorF)$(@F).dSYM$(ColorF) "($(ColorLF)$(CURRENT_ARCH_CONFIG_LC)$(Color0))") + @$(LOG_INSTALLSYM) "$(ColorL)dSYM$(Color0) $(ColorF)$(@F).dSYM$(ColorF) ($(ColorLF)$(CURRENT_ARCH_CONFIG_LC)$(Color0))" $(_v)if [ $(OBJROOT)/.mach_kernel.timestamp -nt $@ ]; then \ $(INSTALL) $(EXEC_INSTALL_FLAGS) $< $@; \ cmdstatus=$$?; \ @@ -404,7 +409,7 @@ endif endif $(DSTROOT)/$(INSTALL_KERNEL_DIR)/$(ALIAS_FILE_NAME): ALWAYS - $(call makelog,$(ColorH)ALIAS$(Color0) 
$(ColorF)$(@F)$(Color0) "($(ColorLF)$(CURRENT_ARCH_CONFIG_LC)$(Color0) $(ColorLF)$(CURRENT_MACHINE_CONFIG_LC)$(Color0) $(ColorLF)$(CURRENT_ALIAS_MACHINE_CONFIG_LC)$(Color0))") + @$(LOG_ALIAS) "$(@F)$(Color0) ($(ColorLF)$(CURRENT_ARCH_CONFIG_LC)$(Color0) $(ColorLF)$(CURRENT_MACHINE_CONFIG_LC)$(Color0) $(ColorLF)$(CURRENT_ALIAS_MACHINE_CONFIG_LC)$(Color0))" $(_v)$(INSTALL) $(EXEC_INSTALL_FLAGS) $(DSTROOT)/$(INSTALL_KERNEL_DIR)/$(KERNEL_FILE_NAME) $@ install_alias: $(DSTROOT)/$(INSTALL_KERNEL_DIR)/$(ALIAS_FILE_NAME) diff --git a/makedefs/MakeInc.rule b/makedefs/MakeInc.rule index 3961872d5..ad66233b6 100644 --- a/makedefs/MakeInc.rule +++ b/makedefs/MakeInc.rule @@ -56,34 +56,38 @@ ifeq ($(LOGCOLORS),y) # export CDevs := $(shell $(EMBEDDED_DEVICE_MAP) -db $(EDM_DBPATH) -query "SELECT DISTINCT TargetType FROM Targets WHERE KernelPlatform = '$(CURRENT_MACHINE_CONFIG_LC)'" | tr '[\r\n]' ':' | sed 's,:$$,,') #endif endif - ifndef CMD_MC + ifndef MACHINE_PFX export _MACHINE := $(CURRENT_MACHINE_CONFIG_LC) ifeq ($(CURRENT_MACHINE_CONFIG),NONE) - export _MACHINE := $(subst Mac,,$(PLATFORM)) + export _MACHINE := $(subst OSX,,$(PLATFORM)) endif - export CMD_MC := $(shell __A="$(CURRENT_ARCH_CONFIG_LC)"; \ + export MACHINE_PFX := $(shell __A="$(CURRENT_ARCH_CONFIG_LC)"; \ __As=$$((6-$${\#__A})); \ - printf "\\033[1m%-.6s%*.*s %9.9s\\033[m" \ + printf "%-.6s%*.*s %9.9s" \ "$${__A}" \ $${__As} $${__As} " " \ "$(_MACHINE)") endif + override LOG_PFX_LEN := 30 + override LOG_PFX_LEN_ADJ := $(shell __TMP="$(MACHINE_PFX)"; \ + printf "%d" $$(($(LOG_PFX_LEN) - $${\#__TMP} - 3))) + MACHINE_PFX_COL = $(shell printf "\\033[1m%s\\033[m" "$(MACHINE_PFX)") # Turn off colored output Color0:=$(shell printf "\\033[m") - # Start a host command: bold, underlined pink text - ColorH:=$(shell printf "\\033[1;4;35m") - # Start a compilation-related command: bold, underlined blue text - ColorC:=$(shell printf "[$(CMD_MC)] \\033[1;4;34m") - # Start a MIG command: bold, green text on light grey background - ColorM:=$(shell printf "[$(CMD_MC)] \\033[1;32;40m") - # Start a linking command: bold, white text on blue background - ColorL:=$(shell printf "[$(CMD_MC)] \\033[1;37;44m") - # Start a filename: bold, white text - ColorF:=$(shell printf "\\033[1;37m") - # Start a linked file name: yellow text on light grey background - ColorLF:=$(shell printf "\\033[1;33;40m") - # Error strings: underlined bold white text on red background - ColorErr:=$(shell printf "\033[1;4;37;41m") + # Start a host command: bold text + ColorH:=$(shell printf "\\033[1m") + # Start a compilation-related command: blue text + ColorC:=$(shell printf "[$(MACHINE_PFX_COL)] \\033[1;34m") + # Start a MIG command: green text + ColorM:=$(shell printf "[$(MACHINE_PFX_COL)] \\033[1;32m") + # Start a linking command: purple text + ColorL:=$(shell printf "[$(MACHINE_PFX_COL)] \\033[1;35m") + # Start a filename + ColorF:=$(shell printf "") + # Start a linked file name: italic text + ColorLF:=$(shell printf "\\033[3m") + # Error strings: red text + ColorErr:=$(shell printf "\033[31m") endif .PHONY: ALWAYS @@ -112,7 +116,7 @@ $(3)/.UNIFDEF_FLAGS: ALWAYS | $(3)_MKDIR $$(_v)$$(REPLACECONTENTS) $$@ $$(UNIFDEF) $(4) $(1): $(dir $(firstword $(1)))% : $(if $(2),%,$$(SOURCE)/%) | $(3)_MKDIR - $$(call makelog,$$(ColorH)INSTALLHDR$$(Color0) $$(ColorF)$$*$$(Color0)) + @$$(LOG_INSTALLHDR) "$$*" $$(_v)$$(UNIFDEF) $(4) $$< > ./$(3)/$$*.unifdef.$$$$$$$$; \ if [ $$$$? 
-eq 2 ]; then \ $(ERR) Parse failure for $$<; \ @@ -148,7 +152,7 @@ $(3)/.UNIFDEF_FLAGS: ALWAYS | $(3)_MKDIR $$(_v)$$(REPLACECONTENTS) $$@ $$(UNIFDEF) -t $(4) $(1): $(5)% : $(2) | $(3)_MKDIR - $$(call makelog,$$(ColorH)INSTALLPY$$(Color0) $$(ColorF)$$*$$(Color0)) + @$$(LOG_INSTALLPY) "$$*" $$(_v)$$(MKDIR) $$(dir $$@) $$(dir ./$(3)/$$*) $$(_v)$$(UNIFDEF) -t $(4) $$< > ./$(3)/$$*.unifdef.$$$$$$$$$$(suffix $$*); \ if [ $$$$? -eq 2 ]; then \ @@ -192,6 +196,7 @@ INSTALL_DRIVERKIT_MD_LCL_LIST = INSTALL_DRIVERKIT_MD_LCL_GEN_LIST = INSTALL_KF_MD_LCL_LIST = INSTALL_KF_MD_LCL_GEN_LIST = +INSTALL_MODULEMAP_INCDIR_MI_LIST = endif # @@ -218,12 +223,22 @@ INSTALL_KF_MI_GEN_FILES = $(addprefix $(DSTROOT)/$(KINCDIR)/$(EXPORT_MI_DIR)/, $ $(eval $(call INSTALLHDRS_RULE_template,$(INSTALL_KF_MI_FILES),,kincmidir,$(KINCFRAME_UNIFDEF))) $(eval $(call INSTALLHDRS_RULE_template,$(INSTALL_KF_MI_GEN_FILES),1,kincmigendir,$(KINCFRAME_UNIFDEF))) +# Skip INSTALL_MI_DIR to install directly to INCDIR. +INSTALL_MODULEMAP_INCDIR_MI_INC_FILES = $(addprefix $(DSTROOT)/$(INCDIR)/,$(INSTALL_MODULEMAP_INCDIR_MI_LIST)) + +$(eval $(call INSTALLHDRS_RULE_template,$(INSTALL_MODULEMAP_INCDIR_MI_INC_FILES),,mmrootincmidir,-t $(DATA_UNIFDEF))) + # # Machine-independent local (private) files # +ifeq ($(LIBKERN_USE_USR_LOCAL_INCLUDE),) INSTALL_MI_LCL_FILES = $(addprefix $(DSTROOT)/$(LCLDIR)/$(INSTALL_MI_DIR)/, $(sort $(INSTALL_MI_LCL_LIST) $(INSTALL_MI_LIST))) INSTALL_MI_LCL_GEN_FILES = $(addprefix $(DSTROOT)/$(LCLDIR)/$(INSTALL_MI_DIR)/, $(sort $(INSTALL_MI_LCL_GEN_LIST) $(INSTALL_MI_GEN_LIST))) +else +INSTALL_MI_LCL_FILES = $(addprefix $(DSTROOT)/$(LCLDIR)/$(INSTALL_MI_DIR)/, $(sort $(INSTALL_MI_LCL_LIST))) +INSTALL_MI_LCL_GEN_FILES = $(addprefix $(DSTROOT)/$(LCLDIR)/$(INSTALL_MI_DIR)/, $(sort $(INSTALL_MI_LCL_GEN_LIST))) +endif $(eval $(call INSTALLHDRS_RULE_template,$(INSTALL_MI_LCL_FILES),,pincmidir,$(SPINCFRAME_UNIFDEF))) $(eval $(call INSTALLHDRS_RULE_template,$(INSTALL_MI_LCL_GEN_FILES),1,pincmigendir,$(SPINCFRAME_UNIFDEF))) @@ -270,8 +285,13 @@ $(eval $(call INSTALLHDRS_RULE_template,$(INSTALL_KF_MD_GEN_FILES),1,kincgendir, # Machine-dependent local (private) files # +ifeq ($(LIBKERN_USE_USR_LOCAL_INCLUDE),) INSTALL_MD_LCL_FILES = $(addprefix $(DSTROOT)/$(LCLDIR)/$(INSTALL_MD_DIR)/, $(sort $(INSTALL_MD_LCL_LIST) $(INSTALL_MD_LIST))) INSTALL_MD_LCL_GEN_FILES = $(addprefix $(DSTROOT)/$(LCLDIR)/$(INSTALL_MD_DIR)/, $(sort $(INSTALL_MD_LCL_GEN_LIST) $(INSTALL_MD_GEN_LIST))) +else +INSTALL_MD_LCL_FILES = $(addprefix $(DSTROOT)/$(LCLDIR)/$(INSTALL_MD_DIR)/, $(sort $(INSTALL_MD_LCL_LIST))) +INSTALL_MD_LCL_GEN_FILES = $(addprefix $(DSTROOT)/$(LCLDIR)/$(INSTALL_MD_DIR)/, $(sort $(INSTALL_MD_LCL_GEN_LIST))) +endif $(eval $(call INSTALLHDRS_RULE_template,$(INSTALL_MD_LCL_FILES),,pincdir,$(SPINCFRAME_UNIFDEF))) $(eval $(call INSTALLHDRS_RULE_template,$(INSTALL_MD_LCL_GEN_FILES),1,pincgendir,$(SPINCFRAME_UNIFDEF))) @@ -298,7 +318,8 @@ do_installhdrs_mi:: $(INSTALL_MI_INC_FILES) $(INSTALL_MI_INC_GEN_FILES) \ $(INSTALL_KF_MI_FILES) $(INSTALL_KF_MI_GEN_FILES) \ $(INSTALL_MI_LCL_FILES) $(INSTALL_MI_LCL_GEN_FILES) \ $(INSTALL_DRIVERKIT_MI_LCL_FILES) $(INSTALL_DRIVERKIT_MI_LCL_GEN_FILES) \ - $(INSTALL_KF_MI_LCL_FILES) $(INSTALL_KF_MI_LCL_GEN_FILES) + $(INSTALL_KF_MI_LCL_FILES) $(INSTALL_KF_MI_LCL_GEN_FILES) \ + $(INSTALL_MODULEMAP_INCDIR_MI_INC_FILES) @: .PHONY: do_installhdrs_md @@ -323,10 +344,10 @@ EXP_MI_INC_DIR: $(_v)$(MKDIR) $(OBJROOT)/$(EXPDIR)/$(EXPORT_MI_DIR) $(EXPORT_MI_GEN_INC_FILES): $(OBJROOT)/$(EXPDIR)/$(EXPORT_MI_DIR)/% : % | 
EXP_MI_INC_DIR - $(_v)$(INSTALL) $(DATA_INSTALL_FLAGS) $< $@ + $(_v)$(INSTALL) $(DATA_INSTALL_FLAGS_RO) $< $@ $(EXPORT_MI_INC_FILES): $(OBJROOT)/$(EXPDIR)/$(EXPORT_MI_DIR)/% : $(SOURCE)/% | EXP_MI_INC_DIR - $(_v)$(INSTALL) $(DATA_INSTALL_FLAGS) $< $@ + $(_v)$(INSTALL) $(DATA_INSTALL_FLAGS_RO) $< $@ EXPORT_MD_INC_FILES = $(addprefix $(OBJROOT)/$(EXPDIR)/$(EXPORT_MD_DIR)/, $(EXPORT_MD_LIST)) EXPORT_MD_GEN_INC_FILES = $(addprefix $(OBJROOT)/$(EXPDIR)/$(EXPORT_MD_DIR)/, $(EXPORT_MD_GEN_LIST)) @@ -337,10 +358,10 @@ EXP_MD_INC_DIR: $(_v)$(MKDIR) $(OBJROOT)/$(EXPDIR)/$(EXPORT_MD_DIR) $(EXPORT_MD_GEN_INC_FILES): $(OBJROOT)/$(EXPDIR)/$(EXPORT_MD_DIR)/% : % | EXP_MD_INC_DIR - $(_v)$(INSTALL) $(DATA_INSTALL_FLAGS) $< $@ + $(_v)$(INSTALL) $(DATA_INSTALL_FLAGS_RO) $< $@ $(EXPORT_MD_INC_FILES): $(OBJROOT)/$(EXPDIR)/$(EXPORT_MD_DIR)/% : $(SOURCE)/% | EXP_MD_INC_DIR - $(_v)$(INSTALL) $(DATA_INSTALL_FLAGS) $< $@ + $(_v)$(INSTALL) $(DATA_INSTALL_FLAGS_RO) $< $@ .PHONY: do_exporthdrs_mi @@ -360,7 +381,7 @@ do_exporthdrs_md: $(EXPORT_MD_GEN_INC_FILES) $(EXPORT_MD_INC_FILES) # Compilation rules to generate .o from .s # -S_RULE_0=$(call makelog,$(ColorC)AS$(Color0) $(ColorF)$@$(Color0)) +S_RULE_0=@$(LOG_AS) "$@" S_RULE_1A=$(_v)${S_KCC} -c ${SFLAGS} -MD -MF $(@:o=d) -MP ${$@_SFLAGS_ADD} ${INCFLAGS} ${$@_INCFLAGS} S_RULE_1B=$( $@; \ + if [ $$? -eq 2 ]; then \ + $(ERR) Parse failure for $<; \ + exit 1; \ + fi + # # This isn't the right place to put this, but we need to := override some settings # in Makefiles that include the generic helper fragments (like this file) @@ -440,12 +472,12 @@ INSTALL_MAN_DIR: $(_v)$(MKDIR) $(DSTROOT)/$(MANDIR)/$(INSTALL_MAN_DIR) $(INSTALL_MAN_FILES): $(DSTROOT)/$(MANDIR)/$(INSTALL_MAN_DIR)/% : % | INSTALL_MAN_DIR - $(call makelog,$(ColorH)MAN$(Color0) $(ColorF)$*$(Color0)) + @$(LOG_MAN) "$*" $(_v)$(INSTALL) $(INSTALL_FLAGS) $< $@ define MAN_LINKS_RULE_template $$(DSTROOT)/$$(MANDIR)/$$(INSTALL_MAN_DIR)/$(2): $$(DSTROOT)/$$(MANDIR)/$$(INSTALL_MAN_DIR)/$(1) - $$(call makelog,$$(ColorH)MANLINK$$(Color0) $$(ColorF)$(2)$$(Color0)) + @$$(LOG_MANLINK) "$(2)" $(_v)ln -f $$< $$@ endef @@ -458,11 +490,16 @@ INSTALL_MAN_FILES_LINKS = $(call function_generate_man_links_rules,$(INSTALL_MAN do_installman: $(INSTALL_MAN_FILES) $(INSTALL_MAN_FILES_LINKS) @: -.PHONY: do_textfiles_install +.PHONY: do_textfiles_install do_textfiles_install_mi do_textfiles_install_md + +# Do-nothing rule, since not all levels of the recursive hierarchy might implement this +# in their local Makefiles. Those that do will use a "::" rule to augment this. +do_textfiles_install_mi:: do_installman + @: # Do-nothing rule, since not all levels of the recursive hierarchy might implement this # in their local Makefiles. Those that do will use a "::" rule to augment this. 
-do_textfiles_install:: do_installman +do_textfiles_install_md:: @: .PHONY: do_build_setup diff --git a/makedefs/MakeInc.top b/makedefs/MakeInc.top index dd1070e64..213d0cec1 100644 --- a/makedefs/MakeInc.top +++ b/makedefs/MakeInc.top @@ -58,9 +58,9 @@ endif DEFAULT_PRODUCT_CONFIGS := -ifeq ($(RC_ProjectName),xnu_debug) +ifneq ($(filter $(RC_ProjectName),xnu_debug),) override DEFAULT_KERNEL_CONFIG := DEBUG -else ifeq ($(RC_ProjectName),xnu_kasan) +else ifneq ($(filter $(RC_ProjectName),xnu_kasan),) override KERNEL_CONFIGS := KASAN else ifneq ($(filter $(SUPPORTED_EMBEDDED_PLATFORMS),$(PLATFORM)),) override DEFAULT_KERNEL_CONFIG := DEVELOPMENT @@ -87,8 +87,17 @@ endif override DEFAULT_I386_MACHINE_CONFIG := NONE override DEFAULT_X86_64_MACHINE_CONFIG := NONE override DEFAULT_X86_64H_MACHINE_CONFIG := NONE + +ifneq ($(findstring _Sim,$(RC_ProjectName)),) +override DEFAULT_ARM_MACHINE_CONFIG := NONE +override DEFAULT_ARM64_MACHINE_CONFIG := NONE +else ifneq ($(findstring _host,$(RC_ProjectName)),) +override DEFAULT_ARM_MACHINE_CONFIG := NONE +override DEFAULT_ARM64_MACHINE_CONFIG := NONE +else override DEFAULT_ARM_MACHINE_CONFIG := T8002 override DEFAULT_ARM64_MACHINE_CONFIG := T7000 +endif # This is typically never specified (TARGET_CONFIGS is used) ifndef MACHINE_CONFIGS @@ -115,7 +124,7 @@ endif # default kernel configuration = DEFAULT_KERNEL_CONFIG # default architecture configuration = system architecture where you are running make. -ifneq ($(filter $(SUPPORTED_EMBEDDED_PLATFORMS),$(PLATFORM)),) +ifneq ($(filter $(SUPPORTED_PLATFORMS),$(PLATFORM)),) # Defaults for "make all_embedded" ifeq ($(KERNEL_CONFIGS),DEFAULT) @@ -126,11 +135,14 @@ endif ifeq ($(ARCH_CONFIGS),DEFAULT) ARCH_CONFIGS_EMBEDDED := ARM ARM64 +ARCH_CONFIGS_DESKTOP := X86_64 else -ARCH_CONFIGS_EMBEDDED := $(strip $(shell echo $(ARCH_CONFIGS) | $(TR) a-z A-Z)) +ARCH_CONFIGS_EMBEDDED := $(strip $(shell echo $(filter-out X86_64, $(ARCH_CONFIGS)) | $(TR) a-z A-Z)) +ARCH_CONFIGS_DESKTOP := $(strip $(shell echo $(filter X86_64, $(ARCH_CONFIGS)) | $(TR) a-z A-Z)) endif # Find supported products from the device map +ifneq ($(EMBEDDED_DEVICE_MAP),) DEVICEMAP_PRODUCTS_ARMV7 := $(shell $(EMBEDDED_DEVICE_MAP) -db $(EDM_DBPATH) \ -query 'SELECT DISTINCT TargetType \ FROM Files \ @@ -162,11 +174,22 @@ DEVICEMAP_PRODUCTS_ARM64 := $(shell $(EMBEDDED_DEVICE_MAP) -db $(EDM_DBPATH) \ INNER JOIN Targets USING (Target) \ WHERE (KernelMachOArchitecture LIKE "arm64" \ AND fileType in ("KernelCache", "RestoreKernelCache"))') +DEVICEMAP_PRODUCTS_OSX_ARM64 := $(shell $(EMBEDDED_DEVICE_MAP) -db $(EDM_DBPATH) \ + -query 'SELECT DISTINCT TargetType \ + FROM Files \ + INNER JOIN Manifests USING (manifestID) \ + INNER JOIN Targets USING (Target) \ + WHERE (KernelMachOArchitecture LIKE "arm64" \ + AND fileType in ("KernelCache", "RestoreKernelCache") \ + AND SDKPlatform == "macosx")') # Generate a list of mappings of the form "n75:arm;t8002" based on the device map DEVICEMAP_PRODUCT_SOC_MAPPINGS := $(shell $(EMBEDDED_DEVICE_MAP) -db $(EDM_DBPATH) -query SELECT DISTINCT TargetType, KernelMachOArchitecture, KernelPlatform FROM Targets | awk -F\| '{ if ($$2 ~ /armv[0-9][a-z]?/) { print $$1 ":arm;" $$3 } else if ($$2 ~ /arm64[a-z]?/) { print $$1 ":arm64;" $$3 ";" $$4} else { print $$1 ":" $$2 ";" $$3 ";" $$4} }' ) +# use embedded_device_map +endif + # Map a product like "n75" to "arm;t8002" # $(1) is a product name in lower case function_lookup_product = $(call function_substitute_word_with_replacement, \ @@ -175,11 +198,16 @@ function_lookup_product = 
$(call function_substitute_word_with_replacement, \ unknown_arch_for_$(1);unknown_platform_for_$(1) \ ) +ifneq ($(PLATFORM),MacOSX) +ifneq ($(EMBEDDED_DEVICE_MAP),) # Generate a list of mappings for products that use a different platform for their kernel configuration than their true platform # of the form "n71m:arm64;s8000;s8003". The 4th element is the true SoC platform, which will get an on-disk copy, while the # kernel's recursive build system will build the 3rd element as the KernelPlatform DEVICEMAP_PRODUCT_SOC_ALIASES := $(shell $(EMBEDDED_DEVICE_MAP) -db $(EDM_DBPATH) -query SELECT DISTINCT TargetType, KernelMachOArchitecture, KernelPlatform, Platform FROM Targets WHERE KernelPlatform "!=" Platform | awk -F\| '{ if ($$2 ~ /armv[0-9][a-z]?/) { print $$1 ":arm;" $$3 ";" $$4} else if ($$2 ~ /arm64[a-z]?/) { print $$1 ":arm64;" $$3 ";" $$4} else { print $$1 ":" $$2 ";" $$3 ";" $$4} }' ) +endif +endif + function_lookup_product_alias = $(call function_substitute_word_with_replacement, \ $(1), \ $(DEVICEMAP_PRODUCT_SOC_ALIASES), \ @@ -216,8 +244,9 @@ TARGET_CONFIGS := $(foreach my_devicemap_config,$(foreach my_arch_config,$(ARCH_ TARGET_CONFIGS_ALIASES := $(foreach my_devicemap_config,$(foreach my_arch_config,$(ARCH_CONFIGS_EMBEDDED),$(foreach my_product_config,$(DEVICEMAP_PRODUCTS_$(my_arch_config)),$(call function_lookup_product_alias,$(my_product_config)))),$(foreach my_kernel_config,$(KERNEL_CONFIGS_EMBEDDED),$(my_kernel_config) $(subst ;, ,$(my_devicemap_config)))) else ifneq ($(filter %_desktop,$(MAKECMDGOALS)),) # generate TARGET_CONFIGS for all kernel configs for B&I -TARGET_CONFIGS := $(foreach my_kern_config, $(KERNEL_CONFIGS_DESKTOP), $(foreach my_arch_config, $(ARCH_CONFIGS), $(foreach my_machine_config, $(MACHINE_CONFIGS), $(my_kern_config) $(my_arch_config) $(my_machine_config)))) -TARGET_CONFIGS_ALIASES := +TARGET_CONFIGS := $(foreach my_kern_config, $(KERNEL_CONFIGS_DESKTOP), $(foreach my_arch_config, $(ARCH_CONFIGS_DESKTOP), $(foreach my_machine_config, $(MACHINE_CONFIGS), $(my_kern_config) $(my_arch_config) $(my_machine_config)))) +TARGET_CONFIGS += $(foreach my_devicemap_config,$(foreach my_arch_config,$(ARCH_CONFIGS_EMBEDDED),$(foreach my_product_config,$(DEVICEMAP_PRODUCTS_OSX_$(my_arch_config)),$(call function_lookup_product,$(my_product_config)))),$(foreach my_kernel_config,$(KERNEL_CONFIGS_EMBEDDED),$(my_kernel_config) $(subst ;, ,$(my_devicemap_config)))) +TARGET_CONFIGS_ALIASES := $(foreach my_devicemap_config,$(foreach my_arch_config,$(ARCH_CONFIGS_EMBEDDED),$(foreach my_product_config,$(DEVICEMAP_PRODUCTS_OSX_$(my_arch_config)),$(call function_lookup_product_alias,$(my_product_config)))),$(foreach my_kernel_config,$(KERNEL_CONFIGS_EMBEDDED),$(my_kernel_config) $(subst ;, ,$(my_devicemap_config)))) else # generate TARGET_CONFIGS using KERNEL_CONFIGS and ARCH_CONFIGS and MACHINE_CONFIGS (which defaults to "DEFAULT") TARGET_CONFIGS := $(foreach my_kern_config, $(KERNEL_CONFIGS), $(foreach my_arch_config, $(ARCH_CONFIGS), $(foreach my_machine_config, $(MACHINE_CONFIGS), $(my_kern_config) $(my_arch_config) $(my_machine_config)))) @@ -419,10 +448,10 @@ exporthdrs_md: build_exporthdrs_md_bootstrap .PHONY: installhdrs installhdrs_mi installhdrs_md -ifeq ($(RC_ProjectName),xnu_debug) +ifneq ($(filter $(RC_ProjectName),xnu_debug),) installhdrs: @: -else ifeq ($(RC_ProjectName),xnu_kasan) +else ifneq ($(filter $(RC_ProjectName),xnu_kasan),) installhdrs: @: else @@ -460,19 +489,33 @@ $(eval $(generated_top_level_build_installhdrs_md)) installhdrs_md: 
build_installhdrs_md_bootstrap +.PHONY: install_textfiles install_textfiles_mi install_textfiles_md + +install_textfiles: install_textfiles_mi install_textfiles_md + # -# Install text files (man pages, dtrace scripts, etc.) +# Install machine independent text files (man pages, dtrace scripts, etc.) # -generated_top_level_textfiles_install = $(call TOP_LEVEL_EACH_BUILD_CONFIG_BOOTSTRAP_template,textfiles_install,,setup,,1,$(FIRST_BUILD_CONFIG)) +generated_top_level_textfiles_install_mi = $(call TOP_LEVEL_EACH_BUILD_CONFIG_BOOTSTRAP_template,textfiles_install_mi,,setup,,1,$(FIRST_BUILD_CONFIG)) ifeq ($(VERBOSE_GENERATED_MAKE_FRAGMENTS),YES) -$(warning Generate makefile fragment: $(generated_top_level_textfiles_install)) +$(warning Generate makefile fragment: $(generated_top_level_textfiles_install_mi)) endif -$(eval $(generated_top_level_textfiles_install)) +$(eval $(generated_top_level_textfiles_install_mi)) -.PHONY: install_textfiles +install_textfiles_mi: textfiles_install_mi_bootstrap + +# +# Install machine dependent text files (man pages, dtrace scripts, etc.) +# + +generated_top_level_textfiles_install_md = $(call TOP_LEVEL_EACH_BUILD_CONFIG_BOOTSTRAP_template,textfiles_install_md,,setup,,$(KERNEL_BUILDS_IN_PARALLEL),$(PRIMARY_BUILD_CONFIGS)) +ifeq ($(VERBOSE_GENERATED_MAKE_FRAGMENTS),YES) +$(warning Generate makefile fragment: $(generated_top_level_textfiles_install_md)) +endif +$(eval $(generated_top_level_textfiles_install_md)) -install_textfiles: textfiles_install_bootstrap +install_textfiles_md: textfiles_install_md_bootstrap # # Build all architectures for all Configuration/Architecture options @@ -550,16 +593,16 @@ final_touch_config_timestamps: config_install_bootstrap .PHONY: install -ifeq ($(RC_ProjectName),xnu_debug) +ifneq ($(filter $(RC_ProjectName),xnu_debug),) install: install_kernels -else ifeq ($(RC_ProjectName),xnu_kasan) +else ifneq ($(filter $(RC_ProjectName),xnu_kasan),) install: install_config install_kernels -else ifeq ($(RC_ProjectName),xnu_headers_Sim) +else ifneq ($(filter $(RC_ProjectName),xnu_headers_Sim),) install: installhdrs -else ifeq ($(RC_ProjectName),xnu_headers_host) +else ifneq ($(filter $(RC_ProjectName),xnu_headers_host),) install: installhdrs export INSTALLHDRS_SKIP_HOST=YES -else ifeq ($(RC_ProjectName),xnu_headers_driverkit) +else ifneq ($(filter $(RC_ProjectName),xnu_headers_driverkit),) install: installhdrs_desktop else @@ -581,6 +624,7 @@ install_kernels: build_install_primary_bootstrap build_install_non_primary_boots # Tell the next build the latest timestamp of any potential file in DSTROOT/SYMROOT final_touch_kernel_timestamps: build_install_primary_bootstrap build_install_non_primary_bootstrap $(_v)$(TOUCH) $(OBJROOT)/.mach_kernel.timestamp + @echo "done building xnu" # Copy kernels that are aliases of another configuration generated_top_level_install_alias = $(call TOP_LEVEL_EACH_BUILD_CONFIG_BOOTSTRAP_template,install_alias,,install_kernels,,$(KERNEL_BUILDS_IN_PARALLEL),$(ALIAS_CONFIGS)) diff --git a/osfmk/UserNotification/Makefile b/osfmk/UserNotification/Makefile index be0723a8b..f8dde43ab 100644 --- a/osfmk/UserNotification/Makefile +++ b/osfmk/UserNotification/Makefile @@ -63,7 +63,7 @@ ${COMP_FILES} : ${MIG_TYPES} ${MIG_KUSRC} : \ %.c : %.defs - $(call makelog,$(ColorM)MIG$(Color0) $(ColorF)$@$(Color0)) + @$(LOG_MIG) "$@" $(_v)${MIG} ${MIGFLAGS} ${MIGKUFLAGS} \ -user $*.c \ -header $*.h \ @@ -73,7 +73,7 @@ ${MIG_KUSRC} : \ ${MIG_KSSRC}: \ %Server.c : %.defs - $(call makelog,$(ColorM)MIG$(Color0) $(ColorF)$@$(Color0)) + 
@$(LOG_MIG) "$@" $(_v)${MIG} ${MIGFLAGS} ${MIGKSFLAGS} \ -user /dev/null \ -header /dev/null \ diff --git a/osfmk/arm/Makefile b/osfmk/arm/Makefile index ab9975c6e..50251f857 100644 --- a/osfmk/arm/Makefile +++ b/osfmk/arm/Makefile @@ -6,11 +6,75 @@ export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir include $(MakeInc_cmd) include $(MakeInc_def) +ifeq ($(PLATFORM),MacOSX) + +ARM_HEADER_FILES = \ + arch.h \ + atomic.h \ + cpu_number.h \ + cpu_capabilities.h \ + cpu_x86_64_capabilities.h \ + cpuid.h \ + cpuid_internal.h \ + io_map_entries.h \ + lock.h \ + locks.h \ + machine_cpuid.h \ + machine_routines.h \ + memory_types.h \ + pal_routines.h \ + simple_lock.h \ + thread.h \ + trap.h + +INSTALL_MD_DIR = arm + +# Headers installed into System.framework/PrivateHeaders (internal SDK only). +INSTALL_MD_LCL_LIST = arch.h cpu_capabilities.h + +# Headers installed into /usr/include (public and internal SDKs). +INSTALL_MD_LIST = arch.h + +# Headers installed in the public/internal SDKs for userspace DriverKit drivers. +INSTALL_DRIVERKIT_MD_LIST = arch.h + +# Headers installed into Kernel.framework/Headers (public and internal SDKs). +INSTALL_KF_MD_LIST = $(ARM_HEADER_FILES) + +# Headers installed into Kernel.framework/PrivateHeaders (internal SDK only). +INSTALL_KF_MD_LCL_LIST = \ + dbgwrap.h \ + machine_kpc.h \ + monotonic.h \ + pmap_public.h \ + proc_reg.h \ + smp.h \ + $(ARM_HEADER_FILES) + +# TODO: consolidate INSTALL_KF_MD_LCL_LIST and EXPORT_MD_LIST? Only difference is caches_internal.h/machine_cpu.h +# Headers used to compile xnu +EXPORT_MD_LIST = \ + caches_internal.h \ + dbgwrap.h \ + machine_cpu.h \ + machine_kpc.h \ + monotonic.h \ + pmap_public.h \ + proc_reg.h \ + smp.h \ + ${ARM_HEADER_FILES} + +# These headers will be available with #include +EXPORT_MD_DIR = arm + +else # $(PLATFORM),MacOSX + ARM_HEADER_FILES = \ arch.h \ atomic.h \ cpu_number.h \ cpu_capabilities.h \ + cpu_x86_64_capabilities.h \ cpuid.h \ cpuid_internal.h \ dbgwrap.h \ @@ -26,7 +90,8 @@ ARM_HEADER_FILES = \ proc_reg.h \ simple_lock.h \ smp.h \ - thread.h + thread.h \ + trap.h INSTALL_MD_DIR = arm @@ -34,6 +99,8 @@ INSTALL_MD_LCL_LIST = arch.h cpu_capabilities.h INSTALL_MD_LIST = arch.h +INSTALL_DRIVERKIT_MD_LIST = arch.h + INSTALL_KF_MD_LIST = $(ARM_HEADER_FILES) INSTALL_KF_MD_LCL_LIST = machine_kpc.h monotonic.h $(ARM_HEADER_FILES) @@ -46,5 +113,7 @@ EXPORT_MD_LIST = \ EXPORT_MD_DIR = arm +endif # $(PLATFORM),MacOSX + include $(MakeInc_rule) include $(MakeInc_dir) diff --git a/osfmk/arm/arm_init.c b/osfmk/arm/arm_init.c index 4c25645de..9c11427c2 100644 --- a/osfmk/arm/arm_init.c +++ b/osfmk/arm/arm_init.c @@ -42,8 +42,12 @@ #include #include #include +#if HIBERNATION +#include +#endif /* HIBERNATION */ /* ARM64_TODO unify boot.h */ #if __arm64__ +#include #include #elif __arm__ #include @@ -57,6 +61,8 @@ #include #include #include +#include +#include #include #include #include @@ -66,6 +72,7 @@ #include #include #include +#include #include @@ -78,6 +85,10 @@ #include #endif /* MONOTONIC */ +#if HIBERNATION +#include +#endif /* HIBERNATION */ + extern void patch_low_glo(void); extern int serial_init(void); extern void sleep_token_buffer_init(void); @@ -85,7 +96,6 @@ extern void sleep_token_buffer_init(void); extern vm_offset_t intstack_top; #if __arm64__ extern vm_offset_t excepstack_top; -extern uint64_t events_per_sec; #else extern vm_offset_t fiqstack_top; #endif @@ -98,13 +108,9 @@ int pc_trace_buf[PC_TRACE_BUF_SIZE] = {0}; int pc_trace_cnt = PC_TRACE_BUF_SIZE; int debug_task; -boolean_t up_style_idle_exit = 0; 
- +bool need_wa_rdar_55577508 = false; +SECURITY_READ_ONLY_LATE(bool) static_kernelcache = false; -#if HAS_NEX_PG -uint32_t nex_pg = 1; -extern void set_nex_pg(void); -#endif #if HAS_BP_RET /* Enable both branch target retention (0x2) and branch direction retention (0x1) across sleep */ @@ -114,13 +120,15 @@ extern void set_bp_ret(void); #if INTERRUPT_MASKED_DEBUG boolean_t interrupt_masked_debug = 1; +/* the following are in mach timebase units */ uint64_t interrupt_masked_timeout = 0xd0000; +uint64_t stackshot_interrupt_masked_timeout = 0xf9999; #endif boot_args const_boot_args __attribute__((section("__DATA, __const"))); boot_args *BootArgs __attribute__((section("__DATA, __const"))); -unsigned int arm_diag; +TUNABLE(uint32_t, arm_diag, "diag", 0); #ifdef APPLETYPHOON static unsigned cpus_defeatures = 0x0; extern void cpu_defeatures_set(unsigned int); @@ -132,6 +140,13 @@ extern volatile boolean_t arm64_stall_sleep; extern boolean_t force_immediate_debug_halt; +#if HAS_APPLE_PAC +SECURITY_READ_ONLY_LATE(boolean_t) diversify_user_jop = TRUE; +#endif + +SECURITY_READ_ONLY_LATE(uint64_t) gDramBase; +SECURITY_READ_ONLY_LATE(uint64_t) gDramSize; + /* * Forward definition */ @@ -146,105 +161,160 @@ unsigned int page_shift_user32; /* for page_size as seen by a 32-bit task */ * JOP rebasing */ +#define dyldLogFunc(msg, ...) +#include + +extern uint32_t __thread_starts_sect_start[] __asm("section$start$__TEXT$__thread_starts"); +extern uint32_t __thread_starts_sect_end[] __asm("section$end$__TEXT$__thread_starts"); #if defined(HAS_APPLE_PAC) -#include +extern void OSRuntimeSignStructors(kernel_mach_header_t * header); +extern void OSRuntimeSignStructorsInFileset(kernel_mach_header_t * header); #endif /* defined(HAS_APPLE_PAC) */ -// Note, the following should come from a header from dyld +extern vm_offset_t vm_kernel_slide; +extern vm_offset_t segLOWESTKC, segHIGHESTKC, segLOWESTROKC, segHIGHESTROKC; +extern vm_offset_t segLOWESTAuxKC, segHIGHESTAuxKC, segLOWESTROAuxKC, segHIGHESTROAuxKC; +extern vm_offset_t segLOWESTRXAuxKC, segHIGHESTRXAuxKC, segHIGHESTNLEAuxKC; + static void -rebase_chain(uintptr_t chainStartAddress, uint64_t stepMultiplier, uintptr_t baseAddress __unused, uint64_t slide) +arm_slide_rebase_and_sign_image(void) { - uint64_t delta = 0; - uintptr_t address = chainStartAddress; - do { - uint64_t value = *(uint64_t*)address; + kernel_mach_header_t *k_mh, *kc_mh = NULL; + kernel_segment_command_t *seg; + uintptr_t slide; -#if HAS_APPLE_PAC - uint16_t diversity = (uint16_t)(value >> 32); - bool hasAddressDiversity = (value & (1ULL << 48)) != 0; - ptrauth_key key = (ptrauth_key)((value >> 49) & 0x3); -#endif - bool isAuthenticated = (value & (1ULL << 63)) != 0; - bool isRebase = (value & (1ULL << 62)) == 0; - if (isRebase) { - if (isAuthenticated) { - // The new value for a rebase is the low 32-bits of the threaded value plus the slide. 
- uint64_t newValue = (value & 0xFFFFFFFF) + slide; - // Add in the offset from the mach_header - newValue += baseAddress; -#if HAS_APPLE_PAC - // We have bits to merge in to the discriminator - uintptr_t discriminator = diversity; - if (hasAddressDiversity) { - // First calculate a new discriminator using the address of where we are trying to store the value - // Only blend if we have a discriminator - if (discriminator) { - discriminator = __builtin_ptrauth_blend_discriminator((void*)address, discriminator); - } else { - discriminator = address; - } - } - switch (key) { - case ptrauth_key_asia: - newValue = (uintptr_t)__builtin_ptrauth_sign_unauthenticated((void*)newValue, ptrauth_key_asia, discriminator); - break; - case ptrauth_key_asib: - newValue = (uintptr_t)__builtin_ptrauth_sign_unauthenticated((void*)newValue, ptrauth_key_asib, discriminator); - break; - case ptrauth_key_asda: - newValue = (uintptr_t)__builtin_ptrauth_sign_unauthenticated((void*)newValue, ptrauth_key_asda, discriminator); - break; - case ptrauth_key_asdb: - newValue = (uintptr_t)__builtin_ptrauth_sign_unauthenticated((void*)newValue, ptrauth_key_asdb, discriminator); - break; - } -#endif - *(uint64_t*)address = newValue; - } else { - // Regular pointer which needs to fit in 51-bits of value. - // C++ RTTI uses the top bit, so we'll allow the whole top-byte - // and the bottom 43-bits to be fit in to 51-bits. - uint64_t top8Bits = value & 0x0007F80000000000ULL; - uint64_t bottom43Bits = value & 0x000007FFFFFFFFFFULL; - uint64_t targetValue = (top8Bits << 13) | (((intptr_t)(bottom43Bits << 21) >> 21) & 0x00FFFFFFFFFFFFFF); - targetValue = targetValue + slide; - *(uint64_t*)address = targetValue; - } + k_mh = &_mh_execute_header; + if (kernel_mach_header_is_in_fileset(k_mh)) { + /* + * The kernel is part of a MH_FILESET kernel collection, determine slide + * based on first segment's mach-o vmaddr (requires first kernel load + * command to be LC_SEGMENT_64 of the __TEXT segment) + */ + seg = (kernel_segment_command_t *)((uintptr_t)k_mh + sizeof(*k_mh)); + assert(seg->cmd == LC_SEGMENT_KERNEL); + slide = (uintptr_t)k_mh - seg->vmaddr; + + /* + * The kernel collection linker guarantees that the boot collection mach + * header vmaddr is the hardcoded kernel link address (as specified to + * ld64 when linking the kernel). + */ + kc_mh = (kernel_mach_header_t*)(VM_KERNEL_LINK_ADDRESS + slide); + assert(kc_mh->filetype == MH_FILESET); + + /* + * rebase and sign jops + * Note that we can't call any functions before this point, so + * we have to hard-code the knowledge that the base of the KC + * is the KC's mach-o header. This would change if any + * segment's VA started *before* the text segment + * (as the HIB segment does on x86). + */ + const void *collection_base_pointers[KCNumKinds] = {[0] = kc_mh, }; + kernel_collection_slide((struct mach_header_64 *)kc_mh, collection_base_pointers); + + PE_set_kc_header(KCKindPrimary, kc_mh, slide); + + /* + * iBoot doesn't slide load command vmaddrs in an MH_FILESET kernel + * collection, so adjust them now, and determine the vmaddr range + * covered by read-only segments for the CTRR rorgn. 
+ */ + kernel_collection_adjust_mh_addrs((struct mach_header_64 *)kc_mh, slide, false, + (uintptr_t *)&segLOWESTKC, (uintptr_t *)&segHIGHESTKC, + (uintptr_t *)&segLOWESTROKC, (uintptr_t *)&segHIGHESTROKC, + NULL, NULL, NULL); +#if defined(HAS_APPLE_PAC) + OSRuntimeSignStructorsInFileset(kc_mh); +#endif /* defined(HAS_APPLE_PAC) */ + } else { + /* + * Static kernelcache: iBoot slid kernel MachO vmaddrs, determine slide + * using hardcoded kernel link address + */ + slide = (uintptr_t)k_mh - VM_KERNEL_LINK_ADDRESS; + + /* rebase and sign jops */ + static_kernelcache = &__thread_starts_sect_end[0] != &__thread_starts_sect_start[0]; + if (static_kernelcache) { + rebase_threaded_starts( &__thread_starts_sect_start[0], + &__thread_starts_sect_end[0], + (uintptr_t)k_mh, (uintptr_t)k_mh - slide, slide); } +#if defined(HAS_APPLE_PAC) + OSRuntimeSignStructors(&_mh_execute_header); +#endif /* defined(HAS_APPLE_PAC) */ + } + - // The delta is bits [51..61] - // And bit 62 is to tell us if we are a rebase (0) or bind (1) - value &= ~(1ULL << 62); - delta = (value & 0x3FF8000000000000) >> 51; - address += delta * stepMultiplier; - } while (delta != 0); + /* + * Initialize slide global here to avoid duplicating this logic in + * arm_vm_init() + */ + vm_kernel_slide = slide; } -// Note, the following method should come from a header from dyld -static bool -rebase_threaded_starts(uint32_t *threadArrayStart, uint32_t *threadArrayEnd, - uintptr_t macho_header_addr, uintptr_t macho_header_vmaddr, size_t slide) +void +arm_auxkc_init(void *mh, void *base) { - uint32_t threadStartsHeader = *threadArrayStart; - uint64_t stepMultiplier = (threadStartsHeader & 1) == 1 ? 8 : 4; - for (uint32_t* threadOffset = threadArrayStart + 1; threadOffset != threadArrayEnd; ++threadOffset) { - if (*threadOffset == 0xFFFFFFFF) { - break; - } - rebase_chain(macho_header_addr + *threadOffset, stepMultiplier, macho_header_vmaddr, slide); - } - return true; + /* + * The kernel collection linker guarantees that the lowest vmaddr in an + * AuxKC collection is 0 (but note that the mach header is higher up since + * RW segments precede RO segments in the AuxKC). 
+ */ + uintptr_t slide = (uintptr_t)base; + kernel_mach_header_t *akc_mh = (kernel_mach_header_t*)mh; + + assert(akc_mh->filetype == MH_FILESET); + PE_set_kc_header_and_base(KCKindAuxiliary, akc_mh, base, slide); + + /* rebase and sign jops */ + const void *collection_base_pointers[KCNumKinds]; + memcpy(collection_base_pointers, PE_get_kc_base_pointers(), sizeof(collection_base_pointers)); + kernel_collection_slide((struct mach_header_64 *)akc_mh, collection_base_pointers); + + kernel_collection_adjust_mh_addrs((struct mach_header_64 *)akc_mh, slide, false, + (uintptr_t *)&segLOWESTAuxKC, (uintptr_t *)&segHIGHESTAuxKC, (uintptr_t *)&segLOWESTROAuxKC, + (uintptr_t *)&segHIGHESTROAuxKC, (uintptr_t *)&segLOWESTRXAuxKC, (uintptr_t *)&segHIGHESTRXAuxKC, + (uintptr_t *)&segHIGHESTNLEAuxKC); +#if defined(HAS_APPLE_PAC) + OSRuntimeSignStructorsInFileset(akc_mh); +#endif /* defined(HAS_APPLE_PAC) */ } +#if HAS_IC_INVAL_FILTERS +static void +configure_misc_apple_regs(void) +{ + uint64_t actlr, __unused acfg, __unused ahcr; + + actlr = get_aux_control(); + +#if HAS_IC_INVAL_FILTERS + ahcr = __builtin_arm_rsr64(ARM64_REG_AHCR_EL2); + ahcr |= AHCR_IC_IVAU_EnRegime; + ahcr |= AHCR_IC_IVAU_EnVMID; + ahcr |= AHCR_IC_IALLU_EnRegime; + ahcr |= AHCR_IC_IALLU_EnVMID; + __builtin_arm_wsr64(ARM64_REG_AHCR_EL2, ahcr); +#endif /* HAS_IC_INVAL_FILTERS */ + + +#if HAS_IC_INVAL_FILTERS + actlr |= ACTLR_EL1_IC_IVAU_EnASID; +#endif /* HAS_IC_INVAL_FILTERS */ + + set_aux_control(actlr); + +} +#endif /* HAS_IC_INVAL_FILTERS */ /* * Routine: arm_init - * Function: + * Function: Runs on the boot CPU, once, on entry from iBoot. */ -extern uint32_t __thread_starts_sect_start[] __asm("section$start$__TEXT$__thread_starts"); -extern uint32_t __thread_starts_sect_end[] __asm("section$end$__TEXT$__thread_starts"); - +__startup_func void arm_init( boot_args *args) @@ -253,16 +323,8 @@ arm_init( uint32_t memsize; uint64_t xmaxmem; thread_t thread; - processor_t my_master_proc; - - // rebase and sign jops - if (&__thread_starts_sect_end[0] != &__thread_starts_sect_start[0]) { - uintptr_t mh = (uintptr_t) &_mh_execute_header; - uintptr_t slide = mh - VM_KERNEL_LINK_ADDRESS; - rebase_threaded_starts( &__thread_starts_sect_start[0], - &__thread_starts_sect_end[0], - mh, mh - slide, slide); - } + + arm_slide_rebase_and_sign_image(); /* If kernel integrity is supported, use a constant copy of the boot args. 
*/ const_boot_args = *args; @@ -272,74 +334,54 @@ arm_init( #if defined(HAS_APPLE_PAC) /* bootstrap cpu process dependent key for kernel has been loaded by start.s */ BootCpuData.rop_key = KERNEL_ROP_ID; + BootCpuData.jop_key = ml_default_jop_pid(); #endif /* defined(HAS_APPLE_PAC) */ PE_init_platform(FALSE, args); /* Get platform expert set up */ #if __arm64__ - + wfe_timeout_configure(); +#if HAS_IC_INVAL_FILTERS + configure_misc_apple_regs(); +#endif /* HAS_IC_INVAL_FILTERS */ #if defined(HAS_APPLE_PAC) +#if DEVELOPMENT || DEBUG boolean_t user_jop = TRUE; PE_parse_boot_argn("user_jop", &user_jop, sizeof(user_jop)); if (!user_jop) { args->bootFlags |= kBootFlagsDisableUserJOP; } +#endif /* DEVELOPMENT || DEBUG */ boolean_t user_ts_jop = TRUE; PE_parse_boot_argn("user_ts_jop", &user_ts_jop, sizeof(user_ts_jop)); if (!user_ts_jop) { args->bootFlags |= kBootFlagsDisableUserThreadStateJOP; } + PE_parse_boot_argn("diversify_user_jop", &diversify_user_jop, sizeof(diversify_user_jop)); #endif /* defined(HAS_APPLE_PAC) */ { - unsigned int tmp_16k = 0; - -#ifdef XXXX /* - * Select the advertised kernel page size; without the boot-arg - * we default to the hardware page size for the current platform. + * Select the advertised kernel page size. */ - if (PE_parse_boot_argn("-vm16k", &tmp_16k, sizeof(tmp_16k))) { + if (args->memSize > 1ULL * 1024 * 1024 * 1024) { + /* + * arm64 device with > 1GB of RAM: + * kernel uses 16KB pages. + */ PAGE_SHIFT_CONST = PAGE_MAX_SHIFT; } else { + /* + * arm64 device with <= 1GB of RAM: + * kernel uses hardware page size + * (4KB for H6/H7, 16KB for H8+). + */ PAGE_SHIFT_CONST = ARM_PGSHIFT; } -#else - /* - * Select the advertised kernel page size; with the boot-arg - * use to the hardware page size for the current platform. - */ - int radar_20804515 = 1; /* default: new mode */ - PE_parse_boot_argn("radar_20804515", &radar_20804515, sizeof(radar_20804515)); - if (radar_20804515) { - if (args->memSize > 1ULL * 1024 * 1024 * 1024) { - /* - * arm64 device with > 1GB of RAM: - * kernel uses 16KB pages. - */ - PAGE_SHIFT_CONST = PAGE_MAX_SHIFT; - } else { - /* - * arm64 device with <= 1GB of RAM: - * kernel uses hardware page size - * (4KB for H6/H7, 16KB for H8+). 
- */ - PAGE_SHIFT_CONST = ARM_PGSHIFT; - } - /* 32-bit apps always see 16KB page size */ - page_shift_user32 = PAGE_MAX_SHIFT; - } else { - /* kernel page size: */ - if (PE_parse_boot_argn("-use_hwpagesize", &tmp_16k, sizeof(tmp_16k))) { - PAGE_SHIFT_CONST = ARM_PGSHIFT; - } else { - PAGE_SHIFT_CONST = PAGE_MAX_SHIFT; - } - /* old mode: 32-bit apps see same page size as kernel */ - page_shift_user32 = PAGE_SHIFT_CONST; - } -#endif + + /* 32-bit apps always see 16KB page size */ + page_shift_user32 = PAGE_MAX_SHIFT; #ifdef APPLETYPHOON if (PE_parse_boot_argn("cpus_defeatures", &cpus_defeatures, sizeof(cpus_defeatures))) { if ((cpus_defeatures & 0xF) != 0) { @@ -368,7 +410,6 @@ arm_init( BootCpuData.fiqstack_top = (vm_offset_t) &fiqstack_top; BootCpuData.fiqstackptr = BootCpuData.fiqstack_top; #endif - BootCpuData.cpu_processor = cpu_processor_alloc(TRUE); BootCpuData.cpu_console_buf = (void *)NULL; CpuDataEntries[master_cpu].cpu_data_vaddr = &BootCpuData; CpuDataEntries[master_cpu].cpu_data_paddr = (void *)((uintptr_t)(args->physBase) @@ -377,6 +418,7 @@ arm_init( thread = thread_bootstrap(); thread->machine.CpuDatap = &BootCpuData; + thread->machine.pcpu_data_base = (vm_offset_t)0; machine_set_current_thread(thread); /* @@ -388,37 +430,32 @@ arm_init( thread->machine.preemption_count = 0; #if __arm__ && __ARM_USER_PROTECT__ { - unsigned int ttbr0_val, ttbr1_val, ttbcr_val; + unsigned int ttbr0_val, ttbr1_val; __asm__ volatile ("mrc p15,0,%0,c2,c0,0\n" : "=r"(ttbr0_val)); __asm__ volatile ("mrc p15,0,%0,c2,c0,1\n" : "=r"(ttbr1_val)); - __asm__ volatile ("mrc p15,0,%0,c2,c0,2\n" : "=r"(ttbcr_val)); thread->machine.uptw_ttb = ttbr0_val; thread->machine.kptw_ttb = ttbr1_val; - thread->machine.uptw_ttc = ttbcr_val; } #endif - BootCpuData.cpu_processor->processor_data.kernel_timer = &thread->system_timer; - BootCpuData.cpu_processor->processor_data.thread_timer = &thread->system_timer; + processor_t boot_processor = PERCPU_GET_MASTER(processor); + boot_processor->kernel_timer = &thread->system_timer; + boot_processor->thread_timer = &thread->system_timer; cpu_bootstrap(); rtclock_early_init(); - lck_mod_init(); + kernel_debug_string_early("kernel_startup_bootstrap"); + kernel_startup_bootstrap(); /* * Initialize the timer callout world */ timer_call_init(); - kernel_early_bootstrap(); - cpu_init(); processor_bootstrap(); - my_master_proc = master_processor; - - (void)PE_parse_boot_argn("diag", &arm_diag, sizeof(arm_diag)); if (PE_parse_boot_argn("maxmem", &maxmem, sizeof(maxmem))) { xmaxmem = (uint64_t) maxmem * (1024 * 1024); @@ -428,9 +465,6 @@ arm_init( xmaxmem = 0; } - if (PE_parse_boot_argn("up_style_idle_exit", &up_style_idle_exit, sizeof(up_style_idle_exit))) { - up_style_idle_exit = 1; - } #if INTERRUPT_MASKED_DEBUG int wdt_boot_arg = 0; /* Disable if WDT is disabled or no_interrupt_mask_debug in boot-args */ @@ -443,11 +477,6 @@ arm_init( PE_parse_boot_argn("interrupt_masked_debug_timeout", &interrupt_masked_timeout, sizeof(interrupt_masked_timeout)); #endif -#if HAS_NEX_PG - PE_parse_boot_argn("nexpg", &nex_pg, sizeof(nex_pg)); - set_nex_pg(); // Apply NEX powergating settings to boot CPU -#endif - #if HAS_BP_RET PE_parse_boot_argn("bpret", &bp_ret, sizeof(bp_ret)); set_bp_ret(); // Apply branch predictor retention settings to boot CPU @@ -461,33 +490,32 @@ arm_init( arm_vm_init(xmaxmem, args); - uint32_t debugmode; - if (PE_parse_boot_argn("debug", &debugmode, sizeof(debugmode)) && - debugmode) { + if (debug_boot_arg) { patch_low_glo(); } - printf_init(); - panic_init(); -#if 
__arm64__ - /* Enable asynchronous exceptions */ - __builtin_arm_wsr("DAIFClr", DAIFSC_ASYNCF); -#endif #if __arm64__ && WITH_CLASSIC_S2R sleep_token_buffer_init(); #endif PE_consistent_debug_inherit(); - /* setup debugging output if one has been chosen */ - PE_init_kprintf(FALSE); + /* + * rdar://54622819 Insufficient HSP purge window can cause incorrect translation when ASID and TTBR base address is changed at same time) + * (original info on HSP purge window issues can be found in rdar://55577508) + * We need a flag to check for this, so calculate and set it here. We'll use it in machine_switch_amx_context(). + */ +#if __arm64__ + need_wa_rdar_55577508 = cpuid_get_cpufamily() == CPUFAMILY_ARM_LIGHTNING_THUNDER; +#endif + /* setup debugging output if one has been chosen */ + kernel_startup_initialize_upto(STARTUP_SUB_KPRINTF); kprintf("kprintf initialized\n"); - serialmode = 0; /* Assume normal keyboard and console */ - if (PE_parse_boot_argn("serial", &serialmode, sizeof(serialmode))) { /* Do we want a serial - * keyboard and/or - * console? */ + serialmode = 0; + if (PE_parse_boot_argn("serial", &serialmode, sizeof(serialmode))) { + /* Do we want a serial keyboard and/or console? */ kprintf("Serial mode specified: %08X\n", serialmode); int force_sync = serialmode & SERIALMODE_SYNCDRAIN; if (force_sync || PE_parse_boot_argn("drain_uart_sync", &force_sync, sizeof(force_sync))) { @@ -530,26 +558,41 @@ arm_init( PE_init_platform(TRUE, &BootCpuData); #if __arm64__ - if (PE_parse_boot_argn("wfe_events_sec", &events_per_sec, sizeof(events_per_sec))) { - if (events_per_sec <= 0) { - events_per_sec = 1; - } else if (events_per_sec > USEC_PER_SEC) { - events_per_sec = USEC_PER_SEC; - } - } else { -#if defined(ARM_BOARD_WFE_TIMEOUT_NS) - events_per_sec = NSEC_PER_SEC / ARM_BOARD_WFE_TIMEOUT_NS; -#else /* !defined(ARM_BOARD_WFE_TIMEOUT_NS) */ - /* Default to 1usec (or as close as we can get) */ - events_per_sec = USEC_PER_SEC; -#endif /* !defined(ARM_BOARD_WFE_TIMEOUT_NS) */ - } + ml_map_cpu_pio(); #endif cpu_timebase_init(TRUE); PE_init_cpu(); - fiq_context_bootstrap(TRUE); + fiq_context_init(TRUE); + +#if HIBERNATION + pal_hib_init(); +#endif /* HIBERNATION */ + + /* + * gPhysBase/Size only represent kernel-managed memory. These globals represent + * the actual DRAM base address and size as reported by iBoot through the + * device tree. + */ + DTEntry chosen; + unsigned int dt_entry_size; + unsigned long const *dram_base; + unsigned long const *dram_size; + if (SecureDTLookupEntry(NULL, "/chosen", &chosen) != kSuccess) { + panic("%s: Unable to find 'chosen' DT node", __FUNCTION__); + } + + if (SecureDTGetProperty(chosen, "dram-base", (void const **)&dram_base, &dt_entry_size) != kSuccess) { + panic("%s: Unable to find 'dram-base' entry in the 'chosen' DT node", __FUNCTION__); + } + + if (SecureDTGetProperty(chosen, "dram-size", (void const **)&dram_size, &dt_entry_size) != kSuccess) { + panic("%s: Unable to find 'dram-size' entry in the 'chosen' DT node", __FUNCTION__); + } + + gDramBase = *dram_base; + gDramSize = *dram_size; /* * Initialize the stack protector for all future calls @@ -569,7 +612,7 @@ arm_init( /* * Routine: arm_init_cpu * Function: - * Re-initialize CPU when coming out of reset + * Runs on S2R resume (all CPUs) and SMP boot (non-boot CPUs only). 
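Per the comment above, gPhysBase/gPhysSize only describe kernel-managed memory, while the new gDramBase/gDramSize globals capture the full DRAM window that iBoot reports through the device tree. A minimal sketch of the kind of range check those globals enable — the helper name is hypothetical and not part of the patch:

#include <stdbool.h>
#include <stdint.h>

extern uint64_t gDramBase;   /* set from the "dram-base" device-tree property */
extern uint64_t gDramSize;   /* set from the "dram-size" device-tree property */

/* Hypothetical helper: true if a physical address falls inside DRAM. */
static inline bool
pa_is_dram(uint64_t pa)
{
	return pa >= gDramBase && (pa - gDramBase) < gDramSize;
}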
*/ void @@ -580,19 +623,44 @@ arm_init_cpu( __builtin_arm_wsr("pan", 1); #endif +#if HAS_IC_INVAL_FILTERS + configure_misc_apple_regs(); +#endif /* HAS_IC_INVAL_FILTERS */ cpu_data_ptr->cpu_flags &= ~SleepState; -#if __ARM_SMP__ && defined(ARMA7) +#if defined(ARMA7) cpu_data_ptr->cpu_CLW_active = 1; #endif machine_set_current_thread(cpu_data_ptr->cpu_active_thread); +#if HIBERNATION + if ((cpu_data_ptr == &BootCpuData) && (gIOHibernateState == kIOHibernateStateWakingFromHibernate)) { + // the "normal" S2R code captures wake_abstime too early, so on a hibernation resume we fix it up here + extern uint64_t wake_abstime; + wake_abstime = gIOHibernateCurrentHeader->lastHibAbsTime; + + // since the hw clock stops ticking across hibernation, we need to apply an offset; + // iBoot computes this offset for us and passes it via the hibernation header + extern uint64_t hwclock_conttime_offset; + hwclock_conttime_offset = gIOHibernateCurrentHeader->hwClockOffset; + + // during hibernation, we captured the idle thread's state from inside the PPL context, so we have to + // fix up its preemption count + unsigned int expected_preemption_count = (gEnforceQuiesceSafety ? 2 : 1); + if (cpu_data_ptr->cpu_active_thread->machine.preemption_count != expected_preemption_count) { + panic("unexpected preemption count %u on boot cpu thread (should be %u)\n", + cpu_data_ptr->cpu_active_thread->machine.preemption_count, + expected_preemption_count); + } + cpu_data_ptr->cpu_active_thread->machine.preemption_count--; + } +#endif /* HIBERNATION */ + #if __arm64__ + wfe_timeout_init(); pmap_clear_user_ttb(); flush_mmu_tlb(); - /* Enable asynchronous exceptions */ - __builtin_arm_wsr("DAIFClr", DAIFSC_ASYNCF); #endif cpu_machine_idle_init(FALSE); @@ -636,9 +704,15 @@ arm_init_cpu( PE_arm_debug_enable_trace(); #endif - kprintf("arm_cpu_init(): cpu %d online\n", cpu_data_ptr->cpu_processor->cpu_id); + + kprintf("arm_cpu_init(): cpu %d online\n", cpu_data_ptr->cpu_number); if (cpu_data_ptr == &BootCpuData) { + if (kdebug_enable == 0) { + __kdebug_only uint64_t elapsed = kdebug_wake(); + KDBG(IOKDBG_CODE(DBG_HIBERNATE, 15), mach_absolute_time() - elapsed); + } + #if CONFIG_TELEMETRY bootprofile_wake_from_sleep(); #endif /* CONFIG_TELEMETRY */ @@ -648,9 +722,9 @@ arm_init_cpu( #endif /* MONOTONIC && defined(__arm64__) */ #if defined(KERNEL_INTEGRITY_CTRR) - if (cpu_data_ptr->cluster_master) { + if (ctrr_cluster_locked[cpu_data_ptr->cpu_cluster_id] != CTRR_LOCKED) { lck_spin_lock(&ctrr_cpu_start_lck); - ctrr_cluster_locked[cpu_data_ptr->cpu_cluster_id] = 1; + ctrr_cluster_locked[cpu_data_ptr->cpu_cluster_id] = CTRR_LOCKED; thread_wakeup(&ctrr_cluster_locked[cpu_data_ptr->cpu_cluster_id]); lck_spin_unlock(&ctrr_cpu_start_lck); } @@ -660,8 +734,8 @@ arm_init_cpu( } /* - * Routine: arm_init_idle_cpu - * Function: + * Routine: arm_init_idle_cpu + * Function: Resume from non-retention WFI. Called from the reset vector. 
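In the KERNEL_INTEGRITY_CTRR hunk above, the CPU that locks its cluster's CTRR region records CTRR_LOCKED and issues thread_wakeup() on the ctrr_cluster_locked entry while holding ctrr_cpu_start_lck. The waiter side is not shown in this hunk; a simplified, assumed sketch of that counterpart using the standard assert_wait()/thread_block() pairing (names mirror the hunk, and cluster_id stands in for cpu_data_ptr->cpu_cluster_id):

#include <kern/locks.h>        /* lck_spin_lock()/lck_spin_unlock() */
#include <kern/sched_prim.h>   /* assert_wait(), thread_block() */

extern lck_spin_t ctrr_cpu_start_lck;
extern int        ctrr_cluster_locked[];

static void
ctrr_wait_for_cluster_lock(unsigned int cluster_id)
{
	/* Hold the same spin lock the waker uses so the condition test and
	 * assert_wait() cannot race with thread_wakeup() (sketch only). */
	lck_spin_lock(&ctrr_cpu_start_lck);
	while (ctrr_cluster_locked[cluster_id] != CTRR_LOCKED) {
		assert_wait((event_t)&ctrr_cluster_locked[cluster_id], THREAD_UNINT);
		lck_spin_unlock(&ctrr_cpu_start_lck);
		thread_block(THREAD_CONTINUE_NULL);
		lck_spin_lock(&ctrr_cpu_start_lck);
	}
	lck_spin_unlock(&ctrr_cpu_start_lck);
}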
*/ void __attribute__((noreturn)) arm_init_idle_cpu( @@ -670,13 +744,14 @@ arm_init_idle_cpu( #if __ARM_PAN_AVAILABLE__ __builtin_arm_wsr("pan", 1); #endif -#if __ARM_SMP__ && defined(ARMA7) +#if defined(ARMA7) cpu_data_ptr->cpu_CLW_active = 1; #endif machine_set_current_thread(cpu_data_ptr->cpu_active_thread); #if __arm64__ + wfe_timeout_init(); pmap_clear_user_ttb(); flush_mmu_tlb(); /* Enable asynchronous exceptions */ diff --git a/osfmk/arm/arm_timer.c b/osfmk/arm/arm_timer.c index 3b9c4f310..8b3e8192e 100644 --- a/osfmk/arm/arm_timer.c +++ b/osfmk/arm/arm_timer.c @@ -69,6 +69,7 @@ timer_intr(__unused int inuser, __unused uint64_t iaddr) uint64_t abstime, new_idle_timeout_ticks; rtclock_timer_t *mytimer; cpu_data_t *cpu_data_ptr; + processor_t processor; cpu_data_ptr = getCpuDatap(); mytimer = &cpu_data_ptr->rtclock_timer; /* Point to the event timer */ @@ -80,7 +81,7 @@ timer_intr(__unused int inuser, __unused uint64_t iaddr) new_idle_timeout_ticks = 0x0ULL; KERNEL_DEBUG_CONSTANT_IST(KDEBUG_COMMON, MACHDBG_CODE(DBG_MACH_EXCP_DECI, 3) | DBG_FUNC_START, 0, 0, 0, 0, 0); - ((idle_timer_t)cpu_data_ptr->idle_timer_notify)(cpu_data_ptr->idle_timer_refcon, &new_idle_timeout_ticks); + cpu_data_ptr->idle_timer_notify(cpu_data_ptr->idle_timer_refcon, &new_idle_timeout_ticks); KERNEL_DEBUG_CONSTANT_IST(KDEBUG_COMMON, MACHDBG_CODE(DBG_MACH_EXCP_DECI, 3) | DBG_FUNC_END, 0, 0, 0, 0, 0); /* if a new idle timeout was requested set the new idle timer deadline */ @@ -104,12 +105,11 @@ timer_intr(__unused int inuser, __unused uint64_t iaddr) abstime = mach_absolute_time(); /* Get the time again since we ran a bit */ } - uint64_t quantum_deadline = cpu_data_ptr->quantum_timer_deadline; - /* is it the quantum timer expiration? */ - if ((quantum_deadline <= abstime) && (quantum_deadline > 0)) { - cpu_data_ptr->quantum_timer_deadline = 0; - quantum_timer_expire(abstime); - } + processor = PERCPU_GET_RELATIVE(processor, cpu_data, cpu_data_ptr); + (void)running_timers_expire(processor, abstime); + /* + * No need to update abstime. + */ /* Force reload our next deadline */ cpu_data_ptr->rtcPop = EndOfAllTime; @@ -138,19 +138,6 @@ timer_set_deadline(uint64_t deadline) splx(s); } -void -quantum_timer_set_deadline(uint64_t deadline) -{ - cpu_data_t *cpu_data_ptr; - - /* We should've only come into this path with interrupts disabled */ - assert(ml_get_interrupts_enabled() == FALSE); - - cpu_data_ptr = getCpuDatap(); - cpu_data_ptr->quantum_timer_deadline = deadline; - timer_resync_deadlines(); -} - /* * Re-evaluate the outstanding deadlines and select the most proximate. 
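The timer_resync_deadlines() hunk that follows folds the per-CPU running-timer deadline into the same "most proximate deadline" selection already done for the event and idle timers. Distilled into a standalone sketch with hypothetical inputs (0 means "not armed" for the first two sources, and the running-timer deadline is compared directly, mirroring the hunk):

#include <stdint.h>

#define END_OF_ALL_TIME UINT64_MAX   /* stand-in for xnu's EndOfAllTime sentinel */

static uint64_t
most_proximate_deadline(uint64_t event_deadline, uint64_t idle_deadline,
    uint64_t running_timer_deadline)
{
	uint64_t deadline = END_OF_ALL_TIME;

	if (event_deadline > 0 && event_deadline < deadline) {
		deadline = event_deadline;
	}
	if (idle_deadline > 0 && idle_deadline < deadline) {
		deadline = idle_deadline;
	}
	/* No zero-check here: an idle running-timer source is assumed to
	 * report a far-future value rather than 0. */
	if (running_timer_deadline < deadline) {
		deadline = running_timer_deadline;
	}
	return deadline;
}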
* @@ -180,10 +167,10 @@ timer_resync_deadlines(void) deadline = cpu_data_ptr->idle_timer_deadline; } - /* If we have the quantum timer setup, check that */ - if ((cpu_data_ptr->quantum_timer_deadline > 0) - && (cpu_data_ptr->quantum_timer_deadline < deadline)) { - deadline = cpu_data_ptr->quantum_timer_deadline; + uint64_t run_deadline = running_timers_deadline( + PERCPU_GET_RELATIVE(processor, cpu_data, cpu_data_ptr)); + if (run_deadline < deadline) { + deadline = run_deadline; } if ((deadline == EndOfAllTime) @@ -288,10 +275,17 @@ static timer_coalescing_priority_params_ns_t tcoal_prio_params_init = .timer_coalesce_kt_ns_max = 1 * NSEC_PER_MSEC, .timer_coalesce_fp_ns_max = 1 * NSEC_PER_MSEC, .timer_coalesce_ts_ns_max = 1 * NSEC_PER_MSEC, +#if XNU_TARGET_OS_OSX + .latency_qos_scale = {3, 2, 1, -2, 3, 3}, + .latency_qos_ns_max = {1 * NSEC_PER_MSEC, 5 * NSEC_PER_MSEC, 20 * NSEC_PER_MSEC, + 75 * NSEC_PER_MSEC, 1 * NSEC_PER_MSEC, 1 * NSEC_PER_MSEC}, + .latency_tier_rate_limited = {FALSE, FALSE, FALSE, FALSE, FALSE, FALSE}, +#else /* XNU_TARGET_OS_OSX */ .latency_qos_scale = {3, 2, 1, -2, -15, -15}, .latency_qos_ns_max = {1 * NSEC_PER_MSEC, 5 * NSEC_PER_MSEC, 20 * NSEC_PER_MSEC, 75 * NSEC_PER_MSEC, 10000 * NSEC_PER_MSEC, 10000 * NSEC_PER_MSEC}, .latency_tier_rate_limited = {FALSE, FALSE, FALSE, FALSE, TRUE, TRUE}, +#endif /* XNU_TARGET_OS_OSX */ }; timer_coalescing_priority_params_ns_t * timer_call_get_priority_params(void) diff --git a/osfmk/arm/arm_vm_init.c b/osfmk/arm/arm_vm_init.c index 08788e136..35f66e5ee 100644 --- a/osfmk/arm/arm_vm_init.c +++ b/osfmk/arm/arm_vm_init.c @@ -90,8 +90,9 @@ vm_offset_t mem_size; /* Size of actual physical m uint64_t mem_actual; /* The "One True" physical memory size * actually, it's the highest physical * address + 1 */ -uint64_t max_mem; /* Size of physical memory (bytes), adjusted - * by maxmem */ +uint64_t max_mem; /* kernel/vm managed memory, adjusted by maxmem */ +uint64_t max_mem_actual; /* Actual size of physical memory (bytes), adjusted + * by the maxmem boot-arg */ uint64_t sane_size; /* Memory size to use for defaults * calculations */ addr64_t vm_last_addr = VM_MAX_KERNEL_ADDRESS; /* Highest kernel @@ -101,15 +102,18 @@ addr64_t vm_last_addr = VM_MAX_KERNEL_ADDRESS; /* Highest kernel vm_offset_t segEXTRADATA; unsigned long segSizeEXTRADATA; vm_offset_t segLOWESTTEXT; +vm_offset_t segLOWEST; static vm_offset_t segTEXTB; static unsigned long segSizeTEXT; static vm_offset_t segDATAB; static unsigned long segSizeDATA; -static vm_offset_t segLINKB; +vm_offset_t segLINKB; static unsigned long segSizeLINK; static vm_offset_t segKLDB; static unsigned long segSizeKLD; static vm_offset_t segLASTB; +static vm_offset_t segLASTDATACONSTB; +static unsigned long segSizeLASTDATACONST; static unsigned long segSizeLAST; static vm_offset_t sectCONSTB; static unsigned long sectSizeCONST; @@ -124,6 +128,18 @@ unsigned long segSizePRELINKTEXT; vm_offset_t segPRELINKINFOB; unsigned long segSizePRELINKINFO; +vm_offset_t segLOWESTKC; +vm_offset_t segHIGHESTKC; +vm_offset_t segLOWESTROKC; +vm_offset_t segHIGHESTROKC; +vm_offset_t segLOWESTAuxKC; +vm_offset_t segHIGHESTAuxKC; +vm_offset_t segLOWESTROAuxKC; +vm_offset_t segHIGHESTROAuxKC; +vm_offset_t segLOWESTRXAuxKC; +vm_offset_t segHIGHESTRXAuxKC; +vm_offset_t segHIGHESTNLEAuxKC; + static kernel_segment_command_t *segDATA; static boolean_t doconstro = TRUE; @@ -316,6 +332,9 @@ arm_vm_prot_init(boot_args * args) arm_vm_page_granular_ROX(segKLDB, segSizeKLD, force_coarse_physmap); arm_vm_page_granular_RWNX(segLINKB, 
segSizeLINK, force_coarse_physmap); arm_vm_page_granular_RWNX(segLASTB, segSizeLAST, FALSE); // __LAST may be empty, but we cannot assume this + if (segLASTDATACONSTB) { + arm_vm_page_granular_RWNX(segLASTDATACONSTB, segSizeLASTDATACONST, FALSE); // __LASTDATA_CONST may be empty, but we cannot assume this + } arm_vm_page_granular_RWNX(segPRELINKTEXTB, segSizePRELINKTEXT, TRUE); // Refined in OSKext::readPrelinkedExtensions arm_vm_page_granular_RWNX(segPRELINKTEXTB + segSizePRELINKTEXT, end_kern - (segPRELINKTEXTB + segSizePRELINKTEXT), force_coarse_physmap); // PreLinkInfoDictionary @@ -344,7 +363,7 @@ arm_vm_prot_init(boot_args * args) */ pmap_paddr_t p = (pmap_paddr_t)(args->topOfKernelData) + (ARM_PGBYTES * 9); pt_entry_t *ppte = (pt_entry_t *)phystokv(p); - pmap_init_pte_page(kernel_pmap, ppte, HIGH_EXC_VECTORS & ~ARM_TT_L1_PT_OFFMASK, 2, TRUE, FALSE); + pmap_init_pte_page(kernel_pmap, ppte, HIGH_EXC_VECTORS & ~ARM_TT_L1_PT_OFFMASK, 2, TRUE); int idx = (HIGH_EXC_VECTORS & ARM_TT_L1_PT_OFFMASK) >> ARM_TT_L2_SHIFT; pt_entry_t ptmp = ppte[idx]; @@ -396,12 +415,17 @@ arm_vm_init(uint64_t memory_size, boot_args * args) gPhysBase = args->physBase; gPhysSize = args->memSize; mem_size = args->memSize; - if ((memory_size != 0) && (mem_size > memory_size)) { - mem_size = memory_size; - } + mem_actual = args->memSizeActual ? args->memSizeActual : mem_size; if (mem_size > MEM_SIZE_MAX) { mem_size = MEM_SIZE_MAX; } + if ((memory_size != 0) && (mem_size > memory_size)) { + mem_size = memory_size; + max_mem_actual = memory_size; + } else { + max_mem_actual = mem_actual; + } + static_memory_end = gVirtBase + mem_size; /* Calculate the nubmer of ~256MB segments of memory */ @@ -453,10 +477,12 @@ arm_vm_init(uint64_t memory_size, boot_args * args) */ segTEXTB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__TEXT", &segSizeTEXT); segLOWESTTEXT = segTEXTB; + segLOWEST = segLOWESTTEXT; segDATAB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__DATA", &segSizeDATA); segLINKB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__LINKEDIT", &segSizeLINK); segKLDB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__KLD", &segSizeKLD); segLASTB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__LAST", &segSizeLAST); + segLASTDATACONSTB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__LASTDATA_CONST", &segSizeLASTDATACONST); segPRELINKTEXTB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__PRELINK_TEXT", &segSizePRELINKTEXT); segPRELINKINFOB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__PRELINK_INFO", &segSizePRELINKINFO); segBOOTDATAB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__BOOTDATA", &segSizeBOOTDATA); @@ -465,14 +491,14 @@ arm_vm_init(uint64_t memory_size, boot_args * args) segSizeEXTRADATA = 0; DTEntry memory_map; - MemoryMapFileInfo *trustCacheRange; + MemoryMapFileInfo const *trustCacheRange; unsigned int trustCacheRangeSize; int err; - err = DTLookupEntry(NULL, "chosen/memory-map", &memory_map); + err = SecureDTLookupEntry(NULL, "chosen/memory-map", &memory_map); assert(err == kSuccess); - err = DTGetProperty(memory_map, "TrustCache", (void**)&trustCacheRange, &trustCacheRangeSize); + err = SecureDTGetProperty(memory_map, "TrustCache", (const void**)&trustCacheRange, &trustCacheRangeSize); if (err == kSuccess) { assert(trustCacheRangeSize == sizeof(MemoryMapFileInfo)); @@ -551,6 +577,8 @@ arm_vm_init(uint64_t memory_size, boot_args * args) arm_vm_prot_init(args); + vm_page_kernelcache_count = 
(unsigned int) (atop_64(end_kern - segLOWEST)); + /* * To avoid recursing while trying to init the vm_page and object * mechanisms, * pre-initialize kernel pmap page table pages to cover this address range: @@ -566,7 +594,8 @@ arm_vm_init(uint64_t memory_size, boot_args * args) ptp = (pt_entry_t *) phystokv(avail_start); ptp_phys = (pmap_paddr_t)avail_start; avail_start += ARM_PGBYTES; - pmap_init_pte_page(kernel_pmap, ptp, va + off, 2, TRUE, TRUE); + bzero(ptp, ARM_PGBYTES); + pmap_init_pte_page(kernel_pmap, ptp, va + off, 2, TRUE); tte = &cpu_tte[ttenum(va + off)]; *tte = pa_to_tte((ptp_phys)) | ARM_TTE_TYPE_TABLE; *(tte + 1) = pa_to_tte((ptp_phys + 0x400)) | ARM_TTE_TYPE_TABLE; @@ -579,15 +608,13 @@ arm_vm_init(uint64_t memory_size, boot_args * args) flush_mmu_tlb(); #if __arm__ && __ARM_USER_PROTECT__ { - unsigned int ttbr0_val, ttbr1_val, ttbcr_val; + unsigned int ttbr0_val, ttbr1_val; thread_t thread = current_thread(); __asm__ volatile ("mrc p15,0,%0,c2,c0,0\n" : "=r"(ttbr0_val)); __asm__ volatile ("mrc p15,0,%0,c2,c0,1\n" : "=r"(ttbr1_val)); - __asm__ volatile ("mrc p15,0,%0,c2,c0,2\n" : "=r"(ttbcr_val)); thread->machine.uptw_ttb = ttbr0_val; thread->machine.kptw_ttb = ttbr1_val; - thread->machine.uptw_ttc = ttbcr_val; } #endif avail_start = (avail_start + PAGE_MASK) & ~PAGE_MASK; diff --git a/osfmk/arm/atomic.h b/osfmk/arm/atomic.h index 0382aa233..2b679dbb3 100644 --- a/osfmk/arm/atomic.h +++ b/osfmk/arm/atomic.h @@ -66,166 +66,4 @@ // Parameter for __builtin_arm_isb #define ISB_SY 0xf -#undef OS_ATOMIC_HAS_LLSC -#define OS_ATOMIC_HAS_LLSC 1 - -#if defined(__ARM_ARCH_8_2__) && defined(__arm64__) -#undef OS_ATOMIC_USE_LLSC -#define OS_ATOMIC_USE_LLSC 0 -#endif - -#if defined(__ARM_ARCH_8_4__) && defined(__arm64__) -/* on armv8.4 16-byte aligned load/store pair is atomic */ -#undef os_atomic_load_is_plain -#define os_atomic_load_is_plain(p) \ - (sizeof(*(p)) <= 16 && _Alignof(typeof(*(p))) >= sizeof(*(p))) -#endif - -/* - * On armv7 & arm64, we do provide fine grained dependency injection, so - * memory_order_dependency maps to relaxed as far as thread fences are concerned - */ -#undef memory_order_dependency_smp -#define memory_order_dependency_smp memory_order_relaxed - -#define os_atomic_clear_exclusive() __builtin_arm_clrex() - -#if __arm__ - -#define os_atomic_load_exclusive(p, m) ({ \ - _os_atomic_basetypeof(p) _r; \ - _r = __builtin_arm_ldrex(p); \ - _os_memory_fence_after_atomic(m); \ - _os_compiler_barrier_after_atomic(m); \ - _r; \ -}) - -#define os_atomic_store_exclusive(p, v, m) ({ \ - _os_compiler_barrier_before_atomic(m); \ - _os_memory_fence_before_atomic(m); \ - !__builtin_arm_strex(p, v); \ -}) - -/* - * armv7 override of os_atomic_make_dependency - * documentation for os_atomic_make_dependency is in - */ -#undef os_atomic_make_dependency -#define os_atomic_make_dependency(v) ({ \ - os_atomic_dependency_t _dep; \ - __asm__ __volatile__("and %[_dep], %[_v], #0" \ - : [_dep] "=r" (_dep.__opaque_zero) : [_v] "r" (v)); \ - os_compiler_barrier(acquire); \ - _dep; \ -}) - -/* - * armv7 override of os_atomic_rmw_loop - * documentation for os_atomic_rmw_loop is in - */ -#undef os_atomic_rmw_loop -#define os_atomic_rmw_loop(p, ov, nv, m, ...) 
({ \ - int _result = 0; uint32_t _err = 0; \ - _os_atomic_basetypeof(p) *_p; \ - _p = (_os_atomic_basetypeof(p) *)(p); \ - _os_compiler_barrier_before_atomic(m); \ - for (;;) { \ - ov = __builtin_arm_ldrex(_p); \ - __VA_ARGS__; \ - if (!_err) { \ - /* release barrier only done for the first loop iteration */ \ - _os_memory_fence_before_atomic(m); \ - } \ - _err = __builtin_arm_strex(nv, _p); \ - if (__builtin_expect(!_err, 1)) { \ - _os_memory_fence_after_atomic(m); \ - _result = 1; \ - break; \ - } \ - } \ - _os_compiler_barrier_after_atomic(m); \ - _result; \ - }) - -/* - * armv7 override of os_atomic_rmw_loop_give_up - * documentation for os_atomic_rmw_loop_give_up is in - */ -#undef os_atomic_rmw_loop_give_up -#define os_atomic_rmw_loop_give_up(...) \ - ({ os_atomic_clear_exclusive(); __VA_ARGS__; break; }) - -#else // __arm64__ - -#define os_atomic_load_exclusive(p, m) ({ \ - _os_atomic_basetypeof(p) _r; \ - if (memory_order_has_acquire(memory_order_##m##_smp)) { \ - _r = __builtin_arm_ldaex(p); \ - } else { \ - _r = __builtin_arm_ldrex(p); \ - } \ - _os_compiler_barrier_after_atomic(m); \ - _r; \ -}) - -#define os_atomic_store_exclusive(p, v, m) ({ \ - _os_compiler_barrier_before_atomic(m); \ - (memory_order_has_release(memory_order_##m##_smp) ? \ - !__builtin_arm_stlex(p, v) : !__builtin_arm_strex(p, v)); \ -}) - -/* - * arm64 override of os_atomic_make_dependency - * documentation for os_atomic_make_dependency is in - */ -#undef os_atomic_make_dependency -#define os_atomic_make_dependency(v) ({ \ - os_atomic_dependency_t _dep; \ - __asm__ __volatile__("and %[_dep], %[_v], xzr" \ - : [_dep] "=r" (_dep.__opaque_zero) : [_v] "r" (v)); \ - os_compiler_barrier(acquire); \ - _dep; \ -}) - -#if OS_ATOMIC_USE_LLSC - -/* - * arm64 (without armv81 atomics) override of os_atomic_rmw_loop - * documentation for os_atomic_rmw_loop is in - */ -#undef os_atomic_rmw_loop -#define os_atomic_rmw_loop(p, ov, nv, m, ...) ({ \ - int _result = 0; \ - _os_atomic_basetypeof(p) *_p; \ - _p = (_os_atomic_basetypeof(p) *)(p); \ - _os_compiler_barrier_before_atomic(m); \ - do { \ - if (memory_order_has_acquire(memory_order_##m##_smp)) { \ - ov = __builtin_arm_ldaex(_p); \ - } else { \ - ov = __builtin_arm_ldrex(_p); \ - } \ - __VA_ARGS__; \ - if (memory_order_has_release(memory_order_##m##_smp)) { \ - _result = !__builtin_arm_stlex(nv, _p); \ - } else { \ - _result = !__builtin_arm_strex(nv, _p); \ - } \ - } while (__builtin_expect(!_result, 0)); \ - _os_compiler_barrier_after_atomic(m); \ - _result; \ - }) - -/* - * arm64 override of os_atomic_rmw_loop_give_up - * documentation for os_atomic_rmw_loop_give_up is in - */ -#undef os_atomic_rmw_loop_give_up -#define os_atomic_rmw_loop_give_up(...) 
\ - ({ os_atomic_clear_exclusive(); __VA_ARGS__; break; }) - -#endif // OS_ATOMIC_USE_LLSC - -#endif // __arm64__ - #endif // _ARM_ATOMIC_H_ diff --git a/osfmk/arm/caches.c b/osfmk/arm/caches.c index e5e64cff7..71ed2a214 100644 --- a/osfmk/arm/caches.c +++ b/osfmk/arm/caches.c @@ -55,7 +55,7 @@ #ifndef __ARM_COHERENT_IO__ -extern boolean_t up_style_idle_exit; +TUNABLE(bool, up_style_idle_exit, "up_style_idle_exit", false); void flush_dcache( @@ -83,8 +83,7 @@ flush_dcache( } FlushPoC_DcacheRegion(vaddr, (unsigned)count); if (paddr && (cpu_data_ptr->cpu_cache_dispatch != NULL)) { - ((cache_dispatch_t) cpu_data_ptr->cpu_cache_dispatch)( - cpu_data_ptr->cpu_id, CacheCleanFlushRegion, (unsigned int) paddr, (unsigned)count); + cpu_data_ptr->cpu_cache_dispatch(cpu_data_ptr->cpu_id, CacheCleanFlushRegion, (unsigned int) paddr, (unsigned)count); } addr += count; length -= count; @@ -118,8 +117,7 @@ clean_dcache( } CleanPoC_DcacheRegion(vaddr, (unsigned)count); if (paddr && (cpu_data_ptr->cpu_cache_dispatch != NULL)) { - ((cache_dispatch_t) cpu_data_ptr->cpu_cache_dispatch)( - cpu_data_ptr->cpu_id, CacheCleanRegion, (unsigned int) paddr, (unsigned)count); + cpu_data_ptr->cpu_cache_dispatch(cpu_data_ptr->cpu_id, CacheCleanRegion, (unsigned int) paddr, (unsigned)count); } addr += count; length -= count; @@ -133,12 +131,12 @@ flush_dcache_syscall( unsigned length) { if ((cache_info()->c_bulksize_op != 0) && (length >= (cache_info()->c_bulksize_op))) { -#if __ARM_SMP__ && defined(ARMA7) +#if defined(ARMA7) cache_xcall(LWFlush); #else FlushPoC_Dcache(); - if (getCpuDatap()->cpu_cache_dispatch != (cache_dispatch_t) NULL) { - ((cache_dispatch_t) getCpuDatap()->cpu_cache_dispatch)( getCpuDatap()->cpu_id, CacheCleanFlush, 0x0UL, 0x0UL); + if (getCpuDatap()->cpu_cache_dispatch != NULL) { + getCpuDatap()->cpu_cache_dispatch(getCpuDatap()->cpu_id, CacheCleanFlush, 0x0UL, 0x0UL); } #endif } else { @@ -157,12 +155,12 @@ dcache_incoherent_io_flush64( cpu_data_t *cpu_data_ptr = getCpuDatap(); if ((cache_info()->c_bulksize_op != 0) && (remaining >= (cache_info()->c_bulksize_op))) { -#if __ARM_SMP__ && defined (ARMA7) +#if defined (ARMA7) cache_xcall(LWFlush); #else FlushPoC_Dcache(); - if (cpu_data_ptr->cpu_cache_dispatch != (cache_dispatch_t) NULL) { - ((cache_dispatch_t) cpu_data_ptr->cpu_cache_dispatch)( cpu_data_ptr->cpu_id, CacheCleanFlush, 0x0UL, 0x0UL); + if (cpu_data_ptr->cpu_cache_dispatch != NULL) { + cpu_data_ptr->cpu_cache_dispatch(cpu_data_ptr->cpu_id, CacheCleanFlush, 0x0UL, 0x0UL); } #endif *res = BWOpDone; @@ -190,8 +188,7 @@ dcache_incoherent_io_flush64( FlushPoC_DcacheRegion(vaddr, (unsigned)count); if (isphysmem(paddr)) { if (cpu_data_ptr->cpu_cache_dispatch != NULL) { - ((cache_dispatch_t) cpu_data_ptr->cpu_cache_dispatch)( - cpu_data_ptr->cpu_id, CacheCleanFlushRegion, (unsigned int) paddr, (unsigned)count); + cpu_data_ptr->cpu_cache_dispatch(cpu_data_ptr->cpu_id, CacheCleanFlushRegion, (unsigned int) paddr, (unsigned)count); } } else { pmap_unmap_cpu_windows_copy(index); @@ -223,15 +220,15 @@ dcache_incoherent_io_store64( } if ((cache_info()->c_bulksize_op != 0) && (remaining >= (cache_info()->c_bulksize_op))) { -#if __ARM_SMP__ && defined (ARMA7) +#if defined (ARMA7) cache_xcall(LWClean); - if (cpu_data_ptr->cpu_cache_dispatch != (cache_dispatch_t) NULL) { - ((cache_dispatch_t) cpu_data_ptr->cpu_cache_dispatch)( cpu_data_ptr->cpu_id, CacheClean, 0x0UL, 0x0UL); + if (cpu_data_ptr->cpu_cache_dispatch != NULL) { + cpu_data_ptr->cpu_cache_dispatch(cpu_data_ptr->cpu_id, CacheClean, 0x0UL, 0x0UL); 
} #else CleanPoC_Dcache(); - if (cpu_data_ptr->cpu_cache_dispatch != (cache_dispatch_t) NULL) { - ((cache_dispatch_t) cpu_data_ptr->cpu_cache_dispatch)( cpu_data_ptr->cpu_id, CacheClean, 0x0UL, 0x0UL); + if (cpu_data_ptr->cpu_cache_dispatch != NULL) { + cpu_data_ptr->cpu_cache_dispatch(cpu_data_ptr->cpu_id, CacheClean, 0x0UL, 0x0UL); } #endif *res = BWOpDone; @@ -257,8 +254,7 @@ dcache_incoherent_io_store64( CleanPoC_DcacheRegion(vaddr, (unsigned)count); if (isphysmem(paddr)) { if (cpu_data_ptr->cpu_cache_dispatch != NULL) { - ((cache_dispatch_t) cpu_data_ptr->cpu_cache_dispatch)( - cpu_data_ptr->cpu_id, CacheCleanRegion, (unsigned int) paddr, (unsigned)count); + cpu_data_ptr->cpu_cache_dispatch(cpu_data_ptr->cpu_id, CacheCleanRegion, (unsigned int) paddr, (unsigned)count); } } else { pmap_unmap_cpu_windows_copy(index); @@ -298,13 +294,11 @@ platform_cache_init( cpuid_cache_info = cache_info(); - if (cpu_data_ptr->cpu_cache_dispatch != (cache_dispatch_t) NULL) { - ((cache_dispatch_t) cpu_data_ptr->cpu_cache_dispatch)( - cpu_data_ptr->cpu_id, CacheControl, CacheControlEnable, 0x0UL); + if (cpu_data_ptr->cpu_cache_dispatch != NULL) { + cpu_data_ptr->cpu_cache_dispatch(cpu_data_ptr->cpu_id, CacheControl, CacheControlEnable, 0x0UL); if (cpuid_cache_info->c_l2size == 0x0) { - ((cache_dispatch_t) cpu_data_ptr->cpu_cache_dispatch)( - cpu_data_ptr->cpu_id, CacheConfig, CacheConfigSize, (unsigned int)&cache_size); + cpu_data_ptr->cpu_cache_dispatch(cpu_data_ptr->cpu_id, CacheConfig, CacheConfigSize, (unsigned int)&cache_size); cpuid_cache_info->c_l2size = cache_size; } } @@ -318,9 +312,8 @@ platform_cache_flush( FlushPoC_Dcache(); - if (cpu_data_ptr->cpu_cache_dispatch != (cache_dispatch_t) NULL) { - ((cache_dispatch_t) cpu_data_ptr->cpu_cache_dispatch)( - cpu_data_ptr->cpu_id, CacheCleanFlush, 0x0UL, 0x0UL); + if (cpu_data_ptr->cpu_cache_dispatch != NULL) { + cpu_data_ptr->cpu_cache_dispatch(cpu_data_ptr->cpu_id, CacheCleanFlush, 0x0UL, 0x0UL); } } @@ -332,9 +325,8 @@ platform_cache_clean( CleanPoC_Dcache(); - if (cpu_data_ptr->cpu_cache_dispatch != (cache_dispatch_t) NULL) { - ((cache_dispatch_t) cpu_data_ptr->cpu_cache_dispatch)( - cpu_data_ptr->cpu_id, CacheClean, 0x0UL, 0x0UL); + if (cpu_data_ptr->cpu_cache_dispatch != NULL) { + cpu_data_ptr->cpu_cache_dispatch(cpu_data_ptr->cpu_id, CacheClean, 0x0UL, 0x0UL); } } @@ -346,9 +338,8 @@ platform_cache_shutdown( CleanPoC_Dcache(); - if (cpu_data_ptr->cpu_cache_dispatch != (cache_dispatch_t) NULL) { - ((cache_dispatch_t) cpu_data_ptr->cpu_cache_dispatch)( - cpu_data_ptr->cpu_id, CacheShutdown, 0x0UL, 0x0UL); + if (cpu_data_ptr->cpu_cache_dispatch != NULL) { + cpu_data_ptr->cpu_cache_dispatch(cpu_data_ptr->cpu_id, CacheShutdown, 0x0UL, 0x0UL); } } @@ -370,7 +361,6 @@ void platform_cache_idle_enter( void) { -#if __ARM_SMP__ platform_cache_disable(); /* @@ -393,11 +383,8 @@ platform_cache_idle_enter( CleanPoC_DcacheRegion((vm_offset_t) cpu_data_ptr, sizeof(cpu_data_t)); #endif /* (__ARM_ARCH__ < 8) */ } -#else /* !__ARM_SMP__ */ - CleanPoU_Dcache(); -#endif /* !__ARM_SMP__ */ -#if defined(__ARM_SMP__) && defined(ARMA7) +#if defined(ARMA7) uint32_t actlr_value = 0; /* Leave the coherency domain */ @@ -412,7 +399,7 @@ platform_cache_idle_enter( __builtin_arm_isb(ISB_SY); /* Ensures the second possible pending fwd request ends up. 
*/ __builtin_arm_dsb(DSB_SY); -#endif /* defined(__ARM_SMP__) && defined(ARMA7) */ +#endif /* defined(ARMA7) */ } void @@ -442,7 +429,6 @@ platform_cache_idle_exit( __builtin_arm_mcr(MCR_ACTLR(actlr_value)); __builtin_arm_isb(ISB_SY); -#if __ARM_SMP__ uint32_t sctlr_value = 0; /* Enable dcache allocation. */ @@ -451,7 +437,6 @@ platform_cache_idle_exit( __builtin_arm_mcr(MCR_SCTLR(sctlr_value)); __builtin_arm_isb(ISB_SY); getCpuDatap()->cpu_CLW_active = 1; -#endif /* __ARM_SMP__ */ #endif /* defined(ARMA7) */ } @@ -475,17 +460,17 @@ platform_cache_flush_wimg( __unused unsigned int new_wimg ) { -#if __ARM_SMP__ && defined (ARMA7) +#if defined (ARMA7) cache_xcall(LWFlush); #else FlushPoC_Dcache(); - if (getCpuDatap()->cpu_cache_dispatch != (cache_dispatch_t) NULL) { - ((cache_dispatch_t) getCpuDatap()->cpu_cache_dispatch)( getCpuDatap()->cpu_id, CacheCleanFlush, 0x0UL, 0x0UL); + if (getCpuDatap()->cpu_cache_dispatch != NULL) { + getCpuDatap()->cpu_cache_dispatch(getCpuDatap()->cpu_id, CacheCleanFlush, 0x0UL, 0x0UL); } #endif } -#if __ARM_SMP__ && defined(ARMA7) +#if defined(ARMA7) void cache_xcall_handler(unsigned int op) { @@ -526,7 +511,8 @@ cache_xcall(unsigned int op) signal = SIGPLWFlush; } - for (cpu = 0; cpu < MAX_CPUS; cpu++) { + const unsigned int max_cpu_id = ml_get_max_cpu_number(); + for (cpu = 0; cpu <= max_cpu_id; cpu++) { target_cdp = (cpu_data_t *)CpuDataEntries[cpu].cpu_data_vaddr; if (target_cdp == (cpu_data_t *)NULL) { break; @@ -571,7 +557,7 @@ cache_xcall(unsigned int op) (void) ml_set_interrupts_enabled(intr); - for (cpu = 0; cpu < MAX_CPUS; cpu++) { + for (cpu = 0; cpu <= max_cpu_id; cpu++) { target_cdp = (cpu_data_t *)CpuDataEntries[cpu].cpu_data_vaddr; if (target_cdp == (cpu_data_t *)NULL) { break; diff --git a/osfmk/arm/caches_asm.s b/osfmk/arm/caches_asm.s index 0b305f48f..5556a00ae 100644 --- a/osfmk/arm/caches_asm.s +++ b/osfmk/arm/caches_asm.s @@ -31,6 +31,7 @@ #include #include #include "assym.s" +#include "caches_macros.s" /* @@ -143,31 +144,37 @@ fmir_loop: LEXT(CleanPoC_Dcache) LEXT(clean_mmu_dcache) #if !defined(__ARM_L1_WT_CACHE__) + mov r0, #0 + GET_CACHE_CONFIG r0, r1, r2, r3 mov r0, #0 dsb clean_dcacheway: clean_dcacheline: mcr p15, 0, r0, c7, c10, 2 // clean dcache line by way/set - add r0, r0, #1 << MMU_I7SET // increment set index - tst r0, #1 << (MMU_NSET + MMU_I7SET) // look for overflow + add r0, r0, r1 // increment set index + tst r0, r2 // look for overflow beq clean_dcacheline - bic r0, r0, #1 << (MMU_NSET + MMU_I7SET) // clear set overflow - adds r0, r0, #1 << MMU_I7WAY // increment way + bic r0, r0, r2 // clear set overflow + adds r0, r0, r3 // increment way bcc clean_dcacheway // loop #endif -#if __ARM_L2CACHE__ + HAS_L2_CACHE r0 + cmp r0, #0 + beq clean_skipl2dcache + mov r0, #1 + GET_CACHE_CONFIG r0, r1, r2, r3 dsb mov r0, #2 clean_l2dcacheway: clean_l2dcacheline: mcr p15, 0, r0, c7, c10, 2 // clean dcache line by way/set - add r0, r0, #1 << L2_I7SET // increment set index - tst r0, #1 << (L2_NSET + L2_I7SET) // look for overflow + add r0, r0, r1 // increment set index + tst r0, r2 // look for overflow beq clean_l2dcacheline - bic r0, r0, #1 << (L2_NSET + L2_I7SET) // clear set overflow - adds r0, r0, #1 << L2_I7WAY // increment way + bic r0, r0, r2 // clear set overflow + adds r0, r0, r3 // increment way bcc clean_l2dcacheway // loop -#endif +clean_skipl2dcache: dsb bx lr @@ -181,16 +188,18 @@ clean_l2dcacheline: .globl EXT(CleanPoU_Dcache) LEXT(CleanPoU_Dcache) #if !defined(__ARM_PoU_WT_CACHE__) + mov r0, #0 + GET_CACHE_CONFIG r0, r1, 
r2, r3 mov r0, #0 dsb clean_dcacheway_idle: clean_dcacheline_idle: mcr p15, 0, r0, c7, c10, 2 // clean dcache line by way/set - add r0, r0, #1 << MMU_I7SET // increment set index - tst r0, #1 << (MMU_NSET + MMU_I7SET) // look for overflow + add r0, r0, r1 // increment set index + tst r0, r2 // look for overflow beq clean_dcacheline_idle - bic r0, r0, #1 << (MMU_NSET + MMU_I7SET) // clear set overflow - adds r0, r0, #1 << MMU_I7WAY // increment way + bic r0, r0, r2 // clear set overflow + adds r0, r0, r3 // increment way bcc clean_dcacheway_idle // loop #endif dsb @@ -224,7 +233,7 @@ cudr_loop: bx lr /* - * void CleanPoC_DcacheRegion(vm_offset_t va, unsigned length) + * void CleanPoC_DcacheRegion(vm_offset_t va, size_t length) * * Clean d-cache region to Point of Coherency */ @@ -256,30 +265,36 @@ ccdr_loop: .align 2 .globl EXT(FlushPoC_Dcache) LEXT(FlushPoC_Dcache) + mov r0, #0 + GET_CACHE_CONFIG r0, r1, r2, r3 mov r0, #0 dsb cleanflush_dcacheway: cleanflush_dcacheline: mcr p15, 0, r0, c7, c14, 2 // cleanflush dcache line by way/set - add r0, r0, #1 << MMU_I7SET // increment set index - tst r0, #1 << (MMU_NSET + MMU_I7SET) // look for overflow + add r0, r0, r1 // increment set index + tst r0, r2 // look for overflow beq cleanflush_dcacheline - bic r0, r0, #1 << (MMU_NSET + MMU_I7SET) // clear set overflow - adds r0, r0, #1 << MMU_I7WAY // increment way + bic r0, r0, r2 // clear set overflow + adds r0, r0, r3 // increment way bcc cleanflush_dcacheway // loop -#if __ARM_L2CACHE__ + HAS_L2_CACHE r0 + cmp r0, #0 + beq cleanflush_skipl2dcache + mov r0, #1 + GET_CACHE_CONFIG r0, r1, r2, r3 dsb mov r0, #2 cleanflush_l2dcacheway: cleanflush_l2dcacheline: mcr p15, 0, r0, c7, c14, 2 // cleanflush dcache line by way/set - add r0, r0, #1 << L2_I7SET // increment set index - tst r0, #1 << (L2_NSET + L2_I7SET) // look for overflow + add r0, r0, r1 // increment set index + tst r0, r2 // look for overflow beq cleanflush_l2dcacheline - bic r0, r0, #1 << (L2_NSET + L2_I7SET) // clear set overflow - adds r0, r0, #1 << L2_I7WAY // increment way + bic r0, r0, r2 // clear set overflow + adds r0, r0, r3 // increment way bcc cleanflush_l2dcacheway // loop -#endif +cleanflush_skipl2dcache: dsb bx lr @@ -292,16 +307,18 @@ cleanflush_l2dcacheline: .align 2 .globl EXT(FlushPoU_Dcache) LEXT(FlushPoU_Dcache) + mov r0, #0 + GET_CACHE_CONFIG r0, r1, r2, r3 mov r0, #0 dsb fpud_way: fpud_line: mcr p15, 0, r0, c7, c14, 2 // cleanflush dcache line by way/set - add r0, r0, #1 << MMU_I7SET // increment set index - tst r0, #1 << (MMU_NSET + MMU_I7SET) // look for overflow + add r0, r0, r1 // increment set index + tst r0, r2 // look for overflow beq fpud_line - bic r0, r0, #1 << (MMU_NSET + MMU_I7SET) // clear set overflow - adds r0, r0, #1 << MMU_I7WAY // increment way + bic r0, r0, r2 // clear set overflow + adds r0, r0, r3 // increment way bcc fpud_way // loop dsb bx lr diff --git a/osfmk/arm/caches_internal.h b/osfmk/arm/caches_internal.h index 60a1d40b5..74a1f7767 100644 --- a/osfmk/arm/caches_internal.h +++ b/osfmk/arm/caches_internal.h @@ -40,7 +40,7 @@ extern void flush_dcache64(addr64_t addr, unsigned count, int phys); extern void invalidate_icache(vm_offset_t addr, unsigned cnt, int phys); extern void invalidate_icache64(addr64_t addr, unsigned cnt, int phys); -#if __ARM_SMP__ && defined(ARMA7) +#if defined(ARMA7) #define LWFlush 1 #define LWClean 2 extern void cache_xcall(unsigned int op); @@ -60,7 +60,7 @@ extern void CleanPoU_Dcache(void); * This is the one you need unless you really know what * you're doing. 
*/ -extern void CleanPoC_DcacheRegion(vm_offset_t va, unsigned length); +extern void CleanPoC_DcacheRegion(vm_offset_t va, size_t length); /* * Always actually flushes the cache, even on platforms @@ -69,22 +69,22 @@ extern void CleanPoC_DcacheRegion(vm_offset_t va, unsigned length); * panic save routine (where caches will be yanked by reset * and coherency doesn't help). */ -extern void CleanPoC_DcacheRegion_Force(vm_offset_t va, unsigned length); +extern void CleanPoC_DcacheRegion_Force(vm_offset_t va, size_t length); -extern void CleanPoU_DcacheRegion(vm_offset_t va, unsigned length); +extern void CleanPoU_DcacheRegion(vm_offset_t va, size_t length); extern void FlushPoC_Dcache(void); extern void FlushPoU_Dcache(void); -extern void FlushPoC_DcacheRegion(vm_offset_t va, unsigned length); +extern void FlushPoC_DcacheRegion(vm_offset_t va, size_t length); #ifdef __arm__ extern void invalidate_mmu_cache(void); extern void invalidate_mmu_dcache(void); -extern void invalidate_mmu_dcache_region(vm_offset_t va, unsigned length); +extern void invalidate_mmu_dcache_region(vm_offset_t va, size_t length); #endif extern void InvalidatePoU_Icache(void); -extern void InvalidatePoU_IcacheRegion(vm_offset_t va, unsigned length); +extern void InvalidatePoU_IcacheRegion(vm_offset_t va, size_t length); extern void cache_sync_page(ppnum_t pp); diff --git a/osfmk/arm/caches_macros.s b/osfmk/arm/caches_macros.s new file mode 100644 index 000000000..c02867726 --- /dev/null +++ b/osfmk/arm/caches_macros.s @@ -0,0 +1,83 @@ +/* + * Copyright (c) 2019 Apple Inc. All rights reserved. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. + * + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ + */ + +/* + * Obtains cache physical layout information required for way/set + * data cache maintenance operations. 
+ * + * $0: Data cache level, starting from 0 + * $1: Output register for set increment + * $2: Output register for last valid set + * $3: Output register for way increment + */ +.macro GET_CACHE_CONFIG + lsl $0, $0, #1 + mcr p15, 2, $0, c0, c0, 0 // Select appropriate cache + isb // Synchronize context + + mrc p15, 1, $0, c0, c0, 0 + ubfx $1, $0, #3, #10 // extract number of ways - 1 + mov $2, $1 + add $1, $1, #1 // calculate number of ways + + mov $0, #31 + and $2, $2, $1 + cmp $2, #0 + addne $0, $0, #1 + clz $1, $1 + sub $0, $0, $1 + + mov $1, #32 // calculate way increment + sub $3, $1, $0 + mov $1, #1 + lsl $3, $1, $3 + + mrc p15, 1, $0, c0, c0, 0 + ubfx $1, $0, #0, #3 // extract log2(line size) - 4 + add $1, $1, #4 // calculate log2(line size) + mov $2, #1 + lsl $1, $2, $1 // calculate set increment + + ubfx $2, $0, #13, #15 // extract number of sets - 1 + add $2, $2, #1 // calculate number of sets + mul $2, $1, $2 // calculate last valid set +.endmacro + +/* + * Detects the presence of an L2 cache and returns 1 if implemented, + * zero otherwise. + * + * $0: Output register + */ +.macro HAS_L2_CACHE + mrc p15, 1, $0, c0, c0, 1 + ubfx $0, $0, #3, #3 // extract L2 cache Ctype + cmp $0, #0x1 + movls $0, #0 + movhi $0, #1 +.endmacro \ No newline at end of file diff --git a/osfmk/arm/commpage/commpage.c b/osfmk/arm/commpage/commpage.c index 2aebfb3ce..fa8df6ff6 100644 --- a/osfmk/arm/commpage/commpage.c +++ b/osfmk/arm/commpage/commpage.c @@ -54,6 +54,7 @@ #include #include #include +#include #include @@ -61,32 +62,46 @@ #include #endif -static void commpage_init_cpu_capabilities( void ); static int commpage_cpus( void ); + +static void commpage_init_cpu_capabilities( void ); + SECURITY_READ_ONLY_LATE(vm_address_t) commPagePtr = 0; SECURITY_READ_ONLY_LATE(vm_address_t) sharedpage_rw_addr = 0; -SECURITY_READ_ONLY_LATE(uint32_t) _cpu_capabilities = 0; +SECURITY_READ_ONLY_LATE(uint64_t) _cpu_capabilities = 0; +SECURITY_READ_ONLY_LATE(vm_address_t) sharedpage_rw_text_addr = 0; + +extern user64_addr_t commpage_text64_location; +extern user32_addr_t commpage_text32_location; /* For sysctl access from BSD side */ extern int gARMv81Atomics; extern int gARMv8Crc32; extern int gARMv82FHM; +extern int gARMv82SHA512; +extern int gARMv82SHA3; void -commpage_populate( - void) +commpage_populate(void) { uint16_t c2; int cpufamily; - sharedpage_rw_addr = pmap_create_sharedpage(); - commPagePtr = (vm_address_t)_COMM_PAGE_BASE_ADDRESS; + // Create the data and the text commpage + vm_map_address_t kernel_data_addr, kernel_text_addr, user_text_addr; + pmap_create_sharedpages(&kernel_data_addr, &kernel_text_addr, &user_text_addr); + + sharedpage_rw_addr = kernel_data_addr; + sharedpage_rw_text_addr = kernel_text_addr; + commPagePtr = (vm_address_t) _COMM_PAGE_BASE_ADDRESS; #if __arm64__ + commpage_text64_location = user_text_addr; bcopy(_COMM_PAGE64_SIGNATURE_STRING, (void *)(_COMM_PAGE_SIGNATURE + _COMM_PAGE_RW_OFFSET), MIN(_COMM_PAGE_SIGNATURELEN, strlen(_COMM_PAGE64_SIGNATURE_STRING))); #else + commpage_text32_location = user_text_addr; bcopy(_COMM_PAGE32_SIGNATURE_STRING, (void *)(_COMM_PAGE_SIGNATURE + _COMM_PAGE_RW_OFFSET), MIN(_COMM_PAGE_SIGNATURELEN, strlen(_COMM_PAGE32_SIGNATURE_STRING))); #endif @@ -107,19 +122,17 @@ commpage_populate( } *((uint16_t*)(_COMM_PAGE_CACHE_LINESIZE + _COMM_PAGE_RW_OFFSET)) = c2; - *((uint32_t*)(_COMM_PAGE_SPIN_COUNT + _COMM_PAGE_RW_OFFSET)) = 1; commpage_update_active_cpus(); cpufamily = cpuid_get_cpufamily(); - /* machine_info valid after ml_get_max_cpus() */ 
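For reference, a minimal C sketch of what the GET_CACHE_CONFIG macro above derives from CCSIDR (the set increment, the set-overflow test value, and the way increment used by the set/way clean and clean-flush loops). Field positions follow the ARMv7 CCSIDR layout; the struct and helper names are hypothetical and assume a set-associative cache (ways >= 2).

#include <stdint.h>

struct cache_geometry {
	uint32_t set_incr;   /* step between set indices = line size in bytes */
	uint32_t set_limit;  /* set_incr * number of sets; used to detect set-index overflow */
	uint32_t way_incr;   /* step between way indices = 1 << (32 - ceil(log2(ways))) */
};

static struct cache_geometry
decode_ccsidr(uint32_t ccsidr)
{
	struct cache_geometry g;
	uint32_t line_log2 = (ccsidr & 0x7u) + 4;             /* bits [2:0]: log2(line bytes) - 4 */
	uint32_t ways      = ((ccsidr >> 3) & 0x3ffu) + 1;    /* bits [12:3]: associativity - 1 */
	uint32_t sets      = ((ccsidr >> 13) & 0x7fffu) + 1;  /* bits [27:13]: number of sets - 1 */

	/* ceil(log2(ways)); mirrors the clz-based computation in the macro */
	uint32_t way_shift = (ways & (ways - 1)) ? (32u - (uint32_t)__builtin_clz(ways))
	                                         : (31u - (uint32_t)__builtin_clz(ways));

	g.set_incr  = 1u << line_log2;
	g.set_limit = g.set_incr * sets;
	g.way_incr  = 1u << (32u - way_shift);
	return g;
}

A maintenance loop then adds set_incr until the set field reaches set_limit, clears the set field, and adds way_incr until the 32-bit addition carries out, exactly as the rewritten clean/cleanflush loops do with r1, r2 and r3.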
*((uint8_t*)(_COMM_PAGE_PHYSICAL_CPUS + _COMM_PAGE_RW_OFFSET)) = (uint8_t) machine_info.physical_cpu_max; *((uint8_t*)(_COMM_PAGE_LOGICAL_CPUS + _COMM_PAGE_RW_OFFSET)) = (uint8_t) machine_info.logical_cpu_max; *((uint64_t*)(_COMM_PAGE_MEMORY_SIZE + _COMM_PAGE_RW_OFFSET)) = machine_info.max_mem; *((uint32_t*)(_COMM_PAGE_CPUFAMILY + _COMM_PAGE_RW_OFFSET)) = (uint32_t)cpufamily; *((uint32_t*)(_COMM_PAGE_DEV_FIRM + _COMM_PAGE_RW_OFFSET)) = (uint32_t)PE_i_can_has_debugger(NULL); *((uint8_t*)(_COMM_PAGE_USER_TIMEBASE + _COMM_PAGE_RW_OFFSET)) = user_timebase_type(); - *((uint8_t*)(_COMM_PAGE_CONT_HWCLOCK + _COMM_PAGE_RW_OFFSET)) = user_cont_hwclock_allowed(); + *((uint8_t*)(_COMM_PAGE_CONT_HWCLOCK + _COMM_PAGE_RW_OFFSET)) = (uint8_t)user_cont_hwclock_allowed(); *((uint8_t*)(_COMM_PAGE_KERNEL_PAGE_SHIFT + _COMM_PAGE_RW_OFFSET)) = (uint8_t) page_shift; #if __arm64__ @@ -163,11 +176,70 @@ commpage_populate( *((uint64_t*)(_COMM_PAGE_REMOTETIME_PARAMS + _COMM_PAGE_RW_OFFSET)) = BT_RESET_SENTINEL_TS; } -struct mu { - uint64_t m; // magic number - int32_t a; // add indicator - int32_t s; // shift amount -}; +#define COMMPAGE_TEXT_SEGMENT "__TEXT_EXEC" +#define COMMPAGE_TEXT_SECTION "__commpage_text" + +/* Get a pointer to the start of the ARM PFZ code section. This macro tell the + * linker that the storage for the variable here is at the start of the section */ +extern char commpage_text_start[] +__SECTION_START_SYM(COMMPAGE_TEXT_SEGMENT, COMMPAGE_TEXT_SECTION); + +/* Get a pointer to the end of the ARM PFZ code section. This macro tell the + * linker that the storage for the variable here is at the end of the section */ +extern char commpage_text_end[] +__SECTION_END_SYM(COMMPAGE_TEXT_SEGMENT, COMMPAGE_TEXT_SECTION); + +/* This is defined in the commpage text section as a symbol at the start of the preemptible + * functions */ +extern char commpage_text_preemptible_functions; + +#if CONFIG_ARM_PFZ +static size_t size_of_pfz = 0; +#endif + +/* This is the opcode for brk #666 */ +#define BRK_666_OPCODE 0xD4205340 + +void +commpage_text_populate(void) +{ +#if CONFIG_ARM_PFZ + size_t size_of_commpage_text = commpage_text_end - commpage_text_start; + if (size_of_commpage_text == 0) { + panic("ARM comm page text section %s,%s missing", COMMPAGE_TEXT_SEGMENT, COMMPAGE_TEXT_SECTION); + } + assert(size_of_commpage_text <= PAGE_SIZE); + assert(size_of_commpage_text > 0); + + /* Get the size of the PFZ half of the comm page text section. 
*/ + size_of_pfz = &commpage_text_preemptible_functions - commpage_text_start; + + // Copy the code segment of comm page text section into the PFZ + memcpy((void *) _COMM_PAGE64_TEXT_START_ADDRESS, (void *) commpage_text_start, size_of_commpage_text); + + // Make sure to populate the rest of it with brk 666 so that undefined code + // doesn't get run + memset((char *) _COMM_PAGE64_TEXT_START_ADDRESS + size_of_commpage_text, BRK_666_OPCODE, + PAGE_SIZE - size_of_commpage_text); +#endif +} + +uint32_t +commpage_is_in_pfz64(addr64_t addr64) +{ +#if CONFIG_ARM_PFZ + if ((addr64 >= commpage_text64_location) && + (addr64 < (commpage_text64_location + size_of_pfz))) { + return 1; + } else { + return 0; + } +#else +#pragma unused (addr64) + return 0; +#endif +} + void commpage_set_timestamp( @@ -199,6 +271,7 @@ commpage_set_timestamp( __asm__ volatile ("dmb ish"); #endif commpage_timeofday_datap->TimeStamp_tick = tbr; + } /* @@ -215,24 +288,6 @@ commpage_set_memory_pressure( *((uint32_t *)(_COMM_PAGE_MEMORY_PRESSURE + _COMM_PAGE_RW_OFFSET)) = pressure; } -/* - * Update _COMM_PAGE_SPIN_COUNT. We might want to reduce when running on a battery, etc. - */ - -void -commpage_set_spin_count( - unsigned int count ) -{ - if (count == 0) { /* we test for 0 after decrement, not before */ - count = 1; - } - - if (commPagePtr == 0) { - return; - } - *((uint32_t *)(_COMM_PAGE_SPIN_COUNT + _COMM_PAGE_RW_OFFSET)) = count; -} - /* * Determine number of CPUs on this system. */ @@ -241,7 +296,7 @@ commpage_cpus( void ) { int cpus; - cpus = ml_get_max_cpus(); // NB: this call can block + cpus = machine_info.max_cpus; if (cpus == 0) { panic("commpage cpus==0"); @@ -253,7 +308,7 @@ commpage_cpus( void ) return cpus; } -int +uint64_t _get_cpu_capabilities(void) { return _cpu_capabilities; @@ -265,13 +320,19 @@ _get_commpage_priv_address(void) return sharedpage_rw_addr; } +vm_address_t +_get_commpage_text_priv_address(void) +{ + return sharedpage_rw_text_addr; +} + /* * Initialize _cpu_capabilities vector */ static void commpage_init_cpu_capabilities( void ) { - uint32_t bits; + uint64_t bits; int cpus; ml_cpu_info_t cpu_info; @@ -318,14 +379,8 @@ commpage_init_cpu_capabilities( void ) bits |= kHasFMA; #endif #if __ARM_ENABLE_WFE_ -#ifdef __arm64__ - if (arm64_wfe_allowed()) { - bits |= kHasEvent; - } -#else bits |= kHasEvent; #endif -#endif #if __ARM_V8_CRYPTO_EXTENSIONS__ bits |= kHasARMv8Crypto; #endif @@ -343,6 +398,16 @@ commpage_init_cpu_capabilities( void ) bits |= kHasARMv82FHM; gARMv82FHM = 1; } + + if ((isar0 & ID_AA64ISAR0_EL1_SHA2_MASK) > ID_AA64ISAR0_EL1_SHA2_EN) { + bits |= kHasARMv82SHA512; + gARMv82SHA512 = 1; + } + if ((isar0 & ID_AA64ISAR0_EL1_SHA3_MASK) >= ID_AA64ISAR0_EL1_SHA3_EN) { + bits |= kHasARMv82SHA3; + gARMv82SHA3 = 1; + } + #endif @@ -350,7 +415,8 @@ commpage_init_cpu_capabilities( void ) _cpu_capabilities = bits; - *((uint32_t *)(_COMM_PAGE_CPU_CAPABILITIES + _COMM_PAGE_RW_OFFSET)) = _cpu_capabilities; + *((uint32_t *)(_COMM_PAGE_CPU_CAPABILITIES + _COMM_PAGE_RW_OFFSET)) = (uint32_t)_cpu_capabilities; + *((uint64_t *)(_COMM_PAGE_CPU_CAPABILITIES64 + _COMM_PAGE_RW_OFFSET)) = _cpu_capabilities; } /* @@ -362,7 +428,8 @@ commpage_update_active_cpus(void) if (!commPagePtr) { return; } - *((uint8_t *)(_COMM_PAGE_ACTIVE_CPUS + _COMM_PAGE_RW_OFFSET)) = processor_avail_count; + *((uint8_t *)(_COMM_PAGE_ACTIVE_CPUS + _COMM_PAGE_RW_OFFSET)) = (uint8_t)processor_avail_count; + } /* @@ -463,6 +530,12 @@ commpage_update_mach_continuous_time(uint64_t sleeptime) } } +void 
+commpage_update_mach_continuous_time_hw_offset(uint64_t offset) +{ + *((uint64_t *)(_COMM_PAGE_CONT_HW_TIMEBASE + _COMM_PAGE_RW_OFFSET)) = offset; +} + /* * update the commpage's value for the boot time */ @@ -564,5 +637,6 @@ commpage_update_dof(boolean_t enabled) void commpage_update_dyld_flags(uint64_t value) { - *((uint64_t*)(_COMM_PAGE_DYLD_SYSTEM_FLAGS + _COMM_PAGE_RW_OFFSET)) = value; + *((uint64_t*)(_COMM_PAGE_DYLD_FLAGS + _COMM_PAGE_RW_OFFSET)) = value; + } diff --git a/osfmk/arm/commpage/commpage.h b/osfmk/arm/commpage/commpage.h index ee124d4b1..3b54771f8 100644 --- a/osfmk/arm/commpage/commpage.h +++ b/osfmk/arm/commpage/commpage.h @@ -31,6 +31,7 @@ #ifndef __ASSEMBLER__ #include +#include #endif /* __ASSEMBLER__ */ extern void commpage_set_timestamp(uint64_t tbr, uint64_t secs, uint64_t frac, uint64_t scale, uint64_t tick_per_sec); @@ -43,11 +44,13 @@ extern void commpage_update_mach_approximate_time(uint64_t); extern void commpage_update_kdebug_state(void); extern void commpage_update_atm_diagnostic_config(uint32_t); extern void commpage_update_mach_continuous_time(uint64_t sleeptime); +extern void commpage_update_mach_continuous_time_hw_offset(uint64_t offset); extern void commpage_update_multiuser_config(uint32_t); extern void commpage_update_boottime(uint64_t boottime_usec); extern void commpage_set_remotetime_params(double rate, uint64_t base_local_ts, uint64_t base_remote_ts); extern uint64_t commpage_increment_cpu_quiescent_counter(void); extern void commpage_update_dof(boolean_t enabled); extern void commpage_update_dyld_flags(uint64_t value); +extern uint32_t commpage_is_in_pfz64(addr64_t addr); #endif /* _ARM_COMMPAGE_H */ diff --git a/osfmk/arm/commpage/commpage_asm.s b/osfmk/arm/commpage/commpage_asm.s new file mode 100644 index 000000000..da0bbba9b --- /dev/null +++ b/osfmk/arm/commpage/commpage_asm.s @@ -0,0 +1,528 @@ +/* + * Copyright (c) 2020 Apple Inc. All rights reserved. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. + * + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ + */ + +#include + +/* This section has all the code necessary for the atomic operations supported by + * OSAtomicFifoEnqueue, OSAtomicFifoDequeue APIs in libplatform. + * + * This code needs to be compiled as 1 section and should not make branches + * outside of this section. 
This allows us to copy the entire section to the + * text comm page once it is created - see osfmk/arm/commpage/commpage.c + * + * This section is split into 2 parts - the preemption-free zone (PFZ) routines + * and the preemptible routines (non-PFZ). The PFZ routines will not be + * preempted by the scheduler if the pc of the userspace process is in that + * region while handling asynchronous interrupts (note that traps are still + * possible in the PFZ). Instead, the scheduler will mark x15 (known through + * coordination with the functions in the commpage section) to indicate to the + * userspace code that it needs to take a delayed preemption. The PFZ functions + * may make callouts to preemptible routines and vice-versa. When a function + * returns to a preemptible routine after a callout to a function in the PFZ, it + * needs to check x15 to determine if a delayed preemption needs to be taken. In + * addition, functions in the PFZ should not have backwards branches. + * + * The entry point to execute code in the commpage text section is through the + * jump table at the very top of the section. The base of the jump table is + * exposed to userspace via the APPLE array and the offsets from the base of the + * jump table are listed in the arm/cpu_capabilities.h header. Adding any new + * functions in the PFZ requires a lockstep change to the cpu_capabilities.h + * header. + * + * Functions in PFZ: + * Enqueue function + * Dequeue function + * + * Functions not in PFZ: + * Backoff function as part of spin loop + * Preempt function to take delayed preemption as indicated by kernel + * + * ---------------------------------------------------------------------- + * + * The high level goal of the asm code in this section is to enqueue and dequeue + * from a FIFO linked list. + * + * typedef volatile struct { + * void *opaque1; <-- ptr to first queue element or null + * void *opaque2; <-- ptr to second queue element or null + * int opaque3; <-- spinlock + * } OSFifoQueueHead; + * + * This is done through a userspace spin lock stored in the linked list head + * for synchronization. + * + * Here is the pseudocode for the spin lock acquire algorithm which is split + * between the PFZ and the non-PFZ areas of the commpage text section. The + * pseudocode here is just for the enqueue operation but it is symmetrical for + * the dequeue operation. + * + * // Not in the PFZ. Entry from jump table. + * ENQUEUE() + * enqueued = TRY_LOCK_AND_ENQUEUE(lock_addr); + * // We're running here after running the TRY_LOCK_AND_ENQUEUE code in + * // the PFZ so we need to check if we need to take a delayed + * // preemption. + * if (kernel_wants_to_preempt_us){ + * // This is done through the pfz_exit() mach trap which is a dummy + * // syscall whose sole purpose is to allow the thread to enter the + * // kernel so that it can be preempted at AST. 
+ * enter_kernel_to_take_delayed_preemption() + * } + * + * if (!enqueued) { + * ARM_MONITOR; + * WFE; + * enqueued = TRY_LOCK_AND_ENQUEUE(lock_addr); + * if (!enqueued) { + * // We failed twice, take a backoff + * BACKOFF(); + * goto ENQUEUE() + * } else { + * // We got here from PFZ, check for delayed preemption + * if (kernel_wants_to_preempt_us){ + * enter_kernel_to_take_delayed_preemption() + * } + * } + * } + * + * // in PFZ + * TRY_LOCK_AND_ENQUEUE(): + * is_locked = try_lock(lock_addr); + * if (is_locked) { + * + * return true + * } else { + * return false + * } + * + * + * // Not in the PFZ + * BACKOFF(): + * // We're running here after running the TRY_LOCK_AND_ENQUEUE code in + * // the PFZ so we need to check if we need to take a delayed + * // preemption. + * if (kernel_wants_to_preempt_us) { + * enter_kernel_to_take_preemption() + * } else { + * // Note that it is safe to do this loop here since the entire + * // BACKOFF function isn't in the PFZ and so can be preempted at any + * // time + * do { + * lock_is_free = peek(lock_addr); + * if (lock_is_free) { + * return + * } else { + * pause_with_monitor(lock_addr) + * } + * } while (1) + * } + */ + +/* Macros and helpers */ + +.macro BACKOFF lock_addr + // Save registers we can't clobber + stp x0, x1, [sp, #-16]! + stp x2, x9, [sp, #-16]! + + // Pass in lock addr to backoff function + mov x0, \lock_addr + bl _backoff // Jump out of the PFZ zone now + + // Restore registers + ldp x2, x9, [sp], #16 + ldp x0, x1, [sp], #16 +.endmacro + +/* x0 = pointer to queue head + * x1 = pointer to new elem to enqueue + * x2 = offset of link field inside element + * x3 = Address of lock + * + * Moves result of the helper function to the register specified + */ +.macro TRYLOCK_ENQUEUE result + stp x0, xzr, [sp, #-16]! // Save x0 since it'll be clobbered by return value + + bl _pfz_trylock_and_enqueue + mov \result, x0 + + ldp x0, xzr, [sp], #16 // Restore saved registers +.endmacro + +/* x0 = pointer to queue head + * x1 = offset of link field inside element + * x2 = Address of lock + * + * Moves result of the helper function to the register specified + */ +.macro TRYLOCK_DEQUEUE result + stp x0, xzr, [sp, #-16]! // Save x0 since it'll be clobbered by return value + + bl _pfz_trylock_and_dequeue + mov \result, x0 + + ldp x0, xzr, [sp], #16 // Restore saved registers +.endmacro + +/* + * Takes a delayed preemption if needed and then branches to the label + * specified. 
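The PFZ helpers below implement the TRY_LOCK_AND_ENQUEUE step from the pseudocode above in assembly. As a rough C11-atomics rendering of the same lock handoff (acquire CAS to take opaque3, release store to drop it), with illustrative type and field names rather than the real libplatform declarations:

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

typedef struct {
	void *first;            /* opaque1: head, or NULL if empty */
	void *last;             /* opaque2: tail, or NULL if empty */
	_Atomic uint32_t lock;  /* opaque3: 0 = free, 1 = held */
} fifo_queue_head_t;

static bool
trylock_and_enqueue(fifo_queue_head_t *q, void *elem, size_t link_offset)
{
	uint32_t expected = 0;

	/* casa w10, w11, [x3]: compare-and-swap with acquire semantics */
	if (!atomic_compare_exchange_strong_explicit(&q->lock, &expected, 1,
	    memory_order_acquire, memory_order_relaxed)) {
		return false;  /* lock busy: caller waits with wfe or backs off */
	}

	*(void **)((char *)elem + link_offset) = NULL;  /* new element terminates the list */
	if (q->last != NULL) {
		*(void **)((char *)q->last + link_offset) = elem;  /* old tail -> new element */
	} else {
		q->first = elem;  /* empty queue: new element is also the head */
	}
	q->last = elem;

	/* stlr wzr, [x3]: release the spinlock */
	atomic_store_explicit(&q->lock, 0, memory_order_release);
	return true;
}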
+ * + * Modifies x15 + */ +.macro PREEMPT_SELF_THEN branch_to_take_on_success + cbz x15, \branch_to_take_on_success // No delayed preemption to take, just try again + + mov x15, xzr // zero out the preemption pending field + bl _preempt_self + b \branch_to_take_on_success +.endmacro + + .section __TEXT_EXEC,__commpage_text,regular,pure_instructions + + /* Preemption free functions */ + .align 2 +_jump_table: // 32 entry jump table, only 2 are used + b _pfz_enqueue + b _pfz_dequeue + brk #666 + brk #666 + brk #666 + brk #666 + brk #666 + brk #666 + brk #666 + brk #666 + brk #666 + brk #666 + brk #666 + brk #666 + brk #666 + brk #666 + brk #666 + brk #666 + brk #666 + brk #666 + brk #666 + brk #666 + brk #666 + brk #666 + brk #666 + brk #666 + brk #666 + brk #666 + brk #666 + brk #666 + brk #666 + brk #666 + + +/* + * typedef volatile struct { + * void *opaque1; <-- ptr to first queue element or null + * void *opaque2; <-- ptr to second queue element or null + * int opaque3; <-- spinlock + * } osfifoqueuehead; + */ + +/* Non-preemptible helper routine to FIFO enqueue: + * int pfz_trylock_and_enqueue(OSFifoQueueHead *__list, void *__new, size_t __offset, uint32_t *lock_addr); + * + * x0 = pointer to queue head structure + * x1 = pointer to new element to enqueue + * x2 = offset of link field inside element + * x3 = address of lock + * + * Only caller save registers (x9 - x15) are used in this function + * + * Returns 0 on success and non-zero value on failure + */ + .globl _pfz_trylock_and_enqueue + .align 2 +_pfz_trylock_and_enqueue: + ARM64_STACK_PROLOG + PUSH_FRAME + + mov w10, wzr // unlock value = w10 = 0 + mov w11, #1 // locked value = w11 = 1 + + // Try to grab the lock + casa w10, w11, [x3] // Atomic CAS with acquire barrier + cbz w10, Ltrylock_enqueue_success + + mov x0, #-1 // Failed + b Ltrylock_enqueue_exit + + /* We got the lock, enqueue the element */ + +Ltrylock_enqueue_success: + ldr x10, [x0, #8] // x10 = tail of the queue + cbnz x10, Lnon_empty_queue // tail not NULL + str x1, [x0] // Set head to new element + b Lset_new_tail + +Lnon_empty_queue: + str x1, [x10, x2] // Set old tail -> offset = new elem + +Lset_new_tail: + str x1, [x0, #8] // Set tail = new elem + + // Drop spin lock with release barrier (pairs with acquire in casa) + stlr wzr, [x3] + + mov x0, xzr // Mark success + +Ltrylock_enqueue_exit: + POP_FRAME + ARM64_STACK_EPILOG + +/* Non-preemptible helper routine to FIFO dequeue: + * void *pfz_trylock_and_dequeue(OSFifoQueueHead *__list, size_t __offset, uint32_t *lock_addr); + * + * x0 = pointer to queue head structure + * x1 = pointer to new element to enqueue + * x2 = address of lock + * + * Only caller save registers (x9 - x15) are used in this function + * + * Returns -1 on failure, and the pointer on success (can be NULL) + */ + .globl _pfz_trylock_and_dequeue + .align 2 +_pfz_trylock_and_dequeue: + ARM64_STACK_PROLOG + PUSH_FRAME + + // Try to grab the lock + mov w10, wzr // unlock value = w10 = 0 + mov w11, #1 // locked value = w11 = 1 + + casa w10, w11, [x2] // Atomic CAS with acquire barrier + cbz w10, Ltrylock_dequeue_success + + mov x0, #-1 // Failed + b Ltrylock_dequeue_exit + + /* We got the lock, dequeue the element */ +Ltrylock_dequeue_success: + ldr x10, [x0] // x10 = head of the queue + cbz x10, Lreturn_head // if head is null, return + + ldr x11, [x10, x1] // get ptr to new head + cbnz x11, Lupdate_new_head // If new head != NULL, then not singleton. 
Only need to update head + + // Singleton case + str xzr, [x0, #8] // dequeuing from singleton queue, update tail to NULL + +Lupdate_new_head: + str xzr, [x10, x1] // zero the link in the old head + str x11, [x0] // Set up a new head + +Lreturn_head: + mov x0, x10 // Move head to x0 + stlr wzr, [x2] // Drop spin lock with release barrier (pairs with acquire in casa) + +Ltrylock_dequeue_exit: + POP_FRAME + ARM64_STACK_EPILOG + + + /* Preemptible functions */ + .private_extern _commpage_text_preemptible_functions +_commpage_text_preemptible_functions: + + +/* + * void pfz_enqueue(OSFifoQueueHead *__list, void *__new, size_t __offset); + * x0 = pointer to queue head + * x1 = pointer to new elem to enqueue + * x2 = offset of link field inside element + */ + .globl _pfz_enqueue + + .align 2 +_pfz_enqueue: + ARM64_STACK_PROLOG + PUSH_FRAME + + str xzr, [x1, x2] // Zero the forward link in the new element + mov x15, xzr // zero out the register used to communicate with kernel + + add x3, x0, #16 // address of lock = x3 = x0 + 16 +Lenqueue_trylock_loop: + + // Attempt #1 + TRYLOCK_ENQUEUE x9 + PREEMPT_SELF_THEN Lenqueue_determine_success + +Lenqueue_determine_success: + + cbz x9, Lenqueue_success // did we succeed? if so, exit + + ldxr w9, [x3] // arm the monitor for the lock address + cbz w9, Lenqueue_clear_monitor // lock is available, retry. + + wfe // Wait with monitor armed + + // Attempt #2 + TRYLOCK_ENQUEUE x9 + cbz x9, Lenqueue_take_delayed_preemption_upon_success // did we succeed? if so, exit + + // We failed twice - backoff then try again + + BACKOFF x3 + b Lenqueue_trylock_loop + +Lenqueue_clear_monitor: + clrex // Pairs with the ldxr + + // Take a preemption if needed then branch to enqueue_trylock_loop + PREEMPT_SELF_THEN Lenqueue_trylock_loop + +Lenqueue_take_delayed_preemption_upon_success: + PREEMPT_SELF_THEN Lenqueue_success + +Lenqueue_success: + POP_FRAME + ARM64_STACK_EPILOG + +/* + * void *pfz_dequeue(OSFifoQueueHead *__list, size_t __offset); + * x0 = pointer to queue head + * x1 = offset of link field inside element + * + * This function is not in the PFZ but calls out to a helper which is in the PFZ + * (_pfz_trylock_and_dequeue) + */ + .globl _pfz_dequeue + .align 2 +_pfz_dequeue: + ARM64_STACK_PROLOG + PUSH_FRAME + + mov x15, xzr // zero out the register used to communicate with kernel + + add x2, x0, #16 // address of lock = x2 = x0 + 16 +Ldequeue_trylock_loop: + + // Attempt #1 + TRYLOCK_DEQUEUE x9 + PREEMPT_SELF_THEN Ldequeue_determine_success + +Ldequeue_determine_success: + cmp x9, #-1 // is result of dequeue == -1? + b.ne Ldequeue_success // no, we succeeded + + ldxr w9, [x2] // arm the monitor for the lock address + cbz w9, Ldequeue_clear_monitor // lock is available, retry. + + wfe // Wait with monitor armed + + // Attempt #2 + TRYLOCK_DEQUEUE x9 + cmp x9, #-1 // did we fail? + b.ne Ldequeue_take_delayed_preemption_upon_success // no, we succeeded + + // We failed twice - backoff then try again + + BACKOFF x2 + b Ldequeue_trylock_loop + +Ldequeue_take_delayed_preemption_upon_success: + // We just got here after executing PFZ code, check if we need a preemption + PREEMPT_SELF_THEN Ldequeue_success + +Ldequeue_clear_monitor: + clrex // Pairs with the ldxr + // Take a preemption if needed then branch to dequeue_trylock_loop. + PREEMPT_SELF_THEN Ldequeue_trylock_loop + +Ldequeue_success: + mov x0, x9 // Move x9 (where result was stored earlier) to x0 + POP_FRAME + ARM64_STACK_EPILOG + + +/* void preempt_self(void) + * + * Make a syscall to take a preemption. 
This function is not in the PFZ. + */ + .align 2 +_preempt_self: + ARM64_STACK_PROLOG + PUSH_FRAME + + // Save registers on which will be clobbered by mach trap on stack and keep + // it 16 byte aligned + stp x0, x1, [sp, #-16]! + + // Note: We don't need to caller save registers since svc will trigger an + // exception and kernel will save and restore register state + + // Make syscall to take delayed preemption + mov x16, #-58 // -58 = pfz_exit + svc #0x80 + + // Restore registers from stack + ldp x0, x1, [sp], #16 + + POP_FRAME + ARM64_STACK_EPILOG + +/* + * void backoff(uint32_t *lock_addr); + * The function returns when it observes that the lock has become available. + * This function is not in the PFZ. + * + * x0 = lock address + */ + .align 2 + .globl _backoff +_backoff: + ARM64_STACK_PROLOG + PUSH_FRAME + + cbz x15, Lno_preempt // Kernel doesn't want to preempt us, jump to loop + + mov x15, xzr // zero out the preemption pending field + bl _preempt_self + +Lno_preempt: + ldxr w9, [x0] // Snoop on lock and arm the monitor + cbz w9, Lend_backoff // The lock seems to be available, return + + wfe // pause + + b Lno_preempt + +Lend_backoff: + clrex + + POP_FRAME + ARM64_STACK_EPILOG diff --git a/osfmk/arm/cpu.c b/osfmk/arm/cpu.c index e641e72d9..cce170bb0 100644 --- a/osfmk/arm/cpu.c +++ b/osfmk/arm/cpu.c @@ -34,6 +34,7 @@ #include #include #include +#include #include #include #include @@ -134,8 +135,8 @@ cpu_idle(void) } cpu_data_ptr->cpu_user_debug = NULL; - if (cpu_data_ptr->cpu_idle_notify) { - ((processor_idle_t) cpu_data_ptr->cpu_idle_notify)(cpu_data_ptr->cpu_id, TRUE, &new_idle_timeout_ticks); + if (cpu_data_ptr->cpu_idle_notify != NULL) { + cpu_data_ptr->cpu_idle_notify(cpu_data_ptr->cpu_id, TRUE, &new_idle_timeout_ticks); } if (cpu_data_ptr->idle_timer_notify != 0) { @@ -148,13 +149,17 @@ cpu_idle(void) } timer_resync_deadlines(); if (cpu_data_ptr->rtcPop != lastPop) { + /* + * Ignore the return value here: this CPU has called idle_notify and + * committed to going idle. 
+ */ SetIdlePop(); } } #if KPC kpc_idle(); -#endif +#endif /* KPC */ platform_cache_idle_enter(); cpu_idle_wfi((boolean_t) wfi_fast); @@ -181,8 +186,8 @@ cpu_idle_exit(boolean_t from_reset __unused) pmap_set_pmap(cpu_data_ptr->cpu_active_thread->map->pmap, current_thread()); - if (cpu_data_ptr->cpu_idle_notify) { - ((processor_idle_t) cpu_data_ptr->cpu_idle_notify)(cpu_data_ptr->cpu_id, FALSE, &new_idle_timeout_ticks); + if (cpu_data_ptr->cpu_idle_notify != NULL) { + cpu_data_ptr->cpu_idle_notify(cpu_data_ptr->cpu_id, FALSE, &new_idle_timeout_ticks); } if (cpu_data_ptr->idle_timer_notify != 0) { @@ -265,7 +270,6 @@ cpu_init(void) } cdp->cpu_stat.irq_ex_cnt_wake = 0; cdp->cpu_stat.ipi_cnt_wake = 0; - cdp->cpu_stat.timer_cnt_wake = 0; cdp->cpu_running = TRUE; cdp->cpu_sleep_token_last = cdp->cpu_sleep_token; cdp->cpu_sleep_token = 0x0UL; @@ -309,37 +313,34 @@ cpu_data_free(cpu_data_t *cpu_data_ptr) return; } - cpu_processor_free( cpu_data_ptr->cpu_processor); - if (CpuDataEntries[cpu_data_ptr->cpu_number].cpu_data_vaddr == cpu_data_ptr) { + int cpu_number = cpu_data_ptr->cpu_number; + + if (CpuDataEntries[cpu_number].cpu_data_vaddr == cpu_data_ptr) { OSDecrementAtomic((SInt32*)&real_ncpus); - CpuDataEntries[cpu_data_ptr->cpu_number].cpu_data_vaddr = NULL; - CpuDataEntries[cpu_data_ptr->cpu_number].cpu_data_paddr = 0; + CpuDataEntries[cpu_number].cpu_data_vaddr = NULL; + CpuDataEntries[cpu_number].cpu_data_paddr = 0; __builtin_arm_dmb(DMB_ISH); // Ensure prior stores to cpu array are visible } (kfree)((void *)(cpu_data_ptr->intstack_top - INTSTACK_SIZE), INTSTACK_SIZE); (kfree)((void *)(cpu_data_ptr->fiqstack_top - FIQSTACK_SIZE), FIQSTACK_SIZE); - kmem_free(kernel_map, (vm_offset_t)cpu_data_ptr, sizeof(cpu_data_t)); } void cpu_data_init(cpu_data_t *cpu_data_ptr) { - uint32_t i = 0; - cpu_data_ptr->cpu_flags = 0; #if __arm__ cpu_data_ptr->cpu_exc_vectors = (vm_offset_t)&ExceptionVectorsTable; #endif - cpu_data_ptr->interrupts_enabled = 0; cpu_data_ptr->cpu_int_state = 0; cpu_data_ptr->cpu_pending_ast = AST_NONE; - cpu_data_ptr->cpu_cache_dispatch = (void *) 0; + cpu_data_ptr->cpu_cache_dispatch = NULL; cpu_data_ptr->rtcPop = EndOfAllTime; cpu_data_ptr->rtclock_datap = &RTClockData; cpu_data_ptr->cpu_user_debug = NULL; cpu_data_ptr->cpu_base_timebase_low = 0; cpu_data_ptr->cpu_base_timebase_high = 0; - cpu_data_ptr->cpu_idle_notify = (void *) 0; + cpu_data_ptr->cpu_idle_notify = NULL; cpu_data_ptr->cpu_idle_latency = 0x0ULL; cpu_data_ptr->cpu_idle_pop = 0x0ULL; cpu_data_ptr->cpu_reset_type = 0x0UL; @@ -369,7 +370,7 @@ cpu_data_init(cpu_data_t *cpu_data_ptr) cpu_data_ptr->cpu_imm_xcall_p0 = NULL; cpu_data_ptr->cpu_imm_xcall_p1 = NULL; -#if __ARM_SMP__ && defined(ARMA7) +#if defined(ARMA7) cpu_data_ptr->cpu_CLWFlush_req = 0x0ULL; cpu_data_ptr->cpu_CLWFlush_last = 0x0ULL; cpu_data_ptr->cpu_CLWClean_req = 0x0ULL; @@ -384,9 +385,7 @@ cpu_data_init(cpu_data_t *cpu_data_ptr) pmap_cpu_data_ptr->cpu_user_pmap_stamp = 0; pmap_cpu_data_ptr->cpu_number = PMAP_INVALID_CPU_NUM; - for (i = 0; i < (sizeof(pmap_cpu_data_ptr->cpu_asid_high_bits) / sizeof(*pmap_cpu_data_ptr->cpu_asid_high_bits)); i++) { - pmap_cpu_data_ptr->cpu_asid_high_bits[i] = 0; - } + bzero(&(pmap_cpu_data_ptr->cpu_sw_asids[0]), sizeof(pmap_cpu_data_ptr->cpu_sw_asids)); #endif cpu_data_ptr->halt_status = CPU_NOT_HALTED; } @@ -397,7 +396,7 @@ cpu_data_register(cpu_data_t *cpu_data_ptr) int cpu; cpu = OSIncrementAtomic((SInt32*)&real_ncpus); - if (real_ncpus > MAX_CPUS) { + if (real_ncpus > ml_get_cpu_count()) { return KERN_FAILURE; } @@ 
-416,9 +415,9 @@ cpu_start(int cpu) cpu_machine_init(); return KERN_SUCCESS; } else { -#if __ARM_SMP__ cpu_data_t *cpu_data_ptr; thread_t first_thread; + processor_t processor; cpu_data_ptr = CpuDataEntries[cpu].cpu_data_vaddr; cpu_data_ptr->cpu_reset_handler = (vm_offset_t) start_cpu_paddr; @@ -427,21 +426,21 @@ cpu_start(int cpu) cpu_data_ptr->cpu_pmap_cpu_data.cpu_user_pmap = NULL; #endif - if (cpu_data_ptr->cpu_processor->startup_thread != THREAD_NULL) { - first_thread = cpu_data_ptr->cpu_processor->startup_thread; + processor = PERCPU_GET_RELATIVE(processor, cpu_data, cpu_data_ptr); + if (processor->startup_thread != THREAD_NULL) { + first_thread = processor->startup_thread; } else { - first_thread = cpu_data_ptr->cpu_processor->idle_thread; + first_thread = processor->idle_thread; } cpu_data_ptr->cpu_active_thread = first_thread; first_thread->machine.CpuDatap = cpu_data_ptr; + first_thread->machine.pcpu_data_base = + (vm_address_t)cpu_data_ptr - __PERCPU_ADDR(cpu_data); flush_dcache((vm_offset_t)&CpuDataEntries[cpu], sizeof(cpu_data_entry_t), FALSE); flush_dcache((vm_offset_t)cpu_data_ptr, sizeof(cpu_data_t), FALSE); (void) PE_cpu_start(cpu_data_ptr->cpu_id, (vm_offset_t)NULL, (vm_offset_t)NULL); return KERN_SUCCESS; -#else - return KERN_FAILURE; -#endif } } @@ -482,7 +481,8 @@ ml_arm_sleep(void) cpu_data_t *target_cdp; unsigned int cpu; - for (cpu = 0; cpu < MAX_CPUS; cpu++) { + const unsigned int max_cpu_id = ml_get_max_cpu_number(); + for (cpu = 0; cpu <= max_cpu_id; cpu++) { target_cdp = (cpu_data_t *)CpuDataEntries[cpu].cpu_data_vaddr; if (target_cdp == (cpu_data_t *)NULL) { break; @@ -506,7 +506,7 @@ ml_arm_sleep(void) CleanPoU_Dcache(); } cpu_data_ptr->cpu_sleep_token = ARM_CPU_ON_SLEEP_PATH; -#if __ARM_SMP__ && defined(ARMA7) +#if defined(ARMA7) cpu_data_ptr->cpu_CLWFlush_req = 0; cpu_data_ptr->cpu_CLWClean_req = 0; __builtin_arm_dmb(DMB_ISH); diff --git a/osfmk/arm/cpu_capabilities.h b/osfmk/arm/cpu_capabilities.h index b0f2b3fda..738e94f02 100644 --- a/osfmk/arm/cpu_capabilities.h +++ b/osfmk/arm/cpu_capabilities.h @@ -35,6 +35,7 @@ #include #endif + #define USER_TIMEBASE_NONE 0 #define USER_TIMEBASE_SPEC 1 @@ -48,8 +49,6 @@ /* * Bit definitions for _cpu_capabilities: */ -#define kHasICDSBShift 2 -#define kHasICDSB 0x00000004 // ICache Data Syncronization on DSB enabled (H13) #define kHasNeonFP16 0x00000008 // ARM v8.2 NEON FP16 supported #define kCache32 0x00000010 // cache line size is 32 bytes #define kCache64 0x00000020 // cache line size is 64 bytes @@ -67,6 +66,9 @@ #define kHasARMv8Crypto 0x01000000 // Optional ARMv8 Crypto extensions #define kHasARMv81Atomics 0x02000000 // ARMv8.1 Atomic instructions supported #define kHasARMv8Crc32 0x04000000 // Optional ARMv8 crc32 instructions (required in ARMv8.1) +#define kHasARMv82SHA512 0x80000000 // Optional ARMv8.2 SHA512 instructions +/* Extending into 64-bits from here: */ +#define kHasARMv82SHA3 0x0000000100000000 // Optional ARMv8.2 SHA3 instructions #define kNumCPUsShift 16 // see _NumCPUs() below /* @@ -79,7 +81,7 @@ #include __BEGIN_DECLS -extern int _get_cpu_capabilities( void ); +extern uint64_t _get_cpu_capabilities( void ); __END_DECLS __inline static @@ -103,6 +105,7 @@ typedef struct { __BEGIN_DECLS extern vm_address_t _get_commpage_priv_address(void); +extern vm_address_t _get_commpage_text_priv_address(void); __END_DECLS #endif /* __ASSEMBLER__ */ @@ -115,7 +118,11 @@ __END_DECLS #if defined(__LP64__) #define _COMM_PAGE64_BASE_ADDRESS (0x0000000FFFFFC000ULL) /* In TTBR0 */ +#if defined(ARM_LARGE_MEMORY) 
+#define _COMM_HIGH_PAGE64_BASE_ADDRESS (0xFFFFFE00001FC000ULL) /* Just below the kernel, safely in TTBR1; only used for testing */ +#else #define _COMM_HIGH_PAGE64_BASE_ADDRESS (0xFFFFFFF0001FC000ULL) /* Just below the kernel, safely in TTBR1; only used for testing */ +#endif #define _COMM_PAGE64_AREA_LENGTH (_COMM_PAGE32_AREA_LENGTH) #define _COMM_PAGE64_AREA_USED (-1) @@ -128,6 +135,28 @@ __END_DECLS #define _COMM_PAGE_BASE_ADDRESS (_get_commpage_priv_address()) #define _COMM_PAGE_START_ADDRESS (_get_commpage_priv_address()) + +/** + * This represents the size of the memory region that the commpage is nested in. + * On 4K page systems, this is 1GB, and on 16KB page systems this is technically + * only 32MB, but to keep consistency across address spaces we always reserve + * 1GB for the commpage on ARM devices. + * + * The commpage itself only takes up a single page, but its page tables are + * being shared across every user process. Entries should not be allowed to + * be created in those shared tables, which is why the VM uses these values to + * reserve the entire nesting region in every user process address space. + * + * If the commpage base address changes, these values might also need to be + * updated. + */ +#define _COMM_PAGE64_NESTING_START (0x0000000FC0000000ULL) +#define _COMM_PAGE64_NESTING_SIZE (0x40000000ULL) /* 1GiB */ +_Static_assert((_COMM_PAGE64_BASE_ADDRESS >= _COMM_PAGE64_NESTING_START) && + (_COMM_PAGE64_BASE_ADDRESS < (_COMM_PAGE64_NESTING_START + _COMM_PAGE64_NESTING_SIZE)), + "_COMM_PAGE64_BASE_ADDRESS is not within the nesting region. Commpage nesting " + "region probably needs to be updated."); + #else /* KERNEL_PRIVATE */ #define _COMM_PAGE_AREA_LENGTH (4096) @@ -135,7 +164,7 @@ __END_DECLS #define _COMM_PAGE_START_ADDRESS _COMM_PAGE64_BASE_ADDRESS #endif /* KERNEL_PRIVATE */ -#else +#else /* __LP64__ */ #define _COMM_PAGE64_BASE_ADDRESS (-1) #define _COMM_PAGE64_AREA_LENGTH (-1) @@ -149,23 +178,18 @@ __END_DECLS #ifdef KERNEL_PRIVATE #define _COMM_PAGE_RW_OFFSET (_get_commpage_priv_address()-_COMM_PAGE_BASE_ADDRESS) #define _COMM_PAGE_AREA_LENGTH (PAGE_SIZE) -#else +#else /* KERNEL_PRIVATE */ #define _COMM_PAGE_AREA_LENGTH (4096) -#endif +#endif /* KERNEL_PRIVATE */ #define _COMM_PAGE_BASE_ADDRESS _COMM_PAGE32_BASE_ADDRESS #define _COMM_PAGE_START_ADDRESS _COMM_PAGE32_BASE_ADDRESS -#endif +#endif /* __LP64__ */ #define _COMM_PAGE32_BASE_ADDRESS (0xFFFF4000) /* Must be outside of normal map bounds */ #define _COMM_PAGE32_AREA_LENGTH (_COMM_PAGE_AREA_LENGTH) - -#define _COMM_PAGE_TEXT_START (-1) #define _COMM_PAGE32_TEXT_START (-1) -#define _COMM_PAGE64_TEXT_START (-1) -#define _COMM_PAGE_PFZ_START_OFFSET (-1) -#define _COMM_PAGE_PFZ_END_OFFSET (-1) #define _COMM_PAGE32_OBJC_SIZE 0ULL #define _COMM_PAGE32_OBJC_BASE 0ULL @@ -178,6 +202,8 @@ __END_DECLS */ #define _COMM_PAGE_SIGNATURE (_COMM_PAGE_START_ADDRESS+0x000) // first few bytes are a signature #define _COMM_PAGE_SIGNATURELEN (0x10) +#define _COMM_PAGE_CPU_CAPABILITIES64 (_COMM_PAGE_START_ADDRESS+0x010) /* uint64_t _cpu_capabilities */ +#define _COMM_PAGE_UNUSED (_COMM_PAGE_START_ADDRESS+0x018) /* 6 unused bytes */ #define _COMM_PAGE_VERSION (_COMM_PAGE_START_ADDRESS+0x01E) // 16-bit version# #define _COMM_PAGE_THIS_VERSION 3 // version of the commarea format @@ -186,8 +212,8 @@ __END_DECLS #define _COMM_PAGE_USER_PAGE_SHIFT_32 (_COMM_PAGE_START_ADDRESS+0x024) // VM page shift for 32-bit processes #define _COMM_PAGE_USER_PAGE_SHIFT_64 (_COMM_PAGE_START_ADDRESS+0x025) // VM page shift for 64-bit processes 
#define _COMM_PAGE_CACHE_LINESIZE (_COMM_PAGE_START_ADDRESS+0x026) // uint16_t cache line size -#define _COMM_PAGE_SCHED_GEN (_COMM_PAGE_START_ADDRESS+0x028) // uint32_t scheduler generation number (count of pre-emptions) -#define _COMM_PAGE_SPIN_COUNT (_COMM_PAGE_START_ADDRESS+0x02C) // uint32_t max spin count for mutex's +#define _COMM_PAGE_UNUSED4 (_COMM_PAGE_START_ADDRESS+0x028) // used to be _COMM_PAGE_SCHED_GEN: uint32_t scheduler generation number (count of pre-emptions) +#define _COMM_PAGE_UNUSED3 (_COMM_PAGE_START_ADDRESS+0x02C) // used to be _COMM_PAGE_SPIN_COUNT: uint32_t max spin count for mutex's #define _COMM_PAGE_MEMORY_PRESSURE (_COMM_PAGE_START_ADDRESS+0x030) // uint32_t copy of vm_memory_pressure #define _COMM_PAGE_ACTIVE_CPUS (_COMM_PAGE_START_ADDRESS+0x034) // uint8_t number of active CPUs (hw.activecpu) #define _COMM_PAGE_PHYSICAL_CPUS (_COMM_PAGE_START_ADDRESS+0x035) // uint8_t number of physical CPUs (hw.physicalcpu_max) @@ -202,8 +228,9 @@ __END_DECLS #define _COMM_PAGE_CONT_HWCLOCK (_COMM_PAGE_START_ADDRESS+0x091) // uint8_t is always-on hardware clock present for mach_continuous_time() #define _COMM_PAGE_DTRACE_DOF_ENABLED (_COMM_PAGE_START_ADDRESS+0x092) // uint8_t 0 if userspace DOF disable, 1 if enabled #define _COMM_PAGE_UNUSED0 (_COMM_PAGE_START_ADDRESS+0x093) // 5 unused bytes -#define _COMM_PAGE_CONT_TIMEBASE (_COMM_PAGE_START_ADDRESS+0x098) // uint64_t base for mach_continuous_time() +#define _COMM_PAGE_CONT_TIMEBASE (_COMM_PAGE_START_ADDRESS+0x098) // uint64_t base for mach_continuous_time() relative to mach_absolute_time() #define _COMM_PAGE_BOOTTIME_USEC (_COMM_PAGE_START_ADDRESS+0x0A0) // uint64_t boottime in microseconds +#define _COMM_PAGE_CONT_HW_TIMEBASE (_COMM_PAGE_START_ADDRESS+0x0A8) // uint64_t base for mach_continuous_time() relative to CNT[PV]CT // aligning to 64byte for cacheline size #define _COMM_PAGE_APPROX_TIME (_COMM_PAGE_START_ADDRESS+0x0C0) // uint64_t last known mach_absolute_time() @@ -217,12 +244,25 @@ __END_DECLS #define _COMM_PAGE_NEWTIMEOFDAY_DATA (_COMM_PAGE_START_ADDRESS+0x120) // used by gettimeofday(). Currently, sizeof(new_commpage_timeofday_data_t) = 40. #define _COMM_PAGE_REMOTETIME_PARAMS (_COMM_PAGE_START_ADDRESS+0x148) // used by mach_bridge_remote_time(). Currently, sizeof(struct bt_params) = 24 -#define _COMM_PAGE_DYLD_SYSTEM_FLAGS (_COMM_PAGE_START_ADDRESS+0x160) // uint64_t export kern.dyld_system_flags to userspace +#define _COMM_PAGE_DYLD_FLAGS (_COMM_PAGE_START_ADDRESS+0x160) // uint64_t export kern.dyld_system_flags to userspace // aligning to 128 bytes for cacheline/fabric size #define _COMM_PAGE_CPU_QUIESCENT_COUNTER (_COMM_PAGE_START_ADDRESS+0x180) // uint64_t, but reserve the whole 128 (0x80) bytes -#define _COMM_PAGE_END (_COMM_PAGE_START_ADDRESS+0x1000) // end of common page +#define _COMM_PAGE_END (_COMM_PAGE_START_ADDRESS+0xfff) // end of common page + +#if defined(__LP64__) +#if KERNEL_PRIVATE +#define _COMM_PAGE64_TEXT_START_ADDRESS (_get_commpage_text_priv_address()) // Address through physical aperture +#endif +/* Offset in bytes from start of text comm page to get to these functions. 
Start + * address to text comm page is from apple array */ +#define _COMM_PAGE_TEXT_ATOMIC_ENQUEUE (0x0) +#define _COMM_PAGE_TEXT_ATOMIC_DEQUEUE (0x4) + +#else /* __LP64__ */ +/* No 32 bit text region */ +#endif /* __LP64__ */ #endif /* _ARM_CPU_CAPABILITIES_H */ #endif /* PRIVATE */ diff --git a/osfmk/arm/cpu_common.c b/osfmk/arm/cpu_common.c index 9d972f6e4..11ad96d9e 100644 --- a/osfmk/arm/cpu_common.c +++ b/osfmk/arm/cpu_common.c @@ -31,11 +31,12 @@ * cpu routines common to all supported arm variants */ -#include #include #include #include +#include #include +#include #include #include #include @@ -53,16 +54,16 @@ #include #include #include +#include #include -#if KPERF -void kperf_signal_handler(unsigned int cpu_number); -#endif - -cpu_data_t BootCpuData; +SECURITY_READ_ONLY_LATE(struct percpu_base) percpu_base; +vm_address_t percpu_base_cur; +cpu_data_t PERCPU_DATA(cpu_data); cpu_data_entry_t CpuDataEntries[MAX_CPUS]; -struct processor BootProcessor; +static lck_grp_t cpu_lck_grp; +static lck_rw_t cpu_state_lock; unsigned int real_ncpus = 1; boolean_t idle_enable = FALSE; @@ -75,7 +76,7 @@ extern unsigned int gFastIPI; cpu_data_t * cpu_datap(int cpu) { - assert(cpu < MAX_CPUS); + assert(cpu <= ml_get_max_cpu_number()); return CpuDataEntries[cpu].cpu_data_vaddr; } @@ -192,8 +193,8 @@ cpu_idle_tickle(void) intr = ml_set_interrupts_enabled(FALSE); cpu_data_ptr = getCpuDatap(); - if (cpu_data_ptr->idle_timer_notify != (void *)NULL) { - ((idle_timer_t)cpu_data_ptr->idle_timer_notify)(cpu_data_ptr->idle_timer_refcon, &new_idle_timeout_ticks); + if (cpu_data_ptr->idle_timer_notify != NULL) { + cpu_data_ptr->idle_timer_notify(cpu_data_ptr->idle_timer_refcon, &new_idle_timeout_ticks); if (new_idle_timeout_ticks != 0x0ULL) { /* if a new idle timeout was requested set the new idle timer deadline */ clock_absolutetime_interval_to_deadline(new_idle_timeout_ticks, &cpu_data_ptr->idle_timer_deadline); @@ -217,21 +218,25 @@ cpu_handle_xcall(cpu_data_t *cpu_data_ptr) * added SIGPxcall to the pending mask, but hasn't yet assigned the call params.*/ if (cpu_data_ptr->cpu_xcall_p0 != NULL && cpu_data_ptr->cpu_xcall_p1 != NULL) { xfunc = cpu_data_ptr->cpu_xcall_p0; + INTERRUPT_MASKED_DEBUG_START(xfunc, DBG_INTR_TYPE_IPI); xparam = cpu_data_ptr->cpu_xcall_p1; cpu_data_ptr->cpu_xcall_p0 = NULL; cpu_data_ptr->cpu_xcall_p1 = NULL; os_atomic_thread_fence(acq_rel); os_atomic_andnot(&cpu_data_ptr->cpu_signal, SIGPxcall, relaxed); xfunc(xparam); + INTERRUPT_MASKED_DEBUG_END(); } if (cpu_data_ptr->cpu_imm_xcall_p0 != NULL && cpu_data_ptr->cpu_imm_xcall_p1 != NULL) { xfunc = cpu_data_ptr->cpu_imm_xcall_p0; + INTERRUPT_MASKED_DEBUG_START(xfunc, DBG_INTR_TYPE_IPI); xparam = cpu_data_ptr->cpu_imm_xcall_p1; cpu_data_ptr->cpu_imm_xcall_p0 = NULL; cpu_data_ptr->cpu_imm_xcall_p1 = NULL; os_atomic_thread_fence(acq_rel); os_atomic_andnot(&cpu_data_ptr->cpu_signal, SIGPxcallImm, relaxed); xfunc(xparam); + INTERRUPT_MASKED_DEBUG_END(); } } @@ -249,6 +254,10 @@ cpu_broadcast_xcall_internal(unsigned int signal, int cpu; int max_cpu = ml_get_max_cpu_number() + 1; + //yes, param ALSO cannot be NULL + assert(func); + assert(parm); + intr = ml_set_interrupts_enabled(FALSE); cpu_data_ptr = getCpuDatap(); @@ -303,6 +312,38 @@ cpu_broadcast_xcall(uint32_t *synch, return cpu_broadcast_xcall_internal(SIGPxcall, synch, self_xcall, func, parm); } +struct cpu_broadcast_xcall_simple_data { + broadcastFunc func; + void* parm; + uint32_t sync; +}; + +static void +cpu_broadcast_xcall_simple_cbk(void *parm) +{ + struct 
cpu_broadcast_xcall_simple_data *data = (struct cpu_broadcast_xcall_simple_data*)parm; + + data->func(data->parm); + + if (os_atomic_dec(&data->sync, relaxed) == 0) { + thread_wakeup((event_t)&data->sync); + } +} + +static unsigned int +cpu_xcall_simple(boolean_t self_xcall, + broadcastFunc func, + void *parm, + bool immediate) +{ + struct cpu_broadcast_xcall_simple_data data = {}; + + data.func = func; + data.parm = parm; + + return cpu_broadcast_xcall_internal(immediate ? SIGPxcallImm : SIGPxcall, &data.sync, self_xcall, cpu_broadcast_xcall_simple_cbk, &data); +} + unsigned int cpu_broadcast_immediate_xcall(uint32_t *synch, boolean_t self_xcall, @@ -312,6 +353,22 @@ cpu_broadcast_immediate_xcall(uint32_t *synch, return cpu_broadcast_xcall_internal(SIGPxcallImm, synch, self_xcall, func, parm); } +unsigned int +cpu_broadcast_xcall_simple(boolean_t self_xcall, + broadcastFunc func, + void *parm) +{ + return cpu_xcall_simple(self_xcall, func, parm, false); +} + +unsigned int +cpu_broadcast_immediate_xcall_simple(boolean_t self_xcall, + broadcastFunc func, + void *parm) +{ + return cpu_xcall_simple(self_xcall, func, parm, true); +} + static kern_return_t cpu_xcall_internal(unsigned int signal, int cpu_number, broadcastFunc func, void *param) { @@ -492,11 +549,9 @@ cpu_signal_handler_internal(boolean_t disable_signal) cpu_data_t *cpu_data_ptr = getCpuDatap(); unsigned int cpu_signal; - cpu_data_ptr->cpu_stat.ipi_cnt++; cpu_data_ptr->cpu_stat.ipi_cnt_wake++; - - SCHED_STATS_IPI(current_processor()); + SCHED_STATS_INC(ipi_count); cpu_signal = os_atomic_or(&cpu_data_ptr->cpu_signal, 0, relaxed); @@ -509,33 +564,46 @@ cpu_signal_handler_internal(boolean_t disable_signal) while (cpu_signal & ~SIGPdisabled) { if (cpu_signal & SIGPdec) { os_atomic_andnot(&cpu_data_ptr->cpu_signal, SIGPdec, relaxed); + INTERRUPT_MASKED_DEBUG_START(rtclock_intr, DBG_INTR_TYPE_IPI); rtclock_intr(FALSE); + INTERRUPT_MASKED_DEBUG_END(); } #if KPERF - if (cpu_signal & SIGPkptimer) { - os_atomic_andnot(&cpu_data_ptr->cpu_signal, SIGPkptimer, relaxed); - kperf_signal_handler((unsigned int)cpu_data_ptr->cpu_number); + if (cpu_signal & SIGPkppet) { + os_atomic_andnot(&cpu_data_ptr->cpu_signal, SIGPkppet, relaxed); + extern void kperf_signal_handler(void); + INTERRUPT_MASKED_DEBUG_START(kperf_signal_handler, DBG_INTR_TYPE_IPI); + kperf_signal_handler(); + INTERRUPT_MASKED_DEBUG_END(); } -#endif +#endif /* KPERF */ if (cpu_signal & (SIGPxcall | SIGPxcallImm)) { cpu_handle_xcall(cpu_data_ptr); } if (cpu_signal & SIGPast) { os_atomic_andnot(&cpu_data_ptr->cpu_signal, SIGPast, relaxed); - ast_check(cpu_data_ptr->cpu_processor); + INTERRUPT_MASKED_DEBUG_START(ast_check, DBG_INTR_TYPE_IPI); + ast_check(current_processor()); + INTERRUPT_MASKED_DEBUG_END(); } if (cpu_signal & SIGPdebug) { os_atomic_andnot(&cpu_data_ptr->cpu_signal, SIGPdebug, relaxed); + INTERRUPT_MASKED_DEBUG_START(DebuggerXCall, DBG_INTR_TYPE_IPI); DebuggerXCall(cpu_data_ptr->cpu_int_state); + INTERRUPT_MASKED_DEBUG_END(); } -#if __ARM_SMP__ && defined(ARMA7) +#if defined(ARMA7) if (cpu_signal & SIGPLWFlush) { os_atomic_andnot(&cpu_data_ptr->cpu_signal, SIGPLWFlush, relaxed); + INTERRUPT_MASKED_DEBUG_START(cache_xcall_handler, DBG_INTR_TYPE_IPI); cache_xcall_handler(LWFlush); + INTERRUPT_MASKED_DEBUG_END(); } if (cpu_signal & SIGPLWClean) { os_atomic_andnot(&cpu_data_ptr->cpu_signal, SIGPLWClean, relaxed); + INTERRUPT_MASKED_DEBUG_START(cache_xcall_handler, DBG_INTR_TYPE_IPI); cache_xcall_handler(LWClean); + INTERRUPT_MASKED_DEBUG_END(); } #endif @@ -544,12 +612,28 
@@ cpu_signal_handler_internal(boolean_t disable_signal) } void -cpu_exit_wait(int cpu) -{ - if (cpu != master_cpu) { +cpu_exit_wait(int cpu_id) +{ +#if USE_APPLEARMSMP + if (!ml_is_quiescing()) { + // For runtime disable (non S2R) the CPU will shut down immediately. + ml_topology_cpu_t *cpu = &ml_get_topology_info()->cpus[cpu_id]; + assert(cpu && cpu->cpu_IMPL_regs); + volatile uint64_t *cpu_sts = (void *)(cpu->cpu_IMPL_regs + CPU_PIO_CPU_STS_OFFSET); + + // Poll the "CPU running state" field until it is 0 (off) + while ((*cpu_sts & CPU_PIO_CPU_STS_cpuRunSt_mask) != 0x00) { + __builtin_arm_dsb(DSB_ISH); + } + return; + } +#endif /* USE_APPLEARMSMP */ + + if (cpu_id != master_cpu) { + // For S2R, ml_arm_sleep() will do some extra polling after setting ARM_CPU_ON_SLEEP_PATH. cpu_data_t *cpu_data_ptr; - cpu_data_ptr = CpuDataEntries[cpu].cpu_data_vaddr; + cpu_data_ptr = CpuDataEntries[cpu_id].cpu_data_vaddr; while (!((*(volatile unsigned int*)&cpu_data_ptr->cpu_sleep_token) == ARM_CPU_ON_SLEEP_PATH)) { } ; @@ -570,7 +654,7 @@ cpu_machine_init(void) cpu_data_ptr = getCpuDatap(); started = ((cpu_data_ptr->cpu_flags & StartedState) == StartedState); - if (cpu_data_ptr->cpu_cache_dispatch != (cache_dispatch_t) NULL) { + if (cpu_data_ptr->cpu_cache_dispatch != NULL) { platform_cache_init(); } @@ -581,36 +665,10 @@ cpu_machine_init(void) ml_init_interrupt(); } -processor_t -cpu_processor_alloc(boolean_t is_boot_cpu) -{ - processor_t proc; - - if (is_boot_cpu) { - return &BootProcessor; - } - - proc = kalloc(sizeof(*proc)); - if (!proc) { - return NULL; - } - - bzero((void *) proc, sizeof(*proc)); - return proc; -} - -void -cpu_processor_free(processor_t proc) -{ - if (proc != NULL && proc != &BootProcessor) { - kfree(proc, sizeof(*proc)); - } -} - processor_t current_processor(void) { - return getCpuDatap()->cpu_processor; + return PERCPU_GET(processor); } processor_t @@ -618,7 +676,7 @@ cpu_to_processor(int cpu) { cpu_data_t *cpu_data = cpu_datap(cpu); if (cpu_data != NULL) { - return cpu_data->cpu_processor; + return PERCPU_GET_RELATIVE(processor, cpu_data, cpu_data); } else { return NULL; } @@ -627,44 +685,59 @@ cpu_to_processor(int cpu) cpu_data_t * processor_to_cpu_datap(processor_t processor) { - cpu_data_t *target_cpu_datap; - - assert(processor->cpu_id < MAX_CPUS); + assert(processor->cpu_id <= ml_get_max_cpu_number()); assert(CpuDataEntries[processor->cpu_id].cpu_data_vaddr != NULL); - target_cpu_datap = (cpu_data_t*)CpuDataEntries[processor->cpu_id].cpu_data_vaddr; - assert(target_cpu_datap->cpu_processor == processor); + return PERCPU_GET_RELATIVE(cpu_data, processor, processor); +} + +__startup_func +static void +cpu_data_startup_init(void) +{ + vm_size_t size = percpu_section_size() * (ml_get_cpu_count() - 1); + + percpu_base.size = percpu_section_size(); + if (ml_get_cpu_count() == 1) { + percpu_base.start = VM_MAX_KERNEL_ADDRESS; + return; + } + + /* + * The memory needs to be physically contiguous because it contains + * cpu_data_t structures sometimes accessed during reset + * with the MMU off. + * + * kmem_alloc_contig() can't be used early, at the time STARTUP_SUB_PERCPU + * normally runs, so we instead steal the memory for the PERCPU subsystem + * even earlier. 
+ */ + percpu_base.start = (vm_offset_t)pmap_steal_memory(round_page(size)); + bzero((void *)percpu_base.start, round_page(size)); - return target_cpu_datap; + percpu_base.start -= percpu_section_start(); + percpu_base.end = percpu_base.start + size - 1; + percpu_base_cur = percpu_base.start; } +STARTUP(PMAP_STEAL, STARTUP_RANK_FIRST, cpu_data_startup_init); cpu_data_t * cpu_data_alloc(boolean_t is_boot_cpu) { - cpu_data_t *cpu_data_ptr = NULL; + cpu_data_t *cpu_data_ptr = NULL; + vm_address_t base; if (is_boot_cpu) { - cpu_data_ptr = &BootCpuData; + cpu_data_ptr = PERCPU_GET_MASTER(cpu_data); } else { - if ((kmem_alloc(kernel_map, (vm_offset_t *)&cpu_data_ptr, sizeof(cpu_data_t), VM_KERN_MEMORY_CPU)) != KERN_SUCCESS) { - goto cpu_data_alloc_error; - } - - bzero((void *)cpu_data_ptr, sizeof(cpu_data_t)); + base = os_atomic_add_orig(&percpu_base_cur, + percpu_section_size(), relaxed); + cpu_data_ptr = PERCPU_GET_WITH_BASE(base, cpu_data); cpu_stack_alloc(cpu_data_ptr); } - cpu_data_ptr->cpu_processor = cpu_processor_alloc(is_boot_cpu); - if (cpu_data_ptr->cpu_processor == (struct processor *)NULL) { - goto cpu_data_alloc_error; - } - return cpu_data_ptr; - -cpu_data_alloc_error: - panic("cpu_data_alloc() failed\n"); - return (cpu_data_t *)NULL; } ast_t * @@ -715,8 +788,100 @@ cpu_number(void) return getCpuDatap()->cpu_number; } +vm_offset_t +current_percpu_base(void) +{ + return current_thread()->machine.pcpu_data_base; +} + uint64_t ml_get_wake_timebase(void) { return wake_abstime; } + +bool +ml_cpu_signal_is_enabled(void) +{ + return !(getCpuDatap()->cpu_signal & SIGPdisabled); +} + +bool +ml_cpu_can_exit(__unused int cpu_id) +{ + /* processor_exit() is always allowed on the S2R path */ + if (ml_is_quiescing()) { + return true; + } +#if HAS_CLUSTER && USE_APPLEARMSMP + /* + * Cyprus and newer chips can disable individual non-boot CPUs. The + * implementation polls cpuX_IMPL_CPU_STS, which differs on older chips. 
+ */ + if (CpuDataEntries[cpu_id].cpu_data_vaddr != &BootCpuData) { + return true; + } +#endif + return false; +} + +void +ml_cpu_init_state(void) +{ + lck_grp_init(&cpu_lck_grp, "cpu_lck_grp", LCK_GRP_ATTR_NULL); + lck_rw_init(&cpu_state_lock, &cpu_lck_grp, LCK_ATTR_NULL); +} + +#ifdef USE_APPLEARMSMP + +void +ml_cpu_begin_state_transition(int cpu_id) +{ + lck_rw_lock_exclusive(&cpu_state_lock); + CpuDataEntries[cpu_id].cpu_data_vaddr->in_state_transition = true; + lck_rw_unlock_exclusive(&cpu_state_lock); +} + +void +ml_cpu_end_state_transition(int cpu_id) +{ + lck_rw_lock_exclusive(&cpu_state_lock); + CpuDataEntries[cpu_id].cpu_data_vaddr->in_state_transition = false; + lck_rw_unlock_exclusive(&cpu_state_lock); +} + +void +ml_cpu_begin_loop(void) +{ + lck_rw_lock_shared(&cpu_state_lock); +} + +void +ml_cpu_end_loop(void) +{ + lck_rw_unlock_shared(&cpu_state_lock); +} + +#else /* USE_APPLEARMSMP */ + +void +ml_cpu_begin_state_transition(__unused int cpu_id) +{ +} + +void +ml_cpu_end_state_transition(__unused int cpu_id) +{ +} + +void +ml_cpu_begin_loop(void) +{ +} + +void +ml_cpu_end_loop(void) +{ +} + +#endif /* USE_APPLEARMSMP */ diff --git a/osfmk/arm/cpu_data.h b/osfmk/arm/cpu_data.h index 7b001d176..56d2a6eb8 100644 --- a/osfmk/arm/cpu_data.h +++ b/osfmk/arm/cpu_data.h @@ -109,13 +109,10 @@ exception_stack_pointer(void) #define getCpuDatap() current_thread()->machine.CpuDatap #define current_cpu_datap() getCpuDatap() -extern int get_preemption_level(void); -extern void _enable_preemption_no_check(void); +extern int get_preemption_level(void); -#define enable_preemption_no_check() _enable_preemption_no_check() -#define mp_disable_preemption() _disable_preemption() -#define mp_enable_preemption() _enable_preemption() -#define mp_enable_preemption_no_check() _enable_preemption_no_check() +#define mp_disable_preemption() _disable_preemption() +#define mp_enable_preemption() _enable_preemption() #endif /* MACH_KERNEL_PRIVATE */ diff --git a/osfmk/arm/cpu_data_internal.h b/osfmk/arm/cpu_data_internal.h index 98eac98c5..28690d7d8 100644 --- a/osfmk/arm/cpu_data_internal.h +++ b/osfmk/arm/cpu_data_internal.h @@ -36,9 +36,11 @@ #include #include #include +#include #include #include #include +#include #include #include #include @@ -59,33 +61,26 @@ typedef struct reset_handler_data { extern reset_handler_data_t ResetHandlerData; -#if __ARM_SMP__ -#ifdef CPU_COUNT -#define MAX_CPUS CPU_COUNT -#else -#define MAX_CPUS 2 -#endif -#else -#define MAX_CPUS 1 -#endif - /* Put the static check for cpumap_t here as it's defined in */ static_assert(sizeof(cpumap_t) * CHAR_BIT >= MAX_CPUS, "cpumap_t bitvector is too small for current MAX_CPUS value"); #ifdef __arm__ #define CPUWINDOWS_BASE_MASK 0xFFF00000UL #else -#define CPUWINDOWS_BASE_MASK 0xFFFFFFFFFFF00000UL +#define CPUWINDOWS_BASE_MASK 0xFFFFFFFFFFE00000UL #endif #define CPUWINDOWS_BASE (VM_MAX_KERNEL_ADDRESS & CPUWINDOWS_BASE_MASK) -#define CPUWINDOWS_TOP (CPUWINDOWS_BASE + (MAX_CPUS * CPUWINDOWS_MAX * PAGE_SIZE)) +#define CPUWINDOWS_TOP (CPUWINDOWS_BASE + (MAX_CPUS * CPUWINDOWS_MAX * ARM_PGBYTES)) + +static_assert((CPUWINDOWS_BASE >= VM_MIN_KERNEL_ADDRESS) && (CPUWINDOWS_TOP <= VM_MAX_KERNEL_ADDRESS), + "CPU copy windows too large for CPUWINDOWS_BASE_MASK value"); typedef struct cpu_data_entry { - void *cpu_data_paddr; /* Cpu data physical address */ - struct cpu_data *cpu_data_vaddr; /* Cpu data virtual address */ + void *cpu_data_paddr; /* Cpu data physical address */ + struct cpu_data *cpu_data_vaddr; /* Cpu data virtual address */ #if __arm__ 
- uint32_t cpu_data_offset_8; - uint32_t cpu_data_offset_12; + uint32_t cpu_data_offset_8; + uint32_t cpu_data_offset_12; #elif __arm64__ #else #error Check cpu_data_entry padding for this architecture @@ -95,8 +90,8 @@ typedef struct cpu_data_entry { typedef struct rtclock_timer { mpqueue_head_t queue; - uint64_t deadline; - uint32_t is_set:1, + uint64_t deadline; + uint32_t is_set:1, has_expired:1, :0; } rtclock_timer_t; @@ -110,7 +105,6 @@ typedef struct { uint64_t ipi_cnt; uint64_t ipi_cnt_wake; uint64_t timer_cnt; - uint64_t timer_cnt_wake; #if MONOTONIC uint64_t pmi_cnt_wake; #endif /* MONOTONIC */ @@ -122,35 +116,31 @@ typedef struct { } cpu_stat_t; typedef struct cpu_data { - unsigned short cpu_number; - unsigned short cpu_flags; - vm_offset_t istackptr; - vm_offset_t intstack_top; + unsigned short cpu_number; + unsigned short cpu_flags; + int cpu_type; + int cpu_subtype; + int cpu_threadtype; + + vm_offset_t istackptr; + vm_offset_t intstack_top; #if __arm64__ - vm_offset_t excepstackptr; - vm_offset_t excepstack_top; - boolean_t cluster_master; + vm_offset_t excepstackptr; + vm_offset_t excepstack_top; #else - vm_offset_t fiqstackptr; - vm_offset_t fiqstack_top; + vm_offset_t fiqstackptr; + vm_offset_t fiqstack_top; #endif - boolean_t interrupts_enabled; - thread_t cpu_active_thread; - vm_offset_t cpu_active_stack; - unsigned int cpu_ident; - cpu_id_t cpu_id; - unsigned volatile int cpu_signal; - void *cpu_cache_dispatch; - ast_t cpu_pending_ast; - struct processor *cpu_processor; - int cpu_type; - int cpu_subtype; - int cpu_threadtype; - int cpu_running; - -#ifdef __LP64__ - uint64_t cpu_base_timebase; - uint64_t cpu_timebase; + thread_t cpu_active_thread; + vm_offset_t cpu_active_stack; + cpu_id_t cpu_id; + unsigned volatile int cpu_signal; + ast_t cpu_pending_ast; + cache_dispatch_t cpu_cache_dispatch; + +#if __arm64__ + uint64_t cpu_base_timebase; + uint64_t cpu_timebase; #else union { struct { @@ -176,146 +166,156 @@ typedef struct cpu_data { #define cpu_timebase_low ctb.split.low #define cpu_timebase_high ctb.split.high #endif + bool cpu_hibernate; /* This cpu is currently hibernating the system */ + bool cpu_running; + bool cluster_master; + /* true if processor_start() or processor_exit() is operating on this CPU */ + bool in_state_transition; - uint32_t cpu_decrementer; - void *cpu_get_decrementer_func; - void *cpu_set_decrementer_func; - void *cpu_get_fiq_handler; + uint32_t cpu_decrementer; + get_decrementer_t cpu_get_decrementer_func; + set_decrementer_t cpu_set_decrementer_func; + fiq_handler_t cpu_get_fiq_handler; - void *cpu_tbd_hardware_addr; - void *cpu_tbd_hardware_val; + void *cpu_tbd_hardware_addr; + void *cpu_tbd_hardware_val; - void *cpu_console_buf; + void *cpu_console_buf; - void *cpu_idle_notify; - uint64_t cpu_idle_latency; - uint64_t cpu_idle_pop; + processor_idle_t cpu_idle_notify; + uint64_t cpu_idle_latency; + uint64_t cpu_idle_pop; #if __arm__ || __ARM_KERNEL_PROTECT__ - vm_offset_t cpu_exc_vectors; + vm_offset_t cpu_exc_vectors; #endif /* __ARM_KERNEL_PROTECT__ */ - vm_offset_t cpu_reset_handler; - uint32_t cpu_reset_type; - uintptr_t cpu_reset_assist; - - void *cpu_int_state; - IOInterruptHandler interrupt_handler; - void *interrupt_nub; - unsigned int interrupt_source; - void *interrupt_target; - void *interrupt_refCon; - - void *idle_timer_notify; - void *idle_timer_refcon; - uint64_t idle_timer_deadline; - - uint64_t quantum_timer_deadline; - uint64_t rtcPop; - rtclock_timer_t rtclock_timer; - struct _rtclock_data_ *rtclock_datap; - - 
arm_debug_state_t *cpu_user_debug; /* Current debug state */ - vm_offset_t cpu_debug_interface_map; - - volatile int debugger_active; - - void *cpu_xcall_p0; - void *cpu_xcall_p1; - void *cpu_imm_xcall_p0; - void *cpu_imm_xcall_p1; - -#if __ARM_SMP__ && defined(ARMA7) - volatile uint32_t cpu_CLW_active; - volatile uint64_t cpu_CLWFlush_req; - volatile uint64_t cpu_CLWFlush_last; - volatile uint64_t cpu_CLWClean_req; - volatile uint64_t cpu_CLWClean_last; + vm_offset_t cpu_reset_handler; + uintptr_t cpu_reset_assist; + uint32_t cpu_reset_type; + + unsigned int interrupt_source; + void *cpu_int_state; + IOInterruptHandler interrupt_handler; + void *interrupt_nub; + void *interrupt_target; + void *interrupt_refCon; + + idle_timer_t idle_timer_notify; + void *idle_timer_refcon; + uint64_t idle_timer_deadline; + + uint64_t rtcPop; + rtclock_timer_t rtclock_timer; + struct _rtclock_data_ *rtclock_datap; + + arm_debug_state_t *cpu_user_debug; /* Current debug state */ + vm_offset_t cpu_debug_interface_map; + + volatile int debugger_active; + volatile int PAB_active; /* Tells the console if we are dumping backtraces */ + + void *cpu_xcall_p0; + void *cpu_xcall_p1; + void *cpu_imm_xcall_p0; + void *cpu_imm_xcall_p1; + +#if defined(ARMA7) + volatile uint32_t cpu_CLW_active; + volatile uint64_t cpu_CLWFlush_req; + volatile uint64_t cpu_CLWFlush_last; + volatile uint64_t cpu_CLWClean_req; + volatile uint64_t cpu_CLWClean_last; #endif - #if __arm64__ - vm_offset_t coresight_base[CORESIGHT_REGIONS]; + vm_offset_t coresight_base[CORESIGHT_REGIONS]; #endif /* CCC ARMv8 registers */ - uint64_t cpu_regmap_paddr; + uint64_t cpu_regmap_paddr; - uint32_t cpu_phys_id; - uint32_t cpu_l2_access_penalty; - void *platform_error_handler; + uint32_t cpu_phys_id; + uint32_t cpu_l2_access_penalty; + platform_error_handler_t platform_error_handler; - int cpu_mcount_off; + int cpu_mcount_off; - #define ARM_CPU_ON_SLEEP_PATH 0x50535553UL - volatile unsigned int cpu_sleep_token; - unsigned int cpu_sleep_token_last; + #define ARM_CPU_ON_SLEEP_PATH 0x50535553UL + volatile unsigned int cpu_sleep_token; + unsigned int cpu_sleep_token_last; - cpu_stat_t cpu_stat; + cluster_type_t cpu_cluster_type; + uint32_t cpu_cluster_id; + uint32_t cpu_l2_id; + uint32_t cpu_l2_size; + uint32_t cpu_l3_id; + uint32_t cpu_l3_size; - volatile int PAB_active; /* Tells the console if we are dumping backtraces */ + enum { + CPU_NOT_HALTED = 0, + CPU_HALTED, + CPU_HALTED_WITH_STATE + } halt_status; +#if defined(HAS_APPLE_PAC) + uint64_t rop_key; + uint64_t jop_key; +#endif /* defined(HAS_APPLE_PAC) */ + /* large structs with large alignment requirements */ #if KPC /* double-buffered performance counter data */ - uint64_t *cpu_kpc_buf[2]; + uint64_t *cpu_kpc_buf[2]; /* PMC shadow and reload value buffers */ - uint64_t *cpu_kpc_shadow; - uint64_t *cpu_kpc_reload; + uint64_t *cpu_kpc_shadow; + uint64_t *cpu_kpc_reload; #endif #if MONOTONIC - struct mt_cpu cpu_monotonic; + struct mt_cpu cpu_monotonic; #endif /* MONOTONIC */ - cluster_type_t cpu_cluster_type; - uint32_t cpu_cluster_id; - uint32_t cpu_l2_id; - uint32_t cpu_l2_size; - uint32_t cpu_l3_id; - uint32_t cpu_l3_size; - + cpu_stat_t cpu_stat; #if !XNU_MONITOR - struct pmap_cpu_data cpu_pmap_cpu_data; + struct pmap_cpu_data cpu_pmap_cpu_data; +#endif + dbgwrap_thread_state_t halt_state; +#if DEVELOPMENT || DEBUG + uint64_t wfe_count; + uint64_t wfe_deadline_checks; + uint64_t wfe_terminations; #endif - dbgwrap_thread_state_t halt_state; - enum { - CPU_NOT_HALTED = 0, - CPU_HALTED, - 
CPU_HALTED_WITH_STATE - } halt_status; -#if defined(HAS_APPLE_PAC) - uint64_t rop_key; -#endif /* defined(HAS_APPLE_PAC) */ } cpu_data_t; /* * cpu_flags */ -#define SleepState 0x0800 -#define StartedState 0x1000 +#define SleepState 0x0800 +#define StartedState 0x1000 -extern cpu_data_entry_t CpuDataEntries[MAX_CPUS]; -extern cpu_data_t BootCpuData; -extern boot_args *BootArgs; +extern cpu_data_entry_t CpuDataEntries[MAX_CPUS]; +PERCPU_DECL(cpu_data_t, cpu_data); +#define BootCpuData __PERCPU_NAME(cpu_data) +extern boot_args *BootArgs; #if __arm__ -extern unsigned int *ExceptionLowVectorsBase; -extern unsigned int *ExceptionVectorsTable; +extern unsigned int *ExceptionLowVectorsBase; +extern unsigned int *ExceptionVectorsTable; #elif __arm64__ -extern unsigned int LowResetVectorBase; -extern unsigned int LowResetVectorEnd; +extern unsigned int LowResetVectorBase; +extern unsigned int LowResetVectorEnd; #if WITH_CLASSIC_S2R -extern uint8_t SleepToken[8]; +extern uint8_t SleepToken[8]; #endif -extern unsigned int LowExceptionVectorBase; +extern unsigned int LowExceptionVectorBase; #else #error Unknown arch #endif -extern cpu_data_t *cpu_datap(int cpu); -extern cpu_data_t *cpu_data_alloc(boolean_t is_boot); -extern void cpu_stack_alloc(cpu_data_t*); -extern void cpu_data_init(cpu_data_t *cpu_data_ptr); -extern void cpu_data_free(cpu_data_t *cpu_data_ptr); -extern kern_return_t cpu_data_register(cpu_data_t *cpu_data_ptr); -extern cpu_data_t *processor_to_cpu_datap( processor_t processor); +extern cpu_data_t *cpu_datap(int cpu); +extern cpu_data_t *cpu_data_alloc(boolean_t is_boot); +extern void cpu_stack_alloc(cpu_data_t*); +extern void cpu_data_init(cpu_data_t *cpu_data_ptr); +extern void cpu_data_free(cpu_data_t *cpu_data_ptr); +extern kern_return_t cpu_data_register(cpu_data_t *cpu_data_ptr); +extern cpu_data_t *processor_to_cpu_datap( processor_t processor); #if __arm64__ typedef struct sysreg_restore { diff --git a/osfmk/arm/cpu_internal.h b/osfmk/arm/cpu_internal.h index 8e4a31454..fd1e09c1b 100644 --- a/osfmk/arm/cpu_internal.h +++ b/osfmk/arm/cpu_internal.h @@ -63,7 +63,7 @@ extern void cpu_signal_cancel( #define SIGPdebug 0x00000010U /* Request Debug call */ #define SIGPLWFlush 0x00000020UL /* Request LWFlush call */ #define SIGPLWClean 0x00000040UL /* Request LWClean call */ -#define SIGPkptimer 0x00000100U /* Request kperf timer */ +#define SIGPkppet 0x00000100U /* Request kperf PET handler */ #define SIGPxcallImm 0x00000200U /* Send a cross-call, fail if already pending */ #define SIGPdisabled 0x80000000U /* Signal disabled */ @@ -75,9 +75,10 @@ extern void arm64_ipi_test(void); #endif /* defined(CONFIG_XNUPOST) && __arm64__ */ #if defined(KERNEL_INTEGRITY_CTRR) -extern void init_ctrr_cpu_start_lock(void); +extern void init_ctrr_cluster_states(void); extern lck_spin_t ctrr_cpu_start_lck; -extern bool ctrr_cluster_locked[__ARM_CLUSTER_COUNT__]; +enum ctrr_cluster_states { CTRR_UNLOCKED = 0, CTRR_LOCKING, CTRR_LOCKED }; +extern enum ctrr_cluster_states ctrr_cluster_locked[MAX_CPU_CLUSTERS]; #endif /* defined(KERNEL_INTEGRITY_CTRR) */ #endif /* _ARM_CPU_INTERNAL_H_ */ diff --git a/osfmk/arm/cpu_x86_64_capabilities.h b/osfmk/arm/cpu_x86_64_capabilities.h new file mode 100644 index 000000000..d740cf96c --- /dev/null +++ b/osfmk/arm/cpu_x86_64_capabilities.h @@ -0,0 +1,153 @@ +/* + * Copyright (c) 2017 Apple Inc. All rights reserved. 
+ */ + +#ifdef PRIVATE + +#ifndef _ARM_CPU_X86_64_CAPABILITIES_H +#define _ARM_CPU_X86_64_CAPABILITIES_H + +#ifndef __ASSEMBLER__ +#include +#include +#ifdef KERNEL_PRIVATE +#include +#endif +#endif + +/* + * This is the authoritative way to determine from x86_64 user mode what + * implementation-specific processor features are available. + * + * This API only supported for Apple internal use. + */ + +/* Bit definitions for emulated _cpu_capabilities: */ + +#define x86_64_kHasMMX 0x00000001 +#define x86_64_kHasSSE 0x00000002 +#define x86_64_kHasSSE2 0x00000004 +#define x86_64_kHasSSE3 0x00000008 +#define x86_64_kCache32 0x00000010 /* cache line size is 32 bytes */ +#define x86_64_kCache64 0x00000020 +#define x86_64_kCache128 0x00000040 +#define x86_64_kFastThreadLocalStorage 0x00000080 /* TLS ptr is kept in a user-mode-readable register */ +#define x86_64_kHasSupplementalSSE3 0x00000100 +#define x86_64_k64Bit 0x00000200 /* processor supports EM64T (not what mode you're running in) */ +#define x86_64_kHasSSE4_1 0x00000400 +#define x86_64_kHasSSE4_2 0x00000800 +#define x86_64_kHasAES 0x00001000 +#define x86_64_kInOrderPipeline 0x00002000 +#define x86_64_kSlow 0x00004000 /* tsc < nanosecond */ +#define x86_64_kUP 0x00008000 /* set if (kNumCPUs == 1) */ +#define x86_64_kNumCPUs 0x00FF0000 /* number of CPUs (see _NumCPUs() below) */ +#define x86_64_kNumCPUsShift 16 +#define x86_64_kHasAVX1_0 0x01000000 +#define x86_64_kHasRDRAND 0x02000000 +#define x86_64_kHasF16C 0x04000000 +#define x86_64_kHasENFSTRG 0x08000000 +#define x86_64_kHasFMA 0x10000000 +#define x86_64_kHasAVX2_0 0x20000000 +#define x86_64_kHasBMI1 0x40000000 +#define x86_64_kHasBMI2 0x80000000 +/* Extending into 64-bits from here: */ +#define x86_64_kHasRTM 0x0000000100000000ULL +#define x86_64_kHasHLE 0x0000000200000000ULL +#define x86_64_kHasRDSEED 0x0000000800000000ULL +#define x86_64_kHasADX 0x0000000400000000ULL +#define x86_64_kHasMPX 0x0000001000000000ULL +#define x86_64_kHasSGX 0x0000002000000000ULL +#if !defined(RC_HIDE_XNU_J137) +#define x86_64_kHasAVX512F 0x0000004000000000ULL +#define x86_64_kHasAVX512CD 0x0000008000000000ULL +#define x86_64_kHasAVX512DQ 0x0000010000000000ULL +#define x86_64_kHasAVX512BW 0x0000020000000000ULL +#define x86_64_kHasAVX512IFMA 0x0000040000000000ULL +#define x86_64_kHasAVX512VBMI 0x0000080000000000ULL +#define x86_64_kHasAVX512VL 0x0000100000000000ULL +#endif /* not RC_HIDE_XNU_J137 */ + +#define x86_64_kIsTranslated 0x4000000000000000ULL // isTranslated +/* Cambria specific. The address space page shift. */ +#define x86_64_kVmPageShift 0xFFB + +/* + * The effectively cast-in-stone x86_64 comm page address that we + * simulate for compatibility purposes. 
+ */ + +#define X86_64_COMM_PAGE_BASE_ADDRESS (0x7fffffe00000ULL) +#define X86_64_COMM_PAGE_AREA_LENGTH 4096 +#define X86_64_COMM_PAGE_VERSION 14 +#define X86_64_MP_SPIN_TRIES 1000 + +#ifdef KERNEL_PRIVATE +extern vm_address_t x86_64_sharedpage_rw_addr; +extern uint64_t _get_x86_64_cpu_capabilities(void); +#endif + +typedef struct { +/* 0 */ uint8_t signature[16]; +/* 10 */ uint64_t cpu_capabilities64; +/* 18 */ uint8_t _unused[6]; +/* 1e */ uint16_t version; +/* 20 */ uint32_t cpu_capabilities; +/* 24 */ uint8_t _unused0[2]; +/* 26 */ uint16_t cache_linesize; +/* 28 */ volatile uint32_t sched_gen; +/* 2c */ volatile uint32_t memory_pressure; +/* 30 */ volatile uint32_t spin_count; +/* 34 */ volatile uint8_t active_cpus; +/* 35 */ uint8_t physical_cpus; +/* 36 */ uint8_t logical_cpus; +/* 37 */ uint8_t _unused1[1]; +/* 38 */ uint64_t memory_size; +/* 40 */ uint32_t cpufamily; +/* 44 */ volatile uint32_t kdebug_enable; +/* 48 */ volatile uint32_t atm_diagnostic_config; +/* 4C */ uint8_t cp_dtrace_dof_enabled; +/* 4D */ uint8_t cp_kernel_page_shift; /* _COMM_PAGE_VERSION >= 14 */ +/* 4E */ uint8_t cp_user_page_shift; /* _COMM_PAGE_VERSION >= 14 */ +/* 4F */ uint8_t _unused2; + volatile struct { +/* 50 */ uint64_t nt_tsc_base; +/* 58 */ uint32_t nt_scale; +/* 5c */ uint32_t nt_shift; +/* 60 */ uint64_t nt_ns_base; +/* 68 */ uint32_t nt_generation; +/* 6c */ uint32_t gtod_generation; +/* 70 */ uint64_t gtod_ns_base; +/* 78 */ uint64_t gtod_sec_base; + } time_data; + volatile union { + struct { +/* 80 */ uint64_t time; +/* 88 */ uint64_t time_supported; + } _; + uint8_t _fill[64]; + } approx; +/* c0 */ volatile uint64_t cont_timebase; +/* c8 */ volatile uint64_t boottime_usec; + new_commpage_timeofday_data_t new_time_data; +/* { */ +/* d0 uint64_t TimeStamp_tick; */ +/* d8 uint64_t TimeStamp_sec; */ +/* e0 uint64_t TimeStamp_frac; */ +/* e8 uint64_t Ticks_scale; */ +/* f0 uint64_t Ticks_per_sec; */ +/* } */ + +/* f8 */ uint64_t unused; +/* 100 */ uint64_t dyld_system_flags; + +/* 108 */ uint8_t unused2[3800]; +/* 0xFE0 */ uint8_t cp_aprr_shadow_supported; +/* 0xFE1 */ uint8_t unused3[7]; +/* 0xFE8 */ uint64_t cp_aprr_shadow_jit_rw; +/* 0xFF0*/ uint64_t cp_aprr_shadow_jit_rx; +/* 0xFF8 */ uint32_t unused4; +/* ffc */ uint32_t arm_cpufamily; +} x86_64_commpage_t; + +#endif /* _ARM_CPU_X86_64_CAPABILITIES_H */ +#endif /* PRIVATE */ diff --git a/osfmk/arm/cpuid.c b/osfmk/arm/cpuid.c index f976aea35..5225154f3 100644 --- a/osfmk/arm/cpuid.c +++ b/osfmk/arm/cpuid.c @@ -156,12 +156,6 @@ cpuid_get_cpufamily(void) case CPU_VID_APPLE: switch (cpuid_info()->arm_info.arm_part) { - case CPU_PART_SWIFT: - cpufamily = CPUFAMILY_ARM_SWIFT; - break; - case CPU_PART_CYCLONE: - cpufamily = CPUFAMILY_ARM_CYCLONE; - break; case CPU_PART_TYPHOON: case CPU_PART_TYPHOON_CAPRI: cpufamily = CPUFAMILY_ARM_TYPHOON; @@ -185,12 +179,10 @@ cpuid_get_cpufamily(void) case CPU_PART_TEMPEST_ARUBA: cpufamily = CPUFAMILY_ARM_VORTEX_TEMPEST; break; -#ifndef RC_HIDE_XNU_LIGHTNING case CPU_PART_LIGHTNING: case CPU_PART_THUNDER: cpufamily = CPUFAMILY_ARM_LIGHTNING_THUNDER; break; -#endif /* !RC_HIDE_XNU_LIGHTNING */ default: cpufamily = CPUFAMILY_UNKNOWN; break; @@ -205,6 +197,45 @@ cpuid_get_cpufamily(void) return cpufamily; } +int +cpuid_get_cpusubfamily(void) +{ + int cpusubfamily = CPUSUBFAMILY_UNKNOWN; + + if (cpuid_info()->arm_info.arm_implementor != CPU_VID_APPLE) { + return cpusubfamily; + } + + switch (cpuid_info()->arm_info.arm_part) { + case CPU_PART_TYPHOON: + case CPU_PART_TWISTER: + case CPU_PART_HURRICANE: + case 
CPU_PART_MONSOON: + case CPU_PART_MISTRAL: + case CPU_PART_VORTEX: + case CPU_PART_TEMPEST: + case CPU_PART_LIGHTNING: + case CPU_PART_THUNDER: + cpusubfamily = CPUSUBFAMILY_ARM_HP; + break; + case CPU_PART_TYPHOON_CAPRI: + case CPU_PART_TWISTER_ELBA_MALTA: + case CPU_PART_HURRICANE_MYST: + case CPU_PART_VORTEX_ARUBA: + case CPU_PART_TEMPEST_ARUBA: + cpusubfamily = CPUSUBFAMILY_ARM_HG; + break; + case CPU_PART_TEMPEST_M9: + cpusubfamily = CPUSUBFAMILY_ARM_M; + break; + default: + cpusubfamily = CPUSUBFAMILY_UNKNOWN; + break; + } + + return cpusubfamily; +} + void do_debugid(void) { @@ -314,6 +345,15 @@ do_cacheid(void) cpuid_cache_info.c_bulksize_op = cpuid_cache_info.c_dsize; } + if (cpuid_cache_info.c_unified == 0) { + machine_write_csselr(CSSELR_L1, CSSELR_INSTR); + arm_cache_ccsidr_info.value = machine_read_ccsidr(); + uint32_t c_linesz = 4 * (1 << (arm_cache_ccsidr_info.bits.LineSize + 2)); + uint32_t c_assoc = (arm_cache_ccsidr_info.bits.Assoc + 1); + /* I cache size */ + cpuid_cache_info.c_isize = (arm_cache_ccsidr_info.bits.NumSets + 1) * c_linesz * c_assoc; + } + kprintf("%s() - %u bytes %s cache (I:%u D:%u (%s)), %u-way assoc, %u bytes/line\n", __FUNCTION__, cpuid_cache_info.c_dsize + cpuid_cache_info.c_isize, diff --git a/osfmk/arm/cpuid.h b/osfmk/arm/cpuid.h index 74aac691d..b9ae98583 100644 --- a/osfmk/arm/cpuid.h +++ b/osfmk/arm/cpuid.h @@ -109,12 +109,6 @@ typedef union { /* H4 (ARMv7 architecture) */ #define CPU_PART_CORTEXA9 0xC09 -/* H5 (SWIFT architecture) */ -#define CPU_PART_SWIFT 0x0 - -/* H6 (ARMv8 architecture) */ -#define CPU_PART_CYCLONE 0x1 - /* H7 (ARMv8 architecture) */ #define CPU_PART_TYPHOON 0x2 @@ -154,14 +148,15 @@ typedef union { /* H11G e-Core (ARMv8 architecture) */ #define CPU_PART_TEMPEST_ARUBA 0x11 -#ifndef RC_HIDE_XNU_LIGHTNING /* H12 p-Core (ARMv8 architecture) */ #define CPU_PART_LIGHTNING 0x12 /* H12 e-Core (ARMv8 architecture) */ #define CPU_PART_THUNDER 0x13 -#endif /* !RC_HIDE_XNU_LIGHTNING */ + + + /* Cache type identification */ @@ -236,6 +231,7 @@ extern "C" { extern void do_cpuid(void); extern arm_cpu_info_t *cpuid_info(void); extern int cpuid_get_cpufamily(void); +extern int cpuid_get_cpusubfamily(void); extern void do_debugid(void); extern arm_debug_info_t *arm_debug_info(void); diff --git a/osfmk/arm/cpuid_internal.h b/osfmk/arm/cpuid_internal.h index 9778d117b..4d17f6b84 100644 --- a/osfmk/arm/cpuid_internal.h +++ b/osfmk/arm/cpuid_internal.h @@ -29,6 +29,9 @@ #ifndef _ARM_CPUID_INTERNAL_H_ #define _ARM_CPUID_INTERNAL_H_ +#include +#include + void machine_do_debugid(void); arm_debug_info_t *machine_arm_debug_info(void); diff --git a/osfmk/arm/data.s b/osfmk/arm/data.s index b7e66378a..812ea77a4 100644 --- a/osfmk/arm/data.s +++ b/osfmk/arm/data.s @@ -110,7 +110,17 @@ LEXT(vfptrash_data) /* reserve space for read only page tables */ .align 14 LEXT(ropagetable_begin) +#if XNU_TARGET_OS_OSX + // A big auxKC might need more page tables, especially because + // it's not block mapped. + // Note that we don't distinguish between KASAN or not: with + // a KASAN kernel, the effective auxKC limit is smaller. + .space 18*16*1024,0 +#elif KASAN + .space 16*16*1024,0 +#else .space 14*16*1024,0 +#endif #else LEXT(ropagetable_begin) #endif /* defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR) */ diff --git a/osfmk/arm/dwarf_unwind.h b/osfmk/arm/dwarf_unwind.h new file mode 100644 index 000000000..6dc4de667 --- /dev/null +++ b/osfmk/arm/dwarf_unwind.h @@ -0,0 +1,117 @@ +/* + * Copyright (c) 2019 Apple Inc. All rights reserved.
+ * + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. + * + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ + */ + + +#ifndef _ARM_DWARF_UNWIND_H_ +#define _ARM_DWARF_UNWIND_H_ + +/* + * This file contains the architecture specific DWARF definitions needed for unwind + * information added to trap handlers. + */ + +#define DWARF_ARM_R0 0 +#define DWARF_ARM_R1 1 +#define DWARF_ARM_R2 2 +#define DWARF_ARM_R3 3 +#define DWARF_ARM_R4 4 +#define DWARF_ARM_R5 5 +#define DWARF_ARM_R6 6 +#define DWARF_ARM_R7 7 +#define DWARF_ARM_R8 8 +#define DWARF_ARM_R9 9 +#define DWARF_ARM_R10 10 +#define DWARF_ARM_R11 11 +#define DWARF_ARM_R12 12 +#define DWARF_ARM_SP 13 +#define DWARF_ARM_LR 14 +#define DWARF_ARM_PC 15 + +#define DW_OP_breg0 0x70 +#define DW_OP_breg8 0x78 +#define DW_OP_breg13 0x7d +#define DW_CFA_expression 0x10 +#define DW_OP_deref 0x06 +#define DW_OP_constu 0x10 +#define DW_OP_plus 0x22 + +#define DW_FORM_LENGTH 6 +#define DWARF_OFFSET_0 0 + +#define DWARF_ARM_R0_OFFSET 0 +#define DWARF_ARM_R1_OFFSET 4 +#define DWARF_ARM_R2_OFFSET 8 +#define DWARF_ARM_R3_OFFSET 12 +#define DWARF_ARM_R4_OFFSET 16 +#define DWARF_ARM_R5_OFFSET 20 +#define DWARF_ARM_R6_OFFSET 24 +#define DWARF_ARM_R7_OFFSET 28 +#define DWARF_ARM_R8_OFFSET 32 +#define DWARF_ARM_R9_OFFSET 36 +#define DWARF_ARM_R10_OFFSET 40 +#define DWARF_ARM_R11_OFFSET 44 +#define DWARF_ARM_R12_OFFSET 48 +#define DWARF_ARM_SP_OFFSET 52 +#define DWARF_ARM_LR_OFFSET 56 +#define DWARF_ARM_PC_OFFSET 60 + +/* The actual unwind directives added to trap handlers to let the debugger know where the register state is stored */ + +/* Unwind Prologue added to each function to indicate the start of the unwind information. 
*/ + +#define UNWIND_PROLOGUE \ +.cfi_sections .eh_frame ;\ +.cfi_startproc ;\ +.cfi_signal_frame ;\ + + +/* Unwind Epilogue added to each function to indicate the end of the unwind information */ + +#define UNWIND_EPILOGUE .cfi_endproc + + + +#define UNWIND_DIRECTIVES \ +.cfi_escape DW_CFA_expression, DWARF_ARM_R0, DW_FORM_LENGTH, DW_OP_breg13, DWARF_OFFSET_0, DW_OP_deref, DW_OP_constu, DWARF_ARM_R0_OFFSET, DW_OP_plus ;\ +.cfi_escape DW_CFA_expression, DWARF_ARM_R1, DW_FORM_LENGTH, DW_OP_breg13, DWARF_OFFSET_0, DW_OP_deref, DW_OP_constu, DWARF_ARM_R1_OFFSET, DW_OP_plus ;\ +.cfi_escape DW_CFA_expression, DWARF_ARM_R2, DW_FORM_LENGTH, DW_OP_breg13, DWARF_OFFSET_0, DW_OP_deref, DW_OP_constu, DWARF_ARM_R2_OFFSET, DW_OP_plus ;\ +.cfi_escape DW_CFA_expression, DWARF_ARM_R3, DW_FORM_LENGTH, DW_OP_breg13, DWARF_OFFSET_0, DW_OP_deref, DW_OP_constu, DWARF_ARM_R3_OFFSET, DW_OP_plus ;\ +.cfi_escape DW_CFA_expression, DWARF_ARM_R4, DW_FORM_LENGTH, DW_OP_breg13, DWARF_OFFSET_0, DW_OP_deref, DW_OP_constu, DWARF_ARM_R4_OFFSET, DW_OP_plus ;\ +.cfi_escape DW_CFA_expression, DWARF_ARM_R5, DW_FORM_LENGTH, DW_OP_breg13, DWARF_OFFSET_0, DW_OP_deref, DW_OP_constu, DWARF_ARM_R5_OFFSET, DW_OP_plus ;\ +.cfi_escape DW_CFA_expression, DWARF_ARM_R6, DW_FORM_LENGTH, DW_OP_breg13, DWARF_OFFSET_0, DW_OP_deref, DW_OP_constu, DWARF_ARM_R6_OFFSET, DW_OP_plus ;\ +.cfi_escape DW_CFA_expression, DWARF_ARM_R7, DW_FORM_LENGTH, DW_OP_breg13, DWARF_OFFSET_0, DW_OP_deref, DW_OP_constu, DWARF_ARM_R7_OFFSET, DW_OP_plus ;\ +.cfi_escape DW_CFA_expression, DWARF_ARM_R8, DW_FORM_LENGTH, DW_OP_breg13, DWARF_OFFSET_0, DW_OP_deref, DW_OP_constu, DWARF_ARM_R8_OFFSET, DW_OP_plus ;\ +.cfi_escape DW_CFA_expression, DWARF_ARM_R9, DW_FORM_LENGTH, DW_OP_breg13, DWARF_OFFSET_0, DW_OP_deref, DW_OP_constu, DWARF_ARM_R9_OFFSET, DW_OP_plus ;\ +.cfi_escape DW_CFA_expression, DWARF_ARM_R10, DW_FORM_LENGTH, DW_OP_breg13, DWARF_OFFSET_0, DW_OP_deref, DW_OP_constu, DWARF_ARM_R10_OFFSET, DW_OP_plus ;\ +.cfi_escape DW_CFA_expression, DWARF_ARM_R11, DW_FORM_LENGTH, DW_OP_breg13, DWARF_OFFSET_0, DW_OP_deref, DW_OP_constu, DWARF_ARM_R11_OFFSET, DW_OP_plus ;\ +.cfi_escape DW_CFA_expression, DWARF_ARM_R12, DW_FORM_LENGTH, DW_OP_breg13, DWARF_OFFSET_0, DW_OP_deref, DW_OP_constu, DWARF_ARM_R12_OFFSET, DW_OP_plus ;\ +.cfi_escape DW_CFA_expression, DWARF_ARM_SP, DW_FORM_LENGTH, DW_OP_breg13, DWARF_OFFSET_0, DW_OP_deref, DW_OP_constu, DWARF_ARM_SP_OFFSET, DW_OP_plus ;\ +.cfi_escape DW_CFA_expression, DWARF_ARM_LR, DW_FORM_LENGTH, DW_OP_breg13, DWARF_OFFSET_0, DW_OP_deref, DW_OP_constu, DWARF_ARM_LR_OFFSET, DW_OP_plus ;\ +.cfi_escape DW_CFA_expression, DWARF_ARM_PC, DW_FORM_LENGTH, DW_OP_breg13, DWARF_OFFSET_0, DW_OP_deref, DW_OP_constu, DWARF_ARM_PC_OFFSET, DW_OP_plus ;\ + +#endif /* _ARM_DWARF_UNWIND_H_ */ diff --git a/osfmk/arm/genassym.c b/osfmk/arm/genassym.c index 585d71303..33e1b8ca3 100644 --- a/osfmk/arm/genassym.c +++ b/osfmk/arm/genassym.c @@ -81,10 +81,11 @@ #include #include #include +#include #include #include #include -#include +#include /* * genassym.c is used to produce an @@ -140,7 +141,6 @@ main( DECLARE("ACT_CPUDATAP", offsetof(struct thread, machine.CpuDatap)); DECLARE("ACT_MAP", offsetof(struct thread, map)); #if __ARM_USER_PROTECT__ - DECLARE("ACT_UPTW_TTC", offsetof(struct thread, machine.uptw_ttc)); DECLARE("ACT_UPTW_TTB", offsetof(struct thread, machine.uptw_ttb)); DECLARE("ACT_KPTW_TTB", offsetof(struct thread, machine.kptw_ttb)); DECLARE("ACT_ASID", offsetof(struct thread, machine.asid)); @@ -148,6 +148,7 @@ main( DECLARE("ACT_DEBUGDATA", 
offsetof(struct thread, machine.DebugData)); DECLARE("TH_IOTIER_OVERRIDE", offsetof(struct thread, iotier_override)); DECLARE("TH_RWLOCK_CNT", offsetof(struct thread, rwlock_count)); + DECLARE("TH_TMP_ALLOC_CNT", offsetof(struct thread, t_temp_alloc_count)); DECLARE("TH_SCHED_FLAGS", offsetof(struct thread, sched_flags)); DECLARE("TH_SFLAG_RW_PROMOTED", TH_SFLAG_RW_PROMOTED); @@ -199,8 +200,6 @@ main( DECLARE("KERN_INVALID_ADDRESS", KERN_INVALID_ADDRESS); - DECLARE("MAX_CPUS", MAX_CPUS); - DECLARE("cdeSize", sizeof(struct cpu_data_entry)); @@ -221,16 +220,12 @@ main( offsetof(cpu_data_t, fiqstack_top)); DECLARE("CPU_NUMBER_GS", offsetof(cpu_data_t, cpu_number)); - DECLARE("CPU_IDENT", - offsetof(cpu_data_t, cpu_ident)); DECLARE("CPU_RUNNING", offsetof(cpu_data_t, cpu_running)); DECLARE("CPU_MCOUNT_OFF", offsetof(cpu_data_t, cpu_mcount_off)); DECLARE("CPU_PENDING_AST", offsetof(cpu_data_t, cpu_pending_ast)); - DECLARE("CPU_PROCESSOR", - offsetof(cpu_data_t, cpu_processor)); DECLARE("CPU_CACHE_DISPATCH", offsetof(cpu_data_t, cpu_cache_dispatch)); DECLARE("CPU_BASE_TIMEBASE_LOW", @@ -325,22 +320,22 @@ main( DECLARE("TIMER_TSTAMP", offsetof(struct timer, tstamp)); DECLARE("THREAD_TIMER", - offsetof(struct processor, processor_data.thread_timer)); + offsetof(struct processor, thread_timer)); DECLARE("KERNEL_TIMER", - offsetof(struct processor, processor_data.kernel_timer)); + offsetof(struct processor, kernel_timer)); DECLARE("SYSTEM_STATE", - offsetof(struct processor, processor_data.system_state)); + offsetof(struct processor, system_state)); DECLARE("USER_STATE", - offsetof(struct processor, processor_data.user_state)); + offsetof(struct processor, user_state)); DECLARE("CURRENT_STATE", - offsetof(struct processor, processor_data.current_state)); + offsetof(struct processor, current_state)); DECLARE("SYSTEM_TIMER", offsetof(struct thread, system_timer)); DECLARE("USER_TIMER", offsetof(struct thread, user_timer)); -#if !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME +#if !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME || HAS_FAST_CNTVCT DECLARE("PRECISE_USER_KERNEL_TIME", offsetof(struct thread, precise_user_kernel_time)); #endif @@ -358,7 +353,10 @@ main( offsetof(entropy_data_t, sample_count)); DECLARE("ENTROPY_BUFFER", offsetof(entropy_data_t, buffer)); - DECLARE("ENTROPY_BUFFER_INDEX_MASK", ENTROPY_BUFFER_INDEX_MASK); + DECLARE("ENTROPY_BUFFER_INDEX_MASK", + offsetof(entropy_data_t, buffer_index_mask)); + DECLARE("ENTROPY_BUFFER_ROR_MASK", + offsetof(entropy_data_t, ror_mask)); return 0; } diff --git a/osfmk/arm/kpc_arm.c b/osfmk/arm/kpc_arm.c index 5d882c13b..4fa4119f4 100644 --- a/osfmk/arm/kpc_arm.c +++ b/osfmk/arm/kpc_arm.c @@ -29,7 +29,6 @@ #include #include #include -#include #include #include #include @@ -54,6 +53,9 @@ static int first_time = 1; /* Private */ +static uint64_t get_counter_config(uint32_t counter); + + static boolean_t enable_counter(uint32_t counter) { @@ -220,6 +222,23 @@ set_running_configurable(uint64_t target_mask, uint64_t state_mask) ml_set_interrupts_enabled(enabled); } +static uintptr_t +get_interrupted_pc(bool *kernel_out) +{ + struct arm_saved_state *state = getCpuDatap()->cpu_int_state; + if (!state) { + return 0; + } + + bool kernel = !PSR_IS_USER(get_saved_state_cpsr(state)); + *kernel_out = kernel; + uintptr_t pc = get_saved_state_pc(state); + if (kernel) { + pc = VM_KERNEL_UNSLIDE(pc); + } + return pc; +} + void kpc_pmi_handler(cpu_id_t source); void kpc_pmi_handler(cpu_id_t source) @@ -258,7 +277,10 @@ kpc_pmi_handler(cpu_id_t source) += (kpc_fixed_max() - 
FIXED_RELOAD(ctr) + 1 /* wrap */) + extra; if (FIXED_ACTIONID(ctr)) { - kpc_sample_kperf(FIXED_ACTIONID(ctr)); + bool kernel = false; + uintptr_t pc = get_interrupted_pc(&kernel); + kpc_sample_kperf(FIXED_ACTIONID(ctr), ctr, get_counter_config(ctr), + FIXED_SHADOW(ctr), pc, kernel ? KPC_KERNEL_PC : 0); } /* clear PMOVSR bit */ @@ -520,7 +542,7 @@ static void save_regs(void) { int i; - int cpuid = current_processor()->cpu_id; + int cpuid = cpu_number(); uint32_t PMCR = 0; __asm__ volatile ("dmb ish"); @@ -557,7 +579,7 @@ static void restore_regs(void) { int i; - int cpuid = current_processor()->cpu_id; + int cpuid = cpu_number(); uint64_t extra; uint32_t PMCR = 1; @@ -584,7 +606,10 @@ restore_regs(void) += (kpc_fixed_max() - FIXED_RELOAD(i) + 1 /* Wrap */) + extra; if (FIXED_ACTIONID(i)) { - kpc_sample_kperf(FIXED_ACTIONID(i)); + bool kernel = false; + uintptr_t pc = get_interrupted_pc(&kernel); + kpc_sample_kperf(FIXED_ACTIONID(i), i, get_counter_config(i), + FIXED_SHADOW(i), pc, kernel ? KPC_KERNEL_PC : 0); } } else { write_counter(i, saved_counter[cpuid][i]); @@ -818,7 +843,7 @@ kpc_get_all_cpus_counters(uint32_t classes, int *curcpu, uint64_t *buf) enabled = ml_set_interrupts_enabled(FALSE); if (curcpu) { - *curcpu = current_processor()->cpu_id; + *curcpu = cpu_number(); } cpu_broadcast_xcall(&kpc_xread_sync, TRUE, kpc_get_curcpu_counters_xcall, &hdl); diff --git a/osfmk/arm/locks.h b/osfmk/arm/locks.h index 1fbddf86d..f1f9fd55a 100644 --- a/osfmk/arm/locks.h +++ b/osfmk/arm/locks.h @@ -37,8 +37,6 @@ #ifdef MACH_KERNEL_PRIVATE -extern unsigned int LcksOpts; - #define enaLkDeb 0x00000001 /* Request debug in default attribute */ #define enaLkStat 0x00000002 /* Request statistic in default attribute */ #define disLkRWPrio 0x00000004 /* Disable RW lock priority promotion */ @@ -300,7 +298,7 @@ get_interrupts(void) #if __arm__ __asm__ volatile ("mrs %[state], cpsr" :[state] "=r" (state)); // Read cpsr #else - state = __builtin_arm_rsr64("DAIF"); // Read interrupt state + state = (long)__builtin_arm_rsr64("DAIF"); // Read interrupt state #endif return state; } @@ -321,7 +319,7 @@ restore_interrupts(long state) #if __arm__ __asm__ volatile ("msr cpsr, %[state]" :: [state] "r" (state) : "cc", "memory"); // Restore CPSR #elif __arm64__ - __builtin_arm_wsr64("DAIF", state); // Restore masks + __builtin_arm_wsr64("DAIF", (uint64_t)state); // Restore masks #endif } diff --git a/osfmk/arm/locks_arm.c b/osfmk/arm/locks_arm.c index 7fc463e63..98fd21b3a 100644 --- a/osfmk/arm/locks_arm.c +++ b/osfmk/arm/locks_arm.c @@ -63,7 +63,7 @@ #include -#include +#include #include #include #include @@ -105,15 +105,9 @@ // These are undesirable when in a panic or a debugger is runnning. #define LOCK_CORRECTNESS_PANIC() (kernel_debugger_entry_count == 0) -unsigned int LcksOpts = 0; - #define ADAPTIVE_SPIN_ENABLE 0x1 -#if __SMP__ int lck_mtx_adaptive_spin_mode = ADAPTIVE_SPIN_ENABLE; -#else /* __SMP__ */ -int lck_mtx_adaptive_spin_mode = 0; -#endif /* __SMP__ */ #define SPINWAIT_OWNER_CHECK_COUNT 4 @@ -127,7 +121,7 @@ typedef enum { SPINWAIT_DID_NOT_SPIN, /* Got the interlock, did not spin. 
*/ } spinwait_result_t; -#if CONFIG_DTRACE && __SMP__ +#if CONFIG_DTRACE extern uint64_t dtrace_spin_threshold; #endif @@ -209,6 +203,18 @@ typedef void *pc_t; #define enable_interrupts() __asm__ volatile ("cpsie if" ::: "memory"); #endif +ZONE_VIEW_DEFINE(ZV_LCK_SPIN, "lck_spin", + KHEAP_ID_DEFAULT, sizeof(lck_spin_t)); + +ZONE_VIEW_DEFINE(ZV_LCK_MTX, "lck_mtx", + KHEAP_ID_DEFAULT, sizeof(lck_mtx_t)); + +ZONE_VIEW_DEFINE(ZV_LCK_MTX_EXT, "lck_mtx_ext", + KHEAP_ID_DEFAULT, sizeof(lck_mtx_ext_t)); + +ZONE_VIEW_DEFINE(ZV_LCK_RW, "lck_rw", + KHEAP_ID_DEFAULT, sizeof(lck_rw_t)); + /* * Forward declarations */ @@ -237,13 +243,13 @@ load_exclusive32(uint32_t *target, enum memory_order ord) uint32_t value; #if __arm__ - if (memory_order_has_release(ord)) { + if (_os_atomic_mo_has_release(ord)) { // Pre-load release barrier atomic_thread_fence(memory_order_release); } value = __builtin_arm_ldrex(target); #else - if (memory_order_has_acquire(ord)) { + if (_os_atomic_mo_has_acquire(ord)) { value = __builtin_arm_ldaex(target); // ldaxr } else { value = __builtin_arm_ldrex(target); // ldxr @@ -259,12 +265,12 @@ store_exclusive32(uint32_t *target, uint32_t value, enum memory_order ord) #if __arm__ err = __builtin_arm_strex(value, target); - if (memory_order_has_acquire(ord)) { + if (_os_atomic_mo_has_acquire(ord)) { // Post-store acquire barrier atomic_thread_fence(memory_order_acquire); } #else - if (memory_order_has_release(ord)) { + if (_os_atomic_mo_has_release(ord)) { err = __builtin_arm_stlex(value, target); // stlxr } else { err = __builtin_arm_strex(value, target); // stxr @@ -331,15 +337,26 @@ hw_atomic_test_and_set32(uint32_t *target, uint32_t test_mask, uint32_t set_mask return atomic_test_and_set32(target, test_mask, set_mask, ord, wait); } +/* + * To help _disable_preemption() inline everywhere with LTO, + * we keep these nice non inlineable functions as the panic() + * codegen setup is quite large and for weird reasons causes a frame. + */ +__abortlike +static void +_disable_preemption_overflow(void) +{ + panic("Preemption count overflow"); +} + void _disable_preemption(void) { thread_t thread = current_thread(); unsigned int count = thread->machine.preemption_count; - count += 1; - if (__improbable(count == 0)) { - panic("Preemption count overflow"); + if (__improbable(++count == 0)) { + _disable_preemption_overflow(); } os_atomic_store(&thread->machine.preemption_count, count, compiler_acq_rel); @@ -411,6 +428,18 @@ kernel_preempt_check(thread_t thread) } } +/* + * To help _enable_preemption() inline everywhere with LTO, + * we keep these nice non inlineable functions as the panic() + * codegen setup is quite large and for weird reasons causes a frame. 
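The preemption-count hunks in locks_arm.c move the overflow/underflow panic() calls out of _disable_preemption()/_enable_preemption() into separate __abortlike helpers so the hot path stays small enough to inline under LTO. A minimal, self-contained sketch of that pattern follows; it is illustrative only and not part of the patch, and `bump`, `counter`, and the attribute spelling are stand-ins for the kernel's __improbable()/__abortlike macros:

    /* Cold path kept out of line: the panic call sequence is large and would
     * otherwise bloat -- and often de-inline -- every caller of bump(). */
    __attribute__((noreturn, noinline, cold))
    static void
    cold_overflow_panic(void)
    {
        __builtin_trap();                         /* stands in for panic(...) */
    }

    static unsigned int counter;

    /* Hot path: a single test and branch, easy for the compiler to inline. */
    static inline void
    bump(void)
    {
        unsigned int count = counter;
        if (__builtin_expect(++count == 0, 0)) {  /* mirrors __improbable() */
            cold_overflow_panic();
        }
        counter = count;
    }

The same shape is used for both the disable-side (overflow) and enable-side (underflow) checks in these hunks.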
+ */ +__abortlike +static void +_enable_preemption_underflow(void) +{ + panic("Preemption count underflow"); +} + void _enable_preemption(void) { @@ -418,7 +447,7 @@ _enable_preemption(void) unsigned int count = thread->machine.preemption_count; if (__improbable(count == 0)) { - panic("Preemption count underflow"); + _enable_preemption_underflow(); } count -= 1; @@ -426,6 +455,8 @@ _enable_preemption(void) if (count == 0) { kernel_preempt_check(thread); } + + os_compiler_barrier(); } int @@ -442,12 +473,10 @@ lck_spin_alloc_init( lck_grp_t * grp, lck_attr_t * attr) { - lck_spin_t *lck; - - if ((lck = (lck_spin_t *) kalloc(sizeof(lck_spin_t))) != 0) { - lck_spin_init(lck, grp, attr); - } + lck_spin_t *lck; + lck = zalloc(ZV_LCK_SPIN); + lck_spin_init(lck, grp, attr); return lck; } @@ -460,7 +489,7 @@ lck_spin_free( lck_grp_t * grp) { lck_spin_destroy(lck, grp); - kfree(lck, sizeof(lck_spin_t)); + zfree(ZV_LCK_SPIN, lck); } /* @@ -716,7 +745,6 @@ int * compute the deadline to spin against when * waiting for a change of state on a lck_rw_t */ -#if __SMP__ static inline uint64_t lck_rw_deadline_for_spin(lck_rw_t *lck) { @@ -742,12 +770,10 @@ lck_rw_deadline_for_spin(lck_rw_t *lck) return mach_absolute_time() + (100000LL * 1000000000LL); } } -#endif // __SMP__ static boolean_t lck_rw_drain_status(lck_rw_t *lock, uint32_t status_mask, boolean_t wait __unused) { -#if __SMP__ uint64_t deadline = 0; uint32_t data; @@ -771,16 +797,6 @@ lck_rw_drain_status(lck_rw_t *lock, uint32_t status_mask, boolean_t wait __unuse } os_atomic_clear_exclusive(); return TRUE; -#else - uint32_t data; - - data = ordered_load_rw(lock); - if ((data & status_mask) == 0) { - return TRUE; - } else { - return FALSE; - } -#endif // __SMP__ } /* @@ -789,7 +805,6 @@ lck_rw_drain_status(lck_rw_t *lock, uint32_t status_mask, boolean_t wait __unuse static inline void lck_rw_interlock_spin(lck_rw_t *lock) { -#if __SMP__ uint32_t data; for (;;) { @@ -801,9 +816,6 @@ lck_rw_interlock_spin(lck_rw_t *lock) return; } } -#else - panic("lck_rw_interlock_spin(): Interlock locked %p %x", lock, lock->lck_rw_data); -#endif } /* @@ -839,13 +851,9 @@ lck_rw_grab(lck_rw_t *lock, int mode, boolean_t wait) uint32_t data, prev; boolean_t do_exch; -#if __SMP__ if (wait) { deadline = lck_rw_deadline_for_spin(lock); } -#else - wait = FALSE; // Don't spin on UP systems -#endif for (;;) { data = atomic_exchange_begin32(&lock->lck_rw_data, &prev, memory_order_acquire_smp); @@ -893,12 +901,10 @@ lck_rw_alloc_init( lck_grp_t *grp, lck_attr_t *attr) { - lck_rw_t *lck; - - if ((lck = (lck_rw_t *)kalloc(sizeof(lck_rw_t))) != 0) { - lck_rw_init(lck, grp, attr); - } + lck_rw_t *lck; + lck = zalloc_flags(ZV_LCK_RW, Z_WAITOK | Z_ZERO); + lck_rw_init(lck, grp, attr); return lck; } @@ -911,7 +917,7 @@ lck_rw_free( lck_grp_t *grp) { lck_rw_destroy(lck, grp); - kfree(lck, sizeof(lck_rw_t)); + zfree(ZV_LCK_RW, lck); } /* @@ -974,6 +980,40 @@ lck_rw_lock( } } +#define LCK_RW_LOCK_EXCLUSIVE_TAS(lck) (atomic_test_and_set32(&(lck)->lck_rw_data, \ + (LCK_RW_SHARED_MASK | LCK_RW_WANT_EXCL | LCK_RW_WANT_UPGRADE | LCK_RW_INTERLOCK), \ + LCK_RW_WANT_EXCL, memory_order_acquire_smp, FALSE)) + +/* + * Routine: lck_rw_lock_exclusive_check_contended + */ +bool +lck_rw_lock_exclusive_check_contended(lck_rw_t *lock) +{ + thread_t thread = current_thread(); + bool contended = false; + + if (lock->lck_rw_can_sleep) { + thread->rwlock_count++; + } else if (get_preemption_level() == 0) { + panic("Taking non-sleepable RW lock with preemption enabled"); + } + if 
(LCK_RW_LOCK_EXCLUSIVE_TAS(lock)) { +#if CONFIG_DTRACE + LOCKSTAT_RECORD(LS_LCK_RW_LOCK_EXCL_ACQUIRE, lock, DTRACE_RW_EXCL); +#endif /* CONFIG_DTRACE */ + } else { + contended = true; + lck_rw_lock_exclusive_gen(lock); + } +#if MACH_ASSERT + thread_t owner = ordered_load_rw_owner(lock); + assertf(owner == THREAD_NULL, "state=0x%x, owner=%p", ordered_load_rw(lock), owner); +#endif + ordered_store_rw_owner(lock, thread); + return contended; +} + /* * Routine: lck_rw_lock_exclusive */ @@ -982,10 +1022,12 @@ lck_rw_lock_exclusive(lck_rw_t *lock) { thread_t thread = current_thread(); - thread->rwlock_count++; - if (atomic_test_and_set32(&lock->lck_rw_data, - (LCK_RW_SHARED_MASK | LCK_RW_WANT_EXCL | LCK_RW_WANT_UPGRADE | LCK_RW_INTERLOCK), - LCK_RW_WANT_EXCL, memory_order_acquire_smp, FALSE)) { + if (lock->lck_rw_can_sleep) { + thread->rwlock_count++; + } else if (get_preemption_level() == 0) { + panic("Taking non-sleepable RW lock with preemption enabled"); + } + if (LCK_RW_LOCK_EXCLUSIVE_TAS(lock)) { #if CONFIG_DTRACE LOCKSTAT_RECORD(LS_LCK_RW_LOCK_EXCL_ACQUIRE, lock, DTRACE_RW_EXCL); #endif /* CONFIG_DTRACE */ @@ -1007,7 +1049,11 @@ lck_rw_lock_shared(lck_rw_t *lock) { uint32_t data, prev; - current_thread()->rwlock_count++; + if (lock->lck_rw_can_sleep) { + current_thread()->rwlock_count++; + } else if (get_preemption_level() == 0) { + panic("Taking non-sleepable RW lock with preemption enabled"); + } for (;;) { data = atomic_exchange_begin32(&lock->lck_rw_data, &prev, memory_order_acquire_smp); if (data & (LCK_RW_WANT_EXCL | LCK_RW_WANT_UPGRADE | LCK_RW_INTERLOCK)) { @@ -1098,7 +1144,11 @@ lck_rw_lock_shared_to_exclusive_failure( uint32_t rwlock_count; /* Check if dropping the lock means that we need to unpromote */ - rwlock_count = thread->rwlock_count--; + if (lck->lck_rw_can_sleep) { + rwlock_count = thread->rwlock_count--; + } else { + rwlock_count = UINT32_MAX; + } #if MACH_LDEBUG if (rwlock_count == 0) { panic("rw lock count underflow for thread %p", thread); @@ -1248,13 +1298,9 @@ lck_rw_lock_exclusive_to_shared(lck_rw_t *lock) for (;;) { data = atomic_exchange_begin32(&lock->lck_rw_data, &prev, memory_order_release_smp); if (data & LCK_RW_INTERLOCK) { -#if __SMP__ atomic_exchange_abort(); lck_rw_interlock_spin(lock); /* wait for interlock to clear */ continue; -#else - panic("lck_rw_lock_exclusive_to_shared(): Interlock locked (%p): %x", lock, data); -#endif // __SMP__ } data += LCK_RW_SHARED_READER; if (data & LCK_RW_WANT_UPGRADE) { @@ -1351,13 +1397,9 @@ lck_rw_try_lock_shared(lck_rw_t *lock) for (;;) { data = atomic_exchange_begin32(&lock->lck_rw_data, &prev, memory_order_acquire_smp); if (data & LCK_RW_INTERLOCK) { -#if __SMP__ atomic_exchange_abort(); lck_rw_interlock_spin(lock); continue; -#else - panic("lck_rw_try_lock_shared(): Interlock locked (%p): %x", lock, data); -#endif } if (data & (LCK_RW_WANT_EXCL | LCK_RW_WANT_UPGRADE)) { atomic_exchange_abort(); @@ -1373,7 +1415,13 @@ lck_rw_try_lock_shared(lck_rw_t *lock) thread_t owner = ordered_load_rw_owner(lock); assertf(owner == THREAD_NULL, "state=0x%x, owner=%p", ordered_load_rw(lock), owner); #endif - current_thread()->rwlock_count++; + + if (lock->lck_rw_can_sleep) { + current_thread()->rwlock_count++; + } else if (get_preemption_level() == 0) { + panic("Taking non-sleepable RW lock with preemption enabled"); + } + #if CONFIG_DTRACE LOCKSTAT_RECORD(LS_LCK_RW_TRY_LOCK_SHARED_ACQUIRE, lock, DTRACE_RW_SHARED); #endif /* CONFIG_DTRACE */ @@ -1394,13 +1442,9 @@ lck_rw_try_lock_exclusive(lck_rw_t *lock) for (;;) { data = 
atomic_exchange_begin32(&lock->lck_rw_data, &prev, memory_order_acquire_smp); if (data & LCK_RW_INTERLOCK) { -#if __SMP__ atomic_exchange_abort(); lck_rw_interlock_spin(lock); continue; -#else - panic("lck_rw_try_lock_exclusive(): Interlock locked (%p): %x", lock, data); -#endif } if (data & (LCK_RW_SHARED_MASK | LCK_RW_WANT_EXCL | LCK_RW_WANT_UPGRADE)) { atomic_exchange_abort(); @@ -1413,7 +1457,11 @@ lck_rw_try_lock_exclusive(lck_rw_t *lock) cpu_pause(); } thread = current_thread(); - thread->rwlock_count++; + if (lock->lck_rw_can_sleep) { + thread->rwlock_count++; + } else if (get_preemption_level() == 0) { + panic("Taking non-sleepable RW lock with preemption enabled"); + } #if MACH_ASSERT thread_t owner = ordered_load_rw_owner(lock); assertf(owner == THREAD_NULL, "state=0x%x, owner=%p", ordered_load_rw(lock), owner); @@ -1684,13 +1732,9 @@ lck_rw_done(lck_rw_t *lock) for (;;) { data = atomic_exchange_begin32(&lock->lck_rw_data, &prev, memory_order_release_smp); if (data & LCK_RW_INTERLOCK) { /* wait for interlock to clear */ -#if __SMP__ atomic_exchange_abort(); lck_rw_interlock_spin(lock); continue; -#else - panic("lck_rw_done(): Interlock locked (%p): %x", lock, data); -#endif // __SMP__ } if (data & LCK_RW_SHARED_MASK) { /* lock is held shared */ assertf(lock->lck_rw_owner == THREAD_NULL, "state=0x%x, owner=%p", lock->lck_rw_data, lock->lck_rw_owner); @@ -1791,7 +1835,11 @@ lck_rw_done_gen( /* Check if dropping the lock means that we need to unpromote */ thread = current_thread(); - rwlock_count = thread->rwlock_count--; + if (fake_lck.can_sleep) { + rwlock_count = thread->rwlock_count--; + } else { + rwlock_count = UINT32_MAX; + } #if MACH_LDEBUG if (rwlock_count == 0) { panic("rw lock count underflow for thread %p", thread); @@ -1912,7 +1960,10 @@ lck_rw_lock_shared_gen( #endif /* CONFIG_DTRACE */ } - +/* + * Required to verify thread ownership for exclusive locks by virtue of PPL + * usage + */ void lck_rw_assert( lck_rw_t *lck, @@ -1993,10 +2044,8 @@ lck_mtx_alloc_init( { lck_mtx_t *lck; - if ((lck = (lck_mtx_t *) kalloc(sizeof(lck_mtx_t))) != 0) { - lck_mtx_init(lck, grp, attr); - } - + lck = zalloc(ZV_LCK_MTX); + lck_mtx_init(lck, grp, attr); return lck; } @@ -2009,7 +2058,7 @@ lck_mtx_free( lck_grp_t * grp) { lck_mtx_destroy(lck, grp); - kfree(lck, sizeof(lck_mtx_t)); + zfree(ZV_LCK_MTX, lck); } /* @@ -2034,12 +2083,11 @@ lck_mtx_init( #ifdef BER_XXX if ((lck_attr->lck_attr_val) & LCK_ATTR_DEBUG) { - if ((lck_ext = (lck_mtx_ext_t *) kalloc(sizeof(lck_mtx_ext_t))) != 0) { - lck_mtx_ext_init(lck_ext, grp, lck_attr); - lck->lck_mtx_tag = LCK_MTX_TAG_INDIRECT; - lck->lck_mtx_ptr = lck_ext; - lck->lck_mtx_type = LCK_MTX_TYPE; - } + lck_ext = zalloc(ZV_LCK_MTX_EXT); + lck_mtx_ext_init(lck_ext, grp, lck_attr); + lck->lck_mtx_tag = LCK_MTX_TAG_INDIRECT; + lck->lck_mtx_ptr = lck_ext; + lck->lck_mtx_type = LCK_MTX_TYPE; } else #endif { @@ -2144,6 +2192,10 @@ static inline void lck_mtx_check_preemption(lck_mtx_t *lock) { #if DEVELOPMENT || DEBUG + if (current_cpu_datap()->cpu_hibernate) { + return; + } + int pl = get_preemption_level(); if (pl != 0) { @@ -2237,14 +2289,9 @@ set_owner: if (waiters != 0) { state |= ARM_LCK_WAITERS; } -#if __SMP__ state |= LCK_ILOCK; // Preserve interlock ordered_store_mtx(lock, state); // Set ownership interlock_unlock(lock); // Release interlock, enable preemption -#else - ordered_store_mtx(lock, state); // Set ownership - enable_preemption(); -#endif done: load_memory_barrier(); @@ -2271,7 +2318,6 @@ static spinwait_result_t 
lck_mtx_lock_contended_spinwait_arm(lck_mtx_t *lock, thread_t thread, boolean_t interlocked) { int has_interlock = (int)interlocked; -#if __SMP__ __kdebug_only uintptr_t trace_lck = VM_KERNEL_UNSLIDE_OR_PERM(lock); thread_t owner, prev_owner; uint64_t window_deadline, sliding_deadline, high_deadline; @@ -2345,8 +2391,7 @@ lck_mtx_lock_contended_spinwait_arm(lck_mtx_t *lock, thread_t thread, boolean_t * We are holding the interlock, so * we can safely dereference owner. */ - if (!(owner->machine.machine_thread_flags & MACHINE_THREAD_FLAGS_ON_CPU) || - (owner->state & TH_IDLE)) { + if (!machine_thread_on_core(owner) || (owner->state & TH_IDLE)) { retval = SPINWAIT_DID_NOT_SPIN; goto done_spinning; } @@ -2594,11 +2639,6 @@ done_spinning: KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_LCK_SPIN_CODE) | DBG_FUNC_END, trace_lck, VM_KERNEL_UNSLIDE_OR_PERM(LCK_MTX_STATE_TO_THREAD(state)), lock->lck_mtx_waiters, retval, 0); -#else /* __SMP__ */ - /* Spinwaiting is not useful on UP systems. */ -#pragma unused(lock, thread) - int retval = SPINWAIT_DID_NOT_SPIN; -#endif /* __SMP__ */ if ((!has_interlock) && (retval != SPINWAIT_ACQUIRED)) { /* We must own either the lock or the interlock on return. */ interlock_lock(lock); @@ -2684,7 +2724,6 @@ lck_mtx_try_lock_contended(lck_mtx_t *lock, thread_t thread) uintptr_t state; int waiters; -#if __SMP__ interlock_lock(lock); state = ordered_load_mtx(lock); holding_thread = LCK_MTX_STATE_TO_THREAD(state); @@ -2692,33 +2731,14 @@ lck_mtx_try_lock_contended(lck_mtx_t *lock, thread_t thread) interlock_unlock(lock); return FALSE; } -#else - disable_preemption_for_thread(thread); - state = ordered_load_mtx(lock); - if (state & LCK_ILOCK) { - panic("Unexpected interlock set (%p)", lock); - } - holding_thread = LCK_MTX_STATE_TO_THREAD(state); - if (holding_thread) { - enable_preemption(); - return FALSE; - } - state |= LCK_ILOCK; - ordered_store_mtx(lock, state); -#endif // __SMP__ waiters = lck_mtx_lock_acquire(lock, NULL); state = LCK_MTX_THREAD_TO_STATE(thread); if (waiters != 0) { state |= ARM_LCK_WAITERS; } -#if __SMP__ state |= LCK_ILOCK; // Preserve interlock ordered_store_mtx(lock, state); // Set ownership interlock_unlock(lock); // Release interlock, enable preemption -#else - ordered_store_mtx(lock, state); // Set ownership - enable_preemption(); -#endif load_memory_barrier(); turnstile_cleanup(); @@ -2818,24 +2838,11 @@ lck_mtx_unlock_contended(lck_mtx_t *lock, thread_t thread, boolean_t ilk_held) if (ilk_held) { state = ordered_load_mtx(lock); } else { -#if __SMP__ interlock_lock(lock); state = ordered_load_mtx(lock); if (thread != LCK_MTX_STATE_TO_THREAD(state)) { panic("lck_mtx_unlock(): Attempt to release lock not owned by thread (%p)", lock); } -#else - disable_preemption_for_thread(thread); - state = ordered_load_mtx(lock); - if (state & LCK_ILOCK) { - panic("lck_mtx_unlock(): Unexpected interlock set (%p)", lock); - } - if (thread != LCK_MTX_STATE_TO_THREAD(state)) { - panic("lck_mtx_unlock(): Attempt to release lock not owned by thread (%p)", lock); - } - state |= LCK_ILOCK; - ordered_store_mtx(lock, state); -#endif if (state & ARM_LCK_WAITERS) { if (lck_mtx_unlock_wakeup(lock, thread)) { state = ARM_LCK_WAITERS; @@ -2848,14 +2855,9 @@ lck_mtx_unlock_contended(lck_mtx_t *lock, thread_t thread, boolean_t ilk_held) } state &= ARM_LCK_WAITERS; /* Clear state, retain waiters bit */ unlock: -#if __SMP__ state |= LCK_ILOCK; ordered_store_mtx(lock, state); interlock_unlock(lock); -#else - ordered_store_mtx(lock, state); - enable_preemption(); -#endif if 
(cleanup) { /* * Do not do any turnstile operations outside of this block. @@ -2937,14 +2939,9 @@ lck_mtx_convert_spin(lck_mtx_t *lock) if (waiters != 0) { state |= ARM_LCK_WAITERS; } -#if __SMP__ state |= LCK_ILOCK; ordered_store_mtx(lock, state); // Set ownership interlock_unlock(lock); // Release interlock, enable preemption -#else - ordered_store_mtx(lock, state); // Set ownership - enable_preemption(); -#endif turnstile_cleanup(); } diff --git a/osfmk/arm/locore.s b/osfmk/arm/locore.s index 1a544b0d8..d3d808330 100644 --- a/osfmk/arm/locore.s +++ b/osfmk/arm/locore.s @@ -57,11 +57,13 @@ #include #include #include +#include #include #include #include #include #include "assym.s" +#include "dwarf_unwind.h" #define TRACE_SYSCALL 0 @@ -101,7 +103,6 @@ Lreset_low_vector: adr r4, EXT(ResetHandlerData) ldr r0, [r4, BOOT_ARGS] ldr r1, [r4, CPU_DATA_ENTRIES] -#if __ARM_SMP__ #if defined(ARMA7) // physical cpu number is stored in MPIDR Affinity level 0 mrc p15, 0, r6, c0, c0, 5 // Read MPIDR @@ -109,9 +110,6 @@ Lreset_low_vector: #else #error missing Who Am I implementation #endif -#else - mov r6, #0 -#endif /* __ARM_SMP__ */ // physical cpu number matches cpu number //#if cdeSize != 16 //#error cpu_data_entry is not 16bytes in size @@ -529,7 +527,7 @@ swi_from_user: /* Check for special mach_absolute_time trap value. * This is intended to be a super-lightweight call to ml_get_timebase(), which * is handrolled assembly and does not use the stack, thus not requiring us to setup a kernel stack. */ - cmp r12, #-3 + cmp r12, #MACH_ARM_TRAP_ABSTIME beq fleh_swi_trap_tb stmia sp, {r0-r12, sp, lr}^ // Save user context on PCB mov r7, #0 // Zero the frame pointer @@ -564,14 +562,14 @@ swi_from_user: #if !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME bl EXT(timer_state_event_user_to_kernel) mrc p15, 0, r9, c13, c0, 4 // Read TPIDRPRW - add r8, r9, ACT_PCBDATA // Reload arm_saved_state pointer + add r8, r9, ACT_PCBDATA // Reload arm_saved_state pointer #endif - ldr r10, [r9, ACT_TASK] // Load the current task + ldr r10, [r9, ACT_TASK] // Load the current task /* enable interrupts */ - cpsie i // Enable IRQ + cpsie i // Enable IRQ - cmp r11, #-4 // Special value for mach_continuous_time + cmp r11, #MACH_ARM_TRAP_CONTTIME // Special value for mach_continuous_time beq fleh_swi_trap_mct cmp r11, #0x80000000 @@ -887,6 +885,9 @@ IF_USERMODE_EXCEPTION prefabt b load_and_go_user ELSE_IF_KERNELMODE_EXCEPTION prefabt + +UNWIND_PROLOGUE + /* * We have a kernel stack already, and I will use it to save contexts: * ------------------ @@ -941,6 +942,10 @@ ELSE_IF_KERNELMODE_EXCEPTION prefabt mov r0, sp ALIGN_STACK r1, r2 mov r1, T_PREFETCH_ABT // Pass abort type + + +UNWIND_DIRECTIVES + bl EXT(sleh_abort) // Call second level handler UNALIGN_STACK @@ -961,6 +966,7 @@ ELSE_IF_KERNELMODE_EXCEPTION prefabt b load_and_go_sys +UNWIND_EPILOGUE /* * First Level Exception Handler for Data Abort @@ -1028,6 +1034,9 @@ IF_USERMODE_EXCEPTION dataabt b load_and_go_user ELSE_IF_KERNELMODE_EXCEPTION dataabt + +UNWIND_PROLOGUE + /* * We have a kernel stack already, and I will use it to save contexts: * ------------------ @@ -1081,6 +1090,9 @@ ELSE_IF_KERNELMODE_EXCEPTION dataabt mov r0, sp // Argument ALIGN_STACK r1, r2 mov r1, T_DATA_ABT // Pass abort type + +UNWIND_DIRECTIVES + bl EXT(sleh_abort) // Call second level handler UNALIGN_STACK @@ -1179,7 +1191,9 @@ lags1: ldmia sp, {r0-r12} // Restore other registers movs pc, lr // Return to sys (svc, irq, fiq) - + +UNWIND_EPILOGUE + /* * First Level Exception Handler for address exception * Not 
supported @@ -1349,15 +1363,17 @@ fleh_irq_handler: mrc p15, 0, r9, c13, c0, 4 // Reload r9 from TPIDRPRW bl EXT(ml_get_timebase) // get current timebase LOAD_ADDR(r3, EntropyData) - ldr r2, [r3, ENTROPY_SAMPLE_COUNT] - add r1, r2, 1 - str r1, [r3, ENTROPY_SAMPLE_COUNT] - and r2, r2, ENTROPY_BUFFER_INDEX_MASK - add r1, r3, ENTROPY_BUFFER - ldr r4, [r1, r2, lsl #2] + ldr r1, [r3, ENTROPY_SAMPLE_COUNT] + ldr r2, [r3, ENTROPY_BUFFER_INDEX_MASK] + add r4, r1, 1 + and r5, r1, r2 + str r4, [r3, ENTROPY_SAMPLE_COUNT] + ldr r1, [r3, ENTROPY_BUFFER] + ldr r2, [r3, ENTROPY_BUFFER_ROR_MASK] + ldr r4, [r1, r5, lsl #2] + and r4, r4, r2 eor r0, r0, r4, ror #9 - str r0, [r1, r2, lsl #2] // Update gEntropie - + str r0, [r1, r5, lsl #2] return_from_irq: mov r5, #0 ldr r4, [r9, ACT_CPUDATAP] // Get current cpu @@ -1870,21 +1886,25 @@ return_to_user_now: /* * Assert that the preemption level is zero prior to the return to user space */ - ldr r1, [r9, ACT_PREEMPT_CNT] // Load preemption count - movs r1, r1 // Test - beq 0f // Continue if zero, or... - adr r0, L_lagu_panic_str // Load the panic string... - blx EXT(panic) // Finally, panic -0: - ldr r2, [r9, TH_RWLOCK_CNT] // Load RW lock count - movs r2, r2 // Test - beq 0f // Continue if zero, or... - adr r0, L_lagu_rwlock_cnt_panic_str // Load the panic string... - mov r1, r9 // Thread argument for panic string - blx EXT(panic) // Finally, panic + ldr r1, [r9, ACT_PREEMPT_CNT] // Load preemption count + cmp r1, #0 // Test + bne L_lagu_preempt_panic // Panic if not zero + +/* + * Assert that the preemption level is zero prior to the return to user space + */ + ldr r2, [r9, TH_RWLOCK_CNT] // Load RW lock count + cmp r2, #0 // Test + bne L_lagu_rwlock_cnt_panic // Panic if not zero #endif -0: +/* + * Assert that we aren't leaking KHEAP_TEMP allocations prior to the return to user space + */ + ldr r1, [r9, TH_TMP_ALLOC_CNT] // Load temp alloc count + cmp r1, #0 // Test + bne L_lagu_temp_alloc_cnt_panic // Panic if not zero + #if !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME bl EXT(timer_state_event_kernel_to_user) mrc p15, 0, r9, c13, c0, 4 // Read TPIDRPRW @@ -1929,8 +1949,34 @@ return_to_user_now: nop // Hardware problem movs pc, lr // Return to user +/* + * r1: tmp alloc count + * r9: current_thread() + */ +L_lagu_temp_alloc_cnt_panic: + mov r0, r9 // Thread argument + blx EXT(kheap_temp_leak_panic) // Finally, panic + +#if MACH_ASSERT +/* + * r1: current preemption count + * r9: current_thread() + */ +L_lagu_preempt_panic: + adr r0, L_lagu_preempt_panic_str // Load the panic string... + blx EXT(panic) // Finally, panic + +/* + * r2: rwlock count + * r9: current_thread() + */ +L_lagu_rwlock_cnt_panic: + adr r0, L_lagu_rwlock_cnt_panic_str // Load the panic string... 
+ mov r1, r9 // Thread argument for panic string + blx EXT(panic) // Finally, panic + .align 2 -L_lagu_panic_str: +L_lagu_preempt_panic_str: .asciz "load_and_go_user: preemption_level %d" .align 2 @@ -1938,11 +1984,12 @@ L_lagu_panic_str: L_lagu_rwlock_cnt_panic_str: .asciz "load_and_go_user: RW lock count not 0 on thread %p (%u)" .align 2 +#endif /* MACH_ASSERT */ - .align 2 + .align 2 L_evimpanic_str: - .ascii "Exception Vector: Illegal Mode: 0x%08X\n\000" - .align 2 + .ascii "Exception Vector: Illegal Mode: 0x%08X\n\000" + .align 2 .text .align 2 diff --git a/osfmk/arm/loose_ends.c b/osfmk/arm/loose_ends.c index cd9ff9021..a78f820ee 100644 --- a/osfmk/arm/loose_ends.c +++ b/osfmk/arm/loose_ends.c @@ -118,7 +118,7 @@ bzero_phys(addr64_t src, vm_size_t bytes) ppnum_t pn = (src >> PAGE_SHIFT); wimg_bits = pmap_cache_attributes(pn); - if ((wimg_bits & VM_WIMG_MASK) == VM_WIMG_DEFAULT) { + if (__probable((wimg_bits & VM_WIMG_MASK) == VM_WIMG_DEFAULT)) { /* Fast path - default attributes */ bzero((char *)phystokv((pmap_paddr_t) src), bytes); } else { @@ -517,6 +517,36 @@ memcmp(const void *s1, const void *s2, size_t n) return 0; } +unsigned long +memcmp_zero_ptr_aligned(const void *s, size_t n) +{ + uintptr_t p = (uintptr_t)s; + uintptr_t end = (uintptr_t)s + n; + uint32_t a, b; + + static_assert(sizeof(unsigned long) == sizeof(uint32_t)); + + a = *(const uint32_t *)p; + b = *(const uint32_t *)(end - sizeof(uint32_t)); + + /* + * align p to the next 64bit boundary + * align end to the previous 64bit boundary + * + * and do a nice ldrd loop. + */ + p = (p + sizeof(uint64_t) - 1) & -sizeof(uint64_t); + end &= -sizeof(uint64_t); + + for (; p < end; p += sizeof(uint64_t)) { + uint64_t v = *(const uint64_t *)p; + a |= (uint32_t)v; + b |= (uint32_t)(v >> 32); + } + + return a | b; +} + kern_return_t copypv(addr64_t source, addr64_t sink, unsigned int size, int which) { diff --git a/osfmk/arm/lowglobals.h b/osfmk/arm/lowglobals.h index a63fe6f5b..129a0e1da 100644 --- a/osfmk/arm/lowglobals.h +++ b/osfmk/arm/lowglobals.h @@ -37,35 +37,37 @@ #define LOWGLO_LAYOUT_MAGIC 0xC0DEC0DE -#pragma pack(4) /* Make sure the structure stays as we defined it */ +#pragma pack(4) /* Make sure the structure stays as we defined it */ typedef struct lowglo { - unsigned char lgVerCode[8]; /* 0xffff1000 System verification code */ - uint32_t lgZero[2]; /* 0xffff1008 Double constant 0 */ - uint32_t lgStext; /* 0xffff1010 Start of kernel text */ - uint32_t lgRsv014[2]; /* 0xffff1014 Reserved */ - uint32_t lgVersion; /* 0xffff101C Pointer to kernel version string */ - uint32_t lgRsv020[216]; /* 0xffff1020 Reserved */ - uint32_t lgKmodptr; /* 0xffff1380 Pointer to kmod, debugging aid */ - uint32_t lgTransOff; /* 0xffff1384 Pointer to kdp_trans_off, debugging aid */ - uint32_t lgRsv388[3]; /* 0xffff1388 Reserved */ - uint32_t lgOSVersion; /* 0xffff1394 Pointer to OS version string */ - uint32_t lgRsv398; /* 0xffff1398 Reserved */ - uint32_t lgRebootFlag; /* 0xffff139C Pointer to debugger reboot trigger */ - uint32_t lgManualPktAddr; /* 0xffff13A0 Pointer to manual packet structure */ - uint32_t lgRsv3A4; /* 0xffff13A4 Reserved */ - uint32_t lgPmapMemQ; /* 0xffff13A8 Pointer to PMAP memory queue */ - uint32_t lgPmapMemPageOffset;/* 0xffff13AC Offset of physical page member in vm_page_with_ppnum_t */ - uint32_t lgPmapMemChainOffset;/*0xffff13B0 Offset of listq in vm_page_t or vm_page_with_ppnum_t */ - uint32_t lgStaticAddr; /* 0xffff13B4 Static allocation address */ - uint32_t lgStaticSize; /* 0xffff13B8 Static allocation size 
*/ - uint32_t lgLayoutMajorVersion; /* 0xffff13BC Lowglo layout major version */ - uint32_t lgLayoutMagic; /* 0xffff13C0 Magic value evaluated to determine if lgLayoutVersion is valid */ - uint32_t lgPmapMemStartAddr; /* 0xffff13C4 Pointer to start of vm_page_t array */ - uint32_t lgPmapMemEndAddr; /* 0xffff13C8 Pointer to end of vm_page_t array */ - uint32_t lgPmapMemPagesize; /* 0xffff13CC size of vm_page_t */ - uint32_t lgPmapMemFirstppnum; /* 0xffff13D0 physical page number of the first vm_page_t in the array */ - uint32_t lgLayoutMinorVersion; /* 0xffff13D4 Lowglo layout minor version */ - uint32_t lgPageShift; /* 0xffff13D8 Number of shifts from page number to size */ + unsigned char lgVerCode[8]; /* 0xffff1000 System verification code */ + uint32_t lgZero[2]; /* 0xffff1008 Double constant 0 */ + uint32_t lgStext; /* 0xffff1010 Start of kernel text */ + uint32_t lgRsv014[2]; /* 0xffff1014 Reserved */ + uint32_t lgVersion; /* 0xffff101C Pointer to kernel version string */ + uint32_t lgRsv020[216]; /* 0xffff1020 Reserved */ + uint32_t lgKmodptr; /* 0xffff1380 Pointer to kmod, debugging aid */ + uint32_t lgTransOff; /* 0xffff1384 Pointer to kdp_trans_off, debugging aid */ + uint32_t lgRsv388[3]; /* 0xffff1388 Reserved */ + uint32_t lgOSVersion; /* 0xffff1394 Pointer to OS version string */ + uint32_t lgRsv398; /* 0xffff1398 Reserved */ + uint32_t lgRebootFlag; /* 0xffff139C Pointer to debugger reboot trigger */ + uint32_t lgManualPktAddr; /* 0xffff13A0 Pointer to manual packet structure */ + uint32_t lgRsv3A4; /* 0xffff13A4 Reserved */ + uint32_t lgPmapMemQ; /* 0xffff13A8 Pointer to PMAP memory queue */ + uint32_t lgPmapMemPageOffset; /* 0xffff13AC Offset of physical page member in vm_page_with_ppnum_t */ + uint32_t lgPmapMemChainOffset; /* 0xffff13B0 Offset of listq in vm_page_t or vm_page_with_ppnum_t */ + uint32_t lgStaticAddr; /* 0xffff13B4 Static allocation address */ + uint32_t lgStaticSize; /* 0xffff13B8 Static allocation size */ + uint32_t lgLayoutMajorVersion; /* 0xffff13BC Lowglo layout major version */ + uint32_t lgLayoutMagic; /* 0xffff13C0 Magic value evaluated to determine if lgLayoutVersion is valid */ + uint32_t lgPmapMemStartAddr; /* 0xffff13C4 Pointer to start of vm_page_t array */ + uint32_t lgPmapMemEndAddr; /* 0xffff13C8 Pointer to end of vm_page_t array */ + uint32_t lgPmapMemPagesize; /* 0xffff13CC size of vm_page_t */ + uint32_t lgPmapMemFirstppnum; /* 0xffff13D0 physical page number of the first vm_page_t in the array */ + uint32_t lgLayoutMinorVersion; /* 0xffff13D4 Lowglo layout minor version */ + uint32_t lgPageShift; /* 0xffff13D8 Number of shifts from page number to size */ + uint32_t lgVmFirstPhys; /* 0xffff13DC First physical address of kernel-managed DRAM (inclusive) */ + uint32_t lgVmLastPhys; /* 0xffff13E0 Last physical address of kernel-managed DRAM (exclusive) */ } lowglo; #pragma pack() diff --git a/osfmk/arm/lowmem_vectors.c b/osfmk/arm/lowmem_vectors.c index 77fe49fdd..96702d830 100644 --- a/osfmk/arm/lowmem_vectors.c +++ b/osfmk/arm/lowmem_vectors.c @@ -48,7 +48,7 @@ lowglo lowGlo __attribute__ ((aligned(PAGE_MAX_SIZE))) = { // Increment the minor version for changes that provide additonal info/function // but does not break current usage .lgLayoutMajorVersion = 3, - .lgLayoutMinorVersion = 0, + .lgLayoutMinorVersion = 2, .lgLayoutMagic = LOWGLO_LAYOUT_MAGIC, .lgVersion = (uint32_t)&version, .lgKmodptr = (uint32_t)&kmod, @@ -65,10 +65,11 @@ lowglo lowGlo __attribute__ ((aligned(PAGE_MAX_SIZE))) = { .lgPmapMemPageOffset = offsetof(struct 
vm_page_with_ppnum, vmp_phys_page), .lgPmapMemChainOffset = offsetof(struct vm_page, vmp_listq), .lgPmapMemPagesize = (uint32_t)sizeof(struct vm_page), - .lgPmapMemStartAddr = -1, .lgPmapMemEndAddr = -1, - .lgPmapMemFirstppnum = -1 + .lgPmapMemFirstppnum = -1, + .lgVmFirstPhys = -1, + .lgVmLastPhys = -1 }; void @@ -82,8 +83,18 @@ patch_low_glo_static_region(uint32_t address, uint32_t size) { lowGlo.lgStaticAddr = address; lowGlo.lgStaticSize = size; -} + /** + * These values are set in pmap_bootstrap() and represent the range of + * kernel managed memory. + */ + extern const pmap_paddr_t vm_first_phys; + extern const pmap_paddr_t vm_last_phys; + assertf((vm_first_phys != 0) && (vm_last_phys != 0), + "Tried setting the Low Globals before pmap_bootstrap()"); + lowGlo.lgVmFirstPhys = vm_first_phys; + lowGlo.lgVmLastPhys = vm_last_phys; +} void patch_low_glo_vm_page_info(void * start_addr, void * end_addr, uint32_t first_ppnum) diff --git a/osfmk/arm/machine_cpuid.c b/osfmk/arm/machine_cpuid.c index a29074a2c..b104780ac 100644 --- a/osfmk/arm/machine_cpuid.c +++ b/osfmk/arm/machine_cpuid.c @@ -84,10 +84,10 @@ void machine_write_csselr(csselr_cache_level level, csselr_cache_type type) { #if __arm__ - uint32_t csselr = (level | type); + uint32_t csselr = (uint32_t)level | (uint32_t)type; __builtin_arm_mcr(15, 2, csselr, 0, 0, 0); #else - uint64_t csselr = (level | type); + uint64_t csselr = (uint64_t)level | (uint64_t)type; __asm__ volatile ("msr CSSELR_EL1, %0" : : "r" (csselr)); #endif __builtin_arm_isb(ISB_SY); diff --git a/osfmk/arm/machine_routines.c b/osfmk/arm/machine_routines.c index 1a21043f4..145a783d3 100644 --- a/osfmk/arm/machine_routines.c +++ b/osfmk/arm/machine_routines.c @@ -33,6 +33,7 @@ #include #include #include +#include #include #include #include @@ -46,6 +47,7 @@ #include #include #include +#include #include #include #include @@ -58,44 +60,34 @@ #include #endif -static int max_cpus_initialized = 0; -#define MAX_CPUS_SET 0x1 -#define MAX_CPUS_WAIT 0x2 - -static unsigned int avail_cpus = 0; +/* arm32 only supports a highly simplified topology, fixed at 1 cluster */ +static ml_topology_cpu_t topology_cpu_array[MAX_CPUS]; +static ml_topology_cluster_t topology_cluster = { + .cluster_id = 0, + .cluster_type = CLUSTER_TYPE_SMP, + .first_cpu_id = 0, +}; +static ml_topology_info_t topology_info = { + .version = CPU_TOPOLOGY_VERSION, + .num_clusters = 1, + .max_cluster_id = 0, + .cpus = topology_cpu_array, + .clusters = &topology_cluster, + .boot_cpu = &topology_cpu_array[0], + .boot_cluster = &topology_cluster, +}; uint32_t LockTimeOut; uint32_t LockTimeOutUsec; uint64_t TLockTimeOut; uint64_t MutexSpin; +extern uint32_t lockdown_done; uint64_t low_MutexSpin; int64_t high_MutexSpin; -boolean_t is_clock_configured = FALSE; - -#if CONFIG_NONFATAL_ASSERTS -extern int mach_assert; -#endif -extern volatile uint32_t debug_enabled; - -void machine_conf(void); - void machine_startup(__unused boot_args * args) { - int boot_arg; - -#if CONFIG_NONFATAL_ASSERTS - PE_parse_boot_argn("assert", &mach_assert, sizeof(mach_assert)); -#endif - - if (PE_parse_boot_argn("preempt", &boot_arg, sizeof(boot_arg))) { - default_preemption_rate = boot_arg; - } - if (PE_parse_boot_argn("bg_preempt", &boot_arg, sizeof(boot_arg))) { - default_bg_preemption_rate = boot_arg; - } - machine_conf(); /* @@ -113,23 +105,6 @@ machine_boot_info( return PE_boot_args(); } -void -machine_conf(void) -{ - machine_info.memory_size = mem_size; -} - -void -machine_init(void) -{ - debug_log_init(); - clock_config(); - 
is_clock_configured = TRUE; - if (debug_enabled) { - pmap_map_globals(); - } -} - void slave_machine_init(__unused void *param) { @@ -150,47 +125,6 @@ machine_processor_shutdown( return Shutdown_context(doshutdown, processor); } -/* - * Routine: ml_init_max_cpus - * Function: - */ -void -ml_init_max_cpus(unsigned int max_cpus) -{ - boolean_t current_state; - - current_state = ml_set_interrupts_enabled(FALSE); - if (max_cpus_initialized != MAX_CPUS_SET) { - machine_info.max_cpus = max_cpus; - machine_info.physical_cpu_max = max_cpus; - machine_info.logical_cpu_max = max_cpus; - if (max_cpus_initialized == MAX_CPUS_WAIT) { - thread_wakeup((event_t) &max_cpus_initialized); - } - max_cpus_initialized = MAX_CPUS_SET; - } - (void) ml_set_interrupts_enabled(current_state); -} - -/* - * Routine: ml_get_max_cpus - * Function: - */ -unsigned int -ml_get_max_cpus(void) -{ - boolean_t current_state; - - current_state = ml_set_interrupts_enabled(FALSE); - if (max_cpus_initialized != MAX_CPUS_SET) { - max_cpus_initialized = MAX_CPUS_WAIT; - assert_wait((event_t) &max_cpus_initialized, THREAD_UNINT); - (void) thread_block(THREAD_CONTINUE_NULL); - } - (void) ml_set_interrupts_enabled(current_state); - return machine_info.max_cpus; -} - /* * Routine: ml_init_lock_timeout * Function: @@ -332,12 +266,6 @@ ml_get_max_offset( return pmap_max_offset(is64, pmap_max_offset_option); } -boolean_t -ml_wants_panic_trap_to_debugger(void) -{ - return FALSE; -} - void ml_panic_trap_to_debugger(__unused const char *panic_format_str, __unused va_list *panic_args, @@ -424,10 +352,7 @@ ml_install_interrupt_handler( cpu_data_ptr->interrupt_handler = handler; cpu_data_ptr->interrupt_refCon = refCon; - cpu_data_ptr->interrupts_enabled = TRUE; (void) ml_set_interrupts_enabled(current_state); - - initialize_screen(NULL, kPEAcquireScreen); } /* @@ -462,12 +387,6 @@ ml_init_timebase( } } -void -fiq_context_bootstrap(boolean_t enable_fiq) -{ - fiq_context_init(enable_fiq); -} - void ml_parse_cpu_topology(void) { @@ -476,46 +395,73 @@ ml_parse_cpu_topology(void) uint32_t cpu_boot_arg; int err; - err = DTLookupEntry(NULL, "/cpus", &entry); + err = SecureDTLookupEntry(NULL, "/cpus", &entry); assert(err == kSuccess); - err = DTInitEntryIterator(entry, &iter); + err = SecureDTInitEntryIterator(entry, &iter); assert(err == kSuccess); - while (kSuccess == DTIterateEntries(&iter, &child)) { + cpu_boot_arg = MAX_CPUS; + PE_parse_boot_argn("cpus", &cpu_boot_arg, sizeof(cpu_boot_arg)); + + ml_topology_cluster_t *cluster = &topology_info.clusters[0]; + unsigned int cpu_id = 0; + while (kSuccess == SecureDTIterateEntries(&iter, &child)) { #if MACH_ASSERT unsigned int propSize; - void *prop = NULL; - if (avail_cpus == 0) { - if (kSuccess != DTGetProperty(child, "state", &prop, &propSize)) { - panic("unable to retrieve state for cpu %u", avail_cpus); + void const *prop = NULL; + if (cpu_id == 0) { + if (kSuccess != SecureDTGetProperty(child, "state", &prop, &propSize)) { + panic("unable to retrieve state for cpu %u", cpu_id); } - if (strncmp((char*)prop, "running", propSize) != 0) { + if (strncmp((char const *)prop, "running", propSize) != 0) { panic("cpu 0 has not been marked as running!"); } } - assert(kSuccess == DTGetProperty(child, "reg", &prop, &propSize)); - assert(avail_cpus == *((uint32_t*)prop)); + assert(kSuccess == SecureDTGetProperty(child, "reg", &prop, &propSize)); + assert(cpu_id == *((uint32_t const *)prop)); #endif - ++avail_cpus; - } + if (cpu_id >= cpu_boot_arg) { + break; + } + + ml_topology_cpu_t *cpu = 
&topology_info.cpus[cpu_id]; + + cpu->cpu_id = cpu_id; + cpu->phys_id = cpu_id; + cpu->cluster_type = cluster->cluster_type; - cpu_boot_arg = avail_cpus; - if (PE_parse_boot_argn("cpus", &cpu_boot_arg, sizeof(cpu_boot_arg)) && - (avail_cpus > cpu_boot_arg)) { - avail_cpus = cpu_boot_arg; + cluster->num_cpus++; + cluster->cpu_mask |= 1ULL << cpu_id; + + topology_info.num_cpus++; + topology_info.max_cpu_id = cpu_id; + + cpu_id++; } - if (avail_cpus == 0) { + if (cpu_id == 0) { panic("No cpus found!"); } } +const ml_topology_info_t * +ml_get_topology_info(void) +{ + return &topology_info; +} + unsigned int ml_get_cpu_count(void) { - return avail_cpus; + return topology_info.num_cpus; +} + +unsigned int +ml_get_cluster_count(void) +{ + return topology_info.num_clusters; } int @@ -533,13 +479,35 @@ ml_get_boot_cluster(void) int ml_get_cpu_number(uint32_t phys_id) { + if (phys_id > (uint32_t)ml_get_max_cpu_number()) { + return -1; + } + return (int)phys_id; } +int +ml_get_cluster_number(__unused uint32_t phys_id) +{ + return 0; +} + int ml_get_max_cpu_number(void) { - return avail_cpus - 1; + return topology_info.num_cpus - 1; +} + +int +ml_get_max_cluster_number(void) +{ + return topology_info.max_cluster_id; +} + +unsigned int +ml_get_first_cpu_id(unsigned int cluster_id) +{ + return topology_info.clusters[cluster_id].first_cpu_id; } kern_return_t @@ -550,7 +518,8 @@ ml_processor_register(ml_processor_info_t *in_processor_info, cpu_data_t *this_cpu_datap; boolean_t is_boot_cpu; - if (in_processor_info->phys_id >= MAX_CPUS) { + const unsigned int max_cpu_id = ml_get_max_cpu_number(); + if (in_processor_info->phys_id > max_cpu_id) { /* * The physical CPU ID indicates that we have more CPUs than * this xnu build support. This probably means we have an @@ -560,11 +529,11 @@ ml_processor_register(ml_processor_info_t *in_processor_info, * is simply a convenient way to catch bugs in the pexpert * headers. */ - panic("phys_id %u is too large for MAX_CPUS (%u)", in_processor_info->phys_id, MAX_CPUS); + panic("phys_id %u is too large for max_cpu_id (%u)", in_processor_info->phys_id, max_cpu_id); } /* Fail the registration if the number of CPUs has been limited by boot-arg. 
*/ - if ((in_processor_info->phys_id >= avail_cpus) || + if ((in_processor_info->phys_id >= topology_info.num_cpus) || (in_processor_info->log_id > (uint32_t)ml_get_max_cpu_number())) { return KERN_FAILURE; } @@ -591,22 +560,23 @@ ml_processor_register(ml_processor_info_t *in_processor_info, } } - this_cpu_datap->cpu_idle_notify = (void *) in_processor_info->processor_idle; - this_cpu_datap->cpu_cache_dispatch = in_processor_info->platform_cache_dispatch; + this_cpu_datap->cpu_idle_notify = in_processor_info->processor_idle; + this_cpu_datap->cpu_cache_dispatch = (cache_dispatch_t) in_processor_info->platform_cache_dispatch; nanoseconds_to_absolutetime((uint64_t) in_processor_info->powergate_latency, &this_cpu_datap->cpu_idle_latency); this_cpu_datap->cpu_reset_assist = kvtophys(in_processor_info->powergate_stub_addr); - this_cpu_datap->idle_timer_notify = (void *) in_processor_info->idle_timer; + this_cpu_datap->idle_timer_notify = in_processor_info->idle_timer; this_cpu_datap->idle_timer_refcon = in_processor_info->idle_timer_refcon; - this_cpu_datap->platform_error_handler = (void *) in_processor_info->platform_error_handler; + this_cpu_datap->platform_error_handler = in_processor_info->platform_error_handler; this_cpu_datap->cpu_regmap_paddr = in_processor_info->regmap_paddr; this_cpu_datap->cpu_phys_id = in_processor_info->phys_id; this_cpu_datap->cpu_l2_access_penalty = in_processor_info->l2_access_penalty; + processor_t processor = PERCPU_GET_RELATIVE(processor, cpu_data, this_cpu_datap); if (!is_boot_cpu) { - processor_init((struct processor *)this_cpu_datap->cpu_processor, - this_cpu_datap->cpu_number, processor_pset(master_processor)); + processor_init(processor, this_cpu_datap->cpu_number, + processor_pset(master_processor)); if (this_cpu_datap->cpu_l2_access_penalty) { /* @@ -615,12 +585,11 @@ ml_processor_register(ml_processor_info_t *in_processor_info, * scheduler, so that threads use the cores with better L2 * preferentially. */ - processor_set_primary(this_cpu_datap->cpu_processor, - master_processor); + processor_set_primary(processor, master_processor); } } - *processor_out = this_cpu_datap->cpu_processor; + *processor_out = processor; *ipi_handler_out = cpu_signal_handler; *pmi_handler_out = NULL; if (in_processor_info->idle_tickle != (idle_tickle_t *) NULL) { @@ -728,6 +697,13 @@ ml_io_map_wcomb( return io_map(phys_addr, size, VM_WIMG_WCOMB); } +void +ml_io_unmap(vm_offset_t addr, vm_size_t sz) +{ + pmap_remove(kernel_pmap, addr, addr + sz); + kmem_free(kernel_map, addr, sz); +} + /* boot memory allocation */ vm_offset_t ml_static_malloc( @@ -784,6 +760,18 @@ ml_static_slide( return VM_KERNEL_SLIDE(vaddr); } +kern_return_t +ml_static_verify_page_protections( + uint64_t base, uint64_t size, vm_prot_t prot) +{ + /* XXX Implement Me */ + (void)base; + (void)size; + (void)prot; + return KERN_FAILURE; +} + + vm_offset_t ml_static_unslide( vm_offset_t vaddr) @@ -812,6 +800,9 @@ ml_static_protect( if ((new_prot & VM_PROT_WRITE) && (new_prot & VM_PROT_EXECUTE)) { panic("ml_static_protect(): WX request on %p", (void *) vaddr); } + if (lockdown_done && (new_prot & VM_PROT_EXECUTE)) { + panic("ml_static_protect(): attempt to inject executable mapping on %p", (void *) vaddr); + } /* Set up the protection bits, and block bits so we can validate block mappings. */ if (new_prot & VM_PROT_WRITE) { @@ -876,6 +867,7 @@ ml_static_mfree( vm_offset_t vaddr_cur; ppnum_t ppn; uint32_t freed_pages = 0; + uint32_t freed_kernelcache_pages = 0; /* It is acceptable (if bad) to fail to free. 
*/ if (vaddr < VM_MIN_KERNEL_ADDRESS) { @@ -898,20 +890,17 @@ ml_static_mfree( if (ml_static_protect(vaddr_cur, PAGE_SIZE, VM_PROT_WRITE | VM_PROT_READ) != KERN_SUCCESS) { panic("Failed ml_static_mfree on %p", (void *) vaddr_cur); } -#if 0 - /* - * Must NOT tear down the "V==P" mapping for vaddr_cur as the zone alias scheme - * relies on the persistence of these mappings for all time. - */ - // pmap_remove(kernel_pmap, (addr64_t) vaddr_cur, (addr64_t) (vaddr_cur + PAGE_SIZE)); -#endif vm_page_create(ppn, (ppn + 1)); freed_pages++; + if (vaddr_cur >= segLOWEST && vaddr_cur < end_kern) { + freed_kernelcache_pages++; + } } } vm_page_lockspin_queues(); vm_page_wire_count -= freed_pages; vm_page_wire_count_initial -= freed_pages; + vm_page_kernelcache_count -= freed_kernelcache_pages; vm_page_unlock_queues(); #if DEBUG kprintf("ml_static_mfree: Released 0x%x pages at VA %p, size:0x%llx, last ppn: 0x%x\n", freed_pages, (void *)vaddr, (uint64_t)size, ppn); @@ -1118,13 +1107,11 @@ ml_energy_stat(__unused thread_t t) void ml_gpu_stat_update(__unused uint64_t gpu_ns_delta) { -#if CONFIG_EMBEDDED /* * For now: update the resource coalition stats of the * current thread's coalition */ task_coalition_update_gpu_stats(current_task(), gpu_ns_delta); -#endif } uint64_t @@ -1142,7 +1129,7 @@ timer_state_event(boolean_t switch_to_kernel) return; } - processor_data_t *pd = &getCpuDatap()->cpu_processor->processor_data; + processor_t pd = current_processor(); uint64_t now = ml_get_timebase(); timer_stop(pd->current_state, now); @@ -1240,3 +1227,35 @@ arm_user_protect_end(thread_t thread, uintptr_t ttbr0, boolean_t disable_interru } } #endif // __ARM_USER_PROTECT__ + +void +machine_lockdown(void) +{ + arm_vm_prot_finalize(PE_state.bootArgs); + lockdown_done = 1; +} + +void +ml_lockdown_init(void) +{ +} + +void +ml_hibernate_active_pre(void) +{ +} + +void +ml_hibernate_active_post(void) +{ +} + +size_t +ml_get_vm_reserved_regions(bool vm_is64bit, struct vm_reserved_region **regions) +{ +#pragma unused(vm_is64bit) + assert(regions != NULL); + + *regions = NULL; + return 0; +} diff --git a/osfmk/arm/machine_routines.h b/osfmk/arm/machine_routines.h index 759802bdd..c3dd12751 100644 --- a/osfmk/arm/machine_routines.h +++ b/osfmk/arm/machine_routines.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2007-2019 Apple Inc. All rights reserved. + * Copyright (c) 2007-2020 Apple Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -33,6 +33,7 @@ #define _ARM_MACHINE_ROUTINES_H_ #include +#include #include #include #include @@ -44,6 +45,12 @@ __BEGIN_DECLS +#ifdef XNU_KERNEL_PRIVATE +#ifdef __arm64__ +typedef bool (*expected_fault_handler_t)(arm_saved_state_t *); +#endif /* __arm64__ */ +#endif /* XNU_KERNEL_PRIVATE */ + /* Interrupt handling */ void ml_cpu_signal(unsigned int cpu_id); @@ -51,6 +58,7 @@ void ml_cpu_signal_deferred_adjust_timer(uint64_t nanosecs); uint64_t ml_cpu_signal_deferred_get_timer(void); void ml_cpu_signal_deferred(unsigned int cpu_id); void ml_cpu_signal_retract(unsigned int cpu_id); +bool ml_cpu_signal_is_enabled(void); /* Initialize Interrupts */ void ml_init_interrupt(void); @@ -70,10 +78,41 @@ void ml_cause_interrupt(void); /* Clear interrupt spin debug state for thread */ #if INTERRUPT_MASKED_DEBUG +extern boolean_t interrupt_masked_debug; +extern uint64_t interrupt_masked_timeout; +extern uint64_t stackshot_interrupt_masked_timeout; + +#define INTERRUPT_MASKED_DEBUG_START(handler_addr, type) \ +do { \ + if (interrupt_masked_debug) { \ + thread_t thread = current_thread(); \ + thread->machine.int_type = type; \ + thread->machine.int_handler_addr = (uintptr_t)VM_KERNEL_STRIP_PTR(handler_addr); \ + thread->machine.inthandler_timestamp = ml_get_timebase(); \ + thread->machine.int_vector = (uintptr_t)NULL; \ + } \ +} while (0) + +#define INTERRUPT_MASKED_DEBUG_END() \ +do { \ + if (interrupt_masked_debug) { \ + thread_t thread = current_thread(); \ + ml_check_interrupt_handler_duration(thread); \ + } \ +} while (0) + +void ml_irq_debug_start(uintptr_t handler, uintptr_t vector); +void ml_irq_debug_end(void); + void ml_spin_debug_reset(thread_t thread); void ml_spin_debug_clear(thread_t thread); void ml_spin_debug_clear_self(void); void ml_check_interrupts_disabled_duration(thread_t thread); +void ml_check_stackshot_interrupt_disabled_duration(thread_t thread); +void ml_check_interrupt_handler_duration(thread_t thread); +#else +#define INTERRUPT_MASKED_DEBUG_START(handler_addr, type) +#define INTERRUPT_MASKED_DEBUG_END() #endif #ifdef XNU_KERNEL_PRIVATE @@ -85,9 +124,14 @@ extern uint64_t ml_get_booter_memory_size(void); /* Type for the Time Base Enable function */ typedef void (*time_base_enable_t)(cpu_id_t cpu_id, boolean_t enable); -#if MACH_KERNEL_PRIVATE +#if defined(PEXPERT_KERNEL_PRIVATE) || defined(MACH_KERNEL_PRIVATE) /* Type for the Processor Cache Dispatch function */ typedef void (*cache_dispatch_t)(cpu_id_t cpu_id, unsigned int select, unsigned int param0, unsigned int param1); + +typedef uint32_t (*get_decrementer_t)(void); +typedef void (*set_decrementer_t)(uint32_t); +typedef void (*fiq_handler_t)(void); + #endif #define CacheConfig 0x00000000UL @@ -192,12 +236,25 @@ void ml_parse_cpu_topology(void); unsigned int ml_get_cpu_count(void); +unsigned int ml_get_cluster_count(void); + int ml_get_boot_cpu_number(void); int ml_get_cpu_number(uint32_t phys_id); +int ml_get_cluster_number(uint32_t phys_id); + int ml_get_max_cpu_number(void); +int ml_get_max_cluster_number(void); + +unsigned int ml_get_first_cpu_id(unsigned int cluster_id); + +#ifdef __arm64__ +int ml_get_cluster_number_local(void); +unsigned int ml_get_cpu_number_local(void); +#endif /* __arm64__ */ + /* Struct for ml_cpu_get_info */ struct ml_cpu_info { unsigned long vector_unit; @@ -217,6 +274,148 @@ typedef enum { cluster_type_t ml_get_boot_cluster(void); +/*! + * @typedef ml_topology_cpu_t + * @brief Describes one CPU core in the topology. 
+ * + * @field cpu_id Logical CPU ID (EDT: cpu-id): 0, 1, 2, 3, 4, ... + * @field phys_id Physical CPU ID (EDT: reg). Same as MPIDR[15:0], i.e. + * (cluster_id << 8) | core_number_within_cluster + * @field cluster_id Cluster ID (EDT: cluster-id) + * @field die_id Die ID (EDT: die-id) + * @field cluster_type The type of CPUs found in this cluster. + * @field l2_access_penalty Indicates that the scheduler should try to de-prioritize a core because + * L2 accesses are slower than on the boot processor. + * @field l2_cache_size Size of the L2 cache, in bytes. 0 if unknown or not present. + * @field l2_cache_id l2-cache-id property read from EDT. + * @field l3_cache_size Size of the L3 cache, in bytes. 0 if unknown or not present. + * @field l3_cache_id l3-cache-id property read from EDT. + * @field cpu_IMPL_regs IO-mapped virtual address of cpuX_IMPL (implementation-defined) register block. + * @field cpu_IMPL_pa Physical address of cpuX_IMPL register block. + * @field cpu_IMPL_len Length of cpuX_IMPL register block. + * @field cpu_UTTDBG_regs IO-mapped virtual address of cpuX_UTTDBG register block. + * @field cpu_UTTDBG_pa Physical address of cpuX_UTTDBG register block, if set in DT, else zero + * @field cpu_UTTDBG_len Length of cpuX_UTTDBG register block, if set in DT, else zero + * @field coresight_regs IO-mapped virtual address of CoreSight debug register block. + * @field coresight_pa Physical address of CoreSight register block. + * @field coresight_len Length of CoreSight register block. + * @field self_ipi_irq AIC IRQ vector for self IPI (cpuX->cpuX). 0 if unsupported. + * @field other_ipi_irq AIC IRQ vector for other IPI (cpuX->cpuY). 0 if unsupported. + * @field pmi_irq AIC IRQ vector for performance management IRQ. 0 if unsupported. + * @field die_cluster_id Cluster ID within the local die (EDT: die-cluster-id) + * @field cluster_core_id Core ID within the local cluster (EDT: cluster-core-id) + */ +typedef struct ml_topology_cpu { + unsigned int cpu_id; + uint32_t phys_id; + unsigned int cluster_id; + unsigned int die_id; + cluster_type_t cluster_type; + uint32_t l2_access_penalty; + uint32_t l2_cache_size; + uint32_t l2_cache_id; + uint32_t l3_cache_size; + uint32_t l3_cache_id; + vm_offset_t cpu_IMPL_regs; + uint64_t cpu_IMPL_pa; + uint64_t cpu_IMPL_len; + vm_offset_t cpu_UTTDBG_regs; + uint64_t cpu_UTTDBG_pa; + uint64_t cpu_UTTDBG_len; + vm_offset_t coresight_regs; + uint64_t coresight_pa; + uint64_t coresight_len; + int self_ipi_irq; + int other_ipi_irq; + int pmi_irq; + unsigned int die_cluster_id; + unsigned int cluster_core_id; +} ml_topology_cpu_t; + +/*! + * @typedef ml_topology_cluster_t + * @brief Describes one cluster in the topology. + * + * @field cluster_id Cluster ID (EDT: cluster-id) + * @field cluster_type The type of CPUs found in this cluster. + * @field num_cpus Total number of usable CPU cores in this cluster. + * @field first_cpu_id The cpu_id of the first CPU in the cluster. + * @field cpu_mask A bitmask representing the cpu_id's that belong to the cluster. Example: + * If the cluster contains CPU4 and CPU5, cpu_mask will be 0x30. + * @field acc_IMPL_regs IO-mapped virtual address of acc_IMPL (implementation-defined) register block. + * @field acc_IMPL_pa Physical address of acc_IMPL register block. + * @field acc_IMPL_len Length of acc_IMPL register block. + * @field cpm_IMPL_regs IO-mapped virtual address of cpm_IMPL (implementation-defined) register block. + * @field cpm_IMPL_pa Physical address of cpm_IMPL register block. 
+ * @field cpm_IMPL_len Length of cpm_IMPL register block. + */ +typedef struct ml_topology_cluster { + unsigned int cluster_id; + cluster_type_t cluster_type; + unsigned int num_cpus; + unsigned int first_cpu_id; + uint64_t cpu_mask; + vm_offset_t acc_IMPL_regs; + uint64_t acc_IMPL_pa; + uint64_t acc_IMPL_len; + vm_offset_t cpm_IMPL_regs; + uint64_t cpm_IMPL_pa; + uint64_t cpm_IMPL_len; +} ml_topology_cluster_t; + +// Bump this version number any time any ml_topology_* struct changes, so +// that KPI users can check whether their headers are compatible with +// the running kernel. +#define CPU_TOPOLOGY_VERSION 1 + +/*! + * @typedef ml_topology_info_t + * @brief Describes the CPU topology for all APs in the system. Populated from EDT and read-only at runtime. + * @discussion This struct only lists CPU cores that are considered usable by both iBoot and XNU. Some + * physically present CPU cores may be considered unusable due to configuration options like + * the "cpus=" boot-arg. Cores that are disabled in hardware will not show up in EDT at all, so + * they also will not be present in this struct. + * + * @field version Version of the struct (set to CPU_TOPOLOGY_VERSION). + * @field num_cpus Total number of usable CPU cores. + * @field max_cpu_id The highest usable logical CPU ID. + * @field num_clusters Total number of AP CPU clusters on the system (usable or not). + * @field max_cluster_id The highest cluster ID found in EDT. + * @field cpus List of |num_cpus| entries. + * @field clusters List of |num_clusters| entries. + * @field boot_cpu Points to the |cpus| entry for the boot CPU. + * @field boot_cluster Points to the |clusters| entry which contains the boot CPU. + * @field chip_revision Silicon revision reported by iBoot, which comes from the + * SoC-specific fuse bits. See CPU_VERSION_xx macros for definitions. + */ +typedef struct ml_topology_info { + unsigned int version; + unsigned int num_cpus; + unsigned int max_cpu_id; + unsigned int num_clusters; + unsigned int max_cluster_id; + unsigned int max_die_id; + ml_topology_cpu_t *cpus; + ml_topology_cluster_t *clusters; + ml_topology_cpu_t *boot_cpu; + ml_topology_cluster_t *boot_cluster; + unsigned int chip_revision; +} ml_topology_info_t; + +/*! + * @function ml_get_topology_info + * @result A pointer to the read-only topology struct. Does not need to be freed. Returns NULL + * if the struct hasn't been initialized or the feature is unsupported. + */ +const ml_topology_info_t *ml_get_topology_info(void); + +/*! + * @function ml_map_cpu_pio + * @brief Maps per-CPU and per-cluster PIO registers found in EDT. This needs to be + * called after arm_vm_init() so it can't be part of ml_parse_cpu_topology(). 
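The doc comments above describe the read-only CPU topology structures and ml_get_topology_info(). As a hedged illustration of how a consumer might walk them, here is a sketch that uses only fields declared in this header hunk; it assumes the header is included in the usual way, and printf is used purely for illustration:

    /* Illustrative only -- assumes the declarations added above
     * (ml_get_topology_info, ml_topology_info_t, ml_topology_cluster_t,
     *  ml_topology_cpu_t) are in scope via the machine_routines header. */
    static void
    log_cpu_topology(void)
    {
        const ml_topology_info_t *info = ml_get_topology_info();
        if (info == NULL) {
            return;    /* not initialized yet, or unsupported */
        }

        for (unsigned int c = 0; c < info->num_clusters; c++) {
            const ml_topology_cluster_t *cluster = &info->clusters[c];
            printf("cluster %u: type %d, %u cpus, first cpu %u, mask 0x%llx\n",
                cluster->cluster_id, (int)cluster->cluster_type,
                cluster->num_cpus, cluster->first_cpu_id,
                (unsigned long long)cluster->cpu_mask);
        }

        for (unsigned int i = 0; i < info->num_cpus; i++) {
            const ml_topology_cpu_t *cpu = &info->cpus[i];
            printf("cpu %u: phys 0x%x, cluster %u\n",
                cpu->cpu_id, (unsigned int)cpu->phys_id, cpu->cluster_id);
        }
    }

On the arm32 side added earlier in this patch the topology is fixed at one SMP cluster, so the cluster loop runs exactly once there.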
+ */ +void ml_map_cpu_pio(void); + /* Struct for ml_processor_register */ struct ml_processor_info { cpu_id_t cpu_id; @@ -245,12 +444,12 @@ struct ml_processor_info { }; typedef struct ml_processor_info ml_processor_info_t; -#if defined(PEXPERT_KERNEL_PRIVATE) || defined(MACH_KERNEL_PRIVATE) +#if defined(PEXPERT_KERNEL_PRIVATE) || defined(MACH_KERNEL_PRIVATE) /* Struct for ml_init_timebase */ struct tbd_ops { - void (*tbd_fiq_handler)(void); - uint32_t (*tbd_get_decrementer)(void); - void (*tbd_set_decrementer)(uint32_t dec_value); + fiq_handler_t tbd_fiq_handler; + get_decrementer_t tbd_get_decrementer; + set_decrementer_t tbd_set_decrementer; }; typedef struct tbd_ops *tbd_ops_t; typedef struct tbd_ops tbd_ops_data_t; @@ -289,9 +488,6 @@ kern_return_t ml_lockdown_handler_register(lockdown_handler_t, void *); #if XNU_KERNEL_PRIVATE void ml_lockdown_init(void); -/* Check if the machine layer wants to intercept a panic call */ -boolean_t ml_wants_panic_trap_to_debugger(void); - /* Machine layer routine for intercepting panics */ void ml_panic_trap_to_debugger(const char *panic_format_str, va_list *panic_args, @@ -313,6 +509,10 @@ vm_offset_t ml_static_vtop( vm_offset_t); +kern_return_t +ml_static_verify_page_protections( + uint64_t base, uint64_t size, vm_prot_t prot); + vm_offset_t ml_static_ptovirt( vm_offset_t); @@ -420,7 +620,7 @@ ml_static_protect( vm_offset_t ml_vtophys( vm_offset_t vaddr); -/* Get processor info */ +/* Get processor cache info */ void ml_cpu_get_info(ml_cpu_info_t *ml_cpu_info); #endif /* __APPLE_API_UNSTABLE */ @@ -451,6 +651,10 @@ vm_offset_t ml_io_map_with_prot( vm_size_t size, vm_prot_t prot); +void ml_io_unmap( + vm_offset_t addr, + vm_size_t sz); + void ml_get_bouncepool_info( vm_offset_t *phys_addr, vm_size_t *size); @@ -479,10 +683,12 @@ void ml_delay_on_yield(void); uint32_t ml_get_decrementer(void); -#if !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME +#include + +#if !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME || HAS_FAST_CNTVCT void timer_state_event_user_to_kernel(void); void timer_state_event_kernel_to_user(void); -#endif /* !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME */ +#endif /* !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME || HAS_FAST_CNTVCT */ uint64_t ml_get_hwclock(void); @@ -527,6 +733,7 @@ void bzero_phys_nc(addr64_t src64, vm_size_t bytes); */ void fill32_dczva(addr64_t, vm_size_t); void fill32_nt(addr64_t, vm_size_t, uint32_t); +int cpu_interrupt_is_pending(void); #endif #endif @@ -540,12 +747,12 @@ void ml_thread_policy( #define MACHINE_NETWORK_WORKLOOP 0x00000001 #define MACHINE_NETWORK_NETISR 0x00000002 -/* Initialize the maximum number of CPUs */ -void ml_init_max_cpus( +/* Set the maximum number of CPUs */ +void ml_set_max_cpus( unsigned int max_cpus); -/* Return the maximum number of CPUs set by ml_init_max_cpus() */ -unsigned int ml_get_max_cpus( +/* Return the maximum number of CPUs set by ml_set_max_cpus(), waiting if necessary */ +unsigned int ml_wait_max_cpus( void); /* Return the maximum memory size */ @@ -592,19 +799,25 @@ vm_offset_t ml_stack_remaining(void); #ifdef MACH_KERNEL_PRIVATE uint32_t get_fpscr(void); void set_fpscr(uint32_t); +void machine_conf(void); +void machine_lockdown(void); #ifdef __arm64__ unsigned long update_mdscr(unsigned long clear, unsigned long set); #endif /* __arm64__ */ -extern void init_vfp(void); -extern boolean_t get_vfp_enabled(void); extern void arm_debug_set_cp14(arm_debug_state_t *debug_state); extern void fiq_context_init(boolean_t enable_fiq); -extern void fiq_context_bootstrap(boolean_t enable_fiq); extern void 
reenable_async_aborts(void); +#ifdef __arm__ +extern boolean_t get_vfp_enabled(void); extern void cpu_idle_wfi(boolean_t wfi_fast); +#endif + +#ifdef __arm64__ +uint64_t ml_cluster_wfe_timeout(uint32_t wfe_cluster_id); +#endif #ifdef MONITOR #define MONITOR_SET_ENTRY 0x800 /* Set kernel entry point from monitor */ @@ -613,11 +826,6 @@ unsigned long monitor_call(uintptr_t callnum, uintptr_t arg1, uintptr_t arg2, uintptr_t arg3); #endif /* MONITOR */ -#if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR) -void rorgn_stash_range(void); -void rorgn_lockdown(void); -#endif /* defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR) */ - #if __ARM_KERNEL_PROTECT__ extern void set_vbar_el1(uint64_t); #endif /* __ARM_KERNEL_PROTECT__ */ @@ -629,10 +837,20 @@ extern int set_be_bit(void); extern int clr_be_bit(void); extern int be_tracing(void); +/* Please note that cpu_broadcast_xcall is not as simple is you would like it to be. + * It will sometimes put the calling thread to sleep, and it is up to your callback + * to wake it up as needed, where "as needed" is defined as "all other CPUs have + * called the broadcast func". Look around the kernel for examples, or instead use + * cpu_broadcast_xcall_simple() which does indeed act like you would expect, given + * the prototype. cpu_broadcast_immediate_xcall has the same caveats and has a similar + * _simple() wrapper + */ typedef void (*broadcastFunc) (void *); unsigned int cpu_broadcast_xcall(uint32_t *, boolean_t, broadcastFunc, void *); +unsigned int cpu_broadcast_xcall_simple(boolean_t, broadcastFunc, void *); kern_return_t cpu_xcall(int, broadcastFunc, void *); unsigned int cpu_broadcast_immediate_xcall(uint32_t *, boolean_t, broadcastFunc, void *); +unsigned int cpu_broadcast_immediate_xcall_simple(boolean_t, broadcastFunc, void *); kern_return_t cpu_immediate_xcall(int, broadcastFunc, void *); #ifdef KERNEL_PRIVATE @@ -876,6 +1094,30 @@ typedef void (*sched_perfcontrol_state_update_t)( perfcontrol_event event, uint32_t cpu_id, uint64_t timestamp, uint32_t flags, struct perfcontrol_thread_data *thr_data, __unused void *unused); +/* + * Thread Group Blocking Relationship Callout + * + * Parameters: + * blocked_tg - Thread group blocking on progress of another thread group + * blocking_tg - Thread group blocking progress of another thread group + * flags - Flags for other relevant information + * blocked_thr_state - Per-thread perfcontrol state for blocked thread + */ +typedef void (*sched_perfcontrol_thread_group_blocked_t)( + thread_group_data_t blocked_tg, thread_group_data_t blocking_tg, uint32_t flags, perfcontrol_state_t blocked_thr_state); + +/* + * Thread Group Unblocking Callout + * + * Parameters: + * unblocked_tg - Thread group being unblocked from making forward progress + * unblocking_tg - Thread group unblocking progress of another thread group + * flags - Flags for other relevant information + * unblocked_thr_state - Per-thread perfcontrol state for unblocked thread + */ +typedef void (*sched_perfcontrol_thread_group_unblocked_t)( + thread_group_data_t unblocked_tg, thread_group_data_t unblocking_tg, uint32_t flags, perfcontrol_state_t unblocked_thr_state); + /* * Callers should always use the CURRENT version so that the kernel can detect both older * and newer structure layouts. 
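 * For example, a performance controller that populates the new
 * thread_group_blocked/thread_group_unblocked entries must advertise at least
 * SCHED_PERFCONTROL_CALLBACKS_VERSION_8 in callbacks->version; with an older
 * version the kernel never examines those fields and the default no-op
 * handlers stay installed.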
New callbacks should always be added at the end of the @@ -892,6 +1134,7 @@ typedef void (*sched_perfcontrol_state_update_t)( #define SCHED_PERFCONTROL_CALLBACKS_VERSION_5 (5) /* up-to state_update */ #define SCHED_PERFCONTROL_CALLBACKS_VERSION_6 (6) /* up-to thread_group_flags_update */ #define SCHED_PERFCONTROL_CALLBACKS_VERSION_7 (7) /* up-to work_interval_ctl */ +#define SCHED_PERFCONTROL_CALLBACKS_VERSION_8 (8) /* up-to thread_group_unblocked */ #define SCHED_PERFCONTROL_CALLBACKS_VERSION_CURRENT SCHED_PERFCONTROL_CALLBACKS_VERSION_6 struct sched_perfcontrol_callbacks { @@ -908,6 +1151,8 @@ struct sched_perfcontrol_callbacks { sched_perfcontrol_state_update_t state_update; sched_perfcontrol_thread_group_flags_update_t thread_group_flags_update; sched_perfcontrol_work_interval_ctl_t work_interval_ctl; + sched_perfcontrol_thread_group_blocked_t thread_group_blocked; + sched_perfcontrol_thread_group_unblocked_t thread_group_unblocked; }; typedef struct sched_perfcontrol_callbacks *sched_perfcontrol_callbacks_t; @@ -924,9 +1169,51 @@ extern void sched_perfcontrol_update_recommended_cores(uint32_t recommended_core extern void sched_perfcontrol_thread_group_recommend(void *data, cluster_type_t recommendation); extern void sched_override_recommended_cores_for_sleep(void); extern void sched_restore_recommended_cores_after_sleep(void); +extern void sched_perfcontrol_inherit_recommendation_from_tg(perfcontrol_class_t perfctl_class, boolean_t inherit); extern void sched_usercontrol_update_recommended_cores(uint64_t recommended_cores); +/* + * Edge Scheduler-CLPC Interface + * + * sched_perfcontrol_thread_group_preferred_clusters_set() + * + * The Edge scheduler expects thread group recommendations to be specific clusters rather + * than just E/P. In order to allow more fine grained control, CLPC can specify an override + * preferred cluster per QoS bucket. CLPC passes a common preferred cluster `tg_preferred_cluster` + * and an array of size [PERFCONTROL_CLASS_MAX] with overrides for specific perfctl classes. + * The scheduler translates these preferences into sched_bucket + * preferences and applies the changes. + * + */ +/* Token to indicate a particular perfctl class is not overriden */ +#define SCHED_PERFCONTROL_PREFERRED_CLUSTER_OVERRIDE_NONE ((uint32_t)~0) + +/* + * CLPC can also indicate if there should be an immediate rebalancing of threads of this TG as + * part of this preferred cluster change. It does that by specifying the following options. + */ +#define SCHED_PERFCONTROL_PREFERRED_CLUSTER_MIGRATE_RUNNING 0x1 +#define SCHED_PERFCONTROL_PREFERRED_CLUSTER_MIGRATE_RUNNABLE 0x2 +typedef uint64_t sched_perfcontrol_preferred_cluster_options_t; + +extern void sched_perfcontrol_thread_group_preferred_clusters_set(void *machine_data, uint32_t tg_preferred_cluster, + uint32_t overrides[PERFCONTROL_CLASS_MAX], sched_perfcontrol_preferred_cluster_options_t options); + +/* + * Edge Scheduler-CLPC Interface + * + * sched_perfcontrol_edge_matrix_get()/sched_perfcontrol_edge_matrix_set() + * + * The Edge scheduler uses edges between clusters to define the likelihood of migrating threads + * across clusters. The edge config between any two clusters defines the edge weight and whether + * migation and steal operations are allowed across that edge. The getter and setter allow CLPC + * to query and configure edge properties between various clusters on the platform. 
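+ *
+ * Illustrative sketch only: the flat row-major buffer layout, the zero flags
+ * value, and using the cluster count as matrix_order are assumptions made for
+ * the example, not guarantees of this interface (MAX_CLUSTERS is a
+ * hypothetical bound):
+ *
+ *     uint64_t order = ml_get_topology_info()->num_clusters;
+ *     sched_clutch_edge edges[MAX_CLUSTERS * MAX_CLUSTERS];
+ *     bool requested[MAX_CLUSTERS * MAX_CLUSTERS];
+ *     sched_perfcontrol_edge_matrix_get(edges, requested, 0, order);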
+ */ + +extern void sched_perfcontrol_edge_matrix_get(sched_clutch_edge *edge_matrix, bool *edge_request_bitmap, uint64_t flags, uint64_t matrix_order); +extern void sched_perfcontrol_edge_matrix_set(sched_clutch_edge *edge_matrix, bool *edge_changes_bitmap, uint64_t flags, uint64_t matrix_order); + /* * Update the deadline after which sched_perfcontrol_deadline_passed will be called. * Returns TRUE if it successfully canceled a previously set callback, @@ -958,6 +1245,16 @@ typedef enum perfcontrol_callout_stat { uint64_t perfcontrol_callout_stat_avg(perfcontrol_callout_type_t type, perfcontrol_callout_stat_t stat); +#ifdef __arm64__ +/* The performance controller may use this interface to recommend + * that CPUs in the designated cluster employ WFE rather than WFI + * within the idle loop, falling back to WFI after the specified + * timeout. The updates are expected to be serialized by the caller, + * the implementation is not required to perform internal synchronization. + */ +uint32_t ml_update_cluster_wfe_recommendation(uint32_t wfe_cluster_id, uint64_t wfe_timeout_abstime_interval, uint64_t wfe_hint_flags); +#endif /* __arm64__ */ + #if defined(HAS_APPLE_PAC) #define ONES(x) (BIT((x))-1) #define PTR_MASK ONES(64-T1SZ_BOOT) @@ -966,11 +1263,39 @@ uint64_t perfcontrol_callout_stat_avg(perfcontrol_callout_type_t type, #define UNSIGN_PTR(p) \ SIGN(p) ? ((p) | PAC_MASK) : ((p) & ~PAC_MASK) +uint64_t ml_default_jop_pid(void); void ml_task_set_rop_pid(task_t task, task_t parent_task, boolean_t inherit); -void ml_task_set_disable_user_jop(task_t task, boolean_t disable_user_jop); -void ml_thread_set_disable_user_jop(thread_t thread, boolean_t disable_user_jop); -void ml_set_kernelkey_enabled(boolean_t enable); +void ml_task_set_jop_pid(task_t task, task_t parent_task, boolean_t inherit); +void ml_task_set_jop_pid_from_shared_region(task_t task); +void ml_task_set_disable_user_jop(task_t task, uint8_t disable_user_jop); +void ml_thread_set_disable_user_jop(thread_t thread, uint8_t disable_user_jop); +void ml_thread_set_jop_pid(thread_t thread, task_t task); void *ml_auth_ptr_unchecked(void *ptr, unsigned key, uint64_t modifier); + +/** + * Temporarily enables a userspace JOP key in kernel space, so that the kernel + * can sign or auth pointers on that process's behalf. + * + * @note The caller must disable interrupts before calling + * ml_enable_user_jop_key(), and may only re-enable interrupts after the + * complementary ml_disable_user_jop_key() call. + * + * @param user_jop_key The userspace JOP key to temporarily use + * @return Saved JOP state, to be passed to the complementary + * ml_disable_user_jop_key() call + */ +uint64_t ml_enable_user_jop_key(uint64_t user_jop_key); + +/** + * Restores the previous JOP key state after a previous ml_enable_user_jop_key() + * call. 
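+ *
+ * Minimal usage sketch of the enable/disable pair (task_jop_key is a
+ * hypothetical variable; real call sites perform additional bookkeeping):
+ *
+ *     boolean_t istate = ml_set_interrupts_enabled(FALSE);
+ *     uint64_t saved_state = ml_enable_user_jop_key(task_jop_key);
+ *     /* sign or auth user pointers on the task's behalf here */
+ *     ml_disable_user_jop_key(task_jop_key, saved_state);
+ *     (void) ml_set_interrupts_enabled(istate);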
+ * + * @param user_jop_key The userspace JOP key previously passed to + * ml_enable_user_jop_key() + * @param saved_jop_state The saved JOP state returned by + * ml_enable_user_jop_key() + */ +void ml_disable_user_jop_key(uint64_t user_jop_key, uint64_t saved_jop_state); #endif /* defined(HAS_APPLE_PAC) */ @@ -986,8 +1311,10 @@ uint8_t user_timebase_type(void); boolean_t ml_thread_is64bit(thread_t thread); #ifdef __arm64__ +bool ml_feature_supported(uint32_t feature_bit); void ml_set_align_checking(void); -boolean_t arm64_wfe_allowed(void); +extern void wfe_timeout_configure(void); +extern void wfe_timeout_init(void); #endif /* __arm64__ */ void ml_timer_evaluate(void); @@ -997,6 +1324,17 @@ void ml_gpu_stat_update(uint64_t); uint64_t ml_gpu_stat(thread_t); #endif /* __APPLE_API_PRIVATE */ + + +#if __arm64__ && defined(CONFIG_XNUPOST) && defined(XNU_KERNEL_PRIVATE) +extern void ml_expect_fault_begin(expected_fault_handler_t, uintptr_t); +extern void ml_expect_fault_end(void); +#endif /* __arm64__ && defined(CONFIG_XNUPOST) && defined(XNU_KERNEL_PRIVATE) */ + + +void ml_hibernate_active_pre(void); +void ml_hibernate_active_post(void); + __END_DECLS #endif /* _ARM_MACHINE_ROUTINES_H_ */ diff --git a/osfmk/arm/machine_routines_asm.s b/osfmk/arm/machine_routines_asm.s index 5b475d4fd..2d3a54065 100644 --- a/osfmk/arm/machine_routines_asm.s +++ b/osfmk/arm/machine_routines_asm.s @@ -115,9 +115,7 @@ LEXT(timer_grab) 0: ldr r2, [r0, TIMER_HIGH] ldr r3, [r0, TIMER_LOW] -#if __ARM_SMP__ dmb ish // dmb ish -#endif ldr r1, [r0, TIMER_HIGHCHK] cmp r1, r2 bne 0b @@ -128,13 +126,9 @@ LEXT(timer_grab) .globl EXT(timer_advance_internal_32) LEXT(timer_advance_internal_32) str r1, [r0, TIMER_HIGHCHK] -#if __ARM_SMP__ dmb ish // dmb ish -#endif str r2, [r0, TIMER_LOW] -#if __ARM_SMP__ dmb ish // dmb ish -#endif str r1, [r0, TIMER_HIGH] bx lr @@ -209,11 +203,7 @@ LEXT(sync_tlb_flush) .macro FLUSH_MMU_TLB mov r0, #0 -#if __ARM_SMP__ mcr p15, 0, r0, c8, c3, 0 // Invalidate Inner Shareable entire TLBs -#else - mcr p15, 0, r0, c8, c7, 0 // Invalidate entire TLB -#endif .endmacro /* @@ -273,11 +263,7 @@ LEXT(flush_core_tlb) bx lr .macro FLUSH_MMU_TLB_ENTRY -#if __ARM_SMP__ mcr p15, 0, r0, c8, c3, 1 // Invalidate TLB Inner Shareableentry -#else - mcr p15, 0, r0, c8, c7, 1 // Invalidate TLB entry -#endif .endmacro /* * void flush_mmu_tlb_entry_async(uint32_t) @@ -306,11 +292,7 @@ LEXT(flush_mmu_tlb_entry) .macro FLUSH_MMU_TLB_ENTRIES 1: -#if __ARM_SMP__ mcr p15, 0, r0, c8, c3, 1 // Invalidate TLB Inner Shareable entry -#else - mcr p15, 0, r0, c8, c7, 1 // Invalidate TLB entry -#endif add r0, r0, ARM_PGBYTES // Increment to the next page cmp r0, r1 // Loop if current address < end address blt 1b @@ -343,11 +325,7 @@ LEXT(flush_mmu_tlb_entries) .macro FLUSH_MMU_TLB_MVA_ENTRIES -#if __ARM_SMP__ mcr p15, 0, r0, c8, c3, 3 // Invalidate TLB Inner Shareable entries by mva -#else - mcr p15, 0, r0, c8, c7, 3 // Invalidate TLB Inner Shareable entries by mva -#endif .endmacro /* @@ -376,11 +354,7 @@ LEXT(flush_mmu_tlb_mva_entries) bx lr .macro FLUSH_MMU_TLB_ASID -#if __ARM_SMP__ mcr p15, 0, r0, c8, c3, 2 // Invalidate TLB Inner Shareable entries by asid -#else - mcr p15, 0, r0, c8, c7, 2 // Invalidate TLB entries by asid -#endif .endmacro /* @@ -473,7 +447,6 @@ LEXT(set_mmu_ttb_alternate) .globl EXT(get_mmu_ttb) LEXT(get_mmu_ttb) mrc p15, 0, r0, c2, c0, 0 // translation table to r0 - isb bx lr /* @@ -1278,35 +1251,6 @@ LEXT(ml_get_interrupts_enabled) * */ -#if defined(ARM_BOARD_CLASS_S7002) - .text - .align 2 - .globl 
EXT(fleh_fiq_s7002) -LEXT(fleh_fiq_s7002) - str r11, [r10, #PMGR_INTERVAL_TMR_CTL_OFFSET] // Clear the decrementer interrupt - mvn r13, #0 - str r13, [r8, CPU_DECREMENTER] - b EXT(fleh_dec) - - .text - .align 2 - .globl EXT(s7002_get_decrementer) -LEXT(s7002_get_decrementer) - ldr ip, [r3, CPU_TBD_HARDWARE_ADDR] // Get the hardware address - add ip, ip, #PMGR_INTERVAL_TMR_OFFSET - ldr r0, [ip] // Get the Decrementer - bx lr - - .text - .align 2 - .globl EXT(s7002_set_decrementer) -LEXT(s7002_set_decrementer) - str r0, [r3, CPU_DECREMENTER] // Save the new dec value - ldr ip, [r3, CPU_TBD_HARDWARE_ADDR] // Get the hardware address - str r0, [ip, #PMGR_INTERVAL_TMR_OFFSET] // Store the new Decrementer - bx lr -#endif /* defined(ARM_BOARD_CLASS_S7002) */ - #if defined(ARM_BOARD_CLASS_T8002) .text .align 2 diff --git a/osfmk/arm/machine_routines_common.c b/osfmk/arm/machine_routines_common.c index b433dd658..f7fca614b 100644 --- a/osfmk/arm/machine_routines_common.c +++ b/osfmk/arm/machine_routines_common.c @@ -39,6 +39,7 @@ #include #include #include +#include #include #include #include @@ -50,11 +51,6 @@ #include -#if INTERRUPT_MASKED_DEBUG -extern boolean_t interrupt_masked_debug; -extern uint64_t interrupt_masked_timeout; -#endif - #if !HAS_CONTINUOUS_HWCLOCK extern uint64_t mach_absolutetime_asleep; #else @@ -62,6 +58,18 @@ extern uint64_t wake_abstime; static uint64_t wake_conttime = UINT64_MAX; #endif +extern volatile uint32_t debug_enabled; + +static int max_cpus_initialized = 0; +#define MAX_CPUS_SET 0x1 +#define MAX_CPUS_WAIT 0x2 + +LCK_GRP_DECLARE(max_cpus_grp, "max_cpus"); +LCK_MTX_DECLARE(max_cpus_lock, &max_cpus_grp); +uint32_t lockdown_done = 0; +boolean_t is_clock_configured = FALSE; + + static void sched_perfcontrol_oncore_default(perfcontrol_state_t new_thread_state __unused, going_on_core_t on __unused) { @@ -121,6 +129,20 @@ sched_perfcontrol_state_update_default( { } +static void +sched_perfcontrol_thread_group_blocked_default( + __unused thread_group_data_t blocked_tg, __unused thread_group_data_t blocking_tg, + __unused uint32_t flags, __unused perfcontrol_state_t blocked_thr_state) +{ +} + +static void +sched_perfcontrol_thread_group_unblocked_default( + __unused thread_group_data_t unblocked_tg, __unused thread_group_data_t unblocking_tg, + __unused uint32_t flags, __unused perfcontrol_state_t unblocked_thr_state) +{ +} + sched_perfcontrol_offcore_t sched_perfcontrol_offcore = sched_perfcontrol_offcore_default; sched_perfcontrol_context_switch_t sched_perfcontrol_switch = sched_perfcontrol_switch_default; sched_perfcontrol_oncore_t sched_perfcontrol_oncore = sched_perfcontrol_oncore_default; @@ -133,6 +155,8 @@ sched_perfcontrol_work_interval_ctl_t sched_perfcontrol_work_interval_ sched_perfcontrol_deadline_passed_t sched_perfcontrol_deadline_passed = sched_perfcontrol_deadline_passed_default; sched_perfcontrol_csw_t sched_perfcontrol_csw = sched_perfcontrol_csw_default; sched_perfcontrol_state_update_t sched_perfcontrol_state_update = sched_perfcontrol_state_update_default; +sched_perfcontrol_thread_group_blocked_t sched_perfcontrol_thread_group_blocked = sched_perfcontrol_thread_group_blocked_default; +sched_perfcontrol_thread_group_unblocked_t sched_perfcontrol_thread_group_unblocked = sched_perfcontrol_thread_group_unblocked_default; void sched_perfcontrol_register_callbacks(sched_perfcontrol_callbacks_t callbacks, unsigned long size_of_state) @@ -144,6 +168,44 @@ sched_perfcontrol_register_callbacks(sched_perfcontrol_callbacks_t callbacks, un } if (callbacks) { 
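+		/*
+		 * Each optional callback family below is gated on the version the
+		 * client advertises in callbacks->version: fields newer than that
+		 * version are never examined, and fields left NULL fall back to the
+		 * default no-op handlers, so older performance controllers keep
+		 * working unchanged against newer kernels.
+		 */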
+#if CONFIG_THREAD_GROUPS + if (callbacks->version >= SCHED_PERFCONTROL_CALLBACKS_VERSION_3) { + if (callbacks->thread_group_init != NULL) { + sched_perfcontrol_thread_group_init = callbacks->thread_group_init; + } else { + sched_perfcontrol_thread_group_init = sched_perfcontrol_thread_group_default; + } + if (callbacks->thread_group_deinit != NULL) { + sched_perfcontrol_thread_group_deinit = callbacks->thread_group_deinit; + } else { + sched_perfcontrol_thread_group_deinit = sched_perfcontrol_thread_group_default; + } + // tell CLPC about existing thread groups + thread_group_resync(TRUE); + } + + if (callbacks->version >= SCHED_PERFCONTROL_CALLBACKS_VERSION_6) { + if (callbacks->thread_group_flags_update != NULL) { + sched_perfcontrol_thread_group_flags_update = callbacks->thread_group_flags_update; + } else { + sched_perfcontrol_thread_group_flags_update = sched_perfcontrol_thread_group_default; + } + } + + if (callbacks->version >= SCHED_PERFCONTROL_CALLBACKS_VERSION_8) { + if (callbacks->thread_group_blocked != NULL) { + sched_perfcontrol_thread_group_blocked = callbacks->thread_group_blocked; + } else { + sched_perfcontrol_thread_group_blocked = sched_perfcontrol_thread_group_blocked_default; + } + + if (callbacks->thread_group_unblocked != NULL) { + sched_perfcontrol_thread_group_unblocked = callbacks->thread_group_unblocked; + } else { + sched_perfcontrol_thread_group_unblocked = sched_perfcontrol_thread_group_unblocked_default; + } + } +#endif if (callbacks->version >= SCHED_PERFCONTROL_CALLBACKS_VERSION_7) { if (callbacks->work_interval_ctl != NULL) { @@ -206,6 +268,9 @@ sched_perfcontrol_register_callbacks(sched_perfcontrol_callbacks_t callbacks, un } } else { /* reset to defaults */ +#if CONFIG_THREAD_GROUPS + thread_group_resync(FALSE); +#endif sched_perfcontrol_offcore = sched_perfcontrol_offcore_default; sched_perfcontrol_switch = sched_perfcontrol_switch_default; sched_perfcontrol_oncore = sched_perfcontrol_oncore_default; @@ -217,6 +282,8 @@ sched_perfcontrol_register_callbacks(sched_perfcontrol_callbacks_t callbacks, un sched_perfcontrol_work_interval_ctl = sched_perfcontrol_work_interval_ctl_default; sched_perfcontrol_csw = sched_perfcontrol_csw_default; sched_perfcontrol_state_update = sched_perfcontrol_state_update_default; + sched_perfcontrol_thread_group_blocked = sched_perfcontrol_thread_group_blocked_default; + sched_perfcontrol_thread_group_unblocked = sched_perfcontrol_thread_group_unblocked_default; } } @@ -230,6 +297,11 @@ machine_switch_populate_perfcontrol_thread_data(struct perfcontrol_thread_data * data->perfctl_class = thread_get_perfcontrol_class(thread); data->energy_estimate_nj = 0; data->thread_id = thread->thread_id; +#if CONFIG_THREAD_GROUPS + struct thread_group *tg = thread_group_get(thread); + data->thread_group_id = thread_group_get_id(tg); + data->thread_group_data = thread_group_get_machine_data(tg); +#endif data->scheduling_latency_at_same_basepri = same_pri_latency; data->perfctl_state = FIND_PERFCONTROL_STATE(thread); } @@ -289,6 +361,7 @@ perfcontrol_callout_stat_avg(perfcontrol_callout_type_t type, os_atomic_load_wide(&perfcontrol_callout_count[type], relaxed); } + void machine_switch_perfcontrol_context(perfcontrol_event event, uint64_t timestamp, @@ -297,6 +370,7 @@ machine_switch_perfcontrol_context(perfcontrol_event event, thread_t old, thread_t new) { + if (sched_perfcontrol_switch != sched_perfcontrol_switch_default) { perfcontrol_state_t old_perfcontrol_state = FIND_PERFCONTROL_STATE(old); perfcontrol_state_t new_perfcontrol_state = 
FIND_PERFCONTROL_STATE(new); @@ -337,6 +411,7 @@ machine_switch_perfcontrol_state_update(perfcontrol_event event, uint32_t flags, thread_t thread) { + if (sched_perfcontrol_state_update == sched_perfcontrol_state_update_default) { return; } @@ -376,10 +451,15 @@ machine_thread_going_on_core(thread_t new_thread, on_core.thread_id = new_thread->thread_id; on_core.energy_estimate_nj = 0; - on_core.qos_class = proc_get_effective_thread_policy(new_thread, TASK_POLICY_QOS); - on_core.urgency = urgency; + on_core.qos_class = (uint16_t)proc_get_effective_thread_policy(new_thread, TASK_POLICY_QOS); + on_core.urgency = (uint16_t)urgency; on_core.is_32_bit = thread_is_64bit_data(new_thread) ? FALSE : TRUE; on_core.is_kernel_thread = new_thread->task == kernel_task; +#if CONFIG_THREAD_GROUPS + struct thread_group *tg = thread_group_get(new_thread); + on_core.thread_group_id = thread_group_get_id(tg); + on_core.thread_group_data = thread_group_get_machine_data(tg); +#endif on_core.scheduling_latency = sched_latency; on_core.start_time = timestamp; on_core.scheduling_latency_at_same_basepri = same_pri_latency; @@ -413,6 +493,11 @@ machine_thread_going_off_core(thread_t old_thread, boolean_t thread_terminating, off_core.thread_id = old_thread->thread_id; off_core.energy_estimate_nj = 0; off_core.end_time = last_dispatch; +#if CONFIG_THREAD_GROUPS + struct thread_group *tg = thread_group_get(old_thread); + off_core.thread_group_id = thread_group_get_id(tg); + off_core.thread_group_data = thread_group_get_machine_data(tg); +#endif #if MONOTONIC uint64_t counters[MT_CORE_NFIXED]; @@ -430,6 +515,132 @@ machine_thread_going_off_core(thread_t old_thread, boolean_t thread_terminating, #endif } +#if CONFIG_THREAD_GROUPS +void +machine_thread_group_init(struct thread_group *tg) +{ + if (sched_perfcontrol_thread_group_init == sched_perfcontrol_thread_group_default) { + return; + } + struct thread_group_data data; + data.thread_group_id = thread_group_get_id(tg); + data.thread_group_data = thread_group_get_machine_data(tg); + data.thread_group_size = thread_group_machine_data_size(); + sched_perfcontrol_thread_group_init(&data); +} + +void +machine_thread_group_deinit(struct thread_group *tg) +{ + if (sched_perfcontrol_thread_group_deinit == sched_perfcontrol_thread_group_default) { + return; + } + struct thread_group_data data; + data.thread_group_id = thread_group_get_id(tg); + data.thread_group_data = thread_group_get_machine_data(tg); + data.thread_group_size = thread_group_machine_data_size(); + sched_perfcontrol_thread_group_deinit(&data); +} + +void +machine_thread_group_flags_update(struct thread_group *tg, uint32_t flags) +{ + if (sched_perfcontrol_thread_group_flags_update == sched_perfcontrol_thread_group_default) { + return; + } + struct thread_group_data data; + data.thread_group_id = thread_group_get_id(tg); + data.thread_group_data = thread_group_get_machine_data(tg); + data.thread_group_size = thread_group_machine_data_size(); + data.thread_group_flags = flags; + sched_perfcontrol_thread_group_flags_update(&data); +} + +void +machine_thread_group_blocked(struct thread_group *blocked_tg, + struct thread_group *blocking_tg, + uint32_t flags, + thread_t blocked_thread) +{ + if (sched_perfcontrol_thread_group_blocked == sched_perfcontrol_thread_group_blocked_default) { + return; + } + + spl_t s = splsched(); + + perfcontrol_state_t state = FIND_PERFCONTROL_STATE(blocked_thread); + struct thread_group_data blocked_data; + assert(blocked_tg != NULL); + + blocked_data.thread_group_id = 
thread_group_get_id(blocked_tg); + blocked_data.thread_group_data = thread_group_get_machine_data(blocked_tg); + blocked_data.thread_group_size = thread_group_machine_data_size(); + + if (blocking_tg == NULL) { + /* + * For special cases such as the render server, the blocking TG is a + * well known TG. Only in that case, the blocking_tg should be NULL. + */ + assert(flags & PERFCONTROL_CALLOUT_BLOCKING_TG_RENDER_SERVER); + sched_perfcontrol_thread_group_blocked(&blocked_data, NULL, flags, state); + } else { + struct thread_group_data blocking_data; + blocking_data.thread_group_id = thread_group_get_id(blocking_tg); + blocking_data.thread_group_data = thread_group_get_machine_data(blocking_tg); + blocking_data.thread_group_size = thread_group_machine_data_size(); + sched_perfcontrol_thread_group_blocked(&blocked_data, &blocking_data, flags, state); + } + KDBG(MACHDBG_CODE(DBG_MACH_THREAD_GROUP, MACH_THREAD_GROUP_BLOCK) | DBG_FUNC_START, + thread_tid(blocked_thread), thread_group_get_id(blocked_tg), + blocking_tg ? thread_group_get_id(blocking_tg) : THREAD_GROUP_INVALID, + flags); + + splx(s); +} + +void +machine_thread_group_unblocked(struct thread_group *unblocked_tg, + struct thread_group *unblocking_tg, + uint32_t flags, + thread_t unblocked_thread) +{ + if (sched_perfcontrol_thread_group_unblocked == sched_perfcontrol_thread_group_unblocked_default) { + return; + } + + spl_t s = splsched(); + + perfcontrol_state_t state = FIND_PERFCONTROL_STATE(unblocked_thread); + struct thread_group_data unblocked_data; + assert(unblocked_tg != NULL); + + unblocked_data.thread_group_id = thread_group_get_id(unblocked_tg); + unblocked_data.thread_group_data = thread_group_get_machine_data(unblocked_tg); + unblocked_data.thread_group_size = thread_group_machine_data_size(); + + if (unblocking_tg == NULL) { + /* + * For special cases such as the render server, the unblocking TG is a + * well known TG. Only in that case, the unblocking_tg should be NULL. + */ + assert(flags & PERFCONTROL_CALLOUT_BLOCKING_TG_RENDER_SERVER); + sched_perfcontrol_thread_group_unblocked(&unblocked_data, NULL, flags, state); + } else { + struct thread_group_data unblocking_data; + unblocking_data.thread_group_id = thread_group_get_id(unblocking_tg); + unblocking_data.thread_group_data = thread_group_get_machine_data(unblocking_tg); + unblocking_data.thread_group_size = thread_group_machine_data_size(); + sched_perfcontrol_thread_group_unblocked(&unblocked_data, &unblocking_data, flags, state); + } + KDBG(MACHDBG_CODE(DBG_MACH_THREAD_GROUP, MACH_THREAD_GROUP_BLOCK) | DBG_FUNC_END, + thread_tid(unblocked_thread), thread_group_get_id(unblocked_tg), + unblocking_tg ? 
thread_group_get_id(unblocking_tg) : THREAD_GROUP_INVALID, + flags); + + splx(s); +} + +#endif /* CONFIG_THREAD_GROUPS */ void machine_max_runnable_latency(uint64_t bg_max_latency, @@ -461,7 +672,7 @@ machine_work_interval_notify(thread_t thread, perfcontrol_state_t state = FIND_PERFCONTROL_STATE(thread); struct perfcontrol_work_interval work_interval = { .thread_id = thread->thread_id, - .qos_class = proc_get_effective_thread_policy(thread, TASK_POLICY_QOS), + .qos_class = (uint16_t)proc_get_effective_thread_policy(thread, TASK_POLICY_QOS), .urgency = kwi_args->urgency, .flags = kwi_args->notify_flags, .work_interval_id = kwi_args->work_interval_id, @@ -471,6 +682,12 @@ machine_work_interval_notify(thread_t thread, .next_start = kwi_args->next_start, .create_flags = kwi_args->create_flags, }; +#if CONFIG_THREAD_GROUPS + struct thread_group *tg; + tg = thread_group_get(thread); + work_interval.thread_group_id = thread_group_get_id(tg); + work_interval.thread_group_data = thread_group_get_machine_data(tg); +#endif sched_perfcontrol_work_interval_notify(state, &work_interval); } @@ -496,7 +713,9 @@ machine_perfcontrol_deadline_passed(uint64_t deadline) void ml_spin_debug_reset(thread_t thread) { - thread->machine.intmask_timestamp = ml_get_timebase(); + if (thread->machine.intmask_timestamp) { + thread->machine.intmask_timestamp = ml_get_timebase(); + } } /* @@ -521,17 +740,17 @@ ml_spin_debug_clear_self() ml_spin_debug_clear(current_thread()); } -void -ml_check_interrupts_disabled_duration(thread_t thread) +static inline void +__ml_check_interrupts_disabled_duration(thread_t thread, uint64_t timeout, bool is_int_handler) { uint64_t start; uint64_t now; - start = thread->machine.intmask_timestamp; + start = is_int_handler ? thread->machine.inthandler_timestamp : thread->machine.intmask_timestamp; if (start != 0) { now = ml_get_timebase(); - if ((now - start) > interrupt_masked_timeout * debug_cpu_performance_degradation_factor) { + if ((now - start) > timeout * debug_cpu_performance_degradation_factor) { mach_timebase_info_data_t timebase; clock_timebase_info(&timebase); @@ -540,13 +759,54 @@ ml_check_interrupts_disabled_duration(thread_t thread) * Disable the actual panic for KASAN due to the overhead of KASAN itself, leave the rest of the * mechanism enabled so that KASAN can catch any bugs in the mechanism itself. 
*/ - panic("Interrupts held disabled for %llu nanoseconds", (((now - start) * timebase.numer) / timebase.denom)); + if (is_int_handler) { + panic("Processing of an interrupt (type = %u, handler address = %p, vector = %p) took %llu nanoseconds (timeout = %llu ns)", + thread->machine.int_type, (void *)thread->machine.int_handler_addr, (void *)thread->machine.int_vector, + (((now - start) * timebase.numer) / timebase.denom), + ((timeout * debug_cpu_performance_degradation_factor) * timebase.numer) / timebase.denom); + } else { + panic("Interrupts held disabled for %llu nanoseconds (timeout = %llu ns)", + (((now - start) * timebase.numer) / timebase.denom), + ((timeout * debug_cpu_performance_degradation_factor) * timebase.numer) / timebase.denom); + } #endif } } return; } + +void +ml_check_interrupts_disabled_duration(thread_t thread) +{ + __ml_check_interrupts_disabled_duration(thread, interrupt_masked_timeout, false); +} + +void +ml_check_stackshot_interrupt_disabled_duration(thread_t thread) +{ + /* Use MAX() to let the user bump the timeout further if needed */ + __ml_check_interrupts_disabled_duration(thread, MAX(stackshot_interrupt_masked_timeout, interrupt_masked_timeout), false); +} + +void +ml_check_interrupt_handler_duration(thread_t thread) +{ + __ml_check_interrupts_disabled_duration(thread, interrupt_masked_timeout, true); +} + +void +ml_irq_debug_start(uintptr_t handler, uintptr_t vector) +{ + INTERRUPT_MASKED_DEBUG_START(handler, DBG_INTR_TYPE_OTHER); + current_thread()->machine.int_vector = (uintptr_t)VM_KERNEL_STRIP_PTR(vector); +} + +void +ml_irq_debug_end() +{ + INTERRUPT_MASKED_DEBUG_END(); +} #endif // INTERRUPT_MASKED_DEBUG @@ -569,7 +829,11 @@ ml_set_interrupts_enabled(boolean_t enable) if (interrupt_masked_debug) { // Interrupts are currently masked, we will enable them (after finishing this check) thread = current_thread(); - ml_check_interrupts_disabled_duration(thread); + if (stackshot_active()) { + ml_check_stackshot_interrupt_disabled_duration(thread); + } else { + ml_check_interrupts_disabled_duration(thread); + } thread->machine.intmask_timestamp = 0; } #endif // INTERRUPT_MASKED_DEBUG @@ -588,13 +852,13 @@ ml_set_interrupts_enabled(boolean_t enable) #if __arm__ __asm__ volatile ("cpsie if" ::: "memory"); // Enable IRQ FIQ #else - __builtin_arm_wsr("DAIFClr", (DAIFSC_IRQF | DAIFSC_FIQF)); + __builtin_arm_wsr("DAIFClr", DAIFSC_STANDARD_DISABLE); #endif } else if (!enable && ((state & INTERRUPT_MASK) == 0)) { #if __arm__ __asm__ volatile ("cpsid if" ::: "memory"); // Mask IRQ FIQ #else - __builtin_arm_wsr("DAIFSet", (DAIFSC_IRQF | DAIFSC_FIQF)); + __builtin_arm_wsr("DAIFSet", DAIFSC_STANDARD_DISABLE); #endif #if INTERRUPT_MASKED_DEBUG if (interrupt_masked_debug) { @@ -649,19 +913,19 @@ ml_stack_remaining(void) } } -static boolean_t ml_quiescing; +static boolean_t ml_quiescing = FALSE; void ml_set_is_quiescing(boolean_t quiescing) { - assert(FALSE == ml_get_interrupts_enabled()); ml_quiescing = quiescing; + os_atomic_thread_fence(release); } boolean_t ml_is_quiescing(void) { - assert(FALSE == ml_get_interrupts_enabled()); + os_atomic_thread_fence(acquire); return ml_quiescing; } @@ -677,8 +941,10 @@ ml_get_booter_memory_size(void) roundsize >>= 1; } size = (size + roundsize - 1) & ~(roundsize - 1); - size -= BootArgs->memSize; } + + size -= BootArgs->memSize; + return size; } @@ -691,7 +957,9 @@ ml_get_abstime_offset(void) uint64_t ml_get_conttime_offset(void) { -#if HAS_CONTINUOUS_HWCLOCK +#if HIBERNATION && HAS_CONTINUOUS_HWCLOCK + return hwclock_conttime_offset; 
+#elif HAS_CONTINUOUS_HWCLOCK return 0; #else return rtclock_base_abstime + mach_absolutetime_asleep; @@ -746,8 +1014,9 @@ bool ml_snoop_thread_is_on_core(thread_t thread) { unsigned int cur_cpu_num = 0; + const unsigned int max_cpu_id = ml_get_max_cpu_number(); - for (cur_cpu_num = 0; cur_cpu_num < MAX_CPUS; cur_cpu_num++) { + for (cur_cpu_num = 0; cur_cpu_num <= max_cpu_id; cur_cpu_num++) { if (CpuDataEntries[cur_cpu_num].cpu_data_vaddr) { if (CpuDataEntries[cur_cpu_num].cpu_data_vaddr->cpu_active_thread == thread) { return true; @@ -757,3 +1026,63 @@ ml_snoop_thread_is_on_core(thread_t thread) return false; } + +int +ml_early_cpu_max_number(void) +{ + assert(startup_phase >= STARTUP_SUB_TUNABLES); + return ml_get_max_cpu_number(); +} + +void +ml_set_max_cpus(unsigned int max_cpus __unused) +{ + lck_mtx_lock(&max_cpus_lock); + if (max_cpus_initialized != MAX_CPUS_SET) { + if (max_cpus_initialized == MAX_CPUS_WAIT) { + thread_wakeup((event_t) &max_cpus_initialized); + } + max_cpus_initialized = MAX_CPUS_SET; + } + lck_mtx_unlock(&max_cpus_lock); +} + +unsigned int +ml_wait_max_cpus(void) +{ + assert(lockdown_done); + lck_mtx_lock(&max_cpus_lock); + while (max_cpus_initialized != MAX_CPUS_SET) { + max_cpus_initialized = MAX_CPUS_WAIT; + lck_mtx_sleep(&max_cpus_lock, LCK_SLEEP_DEFAULT, &max_cpus_initialized, THREAD_UNINT); + } + lck_mtx_unlock(&max_cpus_lock); + return machine_info.max_cpus; +} +void +machine_conf(void) +{ + /* + * This is known to be inaccurate. mem_size should always be capped at 2 GB + */ + machine_info.memory_size = (uint32_t)mem_size; + + // rdar://problem/58285685: Userland expects _COMM_PAGE_LOGICAL_CPUS to report + // (max_cpu_id+1) rather than a literal *count* of logical CPUs. + unsigned int num_cpus = ml_get_topology_info()->max_cpu_id + 1; + machine_info.max_cpus = num_cpus; + machine_info.physical_cpu_max = num_cpus; + machine_info.logical_cpu_max = num_cpus; +} + +void +machine_init(void) +{ + debug_log_init(); + clock_config(); + is_clock_configured = TRUE; + if (debug_enabled) { + pmap_map_globals(); + } + ml_lockdown_init(); +} diff --git a/osfmk/arm/misc_protos.h b/osfmk/arm/misc_protos.h index 3cd4964bc..82ce92628 100644 --- a/osfmk/arm/misc_protos.h +++ b/osfmk/arm/misc_protos.h @@ -34,12 +34,17 @@ #include -extern processor_t cpu_processor_alloc(boolean_t is_boot_cpu); -extern void cpu_processor_free(processor_t proc); +typedef struct boot_args boot_args; +/* The address of the end of the kernelcache. */ +extern vm_offset_t end_kern; +/* The lowest address in the kernelcache. */ +extern vm_offset_t segLOWEST; extern void machine_startup(__unused boot_args *args) __attribute__((noinline)); -extern void machine_lockdown_preflight(void); -extern void machine_lockdown(void); + + +extern void arm_auxkc_init(void *mh, void *base); + extern void arm_vm_init(uint64_t memory_size, boot_args *args); extern void arm_vm_prot_init(boot_args *args); extern void arm_vm_prot_finalize(boot_args *args); @@ -58,6 +63,25 @@ extern thread_t Shutdown_context(void (*doshutdown)(processor_t), processor_t p extern void __dead2 Call_continuation(thread_continue_t, void *, wait_result_t, boolean_t enable_interrupts); +/** + * Indicate during a context-switch event that we have updated some CPU + * state which requires a later context-sync event. + * + * On ARMv8.5 and later CPUs, this function sets a flag that will trigger an + * explicit isb instruction sometime before the upcoming eret instruction. 
+ * + * Prior to ARMv8.5, the eret instruction itself is always synchronizing, and + * this function is an empty stub which serves only as documentation. + */ +static inline void +arm_context_switch_requires_sync(void) +{ +} + +#if __has_feature(ptrauth_calls) +extern boolean_t arm_user_jop_disabled(void); +#endif /* __has_feature(ptrauth_calls) */ + extern void DebuggerCall(unsigned int reason, void *ctx); extern void DebuggerXCall(void *ctx); @@ -82,10 +106,14 @@ extern boolean_t debug_state_is_valid64(arm_debug_state64_t *ds); extern int copyio_check_user_addr(user_addr_t user_addr, vm_size_t nbytes); +/* + * Get a quick virtual mapping of a physical page and run a callback on that + * page's virtual address. + */ +extern int apply_func_phys(addr64_t src64, vm_size_t bytes, int (*func)(void * buffer, vm_size_t bytes, void * arg), void * arg); + /* Top-Byte-Ignore */ -extern boolean_t user_tbi; #define TBI_MASK 0xff00000000000000 -#define user_tbi_enabled() (user_tbi) #define tbi_clear(addr) ((addr) & ~(TBI_MASK)) #else /* !defined(__arm__) && !defined(__arm64__) */ diff --git a/osfmk/arm/model_dep.c b/osfmk/arm/model_dep.c index 939e5eec6..b7cead5fa 100644 --- a/osfmk/arm/model_dep.c +++ b/osfmk/arm/model_dep.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2007-2019 Apple Inc. All rights reserved. + * Copyright (c) 2007-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -40,9 +40,7 @@ #include #include -#if defined(HAS_APPLE_PAC) #include -#endif #include #include @@ -74,6 +72,7 @@ #include /* for btop */ #include +#include #include #include #include @@ -87,11 +86,16 @@ void kdp_trap(unsigned int, struct arm_saved_state *); #endif extern kern_return_t do_stackshot(void *); -extern void kdp_snapshot_preflight(int pid, void *tracebuf, - uint32_t tracebuf_size, uint32_t flags, +extern void kdp_snapshot_preflight(int pid, void * tracebuf, + uint32_t tracebuf_size, uint64_t flags, kcdata_descriptor_t data_p, - boolean_t enable_faulting); + uint64_t since_timestamp, uint32_t pagetable_mask); extern int kdp_stack_snapshot_bytes_traced(void); +extern int kdp_stack_snapshot_bytes_uncompressed(void); + +#if INTERRUPT_MASKED_DEBUG +extern boolean_t interrupt_masked_debug; +#endif /* * Increment the PANICLOG_VERSION if you change the format of the panic @@ -109,19 +113,21 @@ extern void kdp_callouts(kdp_event_t event); /* #include */ #define MAXCOMLEN 16 -extern int proc_pid(void *p); +struct proc; +extern int proc_pid(struct proc *p); extern void proc_name_kdp(task_t, char *, int); /* * Make sure there's enough space to include the relevant bits in the format required * within the space allocated for the panic version string in the panic header. - * The format required by OSAnalytics/DumpPanic is 'Product Version (OS Version)' + * The format required by OSAnalytics/DumpPanic is 'Product Version (OS Version)'. 
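+ * For example, a populated version field might read "14.3 (18D52)"; the values are illustrative only.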
*/ #define PANIC_HEADER_VERSION_FMT_STR "%.14s (%.14s)" -extern const char version[]; -extern char osversion[]; -extern char osproductversion[]; +extern const char version[]; +extern char osversion[]; +extern char osproductversion[]; +extern char osreleasetype[]; #if defined(XNU_TARGET_OS_BRIDGE) extern char macosproductversion[]; @@ -133,9 +139,9 @@ extern uint32_t gPlatformMemoryID; extern uint64_t last_hwaccess_thread; -/*Choosing the size for gTargetTypeBuffer as 8 and size for gModelTypeBuffer as 32 +/*Choosing the size for gTargetTypeBuffer as 16 and size for gModelTypeBuffer as 32 * since the target name and model name typically doesn't exceed this size */ -extern char gTargetTypeBuffer[8]; +extern char gTargetTypeBuffer[16]; extern char gModelTypeBuffer[32]; decl_simple_lock_data(extern, clock_lock); @@ -145,6 +151,8 @@ extern boolean_t is_clock_configured; extern boolean_t kernelcache_uuid_valid; extern uuid_t kernelcache_uuid; +extern void stackshot_memcpy(void *dst, const void *src, size_t len); + /* Definitions for frame pointers */ #define FP_ALIGNMENT_MASK ((uint32_t)(0x3)) #define FP_LR_OFFSET ((uint32_t)4) @@ -166,6 +174,7 @@ boolean_t force_immediate_debug_halt = FALSE; unsigned int debug_ack_timeout_count = 0; volatile unsigned int debugger_sync = 0; volatile unsigned int mp_kdp_trap = 0; /* CPUs signalled by the debug CPU will spin on this */ +volatile unsigned int debug_cpus_spinning = 0; /* Number of signalled CPUs still spinning on mp_kdp_trap (in DebuggerXCall). */ unsigned int DebugContextCount = 0; #if defined(__arm64__) @@ -223,7 +232,7 @@ validate_ptr( */ static void print_one_backtrace(pmap_t pmap, vm_offset_t topfp, const char *cur_marker, - boolean_t is_64_bit) + boolean_t is_64_bit, boolean_t print_kexts_in_backtrace) { int i = 0; addr64_t lr; @@ -231,6 +240,7 @@ print_one_backtrace(pmap_t pmap, vm_offset_t topfp, const char *cur_marker, addr64_t fp_for_ppn; ppnum_t ppn; boolean_t dump_kernel_stack; + vm_offset_t raddrs[FP_MAX_NUM_TO_EVALUATE]; fp = topfp; fp_for_ppn = 0; @@ -305,8 +315,13 @@ print_one_backtrace(pmap_t pmap, vm_offset_t topfp, const char *cur_marker, } else { paniclog_append_noflush("%s\t lr: 0x%08x fp: 0x%08x\n", cur_marker, (uint32_t)lr, (uint32_t)fp); } + raddrs[i] = lr; } } while ((++i < FP_MAX_NUM_TO_EVALUATE) && (fp != topfp)); + + if (print_kexts_in_backtrace && i != 0) { + kmod_panic_dump(&raddrs[0], i); + } } #define SANE_TASK_LIMIT 256 @@ -315,6 +330,44 @@ print_one_backtrace(pmap_t pmap, vm_offset_t topfp, const char *cur_marker, extern void panic_print_vnodes(void); +static void +panic_display_hung_cpus_help(void) +{ +#if defined(__arm64__) + const uint32_t pcsr_offset = 0x90; + + /* + * Print some info that might help in cases where nothing + * else does + */ + const ml_topology_info_t *info = ml_get_topology_info(); + if (info) { + unsigned i, retry; + + for (i = 0; i < info->num_cpus; i++) { + if (info->cpus[i].cpu_UTTDBG_regs) { + volatile uint64_t *pcsr = (volatile uint64_t*)(info->cpus[i].cpu_UTTDBG_regs + pcsr_offset); + volatile uint32_t *pcsrTrigger = (volatile uint32_t*)pcsr; + uint64_t pc = 0; + + // a number of retries are needed till this works + for (retry = 1024; retry && !pc; retry--) { + //a 32-bit read is required to make a PC sample be produced, else we'll only get a zero + (void)*pcsrTrigger; + pc = *pcsr; + } + + //postprocessing (same as astris does) + if (pc >> 48) { + pc |= 0xffff000000000000ull; + } + paniclog_append_noflush("CORE %u recently retired instr at 0x%016llx\n", i, pc); + } + } + } +#endif 
//defined(__arm64__) +} + static void do_print_all_backtraces(const char *message, uint64_t panic_options) { @@ -327,9 +380,12 @@ do_print_all_backtraces(const char *message, uint64_t panic_options) /* end_marker_bytes set to 200 for printing END marker + stackshot summary info always */ int bytes_traced = 0, bytes_remaining = 0, end_marker_bytes = 200; + int bytes_uncompressed = 0; uint64_t bytes_used = 0ULL; int err = 0; char *stackshot_begin_loc = NULL; + kc_format_t kc_format; + bool filesetKC = false; #if defined(__arm__) __asm__ volatile ("mov %0, r7":"=r"(cur_fp)); @@ -343,6 +399,10 @@ do_print_all_backtraces(const char *message, uint64_t panic_options) } panic_bt_depth++; + __unused bool result = PE_get_primary_kc_format(&kc_format); + assert(result == true); + filesetKC = kc_format == KCFormatFileset; + /* Truncate panic string to 1200 bytes */ paniclog_append_noflush("Debugger message: %.1200s\n", message); if (debug_enabled) { @@ -359,6 +419,8 @@ do_print_all_backtraces(const char *message, uint64_t panic_options) paniclog_append_noflush("Boot args: %s\n", PE_boot_args()); } paniclog_append_noflush("Memory ID: 0x%x\n", gPlatformMemoryID); + paniclog_append_noflush("OS release type: %.256s\n", + ('\0' != osreleasetype[0]) ? osreleasetype : "Not set yet"); paniclog_append_noflush("OS version: %.256s\n", ('\0' != osversion[0]) ? osversion : "Not set yet"); #if defined(XNU_TARGET_OS_BRIDGE) @@ -368,7 +430,11 @@ do_print_all_backtraces(const char *message, uint64_t panic_options) paniclog_append_noflush("Kernel version: %.512s\n", version); if (kernelcache_uuid_valid) { - paniclog_append_noflush("KernelCache UUID: "); + if (filesetKC) { + paniclog_append_noflush("Fileset Kernelcache UUID: "); + } else { + paniclog_append_noflush("KernelCache UUID: "); + } for (size_t index = 0; index < sizeof(uuid_t); index++) { paniclog_append_noflush("%02X", kernelcache_uuid[index]); } @@ -429,6 +495,7 @@ do_print_all_backtraces(const char *message, uint64_t panic_options) panic_display_kernel_aslr(); panic_display_times(); panic_display_zprint(); + panic_display_hung_cpus_help(); #if CONFIG_ZLEAKS panic_display_ztrace(); #endif /* CONFIG_ZLEAKS */ @@ -519,9 +586,9 @@ do_print_all_backtraces(const char *message, uint64_t panic_options) paniclog_append_noflush("Panicked thread: %p, backtrace: 0x%llx, tid: %llu\n", cur_thread, (addr64_t)cur_fp, thread_tid(cur_thread)); #if __LP64__ - print_one_backtrace(kernel_pmap, cur_fp, nohilite_thread_marker, TRUE); + print_one_backtrace(kernel_pmap, cur_fp, nohilite_thread_marker, TRUE, filesetKC); #else - print_one_backtrace(kernel_pmap, cur_fp, nohilite_thread_marker, FALSE); + print_one_backtrace(kernel_pmap, cur_fp, nohilite_thread_marker, FALSE, filesetKC); #endif } else { paniclog_append_noflush("Could not print panicked thread backtrace:" @@ -529,6 +596,10 @@ do_print_all_backtraces(const char *message, uint64_t panic_options) } paniclog_append_noflush("\n"); + if (filesetKC) { + kext_dump_panic_lists(&paniclog_append_noflush); + paniclog_append_noflush("\n"); + } panic_info->eph_panic_log_len = PE_get_offset_into_panic_region(debug_buf_ptr) - panic_info->eph_panic_log_offset; /* set the os version data in the panic header in the format 'Product Version (OS Version)' (only if they have been set) */ if ((osversion[0] != '\0') && (osproductversion[0] != '\0')) { @@ -557,13 +628,25 @@ do_print_all_backtraces(const char *message, uint64_t panic_options) bytes_remaining = debug_buf_size - (unsigned int)((uintptr_t)stackshot_begin_loc - 
(uintptr_t)debug_buf_base); err = kcdata_memory_static_init(&kc_panic_data, (mach_vm_address_t)debug_buf_ptr, - KCDATA_BUFFER_BEGIN_STACKSHOT, bytes_remaining - end_marker_bytes, + KCDATA_BUFFER_BEGIN_COMPRESSED, bytes_remaining - end_marker_bytes, KCFLAG_USE_MEMCOPY); if (err == KERN_SUCCESS) { + uint64_t stackshot_flags = (STACKSHOT_GET_GLOBAL_MEM_STATS | STACKSHOT_SAVE_LOADINFO | STACKSHOT_KCDATA_FORMAT | + STACKSHOT_ENABLE_BT_FAULTING | STACKSHOT_ENABLE_UUID_FAULTING | STACKSHOT_FROM_PANIC | STACKSHOT_DO_COMPRESS | + STACKSHOT_DISABLE_LATENCY_INFO | STACKSHOT_NO_IO_STATS | STACKSHOT_THREAD_WAITINFO | STACKSHOT_GET_DQ | + STACKSHOT_COLLECT_SHAREDCACHE_LAYOUT); + + err = kcdata_init_compress(&kc_panic_data, KCDATA_BUFFER_BEGIN_STACKSHOT, stackshot_memcpy, KCDCT_ZLIB); + if (err != KERN_SUCCESS) { + panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_COMPRESS_FAILED; + stackshot_flags &= ~STACKSHOT_DO_COMPRESS; + } + if (filesetKC) { + stackshot_flags |= STACKSHOT_SAVE_KEXT_LOADINFO; + } + kdp_snapshot_preflight(-1, stackshot_begin_loc, bytes_remaining - end_marker_bytes, - (STACKSHOT_GET_GLOBAL_MEM_STATS | STACKSHOT_SAVE_LOADINFO | STACKSHOT_KCDATA_FORMAT | - STACKSHOT_ENABLE_BT_FAULTING | STACKSHOT_ENABLE_UUID_FAULTING | STACKSHOT_FROM_PANIC | - STACKSHOT_NO_IO_STATS | STACKSHOT_THREAD_WAITINFO | STACKSHOT_COLLECT_SHAREDCACHE_LAYOUT), &kc_panic_data, 0); + stackshot_flags, &kc_panic_data, 0, 0); err = do_stackshot(NULL); bytes_traced = kdp_stack_snapshot_bytes_traced(); if (bytes_traced > 0 && !err) { @@ -573,7 +656,13 @@ do_print_all_backtraces(const char *message, uint64_t panic_options) panic_info->eph_stackshot_len = bytes_traced; panic_info->eph_other_log_offset = PE_get_offset_into_panic_region(debug_buf_ptr); - paniclog_append_noflush("\n** Stackshot Succeeded ** Bytes Traced %d **\n", bytes_traced); + if (stackshot_flags & STACKSHOT_DO_COMPRESS) { + panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_STACKSHOT_DATA_COMPRESSED; + bytes_uncompressed = kdp_stack_snapshot_bytes_uncompressed(); + paniclog_append_noflush("\n** Stackshot Succeeded ** Bytes Traced %d (Uncompressed %d) **\n", bytes_traced, bytes_uncompressed); + } else { + paniclog_append_noflush("\n** Stackshot Succeeded ** Bytes Traced %d **\n", bytes_traced); + } } else { bytes_used = kcdata_memory_get_used_bytes(&kc_panic_data); if (bytes_used > 0) { @@ -815,6 +904,7 @@ DebuggerXCallEnter( debugger_sync = 0; mp_kdp_trap = 1; + debug_cpus_spinning = 0; /* * We need a barrier here to ensure CPUs see mp_kdp_trap and spin when responding @@ -845,6 +935,7 @@ DebuggerXCallEnter( if (KERN_SUCCESS == cpu_signal(target_cpu_datap, SIGPdebug, (void *)NULL, NULL)) { os_atomic_inc(&debugger_sync, relaxed); + os_atomic_inc(&debug_cpus_spinning, relaxed); } else { cpu_signal_failed = true; kprintf("cpu_signal failed in DebuggerXCallEnter\n"); @@ -894,8 +985,6 @@ DebuggerXCallEnter( } else { if (halt_status > 0) { paniclog_append_noflush("cpu %d halted with warning %d: %s\n", cpu, halt_status, ml_dbgwrap_strerror(halt_status)); - } else { - paniclog_append_noflush("cpu %d successfully halted\n", cpu); } target_cpu_datap->halt_status = CPU_HALTED; } @@ -916,6 +1005,7 @@ DebuggerXCallEnter( if ((halt_status < 0) || (halt_status == DBGWRAP_WARN_CPU_OFFLINE)) { paniclog_append_noflush("Unable to obtain state for cpu %d with status %d: %s\n", cpu, halt_status, ml_dbgwrap_strerror(halt_status)); } else { + paniclog_append_noflush("cpu %d successfully halted\n", cpu); target_cpu_datap->halt_status = CPU_HALTED_WITH_STATE; } } 
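/*
 * Cross-call handshake overview (descriptive only): DebuggerXCallEnter signals
 * each active CPU, bumping debugger_sync and the new debug_cpus_spinning
 * counter once per successfully signalled CPU; each target CPU, in
 * DebuggerXCall, decrements debugger_sync when it parks, spins on mp_kdp_trap,
 * and decrements debug_cpus_spinning on its way out; DebuggerXCallReturn then
 * waits, bounded by DEBUG_ACK_TIMEOUT, for debug_cpus_spinning to drain after
 * releasing mp_kdp_trap.
 */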
@@ -947,6 +1037,7 @@ DebuggerXCallReturn( void) { cpu_data_t *cpu_data_ptr = getCpuDatap(); + uint64_t max_mabs_time, current_mabs_time; cpu_data_ptr->debugger_active--; if (cpu_data_ptr->debugger_active != 0) { @@ -956,6 +1047,25 @@ DebuggerXCallReturn( mp_kdp_trap = 0; debugger_sync = 0; + nanoseconds_to_absolutetime(DEBUG_ACK_TIMEOUT, &max_mabs_time); + current_mabs_time = mach_absolute_time(); + max_mabs_time += current_mabs_time; + assert(max_mabs_time > current_mabs_time); + + /* + * Wait for other CPUs to stop spinning on mp_kdp_trap (see DebuggerXCall). + * It's possible for one or more CPUs to not decrement debug_cpus_spinning, + * since they may be stuck somewhere else with interrupts disabled. + * Wait for DEBUG_ACK_TIMEOUT ns for a response and move on if we don't get it. + * + * Note that the same is done in DebuggerXCallEnter, when we wait for other + * CPUS to update debugger_sync. If we time out, let's hope for all CPUs to be + * spinning in a debugger-safe context + */ + while ((debug_cpus_spinning != 0) && (current_mabs_time < max_mabs_time)) { + current_mabs_time = mach_absolute_time(); + } + /* Do we need a barrier here? */ __builtin_arm_dmb(DMB_ISH); } @@ -977,6 +1087,22 @@ DebuggerXCall( } kstackptr = current_thread()->machine.kstackptr; + +#if defined(__arm64__) + arm_kernel_saved_state_t *state = (arm_kernel_saved_state_t *)kstackptr; + + if (save_context) { + /* Save the interrupted context before acknowledging the signal */ + current_thread()->machine.kpcb = regs; + } else if (regs) { + /* zero old state so machine_trace_thread knows not to backtrace it */ + register_t pc = (register_t)ptrauth_strip((void *)&_was_in_userspace, ptrauth_key_function_pointer); + state->fp = 0; + state->pc = pc; + state->lr = 0; + state->sp = 0; + } +#else arm_saved_state_t *state = (arm_saved_state_t *)kstackptr; if (save_context) { @@ -984,11 +1110,26 @@ DebuggerXCall( copy_signed_thread_state(state, regs); } else if (regs) { /* zero old state so machine_trace_thread knows not to backtrace it */ + register_t pc = (register_t)ptrauth_strip((void *)&_was_in_userspace, ptrauth_key_function_pointer); set_saved_state_fp(state, 0); - set_saved_state_pc(state, (register_t)&_was_in_userspace); + set_saved_state_pc(state, pc); set_saved_state_lr(state, 0); set_saved_state_sp(state, 0); } +#endif + + /* + * When running in serial mode, the core capturing the dump may hold interrupts disabled + * for a time longer than the timeout. That path includes logic to reset the timestamp + * so that we do not eventually trigger the interrupt timeout assert(). + * + * Here we check whether other cores have already gone over the timeout at this point + * before spinning, so we at least cover the IPI reception path. After spinning, however, + * we reset the timestamp so as to avoid hitting the interrupt timeout assert(). + */ + if ((serialmode & SERIALMODE_OUTPUT) || stackshot_active()) { + INTERRUPT_MASKED_DEBUG_END(); + } os_atomic_dec(&debugger_sync, relaxed); __builtin_arm_dmb(DMB_ISH); @@ -996,6 +1137,17 @@ DebuggerXCall( ; } + /** + * Alert the triggering CPU that this CPU is done spinning. The CPU that + * signalled all of the other CPUs will wait (in DebuggerXCallReturn) for + * all of the CPUs to exit the above loop before continuing. 
+ */ + os_atomic_dec(&debug_cpus_spinning, relaxed); + + if ((serialmode & SERIALMODE_OUTPUT) || stackshot_active()) { + INTERRUPT_MASKED_DEBUG_START(current_thread()->machine.int_handler_addr, current_thread()->machine.int_type); + } + /* Any cleanup for our pushed context should go here */ } diff --git a/osfmk/arm/pcb.c b/osfmk/arm/pcb.c index f42c4f4e1..c4edbadb3 100644 --- a/osfmk/arm/pcb.c +++ b/osfmk/arm/pcb.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2007-2019 Apple Inc. All rights reserved. + * Copyright (c) 2007-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -40,7 +40,6 @@ #include #include #include -#include #include #include @@ -55,7 +54,8 @@ extern int debug_task; -zone_t ads_zone; /* zone for debug_state area */ +/* zone for debug_state area */ +ZONE_DECLARE(ads_zone, "arm debug state", sizeof(arm_debug_state_t), ZC_NONE); /* * Routine: consider_machine_collect @@ -76,37 +76,62 @@ consider_machine_adjust(void) { } +static inline void +machine_thread_switch_cpu_data(thread_t old, thread_t new) +{ + /* + * We build with -fno-strict-aliasing, so the load through temporaries + * is required so that this generates a single load / store pair. + */ + cpu_data_t *datap = old->machine.CpuDatap; + vm_offset_t base = old->machine.pcpu_data_base; + + /* TODO: Should this be ordered? */ + + /* + * arm relies on CpuDatap being set for a thread that has run, + * so we only reset pcpu_data_base. + */ + old->machine.pcpu_data_base = -1; + + new->machine.CpuDatap = datap; + new->machine.pcpu_data_base = base; +} + /* * Routine: machine_switch_context * */ thread_t machine_switch_context( - thread_t old, - thread_continue_t continuation, - thread_t new) + thread_t old, + thread_continue_t continuation, + thread_t new) { thread_t retval; - cpu_data_t *cpu_data_ptr; -#define machine_switch_context_kprintf(x...) /* kprintf("machine_switch_con - * text: " x) */ +#define machine_switch_context_kprintf(x...) \ + /* kprintf("machine_switch_context: " x) */ - cpu_data_ptr = getCpuDatap(); - if (old == new) + if (old == new) { panic("machine_switch_context"); + } kpc_off_cpu(old); + /* + * If the thread is preempted while performing cache or TLB maintenance, + * it may be migrated to a different CPU between the completion of the relevant + * maintenance instruction and the synchronizing DSB. ARM requires that the + * synchronizing DSB must be issued *on the PE that issued the maintenance instruction* + * in order to guarantee completion of the instruction and visibility of its effects. + * Issue DSB here to enforce that guarantee. Note that due to __ARM_USER_PROTECT__, + * pmap_set_pmap() will not update TTBR0 (which ordinarily would include DSB). + */ + __builtin_arm_dsb(DSB_ISH); pmap_set_pmap(new->map->pmap, new); - new->machine.CpuDatap = cpu_data_ptr; - -#if __SMP__ - /* TODO: Should this be ordered? 
*/ - old->machine.machine_thread_flags &= ~MACHINE_THREAD_FLAGS_ON_CPU; - new->machine.machine_thread_flags |= MACHINE_THREAD_FLAGS_ON_CPU; -#endif /* __SMP__ */ + machine_thread_switch_cpu_data(old, new); machine_switch_context_kprintf("old= %x contination = %x new = %x\n", old, continuation, new); retval = Switch_context(old, continuation, new); @@ -118,7 +143,7 @@ machine_switch_context( boolean_t machine_thread_on_core(thread_t thread) { - return thread->machine.machine_thread_flags & MACHINE_THREAD_FLAGS_ON_CPU; + return thread->machine.pcpu_data_base != -1; } /* @@ -127,40 +152,35 @@ machine_thread_on_core(thread_t thread) */ kern_return_t machine_thread_create( - thread_t thread, -#if !__ARM_USER_PROTECT__ - __unused + thread_t thread, +#if !__ARM_USER_PROTECT__ + __unused #endif - task_t task) + task_t task) { - -#define machine_thread_create_kprintf(x...) /* kprintf("machine_thread_create: " x) */ +#define machine_thread_create_kprintf(x...) /* kprintf("machine_thread_create: " x) */ machine_thread_create_kprintf("thread = %x\n", thread); if (current_thread() != thread) { thread->machine.CpuDatap = (cpu_data_t *)0; + // setting this offset will cause trying to use it to panic + thread->machine.pcpu_data_base = -1; } thread->machine.preemption_count = 0; thread->machine.cthread_self = 0; -#if __ARM_USER_PROTECT__ +#if __ARM_USER_PROTECT__ { - struct pmap *new_pmap = vm_map_pmap(task->map); + struct pmap *new_pmap = vm_map_pmap(task->map); - thread->machine.kptw_ttb = ((unsigned int) kernel_pmap->ttep) | TTBR_SETUP; - thread->machine.asid = new_pmap->hw_asid; - if (new_pmap->tte_index_max == NTTES) { - thread->machine.uptw_ttc = 2; + thread->machine.kptw_ttb = ((unsigned int) kernel_pmap->ttep) | TTBR_SETUP; + thread->machine.asid = new_pmap->hw_asid; thread->machine.uptw_ttb = ((unsigned int) new_pmap->ttep) | TTBR_SETUP; - } else { - thread->machine.uptw_ttc = 1; - thread->machine.uptw_ttb = ((unsigned int) new_pmap->ttep ) | TTBR_SETUP; - } } #endif machine_thread_state_initialize(thread); - return (KERN_SUCCESS); + return KERN_SUCCESS; } /* @@ -169,12 +189,12 @@ machine_thread_create( */ void machine_thread_destroy( - thread_t thread) + thread_t thread) { - - if (thread->machine.DebugData != NULL) { - if (thread->machine.DebugData == getCpuDatap()->cpu_user_debug) + if (thread->machine.DebugData != NULL) { + if (thread->machine.DebugData == getCpuDatap()->cpu_user_debug) { arm_debug_set(NULL); + } zfree(ads_zone, thread->machine.DebugData); } } @@ -187,10 +207,6 @@ machine_thread_destroy( void machine_thread_init(void) { - ads_zone = zinit(sizeof(arm_debug_state_t), - THREAD_CHUNK * (sizeof(arm_debug_state_t)), - THREAD_CHUNK * (sizeof(arm_debug_state_t)), - "arm debug state"); } /* @@ -210,7 +226,7 @@ machine_thread_template_init(thread_t __unused thr_template) user_addr_t get_useraddr() { - return (current_thread()->machine.PcbData.pc); + return current_thread()->machine.PcbData.pc; } /* @@ -219,18 +235,18 @@ get_useraddr() */ vm_offset_t machine_stack_detach( - thread_t thread) + thread_t thread) { vm_offset_t stack; KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_DETACH), - (uintptr_t)thread_tid(thread), thread->priority, thread->sched_pri, 0, 0); + (uintptr_t)thread_tid(thread), thread->priority, thread->sched_pri, 0, 0); stack = thread->kernel_stack; thread->kernel_stack = 0; thread->machine.kstackptr = 0; - return (stack); + return stack; } @@ -240,15 +256,15 @@ machine_stack_detach( */ void machine_stack_attach( - thread_t thread, - vm_offset_t stack) + thread_t 
thread, + vm_offset_t stack) { struct arm_saved_state *savestate; -#define machine_stack_attach_kprintf(x...) /* kprintf("machine_stack_attach: " x) */ +#define machine_stack_attach_kprintf(x...) /* kprintf("machine_stack_attach: " x) */ KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_ATTACH), - (uintptr_t)thread_tid(thread), thread->priority, thread->sched_pri, 0, 0); + (uintptr_t)thread_tid(thread), thread->priority, thread->sched_pri, 0, 0); thread->kernel_stack = stack; thread->machine.kstackptr = stack + kernel_stack_size - sizeof(struct thread_kernel_state); @@ -271,16 +287,14 @@ machine_stack_attach( */ void machine_stack_handoff( - thread_t old, - thread_t new) + thread_t old, + thread_t new) { vm_offset_t stack; - cpu_data_t *cpu_data_ptr; kpc_off_cpu(old); stack = machine_stack_detach(old); - cpu_data_ptr = getCpuDatap(); new->kernel_stack = stack; new->machine.kstackptr = stack + kernel_stack_size - sizeof(struct thread_kernel_state); if (stack == old->reserved_stack) { @@ -289,19 +303,22 @@ machine_stack_handoff( new->reserved_stack = stack; } + /* + * If the thread is preempted while performing cache or TLB maintenance, + * it may be migrated to a different CPU between the completion of the relevant + * maintenance instruction and the synchronizing DSB. ARM requires that the + * synchronizing DSB must be issued *on the PE that issued the maintenance instruction* + * in order to guarantee completion of the instruction and visibility of its effects. + * Issue DSB here to enforce that guarantee. Note that due to __ARM_USER_PROTECT__, + * pmap_set_pmap() will not update TTBR0 (which ordinarily would include DSB). + */ + __builtin_arm_dsb(DSB_ISH); pmap_set_pmap(new->map->pmap, new); - new->machine.CpuDatap = cpu_data_ptr; -#if __SMP__ - /* TODO: Should this be ordered? */ - old->machine.machine_thread_flags &= ~MACHINE_THREAD_FLAGS_ON_CPU; - new->machine.machine_thread_flags |= MACHINE_THREAD_FLAGS_ON_CPU; -#endif /* __SMP__ */ + machine_thread_switch_cpu_data(old, new); machine_set_current_thread(new); thread_initialize_kernel_state(new); - - return; } @@ -311,19 +328,20 @@ machine_stack_handoff( */ void call_continuation( - thread_continue_t continuation, - void *parameter, - wait_result_t wresult, - boolean_t enable_interrupts) + thread_continue_t continuation, + void *parameter, + wait_result_t wresult, + boolean_t enable_interrupts) { -#define call_continuation_kprintf(x...) /* kprintf("call_continuation_kprintf: - * " x) */ +#define call_continuation_kprintf(x...) /* kprintf("call_continuation_kprintf: + * " x) */ call_continuation_kprintf("thread = %x continuation = %x, stack = %x\n", current_thread(), continuation, current_thread()->machine.kstackptr); Call_continuation(continuation, parameter, wresult, enable_interrupts); } -void arm_debug_set(arm_debug_state_t *debug_state) +void +arm_debug_set(arm_debug_state_t *debug_state) { /* If this CPU supports the memory-mapped debug interface, use it, otherwise * attempt the Extended CP14 interface. 
The two routines need to be kept in sync, @@ -368,18 +386,15 @@ void arm_debug_set(arm_debug_state_t *debug_state) ((volatile uint32_t *)(debug_map + ARM_DEBUG_OFFSET_DBGWVR))[i] = debug_state->wvr[i]; ((volatile uint32_t *)(debug_map + ARM_DEBUG_OFFSET_DBGWCR))[i] = debug_state->wcr[i]; } - } + } // lock debug registers *(volatile uint32_t *)(debug_map + ARM_DEBUG_OFFSET_DBGLAR) = 0; - - } else if (debug_info->coprocessor_core_debug) { + } else if (debug_info->coprocessor_core_debug) { arm_debug_set_cp14(debug_state); } (void) ml_set_interrupts_enabled(intr); - - return; } /* @@ -388,19 +403,18 @@ void arm_debug_set(arm_debug_state_t *debug_state) */ void copy_debug_state( - arm_debug_state_t *src, - arm_debug_state_t *target, - __unused boolean_t all) + arm_debug_state_t *src, + arm_debug_state_t *target, + __unused boolean_t all) { bcopy(src, target, sizeof(arm_debug_state_t)); } kern_return_t machine_thread_set_tsd_base( - thread_t thread, - mach_vm_offset_t tsd_base) + thread_t thread, + mach_vm_offset_t tsd_base) { - if (thread->task == kernel_task) { return KERN_INVALID_ARGUMENT; } @@ -409,26 +423,25 @@ machine_thread_set_tsd_base( return KERN_INVALID_ARGUMENT; } - if (tsd_base > UINT32_MAX) + if (tsd_base > UINT32_MAX) { tsd_base = 0ULL; + } thread->machine.cthread_self = tsd_base; /* For current thread, make the TSD base active immediately */ if (thread == current_thread()) { - mp_disable_preemption(); - __asm__ volatile( - "mrc p15, 0, r6, c13, c0, 3\n" - "and r6, r6, #3\n" - "orr r6, r6, %0\n" - "mcr p15, 0, r6, c13, c0, 3\n" - : /* output */ - : "r"((uint32_t)tsd_base) /* input */ - : "r6" /* clobbered register */ - ); + __asm__ volatile ( + "mrc p15, 0, r6, c13, c0, 3\n" + "and r6, r6, #3\n" + "orr r6, r6, %0\n" + "mcr p15, 0, r6, c13, c0, 3\n" + : /* output */ + : "r"((uint32_t)tsd_base) /* input */ + : "r6" /* clobbered register */ + ); mp_enable_preemption(); - } return KERN_SUCCESS; diff --git a/osfmk/arm/pmap.c b/osfmk/arm/pmap.c index 2d082d60d..92337e64c 100644 --- a/osfmk/arm/pmap.c +++ b/osfmk/arm/pmap.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011-2019 Apple Inc. All rights reserved. + * Copyright (c) 2011-2020 Apple Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -59,6 +59,7 @@ #include #include +#include #include #include @@ -82,7 +83,10 @@ #if CONFIG_PGTRACE_NONKEXT #include #endif // CONFIG_PGTRACE_NONKEXT -#endif +#endif // CONFIG_PGTRACE +#if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR) +#include +#endif // defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR) #endif #include @@ -94,21 +98,38 @@ #include #endif +#ifdef CONFIG_XNUPOST +#include +#endif + + +#if HIBERNATION +#include +#endif /* HIBERNATION */ + #define PMAP_TT_L0_LEVEL 0x0 #define PMAP_TT_L1_LEVEL 0x1 #define PMAP_TT_L2_LEVEL 0x2 #define PMAP_TT_L3_LEVEL 0x3 -#if (__ARM_VMSA__ == 7) -#define PMAP_TT_MAX_LEVEL PMAP_TT_L2_LEVEL + +#ifdef __ARM64_PMAP_SUBPAGE_L1__ +#if (__ARM_VMSA__ <= 7) +#error This is not supported for old-style page tables +#endif +#define PMAP_ROOT_ALLOC_SIZE (((ARM_TT_L1_INDEX_MASK >> ARM_TT_L1_SHIFT) + 1) * sizeof(tt_entry_t)) +#else +#if (__ARM_VMSA__ <= 7) +#define PMAP_ROOT_ALLOC_SIZE (ARM_PGBYTES * 2) #else -#define PMAP_TT_MAX_LEVEL PMAP_TT_L3_LEVEL +#define PMAP_ROOT_ALLOC_SIZE (ARM_PGBYTES) +#endif #endif -#define PMAP_TT_LEAF_LEVEL PMAP_TT_MAX_LEVEL -#define PMAP_TT_TWIG_LEVEL (PMAP_TT_MAX_LEVEL - 1) + +extern u_int32_t random(void); /* from */ static bool alloc_asid(pmap_t pmap); static void free_asid(pmap_t pmap); -static void flush_mmu_tlb_region_asid_async(vm_offset_t va, unsigned length, pmap_t pmap); +static void flush_mmu_tlb_region_asid_async(vm_offset_t va, size_t length, pmap_t pmap); static void flush_mmu_tlb_tte_asid_async(vm_offset_t va, pmap_t pmap); static void flush_mmu_tlb_full_asid_async(pmap_t pmap); static pt_entry_t wimg_to_pte(unsigned int wimg); @@ -116,7 +137,7 @@ static pt_entry_t wimg_to_pte(unsigned int wimg); struct page_table_ops { bool (*alloc_id)(pmap_t pmap); void (*free_id)(pmap_t pmap); - void (*flush_tlb_region_async)(vm_offset_t va, unsigned length, pmap_t pmap); + void (*flush_tlb_region_async)(vm_offset_t va, size_t length, pmap_t pmap); void (*flush_tlb_tte_async)(vm_offset_t va, pmap_t pmap); void (*flush_tlb_async)(pmap_t pmap); pt_entry_t (*wimg_to_pte)(unsigned int wimg); @@ -223,12 +244,27 @@ struct page_table_attr { const uintptr_t ap_xn; const uintptr_t ap_x; const unsigned int pta_root_level; + const unsigned int pta_sharedpage_level; const unsigned int pta_max_level; +#if __ARM_MIXED_PAGE_SIZE__ + const uint64_t pta_tcr_value; +#endif /* __ARM_MIXED_PAGE_SIZE__ */ + const uint64_t pta_page_size; + const uint64_t pta_page_shift; }; const struct page_table_attr pmap_pt_attr_4k = { .pta_level_info = pmap_table_level_info_4k, - .pta_root_level = PMAP_TT_L1_LEVEL, + .pta_root_level = (T0SZ_BOOT - 16) / 9, +#if __ARM_MIXED_PAGE_SIZE__ + .pta_sharedpage_level = PMAP_TT_L2_LEVEL, +#else /* __ARM_MIXED_PAGE_SIZE__ */ +#if __ARM_16K_PG__ + .pta_sharedpage_level = PMAP_TT_L2_LEVEL, +#else /* __ARM_16K_PG__ */ + .pta_sharedpage_level = PMAP_TT_L1_LEVEL, +#endif /* __ARM_16K_PG__ */ +#endif /* __ARM_MIXED_PAGE_SIZE__ */ .pta_max_level = PMAP_TT_L3_LEVEL, .pta_ops = &native_pt_ops, .ap_ro = ARM_PTE_AP(AP_RORO), @@ -237,11 +273,17 @@ const struct page_table_attr pmap_pt_attr_4k = { .ap_rwna = ARM_PTE_AP(AP_RWNA), .ap_xn = ARM_PTE_PNX | ARM_PTE_NX, .ap_x = ARM_PTE_PNX, +#if __ARM_MIXED_PAGE_SIZE__ + .pta_tcr_value = TCR_EL1_4KB, +#endif /* __ARM_MIXED_PAGE_SIZE__ */ + .pta_page_size = 4096, + .pta_page_shift = 12, }; const struct page_table_attr pmap_pt_attr_16k = { .pta_level_info = pmap_table_level_info_16k, .pta_root_level = 
PMAP_TT_L1_LEVEL, + .pta_sharedpage_level = PMAP_TT_L2_LEVEL, .pta_max_level = PMAP_TT_L3_LEVEL, .pta_ops = &native_pt_ops, .ap_ro = ARM_PTE_AP(AP_RORO), @@ -250,6 +292,11 @@ const struct page_table_attr pmap_pt_attr_16k = { .ap_rwna = ARM_PTE_AP(AP_RWNA), .ap_xn = ARM_PTE_PNX | ARM_PTE_NX, .ap_x = ARM_PTE_PNX, +#if __ARM_MIXED_PAGE_SIZE__ + .pta_tcr_value = TCR_EL1_16KB, +#endif /* __ARM_MIXED_PAGE_SIZE__ */ + .pta_page_size = 16384, + .pta_page_shift = 14, }; #if __ARM_16K_PG__ @@ -282,6 +329,12 @@ typedef struct page_table_attr pt_attr_t; #if (__ARM_VMSA__ > 7) static inline uint64_t +pt_attr_page_size(const pt_attr_t * const pt_attr) +{ + return pt_attr->pta_page_size; +} + +__unused static inline uint64_t pt_attr_ln_size(const pt_attr_t * const pt_attr, unsigned int level) { return pt_attr->pta_level_info[level].size; @@ -293,12 +346,24 @@ pt_attr_ln_shift(const pt_attr_t * const pt_attr, unsigned int level) return pt_attr->pta_level_info[level].shift; } -__unused static inline uint64_t +static inline uint64_t pt_attr_ln_offmask(const pt_attr_t * const pt_attr, unsigned int level) { return pt_attr->pta_level_info[level].offmask; } +__unused static inline uint64_t +pt_attr_ln_pt_offmask(const pt_attr_t * const pt_attr, unsigned int level) +{ + return pt_attr_ln_offmask(pt_attr, level); +} + +__unused static inline uint64_t +pt_attr_ln_index_mask(const pt_attr_t * const pt_attr, unsigned int level) +{ + return pt_attr->pta_level_info[level].index_mask; +} + static inline unsigned int pt_attr_twig_level(const pt_attr_t * const pt_attr) { @@ -311,6 +376,18 @@ pt_attr_root_level(const pt_attr_t * const pt_attr) return pt_attr->pta_root_level; } +/** + * This is the level at which to copy a pt_entry from the sharedpage_pmap into + * the user pmap. Typically L1 for 4K pages, and L2 for 16K pages. In this way, + * the sharedpage's L2/L3 page tables are reused in every 4k task, whereas only + * the L3 page table is reused in 16K tasks. 
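As a minimal sketch of the reuse described above (the helper name and types here are illustrative, not taken from this source): copying a single table descriptor at the sharedpage level makes the user pmap resolve that VA range through the very same lower-level tables owned by the sharedpage pmap, so nothing below that level is duplicated per task.

#include <stdint.h>

typedef uint64_t tt_entry_t;

static void
share_commpage_subtree(tt_entry_t *user_table, const tt_entry_t *shared_table,
    unsigned int sharedpage_index)
{
	/* One descriptor copy; every level below it is now shared, not cloned. */
	user_table[sharedpage_index] = shared_table[sharedpage_index];
}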
+ */ +static inline unsigned int +pt_attr_sharedpage_level(const pt_attr_t * const pt_attr) +{ + return pt_attr->pta_sharedpage_level; +} + static __unused inline uint64_t pt_attr_leaf_size(const pt_attr_t * const pt_attr) { @@ -408,6 +485,23 @@ pt_attr_leaf_x(const pt_attr_t * const pt_attr) } #else /* (__ARM_VMSA__ > 7) */ +static inline uint64_t +pt_attr_page_size(__unused const pt_attr_t * const pt_attr) +{ + return PAGE_SIZE; +} + +__unused static inline unsigned int +pt_attr_root_level(__unused const pt_attr_t * const pt_attr) +{ + return PMAP_TT_L1_LEVEL; +} + +__unused static inline unsigned int +pt_attr_sharedpage_level(__unused const pt_attr_t * const pt_attr) +{ + return PMAP_TT_L1_LEVEL; +} static inline unsigned int pt_attr_twig_level(__unused const pt_attr_t * const pt_attr) @@ -505,8 +599,39 @@ pt_attr_leaf_xn(__unused const pt_attr_t * const pt_attr) return ARM_PTE_NX; } +__unused static inline uintptr_t +pt_attr_ln_offmask(__unused const pt_attr_t * const pt_attr, unsigned int level) +{ + if (level == PMAP_TT_L1_LEVEL) { + return ARM_TT_L1_OFFMASK; + } else if (level == PMAP_TT_L2_LEVEL) { + return ARM_TT_L2_OFFMASK; + } + + return 0; +} + +static inline uintptr_t +pt_attr_ln_pt_offmask(__unused const pt_attr_t * const pt_attr, unsigned int level) +{ + if (level == PMAP_TT_L1_LEVEL) { + return ARM_TT_L1_PT_OFFMASK; + } else if (level == PMAP_TT_L2_LEVEL) { + return ARM_TT_L2_OFFMASK; + } + + return 0; +} + #endif /* (__ARM_VMSA__ > 7) */ +static inline unsigned int +pt_attr_leaf_level(const pt_attr_t * const pt_attr) +{ + return pt_attr_twig_level(pt_attr) + 1; +} + + static inline void pmap_sync_tlb(bool strong __unused) { @@ -537,6 +662,19 @@ int pmap_stats_assert = 1; #endif /* DEVELOPMENT || DEBUG */ +/* + * Represents a tlb range that will be flushed before exiting + * the ppl. + * Used by phys_attribute_clear_range to defer flushing pages in + * this range until the end of the operation. + */ +typedef struct pmap_tlb_flush_range { + pmap_t ptfr_pmap; + vm_map_address_t ptfr_start; + vm_map_address_t ptfr_end; + bool ptfr_flush_needed; +} pmap_tlb_flush_range_t; + #if XNU_MONITOR /* * PPL External References. @@ -559,7 +697,7 @@ extern unsigned long segSizePPLDATACONST; * PPL Global Variables */ -#if (DEVELOPMENT || DEBUG) +#if (DEVELOPMENT || DEBUG) || CONFIG_CSR_FROM_DT /* Indicates if the PPL will enforce mapping policies; set by -unsafe_kernel_text */ SECURITY_READ_ONLY_LATE(boolean_t) pmap_ppl_disable = FALSE; #else @@ -574,18 +712,12 @@ boolean_t pmap_ppl_locked_down MARK_AS_PMAP_DATA = FALSE; * maintain a list of free pages that the PPL owns. The kernel can give the PPL * additional pages. 
*/ -decl_simple_lock_data(, pmap_ppl_free_page_lock MARK_AS_PMAP_DATA); +MARK_AS_PMAP_DATA SIMPLE_LOCK_DECLARE(pmap_ppl_free_page_lock, 0); void ** pmap_ppl_free_page_list MARK_AS_PMAP_DATA = NULL; uint64_t pmap_ppl_free_page_count MARK_AS_PMAP_DATA = 0; uint64_t pmap_ppl_pages_returned_to_kernel_count_total = 0; -struct pmap_cpu_data_array_entry pmap_cpu_data_array[MAX_CPUS] MARK_AS_PMAP_DATA; - -#ifdef CPU_CLUSTER_OFFSETS -const uint64_t pmap_cluster_offsets[] = CPU_CLUSTER_OFFSETS; -_Static_assert((sizeof(pmap_cluster_offsets) / sizeof(pmap_cluster_offsets[0])) == __ARM_CLUSTER_COUNT__, - "pmap_cluster_offsets[] count does not match __ARM_CLUSTER_COUNT__"); -#endif +struct pmap_cpu_data_array_entry pmap_cpu_data_array[MAX_CPUS] MARK_AS_PMAP_DATA = {0}; extern void *pmap_stacks_start; extern void *pmap_stacks_end; @@ -595,7 +727,9 @@ SECURITY_READ_ONLY_LATE(pmap_paddr_t) ppl_cpu_save_area_start = 0; SECURITY_READ_ONLY_LATE(pmap_paddr_t) ppl_cpu_save_area_end = 0; /* Allocation data/locks for pmap structures. */ -decl_simple_lock_data(, pmap_free_list_lock MARK_AS_PMAP_DATA); +#if XNU_MONITOR +MARK_AS_PMAP_DATA SIMPLE_LOCK_DECLARE(pmap_free_list_lock, 0); +#endif SECURITY_READ_ONLY_LATE(unsigned long) pmap_array_count = 0; SECURITY_READ_ONLY_LATE(void *) pmap_array_begin = NULL; SECURITY_READ_ONLY_LATE(void *) pmap_array_end = NULL; @@ -610,7 +744,8 @@ pmap_t pmap_free_list MARK_AS_PMAP_DATA = NULL; * Maximum number of ledgers allowed are maximum number of tasks * allowed on system plus some more i.e. ~10% of total tasks = 200. */ -#define MAX_PMAP_LEDGERS (MAX_ASID + 200) +#define MAX_PMAP_LEDGERS (pmap_max_asids + 200) +#define PMAP_ARRAY_SIZE (pmap_max_asids) typedef struct pmap_ledger_data { char pld_data[PMAP_LEDGER_DATA_BYTES]; @@ -626,7 +761,7 @@ typedef struct pmap_ledger { } pmap_ledger_t; SECURITY_READ_ONLY_LATE(bool) pmap_ledger_alloc_initialized = false; -decl_simple_lock_data(, pmap_ledger_lock MARK_AS_PMAP_DATA); +MARK_AS_PMAP_DATA SIMPLE_LOCK_DECLARE(pmap_ledger_lock, 0); SECURITY_READ_ONLY_LATE(void *) pmap_ledger_refcnt_begin = NULL; SECURITY_READ_ONLY_LATE(void *) pmap_ledger_refcnt_end = NULL; SECURITY_READ_ONLY_LATE(os_refcnt_t *) pmap_ledger_refcnt = NULL; @@ -667,10 +802,6 @@ pmap_check_ledger_fields(ledger_t ledger) #endif /* !XNU_MONITOR */ -#if DEVELOPMENT || DEBUG -int panic_on_unsigned_execute = 0; -#endif /* DEVELOPMENT || DEBUG */ - /* Virtual memory region for early allocation */ #if (__ARM_VMSA__ == 7) @@ -681,6 +812,8 @@ int panic_on_unsigned_execute = 0; #define VREGION1_START ((VM_MAX_KERNEL_ADDRESS & CPUWINDOWS_BASE_MASK) - VREGION1_HIGH_WINDOW) #define VREGION1_SIZE (trunc_page(VM_MAX_KERNEL_ADDRESS - (VREGION1_START))) +extern uint8_t bootstrap_pagetables[]; + extern unsigned int not_in_kdp; extern vm_offset_t first_avail; @@ -692,8 +825,13 @@ extern vm_offset_t virtual_space_start; /* Next available kernel VA */ extern vm_offset_t virtual_space_end; /* End of kernel address space */ extern vm_offset_t static_memory_end; +extern const vm_map_address_t physmap_base; +extern const vm_map_address_t physmap_end; + extern int maxproc, hard_maxproc; +vm_address_t MARK_AS_PMAP_DATA image4_slab = 0; + #if (__ARM_VMSA__ > 7) /* The number of address bits one TTBR can cover. 
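The definition that follows derives this from T0SZ_BOOT; on ARMv8, a translation regime configured with a given TnSZ covers 2^(64 - TnSZ) bytes of virtual address space. A small, hedged illustration (the T0SZ value here is hypothetical, not read from this source):

/* With a hypothetical T0SZ of 25, one TTBR covers 64 - 25 = 39 VA bits (512 GiB). */
#define EXAMPLE_T0SZ 25ULL
_Static_assert(64ULL - EXAMPLE_T0SZ == 39ULL, "example TTBR coverage: 39 address bits");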
*/ #define PGTABLE_ADDR_BITS (64ULL - T0SZ_BOOT) @@ -720,20 +858,20 @@ const uint64_t arm64_root_pgtable_num_ttes = 0; struct pmap kernel_pmap_store MARK_AS_PMAP_DATA; SECURITY_READ_ONLY_LATE(pmap_t) kernel_pmap = &kernel_pmap_store; -struct vm_object pmap_object_store __attribute__((aligned(VM_PACKED_POINTER_ALIGNMENT))); /* store pt pages */ +struct vm_object pmap_object_store VM_PAGE_PACKED_ALIGNED; /* store pt pages */ vm_object_t pmap_object = &pmap_object_store; -static struct zone *pmap_zone; /* zone of pmap structures */ +static SECURITY_READ_ONLY_LATE(zone_t) pmap_zone; /* zone of pmap structures */ -decl_simple_lock_data(, pmaps_lock MARK_AS_PMAP_DATA); -decl_simple_lock_data(, tt1_lock MARK_AS_PMAP_DATA); +MARK_AS_PMAP_DATA SIMPLE_LOCK_DECLARE(pmaps_lock, 0); +MARK_AS_PMAP_DATA SIMPLE_LOCK_DECLARE(tt1_lock, 0); unsigned int pmap_stamp MARK_AS_PMAP_DATA; queue_head_t map_pmap_list MARK_AS_PMAP_DATA; -decl_simple_lock_data(, pt_pages_lock MARK_AS_PMAP_DATA); +MARK_AS_PMAP_DATA SIMPLE_LOCK_DECLARE(pt_pages_lock, 0); queue_head_t pt_page_list MARK_AS_PMAP_DATA; /* pt page ptd entries list */ -decl_simple_lock_data(, pmap_pages_lock MARK_AS_PMAP_DATA); +MARK_AS_PMAP_DATA SIMPLE_LOCK_DECLARE(pmap_pages_lock, 0); typedef struct page_free_entry { struct page_free_entry *next; @@ -789,29 +927,27 @@ int nx_enabled = 1; /* enable no-execute pro int allow_data_exec = 0; /* No apps may execute data */ int allow_stack_exec = 0; /* No apps may execute from the stack */ unsigned long pmap_asid_flushes MARK_AS_PMAP_DATA = 0; +unsigned long pmap_asid_hits MARK_AS_PMAP_DATA = 0; +unsigned long pmap_asid_misses MARK_AS_PMAP_DATA = 0; #else /* DEVELOPMENT || DEBUG */ const int nx_enabled = 1; /* enable no-execute protection */ const int allow_data_exec = 0; /* No apps may execute data */ const int allow_stack_exec = 0; /* No apps may execute from the stack */ #endif /* DEVELOPMENT || DEBUG */ -/* - * pv_entry_t - structure to track the active mappings for a given page - */ -typedef struct pv_entry { - struct pv_entry *pve_next; /* next alias */ - pt_entry_t *pve_ptep; /* page table entry */ -} -#if __arm__ && (__BIGGEST_ALIGNMENT__ > 4) -/* For the newer ARMv7k ABI where 64-bit types are 64-bit aligned, but pointers - * are 32-bit: - * Since pt_desc is 64-bit aligned and we cast often from pv_entry to - * pt_desc. +/** + * This variable is set true during hibernation entry to protect pmap data structures + * during image copying, and reset false on hibernation exit. */ -__attribute__ ((aligned(8))) pv_entry_t; +bool hib_entry_pmap_lockdown MARK_AS_PMAP_DATA = false; + +/* Macro used to ensure that pmap data structures aren't modified during hibernation image copying. 
*/ +#if HIBERNATION +#define ASSERT_NOT_HIBERNATING() (assertf(!hib_entry_pmap_lockdown, \ + "Attempted to modify PMAP data structures after hibernation image copying has begun.")) #else -pv_entry_t; -#endif +#define ASSERT_NOT_HIBERNATING() +#endif /* HIBERNATION */ #define PV_ENTRY_NULL ((pv_entry_t *) 0) @@ -845,54 +981,57 @@ pmap_check_ledgers(__unused pmap_t pmap) SECURITY_READ_ONLY_LATE(pv_entry_t * *) pv_head_table; /* array of pv entry pointers */ -pv_entry_t *pv_free_list MARK_AS_PMAP_DATA; -pv_entry_t *pv_kern_free_list MARK_AS_PMAP_DATA; -decl_simple_lock_data(, pv_free_list_lock MARK_AS_PMAP_DATA); -decl_simple_lock_data(, pv_kern_free_list_lock MARK_AS_PMAP_DATA); +pv_free_list_t pv_free MARK_AS_PMAP_DATA = {0}; +pv_free_list_t pv_kern_free MARK_AS_PMAP_DATA = {0}; +MARK_AS_PMAP_DATA SIMPLE_LOCK_DECLARE(pv_free_list_lock, 0); +MARK_AS_PMAP_DATA SIMPLE_LOCK_DECLARE(pv_kern_free_list_lock, 0); -decl_simple_lock_data(, phys_backup_lock); +SIMPLE_LOCK_DECLARE(phys_backup_lock, 0); /* * pt_desc - structure to keep info on page assigned to page tables */ #if (__ARM_VMSA__ == 7) -#define PT_INDEX_MAX 1 -#else -#if (ARM_PGSHIFT == 14) -#define PT_INDEX_MAX 1 +#define PT_INDEX_MAX 1 +#else /* (__ARM_VMSA__ != 7) */ + +#if __ARM_MIXED_PAGE_SIZE__ +#define PT_INDEX_MAX (ARM_PGBYTES / 4096) +#elif (ARM_PGSHIFT == 14) +#define PT_INDEX_MAX 1 +#elif (ARM_PGSHIFT == 12) +#define PT_INDEX_MAX 4 #else -#define PT_INDEX_MAX 4 -#endif -#endif +#error Unsupported ARM_PGSHIFT +#endif /* (ARM_PGSHIFT != 14) */ + +#endif /* (__ARM_VMSA__ != 7) */ #define PT_DESC_REFCOUNT 0x4000U #define PT_DESC_IOMMU_REFCOUNT 0x8000U +typedef struct { + /* + * For non-leaf pagetables, should always be PT_DESC_REFCOUNT + * For leaf pagetables, should reflect the number of non-empty PTEs + * For IOMMU pages, should always be PT_DESC_IOMMU_REFCOUNT + */ + unsigned short refcnt; + /* + * For non-leaf pagetables, should be 0 + * For leaf pagetables, should reflect the number of wired entries + * For IOMMU pages, may optionally reflect a driver-defined refcount (IOMMU operations are implicitly wired) + */ + unsigned short wiredcnt; + vm_offset_t va; +} ptd_info_t; + typedef struct pt_desc { queue_chain_t pt_page; union { struct pmap *pmap; }; - /* - * Locate this struct towards the end of the pt_desc; our long term - * goal is to make this a VLA to avoid wasting memory if we don't need - * multiple entries. - */ - struct { - /* - * For non-leaf pagetables, should always be PT_DESC_REFCOUNT - * For leaf pagetables, should reflect the number of non-empty PTEs - * For IOMMU pages, should always be PT_DESC_IOMMU_REFCOUNT - */ - unsigned short refcnt; - /* - * For non-leaf pagetables, should be 0 - * For leaf pagetables, should reflect the number of wired entries - * For IOMMU pages, may optionally reflect a driver-defined refcount (IOMMU operations are implicitly wired) - */ - unsigned short wiredcnt; - vm_offset_t va; - } ptd_info[PT_INDEX_MAX]; + ptd_info_t ptd_info[PT_INDEX_MAX]; } pt_desc_t; @@ -948,18 +1087,24 @@ typedef u_int16_t pp_attr_t; #define PP_ATTR_PPL_OWNED_BITS (PP_ATTR_MONITOR | PP_ATTR_NO_MONITOR) #endif -SECURITY_READ_ONLY_LATE(pp_attr_t*) pp_attr_table; +SECURITY_READ_ONLY_LATE(volatile pp_attr_t*) pp_attr_table; +/** + * The layout of this structure needs to map 1-to-1 with the pmap-io-range device + * tree nodes. Astris (through the LowGlobals) also depends on the consistency + * of this structure. 
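For the pmap_io_range structure defined just below, wimg is a packed word: the low 16 bits are treated as a pp_attr_t and the upper bits carry per-range flags such as PMAP_IO_RANGE_STRONG_SYNC. A hedged sketch of accessors that split the field this way (these helpers are illustrative and are not part of this source):

#include <stdbool.h>
#include <stdint.h>

/* Mirrors the flag defined below: strong DSB required for pages in the range. */
#define EXAMPLE_IO_RANGE_STRONG_SYNC (1UL << 31)

static inline uint16_t
example_io_range_wimg_bits(uint32_t wimg)
{
	/* Low 16 bits: treated as pp_attr_t mapping attributes. */
	return (uint16_t)(wimg & 0xFFFFu);
}

static inline bool
example_io_range_strong_sync(uint32_t wimg)
{
	/* Upper bits: additional per-range mapping flags. */
	return (wimg & EXAMPLE_IO_RANGE_STRONG_SYNC) != 0;
}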
+ */ typedef struct pmap_io_range { uint64_t addr; uint64_t len; #define PMAP_IO_RANGE_STRONG_SYNC (1UL << 31) // Strong DSB required for pages in this range #define PMAP_IO_RANGE_CARVEOUT (1UL << 30) // Corresponds to memory carved out by bootloader + #define PMAP_IO_RANGE_NEEDS_HIBERNATING (1UL << 29) // Pages in this range need to be included in the hibernation image uint32_t wimg; // lower 16 bits treated as pp_attr_t, upper 16 bits contain additional mapping flags uint32_t signature; // 4CC } __attribute__((packed)) pmap_io_range_t; -SECURITY_READ_ONLY_LATE(pmap_io_range_t*) io_attr_table; +SECURITY_READ_ONLY_LATE(pmap_io_range_t*) io_attr_table = (pmap_io_range_t*)0; SECURITY_READ_ONLY_LATE(pmap_paddr_t) vm_first_phys = (pmap_paddr_t) 0; SECURITY_READ_ONLY_LATE(pmap_paddr_t) vm_last_phys = (pmap_paddr_t) 0; @@ -968,24 +1113,32 @@ SECURITY_READ_ONLY_LATE(unsigned int) num_io_rgns = 0; SECURITY_READ_ONLY_LATE(boolean_t) pmap_initialized = FALSE; /* Has pmap_init completed? */ -SECURITY_READ_ONLY_LATE(uint64_t) pmap_nesting_size_min; -SECURITY_READ_ONLY_LATE(uint64_t) pmap_nesting_size_max; - SECURITY_READ_ONLY_LATE(vm_map_offset_t) arm_pmap_max_offset_default = 0x0; #if defined(__arm64__) SECURITY_READ_ONLY_LATE(vm_map_offset_t) arm64_pmap_max_offset_default = 0x0; #endif -#define PMAP_MAX_SW_ASID ((MAX_ASID + MAX_HW_ASID - 1) / MAX_HW_ASID) -_Static_assert(PMAP_MAX_SW_ASID <= (UINT8_MAX + 1), - "VASID bits can't be represented by an 8-bit integer"); +#if PMAP_PANIC_DEV_WIMG_ON_MANAGED && (DEVELOPMENT || DEBUG) +SECURITY_READ_ONLY_LATE(boolean_t) pmap_panic_dev_wimg_on_managed = TRUE; +#else +SECURITY_READ_ONLY_LATE(boolean_t) pmap_panic_dev_wimg_on_managed = FALSE; +#endif -decl_simple_lock_data(, asid_lock MARK_AS_PMAP_DATA); -static bitmap_t asid_bitmap[BITMAP_LEN(MAX_ASID)] MARK_AS_PMAP_DATA; +MARK_AS_PMAP_DATA SIMPLE_LOCK_DECLARE(asid_lock, 0); +SECURITY_READ_ONLY_LATE(static uint32_t) pmap_max_asids = 0; +SECURITY_READ_ONLY_LATE(int) pmap_asid_plru = 1; +SECURITY_READ_ONLY_LATE(uint16_t) asid_chunk_size = 0; +SECURITY_READ_ONLY_LATE(static bitmap_t*) asid_bitmap; +static bitmap_t asid_plru_bitmap[BITMAP_LEN(MAX_HW_ASIDS)] MARK_AS_PMAP_DATA; +static uint64_t asid_plru_generation[BITMAP_LEN(MAX_HW_ASIDS)] MARK_AS_PMAP_DATA = {0}; +static uint64_t asid_plru_gencount MARK_AS_PMAP_DATA = 0; -#if (__ARM_VMSA__ > 7) -SECURITY_READ_ONLY_LATE(pmap_t) sharedpage_pmap; +#if (__ARM_VMSA__ > 7) +#if __ARM_MIXED_PAGE_SIZE__ +SECURITY_READ_ONLY_LATE(pmap_t) sharedpage_pmap_4k; +#endif +SECURITY_READ_ONLY_LATE(pmap_t) sharedpage_pmap_default; #endif #if XNU_MONITOR @@ -1011,19 +1164,6 @@ SECURITY_READ_ONLY_LATE(pmap_t) sharedpage_pmap; #define pte_is_wired(pte) \ (((pte) & ARM_PTE_WIRED_MASK) == ARM_PTE_WIRED) -#define pte_set_wired(ptep, wired) \ - do { \ - SInt16 *ptd_wiredcnt_ptr; \ - ptd_wiredcnt_ptr = (SInt16 *)&(ptep_get_ptd(ptep)->ptd_info[ARM_PT_DESC_INDEX(ptep)].wiredcnt); \ - if (wired) { \ - *ptep |= ARM_PTE_WIRED; \ - OSAddAtomic16(1, ptd_wiredcnt_ptr); \ - } else { \ - *ptep &= ~ARM_PTE_WIRED; \ - OSAddAtomic16(-1, ptd_wiredcnt_ptr); \ - } \ - } while(0) - #define pte_was_writeable(pte) \ (((pte) & ARM_PTE_WRITEABLE) == ARM_PTE_WRITEABLE) @@ -1066,54 +1206,35 @@ SECURITY_READ_ONLY_LATE(pmap_t) sharedpage_pmap; #define ARM_TT_PT_INDEX_MASK ARM_PGMASK #if (__ARM_VMSA__ == 7) -#define ARM_PT_DESC_INDEX_MASK 0x00000 -#define ARM_PT_DESC_INDEX_SHIFT 0 /* * Shift value used for reconstructing the virtual address for a PTE. 
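The shift defined next is simple arithmetic: the byte offset of a PTE within its page-table page, divided by the PTE size, gives the entry index, and each entry maps one leaf page, so the offset only needs to be shifted up by the difference of the two size exponents. A minimal sketch of that derivation (a generic helper, not taken from this source):

static inline unsigned int
example_pt_addr_shift(unsigned int leaf_page_shift, unsigned int pte_size_shift)
{
	/*
	 * 4-byte PTEs mapping 4 KiB pages:  12 - 2 = 10 (the VMSA-7 case below)
	 * 8-byte PTEs mapping 4 KiB pages:  12 - 3 = 9
	 * 8-byte PTEs mapping 16 KiB pages: 14 - 3 = 11
	 */
	return leaf_page_shift - pte_size_shift;
}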
*/ #define ARM_TT_PT_ADDR_SHIFT (10U) -#define ptep_get_va(ptep) \ - ((((pt_desc_t *) (pvh_list(pai_to_pvh(pa_index(ml_static_vtop((((vm_offset_t)(ptep) & ~ARM_PGMASK))))))))->ptd_info[ARM_PT_DESC_INDEX(ptep)].va)+ ((((unsigned)(ptep)) & ARM_TT_PT_INDEX_MASK)<pmap)) #else #if (ARM_PGSHIFT == 12) -#define ARM_PT_DESC_INDEX_MASK ((PAGE_SHIFT_CONST == ARM_PGSHIFT )? 0x00000ULL : 0x03000ULL) -#define ARM_PT_DESC_INDEX_SHIFT ((PAGE_SHIFT_CONST == ARM_PGSHIFT )? 0 : 12) /* * Shift value used for reconstructing the virtual address for a PTE. */ #define ARM_TT_PT_ADDR_SHIFT (9ULL) #else -#define ARM_PT_DESC_INDEX_MASK (0x00000) -#define ARM_PT_DESC_INDEX_SHIFT (0) /* * Shift value used for reconstructing the virtual address for a PTE. */ #define ARM_TT_PT_ADDR_SHIFT (11ULL) #endif - -#define ARM_PT_DESC_INDEX(ptep) \ - (((unsigned)(ptep) & ARM_PT_DESC_INDEX_MASK) >> ARM_PT_DESC_INDEX_SHIFT) - -#define ptep_get_va(ptep) \ - ((((pt_desc_t *) (pvh_list(pai_to_pvh(pa_index(ml_static_vtop((((vm_offset_t)(ptep) & ~ARM_PGMASK))))))))->ptd_info[ARM_PT_DESC_INDEX(ptep)].va)+ ((((unsigned)(ptep)) & ARM_TT_PT_INDEX_MASK)<pmap)) #endif -#define ARM_PT_DESC_INDEX(ptep) \ - (((unsigned)(ptep) & ARM_PT_DESC_INDEX_MASK) >> ARM_PT_DESC_INDEX_SHIFT) - #define ptep_get_ptd(ptep) \ ((struct pt_desc *)(pvh_list(pai_to_pvh(pa_index(ml_static_vtop((vm_offset_t)(ptep))))))) @@ -1138,7 +1259,8 @@ SECURITY_READ_ONLY_LATE(pmap_t) sharedpage_pmap; #define PVH_FLAG_LOCK (1ULL << PVH_LOCK_BIT) #define PVH_FLAG_EXEC (1ULL << 60) #define PVH_FLAG_LOCKDOWN (1ULL << 59) -#define PVH_HIGH_FLAGS (PVH_FLAG_CPU | PVH_FLAG_LOCK | PVH_FLAG_EXEC | PVH_FLAG_LOCKDOWN) +#define PVH_FLAG_HASHED (1ULL << 58) /* Used to mark that a page has been hashed into the hibernation image. */ +#define PVH_HIGH_FLAGS (PVH_FLAG_CPU | PVH_FLAG_LOCK | PVH_FLAG_EXEC | PVH_FLAG_LOCKDOWN | PVH_FLAG_HASHED) #else /* !__arm64__ */ @@ -1218,18 +1340,11 @@ SECURITY_READ_ONLY_LATE(pmap_t) sharedpage_pmap; /* PPATTR Define Macros */ -#define ppattr_set_bits(h, b) \ - do { \ - while (!OSCompareAndSwap16(*(pp_attr_t *)(h), *(pp_attr_t *)(h) | (b), (pp_attr_t *)(h))); \ - } while (0) - -#define ppattr_clear_bits(h, b) \ - do { \ - while (!OSCompareAndSwap16(*(pp_attr_t *)(h), *(pp_attr_t *)(h) & ~(b), (pp_attr_t *)(h))); \ - } while (0) +#define ppattr_set_bits(h, b) os_atomic_or((h), (pp_attr_t)(b), acq_rel) +#define ppattr_clear_bits(h, b) os_atomic_andnot((h), (pp_attr_t)(b), acq_rel) #define ppattr_test_bits(h, b) \ - ((*(pp_attr_t *)(h) & (b)) == (b)) + ((*(h) & (pp_attr_t)(b)) == (pp_attr_t)(b)) #define pa_set_bits(x, b) \ do { \ @@ -1363,31 +1478,178 @@ SECURITY_READ_ONLY_LATE(pmap_t) sharedpage_pmap; #endif + +static inline ptd_info_t * +ptd_get_info(pt_desc_t *ptd, const tt_entry_t *ttep) +{ + assert(ptd->ptd_info[0].refcnt != PT_DESC_IOMMU_REFCOUNT); +#if PT_INDEX_MAX == 1 + #pragma unused(ttep) + return &ptd->ptd_info[0]; +#else + uint64_t pmap_page_shift = pt_attr_leaf_shift(pmap_get_pt_attr(ptd->pmap)); + vm_offset_t ttep_page = (vm_offset_t)ttep >> pmap_page_shift; + unsigned int ttep_index = ttep_page & ((1U << (PAGE_SHIFT - pmap_page_shift)) - 1); + assert(ttep_index < PT_INDEX_MAX); + return &ptd->ptd_info[ttep_index]; +#endif +} + +static inline ptd_info_t * +ptep_get_info(const pt_entry_t *ptep) +{ + return ptd_get_info(ptep_get_ptd(ptep), ptep); +} + +static inline vm_map_address_t +ptep_get_va(const pt_entry_t *ptep) +{ + pv_entry_t **pv_h; + const pt_attr_t * const pt_attr = pmap_get_pt_attr(ptep_get_pmap(ptep)); + pv_h = 
pai_to_pvh(pa_index(ml_static_vtop(((vm_offset_t)ptep))));; + + assert(pvh_test_type(pv_h, PVH_TYPE_PTDP)); + pt_desc_t *ptdp = (pt_desc_t *)(pvh_list(pv_h)); + + vm_map_address_t va = ptd_get_info(ptdp, ptep)->va; + vm_offset_t ptep_index = ((vm_offset_t)ptep & pt_attr_leaf_offmask(pt_attr)) / sizeof(*ptep); + + va += (ptep_index << pt_attr_leaf_shift(pt_attr)); + + return va; +} + +static inline void +pte_set_wired(pmap_t pmap, pt_entry_t *ptep, boolean_t wired) +{ + if (wired) { + *ptep |= ARM_PTE_WIRED; + } else { + *ptep &= ~ARM_PTE_WIRED; + } + /* + * Do not track wired page count for kernel pagetable pages. Kernel mappings are + * not guaranteed to have PTDs in the first place, and kernel pagetable pages are + * never reclaimed. + */ + if (pmap == kernel_pmap) { + return; + } + unsigned short *ptd_wiredcnt_ptr; + ptd_wiredcnt_ptr = &(ptep_get_info(ptep)->wiredcnt); + if (wired) { + os_atomic_add(ptd_wiredcnt_ptr, (unsigned short)1, relaxed); + } else { + unsigned short prev_wired = os_atomic_sub_orig(ptd_wiredcnt_ptr, (unsigned short)1, relaxed); + if (__improbable(prev_wired == 0)) { + panic("pmap %p (pte %p): wired count underflow", pmap, ptep); + } + } +} + /* * Lock on pmap system */ -lck_grp_t pmap_lck_grp; +lck_grp_t pmap_lck_grp MARK_AS_PMAP_DATA; + +static inline void +pmap_lock_init(pmap_t pmap) +{ + lck_rw_init(&pmap->rwlock, &pmap_lck_grp, 0); + pmap->rwlock.lck_rw_can_sleep = FALSE; +} + +static inline void +pmap_lock_destroy(pmap_t pmap) +{ + lck_rw_destroy(&pmap->rwlock, &pmap_lck_grp); +} + +static inline void +pmap_lock(pmap_t pmap) +{ + #if !XNU_MONITOR + mp_disable_preemption(); + #endif + lck_rw_lock_exclusive(&pmap->rwlock); +} + +static inline void +pmap_lock_ro(pmap_t pmap) +{ + #if !XNU_MONITOR + mp_disable_preemption(); + #endif + lck_rw_lock_shared(&pmap->rwlock); +} -#define PMAP_LOCK_INIT(pmap) { \ - simple_lock_init(&(pmap)->lock, 0); \ - } +static inline void +pmap_unlock(pmap_t pmap) +{ + lck_rw_unlock_exclusive(&pmap->rwlock); + #if !XNU_MONITOR + mp_enable_preemption(); + #endif +} -#define PMAP_LOCK(pmap) { \ - pmap_simple_lock(&(pmap)->lock); \ +static inline void +pmap_unlock_ro(pmap_t pmap) +{ + lck_rw_unlock_shared(&pmap->rwlock); + #if !XNU_MONITOR + mp_enable_preemption(); + #endif } -#define PMAP_UNLOCK(pmap) { \ - pmap_simple_unlock(&(pmap)->lock); \ +static inline bool +pmap_try_lock(pmap_t pmap) +{ + bool ret; + + #if !XNU_MONITOR + mp_disable_preemption(); + #endif + ret = lck_rw_try_lock_exclusive(&pmap->rwlock); + if (!ret) { + #if !XNU_MONITOR + mp_enable_preemption(); + #endif + } + + return ret; } +//assert that ONLY READ lock is held +__unused static inline void +pmap_assert_locked_r(__unused pmap_t pmap) +{ #if MACH_ASSERT -#define PMAP_ASSERT_LOCKED(pmap) { \ - simple_lock_assert(&(pmap)->lock, LCK_ASSERT_OWNED); \ + lck_rw_assert(&pmap->rwlock, LCK_RW_ASSERT_SHARED); +#else + (void)pmap; +#endif } +//assert that ONLY WRITE lock is held +__unused static inline void +pmap_assert_locked_w(__unused pmap_t pmap) +{ +#if MACH_ASSERT + lck_rw_assert(&pmap->rwlock, LCK_RW_ASSERT_EXCLUSIVE); #else -#define PMAP_ASSERT_LOCKED(pmap) + (void)pmap; +#endif +} + +//assert that either READ or WRITE lock is held +__unused static inline void +pmap_assert_locked_any(__unused pmap_t pmap) +{ +#if MACH_ASSERT + lck_rw_assert(&pmap->rwlock, LCK_RW_ASSERT_HELD); #endif +} + #if defined(__arm64__) #define PVH_LOCK_WORD 1 /* Assumes little-endian */ @@ -1412,7 +1674,7 @@ lck_grp_t pmap_lck_grp; } while (0) #define PMAP_UPDATE_TLBS(pmap, s, e, strong) { \ 
- pmap_get_pt_ops(pmap)->flush_tlb_region_async(s, (unsigned)(e - s), pmap); \ + pmap_get_pt_ops(pmap)->flush_tlb_region_async(s, (size_t)((e) - (s)), pmap); \ pmap_sync_tlb(strong); \ } @@ -1480,10 +1742,6 @@ lck_grp_t pmap_lck_grp; #define USER_PMAP_IS_VALID(x) (PMAP_PTR_IS_VALID(x) && (os_atomic_load(&(x)->ref_count, relaxed) > 0)) -#define VALIDATE_USER_PMAP(x) \ - if (__improbable(!USER_PMAP_IS_VALID(x))) \ - panic("%s: invalid pmap %p", __func__, (x)); - #define VALIDATE_PMAP(x) \ if (__improbable(((x) != kernel_pmap) && !USER_PMAP_IS_VALID(x))) \ panic("%s: invalid pmap %p", __func__, (x)); @@ -1517,11 +1775,9 @@ pmap_ledger_validate(void * ledger) #else /* XNU_MONITOR */ -#define VALIDATE_USER_PMAP(x) -#define VALIDATE_PMAP(x) -#define VALIDATE_LEDGER(x) +#define VALIDATE_PMAP(x) assert((x) != NULL); -#endif +#endif /* XNU_MONITOR */ #if DEVELOPMENT || DEBUG @@ -1531,7 +1787,8 @@ pmap_ledger_validate(void * ledger) * in the boot arg * Level 1: pmap lifecycle (create/destroy/switch) * Level 2: mapping lifecycle (enter/remove/protect/nest/unnest) - * Level 3: internal state management (tte/attributes/fast-fault) + * Level 3: internal state management (attributes/fast-fault) + * Level 4-7: TTE traces for paging levels 0-3. TTBs are traced at level 4. */ SECURITY_READ_ONLY_LATE(unsigned int) pmap_trace_mask = 0; @@ -1540,52 +1797,44 @@ SECURITY_READ_ONLY_LATE(unsigned int) pmap_trace_mask = 0; if (__improbable((1 << (level)) & pmap_trace_mask)) { \ KDBG_RELEASE(__VA_ARGS__); \ } -#else +#else /* DEVELOPMENT || DEBUG */ #define PMAP_TRACE(level, ...) -#endif +#endif /* DEVELOPMENT || DEBUG */ /* * Internal function prototypes (forward declarations). */ -static void pv_init( - void); +typedef enum { + PV_ALLOC_SUCCESS, + PV_ALLOC_RETRY, + PV_ALLOC_FAIL +} pv_alloc_return_t; -static boolean_t pv_alloc( +static pv_alloc_return_t pv_alloc( pmap_t pmap, unsigned int pai, pv_entry_t **pvepp); -static void pv_free( - pv_entry_t *pvep); - -static void pv_list_free( - pv_entry_t *pvehp, - pv_entry_t *pvetp, - unsigned int cnt); - static void ptd_bootstrap( pt_desc_t *ptdp, unsigned int ptd_cnt); -static inline pt_desc_t *ptd_alloc_unlinked(bool reclaim); +static inline pt_desc_t *ptd_alloc_unlinked(void); -static pt_desc_t *ptd_alloc(pmap_t pmap, bool reclaim); +static pt_desc_t *ptd_alloc(pmap_t pmap); static void ptd_deallocate(pt_desc_t *ptdp); static void ptd_init( pt_desc_t *ptdp, pmap_t pmap, vm_map_address_t va, unsigned int ttlevel, pt_entry_t * pte_p); -static void pmap_zone_init( - void); - static void pmap_set_reference( ppnum_t pn); -ppnum_t pmap_vtophys( +pmap_paddr_t pmap_vtophys( pmap_t pmap, addr64_t va); void pmap_switch_user_ttb( @@ -1618,19 +1867,6 @@ static kern_return_t pmap_tt_allocate( static void pmap_tte_deallocate( pmap_t, tt_entry_t *, unsigned int); -#ifdef __ARM64_PMAP_SUBPAGE_L1__ -#if (__ARM_VMSA__ <= 7) -#error This is not supported for old-style page tables -#endif /* (__ARM_VMSA__ <= 7) */ -#define PMAP_ROOT_ALLOC_SIZE (((ARM_TT_L1_INDEX_MASK >> ARM_TT_L1_SHIFT) + 1) * sizeof(tt_entry_t)) -#else /* !defined(__ARM64_PMAP_SUBPAGE_L1__) */ -#if (__ARM_VMSA__ <= 7) -#define PMAP_ROOT_ALLOC_SIZE (ARM_PGBYTES * 2) -#else /* (__ARM_VMSA__ > 7) */ -#define PMAP_ROOT_ALLOC_SIZE (ARM_PGBYTES) -#endif /* (__ARM_VMSA__ > 7) */ -#endif /* !defined(__ARM64_PMAP_SUBPAGE_L1__) */ - const unsigned int arm_hardware_page_size = ARM_PGBYTES; const unsigned int arm_pt_desc_size = sizeof(pt_desc_t); const unsigned int arm_pt_root_size = PMAP_ROOT_ALLOC_SIZE; @@ -1658,7 +1894,8 @@ 
static boolean_t pmap_is_64bit(pmap_t); -#endif +#endif /* (__ARM_VMSA__ > 7) */ + static inline tt_entry_t *pmap_tte( pmap_t, vm_map_address_t); @@ -1668,14 +1905,14 @@ static inline pt_entry_t *pmap_pte( static void pmap_update_cache_attributes_locked( ppnum_t, unsigned); -boolean_t arm_clear_fast_fault( +static boolean_t arm_clear_fast_fault( ppnum_t ppnum, vm_prot_t fault_type); static pmap_paddr_t pmap_pages_reclaim( void); -static kern_return_t pmap_pages_alloc( +static kern_return_t pmap_pages_alloc_zeroed( pmap_paddr_t *pa, unsigned size, unsigned option); @@ -1699,9 +1936,39 @@ static uint64_t pte_to_xprr_perm(pt_entry_t pte); static pt_entry_t xprr_perm_to_pte(uint64_t perm); #endif /* __APRR_SUPPORTED__*/ +/* + * Temporary prototypes, while we wait for pmap_enter to move to taking an + * address instead of a page number. + */ +static kern_return_t +pmap_enter_addr( + pmap_t pmap, + vm_map_address_t v, + pmap_paddr_t pa, + vm_prot_t prot, + vm_prot_t fault_type, + unsigned int flags, + boolean_t wired); + +kern_return_t +pmap_enter_options_addr( + pmap_t pmap, + vm_map_address_t v, + pmap_paddr_t pa, + vm_prot_t prot, + vm_prot_t fault_type, + unsigned int flags, + boolean_t wired, + unsigned int options, + __unused void *arg); + +#ifdef CONFIG_XNUPOST +kern_return_t pmap_test(void); +#endif /* CONFIG_XNUPOST */ + #if XNU_MONITOR -static pmap_paddr_t pmap_alloc_page_for_kern(void); -static void pmap_alloc_page_for_ppl(void); +static pmap_paddr_t pmap_alloc_page_for_kern(unsigned int options); +static void pmap_alloc_page_for_ppl(unsigned int options); /* @@ -1747,13 +2014,16 @@ PMAP_SUPPORT_PROTOTYPES( vm_prot_t allow_mode, int options), ARM_FORCE_FAST_FAULT_INDEX); -PMAP_SUPPORT_PROTOTYPES( - kern_return_t, - mapping_free_prime, (void), MAPPING_FREE_PRIME_INDEX); +MARK_AS_PMAP_TEXT static boolean_t +arm_force_fast_fault_with_flush_range( + ppnum_t ppnum, + vm_prot_t allow_mode, + int options, + pmap_tlb_flush_range_t *flush_range); PMAP_SUPPORT_PROTOTYPES( kern_return_t, - mapping_replenish, (uint32_t kern_target_count, uint32_t user_target_count), MAPPING_REPLENISH_INDEX); + mapping_free_prime, (void), MAPPING_FREE_PRIME_INDEX); PMAP_SUPPORT_PROTOTYPES( boolean_t, @@ -1774,7 +2044,8 @@ PMAP_SUPPORT_PROTOTYPES( pmap_t, pmap_create_options, (ledger_t ledger, vm_map_size_t size, - unsigned int flags), PMAP_CREATE_INDEX); + unsigned int flags, + kern_return_t * kr), PMAP_CREATE_INDEX); PMAP_SUPPORT_PROTOTYPES( void, @@ -1784,7 +2055,7 @@ PMAP_SUPPORT_PROTOTYPES( kern_return_t, pmap_enter_options, (pmap_t pmap, vm_map_address_t v, - ppnum_t pn, + pmap_paddr_t pa, vm_prot_t prot, vm_prot_t fault_type, unsigned int flags, @@ -1792,14 +2063,9 @@ PMAP_SUPPORT_PROTOTYPES( unsigned int options), PMAP_ENTER_OPTIONS_INDEX); PMAP_SUPPORT_PROTOTYPES( - vm_offset_t, - pmap_extract, (pmap_t pmap, - vm_map_address_t va), PMAP_EXTRACT_INDEX); - -PMAP_SUPPORT_PROTOTYPES( - ppnum_t, - pmap_find_phys, (pmap_t pmap, - addr64_t va), PMAP_FIND_PHYS_INDEX); + pmap_paddr_t, + pmap_find_pa, (pmap_t pmap, + addr64_t va), PMAP_FIND_PA_INDEX); #if (__ARM_VMSA__ > 7) PMAP_SUPPORT_PROTOTYPES( @@ -1826,7 +2092,6 @@ PMAP_SUPPORT_PROTOTYPES( pmap_nest, (pmap_t grand, pmap_t subord, addr64_t vstart, - addr64_t nstart, uint64_t size), PMAP_NEST_INDEX); PMAP_SUPPORT_PROTOTYPES( @@ -1920,7 +2185,7 @@ PMAP_SUPPORT_PROTOTYPES( #if XNU_MONITOR PMAP_SUPPORT_PROTOTYPES( void, - pmap_mark_page_as_ppl_page, (pmap_paddr_t pa), PMAP_MARK_PAGE_AS_PMAP_PAGE_INDEX); + pmap_mark_page_as_ppl_page, (pmap_paddr_t pa, bool 
initially_free), PMAP_MARK_PAGE_AS_PMAP_PAGE_INDEX); #endif PMAP_SUPPORT_PROTOTYPES( @@ -1930,6 +2195,17 @@ PMAP_SUPPORT_PROTOTYPES( int options, void *arg), PHYS_ATTRIBUTE_CLEAR_INDEX); +#if __ARM_RANGE_TLBI__ +PMAP_SUPPORT_PROTOTYPES( + void, + phys_attribute_clear_range, (pmap_t pmap, + vm_map_address_t start, + vm_map_address_t end, + unsigned int bits, + unsigned int options), PHYS_ATTRIBUTE_CLEAR_RANGE_INDEX); +#endif /* __ARM_RANGE_TLBI__ */ + + PMAP_SUPPORT_PROTOTYPES( void, pmap_switch, (pmap_t pmap), PMAP_SWITCH_INDEX); @@ -1948,25 +2224,34 @@ PMAP_SUPPORT_PROTOTYPES( pmap_release_ppl_pages_to_kernel, (void), PMAP_RELEASE_PAGES_TO_KERNEL_INDEX); #endif +PMAP_SUPPORT_PROTOTYPES( + void, + pmap_set_vm_map_cs_enforced, (pmap_t pmap, bool new_value), PMAP_SET_VM_MAP_CS_ENFORCED_INDEX); + PMAP_SUPPORT_PROTOTYPES( void, pmap_set_jit_entitled, (pmap_t pmap), PMAP_SET_JIT_ENTITLED_INDEX); +#if __has_feature(ptrauth_calls) && defined(XNU_TARGET_OS_OSX) +PMAP_SUPPORT_PROTOTYPES( + void, + pmap_disable_user_jop, (pmap_t pmap), PMAP_DISABLE_USER_JOP_INDEX); +#endif /* __has_feature(ptrauth_calls) && defined(XNU_TARGET_OS_OSX) */ + PMAP_SUPPORT_PROTOTYPES( void, pmap_trim, (pmap_t grand, pmap_t subord, addr64_t vstart, - addr64_t nstart, uint64_t size), PMAP_TRIM_INDEX); #if HAS_APPLE_PAC && XNU_MONITOR PMAP_SUPPORT_PROTOTYPES( void *, - pmap_sign_user_ptr, (void *value, ptrauth_key key, uint64_t discriminator), PMAP_SIGN_USER_PTR); + pmap_sign_user_ptr, (void *value, ptrauth_key key, uint64_t discriminator, uint64_t jop_key), PMAP_SIGN_USER_PTR); PMAP_SUPPORT_PROTOTYPES( void *, - pmap_auth_user_ptr, (void *value, ptrauth_key key, uint64_t discriminator), PMAP_AUTH_USER_PTR); + pmap_auth_user_ptr, (void *value, ptrauth_key key, uint64_t discriminator, uint64_t jop_key), PMAP_AUTH_USER_PTR); #endif /* HAS_APPLE_PAC && XNU_MONITOR */ @@ -2001,6 +2286,9 @@ PMAP_SUPPORT_PROTOTYPES( PMAP_LEDGER_FREE_INDEX); #endif + + + #if CONFIG_PGTRACE boolean_t pgtrace_enabled = 0; @@ -2055,17 +2343,21 @@ long long alloc_ttepages_count __attribute__((aligned(8))) MARK_AS_PMAP_DATA = 0 long long alloc_ptepages_count __attribute__((aligned(8))) MARK_AS_PMAP_DATA = 0LL; long long alloc_pmap_pages_count __attribute__((aligned(8))) = 0LL; -int pt_fake_zone_index = -1; /* index of pmap fake zone */ - #if XNU_MONITOR + +#if __has_feature(ptrauth_calls) +#define __ptrauth_ppl_handler __ptrauth(ptrauth_key_function_pointer, true, 0) +#else +#define __ptrauth_ppl_handler +#endif + /* * Table of function pointers used for PPL dispatch. 
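A hedged sketch of the hardening pattern this table uses on ptrauth-capable hardware: each stored handler pointer is signed with the function-pointer key (address-discriminated, as in the __ptrauth_ppl_handler definition just above), so a corrupted slot fails authentication when it is loaded for an indirect call. The names and the user-space main() below are illustrative, and the sketch assumes Clang's __ptrauth qualifier may also be applied to a function-pointer element type.

#include <stdio.h>

#ifndef __has_feature
#define __has_feature(x) 0
#endif

#if __has_feature(ptrauth_calls)
#include <ptrauth.h>
#define EXAMPLE_PTRAUTH_HANDLER __ptrauth(ptrauth_key_function_pointer, 1, 0)
#else
#define EXAMPLE_PTRAUTH_HANDLER
#endif

typedef void (*example_handler_t)(void);

static void
example_handler_hello(void)
{
	printf("hello from a signed dispatch slot\n");
}

/* Each slot is signed when initialized and authenticated on every load. */
static example_handler_t EXAMPLE_PTRAUTH_HANDLER const example_table[] = {
	example_handler_hello,
};

int
main(void)
{
	example_table[0]();
	return 0;
}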
*/ -const void * const ppl_handler_table[PMAP_COUNT] = { +const void * __ptrauth_ppl_handler const ppl_handler_table[PMAP_COUNT] = { [ARM_FAST_FAULT_INDEX] = arm_fast_fault_internal, [ARM_FORCE_FAST_FAULT_INDEX] = arm_force_fast_fault_internal, [MAPPING_FREE_PRIME_INDEX] = mapping_free_prime_internal, - [MAPPING_REPLENISH_INDEX] = mapping_replenish_internal, [PHYS_ATTRIBUTE_CLEAR_INDEX] = phys_attribute_clear_internal, [PHYS_ATTRIBUTE_SET_INDEX] = phys_attribute_set_internal, [PMAP_BATCH_SET_CACHE_ATTRIBUTES_INDEX] = pmap_batch_set_cache_attributes_internal, @@ -2073,8 +2365,7 @@ const void * const ppl_handler_table[PMAP_COUNT] = { [PMAP_CREATE_INDEX] = pmap_create_options_internal, [PMAP_DESTROY_INDEX] = pmap_destroy_internal, [PMAP_ENTER_OPTIONS_INDEX] = pmap_enter_options_internal, - [PMAP_EXTRACT_INDEX] = pmap_extract_internal, - [PMAP_FIND_PHYS_INDEX] = pmap_find_phys_internal, + [PMAP_FIND_PA_INDEX] = pmap_find_pa_internal, [PMAP_INSERT_SHAREDPAGE_INDEX] = pmap_insert_sharedpage_internal, [PMAP_IS_EMPTY_INDEX] = pmap_is_empty_internal, [PMAP_MAP_CPU_WINDOWS_COPY_INDEX] = pmap_map_cpu_windows_copy_internal, @@ -2099,6 +2390,7 @@ const void * const ppl_handler_table[PMAP_COUNT] = { [PMAP_FOOTPRINT_SUSPEND_INDEX] = pmap_footprint_suspend_internal, [PMAP_CPU_DATA_INIT_INDEX] = pmap_cpu_data_init_internal, [PMAP_RELEASE_PAGES_TO_KERNEL_INDEX] = pmap_release_ppl_pages_to_kernel_internal, + [PMAP_SET_VM_MAP_CS_ENFORCED_INDEX] = pmap_set_vm_map_cs_enforced_internal, [PMAP_SET_JIT_ENTITLED_INDEX] = pmap_set_jit_entitled_internal, [PMAP_TRIM_INDEX] = pmap_trim_internal, [PMAP_LEDGER_ALLOC_INIT_INDEX] = pmap_ledger_alloc_init_internal, @@ -2108,39 +2400,13 @@ const void * const ppl_handler_table[PMAP_COUNT] = { [PMAP_SIGN_USER_PTR] = pmap_sign_user_ptr_internal, [PMAP_AUTH_USER_PTR] = pmap_auth_user_ptr_internal, #endif /* HAS_APPLE_PAC && XNU_MONITOR */ +#if __ARM_RANGE_TLBI__ + [PHYS_ATTRIBUTE_CLEAR_RANGE_INDEX] = phys_attribute_clear_range_internal, +#endif /* __ARM_RANGE_TLBI__ */ +#if __has_feature(ptrauth_calls) && defined(XNU_TARGET_OS_OSX) + [PMAP_DISABLE_USER_JOP_INDEX] = pmap_disable_user_jop_internal, +#endif /* __has_feature(ptrauth_calls) && defined(XNU_TARGET_OS_OSX) */ }; - -static uint64_t -pmap_get_ppl_cpu_id(void) -{ - uint64_t mpidr_el1_value = 0; - - /* We identify the CPU based on the constant bits of MPIDR_EL1. */ - MRS(mpidr_el1_value, "MPIDR_EL1"); - -#ifdef CPU_CLUSTER_OFFSETS - uint64_t cluster_id = (mpidr_el1_value & MPIDR_AFF1_MASK) >> MPIDR_AFF1_SHIFT; - assert(cluster_id < (sizeof(pmap_cluster_offsets) / sizeof(pmap_cluster_offsets[0]))); - - /* For multi-cluster configurations, AFF0 reflects the core number within the cluster. */ - mpidr_el1_value = (mpidr_el1_value & MPIDR_AFF0_MASK) + pmap_cluster_offsets[cluster_id]; -#else - /* - * AFF2 is not constant (it can change for e-core versus p-core on H9), - * so mask it out. 
- */ - mpidr_el1_value &= MPIDR_AFF0_MASK; -#endif - - if (mpidr_el1_value > MAX_CPUS) { - panic("%s: mpidr_el1_value=%#llx > MAX_CPUS=%#x", - __FUNCTION__, mpidr_el1_value, MAX_CPUS); - } - - return mpidr_el1_value; -} - - #endif @@ -2154,7 +2420,7 @@ pmap_cpu_data_init_internal(unsigned int cpu_number) #if XNU_MONITOR /* Verify cacheline-aligned */ - assert(((vm_offset_t)pmap_cpu_data & ((1 << L2_CLINE) - 1)) == 0); + assert(((vm_offset_t)pmap_cpu_data & ((1 << MAX_L2_CLINE) - 1)) == 0); if (pmap_cpu_data->cpu_number != PMAP_INVALID_CPU_NUM) { panic("%s: pmap_cpu_data->cpu_number=%u, " "cpu_number=%u", @@ -2203,7 +2469,6 @@ pmap_cpu_data_array_init(void) #if KASAN kasan_map_shadow(stack_va, PPL_STACK_SIZE, false); #endif - pmap_cpu_data_array[i].cpu_data.cpu_id = i; pmap_cpu_data_array[i].cpu_data.cpu_number = PMAP_INVALID_CPU_NUM; pmap_cpu_data_array[i].cpu_data.ppl_state = PPL_STATE_KERNEL; pmap_cpu_data_array[i].cpu_data.ppl_stack = (void*)(stack_va + PPL_STACK_SIZE); @@ -2236,15 +2501,8 @@ pmap_get_cpu_data(void) pmap_cpu_data_t * pmap_cpu_data = NULL; #if XNU_MONITOR - uint64_t cpu_id = 0; - - cpu_id = pmap_get_ppl_cpu_id(); - pmap_cpu_data = &pmap_cpu_data_array[cpu_id].cpu_data; - - if (pmap_cpu_data->cpu_id != cpu_id) { - panic("%s: CPU ID mismatch, cpu_id=0x%#llx, pmap_cpu_data->cpu_id=%#llx", - __FUNCTION__, cpu_id, pmap_cpu_data->cpu_id); - } + extern pmap_cpu_data_t* ml_get_ppl_cpu_data(void); + pmap_cpu_data = ml_get_ppl_cpu_data(); #else pmap_cpu_data = &getCpuDatap()->cpu_pmap_cpu_data; #endif @@ -2306,15 +2564,11 @@ pmap_set_range_xprr_perm(vm_address_t start, (void *)start, (void *)end, new_perm, expected_perm); } - if (start < gVirtBase) { - panic("%s: start is before physical aperture, " - "start=%p, end=%p, new_perm=%u, expected_perm=%u", - __FUNCTION__, - (void *)start, (void *)end, new_perm, expected_perm); - } + bool in_physmap = (start >= physmap_base) && (end < physmap_end); + bool in_static = (start >= gVirtBase) && (end < static_memory_end); - if (end > static_memory_end) { - panic("%s: end is after physical aperture, " + if (!(in_physmap || in_static)) { + panic("%s: address not in static region or physical aperture, " "start=%p, end=%p, new_perm=%u, expected_perm=%u", __FUNCTION__, (void *)start, (void *)end, new_perm, expected_perm); @@ -2361,7 +2615,7 @@ pmap_set_range_xprr_perm(vm_address_t start, * PTEs. */ pte_p = (pt_entry_t *)ttetokv(tte); - bpte_p = &pte_p[ptenum(va)]; + bpte_p = &pte_p[pte_index(pmap, native_pt_attr, va)]; epte_p = bpte_p + ((tte_end - va) >> pt_attr_leaf_shift(native_pt_attr)); for (cpte_p = bpte_p; cpte_p < epte_p; @@ -2439,7 +2693,12 @@ pmap_set_xprr_perm(vm_address_t page_kva, #endif /* XNU_MONITOR */ -/* TODO */ +/* + * pmap_pages_reclaim(): return a page by freeing an active pagetable page. + * To be eligible, a pt page must be assigned to a non-kernel pmap. + * It must not have any wired PTEs and must contain at least one valid PTE. + * If no eligible page is found in the pt page list, return 0. + */ pmap_paddr_t pmap_pages_reclaim( void) @@ -2449,17 +2708,12 @@ pmap_pages_reclaim( pt_desc_t *ptdp; /* - * pmap_pages_reclaim() is returning a page by freeing an active pt page. - * To be eligible, a pt page is assigned to a user pmap. It doesn't have any wired pte - * entry and it contains at least one valid pte entry. - * * In a loop, check for a page in the reclaimed pt page list. * if one is present, unlink that page and return the physical page address. * Otherwise, scan the pt page list for an eligible pt page to reclaim. 
* If found, invoke pmap_remove_range() on its pmap and address range then * deallocates that pt page. This will end up adding the pt page to the * reclaimed pt page list. - * If no eligible page were found in the pt page list, panic. */ pmap_simple_lock(&pmap_pages_lock); @@ -2485,7 +2739,7 @@ pmap_pages_reclaim( while (!queue_end(&pt_page_list, (queue_entry_t)ptdp)) { if ((ptdp->pmap->nested == FALSE) - && (pmap_simple_lock_try(&ptdp->pmap->lock))) { + && (pmap_try_lock(ptdp->pmap))) { assert(ptdp->pmap != kernel_pmap); unsigned refcnt_acc = 0; unsigned wiredcnt_acc = 0; @@ -2506,12 +2760,13 @@ pmap_pages_reclaim( * with it while we do that. */ break; } - pmap_simple_unlock(&ptdp->pmap->lock); + pmap_unlock(ptdp->pmap); } ptdp = (pt_desc_t *)queue_next((queue_t)ptdp); } if (!found_page) { - panic("%s: No eligible page in pt_page_list", __FUNCTION__); + pmap_simple_unlock(&pt_pages_lock); + return (pmap_paddr_t)0; } else { int remove_count = 0; bool need_strong_sync = false; @@ -2524,11 +2779,11 @@ pmap_pages_reclaim( pmap_simple_unlock(&pt_pages_lock); pmap = ptdp->pmap; - PMAP_ASSERT_LOCKED(pmap); // pmap lock should be held from loop above + pmap_assert_locked_w(pmap); // pmap write lock should be held from loop above - __unused const pt_attr_t * const pt_attr = pmap_get_pt_attr(pmap); + const pt_attr_t * const pt_attr = pmap_get_pt_attr(pmap); - for (i = 0; i < PT_INDEX_MAX; i++) { + for (i = 0; i < (PAGE_SIZE / pt_attr_page_size(pt_attr)); i++) { va = ptdp->ptd_info[i].va; /* If the VA is bogus, this may represent an unallocated region @@ -2543,7 +2798,7 @@ pmap_pages_reclaim( && ((*tte_p & ARM_TTE_TYPE_MASK) == ARM_TTE_TYPE_TABLE)) { pte_p = (pt_entry_t *) ttetokv(*tte_p); bpte = &pte_p[pte_index(pmap, pt_attr, va)]; - epte = bpte + pt_attr_leaf_size(pt_attr) / sizeof(pt_entry_t); + epte = bpte + pt_attr_page_size(pt_attr) / sizeof(pt_entry_t); /* * Use PMAP_OPTIONS_REMOVE to clear any * "compressed" markers and update the @@ -2558,21 +2813,21 @@ pmap_pages_reclaim( remove_count += pmap_remove_range_options( pmap, va, bpte, epte, &rmv_spte, &need_strong_sync, PMAP_OPTIONS_REMOVE); - if (ptdp->ptd_info[ARM_PT_DESC_INDEX(pte_p)].refcnt != 0) { - panic("%s: ptdp %p, count %d", __FUNCTION__, ptdp, ptdp->ptd_info[ARM_PT_DESC_INDEX(pte_p)].refcnt); + if (ptd_get_info(ptdp, pte_p)->refcnt != 0) { + panic("%s: ptdp %p, count %d", __FUNCTION__, ptdp, ptd_get_info(ptdp, pte_p)->refcnt); } - pmap_tte_deallocate(pmap, tte_p, PMAP_TT_TWIG_LEVEL); + pmap_tte_deallocate(pmap, tte_p, pt_attr_twig_level(pt_attr)); if (remove_count > 0) { - pmap_get_pt_ops(pmap)->flush_tlb_region_async(va, (unsigned int)pt_attr_leaf_table_size(pt_attr), pmap); + pmap_get_pt_ops(pmap)->flush_tlb_region_async(va, (size_t)pt_attr_leaf_table_size(pt_attr), pmap); } else { pmap_get_pt_ops(pmap)->flush_tlb_tte_async(va, pmap); } } } // Undo the lock we grabbed when we found ptdp above - PMAP_UNLOCK(pmap); + pmap_unlock(pmap); pmap_sync_tlb(need_strong_sync); } pmap_simple_lock(&pmap_pages_lock); @@ -2583,7 +2838,7 @@ pmap_pages_reclaim( /* * Return a PPL page to the free list. */ -static void +MARK_AS_PMAP_TEXT static void pmap_give_free_ppl_page(pmap_paddr_t paddr) { assert((paddr & ARM_PGMASK) == 0); @@ -2601,7 +2856,7 @@ pmap_give_free_ppl_page(pmap_paddr_t paddr) /* * Get a PPL page from the free list. 
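The push/pop pair around this comment keeps whole free pages on a simple LIFO list. A hedged sketch of the usual way such a list is threaded, with the link word stored in the free page itself through its kernel virtual mapping; the toy page pool below illustrates only the linking idea and does not reproduce the PPL's actual bookkeeping (physical-address tracking, the free-page count, or locking).

#include <stddef.h>
#include <stdint.h>

#define EXAMPLE_PAGE_SIZE 4096u

/* Toy stand-in for a few already-mapped pages. */
static _Alignas(sizeof(void *)) uint8_t example_pages[4][EXAMPLE_PAGE_SIZE];
static void *example_free_head;

static void
example_give_free_page(void *page_kva)
{
	/* The first word of a free page links to the previous list head. */
	*(void **)page_kva = example_free_head;
	example_free_head = page_kva;
}

static void *
example_get_free_page(void)
{
	void *page = example_free_head;

	if (page != NULL) {
		example_free_head = *(void **)page;
	}
	return page;
}

int
main(void)
{
	for (size_t i = 0; i < 4; i++) {
		example_give_free_page(example_pages[i]);
	}
	/* Pages come back most-recently-freed first. */
	return example_get_free_page() == example_pages[3] ? 0 : 1;
}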
*/ -static pmap_paddr_t +MARK_AS_PMAP_TEXT static pmap_paddr_t pmap_get_free_ppl_page(void) { pmap_paddr_t result = 0; @@ -2629,7 +2884,7 @@ pmap_get_free_ppl_page(void) * as PPL-owned and only allowing the PPL to write to it. */ MARK_AS_PMAP_TEXT static void -pmap_mark_page_as_ppl_page_internal(pmap_paddr_t pa) +pmap_mark_page_as_ppl_page_internal(pmap_paddr_t pa, bool initially_free) { vm_offset_t kva = 0; unsigned int pai = 0; @@ -2640,8 +2895,6 @@ pmap_mark_page_as_ppl_page_internal(pmap_paddr_t pa) * intend to use it for monitor-y stuff (page tables, table pages, that * sort of thing). */ - assert(!TEST_PAGE_RATIO_4); - if (!pa_valid(pa)) { panic("%s: bad address, " "pa=%p", @@ -2681,18 +2934,19 @@ pmap_mark_page_as_ppl_page_internal(pmap_paddr_t pa) kva = phystokv(pa); pmap_set_xprr_perm(kva, XPRR_KERN_RW_PERM, XPRR_PPL_RW_PERM); - bzero((void *)(kva & ~PAGE_MASK), PAGE_SIZE); - pmap_give_free_ppl_page(pa); + if (initially_free) { + pmap_give_free_ppl_page(pa); + } } static void pmap_mark_page_as_ppl_page(pmap_paddr_t pa) { - pmap_mark_page_as_ppl_page_ppl(pa); + pmap_mark_page_as_ppl_page_ppl(pa, true); } -static void +MARK_AS_PMAP_TEXT static void pmap_mark_page_as_kernel_page(pmap_paddr_t pa) { vm_offset_t kva = 0; @@ -2777,13 +3031,29 @@ pmap_release_ppl_pages_to_kernel(void) } #endif +static inline void +pmap_enqueue_pages(vm_page_t m) +{ + vm_page_t m_prev; + vm_object_lock(pmap_object); + while (m != VM_PAGE_NULL) { + vm_page_insert_wired(m, pmap_object, (vm_object_offset_t) ((ptoa(VM_PAGE_GET_PHYS_PAGE(m))) - gPhysBase), VM_KERN_MEMORY_PTE); + m_prev = m; + m = NEXT_PAGE(m_prev); + *(NEXT_PAGE_PTR(m_prev)) = VM_PAGE_NULL; + } + vm_object_unlock(pmap_object); +} + static kern_return_t -pmap_pages_alloc( +pmap_pages_alloc_zeroed( pmap_paddr_t *pa, - unsigned size, - unsigned option) + unsigned size, + unsigned option) { #if XNU_MONITOR + ASSERT_NOT_HIBERNATING(); + if (size != PAGE_SIZE) { panic("%s: size != PAGE_SIZE, " "pa=%p, size=%u, option=%u", @@ -2791,77 +3061,91 @@ pmap_pages_alloc( pa, size, option); } - if (option & PMAP_PAGES_RECLAIM_NOWAIT) { - *pa = pmap_pages_reclaim(); - assert(*pa); - return KERN_SUCCESS; - } assert(option & PMAP_PAGES_ALLOCATE_NOWAIT); *pa = pmap_get_free_ppl_page(); + if ((*pa == 0) && (option & PMAP_PAGES_RECLAIM_NOWAIT)) { + *pa = pmap_pages_reclaim(); + } + if (*pa == 0) { return KERN_RESOURCE_SHORTAGE; } else { + bzero((void*)phystokv(*pa), size); return KERN_SUCCESS; } #else - vm_page_t m = VM_PAGE_NULL, m_prev; + vm_page_t m = VM_PAGE_NULL; - if (option & PMAP_PAGES_RECLAIM_NOWAIT) { - assert(size == PAGE_SIZE); - *pa = pmap_pages_reclaim(); - return KERN_SUCCESS; - } - if (size == PAGE_SIZE) { + thread_t self = current_thread(); + // We qualify to allocate reserved memory + uint16_t thread_options = self->options; + self->options |= TH_OPT_VMPRIV; + if (__probable(size == PAGE_SIZE)) { while ((m = vm_page_grab()) == VM_PAGE_NULL) { if (option & PMAP_PAGES_ALLOCATE_NOWAIT) { - return KERN_RESOURCE_SHORTAGE; + break; } VM_PAGE_WAIT(); } - vm_page_lock_queues(); - vm_page_wire(m, VM_KERN_MEMORY_PTE, TRUE); - vm_page_unlock_queues(); - } - if (size == 2 * PAGE_SIZE) { + if (m != VM_PAGE_NULL) { + vm_page_lock_queues(); + vm_page_wire(m, VM_KERN_MEMORY_PTE, TRUE); + vm_page_unlock_queues(); + } + } else if (size == 2 * PAGE_SIZE) { while (cpm_allocate(size, &m, 0, 1, TRUE, 0) != KERN_SUCCESS) { if (option & PMAP_PAGES_ALLOCATE_NOWAIT) { - return KERN_RESOURCE_SHORTAGE; + break; } VM_PAGE_WAIT(); } + } else { + panic("%s: invalid size %u", 
__func__, size); } - *pa = (pmap_paddr_t)ptoa(VM_PAGE_GET_PHYS_PAGE(m)); + self->options = thread_options; - vm_object_lock(pmap_object); - while (m != VM_PAGE_NULL) { - vm_page_insert_wired(m, pmap_object, (vm_object_offset_t) ((ptoa(VM_PAGE_GET_PHYS_PAGE(m))) - gPhysBase), VM_KERN_MEMORY_PTE); - m_prev = m; - m = NEXT_PAGE(m_prev); - *(NEXT_PAGE_PTR(m_prev)) = VM_PAGE_NULL; + if ((m == VM_PAGE_NULL) && (option & PMAP_PAGES_RECLAIM_NOWAIT)) { + assert(size == PAGE_SIZE); + *pa = pmap_pages_reclaim(); + if (*pa != 0) { + bzero((void*)phystokv(*pa), size); + return KERN_SUCCESS; + } + } + + if (m == VM_PAGE_NULL) { + return KERN_RESOURCE_SHORTAGE; } - vm_object_unlock(pmap_object); + + *pa = (pmap_paddr_t)ptoa(VM_PAGE_GET_PHYS_PAGE(m)); + + pmap_enqueue_pages(m); OSAddAtomic(size >> PAGE_SHIFT, &inuse_pmap_pages_count); OSAddAtomic64(size >> PAGE_SHIFT, &alloc_pmap_pages_count); + bzero((void*)phystokv(*pa), size); return KERN_SUCCESS; #endif } #if XNU_MONITOR static pmap_paddr_t -pmap_alloc_page_for_kern(void) +pmap_alloc_page_for_kern(unsigned int options) { - pmap_paddr_t paddr = 0; - vm_page_t m, m_prev; + pmap_paddr_t paddr; + vm_page_t m; while ((m = vm_page_grab()) == VM_PAGE_NULL) { + if (options & PMAP_PAGES_ALLOCATE_NOWAIT) { + return 0; + } VM_PAGE_WAIT(); } @@ -2871,21 +3155,11 @@ pmap_alloc_page_for_kern(void) paddr = (pmap_paddr_t)ptoa(VM_PAGE_GET_PHYS_PAGE(m)); - if (paddr == 0) { - panic("%s: paddr is 0", - __FUNCTION__); - } - - vm_object_lock(pmap_object); - - while (m != VM_PAGE_NULL) { - vm_page_insert_wired(m, pmap_object, (vm_object_offset_t) ((ptoa(VM_PAGE_GET_PHYS_PAGE(m))) - gPhysBase), VM_KERN_MEMORY_PTE); - m_prev = m; - m = NEXT_PAGE(m_prev); - *(NEXT_PAGE_PTR(m_prev)) = VM_PAGE_NULL; + if (__improbable(paddr == 0)) { + panic("%s: paddr is 0", __func__); } - vm_object_unlock(pmap_object); + pmap_enqueue_pages(m); OSAddAtomic(1, &inuse_pmap_pages_count); OSAddAtomic64(1, &alloc_pmap_pages_count); @@ -2894,9 +3168,17 @@ pmap_alloc_page_for_kern(void) } static void -pmap_alloc_page_for_ppl(void) +pmap_alloc_page_for_ppl(unsigned int options) { - pmap_mark_page_as_ppl_page(pmap_alloc_page_for_kern()); + thread_t self = current_thread(); + // We qualify to allocate reserved memory + uint16_t thread_options = self->options; + self->options |= TH_OPT_VMPRIV; + pmap_paddr_t paddr = pmap_alloc_page_for_kern(options); + self->options = thread_options; + if (paddr != 0) { + pmap_mark_page_as_ppl_page(paddr); + } } static pmap_t @@ -3035,26 +3317,78 @@ pmap_tt_ledger_debit( } } +static inline void +pmap_update_plru(uint16_t asid_index) +{ + if (__probable(pmap_asid_plru)) { + unsigned plru_index = asid_index >> 6; + if (__improbable(os_atomic_andnot(&asid_plru_bitmap[plru_index], (1ULL << (asid_index & 63)), relaxed) == 0)) { + asid_plru_generation[plru_index] = ++asid_plru_gencount; + asid_plru_bitmap[plru_index] = ((plru_index == (MAX_HW_ASIDS >> 6)) ? 
~(1ULL << 63) : UINT64_MAX); + } + } +} + static bool alloc_asid(pmap_t pmap) { - int vasid; + int vasid = -1; uint16_t hw_asid; pmap_simple_lock(&asid_lock); - vasid = bitmap_first(&asid_bitmap[0], MAX_ASID); - if (vasid < 0) { + + if (__probable(pmap_asid_plru)) { + unsigned plru_index = 0; + uint64_t lowest_gen = asid_plru_generation[0]; + uint64_t lowest_gen_bitmap = asid_plru_bitmap[0]; + for (unsigned i = 1; i < (sizeof(asid_plru_generation) / sizeof(asid_plru_generation[0])); ++i) { + if (asid_plru_generation[i] < lowest_gen) { + plru_index = i; + lowest_gen = asid_plru_generation[i]; + lowest_gen_bitmap = asid_plru_bitmap[i]; + } + } + + for (; plru_index < BITMAP_LEN(pmap_max_asids); plru_index += ((MAX_HW_ASIDS + 1) >> 6)) { + uint64_t temp_plru = lowest_gen_bitmap & asid_bitmap[plru_index]; + if (temp_plru) { + vasid = (plru_index << 6) + lsb_first(temp_plru); +#if DEVELOPMENT || DEBUG + ++pmap_asid_hits; +#endif + break; + } + } + } + if (__improbable(vasid < 0)) { + // bitmap_first() returns highest-order bits first, but a 0-based scheme works + // slightly better with the collision detection scheme used by pmap_switch_internal(). + vasid = bitmap_lsb_first(&asid_bitmap[0], pmap_max_asids); +#if DEVELOPMENT || DEBUG + ++pmap_asid_misses; +#endif + } + if (__improbable(vasid < 0)) { pmap_simple_unlock(&asid_lock); return false; } - assert(vasid < MAX_ASID); + assert((uint32_t)vasid < pmap_max_asids); + assert(bitmap_test(&asid_bitmap[0], (unsigned int)vasid)); bitmap_clear(&asid_bitmap[0], (unsigned int)vasid); pmap_simple_unlock(&asid_lock); - // bitmap_first() returns highest-order bits first, but a 0-based scheme works - // slightly better with the collision detection scheme used by pmap_switch_internal(). - vasid = MAX_ASID - 1 - vasid; - hw_asid = vasid % MAX_HW_ASID; - pmap->sw_asid = vasid / MAX_HW_ASID; + hw_asid = vasid % asid_chunk_size; + pmap->sw_asid = (uint8_t)(vasid / asid_chunk_size); + if (__improbable(hw_asid == MAX_HW_ASIDS)) { + /* If we took a PLRU "miss" and ended up with a hardware ASID we can't actually support, + * reassign to a reserved VASID. */ + assert(pmap->sw_asid < UINT8_MAX); + pmap->sw_asid = UINT8_MAX; + /* Allocate from the high end of the hardware ASID range to reduce the likelihood of + * aliasing with vital system processes, which are likely to have lower ASIDs. 
*/ + hw_asid = MAX_HW_ASIDS - 1 - (uint16_t)(vasid / asid_chunk_size); + assert(hw_asid < MAX_HW_ASIDS); + } + pmap_update_plru(hw_asid); hw_asid += 1; // Account for ASID 0, which is reserved for the kernel #if __ARM_KERNEL_PROTECT__ hw_asid <<= 1; // We're really handing out 2 hardware ASIDs, one for EL0 and one for EL1 access @@ -3067,17 +3401,25 @@ static void free_asid(pmap_t pmap) { unsigned int vasid; - uint16_t hw_asid = pmap->hw_asid; - assert(hw_asid != 0); // Should not try to free kernel ASID + uint16_t hw_asid = os_atomic_xchg(&pmap->hw_asid, 0, relaxed); + if (__improbable(hw_asid == 0)) { + return; + } #if __ARM_KERNEL_PROTECT__ hw_asid >>= 1; #endif hw_asid -= 1; - vasid = ((unsigned int)pmap->sw_asid * MAX_HW_ASID) + hw_asid; - vasid = MAX_ASID - 1 - vasid; + if (__improbable(pmap->sw_asid == UINT8_MAX)) { + vasid = ((MAX_HW_ASIDS - 1 - hw_asid) * asid_chunk_size) + MAX_HW_ASIDS; + } else { + vasid = ((unsigned int)pmap->sw_asid * asid_chunk_size) + hw_asid; + } + if (__probable(pmap_asid_plru)) { + os_atomic_or(&asid_plru_bitmap[hw_asid >> 6], (1ULL << (hw_asid & 63)), relaxed); + } pmap_simple_lock(&asid_lock); assert(!bitmap_test(&asid_bitmap[0], vasid)); bitmap_set(&asid_bitmap[0], vasid); @@ -3085,214 +3427,154 @@ free_asid(pmap_t pmap) } -#ifndef PMAP_PV_LOAD_FACTOR -#define PMAP_PV_LOAD_FACTOR 1 +#if XNU_MONITOR + +/* + * Increase the padding for PPL devices to accommodate increased + * mapping pressure from IOMMUs. This isn't strictly necessary, but + * will reduce the need to retry mappings due to PV allocation failure. + */ + +#define PV_LOW_WATER_MARK_DEFAULT (0x400) +#define PV_KERN_LOW_WATER_MARK_DEFAULT (0x400) +#define PV_ALLOC_CHUNK_INITIAL (0x400) +#define PV_KERN_ALLOC_CHUNK_INITIAL (0x400) +#define PV_CPU_MIN (0x80) +#define PV_CPU_MAX (0x400) + +#else + +#define PV_LOW_WATER_MARK_DEFAULT (0x200) +#define PV_KERN_LOW_WATER_MARK_DEFAULT (0x200) +#define PV_ALLOC_CHUNK_INITIAL (0x200) +#define PV_KERN_ALLOC_CHUNK_INITIAL (0x200) +#define PV_CPU_MIN (0x40) +#define PV_CPU_MAX (0x200) + #endif -#define PV_LOW_WATER_MARK_DEFAULT (0x200 * PMAP_PV_LOAD_FACTOR) -#define PV_KERN_LOW_WATER_MARK_DEFAULT (0x200 * PMAP_PV_LOAD_FACTOR) -#define PV_ALLOC_CHUNK_INITIAL (0x200 * PMAP_PV_LOAD_FACTOR) -#define PV_KERN_ALLOC_CHUNK_INITIAL (0x200 * PMAP_PV_LOAD_FACTOR) #define PV_ALLOC_INITIAL_TARGET (PV_ALLOC_CHUNK_INITIAL * 5) #define PV_KERN_ALLOC_INITIAL_TARGET (PV_KERN_ALLOC_CHUNK_INITIAL) -uint32_t pv_free_count MARK_AS_PMAP_DATA = 0; uint32_t pv_page_count MARK_AS_PMAP_DATA = 0; -uint32_t pv_kern_free_count MARK_AS_PMAP_DATA = 0; - -uint32_t pv_low_water_mark MARK_AS_PMAP_DATA; -uint32_t pv_kern_low_water_mark MARK_AS_PMAP_DATA; -uint32_t pv_alloc_chunk MARK_AS_PMAP_DATA; -uint32_t pv_kern_alloc_chunk MARK_AS_PMAP_DATA; -thread_t mapping_replenish_thread; -event_t mapping_replenish_event; -volatile uint32_t mappingrecurse = 0; +uint32_t pv_kern_low_water_mark MARK_AS_PMAP_DATA = PV_KERN_LOW_WATER_MARK_DEFAULT; +uint32_t pv_alloc_initial_target MARK_AS_PMAP_DATA = PV_ALLOC_INITIAL_TARGET; +uint32_t pv_kern_alloc_initial_target MARK_AS_PMAP_DATA = PV_KERN_ALLOC_INITIAL_TARGET; -unsigned pmap_mapping_thread_wakeups; unsigned pmap_reserve_replenish_stat MARK_AS_PMAP_DATA; unsigned pmap_kern_reserve_alloc_stat MARK_AS_PMAP_DATA; +static inline void pv_list_alloc(pv_entry_t **pv_ep); +static inline void pv_list_kern_alloc(pv_entry_t **pv_e); +static inline void pv_list_free(pv_entry_t *pv_eh, pv_entry_t *pv_et, int pv_cnt, uint32_t kern_target); -static void -pv_init( - 
void) -{ - simple_lock_init(&pv_free_list_lock, 0); - simple_lock_init(&pv_kern_free_list_lock, 0); - pv_free_list = PV_ENTRY_NULL; - pv_free_count = 0x0U; - pv_kern_free_list = PV_ENTRY_NULL; - pv_kern_free_count = 0x0U; -} - -static inline void PV_ALLOC(pv_entry_t **pv_ep); -static inline void PV_KERN_ALLOC(pv_entry_t **pv_e); -static inline void PV_FREE_LIST(pv_entry_t *pv_eh, pv_entry_t *pv_et, int pv_cnt, uint32_t kern_target); - -static boolean_t +static pv_alloc_return_t pv_alloc( pmap_t pmap, unsigned int pai, pv_entry_t **pvepp) { if (pmap != NULL) { - PMAP_ASSERT_LOCKED(pmap); + pmap_assert_locked_w(pmap); } ASSERT_PVH_LOCKED(pai); - PV_ALLOC(pvepp); - if (PV_ENTRY_NULL == *pvepp) { - if ((pmap == NULL) || (kernel_pmap == pmap)) { - PV_KERN_ALLOC(pvepp); - - if (PV_ENTRY_NULL == *pvepp) { - pv_entry_t *pv_e; - pv_entry_t *pv_eh; - pv_entry_t *pv_et; - int pv_cnt; - unsigned j; - pmap_paddr_t pa; - kern_return_t ret; - - UNLOCK_PVH(pai); - if (pmap != NULL) { - PMAP_UNLOCK(pmap); - } - - ret = pmap_pages_alloc(&pa, PAGE_SIZE, PMAP_PAGES_ALLOCATE_NOWAIT); - - if (ret == KERN_RESOURCE_SHORTAGE) { - ret = pmap_pages_alloc(&pa, PAGE_SIZE, PMAP_PAGES_RECLAIM_NOWAIT); - } - - if (ret != KERN_SUCCESS) { - panic("%s: failed to alloc page for kernel, ret=%d, " - "pmap=%p, pai=%u, pvepp=%p", - __FUNCTION__, ret, - pmap, pai, pvepp); - } - - pv_page_count++; - - pv_e = (pv_entry_t *)phystokv(pa); - pv_cnt = 0; - pv_eh = pv_et = PV_ENTRY_NULL; - *pvepp = pv_e; - pv_e++; - - for (j = 1; j < (PAGE_SIZE / sizeof(pv_entry_t)); j++) { - pv_e->pve_next = pv_eh; - pv_eh = pv_e; - - if (pv_et == PV_ENTRY_NULL) { - pv_et = pv_e; - } - pv_cnt++; - pv_e++; - } - PV_FREE_LIST(pv_eh, pv_et, pv_cnt, pv_kern_low_water_mark); - if (pmap != NULL) { - PMAP_LOCK(pmap); - } - LOCK_PVH(pai); - return FALSE; - } - } else { - UNLOCK_PVH(pai); - PMAP_UNLOCK(pmap); - - pv_entry_t *pv_e; - pv_entry_t *pv_eh; - pv_entry_t *pv_et; - int pv_cnt; - unsigned j; - pmap_paddr_t pa; - kern_return_t ret; - + pv_list_alloc(pvepp); + if (PV_ENTRY_NULL != *pvepp) { + return PV_ALLOC_SUCCESS; + } #if XNU_MONITOR - /* - * The PPL has no guarantee that its allocation - * will succeed, so steal pages if necessary to - * ensure that we can free up a PV allocation. 
- */ - ret = pmap_pages_alloc(&pa, PAGE_SIZE, PMAP_PAGES_ALLOCATE_NOWAIT); - - if (ret == KERN_RESOURCE_SHORTAGE) { - ret = pmap_pages_alloc(&pa, PAGE_SIZE, PMAP_PAGES_RECLAIM_NOWAIT); - } + unsigned alloc_flags = PMAP_PAGES_ALLOCATE_NOWAIT; #else - ret = pmap_pages_alloc(&pa, PAGE_SIZE, 0); + unsigned alloc_flags = 0; #endif + if ((pmap == NULL) || (kernel_pmap == pmap)) { + pv_list_kern_alloc(pvepp); - if (ret != KERN_SUCCESS) { - panic("%s: failed to alloc page, ret=%d, " - "pmap=%p, pai=%u, pvepp=%p", - __FUNCTION__, ret, - pmap, pai, pvepp); - } + if (PV_ENTRY_NULL != *pvepp) { + return PV_ALLOC_SUCCESS; + } + alloc_flags = PMAP_PAGES_ALLOCATE_NOWAIT | PMAP_PAGES_RECLAIM_NOWAIT; + } + pv_entry_t *pv_e; + pv_entry_t *pv_eh; + pv_entry_t *pv_et; + int pv_cnt; + pmap_paddr_t pa; + kern_return_t ret; + pv_alloc_return_t pv_status = PV_ALLOC_RETRY; - pv_page_count++; + UNLOCK_PVH(pai); + if (pmap != NULL) { + pmap_unlock(pmap); + } - pv_e = (pv_entry_t *)phystokv(pa); - pv_cnt = 0; - pv_eh = pv_et = PV_ENTRY_NULL; - *pvepp = pv_e; - pv_e++; + ret = pmap_pages_alloc_zeroed(&pa, PAGE_SIZE, alloc_flags); - for (j = 1; j < (PAGE_SIZE / sizeof(pv_entry_t)); j++) { - pv_e->pve_next = pv_eh; - pv_eh = pv_e; + if (ret != KERN_SUCCESS) { + pv_status = PV_ALLOC_FAIL; + goto pv_alloc_cleanup; + } - if (pv_et == PV_ENTRY_NULL) { - pv_et = pv_e; - } - pv_cnt++; - pv_e++; - } + pv_page_count++; - PV_FREE_LIST(pv_eh, pv_et, pv_cnt, pv_kern_low_water_mark); + pv_e = (pv_entry_t *)phystokv(pa); + *pvepp = pv_e; + pv_cnt = (PAGE_SIZE / sizeof(pv_entry_t)) - 1; + pv_eh = pv_e + 1; + pv_et = &pv_e[pv_cnt]; - PMAP_LOCK(pmap); - LOCK_PVH(pai); - return FALSE; - } + pv_list_free(pv_eh, pv_et, pv_cnt, pv_kern_low_water_mark); +pv_alloc_cleanup: + if (pmap != NULL) { + pmap_lock(pmap); } - assert(PV_ENTRY_NULL != *pvepp); - return TRUE; + LOCK_PVH(pai); + return pv_status; } -static void -pv_free( +static inline void +pv_free_entry( pv_entry_t *pvep) { - PV_FREE_LIST(pvep, pvep, 1, pv_kern_low_water_mark); -} - -static void -pv_list_free( - pv_entry_t *pvehp, - pv_entry_t *pvetp, - unsigned int cnt) -{ - PV_FREE_LIST(pvehp, pvetp, cnt, pv_kern_low_water_mark); + pv_list_free(pvep, pvep, 1, pv_kern_low_water_mark); } static inline void -pv_water_mark_check(void) +pv_free_list_alloc(pv_free_list_t *free_list, pv_entry_t **pv_ep) { - if (__improbable((pv_free_count < pv_low_water_mark) || (pv_kern_free_count < pv_kern_low_water_mark))) { - if (!mappingrecurse && os_atomic_cmpxchg(&mappingrecurse, 0, 1, acq_rel)) { - thread_wakeup(&mapping_replenish_event); + assert(((free_list->list != NULL) && (free_list->count > 0)) || + ((free_list->list == NULL) && (free_list->count == 0))); + + if ((*pv_ep = free_list->list) != NULL) { + pv_entry_t *pv_e = *pv_ep; + if ((pv_e->pve_next == NULL) && (free_list->count > 1)) { + free_list->list = pv_e + 1; + } else { + free_list->list = pv_e->pve_next; + pv_e->pve_next = PV_ENTRY_NULL; } + free_list->count--; } } static inline void -PV_ALLOC(pv_entry_t **pv_ep) +pv_list_alloc(pv_entry_t **pv_ep) { assert(*pv_ep == PV_ENTRY_NULL); #if !XNU_MONITOR - if (pv_kern_free_count < pv_kern_low_water_mark) { + mp_disable_preemption(); +#endif + pmap_cpu_data_t *pmap_cpu_data = pmap_get_cpu_data(); + pv_free_list_alloc(&pmap_cpu_data->pv_free, pv_ep); +#if !XNU_MONITOR + mp_enable_preemption(); +#endif + if (*pv_ep != PV_ENTRY_NULL) { + return; + } +#if !XNU_MONITOR + if (pv_kern_free.count < pv_kern_low_water_mark) { /* * If the kernel reserved pool is low, let non-kernel mappings wait for a 
page * from the VM. @@ -3301,81 +3583,115 @@ PV_ALLOC(pv_entry_t **pv_ep) } #endif pmap_simple_lock(&pv_free_list_lock); - - if ((*pv_ep = pv_free_list) != 0) { - pv_free_list = (pv_entry_t *)(*pv_ep)->pve_next; - (*pv_ep)->pve_next = PV_ENTRY_NULL; - pv_free_count--; - } - + pv_free_list_alloc(&pv_free, pv_ep); pmap_simple_unlock(&pv_free_list_lock); } static inline void -PV_FREE_LIST(pv_entry_t *pv_eh, pv_entry_t *pv_et, int pv_cnt, uint32_t kern_target) +pv_list_free(pv_entry_t *pv_eh, pv_entry_t *pv_et, int pv_cnt, uint32_t kern_target) { - bool use_kernel_list = false; - pmap_simple_lock(&pv_kern_free_list_lock); - if (pv_kern_free_count < kern_target) { - pv_et->pve_next = pv_kern_free_list; - pv_kern_free_list = pv_eh; - pv_kern_free_count += pv_cnt; - use_kernel_list = true; + if (pv_cnt == 1) { + bool limit_exceeded = false; +#if !XNU_MONITOR + mp_disable_preemption(); +#endif + pmap_cpu_data_t *pmap_cpu_data = pmap_get_cpu_data(); + pv_et->pve_next = pmap_cpu_data->pv_free.list; + pmap_cpu_data->pv_free.list = pv_eh; + if (pmap_cpu_data->pv_free.count == PV_CPU_MIN) { + pmap_cpu_data->pv_free_tail = pv_et; + } + pmap_cpu_data->pv_free.count += pv_cnt; + if (__improbable(pmap_cpu_data->pv_free.count > PV_CPU_MAX)) { + pv_et = pmap_cpu_data->pv_free_tail; + pv_cnt = pmap_cpu_data->pv_free.count - PV_CPU_MIN; + pmap_cpu_data->pv_free.list = pmap_cpu_data->pv_free_tail->pve_next; + pmap_cpu_data->pv_free.count = PV_CPU_MIN; + limit_exceeded = true; + } +#if !XNU_MONITOR + mp_enable_preemption(); +#endif + if (__probable(!limit_exceeded)) { + return; + } } - pmap_simple_unlock(&pv_kern_free_list_lock); - - if (!use_kernel_list) { + if (__improbable(pv_kern_free.count < kern_target)) { + pmap_simple_lock(&pv_kern_free_list_lock); + pv_et->pve_next = pv_kern_free.list; + pv_kern_free.list = pv_eh; + pv_kern_free.count += pv_cnt; + pmap_simple_unlock(&pv_kern_free_list_lock); + } else { pmap_simple_lock(&pv_free_list_lock); - pv_et->pve_next = (pv_entry_t *)pv_free_list; - pv_free_list = pv_eh; - pv_free_count += pv_cnt; + pv_et->pve_next = pv_free.list; + pv_free.list = pv_eh; + pv_free.count += pv_cnt; pmap_simple_unlock(&pv_free_list_lock); } } static inline void -PV_KERN_ALLOC(pv_entry_t **pv_e) +pv_list_kern_alloc(pv_entry_t **pv_ep) { - assert(*pv_e == PV_ENTRY_NULL); + assert(*pv_ep == PV_ENTRY_NULL); pmap_simple_lock(&pv_kern_free_list_lock); - - if ((*pv_e = pv_kern_free_list) != 0) { - pv_kern_free_list = (pv_entry_t *)(*pv_e)->pve_next; - (*pv_e)->pve_next = PV_ENTRY_NULL; - pv_kern_free_count--; + if (pv_kern_free.count > 0) { pmap_kern_reserve_alloc_stat++; } - + pv_free_list_alloc(&pv_kern_free, pv_ep); pmap_simple_unlock(&pv_kern_free_list_lock); } +void +mapping_adjust(void) +{ + // Not implemented for arm/arm64 +} + /* - * Creates a target number of free pv_entry_t objects for the kernel free list - * and the general free list. + * Fills the kernel and general PV free lists back up to their low watermarks. 
*/ MARK_AS_PMAP_TEXT static kern_return_t -mapping_free_prime_internal(void) +mapping_replenish_internal(uint32_t kern_target_count, uint32_t user_target_count) { - SECURITY_READ_ONLY_LATE(static boolean_t) mapping_free_prime_internal_called = FALSE; - SECURITY_READ_ONLY_LATE(static boolean_t) mapping_free_prime_internal_done = FALSE; + pv_entry_t *pv_eh; + pv_entry_t *pv_et; + int pv_cnt; + pmap_paddr_t pa; + kern_return_t ret = KERN_SUCCESS; - if (mapping_free_prime_internal_done) { - return KERN_FAILURE; - } - - if (!mapping_free_prime_internal_called) { - mapping_free_prime_internal_called = TRUE; + while ((pv_free.count < user_target_count) || (pv_kern_free.count < kern_target_count)) { +#if XNU_MONITOR + if ((ret = pmap_pages_alloc_zeroed(&pa, PAGE_SIZE, PMAP_PAGES_ALLOCATE_NOWAIT)) != KERN_SUCCESS) { + return ret; + } +#else + ret = pmap_pages_alloc_zeroed(&pa, PAGE_SIZE, 0); + assert(ret == KERN_SUCCESS); +#endif - pv_low_water_mark = PV_LOW_WATER_MARK_DEFAULT; + pv_page_count++; - /* Alterable via sysctl */ - pv_kern_low_water_mark = PV_KERN_LOW_WATER_MARK_DEFAULT; + pv_eh = (pv_entry_t *)phystokv(pa); + pv_cnt = PAGE_SIZE / sizeof(pv_entry_t); + pv_et = &pv_eh[pv_cnt - 1]; - pv_kern_alloc_chunk = PV_KERN_ALLOC_CHUNK_INITIAL; - pv_alloc_chunk = PV_ALLOC_CHUNK_INITIAL; + pmap_reserve_replenish_stat += pv_cnt; + pv_list_free(pv_eh, pv_et, pv_cnt, kern_target_count); } - return mapping_replenish_internal(PV_KERN_ALLOC_INITIAL_TARGET, PV_ALLOC_INITIAL_TARGET); + return ret; +} + +/* + * Creates a target number of free pv_entry_t objects for the kernel free list + * and the general free list. + */ +MARK_AS_PMAP_TEXT static kern_return_t +mapping_free_prime_internal(void) +{ + return mapping_replenish_internal(pv_kern_alloc_initial_target, pv_alloc_initial_target); } void @@ -3387,19 +3703,19 @@ mapping_free_prime(void) unsigned int i = 0; /* - * Allocate the needed PPL pages up front, to minimize the change that + * Allocate the needed PPL pages up front, to minimize the chance that * we will need to call into the PPL multiple times. */ - for (i = 0; i < PV_ALLOC_INITIAL_TARGET; i += (PAGE_SIZE / sizeof(pv_entry_t))) { - pmap_alloc_page_for_ppl(); + for (i = 0; i < pv_alloc_initial_target; i += (PAGE_SIZE / sizeof(pv_entry_t))) { + pmap_alloc_page_for_ppl(0); } - for (i = 0; i < PV_KERN_ALLOC_INITIAL_TARGET; i += (PAGE_SIZE / sizeof(pv_entry_t))) { - pmap_alloc_page_for_ppl(); + for (i = 0; i < pv_kern_alloc_initial_target; i += (PAGE_SIZE / sizeof(pv_entry_t))) { + pmap_alloc_page_for_ppl(0); } while ((kr = mapping_free_prime_ppl()) == KERN_RESOURCE_SHORTAGE) { - pmap_alloc_page_for_ppl(); + pmap_alloc_page_for_ppl(0); } #else kr = mapping_free_prime_internal(); @@ -3411,129 +3727,21 @@ mapping_free_prime(void) } } -void mapping_replenish(void); - -void -mapping_adjust(void) -{ - kern_return_t mres; - - mres = kernel_thread_start_priority((thread_continue_t)mapping_replenish, NULL, MAXPRI_KERNEL, &mapping_replenish_thread); - if (mres != KERN_SUCCESS) { - panic("%s: mapping_replenish thread creation failed", - __FUNCTION__); - } - thread_deallocate(mapping_replenish_thread); -} - -/* - * Fills the kernel and general PV free lists back up to their low watermarks. 
- */ -MARK_AS_PMAP_TEXT static kern_return_t -mapping_replenish_internal(uint32_t kern_target_count, uint32_t user_target_count) -{ - pv_entry_t *pv_e; - pv_entry_t *pv_eh; - pv_entry_t *pv_et; - int pv_cnt; - unsigned j; - pmap_paddr_t pa; - kern_return_t ret = KERN_SUCCESS; - - while ((pv_free_count < user_target_count) || (pv_kern_free_count < kern_target_count)) { - pv_cnt = 0; - pv_eh = pv_et = PV_ENTRY_NULL; - -#if XNU_MONITOR - if ((ret = pmap_pages_alloc(&pa, PAGE_SIZE, PMAP_PAGES_ALLOCATE_NOWAIT)) != KERN_SUCCESS) { - return ret; - } -#else - ret = pmap_pages_alloc(&pa, PAGE_SIZE, 0); - assert(ret == KERN_SUCCESS); -#endif - - pv_page_count++; - - pv_e = (pv_entry_t *)phystokv(pa); - - for (j = 0; j < (PAGE_SIZE / sizeof(pv_entry_t)); j++) { - pv_e->pve_next = pv_eh; - pv_eh = pv_e; - - if (pv_et == PV_ENTRY_NULL) { - pv_et = pv_e; - } - pv_cnt++; - pv_e++; - } - pmap_reserve_replenish_stat += pv_cnt; - PV_FREE_LIST(pv_eh, pv_et, pv_cnt, kern_target_count); - } - - return ret; -} - -/* - * Continuation function that keeps the PV free lists from running out of free - * elements. - */ -__attribute__((noreturn)) -void -mapping_replenish(void) -{ - kern_return_t kr; - - /* We qualify for VM privileges...*/ - current_thread()->options |= TH_OPT_VMPRIV; - - for (;;) { -#if XNU_MONITOR - - while ((kr = mapping_replenish_ppl(pv_kern_low_water_mark, pv_low_water_mark)) == KERN_RESOURCE_SHORTAGE) { - pmap_alloc_page_for_ppl(); - } -#else - kr = mapping_replenish_internal(pv_kern_low_water_mark, pv_low_water_mark); -#endif - - if (kr != KERN_SUCCESS) { - panic("%s: failed, kr=%d", __FUNCTION__, kr); - } - - /* Check if the kernel pool has been depleted since the - * first pass, to reduce refill latency. - */ - if (pv_kern_free_count < pv_kern_low_water_mark) { - continue; - } - /* Block sans continuation to avoid yielding kernel stack */ - assert_wait(&mapping_replenish_event, THREAD_UNINT); - mappingrecurse = 0; - thread_block(THREAD_CONTINUE_NULL); - pmap_mapping_thread_wakeups++; - } -} - - static void ptd_bootstrap( pt_desc_t *ptdp, unsigned int ptd_cnt) { simple_lock_init(&ptd_free_list_lock, 0); - while (ptd_cnt != 0) { - (*(void **)ptdp) = (void *)ptd_free_list; - ptd_free_list = ptdp; - ptdp++; - ptd_cnt--; - ptd_free_count++; - } + // Region represented by ptdp should be cleared by pmap_bootstrap() + *((void**)(&ptdp[ptd_cnt - 1])) = (void*)ptd_free_list; + ptd_free_list = ptdp; + ptd_free_count += ptd_cnt; ptd_preboot = FALSE; } static pt_desc_t* -ptd_alloc_unlinked(bool reclaim) +ptd_alloc_unlinked(void) { pt_desc_t *ptdp; unsigned i; @@ -3542,47 +3750,39 @@ ptd_alloc_unlinked(bool reclaim) pmap_simple_lock(&ptd_free_list_lock); } + assert(((ptd_free_list != NULL) && (ptd_free_count > 0)) || + ((ptd_free_list == NULL) && (ptd_free_count == 0))); + if (ptd_free_count == 0) { - unsigned int ptd_cnt; - pt_desc_t *ptdp_next; + unsigned int ptd_cnt = PAGE_SIZE / sizeof(pt_desc_t); if (ptd_preboot) { ptdp = (pt_desc_t *)avail_start; - avail_start += ARM_PGBYTES; - ptdp_next = ptdp; - ptd_cnt = ARM_PGBYTES / sizeof(pt_desc_t); + avail_start += PAGE_SIZE; + bzero(ptdp, PAGE_SIZE); } else { pmap_paddr_t pa; - kern_return_t ret; pmap_simple_unlock(&ptd_free_list_lock); - if (pmap_pages_alloc(&pa, PAGE_SIZE, PMAP_PAGES_ALLOCATE_NOWAIT) != KERN_SUCCESS) { - if (reclaim) { - ret = pmap_pages_alloc(&pa, PAGE_SIZE, PMAP_PAGES_RECLAIM_NOWAIT); - assert(ret == KERN_SUCCESS); - } else { - return NULL; - } + if (pmap_pages_alloc_zeroed(&pa, PAGE_SIZE, PMAP_PAGES_ALLOCATE_NOWAIT) != KERN_SUCCESS) { + 
return NULL; } ptdp = (pt_desc_t *)phystokv(pa); pmap_simple_lock(&ptd_free_list_lock); - ptdp_next = ptdp; - ptd_cnt = PAGE_SIZE / sizeof(pt_desc_t); } - while (ptd_cnt != 0) { - (*(void **)ptdp_next) = (void *)ptd_free_list; - ptd_free_list = ptdp_next; - ptdp_next++; - ptd_cnt--; - ptd_free_count++; - } + *((void**)(&ptdp[ptd_cnt - 1])) = (void*)ptd_free_list; + ptd_free_list = ptdp; + ptd_free_count += ptd_cnt; } if ((ptdp = ptd_free_list) != PTD_ENTRY_NULL) { ptd_free_list = (pt_desc_t *)(*(void **)ptdp); + if ((ptd_free_list == NULL) && (ptd_free_count > 1)) { + ptd_free_list = ptdp + 1; + } ptd_free_count--; } else { panic("%s: out of ptd entry", @@ -3607,9 +3807,9 @@ ptd_alloc_unlinked(bool reclaim) } static inline pt_desc_t* -ptd_alloc(pmap_t pmap, bool reclaim) +ptd_alloc(pmap_t pmap) { - pt_desc_t *ptdp = ptd_alloc_unlinked(reclaim); + pt_desc_t *ptdp = ptd_alloc_unlinked(); if (ptdp == NULL) { return NULL; @@ -3663,6 +3863,8 @@ ptd_init( unsigned int level, pt_entry_t *pte_p) { + const pt_attr_t * const pt_attr = pmap_get_pt_attr(pmap); + if (ptdp->pmap != pmap) { panic("%s: pmap mismatch, " "ptdp=%p, pmap=%p, va=%p, level=%u, pte_p=%p", @@ -3670,15 +3872,12 @@ ptd_init( ptdp, pmap, (void*)va, level, pte_p); } -#if (__ARM_VMSA__ == 7) - assert(level == 2); - ptdp->ptd_info[ARM_PT_DESC_INDEX(pte_p)].va = (vm_offset_t) va & ~(ARM_TT_L1_PT_OFFMASK); -#else - assert(level > pt_attr_root_level(pmap_get_pt_attr(pmap))); - ptdp->ptd_info[ARM_PT_DESC_INDEX(pte_p)].va = (vm_offset_t) va & ~(pt_attr_ln_offmask(pmap_get_pt_attr(pmap), level - 1)); -#endif - if (level < PMAP_TT_MAX_LEVEL) { - ptdp->ptd_info[ARM_PT_DESC_INDEX(pte_p)].refcnt = PT_DESC_REFCOUNT; + assert(level > pt_attr_root_level(pt_attr)); + ptd_info_t *ptd_info = ptd_get_info(ptdp, pte_p); + ptd_info->va = (vm_offset_t) va & ~pt_attr_ln_pt_offmask(pt_attr, level - 1); + + if (level < pt_attr_leaf_level(pt_attr)) { + ptd_info->refcnt = PT_DESC_REFCOUNT; } } @@ -3741,7 +3940,7 @@ pmap_pte( if ((tte & ARM_TTE_TYPE_MASK) != ARM_TTE_TYPE_TABLE) { return PT_ENTRY_NULL; } - ptp = (pt_entry_t *) ttetokv(tte) + ptenum(addr); + ptp = (pt_entry_t *) ttetokv(tte) + pte_index(pmap, pt_addr, addr); return ptp; } @@ -3897,7 +4096,7 @@ is_pte_aprr_protected(pt_entry_t pte) #if __APRR_SUPPORTED__ static boolean_t -is_pte_xprr_protected(pt_entry_t pte) +is_pte_xprr_protected(pmap_t pmap __unused, pt_entry_t pte) { #if __APRR_SUPPORTED__ return is_pte_aprr_protected(pte); @@ -4214,20 +4413,91 @@ scan: #define PMAP_ALIGN(addr, align) ((addr) + ((align) - 1) & ~((align) - 1)) +static void +pmap_compute_pv_targets(void) +{ + DTEntry entry; + void const *prop = NULL; + int err; + unsigned int prop_size; + + err = SecureDTLookupEntry(NULL, "/defaults", &entry); + assert(err == kSuccess); + + if (kSuccess == SecureDTGetProperty(entry, "pmap-pv-count", &prop, &prop_size)) { + if (prop_size != sizeof(pv_alloc_initial_target)) { + panic("pmap-pv-count property is not a 32-bit integer"); + } + pv_alloc_initial_target = *((uint32_t const *)prop); + } + + if (kSuccess == SecureDTGetProperty(entry, "pmap-kern-pv-count", &prop, &prop_size)) { + if (prop_size != sizeof(pv_kern_alloc_initial_target)) { + panic("pmap-kern-pv-count property is not a 32-bit integer"); + } + pv_kern_alloc_initial_target = *((uint32_t const *)prop); + } + + if (kSuccess == SecureDTGetProperty(entry, "pmap-kern-pv-min", &prop, &prop_size)) { + if (prop_size != sizeof(pv_kern_low_water_mark)) { + panic("pmap-kern-pv-min property is not a 32-bit integer"); + } + 
pv_kern_low_water_mark = *((uint32_t const *)prop); + } +} + + +static uint32_t +pmap_compute_max_asids(void) +{ + DTEntry entry; + void const *prop = NULL; + uint32_t max_asids; + int err; + unsigned int prop_size; + + err = SecureDTLookupEntry(NULL, "/defaults", &entry); + assert(err == kSuccess); + + if (kSuccess != SecureDTGetProperty(entry, "pmap-max-asids", &prop, &prop_size)) { + /* TODO: consider allowing maxproc limits to be scaled earlier so that + * we can choose a more flexible default value here. */ + return MAX_ASIDS; + } + + if (prop_size != sizeof(max_asids)) { + panic("pmap-max-asids property is not a 32-bit integer"); + } + + max_asids = *((uint32_t const *)prop); + /* Round up to the nearest 64 to make things a bit easier for the Pseudo-LRU allocator. */ + max_asids = (max_asids + 63) & ~63UL; + + if (((max_asids + MAX_HW_ASIDS) / (MAX_HW_ASIDS + 1)) > MIN(MAX_HW_ASIDS, UINT8_MAX)) { + /* currently capped by size of pmap->sw_asid */ + panic("pmap-max-asids too large"); + } + if (max_asids == 0) { + panic("pmap-max-asids cannot be zero"); + } + return max_asids; +} + + static vm_size_t pmap_compute_io_rgns(void) { DTEntry entry; - pmap_io_range_t *ranges; + pmap_io_range_t const *ranges; uint64_t rgn_end; - void *prop = NULL; + void const *prop = NULL; int err; unsigned int prop_size; - err = DTLookupEntry(NULL, "/defaults", &entry); + err = SecureDTLookupEntry(NULL, "/defaults", &entry); assert(err == kSuccess); - if (kSuccess != DTGetProperty(entry, "pmap-io-ranges", &prop, &prop_size)) { + if (kSuccess != SecureDTGetProperty(entry, "pmap-io-ranges", &prop, &prop_size)) { return 0; } @@ -4282,8 +4552,8 @@ static void pmap_load_io_rgns(void) { DTEntry entry; - pmap_io_range_t *ranges; - void *prop = NULL; + pmap_io_range_t const *ranges; + void const *prop = NULL; int err; unsigned int prop_size; @@ -4291,10 +4561,10 @@ pmap_load_io_rgns(void) return; } - err = DTLookupEntry(NULL, "/defaults", &entry); + err = SecureDTLookupEntry(NULL, "/defaults", &entry); assert(err == kSuccess); - err = DTGetProperty(entry, "pmap-io-ranges", &prop, &prop_size); + err = SecureDTGetProperty(entry, "pmap-io-ranges", &prop, &prop_size); assert(err == kSuccess); ranges = prop; @@ -4396,6 +4666,7 @@ pmap_bootstrap( vm_size_t ptd_root_table_size; vm_size_t pp_attr_table_size; vm_size_t io_attr_table_size; + vm_size_t asid_table_size; unsigned int npages; vm_map_offset_t maxoffset; @@ -4407,7 +4678,11 @@ pmap_bootstrap( PE_parse_boot_argn("-unsafe_kernel_text", &pmap_ppl_disable, sizeof(pmap_ppl_disable)); #endif - simple_lock_init(&pmap_ppl_free_page_lock, 0); +#if CONFIG_CSR_FROM_DT + if (csr_unsafe_kernel_text) { + pmap_ppl_disable = true; + } +#endif /* CONFIG_CSR_FROM_DT */ #if __APRR_SUPPORTED__ if (((uintptr_t)(&ppl_trampoline_start)) % PAGE_SIZE) { @@ -4460,8 +4735,11 @@ pmap_bootstrap( #endif kernel_pmap->stamp = os_atomic_inc(&pmap_stamp, relaxed); - kernel_pmap->nested_region_grand_addr = 0x0ULL; - kernel_pmap->nested_region_subord_addr = 0x0ULL; +#if ARM_PARAMETERIZED_PMAP + kernel_pmap->pmap_pt_attr = native_pt_attr; +#endif /* ARM_PARAMETERIZED_PMAP */ + + kernel_pmap->nested_region_addr = 0x0ULL; kernel_pmap->nested_region_size = 0x0ULL; kernel_pmap->nested_region_asid_bitmap = NULL; kernel_pmap->nested_region_asid_bitmap_size = 0x0UL; @@ -4472,7 +4750,7 @@ pmap_bootstrap( kernel_pmap->hw_asid = 0; kernel_pmap->sw_asid = 0; - PMAP_LOCK_INIT(kernel_pmap); + pmap_lock_init(kernel_pmap); memset((void *) &kernel_pmap->stats, 0, sizeof(kernel_pmap->stats)); /* allocate space for and 
initialize the bookkeeping structures */ @@ -4482,6 +4760,19 @@ pmap_bootstrap( pv_head_size = round_page(sizeof(pv_entry_t *) * npages); // allocate enough initial PTDs to map twice the available physical memory ptd_root_table_size = sizeof(pt_desc_t) * (mem_size / ((PAGE_SIZE / sizeof(pt_entry_t)) * ARM_PGBYTES)) * 2; + pmap_max_asids = pmap_compute_max_asids(); + pmap_asid_plru = (pmap_max_asids > MAX_HW_ASIDS); + PE_parse_boot_argn("pmap_asid_plru", &pmap_asid_plru, sizeof(pmap_asid_plru)); + /* Align the range of available hardware ASIDs to a multiple of 64 to enable the + * masking used by the PLRU scheme. This means we must handle the case in which + * the returned hardware ASID is MAX_HW_ASIDS, which we do in alloc_asid() and free_asid(). */ + _Static_assert(sizeof(asid_plru_bitmap[0] == sizeof(uint64_t)), "bitmap_t is not a 64-bit integer"); + _Static_assert(((MAX_HW_ASIDS + 1) % 64) == 0, "MAX_HW_ASIDS + 1 is not divisible by 64"); + asid_chunk_size = (pmap_asid_plru ? (MAX_HW_ASIDS + 1) : MAX_HW_ASIDS); + + asid_table_size = sizeof(*asid_bitmap) * BITMAP_LEN(pmap_max_asids); + + pmap_compute_pv_targets(); pmap_struct_start = avail_start; @@ -4492,7 +4783,9 @@ pmap_bootstrap( pv_head_table = (pv_entry_t **) phystokv(avail_start); avail_start = PMAP_ALIGN(avail_start + pv_head_size, __alignof(pt_desc_t)); ptd_root_table = (pt_desc_t *)phystokv(avail_start); - avail_start = round_page(avail_start + ptd_root_table_size); + avail_start = PMAP_ALIGN(avail_start + ptd_root_table_size, __alignof(bitmap_t)); + asid_bitmap = (bitmap_t*)phystokv(avail_start); + avail_start = round_page(avail_start + asid_table_size); memset((char *)phystokv(pmap_struct_start), 0, avail_start - pmap_struct_start); @@ -4502,7 +4795,7 @@ pmap_bootstrap( #if XNU_MONITOR pmap_array_begin = (void *)phystokv(avail_start); pmap_array = pmap_array_begin; - avail_start += round_page(MAX_ASID * sizeof(struct pmap)); + avail_start += round_page(PMAP_ARRAY_SIZE * sizeof(struct pmap)); pmap_array_end = (void *)phystokv(avail_start); pmap_array_count = ((pmap_array_end - pmap_array_begin) / sizeof(struct pmap)); @@ -4518,17 +4811,12 @@ pmap_bootstrap( pmap_ledger_refcnt = pmap_ledger_refcnt_begin; avail_start += round_page(MAX_PMAP_LEDGERS * sizeof(os_refcnt_t)); pmap_ledger_refcnt_end = (void *)phystokv(avail_start); - - simple_lock_init(&pmap_ledger_lock, 0); #endif pmap_cpu_data_array_init(); vm_first_phys = gPhysBase; vm_last_phys = trunc_page(avail_end); - simple_lock_init(&pmaps_lock, 0); - simple_lock_init(&asid_lock, 0); - simple_lock_init(&tt1_lock, 0); queue_init(&map_pmap_list); queue_enter(&map_pmap_list, kernel_pmap, pmap_t, pmaps); free_page_size_tt_list = TT_FREE_ENTRY_NULL; @@ -4541,10 +4829,8 @@ pmap_bootstrap( free_tt_count = 0; free_tt_max = 0; - simple_lock_init(&pt_pages_lock, 0); queue_init(&pt_page_list); - simple_lock_init(&pmap_pages_lock, 0); pmap_pages_request_count = 0; pmap_pages_request_acum = 0; pmap_pages_reclaim_list = PAGE_FREE_ENTRY_NULL; @@ -4552,7 +4838,10 @@ pmap_bootstrap( virtual_space_start = vstart; virtual_space_end = VM_MAX_KERNEL_ADDRESS; - bitmap_full(&asid_bitmap[0], MAX_ASID); + bitmap_full(&asid_bitmap[0], pmap_max_asids); + bitmap_full(&asid_plru_bitmap[0], MAX_HW_ASIDS); + // Clear the highest-order bit, which corresponds to MAX_HW_ASIDS + 1 + asid_plru_bitmap[MAX_HW_ASIDS >> 6] = ~(1ULL << 63); @@ -4573,14 +4862,7 @@ pmap_bootstrap( } #endif -#if DEVELOPMENT || DEBUG - PE_parse_boot_argn("panic_on_unsigned_execute", &panic_on_unsigned_execute, 
sizeof(panic_on_unsigned_execute)); -#endif /* DEVELOPMENT || DEBUG */ - - pmap_nesting_size_min = ARM_NESTING_SIZE_MIN; - pmap_nesting_size_max = ARM_NESTING_SIZE_MAX; - - simple_lock_init(&phys_backup_lock, 0); + PE_parse_boot_argn("pmap_panic_dev_wimg_on_managed", &pmap_panic_dev_wimg_on_managed, sizeof(pmap_panic_dev_wimg_on_managed)); #if MACH_ASSERT @@ -4623,6 +4905,51 @@ pa_set_range_xprr_perm(pmap_paddr_t start_pa, pmap_set_range_xprr_perm(start_va, end_va, expected_perm, new_perm); } +static void +pmap_lockdown_kc(void) +{ + extern vm_offset_t vm_kernelcache_base; + extern vm_offset_t vm_kernelcache_top; + pmap_paddr_t start_pa = kvtophys(vm_kernelcache_base); + pmap_paddr_t end_pa = start_pa + (vm_kernelcache_top - vm_kernelcache_base); + pmap_paddr_t cur_pa = start_pa; + vm_offset_t cur_va = vm_kernelcache_base; + while (cur_pa < end_pa) { + vm_size_t range_size = end_pa - cur_pa; + vm_offset_t ptov_va = phystokv_range(cur_pa, &range_size); + if (ptov_va != cur_va) { + /* + * If the physical address maps back to a virtual address that is non-linear + * w.r.t. the kernelcache, that means it corresponds to memory that will be + * reclaimed by the OS and should therefore not be locked down. + */ + cur_pa += range_size; + cur_va += range_size; + continue; + } + unsigned int pai = (unsigned int)pa_index(cur_pa); + pv_entry_t **pv_h = pai_to_pvh(pai); + + vm_offset_t pvh_flags = pvh_get_flags(pv_h); + + if (__improbable(pvh_flags & PVH_FLAG_LOCKDOWN)) { + panic("pai %d already locked down", pai); + } + pvh_set_flags(pv_h, pvh_flags | PVH_FLAG_LOCKDOWN); + cur_pa += ARM_PGBYTES; + cur_va += ARM_PGBYTES; + } +#if defined(KERNEL_INTEGRITY_CTRR) && defined(CONFIG_XNUPOST) + extern uint64_t ctrr_ro_test; + extern uint64_t ctrr_nx_test; + pmap_paddr_t exclude_pages[] = {kvtophys((vm_offset_t)&ctrr_ro_test), kvtophys((vm_offset_t)&ctrr_nx_test)}; + for (unsigned i = 0; i < (sizeof(exclude_pages) / sizeof(exclude_pages[0])); ++i) { + pv_entry_t **pv_h = pai_to_pvh(pa_index(exclude_pages[i])); + pvh_set_flags(pv_h, pvh_get_flags(pv_h) & ~PVH_FLAG_LOCKDOWN); + } +#endif +} + void pmap_static_allocations_done(void) { @@ -4630,52 +4957,43 @@ pmap_static_allocations_done(void) pmap_paddr_t monitor_end_pa; /* - * We allocate memory for bootstrap starting at topOfKernelData (which - * is at the end of the device tree and ramdisk data, if applicable). - * We use avail_start as a pointer to the first address that has not - * been reserved for bootstrap, so we know which pages to give to the - * virtual memory layer. + * Protect the bootstrap (V=P and V->P) page tables. * * These bootstrap allocations will be used primarily for page tables. * If we wish to secure the page tables, we need to start by marking * these bootstrap allocations as pages that we want to protect. */ - monitor_start_pa = BootArgs->topOfKernelData; - monitor_end_pa = BootArgs->topOfKernelData + BOOTSTRAP_TABLE_SIZE; + monitor_start_pa = kvtophys((vm_offset_t)&bootstrap_pagetables); + monitor_end_pa = monitor_start_pa + BOOTSTRAP_TABLE_SIZE; + + /* The bootstrap page tables are mapped RW at boostrap. */ + pa_set_range_xprr_perm(monitor_start_pa, monitor_end_pa, XPRR_KERN_RW_PERM, XPRR_KERN_RO_PERM); /* - * The bootstrap page tables are mapped RO at boostrap. - * - * Note that this function call requests switching XPRR permissions from - * XPRR_KERN_RO_PERM to XPRR_KERN_RO_PERM. 
Whilst this may seem redundant, - * pa_set_range_xprr_perm() does other things too, such as calling - * pa_set_range_monitor() on the requested address range and performing a number - * of integrity checks on the PTEs. We should still - * call this function for all PPL-owned memory, regardless of whether - * permissions are required to be changed or not. + * We use avail_start as a pointer to the first address that has not + * been reserved for bootstrap, so we know which pages to give to the + * virtual memory layer. */ - pa_set_range_xprr_perm(monitor_start_pa, monitor_end_pa, XPRR_KERN_RO_PERM, XPRR_KERN_RO_PERM); - - monitor_start_pa = BootArgs->topOfKernelData + BOOTSTRAP_TABLE_SIZE; + monitor_start_pa = BootArgs->topOfKernelData; monitor_end_pa = avail_start; /* The other bootstrap allocations are mapped RW at bootstrap. */ pa_set_range_xprr_perm(monitor_start_pa, monitor_end_pa, XPRR_KERN_RW_PERM, XPRR_PPL_RW_PERM); /* - * The RO page tables are mapped RW at bootstrap and remain RW after the call - * to pa_set_range_xprr_perm(). We do this, as opposed to using XPRR_PPL_RW_PERM, - * to work around a functional issue on H11 devices where CTRR shifts the APRR - * lookup table index to USER_XO before APRR is applied, hence causing the hardware + * The RO page tables are mapped RW in arm_vm_init() and later restricted + * to RO in arm_vm_prot_finalize(), which is called after this function. + * Here we only need to mark the underlying physical pages as PPL-owned to ensure + * they can't be allocated for other uses. We don't need a special xPRR + * protection index, as there is no PPL_RO index, and these pages are ultimately + * protected by KTRR/CTRR. Furthermore, use of PPL_RW for these pages would + * expose us to a functional issue on H11 devices where CTRR shifts the APRR + * lookup table index to USER_XO before APRR is applied, leading the hardware * to believe we are dealing with an user XO page upon performing a translation. - * - * Note that this workaround does not pose a security risk, because the RO - * page tables still remain read-only, due to KTRR/CTRR, and further protecting - * them would be unnecessary. */ monitor_start_pa = kvtophys((vm_offset_t)&ropagetable_begin); monitor_end_pa = monitor_start_pa + ((vm_offset_t)&ropagetable_end - (vm_offset_t)&ropagetable_begin); - pa_set_range_xprr_perm(monitor_start_pa, monitor_end_pa, XPRR_KERN_RW_PERM, XPRR_KERN_RW_PERM); + pa_set_range_monitor(monitor_start_pa, monitor_end_pa); monitor_start_pa = kvtophys(segPPLDATAB); monitor_end_pa = monitor_start_pa + segSizePPLDATA; @@ -4729,6 +5047,9 @@ pmap_static_allocations_done(void) * precaution. The real RW mappings are at a different location with guard pages. 
*/ pa_set_range_xprr_perm(pmap_stacks_start_pa, pmap_stacks_end_pa, XPRR_PPL_RW_PERM, XPRR_KERN_RO_PERM); + + /* Prevent remapping of the kernelcache */ + pmap_lockdown_kc(); } @@ -4767,7 +5088,7 @@ pmap_virtual_region( ) { boolean_t ret = FALSE; -#if __ARM64_PMAP_SUBPAGE_L1__ && __ARM_16K_PG__ +#if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR) if (region_select == 0) { /* * In this config, the bootstrap mappings should occupy their own L2 @@ -4780,7 +5101,11 @@ pmap_virtual_region( #else #error Unsupported configuration #endif +#if defined(ARM_LARGE_MEMORY) + *size = ((KERNEL_PMAP_HEAP_RANGE_START - *startp) & ~PAGE_MASK); +#else *size = ((VM_MAX_KERNEL_ADDRESS - *startp) & ~PAGE_MASK); +#endif ret = TRUE; } #else @@ -4887,7 +5212,12 @@ pmap_init( pmap_initialized = TRUE; - pmap_zone_init(); + /* + * Create the zone of physical maps + * and the physical-to-virtual entries. + */ + pmap_zone = zone_create_ext("pmap", sizeof(struct pmap), + ZC_ZFREE_CLEARMEM, ZONE_ID_PMAP, NULL); /* @@ -4898,17 +5228,15 @@ pmap_init( _vm_object_allocate(mem_size, pmap_object); pmap_object->copy_strategy = MEMORY_OBJECT_COPY_NONE; - pv_init(); - /* * The values of [hard_]maxproc may have been scaled, make sure - * they are still less than the value of MAX_ASID. + * they are still less than the value of pmap_max_asids. */ - if (maxproc > MAX_ASID) { - maxproc = MAX_ASID; + if ((uint32_t)maxproc > pmap_max_asids) { + maxproc = pmap_max_asids; } - if (hard_maxproc > MAX_ASID) { - hard_maxproc = MAX_ASID; + if ((uint32_t)hard_maxproc > pmap_max_asids) { + hard_maxproc = pmap_max_asids; } #if CONFIG_PGTRACE @@ -4946,21 +5274,6 @@ pmap_assert_free(ppnum_t ppnum) #endif -/* - * Initialize zones used by pmap. - */ -static void -pmap_zone_init( - void) -{ - /* - * Create the zone of physical maps - * and the physical-to-virtual entries. - */ - pmap_zone = zinit((vm_size_t) sizeof(struct pmap), (vm_size_t) sizeof(struct pmap) * 256, - PAGE_SIZE, "pmap"); -} - #if XNU_MONITOR MARK_AS_PMAP_TEXT static void pmap_ledger_alloc_init_internal(size_t size) @@ -4974,7 +5287,8 @@ pmap_ledger_alloc_init_internal(size_t size) size); } - if (size != sizeof(pmap_ledger_data_t)) { + if ((size > sizeof(pmap_ledger_data_t)) || + ((sizeof(pmap_ledger_data_t) - size) % sizeof(struct ledger_entry))) { panic("%s: size mismatch, expected %lu, " "size=%lu", __func__, PMAP_LEDGER_DATA_BYTES, @@ -5094,7 +5408,7 @@ pmap_ledger_alloc(void) ledger_t retval = NULL; while ((retval = pmap_ledger_alloc_ppl()) == NULL) { - pmap_alloc_page_for_ppl(); + pmap_alloc_page_for_ppl(0); } return retval; @@ -5133,6 +5447,20 @@ pmap_ledger_free(ledger_t ledger) } #endif /* XNU_MONITOR */ +static vm_size_t +pmap_root_alloc_size(pmap_t pmap) +{ +#if (__ARM_VMSA__ > 7) +#pragma unused(pmap) + const pt_attr_t * const pt_attr = pmap_get_pt_attr(pmap); + unsigned int root_level = pt_attr_root_level(pt_attr); + return ((pt_attr_ln_index_mask(pt_attr, root_level) >> pt_attr_ln_shift(pt_attr, root_level)) + 1) * sizeof(tt_entry_t); +#else + (void)pmap; + return PMAP_ROOT_ALLOC_SIZE; +#endif +} + /* * Create and return a physical map. 
* @@ -5149,7 +5477,8 @@ MARK_AS_PMAP_TEXT static pmap_t pmap_create_options_internal( ledger_t ledger, vm_map_size_t size, - unsigned int flags) + unsigned int flags, + kern_return_t *kr) { unsigned i; unsigned tte_index_max; @@ -5158,6 +5487,7 @@ pmap_create_options_internal( #if defined(HAS_APPLE_PAC) bool disable_jop = flags & PMAP_CREATE_DISABLE_JOP; #endif /* defined(HAS_APPLE_PAC) */ + kern_return_t local_kr = KERN_SUCCESS; /* * A software use-only map doesn't even need a pmap. @@ -5166,9 +5496,14 @@ pmap_create_options_internal( return PMAP_NULL; } + if (0 != (flags & ~PMAP_CREATE_KNOWN_FLAGS)) { + return PMAP_NULL; + } + #if XNU_MONITOR if ((p = pmap_alloc_pmap()) == PMAP_NULL) { - return PMAP_NULL; + local_kr = KERN_NO_SPACE; + goto pmap_create_fail; } if (ledger) { @@ -5181,12 +5516,16 @@ pmap_create_options_internal( * the translation table of the right size for the pmap. */ if ((p = (pmap_t) zalloc(pmap_zone)) == PMAP_NULL) { - return PMAP_NULL; + local_kr = KERN_RESOURCE_SHORTAGE; + goto pmap_create_fail; } #endif p->ledger = ledger; + + p->pmap_vm_map_cs_enforced = false; + if (flags & PMAP_CREATE_64BIT) { p->min = MACH_VM_MIN_ADDRESS; p->max = MACH_VM_MAX_ADDRESS; @@ -5194,7 +5533,6 @@ pmap_create_options_internal( p->min = VM_MIN_ADDRESS; p->max = VM_MAX_ADDRESS; } - #if defined(HAS_APPLE_PAC) p->disable_jop = disable_jop; #endif /* defined(HAS_APPLE_PAC) */ @@ -5211,36 +5549,42 @@ pmap_create_options_internal( p->nested_pmap = PMAP_NULL; #if ARM_PARAMETERIZED_PMAP + /* Default to the native pt_attr */ p->pmap_pt_attr = native_pt_attr; #endif /* ARM_PARAMETERIZED_PMAP */ +#if __ARM_MIXED_PAGE_SIZE__ + if (flags & PMAP_CREATE_FORCE_4K_PAGES) { + p->pmap_pt_attr = &pmap_pt_attr_4k; + } +#endif /* __ARM_MIXED_PAGE_SIZE__ */ if (!pmap_get_pt_ops(p)->alloc_id(p)) { + local_kr = KERN_NO_SPACE; goto id_alloc_fail; } - - - PMAP_LOCK_INIT(p); + pmap_lock_init(p); memset((void *) &p->stats, 0, sizeof(p->stats)); p->tt_entry_free = (tt_entry_t *)0; - tte_index_max = PMAP_ROOT_ALLOC_SIZE / sizeof(tt_entry_t); + tte_index_max = ((unsigned)pmap_root_alloc_size(p) / sizeof(tt_entry_t)); #if (__ARM_VMSA__ == 7) p->tte_index_max = tte_index_max; #endif #if XNU_MONITOR - p->tte = pmap_tt1_allocate(p, PMAP_ROOT_ALLOC_SIZE, PMAP_TT_ALLOCATE_NOWAIT); + p->tte = pmap_tt1_allocate(p, pmap_root_alloc_size(p), PMAP_TT_ALLOCATE_NOWAIT); #else - p->tte = pmap_tt1_allocate(p, PMAP_ROOT_ALLOC_SIZE, 0); + p->tte = pmap_tt1_allocate(p, pmap_root_alloc_size(p), 0); #endif if (!(p->tte)) { + local_kr = KERN_RESOURCE_SHORTAGE; goto tt1_alloc_fail; } p->ttep = ml_static_vtop((vm_offset_t)p->tte); - PMAP_TRACE(3, PMAP_CODE(PMAP__TTE), VM_KERNEL_ADDRHIDE(p), VM_KERNEL_ADDRHIDE(p->min), VM_KERNEL_ADDRHIDE(p->max), p->ttep); + PMAP_TRACE(4, PMAP_CODE(PMAP__TTE), VM_KERNEL_ADDRHIDE(p), VM_KERNEL_ADDRHIDE(p->min), VM_KERNEL_ADDRHIDE(p->max), p->ttep); /* nullify the translation table */ for (i = 0; i < tte_index_max; i++) { @@ -5252,8 +5596,7 @@ pmap_create_options_internal( /* * initialize the rest of the structure */ - p->nested_region_grand_addr = 0x0ULL; - p->nested_region_subord_addr = 0x0ULL; + p->nested_region_addr = 0x0ULL; p->nested_region_size = 0x0ULL; p->nested_region_asid_bitmap = NULL; p->nested_region_asid_bitmap_size = 0x0UL; @@ -5289,6 +5632,14 @@ id_alloc_fail: } #else zfree(pmap_zone, p); +#endif +pmap_create_fail: +#if XNU_MONITOR + pmap_pin_kernel_pages((vm_offset_t)kr, sizeof(*kr)); +#endif + *kr = local_kr; +#if XNU_MONITOR + pmap_unpin_kernel_pages((vm_offset_t)kr, sizeof(*kr)); #endif 
return PMAP_NULL; } @@ -5300,21 +5651,24 @@ pmap_create_options( unsigned int flags) { pmap_t pmap; + kern_return_t kr = KERN_SUCCESS; PMAP_TRACE(1, PMAP_CODE(PMAP__CREATE) | DBG_FUNC_START, size, flags); ledger_reference(ledger); #if XNU_MONITOR - /* - * TODO: It should be valid for pmap_create_options_internal to fail; we could - * be out of ASIDs. - */ - while ((pmap = pmap_create_options_ppl(ledger, size, flags)) == PMAP_NULL) { - pmap_alloc_page_for_ppl(); + for (;;) { + pmap = pmap_create_options_ppl(ledger, size, flags, &kr); + if (kr != KERN_RESOURCE_SHORTAGE) { + break; + } + assert(pmap == PMAP_NULL); + pmap_alloc_page_for_ppl(0); + kr = KERN_SUCCESS; } #else - pmap = pmap_create_options_internal(ledger, size, flags); + pmap = pmap_create_options_internal(ledger, size, flags, &kr); #endif if (pmap == PMAP_NULL) { @@ -5393,6 +5747,54 @@ pmap_set_process( } #endif /* MACH_ASSERT */ +#if (__ARM_VMSA__ > 7) +/* + * pmap_deallocate_all_leaf_tts: + * + * Recursive function for deallocating all leaf TTEs. Walks the given TT, + * removing and deallocating all TTEs. + */ +MARK_AS_PMAP_TEXT static void +pmap_deallocate_all_leaf_tts(pmap_t pmap, tt_entry_t * first_ttep, unsigned level) +{ + tt_entry_t tte = ARM_TTE_EMPTY; + tt_entry_t * ttep = NULL; + tt_entry_t * last_ttep = NULL; + + const pt_attr_t * const pt_attr = pmap_get_pt_attr(pmap); + + assert(level < pt_attr_leaf_level(pt_attr)); + + last_ttep = &first_ttep[ttn_index(pmap, pt_attr, ~0, level)]; + + for (ttep = first_ttep; ttep <= last_ttep; ttep++) { + tte = *ttep; + + if (!(tte & ARM_TTE_VALID)) { + continue; + } + + if ((tte & ARM_TTE_TYPE_MASK) == ARM_TTE_TYPE_BLOCK) { + panic("%s: found block mapping, ttep=%p, tte=%p, " + "pmap=%p, first_ttep=%p, level=%u", + __FUNCTION__, ttep, (void *)tte, + pmap, first_ttep, level); + } + + /* Must be valid, type table */ + if (level < pt_attr_twig_level(pt_attr)) { + /* If we haven't reached the twig level, recurse to the next level. */ + pmap_deallocate_all_leaf_tts(pmap, (tt_entry_t *)phystokv((tte) & ARM_TTE_TABLE_MASK), level + 1); + } + + /* Remove the TTE. */ + pmap_lock(pmap); + pmap_tte_deallocate(pmap, ttep, level); + pmap_unlock(pmap); + } +} +#endif /* (__ARM_VMSA__ > 7) */ + /* * We maintain stats and ledgers so that a task's physical footprint is: * phys_footprint = ((internal - alternate_accounting) @@ -5404,7 +5806,6 @@ pmap_set_process( * where "alternate_accounting" includes "iokit" and "purgeable" memory. */ - /* * Retire the given physical map from service. 
* Should only be called if the map contains @@ -5431,8 +5832,6 @@ pmap_destroy_internal( panic("pmap %p: attempt to destroy kernel pmap", pmap); } - pt_entry_t *ttep; - #if (__ARM_VMSA__ > 7) pmap_unmap_sharedpage(pmap); #endif /* (__ARM_VMSA__ > 7) */ @@ -5456,30 +5855,18 @@ pmap_destroy_internal( */ #if (__ARM_VMSA__ == 7) unsigned int i = 0; + pt_entry_t *ttep; - PMAP_LOCK(pmap); + pmap_lock(pmap); for (i = 0; i < pmap->tte_index_max; i++) { ttep = &pmap->tte[i]; if ((*ttep & ARM_TTE_TYPE_MASK) == ARM_TTE_TYPE_TABLE) { pmap_tte_deallocate(pmap, ttep, PMAP_TT_L1_LEVEL); } } - PMAP_UNLOCK(pmap); + pmap_unlock(pmap); #else /* (__ARM_VMSA__ == 7) */ - vm_map_address_t c; - unsigned int level; - - for (level = pt_attr->pta_max_level - 1; level >= pt_attr->pta_root_level; level--) { - for (c = pmap->min; c < pmap->max; c += pt_attr_ln_size(pt_attr, level)) { - ttep = pmap_ttne(pmap, level, c); - - if ((ttep != PT_ENTRY_NULL) && (*ttep & ARM_TTE_TYPE_MASK) == ARM_TTE_TYPE_TABLE) { - PMAP_LOCK(pmap); - pmap_tte_deallocate(pmap, ttep, level); - PMAP_UNLOCK(pmap); - } - } - } + pmap_deallocate_all_leaf_tts(pmap, pmap->tte, pt_attr_root_level(pt_attr)); #endif /* (__ARM_VMSA__ == 7) */ @@ -5489,7 +5876,7 @@ pmap_destroy_internal( pmap_tt1_deallocate(pmap, pmap->tte, pmap->tte_index_max * sizeof(tt_entry_t), 0); pmap->tte_index_max = 0; #else /* (__ARM_VMSA__ == 7) */ - pmap_tt1_deallocate(pmap, pmap->tte, PMAP_ROOT_ALLOC_SIZE, 0); + pmap_tt1_deallocate(pmap, pmap->tte, pmap_root_alloc_size(pmap), 0); #endif /* (__ARM_VMSA__ == 7) */ pmap->tte = (tt_entry_t *) NULL; pmap->ttep = 0; @@ -5497,18 +5884,26 @@ pmap_destroy_internal( assert((tt_free_entry_t*)pmap->tt_entry_free == NULL); - pmap_get_pt_ops(pmap)->flush_tlb_async(pmap); - sync_tlb_flush(); + if (__improbable(pmap->nested)) { + pmap_get_pt_ops(pmap)->flush_tlb_region_async(pmap->nested_region_addr, pmap->nested_region_size, pmap); + sync_tlb_flush(); + } else { + pmap_get_pt_ops(pmap)->flush_tlb_async(pmap); + sync_tlb_flush(); + /* return its asid to the pool */ + pmap_get_pt_ops(pmap)->free_id(pmap); + /* release the reference we hold on the nested pmap */ + pmap_destroy_internal(pmap->nested_pmap); + } - /* return its asid to the pool */ - pmap_get_pt_ops(pmap)->free_id(pmap); pmap_check_ledgers(pmap); if (pmap->nested_region_asid_bitmap) { #if XNU_MONITOR pmap_pages_free(kvtophys((vm_offset_t)(pmap->nested_region_asid_bitmap)), PAGE_SIZE); #else - kfree(pmap->nested_region_asid_bitmap, pmap->nested_region_asid_bitmap_size * sizeof(unsigned int)); + kheap_free(KHEAP_DATA_BUFFERS, pmap->nested_region_asid_bitmap, + pmap->nested_region_asid_bitmap_size * sizeof(unsigned int)); #endif } @@ -5517,8 +5912,10 @@ pmap_destroy_internal( pmap_ledger_release(pmap->ledger); } + pmap_lock_destroy(pmap); pmap_free_pmap(pmap); #else + pmap_lock_destroy(pmap); zfree(pmap_zone, pmap); #endif } @@ -5584,6 +5981,10 @@ pmap_tt1_allocate( vm_address_t va_end; kern_return_t ret; + if ((size < PAGE_SIZE) && (size != PMAP_ROOT_ALLOC_SIZE)) { + size = PAGE_SIZE; + } + pmap_simple_lock(&tt1_lock); if ((size == PAGE_SIZE) && (free_page_size_tt_count != 0)) { free_page_size_tt_count--; @@ -5606,7 +6007,7 @@ pmap_tt1_allocate( return (tt_entry_t *)tt1; } - ret = pmap_pages_alloc(&pa, (unsigned)((size < PAGE_SIZE)? PAGE_SIZE : size), ((option & PMAP_TT_ALLOCATE_NOWAIT)? PMAP_PAGES_ALLOCATE_NOWAIT : 0)); + ret = pmap_pages_alloc_zeroed(&pa, (unsigned)((size < PAGE_SIZE)? PAGE_SIZE : size), ((option & PMAP_TT_ALLOCATE_NOWAIT)? 
PMAP_PAGES_ALLOCATE_NOWAIT : 0)); if (ret == KERN_RESOURCE_SHORTAGE) { return (tt_entry_t *)0; @@ -5653,6 +6054,10 @@ pmap_tt1_deallocate( { tt_free_entry_t *tt_entry; + if ((size < PAGE_SIZE) && (size != PMAP_ROOT_ALLOC_SIZE)) { + size = PAGE_SIZE; + } + tt_entry = (tt_free_entry_t *)tt; assert(not_in_kdp); pmap_simple_lock(&tt1_lock); @@ -5731,15 +6136,17 @@ pmap_tt_allocate( pmap_paddr_t pa; *ttp = NULL; - PMAP_LOCK(pmap); + pmap_lock(pmap); if ((tt_free_entry_t *)pmap->tt_entry_free != NULL) { - tt_free_entry_t *tt_free_next; + tt_free_entry_t *tt_free_cur, *tt_free_next; - tt_free_next = ((tt_free_entry_t *)pmap->tt_entry_free)->next; - *ttp = (tt_entry_t *)pmap->tt_entry_free; + tt_free_cur = ((tt_free_entry_t *)pmap->tt_entry_free); + tt_free_next = tt_free_cur->next; + tt_free_cur->next = NULL; + *ttp = (tt_entry_t *)tt_free_cur; pmap->tt_entry_free = (tt_entry_t *)tt_free_next; } - PMAP_UNLOCK(pmap); + pmap_unlock(pmap); if (*ttp == NULL) { pt_desc_t *ptdp; @@ -5747,14 +6154,14 @@ pmap_tt_allocate( /* * Allocate a VM page for the level x page table entries. */ - while (pmap_pages_alloc(&pa, PAGE_SIZE, ((options & PMAP_TT_ALLOCATE_NOWAIT)? PMAP_PAGES_ALLOCATE_NOWAIT : 0)) != KERN_SUCCESS) { + while (pmap_pages_alloc_zeroed(&pa, PAGE_SIZE, ((options & PMAP_TT_ALLOCATE_NOWAIT)? PMAP_PAGES_ALLOCATE_NOWAIT : 0)) != KERN_SUCCESS) { if (options & PMAP_OPTIONS_NOWAIT) { return KERN_RESOURCE_SHORTAGE; } VM_PAGE_WAIT(); } - while ((ptdp = ptd_alloc(pmap, false)) == NULL) { + while ((ptdp = ptd_alloc(pmap)) == NULL) { if (options & PMAP_OPTIONS_NOWAIT) { pmap_pages_free(pa, PAGE_SIZE); return KERN_RESOURCE_SHORTAGE; @@ -5762,7 +6169,7 @@ pmap_tt_allocate( VM_PAGE_WAIT(); } - if (level < PMAP_TT_MAX_LEVEL) { + if (level < pt_attr_leaf_level(pmap_get_pt_attr(pmap))) { OSAddAtomic64(1, &alloc_ttepages_count); OSAddAtomic(1, (pmap == kernel_pmap ? 
&inuse_kernel_ttepages_count : &inuse_user_ttepages_count)); } else { @@ -5776,20 +6183,19 @@ pmap_tt_allocate( pvh_update_head_unlocked(pai_to_pvh(pa_index(pa)), ptdp, PVH_TYPE_PTDP); - __unreachable_ok_push - if (TEST_PAGE_RATIO_4) { + uint64_t pmap_page_size = pt_attr_page_size(pmap_get_pt_attr(pmap)); + if (PAGE_SIZE > pmap_page_size) { vm_address_t va; vm_address_t va_end; - PMAP_LOCK(pmap); + pmap_lock(pmap); - for (va_end = phystokv(pa) + PAGE_SIZE, va = phystokv(pa) + ARM_PGBYTES; va < va_end; va = va + ARM_PGBYTES) { + for (va_end = phystokv(pa) + PAGE_SIZE, va = phystokv(pa) + pmap_page_size; va < va_end; va = va + pmap_page_size) { ((tt_free_entry_t *)va)->next = (tt_free_entry_t *)pmap->tt_entry_free; pmap->tt_entry_free = (tt_entry_t *)va; } - PMAP_UNLOCK(pmap); + pmap_unlock(pmap); } - __unreachable_ok_pop *ttp = (tt_entry_t *)phystokv(pa); } @@ -5809,25 +6215,29 @@ pmap_tt_deallocate( unsigned int level) { pt_desc_t *ptdp; + ptd_info_t *ptd_info; unsigned pt_acc_cnt; - unsigned i, max_pt_index = PAGE_RATIO; + unsigned i; vm_offset_t free_page = 0; + const pt_attr_t * const pt_attr = pmap_get_pt_attr(pmap); + unsigned max_pt_index = PAGE_SIZE / pt_attr_page_size(pt_attr); - PMAP_LOCK(pmap); + pmap_lock(pmap); ptdp = ptep_get_ptd((vm_offset_t)ttp); + ptd_info = ptd_get_info(ptdp, ttp); - ptdp->ptd_info[ARM_PT_DESC_INDEX(ttp)].va = (vm_offset_t)-1; + ptd_info->va = (vm_offset_t)-1; - if ((level < PMAP_TT_MAX_LEVEL) && (ptdp->ptd_info[ARM_PT_DESC_INDEX(ttp)].refcnt == PT_DESC_REFCOUNT)) { - ptdp->ptd_info[ARM_PT_DESC_INDEX(ttp)].refcnt = 0; + if ((level < pt_attr_leaf_level(pt_attr)) && (ptd_info->refcnt == PT_DESC_REFCOUNT)) { + ptd_info->refcnt = 0; } - if (ptdp->ptd_info[ARM_PT_DESC_INDEX(ttp)].refcnt != 0) { - panic("pmap_tt_deallocate(): ptdp %p, count %d\n", ptdp, ptdp->ptd_info[ARM_PT_DESC_INDEX(ttp)].refcnt); + if (ptd_info->refcnt != 0) { + panic("pmap_tt_deallocate(): ptdp %p, count %d\n", ptdp, ptd_info->refcnt); } - ptdp->ptd_info[ARM_PT_DESC_INDEX(ttp)].refcnt = 0; + ptd_info->refcnt = 0; for (i = 0, pt_acc_cnt = 0; i < max_pt_index; i++) { pt_acc_cnt += ptdp->ptd_info[i].refcnt; @@ -5873,13 +6283,13 @@ pmap_tt_deallocate( pmap->tt_entry_free = ttp; } - PMAP_UNLOCK(pmap); + pmap_unlock(pmap); if (free_page != 0) { ptd_deallocate(ptep_get_ptd((vm_offset_t)free_page)); *(pt_desc_t **)pai_to_pvh(pa_index(ml_static_vtop(free_page))) = NULL; pmap_pages_free(ml_static_vtop(free_page), PAGE_SIZE); - if (level < PMAP_TT_MAX_LEVEL) { + if (level < pt_attr_leaf_level(pt_attr)) { OSAddAtomic(-1, (pmap == kernel_pmap ? &inuse_kernel_ttepages_count : &inuse_user_ttepages_count)); } else { OSAddAtomic(-1, (pmap == kernel_pmap ? &inuse_kernel_ptepages_count : &inuse_user_ptepages_count)); @@ -5889,6 +6299,16 @@ pmap_tt_deallocate( } } +/** + * Safely clear out a translation table entry. + * + * @note If the TTE to clear out points to a leaf table, then that leaf table + * must have a refcnt of zero before the TTE can be removed. + * + * @param pmap The pmap containing the page table whose TTE is being removed. + * @param ttep Pointer to the TTE that should be cleared out. + * @param level The level of the page table that contains the TTE to be removed. 
+ */ static void pmap_tte_remove( pmap_t pmap, @@ -5897,16 +6317,17 @@ pmap_tte_remove( { tt_entry_t tte = *ttep; - if (tte == 0) { - panic("pmap_tte_deallocate(): null tt_entry ttep==%p\n", ttep); + if (__improbable(tte == 0)) { + panic("%s: null tt_entry ttep==%p", __func__, ttep); } - if (((level + 1) == PMAP_TT_MAX_LEVEL) && (tte_get_ptd(tte)->ptd_info[ARM_PT_DESC_INDEX(ttetokv(*ttep))].refcnt != 0)) { - panic("pmap_tte_deallocate(): pmap=%p ttep=%p ptd=%p refcnt=0x%x \n", pmap, ttep, - tte_get_ptd(tte), (tte_get_ptd(tte)->ptd_info[ARM_PT_DESC_INDEX(ttetokv(*ttep))].refcnt)); + if (__improbable((level == pt_attr_twig_level(pmap_get_pt_attr(pmap))) && + (ptep_get_info((pt_entry_t*)ttetokv(tte))->refcnt != 0))) { + panic("%s: non-zero pagetable refcount: pmap=%p ttep=%p ptd=%p refcnt=0x%x", __func__, + pmap, ttep, tte_get_ptd(tte), ptep_get_info((pt_entry_t*)ttetokv(tte))->refcnt); } -#if (__ARM_VMSA__ == 7) +#if (__ARM_VMSA__ == 7) { tt_entry_t *ttep_4M = (tt_entry_t *) ((vm_offset_t)ttep & 0xFFFFFFF0); unsigned i; @@ -5919,9 +6340,26 @@ pmap_tte_remove( #else *ttep = (tt_entry_t) 0; FLUSH_PTE_STRONG(ttep); -#endif +#endif /* (__ARM_VMSA__ == 7) */ } +/** + * Given a pointer to an entry within a `level` page table, delete the + * page table at `level` + 1 that is represented by that entry. For instance, + * to delete an unused L3 table, `ttep` would be a pointer to the L2 entry that + * contains the PA of the L3 table, and `level` would be "2". + * + * @note If the table getting deallocated is a leaf table, then that leaf table + * must have a refcnt of zero before getting deallocated. All other levels + * must have a refcnt of PT_DESC_REFCOUNT in their page table descriptor. + * + * @param pmap The pmap that owns the page table to be deallocated. + * @param ttep Pointer to the `level` TTE to remove. + * @param level The level of the table that contains an entry pointing to the + * table to be removed. The deallocated page table will be a + * `level` + 1 table (so if `level` is 2, then an L3 table will be + * deleted). 
+ */ static void pmap_tte_deallocate( pmap_t pmap, @@ -5931,43 +6369,43 @@ pmap_tte_deallocate( pmap_paddr_t pa; tt_entry_t tte; - PMAP_ASSERT_LOCKED(pmap); + pmap_assert_locked_w(pmap); tte = *ttep; -#if MACH_ASSERT +#if MACH_ASSERT if (tte_get_ptd(tte)->pmap != pmap) { - panic("pmap_tte_deallocate(): ptd=%p ptd->pmap=%p pmap=%p \n", - tte_get_ptd(tte), tte_get_ptd(tte)->pmap, pmap); + panic("%s: Passed in pmap doesn't own the page table to be deleted ptd=%p ptd->pmap=%p pmap=%p", + __func__, tte_get_ptd(tte), tte_get_ptd(tte)->pmap, pmap); } -#endif +#endif /* MACH_ASSERT */ pmap_tte_remove(pmap, ttep, level); if ((tte & ARM_TTE_TYPE_MASK) == ARM_TTE_TYPE_TABLE) { -#if MACH_ASSERT - { - pt_entry_t *pte_p = ((pt_entry_t *) (ttetokv(tte) & ~ARM_PGMASK)); - unsigned i; - - for (i = 0; i < (ARM_PGBYTES / sizeof(*pte_p)); i++, pte_p++) { - if (ARM_PTE_IS_COMPRESSED(*pte_p, pte_p)) { - panic("pmap_tte_deallocate: tte=0x%llx pmap=%p, pte_p=%p, pte=0x%llx compressed\n", - (uint64_t)tte, pmap, pte_p, (uint64_t)(*pte_p)); - } else if (((*pte_p) & ARM_PTE_TYPE_MASK) != ARM_PTE_TYPE_FAULT) { - panic("pmap_tte_deallocate: tte=0x%llx pmap=%p, pte_p=%p, pte=0x%llx\n", - (uint64_t)tte, pmap, pte_p, (uint64_t)(*pte_p)); - } + uint64_t pmap_page_size = pt_attr_page_size(pmap_get_pt_attr(pmap)); +#if MACH_ASSERT + pt_entry_t *pte_p = ((pt_entry_t *) (ttetokv(tte) & ~(pmap_page_size - 1))); + + for (unsigned i = 0; i < (pmap_page_size / sizeof(*pte_p)); i++, pte_p++) { + if (__improbable(ARM_PTE_IS_COMPRESSED(*pte_p, pte_p))) { + panic_plain("%s: Found compressed mapping in soon to be deleted " + "L%d table tte=0x%llx pmap=%p, pte_p=%p, pte=0x%llx", + __func__, level + 1, (uint64_t)tte, pmap, pte_p, (uint64_t)(*pte_p)); + } else if (__improbable(((*pte_p) & ARM_PTE_TYPE_MASK) != ARM_PTE_TYPE_FAULT)) { + panic_plain("%s: Found valid mapping in soon to be deleted L%d " + "table tte=0x%llx pmap=%p, pte_p=%p, pte=0x%llx", + __func__, level + 1, (uint64_t)tte, pmap, pte_p, (uint64_t)(*pte_p)); } } -#endif - PMAP_UNLOCK(pmap); +#endif /* MACH_ASSERT */ + pmap_unlock(pmap); /* Clear any page offset: we mean to free the whole page, but armv7 TTEs may only be * aligned on 1K boundaries. We clear the surrounding "chunk" of 4 TTEs above. 
*/ - pa = tte_to_pa(tte) & ~ARM_PGMASK; + pa = tte_to_pa(tte) & ~(pmap_page_size - 1); pmap_tt_deallocate(pmap, (tt_entry_t *) phystokv(pa), level + 1); - PMAP_LOCK(pmap); + pmap_lock(pmap); } } @@ -5997,7 +6435,8 @@ pmap_remove_range( int num_changed = pmap_remove_range_options(pmap, va, bpte, epte, rmv_cnt, &need_strong_sync, PMAP_OPTIONS_REMOVE); if (num_changed > 0) { - PMAP_UPDATE_TLBS(pmap, va, va + (PAGE_SIZE * (epte - bpte)), need_strong_sync); + PMAP_UPDATE_TLBS(pmap, va, + va + (pt_attr_page_size(pmap_get_pt_attr(pmap)) * (epte - bpte)), need_strong_sync); } return num_changed; } @@ -6055,18 +6494,19 @@ pmap_remove_pv( pv_entry_t **pv_h, **pve_pp; pv_entry_t *pve_p; + ASSERT_NOT_HIBERNATING(); ASSERT_PVH_LOCKED(pai); pv_h = pai_to_pvh(pai); vm_offset_t pvh_flags = pvh_get_flags(pv_h); #if XNU_MONITOR - if (pvh_flags & PVH_FLAG_LOCKDOWN) { + if (__improbable(pvh_flags & PVH_FLAG_LOCKDOWN)) { panic("%d is locked down (%#lx), cannot remove", pai, pvh_flags); } #endif if (pvh_test_type(pv_h, PVH_TYPE_PTEP)) { - if (__builtin_expect((cpte != pvh_ptep(pv_h)), 0)) { + if (__improbable((cpte != pvh_ptep(pv_h)))) { panic("%s: cpte=%p does not match pv_h=%p (%p), pai=0x%x\n", __func__, cpte, pv_h, pvh_ptep(pv_h), pai); } if (IS_ALTACCT_PAGE(pai, PV_ENTRY_NULL)) { @@ -6094,7 +6534,7 @@ pmap_remove_pv( pve_p = PVE_NEXT_PTR(pve_next(pve_p)); } - if (__builtin_expect((pve_p == PV_ENTRY_NULL), 0)) { + if (__improbable((pve_p == PV_ENTRY_NULL))) { panic("%s: cpte=%p (pai=0x%x) not in pv_h=%p\n", __func__, cpte, pai, pv_h); } @@ -6127,7 +6567,7 @@ pmap_remove_pv( } pvh_remove(pv_h, pve_pp, pve_p); - pv_free(pve_p); + pv_free_entry(pve_p); if (!pvh_test_type(pv_h, PVH_TYPE_NULL)) { pvh_set_flags(pv_h, pvh_flags); } @@ -6162,10 +6602,10 @@ pmap_remove_range_options( int num_alt_internal; uint64_t num_compressed, num_alt_compressed; - PMAP_ASSERT_LOCKED(pmap); + pmap_assert_locked_w(pmap); const pt_attr_t * const pt_attr = pmap_get_pt_attr(pmap); - uint64_t pmap_page_size = pt_attr_leaf_size(pt_attr); + uint64_t pmap_page_size = pt_attr_page_size(pt_attr); if (__improbable((uintptr_t)epte > (((uintptr_t)bpte + pmap_page_size) & ~(pmap_page_size - 1)))) { panic("%s: PTE range [%p, %p) in pmap %p crosses page table boundary", __func__, bpte, epte, pmap); @@ -6182,7 +6622,7 @@ pmap_remove_range_options( num_alt_compressed = 0; for (cpte = bpte; cpte < epte; - cpte += PAGE_SIZE / ARM_PGBYTES, va += PAGE_SIZE) { + cpte += 1, va += pmap_page_size) { pt_entry_t spte; boolean_t managed = FALSE; @@ -6218,8 +6658,8 @@ pmap_remove_range_options( * our "compressed" markers, * so let's update it here. 
*/ - if (OSAddAtomic16(-1, (SInt16 *) &(ptep_get_ptd(cpte)->ptd_info[ARM_PT_DESC_INDEX(cpte)].refcnt)) <= 0) { - panic("pmap_remove_range_options: over-release of ptdp %p for pte %p\n", ptep_get_ptd(cpte), cpte); + if (OSAddAtomic16(-1, (SInt16 *) &(ptep_get_info(cpte)->refcnt)) <= 0) { + panic("pmap_remove_range_options: over-release of ptdp %p for pte %p", ptep_get_ptd(cpte), cpte); } spte = *cpte; } @@ -6231,12 +6671,14 @@ pmap_remove_range_options( //assert(!ARM_PTE_IS_COMPRESSED(spte)); pa = pte_to_pa(spte); if (!pa_valid(pa)) { -#if XNU_MONITOR || HAS_MILD_DSB +#if XNU_MONITOR unsigned int cacheattr = pmap_cache_attributes((ppnum_t)atop(pa)); #endif #if XNU_MONITOR - if (!pmap_ppl_disable && (cacheattr & PP_ATTR_MONITOR)) { - panic("%s: attempt to remove mapping of PPL-protected I/O address 0x%llx", __func__, (uint64_t)pa); + if (__improbable((cacheattr & PP_ATTR_MONITOR) && + (pte_to_xprr_perm(spte) != XPRR_KERN_RO_PERM) && !pmap_ppl_disable)) { + panic("%s: attempt to remove mapping of writable PPL-protected I/O address 0x%llx", + __func__, (uint64_t)pa); } #endif break; @@ -6268,8 +6710,8 @@ pmap_remove_range_options( assertf((*cpte & ARM_PTE_TYPE_VALID) == ARM_PTE_TYPE, "invalid pte %p (=0x%llx)", cpte, (uint64_t)*cpte); #if MACH_ASSERT if (managed && (pmap != kernel_pmap) && (ptep_get_va(cpte) != va)) { - panic("pmap_remove_range_options(): cpte=%p ptd=%p pte=0x%llx va=0x%llx\n", - cpte, ptep_get_ptd(cpte), (uint64_t)*cpte, (uint64_t)va); + panic("pmap_remove_range_options(): VA mismatch: cpte=%p ptd=%p pte=0x%llx va=0x%llx, cpte va=0x%llx", + cpte, ptep_get_ptd(cpte), (uint64_t)*cpte, (uint64_t)va, (uint64_t)ptep_get_va(cpte)); } #endif WRITE_PTE_FAST(cpte, ARM_PTE_TYPE_FAULT); @@ -6280,8 +6722,8 @@ pmap_remove_range_options( (pmap != kernel_pmap)) { assertf(!ARM_PTE_IS_COMPRESSED(spte, cpte), "unexpected compressed pte %p (=0x%llx)", cpte, (uint64_t)spte); assertf((spte & ARM_PTE_TYPE_VALID) == ARM_PTE_TYPE, "invalid pte %p (=0x%llx)", cpte, (uint64_t)spte); - if (OSAddAtomic16(-1, (SInt16 *) &(ptep_get_ptd(cpte)->ptd_info[ARM_PT_DESC_INDEX(cpte)].refcnt)) <= 0) { - panic("pmap_remove_range_options: over-release of ptdp %p for pte %p\n", ptep_get_ptd(cpte), cpte); + if (OSAddAtomic16(-1, (SInt16 *) &(ptep_get_info(cpte)->refcnt)) <= 0) { + panic("pmap_remove_range_options: over-release of ptdp %p for pte %p", ptep_get_ptd(cpte), cpte); } if (rmv_cnt) { (*rmv_cnt)++; @@ -6289,7 +6731,7 @@ pmap_remove_range_options( } if (pte_is_wired(spte)) { - pte_set_wired(cpte, 0); + pte_set_wired(pmap, cpte, 0); num_unwired++; } /* @@ -6313,104 +6755,56 @@ pmap_remove_range_options( * Update the counts */ OSAddAtomic(-num_removed, (SInt32 *) &pmap->stats.resident_count); - pmap_ledger_debit(pmap, task_ledgers.phys_mem, machine_ptob(num_removed)); + pmap_ledger_debit(pmap, task_ledgers.phys_mem, num_removed * pmap_page_size * PAGE_RATIO); if (pmap != kernel_pmap) { - /* sanity checks... 
*/ -#if MACH_ASSERT - if (pmap->stats.internal < num_internal) { - if ((!pmap_stats_assert || - !pmap->pmap_stats_assert)) { - printf("%d[%s] pmap_remove_range_options(%p,0x%llx,%p,%p,0x%x): num_internal=%d num_removed=%d num_unwired=%d num_external=%d num_reusable=%d num_compressed=%lld num_alt_internal=%d num_alt_compressed=%lld num_pte_changed=%d stats.internal=%d stats.reusable=%d\n", - pmap->pmap_pid, - pmap->pmap_procname, - pmap, - (uint64_t) va, - bpte, - epte, - options, - num_internal, - num_removed, - num_unwired, - num_external, - num_reusable, - num_compressed, - num_alt_internal, - num_alt_compressed, - num_pte_changed, - pmap->stats.internal, - pmap->stats.reusable); - } else { - panic("%d[%s] pmap_remove_range_options(%p,0x%llx,%p,%p,0x%x): num_internal=%d num_removed=%d num_unwired=%d num_external=%d num_reusable=%d num_compressed=%lld num_alt_internal=%d num_alt_compressed=%lld num_pte_changed=%d stats.internal=%d stats.reusable=%d", - pmap->pmap_pid, - pmap->pmap_procname, - pmap, - (uint64_t) va, - bpte, - epte, - options, - num_internal, - num_removed, - num_unwired, - num_external, - num_reusable, - num_compressed, - num_alt_internal, - num_alt_compressed, - num_pte_changed, - pmap->stats.internal, - pmap->stats.reusable); - } - } -#endif /* MACH_ASSERT */ - PMAP_STATS_ASSERTF(pmap->stats.external >= num_external, - pmap, - "pmap=%p num_external=%d stats.external=%d", - pmap, num_external, pmap->stats.external); - PMAP_STATS_ASSERTF(pmap->stats.internal >= num_internal, - pmap, - "pmap=%p num_internal=%d stats.internal=%d num_reusable=%d stats.reusable=%d", - pmap, - num_internal, pmap->stats.internal, - num_reusable, pmap->stats.reusable); - PMAP_STATS_ASSERTF(pmap->stats.reusable >= num_reusable, - pmap, - "pmap=%p num_internal=%d stats.internal=%d num_reusable=%d stats.reusable=%d", - pmap, - num_internal, pmap->stats.internal, - num_reusable, pmap->stats.reusable); - PMAP_STATS_ASSERTF(pmap->stats.compressed >= num_compressed, - pmap, - "pmap=%p num_compressed=%lld num_alt_compressed=%lld stats.compressed=%lld", - pmap, num_compressed, num_alt_compressed, - pmap->stats.compressed); - /* update pmap stats... 
*/ OSAddAtomic(-num_unwired, (SInt32 *) &pmap->stats.wired_count); if (num_external) { - OSAddAtomic(-num_external, &pmap->stats.external); + __assert_only int32_t orig_external = OSAddAtomic(-num_external, &pmap->stats.external); + PMAP_STATS_ASSERTF(orig_external >= num_external, + pmap, + "pmap=%p bpte=%p epte=%p num_external=%d stats.external=%d", + pmap, bpte, epte, num_external, orig_external); } if (num_internal) { - OSAddAtomic(-num_internal, &pmap->stats.internal); + __assert_only int32_t orig_internal = OSAddAtomic(-num_internal, &pmap->stats.internal); + PMAP_STATS_ASSERTF(orig_internal >= num_internal, + pmap, + "pmap=%p bpte=%p epte=%p num_internal=%d stats.internal=%d num_reusable=%d stats.reusable=%d", + pmap, bpte, epte, + num_internal, orig_internal, + num_reusable, pmap->stats.reusable); } if (num_reusable) { - OSAddAtomic(-num_reusable, &pmap->stats.reusable); + __assert_only int32_t orig_reusable = OSAddAtomic(-num_reusable, &pmap->stats.reusable); + PMAP_STATS_ASSERTF(orig_reusable >= num_reusable, + pmap, + "pmap=%p bpte=%p epte=%p num_internal=%d stats.internal=%d num_reusable=%d stats.reusable=%d", + pmap, bpte, epte, + num_internal, pmap->stats.internal, + num_reusable, orig_reusable); } if (num_compressed) { - OSAddAtomic64(-num_compressed, &pmap->stats.compressed); + __assert_only uint64_t orig_compressed = OSAddAtomic64(-num_compressed, &pmap->stats.compressed); + PMAP_STATS_ASSERTF(orig_compressed >= num_compressed, + pmap, + "pmap=%p bpte=%p epte=%p num_compressed=%lld num_alt_compressed=%lld stats.compressed=%lld", + pmap, bpte, epte, num_compressed, num_alt_compressed, + orig_compressed); } /* ... and ledgers */ - pmap_ledger_debit(pmap, task_ledgers.wired_mem, machine_ptob(num_unwired)); - pmap_ledger_debit(pmap, task_ledgers.internal, machine_ptob(num_internal)); - pmap_ledger_debit(pmap, task_ledgers.alternate_accounting, machine_ptob(num_alt_internal)); - pmap_ledger_debit(pmap, task_ledgers.alternate_accounting_compressed, machine_ptob(num_alt_compressed)); - pmap_ledger_debit(pmap, task_ledgers.internal_compressed, machine_ptob(num_compressed)); + pmap_ledger_debit(pmap, task_ledgers.wired_mem, (num_unwired) * pmap_page_size * PAGE_RATIO); + pmap_ledger_debit(pmap, task_ledgers.internal, (num_internal) * pt_attr_page_size(pt_attr) * PAGE_RATIO); + pmap_ledger_debit(pmap, task_ledgers.alternate_accounting, (num_alt_internal) * pt_attr_page_size(pt_attr) * PAGE_RATIO); + pmap_ledger_debit(pmap, task_ledgers.alternate_accounting_compressed, (num_alt_compressed) * pt_attr_page_size(pt_attr) * PAGE_RATIO); + pmap_ledger_debit(pmap, task_ledgers.internal_compressed, (num_compressed) * pt_attr_page_size(pt_attr) * PAGE_RATIO); /* make needed adjustments to phys_footprint */ pmap_ledger_debit(pmap, task_ledgers.phys_footprint, - machine_ptob((num_internal - + ((num_internal - num_alt_internal) + (num_compressed - - num_alt_compressed))); + num_alt_compressed)) * pmap_page_size * PAGE_RATIO); } /* flush the ptable entries we have written */ @@ -6461,7 +6855,7 @@ pmap_remove_options_internal( __unused const pt_attr_t * const pt_attr = pmap_get_pt_attr(pmap); - PMAP_LOCK(pmap); + pmap_lock(pmap); tte_p = pmap_tte(pmap, start); @@ -6471,13 +6865,13 @@ pmap_remove_options_internal( if ((*tte_p & ARM_TTE_TYPE_MASK) == ARM_TTE_TYPE_TABLE) { pte_p = (pt_entry_t *) ttetokv(*tte_p); - bpte = &pte_p[ptenum(start)]; + bpte = &pte_p[pte_index(pmap, pt_attr, start)]; epte = bpte + ((end - start) >> pt_attr_leaf_shift(pt_attr)); remove_count += 
pmap_remove_range_options(pmap, start, bpte, epte, &rmv_spte, &need_strong_sync, options); - if (rmv_spte && (ptep_get_ptd(pte_p)->ptd_info[ARM_PT_DESC_INDEX(pte_p)].refcnt == 0) && + if (rmv_spte && (ptep_get_info(pte_p)->refcnt == 0) && (pmap != kernel_pmap) && (pmap->nested == FALSE)) { pmap_tte_deallocate(pmap, tte_p, pt_attr_twig_level(pt_attr)); flush_tte = true; @@ -6485,11 +6879,11 @@ pmap_remove_options_internal( } done: - PMAP_UNLOCK(pmap); + pmap_unlock(pmap); if (remove_count > 0) { PMAP_UPDATE_TLBS(pmap, start, end, need_strong_sync); - } else if (flush_tte > 0) { + } else if (flush_tte) { pmap_get_pt_ops(pmap)->flush_tlb_tte_async(start, pmap); sync_tlb_flush(); } @@ -6517,7 +6911,7 @@ pmap_remove_options( VM_KERNEL_ADDRHIDE(end)); #if MACH_ASSERT - if ((start | end) & PAGE_MASK) { + if ((start | end) & pt_attr_leaf_offmask(pt_attr)) { panic("pmap_remove_options() pmap %p start 0x%llx end 0x%llx\n", pmap, (uint64_t)start, (uint64_t)end); } @@ -6575,23 +6969,28 @@ pmap_set_pmap( { pmap_switch(pmap); #if __ARM_USER_PROTECT__ - if (pmap->tte_index_max == NTTES) { - thread->machine.uptw_ttc = 2; - } else { - thread->machine.uptw_ttc = 1; - } thread->machine.uptw_ttb = ((unsigned int) pmap->ttep) | TTBR_SETUP; thread->machine.asid = pmap->hw_asid; #endif } static void -pmap_flush_core_tlb_asid(pmap_t pmap) +pmap_flush_core_tlb_asid_async(pmap_t pmap) { #if (__ARM_VMSA__ == 7) - flush_core_tlb_asid(pmap->hw_asid); + flush_core_tlb_asid_async(pmap->hw_asid); #else - flush_core_tlb_asid(((uint64_t) pmap->hw_asid) << TLBI_ASID_SHIFT); + flush_core_tlb_asid_async(((uint64_t) pmap->hw_asid) << TLBI_ASID_SHIFT); +#endif +} + +static inline bool +pmap_user_ttb_is_clear(void) +{ +#if (__ARM_VMSA__ > 7) + return get_mmu_ttb() == (invalid_ttep & TTBR_BADDR_MASK); +#else + return get_mmu_ttb() == kernel_pmap->ttep; #endif } @@ -6602,56 +7001,102 @@ pmap_switch_internal( VALIDATE_PMAP(pmap); pmap_cpu_data_t *cpu_data_ptr = pmap_get_cpu_data(); uint16_t asid_index = pmap->hw_asid; - boolean_t do_asid_flush = FALSE; + bool do_asid_flush = false; + if (__improbable((asid_index == 0) && (pmap != kernel_pmap))) { + panic("%s: attempt to activate pmap with invalid ASID %p", __func__, pmap); + } #if __ARM_KERNEL_PROTECT__ asid_index >>= 1; #endif -#if (__ARM_VMSA__ > 7) - pmap_t last_nested_pmap = cpu_data_ptr->cpu_nested_pmap; +#if (__ARM_VMSA__ > 7) + pmap_t last_nested_pmap = cpu_data_ptr->cpu_nested_pmap; + __unused const pt_attr_t *last_nested_pmap_attr = cpu_data_ptr->cpu_nested_pmap_attr; + __unused vm_map_address_t last_nested_region_addr = cpu_data_ptr->cpu_nested_region_addr; + __unused vm_map_offset_t last_nested_region_size = cpu_data_ptr->cpu_nested_region_size; + bool do_shared_region_flush = ((pmap != kernel_pmap) && (last_nested_pmap != NULL) && (pmap->nested_pmap != last_nested_pmap)); + bool break_before_make = do_shared_region_flush; +#else + bool do_shared_region_flush = false; + bool break_before_make = false; #endif -#if MAX_ASID > MAX_HW_ASID - if (asid_index > 0) { + if ((pmap_max_asids > MAX_HW_ASIDS) && (asid_index > 0)) { asid_index -= 1; + pmap_update_plru(asid_index); + /* Paranoia. */ - assert(asid_index < (sizeof(cpu_data_ptr->cpu_asid_high_bits) / sizeof(*cpu_data_ptr->cpu_asid_high_bits))); + assert(asid_index < (sizeof(cpu_data_ptr->cpu_sw_asids) / sizeof(*cpu_data_ptr->cpu_sw_asids))); /* Extract the "virtual" bits of the ASIDs (which could cause us to alias). 
*/ - uint8_t asid_high_bits = pmap->sw_asid; - uint8_t last_asid_high_bits = cpu_data_ptr->cpu_asid_high_bits[asid_index]; + uint8_t new_sw_asid = pmap->sw_asid; + uint8_t last_sw_asid = cpu_data_ptr->cpu_sw_asids[asid_index]; - if (asid_high_bits != last_asid_high_bits) { + if (new_sw_asid != last_sw_asid) { /* * If the virtual ASID of the new pmap does not match the virtual ASID * last seen on this CPU for the physical ASID (that was a mouthful), * then this switch runs the risk of aliasing. We need to flush the * TLB for this phyiscal ASID in this case. */ - cpu_data_ptr->cpu_asid_high_bits[asid_index] = asid_high_bits; - do_asid_flush = TRUE; + cpu_data_ptr->cpu_sw_asids[asid_index] = new_sw_asid; + do_asid_flush = true; + break_before_make = true; } } -#endif /* MAX_ASID > MAX_HW_ASID */ - pmap_switch_user_ttb_internal(pmap); +#if __ARM_MIXED_PAGE_SIZE__ + if (pmap_get_pt_attr(pmap)->pta_tcr_value != get_tcr()) { + break_before_make = true; + } +#endif + if (__improbable(break_before_make && !pmap_user_ttb_is_clear())) { + PMAP_TRACE(1, PMAP_CODE(PMAP__CLEAR_USER_TTB), VM_KERNEL_ADDRHIDE(pmap), PMAP_VASID(pmap), pmap->hw_asid); + pmap_clear_user_ttb_internal(); + } #if (__ARM_VMSA__ > 7) /* If we're switching to a different nested pmap (i.e. shared region), we'll need * to flush the userspace mappings for that region. Those mappings are global * and will not be protected by the ASID. It should also be cheaper to flush the * entire local TLB rather than to do a broadcast MMU flush by VA region. */ - if ((pmap != kernel_pmap) && (last_nested_pmap != NULL) && (pmap->nested_pmap != last_nested_pmap)) { - flush_core_tlb(); - } else -#endif - if (do_asid_flush) { - pmap_flush_core_tlb_asid(pmap); + if (__improbable(do_shared_region_flush)) { +#if __ARM_RANGE_TLBI__ + uint64_t page_shift_prev = pt_attr_leaf_shift(last_nested_pmap_attr); + vm_map_offset_t npages_prev = last_nested_region_size >> page_shift_prev; + + /* NOTE: here we flush the global TLB entries for the previous nested region only. + * There may still be non-global entries that overlap with the incoming pmap's + * nested region. On Apple SoCs at least, this is acceptable. Those non-global entries + * must necessarily belong to a different ASID than the incoming pmap, or they would + * be flushed in the do_asid_flush case below. This will prevent them from conflicting + * with the incoming pmap's nested region. However, the ARMv8 ARM is not crystal clear + * on whether such a global/inactive-nonglobal overlap is acceptable, so we may need + * to consider additional invalidation here in the future. 
*/ + if (npages_prev <= ARM64_TLB_RANGE_PAGES) { + flush_core_tlb_allrange_async(generate_rtlbi_param((ppnum_t)npages_prev, 0, last_nested_region_addr, page_shift_prev)); + } else { + do_asid_flush = false; + flush_core_tlb_async(); + } +#else + do_asid_flush = false; + flush_core_tlb_async(); +#endif // __ARM_RANGE_TLBI__ + } +#endif // (__ARM_VMSA__ > 7) + if (__improbable(do_asid_flush)) { + pmap_flush_core_tlb_asid_async(pmap); #if DEVELOPMENT || DEBUG os_atomic_inc(&pmap_asid_flushes, relaxed); #endif } + if (__improbable(do_asid_flush || do_shared_region_flush)) { + sync_tlb_flush(); + } + + pmap_switch_user_ttb_internal(pmap); } void @@ -6667,6 +7112,18 @@ pmap_switch( PMAP_TRACE(1, PMAP_CODE(PMAP__SWITCH) | DBG_FUNC_END); } +void +pmap_require(pmap_t pmap) +{ +#if XNU_MONITOR + VALIDATE_PMAP(pmap); +#else + if (pmap != kernel_pmap) { + zone_id_require(ZONE_ID_PMAP, sizeof(struct pmap), pmap); + } +#endif +} + void pmap_page_protect( ppnum_t ppnum, @@ -6683,10 +7140,11 @@ pmap_page_protect( * page. */ MARK_AS_PMAP_TEXT static void -pmap_page_protect_options_internal( +pmap_page_protect_options_with_flush_range( ppnum_t ppnum, vm_prot_t prot, - unsigned int options) + unsigned int options, + pmap_tlb_flush_range_t *flush_range) { pmap_paddr_t phys = ptoa(ppnum); pv_entry_t **pv_h; @@ -6732,7 +7190,7 @@ pmap_page_protect_options_internal( pvh_flags = pvh_get_flags(pv_h); #if XNU_MONITOR - if (remove && (pvh_flags & PVH_FLAG_LOCKDOWN)) { + if (__improbable(remove && (pvh_flags & PVH_FLAG_LOCKDOWN))) { panic("%d is locked down (%#llx), cannot remove", pai, pvh_get_flags(pv_h)); } #endif @@ -6752,10 +7210,10 @@ pmap_page_protect_options_internal( } while ((pve_p != PV_ENTRY_NULL) || (pte_p != PT_ENTRY_NULL)) { - vm_map_address_t va; - pmap_t pmap; - pt_entry_t tmplate; - boolean_t update = FALSE; + vm_map_address_t va = 0; + pmap_t pmap = NULL; + pt_entry_t tmplate = ARM_PTE_TYPE_FAULT; + boolean_t update = FALSE; if (pve_p != PV_ENTRY_NULL) { pte_p = pve_get_ptep(pve_p); @@ -6764,7 +7222,7 @@ pmap_page_protect_options_internal( #ifdef PVH_FLAG_IOMMU if ((vm_offset_t)pte_p & PVH_FLAG_IOMMU) { #if XNU_MONITOR - if (pvh_flags & PVH_FLAG_LOCKDOWN) { + if (__improbable(pvh_flags & PVH_FLAG_LOCKDOWN)) { panic("pmap_page_protect: ppnum 0x%x locked down, cannot be owned by iommu 0x%llx, pve_p=%p", ppnum, (uint64_t)pte_p & ~PVH_FLAG_IOMMU, pve_p); } @@ -6818,13 +7276,17 @@ pmap_page_protect_options_internal( #else if ((prot & VM_PROT_EXECUTE)) #endif - { set_NX = FALSE;} else { + { + set_NX = FALSE; + } else { set_NX = TRUE; } /* Remove the mapping if new protection is NONE */ if (remove) { boolean_t is_altacct = FALSE; + const pt_attr_t * const pt_attr = pmap_get_pt_attr(pmap); + pt_entry_t spte = *pte_p; if (IS_ALTACCT_PAGE(pai, pve_p)) { is_altacct = TRUE; @@ -6832,15 +7294,16 @@ pmap_page_protect_options_internal( is_altacct = FALSE; } - if (pte_is_wired(*pte_p)) { - pte_set_wired(pte_p, 0); + if (pte_is_wired(spte)) { + pte_set_wired(pmap, pte_p, 0); + spte = *pte_p; if (pmap != kernel_pmap) { - pmap_ledger_debit(pmap, task_ledgers.wired_mem, PAGE_SIZE); + pmap_ledger_debit(pmap, task_ledgers.wired_mem, pt_attr_page_size(pt_attr) * PAGE_RATIO); OSAddAtomic(-1, (SInt32 *) &pmap->stats.wired_count); } } - if (*pte_p != ARM_PTE_TYPE_FAULT && + if (spte != ARM_PTE_TYPE_FAULT && pmap != kernel_pmap && (options & PMAP_OPTIONS_COMPRESSOR) && IS_INTERNAL_PAGE(pai)) { @@ -6855,20 +7318,26 @@ pmap_page_protect_options_internal( tmplate = ARM_PTE_TYPE_FAULT; } - if ((*pte_p != ARM_PTE_TYPE_FAULT) && - 
tmplate == ARM_PTE_TYPE_FAULT && + /** + * The entry must be written before the refcnt is decremented to + * prevent use-after-free races with code paths that deallocate page + * tables based on a zero refcnt. + */ + if (spte != tmplate) { + WRITE_PTE_STRONG(pte_p, tmplate); + update = TRUE; + } + + if ((spte != ARM_PTE_TYPE_FAULT) && + (tmplate == ARM_PTE_TYPE_FAULT) && (pmap != kernel_pmap)) { - if (OSAddAtomic16(-1, (SInt16 *) &(ptep_get_ptd(pte_p)->ptd_info[ARM_PT_DESC_INDEX(pte_p)].refcnt)) <= 0) { + if (OSAddAtomic16(-1, (SInt16 *) &(ptep_get_info(pte_p)->refcnt)) <= 0) { panic("pmap_page_protect_options(): over-release of ptdp %p for pte %p\n", ptep_get_ptd(pte_p), pte_p); } } - if (*pte_p != tmplate) { - WRITE_PTE_STRONG(pte_p, tmplate); - update = TRUE; - } pvh_cnt++; - pmap_ledger_debit(pmap, task_ledgers.phys_mem, PAGE_SIZE); + pmap_ledger_debit(pmap, task_ledgers.phys_mem, pt_attr_page_size(pt_attr) * PAGE_RATIO); OSAddAtomic(-1, (SInt32 *) &pmap->stats.resident_count); #if MACH_ASSERT @@ -6884,14 +7353,14 @@ pmap_page_protect_options_internal( if (IS_REUSABLE_PAGE(pai) && IS_INTERNAL_PAGE(pai) && !is_altacct) { - PMAP_STATS_ASSERTF(pmap->stats.reusable > 0, pmap, "stats.reusable %d", pmap->stats.reusable); - OSAddAtomic(-1, &pmap->stats.reusable); + __assert_only int32_t orig_reusable = OSAddAtomic(-1, &pmap->stats.reusable); + PMAP_STATS_ASSERTF(orig_reusable > 0, pmap, "stats.reusable %d", orig_reusable); } else if (IS_INTERNAL_PAGE(pai)) { - PMAP_STATS_ASSERTF(pmap->stats.internal > 0, pmap, "stats.internal %d", pmap->stats.internal); - OSAddAtomic(-1, &pmap->stats.internal); + __assert_only int32_t orig_internal = OSAddAtomic(-1, &pmap->stats.internal); + PMAP_STATS_ASSERTF(orig_internal > 0, pmap, "stats.internal %d", orig_internal); } else { - PMAP_STATS_ASSERTF(pmap->stats.external > 0, pmap, "stats.external %d", pmap->stats.external); - OSAddAtomic(-1, &pmap->stats.external); + __assert_only int32_t orig_external = OSAddAtomic(-1, &pmap->stats.external); + PMAP_STATS_ASSERTF(orig_external > 0, pmap, "stats.external %d", orig_external); } if ((options & PMAP_OPTIONS_COMPRESSOR) && IS_INTERNAL_PAGE(pai)) { @@ -6903,11 +7372,11 @@ pmap_page_protect_options_internal( if (IS_ALTACCT_PAGE(pai, pve_p)) { assert(IS_INTERNAL_PAGE(pai)); - pmap_ledger_debit(pmap, task_ledgers.internal, PAGE_SIZE); - pmap_ledger_debit(pmap, task_ledgers.alternate_accounting, PAGE_SIZE); + pmap_ledger_debit(pmap, task_ledgers.internal, pt_attr_page_size(pt_attr) * PAGE_RATIO); + pmap_ledger_debit(pmap, task_ledgers.alternate_accounting, pt_attr_page_size(pt_attr) * PAGE_RATIO); if (options & PMAP_OPTIONS_COMPRESSOR) { - pmap_ledger_credit(pmap, task_ledgers.internal_compressed, PAGE_SIZE); - pmap_ledger_credit(pmap, task_ledgers.alternate_accounting_compressed, PAGE_SIZE); + pmap_ledger_credit(pmap, task_ledgers.internal_compressed, pt_attr_page_size(pt_attr) * PAGE_RATIO); + pmap_ledger_credit(pmap, task_ledgers.alternate_accounting_compressed, pt_attr_page_size(pt_attr) * PAGE_RATIO); } /* @@ -6918,12 +7387,12 @@ pmap_page_protect_options_internal( } else if (IS_REUSABLE_PAGE(pai)) { assert(IS_INTERNAL_PAGE(pai)); if (options & PMAP_OPTIONS_COMPRESSOR) { - pmap_ledger_credit(pmap, task_ledgers.internal_compressed, PAGE_SIZE); + pmap_ledger_credit(pmap, task_ledgers.internal_compressed, pt_attr_page_size(pt_attr) * PAGE_RATIO); /* was not in footprint, but is now */ - pmap_ledger_credit(pmap, task_ledgers.phys_footprint, PAGE_SIZE); + pmap_ledger_credit(pmap, task_ledgers.phys_footprint, 
pt_attr_page_size(pt_attr) * PAGE_RATIO); } } else if (IS_INTERNAL_PAGE(pai)) { - pmap_ledger_debit(pmap, task_ledgers.internal, PAGE_SIZE); + pmap_ledger_debit(pmap, task_ledgers.internal, pt_attr_page_size(pt_attr) * PAGE_RATIO); /* * Update all stats related to physical footprint, which only @@ -6934,13 +7403,13 @@ pmap_page_protect_options_internal( * This removal is only being done so we can send this page to * the compressor; therefore it mustn't affect total task footprint. */ - pmap_ledger_credit(pmap, task_ledgers.internal_compressed, PAGE_SIZE); + pmap_ledger_credit(pmap, task_ledgers.internal_compressed, pt_attr_page_size(pt_attr) * PAGE_RATIO); } else { /* * This internal page isn't going to the compressor, so adjust stats to keep * phys_footprint up to date. */ - pmap_ledger_debit(pmap, task_ledgers.phys_footprint, PAGE_SIZE); + pmap_ledger_debit(pmap, task_ledgers.phys_footprint, pt_attr_page_size(pt_attr) * PAGE_RATIO); } } else { /* external page: no impact on ledgers */ @@ -6973,14 +7442,27 @@ pmap_page_protect_options_internal( } #if __APRR_SUPPORTED__ - if (__improbable(is_pte_xprr_protected(spte))) { - panic("pmap_page_protect: modifying an xPRR mapping pte_p=%p pmap=%p prot=%d options=%u, pv_h=%p, pveh_p=%p, pve_p=%p, pte=0x%llx, tmplate=0x%llx, va=0x%llx ppnum: 0x%x", - pte_p, pmap, prot, options, pv_h, pveh_p, pve_p, (uint64_t)spte, (uint64_t)tmplate, (uint64_t)va, ppnum); + /** + * Enforce the policy that PPL xPRR mappings can't have their permissions changed after the fact. + * + * Certain userspace applications (e.g., CrashReporter and debuggers) have a need to remap JIT mappings to + * RO/RX, so we explicitly allow that. This doesn't compromise the security of the PPL since this only + * affects userspace mappings, so allow reducing permissions on JIT mappings to RO/RX. This is similar for + * user execute-only mappings. + */ + if (__improbable(is_pte_xprr_protected(pmap, spte) && (pte_to_xprr_perm(spte) != XPRR_USER_JIT_PERM) + && (pte_to_xprr_perm(spte) != XPRR_USER_XO_PERM))) { + panic("%s: modifying an xPRR mapping pte_p=%p pmap=%p prot=%d options=%u, pv_h=%p, pveh_p=%p, pve_p=%p, pte=0x%llx, tmplate=0x%llx, va=0x%llx ppnum: 0x%x", + __func__, pte_p, pmap, prot, options, pv_h, pveh_p, pve_p, (uint64_t)spte, (uint64_t)tmplate, (uint64_t)va, ppnum); } - if (__improbable(is_pte_xprr_protected(tmplate))) { - panic("pmap_page_protect: creating an xPRR mapping pte_p=%p pmap=%p prot=%d options=%u, pv_h=%p, pveh_p=%p, pve_p=%p, pte=0x%llx, tmplate=0x%llx, va=0x%llx ppnum: 0x%x", - pte_p, pmap, prot, options, pv_h, pveh_p, pve_p, (uint64_t)spte, (uint64_t)tmplate, (uint64_t)va, ppnum); + /** + * Enforce the policy that we can't create a new PPL protected mapping here except for user execute-only + * mappings (which doesn't compromise the security of the PPL since it's userspace-specific). 
+ */ + if (__improbable(is_pte_xprr_protected(pmap, tmplate) && (pte_to_xprr_perm(tmplate) != XPRR_USER_XO_PERM))) { + panic("%s: creating an xPRR mapping pte_p=%p pmap=%p prot=%d options=%u, pv_h=%p, pveh_p=%p, pve_p=%p, pte=0x%llx, tmplate=0x%llx, va=0x%llx ppnum: 0x%x", + __func__, pte_p, pmap, prot, options, pv_h, pveh_p, pve_p, (uint64_t)spte, (uint64_t)tmplate, (uint64_t)va, ppnum); } #endif /* __APRR_SUPPORTED__*/ @@ -6994,8 +7476,12 @@ pmap_page_protect_options_internal( /* Invalidate TLBs for all CPUs using it */ if (update) { + if (remove || !flush_range || + ((flush_range->ptfr_pmap != pmap) || va >= flush_range->ptfr_end || va < flush_range->ptfr_start)) { + pmap_get_pt_ops(pmap)->flush_tlb_region_async(va, + pt_attr_page_size(pmap_get_pt_attr(pmap)) * PAGE_RATIO, pmap); + } tlb_flush_needed = TRUE; - pmap_get_pt_ops(pmap)->flush_tlb_region_async(va, PAGE_SIZE, pmap); } #ifdef PVH_FLAG_IOMMU @@ -7017,10 +7503,6 @@ protect_skip_pve: pmap_set_ptov_ap(pai, AP_RWNA, tlb_flush_needed); } #endif - if (tlb_flush_needed) { - sync_tlb_flush(); - } - /* if we removed a bunch of entries, take care of them now */ if (remove) { if (new_pve_p != PV_ENTRY_NULL) { @@ -7036,11 +7518,30 @@ protect_skip_pve: UNLOCK_PVH(pai); + if (flush_range && tlb_flush_needed) { + if (!remove) { + flush_range->ptfr_flush_needed = true; + tlb_flush_needed = FALSE; + } + } + if (tlb_flush_needed) { + sync_tlb_flush(); + } + if (remove && (pvet_p != PV_ENTRY_NULL)) { - pv_list_free(pveh_p, pvet_p, pvh_cnt); + pv_list_free(pveh_p, pvet_p, pvh_cnt, pv_kern_low_water_mark); } } +MARK_AS_PMAP_TEXT static void +pmap_page_protect_options_internal( + ppnum_t ppnum, + vm_prot_t prot, + unsigned int options) +{ + pmap_page_protect_options_with_flush_range(ppnum, prot, options, NULL); +} + void pmap_page_protect_options( ppnum_t ppnum, @@ -7075,14 +7576,36 @@ pmap_page_protect_options( PMAP_TRACE(2, PMAP_CODE(PMAP__PAGE_PROTECT) | DBG_FUNC_END); } + +#if __has_feature(ptrauth_calls) && defined(XNU_TARGET_OS_OSX) +MARK_AS_PMAP_TEXT void +pmap_disable_user_jop_internal(pmap_t pmap) +{ + if (pmap == kernel_pmap) { + panic("%s: called with kernel_pmap\n", __func__); + } + pmap->disable_jop = true; +} + +void +pmap_disable_user_jop(pmap_t pmap) +{ +#if XNU_MONITOR + pmap_disable_user_jop_ppl(pmap); +#else + pmap_disable_user_jop_internal(pmap); +#endif +} +#endif /* __has_feature(ptrauth_calls) && defined(XNU_TARGET_OS_OSX) */ + /* * Indicates if the pmap layer enforces some additional restrictions on the * given set of protections. 
*/ bool -pmap_has_prot_policy(__unused vm_prot_t prot) +pmap_has_prot_policy(__unused pmap_t pmap, __unused bool translated_allow_execute, __unused vm_prot_t prot) { - return FALSE; + return false; } /* @@ -7138,7 +7661,7 @@ pmap_protect_options_internal( #if (__ARM_VMSA__ > 7) case VM_PROT_EXECUTE: set_XO = TRUE; - /* fall through */ + OS_FALLTHROUGH; #endif case VM_PROT_READ: case VM_PROT_READ | VM_PROT_EXECUTE: @@ -7170,18 +7693,19 @@ pmap_protect_options_internal( } VALIDATE_PMAP(pmap); - PMAP_LOCK(pmap); + pmap_lock(pmap); + tte_p = pmap_tte(pmap, start); if ((tte_p != (tt_entry_t *) NULL) && (*tte_p & ARM_TTE_TYPE_MASK) == ARM_TTE_TYPE_TABLE) { bpte_p = (pt_entry_t *) ttetokv(*tte_p); - bpte_p = &bpte_p[ptenum(start)]; - epte_p = bpte_p + arm_atop(end - start); + bpte_p = &bpte_p[pte_index(pmap, pt_attr, start)]; + epte_p = bpte_p + ((end - start) >> pt_attr_leaf_shift(pt_attr)); pte_p = bpte_p; for (pte_p = bpte_p; pte_p < epte_p; - pte_p += PAGE_SIZE / ARM_PGBYTES) { + pte_p += PAGE_RATIO) { pt_entry_t spte; #if DEVELOPMENT || DEBUG boolean_t force_write = FALSE; @@ -7320,14 +7844,25 @@ pmap_protect_options_internal( pte_set_was_writeable(tmplate, false); #if __APRR_SUPPORTED__ - if (__improbable(is_pte_xprr_protected(spte) && (pte_to_xprr_perm(spte) != XPRR_USER_JIT_PERM) + /** + * Enforce the policy that PPL xPRR mappings can't have their permissions changed after the fact. + * + * Certain userspace applications (e.g., CrashReporter and debuggers) have a need to remap JIT mappings to + * RO/RX, so we explicitly allow that. This doesn't compromise the security of the PPL since this only + * affects userspace mappings, so allow reducing permissions on JIT mappings to RO/RX/XO. This is similar + * for user execute-only mappings. + */ + if (__improbable(is_pte_xprr_protected(pmap, spte) && (pte_to_xprr_perm(spte) != XPRR_USER_JIT_PERM) && (pte_to_xprr_perm(spte) != XPRR_USER_XO_PERM))) { - /* Only test for PPL protection here, User-JIT mappings may be mutated by this function. */ panic("%s: modifying a PPL mapping pte_p=%p pmap=%p prot=%d options=%u, pte=0x%llx, tmplate=0x%llx", __func__, pte_p, pmap, prot, options, (uint64_t)spte, (uint64_t)tmplate); } - if (__improbable(is_pte_xprr_protected(tmplate) && (pte_to_xprr_perm(tmplate) != XPRR_USER_XO_PERM))) { + /** + * Enforce the policy that we can't create a new PPL protected mapping here except for user execute-only + * mappings (which doesn't compromise the security of the PPL since it's userspace-specific). 
+ */ + if (__improbable(is_pte_xprr_protected(pmap, tmplate) && (pte_to_xprr_perm(tmplate) != XPRR_USER_XO_PERM))) { panic("%s: creating an xPRR mapping pte_p=%p pmap=%p prot=%d options=%u, pte=0x%llx, tmplate=0x%llx", __func__, pte_p, pmap, prot, options, (uint64_t)spte, (uint64_t)tmplate); } @@ -7343,7 +7878,7 @@ pmap_protect_options_internal( PMAP_UPDATE_TLBS(pmap, start, end, need_strong_sync); } - PMAP_UNLOCK(pmap); + pmap_unlock(pmap); } void @@ -7359,7 +7894,7 @@ pmap_protect_options( __unused const pt_attr_t * const pt_attr = pmap_get_pt_attr(pmap); - if ((b | e) & PAGE_MASK) { + if ((b | e) & pt_attr_leaf_offmask(pt_attr)) { panic("pmap_protect_options() pmap %p start 0x%llx end 0x%llx\n", pmap, (uint64_t)b, (uint64_t)e); } @@ -7452,6 +7987,19 @@ pmap_map_block( return KERN_SUCCESS; } +kern_return_t +pmap_enter_addr( + pmap_t pmap, + vm_map_address_t v, + pmap_paddr_t pa, + vm_prot_t prot, + vm_prot_t fault_type, + unsigned int flags, + boolean_t wired) +{ + return pmap_enter_options_addr(pmap, v, pa, prot, fault_type, flags, wired, 0, NULL); +} + /* * Insert the given physical page (p) at * the specified virtual address (v) in the @@ -7475,35 +8023,37 @@ pmap_enter( unsigned int flags, boolean_t wired) { - return pmap_enter_options(pmap, v, pn, prot, fault_type, flags, wired, 0, NULL); + return pmap_enter_addr(pmap, v, ((pmap_paddr_t)pn) << PAGE_SHIFT, prot, fault_type, flags, wired); } - static inline void pmap_enter_pte(pmap_t pmap, pt_entry_t *pte_p, pt_entry_t pte, vm_map_address_t v) { + const pt_attr_t * const pt_attr = pmap_get_pt_attr(pmap); + if (pmap != kernel_pmap && ((pte & ARM_PTE_WIRED) != (*pte_p & ARM_PTE_WIRED))) { - SInt16 *ptd_wiredcnt_ptr = (SInt16 *)&(ptep_get_ptd(pte_p)->ptd_info[ARM_PT_DESC_INDEX(pte_p)].wiredcnt); + SInt16 *ptd_wiredcnt_ptr = (SInt16 *)&(ptep_get_info(pte_p)->wiredcnt); if (pte & ARM_PTE_WIRED) { OSAddAtomic16(1, ptd_wiredcnt_ptr); - pmap_ledger_credit(pmap, task_ledgers.wired_mem, PAGE_SIZE); + pmap_ledger_credit(pmap, task_ledgers.wired_mem, pt_attr_page_size(pt_attr) * PAGE_RATIO); OSAddAtomic(1, (SInt32 *) &pmap->stats.wired_count); } else { OSAddAtomic16(-1, ptd_wiredcnt_ptr); - pmap_ledger_debit(pmap, task_ledgers.wired_mem, PAGE_SIZE); + pmap_ledger_debit(pmap, task_ledgers.wired_mem, pt_attr_page_size(pt_attr) * PAGE_RATIO); OSAddAtomic(-1, (SInt32 *) &pmap->stats.wired_count); } } if (*pte_p != ARM_PTE_TYPE_FAULT && !ARM_PTE_IS_COMPRESSED(*pte_p, pte_p)) { WRITE_PTE_STRONG(pte_p, pte); - PMAP_UPDATE_TLBS(pmap, v, v + PAGE_SIZE, false); + PMAP_UPDATE_TLBS(pmap, v, v + (pt_attr_page_size(pt_attr) * PAGE_RATIO), false); } else { WRITE_PTE(pte_p, pte); __builtin_arm_isb(ISB_SY); } - PMAP_TRACE(3, PMAP_CODE(PMAP__TTE), VM_KERNEL_ADDRHIDE(pmap), VM_KERNEL_ADDRHIDE(v), VM_KERNEL_ADDRHIDE(v + PAGE_SIZE), pte); + PMAP_TRACE(4 + pt_attr_leaf_level(pt_attr), PMAP_CODE(PMAP__TTE), VM_KERNEL_ADDRHIDE(pmap), + VM_KERNEL_ADDRHIDE(v), VM_KERNEL_ADDRHIDE(v + (pt_attr_page_size(pt_attr) * PAGE_RATIO)), pte); } MARK_AS_PMAP_TEXT static pt_entry_t @@ -7569,7 +8119,7 @@ wimg_to_pte(unsigned int wimg) return pte; } -static boolean_t +static pv_alloc_return_t pmap_enter_pv( pmap_t pmap, pt_entry_t *pte_p, @@ -7582,12 +8132,13 @@ pmap_enter_pv( pv_h = pai_to_pvh(pai); boolean_t first_cpu_mapping; + ASSERT_NOT_HIBERNATING(); ASSERT_PVH_LOCKED(pai); vm_offset_t pvh_flags = pvh_get_flags(pv_h); #if XNU_MONITOR - if (pvh_flags & PVH_FLAG_LOCKDOWN) { + if (__improbable(pvh_flags & PVH_FLAG_LOCKDOWN)) { panic("%d is locked down (%#lx), cannot enter", pai, 
pvh_flags); } #endif @@ -7644,6 +8195,7 @@ pmap_enter_pv( CLR_ALTACCT_PAGE(pai, PV_ENTRY_NULL); } } else { + pv_alloc_return_t ret; if (pvh_test_type(pv_h, PVH_TYPE_PTEP)) { pt_entry_t *pte1_p; @@ -7652,8 +8204,8 @@ pmap_enter_pv( */ pte1_p = pvh_ptep(pv_h); pvh_set_flags(pv_h, pvh_flags); - if ((*pve_p == PV_ENTRY_NULL) && (!pv_alloc(pmap, pai, pve_p))) { - return FALSE; + if ((*pve_p == PV_ENTRY_NULL) && ((ret = pv_alloc(pmap, pai, pve_p)) != PV_ALLOC_SUCCESS)) { + return ret; } pve_set_ptep(*pve_p, pte1_p); @@ -7678,8 +8230,8 @@ pmap_enter_pv( * add it to the list for this physical page. */ pvh_set_flags(pv_h, pvh_flags); - if ((*pve_p == PV_ENTRY_NULL) && (!pv_alloc(pmap, pai, pve_p))) { - return FALSE; + if ((*pve_p == PV_ENTRY_NULL) && ((ret = pv_alloc(pmap, pai, pve_p)) != PV_ALLOC_SUCCESS)) { + return ret; } pve_set_ptep(*pve_p, pte_p); @@ -7712,21 +8264,21 @@ pmap_enter_pv( pvh_set_flags(pv_h, pvh_flags); - return TRUE; + return PV_ALLOC_SUCCESS; } MARK_AS_PMAP_TEXT static kern_return_t pmap_enter_options_internal( pmap_t pmap, vm_map_address_t v, - ppnum_t pn, + pmap_paddr_t pa, vm_prot_t prot, vm_prot_t fault_type, unsigned int flags, boolean_t wired, unsigned int options) { - pmap_paddr_t pa = ptoa(pn); + ppnum_t pn = (ppnum_t)atop(pa); pt_entry_t pte; pt_entry_t spte; pt_entry_t *pte_p; @@ -7743,13 +8295,22 @@ pmap_enter_options_internal( __unused const pt_attr_t * const pt_attr = pmap_get_pt_attr(pmap); - if ((v) & PAGE_MASK) { + if ((v) & pt_attr_leaf_offmask(pt_attr)) { panic("pmap_enter_options() pmap %p v 0x%llx\n", pmap, (uint64_t)v); } - if ((prot & VM_PROT_EXECUTE) && (prot & VM_PROT_WRITE) && (pmap == kernel_pmap)) { - panic("pmap_enter_options(): WX request on kernel_pmap"); + if ((pa) & pt_attr_leaf_offmask(pt_attr)) { + panic("pmap_enter_options() pmap %p pa 0x%llx\n", + pmap, (uint64_t)pa); + } + + if ((prot & VM_PROT_EXECUTE) && (pmap == kernel_pmap)) { +#if defined(KERNEL_INTEGRITY_CTRR) && defined(CONFIG_XNUPOST) + extern vm_offset_t ctrr_test_page; + if (__probable(v != ctrr_test_page)) +#endif + panic("pmap_enter_options(): attempt to add executable mapping to kernel_pmap"); } #if DEVELOPMENT || DEBUG @@ -7757,7 +8318,9 @@ pmap_enter_options_internal( #else if ((prot & VM_PROT_EXECUTE)) #endif - { set_NX = FALSE;} else { + { + set_NX = FALSE; + } else { set_NX = TRUE; } @@ -7775,7 +8338,7 @@ pmap_enter_options_internal( was_compressed = FALSE; was_alt_compressed = FALSE; - PMAP_LOCK(pmap); + pmap_lock(pmap); /* * Expand pmap to include this pte. Assume that @@ -7784,19 +8347,19 @@ pmap_enter_options_internal( */ while ((pte_p = pmap_pte(pmap, v)) == PT_ENTRY_NULL) { /* Must unlock to expand the pmap. 
*/ - PMAP_UNLOCK(pmap); + pmap_unlock(pmap); - kr = pmap_expand(pmap, v, options, PMAP_TT_MAX_LEVEL); + kr = pmap_expand(pmap, v, options, pt_attr_leaf_level(pt_attr)); if (kr != KERN_SUCCESS) { return kr; } - PMAP_LOCK(pmap); + pmap_lock(pmap); } if (options & PMAP_OPTIONS_NOENTER) { - PMAP_UNLOCK(pmap); + pmap_unlock(pmap); return KERN_SUCCESS; } @@ -7814,15 +8377,15 @@ Pmap_enter_retry: /* one less "compressed" */ OSAddAtomic64(-1, &pmap->stats.compressed); pmap_ledger_debit(pmap, task_ledgers.internal_compressed, - PAGE_SIZE); + pt_attr_page_size(pt_attr) * PAGE_RATIO); was_compressed = TRUE; if (spte & ARM_PTE_COMPRESSED_ALT) { was_alt_compressed = TRUE; - pmap_ledger_debit(pmap, task_ledgers.alternate_accounting_compressed, PAGE_SIZE); + pmap_ledger_debit(pmap, task_ledgers.alternate_accounting_compressed, pt_attr_page_size(pt_attr) * PAGE_RATIO); } else { /* was part of the footprint */ - pmap_ledger_debit(pmap, task_ledgers.phys_footprint, PAGE_SIZE); + pmap_ledger_debit(pmap, task_ledgers.phys_footprint, pt_attr_page_size(pt_attr) * PAGE_RATIO); } /* clear "compressed" marker */ @@ -7838,16 +8401,12 @@ Pmap_enter_retry: } if ((spte != ARM_PTE_TYPE_FAULT) && (pte_to_pa(spte) != pa)) { - pmap_remove_range(pmap, v, pte_p, pte_p + 1, 0); + pmap_remove_range(pmap, v, pte_p, pte_p + PAGE_RATIO, 0); } pte = pa_to_pte(pa) | ARM_PTE_TYPE; - /* Don't bother tracking wiring for kernel PTEs. We use ARM_PTE_WIRED to track - * wired memory statistics for user pmaps, but kernel PTEs are assumed - * to be wired in nearly all cases. For VM layer functionality, the wired - * count in vm_page_t is sufficient. */ - if (wired && pmap != kernel_pmap) { + if (wired) { pte |= ARM_PTE_WIRED; } @@ -7883,9 +8442,9 @@ Pmap_enter_retry: if (!pmap->nested) { pte |= ARM_PTE_NG; } else if ((pmap->nested_region_asid_bitmap) - && (v >= pmap->nested_region_subord_addr) - && (v < (pmap->nested_region_subord_addr + pmap->nested_region_size))) { - unsigned int index = (unsigned int)((v - pmap->nested_region_subord_addr) >> pt_attr_twig_shift(pt_attr)); + && (v >= pmap->nested_region_addr) + && (v < (pmap->nested_region_addr + pmap->nested_region_size))) { + unsigned int index = (unsigned int)((v - pmap->nested_region_addr) >> pt_attr_twig_shift(pt_attr)); if ((pmap->nested_region_asid_bitmap) && testbit(index, (int *)pmap->nested_region_asid_bitmap)) { @@ -7897,15 +8456,15 @@ Pmap_enter_retry: vm_map_address_t nest_vaddr; pt_entry_t *nest_pte_p; - nest_vaddr = v - pmap->nested_region_grand_addr + pmap->nested_region_subord_addr; + nest_vaddr = v - pmap->nested_region_addr + pmap->nested_region_addr; - if ((nest_vaddr >= pmap->nested_region_subord_addr) - && (nest_vaddr < (pmap->nested_region_subord_addr + pmap->nested_region_size)) + if ((nest_vaddr >= pmap->nested_region_addr) + && (nest_vaddr < (pmap->nested_region_addr + pmap->nested_region_size)) && ((nest_pte_p = pmap_pte(pmap->nested_pmap, nest_vaddr)) != PT_ENTRY_NULL) && (*nest_pte_p != ARM_PTE_TYPE_FAULT) && (!ARM_PTE_IS_COMPRESSED(*nest_pte_p, nest_pte_p)) && (((*nest_pte_p) & ARM_PTE_NG) != ARM_PTE_NG)) { - unsigned int index = (unsigned int)((v - pmap->nested_region_subord_addr) >> pt_attr_twig_shift(pt_attr)); + unsigned int index = (unsigned int)((v - pmap->nested_region_addr) >> pt_attr_twig_shift(pt_attr)); if ((pmap->nested_pmap->nested_region_asid_bitmap) && !testbit(index, (int *)pmap->nested_pmap->nested_region_asid_bitmap)) { @@ -7956,8 +8515,9 @@ Pmap_enter_retry: volatile uint16_t *refcnt = NULL; volatile uint16_t *wiredcnt = NULL; if (pmap != 
kernel_pmap) { - refcnt = &(ptep_get_ptd(pte_p)->ptd_info[ARM_PT_DESC_INDEX(pte_p)].refcnt); - wiredcnt = &(ptep_get_ptd(pte_p)->ptd_info[ARM_PT_DESC_INDEX(pte_p)].wiredcnt); + ptd_info_t *ptd_info = ptep_get_info(pte_p); + refcnt = &ptd_info->refcnt; + wiredcnt = &ptd_info->wiredcnt; /* Bump the wired count to keep the PTE page from being reclaimed. We need this because * we may drop the PVH and pmap locks later in pmap_enter() if we need to allocate * a new PV entry. */ @@ -7998,18 +8558,18 @@ Pmap_enter_loop: #if XNU_MONITOR /* The regular old kernel is not allowed to remap PPL pages. */ - if (pa_test_monitor(pa)) { + if (__improbable(pa_test_monitor(pa))) { panic("%s: page belongs to PPL, " - "pmap=%p, v=0x%llx, pn=%u, prot=0x%x, fault_type=0x%x, flags=0x%x, wired=%u, options=0x%x", + "pmap=%p, v=0x%llx, pa=%p, prot=0x%x, fault_type=0x%x, flags=0x%x, wired=%u, options=0x%x", __FUNCTION__, - pmap, v, pn, prot, fault_type, flags, wired, options); + pmap, v, (void*)pa, prot, fault_type, flags, wired, options); } - if (pvh_get_flags(pai_to_pvh(pai)) & PVH_FLAG_LOCKDOWN) { + if (__improbable(pvh_get_flags(pai_to_pvh(pai)) & PVH_FLAG_LOCKDOWN)) { panic("%s: page locked down, " - "pmap=%p, v=0x%llx, pn=%u, prot=0x%x, fault_type=0x%x, flags=0x%x, wired=%u, options=0x%x", + "pmap=%p, v=0x%llx, pa=%p, prot=0x%x, fault_type=0x%x, flags=0x%x, wired=%u, options=0x%x", __FUNCTION__, - pmap, v, pn, prot, fault_type, flags, wired, options); + pmap, v, (void *)pa, prot, fault_type, flags, wired, options); } #endif @@ -8033,8 +8593,13 @@ Pmap_enter_loop: UNLOCK_PVH(pai); goto Pmap_enter_retry; } - if (!pmap_enter_pv(pmap, pte_p, pai, options, &pve_p, &is_altacct)) { + pv_alloc_return_t pv_status = pmap_enter_pv(pmap, pte_p, pai, options, &pve_p, &is_altacct); + if (pv_status == PV_ALLOC_RETRY) { goto Pmap_enter_loop; + } else if (pv_status == PV_ALLOC_FAIL) { + UNLOCK_PVH(pai); + kr = KERN_RESOURCE_SHORTAGE; + goto Pmap_enter_cleanup; } pmap_enter_pte(pmap, pte_p, pte, v); @@ -8058,14 +8623,14 @@ Pmap_enter_loop: UNLOCK_PVH(pai); if (pmap != kernel_pmap) { - pmap_ledger_credit(pmap, task_ledgers.phys_mem, PAGE_SIZE); + pmap_ledger_credit(pmap, task_ledgers.phys_mem, pt_attr_page_size(pt_attr) * PAGE_RATIO); if (is_internal) { /* * Make corresponding adjustments to * phys_footprint statistics. */ - pmap_ledger_credit(pmap, task_ledgers.internal, PAGE_SIZE); + pmap_ledger_credit(pmap, task_ledgers.internal, pt_attr_page_size(pt_attr) * PAGE_RATIO); if (is_altacct) { /* * If this page is internal and @@ -8087,9 +8652,9 @@ Pmap_enter_loop: * is 0. That means: don't * touch phys_footprint here. 
*/ - pmap_ledger_credit(pmap, task_ledgers.alternate_accounting, PAGE_SIZE); + pmap_ledger_credit(pmap, task_ledgers.alternate_accounting, pt_attr_page_size(pt_attr) * PAGE_RATIO); } else { - pmap_ledger_credit(pmap, task_ledgers.phys_footprint, PAGE_SIZE); + pmap_ledger_credit(pmap, task_ledgers.phys_footprint, pt_attr_page_size(pt_attr) * PAGE_RATIO); } } } @@ -8112,7 +8677,7 @@ Pmap_enter_loop: pte |= pmap_get_pt_ops(pmap)->wimg_to_pte(wimg_bits); #if XNU_MONITOR - if (!pmap_ppl_disable && (wimg_bits & PP_ATTR_MONITOR)) { + if ((wimg_bits & PP_ATTR_MONITOR) && !pmap_ppl_disable) { uint64_t xprr_perm = pte_to_xprr_perm(pte); switch (xprr_perm) { case XPRR_KERN_RO_PERM: @@ -8145,30 +8710,28 @@ Pmap_enter_return: #if CONFIG_PGTRACE if (pgtrace_enabled) { // Clone and invalidate original mapping if eligible - for (int i = 0; i < PAGE_RATIO; i++) { - pmap_pgtrace_enter_clone(pmap, v + ARM_PGBYTES * i, 0, 0); - } + pmap_pgtrace_enter_clone(pmap, v + ARM_PGBYTES, 0, 0); } -#endif +#endif /* CONFIG_PGTRACE */ if (pve_p != PV_ENTRY_NULL) { - pv_free(pve_p); + pv_free_entry(pve_p); } if (wiredcnt_updated && (OSAddAtomic16(-1, (volatile int16_t*)wiredcnt) <= 0)) { panic("pmap_enter(): over-unwire of ptdp %p for pte %p\n", ptep_get_ptd(pte_p), pte_p); } - PMAP_UNLOCK(pmap); + pmap_unlock(pmap); return kr; } kern_return_t -pmap_enter_options( +pmap_enter_options_addr( pmap_t pmap, vm_map_address_t v, - ppnum_t pn, + pmap_paddr_t pa, vm_prot_t prot, vm_prot_t fault_type, unsigned int flags, @@ -8178,35 +8741,48 @@ pmap_enter_options( { kern_return_t kr = KERN_FAILURE; + PMAP_TRACE(2, PMAP_CODE(PMAP__ENTER) | DBG_FUNC_START, - VM_KERNEL_ADDRHIDE(pmap), VM_KERNEL_ADDRHIDE(v), pn, prot); + VM_KERNEL_ADDRHIDE(pmap), VM_KERNEL_ADDRHIDE(v), pa, prot); + #if XNU_MONITOR - if (options & PMAP_OPTIONS_NOWAIT) { - /* If NOWAIT was requested, just return the result. */ - kr = pmap_enter_options_ppl(pmap, v, pn, prot, fault_type, flags, wired, options); - } else { - /* - * If NOWAIT was not requested, loop until the enter does not - * fail due to lack of resources. - */ - while ((kr = pmap_enter_options_ppl(pmap, v, pn, prot, fault_type, flags, wired, options | PMAP_OPTIONS_NOWAIT)) == KERN_RESOURCE_SHORTAGE) { - pv_water_mark_check(); - pmap_alloc_page_for_ppl(); + /* + * If NOWAIT was not requested, loop until the enter does not + * fail due to lack of resources. + */ + while ((kr = pmap_enter_options_ppl(pmap, v, pa, prot, fault_type, flags, wired, options | PMAP_OPTIONS_NOWAIT)) == KERN_RESOURCE_SHORTAGE) { + pmap_alloc_page_for_ppl((options & PMAP_OPTIONS_NOWAIT) ? 
PMAP_PAGES_ALLOCATE_NOWAIT : 0); + if (options & PMAP_OPTIONS_NOWAIT) { + break; } } pmap_ledger_check_balance(pmap); #else - kr = pmap_enter_options_internal(pmap, v, pn, prot, fault_type, flags, wired, options); + kr = pmap_enter_options_internal(pmap, v, pa, prot, fault_type, flags, wired, options); #endif - pv_water_mark_check(); PMAP_TRACE(2, PMAP_CODE(PMAP__ENTER) | DBG_FUNC_END, kr); return kr; } +kern_return_t +pmap_enter_options( + pmap_t pmap, + vm_map_address_t v, + ppnum_t pn, + vm_prot_t prot, + vm_prot_t fault_type, + unsigned int flags, + boolean_t wired, + unsigned int options, + __unused void *arg) +{ + return pmap_enter_options_addr(pmap, v, ((pmap_paddr_t)pn) << PAGE_SHIFT, prot, fault_type, flags, wired, options, arg); +} + /* * Routine: pmap_change_wiring * Function: Change the wiring attribute for a map/virtual-address @@ -8223,16 +8799,12 @@ pmap_change_wiring_internal( pt_entry_t *pte_p; pmap_paddr_t pa; - /* Don't bother tracking wiring for kernel PTEs. We use ARM_PTE_WIRED to track - * wired memory statistics for user pmaps, but kernel PTEs are assumed - * to be wired in nearly all cases. For VM layer functionality, the wired - * count in vm_page_t is sufficient. */ - if (pmap == kernel_pmap) { - return; - } - VALIDATE_USER_PMAP(pmap); + VALIDATE_PMAP(pmap); + + pmap_lock(pmap); + + const pt_attr_t * pt_attr = pmap_get_pt_attr(pmap); - PMAP_LOCK(pmap); pte_p = pmap_pte(pmap, v); assert(pte_p != PT_ENTRY_NULL); pa = pte_to_pa(*pte_p); @@ -8251,22 +8823,25 @@ pmap_change_wiring_internal( pa = new_pa; } - if (wired && !pte_is_wired(*pte_p)) { - pte_set_wired(pte_p, wired); - OSAddAtomic(+1, (SInt32 *) &pmap->stats.wired_count); - pmap_ledger_credit(pmap, task_ledgers.wired_mem, PAGE_SIZE); - } else if (!wired && pte_is_wired(*pte_p)) { - PMAP_STATS_ASSERTF(pmap->stats.wired_count >= 1, pmap, "stats.wired_count %d", pmap->stats.wired_count); - pte_set_wired(pte_p, wired); - OSAddAtomic(-1, (SInt32 *) &pmap->stats.wired_count); - pmap_ledger_debit(pmap, task_ledgers.wired_mem, PAGE_SIZE); + if (wired != pte_is_wired(*pte_p)) { + pte_set_wired(pmap, pte_p, wired); + if (pmap != kernel_pmap) { + if (wired) { + OSAddAtomic(+1, (SInt32 *) &pmap->stats.wired_count); + pmap_ledger_credit(pmap, task_ledgers.wired_mem, pt_attr_page_size(pt_attr) * PAGE_RATIO); + } else if (!wired) { + __assert_only int32_t orig_wired = OSAddAtomic(-1, (SInt32 *) &pmap->stats.wired_count); + PMAP_STATS_ASSERTF(orig_wired > 0, pmap, "stats.wired_count %d", orig_wired); + pmap_ledger_debit(pmap, task_ledgers.wired_mem, pt_attr_page_size(pt_attr) * PAGE_RATIO); + } + } } if (pa_valid(pa)) { UNLOCK_PVH((int)pa_index(pa)); } - PMAP_UNLOCK(pmap); + pmap_unlock(pmap); } void @@ -8284,56 +8859,89 @@ pmap_change_wiring( #endif } -MARK_AS_PMAP_TEXT static ppnum_t -pmap_find_phys_internal( +MARK_AS_PMAP_TEXT static pmap_paddr_t +pmap_find_pa_internal( pmap_t pmap, addr64_t va) { - ppnum_t ppn = 0; + pmap_paddr_t pa = 0; VALIDATE_PMAP(pmap); if (pmap != kernel_pmap) { - PMAP_LOCK(pmap); + pmap_lock_ro(pmap); } - ppn = pmap_vtophys(pmap, va); + pa = pmap_vtophys(pmap, va); if (pmap != kernel_pmap) { - PMAP_UNLOCK(pmap); + pmap_unlock_ro(pmap); } - return ppn; + return pa; } -ppnum_t -pmap_find_phys( - pmap_t pmap, - addr64_t va) +pmap_paddr_t +pmap_find_pa_nofault(pmap_t pmap, addr64_t va) { - pmap_paddr_t pa = 0; + pmap_paddr_t pa = 0; if (pmap == kernel_pmap) { pa = mmu_kvtop(va); } else if ((current_thread()->map) && (pmap == vm_map_pmap(current_thread()->map))) { + /* + * Note that this doesn't account 
for PAN: mmu_uvtop() may return a valid + * translation even if PAN would prevent kernel access through the translation. + * It's therefore assumed the UVA will be accessed in a PAN-disabled context. + */ pa = mmu_uvtop(va); } + return pa; +} - if (pa) { - return (ppnum_t)(pa >> PAGE_SHIFT); +pmap_paddr_t +pmap_find_pa( + pmap_t pmap, + addr64_t va) +{ + pmap_paddr_t pa = pmap_find_pa_nofault(pmap, va); + + if (pa != 0) { + return pa; } if (not_in_kdp) { #if XNU_MONITOR - return pmap_find_phys_ppl(pmap, va); + return pmap_find_pa_ppl(pmap, va); #else - return pmap_find_phys_internal(pmap, va); + return pmap_find_pa_internal(pmap, va); #endif } else { return pmap_vtophys(pmap, va); } } +ppnum_t +pmap_find_phys_nofault( + pmap_t pmap, + addr64_t va) +{ + ppnum_t ppn; + ppn = atop(pmap_find_pa_nofault(pmap, va)); + return ppn; +} + +ppnum_t +pmap_find_phys( + pmap_t pmap, + addr64_t va) +{ + ppnum_t ppn; + ppn = atop(pmap_find_pa(pmap, va)); + return ppn; +} + + pmap_paddr_t kvtophys( vm_offset_t va) @@ -8352,7 +8960,7 @@ kvtophys( return (pmap_paddr_t)pa; } -ppnum_t +pmap_paddr_t pmap_vtophys( pmap_t pmap, addr64_t va) @@ -8361,127 +8969,75 @@ pmap_vtophys( return 0; } -#if (__ARM_VMSA__ == 7) +#if (__ARM_VMSA__ == 7) tt_entry_t *tte_p, tte; pt_entry_t *pte_p; - ppnum_t ppn; + pmap_paddr_t pa; tte_p = pmap_tte(pmap, va); if (tte_p == (tt_entry_t *) NULL) { - return (ppnum_t) 0; + return (pmap_paddr_t) 0; } tte = *tte_p; if ((tte & ARM_TTE_TYPE_MASK) == ARM_TTE_TYPE_TABLE) { - pte_p = (pt_entry_t *) ttetokv(tte) + ptenum(va); - ppn = (ppnum_t) atop(pte_to_pa(*pte_p) | (va & ARM_PGMASK)); + pte_p = (pt_entry_t *) ttetokv(tte) + pte_index(pmap, pt_attr, va); + pa = pte_to_pa(*pte_p) | (va & ARM_PGMASK); + //LIONEL ppn = (ppnum_t) atop(pte_to_pa(*pte_p) | (va & ARM_PGMASK)); #if DEVELOPMENT || DEBUG - if (ppn != 0 && + if (atop(pa) != 0 && ARM_PTE_IS_COMPRESSED(*pte_p, pte_p)) { panic("pmap_vtophys(%p,0x%llx): compressed pte_p=%p 0x%llx with ppn=0x%x\n", - pmap, va, pte_p, (uint64_t) (*pte_p), ppn); + pmap, va, pte_p, (uint64_t) (*pte_p), atop(pa)); } #endif /* DEVELOPMENT || DEBUG */ } else if ((tte & ARM_TTE_TYPE_MASK) == ARM_TTE_TYPE_BLOCK) { if ((tte & ARM_TTE_BLOCK_SUPER) == ARM_TTE_BLOCK_SUPER) { - ppn = (ppnum_t) atop(suptte_to_pa(tte) | (va & ARM_TT_L1_SUPER_OFFMASK)); + pa = suptte_to_pa(tte) | (va & ARM_TT_L1_SUPER_OFFMASK); } else { - ppn = (ppnum_t) atop(sectte_to_pa(tte) | (va & ARM_TT_L1_BLOCK_OFFMASK)); + pa = sectte_to_pa(tte) | (va & ARM_TT_L1_BLOCK_OFFMASK); } } else { - ppn = 0; + pa = 0; } #else - tt_entry_t *ttp; - tt_entry_t tte; - ppnum_t ppn = 0; - - __unused const pt_attr_t * const pt_attr = pmap_get_pt_attr(pmap); - - /* Level 0 currently unused */ - - /* Get first-level (1GB) entry */ - ttp = pmap_tt1e(pmap, va); - tte = *ttp; - if ((tte & (ARM_TTE_TYPE_MASK | ARM_TTE_VALID)) != (ARM_TTE_TYPE_TABLE | ARM_TTE_VALID)) { - return ppn; - } - - tte = ((tt_entry_t*) phystokv(tte & ARM_TTE_TABLE_MASK))[tt2_index(pmap, pt_attr, va)]; - - if ((tte & ARM_TTE_VALID) != (ARM_TTE_VALID)) { - return ppn; - } + tt_entry_t * ttp = NULL; + tt_entry_t * ttep = NULL; + tt_entry_t tte = ARM_TTE_EMPTY; + pmap_paddr_t pa = 0; + unsigned int cur_level; - if ((tte & ARM_TTE_TYPE_MASK) == ARM_TTE_TYPE_BLOCK) { - ppn = (ppnum_t) atop((tte & ARM_TTE_BLOCK_L2_MASK) | (va & ARM_TT_L2_OFFMASK)); - return ppn; - } - tte = ((tt_entry_t*) phystokv(tte & ARM_TTE_TABLE_MASK))[tt3_index(pmap, pt_attr, va)]; - ppn = (ppnum_t) atop((tte & ARM_PTE_MASK) | (va & ARM_TT_L3_OFFMASK)); -#endif + const pt_attr_t 
* const pt_attr = pmap_get_pt_attr(pmap); - return ppn; -} + ttp = pmap->tte; -MARK_AS_PMAP_TEXT static vm_offset_t -pmap_extract_internal( - pmap_t pmap, - vm_map_address_t va) -{ - pmap_paddr_t pa = 0; - ppnum_t ppn = 0; + for (cur_level = pt_attr_root_level(pt_attr); cur_level <= pt_attr_leaf_level(pt_attr); cur_level++) { + ttep = &ttp[ttn_index(pmap, pt_attr, va, cur_level)]; - if (pmap == NULL) { - return 0; - } + tte = *ttep; - VALIDATE_PMAP(pmap); + const uint64_t valid_mask = pt_attr->pta_level_info[cur_level].valid_mask; + const uint64_t type_mask = pt_attr->pta_level_info[cur_level].type_mask; + const uint64_t type_block = pt_attr->pta_level_info[cur_level].type_block; + const uint64_t offmask = pt_attr->pta_level_info[cur_level].offmask; - PMAP_LOCK(pmap); + if ((tte & valid_mask) != valid_mask) { + return (pmap_paddr_t) 0; + } - ppn = pmap_vtophys(pmap, va); + /* This detects both leaf entries and intermediate block mappings. */ + if ((tte & type_mask) == type_block) { + pa = ((tte & ARM_TTE_PA_MASK & ~offmask) | (va & offmask)); + break; + } - if (ppn != 0) { - pa = ptoa(ppn) | ((va) & PAGE_MASK); + ttp = (tt_entry_t*)phystokv(tte & ARM_TTE_TABLE_MASK); } - - PMAP_UNLOCK(pmap); +#endif return pa; } -/* - * Routine: pmap_extract - * Function: - * Extract the physical page address associated - * with the given map/virtual_address pair. - * - */ -vm_offset_t -pmap_extract( - pmap_t pmap, - vm_map_address_t va) -{ - pmap_paddr_t pa = 0; - - if (pmap == kernel_pmap) { - pa = mmu_kvtop(va); - } else if (pmap == vm_map_pmap(current_thread()->map)) { - pa = mmu_uvtop(va); - } - - if (pa) { - return pa; - } - -#if XNU_MONITOR - return pmap_extract_ppl(pmap, va); -#else - return pmap_extract_internal(pmap, va); -#endif -} - /* * pmap_init_pte_page - Initialize a page table page. */ @@ -8491,8 +9047,7 @@ pmap_init_pte_page( pt_entry_t *pte_p, vm_offset_t va, unsigned int ttlevel, - boolean_t alloc_ptd, - boolean_t clear) + boolean_t alloc_ptd) { pt_desc_t *ptdp = NULL; vm_offset_t *pvh; @@ -8506,7 +9061,10 @@ pmap_init_pte_page( * on 4KB hardware, we may already have allocated a page table descriptor for a * bootstrap request, so we check for an existing PTD here. 
*/ - ptdp = ptd_alloc(pmap, true); + ptdp = ptd_alloc(pmap); + if (ptdp == NULL) { + panic("%s: unable to allocate PTD", __func__); + } pvh_update_head_unlocked(pvh, ptdp, PVH_TYPE_PTDP); } else { panic("pmap_init_pte_page(): pte_p %p", pte_p); @@ -8517,12 +9075,9 @@ pmap_init_pte_page( panic("pmap_init_pte_page(): invalid PVH type for pte_p %p", pte_p); } - if (clear) { - bzero(pte_p, ARM_PGBYTES); - // below barrier ensures the page zeroing is visible to PTW before - // it is linked to the PTE of previous level - __builtin_arm_dmb(DMB_ISHST); - } + // below barrier ensures previous updates to the page are visible to PTW before + // it is linked to the PTE of previous level + __builtin_arm_dmb(DMB_ISHST); ptd_init(ptdp, pmap, va, ttlevel, pte_p); } @@ -8574,10 +9129,10 @@ pmap_expand( { tt_entry_t *tte_next_p; - PMAP_LOCK(pmap); + pmap_lock(pmap); pa = 0; if (pmap_pte(pmap, v) != PT_ENTRY_NULL) { - PMAP_UNLOCK(pmap); + pmap_unlock(pmap); return KERN_SUCCESS; } tte_p = &pmap->tte[ttenum(v & ~ARM_TT_L1_PT_OFFMASK)]; @@ -8593,12 +9148,12 @@ pmap_expand( tte_p = &pmap->tte[ttenum(v)]; *tte_p = pa_to_tte(pa) | (((v >> ARM_TT_L1_SHIFT) & 0x3) << 10) | ARM_TTE_TYPE_TABLE; FLUSH_PTE(tte_p); - PMAP_TRACE(3, PMAP_CODE(PMAP__TTE), VM_KERNEL_ADDRHIDE(pmap), VM_KERNEL_ADDRHIDE(v & ~ARM_TT_L1_OFFMASK), + PMAP_TRACE(5, PMAP_CODE(PMAP__TTE), VM_KERNEL_ADDRHIDE(pmap), VM_KERNEL_ADDRHIDE(v & ~ARM_TT_L1_OFFMASK), VM_KERNEL_ADDRHIDE((v & ~ARM_TT_L1_OFFMASK) + ARM_TT_L1_SIZE), *tte_p); - PMAP_UNLOCK(pmap); + pmap_unlock(pmap); return KERN_SUCCESS; } - PMAP_UNLOCK(pmap); + pmap_unlock(pmap); } v = v & ~ARM_TT_L1_PT_OFFMASK; @@ -8614,19 +9169,19 @@ pmap_expand( VM_PAGE_WAIT(); } - PMAP_LOCK(pmap); + pmap_lock(pmap); /* * See if someone else expanded us first */ if (pmap_pte(pmap, v) == PT_ENTRY_NULL) { tt_entry_t *tte_next_p; - pmap_init_pte_page(pmap, (pt_entry_t *) tt_p, v, PMAP_TT_L2_LEVEL, FALSE, TRUE); + pmap_init_pte_page(pmap, (pt_entry_t *) tt_p, v, PMAP_TT_L2_LEVEL, FALSE); pa = kvtophys((vm_offset_t)tt_p); tte_p = &pmap->tte[ttenum(v)]; for (i = 0, tte_next_p = tte_p; i < 4; i++) { *tte_next_p = pa_to_tte(pa) | ARM_TTE_TYPE_TABLE; - PMAP_TRACE(3, PMAP_CODE(PMAP__TTE), VM_KERNEL_ADDRHIDE(pmap), VM_KERNEL_ADDRHIDE((v & ~ARM_TT_L1_PT_OFFMASK) + (i * ARM_TT_L1_SIZE)), + PMAP_TRACE(5, PMAP_CODE(PMAP__TTE), VM_KERNEL_ADDRHIDE(pmap), VM_KERNEL_ADDRHIDE((v & ~ARM_TT_L1_PT_OFFMASK) + (i * ARM_TT_L1_SIZE)), VM_KERNEL_ADDRHIDE((v & ~ARM_TT_L1_PT_OFFMASK) + ((i + 1) * ARM_TT_L1_SIZE)), *tte_p); tte_next_p++; pa = pa + 0x400; @@ -8636,7 +9191,7 @@ pmap_expand( pa = 0x0ULL; tt_p = (tt_entry_t *)NULL; } - PMAP_UNLOCK(pmap); + pmap_unlock(pmap); if (tt_p != (tt_entry_t *)NULL) { pmap_tt_deallocate(pmap, tt_p, PMAP_TT_L2_LEVEL); tt_p = (tt_entry_t *)NULL; @@ -8653,10 +9208,10 @@ pmap_expand( tt_p = (tt_entry_t *)NULL; for (; ttlevel < level; ttlevel++) { - PMAP_LOCK(pmap); + pmap_lock_ro(pmap); if (pmap_ttne(pmap, ttlevel + 1, v) == PT_ENTRY_NULL) { - PMAP_UNLOCK(pmap); + pmap_unlock_ro(pmap); while (pmap_tt_allocate(pmap, &tt_p, ttlevel + 1, ((options & PMAP_TT_ALLOCATE_NOWAIT)? 
PMAP_PAGES_ALLOCATE_NOWAIT : 0)) != KERN_SUCCESS) { if (options & PMAP_OPTIONS_NOWAIT) { return KERN_RESOURCE_SHORTAGE; @@ -8670,21 +9225,22 @@ pmap_expand( VM_PAGE_WAIT(); #endif } - PMAP_LOCK(pmap); + pmap_lock(pmap); if ((pmap_ttne(pmap, ttlevel + 1, v) == PT_ENTRY_NULL)) { - pmap_init_pte_page(pmap, (pt_entry_t *) tt_p, v, ttlevel + 1, FALSE, TRUE); + pmap_init_pte_page(pmap, (pt_entry_t *) tt_p, v, ttlevel + 1, FALSE); pa = kvtophys((vm_offset_t)tt_p); tte_p = pmap_ttne(pmap, ttlevel, v); *tte_p = (pa & ARM_TTE_TABLE_MASK) | ARM_TTE_TYPE_TABLE | ARM_TTE_VALID; - PMAP_TRACE(ttlevel + 1, PMAP_CODE(PMAP__TTE), VM_KERNEL_ADDRHIDE(pmap), VM_KERNEL_ADDRHIDE(v & ~pt_attr_ln_offmask(pt_attr, ttlevel)), + PMAP_TRACE(4 + ttlevel, PMAP_CODE(PMAP__TTE), VM_KERNEL_ADDRHIDE(pmap), VM_KERNEL_ADDRHIDE(v & ~pt_attr_ln_offmask(pt_attr, ttlevel)), VM_KERNEL_ADDRHIDE((v & ~pt_attr_ln_offmask(pt_attr, ttlevel)) + pt_attr_ln_size(pt_attr, ttlevel)), *tte_p); pa = 0x0ULL; tt_p = (tt_entry_t *)NULL; } + pmap_unlock(pmap); + } else { + pmap_unlock_ro(pmap); } - PMAP_UNLOCK(pmap); - if (tt_p != (tt_entry_t *)NULL) { pmap_tt_deallocate(pmap, tt_p, ttlevel + 1); tt_p = (tt_entry_t *)NULL; @@ -8712,11 +9268,11 @@ pmap_collect(pmap_t pmap) } #if 0 - PMAP_LOCK(pmap); + pmap_lock(pmap); if ((pmap->nested == FALSE) && (pmap != kernel_pmap)) { /* TODO: Scan for vm page assigned to top level page tables with no reference */ } - PMAP_UNLOCK(pmap); + pmap_unlock(pmap); #endif return; @@ -8855,7 +9411,7 @@ pmap_sync_page_attributes_phys( boolean_t coredumpok( vm_map_t map, - vm_offset_t va) + mach_vm_offset_t va) { pt_entry_t *pte_p; pt_entry_t spte; @@ -8912,11 +9468,12 @@ mapping_set_ref( * attributes alone. */ MARK_AS_PMAP_TEXT static void -phys_attribute_clear_internal( +phys_attribute_clear_with_flush_range( ppnum_t pn, unsigned int bits, int options, - void *arg) + void *arg, + pmap_tlb_flush_range_t *flush_range) { pmap_paddr_t pa = ptoa(pn); vm_prot_t allow_mode = VM_PROT_ALL; @@ -8924,18 +9481,19 @@ phys_attribute_clear_internal( #if XNU_MONITOR if (bits & PP_ATTR_PPL_OWNED_BITS) { panic("%s: illegal request, " - "pn=%u, bits=%#x, options=%#x, arg=%p", + "pn=%u, bits=%#x, options=%#x, arg=%p, flush_range=%p", __FUNCTION__, - pn, bits, options, arg); + pn, bits, options, arg, flush_range); } #endif if ((bits & PP_ATTR_MODIFIED) && (options & PMAP_OPTIONS_NOFLUSH) && - (arg == NULL)) { - panic("phys_attribute_clear(0x%x,0x%x,0x%x,%p): " + (arg == NULL) && + (flush_range == NULL)) { + panic("phys_attribute_clear(0x%x,0x%x,0x%x,%p,%p): " "should not clear 'modified' without flushing TLBs\n", - pn, bits, options, arg); + pn, bits, options, arg, flush_range); } assert(pn != vm_page_fictitious_addr); @@ -8943,7 +9501,7 @@ phys_attribute_clear_internal( if (options & PMAP_OPTIONS_CLEAR_WRITE) { assert(bits == PP_ATTR_MODIFIED); - pmap_page_protect_options_internal(pn, (VM_PROT_ALL & ~VM_PROT_WRITE), 0); + pmap_page_protect_options_with_flush_range(pn, (VM_PROT_ALL & ~VM_PROT_WRITE), 0, flush_range); /* * We short circuit this case; it should not need to * invoke arm_force_fast_fault, so just clear the modified bit. 
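[Editor's note] The next hunk introduces range-based clearing of ref/mod attributes (phys_attribute_clear_twig_internal / phys_attribute_clear_range_internal), threading a pmap_tlb_flush_range_t through the per-page work so that individual TLB invalidations are deferred and replaced by a single ranged flush. The following is a minimal user-space sketch of that deferred-flush pattern only; flush_range_t, clear_refmod_one_page(), and the printf "flushes" are simplified stand-ins for illustration, not the kernel's actual pmap interfaces.

    /* Sketch: coalesce per-page TLB flushes into one ranged flush.
     * Hypothetical types and helpers; the real patch threads a
     * pmap_tlb_flush_range_t through the twig-level clearing loop. */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define SKETCH_PAGE_SIZE 4096ULL

    typedef struct {
        uint64_t start;          /* start of the VA range being processed */
        uint64_t end;            /* end of the VA range being processed   */
        bool     flush_needed;   /* set by per-page work, consumed once at the end */
    } flush_range_t;

    /* Per-page work: instead of flushing immediately, record that a flush is owed
     * when the page falls inside the range covered by the final ranged flush. */
    static void
    clear_refmod_one_page(uint64_t va, flush_range_t *range)
    {
        /* ... clear ref/mod bits for the mapping at 'va' here ... */
        if (va >= range->start && va < range->end) {
            range->flush_needed = true;     /* deferred: covered by the ranged flush */
        } else {
            printf("flush single page at 0x%llx\n", (unsigned long long)va);
        }
    }

    static void
    clear_refmod_range(uint64_t start, uint64_t end)
    {
        flush_range_t range = { .start = start, .end = end, .flush_needed = false };

        for (uint64_t va = start; va < end; va += SKETCH_PAGE_SIZE) {
            clear_refmod_one_page(va, &range);
        }

        /* One ranged invalidate instead of one flush per page. */
        if (range.flush_needed) {
            printf("ranged TLB flush [0x%llx, 0x%llx)\n",
                (unsigned long long)range.start, (unsigned long long)range.end);
        }
    }

    int
    main(void)
    {
        clear_refmod_range(0x10000, 0x18000);
        return 0;
    }

The payoff of this shape is visible further down in the hunk: the caller only issues flush_tlb_region_async() plus sync_tlb_flush() once, and only when ptfr_flush_needed was set, which is what makes the bulk refmod path worthwhile on hardware with ranged TLBI support.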
@@ -8971,12 +9529,124 @@ phys_attribute_clear_internal( return; } - if (arm_force_fast_fault_internal(pn, allow_mode, options)) { + if (arm_force_fast_fault_with_flush_range(pn, allow_mode, options, flush_range)) { pa_clear_bits(pa, bits); } - return; +} + +MARK_AS_PMAP_TEXT static void +phys_attribute_clear_internal( + ppnum_t pn, + unsigned int bits, + int options, + void *arg) +{ + phys_attribute_clear_with_flush_range(pn, bits, options, arg, NULL); +} + +#if __ARM_RANGE_TLBI__ +MARK_AS_PMAP_TEXT static void +phys_attribute_clear_twig_internal( + pmap_t pmap, + vm_map_address_t start, + vm_map_address_t end, + unsigned int bits, + unsigned int options, + pmap_tlb_flush_range_t *flush_range) +{ + pmap_assert_locked_r(pmap); + const pt_attr_t * const pt_attr = pmap_get_pt_attr(pmap); + assert(end >= start); + assert((end - start) <= pt_attr_twig_size(pt_attr)); + pt_entry_t *pte_p, *start_pte_p, *end_pte_p, *curr_pte_p; + tt_entry_t *tte_p; + tte_p = pmap_tte(pmap, start); + + if (tte_p == (tt_entry_t *) NULL) { + return; + } + + if ((*tte_p & ARM_TTE_TYPE_MASK) == ARM_TTE_TYPE_TABLE) { + pte_p = (pt_entry_t *) ttetokv(*tte_p); + + start_pte_p = &pte_p[pte_index(pmap, pt_attr, start)]; + end_pte_p = start_pte_p + ((end - start) >> pt_attr_leaf_shift(pt_attr)); + assert(end_pte_p >= start_pte_p); + for (curr_pte_p = start_pte_p; curr_pte_p < end_pte_p; curr_pte_p++) { + pmap_paddr_t pa = pte_to_pa(*curr_pte_p); + if (pa_valid(pa)) { + ppnum_t pn = (ppnum_t) atop(pa); + phys_attribute_clear_with_flush_range(pn, bits, options, NULL, flush_range); + } + } + } +} + +MARK_AS_PMAP_TEXT static void +phys_attribute_clear_range_internal( + pmap_t pmap, + vm_map_address_t start, + vm_map_address_t end, + unsigned int bits, + unsigned int options) +{ + if (__improbable(end < start)) { + panic("%s: invalid address range %p, %p", __func__, (void*)start, (void*)end); + } + VALIDATE_PMAP(pmap); + + vm_map_address_t va = start; + pmap_tlb_flush_range_t flush_range = { + .ptfr_pmap = pmap, + .ptfr_start = start, + .ptfr_end = end, + .ptfr_flush_needed = false + }; + + pmap_lock_ro(pmap); + const pt_attr_t * const pt_attr = pmap_get_pt_attr(pmap); + + while (va < end) { + vm_map_address_t curr_end; + + curr_end = ((va + pt_attr_twig_size(pt_attr)) & ~pt_attr_twig_offmask(pt_attr)); + if (curr_end > end) { + curr_end = end; + } + + phys_attribute_clear_twig_internal(pmap, va, curr_end, bits, options, &flush_range); + va = curr_end; + } + pmap_unlock_ro(pmap); + if (flush_range.ptfr_flush_needed) { + pmap_get_pt_ops(pmap)->flush_tlb_region_async( + flush_range.ptfr_start, + flush_range.ptfr_end - flush_range.ptfr_start, + flush_range.ptfr_pmap); + sync_tlb_flush(); + } } +static void +phys_attribute_clear_range( + pmap_t pmap, + vm_map_address_t start, + vm_map_address_t end, + unsigned int bits, + unsigned int options) +{ + PMAP_TRACE(3, PMAP_CODE(PMAP__ATTRIBUTE_CLEAR_RANGE) | DBG_FUNC_START, bits); + +#if XNU_MONITOR + phys_attribute_clear_range_ppl(pmap, start, end, bits, options); +#else + phys_attribute_clear_range_internal(pmap, start, end, bits, options); +#endif + + PMAP_TRACE(3, PMAP_CODE(PMAP__ATTRIBUTE_CLEAR_RANGE) | DBG_FUNC_END); +} +#endif /* __ARM_RANGE_TLBI__ */ + static void phys_attribute_clear( ppnum_t pn, @@ -9023,7 +9693,7 @@ phys_attribute_set_internal( } #endif - pa_set_bits(pa, bits); + pa_set_bits(pa, (uint16_t)bits); return; } @@ -9139,6 +9809,13 @@ pmap_get_refmod( | ((phys_attribute_test(pn, PP_ATTR_REFERENCED)) ? 
VM_MEM_REFERENCED : 0); } +static inline unsigned int +pmap_clear_refmod_mask_to_modified_bits(const unsigned int mask) +{ + return ((mask & VM_MEM_MODIFIED) ? PP_ATTR_MODIFIED : 0) | + ((mask & VM_MEM_REFERENCED) ? PP_ATTR_REFERENCED : 0); +} + /* * pmap_clear_refmod(phys, mask) * clears the referenced and modified bits as specified by the mask @@ -9153,11 +9830,47 @@ pmap_clear_refmod_options( { unsigned int bits; - bits = ((mask & VM_MEM_MODIFIED) ? PP_ATTR_MODIFIED : 0) | - ((mask & VM_MEM_REFERENCED) ? PP_ATTR_REFERENCED : 0); + bits = pmap_clear_refmod_mask_to_modified_bits(mask); phys_attribute_clear(pn, bits, options, arg); } +/* + * Perform pmap_clear_refmod_options on a virtual address range. + * The operation will be performed in bulk & tlb flushes will be coalesced + * if possible. + * + * Returns true if the operation is supported on this platform. + * If this function returns false, the operation is not supported and + * nothing has been modified in the pmap. + */ +bool +pmap_clear_refmod_range_options( + pmap_t pmap __unused, + vm_map_address_t start __unused, + vm_map_address_t end __unused, + unsigned int mask __unused, + unsigned int options __unused) +{ +#if __ARM_RANGE_TLBI__ + unsigned int bits; + bits = pmap_clear_refmod_mask_to_modified_bits(mask); + phys_attribute_clear_range(pmap, start, end, bits, options); + return true; +#else /* __ARM_RANGE_TLBI__ */ +#pragma unused(pmap, start, end, mask, options) + /* + * This operation allows the VM to bulk modify refmod bits on a virtually + * contiguous range of addresses. This is large performance improvement on + * platforms that support ranged tlbi instructions. But on older platforms, + * we can only flush per-page or the entire asid. So we currently + * only support this operation on platforms that support ranged tlbi. + * instructions. On other platforms, we require that + * the VM modify the bits on a per-page basis. + */ + return false; +#endif /* __ARM_RANGE_TLBI__ */ +} + void pmap_clear_refmod( ppnum_t pn, @@ -9354,30 +10067,13 @@ pmap_switch_user_ttb_internal( if (ttbr0_val != ttbr1_val) { panic("Misaligned ttbr0 %08X\n", ttbr0_val); } - } -#endif - if (pmap->tte_index_max == NTTES) { - /* Setting TTBCR.N for TTBR0 TTBR1 boundary at 0x40000000 */ - __asm__ volatile ("mcr p15,0,%0,c2,c0,2" : : "r"(2)); - __builtin_arm_isb(ISB_SY); -#if !__ARM_USER_PROTECT__ - set_mmu_ttb(pmap->ttep); -#endif - } else { -#if !__ARM_USER_PROTECT__ - set_mmu_ttb(pmap->ttep); -#endif - /* Setting TTBCR.N for TTBR0 TTBR1 boundary at 0x80000000 */ - __asm__ volatile ("mcr p15,0,%0,c2,c0,2" : : "r"(1)); - __builtin_arm_isb(ISB_SY); -#if MACH_ASSERT && __ARM_USER_PROTECT__ if (pmap->ttep & 0x1000) { panic("Misaligned ttbr0 %08X\n", pmap->ttep); } -#endif } - +#endif #if !__ARM_USER_PROTECT__ + set_mmu_ttb(pmap->ttep); set_context_id(pmap->hw_asid); #endif @@ -9385,27 +10081,39 @@ pmap_switch_user_ttb_internal( if (pmap != kernel_pmap) { cpu_data_ptr->cpu_nested_pmap = pmap->nested_pmap; + cpu_data_ptr->cpu_nested_pmap_attr = (cpu_data_ptr->cpu_nested_pmap == NULL) ? 
+ NULL : pmap_get_pt_attr(cpu_data_ptr->cpu_nested_pmap); + cpu_data_ptr->cpu_nested_region_addr = pmap->nested_region_addr; + cpu_data_ptr->cpu_nested_region_size = pmap->nested_region_size; } - if (pmap == kernel_pmap) { - pmap_clear_user_ttb_internal(); - } else { + +#if __ARM_MIXED_PAGE_SIZE__ + if ((pmap != kernel_pmap) && (pmap_get_pt_attr(pmap)->pta_tcr_value != get_tcr())) { + set_tcr(pmap_get_pt_attr(pmap)->pta_tcr_value); + } +#endif /* __ARM_MIXED_PAGE_SIZE__ */ + + if (pmap != kernel_pmap) { set_mmu_ttb((pmap->ttep & TTBR_BADDR_MASK) | (((uint64_t)pmap->hw_asid) << TTBR_ASID_SHIFT)); + } else if (!pmap_user_ttb_is_clear()) { + pmap_clear_user_ttb_internal(); } #if defined(HAS_APPLE_PAC) && (__APCFG_SUPPORTED__ || __APSTS_SUPPORTED__) - if (!(BootArgs->bootFlags & kBootFlagsDisableJOP) && !(BootArgs->bootFlags & kBootFlagsDisableUserJOP)) { + if (!arm_user_jop_disabled()) { uint64_t sctlr = __builtin_arm_rsr64("SCTLR_EL1"); bool jop_enabled = sctlr & SCTLR_JOP_KEYS_ENABLED; if (!jop_enabled && !pmap->disable_jop) { // turn on JOP sctlr |= SCTLR_JOP_KEYS_ENABLED; __builtin_arm_wsr64("SCTLR_EL1", sctlr); - // no ISB necessary because this won't take effect until eret returns to EL0 + arm_context_switch_requires_sync(); } else if (jop_enabled && pmap->disable_jop) { // turn off JOP sctlr &= ~SCTLR_JOP_KEYS_ENABLED; __builtin_arm_wsr64("SCTLR_EL1", sctlr); + arm_context_switch_requires_sync(); } } #endif /* HAS_APPLE_PAC && (__APCFG_SUPPORTED__ || __APSTS_SUPPORTED__) */ @@ -9438,26 +10146,21 @@ pmap_clear_user_ttb_internal(void) void pmap_clear_user_ttb(void) { + PMAP_TRACE(3, PMAP_CODE(PMAP__CLEAR_USER_TTB) | DBG_FUNC_START, NULL, 0, 0); #if XNU_MONITOR pmap_clear_user_ttb_ppl(); #else pmap_clear_user_ttb_internal(); #endif + PMAP_TRACE(3, PMAP_CODE(PMAP__CLEAR_USER_TTB) | DBG_FUNC_END); } -/* - * Routine: arm_force_fast_fault - * - * Function: - * Force all mappings for this page to fault according - * to the access modes allowed, so we can gather ref/modify - * bits again. - */ MARK_AS_PMAP_TEXT static boolean_t -arm_force_fast_fault_internal( +arm_force_fast_fault_with_flush_range( ppnum_t ppnum, vm_prot_t allow_mode, - int options) + int options, + pmap_tlb_flush_range_t *flush_range) { pmap_paddr_t phys = ptoa(ppnum); pv_entry_t *pve_p; @@ -9469,6 +10172,9 @@ arm_force_fast_fault_internal( boolean_t tlb_flush_needed = FALSE; boolean_t ref_fault; boolean_t mod_fault; + boolean_t clear_write_fault = FALSE; + boolean_t ref_aliases_mod = FALSE; + bool mustsynch = ((options & PMAP_OPTIONS_FF_LOCKED) == 0); assert(ppnum != vm_page_fictitious_addr); @@ -9480,7 +10186,9 @@ arm_force_fast_fault_internal( ref_fault = FALSE; mod_fault = FALSE; pai = (int)pa_index(phys); - LOCK_PVH(pai); + if (__probable(mustsynch)) { + LOCK_PVH(pai); + } pv_h = pai_to_pvh(pai); pte_p = PT_ENTRY_NULL; @@ -9521,13 +10229,62 @@ arm_force_fast_fault_internal( } pmap = ptep_get_pmap(pte_p); + const pt_attr_t * pt_attr = pmap_get_pt_attr(pmap); va = ptep_get_va(pte_p); - assert(va >= pmap->min && va < pmap->max); + assert(va >= pmap->min && va < pmap->max); + + /* update pmap stats and ledgers */ + if (IS_ALTACCT_PAGE(pai, pve_p)) { + /* + * We do not track "reusable" status for + * "alternate accounting" mappings. 
+ */ + } else if ((options & PMAP_OPTIONS_CLEAR_REUSABLE) && + is_reusable && + is_internal && + pmap != kernel_pmap) { + /* one less "reusable" */ + __assert_only int32_t orig_reusable = OSAddAtomic(-1, &pmap->stats.reusable); + PMAP_STATS_ASSERTF(orig_reusable > 0, pmap, "stats.reusable %d", orig_reusable); + /* one more "internal" */ + __assert_only int32_t orig_internal = OSAddAtomic(+1, &pmap->stats.internal); + PMAP_STATS_PEAK(pmap->stats.internal); + PMAP_STATS_ASSERTF(orig_internal >= 0, pmap, "stats.internal %d", orig_internal); + pmap_ledger_credit(pmap, task_ledgers.internal, pt_attr_page_size(pt_attr) * PAGE_RATIO); + assert(!IS_ALTACCT_PAGE(pai, pve_p)); + assert(IS_INTERNAL_PAGE(pai)); + pmap_ledger_credit(pmap, task_ledgers.phys_footprint, pt_attr_page_size(pt_attr) * PAGE_RATIO); + + /* + * Since the page is being marked non-reusable, we assume that it will be + * modified soon. Avoid the cost of another trap to handle the fast + * fault when we next write to this page. + */ + clear_write_fault = TRUE; + } else if ((options & PMAP_OPTIONS_SET_REUSABLE) && + !is_reusable && + is_internal && + pmap != kernel_pmap) { + /* one more "reusable" */ + __assert_only int32_t orig_reusable = OSAddAtomic(+1, &pmap->stats.reusable); + PMAP_STATS_PEAK(pmap->stats.reusable); + PMAP_STATS_ASSERTF(orig_reusable >= 0, pmap, "stats.reusable %d", orig_reusable); + /* one less "internal" */ + __assert_only int32_t orig_internal = OSAddAtomic(-1, &pmap->stats.internal); + PMAP_STATS_ASSERTF(orig_internal > 0, pmap, "stats.internal %d", orig_internal); + pmap_ledger_debit(pmap, task_ledgers.internal, pt_attr_page_size(pt_attr) * PAGE_RATIO); + assert(!IS_ALTACCT_PAGE(pai, pve_p)); + assert(IS_INTERNAL_PAGE(pai)); + pmap_ledger_debit(pmap, task_ledgers.phys_footprint, pt_attr_page_size(pt_attr) * PAGE_RATIO); + } + + bool wiredskip = pte_is_wired(*pte_p) && + ((options & PMAP_OPTIONS_FF_WIRED) == 0); - if (pte_is_wired(*pte_p) || pmap == kernel_pmap) { + if (wiredskip) { result = FALSE; - break; + goto fff_skip_pve; } spte = *pte_p; @@ -9550,8 +10307,8 @@ arm_force_fast_fault_internal( mod_fault = TRUE; } } else { - if ((tmplate & ARM_PTE_APMASK) == ARM_PTE_AP(AP_RWRW)) { - tmplate = ((tmplate & ~ARM_PTE_APMASK) | pt_attr_leaf_ro(pmap_get_pt_attr(pmap))); + if ((tmplate & ARM_PTE_APMASK) == pt_attr_leaf_rw(pt_attr)) { + tmplate = ((tmplate & ~ARM_PTE_APMASK) | pt_attr_leaf_ro(pt_attr)); pte_set_was_writeable(tmplate, true); update_pte = TRUE; mod_fault = TRUE; @@ -9560,7 +10317,7 @@ arm_force_fast_fault_internal( } #if MACH_ASSERT && XNU_MONITOR - if (is_pte_xprr_protected(spte)) { + if (is_pte_xprr_protected(pmap, spte)) { if (pte_to_xprr_perm(spte) != pte_to_xprr_perm(tmplate)) { panic("%s: attempted to mutate an xPRR mapping pte_p=%p, pmap=%p, pv_h=%p, pve_p=%p, pte=0x%llx, tmplate=0x%llx, va=0x%llx, " "ppnum=0x%x, options=0x%x, allow_mode=0x%x", @@ -9570,11 +10327,15 @@ arm_force_fast_fault_internal( } #endif /* MACH_ASSERT && XNU_MONITOR */ - if (update_pte) { + if (result && update_pte) { if (*pte_p != ARM_PTE_TYPE_FAULT && !ARM_PTE_IS_COMPRESSED(*pte_p, pte_p)) { WRITE_PTE_STRONG(pte_p, tmplate); - pmap_get_pt_ops(pmap)->flush_tlb_region_async(va, PAGE_SIZE, pmap); + if (!flush_range || + ((flush_range->ptfr_pmap != pmap) || va >= flush_range->ptfr_end || va < flush_range->ptfr_start)) { + pmap_get_pt_ops(pmap)->flush_tlb_region_async(va, + pt_attr_page_size(pt_attr) * PAGE_RATIO, pmap); + } tlb_flush_needed = TRUE; } else { WRITE_PTE(pte_p, tmplate); @@ -9582,65 +10343,30 @@ 
arm_force_fast_fault_internal( } } - /* update pmap stats and ledgers */ - if (IS_ALTACCT_PAGE(pai, pve_p)) { - /* - * We do not track "reusable" status for - * "alternate accounting" mappings. - */ - } else if ((options & PMAP_OPTIONS_CLEAR_REUSABLE) && - is_reusable && - is_internal && - pmap != kernel_pmap) { - /* one less "reusable" */ - PMAP_STATS_ASSERTF(pmap->stats.reusable > 0, pmap, "stats.reusable %d", pmap->stats.reusable); - OSAddAtomic(-1, &pmap->stats.reusable); - /* one more "internal" */ - OSAddAtomic(+1, &pmap->stats.internal); - PMAP_STATS_PEAK(pmap->stats.internal); - PMAP_STATS_ASSERTF(pmap->stats.internal > 0, pmap, "stats.internal %d", pmap->stats.internal); - pmap_ledger_credit(pmap, task_ledgers.internal, machine_ptob(1)); - assert(!IS_ALTACCT_PAGE(pai, pve_p)); - assert(IS_INTERNAL_PAGE(pai)); - pmap_ledger_credit(pmap, task_ledgers.phys_footprint, machine_ptob(1)); - - /* - * Avoid the cost of another trap to handle the fast - * fault when we next write to this page: let's just - * handle that now since we already have all the - * necessary information. - */ - { - arm_clear_fast_fault(ppnum, VM_PROT_WRITE); - } - } else if ((options & PMAP_OPTIONS_SET_REUSABLE) && - !is_reusable && - is_internal && - pmap != kernel_pmap) { - /* one more "reusable" */ - OSAddAtomic(+1, &pmap->stats.reusable); - PMAP_STATS_PEAK(pmap->stats.reusable); - PMAP_STATS_ASSERTF(pmap->stats.reusable > 0, pmap, "stats.reusable %d", pmap->stats.reusable); - /* one less "internal" */ - PMAP_STATS_ASSERTF(pmap->stats.internal > 0, pmap, "stats.internal %d", pmap->stats.internal); - OSAddAtomic(-1, &pmap->stats.internal); - pmap_ledger_debit(pmap, task_ledgers.internal, machine_ptob(1)); - assert(!IS_ALTACCT_PAGE(pai, pve_p)); - assert(IS_INTERNAL_PAGE(pai)); - pmap_ledger_debit(pmap, task_ledgers.phys_footprint, machine_ptob(1)); - } - -#ifdef PVH_FLAG_IOMMU fff_skip_pve: -#endif pte_p = PT_ENTRY_NULL; if (pve_p != PV_ENTRY_NULL) { pve_p = PVE_NEXT_PTR(pve_next(pve_p)); } } + /* + * If we are using the same approach for ref and mod + * faults on this PTE, do not clear the write fault; + * this would cause both ref and mod to be set on the + * page again, and prevent us from taking ANY read/write + * fault on the mapping. + */ + if (clear_write_fault && !ref_aliases_mod) { + arm_clear_fast_fault(ppnum, VM_PROT_WRITE); + } if (tlb_flush_needed) { - sync_tlb_flush(); + if (flush_range) { + /* Delayed flush. Signal to the caller that the flush is needed. */ + flush_range->ptfr_flush_needed = true; + } else { + sync_tlb_flush(); + } } /* update global "reusable" status for this page */ @@ -9660,11 +10386,33 @@ fff_skip_pve: if (ref_fault) { SET_REFFAULT_PAGE(pai); } - - UNLOCK_PVH(pai); + if (__probable(mustsynch)) { + UNLOCK_PVH(pai); + } return result; } +MARK_AS_PMAP_TEXT static boolean_t +arm_force_fast_fault_internal( + ppnum_t ppnum, + vm_prot_t allow_mode, + int options) +{ + if (__improbable((options & PMAP_OPTIONS_FF_LOCKED) != 0)) { + panic("arm_force_fast_fault(0x%x, 0x%x, 0x%x): invalid options", ppnum, allow_mode, options); + } + return arm_force_fast_fault_with_flush_range(ppnum, allow_mode, options, NULL); +} + +/* + * Routine: arm_force_fast_fault + * + * Function: + * Force all mappings for this page to fault according + * to the access modes allowed, so we can gather ref/modify + * bits again. 
+ */ + boolean_t arm_force_fast_fault( ppnum_t ppnum, @@ -9694,7 +10442,7 @@ arm_force_fast_fault( * Clear pending force fault for all mappings for this page based on * the observed fault type, update ref/modify bits. */ -boolean_t +MARK_AS_PMAP_TEXT static boolean_t arm_clear_fast_fault( ppnum_t ppnum, vm_prot_t fault_type) @@ -9778,7 +10526,7 @@ arm_clear_fast_fault( } #if MACH_ASSERT && XNU_MONITOR - if (is_pte_xprr_protected(spte)) { + if (is_pte_xprr_protected(pmap, spte)) { if (pte_to_xprr_perm(spte) != pte_to_xprr_perm(tmplate)) { panic("%s: attempted to mutate an xPRR mapping pte_p=%p, pmap=%p, pv_h=%p, pve_p=%p, pte=0x%llx, tmplate=0x%llx, va=0x%llx, " "ppnum=0x%x, fault_type=0x%x", @@ -9791,7 +10539,8 @@ arm_clear_fast_fault( if (spte != tmplate) { if (spte != ARM_PTE_TYPE_FAULT) { WRITE_PTE_STRONG(pte_p, tmplate); - pmap_get_pt_ops(pmap)->flush_tlb_region_async(va, PAGE_SIZE, pmap); + pmap_get_pt_ops(pmap)->flush_tlb_region_async(va, + pt_attr_page_size(pmap_get_pt_attr(pmap)) * PAGE_RATIO, pmap); tlb_flush_needed = TRUE; } else { WRITE_PTE(pte_p, tmplate); @@ -9843,7 +10592,7 @@ arm_fast_fault_internal( pmap_paddr_t pa; VALIDATE_PMAP(pmap); - PMAP_LOCK(pmap); + pmap_lock(pmap); /* * If the entry doesn't exist, is completely invalid, or is already @@ -9859,12 +10608,12 @@ arm_fast_fault_internal( if ((spte == ARM_PTE_TYPE_FAULT) || ARM_PTE_IS_COMPRESSED(spte, ptep)) { - PMAP_UNLOCK(pmap); + pmap_unlock(pmap); return result; } if (!pa_valid(pa)) { - PMAP_UNLOCK(pmap); + pmap_unlock(pmap); #if XNU_MONITOR if (pmap_cache_attributes((ppnum_t)atop(pa)) & PP_ATTR_MONITOR) { return KERN_PROTECTION_FAILURE; @@ -9888,13 +10637,13 @@ arm_fast_fault_internal( #endif /* !(__APRR_SUPPORTED__*/ } } else { - PMAP_UNLOCK(pmap); + pmap_unlock(pmap); return result; } #if __APRR_SUPPORTED__ /* Check to see if this mapping had APRR restrictions. */ - if (is_pte_xprr_protected(spte)) { + if (is_pte_xprr_protected(pmap, spte)) { /* * We have faulted on an XPRR managed mapping; decide if the access should be * reattempted or if it should cause an exception. Now that all JIT entitled @@ -9944,7 +10693,7 @@ arm_fast_fault_internal( out: #endif /* __APRR_SUPPORTED__*/ UNLOCK_PVH(pai); - PMAP_UNLOCK(pmap); + pmap_unlock(pmap); return result; } @@ -10054,16 +10803,6 @@ pmap_zero_part_page( bzero_phys((addr64_t) (ptoa(pn) + offset), len); } - -/* - * nop in current arm implementation - */ -void -inval_copy_windows( - __unused thread_t t) -{ -} - void pmap_map_globals( void) @@ -10111,21 +10850,21 @@ pmap_map_cpu_windows_copy_internal( vm_offset_t cpu_copywindow_vaddr = 0; bool need_strong_sync = false; -#if XNU_MONITOR || HAS_MILD_DSB +#if XNU_MONITOR unsigned int cacheattr = (!pa_valid(ptoa(pn)) ? 
pmap_cache_attributes(pn) : 0); need_strong_sync = ((cacheattr & PMAP_IO_RANGE_STRONG_SYNC) != 0); #endif #if XNU_MONITOR #ifdef __ARM_COHERENT_IO__ - if (pa_valid(ptoa(pn)) && !pmap_ppl_disable) { + if (__improbable(pa_valid(ptoa(pn)) && !pmap_ppl_disable)) { panic("%s: attempted to map a managed page, " "pn=%u, prot=0x%x, wimg_bits=0x%x", __FUNCTION__, pn, prot, wimg_bits); } - if (!pmap_ppl_disable && (cacheattr & PP_ATTR_MONITOR)) { - panic("%s: attempt to map PPL-protected I/O address 0x%llx", __func__, (uint64_t)ptoa(pn)); + if (__improbable((cacheattr & PP_ATTR_MONITOR) && (prot != VM_PROT_READ) && !pmap_ppl_disable)) { + panic("%s: attempt to map PPL-protected I/O address 0x%llx as writable", __func__, (uint64_t)ptoa(pn)); } #else /* __ARM_COHERENT_IO__ */ @@ -10216,6 +10955,171 @@ pmap_unmap_cpu_windows_copy( #endif } +#if XNU_MONITOR + +/* + * The HMAC SHA driver needs to be able to operate on physical pages in + * place without copying them out. This function provides an interface + * to run a callback on a given page, making use of a CPU copy window + * if necessary. + * + * This should only be used during the hibernation process since every DRAM page + * will be mapped as VM_WIMG_DEFAULT. This can cause coherency issues if the pages + * were originally mapped as VM_WIMG_IO/RT. In the hibernation case, by the time + * we start copying memory all other agents shouldn't be writing to memory so we + * can ignore these coherency issues. Regardless of this code, if other agents + * were modifying memory during the image creation process, there would be + * issues anyway. + */ +MARK_AS_PMAP_TEXT void +pmap_invoke_with_page( + ppnum_t page_number, + void *ctx, + void (*callback)(void *ctx, ppnum_t page_number, const void *page)) +{ +#if HIBERNATION + /* This function should only be used from within a hibernation context. */ + assert((gIOHibernateState == kIOHibernateStateHibernating) || + (gIOHibernateState == kIOHibernateStateWakingFromHibernate)); + + /* from bcopy_phys_internal */ + vm_offset_t src = ptoa_64(page_number); + vm_offset_t tmp_src; + bool use_copy_window_src = !pmap_valid_address(src); + unsigned int src_index; + if (use_copy_window_src) { + unsigned int wimg_bits_src = pmap_cache_attributes(page_number); + + /** + * Always map DRAM as VM_WIMG_DEFAULT (regardless of whether it's + * kernel-managed) to denote that it's safe to use memcpy on it. + */ + if (is_dram_addr(src)) { + wimg_bits_src = VM_WIMG_DEFAULT; + } + + src_index = pmap_map_cpu_windows_copy_internal(page_number, VM_PROT_READ, wimg_bits_src); + tmp_src = pmap_cpu_windows_copy_addr(pmap_get_cpu_data()->cpu_number, src_index); + } else { + vm_size_t count = PAGE_SIZE; + tmp_src = phystokv_range((pmap_paddr_t)src, &count); + } + + callback(ctx, page_number, (const void *)tmp_src); + + if (use_copy_window_src) { + pmap_unmap_cpu_windows_copy_internal(src_index); + } +#else + #pragma unused(page_number, ctx, callback) +#endif /* HIBERNATION */ +} + +/* + * Loop over every pmap_io_range (I/O ranges marked as owned by + * the PPL in the device tree) and conditionally call callback() on each range + * that needs to be included in the hibernation image. + * + * @param ctx Will be passed as-is into the callback method. Use NULL if no + * context is needed in the callback. + * @param callback Callback function invoked on each range (gated by flag). 
+ */ +MARK_AS_PMAP_TEXT void +pmap_hibernate_invoke(void *ctx, void (*callback)(void *ctx, uint64_t addr, uint64_t len)) +{ + for (unsigned int i = 0; i < num_io_rgns; ++i) { + if (io_attr_table[i].wimg & PMAP_IO_RANGE_NEEDS_HIBERNATING) { + callback(ctx, io_attr_table[i].addr, io_attr_table[i].len); + } + } +} + +/** + * Set the HASHED pv_head_table flag for the passed in physical page if it's a + * PPL-owned page. Otherwise, do nothing. + * + * @param addr Physical address of the page to set the HASHED flag on. + */ +MARK_AS_PMAP_TEXT void +pmap_set_ppl_hashed_flag(const pmap_paddr_t addr) +{ + /* Ignore non-managed kernel memory. */ + if (!pa_valid(addr)) { + return; + } + + const int pai = (int)pa_index(addr); + if (pp_attr_table[pai] & PP_ATTR_MONITOR) { + pv_entry_t **pv_h = pai_to_pvh(pai); + + /* Mark that the PPL-owned page has been hashed into the hibernation image. */ + LOCK_PVH(pai); + pvh_set_flags(pv_h, pvh_get_flags(pv_h) | PVH_FLAG_HASHED); + UNLOCK_PVH(pai); + } +} + +/** + * Loop through every physical page in the system and clear out the HASHED flag + * on every PPL-owned page. That flag is used to keep track of which pages have + * been hashed into the hibernation image during the hibernation entry process. + * + * The HASHED flag needs to be cleared out between hibernation cycles because the + * pv_head_table and pp_attr_table's might have been copied into the hibernation + * image with the HASHED flag set on certain pages. It's important to clear the + * HASHED flag to ensure that the enforcement of all PPL-owned memory being hashed + * into the hibernation image can't be compromised across hibernation cycles. + */ +MARK_AS_PMAP_TEXT void +pmap_clear_ppl_hashed_flag_all(void) +{ + const int last_index = (int)pa_index(vm_last_phys); + pv_entry_t **pv_h = NULL; + + for (int pai = 0; pai < last_index; ++pai) { + pv_h = pai_to_pvh(pai); + + /* Test for PPL-owned pages that have the HASHED flag set in its pv_head_table entry. */ + if ((pvh_get_flags(pv_h) & PVH_FLAG_HASHED) && + (pp_attr_table[pai] & PP_ATTR_MONITOR)) { + LOCK_PVH(pai); + pvh_set_flags(pv_h, pvh_get_flags(pv_h) & ~PVH_FLAG_HASHED); + UNLOCK_PVH(pai); + } + } +} + +/** + * Enforce that all PPL-owned pages were hashed into the hibernation image. The + * ppl_hib driver will call this after all wired pages have been copied into the + * hibernation image. + */ +MARK_AS_PMAP_TEXT void +pmap_check_ppl_hashed_flag_all(void) +{ + const int last_index = (int)pa_index(vm_last_phys); + pv_entry_t **pv_h = NULL; + + for (int pai = 0; pai < last_index; ++pai) { + pv_h = pai_to_pvh(pai); + + /** + * The PMAP stacks are explicitly not saved into the image so skip checking + * the pages that contain the PMAP stacks. + */ + const bool is_pmap_stack = (pai >= (int)pa_index(pmap_stacks_start_pa)) && + (pai < (int)pa_index(pmap_stacks_end_pa)); + + if (!is_pmap_stack && + (pp_attr_table[pai] & PP_ATTR_MONITOR) && + !(pvh_get_flags(pv_h) & PVH_FLAG_HASHED)) { + panic("Found PPL-owned page that was not hashed into the hibernation image: pai %d", pai); + } + } +} + +#endif /* XNU_MONITOR */ + /* * Indicate that a pmap is intended to be used as a nested pmap * within one or more larger address spaces. This must be set @@ -10227,6 +11131,7 @@ pmap_set_nested_internal( { VALIDATE_PMAP(pmap); pmap->nested = TRUE; + pmap_get_pt_ops(pmap)->free_id(pmap); } void @@ -10272,7 +11177,7 @@ pmap_trim_range( pmap, (void*)start, (void*)end); } - nested_region_start = pmap->nested ? 
pmap->nested_region_subord_addr : pmap->nested_region_subord_addr; + nested_region_start = pmap->nested_region_addr; nested_region_end = nested_region_start + pmap->nested_region_size; if (__improbable((start < nested_region_start) || (end > nested_region_end))) { @@ -10290,7 +11195,7 @@ pmap_trim_range( /* Iterate over the range, trying to remove TTEs. */ for (cur = adjusted_start; (cur < adjusted_end) && (cur >= adjusted_start); cur += pt_attr_twig_size(pt_attr)) { - PMAP_LOCK(pmap); + pmap_lock(pmap); tte_p = pmap_tte(pmap, cur); @@ -10301,7 +11206,7 @@ pmap_trim_range( if ((*tte_p & ARM_TTE_TYPE_MASK) == ARM_TTE_TYPE_TABLE) { pte_p = (pt_entry_t *) ttetokv(*tte_p); - if ((ptep_get_ptd(pte_p)->ptd_info[ARM_PT_DESC_INDEX(pte_p)].refcnt == 0) && + if ((ptep_get_info(pte_p)->refcnt == 0) && (pmap != kernel_pmap)) { if (pmap->nested == TRUE) { /* Deallocate for the nested map. */ @@ -10317,7 +11222,7 @@ pmap_trim_range( } done: - PMAP_UNLOCK(pmap); + pmap_unlock(pmap); } if (modified) { @@ -10326,12 +11231,12 @@ done: #if (__ARM_VMSA__ > 7) /* Remove empty L2 TTs. */ - adjusted_start = ((start + ARM_TT_L1_OFFMASK) & ~ARM_TT_L1_OFFMASK); - adjusted_end = end & ~ARM_TT_L1_OFFMASK; + adjusted_start = ((start + pt_attr_ln_offmask(pt_attr, PMAP_TT_L1_LEVEL)) & ~pt_attr_ln_offmask(pt_attr, PMAP_TT_L1_LEVEL)); + adjusted_end = end & ~pt_attr_ln_offmask(pt_attr, PMAP_TT_L1_LEVEL); - for (cur = adjusted_start; (cur < adjusted_end) && (cur >= adjusted_start); cur += ARM_TT_L1_SIZE) { + for (cur = adjusted_start; (cur < adjusted_end) && (cur >= adjusted_start); cur += pt_attr_ln_size(pt_attr, PMAP_TT_L1_LEVEL)) { /* For each L1 entry in our range... */ - PMAP_LOCK(pmap); + pmap_lock(pmap); bool remove_tt1e = true; tt_entry_t * tt1e_p = pmap_tt1e(pmap, cur); @@ -10341,19 +11246,19 @@ done: tt_entry_t tt1e; if (tt1e_p == NULL) { - PMAP_UNLOCK(pmap); + pmap_unlock(pmap); continue; } tt1e = *tt1e_p; if (tt1e == ARM_TTE_TYPE_FAULT) { - PMAP_UNLOCK(pmap); + pmap_unlock(pmap); continue; } tt2e_start = &((tt_entry_t*) phystokv(tt1e & ARM_TTE_TABLE_MASK))[0]; - tt2e_end = &tt2e_start[TTE_PGENTRIES]; + tt2e_end = &tt2e_start[pt_attr_page_size(pt_attr) / sizeof(*tt2e_start)]; for (tt2e_p = tt2e_start; tt2e_p < tt2e_end; tt2e_p++) { if (*tt2e_p != ARM_TTE_TYPE_FAULT) { @@ -10370,18 +11275,17 @@ done: PMAP_UPDATE_TLBS(pmap, cur, cur + PAGE_SIZE, false); } - PMAP_UNLOCK(pmap); + pmap_unlock(pmap); } #endif /* (__ARM_VMSA__ > 7) */ } /* - * pmap_trim_internal(grand, subord, vstart, nstart, size) + * pmap_trim_internal(grand, subord, vstart, size) * * grand = pmap subord is nested in * subord = nested pmap * vstart = start of the used range in grand - * nstart = start of the used range in nstart * size = size of the used range * * Attempts to trim the shared region page tables down to only cover the given @@ -10392,22 +11296,15 @@ pmap_trim_internal( pmap_t grand, pmap_t subord, addr64_t vstart, - addr64_t nstart, uint64_t size) { - addr64_t vend, nend; + addr64_t vend; addr64_t adjust_offmask; if (__improbable(os_add_overflow(vstart, size, &vend))) { panic("%s: grand addr wraps around, " - "grand=%p, subord=%p, vstart=%p, nstart=%p, size=%#llx", - __func__, grand, subord, (void*)vstart, (void*)nstart, size); - } - - if (__improbable(os_add_overflow(nstart, size, &nend))) { - panic("%s: nested addr wraps around, " - "grand=%p, subord=%p, vstart=%p, nstart=%p, size=%#llx", - __func__, grand, subord, (void*)vstart, (void*)nstart, size); + "grand=%p, subord=%p, vstart=%p, size=%#llx", + __func__, grand, subord, 
(void*)vstart, size); } VALIDATE_PMAP(grand); @@ -10415,38 +11312,31 @@ pmap_trim_internal( __unused const pt_attr_t * const pt_attr = pmap_get_pt_attr(grand); - PMAP_LOCK(subord); + pmap_lock(subord); - if (!subord->nested) { + if (__improbable(!subord->nested)) { panic("%s: subord is not nestable, " - "grand=%p, subord=%p, vstart=%p, nstart=%p, size=%#llx", - __func__, grand, subord, (void*)vstart, (void*)nstart, size); + "grand=%p, subord=%p, vstart=%p, size=%#llx", + __func__, grand, subord, (void*)vstart, size); } - if (grand->nested) { + if (__improbable(grand->nested)) { panic("%s: grand is nestable, " - "grand=%p, subord=%p, vstart=%p, nstart=%p, size=%#llx", - __func__, grand, subord, (void*)vstart, (void*)nstart, size); + "grand=%p, subord=%p, vstart=%p, size=%#llx", + __func__, grand, subord, (void*)vstart, size); } - if (grand->nested_pmap != subord) { + if (__improbable(grand->nested_pmap != subord)) { panic("%s: grand->nested != subord, " - "grand=%p, subord=%p, vstart=%p, nstart=%p, size=%#llx", - __func__, grand, subord, (void*)vstart, (void*)nstart, size); + "grand=%p, subord=%p, vstart=%p, size=%#llx", + __func__, grand, subord, (void*)vstart, size); } - if (size != 0) { - if ((vstart < grand->nested_region_grand_addr) || (vend > (grand->nested_region_grand_addr + grand->nested_region_size))) { - panic("%s: grand range not in nested region, " - "grand=%p, subord=%p, vstart=%p, nstart=%p, size=%#llx", - __func__, grand, subord, (void*)vstart, (void*)nstart, size); - } - - if ((nstart < grand->nested_region_grand_addr) || (nend > (grand->nested_region_grand_addr + grand->nested_region_size))) { - panic("%s: subord range not in nested region, " - "grand=%p, subord=%p, vstart=%p, nstart=%p, size=%#llx", - __func__, grand, subord, (void*)vstart, (void*)nstart, size); - } + if (__improbable((size != 0) && + ((vstart < grand->nested_region_addr) || (vend > (grand->nested_region_addr + grand->nested_region_size))))) { + panic("%s: grand range not in nested region, " + "grand=%p, subord=%p, vstart=%p, size=%#llx", + __func__, grand, subord, (void*)vstart, size); } @@ -10455,26 +11345,26 @@ pmap_trim_internal( if (!grand->nested_bounds_set) { /* Inherit the bounds from subord. 
*/ - grand->nested_region_true_start = (subord->nested_region_true_start - grand->nested_region_subord_addr) + grand->nested_region_grand_addr; - grand->nested_region_true_end = (subord->nested_region_true_end - grand->nested_region_subord_addr) + grand->nested_region_grand_addr; + grand->nested_region_true_start = subord->nested_region_true_start; + grand->nested_region_true_end = subord->nested_region_true_end; grand->nested_bounds_set = true; } - PMAP_UNLOCK(subord); + pmap_unlock(subord); return; } if ((!subord->nested_bounds_set) && size) { adjust_offmask = pt_attr_leaf_table_offmask(pt_attr); - subord->nested_region_true_start = nstart; - subord->nested_region_true_end = nend; + subord->nested_region_true_start = vstart; + subord->nested_region_true_end = vend; subord->nested_region_true_start &= ~adjust_offmask; if (__improbable(os_add_overflow(subord->nested_region_true_end, adjust_offmask, &subord->nested_region_true_end))) { panic("%s: padded true end wraps around, " - "grand=%p, subord=%p, vstart=%p, nstart=%p, size=%#llx", - __func__, grand, subord, (void*)vstart, (void*)nstart, size); + "grand=%p, subord=%p, vstart=%p, size=%#llx", + __func__, grand, subord, (void*)vstart, size); } subord->nested_region_true_end &= ~adjust_offmask; @@ -10483,22 +11373,22 @@ pmap_trim_internal( if (subord->nested_bounds_set) { /* Inherit the bounds from subord. */ - grand->nested_region_true_start = (subord->nested_region_true_start - grand->nested_region_subord_addr) + grand->nested_region_grand_addr; - grand->nested_region_true_end = (subord->nested_region_true_end - grand->nested_region_subord_addr) + grand->nested_region_grand_addr; + grand->nested_region_true_start = subord->nested_region_true_start; + grand->nested_region_true_end = subord->nested_region_true_end; grand->nested_bounds_set = true; /* If we know the bounds, we can trim the pmap. */ grand->nested_has_no_bounds_ref = false; - PMAP_UNLOCK(subord); + pmap_unlock(subord); } else { /* Don't trim if we don't know the bounds. */ - PMAP_UNLOCK(subord); + pmap_unlock(subord); return; } /* Trim grand to only cover the given range. */ - pmap_trim_range(grand, grand->nested_region_grand_addr, grand->nested_region_true_start); - pmap_trim_range(grand, grand->nested_region_true_end, (grand->nested_region_grand_addr + grand->nested_region_size)); + pmap_trim_range(grand, grand->nested_region_addr, grand->nested_region_true_start); + pmap_trim_range(grand, grand->nested_region_true_end, (grand->nested_region_addr + grand->nested_region_size)); /* Try to trim subord. */ pmap_trim_subord(subord); @@ -10509,16 +11399,16 @@ pmap_trim_self(pmap_t pmap) { if (pmap->nested_has_no_bounds_ref && pmap->nested_pmap) { /* If we have a no bounds ref, we need to drop it. 
*/ - PMAP_LOCK(pmap->nested_pmap); + pmap_lock_ro(pmap->nested_pmap); pmap->nested_has_no_bounds_ref = false; boolean_t nested_bounds_set = pmap->nested_pmap->nested_bounds_set; - vm_map_offset_t nested_region_true_start = (pmap->nested_pmap->nested_region_true_start - pmap->nested_region_subord_addr) + pmap->nested_region_grand_addr; - vm_map_offset_t nested_region_true_end = (pmap->nested_pmap->nested_region_true_end - pmap->nested_region_subord_addr) + pmap->nested_region_grand_addr; - PMAP_UNLOCK(pmap->nested_pmap); + vm_map_offset_t nested_region_true_start = pmap->nested_pmap->nested_region_true_start; + vm_map_offset_t nested_region_true_end = pmap->nested_pmap->nested_region_true_end; + pmap_unlock_ro(pmap->nested_pmap); if (nested_bounds_set) { - pmap_trim_range(pmap, pmap->nested_region_grand_addr, nested_region_true_start); - pmap_trim_range(pmap, nested_region_true_end, (pmap->nested_region_grand_addr + pmap->nested_region_size)); + pmap_trim_range(pmap, pmap->nested_region_addr, nested_region_true_start); + pmap_trim_range(pmap, nested_region_true_end, (pmap->nested_region_addr + pmap->nested_region_size)); } /* * Try trimming the nested pmap, in case we had the @@ -10541,7 +11431,7 @@ pmap_trim_subord(pmap_t subord) { bool contract_subord = false; - PMAP_LOCK(subord); + pmap_lock(subord); subord->nested_no_bounds_refcnt--; @@ -10550,11 +11440,11 @@ pmap_trim_subord(pmap_t subord) contract_subord = true; } - PMAP_UNLOCK(subord); + pmap_unlock(subord); if (contract_subord) { - pmap_trim_range(subord, subord->nested_region_subord_addr, subord->nested_region_true_start); - pmap_trim_range(subord, subord->nested_region_true_end, subord->nested_region_subord_addr + subord->nested_region_size); + pmap_trim_range(subord, subord->nested_region_addr, subord->nested_region_true_start); + pmap_trim_range(subord, subord->nested_region_true_end, subord->nested_region_addr + subord->nested_region_size); } } @@ -10563,27 +11453,26 @@ pmap_trim( pmap_t grand, pmap_t subord, addr64_t vstart, - addr64_t nstart, uint64_t size) { #if XNU_MONITOR - pmap_trim_ppl(grand, subord, vstart, nstart, size); + pmap_trim_ppl(grand, subord, vstart, size); pmap_ledger_check_balance(grand); pmap_ledger_check_balance(subord); #else - pmap_trim_internal(grand, subord, vstart, nstart, size); + pmap_trim_internal(grand, subord, vstart, size); #endif } #if HAS_APPLE_PAC && XNU_MONITOR static void * -pmap_sign_user_ptr_internal(void *value, ptrauth_key key, uint64_t discriminator) +pmap_sign_user_ptr_internal(void *value, ptrauth_key key, uint64_t discriminator, uint64_t jop_key) { void *res = NULL; boolean_t current_intr_state = ml_set_interrupts_enabled(FALSE); - ml_set_kernelkey_enabled(FALSE); + uint64_t saved_jop_state = ml_enable_user_jop_key(jop_key); switch (key) { case ptrauth_key_asia: res = ptrauth_sign_unauthenticated(value, ptrauth_key_asia, discriminator); @@ -10594,7 +11483,7 @@ pmap_sign_user_ptr_internal(void *value, ptrauth_key key, uint64_t discriminator default: panic("attempt to sign user pointer without process independent key"); } - ml_set_kernelkey_enabled(TRUE); + ml_disable_user_jop_key(jop_key, saved_jop_state); ml_set_interrupts_enabled(current_intr_state); @@ -10602,13 +11491,13 @@ pmap_sign_user_ptr_internal(void *value, ptrauth_key key, uint64_t discriminator } void * -pmap_sign_user_ptr(void *value, ptrauth_key key, uint64_t discriminator) +pmap_sign_user_ptr(void *value, ptrauth_key key, uint64_t discriminator, uint64_t jop_key) { - return pmap_sign_user_ptr_internal(value, key, 
discriminator); + return pmap_sign_user_ptr_internal(value, key, discriminator, jop_key); } static void * -pmap_auth_user_ptr_internal(void *value, ptrauth_key key, uint64_t discriminator) +pmap_auth_user_ptr_internal(void *value, ptrauth_key key, uint64_t discriminator, uint64_t jop_key) { if ((key != ptrauth_key_asia) && (key != ptrauth_key_asda)) { panic("attempt to auth user pointer without process independent key"); @@ -10617,9 +11506,9 @@ pmap_auth_user_ptr_internal(void *value, ptrauth_key key, uint64_t discriminator void *res = NULL; boolean_t current_intr_state = ml_set_interrupts_enabled(FALSE); - ml_set_kernelkey_enabled(FALSE); + uint64_t saved_jop_state = ml_enable_user_jop_key(jop_key); res = ml_auth_ptr_unchecked(value, key, discriminator); - ml_set_kernelkey_enabled(TRUE); + ml_disable_user_jop_key(jop_key, saved_jop_state); ml_set_interrupts_enabled(current_intr_state); @@ -10627,9 +11516,9 @@ pmap_auth_user_ptr_internal(void *value, ptrauth_key key, uint64_t discriminator } void * -pmap_auth_user_ptr(void *value, ptrauth_key key, uint64_t discriminator) +pmap_auth_user_ptr(void *value, ptrauth_key key, uint64_t discriminator, uint64_t jop_key) { - return pmap_auth_user_ptr_internal(value, key, discriminator); + return pmap_auth_user_ptr_internal(value, key, discriminator, jop_key); } #endif /* HAS_APPLE_PAC && XNU_MONITOR */ @@ -10639,7 +11528,6 @@ pmap_auth_user_ptr(void *value, ptrauth_key key, uint64_t discriminator) * grand = the pmap that we will nest subord into * subord = the pmap that goes into the grand * vstart = start of range in pmap to be inserted - * nstart = start of range in pmap nested pmap * size = Size of nest area (up to 16TB) * * Inserts a pmap into another. This is used to implement shared segments. @@ -10651,29 +11539,27 @@ pmap_nest_internal( pmap_t grand, pmap_t subord, addr64_t vstart, - addr64_t nstart, uint64_t size) { kern_return_t kr = KERN_FAILURE; - vm_map_offset_t vaddr, nvaddr; + vm_map_offset_t vaddr; tt_entry_t *stte_p; tt_entry_t *gtte_p; unsigned int i; unsigned int num_tte; unsigned int nested_region_asid_bitmap_size; unsigned int* nested_region_asid_bitmap; - int expand_options = 0; + int expand_options = 0; + bool deref_subord = true; + pmap_t __ptrauth_only subord_addr; - addr64_t vend, nend; + addr64_t vend; if (__improbable(os_add_overflow(vstart, size, &vend))) { panic("%s: %p grand addr wraps around: 0x%llx + 0x%llx", __func__, grand, vstart, size); } - if (__improbable(os_add_overflow(nstart, size, &nend))) { - panic("%s: %p nested addr wraps around: 0x%llx + 0x%llx", __func__, subord, nstart, size); - } VALIDATE_PMAP(grand); - VALIDATE_PMAP(subord); + pmap_reference_internal(subord); // This call will also validate subord __unused const pt_attr_t * const pt_attr = pmap_get_pt_attr(grand); assert(pmap_get_pt_attr(subord) == pt_attr); @@ -10682,71 +11568,82 @@ pmap_nest_internal( expand_options |= PMAP_TT_ALLOCATE_NOWAIT; #endif - if (((size | vstart | nstart) & (pt_attr_leaf_table_offmask(pt_attr))) != 0x0ULL) { - panic("pmap_nest() pmap %p unaligned nesting request 0x%llx, 0x%llx, 0x%llx\n", grand, vstart, nstart, size); + if (__improbable(((size | vstart) & (pt_attr_leaf_table_offmask(pt_attr))) != 0x0ULL)) { + panic("pmap_nest() pmap %p unaligned nesting request 0x%llx, 0x%llx\n", grand, vstart, size); } - if (!subord->nested) { + if (__improbable(!subord->nested)) { panic("%s: subordinate pmap %p is not nestable", __func__, subord); } - if ((grand->nested_pmap != PMAP_NULL) && (grand->nested_pmap != subord)) { - 
panic("pmap_nest() pmap %p has a nested pmap\n", grand); - } - if (subord->nested_region_asid_bitmap == NULL) { nested_region_asid_bitmap_size = (unsigned int)(size >> pt_attr_twig_shift(pt_attr)) / (sizeof(unsigned int) * NBBY); #if XNU_MONITOR pmap_paddr_t pa = 0; - if ((nested_region_asid_bitmap_size * sizeof(unsigned int)) > PAGE_SIZE) { + if (__improbable((nested_region_asid_bitmap_size * sizeof(unsigned int)) > PAGE_SIZE)) { panic("%s: nested_region_asid_bitmap_size=%u will not fit in a page, " - "grand=%p, subord=%p, vstart=0x%llx, nstart=0x%llx, size=%llx", - __FUNCTION__, - nested_region_asid_bitmap_size, - grand, subord, vstart, nstart, size); + "grand=%p, subord=%p, vstart=0x%llx, size=%llx", + __FUNCTION__, nested_region_asid_bitmap_size, + grand, subord, vstart, size); } - kr = pmap_pages_alloc(&pa, PAGE_SIZE, PMAP_PAGES_ALLOCATE_NOWAIT); + kr = pmap_pages_alloc_zeroed(&pa, PAGE_SIZE, PMAP_PAGES_ALLOCATE_NOWAIT); if (kr != KERN_SUCCESS) { - return kr; + goto nest_cleanup; } assert(pa); nested_region_asid_bitmap = (unsigned int *)phystokv(pa); #else - nested_region_asid_bitmap = kalloc(nested_region_asid_bitmap_size * sizeof(unsigned int)); + nested_region_asid_bitmap = kheap_alloc(KHEAP_DATA_BUFFERS, + nested_region_asid_bitmap_size * sizeof(unsigned int), + Z_WAITOK | Z_ZERO); #endif - bzero(nested_region_asid_bitmap, nested_region_asid_bitmap_size * sizeof(unsigned int)); - PMAP_LOCK(subord); + pmap_lock(subord); if (subord->nested_region_asid_bitmap == NULL) { - subord->nested_region_asid_bitmap = nested_region_asid_bitmap; subord->nested_region_asid_bitmap_size = nested_region_asid_bitmap_size; - subord->nested_region_subord_addr = nstart; + subord->nested_region_addr = vstart; subord->nested_region_size = (mach_vm_offset_t) size; + + /** + * Ensure that the rest of the subord->nested_region_* fields are + * initialized and visible before setting the nested_region_asid_bitmap + * field (which is used as the flag to say that the rest are initialized). + */ + __builtin_arm_dmb(DMB_ISHST); + subord->nested_region_asid_bitmap = nested_region_asid_bitmap; nested_region_asid_bitmap = NULL; } - PMAP_UNLOCK(subord); + pmap_unlock(subord); if (nested_region_asid_bitmap != NULL) { #if XNU_MONITOR pmap_pages_free(kvtophys((vm_offset_t)nested_region_asid_bitmap), PAGE_SIZE); #else - kfree(nested_region_asid_bitmap, nested_region_asid_bitmap_size * sizeof(unsigned int)); + kheap_free(KHEAP_DATA_BUFFERS, nested_region_asid_bitmap, + nested_region_asid_bitmap_size * sizeof(unsigned int)); #endif } } - if ((subord->nested_region_subord_addr + subord->nested_region_size) < nend) { + + /** + * Ensure subsequent reads of the subord->nested_region_* fields don't get + * speculated before their initialization. + */ + __builtin_arm_dmb(DMB_ISHLD); + + if ((subord->nested_region_addr + subord->nested_region_size) < vend) { uint64_t new_size; unsigned int new_nested_region_asid_bitmap_size; unsigned int* new_nested_region_asid_bitmap; nested_region_asid_bitmap = NULL; nested_region_asid_bitmap_size = 0; - new_size = nend - subord->nested_region_subord_addr; + new_size = vend - subord->nested_region_addr; /* We explicitly add 1 to the bitmap allocation size in order to avoid issues with truncation. 
*/ new_nested_region_asid_bitmap_size = (unsigned int)((new_size >> pt_attr_twig_shift(pt_attr)) / (sizeof(unsigned int) * NBBY)) + 1; @@ -10754,30 +11651,31 @@ pmap_nest_internal( #if XNU_MONITOR pmap_paddr_t pa = 0; - if ((new_nested_region_asid_bitmap_size * sizeof(unsigned int)) > PAGE_SIZE) { + if (__improbable((new_nested_region_asid_bitmap_size * sizeof(unsigned int)) > PAGE_SIZE)) { panic("%s: new_nested_region_asid_bitmap_size=%u will not fit in a page, " - "grand=%p, subord=%p, vstart=0x%llx, nstart=0x%llx, size=%llx", - __FUNCTION__, - new_nested_region_asid_bitmap_size, - grand, subord, vstart, nstart, size); + "grand=%p, subord=%p, vstart=0x%llx, new_size=%llx", + __FUNCTION__, new_nested_region_asid_bitmap_size, + grand, subord, vstart, new_size); } - kr = pmap_pages_alloc(&pa, PAGE_SIZE, PMAP_PAGES_ALLOCATE_NOWAIT); + kr = pmap_pages_alloc_zeroed(&pa, PAGE_SIZE, PMAP_PAGES_ALLOCATE_NOWAIT); if (kr != KERN_SUCCESS) { - return kr; + goto nest_cleanup; } assert(pa); new_nested_region_asid_bitmap = (unsigned int *)phystokv(pa); #else - new_nested_region_asid_bitmap = kalloc(new_nested_region_asid_bitmap_size * sizeof(unsigned int)); + new_nested_region_asid_bitmap = kheap_alloc(KHEAP_DATA_BUFFERS, + new_nested_region_asid_bitmap_size * sizeof(unsigned int), + Z_WAITOK | Z_ZERO); #endif - PMAP_LOCK(subord); + pmap_lock(subord); if (subord->nested_region_size < new_size) { - bzero(new_nested_region_asid_bitmap, new_nested_region_asid_bitmap_size * sizeof(unsigned int)); - bcopy(subord->nested_region_asid_bitmap, new_nested_region_asid_bitmap, subord->nested_region_asid_bitmap_size); + bcopy(subord->nested_region_asid_bitmap, + new_nested_region_asid_bitmap, subord->nested_region_asid_bitmap_size); nested_region_asid_bitmap_size = subord->nested_region_asid_bitmap_size; nested_region_asid_bitmap = subord->nested_region_asid_bitmap; subord->nested_region_asid_bitmap = new_nested_region_asid_bitmap; @@ -10785,24 +11683,41 @@ pmap_nest_internal( subord->nested_region_size = new_size; new_nested_region_asid_bitmap = NULL; } - PMAP_UNLOCK(subord); - if (nested_region_asid_bitmap != NULL) + pmap_unlock(subord); + if (nested_region_asid_bitmap != NULL) { #if XNU_MONITOR - {pmap_pages_free(kvtophys((vm_offset_t)nested_region_asid_bitmap), PAGE_SIZE);} + pmap_pages_free(kvtophys((vm_offset_t)nested_region_asid_bitmap), PAGE_SIZE); #else - { kfree(nested_region_asid_bitmap, nested_region_asid_bitmap_size * sizeof(unsigned int));} + kheap_free(KHEAP_DATA_BUFFERS, nested_region_asid_bitmap, + nested_region_asid_bitmap_size * sizeof(unsigned int)); #endif - if (new_nested_region_asid_bitmap != NULL) + } + if (new_nested_region_asid_bitmap != NULL) { #if XNU_MONITOR - {pmap_pages_free(kvtophys((vm_offset_t)new_nested_region_asid_bitmap), PAGE_SIZE);} + pmap_pages_free(kvtophys((vm_offset_t)new_nested_region_asid_bitmap), PAGE_SIZE); #else - { kfree(new_nested_region_asid_bitmap, new_nested_region_asid_bitmap_size * sizeof(unsigned int));} + kheap_free(KHEAP_DATA_BUFFERS, new_nested_region_asid_bitmap, + new_nested_region_asid_bitmap_size * sizeof(unsigned int)); #endif + } } - PMAP_LOCK(subord); - if (grand->nested_pmap == PMAP_NULL) { - grand->nested_pmap = subord; + pmap_lock(subord); + +#if __has_feature(ptrauth_calls) + subord_addr = ptrauth_sign_unauthenticated(subord, + ptrauth_key_process_independent_data, + ptrauth_blend_discriminator(&grand->nested_pmap, ptrauth_string_discriminator("pmap.nested_pmap"))); +#else + subord_addr = subord; +#endif // __has_feature(ptrauth_calls) + + if 
(os_atomic_cmpxchg(&grand->nested_pmap, PMAP_NULL, subord_addr, relaxed)) { + /* + * If this is grand's first nesting operation, keep the reference on subord. + * It will be released by pmap_destroy_internal() when grand is destroyed. + */ + deref_subord = false; if (!subord->nested_bounds_set) { /* @@ -10813,123 +11728,120 @@ pmap_nest_internal( subord->nested_no_bounds_refcnt++; } - grand->nested_region_grand_addr = vstart; - grand->nested_region_subord_addr = nstart; + grand->nested_region_addr = vstart; grand->nested_region_size = (mach_vm_offset_t) size; } else { - if ((grand->nested_region_grand_addr > vstart)) { + if (__improbable(grand->nested_pmap != subord)) { + panic("pmap_nest() pmap %p has a nested pmap\n", grand); + } else if (__improbable(grand->nested_region_addr > vstart)) { panic("pmap_nest() pmap %p : attempt to nest outside the nested region\n", grand); - } else if ((grand->nested_region_grand_addr + grand->nested_region_size) < vend) { - grand->nested_region_size = (mach_vm_offset_t)(vstart - grand->nested_region_grand_addr + size); + } else if ((grand->nested_region_addr + grand->nested_region_size) < vend) { + grand->nested_region_size = (mach_vm_offset_t)(vstart - grand->nested_region_addr + size); } } #if (__ARM_VMSA__ == 7) - nvaddr = (vm_map_offset_t) nstart; vaddr = (vm_map_offset_t) vstart; num_tte = size >> ARM_TT_L1_SHIFT; for (i = 0; i < num_tte; i++) { - if (((subord->nested_region_true_start) > nvaddr) || ((subord->nested_region_true_end) <= nvaddr)) { + if (((subord->nested_region_true_start) > vaddr) || ((subord->nested_region_true_end) <= vaddr)) { goto expand_next; } - stte_p = pmap_tte(subord, nvaddr); + stte_p = pmap_tte(subord, vaddr); if ((stte_p == (tt_entry_t *)NULL) || (((*stte_p) & ARM_TTE_TYPE_MASK) != ARM_TTE_TYPE_TABLE)) { - PMAP_UNLOCK(subord); - kr = pmap_expand(subord, nvaddr, expand_options, PMAP_TT_L2_LEVEL); + pmap_unlock(subord); + kr = pmap_expand(subord, vaddr, expand_options, PMAP_TT_L2_LEVEL); if (kr != KERN_SUCCESS) { - PMAP_LOCK(grand); + pmap_lock(grand); goto done; } - PMAP_LOCK(subord); + pmap_lock(subord); } - PMAP_UNLOCK(subord); - PMAP_LOCK(grand); + pmap_unlock(subord); + pmap_lock(grand); stte_p = pmap_tte(grand, vaddr); if (stte_p == (tt_entry_t *)NULL) { - PMAP_UNLOCK(grand); + pmap_unlock(grand); kr = pmap_expand(grand, vaddr, expand_options, PMAP_TT_L1_LEVEL); if (kr != KERN_SUCCESS) { - PMAP_LOCK(grand); + pmap_lock(grand); goto done; } } else { - PMAP_UNLOCK(grand); + pmap_unlock(grand); kr = KERN_SUCCESS; } - PMAP_LOCK(subord); + pmap_lock(subord); expand_next: - nvaddr += ARM_TT_L1_SIZE; vaddr += ARM_TT_L1_SIZE; } #else - nvaddr = (vm_map_offset_t) nstart; + vaddr = (vm_map_offset_t) vstart; num_tte = (unsigned int)(size >> pt_attr_twig_shift(pt_attr)); for (i = 0; i < num_tte; i++) { - if (((subord->nested_region_true_start) > nvaddr) || ((subord->nested_region_true_end) <= nvaddr)) { + if (((subord->nested_region_true_start) > vaddr) || ((subord->nested_region_true_end) <= vaddr)) { goto expand_next; } - stte_p = pmap_tte(subord, nvaddr); + stte_p = pmap_tte(subord, vaddr); if (stte_p == PT_ENTRY_NULL || *stte_p == ARM_TTE_EMPTY) { - PMAP_UNLOCK(subord); - kr = pmap_expand(subord, nvaddr, expand_options, PMAP_TT_LEAF_LEVEL); + pmap_unlock(subord); + kr = pmap_expand(subord, vaddr, expand_options, pt_attr_leaf_level(pt_attr)); if (kr != KERN_SUCCESS) { - PMAP_LOCK(grand); + pmap_lock(grand); goto done; } - PMAP_LOCK(subord); + pmap_lock(subord); } expand_next: - nvaddr += pt_attr_twig_size(pt_attr); + vaddr += 
pt_attr_twig_size(pt_attr); } #endif - PMAP_UNLOCK(subord); + pmap_unlock(subord); /* * copy tte's from subord pmap into grand pmap */ - PMAP_LOCK(grand); - nvaddr = (vm_map_offset_t) nstart; + pmap_lock(grand); vaddr = (vm_map_offset_t) vstart; #if (__ARM_VMSA__ == 7) for (i = 0; i < num_tte; i++) { - if (((subord->nested_region_true_start) > nvaddr) || ((subord->nested_region_true_end) <= nvaddr)) { + if (((subord->nested_region_true_start) > vaddr) || ((subord->nested_region_true_end) <= vaddr)) { goto nest_next; } - stte_p = pmap_tte(subord, nvaddr); + stte_p = pmap_tte(subord, vaddr); gtte_p = pmap_tte(grand, vaddr); *gtte_p = *stte_p; nest_next: - nvaddr += ARM_TT_L1_SIZE; vaddr += ARM_TT_L1_SIZE; } #else for (i = 0; i < num_tte; i++) { - if (((subord->nested_region_true_start) > nvaddr) || ((subord->nested_region_true_end) <= nvaddr)) { + if (((subord->nested_region_true_start) > vaddr) || ((subord->nested_region_true_end) <= vaddr)) { goto nest_next; } - stte_p = pmap_tte(subord, nvaddr); + stte_p = pmap_tte(subord, vaddr); gtte_p = pmap_tte(grand, vaddr); if (gtte_p == PT_ENTRY_NULL) { - PMAP_UNLOCK(grand); - kr = pmap_expand(grand, vaddr, expand_options, PMAP_TT_TWIG_LEVEL); - PMAP_LOCK(grand); + pmap_unlock(grand); + kr = pmap_expand(grand, vaddr, expand_options, pt_attr_twig_level(pt_attr)); + pmap_lock(grand); if (kr != KERN_SUCCESS) { goto done; @@ -10941,7 +11853,6 @@ nest_next: nest_next: vaddr += pt_attr_twig_size(pt_attr); - nvaddr += pt_attr_twig_size(pt_attr); } #endif @@ -10950,16 +11861,15 @@ done: stte_p = pmap_tte(grand, vstart); FLUSH_PTE_RANGE_STRONG(stte_p, stte_p + num_tte); - -#if (__ARM_VMSA__ > 7) - /* - * check for overflow on LP64 arch - */ - assert((size & 0xFFFFFFFF00000000ULL) == 0); -#endif PMAP_UPDATE_TLBS(grand, vstart, vend, false); - PMAP_UNLOCK(grand); + pmap_unlock(grand); +#if XNU_MONITOR +nest_cleanup: +#endif + if (deref_subord) { + pmap_destroy_internal(subord); + } return kr; } @@ -10968,7 +11878,6 @@ pmap_nest( pmap_t grand, pmap_t subord, addr64_t vstart, - addr64_t nstart, uint64_t size) { kern_return_t kr = KERN_FAILURE; @@ -10978,14 +11887,14 @@ pmap_nest( VM_KERNEL_ADDRHIDE(vstart)); #if XNU_MONITOR - while ((kr = pmap_nest_ppl(grand, subord, vstart, nstart, size)) == KERN_RESOURCE_SHORTAGE) { - pmap_alloc_page_for_ppl(); + while ((kr = pmap_nest_ppl(grand, subord, vstart, size)) == KERN_RESOURCE_SHORTAGE) { + pmap_alloc_page_for_ppl(0); } pmap_ledger_check_balance(grand); pmap_ledger_check_balance(subord); #else - kr = pmap_nest_internal(grand, subord, vstart, nstart, size); + kr = pmap_nest_internal(grand, subord, vstart, size); #endif PMAP_TRACE(2, PMAP_CODE(PMAP__NEST) | DBG_FUNC_END, kr); @@ -11045,14 +11954,14 @@ pmap_unnest_options_internal( panic("%s: %p has no nested pmap", __func__, grand); } - if ((vaddr < grand->nested_region_grand_addr) || (vend > (grand->nested_region_grand_addr + grand->nested_region_size))) { + if ((vaddr < grand->nested_region_addr) || (vend > (grand->nested_region_addr + grand->nested_region_size))) { panic("%s: %p: unnest request to region not-fully-nested region [%p, %p)", __func__, grand, (void*)vaddr, (void*)vend); } - PMAP_LOCK(grand->nested_pmap); + pmap_lock(grand->nested_pmap); - start = vaddr - grand->nested_region_grand_addr + grand->nested_region_subord_addr; - start_index = (unsigned int)((vaddr - grand->nested_region_grand_addr) >> pt_attr_twig_shift(pt_attr)); + start = vaddr; + start_index = (unsigned int)((vaddr - grand->nested_region_addr) >> pt_attr_twig_shift(pt_attr)); max_index = 
(unsigned int)(start_index + (size >> pt_attr_twig_shift(pt_attr))); num_tte = (unsigned int)(size >> pt_attr_twig_shift(pt_attr)); @@ -11113,15 +12022,15 @@ pmap_unnest_options_internal( } FLUSH_PTE_RANGE_STRONG(bpte, epte); - flush_mmu_tlb_region_asid_async(start, (unsigned)size, grand->nested_pmap); } + flush_mmu_tlb_region_asid_async(vaddr, (unsigned)size, grand->nested_pmap); sync_tlb_flush(); - PMAP_UNLOCK(grand->nested_pmap); + pmap_unlock(grand->nested_pmap); } - PMAP_LOCK(grand); + pmap_lock(grand); /* * invalidate all pdes for segment at vaddr in pmap grand @@ -11150,7 +12059,7 @@ pmap_unnest_options_internal( FLUSH_PTE_RANGE_STRONG(tte_p, tte_p + num_tte); PMAP_UPDATE_TLBS(grand, start, vend, false); - PMAP_UNLOCK(grand); + pmap_unlock(grand); return KERN_SUCCESS; } @@ -11206,31 +12115,6 @@ pmap_disable_NX( } #endif -void -pt_fake_zone_init( - int zone_index) -{ - pt_fake_zone_index = zone_index; -} - -void -pt_fake_zone_info( - int *count, - vm_size_t *cur_size, vm_size_t *max_size, vm_size_t *elem_size, vm_size_t *alloc_size, - uint64_t *sum_size, int *collectable, int *exhaustable, int *caller_acct) -{ - *count = inuse_pmap_pages_count; - *cur_size = PAGE_SIZE * (inuse_pmap_pages_count); - *max_size = PAGE_SIZE * (inuse_pmap_pages_count + vm_page_inactive_count + vm_page_active_count + vm_page_free_count); - *elem_size = PAGE_SIZE; - *alloc_size = PAGE_SIZE; - *sum_size = (alloc_pmap_pages_count) * PAGE_SIZE; - - *collectable = 1; - *exhaustable = 0; - *caller_acct = 1; -} - /* * flush a range of hardware TLB entries. * NOTE: assumes the smallest TLB entry in use will be for @@ -11241,7 +12125,7 @@ pt_fake_zone_info( #if __ARM_RANGE_TLBI__ #define ARM64_RANGE_TLB_FLUSH_THRESHOLD 1 -#define ARM64_FULL_TLB_FLUSH_THRESHOLD ARM64_16K_TLB_RANGE_PAGES +#define ARM64_FULL_TLB_FLUSH_THRESHOLD ARM64_TLB_RANGE_PAGES #else #define ARM64_FULL_TLB_FLUSH_THRESHOLD 256 #endif // __ARM_RANGE_TLBI__ @@ -11249,7 +12133,7 @@ pt_fake_zone_info( static void flush_mmu_tlb_region_asid_async( vm_offset_t va, - unsigned length, + size_t length, pmap_t pmap) { #if (__ARM_VMSA__ == 7) @@ -11288,7 +12172,9 @@ flush_mmu_tlb_region_asid_async( flush_mmu_tlb_entries_async(va, end); #else - unsigned npages = length >> pt_attr_leaf_shift(pmap_get_pt_attr(pmap)); + unsigned long pmap_page_shift = pt_attr_leaf_shift(pmap_get_pt_attr(pmap)); + const uint64_t pmap_page_size = 1ULL << pmap_page_shift; + ppnum_t npages = (ppnum_t)(length >> pmap_page_shift); uint32_t asid; asid = pmap->hw_asid; @@ -11308,7 +12194,7 @@ flush_mmu_tlb_region_asid_async( } #if __ARM_RANGE_TLBI__ if (npages > ARM64_RANGE_TLB_FLUSH_THRESHOLD) { - va = generate_rtlbi_param(npages, asid, va); + va = generate_rtlbi_param(npages, asid, va, pmap_page_shift); if (pmap->nested == TRUE) { flush_mmu_tlb_allrange_async(va); } else { @@ -11319,10 +12205,11 @@ flush_mmu_tlb_region_asid_async( #endif vm_offset_t end = tlbi_asid(asid) | tlbi_addr(va + length); va = tlbi_asid(asid) | tlbi_addr(va); + if (pmap->nested == TRUE) { - flush_mmu_tlb_allentries_async(va, end); + flush_mmu_tlb_allentries_async(va, end, pmap_page_size); } else { - flush_mmu_tlb_entries_async(va, end); + flush_mmu_tlb_entries_async(va, end, pmap_page_size); } #endif @@ -11706,6 +12593,27 @@ pmap_update_cache_attributes_locked( PMAP_TRACE(2, PMAP_CODE(PMAP__UPDATE_CACHING) | DBG_FUNC_START, ppnum, attributes); + if (pmap_panic_dev_wimg_on_managed) { + switch (attributes & VM_WIMG_MASK) { + case VM_WIMG_IO: // nGnRnE + case VM_WIMG_POSTED: // nGnRE + /* supported on DRAM, but 
slow, so we disallow */ + + case VM_WIMG_POSTED_REORDERED: // nGRE + case VM_WIMG_POSTED_COMBINED_REORDERED: // GRE + /* unsupported on DRAM */ + + panic("%s: trying to use unsupported VM_WIMG type for managed page, VM_WIMG=%x, ppnum=%#x", + __FUNCTION__, attributes & VM_WIMG_MASK, ppnum); + break; + + default: + /* not device type memory, all good */ + + break; + } + } + #if __ARM_PTE_PHYSMAP__ vm_offset_t kva = phystokv(phys); pte_p = pmap_pte(kernel_pmap, kva); @@ -11761,7 +12669,8 @@ pmap_update_cache_attributes_locked( tmplate |= pmap_get_pt_ops(pmap)->wimg_to_pte(attributes); WRITE_PTE_STRONG(pte_p, tmplate); - pmap_get_pt_ops(pmap)->flush_tlb_region_async(va, PAGE_SIZE, pmap); + pmap_get_pt_ops(pmap)->flush_tlb_region_async(va, + pt_attr_page_size(pmap_get_pt_attr(pmap)) * PAGE_RATIO, pmap); tlb_flush_needed = TRUE; #ifdef PVH_FLAG_IOMMU @@ -11773,29 +12682,41 @@ cache_skip_pve: } } if (tlb_flush_needed) { - sync_tlb_flush(); + /* For targets that distinguish between mild and strong DSB, mild DSB + * will not drain the prefetcher. This can lead to prefetch-driven + * cache fills that defeat the uncacheable requirement of the RT memory type. + * In those cases, strong DSB must instead be employed to drain the prefetcher. */ + pmap_sync_tlb((attributes & VM_WIMG_MASK) == VM_WIMG_RT); } PMAP_TRACE(2, PMAP_CODE(PMAP__UPDATE_CACHING) | DBG_FUNC_END, ppnum, attributes); } -#if (__ARM_VMSA__ == 7) -vm_map_address_t -pmap_create_sharedpage( - void) +#if (__ARM_VMSA__ == 7) +void +pmap_create_sharedpages(vm_map_address_t *kernel_data_addr, vm_map_address_t *kernel_text_addr, + vm_map_address_t *user_commpage_addr) { pmap_paddr_t pa; kern_return_t kr; - (void) pmap_pages_alloc(&pa, PAGE_SIZE, 0); - memset((char *) phystokv(pa), 0, PAGE_SIZE); + assert(kernel_data_addr != NULL); + assert(kernel_text_addr != NULL); + assert(user_commpage_addr != NULL); + + (void) pmap_pages_alloc_zeroed(&pa, PAGE_SIZE, 0); kr = pmap_enter(kernel_pmap, _COMM_PAGE_BASE_ADDRESS, atop(pa), VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE, VM_WIMG_USE_DEFAULT, TRUE); assert(kr == KERN_SUCCESS); - return (vm_map_address_t)phystokv(pa); + *kernel_data_addr = phystokv(pa); + // We don't have PFZ for 32 bit arm, always NULL + *kernel_text_addr = 0; + *user_commpage_addr = 0; } -#else + +#else /* __ARM_VMSA__ == 7 */ + static void pmap_update_tt3e( pmap_t pmap, @@ -11820,30 +12741,48 @@ pmap_update_tt3e( | ARM_PTE_SH(SH_INNER_MEMORY) | ARM_PTE_NX \ | ARM_PTE_PNX | ARM_PTE_AP(AP_RORO) | ARM_PTE_AF) -vm_map_address_t -pmap_create_sharedpage( - void - ) +/* Note absence of non-global bit and no-execute bit. 
*/ +#define PMAP_COMM_PAGE_TEXT_PTE_TEMPLATE (ARM_PTE_TYPE_VALID \ + | ARM_PTE_ATTRINDX(CACHE_ATTRINDX_WRITEBACK) \ + | ARM_PTE_SH(SH_INNER_MEMORY) | ARM_PTE_PNX \ + | ARM_PTE_AP(AP_RORO) | ARM_PTE_AF) + +void +pmap_create_sharedpages(vm_map_address_t *kernel_data_addr, vm_map_address_t *kernel_text_addr, + vm_map_address_t *user_text_addr) { - kern_return_t kr; - pmap_paddr_t pa = 0; + kern_return_t kr; + pmap_paddr_t data_pa = 0; // data address + pmap_paddr_t text_pa = 0; // text address + + *kernel_data_addr = 0; + *kernel_text_addr = 0; + *user_text_addr = 0; #if XNU_MONITOR - pa = pmap_alloc_page_for_kern(); - assert(pa); -#else + data_pa = pmap_alloc_page_for_kern(0); + assert(data_pa); + memset((char *) phystokv(data_pa), 0, PAGE_SIZE); +#if CONFIG_ARM_PFZ + text_pa = pmap_alloc_page_for_kern(0); + assert(text_pa); + memset((char *) phystokv(text_pa), 0, PAGE_SIZE); +#endif - (void) pmap_pages_alloc(&pa, PAGE_SIZE, 0); +#else /* XNU_MONITOR */ + (void) pmap_pages_alloc_zeroed(&data_pa, PAGE_SIZE, 0); +#if CONFIG_ARM_PFZ + (void) pmap_pages_alloc_zeroed(&text_pa, PAGE_SIZE, 0); #endif - memset((char *) phystokv(pa), 0, PAGE_SIZE); +#endif /* XNU_MONITOR */ #ifdef CONFIG_XNUPOST /* * The kernel pmap maintains a user accessible mapping of the commpage * to test PAN. */ - kr = pmap_enter(kernel_pmap, _COMM_HIGH_PAGE64_BASE_ADDRESS, (ppnum_t)atop(pa), VM_PROT_READ, VM_PROT_NONE, VM_WIMG_USE_DEFAULT, TRUE); + kr = pmap_enter(kernel_pmap, _COMM_HIGH_PAGE64_BASE_ADDRESS, (ppnum_t)atop(data_pa), VM_PROT_READ, VM_PROT_NONE, VM_WIMG_USE_DEFAULT, TRUE); assert(kr == KERN_SUCCESS); /* @@ -11862,7 +12801,7 @@ pmap_create_sharedpage( * create a dedicated pmap for the shared page. We forcibly nest the * translation tables from this pmap into other pmaps. The level we * will nest at depends on the MMU configuration (page size, TTBR range, - * etc). + * etc). Typically, this is at L1 for 4K tasks and L2 for 16K tasks. * * Note that this is NOT "the nested pmap" (which is used to nest the * shared cache). @@ -11870,32 +12809,96 @@ pmap_create_sharedpage( * Note that we update parameters of the entry for our unique needs (NG * entry, etc.). */ - sharedpage_pmap = pmap_create_options(NULL, 0x0, 0); - assert(sharedpage_pmap != NULL); + sharedpage_pmap_default = pmap_create_options(NULL, 0x0, 0); + assert(sharedpage_pmap_default != NULL); + + /* The user 64-bit mapping... */ + kr = pmap_enter_addr(sharedpage_pmap_default, _COMM_PAGE64_BASE_ADDRESS, data_pa, VM_PROT_READ, VM_PROT_NONE, VM_WIMG_USE_DEFAULT, TRUE); + assert(kr == KERN_SUCCESS); + pmap_update_tt3e(sharedpage_pmap_default, _COMM_PAGE64_BASE_ADDRESS, PMAP_COMM_PAGE_PTE_TEMPLATE); +#if CONFIG_ARM_PFZ + /* User mapping of comm page text section for 64 bit mapping only + * + * We don't insert it into the 32 bit mapping because we don't want 32 bit + * user processes to get this page mapped in, they should never call into + * this page. + * + * The data comm page is in a pre-reserved L3 VA range and the text commpage + * is slid in the same L3 as the data commpage. It is either outside the + * max of user VA or is pre-reserved in the vm_map_exec(). This means that + * it is reserved and unavailable to mach VM for future mappings. 
+ */ + const pt_attr_t * const pt_attr = pmap_get_pt_attr(sharedpage_pmap_default); + int num_ptes = pt_attr_leaf_size(pt_attr) >> PTE_SHIFT; + + vm_map_address_t commpage_text_va = 0; + + do { + int text_leaf_index = random() % num_ptes; + + // Generate a VA for the commpage text with the same root and twig index as data + // comm page, but with new leaf index we've just generated. + commpage_text_va = (_COMM_PAGE64_BASE_ADDRESS & ~pt_attr_leaf_index_mask(pt_attr)); + commpage_text_va |= (text_leaf_index << pt_attr_leaf_shift(pt_attr)); + } while (commpage_text_va == _COMM_PAGE64_BASE_ADDRESS); // Try again if we collide (should be unlikely) + + // Assert that this is empty + __assert_only pt_entry_t *ptep = pmap_pte(sharedpage_pmap_default, commpage_text_va); + assert(ptep != PT_ENTRY_NULL); + assert(*ptep == ARM_TTE_EMPTY); + + // At this point, we've found the address we want to insert our comm page at + kr = pmap_enter_addr(sharedpage_pmap_default, commpage_text_va, text_pa, VM_PROT_READ | VM_PROT_EXECUTE, VM_PROT_NONE, VM_WIMG_USE_DEFAULT, TRUE); + assert(kr == KERN_SUCCESS); + // Mark it as global page R/X so that it doesn't get thrown out on tlb flush + pmap_update_tt3e(sharedpage_pmap_default, commpage_text_va, PMAP_COMM_PAGE_TEXT_PTE_TEMPLATE); + + *user_text_addr = commpage_text_va; +#endif + + /* ...and the user 32-bit mapping. */ + kr = pmap_enter_addr(sharedpage_pmap_default, _COMM_PAGE32_BASE_ADDRESS, data_pa, VM_PROT_READ, VM_PROT_NONE, VM_WIMG_USE_DEFAULT, TRUE); + assert(kr == KERN_SUCCESS); + pmap_update_tt3e(sharedpage_pmap_default, _COMM_PAGE32_BASE_ADDRESS, PMAP_COMM_PAGE_PTE_TEMPLATE); + +#if __ARM_MIXED_PAGE_SIZE__ + /** + * To handle 4K tasks a new view/pmap of the shared page is needed. These are a + * new set of page tables that point to the exact same 16K shared page as + * before. Only the first 4K of the 16K shared page is mapped since that's + * the only part that contains relevant data. + */ + sharedpage_pmap_4k = pmap_create_options(NULL, 0x0, PMAP_CREATE_FORCE_4K_PAGES); + assert(sharedpage_pmap_4k != NULL); /* The user 64-bit mapping... */ - kr = pmap_enter(sharedpage_pmap, _COMM_PAGE64_BASE_ADDRESS, (ppnum_t)atop(pa), VM_PROT_READ, VM_PROT_NONE, VM_WIMG_USE_DEFAULT, TRUE); + kr = pmap_enter_addr(sharedpage_pmap_4k, _COMM_PAGE64_BASE_ADDRESS, data_pa, VM_PROT_READ, VM_PROT_NONE, VM_WIMG_USE_DEFAULT, TRUE); assert(kr == KERN_SUCCESS); - pmap_update_tt3e(sharedpage_pmap, _COMM_PAGE64_BASE_ADDRESS, PMAP_COMM_PAGE_PTE_TEMPLATE); + pmap_update_tt3e(sharedpage_pmap_4k, _COMM_PAGE64_BASE_ADDRESS, PMAP_COMM_PAGE_PTE_TEMPLATE); /* ...and the user 32-bit mapping. */ - kr = pmap_enter(sharedpage_pmap, _COMM_PAGE32_BASE_ADDRESS, (ppnum_t)atop(pa), VM_PROT_READ, VM_PROT_NONE, VM_WIMG_USE_DEFAULT, TRUE); + kr = pmap_enter_addr(sharedpage_pmap_4k, _COMM_PAGE32_BASE_ADDRESS, data_pa, VM_PROT_READ, VM_PROT_NONE, VM_WIMG_USE_DEFAULT, TRUE); assert(kr == KERN_SUCCESS); - pmap_update_tt3e(sharedpage_pmap, _COMM_PAGE32_BASE_ADDRESS, PMAP_COMM_PAGE_PTE_TEMPLATE); + pmap_update_tt3e(sharedpage_pmap_4k, _COMM_PAGE32_BASE_ADDRESS, PMAP_COMM_PAGE_PTE_TEMPLATE); + +#endif /* For manipulation in kernel, go straight to physical page */ - return (vm_map_address_t)phystokv(pa); + *kernel_data_addr = phystokv(data_pa); + *kernel_text_addr = (text_pa) ? phystokv(text_pa) : 0; + + return; } + /* * Asserts to ensure that the TTEs we nest to map the shared page do not overlap - * with user controlled TTEs. 
+ * with user controlled TTEs for regions that aren't explicitly reserved by the + * VM (e.g., _COMM_PAGE64_NESTING_START/_COMM_PAGE64_BASE_ADDRESS). */ #if (ARM_PGSHIFT == 14) -static_assert((_COMM_PAGE64_BASE_ADDRESS & ~ARM_TT_L2_OFFMASK) >= MACH_VM_MAX_ADDRESS); static_assert((_COMM_PAGE32_BASE_ADDRESS & ~ARM_TT_L2_OFFMASK) >= VM_MAX_ADDRESS); #elif (ARM_PGSHIFT == 12) -static_assert((_COMM_PAGE64_BASE_ADDRESS & ~ARM_TT_L1_OFFMASK) >= MACH_VM_MAX_ADDRESS); static_assert((_COMM_PAGE32_BASE_ADDRESS & ~ARM_TT_L1_OFFMASK) >= VM_MAX_ADDRESS); #else #error Nested shared page mapping is unsupported on this config @@ -11909,6 +12912,27 @@ pmap_insert_sharedpage_internal( vm_offset_t sharedpage_vaddr; pt_entry_t *ttep, *src_ttep; int options = 0; + pmap_t sharedpage_pmap = sharedpage_pmap_default; + + const pt_attr_t * const pt_attr = pmap_get_pt_attr(pmap); + const unsigned int sharedpage_level = pt_attr_sharedpage_level(pt_attr); + +#if __ARM_MIXED_PAGE_SIZE__ +#if !__ARM_16K_PG__ + /* The following code assumes that sharedpage_pmap_default is a 16KB pmap. */ + #error "pmap_insert_sharedpage_internal requires a 16KB default kernel page size when __ARM_MIXED_PAGE_SIZE__ is enabled" +#endif /* !__ARM_16K_PG__ */ + + /* Choose the correct shared page pmap to use. */ + const uint64_t pmap_page_size = pt_attr_page_size(pt_attr); + if (pmap_page_size == 16384) { + sharedpage_pmap = sharedpage_pmap_default; + } else if (pmap_page_size == 4096) { + sharedpage_pmap = sharedpage_pmap_4k; + } else { + panic("No shared page pmap exists for the wanted page size: %llu", pmap_page_size); + } +#endif /* __ARM_MIXED_PAGE_SIZE__ */ VALIDATE_PMAP(pmap); #if XNU_MONITOR @@ -11925,36 +12949,31 @@ pmap_insert_sharedpage_internal( sharedpage_vaddr = _COMM_PAGE32_BASE_ADDRESS; } - PMAP_LOCK(pmap); + + pmap_lock(pmap); /* - * For 4KB pages, we can force the commpage to nest at the level one - * page table, as each entry is 1GB (i.e, there will be no overlap - * with regular userspace mappings). For 16KB pages, each level one - * entry is 64GB, so we must go to the second level entry (32MB) in - * order to nest. + * For 4KB pages, we either "nest" at the level one page table (1GB) or level + * two (2MB) depending on the address space layout. For 16KB pages, each level + * one entry is 64GB, so we must go to the second level entry (32MB) in order + * to "nest". + * + * Note: This is not "nesting" in the shared cache sense. This definition of + * nesting just means inserting pointers to pre-allocated tables inside of + * the passed in pmap to allow us to share page tables (which map the shared + * page) for every task. This saves at least one page of memory per process + * compared to creating new page tables in every process for mapping the + * shared page. */ -#if (ARM_PGSHIFT == 12) - (void)options; - - /* Just slam in the L1 entry. */ - ttep = pmap_tt1e(pmap, sharedpage_vaddr); - if (*ttep != ARM_PTE_EMPTY) { - panic("%s: Found something mapped at the commpage address?!", __FUNCTION__); - } - - src_ttep = pmap_tt1e(sharedpage_pmap, sharedpage_vaddr); -#elif (ARM_PGSHIFT == 14) - /* Allocate for the L2 entry if necessary, and slam it into place. */ - /* - * As long as we are use a three level page table, the first level - * should always exist, so we don't need to check for it. + /** + * Allocate the twig page tables if needed, and slam a pointer to the shared + * page's tables into place. 
*/ - while (*pmap_tt1e(pmap, sharedpage_vaddr) == ARM_PTE_EMPTY) { - PMAP_UNLOCK(pmap); + while ((ttep = pmap_ttne(pmap, sharedpage_level, sharedpage_vaddr)) == TT_ENTRY_NULL) { + pmap_unlock(pmap); - kr = pmap_expand(pmap, sharedpage_vaddr, options, PMAP_TT_L2_LEVEL); + kr = pmap_expand(pmap, sharedpage_vaddr, options, sharedpage_level); if (kr != KERN_SUCCESS) { #if XNU_MONITOR @@ -11967,32 +12986,19 @@ pmap_insert_sharedpage_internal( } } - PMAP_LOCK(pmap); + pmap_lock(pmap); } - ttep = pmap_tt2e(pmap, sharedpage_vaddr); - if (*ttep != ARM_PTE_EMPTY) { panic("%s: Found something mapped at the commpage address?!", __FUNCTION__); } - src_ttep = pmap_tt2e(sharedpage_pmap, sharedpage_vaddr); -#endif + src_ttep = pmap_ttne(sharedpage_pmap, sharedpage_level, sharedpage_vaddr); - *ttep = *src_ttep; + *ttep = *src_ttep; FLUSH_PTE_STRONG(ttep); - /* TODO: Should we flush in the 64-bit case? */ - flush_mmu_tlb_region_asid_async(sharedpage_vaddr, PAGE_SIZE, kernel_pmap); - -#if (ARM_PGSHIFT == 12) - flush_mmu_tlb_entry_async(tlbi_addr(sharedpage_vaddr & ~ARM_TT_L1_OFFMASK) | tlbi_asid(pmap->hw_asid)); -#elif (ARM_PGSHIFT == 14) - flush_mmu_tlb_entry_async(tlbi_addr(sharedpage_vaddr & ~ARM_TT_L2_OFFMASK) | tlbi_asid(pmap->hw_asid)); -#endif - sync_tlb_flush(); - - PMAP_UNLOCK(pmap); + pmap_unlock(pmap); return kr; } @@ -12003,6 +13009,27 @@ pmap_unmap_sharedpage( { pt_entry_t *ttep; vm_offset_t sharedpage_vaddr; + pmap_t sharedpage_pmap = sharedpage_pmap_default; + + const pt_attr_t * const pt_attr = pmap_get_pt_attr(pmap); + const unsigned int sharedpage_level = pt_attr_sharedpage_level(pt_attr); + +#if __ARM_MIXED_PAGE_SIZE__ +#if !__ARM_16K_PG__ + /* The following code assumes that sharedpage_pmap_default is a 16KB pmap. */ + #error "pmap_unmap_sharedpage requires a 16KB default kernel page size when __ARM_MIXED_PAGE_SIZE__ is enabled" +#endif /* !__ARM_16K_PG__ */ + + /* Choose the correct shared page pmap to use. */ + const uint64_t pmap_page_size = pt_attr_page_size(pt_attr); + if (pmap_page_size == 16384) { + sharedpage_pmap = sharedpage_pmap_default; + } else if (pmap_page_size == 4096) { + sharedpage_pmap = sharedpage_pmap_4k; + } else { + panic("No shared page pmap exists for the wanted page size: %llu", pmap_page_size); + } +#endif /* __ARM_MIXED_PAGE_SIZE__ */ #if _COMM_PAGE_AREA_LENGTH != PAGE_SIZE #error We assume a single page. @@ -12014,38 +13041,22 @@ pmap_unmap_sharedpage( sharedpage_vaddr = _COMM_PAGE32_BASE_ADDRESS; } -#if (ARM_PGSHIFT == 12) - ttep = pmap_tt1e(pmap, sharedpage_vaddr); - - if (ttep == NULL) { - return; - } - /* It had better be mapped to the shared page */ - if (*ttep != ARM_TTE_EMPTY && *ttep != *pmap_tt1e(sharedpage_pmap, sharedpage_vaddr)) { - panic("%s: Something other than commpage mapped in shared page slot?", __FUNCTION__); - } -#elif (ARM_PGSHIFT == 14) - ttep = pmap_tt2e(pmap, sharedpage_vaddr); + ttep = pmap_ttne(pmap, sharedpage_level, sharedpage_vaddr); if (ttep == NULL) { return; } - /* It had better be mapped to the shared page */ - if (*ttep != ARM_TTE_EMPTY && *ttep != *pmap_tt2e(sharedpage_pmap, sharedpage_vaddr)) { + /* It had better be mapped to the shared page. 
*/ + if (*ttep != ARM_TTE_EMPTY && *ttep != *pmap_ttne(sharedpage_pmap, sharedpage_level, sharedpage_vaddr)) { panic("%s: Something other than commpage mapped in shared page slot?", __FUNCTION__); } -#endif *ttep = ARM_TTE_EMPTY; - flush_mmu_tlb_region_asid_async(sharedpage_vaddr, PAGE_SIZE, kernel_pmap); + FLUSH_PTE_STRONG(ttep); -#if (ARM_PGSHIFT == 12) - flush_mmu_tlb_entry_async(tlbi_addr(sharedpage_vaddr & ~ARM_TT_L1_OFFMASK) | tlbi_asid(pmap->hw_asid)); -#elif (ARM_PGSHIFT == 14) - flush_mmu_tlb_entry_async(tlbi_addr(sharedpage_vaddr & ~ARM_TT_L2_OFFMASK) | tlbi_asid(pmap->hw_asid)); -#endif + flush_mmu_tlb_region_asid_async(sharedpage_vaddr, PAGE_SIZE, pmap); sync_tlb_flush(); } @@ -12057,7 +13068,7 @@ pmap_insert_sharedpage( kern_return_t kr = KERN_FAILURE; while ((kr = pmap_insert_sharedpage_ppl(pmap)) == KERN_RESOURCE_SHORTAGE) { - pmap_alloc_page_for_ppl(); + pmap_alloc_page_for_ppl(0); } pmap_ledger_check_balance(pmap); @@ -12080,6 +13091,13 @@ pmap_is_64bit( return pmap->is_64bit; } +bool +pmap_is_exotic( + pmap_t pmap __unused) +{ + return false; +} + #endif /* ARMTODO -- an implementation that accounts for @@ -12124,13 +13142,13 @@ pmap_is_empty_internal( unsigned int initial_not_in_kdp = not_in_kdp; if ((pmap != kernel_pmap) && (initial_not_in_kdp)) { - PMAP_LOCK(pmap); + pmap_lock_ro(pmap); } #if (__ARM_VMSA__ == 7) if (tte_index(pmap, pt_attr, va_end) >= pmap->tte_index_max) { if ((pmap != kernel_pmap) && (initial_not_in_kdp)) { - PMAP_UNLOCK(pmap); + pmap_unlock_ro(pmap); } return TRUE; } @@ -12158,7 +13176,7 @@ pmap_is_empty_internal( for (pte_p = bpte_p; pte_p < epte_p; pte_p++) { if (*pte_p != ARM_PTE_EMPTY) { if ((pmap != kernel_pmap) && (initial_not_in_kdp)) { - PMAP_UNLOCK(pmap); + pmap_unlock_ro(pmap); } return FALSE; } @@ -12168,7 +13186,7 @@ pmap_is_empty_internal( } if ((pmap != kernel_pmap) && (initial_not_in_kdp)) { - PMAP_UNLOCK(pmap); + pmap_unlock_ro(pmap); } return TRUE; @@ -12202,7 +13220,10 @@ pmap_max_64bit_offset( vm_map_offset_t max_offset_ret = 0; #if defined(__arm64__) - const vm_map_offset_t min_max_offset = SHARED_REGION_BASE_ARM64 + SHARED_REGION_SIZE_ARM64 + 0x20000000; // end of shared region + 512MB for various purposes + #define ARM64_MIN_MAX_ADDRESS (SHARED_REGION_BASE_ARM64 + SHARED_REGION_SIZE_ARM64 + 0x20000000) // end of shared region + 512MB for various purposes + _Static_assert((ARM64_MIN_MAX_ADDRESS > SHARED_REGION_BASE_ARM64) && (ARM64_MIN_MAX_ADDRESS <= MACH_VM_MAX_ADDRESS), + "Minimum address space size outside allowable range"); + const vm_map_offset_t min_max_offset = ARM64_MIN_MAX_ADDRESS; // end of shared region + 512MB for various purposes if (option == ARM_PMAP_MAX_OFFSET_DEFAULT) { max_offset_ret = arm64_pmap_max_offset_default; } else if (option == ARM_PMAP_MAX_OFFSET_MIN) { @@ -12376,11 +13397,11 @@ pmap_ppl_lockdown_page(vm_address_t kva) LOCK_PVH(pai); pv_entry_t **pv_h = pai_to_pvh(pai); - if (pa_test_monitor(pa)) { + if (__improbable(pa_test_monitor(pa))) { panic("%#lx: page %llx belongs to PPL", kva, pa); } - if (pvh_get_flags(pv_h) & (PVH_FLAG_LOCKDOWN | PVH_FLAG_EXEC)) { + if (__improbable(pvh_get_flags(pv_h) & (PVH_FLAG_LOCKDOWN | PVH_FLAG_EXEC))) { panic("%#lx: already locked down/executable (%#llx)", kva, pvh_get_flags(pv_h)); } @@ -12391,7 +13412,7 @@ pmap_ppl_lockdown_page(vm_address_t kva) } pt_entry_t tmplate = *pte_p; - if ((tmplate & ARM_PTE_APMASK) != ARM_PTE_AP(AP_RWNA)) { + if (__improbable((tmplate & ARM_PTE_APMASK) != ARM_PTE_AP(AP_RWNA))) { panic("%#lx: not a kernel r/w page (%#llx)", kva, tmplate & 
ARM_PTE_APMASK); } @@ -12418,7 +13439,7 @@ pmap_ppl_unlockdown_page(vm_address_t kva) vm_offset_t pvh_flags = pvh_get_flags(pv_h); - if (!(pvh_flags & PVH_FLAG_LOCKDOWN)) { + if (__improbable(!(pvh_flags & PVH_FLAG_LOCKDOWN))) { panic("unlockdown attempt on not locked down virtual %#lx/pai %d", kva, pai); } @@ -12464,36 +13485,38 @@ pmap_query_resident_internal( VALIDATE_PMAP(pmap); + const pt_attr_t * const pt_attr = pmap_get_pt_attr(pmap); + /* Ensure that this request is valid, and addresses exactly one TTE. */ - if (__improbable((start % ARM_PGBYTES) || (end % ARM_PGBYTES))) { - panic("%s: address range %p, %p not page-aligned", __func__, (void*)start, (void*)end); + if (__improbable((start % pt_attr_page_size(pt_attr)) || + (end % pt_attr_page_size(pt_attr)))) { + panic("%s: address range %p, %p not page-aligned to 0x%llx", __func__, (void*)start, (void*)end, pt_attr_page_size(pt_attr)); } - if (__improbable((end < start) || ((end - start) > (PTE_PGENTRIES * ARM_PGBYTES)))) { + if (__improbable((end < start) || (end > ((start + pt_attr_twig_size(pt_attr)) & ~pt_attr_twig_offmask(pt_attr))))) { panic("%s: invalid address range %p, %p", __func__, (void*)start, (void*)end); } - PMAP_LOCK(pmap); + pmap_lock_ro(pmap); tte_p = pmap_tte(pmap, start); if (tte_p == (tt_entry_t *) NULL) { - PMAP_UNLOCK(pmap); + pmap_unlock_ro(pmap); return PMAP_RESIDENT_INVALID; } if ((*tte_p & ARM_TTE_TYPE_MASK) == ARM_TTE_TYPE_TABLE) { - __unused const pt_attr_t * const pt_attr = pmap_get_pt_attr(pmap); pte_p = (pt_entry_t *) ttetokv(*tte_p); bpte = &pte_p[pte_index(pmap, pt_attr, start)]; epte = &pte_p[pte_index(pmap, pt_attr, end)]; for (; bpte < epte; bpte++) { if (ARM_PTE_IS_COMPRESSED(*bpte, bpte)) { - compressed_bytes += ARM_PGBYTES; + compressed_bytes += pt_attr_page_size(pt_attr); } else if (pa_valid(pte_to_pa(*bpte))) { - resident_bytes += ARM_PGBYTES; + resident_bytes += pt_attr_page_size(pt_attr); } } } - PMAP_UNLOCK(pmap); + pmap_unlock_ro(pmap); if (compressed_bytes_p) { pmap_pin_kernel_pages((vm_offset_t)compressed_bytes_p, sizeof(*compressed_bytes_p)); @@ -12665,6 +13688,18 @@ typedef struct { pmap_pgtrace_page_state_t state; } pmap_pgtrace_page_t; +typedef struct { + queue_chain_t chain; + pmap_t pmap; + vm_map_offset_t va; +} pmap_va_t; + +static ZONE_VIEW_DEFINE(ZV_PMAP_VA, "pmap va", + KHEAP_ID_DEFAULT, sizeof(pmap_va_t)); + +static ZONE_VIEW_DEFINE(ZV_PMAP_PGTRACE, "pmap pgtrace", + KHEAP_ID_DEFAULT, sizeof(pmap_pgtrace_page_t)); + static struct { /* * pages - list of tracing page info @@ -12719,7 +13754,7 @@ pmap_pgtrace_enter_clone(pmap_t pmap, vm_map_offset_t va_page, vm_map_offset_t s pmap_pgtrace_page_t *p; bool found = false; - PMAP_ASSERT_LOCKED(pmap); + pmap_assert_locked_w(pmap); assert(va_page == arm_trunc_page(va_page)); PMAP_PGTRACE_LOCK(&ints); @@ -12930,7 +13965,7 @@ pmap_pgtrace_remove_all_clone(pmap_paddr_t pa) // sanitize maps in waste queue_iterate(mapwaste, map, pmap_pgtrace_map_t *, chain) { if (map->cloned == true) { - PMAP_LOCK(map->pmap); + pmap_lock(map->pmap); // restore back original pte ptep = pmap_pte(map->pmap, map->ova); @@ -12946,7 +13981,7 @@ pmap_pgtrace_remove_all_clone(pmap_paddr_t pa) PMAP_UPDATE_TLBS(kernel_pmap, map->cva[i], map->cva[i] + ARM_PGBYTES, false); } - PMAP_UNLOCK(map->pmap); + pmap_unlock(map->pmap); } map->pmap = NULL; @@ -13011,7 +14046,7 @@ pmap_pgtrace_clone_from_pa(pmap_t pmap, pmap_paddr_t pa, vm_map_offset_t start_o while (cur_page <= end_page) { vm_map_offset_t add = 0; - PMAP_LOCK(pmap); + pmap_lock(pmap); // skip uninterested 
space if (pmap == kernel_pmap && @@ -13054,7 +14089,7 @@ pmap_pgtrace_clone_from_pa(pmap_t pmap, pmap_paddr_t pa, vm_map_offset_t start_o add = ARM_PGBYTES; unlock_continue: - PMAP_UNLOCK(pmap); + pmap_unlock(pmap); //overflow if (cur_page + add < cur_page) { @@ -13078,12 +14113,6 @@ pmap_pgtrace_clone_from_pvtable(pmap_paddr_t pa, vm_map_offset_t start_offset, v pt_entry_t *ptep; pmap_t pmap; - typedef struct { - queue_chain_t chain; - pmap_t pmap; - vm_map_offset_t va; - } pmap_va_t; - queue_head_t pmapvaq; pmap_va_t *pmapva; @@ -13098,7 +14127,7 @@ pmap_pgtrace_clone_from_pvtable(pmap_paddr_t pa, vm_map_offset_t start_offset, v ptep = pvh_ptep(pvh); pmap = ptep_get_pmap(ptep); - pmapva = (pmap_va_t *)kalloc(sizeof(pmap_va_t)); + pmapva = (pmap_va_t *)zalloc(ZV_PMAP_VA); pmapva->pmap = pmap; pmapva->va = ptep_get_va(ptep); @@ -13111,7 +14140,7 @@ pmap_pgtrace_clone_from_pvtable(pmap_paddr_t pa, vm_map_offset_t start_offset, v ptep = pve_get_ptep(pvep); pmap = ptep_get_pmap(ptep); - pmapva = (pmap_va_t *)kalloc(sizeof(pmap_va_t)); + pmapva = (pmap_va_t *)zalloc(ZV_PMAP_VA); pmapva->pmap = pmap; pmapva->va = ptep_get_va(ptep); @@ -13125,16 +14154,16 @@ pmap_pgtrace_clone_from_pvtable(pmap_paddr_t pa, vm_map_offset_t start_offset, v // clone them while making sure mapping still exists queue_iterate(&pmapvaq, pmapva, pmap_va_t *, chain) { - PMAP_LOCK(pmapva->pmap); + pmap_lock(pmapva->pmap); ptep = pmap_pte(pmapva->pmap, pmapva->va); if (pte_to_pa(*ptep) == pa) { if (pmap_pgtrace_enter_clone(pmapva->pmap, pmapva->va, start_offset, end_offset) == true) { ret++; } } - PMAP_UNLOCK(pmapva->pmap); + pmap_unlock(pmapva->pmap); - kfree(pmapva, sizeof(pmap_va_t)); + zfree(ZV_PMAP_VA, pmapva); } return ret; @@ -13150,7 +14179,7 @@ pmap_pgtrace_alloc_page(void) queue_head_t *mapwaste; pmap_pgtrace_map_t *map; - p = kalloc(sizeof(pmap_pgtrace_page_t)); + p = zalloc(ZV_PMAP_PGTRACE); assert(p); p->state = UNDEFINED; @@ -13179,12 +14208,12 @@ pmap_pgtrace_alloc_page(void) vm_map_unlock(kernel_map); // fill default clone page info and add to pool - map = kalloc(sizeof(pmap_pgtrace_map_t)); + map = zalloc(ZV_PMAP_PGTRACE); for (int j = 0; j < 3; j++) { vm_map_offset_t addr = newcva + j * ARM_PGBYTES; // pre-expand pmap while preemption enabled - kr = pmap_expand(kernel_pmap, addr, 0, PMAP_TT_MAX_LEVEL); + kr = pmap_expand(kernel_pmap, addr, 0, PMAP_TT_L3_LEVEL); if (kr != KERN_SUCCESS) { panic("%s: pmap_expand(kernel_pmap, addr=%llx) returns kr=%d\n", __func__, addr, kr); } @@ -13220,20 +14249,20 @@ pmap_pgtrace_free_page(pmap_pgtrace_page_t *p) while (!queue_empty(mapq)) { queue_remove_first(mapq, map, pmap_pgtrace_map_t *, chain); - kfree(map, sizeof(pmap_pgtrace_map_t)); + zfree(ZV_PMAP_PGTRACE, map); } while (!queue_empty(mappool)) { queue_remove_first(mappool, map, pmap_pgtrace_map_t *, chain); - kfree(map, sizeof(pmap_pgtrace_map_t)); + zfree(ZV_PMAP_PGTRACE, map); } while (!queue_empty(mapwaste)) { queue_remove_first(mapwaste, map, pmap_pgtrace_map_t *, chain); - kfree(map, sizeof(pmap_pgtrace_map_t)); + zfree(ZV_PMAP_PGTRACE, map); } - kfree(p, sizeof(pmap_pgtrace_page_t)); + zfree(ZV_PMAP_PGTRACE, p); } // construct page infos with the given address range @@ -13268,10 +14297,10 @@ pmap_pgtrace_add_page(pmap_t pmap, vm_map_offset_t start, vm_map_offset_t end) // keep lock orders in pmap, kernel_pmap and pgtrace lock if (pmap != NULL) { - PMAP_LOCK(pmap); + pmap_lock_ro(pmap); } if (pmap != kernel_pmap) { - PMAP_LOCK(kernel_pmap); + pmap_lock_ro(kernel_pmap); } // addresses are physical if pmap is 
null @@ -13353,10 +14382,10 @@ pmap_pgtrace_add_page(pmap_t pmap, vm_map_offset_t start, vm_map_offset_t end) // unlock locks PMAP_PGTRACE_UNLOCK(&ints); if (pmap != kernel_pmap) { - PMAP_UNLOCK(kernel_pmap); + pmap_unlock_ro(kernel_pmap); } if (pmap != NULL) { - PMAP_UNLOCK(pmap); + pmap_unlock_ro(pmap); } // now clone it @@ -13419,14 +14448,14 @@ pmap_pgtrace_delete_page(pmap_t pmap, vm_map_offset_t start, vm_map_offset_t end if (pmap == NULL) { pa_page = cur_page; } else { - PMAP_LOCK(pmap); + pmap_lock(pmap); ptep = pmap_pte(pmap, cur_page); if (ptep == NULL) { - PMAP_UNLOCK(pmap); + pmap_unlock(pmap); goto cont; } pa_page = pte_to_pa(*ptep); - PMAP_UNLOCK(pmap); + pmap_unlock(pmap); } // remove all clones and validate @@ -13546,6 +14575,50 @@ pmap_pgtrace_fault(pmap_t pmap, vm_map_offset_t va, arm_saved_state_t *ss) } #endif +/** + * The minimum shared region nesting size is used by the VM to determine when to + * break up large mappings to nested regions. The smallest size that these + * mappings can be broken into is determined by what page table level those + * regions are being nested in at and the size of the page tables. + * + * For instance, if a nested region is nesting at L2 for a process utilizing + * 16KB page tables, then the minimum nesting size would be 32MB (size of an L2 + * block entry). + * + * @param pmap The target pmap to determine the block size based on whether it's + * using 16KB or 4KB page tables. + */ +uint64_t +pmap_shared_region_size_min(__unused pmap_t pmap) +{ +#if (__ARM_VMSA__ > 7) + const pt_attr_t * const pt_attr = pmap_get_pt_attr(pmap); + + /** + * We always nest the shared region at L2 (32MB for 16KB pages, 2MB for + * 4KB pages). This means that a target pmap will contain L2 entries that + * point to shared L3 page tables in the shared region pmap. + */ + return pt_attr_twig_size(pt_attr); + +#else + return ARM_NESTING_SIZE_MIN; +#endif +} + +/** + * The concept of a nesting size maximum was made to accomodate restrictions in + * place for nesting regions on PowerPC. There are no restrictions to max nesting + * sizes on x86/armv7/armv8 and this should get removed. 
+ * + * TODO: Completely remove pmap_nesting_size_max() + */ +uint64_t +pmap_nesting_size_max(__unused pmap_t pmap) +{ + return ARM_NESTING_SIZE_MAX; +} + boolean_t pmap_enforces_execute_only( #if (__ARM_VMSA__ == 7) @@ -13560,6 +14633,38 @@ pmap_enforces_execute_only( #endif } +MARK_AS_PMAP_TEXT void +pmap_set_vm_map_cs_enforced_internal( + pmap_t pmap, + bool new_value) +{ + VALIDATE_PMAP(pmap); + pmap->pmap_vm_map_cs_enforced = new_value; +} + +void +pmap_set_vm_map_cs_enforced( + pmap_t pmap, + bool new_value) +{ +#if XNU_MONITOR + pmap_set_vm_map_cs_enforced_ppl(pmap, new_value); +#else + pmap_set_vm_map_cs_enforced_internal(pmap, new_value); +#endif +} + +extern int cs_process_enforcement_enable; +bool +pmap_get_vm_map_cs_enforced( + pmap_t pmap) +{ + if (cs_process_enforcement_enable) { + return true; + } + return pmap->pmap_vm_map_cs_enforced; +} + MARK_AS_PMAP_TEXT void pmap_set_jit_entitled_internal( __unused pmap_t pmap) @@ -13578,6 +14683,13 @@ pmap_set_jit_entitled( #endif } +bool +pmap_get_jit_entitled( + __unused pmap_t pmap) +{ + return false; +} + MARK_AS_PMAP_TEXT static kern_return_t pmap_query_page_info_internal( pmap_t pmap, @@ -13600,7 +14712,7 @@ pmap_query_page_info_internal( disp = 0; VALIDATE_PMAP(pmap); - PMAP_LOCK(pmap); + pmap_lock_ro(pmap); pte = pmap_pte(pmap, va); if (pte == PT_ENTRY_NULL) { @@ -13642,7 +14754,7 @@ pmap_query_page_info_internal( } done: - PMAP_UNLOCK(pmap); + pmap_unlock_ro(pmap); pmap_pin_kernel_pages((vm_offset_t)disp_p, sizeof(*disp_p)); *disp_p = disp; pmap_unpin_kernel_pages((vm_offset_t)disp_p, sizeof(*disp_p)); @@ -13721,42 +14833,47 @@ struct page_table_dump_header { uint64_t end_va; }; -static size_t +static kern_return_t pmap_dump_page_tables_recurse(pmap_t pmap, const tt_entry_t *ttp, unsigned int cur_level, + unsigned int level_mask, uint64_t start_va, - void *bufp, - void *buf_end) + void *buf_start, + void *buf_end, + size_t *bytes_copied) { - size_t bytes_used = 0; - uint64_t num_entries = ARM_PGBYTES / sizeof(*ttp); const pt_attr_t * const pt_attr = pmap_get_pt_attr(pmap); + uint64_t num_entries = pt_attr_page_size(pt_attr) / sizeof(*ttp); uint64_t size = pt_attr->pta_level_info[cur_level].size; uint64_t valid_mask = pt_attr->pta_level_info[cur_level].valid_mask; uint64_t type_mask = pt_attr->pta_level_info[cur_level].type_mask; uint64_t type_block = pt_attr->pta_level_info[cur_level].type_block; - if (cur_level == arm64_root_pgtable_level) { - num_entries = arm64_root_pgtable_num_ttes; + void *bufp = (uint8_t*)buf_start + *bytes_copied; + + if (cur_level == pt_attr_root_level(pt_attr)) { + num_entries = pmap_root_alloc_size(pmap) / sizeof(tt_entry_t); } uint64_t tt_size = num_entries * sizeof(tt_entry_t); const tt_entry_t *tt_end = &ttp[num_entries]; if (((vm_offset_t)buf_end - (vm_offset_t)bufp) < (tt_size + sizeof(struct page_table_dump_header))) { - return 0; + return KERN_INSUFFICIENT_BUFFER_SIZE; } - struct page_table_dump_header *header = (struct page_table_dump_header*)bufp; - header->pa = ml_static_vtop((vm_offset_t)ttp); - header->num_entries = num_entries; - header->start_va = start_va; - header->end_va = start_va + (num_entries * size); + if (level_mask & (1U << cur_level)) { + struct page_table_dump_header *header = (struct page_table_dump_header*)bufp; + header->pa = ml_static_vtop((vm_offset_t)ttp); + header->num_entries = num_entries; + header->start_va = start_va; + header->end_va = start_va + (num_entries * size); - bcopy(ttp, (uint8_t*)bufp + sizeof(*header), tt_size); - bytes_used += (sizeof(*header) + 
tt_size); + bcopy(ttp, (uint8_t*)bufp + sizeof(*header), tt_size); + *bytes_copied = *bytes_copied + sizeof(*header) + tt_size; + } uint64_t current_va = start_va; for (const tt_entry_t *ttep = ttp; ttep < tt_end; ttep++, current_va += size) { @@ -13769,7 +14886,7 @@ pmap_dump_page_tables_recurse(pmap_t pmap, if ((tte & type_mask) == type_block) { continue; } else { - if (cur_level >= PMAP_TT_MAX_LEVEL) { + if (cur_level >= pt_attr_leaf_level(pt_attr)) { panic("%s: corrupt entry %#llx at %p, " "ttp=%p, cur_level=%u, bufp=%p, buf_end=%p", __FUNCTION__, tte, ttep, @@ -13778,34 +14895,446 @@ pmap_dump_page_tables_recurse(pmap_t pmap, const tt_entry_t *next_tt = (const tt_entry_t*)phystokv(tte & ARM_TTE_TABLE_MASK); - size_t recurse_result = pmap_dump_page_tables_recurse(pmap, next_tt, cur_level + 1, current_va, (uint8_t*)bufp + bytes_used, buf_end); + kern_return_t recurse_result = pmap_dump_page_tables_recurse(pmap, next_tt, cur_level + 1, + level_mask, current_va, buf_start, buf_end, bytes_copied); - if (recurse_result == 0) { - return 0; + if (recurse_result != KERN_SUCCESS) { + return recurse_result; } - - bytes_used += recurse_result; } } - return bytes_used; + return KERN_SUCCESS; } -size_t -pmap_dump_page_tables(pmap_t pmap, void *bufp, void *buf_end) +kern_return_t +pmap_dump_page_tables(pmap_t pmap, void *bufp, void *buf_end, unsigned int level_mask, size_t *bytes_copied) { if (not_in_kdp) { panic("pmap_dump_page_tables must only be called from kernel debugger context"); } - return pmap_dump_page_tables_recurse(pmap, pmap->tte, arm64_root_pgtable_level, pmap->min, bufp, buf_end); + return pmap_dump_page_tables_recurse(pmap, pmap->tte, pt_attr_root_level(pmap_get_pt_attr(pmap)), + level_mask, pmap->min, bufp, buf_end, bytes_copied); } #else /* defined(__arm64__) && (DEVELOPMENT || DEBUG) */ -size_t -pmap_dump_page_tables(pmap_t pmap __unused, void *bufp __unused, void *buf_end __unused) +kern_return_t +pmap_dump_page_tables(pmap_t pmap __unused, void *bufp __unused, void *buf_end __unused, + unsigned int level_mask __unused, size_t *bytes_copied __unused) { - return (size_t)-1; + return KERN_NOT_SUPPORTED; } - #endif /* !defined(__arm64__) */ + + +#ifdef CONFIG_XNUPOST +#ifdef __arm64__ +static volatile bool pmap_test_took_fault = false; + +static bool +pmap_test_fault_handler(arm_saved_state_t * state) +{ + bool retval = false; + uint32_t esr = get_saved_state_esr(state); + esr_exception_class_t class = ESR_EC(esr); + fault_status_t fsc = ISS_IA_FSC(ESR_ISS(esr)); + + if ((class == ESR_EC_DABORT_EL1) && + ((fsc == FSC_PERMISSION_FAULT_L3) || (fsc == FSC_ACCESS_FLAG_FAULT_L3))) { + pmap_test_took_fault = true; + /* return to the instruction immediately after the call to NX page */ + set_saved_state_pc(state, get_saved_state_pc(state) + 4); + retval = true; + } + + return retval; +} + +static bool +pmap_test_access(pmap_t pmap, vm_map_address_t va, bool should_fault, bool is_write) +{ + /* + * We're switching pmaps without using the normal thread mechanism; + * disable interrupts and preemption to avoid any unexpected memory + * accesses. + */ + boolean_t old_int_state = ml_set_interrupts_enabled(false); + pmap_t old_pmap = current_pmap(); + mp_disable_preemption(); + pmap_switch(pmap); + + pmap_test_took_fault = false; + + /* Disable PAN; pmap shouldn't be the kernel pmap. 
*/ +#if __ARM_PAN_AVAILABLE__ + __builtin_arm_wsr("pan", 0); +#endif /* __ARM_PAN_AVAILABLE__ */ + ml_expect_fault_begin(pmap_test_fault_handler, va); + + if (is_write) { + *((volatile uint64_t*)(va)) = 0xdec0de; + } else { + volatile uint64_t tmp = *((volatile uint64_t*)(va)); + (void)tmp; + } + + /* Save the fault bool, and undo the gross stuff we did. */ + bool took_fault = pmap_test_took_fault; + ml_expect_fault_end(); +#if __ARM_PAN_AVAILABLE__ + __builtin_arm_wsr("pan", 1); +#endif /* __ARM_PAN_AVAILABLE__ */ + + pmap_switch(old_pmap); + mp_enable_preemption(); + ml_set_interrupts_enabled(old_int_state); + bool retval = (took_fault == should_fault); + return retval; +} + +static bool +pmap_test_read(pmap_t pmap, vm_map_address_t va, bool should_fault) +{ + bool retval = pmap_test_access(pmap, va, should_fault, false); + + if (!retval) { + T_FAIL("%s: %s, " + "pmap=%p, va=%p, should_fault=%u", + __func__, should_fault ? "did not fault" : "faulted", + pmap, (void*)va, (unsigned)should_fault); + } + + return retval; +} + +static bool +pmap_test_write(pmap_t pmap, vm_map_address_t va, bool should_fault) +{ + bool retval = pmap_test_access(pmap, va, should_fault, true); + + if (!retval) { + T_FAIL("%s: %s, " + "pmap=%p, va=%p, should_fault=%u", + __func__, should_fault ? "did not fault" : "faulted", + pmap, (void*)va, (unsigned)should_fault); + } + + return retval; +} + +static bool +pmap_test_check_refmod(pmap_paddr_t pa, unsigned int should_be_set) +{ + unsigned int should_be_clear = (~should_be_set) & (VM_MEM_REFERENCED | VM_MEM_MODIFIED); + unsigned int bits = pmap_get_refmod((ppnum_t)atop(pa)); + + bool retval = (((bits & should_be_set) == should_be_set) && ((bits & should_be_clear) == 0)); + + if (!retval) { + T_FAIL("%s: bits=%u, " + "pa=%p, should_be_set=%u", + __func__, bits, + (void*)pa, should_be_set); + } + + return retval; +} + +static __attribute__((noinline)) bool +pmap_test_read_write(pmap_t pmap, vm_map_address_t va, bool allow_read, bool allow_write) +{ + bool retval = (pmap_test_read(pmap, va, !allow_read) | pmap_test_write(pmap, va, !allow_write)); + return retval; +} + +static int +pmap_test_test_config(unsigned int flags) +{ + T_LOG("running pmap_test_test_config flags=0x%X", flags); + unsigned int map_count = 0; + unsigned long page_ratio = 0; + pmap_t pmap = pmap_create_options(NULL, 0, flags); + + if (!pmap) { + panic("Failed to allocate pmap"); + } + + __unused const pt_attr_t * const pt_attr = pmap_get_pt_attr(pmap); + uintptr_t native_page_size = pt_attr_page_size(native_pt_attr); + uintptr_t pmap_page_size = pt_attr_page_size(pt_attr); + uintptr_t pmap_twig_size = pt_attr_twig_size(pt_attr); + + if (pmap_page_size <= native_page_size) { + page_ratio = native_page_size / pmap_page_size; + } else { + /* + * We claim to support a page_ratio of less than 1, which is + * not currently supported by the pmap layer; panic. + */ + panic("%s: page_ratio < 1, native_page_size=%lu, pmap_page_size=%lu" + "flags=%u", + __func__, native_page_size, pmap_page_size, + flags); + } + + if (PAGE_RATIO > 1) { + /* + * The kernel is deliberately pretending to have 16KB pages. + * The pmap layer has code that supports this, so pretend the + * page size is larger than it is. + */ + pmap_page_size = PAGE_SIZE; + native_page_size = PAGE_SIZE; + } + + /* + * Get two pages from the VM; one to be mapped wired, and one to be + * mapped nonwired. 
+ */ + vm_page_t unwired_vm_page = vm_page_grab(); + vm_page_t wired_vm_page = vm_page_grab(); + + if ((unwired_vm_page == VM_PAGE_NULL) || (wired_vm_page == VM_PAGE_NULL)) { + panic("Failed to grab VM pages"); + } + + ppnum_t pn = VM_PAGE_GET_PHYS_PAGE(unwired_vm_page); + ppnum_t wired_pn = VM_PAGE_GET_PHYS_PAGE(wired_vm_page); + + pmap_paddr_t pa = ptoa(pn); + pmap_paddr_t wired_pa = ptoa(wired_pn); + + /* + * We'll start mappings at the second twig TT. This keeps us from only + * using the first entry in each TT, which would trivially be address + * 0; one of the things we will need to test is retrieving the VA for + * a given PTE. + */ + vm_map_address_t va_base = pmap_twig_size; + vm_map_address_t wired_va_base = ((2 * pmap_twig_size) - pmap_page_size); + + if (wired_va_base < (va_base + (page_ratio * pmap_page_size))) { + /* + * Not exactly a functional failure, but this test relies on + * there being a spare PTE slot we can use to pin the TT. + */ + panic("Cannot pin translation table"); + } + + /* + * Create the wired mapping; this will prevent the pmap layer from + * reclaiming our test TTs, which would interfere with this test + * ("interfere" -> "make it panic"). + */ + pmap_enter_addr(pmap, wired_va_base, wired_pa, VM_PROT_READ, VM_PROT_READ, 0, true); + + /* + * Create read-only mappings of the nonwired page; if the pmap does + * not use the same page size as the kernel, create multiple mappings + * so that the kernel page is fully mapped. + */ + for (map_count = 0; map_count < page_ratio; map_count++) { + pmap_enter_addr(pmap, va_base + (pmap_page_size * map_count), pa + (pmap_page_size * (map_count)), VM_PROT_READ, VM_PROT_READ, 0, false); + } + + /* Validate that all the PTEs have the expected PA and VA. */ + for (map_count = 0; map_count < page_ratio; map_count++) { + pt_entry_t * ptep = pmap_pte(pmap, va_base + (pmap_page_size * map_count)); + + if (pte_to_pa(*ptep) != (pa + (pmap_page_size * map_count))) { + T_FAIL("Unexpected pa=%p, expected %p, map_count=%u", + (void*)pte_to_pa(*ptep), (void*)(pa + (pmap_page_size * map_count)), map_count); + } + + if (ptep_get_va(ptep) != (va_base + (pmap_page_size * map_count))) { + T_FAIL("Unexpected va=%p, expected %p, map_count=%u", + (void*)ptep_get_va(ptep), (void*)(va_base + (pmap_page_size * map_count)), map_count); + } + } + + T_LOG("Validate that reads to our mapping do not fault."); + pmap_test_read(pmap, va_base, false); + + T_LOG("Validate that writes to our mapping fault."); + pmap_test_write(pmap, va_base, true); + + T_LOG("Make the first mapping writable."); + pmap_enter_addr(pmap, va_base, pa, VM_PROT_READ | VM_PROT_WRITE, VM_PROT_READ | VM_PROT_WRITE, 0, false); + + T_LOG("Validate that writes to our mapping do not fault."); + pmap_test_write(pmap, va_base, false); + +#if PMAP_CS + bool pmap_cs_enforced = pmap->pmap_cs_enforced; + + T_LOG("Disable PMAP CS enforcement"); + pmap_cs_configure_enforcement(pmap, false); +#endif + + T_LOG("Make the first mapping XO."); + pmap_enter_addr(pmap, va_base, pa, VM_PROT_EXECUTE, VM_PROT_EXECUTE, 0, false); + +#if __APRR_SUPPORTED__ + T_LOG("Validate that reads to our mapping fault."); + pmap_test_read(pmap, va_base, true); +#else + T_LOG("Validate that reads to our mapping do not fault."); + pmap_test_read(pmap, va_base, false); +#endif + + T_LOG("Validate that writes to our mapping fault."); + pmap_test_write(pmap, va_base, true); + +#if PMAP_CS + T_LOG("Set PMAP CS enforcement configuration to previous value."); + pmap_cs_configure_enforcement(pmap, pmap_cs_enforced); +#endif + 
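(Illustrative aside, not part of the patch: the page-ratio bookkeeping that this test and the loop below rely on can be sketched in plain C as follows. NATIVE_PAGE_SIZE and PMAP_PAGE_SIZE are assumed stand-ins for pt_attr_page_size(native_pt_attr) and pt_attr_page_size(pt_attr), and va_base/pa are hypothetical values; the real test maps against a live pmap with pmap_enter_addr().)

/* Minimal sketch, assuming a 16KB native kernel page and a 4KB pmap page. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define NATIVE_PAGE_SIZE 16384UL  /* assumed: pt_attr_page_size(native_pt_attr) */
#define PMAP_PAGE_SIZE    4096UL  /* assumed: pt_attr_page_size(pt_attr) */

int main(void)
{
	/* One native page must be covered by a whole number of pmap pages. */
	assert(NATIVE_PAGE_SIZE % PMAP_PAGE_SIZE == 0);
	unsigned long page_ratio = NATIVE_PAGE_SIZE / PMAP_PAGE_SIZE; /* 4 in this case */

	/* The test enters the same physical page page_ratio times, at consecutive
	 * pmap-page-sized VA/PA offsets, so the full native page ends up mapped;
	 * the cleanup loop that follows then removes mappings 1..page_ratio-1. */
	uintptr_t va_base = 0x2000000UL;  /* hypothetical; the test uses pmap_twig_size */
	uintptr_t pa      = 0x80000000UL; /* hypothetical physical address */
	for (unsigned long i = 0; i < page_ratio; i++) {
		printf("map va=0x%lx -> pa=0x%lx\n",
		    va_base + i * PMAP_PAGE_SIZE, pa + i * PMAP_PAGE_SIZE);
	}
	return 0;
}
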
+ /* + * For page ratios of greater than 1: validate that writes to the other + * mappings still fault. Remove the mappings afterwards (we're done + * with page ratio testing). + */ + for (map_count = 1; map_count < page_ratio; map_count++) { + pmap_test_write(pmap, va_base + (pmap_page_size * map_count), true); + pmap_remove(pmap, va_base + (pmap_page_size * map_count), va_base + (pmap_page_size * map_count) + pmap_page_size); + } + + T_LOG("Mark the page unreferenced and unmodified."); + pmap_clear_refmod(pn, VM_MEM_MODIFIED | VM_MEM_REFERENCED); + pmap_test_check_refmod(pa, 0); + + /* + * Begin testing the ref/mod state machine. Re-enter the mapping with + * different protection/fault_type settings, and confirm that the + * ref/mod state matches our expectations at each step. + */ + T_LOG("!ref/!mod: read, no fault. Expect ref/!mod"); + pmap_enter_addr(pmap, va_base, pa, VM_PROT_READ, VM_PROT_NONE, 0, false); + pmap_test_check_refmod(pa, VM_MEM_REFERENCED); + + T_LOG("!ref/!mod: read, read fault. Expect ref/!mod"); + pmap_clear_refmod(pn, VM_MEM_MODIFIED | VM_MEM_REFERENCED); + pmap_enter_addr(pmap, va_base, pa, VM_PROT_READ, VM_PROT_READ, 0, false); + pmap_test_check_refmod(pa, VM_MEM_REFERENCED); + + T_LOG("!ref/!mod: rw, read fault. Expect ref/!mod"); + pmap_clear_refmod(pn, VM_MEM_MODIFIED | VM_MEM_REFERENCED); + pmap_enter_addr(pmap, va_base, pa, VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE, 0, false); + pmap_test_check_refmod(pa, VM_MEM_REFERENCED); + + T_LOG("ref/!mod: rw, read fault. Expect ref/!mod"); + pmap_enter_addr(pmap, va_base, pa, VM_PROT_READ | VM_PROT_WRITE, VM_PROT_READ, 0, false); + pmap_test_check_refmod(pa, VM_MEM_REFERENCED); + + T_LOG("!ref/!mod: rw, rw fault. Expect ref/mod"); + pmap_clear_refmod(pn, VM_MEM_MODIFIED | VM_MEM_REFERENCED); + pmap_enter_addr(pmap, va_base, pa, VM_PROT_READ | VM_PROT_WRITE, VM_PROT_READ | VM_PROT_WRITE, 0, false); + pmap_test_check_refmod(pa, VM_MEM_REFERENCED | VM_MEM_MODIFIED); + + /* + * Shared memory testing; we'll have two mappings; one read-only, + * one read-write. + */ + vm_map_address_t rw_base = va_base; + vm_map_address_t ro_base = va_base + pmap_page_size; + + pmap_enter_addr(pmap, rw_base, pa, VM_PROT_READ | VM_PROT_WRITE, VM_PROT_READ | VM_PROT_WRITE, 0, false); + pmap_enter_addr(pmap, ro_base, pa, VM_PROT_READ, VM_PROT_READ, 0, false); + + /* + * Test that we take faults as expected for unreferenced/unmodified + * pages. Also test the arm_fast_fault interface, to ensure that + * mapping permissions change as expected. 
+ */ + T_LOG("!ref/!mod: expect no access"); + pmap_clear_refmod(pn, VM_MEM_MODIFIED | VM_MEM_REFERENCED); + pmap_test_read_write(pmap, ro_base, false, false); + pmap_test_read_write(pmap, rw_base, false, false); + + T_LOG("Read fault; expect !ref/!mod -> ref/!mod, read access"); + arm_fast_fault(pmap, rw_base, VM_PROT_READ, false, false); + pmap_test_check_refmod(pa, VM_MEM_REFERENCED); + pmap_test_read_write(pmap, ro_base, true, false); + pmap_test_read_write(pmap, rw_base, true, false); + + T_LOG("Write fault; expect ref/!mod -> ref/mod, read and write access"); + arm_fast_fault(pmap, rw_base, VM_PROT_READ | VM_PROT_WRITE, false, false); + pmap_test_check_refmod(pa, VM_MEM_REFERENCED | VM_MEM_MODIFIED); + pmap_test_read_write(pmap, ro_base, true, false); + pmap_test_read_write(pmap, rw_base, true, true); + + T_LOG("Write fault; expect !ref/!mod -> ref/mod, read and write access"); + pmap_clear_refmod(pn, VM_MEM_MODIFIED | VM_MEM_REFERENCED); + arm_fast_fault(pmap, rw_base, VM_PROT_READ | VM_PROT_WRITE, false, false); + pmap_test_check_refmod(pa, VM_MEM_REFERENCED | VM_MEM_MODIFIED); + pmap_test_read_write(pmap, ro_base, true, false); + pmap_test_read_write(pmap, rw_base, true, true); + + T_LOG("RW protect both mappings; should not change protections."); + pmap_protect(pmap, ro_base, ro_base + pmap_page_size, VM_PROT_READ | VM_PROT_WRITE); + pmap_protect(pmap, rw_base, rw_base + pmap_page_size, VM_PROT_READ | VM_PROT_WRITE); + pmap_test_read_write(pmap, ro_base, true, false); + pmap_test_read_write(pmap, rw_base, true, true); + + T_LOG("Read protect both mappings; RW mapping should become RO."); + pmap_protect(pmap, ro_base, ro_base + pmap_page_size, VM_PROT_READ); + pmap_protect(pmap, rw_base, rw_base + pmap_page_size, VM_PROT_READ); + pmap_test_read_write(pmap, ro_base, true, false); + pmap_test_read_write(pmap, rw_base, true, false); + + T_LOG("RW protect the page; mappings should not change protections."); + pmap_enter_addr(pmap, rw_base, pa, VM_PROT_READ | VM_PROT_WRITE, VM_PROT_READ | VM_PROT_WRITE, 0, false); + pmap_page_protect(pn, VM_PROT_ALL); + pmap_test_read_write(pmap, ro_base, true, false); + pmap_test_read_write(pmap, rw_base, true, true); + + T_LOG("Read protect the page; RW mapping should become RO."); + pmap_page_protect(pn, VM_PROT_READ); + pmap_test_read_write(pmap, ro_base, true, false); + pmap_test_read_write(pmap, rw_base, true, false); + + T_LOG("Validate that disconnect removes all known mappings of the page."); + pmap_disconnect(pn); + if (!pmap_verify_free(pn)) { + T_FAIL("Page still has mappings"); + } + + T_LOG("Remove the wired mapping, so we can tear down the test map."); + pmap_remove(pmap, wired_va_base, wired_va_base + pmap_page_size); + pmap_destroy(pmap); + + T_LOG("Release the pages back to the VM."); + vm_page_lock_queues(); + vm_page_free(unwired_vm_page); + vm_page_free(wired_vm_page); + vm_page_unlock_queues(); + + T_LOG("Testing successful!"); + return 0; +} +#endif /* __arm64__ */ + +kern_return_t +pmap_test(void) +{ + T_LOG("Starting pmap_tests"); +#ifdef __arm64__ + int flags = 0; + flags |= PMAP_CREATE_64BIT; + +#if __ARM_MIXED_PAGE_SIZE__ + T_LOG("Testing VM_PAGE_SIZE_4KB"); + pmap_test_test_config(flags | PMAP_CREATE_FORCE_4K_PAGES); + T_LOG("Testing VM_PAGE_SIZE_16KB"); + pmap_test_test_config(flags); +#else /* __ARM_MIXED_PAGE_SIZE__ */ + pmap_test_test_config(flags); +#endif /* __ARM_MIXED_PAGE_SIZE__ */ + +#endif /* __arm64__ */ + T_PASS("completed pmap_test successfully"); + return KERN_SUCCESS; +} +#endif /* CONFIG_XNUPOST */ diff 
--git a/osfmk/arm/pmap.h b/osfmk/arm/pmap.h index 4cfec3ecf..7fe880f18 100644 --- a/osfmk/arm/pmap.h +++ b/osfmk/arm/pmap.h @@ -56,31 +56,87 @@ #define ASID_SHIFT (11) /* Shift for 2048 max virtual ASIDs (2048 pmaps) */ -#define MAX_ASID (1 << ASID_SHIFT) /* Max supported ASIDs (can be virtual) */ +#define MAX_ASIDS (1 << ASID_SHIFT) /* Max supported ASIDs (can be virtual) */ #ifndef ARM_ASID_SHIFT #define ARM_ASID_SHIFT (8) /* Shift for the maximum ARM ASID value (256) */ #endif -#define ARM_MAX_ASID (1 << ARM_ASID_SHIFT) /* Max ASIDs supported by the hardware */ +#define ARM_MAX_ASIDS (1 << ARM_ASID_SHIFT) /* Max ASIDs supported by the hardware */ #define NBBY 8 #if __ARM_KERNEL_PROTECT__ -#define MAX_HW_ASID ((ARM_MAX_ASID >> 1) - 1) +#define MAX_HW_ASIDS ((ARM_MAX_ASIDS >> 1) - 1) #else -#define MAX_HW_ASID (ARM_MAX_ASID - 1) +#define MAX_HW_ASIDS (ARM_MAX_ASIDS - 1) #endif #ifndef ARM_VMID_SHIFT #define ARM_VMID_SHIFT (8) #endif -#define ARM_MAX_VMID (1 << ARM_VMID_SHIFT) +#define ARM_MAX_VMIDS (1 << ARM_VMID_SHIFT) /* XPRR virtual register map */ #define CPUWINDOWS_MAX 4 +#if defined(__arm64__) + +#if defined(ARM_LARGE_MEMORY) +/* + * 2 L1 tables (Linear KVA and V=P), plus 2*16 L2 tables map up to (16*64GB) 1TB of DRAM + * Upper limit on how many pages can be consumed by bootstrap page tables + */ +#define BOOTSTRAP_TABLE_SIZE (ARM_PGBYTES * 34) +#else // ARM_LARGE_MEMORY +#define BOOTSTRAP_TABLE_SIZE (ARM_PGBYTES * 8) +#endif + +typedef uint64_t tt_entry_t; /* translation table entry type */ +#define TT_ENTRY_NULL ((tt_entry_t *) 0) + +typedef uint64_t pt_entry_t; /* page table entry type */ +#define PT_ENTRY_NULL ((pt_entry_t *) 0) + +#elif defined(__arm__) + +typedef uint32_t tt_entry_t; /* translation table entry type */ +#define PT_ENTRY_NULL ((pt_entry_t *) 0) + +typedef uint32_t pt_entry_t; /* page table entry type */ +#define TT_ENTRY_NULL ((tt_entry_t *) 0) + +#else +#error unknown arch +#endif + +/* Forward declaration of the structure that controls page table + * geometry and TTE/PTE format. */ +struct page_table_attr; + +/* + * pv_entry_t - structure to track the active mappings for a given page + */ +typedef struct pv_entry { + struct pv_entry *pve_next; /* next alias */ + pt_entry_t *pve_ptep; /* page table entry */ +} +#if __arm__ && (__BIGGEST_ALIGNMENT__ > 4) +/* For the newer ARMv7k ABI where 64-bit types are 64-bit aligned, but pointers + * are 32-bit: + * Since pt_desc is 64-bit aligned and we cast often from pv_entry to + * pt_desc. + */ +__attribute__ ((aligned(8))) pv_entry_t; +#else +pv_entry_t; +#endif + +typedef struct { + pv_entry_t *list; + uint32_t count; +} pv_free_list_t; + struct pmap_cpu_data { #if XNU_MONITOR - uint64_t cpu_id; void * ppl_kern_saved_sp; void * ppl_stack; arm_context_t * save_area; @@ -88,14 +144,17 @@ struct pmap_cpu_data { #endif #if defined(__arm64__) pmap_t cpu_nested_pmap; + const struct page_table_attr *cpu_nested_pmap_attr; + vm_map_address_t cpu_nested_region_addr; + vm_map_offset_t cpu_nested_region_size; #else pmap_t cpu_user_pmap; unsigned int cpu_user_pmap_stamp; #endif unsigned int cpu_number; bool copywindow_strong_sync[CPUWINDOWS_MAX]; - -#if MAX_ASID > MAX_HW_ASID + pv_free_list_t pv_free; + pv_entry_t *pv_free_tail; /* * This supports overloading of ARM ASIDs by the pmap. The field needs @@ -105,11 +164,10 @@ struct pmap_cpu_data { * ASID). * * If we were to use bitfield shenanigans here, we could save a bit of - * memory by only having enough bits to support MAX_ASID. 
However, such + * memory by only having enough bits to support MAX_ASIDS. However, such * an implementation would be more error prone. */ - uint8_t cpu_asid_high_bits[MAX_HW_ASID]; -#endif + uint8_t cpu_sw_asids[MAX_HW_ASIDS]; }; typedef struct pmap_cpu_data pmap_cpu_data_t; @@ -124,40 +182,26 @@ typedef struct pmap_cpu_data pmap_cpu_data_t; #include /* Base address for low globals. */ +#if defined(ARM_LARGE_MEMORY) +#define LOW_GLOBAL_BASE_ADDRESS 0xfffffe0000000000ULL +#else #define LOW_GLOBAL_BASE_ADDRESS 0xfffffff000000000ULL +#endif /* * This indicates (roughly) where there is free space for the VM * to use for the heap; this does not need to be precise. */ -#if __ARM64_PMAP_SUBPAGE_L1__ && __ARM_16K_PG__ +#if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR) +#if defined(ARM_LARGE_MEMORY) +#define KERNEL_PMAP_HEAP_RANGE_START (VM_MIN_KERNEL_AND_KEXT_ADDRESS+ARM_TT_L1_SIZE) +#else // ARM_LARGE_MEMORY #define KERNEL_PMAP_HEAP_RANGE_START VM_MIN_KERNEL_AND_KEXT_ADDRESS +#endif // ARM_LARGE_MEMORY #else #define KERNEL_PMAP_HEAP_RANGE_START LOW_GLOBAL_BASE_ADDRESS #endif -#if defined(__arm64__) - -#define BOOTSTRAP_TABLE_SIZE (ARM_PGBYTES * 8) - -typedef uint64_t tt_entry_t; /* translation table entry type */ -#define TT_ENTRY_NULL ((tt_entry_t *) 0) - -typedef uint64_t pt_entry_t; /* page table entry type */ -#define PT_ENTRY_NULL ((pt_entry_t *) 0) - -#elif defined(__arm__) - -typedef uint32_t tt_entry_t; /* translation table entry type */ -#define PT_ENTRY_NULL ((pt_entry_t *) 0) - -typedef uint32_t pt_entry_t; /* page table entry type */ -#define TT_ENTRY_NULL ((tt_entry_t *) 0) - -#else -#error unknown arch -#endif - struct page_table_level_info { const uint64_t size; const uint64_t offmask; @@ -168,6 +212,15 @@ struct page_table_level_info { const uint64_t type_block; }; +/* + * For setups where the kernel page size does not match the hardware + * page size (assumably, the kernel page size must be a multiple of + * the hardware page size), we will need to determine what the page + * ratio is. + */ +#define PAGE_RATIO ((1 << PAGE_SHIFT) >> ARM_PGSHIFT) +#define TEST_PAGE_RATIO_4 (PAGE_RATIO == 4) + /* superpages */ #define SUPERPAGE_NBASEPAGES 1 /* No superpages support */ @@ -188,17 +241,10 @@ struct page_table_level_info { ((((vm_map_address_t)(x)) + ARM_PGMASK) & ~ARM_PGMASK) #define arm_trunc_page(x) (((vm_map_address_t)(x)) & ~ARM_PGMASK) +#if __arm__ /* Convert address offset to page table index */ #define ptenum(a) ((((a) & ARM_TT_LEAF_INDEX_MASK) >> ARM_TT_LEAF_SHIFT)) - -/* - * For setups where the kernel page size does not match the hardware - * page size (assumably, the kernel page size must be a multiple of - * the hardware page size), we will need to determine what the page - * ratio is. 
- */ -#define PAGE_RATIO ((1 << PAGE_SHIFT) >> ARM_PGSHIFT) -#define TEST_PAGE_RATIO_4 (PAGE_RATIO == 4) +#endif #if (__ARM_VMSA__ <= 7) #define NTTES (ARM_PGBYTES / sizeof(tt_entry_t)) @@ -270,12 +316,11 @@ extern pmap_paddr_t mmu_uvtop(vm_offset_t va); #define pa_to_tte(a) ((a) & ARM_TTE_TABLE_MASK) #define tte_to_pa(p) ((p) & ARM_TTE_TABLE_MASK) -#define pa_to_pte(a) ((a) & ARM_PTE_MASK) -#define pte_to_pa(p) ((p) & ARM_PTE_MASK) +#define pa_to_pte(a) ((a) & ARM_PTE_PAGE_MASK) +#define pte_to_pa(p) ((p) & ARM_PTE_PAGE_MASK) #define pte_to_ap(p) (((p) & ARM_PTE_APMASK) >> ARM_PTE_APSHIFT) #define pte_increment_pa(p) ((p) += ptoa(1)) -#define ARM_NESTING_SIZE_MIN ((PAGE_SIZE/ARM_PGBYTES)*ARM_TT_L2_SIZE) #define ARM_NESTING_SIZE_MAX (0x0000000010000000ULL) #define TLBFLUSH_SIZE (ARM_TTE_MAX/((sizeof(unsigned int))*BYTE_SIZE)) @@ -289,16 +334,12 @@ extern pmap_paddr_t mmu_uvtop(vm_offset_t va); #define pmap_cs_log_h(msg, args...) { if(pmap_cs_log_hacks) printf("PMAP_CS: " msg "\n", args); } #define pmap_cs_log pmap_cs_log_h -#define PMAP_CS_EXCEPTION_LIST_HACK 1 - #else #define pmap_cs_log(msg, args...) #define pmap_cs_log_h(msg, args...) #endif /* DEVELOPMENT || DEBUG */ -/* Forward struct declarations for the pmap data structure */ -struct page_table_attr; /* * Convert translation/page table entry to kernel virtual address @@ -307,7 +348,7 @@ struct page_table_attr; #define ptetokv(a) (phystokv(pte_to_pa(a))) struct pmap { - tt_entry_t *tte; /* translation table entries */ + tt_entry_t *XNU_PTRAUTH_SIGNED_PTR("pmap.tte") tte; /* translation table entries */ pmap_paddr_t ttep; /* translation table physical */ vm_map_address_t min; /* min address in pmap */ vm_map_address_t max; /* max address in pmap */ @@ -315,13 +356,14 @@ struct pmap { const struct page_table_attr * pmap_pt_attr; /* details about page table layout */ #endif /* ARM_PARAMETERIZED_PMAP */ ledger_t ledger; /* ledger tracking phys mappings */ - decl_simple_lock_data(, lock); /* lock on map */ + + decl_lck_rw_data(, rwlock); + struct pmap_statistics stats; /* map statistics */ queue_chain_t pmaps; /* global list of pmaps */ tt_entry_t *tt_entry_free; /* free translation table entries */ - struct pmap *nested_pmap; /* nested pmap */ - vm_map_address_t nested_region_grand_addr; - vm_map_address_t nested_region_subord_addr; + struct pmap *XNU_PTRAUTH_SIGNED_PTR("pmap.nested_pmap") nested_pmap; /* nested pmap */ + vm_map_address_t nested_region_addr; vm_map_offset_t nested_region_size; vm_map_offset_t nested_region_true_start; vm_map_offset_t nested_region_true_end; @@ -345,6 +387,7 @@ struct pmap { char pmap_procname[17]; bool pmap_stats_assert; #endif /* MACH_ASSERT */ + bool pmap_vm_map_cs_enforced; #if DEVELOPMENT || DEBUG bool footprint_suspended; bool footprint_was_suspended; @@ -396,16 +439,16 @@ extern void pmap_switch_user_ttb(pmap_t pmap); extern void pmap_clear_user_ttb(void); extern void pmap_bootstrap(vm_offset_t); extern vm_map_address_t pmap_ptov(pmap_t, ppnum_t); +extern pmap_paddr_t pmap_find_pa(pmap_t map, addr64_t va); +extern pmap_paddr_t pmap_find_pa_nofault(pmap_t map, addr64_t va); extern ppnum_t pmap_find_phys(pmap_t map, addr64_t va); +extern ppnum_t pmap_find_phys_nofault(pmap_t map, addr64_t va); extern void pmap_set_pmap(pmap_t pmap, thread_t thread); extern void pmap_collect(pmap_t pmap); extern void pmap_gc(void); -#if defined(__arm64__) -extern vm_offset_t pmap_extract(pmap_t pmap, vm_map_offset_t va); -#endif #if HAS_APPLE_PAC && XNU_MONITOR -extern void * pmap_sign_user_ptr(void *value, 
ptrauth_key key, uint64_t data); -extern void * pmap_auth_user_ptr(void *value, ptrauth_key key, uint64_t data); +extern void * pmap_sign_user_ptr(void *value, ptrauth_key key, uint64_t data, uint64_t jop_key); +extern void * pmap_auth_user_ptr(void *value, ptrauth_key key, uint64_t data, uint64_t jop_key); #endif /* HAS_APPLE_PAC && XNU_MONITOR */ /* @@ -468,12 +511,12 @@ extern void pmap_map_globals(void); extern vm_map_address_t pmap_map_bd_with_options(vm_map_address_t va, vm_offset_t sa, vm_offset_t ea, vm_prot_t prot, int32_t options); extern vm_map_address_t pmap_map_bd(vm_map_address_t va, vm_offset_t sa, vm_offset_t ea, vm_prot_t prot); -extern void pmap_init_pte_page(pmap_t, pt_entry_t *, vm_offset_t, unsigned int ttlevel, boolean_t alloc_ptd, boolean_t clear); +extern void pmap_init_pte_page(pmap_t, pt_entry_t *, vm_offset_t, unsigned int ttlevel, boolean_t alloc_ptd); extern boolean_t pmap_valid_address(pmap_paddr_t addr); extern void pmap_disable_NX(pmap_t pmap); extern void pmap_set_nested(pmap_t pmap); -extern vm_map_address_t pmap_create_sharedpage(void); +extern void pmap_create_sharedpages(vm_map_address_t *kernel_data_addr, vm_map_address_t *kernel_text_addr, vm_map_address_t *user_text_addr); extern void pmap_insert_sharedpage(pmap_t pmap); extern void pmap_protect_sharedpage(void); @@ -481,9 +524,15 @@ extern vm_offset_t pmap_cpu_windows_copy_addr(int cpu_num, unsigned int index); extern unsigned int pmap_map_cpu_windows_copy(ppnum_t pn, vm_prot_t prot, unsigned int wimg_bits); extern void pmap_unmap_cpu_windows_copy(unsigned int index); -extern void pt_fake_zone_init(int); -extern void pt_fake_zone_info(int *, vm_size_t *, vm_size_t *, vm_size_t *, vm_size_t *, - uint64_t *, int *, int *, int *); +#if XNU_MONITOR +/* exposed for use by the HMAC SHA driver */ +extern void pmap_invoke_with_page(ppnum_t page_number, void *ctx, + void (*callback)(void *ctx, ppnum_t page_number, const void *page)); +extern void pmap_hibernate_invoke(void *ctx, void (*callback)(void *ctx, uint64_t addr, uint64_t len)); +extern void pmap_set_ppl_hashed_flag(const pmap_paddr_t addr); +extern void pmap_clear_ppl_hashed_flag_all(void); +extern void pmap_check_ppl_hashed_flag_all(void); +#endif /* XNU_MONITOR */ extern boolean_t pmap_valid_page(ppnum_t pn); extern boolean_t pmap_bootloader_page(ppnum_t pn); @@ -506,6 +555,13 @@ boolean_t pmap_virtual_region(unsigned int region_select, vm_map_offset_t *start boolean_t pmap_enforces_execute_only(pmap_t pmap); + + +#if __has_feature(ptrauth_calls) && defined(XNU_TARGET_OS_OSX) +extern void +pmap_disable_user_jop(pmap_t pmap); +#endif /* __has_feature(ptrauth_calls) && defined(XNU_TARGET_OS_OSX) */ + /* pmap dispatch indices */ #define ARM_FAST_FAULT_INDEX 0 #define ARM_FORCE_FAST_FAULT_INDEX 1 @@ -518,8 +574,8 @@ boolean_t pmap_enforces_execute_only(pmap_t pmap); #define PMAP_CREATE_INDEX 8 #define PMAP_DESTROY_INDEX 9 #define PMAP_ENTER_OPTIONS_INDEX 10 -#define PMAP_EXTRACT_INDEX 11 -#define PMAP_FIND_PHYS_INDEX 12 +/* #define PMAP_EXTRACT_INDEX 11 -- Not used*/ +#define PMAP_FIND_PA_INDEX 12 #define PMAP_INSERT_SHAREDPAGE_INDEX 13 #define PMAP_IS_EMPTY_INDEX 14 #define PMAP_MAP_CPU_WINDOWS_COPY_INDEX 15 @@ -546,25 +602,35 @@ boolean_t pmap_enforces_execute_only(pmap_t pmap); #define PMAP_SET_JIT_ENTITLED_INDEX 36 -#define PMAP_UPDATE_COMPRESSOR_PAGE_INDEX 57 -#define PMAP_TRIM_INDEX 64 -#define PMAP_LEDGER_ALLOC_INIT_INDEX 65 -#define PMAP_LEDGER_ALLOC_INDEX 66 -#define PMAP_LEDGER_FREE_INDEX 67 +#define PMAP_UPDATE_COMPRESSOR_PAGE_INDEX 55 
+#define PMAP_TRIM_INDEX 56 +#define PMAP_LEDGER_ALLOC_INIT_INDEX 57 +#define PMAP_LEDGER_ALLOC_INDEX 58 +#define PMAP_LEDGER_FREE_INDEX 59 #if HAS_APPLE_PAC && XNU_MONITOR -#define PMAP_SIGN_USER_PTR 68 -#define PMAP_AUTH_USER_PTR 69 +#define PMAP_SIGN_USER_PTR 60 +#define PMAP_AUTH_USER_PTR 61 #endif /* HAS_APPLE_PAC && XNU_MONITOR */ +#define PHYS_ATTRIBUTE_CLEAR_RANGE_INDEX 66 + + +#if __has_feature(ptrauth_calls) && defined(XNU_TARGET_OS_OSX) +#define PMAP_DISABLE_USER_JOP_INDEX 69 +#endif /* __has_feature(ptrauth_calls) && defined(XNU_TARGET_OS_OSX) */ + + + +#define PMAP_SET_VM_MAP_CS_ENFORCED_INDEX 72 -#define PMAP_COUNT 71 +#define PMAP_COUNT 73 #define PMAP_INVALID_CPU_NUM (~0U) struct pmap_cpu_data_array_entry { pmap_cpu_data_t cpu_data; -} __attribute__((aligned(1 << L2_CLINE))); +} __attribute__((aligned(1 << MAX_L2_CLINE))); /* Initialize the pmap per-CPU data for the current CPU. */ extern void pmap_cpu_data_init(void); @@ -633,7 +699,7 @@ extern kern_return_t pmap_return(boolean_t do_panic, boolean_t do_recurse); extern lck_grp_t pmap_lck_grp; #if XNU_MONITOR -extern void CleanPoC_DcacheRegion_Force_nopreempt(vm_offset_t va, unsigned length); +extern void CleanPoC_DcacheRegion_Force_nopreempt(vm_offset_t va, size_t length); #define pmap_force_dcache_clean(va, sz) CleanPoC_DcacheRegion_Force_nopreempt(va, sz) #define pmap_simple_lock(l) simple_lock_nopreempt(l, &pmap_lck_grp) #define pmap_simple_unlock(l) simple_unlock_nopreempt(l) diff --git a/osfmk/arm/proc_reg.h b/osfmk/arm/proc_reg.h index c5921cede..c04e0f478 100644 --- a/osfmk/arm/proc_reg.h +++ b/osfmk/arm/proc_reg.h @@ -75,18 +75,14 @@ #define __ARM_SUB_ARCH__ CPU_ARCH_ARMv7k #define __ARM_VMSA__ 7 #define __ARM_VFP__ 3 -#if defined(__XNU_UP__) -#define __ARM_SMP__ 0 -#else -#define __ARM_SMP__ 1 -/* For SMP kernels, force physical aperture to be mapped at PTE level so that its mappings + +/* Force physical aperture to be mapped at PTE level so that its mappings * can be updated to reflect cache attribute changes on alias mappings. This prevents * prefetched physical aperture cachelines from becoming dirty in L1 due to a write to * an uncached alias mapping on the same core. Subsequent uncached writes from another * core may not snoop this line, and the dirty line may end up being evicted later to * effectively overwrite the uncached writes from other cores. */ #define __ARM_PTE_PHYSMAP__ 1 -#endif /* __ARMA7_SMP__ controls whether we are consistent with the A7 MP_CORE spec; needed because entities other than * the xnu-managed processors may need to snoop our cache operations. 
*/ @@ -96,132 +92,11 @@ #define __ARM_USER_PROTECT__ 1 #define __ARM_TIME_TIMEBASE_ONLY__ 1 -#elif defined (APPLETYPHOON) -#define __ARM_ARCH__ 8 -#define __ARM_VMSA__ 8 -#define __ARM_SMP__ 1 -#define __ARM_VFP__ 4 -#define __ARM_COHERENT_CACHE__ 1 -#define __ARM_COHERENT_IO__ 1 -#define __ARM_IC_NOALIAS_ICACHE__ 1 -#define __ARM_DEBUG__ 7 -#define __ARM_ENABLE_SWAP__ 1 -#define __ARM_V8_CRYPTO_EXTENSIONS__ 1 -#define __ARM64_PMAP_SUBPAGE_L1__ 1 -#define __ARM_KERNEL_PROTECT__ 1 - -#elif defined (APPLETWISTER) -#define __ARM_ARCH__ 8 -#define __ARM_VMSA__ 8 -#define __ARM_SMP__ 1 -#define __ARM_VFP__ 4 -#define __ARM_COHERENT_CACHE__ 1 -#define __ARM_COHERENT_IO__ 1 -#define __ARM_IC_NOALIAS_ICACHE__ 1 -#define __ARM_DEBUG__ 7 -#define __ARM_ENABLE_SWAP__ 1 -#define __ARM_V8_CRYPTO_EXTENSIONS__ 1 -#define __ARM_16K_PG__ 1 -#define __ARM64_PMAP_SUBPAGE_L1__ 1 -#define __ARM_KERNEL_PROTECT__ 1 - -#elif defined (APPLEHURRICANE) -#define __ARM_ARCH__ 8 -#define __ARM_VMSA__ 8 -#define __ARM_SMP__ 1 -#define __ARM_VFP__ 4 -#define __ARM_COHERENT_CACHE__ 1 -#define __ARM_COHERENT_IO__ 1 -#define __ARM_IC_NOALIAS_ICACHE__ 1 -#define __ARM_DEBUG__ 7 -#define __ARM_ENABLE_SWAP__ 1 -#define __ARM_V8_CRYPTO_EXTENSIONS__ 1 -#define __ARM_16K_PG__ 1 -#define __ARM64_PMAP_SUBPAGE_L1__ 1 -#define __ARM_KERNEL_PROTECT__ 1 -#define __ARM_GLOBAL_SLEEP_BIT__ 1 -#define __ARM_PAN_AVAILABLE__ 1 - -#elif defined (APPLEMONSOON) -#define __ARM_ARCH__ 8 -#define __ARM_VMSA__ 8 -#define __ARM_SMP__ 1 -#define __ARM_AMP__ 1 -#define __ARM_VFP__ 4 -#define __ARM_COHERENT_CACHE__ 1 -#define __ARM_COHERENT_IO__ 1 -#define __ARM_IC_NOALIAS_ICACHE__ 1 -#define __ARM_DEBUG__ 7 -#define __ARM_ENABLE_SWAP__ 1 -#define __ARM_V8_CRYPTO_EXTENSIONS__ 1 -#define __ARM_16K_PG__ 1 -#define __ARM64_PMAP_SUBPAGE_L1__ 1 -#define __ARM_KERNEL_PROTECT__ 1 -#define __ARM_GLOBAL_SLEEP_BIT__ 1 -#define __ARM_PAN_AVAILABLE__ 1 -#define __ARM_WKDM_ISA_AVAILABLE__ 1 -#define __PLATFORM_WKDM_ALIGNMENT_MASK__ (0x3FULL) -#define __PLATFORM_WKDM_ALIGNMENT_BOUNDARY__ (64) -#define __ARM_CLUSTER_COUNT__ 2 - -#elif defined (APPLEVORTEX) -#define __ARM_ARCH__ 8 -#define __ARM_VMSA__ 8 -#define __ARM_SMP__ 1 -#define __ARM_VFP__ 4 -#define __ARM_COHERENT_CACHE__ 1 -#define __ARM_COHERENT_IO__ 1 -#define __ARM_IC_NOALIAS_ICACHE__ 1 -#define __ARM_DEBUG__ 7 -#define __ARM_ENABLE_SWAP__ 1 -#define __ARM_V8_CRYPTO_EXTENSIONS__ 1 -#define __ARM_16K_PG__ 1 -#define __ARM64_PMAP_SUBPAGE_L1__ 1 -#define __ARM_GLOBAL_SLEEP_BIT__ 1 -#define __ARM_PAN_AVAILABLE__ 1 -#define __ARM_WKDM_ISA_AVAILABLE__ 1 -#define __PLATFORM_WKDM_ALIGNMENT_MASK__ (0x3FULL) -#define __PLATFORM_WKDM_ALIGNMENT_BOUNDARY__ (64) -#define __ARM_CLUSTER_COUNT__ 2 - -#elif defined (APPLELIGHTNING) -#define __ARM_ARCH__ 8 -#define __ARM_VMSA__ 8 -#define __ARM_SMP__ 1 -#define __ARM_AMP__ 1 -#define __ARM_VFP__ 4 -#define __ARM_COHERENT_CACHE__ 1 -#define __ARM_COHERENT_IO__ 1 -#define __ARM_IC_NOALIAS_ICACHE__ 1 -#define __ARM_DEBUG__ 7 -#define __ARM_ENABLE_SWAP__ 1 -#define __ARM_V8_CRYPTO_EXTENSIONS__ 1 -#define __ARM_16K_PG__ 1 -#define __ARM64_PMAP_SUBPAGE_L1__ 1 -#define __ARM_GLOBAL_SLEEP_BIT__ 1 -#define __ARM_PAN_AVAILABLE__ 1 -#define __ARM_WKDM_ISA_AVAILABLE__ 1 -#define __PLATFORM_WKDM_ALIGNMENT_MASK__ (0x3FULL) -#define __PLATFORM_WKDM_ALIGNMENT_BOUNDARY__ (64) -#define __ARM_CLUSTER_COUNT__ 2 -#define -#define __APCFG_SUPPORTED__ 1 -#define __ARM_RANGE_TLBI__ 1 - -#elif defined (BCM2837) -#define __ARM_ARCH__ 8 -#define __ARM_VMSA__ 8 -#define __ARM_SMP__ 1 -#define 
__ARM_VFP__ 4 -#define __ARM_COHERENT_CACHE__ 1 -#define __ARM_DEBUG__ 7 -#define __ARM64_PMAP_SUBPAGE_L1__ 1 -#else -#error processor not supported #endif #if __ARM_42BIT_PA_SPACE__ /* For now, force the issue! */ +/* We need more VA space for the identity map to bootstrap the MMU */ #undef __ARM64_PMAP_SUBPAGE_L1__ #endif /* __ARM_42BIT_PA_SPACE__ */ @@ -242,20 +117,43 @@ #define __ARM_ENABLE_WFE_ 0 #endif /* defined(ARM_BOARD_WFE_TIMEOUT_NS) */ +/* + * MAX_PSETS allows the scheduler to create statically sized + * scheduling data structures (such as an array of processor sets, clutch + * buckets in Edge scheduler etc.). All current AMP platforms are dual + * pset and all non-AMP platforms are single pset architectures. This + * define might need to be conditionalized better (or moved to a better + * header) in the future. + * + * + */ +#if __ARM_AMP__ +#define MAX_PSETS 2 +#else /*__ARM_AMP__ */ +#define MAX_PSETS 1 +#endif /* __ARM_AMP__ */ + /* * The clutch scheduler is enabled only on non-AMP platforms for now. */ -#if !__ARM_AMP__ && CONFIG_CLUTCH +#if CONFIG_CLUTCH + +#if __ARM_AMP__ + +/* Enable the Edge scheduler for all J129 platforms */ +#if XNU_TARGET_OS_OSX #define CONFIG_SCHED_CLUTCH 1 -#else /* !__ARM_AMP__ && CONFIG_CLUTCH */ -#define CONFIG_SCHED_CLUTCH 0 -#endif /* !__ARM_AMP__ && CONFIG_CLUTCH */ +#define CONFIG_SCHED_EDGE 1 +#endif /* XNU_TARGET_OS_OSX */ -#if __ARM_AMP__ || CONFIG_SCHED_CLUTCH +#else /* __ARM_AMP__ */ +#define CONFIG_SCHED_CLUTCH 1 +#endif /* __ARM_AMP__ */ + +#endif /* CONFIG_CLUTCH */ + +/* Thread groups are enabled on all ARM platforms (irrespective of scheduler) */ #define CONFIG_THREAD_GROUPS 1 -#else /* __ARM_AMP__ || CONFIG_SCHED_CLUTCH */ -#define CONFIG_THREAD_GROUPS 0 -#endif #ifdef XNU_KERNEL_PRIVATE @@ -399,26 +297,7 @@ #define MMU_I_CLINE 5 /* cache line size as 1< #include +#include #include #include #if __arm64__ @@ -89,8 +90,10 @@ rtclock_early_init(void) #if DEVELOPMENT || DEBUG uint32_t tmp_mv = 1; +#if defined(APPLE_ARM64_ARCH_FAMILY) /* Enable MAT validation on A0 hardware by default. 
*/ - absolute_time_validation = (get_arm_cpu_version() == 0x00); + absolute_time_validation = ml_get_topology_info()->chip_revision == CPU_VERSION_A0; +#endif if (kern_feature_override(KF_MATV_OVRD)) { absolute_time_validation = 0; @@ -284,8 +287,7 @@ rtclock_intr(__unused unsigned int is_user_context) cdp = getCpuDatap(); cdp->cpu_stat.timer_cnt++; - cdp->cpu_stat.timer_cnt_wake++; - SCHED_STATS_TIMER_POP(current_processor()); + SCHED_STATS_INC(timer_pop_count); assert(!ml_get_interrupts_enabled()); @@ -487,12 +489,7 @@ machine_delay_until(uint64_t interval, do { #if __ARM_ENABLE_WFE_ -#if __arm64__ - if (arm64_wfe_allowed()) -#endif /* __arm64__ */ - { - __builtin_arm_wfe(); - } + __builtin_arm_wfe(); #endif /* __ARM_ENABLE_WFE_ */ now = mach_absolute_time(); diff --git a/osfmk/arm/smp.h b/osfmk/arm/smp.h index 6b60210b1..f2b913956 100644 --- a/osfmk/arm/smp.h +++ b/osfmk/arm/smp.h @@ -31,7 +31,6 @@ #include -#define __SMP__ __ARM_SMP__ #define __AMP__ __ARM_AMP__ #endif /* _ARM_SMP_H_ */ diff --git a/osfmk/arm/start.s b/osfmk/arm/start.s index 2ecd28c66..ab928722e 100644 --- a/osfmk/arm/start.s +++ b/osfmk/arm/start.s @@ -30,6 +30,7 @@ #include #include #include "assym.s" +#include "caches_macros.s" .text .align 12 @@ -72,7 +73,7 @@ L_start_cpu_0: orr r5, r5, #(TTBR_SETUP & 0xFF00) // Setup PTWs memory attribute mcr p15, 0, r5, c2, c0, 0 // write kernel to translation table base 0 mcr p15, 0, r5, c2, c0, 1 // also to translation table base 1 - mov r5, #TTBCR_N_1GB_TTB0 // identify the split between 0 and 1 + mov r5, #TTBCR_N_SETUP // identify the split between 0 and 1 mcr p15, 0, r5, c2, c0, 2 // and set up the translation control reg ldr r2, [r1, CPU_NUMBER_GS] // Get cpu number mcr p15, 0, r2, c13, c0, 3 // Write TPIDRURO @@ -151,7 +152,7 @@ LEXT(_start) orr r5, r5, #(TTBR_SETUP & 0xFF00) // Setup PTWs memory attribute mcr p15, 0, r5, c2, c0, 0 // write kernel to translation table base 0 mcr p15, 0, r5, c2, c0, 1 // also to translation table base 1 - mov r5, #TTBCR_N_1GB_TTB0 // identify the split between 0 and 1 + mov r5, #TTBCR_N_SETUP // identify the split between 0 and 1 mcr p15, 0, r5, c2, c0, 2 // and set up the translation control reg // Mark the entries invalid in the 4 page trampoline translation table @@ -281,31 +282,34 @@ doneveqp: // clean the dcache mov r11, #0 + GET_CACHE_CONFIG r11, r2, r3, r4 + mov r11, #0 cleanflushway: cleanflushline: mcr p15, 0, r11, c7, c14, 2 // cleanflush dcache line by way/set - add r11, r11, #1 << MMU_I7SET // increment set index - tst r11, #1 << (MMU_NSET + MMU_I7SET) // look for overflow + add r11, r11, r2 // increment set index + tst r11, r3 // look for overflow beq cleanflushline - bic r11, r11, #1 << (MMU_NSET + MMU_I7SET) // clear set overflow - adds r11, r11, #1 << MMU_I7WAY // increment way + bic r11, r11, r3 // clear set overflow + adds r11, r11, r4 // increment way bcc cleanflushway // loop - -#if __ARM_L2CACHE__ + HAS_L2_CACHE r11 + cmp r11, #0 + beq invall2skipl2dcache // Invalidate L2 cache + mov r11, #1 + GET_CACHE_CONFIG r11, r2, r3, r4 mov r11, #2 invall2flushway: invall2flushline: mcr p15, 0, r11, c7, c14, 2 // Invalidate dcache line by way/set - add r11, r11, #1 << L2_I7SET // increment set index - tst r11, #1 << (L2_NSET + L2_I7SET) // look for overflow + add r11, r11, r2 // increment set index + tst r11, r3 // look for overflow beq invall2flushline - bic r11, r11, #1 << (L2_NSET + L2_I7SET) // clear set overflow - adds r11, r11, #1 << L2_I7WAY // increment way + bic r11, r11, r3 // clear set overflow + adds r11, r11, r4 // 
increment way bcc invall2flushway // loop - -#endif - +invall2skipl2dcache: mov r11, #0 mcr p15, 0, r11, c13, c0, 3 // Write TPIDRURO LOAD_ADDR(sp, intstack_top) // Get interrupt stack top diff --git a/osfmk/arm/status.c b/osfmk/arm/status.c index 40c1f5e1d..35c454ac8 100644 --- a/osfmk/arm/status.c +++ b/osfmk/arm/status.c @@ -35,16 +35,14 @@ #include #include -struct arm_vfpv2_state -{ - __uint32_t __r[32]; - __uint32_t __fpscr; - +struct arm_vfpv2_state { + __uint32_t __r[32]; + __uint32_t __fpscr; }; -typedef struct arm_vfpv2_state arm_vfpv2_state_t; +typedef struct arm_vfpv2_state arm_vfpv2_state_t; -#define ARM_VFPV2_STATE_COUNT ((mach_msg_type_number_t) \ +#define ARM_VFPV2_STATE_COUNT ((mach_msg_type_number_t) \ (sizeof (arm_vfpv2_state_t)/sizeof(uint32_t))) @@ -52,17 +50,17 @@ typedef struct arm_vfpv2_state arm_vfpv2_state_t; * Forward definitions */ void - thread_set_child(thread_t child, int pid); +thread_set_child(thread_t child, int pid); void - thread_set_parent(thread_t parent, int pid); +thread_set_parent(thread_t parent, int pid); /* * Maps state flavor to number of words in the state: */ /* __private_extern__ */ unsigned int _MachineStateCount[] = { - /* FLAVOR_LIST */ 0, + /* FLAVOR_LIST */ 0, [ARM_THREAD_STATE] = ARM_THREAD_STATE_COUNT, [ARM_VFP_STATE] = ARM_VFP_STATE_COUNT, [ARM_EXCEPTION_STATE] = ARM_EXCEPTION_STATE_COUNT, @@ -74,10 +72,10 @@ extern zone_t ads_zone; kern_return_t machine_thread_state_convert_to_user( - __unused thread_t thread, - __unused thread_flavor_t flavor, - __unused thread_state_t tstate, - __unused mach_msg_type_number_t *count) + __unused thread_t thread, + __unused thread_flavor_t flavor, + __unused thread_state_t tstate, + __unused mach_msg_type_number_t *count) { // No conversion to userspace representation on this platform return KERN_SUCCESS; @@ -85,10 +83,10 @@ machine_thread_state_convert_to_user( kern_return_t machine_thread_state_convert_from_user( - __unused thread_t thread, - __unused thread_flavor_t flavor, - __unused thread_state_t tstate, - __unused mach_msg_type_number_t count) + __unused thread_t thread, + __unused thread_flavor_t flavor, + __unused thread_state_t tstate, + __unused mach_msg_type_number_t count) { // No conversion from userspace representation on this platform return KERN_SUCCESS; @@ -96,8 +94,8 @@ machine_thread_state_convert_from_user( kern_return_t machine_thread_siguctx_pointer_convert_to_user( - __unused thread_t thread, - __unused user_addr_t *uctxp) + __unused thread_t thread, + __unused user_addr_t *uctxp) { // No conversion to userspace representation on this platform return KERN_SUCCESS; @@ -105,9 +103,9 @@ machine_thread_siguctx_pointer_convert_to_user( kern_return_t machine_thread_function_pointers_convert_from_user( - __unused thread_t thread, - __unused user_addr_t *fptrs, - __unused uint32_t count) + __unused thread_t thread, + __unused user_addr_t *fptrs, + __unused uint32_t count) { // No conversion from userspace representation on this platform return KERN_SUCCESS; @@ -119,19 +117,19 @@ machine_thread_function_pointers_convert_from_user( */ kern_return_t machine_thread_get_state( - thread_t thread, - thread_flavor_t flavor, - thread_state_t tstate, - mach_msg_type_number_t * count) + thread_t thread, + thread_flavor_t flavor, + thread_state_t tstate, + mach_msg_type_number_t * count) { - -#define machine_thread_get_state_kprintf(x...) /* kprintf("machine_thread_get - * _state: " x) */ +#define machine_thread_get_state_kprintf(x...) 
/* kprintf("machine_thread_get + * _state: " x) */ switch (flavor) { case THREAD_STATE_FLAVOR_LIST: - if (*count < 4) - return (KERN_INVALID_ARGUMENT); + if (*count < 4) { + return KERN_INVALID_ARGUMENT; + } tstate[0] = ARM_THREAD_STATE; tstate[1] = ARM_VFP_STATE; @@ -141,8 +139,9 @@ machine_thread_get_state( break; case THREAD_STATE_FLAVOR_LIST_10_15: - if (*count < 5) - return (KERN_INVALID_ARGUMENT); + if (*count < 5) { + return KERN_INVALID_ARGUMENT; + } tstate[0] = ARM_THREAD_STATE; tstate[1] = ARM_VFP_STATE; @@ -153,110 +152,118 @@ machine_thread_get_state( break; case ARM_THREAD_STATE:{ - struct arm_thread_state *state; - struct arm_saved_state *saved_state; - arm_unified_thread_state_t *unified_state; - - unsigned int i; - if (*count < ARM_THREAD_STATE_COUNT) - return (KERN_INVALID_ARGUMENT); - - if (*count == ARM_UNIFIED_THREAD_STATE_COUNT) { - unified_state = (arm_unified_thread_state_t *) tstate; - state = &unified_state->ts_32; - unified_state->ash.flavor = ARM_THREAD_STATE32; - unified_state->ash.count = ARM_THREAD_STATE32_COUNT; - } else { - state = (struct arm_thread_state *) tstate; - } - saved_state = &thread->machine.PcbData; - - state->sp = saved_state->sp; - state->lr = saved_state->lr; - state->pc = saved_state->pc; - state->cpsr = saved_state->cpsr; - for (i = 0; i < 13; i++) - state->r[i] = saved_state->r[i]; - machine_thread_get_state_kprintf("machine_thread_get_state: pc 0x%x r0 0x%x sp 0x%x\n", - state->pc, state->r[0], state->sp); - - if (*count != ARM_UNIFIED_THREAD_STATE_COUNT) { - *count = ARM_THREAD_STATE_COUNT; - } - break; + struct arm_thread_state *state; + struct arm_saved_state *saved_state; + arm_unified_thread_state_t *unified_state; + + unsigned int i; + if (*count < ARM_THREAD_STATE_COUNT) { + return KERN_INVALID_ARGUMENT; + } + + if (*count == ARM_UNIFIED_THREAD_STATE_COUNT) { + unified_state = (arm_unified_thread_state_t *) tstate; + state = &unified_state->ts_32; + unified_state->ash.flavor = ARM_THREAD_STATE32; + unified_state->ash.count = ARM_THREAD_STATE32_COUNT; + } else { + state = (struct arm_thread_state *) tstate; + } + saved_state = &thread->machine.PcbData; + + state->sp = saved_state->sp; + state->lr = saved_state->lr; + state->pc = saved_state->pc; + state->cpsr = saved_state->cpsr; + for (i = 0; i < 13; i++) { + state->r[i] = saved_state->r[i]; + } + machine_thread_get_state_kprintf("machine_thread_get_state: pc 0x%x r0 0x%x sp 0x%x\n", + state->pc, state->r[0], state->sp); + + if (*count != ARM_UNIFIED_THREAD_STATE_COUNT) { + *count = ARM_THREAD_STATE_COUNT; } + break; + } case ARM_EXCEPTION_STATE:{ - struct arm_exception_state *state; - struct arm_saved_state *saved_state; + struct arm_exception_state *state; + struct arm_saved_state *saved_state; - if (*count < ARM_EXCEPTION_STATE_COUNT) - return (KERN_INVALID_ARGUMENT); + if (*count < ARM_EXCEPTION_STATE_COUNT) { + return KERN_INVALID_ARGUMENT; + } - state = (struct arm_exception_state *) tstate; - saved_state = &thread->machine.PcbData; + state = (struct arm_exception_state *) tstate; + saved_state = &thread->machine.PcbData; - state->exception = saved_state->exception; - state->fsr = saved_state->fsr; - state->far = saved_state->far; + state->exception = saved_state->exception; + state->fsr = saved_state->fsr; + state->far = saved_state->far; - *count = ARM_EXCEPTION_STATE_COUNT; - break; - } + *count = ARM_EXCEPTION_STATE_COUNT; + break; + } case ARM_VFP_STATE:{ -#if __ARM_VFP__ - struct arm_vfp_state *state; - struct arm_vfpsaved_state *saved_state; - unsigned int i; - 
unsigned int max; - - if (*count < ARM_VFP_STATE_COUNT) { - if (*count < ARM_VFPV2_STATE_COUNT) - return (KERN_INVALID_ARGUMENT); - else - *count = ARM_VFPV2_STATE_COUNT; +#if __ARM_VFP__ + struct arm_vfp_state *state; + struct arm_vfpsaved_state *saved_state; + unsigned int i; + unsigned int max; + + if (*count < ARM_VFP_STATE_COUNT) { + if (*count < ARM_VFPV2_STATE_COUNT) { + return KERN_INVALID_ARGUMENT; + } else { + *count = ARM_VFPV2_STATE_COUNT; } + } - if (*count == ARM_VFPV2_STATE_COUNT) - max = 32; - else - max = 64; + if (*count == ARM_VFPV2_STATE_COUNT) { + max = 32; + } else { + max = 64; + } - state = (struct arm_vfp_state *) tstate; - saved_state = find_user_vfp(thread); + state = (struct arm_vfp_state *) tstate; + saved_state = find_user_vfp(thread); - state->fpscr = saved_state->fpscr; - for (i = 0; i < max; i++) - state->r[i] = saved_state->r[i]; + state->fpscr = saved_state->fpscr; + for (i = 0; i < max; i++) { + state->r[i] = saved_state->r[i]; + } #endif - break; - } + break; + } case ARM_DEBUG_STATE:{ - arm_debug_state_t *state; - arm_debug_state_t *thread_state; - - if (*count < ARM_DEBUG_STATE_COUNT) - return (KERN_INVALID_ARGUMENT); - - state = (arm_debug_state_t *) tstate; - thread_state = find_debug_state(thread); - - if (thread_state == NULL) - bzero(state, sizeof(arm_debug_state_t)); - else - bcopy(thread_state, state, sizeof(arm_debug_state_t)); - - *count = ARM_DEBUG_STATE_COUNT; - break; + arm_debug_state_t *state; + arm_debug_state_t *thread_state; + + if (*count < ARM_DEBUG_STATE_COUNT) { + return KERN_INVALID_ARGUMENT; } + state = (arm_debug_state_t *) tstate; + thread_state = find_debug_state(thread); + + if (thread_state == NULL) { + bzero(state, sizeof(arm_debug_state_t)); + } else { + bcopy(thread_state, state, sizeof(arm_debug_state_t)); + } + + *count = ARM_DEBUG_STATE_COUNT; + break; + } + case ARM_PAGEIN_STATE:{ arm_pagein_state_t *state; if (*count < ARM_PAGEIN_STATE_COUNT) { - return (KERN_INVALID_ARGUMENT); + return KERN_INVALID_ARGUMENT; } - + state = (arm_pagein_state_t *)tstate; state->__pagein_error = thread->t_pagein_error; @@ -265,9 +272,9 @@ machine_thread_get_state( } default: - return (KERN_INVALID_ARGUMENT); + return KERN_INVALID_ARGUMENT; } - return (KERN_SUCCESS); + return KERN_SUCCESS; } @@ -277,48 +284,50 @@ machine_thread_get_state( */ kern_return_t machine_thread_get_kern_state( - thread_t thread, - thread_flavor_t flavor, - thread_state_t tstate, - mach_msg_type_number_t * count) + thread_t thread, + thread_flavor_t flavor, + thread_state_t tstate, + mach_msg_type_number_t * count) { - -#define machine_thread_get_kern_state_kprintf(x...) /* kprintf("machine_threa - * d_get_kern_state: " - * x) */ +#define machine_thread_get_kern_state_kprintf(x...) 
/* kprintf("machine_threa + * d_get_kern_state: " + * x) */ /* * This works only for an interrupted kernel thread */ - if (thread != current_thread() || getCpuDatap()->cpu_int_state == NULL) + if (thread != current_thread() || getCpuDatap()->cpu_int_state == NULL) { return KERN_FAILURE; + } switch (flavor) { case ARM_THREAD_STATE:{ - struct arm_thread_state *state; - struct arm_saved_state *saved_state; - unsigned int i; - if (*count < ARM_THREAD_STATE_COUNT) - return (KERN_INVALID_ARGUMENT); + struct arm_thread_state *state; + struct arm_saved_state *saved_state; + unsigned int i; + if (*count < ARM_THREAD_STATE_COUNT) { + return KERN_INVALID_ARGUMENT; + } - state = (struct arm_thread_state *) tstate; - saved_state = getCpuDatap()->cpu_int_state; - - state->sp = saved_state->sp; - state->lr = saved_state->lr; - state->pc = saved_state->pc; - state->cpsr = saved_state->cpsr; - for (i = 0; i < 13; i++) - state->r[i] = saved_state->r[i]; - machine_thread_get_kern_state_kprintf("machine_thread_get_state: pc 0x%x r0 0x%x sp 0x%x\n", - state->pc, state->r[0], state->sp); - *count = ARM_THREAD_STATE_COUNT; - break; + state = (struct arm_thread_state *) tstate; + saved_state = getCpuDatap()->cpu_int_state; + + state->sp = saved_state->sp; + state->lr = saved_state->lr; + state->pc = saved_state->pc; + state->cpsr = saved_state->cpsr; + for (i = 0; i < 13; i++) { + state->r[i] = saved_state->r[i]; } + machine_thread_get_kern_state_kprintf("machine_thread_get_state: pc 0x%x r0 0x%x sp 0x%x\n", + state->pc, state->r[0], state->sp); + *count = ARM_THREAD_STATE_COUNT; + break; + } default: - return (KERN_INVALID_ARGUMENT); + return KERN_INVALID_ARGUMENT; } - return (KERN_SUCCESS); + return KERN_SUCCESS; } extern long long arm_debug_get(void); @@ -329,158 +338,162 @@ extern long long arm_debug_get(void); */ kern_return_t machine_thread_set_state( - thread_t thread, - thread_flavor_t flavor, - thread_state_t tstate, - mach_msg_type_number_t count) + thread_t thread, + thread_flavor_t flavor, + thread_state_t tstate, + mach_msg_type_number_t count) { - -#define machine_thread_set_state_kprintf(x...) /* kprintf("machine_thread_set - * _state: " x) */ +#define machine_thread_set_state_kprintf(x...) 
/* kprintf("machine_thread_set + * _state: " x) */ switch (flavor) { case ARM_THREAD_STATE:{ - struct arm_thread_state *state; - struct arm_saved_state *saved_state; - arm_unified_thread_state_t *unified_state; - int old_psr; + struct arm_thread_state *state; + struct arm_saved_state *saved_state; + arm_unified_thread_state_t *unified_state; + int old_psr; - if (count < ARM_THREAD_STATE_COUNT) - return (KERN_INVALID_ARGUMENT); + if (count < ARM_THREAD_STATE_COUNT) { + return KERN_INVALID_ARGUMENT; + } - if (count == ARM_UNIFIED_THREAD_STATE_COUNT) { - unified_state = (arm_unified_thread_state_t *) tstate; - state = &unified_state->ts_32; - } else { - state = (struct arm_thread_state *) tstate; - } - saved_state = &thread->machine.PcbData; - old_psr = saved_state->cpsr; - memcpy((char *) saved_state, (char *) state, sizeof(*state)); - /* - * do not allow privileged bits of the PSR to be - * changed - */ - saved_state->cpsr = (saved_state->cpsr & ~PSR_USER_MASK) | (old_psr & PSR_USER_MASK); - - machine_thread_set_state_kprintf("machine_thread_set_state: pc 0x%x r0 0x%x sp 0x%x\n", - state->pc, state->r[0], state->sp); - break; + if (count == ARM_UNIFIED_THREAD_STATE_COUNT) { + unified_state = (arm_unified_thread_state_t *) tstate; + state = &unified_state->ts_32; + } else { + state = (struct arm_thread_state *) tstate; } + saved_state = &thread->machine.PcbData; + old_psr = saved_state->cpsr; + memcpy((char *) saved_state, (char *) state, sizeof(*state)); + /* + * do not allow privileged bits of the PSR to be + * changed + */ + saved_state->cpsr = (saved_state->cpsr & ~PSR_USER_MASK) | (old_psr & PSR_USER_MASK); + + machine_thread_set_state_kprintf("machine_thread_set_state: pc 0x%x r0 0x%x sp 0x%x\n", + state->pc, state->r[0], state->sp); + break; + } case ARM_VFP_STATE:{ #if __ARM_VFP__ - struct arm_vfp_state *state; - struct arm_vfpsaved_state *saved_state; - unsigned int i; - unsigned int max; - - if (count < ARM_VFP_STATE_COUNT) { - if (count < ARM_VFPV2_STATE_COUNT) - return (KERN_INVALID_ARGUMENT); - else - count = ARM_VFPV2_STATE_COUNT; + struct arm_vfp_state *state; + struct arm_vfpsaved_state *saved_state; + unsigned int i; + unsigned int max; + + if (count < ARM_VFP_STATE_COUNT) { + if (count < ARM_VFPV2_STATE_COUNT) { + return KERN_INVALID_ARGUMENT; + } else { + count = ARM_VFPV2_STATE_COUNT; } + } - if (count == ARM_VFPV2_STATE_COUNT) - max = 32; - else - max = 64; + if (count == ARM_VFPV2_STATE_COUNT) { + max = 32; + } else { + max = 64; + } - state = (struct arm_vfp_state *) tstate; - saved_state = find_user_vfp(thread); + state = (struct arm_vfp_state *) tstate; + saved_state = find_user_vfp(thread); - saved_state->fpscr = state->fpscr; - for (i = 0; i < max; i++) - saved_state->r[i] = state->r[i]; + saved_state->fpscr = state->fpscr; + for (i = 0; i < max; i++) { + saved_state->r[i] = state->r[i]; + } #endif - break; - } + break; + } case ARM_EXCEPTION_STATE:{ + if (count < ARM_EXCEPTION_STATE_COUNT) { + return KERN_INVALID_ARGUMENT; + } - if (count < ARM_EXCEPTION_STATE_COUNT) - return (KERN_INVALID_ARGUMENT); + break; + } + case ARM_DEBUG_STATE:{ + arm_debug_state_t *state; + arm_debug_state_t *thread_state; + boolean_t enabled = FALSE; + unsigned int i; - break; + if (count < ARM_DEBUG_STATE_COUNT) { + return KERN_INVALID_ARGUMENT; } - case ARM_DEBUG_STATE:{ - arm_debug_state_t *state; - arm_debug_state_t *thread_state; - boolean_t enabled = FALSE; - unsigned int i; - - if (count < ARM_DEBUG_STATE_COUNT) - return (KERN_INVALID_ARGUMENT); - - state = (arm_debug_state_t *) 
tstate; - thread_state = find_debug_state(thread); - - if (count < ARM_DEBUG_STATE_COUNT) - return (KERN_INVALID_ARGUMENT); - - for (i = 0; i < 16; i++) { - /* do not allow context IDs to be set */ - if (((state->bcr[i] & ARM_DBGBCR_TYPE_MASK) != ARM_DBGBCR_TYPE_IVA) - || ((state->bcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED) - || ((state->wcr[i] & ARM_DBGBCR_TYPE_MASK) != ARM_DBGBCR_TYPE_IVA) - || ((state->wcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED)) { - return KERN_PROTECTION_FAILURE; - } - if ((((state->bcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE)) - || ((state->wcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE)) { - enabled = TRUE; - } - } - - if (!enabled) { - if (thread_state != NULL) - { - void *pTmp = thread->machine.DebugData; - thread->machine.DebugData = NULL; - zfree(ads_zone, pTmp); - } - } - else - { - if (thread_state == NULL) - thread_state = zalloc(ads_zone); - - for (i = 0; i < 16; i++) { - /* set appropriate priviledge; mask out unknown bits */ - thread_state->bcr[i] = (state->bcr[i] & (ARM_DBG_CR_ADDRESS_MASK_MASK - | ARM_DBGBCR_MATCH_MASK - | ARM_DBG_CR_BYTE_ADDRESS_SELECT_MASK - | ARM_DBG_CR_ENABLE_MASK)) - | ARM_DBGBCR_TYPE_IVA - | ARM_DBG_CR_LINKED_UNLINKED - | ARM_DBG_CR_SECURITY_STATE_BOTH - | ARM_DBG_CR_MODE_CONTROL_USER; - thread_state->bvr[i] = state->bvr[i] & ARM_DBG_VR_ADDRESS_MASK; - thread_state->wcr[i] = (state->wcr[i] & (ARM_DBG_CR_ADDRESS_MASK_MASK - | ARM_DBGWCR_BYTE_ADDRESS_SELECT_MASK - | ARM_DBGWCR_ACCESS_CONTROL_MASK - | ARM_DBG_CR_ENABLE_MASK)) - | ARM_DBG_CR_LINKED_UNLINKED - | ARM_DBG_CR_SECURITY_STATE_BOTH - | ARM_DBG_CR_MODE_CONTROL_USER; - thread_state->wvr[i] = state->wvr[i] & ARM_DBG_VR_ADDRESS_MASK; - } - - if (thread->machine.DebugData == NULL) - thread->machine.DebugData = thread_state; - } - - if (thread == current_thread()) { - arm_debug_set(thread_state); + + state = (arm_debug_state_t *) tstate; + thread_state = find_debug_state(thread); + + if (count < ARM_DEBUG_STATE_COUNT) { + return KERN_INVALID_ARGUMENT; + } + + for (i = 0; i < 16; i++) { + /* do not allow context IDs to be set */ + if (((state->bcr[i] & ARM_DBGBCR_TYPE_MASK) != ARM_DBGBCR_TYPE_IVA) + || ((state->bcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED) + || ((state->wcr[i] & ARM_DBGBCR_TYPE_MASK) != ARM_DBGBCR_TYPE_IVA) + || ((state->wcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED)) { + return KERN_PROTECTION_FAILURE; + } + if ((((state->bcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE)) + || ((state->wcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE)) { + enabled = TRUE; } - - break; } - + + if (!enabled) { + if (thread_state != NULL) { + void *pTmp = thread->machine.DebugData; + thread->machine.DebugData = NULL; + zfree(ads_zone, pTmp); + } + } else { + if (thread_state == NULL) { + thread_state = zalloc(ads_zone); + } + + for (i = 0; i < 16; i++) { + /* set appropriate priviledge; mask out unknown bits */ + thread_state->bcr[i] = (state->bcr[i] & (ARM_DBG_CR_ADDRESS_MASK_MASK + | ARM_DBGBCR_MATCH_MASK + | ARM_DBG_CR_BYTE_ADDRESS_SELECT_MASK + | ARM_DBG_CR_ENABLE_MASK)) + | ARM_DBGBCR_TYPE_IVA + | ARM_DBG_CR_LINKED_UNLINKED + | ARM_DBG_CR_SECURITY_STATE_BOTH + | ARM_DBG_CR_MODE_CONTROL_USER; + thread_state->bvr[i] = state->bvr[i] & ARM_DBG_VR_ADDRESS_MASK; + thread_state->wcr[i] = (state->wcr[i] & (ARM_DBG_CR_ADDRESS_MASK_MASK + | ARM_DBGWCR_BYTE_ADDRESS_SELECT_MASK + | ARM_DBGWCR_ACCESS_CONTROL_MASK + | ARM_DBG_CR_ENABLE_MASK)) + | 
ARM_DBG_CR_LINKED_UNLINKED + | ARM_DBG_CR_SECURITY_STATE_BOTH + | ARM_DBG_CR_MODE_CONTROL_USER; + thread_state->wvr[i] = state->wvr[i] & ARM_DBG_VR_ADDRESS_MASK; + } + + if (thread->machine.DebugData == NULL) { + thread->machine.DebugData = thread_state; + } + } + + if (thread == current_thread()) { + arm_debug_set(thread_state); + } + + break; + } + default: - return (KERN_INVALID_ARGUMENT); + return KERN_INVALID_ARGUMENT; } - return (KERN_SUCCESS); + return KERN_SUCCESS; } mach_vm_address_t @@ -524,16 +537,16 @@ void vfp_state_initialize(struct arm_vfpsaved_state *vfp_state) { /* Set default VFP state to RunFast mode: - * - * - flush-to-zero mode - * - default NaN mode - * - no enabled exceptions - * - * On the VFP11, this allows the use of floating point without - * trapping to support code, which we do not provide. With - * the Cortex-A8, this allows the use of the (much faster) NFP - * pipeline for single-precision operations. - */ + * + * - flush-to-zero mode + * - default NaN mode + * - no enabled exceptions + * + * On the VFP11, this allows the use of floating point without + * trapping to support code, which we do not provide. With + * the Cortex-A8, this allows the use of the (much faster) NFP + * pipeline for single-precision operations. + */ bzero(vfp_state, sizeof(*vfp_state)); vfp_state->fpscr = FPSCR_DEFAULT; @@ -547,14 +560,14 @@ vfp_state_initialize(struct arm_vfpsaved_state *vfp_state) */ kern_return_t machine_thread_dup( - thread_t self, - thread_t target, - __unused boolean_t is_corpse) + thread_t self, + thread_t target, + __unused boolean_t is_corpse) { struct arm_saved_state *self_saved_state; struct arm_saved_state *target_saved_state; -#if __ARM_VFP__ +#if __ARM_VFP__ struct arm_vfpsaved_state *self_vfp_state; struct arm_vfpsaved_state *target_vfp_state; #endif @@ -565,13 +578,13 @@ machine_thread_dup( target_saved_state = &target->machine.PcbData; bcopy(self_saved_state, target_saved_state, sizeof(struct arm_saved_state)); -#if __ARM_VFP__ +#if __ARM_VFP__ self_vfp_state = &self->machine.PcbData.VFPdata; target_vfp_state = &target->machine.PcbData.VFPdata; bcopy(self_vfp_state, target_vfp_state, sizeof(struct arm_vfpsaved_state)); #endif - return (KERN_SUCCESS); + return KERN_SUCCESS; } /* @@ -580,9 +593,9 @@ machine_thread_dup( */ struct arm_saved_state * get_user_regs( - thread_t thread) + thread_t thread) { - return (&thread->machine.PcbData); + return &thread->machine.PcbData; } /* @@ -591,7 +604,7 @@ get_user_regs( */ struct arm_saved_state * find_user_regs( - thread_t thread) + thread_t thread) { return get_user_regs(thread); } @@ -602,16 +615,16 @@ find_user_regs( */ struct arm_saved_state * find_kern_regs( - thread_t thread) + thread_t thread) { /* - * This works only for an interrupted kernel thread - */ - if (thread != current_thread() || getCpuDatap()->cpu_int_state == NULL) - return ((struct arm_saved_state *) NULL); - else - return (getCpuDatap()->cpu_int_state); - + * This works only for an interrupted kernel thread + */ + if (thread != current_thread() || getCpuDatap()->cpu_int_state == NULL) { + return (struct arm_saved_state *) NULL; + } else { + return getCpuDatap()->cpu_int_state; + } } #if __ARM_VFP__ @@ -622,7 +635,7 @@ find_kern_regs( struct arm_vfpsaved_state * find_user_vfp( - thread_t thread) + thread_t thread) { return &thread->machine.PcbData.VFPdata; } @@ -630,9 +643,9 @@ find_user_vfp( arm_debug_state_t * find_debug_state( - thread_t thread) + thread_t thread) { - return thread->machine.DebugData; + return thread->machine.DebugData; } 
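The ARM_DEBUG_STATE handling above applies an allow-list-then-force pattern to every user-supplied breakpoint/watchpoint control word: only the masked-in bits survive, and the type, linking, security-state and privilege fields are forced so userspace cannot program context-ID-linked or kernel-mode debug registers. A minimal sketch of that pattern, restated outside the switch for clarity (the helper name is hypothetical; the ARM_DBG_* masks are the ones used in the hunk above):

    /* Hypothetical restatement of the bcr sanitization performed in machine_thread_set_state(). */
    static inline uint32_t
    sanitize_user_bcr(uint32_t user_bcr)
    {
        /* Bits userspace is allowed to control. */
        const uint32_t allowed = ARM_DBG_CR_ADDRESS_MASK_MASK
            | ARM_DBGBCR_MATCH_MASK
            | ARM_DBG_CR_BYTE_ADDRESS_SELECT_MASK
            | ARM_DBG_CR_ENABLE_MASK;

        return (user_bcr & allowed)
            | ARM_DBGBCR_TYPE_IVA            /* unlinked instruction-VA match only */
            | ARM_DBG_CR_LINKED_UNLINKED
            | ARM_DBG_CR_SECURITY_STATE_BOTH
            | ARM_DBG_CR_MODE_CONTROL_USER;  /* match in user mode only */
    }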
/* @@ -641,44 +654,46 @@ find_debug_state( */ kern_return_t thread_userstack( - __unused thread_t thread, - int flavor, - thread_state_t tstate, - unsigned int count, - mach_vm_offset_t * user_stack, - int *customstack, - __unused boolean_t is64bit -) + __unused thread_t thread, + int flavor, + thread_state_t tstate, + unsigned int count, + mach_vm_offset_t * user_stack, + int *customstack, + __unused boolean_t is64bit + ) { - switch (flavor) { case ARM_THREAD_STATE: - { - struct arm_thread_state *state; + { + struct arm_thread_state *state; - if (count < ARM_THREAD_STATE_COUNT) - return (KERN_INVALID_ARGUMENT); + if (count < ARM_THREAD_STATE_COUNT) { + return KERN_INVALID_ARGUMENT; + } - if (customstack) - *customstack = 0; - state = (struct arm_thread_state *) tstate; + if (customstack) { + *customstack = 0; + } + state = (struct arm_thread_state *) tstate; - if (state->sp) { - *user_stack = CAST_USER_ADDR_T(state->sp); - if (customstack) - *customstack = 1; - } else { - *user_stack = CAST_USER_ADDR_T(USRSTACK); + if (state->sp) { + *user_stack = CAST_USER_ADDR_T(state->sp); + if (customstack) { + *customstack = 1; } + } else { + *user_stack = CAST_USER_ADDR_T(USRSTACK); } - break; + } + break; default: - return (KERN_INVALID_ARGUMENT); + return KERN_INVALID_ARGUMENT; } - return (KERN_SUCCESS); + return KERN_SUCCESS; } /* @@ -694,7 +709,7 @@ thread_userstackdefault( { *default_user_stack = USRSTACK; - return (KERN_SUCCESS); + return KERN_SUCCESS; } /* @@ -706,8 +721,8 @@ thread_setuserstack(thread_t thread, mach_vm_address_t user_stack) { struct arm_saved_state *sv; -#define thread_setuserstack_kprintf(x...) /* kprintf("thread_setuserstac - * k: " x) */ +#define thread_setuserstack_kprintf(x...) /* kprintf("thread_setuserstac + * k: " x) */ sv = get_user_regs(thread); @@ -722,7 +737,7 @@ thread_setuserstack(thread_t thread, mach_vm_address_t user_stack) * Routine: thread_adjuserstack * */ -uint64_t +user_addr_t thread_adjuserstack(thread_t thread, int adjust) { struct arm_saved_state *sv; @@ -743,8 +758,8 @@ thread_setentrypoint(thread_t thread, mach_vm_offset_t entry) { struct arm_saved_state *sv; -#define thread_setentrypoint_kprintf(x...) /* kprintf("thread_setentrypoi - * nt: " x) */ +#define thread_setentrypoint_kprintf(x...) /* kprintf("thread_setentrypoi + * nt: " x) */ sv = get_user_regs(thread); @@ -761,40 +776,40 @@ thread_setentrypoint(thread_t thread, mach_vm_offset_t entry) */ kern_return_t thread_entrypoint( - __unused thread_t thread, - int flavor, - thread_state_t tstate, - __unused unsigned int count, - mach_vm_offset_t * entry_point -) + __unused thread_t thread, + int flavor, + thread_state_t tstate, + __unused unsigned int count, + mach_vm_offset_t * entry_point + ) { switch (flavor) { case ARM_THREAD_STATE: - { - struct arm_thread_state *state; + { + struct arm_thread_state *state; - if (count != ARM_THREAD_STATE_COUNT) { - return KERN_INVALID_ARGUMENT; - } + if (count != ARM_THREAD_STATE_COUNT) { + return KERN_INVALID_ARGUMENT; + } - state = (struct arm_thread_state *) tstate; + state = (struct arm_thread_state *) tstate; - /* - * If a valid entry point is specified, use it. - */ - if (state->pc) { - *entry_point = CAST_USER_ADDR_T(state->pc); - } else { - *entry_point = CAST_USER_ADDR_T(VM_MIN_ADDRESS); - } + /* + * If a valid entry point is specified, use it. 
+ */ + if (state->pc) { + *entry_point = CAST_USER_ADDR_T(state->pc); + } else { + *entry_point = CAST_USER_ADDR_T(VM_MIN_ADDRESS); } - break; + } + break; default: - return (KERN_INVALID_ARGUMENT); + return KERN_INVALID_ARGUMENT; } - return (KERN_SUCCESS); + return KERN_SUCCESS; } @@ -804,8 +819,8 @@ thread_entrypoint( */ void thread_set_child( - thread_t child, - int pid) + thread_t child, + int pid) { struct arm_saved_state *child_state; @@ -822,8 +837,8 @@ thread_set_child( */ void thread_set_parent( - thread_t parent, - int pid) + thread_t parent, + int pid) { struct arm_saved_state *parent_state; @@ -854,30 +869,31 @@ act_thread_csave(void) ic = (struct arm_act_context *) kalloc(sizeof(struct arm_act_context)); - if (ic == (struct arm_act_context *) NULL) - return ((void *) 0); + if (ic == (struct arm_act_context *) NULL) { + return (void *) 0; + } val = ARM_THREAD_STATE_COUNT; kret = machine_thread_get_state(current_thread(), - ARM_THREAD_STATE, - (thread_state_t) & ic->ss, - &val); + ARM_THREAD_STATE, + (thread_state_t) &ic->ss, + &val); if (kret != KERN_SUCCESS) { kfree(ic, sizeof(struct arm_act_context)); - return ((void *) 0); + return (void *) 0; } #if __ARM_VFP__ val = ARM_VFP_STATE_COUNT; kret = machine_thread_get_state(current_thread(), - ARM_VFP_STATE, - (thread_state_t) & ic->vfps, - &val); + ARM_VFP_STATE, + (thread_state_t) &ic->vfps, + &val); if (kret != KERN_SUCCESS) { kfree(ic, sizeof(struct arm_act_context)); - return ((void *) 0); + return (void *) 0; } #endif - return (ic); + return ic; } /* @@ -892,23 +908,26 @@ act_thread_catt(void *ctx) ic = (struct arm_act_context *) ctx; - if (ic == (struct arm_act_context *) NULL) + if (ic == (struct arm_act_context *) NULL) { return; + } kret = machine_thread_set_state(current_thread(), - ARM_THREAD_STATE, - (thread_state_t) & ic->ss, - ARM_THREAD_STATE_COUNT); - if (kret != KERN_SUCCESS) + ARM_THREAD_STATE, + (thread_state_t) &ic->ss, + ARM_THREAD_STATE_COUNT); + if (kret != KERN_SUCCESS) { goto out; + } #if __ARM_VFP__ kret = machine_thread_set_state(current_thread(), - ARM_VFP_STATE, - (thread_state_t) & ic->vfps, - ARM_VFP_STATE_COUNT); - if (kret != KERN_SUCCESS) + ARM_VFP_STATE, + (thread_state_t) &ic->vfps, + ARM_VFP_STATE_COUNT); + if (kret != KERN_SUCCESS) { goto out; + } #endif out: kfree(ic, sizeof(struct arm_act_context)); @@ -918,7 +937,7 @@ out: * Routine: act_thread_catt * */ -void +void act_thread_cfree(void *ctx) { kfree(ctx, sizeof(struct arm_act_context)); @@ -930,7 +949,7 @@ thread_set_wq_state32(thread_t thread, thread_state_t tstate) arm_thread_state_t *state; struct arm_saved_state *saved_state; thread_t curth = current_thread(); - spl_t s=0; + spl_t s = 0; saved_state = &thread->machine.PcbData; state = (arm_thread_state_t *)tstate; diff --git a/osfmk/arm/status_shared.c b/osfmk/arm/status_shared.c index b0a389990..5af3ff8e6 100644 --- a/osfmk/arm/status_shared.c +++ b/osfmk/arm/status_shared.c @@ -30,7 +30,6 @@ #include #include #include -#include #include #include diff --git a/osfmk/arm/task.h b/osfmk/arm/task.h index 7bdcec8af..2c0ed7ebd 100644 --- a/osfmk/arm/task.h +++ b/osfmk/arm/task.h @@ -56,15 +56,29 @@ */ /* - * Machine dependant task fields + * Machine dependent task fields */ +#ifdef MACH_KERNEL_PRIVATE +/* Provide access to target-specific defintions which may be used by + * consuming code, e.g. HYPERVISOR. 
*/ +#include +#endif + + #if defined(HAS_APPLE_PAC) -#define MACHINE_TASK \ - void* task_debug; \ +#define TASK_ADDITIONS_PAC \ uint64_t rop_pid; \ - boolean_t disable_user_jop; + uint64_t jop_pid; \ + uint8_t disable_user_jop; #else -#define MACHINE_TASK \ - void* task_debug; +#define TASK_ADDITIONS_PAC #endif + + + + +#define MACHINE_TASK \ + void* task_debug; \ + TASK_ADDITIONS_PAC \ + diff --git a/osfmk/arm/thread.h b/osfmk/arm/thread.h index 3782d0f26..2386bd58f 100644 --- a/osfmk/arm/thread.h +++ b/osfmk/arm/thread.h @@ -67,6 +67,7 @@ #ifdef MACH_KERNEL_PRIVATE #include #include +#include #endif struct perfcontrol_state { @@ -80,7 +81,7 @@ extern unsigned int _MachineStateCount[]; #ifdef MACH_KERNEL_PRIVATE #if __arm64__ -typedef arm_context_t machine_thread_kernel_state; +typedef arm_kernel_context_t machine_thread_kernel_state; #else typedef struct arm_saved_state machine_thread_kernel_state; #endif @@ -88,7 +89,6 @@ typedef struct arm_saved_state machine_thread_kernel_state; struct machine_thread { #if __ARM_USER_PROTECT__ - unsigned int uptw_ttc; unsigned int uptw_ttb; unsigned int kptw_ttb; unsigned int asid; @@ -96,8 +96,10 @@ struct machine_thread { #if __arm64__ arm_context_t * contextData; /* allocated user context */ - arm_saved_state_t * upcb; /* pointer to user GPR state */ + arm_saved_state_t * XNU_PTRAUTH_SIGNED_PTR("machine_thread.upcb") upcb; /* pointer to user GPR state */ arm_neon_saved_state_t * uNeon; /* pointer to user VFP state */ + arm_saved_state_t * kpcb; /* pointer to kernel GPR state */ + uint64_t recover_far; #elif __arm__ struct arm_saved_state PcbData; #else @@ -114,27 +116,39 @@ struct machine_thread { vm_address_t cthread_self; /* for use of cthread package */ #endif - vm_offset_t kstackptr; /* top of kernel stack */ -#if defined(HAS_APPLE_PAC) - uint64_t rop_pid; - boolean_t disable_user_jop; -#endif - struct cpu_data * CpuDatap; /* current per cpu data */ - unsigned int preemption_count; /* preemption count */ - -#if __ARM_SMP__ -#define MACHINE_THREAD_FLAGS_ON_CPU (0x1) - - uint8_t machine_thread_flags; -#endif /* __ARM_SMP__ */ +#if __arm64__ + uint32_t recover_esr; +#endif /* __arm64__ */ + vm_offset_t kstackptr; /* top of kernel stack */ struct perfcontrol_state perfctrl_state; #if __arm64__ uint64_t energy_estimate_nj; #endif #if INTERRUPT_MASKED_DEBUG - uint64_t intmask_timestamp; /* timestamp of when interrupts were masked */ + uint64_t intmask_timestamp; /* timestamp of when interrupts were manually masked */ + uint64_t inthandler_timestamp; /* timestamp of when interrupt handler started */ + unsigned int int_type; /* interrupt type of the interrupt that was processed */ + uintptr_t int_handler_addr; /* slid, ptrauth-stripped virtual address of the interrupt handler */ + uintptr_t int_vector; /* IOInterruptVector */ +#endif + +#if __arm64__ && defined(CONFIG_XNUPOST) + volatile expected_fault_handler_t expected_fault_handler; + volatile uintptr_t expected_fault_addr; +#endif + + vm_offset_t pcpu_data_base; + struct cpu_data * CpuDatap; /* current per cpu data */ + unsigned int preemption_count; /* preemption count */ +#if __arm64__ + uint16_t exception_trace_code; +#endif +#if defined(HAS_APPLE_PAC) + uint8_t disable_user_jop; + uint64_t rop_pid; + uint64_t jop_pid; #endif }; #endif @@ -147,7 +161,9 @@ extern struct arm_vfpsaved_state * find_user_vfp(thread_t); extern arm_debug_state_t * find_debug_state(thread_t); #elif defined(__arm64__) extern arm_debug_state32_t * find_debug_state32(thread_t); +extern arm_debug_state32_t * 
find_or_allocate_debug_state32(thread_t); extern arm_debug_state64_t * find_debug_state64(thread_t); +extern arm_debug_state64_t * find_or_allocate_debug_state64(thread_t); extern arm_neon_saved_state_t * get_user_neon_regs(thread_t); #else #error unknown arch diff --git a/osfmk/arm/trap.c b/osfmk/arm/trap.c index 608593c86..b1e005c39 100644 --- a/osfmk/arm/trap.c +++ b/osfmk/arm/trap.c @@ -297,8 +297,8 @@ sleh_abort(struct arm_saved_state * regs, int type) if ((regs->fsr) & FSR_EXT) { cpu_data_t *cdp = getCpuDatap(); - if (cdp->platform_error_handler != (platform_error_handler_t) NULL) { - (*(platform_error_handler_t)cdp->platform_error_handler)(cdp->cpu_id, 0); + if (cdp->platform_error_handler != NULL) { + cdp->platform_error_handler(cdp->cpu_id, 0); /* If a platform error handler is registered, expect it to panic, not fall through */ panic("Unexpected return from platform_error_handler"); } @@ -625,14 +625,10 @@ sleh_alignment(struct arm_saved_state * regs) unsigned short ins16 = 0; /* Get aborted instruction */ -#if __ARM_SMP__ || __ARM_USER_PROTECT__ if (COPYIN((user_addr_t)(regs->pc), (char *)&ins16, (vm_size_t)(sizeof(uint16_t))) != KERN_SUCCESS) { /* Failed to fetch instruction, return success to re-drive the exception */ return KERN_SUCCESS; } -#else - ins16 = *(unsigned short *) (regs->pc); -#endif /* * Map multi-word Thumb loads and stores to their ARM @@ -668,14 +664,10 @@ sleh_alignment(struct arm_saved_state * regs) } } else { /* Get aborted instruction */ -#if __ARM_SMP__ || __ARM_USER_PROTECT__ if (COPYIN((user_addr_t)(regs->pc), (char *)&ins, (vm_size_t)(sizeof(unsigned int))) != KERN_SUCCESS) { /* Failed to fetch instruction, return success to re-drive the exception */ return KERN_SUCCESS; } -#else - ins = *(unsigned int *) (regs->pc); -#endif } /* Don't try to emulate unconditional instructions */ @@ -855,7 +847,7 @@ void interrupt_stats(void); void interrupt_stats(void) { - SCHED_STATS_INTERRUPT(current_processor()); + SCHED_STATS_INC(interrupt_count); } __dead2 diff --git a/osfmk/arm/trap.h b/osfmk/arm/trap.h index fa179c8b5..757c603b5 100644 --- a/osfmk/arm/trap.h +++ b/osfmk/arm/trap.h @@ -84,6 +84,8 @@ #define T_PF_WRITE 0x2 /* write access */ #define T_PF_USER 0x4 /* from user state */ +#if defined(MACH_KERNEL_PRIVATE) + #if !defined(ASSEMBLER) && defined(MACH_KERNEL) #include @@ -267,9 +269,11 @@ extern kern_return_t arm_fast_fault(pmap_t, vm_map_address_t, vm_prot_t, bool, b typedef kern_return_t (*perfCallback)( int trapno, struct arm_saved_state *ss, - uintptr_t *, + int, int); #endif /* !ASSEMBLER && MACH_KERNEL */ +#endif /* MACH_KERNEL_PRIVATE */ + #endif /* _ARM_TRAP_H_ */ diff --git a/osfmk/arm64/Makefile b/osfmk/arm64/Makefile index ec8b11901..aab4908ff 100644 --- a/osfmk/arm64/Makefile +++ b/osfmk/arm64/Makefile @@ -6,6 +6,46 @@ export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir include $(MakeInc_cmd) include $(MakeInc_def) +ifeq ($(PLATFORM),MacOSX) + + +ARM_HEADER_FILES = \ + machine_cpuid.h \ + machine_machdep.h \ + +# Headers installed into Kernel.framework/Headers (public and internal SDKs). +INSTALL_KF_MD_LIST = $(ARM_HEADER_FILES) + +# Headers installed into Kernel.framework/PrivateHeaders (internal SDK only). +INSTALL_KF_MD_LCL_LIST = \ + lowglobals.h \ + machine_kpc.h \ + machine_remote_time.h \ + monotonic.h \ + pgtrace.h \ + proc_reg.h \ + tlb.h \ + $(ARM_HEADER_FILES) + +# TODO: Is there a reason that machine_machdep.h is not in this list? If not, these lists can be consolidated. 
+# Headers used to compile xnu +EXPORT_MD_LIST = \ + lowglobals.h \ + machine_cpuid.h \ + machine_kpc.h \ + machine_remote_time.h \ + monotonic.h \ + pgtrace.h \ + proc_reg.h \ + asm.h \ + tlb.h \ + pal_hibernate.h + +# These headers will be available with #include +EXPORT_MD_DIR = arm64 + +else # $(PLATFORM),MacOSX + ARM_HEADER_FILES = \ lowglobals.h \ @@ -24,9 +64,11 @@ INSTALL_KF_MD_LIST = $(ARM_HEADER_FILES) INSTALL_KF_MD_LCL_LIST = machine_kpc.h machine_remote_time.h monotonic.h pgtrace.h $(ARM_HEADER_FILES) -EXPORT_MD_LIST = machine_cpuid.h machine_kpc.h machine_remote_time.h monotonic.h proc_reg.h pgtrace.h asm.h tlb.h +EXPORT_MD_LIST = machine_cpuid.h machine_kpc.h machine_remote_time.h monotonic.h proc_reg.h pgtrace.h asm.h tlb.h pal_hibernate.h EXPORT_MD_DIR = arm64 +endif # $(PLATFORM),MacOSX + include $(MakeInc_rule) include $(MakeInc_dir) diff --git a/osfmk/arm64/amcc_rorgn.c b/osfmk/arm64/amcc_rorgn.c new file mode 100644 index 000000000..35128d53e --- /dev/null +++ b/osfmk/arm64/amcc_rorgn.c @@ -0,0 +1,706 @@ +/* + * Copyright (c) 2019-2020 Apple Inc. All rights reserved. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. + * + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. 
+ *
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
+ */
+
+#include
+
+#if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#if HIBERNATION
+#include
+#endif /* HIBERNATION */
+
+#if HAS_IOA
+#define MAX_LOCK_GROUPS 2 // 2 lock groups (AMCC, IOA)
+#define IOA_LOCK_GROUP 1 // IOA lock group index
+#else
+#define MAX_LOCK_GROUPS 1 // 1 lock group (AMCC)
+#endif
+#define AMCC_LOCK_GROUP 0 // AMCC lock group index
+#define MAX_APERTURES 16 // Maximum number of register apertures
+#define MAX_PLANES 16 // Maximum number of planes within each aperture
+
+#define LOCK_GROUP_HAS_CACHE_STATUS_REG (1 << 0) // Look for cache status register in the lock group
+#define LOCK_GROUP_HAS_MASTER_LOCK_REG (1 << 1) // Look for master lock register in the lock group
+
+#define LOCK_TYPE_HAS_LOCK_REG (1 << 0) // Look for lock register in the lock type
+
+extern vm_offset_t segLOWESTRO;
+extern vm_offset_t segHIGHESTRO;
+
+extern vm_offset_t segLASTB;
+extern vm_offset_t segTEXTEXECB;
+extern unsigned long segSizeLAST;
+extern unsigned long segSizeLASTDATACONST;
+extern unsigned long segSizeTEXTEXEC;
+
+typedef struct lock_reg {
+	uint32_t reg_offset; // Register offset
+	uint32_t reg_mask; // Register mask
+	uint32_t reg_value; // Register value
+} lock_reg_t;
+
+typedef struct lock_type {
+	uint32_t page_size_shift; // page shift used in lower/upper limit registers
+	lock_reg_t lower_limit_reg; // Lower limit register description
+	lock_reg_t upper_limit_reg; // Upper limit register description
+	lock_reg_t enable_reg; // Enable register description
+	lock_reg_t write_disable_reg; // Write disable register description
+	lock_reg_t lock_reg; // Lock register description
+} lock_type_t;
+
+typedef struct lock_group {
+	uint32_t aperture_count; // Aperture count
+	uint32_t aperture_size; // Aperture size
+	uint32_t plane_count; // Number of planes in the aperture
+	uint32_t plane_stride; // Stride between planes in the aperture
+	uint64_t aperture_phys_addr[MAX_APERTURES]; // Aperture physical addresses
+	lock_reg_t cache_status_reg; // Cache status register description
+#if HAS_IOA
+	lock_reg_t master_lock_reg; // Master lock register description
+#endif
+	lock_type_t ctrr_a; // CTRR-A (KTRR) lock
+} lock_group_t;
+
+SECURITY_READ_ONLY_LATE(lock_group_t) _lock_group[MAX_LOCK_GROUPS] = { {0} };
+SECURITY_READ_ONLY_LATE(bool) lock_regs_set = false;
+
+static vm_offset_t rorgn_begin = 0;
+static vm_offset_t rorgn_end = 0;
+SECURITY_READ_ONLY_LATE(vm_offset_t) ctrr_begin = 0;
+SECURITY_READ_ONLY_LATE(vm_offset_t) ctrr_end = 0;
+
+static uint64_t lock_group_va[MAX_LOCK_GROUPS][MAX_APERTURES];
+
+#if CONFIG_CSR_FROM_DT
+SECURITY_READ_ONLY_LATE(bool) csr_unsafe_kernel_text = false;
+#endif
+
+#if defined(KERNEL_INTEGRITY_KTRR)
+#define CTRR_LOCK_MSR ARM64_REG_KTRR_LOCK_EL1
+#elif defined(KERNEL_INTEGRITY_CTRR)
+#define CTRR_LOCK_MSR ARM64_REG_CTRR_LOCK_EL1
+#endif
+
+/*
+ * lock_group_t - describes all the parameters xnu needs to know to
+ * lock down the AMCC/IOA (Lock Group) Read Only Region(s) on cold start.
+ * This description assumes that each AMCC/IOA in a given system will
+ * be identical, respectively. The only variables are the number of
+ * apertures present and the physical base address of each aperture.
+ * + * General xnu lock group lockdown flow: + * - for each lock group: + * - ml_io_map all present lock group physical base addresses + * - assert all lock group begin/end page numbers set by iboot are identical + * - convert lock group begin/end page number to physical address + * - assert lock group begin/end page numbers match xnu view of read only region + * - assert lock group is not currently locked + * - ensure lock group master cache is disabled + * - write enable/lock registers to enable/lock the lock group read only region + */ + +static bool +_dt_get_uint32(DTEntry node, char const *name, uint32_t *dest) +{ + uint32_t const *value; + unsigned int size; + + if (SecureDTGetProperty(node, name, (void const **)&value, &size) != kSuccess) { + return false; + } + + if (size != sizeof(uint32_t)) { + panic("lock-regs: unexpected size %u", size); + } + + *dest = *value; + + return true; +} + +static uint32_t +_dt_get_uint32_required(DTEntry node, char const *name) +{ + uint32_t value; + + if (!_dt_get_uint32(node, name, &value)) { + panic("lock-regs: cannot find required property '%s'", name); + } + + return value; +} + +static bool +_dt_get_lock_reg(DTEntry node, lock_reg_t *reg, const char *parent_name, const char *reg_name, bool required, bool with_value) +{ + char prop_name[32]; + bool found; + + snprintf(prop_name, sizeof(prop_name), "%s-reg-offset", reg_name); + found = _dt_get_uint32(node, prop_name, ®->reg_offset); + if (!found) { + if (required) { + panic("%s: missing property '%s'", parent_name, prop_name); + } else { + return false; + } + } + + snprintf(prop_name, sizeof(prop_name), "%s-reg-mask", reg_name); + found = _dt_get_uint32(node, prop_name, ®->reg_mask); + if (!found) { + panic("%s: missing property '%s'", parent_name, prop_name); + } + + if (with_value) { + snprintf(prop_name, sizeof(prop_name), "%s-reg-value", reg_name); + found = _dt_get_uint32(node, prop_name, ®->reg_value); + if (!found) { + panic("%s: missing property '%s'", parent_name, prop_name); + } + } + + return true; +} + +static DTEntry +_dt_get_lock_group(DTEntry lock_regs_node, lock_group_t* lock_group, const char *group_name, uint32_t options) +{ + DTEntry group_node; + + // Find the lock group node. 
+	if (SecureDTLookupEntry(lock_regs_node, group_name, &group_node) != kSuccess) {
+		panic("lock-regs: /chosen/lock-regs/%s not found", group_name);
+	}
+
+	lock_group->aperture_count = _dt_get_uint32_required(group_node, "aperture-count");
+
+	if (lock_group->aperture_count > MAX_APERTURES) {
+		panic("%s: %s %u exceeds maximum %u", group_name, "aperture-count", lock_group->aperture_count, MAX_APERTURES);
+	}
+
+	lock_group->aperture_size = _dt_get_uint32_required(group_node, "aperture-size");
+
+	if ((lock_group->aperture_count > 0) && (lock_group->aperture_size == 0)) {
+		panic("%s: have %u apertures, but 0 size", group_name, lock_group->aperture_count);
+	}
+
+	lock_group->plane_count = _dt_get_uint32_required(group_node, "plane-count");
+
+	if (lock_group->plane_count > MAX_PLANES) {
+		panic("%s: %s %u exceeds maximum %u", group_name, "plane-count", lock_group->plane_count, MAX_PLANES);
+	}
+
+	if (!_dt_get_uint32(group_node, "plane-stride", &lock_group->plane_stride)) {
+		lock_group->plane_stride = 0;
+	}
+
+	if (lock_group->plane_count > 1) {
+		uint32_t aperture_size;
+
+		if (lock_group->plane_stride == 0) {
+			panic("%s: plane-count (%u) > 1, but stride is 0/missing", group_name, lock_group->plane_count);
+		}
+
+		if (os_mul_overflow(lock_group->plane_count, lock_group->plane_stride, &aperture_size)
+		    || (aperture_size > lock_group->aperture_size)) {
+			panic("%s: aperture-size (%#x) is insufficient to cover plane-count (%#x) of plane-stride (%#x) bytes", group_name, lock_group->aperture_size, lock_group->plane_count, lock_group->plane_stride);
+		}
+	}
+
+	uint64_t const *phys_bases = NULL;
+	unsigned int prop_size;
+	if (SecureDTGetProperty(group_node, "aperture-phys-addr", (const void**)&phys_bases, &prop_size) != kSuccess) {
+		panic("%s: missing required %s", group_name, "aperture-phys-addr");
+	}
+
+	if (prop_size != lock_group->aperture_count * sizeof(lock_group->aperture_phys_addr[0])) {
+		panic("%s: aperture-phys-addr size (%#x) != (aperture-count (%#x) * PA size (%#zx) = %#lx)",
+		    group_name, prop_size, lock_group->aperture_count, sizeof(lock_group->aperture_phys_addr[0]),
+		    lock_group->aperture_count * sizeof(lock_group->aperture_phys_addr[0]));
+	}
+
+	memcpy(lock_group->aperture_phys_addr, phys_bases, prop_size);
+
+	if (options & LOCK_GROUP_HAS_CACHE_STATUS_REG) {
+		_dt_get_lock_reg(group_node, &lock_group->cache_status_reg, group_name, "cache-status", true, true);
+	}
+
+#if HAS_IOA
+	if (options & LOCK_GROUP_HAS_MASTER_LOCK_REG) {
+		_dt_get_lock_reg(group_node, &lock_group->master_lock_reg, group_name, "master-lock", true, true);
+	}
+#endif
+
+	return group_node;
+}
+
+static void
+_dt_get_lock_type(DTEntry group_node, lock_type_t *lock_type, const char *group_name, const char *type_name, uint32_t options)
+{
+	DTEntry type_node;
+	bool has_lock = options & LOCK_TYPE_HAS_LOCK_REG;
+
+	// Find the lock type type_node.
+	if (SecureDTLookupEntry(group_node, type_name, &type_node) != kSuccess) {
+		panic("lock-regs: /chosen/lock-regs/%s/%s not found", group_name, type_name);
+	}
+
+	lock_type->page_size_shift = _dt_get_uint32_required(type_node, "page-size-shift");
+
+	// Find all of the registers for this lock type.
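/*
 * Illustrative aside, not part of the kernel sources above: _dt_get_lock_reg()
 * derives the device-tree property names for one register descriptor from a
 * base name, so "lower-limit" expands to "lower-limit-reg-offset",
 * "lower-limit-reg-mask" and, when a value is expected, "lower-limit-reg-value".
 * A stand-alone demonstration of that naming convention (print_reg_prop_names
 * is invented for the example):
 */
#include <stdio.h>

static void
print_reg_prop_names(const char *reg_name, int with_value)
{
	char prop_name[32];

	snprintf(prop_name, sizeof(prop_name), "%s-reg-offset", reg_name);
	printf("%s\n", prop_name);

	snprintf(prop_name, sizeof(prop_name), "%s-reg-mask", reg_name);
	printf("%s\n", prop_name);

	if (with_value) {
		snprintf(prop_name, sizeof(prop_name), "%s-reg-value", reg_name);
		printf("%s\n", prop_name);
	}
}

int
main(void)
{
	/* The same register names fetched for a CTRR-A lock type above. */
	print_reg_prop_names("lower-limit", 0);
	print_reg_prop_names("upper-limit", 0);
	print_reg_prop_names("lock", 1);
	return 0;
}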
+ // Parent Register Descriptor Parent Name Reg Name Required Value + _dt_get_lock_reg(type_node, &lock_type->lower_limit_reg, type_name, "lower-limit", true, false); + _dt_get_lock_reg(type_node, &lock_type->upper_limit_reg, type_name, "upper-limit", true, false); + _dt_get_lock_reg(type_node, &lock_type->lock_reg, type_name, "lock", has_lock, true); + _dt_get_lock_reg(type_node, &lock_type->enable_reg, type_name, "enable", false, true); + _dt_get_lock_reg(type_node, &lock_type->write_disable_reg, type_name, "write-disable", false, true); +} + +/* + * find_lock_group_data: + * + * finds and gathers lock group (AMCC/IOA) data from device tree, returns it as lock_group_t + * + * called first time before IOKit start while still uniprocessor + * + */ +static lock_group_t const * _Nonnull +find_lock_group_data(void) +{ + DTEntry lock_regs_node = NULL; + DTEntry amcc_node = NULL; + + // Return the lock group data pointer if we already found and populated one. + if (lock_regs_set) { + return _lock_group; + } + + if (SecureDTLookupEntry(NULL, "/chosen/lock-regs", &lock_regs_node) != kSuccess) { + panic("lock-regs: /chosen/lock-regs not found (your iBoot or EDT may be too old)"); + } + + amcc_node = _dt_get_lock_group(lock_regs_node, &_lock_group[AMCC_LOCK_GROUP], "amcc", LOCK_GROUP_HAS_CACHE_STATUS_REG); + _dt_get_lock_type(amcc_node, &_lock_group[AMCC_LOCK_GROUP].ctrr_a, "amcc", "amcc-ctrr-a", LOCK_TYPE_HAS_LOCK_REG); + +#if HAS_IOA + DTEntry ioa_node = _dt_get_lock_group(lock_regs_node, &_lock_group[IOA_LOCK_GROUP], "ioa", LOCK_GROUP_HAS_MASTER_LOCK_REG); + _dt_get_lock_type(ioa_node, &_lock_group[IOA_LOCK_GROUP].ctrr_a, "ioa", "ioa-ctrr-a", 0); +#endif + + lock_regs_set = true; + + return _lock_group; +} + +void +rorgn_stash_range(void) +{ +#if DEVELOPMENT || DEBUG || CONFIG_DTRACE || CONFIG_CSR_FROM_DT + boolean_t rorgn_disable = FALSE; + +#if DEVELOPMENT || DEBUG + PE_parse_boot_argn("-unsafe_kernel_text", &rorgn_disable, sizeof(rorgn_disable)); +#endif + +#if CONFIG_CSR_FROM_DT + if (csr_unsafe_kernel_text) { + rorgn_disable = true; + } +#endif + + if (rorgn_disable) { + /* take early out if boot arg present, don't query any machine registers to avoid + * dependency on amcc DT entry + */ + return; + } +#endif + lock_group_t const * const lock_group = find_lock_group_data(); + + /* Get the lock group read-only region range values, and stash them into rorgn_begin, rorgn_end. 
*/ + uint64_t rorgn_begin_page[MAX_LOCK_GROUPS][MAX_APERTURES][MAX_PLANES]; + uint64_t rorgn_end_page[MAX_LOCK_GROUPS][MAX_APERTURES][MAX_PLANES]; + + for (unsigned int lg = 0; lg < MAX_LOCK_GROUPS; lg++) { + for (unsigned int aperture = 0; aperture < lock_group[lg].aperture_count; aperture++) { + const uint64_t amcc_pa = lock_group[lg].aperture_phys_addr[aperture]; + + // VA space will be unmapped and freed after lockdown complete in rorgn_lockdown() + lock_group_va[lg][aperture] = ml_io_map(amcc_pa, lock_group[lg].aperture_size); + + if (lock_group_va[lg][aperture] == 0) { + panic("map aperture_phys_addr[%u]/%#x failed", aperture, lock_group[lg].aperture_size); + } + + for (unsigned int plane = 0; plane < lock_group[lg].plane_count; plane++) { + uint64_t reg_addr; + + reg_addr = lock_group_va[lg][aperture] + (plane * lock_group[lg].plane_stride) + lock_group[lg].ctrr_a.lower_limit_reg.reg_offset; + rorgn_begin_page[lg][aperture][plane] = *(volatile uint32_t *)reg_addr; + reg_addr = lock_group_va[lg][aperture] + (plane * lock_group[lg].plane_stride) + lock_group[lg].ctrr_a.upper_limit_reg.reg_offset; + rorgn_end_page[lg][aperture][plane] = *(volatile uint32_t *)reg_addr; + } + } + + assert(rorgn_end_page[lg][0][0] > rorgn_begin_page[lg][0][0]); + + for (unsigned int aperture = 0; aperture < lock_group[lg].aperture_count; aperture++) { + for (unsigned int plane = 0; plane < lock_group[lg].plane_count; plane++) { + if ((rorgn_begin_page[lg][aperture][plane] != rorgn_begin_page[0][0][0]) + || (rorgn_end_page[lg][aperture][plane] != rorgn_end_page[0][0][0])) { + panic("Inconsistent memory config"); + } + } + } + + uint64_t page_bytes = 1ULL << lock_group[lg].ctrr_a.page_size_shift; + + /* rorgn_begin and rorgn_end are first and last byte inclusive of lock group read only region as determined by iBoot. */ + rorgn_begin = (rorgn_begin_page[0][0][0] << lock_group[lg].ctrr_a.page_size_shift) + gDramBase; + rorgn_end = (rorgn_end_page[0][0][0] << lock_group[lg].ctrr_a.page_size_shift) + gDramBase + page_bytes - 1; + } + + assert(segLOWESTRO && gVirtBase && gPhysBase); + + /* ctrr_begin and end are first and last bytes inclusive of MMU KTRR/CTRR region */ + ctrr_begin = kvtophys(segLOWESTRO); + +#if defined(KERNEL_INTEGRITY_KTRR) + + /* __LAST is not part of the MMU KTRR region (it is however part of the AMCC read only region) + * + * +------------------+-----------+-----------------------------------+ + * | Largest Address | LAST | <- AMCC RO Region End (rorgn_end) | + * +------------------+-----------+-----------------------------------+ + * | | TEXT_EXEC | <- KTRR RO Region End (ctrr_end) | + * +------------------+-----------+-----------------------------------+ + * | | ... 
| | + * +------------------+-----------+-----------------------------------+ + * | Smallest Address | LOWEST | <- KTRR/AMCC RO Region Begin | + * | | | (ctrr_begin/rorgn_begin) | + * +------------------+-----------+-----------------------------------+ + * + */ + + ctrr_end = kvtophys(segLASTB) - segSizeLASTDATACONST - 1; + + /* assert not booted from kernel collection */ + assert(!segHIGHESTRO); + + /* assert that __LAST segment containing privileged insns is only a single page */ + assert(segSizeLAST == PAGE_SIZE); + + /* assert that segLAST is contiguous and just after/above/numerically higher than KTRR end */ + assert((ctrr_end + 1) == kvtophys(segTEXTEXECB) + segSizeTEXTEXEC); + + /* ensure that iboot and xnu agree on the amcc rorgn range */ + assert((rorgn_begin == ctrr_begin) && (rorgn_end == (ctrr_end + segSizeLASTDATACONST + segSizeLAST))); +#elif defined(KERNEL_INTEGRITY_CTRR) + + /* __LAST is part of MMU CTRR region. Can't use the KTRR style method of making + * __pinst no execute because PXN applies with MMU off in CTRR. + * + * +------------------+-----------+------------------------------+ + * | Largest Address | LAST | <- CTRR/AMCC RO Region End | + * | | | (ctrr_end/rorgn_end) | + * +------------------+-----------+------------------------------+ + * | | TEXT_EXEC | | + * +------------------+-----------+------------------------------+ + * | | ... | | + * +------------------+-----------+------------------------------+ + * | Smallest Address | LOWEST | <- CTRR/AMCC RO Region Begin | + * | | | (ctrr_begin/rorgn_begin) | + * +------------------+-----------+------------------------------+ + * + */ + + if (segHIGHESTRO) { + /* + * kernel collections may have additional kext RO data after kernel LAST + */ + assert(segLASTB + segSizeLAST <= segHIGHESTRO); + ctrr_end = kvtophys(segHIGHESTRO) - 1; + } else { + ctrr_end = kvtophys(segLASTB) + segSizeLAST - 1; + } + + /* ensure that iboot and xnu agree on the amcc rorgn range */ + assert((rorgn_begin == ctrr_begin) && (rorgn_end == ctrr_end)); +#endif +} + +#if DEVELOPMENT || DEBUG +static void +assert_all_lock_groups_unlocked(lock_group_t const *lock_groups) +{ + uint64_t reg_addr; + uint64_t ctrr_lock = 0; + bool locked = false; + bool write_disabled = false;; + + assert(lock_groups); + + for (unsigned int lg = 0; lg < MAX_LOCK_GROUPS; lg++) { + for (unsigned int aperture = 0; aperture < lock_groups[lg].aperture_count; aperture++) { +#if HAS_IOA + // Does the lock group define a master lock register? + if (lock_groups[lg].master_lock_reg.reg_mask != 0) { + reg_addr = lock_group_va[lg][aperture] + lock_groups[lg].master_lock_reg.reg_offset; + locked |= ((*(volatile uint32_t *)reg_addr & lock_groups[lg].master_lock_reg.reg_mask) == lock_groups[lg].master_lock_reg.reg_value); + } +#endif + for (unsigned int plane = 0; plane < lock_groups[lg].plane_count; plane++) { + // Does the lock group define a write disable register? + if (lock_groups[lg].ctrr_a.write_disable_reg.reg_mask != 0) { + reg_addr = lock_group_va[lg][aperture] + (plane * lock_groups[lg].plane_stride) + lock_groups[lg].ctrr_a.write_disable_reg.reg_offset; + write_disabled |= ((*(volatile uint32_t *)reg_addr & lock_groups[lg].ctrr_a.write_disable_reg.reg_mask) == lock_groups[lg].ctrr_a.write_disable_reg.reg_value); + } + + // Does the lock group define a lock register? 
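/*
 * Illustrative aside, not part of the kernel sources above: rorgn_stash_range()
 * turns the lower/upper limit page numbers read back from the lock group
 * registers into inclusive byte addresses using the lock group's page-size
 * shift. The same arithmetic in a self-contained form; every numeric value
 * below is made up for the example.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint64_t begin_page = 0x100;         /* hypothetical lower-limit register value */
	uint64_t end_page = 0x1ff;           /* hypothetical upper-limit register value */
	uint32_t page_size_shift = 14;       /* 16KB pages */
	uint64_t dram_base = 0x800000000ULL; /* stand-in for gDramBase */

	uint64_t page_bytes = 1ULL << page_size_shift;
	/* First byte of the first page, last byte of the last page (inclusive). */
	uint64_t begin = (begin_page << page_size_shift) + dram_base;
	uint64_t end = (end_page << page_size_shift) + dram_base + page_bytes - 1;

	printf("RO region: 0x%" PRIx64 " - 0x%" PRIx64 "\n", begin, end);
	return 0;
}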
+				if (lock_groups[lg].ctrr_a.lock_reg.reg_mask != 0) {
+					reg_addr = lock_group_va[lg][aperture] + (plane * lock_groups[lg].plane_stride) + lock_groups[lg].ctrr_a.lock_reg.reg_offset;
+					locked |= ((*(volatile uint32_t *)reg_addr & lock_groups[lg].ctrr_a.lock_reg.reg_mask) == lock_groups[lg].ctrr_a.lock_reg.reg_value);
+				}
+			}
+		}
+	}
+
+	ctrr_lock = __builtin_arm_rsr64(CTRR_LOCK_MSR);
+
+	assert(!ctrr_lock);
+	assert(!write_disabled && !locked);
+}
+#endif
+
+static void
+lock_all_lock_groups(lock_group_t const *lock_group, vm_offset_t begin, vm_offset_t end)
+{
+	uint64_t reg_addr;
+	assert(lock_group);
+
+	/*
+	 * [x] - ensure all in flight writes are flushed to the lock group before enabling RO Region Lock
+	 *
+	 * begin and end are first and last byte inclusive of lock group read only region
+	 */
+
+	CleanPoC_DcacheRegion_Force(begin, end - begin + 1);
+
+	for (unsigned int lg = 0; lg < MAX_LOCK_GROUPS; lg++) {
+		for (unsigned int aperture = 0; aperture < lock_group[lg].aperture_count; aperture++) {
+			/* lock planes in reverse order: plane 0 should be locked last */
+			unsigned int plane = lock_group[lg].plane_count - 1;
+			do {
+				// Enable the protection region if the lock group defines an enable register.
+				if (lock_group[lg].ctrr_a.enable_reg.reg_mask != 0) {
+					reg_addr = lock_group_va[lg][aperture] + (plane * lock_group[lg].plane_stride) + lock_group[lg].ctrr_a.enable_reg.reg_offset;
+					*(volatile uint32_t *)reg_addr = lock_group[lg].ctrr_a.enable_reg.reg_value;
+				}
+
+				// Disable writes if the lock group defines a write disable register.
+				if (lock_group[lg].ctrr_a.write_disable_reg.reg_mask != 0) {
+					reg_addr = lock_group_va[lg][aperture] + (plane * lock_group[lg].plane_stride) + lock_group[lg].ctrr_a.write_disable_reg.reg_offset;
+					*(volatile uint32_t *)reg_addr = lock_group[lg].ctrr_a.write_disable_reg.reg_value;
+				}
+
+				// Lock the lock if the lock group defines a lock register.
+				if (lock_group[lg].ctrr_a.lock_reg.reg_mask != 0) {
+					reg_addr = lock_group_va[lg][aperture] + (plane * lock_group[lg].plane_stride) + lock_group[lg].ctrr_a.lock_reg.reg_offset;
+					*(volatile uint32_t *)reg_addr = lock_group[lg].ctrr_a.lock_reg.reg_value;
+				}
+
+				__builtin_arm_isb(ISB_SY);
+			} while (plane-- > 0);
+#if HAS_IOA
+			// Lock the master lock if the lock group defines a master lock register.
+			if (lock_group[lg].master_lock_reg.reg_mask != 0) {
+				reg_addr = lock_group_va[lg][aperture] + lock_group[lg].master_lock_reg.reg_offset;
+				*(volatile uint32_t *)reg_addr = lock_group[lg].master_lock_reg.reg_value;
+			}
+			__builtin_arm_isb(ISB_SY);
+#endif
+		}
+	}
+}
+
+static void
+lock_mmu(uint64_t begin, uint64_t end)
+{
+#if defined(KERNEL_INTEGRITY_KTRR)
+
+	__builtin_arm_wsr64(ARM64_REG_KTRR_LOWER_EL1, begin);
+	__builtin_arm_wsr64(ARM64_REG_KTRR_UPPER_EL1, end);
+	__builtin_arm_wsr64(ARM64_REG_KTRR_LOCK_EL1, 1ULL);
+
+	/* flush TLB */
+
+	__builtin_arm_isb(ISB_SY);
+	flush_mmu_tlb();
+
+#elif defined (KERNEL_INTEGRITY_CTRR)
+	/* this will lock the entire bootstrap cluster. non bootstrap clusters
+	 * will be locked by respective cluster master in start.s */
+
+	__builtin_arm_wsr64(ARM64_REG_CTRR_A_LWR_EL1, begin);
+	__builtin_arm_wsr64(ARM64_REG_CTRR_A_UPR_EL1, end);
+
+#if !defined(APPLEVORTEX)
+	/* H12+ changed sequence, must invalidate TLB immediately after setting CTRR bounds */
+	__builtin_arm_isb(ISB_SY); /* ensure all prior MSRs are complete */
+	flush_mmu_tlb();
+#endif /* !defined(APPLEVORTEX) */
+
+	__builtin_arm_wsr64(ARM64_REG_CTRR_CTL_EL1, CTRR_CTL_EL1_A_PXN | CTRR_CTL_EL1_A_MMUON_WRPROTECT);
+	__builtin_arm_wsr64(ARM64_REG_CTRR_LOCK_EL1, 1ULL);
+
+	uint64_t current_el = __builtin_arm_rsr64("CurrentEL");
+	if (current_el == PSR64_MODE_EL2) {
+		// CTRR v2 has explicit registers for cluster config. they can only be written in EL2
+
+		__builtin_arm_wsr64(ACC_CTRR_A_LWR_EL2, begin);
+		__builtin_arm_wsr64(ACC_CTRR_A_UPR_EL2, end);
+		__builtin_arm_wsr64(ACC_CTRR_CTL_EL2, CTRR_CTL_EL1_A_PXN | CTRR_CTL_EL1_A_MMUON_WRPROTECT);
+		__builtin_arm_wsr64(ACC_CTRR_LOCK_EL2, 1ULL);
+	}
+
+	__builtin_arm_isb(ISB_SY); /* ensure all prior MSRs are complete */
+#if defined(APPLEVORTEX)
+	flush_mmu_tlb();
+#endif /* defined(APPLEVORTEX) */
+
+#else /* defined(KERNEL_INTEGRITY_KTRR) */
+#error KERNEL_INTEGRITY config error
+#endif /* defined(KERNEL_INTEGRITY_KTRR) */
+}
+
+#if DEVELOPMENT || DEBUG
+static void
+assert_amcc_cache_disabled(lock_group_t const *lock_group)
+{
+	assert(lock_group);
+
+	const lock_reg_t *cache_status_reg = &lock_group[AMCC_LOCK_GROUP].cache_status_reg;
+
+	// If the platform does not define a cache status register, then we're done here.
+	if (cache_status_reg->reg_mask == 0) {
+		return;
+	}
+
+	for (unsigned int aperture = 0; aperture < lock_group[AMCC_LOCK_GROUP].aperture_count; aperture++) {
+		for (unsigned int plane = 0; plane < lock_group[AMCC_LOCK_GROUP].plane_count; plane++) {
+			uint64_t reg_addr = lock_group_va[AMCC_LOCK_GROUP][aperture] + (plane * lock_group[AMCC_LOCK_GROUP].plane_stride) + cache_status_reg->reg_offset;
+			uint32_t reg_value = *(volatile uint32_t *)reg_addr;
+			assert((reg_value & cache_status_reg->reg_mask) == cache_status_reg->reg_value);
+		}
+	}
+}
+#endif /* DEVELOPMENT || DEBUG */
+
+/*
+ * void rorgn_lockdown(void)
+ *
+ * Lock the MMU and AMCC RORegion within lower and upper boundaries if not already locked
+ *
+ * [ ] - ensure this is being called ASAP on secondary CPUs: KTRR programming and lockdown handled in
+ *       start.s:start_cpu() for subsequent wake/resume of all cores
+ */
+void
+rorgn_lockdown(void)
+{
+	boolean_t ctrr_disable = FALSE;
+
+#if DEVELOPMENT || DEBUG
+	PE_parse_boot_argn("-unsafe_kernel_text", &ctrr_disable, sizeof(ctrr_disable));
+#endif /* DEVELOPMENT || DEBUG */
+
+#if CONFIG_CSR_FROM_DT
+	if (csr_unsafe_kernel_text) {
+		ctrr_disable = true;
+	}
+#endif /* CONFIG_CSR_FROM_DT */
+
+	if (!ctrr_disable) {
+		lock_group_t const * const lock_group = find_lock_group_data();
+
+#if DEVELOPMENT || DEBUG
+		assert_all_lock_groups_unlocked(lock_group);
+
+		printf("RO Region Begin: %p End: %p\n", (void *)rorgn_begin, (void *)rorgn_end);
+		printf("CTRR (MMU) Begin: %p End: %p, setting lockdown\n", (void *)ctrr_begin, (void *)ctrr_end);
+
+		assert_amcc_cache_disabled(lock_group);
+#endif /* DEVELOPMENT || DEBUG */
+
+		// Lock the AMCC/IOA PIO lock registers.
+		lock_all_lock_groups(lock_group, phystokv(rorgn_begin), phystokv(rorgn_end));
+
+		/*
+		 * KTRR/CTRR registers are inclusive of the smallest page size granule supported by processor MMU
+		 * rather than the actual page size in use.
Load the last byte of the end page, and let the HW + * truncate per the smallest page granule supported. Must use same treament in start.s for warm + * start of APs. + */ + lock_mmu(ctrr_begin, ctrr_end); + + // Unmap and free PIO VA space needed to lockdown the lock groups. + for (unsigned int lg = 0; lg < MAX_LOCK_GROUPS; lg++) { + for (unsigned int aperture = 0; aperture < lock_group[lg].aperture_count; aperture++) { + ml_io_unmap(lock_group_va[lg][aperture], lock_group[lg].aperture_size); + } + } + } + +#if defined(KERNEL_INTEGRITY_CTRR) + /* wake any threads blocked on cluster master lockdown */ + cpu_data_t *cdp; + + cdp = getCpuDatap(); + + cdp->cpu_cluster_id = ml_get_cluster_number_local(); + assert(cdp->cpu_cluster_id <= (uint32_t)ml_get_max_cluster_number()); + ctrr_cluster_locked[cdp->cpu_cluster_id] = CTRR_LOCKED; + thread_wakeup(&ctrr_cluster_locked[cdp->cpu_cluster_id]); +#endif +} + +#endif /* defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR) */ diff --git a/osfmk/arm64/amcc_rorgn.h b/osfmk/arm64/amcc_rorgn.h new file mode 100644 index 000000000..640e6af4e --- /dev/null +++ b/osfmk/arm64/amcc_rorgn.h @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2019 Apple Inc. All rights reserved. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. + * + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ + */ + +#if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR) +#include + +void rorgn_stash_range(void); +void rorgn_lockdown(void); +extern vm_offset_t ctrr_begin, ctrr_end; +#if CONFIG_CSR_FROM_DT +extern bool csr_unsafe_kernel_text; +#endif /* CONFIG_CSR_FROM_DT */ +#endif /* defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR) */ diff --git a/osfmk/arm64/arm_vm_init.c b/osfmk/arm64/arm_vm_init.c index acdb849ee..54a45f202 100644 --- a/osfmk/arm64/arm_vm_init.c +++ b/osfmk/arm64/arm_vm_init.c @@ -90,6 +90,13 @@ extern vm_offset_t physmap_vbase; extern vm_offset_t physmap_vtop; #endif +/* + * We explicitly place this in const, as it is not const from a language + * perspective, but it is only modified before we actually switch away from + * the bootstrap page tables. + */ +SECURITY_READ_ONLY_LATE(uint8_t) bootstrap_pagetables[BOOTSTRAP_TABLE_SIZE] __attribute__((aligned(ARM_PGBYTES))); + /* * Denotes the end of xnu. 
*/ @@ -123,6 +130,9 @@ SECURITY_READ_ONLY_LATE(vm_offset_t) vm_elinkedit; SECURITY_READ_ONLY_LATE(vm_offset_t) vm_kernel_builtinkmod_text; SECURITY_READ_ONLY_LATE(vm_offset_t) vm_kernel_builtinkmod_text_end; +SECURITY_READ_ONLY_LATE(vm_offset_t) vm_kernelcache_base; +SECURITY_READ_ONLY_LATE(vm_offset_t) vm_kernelcache_top; + /* Used by */ SECURITY_READ_ONLY_LATE(unsigned long) gVirtBase; SECURITY_READ_ONLY_LATE(unsigned long) gPhysBase; @@ -189,6 +199,8 @@ uint64_t mem_actual; /* The "One True" physical mem * address + 1 */ uint64_t max_mem; /* Size of physical memory (bytes), adjusted * by maxmem */ +uint64_t max_mem_actual; /* Actual size of physical memory (bytes), + * adjusted by the maxmem boot-arg */ uint64_t sane_size; /* Memory size to use for defaults * calculations */ /* This no longer appears to be used; kill it? */ @@ -196,11 +208,26 @@ addr64_t vm_last_addr = VM_MAX_KERNEL_ADDRESS; /* Highest kernel * virtual address known * to the VM system */ -SECURITY_READ_ONLY_LATE(vm_offset_t) segEXTRADATA; -SECURITY_READ_ONLY_LATE(unsigned long) segSizeEXTRADATA; +SECURITY_READ_ONLY_LATE(vm_offset_t) segEXTRADATA; +SECURITY_READ_ONLY_LATE(unsigned long) segSizeEXTRADATA; SECURITY_READ_ONLY_LATE(vm_offset_t) segLOWESTTEXT; SECURITY_READ_ONLY_LATE(vm_offset_t) segLOWEST; +SECURITY_READ_ONLY_LATE(vm_offset_t) segLOWESTRO; +SECURITY_READ_ONLY_LATE(vm_offset_t) segHIGHESTRO; + +/* Only set when booted from MH_FILESET kernel collections */ +SECURITY_READ_ONLY_LATE(vm_offset_t) segLOWESTKC; +SECURITY_READ_ONLY_LATE(vm_offset_t) segHIGHESTKC; +SECURITY_READ_ONLY_LATE(vm_offset_t) segLOWESTROKC; +SECURITY_READ_ONLY_LATE(vm_offset_t) segHIGHESTROKC; +SECURITY_READ_ONLY_LATE(vm_offset_t) segLOWESTAuxKC; +SECURITY_READ_ONLY_LATE(vm_offset_t) segHIGHESTAuxKC; +SECURITY_READ_ONLY_LATE(vm_offset_t) segLOWESTROAuxKC; +SECURITY_READ_ONLY_LATE(vm_offset_t) segHIGHESTROAuxKC; +SECURITY_READ_ONLY_LATE(vm_offset_t) segLOWESTRXAuxKC; +SECURITY_READ_ONLY_LATE(vm_offset_t) segHIGHESTRXAuxKC; +SECURITY_READ_ONLY_LATE(vm_offset_t) segHIGHESTNLEAuxKC; SECURITY_READ_ONLY_LATE(static vm_offset_t) segTEXTB; SECURITY_READ_ONLY_LATE(static unsigned long) segSizeTEXT; @@ -221,8 +248,8 @@ SECURITY_READ_ONLY_LATE(void *) pmap_stacks_end = NULL; SECURITY_READ_ONLY_LATE(static vm_offset_t) segDATACONSTB; SECURITY_READ_ONLY_LATE(static unsigned long) segSizeDATACONST; -SECURITY_READ_ONLY_LATE(static vm_offset_t) segTEXTEXECB; -SECURITY_READ_ONLY_LATE(static unsigned long) segSizeTEXTEXEC; +SECURITY_READ_ONLY_LATE(vm_offset_t) segTEXTEXECB; +SECURITY_READ_ONLY_LATE(unsigned long) segSizeTEXTEXEC; SECURITY_READ_ONLY_LATE(static vm_offset_t) segDATAB; SECURITY_READ_ONLY_LATE(static unsigned long) segSizeDATA; @@ -238,13 +265,22 @@ extern vm_offset_t intstack_low_guard; extern vm_offset_t intstack_high_guard; extern vm_offset_t excepstack_high_guard; -SECURITY_READ_ONLY_LATE(static vm_offset_t) segLINKB; +SECURITY_READ_ONLY_LATE(vm_offset_t) segLINKB; SECURITY_READ_ONLY_LATE(static unsigned long) segSizeLINK; SECURITY_READ_ONLY_LATE(static vm_offset_t) segKLDB; SECURITY_READ_ONLY_LATE(static unsigned long) segSizeKLD; SECURITY_READ_ONLY_LATE(vm_offset_t) segLASTB; SECURITY_READ_ONLY_LATE(unsigned long) segSizeLAST; +SECURITY_READ_ONLY_LATE(vm_offset_t) segLASTDATACONSTB; +SECURITY_READ_ONLY_LATE(unsigned long) segSizeLASTDATACONST; + +SECURITY_READ_ONLY_LATE(vm_offset_t) sectHIBTEXTB; +SECURITY_READ_ONLY_LATE(unsigned long) sectSizeHIBTEXT; +SECURITY_READ_ONLY_LATE(vm_offset_t) segHIBDATAB; +SECURITY_READ_ONLY_LATE(unsigned 
long) segSizeHIBDATA; +SECURITY_READ_ONLY_LATE(vm_offset_t) sectHIBDATACONSTB; +SECURITY_READ_ONLY_LATE(unsigned long) sectSizeHIBDATACONST; SECURITY_READ_ONLY_LATE(vm_offset_t) segPRELINKTEXTB; SECURITY_READ_ONLY_LATE(unsigned long) segSizePRELINKTEXT; @@ -267,15 +303,25 @@ SECURITY_READ_ONLY_LATE(static unsigned long) segSizePLKLINKEDIT; SECURITY_READ_ONLY_LATE(static vm_offset_t) segPRELINKINFOB; SECURITY_READ_ONLY_LATE(static unsigned long) segSizePRELINKINFO; +/* Only set when booted from MH_FILESET primary kernel collection */ +SECURITY_READ_ONLY_LATE(vm_offset_t) segKCTEXTEXECB; +SECURITY_READ_ONLY_LATE(unsigned long) segSizeKCTEXTEXEC; +SECURITY_READ_ONLY_LATE(static vm_offset_t) segKCDATACONSTB; +SECURITY_READ_ONLY_LATE(static unsigned long) segSizeKCDATACONST; +SECURITY_READ_ONLY_LATE(static vm_offset_t) segKCDATAB; +SECURITY_READ_ONLY_LATE(static unsigned long) segSizeKCDATA; + SECURITY_READ_ONLY_LATE(static boolean_t) use_contiguous_hint = TRUE; -SECURITY_READ_ONLY_LATE(unsigned) PAGE_SHIFT_CONST; +SECURITY_READ_ONLY_LATE(int) PAGE_SHIFT_CONST; SECURITY_READ_ONLY_LATE(vm_offset_t) end_kern; SECURITY_READ_ONLY_LATE(vm_offset_t) etext; SECURITY_READ_ONLY_LATE(vm_offset_t) sdata; SECURITY_READ_ONLY_LATE(vm_offset_t) edata; +SECURITY_READ_ONLY_LATE(static vm_offset_t) auxkc_mh, auxkc_base, auxkc_right_above; + vm_offset_t alloc_ptpage(boolean_t map_static); SECURITY_READ_ONLY_LATE(vm_offset_t) ropage_next; @@ -296,6 +342,8 @@ SECURITY_READ_ONLY_LATE(pmap_paddr_t) avail_start; SECURITY_READ_ONLY_LATE(pmap_paddr_t) avail_end; SECURITY_READ_ONLY_LATE(pmap_paddr_t) real_avail_end; SECURITY_READ_ONLY_LATE(unsigned long) real_phys_size; +SECURITY_READ_ONLY_LATE(vm_map_address_t) physmap_base = (vm_map_address_t)0; +SECURITY_READ_ONLY_LATE(vm_map_address_t) physmap_end = (vm_map_address_t)0; #if __ARM_KERNEL_PROTECT__ extern void ExceptionVectorsBase; @@ -308,20 +356,21 @@ typedef struct { vm_size_t len; } ptov_table_entry; -#define PTOV_TABLE_SIZE 8 -SECURITY_READ_ONLY_LATE(static ptov_table_entry) ptov_table[PTOV_TABLE_SIZE]; -SECURITY_READ_ONLY_LATE(static boolean_t) kva_active = FALSE; +#define PTOV_TABLE_SIZE 8 +SECURITY_READ_ONLY_LATE(static ptov_table_entry) ptov_table[PTOV_TABLE_SIZE]; +SECURITY_READ_ONLY_LATE(static boolean_t) kva_active = FALSE; vm_map_address_t phystokv(pmap_paddr_t pa) { for (size_t i = 0; (i < PTOV_TABLE_SIZE) && (ptov_table[i].len != 0); i++) { - if ((pa >= ptov_table[i].pa) && (pa < (ptov_table[i].pa + ptov_table[i].len))) - return (pa - ptov_table[i].pa + ptov_table[i].va); + if ((pa >= ptov_table[i].pa) && (pa < (ptov_table[i].pa + ptov_table[i].len))) { + return pa - ptov_table[i].pa + ptov_table[i].va; + } } assertf((pa - gPhysBase) < real_phys_size, "%s: illegal PA: 0x%llx", __func__, (uint64_t)pa); - return (pa - gPhysBase + gVirtBase); + return pa - gPhysBase + gVirtBase; } vm_map_address_t @@ -331,27 +380,30 @@ phystokv_range(pmap_paddr_t pa, vm_size_t *max_len) for (size_t i = 0; (i < PTOV_TABLE_SIZE) && (ptov_table[i].len != 0); i++) { if ((pa >= ptov_table[i].pa) && (pa < (ptov_table[i].pa + ptov_table[i].len))) { len = ptov_table[i].len - (pa - ptov_table[i].pa); - if (*max_len > len) + if (*max_len > len) { *max_len = len; - return (pa - ptov_table[i].pa + ptov_table[i].va); + } + return pa - ptov_table[i].pa + ptov_table[i].va; } } len = PAGE_SIZE - (pa & PAGE_MASK); - if (*max_len > len) + if (*max_len > len) { *max_len = len; + } assertf((pa - gPhysBase) < real_phys_size, "%s: illegal PA: 0x%llx", __func__, (uint64_t)pa); - return (pa - 
gPhysBase + gVirtBase); + return pa - gPhysBase + gVirtBase; } vm_offset_t ml_static_vtop(vm_offset_t va) { for (size_t i = 0; (i < PTOV_TABLE_SIZE) && (ptov_table[i].len != 0); i++) { - if ((va >= ptov_table[i].va) && (va < (ptov_table[i].va + ptov_table[i].len))) - return (va - ptov_table[i].va + ptov_table[i].pa); + if ((va >= ptov_table[i].va) && (va < (ptov_table[i].va + ptov_table[i].len))) { + return va - ptov_table[i].va + ptov_table[i].pa; + } } assertf(((vm_address_t)(va) - gVirtBase) < gPhysSize, "%s: illegal VA: %p", __func__, (void*)va); - return ((vm_address_t)(va) - gVirtBase + gPhysBase); + return (vm_address_t)(va) - gVirtBase + gPhysBase; } /* @@ -362,7 +414,7 @@ static vm_offset_t round_up_pte_hint_address(vm_offset_t address) { vm_offset_t hint_size = ARM_PTE_SIZE << ARM_PTE_HINT_ENTRIES_SHIFT; - return ((address + (hint_size - 1)) & ~(hint_size - 1)); + return (address + (hint_size - 1)) & ~(hint_size - 1); } /* allocate a page for a page table: we support static and dynamic mappings. @@ -375,7 +427,9 @@ round_up_pte_hint_address(vm_offset_t address) * for dynamic mappings, we allocate from avail_start, which should remain RWNX. */ -vm_offset_t alloc_ptpage(boolean_t map_static) { +vm_offset_t +alloc_ptpage(boolean_t map_static) +{ vm_offset_t vaddr; #if !(defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)) @@ -405,7 +459,9 @@ vm_offset_t alloc_ptpage(boolean_t map_static) { void dump_kva_l2(vm_offset_t tt_base, tt_entry_t *tt, int indent, uint64_t *rosz_out, uint64_t *rwsz_out); -void dump_kva_l2(vm_offset_t tt_base, tt_entry_t *tt, int indent, uint64_t *rosz_out, uint64_t *rwsz_out) { +void +dump_kva_l2(vm_offset_t tt_base, tt_entry_t *tt, int indent, uint64_t *rosz_out, uint64_t *rwsz_out) +{ unsigned int i; boolean_t cur_ro, prev_ro = 0; int start_entry = -1; @@ -413,9 +469,9 @@ void dump_kva_l2(vm_offset_t tt_base, tt_entry_t *tt, int indent, uint64_t *rosz pmap_paddr_t robegin = kvtophys((vm_offset_t)&ropagetable_begin); pmap_paddr_t roend = kvtophys((vm_offset_t)&ropagetable_end); boolean_t tt_static = kvtophys((vm_offset_t)tt) >= robegin && - kvtophys((vm_offset_t)tt) < roend; + kvtophys((vm_offset_t)tt) < roend; - for(i=0; i> 32),(uint32_t)start, - (uint32_t)(end >> 32),(uint32_t)end, - prev_ro ? "Static " : "Dynamic", - (sz >> 20)); + indent * 4, "", + (uint32_t)(start >> 32), (uint32_t)start, + (uint32_t)(end >> 32), (uint32_t)end, + prev_ro ? "Static " : "Dynamic", + (sz >> 20)); if (prev_ro) { *rosz_out += sz; @@ -459,25 +515,28 @@ void dump_kva_l2(vm_offset_t tt_base, tt_entry_t *tt, int indent, uint64_t *rosz } } -void dump_kva_space() { - uint64_t tot_rosz=0, tot_rwsz=0; +void +dump_kva_space() +{ + uint64_t tot_rosz = 0, tot_rwsz = 0; int ro_ptpages, rw_ptpages; pmap_paddr_t robegin = kvtophys((vm_offset_t)&ropagetable_begin); pmap_paddr_t roend = kvtophys((vm_offset_t)&ropagetable_end); boolean_t root_static = kvtophys((vm_offset_t)cpu_tte) >= robegin && - kvtophys((vm_offset_t)cpu_tte) < roend; + kvtophys((vm_offset_t)cpu_tte) < roend; uint64_t kva_base = ~((1ULL << (64 - T1SZ_BOOT)) - 1); printf("Root page table: %s\n", root_static ? "Static" : "Dynamic"); - for(unsigned int i=0; i= robegin && cur < roend; printf("0x%08x_%08x-0x%08x_%08x %s\n", - (uint32_t)(start >> 32),(uint32_t)start, - (uint32_t)(end >> 32),(uint32_t)end, - cur_ro ? "Static " : "Dynamic"); + (uint32_t)(start >> 32), (uint32_t)start, + (uint32_t)(end >> 32), (uint32_t)end, + cur_ro ? 
"Static " : "Dynamic"); dump_kva_l2(start, (tt_entry_t*)phystokv(cur), 1, &rosz, &rwsz); tot_rosz += rosz; @@ -496,9 +555,9 @@ void dump_kva_space() { } printf("L2 Address space mapped: Static %lluMB Dynamic %lluMB Total %lluMB\n", - tot_rosz >> 20, - tot_rwsz >> 20, - (tot_rosz >> 20) + (tot_rwsz >> 20)); + tot_rosz >> 20, + tot_rwsz >> 20, + (tot_rosz >> 20) + (tot_rwsz >> 20)); ro_ptpages = (int)((ropage_next - (vm_offset_t)&ropagetable_begin) >> ARM_PGSHIFT); rw_ptpages = (int)(lowGlo.lgStaticSize >> ARM_PGSHIFT); @@ -574,9 +633,9 @@ arm_vm_map(tt_entry_t * root_ttp, vm_offset_t vaddr, pt_entry_t pte) */ if (cpte != ARM_PTE_EMPTY) { panic("%s: cpte=%#llx is not empty, " - "vaddr=%#lx, pte=%#llx", - __FUNCTION__, cpte, - vaddr, pte); + "vaddr=%#lx, pte=%#llx", + __FUNCTION__, cpte, + vaddr, pte); } *ptep = pte; @@ -609,7 +668,8 @@ arm_vm_kernel_el0_map(vm_offset_t vaddr, pt_entry_t pte) * This function installs pte at vaddr for the EL1 kernel mappings. */ static void -arm_vm_kernel_el1_map(vm_offset_t vaddr, pt_entry_t pte) { +arm_vm_kernel_el1_map(vm_offset_t vaddr, pt_entry_t pte) +{ arm_vm_map(cpu_tte, vaddr, pte); } @@ -749,7 +809,8 @@ extern void bootstrap_instructions; * KTRR will cause us to fault on executable block mappings that cross the * KTRR boundary. */ -static void arm_replace_identity_map(boot_args * args) +static void +arm_replace_identity_map(void) { vm_offset_t addr; pmap_paddr_t paddr; @@ -768,12 +829,9 @@ static void arm_replace_identity_map(boot_args * args) paddr = kvtophys(addr); /* - * The V=P page tables (at the time this comment was written) start - * after the last bit of kernel data, and consist of 1 L1 page and 1 or - * more L2 pages. - * Grab references to those pages, and allocate an L3 page. + * Grab references to the V=P page tables, and allocate an L3 page. 
*/ - l1_ptp_phys = args->topOfKernelData; + l1_ptp_phys = kvtophys((vm_offset_t)&bootstrap_pagetables); l1_ptp_virt = (tt_entry_t *)phystokv(l1_ptp_phys); tte1 = &l1_ptp_virt[L1_TABLE_INDEX(paddr)]; @@ -781,7 +839,7 @@ static void arm_replace_identity_map(boot_args * args) l2_ptp_phys = (*tte1) & ARM_TTE_TABLE_MASK; tte2 = &l2_ptp_virt[L2_TABLE_INDEX(paddr)]; - l3_ptp_virt = (pt_entry_t *)alloc_ptpage(FALSE); + l3_ptp_virt = (pt_entry_t *)alloc_ptpage(TRUE); l3_ptp_phys = kvtophys((vm_offset_t)l3_ptp_virt); ptep = &l3_ptp_virt[L3_TABLE_INDEX(paddr)]; @@ -797,12 +855,12 @@ static void arm_replace_identity_map(boot_args * args) *tte2 = ARM_TTE_BOOT_TABLE | (l3_ptp_phys & ARM_TTE_TABLE_MASK); *ptep = (paddr & ARM_PTE_MASK) | - ARM_PTE_TYPE_VALID | - ARM_PTE_SH(SH_OUTER_MEMORY) | - ARM_PTE_ATTRINDX(CACHE_ATTRINDX_WRITEBACK) | - ARM_PTE_AF | - ARM_PTE_AP(AP_RONA) | - ARM_PTE_NX; + ARM_PTE_TYPE_VALID | + ARM_PTE_SH(SH_OUTER_MEMORY) | + ARM_PTE_ATTRINDX(CACHE_ATTRINDX_WRITEBACK) | + ARM_PTE_AF | + ARM_PTE_AP(AP_RONA) | + ARM_PTE_NX; } #endif /* defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR) */ @@ -846,8 +904,8 @@ arm_kva_to_pte(vm_offset_t va) static void arm_vm_page_granular_helper(vm_offset_t start, vm_offset_t _end, vm_offset_t va, pmap_paddr_t pa_offset, - int pte_prot_APX, int pte_prot_XN, unsigned granule, - pt_entry_t **deferred_pte, pt_entry_t *deferred_ptmp) + int pte_prot_APX, int pte_prot_XN, unsigned granule, + pt_entry_t **deferred_pte, pt_entry_t *deferred_ptmp) { if (va & ARM_TT_L2_OFFMASK) { /* ragged edge hanging over a ARM_TT_L2_SIZE boundary */ tt_entry_t *tte2; @@ -860,8 +918,9 @@ arm_vm_page_granular_helper(vm_offset_t start, vm_offset_t _end, vm_offset_t va, va &= ~ARM_TT_L2_OFFMASK; pa = va - gVirtBase + gPhysBase - pa_offset; - if (pa >= real_avail_end) + if (pa >= real_avail_end) { return; + } tte2 = arm_kva_to_tte(va); @@ -873,6 +932,11 @@ arm_vm_page_granular_helper(vm_offset_t start, vm_offset_t _end, vm_offset_t va, ppte = (pt_entry_t *)phystokv((tmplate & ARM_TTE_TABLE_MASK)); } else { // TTE must be reincarnated with page level mappings. + + // ... but we don't want to break up blocks on live + // translation tables. + assert(!kva_active); + ppte = (pt_entry_t*)alloc_ptpage(pa_offset == 0); bzero(ppte, ARM_PGBYTES); ppte_phys = kvtophys((vm_offset_t)ppte); @@ -881,8 +945,9 @@ arm_vm_page_granular_helper(vm_offset_t start, vm_offset_t _end, vm_offset_t va, } vm_offset_t len = _end - va; - if ((pa + len) > real_avail_end) + if ((pa + len) > real_avail_end) { _end -= (pa + len - real_avail_end); + } assert((start - gVirtBase + gPhysBase - pa_offset) >= gPhysBase); /* Round up to the nearest PAGE_SIZE boundary when creating mappings: @@ -890,9 +955,8 @@ arm_vm_page_granular_helper(vm_offset_t start, vm_offset_t _end, vm_offset_t va, * a ragged non-PAGE_SIZE-aligned edge. 
*/ vm_offset_t rounded_end = round_page(_end); /* Apply the desired protections to the specified page range */ - for (i = 0; i <= (ARM_TT_L3_INDEX_MASK>>ARM_TT_L3_SHIFT); i++) { + for (i = 0; i <= (ARM_TT_L3_INDEX_MASK >> ARM_TT_L3_SHIFT); i++) { if ((start <= va) && (va < rounded_end)) { - ptmp = pa | ARM_PTE_AF | ARM_PTE_SH(SH_OUTER_MEMORY) | ARM_PTE_TYPE; ptmp = ptmp | ARM_PTE_ATTRINDX(CACHE_ATTRINDX_DEFAULT); ptmp = ptmp | ARM_PTE_AP(pte_prot_APX); @@ -955,8 +1019,9 @@ arm_vm_page_granular_helper(vm_offset_t start, vm_offset_t _end, vm_offset_t va, va += ARM_PGBYTES; pa += ARM_PGBYTES; } - if (recursive_pte != NULL) + if (recursive_pte != NULL) { *recursive_pte = recursive_ptmp; + } } } @@ -968,15 +1033,16 @@ arm_vm_page_granular_helper(vm_offset_t start, vm_offset_t _end, vm_offset_t va, */ static void arm_vm_page_granular_prot(vm_offset_t start, unsigned long size, pmap_paddr_t pa_offset, - int tte_prot_XN, int pte_prot_APX, int pte_prot_XN, - unsigned granule) + int tte_prot_XN, int pte_prot_APX, int pte_prot_XN, + unsigned granule) { pt_entry_t *deferred_pte = NULL, deferred_ptmp = 0; vm_offset_t _end = start + size; vm_offset_t align_start = (start + ARM_TT_L2_OFFMASK) & ~ARM_TT_L2_OFFMASK; - if (size == 0x0UL) + if (size == 0x0UL) { return; + } if (align_start > _end) { arm_vm_page_granular_helper(start, _end, start, pa_offset, pte_prot_APX, pte_prot_XN, granule, NULL, NULL); @@ -987,8 +1053,8 @@ arm_vm_page_granular_prot(vm_offset_t start, unsigned long size, pmap_paddr_t pa while ((_end - align_start) >= ARM_TT_L2_SIZE) { if (!(granule & ARM64_GRANULE_ALLOW_BLOCK)) { - arm_vm_page_granular_helper(align_start, align_start+ARM_TT_L2_SIZE, align_start + 1, pa_offset, - pte_prot_APX, pte_prot_XN, granule, NULL, NULL); + arm_vm_page_granular_helper(align_start, align_start + ARM_TT_L2_SIZE, align_start + 1, pa_offset, + pte_prot_APX, pte_prot_XN, granule, NULL, NULL); } else { pmap_paddr_t pa = align_start - gVirtBase + gPhysBase - pa_offset; assert((pa & ARM_TT_L2_OFFMASK) == 0); @@ -999,15 +1065,16 @@ arm_vm_page_granular_prot(vm_offset_t start, unsigned long size, pmap_paddr_t pa if ((pa >= gPhysBase) && (pa < real_avail_end)) { tmplate = (pa & ARM_TTE_BLOCK_L2_MASK) | ARM_TTE_TYPE_BLOCK - | ARM_TTE_VALID | ARM_TTE_BLOCK_AF | ARM_TTE_BLOCK_NX - | ARM_TTE_BLOCK_AP(pte_prot_APX) | ARM_TTE_BLOCK_SH(SH_OUTER_MEMORY) - | ARM_TTE_BLOCK_ATTRINDX(CACHE_ATTRINDX_WRITEBACK); + | ARM_TTE_VALID | ARM_TTE_BLOCK_AF | ARM_TTE_BLOCK_NX + | ARM_TTE_BLOCK_AP(pte_prot_APX) | ARM_TTE_BLOCK_SH(SH_OUTER_MEMORY) + | ARM_TTE_BLOCK_ATTRINDX(CACHE_ATTRINDX_WRITEBACK); #if __ARM_KERNEL_PROTECT__ tmplate = tmplate | ARM_TTE_BLOCK_NG; #endif /* __ARM_KERNEL_PROTECT__ */ - if (tte_prot_XN) + if (tte_prot_XN) { tmplate = tmplate | ARM_TTE_BLOCK_PNX; + } *tte2 = tmplate; } @@ -1015,11 +1082,13 @@ arm_vm_page_granular_prot(vm_offset_t start, unsigned long size, pmap_paddr_t pa align_start += ARM_TT_L2_SIZE; } - if (align_start < _end) + if (align_start < _end) { arm_vm_page_granular_helper(align_start, _end, _end, pa_offset, pte_prot_APX, pte_prot_XN, granule, &deferred_pte, &deferred_ptmp); + } - if (deferred_pte != NULL) + if (deferred_pte != NULL) { *deferred_pte = deferred_ptmp; + } } static inline void @@ -1042,42 +1111,130 @@ arm_vm_page_granular_RWNX(vm_offset_t start, unsigned long size, unsigned granul /* used in the chosen/memory-map node, populated by iBoot. 
*/ typedef struct MemoryMapFileInfo { - vm_offset_t paddr; - size_t length; + vm_offset_t paddr; + size_t length; } MemoryMapFileInfo; -void -arm_vm_prot_init(boot_args * args) +// Populate seg...AuxKC and fixup AuxKC permissions +static bool +arm_vm_auxkc_init(void) { + if (auxkc_mh == 0 || auxkc_base == 0) { + return false; // no auxKC. + } + + /* Fixup AuxKC and populate seg*AuxKC globals used below */ + arm_auxkc_init((void*)auxkc_mh, (void*)auxkc_base); + + if (segLOWESTAuxKC != segLOWEST) { + panic("segLOWESTAuxKC (%p) not equal to segLOWEST (%p). auxkc_mh: %p, auxkc_base: %p", + (void*)segLOWESTAuxKC, (void*)segLOWEST, + (void*)auxkc_mh, (void*)auxkc_base); + } + /* + * The AuxKC LINKEDIT segment needs to be covered by the RO region but is excluded + * from the RO address range returned by kernel_collection_adjust_mh_addrs(). + * Ensure the highest non-LINKEDIT address in the AuxKC is the current end of + * its RO region before extending it. + */ + assert(segHIGHESTROAuxKC == segHIGHESTNLEAuxKC); + assert(segHIGHESTAuxKC >= segHIGHESTROAuxKC); + if (segHIGHESTAuxKC > segHIGHESTROAuxKC) { + segHIGHESTROAuxKC = segHIGHESTAuxKC; + } + + /* + * The AuxKC RO region must be right below the device tree/trustcache so that it can be covered + * by CTRR, and the AuxKC RX region must be within the RO region. + */ + assert(segHIGHESTROAuxKC == auxkc_right_above); + assert(segHIGHESTRXAuxKC <= segHIGHESTROAuxKC); + assert(segLOWESTRXAuxKC <= segHIGHESTRXAuxKC); + assert(segLOWESTROAuxKC <= segLOWESTRXAuxKC); + assert(segLOWESTAuxKC <= segLOWESTROAuxKC); + + if (segHIGHESTRXAuxKC < segLOWEST) { + arm_vm_page_granular_RNX(segHIGHESTRXAuxKC, segLOWEST - segHIGHESTRXAuxKC, 0); + } + if (segLOWESTRXAuxKC < segHIGHESTRXAuxKC) { + arm_vm_page_granular_ROX(segLOWESTRXAuxKC, segHIGHESTRXAuxKC - segLOWESTRXAuxKC, 0); // Refined in OSKext::readPrelinkedExtensions + } + if (segLOWESTROAuxKC < segLOWESTRXAuxKC) { + arm_vm_page_granular_RNX(segLOWESTROAuxKC, segLOWESTRXAuxKC - segLOWESTROAuxKC, 0); + } + if (segLOWESTAuxKC < segLOWESTROAuxKC) { + arm_vm_page_granular_RWNX(segLOWESTAuxKC, segLOWESTROAuxKC - segLOWESTAuxKC, 0); + } + + return true; +} + +void +arm_vm_prot_init(__unused boot_args * args) +{ segLOWESTTEXT = UINT64_MAX; - if (segSizePRELINKTEXT && (segPRELINKTEXTB < segLOWESTTEXT)) segLOWESTTEXT = segPRELINKTEXTB; + if (segSizePRELINKTEXT && (segPRELINKTEXTB < segLOWESTTEXT)) { + segLOWESTTEXT = segPRELINKTEXTB; + } assert(segSizeTEXT); - if (segTEXTB < segLOWESTTEXT) segLOWESTTEXT = segTEXTB; + if (segTEXTB < segLOWESTTEXT) { + segLOWESTTEXT = segTEXTB; + } assert(segLOWESTTEXT < UINT64_MAX); segEXTRADATA = segLOWESTTEXT; segSizeEXTRADATA = 0; segLOWEST = segLOWESTTEXT; + segLOWESTRO = segLOWESTTEXT; + + if (segLOWESTKC && segLOWESTKC < segLOWEST) { + /* + * kernel collections have segments below the kernel. In particular the collection mach header + * is below PRELINK_TEXT and is not covered by any other segments already tracked. 
+ */ + arm_vm_page_granular_RNX(segLOWESTKC, segLOWEST - segLOWESTKC, ARM64_GRANULE_ALLOW_BLOCK | ARM64_GRANULE_ALLOW_HINT); + segLOWEST = segLOWESTKC; + if (segLOWESTROKC && segLOWESTROKC < segLOWESTRO) { + segLOWESTRO = segLOWESTROKC; + } + if (segHIGHESTROKC && segHIGHESTROKC > segHIGHESTRO) { + segHIGHESTRO = segHIGHESTROKC; + } + } DTEntry memory_map; - MemoryMapFileInfo *trustCacheRange; + MemoryMapFileInfo const *trustCacheRange; unsigned int trustCacheRangeSize; int err; - err = DTLookupEntry(NULL, "chosen/memory-map", &memory_map); + if (SecureDTIsLockedDown()) { + segEXTRADATA = (vm_offset_t)PE_state.deviceTreeHead; + segSizeEXTRADATA = PE_state.deviceTreeSize; + } + + err = SecureDTLookupEntry(NULL, "chosen/memory-map", &memory_map); assert(err == kSuccess); - err = DTGetProperty(memory_map, "TrustCache", (void**)&trustCacheRange, &trustCacheRangeSize); + err = SecureDTGetProperty(memory_map, "TrustCache", (void const **)&trustCacheRange, &trustCacheRangeSize); if (err == kSuccess) { assert(trustCacheRangeSize == sizeof(MemoryMapFileInfo)); - segEXTRADATA = phystokv(trustCacheRange->paddr); - segSizeEXTRADATA = trustCacheRange->length; + if (segSizeEXTRADATA == 0) { + segEXTRADATA = phystokv(trustCacheRange->paddr); + segSizeEXTRADATA = trustCacheRange->length; + } else { + segSizeEXTRADATA += trustCacheRange->length; + } + } + if (segSizeEXTRADATA != 0) { if (segEXTRADATA <= segLOWEST) { segLOWEST = segEXTRADATA; + if (segEXTRADATA <= segLOWESTRO) { + segLOWESTRO = segEXTRADATA; + } } #if !(DEBUG || DEVELOPMENT) @@ -1088,9 +1245,44 @@ arm_vm_prot_init(boot_args * args) #endif /* !(DEBUG || DEVELOPMENT) */ arm_vm_page_granular_RNX(segEXTRADATA, segSizeEXTRADATA, ARM64_GRANULE_ALLOW_BLOCK | ARM64_GRANULE_ALLOW_HINT); + } + + const MemoryMapFileInfo *auxKC_range, *auxKC_header_range; + unsigned int auxKC_range_size, auxKC_header_range_size; + + err = SecureDTGetProperty(memory_map, "AuxKC", (const void**)&auxKC_range, + &auxKC_range_size); + if (err != kSuccess) { + goto noAuxKC; + } + assert(auxKC_range_size == sizeof(MemoryMapFileInfo)); + err = SecureDTGetProperty(memory_map, "AuxKC-mach_header", + (const void**)&auxKC_header_range, &auxKC_header_range_size); + if (err != kSuccess) { + goto noAuxKC; + } + assert(auxKC_header_range_size == sizeof(MemoryMapFileInfo)); + + auxkc_mh = phystokv(auxKC_header_range->paddr); + auxkc_base = phystokv(auxKC_range->paddr); + if (!auxkc_mh || !auxkc_base) { + goto noAuxKC; + } + if (auxkc_base < segLOWEST) { + auxkc_right_above = segLOWEST; + segLOWEST = auxkc_base; + } else { + panic("auxkc_base (%p) not below segLOWEST (%p)", (void*)auxkc_base, (void*)segLOWEST); } + /* Map AuxKC RWNX initially so that arm_vm_auxkc_init can traverse + * it and apply fixups (after we're off the bootstrap translation + * tables). + */ + arm_vm_page_granular_RWNX(auxkc_base, auxKC_range->length, 0); + +noAuxKC: /* Map coalesced kext TEXT segment RWNX for now */ arm_vm_page_granular_RWNX(segPRELINKTEXTB, segSizePRELINKTEXT, ARM64_GRANULE_ALLOW_BLOCK); // Refined in OSKext::readPrelinkedExtensions @@ -1133,20 +1325,7 @@ arm_vm_prot_init(boot_args * args) * NO, stuff in this segment gets modified during startup (viz. mac_policy_init()/mac_policy_list) * Make RNX in prot_finalize */ -#if XNU_MONITOR - /* The ropagetable region will ultimately be owned by the PPL. Set permissions - * on it separately to avoid applying mismatched block settings between this function, - * pmap_static_allocations_done(), and arm_vm_prot_finalize(). 
*/ - vm_offset_t segDATACONSTE = segDATACONSTB + segSizeDATACONST; - - arm_vm_page_granular_RWNX(segDATACONSTB, (vm_offset_t)&ropagetable_begin - segDATACONSTB, ARM64_GRANULE_ALLOW_BLOCK); - arm_vm_page_granular_RWNX((vm_offset_t)&ropagetable_begin, - (vm_offset_t)&ropagetable_end - (vm_offset_t)&ropagetable_begin, ARM64_GRANULE_ALLOW_BLOCK); - arm_vm_page_granular_RWNX((vm_offset_t)&ropagetable_end, - segDATACONSTE - (vm_offset_t)&ropagetable_end, ARM64_GRANULE_ALLOW_BLOCK); -#else arm_vm_page_granular_RWNX(segDATACONSTB, segSizeDATACONST, ARM64_GRANULE_ALLOW_BLOCK); -#endif arm_vm_page_granular_ROX(segTEXTEXECB, segSizeTEXTEXEC, ARM64_GRANULE_ALLOW_BLOCK | ARM64_GRANULE_ALLOW_HINT); @@ -1162,6 +1341,8 @@ arm_vm_prot_init(boot_args * args) arm_vm_page_granular_RWNX(segPPLDATAB, segSizePPLDATA, ARM64_GRANULE_ALLOW_BLOCK | ARM64_GRANULE_ALLOW_HINT); #endif + arm_vm_page_granular_RWNX(segHIBDATAB, segSizeHIBDATA, ARM64_GRANULE_ALLOW_BLOCK | ARM64_GRANULE_ALLOW_HINT); + arm_vm_page_granular_RWNX(segBOOTDATAB, segSizeBOOTDATA, 0); arm_vm_page_granular_RNX((vm_offset_t)&intstack_low_guard, PAGE_MAX_SIZE, 0); arm_vm_page_granular_RNX((vm_offset_t)&intstack_high_guard, PAGE_MAX_SIZE, 0); @@ -1171,15 +1352,19 @@ arm_vm_prot_init(boot_args * args) arm_vm_page_granular_RWNX(segLINKB, segSizeLINK, ARM64_GRANULE_ALLOW_BLOCK | ARM64_GRANULE_ALLOW_HINT); arm_vm_page_granular_RWNX(segPLKLINKEDITB, segSizePLKLINKEDIT, ARM64_GRANULE_ALLOW_BLOCK | ARM64_GRANULE_ALLOW_HINT); // Coalesced kext LINKEDIT segment arm_vm_page_granular_ROX(segLASTB, segSizeLAST, ARM64_GRANULE_ALLOW_BLOCK); // __LAST may be empty, but we cannot assume this - + if (segLASTDATACONSTB) { + arm_vm_page_granular_RWNX(segLASTDATACONSTB, segSizeLASTDATACONST, ARM64_GRANULE_ALLOW_BLOCK); // __LASTDATA_CONST may be empty, but we cannot assume this + } arm_vm_page_granular_RWNX(segPRELINKDATAB, segSizePRELINKDATA, ARM64_GRANULE_ALLOW_BLOCK | ARM64_GRANULE_ALLOW_HINT); // Prelink __DATA for kexts (RW data) - if (segSizePLKLLVMCOV > 0) + if (segSizePLKLLVMCOV > 0) { arm_vm_page_granular_RWNX(segPLKLLVMCOVB, segSizePLKLLVMCOV, ARM64_GRANULE_ALLOW_BLOCK | ARM64_GRANULE_ALLOW_HINT); // LLVM code coverage data - + } arm_vm_page_granular_RWNX(segPRELINKINFOB, segSizePRELINKINFO, ARM64_GRANULE_ALLOW_BLOCK | ARM64_GRANULE_ALLOW_HINT); /* PreLinkInfoDictionary */ - arm_vm_page_granular_RNX(phystokv(args->topOfKernelData), BOOTSTRAP_TABLE_SIZE, ARM64_GRANULE_ALLOW_BLOCK | ARM64_GRANULE_ALLOW_HINT); // Boot page tables; they should not be mutable. + /* Record the bounds of the kernelcache. 
*/ + vm_kernelcache_base = segLOWEST; + vm_kernelcache_top = end_kern; } /* @@ -1198,37 +1383,41 @@ cmp_ptov_entries(const void *a, const void *b) const ptov_table_entry *entry_a = a; const ptov_table_entry *entry_b = b; // Sort in descending order of segment length - if (entry_a->len < entry_b->len) + if (entry_a->len < entry_b->len) { return 1; - else if (entry_a->len > entry_b->len) + } else if (entry_a->len > entry_b->len) { return -1; - else + } else { return 0; + } } SECURITY_READ_ONLY_LATE(static unsigned int) ptov_index = 0; +#define ROUND_L1(addr) (((addr) + ARM_TT_L1_OFFMASK) & ~(ARM_TT_L1_OFFMASK)) #define ROUND_TWIG(addr) (((addr) + ARM_TT_TWIG_OFFMASK) & ~(ARM_TT_TWIG_OFFMASK)) static void -arm_vm_physmap_slide(ptov_table_entry *temp_ptov_table, vm_map_address_t physmap_base, vm_map_address_t orig_va, vm_size_t len, int pte_prot_APX, unsigned granule) +arm_vm_physmap_slide(ptov_table_entry *temp_ptov_table, vm_map_address_t orig_va, vm_size_t len, int pte_prot_APX, unsigned granule) { pmap_paddr_t pa_offset; assert(ptov_index < PTOV_TABLE_SIZE); assert((orig_va & ARM_PGMASK) == 0); temp_ptov_table[ptov_index].pa = orig_va - gVirtBase + gPhysBase; - if (ptov_index == 0) + if (ptov_index == 0) { temp_ptov_table[ptov_index].va = physmap_base; - else + } else { temp_ptov_table[ptov_index].va = temp_ptov_table[ptov_index - 1].va + temp_ptov_table[ptov_index - 1].len; + } if (granule & ARM64_GRANULE_ALLOW_BLOCK) { vm_map_address_t orig_offset = temp_ptov_table[ptov_index].pa & ARM_TT_TWIG_OFFMASK; vm_map_address_t new_offset = temp_ptov_table[ptov_index].va & ARM_TT_TWIG_OFFMASK; - if (new_offset < orig_offset) + if (new_offset < orig_offset) { temp_ptov_table[ptov_index].va += (orig_offset - new_offset); - else if (new_offset > orig_offset) + } else if (new_offset > orig_offset) { temp_ptov_table[ptov_index].va = ROUND_TWIG(temp_ptov_table[ptov_index].va) + orig_offset; + } } assert((temp_ptov_table[ptov_index].va & ARM_PGMASK) == 0); temp_ptov_table[ptov_index].len = round_page(len); @@ -1242,7 +1431,7 @@ arm_vm_physmap_slide(ptov_table_entry *temp_ptov_table, vm_map_address_t physmap SECURITY_READ_ONLY_LATE(static boolean_t) keep_linkedit = FALSE; static void -arm_vm_physmap_init(boot_args *args, vm_map_address_t physmap_base, vm_map_address_t dynamic_memory_begin __unused) +arm_vm_physmap_init(boot_args *args) { ptov_table_entry temp_ptov_table[PTOV_TABLE_SIZE]; bzero(temp_ptov_table, sizeof(temp_ptov_table)); @@ -1252,15 +1441,14 @@ arm_vm_physmap_init(boot_args *args, vm_map_address_t physmap_base, vm_map_addre // must be at page granularity, so that PPL ownership or cache attribute changes can be reflected // in the physical aperture mappings. - // Slid region between gPhysBase and beginning of protected text - arm_vm_physmap_slide(temp_ptov_table, physmap_base, gVirtBase, segLOWEST - gVirtBase, AP_RWNA, 0); + arm_vm_physmap_slide(temp_ptov_table, gVirtBase, segLOWEST - gVirtBase, AP_RWNA, 0); // kext bootstrap segment - arm_vm_physmap_slide(temp_ptov_table, physmap_base, segKLDB, segSizeKLD, AP_RONA, 0); + arm_vm_physmap_slide(temp_ptov_table, segKLDB, segSizeKLD, AP_RONA, 0); // Early-boot data - arm_vm_physmap_slide(temp_ptov_table, physmap_base, segBOOTDATAB, segSizeBOOTDATA, AP_RONA, 0); + arm_vm_physmap_slide(temp_ptov_table, segBOOTDATAB, segSizeBOOTDATA, AP_RONA, 0); #if KASAN_DYNAMIC_BLACKLIST /* KASAN's dynamic blacklist needs to query the LINKEDIT segment at runtime. 
As such, the @@ -1268,27 +1456,32 @@ arm_vm_physmap_init(boot_args *args, vm_map_address_t physmap_base, vm_map_addre keep_linkedit = TRUE; #else PE_parse_boot_argn("keepsyms", &keep_linkedit, sizeof(keep_linkedit)); + if (kernel_mach_header_is_in_fileset(&_mh_execute_header)) { + keep_linkedit = TRUE; + } #endif if (!keep_linkedit) { // Kernel LINKEDIT - arm_vm_physmap_slide(temp_ptov_table, physmap_base, segLINKB, segSizeLINK, AP_RWNA, 0); + arm_vm_physmap_slide(temp_ptov_table, segLINKB, segSizeLINK, AP_RWNA, 0); // Prelinked kernel LINKEDIT - arm_vm_physmap_slide(temp_ptov_table, physmap_base, segPLKLINKEDITB, segSizePLKLINKEDIT, AP_RWNA, 0); + arm_vm_physmap_slide(temp_ptov_table, segPLKLINKEDITB, segSizePLKLINKEDIT, AP_RWNA, 0); } // Prelinked kernel plists - arm_vm_physmap_slide(temp_ptov_table, physmap_base, segPRELINKINFOB, segSizePRELINKINFO, AP_RWNA, 0); + arm_vm_physmap_slide(temp_ptov_table, segPRELINKINFOB, segSizePRELINKINFO, AP_RWNA, 0); - // Device tree, ramdisk, boot args - arm_vm_physmap_slide(temp_ptov_table, physmap_base, end_kern, (args->topOfKernelData - gPhysBase + gVirtBase) - end_kern, AP_RWNA, 0); - PE_slide_devicetree(temp_ptov_table[ptov_index - 1].va - end_kern); + // Device tree (if not locked down), ramdisk, boot args + arm_vm_physmap_slide(temp_ptov_table, end_kern, (args->topOfKernelData - gPhysBase + gVirtBase) - end_kern, AP_RWNA, 0); + if (!SecureDTIsLockedDown()) { + PE_slide_devicetree(temp_ptov_table[ptov_index - 1].va - end_kern); + } // Remainder of physical memory - arm_vm_physmap_slide(temp_ptov_table, physmap_base, (args->topOfKernelData + BOOTSTRAP_TABLE_SIZE - gPhysBase + gVirtBase), - real_avail_end - (args->topOfKernelData + BOOTSTRAP_TABLE_SIZE), AP_RWNA, 0); + arm_vm_physmap_slide(temp_ptov_table, (args->topOfKernelData - gPhysBase + gVirtBase), + real_avail_end - args->topOfKernelData, AP_RWNA, 0); - assert((temp_ptov_table[ptov_index - 1].va + temp_ptov_table[ptov_index - 1].len) <= dynamic_memory_begin); + assert((temp_ptov_table[ptov_index - 1].va + temp_ptov_table[ptov_index - 1].len) <= physmap_end); // Sort in descending order of segment length. LUT traversal is linear, so largest (most likely used) // segments should be placed earliest in the table to optimize lookup performance. 
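A minimal sketch of the lookup pattern this ordering optimizes: the comment above is the rationale for cmp_ptov_entries sorting the ptov table by descending length, since the table is consulted with a plain linear scan that stops at the first covering entry, and the bulk "remainder of physical memory" region is the range most translations hit. The struct below mirrors the pa/va/len fields used by arm_vm_physmap_slide(); ptov_lookup and the PTOV_TABLE_SIZE value here are illustrative assumptions, not the kernel's actual phystokv implementation.

    #include <stddef.h>
    #include <stdint.h>

    typedef uint64_t pmap_paddr_t;
    typedef uint64_t vm_map_address_t;

    /* Simplified model of one ptov table entry built by arm_vm_physmap_slide(). */
    typedef struct {
        pmap_paddr_t     pa;   /* physical base of the region */
        vm_map_address_t va;   /* virtual base inside the physical aperture */
        uint64_t         len;  /* region length, page rounded (0 = unused slot) */
    } ptov_table_entry;

    #define PTOV_TABLE_SIZE 8  /* illustrative size */

    /*
     * Linear PA -> VA translation over a table sorted by descending length.
     * The scan returns on the first covering entry, so with the largest
     * (most frequently translated) region in slot 0 the common case costs a
     * single comparison; the small KLD/BOOTDATA/LINKEDIT carve-outs are only
     * walked for the rare addresses that fall inside them.
     */
    static vm_map_address_t
    ptov_lookup(const ptov_table_entry table[PTOV_TABLE_SIZE], pmap_paddr_t pa)
    {
        for (size_t i = 0; i < PTOV_TABLE_SIZE && table[i].len != 0; i++) {
            if (pa >= table[i].pa && pa - table[i].pa < table[i].len) {
                return table[i].va + (pa - table[i].pa);
            }
        }
        return 0; /* not covered by the physical aperture */
    }
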
@@ -1300,22 +1493,22 @@ arm_vm_physmap_init(boot_args *args, vm_map_address_t physmap_base, vm_map_addre #else static void -arm_vm_physmap_init(boot_args *args, vm_map_address_t physmap_base, vm_map_address_t dynamic_memory_begin __unused) +arm_vm_physmap_init(boot_args *args) { ptov_table_entry temp_ptov_table[PTOV_TABLE_SIZE]; bzero(temp_ptov_table, sizeof(temp_ptov_table)); // Will be handed back to VM layer through ml_static_mfree() in arm_vm_prot_finalize() - arm_vm_physmap_slide(temp_ptov_table, physmap_base, gVirtBase, segLOWEST - gVirtBase, AP_RWNA, + arm_vm_physmap_slide(temp_ptov_table, gVirtBase, segLOWEST - gVirtBase, AP_RWNA, ARM64_GRANULE_ALLOW_BLOCK | ARM64_GRANULE_ALLOW_HINT); arm_vm_page_granular_RWNX(end_kern, phystokv(args->topOfKernelData) - end_kern, - ARM64_GRANULE_ALLOW_BLOCK | ARM64_GRANULE_ALLOW_HINT); /* Device Tree, RAM Disk (if present), bootArgs */ + ARM64_GRANULE_ALLOW_BLOCK | ARM64_GRANULE_ALLOW_HINT); /* Device Tree (if not locked down), RAM Disk (if present), bootArgs */ - arm_vm_physmap_slide(temp_ptov_table, physmap_base, (args->topOfKernelData + BOOTSTRAP_TABLE_SIZE - gPhysBase + gVirtBase), - real_avail_end - (args->topOfKernelData + BOOTSTRAP_TABLE_SIZE), AP_RWNA, ARM64_GRANULE_ALLOW_BLOCK | ARM64_GRANULE_ALLOW_HINT); // rest of physmem + arm_vm_physmap_slide(temp_ptov_table, (args->topOfKernelData - gPhysBase + gVirtBase), + real_avail_end - args->topOfKernelData, AP_RWNA, ARM64_GRANULE_ALLOW_BLOCK | ARM64_GRANULE_ALLOW_HINT); // rest of physmem - assert((temp_ptov_table[ptov_index - 1].va + temp_ptov_table[ptov_index - 1].len) <= dynamic_memory_begin); + assert((temp_ptov_table[ptov_index - 1].va + temp_ptov_table[ptov_index - 1].len) <= physmap_end); // Sort in descending order of segment length. LUT traversal is linear, so largest (most likely used) // segments should be placed earliest in the table to optimize lookup performance. @@ -1402,6 +1595,11 @@ arm_vm_prot_finalize(boot_args * args __unused) */ arm_vm_page_granular_RNX(segLASTB, segSizeLAST, ARM64_GRANULE_ALLOW_BLOCK); + /* __LASTDATA_CONST should no longer be writable. */ + if (segLASTDATACONSTB) { + arm_vm_page_granular_RNX(segLASTDATACONSTB, segSizeLASTDATACONST, ARM64_GRANULE_ALLOW_BLOCK); + } + /* * Must wait until all other region permissions are set before locking down DATA_CONST * as the kernel static page tables live in DATA_CONST on KTRR enabled systems @@ -1409,20 +1607,7 @@ arm_vm_prot_finalize(boot_args * args __unused) */ #endif -#if XNU_MONITOR - vm_offset_t segDATACONSTE = segDATACONSTB + segSizeDATACONST; - - /* - * For the moment, the RO pagetable allocation is part of the - * constant data segment, but it is technically owned by the - * PPL. Hence, we should not reprotect it. - */ - arm_vm_page_granular_RNX(segDATACONSTB, (vm_offset_t)&ropagetable_begin - segDATACONSTB, ARM64_GRANULE_ALLOW_BLOCK); - arm_vm_page_granular_RNX((vm_offset_t)&ropagetable_end, - segDATACONSTE - (vm_offset_t)&ropagetable_end, ARM64_GRANULE_ALLOW_BLOCK); -#else arm_vm_page_granular_RNX(segDATACONSTB, segSizeDATACONST, ARM64_GRANULE_ALLOW_BLOCK); -#endif __builtin_arm_dsb(DSB_ISH); flush_mmu_tlb(); @@ -1431,38 +1616,19 @@ arm_vm_prot_finalize(boot_args * args __unused) #define TBI_USER 0x1 #define TBI_KERNEL 0x2 -boolean_t user_tbi = TRUE; - /* * TBI (top-byte ignore) is an ARMv8 feature for ignoring the top 8 bits of * address accesses. It can be enabled separately for TTBR0 (user) and - * TTBR1 (kernel). 
We enable it by default for user only, but allow both - * to be controlled by the 'tbi' boot-arg. + * TTBR1 (kernel). We enable it by default for user only. */ static void set_tbi(void) { #if !__ARM_KERNEL_PROTECT__ - /* If we are not built with __ARM_KERNEL_PROTECT__, TBI can be turned - * off with a boot-arg. - */ uint64_t old_tcr, new_tcr; - int tbi = 0; - if (PE_parse_boot_argn("tbi", &tbi, sizeof(tbi))) - user_tbi = ((tbi & TBI_USER) == TBI_USER); old_tcr = new_tcr = get_tcr(); - new_tcr |= (user_tbi) ? TCR_TBI0_TOPBYTE_IGNORED : 0; - -#if !defined(HAS_APPLE_PAC) - /* - * arm_vm_init() runs after rebase_threaded_starts(), so enabling TBI1 - * at this point will break the computed pointer signatures. TBID1 - * could help mitigate this problem, but for now we'll just disable - * kernel TBI if PAC is being used. - */ - new_tcr |= (tbi & TBI_KERNEL) ? TCR_TBI1_TOPBYTE_IGNORED : 0; -#endif + new_tcr |= TCR_TBI0_TOPBYTE_IGNORED; if (old_tcr != new_tcr) { set_tcr(new_tcr); @@ -1471,6 +1637,52 @@ set_tbi(void) #endif /* !__ARM_KERNEL_PROTECT__ */ } +/* + * Initialize and enter blank (invalid) page tables in a L1 translation table for a given VA range. + * + * This is a helper function used to build up the initial page tables for the kernel translation table. + * With KERNEL_INTEGRITY we keep at least the root level of the kernel page table immutable, thus the need + * to preallocate before machine_lockdown any L1 entries necessary during the entire kernel runtime. + * + * For a given VA range, if necessary, allocate new L2 translation tables and install the table entries in + * the appropriate L1 table indexes. called before the translation table is active + * + * parameters: + * + * tt: virtual address of L1 translation table to modify + * start: beginning of VA range + * end: end of VA range + * static_map: whether to allocate the new translation table page from read only memory + * table_attrs: attributes of new table entry in addition to VALID and TYPE_TABLE attributes + * + */ + +static void +init_ptpages(tt_entry_t *tt, vm_map_address_t start, vm_map_address_t end, bool static_map, uint64_t table_attrs) +{ + tt_entry_t *l1_tte; + vm_offset_t ptpage_vaddr; + + l1_tte = tt + ((start & ARM_TT_L1_INDEX_MASK) >> ARM_TT_L1_SHIFT); + + while (start < end) { + if (*l1_tte == ARM_TTE_EMPTY) { + /* Allocate a page and setup L1 Table TTE in L1 */ + ptpage_vaddr = alloc_ptpage(static_map); + *l1_tte = (kvtophys(ptpage_vaddr) & ARM_TTE_TABLE_MASK) | ARM_TTE_TYPE_TABLE | ARM_TTE_VALID | table_attrs; + bzero((void *)ptpage_vaddr, ARM_PGBYTES); + } + + if ((start + ARM_TT_L1_SIZE) < start) { + /* If this is the last L1 entry, it must cover the last mapping. */ + break; + } + + start += ARM_TT_L1_SIZE; + l1_tte++; + } +} + #define ARM64_PHYSMAP_SLIDE_RANGE (1ULL << 30) // 1 GB #define ARM64_PHYSMAP_SLIDE_MASK (ARM64_PHYSMAP_SLIDE_RANGE - 1) @@ -1486,11 +1698,9 @@ arm_vm_init(uint64_t memory_size, boot_args * args) uint64_t mem_segments; vm_offset_t ptpage_vaddr; vm_map_address_t dynamic_memory_begin; - vm_map_address_t physmap_base; - /* - * Get the virtual and physical memory base from boot_args. + * Get the virtual and physical kernel-managed memory base from boot_args. */ gVirtBase = args->virtBase; gPhysBase = args->physBase; @@ -1510,12 +1720,24 @@ arm_vm_init(uint64_t memory_size, boot_args * args) */ gPhysSize = mem_size = ((gPhysBase + args->memSize) & ~PAGE_MASK) - gPhysBase; - if ((memory_size != 0) && (mem_size > memory_size)) + mem_actual = args->memSizeActual ? 
args->memSizeActual : mem_size; + + if ((memory_size != 0) && (mem_size > memory_size)) { mem_size = memory_size; - if (mem_size >= ((VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) / 4)) + max_mem_actual = memory_size; + } else { + max_mem_actual = mem_actual; + } + if (mem_size >= ((VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) / 2)) { panic("Unsupported memory configuration %lx\n", mem_size); + } - physmap_base = phystokv(args->topOfKernelData) + BOOTSTRAP_TABLE_SIZE; +#if defined(ARM_LARGE_MEMORY) + unsigned long physmap_l1_entries = ((real_phys_size + ARM64_PHYSMAP_SLIDE_RANGE) >> ARM_TT_L1_SHIFT) + 1; + physmap_base = VM_MIN_KERNEL_ADDRESS - (physmap_l1_entries << ARM_TT_L1_SHIFT); +#else + physmap_base = phystokv(args->topOfKernelData); +#endif // Slide the physical aperture to a random page-aligned location within the slide range uint64_t physmap_slide = early_random() & ARM64_PHYSMAP_SLIDE_MASK & ~((uint64_t)PAGE_MASK); @@ -1525,11 +1747,18 @@ arm_vm_init(uint64_t memory_size, boot_args * args) #if XNU_MONITOR physmap_base = ROUND_TWIG(physmap_base); +#if defined(ARM_LARGE_MEMORY) + static_memory_end = phystokv(args->topOfKernelData); +#else static_memory_end = physmap_base + mem_size; +#endif // ARM_LARGE_MEMORY + physmap_end = physmap_base + real_phys_size; #else static_memory_end = physmap_base + mem_size + (PTOV_TABLE_SIZE * ARM_TT_TWIG_SIZE); // worst possible case for block alignment + physmap_end = physmap_base + real_phys_size + (PTOV_TABLE_SIZE * ARM_TT_TWIG_SIZE); #endif -#if KASAN + +#if KASAN && !defined(ARM_LARGE_MEMORY) /* add the KASAN stolen memory to the physmap */ dynamic_memory_begin = static_memory_end + (shadow_ptop - shadow_pbase); #else @@ -1540,11 +1769,12 @@ arm_vm_init(uint64_t memory_size, boot_args * args) dynamic_memory_begin += PPL_STACK_REGION_SIZE; pmap_stacks_end = (void*)dynamic_memory_begin; #endif - if (dynamic_memory_begin > VM_MAX_KERNEL_ADDRESS) + if (dynamic_memory_begin > VM_MAX_KERNEL_ADDRESS) { panic("Unsupported memory configuration %lx\n", mem_size); + } - boot_ttep = args->topOfKernelData; - boot_tte = (tt_entry_t *) phystokv(boot_ttep); + boot_tte = (tt_entry_t *)&bootstrap_pagetables; + boot_ttep = kvtophys((vm_offset_t)boot_tte); #if DEVELOPMENT || DEBUG /* Sanity check - assert that BOOTSTRAP_TABLE_SIZE is sufficiently-large to @@ -1562,10 +1792,17 @@ arm_vm_init(uint64_t memory_size, boot_args * args) * TTBR0 L1, TTBR0 L2 - 1:1 bootstrap mapping. * TTBR1 L1, TTBR1 L2 - kernel mapping */ - avail_start = boot_ttep + BOOTSTRAP_TABLE_SIZE; + + /* + * TODO: free bootstrap table memory back to allocator. + * on large memory systems bootstrap tables could be quite large. + * after bootstrap complete, xnu can warm start with a single 16KB page mapping + * to trampoline to KVA. this requires only 3 pages to stay resident. 
+ */ + avail_start = args->topOfKernelData; #if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR) - arm_replace_identity_map(args); + arm_replace_identity_map(); #endif /* Initialize invalid tte page */ @@ -1595,26 +1832,17 @@ arm_vm_init(uint64_t memory_size, boot_args * args) * * the so called physical aperture should be statically mapped */ - va_l1 = gVirtBase; - va_l1_end = dynamic_memory_begin; - cpu_l1_tte = cpu_tte + ((va_l1 & ARM_TT_L1_INDEX_MASK) >> ARM_TT_L1_SHIFT); - - while (va_l1 < va_l1_end) { - if (*cpu_l1_tte == ARM_TTE_EMPTY) { - /* Allocate a page and setup L1 Table TTE in L1 */ - ptpage_vaddr = alloc_ptpage(TRUE); - *cpu_l1_tte = (kvtophys(ptpage_vaddr) & ARM_TTE_TABLE_MASK) | ARM_TTE_TYPE_TABLE | ARM_TTE_VALID; - bzero((void *)ptpage_vaddr, ARM_PGBYTES); - } + init_ptpages(cpu_tte, gVirtBase, dynamic_memory_begin, TRUE, 0); - if ((va_l1 + ARM_TT_L1_SIZE) < va_l1) { - /* If this is the last L1 entry, it must cover the last mapping. */ - break; - } +#if defined(ARM_LARGE_MEMORY) + /* + * Initialize l1 page table pages : + * on large memory systems the physical aperture exists separately below + * the rest of the kernel virtual address space + */ + init_ptpages(cpu_tte, physmap_base, ROUND_L1(physmap_end), TRUE, ARM_DYNAMIC_TABLE_XN); +#endif - va_l1 += ARM_TT_L1_SIZE; - cpu_l1_tte++; - } #if __ARM_KERNEL_PROTECT__ /* Expand the page tables to prepare for the EL0 mappings. */ @@ -1622,7 +1850,7 @@ arm_vm_init(uint64_t memory_size, boot_args * args) #endif /* __ARM_KERNEL_PROTECT__ */ /* - * Now retrieve addresses for end, edata, and etext from MACH-O headers + * Now retrieve addresses for various segments from kernel mach-o header */ segPRELINKTEXTB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__PRELINK_TEXT", &segSizePRELINKTEXT); segPLKDATACONSTB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__PLK_DATA_CONST", &segSizePLKDATACONST); @@ -1640,7 +1868,7 @@ arm_vm_init(uint64_t memory_size, boot_args * args) segPPLDATAB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__PPLDATA", &segSizePPLDATA); #endif - segBOOTDATAB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__BOOTDATA", &segSizeBOOTDATA); + segBOOTDATAB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__BOOTDATA", &segSizeBOOTDATA); segLINKB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__LINKEDIT", &segSizeLINK); segKLDB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__KLD", &segSizeKLD); segPRELINKDATAB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__PRELINK_DATA", &segSizePRELINKDATA); @@ -1648,6 +1876,47 @@ arm_vm_init(uint64_t memory_size, boot_args * args) segPLKLLVMCOVB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__PLK_LLVM_COV", &segSizePLKLLVMCOV); segPLKLINKEDITB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__PLK_LINKEDIT", &segSizePLKLINKEDIT); segLASTB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__LAST", &segSizeLAST); + segLASTDATACONSTB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__LASTDATA_CONST", &segSizeLASTDATACONST); + + sectHIBTEXTB = (vm_offset_t) getsectdatafromheader(&_mh_execute_header, "__TEXT_EXEC", "__hib_text", §SizeHIBTEXT); + sectHIBDATACONSTB = (vm_offset_t) getsectdatafromheader(&_mh_execute_header, "__DATA_CONST", "__hib_const", §SizeHIBDATACONST); + segHIBDATAB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__HIBDATA", &segSizeHIBDATA); + + if (kernel_mach_header_is_in_fileset(&_mh_execute_header)) { + 
kernel_mach_header_t *kc_mh = PE_get_kc_header(KCKindPrimary); + + // fileset has kext PLK_TEXT_EXEC under kernel collection TEXT_EXEC following kernel's LAST + segKCTEXTEXECB = (vm_offset_t) getsegdatafromheader(kc_mh, "__TEXT_EXEC", &segSizeKCTEXTEXEC); + assert(segPLKTEXTEXECB && !segSizePLKTEXTEXEC); // kernel PLK_TEXT_EXEC must be empty + assert(segLASTB && segSizeLAST); // kernel LAST must not be empty + assert(segKCTEXTEXECB <= segLASTB); // KC TEXT_EXEC must contain kernel LAST + assert(segKCTEXTEXECB + segSizeKCTEXTEXEC >= segLASTB + segSizeLAST); + segPLKTEXTEXECB = segLASTB + segSizeLAST; + segSizePLKTEXTEXEC = segSizeKCTEXTEXEC - (segPLKTEXTEXECB - segKCTEXTEXECB); + + // fileset has kext PLK_DATA_CONST under kernel collection DATA_CONST following kernel's LASTDATA_CONST + segKCDATACONSTB = (vm_offset_t) getsegdatafromheader(kc_mh, "__DATA_CONST", &segSizeKCDATACONST); + assert(segPLKDATACONSTB && !segSizePLKDATACONST); // kernel PLK_DATA_CONST must be empty + assert(segLASTDATACONSTB && segSizeLASTDATACONST); // kernel LASTDATA_CONST must be non-empty + assert(segKCDATACONSTB <= segLASTDATACONSTB); // KC DATA_CONST must contain kernel LASTDATA_CONST + assert(segKCDATACONSTB + segSizeKCDATACONST >= segLASTDATACONSTB + segSizeLASTDATACONST); + segPLKDATACONSTB = segLASTDATACONSTB + segSizeLASTDATACONST; + segSizePLKDATACONST = segSizeKCDATACONST - (segPLKDATACONSTB - segKCDATACONSTB); + + // fileset has kext PRELINK_DATA under kernel collection DATA following kernel's empty PRELINK_DATA + segKCDATAB = (vm_offset_t) getsegdatafromheader(kc_mh, "__DATA", &segSizeKCDATA); + assert(segPRELINKDATAB && !segSizePRELINKDATA); // kernel PRELINK_DATA must be empty + assert(segKCDATAB <= segPRELINKDATAB); // KC DATA must contain kernel PRELINK_DATA + assert(segKCDATAB + segSizeKCDATA >= segPRELINKDATAB + segSizePRELINKDATA); + segSizePRELINKDATA = segSizeKCDATA - (segPRELINKDATAB - segKCDATAB); + + // fileset has consolidated PRELINK_TEXT, PRELINK_INFO and LINKEDIT at the kernel collection level + assert(segPRELINKTEXTB && !segSizePRELINKTEXT); // kernel PRELINK_TEXT must be empty + segPRELINKTEXTB = (vm_offset_t) getsegdatafromheader(kc_mh, "__PRELINK_TEXT", &segSizePRELINKTEXT); + assert(segPRELINKINFOB && !segSizePRELINKINFO); // kernel PRELINK_INFO must be empty + segPRELINKINFOB = (vm_offset_t) getsegdatafromheader(kc_mh, "__PRELINK_INFO", &segSizePRELINKINFO); + segLINKB = (vm_offset_t) getsegdatafromheader(kc_mh, "__LINKEDIT", &segSizeLINK); + } (void) PE_parse_boot_argn("use_contiguous_hint", &use_contiguous_hint, sizeof(use_contiguous_hint)); assert(segSizePRELINKTEXT < 0x03000000); /* 23355738 */ @@ -1660,7 +1929,7 @@ arm_vm_init(uint64_t memory_size, boot_args * args) etext = (vm_offset_t) segTEXTB + segSizeTEXT; sdata = (vm_offset_t) segDATAB; edata = (vm_offset_t) segDATAB + segSizeDATA; - end_kern = round_page(getlastaddr()); /* Force end to next page */ + end_kern = round_page(segHIGHESTKC ? 
segHIGHESTKC : getlastaddr()); /* Force end to next page */ vm_set_page_size(); @@ -1685,6 +1954,7 @@ arm_vm_init(uint64_t memory_size, boot_args * args) arm_vm_prot_init(args); + vm_page_kernelcache_count = (unsigned int) (atop_64(end_kern - segLOWEST)); /* * Initialize the page tables for the low globals: @@ -1703,31 +1973,24 @@ arm_vm_init(uint64_t memory_size, boot_args * args) * cover this address range: * KERNEL_DYNAMIC_ADDR - VM_MAX_KERNEL_ADDRESS */ - va_l1 = dynamic_memory_begin; - va_l1_end = VM_MAX_KERNEL_ADDRESS; - cpu_l1_tte = cpu_tte + ((va_l1 & ARM_TT_L1_INDEX_MASK) >> ARM_TT_L1_SHIFT); - - while (va_l1 < va_l1_end) { - if (*cpu_l1_tte == ARM_TTE_EMPTY) { - /* Allocate a page and setup L1 Table TTE in L1 */ - ptpage_vaddr = alloc_ptpage(TRUE); - *cpu_l1_tte = (kvtophys(ptpage_vaddr) & ARM_TTE_TABLE_MASK) | ARM_TTE_TYPE_TABLE | ARM_TTE_VALID | ARM_DYNAMIC_TABLE_XN; - bzero((void *)ptpage_vaddr, ARM_PGBYTES); - } - - if ((va_l1 + ARM_TT_L1_SIZE) < va_l1) { - /* If this is the last L1 entry, it must cover the last mapping. */ - break; - } - - va_l1 += ARM_TT_L1_SIZE; - cpu_l1_tte++; - } +#if defined(ARM_LARGE_MEMORY) + /* + * dynamic mapped memory outside the VM allocator VA range required to bootstrap VM system + * don't expect to exceed 64GB, no sense mapping any more space between here and the VM heap range + */ + init_ptpages(cpu_tte, dynamic_memory_begin, ROUND_L1(dynamic_memory_begin), FALSE, ARM_DYNAMIC_TABLE_XN); +#else + /* + * TODO: do these pages really need to come from RO memory? + * With legacy 3 level table systems we never mapped more than a single L1 entry so this may be dead code + */ + init_ptpages(cpu_tte, dynamic_memory_begin, VM_MAX_KERNEL_ADDRESS, TRUE, ARM_DYNAMIC_TABLE_XN); +#endif #if KASAN /* record the extent of the physmap */ physmap_vbase = physmap_base; - physmap_vtop = static_memory_end; + physmap_vtop = physmap_end; kasan_init(); #endif /* KASAN */ @@ -1737,7 +2000,7 @@ arm_vm_init(uint64_t memory_size, boot_args * args) set_tbi(); - arm_vm_physmap_init(args, physmap_base, dynamic_memory_begin); + arm_vm_physmap_init(args); set_mmu_ttb_alternate(cpu_ttep & TTBR_BADDR_MASK); @@ -1752,15 +2015,49 @@ arm_vm_init(uint64_t memory_size, boot_args * args) cpu_tte = (tt_entry_t*)(phystokv(cpu_ttep)); invalid_tte = (tt_entry_t*)(phystokv(invalid_ttep)); + // From here on out, we're off the bootstrap translation tables. + + + /* AuxKC initialization has to be deferred until this point, since + * the AuxKC may not have been fully mapped in the bootstrap + * tables, if it spilled downwards into the prior L2 block. + * + * Now that its mapping set up by arm_vm_prot_init() is active, + * we can traverse and fix it up. + */ + + if (arm_vm_auxkc_init()) { + if (segLOWESTROAuxKC < segLOWESTRO) { + segLOWESTRO = segLOWESTROAuxKC; + } + if (segHIGHESTROAuxKC > segHIGHESTRO) { + segHIGHESTRO = segHIGHESTROAuxKC; + } + if (segLOWESTRXAuxKC < segLOWESTTEXT) { + segLOWESTTEXT = segLOWESTRXAuxKC; + } + assert(segLOWEST == segLOWESTAuxKC); + + // The preliminary auxKC mapping has been broken up. 
+ flush_mmu_tlb(); + } + sane_size = mem_size - (avail_start - gPhysBase); max_mem = mem_size; vm_kernel_slid_base = segLOWESTTEXT; vm_kernel_slid_top = vm_prelink_einfo; - vm_kernel_slide = segTEXTB-VM_KERNEL_LINK_ADDRESS; + // vm_kernel_slide is set by arm_init()->arm_slide_rebase_and_sign_image() vm_kernel_stext = segTEXTB; - assert(segDATACONSTB == segTEXTB + segSizeTEXT); - assert(segTEXTEXECB == segDATACONSTB + segSizeDATACONST); - vm_kernel_etext = segTEXTB + segSizeTEXT + segSizeDATACONST + segSizeTEXTEXEC; + + if (kernel_mach_header_is_in_fileset(&_mh_execute_header)) { + // fileset has kext TEXT before kernel DATA_CONST + assert(segTEXTEXECB == segTEXTB + segSizeTEXT); + vm_kernel_etext = segTEXTB + segSizeTEXT + segSizeTEXTEXEC; + } else { + assert(segDATACONSTB == segTEXTB + segSizeTEXT); + assert(segTEXTEXECB == segDATACONSTB + segSizeDATACONST); + vm_kernel_etext = segTEXTB + segSizeTEXT + segSizeDATACONST + segSizeTEXTEXEC; + } dynamic_memory_begin = ROUND_TWIG(dynamic_memory_begin); #if defined(KERNEL_INTEGRITY_CTRR) && defined(CONFIG_XNUPOST) @@ -1805,14 +2102,13 @@ arm_vm_init(uint64_t memory_size, boot_args * args) cpu_l1_tte = cpu_tte + ((va_l1 & ARM_TT_L1_INDEX_MASK) >> ARM_TT_L1_SHIFT); while (va_l1 < va_l1_end) { - va_l2 = va_l1; - if (((va_l1 & ~ARM_TT_L1_OFFMASK)+ARM_TT_L1_SIZE) < va_l1) { + if (((va_l1 & ~ARM_TT_L1_OFFMASK) + ARM_TT_L1_SIZE) < va_l1) { /* If this is the last L1 entry, it must cover the last mapping. */ va_l2_end = va_l1_end; } else { - va_l2_end = MIN((va_l1 & ~ARM_TT_L1_OFFMASK)+ARM_TT_L1_SIZE, va_l1_end); + va_l2_end = MIN((va_l1 & ~ARM_TT_L1_OFFMASK) + ARM_TT_L1_SIZE, va_l1_end); } cpu_l2_tte = ((tt_entry_t *) phystokv(((*cpu_l1_tte) & ARM_TTE_TABLE_MASK))) + ((va_l2 & ARM_TT_L2_INDEX_MASK) >> ARM_TT_L2_SHIFT); @@ -1825,37 +2121,51 @@ arm_vm_init(uint64_t memory_size, boot_args * args) ptp = (pt_entry_t *) alloc_ptpage(FALSE); ptp_phys = (pmap_paddr_t)kvtophys((vm_offset_t)ptp); - pmap_init_pte_page(kernel_pmap, ptp, va_l2, 3, TRUE, TRUE); + bzero(ptp, ARM_PGBYTES); + pmap_init_pte_page(kernel_pmap, ptp, va_l2, 3, TRUE); - *cpu_l2_tte = (pa_to_tte (ptp_phys)) | ARM_TTE_TYPE_TABLE | ARM_TTE_VALID | ARM_DYNAMIC_TABLE_XN; + *cpu_l2_tte = (pa_to_tte(ptp_phys)) | ARM_TTE_TYPE_TABLE | ARM_TTE_VALID | ARM_DYNAMIC_TABLE_XN; va_l2 += ARM_TT_L2_SIZE; cpu_l2_tte++; - }; + } va_l1 = va_l2_end; cpu_l1_tte++; } +#if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR) + /* + * In this configuration, the bootstrap mappings (arm_vm_init) and + * the heap mappings occupy separate L1 regions. Explicitly set up + * the heap L1 allocations here. 
+ */ +#if defined(ARM_LARGE_MEMORY) + init_ptpages(cpu_tte, KERNEL_PMAP_HEAP_RANGE_START & ~ARM_TT_L1_OFFMASK, VM_MAX_KERNEL_ADDRESS, FALSE, ARM_DYNAMIC_TABLE_XN); +#else // defined(ARM_LARGE_MEMORY) + va_l1 = VM_MIN_KERNEL_ADDRESS & ~ARM_TT_L1_OFFMASK; + init_ptpages(cpu_tte, VM_MIN_KERNEL_ADDRESS & ~ARM_TT_L1_OFFMASK, VM_MAX_KERNEL_ADDRESS, FALSE, ARM_DYNAMIC_TABLE_XN); +#endif // defined(ARM_LARGE_MEMORY) +#endif // defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR) + /* * Initialize l3 page table pages : * cover this address range: - * (VM_MAX_KERNEL_ADDRESS & CPUWINDOWS_BASE_MASK) - VM_MAX_KERNEL_ADDRESS + * ((VM_MAX_KERNEL_ADDRESS & CPUWINDOWS_BASE_MASK) - PE_EARLY_BOOT_VA) to VM_MAX_KERNEL_ADDRESS */ - va_l1 = VM_MAX_KERNEL_ADDRESS & CPUWINDOWS_BASE_MASK; + va_l1 = (VM_MAX_KERNEL_ADDRESS & CPUWINDOWS_BASE_MASK) - PE_EARLY_BOOT_VA; va_l1_end = VM_MAX_KERNEL_ADDRESS; cpu_l1_tte = cpu_tte + ((va_l1 & ARM_TT_L1_INDEX_MASK) >> ARM_TT_L1_SHIFT); while (va_l1 < va_l1_end) { - va_l2 = va_l1; - if (((va_l1 & ~ARM_TT_L1_OFFMASK)+ARM_TT_L1_SIZE) < va_l1) { + if (((va_l1 & ~ARM_TT_L1_OFFMASK) + ARM_TT_L1_SIZE) < va_l1) { /* If this is the last L1 entry, it must cover the last mapping. */ va_l2_end = va_l1_end; } else { - va_l2_end = MIN((va_l1 & ~ARM_TT_L1_OFFMASK)+ARM_TT_L1_SIZE, va_l1_end); + va_l2_end = MIN((va_l1 & ~ARM_TT_L1_OFFMASK) + ARM_TT_L1_SIZE, va_l1_end); } cpu_l2_tte = ((tt_entry_t *) phystokv(((*cpu_l1_tte) & ARM_TTE_TABLE_MASK))) + ((va_l2 & ARM_TT_L2_INDEX_MASK) >> ARM_TT_L2_SHIFT); @@ -1868,42 +2178,19 @@ arm_vm_init(uint64_t memory_size, boot_args * args) ptp = (pt_entry_t *) alloc_ptpage(FALSE); ptp_phys = (pmap_paddr_t)kvtophys((vm_offset_t)ptp); - pmap_init_pte_page(kernel_pmap, ptp, va_l2, 3, TRUE, TRUE); + bzero(ptp, ARM_PGBYTES); + pmap_init_pte_page(kernel_pmap, ptp, va_l2, 3, TRUE); - *cpu_l2_tte = (pa_to_tte (ptp_phys)) | ARM_TTE_TYPE_TABLE | ARM_TTE_VALID | ARM_DYNAMIC_TABLE_XN; + *cpu_l2_tte = (pa_to_tte(ptp_phys)) | ARM_TTE_TYPE_TABLE | ARM_TTE_VALID | ARM_DYNAMIC_TABLE_XN; va_l2 += ARM_TT_L2_SIZE; cpu_l2_tte++; - }; + } va_l1 = va_l2_end; cpu_l1_tte++; } -#if __ARM64_PMAP_SUBPAGE_L1__ && __ARM_16K_PG__ - /* - * In this configuration, the bootstrap mappings (arm_vm_init) and - * the heap mappings occupy separate L1 regions. Explicitly set up - * the heap L1 allocations here. - */ - va_l1 = VM_MIN_KERNEL_ADDRESS & ~ARM_TT_L1_OFFMASK; - cpu_l1_tte = cpu_tte + ((va_l1 & ARM_TT_L1_INDEX_MASK) >> ARM_TT_L1_SHIFT); - - while ((va_l1 >= (VM_MIN_KERNEL_ADDRESS & ~ARM_TT_L1_OFFMASK)) && (va_l1 < VM_MAX_KERNEL_ADDRESS)) { - /* - * If the L1 entry has not yet been allocated, allocate it - * now and treat it as a heap table. 
- */ - if (*cpu_l1_tte == ARM_TTE_EMPTY) { - tt_entry_t *new_tte = (tt_entry_t*)alloc_ptpage(FALSE); - bzero(new_tte, ARM_PGBYTES); - *cpu_l1_tte = (kvtophys((vm_offset_t)new_tte) & ARM_TTE_TABLE_MASK) | ARM_TTE_TYPE_TABLE | ARM_TTE_VALID | ARM_DYNAMIC_TABLE_XN; - } - - cpu_l1_tte++; - va_l1 += ARM_TT_L1_SIZE; - } -#endif /* * Adjust avail_start so that the range that the VM owns @@ -1918,4 +2205,3 @@ arm_vm_init(uint64_t memory_size, boot_args * args) patch_low_glo_static_region(args->topOfKernelData, avail_start - args->topOfKernelData); enable_preemption(); } - diff --git a/osfmk/arm64/bsd_arm64.c b/osfmk/arm64/bsd_arm64.c index 0a76b1caf..e4ac467e5 100644 --- a/osfmk/arm64/bsd_arm64.c +++ b/osfmk/arm64/bsd_arm64.c @@ -54,7 +54,12 @@ #include +#if CONFIG_MACF +#include +#endif + extern void throttle_lowpri_io(int); +extern arm_debug_state64_t *find_or_allocate_debug_state64(thread_t thread); void mach_syscall(struct arm_saved_state*); typedef kern_return_t (*mach_call_t)(void *); @@ -116,10 +121,37 @@ arm_get_mach_syscall_args(struct arm_saved_state *state, struct mach_call_args * return KERN_SUCCESS; } +/** + * Marks or unmarks the given thread to be single stepped such + * that it executes exactly one instruction and then takes an exception to + * prevent further execution. + * + * @param thread 64 bit thread to be single stepped + * @param on boolean value representing whether the thread should be + * single stepped (on is true) or not (on is false) + * + * @returns KERN_SUCCESS if the status is successfully set or KERN_FAILURE if + * it fails for any reason. + */ kern_return_t -thread_setsinglestep(__unused thread_t thread, __unused int on) +thread_setsinglestep(thread_t thread, int on) { - return KERN_FAILURE; /* XXX TODO */ + arm_debug_state64_t *thread_state = find_or_allocate_debug_state64(thread); + + if (thread_state == NULL) { + return KERN_FAILURE; + } + + if (on) { + thread_state->mdscr_el1 |= MDSCR_SS; + } else { + thread_state->mdscr_el1 &= ~MDSCR_SS; + } + + if (thread == current_thread()) { + arm_debug_set64(thread->machine.DebugData); + } + return KERN_SUCCESS; } #if CONFIG_DTRACE @@ -201,8 +233,34 @@ mach_syscall(struct arm_saved_state *state) MACHDBG_CODE(DBG_MACH_EXCP_SC, (call_number)) | DBG_FUNC_START, args.arg1, args.arg2, args.arg3, args.arg4, 0); +#if CONFIG_MACF + /* + * Check syscall filter mask, if exists. + * + * Not all mach traps are filtered. e.g., mach_absolute_time() and + * mach_continuous_time(). See handle_svc(). + */ + task_t task = current_task(); + uint8_t *filter_mask = task->mach_trap_filter_mask; + + if (__improbable(filter_mask != NULL && + !bitstr_test(filter_mask, call_number))) { + if (mac_task_mach_trap_evaluate != NULL) { + retval = mac_task_mach_trap_evaluate(get_bsdtask_info(task), + call_number); + if (retval) { + goto skip_machcall; + } + } + } +#endif /* CONFIG_MACF */ + retval = mach_call(&args); +#if CONFIG_MACF +skip_machcall: +#endif + DEBUG_KPRINT_SYSCALL_MACH("mach_syscall: retval=0x%x (pid %d, tid %lld)\n", retval, proc_pid(current_proc()), thread_tid(current_thread())); diff --git a/osfmk/arm64/caches_asm.s b/osfmk/arm64/caches_asm.s index 87caca6e8..958bc1936 100644 --- a/osfmk/arm64/caches_asm.s +++ b/osfmk/arm64/caches_asm.s @@ -83,6 +83,60 @@ L_ipui_done: POP_FRAME ARM64_STACK_EPILOG +/* + * Obtains cache physical layout information required for way/set + * data cache maintenance operations. 
+ * + * $0: Data cache level, starting from 0 + * $1: Output register for set increment + * $2: Output register for last valid set + * $3: Output register for way increment + */ +.macro GET_CACHE_CONFIG + lsl $0, $0, #1 + msr CSSELR_EL1, $0 // Select appropriate cache + isb // Synchronize context + + mrs $0, CCSIDR_EL1 + ubfx $1, $0, #3, #10 // extract number of ways - 1 + mov $2, $1 + add $1, $1, #1 // calculate number of ways + + mov $0, #63 + and $2, $2, $1 + cmp $2, #0 + cinc $0, $0, ne + clz $1, $1 + sub $0, $0, $1 + + mov $1, #32 // calculate way increment + sub $3, $1, $0 + mov $1, #1 + lsl $3, $1, $3 + + mrs $0, CCSIDR_EL1 + ubfx $1, $0, #0, #3 // extract log2(line size) - 4 + add $1, $1, #4 // calculate log2(line size) + mov $2, #1 + lsl $1, $2, $1 // calculate set increment + + ubfx $2, $0, #13, #15 // extract number of sets - 1 + add $2, $2, #1 // calculate number of sets + mul $2, $1, $2 // calculate last valid set +.endmacro + +/* + * Detects the presence of an L2 cache and returns 1 if implemented, + * zero otherwise. + * + * $0: Output register + */ +.macro HAS_L2_CACHE + mrs $0, CLIDR_EL1 + ubfx $0, $0, #3, #3 // extract L2 cache Ctype + cmp $0, #0x1 + cset $0, hi +.endmacro /* * void CleanPoC_Dcache(void) @@ -98,35 +152,37 @@ LEXT(CleanPoC_Dcache) /* "Fully Coherent." */ #else /* !defined(APPLE_ARM64_ARCH_FAMILY) */ mov x0, #0 - mov x9, #(1 << MMU_I7SET) - mov x10, #(1 << (MMU_NSET + MMU_I7SET)) - mov x11, #(1 << MMU_I7WAY) + GET_CACHE_CONFIG x0, x9, x10, x11 + dmb sy + mov x0, #0 L_cpcd_dcacheway: L_cpcd_dcacheline: dc csw, x0 // clean dcache line by way/set add x0, x0, x9 // increment set index - tst x0, #(1 << (MMU_NSET + MMU_I7SET)) // look for overflow + tst x0, x10 // look for overflow b.eq L_cpcd_dcacheline bic x0, x0, x10 // clear set overflow adds w0, w0, w11 // increment way b.cc L_cpcd_dcacheway // loop -#if __ARM_L2CACHE__ - mov x0, #2 - mov x9, #(1 << L2_I7SET) - mov x10, #(1 << (L2_NSET + L2_I7SET)) - mov x11, #(1 << L2_I7WAY) + + HAS_L2_CACHE x0 + cbz x0, L_cpcd_skipl2dcache + mov x0, #1 + GET_CACHE_CONFIG x0, x9, x10, x11 + dsb sy + mov x0, #2 L_cpcd_l2dcacheway: L_cpcd_l2dcacheline: dc csw, x0 // clean dcache line by way/set add x0, x0, x9 // increment set index - tst x0, #(1 << (L2_NSET + L2_I7SET)) // look for overflow + tst x0, x10 // look for overflow b.eq L_cpcd_l2dcacheline bic x0, x0, x10 // clear set overflow adds w0, w0, w11 // increment way b.cc L_cpcd_l2dcacheway // loop -#endif +L_cpcd_skipl2dcache: #endif /* defined(APPLE_ARM64_ARCH_FAMILY) */ dsb sy ret @@ -144,20 +200,20 @@ LEXT(CleanPoU_Dcache) /* "Fully Coherent." 
*/ #else /* !defined(APPLE_ARM64_ARCH_FAMILY) */ mov x0, #0 - mov x9, #(1 << MMU_I7SET) - mov x10, #(1 << (MMU_NSET + MMU_I7SET)) - mov x11, #(1 << MMU_I7WAY) + GET_CACHE_CONFIG x0, x9, x10, x11 + dmb sy + mov x0, #0 L_cpud_dcacheway: L_cpud_dcacheline: dc csw, x0 // clean dcache line by way/set add x0, x0, x9 // increment set index - tst x0, #(1 << (MMU_NSET + MMU_I7SET)) // look for overflow + tst x0, x10 // look for overflow b.eq L_cpud_dcacheline bic x0, x0, x10 // clear set overflow adds w0, w0, w11 // increment way b.cc L_cpud_dcacheway // loop -#endif /* defined(APPLE_ARM64_ARCH_FAMILY) */ + #endif /* defined(APPLE_ARM64_ARCH_FAMILY) */ dsb sy ret @@ -190,7 +246,7 @@ L_cpudr_loop: ret /* - * void CleanPoC_DcacheRegion_internal(vm_offset_t va, unsigned length) + * void CleanPoC_DcacheRegion_internal(vm_offset_t va, size_t length) * * Clean d-cache region to Point of Coherency */ @@ -221,7 +277,7 @@ L_cpcdr_loop: ret /* - * void CleanPoC_DcacheRegion(vm_offset_t va, unsigned length) + * void CleanPoC_DcacheRegion(vm_offset_t va, size_t length) * * Clean d-cache region to Point of Coherency */ @@ -262,7 +318,7 @@ LEXT(CleanPoC_DcacheRegion_Force_nopreempt) #endif // APPLE_ARM64_ARCH_FAMILY /* - * void CleanPoC_DcacheRegion_Force(vm_offset_t va, unsigned length) + * void CleanPoC_DcacheRegion_Force(vm_offset_t va, size_t length) * * Clean d-cache region to Point of Coherency - when you really * need to flush even on coherent platforms, e.g. panic log @@ -298,35 +354,37 @@ LEXT(FlushPoC_Dcache) /* "Fully Coherent." */ #else /* !defined(APPLE_ARM64_ARCH_FAMILY) */ mov x0, #0 - mov x9, #(1 << MMU_I7SET) - mov x10, #(1 << (MMU_NSET + MMU_I7SET)) - mov x11, #(1 << MMU_I7WAY) + GET_CACHE_CONFIG x0, x9, x10, x11 + dmb sy + mov x0, #0 L_fpcd_dcacheway: L_fpcd_dcacheline: dc cisw, x0 // clean invalidate dcache line by way/set add x0, x0, x9 // increment set index - tst x0, #(1 << (MMU_NSET + MMU_I7SET)) // look for overflow + tst x0, x10 // look for overflow b.eq L_fpcd_dcacheline bic x0, x0, x10 // clear set overflow adds w0, w0, w11 // increment way b.cc L_fpcd_dcacheway // loop -#if __ARM_L2CACHE__ + + HAS_L2_CACHE x0 + cbz x0, L_fpcd_skipl2dcache dsb sy + mov x0, #1 + GET_CACHE_CONFIG x0, x9, x10, x11 + mov x0, #2 - mov x9, #(1 << L2_I7SET) - mov x10, #(1 << (L2_NSET + L2_I7SET)) - mov x11, #(1 << L2_I7WAY) L_fpcd_l2dcacheway: L_fpcd_l2dcacheline: dc cisw, x0 // clean invalide dcache line by way/set add x0, x0, x9 // increment set index - tst x0, #(1 << (L2_NSET + L2_I7SET)) // look for overflow + tst x0, x10 // look for overflow b.eq L_fpcd_l2dcacheline bic x0, x0, x10 // clear set overflow adds w0, w0, w11 // increment way b.cc L_fpcd_l2dcacheway // loop -#endif +L_fpcd_skipl2dcache: #endif /* defined(APPLE_ARM64_ARCH_FAMILY) */ dsb sy ret @@ -344,15 +402,15 @@ LEXT(FlushPoU_Dcache) /* "Fully Coherent." 
*/ #else /* !defined(APPLE_ARM64_ARCH_FAMILY) */ mov x0, #0 - mov x9, #(1 << MMU_I7SET) - mov x10, #(1 << (MMU_NSET + MMU_I7SET)) - mov x11, #(1 << MMU_I7WAY) + GET_CACHE_CONFIG x0, x9, x10, x11 + dmb sy + mov x0, #0 L_fpud_way: L_fpud_line: dc cisw, x0 // clean invalidate dcache line by way/set add x0, x0, x9 // increment set index - tst x0, #1 << (MMU_NSET + MMU_I7SET) // look for overflow + tst x0, x10 // look for overflow b.eq L_fpud_line bic x0, x0, x10 // clear set overflow adds w0, w0, w11 // increment way diff --git a/osfmk/arm64/copyio.c b/osfmk/arm64/copyio.c index af47b7201..c7181aa50 100644 --- a/osfmk/arm64/copyio.c +++ b/osfmk/arm64/copyio.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012-2013 Apple Inc. All rights reserved. + * Copyright (c) 2012-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -29,6 +29,7 @@ #include #include #include +#include #include #include #include @@ -50,8 +51,8 @@ extern int copyoutstr_prevalidate(const void *kaddr, user_addr_t uaddr, size_t l extern pmap_t kernel_pmap; -/* On by default, optionally disabled by boot-arg */ -extern boolean_t copyio_zalloc_check; +extern const vm_map_address_t physmap_base; +extern const vm_map_address_t physmap_end; /*! * @typedef copyio_flags_t @@ -148,9 +149,15 @@ copy_validate(const user_addr_t user_addr, uintptr_t kernel_addr, } if ((flags & COPYIO_VALIDATE_USER_ONLY) == 0) { - if (__improbable((kernel_addr < VM_MIN_KERNEL_ADDRESS) || - os_add_overflow(kernel_addr, nbytes, &kernel_addr_last) || - (kernel_addr_last > VM_MAX_KERNEL_ADDRESS))) { + if (__improbable(os_add_overflow(kernel_addr, nbytes, &kernel_addr_last))) { + panic("%s(%p, %p, %lu) - kaddr not in kernel", __func__, + (void *)user_addr, (void *)kernel_addr, nbytes); + } + + bool in_kva = (kernel_addr >= VM_MIN_KERNEL_ADDRESS) && (kernel_addr_last <= VM_MAX_KERNEL_ADDRESS); + bool in_physmap = (kernel_addr >= physmap_base) && (kernel_addr_last <= physmap_end); + + if (__improbable(!(in_kva || in_physmap))) { panic("%s(%p, %p, %lu) - kaddr not in kernel", __func__, (void *)user_addr, (void *)kernel_addr, nbytes); } @@ -168,9 +175,15 @@ copy_validate(const user_addr_t user_addr, uintptr_t kernel_addr, } if ((flags & COPYIO_VALIDATE_USER_ONLY) == 0) { - if (__probable(copyio_zalloc_check)) { - vm_size_t kernel_buf_size = zone_element_size((void *)kernel_addr, NULL); - if (__improbable(kernel_buf_size && kernel_buf_size < nbytes)) { + if (__probable(!zalloc_disable_copyio_check)) { + zone_t src_zone = NULL; + vm_size_t kernel_buf_size = zone_element_size((void *)kernel_addr, &src_zone); + /* + * Size of elements in the permanent zone is not saved as a part of the + * zone's info + */ + if (__improbable(src_zone && !src_zone->permanent && + kernel_buf_size < nbytes)) { panic("copyio_preflight: kernel buffer 0x%lx has size %lu < nbytes %lu", kernel_addr, kernel_buf_size, nbytes); } diff --git a/osfmk/arm64/corecrypto/arm64_isa_compatibility.h b/osfmk/arm64/corecrypto/arm64_isa_compatibility.h new file mode 100644 index 000000000..b9549da35 --- /dev/null +++ b/osfmk/arm64/corecrypto/arm64_isa_compatibility.h @@ -0,0 +1,181 @@ +/* + * Copyright (c) 2019 Apple Inc. All rights reserved. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. 
The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. + * + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ + */ +// #include +#include + +#if defined(__clang__) && ((defined(__apple_build_version__) && __apple_build_version__ > 5010000)) +#define __USES_V_CRYPTO_INTRINSICS 1 +#else +#define __USES_V_CRYPTO_INTRINSICS 0 +#endif + + +// AES INSTRUCTIONS +// aese.16b v0, v1 +// aesd.16b v0, v1 +// aesmc.16b v0, v1 +// aesimc.16b v0, v1 + +// SHA1 INTRINSICS +// sha1su0.4s v0, v1, v2 +// sha1su1.4s v0, v1 +// sha1c.4s v0, v1, v2 // or q0, s1, v2.4s +// sha1m.4s v0, v1, v2 // or q0, s1, v2.4s +// sha1p.4s v0, v1, v2 // or q0, s1, v2.4s +// sha1h.4s v0, v1 // or s0, s1 + +// SHA256 INTRINSICS +// sha256su0.4s v0, v1 +// sha256su1.4s v0, v1, v2 +// sha256h.4s v0, v1, v2 // or q0, q1, v2.4s +// sha256h2.4s v0, v1, v2 // or q0, q1, v2.4s + + +#if __USES_V_CRYPTO_INTRINSICS == 1 +.macro AESE +aese.16b v$0, v$1 +.endm + +.macro AESD +aesd.16b v$0, v$1 +.endm + +.macro AESMC +aesmc.16b v$0, v$1 +.endm + +.macro AESIMC +aesimc.16b v$0, v$1 +.endm + + +#else + +.macro AESE +aese q$0, q$1 +.endm + +.macro AESD +aesd q$0, q$1 +.endm + +.macro AESMC +aesmc q$0, q$1 +.endm + +.macro AESIMC +aesimc q$0, q$1 +.endm + +#endif + +#if __USES_V_CRYPTO_INTRINSICS == 1 + +.macro SHA1SU0 +sha1su0 v$0.4s, v$1.4s, v$2.4s +.endm + +.macro SHA1SU1 +sha1su1 v$0.4s, v$1.4s +.endm + +.macro SHA1C +sha1c q$0, s$1, v$2.4s +.endm + +.macro SHA1M +sha1m q$0, s$1, v$2.4s +.endm + +.macro SHA1P +sha1p q$0, s$1, v$2.4s +.endm + +.macro SHA1H +sha1h s$0, s$1 +.endm + +.macro SHA256SU0 +sha256su0 v$0.4s, v$1.4s +.endm + +.macro SHA256SU1 +sha256su1 v$0.4s, v$1.4s, v$2.4s +.endm + +.macro SHA256H +sha256h q$0, q$1, v$2.4s +.endm + +.macro SHA256H2 +sha256h2 q$0, q$1, v$2.4s +.endm + +#else + +.macro SHA1SU0 +sha1su0 q$0, q$1, q$2 +.endm + +.macro SHA1SU1 +sha1su1 q$0, q$1 +.endm + +.macro SHA1C +sha1c q$0, q$1, q$2 +.endm + +.macro SHA1M +sha1m q$0, q$1, q$2 +.endm + +.macro SHA1P +sha1p q$0, q$1, q$2 +.endm + +.macro SHA1H +sha1h q$0, q$1 +.endm + +.macro SHA256SU0 +sha256su0 q$0, q$1 +.endm + +.macro SHA256SU1 +sha256su1 q$0, q$1, q$2 +.endm + +.macro SHA256H +sha256h q$0, q$1, q$2 +.endm + +.macro SHA256H2 +sha256h2 q$0, q$1, q$2 +.endm + +#endif diff --git a/osfmk/arm64/corecrypto/sha256_compress_arm64.s b/osfmk/arm64/corecrypto/sha256_compress_arm64.s new file mode 100644 index 000000000..98b71c075 --- /dev/null +++ b/osfmk/arm64/corecrypto/sha256_compress_arm64.s @@ -0,0 +1,405 @@ +/* + * Copyright (c) 2019 Apple Inc. All rights reserved. 
+ * + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. + * + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ + */ +/* + This file provides armv7+neon hand implementation of the following function + + void SHA256_Transform(SHA256_ctx *ctx, char *data, unsigned int num_blocks); + + which is a C function in sha2.c (from xnu). + + sha256 algorithm per block description: + + 1. W(0:15) = big-endian (per 4 bytes) loading of input data (64 byte) + 2. load 8 digests a-h from ctx->state + 3. for r = 0:15 + T1 = h + Sigma1(e) + Ch(e,f,g) + K[r] + W[r]; + d += T1; + h = T1 + Sigma0(a) + Maj(a,b,c) + permute a,b,c,d,e,f,g,h into h,a,b,c,d,e,f,g + 4. 
for r = 16:63 + W[r] = W[r-16] + sigma1(W[r-2]) + W[r-7] + sigma0(W[r-15]); + T1 = h + Sigma1(e) + Ch(e,f,g) + K[r] + W[r]; + d += T1; + h = T1 + Sigma0(a) + Maj(a,b,c) + permute a,b,c,d,e,f,g,h into h,a,b,c,d,e,f,g + + In the assembly implementation: + - a circular window of message schedule W(r:r+15) is updated and stored in q0-q3 + - its corresponding W+K(r:r+15) is updated and stored in a stack space circular buffer + - the 8 digests (a-h) will be stored in GPR or memory + + the implementation per block looks like + + ---------------------------------------------------------------------------- + + load W(0:15) (big-endian per 4 bytes) into q0:q3 + pre_calculate and store W+K(0:15) in stack + + load digests a-h from ctx->state; + + for (r=0;r<48;r+=4) { + digests a-h update and permute round r:r+3 + update W([r:r+3]%16) and WK([r:r+3]%16) for the next 4th iteration + } + + for (r=48;r<64;r+=4) { + digests a-h update and permute round r:r+3 + } + + ctx->states += digests a-h; + + ---------------------------------------------------------------------------- + + our implementation (allows multiple blocks per call) pipelines the loading of W/WK of a future block + into the last 16 rounds of its previous block: + + ---------------------------------------------------------------------------- + + load W(0:15) (big-endian per 4 bytes) into q0:q3 + pre_calculate and store W+K(0:15) in stack + +L_loop: + + load digests a-h from ctx->state; + + for (r=0;r<48;r+=4) { + digests a-h update and permute round r:r+3 + update W([r:r+3]%16) and WK([r:r+3]%16) for the next 4th iteration + } + + num_block--; + if (num_block==0) jmp L_last_block; + + for (r=48;r<64;r+=4) { + digests a-h update and permute round r:r+3 + load W([r:r+3]%16) (big-endian per 4 bytes) into q0:q3 + pre_calculate and store W+K([r:r+3]%16) in stack + } + + ctx->states += digests a-h; + + jmp L_loop; + +L_last_block: + + for (r=48;r<64;r+=4) { + digests a-h update and permute round r:r+3 + } + + ctx->states += digests a-h; + + ------------------------------------------------------------------------ + + Apple CoreOS vector & numerics +*/ + +#if defined(__arm64__) + +#include "arm64_isa_compatibility.h" + +.subsections_via_symbols + .text + + .p2align 4 + +K256: + .long 0x428a2f98 + .long 0x71374491 + .long 0xb5c0fbcf + .long 0xe9b5dba5 + .long 0x3956c25b + .long 0x59f111f1 + .long 0x923f82a4 + .long 0xab1c5ed5 + .long 0xd807aa98 + .long 0x12835b01 + .long 0x243185be + .long 0x550c7dc3 + .long 0x72be5d74 + .long 0x80deb1fe + .long 0x9bdc06a7 + .long 0xc19bf174 + .long 0xe49b69c1 + .long 0xefbe4786 + .long 0x0fc19dc6 + .long 0x240ca1cc + .long 0x2de92c6f + .long 0x4a7484aa + .long 0x5cb0a9dc + .long 0x76f988da + .long 0x983e5152 + .long 0xa831c66d + .long 0xb00327c8 + .long 0xbf597fc7 + .long 0xc6e00bf3 + .long 0xd5a79147 + .long 0x06ca6351 + .long 0x14292967 + .long 0x27b70a85 + .long 0x2e1b2138 + .long 0x4d2c6dfc + .long 0x53380d13 + .long 0x650a7354 + .long 0x766a0abb + .long 0x81c2c92e + .long 0x92722c85 + .long 0xa2bfe8a1 + .long 0xa81a664b + .long 0xc24b8b70 + .long 0xc76c51a3 + .long 0xd192e819 + .long 0xd6990624 + .long 0xf40e3585 + .long 0x106aa070 + .long 0x19a4c116 + .long 0x1e376c08 + .long 0x2748774c + .long 0x34b0bcb5 + .long 0x391c0cb3 + .long 0x4ed8aa4a + .long 0x5b9cca4f + .long 0x682e6ff3 + .long 0x748f82ee + .long 0x78a5636f + .long 0x84c87814 + .long 0x8cc70208 + .long 0x90befffa + .long 0xa4506ceb + .long 0xbef9a3f7 + .long 0xc67178f2 + + + .p2align 4 + + .globl _AccelerateCrypto_SHA256_compress 
+_AccelerateCrypto_SHA256_compress: + + + #define hashes x0 + #define numblocks x1 + #define data x2 + #define ktable x3 + +#ifdef __ILP32__ + uxtw numblocks, numblocks // in arm64_32 size_t is 32-bit, so we need to extend it +#endif + + + adrp ktable, K256@page + cbnz numblocks, 1f // if number of blocks is nonzero, go on for sha256 transform operation + ret lr // otherwise, return +1: + add ktable, ktable, K256@pageoff + +#if BUILDKERNEL + // save q0-q7, q16-q24 8+8+1=19 + sub x4, sp, #17*16 + sub sp, sp, #17*16 + st1.4s {v0, v1, v2, v3}, [x4], #64 + st1.4s {v4, v5, v6, v7}, [x4], #64 + st1.4s {v16, v17, v18, v19}, [x4], #64 + st1.4s {v20, v21, v22, v23}, [x4], #64 + st1.4s {v24}, [x4], #16 +#endif + + ld1.4s {v0,v1,v2,v3}, [data], #64 // w0,w1,w2,w3 need to bswap into big-endian + + rev32.16b v0, v0 // byte swap of 1st 4 ints + ldr q21, [ktable, #16*0] + rev32.16b v1, v1 // byte swap of 2nd 4 ints + ldr q16, [hashes, #0] + rev32.16b v2, v2 // byte swap of 3rd 4 ints + ldr q17, [hashes, #16] + rev32.16b v3, v3 // byte swap of 4th 4 ints + ldr q22, [ktable, #16*1] + + mov.16b v18, v16 + ldr q23, [ktable, #16*2] + add.4s v4, v0, v21 // 1st 4 input + K256 + ldr q24, [ktable, #16*3] + add.4s v5, v1, v22 // 2nd 4 input + K256 + mov.16b v19, v17 + add.4s v6, v2, v23 // 3rd 4 input + K256 + add.4s v7, v3, v24 // 4th 4 input + K256 + add ktable, ktable, #16*4 + + + .macro sha256_round + mov.16b v20, v18 + SHA256SU0 $0, $1 + SHA256H 18, 19, $4 + SHA256SU1 $0, $2, $3 + SHA256H2 19, 20, $4 + add.4s $6, $5, $7 + .endm + + // 4 vector hashes update and load next vector rounds + .macro sha256_hash_load_round + mov.16b v20, v18 + SHA256H 18, 19, $0 + rev32.16b $1, $1 + SHA256H2 19, 20, $0 + add.4s $2, $1, $3 + .endm + + .macro sha256_hash_round + mov.16b v20, v18 + SHA256H 18, 19, $0 + SHA256H2 19, 20, $0 + .endm + + // 12 vector hash and sequence update rounds + mov w4, #3 +L_i_loop: + mov.16b v20, v18 + ldr q21, [ktable, #0] // k0 + SHA256SU0 0, 1 + ldr q22, [ktable, #16] // k1 + SHA256H 18, 19, 4 + ldr q23, [ktable, #32] // k2 + SHA256SU1 0, 2, 3 + ldr q24, [ktable, #48] // k3 + SHA256H2 19, 20, 4 + add ktable, ktable, #64 + add.4s v4, v0, v21 + + sha256_round 1, 2, 3, 0, 5, v1, v5, v22 + sha256_round 2, 3, 0, 1, 6, v2, v6, v23 + subs w4, w4, #1 + sha256_round 3, 0, 1, 2, 7, v3, v7, v24 + b.gt L_i_loop + + subs numblocks, numblocks, #1 // pre-decrement num_blocks by 1 + b.le L_wrapup + + sub ktable, ktable, #256 + +L_loop: + + ldr q0, [data, #0] + mov.16b v20, v18 + ldr q21, [ktable,#0] + SHA256H 18, 19, 4 + ldr q1, [data, #16] + rev32.16b v0, v0 + ldr q2, [data, #32] + SHA256H2 19, 20, 4 + ldr q3, [data, #48] + add.4s v4, v0, v21 + + ldr q22, [ktable,#16] + mov.16b v20, v18 + add data, data, #64 + SHA256H 18, 19, 5 + ldr q23, [ktable,#32] + rev32.16b v1, v1 + ldr q24, [ktable,#48] + SHA256H2 19, 20, 5 + add.4s v5, v1, v22 + + sha256_hash_load_round 6, v2, v6, v23 + sha256_hash_load_round 7, v3, v7, v24 + + add.4s v18, v16, v18 + add.4s v19, v17, v19 + mov.16b v16, v18 + mov.16b v17, v19 + + // 12 vector hash and sequence update rounds + mov.16b v20, v18 + ldr q21, [ktable, #16*4] // k0 + SHA256SU0 0, 1 + ldr q22, [ktable, #16*5] // k1 + SHA256H 18, 19, 4 + ldr q23, [ktable, #16*6] // k2 + SHA256SU1 0, 2, 3 + ldr q24, [ktable, #16*7] // k3 + SHA256H2 19, 20, 4 + add.4s v4, v0, v21 + + sha256_round 1, 2, 3, 0, 5, v1, v5, v22 + sha256_round 2, 3, 0, 1, 6, v2, v6, v23 + sha256_round 3, 0, 1, 2, 7, v3, v7, v24 + mov.16b v20, v18 + ldr q21, [ktable, #16*8] // k0 + SHA256SU0 0, 1 + ldr q22, [ktable, 
#16*9] // k1 + SHA256H 18, 19, 4 + ldr q23, [ktable, #16*10] // k2 + SHA256SU1 0, 2, 3 + ldr q24, [ktable, #16*11] // k3 + SHA256H2 19, 20, 4 + add.4s v4, v0, v21 + + sha256_round 1, 2, 3, 0, 5, v1, v5, v22 + sha256_round 2, 3, 0, 1, 6, v2, v6, v23 + sha256_round 3, 0, 1, 2, 7, v3, v7, v24 + + mov.16b v20, v18 + ldr q21, [ktable, #16*12] // k0 + SHA256SU0 0, 1 + ldr q22, [ktable, #16*13] // k1 + SHA256H 18, 19, 4 + ldr q23, [ktable, #16*14] // k2 + SHA256SU1 0, 2, 3 + ldr q24, [ktable, #16*15] // k3 + SHA256H2 19, 20, 4 + add.4s v4, v0, v21 + + sha256_round 1, 2, 3, 0, 5, v1, v5, v22 + sha256_round 2, 3, 0, 1, 6, v2, v6, v23 + sha256_round 3, 0, 1, 2, 7, v3, v7, v24 + + subs numblocks, numblocks, #1 // pre-decrement num_blocks by 1 + b.gt L_loop + +L_wrapup: + + sha256_hash_round 4 + sha256_hash_round 5 + sha256_hash_round 6 + sha256_hash_round 7 + + add.4s v16, v16, v18 + add.4s v17, v17, v19 + st1.4s {v16,v17}, [hashes] // hashes q16 : d,c,b,a q17 : h,g,f,e + +#if BUILDKERNEL + // restore q9-q13, q0-q7, q16-q31 + ld1.4s {v0, v1, v2, v3}, [sp], #64 + ld1.4s {v4, v5, v6, v7}, [sp], #64 + ld1.4s {v16, v17, v18, v19}, [sp], #64 + ld1.4s {v20, v21, v22, v23}, [sp], #64 + ld1.4s {v24}, [sp], #16 +#endif + + ret lr + + +#endif // arm64 + diff --git a/osfmk/arm64/cpu.c b/osfmk/arm64/cpu.c index 1ab9c9f1d..fc65933e5 100644 --- a/osfmk/arm64/cpu.c +++ b/osfmk/arm64/cpu.c @@ -35,6 +35,7 @@ #include #include #include +#include #include #include #include @@ -67,6 +68,14 @@ #include #endif /* MONOTONIC */ +#if HIBERNATION +#include +#include +#endif /* HIBERNATION */ + + +#include + extern boolean_t idle_enable; extern uint64_t wake_abstime; @@ -82,7 +91,7 @@ extern uintptr_t start_cpu; extern void exc_vectors_table; #endif /* __ARM_KERNEL_PROTECT__ */ -extern void __attribute__((noreturn)) arm64_prepare_for_sleep(void); +extern void __attribute__((noreturn)) arm64_prepare_for_sleep(boolean_t deep_sleep); extern void arm64_force_wfi_clock_gate(void); #if defined(APPLETYPHOON) // @@ -118,8 +127,14 @@ static int wfi_flags = 0; static uint64_t wfi_delay = 0; #endif /* DEVELOPMENT || DEBUG */ - -static bool idle_wfe_to_deadline = false; +#if DEVELOPMENT || DEBUG +static bool idle_proximate_timer_wfe = true; +static bool idle_proximate_io_wfe = true; +#define CPUPM_IDLE_WFE 0x5310300 +#else +static const bool idle_proximate_timer_wfe = true; +static const bool idle_proximate_io_wfe = true; +#endif #if __ARM_GLOBAL_SLEEP_BIT__ volatile boolean_t arm64_stall_sleep = TRUE; @@ -184,7 +199,8 @@ arm64_ipi_test() return; } - for (unsigned int i = 0; i < MAX_CPUS; ++i) { + const unsigned int max_cpu_id = ml_get_max_cpu_number(); + for (unsigned int i = 0; i <= max_cpu_id; ++i) { ipi_test_data = &arm64_ipi_test_data[i]; immediate_ipi_test_data = &arm64_ipi_test_data[i + MAX_CPUS]; *ipi_test_data = ~i; @@ -223,25 +239,19 @@ arm64_ipi_test() static void configure_coresight_registers(cpu_data_t *cdp) { - uint64_t addr; int i; assert(cdp); + vm_offset_t coresight_regs = ml_get_topology_info()->cpus[cdp->cpu_number].coresight_regs; /* * ARMv8 coresight registers are optional. If the device tree did not - * provide cpu_regmap_paddr, assume that coresight registers are not - * supported. + * provide either cpu_regmap_paddr (from the legacy "reg-private" EDT property) + * or coresight_regs (from the new "coresight-reg" property), assume that + * coresight registers are not supported. 
*/ - if (cdp->cpu_regmap_paddr) { + if (cdp->cpu_regmap_paddr || coresight_regs) { for (i = 0; i < CORESIGHT_REGIONS; ++i) { - /* Skip CTI; these registers are debug-only (they are - * not present on production hardware), and there is - * at least one known Cyclone errata involving CTI - * (rdar://12802966). We have no known clients that - * need the kernel to unlock CTI, so it is safer - * to avoid doing the access. - */ if (i == CORESIGHT_CTI) { continue; } @@ -251,8 +261,12 @@ configure_coresight_registers(cpu_data_t *cdp) } if (!cdp->coresight_base[i]) { - addr = cdp->cpu_regmap_paddr + CORESIGHT_OFFSET(i); - cdp->coresight_base[i] = (vm_offset_t)ml_io_map(addr, CORESIGHT_SIZE); + if (coresight_regs) { + cdp->coresight_base[i] = coresight_regs + CORESIGHT_OFFSET(i); + } else { + uint64_t addr = cdp->cpu_regmap_paddr + CORESIGHT_OFFSET(i); + cdp->coresight_base[i] = (vm_offset_t)ml_io_map(addr, CORESIGHT_SIZE); + } /* * At this point, failing to io map the @@ -303,14 +317,21 @@ cpu_sleep(void) CleanPoC_Dcache(); - /* This calls: - * - * IOCPURunPlatformQuiesceActions when sleeping the boot cpu - * ml_arm_sleep() on all CPUs - * - * It does not return. - */ +#if USE_APPLEARMSMP + if (ml_is_quiescing()) { + PE_cpu_machine_quiesce(cpu_data_ptr->cpu_id); + } else { + bool deep_sleep = PE_cpu_down(cpu_data_ptr->cpu_id); + cpu_data_ptr->cpu_sleep_token = ARM_CPU_ON_SLEEP_PATH; + // hang CPU on spurious wakeup + cpu_data_ptr->cpu_reset_handler = (uintptr_t)0; + __builtin_arm_dsb(DSB_ISH); + CleanPoU_Dcache(); + arm64_prepare_for_sleep(deep_sleep); + } +#else PE_cpu_machine_quiesce(cpu_data_ptr->cpu_id); +#endif /*NOTREACHED*/ } @@ -320,7 +341,7 @@ cpu_sleep(void) * is implemented, this returns 0 if there are no * interrupts pending, so it can be used as a boolean test. */ -static int +int cpu_interrupt_is_pending(void) { uint64_t isr_value; @@ -328,6 +349,46 @@ cpu_interrupt_is_pending(void) return (int)isr_value; } +static bool +cpu_proximate_timer(void) +{ + return !SetIdlePop(); +} + +static bool +wfe_to_deadline_or_interrupt(uint32_t cid, uint64_t wfe_deadline, __unused cpu_data_t *cdp) +{ + bool ipending = false; + while ((ipending = (cpu_interrupt_is_pending() != 0)) == false) { + /* Assumes event stream enablement + * TODO: evaluate temporarily stretching the per-CPU event + * interval to a larger value for possible efficiency + * improvements. + */ + __builtin_arm_wfe(); +#if DEVELOPMENT || DEBUG + cdp->wfe_count++; +#endif + if (wfe_deadline != ~0ULL) { +#if DEVELOPMENT || DEBUG + cdp->wfe_deadline_checks++; +#endif + /* Check if the WFE recommendation has expired. + * We do not recompute the deadline here. + */ + if ((ml_cluster_wfe_timeout(cid) == 0) || + mach_absolute_time() >= wfe_deadline) { +#if DEVELOPMENT || DEBUG + cdp->wfe_terminations++; +#endif + break; + } + } + } + /* TODO: worth refreshing pending interrupt status? */ + return ipending; +} + /* * Routine: cpu_idle * Function: @@ -337,38 +398,71 @@ cpu_idle(void) { cpu_data_t *cpu_data_ptr = getCpuDatap(); uint64_t new_idle_timeout_ticks = 0x0ULL, lastPop; + bool idle_disallowed = false; - if ((!idle_enable) || (cpu_data_ptr->cpu_signal & SIGPdisabled)) { + if (__improbable((!idle_enable))) { + idle_disallowed = true; + } else if (__improbable(cpu_data_ptr->cpu_signal & SIGPdisabled)) { + idle_disallowed = true; + } + + if (__improbable(idle_disallowed)) { Idle_load_context(); } - if (!SetIdlePop()) { - /* If a deadline is pending, wait for it to elapse. 
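The wfe_to_deadline_or_interrupt() helper added above generalizes the idle_wfe_to_deadline path being removed here: instead of looping on WFE unconditionally, idle polling is now bounded by a per-cluster recommendation deadline obtained from ml_cluster_wfe_timeout(). A condensed sketch of the idea (hypothetical function name, and assuming the generic-timer event stream is enabled so each WFE wakes periodically):

static bool
wfe_until_deadline_or_interrupt(uint64_t wfe_deadline)
{
    /* Stay in a cheap WFE loop rather than committing to a full WFI idle
     * entry; each event-stream tick (or SEV/interrupt) wakes the core so
     * the exit conditions can be re-checked. */
    while (cpu_interrupt_is_pending() == 0) {
        __builtin_arm_wfe();
        if ((wfe_deadline != ~0ULL) &&
            (mach_absolute_time() >= wfe_deadline)) {
            return false;   /* recommendation expired, no interrupt seen */
        }
    }
    return true;            /* interrupt pending; caller leaves the idle path */
}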
*/ - if (idle_wfe_to_deadline) { - if (arm64_wfe_allowed()) { - while (!cpu_interrupt_is_pending()) { - __builtin_arm_wfe(); - } + bool ipending = false; + uint32_t cid = ~0U; + + if (__probable(idle_proximate_io_wfe == true)) { + uint64_t wfe_deadline = 0; + /* Check for an active perf. controller generated + * WFE recommendation for this cluster. + */ + cid = cpu_data_ptr->cpu_cluster_id; + uint64_t wfe_ttd = 0; + if ((wfe_ttd = ml_cluster_wfe_timeout(cid)) != 0) { + wfe_deadline = mach_absolute_time() + wfe_ttd; + } + + if (wfe_deadline != 0) { + /* Poll issuing event-bounded WFEs until an interrupt + * arrives or the WFE recommendation expires + */ + ipending = wfe_to_deadline_or_interrupt(cid, wfe_deadline, cpu_data_ptr); +#if DEVELOPMENT || DEBUG + KDBG(CPUPM_IDLE_WFE, ipending, cpu_data_ptr->wfe_count, wfe_deadline, 0); +#endif + if (ipending == true) { + /* Back to machine_idle() */ + Idle_load_context(); } } + } + if (__improbable(cpu_proximate_timer())) { + if (idle_proximate_timer_wfe == true) { + /* Poll issuing WFEs until the expected + * timer FIQ arrives. + */ + ipending = wfe_to_deadline_or_interrupt(cid, ~0ULL, cpu_data_ptr); + assert(ipending == true); + } Idle_load_context(); } lastPop = cpu_data_ptr->rtcPop; - pmap_switch_user_ttb(kernel_pmap); cpu_data_ptr->cpu_active_thread = current_thread(); if (cpu_data_ptr->cpu_user_debug) { arm_debug_set(NULL); } cpu_data_ptr->cpu_user_debug = NULL; - if (cpu_data_ptr->cpu_idle_notify) { - ((processor_idle_t) cpu_data_ptr->cpu_idle_notify)(cpu_data_ptr->cpu_id, TRUE, &new_idle_timeout_ticks); + if (wfi && (cpu_data_ptr->cpu_idle_notify != NULL)) { + cpu_data_ptr->cpu_idle_notify(cpu_data_ptr->cpu_id, TRUE, &new_idle_timeout_ticks); } - if (cpu_data_ptr->idle_timer_notify != 0) { + if (cpu_data_ptr->idle_timer_notify != NULL) { if (new_idle_timeout_ticks == 0x0ULL) { /* turn off the idle timer */ cpu_data_ptr->idle_timer_deadline = 0x0ULL; @@ -390,7 +484,9 @@ cpu_idle(void) #endif /* MONOTONIC */ if (wfi) { +#if !defined(APPLE_ARM64_ARCH_FAMILY) platform_cache_idle_enter(); +#endif #if DEVELOPMENT || DEBUG // When simulating wfi overhead, @@ -439,8 +535,9 @@ cpu_idle(void) clock_delay_until(deadline); } #endif /* DEVELOPMENT || DEBUG */ - +#if !defined(APPLE_ARM64_ARCH_FAMILY) platform_cache_idle_exit(); +#endif } ClearIdlePop(TRUE); @@ -473,13 +570,11 @@ cpu_idle_exit(boolean_t from_reset) mt_cpu_run(cpu_data_ptr); #endif /* MONOTONIC */ - pmap_switch_user_ttb(cpu_data_ptr->cpu_active_thread->map->pmap); - - if (cpu_data_ptr->cpu_idle_notify) { - ((processor_idle_t) cpu_data_ptr->cpu_idle_notify)(cpu_data_ptr->cpu_id, FALSE, &new_idle_timeout_ticks); + if (wfi && (cpu_data_ptr->cpu_idle_notify != NULL)) { + cpu_data_ptr->cpu_idle_notify(cpu_data_ptr->cpu_id, FALSE, &new_idle_timeout_ticks); } - if (cpu_data_ptr->idle_timer_notify != 0) { + if (cpu_data_ptr->idle_timer_notify != NULL) { if (new_idle_timeout_ticks == 0x0ULL) { /* turn off the idle timer */ cpu_data_ptr->idle_timer_deadline = 0x0ULL; @@ -539,7 +634,6 @@ cpu_init(void) } cdp->cpu_stat.irq_ex_cnt_wake = 0; cdp->cpu_stat.ipi_cnt_wake = 0; - cdp->cpu_stat.timer_cnt_wake = 0; #if MONOTONIC cdp->cpu_stat.pmi_cnt_wake = 0; #endif /* MONOTONIC */ @@ -592,15 +686,15 @@ cpu_data_free(cpu_data_t *cpu_data_ptr) return; } - cpu_processor_free( cpu_data_ptr->cpu_processor); - if (CpuDataEntries[cpu_data_ptr->cpu_number].cpu_data_vaddr == cpu_data_ptr) { - CpuDataEntries[cpu_data_ptr->cpu_number].cpu_data_vaddr = NULL; - CpuDataEntries[cpu_data_ptr->cpu_number].cpu_data_paddr = 0; 
+ int cpu_number = cpu_data_ptr->cpu_number; + + if (CpuDataEntries[cpu_number].cpu_data_vaddr == cpu_data_ptr) { + CpuDataEntries[cpu_number].cpu_data_vaddr = NULL; + CpuDataEntries[cpu_number].cpu_data_paddr = 0; __builtin_arm_dmb(DMB_ISH); // Ensure prior stores to cpu array are visible } (kfree)((void *)(cpu_data_ptr->intstack_top - INTSTACK_SIZE), INTSTACK_SIZE); (kfree)((void *)(cpu_data_ptr->excepstack_top - EXCEPSTACK_SIZE), EXCEPSTACK_SIZE); - kmem_free(kernel_map, (vm_offset_t)cpu_data_ptr, sizeof(cpu_data_t)); } void @@ -609,17 +703,16 @@ cpu_data_init(cpu_data_t *cpu_data_ptr) uint32_t i; cpu_data_ptr->cpu_flags = 0; - cpu_data_ptr->interrupts_enabled = 0; cpu_data_ptr->cpu_int_state = 0; cpu_data_ptr->cpu_pending_ast = AST_NONE; - cpu_data_ptr->cpu_cache_dispatch = (void *) 0; + cpu_data_ptr->cpu_cache_dispatch = NULL; cpu_data_ptr->rtcPop = EndOfAllTime; cpu_data_ptr->rtclock_datap = &RTClockData; cpu_data_ptr->cpu_user_debug = NULL; cpu_data_ptr->cpu_base_timebase = 0; - cpu_data_ptr->cpu_idle_notify = (void *) 0; + cpu_data_ptr->cpu_idle_notify = NULL; cpu_data_ptr->cpu_idle_latency = 0x0ULL; cpu_data_ptr->cpu_idle_pop = 0x0ULL; cpu_data_ptr->cpu_reset_type = 0x0UL; @@ -658,10 +751,11 @@ cpu_data_init(cpu_data_t *cpu_data_ptr) pmap_cpu_data_ptr->cpu_nested_pmap = (struct pmap *) NULL; pmap_cpu_data_ptr->cpu_number = PMAP_INVALID_CPU_NUM; + pmap_cpu_data_ptr->pv_free.list = NULL; + pmap_cpu_data_ptr->pv_free.count = 0; + pmap_cpu_data_ptr->pv_free_tail = NULL; - for (i = 0; i < (sizeof(pmap_cpu_data_ptr->cpu_asid_high_bits) / sizeof(*pmap_cpu_data_ptr->cpu_asid_high_bits)); i++) { - pmap_cpu_data_ptr->cpu_asid_high_bits[i] = 0; - } + bzero(&(pmap_cpu_data_ptr->cpu_sw_asids[0]), sizeof(pmap_cpu_data_ptr->cpu_sw_asids)); #endif cpu_data_ptr->halt_status = CPU_NOT_HALTED; #if __ARM_KERNEL_PROTECT__ @@ -670,7 +764,9 @@ cpu_data_init(cpu_data_t *cpu_data_ptr) #if defined(HAS_APPLE_PAC) cpu_data_ptr->rop_key = 0; + cpu_data_ptr->jop_key = ml_default_jop_pid(); #endif + } kern_return_t @@ -691,18 +787,21 @@ cpu_data_register(cpu_data_t *cpu_data_ptr) } #if defined(KERNEL_INTEGRITY_CTRR) +/* Hibernation needs to reset this state, so data and text are in the hib segment; + * this allows them be accessed and executed early. 
+ */ +LCK_GRP_DECLARE(ctrr_cpu_start_lock_grp, "ctrr_cpu_start_lock"); +LCK_SPIN_DECLARE(ctrr_cpu_start_lck, &ctrr_cpu_start_lock_grp); +enum ctrr_cluster_states ctrr_cluster_locked[MAX_CPU_CLUSTERS] MARK_AS_HIBERNATE_DATA; -lck_spin_t ctrr_cpu_start_lck; -bool ctrr_cluster_locked[__ARM_CLUSTER_COUNT__]; - +MARK_AS_HIBERNATE_TEXT void -init_ctrr_cpu_start_lock(void) +init_ctrr_cluster_states(void) { - lck_grp_t *ctrr_cpu_start_lock_grp = lck_grp_alloc_init("ctrr_cpu_start_lock", 0); - assert(ctrr_cpu_start_lock_grp); - lck_spin_init(&ctrr_cpu_start_lck, ctrr_cpu_start_lock_grp, NULL); + for (int i = 0; i < MAX_CPU_CLUSTERS; i++) { + ctrr_cluster_locked[i] = CTRR_UNLOCKED; + } } - #endif kern_return_t @@ -717,6 +816,7 @@ cpu_start(int cpu) configure_coresight_registers(cpu_data_ptr); } else { thread_t first_thread; + processor_t processor; cpu_data_ptr->cpu_reset_handler = (vm_offset_t) start_cpu_paddr; @@ -724,32 +824,40 @@ cpu_start(int cpu) cpu_data_ptr->cpu_pmap_cpu_data.cpu_nested_pmap = NULL; #endif - if (cpu_data_ptr->cpu_processor->startup_thread != THREAD_NULL) { - first_thread = cpu_data_ptr->cpu_processor->startup_thread; + processor = PERCPU_GET_RELATIVE(processor, cpu_data, cpu_data_ptr); + if (processor->startup_thread != THREAD_NULL) { + first_thread = processor->startup_thread; } else { - first_thread = cpu_data_ptr->cpu_processor->idle_thread; + first_thread = processor->idle_thread; } cpu_data_ptr->cpu_active_thread = first_thread; first_thread->machine.CpuDatap = cpu_data_ptr; + first_thread->machine.pcpu_data_base = + (vm_address_t)cpu_data_ptr - __PERCPU_ADDR(cpu_data); configure_coresight_registers(cpu_data_ptr); flush_dcache((vm_offset_t)&CpuDataEntries[cpu], sizeof(cpu_data_entry_t), FALSE); flush_dcache((vm_offset_t)cpu_data_ptr, sizeof(cpu_data_t), FALSE); #if defined(KERNEL_INTEGRITY_CTRR) - /* first time CPU starts, if not cluster master, and if cluster is not already locked, - * block until cluster becomes locked. */ - if (cpu_data_ptr->cpu_processor->active_thread == THREAD_NULL - && !cpu_data_ptr->cluster_master) { - lck_spin_lock(&ctrr_cpu_start_lck); - if (ctrr_cluster_locked[cpu_data_ptr->cpu_cluster_id] == 0) { - assert_wait(&ctrr_cluster_locked[cpu_data_ptr->cpu_cluster_id], THREAD_UNINT); - lck_spin_unlock(&ctrr_cpu_start_lck); - thread_block(THREAD_CONTINUE_NULL); - assert(ctrr_cluster_locked[cpu_data_ptr->cpu_cluster_id] == 1); - } else { - lck_spin_unlock(&ctrr_cpu_start_lck); - } + + /* First CPU being started within a cluster goes ahead to lock CTRR for cluster; + * other CPUs block until cluster is locked. 
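The hunk below only shows the waiting side of this protocol. The side that actually performs the CTRR lockdown is not in this hunk, but the three-state enum plus the assert_wait() pairing implies a counterpart along these lines (hypothetical sketch; cluster_id stands in for whatever identifier the lockdown path uses):

    /* After programming and locking the cluster's CTRR registers: */
    lck_spin_lock(&ctrr_cpu_start_lck);
    ctrr_cluster_locked[cluster_id] = CTRR_LOCKED;
    lck_spin_unlock(&ctrr_cpu_start_lck);
    thread_wakeup(&ctrr_cluster_locked[cluster_id]);   /* release CPUs parked in CTRR_LOCKING */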
*/ + lck_spin_lock(&ctrr_cpu_start_lck); + switch (ctrr_cluster_locked[cpu_data_ptr->cpu_cluster_id]) { + case CTRR_UNLOCKED: + ctrr_cluster_locked[cpu_data_ptr->cpu_cluster_id] = CTRR_LOCKING; + lck_spin_unlock(&ctrr_cpu_start_lck); + break; + case CTRR_LOCKING: + assert_wait(&ctrr_cluster_locked[cpu_data_ptr->cpu_cluster_id], THREAD_UNINT); + lck_spin_unlock(&ctrr_cpu_start_lck); + thread_block(THREAD_CONTINUE_NULL); + assert(ctrr_cluster_locked[cpu_data_ptr->cpu_cluster_id] != CTRR_LOCKING); + break; + default: // CTRR_LOCKED + lck_spin_unlock(&ctrr_cpu_start_lck); + break; } #endif (void) PE_cpu_start(cpu_data_ptr->cpu_id, (vm_offset_t)NULL, (vm_offset_t)NULL); @@ -868,11 +976,23 @@ ml_arm_sleep(void) *(volatile uint32_t *)(cpu_data_ptr->coresight_base[CORESIGHT_ED] + ARM_DEBUG_OFFSET_DBGPRCR) = 0; } +#if HIBERNATION + uint32_t mode = hibernate_write_image(); + if (mode == kIOHibernatePostWriteHalt) { + HIBLOG("powering off after writing hibernation image\n"); + int halt_result = -1; + if (PE_halt_restart) { + halt_result = (*PE_halt_restart)(kPEHaltCPU); + } + panic("can't shutdown: PE_halt_restart returned %d", halt_result); + } +#endif /* HIBERNATION */ + #if MONOTONIC mt_sleep(); #endif /* MONOTONIC */ /* ARM64-specific preparation */ - arm64_prepare_for_sleep(); + arm64_prepare_for_sleep(true); } else { #if __ARM_GLOBAL_SLEEP_BIT__ /* @@ -902,7 +1022,7 @@ ml_arm_sleep(void) } /* ARM64-specific preparation */ - arm64_prepare_for_sleep(); + arm64_prepare_for_sleep(true); } } @@ -913,11 +1033,12 @@ cpu_machine_idle_init(boolean_t from_boot) cpu_data_t *cpu_data_ptr = getCpuDatap(); if (from_boot) { - unsigned long jtag = 0; int wfi_tmp = 1; uint32_t production = 1; DTEntry entry; + unsigned long jtag = 0; + if (PE_parse_boot_argn("jtag", &jtag, sizeof(jtag))) { if (jtag != 0) { idle_enable = FALSE; @@ -928,6 +1049,13 @@ cpu_machine_idle_init(boolean_t from_boot) idle_enable = TRUE; } +#if DEVELOPMENT || DEBUG + uint32_t wfe_mode = 0; + if (PE_parse_boot_argn("wfe_mode", &wfe_mode, sizeof(wfe_mode))) { + idle_proximate_timer_wfe = ((wfe_mode & 1) == 1); + idle_proximate_io_wfe = ((wfe_mode & 2) == 2); + } +#endif PE_parse_boot_argn("wfi", &wfi_tmp, sizeof(wfi_tmp)); // bits 7..0 give the wfi type @@ -955,8 +1083,6 @@ cpu_machine_idle_init(boolean_t from_boot) break; } - PE_parse_boot_argn("idle_wfe_to_deadline", &idle_wfe_to_deadline, sizeof(idle_wfe_to_deadline)); - ResetHandlerData.assist_reset_handler = 0; ResetHandlerData.cpu_data_entries = ml_static_vtop((vm_offset_t)CpuDataEntries); @@ -967,11 +1093,11 @@ cpu_machine_idle_init(boolean_t from_boot) #endif /* MONITOR */ // Determine if we are on production or debug chip - if (kSuccess == DTLookupEntry(NULL, "/chosen", &entry)) { + if (kSuccess == SecureDTLookupEntry(NULL, "/chosen", &entry)) { unsigned int size; - void *prop; + void const *prop; - if (kSuccess == DTGetProperty(entry, "effective-production-status-ap", &prop, &size)) { + if (kSuccess == SecureDTGetProperty(entry, "effective-production-status-ap", &prop, &size)) { if (size == 4) { bcopy(prop, &production, size); } @@ -1027,20 +1153,20 @@ sleep_token_buffer_init(void) cpu_data_t *cpu_data_ptr = getCpuDatap(); DTEntry entry; size_t size; - void **prop; + void const * const *prop; if ((cpu_data_ptr == &BootCpuData) && (sleepTokenBuffer == (vm_offset_t) NULL)) { /* Find the stpage node in the device tree */ - if (kSuccess != DTLookupEntry(0, "stram", &entry)) { + if (kSuccess != SecureDTLookupEntry(0, "stram", &entry)) { return; } - if (kSuccess != DTGetProperty(entry, 
"reg", (void **)&prop, (unsigned int *)&size)) { + if (kSuccess != SecureDTGetProperty(entry, "reg", (const void **)&prop, (unsigned int *)&size)) { return; } /* Map the page into the kernel space */ - sleepTokenBuffer = ml_io_map(((vm_offset_t *)prop)[0], ((vm_size_t *)prop)[1]); + sleepTokenBuffer = ml_io_map(((vm_offset_t const *)prop)[0], ((vm_size_t const *)prop)[1]); } } #endif diff --git a/osfmk/arm64/cswitch.s b/osfmk/arm64/cswitch.s index d691acc1a..05c38a36d 100644 --- a/osfmk/arm64/cswitch.s +++ b/osfmk/arm64/cswitch.s @@ -28,6 +28,7 @@ #include #include #include +#include #include #include "assym.s" @@ -45,16 +46,16 @@ * A subroutine invocation must preserve the contents of the registers r19-r29 * and SP. We also save IP0 and IP1, as machine_idle uses IP0 for saving the LR. */ - stp x16, x17, [$0, SS64_X16] - stp x19, x20, [$0, SS64_X19] - stp x21, x22, [$0, SS64_X21] - stp x23, x24, [$0, SS64_X23] - stp x25, x26, [$0, SS64_X25] - stp x27, x28, [$0, SS64_X27] - stp fp, lr, [$0, SS64_FP] - str xzr, [$0, SS64_PC] + stp x16, x17, [$0, SS64_KERNEL_X16] + stp x19, x20, [$0, SS64_KERNEL_X19] + stp x21, x22, [$0, SS64_KERNEL_X21] + stp x23, x24, [$0, SS64_KERNEL_X23] + stp x25, x26, [$0, SS64_KERNEL_X25] + stp x27, x28, [$0, SS64_KERNEL_X27] + stp fp, lr, [$0, SS64_KERNEL_FP] + str xzr, [$0, SS64_KERNEL_PC] MOV32 w$1, PSR64_KERNEL_POISON - str w$1, [$0, SS64_CPSR] + str w$1, [$0, SS64_KERNEL_CPSR] #ifdef HAS_APPLE_PAC stp x0, x1, [sp, #-16]! stp x2, x3, [sp, #-16]! @@ -72,15 +73,15 @@ mov x3, lr mov x4, x16 mov x5, x17 - bl EXT(ml_sign_thread_state) + bl EXT(ml_sign_kernel_thread_state) ldp x4, x5, [sp], #16 ldp x2, x3, [sp], #16 ldp x0, x1, [sp], #16 - ldp fp, lr, [$0, SS64_FP] + ldp fp, lr, [$0, SS64_KERNEL_FP] #endif /* defined(HAS_APPLE_PAC) */ mov x$1, sp - str x$1, [$0, SS64_SP] + str x$1, [$0, SS64_KERNEL_SP] /* AAPCS-64 Page 14 * @@ -88,14 +89,17 @@ * calls; the remaining registers (v0-v7, v16-v31) do not need to be preserved * (or should be preserved by the caller). 
*/ - str d8, [$0, NS64_D8] - str d9, [$0, NS64_D9] - str d10,[$0, NS64_D10] - str d11,[$0, NS64_D11] - str d12,[$0, NS64_D12] - str d13,[$0, NS64_D13] - str d14,[$0, NS64_D14] - str d15,[$0, NS64_D15] + str d8, [$0, NS64_KERNEL_D8] + str d9, [$0, NS64_KERNEL_D9] + str d10,[$0, NS64_KERNEL_D10] + str d11,[$0, NS64_KERNEL_D11] + str d12,[$0, NS64_KERNEL_D12] + str d13,[$0, NS64_KERNEL_D13] + str d14,[$0, NS64_KERNEL_D14] + str d15,[$0, NS64_KERNEL_D15] + + mrs x$1, FPCR + str w$1, [$0, NS64_KERNEL_FPCR] .endmacro /* @@ -111,31 +115,36 @@ mov x22, x2 mov x0, $0 - AUTH_THREAD_STATE_IN_X0 x23, x24, x25, x26, x27 + AUTH_KERNEL_THREAD_STATE_IN_X0 x23, x24, x25, x26, x27 mov x0, x20 mov x1, x21 mov x2, x22 + ldr w$1, [$0, NS64_KERNEL_FPCR] + mrs x19, FPCR + CMSR FPCR, x19, x$1, 1 +1: + // Skip x16, x17 - already loaded + authed by AUTH_THREAD_STATE_IN_X0 - ldp x19, x20, [$0, SS64_X19] - ldp x21, x22, [$0, SS64_X21] - ldp x23, x24, [$0, SS64_X23] - ldp x25, x26, [$0, SS64_X25] - ldp x27, x28, [$0, SS64_X27] - ldr fp, [$0, SS64_FP] + ldp x19, x20, [$0, SS64_KERNEL_X19] + ldp x21, x22, [$0, SS64_KERNEL_X21] + ldp x23, x24, [$0, SS64_KERNEL_X23] + ldp x25, x26, [$0, SS64_KERNEL_X25] + ldp x27, x28, [$0, SS64_KERNEL_X27] + ldr fp, [$0, SS64_KERNEL_FP] // Skip lr - already loaded + authed by AUTH_THREAD_STATE_IN_X0 - ldr $1, [$0, SS64_SP] - mov sp, $1 - - ldr d8, [$0, NS64_D8] - ldr d9, [$0, NS64_D9] - ldr d10,[$0, NS64_D10] - ldr d11,[$0, NS64_D11] - ldr d12,[$0, NS64_D12] - ldr d13,[$0, NS64_D13] - ldr d14,[$0, NS64_D14] - ldr d15,[$0, NS64_D15] + ldr x$1, [$0, SS64_KERNEL_SP] + mov sp, x$1 + + ldr d8, [$0, NS64_KERNEL_D8] + ldr d9, [$0, NS64_KERNEL_D9] + ldr d10,[$0, NS64_KERNEL_D10] + ldr d11,[$0, NS64_KERNEL_D11] + ldr d12,[$0, NS64_KERNEL_D12] + ldr d13,[$0, NS64_KERNEL_D13] + ldr d14,[$0, NS64_KERNEL_D14] + ldr d15,[$0, NS64_KERNEL_D15] .endmacro @@ -157,40 +166,69 @@ orr $2, $1, $2 // Save new cthread/cpu to TPIDRRO_EL0 msr TPIDRRO_EL0, $2 msr TPIDR_EL0, xzr - /* ARM64_TODO Reserve x18 until we decide what to do with it */ - mov x18, $1 // ... and trash reserved x18 +#if DEBUG || DEVELOPMENT + ldr $1, [$0, TH_THREAD_ID] // Save the bottom 32-bits of the thread ID into + msr CONTEXTIDR_EL1, $1 // CONTEXTIDR_EL1 (top 32-bits are RES0). 
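Two small notes on the additions above. The CONTEXTIDR_EL1 write exists purely for observability on DEVELOPMENT/DEBUG kernels: tagging the context ID register with the incoming thread's ID lets hardware trace and debug tooling attribute execution to a thread. The CMSR sequence used for FPCR appears to be a conditional system-register write, i.e. the restore only pays for the MSR when the saved control bits differ from the live ones; a rough C equivalent of that pattern (inline-assembly sketch, not the kernel's macro):

static inline void
restore_fpcr_if_changed(uint64_t saved_fpcr)
{
    uint64_t live_fpcr;

    __asm__ volatile ("mrs %0, fpcr" : "=r" (live_fpcr));
    if (live_fpcr != saved_fpcr) {
        /* Only issue the (comparatively expensive) MSR when needed. */
        __asm__ volatile ("msr fpcr, %0" : : "r" (saved_fpcr));
    }
}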
+#endif /* DEBUG || DEVELOPMENT */ .endmacro -#if defined(HAS_APPLE_PAC) /* - * set_process_dependent_keys + * set_process_dependent_keys_and_sync_context * - * Updates process dependent keys during context switch if necessary + * Updates process dependent keys and issues explicit context sync during context switch if necessary * Per CPU Data rop_key is initialized in arm_init() for bootstrap processor * and in cpu_data_init for slave processors * - * arg0 - New thread pointer/Current CPU key - * arg1 - Scratch register: New Thread Key - * arg2 - Scratch register: Current CPU Data pointer + * thread - New thread pointer + * new_key - Scratch register: New Thread Key + * tmp_key - Scratch register: Current CPU Key + * cpudatap - Scratch register: Current CPU Data pointer + * wsync - Half-width scratch register: CPU sync required flag + * + * to save on ISBs, for ARMv8.5 we use the CPU_SYNC_ON_CSWITCH field, cached in wsync, for pre-ARMv8.5, + * we just use wsync to keep track of needing an ISB */ -.macro set_process_dependent_keys - ldr $1, [$0, TH_ROP_PID] - ldr $2, [$0, ACT_CPUDATAP] - ldr $0, [$2, CPU_ROP_KEY] - cmp $0, $1 +.macro set_process_dependent_keys_and_sync_context thread, new_key, tmp_key, cpudatap, wsync + + +#if defined(__ARM_ARCH_8_5__) || defined(HAS_APPLE_PAC) + ldr \cpudatap, [\thread, ACT_CPUDATAP] +#endif /* defined(__ARM_ARCH_8_5__) || defined(HAS_APPLE_PAC) */ + + mov \wsync, #0 + + +#if defined(HAS_APPLE_PAC) + ldr \new_key, [\thread, TH_ROP_PID] + ldr \tmp_key, [\cpudatap, CPU_ROP_KEY] + cmp \new_key, \tmp_key b.eq 1f - str $1, [$2, CPU_ROP_KEY] - msr APIBKeyLo_EL1, $1 - add $1, $1, #1 - msr APIBKeyHi_EL1, $1 - add $1, $1, #1 - msr APDBKeyLo_EL1, $1 - add $1, $1, #1 - msr APDBKeyHi_EL1, $1 + str \new_key, [\cpudatap, CPU_ROP_KEY] + msr APIBKeyLo_EL1, \new_key + add \new_key, \new_key, #1 + msr APIBKeyHi_EL1, \new_key + add \new_key, \new_key, #1 + msr APDBKeyLo_EL1, \new_key + add \new_key, \new_key, #1 + msr APDBKeyHi_EL1, \new_key + mov \wsync, #1 +1: + +#if HAS_PAC_FAST_A_KEY_SWITCHING + IF_PAC_SLOW_A_KEY_SWITCHING Lskip_jop_keys_\@, \new_key + ldr \new_key, [\thread, TH_JOP_PID] + REPROGRAM_JOP_KEYS Lskip_jop_keys_\@, \new_key, \cpudatap, \tmp_key + mov \wsync, #1 +Lskip_jop_keys_\@: +#endif /* HAS_PAC_FAST_A_KEY_SWITCHING */ + +#endif /* defined(HAS_APPLE_PAC) */ + + cbz \wsync, 1f isb sy + 1: .endmacro -#endif /* defined(HAS_APPLE_PAC) */ /* * void machine_load_context(thread_t thread) @@ -205,10 +243,8 @@ LEXT(machine_load_context) set_thread_registers x0, x1, x2 ldr x1, [x0, TH_KSTACKPTR] // Get top of kernel stack - load_general_registers x1, x2 -#ifdef HAS_APPLE_PAC - set_process_dependent_keys x0, x1, x2 -#endif + load_general_registers x1, 2 + set_process_dependent_keys_and_sync_context x0, x1, x2, x3, w4 mov x0, #0 // Clear argument to thread_continue ret @@ -232,23 +268,22 @@ LEXT(Call_continuation) mov sp, x5 // Set stack pointer mov fp, #0 // Clear the frame pointer -#if defined(HAS_APPLE_PAC) - set_process_dependent_keys x4, x5, x6 -#endif + set_process_dependent_keys_and_sync_context x4, x5, x6, x7, w20 - mov x20, x0 //continuation - mov x21, x1 //continuation parameter - mov x22, x2 //wait result + mov x20, x0 //continuation + mov x21, x1 //continuation parameter + mov x22, x2 //wait result - cbz x3, 1f - mov x0, #1 - bl EXT(ml_set_interrupts_enabled) + cbz x3, 1f + mov x0, #1 + bl EXT(ml_set_interrupts_enabled) 1: mov x0, x21 // Set the first parameter mov x1, x22 // Set the wait result arg #ifdef HAS_APPLE_PAC - blraaz x20 // Branch to the 
continuation + mov x21, THREAD_CONTINUE_T_DISC + blraa x20, x21 // Branch to the continuation #else blr x20 // Branch to the continuation #endif @@ -272,10 +307,8 @@ LEXT(Switch_context) Lswitch_threads: set_thread_registers x2, x3, x4 ldr x3, [x2, TH_KSTACKPTR] - load_general_registers x3, x4 -#if defined(HAS_APPLE_PAC) - set_process_dependent_keys x2, x3, x4 -#endif + load_general_registers x3, 4 + set_process_dependent_keys_and_sync_context x2, x3, x4, x5, w6 ret /* @@ -324,10 +357,8 @@ LEXT(Idle_context) LEXT(Idle_load_context) mrs x0, TPIDR_EL1 // Get thread pointer ldr x1, [x0, TH_KSTACKPTR] // Get the top of the kernel stack - load_general_registers x1, x2 -#ifdef HAS_APPLE_PAC - set_process_dependent_keys x0, x1, x2 -#endif + load_general_registers x1, 2 + set_process_dependent_keys_and_sync_context x0, x1, x2, x3, w4 ret .align 2 @@ -337,3 +368,4 @@ LEXT(machine_set_current_thread) ret +/* vim: set ts=4: */ diff --git a/osfmk/arm64/dwarf_unwind.h b/osfmk/arm64/dwarf_unwind.h new file mode 100644 index 000000000..5abcd368c --- /dev/null +++ b/osfmk/arm64/dwarf_unwind.h @@ -0,0 +1,170 @@ +/* + * Copyright (c) 2019 Apple Inc. All rights reserved. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. + * + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ + */ + + +#ifndef _ARM64_DWARF_UNWIND_H_ +#define _ARM64_DWARF_UNWIND_H_ + +/* + * This file contains the architecture specific DWARF definitions needed for unwind + * information added to trap handlers. 
+ */ + +/* DWARF Register numbers for ARM64 registers contained in the saved state */ + +#define DWARF_ARM64_X0 0 +#define DWARF_ARM64_X1 1 +#define DWARF_ARM64_X2 2 +#define DWARF_ARM64_X3 3 +#define DWARF_ARM64_X4 4 +#define DWARF_ARM64_X5 5 +#define DWARF_ARM64_X6 6 +#define DWARF_ARM64_X7 7 +#define DWARF_ARM64_X8 8 +#define DWARF_ARM64_X9 9 +#define DWARF_ARM64_X10 10 +#define DWARF_ARM64_X11 11 +#define DWARF_ARM64_X12 12 +#define DWARF_ARM64_X13 13 +#define DWARF_ARM64_X14 14 +#define DWARF_ARM64_X15 15 +#define DWARF_ARM64_X16 16 +#define DWARF_ARM64_X17 17 +#define DWARF_ARM64_X18 18 +#define DWARF_ARM64_X19 19 +#define DWARF_ARM64_X20 20 +#define DWARF_ARM64_X21 21 +#define DWARF_ARM64_X22 22 +#define DWARF_ARM64_X23 23 +#define DWARF_ARM64_X24 24 +#define DWARF_ARM64_X25 25 +#define DWARF_ARM64_X26 26 +#define DWARF_ARM64_X27 27 +#define DWARF_ARM64_X28 28 + +#define DWARF_ARM64_FP 29 +#define DWARF_ARM64_LR 30 +#define DWARF_ARM64_SP 31 +#define DWARF_ARM64_PC 32 +#define DWARF_ARM64_CPSR 33 + +#define DW_OP_breg21 0x85 +#define DW_CFA_expression 0x10 + +#define DW_FORM_LEN_ONE_BYTE_SLEB 2 +#define DW_FORM_LEN_TWO_BYTE_SLEB 3 + +#define DWARF_ARM64_X0_OFFSET 8 +#define DWARF_ARM64_X1_OFFSET 16 +#define DWARF_ARM64_X2_OFFSET 24 +#define DWARF_ARM64_X3_OFFSET 32 +#define DWARF_ARM64_X4_OFFSET 40 +#define DWARF_ARM64_X5_OFFSET 48 +#define DWARF_ARM64_X6_OFFSET 56 +#define DWARF_ARM64_X7_OFFSET 0xc0, 0x00 +#define DWARF_ARM64_X8_OFFSET 0xc8, 0x00 +#define DWARF_ARM64_X9_OFFSET 0xd0, 0x00 +#define DWARF_ARM64_X10_OFFSET 0xd8, 0x00 +#define DWARF_ARM64_X11_OFFSET 0xe0, 0x00 +#define DWARF_ARM64_X12_OFFSET 0xe8, 0x00 +#define DWARF_ARM64_X13_OFFSET 0xf0, 0x00 +#define DWARF_ARM64_X14_OFFSET 0xf8, 0x00 +#define DWARF_ARM64_X15_OFFSET 0x80, 0x01 +#define DWARF_ARM64_X16_OFFSET 0x88, 0x01 +#define DWARF_ARM64_X17_OFFSET 0x90, 0x01 +#define DWARF_ARM64_X18_OFFSET 0x98, 0x01 +#define DWARF_ARM64_X19_OFFSET 0xa0, 0x01 + +#define DWARF_ARM64_X20_OFFSET 0xa8, 0x01 +#define DWARF_ARM64_X21_OFFSET 0xb0, 0x01 +#define DWARF_ARM64_X22_OFFSET 0xb8, 0x01 +#define DWARF_ARM64_X23_OFFSET 0xc0, 0x01 +#define DWARF_ARM64_X24_OFFSET 0xc8, 0x01 +#define DWARF_ARM64_X25_OFFSET 0xd0, 0x01 +#define DWARF_ARM64_X26_OFFSET 0xd8, 0x01 +#define DWARF_ARM64_X27_OFFSET 0xe0, 0x01 +#define DWARF_ARM64_X28_OFFSET 0xe8, 0x01 + +#define DWARF_ARM64_FP_OFFSET 0xf0, 0x01 +#define DWARF_ARM64_LR_OFFSET 0xf8, 0x01 +#define DWARF_ARM64_SP_OFFSET 0x80, 0x02 +#define DWARF_ARM64_PC_OFFSET 0x88, 0x02 +#define DWARF_ARM64_CPSR_OFFSET 0x90, 0x02 + +/* The actual unwind directives added to trap handlers to let the debugger know where the register state is stored */ + +/* Unwind Prologue added to each function to indicate the start of the unwind information. 
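For readers decoding the *_OFFSET definitions above: each one is the SLEB128 encoding of the register's byte offset within the saved state, addressed relative to x21 (DW_OP_breg21), and the DW_FORM_LEN_* values are the total length of that expression (the breg21 opcode plus one or two SLEB128 bytes). Offsets below 64 fit in a single SLEB128 byte; from x7 onward (offset 64 and up) two bytes are needed, which is why x7's offset is written as 0xc0, 0x00. A minimal stand-alone encoder, for illustration only (hypothetical helper; assumes arithmetic right shift on the host):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

static size_t
sleb128_encode(int64_t value, uint8_t *out)
{
    size_t n = 0;
    for (;;) {
        uint8_t byte = value & 0x7f;
        value >>= 7;
        bool done = (value == 0 && !(byte & 0x40)) ||
            (value == -1 && (byte & 0x40));
        if (!done) {
            byte |= 0x80;               /* continuation bit */
        }
        out[n++] = byte;
        if (done) {
            return n;                   /* 8 -> {0x08}; 64 -> {0xC0, 0x00}; 128 -> {0x80, 0x01} */
        }
    }
}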
*/ + +#define UNWIND_PROLOGUE \ +.cfi_sections .eh_frame %%\ +.cfi_startproc %%\ +.cfi_signal_frame %%\ + + +/* Unwind Epilogue added to each function to indicate the end of the unwind information */ + +#define UNWIND_EPILOGUE .cfi_endproc + + +#define UNWIND_DIRECTIVES \ +.cfi_escape DW_CFA_expression, DWARF_ARM64_X0, DW_FORM_LEN_ONE_BYTE_SLEB, DW_OP_breg21, DWARF_ARM64_X0_OFFSET %%\ +.cfi_escape DW_CFA_expression, DWARF_ARM64_X1, DW_FORM_LEN_ONE_BYTE_SLEB, DW_OP_breg21, DWARF_ARM64_X1_OFFSET %%\ +.cfi_escape DW_CFA_expression, DWARF_ARM64_X2, DW_FORM_LEN_ONE_BYTE_SLEB, DW_OP_breg21, DWARF_ARM64_X2_OFFSET %%\ +.cfi_escape DW_CFA_expression, DWARF_ARM64_X3, DW_FORM_LEN_ONE_BYTE_SLEB, DW_OP_breg21, DWARF_ARM64_X3_OFFSET %%\ +.cfi_escape DW_CFA_expression, DWARF_ARM64_X4, DW_FORM_LEN_ONE_BYTE_SLEB, DW_OP_breg21, DWARF_ARM64_X4_OFFSET %%\ +.cfi_escape DW_CFA_expression, DWARF_ARM64_X5, DW_FORM_LEN_ONE_BYTE_SLEB, DW_OP_breg21, DWARF_ARM64_X5_OFFSET %%\ +.cfi_escape DW_CFA_expression, DWARF_ARM64_X6, DW_FORM_LEN_ONE_BYTE_SLEB, DW_OP_breg21, DWARF_ARM64_X6_OFFSET %%\ +.cfi_escape DW_CFA_expression, DWARF_ARM64_X7, DW_FORM_LEN_TWO_BYTE_SLEB, DW_OP_breg21, DWARF_ARM64_X7_OFFSET %%\ +.cfi_escape DW_CFA_expression, DWARF_ARM64_X8, DW_FORM_LEN_TWO_BYTE_SLEB, DW_OP_breg21, DWARF_ARM64_X8_OFFSET %%\ +.cfi_escape DW_CFA_expression, DWARF_ARM64_X9, DW_FORM_LEN_TWO_BYTE_SLEB, DW_OP_breg21, DWARF_ARM64_X9_OFFSET %%\ +.cfi_escape DW_CFA_expression, DWARF_ARM64_X10, DW_FORM_LEN_TWO_BYTE_SLEB, DW_OP_breg21, DWARF_ARM64_X10_OFFSET %%\ +.cfi_escape DW_CFA_expression, DWARF_ARM64_X11, DW_FORM_LEN_TWO_BYTE_SLEB, DW_OP_breg21, DWARF_ARM64_X11_OFFSET %%\ +.cfi_escape DW_CFA_expression, DWARF_ARM64_X12, DW_FORM_LEN_TWO_BYTE_SLEB, DW_OP_breg21, DWARF_ARM64_X12_OFFSET %%\ +.cfi_escape DW_CFA_expression, DWARF_ARM64_X13, DW_FORM_LEN_TWO_BYTE_SLEB, DW_OP_breg21, DWARF_ARM64_X13_OFFSET %%\ +.cfi_escape DW_CFA_expression, DWARF_ARM64_X14, DW_FORM_LEN_TWO_BYTE_SLEB, DW_OP_breg21, DWARF_ARM64_X14_OFFSET %%\ +.cfi_escape DW_CFA_expression, DWARF_ARM64_X15, DW_FORM_LEN_TWO_BYTE_SLEB, DW_OP_breg21, DWARF_ARM64_X15_OFFSET %%\ +.cfi_escape DW_CFA_expression, DWARF_ARM64_X16, DW_FORM_LEN_TWO_BYTE_SLEB, DW_OP_breg21, DWARF_ARM64_X16_OFFSET %%\ +.cfi_escape DW_CFA_expression, DWARF_ARM64_X17, DW_FORM_LEN_TWO_BYTE_SLEB, DW_OP_breg21, DWARF_ARM64_X17_OFFSET %%\ +.cfi_escape DW_CFA_expression, DWARF_ARM64_X18, DW_FORM_LEN_TWO_BYTE_SLEB, DW_OP_breg21, DWARF_ARM64_X18_OFFSET %%\ +.cfi_escape DW_CFA_expression, DWARF_ARM64_X19, DW_FORM_LEN_TWO_BYTE_SLEB, DW_OP_breg21, DWARF_ARM64_X19_OFFSET %%\ +.cfi_escape DW_CFA_expression, DWARF_ARM64_X20, DW_FORM_LEN_TWO_BYTE_SLEB, DW_OP_breg21, DWARF_ARM64_X20_OFFSET %%\ +.cfi_escape DW_CFA_expression, DWARF_ARM64_X21, DW_FORM_LEN_TWO_BYTE_SLEB, DW_OP_breg21, DWARF_ARM64_X21_OFFSET %%\ +.cfi_escape DW_CFA_expression, DWARF_ARM64_X22, DW_FORM_LEN_TWO_BYTE_SLEB, DW_OP_breg21, DWARF_ARM64_X22_OFFSET %%\ +.cfi_escape DW_CFA_expression, DWARF_ARM64_X23, DW_FORM_LEN_TWO_BYTE_SLEB, DW_OP_breg21, DWARF_ARM64_X23_OFFSET %%\ +.cfi_escape DW_CFA_expression, DWARF_ARM64_X24, DW_FORM_LEN_TWO_BYTE_SLEB, DW_OP_breg21, DWARF_ARM64_X24_OFFSET %%\ +.cfi_escape DW_CFA_expression, DWARF_ARM64_X25, DW_FORM_LEN_TWO_BYTE_SLEB, DW_OP_breg21, DWARF_ARM64_X25_OFFSET %%\ +.cfi_escape DW_CFA_expression, DWARF_ARM64_X26, DW_FORM_LEN_TWO_BYTE_SLEB, DW_OP_breg21, DWARF_ARM64_X26_OFFSET %%\ +.cfi_escape DW_CFA_expression, DWARF_ARM64_X27, DW_FORM_LEN_TWO_BYTE_SLEB, DW_OP_breg21, DWARF_ARM64_X27_OFFSET %%\ +.cfi_escape 
DW_CFA_expression, DWARF_ARM64_X28, DW_FORM_LEN_TWO_BYTE_SLEB, DW_OP_breg21, DWARF_ARM64_X28_OFFSET %%\ +.cfi_escape DW_CFA_expression, DWARF_ARM64_FP, DW_FORM_LEN_TWO_BYTE_SLEB, DW_OP_breg21, DWARF_ARM64_FP_OFFSET %%\ +.cfi_escape DW_CFA_expression, DWARF_ARM64_LR, DW_FORM_LEN_TWO_BYTE_SLEB, DW_OP_breg21, DWARF_ARM64_LR_OFFSET %%\ +.cfi_escape DW_CFA_expression, DWARF_ARM64_SP, DW_FORM_LEN_TWO_BYTE_SLEB, DW_OP_breg21, DWARF_ARM64_SP_OFFSET %%\ +.cfi_escape DW_CFA_expression, DWARF_ARM64_PC, DW_FORM_LEN_TWO_BYTE_SLEB, DW_OP_breg21, DWARF_ARM64_PC_OFFSET %%\ +.cfi_escape DW_CFA_expression, DWARF_ARM64_CPSR, DW_FORM_LEN_TWO_BYTE_SLEB, DW_OP_breg21, DWARF_ARM64_CPSR_OFFSET %%\ + +#endif /* _ARM64_DWARF_UNWIND_H_ */ diff --git a/osfmk/arm64/exception_asm.h b/osfmk/arm64/exception_asm.h index 8234158e9..e3ec822bf 100644 --- a/osfmk/arm64/exception_asm.h +++ b/osfmk/arm64/exception_asm.h @@ -26,9 +26,9 @@ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -#ifndef _PEXPERT_ARM_BOARD_CONFIG_H +#include #include -#endif +#include "assym.s" #if XNU_MONITOR /* Exit path defines; for controlling PPL -> kernel transitions. */ @@ -37,6 +37,9 @@ #define PPL_EXIT_BAD_CALL 2 /* The PPL request failed. */ #define PPL_EXIT_EXCEPTION 3 /* The PPL took an exception. */ +/* Guarded mode trap numbers: these are passed as the genter immediate. */ +#define GXF_ENTER_PPL 0 + #define KERNEL_MODE_ELR ELR_GL11 #define KERNEL_MODE_FAR FAR_GL11 #define KERNEL_MODE_ESR ESR_GL11 @@ -54,46 +57,55 @@ #define GUARDED_MODE_TPIDR TPIDR_EL1 /* - * GET_PMAP_CPU_DATA + * LOAD_PMAP_CPU_DATA * - * Retrieves the PPL per-CPU data for the current CPU. + * Loads the PPL per-CPU data array entry for the current CPU. * arg0 - Address of the PPL per-CPU data is returned through this * arg1 - Scratch register * arg2 - Scratch register * */ -.macro GET_PMAP_CPU_DATA -/* Get the CPU ID. */ -mrs $0, MPIDR_EL1 -#ifdef CPU_CLUSTER_OFFSETS -ubfx $1, $0, MPIDR_AFF1_SHIFT, MPIDR_AFF1_WIDTH -cmp $1, __ARM_CLUSTER_COUNT__ -b.hs . -adrp $2, EXT(pmap_cluster_offsets)@page -add $2, $2, EXT(pmap_cluster_offsets)@pageoff -ldr $1, [$2, $1, lsl #3] -and $0, $0, MPIDR_AFF0_MASK -add $0, $0, $1 -#else -and $0, $0, MPIDR_AFF0_MASK -#endif - -/* Get the PPL CPU data array. */ -adrp $1, EXT(pmap_cpu_data_array)@page -add $1, $1, EXT(pmap_cpu_data_array)@pageoff +.macro LOAD_PMAP_CPU_DATA + /* Get the CPU ID. */ + mrs $0, MPIDR_EL1 + ubfx $1, $0, MPIDR_AFF1_SHIFT, MPIDR_AFF1_WIDTH + adrp $2, EXT(cluster_offsets)@page + add $2, $2, EXT(cluster_offsets)@pageoff + ldr $1, [$2, $1, lsl #3] + + and $0, $0, MPIDR_AFF0_MASK + add $0, $0, $1 + + /* Get the PPL CPU data array. */ + adrp $1, EXT(pmap_cpu_data_array)@page + add $1, $1, EXT(pmap_cpu_data_array)@pageoff + + /* + * Sanity check the CPU ID (this is not a panic because this pertains to + * the hardware configuration; this should only fail if our + * understanding of the hardware is incorrect). + */ + cmp $0, MAX_CPUS + b.hs . + + mov $2, PMAP_CPU_DATA_ARRAY_ENTRY_SIZE + /* Get the PPL per-CPU data. */ + madd $0, $0, $2, $1 +.endmacro /* - * Sanity check the CPU ID (this is not a panic because this pertains to - * the hardware configuration; this should only fail if our - * understanding of the hardware is incorrect). + * GET_PMAP_CPU_DATA + * + * Retrieves the PPL per-CPU data for the current CPU. + * arg0 - Address of the PPL per-CPU data is returned through this + * arg1 - Scratch register + * arg2 - Scratch register + * */ -cmp $0, MAX_CPUS -b.hs . 
- -mov $2, PMAP_CPU_DATA_ARRAY_ENTRY_SIZE -/* Get the PPL per-CPU data. */ -madd $0, $0, $2, $1 +.macro GET_PMAP_CPU_DATA + LOAD_PMAP_CPU_DATA $0, $1, $2 .endmacro + #endif /* XNU_MONITOR */ /* @@ -105,13 +117,13 @@ madd $0, $0, $2, $1 * arg2 - 32-bit scratch reg */ .macro INIT_SAVED_STATE_FLAVORS -mov $1, ARM_SAVED_STATE64 // Set saved state to 64-bit flavor -mov $2, ARM_SAVED_STATE64_COUNT -stp $1, $2, [$0, SS_FLAVOR] -mov $1, ARM_NEON_SAVED_STATE64 // Set neon state to 64-bit flavor -str $1, [$0, NS_FLAVOR] -mov $1, ARM_NEON_SAVED_STATE64_COUNT -str $1, [$0, NS_COUNT] + mov $1, ARM_SAVED_STATE64 // Set saved state to 64-bit flavor + mov $2, ARM_SAVED_STATE64_COUNT + stp $1, $2, [$0, SS_FLAVOR] + mov $1, ARM_NEON_SAVED_STATE64 // Set neon state to 64-bit flavor + str $1, [$0, NS_FLAVOR] + mov $1, ARM_NEON_SAVED_STATE64_COUNT + str $1, [$0, NS_COUNT] .endmacro /* @@ -119,88 +131,171 @@ str $1, [$0, NS_COUNT] * * Spills the current set of registers (excluding x0, x1, sp) to the specified * save area. + * + * On CPUs with PAC, the kernel "A" keys are used to create a thread signature. + * These keys are deliberately kept loaded into the CPU for later kernel use. + * * x0 - Address of the save area */ -.macro SPILL_REGISTERS -stp x2, x3, [x0, SS64_X2] // Save remaining GPRs -stp x4, x5, [x0, SS64_X4] -stp x6, x7, [x0, SS64_X6] -stp x8, x9, [x0, SS64_X8] -stp x10, x11, [x0, SS64_X10] -stp x12, x13, [x0, SS64_X12] -stp x14, x15, [x0, SS64_X14] -stp x16, x17, [x0, SS64_X16] -stp x18, x19, [x0, SS64_X18] -stp x20, x21, [x0, SS64_X20] -stp x22, x23, [x0, SS64_X22] -stp x24, x25, [x0, SS64_X24] -stp x26, x27, [x0, SS64_X26] -stp x28, fp, [x0, SS64_X28] -str lr, [x0, SS64_LR] - -/* Save arm_neon_saved_state64 */ - -stp q0, q1, [x0, NS64_Q0] -stp q2, q3, [x0, NS64_Q2] -stp q4, q5, [x0, NS64_Q4] -stp q6, q7, [x0, NS64_Q6] -stp q8, q9, [x0, NS64_Q8] -stp q10, q11, [x0, NS64_Q10] -stp q12, q13, [x0, NS64_Q12] -stp q14, q15, [x0, NS64_Q14] -stp q16, q17, [x0, NS64_Q16] -stp q18, q19, [x0, NS64_Q18] -stp q20, q21, [x0, NS64_Q20] -stp q22, q23, [x0, NS64_Q22] -stp q24, q25, [x0, NS64_Q24] -stp q26, q27, [x0, NS64_Q26] -stp q28, q29, [x0, NS64_Q28] -stp q30, q31, [x0, NS64_Q30] - -mrs x22, ELR_EL1 // Get exception link register -mrs x23, SPSR_EL1 // Load CPSR into var reg x23 -mrs x24, FPSR -mrs x25, FPCR +.macro SPILL_REGISTERS mode + stp x2, x3, [x0, SS64_X2] // Save remaining GPRs + stp x4, x5, [x0, SS64_X4] + stp x6, x7, [x0, SS64_X6] + stp x8, x9, [x0, SS64_X8] + stp x10, x11, [x0, SS64_X10] + stp x12, x13, [x0, SS64_X12] + stp x14, x15, [x0, SS64_X14] + stp x16, x17, [x0, SS64_X16] + stp x18, x19, [x0, SS64_X18] + stp x20, x21, [x0, SS64_X20] + stp x22, x23, [x0, SS64_X22] + stp x24, x25, [x0, SS64_X24] + stp x26, x27, [x0, SS64_X26] + stp x28, fp, [x0, SS64_X28] + str lr, [x0, SS64_LR] + + /* Save arm_neon_saved_state64 */ + + stp q0, q1, [x0, NS64_Q0] + stp q2, q3, [x0, NS64_Q2] + stp q4, q5, [x0, NS64_Q4] + stp q6, q7, [x0, NS64_Q6] + stp q8, q9, [x0, NS64_Q8] + stp q10, q11, [x0, NS64_Q10] + stp q12, q13, [x0, NS64_Q12] + stp q14, q15, [x0, NS64_Q14] + stp q16, q17, [x0, NS64_Q16] + stp q18, q19, [x0, NS64_Q18] + stp q20, q21, [x0, NS64_Q20] + stp q22, q23, [x0, NS64_Q22] + stp q24, q25, [x0, NS64_Q24] + stp q26, q27, [x0, NS64_Q26] + stp q28, q29, [x0, NS64_Q28] + stp q30, q31, [x0, NS64_Q30] + + mrs x22, ELR_EL1 // Get exception link register + mrs x23, SPSR_EL1 // Load CPSR into var reg x23 + mrs x24, FPSR + mrs x25, FPCR #if defined(HAS_APPLE_PAC) -/* Save x1 and LR to preserve across call */ 
-mov x21, x1 -mov x20, lr + .if \mode != HIBERNATE_MODE + /** + * Restore kernel keys if: + * + * - Entering the kernel from EL0, and + * - CPU lacks fast A-key switching (fast A-key switching is + * implemented by reprogramming KERNKey on context switch) + */ + .if \mode == KERNEL_MODE +#if HAS_PAC_SLOW_A_KEY_SWITCHING + IF_PAC_FAST_A_KEY_SWITCHING Lskip_restore_kernel_keys_\@, x21 + and x21, x23, #(PSR64_MODE_EL_MASK) + cmp x21, #(PSR64_MODE_EL0) + bne Lskip_restore_kernel_keys_\@ -/* - * Create thread state signature - * - * Arg0: The ARM context pointer - * Arg1: The PC value to sign - * Arg2: The CPSR value to sign - * Arg3: The LR value to sign - * Arg4: The X16 value to sign - * Arg5: The X17 value to sign - */ -mov x1, x22 -mov w2, w23 -mov x3, x20 -mov x4, x16 -mov x5, x17 -bl _ml_sign_thread_state - -mov lr, x20 -mov x1, x21 + MOV64 x2, KERNEL_JOP_ID + mrs x3, TPIDR_EL1 + ldr x3, [x3, ACT_CPUDATAP] + REPROGRAM_JOP_KEYS Lskip_restore_kernel_keys_\@, x2, x3, x4 + isb sy +Lskip_restore_kernel_keys_\@: +#endif /* HAS_PAC_SLOW_A_KEY_SWITCHING */ + .endif /* \mode == KERNEL_MODE */ + + /* Save x1 and LR to preserve across call */ + mov x21, x1 + mov x20, lr + + /* + * Create thread state signature + * + * Arg0: The ARM context pointer + * Arg1: The PC value to sign + * Arg2: The CPSR value to sign + * Arg3: The LR value to sign + * Arg4: The X16 value to sign + * Arg5: The X17 value to sign + */ + mov x1, x22 + mov w2, w23 + mov x3, x20 + mov x4, x16 + mov x5, x17 + bl _ml_sign_thread_state + mov lr, x20 + mov x1, x21 + .endif #endif /* defined(HAS_APPLE_PAC) */ -str x22, [x0, SS64_PC] // Save ELR to PCB -str w23, [x0, SS64_CPSR] // Save CPSR to PCB -str w24, [x0, NS64_FPSR] -str w25, [x0, NS64_FPCR] + str x22, [x0, SS64_PC] // Save ELR to PCB + str w23, [x0, SS64_CPSR] // Save CPSR to PCB + str w24, [x0, NS64_FPSR] + str w25, [x0, NS64_FPCR] -mrs x20, FAR_EL1 -mrs x21, ESR_EL1 + mrs x20, FAR_EL1 + mrs x21, ESR_EL1 -str x20, [x0, SS64_FAR] -str w21, [x0, SS64_ESR] + str x20, [x0, SS64_FAR] + str w21, [x0, SS64_ESR] .endmacro .macro DEADLOOP -b . + b . .endmacro + +// SP0 is expected to already be selected +.macro SWITCH_TO_KERN_STACK + ldr x1, [x1, TH_KSTACKPTR] // Load the top of the kernel stack to x1 + mov sp, x1 // Set the stack pointer to the kernel stack +.endmacro + +// SP0 is expected to already be selected +.macro SWITCH_TO_INT_STACK + mrs x1, TPIDR_EL1 + ldr x1, [x1, ACT_CPUDATAP] + ldr x1, [x1, CPU_ISTACKPTR] + mov sp, x1 // Set the stack pointer to the interrupt stack +.endmacro + +/* + * REENABLE_DAIF + * + * Restores the DAIF bits to their original state (well, the AIF bits at least). + * arg0 - DAIF bits (read from the DAIF interface) to restore + */ +.macro REENABLE_DAIF + /* AIF enable. */ + tst $0, #(DAIF_IRQF | DAIF_FIQF | DAIF_ASYNCF) + b.eq 3f + + /* IF enable. */ + tst $0, #(DAIF_IRQF | DAIF_FIQF) + b.eq 2f + + /* A enable. */ + tst $0, #(DAIF_ASYNCF) + b.eq 1f + + /* Enable nothing. */ + b 4f + + /* A enable. */ +1: + msr DAIFClr, #(DAIFSC_ASYNCF) + b 4f + + /* IF enable. */ +2: + msr DAIFClr, #(DAIFSC_IRQF | DAIFSC_FIQF) + b 4f + + /* AIF enable. */ +3: + msr DAIFClr, #(DAIFSC_IRQF | DAIFSC_FIQF | DAIFSC_ASYNCF) + + /* Done! 
*/ +4: +.endmacro + diff --git a/osfmk/arm64/genassym.c b/osfmk/arm64/genassym.c index 511460bdc..75501d846 100644 --- a/osfmk/arm64/genassym.c +++ b/osfmk/arm64/genassym.c @@ -85,6 +85,10 @@ #include #include #include +#if HIBERNATION +#include +#include +#endif /* HIBERNATION */ /* * genassym.c is used to produce an @@ -114,8 +118,10 @@ main(int argc, DECLARE("TH_RECOVER", offsetof(struct thread, recover)); DECLARE("TH_KSTACKPTR", offsetof(struct thread, machine.kstackptr)); + DECLARE("TH_THREAD_ID", offsetof(struct thread, thread_id)); #if defined(HAS_APPLE_PAC) DECLARE("TH_ROP_PID", offsetof(struct thread, machine.rop_pid)); + DECLARE("TH_JOP_PID", offsetof(struct thread, machine.jop_pid)); DECLARE("TH_DISABLE_USER_JOP", offsetof(struct thread, machine.disable_user_jop)); #endif /* defined(HAS_APPLE_PAC) */ @@ -127,11 +133,15 @@ main(int argc, DECLARE("ACT_DEBUGDATA", offsetof(struct thread, machine.DebugData)); DECLARE("TH_IOTIER_OVERRIDE", offsetof(struct thread, iotier_override)); DECLARE("TH_RWLOCK_CNT", offsetof(struct thread, rwlock_count)); + DECLARE("TH_TMP_ALLOC_CNT", offsetof(struct thread, t_temp_alloc_count)); + DECLARE("TH_TASK", offsetof(struct thread, task)); #if defined(HAS_APPLE_PAC) DECLARE("TASK_ROP_PID", offsetof(struct task, rop_pid)); + DECLARE("TASK_JOP_PID", offsetof(struct task, jop_pid)); #endif /* defined(HAS_APPLE_PAC) */ + DECLARE("ARM_CONTEXT_SIZE", sizeof(arm_context_t)); DECLARE("SS_FLAVOR", offsetof(arm_context_t, ss.ash.flavor)); @@ -146,6 +156,7 @@ main(int argc, DECLARE("SS64_X10", offsetof(arm_context_t, ss.ss_64.x[10])); DECLARE("SS64_X12", offsetof(arm_context_t, ss.ss_64.x[12])); DECLARE("SS64_X14", offsetof(arm_context_t, ss.ss_64.x[14])); + DECLARE("SS64_X15", offsetof(arm_context_t, ss.ss_64.x[15])); DECLARE("SS64_X16", offsetof(arm_context_t, ss.ss_64.x[16])); DECLARE("SS64_X18", offsetof(arm_context_t, ss.ss_64.x[18])); DECLARE("SS64_X19", offsetof(arm_context_t, ss.ss_64.x[19])); @@ -202,6 +213,41 @@ main(int argc, DECLARE("NS64_FPSR", offsetof(arm_context_t, ns.ns_64.fpsr)); DECLARE("NS64_FPCR", offsetof(arm_context_t, ns.ns_64.fpcr)); + DECLARE("ARM_KERNEL_CONTEXT_SIZE", sizeof(arm_kernel_context_t)); + + DECLARE("SS64_KERNEL_X16", offsetof(arm_kernel_context_t, ss.x[0])); + DECLARE("SS64_KERNEL_X17", offsetof(arm_kernel_context_t, ss.x[1])); + DECLARE("SS64_KERNEL_X19", offsetof(arm_kernel_context_t, ss.x[2])); + DECLARE("SS64_KERNEL_X20", offsetof(arm_kernel_context_t, ss.x[3])); + DECLARE("SS64_KERNEL_X21", offsetof(arm_kernel_context_t, ss.x[4])); + DECLARE("SS64_KERNEL_X22", offsetof(arm_kernel_context_t, ss.x[5])); + DECLARE("SS64_KERNEL_X23", offsetof(arm_kernel_context_t, ss.x[6])); + DECLARE("SS64_KERNEL_X24", offsetof(arm_kernel_context_t, ss.x[7])); + DECLARE("SS64_KERNEL_X25", offsetof(arm_kernel_context_t, ss.x[8])); + DECLARE("SS64_KERNEL_X26", offsetof(arm_kernel_context_t, ss.x[9])); + DECLARE("SS64_KERNEL_X27", offsetof(arm_kernel_context_t, ss.x[10])); + DECLARE("SS64_KERNEL_X28", offsetof(arm_kernel_context_t, ss.x[11])); + DECLARE("SS64_KERNEL_FP", offsetof(arm_kernel_context_t, ss.fp)); + DECLARE("SS64_KERNEL_LR", offsetof(arm_kernel_context_t, ss.lr)); + DECLARE("SS64_KERNEL_SP", offsetof(arm_kernel_context_t, ss.sp)); + DECLARE("SS64_KERNEL_PC", offsetof(arm_kernel_context_t, ss.pc)); + DECLARE("SS64_KERNEL_CPSR", offsetof(arm_kernel_context_t, ss.cpsr)); +#if defined(HAS_APPLE_PAC) + DECLARE("SS64_KERNEL_JOPHASH", offsetof(arm_kernel_context_t, ss.jophash)); +#endif /* defined(HAS_APPLE_PAC) */ + + 
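The *_KERNEL_* constants above and below mirror the new, smaller arm_kernel_context_t used for kernel-only thread state, so cswitch.s can keep using symbolic offsets that track the C layout. The DECLARE() mechanism itself is the usual genassym trick of turning C expressions (typically offsetof()) into assembler-visible constants; a stand-alone illustration of the idea (hypothetical structure and output format, not xnu's actual implementation):

#include <stddef.h>
#include <stdio.h>

struct example_ctx {
    unsigned long x[12];            /* callee-saved x19..x28 plus x16/x17 */
    unsigned long fp, lr, sp, pc;
};

#define DECLARE(SYM, VAL) \
    printf("#define %s\t%lu\n", (SYM), (unsigned long)(VAL))

int
main(void)
{
    /* Emits lines an assembly file can include, e.g. "#define EX_FP 96". */
    DECLARE("EX_FP", offsetof(struct example_ctx, fp));
    DECLARE("EX_LR", offsetof(struct example_ctx, lr));
    return 0;
}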
DECLARE("NS64_KERNEL_D8", offsetof(arm_kernel_context_t, ns.d[0])); + DECLARE("NS64_KERNEL_D9", offsetof(arm_kernel_context_t, ns.d[1])); + DECLARE("NS64_KERNEL_D10", offsetof(arm_kernel_context_t, ns.d[2])); + DECLARE("NS64_KERNEL_D11", offsetof(arm_kernel_context_t, ns.d[3])); + DECLARE("NS64_KERNEL_D12", offsetof(arm_kernel_context_t, ns.d[4])); + DECLARE("NS64_KERNEL_D13", offsetof(arm_kernel_context_t, ns.d[5])); + DECLARE("NS64_KERNEL_D14", offsetof(arm_kernel_context_t, ns.d[6])); + DECLARE("NS64_KERNEL_D15", offsetof(arm_kernel_context_t, ns.d[7])); + + DECLARE("NS64_KERNEL_FPCR", offsetof(arm_kernel_context_t, ns.fpcr)); + + DECLARE("PGBYTES", ARM_PGBYTES); DECLARE("PGSHIFT", ARM_PGSHIFT); @@ -210,8 +256,6 @@ main(int argc, DECLARE("KERNEL_STACK_SIZE", KERNEL_STACK_SIZE); DECLARE("TBI_MASK", TBI_MASK); - DECLARE("MAX_CPUS", MAX_CPUS); - DECLARE("cdeSize", sizeof(struct cpu_data_entry)); DECLARE("cdSize", sizeof(struct cpu_data)); @@ -231,7 +275,6 @@ main(int argc, DECLARE("CPU_STAT_IRQ_WAKE", offsetof(cpu_data_t, cpu_stat.irq_ex_cnt_wake)); DECLARE("CPU_RESET_HANDLER", offsetof(cpu_data_t, cpu_reset_handler)); DECLARE("CPU_PHYS_ID", offsetof(cpu_data_t, cpu_phys_id)); - DECLARE("CLUSTER_MASTER", offsetof(cpu_data_t, cluster_master)); DECLARE("RTCLOCKDataSize", sizeof(rtclock_data_t)); @@ -247,6 +290,7 @@ main(int argc, DECLARE("INTSTACK_SIZE", INTSTACK_SIZE); DECLARE("EXCEPSTACK_SIZE", EXCEPSTACK_SIZE); + DECLARE("PAGE_MAX_SHIFT", PAGE_MAX_SHIFT); DECLARE("PAGE_MAX_SIZE", PAGE_MAX_SIZE); DECLARE("BA_VIRT_BASE", offsetof(struct boot_args, virtBase)); @@ -269,7 +313,23 @@ main(int argc, #if defined(HAS_APPLE_PAC) DECLARE("CPU_ROP_KEY", offsetof(cpu_data_t, rop_key)); + DECLARE("CPU_JOP_KEY", offsetof(cpu_data_t, jop_key)); +#if __has_feature(ptrauth_function_pointer_type_discrimination) + DECLARE("THREAD_CONTINUE_T_DISC", __builtin_ptrauth_type_discriminator(thread_continue_t)); +#else + DECLARE("THREAD_CONTINUE_T_DISC", 0); +#endif /* __has_feature(ptrauth_function_pointer_type_discrimination) */ #endif /* defined(HAS_APPLE_PAC) */ + + +#if HIBERNATION + DECLARE("HIBHDR_STACKOFFSET", offsetof(IOHibernateImageHeader, restore1StackOffset)); + DECLARE("HIBTRAMP_TTBR0", offsetof(pal_hib_tramp_result_t, ttbr0)); + DECLARE("HIBTRAMP_TTBR1", offsetof(pal_hib_tramp_result_t, ttbr1)); + DECLARE("HIBTRAMP_MEMSLIDE", offsetof(pal_hib_tramp_result_t, memSlide)); + DECLARE("HIBTRAMP_KERNELSLIDE", offsetof(pal_hib_tramp_result_t, kernelSlide)); +#endif /* HIBERNATION */ + return 0; } diff --git a/osfmk/arm64/hibernate_arm64.c b/osfmk/arm64/hibernate_arm64.c new file mode 100644 index 000000000..5a3e356b4 --- /dev/null +++ b/osfmk/arm64/hibernate_arm64.c @@ -0,0 +1,305 @@ +/* + * Copyright (c) 2019 Apple Inc. All rights reserved. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. + * + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. 
+ * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ + */ +/*! + * ARM64-specific functions required to support hibernation entry, and also to + * support hibernation exit after wired pages have already been restored. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#if HIBERNATE_HMAC_IMAGE +#include +#include +#endif /* HIBERNATE_HMAC_IMAGE */ + +extern void +qsort(void *a, size_t n, size_t es, int (*cmp)(const void *, const void *)); + +void +pal_hib_teardown_pmap_structs(__unused addr64_t *unneeded_start, __unused addr64_t *unneeded_end) +{ +} + +void +pal_hib_rebuild_pmap_structs(void) +{ +} + +static void +set_dram_range(hibernate_bitmap_t *range, uint64_t start_addr, uint64_t size) +{ + uint64_t first_page = atop_64(start_addr); + uint64_t page_count = atop_64(size); + uint64_t last_page = first_page + page_count - 1; + + range->first_page = (uint32_t)first_page; + assert(range->first_page == first_page); // make sure the truncation wasn't lossy + + range->last_page = (uint32_t)last_page; + assert(range->last_page == last_page); // make sure the truncation wasn't lossy +} + +// Comparison function used to sort the DRAM ranges list. +static int +dram_range_compare(const void *a, const void *b) +{ + return ((const hibernate_bitmap_t *)a)->first_page - ((const hibernate_bitmap_t *)b)->first_page; +} + +hibernate_page_list_t * +hibernate_page_list_allocate(boolean_t log) +{ + vm_size_t size; + uint32_t bank; + uint32_t pages, page_count; + hibernate_page_list_t * list; + hibernate_bitmap_t * bitmap; + +#if HIBERNATE_HMAC_IMAGE + // Determine if any PPL-owned I/O ranges need to be hibernated, and if so, + // allocate bitmaps to represent those pages. + const ppl_hib_io_range *io_ranges = NULL; + uint16_t num_io_ranges = 0; + hibernate_bitmap_t * dram_ranges = NULL; + uint32_t num_banks = 1; + + ppl_hmac_get_io_ranges(&io_ranges, &num_io_ranges); + + // Allocate a single DRAM range to cover kernel-managed memory and one range + // per PPL-owned I/O range that needs to be hibernated. + if (io_ranges != NULL && num_io_ranges > 0) { + num_banks += num_io_ranges; + } + + dram_ranges = kheap_alloc(KHEAP_TEMP, + num_banks * sizeof(hibernate_bitmap_t), Z_WAITOK); + if (!dram_ranges) { + return NULL; + } + + // The 0th dram range is used to represent kernel-managed memory, so skip it + // when adding I/O ranges. + for (unsigned int i = 1; i < num_banks; ++i) { + dram_ranges[i].first_page = io_ranges[i - 1].first_page; + dram_ranges[i].last_page = (io_ranges[i - 1].first_page + io_ranges[i - 1].page_count) - 1; + } +#else + // Allocate a single DRAM range to cover the kernel-managed memory. 
+ hibernate_bitmap_t dram_ranges[1]; + uint32_t num_banks = sizeof(dram_ranges) / sizeof(dram_ranges[0]); +#endif /* HIBERNATE_HMAC_IMAGE */ + + // All of kernel-managed memory can be described by one DRAM range + set_dram_range(&dram_ranges[0], gPhysBase, gPhysSize); + + // Sort the DRAM ranges based on the first page. Other parts of the hibernation + // flow expect these ranges to be in order. + qsort((void*)dram_ranges, num_banks, sizeof(dram_ranges[0]), dram_range_compare); + + // size the hibernation bitmap + + size = sizeof(hibernate_page_list_t); + page_count = 0; + for (bank = 0; bank < num_banks; bank++) { + pages = dram_ranges[bank].last_page + 1 - dram_ranges[bank].first_page; + page_count += pages; + size += sizeof(hibernate_bitmap_t) + ((pages + 31) >> 5) * sizeof(uint32_t); + } + + list = (hibernate_page_list_t *)kalloc(size); + if (!list) { + goto out; + } + + list->list_size = (uint32_t)size; + list->page_count = page_count; + list->bank_count = num_banks; + + // convert to hibernation bitmap. + + bitmap = &list->bank_bitmap[0]; + for (bank = 0; bank < num_banks; bank++) { + bitmap->first_page = dram_ranges[bank].first_page; + bitmap->last_page = dram_ranges[bank].last_page; + bitmap->bitmapwords = (bitmap->last_page + 1 + - bitmap->first_page + 31) >> 5; + if (log) { + HIBLOG("hib bank[%d]: 0x%llx (%d) end 0x%llx (%d)\n", + bank, + ptoa_64(bitmap->first_page), bitmap->first_page, + ptoa_64(bitmap->last_page), bitmap->last_page); + } + bitmap = (hibernate_bitmap_t *) &bitmap->bitmap[bitmap->bitmapwords]; + } + +out: +#if HIBERNATE_HMAC_IMAGE + kheap_free(KHEAP_TEMP, dram_ranges, + num_banks * sizeof(hibernate_bitmap_t)); +#endif /* HIBERNATE_HMAC_IMAGE */ + + return list; +} + +void +pal_hib_get_stack_pages(vm_offset_t *first_page, vm_offset_t *page_count) +{ + vm_offset_t stack_end = BootCpuData.intstack_top; + vm_offset_t stack_begin = stack_end - INTSTACK_SIZE; + *first_page = atop_64(kvtophys(stack_begin)); + *page_count = atop_64(round_page(stack_end) - trunc_page(stack_begin)); +} + +// mark pages not to be saved, but available for scratch usage during restore +void +hibernate_page_list_setall_machine(hibernate_page_list_t * page_list, + hibernate_page_list_t * page_list_wired, + boolean_t preflight, + uint32_t * pagesOut) +{ + vm_offset_t stack_first_page, stack_page_count; + pal_hib_get_stack_pages(&stack_first_page, &stack_page_count); + + extern pmap_paddr_t pmap_stacks_start_pa, pmap_stacks_end_pa; + vm_offset_t pmap_stack_page_count = atop_64(pmap_stacks_end_pa - pmap_stacks_start_pa); + + if (!preflight) { + // mark the stack as unavailable for clobbering during restore; + // we won't actually save it because we mark these pages as free + // in hibernate_page_list_set_volatile + hibernate_set_page_state(page_list, page_list_wired, + stack_first_page, stack_page_count, + kIOHibernatePageStateWiredSave); + + // Mark the PPL stack as not needing to be saved. Any PPL memory that is + // excluded from the image will need to be explicitly checked for in + // pmap_check_ppl_hashed_flag_all(). That function ensures that all + // PPL pages are contained within the image (so any memory explicitly + // not being saved, needs to be removed from the check). 
+ hibernate_set_page_state(page_list, page_list_wired, + atop_64(pmap_stacks_start_pa), pmap_stack_page_count, + kIOHibernatePageStateFree); + } + *pagesOut += stack_page_count; + *pagesOut -= pmap_stack_page_count; +} + +// mark pages not to be saved and not for scratch usage during restore +void +hibernate_page_list_set_volatile(hibernate_page_list_t * page_list, + hibernate_page_list_t * page_list_wired, + uint32_t * pagesOut) +{ + vm_offset_t page, count; + + // hibernation restore runs on the interrupt stack, + // so we need to make sure we don't save it + pal_hib_get_stack_pages(&page, &count); + hibernate_set_page_state(page_list, page_list_wired, + page, count, + kIOHibernatePageStateFree); + *pagesOut -= count; +} + +kern_return_t +hibernate_processor_setup(IOHibernateImageHeader * header) +{ + cpu_datap(master_cpu)->cpu_hibernate = 1; + header->processorFlags = 0; + return KERN_SUCCESS; +} + +static boolean_t hibernate_vm_locks_safe; + +void +hibernate_vm_lock(void) +{ + if (kIOHibernateStateHibernating == gIOHibernateState) { + hibernate_vm_lock_queues(); + hibernate_vm_locks_safe = TRUE; + } +} + +void +hibernate_vm_unlock(void) +{ + assert(FALSE == ml_get_interrupts_enabled()); + if (kIOHibernateStateHibernating == gIOHibernateState) { + hibernate_vm_unlock_queues(); + } + ml_set_is_quiescing(TRUE); +} + +// processor_doshutdown() calls hibernate_vm_lock() and hibernate_vm_unlock() on sleep with interrupts disabled. +// ml_hibernate_active_post() calls hibernate_vm_lock_end() on wake before interrupts are enabled. +// VM locks are safely single threaded between hibernate_vm_lock() and hibernate_vm_lock_end(). + +void +hibernate_vm_lock_end(void) +{ + assert(FALSE == ml_get_interrupts_enabled()); + hibernate_vm_locks_safe = FALSE; +} + +boolean_t +hibernate_vm_locks_are_safe(void) +{ + assert(FALSE == ml_get_interrupts_enabled()); + return hibernate_vm_locks_safe; +} + +void +pal_hib_init(void) +{ +#if HIBERNATE_HMAC_IMAGE + gHibernateGlobals.hmacRegBase = ppl_hmac_get_reg_base(); +#endif /* HIBERNATE_HMAC_IMAGE */ +} + +void +pal_hib_write_hook(void) +{ +} diff --git a/osfmk/arm64/hibernate_ppl_hmac.c b/osfmk/arm64/hibernate_ppl_hmac.c new file mode 100644 index 000000000..dcd22884e --- /dev/null +++ b/osfmk/arm64/hibernate_ppl_hmac.c @@ -0,0 +1,299 @@ +/* + * Copyright (c) 2019 Apple Inc. All rights reserved. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. + * + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. 
+ * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ + */ +/** + * These functions are wrappers around the PPL HIB extension. They provide a + * higher level interface to the PPL HIB ioctl interface, and include logic for + * turning the HMAC block on when necessary. Refer to the comments in the PPL HIB + * extension for more details. + */ +#include "hibernate_ppl_hmac.h" + +#include +#include +#include +#include +#include +#include "pal_hibernate.h" +#include + +#if XNU_MONITOR_PPL_HIB + + +#error New SoC defined in board_config.h that supports PPL HIB but no \ + embedded headers included in hibernate_ppl_hmac.c for that SoC. + + +#include +#include + +static ppl_iommu_state *pplHmacState; +static void *pplHmacScratchPage; + +static void +ppl_hmac_enable_aes_ps(void) +{ + static vm_address_t aes_ps_reg_base; + if (!aes_ps_reg_base) { + /* map the AES PS registers */ + aes_ps_reg_base = ml_io_map(PMGR_REG_BASE, PAGE_SIZE); + } + volatile uint32_t *psreg = (volatile uint32_t *)(aes_ps_reg_base + PMGR_AES_OFFSET); + // set PS_MANUAL to on + *psreg |= 0xf; + while ((*psreg & 0xf) != ((*psreg >> 4) & 0xf)) { + // poll until the block's PS_ACTUAL matches PS_MANUAL + } +} + +static int +hibernate_compress_page(const void *src, void *dst) +{ + assert((((uint64_t)src) & PAGE_MASK) == 0); + assert((((uint64_t)dst) & 63) == 0); + struct { + uint32_t count:8; + uint32_t svp:1; + uint32_t reserved:3; + uint32_t status:3; + uint32_t reserved2:17; + uint32_t popcnt:18; + uint32_t reserved3:14; + } result = { .status = ~0u }; + __asm__ volatile ("wkdmc %0, %1" : "=r"(result): "r"(dst), "0"(src)); + if (result.status) { + return -1; + } + if (result.svp) { + return 0; + } + return (result.count + 1) * 64; +} + +/* initialize context needed for ppl computations */ +kern_return_t +ppl_hmac_init(void) +{ + // don't initialize ppl_hib if hibernation isn't supported + if (!ppl_hib_hibernation_supported()) { + return KERN_FAILURE; + } + + if (!pplHmacState) { + /* construct context needed to talk to PPL */ + + ppl_iommu_state *pplState = NULL; + vm_address_t hmac_reg_base = 0; + + // turn on AES_PS + ppl_hmac_enable_aes_ps(); + + // set up the hmac engine + hmac_reg_base = ml_io_map(HMAC_REG_BASE, PAGE_SIZE); + ppl_hib_init_data init_data = { .version = PPL_HIB_VERSION, .hmac_reg_base = hmac_reg_base }; + kern_return_t kr = pmap_iommu_init(ppl_hib_get_desc(), "HMAC", &init_data, sizeof(init_data), &pplState); + if (kr != KERN_SUCCESS) { + printf("ppl_hmac_init: failed to initialize PPL state object: 0x%x\n", kr); + if (hmac_reg_base) { + ml_io_unmap(hmac_reg_base, PAGE_SIZE); + } + return kr; + } + + pplHmacState = pplState; + } + + return KERN_SUCCESS; +} + +/** + * Reset state for a new signature. + * + * @param wired_pages True if this context will be used to hash wired pages (image1), + * false otherwise (image2). + */ +void +ppl_hmac_reset(bool wired_pages) +{ + // make sure AES_PS is on + ppl_hmac_enable_aes_ps(); + + kern_return_t kr = pmap_iommu_ioctl(pplHmacState, PPL_HIB_IOCTL_RESET, + &wired_pages, sizeof(wired_pages), NULL, 0); + if (kr != KERN_SUCCESS) { + panic("ppl_hmac_reset: PPL ioctl PPL_HIB_IOCTL_RESET failed: 0x%x\n", kr); + } +} + +/** + * Inform HMAC driver that we're going to hibernate. 
+ */ +void +ppl_hmac_hibernate_begin(void) +{ + uintptr_t scratchPage = 0; + kern_return_t kr = pmap_iommu_map(pplHmacState, NULL, 0, 0, &scratchPage); + if (kr != KERN_SUCCESS) { + panic("ppl_register_scratch_page: pmap_iommu_map failed: 0x%x\n", kr); + } + pplHmacScratchPage = (void *)scratchPage; +} + +/** + * Inform HMAC driver that we're done hibernating. + */ +void +ppl_hmac_hibernate_end(void) +{ + pmap_iommu_unmap(pplHmacState, NULL, 0, 0, NULL); + pplHmacScratchPage = NULL; +} + +/* get the hmac register base */ +vm_address_t +ppl_hmac_get_reg_base(void) +{ + return HMAC_REG_BASE; +} + +/** + * Update the PPL HMAC hash computation with the given page. + * + * @param pageNumber Page to add into the hash. + * @param uncompressed Out parameter that receives a pointer to the uncompressed data of the given page. + * @param compressed Buffer that will receive the compressed content of the given page + * @result The compressed size, 0 if the page was a single repeated value, or -1 if the page failed to compress. + */ +int +ppl_hmac_update_and_compress_page(ppnum_t pageNumber, void **uncompressed, void *compressed) +{ + kern_return_t kr = pmap_iommu_ioctl(pplHmacState, PPL_HIB_IOCTL_UPDATE_AND_COPY_PAGE, + &pageNumber, sizeof(pageNumber), NULL, 0); + if (kr != KERN_SUCCESS) { + panic("ppl_hmac_update_and_compress_page: PPL ioctl PPL_HIB_IOCTL_UPDATE_PAGE failed: 0x%x\n", kr); + } + // page was copied to scratch, so compress it into compressed + int result; + if (uncompressed) { + *uncompressed = pplHmacScratchPage; + } + if (compressed) { + result = hibernate_compress_page(pplHmacScratchPage, compressed); + } else { + result = 0; + } + return result; +} + +/* finalize HMAC calculation */ +void +ppl_hmac_final(uint8_t *output, size_t outputLen) +{ + if (outputLen != HMAC_HASH_SIZE) { + panic("ppl_hmac_final: outputLen should be %d but is %zu\n", HMAC_HASH_SIZE, outputLen); + } + uint8_t hashOutput[HMAC_HASH_SIZE]; + kern_return_t kr = pmap_iommu_ioctl(pplHmacState, PPL_HIB_IOCTL_FINAL, NULL, 0, hashOutput, sizeof(hashOutput)); + if (kr != KERN_SUCCESS) { + panic("ppl_hmac_final: PPL ioctl PPL_HIB_IOCTL_FINAL failed: 0x%x\n", kr); + } + memcpy(output, hashOutput, HMAC_HASH_SIZE); +} + +/* HMAC the hibseg and get metadata */ +void +ppl_hmac_fetch_hibseg_and_info(void *buffer, + uint64_t bufferLen, + IOHibernateHibSegInfo *info) +{ + kern_return_t kr = pmap_iommu_ioctl(pplHmacState, PPL_HIB_IOCTL_FETCH_HIBSEG, NULL, 0, buffer, bufferLen); + if (kr != KERN_SUCCESS) { + panic("ppl_hmac_fetch_hibseg_and_info: PPL ioctl PPL_HIB_IOCTL_FETCH_HIBSEG failed: 0x%x\n", kr); + } + IOHibernateHibSegInfo segInfo; + kr = pmap_iommu_ioctl(pplHmacState, PPL_HIB_IOCTL_FETCH_HIBSEG_INFO, NULL, 0, &segInfo, sizeof(segInfo)); + if (kr != KERN_SUCCESS) { + panic("ppl_hmac_fetch_hibseg_and_info: PPL ioctl PPL_HIB_IOCTL_FETCH_HIBSEG_INFO failed: 0x%x\n", kr); + } + memcpy(info, &segInfo, sizeof(segInfo)); +} + +/* HMAC the entire read-only region, or compare to previous HMAC */ +void +ppl_hmac_compute_rorgn_hmac(void) +{ + kern_return_t kr = pmap_iommu_ioctl(pplHmacState, PPL_HIB_IOCTL_COMPUTE_RORGN_HMAC, NULL, 0, NULL, 0); + if (kr != KERN_SUCCESS) { + panic("ppl_hmac_compute_rorgn_hmac: PPL ioctl PPL_HIB_IOCTL_COMPUTE_RORGN_HMAC failed: 0x%x\n", kr); + } +} + +/** + * Finish hashing the hibernation image and return out the signed hash. This also + * hashes the hibernation header. 
+ */ +void +ppl_hmac_finalize_image(const void *header, size_t headerLen, uint8_t *hmac, size_t hmacLen) +{ + if (hmacLen != HMAC_HASH_SIZE) { + panic("ppl_hmac_finalize_image: hmacLen should be %d but is %zu\n", HMAC_HASH_SIZE, hmacLen); + } + uint8_t hashOutput[HMAC_HASH_SIZE]; + kern_return_t kr = pmap_iommu_ioctl(pplHmacState, PPL_HIB_IOCTL_FINALIZE_IMAGE, header, headerLen, hashOutput, sizeof(hashOutput)); + if (kr != KERN_SUCCESS) { + panic("ppl_hmac_finalize_image: PPL ioctl PPL_HIB_IOCTL_FINALIZE_IMAGE failed: 0x%x\n", kr); + } + memcpy(hmac, hashOutput, HMAC_HASH_SIZE); +} + + +/** + * Return back an array of I/O ranges that need to be included within the hibernation + * image. If there are no I/O ranges that need hashing, then `*io_ranges` will be + * NULL and `*num_io_ranges` will be zero. + */ +void +ppl_hmac_get_io_ranges(const ppl_hib_io_range **io_ranges, uint16_t *num_io_ranges) +{ + assert((io_ranges != NULL) && (num_io_ranges != NULL)); + + ppl_hib_get_io_ranges_data io; + kern_return_t kr = pmap_iommu_ioctl(pplHmacState, PPL_HIB_IOCTL_GET_IO_RANGES, NULL, 0, &io, sizeof(io)); + if (kr != KERN_SUCCESS) { + panic("ppl_hmac_finalize_image: PPL ioctl PPL_HIB_IOCTL_GET_IO_RANGES failed: 0x%x\n", kr); + } + + /** + * This returns back pointers to PPL-owned data but this is fine since the + * caller only needs read-only access to this data (and the kernel has RO + * access to PPL-owned memory). + */ + *io_ranges = io.ranges; + *num_io_ranges = io.num_io_ranges; +} + +#endif /* XNU_MONITOR_PPL_HIB */ diff --git a/osfmk/arm64/hibernate_ppl_hmac.h b/osfmk/arm64/hibernate_ppl_hmac.h new file mode 100644 index 000000000..4a82645ac --- /dev/null +++ b/osfmk/arm64/hibernate_ppl_hmac.h @@ -0,0 +1,59 @@ +/* + * Copyright (c) 2019 Apple Inc. All rights reserved. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. + * + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. 
+ * + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ + */ +#ifndef _ARM64_HIBERNATE_PPL_HMAC_H_ +#define _ARM64_HIBERNATE_PPL_HMAC_H_ + +#include +#include +#include +#include +#include +#include +#include + +#define HMAC_HASH_SIZE 48 + +__BEGIN_DECLS + +kern_return_t ppl_hmac_init(void); +void ppl_hmac_reset(bool wired_pages); +void ppl_hmac_hibernate_begin(void); +void ppl_hmac_hibernate_end(void); +vm_address_t ppl_hmac_get_reg_base(void); +int ppl_hmac_update_and_compress_page(ppnum_t pageNumber, void **uncompressed, void *compressed); +void ppl_hmac_final(uint8_t *output, size_t outputLen); +void ppl_hmac_fetch_hibseg_and_info(/* out */ void *buffer, + /* in */ uint64_t bufferLen, + /* out */ IOHibernateHibSegInfo *info); +void ppl_hmac_compute_rorgn_hmac(void); +void ppl_hmac_finalize_image(const void *header, size_t headerLen, uint8_t *hmac, size_t hmacLen); +void ppl_hmac_get_io_ranges(const ppl_hib_io_range **io_ranges, uint16_t *num_io_ranges); + +__END_DECLS + +#endif /* _ARM64_HIBERNATE_PPL_HMAC_H_ */ diff --git a/osfmk/arm64/hibernate_restore.c b/osfmk/arm64/hibernate_restore.c new file mode 100644 index 000000000..8519f0a8c --- /dev/null +++ b/osfmk/arm64/hibernate_restore.c @@ -0,0 +1,480 @@ +/* + * Copyright (c) 2019 Apple Inc. All rights reserved. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. + * + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ + */ +/*! + * ARM64-specific functions required to support hibernation exit. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#if HIBERNATE_HMAC_IMAGE +#include +#include +#include +#endif /* HIBERNATE_HMAC_IMAGE */ + +pal_hib_tramp_result_t gHibTramp; +pal_hib_globals_t gHibernateGlobals; + +uintptr_t +hibernate_restore_phys_page(uint64_t src, uint64_t dst, uint32_t len, __unused uint32_t procFlags) +{ + void *d = (void*)pal_hib_map(DEST_COPY_AREA, dst); + __nosan_memcpy(d, (void*)src, len); + return (uintptr_t)d; +} + +uintptr_t +pal_hib_map(pal_hib_map_type_t virt, uint64_t phys) +{ + switch (virt) { + case DEST_COPY_AREA: + case COPY_PAGE_AREA: + case SCRATCH_AREA: + case WKDM_AREA: + return phys + gHibTramp.memSlide; + case BITMAP_AREA: + case IMAGE_AREA: + case IMAGE2_AREA: + return phys; + default: + HIB_ASSERT(0); + } +} + +void +pal_hib_restore_pal_state(__unused uint32_t *arg) +{ +} + +void +pal_hib_resume_init(pal_hib_ctx_t *ctx, hibernate_page_list_t *map, uint32_t *nextFree) +{ +#if HIBERNATE_HMAC_IMAGE + extern void AccelerateCrypto_SHA256_compress(ccdigest_state_t state, size_t numBlocks, const void *data); + ctx->di = (struct ccdigest_info){ + .output_size = CCSHA256_OUTPUT_SIZE, + .state_size = CCSHA256_STATE_SIZE, + .block_size = CCSHA256_BLOCK_SIZE, + .oid_size = ccoid_sha256_len, + .oid = CC_DIGEST_OID_SHA256, + .initial_state = ccsha256_initial_state, + .compress = AccelerateCrypto_SHA256_compress, + .final = ccdigest_final_64be, + }; + + SHA256_CTX shaCtx; + + // validate signature of handoff + uint32_t handoffPages = gIOHibernateCurrentHeader->handoffPages; + uint32_t handoffPageCount = gIOHibernateCurrentHeader->handoffPageCount; + + void *handoffSrc = (void *)pal_hib_map(IMAGE_AREA, ptoa_64(handoffPages)); + ppl_hib_init_context(&ctx->di, &shaCtx, 'HOFF'); + ccdigest_update(&ctx->di, shaCtx.ctx, sizeof(handoffPages), &handoffPages); + ccdigest_update(&ctx->di, shaCtx.ctx, sizeof(handoffPageCount), &handoffPageCount); + ccdigest_update(&ctx->di, shaCtx.ctx, ptoa_64(handoffPageCount), handoffSrc); + uint8_t handoffHMAC[CCSHA384_OUTPUT_SIZE]; + ppl_hib_compute_hmac(&ctx->di, &shaCtx, gHibernateGlobals.hmacRegBase, handoffHMAC); + HIB_ASSERT(__nosan_memcmp(handoffHMAC, gIOHibernateCurrentHeader->handoffHMAC, sizeof(handoffHMAC)) == 0); + + // construct a hibernate_scratch_t for storing all of the pages we restored + hibernate_scratch_init(&ctx->pagesRestored, map, nextFree); +#endif /* HIBERNATE_HMAC_IMAGE */ +} + +void +pal_hib_restored_page(pal_hib_ctx_t *ctx, pal_hib_restore_stage_t stage, ppnum_t ppnum) +{ +#if HIBERNATE_HMAC_IMAGE + if (stage != pal_hib_restore_stage_handoff_data) { + // remember that we restored this page + hibernate_scratch_write(&ctx->pagesRestored, &ppnum, sizeof(ppnum)); + } +#endif /* HIBERNATE_HMAC_IMAGE */ +} + +void +pal_hib_patchup(pal_hib_ctx_t *ctx) +{ +#if HIBERNATE_HMAC_IMAGE + // compute and validate the HMAC for the wired pages (image1) + SHA256_CTX shaCtx; + + hibernate_scratch_start_read(&ctx->pagesRestored); + uint64_t pageCount = ctx->pagesRestored.totalLength / sizeof(ppnum_t); + ppl_hib_init_context(&ctx->di, &shaCtx, 'PAG1'); + for (uint64_t i = 0; i < pageCount; i++) { + ppnum_t ppnum; + hibernate_scratch_read(&ctx->pagesRestored, &ppnum, sizeof(ppnum)); + vm_offset_t virtAddr = pal_hib_map(DEST_COPY_AREA, ptoa_64(ppnum)); + ccdigest_update(&ctx->di, shaCtx.ctx, sizeof(ppnum), &ppnum); + ccdigest_update(&ctx->di, shaCtx.ctx, PAGE_SIZE, (void *)virtAddr); + } + uint8_t image1PagesHMAC[CCSHA384_OUTPUT_SIZE]; + ppl_hib_compute_hmac(&ctx->di, &shaCtx, 
gHibernateGlobals.hmacRegBase, image1PagesHMAC); + HIB_ASSERT(__nosan_memcmp(image1PagesHMAC, gIOHibernateCurrentHeader->image1PagesHMAC, sizeof(image1PagesHMAC)) == 0); +#endif /* HIBERNATE_HMAC_IMAGE */ + + // DRAM pages are captured from a PPL context, so here we restore all cpu_data structures to a non-PPL context + extern struct pmap_cpu_data_array_entry pmap_cpu_data_array[MAX_CPUS]; + for (int i = 0; i < MAX_CPUS; i++) { + pmap_cpu_data_array[i].cpu_data.ppl_state = PPL_STATE_KERNEL; + pmap_cpu_data_array[i].cpu_data.ppl_kern_saved_sp = 0; + } + + // cluster CTRR state needs to be reconfigured + init_ctrr_cluster_states(); + + // Calls into the pmap that could potentially modify pmap data structures + // during image copying were explicitly blocked on hibernation entry. + // Resetting this variable to false allows those calls to be made again. + extern bool hib_entry_pmap_lockdown; + hib_entry_pmap_lockdown = false; +} + +void +pal_hib_decompress_page(void *src, void *dst, void *scratch, unsigned int compressedSize) +{ + const void *wkdmSrc; + if (((uint64_t)src) & 63) { + // the wkdm instruction requires that our source buffer be aligned, so copy into an aligned buffer if necessary + __nosan_memcpy(scratch, src, compressedSize); + wkdmSrc = scratch; + } else { + wkdmSrc = src; + } + HIB_ASSERT((((uint64_t)wkdmSrc) & 63) == 0); + HIB_ASSERT((((uint64_t)dst) & PAGE_MASK) == 0); + struct { + uint32_t reserved:12; + uint32_t status:3; + uint32_t reserved2:17; + uint32_t popcnt:18; + uint32_t reserved3:14; + } result = { .status = ~0u }; + __asm__ volatile ("wkdmd %0, %1" : "=r"(result): "r"(dst), "0"(wkdmSrc)); + HIB_ASSERT(result.status == 0); +} + +// proc_reg's ARM_TTE_TABLE_NS has both NSTABLE and NS set +#define ARM_LPAE_NSTABLE 0x8000000000000000ULL + +#define TOP_LEVEL 1 +#define LAST_TABLE_LEVEL 3 +#define PAGE_GRANULE_SHIFT 14 +#define PAGE_GRANULE_SIZE ((size_t)1< sizeof(uint64_t)) { + *(volatile uint64_t *)s = 0; + s += sizeof(uint64_t); + n -= sizeof(uint64_t); + } + while (n > sizeof(uint32_t)) { + *(volatile uint32_t *)s = 0; + s += sizeof(uint32_t); + n -= sizeof(uint32_t); + } + while (n) { + *(volatile char *)s = 0; + s++; + n--; + } +} + +static uint64_t +allocate_page(map_ctx *ctx) +{ + // pages that were unnecessary for preservation when we entered hibernation are + // marked as free in ctx->bitmap, so they are available for scratch usage during + // resume; here, we "borrow" one of these free pages to use as part of our temporary + // page tables + ppnum_t ppnum = hibernate_page_list_grab(ctx->bitmap, &ctx->nextFree); + hibernate_page_bitset(ctx->bitmap, FALSE, ppnum); + uint64_t result = ptoa_64(ppnum); + hib_bzero((void *)result, PAGE_SIZE); + return result; +} + +static void +create_map_entries(map_ctx *ctx, uint64_t vaddr, uint64_t paddr, uint64_t size, uint64_t map_flags) +{ + // if we've set gHibTramp.memSlide, we should already be running with the MMU on; + // in this case, we don't permit further modification to the page table + HIB_ASSERT(!gHibTramp.memSlide); + + int level = TOP_LEVEL; + volatile uint64_t *table_base = (uint64_t *)ctx->page_table_base; + if (map_flags == 0) { + paddr = 0; // no physical address for none mappings + } + + while (size) { + HIB_ASSERT(level >= 1); + HIB_ASSERT(level <= LAST_TABLE_LEVEL); + + size_t level_shift = LEVEL_SHIFT(level); + size_t level_entries = PAGE_GRANULE_SIZE / sizeof(uint64_t); + size_t level_size = 1ull << level_shift; + size_t level_mask = level_size - 1; + size_t index = (vaddr >> level_shift) & 
(level_entries - 1); + // Can we make block entries here? Must be permitted at this + // level, have enough bytes remaining, and both virtual and + // physical addresses aligned to a block. + if ((level >= 2) && + size >= level_size && + ((vaddr | paddr) & level_mask) == 0) { + // Map contiguous blocks. + size_t num_entries = MIN(size / level_size, level_entries - index); + if (map_flags) { + uint64_t entry = map_flags | ((level < LAST_TABLE_LEVEL) ? ARM_TTE_TYPE_BLOCK : ARM_TTE_TYPE_L3BLOCK); + for (size_t i = 0; i < num_entries; i++) { + HIB_ASSERT(PTE_EMPTY(table_base[index + i])); + table_base[index + i] = entry | paddr; + paddr += level_size; + } + } else { + // make sure all the corresponding entries are empty + for (size_t i = 0; i < num_entries; i++) { + HIB_ASSERT(PTE_EMPTY(table_base[index + i])); + } + } + size_t mapped = num_entries * level_size; + size -= mapped; + if (size) { + // map the remaining at the top level + level = TOP_LEVEL; + table_base = (uint64_t *)ctx->page_table_base; + vaddr += mapped; + // paddr already incremented above if necessary + } + } else { + // Sub-divide into a next level table. + HIB_ASSERT(level < LAST_TABLE_LEVEL); + uint64_t entry = table_base[index]; + HIB_ASSERT((entry & (ARM_TTE_VALID | ARM_TTE_TYPE_MASK)) != (ARM_TTE_VALID | ARM_TTE_TYPE_BLOCK)); // Breaking down blocks not implemented + uint64_t sub_base = entry & ARM_TTE_TABLE_MASK; + if (!sub_base) { + sub_base = allocate_page(ctx); + HIB_ASSERT((sub_base & PAGE_GRANULE_MASK) == 0); + table_base[index] = sub_base | ARM_LPAE_NSTABLE | ARM_TTE_TYPE_TABLE | ARM_TTE_VALID; + } + // map into the sub table + level++; + table_base = (uint64_t *)sub_base; + } + } +} + +static void +map_range_start_end(map_ctx *ctx, uint64_t start, uint64_t end, uint64_t slide, uint64_t flags) +{ + HIB_ASSERT(end >= start); + create_map_entries(ctx, start + slide, start, end - start, flags); +} + +#define MAP_FLAGS_COMMON (ARM_PTE_AF | ARM_PTE_NS | ARM_TTE_VALID | ARM_PTE_SH(SH_OUTER_MEMORY) | ARM_PTE_ATTRINDX(CACHE_ATTRINDX_WRITEBACK)) +#define MAP_DEVICE (ARM_PTE_AF | ARM_TTE_VALID | ARM_PTE_PNX | ARM_PTE_NX | ARM_PTE_SH(SH_NONE) | ARM_PTE_ATTRINDX(CACHE_ATTRINDX_DISABLE)) +#define MAP_RO (MAP_FLAGS_COMMON | ARM_PTE_PNX | ARM_PTE_NX | ARM_PTE_AP(AP_RONA)) +#define MAP_RW (MAP_FLAGS_COMMON | ARM_PTE_PNX | ARM_PTE_NX) +#define MAP_RX (MAP_FLAGS_COMMON | ARM_PTE_AP(AP_RONA)) + +static void +map_register_page(map_ctx *ctx, vm_address_t regPage) +{ + uint64_t regBase = trunc_page(regPage); + if (regBase) { + map_range_start_end(ctx, regBase, regBase + PAGE_SIZE, 0, MAP_DEVICE); + } +} + +static void +iterate_bitmaps(const map_ctx *ctx, bool (^callback)(const hibernate_bitmap_t *bank_bitmap)) +{ + hibernate_bitmap_t *bank_bitmap = &ctx->bitmap->bank_bitmap[0]; + for (uint32_t bank = 0; bank < ctx->bitmap->bank_count; bank++) { + if (!callback(bank_bitmap)) { + return; + } + bank_bitmap = (hibernate_bitmap_t*)&bank_bitmap->bitmap[bank_bitmap->bitmapwords]; + } +} + +// during hibernation resume, we can't use the original kernel page table (because we don't know what it was), so we instead +// create a temporary page table to use during hibernation resume; since the original kernel page table was part of DRAM, +// it will be restored by the time we're done with hibernation resume, at which point we can jump through the reset vector +// to reload the original page table +void +pal_hib_resume_tramp(uint32_t headerPpnum) +{ + uint64_t header_phys = ptoa_64(headerPpnum); + IOHibernateImageHeader *header = 
(IOHibernateImageHeader *)header_phys; + IOHibernateHibSegInfo *seg_info = &header->hibSegInfo; + uint64_t hib_text_start = ptoa_64(header->restore1CodePhysPage); + + __block map_ctx ctx = {}; + uint64_t map_phys = header_phys + + (offsetof(IOHibernateImageHeader, fileExtentMap) + + header->fileExtentMapSize + + ptoa_32(header->restore1PageCount) + + header->previewSize); + ctx.bitmap = (hibernate_page_list_t *)map_phys; + + // find the bank describing xnu's map + __block uint64_t phys_start = 0, phys_end = 0; + iterate_bitmaps(&ctx, ^bool (const hibernate_bitmap_t *bank_bitmap) { + if ((bank_bitmap->first_page <= header->restore1CodePhysPage) && + (bank_bitmap->last_page >= header->restore1CodePhysPage)) { + phys_start = ptoa_64(bank_bitmap->first_page); + phys_end = ptoa_64(bank_bitmap->last_page) + PAGE_SIZE; + return false; + } + return true; + }); + + HIB_ASSERT(phys_start != 0); + HIB_ASSERT(phys_end != 0); + + hib_bzero(&gHibTramp, sizeof(gHibTramp)); + gHibTramp.kernelSlide = header->restore1CodeVirt - hib_text_start; + + // During hibernation resume, we create temporary mappings that do not collide with where any of the kernel mappings were originally. + // Technically, non-collision isn't a requirement, but doing this means that if some code accidentally jumps to a VA in the original + // kernel map, it won't be present in our temporary map and we'll get an exception when jumping to an unmapped address. + // The base address of our temporary mappings is adjusted by a random amount as a "poor-man's ASLR". We don’t have a good source of random + // numbers in this context, so we just use some of the bits from one of imageHeaderHMMAC, which should be random enough. + uint16_t rand = (uint16_t)(((header->imageHeaderHMAC[0]) << 8) | header->imageHeaderHMAC[1]); + uint64_t mem_slide = gHibTramp.kernelSlide - (phys_end - phys_start) * 4 - rand * 256 * PAGE_SIZE; + + // make sure we don't clobber any of the pages we need for restore + hibernate_reserve_restore_pages(header_phys, header, ctx.bitmap); + + // init nextFree + hibernate_page_list_grab(ctx.bitmap, &ctx.nextFree); + + // map ttbr1 pages + ctx.page_table_base = allocate_page(&ctx); + gHibTramp.ttbr1 = ctx.page_table_base; + + uint64_t first_seg_start = 0, last_seg_end = 0, hib_text_end = 0; + for (size_t i = 0; i < NUM_HIBSEGINFO_SEGMENTS; i++) { + uint64_t size = ptoa_64(seg_info->segments[i].pageCount); + if (size) { + uint64_t seg_start = ptoa_64(seg_info->segments[i].physPage); + uint64_t seg_end = seg_start + size; + uint32_t protection = seg_info->segments[i].protection; + if (protection != VM_PROT_NONE) { + // make sure the segment is in bounds + HIB_ASSERT(seg_start >= phys_start); + HIB_ASSERT(seg_end <= phys_end); + + if (!first_seg_start) { + first_seg_start = seg_start; + } + if (last_seg_end) { + // map the "hole" as RW + map_range_start_end(&ctx, last_seg_end, seg_start, mem_slide, MAP_RW); + } + // map the segments described in machine_header at their original locations + bool executable = (protection & VM_PROT_EXECUTE); + bool writeable = (protection & VM_PROT_WRITE); + uint64_t map_flags = executable ? MAP_RX : writeable ? 
MAP_RW : MAP_RO; + map_range_start_end(&ctx, seg_start, seg_end, gHibTramp.kernelSlide, map_flags); + last_seg_end = seg_end; + } + if (seg_info->segments[i].physPage == header->restore1CodePhysPage) { + // this is the hibtext segment, so remember where it ends + hib_text_end = seg_end; + } + } + } + // map the rest of kernel memory (the pages that come before and after our segments) as RW + map_range_start_end(&ctx, phys_start, first_seg_start, mem_slide, MAP_RW); + map_range_start_end(&ctx, last_seg_end, phys_end, mem_slide, MAP_RW); + + // map all of the remaining banks that we didn't already deal with + iterate_bitmaps(&ctx, ^bool (const hibernate_bitmap_t *bank_bitmap) { + uint64_t bank_start = ptoa_64(bank_bitmap->first_page); + uint64_t bank_end = ptoa_64(bank_bitmap->last_page) + PAGE_SIZE; + if (bank_start == phys_start) { + // skip this bank since we already covered it above + } else { + // map the bank RW + map_range_start_end(&ctx, bank_start, bank_end, mem_slide, MAP_RW); + } + return true; + }); + + // map ttbr0 pages + ctx.page_table_base = allocate_page(&ctx); + gHibTramp.ttbr0 = ctx.page_table_base; + + // map hib text P=V so that we can still execute at its physical address + map_range_start_end(&ctx, hib_text_start, hib_text_end, 0, MAP_RX); + + // map the hib image P=V, RW + uint64_t image_start = trunc_page(header_phys); + uint64_t image_end = round_page(header_phys + header->image1Size); + map_range_start_end(&ctx, image_start, image_end, 0, MAP_RW); + + // map the handoff pages P=V, RO + image_start = ptoa_64(header->handoffPages); + image_end = image_start + ptoa_64(header->handoffPageCount); + map_range_start_end(&ctx, image_start, image_end, 0, MAP_RO); + + // map some device register pages + if (gHibernateGlobals.dockChannelRegBase) { +#define dockchannel_uart_base gHibernateGlobals.dockChannelRegBase + vm_address_t dockChannelRegBase = trunc_page(&rDOCKCHANNELS_DEV_WSTAT(DOCKCHANNEL_UART_CHANNEL)); + map_register_page(&ctx, dockChannelRegBase); + } + map_register_page(&ctx, gHibernateGlobals.hibUartRegBase); + map_register_page(&ctx, gHibernateGlobals.hmacRegBase); + + gHibTramp.memSlide = mem_slide; +} diff --git a/osfmk/arm64/instructions.h b/osfmk/arm64/instructions.h new file mode 100644 index 000000000..4e4326097 --- /dev/null +++ b/osfmk/arm64/instructions.h @@ -0,0 +1,166 @@ +/* + * Copyright (c) 2019 Apple Inc. All rights reserved. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. + * + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. 
+ * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ + */ +#ifndef _INSTRUCTIONS_H_ +#define _INSTRUCTIONS_H_ + +#define ARM64_INSTR_CAS_MASK (0x3fa07c00) +#define ARM64_INSTR_CAS_BITS (0x08a07c00) +#define ARM64_INSTR_IS_CAS(x) (((x) & ARM64_INSTR_CAS_MASK) == ARM64_INSTR_CAS_BITS) + +#define ARM64_INSTR_CAS_SZ_MASK 0x3 +#define ARM64_INSTR_CAS_SZ_SHIFT 30 +#define ARM64_INSTR_CAS_SZ_GET(x) (((x) >> ARM64_INSTR_CAS_SZ_SHIFT) & ARM64_INSTR_CAS_SZ_MASK) + +#define ARM64_INSTR_CAS_A_MASK 0x1 +#define ARM64_INSTR_CAS_A_SHIFT 22 +#define ARM64_INSTR_CAS_A_GET(x) (((x) >> ARM64_INSTR_CAS_A_SHIFT) & ARM64_INSTR_CAS_A_MASK) + +#define ARM64_INSTR_CAS_RS_MASK 0x1f +#define ARM64_INSTR_CAS_RS_SHIFT 16 +#define ARM64_INSTR_CAS_RS_GET(x) (((x) >> ARM64_INSTR_CAS_RS_SHIFT) & ARM64_INSTR_CAS_RS_MASK) + +#define ARM64_INSTR_CAS_R_MASK 0x1 +#define ARM64_INSTR_CAS_R_SHIFT 15 +#define ARM64_INSTR_CAS_R_GET(x) (((x) >> ARM64_INSTR_CAS_R_SHIFT) & ARM64_INSTR_CAS_R_MASK) + +#define ARM64_INSTR_CAS_RN_MASK 0x1f +#define ARM64_INSTR_CAS_RN_SHIFT 5 +#define ARM64_INSTR_CAS_RN_GET(x) (((x) >> ARM64_INSTR_CAS_RN_SHIFT) & ARM64_INSTR_CAS_RN_MASK) + +#define ARM64_INSTR_CAS_RT_MASK 0x1f +#define ARM64_INSTR_CAS_RT_SHIFT 0 +#define ARM64_INSTR_CAS_RT_GET(x) (((x) >> ARM64_INSTR_CAS_RT_SHIFT) & ARM64_INSTR_CAS_RT_MASK) + + + +#define ARM64_INSTR_CASP_MASK (0xbfa07c00) +#define ARM64_INSTR_CASP_BITS (0x08207c00) +#define ARM64_INSTR_IS_CASP(x) (((x) & ARM64_INSTR_CASP_MASK) == ARM64_INSTR_CASP_BITS) + +#define ARM64_INSTR_CASP_SZ_MASK 0x1 +#define ARM64_INSTR_CASP_SZ_SHIFT 30 +#define ARM64_INSTR_CASP_SZ_GET(x) (((x) >> ARM64_INSTR_CASP_SZ_SHIFT) & ARM64_INSTR_CASP_SZ_MASK) + +#define ARM64_INSTR_CASP_A_MASK 0x1 +#define ARM64_INSTR_CASP_A_SHIFT 22 +#define ARM64_INSTR_CASP_A_GET(x) (((x) >> ARM64_INSTR_CASP_A_SHIFT) & ARM64_INSTR_CASP_A_MASK) + +#define ARM64_INSTR_CASP_RS_MASK 0x1f +#define ARM64_INSTR_CASP_RS_SHIFT 16 +#define ARM64_INSTR_CASP_RS_GET(x) (((x) >> ARM64_INSTR_CASP_RS_SHIFT) & ARM64_INSTR_CASP_RS_MASK) + +#define ARM64_INSTR_CASP_R_MASK 0x1 +#define ARM64_INSTR_CASP_R_SHIFT 15 +#define ARM64_INSTR_CASP_R_GET(x) (((x) >> ARM64_INSTR_CASP_R_SHIFT) & ARM64_INSTR_CASP_R_MASK) + +#define ARM64_INSTR_CASP_RN_MASK 0x1f +#define ARM64_INSTR_CASP_RN_SHIFT 5 +#define ARM64_INSTR_CASP_RN_GET(x) (((x) >> ARM64_INSTR_CASP_RN_SHIFT) & ARM64_INSTR_CASP_RN_MASK) + +#define ARM64_INSTR_CASP_RT_MASK 0x1f +#define ARM64_INSTR_CASP_RT_SHIFT 0 +#define ARM64_INSTR_CASP_RT_GET(x) (((x) >> ARM64_INSTR_CASP_RT_SHIFT) & ARM64_INSTR_CASP_RT_MASK) + + + +#define ARM64_INSTR_ATOMIC_LDST_MASK (0x3f208c00) +#define ARM64_INSTR_ATOMIC_LDST_BITS (0x38200000) +#define ARM64_INSTR_IS_ATOMIC_LDST(x) (((x) & ARM64_INSTR_ATOMIC_LDST_MASK) == ARM64_INSTR_ATOMIC_LDST_BITS) + +#define ARM64_INSTR_ATOMIC_LDST_SZ_MASK 0x3 +#define ARM64_INSTR_ATOMIC_LDST_SZ_SHIFT 30 +#define ARM64_INSTR_ATOMIC_LDST_SZ_GET(x) (((x) >> ARM64_INSTR_ATOMIC_LDST_SZ_SHIFT) & ARM64_INSTR_ATOMIC_LDST_SZ_MASK) + +#define ARM64_INSTR_ATOMIC_LDST_A_MASK 0x1 +#define ARM64_INSTR_ATOMIC_LDST_A_SHIFT 23 +#define ARM64_INSTR_ATOMIC_LDST_A_GET(x) (((x) >> ARM64_INSTR_ATOMIC_LDST_A_SHIFT) & ARM64_INSTR_ATOMIC_LDST_A_MASK) + +#define ARM64_INSTR_ATOMIC_LDST_R_MASK 0x1 +#define ARM64_INSTR_ATOMIC_LDST_R_SHIFT 22 +#define ARM64_INSTR_ATOMIC_LDST_R_GET(x) (((x) >> ARM64_INSTR_ATOMIC_LDST_R_SHIFT) & ARM64_INSTR_ATOMIC_LDST_R_MASK) + +#define ARM64_INSTR_ATOMIC_LDST_RS_MASK 0x1f 
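The ARM64_INSTR_* definitions above all follow the same MASK/SHIFT/GET pattern. As a sanity check of how they are meant to be used, the sketch below decodes one concrete word, 0xC8A17C62, which should correspond to CAS X1, X2, [X3]; the constants are copied from the CAS definitions above so the snippet builds on its own, and the instruction word itself is only an illustrative value.

#include <stdint.h>
#include <stdio.h>

/* copied from the ARM64_INSTR_CAS_* definitions above so this builds standalone */
#define CAS_MASK              0x3fa07c00u
#define CAS_BITS              0x08a07c00u
#define IS_CAS(x)             (((x) & CAS_MASK) == CAS_BITS)
#define FIELD(x, shift, mask) (((x) >> (shift)) & (mask))

int
main(void)
{
	uint32_t instr = 0xC8A17C62u;   /* expected to decode as CAS X1, X2, [X3] */

	if (!IS_CAS(instr)) {
		printf("not a CAS encoding\n");
		return 1;
	}
	printf("sz=%u (3 means 64-bit)\n", FIELD(instr, 30, 0x3));
	printf("acquire=%u release=%u\n", FIELD(instr, 22, 0x1), FIELD(instr, 15, 0x1));
	printf("Rs=X%u Rn=X%u Rt=X%u\n",
	    FIELD(instr, 16, 0x1f), FIELD(instr, 5, 0x1f), FIELD(instr, 0, 0x1f));
	return 0;
}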
+#define ARM64_INSTR_ATOMIC_LDST_RS_SHIFT 16 +#define ARM64_INSTR_ATOMIC_LDST_RS_GET(x) (((x) >> ARM64_INSTR_ATOMIC_LDST_RS_SHIFT) & ARM64_INSTR_ATOMIC_LDST_RS_MASK) + +#define ARM64_INSTR_ATOMIC_LDST_OPC_ADD 0 +#define ARM64_INSTR_ATOMIC_LDST_OPC_BIC 1 +#define ARM64_INSTR_ATOMIC_LDST_OPC_EOR 2 +#define ARM64_INSTR_ATOMIC_LDST_OPC_ORR 3 +#define ARM64_INSTR_ATOMIC_LDST_OPC_SMAX 4 +#define ARM64_INSTR_ATOMIC_LDST_OPC_SMIN 5 +#define ARM64_INSTR_ATOMIC_LDST_OPC_UMAX 6 +#define ARM64_INSTR_ATOMIC_LDST_OPC_UMIN 7 + +#define ARM64_INSTR_ATOMIC_LDST_OPC_MASK 0x7 +#define ARM64_INSTR_ATOMIC_LDST_OPC_SHIFT 12 +#define ARM64_INSTR_ATOMIC_LDST_OPC_GET(x) (((x) >> ARM64_INSTR_ATOMIC_LDST_OPC_SHIFT) & ARM64_INSTR_ATOMIC_LDST_OPC_MASK) + +#define ARM64_INSTR_ATOMIC_LDST_RN_MASK 0x1f +#define ARM64_INSTR_ATOMIC_LDST_RN_SHIFT 5 +#define ARM64_INSTR_ATOMIC_LDST_RN_GET(x) (((x) >> ARM64_INSTR_ATOMIC_LDST_RN_SHIFT) & ARM64_INSTR_ATOMIC_LDST_RN_MASK) + +#define ARM64_INSTR_ATOMIC_LDST_RT_MASK 0x1f +#define ARM64_INSTR_ATOMIC_LDST_RT_SHIFT 0 +#define ARM64_INSTR_ATOMIC_LDST_RT_GET(x) (((x) >> ARM64_INSTR_ATOMIC_LDST_RT_SHIFT) & ARM64_INSTR_ATOMIC_LDST_RT_MASK) + + + +#define ARM64_INSTR_SWP_MASK (0x3f208c00) +#define ARM64_INSTR_SWP_BITS (0x38208000) +#define ARM64_INSTR_IS_SWP(x) (((x) & ARM64_INSTR_SWP_MASK) == ARM64_INSTR_SWP_BITS) + +#define ARM64_INSTR_SWP_SZ_MASK 0x3 +#define ARM64_INSTR_SWP_SZ_SHIFT 30 +#define ARM64_INSTR_SWP_SZ_GET(x) (((x) >> ARM64_INSTR_SWP_SZ_SHIFT) & ARM64_INSTR_SWP_SZ_MASK) + +#define ARM64_INSTR_SWP_A_MASK 0x1 +#define ARM64_INSTR_SWP_A_SHIFT 23 +#define ARM64_INSTR_SWP_A_GET(x) (((x) >> ARM64_INSTR_SWP_A_SHIFT) & ARM64_INSTR_SWP_A_MASK) + +#define ARM64_INSTR_SWP_R_MASK 0x1 +#define ARM64_INSTR_SWP_R_SHIFT 22 +#define ARM64_INSTR_SWP_R_GET(x) (((x) >> ARM64_INSTR_SWP_R_SHIFT) & ARM64_INSTR_SWP_R_MASK) + +#define ARM64_INSTR_SWP_RS_MASK 0x1f +#define ARM64_INSTR_SWP_RS_SHIFT 16 +#define ARM64_INSTR_SWP_RS_GET(x) (((x) >> ARM64_INSTR_SWP_RS_SHIFT) & ARM64_INSTR_SWP_RS_MASK) + +#define ARM64_INSTR_SWP_OPC_MASK 0x7 +#define ARM64_INSTR_SWP_OPC_SHIFT 12 +#define ARM64_INSTR_SWP_OPC_GET(x) (((x) >> ARM64_INSTR_SWP_OPC_SHIFT) & ARM64_INSTR_SWP_OPC_MASK) + +#define ARM64_INSTR_SWP_RN_MASK 0x1f +#define ARM64_INSTR_SWP_RN_SHIFT 5 +#define ARM64_INSTR_SWP_RN_GET(x) (((x) >> ARM64_INSTR_SWP_RN_SHIFT) & ARM64_INSTR_SWP_RN_MASK) + +#define ARM64_INSTR_SWP_RT_MASK 0x1f +#define ARM64_INSTR_SWP_RT_SHIFT 0 +#define ARM64_INSTR_SWP_RT_GET(x) (((x) >> ARM64_INSTR_SWP_RT_SHIFT) & ARM64_INSTR_SWP_RT_MASK) + +#endif /* _INSTRUCTIONS_H_ */ diff --git a/osfmk/arm64/kpc.c b/osfmk/arm64/kpc.c index f19b8696b..973723ffc 100644 --- a/osfmk/arm64/kpc.c +++ b/osfmk/arm64/kpc.c @@ -28,7 +28,7 @@ #include #include -#include +#include #include #include #include @@ -92,7 +92,7 @@ void kpc_pmi_handler(unsigned int ctr); /* force the CPMU clocks in case of a clocking bug */ #define PMCR0_CLKEN_SHIFT (31) -#define PMCR0_CLKEN_ENABLE_MASK (UINT64_C(1) << PMCR0_USEREN_SHIFT) +#define PMCR0_CLKEN_ENABLE_MASK (UINT64_C(1) << PMCR0_CLKEN_SHIFT) #define PMCR0_CLKEN_DISABLE_MASK (~PMCR0_CLKEN_ENABLE_MASK) /* 32 - 44 mirror the low bits for PMCs 8 and 9 */ @@ -775,7 +775,7 @@ kpc_get_all_cpus_counters(uint32_t classes, int *curcpu, uint64_t *buf) /* grab counters and CPU number as close as possible */ if (curcpu) { - *curcpu = current_processor()->cpu_id; + *curcpu = cpu_number(); } struct kpc_get_counters_remote hdl = { @@ -980,7 +980,37 @@ kpc_pmi_handler(unsigned int ctr) FIXED_SHADOW(ctr) += (kpc_fixed_max() - 
FIXED_RELOAD(ctr) + 1 /* Wrap */) + extra; if (FIXED_ACTIONID(ctr)) { - kpc_sample_kperf(FIXED_ACTIONID(ctr)); + uintptr_t pc = 0; + bool kernel = true; + struct arm_saved_state *state; + state = getCpuDatap()->cpu_int_state; + if (state) { + kernel = !PSR64_IS_USER(get_saved_state_cpsr(state)); + pc = get_saved_state_pc(state); + if (kernel) { + pc = VM_KERNEL_UNSLIDE(pc); + } + } + + uint64_t config = get_counter_config(ctr); + kperf_kpc_flags_t flags = kernel ? KPC_KERNEL_PC : 0; + bool custom_mode = false; + if ((config & CFGWORD_EL0A32EN_MASK) || (config & CFGWORD_EL0A64EN_MASK)) { + flags |= KPC_USER_COUNTING; + custom_mode = true; + } + if ((config & CFGWORD_EL1EN_MASK)) { + flags |= KPC_KERNEL_COUNTING; + custom_mode = true; + } + /* + * For backwards-compatibility. + */ + if (!custom_mode) { + flags |= KPC_USER_COUNTING | KPC_KERNEL_COUNTING; + } + kpc_sample_kperf(FIXED_ACTIONID(ctr), ctr, config & 0xff, FIXED_SHADOW(ctr), + pc, flags); } } diff --git a/osfmk/arm64/locore.s b/osfmk/arm64/locore.s index 75b314e83..b18a335e8 100644 --- a/osfmk/arm64/locore.s +++ b/osfmk/arm64/locore.s @@ -27,6 +27,7 @@ */ #include +#include #include #include #include @@ -35,6 +36,8 @@ #include #include "assym.s" #include +#include +#include "dwarf_unwind.h" #if __ARM_KERNEL_PROTECT__ #include @@ -183,7 +186,7 @@ /* Save the context that was interrupted. */ ldp x2, x3, [x3, SS64_X2] - SPILL_REGISTERS KERNEL_MODE + SPILL_REGISTERS PPL_MODE /* * Stash the function we wish to be invoked to deal with the exception; @@ -303,8 +306,7 @@ * save to kernel stack). * * Expects: - * {x0, x1, sp} - saved - * x0 - SP_EL0 + * {x0, x1} - saved * x1 - Exception syndrome * sp - Saved state * @@ -312,7 +314,7 @@ * */ .macro CHECK_KERNEL_STACK unused - stp x2, x3, [sp, SS64_X2] // Save {x2-x3} + stp x2, x3, [sp, #-16]! 
// Save {x2-x3} and x1, x1, #ESR_EC_MASK // Mask the exception class mov x2, #(ESR_EC_SP_ALIGN << ESR_EC_SHIFT) cmp x1, x2 // If we have a stack alignment exception @@ -320,6 +322,7 @@ mov x2, #(ESR_EC_DABORT_EL1 << ESR_EC_SHIFT) cmp x1, x2 // If we have a data abort, we need to b.ne Lvalid_stack_\@ // ...validate the stack pointer + mrs x0, SP_EL0 // Get SP_EL0 mrs x1, TPIDR_EL1 // Get thread pointer Ltest_kstack_\@: ldr x2, [x1, TH_KSTACKPTR] // Get top of kernel stack @@ -337,19 +340,25 @@ Ltest_istack_\@: cmp x0, x3 // if (SP_EL0 > istack bottom) b.gt Lvalid_stack_\@ // stack pointer valid Lcorrupt_stack_\@: + ldp x2, x3, [sp], #16 + ldp x0, x1, [sp], #16 + sub sp, sp, ARM_CONTEXT_SIZE // Allocate exception frame + stp x0, x1, [sp, SS64_X0] // Save x0, x1 to the exception frame + stp x2, x3, [sp, SS64_X2] // Save x2, x3 to the exception frame + mrs x0, SP_EL0 // Get SP_EL0 + str x0, [sp, SS64_SP] // Save sp to the exception frame INIT_SAVED_STATE_FLAVORS sp, w0, w1 mov x0, sp // Copy exception frame pointer to x0 adrp x1, fleh_invalid_stack@page // Load address for fleh add x1, x1, fleh_invalid_stack@pageoff // fleh_dispatch64 will save register state before we get there - ldp x2, x3, [sp, SS64_X2] // Restore {x2-x3} b fleh_dispatch64 Lvalid_stack_\@: - ldp x2, x3, [sp, SS64_X2] // Restore {x2-x3} + ldp x2, x3, [sp], #16 // Restore {x2-x3} .endmacro #if __ARM_KERNEL_PROTECT__ - .text + .section __DATA_CONST,__const .align 3 .globl EXT(exc_vectors_table) LEXT(exc_vectors_table) @@ -453,6 +462,8 @@ Lel0_serror_vector_64: * END OF EXCEPTION VECTORS PAGE * *********************************/ + + .macro EL1_SP0_VECTOR msr SPSel, #0 // Switch to SP0 sub sp, sp, ARM_CONTEXT_SIZE // Create exception frame @@ -472,20 +483,16 @@ el1_sp0_synchronous_vector_long: b el1_sp0_synchronous_vector_ppl_check Lel1_sp0_synchronous_vector_kernel: #endif - sub sp, sp, ARM_CONTEXT_SIZE // Make space on the exception stack - stp x0, x1, [sp, SS64_X0] // Save x0, x1 to the stack + stp x0, x1, [sp, #-16]! // Save x0 and x1 to the exception stack mrs x1, ESR_EL1 // Get the exception syndrome /* If the stack pointer is corrupt, it will manifest either as a data abort * (syndrome 0x25) or a misaligned pointer (syndrome 0x26). We can check * these quickly by testing bit 5 of the exception class. */ tbz x1, #(5 + ESR_EC_SHIFT), Lkernel_stack_valid - mrs x0, SP_EL0 // Get SP_EL0 - str x0, [sp, SS64_SP] // Save sp to the stack CHECK_KERNEL_STACK Lkernel_stack_valid: - ldp x0, x1, [sp, SS64_X0] // Restore x0, x1 - add sp, sp, ARM_CONTEXT_SIZE // Restore SP1 + ldp x0, x1, [sp], #16 // Restore x0 and x1 from the exception stack EL1_SP0_VECTOR adrp x1, EXT(fleh_synchronous)@page // Load address for fleh add x1, x1, EXT(fleh_synchronous)@pageoff @@ -497,10 +504,7 @@ el1_sp0_irq_vector_long: Lel1_sp0_irq_vector_kernel: #endif EL1_SP0_VECTOR - mrs x1, TPIDR_EL1 - ldr x1, [x1, ACT_CPUDATAP] - ldr x1, [x1, CPU_ISTACKPTR] - mov sp, x1 + SWITCH_TO_INT_STACK adrp x1, EXT(fleh_irq)@page // Load address for fleh add x1, x1, EXT(fleh_irq)@pageoff b fleh_dispatch64 @@ -512,10 +516,7 @@ el1_sp0_fiq_vector_long: Lel1_sp0_fiq_vector_kernel: #endif EL1_SP0_VECTOR - mrs x1, TPIDR_EL1 - ldr x1, [x1, ACT_CPUDATAP] - ldr x1, [x1, CPU_ISTACKPTR] - mov sp, x1 + SWITCH_TO_INT_STACK adrp x1, EXT(fleh_fiq)@page // Load address for fleh add x1, x1, EXT(fleh_fiq)@pageoff b fleh_dispatch64 @@ -579,20 +580,14 @@ el1_sp1_serror_vector_long: #endif .macro EL0_64_VECTOR - mov x18, #0 // Zero x18 to avoid leaking data to user SS stp x0, x1, [sp, #-16]! 
// Save x0 and x1 to the exception stack +#if __ARM_KERNEL_PROTECT__ + mov x18, #0 // Zero x18 to avoid leaking data to user SS +#endif #if defined(HAS_APPLE_PAC) && !(__APCFG_SUPPORTED__ || __APSTS_SUPPORTED__) // enable JOP for kernel - adrp x0, EXT(const_boot_args)@page - add x0, x0, EXT(const_boot_args)@pageoff - ldr x0, [x0, BA_BOOT_FLAGS] - and x0, x0, BA_BOOT_FLAGS_DISABLE_JOP - cbnz x0, 1f - // if disable jop is set, don't touch SCTLR (it's already off) - // if (!boot_args->kernel_jop_disable) { mrs x0, SCTLR_EL1 tbnz x0, SCTLR_PACIA_ENABLED_SHIFT, 1f - // turn on jop for kernel if it isn't already on // if (!jop_running) { MOV64 x1, SCTLR_JOP_KEYS_ENABLED orr x0, x0, x1 @@ -602,7 +597,6 @@ el1_sp1_serror_vector_long: cmp x0, x1 bne . // } - // } 1: #endif /* defined(HAS_APPLE_PAC) && !(__APCFG_SUPPORTED__ || __APSTS_SUPPORTED__) */ mrs x0, TPIDR_EL1 // Load the thread register @@ -614,44 +608,38 @@ el1_sp1_serror_vector_long: ldp x0, x1, [sp], #16 // Restore x0 and x1 from the exception stack msr SPSel, #0 // Switch to SP0 stp x0, x1, [sp, SS64_X0] // Save x0, x1 to the user PCB + mrs x1, TPIDR_EL1 // Load the thread register + + mov x0, sp // Copy the user PCB pointer to x0 + // x1 contains thread register .endmacro el0_synchronous_vector_64_long: - EL0_64_VECTOR - mrs x1, TPIDR_EL1 // Load the thread register - ldr x1, [x1, TH_KSTACKPTR] // Load the top of the kernel stack to x1 - mov sp, x1 // Set the stack pointer to the kernel stack + EL0_64_VECTOR sync + SWITCH_TO_KERN_STACK adrp x1, EXT(fleh_synchronous)@page // Load address for fleh add x1, x1, EXT(fleh_synchronous)@pageoff b fleh_dispatch64 el0_irq_vector_64_long: - EL0_64_VECTOR - mrs x1, TPIDR_EL1 - ldr x1, [x1, ACT_CPUDATAP] - ldr x1, [x1, CPU_ISTACKPTR] - mov sp, x1 // Set the stack pointer to the kernel stack + EL0_64_VECTOR irq + SWITCH_TO_INT_STACK adrp x1, EXT(fleh_irq)@page // load address for fleh add x1, x1, EXT(fleh_irq)@pageoff b fleh_dispatch64 el0_fiq_vector_64_long: - EL0_64_VECTOR - mrs x1, TPIDR_EL1 - ldr x1, [x1, ACT_CPUDATAP] - ldr x1, [x1, CPU_ISTACKPTR] - mov sp, x1 // Set the stack pointer to the kernel stack + EL0_64_VECTOR fiq + SWITCH_TO_INT_STACK adrp x1, EXT(fleh_fiq)@page // load address for fleh add x1, x1, EXT(fleh_fiq)@pageoff b fleh_dispatch64 el0_serror_vector_64_long: - EL0_64_VECTOR - mrs x1, TPIDR_EL1 // Load the thread register - ldr x1, [x1, TH_KSTACKPTR] // Load the top of the kernel stack to x1 - mov sp, x1 // Set the stack pointer to the kernel stack + EL0_64_VECTOR serror + SWITCH_TO_KERN_STACK adrp x1, EXT(fleh_serror)@page // load address for fleh add x1, x1, EXT(fleh_serror)@pageoff b fleh_dispatch64 @@ -744,6 +732,8 @@ fleh_dispatch64: cmp x23, #(PSR64_MODE_EL0) bne 1f + SANITIZE_FPCR x25, x2, 2 // x25 is set to current FPCR by SPILL_REGISTERS +2: mov x2, #0 mov x3, #0 mov x4, #0 @@ -784,7 +774,7 @@ fleh_dispatch64: mov x26, #0 #endif -#if !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME +#if !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME || HAS_FAST_CNTVCT tst x23, PSR64_MODE_EL_MASK // If any EL MODE bits are set, we're coming from b.ne 1f // kernel mode, so skip precise time update PUSH_FRAME @@ -792,7 +782,7 @@ fleh_dispatch64: POP_FRAME mov x0, x21 // Reload arm_context_t pointer 1: -#endif /* !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME */ +#endif /* !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME || HAS_FAST_CNTVCT */ /* Dispatch to FLEH */ @@ -803,6 +793,10 @@ fleh_dispatch64: .align 2 .global EXT(fleh_synchronous) LEXT(fleh_synchronous) + +UNWIND_PROLOGUE +UNWIND_DIRECTIVES + mrs x1, ESR_EL1 // Load 
exception syndrome mrs x2, FAR_EL1 // Load fault address @@ -827,12 +821,14 @@ Lvalid_link_register: CHECK_EXCEPTION_RETURN_DISPATCH_PPL #endif + mov x28, xzr // Don't need to check PFZ if there are ASTs b exception_return_dispatch Lfleh_sync_load_lr: ldr lr, [x0, SS64_LR] b Lvalid_link_register - +UNWIND_EPILOGUE + /* Shared prologue code for fleh_irq and fleh_fiq. * Does any interrupt booking we may want to do * before invoking the handler proper. @@ -901,6 +897,7 @@ LEXT(fleh_irq) CHECK_EXCEPTION_RETURN_DISPATCH_PPL #endif + mov x28, #1 // Set a bit to check PFZ if there are ASTs b exception_return_dispatch .text @@ -923,6 +920,7 @@ LEXT(fleh_fiq) CHECK_EXCEPTION_RETURN_DISPATCH_PPL #endif + mov x28, #1 // Set a bit to check PFZ if there are ASTs b exception_return_dispatch .text @@ -940,6 +938,7 @@ LEXT(fleh_serror) CHECK_EXCEPTION_RETURN_DISPATCH_PPL #endif + mov x28, xzr // Don't need to check PFZ If there are ASTs b exception_return_dispatch /* @@ -999,12 +998,13 @@ Lsp1_serror_str: exception_return_dispatch: ldr w0, [x21, SS64_CPSR] tst w0, PSR64_MODE_EL_MASK - b.ne return_to_kernel // return to kernel if M[3:2] > 0 + b.ne EXT(return_to_kernel) // return to kernel if M[3:2] > 0 b return_to_user .text .align 2 -return_to_kernel: + .global EXT(return_to_kernel) +LEXT(return_to_kernel) tbnz w0, #DAIF_IRQF_SHIFT, exception_return // Skip AST check if IRQ disabled mrs x3, TPIDR_EL1 // Load thread pointer ldr w1, [x3, ACT_PREEMPT_CNT] // Load preemption count @@ -1026,51 +1026,82 @@ LEXT(thread_bootstrap_return) #if CONFIG_DTRACE bl EXT(dtrace_thread_bootstrap) #endif - b EXT(thread_exception_return) + b EXT(arm64_thread_exception_return) .text - .globl EXT(thread_exception_return) -LEXT(thread_exception_return) + .globl EXT(arm64_thread_exception_return) +LEXT(arm64_thread_exception_return) mrs x0, TPIDR_EL1 add x21, x0, ACT_CONTEXT ldr x21, [x21] + mov x28, xzr // - // Fall Through to return_to_user from thread_exception_return. + // Fall Through to return_to_user from arm64_thread_exception_return. // Note that if we move return_to_user or insert a new routine - // below thread_exception_return, the latter will need to change. + // below arm64_thread_exception_return, the latter will need to change. 
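exception_return_dispatch above keys off the EL bits of the saved CPSR: any non-zero M[3:2] means the exception interrupted the kernel. The following is a minimal sketch of that decision, assuming the usual AArch64 layout where the EL field sits in CPSR bits [3:2] (EL0t = 0b0000, EL1h = 0b0101); the mask value used here is an assumption that mirrors PSR64_MODE_EL_MASK rather than a copy of the kernel header.

#include <stdint.h>
#include <stdio.h>

/* Assumed values: SPSR_EL1 keeps the mode in M[3:0], with the exception level
 * in M[3:2]. The test below is the same "is M[3:2] non-zero" check performed by
 * tst w0, PSR64_MODE_EL_MASK / b.ne return_to_kernel above. */
#define MODE_EL_MASK 0x0cu

static const char *
return_target(uint32_t saved_cpsr)
{
	return (saved_cpsr & MODE_EL_MASK) ? "return_to_kernel" : "return_to_user";
}

int
main(void)
{
	printf("EL0t (0x000): %s\n", return_target(0x000));   /* user frame */
	printf("EL1h (0x005): %s\n", return_target(0x005));   /* kernel frame */
	return 0;
}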
// .text +/* x21 is always the machine context pointer when we get here + * x28 is a bit indicating whether or not we should check if pc is in pfz */ return_to_user: check_user_asts: - mrs x3, TPIDR_EL1 // Load thread pointer + mrs x3, TPIDR_EL1 // Load thread pointer movn w2, #0 str w2, [x3, TH_IOTIER_OVERRIDE] // Reset IO tier override to -1 before returning to user #if MACH_ASSERT ldr w0, [x3, TH_RWLOCK_CNT] - cbz w0, 1f // Detect unbalance RW lock/unlock - b rwlock_count_notzero -1: + cbnz w0, rwlock_count_notzero // Detect unbalanced RW lock/unlock + ldr w0, [x3, ACT_PREEMPT_CNT] - cbz w0, 1f - b preempt_count_notzero -1: + cbnz w0, preempt_count_notzero // Detect unbalanced enable/disable preemption #endif - + ldr w0, [x3, TH_TMP_ALLOC_CNT] + cbnz w0, tmp_alloc_count_nozero // Detect KHEAP_TEMP leaks + msr DAIFSet, #DAIFSC_ALL // Disable exceptions ldr x4, [x3, ACT_CPUDATAP] // Get current CPU data pointer ldr x0, [x4, CPU_PENDING_AST] // Get ASTs - cbnz x0, user_take_ast // If pending ASTs, go service them - -#if !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME + cbz x0, no_asts // If no asts, skip ahead + + cbz x28, user_take_ast // If we don't need to check PFZ, just handle asts + + /* At this point, we have ASTs and we need to check whether we are running in the + * preemption free zone (PFZ) or not. No ASTs are handled if we are running in + * the PFZ since we don't want to handle getting a signal or getting suspended + * while holding a spinlock in userspace. + * + * If userspace was in the PFZ, we know (via coordination with the PFZ code + * in commpage_asm.s) that it will not be using x15 and it is therefore safe + * to use it to indicate to userspace to come back to take a delayed + * preemption, at which point the ASTs will be handled. */ + mov x28, xzr // Clear the "check PFZ" bit so that we don't do this again + mov x19, x0 // Save x0 since it will be clobbered by commpage_is_in_pfz64 + + ldr x0, [x21, SS64_PC] // Load pc from machine state + bl EXT(commpage_is_in_pfz64) // pc in pfz? + cbz x0, restore_and_check_ast // No, deal with other asts + + mov x0, #1 + str x0, [x21, SS64_X15] // Mark x15 for userspace to take delayed preemption + mov x0, x19 // restore x0 to asts + b no_asts // pretend we have no asts + +restore_and_check_ast: + mov x0, x19 // restore x0 + b user_take_ast // Service pending asts +no_asts: + + +#if !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME || HAS_FAST_CNTVCT mov x19, x3 // Preserve thread pointer across function call PUSH_FRAME bl EXT(timer_state_event_kernel_to_user) POP_FRAME mov x3, x19 -#endif /* !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME */ +#endif /* !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME || HAS_FAST_CNTVCT */ #if (CONFIG_KERNEL_INTEGRITY && KERNEL_INTEGRITY_WT) /* Watchtower @@ -1101,8 +1132,33 @@ check_user_asts: ldr x4, [x3, ACT_CPUDATAP] // Get current CPU data pointer ldr x1, [x4, CPU_USER_DEBUG] // Get Debug context ldr x0, [x3, ACT_DEBUGDATA] - orr x1, x1, x0 // Thread debug state and live debug state both NULL? 
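The PFZ comment in return_to_user above describes the policy in prose: pending ASTs are not delivered while the interrupted user PC sits in the preemption-free zone; instead x15 is set so userspace asks for a delayed preemption once it leaves the PFZ. A schematic user-space model of that decision follows; the PFZ bounds, struct layout, and helper names are made up for illustration, and only the x15 convention and the commpage_is_in_pfz64() style check come from the code above.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Made-up stand-in for the commpage PFZ range; in the kernel this test is
 * performed by commpage_is_in_pfz64(). */
#define PFZ_START 0x7fff00001000ull
#define PFZ_END   0x7fff00002000ull

struct saved_state {
	uint64_t pc;
	uint64_t x15;   /* userspace polls this after leaving the PFZ */
};

static bool
pc_in_pfz(uint64_t pc)
{
	return pc >= PFZ_START && pc < PFZ_END;
}

/* Model of the ast-delivery decision: pending ASTs are deferred while the
 * interrupted PC sits in the PFZ, and x15 is set so userspace comes back
 * for the preemption later. */
static bool
should_handle_asts_now(struct saved_state *ss, uint64_t pending_asts)
{
	if (pending_asts == 0) {
		return false;
	}
	if (pc_in_pfz(ss->pc)) {
		ss->x15 = 1;    /* tell userspace to take a delayed preemption */
		return false;
	}
	return true;
}

int
main(void)
{
	struct saved_state in_pfz  = { .pc = PFZ_START + 0x40 };
	struct saved_state outside = { .pc = 0x100000000ull };

	printf("in PFZ : handle now=%d, x15=%llu\n",
	    should_handle_asts_now(&in_pfz, 1), (unsigned long long)in_pfz.x15);
	printf("outside: handle now=%d, x15=%llu\n",
	    should_handle_asts_now(&outside, 1), (unsigned long long)outside.x15);
	return 0;
}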
- cbnz x1, user_set_debug_state_and_return // If one or the other non-null, go set debug state + cmp x0, x1 + beq L_skip_user_set_debug_state // If active CPU debug state does not match thread debug state, apply thread state + +#if defined(APPLELIGHTNING) +/* rdar://53177964 ([Cebu Errata SW WA][v8Debug] MDR NEX L3 clock turns OFF during restoreCheckpoint due to SWStep getting masked) */ + + ARM64_IS_PCORE x12 // if we're not a pCORE, also do nothing + cbz x12, 1f + +#endif + +#if defined(APPLELIGHTNING) + + mrs x12, ARM64_REG_HID1 // if any debug session ever existed, set forceNexL3ClkOn + orr x12, x12, ARM64_REG_HID1_forceNexL3ClkOn + msr ARM64_REG_HID1, x12 +1: + +#endif + + PUSH_FRAME + bl EXT(arm_debug_set) // Establish thread debug state in live regs + POP_FRAME + mrs x3, TPIDR_EL1 // Reload thread pointer +L_skip_user_set_debug_state: + + b exception_return_unint_tpidr_x3 // @@ -1118,8 +1174,8 @@ exception_return_unint: exception_return_unint_tpidr_x3: mov sp, x21 // Reload the pcb pointer - /* ARM64_TODO Reserve x18 until we decide what to do with it */ - str xzr, [sp, SS64_X18] +exception_return_unint_tpidr_x3_dont_trash_x18: + #if __ARM_KERNEL_PROTECT__ /* @@ -1153,22 +1209,45 @@ Lexception_return_restore_registers: msr ELR_EL1, x1 // Load the return address into ELR msr SPSR_EL1, x2 // Load the return CPSR into SPSR msr FPSR, x3 - msr FPCR, x4 // Synchronized by ERET + mrs x5, FPCR + CMSR FPCR, x5, x4, 1 +1: -#if defined(HAS_APPLE_PAC) && !(__APCFG_SUPPORTED__ || __APSTS_SUPPORTED__) - /* if eret to userspace, disable JOP */ - tbnz w2, PSR64_MODE_EL_SHIFT, Lskip_disable_jop +#if defined(HAS_APPLE_PAC) + // if (eret to userspace) { + and x2, x2, #(PSR64_MODE_EL_MASK) + cmp x2, #(PSR64_MODE_EL0) + bne Ldone_reconfigure_jop + // thread_t thread = current_thread(); + // bool disable_jop; + // if (arm_user_jop_disabled()) { + // /* if global user JOP disabled, always turn off JOP regardless of thread flag (kernel running with JOP on) */ + // disable_jop = true; + // } else { + // disable_jop = thread->machine.disable_user_jop; + // } +#if DEVELOPMENT || DEBUG adrp x4, EXT(const_boot_args)@page add x4, x4, EXT(const_boot_args)@pageoff ldr x4, [x4, BA_BOOT_FLAGS] - and x1, x4, BA_BOOT_FLAGS_DISABLE_JOP - cbnz x1, Lskip_disable_jop // if global JOP disabled, don't touch SCTLR (kernel JOP is already off) and x1, x4, BA_BOOT_FLAGS_DISABLE_USER_JOP - cbnz x1, Ldisable_jop // if global user JOP disabled, always turn off JOP regardless of thread flag (kernel running with JOP on) + cbnz x1, Ldisable_jop +#endif mrs x2, TPIDR_EL1 - ldr w2, [x2, TH_DISABLE_USER_JOP] - cbz w2, Lskip_disable_jop // if thread has JOP enabled, leave it on (kernel running with JOP on) + ldrb w1, [x2, TH_DISABLE_USER_JOP] + cbz w1, Lenable_jop + // if (disable_jop) { + // if (cpu does not have discrete JOP-at-EL1 bit) { + // disable_sctlr_jop_keys(); + // } + // } else { + // if (cpu does not have fast A-key switching) { + // reprogram_jop_keys(thread->machine.jop_pid); + // } + // } + // } Ldisable_jop: +#if !(__APCFG_SUPPORTED__ || __APSTS_SUPPORTED__) MOV64 x1, SCTLR_JOP_KEYS_ENABLED mrs x4, SCTLR_EL1 bic x4, x4, x1 @@ -1176,8 +1255,17 @@ Ldisable_jop: MOV64 x1, SCTLR_EL1_EXPECTED cmp x4, x1 bne . 
-Lskip_disable_jop: -#endif /* defined(HAS_APPLE_PAC) && !(__APCFG_SUPPORTED__ || __APSTS_SUPPORTED__)*/ +#endif /* !(__APCFG_SUPPORTED__ || __APSTS_SUPPORTED__) */ + b Ldone_reconfigure_jop +Lenable_jop: +#if HAS_PAC_SLOW_A_KEY_SWITCHING + IF_PAC_FAST_A_KEY_SWITCHING Ldone_reconfigure_jop, x1 + ldr x1, [x2, TH_JOP_PID] + ldr x2, [x2, ACT_CPUDATAP] + REPROGRAM_JOP_KEYS Ldone_reconfigure_jop, x1, x2, x3 +#endif /* HAS_PAC_SLOW_A_KEY_SWITCHING */ +Ldone_reconfigure_jop: +#endif /* defined(HAS_APPLE_PAC) */ /* Restore arm_neon_saved_state64 */ ldp q0, q1, [x0, NS64_Q0] @@ -1261,29 +1349,6 @@ user_take_ast: POP_FRAME b check_user_asts // Now try again -user_set_debug_state_and_return: - -#if defined(APPLELIGHTNING) -/* rdar://53177964 ([Cebu Errata SW WA][v8Debug] MDR NEX L3 clock turns OFF during restoreCheckpoint due to SWStep getting masked) */ - - ARM64_IS_PCORE x12 // if we're not a pCORE, also do nothing - cbz x12, 1f - - mrs x12, ARM64_REG_HID1 // if any debug session ever existed, set forceNexL3ClkOn - orr x12, x12, ARM64_REG_HID1_forceNexL3ClkOn - msr ARM64_REG_HID1, x12 -1: - -#endif - - ldr x4, [x3, ACT_CPUDATAP] // Get current CPU data pointer - isb // Synchronize context - PUSH_FRAME - bl EXT(arm_debug_set) // Establish thread debug state in live regs - POP_FRAME - isb - b exception_return_unint // Continue, reloading the thread pointer - .text .align 2 preempt_underflow: @@ -1304,7 +1369,7 @@ rwlock_count_notzero: str x0, [sp, #-16]! // We'll print thread pointer ldr w0, [x0, TH_RWLOCK_CNT] str w0, [sp, #8] - adr x0, L_rwlock_count_notzero_str // Format string + adr x0, L_rwlock_count_notzero_str // Format string CALL_EXTERN panic // Game over L_rwlock_count_notzero_str: @@ -1317,14 +1382,18 @@ preempt_count_notzero: str x0, [sp, #-16]! // We'll print thread pointer ldr w0, [x0, ACT_PREEMPT_CNT] str w0, [sp, #8] - adr x0, L_preempt_count_notzero_str // Format string + adr x0, L_preempt_count_notzero_str // Format string CALL_EXTERN panic // Game over L_preempt_count_notzero_str: .asciz "preemption count not 0 on thread %p (%u)" #endif /* MACH_ASSERT */ -.align 2 + .text + .align 2 +tmp_alloc_count_nozero: + mrs x0, TPIDR_EL1 + CALL_EXTERN kheap_temp_leak_panic #if __ARM_KERNEL_PROTECT__ /* @@ -1420,17 +1489,11 @@ Lcorrupt_ppl_stack: b fleh_invalid_stack fleh_fiq_from_ppl: - mrs x1, TPIDR_EL1 - ldr x1, [x1, ACT_CPUDATAP] - ldr x1, [x1, CPU_ISTACKPTR] - mov sp, x1 + SWITCH_TO_INT_STACK b EXT(fleh_fiq) fleh_irq_from_ppl: - mrs x1, TPIDR_EL1 - ldr x1, [x1, ACT_CPUDATAP] - ldr x1, [x1, CPU_ISTACKPTR] - mov sp, x1 + SWITCH_TO_INT_STACK b EXT(fleh_irq) fleh_serror_from_ppl: @@ -1439,46 +1502,6 @@ fleh_serror_from_ppl: mov sp, x6 b EXT(fleh_serror) -/* - * REENABLE_DAIF - * - * Restores the DAIF bits to their original state (well, the AIF bits at least). - * arg0 - DAIF bits (read from the DAIF interface) to restore - */ -.macro REENABLE_DAIF - /* AIF enable. */ - tst $0, #(DAIF_IRQF | DAIF_FIQF | DAIF_ASYNCF) - b.eq 3f - - /* IF enable. */ - tst $0, #(DAIF_IRQF | DAIF_FIQF) - b.eq 2f - - /* A enable. */ - tst $0, #(DAIF_ASYNCF) - b.eq 1f - - /* Enable nothing. */ - b 4f - - /* A enable. */ -1: - msr DAIFClr, #(DAIFSC_ASYNCF) - b 4f - - /* IF enable. */ -2: - msr DAIFClr, #(DAIFSC_IRQF | DAIFSC_FIQF) - b 4f - - /* AIF enable. */ -3: - msr DAIFClr, #(DAIFSC_IRQF | DAIFSC_FIQF | DAIFSC_ASYNCF) - - /* Done! 
*/ -4: -.endmacro - #if XNU_MONITOR && __APRR_SUPPORTED__ /* @@ -1566,7 +1589,6 @@ LEXT(ppl_trampoline_start) cmp x15, PMAP_COUNT b.hs Lppl_fail_dispatch - /* Get the PPL CPU data structure. */ GET_PMAP_CPU_DATA x12, x13, x14 /* Mark this CPU as being in the PPL. */ @@ -1610,11 +1632,6 @@ Lppl_mark_cpu_as_dispatching: mov w13, #PPL_STATE_DISPATCH str w13, [x12, PMAP_CPU_DATA_PPL_STATE] - /* Get the handler for the request */ - adrp x9, EXT(ppl_handler_table)@page - add x9, x9, EXT(ppl_handler_table)@pageoff - ldr x10, [x9, x15, lsl #3] - /* Switch to the regular PPL stack. */ // TODO: switch to PPL_STACK earlier in gxf_ppl_entry_handler ldr x9, [x12, PMAP_CPU_DATA_PPL_STACK] @@ -1624,10 +1641,15 @@ Lppl_mark_cpu_as_dispatching: // SP0 is now PPL stack mov sp, x9 - /* Save the old stack pointer off in case we need it. */ str x21, [x12, PMAP_CPU_DATA_KERN_SAVED_SP] + /* Get the handler for the request */ + adrp x9, EXT(ppl_handler_table)@page + add x9, x9, EXT(ppl_handler_table)@pageoff + add x9, x9, x15, lsl #3 + ldr x10, [x9] + /* Branch to the code that will invoke the PPL request. */ b EXT(ppl_dispatch) @@ -1802,7 +1824,7 @@ LEXT(ppl_dispatch) /* Invoke the PPL method. */ #ifdef HAS_APPLE_PAC - blraaz x10 + blraa x10, x9 #else blr x10 #endif @@ -1827,11 +1849,12 @@ LEXT(ppl_bootstrap_dispatch) /* Get the requested PPL routine. */ adrp x9, EXT(ppl_handler_table)@page add x9, x9, EXT(ppl_handler_table)@pageoff - ldr x10, [x9, x15, lsl #3] + add x9, x9, x15, lsl #3 + ldr x10, [x9] /* Invoke the requested PPL routine. */ #ifdef HAS_APPLE_PAC - blraaz x10 + blraa x10, x9 #else blr x10 #endif @@ -1865,39 +1888,37 @@ Lppl_fail_bootstrap_dispatch: .align 2 .globl EXT(ml_panic_trap_to_debugger) LEXT(ml_panic_trap_to_debugger) -#if 0 - // TODO: why would we ever want to turn interrupts back on after going down panic path? - /* Grab the current AIF state, and disable AIF. */ mrs x10, DAIF -#endif msr DAIFSet, #(DAIFSC_ASYNCF | DAIFSC_IRQF | DAIFSC_FIQF) - // we want interrupts to stay masked after exiting PPL when calling into panic to halt system - // x10 is used in ppl_return_to_kernel_mode restore desired DAIF state after GEXIT - mrs x10, DAIF + adrp x12, EXT(pmap_ppl_locked_down)@page + ldr w12, [x12, #EXT(pmap_ppl_locked_down)@pageoff] + cbz w12, Lnot_in_ppl_dispatch + + LOAD_PMAP_CPU_DATA x11, x12, x13 + + ldr w12, [x11, PMAP_CPU_DATA_PPL_STATE] + cmp w12, #PPL_STATE_DISPATCH + b.ne Lnot_in_ppl_dispatch /* Indicate (for the PPL->kernel transition) that we are panicking. */ mov x15, #PPL_EXIT_PANIC_CALL - /* Get the PPL per-CPU data. */ - GET_PMAP_CPU_DATA x11, x12, x13 - /* Restore the old stack pointer as we can't push onto PPL stack after we exit PPL */ ldr x12, [x11, PMAP_CPU_DATA_KERN_SAVED_SP] mov sp, x12 - /* - * Mark this CPU as being in the PPL. Halt and catch fire if our state - * machine appears to be broken. - */ - ldr w12, [x11, PMAP_CPU_DATA_PPL_STATE] - cmp w12, #PPL_STATE_DISPATCH - b.ne . + // we want interrupts to stay masked after exiting PPL when calling into panic to halt system + // x10 is used in ppl_return_to_kernel_mode restore desired DAIF state after GEXIT + mrs x10, DAIF mov w13, #PPL_STATE_PANIC str w13, [x11, PMAP_CPU_DATA_PPL_STATE] /* Now we are ready to exit the PPL. 
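/*
 * The dispatch change above replaces a zero-discriminator authenticated
 * branch (blraaz x10) with one diversified by the address of the handler's
 * slot in ppl_handler_table (blraa x10, x9). A conceptual C sketch of the
 * same idea, using clang's <ptrauth.h> intrinsics purely as an illustration;
 * how the real table entries are signed is outside this hunk.
 */
#include <ptrauth.h>

extern void (*ppl_handler_table[])(void);

static void
sketch_ppl_call(uint64_t selector)
{
	void (**slot)(void) = &ppl_handler_table[selector];
	void (*handler)(void) = *slot;

#if __has_feature(ptrauth_calls)
	/* Authenticate with the IA key, using the slot address as the
	 * discriminator, so a pointer copied from another slot (or forged
	 * without the key) fails to authenticate. */
	handler = ptrauth_auth_function(handler, ptrauth_key_function_pointer, slot);
#endif
	handler();
}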
*/ b ppl_return_to_kernel_mode +Lnot_in_ppl_dispatch: + REENABLE_DAIF x10 + ret .data Lppl_bad_call_panic_str: diff --git a/osfmk/arm64/loose_ends.c b/osfmk/arm64/loose_ends.c index 8b7ea91f4..8479f24bf 100644 --- a/osfmk/arm64/loose_ends.c +++ b/osfmk/arm64/loose_ends.c @@ -50,6 +50,7 @@ #include #include +#include #define INT_SIZE (BYTE_SIZE * sizeof (int)) @@ -72,8 +73,12 @@ bcopy_phys_internal(addr64_t src, addr64_t dst, vm_size_t bytes, int flags) addr64_t end __assert_only; kern_return_t res = KERN_SUCCESS; - assert(!__improbable(os_add_overflow(src, bytes, &end))); - assert(!__improbable(os_add_overflow(dst, bytes, &end))); + if (!BCOPY_PHYS_SRC_IS_USER(flags)) { + assert(!__improbable(os_add_overflow(src, bytes, &end))); + } + if (!BCOPY_PHYS_DST_IS_USER(flags)) { + assert(!__improbable(os_add_overflow(dst, bytes, &end))); + } while ((bytes > 0) && (res == KERN_SUCCESS)) { src_offset = src & PAGE_MASK; @@ -650,6 +655,7 @@ bcmp( } #undef memcmp +MARK_AS_HIBERNATE_TEXT int memcmp(const void *s1, const void *s2, size_t n) { @@ -800,3 +806,36 @@ kdp_register_callout(kdp_callout_fn_t fn, void *arg) #pragma unused(fn,arg) } #endif + +/* + * Get a quick virtual mapping of a physical page and run a callback on that + * page's virtual address. + * + * @param dst64 Physical address to access (doesn't need to be page-aligned). + * @param bytes Number of bytes to be accessed. This cannot cross page boundaries. + * @param func Callback function to call with the page's virtual address. + * @param arg Argument passed directly to `func`. + * + * @return The return value from `func`. + */ +int +apply_func_phys( + addr64_t dst64, + vm_size_t bytes, + int (*func)(void * buffer, vm_size_t bytes, void * arg), + void * arg) +{ + /* The physical aperture is only guaranteed to work with kernel-managed addresses. */ + if (!pmap_valid_address(dst64)) { + panic("%s address error: passed in address (%#llx) not a kernel managed address", + __FUNCTION__, dst64); + } + + /* Ensure we stay within a single page */ + if (((((uint32_t)dst64 & (ARM_PGBYTES - 1)) + bytes) > ARM_PGBYTES)) { + panic("%s alignment error: tried accessing addresses spanning more than one page %#llx %#lx", + __FUNCTION__, dst64, bytes); + } + + return func((void*)phystokv(dst64), bytes, arg); +} diff --git a/osfmk/arm64/lowglobals.h b/osfmk/arm64/lowglobals.h index 12aab8548..5fa0ea944 100644 --- a/osfmk/arm64/lowglobals.h +++ b/osfmk/arm64/lowglobals.h @@ -47,34 +47,40 @@ * reflected there as well. 
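/*
 * Usage illustration for apply_func_phys() above; the callback and its
 * checksum are hypothetical. The callee panics if the physical address is
 * not kernel-managed or if the access would cross a page boundary.
 */
static int
sum_bytes_cb(void *buffer, vm_size_t bytes, void *arg)
{
	const uint8_t *p = buffer;
	uint32_t *sum = arg;

	for (vm_size_t i = 0; i < bytes; i++) {
		*sum += p[i];
	}
	return 0;
}

static uint32_t
sum_phys_bytes(addr64_t pa, vm_size_t bytes)
{
	uint32_t sum = 0;

	(void) apply_func_phys(pa, bytes, sum_bytes_cb, &sum);
	return sum;
}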
*/ -#pragma pack(8) /* Make sure the structure stays as we defined it */ +#pragma pack(8) /* Make sure the structure stays as we defined it */ typedef struct lowglo { - unsigned char lgVerCode[8]; /* 0xffffff8000002000 System verification code */ - uint64_t lgZero; /* 0xffffff8000002008 Constant 0 */ - uint64_t lgStext; /* 0xffffff8000002010 Start of kernel text */ - uint64_t lgVersion; /* 0xffffff8000002018 Pointer to kernel version string */ - uint64_t lgOSVersion; /* 0xffffff8000002020 Pointer to OS version string */ - uint64_t lgKmodptr; /* 0xffffff8000002028 Pointer to kmod, debugging aid */ - uint64_t lgTransOff; /* 0xffffff8000002030 Pointer to kdp_trans_off, debugging aid */ - uint64_t lgRebootFlag; /* 0xffffff8000002038 Pointer to debugger reboot trigger */ - uint64_t lgManualPktAddr; /* 0xffffff8000002040 Pointer to manual packet structure */ - uint64_t lgAltDebugger; /* 0xffffff8000002048 Pointer to reserved space for alternate kernel debugger */ - uint64_t lgPmapMemQ; /* 0xffffff8000002050 Pointer to PMAP memory queue */ - uint64_t lgPmapMemPageOffset;/* 0xffffff8000002058 Offset of physical page member in vm_page_t or vm_page_with_ppnum_t */ - uint64_t lgPmapMemChainOffset;/*0xffffff8000002060 Offset of listq in vm_page_t or vm_page_with_ppnum_t */ - uint64_t lgStaticAddr; /* 0xffffff8000002068 Static allocation address */ - uint64_t lgStaticSize; /* 0xffffff8000002070 Static allocation size */ - uint64_t lgLayoutMajorVersion; /* 0xffffff8000002078 Lowglo major layout version */ - uint64_t lgLayoutMagic; /* 0xffffff8000002080 Magic value evaluated to determine if lgLayoutVersion is valid */ - uint64_t lgPmapMemStartAddr; /* 0xffffff8000002088 Pointer to start of vm_page_t array */ - uint64_t lgPmapMemEndAddr; /* 0xffffff8000002090 Pointer to end of vm_page_t array */ - uint64_t lgPmapMemPagesize; /* 0xffffff8000002098 size of vm_page_t */ - uint64_t lgPmapMemFromArrayMask; /* 0xffffff80000020A0 Mask to indicate page is from vm_page_t array */ - uint64_t lgPmapMemFirstppnum; /* 0xffffff80000020A8 physical page number of the first vm_page_t in the array */ - uint64_t lgPmapMemPackedShift; /* 0xffffff80000020B0 alignment of packed pointer */ - uint64_t lgPmapMemPackedBaseAddr;/* 0xffffff80000020B8 base address of that packed pointers are relative to */ - uint64_t lgLayoutMinorVersion; /* 0xffffff80000020C0 Lowglo minor layout version */ - uint64_t lgPageShift; /* 0xffffff80000020C8 number of shifts from page number to size */ + unsigned char lgVerCode[8]; /* 0xffffff8000002000 System verification code */ + uint64_t lgZero; /* 0xffffff8000002008 Constant 0 */ + uint64_t lgStext; /* 0xffffff8000002010 Start of kernel text */ + uint64_t lgVersion; /* 0xffffff8000002018 Pointer to kernel version string */ + uint64_t lgOSVersion; /* 0xffffff8000002020 Pointer to OS version string */ + uint64_t lgKmodptr; /* 0xffffff8000002028 Pointer to kmod, debugging aid */ + uint64_t lgTransOff; /* 0xffffff8000002030 Pointer to kdp_trans_off, debugging aid */ + uint64_t lgRebootFlag; /* 0xffffff8000002038 Pointer to debugger reboot trigger */ + uint64_t lgManualPktAddr; /* 0xffffff8000002040 Pointer to manual packet structure */ + uint64_t lgAltDebugger; /* 0xffffff8000002048 Pointer to reserved space for alternate kernel debugger */ + uint64_t lgPmapMemQ; /* 0xffffff8000002050 Pointer to PMAP memory queue */ + uint64_t lgPmapMemPageOffset; /* 0xffffff8000002058 Offset of physical page member in vm_page_t or vm_page_with_ppnum_t */ + uint64_t lgPmapMemChainOffset; /* 0xffffff8000002060 Offset of 
listq in vm_page_t or vm_page_with_ppnum_t */ + uint64_t lgStaticAddr; /* 0xffffff8000002068 Static allocation address */ + uint64_t lgStaticSize; /* 0xffffff8000002070 Static allocation size */ + uint64_t lgLayoutMajorVersion; /* 0xffffff8000002078 Lowglo major layout version */ + uint64_t lgLayoutMagic; /* 0xffffff8000002080 Magic value evaluated to determine if lgLayoutVersion is valid */ + uint64_t lgPmapMemStartAddr; /* 0xffffff8000002088 Pointer to start of vm_page_t array */ + uint64_t lgPmapMemEndAddr; /* 0xffffff8000002090 Pointer to end of vm_page_t array */ + uint64_t lgPmapMemPagesize; /* 0xffffff8000002098 size of vm_page_t */ + uint64_t lgPmapMemFromArrayMask; /* 0xffffff80000020A0 Mask to indicate page is from vm_page_t array */ + uint64_t lgPmapMemFirstppnum; /* 0xffffff80000020A8 physical page number of the first vm_page_t in the array */ + uint64_t lgPmapMemPackedShift; /* 0xffffff80000020B0 alignment of packed pointer */ + uint64_t lgPmapMemPackedBaseAddr; /* 0xffffff80000020B8 base address of that packed pointers are relative to */ + uint64_t lgLayoutMinorVersion; /* 0xffffff80000020C0 Lowglo minor layout version */ + uint64_t lgPageShift; /* 0xffffff80000020C8 number of shifts from page number to size */ + uint64_t lgVmFirstPhys; /* 0xffffff80000020D0 First physical address of kernel-managed DRAM (inclusive) */ + uint64_t lgVmLastPhys; /* 0xffffff80000020D8 Last physical address of kernel-managed DRAM (exclusive) */ + uint64_t lgPhysMapBase; /* 0xffffff80000020E0 First virtual address of the Physical Aperture (inclusive) */ + uint64_t lgPhysMapEnd; /* 0xffffff80000020E8 Last virtual address of the Physical Aperture (exclusive) */ + uint64_t lgPmapIoRangePtr; /* 0xffffff80000020F0 Pointer to an array of pmap_io_range_t objects obtained from the device tree. */ + uint64_t lgNumPmapIoRanges; /* 0xffffff80000020F8 Number of pmap_io_range regions in the array represented by lgPmapIoRangePtr. 
*/ } lowglo; #pragma pack() diff --git a/osfmk/arm64/lowmem_vectors.c b/osfmk/arm64/lowmem_vectors.c index 2b22f1a6f..947799383 100644 --- a/osfmk/arm64/lowmem_vectors.c +++ b/osfmk/arm64/lowmem_vectors.c @@ -52,7 +52,7 @@ lowglo lowGlo __attribute__ ((aligned(PAGE_MAX_SIZE))) = { // Increment the minor version for changes that provide additonal info/function // but does not break current usage .lgLayoutMajorVersion = 3, - .lgLayoutMinorVersion = 0, + .lgLayoutMinorVersion = 2, .lgLayoutMagic = LOWGLO_LAYOUT_MAGIC, .lgVerCode = { 'K', 'r', 'a', 'k', 'e', 'n', ' ', ' ' }, .lgZero = 0, @@ -69,13 +69,19 @@ lowglo lowGlo __attribute__ ((aligned(PAGE_MAX_SIZE))) = { .lgPmapMemPageOffset = offsetof(struct vm_page_with_ppnum, vmp_phys_page), .lgPmapMemChainOffset = offsetof(struct vm_page, vmp_listq), .lgPmapMemPagesize = (uint64_t)sizeof(struct vm_page), - .lgPmapMemFromArrayMask = VM_PACKED_FROM_VM_PAGES_ARRAY, - .lgPmapMemPackedShift = VM_PACKED_POINTER_SHIFT, - .lgPmapMemPackedBaseAddr = VM_MIN_KERNEL_AND_KEXT_ADDRESS, + .lgPmapMemFromArrayMask = VM_PAGE_PACKED_FROM_ARRAY, + .lgPmapMemPackedShift = VM_PAGE_PACKED_PTR_SHIFT, + .lgPmapMemPackedBaseAddr = VM_PAGE_PACKED_PTR_BASE, .lgPmapMemStartAddr = -1, .lgPmapMemEndAddr = -1, .lgPmapMemFirstppnum = -1, - .lgPageShift = ARM_PGSHIFT + .lgPageShift = ARM_PGSHIFT, + .lgVmFirstPhys = -1, + .lgVmLastPhys = -1, + .lgPhysMapBase = -1, + .lgPhysMapEnd = -1, + .lgPmapIoRangePtr = -1, + .lgNumPmapIoRanges = -1 }; void @@ -89,8 +95,44 @@ patch_low_glo_static_region(uint64_t address, uint64_t size) { lowGlo.lgStaticAddr = address; lowGlo.lgStaticSize = size; -} + /** + * These values are set in pmap_bootstrap() and represent the range of + * kernel managed memory. + */ + extern const pmap_paddr_t vm_first_phys; + extern const pmap_paddr_t vm_last_phys; + assertf((vm_first_phys != 0) && (vm_last_phys != 0), + "Tried setting the Low Globals before pmap_bootstrap()"); + lowGlo.lgVmFirstPhys = vm_first_phys; + lowGlo.lgVmLastPhys = vm_last_phys; + + /** + * These values are set in pmap_bootstrap() and represent an array of all + * kernel-managed I/O regions (pmap-io-ranges in the device tree). Some of + * these regions may include DRAM carved out for usage by other agents on + * the system. + * + * Need to forward-declare pmap_io_range_t since that only exists in the + * PMAP code. + */ + typedef struct pmap_io_range pmap_io_range_t; + extern const pmap_io_range_t* io_attr_table; + extern const unsigned int num_io_rgns; + lowGlo.lgPmapIoRangePtr = (uint64_t)io_attr_table; + lowGlo.lgNumPmapIoRanges = (uint64_t)num_io_rgns; + + /** + * These values are set in arm_vm_init() and represent the virtual address + * space used by the physical aperture. 
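/*
 * The new low-globals fields give external consumers (debuggers, core-file
 * triage tools) enough information to classify addresses without symbols.
 * A sketch, assuming the consumer has already located and copied the lowglo
 * structure; the bounds follow the inclusive/exclusive conventions stated in
 * the field comments above.
 */
static bool
lowglo_pa_is_kernel_managed(const lowglo *lg, uint64_t pa)
{
	return (pa >= lg->lgVmFirstPhys) && (pa < lg->lgVmLastPhys);
}

static bool
lowglo_va_is_in_physmap(const lowglo *lg, uint64_t va)
{
	return (va >= lg->lgPhysMapBase) && (va < lg->lgPhysMapEnd);
}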
+ */ + extern const vm_map_address_t physmap_base; + extern const vm_map_address_t physmap_end; + assertf((physmap_base != 0) && (physmap_end != 0), + "Tried setting the Low Globals before arm_vm_init()"); + lowGlo.lgPhysMapBase = physmap_base; + lowGlo.lgPhysMapEnd = physmap_end; +} void patch_low_glo_vm_page_info(void * start_addr, void * end_addr, uint32_t first_ppnum) diff --git a/osfmk/arm64/machine_remote_time.c b/osfmk/arm64/machine_remote_time.c index defc45b44..d78991a5d 100644 --- a/osfmk/arm64/machine_remote_time.c +++ b/osfmk/arm64/machine_remote_time.c @@ -36,10 +36,8 @@ #include #include -lck_spin_t *bt_spin_lock = NULL; _Atomic uint32_t bt_init_flag = 0; -extern lck_spin_t *ts_conversion_lock; extern void mach_bridge_add_timestamp(uint64_t remote_timestamp, uint64_t local_timestamp); extern void bt_calibration_thread_start(void); extern void bt_params_add(struct bt_params *params); @@ -53,13 +51,6 @@ mach_bridge_init_timestamp(void) return; } - /* Initialize the locks */ - static lck_grp_t *bt_lck_grp = NULL; - - bt_lck_grp = lck_grp_alloc_init("bridgetimestamp", LCK_GRP_ATTR_NULL); - bt_spin_lock = lck_spin_alloc_init(bt_lck_grp, NULL); - ts_conversion_lock = lck_spin_alloc_init(bt_lck_grp, NULL); - os_atomic_store(&bt_init_flag, 1, release); /* Start the kernel thread only after all the locks have been initialized */ @@ -82,9 +73,9 @@ mach_bridge_recv_timestamps(uint64_t remoteTimestamp, uint64_t localTimestamp) KDBG(MACHDBG_CODE(DBG_MACH_CLOCK, MACH_BRIDGE_RCV_TS), localTimestamp, remoteTimestamp); - lck_spin_lock(bt_spin_lock); + lck_spin_lock(&bt_spin_lock); mach_bridge_add_timestamp(remoteTimestamp, localTimestamp); - lck_spin_unlock(bt_spin_lock); + lck_spin_unlock(&bt_spin_lock); return; } @@ -106,9 +97,9 @@ mach_bridge_set_params(uint64_t local_timestamp, uint64_t remote_timestamp, doub params.base_local_ts = local_timestamp; params.base_remote_ts = remote_timestamp; params.rate = rate; - lck_spin_lock(ts_conversion_lock); + lck_spin_lock(&bt_ts_conversion_lock); bt_params_add(¶ms); - lck_spin_unlock(ts_conversion_lock); + lck_spin_unlock(&bt_ts_conversion_lock); KDBG(MACHDBG_CODE(DBG_MACH_CLOCK, MACH_BRIDGE_TS_PARAMS), params.base_local_ts, params.base_remote_ts, *(uint64_t *)((void *)¶ms.rate)); } diff --git a/osfmk/arm64/machine_routines.c b/osfmk/arm64/machine_routines.c index b1901d145..5823cc56a 100644 --- a/osfmk/arm64/machine_routines.c +++ b/osfmk/arm64/machine_routines.c @@ -42,39 +42,58 @@ #include #include #include +#include #include #include #include #include #include #include +#include #include #include +#include +#include +#include #include #include #include #include +#if HIBERNATION +#include +#include +#include +#endif /* HIBERNATION */ #if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR) -#include +#include #endif #include +/** + * On supported hardware, debuggable builds make the HID bits read-only + * without locking them. This lets people manually modify HID bits while + * debugging, since they can use a debugging tool to first reset the HID + * bits back to read/write. However it will still catch xnu changes that + * accidentally write to HID bits after they've been made read-only. 
+ */ +#if HAS_TWO_STAGE_SPR_LOCK && !(DEVELOPMENT || DEBUG) +#define USE_TWO_STAGE_SPR_LOCK +#endif + #if KPC #include #endif +#define MPIDR_CPU_ID(mpidr_el1_val) (((mpidr_el1_val) & MPIDR_AFF0_MASK) >> MPIDR_AFF0_SHIFT) +#define MPIDR_CLUSTER_ID(mpidr_el1_val) (((mpidr_el1_val) & MPIDR_AFF1_MASK) >> MPIDR_AFF1_SHIFT) + #if HAS_CLUSTER static uint8_t cluster_initialized = 0; #endif - -static int max_cpus_initialized = 0; -#define MAX_CPUS_SET 0x1 -#define MAX_CPUS_WAIT 0x2 - uint32_t LockTimeOut; uint32_t LockTimeOutUsec; uint64_t TLockTimeOut; @@ -82,46 +101,71 @@ uint64_t MutexSpin; uint64_t low_MutexSpin; int64_t high_MutexSpin; -boolean_t is_clock_configured = FALSE; +static uint64_t ml_wfe_hint_max_interval; +#define MAX_WFE_HINT_INTERVAL_US (500ULL) -uint32_t yield_delay_us = 0; /* Must be less than cpu_idle_latency to ensure ml_delay_should_spin is true */ - -#if CONFIG_NONFATAL_ASSERTS -extern int mach_assert; -#endif -extern volatile uint32_t debug_enabled; +/* Must be less than cpu_idle_latency to ensure ml_delay_should_spin is true */ +TUNABLE(uint32_t, yield_delay_us, "yield_delay_us", 0); extern vm_offset_t segLOWEST; extern vm_offset_t segLOWESTTEXT; extern vm_offset_t segLASTB; extern unsigned long segSizeLAST; +/* ARM64 specific bounds; used to test for presence in the kernelcache. */ +extern vm_offset_t vm_kernelcache_base; +extern vm_offset_t vm_kernelcache_top; + #if defined(HAS_IPI) unsigned int gFastIPI = 1; #define kDeferredIPITimerDefault (64 * NSEC_PER_USEC) /* in nanoseconds */ -static uint64_t deferred_ipi_timer_ns = kDeferredIPITimerDefault; +static TUNABLE_WRITEABLE(uint64_t, deferred_ipi_timer_ns, "fastipitimeout", + kDeferredIPITimerDefault); #endif /* defined(HAS_IPI) */ -void machine_conf(void); - thread_t Idle_context(void); -SECURITY_READ_ONLY_LATE(static uint32_t) cpu_phys_ids[MAX_CPUS] = {[0 ... MAX_CPUS - 1] = (uint32_t)-1}; -SECURITY_READ_ONLY_LATE(static unsigned int) avail_cpus = 0; -SECURITY_READ_ONLY_LATE(static int) boot_cpu = -1; -SECURITY_READ_ONLY_LATE(static int) max_cpu_number = 0; -SECURITY_READ_ONLY_LATE(cluster_type_t) boot_cluster = CLUSTER_TYPE_SMP; +SECURITY_READ_ONLY_LATE(static ml_topology_cpu_t) topology_cpu_array[MAX_CPUS]; +SECURITY_READ_ONLY_LATE(static ml_topology_cluster_t) topology_cluster_array[MAX_CPU_CLUSTERS]; +SECURITY_READ_ONLY_LATE(static ml_topology_info_t) topology_info = { + .version = CPU_TOPOLOGY_VERSION, + .cpus = topology_cpu_array, + .clusters = topology_cluster_array, +}; +/** + * Represents the offset of each cluster within a hypothetical array of MAX_CPUS + * entries of an arbitrary data type. This is intended for use by specialized consumers + * that must quickly access per-CPU data using only the physical CPU ID (MPIDR_EL1), + * as follows: + * hypothetical_array[cluster_offsets[AFF1] + AFF0] + * Most consumers should instead use general-purpose facilities such as PERCPU or + * ml_get_cpu_number(). + */ +SECURITY_READ_ONLY_LATE(int64_t) cluster_offsets[MAX_CPU_CLUSTER_PHY_ID + 1]; -SECURITY_READ_ONLY_LATE(static uint32_t) fiq_eventi = UINT32_MAX; +SECURITY_READ_ONLY_LATE(static uint32_t) arm64_eventi = UINT32_MAX; -lockdown_handler_t lockdown_handler; -void *lockdown_this; -lck_mtx_t lockdown_handler_lck; -lck_grp_t *lockdown_handler_grp; -uint32_t lockdown_done; +extern uint32_t lockdown_done; + +/** + * Represents regions of virtual address space that should be reserved + * (pre-mapped) in each user address space. 
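/*
 * Sketch of the lookup pattern described in the cluster_offsets comment
 * above: a specialized consumer indexes a MAX_CPUS-sized array using only
 * MPIDR_EL1. The element type and array here are illustrative.
 */
typedef struct {
	uint64_t stat;          /* illustrative payload */
} my_percpu_t;

static inline my_percpu_t *
my_percpu_slot(my_percpu_t hypothetical_array[MAX_CPUS])
{
	uint64_t mpidr;

	MRS(mpidr, "MPIDR_EL1");
	return &hypothetical_array[cluster_offsets[MPIDR_CLUSTER_ID(mpidr)] +
	       MPIDR_CPU_ID(mpidr)];
}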
+ */ +SECURITY_READ_ONLY_LATE(static struct vm_reserved_region) vm_reserved_regions[] = { + /* + * Reserve the virtual memory space representing the commpage nesting region + * to prevent user processes from allocating memory within it. The actual + * page table entries for the commpage are inserted by vm_commpage_enter(). + * This vm_map_enter() just prevents userspace from allocating/deallocating + * anything within the entire commpage nested region. + */ + { + .vmrr_name = "commpage nesting", + .vmrr_addr = _COMM_PAGE64_NESTING_START, + .vmrr_size = _COMM_PAGE64_NESTING_SIZE + } +}; -void ml_lockdown_init(void); -void ml_lockdown_run_handler(void); uint32_t get_arm_cpu_version(void); #if defined(HAS_IPI) @@ -135,16 +179,16 @@ ml_cpu_signal_type(unsigned int cpu_mpidr, uint32_t type) * to a single CPU. Otherwise we may migrate between choosing which * IPI mechanism to use and issuing the IPI. */ MRS(local_mpidr, "MPIDR_EL1"); - if ((local_mpidr & MPIDR_AFF1_MASK) == (cpu_mpidr & MPIDR_AFF1_MASK)) { - uint64_t x = type | (cpu_mpidr & MPIDR_AFF0_MASK); + if (MPIDR_CLUSTER_ID(local_mpidr) == MPIDR_CLUSTER_ID(cpu_mpidr)) { + uint64_t x = type | MPIDR_CPU_ID(cpu_mpidr); MSR(ARM64_REG_IPI_RR_LOCAL, x); } else { #define IPI_RR_TARGET_CLUSTER_SHIFT 16 - uint64_t x = type | ((cpu_mpidr & MPIDR_AFF1_MASK) << (IPI_RR_TARGET_CLUSTER_SHIFT - MPIDR_AFF1_SHIFT)) | (cpu_mpidr & MPIDR_AFF0_MASK); + uint64_t x = type | (MPIDR_CLUSTER_ID(cpu_mpidr) << IPI_RR_TARGET_CLUSTER_SHIFT) | MPIDR_CPU_ID(cpu_mpidr); MSR(ARM64_REG_IPI_RR_GLOBAL, x); } #else - uint64_t x = type | (cpu_mpidr & MPIDR_AFF0_MASK); + uint64_t x = type | MPIDR_CPU_ID(cpu_mpidr); MSR(ARM64_REG_IPI_RR, x); #endif } @@ -235,23 +279,14 @@ ml_cpu_signal_retract(unsigned int cpu_mpidr __unused) void machine_idle(void) { - __builtin_arm_wsr("DAIFSet", (DAIFSC_IRQF | DAIFSC_FIQF)); + /* Interrupts are expected to be masked on entry or re-entry via + * Idle_load_context() + */ + assert((__builtin_arm_rsr("DAIF") & DAIF_IRQF) == DAIF_IRQF); Idle_context(); __builtin_arm_wsr("DAIFClr", (DAIFSC_IRQF | DAIFSC_FIQF)); } -void -init_vfp(void) -{ - return; -} - -boolean_t -get_vfp_enabled(void) -{ - return TRUE; -} - void OSSynchronizeIO(void) { @@ -315,10 +350,21 @@ get_arm_cpu_version(void) return ((value & MIDR_EL1_REV_MASK) >> MIDR_EL1_REV_SHIFT) | ((value & MIDR_EL1_VAR_MASK) >> (MIDR_EL1_VAR_SHIFT - 4)); } +bool +ml_feature_supported(uint32_t feature_bit) +{ + uint64_t aidr_el1_value = 0; + + MRS(aidr_el1_value, "AIDR_EL1"); + + + return aidr_el1_value & feature_bit; +} + /* * user_cont_hwclock_allowed() * - * Indicates whether we allow EL0 to read the physical timebase (CNTPCT_EL0) + * Indicates whether we allow EL0 to read the virtual timebase (CNTVCT_EL0) * as a continuous time source (e.g. 
from mach_continuous_time) */ boolean_t @@ -338,331 +384,15 @@ user_timebase_type(void) return USER_TIMEBASE_SPEC; } -boolean_t -arm64_wfe_allowed(void) -{ - return TRUE; -} - -#if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR) - -uint64_t rorgn_begin __attribute__((section("__DATA, __const"))) = 0; -uint64_t rorgn_end __attribute__((section("__DATA, __const"))) = 0; -vm_offset_t amcc_base; - -static void assert_unlocked(void); -static void assert_amcc_cache_disabled(void); -static void lock_amcc(void); -static void lock_mmu(uint64_t begin, uint64_t end); - -void -rorgn_stash_range(void) -{ -#if DEVELOPMENT || DEBUG - boolean_t rorgn_disable = FALSE; - - PE_parse_boot_argn("-unsafe_kernel_text", &rorgn_disable, sizeof(rorgn_disable)); - - if (rorgn_disable) { - /* take early out if boot arg present, don't query any machine registers to avoid - * dependency on amcc DT entry - */ - return; - } -#endif - - /* Get the AMC values, and stash them into rorgn_begin, rorgn_end. - * gPhysBase is the base of DRAM managed by xnu. we need DRAM_BASE as - * the AMCC RO region begin/end registers are in units of 16KB page - * numbers from DRAM_BASE so we'll truncate gPhysBase at 512MB granule - * and assert the value is the canonical DRAM_BASE PA of 0x8_0000_0000 for arm64. - */ - - uint64_t dram_base = gPhysBase & ~0x1FFFFFFFULL; /* 512MB */ - assert(dram_base == 0x800000000ULL); - -#if defined(KERNEL_INTEGRITY_KTRR) - uint64_t soc_base = 0; - DTEntry entryP = NULL; - uintptr_t *reg_prop = NULL; - uint32_t prop_size = 0; - int rc; - - soc_base = pe_arm_get_soc_base_phys(); - rc = DTFindEntry("name", "mcc", &entryP); - assert(rc == kSuccess); - rc = DTGetProperty(entryP, "reg", (void **)®_prop, &prop_size); - assert(rc == kSuccess); - amcc_base = ml_io_map(soc_base + *reg_prop, *(reg_prop + 1)); -#elif defined(KERNEL_INTEGRITY_CTRR) - /* TODO: t8020 mcc entry not in device tree yet; we'll do it LIVE */ -#define TEMP_AMCC_BASE_PA 0x200000000ULL -#define TEMP_AMCC_SZ 0x100000 - amcc_base = ml_io_map(TEMP_AMCC_BASE_PA, TEMP_AMCC_SZ); -#else -#error "KERNEL_INTEGRITY config error" -#endif - -#if defined(KERNEL_INTEGRITY_KTRR) - assert(rRORGNENDADDR > rRORGNBASEADDR); - rorgn_begin = (rRORGNBASEADDR << AMCC_PGSHIFT) + dram_base; - rorgn_end = (rRORGNENDADDR << AMCC_PGSHIFT) + dram_base; -#elif defined(KERNEL_INTEGRITY_CTRR) - rorgn_begin = rCTRR_AMCC_PLANE_REG(0, CTRR_A_BASEADDR); - rorgn_end = rCTRR_AMCC_PLANE_REG(0, CTRR_A_ENDADDR); - assert(rorgn_end > rorgn_begin); - - for (int i = 0; i < CTRR_AMCC_MAX_PLANES; ++i) { - uint32_t begin = rCTRR_AMCC_PLANE_REG(i, CTRR_A_BASEADDR); - uint32_t end = rCTRR_AMCC_PLANE_REG(i, CTRR_A_ENDADDR); - if (!(begin == rorgn_begin && end == rorgn_end)) { -#if DEVELOPMENT || DEBUG - panic("iboot programmed CTRR bounds are inconsistent"); -#else - panic("Inconsistent memory configuration"); -#endif - } - } - - // convert from page number from DRAM base to PA - rorgn_begin = (rorgn_begin << AMCC_PGSHIFT) + dram_base; - rorgn_end = (rorgn_end << AMCC_PGSHIFT) + dram_base; - -#else -#error KERNEL_INTEGRITY config error -#endif /* defined (KERNEL_INTEGRITY_KTRR) */ -} - -static void -assert_unlocked() -{ - uint64_t ktrr_lock = 0; - uint32_t rorgn_lock = 0; - - assert(amcc_base); -#if defined(KERNEL_INTEGRITY_KTRR) - rorgn_lock = rRORGNLOCK; - ktrr_lock = __builtin_arm_rsr64(ARM64_REG_KTRR_LOCK_EL1); -#elif defined(KERNEL_INTEGRITY_CTRR) - for (int i = 0; i < CTRR_AMCC_MAX_PLANES; ++i) { - rorgn_lock |= rCTRR_AMCC_PLANE_REG(i, CTRR_A_LOCK); - } - ktrr_lock = 
__builtin_arm_rsr64(ARM64_REG_CTRR_LOCK_EL1); -#else -#error KERNEL_INTEGRITY config error -#endif /* defined(KERNEL_INTEGRITY_KTRR) */ - - assert(!ktrr_lock); - assert(!rorgn_lock); -} - -static void -lock_amcc() -{ -#if defined(KERNEL_INTEGRITY_KTRR) - rRORGNLOCK = 1; - __builtin_arm_isb(ISB_SY); -#elif defined(KERNEL_INTEGRITY_CTRR) - /* lockdown planes in reverse order as plane 0 should be locked last */ - for (int i = 0; i < CTRR_AMCC_MAX_PLANES; ++i) { - rCTRR_AMCC_PLANE_REG(CTRR_AMCC_MAX_PLANES - i - 1, CTRR_A_ENABLE) = 1; - rCTRR_AMCC_PLANE_REG(CTRR_AMCC_MAX_PLANES - i - 1, CTRR_A_LOCK) = 1; - __builtin_arm_isb(ISB_SY); - } -#else -#error KERNEL_INTEGRITY config error -#endif -} - -static void -lock_mmu(uint64_t begin, uint64_t end) -{ -#if defined(KERNEL_INTEGRITY_KTRR) - - __builtin_arm_wsr64(ARM64_REG_KTRR_LOWER_EL1, begin); - __builtin_arm_wsr64(ARM64_REG_KTRR_UPPER_EL1, end); - __builtin_arm_wsr64(ARM64_REG_KTRR_LOCK_EL1, 1ULL); - - /* flush TLB */ - - __builtin_arm_isb(ISB_SY); - flush_mmu_tlb(); - -#elif defined (KERNEL_INTEGRITY_CTRR) - /* this will lock the entire bootstrap cluster. non bootstrap clusters - * will be locked by respective cluster master in start.s */ - - __builtin_arm_wsr64(ARM64_REG_CTRR_A_LWR_EL1, begin); - __builtin_arm_wsr64(ARM64_REG_CTRR_A_UPR_EL1, end); - -#if !defined(APPLEVORTEX) - /* H12 changed sequence, must invalidate TLB immediately after setting CTRR bounds */ - __builtin_arm_isb(ISB_SY); /* ensure all prior MSRs are complete */ - flush_mmu_tlb(); -#endif /* !defined(APPLEVORTEX) */ - - __builtin_arm_wsr64(ARM64_REG_CTRR_CTL_EL1, CTRR_CTL_EL1_A_PXN | CTRR_CTL_EL1_A_MMUON_WRPROTECT); - __builtin_arm_wsr64(ARM64_REG_CTRR_LOCK_EL1, 1ULL); - - uint64_t current_el = __builtin_arm_rsr64("CurrentEL"); - if (current_el == PSR64_MODE_EL2) { - // CTRR v2 has explicit registers for cluster config. they can only be written in EL2 - - __builtin_arm_wsr64(ACC_CTRR_A_LWR_EL2, begin); - __builtin_arm_wsr64(ACC_CTRR_A_UPR_EL2, end); - __builtin_arm_wsr64(ACC_CTRR_CTL_EL2, CTRR_CTL_EL1_A_PXN | CTRR_CTL_EL1_A_MMUON_WRPROTECT); - __builtin_arm_wsr64(ACC_CTRR_LOCK_EL2, 1ULL); - } - - __builtin_arm_isb(ISB_SY); /* ensure all prior MSRs are complete */ -#if defined(APPLEVORTEX) - flush_mmu_tlb(); -#endif /* defined(APPLEVORTEX) */ - -#else /* defined(KERNEL_INTEGRITY_KTRR) */ -#error KERNEL_INTEGRITY config error -#endif /* defined(KERNEL_INTEGRITY_KTRR) */ -} - -static void -assert_amcc_cache_disabled() -{ -#if defined(KERNEL_INTEGRITY_KTRR) - assert((rMCCGEN & 1) == 0); /* assert M$ disabled or LLC clean will be unreliable */ -#elif defined(KERNEL_INTEGRITY_CTRR) && (defined(ARM64_BOARD_CONFIG_T8006)) - /* - * T8006 differentiates between data and tag ways being powered up, so - * make sure to check that both are zero on its single memory plane. 
- */ - assert((rCTRR_AMCC_PLANE_REG(0, CTRR_AMCC_PWRONWAYCNTSTATUS) & - (AMCC_CURTAGWAYCNT_MASK | AMCC_CURDATWAYCNT_MASK)) == 0); -#elif defined (KERNEL_INTEGRITY_CTRR) - for (int i = 0; i < CTRR_AMCC_MAX_PLANES; ++i) { - assert(rCTRR_AMCC_PLANE_REG(i, CTRR_AMCC_WAYONCNT) == 0); - } -#else -#error KERNEL_INTEGRITY config error -#endif -} - -/* - * void rorgn_lockdown(void) - * - * Lock the MMU and AMCC RORegion within lower and upper boundaries if not already locked - * - * [ ] - ensure this is being called ASAP on secondary CPUs: KTRR programming and lockdown handled in - * start.s:start_cpu() for subsequent wake/resume of all cores - */ -void -rorgn_lockdown(void) -{ - vm_offset_t ktrr_begin, ktrr_end; - unsigned long last_segsz; - -#if DEVELOPMENT || DEBUG - boolean_t ktrr_disable = FALSE; - - PE_parse_boot_argn("-unsafe_kernel_text", &ktrr_disable, sizeof(ktrr_disable)); - - if (ktrr_disable) { - /* - * take early out if boot arg present, since we may not have amcc DT entry present - * we can't assert that iboot hasn't programmed the RO region lockdown registers - */ - goto out; - } -#endif /* DEVELOPMENT || DEBUG */ - - assert_unlocked(); - - /* [x] - Use final method of determining all kernel text range or expect crashes */ - ktrr_begin = segLOWEST; - assert(ktrr_begin && gVirtBase && gPhysBase); - - ktrr_begin = kvtophys(ktrr_begin); - - ktrr_end = kvtophys(segLASTB); - last_segsz = segSizeLAST; -#if defined(KERNEL_INTEGRITY_KTRR) - /* __LAST is not part of the MMU KTRR region (it is however part of the AMCC KTRR region) */ - ktrr_end = (ktrr_end - 1) & ~AMCC_PGMASK; - /* ensure that iboot and xnu agree on the ktrr range */ - assert(rorgn_begin == ktrr_begin && rorgn_end == (ktrr_end + last_segsz)); - /* assert that __LAST segment containing privileged insns is only a single page */ - assert(last_segsz == PAGE_SIZE); -#elif defined(KERNEL_INTEGRITY_CTRR) - ktrr_end = (ktrr_end + last_segsz - 1) & ~AMCC_PGMASK; - /* __LAST is part of MMU CTRR region. Can't use the KTRR style method of making - * __pinst no execute because PXN applies with MMU off in CTRR. 
*/ - assert(rorgn_begin == ktrr_begin && rorgn_end == ktrr_end); -#endif - - -#if DEBUG || DEVELOPMENT - printf("KTRR Begin: %p End: %p, setting lockdown\n", (void *)ktrr_begin, (void *)ktrr_end); -#endif - - /* [x] - ensure all in flight writes are flushed to AMCC before enabling RO Region Lock */ - - assert_amcc_cache_disabled(); - - CleanPoC_DcacheRegion_Force(phystokv(ktrr_begin), - (unsigned)((ktrr_end + last_segsz) - ktrr_begin + AMCC_PGMASK)); - - lock_amcc(); - - lock_mmu(ktrr_begin, ktrr_end); - -#if DEVELOPMENT || DEBUG -out: -#endif - -#if defined(KERNEL_INTEGRITY_CTRR) - { - /* wake any threads blocked on cluster master lockdown */ - cpu_data_t *cdp; - uint64_t mpidr_el1_value; - - cdp = getCpuDatap(); - MRS(mpidr_el1_value, "MPIDR_EL1"); - cdp->cpu_cluster_id = (mpidr_el1_value & MPIDR_AFF1_MASK) >> MPIDR_AFF1_SHIFT; - assert(cdp->cpu_cluster_id < __ARM_CLUSTER_COUNT__); - ctrr_cluster_locked[cdp->cpu_cluster_id] = 1; - thread_wakeup(&ctrr_cluster_locked[cdp->cpu_cluster_id]); - } -#endif - /* now we can run lockdown handler */ - ml_lockdown_run_handler(); -} - -#endif /* defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR) */ - void machine_startup(__unused boot_args * args) { - int boot_arg; - #if defined(HAS_IPI) && (DEVELOPMENT || DEBUG) if (!PE_parse_boot_argn("fastipi", &gFastIPI, sizeof(gFastIPI))) { gFastIPI = 1; } - - PE_parse_boot_argn("fastipitimeout", &deferred_ipi_timer_ns, sizeof(deferred_ipi_timer_ns)); #endif /* defined(HAS_IPI) && (DEVELOPMENT || DEBUG)*/ -#if CONFIG_NONFATAL_ASSERTS - PE_parse_boot_argn("assert", &mach_assert, sizeof(mach_assert)); -#endif - - if (PE_parse_boot_argn("preempt", &boot_arg, sizeof(boot_arg))) { - default_preemption_rate = boot_arg; - } - if (PE_parse_boot_argn("bg_preempt", &boot_arg, sizeof(boot_arg))) { - default_bg_preemption_rate = boot_arg; - } - - PE_parse_boot_argn("yield_delay_us", &yield_delay_us, sizeof(yield_delay_us)); - machine_conf(); /* @@ -672,21 +402,12 @@ machine_startup(__unused boot_args * args) /* NOTREACHED */ } -void -machine_lockdown_preflight(void) -{ -#if CONFIG_KERNEL_INTEGRITY - -#if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR) - rorgn_stash_range(); -#endif - -#endif -} void machine_lockdown(void) { + arm_vm_prot_finalize(PE_state.bootArgs); + #if CONFIG_KERNEL_INTEGRITY #if KERNEL_INTEGRITY_WT /* Watchtower @@ -715,10 +436,30 @@ machine_lockdown(void) rorgn_lockdown(); #endif /* defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR) */ +#if HIBERNATION + /* sign the kernel read-only region */ + if (ppl_hmac_init() == KERN_SUCCESS) { + ppl_hmac_compute_rorgn_hmac(); + } +#endif /* HIBERNATION */ #endif /* CONFIG_KERNEL_INTEGRITY */ + +#if HIBERNATION + /* Avoid configuration security issues by panic'ing if hibernation is + * supported but we don't know how to invalidate SIO HMAC keys, see + * below. */ + if (ppl_hib_hibernation_supported() && + NULL == invalidate_hmac_function) { + panic("Invalidate HMAC function wasn't set when needed"); + } +#endif /* HIBERNATION */ + + + lockdown_done = 1; } + char * machine_boot_info( __unused char *buf, @@ -727,26 +468,6 @@ machine_boot_info( return PE_boot_args(); } -void -machine_conf(void) -{ - /* - * This is known to be inaccurate. 
mem_size should always be capped at 2 GB - */ - machine_info.memory_size = (uint32_t)mem_size; -} - -void -machine_init(void) -{ - debug_log_init(); - clock_config(); - is_clock_configured = TRUE; - if (debug_enabled) { - pmap_map_globals(); - } -} - void slave_machine_init(__unused void *param) { @@ -767,47 +488,6 @@ machine_processor_shutdown( return Shutdown_context(doshutdown, processor); } -/* - * Routine: ml_init_max_cpus - * Function: - */ -void -ml_init_max_cpus(unsigned int max_cpus) -{ - boolean_t current_state; - - current_state = ml_set_interrupts_enabled(FALSE); - if (max_cpus_initialized != MAX_CPUS_SET) { - machine_info.max_cpus = max_cpus; - machine_info.physical_cpu_max = max_cpus; - machine_info.logical_cpu_max = max_cpus; - if (max_cpus_initialized == MAX_CPUS_WAIT) { - thread_wakeup((event_t) &max_cpus_initialized); - } - max_cpus_initialized = MAX_CPUS_SET; - } - (void) ml_set_interrupts_enabled(current_state); -} - -/* - * Routine: ml_get_max_cpus - * Function: - */ -unsigned int -ml_get_max_cpus(void) -{ - boolean_t current_state; - - current_state = ml_set_interrupts_enabled(FALSE); - if (max_cpus_initialized != MAX_CPUS_SET) { - max_cpus_initialized = MAX_CPUS_WAIT; - assert_wait((event_t) &max_cpus_initialized, THREAD_UNINT); - (void) thread_block(THREAD_CONTINUE_NULL); - } - (void) ml_set_interrupts_enabled(current_state); - return machine_info.max_cpus; -} - /* * Routine: ml_init_lock_timeout * Function: @@ -853,6 +533,8 @@ ml_init_lock_timeout(void) * by setting high_MutexSpin through the sysctl. */ high_MutexSpin = low_MutexSpin; + + nanoseconds_to_absolutetime(MAX_WFE_HINT_INTERVAL_US * NSEC_PER_USEC, &ml_wfe_hint_max_interval); } /* @@ -1011,10 +693,7 @@ ml_install_interrupt_handler( cpu_data_ptr->interrupt_handler = handler; cpu_data_ptr->interrupt_refCon = refCon; - cpu_data_ptr->interrupts_enabled = TRUE; (void) ml_set_interrupts_enabled(current_state); - - initialize_screen(NULL, kPEAcquireScreen); } /* @@ -1058,6 +737,108 @@ ml_init_timebase( } } +#define ML_READPROP_MANDATORY UINT64_MAX + +static uint64_t +ml_readprop(const DTEntry entry, const char *propertyName, uint64_t default_value) +{ + void const *prop; + unsigned int propSize; + + if (SecureDTGetProperty(entry, propertyName, &prop, &propSize) == kSuccess) { + if (propSize == sizeof(uint8_t)) { + return *((uint8_t const *)prop); + } else if (propSize == sizeof(uint16_t)) { + return *((uint16_t const *)prop); + } else if (propSize == sizeof(uint32_t)) { + return *((uint32_t const *)prop); + } else if (propSize == sizeof(uint64_t)) { + return *((uint64_t const *)prop); + } else { + panic("CPU property '%s' has bad size %u", propertyName, propSize); + } + } else { + if (default_value == ML_READPROP_MANDATORY) { + panic("Missing mandatory property '%s'", propertyName); + } + return default_value; + } +} + +static boolean_t +ml_read_reg_range(const DTEntry entry, const char *propertyName, uint64_t *pa_ptr, uint64_t *len_ptr) +{ + uint64_t const *prop; + unsigned int propSize; + + if (SecureDTGetProperty(entry, propertyName, (void const **)&prop, &propSize) != kSuccess) { + return FALSE; + } + + if (propSize != sizeof(uint64_t) * 2) { + panic("Wrong property size for %s", propertyName); + } + + *pa_ptr = prop[0]; + *len_ptr = prop[1]; + return TRUE; +} + +static boolean_t +ml_is_boot_cpu(const DTEntry entry) +{ + void const *prop; + unsigned int propSize; + + if (SecureDTGetProperty(entry, "state", &prop, &propSize) != kSuccess) { + panic("unable to retrieve state for cpu"); + } + + if (strncmp((char 
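/*
 * Usage sketch for ml_readprop() above, with property names taken from the
 * topology parser later in this file: optional properties fall back to the
 * supplied default, while ML_READPROP_MANDATORY turns a missing property
 * into a panic.
 */
static void
sketch_read_cpu_props(const DTEntry cpu_entry)
{
	/* Mandatory: panics ("Missing mandatory property 'reg'") if absent. */
	uint32_t phys_id = (uint32_t)ml_readprop(cpu_entry, "reg", ML_READPROP_MANDATORY);

	/* Optional: returns 0 when the device tree does not provide it. */
	uint32_t l2_size = (uint32_t)ml_readprop(cpu_entry, "l2-cache-size", 0);

	(void)phys_id;
	(void)l2_size;
}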
const *)prop, "running", propSize) == 0) { + return TRUE; + } else { + return FALSE; + } +} + +static void +ml_read_chip_revision(unsigned int *rev __unused) +{ + // The CPU_VERSION_* macros are only defined on APPLE_ARM64_ARCH_FAMILY builds +#ifdef APPLE_ARM64_ARCH_FAMILY + DTEntry entryP; + + if ((SecureDTFindEntry("name", "arm-io", &entryP) == kSuccess)) { + *rev = (unsigned int)ml_readprop(entryP, "chip-revision", CPU_VERSION_UNKNOWN); + } else { + *rev = CPU_VERSION_UNKNOWN; + } +#endif +} + +static boolean_t +ml_parse_interrupt_prop(const DTEntry entry, ml_topology_cpu_t *cpu) +{ + uint32_t const *prop; + unsigned int propSize; + + if (SecureDTGetProperty(entry, "interrupts", (void const **)&prop, &propSize) != kSuccess) { + return FALSE; + } + + if (propSize == sizeof(uint32_t) * 1) { + cpu->pmi_irq = prop[0]; + return TRUE; + } else if (propSize == sizeof(uint32_t) * 3) { + cpu->self_ipi_irq = prop[0]; + cpu->pmi_irq = prop[1]; + cpu->other_ipi_irq = prop[2]; + return TRUE; + } else { + return FALSE; + } +} + void ml_parse_cpu_topology(void) { @@ -1066,59 +847,143 @@ ml_parse_cpu_topology(void) uint32_t cpu_boot_arg; int err; + int64_t cluster_phys_to_logical[MAX_CPU_CLUSTER_PHY_ID + 1]; + int64_t cluster_max_cpu_phys_id[MAX_CPU_CLUSTER_PHY_ID + 1]; cpu_boot_arg = MAX_CPUS; - PE_parse_boot_argn("cpus", &cpu_boot_arg, sizeof(cpu_boot_arg)); - err = DTLookupEntry(NULL, "/cpus", &entry); + err = SecureDTLookupEntry(NULL, "/cpus", &entry); assert(err == kSuccess); - err = DTInitEntryIterator(entry, &iter); + err = SecureDTInitEntryIterator(entry, &iter); assert(err == kSuccess); - while (kSuccess == DTIterateEntries(&iter, &child)) { - unsigned int propSize; - void *prop = NULL; - int cpu_id = avail_cpus++; + for (int i = 0; i <= MAX_CPU_CLUSTER_PHY_ID; i++) { + cluster_offsets[i] = -1; + cluster_phys_to_logical[i] = -1; + cluster_max_cpu_phys_id[i] = 0; + } + + while (kSuccess == SecureDTIterateEntries(&iter, &child)) { + boolean_t is_boot_cpu = ml_is_boot_cpu(child); - if (kSuccess == DTGetProperty(child, "cpu-id", &prop, &propSize)) { - cpu_id = *((int32_t*)prop); + // If the number of CPUs is constrained by the cpus= boot-arg, and the boot CPU hasn't + // been added to the topology struct yet, and we only have one slot left, then skip + // every other non-boot CPU in order to leave room for the boot CPU. + // + // e.g. if the boot-args say "cpus=3" and CPU4 is the boot CPU, then the cpus[] + // array will list CPU0, CPU1, and CPU4. CPU2-CPU3 and CPU5-CPUn will be omitted. 
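/*
 * The two checks at the top of the loop implement the cpus= rule described
 * in the comment above. Restated as a standalone predicate (names are
 * illustrative: num_added ~ topology_info.num_cpus, boot_cpu_seen ~
 * (topology_info.boot_cpu != NULL), limit ~ the cpus= boot-arg):
 */
static bool
sketch_should_add_cpu(unsigned int num_added, bool boot_cpu_seen,
    unsigned int limit, bool is_boot_cpu)
{
	if (num_added >= limit) {
		return false;   /* table is already full */
	}
	if ((num_added >= limit - 1) && !boot_cpu_seen && !is_boot_cpu) {
		return false;   /* keep the last slot free for the boot CPU */
	}
	return true;
}
/*
 * Example: with "cpus=3" and CPU4 as the boot CPU, this admits CPU0 and
 * CPU1, skips CPU2 and CPU3, admits CPU4, and rejects everything after,
 * matching the CPU0/CPU1/CPU4 outcome described in the comment.
 */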
+ if (topology_info.num_cpus >= (cpu_boot_arg - 1) && topology_info.boot_cpu == NULL && !is_boot_cpu) { + continue; + } + if (topology_info.num_cpus >= cpu_boot_arg) { + break; } - assert(cpu_id < MAX_CPUS); - assert(cpu_phys_ids[cpu_id] == (uint32_t)-1); + ml_topology_cpu_t *cpu = &topology_info.cpus[topology_info.num_cpus]; - if (boot_cpu == -1) { - if (kSuccess != DTGetProperty(child, "state", &prop, &propSize)) { - panic("unable to retrieve state for cpu %d", cpu_id); - } + cpu->cpu_id = topology_info.num_cpus++; + assert(cpu->cpu_id < MAX_CPUS); + topology_info.max_cpu_id = MAX(topology_info.max_cpu_id, cpu->cpu_id); - if (strncmp((char*)prop, "running", propSize) == 0) { - boot_cpu = cpu_id; - } - } - if (kSuccess != DTGetProperty(child, "reg", &prop, &propSize)) { - panic("unable to retrieve physical ID for cpu %d", cpu_id); - } + cpu->die_id = (int)ml_readprop(child, "die-id", 0); + topology_info.max_die_id = MAX(topology_info.max_die_id, cpu->die_id); + + cpu->phys_id = (uint32_t)ml_readprop(child, "reg", ML_READPROP_MANDATORY); + + cpu->l2_access_penalty = (uint32_t)ml_readprop(child, "l2-access-penalty", 0); + cpu->l2_cache_size = (uint32_t)ml_readprop(child, "l2-cache-size", 0); + cpu->l2_cache_id = (uint32_t)ml_readprop(child, "l2-cache-id", 0); + cpu->l3_cache_size = (uint32_t)ml_readprop(child, "l3-cache-size", 0); + cpu->l3_cache_id = (uint32_t)ml_readprop(child, "l3-cache-id", 0); + + ml_parse_interrupt_prop(child, cpu); + ml_read_reg_range(child, "cpu-uttdbg-reg", &cpu->cpu_UTTDBG_pa, &cpu->cpu_UTTDBG_len); + ml_read_reg_range(child, "cpu-impl-reg", &cpu->cpu_IMPL_pa, &cpu->cpu_IMPL_len); + ml_read_reg_range(child, "coresight-reg", &cpu->coresight_pa, &cpu->coresight_len); + cpu->cluster_type = CLUSTER_TYPE_SMP; - cpu_phys_ids[cpu_id] = *((uint32_t*)prop); - if ((cpu_id > max_cpu_number) && ((cpu_id == boot_cpu) || (avail_cpus <= cpu_boot_arg))) { - max_cpu_number = cpu_id; + /* + * Since we want to keep a linear cluster ID space, we cannot just rely + * on the value provided by EDT. Instead, use the MPIDR value to see if we have + * seen this exact cluster before. If so, then reuse that cluster ID for this CPU. + */ +#if HAS_CLUSTER + uint32_t phys_cluster_id = MPIDR_CLUSTER_ID(cpu->phys_id); +#else + uint32_t phys_cluster_id = 0; +#endif + assert(phys_cluster_id <= MAX_CPU_CLUSTER_PHY_ID); + cpu->cluster_id = ((cluster_phys_to_logical[phys_cluster_id] == -1) ? + topology_info.num_clusters : cluster_phys_to_logical[phys_cluster_id]); + + assert(cpu->cluster_id < MAX_CPU_CLUSTERS); + + ml_topology_cluster_t *cluster = &topology_info.clusters[cpu->cluster_id]; + if (cluster->num_cpus == 0) { + assert(topology_info.num_clusters < MAX_CPU_CLUSTERS); + + topology_info.num_clusters++; + topology_info.max_cluster_id = MAX(topology_info.max_cluster_id, cpu->cluster_id); + + cluster->cluster_id = cpu->cluster_id; + cluster->cluster_type = cpu->cluster_type; + cluster->first_cpu_id = cpu->cpu_id; + assert(cluster_phys_to_logical[phys_cluster_id] == -1); + cluster_phys_to_logical[phys_cluster_id] = cpu->cluster_id; + + // Since we don't have a per-cluster EDT node, this is repeated in each CPU node. + // If we wind up with a bunch of these, we might want to create separate per-cluster + // EDT nodes and have the CPU nodes reference them through a phandle. 
+ ml_read_reg_range(child, "acc-impl-reg", &cluster->acc_IMPL_pa, &cluster->acc_IMPL_len); + ml_read_reg_range(child, "cpm-impl-reg", &cluster->cpm_IMPL_pa, &cluster->cpm_IMPL_len); } - } - if (avail_cpus > cpu_boot_arg) { - avail_cpus = cpu_boot_arg; - } +#if HAS_CLUSTER + if (MPIDR_CPU_ID(cpu->phys_id) > cluster_max_cpu_phys_id[phys_cluster_id]) { + cluster_max_cpu_phys_id[phys_cluster_id] = MPIDR_CPU_ID(cpu->phys_id); + } +#endif + + cpu->die_cluster_id = (int)ml_readprop(child, "die-cluster-id", MPIDR_CLUSTER_ID(cpu->phys_id)); + cpu->cluster_core_id = (int)ml_readprop(child, "cluster-core-id", MPIDR_CPU_ID(cpu->phys_id)); - if (avail_cpus == 0) { - panic("No cpus found!"); + cluster->num_cpus++; + cluster->cpu_mask |= 1ULL << cpu->cpu_id; + + if (is_boot_cpu) { + assert(topology_info.boot_cpu == NULL); + topology_info.boot_cpu = cpu; + topology_info.boot_cluster = cluster; + } } - if (boot_cpu == -1) { - panic("unable to determine boot cpu!"); +#if HAS_CLUSTER + /* + * Build the cluster offset array, ensuring that the region reserved + * for each physical cluster contains enough entries to be indexed + * by the maximum physical CPU ID (AFF0) within the cluster. + */ + unsigned int cur_cluster_offset = 0; + for (int i = 0; i <= MAX_CPU_CLUSTER_PHY_ID; i++) { + if (cluster_phys_to_logical[i] != -1) { + cluster_offsets[i] = cur_cluster_offset; + cur_cluster_offset += (cluster_max_cpu_phys_id[i] + 1); + } } + assert(cur_cluster_offset <= MAX_CPUS); +#else + /* + * For H10, there are really 2 physical clusters, but they are not separated + * into distinct ACCs. AFF1 therefore always reports 0, and AFF0 numbering + * is linear across both clusters. For the purpose of MPIDR_EL1-based indexing, + * treat H10 and earlier devices as though they contain a single cluster. + */ + cluster_offsets[0] = 0; +#endif + assert(topology_info.boot_cpu != NULL); + ml_read_chip_revision(&topology_info.chip_revision); /* * Set TPIDRRO_EL0 to indicate the correct cpu number, as we may @@ -1128,95 +993,162 @@ ml_parse_cpu_topology(void) * per-cpu data object. 
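/*
 * Worked example for the cluster_offsets construction above (values are
 * hypothetical): two physical clusters, AFF1 = 0 holding CPUs with AFF0 =
 * 0..3 and AFF1 = 1 holding CPUs with AFF0 = 0..1, yield
 *
 *     cluster_offsets[0] = 0;   // cluster 0 occupies slots 0..3
 *     cluster_offsets[1] = 4;   // cluster 1 occupies slots 4..5
 *
 * so the CPU with AFF1 = 1, AFF0 = 1 maps to slot 4 + 1 = 5, and the six
 * slots used stay within MAX_CPUS as the final assert checks.
 */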
*/ assert(__builtin_arm_rsr64("TPIDRRO_EL0") == 0); - __builtin_arm_wsr64("TPIDRRO_EL0", (uint64_t)boot_cpu); + __builtin_arm_wsr64("TPIDRRO_EL0", (uint64_t)topology_info.boot_cpu->cpu_id); +} + +const ml_topology_info_t * +ml_get_topology_info(void) +{ + return &topology_info; +} + +void +ml_map_cpu_pio(void) +{ + unsigned int i; + + for (i = 0; i < topology_info.num_cpus; i++) { + ml_topology_cpu_t *cpu = &topology_info.cpus[i]; + if (cpu->cpu_IMPL_pa) { + cpu->cpu_IMPL_regs = (vm_offset_t)ml_io_map(cpu->cpu_IMPL_pa, cpu->cpu_IMPL_len); + cpu->coresight_regs = (vm_offset_t)ml_io_map(cpu->coresight_pa, cpu->coresight_len); + } + if (cpu->cpu_UTTDBG_pa) { + cpu->cpu_UTTDBG_regs = (vm_offset_t)ml_io_map(cpu->cpu_UTTDBG_pa, cpu->cpu_UTTDBG_len); + } + } + + for (i = 0; i < topology_info.num_clusters; i++) { + ml_topology_cluster_t *cluster = &topology_info.clusters[i]; + if (cluster->acc_IMPL_pa) { + cluster->acc_IMPL_regs = (vm_offset_t)ml_io_map(cluster->acc_IMPL_pa, cluster->acc_IMPL_len); + } + if (cluster->cpm_IMPL_pa) { + cluster->cpm_IMPL_regs = (vm_offset_t)ml_io_map(cluster->cpm_IMPL_pa, cluster->cpm_IMPL_len); + } + } } unsigned int ml_get_cpu_count(void) { - return avail_cpus; + return topology_info.num_cpus; +} + +unsigned int +ml_get_cluster_count(void) +{ + return topology_info.num_clusters; } int ml_get_boot_cpu_number(void) { - return boot_cpu; + return topology_info.boot_cpu->cpu_id; } cluster_type_t ml_get_boot_cluster(void) { - return boot_cluster; + return topology_info.boot_cluster->cluster_type; } int ml_get_cpu_number(uint32_t phys_id) { - for (int log_id = 0; log_id <= ml_get_max_cpu_number(); ++log_id) { - if (cpu_phys_ids[log_id] == phys_id) { - return log_id; + phys_id &= MPIDR_AFF1_MASK | MPIDR_AFF0_MASK; + + for (unsigned i = 0; i < topology_info.num_cpus; i++) { + if (topology_info.cpus[i].phys_id == phys_id) { + return i; } } + return -1; } +int +ml_get_cluster_number(uint32_t phys_id) +{ + int cpu_id = ml_get_cpu_number(phys_id); + if (cpu_id < 0) { + return -1; + } + + ml_topology_cpu_t *cpu = &topology_info.cpus[cpu_id]; + + return cpu->cluster_id; +} + +unsigned int +ml_get_cpu_number_local(void) +{ + uint64_t mpidr_el1_value = 0; + unsigned cpu_id; + + /* We identify the CPU based on the constant bits of MPIDR_EL1. */ + MRS(mpidr_el1_value, "MPIDR_EL1"); + cpu_id = ml_get_cpu_number((uint32_t)mpidr_el1_value); + + assert(cpu_id <= (unsigned int)ml_get_max_cpu_number()); + + return cpu_id; +} + +int +ml_get_cluster_number_local() +{ + uint64_t mpidr_el1_value = 0; + unsigned cluster_id; + + /* We identify the cluster based on the constant bits of MPIDR_EL1. 
*/ + MRS(mpidr_el1_value, "MPIDR_EL1"); + cluster_id = ml_get_cluster_number((uint32_t)mpidr_el1_value); + + assert(cluster_id <= (unsigned int)ml_get_max_cluster_number()); + + return cluster_id; +} + int ml_get_max_cpu_number(void) { - return max_cpu_number; + return topology_info.max_cpu_id; +} + +int +ml_get_max_cluster_number(void) +{ + return topology_info.max_cluster_id; } +unsigned int +ml_get_first_cpu_id(unsigned int cluster_id) +{ + return topology_info.clusters[cluster_id].first_cpu_id; +} void ml_lockdown_init() { - lockdown_handler_grp = lck_grp_alloc_init("lockdown_handler", NULL); - assert(lockdown_handler_grp != NULL); - - lck_mtx_init(&lockdown_handler_lck, lockdown_handler_grp, NULL); - -#if defined(KERNEL_INTEGRITY_CTRR) - init_ctrr_cpu_start_lock(); +#if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR) + rorgn_stash_range(); #endif } kern_return_t ml_lockdown_handler_register(lockdown_handler_t f, void *this) { - if (lockdown_handler || !f) { + if (!f) { return KERN_FAILURE; } - lck_mtx_lock(&lockdown_handler_lck); - lockdown_handler = f; - lockdown_this = this; - -#if !(defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)) - lockdown_done = 1; - lockdown_handler(this); -#else - if (lockdown_done) { - lockdown_handler(this); - } -#endif - lck_mtx_unlock(&lockdown_handler_lck); + assert(lockdown_done); + f(this); // XXX: f this whole function return KERN_SUCCESS; } -void -ml_lockdown_run_handler() -{ - lck_mtx_lock(&lockdown_handler_lck); - assert(!lockdown_done); - - lockdown_done = 1; - if (lockdown_handler) { - lockdown_handler(lockdown_this); - } - lck_mtx_unlock(&lockdown_handler_lck); -} - kern_return_t ml_processor_register(ml_processor_info_t *in_processor_info, processor_t *processor_out, ipi_handler_t *ipi_handler_out, @@ -1231,7 +1163,7 @@ ml_processor_register(ml_processor_info_t *in_processor_info, return KERN_FAILURE; } - if ((unsigned int)OSIncrementAtomic((SInt32*)®_cpu_count) >= avail_cpus) { + if ((unsigned)OSIncrementAtomic((SInt32*)®_cpu_count) >= topology_info.num_cpus) { return KERN_FAILURE; } @@ -1244,7 +1176,7 @@ ml_processor_register(ml_processor_info_t *in_processor_info, is_boot_cpu = TRUE; } - assert(in_processor_info->log_id < MAX_CPUS); + assert(in_processor_info->log_id <= (uint32_t)ml_get_max_cpu_number()); this_cpu_datap->cpu_id = in_processor_info->cpu_id; @@ -1254,22 +1186,22 @@ ml_processor_register(ml_processor_info_t *in_processor_info, } if (!is_boot_cpu) { - this_cpu_datap->cpu_number = in_processor_info->log_id; + this_cpu_datap->cpu_number = (unsigned short)(in_processor_info->log_id); if (cpu_data_register(this_cpu_datap) != KERN_SUCCESS) { goto processor_register_error; } } - this_cpu_datap->cpu_idle_notify = (void *) in_processor_info->processor_idle; - this_cpu_datap->cpu_cache_dispatch = in_processor_info->platform_cache_dispatch; + this_cpu_datap->cpu_idle_notify = in_processor_info->processor_idle; + this_cpu_datap->cpu_cache_dispatch = (cache_dispatch_t)in_processor_info->platform_cache_dispatch; nanoseconds_to_absolutetime((uint64_t) in_processor_info->powergate_latency, &this_cpu_datap->cpu_idle_latency); this_cpu_datap->cpu_reset_assist = kvtophys(in_processor_info->powergate_stub_addr); - this_cpu_datap->idle_timer_notify = (void *) in_processor_info->idle_timer; + this_cpu_datap->idle_timer_notify = in_processor_info->idle_timer; this_cpu_datap->idle_timer_refcon = in_processor_info->idle_timer_refcon; - this_cpu_datap->platform_error_handler = (void *) 
in_processor_info->platform_error_handler; + this_cpu_datap->platform_error_handler = in_processor_info->platform_error_handler; this_cpu_datap->cpu_regmap_paddr = in_processor_info->regmap_paddr; this_cpu_datap->cpu_phys_id = in_processor_info->phys_id; this_cpu_datap->cpu_l2_access_penalty = in_processor_info->l2_access_penalty; @@ -1288,12 +1220,13 @@ ml_processor_register(ml_processor_info_t *in_processor_info, #endif /* HAS_CLUSTER */ pset = pset_find(in_processor_info->cluster_id, processor_pset(master_processor)); + assert(pset != NULL); kprintf("%s>cpu_id %p cluster_id %d cpu_number %d is type %d\n", __FUNCTION__, in_processor_info->cpu_id, in_processor_info->cluster_id, this_cpu_datap->cpu_number, in_processor_info->cluster_type); + processor_t processor = PERCPU_GET_RELATIVE(processor, cpu_data, this_cpu_datap); if (!is_boot_cpu) { - processor_init((struct processor *)this_cpu_datap->cpu_processor, - this_cpu_datap->cpu_number, pset); + processor_init(processor, this_cpu_datap->cpu_number, pset); if (this_cpu_datap->cpu_l2_access_penalty) { /* @@ -1302,12 +1235,11 @@ ml_processor_register(ml_processor_info_t *in_processor_info, * scheduler, so that threads use the cores with better L2 * preferentially. */ - processor_set_primary(this_cpu_datap->cpu_processor, - master_processor); + processor_set_primary(processor, master_processor); } } - *processor_out = this_cpu_datap->cpu_processor; + *processor_out = processor; *ipi_handler_out = cpu_signal_handler; #if CPMU_AIC_PMI && MONOTONIC *pmi_handler_out = mt_cpmu_aic_pmi; @@ -1422,6 +1354,13 @@ ml_io_map_wcomb( return io_map(phys_addr, size, VM_WIMG_WCOMB); } +void +ml_io_unmap(vm_offset_t addr, vm_size_t sz) +{ + pmap_remove(kernel_pmap, addr, addr + sz); + kmem_free(kernel_map, addr, sz); +} + /* boot memory allocation */ vm_offset_t ml_static_malloc( @@ -1449,14 +1388,30 @@ vm_offset_t ml_static_slide( vm_offset_t vaddr) { - return phystokv(vaddr + vm_kernel_slide - gVirtBase + gPhysBase); + vm_offset_t slid_vaddr = vaddr + vm_kernel_slide; + + if ((slid_vaddr < vm_kernelcache_base) || (slid_vaddr >= vm_kernelcache_top)) { + /* This is only intended for use on kernelcache addresses. */ + return 0; + } + + /* + * Because the address is in the kernelcache, we can do a simple + * slide calculation. + */ + return slid_vaddr; } vm_offset_t ml_static_unslide( vm_offset_t vaddr) { - return ml_static_vtop(vaddr) - gPhysBase + gVirtBase - vm_kernel_slide; + if ((vaddr < vm_kernelcache_base) || (vaddr >= vm_kernelcache_top)) { + /* This is only intended for use on kernelcache addresses. */ + return 0; + } + + return vaddr - vm_kernel_slide; } extern tt_entry_t *arm_kva_to_tte(vm_offset_t va); @@ -1483,6 +1438,9 @@ ml_static_protect( if ((new_prot & VM_PROT_WRITE) && (new_prot & VM_PROT_EXECUTE)) { panic("ml_static_protect(): WX request on %p", (void *) vaddr); } + if (lockdown_done && (new_prot & VM_PROT_EXECUTE)) { + panic("ml_static_protect(): attempt to inject executable mapping on %p", (void *) vaddr); + } /* Set up the protection bits, and block bits so we can validate block mappings. */ if (new_prot & VM_PROT_WRITE) { @@ -1511,8 +1469,8 @@ ml_static_protect( pt_entry_t ptmp; #if XNU_MONITOR - assert(!TEST_PAGE_RATIO_4); assert(!pmap_is_monitor(ppn)); + assert(!TEST_PAGE_RATIO_4); #endif tte2 = arm_kva_to_tte(vaddr_cur); @@ -1564,7 +1522,6 @@ ml_static_protect( } } else { ptmp = *pte_p; - /* We only need to update the page tables if the protections do not match. 
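/*
 * Sketch of the ml_static_slide()/ml_static_unslide() contract after the
 * change above: both are restricted to kernelcache addresses, return 0 for
 * anything outside that range, and invert each other within it.
 */
static void
sketch_slide_roundtrip(vm_offset_t unslid_kc_addr)
{
	vm_offset_t slid = ml_static_slide(unslid_kc_addr);

	if (slid != 0) {
		/* slid lies inside [vm_kernelcache_base, vm_kernelcache_top). */
		assert(ml_static_unslide(slid) == unslid_kc_addr);
	}
	/* A non-kernelcache input yields 0 rather than a bogus address. */
}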
*/ if ((ptmp & (ARM_PTE_APMASK | ARM_PTE_PNXMASK | ARM_PTE_NXMASK)) != arm_prot) { ptmp = (ptmp & ~(ARM_PTE_APMASK | ARM_PTE_PNXMASK | ARM_PTE_NXMASK)) | arm_prot; @@ -1596,6 +1553,7 @@ ml_static_mfree( vm_offset_t vaddr_cur; ppnum_t ppn; uint32_t freed_pages = 0; + uint32_t freed_kernelcache_pages = 0; /* It is acceptable (if bad) to fail to free. */ if (vaddr < VM_MIN_KERNEL_ADDRESS) { @@ -1619,21 +1577,17 @@ ml_static_mfree( panic("Failed ml_static_mfree on %p", (void *) vaddr_cur); } -#if 0 - /* - * Must NOT tear down the "V==P" mapping for vaddr_cur as the zone alias scheme - * relies on the persistence of these mappings for all time. - */ - // pmap_remove(kernel_pmap, (addr64_t) vaddr_cur, (addr64_t) (vaddr_cur + PAGE_SIZE)); -#endif - vm_page_create(ppn, (ppn + 1)); freed_pages++; + if (vaddr_cur >= segLOWEST && vaddr_cur < end_kern) { + freed_kernelcache_pages++; + } } } vm_page_lockspin_queues(); vm_page_wire_count -= freed_pages; vm_page_wire_count_initial -= freed_pages; + vm_page_kernelcache_count -= freed_kernelcache_pages; vm_page_unlock_queues(); #if DEBUG kprintf("ml_static_mfree: Released 0x%x pages at VA %p, size:0x%llx, last ppn: 0x%x\n", freed_pages, (void *)vaddr, (uint64_t)size, ppn); @@ -1834,9 +1788,9 @@ ml_set_decrementer(uint32_t dec_value) cdp->cpu_decrementer = dec_value; if (cdp->cpu_set_decrementer_func) { - ((void (*)(uint32_t))cdp->cpu_set_decrementer_func)(dec_value); + cdp->cpu_set_decrementer_func(dec_value); } else { - __asm__ volatile ("msr CNTP_TVAL_EL0, %0" : : "r"((uint64_t)dec_value)); + __builtin_arm_wsr64("CNTV_TVAL_EL0", (uint64_t)dec_value); } } @@ -1846,10 +1800,10 @@ ml_get_hwclock() uint64_t timebase; // ISB required by ARMV7C.b section B8.1.2 & ARMv8 section D6.1.2 - // "Reads of CNTPCT[_EL0] can occur speculatively and out of order relative + // "Reads of CNT[PV]CT[_EL0] can occur speculatively and out of order relative // to other instructions executed on the same processor." __builtin_arm_isb(ISB_SY); - timebase = __builtin_arm_rsr64("CNTPCT_EL0"); + timebase = __builtin_arm_rsr64("CNTVCT_EL0"); return timebase; } @@ -1860,6 +1814,20 @@ ml_get_timebase() return ml_get_hwclock() + getCpuDatap()->cpu_base_timebase; } +/* + * Get the speculative timebase without an ISB. + */ +__attribute__((unused)) +static uint64_t +ml_get_speculative_timebase() +{ + uint64_t timebase; + + timebase = __builtin_arm_rsr64("CNTVCT_EL0"); + + return timebase + getCpuDatap()->cpu_base_timebase; +} + uint32_t ml_get_decrementer() { @@ -1869,11 +1837,11 @@ ml_get_decrementer() assert(ml_get_interrupts_enabled() == FALSE); if (cdp->cpu_get_decrementer_func) { - dec = ((uint32_t (*)(void))cdp->cpu_get_decrementer_func)(); + dec = cdp->cpu_get_decrementer_func(); } else { uint64_t wide_val; - __asm__ volatile ("mrs %0, CNTP_TVAL_EL0" : "=r"(wide_val)); + wide_val = __builtin_arm_rsr64("CNTV_TVAL_EL0"); dec = (uint32_t)wide_val; assert(wide_val == (uint64_t)dec); } @@ -1884,24 +1852,8 @@ ml_get_decrementer() boolean_t ml_get_timer_pending() { - uint64_t cntp_ctl; - - __asm__ volatile ("mrs %0, CNTP_CTL_EL0" : "=r"(cntp_ctl)); - return ((cntp_ctl & CNTP_CTL_EL0_ISTATUS) != 0) ? TRUE : FALSE; -} - -boolean_t -ml_wants_panic_trap_to_debugger(void) -{ - boolean_t result = FALSE; -#if XNU_MONITOR - /* - * This looks racey, but if we are in the PPL, preemption will be - * disabled. 
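/*
 * Sketch contrasting the two timebase readers above: ml_get_timebase() goes
 * through ml_get_hwclock(), whose ISB keeps the counter read from being
 * hoisted ahead of earlier instructions, while ml_get_speculative_timebase()
 * skips the barrier for callers (such as timer_state_event() below) that
 * only need a cheap, monotonic, approximately-current timestamp.
 */
static void
sketch_timebase_readers(void)
{
	uint64_t ordered = ml_get_timebase();             /* ISB + CNTVCT_EL0 + per-CPU base */
	uint64_t relaxed = ml_get_speculative_timebase(); /* CNTVCT_EL0 + per-CPU base, may read early */

	(void)ordered;
	(void)relaxed;
}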
- */ - result = ((pmap_get_cpu_data()->ppl_state == PPL_STATE_DISPATCH) && pmap_ppl_locked_down); -#endif - return result; + uint64_t cntv_ctl = __builtin_arm_rsr64("CNTV_CTL_EL0"); + return ((cntv_ctl & CNTV_CTL_EL0_ISTATUS) != 0) ? TRUE : FALSE; } static void @@ -2065,13 +2017,13 @@ _enable_timebase_event_stream(uint32_t bit_index) /* * If the SOC supports it (and it isn't broken), enable - * EL0 access to the physical timebase register. + * EL0 access to the timebase registers. */ if (user_timebase_type() != USER_TIMEBASE_NONE) { - cntkctl |= CNTKCTL_EL1_PL0PCTEN; + cntkctl |= (CNTKCTL_EL1_PL0PCTEN | CNTKCTL_EL1_PL0VCTEN); } - __asm__ volatile ("msr CNTKCTL_EL1, %0" : : "r"(cntkctl)); + __builtin_arm_wsr64("CNTKCTL_EL1", cntkctl); } /* @@ -2080,31 +2032,48 @@ _enable_timebase_event_stream(uint32_t bit_index) static void _enable_virtual_timer(void) { - uint64_t cntvctl = CNTP_CTL_EL0_ENABLE; /* One wants to use 32 bits, but "mrs" prefers it this way */ + uint64_t cntvctl = CNTV_CTL_EL0_ENABLE; /* One wants to use 32 bits, but "mrs" prefers it this way */ - __asm__ volatile ("msr CNTP_CTL_EL0, %0" : : "r"(cntvctl)); + __builtin_arm_wsr64("CNTV_CTL_EL0", cntvctl); + /* disable the physical timer as a precaution, as its registers reset to architecturally unknown values */ + __builtin_arm_wsr64("CNTP_CTL_EL0", CNTP_CTL_EL0_IMASKED); } -uint64_t events_per_sec = 0; - void fiq_context_init(boolean_t enable_fiq __unused) { - _enable_timebase_event_stream(fiq_eventi); - /* Interrupts still disabled. */ assert(ml_get_interrupts_enabled() == FALSE); _enable_virtual_timer(); } void -fiq_context_bootstrap(boolean_t enable_fiq) +wfe_timeout_init(void) +{ + _enable_timebase_event_stream(arm64_eventi); +} + +void +wfe_timeout_configure(void) { -#if defined(APPLE_ARM64_ARCH_FAMILY) || defined(BCM2837) /* Could fill in our own ops here, if we needed them */ - uint64_t ticks_per_sec, ticks_per_event; + uint64_t ticks_per_sec, ticks_per_event, events_per_sec = 0; uint32_t bit_index; + if (PE_parse_boot_argn("wfe_events_sec", &events_per_sec, sizeof(events_per_sec))) { + if (events_per_sec <= 0) { + events_per_sec = 1; + } else if (events_per_sec > USEC_PER_SEC) { + events_per_sec = USEC_PER_SEC; + } + } else { +#if defined(ARM_BOARD_WFE_TIMEOUT_NS) + events_per_sec = NSEC_PER_SEC / ARM_BOARD_WFE_TIMEOUT_NS; +#else /* !defined(ARM_BOARD_WFE_TIMEOUT_NS) */ + /* Default to 1usec (or as close as we can get) */ + events_per_sec = USEC_PER_SEC; +#endif /* !defined(ARM_BOARD_WFE_TIMEOUT_NS) */ + } ticks_per_sec = gPEClockFrequencyInfo.timebase_frequency_hz; ticks_per_event = ticks_per_sec / events_per_sec; bit_index = flsll(ticks_per_event) - 1; /* Highest bit set */ @@ -2124,11 +2093,8 @@ fiq_context_bootstrap(boolean_t enable_fiq) bit_index--; } - fiq_eventi = bit_index; -#else -#error Need a board configuration. 
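To make the arithmetic in wfe_timeout_configure() concrete, here is a worked example; the 24 MHz timebase frequency is hypothetical, and the rounding/clamping lines elided from this hunk may adjust the result further.

	ticks_per_sec   = 24000000;                 /* gPEClockFrequencyInfo.timebase_frequency_hz */
	events_per_sec  = USEC_PER_SEC;             /* the 1 usec default                          */
	ticks_per_event = 24000000 / 1000000 = 24;
	bit_index       = flsll(24) - 1 = 4;        /* event every 2^4 = 16 ticks, roughly 0.67 usec */

wfe_timeout_init() then programs that bit index through _enable_timebase_event_stream(arm64_eventi).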
-#endif - fiq_context_init(enable_fiq); + arm64_eventi = bit_index; + wfe_timeout_init(); } boolean_t @@ -2184,13 +2150,11 @@ ml_energy_stat(thread_t t) void ml_gpu_stat_update(__unused uint64_t gpu_ns_delta) { -#if CONFIG_EMBEDDED /* * For now: update the resource coalition stats of the * current thread's coalition */ task_coalition_update_gpu_stats(current_task(), gpu_ns_delta); -#endif } uint64_t @@ -2199,7 +2163,8 @@ ml_gpu_stat(__unused thread_t t) return 0; } -#if !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME +#if !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME || HAS_FAST_CNTVCT + static void timer_state_event(boolean_t switch_to_kernel) { @@ -2208,8 +2173,8 @@ timer_state_event(boolean_t switch_to_kernel) return; } - processor_data_t *pd = &getCpuDatap()->cpu_processor->processor_data; - uint64_t now = ml_get_timebase(); + processor_t pd = current_processor(); + uint64_t now = ml_get_speculative_timebase(); timer_stop(pd->current_state, now); pd->current_state = (switch_to_kernel) ? &pd->system_state : &pd->user_state; @@ -2231,7 +2196,7 @@ timer_state_event_kernel_to_user(void) { timer_state_event(FALSE); } -#endif /* !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME */ +#endif /* !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME || HAS_FAST_CNTVCT */ /* * The following are required for parts of the kernel @@ -2307,15 +2272,39 @@ ex_cb_invoke( } #if defined(HAS_APPLE_PAC) +static inline bool +cpu_supports_userkeyen() +{ +#if HAS_APCTL_EL1_USERKEYEN + return true; +#else + return false; +#endif +} + +/** + * Returns the default JOP key. Depending on how the CPU diversifies userspace + * JOP keys, this value may reflect either KERNKeyLo or APIAKeyLo. + */ +uint64_t +ml_default_jop_pid(void) +{ + if (cpu_supports_userkeyen()) { + return KERNEL_KERNKEY_ID; + } else { + return KERNEL_JOP_ID; + } +} + void -ml_task_set_disable_user_jop(task_t task, boolean_t disable_user_jop) +ml_task_set_disable_user_jop(task_t task, uint8_t disable_user_jop) { assert(task); task->disable_user_jop = disable_user_jop; } void -ml_thread_set_disable_user_jop(thread_t thread, boolean_t disable_user_jop) +ml_thread_set_disable_user_jop(thread_t thread, uint8_t disable_user_jop) { assert(thread); thread->machine.disable_user_jop = disable_user_jop; @@ -2330,35 +2319,183 @@ ml_task_set_rop_pid(task_t task, task_t parent_task, boolean_t inherit) task->rop_pid = early_random(); } } + +/** + * jop_pid may be inherited from the parent task or generated inside the shared + * region. Unfortunately these two parameters are available at very different + * times during task creation, so we need to split this into two steps. + */ +void +ml_task_set_jop_pid(task_t task, task_t parent_task, boolean_t inherit) +{ + if (inherit) { + task->jop_pid = parent_task->jop_pid; + } else { + task->jop_pid = ml_default_jop_pid(); + } +} + +void +ml_task_set_jop_pid_from_shared_region(task_t task) +{ + vm_shared_region_t sr = vm_shared_region_get(task); + /* + * If there's no shared region, we can assign the key arbitrarily. This + * typically happens when Mach-O image activation failed part of the way + * through, and this task is in the middle of dying with SIGKILL anyway. + */ + if (__improbable(!sr)) { + task->jop_pid = early_random(); + return; + } + vm_shared_region_deallocate(sr); + + /* + * Similarly we have to worry about jetsam having killed the task and + * already cleared the shared_region_id. 
+ */ + task_lock(task); + if (task->shared_region_id != NULL) { + task->jop_pid = shared_region_find_key(task->shared_region_id); + } else { + task->jop_pid = early_random(); + } + task_unlock(task); +} + +void +ml_thread_set_jop_pid(thread_t thread, task_t task) +{ + thread->machine.jop_pid = task->jop_pid; +} #endif /* defined(HAS_APPLE_PAC) */ #if defined(HAS_APPLE_PAC) +#define _ml_auth_ptr_unchecked(_ptr, _suffix, _modifier) \ + asm volatile ("aut" #_suffix " %[ptr], %[modifier]" : [ptr] "+r"(_ptr) : [modifier] "r"(_modifier)); /* * ml_auth_ptr_unchecked: call this instead of ptrauth_auth_data * instrinsic when you don't want to trap on auth fail. * */ - void * ml_auth_ptr_unchecked(void *ptr, ptrauth_key key, uint64_t modifier) { switch (key & 0x3) { case ptrauth_key_asia: - asm volatile ("autia %[ptr], %[modifier]" : [ptr] "+r"(ptr) : [modifier] "r"(modifier)); + _ml_auth_ptr_unchecked(ptr, ia, modifier); break; case ptrauth_key_asib: - asm volatile ("autib %[ptr], %[modifier]" : [ptr] "+r"(ptr) : [modifier] "r"(modifier)); + _ml_auth_ptr_unchecked(ptr, ib, modifier); break; case ptrauth_key_asda: - asm volatile ("autda %[ptr], %[modifier]" : [ptr] "+r"(ptr) : [modifier] "r"(modifier)); + _ml_auth_ptr_unchecked(ptr, da, modifier); break; case ptrauth_key_asdb: - asm volatile ("autdb %[ptr], %[modifier]" : [ptr] "+r"(ptr) : [modifier] "r"(modifier)); + _ml_auth_ptr_unchecked(ptr, db, modifier); break; } return ptr; } #endif /* defined(HAS_APPLE_PAC) */ + +#ifdef CONFIG_XNUPOST +void +ml_expect_fault_begin(expected_fault_handler_t expected_fault_handler, uintptr_t expected_fault_addr) +{ + thread_t thread = current_thread(); + thread->machine.expected_fault_handler = expected_fault_handler; + thread->machine.expected_fault_addr = expected_fault_addr; +} + +void +ml_expect_fault_end(void) +{ + thread_t thread = current_thread(); + thread->machine.expected_fault_handler = NULL; + thread->machine.expected_fault_addr = 0; +} +#endif /* CONFIG_XNUPOST */ + +void +ml_hibernate_active_pre(void) +{ +#if HIBERNATION + if (kIOHibernateStateWakingFromHibernate == gIOHibernateState) { + /* validate rorgn hmac */ + ppl_hmac_compute_rorgn_hmac(); + + hibernate_rebuild_vm_structs(); + } +#endif /* HIBERNATION */ +} + +void +ml_hibernate_active_post(void) +{ +#if HIBERNATION + if (kIOHibernateStateWakingFromHibernate == gIOHibernateState) { + hibernate_machine_init(); + hibernate_vm_lock_end(); + current_cpu_datap()->cpu_hibernate = 0; + } +#endif /* HIBERNATION */ +} + +/** + * Return back a machine-dependent array of address space regions that should be + * reserved by the VM (pre-mapped in the address space). This will prevent user + * processes from allocating or deallocating from within these regions. + * + * @param vm_is64bit True if the process has a 64-bit address space. + * @param regions An out parameter representing an array of regions to reserve. + * + * @return The number of reserved regions returned through `regions`. + */ +size_t +ml_get_vm_reserved_regions(bool vm_is64bit, struct vm_reserved_region **regions) +{ + assert(regions != NULL); + + /** + * Reserved regions only apply to 64-bit address spaces. This is because + * we only expect to grow the maximum user VA address on 64-bit address spaces + * (we've essentially already reached the max for 32-bit spaces). The reserved + * regions should safely fall outside of the max user VA for 32-bit processes. 
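A caller-side sketch of the interface documented above (not part of the patch); how each returned region is actually reserved in the new map is left as a comment, since that logic lives in the VM layer.

	struct vm_reserved_region *regions = NULL;
	size_t nregions = ml_get_vm_reserved_regions(true, &regions);
	for (size_t i = 0; i < nregions; i++) {
		/* pre-map / reserve regions[i] in the nascent 64-bit address space */
	}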
+ */ + if (vm_is64bit) { + *regions = vm_reserved_regions; + return ARRAY_COUNT(vm_reserved_regions); + } else { + /* Don't reserve any VA regions on arm64_32 processes. */ + *regions = NULL; + return 0; + } +} +/* These WFE recommendations are expected to be updated on a relatively + * infrequent cadence, possibly from a different cluster, hence + * false cacheline sharing isn't expected to be material + */ +static uint64_t arm64_cluster_wfe_recs[MAX_CPU_CLUSTERS]; + +uint32_t +ml_update_cluster_wfe_recommendation(uint32_t wfe_cluster_id, uint64_t wfe_timeout_abstime_interval, __unused uint64_t wfe_hint_flags) +{ + assert(wfe_cluster_id < MAX_CPU_CLUSTERS); + assert(wfe_timeout_abstime_interval <= ml_wfe_hint_max_interval); + os_atomic_store(&arm64_cluster_wfe_recs[wfe_cluster_id], wfe_timeout_abstime_interval, relaxed); + return 0; /* Success */ +} + +uint64_t +ml_cluster_wfe_timeout(uint32_t wfe_cluster_id) +{ + /* This and its consumer does not synchronize vis-a-vis updates + * of the recommendation; races are acceptable. + */ + uint64_t wfet = os_atomic_load(&arm64_cluster_wfe_recs[wfe_cluster_id], relaxed); + return wfet; +} diff --git a/osfmk/arm64/machine_routines_asm.h b/osfmk/arm64/machine_routines_asm.h index e1896caa6..86d4168b0 100644 --- a/osfmk/arm64/machine_routines_asm.h +++ b/osfmk/arm64/machine_routines_asm.h @@ -52,16 +52,16 @@ * tmp5 - scratch register 5 */ /* BEGIN IGNORE CODESTYLE */ -.macro AUTH_THREAD_STATE_IN_X0 tmp1, tmp2, tmp3, tmp4, tmp5, el0_state_allowed=0 - ldr w2, [x0, SS64_CPSR] +.macro AUTH_THREAD_STATE_IN_X0_COMMON tmp1, tmp2, tmp3, tmp4, tmp5, el0_state_allowed=0, PC_OFF=SS64_PC, CPSR_OFF=SS64_CPSR, X16_OFF=SS64_X16, LR_OFF=SS64_LR, check_func=ml_check_signed_state + ldr w2, [x0, \CPSR_OFF] .if \el0_state_allowed==0 #if __has_feature(ptrauth_calls) // If testing for a canary CPSR value, ensure that we do not observe writes to other fields without it dmb ld #endif .endif - ldr x1, [x0, SS64_PC] - ldp x16, x17, [x0, SS64_X16] + ldr x1, [x0, \PC_OFF] + ldp x16, x17, [x0, \X16_OFF] #if defined(HAS_APPLE_PAC) // Save x3-x5 to preserve across call @@ -80,10 +80,10 @@ */ mov \tmp1, x1 mov \tmp2, x2 - ldr x3, [x0, SS64_LR] + ldr x3, [x0, \LR_OFF] mov x4, x16 mov x5, x17 - bl EXT(ml_check_signed_state) + bl EXT(\check_func) mov x1, \tmp1 mov x2, \tmp2 @@ -100,9 +100,17 @@ mov x4, \tmp4 mov x5, \tmp5 #else - ldr lr, [x0, SS64_LR] + ldr lr, [x0, \LR_OFF] #endif /* defined(HAS_APPLE_PAC) */ .endmacro + +.macro AUTH_THREAD_STATE_IN_X0 tmp1, tmp2, tmp3, tmp4, tmp5, el0_state_allowed=0 + AUTH_THREAD_STATE_IN_X0_COMMON \tmp1, \tmp2, \tmp3, \tmp4, \tmp5, \el0_state_allowed +.endmacro + +.macro AUTH_KERNEL_THREAD_STATE_IN_X0 tmp1, tmp2, tmp3, tmp4, tmp5, el0_state_allowed=0 + AUTH_THREAD_STATE_IN_X0_COMMON \tmp1, \tmp2, \tmp3, \tmp4, \tmp5, \el0_state_allowed, SS64_KERNEL_PC, SS64_KERNEL_CPSR, SS64_KERNEL_X16, SS64_KERNEL_LR, ml_check_kernel_signed_state +.endmacro /* END IGNORE CODESTYLE */ /* vim: set ft=asm: */ diff --git a/osfmk/arm64/machine_routines_asm.s b/osfmk/arm64/machine_routines_asm.s index 191997c13..19e4b96d3 100644 --- a/osfmk/arm64/machine_routines_asm.s +++ b/osfmk/arm64/machine_routines_asm.s @@ -27,7 +27,9 @@ */ #include +#include #include +#include #include #include #include @@ -36,22 +38,78 @@ #if defined(HAS_APPLE_PAC) + +.macro SET_KERN_KEY dst, apctl_el1 + orr \dst, \apctl_el1, #APCTL_EL1_KernKeyEn +.endmacro + +.macro CLEAR_KERN_KEY dst, apctl_el1 + and \dst, \apctl_el1, #~APCTL_EL1_KernKeyEn +.endmacro + /* - * void - * 
ml_set_kernelkey_enabled(boolean_t enable) - * - * Toggle pointer auth kernel domain key diversification. Assembly to prevent compiler reordering. - * + * uint64_t ml_enable_user_jop_key(uint64_t user_jop_key) */ + .align 2 + .globl EXT(ml_enable_user_jop_key) +LEXT(ml_enable_user_jop_key) + mov x1, x0 + mrs x2, TPIDR_EL1 + ldr x2, [x2, ACT_CPUDATAP] + ldr x0, [x2, CPU_JOP_KEY] + + cmp x0, x1 + b.eq Lskip_program_el0_jop_key + /* + * We can safely write to the JOP key registers without updating + * current_cpu_datap()->jop_key. The complementary + * ml_disable_user_jop_key() call will put back the old value. Interrupts + * are also disabled, so nothing else will read this field in the meantime. + */ + SET_JOP_KEY_REGISTERS x1, x2 +Lskip_program_el0_jop_key: + + /* + * if (cpu has APCTL_EL1.UserKeyEn) { + * set APCTL_EL1.KernKeyEn // KERNKey is mixed into EL0 keys + * } else { + * clear APCTL_EL1.KernKeyEn // KERNKey is not mixed into EL0 keys + * } + */ + mrs x1, ARM64_REG_APCTL_EL1 +#if defined(HAS_APCTL_EL1_USERKEYEN) + SET_KERN_KEY x1, x1 +#else + CLEAR_KERN_KEY x1, x1 +#endif + msr ARM64_REG_APCTL_EL1, x1 + isb + ret +/* + * void ml_disable_user_jop_key(uint64_t user_jop_key, uint64_t saved_jop_state) + */ .align 2 - .globl EXT(ml_set_kernelkey_enabled) -LEXT(ml_set_kernelkey_enabled) + .globl EXT(ml_disable_user_jop_key) +LEXT(ml_disable_user_jop_key) + cmp x0, x1 + b.eq Lskip_program_prev_jop_key + SET_JOP_KEY_REGISTERS x1, x2 +Lskip_program_prev_jop_key: + + /* + * if (cpu has APCTL_EL1.UserKeyEn) { + * clear APCTL_EL1.KernKeyEn // KERNKey is not mixed into EL1 keys + * } else { + * set APCTL_EL1.KernKeyEn // KERNKey is mixed into EL1 keys + * } + */ mrs x1, ARM64_REG_APCTL_EL1 - orr x2, x1, #APCTL_EL1_KernKeyEn - and x1, x1, #~APCTL_EL1_KernKeyEn - cmp w0, #0 - csel x1, x1, x2, eq +#if defined(HAS_APCTL_EL1_USERKEYEN) + CLEAR_KERN_KEY x1, x1 +#else + SET_KERN_KEY x1, x1 +#endif msr ARM64_REG_APCTL_EL1, x1 isb ret @@ -93,22 +151,8 @@ LEXT(set_nex_pg) cbz x14, Lnex_pg_done // Set the SEG-recommended value of 12 additional reset cycles - mrs x14, ARM64_REG_HID13 - and x14, x14, (~ARM64_REG_HID13_RstCyc_mask) - orr x14, x14, ARM64_REG_HID13_RstCyc_val - msr ARM64_REG_HID13, x14 - - // Load nexpg boot-arg - adrp x14, EXT(nex_pg)@page - add x14, x14, EXT(nex_pg)@pageoff - ldr w14, [x14] - - mrs x13, ARM64_REG_HID14 - and x13, x13, (~ARM64_REG_HID14_NexPwgEn) - cbz w14, Lset_nex_pg - orr x13, x13, ARM64_REG_HID14_NexPwgEn -Lset_nex_pg: - msr ARM64_REG_HID14, x13 + HID_INSERT_BITS ARM64_REG_HID13, ARM64_REG_HID13_RstCyc_mask, ARM64_REG_HID13_RstCyc_val, x13 + HID_SET_BITS ARM64_REG_HID14, ARM64_REG_HID14_NexPwgEn, x13 Lnex_pg_done: ret @@ -224,9 +268,11 @@ LEXT(set_mmu_ttb_alternate) mov lr, x1 #else #if defined(HAS_VMSA_LOCK) +#if DEBUG || DEVELOPMENT mrs x1, ARM64_REG_VMSA_LOCK_EL1 and x1, x1, #(VMSA_LOCK_TTBR1_EL1) cbnz x1, L_set_locked_reg_panic +#endif /* DEBUG || DEVELOPMENT */ #endif /* defined(HAS_VMSA_LOCK) */ msr TTBR1_EL1, x0 #endif /* defined(KERNEL_INTEGRITY_KTRR) */ @@ -250,6 +296,16 @@ LEXT(set_mmu_ttb) isb sy ret + +#if XNU_MONITOR + .text + .align 2 + .globl EXT(ml_get_ppl_cpu_data) +LEXT(ml_get_ppl_cpu_data) + LOAD_PMAP_CPU_DATA x0, x1, x2 + ret +#endif + /* * set AUX control register */ @@ -282,7 +338,11 @@ LEXT(set_vbar_el1) LEXT(vmsa_lock) isb sy mov x1, #(VMSA_LOCK_SCTLR_M_BIT) +#if __ARM_MIXED_PAGE_SIZE__ + mov x0, #(VMSA_LOCK_TTBR1_EL1 | VMSA_LOCK_VBAR_EL1) +#else mov x0, #(VMSA_LOCK_TTBR1_EL1 | VMSA_LOCK_TCR_EL1 | VMSA_LOCK_VBAR_EL1) +#endif orr x0, x0, x1 msr 
ARM64_REG_VMSA_LOCK_EL1, x0 isb sy @@ -297,26 +357,32 @@ LEXT(vmsa_lock) .globl EXT(set_tcr) LEXT(set_tcr) #if defined(APPLE_ARM64_ARCH_FAMILY) +#if DEBUG || DEVELOPMENT // Assert that T0Z is always equal to T1Z eor x1, x0, x0, lsr #(TCR_T1SZ_SHIFT - TCR_T0SZ_SHIFT) and x1, x1, #(TCR_TSZ_MASK << TCR_T0SZ_SHIFT) cbnz x1, L_set_tcr_panic +#endif /* DEBUG || DEVELOPMENT */ +#endif /* defined(APPLE_ARM64_ARCH_FAMILY) */ #if defined(KERNEL_INTEGRITY_KTRR) mov x1, lr bl EXT(pinst_set_tcr) mov lr, x1 #else #if defined(HAS_VMSA_LOCK) +#if DEBUG || DEVELOPMENT // assert TCR unlocked mrs x1, ARM64_REG_VMSA_LOCK_EL1 and x1, x1, #(VMSA_LOCK_TCR_EL1) cbnz x1, L_set_locked_reg_panic +#endif /* DEBUG || DEVELOPMENT */ #endif /* defined(HAS_VMSA_LOCK) */ msr TCR_EL1, x0 #endif /* defined(KERNEL_INTRITY_KTRR) */ isb sy ret +#if DEBUG || DEVELOPMENT L_set_tcr_panic: PUSH_FRAME sub sp, sp, #16 @@ -338,17 +404,7 @@ L_set_tcr_panic_str: L_set_locked_reg_panic_str: .asciz "attempt to set locked register: (%llx)\n" -#else -#if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR) - mov x1, lr - bl EXT(pinst_set_tcr) - mov lr, x1 -#else - msr TCR_EL1, x0 -#endif - isb sy - ret -#endif // defined(APPLE_ARM64_ARCH_FAMILY) +#endif /* DEBUG || DEVELOPMENT */ /* * MMU kernel virtual to physical address translation @@ -360,6 +416,7 @@ LEXT(mmu_kvtop) mrs x2, DAIF // Load current DAIF msr DAIFSet, #(DAIFSC_IRQF | DAIFSC_FIQF) // Disable IRQ at s1e1r, x0 // Translation Stage 1 EL1 + isb sy mrs x1, PAR_EL1 // Read result msr DAIF, x2 // Restore interrupt state tbnz x1, #0, L_mmu_kvtop_invalid // Test Translation not valid @@ -382,6 +439,7 @@ LEXT(mmu_uvtop) mrs x2, DAIF // Load current DAIF msr DAIFSet, #(DAIFSC_IRQF | DAIFSC_FIQF) // Disable IRQ at s1e0r, x0 // Translation Stage 1 EL0 + isb sy mrs x1, PAR_EL1 // Read result msr DAIF, x2 // Restore interrupt state tbnz x1, #0, L_mmu_uvtop_invalid // Test Translation not valid @@ -751,13 +809,24 @@ LEXT(arm64_prepare_for_sleep) #if defined(APPLETYPHOON) // - mrs x0, ARM64_REG_HID2 // Read HID2 - orr x0, x0, #(ARM64_REG_HID2_disMMUmtlbPrefetch) // Set HID.DisableMTLBPrefetch - msr ARM64_REG_HID2, x0 // Write HID2 + HID_SET_BITS ARM64_REG_HID2, ARM64_REG_HID2_disMMUmtlbPrefetch, x9 dsb sy isb sy #endif +#if HAS_CLUSTER + cbnz x0, 1f // Skip if deep_sleep == true + // Mask FIQ and IRQ to avoid spurious wakeups + mrs x9, ARM64_REG_CYC_OVRD + and x9, x9, #(~(ARM64_REG_CYC_OVRD_irq_mask | ARM64_REG_CYC_OVRD_fiq_mask)) + mov x10, #(ARM64_REG_CYC_OVRD_irq_disable | ARM64_REG_CYC_OVRD_fiq_disable) + orr x9, x9, x10 + msr ARM64_REG_CYC_OVRD, x9 + isb +1: +#endif + + cbz x0, 1f // Skip if deep_sleep == false #if __ARM_GLOBAL_SLEEP_BIT__ // Enable deep sleep mrs x1, ARM64_REG_ACC_OVRD @@ -781,17 +850,19 @@ LEXT(arm64_prepare_for_sleep) mov x1, ARM64_REG_CYC_CFG_deepSleep msr ARM64_REG_CYC_CFG, x1 #endif + +1: // Set "OK to power down" () - mrs x0, ARM64_REG_CYC_OVRD - orr x0, x0, #(ARM64_REG_CYC_OVRD_ok2pwrdn_force_down) + mrs x9, ARM64_REG_CYC_OVRD + orr x9, x9, #(ARM64_REG_CYC_OVRD_ok2pwrdn_force_down) #if HAS_RETENTION_STATE - orr x0, x0, #(ARM64_REG_CYC_OVRD_disWfiRetn) + orr x9, x9, #(ARM64_REG_CYC_OVRD_disWfiRetn) #endif - msr ARM64_REG_CYC_OVRD, x0 + msr ARM64_REG_CYC_OVRD, x9 #if defined(APPLEMONSOON) || defined(APPLEVORTEX) - ARM64_IS_PCORE x0 - cbz x0, Lwfi_inst // skip if not p-core + ARM64_IS_PCORE x9 + cbz x9, Lwfi_inst // skip if not p-core /* : Flush the GUPS prefetcher prior to * wfi. 
A Skye HW bug can cause the GUPS prefetcher on p-cores @@ -807,16 +878,19 @@ LEXT(arm64_prepare_for_sleep) /* : Cyprus A0/A1 parts have a similar * bug in the HSP prefetcher that can be worked around through * the same method mentioned above for Skye. */ - SKIP_IF_CPU_VERSION_GREATER_OR_EQUAL x0, VORTEX_CPU_VERSION_B0, Lwfi_inst + mrs x9, MIDR_EL1 + EXEC_COREALL_REVLO CPU_VERSION_B0, x9, x10 #endif - mrs x0, ARM64_REG_HID10 - orr x0, x0, #(ARM64_REG_HID10_DisHwpGups) - msr ARM64_REG_HID10, x0 + mrs x9, ARM64_REG_HID10 + orr x9, x9, #(ARM64_REG_HID10_DisHwpGups) + msr ARM64_REG_HID10, x9 isb sy - and x0, x0, #(~(ARM64_REG_HID10_DisHwpGups)) - msr ARM64_REG_HID10, x0 + and x9, x9, #(~(ARM64_REG_HID10_DisHwpGups)) + msr ARM64_REG_HID10, x9 isb sy #endif + EXEC_END + Lwfi_inst: dsb sy isb sy @@ -868,9 +942,7 @@ LEXT(typhoon_prepare_for_wfi) PUSH_FRAME // - mrs x0, ARM64_REG_HID2 // Read HID2 - orr x0, x0, #(ARM64_REG_HID2_disMMUmtlbPrefetch) // Set HID.DisableMTLBPrefetch - msr ARM64_REG_HID2, x0 // Write HID2 + HID_SET_BITS ARM64_REG_HID2, ARM64_REG_HID2_disMMUmtlbPrefetch, x0 dsb sy isb sy @@ -885,10 +957,7 @@ LEXT(typhoon_return_from_wfi) PUSH_FRAME // - mrs x0, ARM64_REG_HID2 // Read HID2 - mov x1, #(ARM64_REG_HID2_disMMUmtlbPrefetch) // - bic x0, x0, x1 // Clear HID.DisableMTLBPrefetchMTLBPrefetch - msr ARM64_REG_HID2, x0 // Write HID2 + HID_CLEAR_BITS ARM64_REG_HID2, ARM64_REG_HID2_disMMUmtlbPrefetch, x0 dsb sy isb sy @@ -1070,15 +1139,13 @@ LEXT(monitor_call) #endif #ifdef HAS_APPLE_PAC -/** - * void ml_sign_thread_state(arm_saved_state_t *ss, uint64_t pc, - * uint32_t cpsr, uint64_t lr, uint64_t x16, - * uint64_t x17) +/* + * SIGN_THREAD_STATE + * + * Macro that signs thread state. + * $0 - Offset in arm_saved_state to store JOPHASH value. */ - .text - .align 2 - .globl EXT(ml_sign_thread_state) -LEXT(ml_sign_thread_state) +.macro SIGN_THREAD_STATE pacga x1, x1, x0 /* PC hash (gkey + &arm_saved_state) */ /* * Mask off the carry flag so we don't need to re-sign when that flag is @@ -1089,18 +1156,21 @@ LEXT(ml_sign_thread_state) pacga x1, x3, x1 /* LR Hash (gkey + spsr hash) */ pacga x1, x4, x1 /* X16 hash (gkey + lr hash) */ pacga x1, x5, x1 /* X17 hash (gkey + x16 hash) */ - str x1, [x0, SS64_JOPHASH] - ret + str x1, [x0, $0] +#if DEBUG || DEVELOPMENT + mrs x1, DAIF + tbz x1, #DAIF_IRQF_SHIFT, Lintr_enabled_panic +#endif /* DEBUG || DEVELOPMENT */ +.endmacro -/** - * void ml_check_signed_state(arm_saved_state_t *ss, uint64_t pc, - * uint32_t cpsr, uint64_t lr, uint64_t x16, - * uint64_t x17) +/* + * CHECK_SIGNED_STATE + * + * Macro that checks signed thread state. + * $0 - Offset in arm_saved_state to to read the JOPHASH value from. + * $1 - Label to jump to when check is unsuccessful. 
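For reference, the chained hash that SIGN_THREAD_STATE and CHECK_SIGNED_STATE compute can be restated in C roughly as follows. This sketch is not part of the patch; it assumes the generic-key ptrauth_sign_generic_data() intrinsic and the PSR_CF carry-flag mask, and the CPSR step elided from this hunk is reconstructed from the surrounding comments.

static uint64_t
jop_hash_sketch(void *ss, uint64_t pc, uint32_t cpsr, uint64_t lr,
    uint64_t x16, uint64_t x17)
{
	uint64_t h;

	h = ptrauth_sign_generic_data(pc, (uint64_t)ss);  /* PC keyed by &arm_saved_state */
	h = ptrauth_sign_generic_data(cpsr & ~PSR_CF, h); /* carry flag masked out        */
	h = ptrauth_sign_generic_data(lr, h);
	h = ptrauth_sign_generic_data(x16, h);
	h = ptrauth_sign_generic_data(x17, h);

	return h; /* stored at, or compared against, the JOPHASH slot */
}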
*/ - .text - .align 2 - .globl EXT(ml_check_signed_state) -LEXT(ml_check_signed_state) +.macro CHECK_SIGNED_STATE pacga x1, x1, x0 /* PC hash (gkey + &arm_saved_state) */ /* * Mask off the carry flag so we don't need to re-sign when that flag is @@ -1111,17 +1181,98 @@ LEXT(ml_check_signed_state) pacga x1, x3, x1 /* LR Hash (gkey + spsr hash) */ pacga x1, x4, x1 /* X16 hash (gkey + lr hash) */ pacga x1, x5, x1 /* X17 hash (gkey + x16 hash) */ - ldr x2, [x0, SS64_JOPHASH] + ldr x2, [x0, $0] cmp x1, x2 - b.ne Lcheck_hash_panic + b.ne $1 +#if DEBUG || DEVELOPMENT + mrs x1, DAIF + tbz x1, #DAIF_IRQF_SHIFT, Lintr_enabled_panic +#endif /* DEBUG || DEVELOPMENT */ +.endmacro + +/** + * void ml_sign_thread_state(arm_saved_state_t *ss, uint64_t pc, + * uint32_t cpsr, uint64_t lr, uint64_t x16, + * uint64_t x17) + */ + .text + .align 2 + .globl EXT(ml_sign_thread_state) +LEXT(ml_sign_thread_state) + SIGN_THREAD_STATE SS64_JOPHASH + ret + +/** + * void ml_sign_kernel_thread_state(arm_kernel_saved_state *ss, uint64_t pc, + * uint32_t cpsr, uint64_t lr, uint64_t x16, + * uint64_t x17) + */ + .text + .align 2 + .globl EXT(ml_sign_kernel_thread_state) +LEXT(ml_sign_kernel_thread_state) + SIGN_THREAD_STATE SS64_KERNEL_JOPHASH + ret + +/** + * void ml_check_signed_state(arm_saved_state_t *ss, uint64_t pc, + * uint32_t cpsr, uint64_t lr, uint64_t x16, + * uint64_t x17) + */ + .text + .align 2 + .globl EXT(ml_check_signed_state) +LEXT(ml_check_signed_state) + CHECK_SIGNED_STATE SS64_JOPHASH, Lcheck_hash_panic ret Lcheck_hash_panic: + /* + * ml_check_signed_state normally doesn't set up a stack frame, since it + * needs to work in the face of attackers that can modify the stack. + * However we lazily create one in the panic path: at this point we're + * *only* using the stack frame for unwinding purposes, and without one + * we'd be missing information about the caller. + */ + ARM64_STACK_PROLOG + PUSH_FRAME mov x1, x0 adr x0, Lcheck_hash_str CALL_EXTERN panic_with_thread_kernel_state + +/** + * void ml_check_kernel_signed_state(arm_kernel_saved_state *ss, uint64_t pc, + * uint32_t cpsr, uint64_t lr, uint64_t x16, + * uint64_t x17) + */ + .text + .align 2 + .globl EXT(ml_check_kernel_signed_state) +LEXT(ml_check_kernel_signed_state) + CHECK_SIGNED_STATE SS64_KERNEL_JOPHASH, Lcheck_kernel_hash_panic + ret +Lcheck_kernel_hash_panic: + ARM64_STACK_PROLOG + PUSH_FRAME + adr x0, Lcheck_hash_str + CALL_EXTERN panic + Lcheck_hash_str: .asciz "JOP Hash Mismatch Detected (PC, CPSR, or LR corruption)" +#if DEBUG || DEVELOPMENT +Lintr_enabled_panic: + ARM64_STACK_PROLOG + PUSH_FRAME + adr x0, Lintr_enabled_str + CALL_EXTERN panic +Lintr_enabled_str: + /* + * Please see the "Signing spilled register state" section of doc/pac.md + * for an explanation of why this is bad and how it should be fixed. 
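The DEBUG/DEVELOPMENT check that panics with the string below enforces a calling convention that, as a hedged sketch (not part of the patch), looks like this on the C side; machine_stack_attach() later in this patch follows the same pattern.

	boolean_t intr = ml_set_interrupts_enabled(FALSE);
	/* ... invoke ml_sign_thread_state() / ml_check_signed_state() (via assembly) ... */
	ml_set_interrupts_enabled(intr);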
+ */ + .asciz "Signed thread state manipulated with interrupts enabled" +#endif /* DEBUG || DEVELOPMENT */ + /** * void ml_auth_thread_state_invalid_cpsr(arm_saved_state_t *ss) * diff --git a/osfmk/arm64/machine_task.c b/osfmk/arm64/machine_task.c index 6c6429d4f..695e3ee6b 100644 --- a/osfmk/arm64/machine_task.c +++ b/osfmk/arm64/machine_task.c @@ -58,6 +58,7 @@ #include #include + extern zone_t ads_zone; kern_return_t @@ -216,7 +217,6 @@ machine_task_terminate(task_t task) { if (task) { void *task_debug; - task_debug = task->task_debug; if (task_debug != NULL) { task->task_debug = NULL; diff --git a/osfmk/arm64/memcmp_zero.s b/osfmk/arm64/memcmp_zero.s new file mode 100644 index 000000000..0c0dca62c --- /dev/null +++ b/osfmk/arm64/memcmp_zero.s @@ -0,0 +1,145 @@ +/* + * Copyright (c) 2012 Apple Computer, Inc. All rights reserved. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. + * + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ + * + * This file implements the following function for the arm64 architecture: + * + * int memcmp_zero_ptr_aligned(const void *s, size_t n); + * + * The memcmp_zero_ptr_aligned function checks string s of n bytes contains all zeros. + * Address and size of the string s must be pointer-aligned (8-byte for arm64). + * Return 0 if true, 1 otherwise. Also return 0 if n is 0. + */ + +/* this guard is used by tests */ +#ifdef __arm64__ + +#include "asm.h" + +.globl _memcmp_zero_ptr_aligned + +/***************************************************************************** + * Macros * + *****************************************************************************/ + +.macro EstablishFrame + ARM64_STACK_PROLOG + stp fp, lr, [sp, #-16]! + mov fp, sp +.endm + +.macro ClearFrameAndReturn + ldp fp, lr, [sp], #16 + ARM64_STACK_EPILOG +.endm + +/***************************************************************************** + * Constants * + *****************************************************************************/ + +.text +.align 5 + +/***************************************************************************** + * memcmp_zero_ptr_aligned entrypoint * + *****************************************************************************/ + +_memcmp_zero_ptr_aligned: + +// For the use case in , memory corruption should be rare +// so check for all zeros is fairly simple when early out is not necessary. 
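A hedged C equivalent of the contract described in the header comment above (not part of the patch): both the pointer and the byte count are assumed to be 8-byte aligned, 0 is returned when every byte is zero or when n is 0, and 1 otherwise.

static int
memcmp_zero_ptr_aligned_ref(const void *s, size_t n)
{
	const uint64_t *p = s;
	uint64_t acc = 0;

	for (size_t i = 0; i < n / sizeof(uint64_t); i++) {
		acc |= p[i]; /* OR all words together; any set bit survives */
	}
	return acc != 0;
}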
+// We just load all the bytes and logical OR them together. If the result +// is still zero, all the bytes are zero. + + EstablishFrame + cmp x1, #64 + b.lo L_sizeIsSmall + +// Load the first 64 bytes, and compute the number of bytes to the +// first 64-byte aligned location. Even though we are going to test +// 64 bytes, only those preceeding that 64-byte location "count" towards +// reducing the length of the buffer or advancing the pointers. + mov x2, x0 // copy the original addr + add x0, x0, #64 + and x0, x0, #-64 // aligned addr + ldp q4, q5, [x2] + ldp q6, q7, [x2, #32] + sub x2, x0, x2 // bytes between original and aligned addr + sub x1, x1, x2 // update length + subs x1, x1, #64 // check length > 64 + b.ls L_cleanup + +L_loop: + ldp q0, q1, [x0] + ldp q2, q3, [x0, #32] + orr.16b v4, v4, v0 // use orr to keep non-zero bytes + orr.16b v5, v5, v1 + orr.16b v6, v6, v2 + orr.16b v7, v7, v3 + add x0, x0, #64 // advance pointer + subs x1, x1, #64 // check length > 64 + b.hi L_loop + +L_cleanup: +// Between 0 and 64 more bytes need to be tested. The exact +// number of bytes to test is x1 + 64. Instead of using smaller conditional +// checks, we simply check 64 unaligned bytes from x0+x1. This load may overlap +// with the previous one but it's ok. + add x0, x0, x1 + ldp q0, q1, [x0] + ldp q2, q3, [x0, #32] + orr.16b v4, v4, v0 // use orr to keep non-zero bytes + orr.16b v5, v5, v1 + orr.16b v6, v6, v2 + orr.16b v7, v7, v3 + + orr.16b v4, v4, v5 // reduce four regs into two + orr.16b v6, v6, v7 + orr.16b v4, v4, v6 // reduce two regs into one + umaxv.16b b0, v4 // reduce 16 bytes into one + umov w0, v0.b[0] // move byte to GPR for testing + tst w0, w0 + cset x0, ne // return 1 if non-zero, 0 otherwise + ClearFrameAndReturn + +L_sizeIsSmall: + cbz x1, L_sizeIsZero // return zero if length is zero + + mov x3, #0 +0: ldr x2, [x0],#8 + orr x3, x3, x2 // use orr to keep non-zero bytes + subs x1, x1, #8 // update length + b.hi 0b + + tst x3, x3 + cset x0, ne // return 1 if non-zero, 0 otherwise + ClearFrameAndReturn + +L_sizeIsZero: + mov x0, #0 + ClearFrameAndReturn + +#endif // __arm64__ diff --git a/osfmk/arm64/monotonic_arm64.c b/osfmk/arm64/monotonic_arm64.c index 963af5e56..8cf48ad70 100644 --- a/osfmk/arm64/monotonic_arm64.c +++ b/osfmk/arm64/monotonic_arm64.c @@ -37,7 +37,7 @@ #include /* CHAR_BIT */ #include #include -#include /* DTFindEntry */ +#include /* SecureDTFindEntry */ #include #include #include @@ -70,6 +70,8 @@ bool mt_core_supported = true; +static const ml_topology_info_t *topology_info; + /* * PMC[0-1] are the 48-bit fixed counters -- PMC0 is cycles and PMC1 is * instructions (see arm64/monotonic.h). @@ -328,9 +330,10 @@ static bool mt_uncore_initted = false; */ #if UNCORE_PER_CLUSTER -static vm_size_t cpm_impl_size = 0; -static uintptr_t cpm_impl[__ARM_CLUSTER_COUNT__] = {}; -static uintptr_t cpm_impl_phys[__ARM_CLUSTER_COUNT__] = {}; +#define MAX_NMONITORS MAX_CPU_CLUSTERS +static uintptr_t cpm_impl[MAX_NMONITORS] = {}; +#else +#define MAX_NMONITORS (1) #endif /* UNCORE_PER_CLUSTER */ #if UNCORE_VERSION >= 2 @@ -367,16 +370,6 @@ static_assert(sizeof(uncore_active_ctrs) * CHAR_BIT >= UNCORE_NCTRS, */ bool mt_uncore_enabled = false; -/* - * Each uncore unit has its own monitor, corresponding to the memory hierarchy - * of the LLCs. 
- */ -#if UNCORE_PER_CLUSTER -#define UNCORE_NMONITORS (__ARM_CLUSTER_COUNT__) -#else /* UNCORE_PER_CLUSTER */ -#define UNCORE_NMONITORS (1) -#endif /* !UNCORE_PER_CLUSTER */ - /* * The uncore_events are the event configurations for each uncore counter -- as * a union to make it easy to program the hardware registers. @@ -389,7 +382,7 @@ static struct uncore_config { union { uint16_t uccm_masks[UNCORE_NCTRS]; uint64_t uccm_regs[UNCORE_NCTRS / 4]; - } uc_cpu_masks[UNCORE_NMONITORS]; + } uc_cpu_masks[MAX_NMONITORS]; } uncore_config; static struct uncore_monitor { @@ -412,12 +405,27 @@ static struct uncore_monitor { * Whether this monitor needs its registers restored after wake. */ bool um_sleeping; -} uncore_monitors[UNCORE_NMONITORS]; +} uncore_monitors[MAX_NMONITORS]; + +/* + * Each uncore unit has its own monitor, corresponding to the memory hierarchy + * of the LLCs. + */ +static unsigned int +uncore_nmonitors(void) +{ +#if UNCORE_PER_CLUSTER + return topology_info->num_clusters; +#else /* UNCORE_PER_CLUSTER */ + return 1; +#endif /* !UNCORE_PER_CLUSTER */ +} static unsigned int uncmon_get_curid(void) { #if UNCORE_PER_CLUSTER + // Pointer arithmetic to translate cluster_id into a clusters[] index. return cpu_cluster_id(); #else /* UNCORE_PER_CLUSTER */ return 0; @@ -572,8 +580,6 @@ uncmon_write_counter_locked_l(__unused unsigned int monid, unsigned int ctr, #if UNCORE_PER_CLUSTER -static const uint8_t clust_offs[__ARM_CLUSTER_COUNT__] = CPU_CLUSTER_OFFSETS; - uintptr_t upmc_offs[UNCORE_NCTRS] = { [0] = 0x4100, [1] = 0x4248, [2] = 0x4110, [3] = 0x4250, [4] = 0x4120, [5] = 0x4258, [6] = 0x4130, [7] = 0x4260, [8] = 0x4140, [9] = 0x4268, @@ -584,7 +590,7 @@ uintptr_t upmc_offs[UNCORE_NCTRS] = { static inline uint64_t uncmon_read_counter_locked_r(unsigned int mon_id, unsigned int ctr) { - assert(mon_id < __ARM_CLUSTER_COUNT__); + assert(mon_id < uncore_nmonitors()); assert(ctr < UNCORE_NCTRS); return *(uint64_t *)(cpm_impl[mon_id] + upmc_offs[ctr]); } @@ -595,7 +601,7 @@ uncmon_write_counter_locked_r(unsigned int mon_id, unsigned int ctr, { assert(count < UPMC_MAX); assert(ctr < UNCORE_NCTRS); - assert(mon_id < __ARM_CLUSTER_COUNT__); + assert(mon_id < uncore_nmonitors()); *(uint64_t *)(cpm_impl[mon_id] + upmc_offs[ctr]) = count; } @@ -715,24 +721,7 @@ uncmon_get_pmi_mask(unsigned int monid) uint64_t pmi_mask = uncore_pmi_mask; #if UNCORE_PER_CLUSTER - /* - * Set up the mask for the high bits. - */ - uint64_t clust_cpumask; - if (monid == __ARM_CLUSTER_COUNT__ - 1) { - clust_cpumask = UINT64_MAX; - } else { - clust_cpumask = ((1ULL << clust_offs[monid + 1]) - 1); - } - - /* - * Mask off the low bits, if necessary. 
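A worked example of the offset arithmetic being deleted here, using a hypothetical two-cluster layout (not taken from the patch):

	clust_offs         = { 0, 4 };
	monitor 0 cpu mask = (1ULL << 4) - 1 = 0x0f   /* CPUs 0-3 */
	monitor 1 cpu mask = ~((1ULL << 4) - 1)       /* CPUs 4.. ; last cluster starts from UINT64_MAX */

The replacement reads the same bit mask directly from topology_info->clusters[monid].cpu_mask, so the offset table and the special case for the last cluster are no longer needed.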
- */ - if (clust_offs[monid] != 0) { - clust_cpumask &= ~((1ULL << clust_offs[monid]) - 1); - } - - pmi_mask &= clust_cpumask; + pmi_mask &= topology_info->clusters[monid].cpu_mask; #else /* UNCORE_PER_CLUSTER */ #pragma unused(monid) #endif /* !UNCORE_PER_CLUSTER */ @@ -758,9 +747,7 @@ uncmon_init_locked_l(unsigned int monid) #if UNCORE_PER_CLUSTER -static vm_size_t acc_impl_size = 0; -static uintptr_t acc_impl[__ARM_CLUSTER_COUNT__] = {}; -static uintptr_t acc_impl_phys[__ARM_CLUSTER_COUNT__] = {}; +static uintptr_t acc_impl[MAX_NMONITORS] = {}; static void uncmon_init_locked_r(unsigned int monid) @@ -780,6 +767,11 @@ uncmon_init_locked_r(unsigned int monid) static int uncore_init(__unused mt_device_t dev) { +#if HAS_UNCORE_CTRS + assert(MT_NDEVS > 0); + mt_devices[MT_NDEVS - 1].mtd_nmonitors = (uint8_t)uncore_nmonitors(); +#endif + #if DEVELOPMENT || DEBUG /* * Development and debug kernels observe the `uncore_pmi_mask` boot-arg, @@ -790,10 +782,10 @@ uncore_init(__unused mt_device_t dev) sizeof(uncore_pmi_mask)); if (parsed_arg) { #if UNCORE_PER_CLUSTER - if (__builtin_popcount(uncore_pmi_mask) != __ARM_CLUSTER_COUNT__) { + if (__builtin_popcount(uncore_pmi_mask) != (int)uncore_nmonitors()) { panic("monotonic: invalid uncore PMI mask 0x%x", uncore_pmi_mask); } - for (unsigned int i = 0; i < __ARM_CLUSTER_COUNT__; i++) { + for (unsigned int i = 0; i < uncore_nmonitors(); i++) { if (__builtin_popcountll(uncmon_get_pmi_mask(i)) != 1) { panic("monotonic: invalid uncore PMI CPU for cluster %d in mask 0x%x", i, uncore_pmi_mask); @@ -808,9 +800,8 @@ uncore_init(__unused mt_device_t dev) #endif /* DEVELOPMENT || DEBUG */ { #if UNCORE_PER_CLUSTER - for (int i = 0; i < __ARM_CLUSTER_COUNT__; i++) { - /* route to the first CPU in each cluster */ - uncore_pmi_mask |= (1ULL << clust_offs[i]); + for (unsigned int i = 0; i < topology_info->num_clusters; i++) { + uncore_pmi_mask |= 1ULL << topology_info->clusters[i].first_cpu_id; } #else /* UNCORE_PER_CLUSTER */ /* arbitrarily route to core 0 */ @@ -821,15 +812,12 @@ uncore_init(__unused mt_device_t dev) unsigned int curmonid = uncmon_get_curid(); - for (unsigned int monid = 0; monid < UNCORE_NMONITORS; monid++) { + for (unsigned int monid = 0; monid < uncore_nmonitors(); monid++) { #if UNCORE_PER_CLUSTER - cpm_impl[monid] = (uintptr_t)ml_io_map(cpm_impl_phys[monid], - cpm_impl_size); - assert(cpm_impl[monid] != 0); - - acc_impl[monid] = (uintptr_t)ml_io_map(acc_impl_phys[monid], - acc_impl_size); - assert(acc_impl[monid] != 0); + ml_topology_cluster_t *cluster = &topology_info->clusters[monid]; + cpm_impl[monid] = (uintptr_t)cluster->cpm_IMPL_regs; + acc_impl[monid] = (uintptr_t)cluster->acc_IMPL_regs; + assert(cpm_impl[monid] != 0 && acc_impl[monid] != 0); #endif /* UNCORE_PER_CLUSTER */ struct uncore_monitor *mon = &uncore_monitors[monid]; @@ -890,7 +878,7 @@ uncore_read(uint64_t ctr_mask, uint64_t *counts_out) } unsigned int curmonid = uncmon_get_curid(); - for (unsigned int monid = 0; monid < UNCORE_NMONITORS; monid++) { + for (unsigned int monid = 0; monid < uncore_nmonitors(); monid++) { /* * Find this monitor's starting offset into the `counts_out` array. 
*/ @@ -932,18 +920,18 @@ uncore_add(struct monotonic_config *config, uint32_t *ctr_out) uint32_t ctr = __builtin_ffsll(available) - 1; uncore_active_ctrs |= UINT64_C(1) << ctr; - uncore_config.uc_events.uce_ctrs[ctr] = config->event; + uncore_config.uc_events.uce_ctrs[ctr] = (uint8_t)config->event; uint64_t cpu_mask = UINT64_MAX; if (config->cpu_mask != 0) { cpu_mask = config->cpu_mask; } - for (int i = 0; i < UNCORE_NMONITORS; i++) { + for (unsigned int i = 0; i < uncore_nmonitors(); i++) { #if UNCORE_PER_CLUSTER - const unsigned int shift = clust_offs[i]; + const unsigned int shift = topology_info->clusters[i].first_cpu_id; #else /* UNCORE_PER_CLUSTER */ const unsigned int shift = 0; #endif /* !UNCORE_PER_CLUSTER */ - uncore_config.uc_cpu_masks[i].uccm_masks[ctr] = cpu_mask >> shift; + uncore_config.uc_cpu_masks[i].uccm_masks[ctr] = (uint16_t)(cpu_mask >> shift); } *ctr_out = ctr; @@ -965,7 +953,7 @@ uncore_reset(void) unsigned int curmonid = uncmon_get_curid(); - for (unsigned int monid = 0; monid < UNCORE_NMONITORS; monid++) { + for (unsigned int monid = 0; monid < uncore_nmonitors(); monid++) { struct uncore_monitor *mon = &uncore_monitors[monid]; bool remote = monid != curmonid; @@ -1006,7 +994,7 @@ uncore_reset(void) uncore_active_ctrs = 0; memset(&uncore_config, 0, sizeof(uncore_config)); - for (unsigned int monid = 0; monid < UNCORE_NMONITORS; monid++) { + for (unsigned int monid = 0; monid < uncore_nmonitors(); monid++) { struct uncore_monitor *mon = &uncore_monitors[monid]; bool remote = monid != curmonid; @@ -1068,7 +1056,7 @@ uncore_set_enabled(bool enable) mt_uncore_enabled = enable; unsigned int curmonid = uncmon_get_curid(); - for (unsigned int monid = 0; monid < UNCORE_NMONITORS; monid++) { + for (unsigned int monid = 0; monid < uncore_nmonitors(); monid++) { if (monid != curmonid) { #if UNCORE_PER_CLUSTER uncmon_set_enabled_r(monid, enable); @@ -1138,7 +1126,7 @@ uncore_save(void) unsigned int curmonid = uncmon_get_curid(); - for (unsigned int monid = 0; monid < UNCORE_NMONITORS; monid++) { + for (unsigned int monid = 0; monid < uncore_nmonitors(); monid++) { struct uncore_monitor *mon = &uncore_monitors[monid]; int intrs_en = uncmon_lock(mon); @@ -1190,53 +1178,6 @@ out: uncmon_unlock(mon, intrs_en); } -static void -uncore_early_init(void) -{ -#if UNCORE_PER_CLUSTER - /* - * Initialize the necessary PIO physical regions from the device tree. - */ - DTEntry armio_entry = NULL; - if ((DTFindEntry("name", "arm-io", &armio_entry) != kSuccess)) { - panic("unable to find arm-io DT entry"); - } - - uint64_t *regs; - unsigned int regs_size = 0; - if (DTGetProperty(armio_entry, "acc-impl", (void **)®s, ®s_size) != - kSuccess) { - panic("unable to find acc-impl DT property"); - } - /* - * Two 8-byte values are expected for each cluster -- the physical address - * of the region and its size. 
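For context on the deletion above (a hedged note, not part of the patch): the per-cluster ACC/CPM register windows now arrive already mapped from the topology parser, so the "acc-impl"/"cpm-impl" device-tree lookups and the ml_io_map() calls they fed are no longer required.

	const ml_topology_info_t *ti = ml_get_topology_info();
	uintptr_t cpm = (uintptr_t)ti->clusters[monid].cpm_IMPL_regs; /* was cpm_impl_phys[] + ml_io_map() */
	uintptr_t acc = (uintptr_t)ti->clusters[monid].acc_IMPL_regs; /* was acc_impl_phys[] + ml_io_map() */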
- */ - const unsigned int expected_size = - (typeof(expected_size))sizeof(uint64_t) * __ARM_CLUSTER_COUNT__ * 2; - if (regs_size != expected_size) { - panic("invalid size for acc-impl DT property"); - } - for (int i = 0; i < __ARM_CLUSTER_COUNT__; i++) { - acc_impl_phys[i] = regs[i * 2]; - } - acc_impl_size = regs[1]; - - regs_size = 0; - if (DTGetProperty(armio_entry, "cpm-impl", (void **)®s, ®s_size) != - kSuccess) { - panic("unable to find cpm-impl property"); - } - if (regs_size != expected_size) { - panic("invalid size for cpm-impl DT property"); - } - for (int i = 0; i < __ARM_CLUSTER_COUNT__; i++) { - cpm_impl_phys[i] = regs[i * 2]; - } - cpm_impl_size = regs[1]; -#endif /* UNCORE_PER_CLUSTER */ -} - #endif /* HAS_UNCORE_CTRS */ #pragma mark common hooks @@ -1244,9 +1185,7 @@ uncore_early_init(void) void mt_early_init(void) { -#if HAS_UNCORE_CTRS - uncore_early_init(); -#endif /* HAS_UNCORE_CTRS */ + topology_info = ml_get_topology_info(); } void @@ -1309,9 +1248,8 @@ uint64_t mt_count_pmis(void) { uint64_t npmis = 0; - int max_cpu = ml_get_max_cpu_number(); - for (int i = 0; i <= max_cpu; i++) { - cpu_data_t *cpu = (cpu_data_t *)CpuDataEntries[i].cpu_data_vaddr; + for (unsigned int i = 0; i < topology_info->num_cpus; i++) { + cpu_data_t *cpu = (cpu_data_t *)CpuDataEntries[topology_info->cpus[i].cpu_id].cpu_data_vaddr; npmis += cpu->cpu_monotonic.mtc_npmis; } return npmis; @@ -1502,7 +1440,6 @@ struct mt_device mt_devices[] = { .mtd_enable = uncore_set_enabled, .mtd_read = uncore_read, - .mtd_nmonitors = UNCORE_NMONITORS, .mtd_ncounters = UNCORE_NCTRS, } #endif /* HAS_UNCORE_CTRS */ diff --git a/osfmk/arm64/pac_asm.h b/osfmk/arm64/pac_asm.h new file mode 100644 index 000000000..205cd13a7 --- /dev/null +++ b/osfmk/arm64/pac_asm.h @@ -0,0 +1,131 @@ +/* + * Copyright (c) 2019 Apple Inc. All rights reserved. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. + * + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. 
+ * + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ + */ + +#ifndef _ARM64_PAC_ASM_H_ +#define _ARM64_PAC_ASM_H_ + +#ifndef __ASSEMBLER__ +#error "This header should only be used in .s files" +#endif + +#include +#include +#include "assym.s" + +#if defined(HAS_APPLE_PAC) + +#if defined(HAS_APCTL_EL1_USERKEYEN) +#define HAS_PAC_FAST_A_KEY_SWITCHING 1 +#define HAS_PAC_SLOW_A_KEY_SWITCHING 0 + +.macro IF_PAC_FAST_A_KEY_SWITCHING label, tmp +.error "This macro should never need to be used on this CPU family." +.endmacro + +/* We know at compile time that this CPU family definitely doesn't need slow A-key switching */ +.macro IF_PAC_SLOW_A_KEY_SWITCHING label, tmp +.endmacro + +#else /* !&& !defined(HAS_APCTL_EL1_USERKEYEN) */ +#define HAS_PAC_FAST_A_KEY_SWITCHING 0 +#define HAS_PAC_SLOW_A_KEY_SWITCHING 1 + +/* We know at compile time that this CPU family definitely doesn't support fast A-key switching */ +.macro IF_PAC_FAST_A_KEY_SWITCHING label, tmp +.endmacro + +.macro IF_PAC_SLOW_A_KEY_SWITCHING label, tmp +.error "This macro should never need to be used on this CPU family." +.endmacro + +#endif /**/ + +/* BEGIN IGNORE CODESTYLE */ + +/** + * REPROGRAM_JOP_KEYS + * + * Reprograms the A-key registers if needed, and updates current_cpu_datap()->jop_key. + * + * On CPUs where fast A-key switching is implemented, this macro reprograms KERNKey_EL1. + * On other CPUs, it reprograms AP{D,I}AKey_EL1. + * + * skip_label - branch to this label if new_jop_key is already loaded into CPU + * new_jop_key - new APIAKeyLo value + * cpudatap - current cpu_data_t * + * tmp - scratch register + */ +.macro REPROGRAM_JOP_KEYS skip_label, new_jop_key, cpudatap, tmp + ldr \tmp, [\cpudatap, CPU_JOP_KEY] + cmp \new_jop_key, \tmp + b.eq \skip_label + SET_JOP_KEY_REGISTERS \new_jop_key, \tmp + str \new_jop_key, [\cpudatap, CPU_JOP_KEY] +.endmacro + +/** + * SET_JOP_KEY_REGISTERS + * + * Unconditionally reprograms the A-key registers. The caller is responsible for + * updating current_cpu_datap()->jop_key as needed. + * + * new_jop_key - new APIAKeyLo value + * tmp - scratch register + */ +.macro SET_JOP_KEY_REGISTERS new_jop_key, tmp +#if HAS_PAC_FAST_A_KEY_SWITCHING + IF_PAC_SLOW_A_KEY_SWITCHING Lslow_reprogram_jop_keys_\@, \tmp + msr KERNKeyLo_EL1, \new_jop_key + add \tmp, \new_jop_key, #1 + msr KERNKeyHi_EL1, \tmp +#endif /* HAS_PAC_FAST_A_KEY_SWITCHING */ +#if HAS_PAC_FAST_A_KEY_SWITCHING && HAS_PAC_SLOW_A_KEY_SWITCHING + b Lset_jop_key_registers_done_\@ +#endif /* HAS_PAC_FAST_A_KEY_SWITCHING && HAS_PAC_SLOW_A_KEY_SWITCHING */ + +#if HAS_PAC_SLOW_A_KEY_SWITCHING +Lslow_reprogram_jop_keys_\@: + msr APIAKeyLo_EL1, \new_jop_key + add \tmp, \new_jop_key, #1 + msr APIAKeyHi_EL1, \tmp + add \tmp, \tmp, #1 + msr APDAKeyLo_EL1, \tmp + add \tmp, \tmp, #1 + msr APDAKeyHi_EL1, \tmp +#endif /* HAS_PAC_SLOW_A_KEY_SWITCHING */ + +Lset_jop_key_registers_done_\@: +.endmacro + +/* END IGNORE CODESTYLE */ + +#endif /* defined(HAS_APPLE_PAC) */ + +#endif /* _ARM64_PAC_ASM_H_ */ + +/* vim: set ts=4 ft=asm: */ diff --git a/osfmk/arm64/pal_hibernate.h b/osfmk/arm64/pal_hibernate.h new file mode 100644 index 000000000..88724f772 --- /dev/null +++ b/osfmk/arm64/pal_hibernate.h @@ -0,0 +1,127 @@ +/* + * Copyright (c) 2020 Apple Inc. All rights reserved. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). 
You may not use this file except in + * compliance with the License. The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. + * + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ + */ +/** + * ARM64 specific definitions for hibernation platform abstraction layer. + */ + +#ifndef _ARM64_PAL_HIBERNATE_H +#define _ARM64_PAL_HIBERNATE_H + +#include + +__BEGIN_DECLS + +/*! + * @enum pal_hib_map_type_t + * @discussion Parameter to pal_hib_map used to signify which memory region to map. + */ +typedef enum { + DEST_COPY_AREA = 1, + COPY_PAGE_AREA, + BITMAP_AREA, + IMAGE_AREA, + IMAGE2_AREA, + SCRATCH_AREA, + WKDM_AREA, +} pal_hib_map_type_t; + +/*! + * @struct pal_hib_ctx + * @discussion ARM64-specific PAL context; see pal_hib_ctx_t for details. + */ +struct pal_hib_ctx { +#if HIBERNATE_HMAC_IMAGE + struct ccdigest_info di; + hibernate_scratch_t pagesRestored; +#endif /* HIBERNATE_HMAC_IMAGE */ +}; + +/*! + * @typedef pal_hib_globals_t + * @discussion ARM64-specific state preserved pre-hibernation and needed during hibernation resume. + * + * @field dockChannelRegBase Physical address of the dockchannel registers + * @field dockChannelWstatMask Mask to apply to dockchannel WSTAT register to compute available FIFO entries + * @field hibUartRegBase Physical address of the UART registers + * @field hmacRegBase Physical address of the hmac block registers + */ +typedef struct { + uint64_t dockChannelRegBase; + uint64_t dockChannelWstatMask; + uint64_t hibUartRegBase; + uint64_t hmacRegBase; +} pal_hib_globals_t; +extern pal_hib_globals_t gHibernateGlobals; + +/*! + * @function pal_hib_get_stack_pages + * @discussion Returns the stack base address and number of pages to use during hibernation resume. + * + * @param first_page Out parameter: the base address of the hibernation resume stack + * @param page_count Out parameter: the number of pages in the hibernation stack + */ +void pal_hib_get_stack_pages(vm_offset_t *first_page, vm_offset_t *page_count); + +/*! + * @function pal_hib_resume_tramp + * @discussion Platform-specific system setup before calling hibernate_kernel_entrypoint. + * + * @param headerPpnum The page number of the IOHibernateImageHeader + */ +void pal_hib_resume_tramp(uint32_t headerPpnum); + +/*! + * @typedef pal_hib_tramp_result_t + * @discussion This type is used to store the result of pal_hib_resume_tramp. 
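A minimal caller-side sketch of the stack-page query declared above (not part of the patch); how the resume path actually switches onto this stack is an assumption left as a comment.

	vm_offset_t first_page = 0, page_count = 0;

	pal_hib_get_stack_pages(&first_page, &page_count);
	/* the hibernation resume stack occupies [first_page, first_page + ptoa(page_count)) */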
+ * + * @field ttbr0 Physical address of the first level translation table (low mem) + * @field ttbr1 Physical address of the first level translation table (high mem) + * @field memSlide Offset from physical address to virtual address during hibernation resume + * @field kernelSlide Offset from physical address to virtual address in the kernel map + */ +typedef struct{ + uint64_t ttbr0; + uint64_t ttbr1; + uint64_t memSlide; + uint64_t kernelSlide; +} pal_hib_tramp_result_t; + +#if HIBERNATE_TRAP_HANDLER +/*! + * @function hibernate_trap + * @discussion Platform-specific function for handling a trap during hibernation resume. + * + * @param context The context captured during the trap + * @param trap_addr The address of the low level trap handler that was invoked + */ +void hibernate_trap(arm_context_t *context, uint64_t trap_addr) __attribute__((noreturn)); +#endif /* HIBERNATE_TRAP_HANDLER */ + +__END_DECLS + +#endif /* _ARM64_PAL_HIBERNATE_H */ diff --git a/osfmk/arm64/pcb.c b/osfmk/arm64/pcb.c index 9b2b057b1..5aa70ff7a 100644 --- a/osfmk/arm64/pcb.c +++ b/osfmk/arm64/pcb.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2007-2019 Apple Inc. All rights reserved. + * Copyright (c) 2007-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -41,7 +41,6 @@ #include #include #include -#include #include #if MONOTONIC @@ -61,12 +60,13 @@ #include -#define USER_SS_ZONE_ALLOC_SIZE (0x4000) extern int debug_task; +extern bool need_wa_rdar_55577508; -zone_t ads_zone; /* zone for debug_state area */ -zone_t user_ss_zone; /* zone for user arm_context_t allocations */ +/* zone for debug_state area */ +ZONE_DECLARE(ads_zone, "arm debug state", sizeof(arm_debug_state_t), ZC_NONE); +ZONE_DECLARE(user_ss_zone, "user save state", sizeof(arm_context_t), ZC_NONE); /* * Routine: consider_machine_collect @@ -88,40 +88,78 @@ consider_machine_adjust(void) } + + +static inline void +machine_thread_switch_cpu_data(thread_t old, thread_t new) +{ + /* + * We build with -fno-strict-aliasing, so the load through temporaries + * is required so that this generates a single load / store pair. + */ + cpu_data_t *datap = old->machine.CpuDatap; + vm_offset_t base = old->machine.pcpu_data_base; + + /* TODO: Should this be ordered? */ + + old->machine.CpuDatap = NULL; + old->machine.pcpu_data_base = 0; + + new->machine.CpuDatap = datap; + new->machine.pcpu_data_base = base; +} + /* * Routine: machine_switch_context * */ thread_t machine_switch_context(thread_t old, - thread_continue_t continuation, - thread_t new) + thread_continue_t continuation, + thread_t new) { thread_t retval; pmap_t new_pmap; - cpu_data_t * cpu_data_ptr; + +#if __ARM_PAN_AVAILABLE__ + if (__improbable(__builtin_arm_rsr("pan") == 0)) { + panic("context switch with PAN disabled"); + } +#endif #define machine_switch_context_kprintf(x...) \ /* kprintf("machine_switch_context: " x) */ - cpu_data_ptr = getCpuDatap(); - if (old == new) + if (old == new) { panic("machine_switch_context"); + } kpc_off_cpu(old); + new_pmap = new->map->pmap; - if (old->map->pmap != new_pmap) + if (old->map->pmap != new_pmap) { pmap_switch(new_pmap); + } else { + /* + * If the thread is preempted while performing cache or TLB maintenance, + * it may be migrated to a different CPU between the completion of the relevant + * maintenance instruction and the synchronizing DSB. 
ARM requires that the + * synchronizing DSB must be issued *on the PE that issued the maintenance instruction* + * in order to guarantee completion of the instruction and visibility of its effects. + * Issue DSB here to enforce that guarantee. We only do this for the case in which + * the pmap isn't changing, as we expect pmap_switch() to issue DSB when it updates + * TTBR0. Note also that cache maintenance may be performed in userspace, so we + * cannot further limit this operation e.g. by setting a per-thread flag to indicate + * a pending kernel TLB or cache maintenance instruction. + */ + __builtin_arm_dsb(DSB_ISH); + } - new->machine.CpuDatap = cpu_data_ptr; - - /* TODO: Should this be ordered? */ - old->machine.machine_thread_flags &= ~MACHINE_THREAD_FLAGS_ON_CPU; - new->machine.machine_thread_flags |= MACHINE_THREAD_FLAGS_ON_CPU; + machine_thread_switch_cpu_data(old, new); machine_switch_context_kprintf("old= %x contination = %x new = %x\n", old, continuation, new); @@ -134,16 +172,17 @@ machine_switch_context(thread_t old, boolean_t machine_thread_on_core(thread_t thread) { - return thread->machine.machine_thread_flags & MACHINE_THREAD_FLAGS_ON_CPU; + return thread->machine.CpuDatap != NULL; } + /* * Routine: machine_thread_create * */ kern_return_t machine_thread_create(thread_t thread, - task_t task) + task_t task) { arm_context_t *thread_user_ss = NULL; kern_return_t result = KERN_SUCCESS; @@ -155,15 +194,21 @@ machine_thread_create(thread_t thread, if (current_thread() != thread) { thread->machine.CpuDatap = (cpu_data_t *)0; + // setting this offset will cause trying to use it to panic + thread->machine.pcpu_data_base = (vm_offset_t)VM_MIN_KERNEL_ADDRESS; } thread->machine.preemption_count = 0; thread->machine.cthread_self = 0; + thread->machine.kpcb = NULL; + thread->machine.exception_trace_code = 0; #if defined(HAS_APPLE_PAC) thread->machine.rop_pid = task->rop_pid; + thread->machine.jop_pid = task->jop_pid; thread->machine.disable_user_jop = task->disable_user_jop; #endif + if (task != kernel_task) { /* If this isn't a kernel thread, we'll have userspace state. */ thread->machine.contextData = (arm_context_t *)zalloc(user_ss_zone); @@ -187,7 +232,6 @@ machine_thread_create(thread_t thread, thread->machine.uNeon->nsh.flavor = ARM_NEON_SAVED_STATE32; thread->machine.uNeon->nsh.count = ARM_NEON_SAVED_STATE32_COUNT; } - } else { thread->machine.upcb = NULL; thread->machine.uNeon = NULL; @@ -195,6 +239,7 @@ machine_thread_create(thread_t thread, } + bzero(&thread->machine.perfctrl_state, sizeof(thread->machine.perfctrl_state)); result = machine_thread_state_initialize(thread); @@ -233,7 +278,7 @@ machine_thread_destroy(thread_t thread) zfree(user_ss_zone, thread_user_ss); } - if (thread->machine.DebugData != NULL) { + if (thread->machine.DebugData != NULL) { if (thread->machine.DebugData == getCpuDatap()->cpu_user_debug) { arm_debug_set(NULL); } @@ -250,25 +295,6 @@ machine_thread_destroy(thread_t thread) void machine_thread_init(void) { - ads_zone = zinit(sizeof(arm_debug_state_t), - THREAD_CHUNK * (sizeof(arm_debug_state_t)), - THREAD_CHUNK * (sizeof(arm_debug_state_t)), - "arm debug state"); - - /* - * Create a zone for the user save state. At the time this zone was created, - * the user save state was 848 bytes, and the matching kalloc zone was 1024 - * bytes, which would result in significant amounts of wasted space if we - * simply used kalloc to allocate the user saved state. 
- * - * 0x4000 has been chosen as the allocation size, as it results in 272 bytes - * of wasted space per chunk, which should correspond to 19 allocations. - */ - user_ss_zone = zinit(sizeof(arm_context_t), - CONFIG_THREAD_MAX * (sizeof(arm_context_t)), - USER_SS_ZONE_ALLOC_SIZE, - "user save state"); - } /* @@ -288,7 +314,7 @@ machine_thread_template_init(thread_t __unused thr_template) user_addr_t get_useraddr() { - return (get_saved_state_pc(current_thread()->machine.upcb)); + return get_saved_state_pc(current_thread()->machine.upcb); } /* @@ -301,13 +327,13 @@ machine_stack_detach(thread_t thread) vm_offset_t stack; KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_DETACH), - (uintptr_t)thread_tid(thread), thread->priority, thread->sched_pri, 0, 0); + (uintptr_t)thread_tid(thread), thread->priority, thread->sched_pri, 0, 0); stack = thread->kernel_stack; thread->kernel_stack = 0; thread->machine.kstackptr = 0; - return (stack); + return stack; } @@ -317,17 +343,18 @@ machine_stack_detach(thread_t thread) */ void machine_stack_attach(thread_t thread, - vm_offset_t stack) + vm_offset_t stack) { - struct arm_context *context; - struct arm_saved_state64 *savestate; + struct arm_kernel_context *context; + struct arm_kernel_saved_state *savestate; + struct arm_kernel_neon_saved_state *neon_savestate; uint32_t current_el; #define machine_stack_attach_kprintf(x...) \ /* kprintf("machine_stack_attach: " x) */ KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_ATTACH), - (uintptr_t)thread_tid(thread), thread->priority, thread->sched_pri, 0, 0); + (uintptr_t)thread_tid(thread), thread->priority, thread->sched_pri, 0, 0); thread->kernel_stack = stack; thread->machine.kstackptr = stack + kernel_stack_size - sizeof(struct thread_kernel_state); @@ -337,7 +364,7 @@ machine_stack_attach(thread_t thread, current_el = (uint32_t) __builtin_arm_rsr64("CurrentEL"); context = &((thread_kernel_state_t) thread->machine.kstackptr)->machine; - savestate = saved_state64(&context->ss); + savestate = &context->ss; savestate->fp = 0; savestate->sp = thread->machine.kstackptr; @@ -353,42 +380,44 @@ machine_stack_attach(thread_t thread, /* Sign the initial kernel stack saved state */ boolean_t intr = ml_set_interrupts_enabled(FALSE); asm volatile ( - "mov x0, %[ss]" "\n" - - "mov x1, xzr" "\n" - "str x1, [x0, %[SS64_PC]]" "\n" - - "mov x2, %[default_cpsr_lo]" "\n" - "movk x2, %[default_cpsr_hi], lsl #16" "\n" - "str w2, [x0, %[SS64_CPSR]]" "\n" - - "adrp x3, _thread_continue@page" "\n" - "add x3, x3, _thread_continue@pageoff" "\n" - "str x3, [x0, %[SS64_LR]]" "\n" - - "mov x4, xzr" "\n" - "mov x5, xzr" "\n" - "stp x4, x5, [x0, %[SS64_X16]]" "\n" - - "mov x6, lr" "\n" - "bl _ml_sign_thread_state" "\n" - "mov lr, x6" "\n" - : - : [ss] "r"(&context->ss), - [default_cpsr_lo] "M"(default_cpsr & 0xFFFF), - [default_cpsr_hi] "M"(default_cpsr >> 16), - [SS64_X16] "i"(offsetof(struct arm_saved_state, ss_64.x[16])), - [SS64_PC] "i"(offsetof(struct arm_saved_state, ss_64.pc)), - [SS64_CPSR] "i"(offsetof(struct arm_saved_state, ss_64.cpsr)), - [SS64_LR] "i"(offsetof(struct arm_saved_state, ss_64.lr)) - : "x0", "x1", "x2", "x3", "x4", "x5", "x6" - ); + "mov x0, %[ss]" "\n" + + "mov x1, xzr" "\n" + "str x1, [x0, %[SS64_PC]]" "\n" + + "mov x2, %[default_cpsr_lo]" "\n" + "movk x2, %[default_cpsr_hi], lsl #16" "\n" + "str w2, [x0, %[SS64_CPSR]]" "\n" + + "adrp x3, _thread_continue@page" "\n" + "add x3, x3, _thread_continue@pageoff" "\n" + "str x3, [x0, %[SS64_LR]]" "\n" + + "mov x4, xzr" "\n" + "mov x5, xzr" "\n" + "stp x4, x5, 
[x0, %[SS64_X16]]" "\n" + + "mov x6, lr" "\n" + "bl _ml_sign_kernel_thread_state" "\n" + "mov lr, x6" "\n" + : + : [ss] "r"(&context->ss), + [default_cpsr_lo] "M"(default_cpsr & 0xFFFF), + [default_cpsr_hi] "M"(default_cpsr >> 16), + [SS64_X16] "i"(offsetof(struct arm_kernel_saved_state, x[0])), + [SS64_PC] "i"(offsetof(struct arm_kernel_saved_state, pc)), + [SS64_CPSR] "i"(offsetof(struct arm_kernel_saved_state, cpsr)), + [SS64_LR] "i"(offsetof(struct arm_kernel_saved_state, lr)) + : "x0", "x1", "x2", "x3", "x4", "x5", "x6" + ); ml_set_interrupts_enabled(intr); #else savestate->lr = (uintptr_t)thread_continue; savestate->cpsr = default_cpsr; savestate->pc = 0; #endif /* defined(HAS_APPLE_PAC) */ + neon_savestate = &context->ns; + neon_savestate->fpcr = FPCR_DEFAULT; machine_stack_attach_kprintf("thread = %p pc = %llx, sp = %llx\n", thread, savestate->lr, savestate->sp); } @@ -399,16 +428,20 @@ machine_stack_attach(thread_t thread, */ void machine_stack_handoff(thread_t old, - thread_t new) + thread_t new) { vm_offset_t stack; pmap_t new_pmap; - cpu_data_t * cpu_data_ptr; + +#if __ARM_PAN_AVAILABLE__ + if (__improbable(__builtin_arm_rsr("pan") == 0)) { + panic("stack handoff with PAN disabled"); + } +#endif kpc_off_cpu(old); stack = machine_stack_detach(old); - cpu_data_ptr = getCpuDatap(); new->kernel_stack = stack; new->machine.kstackptr = stack + kernel_stack_size - sizeof(struct thread_kernel_state); if (stack == old->reserved_stack) { @@ -419,21 +452,31 @@ machine_stack_handoff(thread_t old, + new_pmap = new->map->pmap; - if (old->map->pmap != new_pmap) + if (old->map->pmap != new_pmap) { pmap_switch(new_pmap); + } else { + /* + * If the thread is preempted while performing cache or TLB maintenance, + * it may be migrated to a different CPU between the completion of the relevant + * maintenance instruction and the synchronizing DSB. ARM requires that the + * synchronizing DSB must be issued *on the PE that issued the maintenance instruction* + * in order to guarantee completion of the instruction and visibility of its effects. + * Issue DSB here to enforce that guarantee. We only do this for the case in which + * the pmap isn't changing, as we expect pmap_switch() to issue DSB when it updates + * TTBR0. Note also that cache maintenance may be performed in userspace, so we + * cannot further limit this operation e.g. by setting a per-thread flag to indicate + * a pending kernel TLB or cache maintenance instruction. + */ + __builtin_arm_dsb(DSB_ISH); + } - new->machine.CpuDatap = cpu_data_ptr; - - /* TODO: Should this be ordered? */ - old->machine.machine_thread_flags &= ~MACHINE_THREAD_FLAGS_ON_CPU; - new->machine.machine_thread_flags |= MACHINE_THREAD_FLAGS_ON_CPU; + machine_thread_switch_cpu_data(old, new); machine_set_current_thread(new); thread_initialize_kernel_state(new); - - return; } @@ -443,9 +486,9 @@ machine_stack_handoff(thread_t old, */ void call_continuation(thread_continue_t continuation, - void *parameter, - wait_result_t wresult, - boolean_t enable_interrupts) + void *parameter, + wait_result_t wresult, + boolean_t enable_interrupts) { #define call_continuation_kprintf(x...) 
\ /* kprintf("call_continuation_kprintf:" x) */ @@ -456,29 +499,29 @@ call_continuation(thread_continue_t continuation, #define SET_DBGBCRn(n, value, accum) \ __asm__ volatile( \ - "msr DBGBCR" #n "_EL1, %[val]\n" \ - "orr %[result], %[result], %[val]\n" \ - : [result] "+r"(accum) : [val] "r"((value))) + "msr DBGBCR" #n "_EL1, %[val]\n" \ + "orr %[result], %[result], %[val]\n" \ + : [result] "+r"(accum) : [val] "r"((value))) #define SET_DBGBVRn(n, value) \ __asm__ volatile("msr DBGBVR" #n "_EL1, %0" : : "r"(value)) #define SET_DBGWCRn(n, value, accum) \ __asm__ volatile( \ - "msr DBGWCR" #n "_EL1, %[val]\n" \ - "orr %[result], %[result], %[val]\n" \ - : [result] "+r"(accum) : [val] "r"((value))) + "msr DBGWCR" #n "_EL1, %[val]\n" \ + "orr %[result], %[result], %[val]\n" \ + : [result] "+r"(accum) : [val] "r"((value))) #define SET_DBGWVRn(n, value) \ __asm__ volatile("msr DBGWVR" #n "_EL1, %0" : : "r"(value)) -void arm_debug_set32(arm_debug_state_t *debug_state) +void +arm_debug_set32(arm_debug_state_t *debug_state) { struct cpu_data * cpu_data_ptr; arm_debug_info_t * debug_info = arm_debug_info(); - boolean_t intr, set_mde = 0; + boolean_t intr; arm_debug_state_t off_state; - uint32_t i; uint64_t all_ctrls = 0; intr = ml_set_interrupts_enabled(FALSE); @@ -496,51 +539,67 @@ void arm_debug_set32(arm_debug_state_t *debug_state) case 16: SET_DBGBVRn(15, (uint64_t)debug_state->uds.ds32.bvr[15]); SET_DBGBCRn(15, (uint64_t)debug_state->uds.ds32.bcr[15], all_ctrls); + OS_FALLTHROUGH; case 15: SET_DBGBVRn(14, (uint64_t)debug_state->uds.ds32.bvr[14]); SET_DBGBCRn(14, (uint64_t)debug_state->uds.ds32.bcr[14], all_ctrls); + OS_FALLTHROUGH; case 14: SET_DBGBVRn(13, (uint64_t)debug_state->uds.ds32.bvr[13]); SET_DBGBCRn(13, (uint64_t)debug_state->uds.ds32.bcr[13], all_ctrls); + OS_FALLTHROUGH; case 13: SET_DBGBVRn(12, (uint64_t)debug_state->uds.ds32.bvr[12]); SET_DBGBCRn(12, (uint64_t)debug_state->uds.ds32.bcr[12], all_ctrls); + OS_FALLTHROUGH; case 12: SET_DBGBVRn(11, (uint64_t)debug_state->uds.ds32.bvr[11]); SET_DBGBCRn(11, (uint64_t)debug_state->uds.ds32.bcr[11], all_ctrls); + OS_FALLTHROUGH; case 11: SET_DBGBVRn(10, (uint64_t)debug_state->uds.ds32.bvr[10]); SET_DBGBCRn(10, (uint64_t)debug_state->uds.ds32.bcr[10], all_ctrls); + OS_FALLTHROUGH; case 10: SET_DBGBVRn(9, (uint64_t)debug_state->uds.ds32.bvr[9]); SET_DBGBCRn(9, (uint64_t)debug_state->uds.ds32.bcr[9], all_ctrls); + OS_FALLTHROUGH; case 9: SET_DBGBVRn(8, (uint64_t)debug_state->uds.ds32.bvr[8]); SET_DBGBCRn(8, (uint64_t)debug_state->uds.ds32.bcr[8], all_ctrls); + OS_FALLTHROUGH; case 8: SET_DBGBVRn(7, (uint64_t)debug_state->uds.ds32.bvr[7]); SET_DBGBCRn(7, (uint64_t)debug_state->uds.ds32.bcr[7], all_ctrls); + OS_FALLTHROUGH; case 7: SET_DBGBVRn(6, (uint64_t)debug_state->uds.ds32.bvr[6]); SET_DBGBCRn(6, (uint64_t)debug_state->uds.ds32.bcr[6], all_ctrls); + OS_FALLTHROUGH; case 6: SET_DBGBVRn(5, (uint64_t)debug_state->uds.ds32.bvr[5]); SET_DBGBCRn(5, (uint64_t)debug_state->uds.ds32.bcr[5], all_ctrls); + OS_FALLTHROUGH; case 5: SET_DBGBVRn(4, (uint64_t)debug_state->uds.ds32.bvr[4]); SET_DBGBCRn(4, (uint64_t)debug_state->uds.ds32.bcr[4], all_ctrls); + OS_FALLTHROUGH; case 4: SET_DBGBVRn(3, (uint64_t)debug_state->uds.ds32.bvr[3]); SET_DBGBCRn(3, (uint64_t)debug_state->uds.ds32.bcr[3], all_ctrls); + OS_FALLTHROUGH; case 3: SET_DBGBVRn(2, (uint64_t)debug_state->uds.ds32.bvr[2]); SET_DBGBCRn(2, (uint64_t)debug_state->uds.ds32.bcr[2], all_ctrls); + OS_FALLTHROUGH; case 2: SET_DBGBVRn(1, (uint64_t)debug_state->uds.ds32.bvr[1]); SET_DBGBCRn(1, 
(uint64_t)debug_state->uds.ds32.bcr[1], all_ctrls); + OS_FALLTHROUGH; case 1: SET_DBGBVRn(0, (uint64_t)debug_state->uds.ds32.bvr[0]); SET_DBGBCRn(0, (uint64_t)debug_state->uds.ds32.bcr[0], all_ctrls); + OS_FALLTHROUGH; default: break; } @@ -549,51 +608,67 @@ void arm_debug_set32(arm_debug_state_t *debug_state) case 16: SET_DBGWVRn(15, (uint64_t)debug_state->uds.ds32.wvr[15]); SET_DBGWCRn(15, (uint64_t)debug_state->uds.ds32.wcr[15], all_ctrls); + OS_FALLTHROUGH; case 15: SET_DBGWVRn(14, (uint64_t)debug_state->uds.ds32.wvr[14]); SET_DBGWCRn(14, (uint64_t)debug_state->uds.ds32.wcr[14], all_ctrls); + OS_FALLTHROUGH; case 14: SET_DBGWVRn(13, (uint64_t)debug_state->uds.ds32.wvr[13]); SET_DBGWCRn(13, (uint64_t)debug_state->uds.ds32.wcr[13], all_ctrls); + OS_FALLTHROUGH; case 13: SET_DBGWVRn(12, (uint64_t)debug_state->uds.ds32.wvr[12]); SET_DBGWCRn(12, (uint64_t)debug_state->uds.ds32.wcr[12], all_ctrls); + OS_FALLTHROUGH; case 12: SET_DBGWVRn(11, (uint64_t)debug_state->uds.ds32.wvr[11]); SET_DBGWCRn(11, (uint64_t)debug_state->uds.ds32.wcr[11], all_ctrls); + OS_FALLTHROUGH; case 11: SET_DBGWVRn(10, (uint64_t)debug_state->uds.ds32.wvr[10]); SET_DBGWCRn(10, (uint64_t)debug_state->uds.ds32.wcr[10], all_ctrls); + OS_FALLTHROUGH; case 10: SET_DBGWVRn(9, (uint64_t)debug_state->uds.ds32.wvr[9]); SET_DBGWCRn(9, (uint64_t)debug_state->uds.ds32.wcr[9], all_ctrls); + OS_FALLTHROUGH; case 9: SET_DBGWVRn(8, (uint64_t)debug_state->uds.ds32.wvr[8]); SET_DBGWCRn(8, (uint64_t)debug_state->uds.ds32.wcr[8], all_ctrls); + OS_FALLTHROUGH; case 8: SET_DBGWVRn(7, (uint64_t)debug_state->uds.ds32.wvr[7]); SET_DBGWCRn(7, (uint64_t)debug_state->uds.ds32.wcr[7], all_ctrls); + OS_FALLTHROUGH; case 7: SET_DBGWVRn(6, (uint64_t)debug_state->uds.ds32.wvr[6]); SET_DBGWCRn(6, (uint64_t)debug_state->uds.ds32.wcr[6], all_ctrls); + OS_FALLTHROUGH; case 6: SET_DBGWVRn(5, (uint64_t)debug_state->uds.ds32.wvr[5]); SET_DBGWCRn(5, (uint64_t)debug_state->uds.ds32.wcr[5], all_ctrls); + OS_FALLTHROUGH; case 5: SET_DBGWVRn(4, (uint64_t)debug_state->uds.ds32.wvr[4]); SET_DBGWCRn(4, (uint64_t)debug_state->uds.ds32.wcr[4], all_ctrls); + OS_FALLTHROUGH; case 4: SET_DBGWVRn(3, (uint64_t)debug_state->uds.ds32.wvr[3]); SET_DBGWCRn(3, (uint64_t)debug_state->uds.ds32.wcr[3], all_ctrls); + OS_FALLTHROUGH; case 3: SET_DBGWVRn(2, (uint64_t)debug_state->uds.ds32.wvr[2]); SET_DBGWCRn(2, (uint64_t)debug_state->uds.ds32.wcr[2], all_ctrls); + OS_FALLTHROUGH; case 2: SET_DBGWVRn(1, (uint64_t)debug_state->uds.ds32.wvr[1]); SET_DBGWCRn(1, (uint64_t)debug_state->uds.ds32.wcr[1], all_ctrls); + OS_FALLTHROUGH; case 1: SET_DBGWVRn(0, (uint64_t)debug_state->uds.ds32.wvr[0]); SET_DBGWCRn(0, (uint64_t)debug_state->uds.ds32.wcr[0], all_ctrls); + OS_FALLTHROUGH; default: break; } @@ -604,24 +679,10 @@ void arm_debug_set32(arm_debug_state_t *debug_state) } #endif - for (i = 0; i < debug_info->num_breakpoint_pairs; i++) { - if (0 != debug_state->uds.ds32.bcr[i]) { - set_mde = 1; - break; - } - } - - for (i = 0; i < debug_info->num_watchpoint_pairs; i++) { - if (0 != debug_state->uds.ds32.wcr[i]) { - set_mde = 1; - break; - } - } - /* * Breakpoint/Watchpoint Enable */ - if (set_mde) { + if (all_ctrls != 0) { update_mdscr(0, 0x8000); // MDSCR_EL1[MDE] } else { update_mdscr(0x8000, 0); @@ -635,7 +696,6 @@ void arm_debug_set32(arm_debug_state_t *debug_state) mask_saved_state_cpsr(current_thread()->machine.upcb, PSR64_SS, 0); } else { - update_mdscr(0x1, 0); #if SINGLE_STEP_RETIRE_ERRATA @@ -645,17 +705,15 @@ void arm_debug_set32(arm_debug_state_t *debug_state) } (void) 
ml_set_interrupts_enabled(intr); - - return; } -void arm_debug_set64(arm_debug_state_t *debug_state) +void +arm_debug_set64(arm_debug_state_t *debug_state) { struct cpu_data * cpu_data_ptr; arm_debug_info_t * debug_info = arm_debug_info(); - boolean_t intr, set_mde = 0; + boolean_t intr; arm_debug_state_t off_state; - uint32_t i; uint64_t all_ctrls = 0; intr = ml_set_interrupts_enabled(FALSE); @@ -673,51 +731,67 @@ void arm_debug_set64(arm_debug_state_t *debug_state) case 16: SET_DBGBVRn(15, debug_state->uds.ds64.bvr[15]); SET_DBGBCRn(15, (uint64_t)debug_state->uds.ds64.bcr[15], all_ctrls); + OS_FALLTHROUGH; case 15: SET_DBGBVRn(14, debug_state->uds.ds64.bvr[14]); SET_DBGBCRn(14, (uint64_t)debug_state->uds.ds64.bcr[14], all_ctrls); + OS_FALLTHROUGH; case 14: SET_DBGBVRn(13, debug_state->uds.ds64.bvr[13]); SET_DBGBCRn(13, (uint64_t)debug_state->uds.ds64.bcr[13], all_ctrls); + OS_FALLTHROUGH; case 13: SET_DBGBVRn(12, debug_state->uds.ds64.bvr[12]); SET_DBGBCRn(12, (uint64_t)debug_state->uds.ds64.bcr[12], all_ctrls); + OS_FALLTHROUGH; case 12: SET_DBGBVRn(11, debug_state->uds.ds64.bvr[11]); SET_DBGBCRn(11, (uint64_t)debug_state->uds.ds64.bcr[11], all_ctrls); + OS_FALLTHROUGH; case 11: SET_DBGBVRn(10, debug_state->uds.ds64.bvr[10]); SET_DBGBCRn(10, (uint64_t)debug_state->uds.ds64.bcr[10], all_ctrls); + OS_FALLTHROUGH; case 10: SET_DBGBVRn(9, debug_state->uds.ds64.bvr[9]); SET_DBGBCRn(9, (uint64_t)debug_state->uds.ds64.bcr[9], all_ctrls); + OS_FALLTHROUGH; case 9: SET_DBGBVRn(8, debug_state->uds.ds64.bvr[8]); SET_DBGBCRn(8, (uint64_t)debug_state->uds.ds64.bcr[8], all_ctrls); + OS_FALLTHROUGH; case 8: SET_DBGBVRn(7, debug_state->uds.ds64.bvr[7]); SET_DBGBCRn(7, (uint64_t)debug_state->uds.ds64.bcr[7], all_ctrls); + OS_FALLTHROUGH; case 7: SET_DBGBVRn(6, debug_state->uds.ds64.bvr[6]); SET_DBGBCRn(6, (uint64_t)debug_state->uds.ds64.bcr[6], all_ctrls); + OS_FALLTHROUGH; case 6: SET_DBGBVRn(5, debug_state->uds.ds64.bvr[5]); SET_DBGBCRn(5, (uint64_t)debug_state->uds.ds64.bcr[5], all_ctrls); + OS_FALLTHROUGH; case 5: SET_DBGBVRn(4, debug_state->uds.ds64.bvr[4]); SET_DBGBCRn(4, (uint64_t)debug_state->uds.ds64.bcr[4], all_ctrls); + OS_FALLTHROUGH; case 4: SET_DBGBVRn(3, debug_state->uds.ds64.bvr[3]); SET_DBGBCRn(3, (uint64_t)debug_state->uds.ds64.bcr[3], all_ctrls); + OS_FALLTHROUGH; case 3: SET_DBGBVRn(2, debug_state->uds.ds64.bvr[2]); SET_DBGBCRn(2, (uint64_t)debug_state->uds.ds64.bcr[2], all_ctrls); + OS_FALLTHROUGH; case 2: SET_DBGBVRn(1, debug_state->uds.ds64.bvr[1]); SET_DBGBCRn(1, (uint64_t)debug_state->uds.ds64.bcr[1], all_ctrls); + OS_FALLTHROUGH; case 1: SET_DBGBVRn(0, debug_state->uds.ds64.bvr[0]); SET_DBGBCRn(0, (uint64_t)debug_state->uds.ds64.bcr[0], all_ctrls); + OS_FALLTHROUGH; default: break; } @@ -726,51 +800,67 @@ void arm_debug_set64(arm_debug_state_t *debug_state) case 16: SET_DBGWVRn(15, debug_state->uds.ds64.wvr[15]); SET_DBGWCRn(15, (uint64_t)debug_state->uds.ds64.wcr[15], all_ctrls); + OS_FALLTHROUGH; case 15: SET_DBGWVRn(14, debug_state->uds.ds64.wvr[14]); SET_DBGWCRn(14, (uint64_t)debug_state->uds.ds64.wcr[14], all_ctrls); + OS_FALLTHROUGH; case 14: SET_DBGWVRn(13, debug_state->uds.ds64.wvr[13]); SET_DBGWCRn(13, (uint64_t)debug_state->uds.ds64.wcr[13], all_ctrls); + OS_FALLTHROUGH; case 13: SET_DBGWVRn(12, debug_state->uds.ds64.wvr[12]); SET_DBGWCRn(12, (uint64_t)debug_state->uds.ds64.wcr[12], all_ctrls); + OS_FALLTHROUGH; case 12: SET_DBGWVRn(11, debug_state->uds.ds64.wvr[11]); SET_DBGWCRn(11, (uint64_t)debug_state->uds.ds64.wcr[11], all_ctrls); + OS_FALLTHROUGH; case 11: 
SET_DBGWVRn(10, debug_state->uds.ds64.wvr[10]); SET_DBGWCRn(10, (uint64_t)debug_state->uds.ds64.wcr[10], all_ctrls); + OS_FALLTHROUGH; case 10: SET_DBGWVRn(9, debug_state->uds.ds64.wvr[9]); SET_DBGWCRn(9, (uint64_t)debug_state->uds.ds64.wcr[9], all_ctrls); + OS_FALLTHROUGH; case 9: SET_DBGWVRn(8, debug_state->uds.ds64.wvr[8]); SET_DBGWCRn(8, (uint64_t)debug_state->uds.ds64.wcr[8], all_ctrls); + OS_FALLTHROUGH; case 8: SET_DBGWVRn(7, debug_state->uds.ds64.wvr[7]); SET_DBGWCRn(7, (uint64_t)debug_state->uds.ds64.wcr[7], all_ctrls); + OS_FALLTHROUGH; case 7: SET_DBGWVRn(6, debug_state->uds.ds64.wvr[6]); SET_DBGWCRn(6, (uint64_t)debug_state->uds.ds64.wcr[6], all_ctrls); + OS_FALLTHROUGH; case 6: SET_DBGWVRn(5, debug_state->uds.ds64.wvr[5]); SET_DBGWCRn(5, (uint64_t)debug_state->uds.ds64.wcr[5], all_ctrls); + OS_FALLTHROUGH; case 5: SET_DBGWVRn(4, debug_state->uds.ds64.wvr[4]); SET_DBGWCRn(4, (uint64_t)debug_state->uds.ds64.wcr[4], all_ctrls); + OS_FALLTHROUGH; case 4: SET_DBGWVRn(3, debug_state->uds.ds64.wvr[3]); SET_DBGWCRn(3, (uint64_t)debug_state->uds.ds64.wcr[3], all_ctrls); + OS_FALLTHROUGH; case 3: SET_DBGWVRn(2, debug_state->uds.ds64.wvr[2]); SET_DBGWCRn(2, (uint64_t)debug_state->uds.ds64.wcr[2], all_ctrls); + OS_FALLTHROUGH; case 2: SET_DBGWVRn(1, debug_state->uds.ds64.wvr[1]); SET_DBGWCRn(1, (uint64_t)debug_state->uds.ds64.wcr[1], all_ctrls); + OS_FALLTHROUGH; case 1: SET_DBGWVRn(0, debug_state->uds.ds64.wvr[0]); SET_DBGWCRn(0, (uint64_t)debug_state->uds.ds64.wcr[0], all_ctrls); + OS_FALLTHROUGH; default: break; } @@ -781,37 +871,23 @@ void arm_debug_set64(arm_debug_state_t *debug_state) } #endif - for (i = 0; i < debug_info->num_breakpoint_pairs; i++) { - if (0 != debug_state->uds.ds64.bcr[i]) { - set_mde = 1; - break; - } - } - - for (i = 0; i < debug_info->num_watchpoint_pairs; i++) { - if (0 != debug_state->uds.ds64.wcr[i]) { - set_mde = 1; - break; - } - } - /* * Breakpoint/Watchpoint Enable */ - if (set_mde) { + if (all_ctrls != 0) { update_mdscr(0, 0x8000); // MDSCR_EL1[MDE] + } else { + update_mdscr(0x8000, 0); } /* * Software debug single step enable */ if (debug_state->uds.ds64.mdscr_el1 & 0x1) { - update_mdscr(0x8000, 1); // ~MDE | SS : no brk/watch while single stepping (which we've set) mask_saved_state_cpsr(current_thread()->machine.upcb, PSR64_SS, 0); } else { - update_mdscr(0x1, 0); #if SINGLE_STEP_RETIRE_ERRATA @@ -821,11 +897,10 @@ void arm_debug_set64(arm_debug_state_t *debug_state) } (void) ml_set_interrupts_enabled(intr); - - return; } -void arm_debug_set(arm_debug_state_t *debug_state) +void +arm_debug_set(arm_debug_state_t *debug_state) { if (debug_state) { switch (debug_state->dsh.flavor) { @@ -840,10 +915,11 @@ void arm_debug_set(arm_debug_state_t *debug_state) break; } } else { - if (thread_is_64bit_data(current_thread())) + if (thread_is_64bit_data(current_thread())) { arm_debug_set64(debug_state); - else + } else { arm_debug_set32(debug_state); + } } } @@ -854,13 +930,15 @@ debug_legacy_state_is_valid(arm_legacy_debug_state_t *debug_state) arm_debug_info_t *debug_info = arm_debug_info(); uint32_t i; for (i = 0; i < debug_info->num_breakpoint_pairs; i++) { - if (0 != debug_state->bcr[i] && VM_MAX_ADDRESS32 <= debug_state->bvr[i]) + if (0 != debug_state->bcr[i] && VM_MAX_ADDRESS32 <= debug_state->bvr[i]) { return FALSE; + } } for (i = 0; i < debug_info->num_watchpoint_pairs; i++) { - if (0 != debug_state->wcr[i] && VM_MAX_ADDRESS32 <= debug_state->wvr[i]) + if (0 != debug_state->wcr[i] && VM_MAX_ADDRESS32 <= debug_state->wvr[i]) { return FALSE; + } } return 
TRUE; } @@ -871,13 +949,15 @@ debug_state_is_valid32(arm_debug_state32_t *debug_state) arm_debug_info_t *debug_info = arm_debug_info(); uint32_t i; for (i = 0; i < debug_info->num_breakpoint_pairs; i++) { - if (0 != debug_state->bcr[i] && VM_MAX_ADDRESS32 <= debug_state->bvr[i]) + if (0 != debug_state->bcr[i] && VM_MAX_ADDRESS32 <= debug_state->bvr[i]) { return FALSE; + } } for (i = 0; i < debug_info->num_watchpoint_pairs; i++) { - if (0 != debug_state->wcr[i] && VM_MAX_ADDRESS32 <= debug_state->wvr[i]) + if (0 != debug_state->wcr[i] && VM_MAX_ADDRESS32 <= debug_state->wvr[i]) { return FALSE; + } } return TRUE; } @@ -888,13 +968,15 @@ debug_state_is_valid64(arm_debug_state64_t *debug_state) arm_debug_info_t *debug_info = arm_debug_info(); uint32_t i; for (i = 0; i < debug_info->num_breakpoint_pairs; i++) { - if (0 != debug_state->bcr[i] && MACH_VM_MAX_ADDRESS <= debug_state->bvr[i]) + if (0 != debug_state->bcr[i] && MACH_VM_MAX_ADDRESS <= debug_state->bvr[i]) { return FALSE; + } } for (i = 0; i < debug_info->num_watchpoint_pairs; i++) { - if (0 != debug_state->wcr[i] && MACH_VM_MAX_ADDRESS <= debug_state->wvr[i]) + if (0 != debug_state->wcr[i] && MACH_VM_MAX_ADDRESS <= debug_state->wvr[i]) { return FALSE; + } } return TRUE; } @@ -905,31 +987,31 @@ debug_state_is_valid64(arm_debug_state64_t *debug_state) */ void copy_legacy_debug_state(arm_legacy_debug_state_t * src, - arm_legacy_debug_state_t * target, - __unused boolean_t all) + arm_legacy_debug_state_t * target, + __unused boolean_t all) { bcopy(src, target, sizeof(arm_legacy_debug_state_t)); } void copy_debug_state32(arm_debug_state32_t * src, - arm_debug_state32_t * target, - __unused boolean_t all) + arm_debug_state32_t * target, + __unused boolean_t all) { bcopy(src, target, sizeof(arm_debug_state32_t)); } void copy_debug_state64(arm_debug_state64_t * src, - arm_debug_state64_t * target, - __unused boolean_t all) + arm_debug_state64_t * target, + __unused boolean_t all) { bcopy(src, target, sizeof(arm_debug_state64_t)); } kern_return_t machine_thread_set_tsd_base(thread_t thread, - mach_vm_offset_t tsd_base) + mach_vm_offset_t tsd_base) { if (thread->task == kernel_task) { return KERN_INVALID_ARGUMENT; @@ -940,11 +1022,13 @@ machine_thread_set_tsd_base(thread_t thread, } if (thread_is_64bit_addr(thread)) { - if (tsd_base > vm_map_max(thread->map)) + if (tsd_base > vm_map_max(thread->map)) { tsd_base = 0ULL; + } } else { - if (tsd_base > UINT32_MAX) + if (tsd_base > UINT32_MAX) { tsd_base = 0ULL; + } } thread->machine.cthread_self = tsd_base; @@ -958,7 +1042,6 @@ machine_thread_set_tsd_base(thread_t thread, cpunum = tpidrro_el0 & (MACHDEP_CPUNUM_MASK); set_tpidrro(tsd_base | cpunum); mp_enable_preemption(); - } return KERN_SUCCESS; @@ -974,3 +1057,16 @@ machine_csv(__unused cpuvn_e cve) { return 0; } + + +#if __has_feature(ptrauth_calls) +boolean_t +arm_user_jop_disabled(void) +{ +#if DEVELOPMENT || DEBUG + return !!(BootArgs->bootFlags & kBootFlagsDisableUserJOP); +#else + return FALSE; +#endif +} +#endif /* __has_feature(ptrauth_calls) */ diff --git a/osfmk/arm64/platform_tests.c b/osfmk/arm64/platform_tests.c index 2fd98c9f7..4531e4de2 100644 --- a/osfmk/arm64/platform_tests.c +++ b/osfmk/arm64/platform_tests.c @@ -58,6 +58,7 @@ #include #include +#include #include #include #include @@ -80,6 +81,10 @@ #include #include +#if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR) +#include +#endif // defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR) + kern_return_t arm64_lock_test(void); kern_return_t 
arm64_munger_test(void); kern_return_t ex_cb_test(void); @@ -279,17 +284,18 @@ const int limit = 1000000; static int lt_stress_local_counters[MAX_CPUS]; lck_ticket_t lt_ticket_lock; +lck_grp_t lt_ticket_grp; static void lt_stress_ticket_lock() { int local_counter = 0; - uint cpuid = current_processor()->cpu_id; + uint cpuid = cpu_number(); kprintf("%s>cpu %d starting\n", __FUNCTION__, cpuid); - lck_ticket_lock(<_ticket_lock); + lck_ticket_lock(<_ticket_lock, <_ticket_grp); lt_counter++; local_counter++; lck_ticket_unlock(<_ticket_lock); @@ -301,7 +307,7 @@ lt_stress_ticket_lock() kprintf("%s>cpu %d started\n", __FUNCTION__, cpuid); while (lt_counter < limit) { - lck_ticket_lock(<_ticket_lock); + lck_ticket_lock(<_ticket_lock, <_ticket_grp); if (lt_counter < limit) { lt_counter++; local_counter++; @@ -910,7 +916,8 @@ lt_test_locks() /* Ticket locks stress test */ T_LOG("Running Ticket locks stress test with lck_ticket_lock()"); extern unsigned int real_ncpus; - lck_ticket_init(<_ticket_lock); + lck_grp_init(<_ticket_grp, "ticket lock stress", LCK_GRP_ATTR_NULL); + lck_ticket_init(<_ticket_lock, <_ticket_grp); lt_reset(); lt_target_done_threads = real_ncpus; for (processor_t processor = processor_list; processor != NULL; processor = processor->processor_list) { @@ -1027,8 +1034,10 @@ struct munger_test { {MT_FUNC(munge_wllwwll), 11, 7, {MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL}}, {MT_FUNC(munge_wwwlw), 6, 5, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL}}, {MT_FUNC(munge_wwwlww), 7, 6, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL}}, + {MT_FUNC(munge_wwwlwww), 8, 7, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}}, {MT_FUNC(munge_wwwl), 5, 4, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}}, {MT_FUNC(munge_wwwwlw), 7, 6, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL}}, + {MT_FUNC(munge_wwwwllww), 10, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL}}, {MT_FUNC(munge_wwwwl), 6, 5, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}}, {MT_FUNC(munge_wwwwwl), 7, 6, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}}, {MT_FUNC(munge_wwwwwlww), 9, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL}}, @@ -1183,24 +1192,32 @@ arm64_ropjop_test() /* how is ROP/JOP configured */ boolean_t config_rop_enabled = TRUE; - boolean_t config_jop_enabled = !(BootArgs->bootFlags & kBootFlagsDisableJOP); + boolean_t config_jop_enabled = TRUE; /* assert all AppleMode ROP/JOP features enabled */ uint64_t apctl = __builtin_arm_rsr64(ARM64_REG_APCTL_EL1); #if __APSTS_SUPPORTED__ uint64_t apsts = __builtin_arm_rsr64(ARM64_REG_APSTS_EL1); - T_ASSERT(apsts & APSTS_EL1_MKEYVld, NULL); + T_EXPECT(apsts & APSTS_EL1_MKEYVld, NULL); #else - T_ASSERT(apctl & APCTL_EL1_MKEYVld, NULL); + T_EXPECT(apctl & APCTL_EL1_MKEYVld, NULL); #endif /* __APSTS_SUPPORTED__ */ - T_ASSERT(apctl & APCTL_EL1_AppleMode, NULL); - T_ASSERT(apctl & APCTL_EL1_KernKeyEn, NULL); + T_EXPECT(apctl & APCTL_EL1_AppleMode, NULL); + + bool kernkeyen = apctl & APCTL_EL1_KernKeyEn; +#if HAS_APCTL_EL1_USERKEYEN + bool userkeyen = apctl & APCTL_EL1_UserKeyEn; +#else + bool userkeyen = false; +#endif + /* for KernKey to work as a diversifier, it must be enabled at exactly one of {EL0, EL1/2} */ + T_EXPECT(kernkeyen || userkeyen, "KernKey is enabled"); + T_EXPECT(!(kernkeyen && userkeyen), "KernKey is not simultaneously enabled at userspace and kernel space"); /* ROP/JOP keys enabled current status */ bool 
status_jop_enabled, status_rop_enabled; #if __APSTS_SUPPORTED__ /* H13+ */ - // TODO: update unit test to understand ROP/JOP enabled config for H13+ status_jop_enabled = status_rop_enabled = apctl & APCTL_EL1_EnAPKey1; #elif __APCFG_SUPPORTED__ /* H12 */ uint64_t apcfg_el1 = __builtin_arm_rsr64(APCFG_EL1); @@ -1212,8 +1229,8 @@ arm64_ropjop_test() #endif /* __APSTS_SUPPORTED__ */ /* assert configured and running status match */ - T_ASSERT(config_rop_enabled == status_rop_enabled, NULL); - T_ASSERT(config_jop_enabled == status_jop_enabled, NULL); + T_EXPECT(config_rop_enabled == status_rop_enabled, NULL); + T_EXPECT(config_jop_enabled == status_jop_enabled, NULL); if (config_jop_enabled) { @@ -1316,9 +1333,70 @@ arm64_late_pan_test() return KERN_SUCCESS; } +static bool +arm64_pan_test_pan_enabled_fault_handler(arm_saved_state_t * state) +{ + bool retval = false; + uint32_t esr = get_saved_state_esr(state); + esr_exception_class_t class = ESR_EC(esr); + fault_status_t fsc = ISS_IA_FSC(ESR_ISS(esr)); + uint32_t cpsr = get_saved_state_cpsr(state); + uint64_t far = get_saved_state_far(state); + + if ((class == ESR_EC_DABORT_EL1) && (fsc == FSC_PERMISSION_FAULT_L3) && + (cpsr & PSR64_PAN) && + ((esr & ISS_DA_WNR) ? mmu_kvtop_wpreflight(far) : mmu_kvtop(far))) { + ++pan_exception_level; + // read the user-accessible value to make sure + // pan is enabled and produces a 2nd fault from + // the exception handler + if (pan_exception_level == 1) { + ml_expect_fault_begin(arm64_pan_test_pan_enabled_fault_handler, far); + pan_fault_value = *(volatile char *)far; + ml_expect_fault_end(); + __builtin_arm_wsr("pan", 1); // turn PAN back on after the nested exception cleared it for this context + } + // this fault address is used for PAN test + // disable PAN and rerun + mask_saved_state_cpsr(state, 0, PSR64_PAN); + + retval = true; + } + + return retval; +} + +static bool +arm64_pan_test_pan_disabled_fault_handler(arm_saved_state_t * state) +{ + bool retval = false; + uint32_t esr = get_saved_state_esr(state); + esr_exception_class_t class = ESR_EC(esr); + fault_status_t fsc = ISS_IA_FSC(ESR_ISS(esr)); + uint32_t cpsr = get_saved_state_cpsr(state); + + if ((class == ESR_EC_DABORT_EL1) && (fsc == FSC_PERMISSION_FAULT_L3) && + !(cpsr & PSR64_PAN)) { + ++pan_exception_level; + // On an exception taken from a PAN-disabled context, verify + // that PAN is re-enabled for the exception handler and that + // accessing the test address produces a PAN fault. + ml_expect_fault_begin(arm64_pan_test_pan_enabled_fault_handler, pan_test_addr); + pan_fault_value = *(volatile char *)pan_test_addr; + ml_expect_fault_end(); + __builtin_arm_wsr("pan", 1); // turn PAN back on after the nested exception cleared it for this context + add_saved_state_pc(state, 4); + + retval = true; + } + + return retval; +} + kern_return_t arm64_pan_test() { + bool values_match = false; vm_offset_t priv_addr = _COMM_PAGE_SIGNATURE; T_LOG("Testing PAN."); @@ -1334,11 +1412,18 @@ arm64_pan_test() pan_test_addr = priv_addr + _COMM_HIGH_PAGE64_BASE_ADDRESS - _COMM_PAGE_START_ADDRESS; + // Context-switch with PAN disabled is prohibited; prevent test logging from + // triggering a voluntary context switch. 
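	/*
	 * Preemption must stay disabled across the faulting accesses because the
	 * machine_switch_context()/machine_stack_handoff() changes earlier in this
	 * patch now panic if a switch is attempted while PAN is clear, and the
	 * fault handlers above deliberately leave PAN cleared for stretches of
	 * this test.
	 *
	 * Minimal sketch of the expected-fault bracket used below (probe_addr is
	 * an illustrative name; ml_expect_fault_begin/ml_expect_fault_end and the
	 * handler are the ones defined in this file):
	 *
	 *     ml_expect_fault_begin(arm64_pan_test_pan_enabled_fault_handler, probe_addr);
	 *     (void)*(volatile char *)probe_addr;   // PAN permission fault expected here
	 *     ml_expect_fault_end();                // close the expected-fault window
	 */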
+ mp_disable_preemption(); + // Below should trigger a PAN exception as pan_test_addr is accessible // in user mode // The exception handler, upon recognizing the fault address is pan_test_addr, // will disable PAN and rerun this instruction successfully - T_ASSERT(*(char *)pan_test_addr == *(char *)priv_addr, NULL); + ml_expect_fault_begin(arm64_pan_test_pan_enabled_fault_handler, pan_test_addr); + values_match = (*(volatile char *)pan_test_addr == *(volatile char *)priv_addr); + ml_expect_fault_end(); + T_ASSERT(values_match, NULL); T_ASSERT(pan_exception_level == 2, NULL); @@ -1352,7 +1437,9 @@ arm64_pan_test() // Force a permission fault while PAN is disabled to make sure PAN is // re-enabled during the exception handler. + ml_expect_fault_begin(arm64_pan_test_pan_disabled_fault_handler, pan_ro_addr); *((volatile uint64_t*)pan_ro_addr) = 0xFEEDFACECAFECAFE; + ml_expect_fault_end(); T_ASSERT(pan_exception_level == 2, NULL); @@ -1365,6 +1452,8 @@ arm64_pan_test() __builtin_arm_wsr("pan", 1); + mp_enable_preemption(); + return KERN_SUCCESS; } #endif /* __ARM_PAN_AVAILABLE__ */ @@ -1398,6 +1487,12 @@ ctrr_test(void) PE_parse_boot_argn("-unsafe_kernel_text", &ctrr_disable, sizeof(ctrr_disable)); +#if CONFIG_CSR_FROM_DT + if (csr_unsafe_kernel_text) { + ctrr_disable = TRUE; + } +#endif /* CONFIG_CSR_FROM_DT */ + if (ctrr_disable) { T_LOG("Skipping CTRR test when -unsafe_kernel_text boot-arg present"); return KERN_SUCCESS; @@ -1419,6 +1514,41 @@ ctrr_test(void) return KERN_SUCCESS; } +static bool +ctrr_test_ro_fault_handler(arm_saved_state_t * state) +{ + bool retval = false; + uint32_t esr = get_saved_state_esr(state); + esr_exception_class_t class = ESR_EC(esr); + fault_status_t fsc = ISS_DA_FSC(ESR_ISS(esr)); + + if ((class == ESR_EC_DABORT_EL1) && (fsc == FSC_PERMISSION_FAULT_L3)) { + ctrr_exception_esr = esr; + add_saved_state_pc(state, 4); + retval = true; + } + + return retval; +} + +static bool +ctrr_test_nx_fault_handler(arm_saved_state_t * state) +{ + bool retval = false; + uint32_t esr = get_saved_state_esr(state); + esr_exception_class_t class = ESR_EC(esr); + fault_status_t fsc = ISS_IA_FSC(ESR_ISS(esr)); + + if ((class == ESR_EC_IABORT_EL1) && (fsc == FSC_PERMISSION_FAULT_L3)) { + ctrr_exception_esr = esr; + /* return to the instruction immediately after the call to NX page */ + set_saved_state_pc(state, get_saved_state_lr(state)); + retval = true; + } + + return retval; +} + /* test CTRR on a cpu, caller to bind thread to desired cpu */ /* ctrr_test_page was reserved during bootstrap process */ kern_return_t @@ -1429,13 +1559,12 @@ ctrr_test_cpu(void) void (*ctrr_nx_test_ptr)(void); kern_return_t kr; uint64_t prot = 0; - extern uint64_t rorgn_begin, rorgn_end; extern vm_offset_t virtual_space_start; - /* rorgn = [rorgn_begin_va, rorgn_end_va) */ + /* ctrr read only region = [rorgn_begin_va, rorgn_end_va) */ - vm_offset_t rorgn_begin_va = phystokv(rorgn_begin); - vm_offset_t rorgn_end_va = phystokv(rorgn_end) + PAGE_SIZE; + vm_offset_t rorgn_begin_va = phystokv(ctrr_begin); + vm_offset_t rorgn_end_va = phystokv(ctrr_end) + 1; vm_offset_t ro_test_va = (vm_offset_t)&ctrr_ro_test; vm_offset_t nx_test_va = (vm_offset_t)&ctrr_nx_test; @@ -1468,7 +1597,9 @@ ctrr_test_cpu(void) T_LOG("Read only region test writing to %p to provoke data abort", ctrr_ro_test_ptr); // should cause data abort + ml_expect_fault_begin(ctrr_test_ro_fault_handler, ctrr_test_va); *ctrr_ro_test_ptr = 1; + ml_expect_fault_end(); // ensure write permission fault at expected level // data abort handler will set 
ctrr_exception_esr when ctrr_test_va takes a permission fault @@ -1492,16 +1623,18 @@ ctrr_test_cpu(void) T_EXPECT(ARM_PTE_EXTRACT_AP(prot) == AP_RONA && (~prot & ARM_PTE_PNX), "Mapping is EL1 ROX"); ctrr_test_va = ctrr_test_page + (nx_test_va & PAGE_MASK); +#if __has_feature(ptrauth_calls) + ctrr_nx_test_ptr = ptrauth_sign_unauthenticated((void *)ctrr_test_va, ptrauth_key_function_pointer, 0); +#else ctrr_nx_test_ptr = (void *)ctrr_test_va; +#endif T_LOG("No execute test calling ctrr_nx_test_ptr(): %p to provoke instruction abort", ctrr_nx_test_ptr); -#if __has_feature(ptrauth_calls) - // must sign before calling if we're creating function pointers out of thin air - ctrr_nx_test_ptr = ptrauth_sign_unauthenticated(ctrr_nx_test_ptr, ptrauth_key_function_pointer, 0); -#endif // should cause prefetch abort + ml_expect_fault_begin(ctrr_test_nx_fault_handler, ctrr_test_va); ctrr_nx_test_ptr(); + ml_expect_fault_end(); // TODO: ensure execute permission fault at expected level T_EXPECT(ESR_EC(ctrr_exception_esr) == ESR_EC_IABORT_EL1, "Instruction abort from EL1 Expected"); @@ -1509,7 +1642,15 @@ ctrr_test_cpu(void) ctrr_test_va = 0; ctrr_exception_esr = 0; + pmap_remove(kernel_pmap, ctrr_test_page, ctrr_test_page + PAGE_SIZE); + + T_LOG("Expect no faults when reading CTRR region to verify correct programming of CTRR limits"); + for (vm_offset_t addr = rorgn_begin_va; addr < rorgn_end_va; addr += 8) { + volatile uint64_t x = *(uint64_t *)addr; + (void) x; /* read for side effect only */ + } + return KERN_SUCCESS; } #endif /* defined(KERNEL_INTEGRITY_CTRR) && defined(CONFIG_XNUPOST) */ diff --git a/osfmk/arm64/proc_reg.h b/osfmk/arm64/proc_reg.h index 7226338ad..a160b3b9d 100644 --- a/osfmk/arm64/proc_reg.h +++ b/osfmk/arm64/proc_reg.h @@ -99,6 +99,44 @@ #endif #endif /* __ARM_KERNEL_PROTECT */ +#if ARM_PARAMETERIZED_PMAP +/* + * ARM_PARAMETERIZED_PMAP configures the kernel to get the characteristics of + * the page tables (number of levels, size of the root allocation) from the + * pmap data structure, rather than treating them as compile-time constants. + * This allows the pmap code to dynamically adjust how it deals with page + * tables. + */ +#endif /* ARM_PARAMETERIZED_PMAP */ + +#if __ARM_MIXED_PAGE_SIZE__ +/* + * __ARM_MIXED_PAGE_SIZE__ configures the kernel to support page tables that do + * not use the kernel page size. This is primarily meant to support running + * 4KB page processes on a 16KB page kernel. + * + * This only covers support in the pmap/machine dependent layers. Any support + * elsewhere in the kernel must be managed separately. + */ +#if !ARM_PARAMETERIZED_PMAP +/* + * Page tables that use non-kernel page sizes require us to reprogram TCR based + * on the page tables we are switching to. This means that the parameterized + * pmap support is required. + */ +#error __ARM_MIXED_PAGE_SIZE__ requires ARM_PARAMETERIZED_PMAP +#endif /* !ARM_PARAMETERIZED_PMAP */ +#if __ARM_KERNEL_PROTECT__ +/* + * Because switching the page size requires updating TCR based on the pmap, and + * __ARM_KERNEL_PROTECT__ relies on TCR being programmed with constants, XNU + * does not currently support support configurations that use both + * __ARM_KERNEL_PROTECT__ and __ARM_MIXED_PAGE_SIZE__. 
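 *
 * As a rough illustration only (the pmap accessor names below are assumptions,
 * not the actual xnu interfaces; TCR_EL1_4KB/TCR_EL1_16KB are defined later in
 * this header), a mixed-page-size switch would have to pick the TCR image that
 * matches the incoming pmap's granule, e.g.
 *
 *     uint64_t tcr = (pmap_page_shift(new_pmap) == 14) ? TCR_EL1_16KB
 *                                                       : TCR_EL1_4KB;
 *     if (get_tcr() != tcr) {
 *         set_tcr(tcr);   // reprogram translation controls for the new space
 *     }
 *
 * which is precisely the per-pmap decision that a constant TCR programming
 * model cannot express.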
+ */ +#error __ARM_MIXED_PAGE_SIZE__ and __ARM_KERNEL_PROTECT__ are mutually exclusive +#endif /* __ARM_KERNEL_PROTECT__ */ +#endif /* __ARM_MIXED_PAGE_SIZE__ */ + /* * 64-bit Program Status Register (PSR64) * @@ -117,7 +155,7 @@ */ #define PSR64_NZCV_SHIFT 28 -#define PSR64_NZCV_MASK (1 << PSR64_NZCV_SHIFT) +#define PSR64_NZCV_MASK (0xF << PSR64_NZCV_SHIFT) #define PSR64_N_SHIFT 31 #define PSR64_N (1 << PSR64_N_SHIFT) @@ -140,6 +178,15 @@ #define PSR64_IL_SHIFT 20 #define PSR64_IL (1 << PSR64_IL_SHIFT) +/* + * SSBS is bit 12 for A64 SPSR and bit 23 for A32 SPSR + * I do not want to talk about it! + */ +#define PSR64_SSBS_SHIFT_32 23 +#define PSR64_SSBS_SHIFT_64 12 +#define PSR64_SSBS_32 (1 << PSR64_SSBS_SHIFT_32) +#define PSR64_SSBS_64 (1 << PSR64_SSBS_SHIFT_64) + /* * msr DAIF, Xn and mrs Xn, DAIF transfer into * and out of bits 9:6 @@ -161,6 +208,10 @@ #define SPSR_INTERRUPTS_ENABLED(x) (!(x & DAIF_FIQF)) +#define PSR64_SSBS_U32_DEFAULT (0) +#define PSR64_SSBS_U64_DEFAULT (0) +#define PSR64_SSBS_KRN_DEFAULT (0) + /* * msr DAIFSet, Xn, and msr DAIFClr, Xn transfer * from bits 3:0. @@ -179,6 +230,8 @@ #define PSR64_MODE_MASK 0x1F +#define PSR64_USER_MASK PSR64_NZCV_MASK + #define PSR64_MODE_USER32_THUMB 0x20 #define PSR64_MODE_RW_SHIFT 4 @@ -195,9 +248,9 @@ #define PSR64_MODE_SPX 0x1 #define PSR64_MODE_SP0 0 -#define PSR64_USER32_DEFAULT (PSR64_MODE_RW_32 | PSR64_MODE_EL0 | PSR64_MODE_SP0) -#define PSR64_USER64_DEFAULT (PSR64_MODE_RW_64 | PSR64_MODE_EL0 | PSR64_MODE_SP0) -#define PSR64_KERNEL_STANDARD (DAIF_STANDARD_DISABLE | PSR64_MODE_RW_64 | PSR64_MODE_EL1 | PSR64_MODE_SP0) +#define PSR64_USER32_DEFAULT (PSR64_MODE_RW_32 | PSR64_MODE_EL0 | PSR64_MODE_SP0 | PSR64_SSBS_U32_DEFAULT) +#define PSR64_USER64_DEFAULT (PSR64_MODE_RW_64 | PSR64_MODE_EL0 | PSR64_MODE_SP0 | PSR64_SSBS_U64_DEFAULT) +#define PSR64_KERNEL_STANDARD (DAIF_STANDARD_DISABLE | PSR64_MODE_RW_64 | PSR64_MODE_EL1 | PSR64_MODE_SP0 | PSR64_SSBS_KRN_DEFAULT) #if __ARM_PAN_AVAILABLE__ #define PSR64_KERNEL_DEFAULT (PSR64_KERNEL_STANDARD | PSR64_PAN) #else @@ -217,7 +270,9 @@ * System Control Register (SCTLR) */ -#define SCTLR_RESERVED ((3ULL << 28) | (1ULL << 22) | (1ULL << 20) | (1ULL << 11)) +#define SCTLR_DSSBS (1ULL << 44) + +#define SCTLR_RESERVED ((3ULL << 28) | (1ULL << 20)) #if defined(HAS_APPLE_PAC) // 31 PACIA_ENABLED AddPACIA and AuthIA functions enabled @@ -246,7 +301,9 @@ // 23 SPAN Set PAN #define SCTLR_PAN_UNCHANGED (1ULL << 23) -// 22 RES1 1 +// 22 EIS Taking an exception is a context synchronization event +#define SCTLR_EIS (1ULL << 22) + // 21 RES0 0 // 20 RES1 1 @@ -270,7 +327,9 @@ // 12 I Instruction cache enable #define SCTLR_I_ENABLED (1ULL << 12) -// 11 RES1 1 +// 11 EOS Exception return is a context synchronization event +#define SCTLR_EOS (1ULL << 11) + // 10 RES0 0 // 9 UMA User Mask Access @@ -302,10 +361,14 @@ // 0 M MMU enable #define SCTLR_M_ENABLED (1ULL << 0) +#define SCTLR_CSEH_DEFAULT (SCTLR_EIS | SCTLR_EOS) +#define SCTLR_DSSBS_DEFAULT (0) + #define SCTLR_EL1_DEFAULT \ (SCTLR_RESERVED | SCTLR_UCI_ENABLED | SCTLR_nTWE_WFE_ENABLED | SCTLR_DZE_ENABLED | \ SCTLR_I_ENABLED | SCTLR_SED_DISABLED | SCTLR_CP15BEN_ENABLED | \ - SCTLR_SA0_ENABLED | SCTLR_SA_ENABLED | SCTLR_C_ENABLED | SCTLR_M_ENABLED) + SCTLR_SA0_ENABLED | SCTLR_SA_ENABLED | SCTLR_C_ENABLED | SCTLR_M_ENABLED | \ + SCTLR_CSEH_DEFAULT | SCTLR_DSSBS_DEFAULT) /* * Coprocessor Access Control Register (CPACR) @@ -400,7 +463,7 @@ #define FPCR_OFE (1 << FPCR_OFE_SHIFT) #define FPCR_DZE (1 << FPCR_DZE_SHIFT) #define FPCR_IOE (1 << FPCR_IOE_SHIFT) 
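/*
 * FPCR_DEFAULT is the baseline the kernel expects FPCR to hold: the
 * machine_stack_attach() change earlier in this patch seeds the kernel NEON
 * save state with it, and the SANITIZE_FPCR assembler macro added further down
 * in this header only issues the msr when the live value differs.  A minimal
 * C-level sketch of that compare-before-write idea (assuming the "FPCR" name
 * is accepted by the rsr/wsr builtins already used elsewhere in this patch):
 *
 *     uint64_t cur = __builtin_arm_rsr64("FPCR");
 *     if (cur != FPCR_DEFAULT) {
 *         __builtin_arm_wsr64("FPCR", FPCR_DEFAULT);  // skip the MSR when already at default
 *     }
 */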
-#define FPCR_DEFAULT (FPCR_DN) +#define FPCR_DEFAULT (0) #define FPCR_DEFAULT_32 (FPCR_DN|FPCR_FZ) /* @@ -469,6 +532,8 @@ #define TCR_SH0_INNER (3ULL << TCR_SH0_SHIFT) #define TCR_TG0_GRANULE_SHIFT (14ULL) +#define TCR_TG0_GRANULE_BITS (2ULL) +#define TCR_TG0_GRANULE_MASK ((1ULL << TCR_TG0_GRANULE_BITS) - 1ULL) #define TCR_TG0_GRANULE_4KB (0ULL << TCR_TG0_GRANULE_SHIFT) #define TCR_TG0_GRANULE_64KB (1ULL << TCR_TG0_GRANULE_SHIFT) @@ -515,6 +580,8 @@ #endif #define TCR_IPS_SHIFT 32ULL +#define TCR_IPS_BITS 3ULL +#define TCR_IPS_MASK ((1ULL << TCR_IPS_BITS) - 1ULL) #define TCR_IPS_32BITS (0ULL << TCR_IPS_SHIFT) #define TCR_IPS_36BITS (1ULL << TCR_IPS_SHIFT) #define TCR_IPS_40BITS (2ULL << TCR_IPS_SHIFT) @@ -534,6 +601,16 @@ #define TCR_TBID0_ENABLE 0 #endif +#define TCR_E0PD0_BIT (1ULL << 55) +#define TCR_E0PD1_BIT (1ULL << 56) + +#if defined(HAS_E0PD) +#define TCR_E0PD_VALUE (TCR_E0PD1_BIT) +#else +#define TCR_E0PD_VALUE 0 +#endif + + /* * Multiprocessor Affinity Register (MPIDR_EL1) * @@ -618,18 +695,56 @@ #define TCR_EL1_BASE \ (TCR_IPS_VALUE | TCR_SH0_OUTER | TCR_ORGN0_WRITEBACK | \ TCR_IRGN0_WRITEBACK | (T0SZ_BOOT << TCR_T0SZ_SHIFT) | \ - (TCR_TG0_GRANULE_SIZE) | TCR_SH1_OUTER | TCR_ORGN1_WRITEBACK | \ + TCR_SH1_OUTER | TCR_ORGN1_WRITEBACK | \ TCR_IRGN1_WRITEBACK | (TCR_TG1_GRANULE_SIZE) | \ - TCR_TBI0_TOPBYTE_IGNORED | (TCR_TBID0_ENABLE)) + TCR_TBI0_TOPBYTE_IGNORED | (TCR_TBID0_ENABLE) | TCR_E0PD_VALUE) #if __ARM_KERNEL_PROTECT__ -#define TCR_EL1_BOOT (TCR_EL1_BASE | (T1SZ_BOOT << TCR_T1SZ_SHIFT)) +#define TCR_EL1_BOOT (TCR_EL1_BASE | (T1SZ_BOOT << TCR_T1SZ_SHIFT) | (TCR_TG0_GRANULE_SIZE)) #define T1SZ_USER (T1SZ_BOOT + 1) -#define TCR_EL1_USER (TCR_EL1_BASE | (T1SZ_USER << TCR_T1SZ_SHIFT)) +#define TCR_EL1_USER (TCR_EL1_BASE | (T1SZ_USER << TCR_T1SZ_SHIFT) | (TCR_TG0_GRANULE_SIZE)) #else -#define TCR_EL1_BOOT (TCR_EL1_BASE | (T1SZ_BOOT << TCR_T1SZ_SHIFT)) +#define TCR_EL1_BOOT (TCR_EL1_BASE | (T1SZ_BOOT << TCR_T1SZ_SHIFT) | (TCR_TG0_GRANULE_SIZE)) #endif /* __ARM_KERNEL_PROTECT__ */ +#define TCR_EL1_4KB (TCR_EL1_BASE | (T1SZ_BOOT << TCR_T1SZ_SHIFT) | (TCR_TG0_GRANULE_4KB)) +#define TCR_EL1_16KB (TCR_EL1_BASE | (T1SZ_BOOT << TCR_T1SZ_SHIFT) | (TCR_TG0_GRANULE_16KB)) + + + + +/* + * Monitor Debug System Control Register (MDSCR) + */ + +#define MDSCR_TFO_SHIFT 31 +#define MDSCR_TFO (1ULL << MDSCR_TFO_SHIFT) +#define MDSCR_RXFULL_SHIFT 30 +#define MDSCR_RXFULL (1ULL << MDSCR_RXFULL_SHIFT) +#define MDSCR_TXFULL_SHIFT 29 +#define MDSCR_TXFULL (1ULL << MDSCR_TXFULL_SHIFT) +#define MDSCR_RXO_SHIFT 27 +#define MDSCR_RXO (1ULL << MDSCR_RXO_SHIFT) +#define MDSCR_TXU_SHIFT 26 +#define MDSCR_TXU (1ULL << MDSCR_TXU_SHIFT) +#define MDSCR_INTDIS_SHIFT 22 +#define MDSCR_INTDIS_MASK (0x2U << MDSCR_INTDIS_SHIFT) +#define MDSCR_TDA_SHIFT 21 +#define MDSCR_TDA (1ULL << MDSCR_TDA_SHIFT) +#define MDSCR_SC2_SHIFT 19 +#define MDSCR_SC2 (1ULL << MDSCR_SC2_SHIFT) +#define MDSCR_MDE_SHIFT 15 +#define MDSCR_MDE (1ULL << MDSCR_MDE_SHIFT) +#define MDSCR_HDE_SHIFT 14 +#define MDSCR_HDE (1ULL << MDSCR_HDE_SHIFT) +#define MDSCR_KDE_SHIFT 13 +#define MDSCR_KDE (1ULL << MDSCR_KDE_SHIFT) +#define MDSCR_TDCC_SHIFT 12 +#define MDSCR_TDCC (1ULL << MDSCR_TDCC_SHIFT) +#define MDSCR_ERR_SHIFT 6 +#define MDSCR_ERR (1ULL << MDSCR_ERR_SHIFT) +#define MDSCR_SS_SHIFT 0 +#define MDSCR_SS (1ULL << MDSCR_SS_SHIFT) /* * Translation Table Base Register (TTBR) @@ -793,7 +908,7 @@ #define ARM_16K_TT_L1_SIZE 0x0000001000000000ULL /* size of area covered by a tte */ #define ARM_16K_TT_L1_OFFMASK 0x0000000fffffffffULL /* offset within an L1 
entry */ #define ARM_16K_TT_L1_SHIFT 36 /* page descriptor shift */ -#ifdef __ARM64_PMAP_SUBPAGE_L1__ +#if __ARM64_PMAP_SUBPAGE_L1__ && __ARM_16K_PG__ /* This config supports 512GB per TTBR. */ #define ARM_16K_TT_L1_INDEX_MASK 0x0000007000000000ULL /* mask for getting index into L1 table from virtual address */ #else /* __ARM64_PMAP_SUBPAGE_L1__ */ @@ -804,7 +919,7 @@ #define ARM_4K_TT_L1_SIZE 0x0000000040000000ULL /* size of area covered by a tte */ #define ARM_4K_TT_L1_OFFMASK 0x000000003fffffffULL /* offset within an L1 entry */ #define ARM_4K_TT_L1_SHIFT 30 /* page descriptor shift */ -#ifdef __ARM64_PMAP_SUBPAGE_L1__ +#if __ARM64_PMAP_SUBPAGE_L1__ && !__ARM_16K_PG__ /* This config supports 256GB per TTBR. */ #define ARM_4K_TT_L1_INDEX_MASK 0x0000003fc0000000ULL /* mask for getting index into L1 table from virtual address */ #else /* __ARM64_PMAP_SUBPAGE_L1__ */ @@ -1099,16 +1214,7 @@ #define ARM_TTE_BLOCK_WRITEABLE 0x0800000000000000ULL /* value for software writeable bit */ #define ARM_TTE_BLOCK_WRITEABLEMASK 0x0800000000000000ULL /* software writeable mask */ -#ifdef __ARM_16K_PG__ -/* - * TODO: Do we care about the low bits being unused? It should technically - * work either way, but masking them out should be future proof; it is only a - * matter of time before someone wants to shove something into the free bits. - */ -#define ARM_TTE_TABLE_MASK (0x0000ffffffffc000ULL) /* mask for extracting pointer to next table (works at any level) */ -#else -#define ARM_TTE_TABLE_MASK (0x0000fffffffff000ULL) /* mask for extracting pointer to next table (works at any level) */ -#endif +#define ARM_TTE_TABLE_MASK 0x0000fffffffff000ULL /* mask for extracting pointer to next table (works at any level) */ #define ARM_TTE_TABLE_APSHIFT 61 #define ARM_TTE_TABLE_AP(x) ((x)<> 1) + 1) << 46) +#define RTLBI_TG(_page_shift_) ((uint64_t)((((_page_shift_) - 12) >> 1) + 1) << 46) #define RTLBI_SCALE_SHIFT (44) #define RTLBI_NUM_SHIFT (39) @@ -1343,6 +1447,7 @@ typedef enum { ESR_EC_MCR_MRC_CP14_TRAP = 0x05, ESR_EC_LDC_STC_CP14_TRAP = 0x06, ESR_EC_TRAP_SIMD_FP = 0x07, + ESR_EC_PTRAUTH_INSTR_TRAP = 0x09, ESR_EC_MCRR_MRRC_CP14_TRAP = 0x0c, ESR_EC_ILLEGAL_INSTR_SET = 0x0e, ESR_EC_SVC_32 = 0x11, @@ -1388,7 +1493,7 @@ typedef enum { FSC_SYNC_PARITY_TT_L2 = 0x1E, FSC_SYNC_PARITY_TT_L3 = 0x1F, FSC_ALIGNMENT_FAULT = 0x21, - FSC_DEBUG_FAULT = 0x22 + FSC_DEBUG_FAULT = 0x22, } fault_status_t; #endif /* ASSEMBLER */ @@ -1438,14 +1543,15 @@ typedef enum { * * 24 9 8 7 6 5 0 * +---------------+--+--+-+---+----+ - * |000000000000000|EA|CM|0|WnR|DFSC| + * |000000000000000|EA|CM|S1PTW|WnR|DFSC| * +---------------+--+--+-+---+----+ * * where: - * EA: External Abort type - * CM: Cache Maintenance operation - * WnR: Write not Read - * DFSC: Data Fault Status Code + * EA: External Abort type + * CM: Cache Maintenance operation + * WnR: Write not Read + * S1PTW: Stage 2 exception on Stage 1 page table walk + * DFSC: Data Fault Status Code */ #define ISS_DA_EA_SHIFT 9 #define ISS_DA_EA (0x1 << ISS_DA_EA_SHIFT) @@ -1456,6 +1562,9 @@ typedef enum { #define ISS_DA_WNR_SHIFT 6 #define ISS_DA_WNR (0x1 << ISS_DA_WNR_SHIFT) +#define ISS_DA_S1PTW_SHIFT 7 +#define ISS_DA_S1PTW (0x1 << ISS_DA_S1PTW_SHIFT) + #define ISS_DA_FSC_MASK 0x3F #define ISS_DA_FSC(x) (x & ISS_DA_FSC_MASK) @@ -1511,6 +1620,9 @@ typedef enum { #define ISS_BRK_COMMENT(x) (x & ISS_BRK_COMMENT_MASK) + + + /* * Physical Address Register (EL1) */ @@ -1537,8 +1649,8 @@ typedef enum { #define CNTKCTL_EL1_EVENTI_SHIFT (0x4) /* Shift for same */ #define CNTKCTL_EL1_EVENTDIR (0x1 
<< 3) /* 1: one-to-zero transition of specified bit causes event */ #define CNTKCTL_EL1_EVNTEN (0x1 << 2) /* 1: enable event stream */ -#define CNTKCTL_EL1_PL0VCTEN (0x1 << 1) /* 1: EL0 access to physical timebase + frequency reg enabled */ -#define CNTKCTL_EL1_PL0PCTEN (0x1 << 0) /* 1: EL0 access to virtual timebase + frequency reg enabled */ +#define CNTKCTL_EL1_PL0VCTEN (0x1 << 1) /* 1: EL0 access to virtual timebase + frequency reg enabled */ +#define CNTKCTL_EL1_PL0PCTEN (0x1 << 0) /* 1: EL0 access to physical timebase + frequency reg enabled */ #define CNTV_CTL_EL0_ISTATUS (0x1 << 2) /* (read only): whether interrupt asserted */ #define CNTV_CTL_EL0_IMASKED (0x1 << 1) /* 1: interrupt masked */ @@ -1565,6 +1677,36 @@ typedef enum { #define MIDR_EL1_IMP_SHIFT 24 #define MIDR_EL1_IMP_MASK (0xff << MIDR_EL1_IMP_SHIFT) +#define MIDR_FIJI (0x002 << MIDR_EL1_PNUM_SHIFT) +#define MIDR_CAPRI (0x003 << MIDR_EL1_PNUM_SHIFT) +#define MIDR_MAUI (0x004 << MIDR_EL1_PNUM_SHIFT) +#define MIDR_ELBA (0x005 << MIDR_EL1_PNUM_SHIFT) +#define MIDR_CAYMAN (0x006 << MIDR_EL1_PNUM_SHIFT) +#define MIDR_MYST (0x007 << MIDR_EL1_PNUM_SHIFT) +#define MIDR_SKYE_MONSOON (0x008 << MIDR_EL1_PNUM_SHIFT) +#define MIDR_SKYE_MISTRAL (0x009 << MIDR_EL1_PNUM_SHIFT) +#define MIDR_CYPRUS_VORTEX (0x00B << MIDR_EL1_PNUM_SHIFT) +#define MIDR_CYPRUS_TEMPEST (0x00C << MIDR_EL1_PNUM_SHIFT) +#define MIDR_M9 (0x00F << MIDR_EL1_PNUM_SHIFT) +#define MIDR_ARUBA_VORTEX (0x010 << MIDR_EL1_PNUM_SHIFT) +#define MIDR_ARUBA_TEMPEST (0x011 << MIDR_EL1_PNUM_SHIFT) + +#ifdef APPLELIGHTNING +#define MIDR_CEBU_LIGHTNING (0x012 << MIDR_EL1_PNUM_SHIFT) +#define MIDR_CEBU_THUNDER (0x013 << MIDR_EL1_PNUM_SHIFT) +#define MIDR_TURKS (0x026 << MIDR_EL1_PNUM_SHIFT) +#endif + + + +/* + * Apple-ISA-Extensions ID Register. 
+ */ +#define AIDR_MUL53 (1 << 0) +#define AIDR_WKDM (1 << 1) +#define AIDR_ARCHRETENTION (1 << 2) + + /* * CoreSight debug registers */ @@ -1822,6 +1964,10 @@ typedef enum { #define ID_AA64ISAR0_EL1_CRC32_MASK (0xfull << ID_AA64ISAR0_EL1_CRC32_OFFSET) #define ID_AA64ISAR0_EL1_CRC32_EN (1ull << ID_AA64ISAR0_EL1_CRC32_OFFSET) +#define ID_AA64ISAR0_EL1_SHA3_OFFSET 32 +#define ID_AA64ISAR0_EL1_SHA3_MASK (0xfull << ID_AA64ISAR0_EL1_SHA3_OFFSET) +#define ID_AA64ISAR0_EL1_SHA3_EN (1ull << ID_AA64ISAR0_EL1_SHA3_OFFSET) + #define ID_AA64ISAR0_EL1_SHA2_OFFSET 12 #define ID_AA64ISAR0_EL1_SHA2_MASK (0xfull << ID_AA64ISAR0_EL1_SHA2_OFFSET) #define ID_AA64ISAR0_EL1_SHA2_EN (1ull << ID_AA64ISAR0_EL1_SHA2_OFFSET) @@ -1859,12 +2005,17 @@ typedef enum { #define APSTATE_G_SHIFT (0) #define APSTATE_P_SHIFT (1) #define APSTATE_A_SHIFT (2) +#define APSTATE_AP_MASK ((1ULL << APSTATE_A_SHIFT) | (1ULL << APSTATE_P_SHIFT)) #ifdef __APSTS_SUPPORTED__ #define APCTL_EL1_AppleMode (1ULL << 0) #define APCTL_EL1_KernKeyEn (1ULL << 1) #define APCTL_EL1_EnAPKey0 (1ULL << 2) #define APCTL_EL1_EnAPKey1 (1ULL << 3) +#ifdef HAS_APCTL_EL1_USERKEYEN +#define APCTL_EL1_UserKeyEn_OFFSET 4 +#define APCTL_EL1_UserKeyEn (1ULL << APCTL_EL1_UserKeyEn_OFFSET) +#endif /* HAS_APCTL_EL1_USERKEYEN */ #define APSTS_EL1_MKEYVld (1ULL << 0) #else #define APCTL_EL1_AppleMode (1ULL << 0) @@ -1872,61 +2023,313 @@ typedef enum { #define APCTL_EL1_KernKeyEn (1ULL << 2) #endif +#define ACTLR_EL1_EnTSO (1ULL << 1) +#define ACTLR_EL1_EnAPFLG (1ULL << 4) +#define ACTLR_EL1_EnAFP (1ULL << 5) +#define ACTLR_EL1_EnPRSV (1ULL << 6) + #define ACTLR_EL1_DisHWP_OFFSET 3 #define ACTLR_EL1_DisHWP_MASK (1ULL << ACTLR_EL1_DisHWP_OFFSET) #define ACTLR_EL1_DisHWP ACTLR_EL1_DisHWP_MASK +#if HAS_IC_INVAL_FILTERS +#define ACTLR_EL1_IC_IVAU_EnASID_OFFSET 12 +#define ACTLR_EL1_IC_IVAU_EnASID_MASK (1ULL << ACTLR_EL1_IC_IVAU_EnASID_OFFSET) +#define ACTLR_EL1_IC_IVAU_EnASID ACTLR_EL1_IC_IVAU_EnASID_MASK +#endif /* HAS_IC_INVAL_FILTERS */ + +#define AFPCR_DAZ_SHIFT (0) +#define AFPCR_FTZ_SHIFT (1) + #if defined(HAS_APPLE_PAC) // The value of ptrauth_string_discriminator("recover"), hardcoded so it can be used from assembly code #define PAC_DISCRIMINATOR_RECOVER 0x1e02 #endif + +#define CTR_EL0_L1Ip_OFFSET 14 +#define CTR_EL0_L1Ip_VIPT (2ULL << CTR_EL0_L1Ip_OFFSET) +#define CTR_EL0_L1Ip_PIPT (3ULL << CTR_EL0_L1Ip_OFFSET) +#define CTR_EL0_L1Ip_MASK (3ULL << CTR_EL0_L1Ip_OFFSET) + + #ifdef __ASSEMBLER__ /* - * Compute CPU version: - * Version is constructed as [4 bits of MIDR variant]:[4 bits of MIDR revision] - * - * Where the "variant" is the major number and the "revision" is the minor number. - * - * For example: - * Cyclone A0 is variant 0, revision 0, i.e. 0. - * Cyclone B0 is variant 1, revision 0, i.e. 0x10 - * $0 - register to place value in + * Conditionally write to system/special-purpose register. + * The register is written to only when the first two arguments + * do not match. If they do match, the macro jumps to a + * caller-provided label. + * The _ISB variant also conditionally issues an ISB after the MSR. 
+ * + * $0 - System/special-purpose register to modify + * $1 - Register containing current FPCR value + * $2 - Register containing expected value + * $3 - Label to jump to when register is already set to expected value */ -.macro GET_MIDR_CPU_VERSION -mrs $0, MIDR_EL1 // Read MIDR_EL1 for CPUID -bfi $0, $0, #(MIDR_EL1_VAR_SHIFT - 4), #4 // move bits 3:0 (revision) to 19:16 (below variant) to get values adjacent -ubfx $0, $0, #(MIDR_EL1_VAR_SHIFT - 4), #8 // And extract the concatenated bitstring to beginning of register +.macro CMSR +cmp $1, $2 + +/* Skip expensive MSR if not required */ +b.eq $3f +msr $0, $2 +.endmacro + +.macro CMSR_ISB +CMSR $0, $1, $2, $3 +isb sy .endmacro /* - * To apply a workaround for CPU versions less than a given value - * (e.g. earlier than when a fix arrived) - * - * $0 - scratch register1 - * $1 - version at which to stop applying workaround - * $2 - label to branch to (at end of workaround) + * Modify FPCR only if it does not contain the XNU default value. + * $0 - Register containing current FPCR value + * $1 - Scratch register + * $2 - Label to jump to when FPCR is already set to default value */ -.macro SKIP_IF_CPU_VERSION_GREATER_OR_EQUAL -GET_MIDR_CPU_VERSION $0 -cmp $0, $1 -b.pl $2 // Unsigned "greater or equal" +.macro SANITIZE_FPCR +mov $1, #FPCR_DEFAULT +CMSR FPCR, $0, $1, $2 .endmacro /* - * To apply a workaround for CPU versions greater than a given value - * (e.g. starting when a bug was introduced) - * - * $0 - scratch register1 - * $1 - version at which to stop applying workaround - * $2 - label to branch to (at end of workaround) + * Family of macros that can be used to protect code sections such that they + * are only executed on a particular SoC/Revision/CPU, and skipped otherwise. + * All macros will forward-jump to 1f when the condition is not matched. + * This label may be defined manually, or implicitly through the use of + * the EXEC_END macro. + * For cores, XX can be: EQ (equal), ALL (don't care). + * For revisions, XX can be: EQ (equal), LO (lower than), HS (higher or same), ALL (don't care). */ -.macro SKIP_IF_CPU_VERSION_LESS_THAN -GET_MIDR_CPU_VERSION $0 -cmp $0, $1 -b.mi $2 // Unsigned "strictly less than" + +/* + * $0 - MIDR_SOC[_CORE], e.g. MIDR_ARUBA_VORTEX + * $1 - CPU_VERSION_XX, e.g. CPU_VERSION_B1 + * $2 - GPR containing MIDR_EL1 value + * $3 - Scratch register + */ +.macro EXEC_COREEQ_REVEQ +and $3, $2, #MIDR_EL1_PNUM_MASK +cmp $3, $0 +b.ne 1f + +mov $3, $2 +bfi $3, $3, #(MIDR_EL1_VAR_SHIFT - 4), #4 +ubfx $3, $3, #(MIDR_EL1_VAR_SHIFT - 4), #8 +cmp $3, $1 +b.ne 1f +.endmacro + +.macro EXEC_COREEQ_REVLO +and $3, $2, #MIDR_EL1_PNUM_MASK +cmp $3, $0 +b.ne 1f + +mov $3, $2 +bfi $3, $3, #(MIDR_EL1_VAR_SHIFT - 4), #4 +ubfx $3, $3, #(MIDR_EL1_VAR_SHIFT - 4), #8 +cmp $3, $1 +b.pl 1f +.endmacro + +.macro EXEC_COREEQ_REVHS +and $3, $2, #MIDR_EL1_PNUM_MASK +cmp $3, $0 +b.ne 1f + +mov $3, $2 +bfi $3, $3, #(MIDR_EL1_VAR_SHIFT - 4), #4 +ubfx $3, $3, #(MIDR_EL1_VAR_SHIFT - 4), #8 +cmp $3, $1 +b.mi 1f +.endmacro + +/* + * $0 - CPU_VERSION_XX, e.g. 
CPU_VERSION_B1 + * $1 - GPR containing MIDR_EL1 value + * $2 - Scratch register + */ +.macro EXEC_COREALL_REVEQ +mov $2, $1 +bfi $2, $2, #(MIDR_EL1_VAR_SHIFT - 4), #4 +ubfx $2, $2, #(MIDR_EL1_VAR_SHIFT - 4), #8 +cmp $2, $0 +b.ne 1f +.endmacro + +.macro EXEC_COREALL_REVLO +mov $2, $1 +bfi $2, $2, #(MIDR_EL1_VAR_SHIFT - 4), #4 +ubfx $2, $2, #(MIDR_EL1_VAR_SHIFT - 4), #8 +cmp $2, $0 +b.pl 1f +.endmacro + +.macro EXEC_COREALL_REVHS +mov $2, $1 +bfi $2, $2, #(MIDR_EL1_VAR_SHIFT - 4), #4 +ubfx $2, $2, #(MIDR_EL1_VAR_SHIFT - 4), #8 +cmp $2, $0 +b.mi 1f +.endmacro + +/* + * $0 - MIDR_SOC[_CORE], e.g. MIDR_ARUBA_VORTEX + * $1 - GPR containing MIDR_EL1 value + * $2 - Scratch register + */ +.macro EXEC_COREEQ_REVALL +and $2, $1, #MIDR_EL1_PNUM_MASK +cmp $2, $0 +b.ne 1f +.endmacro + +/* + * $0 - CPU_VERSION_XX, e.g. CPU_VERSION_B1 + * $1 - GPR containing MIDR_EL1 value + * $2 - Scratch register + */ +.macro EXEC_PCORE_REVEQ +mrs $2, MPIDR_EL1 +and $2, $2, #(MPIDR_PNE) +cmp $2, xzr +b.eq 1f + +mov $2, $1 +bfi $2, $2, #(MIDR_EL1_VAR_SHIFT - 4), #4 +ubfx $2, $2, #(MIDR_EL1_VAR_SHIFT - 4), #8 +cmp $2, $0 +b.ne 1f +.endmacro + +.macro EXEC_PCORE_REVLO +mrs $2, MPIDR_EL1 +and $2, $2, #(MPIDR_PNE) +cmp $2, xzr +b.eq 1f + +mov $2, $1 +bfi $2, $2, #(MIDR_EL1_VAR_SHIFT - 4), #4 +ubfx $2, $2, #(MIDR_EL1_VAR_SHIFT - 4), #8 +cmp $2, $0 +b.pl 1f +.endmacro + +.macro EXEC_PCORE_REVHS +mrs $2, MPIDR_EL1 +and $2, $2, #(MPIDR_PNE) +cmp $2, xzr +b.eq 1f + +mov $2, $1 +bfi $2, $2, #(MIDR_EL1_VAR_SHIFT - 4), #4 +ubfx $2, $2, #(MIDR_EL1_VAR_SHIFT - 4), #8 +cmp $2, $0 +b.mi 1f +.endmacro + +.macro EXEC_ECORE_REVEQ +mrs $2, MPIDR_EL1 +and $2, $2, #(MPIDR_PNE) +cmp $2, xzr +b.ne 1f + +mov $2, $1 +bfi $2, $2, #(MIDR_EL1_VAR_SHIFT - 4), #4 +ubfx $2, $2, #(MIDR_EL1_VAR_SHIFT - 4), #8 +cmp $2, $0 +b.ne 1f +.endmacro + +.macro EXEC_ECORE_REVLO +mrs $2, MPIDR_EL1 +and $2, $2, #(MPIDR_PNE) +cmp $2, xzr +b.ne 1f + +mov $2, $1 +bfi $2, $2, #(MIDR_EL1_VAR_SHIFT - 4), #4 +ubfx $2, $2, #(MIDR_EL1_VAR_SHIFT - 4), #8 +cmp $2, $0 +b.pl 1f +.endmacro + +.macro EXEC_ECORE_REVHS +mrs $2, MPIDR_EL1 +and $2, $2, #(MPIDR_PNE) +cmp $2, xzr +b.ne 1f + +mov $2, $1 +bfi $2, $2, #(MIDR_EL1_VAR_SHIFT - 4), #4 +ubfx $2, $2, #(MIDR_EL1_VAR_SHIFT - 4), #8 +cmp $2, $0 +b.mi 1f +.endmacro + +/* + * $0 - GPR containing MIDR_EL1 value + * $1 - Scratch register + */ +.macro EXEC_PCORE_REVALL +mrs $1, MPIDR_EL1 +and $1, $1, #(MPIDR_PNE) +cmp $1, xzr +b.eq 1f +.endmacro + +.macro EXEC_ECORE_REVALL +mrs $1, MPIDR_EL1 +and $1, $1, #(MPIDR_PNE) +cmp $1, xzr +b.ne 1f +.endmacro + + + +/* + * Macro that defines the label that all EXEC_COREXX_REVXX macros jump to. + */ +.macro EXEC_END +1: +.endmacro + +/* + * Sets bits in an SPR register. + * arg0: Name of the register to be accessed. + * arg1: Mask of bits to be set. + * arg2: Scratch register + */ +.macro HID_SET_BITS +mrs $2, $0 +orr $2, $2, $1 +msr $0, $2 +.endmacro + +/* + * Clears bits in an SPR register. + * arg0: Name of the register to be accessed. + * arg1: Mask of bits to be cleared. + * arg2: Scratch register + */ +.macro HID_CLEAR_BITS +mrs $2, $0 +bic $2, $2, $1 +msr $0, $2 +.endmacro + +/* + * Clears bits in an SPR register. + * arg0: Name of the register to be accessed. + * arg1: Mask of bits to be cleared. 
+ * arg2: Value to insert + * arg3: Scratch register + */ +.macro HID_INSERT_BITS +mrs $3, $0 +bic $3, $3, $1 +orr $3, $3, $2 +msr $0, $3 .endmacro /* @@ -1962,4 +2365,5 @@ nop #define PPL_STATE_EXCEPTION 3 #endif + #endif /* _ARM64_PROC_REG_H_ */ diff --git a/osfmk/arm64/sleh.c b/osfmk/arm64/sleh.c index 87891018b..745e9d17f 100644 --- a/osfmk/arm64/sleh.c +++ b/osfmk/arm64/sleh.c @@ -36,10 +36,12 @@ #include #include #include +#include #include #include #include +#include #include #include @@ -53,6 +55,7 @@ #include #include +#include #include #include @@ -61,7 +64,9 @@ #include #endif -#include +#include + + #ifndef __arm64__ #error Should only be compiling for arm64. @@ -93,6 +98,13 @@ #define STR1(x) #x #define STR(x) STR1(x) +#define ARM64_KDBG_CODE_KERNEL (0 << 8) +#define ARM64_KDBG_CODE_USER (1 << 8) +#define ARM64_KDBG_CODE_GUEST (2 << 8) + +_Static_assert(ARM64_KDBG_CODE_GUEST <= KDBG_CODE_MAX, "arm64 KDBG trace codes out of range"); +_Static_assert(ARM64_KDBG_CODE_GUEST <= UINT16_MAX, "arm64 KDBG trace codes out of range"); + void panic_with_thread_kernel_state(const char *msg, arm_saved_state_t *ss) __abortlike; void sleh_synchronous_sp1(arm_context_t *, uint32_t, vm_offset_t) __abortlike; @@ -109,11 +121,12 @@ static void handle_svc(arm_saved_state_t *); static void handle_mach_absolute_time_trap(arm_saved_state_t *); static void handle_mach_continuous_time_trap(arm_saved_state_t *); -static void handle_msr_trap(arm_saved_state_t *state, uint32_t iss); +static void handle_msr_trap(arm_saved_state_t *state, uint32_t esr); extern kern_return_t arm_fast_fault(pmap_t, vm_map_address_t, vm_prot_t, bool, bool); static void handle_uncategorized(arm_saved_state_t *); +static void handle_kernel_breakpoint(arm_saved_state_t *, uint32_t) __dead2; static void handle_breakpoint(arm_saved_state_t *, uint32_t) __dead2; typedef void (*abort_inspector_t)(uint32_t, fault_status_t *, vm_prot_t *); @@ -124,9 +137,9 @@ static int is_vm_fault(fault_status_t); static int is_translation_fault(fault_status_t); static int is_alignment_fault(fault_status_t); -typedef void (*abort_handler_t)(arm_saved_state_t *, uint32_t, vm_offset_t, fault_status_t, vm_prot_t, vm_offset_t); -static void handle_user_abort(arm_saved_state_t *, uint32_t, vm_offset_t, fault_status_t, vm_prot_t, vm_offset_t); -static void handle_kernel_abort(arm_saved_state_t *, uint32_t, vm_offset_t, fault_status_t, vm_prot_t, vm_offset_t); +typedef void (*abort_handler_t)(arm_saved_state_t *, uint32_t, vm_offset_t, fault_status_t, vm_prot_t, vm_offset_t, expected_fault_handler_t); +static void handle_user_abort(arm_saved_state_t *, uint32_t, vm_offset_t, fault_status_t, vm_prot_t, vm_offset_t, expected_fault_handler_t); +static void handle_kernel_abort(arm_saved_state_t *, uint32_t, vm_offset_t, fault_status_t, vm_prot_t, vm_offset_t, expected_fault_handler_t); static void handle_pc_align(arm_saved_state_t *ss) __dead2; static void handle_sp_align(arm_saved_state_t *ss) __dead2; @@ -136,7 +149,7 @@ static void handle_fp_trap(arm_saved_state_t *ss, uint32_t esr) __dead2; static void handle_watchpoint(vm_offset_t fault_addr) __dead2; -static void handle_abort(arm_saved_state_t *, uint32_t, vm_offset_t, vm_offset_t, abort_inspector_t, abort_handler_t); +static void handle_abort(arm_saved_state_t *, uint32_t, vm_offset_t, vm_offset_t, abort_inspector_t, abort_handler_t, expected_fault_handler_t); static void handle_user_trapped_instruction32(arm_saved_state_t *, uint32_t esr) __dead2; @@ -148,6 +161,8 @@ void mach_syscall_trace_exit(unsigned 
int retval, unsigned int call_number); struct uthread; struct proc; +typedef uint32_t arm64_instr_t; + extern void unix_syscall(struct arm_saved_state * regs, thread_t thread_act, struct uthread * uthread, struct proc * proc); @@ -181,15 +196,6 @@ perfCallback tempDTraceTrapHook = NULL; /* Pointer to DTrace fbt trap hook routi extern boolean_t pgtrace_enabled; #endif -#if __ARM_PAN_AVAILABLE__ -#ifdef CONFIG_XNUPOST -extern vm_offset_t pan_test_addr; -extern vm_offset_t pan_ro_addr; -extern volatile int pan_exception_level; -extern volatile char pan_fault_value; -#endif -#endif - #if HAS_TWO_STAGE_SPR_LOCK #ifdef CONFIG_XNUPOST extern volatile vm_offset_t spr_lock_test_addr; @@ -197,6 +203,12 @@ extern volatile uint32_t spr_lock_exception_esr; #endif #endif +#if INTERRUPT_MASKED_DEBUG +extern boolean_t interrupt_masked_debug; +#endif + +extern void arm64_thread_exception_return(void) __dead2; + #if defined(APPLETYPHOON) #define CPU_NAME "Typhoon" #elif defined(APPLETWISTER) @@ -229,8 +241,80 @@ void cpu_signal_handler(void); extern unsigned int gFastIPI; #endif /* defined(HAS_IPI) */ +static arm_saved_state64_t *original_faulting_state = NULL; + +TUNABLE(bool, fp_exceptions_enabled, "-fp_exceptions", false); + extern vm_offset_t static_memory_end; +static inline int +is_vm_fault(fault_status_t status) +{ + switch (status) { + case FSC_TRANSLATION_FAULT_L0: + case FSC_TRANSLATION_FAULT_L1: + case FSC_TRANSLATION_FAULT_L2: + case FSC_TRANSLATION_FAULT_L3: + case FSC_ACCESS_FLAG_FAULT_L1: + case FSC_ACCESS_FLAG_FAULT_L2: + case FSC_ACCESS_FLAG_FAULT_L3: + case FSC_PERMISSION_FAULT_L1: + case FSC_PERMISSION_FAULT_L2: + case FSC_PERMISSION_FAULT_L3: + return TRUE; + default: + return FALSE; + } +} + +static inline int +is_translation_fault(fault_status_t status) +{ + switch (status) { + case FSC_TRANSLATION_FAULT_L0: + case FSC_TRANSLATION_FAULT_L1: + case FSC_TRANSLATION_FAULT_L2: + case FSC_TRANSLATION_FAULT_L3: + return TRUE; + default: + return FALSE; + } +} + +static inline int +is_permission_fault(fault_status_t status) +{ + switch (status) { + case FSC_PERMISSION_FAULT_L1: + case FSC_PERMISSION_FAULT_L2: + case FSC_PERMISSION_FAULT_L3: + return TRUE; + default: + return FALSE; + } +} + +static inline int +is_alignment_fault(fault_status_t status) +{ + return status == FSC_ALIGNMENT_FAULT; +} + +static inline int +is_parity_error(fault_status_t status) +{ + switch (status) { + case FSC_SYNC_PARITY: + case FSC_ASYNC_PARITY: + case FSC_SYNC_PARITY_TT_L1: + case FSC_SYNC_PARITY_TT_L2: + case FSC_SYNC_PARITY_TT_L3: + return TRUE; + default: + return FALSE; + } +} + static inline unsigned __ror(unsigned value, unsigned shift) { @@ -376,8 +460,10 @@ arm64_platform_error(arm_saved_state_t *state, uint32_t esr, vm_offset_t far) kernel_integrity_error_handler(esr, far); #endif - if (cdp->platform_error_handler != (platform_error_handler_t) NULL) { - (*(platform_error_handler_t)cdp->platform_error_handler)(cdp->cpu_id, far); + if (PE_handle_platform_error(far)) { + return; + } else if (cdp->platform_error_handler != NULL) { + cdp->platform_error_handler(cdp->cpu_id, far); } else { arm64_implementation_specific_error(state, esr, far); } @@ -391,6 +477,8 @@ panic_with_thread_kernel_state(const char *msg, arm_saved_state_t *ss) ss_valid = is_saved_state64(ss); arm_saved_state64_t *state = saved_state64(ss); + os_atomic_cmpxchg(&original_faulting_state, NULL, state, seq_cst); + panic_plain("%s at pc 0x%016llx, lr 0x%016llx (saved state: %p%s)\n" "\t x0: 0x%016llx x1: 0x%016llx x2: 0x%016llx x3: 
0x%016llx\n" "\t x4: 0x%016llx x5: 0x%016llx x6: 0x%016llx x7: 0x%016llx\n" @@ -426,8 +514,8 @@ sleh_synchronous_sp1(arm_context_t *context, uint32_t esr, vm_offset_t far __unu if (IS_ARM_GDB_TRAP(instr)) { DebuggerCall(EXC_BREAKPOINT, state); } - // Intentionally fall through to panic if we return from the debugger } + OS_FALLTHROUGH; // panic if we return from the debugger default: panic_with_thread_kernel_state("Synchronous exception taken while SP1 selected", state); } @@ -448,6 +536,38 @@ handle_msr_write_from_xnupost(arm_saved_state_t *state, uint32_t esr) } #endif +__attribute__((noreturn)) +void +thread_exception_return() +{ + thread_t thread = current_thread(); + if (thread->machine.exception_trace_code != 0) { + KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, + MACHDBG_CODE(DBG_MACH_EXCP_SYNC_ARM, thread->machine.exception_trace_code) | DBG_FUNC_END, 0, 0, 0, 0, 0); + thread->machine.exception_trace_code = 0; + } + + arm64_thread_exception_return(); + __builtin_unreachable(); +} + +/* + * check whether task vtimers are running and set thread and CPU BSD AST + * + * must be called with interrupts masked so updates of fields are atomic + * must be emitted inline to avoid generating an FBT probe on the exception path + * + */ +__attribute__((__always_inline__)) +static inline void +task_vtimer_check(thread_t thread) +{ + if (__improbable(thread->task->vtimers)) { + thread->ast |= AST_BSD; + thread->machine.CpuDatap->cpu_pending_ast |= AST_BSD; + } +} + void sleh_synchronous(arm_context_t *context, uint32_t esr, vm_offset_t far) { @@ -458,15 +578,50 @@ sleh_synchronous(arm_context_t *context, uint32_t esr, vm_offset_t far) #if MACH_ASSERT int preemption_level = get_preemption_level(); #endif + expected_fault_handler_t expected_fault_handler = NULL; +#ifdef CONFIG_XNUPOST + expected_fault_handler_t saved_expected_fault_handler = NULL; + uintptr_t saved_expected_fault_addr = 0; +#endif /* CONFIG_XNUPOST */ ASSERT_CONTEXT_SANITY(context); + task_vtimer_check(thread); + +#if CONFIG_DTRACE + /* + * Handle kernel DTrace probes as early as possible to minimize the likelihood + * that this path will itself trigger a DTrace probe, which would lead to infinite + * probe recursion. + */ + if (__improbable((class == ESR_EC_UNCATEGORIZED) && tempDTraceTrapHook && + (tempDTraceTrapHook(EXC_BAD_INSTRUCTION, state, 0, 0) == KERN_SUCCESS))) { + return; + } +#endif + bool is_user = PSR64_IS_USER(get_saved_state_cpsr(state)); + + /* + * Use KERNEL_DEBUG_CONSTANT_IST here to avoid producing tracepoints + * that would disclose the behavior of PT_DENY_ATTACH processes. + */ + if (is_user) { + thread->machine.exception_trace_code = (uint16_t)(ARM64_KDBG_CODE_USER | class); + KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, + MACHDBG_CODE(DBG_MACH_EXCP_SYNC_ARM, thread->machine.exception_trace_code) | DBG_FUNC_START, + esr, far, get_saved_state_pc(state), 0, 0); + } else { + KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, + MACHDBG_CODE(DBG_MACH_EXCP_SYNC_ARM, ARM64_KDBG_CODE_KERNEL | class) | DBG_FUNC_START, + esr, VM_KERNEL_ADDRHIDE(far), VM_KERNEL_UNSLIDE(get_saved_state_pc(state)), 0, 0); + } + if (__improbable(ESR_INSTR_IS_2BYTES(esr))) { /* * We no longer support 32-bit, which means no 2-byte * instructions. 
*/ - if (PSR64_IS_USER(get_saved_state_cpsr(state))) { + if (is_user) { panic("Exception on 2-byte instruction, " "context=%p, esr=%#x, far=%p", context, esr, (void *)far); @@ -481,6 +636,20 @@ sleh_synchronous(arm_context_t *context, uint32_t esr, vm_offset_t far) thread->recover = (vm_offset_t)NULL; } +#ifdef CONFIG_XNUPOST + if (thread->machine.expected_fault_handler != NULL) { + saved_expected_fault_handler = thread->machine.expected_fault_handler; + saved_expected_fault_addr = thread->machine.expected_fault_addr; + + thread->machine.expected_fault_handler = NULL; + thread->machine.expected_fault_addr = 0; + + if (saved_expected_fault_addr == far) { + expected_fault_handler = saved_expected_fault_handler; + } + } +#endif /* CONFIG_XNUPOST */ + /* Inherit the interrupt masks from previous context */ if (SPSR_INTERRUPTS_ENABLED(get_saved_state_cpsr(state))) { ml_set_interrupts_enabled(TRUE); @@ -488,7 +657,7 @@ sleh_synchronous(arm_context_t *context, uint32_t esr, vm_offset_t far) switch (class) { case ESR_EC_SVC_64: - if (!is_saved_state64(state) || !PSR64_IS_USER(get_saved_state_cpsr(state))) { + if (!is_saved_state64(state) || !is_user) { panic("Invalid SVC_64 context"); } @@ -496,30 +665,24 @@ sleh_synchronous(arm_context_t *context, uint32_t esr, vm_offset_t far) break; case ESR_EC_DABORT_EL0: - handle_abort(state, esr, far, recover, inspect_data_abort, handle_user_abort); - thread_exception_return(); + handle_abort(state, esr, far, recover, inspect_data_abort, handle_user_abort, expected_fault_handler); + break; case ESR_EC_MSR_TRAP: - handle_msr_trap(state, ESR_ISS(esr)); + handle_msr_trap(state, esr); break; + case ESR_EC_IABORT_EL0: - handle_abort(state, esr, far, recover, inspect_instruction_abort, handle_user_abort); - thread_exception_return(); + handle_abort(state, esr, far, recover, inspect_instruction_abort, handle_user_abort, expected_fault_handler); + break; case ESR_EC_IABORT_EL1: -#if defined(KERNEL_INTEGRITY_CTRR) && defined(CONFIG_XNUPOST) - { - extern volatile vm_offset_t ctrr_test_va; - if (ctrr_test_va && far == ctrr_test_va) { - extern volatile uint64_t ctrr_exception_esr; - ctrr_exception_esr = esr; - /* return to the instruction immediately after the call to NX page */ - set_saved_state_pc(state, get_saved_state_lr(state)); - break; - } +#ifdef CONFIG_XNUPOST + if ((expected_fault_handler != NULL) && expected_fault_handler(state)) { + break; } -#endif +#endif /* CONFIG_XNUPOST */ panic_with_thread_kernel_state("Kernel instruction fetch abort", state); @@ -528,7 +691,7 @@ sleh_synchronous(arm_context_t *context, uint32_t esr, vm_offset_t far) __builtin_unreachable(); case ESR_EC_DABORT_EL1: - handle_abort(state, esr, far, recover, inspect_data_abort, handle_kernel_abort); + handle_abort(state, esr, far, recover, inspect_data_abort, handle_kernel_abort, expected_fault_handler); break; case ESR_EC_UNCATEGORIZED: @@ -552,7 +715,7 @@ sleh_synchronous(arm_context_t *context, uint32_t esr, vm_offset_t far) case ESR_EC_BRK_AARCH64: if (PSR64_IS_KERNEL(get_saved_state_cpsr(state))) { - panic_with_thread_kernel_state("Break instruction exception from kernel. Panic (by design)", state); + handle_kernel_breakpoint(state, esr); } else { handle_breakpoint(state, esr); } @@ -635,16 +798,32 @@ sleh_synchronous(arm_context_t *context, uint32_t esr, vm_offset_t far) handle_fp_trap(state, esr); __builtin_unreachable(); - default: panic("Unsupported synchronous exception. 
state=%p class=%u esr=%u far=%p", state, class, esr, (void *)far); __builtin_unreachable(); } +#ifdef CONFIG_XNUPOST + if (saved_expected_fault_handler != NULL) { + thread->machine.expected_fault_handler = saved_expected_fault_handler; + thread->machine.expected_fault_addr = saved_expected_fault_addr; + } +#endif /* CONFIG_XNUPOST */ + if (recover) { thread->recover = recover; } + if (is_user) { + KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, + MACHDBG_CODE(DBG_MACH_EXCP_SYNC_ARM, thread->machine.exception_trace_code) | DBG_FUNC_END, + esr, far, get_saved_state_pc(state), 0, 0); + thread->machine.exception_trace_code = 0; + } else { + KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, + MACHDBG_CODE(DBG_MACH_EXCP_SYNC_ARM, ARM64_KDBG_CODE_KERNEL | class) | DBG_FUNC_END, + esr, VM_KERNEL_ADDRHIDE(far), VM_KERNEL_UNSLIDE(get_saved_state_pc(state)), 0, 0); + } #if MACH_ASSERT if (preemption_level != get_preemption_level()) { panic("synchronous exception changed preemption level from %d to %d", preemption_level, get_preemption_level()); @@ -667,9 +846,6 @@ handle_uncategorized(arm_saved_state_t *state) COPYIN(get_saved_state_pc(state), (char *)&instr, sizeof(instr)); #if CONFIG_DTRACE - if (tempDTraceTrapHook && (tempDTraceTrapHook(exception, state, 0, 0) == KERN_SUCCESS)) { - return; - } if (PSR64_IS_USER64(get_saved_state_cpsr(state))) { /* @@ -709,7 +885,6 @@ handle_uncategorized(arm_saved_state_t *state) if (PSR64_IS_KERNEL(get_saved_state_cpsr(state))) { if (IS_ARM_GDB_TRAP(instr)) { boolean_t interrupt_state; - vm_offset_t kstackptr; exception = EXC_BREAKPOINT; interrupt_state = ml_set_interrupts_enabled(FALSE); @@ -717,10 +892,7 @@ handle_uncategorized(arm_saved_state_t *state) /* Save off the context here (so that the debug logic * can see the original state of this thread). */ - kstackptr = (vm_offset_t) current_thread()->machine.kstackptr; - if (kstackptr) { - copy_signed_thread_state(&((thread_kernel_state_t) kstackptr)->machine.ss, state); - } + current_thread()->machine.kpcb = state; /* Hop into the debugger (typically either due to a * fatal exception, an explicit panic, or a stackshot @@ -759,8 +931,52 @@ brk_comment_is_ptrauth(uint16_t comment) return comment >= ptrauth_brk_comment_base && comment <= ptrauth_brk_comment_base + ptrauth_key_asdb; } + +static inline const char * +brk_comment_to_ptrauth_key(uint16_t comment) +{ + switch (comment - ptrauth_brk_comment_base) { + case ptrauth_key_asia: + return "IA"; + case ptrauth_key_asib: + return "IB"; + case ptrauth_key_asda: + return "DA"; + case ptrauth_key_asdb: + return "DB"; + default: + __builtin_unreachable(); + } +} #endif /* __has_feature(ptrauth_calls) */ +static void +handle_kernel_breakpoint(arm_saved_state_t *state, uint32_t esr) +{ + uint16_t comment = ISS_BRK_COMMENT(esr); + +#if __has_feature(ptrauth_calls) + if (brk_comment_is_ptrauth(comment)) { + const char *msg_fmt = "Break 0x%04X instruction exception from kernel. Ptrauth failure with %s key resulted in 0x%016llx"; + char msg[strlen(msg_fmt) + - strlen("0x%04X") + strlen("0xFFFF") + - strlen("%s") + strlen("IA") + - strlen("0x%016llx") + strlen("0xFFFFFFFFFFFFFFFF") + + 1]; + const char *key = brk_comment_to_ptrauth_key(comment); + snprintf(msg, sizeof(msg), msg_fmt, comment, key, saved_state64(state)->x[16]); + + panic_with_thread_kernel_state(msg, state); + } +#endif /* __has_feature(ptrauth_calls) */ + + const char *msg_fmt = "Break 0x%04X instruction exception from kernel. 
Panic (by design)"; + char msg[strlen(msg_fmt) - strlen("0x%04X") + strlen("0xFFFF") + 1]; + snprintf(msg, sizeof(msg), msg_fmt, comment); + + panic_with_thread_kernel_state(msg, state); +} + static void handle_breakpoint(arm_saved_state_t *state, uint32_t esr __unused) { @@ -768,12 +984,12 @@ handle_breakpoint(arm_saved_state_t *state, uint32_t esr __unused) mach_exception_data_type_t codes[2] = {EXC_ARM_BREAKPOINT}; mach_msg_type_number_t numcodes = 2; -#if __has_feature(ptrauth_calls) +#if __has_feature(ptrauth_calls) && !__ARM_ARCH_8_6__ if (ESR_EC(esr) == ESR_EC_BRK_AARCH64 && brk_comment_is_ptrauth(ISS_BRK_COMMENT(esr))) { exception |= EXC_PTRAUTH_BIT; } -#endif /* __has_feature(ptrauth_calls) */ +#endif /* __has_feature(ptrauth_calls) && !__ARM_ARCH_8_6__ */ codes[1] = get_saved_state_pc(state); exception_triage(exception, codes, numcodes); @@ -794,13 +1010,13 @@ handle_watchpoint(vm_offset_t fault_addr) static void handle_abort(arm_saved_state_t *state, uint32_t esr, vm_offset_t fault_addr, vm_offset_t recover, - abort_inspector_t inspect_abort, abort_handler_t handler) + abort_inspector_t inspect_abort, abort_handler_t handler, expected_fault_handler_t expected_fault_handler) { fault_status_t fault_code; vm_prot_t fault_type; inspect_abort(ESR_ISS(esr), &fault_code, &fault_type); - handler(state, esr, fault_addr, fault_code, fault_type, recover); + handler(state, esr, fault_addr, fault_code, fault_type, recover, expected_fault_handler); } static void @@ -817,8 +1033,15 @@ inspect_data_abort(uint32_t iss, fault_status_t *fault_code, vm_prot_t *fault_ty getCpuDatap()->cpu_stat.data_ex_cnt++; *fault_code = ISS_DA_FSC(iss); - /* Cache operations report faults as write access. Change these to read access. */ - if ((iss & ISS_DA_WNR) && !(iss & ISS_DA_CM)) { + /* + * Cache maintenance operations always report faults as write access. + * Change these to read access, unless they report a permission fault. + * Only certain cache maintenance operations (e.g. 'dc ivac') require write + * access to the mapping, but if a cache maintenance operation that only requires + * read access generates a permission fault, then we will not be able to handle + * the fault regardless of whether we treat it as a read or write fault. + */ + if ((iss & ISS_DA_WNR) && (!(iss & ISS_DA_CM) || is_permission_fault(*fault_code))) { *fault_type = (VM_PROT_READ | VM_PROT_WRITE); } else { *fault_type = (VM_PROT_READ); @@ -931,8 +1154,18 @@ handle_fp_trap(arm_saved_state_t *state, uint32_t esr) mach_msg_type_number_t numcodes = 2; uint32_t instr = 0; + if (PSR64_IS_KERNEL(get_saved_state_cpsr(state))) { + panic_with_thread_kernel_state("Floating point exception from kernel", state); + } + + COPYIN(get_saved_state_pc(state), (char *)&instr, sizeof(instr)); + codes[1] = instr; + /* The floating point trap flags are only valid if TFV is set. 
*/ - if (!(esr & ISS_FP_TFV)) { + if (!fp_exceptions_enabled) { + exc = EXC_BAD_INSTRUCTION; + codes[0] = EXC_ARM_UNDEFINED; + } else if (!(esr & ISS_FP_TFV)) { codes[0] = EXC_ARM_FP_UNDEFINED; } else if (esr & ISS_FP_UFF) { codes[0] = EXC_ARM_FP_UF; @@ -950,14 +1183,39 @@ handle_fp_trap(arm_saved_state_t *state, uint32_t esr) panic("Unrecognized floating point exception, state=%p, esr=%#x", state, esr); } - COPYIN(get_saved_state_pc(state), (char *)&instr, sizeof(instr)); - codes[1] = instr; - exception_triage(exc, codes, numcodes); __builtin_unreachable(); } + +/* + * handle_alignment_fault_from_user: + * state: Saved state + * + * Attempts to deal with an alignment fault from userspace (possibly by + * emulating the faulting instruction). If emulation failed due to an + * unservicable fault, the ESR for that fault will be stored in the + * recovery_esr field of the thread by the exception code. + * + * Returns: + * -1: Emulation failed (emulation of state/instr not supported) + * 0: Successfully emulated the instruction + * EFAULT: Emulation failed (probably due to permissions) + * EINVAL: Emulation failed (probably due to a bad address) + */ +static int +handle_alignment_fault_from_user(arm_saved_state_t *state, kern_return_t *vmfr) +{ + int ret = -1; + +#pragma unused (state) +#pragma unused (vmfr) + + return ret; +} + + static void handle_sw_step_debug(arm_saved_state_t *state) { @@ -988,76 +1246,6 @@ handle_sw_step_debug(arm_saved_state_t *state) __builtin_unreachable(); } -static int -is_vm_fault(fault_status_t status) -{ - switch (status) { - case FSC_TRANSLATION_FAULT_L0: - case FSC_TRANSLATION_FAULT_L1: - case FSC_TRANSLATION_FAULT_L2: - case FSC_TRANSLATION_FAULT_L3: - case FSC_ACCESS_FLAG_FAULT_L1: - case FSC_ACCESS_FLAG_FAULT_L2: - case FSC_ACCESS_FLAG_FAULT_L3: - case FSC_PERMISSION_FAULT_L1: - case FSC_PERMISSION_FAULT_L2: - case FSC_PERMISSION_FAULT_L3: - return TRUE; - default: - return FALSE; - } -} - -static int -is_translation_fault(fault_status_t status) -{ - switch (status) { - case FSC_TRANSLATION_FAULT_L0: - case FSC_TRANSLATION_FAULT_L1: - case FSC_TRANSLATION_FAULT_L2: - case FSC_TRANSLATION_FAULT_L3: - return TRUE; - default: - return FALSE; - } -} - -#if __ARM_PAN_AVAILABLE__ || defined(KERNEL_INTEGRITY_CTRR) -static int -is_permission_fault(fault_status_t status) -{ - switch (status) { - case FSC_PERMISSION_FAULT_L1: - case FSC_PERMISSION_FAULT_L2: - case FSC_PERMISSION_FAULT_L3: - return TRUE; - default: - return FALSE; - } -} -#endif - -static int -is_alignment_fault(fault_status_t status) -{ - return status == FSC_ALIGNMENT_FAULT; -} - -static int -is_parity_error(fault_status_t status) -{ - switch (status) { - case FSC_SYNC_PARITY: - case FSC_ASYNC_PARITY: - case FSC_SYNC_PARITY_TT_L1: - case FSC_SYNC_PARITY_TT_L2: - case FSC_SYNC_PARITY_TT_L3: - return TRUE; - default: - return FALSE; - } -} - static void set_saved_state_pc_to_recovery_handler(arm_saved_state_t *iss, vm_offset_t recover) { @@ -1094,7 +1282,7 @@ set_saved_state_pc_to_recovery_handler(arm_saved_state_t *iss, vm_offset_t recov static void handle_user_abort(arm_saved_state_t *state, uint32_t esr, vm_offset_t fault_addr, - fault_status_t fault_code, vm_prot_t fault_type, vm_offset_t recover) + fault_status_t fault_code, vm_prot_t fault_type, vm_offset_t recover, expected_fault_handler_t expected_fault_handler) { exception_type_t exc = EXC_BAD_ACCESS; mach_exception_data_type_t codes[2]; @@ -1102,7 +1290,7 @@ handle_user_abort(arm_saved_state_t *state, uint32_t esr, vm_offset_t fault_addr 
thread_t thread = current_thread(); (void)esr; - (void)state; + (void)expected_fault_handler; if (ml_at_interrupt_context()) { panic_with_thread_kernel_state("Apparently on interrupt stack when taking user abort!\n", state); @@ -1117,7 +1305,7 @@ handle_user_abort(arm_saved_state_t *state, uint32_t esr, vm_offset_t fault_addr assert(map != kernel_map); - if (!(fault_type & VM_PROT_EXECUTE) && user_tbi_enabled()) { + if (!(fault_type & VM_PROT_EXECUTE)) { vm_fault_addr = tbi_clear(fault_addr); } @@ -1125,14 +1313,14 @@ handle_user_abort(arm_saved_state_t *state, uint32_t esr, vm_offset_t fault_addr if (thread->t_dtrace_inprobe) { /* Executing under dtrace_probe? */ if (dtrace_tally_fault(vm_fault_addr)) { /* Should a user mode fault under dtrace be ignored? */ if (recover) { + thread->machine.recover_esr = esr; + thread->machine.recover_far = vm_fault_addr; set_saved_state_pc_to_recovery_handler(state, recover); } else { - ml_set_interrupts_enabled(FALSE); panic_with_thread_kernel_state("copyin/out has no recovery point", state); } return; } else { - ml_set_interrupts_enabled(FALSE); panic_with_thread_kernel_state("Unexpected UMW page fault under dtrace_probe", state); } } @@ -1153,7 +1341,9 @@ handle_user_abort(arm_saved_state_t *state, uint32_t esr, vm_offset_t fault_addr /* check to see if it is just a pmap ref/modify fault */ if ((result != KERN_SUCCESS) && !is_translation_fault(fault_code)) { - result = arm_fast_fault(map->pmap, trunc_page(vm_fault_addr), fault_type, (fault_code == FSC_ACCESS_FLAG_FAULT_L3), TRUE); + result = arm_fast_fault(map->pmap, + vm_fault_addr, + fault_type, (fault_code == FSC_ACCESS_FLAG_FAULT_L3), TRUE); } if (result != KERN_SUCCESS) { { @@ -1177,7 +1367,50 @@ handle_user_abort(arm_saved_state_t *state, uint32_t esr, vm_offset_t fault_addr codes[0] = result; } else if (is_alignment_fault(fault_code)) { - codes[0] = EXC_ARM_DA_ALIGN; + kern_return_t vmfkr = KERN_SUCCESS; + thread->machine.recover_esr = 0; + thread->machine.recover_far = 0; + int result = handle_alignment_fault_from_user(state, &vmfkr); + if (result == 0) { + /* Successfully emulated, or instruction + * copyin() for decode/emulation failed. + * Continue, or redrive instruction. + */ + thread_exception_return(); + } else if (((result == EFAULT) || (result == EINVAL)) && + (thread->machine.recover_esr == 0)) { + /* + * If we didn't actually take a fault, but got one of + * these errors, then we failed basic sanity checks of + * the fault address. Treat this as an invalid + * address. + */ + codes[0] = KERN_INVALID_ADDRESS; + } else if ((result == EFAULT) && + (thread->machine.recover_esr)) { + /* + * Since alignment aborts are prioritized + * ahead of translation aborts, the misaligned + * atomic emulation flow may have triggered a + * VM pagefault, which the VM could not resolve. + * Report the VM fault error in codes[] + */ + + codes[0] = vmfkr; + assertf(vmfkr != KERN_SUCCESS, "Unexpected vmfkr 0x%x", vmfkr); + /* Cause ESR_EC to reflect an EL0 abort */ + thread->machine.recover_esr &= ~ESR_EC_MASK; + thread->machine.recover_esr |= (ESR_EC_DABORT_EL0 << ESR_EC_SHIFT); + set_saved_state_esr(thread->machine.upcb, thread->machine.recover_esr); + set_saved_state_far(thread->machine.upcb, thread->machine.recover_far); + fault_addr = thread->machine.recover_far; + } else { + /* This was just an unsupported alignment + * exception. Misaligned atomic emulation + * timeouts fall in this category. 
+ */ + codes[0] = EXC_ARM_DA_ALIGN; + } } else if (is_parity_error(fault_code)) { #if defined(APPLE_ARM64_ARCH_FAMILY) if (fault_code == FSC_SYNC_PARITY) { @@ -1232,11 +1465,15 @@ is_pan_fault(arm_saved_state_t *state, uint32_t esr, vm_offset_t fault_addr, fau static void handle_kernel_abort(arm_saved_state_t *state, uint32_t esr, vm_offset_t fault_addr, - fault_status_t fault_code, vm_prot_t fault_type, vm_offset_t recover) + fault_status_t fault_code, vm_prot_t fault_type, vm_offset_t recover, expected_fault_handler_t expected_fault_handler) { thread_t thread = current_thread(); (void)esr; +#ifndef CONFIG_XNUPOST + (void)expected_fault_handler; +#endif /* CONFIG_XNUPOST */ + #if CONFIG_DTRACE if (is_vm_fault(fault_code) && thread->t_dtrace_inprobe) { /* Executing under dtrace_probe? */ if (dtrace_tally_fault(fault_addr)) { /* Should a fault under dtrace be ignored? */ @@ -1244,13 +1481,14 @@ handle_kernel_abort(arm_saved_state_t *state, uint32_t esr, vm_offset_t fault_ad * Point to next instruction, or recovery handler if set. */ if (recover) { + thread->machine.recover_esr = esr; + thread->machine.recover_far = fault_addr; set_saved_state_pc_to_recovery_handler(state, recover); } else { add_saved_state_pc(state, 4); } return; } else { - ml_set_interrupts_enabled(FALSE); panic_with_thread_kernel_state("Unexpected page fault under dtrace_probe", state); } } @@ -1273,29 +1511,11 @@ handle_kernel_abort(arm_saved_state_t *state, uint32_t esr, vm_offset_t fault_ad * when running with KTRR. */ -#if defined(KERNEL_INTEGRITY_CTRR) && defined(CONFIG_XNUPOST) - extern volatile vm_offset_t ctrr_test_va; - if (ctrr_test_va && fault_addr == ctrr_test_va && is_permission_fault(fault_code)) { - extern volatile uint64_t ctrr_exception_esr; - ctrr_exception_esr = esr; - add_saved_state_pc(state, 4); - return; - } -#endif - -#if __ARM_PAN_AVAILABLE__ && defined(CONFIG_XNUPOST) - if (is_permission_fault(fault_code) && !(get_saved_state_cpsr(state) & PSR64_PAN) && - (pan_ro_addr != 0) && (fault_addr == pan_ro_addr)) { - ++pan_exception_level; - // On an exception taken from a PAN-disabled context, verify - // that PAN is re-enabled for the exception handler and that - // accessing the test address produces a PAN fault. - pan_fault_value = *(char *)pan_test_addr; - __builtin_arm_wsr("pan", 1); // turn PAN back on after the nested exception cleared it for this context - add_saved_state_pc(state, 4); +#ifdef CONFIG_XNUPOST + if (expected_fault_handler && expected_fault_handler(state)) { return; } -#endif +#endif /* CONFIG_XNUPOST */ if (fault_addr >= gVirtBase && fault_addr < static_memory_end) { panic_with_thread_kernel_state("Unexpected fault in kernel static region\n", state); @@ -1325,7 +1545,9 @@ handle_kernel_abort(arm_saved_state_t *state, uint32_t esr, vm_offset_t fault_ad /* check to see if it is just a pmap ref/modify fault */ if (!is_translation_fault(fault_code)) { - result = arm_fast_fault(map->pmap, trunc_page(fault_addr), fault_type, (fault_code == FSC_ACCESS_FLAG_FAULT_L3), FALSE); + result = arm_fast_fault(map->pmap, + fault_addr, + fault_type, (fault_code == FSC_ACCESS_FLAG_FAULT_L3), FALSE); if (result == KERN_SUCCESS) { return; } @@ -1348,28 +1570,14 @@ handle_kernel_abort(arm_saved_state_t *state, uint32_t esr, vm_offset_t fault_ad * If we have a recover handler, invoke it now. 
*/ if (recover) { + thread->machine.recover_esr = esr; + thread->machine.recover_far = fault_addr; set_saved_state_pc_to_recovery_handler(state, recover); return; } #if __ARM_PAN_AVAILABLE__ if (is_pan_fault(state, esr, fault_addr, fault_code)) { -#ifdef CONFIG_XNUPOST - if ((pan_test_addr != 0) && (fault_addr == pan_test_addr)) { - ++pan_exception_level; - // read the user-accessible value to make sure - // pan is enabled and produces a 2nd fault from - // the exception handler - if (pan_exception_level == 1) { - pan_fault_value = *(char *)pan_test_addr; - __builtin_arm_wsr("pan", 1); // turn PAN back on after the nested exception cleared it for this context - } - // this fault address is used for PAN test - // disable PAN and rerun - mask_saved_state_cpsr(state, 0, PSR64_PAN); - return; - } -#endif panic_with_thread_kernel_state("Privileged access never abort.", state); } #endif @@ -1380,6 +1588,8 @@ handle_kernel_abort(arm_saved_state_t *state, uint32_t esr, vm_offset_t fault_ad #endif } else if (is_alignment_fault(fault_code)) { if (recover) { + thread->machine.recover_esr = esr; + thread->machine.recover_far = fault_addr; set_saved_state_pc_to_recovery_handler(state, recover); return; } @@ -1426,10 +1636,10 @@ handle_svc(arm_saved_state_t *state) mach_kauth_cred_uthread_update(); if (trap_no < 0) { - if (trap_no == -3) { + if (trap_no == MACH_ARM_TRAP_ABSTIME) { handle_mach_absolute_time_trap(state); return; - } else if (trap_no == -4) { + } else if (trap_no == MACH_ARM_TRAP_CONTTIME) { handle_mach_continuous_time_trap(state); return; } @@ -1462,30 +1672,31 @@ handle_mach_continuous_time_trap(arm_saved_state_t *state) saved_state64(state)->x[0] = now; } +__attribute__((noreturn)) static void -handle_msr_trap(arm_saved_state_t *state, uint32_t iss) +handle_msr_trap(arm_saved_state_t *state, uint32_t esr) { exception_type_t exception = EXC_BAD_INSTRUCTION; mach_exception_data_type_t codes[2] = {EXC_ARM_UNDEFINED}; mach_msg_type_number_t numcodes = 2; uint32_t instr = 0; - (void)iss; - if (!is_saved_state64(state)) { - panic("MSR/MRS trap (EC 0x%x) from 32-bit state\n", ESR_EC_MSR_TRAP); + panic("MSR/MRS trap (ESR 0x%x) from 32-bit state\n", esr); } if (PSR64_IS_KERNEL(get_saved_state_cpsr(state))) { - panic("MSR/MRS trap (EC 0x%x) from kernel\n", ESR_EC_MSR_TRAP); + panic("MSR/MRS trap (ESR 0x%x) from kernel\n", esr); } COPYIN(get_saved_state_pc(state), (char *)&instr, sizeof(instr)); codes[1] = instr; exception_triage(exception, codes, numcodes); + __builtin_unreachable(); } + static void handle_user_trapped_instruction32(arm_saved_state_t *state, uint32_t esr) { @@ -1536,7 +1747,7 @@ sleh_irq(arm_saved_state_t *state) uint32_t old_entropy_sample_count = 0; size_t entropy_index = 0; uint32_t * entropy_data_ptr = NULL; - cpu_data_t * cdp = getCpuDatap(); + cpu_data_t * cdp __unused = getCpuDatap(); #if MACH_ASSERT int preemption_level = get_preemption_level(); #endif @@ -1544,11 +1755,15 @@ sleh_irq(arm_saved_state_t *state) sleh_interrupt_handler_prologue(state, DBG_INTR_TYPE_OTHER); +#if USE_APPLEARMSMP + PE_handle_ext_interrupt(); +#else /* Run the registered interrupt handler. */ cdp->interrupt_handler(cdp->interrupt_target, cdp->interrupt_refCon, cdp->interrupt_nub, cdp->interrupt_source); +#endif /* We use interrupt timing as an entropy source. 
*/ timestamp = ml_get_timebase(); @@ -1563,12 +1778,12 @@ sleh_irq(arm_saved_state_t *state) old_entropy_sample_count = EntropyData.sample_count; EntropyData.sample_count += 1; - entropy_index = old_entropy_sample_count & ENTROPY_BUFFER_INDEX_MASK; + entropy_index = old_entropy_sample_count & EntropyData.buffer_index_mask; entropy_data_ptr = EntropyData.buffer + entropy_index; /* Mix the timestamp data and the old data together. */ old_entropy_data = *entropy_data_ptr; - *entropy_data_ptr = (uint32_t)timestamp ^ __ror(old_entropy_data, 9); + *entropy_data_ptr = (uint32_t)timestamp ^ (__ror(old_entropy_data, 9) & EntropyData.ror_mask); sleh_interrupt_handler_epilogue(); #if MACH_ASSERT @@ -1634,7 +1849,9 @@ sleh_fiq(arm_saved_state_t *state) #endif /* defined(HAS_IPI) */ #if MONOTONIC_FIQ if (type == DBG_INTR_TYPE_PMI) { + INTERRUPT_MASKED_DEBUG_START(mt_fiq, DBG_INTR_TYPE_PMI); mt_fiq(getCpuDatap(), pmcr0, upmsr); + INTERRUPT_MASKED_DEBUG_END(); } else #endif /* MONOTONIC_FIQ */ { @@ -1652,7 +1869,9 @@ sleh_fiq(arm_saved_state_t *state) * We can easily thread it through, but not bothering for the * moment (AArch32 doesn't either). */ + INTERRUPT_MASKED_DEBUG_START(rtclock_intr, DBG_INTR_TYPE_TIMER); rtclock_intr(TRUE); + INTERRUPT_MASKED_DEBUG_END(); } sleh_interrupt_handler_epilogue(); @@ -1666,6 +1885,10 @@ sleh_fiq(arm_saved_state_t *state) void sleh_serror(arm_context_t *context, uint32_t esr, vm_offset_t far) { + task_vtimer_check(current_thread()); + + KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_EXCP_SERR_ARM, 0) | DBG_FUNC_START, + esr, VM_KERNEL_ADDRHIDE(far)); arm_saved_state_t *state = &context->ss; #if MACH_ASSERT int preemption_level = get_preemption_level(); @@ -1678,6 +1901,8 @@ sleh_serror(arm_context_t *context, uint32_t esr, vm_offset_t far) panic("serror changed preemption level from %d to %d", preemption_level, get_preemption_level()); } #endif + KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_EXCP_SERR_ARM, 0) | DBG_FUNC_END, + esr, VM_KERNEL_ADDRHIDE(far)); } void @@ -1726,7 +1951,9 @@ syscall_trace( static void sleh_interrupt_handler_prologue(arm_saved_state_t *state, unsigned int type) { - uint64_t is_user = PSR64_IS_USER(get_saved_state_cpsr(state)); + bool is_user = PSR64_IS_USER(get_saved_state_cpsr(state)); + + task_vtimer_check(current_thread()); uint64_t pc = is_user ? get_saved_state_pc(state) : VM_KERNEL_UNSLIDE(get_saved_state_pc(state)); @@ -1765,3 +1992,4 @@ sleh_invalid_stack(arm_context_t *context, uint32_t esr __unused, vm_offset_t fa panic_with_thread_kernel_state("Invalid kernel stack pointer (probable corruption).", &context->ss); } + diff --git a/osfmk/arm64/start.s b/osfmk/arm64/start.s index 00cba8194..1f239f407 100644 --- a/osfmk/arm64/start.s +++ b/osfmk/arm64/start.s @@ -32,6 +32,7 @@ #include #include #include "assym.s" +#include #include #if __ARM_KERNEL_PROTECT__ @@ -99,7 +100,7 @@ .endmacro .macro MSR_SCTLR_EL1_X0 -#if defined(KERNEL_INTEGRITY_KTRR) +#if defined(KERNEL_INTEGRITY_KTRR) mov x1, lr // This may abort, do so on SP1 @@ -192,29 +193,33 @@ LEXT(reset_vector) * If either values are zero, we're debugging kernel so skip programming KTRR. */ - /* spin until bootstrap core has completed machine lockdown */ + /* refuse to boot if machine_lockdown() hasn't completed */ adrp x17, EXT(lockdown_done)@page -1: - ldr w18, [x17, EXT(lockdown_done)@pageoff] - cbz w18, 1b + ldr w17, [x17, EXT(lockdown_done)@pageoff] + cbz w17, . 
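The secondary-CPU reset path above now refuses to boot (cbz w17, .) until machine_lockdown() has run, and the hunk that follows loads the stashed ctrr_begin/ctrr_end bounds before programming and locking KTRR; the old "subtract one page from rorgn_end" adjustment is dropped. Below is a minimal C model of that sequence, under stated assumptions: write_ktrr_lower()/write_ktrr_upper()/write_ktrr_lock() are hypothetical stand-ins for the ARM64_REG_KTRR_* msr instructions, and the debug-only skip mirrors the new #if DEBUG || DEVELOPMENT || CONFIG_DTRACE guards.

#include <stdint.h>
#include <stdbool.h>

extern volatile uint32_t lockdown_done;   /* set once machine_lockdown() completes */
extern uint64_t ctrr_begin, ctrr_end;     /* bounds stashed by the bootstrap core */

extern void write_ktrr_lower(uint64_t);   /* hypothetical msr wrappers */
extern void write_ktrr_upper(uint64_t);
extern void write_ktrr_lock(uint64_t);

void
program_ktrr_on_reset(bool debug_build)
{
    while (lockdown_done == 0) {
        /* refuse to boot until the bootstrap core has finished machine_lockdown() */
    }
    if (ctrr_begin == 0 || ctrr_end == 0) {
        if (debug_build) {
            return;                       /* debugging: leave KTRR unprogrammed */
        }
        for (;;) {
            /* release kernels hang ("cbz ..., .") rather than boot without KTRR */
        }
    }
    write_ktrr_lower(ctrr_begin);
    write_ktrr_upper(ctrr_end);
    write_ktrr_lock(1);                   /* locked until the next reset */
}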
// load stashed rorgn_begin - adrp x17, EXT(rorgn_begin)@page - add x17, x17, EXT(rorgn_begin)@pageoff + adrp x17, EXT(ctrr_begin)@page + add x17, x17, EXT(ctrr_begin)@pageoff ldr x17, [x17] +#if DEBUG || DEVELOPMENT || CONFIG_DTRACE // if rorgn_begin is zero, we're debugging. skip enabling ktrr cbz x17, Lskip_ktrr +#else + cbz x17, . +#endif // load stashed rorgn_end - adrp x19, EXT(rorgn_end)@page - add x19, x19, EXT(rorgn_end)@pageoff + adrp x19, EXT(ctrr_end)@page + add x19, x19, EXT(ctrr_end)@pageoff ldr x19, [x19] +#if DEBUG || DEVELOPMENT || CONFIG_DTRACE cbz x19, Lskip_ktrr +#else + cbz x19, . +#endif - // program and lock down KTRR - // subtract one page from rorgn_end to make pinst insns NX msr ARM64_REG_KTRR_LOWER_EL1, x17 - sub x19, x19, #(1 << (ARM_PTE_SHIFT-12)), lsl #12 msr ARM64_REG_KTRR_UPPER_EL1, x19 mov x17, #1 msr ARM64_REG_KTRR_LOCK_EL1, x17 @@ -231,7 +236,7 @@ Lskip_ktrr: and x0, x15, #0xFF // CPU number is in MPIDR Affinity Level 0 #endif ldr x1, [x19, CPU_DATA_ENTRIES] // Load start of data entries - add x3, x1, MAX_CPUS * 16 // end addr of data entries = start + (16 * MAX_CPUS) + add x3, x1, MAX_CPUS * 16 // end addr of data entries = start + (16 * MAX_CPUS) Lcheck_cpu_data_entry: ldr x21, [x1, CPU_DATA_PADDR] // Load physical CPU data address cbz x21, Lnext_cpu_data_entry @@ -251,29 +256,34 @@ Lfound_cpu_data_entry: * A_PXN and A_MMUON_WRPROTECT options provides something close to KTRR behavior */ - /* spin until bootstrap core has completed machine lockdown */ + /* refuse to boot if machine_lockdown() hasn't completed */ adrp x17, EXT(lockdown_done)@page -1: - ldr w18, [x17, EXT(lockdown_done)@pageoff] - cbz w18, 1b + ldr w17, [x17, EXT(lockdown_done)@pageoff] + cbz w17, . // load stashed rorgn_begin - adrp x17, EXT(rorgn_begin)@page - add x17, x17, EXT(rorgn_begin)@pageoff + adrp x17, EXT(ctrr_begin)@page + add x17, x17, EXT(ctrr_begin)@pageoff ldr x17, [x17] +#if DEBUG || DEVELOPMENT || CONFIG_DTRACE // if rorgn_begin is zero, we're debugging. skip enabling ctrr cbz x17, Lskip_ctrr +#else + cbz x17, . +#endif // load stashed rorgn_end - adrp x19, EXT(rorgn_end)@page - add x19, x19, EXT(rorgn_end)@pageoff + adrp x19, EXT(ctrr_end)@page + add x19, x19, EXT(ctrr_end)@pageoff ldr x19, [x19] +#if DEBUG || DEVELOPMENT || CONFIG_DTRACE cbz x19, Lskip_ctrr +#else + cbz x19, . 
+#endif mrs x18, ARM64_REG_CTRR_LOCK_EL1 cbnz x18, Lskip_ctrr /* don't touch if already locked */ - ldr w18, [x21, CLUSTER_MASTER] /* cluster master is unsigned int (32bit) */ - cbz w18, Lspin_ctrr_unlocked /* non-cluster master spins if CTRR unlocked (unexpected) */ msr ARM64_REG_CTRR_A_LWR_EL1, x17 msr ARM64_REG_CTRR_A_UPR_EL1, x19 mov x18, #(CTRR_CTL_EL1_A_PXN | CTRR_CTL_EL1_A_MMUON_WRPROTECT) @@ -311,10 +321,6 @@ Lskip_ctrr: bne Lskip_cpu_reset_handler 1: -#if HAS_NEX_PG - bl EXT(set_nex_pg) -#endif - #if HAS_BP_RET bl EXT(set_bp_ret) #endif @@ -447,7 +453,7 @@ start_cpu: ldr x22, [x20, BA_VIRT_BASE] // Get the kernel virt base ldr x23, [x20, BA_PHYS_BASE] // Get the kernel phys base ldr x24, [x20, BA_MEM_SIZE] // Get the physical memory size - ldr x25, [x20, BA_TOP_OF_KERNEL_DATA] // Get the top of the kernel data + adrp x25, EXT(bootstrap_pagetables)@page // Get the start of the page tables ldr x26, [x20, BA_BOOT_FLAGS] // Get the kernel boot flags @@ -629,7 +635,7 @@ LEXT(start_first_cpu) ldr x22, [x20, BA_VIRT_BASE] // Get the kernel virt base ldr x23, [x20, BA_PHYS_BASE] // Get the kernel phys base ldr x24, [x20, BA_MEM_SIZE] // Get the physical memory size - ldr x25, [x20, BA_TOP_OF_KERNEL_DATA] // Get the top of the kernel data + adrp x25, EXT(bootstrap_pagetables)@page // Get the start of the page tables ldr x26, [x20, BA_BOOT_FLAGS] // Get the kernel boot flags // Clear the register that will be used to store the userspace thread pointer and CPU number. @@ -678,7 +684,7 @@ LEXT(start_first_cpu) // Invalidate all entries in the bootstrap page tables mov x0, #(ARM_TTE_EMPTY) // Load invalid entry template - mov x1, x25 // Start at top of kernel + mov x1, x25 // Start at V=P pagetable root mov x2, #(TTE_PGENTRIES) // Load number of entries per page lsl x2, x2, #2 // Shift by 2 for num entries on 4 pages @@ -696,16 +702,26 @@ Linvalidate_bootstrap: // do { * If the base address belongs to TZ0, it may be dangerous for xnu to map * it (as it may be prefetched, despite being technically inaccessible). * In order to avoid this issue while keeping the mapping code simple, we - * may continue to use block mappings, but we will only map xnu's mach - * header to the end of memory. + * may continue to use block mappings, but we will only map the kernelcache + * mach header to the end of memory. * * Given that iBoot guarantees that the unslid kernelcache base address * will begin on an L2 boundary, this should prevent us from accidentally * mapping TZ0. */ - adrp x0, EXT(_mh_execute_header)@page // Use xnu's mach header as the start address - add x0, x0, EXT(_mh_execute_header)@pageoff + adrp x0, EXT(_mh_execute_header)@page // address of kernel mach header + add x0, x0, EXT(_mh_execute_header)@pageoff + ldr w1, [x0, #0x18] // load mach_header->flags + tbz w1, #0x1f, Lkernelcache_base_found // if MH_DYLIB_IN_CACHE unset, base is kernel mach header + ldr w1, [x0, #0x20] // load first segment cmd (offset sizeof(kernel_mach_header_t)) + cmp w1, #0x19 // must be LC_SEGMENT_64 + bne . 
+ ldr x1, [x0, #0x38] // load first segment vmaddr + sub x1, x0, x1 // compute slide + MOV64 x0, VM_KERNEL_LINK_ADDRESS + add x0, x0, x1 // base is kernel link address + slide +Lkernelcache_base_found: /* * Adjust physical and virtual base addresses to account for physical * memory preceeding xnu Mach-O header @@ -768,7 +784,7 @@ Linvalidate_bootstrap: // do { * x21 - zero on cold boot, PA of cpu data on warm reset * x22 - Kernel virtual base * x23 - Kernel physical base - * x25 - PA of the end of the kernel + * x25 - PA of the V=P pagetable root * lr - KVA of C init routine * sp - SP_EL0 selected * @@ -777,6 +793,13 @@ Linvalidate_bootstrap: // do { * TPIDRRO_EL0 - CPU number */ common_start: + +#if HAS_NEX_PG + mov x19, lr + bl EXT(set_nex_pg) + mov lr, x19 +#endif + // Set the translation control register. adrp x0, EXT(sysreg_restore)@page // Load TCR value from the system register restore structure add x0, x0, EXT(sysreg_restore)@pageoff @@ -819,16 +842,14 @@ common_start: mov x1, #(MAIR_POSTED_COMBINED_REORDERED << MAIR_ATTR_SHIFT(CACHE_ATTRINDX_POSTED_COMBINED_REORDERED)) orr x0, x0, x1 msr MAIR_EL1, x0 + isb + tlbi vmalle1 + dsb ish #if defined(APPLEHURRICANE) - // Increase Snoop reservation in EDB to reduce starvation risk // Needs to be done before MMU is enabled - mrs x12, ARM64_REG_HID5 - and x12, x12, (~ARM64_REG_HID5_CrdEdbSnpRsvd_mask) - orr x12, x12, ARM64_REG_HID5_CrdEdbSnpRsvd_VALUE - msr ARM64_REG_HID5, x12 - + HID_INSERT_BITS ARM64_REG_HID5, ARM64_REG_HID5_CrdEdbSnpRsvd_mask, ARM64_REG_HID5_CrdEdbSnpRsvd_VALUE, x12 #endif #if defined(BCM2837) @@ -844,8 +865,6 @@ common_start: isb sy #endif - - #ifndef __ARM_IC_NOALIAS_ICACHE__ /* Invalidate the TLB and icache on systems that do not guarantee that the * caches are invalidated on reset. @@ -891,9 +910,16 @@ common_start: cbz x1, 1b // Poll APSTS_EL1.MKEYVld mrs x0, ARM64_REG_APCTL_EL1 orr x0, x0, #(APCTL_EL1_AppleMode) +#ifdef HAS_APCTL_EL1_USERKEYEN + orr x0, x0, #(APCTL_EL1_UserKeyEn) + and x0, x0, #~(APCTL_EL1_KernKeyEn) +#else /* !HAS_APCTL_EL1_USERKEYEN */ orr x0, x0, #(APCTL_EL1_KernKeyEn) +#endif /* HAS_APCTL_EL1_USERKEYEN */ and x0, x0, #~(APCTL_EL1_EnAPKey0) msr ARM64_REG_APCTL_EL1, x0 + + #else mrs x0, ARM64_REG_APCTL_EL1 and x1, x0, #(APCTL_EL1_MKEYVld) @@ -935,51 +961,30 @@ common_start: msr APGAKeyHi_EL1, x0 // Enable caches, MMU, ROP and JOP - mov x0, #(SCTLR_EL1_DEFAULT & 0xFFFF) - mov x1, #(SCTLR_EL1_DEFAULT & 0xFFFF0000) - orr x0, x0, x1 + MOV64 x0, SCTLR_EL1_DEFAULT orr x0, x0, #(SCTLR_PACIB_ENABLED) /* IB is ROP */ -#if DEBUG || DEVELOPMENT - and x2, x26, BA_BOOT_FLAGS_DISABLE_JOP #if __APCFG_SUPPORTED__ - // for APCFG systems, JOP keys are always on for EL1 unless ELXENKEY is cleared. + // for APCFG systems, JOP keys are always on for EL1. 
// JOP keys for EL0 will be toggled on the first time we pmap_switch to a pmap that has JOP enabled - cbz x2, Lenable_mmu - mrs x3, APCFG_EL1 - and x3, x3, #~(APCFG_EL1_ELXENKEY) - msr APCFG_EL1, x3 #else /* __APCFG_SUPPORTED__ */ - cbnz x2, Lenable_mmu -#endif /* __APCFG_SUPPORTED__ */ -#endif /* DEBUG || DEVELOPMENT */ - -#if !__APCFG_SUPPORTED__ MOV64 x1, SCTLR_JOP_KEYS_ENABLED orr x0, x0, x1 #endif /* !__APCFG_SUPPORTED__ */ -Lenable_mmu: #else /* HAS_APPLE_PAC */ // Enable caches and MMU - mov x0, #(SCTLR_EL1_DEFAULT & 0xFFFF) - mov x1, #(SCTLR_EL1_DEFAULT & 0xFFFF0000) - orr x0, x0, x1 + MOV64 x0, SCTLR_EL1_DEFAULT #endif /* HAS_APPLE_PAC */ MSR_SCTLR_EL1_X0 isb sy - MOV32 x1, SCTLR_EL1_DEFAULT + MOV64 x1, SCTLR_EL1_DEFAULT #if HAS_APPLE_PAC orr x1, x1, #(SCTLR_PACIB_ENABLED) #if !__APCFG_SUPPORTED__ MOV64 x2, SCTLR_JOP_KEYS_ENABLED -#if (DEBUG || DEVELOPMENT) - // Ignore the JOP bits, since we can't predict at compile time whether BA_BOOT_FLAGS_DISABLE_JOP is set - bic x0, x0, x2 -#else orr x1, x1, x2 -#endif /* (DEBUG || DEVELOPMENT) */ #endif /* !__APCFG_SUPPORTED__ */ #endif /* HAS_APPLE_PAC */ cmp x0, x1 @@ -998,8 +1003,8 @@ Lenable_mmu: #endif // Clear thread pointer - mov x0, #0 - msr TPIDR_EL1, x0 // Set thread register + msr TPIDR_EL1, xzr // Set thread register + #if defined(APPLE_ARM64_ARCH_FAMILY) // Initialization common to all Apple targets @@ -1010,383 +1015,29 @@ Lenable_mmu: ARM64_WRITE_EP_SPR x15, x12, ARM64_REG_EHID4, ARM64_REG_HID4 #endif // APPLE_ARM64_ARCH_FAMILY -#if defined(APPLETYPHOON) - // - // Typhoon-Specific initialization - // For tunable summary, see - // - - // - // Disable LSP flush with context switch to work around bug in LSP - // that can cause Typhoon to wedge when CONTEXTIDR is written. - // - // - - mrs x12, ARM64_REG_HID0 - orr x12, x12, ARM64_REG_HID0_LoopBuffDisb - msr ARM64_REG_HID0, x12 - - mrs x12, ARM64_REG_HID1 - orr x12, x12, ARM64_REG_HID1_rccDisStallInactiveIexCtl - msr ARM64_REG_HID1, x12 - - mrs x12, ARM64_REG_HID3 - orr x12, x12, ARM64_REG_HID3_DisXmonSnpEvictTriggerL2StarvationMode - msr ARM64_REG_HID3, x12 - - mrs x12, ARM64_REG_HID5 - and x12, x12, (~ARM64_REG_HID5_DisHwpLd) - and x12, x12, (~ARM64_REG_HID5_DisHwpSt) - msr ARM64_REG_HID5, x12 - - // Change the default memcache data set ID from 0 to 15 for all agents - mrs x12, ARM64_REG_HID8 - orr x12, x12, (ARM64_REG_HID8_DataSetID0_VALUE | ARM64_REG_HID8_DataSetID1_VALUE) -#if ARM64_BOARD_CONFIG_T7001 - orr x12, x12, ARM64_REG_HID8_DataSetID2_VALUE -#endif // ARM64_BOARD_CONFIG_T7001 - msr ARM64_REG_HID8, x12 - isb sy -#endif // APPLETYPHOON - -#if defined(APPLETWISTER) - - // rdar://problem/36112905: Set CYC_CFG:skipInit to pull in isAlive by one DCLK - // to work around potential hang. Must only be applied to Maui C0. 
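The open-coded mrs/orr/msr and mrs/and/msr sequences being deleted in these per-SoC blocks follow the pattern that the new HID_SET_BITS / HID_CLEAR_BITS / HID_INSERT_BITS macros in the proc_reg.h hunk capture (note that, despite its copy-pasted "Clears bits" comment, HID_INSERT_BITS clears the masked field and then ORs in a new value). A rough C equivalent of the three macros, with read_hid()/write_hid() as hypothetical stand-ins for the mrs/msr pair:

#include <stdint.h>

extern uint64_t read_hid(void);           /* stand-in for: mrs xN, <HID register> */
extern void     write_hid(uint64_t v);    /* stand-in for: msr <HID register>, xN */

static void
hid_set_bits(uint64_t mask)
{
    write_hid(read_hid() | mask);
}

static void
hid_clear_bits(uint64_t mask)
{
    write_hid(read_hid() & ~mask);
}

static void
hid_insert_bits(uint64_t mask, uint64_t value)
{
    /* clear the field selected by mask, then merge in the new value */
    write_hid((read_hid() & ~mask) | value);
}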
- mrs x12, MIDR_EL1 - ubfx x13, x12, #MIDR_EL1_PNUM_SHIFT, #12 - cmp x13, #4 // Part number 4 => Maui, 5 => Malta/Elba - bne Lskip_isalive - ubfx x13, x12, #MIDR_EL1_VAR_SHIFT, #4 - cmp x13, #2 // variant 2 => Maui C0 - b.lt Lskip_isalive - - mrs x12, ARM64_REG_CYC_CFG - orr x12, x12, ARM64_REG_CYC_CFG_skipInit - msr ARM64_REG_CYC_CFG, x12 - -Lskip_isalive: - - mrs x12, ARM64_REG_HID11 - and x12, x12, (~ARM64_REG_HID11_DisFillC1BubOpt) - msr ARM64_REG_HID11, x12 - - // Change the default memcache data set ID from 0 to 15 for all agents - mrs x12, ARM64_REG_HID8 - orr x12, x12, (ARM64_REG_HID8_DataSetID0_VALUE | ARM64_REG_HID8_DataSetID1_VALUE) - orr x12, x12, (ARM64_REG_HID8_DataSetID2_VALUE | ARM64_REG_HID8_DataSetID3_VALUE) - msr ARM64_REG_HID8, x12 - - // Use 4-cycle MUL latency to avoid denormal stalls - mrs x12, ARM64_REG_HID7 - orr x12, x12, #ARM64_REG_HID7_disNexFastFmul - msr ARM64_REG_HID7, x12 - - // disable reporting of TLB-multi-hit-error - // - mrs x12, ARM64_REG_LSU_ERR_STS - and x12, x12, (~ARM64_REG_LSU_ERR_STS_L1DTlbMultiHitEN) - msr ARM64_REG_LSU_ERR_STS, x12 - - isb sy -#endif // APPLETWISTER - -#if defined(APPLEHURRICANE) - - // IC prefetch configuration - // - mrs x12, ARM64_REG_HID0 - and x12, x12, (~ARM64_REG_HID0_ICPrefDepth_bmsk) - orr x12, x12, (1 << ARM64_REG_HID0_ICPrefDepth_bshift) - orr x12, x12, ARM64_REG_HID0_ICPrefLimitOneBrn - msr ARM64_REG_HID0, x12 - - // disable reporting of TLB-multi-hit-error - // - mrs x12, ARM64_REG_LSU_ERR_CTL - and x12, x12, (~ARM64_REG_LSU_ERR_CTL_L1DTlbMultiHitEN) - msr ARM64_REG_LSU_ERR_CTL, x12 - - // disable crypto fusion across decode groups - // - mrs x12, ARM64_REG_HID1 - orr x12, x12, ARM64_REG_HID1_disAESFuseAcrossGrp - msr ARM64_REG_HID1, x12 - -#if defined(ARM64_BOARD_CONFIG_T8011) - // Clear DisDcZvaCmdOnly - // Per Myst A0/B0 tunables document - // Myst: Confirm ACC Per-CPU Tunables - mrs x12, ARM64_REG_HID3 - and x12, x12, ~ARM64_REG_HID3_DisDcZvaCmdOnly - msr ARM64_REG_HID3, x12 - - mrs x12, ARM64_REG_EHID3 - and x12, x12, ~ARM64_REG_EHID3_DisDcZvaCmdOnly - msr ARM64_REG_EHID3, x12 -#endif /* defined(ARM64_BOARD_CONFIG_T8011) */ - -#endif // APPLEHURRICANE - -#if defined(APPLEMONSOON) - - /***** Tunables that apply to all skye cores, all chip revs *****/ - - // SW WAR/eval: WKdm write ack lost when bif_wke_colorWrAck_XXaH asserts concurrently for both colors - mrs x12, ARM64_REG_HID8 - orr x12, x12, #ARM64_REG_HID8_WkeForceStrictOrder - msr ARM64_REG_HID8, x12 - - // Skip if not E-core - ARM64_IS_PCORE x15 - cbnz x15, Lskip_skye_ecore_only - - /***** Tunables that only apply to skye e-cores, all chip revs *****/ - - // : Atomic launch eligibility is erroneously taken away when a store at SMB gets invalidated - mrs x12, ARM64_REG_EHID11 - and x12, x12, ~(ARM64_REG_EHID11_SmbDrainThresh_mask) - msr ARM64_REG_EHID11, x12 - -Lskip_skye_ecore_only: - - SKIP_IF_CPU_VERSION_GREATER_OR_EQUAL x12, MONSOON_CPU_VERSION_B0, Lskip_skye_a0_workarounds - - // Skip if not E-core - cbnz x15, Lskip_skye_a0_ecore_only - - /***** Tunables that only apply to skye e-cores, chip revs < B0 *****/ - - // Disable downstream fill bypass logic - // [Tunable] Skye - L2E fill bypass collision from both pipes to ecore - mrs x12, ARM64_REG_EHID5 - orr x12, x12, ARM64_REG_EHID5_DisFillByp - msr ARM64_REG_EHID5, x12 - - // Disable forwarding of return addresses to the NFP - // Skye: FED incorrectly taking illegal va exception - mrs x12, ARM64_REG_EHID0 - orr x12, x12, ARM64_REG_EHID0_nfpRetFwdDisb - msr ARM64_REG_EHID0, x12 - 
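Both the Maui C0 check deleted above and the SKIP_IF_CPU_VERSION_* / EXEC_*_REV* macros compare CPU steppings by packing MIDR_EL1's variant field (bits 23:20) and revision field (bits 3:0) into a single byte, which is exactly what the bfi/ubfx pair in those macros does. A small user-space model of that decoding is sketched below; field positions follow the ARM ARM, the mask value assumes the part number occupies bits 15:4, and the helper names are purely illustrative.

#include <stdint.h>

#define MIDR_VARIANT_SHIFT   20
#define MIDR_REVISION_MASK   0xfULL
#define MIDR_PNUM_MASK       0x0000fff0ULL   /* assumed: bits 15:4, primary part number */

static inline uint32_t
midr_cpu_version(uint64_t midr)
{
    uint32_t variant  = (midr >> MIDR_VARIANT_SHIFT) & 0xf;
    uint32_t revision = midr & MIDR_REVISION_MASK;
    /* e.g. variant 1, revision 0 packs to 0x10, typically a "B0" stepping */
    return (variant << 4) | revision;
}

static inline int
midr_part_matches(uint64_t midr, uint64_t part)
{
    return (midr & MIDR_PNUM_MASK) == part;
}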
-Lskip_skye_a0_ecore_only: - - /***** Tunables that apply to all skye cores, chip revs < B0 *****/ - - // Disable clock divider gating - // [Tunable/Errata][cpu_1p_1e] [CPGV2] ACC power down issue when link FSM switches from GO_DN to CANCEL and at the same time upStreamDrain request is set. - mrs x12, ARM64_REG_HID6 - orr x12, x12, ARM64_REG_HID6_DisClkDivGating - msr ARM64_REG_HID6, x12 - - // Disable clock dithering - // [Tunable] Skye A0: Linux: LLC PIO Errors - mrs x12, ARM64_REG_ACC_OVRD - orr x12, x12, ARM64_REG_ACC_OVRD_dsblClkDtr - msr ARM64_REG_ACC_OVRD, x12 - - mrs x12, ARM64_REG_ACC_EBLK_OVRD - orr x12, x12, ARM64_REG_ACC_OVRD_dsblClkDtr - msr ARM64_REG_ACC_EBLK_OVRD, x12 - -Lskip_skye_a0_workarounds: - - SKIP_IF_CPU_VERSION_LESS_THAN x12, MONSOON_CPU_VERSION_B0, Lskip_skye_post_a1_workarounds - - /***** Tunables that apply to all skye cores, chip revs >= B0 *****/ - - // : Disable refcount syncing between E and P - mrs x12, ARM64_REG_CYC_OVRD - and x12, x12, ~ARM64_REG_CYC_OVRD_dsblSnoopTime_mask - orr x12, x12, ARM64_REG_CYC_OVRD_dsblSnoopPTime - msr ARM64_REG_CYC_OVRD, x12 - -Lskip_skye_post_a1_workarounds: - -#endif /* defined(APPLEMONSOON) */ - -#if defined(APPLEVORTEX) - - ARM64_IS_PCORE x15 - - // Skip if not P-core - cbz x15, Lskip_cyprus_pcore_only - - mrs x12, ARM64_REG_HID1 - - mrs x13, MIDR_EL1 - ubfx x14, x13, #MIDR_EL1_PNUM_SHIFT, #12 - // Should be applied to all Aruba variants, but only Cyprus variants B0 and later - cmp x14, #0xb // Part number 11 => Cyprus, 16 => Aruba - bne Lbr_kill - ubfx x14, x13, #MIDR_EL1_VAR_SHIFT, #4 - cbz x14, Lskip_br_kill // variant 0 => Cyprus AX, 1 => Cyprus BX - -Lbr_kill: - - // rdar://problem/36716477: data corruption due to incorrect branch predictor resolution - orr x12, x12, ARM64_REG_HID1_enaBrKillLimit - -Lskip_br_kill: - - // rdar://problem/34435356: segfaults due to IEX clock-gating - orr x12, x12, ARM64_REG_HID1_rccForceAllIexL3ClksOn - msr ARM64_REG_HID1, x12 - -#if ARM64_BOARD_CONFIG_T8027 - // rdar://problem/40695685: Enable BIF fill buffer stall logic to prevent skid buffer overflow (Aruba A1 only) - mrs x12, ARM64_REG_HID5 - orr x12, x12, ARM64_REG_HID5_EnableDnFIFORdStall - msr ARM64_REG_HID5, x12 - -#endif /* ARM64_BOARD_CONFIG_T8027 */ - - // Prevent ordered loads from being dispatched from LSU until all prior loads have completed. - // rdar://problem/34095873: AF2 ordering rules allow ARM device ordering violations - mrs x12, ARM64_REG_HID4 - orr x12, x12, ARM64_REG_HID4_ForceNsOrdLdReqNoOlderLd - msr ARM64_REG_HID4, x12 - - // rdar://problem/38482968: [Cyprus Tunable] Poisoned cache line crossing younger load is not redirected by older load-barrier - mrs x12, ARM64_REG_HID3 - orr x12, x12, ARM64_REG_HID3_DisColorOpt - msr ARM64_REG_HID3, x12 - - // rdar://problem/41056604: disable faster launches of uncacheable unaligned stores to workaround load/load ordering violation - mrs x12, ARM64_REG_HID11 - orr x12, x12, ARM64_REG_HID11_DisX64NTLnchOpt - msr ARM64_REG_HID11, x12 - - b Lskip_cyprus_ecore_only - -Lskip_cyprus_pcore_only: - - // Prevent ordered loads from being dispatched from LSU until all prior loads have completed. 
- // rdar://problem/34095873: AF2 ordering rules allow ARM device ordering violations - mrs x12, ARM64_REG_EHID4 - orr x12, x12, ARM64_REG_HID4_ForceNsOrdLdReqNoOlderLd - msr ARM64_REG_EHID4, x12 - - // rdar://problem/36595004: Poisoned younger load is not redirected by older load-acquire - mrs x12, ARM64_REG_EHID3 - orr x12, x12, ARM64_REG_EHID3_DisColorOpt - msr ARM64_REG_EHID3, x12 - - // rdar://problem/37949166: Disable the extension of prefetcher training pipe clock gating, revert to default gating - mrs x12, ARM64_REG_EHID10 - orr x12, x12, ARM64_REG_EHID10_rccDisPwrSavePrfClkOff - msr ARM64_REG_EHID10, x12 - -Lskip_cyprus_ecore_only: - -#endif /* defined (APPLEVORTEX) */ - -#if defined(ARM64_BOARD_CONFIG_T8030) - // Cebu #include #include +#include #include #if __has_feature(ptrauth_calls) #include #endif + struct arm_vfpv2_state { __uint32_t __r[32]; __uint32_t __fpscr; @@ -53,6 +55,7 @@ typedef struct arm_vfpv2_state arm_vfpv2_state_t; */ void thread_set_child(thread_t child, int pid); void thread_set_parent(thread_t parent, int pid); +static void free_debug_state(thread_t thread); /* * Maps state flavor to number of words in the state: @@ -111,14 +114,26 @@ thread_state64_to_saved_state(const arm_thread_state64_t * ts64, assert(is_saved_state64(saved_state)); - set_saved_state_cpsr(saved_state, (ts64->cpsr & ~PSR64_MODE_MASK) | PSR64_MODE_RW_64); #if __has_feature(ptrauth_calls) + MANIPULATE_SIGNED_THREAD_STATE(saved_state, + "and w2, w2, %w[not_psr64_user_mask] \n" + "mov w6, %w[cpsr] \n" + "and w6, w6, %w[psr64_user_mask] \n" + "orr w2, w2, w6 \n" + "str w2, [x0, %[SS64_CPSR]] \n", + [cpsr] "r"(ts64->cpsr), + [psr64_user_mask] "i"(PSR64_USER_MASK), + [not_psr64_user_mask] "i"(~PSR64_USER_MASK) + ); /* * Make writes to ts64->cpsr visible first, since it's useful as a * canary to detect thread-state corruption. 
*/ __builtin_arm_dmb(DMB_ST); -#endif +#else + set_saved_state_cpsr(saved_state, + (get_saved_state_cpsr(saved_state) & ~PSR64_USER_MASK) | (ts64->cpsr & PSR64_USER_MASK)); +#endif /* __has_feature(ptrauth_calls) */ set_saved_state_fp(saved_state, ts64->fp); set_saved_state_lr(saved_state, ts64->lr); set_saved_state_sp(saved_state, ts64->sp); @@ -324,25 +339,29 @@ machine_thread_state_convert_to_user( // will round-trip correctly even if IA-signed again below (and IA-authd later) } - if (BootArgs->bootFlags & kBootFlagsDisableUserJOP) { + if (arm_user_jop_disabled()) { return KERN_SUCCESS; } if (ts64->pc) { ts64->pc = (uintptr_t)pmap_sign_user_ptr((void*)ts64->pc, - ptrauth_key_process_independent_code, ptrauth_string_discriminator("pc")); + ptrauth_key_process_independent_code, ptrauth_string_discriminator("pc"), + thread->machine.jop_pid); } if (ts64->lr && !(ts64->flags & __DARWIN_ARM_THREAD_STATE64_FLAGS_IB_SIGNED_LR)) { ts64->lr = (uintptr_t)pmap_sign_user_ptr((void*)ts64->lr, - ptrauth_key_process_independent_code, ptrauth_string_discriminator("lr")); + ptrauth_key_process_independent_code, ptrauth_string_discriminator("lr"), + thread->machine.jop_pid); } if (ts64->sp) { ts64->sp = (uintptr_t)pmap_sign_user_ptr((void*)ts64->sp, - ptrauth_key_process_independent_data, ptrauth_string_discriminator("sp")); + ptrauth_key_process_independent_data, ptrauth_string_discriminator("sp"), + thread->machine.jop_pid); } if (ts64->fp) { ts64->fp = (uintptr_t)pmap_sign_user_ptr((void*)ts64->fp, - ptrauth_key_process_independent_data, ptrauth_string_discriminator("fp")); + ptrauth_key_process_independent_data, ptrauth_string_discriminator("fp"), + thread->machine.jop_pid); } return KERN_SUCCESS; @@ -425,25 +444,29 @@ machine_thread_state_convert_from_user( // correctly below. 
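The convert_to_user/convert_from_user hunks now pass thread->machine.jop_pid as a fourth argument to pmap_sign_user_ptr()/pmap_auth_user_ptr(), so the per-field discriminators ("pc", "lr", "sp", "fp") are further diversified per process and a signed state blob from one process will not authenticate on another. Below is a schematic of that round trip, assuming hypothetical sign_user_ptr()/auth_user_ptr() helpers with the same four arguments and placeholder discriminator constants standing in for ptrauth_string_discriminator(); the real code also skips lr when the IB-signed flag is set, which is omitted here.

#include <stdint.h>

typedef uint64_t user_ptr_t;

extern user_ptr_t sign_user_ptr(user_ptr_t p, int key, uint64_t disc, uint64_t jop_pid);
extern user_ptr_t auth_user_ptr(user_ptr_t p, int key, uint64_t disc, uint64_t jop_pid);

enum { KEY_CODE, KEY_DATA };              /* stand-ins for the process-independent code/data keys */

struct ts64 { user_ptr_t pc, lr, sp, fp; };

void
convert_to_user(struct ts64 *ts, uint64_t jop_pid)
{
    if (ts->pc) ts->pc = sign_user_ptr(ts->pc, KEY_CODE, 0x1000 /* "pc" */, jop_pid);
    if (ts->lr) ts->lr = sign_user_ptr(ts->lr, KEY_CODE, 0x1001 /* "lr" */, jop_pid);
    if (ts->sp) ts->sp = sign_user_ptr(ts->sp, KEY_DATA, 0x1002 /* "sp" */, jop_pid);
    if (ts->fp) ts->fp = sign_user_ptr(ts->fp, KEY_DATA, 0x1003 /* "fp" */, jop_pid);
}

void
convert_from_user(struct ts64 *ts, uint64_t jop_pid)
{
    if (ts->pc) ts->pc = auth_user_ptr(ts->pc, KEY_CODE, 0x1000, jop_pid);
    if (ts->lr) ts->lr = auth_user_ptr(ts->lr, KEY_CODE, 0x1001, jop_pid);
    if (ts->sp) ts->sp = auth_user_ptr(ts->sp, KEY_DATA, 0x1002, jop_pid);
    if (ts->fp) ts->fp = auth_user_ptr(ts->fp, KEY_DATA, 0x1003, jop_pid);
}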
} - if (BootArgs->bootFlags & kBootFlagsDisableUserJOP) { + if (arm_user_jop_disabled()) { return KERN_SUCCESS; } if (ts64->pc) { ts64->pc = (uintptr_t)pmap_auth_user_ptr((void*)ts64->pc, - ptrauth_key_process_independent_code, ptrauth_string_discriminator("pc")); + ptrauth_key_process_independent_code, ptrauth_string_discriminator("pc"), + thread->machine.jop_pid); } if (ts64->lr && !(ts64->flags & __DARWIN_ARM_THREAD_STATE64_FLAGS_IB_SIGNED_LR)) { ts64->lr = (uintptr_t)pmap_auth_user_ptr((void*)ts64->lr, - ptrauth_key_process_independent_code, ptrauth_string_discriminator("lr")); + ptrauth_key_process_independent_code, ptrauth_string_discriminator("lr"), + thread->machine.jop_pid); } if (ts64->sp) { ts64->sp = (uintptr_t)pmap_auth_user_ptr((void*)ts64->sp, - ptrauth_key_process_independent_data, ptrauth_string_discriminator("sp")); + ptrauth_key_process_independent_data, ptrauth_string_discriminator("sp"), + thread->machine.jop_pid); } if (ts64->fp) { ts64->fp = (uintptr_t)pmap_auth_user_ptr((void*)ts64->fp, - ptrauth_key_process_independent_data, ptrauth_string_discriminator("fp")); + ptrauth_key_process_independent_data, ptrauth_string_discriminator("fp"), + thread->machine.jop_pid); } return KERN_SUCCESS; @@ -460,7 +483,7 @@ machine_thread_state_convert_from_user( kern_return_t machine_thread_siguctx_pointer_convert_to_user( - __assert_only thread_t thread, + thread_t thread, user_addr_t *uctxp) { #if __has_feature(ptrauth_calls) @@ -469,13 +492,14 @@ machine_thread_siguctx_pointer_convert_to_user( return KERN_SUCCESS; } - if (BootArgs->bootFlags & kBootFlagsDisableUserJOP) { + if (arm_user_jop_disabled()) { return KERN_SUCCESS; } if (*uctxp) { *uctxp = (uintptr_t)pmap_sign_user_ptr((void*)*uctxp, - ptrauth_key_process_independent_data, ptrauth_string_discriminator("uctx")); + ptrauth_key_process_independent_data, ptrauth_string_discriminator("uctx"), + thread->machine.jop_pid); } return KERN_SUCCESS; @@ -492,7 +516,7 @@ machine_thread_siguctx_pointer_convert_to_user( kern_return_t machine_thread_function_pointers_convert_from_user( - __assert_only thread_t thread, + thread_t thread, user_addr_t *fptrs, uint32_t count) { @@ -502,14 +526,14 @@ machine_thread_function_pointers_convert_from_user( return KERN_SUCCESS; } - if (BootArgs->bootFlags & kBootFlagsDisableUserJOP) { + if (arm_user_jop_disabled()) { return KERN_SUCCESS; } while (count--) { if (*fptrs) { *fptrs = (uintptr_t)pmap_auth_user_ptr((void*)*fptrs, - ptrauth_key_function_pointer, 0); + ptrauth_key_function_pointer, 0, thread->machine.jop_pid); } fptrs++; } @@ -597,10 +621,14 @@ machine_thread_get_state(thread_t thread, return KERN_INVALID_ARGUMENT; } - kern_return_t rn = handle_get_arm64_thread_state(tstate, count, thread->machine.upcb); + const arm_saved_state_t *current_state = thread->machine.upcb; + + kern_return_t rn = handle_get_arm64_thread_state(tstate, count, + current_state); if (rn) { return rn; } + break; } #endif @@ -788,6 +816,7 @@ machine_thread_get_state(thread_t thread, assert(sizeof(*state) == sizeof(*thread_state)); bcopy(thread_state, state, sizeof(arm_neon_state64_t)); + *count = ARM_NEON_STATE64_COUNT; break; } @@ -932,6 +961,7 @@ machine_thread_set_state(thread_t thread, return KERN_INVALID_ARGUMENT; } + rn = handle_set_arm64_thread_state(tstate, count, thread->machine.upcb); if (rn) { return rn; @@ -988,22 +1018,13 @@ machine_thread_set_state(thread_t thread, } if (!enabled) { - arm_debug_state32_t *thread_state = find_debug_state32(thread); - if (thread_state != NULL) { - void *pTmp = 
thread->machine.DebugData; - thread->machine.DebugData = NULL; - zfree(ads_zone, pTmp); - } + free_debug_state(thread); } else { - arm_debug_state32_t *thread_state = find_debug_state32(thread); + arm_debug_state32_t *thread_state = find_or_allocate_debug_state32(thread); + if (thread_state == NULL) { - thread->machine.DebugData = zalloc(ads_zone); - bzero(thread->machine.DebugData, sizeof *(thread->machine.DebugData)); - thread->machine.DebugData->dsh.flavor = ARM_DEBUG_STATE32; - thread->machine.DebugData->dsh.count = ARM_DEBUG_STATE32_COUNT; - thread_state = find_debug_state32(thread); + return KERN_FAILURE; } - assert(NULL != thread_state); for (i = 0; i < 16; i++) { /* set appropriate privilege; mask out unknown bits */ @@ -1051,7 +1072,7 @@ machine_thread_set_state(thread_t thread, state = (arm_debug_state32_t *) tstate; - if (state->mdscr_el1 & 0x1) { + if (state->mdscr_el1 & MDSCR_SS) { enabled = TRUE; } @@ -1070,27 +1091,18 @@ machine_thread_set_state(thread_t thread, } if (!enabled) { - arm_debug_state32_t *thread_state = find_debug_state32(thread); - if (thread_state != NULL) { - void *pTmp = thread->machine.DebugData; - thread->machine.DebugData = NULL; - zfree(ads_zone, pTmp); - } + free_debug_state(thread); } else { - arm_debug_state32_t *thread_state = find_debug_state32(thread); + arm_debug_state32_t * thread_state = find_or_allocate_debug_state32(thread); + if (thread_state == NULL) { - thread->machine.DebugData = zalloc(ads_zone); - bzero(thread->machine.DebugData, sizeof *(thread->machine.DebugData)); - thread->machine.DebugData->dsh.flavor = ARM_DEBUG_STATE32; - thread->machine.DebugData->dsh.count = ARM_DEBUG_STATE32_COUNT; - thread_state = find_debug_state32(thread); + return KERN_FAILURE; } - assert(NULL != thread_state); - if (state->mdscr_el1 & 0x1) { - thread_state->mdscr_el1 |= 0x1; + if (state->mdscr_el1 & MDSCR_SS) { + thread_state->mdscr_el1 |= MDSCR_SS; } else { - thread_state->mdscr_el1 &= ~0x1; + thread_state->mdscr_el1 &= ~MDSCR_SS; } for (i = 0; i < 16; i++) { @@ -1137,7 +1149,7 @@ machine_thread_set_state(thread_t thread, state = (arm_debug_state64_t *) tstate; - if (state->mdscr_el1 & 0x1) { + if (state->mdscr_el1 & MDSCR_SS) { enabled = TRUE; } @@ -1155,27 +1167,18 @@ machine_thread_set_state(thread_t thread, } if (!enabled) { - arm_debug_state64_t *thread_state = find_debug_state64(thread); - if (thread_state != NULL) { - void *pTmp = thread->machine.DebugData; - thread->machine.DebugData = NULL; - zfree(ads_zone, pTmp); - } + free_debug_state(thread); } else { - arm_debug_state64_t *thread_state = find_debug_state64(thread); + arm_debug_state64_t *thread_state = find_or_allocate_debug_state64(thread); + if (thread_state == NULL) { - thread->machine.DebugData = zalloc(ads_zone); - bzero(thread->machine.DebugData, sizeof *(thread->machine.DebugData)); - thread->machine.DebugData->dsh.flavor = ARM_DEBUG_STATE64; - thread->machine.DebugData->dsh.count = ARM_DEBUG_STATE64_COUNT; - thread_state = find_debug_state64(thread); + return KERN_FAILURE; } - assert(NULL != thread_state); - if (state->mdscr_el1 & 0x1) { - thread_state->mdscr_el1 |= 0x1; + if (state->mdscr_el1 & MDSCR_SS) { + thread_state->mdscr_el1 |= MDSCR_SS; } else { - thread_state->mdscr_el1 &= ~0x1; + thread_state->mdscr_el1 &= ~MDSCR_SS; } for (i = 0; i < 16; i++) { @@ -1274,6 +1277,7 @@ machine_thread_set_state(thread_t thread, assert(sizeof(*state) == sizeof(*thread_state)); bcopy(state, thread_state, sizeof(arm_neon_state64_t)); + thread->machine.uNeon->nsh.flavor = ARM_NEON_SAVED_STATE64; 
thread->machine.uNeon->nsh.count = ARM_NEON_SAVED_STATE64_COUNT; break; @@ -1428,6 +1432,73 @@ find_debug_state64(thread_t thread) } } +/** + * Finds the debug state for the given 64 bit thread, allocating one if it + * does not exist. + * + * @param thread 64 bit thread to find or allocate debug state for + * + * @returns A pointer to the given thread's 64 bit debug state or a null + * pointer if the given thread is null or the allocation of a new + * debug state fails. + */ +arm_debug_state64_t * +find_or_allocate_debug_state64(thread_t thread) +{ + arm_debug_state64_t *thread_state = find_debug_state64(thread); + if (thread != NULL && thread_state == NULL) { + thread->machine.DebugData = zalloc(ads_zone); + if (thread->machine.DebugData != NULL) { + bzero(thread->machine.DebugData, sizeof *(thread->machine.DebugData)); + thread->machine.DebugData->dsh.flavor = ARM_DEBUG_STATE64; + thread->machine.DebugData->dsh.count = ARM_DEBUG_STATE64_COUNT; + thread_state = find_debug_state64(thread); + } + } + return thread_state; +} + +/** + * Finds the debug state for the given 32 bit thread, allocating one if it + * does not exist. + * + * @param thread 32 bit thread to find or allocate debug state for + * + * @returns A pointer to the given thread's 32 bit debug state or a null + * pointer if the given thread is null or the allocation of a new + * debug state fails. + */ +arm_debug_state32_t * +find_or_allocate_debug_state32(thread_t thread) +{ + arm_debug_state32_t *thread_state = find_debug_state32(thread); + if (thread != NULL && thread_state == NULL) { + thread->machine.DebugData = zalloc(ads_zone); + if (thread->machine.DebugData != NULL) { + bzero(thread->machine.DebugData, sizeof *(thread->machine.DebugData)); + thread->machine.DebugData->dsh.flavor = ARM_DEBUG_STATE32; + thread->machine.DebugData->dsh.count = ARM_DEBUG_STATE32_COUNT; + thread_state = find_debug_state32(thread); + } + } + return thread_state; +} + +/** + * Frees a thread's debug state if allocated. Otherwise does nothing. + * + * @param thread thread to free the debug state of + */ +static inline void +free_debug_state(thread_t thread) +{ + if (thread != NULL && thread->machine.DebugData != NULL) { + void *pTmp = thread->machine.DebugData; + thread->machine.DebugData = NULL; + zfree(ads_zone, pTmp); + } +} + /* * Routine: thread_userstack * @@ -1459,7 +1530,8 @@ thread_userstack(__unused thread_t thread, break; } - /* INTENTIONAL FALL THROUGH (see machine_thread_set_state) */ + /* INTENTIONAL FALL THROUGH (see machine_thread_set_state) */ + OS_FALLTHROUGH; case ARM_THREAD_STATE32: if (count != ARM_THREAD_STATE32_COUNT) { return KERN_INVALID_ARGUMENT; @@ -1541,7 +1613,7 @@ thread_setuserstack(thread_t thread, * Routine: thread_adjuserstack * */ -uint64_t +user_addr_t thread_adjuserstack(thread_t thread, int adjust) { @@ -1557,6 +1629,7 @@ thread_adjuserstack(thread_t thread, return sp; } + /* * Routine: thread_setentrypoint * diff --git a/osfmk/arm64/tlb.h b/osfmk/arm64/tlb.h index eb1face77..03911a095 100644 --- a/osfmk/arm64/tlb.h +++ b/osfmk/arm64/tlb.h @@ -86,37 +86,39 @@ flush_core_tlb(void) // flush_mmu_tlb_allentries_async: flush entries that map VA range, all ASIDS, all cores // start and end are in units of 4K pages. 
static inline void -flush_mmu_tlb_allentries_async(uint64_t start, uint64_t end) +flush_mmu_tlb_allentries_async(uint64_t start, uint64_t end, uint64_t pmap_page_size) { #if __ARM_16K_PG__ - start = start & ~0x3ULL; + if (pmap_page_size == 16384) { + start = start & ~0x3ULL; - /* - * The code below is not necessarily correct. From an overview of - * the client code, the expected contract for TLB flushes is that - * we will expand from an "address, length" pair to "start address, - * end address" in the course of a TLB flush. This suggests that - * a flush for "X, X+4" is actually only asking for a flush of a - * single 16KB page. At the same time, we'd like to be prepared - * for bad inputs (X, X+3), so add 3 and then truncate the 4KB page - * number to a 16KB page boundary. This should deal correctly with - * unaligned inputs. - * - * If our expecations about client behavior are wrong however, this - * will lead to occasional TLB corruption on platforms with 16KB - * pages. - */ - end = (end + 0x3ULL) & ~0x3ULL; + /* + * The code below is not necessarily correct. From an overview of + * the client code, the expected contract for TLB flushes is that + * we will expand from an "address, length" pair to "start address, + * end address" in the course of a TLB flush. This suggests that + * a flush for "X, X+4" is actually only asking for a flush of a + * single 16KB page. At the same time, we'd like to be prepared + * for bad inputs (X, X+3), so add 3 and then truncate the 4KB page + * number to a 16KB page boundary. This should deal correctly with + * unaligned inputs. + * + * If our expecations about client behavior are wrong however, this + * will lead to occasional TLB corruption on platforms with 16KB + * pages. + */ + end = (end + 0x3ULL) & ~0x3ULL; + } #endif // __ARM_16K_PG__ - for (; start < end; start += (ARM_PGBYTES / 4096)) { + for (; start < end; start += (pmap_page_size / 4096)) { asm volatile ("tlbi vaae1is, %0" : : "r"(start)); } } static inline void -flush_mmu_tlb_allentries(uint64_t start, uint64_t end) +flush_mmu_tlb_allentries(uint64_t start, uint64_t end, uint64_t pmap_page_size) { - flush_mmu_tlb_allentries_async(start, end); + flush_mmu_tlb_allentries_async(start, end, pmap_page_size); sync_tlb_flush(); } @@ -149,27 +151,29 @@ flush_mmu_tlb_entry(uint64_t val) // start and end must have the ASID in the high 16 bits, with the VA in units of 4K in the lowest bits // Will also flush global entries that match the VA range static inline void -flush_mmu_tlb_entries_async(uint64_t start, uint64_t end) +flush_mmu_tlb_entries_async(uint64_t start, uint64_t end, uint64_t pmap_page_size) { #if __ARM_16K_PG__ - start = start & ~0x3ULL; + if (pmap_page_size == 16384) { + start = start & ~0x3ULL; - /* - * The code below is not necessarily correct. From an overview of - * the client code, the expected contract for TLB flushes is that - * we will expand from an "address, length" pair to "start address, - * end address" in the course of a TLB flush. This suggests that - * a flush for "X, X+4" is actually only asking for a flush of a - * single 16KB page. At the same time, we'd like to be prepared - * for bad inputs (X, X+3), so add 3 and then truncate the 4KB page - * number to a 16KB page boundary. This should deal correctly with - * unaligned inputs. - * - * If our expecations about client behavior are wrong however, this - * will lead to occasional TLB corruption on platforms with 16KB - * pages. 
- */ - end = (end + 0x3ULL) & ~0x3ULL; + /* + * The code below is not necessarily correct. From an overview of + * the client code, the expected contract for TLB flushes is that + * we will expand from an "address, length" pair to "start address, + * end address" in the course of a TLB flush. This suggests that + * a flush for "X, X+4" is actually only asking for a flush of a + * single 16KB page. At the same time, we'd like to be prepared + * for bad inputs (X, X+3), so add 3 and then truncate the 4KB page + * number to a 16KB page boundary. This should deal correctly with + * unaligned inputs. + * + * If our expecations about client behavior are wrong however, this + * will lead to occasional TLB corruption on platforms with 16KB + * pages. + */ + end = (end + 0x3ULL) & ~0x3ULL; + } #endif // __ARM_16K_PG__ #if __ARM_KERNEL_PROTECT__ uint64_t asid = start >> TLBI_ASID_SHIFT; @@ -178,30 +182,30 @@ flush_mmu_tlb_entries_async(uint64_t start, uint64_t end) * ASID scheme, this means we should flush all ASIDs. */ if (asid == 0) { - for (; start < end; start += (ARM_PGBYTES / 4096)) { + for (; start < end; start += (pmap_page_size / 4096)) { asm volatile ("tlbi vaae1is, %0" : : "r"(start)); } return; } start = start | (1ULL << TLBI_ASID_SHIFT); end = end | (1ULL << TLBI_ASID_SHIFT); - for (; start < end; start += (ARM_PGBYTES / 4096)) { + for (; start < end; start += (pmap_page_size / 4096)) { start = start & ~(1ULL << TLBI_ASID_SHIFT); asm volatile ("tlbi vae1is, %0" : : "r"(start)); start = start | (1ULL << TLBI_ASID_SHIFT); asm volatile ("tlbi vae1is, %0" : : "r"(start)); } #else - for (; start < end; start += (ARM_PGBYTES / 4096)) { + for (; start < end; start += (pmap_page_size / 4096)) { asm volatile ("tlbi vae1is, %0" : : "r"(start)); } #endif /* __ARM_KERNEL_PROTECT__ */ } static inline void -flush_mmu_tlb_entries(uint64_t start, uint64_t end) +flush_mmu_tlb_entries(uint64_t start, uint64_t end, uint64_t pmap_page_size) { - flush_mmu_tlb_entries_async(start, end); + flush_mmu_tlb_entries_async(start, end, pmap_page_size); sync_tlb_flush(); } @@ -270,8 +274,8 @@ flush_core_tlb_asid(uint64_t val) #error __ARM_RANGE_TLBI__ + __ARM_KERNEL_PROTECT__ is not currently supported #endif -#define ARM64_16K_TLB_RANGE_PAGES (1ULL << 21) -#define rtlbi_addr(x) (((x) >> RTLBI_ADDR_SHIFT) & RTLBI_ADDR_MASK) +#define ARM64_TLB_RANGE_PAGES (1ULL << 21) +#define rtlbi_addr(x, shift) (((x) >> (shift)) & RTLBI_ADDR_MASK) #define rtlbi_scale(x) ((uint64_t)(x) << RTLBI_SCALE_SHIFT) #define rtlbi_num(x) ((uint64_t)(x) << RTLBI_NUM_SHIFT) @@ -280,7 +284,7 @@ flush_core_tlb_asid(uint64_t val) * pass to any of the TLBI by range methods. */ static inline uint64_t -generate_rtlbi_param(ppnum_t npages, uint32_t asid, vm_offset_t va) +generate_rtlbi_param(ppnum_t npages, uint32_t asid, vm_offset_t va, uint64_t pmap_page_shift) { /** * Per the armv8.4 RTLBI extension spec, the range encoded in the rtlbi register operand is defined by: @@ -290,7 +294,7 @@ generate_rtlbi_param(ppnum_t npages, uint32_t asid, vm_offset_t va) unsigned scale = ((order ? 
order : 1) - 1) / 5; unsigned granule = 1 << ((5 * scale) + 1); unsigned num = (((npages + granule - 1) & ~(granule - 1)) / granule) - 1; - return tlbi_asid(asid) | RTLBI_TG | rtlbi_scale(scale) | rtlbi_num(num) | rtlbi_addr(va); + return tlbi_asid(asid) | RTLBI_TG(pmap_page_shift) | rtlbi_scale(scale) | rtlbi_num(num) | rtlbi_addr(va, pmap_page_shift); } // flush_mmu_tlb_range: flush TLB entries that map a VA range using a single instruction @@ -325,6 +329,22 @@ flush_mmu_tlb_allrange(uint64_t val) sync_tlb_flush(); } +// flush_core_tlb_allrange: flush TLB entries that map a VA range using a single instruction, local core only +// The argument should be encoded according to generate_rtlbi_param(). +// Follows the same ASID matching behavior as flush_mmu_tlb_allentries() +static inline void +flush_core_tlb_allrange_async(uint64_t val) +{ + asm volatile ("tlbi rvaae1, %0" : : "r"(val)); +} + +static inline void +flush_core_tlb_allrange(uint64_t val) +{ + flush_core_tlb_allrange_async(val); + sync_tlb_flush(); +} + #endif // __ARM_RANGE_TLBI__ diff --git a/osfmk/arm64/tunables/tunables.s b/osfmk/arm64/tunables/tunables.s new file mode 100644 index 000000000..4d295ea90 --- /dev/null +++ b/osfmk/arm64/tunables/tunables.s @@ -0,0 +1,45 @@ +/* + * Copyright (c) 2019 Apple Inc. All rights reserved. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. + * + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ + */ +#include + +#if defined(APPLETYPHOON) +#include "tunables_h7.s" +#elif defined(APPLETWISTER) +#include "tunables_h8.s" +#elif defined(APPLEHURRICANE) +#include "tunables_h9.s" +#elif defined(APPLEMONSOON) +#include "tunables_h10.s" +#elif defined(APPLEVORTEX) +#include "tunables_h11.s" +#elif defined(APPLELIGHTNING) +#include "tunables_h12.s" +#else +.macro APPLY_TUNABLES +.endmacro +#endif diff --git a/osfmk/arm64/tunables/tunables_h10.s b/osfmk/arm64/tunables/tunables_h10.s new file mode 100644 index 000000000..e246200ed --- /dev/null +++ b/osfmk/arm64/tunables/tunables_h10.s @@ -0,0 +1,76 @@ +/* + * Copyright (c) 2019 Apple Inc. All rights reserved. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). 
You may not use this file except in + * compliance with the License. The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. + * + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ + */ +.macro APPLY_TUNABLES + /***** Tunables that apply to all cores, all revisions *****/ + + // SW WAR/eval: WKdm write ack lost when bif_wke_colorWrAck_XXaH asserts concurrently for both colors + HID_SET_BITS ARM64_REG_HID8, ARM64_REG_HID8_WkeForceStrictOrder, $1 + + /***** Tunables that apply to all P cores, all revisions *****/ + /* N/A */ + + /***** Tunables that apply to all E cores, all revisions *****/ + /* N/A */ + + /***** Tunables that apply to specific cores, all revisions *****/ + EXEC_COREEQ_REVALL MIDR_SKYE_MISTRAL, $0, $1 + // : Atomic launch eligibility is erroneously taken away when a store at SMB gets invalidated + HID_CLEAR_BITS ARM64_REG_EHID11, ARM64_REG_EHID11_SmbDrainThresh_mask, $1 + EXEC_END + + /***** Tunables that apply to specific cores and revisions *****/ + EXEC_COREEQ_REVLO MIDR_SKYE_MISTRAL, CPU_VERSION_B0, $0, $1 + + // Disable downstream fill bypass logic + // [Tunable] Skye - L2E fill bypass collision from both pipes to ecore + HID_SET_BITS ARM64_REG_EHID5, ARM64_REG_EHID5_DisFillByp, $1 + + // Disable forwarding of return addresses to the NFP + // Skye: FED incorrectly taking illegal va exception + HID_SET_BITS ARM64_REG_EHID0, ARM64_REG_EHID0_nfpRetFwdDisb, $1 + + EXEC_END + + EXEC_COREALL_REVLO CPU_VERSION_B0, $0, $1 + + // Disable clock divider gating + // [Tunable/Errata][cpu_1p_1e] [CPGV2] ACC power down issue when link FSM switches from GO_DN to CANCEL and at the same time upStreamDrain request is set. + HID_SET_BITS ARM64_REG_HID6, ARM64_REG_HID6_DisClkDivGating, $1 + + // Disable clock dithering + // [Tunable] Skye A0: Linux: LLC PIO Errors + HID_SET_BITS ARM64_REG_ACC_OVRD, ARM64_REG_ACC_OVRD_dsblClkDtr, $1 + HID_SET_BITS ARM64_REG_ACC_EBLK_OVRD, ARM64_REG_ACC_OVRD_dsblClkDtr, $1 + + EXEC_END + + EXEC_COREALL_REVHS CPU_VERSION_B0, $0, $1 + // : Disable refcount syncing between E and P + HID_INSERT_BITS ARM64_REG_CYC_OVRD, ARM64_REG_CYC_OVRD_dsblSnoopTime_mask, ARM64_REG_CYC_OVRD_dsblSnoopPTime, $1 + EXEC_END +.endmacro \ No newline at end of file diff --git a/osfmk/arm64/tunables/tunables_h11.s b/osfmk/arm64/tunables/tunables_h11.s new file mode 100644 index 000000000..9fb5b0d9c --- /dev/null +++ b/osfmk/arm64/tunables/tunables_h11.s @@ -0,0 +1,80 @@ +/* + * Copyright (c) 2019 Apple Inc. All rights reserved. 
+ * + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. + * + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ + */ +.macro APPLY_TUNABLES + /***** Tunables that apply to all cores, all revisions *****/ + /* N/A */ + + /***** Tunables that apply to all P cores, all revisions *****/ + EXEC_PCORE_REVALL $0, $1 + // rdar://problem/34435356: segfaults due to IEX clock-gating + HID_SET_BITS ARM64_REG_HID1, ARM64_REG_HID1_rccForceAllIexL3ClksOn, $1 + + // Prevent ordered loads from being dispatched from LSU until all prior loads have completed. + // rdar://problem/34095873: AF2 ordering rules allow ARM device ordering violations + HID_SET_BITS ARM64_REG_HID4, ARM64_REG_HID4_ForceNsOrdLdReqNoOlderLd, $1 + + // rdar://problem/38482968: [Cyprus Tunable] Poisoned cache line crossing younger load is not redirected by older load-barrier + HID_SET_BITS ARM64_REG_HID3, ARM64_REG_HID3_DisColorOpt, $1 + + // rdar://problem/41056604: disable faster launches of uncacheable unaligned stores to workaround load/load ordering violation + HID_SET_BITS ARM64_REG_HID11, ARM64_REG_HID11_DisX64NTLnchOpt, $1 + + EXEC_END + + /***** Tunables that apply to all E cores, all revisions *****/ + EXEC_ECORE_REVALL $0, $1 + // Prevent ordered loads from being dispatched from LSU until all prior loads have completed. 
+ // rdar://problem/34095873: AF2 ordering rules allow ARM device ordering violations + HID_SET_BITS ARM64_REG_EHID4, ARM64_REG_HID4_ForceNsOrdLdReqNoOlderLd, $1 + + // rdar://problem/36595004: Poisoned younger load is not redirected by older load-acquire + HID_SET_BITS ARM64_REG_EHID3, ARM64_REG_EHID3_DisColorOpt, $1 + + // rdar://problem/37949166: Disable the extension of prefetcher training pipe clock gating, revert to default gating + HID_SET_BITS ARM64_REG_EHID10, ARM64_REG_EHID10_rccDisPwrSavePrfClkOff, $1 + + EXEC_END + + /***** Tunables that apply to specific cores, all revisions *****/ + // Should be applied to all Aruba variants, but only Cyprus variants B0 and later + EXEC_COREEQ_REVALL MIDR_ARUBA_VORTEX, $0, $1 + // rdar://problem/36716477: data corruption due to incorrect branch predictor resolution + HID_SET_BITS ARM64_REG_HID1, ARM64_REG_HID1_enaBrKillLimit, $1 + EXEC_END + + /***** Tunables that apply to specific cores and revisions *****/ + EXEC_COREEQ_REVHS MIDR_CYPRUS_VORTEX, CPU_VERSION_A1, $0, $1 + // rdar://problem/36716477: data corruption due to incorrect branch predictor resolution + HID_SET_BITS ARM64_REG_HID1, ARM64_REG_HID1_enaBrKillLimit, $1 + EXEC_END + + EXEC_COREEQ_REVEQ MIDR_ARUBA_VORTEX, CPU_VERSION_A1, $0, $1 + // rdar://problem/40695685: Enable BIF fill buffer stall logic to prevent skid buffer overflow (Aruba A1 only) + HID_SET_BITS ARM64_REG_HID5, ARM64_REG_HID5_EnableDnFIFORdStall, $1 + EXEC_END +.endmacro \ No newline at end of file diff --git a/osfmk/arm64/tunables/tunables_h12.s b/osfmk/arm64/tunables/tunables_h12.s new file mode 100644 index 000000000..7b988d0d1 --- /dev/null +++ b/osfmk/arm64/tunables/tunables_h12.s @@ -0,0 +1,100 @@ +/* + * Copyright (c) 2019 Apple Inc. All rights reserved. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. + * + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ + */ +.macro APPLY_TUNABLES + /***** Tunables that apply to all cores, all revisions *****/ + /* N/A */ + + /***** Tunables that apply to all P cores, all revisions *****/ + /* N/A */ + + /***** Tunables that apply to all E cores, all revisions *****/ + /* N/A */ + + /***** Tunables that apply to specific cores, all revisions *****/ + EXEC_COREEQ_REVALL MIDR_CEBU_LIGHTNING, $0, $1 + // rdar://53907283 ([Cebu ACC Errata] Sibling Merge in LLC can cause UC load to violate ARM Memory Ordering Rules.) 
+ HID_SET_BITS ARM64_REG_HID5, ARM64_REG_HID5_DisFill2cMerge, $1 + + // rdar://problem/54615539: [Cebu ACC Tunable]Cross-beat Crypto(AES/PMUL) ICache fusion is not disabled for branch uncondtional recoded instruction. + HID_SET_BITS ARM64_REG_HID0, ARM64_REG_HID0_CacheFusionDisable, $1 + + // rdar://problem/50664291: [Cebu B0/B1 Tunables][PerfVerif][LSU] Post-silicon tuning of STNT widget contiguous counter threshold + HID_INSERT_BITS ARM64_REG_HID4, ARM64_REG_HID4_CnfCntrThresh_mask, ARM64_REG_HID4_CnfCntrThresh_VALUE, $1 + + // rdar://problem/47744434: Barrier Load Ordering property is not satisfied for x64-loads + HID_SET_BITS ARM64_REG_HID9, ARM64_REG_HID9_EnableFixBug47221499, $1 + + // rdar://problem/50664291: [Cebu B0/B1 Tunables][PerfVerif][LSU] Post-silicon tuning of STNT widget contiguous counter threshold + HID_SET_BITS ARM64_REG_HID9, ARM64_REG_HID9_DisSTNTWidgetForUnalign, $1 + + // rdar://problem/47865629: RF bank and Multipass conflict forward progress widget does not handle 3+ cycle livelock + HID_SET_BITS ARM64_REG_HID16, ARM64_REG_HID16_EnRs4Sec, $1 + HID_CLEAR_BITS ARM64_REG_HID16, ARM64_REG_HID16_DisxPickRs45, $1 + HID_SET_BITS ARM64_REG_HID16, ARM64_REG_HID16_EnMPxPick45, $1 + HID_SET_BITS ARM64_REG_HID16, ARM64_REG_HID16_EnMPCyc7, $1 + + // Prevent ordered loads from being dispatched from LSU until all prior loads have completed. + // rdar://problem/34095873: AF2 ordering rules allow ARM device ordering violations + HID_SET_BITS ARM64_REG_HID4, ARM64_REG_HID4_ForceNsOrdLdReqNoOlderLd, $1 + + // rdar://problem/51690962: Disable Store-Non-Temporal downgrade widget + HID_SET_BITS ARM64_REG_HID4, ARM64_REG_HID4_DisSTNTWidget, $1 + + // rdar://problem/41056604: disable faster launches of uncacheable unaligned stores to workaround load/load ordering violation + HID_SET_BITS ARM64_REG_HID11, ARM64_REG_HID11_DisX64NTLnchOpt, $1 + + // rdar://problem/45024523: enable aggressive LEQ throttling to work around LEQ credit leak + HID_SET_BITS ARM64_REG_HID16, ARM64_REG_HID16_leqThrottleAggr, $1 + + // rdar://problem/41029832: configure dummy cycles to work around incorrect temp sensor readings on NEX power gating + HID_INSERT_BITS ARM64_REG_HID13, ARM64_REG_HID13_PreCyc_mask, ARM64_REG_HID13_PreCyc_VALUE, $1 + EXEC_END + + EXEC_COREEQ_REVALL MIDR_CEBU_THUNDER, $0, $1 + // rdar://53907283 ([Cebu ACC Errata] Sibling Merge in LLC can cause UC load to violate ARM Memory Ordering Rules.) + HID_SET_BITS ARM64_REG_HID5, ARM64_REG_HID5_DisFill2cMerge, $1 + + // rdar://problem/48476033: Prevent store-to-load forwarding for UC memory to avoid barrier ordering violation + HID_SET_BITS ARM64_REG_EHID10, ARM64_REG_EHID10_ForceWStDrainUc, $1 + + // Prevent ordered loads from being dispatched from LSU until all prior loads have completed. 
+ // rdar://problem/34095873: AF2 ordering rules allow ARM device ordering violations + HID_SET_BITS ARM64_REG_EHID4, ARM64_REG_HID4_ForceNsOrdLdReqNoOlderLd, $1 + + // rdar://problem/37949166: Disable the extension of prefetcher training pipe clock gating, revert to default gating + HID_SET_BITS ARM64_REG_EHID10, ARM64_REG_EHID10_rccDisPwrSavePrfClkOff, $1 + EXEC_END + + EXEC_COREEQ_REVALL MIDR_TURKS, $0, $1 + // rdar://problem/53506680: [MP_CHECKER] Load STLFs from a completed UC/NC/NT store causing barrier ordering violation + HID_SET_BITS ARM64_REG_EHID10, ARM64_REG_EHID10_ForceWStDrainUc, $1 + EXEC_END + + /***** Tunables that apply to specific cores and revisions *****/ + /* N/A */ +.endmacro \ No newline at end of file diff --git a/osfmk/arm64/tunables/tunables_h7.s b/osfmk/arm64/tunables/tunables_h7.s new file mode 100644 index 000000000..d239bb993 --- /dev/null +++ b/osfmk/arm64/tunables/tunables_h7.s @@ -0,0 +1,57 @@ +/* + * Copyright (c) 2019 Apple Inc. All rights reserved. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. + * + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ + */ +.macro APPLY_TUNABLES + /***** Tunables that apply to all cores, all revisions *****/ + + // Disable LSP flush with context switch to work around bug in LSP + // that can cause Typhoon to wedge when CONTEXTIDR is written. 
+ // + HID_SET_BITS ARM64_REG_HID0, ARM64_REG_HID0_LoopBuffDisb, $1 + HID_SET_BITS ARM64_REG_HID1, ARM64_REG_HID1_rccDisStallInactiveIexCtl, $1 + HID_SET_BITS ARM64_REG_HID3, ARM64_REG_HID3_DisXmonSnpEvictTriggerL2StarvationMode, $1 + HID_CLEAR_BITS ARM64_REG_HID5, (ARM64_REG_HID5_DisHwpLd | ARM64_REG_HID5_DisHwpSt), $1 + + // Change the default memcache data set ID from 0 to 15 for all agents + HID_SET_BITS ARM64_REG_HID8, (ARM64_REG_HID8_DataSetID0_VALUE | ARM64_REG_HID8_DataSetID1_VALUE), $1 + + /***** Tunables that apply to all P cores, all revisions *****/ + /* N/A */ + + /***** Tunables that apply to all E cores, all revisions *****/ + /* N/A */ + + /***** Tunables that apply to specific cores, all revisions *****/ + EXEC_COREEQ_REVALL MIDR_CAPRI, $0, $1 + HID_SET_BITS ARM64_REG_HID8, ARM64_REG_HID8_DataSetID2_VALUE, $1 + EXEC_END + + /***** Tunables that apply to specific cores and revisions *****/ + /* N/A */ + + isb sy +.endmacro \ No newline at end of file diff --git a/osfmk/arm64/tunables/tunables_h8.s b/osfmk/arm64/tunables/tunables_h8.s new file mode 100644 index 000000000..0f2a5d7a2 --- /dev/null +++ b/osfmk/arm64/tunables/tunables_h8.s @@ -0,0 +1,60 @@ +/* + * Copyright (c) 2019 Apple Inc. All rights reserved. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. + * + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. 
+ * + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ + */ +.macro APPLY_TUNABLES + /***** Tunables that apply to all cores, all revisions *****/ + HID_CLEAR_BITS ARM64_REG_HID11, ARM64_REG_HID11_DisFillC1BubOpt, $1 + + // Change the default memcache data set ID from 0 to 15 for all agents + HID_SET_BITS ARM64_REG_HID8, (ARM64_REG_HID8_DataSetID0_VALUE | ARM64_REG_HID8_DataSetID1_VALUE), $1 + HID_SET_BITS ARM64_REG_HID8, (ARM64_REG_HID8_DataSetID2_VALUE | ARM64_REG_HID8_DataSetID3_VALUE), $1 + + // Use 4-cycle MUL latency to avoid denormal stalls + HID_SET_BITS ARM64_REG_HID7, ARM64_REG_HID7_disNexFastFmul, $1 + + // disable reporting of TLB-multi-hit-error + // + HID_CLEAR_BITS ARM64_REG_LSU_ERR_STS, ARM64_REG_LSU_ERR_STS_L1DTlbMultiHitEN, $1 + + /***** Tunables that apply to all P cores, all revisions *****/ + /* N/A */ + + /***** Tunables that apply to all E cores, all revisions *****/ + /* N/A */ + + /***** Tunables that apply to specific cores, all revisions *****/ + /* N/A */ + + /***** Tunables that apply to specific cores and revisions *****/ + + // rdar://problem/36112905: Set CYC_CFG:skipInit to pull in isAlive by one DCLK + // to work around potential hang. Must only be applied to Maui C0. + EXEC_COREEQ_REVEQ MIDR_MAUI, CPU_VERSION_C0, $0, $1 + HID_SET_BITS ARM64_REG_CYC_CFG, ARM64_REG_CYC_CFG_skipInit, $1 + EXEC_END + isb sy +.endmacro \ No newline at end of file diff --git a/osfmk/arm64/tunables/tunables_h9.s b/osfmk/arm64/tunables/tunables_h9.s new file mode 100644 index 000000000..c44e91c77 --- /dev/null +++ b/osfmk/arm64/tunables/tunables_h9.s @@ -0,0 +1,61 @@ +/* + * Copyright (c) 2019 Apple Inc. All rights reserved. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. + * + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. 
+ * + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ + */ +.macro APPLY_TUNABLES + /***** Tunables that apply to all cores, all revisions *****/ + + // IC prefetch configuration + // + HID_INSERT_BITS ARM64_REG_HID0, ARM64_REG_HID0_ICPrefDepth_bmsk, ARM64_REG_HID0_ICPrefDepth_VALUE, $1 + HID_SET_BITS ARM64_REG_HID0, ARM64_REG_HID0_ICPrefLimitOneBrn, $1 + + // disable reporting of TLB-multi-hit-error + // + HID_CLEAR_BITS ARM64_REG_LSU_ERR_CTL, ARM64_REG_LSU_ERR_CTL_L1DTlbMultiHitEN, $1 + + // disable crypto fusion across decode groups + // + HID_SET_BITS ARM64_REG_HID1, ARM64_REG_HID1_disAESFuseAcrossGrp, $1 + + /***** Tunables that apply to all P cores, all revisions *****/ + /* N/A */ + + /***** Tunables that apply to all E cores, all revisions *****/ + /* N/A */ + + /***** Tunables that apply to specific cores, all revisions *****/ + EXEC_COREEQ_REVALL MIDR_MYST, $0, $1 + // Clear DisDcZvaCmdOnly + // Per Myst A0/B0 tunables document + // Myst: Confirm ACC Per-CPU Tunables + HID_CLEAR_BITS ARM64_REG_HID3, ARM64_REG_HID3_DisDcZvaCmdOnly, $1 + HID_CLEAR_BITS ARM64_REG_EHID3, ARM64_REG_HID3_DisDcZvaCmdOnly, $1 + EXEC_END + + /***** Tunables that apply to specific cores and revisions *****/ + /* N/A */ +.endmacro \ No newline at end of file diff --git a/osfmk/atm/Makefile b/osfmk/atm/Makefile index 88863e3b2..f10269b5d 100644 --- a/osfmk/atm/Makefile +++ b/osfmk/atm/Makefile @@ -54,7 +54,7 @@ ${MIGINCLUDES} : ${MIG_TYPES} ${MIG_UUHDRS} : \ %.h : %.defs - $(call makelog,$(ColorM)MIG$(Color0) $(ColorF)$@$(Color0)) + @$(LOG_MIG) "$@" $(_v)$(MIG) $(MIGFLAGS) \ -server /dev/null \ -user /dev/null \ @@ -63,7 +63,7 @@ ${MIG_UUHDRS} : \ ${MIG_USHDRS} : \ %_server.h : %.defs - $(call makelog,$(ColorM)MIG$(Color0) $(ColorF)$@$(Color0)) + @$(LOG_MIG) $@ $(_v)$(MIG) $(MIGFLAGS) \ -server /dev/null \ -user /dev/null \ @@ -101,7 +101,7 @@ ${COMP_FILES} : ${MIG_TYPES} ${MIG_KUSRC} : \ %_user.c : %.defs - $(call makelog,$(ColorM)MIG$(Color0) $(ColorF)$@$(Color0)) + @$(LOG_MIG) $@ $(_v)${MIG} ${MIGFLAGS} ${MIGKUFLAGS} \ -user $*_user.c \ -header $*.h \ @@ -111,7 +111,7 @@ ${MIG_KUSRC} : \ ${MIG_KSSRC}: \ %_server.c : %.defs - $(call makelog,$(ColorM)MIG$(Color0) $(ColorF)$@$(Color0)) + @$(LOG_MIG) $@ $(_v)${MIG} ${MIGFLAGS} ${MIGKSFLAGS} \ -user /dev/null \ -header /dev/null \ diff --git a/osfmk/atm/atm.c b/osfmk/atm/atm.c index ff1111080..11967ca38 100644 --- a/osfmk/atm/atm.c +++ b/osfmk/atm/atm.c @@ -27,153 +27,15 @@ */ #include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include #include - -#define MAX_ATM_VALUES (2 * 4096) -#define MAX_TRACE_BUFFER_SIZE (0x40000000) /* Restrict to 1GB per task */ - -#define ATM_VALUE_TO_HANDLE(x) (CAST_DOWN(atm_voucher_id_t, (x))) -#define HANDLE_TO_ATM_VALUE(x) (CAST_DOWN(atm_value_t, (x))) - -#define ATM_MAX_HASH_TABLE_SIZE (256) -#define AID_HASH_MASK (0xFF) -#define AID_TO_HASH(x) ((x) & (AID_HASH_MASK)) - -#define ATM_LIST_DEAD_MAX 15 - -#define AID_ARRAY_COUNT_MAX (256) - -struct atm_value_hash atm_value_hash_table[ATM_MAX_HASH_TABLE_SIZE]; -extern int maxproc; - -/* Global flag to disable ATM. ATM get value and memory registration will return error. 
*/ -boolean_t disable_atm = FALSE; - -#if DEVELOPMENT || DEBUG -queue_head_t atm_descriptors_list; -queue_head_t atm_values_list; -#endif - -ipc_voucher_attr_control_t voucher_attr_control; /* communication channel from ATM to voucher system */ -static zone_t atm_value_zone, atm_descriptors_zone, atm_link_objects_zone; - -static aid_t get_aid(void); -static mach_atm_subaid_t get_subaid(void); -static atm_value_t atm_value_alloc_init(aid_t); -static void atm_value_dealloc(atm_value_t atm_value); -static void atm_hash_table_init(void); -static kern_return_t atm_value_hash_table_insert(atm_value_t new_atm_value); -static void atm_value_hash_table_delete(atm_value_t atm_value); -static atm_value_t get_atm_value_from_aid(aid_t aid) __unused; -static kern_return_t atm_listener_insert(atm_value_t atm_value, atm_task_descriptor_t task_descriptor, atm_guard_t guard); -static void atm_listener_delete_all(atm_value_t atm_value); -static atm_task_descriptor_t atm_task_descriptor_alloc_init(mach_port_t trace_buffer, uint64_t buffer_size, __assert_only task_t task); -static void atm_task_descriptor_dealloc(atm_task_descriptor_t task_descriptor); -static kern_return_t atm_value_unregister(atm_value_t atm_value, atm_task_descriptor_t task_descriptor, atm_guard_t guard); -static kern_return_t atm_value_register(atm_value_t atm_value, atm_task_descriptor_t task_descriptor, atm_guard_t guard); -static kern_return_t atm_listener_delete(atm_value_t atm_value, atm_task_descriptor_t task_descriptor, atm_guard_t guard); -static void atm_link_dealloc(atm_link_object_t link_object); - -kern_return_t -atm_release_value( - ipc_voucher_attr_manager_t __assert_only manager, - mach_voucher_attr_key_t __assert_only key, - mach_voucher_attr_value_handle_t value, - mach_voucher_attr_value_reference_t sync); - -kern_return_t -atm_get_value( - ipc_voucher_attr_manager_t __assert_only manager, - mach_voucher_attr_key_t __assert_only key, - mach_voucher_attr_recipe_command_t command, - mach_voucher_attr_value_handle_array_t prev_values, - mach_msg_type_number_t __assert_only prev_value_count, - mach_voucher_attr_content_t recipe, - mach_voucher_attr_content_size_t recipe_size, - mach_voucher_attr_value_handle_t *out_value, - mach_voucher_attr_value_flags_t *out_flags, - ipc_voucher_t *out_value_voucher); - -kern_return_t -atm_extract_content( - ipc_voucher_attr_manager_t __assert_only manager, - mach_voucher_attr_key_t __assert_only key, - mach_voucher_attr_value_handle_array_t values, - mach_msg_type_number_t value_count, - mach_voucher_attr_recipe_command_t *out_command, - mach_voucher_attr_content_t out_recipe, - mach_voucher_attr_content_size_t *in_out_recipe_size); - -kern_return_t -atm_command( - ipc_voucher_attr_manager_t __assert_only manager, - mach_voucher_attr_key_t __assert_only key, - mach_voucher_attr_value_handle_array_t values, - mach_msg_type_number_t value_count, - mach_voucher_attr_command_t command, - mach_voucher_attr_content_t in_content, - mach_voucher_attr_content_size_t in_content_size, - mach_voucher_attr_content_t out_content, - mach_voucher_attr_content_size_t *in_out_content_size); - -void -atm_release(ipc_voucher_attr_manager_t __assert_only manager); - -/* - * communication channel from voucher system to ATM - */ -const struct ipc_voucher_attr_manager atm_manager = { - .ivam_release_value = atm_release_value, - .ivam_get_value = atm_get_value, - .ivam_extract_content = atm_extract_content, - .ivam_command = atm_command, - .ivam_release = atm_release, - .ivam_flags = IVAM_FLAGS_NONE, -}; - -#if 
DEVELOPMENT || DEBUG -decl_lck_mtx_data(, atm_descriptors_list_lock); -decl_lck_mtx_data(, atm_values_list_lock); - -lck_grp_t atm_dev_lock_grp; -lck_attr_t atm_dev_lock_attr; -lck_grp_attr_t atm_dev_lock_grp_attr; -#endif - -extern vm_map_t kernel_map; -/* - * Global aid. Incremented on each get_aid. - */ -aid_t global_aid; - -/* - * Global subaid. Incremented on each get_subaid. - */ -mach_atm_subaid_t global_subaid; - -/* - * Lock group attributes for atm sub system. - */ -lck_grp_t atm_lock_grp; -lck_attr_t atm_lock_attr; -lck_grp_attr_t atm_lock_grp_attr; +#include /* * Global that is set by diagnosticd and readable by userspace * via the commpage. */ static uint32_t atm_diagnostic_config; +static bool disable_atm; /* * Routine: atm_init @@ -181,15 +43,14 @@ static uint32_t atm_diagnostic_config; * Returns: None. */ void -atm_init() +atm_init(void) { - kern_return_t kr = KERN_SUCCESS; char temp_buf[20]; /* Disable atm if disable_atm present in device-tree properties or in boot-args */ if ((PE_get_default("kern.disable_atm", temp_buf, sizeof(temp_buf))) || (PE_parse_boot_argn("-disable_atm", temp_buf, sizeof(temp_buf)))) { - disable_atm = TRUE; + disable_atm = true; } if (!PE_parse_boot_argn("atm_diagnostic_config", &atm_diagnostic_config, sizeof(atm_diagnostic_config))) { @@ -198,895 +59,19 @@ atm_init() } } - /* setup zones for descriptors, values and link objects */ - atm_value_zone = zinit(sizeof(struct atm_value), - MAX_ATM_VALUES * sizeof(struct atm_value), - sizeof(struct atm_value), - "atm_values"); - - atm_descriptors_zone = zinit(sizeof(struct atm_task_descriptor), - MAX_ATM_VALUES * sizeof(struct atm_task_descriptor), - sizeof(struct atm_task_descriptor), - "atm_task_descriptors"); - - atm_link_objects_zone = zinit(sizeof(struct atm_link_object), - MAX_ATM_VALUES * sizeof(struct atm_link_object), - sizeof(struct atm_link_object), - "atm_link_objects"); - - /* Initialize atm lock group and lock attributes. */ - lck_grp_attr_setdefault(&atm_lock_grp_attr); - lck_grp_init(&atm_lock_grp, "atm_lock", &atm_lock_grp_attr); - lck_attr_setdefault(&atm_lock_attr); - - global_aid = 1; - global_subaid = 1; - atm_hash_table_init(); - -#if DEVELOPMENT || DEBUG - /* Initialize global atm development lock group and lock attributes. */ - lck_grp_attr_setdefault(&atm_dev_lock_grp_attr); - lck_grp_init(&atm_dev_lock_grp, "atm_dev_lock", &atm_dev_lock_grp_attr); - lck_attr_setdefault(&atm_dev_lock_attr); - - lck_mtx_init(&atm_descriptors_list_lock, &atm_dev_lock_grp, &atm_dev_lock_attr); - lck_mtx_init(&atm_values_list_lock, &atm_dev_lock_grp, &atm_dev_lock_attr); - - queue_init(&atm_descriptors_list); - queue_init(&atm_values_list); -#endif - - /* Register the atm manager with the Vouchers sub system. */ - kr = ipc_register_well_known_mach_voucher_attr_manager( - &atm_manager, - 0, - MACH_VOUCHER_ATTR_KEY_ATM, - &voucher_attr_control); - if (kr != KERN_SUCCESS) { - panic("ATM subsystem initialization failed"); - } - kprintf("ATM subsystem is initialized\n"); - return; } - -/* - * ATM Resource Manager Routines. - */ - - -/* - * Routine: atm_release_value - * Purpose: Release a value, if sync matches the sync count in value. - * Returns: KERN_SUCCESS: on Successful deletion. - * KERN_FAILURE: if sync value does not matches. 
- */ -kern_return_t -atm_release_value( - ipc_voucher_attr_manager_t __assert_only manager, - mach_voucher_attr_key_t __assert_only key, - mach_voucher_attr_value_handle_t value, - mach_voucher_attr_value_reference_t sync) -{ - atm_value_t atm_value = ATM_VALUE_NULL; - - assert(MACH_VOUCHER_ATTR_KEY_ATM == key); - assert(manager == &atm_manager); - - atm_value = HANDLE_TO_ATM_VALUE(value); - if (atm_value == VAM_DEFAULT_VALUE) { - /* Return success for default value */ - return KERN_SUCCESS; - } - - if (atm_value->sync != sync) { - return KERN_FAILURE; - } - - /* Deallocate the atm value. */ - atm_value_hash_table_delete(atm_value); - atm_value_dealloc(atm_value); - return KERN_SUCCESS; -} - - /* - * Routine: atm_get_value - */ -kern_return_t -atm_get_value( - ipc_voucher_attr_manager_t __assert_only manager, - mach_voucher_attr_key_t __assert_only key, - mach_voucher_attr_recipe_command_t command, - mach_voucher_attr_value_handle_array_t prev_values, - mach_msg_type_number_t __assert_only prev_value_count, - mach_voucher_attr_content_t __unused recipe, - mach_voucher_attr_content_size_t __unused recipe_size, - mach_voucher_attr_value_handle_t *out_value, - mach_voucher_attr_value_flags_t *out_flags, - ipc_voucher_t *out_value_voucher) -{ - atm_value_t atm_value = ATM_VALUE_NULL; - mach_voucher_attr_value_handle_t atm_handle; - atm_task_descriptor_t task_descriptor = ATM_TASK_DESCRIPTOR_NULL; - task_t task; - aid_t aid; - atm_guard_t guard; - natural_t i; - kern_return_t kr = KERN_SUCCESS; - - assert(MACH_VOUCHER_ATTR_KEY_ATM == key); - assert(manager == &atm_manager); - - /* never an out voucher */ - *out_value_voucher = IPC_VOUCHER_NULL; - *out_flags = MACH_VOUCHER_ATTR_VALUE_FLAGS_NONE; - - if (disable_atm || (atm_get_diagnostic_config() & ATM_TRACE_DISABLE)) { - return KERN_NOT_SUPPORTED; - } - - switch (command) { - case MACH_VOUCHER_ATTR_ATM_REGISTER: - - for (i = 0; i < prev_value_count; i++) { - atm_handle = prev_values[i]; - atm_value = HANDLE_TO_ATM_VALUE(atm_handle); - - if (atm_value == VAM_DEFAULT_VALUE) { - continue; - } - - if (recipe_size != sizeof(atm_guard_t)) { - kr = KERN_INVALID_ARGUMENT; - break; - } - memcpy(&guard, recipe, sizeof(atm_guard_t)); - - task = current_task(); - task_descriptor = task->atm_context; - - kr = atm_value_register(atm_value, task_descriptor, guard); - if (kr != KERN_SUCCESS) { - break; - } - - /* Increment sync value. */ - atm_sync_reference_internal(atm_value); - - *out_value = atm_handle; - return kr; - } - - *out_value = ATM_VALUE_TO_HANDLE(VAM_DEFAULT_VALUE); - break; - - case MACH_VOUCHER_ATTR_ATM_CREATE: - - /* Handle the old case where aid value is created in kernel */ - if (recipe_size == 0) { - aid = get_aid(); - } else if (recipe_size == sizeof(aid_t)) { - memcpy(&aid, recipe, sizeof(aid_t)); - } else { - kr = KERN_INVALID_ARGUMENT; - break; - } - - /* Allocate a new atm value. */ - atm_value = atm_value_alloc_init(aid); - if (atm_value == ATM_VALUE_NULL) { - kr = KERN_RESOURCE_SHORTAGE; - break; - } -redrive: - kr = atm_value_hash_table_insert(atm_value); - if (kr != KERN_SUCCESS) { - if (recipe_size == 0) { - atm_value->aid = get_aid(); - goto redrive; - } - atm_value_dealloc(atm_value); - break; - } - - *out_value = ATM_VALUE_TO_HANDLE(atm_value); - break; - - case MACH_VOUCHER_ATTR_ATM_NULL: - default: - kr = KERN_INVALID_ARGUMENT; - break; - } - - return kr; -} - - -/* - * Routine: atm_extract_content - * Purpose: Extract a set of aid from an array of voucher values. - * Returns: KERN_SUCCESS: on Success. 
- * KERN_FAILURE: one of the value is not present in the hash. - * KERN_NO_SPACE: insufficeint buffer provided to fill an array of aid. - */ -kern_return_t -atm_extract_content( - ipc_voucher_attr_manager_t __assert_only manager, - mach_voucher_attr_key_t __assert_only key, - mach_voucher_attr_value_handle_array_t values, - mach_msg_type_number_t value_count, - mach_voucher_attr_recipe_command_t *out_command, - mach_voucher_attr_content_t out_recipe, - mach_voucher_attr_content_size_t *in_out_recipe_size) -{ - atm_value_t atm_value; - mach_voucher_attr_value_handle_t atm_handle; - natural_t i; - - assert(MACH_VOUCHER_ATTR_KEY_ATM == key); - assert(manager == &atm_manager); - - for (i = 0; i < value_count && *in_out_recipe_size > 0; i++) { - atm_handle = values[i]; - atm_value = HANDLE_TO_ATM_VALUE(atm_handle); - if (atm_value == VAM_DEFAULT_VALUE) { - continue; - } - - if ((sizeof(aid_t)) > *in_out_recipe_size) { - *in_out_recipe_size = 0; - return KERN_NO_SPACE; - } - - memcpy(&out_recipe[0], &atm_value->aid, sizeof(aid_t)); - *out_command = MACH_VOUCHER_ATTR_ATM_NULL; - *in_out_recipe_size = sizeof(aid_t); - return KERN_SUCCESS; - } - - *in_out_recipe_size = 0; - return KERN_SUCCESS; -} - -/* - * Routine: atm_command - * Purpose: Execute a command against a set of ATM values. - * Returns: KERN_SUCCESS: On successful execution of command. - * KERN_FAILURE: On failure. - */ -kern_return_t -atm_command( - ipc_voucher_attr_manager_t __assert_only manager, - mach_voucher_attr_key_t __assert_only key, - mach_voucher_attr_value_handle_array_t values, - mach_msg_type_number_t value_count, - mach_voucher_attr_command_t command, - mach_voucher_attr_content_t in_content, - mach_voucher_attr_content_size_t in_content_size, - mach_voucher_attr_content_t out_content, - mach_voucher_attr_content_size_t *out_content_size) -{ - assert(MACH_VOUCHER_ATTR_KEY_ATM == key); - assert(manager == &atm_manager); - atm_value_t atm_value = ATM_VALUE_NULL; - natural_t i = 0; - mach_atm_subaid_t *subaid_array = NULL; - mach_atm_subaid_t next_subaid = 0; - uint32_t aid_array_count = 0; - atm_task_descriptor_t task_descriptor = ATM_TASK_DESCRIPTOR_NULL; - task_t task; - kern_return_t kr = KERN_SUCCESS; - atm_guard_t guard; - - switch (command) { - case ATM_ACTION_COLLECT: - /* Fall through */ - - case ATM_ACTION_LOGFAIL: - return KERN_NOT_SUPPORTED; - - case ATM_FIND_MIN_SUB_AID: - if ((in_content_size / sizeof(aid_t)) > (*out_content_size / sizeof(mach_atm_subaid_t))) { - return KERN_FAILURE; - } - - aid_array_count = in_content_size / sizeof(aid_t); - if (aid_array_count > AID_ARRAY_COUNT_MAX) { - return KERN_FAILURE; - } - - subaid_array = (mach_atm_subaid_t *) (void *) out_content; - for (i = 0; i < aid_array_count; i++) { - subaid_array[i] = ATM_SUBAID32_MAX; - } - - *out_content_size = aid_array_count * sizeof(mach_atm_subaid_t); - - kr = KERN_SUCCESS; - - break; - - case ATM_ACTION_UNREGISTER: - /* find the first non-default atm_value */ - for (i = 0; i < value_count; i++) { - atm_value = HANDLE_TO_ATM_VALUE(values[i]); - if (atm_value != VAM_DEFAULT_VALUE) { - break; - } - } - - /* if we are not able to find any atm values - * in stack then this call was made in error - */ - if (atm_value == NULL) { - return KERN_FAILURE; - } - if (in_content == NULL || in_content_size != sizeof(atm_guard_t)) { - return KERN_INVALID_ARGUMENT; - } - - memcpy(&guard, in_content, sizeof(atm_guard_t)); - task = current_task(); - task_descriptor = task->atm_context; - - kr = atm_value_unregister(atm_value, task_descriptor, guard); - 
- break; - - case ATM_ACTION_REGISTER: - for (i = 0; i < value_count; i++) { - atm_value = HANDLE_TO_ATM_VALUE(values[i]); - if (atm_value != VAM_DEFAULT_VALUE) { - break; - } - } - /* if we are not able to find any atm values - * in stack then this call was made in error - */ - if (atm_value == NULL) { - return KERN_FAILURE; - } - if (in_content == NULL || in_content_size != sizeof(atm_guard_t)) { - return KERN_INVALID_ARGUMENT; - } - - memcpy(&guard, in_content, sizeof(atm_guard_t)); - task = current_task(); - task_descriptor = task->atm_context; - - kr = atm_value_register(atm_value, task_descriptor, guard); - - break; - - case ATM_ACTION_GETSUBAID: - if (out_content == NULL || *out_content_size != sizeof(mach_atm_subaid_t)) { - return KERN_FAILURE; - } - - next_subaid = get_subaid(); - memcpy(out_content, &next_subaid, sizeof(mach_atm_subaid_t)); - break; - - default: - kr = KERN_INVALID_ARGUMENT; - break; - } - - return kr; -} - - -void -atm_release( - ipc_voucher_attr_manager_t __assert_only manager) -{ - assert(manager == &atm_manager); -} - - -/* - * Routine: atm_value_alloc_init - * Purpose: Allocates an atm value struct and initialize it. - * Returns: atm_value_t: On Success with a sync count on atm_value. - * ATM_VALUE_NULL: On failure. - */ -static atm_value_t -atm_value_alloc_init(aid_t aid) -{ - atm_value_t new_atm_value = ATM_VALUE_NULL; - - new_atm_value = (atm_value_t) zalloc(atm_value_zone); - if (new_atm_value == ATM_VALUE_NULL) { - panic("Ran out of ATM values structure.\n\n"); - } - - new_atm_value->aid = aid; - queue_init(&new_atm_value->listeners); - new_atm_value->sync = 1; - new_atm_value->listener_count = 0; - os_ref_init(&new_atm_value->reference_count, NULL); - lck_mtx_init(&new_atm_value->listener_lock, &atm_lock_grp, &atm_lock_attr); - -#if DEVELOPMENT || DEBUG - lck_mtx_lock(&atm_values_list_lock); - queue_enter(&atm_values_list, new_atm_value, atm_value_t, value_elt); - lck_mtx_unlock(&atm_values_list_lock); -#endif - return new_atm_value; -} - - -/* - * Routine: get_aid - * Purpose: Increment the global aid counter and return it. - * Returns: aid - */ -static aid_t -get_aid() -{ - aid_t aid; - aid = (aid_t)OSIncrementAtomic64((SInt64 *)&global_aid); - return aid; -} - - -/* - * Routine: get_subaid - * Purpose: Increment the global subaid counter and return it. - * Returns: subaid - */ -static mach_atm_subaid_t -get_subaid() -{ - mach_atm_subaid_t next_subaid; - next_subaid = (mach_atm_subaid_t)OSIncrementAtomic64((SInt64 *)&global_subaid); - return next_subaid; -} - - -/* - * Routine: atm_value_dealloc - * Purpose: Drops the reference on atm value and deallocates. - * Deletes all the listeners on deallocation. - * Returns: None. - */ -static void -atm_value_dealloc(atm_value_t atm_value) -{ - if (os_ref_release(&atm_value->reference_count) == 0) { - /* Free up the atm value and also remove all the listeners. */ - atm_listener_delete_all(atm_value); - - lck_mtx_destroy(&atm_value->listener_lock, &atm_lock_grp); - -#if DEVELOPMENT || DEBUG - lck_mtx_lock(&atm_values_list_lock); - queue_remove(&atm_values_list, atm_value, atm_value_t, value_elt); - lck_mtx_unlock(&atm_values_list_lock); -#endif - zfree(atm_value_zone, atm_value); - } -} - - -/* - * Routine: atm_hash_table_init - * Purpose: Initialize the atm aid hash table. - * Returns: None. 
- */ -static void -atm_hash_table_init() -{ - int i; - - for (i = 0; i < ATM_MAX_HASH_TABLE_SIZE; i++) { - queue_init(&atm_value_hash_table[i].hash_list); - lck_mtx_init(&atm_value_hash_table[i].hash_list_lock, &atm_lock_grp, &atm_lock_attr); - } -} - - -/* - * Routine: atm_value_hash_table_insert - * Purpose: Insert an atm value in the hash table. - * Returns: KERN_SUCCESS on success. - * KERN_NAME_EXISTS if atm value already in the hash table. - */ -static kern_return_t -atm_value_hash_table_insert(atm_value_t new_atm_value) -{ - int hash_index; - atm_value_hash_t hash_list_head; - aid_t aid = new_atm_value->aid; - atm_value_t next; - - hash_index = AID_TO_HASH(aid); - hash_list_head = &atm_value_hash_table[hash_index]; - - /* Lock the atm list and search for the aid. */ - lck_mtx_lock(&hash_list_head->hash_list_lock); - - queue_iterate(&hash_list_head->hash_list, next, atm_value_t, vid_hash_elt) { - if (next->aid == aid) { - /* - * aid found. return error. - */ - lck_mtx_unlock(&hash_list_head->hash_list_lock); - return KERN_NAME_EXISTS; - } - } - - /* Enter the aid in hash and return success. */ - queue_enter(&hash_list_head->hash_list, new_atm_value, atm_value_t, vid_hash_elt); - lck_mtx_unlock(&hash_list_head->hash_list_lock); - return KERN_SUCCESS; -} - - -/* - * Routine: atm_value_hash_table_delete - * Purpose: Delete the atm value from the hash table. - * Returns: None. - */ -static void -atm_value_hash_table_delete(atm_value_t atm_value) -{ - int hash_index; - atm_value_hash_t hash_list_head; - aid_t aid = atm_value->aid; - - hash_index = AID_TO_HASH(aid); - hash_list_head = &atm_value_hash_table[hash_index]; - - lck_mtx_lock(&hash_list_head->hash_list_lock); - queue_remove(&hash_list_head->hash_list, atm_value, atm_value_t, vid_hash_elt); - lck_mtx_unlock(&hash_list_head->hash_list_lock); -} - - -/* - * Routine: get_atm_value_from_aid - * Purpose: Search a given aid in atm value hash table and - * return the atm value stucture. - * Returns: atm value structure if aid found. - * ATM_VALUE_NULL: If aid not found in atm value hash table. - */ -static atm_value_t -get_atm_value_from_aid(aid_t aid) -{ - int hash_index; - atm_value_hash_t hash_list_head; - atm_value_t next; - - hash_index = AID_TO_HASH(aid); - hash_list_head = &atm_value_hash_table[hash_index]; - - /* Lock the atm list and search for the aid. */ - lck_mtx_lock(&hash_list_head->hash_list_lock); - - queue_iterate(&hash_list_head->hash_list, next, atm_value_t, vid_hash_elt) { - if (next->aid == aid) { - /* - * Aid found. Incerease ref count and return - * the atm value structure. - */ - os_ref_retain(&next->reference_count); - lck_mtx_unlock(&hash_list_head->hash_list_lock); - return next; - } - } - lck_mtx_unlock(&hash_list_head->hash_list_lock); - return ATM_VALUE_NULL; -} - - -/* - * Routine: atm_listener_insert - * Purpose: Insert a listener to an atm value. - * Returns: KERN_SUCCESS on success. - * KERN_FAILURE if the task is already present as a listener. 
- */ -static kern_return_t -atm_listener_insert( - atm_value_t atm_value, - atm_task_descriptor_t task_descriptor, - atm_guard_t guard) -{ - atm_link_object_t new_link_object; - atm_link_object_t next, elem; - int32_t freed_count = 0, dead_but_not_freed = 0, listener_count; - boolean_t element_found = FALSE; - queue_head_t free_listeners; - - new_link_object = (atm_link_object_t) zalloc(atm_link_objects_zone); - new_link_object->descriptor = task_descriptor; - os_ref_init(&new_link_object->reference_count, NULL); - new_link_object->guard = guard; - - /* Get a reference on the task descriptor */ - os_ref_retain(&task_descriptor->reference_count); - queue_init(&free_listeners); - listener_count = atm_value->listener_count; - - /* Check if the task is already on the listener list */ - lck_mtx_lock(&atm_value->listener_lock); - - next = (atm_link_object_t)(void *) queue_first(&atm_value->listeners); - while (!queue_end(&atm_value->listeners, (queue_entry_t)next)) { - elem = next; - next = (atm_link_object_t)(void *) queue_next(&next->listeners_element); - - /* Check for dead tasks */ - if (elem->descriptor->flags == ATM_TASK_DEAD) { - if ((dead_but_not_freed > ATM_LIST_DEAD_MAX) || elem->guard == 0) { - queue_remove(&atm_value->listeners, elem, atm_link_object_t, listeners_element); - queue_enter(&free_listeners, elem, atm_link_object_t, listeners_element); - atm_listener_count_decr_internal(atm_value); - freed_count++; - } else { - dead_but_not_freed++; - } - continue; - } - - if (element_found) { - continue; - } - - if (elem->descriptor == task_descriptor) { - /* Increment reference count on Link object. */ - os_ref_retain(&elem->reference_count); - - /* Replace the guard with the new one, the old guard is anyways on unregister path. */ - elem->guard = guard; - element_found = TRUE; - KERNEL_DEBUG_CONSTANT((ATM_CODE(ATM_GETVALUE_INFO, (ATM_VALUE_REPLACED))) | DBG_FUNC_NONE, - VM_KERNEL_ADDRPERM(atm_value), atm_value->aid, guard, 0, 0); - } - } - - if (element_found) { - lck_mtx_unlock(&atm_value->listener_lock); - /* Drop the extra reference on task descriptor taken by this function. */ - atm_task_descriptor_dealloc(task_descriptor); - zfree(atm_link_objects_zone, new_link_object); - } else { - KERNEL_DEBUG_CONSTANT((ATM_CODE(ATM_GETVALUE_INFO, (ATM_VALUE_ADDED))) | DBG_FUNC_NONE, - VM_KERNEL_ADDRPERM(atm_value), atm_value->aid, guard, 0, 0); - - queue_enter(&atm_value->listeners, new_link_object, atm_link_object_t, listeners_element); - atm_listener_count_incr_internal(atm_value); - lck_mtx_unlock(&atm_value->listener_lock); - } - - /* Free the link objects */ - while (!queue_empty(&free_listeners)) { - queue_remove_first(&free_listeners, next, atm_link_object_t, listeners_element); - - /* Deallocate the link object */ - atm_link_dealloc(next); - } - - KERNEL_DEBUG_CONSTANT((ATM_CODE(ATM_SUBAID_INFO, (ATM_LINK_LIST_TRIM))) | DBG_FUNC_NONE, - listener_count, freed_count, dead_but_not_freed, VM_KERNEL_ADDRPERM(atm_value), 1); - - return KERN_SUCCESS; -} - - -/* - * Routine: atm_listener_delete_all - * Purpose: Deletes all the listeners for an atm value. - * Returns: None. - */ -static void -atm_listener_delete_all(atm_value_t atm_value) -{ - atm_link_object_t next; - - while (!queue_empty(&atm_value->listeners)) { - queue_remove_first(&atm_value->listeners, next, atm_link_object_t, listeners_element); - - /* Deallocate the link object */ - atm_link_dealloc(next); - } -} - - -/* - * Routine: atm_listener_delete - * Purpose: Deletes a listerner for an atm value. 
- * Returns: KERN_SUCCESS on successful unregister. - * KERN_INVALID_VALUE on finding a different guard. - * KERN_FAILURE on failure. - */ -static kern_return_t -atm_listener_delete( - atm_value_t atm_value, - atm_task_descriptor_t task_descriptor, - atm_guard_t guard) -{ - queue_head_t free_listeners; - atm_link_object_t next, elem; - kern_return_t kr = KERN_FAILURE; - - queue_init(&free_listeners); - - lck_mtx_lock(&atm_value->listener_lock); - - next = (atm_link_object_t)(void *) queue_first(&atm_value->listeners); - while (!queue_end(&atm_value->listeners, (queue_entry_t)next)) { - elem = next; - next = (atm_link_object_t)(void *) queue_next(&next->listeners_element); - - if (elem->descriptor == task_descriptor) { - if (elem->guard == guard) { - KERNEL_DEBUG_CONSTANT((ATM_CODE(ATM_UNREGISTER_INFO, - (ATM_VALUE_UNREGISTERED))) | DBG_FUNC_NONE, - VM_KERNEL_ADDRPERM(atm_value), atm_value->aid, guard, os_ref_get_count(&elem->reference_count), 0); - elem->guard = 0; - kr = KERN_SUCCESS; - } else { - KERNEL_DEBUG_CONSTANT((ATM_CODE(ATM_UNREGISTER_INFO, - (ATM_VALUE_DIFF_MAILBOX))) | DBG_FUNC_NONE, - VM_KERNEL_ADDRPERM(atm_value), atm_value->aid, elem->guard, os_ref_get_count(&elem->reference_count), 0); - kr = KERN_INVALID_VALUE; - } - if (os_ref_release(&elem->reference_count) == 0) { - queue_remove(&atm_value->listeners, elem, atm_link_object_t, listeners_element); - queue_enter(&free_listeners, elem, atm_link_object_t, listeners_element); - atm_listener_count_decr_internal(atm_value); - } - break; - } - } - lck_mtx_unlock(&atm_value->listener_lock); - - while (!queue_empty(&free_listeners)) { - queue_remove_first(&free_listeners, next, atm_link_object_t, listeners_element); - - /* Deallocate the link object */ - atm_link_dealloc(next); - } - return kr; -} - - -/* - * Routine: atm_descriptor_alloc_init - * Purpose: Allocate an atm task descriptor and initialize it and takes a reference. - * Returns: atm task descriptor: On success. - * NULL: on error. - */ -static atm_task_descriptor_t -atm_task_descriptor_alloc_init( - mach_port_t trace_buffer, - uint64_t buffer_size, - task_t __assert_only task) -{ - atm_task_descriptor_t new_task_descriptor; - - new_task_descriptor = (atm_task_descriptor_t) zalloc(atm_descriptors_zone); - - new_task_descriptor->trace_buffer = trace_buffer; - new_task_descriptor->trace_buffer_size = buffer_size; - os_ref_init(&new_task_descriptor->reference_count, NULL); - new_task_descriptor->flags = 0; - lck_mtx_init(&new_task_descriptor->lock, &atm_lock_grp, &atm_lock_attr); - -#if DEVELOPMENT || DEBUG - new_task_descriptor->task = task; - lck_mtx_lock(&atm_descriptors_list_lock); - queue_enter(&atm_descriptors_list, new_task_descriptor, atm_task_descriptor_t, descriptor_elt); - lck_mtx_unlock(&atm_descriptors_list_lock); -#endif - - return new_task_descriptor; -} - - -/* - * Routine: atm_task_descriptor_dealloc - * Prupose: Drops the reference on atm descriptor. - * Returns: None. 
- */ -static void -atm_task_descriptor_dealloc(atm_task_descriptor_t task_descriptor) -{ - if (os_ref_release(&task_descriptor->reference_count) == 0) { -#if DEVELOPMENT || DEBUG - lck_mtx_lock(&atm_descriptors_list_lock); - queue_remove(&atm_descriptors_list, task_descriptor, atm_task_descriptor_t, descriptor_elt); - lck_mtx_unlock(&atm_descriptors_list_lock); -#endif - /* release the send right for the named memory entry */ - ipc_port_release_send(task_descriptor->trace_buffer); - lck_mtx_destroy(&task_descriptor->lock, &atm_lock_grp); - zfree(atm_descriptors_zone, task_descriptor); - } -} - - -/* - * Routine: atm_link_dealloc - * Prupose: Drops the reference on link object. + * Routine: atm_reset + * Purpose: re-initialize the atm subsystem (e.g. for userspace reboot) * Returns: None. */ -static void -atm_link_dealloc(atm_link_object_t link_object) -{ - /* Drop the reference on atm task descriptor. */ - atm_task_descriptor_dealloc(link_object->descriptor); - zfree(atm_link_objects_zone, link_object); -} - - -/* - * Routine: atm_register_trace_memory - * Purpose: Registers trace memory for a task. - * Returns: KERN_SUCCESS: on Success. - * KERN_FAILURE: on Error. - */ -kern_return_t -atm_register_trace_memory( - task_t task, - uint64_t trace_buffer_address, - uint64_t buffer_size) +void +atm_reset(void) { - atm_task_descriptor_t task_descriptor; - mach_port_t trace_buffer = MACH_PORT_NULL; - kern_return_t kr = KERN_SUCCESS; - - if (disable_atm || (atm_get_diagnostic_config() & ATM_TRACE_DISABLE)) { - return KERN_NOT_SUPPORTED; - } - - if (task != current_task()) { - return KERN_INVALID_ARGUMENT; - } - - if (task->atm_context != NULL - || (void *)trace_buffer_address == NULL - || buffer_size == 0 - || (buffer_size & PAGE_MASK) != 0 - || buffer_size > MAX_TRACE_BUFFER_SIZE) { - return KERN_INVALID_ARGUMENT; - } - - vm_map_t map = current_map(); - memory_object_size_t mo_size = (memory_object_size_t) buffer_size; - kr = mach_make_memory_entry_64(map, - &mo_size, - (mach_vm_offset_t)trace_buffer_address, - VM_PROT_READ, - &trace_buffer, - NULL); - if (kr != KERN_SUCCESS) { - return kr; - } - - task_descriptor = atm_task_descriptor_alloc_init(trace_buffer, buffer_size, task); - if (task_descriptor == ATM_TASK_DESCRIPTOR_NULL) { - ipc_port_release_send(trace_buffer); - return KERN_NO_SPACE; - } - - task_lock(task); - if (task->atm_context == NULL) { - task->atm_context = task_descriptor; - kr = KERN_SUCCESS; - } else { - kr = KERN_FAILURE; - } - task_unlock(task); - - if (kr != KERN_SUCCESS) { - /* undo the mapping and allocations since we failed to hook descriptor to task */ - atm_task_descriptor_dealloc(task_descriptor); - } - return KERN_SUCCESS; + atm_init(); + commpage_update_atm_diagnostic_config(atm_diagnostic_config); } /* @@ -1095,7 +80,6 @@ atm_register_trace_memory( * the new value. * Returns: Error if ATM is disabled. */ -extern uint32_t atm_diagnostic_config; /* Proxied to commpage for fast user access */ kern_return_t atm_set_diagnostic_config(uint32_t diagnostic_config) { @@ -1109,7 +93,6 @@ atm_set_diagnostic_config(uint32_t diagnostic_config) return KERN_SUCCESS; } - /* * Routine: atm_get_diagnostic_config * Purpose: Get global atm_diagnostic_config. @@ -1120,63 +103,3 @@ atm_get_diagnostic_config(void) { return atm_diagnostic_config; } - - -/* - * Routine: atm_value_unregister - * Purpose: Unregisters a process from an activity id. - * Returns: KERN_SUCCESS on successful unregister. - * KERN_INVALID_VALUE on finding a diff guard. - * KERN_FAILURE on failure. 
- */ -static kern_return_t -atm_value_unregister( - atm_value_t atm_value, - atm_task_descriptor_t task_descriptor, - atm_guard_t guard) -{ - kern_return_t kr; - - if (task_descriptor == ATM_TASK_DESCRIPTOR_NULL) { - return KERN_INVALID_TASK; - } - - kr = atm_listener_delete(atm_value, task_descriptor, guard); - return kr; -} - - -/* - * Routine: atm_value_register - * Purpose: Registers a process for an activity id. - * Returns: KERN_SUCCESS on successful register. - * KERN_INVALID_TASK on finding a null task atm context. - * KERN_FAILURE on failure. - */ -static kern_return_t -atm_value_register( - atm_value_t atm_value, - atm_task_descriptor_t task_descriptor, - atm_guard_t guard) -{ - kern_return_t kr; - - if (task_descriptor == ATM_TASK_DESCRIPTOR_NULL) { - return KERN_INVALID_TASK; - } - - kr = atm_listener_insert(atm_value, task_descriptor, guard); - return kr; -} - - -void -atm_task_descriptor_destroy(atm_task_descriptor_t task_descriptor) -{ - /* Mark the task dead in the task descriptor to make task descriptor eligible for cleanup. */ - lck_mtx_lock(&task_descriptor->lock); - task_descriptor->flags = ATM_TASK_DEAD; - lck_mtx_unlock(&task_descriptor->lock); - - atm_task_descriptor_dealloc(task_descriptor); -} diff --git a/osfmk/atm/atm_internal.h b/osfmk/atm/atm_internal.h index a8a4aace6..7a371e0e0 100644 --- a/osfmk/atm/atm_internal.h +++ b/osfmk/atm/atm_internal.h @@ -34,88 +34,14 @@ #include #include -#ifdef MACH_KERNEL_PRIVATE - -#include -#include -#include -#include - -/* Flags for atm task descriptor */ -#define ATM_TASK_DEAD 0x1 - -/* Default value for Voucher Attribute Manager for ATM */ -#define VAM_DEFAULT_VALUE NULL - -typedef mach_voucher_attr_value_handle_t atm_voucher_id_t; - -struct atm_task_descriptor { - decl_lck_mtx_data(, lock); /* lock to protect reference count */ - mach_port_t trace_buffer; /* named memory entry registered by user */ - uint64_t trace_buffer_size; /* size of the trace_buffer registered */ - os_refcnt_t reference_count; - uint8_t flags; -#if DEVELOPMENT || DEBUG - task_t task; /* task pointer for debugging purposes */ - queue_chain_t descriptor_elt; /* global chain of all descriptors */ -#endif -}; - -typedef struct atm_task_descriptor *atm_task_descriptor_t; -#define ATM_TASK_DESCRIPTOR_NULL NULL - -struct atm_value { - aid_t aid; /* activity id */ - queue_head_t listeners; /* List of listeners who register for this activity */ - decl_lck_mtx_data(, listener_lock); /* Lock to protect listener list */ - queue_chain_t vid_hash_elt; /* Next hash element in the global hash table */ -#if DEVELOPMENT || DEBUG - queue_chain_t value_elt; /* global chain of all values */ -#endif - uint32_t sync; /* Made ref count given to voucher sub system. */ - - uint32_t listener_count; - os_refcnt_t reference_count; /* use count on the atm value, 1 taken by the global hash table */ -}; - -#define atm_listener_count_incr_internal(elem) \ - (os_atomic_inc(&(elem)->listener_count, relaxed)) - -#define atm_listener_count_decr_internal(elem) \ - (os_atomic_dec(&(elem)->listener_count, relaxed)) - -#define atm_sync_reference_internal(elem) \ - (os_atomic_inc(&(elem)->sync, relaxed)) - -typedef struct atm_value *atm_value_t; -#define ATM_VALUE_NULL NULL - -/* Flags for atm link objects */ -#define ATM_LINK_REMOVE 0x1 - -struct atm_link_object { - atm_task_descriptor_t descriptor; - queue_chain_t listeners_element; /* Head is atm_value->listeners. */ - atm_guard_t guard; /* Guard registered by the user for an activity. 
*/ - os_refcnt_t reference_count; -}; - -typedef struct atm_link_object *atm_link_object_t; - -struct atm_value_hash { - queue_head_t hash_list; - decl_lck_mtx_data(, hash_list_lock); /* lock to protect bucket list. */ -}; - -typedef struct atm_value_hash *atm_value_hash_t; - +#ifdef MACH_KERNEL_PRIVATE void atm_init(void); -void atm_task_descriptor_destroy(atm_task_descriptor_t task_descriptor); -kern_return_t atm_register_trace_memory(task_t task, uint64_t trace_buffer_address, uint64_t buffer_size); -kern_return_t atm_send_proc_inspect_notification(task_t task, int32_t traced_pid, uint64_t traced_uniqueid); - #endif /* MACH_KERNEL_PRIVATE */ +#ifdef XNU_KERNEL_PRIVATE +void atm_reset(void); +#endif /* XNU_KERNEL_PRIVATE */ + kern_return_t atm_set_diagnostic_config(uint32_t); uint32_t atm_get_diagnostic_config(void); diff --git a/osfmk/atm/atm_types.defs b/osfmk/atm/atm_types.defs index e2654eb9b..45b2784e6 100644 --- a/osfmk/atm/atm_types.defs +++ b/osfmk/atm/atm_types.defs @@ -32,17 +32,16 @@ #ifndef _ATM_ATM_TYPES_DEFS_ #define _ATM_ATM_TYPES_DEFS_ - #include -type aid_t = uint64_t; -type atm_aid_t = uint64_t; +type aid_t = uint64_t; +type atm_aid_t = uint64_t; type mach_atm_subaid_t = uint64_t; type atm_memory_descriptor_array_t = array[*:512] of mach_port_t; type atm_memory_size_array_t = array[*:512] of uint64_t; -import ; +import ; #endif /* _ATM_ATM_TYPES_DEFS_ */ /* vim: set ft=c : */ diff --git a/osfmk/atm/atm_types.h b/osfmk/atm/atm_types.h index 6008a71ab..b189d6237 100644 --- a/osfmk/atm/atm_types.h +++ b/osfmk/atm/atm_types.h @@ -32,6 +32,8 @@ #include #include +/* Everything here is Deprecated. will be removed soon */ + #define MACH_VOUCHER_ATTR_ATM_NULL ((mach_voucher_attr_recipe_command_t)501) #define MACH_VOUCHER_ATTR_ATM_CREATE ((mach_voucher_attr_recipe_command_t)510) #define MACH_VOUCHER_ATTR_ATM_REGISTER ((mach_voucher_attr_recipe_command_t)511) @@ -47,8 +49,6 @@ typedef uint32_t atm_action_t; typedef uint64_t atm_guard_t; typedef uint64_t aid_t; - -/* Deprecated. will be removed soon */ typedef uint64_t subaid_t; typedef uint64_t mailbox_offset_t; #define SUB_AID_MAX (UINT64_MAX) @@ -58,12 +58,11 @@ typedef uint32_t atm_subaid32_t; typedef uint64_t mach_atm_subaid_t; /* Used for mach based apis. 
*/ typedef uint64_t atm_mailbox_offset_t; - typedef mach_port_t atm_memory_descriptor_t; typedef atm_memory_descriptor_t *atm_memory_descriptor_array_t; typedef uint64_t *atm_memory_size_array_t; -#define ATM_SUBAID32_MAX (UINT32_MAX) +#define ATM_SUBAID32_MAX (UINT32_MAX) #define ATM_TRACE_DISABLE (0x0100) /* OS_TRACE_MODE_DISABLE - Do not initialize the new logging*/ #define ATM_TRACE_OFF (0x0400) /* OS_TRACE_MODE_OFF - Don't drop log messages to new log buffers */ #define ATM_ENABLE_LEGACY_LOGGING (0x20000000) /* OS_TRACE_SYSTEMMODE_LEGACY_LOGGING - Enable legacy logging */ diff --git a/osfmk/bank/Makefile b/osfmk/bank/Makefile index 27dee2fdb..dd6ae3c89 100644 --- a/osfmk/bank/Makefile +++ b/osfmk/bank/Makefile @@ -52,7 +52,7 @@ ${MIGINCLUDES} : ${MIG_TYPES} ${MIG_UUHDRS} : \ %.h : %.defs - $(call makelog,$(ColorM)MIG$(Color0) $(ColorF)$@$(Color0)) + @$(LOG_MIG) $@ $(_v)$(MIG) $(MIGFLAGS) \ -server /dev/null \ -user /dev/null \ @@ -61,7 +61,7 @@ ${MIG_UUHDRS} : \ ${MIG_USHDRS} : \ %_server.h : %.defs - $(call makelog,$(ColorM)MIG$(Color0) $(ColorF)$@$(Color0)) + @$(LOG_MIG) $@ $(_v)$(MIG) $(MIGFLAGS) \ -server /dev/null \ -user /dev/null \ @@ -97,7 +97,7 @@ ${COMP_FILES} : ${MIG_TYPES} ${MIG_KUSRC} : \ %_user.c : %.defs - $(call makelog,$(ColorM)MIG$(Color0) $(ColorF)$@$(Color0)) + @$(LOG_MIG) $@ $(_v)${MIG} ${MIGFLAGS} ${MIGKUFLAGS} \ -user $*_user.c \ -header $*.h \ @@ -107,7 +107,7 @@ ${MIG_KUSRC} : \ ${MIG_KSSRC}: \ %_server.c : %.defs - $(call makelog,$(ColorM)MIG$(Color0) $(ColorF)$@$(Color0)) + @$(LOG_MIG) $@ $(_v)${MIG} ${MIGFLAGS} ${MIGKSFLAGS} \ -user /dev/null \ -header /dev/null \ diff --git a/osfmk/bank/bank.c b/osfmk/bank/bank.c index 5b4ba01d1..c847b2b67 100644 --- a/osfmk/bank/bank.c +++ b/osfmk/bank/bank.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012-2019 Apple Inc. All rights reserved. + * Copyright (c) 2012-2020 Apple Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -37,7 +37,6 @@ #include #include #include -#include #include #include #include @@ -46,7 +45,11 @@ #include #include -static zone_t bank_task_zone, bank_account_zone; +static ZONE_DECLARE(bank_task_zone, "bank_task", + sizeof(struct bank_task), ZC_NONE); +static ZONE_DECLARE(bank_account_zone, "bank_account", + sizeof(struct bank_account), ZC_NONE); + #define MAX_BANK_TASK (CONFIG_TASK_MAX) #define MAX_BANK_ACCOUNT (CONFIG_TASK_MAX + CONFIG_THREAD_MAX) @@ -64,11 +67,6 @@ extern struct persona *system_persona, *proxy_system_persona; uint32_t persona_get_id(struct persona *persona); extern int unique_persona; -#if DEVELOPMENT || DEBUG -queue_head_t bank_tasks_list; -queue_head_t bank_accounts_list; -#endif - static ledger_template_t bank_ledger_template = NULL; struct _bank_ledger_indices bank_ledgers = { .cpu_time = -1, .energy = -1 }; @@ -89,14 +87,14 @@ static struct thread_group *bank_get_bank_task_thread_group(bank_task_t bank_tas static struct thread_group *bank_get_bank_account_thread_group(bank_account_t bank_account __unused); static boolean_t bank_verify_persona_id(uint32_t persona_id); -static lck_spin_t g_bank_task_lock_data; /* lock to protect task->bank_context transition */ +/* lock to protect task->bank_context transition */ +static LCK_GRP_DECLARE(bank_lock_grp, "bank_lock"); +static LCK_ATTR_DECLARE(bank_lock_attr, 0, 0); +static LCK_SPIN_DECLARE_ATTR(g_bank_task_lock_data, &bank_lock_grp, &bank_lock_attr); -static uint32_t disable_persona_propogate_check = 0; +static TUNABLE(bool, disable_persona_propagate_check, + "disable_persona_propagate_check", false); -#define global_bank_task_lock_init() \ - lck_spin_init(&g_bank_task_lock_data, &bank_lock_grp, &bank_lock_attr) -#define global_bank_task_lock_destroy() \ - lck_spin_destroy(&g_bank_task_lock_data, &bank_lock_grp) #define global_bank_task_lock() \ lck_spin_lock_grp(&g_bank_task_lock_data, &bank_lock_grp) #define global_bank_task_lock_try() \ @@ -105,7 +103,8 @@ static uint32_t disable_persona_propogate_check = 0; lck_spin_unlock(&g_bank_task_lock_data) extern uint64_t proc_uniqueid(void *p); -extern int32_t proc_pid(void *p); +struct proc; +extern int32_t proc_pid(struct proc *p); extern int32_t proc_pidversion(void *p); extern uint32_t proc_persona_id(void *p); extern uint32_t proc_getuid(void *p); @@ -175,21 +174,13 @@ const struct ipc_voucher_attr_manager bank_manager = { #if DEVELOPMENT || DEBUG -decl_lck_mtx_data(, bank_tasks_list_lock); -decl_lck_mtx_data(, bank_accounts_list_lock); - -lck_grp_t bank_dev_lock_grp; -lck_attr_t bank_dev_lock_attr; -lck_grp_attr_t bank_dev_lock_grp_attr; +LCK_GRP_DECLARE(bank_dev_lock_grp, "bank_dev_lock"); +LCK_MTX_DECLARE(bank_tasks_list_lock, &bank_dev_lock_grp); +LCK_MTX_DECLARE(bank_accounts_list_lock, &bank_dev_lock_grp); +queue_head_t bank_tasks_list = QUEUE_HEAD_INITIALIZER(bank_tasks_list); +queue_head_t bank_accounts_list = QUEUE_HEAD_INITIALIZER(bank_accounts_list); #endif -/* - * Lock group attributes for bank sub system. - */ -lck_grp_t bank_lock_grp; -lck_attr_t bank_lock_attr; -lck_grp_attr_t bank_lock_grp_attr; - /* * Routine: bank_init * Purpose: Initialize the BANK subsystem. 
@@ -199,38 +190,9 @@ void bank_init() { kern_return_t kr = KERN_SUCCESS; - /* setup zones for bank_task and bank_account objects */ - bank_task_zone = zinit(sizeof(struct bank_task), - MAX_BANK_TASK * sizeof(struct bank_task), - sizeof(struct bank_task), - "bank_task"); - - bank_account_zone = zinit(sizeof(struct bank_account), - MAX_BANK_ACCOUNT * sizeof(struct bank_account), - sizeof(struct bank_account), - "bank_account"); init_bank_ledgers(); - /* Initialize bank lock group and lock attributes. */ - lck_grp_attr_setdefault(&bank_lock_grp_attr); - lck_grp_init(&bank_lock_grp, "bank_lock", &bank_lock_grp_attr); - lck_attr_setdefault(&bank_lock_attr); - global_bank_task_lock_init(); - -#if DEVELOPMENT || DEBUG - /* Initialize global bank development lock group and lock attributes. */ - lck_grp_attr_setdefault(&bank_dev_lock_grp_attr); - lck_grp_init(&bank_dev_lock_grp, "bank_dev_lock", &bank_dev_lock_grp_attr); - lck_attr_setdefault(&bank_dev_lock_attr); - - lck_mtx_init(&bank_tasks_list_lock, &bank_dev_lock_grp, &bank_dev_lock_attr); - lck_mtx_init(&bank_accounts_list_lock, &bank_dev_lock_grp, &bank_dev_lock_attr); - - queue_init(&bank_tasks_list); - queue_init(&bank_accounts_list); -#endif - /* Register the bank manager with the Vouchers sub system. */ kr = ipc_register_well_known_mach_voucher_attr_manager( &bank_manager, @@ -242,16 +204,7 @@ bank_init() } -#if DEVELOPMENT || DEBUG - uint32_t disable_persona_propogate_check_bootarg = 0; - if (PE_parse_boot_argn("disable_persona_propogate_check", &disable_persona_propogate_check_bootarg, - sizeof(disable_persona_propogate_check_bootarg))) { - disable_persona_propogate_check = (disable_persona_propogate_check_bootarg != 0) ? 1 : 0; - } -#endif - kprintf("BANK subsystem is initialized\n"); - return; } @@ -950,6 +903,9 @@ bank_task_alloc_init(task_t task) new_bank_task->bt_persona_id = proc_persona_id(task->bsd_info); new_bank_task->bt_uid = proc_getuid(task->bsd_info); new_bank_task->bt_gid = proc_getgid(task->bsd_info); +#if CONFIG_THREAD_GROUPS + new_bank_task->bt_thread_group = thread_group_retain(task_coalition_get_thread_group(task)); +#endif proc_getexecutableuuid(task->bsd_info, new_bank_task->bt_macho_uuid, sizeof(new_bank_task->bt_macho_uuid)); #if DEVELOPMENT || DEBUG @@ -977,8 +933,8 @@ bank_task_is_propagate_entitled(task_t t) return FALSE; } - /* If it's a platform binary, allow propogation by default */ - if (disable_persona_propogate_check || (t->t_flags & TF_PLATFORM)) { + /* If it's a platform binary, allow propagation by default */ + if (disable_persona_propagate_check || (t->t_flags & TF_PLATFORM)) { return TRUE; } @@ -1040,6 +996,9 @@ bank_account_alloc_init( new_bank_account->ba_holder = bank_holder; new_bank_account->ba_secureoriginator = bank_secureoriginator; new_bank_account->ba_proximateprocess = bank_proximateprocess; +#if CONFIG_THREAD_GROUPS + new_bank_account->ba_thread_group = thread_group; +#endif new_bank_account->ba_so_persona_id = persona_id; /* Iterate through accounts need to pay list to find the existing entry */ @@ -1084,6 +1043,10 @@ bank_account_alloc_init( bank_task_reference(bank_merchant); bank_task_reference(bank_secureoriginator); bank_task_reference(bank_proximateprocess); +#if CONFIG_THREAD_GROUPS + assert(new_bank_account->ba_thread_group != NULL); + thread_group_retain(new_bank_account->ba_thread_group); +#endif #if DEVELOPMENT || DEBUG new_bank_account->ba_task = NULL; @@ -1169,6 +1132,9 @@ bank_task_dealloc( lck_mtx_destroy(&bank_task->bt_acc_to_pay_lock, &bank_lock_grp); 
lck_mtx_destroy(&bank_task->bt_acc_to_charge_lock, &bank_lock_grp); +#if CONFIG_THREAD_GROUPS + thread_group_release(bank_task->bt_thread_group); +#endif #if DEVELOPMENT || DEBUG lck_mtx_lock(&bank_tasks_list_lock); @@ -1246,6 +1212,10 @@ bank_account_dealloc_with_sync( bank_task_dealloc(bank_merchant, 1); bank_task_dealloc(bank_secureoriginator, 1); bank_task_dealloc(bank_proximateprocess, 1); +#if CONFIG_THREAD_GROUPS + assert(bank_account->ba_thread_group != NULL); + thread_group_release(bank_account->ba_thread_group); +#endif #if DEVELOPMENT || DEBUG lck_mtx_lock(&bank_accounts_list_lock); @@ -1706,6 +1676,11 @@ bank_get_bank_task_thread_group(bank_task_t bank_task __unused) { struct thread_group *banktg = NULL; +#if CONFIG_THREAD_GROUPS + if (bank_task != BANK_TASK_NULL) { + banktg = bank_task->bt_thread_group; + } +#endif /* CONFIG_THREAD_GROUPS */ return banktg; } @@ -1719,6 +1694,11 @@ bank_get_bank_account_thread_group(bank_account_t bank_account __unused) { struct thread_group *banktg = NULL; +#if CONFIG_THREAD_GROUPS + if (bank_account != BANK_ACCOUNT_NULL) { + banktg = bank_account->ba_thread_group; + } +#endif /* CONFIG_THREAD_GROUPS */ return banktg; } diff --git a/osfmk/bank/bank_internal.h b/osfmk/bank/bank_internal.h index f78a64dda..8451c1c6a 100644 --- a/osfmk/bank/bank_internal.h +++ b/osfmk/bank/bank_internal.h @@ -71,7 +71,10 @@ struct bank_task { queue_head_t bt_accounts_to_charge; /* List of accounts I did work and need to charge */ decl_lck_mtx_data(, bt_acc_to_pay_lock); /* Lock to protect accounts to pay list */ decl_lck_mtx_data(, bt_acc_to_charge_lock); /* Lock to protect accounts to charge list */ - uint8_t bt_hasentitlement; /* If the secure persona entitlement is set on the task */ + boolean_t bt_hasentitlement; /* If the secure persona entitlement is set on the task */ +#if CONFIG_THREAD_GROUPS + struct thread_group * bt_thread_group; /* Task's home thread group pointer */ +#endif #if DEVELOPMENT || DEBUG queue_chain_t bt_global_elt; /* Element on the global bank task chain */ #endif @@ -126,6 +129,9 @@ struct bank_account { bank_task_t ba_proximateprocess; /* Process who propagated the voucher to us */ queue_chain_t ba_next_acc_to_pay; /* Next account I need to pay to */ queue_chain_t ba_next_acc_to_charge; /* Next account I need to charge to */ +#if CONFIG_THREAD_GROUPS + struct thread_group * ba_thread_group; /* thread group to be adopted */ +#endif #if DEVELOPMENT || DEBUG queue_chain_t ba_global_elt; /* Element on the global account chain */ #endif diff --git a/osfmk/conf/Makefile b/osfmk/conf/Makefile index 05c4b79cf..51eddb889 100644 --- a/osfmk/conf/Makefile +++ b/osfmk/conf/Makefile @@ -23,7 +23,7 @@ endif $(TARGET)/$(CURRENT_KERNEL_CONFIG)/Makefile: $(SRCROOT)/SETUP/config/doconf $(OBJROOT)/SETUP/config $(DOCONFDEPS) $(_v)$(MKDIR) $(TARGET)/$(CURRENT_KERNEL_CONFIG) - $(_v)$(SRCROOT)/SETUP/config/doconf -c -cpu $(DOCONF_ARCH_CONFIG_LC) -soc $(CURRENT_MACHINE_CONFIG_LC) -d $(TARGET)/$(CURRENT_KERNEL_CONFIG) -s $(SOURCE) -m $(MASTERCONFDIR) $(CURRENT_KERNEL_CONFIG) + $(_v)$(SRCROOT)/SETUP/config/doconf -c -cpu $(DOCONF_ARCH_CONFIG_LC) -soc $(CURRENT_MACHINE_CONFIG_LC) -platform $(PLATFORM) -d $(TARGET)/$(CURRENT_KERNEL_CONFIG) -s $(SOURCE) -m $(MASTERCONFDIR) $(CURRENT_KERNEL_CONFIG) do_all: $(TARGET)/$(CURRENT_KERNEL_CONFIG)/Makefile $(_v)${MAKE} \ diff --git a/osfmk/conf/Makefile.arm b/osfmk/conf/Makefile.arm index 17d6b3a8b..744268d49 100644 --- a/osfmk/conf/Makefile.arm +++ b/osfmk/conf/Makefile.arm @@ -5,6 +5,19 @@ # Files that must go in the 
__HIB segment: HIB_FILES= +vfp_state_test.o_CFLAGS_ADD += -mno-implicit-float + +# +# Diagnostic opt-outs. We need to make this list empty. +# +# DO NOT ADD MORE HERE. +# +# -Wno-implicit-int-conversion +vm_fault.o_CFLAGS_ADD += -Wno-implicit-int-conversion +vm_map.o_CFLAGS_ADD += -Wno-implicit-int-conversion +# -Wno-shorten-64-to-32 +vm_fault.o_CFLAGS_ADD += -Wno-shorten-64-to-32 +vm_map.o_CFLAGS_ADD += -Wno-shorten-64-to-32 ###################################################################### #END Machine dependent Makefile fragment for arm ###################################################################### diff --git a/osfmk/conf/Makefile.arm64 b/osfmk/conf/Makefile.arm64 index 2c3b7ec5c..fab485b60 100644 --- a/osfmk/conf/Makefile.arm64 +++ b/osfmk/conf/Makefile.arm64 @@ -5,9 +5,45 @@ CWARNFLAGS = $(CWARNFLAGS_STD) -Wshorten-64-to-32 # Files that must go in the __HIB segment: -HIB_FILES= +UNCONFIGURED_HIB_FILES= \ + hibernate_restore.o \ + bcopy.o \ + bzero.o \ + cc_clear.o \ + ccdigest_init.o \ + ccdigest_update.o \ + ccdigest_final_64be.o \ + cchmac.o \ + cchmac_init.o \ + cchmac_update.o \ + cchmac_final.o \ + ccsha256_K.o \ + ccsha256_initial_state.o \ + sha256_compress_arm64.o \ + memset_s.o + + +HIB_FILES=$(filter $(UNCONFIGURED_HIB_FILES),$(OBJS)) + +define ADD_HIB_CFLAGS +# Unconfigured __HIB files must be Mach-O for "setsegname" +$(1)_CFLAGS_ADD += $(CFLAGS_NOLTO_FLAG) +# KASAN must be disabled for unconfigured __HIB files +# because the kasan runtime isn't available during hibernation resume +$(1)_CFLAGS_ADD += -fno-sanitize=address -UKASAN +# Stack protector and stack check must be disabled because the stack protector runtime isn't available +$(1)_CFLAGS_ADD += -fno-stack-protector -fno-stack-check +endef + +$(foreach FILE,$(UNCONFIGURED_HIB_FILES),$(eval $(call ADD_HIB_CFLAGS,$(FILE)))) + +# hibernate_restore.o uses function pointers but the signing keys aren't set up yet, +# so compile this file with no ptrauth +hibernate_restore.o_CFLAGS_ADD += -fno-ptrauth-calls lz4.o_CFLAGS_ADD += -fbuiltin -O3 +vfp_state_test.o_CFLAGS_ADD += -mno-implicit-float + ###################################################################### #END Machine dependent Makefile fragment for arm64 diff --git a/osfmk/conf/Makefile.template b/osfmk/conf/Makefile.template index 2db9fb566..304d4ae7b 100644 --- a/osfmk/conf/Makefile.template +++ b/osfmk/conf/Makefile.template @@ -19,63 +19,6 @@ include $(MakeInc_def) CFLAGS+= -include meta_features.h -DMACH_KERNEL_PRIVATE -DMACH_KERNEL SFLAGS+= -include meta_features.h -# Objects that don't want -Wcast-align warning (8474835) -OBJS_NO_CAST_ALIGN = \ - atm_notification_user.o \ - model_dep.o \ - video_console.o \ - kdp_udp.o \ - kdp_machdep.o \ - host.o \ - processor.o \ - sched_prim.o \ - task.o \ - thread.o \ - threadinfo.o \ - gssd_mach.o \ - UNDRequest.o \ - panic_dialog.o \ - bsd_i386.o \ - commpage.o \ - cpu_threads.o \ - cpuid.o \ - locks_i386.o \ - locks_i386_opt.o \ - machine_task.o \ - mp_desc.o \ - pcb.o \ - pcb_native.o \ - kdp_x86_common.o \ - startup64.o \ - affinity.o \ - sched_grrr.o \ - sched_proto.o \ - stack.o \ - task_policy.o \ - wait_queue.o \ - bsd_kern.o \ - pmc.o \ - status.o \ - machine_routines.o \ - loose_ends.o \ - sleh.o \ - ccdigest_final_64be.o \ - ccdigest_init.o \ - ccdigest_update.o \ - cchmac_final.o \ - cchmac_init.o \ - ccsha1.o \ - ipc_object.o \ - ipc_kmsg.o \ - ipc_right.o - -# Objects that don't want -Wsign-compare warning (15294427) -OBJS_NO_SIGN_COMPARE = \ - atm_notification_user.o - -$(foreach 
file,$(OBJS_NO_CAST_ALIGN),$(eval $(call add_perfile_cflags,$(file),-Wno-cast-align))) -$(foreach file,$(OBJS_NO_SIGN_COMPARE),$(eval $(call add_perfile_cflags,$(file),-Wno-sign-compare))) - ifeq ($(KSANCOV),1) # Don't instrument functions called by the ksancov runtime. SanitizeCoverage does # not support blacklists, so exclude the whole file. @@ -126,6 +69,306 @@ COMP_SUBDIRS = \ %MACHDEP +# +# Diagnostic opt-outs. We need to make this list empty. +# +# DO NOT ADD MORE HERE. +# +vm_tests.o_CFLAGS_ADD += -O0 -g +# -Wno-atomic-implicit-seq-cst +mp.o_CFLAGS_ADD += -Wno-atomic-implicit-seq-cst +pmCPU.o_CFLAGS_ADD += -Wno-atomic-implicit-seq-cst +pmap_pcid.o_CFLAGS_ADD += -Wno-atomic-implicit-seq-cst +xcpm_dvfs.o_CFLAGS_ADD += -Wno-atomic-implicit-seq-cst +xcpm_fi.o_CFLAGS_ADD += -Wno-atomic-implicit-seq-cst +xcpm_idle.o_CFLAGS_ADD += -Wno-atomic-implicit-seq-cst +# -Wno-cast-align +bsd_i386.o_CFLAGS_ADD += -Wno-cast-align +ccdigest_final_64be.o_CFLAGS_ADD += -Wno-cast-align +ccdigest_init.o_CFLAGS_ADD += -Wno-cast-align +ccdigest_update.o_CFLAGS_ADD += -Wno-cast-align +cchmac_final.o_CFLAGS_ADD += -Wno-cast-align +cchmac_init.o_CFLAGS_ADD += -Wno-cast-align +commpage.o_CFLAGS_ADD += -Wno-cast-align +cpu_threads.o_CFLAGS_ADD += -Wno-cast-align +host.o_CFLAGS_ADD += -Wno-cast-align +kdp_machdep.o_CFLAGS_ADD += -Wno-cast-align +kdp_udp.o_CFLAGS_ADD += -Wno-cast-align +kdp_x86_common.o_CFLAGS_ADD += -Wno-cast-align +locks_i386.o_CFLAGS_ADD += -Wno-cast-align +machine_task.o_CFLAGS_ADD += -Wno-cast-align +model_dep.o_CFLAGS_ADD += -Wno-cast-align +mp_desc.o_CFLAGS_ADD += -Wno-cast-align +pcb.o_CFLAGS_ADD += -Wno-cast-align +pcb_native.o_CFLAGS_ADD += -Wno-cast-align +processor.o_CFLAGS_ADD += -Wno-cast-align +status.o_CFLAGS_ADD += -Wno-cast-align +task.o_CFLAGS_ADD += -Wno-cast-align +task_policy.o_CFLAGS_ADD += -Wno-cast-align +video_console.o_CFLAGS_ADD += -Wno-cast-align +# -Wno-implicit-int-conversion +acpi.o_CFLAGS_ADD += -Wno-implicit-int-conversion +commpage.o_CFLAGS_ADD += -Wno-implicit-int-conversion +cpu.o_CFLAGS_ADD += -Wno-implicit-int-conversion +cpu_threads.o_CFLAGS_ADD += -Wno-implicit-int-conversion +kdebug_trigger.o_CFLAGS_ADD += -Wno-implicit-int-conversion +kern_stackshot.o_CFLAGS_ADD += -Wno-implicit-int-conversion +lapic_native.o_CFLAGS_ADD += -Wno-implicit-int-conversion +loose_ends.o_CFLAGS_ADD += -Wno-implicit-int-conversion +lz4.o_CFLAGS_ADD += -Wno-implicit-int-conversion +model_dep.o_CFLAGS_ADD += -Wno-implicit-int-conversion +mp.o_CFLAGS_ADD += -Wno-implicit-int-conversion +mp_desc.o_CFLAGS_ADD += -Wno-implicit-int-conversion +pcb.o_CFLAGS_ADD += -Wno-implicit-int-conversion +pcb_native.o_CFLAGS_ADD += -Wno-implicit-int-conversion +pmap_pcid.o_CFLAGS_ADD += -Wno-implicit-int-conversion +pmap_x86_common.o_CFLAGS_ADD += -Wno-implicit-int-conversion +sched_clutch.o_CFLAGS_ADD += -Wno-implicit-int-conversion +thread_group.o_CFLAGS_ADD += -Wno-implicit-int-conversion +trap.o_CFLAGS_ADD += -Wno-implicit-int-conversion +uat.o_CFLAGS_ADD += -Wno-implicit-int-conversion +video_console.o_CFLAGS_ADD += -Wno-implicit-int-conversion +xcpm_dvfs.o_CFLAGS_ADD += -Wno-implicit-int-conversion +xcpm_ioctl.o_CFLAGS_ADD += -Wno-implicit-int-conversion +zalloc.o_CFLAGS_ADD += -Wno-implicit-int-conversion +# -Wno-shorten-64-to-32 +arm_vm_init.o_CFLAGS_ADD += -Wno-shorten-64-to-32 +backtrace.o_CFLAGS_ADD += -Wno-shorten-64-to-32 +btlog.o_CFLAGS_ADD += -Wno-shorten-64-to-32 +caches.o_CFLAGS_ADD += -Wno-shorten-64-to-32 +callstack.o_CFLAGS_ADD += -Wno-shorten-64-to-32 +clock.o_CFLAGS_ADD += 
-Wno-shorten-64-to-32 +clock_oldops.o_CFLAGS_ADD += -Wno-shorten-64-to-32 +iokit_rpc.o_CFLAGS_ADD += -Wno-shorten-64-to-32 +ipc_kmsg.o_CFLAGS_ADD += -Wno-shorten-64-to-32 +ipc_pset.o_CFLAGS_ADD += -Wno-shorten-64-to-32 +ipc_right.o_CFLAGS_ADD += -Wno-shorten-64-to-32 +kdp_core.o_CFLAGS_ADD += -Wno-shorten-64-to-32 +kdp_vm.o_CFLAGS_ADD += -Wno-shorten-64-to-32 +kern_cdata.o_CFLAGS_ADD += -Wno-shorten-64-to-32 +kern_stackshot.o_CFLAGS_ADD += -Wno-shorten-64-to-32 +loose_ends.o_CFLAGS_ADD += -Wno-shorten-64-to-32 +mach_msg.o_CFLAGS_ADD += -Wno-shorten-64-to-32 +machine_routines.o_CFLAGS_ADD += -Wno-shorten-64-to-32 +mk_timer.o_CFLAGS_ADD += -Wno-shorten-64-to-32 +model_dep.o_CFLAGS_ADD += -Wno-shorten-64-to-32 +pcb.o_CFLAGS_ADD += -Wno-shorten-64-to-32 +pmap.o_CFLAGS_ADD += -Wno-shorten-64-to-32 +processor_core.o_CFLAGS_ADD += -Wno-shorten-64-to-32 +rtclock.o_CFLAGS_ADD += -Wno-shorten-64-to-32 +status.o_CFLAGS_ADD += -Wno-shorten-64-to-32 +telemetry.o_CFLAGS_ADD += -Wno-shorten-64-to-32 +vm_init.o_CFLAGS_ADD += -Wno-shorten-64-to-32 +vm_kern.o_CFLAGS_ADD += -Wno-shorten-64-to-32 +vm_object.o_CFLAGS_ADD += -Wno-shorten-64-to-32 +vm_shared_region_pager.o_CFLAGS_ADD += -Wno-shorten-64-to-32 +vm_swapfile_pager.o_CFLAGS_ADD += -Wno-shorten-64-to-32 +vm_user.o_CFLAGS_ADD += -Wno-shorten-64-to-32 +zalloc.o_CFLAGS_ADD += -Wno-shorten-64-to-32 +# -Wno-sign-conversion +Diagnostics.o_CFLAGS_ADD += -Wno-sign-conversion +acpi.o_CFLAGS_ADD += -Wno-sign-conversion +action.o_CFLAGS_ADD += -Wno-sign-conversion +affinity.o_CFLAGS_ADD += -Wno-sign-conversion +alternate_debugger.o_CFLAGS_ADD += -Wno-sign-conversion +arcade.o_CFLAGS_ADD += -Wno-sign-conversion +arm_init.o_CFLAGS_ADD += -Wno-sign-conversion +arm_timer.o_CFLAGS_ADD += -Wno-sign-conversion +arm_vm_init.o_CFLAGS_ADD += -Wno-sign-conversion +ast.o_CFLAGS_ADD += -Wno-sign-conversion +backtrace.o_CFLAGS_ADD += -Wno-sign-conversion +bank.o_CFLAGS_ADD += -Wno-sign-conversion +bitmap_test.o_CFLAGS_ADD += -Wno-sign-conversion +bsd_arm64.o_CFLAGS_ADD += -Wno-sign-conversion +bsd_i386.o_CFLAGS_ADD += -Wno-sign-conversion +bsd_i386_native.o_CFLAGS_ADD += -Wno-sign-conversion +bsd_kern.o_CFLAGS_ADD += -Wno-sign-conversion +bsd_vm.o_CFLAGS_ADD += -Wno-sign-conversion +btlog.o_CFLAGS_ADD += -Wno-sign-conversion +caches.o_CFLAGS_ADD += -Wno-sign-conversion +callstack.o_CFLAGS_ADD += -Wno-sign-conversion +ccdrbg_nisthmac.o_CFLAGS_ADD += -Wno-sign-conversion +cchmac.o_CFLAGS_ADD += -Wno-sign-conversion +cchmac_final.o_CFLAGS_ADD += -Wno-sign-conversion +cchmac_init.o_CFLAGS_ADD += -Wno-sign-conversion +clock.o_CFLAGS_ADD += -Wno-sign-conversion +clock_oldops.o_CFLAGS_ADD += -Wno-sign-conversion +coalition.o_CFLAGS_ADD += -Wno-sign-conversion +commpage.o_CFLAGS_ADD += -Wno-sign-conversion +copyio.o_CFLAGS_ADD += -Wno-sign-conversion +corpse.o_CFLAGS_ADD += -Wno-sign-conversion +cpu.o_CFLAGS_ADD += -Wno-sign-conversion +cpu_common.o_CFLAGS_ADD += -Wno-sign-conversion +cpu_quiesce.o_CFLAGS_ADD += -Wno-sign-conversion +cpu_threads.o_CFLAGS_ADD += -Wno-sign-conversion +cpu_topology.o_CFLAGS_ADD += -Wno-sign-conversion +cpuid.o_CFLAGS_ADD += -Wno-sign-conversion +dbgwrap.o_CFLAGS_ADD += -Wno-sign-conversion +debug.o_CFLAGS_ADD += -Wno-sign-conversion +ecc_logging.o_CFLAGS_ADD += -Wno-sign-conversion +entropy.o_CFLAGS_ADD += -Wno-sign-conversion +exception.o_CFLAGS_ADD += -Wno-sign-conversion +fpu.o_CFLAGS_ADD += -Wno-sign-conversion +gzalloc.o_CFLAGS_ADD += -Wno-sign-conversion +host.o_CFLAGS_ADD += -Wno-sign-conversion +host_notify.o_CFLAGS_ADD += -Wno-sign-conversion 
+hv.o_CFLAGS_ADD += -Wno-sign-conversion +i386_init.o_CFLAGS_ADD += -Wno-sign-conversion +i386_timer.o_CFLAGS_ADD += -Wno-sign-conversion +i386_vm_init.o_CFLAGS_ADD += -Wno-sign-conversion +iokit_rpc.o_CFLAGS_ADD += -Wno-sign-conversion +ipc_eventlink.o_CFLAGS_ADD += -Wno-sign-conversion +ipc_host.o_CFLAGS_ADD += -Wno-sign-conversion +ipc_importance.o_CFLAGS_ADD += -Wno-sign-conversion +ipc_init.o_CFLAGS_ADD += -Wno-sign-conversion +ipc_kmsg.o_CFLAGS_ADD += -Wno-sign-conversion +ipc_kobject.o_CFLAGS_ADD += -Wno-sign-conversion +ipc_mqueue.o_CFLAGS_ADD += -Wno-sign-conversion +ipc_object.o_CFLAGS_ADD += -Wno-sign-conversion +ipc_port.o_CFLAGS_ADD += -Wno-sign-conversion +ipc_pset.o_CFLAGS_ADD += -Wno-sign-conversion +ipc_right.o_CFLAGS_ADD += -Wno-sign-conversion +ipc_space.o_CFLAGS_ADD += -Wno-sign-conversion +ipc_tt.o_CFLAGS_ADD += -Wno-sign-conversion +ipc_voucher.o_CFLAGS_ADD += -Wno-sign-conversion +kalloc.o_CFLAGS_ADD += -Wno-sign-conversion +kdp.o_CFLAGS_ADD += -Wno-sign-conversion +kdp_core.o_CFLAGS_ADD += -Wno-sign-conversion +kdp_machdep.o_CFLAGS_ADD += -Wno-sign-conversion +kdp_serial.o_CFLAGS_ADD += -Wno-sign-conversion +kdp_vm.o_CFLAGS_ADD += -Wno-sign-conversion +kern_monotonic.o_CFLAGS_ADD += -Wno-sign-conversion +kern_stackshot.o_CFLAGS_ADD += -Wno-sign-conversion +kernel_tests.o_CFLAGS_ADD += -Wno-sign-conversion +kext_alloc.o_CFLAGS_ADD += -Wno-sign-conversion +kpc.o_CFLAGS_ADD += -Wno-sign-conversion +kpc_arm.o_CFLAGS_ADD += -Wno-sign-conversion +kpc_common.o_CFLAGS_ADD += -Wno-sign-conversion +kpc_x86.o_CFLAGS_ADD += -Wno-sign-conversion +kperf.o_CFLAGS_ADD += -Wno-sign-conversion +kperf_kpc.o_CFLAGS_ADD += -Wno-sign-conversion +kperf_mp.o_CFLAGS_ADD += -Wno-sign-conversion +kperf_timer.o_CFLAGS_ADD += -Wno-sign-conversion +kperfbsd.o_CFLAGS_ADD += -Wno-sign-conversion +ktest_emit.o_CFLAGS_ADD += -Wno-sign-conversion +lapic_native.o_CFLAGS_ADD += -Wno-sign-conversion +lazy.o_CFLAGS_ADD += -Wno-sign-conversion +ledger.o_CFLAGS_ADD += -Wno-sign-conversion +locks.o_CFLAGS_ADD += -Wno-sign-conversion +locks_arm.o_CFLAGS_ADD += -Wno-sign-conversion +locks_i386.o_CFLAGS_ADD += -Wno-sign-conversion +locks_i386_opt.o_CFLAGS_ADD += -Wno-sign-conversion +loose_ends.o_CFLAGS_ADD += -Wno-sign-conversion +lowmem_vectors.o_CFLAGS_ADD += -Wno-sign-conversion +ltable.o_CFLAGS_ADD += -Wno-sign-conversion +lz4.o_CFLAGS_ADD += -Wno-sign-conversion +mach_debug.o_CFLAGS_ADD += -Wno-sign-conversion +mach_kernelrpc.o_CFLAGS_ADD += -Wno-sign-conversion +mach_port.o_CFLAGS_ADD += -Wno-sign-conversion +machdep_call.o_CFLAGS_ADD += -Wno-sign-conversion +machine.o_CFLAGS_ADD += -Wno-sign-conversion +machine_check.o_CFLAGS_ADD += -Wno-sign-conversion +machine_remote_time.o_CFLAGS_ADD += -Wno-sign-conversion +machine_routines.o_CFLAGS_ADD += -Wno-sign-conversion +machine_routines_common.o_CFLAGS_ADD += -Wno-sign-conversion +machine_task.o_CFLAGS_ADD += -Wno-sign-conversion +meminfo.o_CFLAGS_ADD += -Wno-sign-conversion +memory_object.o_CFLAGS_ADD += -Wno-sign-conversion +mk_sp.o_CFLAGS_ADD += -Wno-sign-conversion +mk_timer.o_CFLAGS_ADD += -Wno-sign-conversion +model_dep.o_CFLAGS_ADD += -Wno-sign-conversion +monotonic_arm64.o_CFLAGS_ADD += -Wno-sign-conversion +monotonic_x86_64.o_CFLAGS_ADD += -Wno-sign-conversion +mp.o_CFLAGS_ADD += -Wno-sign-conversion +mp_desc.o_CFLAGS_ADD += -Wno-sign-conversion +mp_native.o_CFLAGS_ADD += -Wno-sign-conversion +mpsc_queue.o_CFLAGS_ADD += -Wno-sign-conversion +mtrr.o_CFLAGS_ADD += -Wno-sign-conversion +pal_routines.o_CFLAGS_ADD += -Wno-sign-conversion 
+pcb.o_CFLAGS_ADD += -Wno-sign-conversion +pcb_native.o_CFLAGS_ADD += -Wno-sign-conversion +pet.o_CFLAGS_ADD += -Wno-sign-conversion +platform_tests.o_CFLAGS_ADD += -Wno-sign-conversion +pmCPU.o_CFLAGS_ADD += -Wno-sign-conversion +pmap.o_CFLAGS_ADD += -Wno-sign-conversion +pmap_common.o_CFLAGS_ADD += -Wno-sign-conversion +pmap_pcid.o_CFLAGS_ADD += -Wno-sign-conversion +pmap_tests.o_CFLAGS_ADD += -Wno-sign-conversion +pmap_x86_common.o_CFLAGS_ADD += -Wno-sign-conversion +printf.o_CFLAGS_ADD += -Wno-sign-conversion +priority.o_CFLAGS_ADD += -Wno-sign-conversion +prng_random.o_CFLAGS_ADD += -Wno-sign-conversion +processor.o_CFLAGS_ADD += -Wno-sign-conversion +remote_time.o_CFLAGS_ADD += -Wno-sign-conversion +rtclock.o_CFLAGS_ADD += -Wno-sign-conversion +sart.o_CFLAGS_ADD += -Wno-sign-conversion +sched_amp.o_CFLAGS_ADD += -Wno-sign-conversion +sched_amp_common.o_CFLAGS_ADD += -Wno-sign-conversion +sched_average.o_CFLAGS_ADD += -Wno-sign-conversion +sched_clutch.o_CFLAGS_ADD += -Wno-sign-conversion +sched_dualq.o_CFLAGS_ADD += -Wno-sign-conversion +sched_multiq.o_CFLAGS_ADD += -Wno-sign-conversion +sched_prim.o_CFLAGS_ADD += -Wno-sign-conversion +sched_traditional.o_CFLAGS_ADD += -Wno-sign-conversion +serial_console.o_CFLAGS_ADD += -Wno-sign-conversion +serial_general.o_CFLAGS_ADD += -Wno-sign-conversion +sfi.o_CFLAGS_ADD += -Wno-sign-conversion +shart.o_CFLAGS_ADD += -Wno-sign-conversion +sleh.o_CFLAGS_ADD += -Wno-sign-conversion +stack.o_CFLAGS_ADD += -Wno-sign-conversion +startup.o_CFLAGS_ADD += -Wno-sign-conversion +status.o_CFLAGS_ADD += -Wno-sign-conversion +status_shared.o_CFLAGS_ADD += -Wno-sign-conversion +subrs.o_CFLAGS_ADD += -Wno-sign-conversion +sync_sema.o_CFLAGS_ADD += -Wno-sign-conversion +syscall_subr.o_CFLAGS_ADD += -Wno-sign-conversion +t8020dart.o_CFLAGS_ADD += -Wno-sign-conversion +task.o_CFLAGS_ADD += -Wno-sign-conversion +task_policy.o_CFLAGS_ADD += -Wno-sign-conversion +telemetry.o_CFLAGS_ADD += -Wno-sign-conversion +test_lock.o_CFLAGS_ADD += -Wno-sign-conversion +test_thread_call.o_CFLAGS_ADD += -Wno-sign-conversion +thread_call.o_CFLAGS_ADD += -Wno-sign-conversion +thread_group.o_CFLAGS_ADD += -Wno-sign-conversion +thread_policy.o_CFLAGS_ADD += -Wno-sign-conversion +timer_call.o_CFLAGS_ADD += -Wno-sign-conversion +trap.o_CFLAGS_ADD += -Wno-sign-conversion +trustcache.o_CFLAGS_ADD += -Wno-sign-conversion +turnstile.o_CFLAGS_ADD += -Wno-sign-conversion +uat.o_CFLAGS_ADD += -Wno-sign-conversion +ucode.o_CFLAGS_ADD += -Wno-sign-conversion +ux_handler.o_CFLAGS_ADD += -Wno-sign-conversion +vfp_state_test.o_CFLAGS_ADD += -Wno-sign-conversion +video_console.o_CFLAGS_ADD += -Wno-sign-conversion +video_scroll.o_CFLAGS_ADD += -Wno-sign-conversion +vm32_user.o_CFLAGS_ADD += -Wno-sign-conversion +vm_compressor.o_CFLAGS_ADD += -Wno-sign-conversion +vm_compressor_algorithms.o_CFLAGS_ADD += -Wno-sign-conversion +vm_compressor_backing_store.o_CFLAGS_ADD += -Wno-sign-conversion +vm_compressor_pager.o_CFLAGS_ADD += -Wno-sign-conversion +vm_fault.o_CFLAGS_ADD += -Wno-sign-conversion +vm_fourk_pager.o_CFLAGS_ADD += -Wno-sign-conversion +vm_kern.o_CFLAGS_ADD += -Wno-sign-conversion +vm_map.o_CFLAGS_ADD += -Wno-sign-conversion +vm_map_store_ll.o_CFLAGS_ADD += -Wno-sign-conversion +vm_map_store_rb.o_CFLAGS_ADD += -Wno-sign-conversion +vm_object.o_CFLAGS_ADD += -Wno-sign-conversion +vm_pageout.o_CFLAGS_ADD += -Wno-sign-conversion +vm_phantom_cache.o_CFLAGS_ADD += -Wno-sign-conversion +vm_purgeable.o_CFLAGS_ADD += -Wno-sign-conversion +vm_resident.o_CFLAGS_ADD += -Wno-sign-conversion 
+vm_shared_region.o_CFLAGS_ADD += -Wno-sign-conversion +vm_user.o_CFLAGS_ADD += -Wno-sign-conversion +vmx_cpu.o_CFLAGS_ADD += -Wno-sign-conversion +waitq.o_CFLAGS_ADD += -Wno-sign-conversion +work_interval.o_CFLAGS_ADD += -Wno-sign-conversion +xcpm_dvfs.o_CFLAGS_ADD += -Wno-sign-conversion +xcpm_fi.o_CFLAGS_ADD += -Wno-sign-conversion +xcpm_idle.o_CFLAGS_ADD += -Wno-sign-conversion +xcpm_ioctl.o_CFLAGS_ADD += -Wno-sign-conversion +zalloc.o_CFLAGS_ADD += -Wno-sign-conversion +zcache.o_CFLAGS_ADD += -Wno-sign-conversion + # Rebuild if per-file overrides change ${OBJS}: $(firstword $(MAKEFILE_LIST)) @@ -146,7 +389,7 @@ $(COMPONENT).filelist: $(OBJS) $(SEG_HACK) -n __HIB -o $${hib_file}__ $${hib_file} || exit 1; \ mv $${hib_file}__ $${hib_file} || exit 1; \ done - $(call makelog,$(ColorL)LDFILELIST$(Color0) $(ColorLF)$(COMPONENT)$(Color0)) + @$(LOG_LDFILELIST) "$(COMPONENT)" $(_v)for obj in ${OBJS}; do \ $(ECHO) $(TARGET)/$(CURRENT_KERNEL_CONFIG)/$${obj}; \ done > $(COMPONENT).filelist @@ -170,8 +413,8 @@ endif -include genassym.d genassym.o: .CFLAGS $(firstword $(MAKEFILE_LIST)) genassym.o: $(SOURCE_DIR)/$(COMPONENT)/$(GENASSYM_LOCATION)/genassym.c - $(call makelog,[$(CMD_MC)] $(ColorH)GENASSYM$(Color0) $(ColorLF)$<$(Color0)) - $(_v)${GENASSYM_KCC} ${CFLAGS} ${CFLAGS_NOLTO_FLAG} -MD -S -o ${@} ${INCFLAGS} $< + @$(LOG_GENASSYM) "$<" + $(_v)${GENASSYM_KCC} ${CFLAGS} ${CFLAGS_NOLTO_FLAG} -fno-integrated-as -MD -S -o ${@} ${INCFLAGS} $< assym.s: genassym.o $(_v)sed -e '/^[[:space:]]*DEFINITION__define__/!d;{N;s/\n//;}' -e 's/^[[:space:]]*DEFINITION__define__\([^:]*\):.*ascii.*\"[\$$]*\([-0-9\#]*\)\".*$$/#define \1 \2/' -e 'p' -e 's/#//2' -e 's/^[[:space:]]*#define \([A-Za-z0-9_]*\)[[:space:]]*[\$$#]*\([-0-9]*\).*$$/#define \1_NUM \2/' genassym.o > $@ diff --git a/osfmk/conf/Makefile.x86_64 b/osfmk/conf/Makefile.x86_64 index 98df09944..34369e49d 100644 --- a/osfmk/conf/Makefile.x86_64 +++ b/osfmk/conf/Makefile.x86_64 @@ -2,14 +2,13 @@ #BEGIN Machine dependent Makefile fragment for x86_64 ###################################################################### -CWARNFLAGS = $(CWARNFLAGS_STD) -Wshorten-64-to-32 -Wno-atomic-implicit-seq-cst +CWARNFLAGS = $(CWARNFLAGS_STD) -Wno-atomic-implicit-seq-cst # Files that must go in the __HIB segment: UNCONFIGURED_HIB_FILES= \ WKdmDecompress_new.o \ WKdmData_new.o \ hibernate_restore.o \ - hibernate_bootstrap.o \ bcopy.o \ bzero.o @@ -19,7 +18,6 @@ HIB_FILES=$(filter $(UNCONFIGURED_HIB_FILES),$(OBJS)) WKdmDecompress_new.o_CFLAGS_ADD += -fno-stack-protector -fno-stack-check $(CFLAGS_NOLTO_FLAG) WKdmData_new.o_CFLAGS_ADD += -fno-stack-protector -fno-stack-check $(CFLAGS_NOLTO_FLAG) hibernate_restore.o_CFLAGS_ADD += -fno-stack-protector -fno-stack-check $(CFLAGS_NOLTO_FLAG) -hibernate_bootstrap.o_CFLAGS_ADD += -fno-stack-protector -fno-stack-check $(CFLAGS_NOLTO_FLAG) bcopy.o_CFLAGS_ADD += -fno-stack-protector -fno-stack-check $(CFLAGS_NOLTO_FLAG) bzero.o_CFLAGS_ADD += -fno-stack-protector -fno-stack-check $(CFLAGS_NOLTO_FLAG) fp_simd.o_SFLAGS_ADD += -mavx512f diff --git a/osfmk/conf/files b/osfmk/conf/files index 4d3aec134..991216abd 100644 --- a/osfmk/conf/files +++ b/osfmk/conf/files @@ -48,7 +48,6 @@ OPTIONS/mach_pagemap optional mach_pagemap OPTIONS/mach_vm_debug optional mach_vm_debug OPTIONS/mach_page_hash_stats optional mach_page_hash_stats OPTIONS/mig_debug optional mig_debug -OPTIONS/zone_debug optional zone_debug OPTIONS/vm_cpm optional vm_cpm OPTIONS/task_swapper optional task_swapper OPTIONS/stack_usage optional stack_usage @@ -91,6 +90,7 @@ 
osfmk/kdp/kdp_core.c optional mach_kdp osfmk/kdp/processor_core.c optional mach_kdp osfmk/kdp/kdp_serial.c optional config_serial_kdp osfmk/ipc/ipc_entry.c standard +osfmk/ipc/ipc_eventlink.c standard osfmk/ipc/ipc_hash.c standard osfmk/ipc/ipc_importance.c optional importance_inheritance osfmk/ipc/ipc_init.c standard @@ -150,9 +150,7 @@ osfmk/kern/mpsc_queue.c standard osfmk/kern/page_decrypt.c standard osfmk/kern/printf.c standard osfmk/kern/priority.c standard -osfmk/kern/priority_queue.c standard osfmk/kern/processor.c standard -osfmk/kern/processor_data.c standard osfmk/kern/restartable.c standard osfmk/kern/sched_average.c standard #ifdef __AMP__ @@ -208,6 +206,7 @@ osfmk/kern/suid_cred.c standard ./mach/clock_reply_user.c standard ./mach/exc_user.c standard ./mach/exc_server.c optional mach_bsd +./mach/mach_eventlink_server.c standard ./mach/host_priv_server.c standard ./mach/host_security_server.c standard ./mach/ktrace_background_user.c standard @@ -235,8 +234,10 @@ osfmk/tests/ktest_accessor.c optional config_xnupost osfmk/tests/ktest_emit.c optional config_xnupost osfmk/tests/ktest_global.c optional config_xnupost osfmk/tests/pmap_tests.c optional config_xnupost +osfmk/tests/ptrauth_data_tests.c optional config_xnupost osfmk/tests/bitmap_test.c optional config_xnupost osfmk/tests/test_thread_call.c optional config_xnupost +osfmk/tests/vfp_state_test.c optional config_xnupost ./mach/telemetry_notification_user.c optional config_telemetry osfmk/bank/bank.c standard osfmk/atm/atm.c optional config_atm @@ -286,6 +287,7 @@ osfmk/vm/vm_resident.c standard osfmk/vm/vm_shared_region.c standard osfmk/vm/vm_shared_region_pager.c standard osfmk/vm/vm_swapfile_pager.c standard +osfmk/vm/vm_tests.c standard osfmk/vm/vm_user.c standard osfmk/vm/vm32_user.c standard @@ -315,7 +317,7 @@ osfmk/kperf/pet.c optional kperf osfmk/kperf/thread_samplers.c optional kperf osfmk/kperf/task_samplers.c optional kperf osfmk/kperf/meminfo.c optional kperf -osfmk/kperf/kperf_timer.c optional kperf +osfmk/kperf/kptimer.c optional kperf osfmk/kperf/kperf_kpc.c optional kperf osfmk/kperf/kdebug_trigger.c optional kperf osfmk/kperf/lazy.c optional kperf @@ -333,22 +335,23 @@ osfmk/console/video_console.c optional video_console osfmk/kern/telemetry.c optional config_telemetry # Built-in corecrypto for early_random(): -osfmk/corecrypto/cc/src/cc_clear.c standard -osfmk/corecrypto/cc/src/cc_cmp_safe.c standard -osfmk/corecrypto/cc/src/cc_abort.c standard -osfmk/corecrypto/ccdbrg/src/ccdrbg_nisthmac.c standard -osfmk/corecrypto/ccdigest/src/ccdigest_init.c standard -osfmk/corecrypto/ccdigest/src/ccdigest_update.c standard -osfmk/corecrypto/cchmac/src/cchmac.c standard -osfmk/corecrypto/cchmac/src/cchmac_init.c standard -osfmk/corecrypto/cchmac/src/cchmac_update.c standard -osfmk/corecrypto/cchmac/src/cchmac_final.c standard -osfmk/corecrypto/ccsha1/src/ccdigest_final_64be.c standard +osfmk/corecrypto/cc_clear.c standard +osfmk/corecrypto/cc_cmp_safe.c standard +osfmk/corecrypto/cc_abort.c standard +osfmk/corecrypto/ccdrbg_nisthmac.c standard +osfmk/corecrypto/ccdigest_init.c standard +osfmk/corecrypto/ccdigest_update.c standard +osfmk/corecrypto/cchmac.c standard +osfmk/corecrypto/cchmac_init.c standard +osfmk/corecrypto/cchmac_update.c standard +osfmk/corecrypto/cchmac_final.c standard +osfmk/corecrypto/ccdigest_final_64be.c standard -osfmk/corecrypto/ccsha2/src/ccsha256_di.c standard -osfmk/corecrypto/ccsha2/src/ccsha256_initial_state.c standard -osfmk/corecrypto/ccsha2/src/ccsha256_K.c standard 
-osfmk/corecrypto/ccsha2/src/ccsha256_ltc_compress.c standard -osfmk/corecrypto/ccsha2/src/ccsha256_ltc_di.c standard +osfmk/corecrypto/ccsha256_di.c standard +osfmk/corecrypto/ccsha256_initial_state.c standard +osfmk/corecrypto/ccsha256_K.c standard +osfmk/corecrypto/ccsha256_ltc_compress.c standard +osfmk/corecrypto/ccsha256_ltc_di.c standard osfmk/prng/prng_random.c standard +osfmk/prng/entropy.c standard diff --git a/osfmk/conf/files.arm b/osfmk/conf/files.arm index 777e24bc6..f2ae9a6cc 100644 --- a/osfmk/conf/files.arm +++ b/osfmk/conf/files.arm @@ -68,7 +68,6 @@ osfmk/OPTIONS/ec optional ec osfmk/OPTIONS/hi_res_clock optional hi_res_clock # Kernel performance monitoring -osfmk/kperf/arm/kperf_mp.c optional kperf osfmk/arm/kpc_arm.c optional kpc osfmk/arm/monotonic_arm.c optional monotonic diff --git a/osfmk/conf/files.arm64 b/osfmk/conf/files.arm64 index 2fb849e8b..78324c05a 100644 --- a/osfmk/conf/files.arm64 +++ b/osfmk/conf/files.arm64 @@ -19,6 +19,7 @@ osfmk/arm64/WKdmCompress_4k.s standard osfmk/arm64/WKdmData.s standard osfmk/arm64/lz4_decode_arm64.s standard osfmk/arm64/lz4_encode_arm64.s standard +osfmk/arm64/amcc_rorgn.c standard osfmk/arm64/bcopy.s standard osfmk/arm64/bzero.s standard osfmk/arm/caches.c standard @@ -49,6 +50,7 @@ osfmk/arm64/machine_routines_asm.s standard osfmk/arm64/machine_task.c standard osfmk/arm/pal_routines.c standard osfmk/arm64/mcount.s optional profile +osfmk/arm64/memcmp_zero.s standard osfmk/arm64/strnlen.s standard osfmk/arm64/strncmp.s standard osfmk/arm/strncpy.c standard @@ -61,10 +63,15 @@ osfmk/arm64/status.c standard osfmk/arm/status_shared.c standard osfmk/arm/commpage/commpage.c standard +osfmk/arm/commpage/commpage_asm.s optional config_arm_pfz osfmk/kdp/ml/arm/kdp_machdep.c optional mach_kdp osfmk/kdp/ml/arm/kdp_vm.c optional mach_kdp +osfmk/arm64/hibernate_arm64.c optional hibernation +osfmk/arm64/hibernate_restore.c optional hibernation +osfmk/arm64/hibernate_asm.s optional hibernation + # DUMMIES TO FORCE GENERATION OF .h FILES osfmk/OPTIONS/ln optional ln osfmk/OPTIONS/eisa optional eisa @@ -73,7 +80,6 @@ osfmk/OPTIONS/ec optional ec osfmk/OPTIONS/hi_res_clock optional hi_res_clock # Kernel performance monitoring -osfmk/kperf/arm/kperf_mp.c optional kperf osfmk/arm64/kpc.c optional kpc osfmk/arm64/monotonic_arm64.c optional monotonic @@ -87,3 +93,4 @@ osfmk/arm64/alternate_debugger_asm.s optional alternate_debugger osfmk/arm64/pgtrace.c standard osfmk/arm64/pgtrace_decoder.c optional config_pgtrace_nonkext osfmk/arm64/machine_remote_time.c optional config_mach_bridge_recv_time +osfmk/arm64/corecrypto/sha256_compress_arm64.s standard diff --git a/osfmk/conf/files.x86_64 b/osfmk/conf/files.x86_64 index dd89ec483..5393a8cd6 100644 --- a/osfmk/conf/files.x86_64 +++ b/osfmk/conf/files.x86_64 @@ -95,7 +95,7 @@ osfmk/i386/ucode.c standard osfmk/i386/vmx/vmx_cpu.c optional config_vmx osfmk/i386/vmx/vmx_shims.c optional config_vmx -osfmk/kern/hv_support.c optional hypervisor +osfmk/kern/hv_support_kext.c optional hypervisor # DUMMIES TO FORCE GENERATION OF .h FILES #osfmk/OPTIONS/ln optional ln @@ -105,13 +105,12 @@ osfmk/kern/hv_support.c optional hypervisor #osfmk/OPTIONS/hi_res_clock optional hi_res_clock # Kernel performance monitoring -osfmk/kperf/x86_64/kperf_mp.c optional kperf osfmk/x86_64/kpc_x86.c optional kpc osfmk/x86_64/monotonic_x86_64.c optional monotonic -osfmk/i386/startup64.c standard osfmk/x86_64/idt64.s standard osfmk/i386/panic_hooks.c standard +osfmk/i386/panic_notify.c standard osfmk/x86_64/machine_remote_time.c 
optional config_mach_bridge_send_time diff --git a/osfmk/console/serial_console.c b/osfmk/console/serial_console.c index 163b90935..586562011 100644 --- a/osfmk/console/serial_console.c +++ b/osfmk/console/serial_console.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2019 Apple, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -30,18 +30,18 @@ #include #include #include -#include #include #include #include #endif /* __x86_64__ */ +#include #include #include #include #include #include -#include +#include #include #include #include @@ -67,15 +67,13 @@ static struct { char * buffer; int len; int used; + int nreserved; char * write_ptr; char * read_ptr; decl_simple_lock_data(, read_lock); - decl_simple_lock_data(, write_lock); + lck_ticket_t write_lock; } console_ring; -hw_lock_data_t cnputc_lock; -static volatile uint32_t console_output = 0; - /* * New allocation mechanism for console buffers * Total allocation: 1 * PAGE_SIZE @@ -98,14 +96,14 @@ static volatile uint32_t console_output = 0; /* * A serial line running at 115200 bps can output ~11.5 characters per millisecond. - * Synchronous serial logging with preemption+interrupts disabled fundamentally prevents us + * Synchronous serial logging with preemption disabled fundamentally prevents us * from hitting expected scheduling deadlines, but we can at least tone it down a bit. * * TODO: IOLog should use asynchronous serial logging instead of the synchronous serial console. (26555148) * * Keep interrupt disabled periods shorter than 1ms */ -#define MAX_INT_DISABLED_FLUSH_SIZE 8 +#define MAX_NO_PREEMPT_FLUSH_SIZE 8 #define MAX_TOTAL_FLUSH_SIZE (MAX(2, MAX_CPU_SLOTS) * CPU_CONS_BUF_SIZE) typedef struct console_buf { @@ -119,6 +117,12 @@ typedef struct console_buf { extern int serial_getc(void); extern void serial_putc(char); +#if DEBUG || DEVELOPMENT +TUNABLE(bool, allow_printf_from_interrupts_disabled_context, "nointr_consio", false); +#else +#define allow_printf_from_interrupts_disabled_context false +#endif + static void _serial_putc(int, int, int); SECURITY_READ_ONLY_EARLY(struct console_ops) cons_ops[] = { @@ -132,42 +136,43 @@ SECURITY_READ_ONLY_EARLY(struct console_ops) cons_ops[] = { SECURITY_READ_ONLY_EARLY(uint32_t) nconsops = (sizeof cons_ops / sizeof cons_ops[0]); +#if __x86_64__ uint32_t cons_ops_index = VC_CONS_OPS; +#else +SECURITY_READ_ONLY_LATE(uint32_t) cons_ops_index = VC_CONS_OPS; +#endif + +LCK_GRP_DECLARE(console_lck_grp, "console"); -#if defined(__x86_64__) || defined(__arm__) // NMI static variables #define NMI_STRING_SIZE 32 char nmi_string[NMI_STRING_SIZE] = "afDIGHr84A84jh19Kphgp428DNPdnapq"; static int nmi_counter = 0; -#endif /* __arm__ */ static bool console_suspended = false; -/* Wrapper for ml_set_interrupts_enabled */ -static void -console_restore_interrupts_state(boolean_t state) +static inline bool +console_io_allowed(void) { -#if INTERRUPT_MASKED_DEBUG - /* - * Serial console holds interrupts disabled for far too long - * and would trip the spin-debugger. If we are about to reenable - * interrupts then clear the timer and avoid panicking on the delay. - * Otherwise, let the code that printed with interrupt disabled - * take the panic when it reenables interrupts. - * Hopefully one day this is fixed so that this workaround is unnecessary. 
- */ - if (state == TRUE) { - ml_spin_debug_clear_self(); + if (!allow_printf_from_interrupts_disabled_context && + !console_suspended && + startup_phase >= STARTUP_SUB_EARLY_BOOT && + !ml_get_interrupts_enabled()) { +#if defined(__arm__) || defined(__arm64__) || DEBUG || DEVELOPMENT + panic("Console I/O from interrupt-disabled context"); +#else + return false; +#endif } -#endif /* INTERRUPT_MASKED_DEBUG */ - ml_set_interrupts_enabled(state); + + return true; } static void console_ring_lock_init(void) { simple_lock_init(&console_ring.read_lock, 0); - simple_lock_init(&console_ring.write_lock, 0); + lck_ticket_init(&console_ring.write_lock, &console_lck_grp); } void @@ -182,11 +187,15 @@ console_init(void) assert(console_ring.len > 0); - ret = kmem_alloc(kernel_map, (vm_offset_t *)&console_ring.buffer, KERN_CONSOLE_BUF_SIZE, VM_KERN_MEMORY_OSFMK); + ret = kmem_alloc_flags(kernel_map, (vm_offset_t *)&console_ring.buffer, + KERN_CONSOLE_BUF_SIZE + 2 * PAGE_SIZE, VM_KERN_MEMORY_OSFMK, + KMA_KOBJECT | KMA_PERMANENT | KMA_GUARD_FIRST | KMA_GUARD_LAST); if (ret != KERN_SUCCESS) { panic("console_ring_init() failed to allocate ring buffer, error %d\n", ret); } + console_ring.buffer += PAGE_SIZE; + /* setup memory for per cpu console buffers */ for (i = 0; i < MAX_CPU_SLOTS; i++) { p = (uint32_t *)((uintptr_t)console_ring.buffer + console_ring.len + (i * sizeof(console_buf_t))); @@ -194,10 +203,10 @@ console_init(void) } console_ring.used = 0; + console_ring.nreserved = 0; console_ring.read_ptr = console_ring.buffer; console_ring.write_ptr = console_ring.buffer; console_ring_lock_init(); - hw_lock_init(&cnputc_lock); } void * @@ -241,24 +250,38 @@ console_cpu_free(void * buf) } } -static inline int -console_ring_space(void) +static inline char* +console_ring_reserve_space(int nchars) { - return console_ring.len - console_ring.used; + char *write_ptr = NULL; + lck_ticket_lock(&console_ring.write_lock, &console_lck_grp); + if ((console_ring.len - console_ring.used) >= nchars) { + console_ring.used += nchars; + mp_disable_preemption(); // Don't allow preemption while holding a reservation; otherwise console_ring_try_empty() could take arbitrarily long + os_atomic_inc(&console_ring.nreserved, relaxed); + write_ptr = console_ring.write_ptr; + console_ring.write_ptr = + console_ring.buffer + ((console_ring.write_ptr - console_ring.buffer + nchars) % console_ring.len); + } + lck_ticket_unlock(&console_ring.write_lock); + return write_ptr; } -static boolean_t -console_ring_put(char ch) +static inline void +console_ring_unreserve_space(void) { - if (console_ring.used < console_ring.len) { - console_ring.used++; - *console_ring.write_ptr++ = ch; - if (console_ring.write_ptr - console_ring.buffer == console_ring.len) { - console_ring.write_ptr = console_ring.buffer; - } - return TRUE; - } else { - return FALSE; + os_atomic_dec(&console_ring.nreserved, relaxed); + mp_enable_preemption(); +} + +static inline void +console_ring_put(char **write_ptr, char ch) +{ + assert(console_ring.nreserved > 0); + **write_ptr = ch; + ++(*write_ptr); + if ((*write_ptr - console_ring.buffer) == console_ring.len) { + *write_ptr = console_ring.buffer; } } @@ -279,46 +302,30 @@ cpu_buffer_size(console_buf_t * cbp) return (int)(cbp->buf_ptr - cbp->buf_base); } -static inline void -_cnputs(char * c, int size) +static inline uint32_t +get_cons_ops_index(void) { - /* The console device output routines are assumed to be - * non-reentrant. 
- */ -#ifdef __x86_64__ - uint32_t lock_timeout_ticks = UINT32_MAX; -#else - uint32_t lock_timeout_ticks = LockTimeOut * 2; // 250ms is not enough, 500 is just right -#endif + uint32_t idx = cons_ops_index; - mp_disable_preemption(); - if (!hw_lock_to(&cnputc_lock, lock_timeout_ticks, LCK_GRP_NULL)) { - /* If we timed out on the lock, and we're in the debugger, - * copy lock data for debugging and break the lock. - */ - hw_lock_data_t _shadow_lock; - memcpy(&_shadow_lock, &cnputc_lock, sizeof(cnputc_lock)); - if (kernel_debugger_entry_count) { - /* Since hw_lock_to takes a pre-emption count...*/ - mp_enable_preemption(); - hw_lock_init(&cnputc_lock); - hw_lock_lock(&cnputc_lock, LCK_GRP_NULL); - } else { - panic("Lock acquire timeout in _cnputs() lock=%p, lock owner thread=0x%lx, current_thread: %p\n", &_shadow_lock, - _shadow_lock.lock_data, current_thread()); - } + if (idx >= nconsops) { + panic("Bad cons_ops_index: %d", idx); } + return idx; +} + +static inline void +_cnputs(char * c, int size) +{ + uint32_t idx = get_cons_ops_index(); + while (size-- > 0) { if (*c == '\n') { - cons_ops[cons_ops_index].putc(0, 0, '\r'); + cons_ops[idx].putc(0, 0, '\r'); } - cons_ops[cons_ops_index].putc(0, 0, *c); + cons_ops[idx].putc(0, 0, *c); c++; } - - hw_lock_unlock(&cnputc_lock); - mp_enable_preemption(); } void @@ -337,82 +344,42 @@ cnputcusr(char c) void cnputsusr(char *s, int size) { - if (size > 1) { - console_write(s, size); - return; - } - - boolean_t state; - - /* Spin (with pre-emption enabled) waiting for console_ring_try_empty() - * to complete output. There is a small window here where we could - * end up with a stale value of console_output, but it's unlikely, - * and _cnputs(), which outputs to the console device, is internally - * synchronized. There's something of a conflict between the - * character-at-a-time (with pre-emption enabled) unbuffered - * output model here, and the buffered output from cnputc(), - * whose consumers include printf() ( which outputs a sequence - * with pre-emption disabled, and should be safe to call with - * interrupts off); we don't want to disable pre-emption indefinitely - * here, and spinlocks and mutexes are inappropriate. - */ - while (console_output != 0) { - delay(1); - } - - /* - * We disable interrupts to avoid issues caused by rendevous IPIs - * and an interruptible core holding the lock while an uninterruptible - * core wants it. Stackshot is the prime example of this. - */ - state = ml_set_interrupts_enabled(FALSE); - _cnputs(s, 1); - console_restore_interrupts_state(state); + console_write(s, size); } static void console_ring_try_empty(void) { -#ifdef __x86_64__ - boolean_t handle_tlb_flushes = (ml_get_interrupts_enabled() == FALSE); -#endif /* __x86_64__ */ + char flush_buf[MAX_NO_PREEMPT_FLUSH_SIZE]; int nchars_out = 0; int total_chars_out = 0; int size_before_wrap = 0; + bool in_debugger = (kernel_debugger_entry_count > 0); - do { -#ifdef __x86_64__ - if (handle_tlb_flushes) { - handle_pending_TLB_flushes(); - } -#endif /* __x86_64__ */ + if (__improbable(!console_io_allowed())) { + return; + } + + do { /* * Try to get the read lock on the ring buffer to empty it. * If this fails someone else is already emptying... */ - if (!simple_lock_try(&console_ring.read_lock, LCK_GRP_NULL)) { - /* - * If multiple cores are spinning trying to empty the buffer, - * we may suffer lock starvation (get the read lock, but - * never the write lock, with other cores unable to get the - * read lock). 
As a result, insert a delay on failure, to - * let other cores have a turn. - */ - delay(1); + if (!in_debugger && !simple_lock_try(&console_ring.read_lock, &console_lck_grp)) { return; } - boolean_t state = ml_set_interrupts_enabled(FALSE); - - /* Indicate that we're in the process of writing a block of data to the console. */ - os_atomic_inc(&console_output, relaxed); - - simple_lock_try_lock_loop(&console_ring.write_lock, LCK_GRP_NULL); + if (__probable(!in_debugger)) { + lck_ticket_lock(&console_ring.write_lock, &console_lck_grp); + while (os_atomic_load(&console_ring.nreserved, relaxed) > 0) { + cpu_pause(); + } + } /* try small chunk at a time, so we allow writes from other cpus into the buffer */ - nchars_out = MIN(console_ring.used, MAX_INT_DISABLED_FLUSH_SIZE); + nchars_out = MIN(console_ring.used, (int)sizeof(flush_buf)); /* account for data to be read before wrap around */ size_before_wrap = (int)((console_ring.buffer + console_ring.len) - console_ring.read_ptr); @@ -421,27 +388,31 @@ console_ring_try_empty(void) } if (nchars_out > 0) { - _cnputs(console_ring.read_ptr, nchars_out); + memcpy(flush_buf, console_ring.read_ptr, nchars_out); console_ring.read_ptr = console_ring.buffer + ((console_ring.read_ptr - console_ring.buffer + nchars_out) % console_ring.len); console_ring.used -= nchars_out; - total_chars_out += nchars_out; } - simple_unlock(&console_ring.write_lock); - - os_atomic_dec(&console_output, relaxed); + if (__probable(!in_debugger)) { + lck_ticket_unlock(&console_ring.write_lock); + } - simple_unlock(&console_ring.read_lock); + if (nchars_out > 0) { + total_chars_out += nchars_out; + _cnputs(flush_buf, nchars_out); + } - console_restore_interrupts_state(state); + if (__probable(!in_debugger)) { + simple_unlock(&console_ring.read_lock); + } /* * In case we end up being the console drain thread * for far too long, break out. Except in panic/suspend cases * where we should clear out full buffer. */ - if (!kernel_debugger_entry_count && !console_suspended && (total_chars_out >= MAX_TOTAL_FLUSH_SIZE)) { + if (!console_suspended && (total_chars_out >= MAX_TOTAL_FLUSH_SIZE)) { break; } } while (nchars_out > 0); @@ -465,35 +436,38 @@ void console_write(char * str, int size) { console_init(); - int chunk_size = size; + char *write_ptr; + int chunk_size = CPU_CONS_BUF_SIZE; int i = 0; - if (size > console_ring.len) { - chunk_size = CPU_CONS_BUF_SIZE; + if (__improbable(!console_io_allowed())) { + return; + } else if (__improbable(console_suspended)) { + /* + * Put directly to console if we're heading into suspend or if we're in + * the kernel debugger for a panic/stackshot. If any of the other cores + * happened to halt while holding any of the console locks, attempting + * to use the normal path will result in sadness. 
+ */ + _cnputs(str, size); + return; } while (size > 0) { - boolean_t state = ml_set_interrupts_enabled(FALSE); - - simple_lock_try_lock_loop(&console_ring.write_lock, LCK_GRP_NULL); - while (chunk_size > console_ring_space()) { - simple_unlock(&console_ring.write_lock); - console_restore_interrupts_state(state); - + if (size < chunk_size) { + chunk_size = size; + } + while ((write_ptr = console_ring_reserve_space(chunk_size)) == NULL) { console_ring_try_empty(); - - state = ml_set_interrupts_enabled(FALSE); - simple_lock_try_lock_loop(&console_ring.write_lock, LCK_GRP_NULL); } for (i = 0; i < chunk_size; i++) { - console_ring_put(str[i]); + console_ring_put(&write_ptr, str[i]); } + console_ring_unreserve_space(); str = &str[i]; size -= chunk_size; - simple_unlock(&console_ring.write_lock); - console_restore_interrupts_state(state); } console_ring_try_empty(); @@ -504,8 +478,8 @@ cnputc(char c) { console_buf_t * cbp; cpu_data_t * cpu_data_p; - boolean_t state; boolean_t needs_print = TRUE; + char * write_ptr; char * cp; restart: @@ -514,31 +488,16 @@ restart: cbp = (console_buf_t *)cpu_data_p->cpu_console_buf; if (console_suspended || cbp == NULL) { mp_enable_preemption(); - /* Put directly if console ring is not initialized or we're heading into suspend */ + /* + * Put directly if console ring is not initialized or we're heading into suspend. + * Also do this if we're in the kernel debugger for a panic or stackshot. + * If any of the other cores happened to halt while holding any of the console + * locks, attempting to use the normal path will result in sadness. + */ _cnputs(&c, 1); return; } -#ifndef __x86_64__ - /* Is there a panic backtrace going on? */ - if (cpu_data_p->PAB_active) { - /* If another processor was in the process of emptying the - * console ring buffer when it received the panic backtrace - * signal, that processor will be spinning in DebugXCall() - * waiting for the panicking processor to finish printing - * the backtrace. But panicking processor will never - * be able to obtain the ring buffer lock since it is - * owned by a processor that's spinning in DebugXCall(). - * Blow away any locks that other processors may have on - * the console ring buffer so that the backtrace can - * complete. - */ - console_ring_lock_init(); - } -#endif /* __x86_64__ */ - - state = ml_set_interrupts_enabled(FALSE); - /* * add to stack buf * If the cpu buffer is full, we'll flush, then try @@ -546,11 +505,8 @@ restart: * it. */ if (needs_print && !cpu_buffer_put(cbp, c)) { - simple_lock_try_lock_loop(&console_ring.write_lock, LCK_GRP_NULL); - - if (cpu_buffer_size(cbp) > console_ring_space()) { - simple_unlock(&console_ring.write_lock); - console_restore_interrupts_state(state); + write_ptr = console_ring_reserve_space(cpu_buffer_size(cbp)); + if (write_ptr == NULL) { mp_enable_preemption(); console_ring_try_empty(); @@ -558,24 +514,22 @@ restart: } for (cp = cbp->buf_base; cp < cbp->buf_ptr; cp++) { - console_ring_put(*cp); + console_ring_put(&write_ptr, *cp); } - cbp->buf_ptr = cbp->buf_base; - simple_unlock(&console_ring.write_lock); + console_ring_unreserve_space(); + cbp->buf_ptr = cbp->buf_base; cpu_buffer_put(cbp, c); } needs_print = FALSE; if (c != '\n') { - console_restore_interrupts_state(state); mp_enable_preemption(); return; } /* We printed a newline, time to flush the CPU buffer to the global buffer */ - simple_lock_try_lock_loop(&console_ring.write_lock, LCK_GRP_NULL); /* * Is there enough space in the shared ring buffer? 
@@ -584,24 +538,21 @@ restart: * avoid another cpu interjecting. */ - if (cpu_buffer_size(cbp) > console_ring_space()) { - simple_unlock(&console_ring.write_lock); - console_restore_interrupts_state(state); + write_ptr = console_ring_reserve_space(cpu_buffer_size(cbp)); + if (write_ptr == NULL) { mp_enable_preemption(); console_ring_try_empty(); - goto restart; } for (cp = cbp->buf_base; cp < cbp->buf_ptr; cp++) { - console_ring_put(*cp); + console_ring_put(&write_ptr, *cp); } + console_ring_unreserve_space(); cbp->buf_ptr = cbp->buf_base; - simple_unlock(&console_ring.write_lock); - console_restore_interrupts_state(state); mp_enable_preemption(); console_ring_try_empty(); @@ -617,7 +568,6 @@ _serial_getc(__unused int a, __unused int b, boolean_t wait, __unused boolean_t c = serial_getc(); } while (wait && c < 0); -#if defined(__x86_64__) || defined(__arm__) // Check for the NMI string if (c == nmi_string[nmi_counter]) { nmi_counter++; @@ -630,7 +580,6 @@ _serial_getc(__unused int a, __unused int b, boolean_t wait, __unused boolean_t } else if (c != -1) { nmi_counter = 0; } -#endif return c; } @@ -638,19 +587,21 @@ _serial_getc(__unused int a, __unused int b, boolean_t wait, __unused boolean_t static void _serial_putc(__unused int a, __unused int b, int c) { - serial_putc(c); + serial_putc((char)c); } int cngetc(void) { - return cons_ops[cons_ops_index].getc(0, 0, TRUE, FALSE); + uint32_t idx = get_cons_ops_index(); + return cons_ops[idx].getc(0, 0, TRUE, FALSE); } int cnmaygetc(void) { - return cons_ops[cons_ops_index].getc(0, 0, FALSE, FALSE); + uint32_t idx = get_cons_ops_index(); + return cons_ops[idx].getc(0, 0, FALSE, FALSE); } int diff --git a/osfmk/console/serial_general.c b/osfmk/console/serial_general.c index 7a8cfeb00..62bde9741 100644 --- a/osfmk/console/serial_general.c +++ b/osfmk/console/serial_general.c @@ -156,7 +156,7 @@ console_printbuf_putc(int ch, void * arg) struct console_printbuf_state * info = (struct console_printbuf_state *)arg; info->total += 1; if (info->pos < (SERIAL_CONS_BUF_SIZE - 1)) { - info->str[info->pos] = ch; + info->str[info->pos] = (char)ch; info->pos += 1; } else { /* @@ -168,7 +168,7 @@ console_printbuf_putc(int ch, void * arg) info->str[info->pos] = '\0'; console_write(info->str, info->pos); info->pos = 0; - info->str[info->pos] = ch; + info->str[info->pos] = (char)ch; info->pos += 1; } } diff --git a/osfmk/console/serial_protos.h b/osfmk/console/serial_protos.h index e508ee10b..1e191633b 100644 --- a/osfmk/console/serial_protos.h +++ b/osfmk/console/serial_protos.h @@ -49,6 +49,7 @@ extern uint32_t serialmode; #define SERIALMODE_OUTPUT 0x1 #define SERIALMODE_INPUT 0x2 #define SERIALMODE_SYNCDRAIN 0x4 +#define SERIALMODE_BASE_TTY 0x8 /* Load Base/Recovery/FVUnlock TTY */ extern uint32_t cons_ops_index; extern const uint32_t nconsops; diff --git a/osfmk/console/video_console.c b/osfmk/console/video_console.c index 6a2131ca0..e7bf981be 100644 --- a/osfmk/console/video_console.c +++ b/osfmk/console/video_console.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2019 Apple Inc. All rights reserved. + * Copyright (c) 2000-2020 Apple Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -106,7 +106,7 @@ #include #include "iso_font.c" -#if !CONFIG_EMBEDDED +#if defined(XNU_TARGET_OS_OSX) #include "progress_meter_data.c" #endif @@ -131,10 +131,10 @@ static struct { void (*initialize)(struct vc_info * info); void (*enable)(boolean_t enable); void (*paint_char)(unsigned int xx, unsigned int yy, unsigned char ch, - int attrs, unsigned char ch_previous, - int attrs_previous); + int attrs, unsigned char ch_previous, + int attrs_previous); void (*clear_screen)(unsigned int xx, unsigned int yy, unsigned int top, - unsigned int bottom, int which); + unsigned int bottom, int which); void (*scroll_down)(int num, unsigned int top, unsigned int bottom); void (*scroll_up)(int num, unsigned int top, unsigned int bottom); void (*hide_cursor)(unsigned int xx, unsigned int yy); @@ -150,64 +150,40 @@ static uint32_t gc_buffer_columns; static uint32_t gc_buffer_rows; static uint32_t gc_buffer_size; -#if defined(__i386__) || defined(__x86_64__) -decl_simple_lock_data(static, vcputc_lock); -#define VCPUTC_LOCK_INIT() \ -MACRO_BEGIN \ - simple_lock_init(&vcputc_lock, 0); \ -MACRO_END +LCK_GRP_DECLARE(vconsole_lck_grp, "vconsole"); +static lck_ticket_t vcputc_lock; -#define VCPUTC_LOCK_LOCK() \ -MACRO_BEGIN \ - boolean_t istate = ml_get_interrupts_enabled(); \ - while (!simple_lock_try(&vcputc_lock, LCK_GRP_NULL)) \ - { \ - if (!istate) \ - handle_pending_TLB_flushes(); \ - cpu_pause(); \ - } \ -MACRO_END -#define VCPUTC_LOCK_UNLOCK() \ -MACRO_BEGIN \ - simple_unlock(&vcputc_lock); \ +#define VCPUTC_LOCK_INIT() \ +MACRO_BEGIN \ + lck_ticket_init(&vcputc_lock, &vconsole_lck_grp); \ MACRO_END -#else -static hw_lock_data_t vcputc_lock; -#define VCPUTC_LOCK_INIT() \ -MACRO_BEGIN \ - hw_lock_init(&vcputc_lock); \ +#define VCPUTC_LOCK_LOCK() \ +MACRO_BEGIN \ + lck_ticket_lock(&vcputc_lock, &vconsole_lck_grp); \ MACRO_END -#define VCPUTC_LOCK_LOCK() \ -MACRO_BEGIN \ - if (!hw_lock_to(&vcputc_lock, ~0U, LCK_GRP_NULL))\ - { \ - panic("VCPUTC_LOCK_LOCK"); \ - } \ +#define VCPUTC_LOCK_UNLOCK() \ +MACRO_BEGIN \ + lck_ticket_unlock(&vcputc_lock); \ MACRO_END -#define VCPUTC_LOCK_UNLOCK() \ -MACRO_BEGIN \ - hw_lock_unlock(&vcputc_lock); \ -MACRO_END -#endif /* -# Attribute codes: -# 00=none 01=bold 04=underscore 05=blink 07=reverse 08=concealed -# Text color codes: -# 30=black 31=red 32=green 33=yellow 34=blue 35=magenta 36=cyan 37=white -# Background color codes: -# 40=black 41=red 42=green 43=yellow 44=blue 45=magenta 46=cyan 47=white -*/ - -#define ATTR_NONE 0 -#define ATTR_BOLD 1 -#define ATTR_UNDER 2 -#define ATTR_REVERSE 4 + # Attribute codes: + # 00=none 01=bold 04=underscore 05=blink 07=reverse 08=concealed + # Text color codes: + # 30=black 31=red 32=green 33=yellow 34=blue 35=magenta 36=cyan 37=white + # Background color codes: + # 40=black 41=red 42=green 43=yellow 44=blue 45=magenta 46=cyan 47=white + */ + +#define ATTR_NONE 0 +#define ATTR_BOLD 1 +#define ATTR_UNDER 2 +#define ATTR_REVERSE 4 #define COLOR_BACKGROUND 0 #define COLOR_FOREGROUND 7 @@ -218,7 +194,7 @@ MACRO_END static unsigned char gc_color_code; /* VT100 state: */ -#define MAXPARS 16 +#define MAXPARS 16 static unsigned int gc_x, gc_y, gc_savex, gc_savey; static unsigned int gc_par[MAXPARS], gc_numpars, gc_hanging_cursor, gc_attr, gc_saveattr; @@ -226,41 +202,40 @@ static unsigned int gc_par[MAXPARS], gc_numpars, gc_hanging_cursor, gc_attr, gc_ static unsigned int gc_scrreg_top, gc_scrreg_bottom; enum vt100state_e { - ESnormal, /* Nothing yet */ - ESesc, /* Got ESC */ - ESsquare, /* 
Got ESC [ */ - ESgetpars, /* About to get or getting the parameters */ - ESgotpars, /* Finished getting the parameters */ - ESfunckey, /* Function key */ - EShash, /* DEC-specific stuff (screen align, etc.) */ - ESsetG0, /* Specify the G0 character set */ - ESsetG1, /* Specify the G1 character set */ + ESnormal, /* Nothing yet */ + ESesc, /* Got ESC */ + ESsquare, /* Got ESC [ */ + ESgetpars, /* About to get or getting the parameters */ + ESgotpars, /* Finished getting the parameters */ + ESfunckey, /* Function key */ + EShash, /* DEC-specific stuff (screen align, etc.) */ + ESsetG0, /* Specify the G0 character set */ + ESsetG1, /* Specify the G1 character set */ ESask, EScharsize, - ESignore /* Ignore this sequence */ + ESignore /* Ignore this sequence */ } gc_vt100state = ESnormal; -enum -{ - /* secs */ - kProgressAcquireDelay = 0, -#if CONFIG_EMBEDDED - kProgressReacquireDelay = 5, +enum{ + /* secs */ + kProgressAcquireDelay = 0, +#if !defined(XNU_TARGET_OS_OSX) + kProgressReacquireDelay = 5, #else - kProgressReacquireDelay = 5, + kProgressReacquireDelay = 5, #endif }; static int8_t vc_rotate_matr[4][2][2] = { - { { 1, 0 }, - { 0, 1 } }, - { { 0, 1 }, - { -1, 0 } }, - { { -1, 0 }, - { 0, -1 } }, - { { 0, -1 }, - { 1, 0 } } + { { 1, 0 }, + { 0, 1 } }, + { { 0, 1 }, + { -1, 0 } }, + { { -1, 0 }, + { 0, -1 } }, + { { 0, -1 }, + { 1, 0 } } }; static int gc_wrap_mode = 1, gc_relative_origin = 0; @@ -270,13 +245,13 @@ static int gc_charset_save[2] = { 0, 0 }; static void gc_clear_line(unsigned int xx, unsigned int yy, int which); static void gc_clear_screen(unsigned int xx, unsigned int yy, int top, - unsigned int bottom, int which); + unsigned int bottom, int which); static void gc_enable(boolean_t enable); static void gc_hide_cursor(unsigned int xx, unsigned int yy); static void gc_initialize(struct vc_info * info); static boolean_t gc_is_tab_stop(unsigned int column); static void gc_paint_char(unsigned int xx, unsigned int yy, unsigned char ch, - int attrs); + int attrs); static void gc_putchar(char ch); static void gc_putc_askcmd(unsigned char ch); static void gc_putc_charsetcmd(int charset, unsigned char ch); @@ -308,17 +283,17 @@ gc_clear_line(unsigned int xx, unsigned int yy, int which) */ switch (which) { - case 0: /* To end of line */ + case 0: /* To end of line */ start = xx; - end = vinfo.v_columns-1; + end = vinfo.v_columns - 1; break; - case 1: /* To start of line */ + case 1: /* To start of line */ start = 0; end = xx; break; - case 2: /* Whole line */ + case 2: /* Whole line */ start = 0; - end = vinfo.v_columns-1; + end = vinfo.v_columns - 1; break; default: return; @@ -331,31 +306,32 @@ gc_clear_line(unsigned int xx, unsigned int yy, int which) static void gc_clear_screen(unsigned int xx, unsigned int yy, int top, unsigned int bottom, - int which) + int which) { - if (!gc_buffer_size) return; + if (!gc_buffer_size) { + return; + } - if ( xx < gc_buffer_columns && yy < gc_buffer_rows && bottom <= gc_buffer_rows ) - { + if (xx < gc_buffer_columns && yy < gc_buffer_rows && bottom <= gc_buffer_rows) { uint32_t start, end; switch (which) { - case 0: /* To end of screen */ - start = (yy * gc_buffer_columns) + xx; - end = (bottom * gc_buffer_columns) - 1; - break; - case 1: /* To start of screen */ - start = (top * gc_buffer_columns); - end = (yy * gc_buffer_columns) + xx; - break; - case 2: /* Whole screen */ - start = (top * gc_buffer_columns); - end = (bottom * gc_buffer_columns) - 1; - break; - default: - start = 0; - end = 0; - break; + case 0: /* To end of screen */ + start = 
(yy * gc_buffer_columns) + xx; + end = (bottom * gc_buffer_columns) - 1; + break; + case 1: /* To start of screen */ + start = (top * gc_buffer_columns); + end = (yy * gc_buffer_columns) + xx; + break; + case 2: /* Whole screen */ + start = (top * gc_buffer_columns); + end = (bottom * gc_buffer_columns) - 1; + break; + default: + start = 0; + end = 0; + break; } memset(gc_buffer_attributes + start, ATTR_NONE, end - start + 1); @@ -378,20 +354,19 @@ gc_enable( boolean_t enable ) uint32_t buffer_size = 0; spl_t s; - if ( enable == FALSE ) - { + if (enable == FALSE) { // only disable console output if it goes to the graphics console - if ( console_is_serial() == FALSE ) + if (console_is_serial() == FALSE) { disableConsoleOutput = TRUE; + } gc_enabled = FALSE; gc_ops.enable(FALSE); } - s = splhigh( ); - VCPUTC_LOCK_LOCK( ); + s = splhigh(); + VCPUTC_LOCK_LOCK(); - if ( gc_buffer_size ) - { + if (gc_buffer_size) { buffer_attributes = gc_buffer_attributes; buffer_characters = gc_buffer_characters; buffer_colorcodes = gc_buffer_colorcodes; @@ -408,44 +383,46 @@ gc_enable( boolean_t enable ) gc_buffer_rows = 0; gc_buffer_size = 0; - VCPUTC_LOCK_UNLOCK( ); + VCPUTC_LOCK_UNLOCK(); splx( s ); - kfree( buffer_attributes, buffer_size ); - kfree( buffer_characters, buffer_size ); - kfree( buffer_colorcodes, buffer_size ); - kfree( buffer_tab_stops, buffer_columns ); - } - else - { - VCPUTC_LOCK_UNLOCK( ); + kheap_free( KHEAP_DATA_BUFFERS, buffer_attributes, buffer_size ); + kheap_free( KHEAP_DATA_BUFFERS, buffer_characters, buffer_size ); + kheap_free( KHEAP_DATA_BUFFERS, buffer_colorcodes, buffer_size ); + kheap_free( KHEAP_DATA_BUFFERS, buffer_tab_stops, buffer_columns ); + } else { + VCPUTC_LOCK_UNLOCK(); splx( s ); } - if ( enable ) - { - if ( vm_initialized ) - { + if (enable) { + if (vm_initialized) { buffer_columns = vinfo.v_columns; buffer_rows = vinfo.v_rows; buffer_size = buffer_columns * buffer_rows; - if ( buffer_size ) - { - buffer_attributes = (unsigned char *) kalloc( buffer_size ); - buffer_characters = (unsigned char *) kalloc( buffer_size ); - buffer_colorcodes = (unsigned char *) kalloc( buffer_size ); - buffer_tab_stops = (unsigned char *) kalloc( buffer_columns ); - - if ( buffer_attributes == NULL || - buffer_characters == NULL || - buffer_colorcodes == NULL || - buffer_tab_stops == NULL ) - { - if ( buffer_attributes ) kfree( buffer_attributes, buffer_size ); - if ( buffer_characters ) kfree( buffer_characters, buffer_size ); - if ( buffer_colorcodes ) kfree( buffer_colorcodes, buffer_size ); - if ( buffer_tab_stops ) kfree( buffer_tab_stops, buffer_columns ); + if (buffer_size) { + buffer_attributes = kheap_alloc( KHEAP_DATA_BUFFERS, buffer_size, Z_WAITOK ); + buffer_characters = kheap_alloc( KHEAP_DATA_BUFFERS, buffer_size, Z_WAITOK ); + buffer_colorcodes = kheap_alloc( KHEAP_DATA_BUFFERS, buffer_size, Z_WAITOK ); + buffer_tab_stops = kheap_alloc( KHEAP_DATA_BUFFERS, buffer_columns, Z_WAITOK ); + + if (buffer_attributes == NULL || + buffer_characters == NULL || + buffer_colorcodes == NULL || + buffer_tab_stops == NULL) { + if (buffer_attributes) { + kheap_free( KHEAP_DATA_BUFFERS, buffer_attributes, buffer_size ); + } + if (buffer_characters) { + kheap_free( KHEAP_DATA_BUFFERS, buffer_characters, buffer_size ); + } + if (buffer_colorcodes) { + kheap_free( KHEAP_DATA_BUFFERS, buffer_colorcodes, buffer_size ); + } + if (buffer_tab_stops) { + kheap_free( KHEAP_DATA_BUFFERS, buffer_tab_stops, buffer_columns ); + } buffer_attributes = NULL; buffer_characters = NULL; @@ -454,9 +431,7 
@@ gc_enable( boolean_t enable ) buffer_columns = 0; buffer_rows = 0; buffer_size = 0; - } - else - { + } else { memset( buffer_attributes, ATTR_NONE, buffer_size ); memset( buffer_characters, ' ', buffer_size ); memset( buffer_colorcodes, COLOR_CODE_SET( 0, COLOR_FOREGROUND, TRUE ), buffer_size ); @@ -465,8 +440,8 @@ gc_enable( boolean_t enable ) } } - s = splhigh( ); - VCPUTC_LOCK_LOCK( ); + s = splhigh(); + VCPUTC_LOCK_LOCK(); gc_buffer_attributes = buffer_attributes; gc_buffer_characters = buffer_characters; @@ -478,7 +453,7 @@ gc_enable( boolean_t enable ) gc_reset_screen(); - VCPUTC_LOCK_UNLOCK( ); + VCPUTC_LOCK_UNLOCK(); splx( s ); gc_ops.clear_screen(gc_x, gc_y, 0, vinfo.v_rows, 2); @@ -493,8 +468,7 @@ gc_enable( boolean_t enable ) static void gc_hide_cursor(unsigned int xx, unsigned int yy) { - if ( xx < gc_buffer_columns && yy < gc_buffer_rows ) - { + if (xx < gc_buffer_columns && yy < gc_buffer_rows) { uint32_t index = (yy * gc_buffer_columns) + xx; unsigned char attribute = gc_buffer_attributes[index]; unsigned char character = gc_buffer_characters[index]; @@ -508,9 +482,7 @@ gc_hide_cursor(unsigned int xx, unsigned int yy) gc_update_color(COLOR_CODE_GET(colorcodesave, TRUE ), TRUE ); gc_update_color(COLOR_CODE_GET(colorcodesave, FALSE), FALSE); - } - else - { + } else { gc_ops.hide_cursor(xx, yy); } } @@ -518,8 +490,7 @@ gc_hide_cursor(unsigned int xx, unsigned int yy) static void gc_initialize(struct vc_info * info) { - if ( gc_initialized == FALSE ) - { + if (gc_initialized == FALSE) { /* Init our lock */ VCPUTC_LOCK_INIT(); @@ -535,8 +506,7 @@ gc_initialize(struct vc_info * info) static void gc_paint_char(unsigned int xx, unsigned int yy, unsigned char ch, int attrs) { - if ( xx < gc_buffer_columns && yy < gc_buffer_rows ) - { + if (xx < gc_buffer_columns && yy < gc_buffer_rows) { uint32_t index = (yy * gc_buffer_columns) + xx; gc_buffer_attributes[index] = attrs; @@ -551,10 +521,12 @@ static void gc_putchar(char ch) { if (!ch) { - return; /* ignore null characters */ + return; /* ignore null characters */ } switch (gc_vt100state) { - default:gc_vt100state = ESnormal; /* FALLTHROUGH */ + default: + gc_vt100state = ESnormal; + OS_FALLTHROUGH; case ESnormal: gc_putc_normal(ch); break; @@ -585,16 +557,18 @@ gc_putchar(char ch) } if (gc_x >= vinfo.v_columns) { - if (0 == vinfo.v_columns) + if (0 == vinfo.v_columns) { gc_x = 0; - else + } else { gc_x = vinfo.v_columns - 1; + } } if (gc_y >= vinfo.v_rows) { - if (0 == vinfo.v_rows) + if (0 == vinfo.v_rows) { gc_y = 0; - else + } else { gc_y = vinfo.v_rows - 1; + } } } @@ -602,22 +576,21 @@ static void gc_putc_askcmd(unsigned char ch) { if (ch >= '0' && ch <= '9') { - gc_par[gc_numpars] = (10*gc_par[gc_numpars]) + (ch-'0'); + gc_par[gc_numpars] = (10 * gc_par[gc_numpars]) + (ch - '0'); return; } gc_vt100state = ESnormal; switch (gc_par[0]) { - case 6: - gc_relative_origin = ch == 'h'; - break; - case 7: /* wrap around mode h=1, l=0*/ - gc_wrap_mode = ch == 'h'; - break; - default: - break; + case 6: + gc_relative_origin = ch == 'h'; + break; + case 7: /* wrap around mode h=1, l=0*/ + gc_wrap_mode = ch == 'h'; + break; + default: + break; } - } static void @@ -626,17 +599,16 @@ gc_putc_charsetcmd(int charset, unsigned char ch) gc_vt100state = ESnormal; switch (ch) { - case 'A' : - case 'B' : - default: - gc_charset[charset] = 0; - break; - case '0' : /* Graphic characters */ - case '2' : - gc_charset[charset] = 0x21; - break; + case 'A': + case 'B': + default: + gc_charset[charset] = 0; + break; + case '0': /* Graphic characters */ 
+ case '2': + gc_charset[charset] = 0x21; + break; } - } static void @@ -645,24 +617,25 @@ gc_putc_charsizecmd(unsigned char ch) gc_vt100state = ESnormal; switch (ch) { - case '3' : - case '4' : - case '5' : - case '6' : - break; - case '8' : /* fill 'E's */ - { - unsigned int xx, yy; - for (yy = 0; yy < vinfo.v_rows; yy++) - for (xx = 0; xx < vinfo.v_columns; xx++) - gc_paint_char(xx, yy, 'E', ATTR_NONE); + case '3': + case '4': + case '5': + case '6': + break; + case '8': /* fill 'E's */ + { + unsigned int xx, yy; + for (yy = 0; yy < vinfo.v_rows; yy++) { + for (xx = 0; xx < vinfo.v_columns; xx++) { + gc_paint_char(xx, yy, 'E', ATTR_NONE); } - break; + } + } + break; } - } -static void +static void gc_putc_esc(unsigned char ch) { gc_vt100state = ESnormal; @@ -671,25 +644,27 @@ gc_putc_esc(unsigned char ch) case '[': gc_vt100state = ESsquare; break; - case 'c': /* Reset terminal */ + case 'c': /* Reset terminal */ gc_reset_vt100(); gc_clear_screen(gc_x, gc_y, 0, vinfo.v_rows, 2); gc_x = gc_y = 0; break; - case 'D': /* Line feed */ + case 'D': /* Line feed */ case 'E': - if (gc_y >= gc_scrreg_bottom -1) { + if (gc_y >= gc_scrreg_bottom - 1) { gc_scroll_up(1, gc_scrreg_top, gc_scrreg_bottom); gc_y = gc_scrreg_bottom - 1; } else { gc_y++; } - if (ch == 'E') gc_x = 0; + if (ch == 'E') { + gc_x = 0; + } break; - case 'H': /* Set tab stop */ + case 'H': /* Set tab stop */ gc_set_tab_stop(gc_x, TRUE); break; - case 'M': /* Cursor up */ + case 'M': /* Cursor up */ if (gc_y <= gc_scrreg_top) { gc_scroll_down(1, gc_scrreg_top, gc_scrreg_bottom); gc_y = gc_scrreg_top; @@ -700,7 +675,7 @@ gc_putc_esc(unsigned char ch) case '>': gc_reset_vt100(); break; - case '7': /* Save cursor */ + case '7': /* Save cursor */ gc_savex = gc_x; gc_savey = gc_y; gc_saveattr = gc_attr; @@ -708,7 +683,7 @@ gc_putc_esc(unsigned char ch) gc_charset_save[0] = gc_charset[0]; gc_charset_save[1] = gc_charset[1]; break; - case '8': /* Restore cursor */ + case '8': /* Restore cursor */ gc_x = gc_savex; gc_y = gc_savey; gc_attr = gc_saveattr; @@ -716,15 +691,15 @@ gc_putc_esc(unsigned char ch) gc_charset[0] = gc_charset_save[0]; gc_charset[1] = gc_charset_save[1]; break; - case 'Z': /* return terminal ID */ + case 'Z': /* return terminal ID */ break; - case '#': /* change characters height */ + case '#': /* change characters height */ gc_vt100state = EScharsize; break; case '(': gc_vt100state = ESsetG0; break; - case ')': /* character set sequence */ + case ')': /* character set sequence */ gc_vt100state = ESsetG1; break; case '=': @@ -733,10 +708,9 @@ gc_putc_esc(unsigned char ch) /* Rest not supported */ break; } - } -static void +static void gc_putc_getpars(unsigned char ch) { if (ch == '?') { @@ -750,98 +724,104 @@ gc_putc_getpars(unsigned char ch) } if (ch == ';' && gc_numpars < MAXPARS - 1) { gc_numpars++; - } else - if (ch >= '0' && ch <= '9') { - gc_par[gc_numpars] *= 10; - gc_par[gc_numpars] += ch - '0'; - } else { - gc_numpars++; - gc_vt100state = ESgotpars; - gc_putc_gotpars(ch); - } + } else if (ch >= '0' && ch <= '9') { + gc_par[gc_numpars] *= 10; + gc_par[gc_numpars] += ch - '0'; + } else { + gc_numpars++; + gc_vt100state = ESgotpars; + gc_putc_gotpars(ch); + } } -static void +static void gc_putc_gotpars(unsigned char ch) { unsigned int i; if (ch < ' ') { /* special case for vttest for handling cursor - movement in escape sequences */ + * movement in escape sequences */ gc_putc_normal(ch); gc_vt100state = ESgotpars; return; } gc_vt100state = ESnormal; switch (ch) { - case 'A': /* Up */ + case 'A': /* Up */ gc_y -= 
gc_par[0] ? gc_par[0] : 1; - if (gc_y < gc_scrreg_top) + if (gc_y < gc_scrreg_top) { gc_y = gc_scrreg_top; + } break; - case 'B': /* Down */ + case 'B': /* Down */ gc_y += gc_par[0] ? gc_par[0] : 1; - if (gc_y >= gc_scrreg_bottom) + if (gc_y >= gc_scrreg_bottom) { gc_y = gc_scrreg_bottom - 1; + } break; - case 'C': /* Right */ + case 'C': /* Right */ gc_x += gc_par[0] ? gc_par[0] : 1; - if (gc_x >= vinfo.v_columns) - gc_x = vinfo.v_columns-1; + if (gc_x >= vinfo.v_columns) { + gc_x = vinfo.v_columns - 1; + } break; - case 'D': /* Left */ - if (gc_par[0] > gc_x) + case 'D': /* Left */ + if (gc_par[0] > gc_x) { gc_x = 0; - else if (gc_par[0]) + } else if (gc_par[0]) { gc_x -= gc_par[0]; - else if (gc_x) + } else if (gc_x) { --gc_x; + } break; - case 'H': /* Set cursor position */ + case 'H': /* Set cursor position */ case 'f': gc_x = gc_par[1] ? gc_par[1] - 1 : 0; gc_y = gc_par[0] ? gc_par[0] - 1 : 0; - if (gc_relative_origin) + if (gc_relative_origin) { gc_y += gc_scrreg_top; + } gc_hanging_cursor = 0; break; - case 'X': /* clear p1 characters */ + case 'X': /* clear p1 characters */ if (gc_numpars) { - for (i = gc_x; i < gc_x + gc_par[0]; i++) + for (i = gc_x; i < gc_x + gc_par[0]; i++) { gc_paint_char(i, gc_y, ' ', ATTR_NONE); + } } break; - case 'J': /* Clear part of screen */ + case 'J': /* Clear part of screen */ gc_clear_screen(gc_x, gc_y, 0, vinfo.v_rows, gc_par[0]); break; - case 'K': /* Clear part of line */ + case 'K': /* Clear part of line */ gc_clear_line(gc_x, gc_y, gc_par[0]); break; - case 'g': /* tab stops */ + case 'g': /* tab stops */ switch (gc_par[0]) { - case 1: - case 2: /* reset tab stops */ - /* gc_reset_tabs(); */ - break; - case 3: /* Clear every tabs */ - { - for (i = 0; i <= vinfo.v_columns; i++) - gc_set_tab_stop(i, FALSE); - } - break; - case 0: - gc_set_tab_stop(gc_x, FALSE); - break; + case 1: + case 2: /* reset tab stops */ + /* gc_reset_tabs(); */ + break; + case 3: /* Clear every tabs */ + { + for (i = 0; i <= vinfo.v_columns; i++) { + gc_set_tab_stop(i, FALSE); + } + } + break; + case 0: + gc_set_tab_stop(gc_x, FALSE); + break; } break; - case 'm': /* Set attribute */ + case 'm': /* Set attribute */ for (i = 0; i < gc_numpars; i++) { switch (gc_par[i]) { case 0: gc_attr = ATTR_NONE; gc_update_color(COLOR_BACKGROUND, FALSE); - gc_update_color(COLOR_FOREGROUND, TRUE ); + gc_update_color(COLOR_FOREGROUND, TRUE ); break; case 1: gc_attr |= ATTR_BOLD; @@ -862,18 +842,20 @@ gc_putc_gotpars(unsigned char ch) gc_attr &= ~ATTR_REVERSE; break; case 5: - case 25: /* blink/no blink */ + case 25: /* blink/no blink */ break; default: - if (gc_par[i] >= 30 && gc_par[i] <= 37) + if (gc_par[i] >= 30 && gc_par[i] <= 37) { gc_update_color(gc_par[i] - 30, TRUE); - if (gc_par[i] >= 40 && gc_par[i] <= 47) + } + if (gc_par[i] >= 40 && gc_par[i] <= 47) { gc_update_color(gc_par[i] - 40, FALSE); + } break; } } break; - case 'r': /* Set scroll region */ + case 'r': /* Set scroll region */ gc_x = gc_y = 0; /* ensure top < bottom, and both within limits */ if ((gc_numpars > 0) && (gc_par[0] < vinfo.v_rows)) { @@ -883,50 +865,55 @@ gc_putc_gotpars(unsigned char ch) } if ((gc_numpars > 1) && (gc_par[1] <= vinfo.v_rows) && (gc_par[1] > gc_par[0])) { gc_scrreg_bottom = gc_par[1]; - if (gc_scrreg_bottom > vinfo.v_rows) + if (gc_scrreg_bottom > vinfo.v_rows) { gc_scrreg_bottom = vinfo.v_rows; + } } else { gc_scrreg_bottom = vinfo.v_rows; } - if (gc_relative_origin) + if (gc_relative_origin) { gc_y = gc_scrreg_top; + } break; } - } -static void +static void gc_putc_normal(unsigned char ch) 
{ switch (ch) { - case '\a': /* Beep */ - break; - case 127: /* Delete */ - case '\b': /* Backspace */ + case '\a': /* Beep */ + break; + case 127: /* Delete */ + case '\b': /* Backspace */ if (gc_hanging_cursor) { gc_hanging_cursor = 0; - } else - if (gc_x > 0) { - gc_x--; - } + } else if (gc_x > 0) { + gc_x--; + } break; - case '\t': /* Tab */ - if (gc_buffer_tab_stops) while (gc_x < vinfo.v_columns && !gc_is_tab_stop(++gc_x)); + case '\t': /* Tab */ + if (gc_buffer_tab_stops) { + while (gc_x < vinfo.v_columns && !gc_is_tab_stop(++gc_x)) { + ; + } + } - if (gc_x >= vinfo.v_columns) - gc_x = vinfo.v_columns-1; + if (gc_x >= vinfo.v_columns) { + gc_x = vinfo.v_columns - 1; + } break; case 0x0b: case 0x0c: - case '\n': /* Line feed */ - if (gc_y >= gc_scrreg_bottom -1 ) { + case '\n': /* Line feed */ + if (gc_y >= gc_scrreg_bottom - 1) { gc_scroll_up(1, gc_scrreg_top, gc_scrreg_bottom); gc_y = gc_scrreg_bottom - 1; } else { gc_y++; } break; - case '\r': /* Carriage return */ + case '\r': /* Carriage return */ gc_x = 0; gc_hanging_cursor = 0; break; @@ -936,11 +923,11 @@ gc_putc_normal(unsigned char ch) case 0x0f: /* Select G0 charset (Control-O) */ gc_charset_select = 0; break; - case 0x18 : /* CAN : cancel */ - case 0x1A : /* like cancel */ - /* well, i do nothing here, may be later */ + case 0x18: /* CAN : cancel */ + case 0x1A: /* like cancel */ + /* well, i do nothing here, may be later */ break; - case '\033': /* Escape */ + case '\033': /* Escape */ gc_vt100state = ESesc; gc_hanging_cursor = 0; break; @@ -948,7 +935,7 @@ gc_putc_normal(unsigned char ch) if (ch >= ' ') { if (gc_hanging_cursor) { gc_x = 0; - if (gc_y >= gc_scrreg_bottom -1 ) { + if (gc_y >= gc_scrreg_bottom - 1) { gc_scroll_up(1, gc_scrreg_top, gc_scrreg_bottom); gc_y = gc_scrreg_bottom - 1; } else { @@ -957,7 +944,7 @@ gc_putc_normal(unsigned char ch) gc_hanging_cursor = 0; } gc_paint_char(gc_x, gc_y, (ch >= 0x60 && ch <= 0x7f) ? 
ch + gc_charset[gc_charset_select] - : ch, gc_attr); + : ch, gc_attr); if (gc_x == vinfo.v_columns - 1) { gc_hanging_cursor = gc_wrap_mode; } else { @@ -966,10 +953,9 @@ gc_putc_normal(unsigned char ch) } break; } - } -static void +static void gc_putc_square(unsigned char ch) { int i; @@ -982,7 +968,6 @@ gc_putc_square(unsigned char ch) gc_vt100state = ESgetpars; gc_putc_getpars(ch); - } static void @@ -996,13 +981,14 @@ static void gc_reset_tabs(void) { unsigned int i; - - if (!gc_buffer_tab_stops) return; + + if (!gc_buffer_tab_stops) { + return; + } for (i = 0; i < vinfo.v_columns; i++) { gc_buffer_tab_stops[i] = ((i % 8) == 0); } - } static void @@ -1013,14 +999,17 @@ gc_set_tab_stop(unsigned int column, boolean_t enabled) } } -static boolean_t gc_is_tab_stop(unsigned int column) +static boolean_t +gc_is_tab_stop(unsigned int column) { - if (gc_buffer_tab_stops == NULL) - return ((column % 8) == 0); - if (column < vinfo.v_columns) + if (gc_buffer_tab_stops == NULL) { + return (column % 8) == 0; + } + if (column < vinfo.v_columns) { return gc_buffer_tab_stops[column]; - else + } else { return FALSE; + } } static void @@ -1038,52 +1027,46 @@ gc_reset_vt100(void) gc_update_color(COLOR_FOREGROUND, TRUE); } -static void +static void gc_scroll_down(int num, unsigned int top, unsigned int bottom) { - if (!gc_buffer_size) return; + if (!gc_buffer_size) { + return; + } - if ( bottom <= gc_buffer_rows ) - { + if (bottom <= gc_buffer_rows) { unsigned char colorcodesave = gc_color_code; uint32_t column, row; uint32_t index, jump; jump = num * gc_buffer_columns; - for ( row = bottom - 1 ; row >= top + num ; row-- ) - { + for (row = bottom - 1; row >= top + num; row--) { index = row * gc_buffer_columns; - for ( column = 0 ; column < gc_buffer_columns ; index++, column++ ) - { - if ( gc_buffer_attributes[index] != gc_buffer_attributes[index - jump] || - gc_buffer_characters[index] != gc_buffer_characters[index - jump] || - gc_buffer_colorcodes[index] != gc_buffer_colorcodes[index - jump] ) - { - if ( gc_color_code != gc_buffer_colorcodes[index - jump] ) - { + for (column = 0; column < gc_buffer_columns; index++, column++) { + if (gc_buffer_attributes[index] != gc_buffer_attributes[index - jump] || + gc_buffer_characters[index] != gc_buffer_characters[index - jump] || + gc_buffer_colorcodes[index] != gc_buffer_colorcodes[index - jump]) { + if (gc_color_code != gc_buffer_colorcodes[index - jump]) { gc_update_color(COLOR_CODE_GET(gc_buffer_colorcodes[index - jump], TRUE ), TRUE ); gc_update_color(COLOR_CODE_GET(gc_buffer_colorcodes[index - jump], FALSE), FALSE); } - if ( gc_buffer_colorcodes[index] != gc_buffer_colorcodes[index - jump] ) - { + if (gc_buffer_colorcodes[index] != gc_buffer_colorcodes[index - jump]) { gc_ops.paint_char( /* xx */ column, - /* yy */ row, - /* ch */ gc_buffer_characters[index - jump], - /* attrs */ gc_buffer_attributes[index - jump], - /* ch_previous */ 0, - /* attrs_previous */ 0 ); - } - else - { + /* yy */ row, + /* ch */ gc_buffer_characters[index - jump], + /* attrs */ gc_buffer_attributes[index - jump], + /* ch_previous */ 0, + /* attrs_previous */ 0 ); + } else { gc_ops.paint_char( /* xx */ column, - /* yy */ row, - /* ch */ gc_buffer_characters[index - jump], - /* attrs */ gc_buffer_attributes[index - jump], - /* ch_previous */ gc_buffer_characters[index], - /* attrs_previous */ gc_buffer_attributes[index] ); + /* yy */ row, + /* ch */ gc_buffer_characters[index - jump], + /* attrs */ gc_buffer_attributes[index - jump], + /* ch_previous */ gc_buffer_characters[index], 
+ /* attrs_previous */ gc_buffer_attributes[index] ); } gc_buffer_attributes[index] = gc_buffer_attributes[index - jump]; @@ -1093,41 +1076,34 @@ gc_scroll_down(int num, unsigned int top, unsigned int bottom) } } - if ( colorcodesave != gc_color_code ) - { + if (colorcodesave != gc_color_code) { gc_update_color(COLOR_CODE_GET(colorcodesave, TRUE ), TRUE ); gc_update_color(COLOR_CODE_GET(colorcodesave, FALSE), FALSE); } /* Now set the freed up lines to the background colour */ - for ( row = top ; row < top + num ; row++ ) - { + for (row = top; row < top + num; row++) { index = row * gc_buffer_columns; - for ( column = 0 ; column < gc_buffer_columns ; index++, column++ ) - { - if ( gc_buffer_attributes[index] != ATTR_NONE || - gc_buffer_characters[index] != ' ' || - gc_buffer_colorcodes[index] != gc_color_code ) - { - if ( gc_buffer_colorcodes[index] != gc_color_code ) - { + for (column = 0; column < gc_buffer_columns; index++, column++) { + if (gc_buffer_attributes[index] != ATTR_NONE || + gc_buffer_characters[index] != ' ' || + gc_buffer_colorcodes[index] != gc_color_code) { + if (gc_buffer_colorcodes[index] != gc_color_code) { gc_ops.paint_char( /* xx */ column, - /* yy */ row, - /* ch */ ' ', - /* attrs */ ATTR_NONE, - /* ch_previous */ 0, - /* attrs_previous */ 0 ); - } - else - { + /* yy */ row, + /* ch */ ' ', + /* attrs */ ATTR_NONE, + /* ch_previous */ 0, + /* attrs_previous */ 0 ); + } else { gc_ops.paint_char( /* xx */ column, - /* yy */ row, - /* ch */ ' ', - /* attrs */ ATTR_NONE, - /* ch_previous */ gc_buffer_characters[index], - /* attrs_previous */ gc_buffer_attributes[index] ); + /* yy */ row, + /* ch */ ' ', + /* attrs */ ATTR_NONE, + /* ch_previous */ gc_buffer_characters[index], + /* attrs_previous */ gc_buffer_attributes[index] ); } gc_buffer_attributes[index] = ATTR_NONE; @@ -1136,9 +1112,7 @@ gc_scroll_down(int num, unsigned int top, unsigned int bottom) } } } - } - else - { + } else { gc_ops.scroll_down(num, top, bottom); /* Now set the freed up lines to the background colour */ @@ -1147,52 +1121,46 @@ gc_scroll_down(int num, unsigned int top, unsigned int bottom) } } -static void +static void gc_scroll_up(int num, unsigned int top, unsigned int bottom) { - if (!gc_buffer_size) return; + if (!gc_buffer_size) { + return; + } - if ( bottom <= gc_buffer_rows ) - { + if (bottom <= gc_buffer_rows) { unsigned char colorcodesave = gc_color_code; uint32_t column, row; uint32_t index, jump; jump = num * gc_buffer_columns; - for ( row = top ; row < bottom - num ; row++ ) - { + for (row = top; row < bottom - num; row++) { index = row * gc_buffer_columns; - for ( column = 0 ; column < gc_buffer_columns ; index++, column++ ) - { - if ( gc_buffer_attributes[index] != gc_buffer_attributes[index + jump] || - gc_buffer_characters[index] != gc_buffer_characters[index + jump] || - gc_buffer_colorcodes[index] != gc_buffer_colorcodes[index + jump] ) - { - if ( gc_color_code != gc_buffer_colorcodes[index + jump] ) - { + for (column = 0; column < gc_buffer_columns; index++, column++) { + if (gc_buffer_attributes[index] != gc_buffer_attributes[index + jump] || + gc_buffer_characters[index] != gc_buffer_characters[index + jump] || + gc_buffer_colorcodes[index] != gc_buffer_colorcodes[index + jump]) { + if (gc_color_code != gc_buffer_colorcodes[index + jump]) { gc_update_color(COLOR_CODE_GET(gc_buffer_colorcodes[index + jump], TRUE ), TRUE ); gc_update_color(COLOR_CODE_GET(gc_buffer_colorcodes[index + jump], FALSE), FALSE); } - if ( gc_buffer_colorcodes[index] != gc_buffer_colorcodes[index + 
jump] ) - { + if (gc_buffer_colorcodes[index] != gc_buffer_colorcodes[index + jump]) { gc_ops.paint_char( /* xx */ column, - /* yy */ row, - /* ch */ gc_buffer_characters[index + jump], - /* attrs */ gc_buffer_attributes[index + jump], - /* ch_previous */ 0, - /* attrs_previous */ 0 ); - } - else - { + /* yy */ row, + /* ch */ gc_buffer_characters[index + jump], + /* attrs */ gc_buffer_attributes[index + jump], + /* ch_previous */ 0, + /* attrs_previous */ 0 ); + } else { gc_ops.paint_char( /* xx */ column, - /* yy */ row, - /* ch */ gc_buffer_characters[index + jump], - /* attrs */ gc_buffer_attributes[index + jump], - /* ch_previous */ gc_buffer_characters[index], - /* attrs_previous */ gc_buffer_attributes[index] ); + /* yy */ row, + /* ch */ gc_buffer_characters[index + jump], + /* attrs */ gc_buffer_attributes[index + jump], + /* ch_previous */ gc_buffer_characters[index], + /* attrs_previous */ gc_buffer_attributes[index] ); } gc_buffer_attributes[index] = gc_buffer_attributes[index + jump]; @@ -1202,41 +1170,34 @@ gc_scroll_up(int num, unsigned int top, unsigned int bottom) } } - if ( colorcodesave != gc_color_code ) - { + if (colorcodesave != gc_color_code) { gc_update_color(COLOR_CODE_GET(colorcodesave, TRUE ), TRUE ); gc_update_color(COLOR_CODE_GET(colorcodesave, FALSE), FALSE); } /* Now set the freed up lines to the background colour */ - for ( row = bottom - num ; row < bottom ; row++ ) - { + for (row = bottom - num; row < bottom; row++) { index = row * gc_buffer_columns; - for ( column = 0 ; column < gc_buffer_columns ; index++, column++ ) - { - if ( gc_buffer_attributes[index] != ATTR_NONE || - gc_buffer_characters[index] != ' ' || - gc_buffer_colorcodes[index] != gc_color_code ) - { - if ( gc_buffer_colorcodes[index] != gc_color_code ) - { + for (column = 0; column < gc_buffer_columns; index++, column++) { + if (gc_buffer_attributes[index] != ATTR_NONE || + gc_buffer_characters[index] != ' ' || + gc_buffer_colorcodes[index] != gc_color_code) { + if (gc_buffer_colorcodes[index] != gc_color_code) { gc_ops.paint_char( /* xx */ column, - /* yy */ row, - /* ch */ ' ', - /* attrs */ ATTR_NONE, - /* ch_previous */ 0, - /* attrs_previous */ 0 ); - } - else - { + /* yy */ row, + /* ch */ ' ', + /* attrs */ ATTR_NONE, + /* ch_previous */ 0, + /* attrs_previous */ 0 ); + } else { gc_ops.paint_char( /* xx */ column, - /* yy */ row, - /* ch */ ' ', - /* attrs */ ATTR_NONE, - /* ch_previous */ gc_buffer_characters[index], - /* attrs_previous */ gc_buffer_attributes[index] ); + /* yy */ row, + /* ch */ ' ', + /* attrs */ ATTR_NONE, + /* ch_previous */ gc_buffer_characters[index], + /* attrs_previous */ gc_buffer_attributes[index] ); } gc_buffer_attributes[index] = ATTR_NONE; @@ -1245,9 +1206,7 @@ gc_scroll_up(int num, unsigned int top, unsigned int bottom) } } } - } - else - { + } else { gc_ops.scroll_up(num, top, bottom); /* Now set the freed up lines to the background colour */ @@ -1259,8 +1218,7 @@ gc_scroll_up(int num, unsigned int top, unsigned int bottom) static void gc_show_cursor(unsigned int xx, unsigned int yy) { - if ( xx < gc_buffer_columns && yy < gc_buffer_rows ) - { + if (xx < gc_buffer_columns && yy < gc_buffer_rows) { uint32_t index = (yy * gc_buffer_columns) + xx; unsigned char attribute = gc_buffer_attributes[index]; unsigned char character = gc_buffer_characters[index]; @@ -1274,9 +1232,7 @@ gc_show_cursor(unsigned int xx, unsigned int yy) gc_update_color(COLOR_CODE_GET(colorcodesave, TRUE ), TRUE ); gc_update_color(COLOR_CODE_GET(colorcodesave, FALSE), FALSE); - } - 
else - { + } else { gc_ops.show_cursor(xx, yy); } } @@ -1293,26 +1249,14 @@ gc_update_color(int color, boolean_t fore) void vcputc(__unused int l, __unused int u, int c) { - if ( gc_initialized && gc_enabled ) - { - spl_t s; - - s = splhigh(); -#if defined(__i386__) || defined(__x86_64__) - x86_filter_TLB_coherency_interrupts(TRUE); -#endif + if (gc_initialized && gc_enabled) { VCPUTC_LOCK_LOCK(); - if ( gc_enabled ) - { + if (gc_enabled) { gc_hide_cursor(gc_x, gc_y); gc_putchar(c); gc_show_cursor(gc_x, gc_y); } VCPUTC_LOCK_UNLOCK(); -#if defined(__i386__) || defined(__x86_64__) - x86_filter_TLB_coherency_interrupts(FALSE); -#endif - splx(s); } } @@ -1320,30 +1264,30 @@ vcputc(__unused int l, __unused int u, int c) * Video Console (Back-End) * ------------------------ */ - + /* * For the color support (Michel Pollet) */ -static unsigned char vc_color_index_table[33] = - { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 2 }; +static unsigned char vc_color_index_table[33] = +{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 2 }; static uint32_t vc_colors[8][4] = { - { 0xFFFFFFFF, 0x00000000, 0x00000000, 0x00000000 }, /* black */ - { 0x23232323, 0x7C007C00, 0x00FF0000, 0x3FF00000 }, /* red */ - { 0xb9b9b9b9, 0x03e003e0, 0x0000FF00, 0x000FFC00 }, /* green */ - { 0x05050505, 0x7FE07FE0, 0x00FFFF00, 0x3FFFFC00 }, /* yellow */ - { 0xd2d2d2d2, 0x001f001f, 0x000000FF, 0x000003FF }, /* blue */ + { 0xFFFFFFFF, 0x00000000, 0x00000000, 0x00000000 }, /* black */ + { 0x23232323, 0x7C007C00, 0x00FF0000, 0x3FF00000 }, /* red */ + { 0xb9b9b9b9, 0x03e003e0, 0x0000FF00, 0x000FFC00 }, /* green */ + { 0x05050505, 0x7FE07FE0, 0x00FFFF00, 0x3FFFFC00 }, /* yellow */ + { 0xd2d2d2d2, 0x001f001f, 0x000000FF, 0x000003FF }, /* blue */ // { 0x80808080, 0x31933193, 0x00666699, 0x00000000 }, /* blue */ - { 0x18181818, 0x7C1F7C1F, 0x00FF00FF, 0x3FF003FF }, /* magenta */ - { 0xb4b4b4b4, 0x03FF03FF, 0x0000FFFF, 0x000FFFFF }, /* cyan */ - { 0x00000000, 0x7FFF7FFF, 0x00FFFFFF, 0x3FFFFFFF } /* white */ + { 0x18181818, 0x7C1F7C1F, 0x00FF00FF, 0x3FF003FF }, /* magenta */ + { 0xb4b4b4b4, 0x03FF03FF, 0x0000FFFF, 0x000FFFFF }, /* cyan */ + { 0x00000000, 0x7FFF7FFF, 0x00FFFFFF, 0x3FFFFFFF } /* white */ }; static uint32_t vc_color_fore = 0; static uint32_t vc_color_back = 0; -/* +/* * New Rendering code from Michel Pollet */ @@ -1356,48 +1300,51 @@ static uint32_t vc_rendered_font_size = 0; /* Size of a character in the table (bytes) */ static int vc_rendered_char_size = 0; -#define REN_MAX_DEPTH 32 +#define REN_MAX_DEPTH 32 static unsigned char vc_rendered_char[ISO_CHAR_HEIGHT * ((REN_MAX_DEPTH / 8) * ISO_CHAR_WIDTH)]; -#if !CONFIG_EMBEDDED +#if defined(XNU_TARGET_OS_OSX) +#define CONFIG_VC_PROGRESS_METER_SUPPORT 1 +#endif /* XNU_TARGET_OS_OSX */ + +#if defined(XNU_TARGET_OS_OSX) static void internal_set_progressmeter(int new_value); static void internal_enable_progressmeter(int new_value); -enum -{ - kProgressMeterOff = FALSE, - kProgressMeterUser = TRUE, - kProgressMeterKernel = 3, +enum{ + kProgressMeterOff = FALSE, + kProgressMeterUser = TRUE, + kProgressMeterKernel = 3, }; -enum -{ - kProgressMeterMax = 1024, - kProgressMeterEnd = 512, +enum{ + kProgressMeterMax = 1024, + kProgressMeterEnd = 512, }; -#endif /* !CONFIG_EMBEDDED */ +#endif /* defined(XNU_TARGET_OS_OSX) */ -static boolean_t vc_progress_white = +static boolean_t vc_progress_white = #ifdef CONFIG_VC_PROGRESS_WHITE - TRUE; + TRUE; #else /* !CONFIG_VC_PROGRESS_WHITE */ - 
FALSE; + FALSE; #endif /* !CONFIG_VC_PROGRESS_WHITE */ static int vc_acquire_delay = kProgressAcquireDelay; -static void +static void vc_clear_screen(unsigned int xx, unsigned int yy, unsigned int scrreg_top, - unsigned int scrreg_bottom, int which) + unsigned int scrreg_bottom, int which) { uint32_t *p, *endp, *row; int linelongs, col; int rowline, rowlongs; - if(!vinfo.v_depth) + if (!vinfo.v_depth) { return; + } linelongs = vinfo.v_rowbytes * (ISO_CHAR_HEIGHT >> 2); rowline = vinfo.v_rowscanbytes >> 2; @@ -1407,21 +1354,21 @@ vc_clear_screen(unsigned int xx, unsigned int yy, unsigned int scrreg_top, endp = (uint32_t*) vinfo.v_baseaddr; switch (which) { - case 0: /* To end of screen */ + case 0: /* To end of screen */ gc_clear_line(xx, yy, 0); if (yy < scrreg_bottom - 1) { p += (yy + 1) * linelongs; endp += scrreg_bottom * linelongs; } break; - case 1: /* To start of screen */ + case 1: /* To start of screen */ gc_clear_line(xx, yy, 1); if (yy > scrreg_top) { p += scrreg_top * linelongs; endp += yy * linelongs; } break; - case 2: /* Whole screen */ + case 2: /* Whole screen */ p += scrreg_top * linelongs; if (scrreg_bottom == vinfo.v_rows) { endp += rowlongs * vinfo.v_height; @@ -1431,9 +1378,10 @@ vc_clear_screen(unsigned int xx, unsigned int yy, unsigned int scrreg_top, break; } - for (row = p ; row < endp ; row += rowlongs) { - for (col = 0; col < rowline; col++) - *(row+col) = vc_color_back; + for (row = p; row < endp; row += rowlongs) { + for (col = 0; col < rowline; col++) { + *(row + col) = vc_color_back; + } } } @@ -1444,8 +1392,8 @@ vc_render_char(unsigned char ch, unsigned char *renderptr, short newdepth) unsigned char *charptr; unsigned short *shortptr; uint32_t *longptr; - } current; /* current place in rendered font, multiple types. */ - unsigned char *theChar; /* current char in iso_font */ + } current; /* current place in rendered font, multiple types. */ + unsigned char *theChar; /* current char in iso_font */ int line; current.charptr = renderptr; @@ -1455,142 +1403,162 @@ vc_render_char(unsigned char ch, unsigned char *renderptr, short newdepth) unsigned char mask = 1; do { switch (newdepth) { - case 8: + case 8: *current.charptr++ = (*theChar & mask) ? 0xFF : 0; break; case 16: *current.shortptr++ = (*theChar & mask) ? 0xFFFF : 0; break; - case 30: - case 32: + case 30: + case 32: *current.longptr++ = (*theChar & mask) ? 0xFFFFFFFF : 0; break; } mask <<= 1; - } while (mask); /* while the single bit drops to the right */ + } while (mask); /* while the single bit drops to the right */ theChar++; } } static void vc_paint_char_8(unsigned int xx, unsigned int yy, unsigned char ch, int attrs, - __unused unsigned char ch_previous, __unused int attrs_previous) + __unused unsigned char ch_previous, __unused int attrs_previous) { uint32_t *theChar; uint32_t *where; int i; - + if (vc_rendered_font) { theChar = (uint32_t*)(vc_rendered_font + (ch * vc_rendered_char_size)); } else { vc_render_char(ch, vc_rendered_char, 8); theChar = (uint32_t*)(vc_rendered_char); } - where = (uint32_t*)(vinfo.v_baseaddr + - (yy * ISO_CHAR_HEIGHT * vinfo.v_rowbytes) + - (xx * ISO_CHAR_WIDTH)); - - if (!attrs) for (i = 0; i < ISO_CHAR_HEIGHT; i++) { /* No attr? 
FLY !*/ - uint32_t *store = where; - int x; - for (x = 0; x < 2; x++) { - uint32_t val = *theChar++; - val = (vc_color_back & ~val) | (vc_color_fore & val); - *store++ = val; - } - - where = (uint32_t*)(((unsigned char*)where)+vinfo.v_rowbytes); - } else for (i = 0; i < ISO_CHAR_HEIGHT; i++) { /* a little slower */ - uint32_t *store = where, lastpixel = 0; - int x; - for (x = 0 ; x < 2; x++) { - uint32_t val = *theChar++, save = val; - if (attrs & ATTR_BOLD) { /* bold support */ - if (lastpixel && !(save & 0xFF000000)) - val |= 0xff000000; - if ((save & 0xFFFF0000) == 0xFF000000) - val |= 0x00FF0000; - if ((save & 0x00FFFF00) == 0x00FF0000) - val |= 0x0000FF00; - if ((save & 0x0000FFFF) == 0x0000FF00) - val |= 0x000000FF; + where = (uint32_t*)(vinfo.v_baseaddr + + (yy * ISO_CHAR_HEIGHT * vinfo.v_rowbytes) + + (xx * ISO_CHAR_WIDTH)); + + if (!attrs) { + for (i = 0; i < ISO_CHAR_HEIGHT; i++) { /* No attr? FLY !*/ + uint32_t *store = where; + int x; + for (x = 0; x < 2; x++) { + uint32_t val = *theChar++; + val = (vc_color_back & ~val) | (vc_color_fore & val); + *store++ = val; } - if (attrs & ATTR_REVERSE) val = ~val; - if (attrs & ATTR_UNDER && i == ISO_CHAR_HEIGHT-1) val = ~val; - val = (vc_color_back & ~val) | (vc_color_fore & val); - *store++ = val; - lastpixel = save & 0xff; + where = (uint32_t*)(((unsigned char*)where) + vinfo.v_rowbytes); } - - where = (uint32_t*)(((unsigned char*)where)+vinfo.v_rowbytes); - } + } else { + for (i = 0; i < ISO_CHAR_HEIGHT; i++) { /* a little slower */ + uint32_t *store = where, lastpixel = 0; + int x; + for (x = 0; x < 2; x++) { + uint32_t val = *theChar++, save = val; + if (attrs & ATTR_BOLD) { /* bold support */ + if (lastpixel && !(save & 0xFF000000)) { + val |= 0xff000000; + } + if ((save & 0xFFFF0000) == 0xFF000000) { + val |= 0x00FF0000; + } + if ((save & 0x00FFFF00) == 0x00FF0000) { + val |= 0x0000FF00; + } + if ((save & 0x0000FFFF) == 0x0000FF00) { + val |= 0x000000FF; + } + } + if (attrs & ATTR_REVERSE) { + val = ~val; + } + if (attrs & ATTR_UNDER && i == ISO_CHAR_HEIGHT - 1) { + val = ~val; + } + + val = (vc_color_back & ~val) | (vc_color_fore & val); + *store++ = val; + lastpixel = save & 0xff; + } + where = (uint32_t*)(((unsigned char*)where) + vinfo.v_rowbytes); + } + } } static void vc_paint_char_16(unsigned int xx, unsigned int yy, unsigned char ch, int attrs, - __unused unsigned char ch_previous, - __unused int attrs_previous) + __unused unsigned char ch_previous, + __unused int attrs_previous) { uint32_t *theChar; uint32_t *where; int i; - + if (vc_rendered_font) { theChar = (uint32_t*)(vc_rendered_font + (ch * vc_rendered_char_size)); } else { vc_render_char(ch, vc_rendered_char, 16); theChar = (uint32_t*)(vc_rendered_char); } - where = (uint32_t*)(vinfo.v_baseaddr + - (yy * ISO_CHAR_HEIGHT * vinfo.v_rowbytes) + - (xx * ISO_CHAR_WIDTH * 2)); - - if (!attrs) for (i = 0; i < ISO_CHAR_HEIGHT; i++) { /* No attrs ? FLY ! 
*/ - uint32_t *store = where; - int x; - for (x = 0; x < 4; x++) { - uint32_t val = *theChar++; - val = (vc_color_back & ~val) | (vc_color_fore & val); - *store++ = val; - } - - where = (uint32_t*)(((unsigned char*)where)+vinfo.v_rowbytes); - } else for (i = 0; i < ISO_CHAR_HEIGHT; i++) { /* a little bit slower */ - uint32_t *store = where, lastpixel = 0; - int x; - for (x = 0 ; x < 4; x++) { - uint32_t val = *theChar++, save = val; - if (attrs & ATTR_BOLD) { /* bold support */ - if (save == 0xFFFF0000) val |= 0xFFFF; - else if (lastpixel && !(save & 0xFFFF0000)) - val |= 0xFFFF0000; + where = (uint32_t*)(vinfo.v_baseaddr + + (yy * ISO_CHAR_HEIGHT * vinfo.v_rowbytes) + + (xx * ISO_CHAR_WIDTH * 2)); + + if (!attrs) { + for (i = 0; i < ISO_CHAR_HEIGHT; i++) { /* No attrs ? FLY ! */ + uint32_t *store = where; + int x; + for (x = 0; x < 4; x++) { + uint32_t val = *theChar++; + val = (vc_color_back & ~val) | (vc_color_fore & val); + *store++ = val; } - if (attrs & ATTR_REVERSE) val = ~val; - if (attrs & ATTR_UNDER && i == ISO_CHAR_HEIGHT-1) val = ~val; - val = (vc_color_back & ~val) | (vc_color_fore & val); + where = (uint32_t*)(((unsigned char*)where) + vinfo.v_rowbytes); + } + } else { + for (i = 0; i < ISO_CHAR_HEIGHT; i++) { /* a little bit slower */ + uint32_t *store = where, lastpixel = 0; + int x; + for (x = 0; x < 4; x++) { + uint32_t val = *theChar++, save = val; + if (attrs & ATTR_BOLD) { /* bold support */ + if (save == 0xFFFF0000) { + val |= 0xFFFF; + } else if (lastpixel && !(save & 0xFFFF0000)) { + val |= 0xFFFF0000; + } + } + if (attrs & ATTR_REVERSE) { + val = ~val; + } + if (attrs & ATTR_UNDER && i == ISO_CHAR_HEIGHT - 1) { + val = ~val; + } + + val = (vc_color_back & ~val) | (vc_color_fore & val); + + *store++ = val; + lastpixel = save & 0x7fff; + } - *store++ = val; - lastpixel = save & 0x7fff; + where = (uint32_t*)(((unsigned char*)where) + vinfo.v_rowbytes); } - - where = (uint32_t*)(((unsigned char*)where)+vinfo.v_rowbytes); } - } static void vc_paint_char_32(unsigned int xx, unsigned int yy, unsigned char ch, int attrs, - unsigned char ch_previous, int attrs_previous) + unsigned char ch_previous, int attrs_previous) { uint32_t *theChar; uint32_t *theCharPrevious; uint32_t *where; int i; - + if (vc_rendered_font) { theChar = (uint32_t*)(vc_rendered_font + (ch * vc_rendered_char_size)); theCharPrevious = (uint32_t*)(vc_rendered_font + (ch_previous * vc_rendered_char_size)); @@ -1605,65 +1573,74 @@ vc_paint_char_32(unsigned int xx, unsigned int yy, unsigned char ch, int attrs, if (attrs_previous) { theCharPrevious = NULL; } - where = (uint32_t*)(vinfo.v_baseaddr + - (yy * ISO_CHAR_HEIGHT * vinfo.v_rowbytes) + - (xx * ISO_CHAR_WIDTH * 4)); + where = (uint32_t*)(vinfo.v_baseaddr + + (yy * ISO_CHAR_HEIGHT * vinfo.v_rowbytes) + + (xx * ISO_CHAR_WIDTH * 4)); + + if (!attrs) { + for (i = 0; i < ISO_CHAR_HEIGHT; i++) { /* No attrs ? FLY ! 
*/ + uint32_t *store = where; + int x; + for (x = 0; x < 8; x++) { + uint32_t val = *theChar++; + if (theCharPrevious == NULL || val != *theCharPrevious++) { + val = (vc_color_back & ~val) | (vc_color_fore & val); + *store++ = val; + } else { + store++; + } + } + + where = (uint32_t *)(((unsigned char*)where) + vinfo.v_rowbytes); + } + } else { + for (i = 0; i < ISO_CHAR_HEIGHT; i++) { /* a little slower */ + uint32_t *store = where, lastpixel = 0; + int x; + for (x = 0; x < 8; x++) { + uint32_t val = *theChar++, save = val; + if (attrs & ATTR_BOLD) { /* bold support */ + if (lastpixel && !save) { + val = 0xFFFFFFFF; + } + } + if (attrs & ATTR_REVERSE) { + val = ~val; + } + if (attrs & ATTR_UNDER && i == ISO_CHAR_HEIGHT - 1) { + val = ~val; + } - if (!attrs) for (i = 0; i < ISO_CHAR_HEIGHT; i++) { /* No attrs ? FLY ! */ - uint32_t *store = where; - int x; - for (x = 0; x < 8; x++) { - uint32_t val = *theChar++; - if (theCharPrevious == NULL || val != *theCharPrevious++ ) { val = (vc_color_back & ~val) | (vc_color_fore & val); *store++ = val; - } else { - store++; - } - } - - where = (uint32_t *)(((unsigned char*)where)+vinfo.v_rowbytes); - } else for (i = 0; i < ISO_CHAR_HEIGHT; i++) { /* a little slower */ - uint32_t *store = where, lastpixel = 0; - int x; - for (x = 0 ; x < 8; x++) { - uint32_t val = *theChar++, save = val; - if (attrs & ATTR_BOLD) { /* bold support */ - if (lastpixel && !save) - val = 0xFFFFFFFF; + lastpixel = save; } - if (attrs & ATTR_REVERSE) val = ~val; - if (attrs & ATTR_UNDER && i == ISO_CHAR_HEIGHT-1) val = ~val; - val = (vc_color_back & ~val) | (vc_color_fore & val); - *store++ = val; - lastpixel = save; + where = (uint32_t*)(((unsigned char*)where) + vinfo.v_rowbytes); } - - where = (uint32_t*)(((unsigned char*)where)+vinfo.v_rowbytes); } - } static void vc_paint_char(unsigned int xx, unsigned int yy, unsigned char ch, int attrs, - unsigned char ch_previous, int attrs_previous) + unsigned char ch_previous, int attrs_previous) { - if(!vinfo.v_depth) + if (!vinfo.v_depth) { return; + } - switch(vinfo.v_depth) { + switch (vinfo.v_depth) { case 8: vc_paint_char_8(xx, yy, ch, attrs, ch_previous, attrs_previous); break; case 16: vc_paint_char_16(xx, yy, ch, attrs, ch_previous, - attrs_previous); + attrs_previous); break; case 30: case 32: vc_paint_char_32(xx, yy, ch, attrs, ch_previous, - attrs_previous); + attrs_previous); break; } } @@ -1673,17 +1650,17 @@ vc_render_font(short newdepth) { static short olddepth = 0; - int charindex; /* index in ISO font */ + int charindex; /* index in ISO font */ unsigned char *rendered_font; unsigned int rendered_font_size; int rendered_char_size; spl_t s; if (vm_initialized == FALSE) { - return; /* nothing to do */ + return; /* nothing to do */ } if (olddepth == newdepth && vc_rendered_font) { - return; /* nothing to do */ + return; /* nothing to do */ } s = splhigh(); @@ -1701,14 +1678,14 @@ vc_render_font(short newdepth) splx(s); if (rendered_font) { - kfree(rendered_font, rendered_font_size); + kheap_free(KHEAP_DATA_BUFFERS, rendered_font, rendered_font_size); rendered_font = NULL; } if (newdepth) { rendered_char_size = ISO_CHAR_HEIGHT * (((newdepth + 7) / 8) * ISO_CHAR_WIDTH); - rendered_font_size = (ISO_CHAR_MAX-ISO_CHAR_MIN+1) * rendered_char_size; - rendered_font = (unsigned char *) kalloc(rendered_font_size); + rendered_font_size = (ISO_CHAR_MAX - ISO_CHAR_MIN + 1) * rendered_char_size; + rendered_font = kheap_alloc(KHEAP_DATA_BUFFERS, rendered_font_size, Z_WAITOK); } if (rendered_font == NULL) { @@ -1744,46 +1721,50 @@ 
vc_reverse_cursor(unsigned int xx, unsigned int yy) uint32_t *where; int line, col; - if(!vinfo.v_depth) + if (!vinfo.v_depth) { return; + } - where = (uint32_t*)(vinfo.v_baseaddr + - (yy * ISO_CHAR_HEIGHT * vinfo.v_rowbytes) + - (xx /** ISO_CHAR_WIDTH*/ * vinfo.v_depth)); + where = (uint32_t*)(vinfo.v_baseaddr + + (yy * ISO_CHAR_HEIGHT * vinfo.v_rowbytes) + + (xx /** ISO_CHAR_WIDTH*/ * vinfo.v_depth)); for (line = 0; line < ISO_CHAR_HEIGHT; line++) { switch (vinfo.v_depth) { - case 8: - where[0] = ~where[0]; - where[1] = ~where[1]; - break; - case 16: - for (col = 0; col < 4; col++) - where[col] = ~where[col]; - break; - case 32: - for (col = 0; col < 8; col++) - where[col] = ~where[col]; - break; + case 8: + where[0] = ~where[0]; + where[1] = ~where[1]; + break; + case 16: + for (col = 0; col < 4; col++) { + where[col] = ~where[col]; + } + break; + case 32: + for (col = 0; col < 8; col++) { + where[col] = ~where[col]; + } + break; } - where = (uint32_t*)(((unsigned char*)where)+vinfo.v_rowbytes); + where = (uint32_t*)(((unsigned char*)where) + vinfo.v_rowbytes); } } -static void +static void vc_scroll_down(int num, unsigned int scrreg_top, unsigned int scrreg_bottom) { - uint32_t *from, *to, linelongs, i, line, rowline, rowscanline; + uint32_t *from, *to, linelongs, i, line, rowline, rowscanline; - if(!vinfo.v_depth) + if (!vinfo.v_depth) { return; + } linelongs = vinfo.v_rowbytes * (ISO_CHAR_HEIGHT >> 2); rowline = vinfo.v_rowbytes >> 2; rowscanline = vinfo.v_rowscanbytes >> 2; to = (uint32_t *) vinfo.v_baseaddr + (linelongs * scrreg_bottom) - - (rowline - rowscanline); - from = to - (linelongs * num); /* handle multiple line scroll (Michel Pollet) */ + - (rowline - rowscanline); + from = to - (linelongs * num); /* handle multiple line scroll (Michel Pollet) */ i = (scrreg_bottom - scrreg_top) - num; @@ -1792,9 +1773,9 @@ vc_scroll_down(int num, unsigned int scrreg_top, unsigned int scrreg_bottom) /* * Only copy what is displayed */ - video_scroll_down(from, - (from-(vinfo.v_rowscanbytes >> 2)), - to); + video_scroll_down(from, + (from - (vinfo.v_rowscanbytes >> 2)), + to); from -= rowline; to -= rowline; @@ -1802,20 +1783,21 @@ vc_scroll_down(int num, unsigned int scrreg_top, unsigned int scrreg_bottom) } } -static void +static void vc_scroll_up(int num, unsigned int scrreg_top, unsigned int scrreg_bottom) { uint32_t *from, *to, linelongs, i, line, rowline, rowscanline; - if(!vinfo.v_depth) + if (!vinfo.v_depth) { return; + } linelongs = vinfo.v_rowbytes * (ISO_CHAR_HEIGHT >> 2); rowline = vinfo.v_rowbytes >> 2; rowscanline = vinfo.v_rowscanbytes >> 2; to = (uint32_t *) vinfo.v_baseaddr + (scrreg_top * linelongs); - from = to + (linelongs * num); /* handle multiple line scroll (Michel Pollet) */ + from = to + (linelongs * num); /* handle multiple line scroll (Michel Pollet) */ i = (scrreg_bottom - scrreg_top) - num; @@ -1824,9 +1806,9 @@ vc_scroll_up(int num, unsigned int scrreg_top, unsigned int scrreg_bottom) /* * Only copy what is displayed */ - video_scroll_up(from, - (from+(vinfo.v_rowscanbytes >> 2)), - to); + video_scroll_up(from, + (from + (vinfo.v_rowscanbytes >> 2)), + to); from += rowline; to += rowline; @@ -1837,10 +1819,11 @@ vc_scroll_up(int num, unsigned int scrreg_top, unsigned int scrreg_bottom) static void vc_update_color(int color, boolean_t fore) { - if (!vinfo.v_depth) + if (!vinfo.v_depth) { return; + } if (fore) { - vc_color_fore = vc_colors[color][vc_color_index_table[vinfo.v_depth]]; + vc_color_fore = vc_colors[color][vc_color_index_table[vinfo.v_depth]]; } 
else { vc_color_back = vc_colors[color][vc_color_index_table[vinfo.v_depth]]; } @@ -1851,405 +1834,443 @@ vc_update_color(int color, boolean_t fore) * -------------------------------------- */ -static vc_progress_element * vc_progress; +static vc_progress_element * vc_progress; enum { kMaxProgressData = 3 }; static const unsigned char * vc_progress_data[kMaxProgressData]; static const unsigned char * vc_progress_alpha; -static boolean_t vc_progress_enable; +static boolean_t vc_progress_enable; static const unsigned char * vc_clut; static const unsigned char * vc_clut8; static unsigned char vc_revclut8[256]; -static uint32_t vc_progress_interval; -static uint32_t vc_progress_count; -static uint32_t vc_progress_angle; -static uint64_t vc_progress_deadline; -static thread_call_data_t vc_progress_call; -static boolean_t vc_needsave; -static void * vc_saveunder; -static vm_size_t vc_saveunder_len; -static int8_t vc_uiscale = 1; +static uint32_t vc_progress_interval; +static uint32_t vc_progress_count; +static uint32_t vc_progress_angle; +static uint64_t vc_progress_deadline; +static thread_call_data_t vc_progress_call; +static boolean_t vc_needsave; +static void * vc_saveunder; +static vm_size_t vc_saveunder_len; +static int8_t vc_uiscale = 1; vc_progress_user_options vc_progress_options; vc_progress_user_options vc_user_options; -decl_simple_lock_data(,vc_progress_lock); +decl_simple_lock_data(, vc_progress_lock); -#if !CONFIG_EMBEDDED -static int vc_progress_withmeter = 3; +#if defined(XNU_TARGET_OS_OSX) +static int vc_progress_withmeter = 3; int vc_progressmeter_enable; static int vc_progressmeter_drawn; -int vc_progressmeter_value; -static uint32_t vc_progressmeter_count; -static uint32_t vc_progress_meter_start; -static uint32_t vc_progress_meter_end; -static uint64_t vc_progressmeter_interval; -static uint64_t vc_progressmeter_deadline; -static thread_call_data_t vc_progressmeter_call; +int vc_progressmeter_value; +static uint32_t vc_progressmeter_count; +static uint32_t vc_progress_meter_start; +static uint32_t vc_progress_meter_end; +static uint64_t vc_progressmeter_interval; +static uint64_t vc_progressmeter_deadline; +static thread_call_data_t vc_progressmeter_call; static void * vc_progressmeter_backbuffer; -static boolean_t vc_progressmeter_hold; static uint32_t vc_progressmeter_diskspeed = 256; -#endif /* !CONFIG_EMBEDDED */ +#endif /* defined(XNU_TARGET_OS_OSX) */ enum { - kSave = 0x10, - kDataIndexed = 0x20, - kDataAlpha = 0x40, - kDataBack = 0x80, - kDataRotate = 0x03, + kSave = 0x10, + kDataIndexed = 0x20, + kDataAlpha = 0x40, + kDataBack = 0x80, + kDataRotate = 0x03, }; static void vc_blit_rect(int x, int y, int bx, - int width, int height, - int sourceWidth, int sourceHeight, - int sourceRow, int backRow, - const unsigned char * dataPtr, - void * backBuffer, - unsigned int flags); + int width, int height, + int sourceWidth, int sourceHeight, + int sourceRow, int backRow, + const unsigned char * dataPtr, + void * backBuffer, + unsigned int flags); static void vc_blit_rect_8(int x, int y, int bx, - int width, int height, - int sourceWidth, int sourceHeight, - int sourceRow, int backRow, - const unsigned char * dataPtr, - unsigned char * backBuffer, - unsigned int flags); + int width, int height, + int sourceWidth, int sourceHeight, + int sourceRow, int backRow, + const unsigned char * dataPtr, + unsigned char * backBuffer, + unsigned int flags); static void vc_blit_rect_16(int x, int y, int bx, - int width, int height, - int sourceWidth, int sourceHeight, - int sourceRow, int 
backRow, - const unsigned char * dataPtr, - unsigned short * backBuffer, - unsigned int flags); + int width, int height, + int sourceWidth, int sourceHeight, + int sourceRow, int backRow, + const unsigned char * dataPtr, + unsigned short * backBuffer, + unsigned int flags); static void vc_blit_rect_32(int x, int y, int bx, - int width, int height, - int sourceWidth, int sourceHeight, - int sourceRow, int backRow, - const unsigned char * dataPtr, - unsigned int * backBuffer, - unsigned int flags); + int width, int height, + int sourceWidth, int sourceHeight, + int sourceRow, int backRow, + const unsigned char * dataPtr, + unsigned int * backBuffer, + unsigned int flags); static void vc_blit_rect_30(int x, int y, int bx, - int width, int height, - int sourceWidth, int sourceHeight, - int sourceRow, int backRow, - const unsigned char * dataPtr, - unsigned int * backBuffer, - unsigned int flags); + int width, int height, + int sourceWidth, int sourceHeight, + int sourceRow, int backRow, + const unsigned char * dataPtr, + unsigned int * backBuffer, + unsigned int flags); static void vc_progress_task( void * arg0, void * arg ); -#if !CONFIG_EMBEDDED +#if defined(XNU_TARGET_OS_OSX) static void vc_progressmeter_task( void * arg0, void * arg ); -#endif /* !CONFIG_EMBEDDED */ +#endif /* defined(XNU_TARGET_OS_OSX) */ -static void vc_blit_rect(int x, int y, int bx, - int width, int height, - int sourceWidth, int sourceHeight, - int sourceRow, int backRow, - const unsigned char * dataPtr, - void * backBuffer, - unsigned int flags) -{ - if (!vinfo.v_depth) return; - if (((unsigned int)(x + width)) > vinfo.v_width) return; - if (((unsigned int)(y + height)) > vinfo.v_height) return; +static void +vc_blit_rect(int x, int y, int bx, + int width, int height, + int sourceWidth, int sourceHeight, + int sourceRow, int backRow, + const unsigned char * dataPtr, + void * backBuffer, + unsigned int flags) +{ + if (!vinfo.v_depth) { + return; + } + if (((unsigned int)(x + width)) > vinfo.v_width) { + return; + } + if (((unsigned int)(y + height)) > vinfo.v_height) { + return; + } - switch( vinfo.v_depth) { + switch (vinfo.v_depth) { case 8: - if( vc_clut8 == vc_clut) - vc_blit_rect_8( x, y, bx, width, height, sourceWidth, sourceHeight, sourceRow, backRow, dataPtr, (unsigned char *) backBuffer, flags ); - break; + if (vc_clut8 == vc_clut) { + vc_blit_rect_8( x, y, bx, width, height, sourceWidth, sourceHeight, sourceRow, backRow, dataPtr, (unsigned char *) backBuffer, flags ); + } + break; case 16: - vc_blit_rect_16( x, y, bx, width, height, sourceWidth, sourceHeight, sourceRow, backRow, dataPtr, (unsigned short *) backBuffer, flags ); - break; + vc_blit_rect_16( x, y, bx, width, height, sourceWidth, sourceHeight, sourceRow, backRow, dataPtr, (unsigned short *) backBuffer, flags ); + break; case 32: - vc_blit_rect_32( x, y, bx, width, height, sourceWidth, sourceHeight, sourceRow, backRow, dataPtr, (unsigned int *) backBuffer, flags ); - break; + vc_blit_rect_32( x, y, bx, width, height, sourceWidth, sourceHeight, sourceRow, backRow, dataPtr, (unsigned int *) backBuffer, flags ); + break; case 30: - vc_blit_rect_30( x, y, bx, width, height, sourceWidth, sourceHeight, sourceRow, backRow, dataPtr, (unsigned int *) backBuffer, flags ); - break; - } + vc_blit_rect_30( x, y, bx, width, height, sourceWidth, sourceHeight, sourceRow, backRow, dataPtr, (unsigned int *) backBuffer, flags ); + break; + } } static void vc_blit_rect_8(int x, int y, __unused int bx, - int width, int height, - int sourceWidth, int sourceHeight, - 
int sourceRow, __unused int backRow, - const unsigned char * dataPtr, - __unused unsigned char * backBuffer, - __unused unsigned int flags) -{ - volatile unsigned short * dst; - int line, col; - unsigned int data = 0, out = 0; - int sx, sy, a, b, c, d; - int scale = 0x10000; + int width, int height, + int sourceWidth, int sourceHeight, + int sourceRow, __unused int backRow, + const unsigned char * dataPtr, + __unused unsigned char * backBuffer, + __unused unsigned int flags) +{ + volatile unsigned short * dst; + int line, col; + unsigned int data = 0, out = 0; + int sx, sy, a, b, c, d; + int scale = 0x10000; - a = (sourceRow == 1) ? 0 : vc_rotate_matr[kDataRotate & flags][0][0] * scale; - b = (sourceRow == 1) ? 0 : vc_rotate_matr[kDataRotate & flags][0][1] * scale; - c = vc_rotate_matr[kDataRotate & flags][1][0] * scale; - d = vc_rotate_matr[kDataRotate & flags][1][1] * scale; + a = (sourceRow == 1) ? 0 : vc_rotate_matr[kDataRotate & flags][0][0] * scale; + b = (sourceRow == 1) ? 0 : vc_rotate_matr[kDataRotate & flags][0][1] * scale; + c = vc_rotate_matr[kDataRotate & flags][1][0] * scale; + d = vc_rotate_matr[kDataRotate & flags][1][1] * scale; - sx = ((a + b) < 0) ? ((sourceWidth * scale) - 0x8000) : 0; - sy = ((c + d) < 0) ? ((sourceHeight * scale) - 0x8000) : 0; + sx = ((a + b) < 0) ? ((sourceWidth * scale) - 0x8000) : 0; + sy = ((c + d) < 0) ? ((sourceHeight * scale) - 0x8000) : 0; - if (!sourceRow) data = (unsigned int)(uintptr_t)dataPtr; + if (!sourceRow) { + data = (unsigned int)(uintptr_t)dataPtr; + } - dst = (volatile unsigned short *) (vinfo.v_baseaddr + - (y * vinfo.v_rowbytes) + - (x * 4)); + dst = (volatile unsigned short *) (vinfo.v_baseaddr + + (y * vinfo.v_rowbytes) + + (x * 4)); - for( line = 0; line < height; line++) - { - for( col = 0; col < width; col++) - { - if (sourceRow) data = dataPtr[((sx + (col * a) + (line * b)) >> 16) - + sourceRow * (((sy + (col * c) + (line * d)) >> 16))]; - if (kDataAlpha & flags) - out = vc_revclut8[data]; - else - out = data; - *(dst + col) = out; - } - dst = (volatile unsigned short *) (((volatile char*)dst) + vinfo.v_rowbytes); - } + for (line = 0; line < height; line++) { + for (col = 0; col < width; col++) { + if (sourceRow) { + data = dataPtr[((sx + (col * a) + (line * b)) >> 16) + + sourceRow * (((sy + (col * c) + (line * d)) >> 16))]; + } + if (kDataAlpha & flags) { + out = vc_revclut8[data]; + } else { + out = data; + } + *(dst + col) = out; + } + dst = (volatile unsigned short *) (((volatile char*)dst) + vinfo.v_rowbytes); + } } /* For ARM, 16-bit is 565 (RGB); it is 1555 (XRGB) on other platforms */ #ifdef __arm__ -#define CLUT_MASK_R 0xf8 -#define CLUT_MASK_G 0xfc -#define CLUT_MASK_B 0xf8 -#define CLUT_SHIFT_R << 8 -#define CLUT_SHIFT_G << 3 -#define CLUT_SHIFT_B >> 3 -#define MASK_R 0xf800 -#define MASK_G 0x07e0 -#define MASK_B 0x001f -#define MASK_R_8 0x7f800 -#define MASK_G_8 0x01fe0 -#define MASK_B_8 0x000ff +#define CLUT_MASK_R 0xf8 +#define CLUT_MASK_G 0xfc +#define CLUT_MASK_B 0xf8 +#define CLUT_SHIFT_R << 8 +#define CLUT_SHIFT_G << 3 +#define CLUT_SHIFT_B >> 3 +#define MASK_R 0xf800 +#define MASK_G 0x07e0 +#define MASK_B 0x001f +#define MASK_R_8 0x7f800 +#define MASK_G_8 0x01fe0 +#define MASK_B_8 0x000ff #else -#define CLUT_MASK_R 0xf8 -#define CLUT_MASK_G 0xf8 -#define CLUT_MASK_B 0xf8 -#define CLUT_SHIFT_R << 7 -#define CLUT_SHIFT_G << 2 -#define CLUT_SHIFT_B >> 3 -#define MASK_R 0x7c00 -#define MASK_G 0x03e0 -#define MASK_B 0x001f -#define MASK_R_8 0x3fc00 -#define MASK_G_8 0x01fe0 -#define MASK_B_8 0x000ff +#define 
CLUT_MASK_R 0xf8 +#define CLUT_MASK_G 0xf8 +#define CLUT_MASK_B 0xf8 +#define CLUT_SHIFT_R << 7 +#define CLUT_SHIFT_G << 2 +#define CLUT_SHIFT_B >> 3 +#define MASK_R 0x7c00 +#define MASK_G 0x03e0 +#define MASK_B 0x001f +#define MASK_R_8 0x3fc00 +#define MASK_G_8 0x01fe0 +#define MASK_B_8 0x000ff #endif -static void vc_blit_rect_16( int x, int y, int bx, - int width, int height, - int sourceWidth, int sourceHeight, - int sourceRow, int backRow, - const unsigned char * dataPtr, - unsigned short * backPtr, - unsigned int flags) -{ - volatile unsigned short * dst; - int line, col; - unsigned int data = 0, out = 0, back = 0; - int sx, sy, a, b, c, d; - int scale = 0x10000; - - a = (sourceRow == 1) ? 0 : vc_rotate_matr[kDataRotate & flags][0][0] * scale; - b = (sourceRow == 1) ? 0 : vc_rotate_matr[kDataRotate & flags][0][1] * scale; - c = vc_rotate_matr[kDataRotate & flags][1][0] * scale; - d = vc_rotate_matr[kDataRotate & flags][1][1] * scale; - - sx = ((a + b) < 0) ? ((sourceWidth * scale) - 0x8000) : 0; - sy = ((c + d) < 0) ? ((sourceHeight * scale) - 0x8000) : 0; - - if (!sourceRow) data = (unsigned int)(uintptr_t)dataPtr; - - if (backPtr) - backPtr += bx; - dst = (volatile unsigned short *) (vinfo.v_baseaddr + - (y * vinfo.v_rowbytes) + - (x * 2)); - - for( line = 0; line < height; line++) - { - for( col = 0; col < width; col++) - { - if (sourceRow) data = dataPtr[((sx + (col * a) + (line * b)) >> 16) - + sourceRow * (((sy + (col * c) + (line * d)) >> 16))]; - if (backPtr) { - if (kSave & flags) { - back = *(dst + col); - *backPtr++ = back; - } else - back = *backPtr++; - } - if (kDataIndexed & flags) { - out = ( (CLUT_MASK_R & (vc_clut[data*3 + 0])) CLUT_SHIFT_R) - | ( (CLUT_MASK_G & (vc_clut[data*3 + 1])) CLUT_SHIFT_G) - | ( (CLUT_MASK_B & (vc_clut[data*3 + 2])) CLUT_SHIFT_B); - } else if (kDataAlpha & flags) { - out = (((((back & MASK_R) * data) + MASK_R_8) >> 8) & MASK_R) - | (((((back & MASK_G) * data) + MASK_G_8) >> 8) & MASK_G) - | (((((back & MASK_B) * data) + MASK_B_8) >> 8) & MASK_B); - if (vc_progress_white) out += (((0xff - data) & CLUT_MASK_R) CLUT_SHIFT_R) - | (((0xff - data) & CLUT_MASK_G) CLUT_SHIFT_G) - | (((0xff - data) & CLUT_MASK_B) CLUT_SHIFT_B); - } else if (kDataBack & flags) - out = back; - else - out = data; - *(dst + col) = out; - } - dst = (volatile unsigned short *) (((volatile char*)dst) + vinfo.v_rowbytes); - if (backPtr) - backPtr += backRow - width; - } +static void +vc_blit_rect_16( int x, int y, int bx, + int width, int height, + int sourceWidth, int sourceHeight, + int sourceRow, int backRow, + const unsigned char * dataPtr, + unsigned short * backPtr, + unsigned int flags) +{ + volatile unsigned short * dst; + int line, col; + unsigned int data = 0, out = 0, back = 0; + int sx, sy, a, b, c, d; + int scale = 0x10000; + + a = (sourceRow == 1) ? 0 : vc_rotate_matr[kDataRotate & flags][0][0] * scale; + b = (sourceRow == 1) ? 0 : vc_rotate_matr[kDataRotate & flags][0][1] * scale; + c = vc_rotate_matr[kDataRotate & flags][1][0] * scale; + d = vc_rotate_matr[kDataRotate & flags][1][1] * scale; + + sx = ((a + b) < 0) ? ((sourceWidth * scale) - 0x8000) : 0; + sy = ((c + d) < 0) ? 
((sourceHeight * scale) - 0x8000) : 0; + + if (!sourceRow) { + data = (unsigned int)(uintptr_t)dataPtr; + } + + if (backPtr) { + backPtr += bx; + } + dst = (volatile unsigned short *) (vinfo.v_baseaddr + + (y * vinfo.v_rowbytes) + + (x * 2)); + + for (line = 0; line < height; line++) { + for (col = 0; col < width; col++) { + if (sourceRow) { + data = dataPtr[((sx + (col * a) + (line * b)) >> 16) + + sourceRow * (((sy + (col * c) + (line * d)) >> 16))]; + } + if (backPtr) { + if (kSave & flags) { + back = *(dst + col); + *backPtr++ = back; + } else { + back = *backPtr++; + } + } + if (kDataIndexed & flags) { + out = ((CLUT_MASK_R & (vc_clut[data * 3 + 0]))CLUT_SHIFT_R) + | ((CLUT_MASK_G & (vc_clut[data * 3 + 1]))CLUT_SHIFT_G) + | ((CLUT_MASK_B & (vc_clut[data * 3 + 2]))CLUT_SHIFT_B); + } else if (kDataAlpha & flags) { + out = (((((back & MASK_R) * data) + MASK_R_8) >> 8) & MASK_R) + | (((((back & MASK_G) * data) + MASK_G_8) >> 8) & MASK_G) + | (((((back & MASK_B) * data) + MASK_B_8) >> 8) & MASK_B); + if (vc_progress_white) { + out += (((0xff - data) & CLUT_MASK_R)CLUT_SHIFT_R) + | (((0xff - data) & CLUT_MASK_G)CLUT_SHIFT_G) + | (((0xff - data) & CLUT_MASK_B)CLUT_SHIFT_B); + } + } else if (kDataBack & flags) { + out = back; + } else { + out = data; + } + *(dst + col) = out; + } + dst = (volatile unsigned short *) (((volatile char*)dst) + vinfo.v_rowbytes); + if (backPtr) { + backPtr += backRow - width; + } + } } -static void vc_blit_rect_32(int x, int y, int bx, - int width, int height, - int sourceWidth, int sourceHeight, - int sourceRow, int backRow, - const unsigned char * dataPtr, - unsigned int * backPtr, - unsigned int flags) -{ - volatile unsigned int * dst; - int line, col; - unsigned int data = 0, out = 0, back = 0; - int sx, sy, a, b, c, d; - int scale = 0x10000; - - a = (sourceRow == 1) ? 0 : vc_rotate_matr[kDataRotate & flags][0][0] * scale; - b = (sourceRow == 1) ? 0 : vc_rotate_matr[kDataRotate & flags][0][1] * scale; - c = vc_rotate_matr[kDataRotate & flags][1][0] * scale; - d = vc_rotate_matr[kDataRotate & flags][1][1] * scale; - - sx = ((a + b) < 0) ? ((sourceWidth * scale) - 0x8000) : 0; - sy = ((c + d) < 0) ? ((sourceHeight * scale) - 0x8000) : 0; - - if (!sourceRow) data = (unsigned int)(uintptr_t)dataPtr; - - if (backPtr) - backPtr += bx; - dst = (volatile unsigned int *) (vinfo.v_baseaddr + - (y * vinfo.v_rowbytes) + - (x * 4)); - - for( line = 0; line < height; line++) - { - for( col = 0; col < width; col++) - { - if (sourceRow) data = dataPtr[((sx + (col * a) + (line * b)) >> 16) - + sourceRow * (((sy + (col * c) + (line * d)) >> 16))]; - if (backPtr) { - if (kSave & flags) { - back = *(dst + col); - *backPtr++ = back; - } else - back = *backPtr++; - } - if (kDataIndexed & flags) { - out = (vc_clut[data*3 + 0] << 16) - | (vc_clut[data*3 + 1] << 8) - | (vc_clut[data*3 + 2]); - } else if (kDataAlpha & flags) { - out = (((((back & 0x00ff00ff) * data) + 0x00ff00ff) >> 8) & 0x00ff00ff) - | (((((back & 0x0000ff00) * data) + 0x0000ff00) >> 8) & 0x0000ff00); - if (vc_progress_white) out += ((0xff - data) << 16) +static void +vc_blit_rect_32(int x, int y, int bx, + int width, int height, + int sourceWidth, int sourceHeight, + int sourceRow, int backRow, + const unsigned char * dataPtr, + unsigned int * backPtr, + unsigned int flags) +{ + volatile unsigned int * dst; + int line, col; + unsigned int data = 0, out = 0, back = 0; + int sx, sy, a, b, c, d; + int scale = 0x10000; + + a = (sourceRow == 1) ? 
0 : vc_rotate_matr[kDataRotate & flags][0][0] * scale; + b = (sourceRow == 1) ? 0 : vc_rotate_matr[kDataRotate & flags][0][1] * scale; + c = vc_rotate_matr[kDataRotate & flags][1][0] * scale; + d = vc_rotate_matr[kDataRotate & flags][1][1] * scale; + + sx = ((a + b) < 0) ? ((sourceWidth * scale) - 0x8000) : 0; + sy = ((c + d) < 0) ? ((sourceHeight * scale) - 0x8000) : 0; + + if (!sourceRow) { + data = (unsigned int)(uintptr_t)dataPtr; + } + + if (backPtr) { + backPtr += bx; + } + dst = (volatile unsigned int *) (vinfo.v_baseaddr + + (y * vinfo.v_rowbytes) + + (x * 4)); + + for (line = 0; line < height; line++) { + for (col = 0; col < width; col++) { + if (sourceRow) { + data = dataPtr[((sx + (col * a) + (line * b)) >> 16) + + sourceRow * (((sy + (col * c) + (line * d)) >> 16))]; + } + if (backPtr) { + if (kSave & flags) { + back = *(dst + col); + *backPtr++ = back; + } else { + back = *backPtr++; + } + } + if (kDataIndexed & flags) { + out = (vc_clut[data * 3 + 0] << 16) + | (vc_clut[data * 3 + 1] << 8) + | (vc_clut[data * 3 + 2]); + } else if (kDataAlpha & flags) { + out = (((((back & 0x00ff00ff) * data) + 0x00ff00ff) >> 8) & 0x00ff00ff) + | (((((back & 0x0000ff00) * data) + 0x0000ff00) >> 8) & 0x0000ff00); + if (vc_progress_white) { + out += ((0xff - data) << 16) | ((0xff - data) << 8) | (0xff - data); - } else if (kDataBack & flags) - out = back; - else - out = data; - *(dst + col) = out; - } - dst = (volatile unsigned int *) (((volatile char*)dst) + vinfo.v_rowbytes); - if (backPtr) - backPtr += backRow - width; - } + } + } else if (kDataBack & flags) { + out = back; + } else { + out = data; + } + *(dst + col) = out; + } + dst = (volatile unsigned int *) (((volatile char*)dst) + vinfo.v_rowbytes); + if (backPtr) { + backPtr += backRow - width; + } + } } -static void vc_blit_rect_30(int x, int y, int bx, - int width, int height, - int sourceWidth, int sourceHeight, - int sourceRow, int backRow, - const unsigned char * dataPtr, - unsigned int * backPtr, - unsigned int flags) -{ - volatile unsigned int * dst; - int line, col; - unsigned int data = 0, out = 0, back = 0; - unsigned long long exp; - int sx, sy, a, b, c, d; - int scale = 0x10000; - - a = (sourceRow == 1) ? 0 : vc_rotate_matr[kDataRotate & flags][0][0] * scale; - b = (sourceRow == 1) ? 0 : vc_rotate_matr[kDataRotate & flags][0][1] * scale; - c = vc_rotate_matr[kDataRotate & flags][1][0] * scale; - d = vc_rotate_matr[kDataRotate & flags][1][1] * scale; - - sx = ((a + b) < 0) ? ((sourceWidth * scale) - 0x8000) : 0; - sy = ((c + d) < 0) ? 
((sourceHeight * scale) - 0x8000) : 0; - - if (!sourceRow) data = (unsigned int)(uintptr_t)dataPtr; - - if (backPtr) - backPtr += bx; - dst = (volatile unsigned int *) (vinfo.v_baseaddr + - (y * vinfo.v_rowbytes) + - (x * 4)); - - for( line = 0; line < height; line++) - { - for( col = 0; col < width; col++) - { - if (sourceRow) data = dataPtr[((sx + (col * a) + (line * b)) >> 16) - + sourceRow * (((sy + (col * c) + (line * d)) >> 16))]; - if (backPtr) { - if (kSave & flags) { - back = *(dst + col); - *backPtr++ = back; - } else - back = *backPtr++; - } - if (kDataIndexed & flags) { - out = (vc_clut[data*3 + 0] << 22) - | (vc_clut[data*3 + 1] << 12) - | (vc_clut[data*3 + 2] << 2); - } else if (kDataAlpha & flags) { - exp = back; - exp = (((((exp & 0x3FF003FF) * data) + 0x0FF000FF) >> 8) & 0x3FF003FF) - | (((((exp & 0x000FFC00) * data) + 0x0003FC00) >> 8) & 0x000FFC00); - out = (unsigned int)exp; - if (vc_progress_white) out += ((0xFF - data) << 22) +static void +vc_blit_rect_30(int x, int y, int bx, + int width, int height, + int sourceWidth, int sourceHeight, + int sourceRow, int backRow, + const unsigned char * dataPtr, + unsigned int * backPtr, + unsigned int flags) +{ + volatile unsigned int * dst; + int line, col; + unsigned int data = 0, out = 0, back = 0; + unsigned long long exp; + int sx, sy, a, b, c, d; + int scale = 0x10000; + + a = (sourceRow == 1) ? 0 : vc_rotate_matr[kDataRotate & flags][0][0] * scale; + b = (sourceRow == 1) ? 0 : vc_rotate_matr[kDataRotate & flags][0][1] * scale; + c = vc_rotate_matr[kDataRotate & flags][1][0] * scale; + d = vc_rotate_matr[kDataRotate & flags][1][1] * scale; + + sx = ((a + b) < 0) ? ((sourceWidth * scale) - 0x8000) : 0; + sy = ((c + d) < 0) ? ((sourceHeight * scale) - 0x8000) : 0; + + if (!sourceRow) { + data = (unsigned int)(uintptr_t)dataPtr; + } + + if (backPtr) { + backPtr += bx; + } + dst = (volatile unsigned int *) (vinfo.v_baseaddr + + (y * vinfo.v_rowbytes) + + (x * 4)); + + for (line = 0; line < height; line++) { + for (col = 0; col < width; col++) { + if (sourceRow) { + data = dataPtr[((sx + (col * a) + (line * b)) >> 16) + + sourceRow * (((sy + (col * c) + (line * d)) >> 16))]; + } + if (backPtr) { + if (kSave & flags) { + back = *(dst + col); + *backPtr++ = back; + } else { + back = *backPtr++; + } + } + if (kDataIndexed & flags) { + out = (vc_clut[data * 3 + 0] << 22) + | (vc_clut[data * 3 + 1] << 12) + | (vc_clut[data * 3 + 2] << 2); + } else if (kDataAlpha & flags) { + exp = back; + exp = (((((exp & 0x3FF003FF) * data) + 0x0FF000FF) >> 8) & 0x3FF003FF) + | (((((exp & 0x000FFC00) * data) + 0x0003FC00) >> 8) & 0x000FFC00); + out = (unsigned int)exp; + if (vc_progress_white) { + out += ((0xFF - data) << 22) | ((0xFF - data) << 12) | ((0xFF - data) << 2); - } else if (kDataBack & flags) - out = back; - else - out = data; - *(dst + col) = out; - } - dst = (volatile unsigned int *) (((volatile char*)dst) + vinfo.v_rowbytes); - if (backPtr) - backPtr += backRow - width; - } + } + } else if (kDataBack & flags) { + out = back; + } else { + out = data; + } + *(dst + col) = out; + } + dst = (volatile unsigned int *) (((volatile char*)dst) + vinfo.v_rowbytes); + if (backPtr) { + backPtr += backRow - width; + } + } } -static void vc_clean_boot_graphics(void) -{ -#if !CONFIG_EMBEDDED - // clean up possible FDE login graphics - vc_progress_set(FALSE, 0); - const unsigned char * - color = (typeof(color))(uintptr_t)(vc_progress_white ? 
0x00000000 : 0xBFBFBFBF); - vc_blit_rect(0, 0, 0, vinfo.v_width, vinfo.v_height, vinfo.v_width, vinfo.v_height, 0, 0, color, NULL, 0); +static void +vc_clean_boot_graphics(void) +{ +#if defined(XNU_TARGET_OS_OSX) + // clean up possible FDE login graphics + vc_progress_set(FALSE, 0); + const unsigned char * + color = (typeof(color))(uintptr_t)(vc_progress_white ? 0x00000000 : 0xBFBFBFBF); + vc_blit_rect(0, 0, 0, vinfo.v_width, vinfo.v_height, vinfo.v_width, vinfo.v_height, 0, 0, color, NULL, 0); #endif } @@ -2269,28 +2290,28 @@ struct lzss_image_state { typedef struct lzss_image_state lzss_image_state; // returns 0 if OK, 1 if error -static inline int -vc_decompress_lzss_next_pixel (int next_data, lzss_image_state* state) +static inline int +vc_decompress_lzss_next_pixel(int next_data, lzss_image_state* state) { - uint32_t palette_index = 0; - uint32_t pixel_value = 0; + uint32_t palette_index = 0; + uint32_t pixel_value = 0; - palette_index = next_data * 3; + palette_index = next_data * 3; - pixel_value = ( (uint32_t) state->clut[palette_index + 0] << 16) - | ( (uint32_t) state->clut[palette_index + 1] << 8) - | ( (uint32_t) state->clut[palette_index + 2]); + pixel_value = ((uint32_t) state->clut[palette_index + 0] << 16) + | ((uint32_t) state->clut[palette_index + 1] << 8) + | ((uint32_t) state->clut[palette_index + 2]); - *(state->row_start + state->col) = pixel_value; + *(state->row_start + state->col) = pixel_value; - if (++state->col >= state->width) { - state->col = 0; - if (++state->row >= state->height) { - return 1; - } - state->row_start = (volatile uint32_t *) (((uintptr_t)state->row_start) + state->bytes_per_row); - } - return 0; + if (++state->col >= state->width) { + state->col = 0; + if (++state->row >= state->height) { + return 1; + } + state->row_start = (volatile uint32_t *) (((uintptr_t)state->row_start) + state->bytes_per_row); + } + return 0; } @@ -2300,413 +2321,454 @@ vc_decompress_lzss_next_pixel (int next_data, lzss_image_state* state) * The function vc_display_lzss_icon was copied from libkern/mkext.c, then modified. */ -/* - * TODO: Does lzss use too much stack? 4096 plus bytes... - * Can probably chop it down by 1/2. +/* + * TODO: Does lzss use too much stack? 4096 plus bytes... + * Can probably chop it down by 1/2. */ /************************************************************** - LZSS.C -- A Data Compression Program +* LZSS.C -- A Data Compression Program *************************************************************** - 4/6/1989 Haruhiko Okumura - Use, distribute, and modify this program freely. - Please send me your improved versions. - PC-VAN SCIENCE - NIFTY-Serve PAF01022 - CompuServe 74050,1022 - +* 4/6/1989 Haruhiko Okumura +* Use, distribute, and modify this program freely. +* Please send me your improved versions. 
+* PC-VAN SCIENCE +* NIFTY-Serve PAF01022 +* CompuServe 74050,1022 +* **************************************************************/ #define N 4096 /* size of ring buffer - must be power of 2 */ #define F 18 /* upper limit for match_length */ #define THRESHOLD 2 /* encode string into position and length - if match_length is greater than this */ + * if match_length is greater than this */ // returns 0 if OK, 1 if error // x and y indicate upper left corner of image location on screen int -vc_display_lzss_icon(uint32_t dst_x, uint32_t dst_y, - uint32_t image_width, uint32_t image_height, - const uint8_t *compressed_image, - uint32_t compressed_size, - const uint8_t *clut) +vc_display_lzss_icon(uint32_t dst_x, uint32_t dst_y, + uint32_t image_width, uint32_t image_height, + const uint8_t *compressed_image, + uint32_t compressed_size, + const uint8_t *clut) { - uint32_t* image_start; - uint32_t bytes_per_pixel = 4; - uint32_t bytes_per_row = vinfo.v_rowbytes; - - vc_clean_boot_graphics(); - - image_start = (uint32_t *) (vinfo.v_baseaddr + (dst_y * bytes_per_row) + (dst_x * bytes_per_pixel)); - - lzss_image_state state = {0, 0, image_width, image_height, bytes_per_row, image_start, clut}; - - int rval = 0; - - const uint8_t *src = compressed_image; - uint32_t srclen = compressed_size; - - /* ring buffer of size N, with extra F-1 bytes to aid string comparison */ - uint8_t text_buf[N + F - 1]; - const uint8_t *srcend = src + srclen; - int i, j, k, r, c; - unsigned int flags; - - srcend = src + srclen; - for (i = 0; i < N - F; i++) - text_buf[i] = ' '; - r = N - F; - flags = 0; - for ( ; ; ) { - if (((flags >>= 1) & 0x100) == 0) { - if (src < srcend) c = *src++; else break; - flags = c | 0xFF00; /* uses higher byte cleverly */ - } /* to count eight */ - if (flags & 1) { - if (src < srcend) c = *src++; else break; - rval = vc_decompress_lzss_next_pixel(c, &state); - if (rval != 0) - return rval; - text_buf[r++] = c; - r &= (N - 1); - } else { - if (src < srcend) i = *src++; else break; - if (src < srcend) j = *src++; else break; - i |= ((j & 0xF0) << 4); - j = (j & 0x0F) + THRESHOLD; - for (k = 0; k <= j; k++) { - c = text_buf[(i + k) & (N - 1)]; - rval = vc_decompress_lzss_next_pixel(c, &state); - if (rval != 0 ) - return rval; - text_buf[r++] = c; - r &= (N - 1); - } - } - } - return 0; + uint32_t* image_start; + uint32_t bytes_per_pixel = 4; + uint32_t bytes_per_row = vinfo.v_rowbytes; + + vc_clean_boot_graphics(); + + image_start = (uint32_t *) (vinfo.v_baseaddr + (dst_y * bytes_per_row) + (dst_x * bytes_per_pixel)); + + lzss_image_state state = {0, 0, image_width, image_height, bytes_per_row, image_start, clut}; + + int rval = 0; + + const uint8_t *src = compressed_image; + uint32_t srclen = compressed_size; + + /* ring buffer of size N, with extra F-1 bytes to aid string comparison */ + uint8_t text_buf[N + F - 1]; + const uint8_t *srcend = src + srclen; + int i, j, k, r, c; + unsigned int flags; + + srcend = src + srclen; + for (i = 0; i < N - F; i++) { + text_buf[i] = ' '; + } + r = N - F; + flags = 0; + for (;;) { + if (((flags >>= 1) & 0x100) == 0) { + if (src < srcend) { + c = *src++; + } else { + break; + } + flags = c | 0xFF00; /* uses higher byte cleverly */ + } /* to count eight */ + if (flags & 1) { + if (src < srcend) { + c = *src++; + } else { + break; + } + rval = vc_decompress_lzss_next_pixel(c, &state); + if (rval != 0) { + return rval; + } + text_buf[r++] = c; + r &= (N - 1); + } else { + if (src < srcend) { + i = *src++; + } else { + break; + } + if (src < srcend) { + j = 
*src++; + } else { + break; + } + i |= ((j & 0xF0) << 4); + j = (j & 0x0F) + THRESHOLD; + for (k = 0; k <= j; k++) { + c = text_buf[(i + k) & (N - 1)]; + rval = vc_decompress_lzss_next_pixel(c, &state); + if (rval != 0) { + return rval; + } + text_buf[r++] = c; + r &= (N - 1); + } + } + } + return 0; } -void noroot_icon_test(void) { - boolean_t o_vc_progress_enable = vc_progress_enable; +void +noroot_icon_test(void) +{ + boolean_t o_vc_progress_enable = vc_progress_enable; - vc_progress_enable = 1; + vc_progress_enable = 1; - PE_display_icon( 0, "noroot"); + PE_display_icon( 0, "noroot"); - vc_progress_enable = o_vc_progress_enable; + vc_progress_enable = o_vc_progress_enable; } -void vc_display_icon( vc_progress_element * desc, - const unsigned char * data ) +void +vc_display_icon( vc_progress_element * desc, + const unsigned char * data ) { - int x, y, width, height; - - if( vc_progress_enable && vc_clut) { + int x, y, width, height; - vc_clean_boot_graphics(); + if (vc_progress_enable && vc_clut) { + vc_clean_boot_graphics(); - width = desc->width; - height = desc->height; - x = desc->dx; - y = desc->dy; - if( 1 & desc->flags) { - x += ((vinfo.v_width - width) / 2); - y += ((vinfo.v_height - height) / 2); + width = desc->width; + height = desc->height; + x = desc->dx; + y = desc->dy; + if (1 & desc->flags) { + x += ((vinfo.v_width - width) / 2); + y += ((vinfo.v_height - height) / 2); + } + vc_blit_rect( x, y, 0, width, height, width, height, width, 0, data, NULL, kDataIndexed ); } - vc_blit_rect( x, y, 0, width, height, width, height, width, 0, data, NULL, kDataIndexed ); - } } void vc_progress_initialize( vc_progress_element * desc, - const unsigned char * data1x, - const unsigned char * data2x, - const unsigned char * data3x, - const unsigned char * clut ) + const unsigned char * data1x, + const unsigned char * data2x, + const unsigned char * data3x, + const unsigned char * clut ) { - uint64_t abstime; - - if( (!clut) || (!desc) || (!data1x)) - return; - vc_clut = clut; - vc_clut8 = clut; - - vc_progress = desc; - vc_progress_data[0] = data1x; - vc_progress_data[1] = data2x; - vc_progress_data[2] = data3x; - if( 2 & vc_progress->flags) - vc_progress_alpha = data1x - + vc_progress->count * vc_progress->width * vc_progress->height; - else - vc_progress_alpha = NULL; - - thread_call_setup(&vc_progress_call, vc_progress_task, NULL); - clock_interval_to_absolutetime_interval(vc_progress->time, 1000 * 1000, &abstime); - vc_progress_interval = (uint32_t)abstime; - -#if !CONFIG_EMBEDDED - thread_call_setup(&vc_progressmeter_call, vc_progressmeter_task, NULL); - clock_interval_to_absolutetime_interval(1000 / 8, 1000 * 1000, &abstime); - vc_progressmeter_interval = (uint32_t)abstime; -#endif /* !CONFIG_EMBEDDED */ + uint64_t abstime; + + if ((!clut) || (!desc) || (!data1x)) { + return; + } + vc_clut = clut; + vc_clut8 = clut; + vc_progress = desc; + vc_progress_data[0] = data1x; + vc_progress_data[1] = data2x; + vc_progress_data[2] = data3x; + if (2 & vc_progress->flags) { + vc_progress_alpha = data1x + + vc_progress->count * vc_progress->width * vc_progress->height; + } else { + vc_progress_alpha = NULL; + } + + thread_call_setup(&vc_progress_call, vc_progress_task, NULL); + clock_interval_to_absolutetime_interval(vc_progress->time, 1000 * 1000, &abstime); + vc_progress_interval = (uint32_t)abstime; + +#if defined(XNU_TARGET_OS_OSX) + thread_call_setup(&vc_progressmeter_call, vc_progressmeter_task, NULL); + clock_interval_to_absolutetime_interval(1000 / 8, 1000 * 1000, &abstime); + 
vc_progressmeter_interval = (uint32_t)abstime; +#endif /* defined(XNU_TARGET_OS_OSX) */ } void vc_progress_set(boolean_t enable, uint32_t vc_delay) { - spl_t s; - void *saveBuf = NULL; - vm_size_t saveLen = 0; - unsigned int count; - unsigned int index; - unsigned char pdata8; - unsigned short pdata16; - unsigned short * buf16; - unsigned int pdata32; - unsigned int * buf32; + spl_t s; + void *saveBuf = NULL; + vm_size_t saveLen = 0; + unsigned int count; + unsigned int index; + unsigned char pdata8; + unsigned short pdata16; + unsigned short * buf16; + unsigned int pdata32; + unsigned int * buf32; + + if (!vc_progress) { + return; + } -#if !CONFIG_EMBEDDED +#if defined(CONFIG_VC_PROGRESS_METER_SUPPORT) + +#if defined (__x86_64__) + if (kBootArgsFlagBlack & ((boot_args *) PE_state.bootArgs)->flags) { + return; + } +#endif /* defined (__x86_64__) */ + + if (1 & vc_progress_withmeter) { + if (enable) { + internal_enable_progressmeter(kProgressMeterKernel); + } - if (kBootArgsFlagBlack & ((boot_args *) PE_state.bootArgs)->flags) return; + s = splhigh(); + simple_lock(&vc_progress_lock, LCK_GRP_NULL); + + if (vc_progress_enable != enable) { + vc_progress_enable = enable; + if (enable) { + vc_progressmeter_count = 0; + clock_interval_to_deadline(vc_delay, + 1000 * 1000 * 1000 /*second scale*/, + &vc_progressmeter_deadline); + thread_call_enter_delayed(&vc_progressmeter_call, vc_progressmeter_deadline); + } else { + thread_call_cancel(&vc_progressmeter_call); + } + } + + simple_unlock(&vc_progress_lock); + splx(s); + + if (!enable) { + internal_enable_progressmeter(kProgressMeterOff); + } + return; + } - if (1 & vc_progress_withmeter) - { - if (enable) internal_enable_progressmeter(kProgressMeterKernel); +#endif /* defined(CONFIG_VC_PROGRESS_METER_SUPPORT) */ + + if (enable) { + saveLen = (vc_progress->width * vc_uiscale) * (vc_progress->height * vc_uiscale) * ((vinfo.v_depth + 7) / 8); + saveBuf = kheap_alloc( KHEAP_DATA_BUFFERS, saveLen, Z_WAITOK ); + + switch (vinfo.v_depth) { + case 8: + for (count = 0; count < 256; count++) { + vc_revclut8[count] = vc_clut[0x01 * 3]; + pdata8 = (vc_clut[0x01 * 3] * count + 0x0ff) >> 8; + for (index = 0; index < 256; index++) { + if ((pdata8 == vc_clut[index * 3 + 0]) && + (pdata8 == vc_clut[index * 3 + 1]) && + (pdata8 == vc_clut[index * 3 + 2])) { + vc_revclut8[count] = index; + break; + } + } + } + memset( saveBuf, 0x01, saveLen ); + break; + + case 16: + buf16 = (unsigned short *) saveBuf; + pdata16 = ((vc_clut[0x01 * 3 + 0] & CLUT_MASK_R)CLUT_SHIFT_R) + | ((vc_clut[0x01 * 3 + 0] & CLUT_MASK_G)CLUT_SHIFT_G) + | ((vc_clut[0x01 * 3 + 0] & CLUT_MASK_B)CLUT_SHIFT_B); + for (count = 0; count < saveLen / 2; count++) { + buf16[count] = pdata16; + } + break; + + case 32: + buf32 = (unsigned int *) saveBuf; + pdata32 = ((vc_clut[0x01 * 3 + 0] & 0xff) << 16) + | ((vc_clut[0x01 * 3 + 1] & 0xff) << 8) + | ((vc_clut[0x01 * 3 + 2] & 0xff) << 0); + for (count = 0; count < saveLen / 4; count++) { + buf32[count] = pdata32; + } + break; + } + } s = splhigh(); simple_lock(&vc_progress_lock, LCK_GRP_NULL); - if( vc_progress_enable != enable) { - vc_progress_enable = enable; - if( enable) - { - vc_progressmeter_count = 0; - clock_interval_to_deadline(vc_delay, - 1000 * 1000 * 1000 /*second scale*/, - &vc_progressmeter_deadline); - thread_call_enter_delayed(&vc_progressmeter_call, vc_progressmeter_deadline); - } - else thread_call_cancel(&vc_progressmeter_call); + if (vc_progress_enable != enable) { + vc_progress_enable = enable; + if (enable) { + vc_needsave = TRUE; + 
vc_saveunder = saveBuf; + vc_saveunder_len = saveLen; + saveBuf = NULL; + saveLen = 0; + vc_progress_count = 0; + vc_progress_angle = 0; + + clock_interval_to_deadline(vc_delay, + 1000 * 1000 * 1000 /*second scale*/, + &vc_progress_deadline); + thread_call_enter_delayed(&vc_progress_call, vc_progress_deadline); + } else { + if (vc_saveunder) { + saveBuf = vc_saveunder; + saveLen = vc_saveunder_len; + vc_saveunder = NULL; + vc_saveunder_len = 0; + } + + thread_call_cancel(&vc_progress_call); + } } simple_unlock(&vc_progress_lock); splx(s); - if (!enable) internal_enable_progressmeter(kProgressMeterOff); - return; - } - -#endif /* !CONFIG_EMBEDDED */ - - if(!vc_progress) return; - - if( enable) { - saveLen = (vc_progress->width * vc_uiscale) * (vc_progress->height * vc_uiscale) * ((vinfo.v_depth + 7) / 8); - saveBuf = kalloc( saveLen ); - - switch( vinfo.v_depth) { - case 8 : - for( count = 0; count < 256; count++) { - vc_revclut8[count] = vc_clut[0x01 * 3]; - pdata8 = (vc_clut[0x01 * 3] * count + 0x0ff) >> 8; - for( index = 0; index < 256; index++) { - if( (pdata8 == vc_clut[index * 3 + 0]) && - (pdata8 == vc_clut[index * 3 + 1]) && - (pdata8 == vc_clut[index * 3 + 2])) { - vc_revclut8[count] = index; - break; - } - } - } - memset( saveBuf, 0x01, saveLen ); - break; - - case 16 : - buf16 = (unsigned short *) saveBuf; - pdata16 = ((vc_clut[0x01 * 3 + 0] & CLUT_MASK_R) CLUT_SHIFT_R) - | ((vc_clut[0x01 * 3 + 0] & CLUT_MASK_G) CLUT_SHIFT_G) - | ((vc_clut[0x01 * 3 + 0] & CLUT_MASK_B) CLUT_SHIFT_B); - for( count = 0; count < saveLen / 2; count++) - buf16[count] = pdata16; - break; - - case 32 : - buf32 = (unsigned int *) saveBuf; - pdata32 = ((vc_clut[0x01 * 3 + 0] & 0xff) << 16) - | ((vc_clut[0x01 * 3 + 1] & 0xff) << 8) - | ((vc_clut[0x01 * 3 + 2] & 0xff) << 0); - for( count = 0; count < saveLen / 4; count++) - buf32[count] = pdata32; - break; - } - } - - s = splhigh(); - simple_lock(&vc_progress_lock, LCK_GRP_NULL); - - if( vc_progress_enable != enable) { - vc_progress_enable = enable; - if( enable) { - vc_needsave = TRUE; - vc_saveunder = saveBuf; - vc_saveunder_len = saveLen; - saveBuf = NULL; - saveLen = 0; - vc_progress_count = 0; - vc_progress_angle = 0; - - clock_interval_to_deadline(vc_delay, - 1000 * 1000 * 1000 /*second scale*/, - &vc_progress_deadline); - thread_call_enter_delayed(&vc_progress_call, vc_progress_deadline); - - } else { - if( vc_saveunder) { - saveBuf = vc_saveunder; - saveLen = vc_saveunder_len; - vc_saveunder = NULL; - vc_saveunder_len = 0; - } - - thread_call_cancel(&vc_progress_call); - } - } - - simple_unlock(&vc_progress_lock); - splx(s); - - if( saveBuf) - kfree( saveBuf, saveLen ); + if (saveBuf) { + kheap_free( KHEAP_DATA_BUFFERS, saveBuf, saveLen ); + } } -#if !CONFIG_EMBEDDED +#if defined(XNU_TARGET_OS_OSX) -static uint32_t vc_progressmeter_range(uint32_t pos) +static uint32_t +vc_progressmeter_range(uint32_t pos) { - uint32_t ret; + uint32_t ret; - if (pos > kProgressMeterEnd) pos = kProgressMeterEnd; - ret = vc_progress_meter_start - + ((pos * (vc_progress_meter_end - vc_progress_meter_start)) / kProgressMeterEnd); + if (pos > kProgressMeterEnd) { + pos = kProgressMeterEnd; + } + ret = vc_progress_meter_start + + ((pos * (vc_progress_meter_end - vc_progress_meter_start)) / kProgressMeterEnd); - return (ret); + return ret; } static void vc_progressmeter_task(__unused void *arg0, __unused void *arg) { - spl_t s; - uint64_t interval; - - s = splhigh(); - simple_lock(&vc_progress_lock, LCK_GRP_NULL); - if (vc_progressmeter_enable) - { - uint32_t pos = 
(vc_progressmeter_count >> 13); - internal_set_progressmeter(vc_progressmeter_range(pos)); - if (pos < kProgressMeterEnd) - { - static uint16_t incr[8] = { 10000, 10000, 8192, 4096, 2048, 384, 384, 64 }; - vc_progressmeter_count += incr[(pos * 8) / kProgressMeterEnd]; + spl_t s; + uint64_t interval; - interval = vc_progressmeter_interval; - interval = ((interval * 256) / vc_progressmeter_diskspeed); - - clock_deadline_for_periodic_event(interval, mach_absolute_time(), &vc_progressmeter_deadline); - thread_call_enter_delayed(&vc_progressmeter_call, vc_progressmeter_deadline); + s = splhigh(); + simple_lock(&vc_progress_lock, LCK_GRP_NULL); + if (kProgressMeterKernel == vc_progressmeter_enable) { + uint32_t pos = (vc_progressmeter_count >> 13); + internal_set_progressmeter(vc_progressmeter_range(pos)); + if (pos < kProgressMeterEnd) { + static uint16_t incr[8] = { 10000, 10000, 8192, 4096, 2048, 384, 384, 64 }; + vc_progressmeter_count += incr[(pos * 8) / kProgressMeterEnd]; + + interval = vc_progressmeter_interval; + interval = ((interval * 256) / vc_progressmeter_diskspeed); + + clock_deadline_for_periodic_event(interval, mach_absolute_time(), &vc_progressmeter_deadline); + thread_call_enter_delayed(&vc_progressmeter_call, vc_progressmeter_deadline); + } } - } - simple_unlock(&vc_progress_lock); - splx(s); + simple_unlock(&vc_progress_lock); + splx(s); } -void vc_progress_setdiskspeed(uint32_t speed) +void +vc_progress_setdiskspeed(uint32_t speed) { - vc_progressmeter_diskspeed = speed; + vc_progressmeter_diskspeed = speed; } -#endif /* !CONFIG_EMBEDDED */ +#endif /* defined(XNU_TARGET_OS_OSX) */ static void vc_progress_task(__unused void *arg0, __unused void *arg) { - spl_t s; - int x, y, width, height; - uint64_t x_pos, y_pos; - const unsigned char * data; - - s = splhigh(); - simple_lock(&vc_progress_lock, LCK_GRP_NULL); - - if( vc_progress_enable) do { - - vc_progress_count++; - if( vc_progress_count >= vc_progress->count) { - vc_progress_count = 0; - vc_progress_angle++; - } - - width = (vc_progress->width * vc_uiscale); - height = (vc_progress->height * vc_uiscale); - data = vc_progress_data[vc_uiscale - 1]; - if (!data) break; - - if (kVCUsePosition & vc_progress_options.options) { - /* Rotation: 0:normal, 1:right 90, 2:left 180, 3:left 90 */ - switch (3 & vinfo.v_rotate) { - case kDataRotate0: - x_pos = vc_progress_options.x_pos; - y_pos = vc_progress_options.y_pos; - break; - case kDataRotate180: - x_pos = 0xFFFFFFFF - vc_progress_options.x_pos; - y_pos = 0xFFFFFFFF - vc_progress_options.y_pos; - break; - case kDataRotate90: - x_pos = 0xFFFFFFFF - vc_progress_options.y_pos; - y_pos = vc_progress_options.x_pos; - break; - case kDataRotate270: - x_pos = vc_progress_options.y_pos; - y_pos = 0xFFFFFFFF - vc_progress_options.x_pos; - break; - } - x = (uint32_t)((x_pos * (uint64_t) vinfo.v_width) / 0xFFFFFFFFULL); - y = (uint32_t)((y_pos * (uint64_t) vinfo.v_height) / 0xFFFFFFFFULL); - x -= (width / 2); - y -= (height / 2); - } else { - x = (vc_progress->dx * vc_uiscale); - y = (vc_progress->dy * vc_uiscale); - if( 1 & vc_progress->flags) { - x += ((vinfo.v_width - width) / 2); - y += ((vinfo.v_height - height) / 2); - } - } - - if ((x + width) > (int)vinfo.v_width) break; - if ((y + height) > (int)vinfo.v_height) break; - - data += vc_progress_count * width * height; - - vc_blit_rect( x, y, 0, - width, height, width, height, width, width, - data, vc_saveunder, - kDataAlpha - | (vc_progress_angle & kDataRotate) - | (vc_needsave ? 
kSave : 0) ); - vc_needsave = FALSE; - - clock_deadline_for_periodic_event(vc_progress_interval, mach_absolute_time(), &vc_progress_deadline); - thread_call_enter_delayed(&vc_progress_call, vc_progress_deadline); - } - while (FALSE); - simple_unlock(&vc_progress_lock); - splx(s); + spl_t s; + int x, y, width, height; + uint64_t x_pos, y_pos; + const unsigned char * data; + + s = splhigh(); + simple_lock(&vc_progress_lock, LCK_GRP_NULL); + + if (vc_progress_enable) { + do { + vc_progress_count++; + if (vc_progress_count >= vc_progress->count) { + vc_progress_count = 0; + vc_progress_angle++; + } + + width = (vc_progress->width * vc_uiscale); + height = (vc_progress->height * vc_uiscale); + data = vc_progress_data[vc_uiscale - 1]; + if (!data) { + break; + } + + if (kVCUsePosition & vc_progress_options.options) { + /* Rotation: 0:normal, 1:right 90, 2:left 180, 3:left 90 */ + switch (3 & vinfo.v_rotate) { + case kDataRotate0: + x_pos = vc_progress_options.x_pos; + y_pos = vc_progress_options.y_pos; + break; + case kDataRotate180: + x_pos = 0xFFFFFFFF - vc_progress_options.x_pos; + y_pos = 0xFFFFFFFF - vc_progress_options.y_pos; + break; + case kDataRotate90: + x_pos = 0xFFFFFFFF - vc_progress_options.y_pos; + y_pos = vc_progress_options.x_pos; + break; + case kDataRotate270: + x_pos = vc_progress_options.y_pos; + y_pos = 0xFFFFFFFF - vc_progress_options.x_pos; + break; + } + x = (uint32_t)((x_pos * (uint64_t) vinfo.v_width) / 0xFFFFFFFFULL); + y = (uint32_t)((y_pos * (uint64_t) vinfo.v_height) / 0xFFFFFFFFULL); + x -= (width / 2); + y -= (height / 2); + } else { + x = (vc_progress->dx * vc_uiscale); + y = (vc_progress->dy * vc_uiscale); + if (1 & vc_progress->flags) { + x += ((vinfo.v_width - width) / 2); + y += ((vinfo.v_height - height) / 2); + } + } + + if ((x + width) > (int)vinfo.v_width) { + break; + } + if ((y + height) > (int)vinfo.v_height) { + break; + } + + data += vc_progress_count * width * height; + + vc_blit_rect( x, y, 0, + width, height, width, height, width, width, + data, vc_saveunder, + kDataAlpha + | (vc_progress_angle & kDataRotate) + | (vc_needsave ? 
kSave : 0)); + vc_needsave = FALSE; + + clock_deadline_for_periodic_event(vc_progress_interval, mach_absolute_time(), &vc_progress_deadline); + thread_call_enter_delayed(&vc_progress_call, vc_progress_deadline); + }while (FALSE); + } + simple_unlock(&vc_progress_lock); + splx(s); } /* @@ -2732,34 +2794,34 @@ gc_pause( boolean_t pause, boolean_t graphics_now ) { spl_t s; - s = splhigh( ); - VCPUTC_LOCK_LOCK( ); + s = splhigh(); + VCPUTC_LOCK_LOCK(); - disableConsoleOutput = (pause && !console_is_serial()); - gc_enabled = (!pause && !graphics_now); + disableConsoleOutput = (pause && !console_is_serial()); + gc_enabled = (!pause && !graphics_now); - VCPUTC_LOCK_UNLOCK( ); + VCPUTC_LOCK_UNLOCK(); - simple_lock(&vc_progress_lock, LCK_GRP_NULL); + simple_lock(&vc_progress_lock, LCK_GRP_NULL); - if (pause) - { - gc_paused_progress = vc_progress_enable; - vc_progress_enable = FALSE; - } - else vc_progress_enable = gc_paused_progress; + if (pause) { + gc_paused_progress = vc_progress_enable; + vc_progress_enable = FALSE; + } else { + vc_progress_enable = gc_paused_progress; + } - if (vc_progress_enable) - { -#if !CONFIG_EMBEDDED - if (1 & vc_progress_withmeter) thread_call_enter_delayed(&vc_progressmeter_call, vc_progressmeter_deadline); - else -#endif /* !CONFIG_EMBEDDED */ - thread_call_enter_delayed(&vc_progress_call, vc_progress_deadline); - } + if (vc_progress_enable) { +#if defined(XNU_TARGET_OS_OSX) + if (1 & vc_progress_withmeter) { + thread_call_enter_delayed(&vc_progressmeter_call, vc_progressmeter_deadline); + } else +#endif /* defined(XNU_TARGET_OS_OSX) */ + thread_call_enter_delayed(&vc_progress_call, vc_progress_deadline); + } - simple_unlock(&vc_progress_lock); - splx(s); + simple_unlock(&vc_progress_lock); + splx(s); } static void @@ -2784,8 +2846,11 @@ vc_initialize(__unused struct vc_info * vinfo_p) vinfo.v_columns = vinfo.v_width / ISO_CHAR_WIDTH; vinfo.v_rowscanbytes = ((vinfo.v_depth + 7) / 8) * vinfo.v_width; vc_uiscale = vinfo.v_scale; - if (vc_uiscale > kMaxProgressData) vc_uiscale = kMaxProgressData; - else if (!vc_uiscale) vc_uiscale = 1; + if (vc_uiscale > kMaxProgressData) { + vc_uiscale = kMaxProgressData; + } else if (!vc_uiscale) { + vc_uiscale = 1; + } } void @@ -2796,36 +2861,36 @@ initialize_screen(PE_Video * boot_vinfo, unsigned int op) boolean_t graphics_now; uint32_t delay; - if ( boot_vinfo ) - { + if (boot_vinfo) { struct vc_info new_vinfo = vinfo; boolean_t makeMapping = FALSE; - /* + /* * Copy parameters */ - if (kPEBaseAddressChange != op) - { - new_vinfo.v_width = (unsigned int)boot_vinfo->v_width; - new_vinfo.v_height = (unsigned int)boot_vinfo->v_height; - new_vinfo.v_depth = (unsigned int)boot_vinfo->v_depth; - new_vinfo.v_rowbytes = (unsigned int)boot_vinfo->v_rowBytes; - if (kernel_map == VM_MAP_NULL) { + if (kPEBaseAddressChange != op) { + new_vinfo.v_width = (unsigned int)boot_vinfo->v_width; + new_vinfo.v_height = (unsigned int)boot_vinfo->v_height; + new_vinfo.v_depth = (unsigned int)boot_vinfo->v_depth; + new_vinfo.v_rowbytes = (unsigned int)boot_vinfo->v_rowBytes; + if (kernel_map == VM_MAP_NULL) { // only booter supplies HW rotation new_vinfo.v_rotate = (unsigned int)boot_vinfo->v_rotate; - } + } #if defined(__i386__) || defined(__x86_64__) - new_vinfo.v_type = (unsigned int)boot_vinfo->v_display; + new_vinfo.v_type = (unsigned int)boot_vinfo->v_display; #else - new_vinfo.v_type = 0; + new_vinfo.v_type = 0; #endif - unsigned int scale = (unsigned int)boot_vinfo->v_scale; - if (scale == kPEScaleFactor1x ) - new_vinfo.v_scale = kPEScaleFactor1x; 
- else if (scale == kPEScaleFactor2x) - new_vinfo.v_scale = kPEScaleFactor2x; - else /* Scale factor not set, default to 1x */ - new_vinfo.v_scale = kPEScaleFactor1x; + unsigned int scale = (unsigned int)boot_vinfo->v_scale; + if (scale == kPEScaleFactor1x) { + new_vinfo.v_scale = kPEScaleFactor1x; + } else if (scale == kPEScaleFactor2x) { + new_vinfo.v_scale = kPEScaleFactor2x; + } + else { /* Scale factor not set, default to 1x */ + new_vinfo.v_scale = kPEScaleFactor1x; + } } new_vinfo.v_name[0] = 0; new_vinfo.v_physaddr = 0; @@ -2836,38 +2901,34 @@ initialize_screen(PE_Video * boot_vinfo, unsigned int op) */ newVideoVirt = boot_vinfo->v_baseAddr; makeMapping = (kernel_map == VM_MAP_NULL) || (0 != (1 & newVideoVirt)); - if (makeMapping) - { + if (makeMapping) { newVideoVirt = 0; - new_vinfo.v_physaddr = boot_vinfo->v_baseAddr & ~3UL; /* Get the physical address */ + new_vinfo.v_physaddr = boot_vinfo->v_baseAddr & ~3UL; /* Get the physical address */ #ifndef __LP64__ new_vinfo.v_physaddr |= (((uint64_t) boot_vinfo->v_baseAddrHigh) << 32); #endif kprintf("initialize_screen: b=%08llX, w=%08X, h=%08X, r=%08X, d=%08X\n", /* (BRINGUP) */ - new_vinfo.v_physaddr, new_vinfo.v_width, new_vinfo.v_height, new_vinfo.v_rowbytes, new_vinfo.v_type); /* (BRINGUP) */ + new_vinfo.v_physaddr, new_vinfo.v_width, new_vinfo.v_height, new_vinfo.v_rowbytes, new_vinfo.v_type); /* (BRINGUP) */ } - - if (!newVideoVirt && !new_vinfo.v_physaddr) /* Check to see if we have a framebuffer */ - { - kprintf("initialize_screen: No video - forcing serial mode\n"); /* (BRINGUP) */ - new_vinfo.v_depth = 0; /* vc routines are nop */ - (void)switch_to_serial_console(); /* Switch into serial mode */ - gc_graphics_boot = FALSE; /* Say we are not in graphics mode */ - disableConsoleOutput = FALSE; /* Allow printfs to happen */ + + if (!newVideoVirt && !new_vinfo.v_physaddr) { /* Check to see if we have a framebuffer */ + kprintf("initialize_screen: No video - forcing serial mode\n"); /* (BRINGUP) */ + new_vinfo.v_depth = 0; /* vc routines are nop */ + (void)switch_to_serial_console(); /* Switch into serial mode */ + gc_graphics_boot = FALSE; /* Say we are not in graphics mode */ + disableConsoleOutput = FALSE; /* Allow printfs to happen */ gc_acquired = TRUE; - } - else - { - if (makeMapping) - { - unsigned int flags = VM_WIMG_IO; - if (boot_vinfo->v_length != 0) + } else { + if (makeMapping) { + unsigned int flags = VM_WIMG_IO; + if (boot_vinfo->v_length != 0) { newMapSize = (unsigned int) round_page(boot_vinfo->v_length); - else - newMapSize = (unsigned int) round_page(new_vinfo.v_height * new_vinfo.v_rowbytes); /* Remember size */ - newVideoVirt = io_map_spec((vm_map_offset_t)new_vinfo.v_physaddr, newMapSize, flags); /* Allocate address space for framebuffer */ + } else { + newMapSize = (unsigned int) round_page(new_vinfo.v_height * new_vinfo.v_rowbytes); /* Remember size */ + } + newVideoVirt = io_map_spec((vm_map_offset_t)new_vinfo.v_physaddr, newMapSize, flags); /* Allocate address space for framebuffer */ } - new_vinfo.v_baseaddr = newVideoVirt + boot_vinfo->v_offset; /* Set the new framebuffer address */ + new_vinfo.v_baseaddr = newVideoVirt + boot_vinfo->v_offset; /* Set the new framebuffer address */ } #if defined(__x86_64__) @@ -2876,41 +2937,33 @@ initialize_screen(PE_Video * boot_vinfo, unsigned int op) #endif /* Update the vinfo structure atomically with respect to the vc_progress task if running */ - if (vc_progress) - { - simple_lock(&vc_progress_lock, LCK_GRP_NULL); - vinfo = new_vinfo; - 
simple_unlock(&vc_progress_lock); - } - else - { - vinfo = new_vinfo; + if (vc_progress) { + simple_lock(&vc_progress_lock, LCK_GRP_NULL); + vinfo = new_vinfo; + simple_unlock(&vc_progress_lock); + } else { + vinfo = new_vinfo; } // If we changed the virtual address, remove the old mapping - if (newVideoVirt != 0) - { - if (lastVideoVirt && lastVideoMapSize) /* Was the framebuffer mapped before? */ - { + if (newVideoVirt != 0) { + if (lastVideoVirt && lastVideoMapSize) { /* Was the framebuffer mapped before? */ /* XXX why only !4K? */ - if (!TEST_PAGE_SIZE_4K && lastVideoMapSize) - { + if (!TEST_PAGE_SIZE_4K && lastVideoMapSize) { pmap_remove(kernel_pmap, trunc_page_64(lastVideoVirt), - round_page_64(lastVideoVirt + lastVideoMapSize)); /* Toss mappings */ + round_page_64(lastVideoVirt + lastVideoMapSize)); /* Toss mappings */ } /* Was this not a special pre-VM mapping? */ - if (lastVideoMapKmap) - { - kmem_free(kernel_map, lastVideoVirt, lastVideoMapSize); /* Toss kernel addresses */ + if (lastVideoMapKmap) { + kmem_free(kernel_map, lastVideoVirt, lastVideoMapSize); /* Toss kernel addresses */ } } - lastVideoMapKmap = (NULL != kernel_map); /* Remember how mapped */ - lastVideoMapSize = newMapSize; /* Remember the size */ - lastVideoVirt = newVideoVirt; /* Remember the virtual framebuffer address */ + lastVideoMapKmap = (NULL != kernel_map); /* Remember how mapped */ + lastVideoMapSize = newMapSize; /* Remember the size */ + lastVideoVirt = newVideoVirt; /* Remember the virtual framebuffer address */ } - if (kPEBaseAddressChange != op) - { + if (kPEBaseAddressChange != op) { // Graphics mode setup by the booter. gc_ops.initialize = vc_initialize; @@ -2922,116 +2975,129 @@ initialize_screen(PE_Video * boot_vinfo, unsigned int op) gc_ops.hide_cursor = vc_reverse_cursor; gc_ops.show_cursor = vc_reverse_cursor; gc_ops.update_color = vc_update_color; - gc_initialize(&vinfo); + gc_initialize(&vinfo); } } - graphics_now = gc_graphics_boot && !gc_desire_text; - switch ( op ) - { - case kPEGraphicsMode: - gc_graphics_boot = TRUE; - gc_desire_text = FALSE; - break; + graphics_now = gc_graphics_boot && !gc_desire_text; + switch (op) { + case kPEGraphicsMode: + gc_graphics_boot = TRUE; + gc_desire_text = FALSE; + break; - case kPETextMode: - gc_graphics_boot = FALSE; - break; + case kPETextMode: + gc_graphics_boot = FALSE; + break; - case kPEAcquireScreen: - if ( gc_acquired ) break; + case kPEAcquireScreen: + if (gc_acquired) { + break; + } - vc_progress_options = vc_user_options; - bzero(&vc_user_options, sizeof(vc_user_options)); + vc_progress_options = vc_user_options; + bzero(&vc_user_options, sizeof(vc_user_options)); - if (kVCAcquireImmediate & vc_progress_options.options) delay = 0; - else if (kVCDarkReboot & vc_progress_options.options) delay = 120; - else delay = vc_acquire_delay; + if (kVCAcquireImmediate & vc_progress_options.options) { + delay = 0; + } else if (kVCDarkReboot & vc_progress_options.options) { + delay = 120; + } else { + delay = vc_acquire_delay; + } - if (kVCDarkBackground & vc_progress_options.options) vc_progress_white = TRUE; - else if (kVCLightBackground & vc_progress_options.options) vc_progress_white = FALSE; + if (kVCDarkBackground & vc_progress_options.options) { + vc_progress_white = TRUE; + } else if (kVCLightBackground & vc_progress_options.options) { + vc_progress_white = FALSE; + } #if !defined(XNU_TARGET_OS_BRIDGE) - vc_progress_set( graphics_now, delay ); + vc_progress_set( graphics_now, delay ); #endif /* !defined(XNU_TARGET_OS_BRIDGE) */ - gc_enable( 
!graphics_now ); - gc_acquired = TRUE; - gc_desire_text = FALSE; - break; + gc_enable( !graphics_now ); + gc_acquired = TRUE; + gc_desire_text = FALSE; + break; - case kPEDisableScreen: - if (gc_acquired) - { - gc_pause( TRUE, graphics_now ); - } - break; + case kPEDisableScreen: + if (gc_acquired) { + gc_pause( TRUE, graphics_now ); + } + break; - case kPEEnableScreen: - if (gc_acquired) - { - gc_pause( FALSE, graphics_now ); - } - break; + case kPEEnableScreen: + if (gc_acquired) { + gc_pause( FALSE, graphics_now ); + } + break; - case kPETextScreen: - if ( console_is_serial() ) break; + case kPETextScreen: + if (console_is_serial()) { + break; + } - if ( gc_acquired == FALSE ) - { - gc_desire_text = TRUE; - break; - } - if ( gc_graphics_boot == FALSE ) break; + if (gc_acquired == FALSE) { + gc_desire_text = TRUE; + break; + } + if (gc_graphics_boot == FALSE) { + break; + } - vc_progress_set( FALSE, 0 ); -#if !CONFIG_EMBEDDED - vc_enable_progressmeter( FALSE ); + vc_progress_set( FALSE, 0 ); +#if defined(XNU_TARGET_OS_OSX) + vc_enable_progressmeter( FALSE ); #endif - gc_enable( TRUE ); + gc_enable( TRUE ); + break; + + case kPEReleaseScreen: + gc_acquired = FALSE; + gc_desire_text = FALSE; + gc_enable( FALSE ); + if (gc_graphics_boot == FALSE) { break; + } - case kPEReleaseScreen: - gc_acquired = FALSE; - gc_desire_text = FALSE; - gc_enable( FALSE ); - if ( gc_graphics_boot == FALSE ) break; - - vc_progress_set( FALSE, 0 ); - vc_acquire_delay = kProgressReacquireDelay; - vc_progress_white = TRUE; -#if !CONFIG_EMBEDDED - vc_enable_progressmeter(FALSE); - vc_progress_withmeter &= ~1; + vc_progress_set( FALSE, 0 ); + vc_acquire_delay = kProgressReacquireDelay; + vc_progress_white = TRUE; +#if defined(XNU_TARGET_OS_OSX) + vc_enable_progressmeter(FALSE); + vc_progress_withmeter &= ~1; #endif - vc_clut8 = NULL; - break; + vc_clut8 = NULL; + break; -#if !CONFIG_EMBEDDED - case kPERefreshBootGraphics: - { - spl_t s; - boolean_t save; +#if defined(__x86_64__) + case kPERefreshBootGraphics: + { + spl_t s; + boolean_t save; - if (kBootArgsFlagBlack & ((boot_args *) PE_state.bootArgs)->flags) break; + if (kBootArgsFlagBlack & ((boot_args *) PE_state.bootArgs)->flags) { + break; + } - save = vc_progress_white; - vc_progress_white = (0 != (kBootArgsFlagBlackBg & ((boot_args *) PE_state.bootArgs)->flags)); + save = vc_progress_white; + vc_progress_white = (0 != (kBootArgsFlagBlackBg & ((boot_args *) PE_state.bootArgs)->flags)); - internal_enable_progressmeter(kProgressMeterKernel); + internal_enable_progressmeter(kProgressMeterKernel); - s = splhigh(); - simple_lock(&vc_progress_lock, LCK_GRP_NULL); + s = splhigh(); + simple_lock(&vc_progress_lock, LCK_GRP_NULL); - vc_progressmeter_drawn = 0; - internal_set_progressmeter(vc_progressmeter_range(vc_progressmeter_count >> 13)); + vc_progressmeter_drawn = 0; + internal_set_progressmeter(vc_progressmeter_range(vc_progressmeter_count >> 13)); - simple_unlock(&vc_progress_lock); - splx(s); + simple_unlock(&vc_progress_lock); + splx(s); - internal_enable_progressmeter(kProgressMeterOff); - vc_progress_white = save; - } + internal_enable_progressmeter(kProgressMeterOff); + vc_progress_white = save; + } #endif } } @@ -3043,135 +3109,132 @@ vcattach(void) { vm_initialized = TRUE; -#if !CONFIG_EMBEDDED - const boot_args * bootargs = (typeof(bootargs)) PE_state.bootArgs; +#if defined(CONFIG_VC_PROGRESS_METER_SUPPORT) + const boot_args * bootargs = (typeof(bootargs))PE_state.bootArgs; - vc_progress_white = (0 != ((kBootArgsFlagBlackBg | kBootArgsFlagLoginUI) - & 
bootargs->flags)); PE_parse_boot_argn("meter", &vc_progress_withmeter, sizeof(vc_progress_withmeter)); - if (kBootArgsFlagInstallUI & bootargs->flags) - { - vc_progress_meter_start = (bootargs->bootProgressMeterStart * kProgressMeterMax) / 65535; - vc_progress_meter_end = (bootargs->bootProgressMeterEnd * kProgressMeterMax) / 65535; - } - else - { - vc_progress_meter_start = 0; - vc_progress_meter_end = kProgressMeterMax; +#if defined(__x86_64__) + vc_progress_white = (0 != ((kBootArgsFlagBlackBg | kBootArgsFlagLoginUI) + & bootargs->flags)); + if (kBootArgsFlagInstallUI & bootargs->flags) { + vc_progress_meter_start = (bootargs->bootProgressMeterStart * kProgressMeterMax) / 65535; + vc_progress_meter_end = (bootargs->bootProgressMeterEnd * kProgressMeterMax) / 65535; + } else { + vc_progress_meter_start = 0; + vc_progress_meter_end = kProgressMeterEnd; } -#endif +#else + vc_progress_meter_start = 0; + vc_progress_meter_end = kProgressMeterEnd; +#endif /* defined(__x86_64__ */ +#endif /* defined(CONFIG_VC_PROGRESS_METER_SUPPORT) */ simple_lock_init(&vc_progress_lock, 0); - if ( gc_graphics_boot == FALSE ) - { + if (gc_graphics_boot == FALSE) { long index; - if ( gc_acquired ) - { + if (gc_acquired) { initialize_screen(NULL, kPEReleaseScreen); } initialize_screen(NULL, kPEAcquireScreen); - for ( index = 0 ; index < msgbufp->msg_bufx ; index++ ) - { + for (index = 0; index < msgbufp->msg_bufx; index++) { if (msgbufp->msg_bufc[index] == '\0') { continue; } vcputc( 0, 0, msgbufp->msg_bufc[index] ); - if ( msgbufp->msg_bufc[index] == '\n' ) - { - vcputc( 0, 0,'\r' ); + if (msgbufp->msg_bufc[index] == '\n') { + vcputc( 0, 0, '\r' ); } } } } -#if !CONFIG_EMBEDDED +#if defined(XNU_TARGET_OS_OSX) // redraw progress meter between pixels start, end, position at pos, // options (including rotation) passed in flags static void vc_draw_progress_meter(unsigned int flags, int start, int end, int pos) { - const unsigned char *data; - int i, width, bx, srcRow, backRow; - int rectX, rectY, rectW, rectH; - int endCapPos, endCapStart; - int barWidth = kProgressBarWidth * vc_uiscale; - int barHeight = kProgressBarHeight * vc_uiscale; - int capWidth = kProgressBarCapWidth * vc_uiscale; - // 1 rounded fill, 0 square end - int style = (0 == (2 & vc_progress_withmeter)); - // 1 white, 0 greyed out - int onoff; - - for (i = start; i < end; i += width) - { - onoff = (i < pos); - endCapPos = ((style && onoff) ? pos : barWidth); - endCapStart = endCapPos - capWidth; - if (flags & kDataBack) { // restore back bits - width = end; // loop done after this iteration - data = NULL; - srcRow = 0; - } else if (i < capWidth) { // drawing the left cap - width = (end < capWidth) ? (end - i) : (capWidth - i); - data = progressmeter_leftcap[vc_uiscale >= 2][onoff]; - data += i; - srcRow = capWidth; - } else if (i < endCapStart) { // drawing the middle - width = (end < endCapStart) ? 
(end - i) : (endCapStart - i); - data = progressmeter_middle[vc_uiscale >= 2][onoff]; - srcRow = 1; - } else { // drawing the right cap - width = endCapPos - i; - data = progressmeter_rightcap[vc_uiscale >= 2][onoff]; - data += i - endCapStart; - srcRow = capWidth; - } - - switch (flags & kDataRotate) { - case kDataRotate90: // left middle, bar goes down - rectW = barHeight; - rectH = width; - rectX = ((vinfo.v_width / 3) - (barHeight / 2)); - rectY = ((vinfo.v_height - barWidth) / 2) + i; - bx = i * barHeight; - backRow = barHeight; - break; - case kDataRotate180: // middle upper, bar goes left - rectW = width; - rectH = barHeight; - rectX = ((vinfo.v_width - barWidth) / 2) + barWidth - width - i; - rectY = (vinfo.v_height / 3) - (barHeight / 2); - bx = barWidth - width - i; - backRow = barWidth; - break; - case kDataRotate270: // right middle, bar goes up - rectW = barHeight; - rectH = width; - rectX = (vinfo.v_width - (vinfo.v_width / 3) - (barHeight / 2)); - rectY = ((vinfo.v_height - barWidth) / 2) + barWidth - width - i; - bx = (barWidth - width - i) * barHeight; - backRow = barHeight; - break; - default: - case kDataRotate0: // middle lower, bar goes right - rectW = width; - rectH = barHeight; - rectX = ((vinfo.v_width - barWidth) / 2) + i; - rectY = vinfo.v_height - (vinfo.v_height / 3) - (barHeight / 2); - bx = i; - backRow = barWidth; - break; - } - vc_blit_rect(rectX, rectY, bx, rectW, rectH, width, barHeight, - srcRow, backRow, data, vc_progressmeter_backbuffer, flags); - } + const unsigned char *data; + int i, width, bx, srcRow, backRow; + int rectX, rectY, rectW, rectH; + int endCapPos, endCapStart; + int barWidth = kProgressBarWidth * vc_uiscale; + int barHeight = kProgressBarHeight * vc_uiscale; + int capWidth = kProgressBarCapWidth * vc_uiscale; + // 1 rounded fill, 0 square end + int style = (0 == (2 & vc_progress_withmeter)); + // 1 white, 0 greyed out + int onoff; + + for (i = start; i < end; i += width) { + onoff = (i < pos); + endCapPos = ((style && onoff) ? pos : barWidth); + endCapStart = endCapPos - capWidth; + if (flags & kDataBack) { // restore back bits + width = end;// loop done after this iteration + data = NULL; + srcRow = 0; + } else if (i < capWidth) { // drawing the left cap + width = (end < capWidth) ? (end - i) : (capWidth - i); + data = progressmeter_leftcap[vc_uiscale >= 2][onoff]; + data += i; + srcRow = capWidth; + } else if (i < endCapStart) { // drawing the middle + width = (end < endCapStart) ? 
(end - i) : (endCapStart - i); + data = progressmeter_middle[vc_uiscale >= 2][onoff]; + srcRow = 1; + } else { // drawing the right cap + width = endCapPos - i; + data = progressmeter_rightcap[vc_uiscale >= 2][onoff]; + data += i - endCapStart; + srcRow = capWidth; + } + + switch (flags & kDataRotate) { + case kDataRotate90: // left middle, bar goes down + rectW = barHeight; + rectH = width; + rectX = ((vinfo.v_width / 3) - (barHeight / 2)); + rectY = ((vinfo.v_height - barWidth) / 2) + i; + bx = i * barHeight; + backRow = barHeight; + break; + case kDataRotate180: // middle upper, bar goes left + rectW = width; + rectH = barHeight; + rectX = ((vinfo.v_width - barWidth) / 2) + barWidth - width - i; + rectY = (vinfo.v_height / 3) - (barHeight / 2); + bx = barWidth - width - i; + backRow = barWidth; + break; + case kDataRotate270: // right middle, bar goes up + rectW = barHeight; + rectH = width; + rectX = (vinfo.v_width - (vinfo.v_width / 3) - (barHeight / 2)); + rectY = ((vinfo.v_height - barWidth) / 2) + barWidth - width - i; + bx = (barWidth - width - i) * barHeight; + backRow = barHeight; + break; + default: + case kDataRotate0: // middle lower, bar goes right + rectW = width; + rectH = barHeight; + rectX = ((vinfo.v_width - barWidth) / 2) + i; + rectY = vinfo.v_height - (vinfo.v_height / 3) - (barHeight / 2); + bx = i; + backRow = barWidth; + break; + } + vc_blit_rect(rectX, rectY, bx, rectW, rectH, width, barHeight, + srcRow, backRow, data, vc_progressmeter_backbuffer, flags); + } } extern void IORecordProgressBackbuffer(void * buffer, size_t size, uint32_t theme); @@ -3179,130 +3242,123 @@ extern void IORecordProgressBackbuffer(void * buffer, size_t size, uint32_t them static void internal_enable_progressmeter(int new_value) { - spl_t s; - void * new_buffer; - boolean_t stashBackbuffer; - int flags = vinfo.v_rotate; - - stashBackbuffer = FALSE; - new_buffer = NULL; - if (new_value) - { - new_buffer = kalloc((kProgressBarWidth * vc_uiscale) - * (kProgressBarHeight * vc_uiscale) * sizeof(int)); - } - - s = splhigh(); - simple_lock(&vc_progress_lock, LCK_GRP_NULL); - - if (kProgressMeterUser == new_value) - { - if (gc_enabled || !gc_acquired || !gc_graphics_boot - || (kProgressMeterKernel == vc_progressmeter_enable)) new_value = vc_progressmeter_enable; - } - - if (new_value != vc_progressmeter_enable) - { - if (new_value) - { - if (kProgressMeterOff == vc_progressmeter_enable) - { - vc_progressmeter_backbuffer = new_buffer; - vc_draw_progress_meter(kDataAlpha | kSave | flags, 0, (kProgressBarWidth * vc_uiscale), 0); - new_buffer = NULL; - vc_progressmeter_drawn = 0; - } - vc_progressmeter_enable = new_value; + spl_t s; + void * new_buffer; + boolean_t stashBackbuffer; + int flags = vinfo.v_rotate; + + stashBackbuffer = FALSE; + new_buffer = NULL; + if (new_value) { + new_buffer = kheap_alloc(KHEAP_DATA_BUFFERS, + (kProgressBarWidth * vc_uiscale) * + (kProgressBarHeight * vc_uiscale) * sizeof(int), Z_WAITOK); + } + + s = splhigh(); + simple_lock(&vc_progress_lock, LCK_GRP_NULL); + + if (kProgressMeterUser == new_value) { + if (gc_enabled || !gc_acquired || !gc_graphics_boot) { + new_value = vc_progressmeter_enable; + } + } + + if (new_value != vc_progressmeter_enable) { + if (new_value) { + if (kProgressMeterOff == vc_progressmeter_enable) { + vc_progressmeter_backbuffer = new_buffer; + vc_draw_progress_meter(kDataAlpha | kSave | flags, 0, (kProgressBarWidth * vc_uiscale), 0); + new_buffer = NULL; + vc_progressmeter_drawn = 0; + } + vc_progressmeter_enable = new_value; + } else if 
(vc_progressmeter_backbuffer) { + if (kProgressMeterUser == vc_progressmeter_enable) { + vc_draw_progress_meter(kDataBack | flags, 0, (kProgressBarWidth * vc_uiscale), vc_progressmeter_drawn); + } else { + stashBackbuffer = TRUE; + } + new_buffer = vc_progressmeter_backbuffer; + vc_progressmeter_backbuffer = NULL; + vc_progressmeter_enable = FALSE; + } + } + + simple_unlock(&vc_progress_lock); + splx(s); + + if (new_buffer) { + if (stashBackbuffer) { + IORecordProgressBackbuffer(new_buffer, + (kProgressBarWidth * vc_uiscale) + * (kProgressBarHeight * vc_uiscale) + * sizeof(int), + vc_progress_white); + } + kheap_free(KHEAP_DATA_BUFFERS, new_buffer, + (kProgressBarWidth * vc_uiscale) * + (kProgressBarHeight * vc_uiscale) * sizeof(int)); } - else if (vc_progressmeter_backbuffer) - { - if (kProgressMeterUser == vc_progressmeter_enable) - { - vc_draw_progress_meter(kDataBack | flags, 0, (kProgressBarWidth * vc_uiscale), vc_progressmeter_drawn); - } - else stashBackbuffer = TRUE; - new_buffer = vc_progressmeter_backbuffer; - vc_progressmeter_backbuffer = NULL; - vc_progressmeter_enable = FALSE; - } - } - - simple_unlock(&vc_progress_lock); - splx(s); - - if (new_buffer) - { - if (stashBackbuffer) IORecordProgressBackbuffer(new_buffer, - (kProgressBarWidth * vc_uiscale) - * (kProgressBarHeight * vc_uiscale) - * sizeof(int), - vc_progress_white); - kfree(new_buffer, (kProgressBarWidth * vc_uiscale) - * (kProgressBarHeight * vc_uiscale) * sizeof(int)); - } } static void internal_set_progressmeter(int new_value) { - int x1, x3; - int capRedraw; - // 1 rounded fill, 0 square end - int style = (0 == (2 & vc_progress_withmeter)); - int flags = kDataAlpha | vinfo.v_rotate; + int x1, x3; + int capRedraw; + // 1 rounded fill, 0 square end + int style = (0 == (2 & vc_progress_withmeter)); + int flags = kDataAlpha | vinfo.v_rotate; - if ((new_value < 0) || (new_value > kProgressMeterMax)) return; + if ((new_value < 0) || (new_value > kProgressMeterMax)) { + return; + } - if (vc_progressmeter_enable) - { - vc_progressmeter_value = new_value; + if (vc_progressmeter_enable) { + vc_progressmeter_value = new_value; - capRedraw = (style ? (kProgressBarCapWidth * vc_uiscale) : 0); - x3 = (((kProgressBarWidth * vc_uiscale) - 2 * capRedraw) * vc_progressmeter_value) / kProgressMeterMax; - x3 += (2 * capRedraw); + capRedraw = (style ? (kProgressBarCapWidth * vc_uiscale) : 0); + x3 = (((kProgressBarWidth * vc_uiscale) - 2 * capRedraw) * vc_progressmeter_value) / kProgressMeterMax; + x3 += (2 * capRedraw); - if (x3 > vc_progressmeter_drawn) - { - x1 = capRedraw; - if (x1 > vc_progressmeter_drawn) x1 = vc_progressmeter_drawn; - vc_draw_progress_meter(flags, vc_progressmeter_drawn - x1, x3, x3); - } - else - { - vc_draw_progress_meter(flags, x3 - capRedraw, vc_progressmeter_drawn, x3); + if (x3 > vc_progressmeter_drawn) { + x1 = capRedraw; + if (x1 > vc_progressmeter_drawn) { + x1 = vc_progressmeter_drawn; + } + vc_draw_progress_meter(flags, vc_progressmeter_drawn - x1, x3, x3); + } else { + vc_draw_progress_meter(flags, x3 - capRedraw, vc_progressmeter_drawn, x3); + } + vc_progressmeter_drawn = x3; } - vc_progressmeter_drawn = x3; - } } void vc_enable_progressmeter(int new_value) { - if (kProgressMeterKernel == vc_progressmeter_enable) - { - vc_progressmeter_hold = new_value; - } - else - { internal_enable_progressmeter(new_value ? 
kProgressMeterUser : kProgressMeterOff); - } } void vc_set_progressmeter(int new_value) { - spl_t s; + spl_t s; - s = splhigh(); - simple_lock(&vc_progress_lock, LCK_GRP_NULL); + s = splhigh(); + simple_lock(&vc_progress_lock, LCK_GRP_NULL); - if (vc_progressmeter_enable && (kProgressMeterKernel != vc_progressmeter_enable)) - { - internal_set_progressmeter((new_value * kProgressMeterMax) / 100); - } + if (vc_progressmeter_enable) { + if (kProgressMeterKernel != vc_progressmeter_enable) { + internal_set_progressmeter(new_value); + } + } else { + vc_progressmeter_value = new_value; + } - simple_unlock(&vc_progress_lock); - splx(s); + simple_unlock(&vc_progress_lock); + splx(s); } -#endif /* !CONFIG_EMBEDDED */ +#endif /* defined(XNU_TARGET_OS_OSX) */ diff --git a/osfmk/console/video_console.h b/osfmk/console/video_console.h index f9c03eee1..56d310c19 100644 --- a/osfmk/console/video_console.h +++ b/osfmk/console/video_console.h @@ -138,7 +138,7 @@ int vc_display_lzss_icon(uint32_t dst_x, uint32_t dst_y, uint32_t compressed_size, const uint8_t *clut); -#if !CONFIG_EMBEDDED +#if defined(XNU_TARGET_OS_OSX) extern void vc_enable_progressmeter(int new_value); extern void vc_set_progressmeter(int new_value); @@ -146,7 +146,7 @@ extern int vc_progressmeter_enable; extern int vc_progressmeter_value; extern void vc_progress_setdiskspeed(uint32_t speed); -#endif /* !CONFIG_EMBEDDED */ +#endif /* defined(XNU_TARGET_OS_OSX) */ #endif /* XNU_KERNEL_PRIVATE */ diff --git a/osfmk/corecrypto/cc/src/cc_abort.c b/osfmk/corecrypto/cc_abort.c similarity index 97% rename from osfmk/corecrypto/cc/src/cc_abort.c rename to osfmk/corecrypto/cc_abort.c index 726af1668..053a62cd5 100644 --- a/osfmk/corecrypto/cc/src/cc_abort.c +++ b/osfmk/corecrypto/cc_abort.c @@ -35,7 +35,7 @@ #include //cc_abort() is implemented to comply with by FIPS 140-2, when DRBG produces -//two equal consecutive blocks. See rdar://19129408 +//two equal consecutive blocks. #if !CC_PROVIDES_ABORT diff --git a/osfmk/corecrypto/cc/src/cc_clear.c b/osfmk/corecrypto/cc_clear.c similarity index 91% rename from osfmk/corecrypto/cc/src/cc_clear.c rename to osfmk/corecrypto/cc_clear.c index db21af6c9..3bbd5db0f 100644 --- a/osfmk/corecrypto/cc/src/cc_clear.c +++ b/osfmk/corecrypto/cc_clear.c @@ -35,8 +35,6 @@ #include #include "corecrypto/fipspost_trace.h" -//rdar://problem/26986552 - #if (CC_HAS_MEMSET_S == 1) && (defined(__STDC_WANT_LIB_EXT1__) && (__STDC_WANT_LIB_EXT1__ == 1)) void cc_clear(size_t len, void *dst) @@ -62,10 +60,3 @@ cc_clear(size_t len, void *dst) } } #endif - -/* This is an altarnative for clang that should work - * void cc_clear(size_t len, void *dst) __attribute__ ((optnone)) - * { - * cc_clear(len,dst); - * } - */ diff --git a/osfmk/corecrypto/cc/src/cc_cmp_safe.c b/osfmk/corecrypto/cc_cmp_safe.c similarity index 100% rename from osfmk/corecrypto/cc/src/cc_cmp_safe.c rename to osfmk/corecrypto/cc_cmp_safe.c diff --git a/osfmk/corecrypto/ccsha1/src/ccdigest_final_64be.c b/osfmk/corecrypto/ccdigest_final_64be.c similarity index 100% rename from osfmk/corecrypto/ccsha1/src/ccdigest_final_64be.c rename to osfmk/corecrypto/ccdigest_final_64be.c diff --git a/osfmk/corecrypto/ccdigest_init.c b/osfmk/corecrypto/ccdigest_init.c new file mode 100644 index 000000000..b7aab64da --- /dev/null +++ b/osfmk/corecrypto/ccdigest_init.c @@ -0,0 +1,84 @@ +/* + * ccdigest_init.c + * corecrypto + * + * Created on 11/30/2010 + * + * Copyright (c) 2010,2011,2015 Apple Inc. All rights reserved. 
+ * + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. + * + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ + */ + +#include +#include +#include + +#if 0 +#if CC_LOGGING_AVAILABLE +#if CC_FEATURE_FLAGS_AVAILABLE + +#include "cclog_internal.h" +#include + +static void +log_trace(const struct ccdigest_info *di) +{ + if (!CC_FEATURE_ENABLED(ccdigest_logging)) { + return; + } + + if (ccdigest_oid_equal(di, CC_DIGEST_OID_MD2)) { + cclog_error_backtrace(CCLOG_CATEGORY_DEFAULT, "trace: md2"); + } else if (ccdigest_oid_equal(di, CC_DIGEST_OID_MD4)) { + cclog_error_backtrace(CCLOG_CATEGORY_DEFAULT, "trace: md4"); + } else if (ccdigest_oid_equal(di, CC_DIGEST_OID_MD5)) { + cclog_error_backtrace(CCLOG_CATEGORY_ALGORITHM_MD5, "trace: md5"); + } else if (ccdigest_oid_equal(di, CC_DIGEST_OID_SHA1)) { + cclog_error_backtrace(CCLOG_CATEGORY_ALGORITHM_SHA1, "trace: sha1"); + } else if (ccdigest_oid_equal(di, CC_DIGEST_OID_RMD160)) { + cclog_error_backtrace(CCLOG_CATEGORY_DEFAULT, "trace: rmd160"); + } +} + +#endif // CC_FEATURE_FLAGS_AVAILABLE +#endif // CC_LOGGING_AVAILABLE +#endif + +void +ccdigest_init(const struct ccdigest_info *di, ccdigest_ctx_t ctx) +{ +#if 0 +#if CC_LOGGING_AVAILABLE +#if CC_FEATURE_FLAGS_AVAILABLE + log_trace(di); +#endif // CC_FEATURE_FLAGS_AVAILABLE +#endif // CC_LOGGING_AVAILABLE +#endif + + ccdigest_copy_state(di, ccdigest_state_ccn(di, ctx), di->initial_state); + ccdigest_nbits(di, ctx) = 0; + ccdigest_num(di, ctx) = 0; +} diff --git a/osfmk/corecrypto/ccsha1/src/ccdigest_internal.h b/osfmk/corecrypto/ccdigest_internal.h similarity index 100% rename from osfmk/corecrypto/ccsha1/src/ccdigest_internal.h rename to osfmk/corecrypto/ccdigest_internal.h diff --git a/osfmk/corecrypto/ccdigest/src/ccdigest_update.c b/osfmk/corecrypto/ccdigest_update.c similarity index 92% rename from osfmk/corecrypto/ccdigest/src/ccdigest_update.c rename to osfmk/corecrypto/ccdigest_update.c index 6856c4e74..5dd1b91d1 100644 --- a/osfmk/corecrypto/ccdigest/src/ccdigest_update.c +++ b/osfmk/corecrypto/ccdigest_update.c @@ -48,13 +48,12 @@ ccdigest_update(const struct ccdigest_info *di, ccdigest_ctx_t ctx, size_t len, while (len > 0) { if (ccdigest_num(di, ctx) == 0 && len > di->block_size) { - //low-end processors are slow on divison - if (di->block_size == 1 << 6) { //sha256 + if (di->block_size == 1 << 6) { // md5 & sha1 & sha256 nblocks = len 
>> 6; - nbytes = len & 0xFFFFffC0; - } else if (di->block_size == 1 << 7) { //sha512 + nbytes = nblocks << 6; + } else if (di->block_size == 1 << 7) { // sha384 & sha512 nblocks = len >> 7; - nbytes = len & 0xFFFFff80; + nbytes = nblocks << 7; } else { nblocks = len / di->block_size; nbytes = nblocks * di->block_size; diff --git a/osfmk/corecrypto/ccdbrg/src/ccdrbg_nisthmac.c b/osfmk/corecrypto/ccdrbg_nisthmac.c similarity index 100% rename from osfmk/corecrypto/ccdbrg/src/ccdrbg_nisthmac.c rename to osfmk/corecrypto/ccdrbg_nisthmac.c diff --git a/osfmk/corecrypto/cchmac/src/cchmac.c b/osfmk/corecrypto/cchmac.c similarity index 100% rename from osfmk/corecrypto/cchmac/src/cchmac.c rename to osfmk/corecrypto/cchmac.c diff --git a/osfmk/corecrypto/cchmac/src/cchmac_final.c b/osfmk/corecrypto/cchmac_final.c similarity index 100% rename from osfmk/corecrypto/cchmac/src/cchmac_final.c rename to osfmk/corecrypto/cchmac_final.c diff --git a/osfmk/corecrypto/cchmac/src/cchmac_init.c b/osfmk/corecrypto/cchmac_init.c similarity index 100% rename from osfmk/corecrypto/cchmac/src/cchmac_init.c rename to osfmk/corecrypto/cchmac_init.c diff --git a/osfmk/corecrypto/cchmac/src/cchmac_update.c b/osfmk/corecrypto/cchmac_update.c similarity index 100% rename from osfmk/corecrypto/cchmac/src/cchmac_update.c rename to osfmk/corecrypto/cchmac_update.c diff --git a/osfmk/corecrypto/ccsha2/src/ccsha256_K.c b/osfmk/corecrypto/ccsha256_K.c similarity index 100% rename from osfmk/corecrypto/ccsha2/src/ccsha256_K.c rename to osfmk/corecrypto/ccsha256_K.c diff --git a/osfmk/corecrypto/ccsha2/src/ccsha256_di.c b/osfmk/corecrypto/ccsha256_di.c similarity index 100% rename from osfmk/corecrypto/ccsha2/src/ccsha256_di.c rename to osfmk/corecrypto/ccsha256_di.c diff --git a/osfmk/corecrypto/ccsha2/src/ccsha256_initial_state.c b/osfmk/corecrypto/ccsha256_initial_state.c similarity index 100% rename from osfmk/corecrypto/ccsha2/src/ccsha256_initial_state.c rename to osfmk/corecrypto/ccsha256_initial_state.c diff --git a/osfmk/corecrypto/ccsha2/src/ccsha256_ltc_compress.c b/osfmk/corecrypto/ccsha256_ltc_compress.c similarity index 100% rename from osfmk/corecrypto/ccsha2/src/ccsha256_ltc_compress.c rename to osfmk/corecrypto/ccsha256_ltc_compress.c diff --git a/osfmk/corecrypto/ccsha2/src/ccsha256_ltc_di.c b/osfmk/corecrypto/ccsha256_ltc_di.c similarity index 100% rename from osfmk/corecrypto/ccsha2/src/ccsha256_ltc_di.c rename to osfmk/corecrypto/ccsha256_ltc_di.c diff --git a/osfmk/corecrypto/ccsha2/src/ccsha2_internal.h b/osfmk/corecrypto/ccsha2_internal.h similarity index 93% rename from osfmk/corecrypto/ccsha2/src/ccsha2_internal.h rename to osfmk/corecrypto/ccsha2_internal.h index 5a174ab68..b6a1b249d 100644 --- a/osfmk/corecrypto/ccsha2/src/ccsha2_internal.h +++ b/osfmk/corecrypto/ccsha2_internal.h @@ -64,6 +64,7 @@ extern const struct ccdigest_info ccsha224_vng_intel_di; extern const struct ccdigest_info ccsha256_vng_intel_di; extern const struct ccdigest_info ccsha384_vng_intel_di; extern const struct ccdigest_info ccsha512_vng_intel_di; +extern const struct ccdigest_info ccsha512_256_vng_intel_di; #endif extern const struct ccdigest_info ccsha224_vng_intel_AVX2_di; @@ -76,6 +77,10 @@ extern const struct ccdigest_info ccsha384_vng_intel_SupplementalSSE3_di; extern const struct ccdigest_info ccsha512_vng_intel_AVX2_di; extern const struct ccdigest_info ccsha512_vng_intel_AVX1_di; extern const struct ccdigest_info ccsha512_vng_intel_SupplementalSSE3_di; +extern const struct ccdigest_info 
ccsha512_256_vng_intel_AVX2_di; +extern const struct ccdigest_info ccsha512_256_vng_intel_AVX1_di; +extern const struct ccdigest_info ccsha512_256_vng_intel_SupplementalSSE3_di; + #endif void ccsha256_vng_intel_sse3_compress(ccdigest_state_t state, size_t nblocks, const void *in) __asm__("_ccsha256_vng_intel_sse3_compress"); #endif @@ -89,6 +94,6 @@ extern const uint32_t ccsha224_initial_state[8]; extern const uint32_t ccsha256_initial_state[8]; extern const uint64_t ccsha384_initial_state[8]; extern const uint64_t ccsha512_initial_state[8]; - +extern const uint64_t ccsha512_256_initial_state[8]; #endif /* _CORECRYPTO_CCSHA2_INTERNAL_H_ */ diff --git a/osfmk/corpses/corpse.c b/osfmk/corpses/corpse.c index a3d283be4..d440c79bd 100644 --- a/osfmk/corpses/corpse.c +++ b/osfmk/corpses/corpse.c @@ -224,7 +224,7 @@ total_corpses_count(void) * Returns: KERN_SUCCESS if the policy allows for creating a corpse. */ static kern_return_t -task_crashinfo_get_ref(uint16_t kcd_u_flags) +task_crashinfo_get_ref(corpse_flags_t kcd_u_flags) { union corpse_creation_gate oldgate, newgate; @@ -256,7 +256,7 @@ task_crashinfo_get_ref(uint16_t kcd_u_flags) * release the slot for corpse being used. */ static kern_return_t -task_crashinfo_release_ref(uint16_t kcd_u_flags) +task_crashinfo_release_ref(corpse_flags_t kcd_u_flags) { union corpse_creation_gate oldgate, newgate; @@ -285,7 +285,7 @@ task_crashinfo_release_ref(uint16_t kcd_u_flags) kcdata_descriptor_t task_crashinfo_alloc_init(mach_vm_address_t crash_data_p, unsigned size, - uint32_t kc_u_flags, unsigned kc_flags) + corpse_flags_t kc_u_flags, unsigned kc_flags) { kcdata_descriptor_t kcdata; @@ -540,7 +540,7 @@ task_generate_corpse_internal( uint64_t *udata_buffer = NULL; int size = 0; int num_udata = 0; - uint16_t kc_u_flags = CORPSE_CRASHINFO_HAS_REF; + corpse_flags_t kc_u_flags = CORPSE_CRASHINFO_HAS_REF; #if CONFIG_MACF struct label *label = NULL; @@ -669,7 +669,7 @@ error_task_generate_corpse: } /* Free the udata buffer allocated in task_duplicate_map_and_threads */ if (udata_buffer != NULL) { - kfree(udata_buffer, size); + kheap_free(KHEAP_DATA_BUFFERS, udata_buffer, size); } return kr; @@ -726,7 +726,7 @@ task_map_corpse_info_64( { kern_return_t kr; mach_vm_offset_t crash_data_ptr = 0; - mach_vm_size_t size = CORPSEINFO_ALLOCATION_SIZE; + const mach_vm_size_t size = CORPSEINFO_ALLOCATION_SIZE; void *corpse_info_kernel = NULL; if (task == TASK_NULL || task_is_a_corpse_fork(task)) { @@ -743,7 +743,7 @@ task_map_corpse_info_64( if (kr != KERN_SUCCESS) { return kr; } - copyout(corpse_info_kernel, crash_data_ptr, size); + copyout(corpse_info_kernel, (user_addr_t)crash_data_ptr, (size_t)size); *kcd_addr_begin = crash_data_ptr; *kcd_size = size; diff --git a/osfmk/corpses/task_corpse.h b/osfmk/corpses/task_corpse.h index c51a3bbd0..0e7719ece 100644 --- a/osfmk/corpses/task_corpse.h +++ b/osfmk/corpses/task_corpse.h @@ -63,14 +63,16 @@ extern kern_return_t task_mark_corpse(task_t task); extern kern_return_t task_deliver_crash_notification(task_t, thread_t, exception_type_t, mach_exception_subcode_t); /* In the corpseinfo kcd_user_flags */ -#define CORPSE_CRASHINFO_HAS_REF 0x1 -#define CORPSE_CRASHINFO_USER_FAULT 0x2 +__options_closed_decl(corpse_flags_t, uint16_t, { + CORPSE_CRASHINFO_HAS_REF = 0x1, + CORPSE_CRASHINFO_USER_FAULT = 0x2 +}); extern kcdata_descriptor_t task_get_corpseinfo(task_t task); extern kcdata_descriptor_t task_crashinfo_alloc_init( mach_vm_address_t crash_data_p, - unsigned size, uint32_t kc_u_flags, unsigned kc_flags); + unsigned size, 
corpse_flags_t kc_u_flags, unsigned kc_flags); extern kern_return_t task_crashinfo_destroy(kcdata_descriptor_t data); extern void corpses_init(void); diff --git a/osfmk/default_pager/Makefile b/osfmk/default_pager/Makefile index 1a6d194df..c5b78d110 100644 --- a/osfmk/default_pager/Makefile +++ b/osfmk/default_pager/Makefile @@ -39,7 +39,7 @@ ${MIGINCLUDES} : ${MIG_TYPES} ${MIG_UUHDRS} : \ %.h : %.defs - $(call makelog,$(ColorM)MIG$(Color0) $(ColorF)$@$(Color0)) + @$(LOG_MIG) $@ $(_v)$(MIG) $(MIGFLAGS) \ -server /dev/null \ -user /dev/null \ @@ -48,7 +48,7 @@ ${MIG_UUHDRS} : \ ${MIG_USHDRS} : \ %_server.h : %.defs - $(call makelog,$(ColorM)MIG$(Color0) $(ColorF)$@$(Color0)) + @$(LOG_MIG) $@ $(_v)$(MIG) $(MIGFLAGS) \ -server /dev/null \ -user /dev/null \ @@ -99,7 +99,7 @@ ${COMP_FILES} : ${MIG_TYPES} ${MIG_KUSRC} : \ %_user.c : %.defs - $(call makelog,$(ColorM)MIG$(Color0) $(ColorF)$@$(Color0)) + @$(LOG_MIG) $@ $(_v)${MIG} ${MIGFLAGS} ${MIGKUFLAGS} \ -user $*_user.c \ -header $*.h \ @@ -109,7 +109,7 @@ ${MIG_KUSRC} : \ ${MIG_KSSRC}: \ %_server.c : %.defs - $(call makelog,$(ColorM)MIG$(Color0) $(ColorF)$@$(Color0)) + @$(LOG_MIG) $@ $(_v)${MIG} ${MIGFLAGS} ${MIGKSFLAGS} \ -user /dev/null \ -header /dev/null \ diff --git a/osfmk/device/Makefile b/osfmk/device/Makefile index 2f23d1231..aa7b6c448 100644 --- a/osfmk/device/Makefile +++ b/osfmk/device/Makefile @@ -45,7 +45,7 @@ COMP_FILES = ${DEVICE_FILES} do_build_all:: $(COMP_FILES) ${DEVICE_FILES}: device.defs - $(call makelog,$(ColorM)MIG$(Color0) $(ColorF)$@$(Color0)) + @$(LOG_MIG) "$@" $(_v)${MIG} ${MIGFLAGS} ${MIGKSFLAGS} \ -header /dev/null \ -user /dev/null \ diff --git a/osfmk/device/device_init.c b/osfmk/device/device_init.c index 8b1fedd41..a3b055193 100644 --- a/osfmk/device/device_init.c +++ b/osfmk/device/device_init.c @@ -82,11 +82,6 @@ ipc_port_t master_device_port; void *master_device_kobject; -lck_grp_attr_t * dev_lck_grp_attr; -lck_grp_t * dev_lck_grp; -lck_attr_t * dev_lck_attr; -lck_mtx_t iokit_obj_to_port_binding_lock; - void device_service_create(void) { @@ -97,16 +92,6 @@ device_service_create(void) kernel_set_special_port(host_priv_self(), HOST_IO_MASTER_PORT, ipc_port_make_send(master_device_port)); - /* allocate device lock group attribute and group */ - dev_lck_grp_attr = lck_grp_attr_alloc_init(); - dev_lck_grp = lck_grp_alloc_init("device", dev_lck_grp_attr); - - /* Allocate device lock attribute */ - dev_lck_attr = lck_attr_alloc_init(); - - /* Initialize the IOKit object to port binding lock */ - lck_mtx_init(&iokit_obj_to_port_binding_lock, dev_lck_grp, dev_lck_attr); - #if 0 ds_init(); net_io_init(); diff --git a/osfmk/device/iokit_rpc.c b/osfmk/device/iokit_rpc.c index c4c0bce85..f71023158 100644 --- a/osfmk/device/iokit_rpc.c +++ b/osfmk/device/iokit_rpc.c @@ -25,7 +25,6 @@ * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -#include #include #include #include @@ -71,6 +70,9 @@ #define EXTERN #define MIGEXTERN +LCK_GRP_DECLARE(dev_lck_grp, "device"); +LCK_MTX_DECLARE(iokit_obj_to_port_binding_lock, &dev_lck_grp); + /* * Lookup a device by its port. * Doesn't consume the naked send right; produces a device reference. 
@@ -185,8 +187,6 @@ iokit_release_port_send( ipc_port_t port ) ipc_port_release_send( port ); } -extern lck_mtx_t iokit_obj_to_port_binding_lock; - EXTERN void iokit_lock_port( __unused ipc_port_t port ) { diff --git a/osfmk/device/subrs.c b/osfmk/device/subrs.c index 56b25cb7d..e5e7564a7 100644 --- a/osfmk/device/subrs.c +++ b/osfmk/device/subrs.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -137,6 +137,7 @@ #include #include #include +#include /* String routines, from CMU */ #ifdef strcpy @@ -306,6 +307,7 @@ strrchr(const char *s, int c) return __CAST_AWAY_QUALIFIER(found, const, char *); } +#if CONFIG_VSPRINTF /* * Abstract: * strcpy copies the contents of the string "from" including @@ -314,7 +316,6 @@ strrchr(const char *s, int c) * Deprecation Warning: * strcpy() is being deprecated. Please use strlcpy() instead. */ -#if !CONFIG_EMBEDDED char * strcpy( char *to, @@ -385,52 +386,6 @@ atoi(const char *cp) return number; } -/* - * convert an ASCII string (decimal radix) to an integer - * inputs: - * p string pointer. - * t char **, return a pointer to the cahr which terminates the - * numeric string. - * returns: - * integer value of the numeric string. - * side effect: - * pointer to terminating char. - */ - -int -atoi_term( - char *p, /* IN */ - char **t) /* OUT */ -{ - int n; - int f; - - n = 0; - f = 0; - for (;; p++) { - switch (*p) { - case ' ': - case '\t': - continue; - case '-': - f++; - case '+': - p++; - } - break; - } - while (*p >= '0' && *p <= '9') { - n = n * 10 + *p++ - '0'; - } - - /* return pointer to terminating character */ - if (t) { - *t = p; - } - - return f? -n: n; -} - /* * Does the same thing as strlen, except only looks up * to max chars inside the buffer. @@ -495,11 +450,11 @@ itoa( return str; } +#if CONFIG_VSPRINTF /* * Deprecation Warning: * strcat() is being deprecated. Please use strlcat() instead. 
*/ -#if !CONFIG_EMBEDDED char * strcat( char *dest, @@ -652,8 +607,8 @@ strprefix(const char *s1, const char *s2) return 1; } -char * -strnstr(char *s, const char *find, size_t slen) +const char * +strnstr(const char *s, const char *find, size_t slen) { char c, sc; size_t len; @@ -685,6 +640,7 @@ char * __strncat_chk(char *restrict dst, const char *restrict src, size_t len, s char * __strcpy_chk(char *restrict dst, const char *restrict src, size_t chk_size); char * __strcat_chk(char *restrict dst, const char *restrict src, size_t chk_size); +MARK_AS_HIBERNATE_TEXT void * __memcpy_chk(void *dst, void const *src, size_t s, size_t chk_size) { @@ -703,6 +659,7 @@ __memmove_chk(void *dst, void const *src, size_t s, size_t chk_size) return memmove(dst, src, s); } +MARK_AS_HIBERNATE_TEXT void * __memset_chk(void *dst, int c, size_t s, size_t chk_size) { diff --git a/osfmk/gssd/Makefile b/osfmk/gssd/Makefile index 3f42fac82..508c2b52d 100644 --- a/osfmk/gssd/Makefile +++ b/osfmk/gssd/Makefile @@ -41,12 +41,12 @@ COMP_FILES = ${MIG_KUSRC} do_build_all:: $(COMP_FILES) ${MIG_KUSRC} : gssd_mach.defs - $(call makelog,$(ColorM)MIG$(Color0) $(ColorF)$@$(Color0)) - $(_v)${MIG} ${MIGFLAGS} ${MIGKUFLAGS} \ - -user gssd_mach.c \ - -header gssd_mach.h \ - -server /dev/null \ - -sheader /dev/null \ + @$(LOG_MIG) "$@" + $(_v)${MIG} ${MIGFLAGS} ${MIGKUFLAGS} \ + -user gssd_mach.c \ + -header gssd_mach.h \ + -server /dev/null \ + -sheader /dev/null \ $< include $(MakeInc_rule) diff --git a/osfmk/i386/AT386/model_dep.c b/osfmk/i386/AT386/model_dep.c index 267b6259e..1962ce5d0 100644 --- a/osfmk/i386/AT386/model_dep.c +++ b/osfmk/i386/AT386/model_dep.c @@ -79,6 +79,7 @@ #include #include #include +#include #include #include #include @@ -90,6 +91,7 @@ #include #include #include +#include #include #include #include @@ -156,12 +158,11 @@ typedef enum paniclog_flush_type { void paniclog_flush_internal(paniclog_flush_type_t variant); extern const char version[]; -extern char osversion[]; -extern int max_unsafe_quanta; +extern char osversion[]; extern int max_poll_quanta; extern unsigned int panic_is_inited; -extern int proc_pid(void *p); +extern int proc_pid(struct proc *); /* Definitions for frame pointers */ #define FP_ALIGNMENT_MASK ((uint32_t)(0x3)) @@ -185,7 +186,6 @@ typedef struct _cframe_t { #endif } cframe_t; -static unsigned panic_io_port; static unsigned commit_paniclog_to_nvram; boolean_t coprocessor_paniclog_flush = FALSE; @@ -193,16 +193,21 @@ struct kcdata_descriptor kc_panic_data; static boolean_t begun_panic_stackshot = FALSE; extern kern_return_t do_stackshot(void *); -extern void kdp_snapshot_preflight(int pid, void *tracebuf, - uint32_t tracebuf_size, uint32_t flags, +extern void kdp_snapshot_preflight(int pid, void * tracebuf, + uint32_t tracebuf_size, uint64_t flags, kcdata_descriptor_t data_p, - boolean_t enable_faulting); + uint64_t since_timestamp, uint32_t pagetable_mask); extern int kdp_stack_snapshot_bytes_traced(void); +extern int kdp_stack_snapshot_bytes_uncompressed(void); +extern void stackshot_memcpy(void *dst, const void *src, size_t len); vm_offset_t panic_stackshot_buf = 0; size_t panic_stackshot_buf_len = 0; size_t panic_stackshot_len = 0; + +boolean_t is_clock_configured = FALSE; + /* * Backtrace a single frame. 
*/ @@ -311,25 +316,11 @@ machine_startup(void) hw_lock_init(&pbtlock); /* initialize print backtrace lock */ - if (PE_parse_boot_argn("preempt", &boot_arg, sizeof(boot_arg))) { - default_preemption_rate = boot_arg; - } - if (PE_parse_boot_argn("unsafe", &boot_arg, sizeof(boot_arg))) { - max_unsafe_quanta = boot_arg; - } - if (PE_parse_boot_argn("poll", &boot_arg, sizeof(boot_arg))) { - max_poll_quanta = boot_arg; - } if (PE_parse_boot_argn("yield", &boot_arg, sizeof(boot_arg))) { sched_poll_yield_shift = boot_arg; } -/* The I/O port to issue a read from, in the event of a panic. Useful for - * triggering logic analyzers. - */ - if (PE_parse_boot_argn("panic_io_port", &boot_arg, sizeof(boot_arg))) { - /*I/O ports range from 0 through 0xFFFF */ - panic_io_port = boot_arg & 0xffff; - } + + panic_notify_init(); machine_conf(); @@ -414,85 +405,6 @@ efi_set_tables_64(EFI_SYSTEM_TABLE_64 * system_table) } while (FALSE); } -static void -efi_set_tables_32(EFI_SYSTEM_TABLE_32 * system_table) -{ - EFI_RUNTIME_SERVICES_32 *runtime; - uint32_t hdr_cksum; - uint32_t cksum; - - DPRINTF("Processing 32-bit EFI tables at %p\n", system_table); - do { - DPRINTF("Header:\n"); - DPRINTF(" Signature: 0x%016llx\n", system_table->Hdr.Signature); - DPRINTF(" Revision: 0x%08x\n", system_table->Hdr.Revision); - DPRINTF(" HeaderSize: 0x%08x\n", system_table->Hdr.HeaderSize); - DPRINTF(" CRC32: 0x%08x\n", system_table->Hdr.CRC32); - DPRINTF("RuntimeServices: 0x%08x\n", system_table->RuntimeServices); - if (system_table->Hdr.Signature != EFI_SYSTEM_TABLE_SIGNATURE) { - kprintf("Bad EFI system table signature\n"); - break; - } - // Verify signature of the system table - hdr_cksum = system_table->Hdr.CRC32; - system_table->Hdr.CRC32 = 0; - DPRINTF("System table at %p HeaderSize 0x%x\n", system_table, system_table->Hdr.HeaderSize); - cksum = crc32(0L, system_table, system_table->Hdr.HeaderSize); - - DPRINTF("System table calculated CRC32 = 0x%x, header = 0x%x\n", cksum, hdr_cksum); - system_table->Hdr.CRC32 = hdr_cksum; - if (cksum != hdr_cksum) { - kprintf("Bad EFI system table checksum\n"); - break; - } - - gPEEFISystemTable = system_table; - - if (system_table->RuntimeServices == 0) { - kprintf("No runtime table present\n"); - break; - } - DPRINTF("RuntimeServices table at 0x%x\n", system_table->RuntimeServices); - // 32-bit virtual address is OK for 32-bit EFI and 32-bit kernel. 
- // For a 64-bit kernel, booter provides a virtual address mod 4G - runtime = (EFI_RUNTIME_SERVICES_32 *) - (system_table->RuntimeServices | VM_MIN_KERNEL_ADDRESS); - DPRINTF("Runtime table addressed at %p\n", runtime); - if (runtime->Hdr.Signature != EFI_RUNTIME_SERVICES_SIGNATURE) { - kprintf("Bad EFI runtime table signature\n"); - break; - } - - // Verify signature of runtime services table - hdr_cksum = runtime->Hdr.CRC32; - runtime->Hdr.CRC32 = 0; - cksum = crc32(0L, runtime, runtime->Hdr.HeaderSize); - - DPRINTF("Runtime table calculated CRC32 = 0x%x, header = 0x%x\n", cksum, hdr_cksum); - runtime->Hdr.CRC32 = hdr_cksum; - if (cksum != hdr_cksum) { - kprintf("Bad EFI runtime table checksum\n"); - break; - } - - DPRINTF("Runtime functions\n"); - DPRINTF(" GetTime : 0x%x\n", runtime->GetTime); - DPRINTF(" SetTime : 0x%x\n", runtime->SetTime); - DPRINTF(" GetWakeupTime : 0x%x\n", runtime->GetWakeupTime); - DPRINTF(" SetWakeupTime : 0x%x\n", runtime->SetWakeupTime); - DPRINTF(" SetVirtualAddressMap : 0x%x\n", runtime->SetVirtualAddressMap); - DPRINTF(" ConvertPointer : 0x%x\n", runtime->ConvertPointer); - DPRINTF(" GetVariable : 0x%x\n", runtime->GetVariable); - DPRINTF(" GetNextVariableName : 0x%x\n", runtime->GetNextVariableName); - DPRINTF(" SetVariable : 0x%x\n", runtime->SetVariable); - DPRINTF(" GetNextHighMonotonicCount: 0x%x\n", runtime->GetNextHighMonotonicCount); - DPRINTF(" ResetSystem : 0x%x\n", runtime->ResetSystem); - - gPEEFIRuntimeServices = runtime; - } while (FALSE); -} - - /* Map in EFI runtime areas. */ static void efi_init(void) @@ -552,7 +464,7 @@ efi_init(void) if (args->efiMode == kBootArgsEfiMode64) { efi_set_tables_64((EFI_SYSTEM_TABLE_64 *) ml_static_ptovirt(args->efiSystemTable)); } else { - efi_set_tables_32((EFI_SYSTEM_TABLE_32 *) ml_static_ptovirt(args->efiSystemTable)); + panic("Unsupported 32-bit EFI system table!"); } } while (FALSE); @@ -645,7 +557,7 @@ hibernate_newruntime_map(void * map, vm_size_t map_size, uint32_t system_table_o if (args->efiMode == kBootArgsEfiMode64) { efi_set_tables_64((EFI_SYSTEM_TABLE_64 *) ml_static_ptovirt(args->efiSystemTable)); } else { - efi_set_tables_32((EFI_SYSTEM_TABLE_32 *) ml_static_ptovirt(args->efiSystemTable)); + panic("Unsupported 32-bit EFI system table!"); } } while (FALSE); @@ -689,6 +601,7 @@ machine_init(void) * Configure clock devices. */ clock_config(); + is_clock_configured = TRUE; #if CONFIG_MTRR /* @@ -738,19 +651,6 @@ halt_all_cpus(boolean_t reboot) } } - -/* Issue an I/O port read if one has been requested - this is an event logic - * analyzers can use as a trigger point. 
- */ - -void -panic_io_port_read(void) -{ - if (panic_io_port) { - (void)inb(panic_io_port); - } -} - /* For use with the MP rendezvous mechanism */ @@ -766,7 +666,7 @@ void RecordPanicStackshot() { int err = 0; - size_t bytes_traced = 0, bytes_used = 0, bytes_remaining = 0; + size_t bytes_traced = 0, bytes_uncompressed = 0, bytes_used = 0, bytes_remaining = 0; char *stackshot_begin_loc = NULL; /* Don't re-enter this code if we panic here */ @@ -810,7 +710,7 @@ RecordPanicStackshot() err = kcdata_memory_static_init(&kc_panic_data, (mach_vm_address_t)stackshot_begin_loc, - KCDATA_BUFFER_BEGIN_STACKSHOT, (unsigned int) bytes_remaining, KCFLAG_USE_MEMCOPY); + KCDATA_BUFFER_BEGIN_COMPRESSED, (unsigned int) bytes_remaining, KCFLAG_USE_MEMCOPY); if (err != KERN_SUCCESS) { panic_info->mph_panic_flags |= MACOS_PANIC_HEADER_FLAG_STACKSHOT_FAILED_ERROR; panic_info->mph_other_log_offset = PE_get_offset_into_panic_region(debug_buf_ptr); @@ -818,9 +718,16 @@ RecordPanicStackshot() return; } - uint32_t stackshot_flags = (STACKSHOT_SAVE_KEXT_LOADINFO | STACKSHOT_SAVE_LOADINFO | STACKSHOT_KCDATA_FORMAT | - STACKSHOT_ENABLE_BT_FAULTING | STACKSHOT_ENABLE_UUID_FAULTING | STACKSHOT_FROM_PANIC | - STACKSHOT_NO_IO_STATS | STACKSHOT_THREAD_WAITINFO); + uint64_t stackshot_flags = (STACKSHOT_SAVE_KEXT_LOADINFO | STACKSHOT_SAVE_LOADINFO | STACKSHOT_KCDATA_FORMAT | + STACKSHOT_ENABLE_BT_FAULTING | STACKSHOT_ENABLE_UUID_FAULTING | STACKSHOT_FROM_PANIC | STACKSHOT_DO_COMPRESS | + STACKSHOT_NO_IO_STATS | STACKSHOT_THREAD_WAITINFO | STACKSHOT_DISABLE_LATENCY_INFO | STACKSHOT_GET_DQ); + + err = kcdata_init_compress(&kc_panic_data, KCDATA_BUFFER_BEGIN_STACKSHOT, stackshot_memcpy, KCDCT_ZLIB); + if (err != KERN_SUCCESS) { + panic_info->mph_panic_flags |= MACOS_PANIC_HEADER_FLAG_STACKSHOT_FAILED_COMPRESS; + stackshot_flags &= ~STACKSHOT_DO_COMPRESS; + } + #if DEVELOPMENT /* * Include the shared cache layout in panic stackshots on DEVELOPMENT kernels so that we can symbolicate @@ -829,10 +736,11 @@ RecordPanicStackshot() stackshot_flags |= STACKSHOT_COLLECT_SHAREDCACHE_LAYOUT; #endif - kdp_snapshot_preflight(-1, (void *) stackshot_begin_loc, (uint32_t) bytes_remaining, stackshot_flags, &kc_panic_data, 0); + kdp_snapshot_preflight(-1, (void *) stackshot_begin_loc, (uint32_t) bytes_remaining, stackshot_flags, &kc_panic_data, 0, 0); err = do_stackshot(NULL); - bytes_traced = (int) kdp_stack_snapshot_bytes_traced(); - bytes_used = (int) kcdata_memory_get_used_bytes(&kc_panic_data); + bytes_traced = (size_t) kdp_stack_snapshot_bytes_traced(); + bytes_uncompressed = (size_t) kdp_stack_snapshot_bytes_uncompressed(); + bytes_used = (size_t) kcdata_memory_get_used_bytes(&kc_panic_data); if ((err != KERN_SUCCESS) && (bytes_used > 0)) { /* @@ -855,8 +763,8 @@ RecordPanicStackshot() return; } - stackshot_flags = (STACKSHOT_SAVE_KEXT_LOADINFO | STACKSHOT_KCDATA_FORMAT | STACKSHOT_FROM_PANIC | - STACKSHOT_NO_IO_STATS | STACKSHOT_THREAD_WAITINFO | STACKSHOT_ACTIVE_KERNEL_THREADS_ONLY); + stackshot_flags = (STACKSHOT_SAVE_KEXT_LOADINFO | STACKSHOT_KCDATA_FORMAT | STACKSHOT_FROM_PANIC | STACKSHOT_DISABLE_LATENCY_INFO | + STACKSHOT_NO_IO_STATS | STACKSHOT_THREAD_WAITINFO | STACKSHOT_ACTIVE_KERNEL_THREADS_ONLY | STACKSHOT_GET_DQ); #if DEVELOPMENT /* * Include the shared cache layout in panic stackshots on DEVELOPMENT kernels so that we can symbolicate @@ -865,10 +773,11 @@ RecordPanicStackshot() stackshot_flags |= STACKSHOT_COLLECT_SHAREDCACHE_LAYOUT; #endif - kdp_snapshot_preflight(-1, (void *) stackshot_begin_loc, (uint32_t) 
bytes_remaining, stackshot_flags, &kc_panic_data, 0); + kdp_snapshot_preflight(-1, (void *) stackshot_begin_loc, (uint32_t) bytes_remaining, stackshot_flags, &kc_panic_data, 0, 0); err = do_stackshot(NULL); - bytes_traced = (int) kdp_stack_snapshot_bytes_traced(); - bytes_used = (int) kcdata_memory_get_used_bytes(&kc_panic_data); + bytes_traced = (size_t) kdp_stack_snapshot_bytes_traced(); + bytes_uncompressed = (size_t) kdp_stack_snapshot_bytes_uncompressed(); + bytes_used = (size_t) kcdata_memory_get_used_bytes(&kc_panic_data); } if (err == KERN_SUCCESS) { @@ -876,11 +785,22 @@ RecordPanicStackshot() debug_buf_ptr += bytes_traced; } panic_info->mph_panic_flags |= MACOS_PANIC_HEADER_FLAG_STACKSHOT_SUCCEEDED; - panic_info->mph_stackshot_offset = PE_get_offset_into_panic_region(stackshot_begin_loc); - panic_info->mph_stackshot_len = (uint32_t) bytes_traced; + + /* On other systems this is not in the debug buffer itself, it's in a separate buffer allocated at boot. */ + if (extended_debug_log_enabled) { + panic_info->mph_stackshot_offset = PE_get_offset_into_panic_region(stackshot_begin_loc); + panic_info->mph_stackshot_len = (uint32_t) bytes_traced; + } else { + panic_info->mph_stackshot_offset = panic_info->mph_stackshot_len = 0; + } panic_info->mph_other_log_offset = PE_get_offset_into_panic_region(debug_buf_ptr); - kdb_printf("\n** In Memory Panic Stackshot Succeeded ** Bytes Traced %zu **\n", bytes_traced); + if (stackshot_flags & STACKSHOT_DO_COMPRESS) { + panic_info->mph_panic_flags |= MACOS_PANIC_HEADER_FLAG_STACKSHOT_DATA_COMPRESSED; + kdb_printf("\n** In Memory Panic Stackshot Succeeded ** Bytes Traced %zu (Uncompressed %zu) **\n", bytes_traced, bytes_uncompressed); + } else { + kdb_printf("\n** In Memory Panic Stackshot Succeeded ** Bytes Traced %zu **\n", bytes_traced); + } /* Used by the code that writes the buffer to disk */ panic_stackshot_buf = (vm_offset_t) stackshot_begin_loc; @@ -924,7 +844,7 @@ SavePanicInfo( * Issue an I/O port read if one has been requested - this is an event logic * analyzers can use as a trigger point. */ - panic_io_port_read(); + panic_notify(); /* Obtain frame pointer for stack to trace */ if (panic_options & DEBUGGER_INTERNAL_OPTION_THREAD_BACKTRACE) { @@ -1193,6 +1113,112 @@ panic_print_macho_symbol_name(kernel_mach_header_t *mh, vm_address_t search, con return 0; } +static void +panic_display_uptime(void) +{ + uint64_t uptime; + absolutetime_to_nanoseconds(mach_absolute_time(), &uptime); + + paniclog_append_noflush("\nSystem uptime in nanoseconds: %llu\n", uptime); +} + +extern uint32_t gIOHibernateCount; + +static void +panic_display_hib_count(void) +{ + paniclog_append_noflush("Hibernation exit count: %u\n", gIOHibernateCount); +} + +extern AbsoluteTime gIOLastSleepAbsTime; +extern AbsoluteTime gIOLastWakeAbsTime; +extern uint64_t gAcpiLastSleepTscBase; +extern uint64_t gAcpiLastSleepNanoBase; +extern uint64_t gAcpiLastWakeTscBase; +extern uint64_t gAcpiLastWakeNanoBase; +extern boolean_t is_clock_configured; + +static void +panic_display_times(void) +{ + if (!is_clock_configured) { + paniclog_append_noflush("Warning: clock is not configured. 
Can't get time\n"); + return; + } + + paniclog_append_noflush("Last Sleep: absolute base_tsc base_nano\n"); + paniclog_append_noflush(" Uptime : 0x%016llx\n", mach_absolute_time()); + paniclog_append_noflush(" Sleep : 0x%016llx 0x%016llx 0x%016llx\n", gIOLastSleepAbsTime, gAcpiLastSleepTscBase, gAcpiLastSleepNanoBase); + paniclog_append_noflush(" Wake : 0x%016llx 0x%016llx 0x%016llx\n", gIOLastWakeAbsTime, gAcpiLastWakeTscBase, gAcpiLastWakeNanoBase); +} + +static void +panic_display_disk_errors(void) +{ + if (panic_disk_error_description[0]) { + panic_disk_error_description[panic_disk_error_description_size - 1] = '\0'; + paniclog_append_noflush("Root disk errors: \"%s\"\n", panic_disk_error_description); + } +} + +static void +panic_display_shutdown_status(void) +{ +#if defined(__i386__) || defined(__x86_64__) + paniclog_append_noflush("System shutdown begun: %s\n", IOPMRootDomainGetWillShutdown() ? "YES" : "NO"); + if (gIOPolledCoreFileMode == kIOPolledCoreFileModeNotInitialized) { + paniclog_append_noflush("Panic diags file unavailable, panic occurred prior to initialization\n"); + } else if (gIOPolledCoreFileMode != kIOPolledCoreFileModeDisabled) { + /* + * If we haven't marked the corefile as explicitly disabled, and we've made it past initialization, then we know the current + * system was configured to use disk based diagnostics at some point. + */ + paniclog_append_noflush("Panic diags file available: %s (0x%x)\n", (gIOPolledCoreFileMode != kIOPolledCoreFileModeClosed) ? "YES" : "NO", kdp_polled_corefile_error()); + } +#endif +} + +extern const char version[]; +extern char osversion[]; + +static volatile uint32_t config_displayed = 0; + +static void +panic_display_system_configuration(boolean_t launchd_exit) +{ + if (!launchd_exit) { + panic_display_process_name(); + } + if (OSCompareAndSwap(0, 1, &config_displayed)) { + char buf[256]; + if (!launchd_exit && strlcpy(buf, PE_boot_args(), sizeof(buf))) { + paniclog_append_noflush("Boot args: %s\n", buf); + } + paniclog_append_noflush("\nMac OS version:\n%s\n", + (osversion[0] != 0) ? osversion : "Not yet set"); + paniclog_append_noflush("\nKernel version:\n%s\n", version); + panic_display_kernel_uuid(); + if (!launchd_exit) { + panic_display_kernel_aslr(); + panic_display_hibb(); + panic_display_pal_info(); + } + panic_display_model_name(); + panic_display_disk_errors(); + panic_display_shutdown_status(); + if (!launchd_exit) { + panic_display_hib_count(); + panic_display_uptime(); + panic_display_times(); + panic_display_zprint(); +#if CONFIG_ZLEAKS + panic_display_ztrace(); +#endif /* CONFIG_ZLEAKS */ + kext_dump_panic_lists(&paniclog_append_noflush); + } + } +} + extern kmod_info_t * kmod; /* the list of modules */ static void @@ -1583,3 +1609,117 @@ print_launchd_info(void) ; } } + +/* + * Compares 2 EFI GUIDs. Returns true if they match. + */ +static bool +efi_compare_guids(EFI_GUID *guid1, EFI_GUID *guid2) +{ + return (bcmp(guid1, guid2, sizeof(EFI_GUID)) == 0) ? true : false; +} + +/* + * Converts from an efiboot-originated virtual address to a physical + * address. + */ +static inline uint64_t +efi_efiboot_virtual_to_physical(uint64_t addr) +{ + if (addr >= VM_MIN_KERNEL_ADDRESS) { + return addr & (0x40000000ULL - 1); + } else { + return addr; + } +} + +/* + * Convers from a efiboot-originated virtual address to an accessible + * pointer to that physical address by translating it to a physmap-relative + * address. 
+ */ +static void * +efi_efiboot_virtual_to_physmap_virtual(uint64_t addr) +{ + return PHYSMAP_PTOV(efi_efiboot_virtual_to_physical(addr)); +} + +/* + * Returns the physical address of the firmware table identified + * by the passed-in GUID, or 0 if the table could not be located. + */ +static uint64_t +efi_get_cfgtbl_by_guid(EFI_GUID *guidp) +{ + EFI_CONFIGURATION_TABLE_64 *cfg_table_entp, *cfgTable; + boot_args *args = (boot_args *)PE_state.bootArgs; + EFI_SYSTEM_TABLE_64 *estp; + uint32_t i, hdr_cksum, cksum; + + estp = (EFI_SYSTEM_TABLE_64 *)efi_efiboot_virtual_to_physmap_virtual(args->efiSystemTable); + + assert(estp != 0); + + // Verify signature of the system table + hdr_cksum = estp->Hdr.CRC32; + estp->Hdr.CRC32 = 0; + cksum = crc32(0L, estp, estp->Hdr.HeaderSize); + estp->Hdr.CRC32 = hdr_cksum; + + if (cksum != hdr_cksum) { + DPRINTF("efi_get_cfgtbl_by_guid: EST CRC32 = 0x%x, header = 0x%x\n", cksum, hdr_cksum); + DPRINTF("Bad EFI system table checksum\n"); + return 0; + } + + /* + * efiboot can (and will) change the address of ConfigurationTable (and each table's VendorTable address) + * to a kernel-virtual address. Reverse that to get the physical address, which we then use to get a + * physmap-based virtual address. + */ + cfgTable = (EFI_CONFIGURATION_TABLE_64 *)efi_efiboot_virtual_to_physmap_virtual(estp->ConfigurationTable); + + for (i = 0; i < estp->NumberOfTableEntries; i++) { + cfg_table_entp = (EFI_CONFIGURATION_TABLE_64 *)&cfgTable[i]; + + DPRINTF("EST: Comparing GUIDs for entry %d\n", i); + if (cfg_table_entp == 0) { + continue; + } + + if (efi_compare_guids(&cfg_table_entp->VendorGuid, guidp) == true) { + DPRINTF("GUID match: returning %p\n", (void *)(uintptr_t)cfg_table_entp->VendorTable); + return efi_efiboot_virtual_to_physical(cfg_table_entp->VendorTable); + } + } + + /* Not found */ + return 0; +} + +/* + * Returns the physical address of the RSDP (either v1 or >=v2) or 0 + * if the RSDP could not be located. + */ +uint64_t +efi_get_rsdp_physaddr(void) +{ + uint64_t rsdp_addr; +#define ACPI_RSDP_GUID \ + { 0xeb9d2d30, 0x2d88, 0x11d3, {0x9a, 0x16, 0x0, 0x90, 0x27, 0x3f, 0xc1, 0x4d} } +#define ACPI_20_RSDP_GUID \ + { 0x8868e871, 0xe4f1, 0x11d3, {0xbc, 0x22, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81} } + + static EFI_GUID EFI_RSDP_GUID_ACPI20 = ACPI_20_RSDP_GUID; + static EFI_GUID EFI_RSDP_GUID_ACPI10 = ACPI_RSDP_GUID; + + if ((rsdp_addr = efi_get_cfgtbl_by_guid(&EFI_RSDP_GUID_ACPI20)) == 0) { + DPRINTF("RSDP ACPI 2.0 lookup failed. 
Trying RSDP ACPI 1.0...\n"); + rsdp_addr = efi_get_cfgtbl_by_guid(&EFI_RSDP_GUID_ACPI10); + if (rsdp_addr == 0) { + DPRINTF("RSDP ACPI 1.0 lookup failed also.\n"); + } + } + + return rsdp_addr; +} diff --git a/osfmk/i386/Diagnostics.c b/osfmk/i386/Diagnostics.c index 4f1360216..54836380a 100644 --- a/osfmk/i386/Diagnostics.c +++ b/osfmk/i386/Diagnostics.c @@ -236,6 +236,7 @@ diagCall64(x86_saved_state_t * state) switch (cpuid_cpufamily()) { case CPUFAMILY_INTEL_SKYLAKE: case CPUFAMILY_INTEL_KABYLAKE: + case CPUFAMILY_INTEL_ICELAKE: ia_perf_limits = MSR_IA32_IA_PERF_LIMIT_REASONS_SKL; break; default: diff --git a/osfmk/i386/Makefile b/osfmk/i386/Makefile index e46ad5748..06391748e 100644 --- a/osfmk/i386/Makefile +++ b/osfmk/i386/Makefile @@ -41,6 +41,7 @@ EXPORT_ONLY_FILES = \ seg.h \ simple_lock.h \ smp.h \ + trap.h \ tsc.h \ tss.h \ ucode.h \ diff --git a/osfmk/i386/acpi.c b/osfmk/i386/acpi.c index ecd26bfae..d4e14d511 100644 --- a/osfmk/i386/acpi.c +++ b/osfmk/i386/acpi.c @@ -55,6 +55,16 @@ #include +#define UINT64 uint64_t +#define UINT32 uint32_t +#define UINT16 uint16_t +#define UINT8 uint8_t +#define RSDP_VERSION_ACPI10 0 +#define RSDP_VERSION_ACPI20 2 +#include +#include +#include + #include #include #include @@ -82,6 +92,12 @@ extern kern_return_t IOCPURunPlatformHaltRestartActions(uint32_t message); extern void fpinit(void); +#if DEVELOPMENT || DEBUG +#define DBG(x...) kprintf(x) +#else +#define DBG(x...) +#endif + vm_offset_t acpi_install_wake_handler(void) { @@ -159,10 +175,6 @@ acpi_hibernate(void *refcon) #endif /* CONFIG_SLEEP */ extern void slave_pstart(void); -extern void hibernate_rebuild_vm_structs(void); - -extern unsigned int wake_nkdbufs; -extern unsigned int trace_wrap; void acpi_sleep_kernel(acpi_sleep_callback func, void *refcon) @@ -196,7 +208,7 @@ acpi_sleep_kernel(acpi_sleep_callback func, void *refcon) } /* shutdown local APIC before passing control to firmware */ - lapic_shutdown(); + lapic_shutdown(true); #if HIBERNATION data.func = func; @@ -310,7 +322,7 @@ acpi_sleep_kernel(acpi_sleep_callback func, void *refcon) /* re-enable and re-init local apic (prior to starting timers) */ if (lapic_probe()) { - lapic_configure(); + lapic_configure(true); } #if KASAN @@ -321,21 +333,13 @@ acpi_sleep_kernel(acpi_sleep_callback func, void *refcon) kasan_unpoison_curstack(true); #endif -#if HIBERNATION - hibernate_rebuild_vm_structs(); -#endif - elapsed += mach_absolute_time() - start; rtc_decrementer_configure(); kdebug_enable = save_kdebug_enable; if (kdebug_enable == 0) { - if (wake_nkdbufs) { - start = mach_absolute_time(); - kdebug_trace_start(wake_nkdbufs, NULL, trace_wrap != 0, TRUE); - elapsed_trace_start += mach_absolute_time() - start; - } + elapsed_trace_start += kdebug_wake(); } start = mach_absolute_time(); @@ -345,16 +349,6 @@ acpi_sleep_kernel(acpi_sleep_callback func, void *refcon) IOCPURunPlatformActiveActions(); -#if HIBERNATION - if (did_hibernate) { - KDBG(IOKDBG_CODE(DBG_HIBERNATE, 2) | DBG_FUNC_START); - hibernate_machine_init(); - KDBG(IOKDBG_CODE(DBG_HIBERNATE, 2) | DBG_FUNC_END); - - current_cpu_datap()->cpu_hibernate = 0; - } -#endif /* HIBERNATION */ - KDBG(IOKDBG_CODE(DBG_HIBERNATE, 0) | DBG_FUNC_END, start, elapsed, elapsed_trace_start, acpi_wake_abstime); @@ -384,6 +378,27 @@ acpi_sleep_kernel(acpi_sleep_callback func, void *refcon) #endif /* CONFIG_SLEEP */ } +void +ml_hibernate_active_pre(void) +{ +#if HIBERNATION + hibernate_rebuild_vm_structs(); +#endif /* HIBERNATION */ +} + +void +ml_hibernate_active_post(void) +{ +#if HIBERNATION + if 
(current_cpu_datap()->cpu_hibernate) { + KDBG(IOKDBG_CODE(DBG_HIBERNATE, 2) | DBG_FUNC_START); + hibernate_machine_init(); + KDBG(IOKDBG_CODE(DBG_HIBERNATE, 2) | DBG_FUNC_END); + current_cpu_datap()->cpu_hibernate = 0; + } +#endif /* HIBERNATION */ +} + /* * acpi_idle_kernel is called by the ACPI Platform kext to request the kernel * to idle the boot processor in the deepest C-state for S0 sleep. All slave @@ -475,13 +490,13 @@ acpi_idle_kernel(acpi_sleep_callback func, void *refcon) MACHDBG_CODE(DBG_MACH_SCHED, MACH_DEEP_IDLE) | DBG_FUNC_END, acpi_wake_abstime, acpi_wake_abstime - acpi_idle_abstime, 0, 0, 0); +#if MONOTONIC + mt_cpu_up(cpu_datap(0)); +#endif /* MONOTONIC */ + /* Like S3 sleep, turn on tracing if trace_wake boot-arg is present */ if (kdebug_enable == 0) { - if (wake_nkdbufs) { - __kdebug_only uint64_t start = mach_absolute_time(); - kdebug_trace_start(wake_nkdbufs, NULL, trace_wrap != 0, TRUE); - KDBG(IOKDBG_CODE(DBG_HIBERNATE, 15), start); - } + kdebug_wake(); } IOCPURunPlatformActiveActions(); @@ -526,3 +541,201 @@ ml_recent_wake(void) assert(ctime > acpi_wake_postrebase_abstime); return (ctime - acpi_wake_postrebase_abstime) < 5 * NSEC_PER_SEC; } + +static uint8_t +cksum8(uint8_t *ptr, uint32_t size) +{ + uint8_t sum = 0; + uint32_t i; + + for (i = 0; i < size; i++) { + sum += ptr[i]; + } + + return sum; +} + +/* + * Parameterized search for a specified table given an sdtp (either RSDT or XSDT). + * Note that efiboot does not modify the addresses of tables in the RSDT or XSDT + * TableOffsetEntry array, so we do not need to "convert" from efiboot virtual to + * physical. + */ +#define SEARCH_FOR_ACPI_TABLE(sdtp, signature, entry_type) \ +{ \ + uint32_t i, pointer_count; \ + \ + /* Walk the list of tables in the *SDT, looking for the signature passed in */ \ + pointer_count = ((sdtp)->Length - sizeof(ACPI_TABLE_HEADER)) / sizeof(entry_type); \ + \ + for (i = 0; i < pointer_count; i++) { \ + ACPI_TABLE_HEADER *next_table = \ + (ACPI_TABLE_HEADER *)PHYSMAP_PTOV( \ + (uintptr_t)(sdtp)->TableOffsetEntry[i]); \ + if (strncmp(&next_table->Signature[0], (signature), 4) == 0) { \ + /* \ + * Checksum the table first, then return it if the checksum \ + * is valid. \ + */ \ + if (cksum8((uint8_t *)next_table, next_table->Length) == 0) { \ + return next_table; \ + } else { \ + DBG("Invalid checksum for table [%s]@0x%lx!\n", (signature), \ + (unsigned long)(sdtp)->TableOffsetEntry[i]); \ + return NULL; \ + } \ + } \ + } \ + \ + return NULL; \ +} + +static ACPI_TABLE_HEADER * +acpi_find_table_via_xsdt(XSDT_DESCRIPTOR *xsdtp, const char *signature) +{ + SEARCH_FOR_ACPI_TABLE(xsdtp, signature, UINT64); +} + +static ACPI_TABLE_HEADER * +acpi_find_table_via_rsdt(RSDT_DESCRIPTOR *rsdtp, const char *signature) +{ + SEARCH_FOR_ACPI_TABLE(rsdtp, signature, UINT32); +} + +/* + * Returns a pointer to an ACPI table header corresponding to the table + * whose signature is passed in, or NULL if no such table could be found. + */ +static ACPI_TABLE_HEADER * +acpi_find_table(uintptr_t rsdp_physaddr, const char *signature) +{ + static RSDP_DESCRIPTOR *rsdp = NULL; + static XSDT_DESCRIPTOR *xsdtp = NULL; + static RSDT_DESCRIPTOR *rsdtp = NULL; + + if (signature == NULL) { + DBG("Invalid NULL signature passed to acpi_find_table\n"); + return NULL; + } + + /* + * RSDT or XSDT is required; without it, we cannot locate other tables. 
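cksum8() above implements the standard ACPI rule that a table must sum to zero byte-wise, checksum field included; the RSDP, XSDT/RSDT, and MADT validation in this file all rely on it. A small self-contained illustration of producing and verifying such a checksum:

    #include <assert.h>
    #include <stdint.h>

    static uint8_t
    sum8(const uint8_t *p, uint32_t len)
    {
        uint8_t s = 0;
        for (uint32_t i = 0; i < len; i++) {
            s = (uint8_t)(s + p[i]);
        }
        return s;
    }

    int
    main(void)
    {
        uint8_t table[16] = "ACPI-demo-data";                   /* hypothetical table body */

        table[15] = 0;                                           /* pretend byte 15 is the checksum */
        table[15] = (uint8_t)(0u - sum8(table, sizeof(table)));

        assert(sum8(table, sizeof(table)) == 0);                 /* what cksum8(...) == 0 asserts */
        return 0;
    }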
+ */ + if (__improbable(rsdp == NULL || (rsdtp == NULL && xsdtp == NULL))) { + rsdp = PHYSMAP_PTOV(rsdp_physaddr); + + /* Verify RSDP signature */ + if (__improbable(strncmp((void *)rsdp, "RSD PTR ", 8) != 0)) { + DBG("RSDP signature mismatch: Aborting acpi_find_table\n"); + rsdp = NULL; + return NULL; + } + + /* Verify RSDP checksum */ + if (__improbable(cksum8((uint8_t *)rsdp, sizeof(RSDP_DESCRIPTOR)) != 0)) { + DBG("RSDP@0x%lx signature mismatch: Aborting acpi_find_table\n", + (unsigned long)rsdp_physaddr); + rsdp = NULL; + return NULL; + } + + /* Ensure the revision of the RSDP indicates the presence of an RSDT or XSDT */ + if (__improbable(rsdp->Revision >= RSDP_VERSION_ACPI20 && rsdp->XsdtPhysicalAddress == 0ULL)) { + DBG("RSDP XSDT Physical Address is 0!: Aborting acpi_find_table\n"); + rsdp = NULL; + return NULL; + } else if (__probable(rsdp->Revision >= RSDP_VERSION_ACPI20)) { + /* XSDT (with 64-bit pointers to tables) */ + rsdtp = NULL; + xsdtp = PHYSMAP_PTOV(rsdp->XsdtPhysicalAddress); + if (cksum8((uint8_t *)xsdtp, xsdtp->Length) != 0) { + DBG("ERROR: XSDT@0x%lx checksum is non-zero; not using this XSDT\n", + (unsigned long)rsdp->XsdtPhysicalAddress); + xsdtp = NULL; + return NULL; + } + } else if (__improbable(rsdp->Revision == RSDP_VERSION_ACPI10 && rsdp->RsdtPhysicalAddress == 0)) { + DBG("RSDP RSDT Physical Address is 0!: Aborting acpi_find_table\n"); + rsdp = NULL; + return NULL; + } else if (__improbable(rsdp->Revision == RSDP_VERSION_ACPI10)) { + /* RSDT (with 32-bit pointers to tables) */ + xsdtp = NULL; + rsdtp = PHYSMAP_PTOV((uintptr_t)rsdp->RsdtPhysicalAddress); + if (cksum8((uint8_t *)rsdtp, rsdtp->Length) != 0) { + DBG("ERROR: RSDT@0x%lx checksum is non-zero; not using this RSDT\n", + (unsigned long)rsdp->RsdtPhysicalAddress); + rsdtp = NULL; + return NULL; + } + } else { + DBG("Unrecognized RSDP Revision (0x%x): Aborting acpi_find_table\n", + rsdp->Revision); + rsdp = NULL; + return NULL; + } + } + + assert(xsdtp != NULL || rsdtp != NULL); + + if (__probable(xsdtp != NULL)) { + return acpi_find_table_via_xsdt(xsdtp, signature); + } else if (rsdtp != NULL) { + return acpi_find_table_via_rsdt(rsdtp, signature); + } + + return NULL; +} + +/* + * Returns the count of enabled logical processors present in the ACPI + * MADT, or 0 if the MADT could not be located. 
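acpi_count_enabled_logical_processors() below walks the MADT's variable-length subtables: each entry begins with a (Type, Length) header, and Processor Local APIC entries carry an enabled flag. A standalone sketch of that walk over a fabricated buffer, with simplified layouts standing in for the ACPICA types used by the real code:

    #include <stdint.h>
    #include <stdio.h>

    struct sub_hdr   { uint8_t type; uint8_t length; };
    struct lapic_sub { struct sub_hdr h; uint8_t acpi_id; uint8_t apic_id; uint32_t flags; };

    #define SUB_TYPE_LAPIC 0
    #define LAPIC_ENABLED  0x1u

    /* Count enabled local-APIC entries in a [buf, buf+len) MADT body. */
    static uint32_t
    count_enabled(const uint8_t *buf, uint32_t len)
    {
        uint32_t n = 0, off = 0;

        while (off + sizeof(struct sub_hdr) <= len) {
            const struct sub_hdr *h = (const struct sub_hdr *)(buf + off);

            if (h->length == 0 || off + h->length > len) {
                break;                          /* malformed entry: stop rather than loop */
            }
            if (h->type == SUB_TYPE_LAPIC &&
                (((const struct lapic_sub *)h)->flags & LAPIC_ENABLED)) {
                n++;
            }
            off += h->length;                   /* entries are variable-length */
        }
        return n;
    }

    int
    main(void)
    {
        struct lapic_sub demo[2] = {
            { { SUB_TYPE_LAPIC, sizeof(struct lapic_sub) }, 0, 0, LAPIC_ENABLED },
            { { SUB_TYPE_LAPIC, sizeof(struct lapic_sub) }, 1, 1, 0 },
        };

        printf("%u enabled\n", count_enabled((const uint8_t *)demo, sizeof(demo)));
        return 0;
    }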
+ */ +uint32_t +acpi_count_enabled_logical_processors(void) +{ + MULTIPLE_APIC_TABLE *madtp; + void *end_ptr; + APIC_HEADER *next_apic_entryp; + uint32_t enabled_cpu_count = 0; + uint64_t rsdp_physaddr; + + rsdp_physaddr = efi_get_rsdp_physaddr(); + if (__improbable(rsdp_physaddr == 0)) { + DBG("acpi_count_enabled_logical_processors: Could not get RSDP physaddr from EFI.\n"); + return 0; + } + + madtp = (MULTIPLE_APIC_TABLE *)acpi_find_table(rsdp_physaddr, ACPI_SIG_MADT); + + if (__improbable(madtp == NULL)) { + DBG("acpi_count_enabled_logical_processors: Could not find the MADT.\n"); + return 0; + } + + end_ptr = (void *)((uintptr_t)madtp + madtp->Length); + next_apic_entryp = (APIC_HEADER *)((uintptr_t)madtp + sizeof(MULTIPLE_APIC_TABLE)); + + while ((void *)next_apic_entryp < end_ptr) { + switch (next_apic_entryp->Type) { + case APIC_PROCESSOR: + { + MADT_PROCESSOR_APIC *madt_procp = (MADT_PROCESSOR_APIC *)next_apic_entryp; + if (madt_procp->ProcessorEnabled) { + enabled_cpu_count++; + } + + break; + } + + default: + DBG("Ignoring MADT entry type 0x%x length 0x%x\n", next_apic_entryp->Type, + next_apic_entryp->Length); + break; + } + + next_apic_entryp = (APIC_HEADER *)((uintptr_t)next_apic_entryp + next_apic_entryp->Length); + } + + return enabled_cpu_count; +} diff --git a/osfmk/i386/acpi.h b/osfmk/i386/acpi.h index 5d911b089..77fa3a959 100644 --- a/osfmk/i386/acpi.h +++ b/osfmk/i386/acpi.h @@ -48,6 +48,7 @@ extern vm_offset_t acpi_install_wake_handler(void); extern void acpi_sleep_kernel(acpi_sleep_callback func, void * refcon); extern void acpi_idle_kernel(acpi_sleep_callback func, void * refcon); void install_real_mode_bootstrap(void *prot_entry); +extern uint32_t acpi_count_enabled_logical_processors(void); #endif /* ASSEMBLER */ #endif /* !_I386_ACPI_H_ */ diff --git a/osfmk/i386/asm.h b/osfmk/i386/asm.h index 50905a62e..17b28bc99 100644 --- a/osfmk/i386/asm.h +++ b/osfmk/i386/asm.h @@ -91,9 +91,9 @@ #endif /* There is another definition of ALIGN for .c sources */ -#ifdef ASSEMBLER +#ifdef __ASSEMBLER__ #define ALIGN 4,0x90 -#endif /* ASSEMBLER */ +#endif /* __ASSEMBLER__ */ #ifndef FALIGN #define FALIGN ALIGN @@ -265,7 +265,7 @@ #define Lgmemload(lab,reg) movl Lgotoff(lab),reg #define Lgmemstore(reg,lab,tmp) movl reg,Lgotoff(lab) -#ifndef ASSEMBLER +#ifndef __ASSEMBLER__ /* These defines are here for .c files that wish to reference global symbols * within __asm__ statements. */ @@ -274,7 +274,7 @@ #else #define CC_SYM_PREFIX "" #endif /* __NO_UNDERSCORES__ */ -#endif /* ASSEMBLER */ +#endif /* __ASSEMBLER__ */ /* * The following macros make calls into C code. diff --git a/osfmk/i386/bsd_i386.c b/osfmk/i386/bsd_i386.c index ee93e5b10..7c01567ea 100644 --- a/osfmk/i386/bsd_i386.c +++ b/osfmk/i386/bsd_i386.c @@ -73,6 +73,10 @@ extern void mach_kauth_cred_uthread_update(void); extern void throttle_lowpri_io(int); #endif +#if CONFIG_MACF +#include +#endif + void * find_user_regs(thread_t); unsigned int get_msr_exportmask(void); @@ -552,8 +556,30 @@ mach_call_munger(x86_saved_state_t *state) MACHDBG_CODE(DBG_MACH_EXCP_SC, (call_number)) | DBG_FUNC_START, args.arg1, args.arg2, args.arg3, args.arg4, 0); +#if CONFIG_MACF + /* Check mach trap filter mask, if exists. */ + task_t task = current_task(); + uint8_t *filter_mask = task->mach_trap_filter_mask; + + if (__improbable(filter_mask != NULL && + !bitstr_test(filter_mask, call_number))) { + /* Not in filter mask, evaluate policy. 
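The mach trap filter consulted above is a per-task bit vector indexed by trap number; bitstr_test() is a byte-plus-bit lookup, roughly the stand-in below (mask_allows is hypothetical, not the bitstring.h macro). When the bit is clear, the task's MAC policy is given a chance to reject the trap before mach_call() ever runs.

    #include <stdbool.h>
    #include <stdint.h>

    /* Bit n lives in byte n/8 at position n%8, the usual bitstring layout. */
    static bool
    mask_allows(const uint8_t *mask, unsigned int call_number)
    {
        return (mask[call_number >> 3] & (uint8_t)(1u << (call_number & 7))) != 0;
    }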
*/ + if (mac_task_mach_trap_evaluate != NULL) { + retval = mac_task_mach_trap_evaluate(get_bsdtask_info(task), + call_number); + if (retval) { + goto skip_machcall; + } + } + } +#endif /* CONFIG_MACF */ + retval = mach_call(&args); +#if CONFIG_MACF +skip_machcall: +#endif + DEBUG_KPRINT_SYSCALL_MACH("mach_call_munger: retval=0x%x\n", retval); KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, @@ -653,8 +679,30 @@ mach_call_munger64(x86_saved_state_t *state) mach_kauth_cred_uthread_update(); #endif +#if CONFIG_MACF + /* Check syscall filter mask, if exists. */ + task_t task = current_task(); + uint8_t *filter_mask = task->mach_trap_filter_mask; + + if (__improbable(filter_mask != NULL && + !bitstr_test(filter_mask, call_number))) { + /* Not in filter mask, evaluate policy. */ + if (mac_task_mach_trap_evaluate != NULL) { + regs->rax = mac_task_mach_trap_evaluate(get_bsdtask_info(task), + call_number); + if (regs->rax) { + goto skip_machcall; + } + } + } +#endif /* CONFIG_MACF */ + regs->rax = (uint64_t)mach_call((void *)&args); +#if CONFIG_MACF +skip_machcall: +#endif + DEBUG_KPRINT_SYSCALL_MACH( "mach_call_munger64: retval=0x%llx\n", regs->rax); KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, @@ -713,7 +761,7 @@ thread_setuserstack( * Returns the adjusted user stack pointer from the machine * dependent thread state info. Used for small (<2G) deltas. */ -uint64_t +user_addr_t thread_adjuserstack( thread_t thread, int adjust) diff --git a/osfmk/i386/commpage/commpage.c b/osfmk/i386/commpage/commpage.c index 2c4a40d83..001fe16af 100644 --- a/osfmk/i386/commpage/commpage.c +++ b/osfmk/i386/commpage/commpage.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003-2019 Apple Inc. All rights reserved. + * Copyright (c) 2003-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -247,9 +247,9 @@ commpage_specific_addr_of(char *commPageBase, commpage_address_t addr_at_runtime static int commpage_cpus( void ) { - int cpus; + unsigned int cpus; - cpus = ml_get_max_cpus(); // NB: this call can block + cpus = ml_wait_max_cpus(); // NB: this call can block if (cpus == 0) { panic("commpage cpus==0"); @@ -276,27 +276,28 @@ commpage_init_cpu_capabilities( void ) switch (cpu_info.vector_unit) { case 9: bits |= kHasAVX1_0; - /* fall thru */ + OS_FALLTHROUGH; case 8: bits |= kHasSSE4_2; - /* fall thru */ + OS_FALLTHROUGH; case 7: bits |= kHasSSE4_1; - /* fall thru */ + OS_FALLTHROUGH; case 6: bits |= kHasSupplementalSSE3; - /* fall thru */ + OS_FALLTHROUGH; case 5: bits |= kHasSSE3; - /* fall thru */ + OS_FALLTHROUGH; case 4: bits |= kHasSSE2; - /* fall thru */ + OS_FALLTHROUGH; case 3: bits |= kHasSSE; - /* fall thru */ + OS_FALLTHROUGH; case 2: bits |= kHasMMX; + OS_FALLTHROUGH; default: break; } @@ -509,7 +510,6 @@ commpage_populate_one( { uint8_t c1; uint16_t c2; - int c4; uint64_t c8; uint32_t cfamily; short version = _COMM_PAGE_THIS_VERSION; @@ -540,10 +540,7 @@ commpage_populate_one( } commpage_stuff(_COMM_PAGE_CACHE_LINESIZE, &c2, 2); - c4 = MP_SPIN_TRIES; - commpage_stuff(_COMM_PAGE_SPIN_COUNT, &c4, 4); - - /* machine_info valid after ml_get_max_cpus() */ + /* machine_info valid after ml_wait_max_cpus() */ c1 = machine_info.physical_cpu_max; commpage_stuff(_COMM_PAGE_PHYSICAL_CPUS, &c1, 1); c1 = machine_info.logical_cpu_max; @@ -554,6 +551,9 @@ commpage_populate_one( cfamily = cpuid_info()->cpuid_cpufamily; commpage_stuff(_COMM_PAGE_CPUFAMILY, &cfamily, 4); + c1 = PAGE_SHIFT; + commpage_stuff(_COMM_PAGE_KERNEL_PAGE_SHIFT, &c1, 1); + commpage_stuff(_COMM_PAGE_USER_PAGE_SHIFT_64, &c1, 1); if (next > 
_COMM_PAGE_END) { panic("commpage overflow: next = 0x%08x, commPagePtr = 0x%p", next, commPagePtr); @@ -785,35 +785,6 @@ commpage_set_memory_pressure( } } - -/* Update _COMM_PAGE_SPIN_COUNT. We might want to reduce when running on a battery, etc. */ - -void -commpage_set_spin_count( - unsigned int count ) -{ - char *cp; - uint32_t *ip; - - if (count == 0) { /* we test for 0 after decrement, not before */ - count = 1; - } - - cp = commPagePtr32; - if (cp) { - cp += (_COMM_PAGE_SPIN_COUNT - _COMM_PAGE32_BASE_ADDRESS); - ip = (uint32_t*) (void *) cp; - *ip = (uint32_t) count; - } - - cp = commPagePtr64; - if (cp) { - cp += (_COMM_PAGE_SPIN_COUNT - _COMM_PAGE32_START_ADDRESS); - ip = (uint32_t*) (void *) cp; - *ip = (uint32_t) count; - } -} - /* Updated every time a logical CPU goes offline/online */ void commpage_update_active_cpus(void) @@ -930,13 +901,13 @@ commpage_update_dyld_flags(uint64_t value) cp = commPagePtr32; if (cp) { - cp += (_COMM_PAGE_DYLD_SYSTEM_FLAGS - _COMM_PAGE32_BASE_ADDRESS); + cp += (_COMM_PAGE_DYLD_FLAGS - _COMM_PAGE32_BASE_ADDRESS); *(uint64_t *)cp = value; } cp = commPagePtr64; if (cp) { - cp += (_COMM_PAGE_DYLD_SYSTEM_FLAGS - _COMM_PAGE32_BASE_ADDRESS); + cp += (_COMM_PAGE_DYLD_FLAGS - _COMM_PAGE32_BASE_ADDRESS); *(uint64_t *)cp = value; } } diff --git a/osfmk/i386/commpage/commpage.h b/osfmk/i386/commpage/commpage.h index 2bf2a41f9..6b573dea0 100644 --- a/osfmk/i386/commpage/commpage.h +++ b/osfmk/i386/commpage/commpage.h @@ -36,17 +36,6 @@ #include #endif /* __ASSEMBLER__ */ -/* When trying to acquire a spinlock or mutex, we will spin in - * user mode for awhile, before entering the kernel to relinquish. - * MP_SPIN_TRIES is the initial value of _COMM_PAGE_SPIN_COUNT. - * The idea is that _COMM_PAGE_SPIN_COUNT will be adjusted up or - * down as the machine is plugged in/out, etc. - * At present spinlocks do not use _COMM_PAGE_SPIN_COUNT. - * They use MP_SPIN_TRIES directly. - */ -#define MP_SPIN_TRIES 1000 - - /* The following macro is used to generate the 64-bit commpage address for a given * routine, based on its 32-bit address. This is used in the kernel to compile * the 64-bit commpage. Since the kernel can be a 32-bit object, cpu_capabilities.h diff --git a/osfmk/i386/commpage/commpage_asm.s b/osfmk/i386/commpage/commpage_asm.s index 1a695f0bd..47c81d1b9 100644 --- a/osfmk/i386/commpage/commpage_asm.s +++ b/osfmk/i386/commpage/commpage_asm.s @@ -31,34 +31,6 @@ #include #include -/* - * extern void commpage_sched_gen_inc(void); - */ - .text - - .globl _commpage_sched_gen_inc -_commpage_sched_gen_inc: - FRAME - - /* Increment 32-bit commpage field if present */ - movq _commPagePtr32(%rip),%rdx - testq %rdx,%rdx - je 1f - subq $(ASM_COMM_PAGE32_BASE_ADDRESS),%rdx - lock - incl ASM_COMM_PAGE_SCHED_GEN(%rdx) - - /* Increment 64-bit commpage field if present */ - movq _commPagePtr64(%rip),%rdx - testq %rdx,%rdx - je 1f - subq $(ASM_COMM_PAGE32_START_ADDRESS),%rdx - lock - incl ASM_COMM_PAGE_SCHED_GEN(%rdx) -1: - EMARF - ret - /* pointers to the 32-bit commpage routine descriptors */ /* WARNING: these must be sorted by commpage address! 
*/ .const_data diff --git a/osfmk/i386/cpu.c b/osfmk/i386/cpu.c index bad6b1016..2b5ef1584 100644 --- a/osfmk/i386/cpu.c +++ b/osfmk/i386/cpu.c @@ -31,7 +31,6 @@ * cpu specific routines */ -#include #include #include #include @@ -196,33 +195,6 @@ cpu_machine_init( #endif } -processor_t -cpu_processor_alloc(boolean_t is_boot_cpu) -{ - int ret; - processor_t proc; - - if (is_boot_cpu) { - return &processor_master; - } - - ret = kmem_alloc(kernel_map, (vm_offset_t *) &proc, sizeof(*proc), VM_KERN_MEMORY_OSFMK); - if (ret != KERN_SUCCESS) { - return NULL; - } - - bzero((void *) proc, sizeof(*proc)); - return proc; -} - -void -cpu_processor_free(processor_t proc) -{ - if (proc != NULL && proc != &processor_master) { - kfree(proc, sizeof(*proc)); - } -} - processor_t current_processor(void) { diff --git a/osfmk/i386/cpu_capabilities.h b/osfmk/i386/cpu_capabilities.h index 89f8fc52d..c0b62d37b 100644 --- a/osfmk/i386/cpu_capabilities.h +++ b/osfmk/i386/cpu_capabilities.h @@ -86,6 +86,7 @@ #define kHasAVX512BITALG 0x0001000000000000ULL #define kHasAVX512VPOPCNTDQ 0x0002000000000000ULL +#define kIsTranslated 0x4000000000000000ULL #ifndef __ASSEMBLER__ #include @@ -182,16 +183,16 @@ _NumCPUs( void ) #define _COMM_PAGE_CPU_CAPABILITIES64 (_COMM_PAGE_START_ADDRESS+0x010) /* uint64_t _cpu_capabilities */ #define _COMM_PAGE_UNUSED (_COMM_PAGE_START_ADDRESS+0x018) /* 6 unused bytes */ #define _COMM_PAGE_VERSION (_COMM_PAGE_START_ADDRESS+0x01E) /* 16-bit version# */ -#define _COMM_PAGE_THIS_VERSION 13 /* in ver 13, _COMM_PAGE_NT_SHIFT defaults to 0 (was 32) */ +#define _COMM_PAGE_THIS_VERSION 14 #define _COMM_PAGE_CPU_CAPABILITIES (_COMM_PAGE_START_ADDRESS+0x020) /* uint32_t _cpu_capabilities (retained for compatibility) */ #define _COMM_PAGE_NCPUS (_COMM_PAGE_START_ADDRESS+0x022) /* uint8_t number of configured CPUs (hw.logicalcpu at boot time) */ #define _COMM_PAGE_UNUSED0 (_COMM_PAGE_START_ADDRESS+0x024) /* 2 unused bytes, previouly reserved for expansion of cpu_capabilities */ #define _COMM_PAGE_CACHE_LINESIZE (_COMM_PAGE_START_ADDRESS+0x026) /* uint16_t cache line size */ -#define _COMM_PAGE_SCHED_GEN (_COMM_PAGE_START_ADDRESS+0x028) /* uint32_t scheduler generation number (count of pre-emptions) */ +#define _COMM_PAGE_UNUSED4 (_COMM_PAGE_START_ADDRESS+0x028) /* used to be _COMM_PAGE_SCHED_GEN: uint32_t scheduler generation number (count of pre-emptions) */ #define _COMM_PAGE_MEMORY_PRESSURE (_COMM_PAGE_START_ADDRESS+0x02c) /* uint32_t copy of vm_memory_pressure */ -#define _COMM_PAGE_SPIN_COUNT (_COMM_PAGE_START_ADDRESS+0x030) /* uint32_t max spin count for mutex's */ +#define _COMM_PAGE_UNUSED3 (_COMM_PAGE_START_ADDRESS+0x030) /* used to be _COMM_PAGE_SPIN_COUNT: uint32_t max spin count for mutex's */ #define _COMM_PAGE_ACTIVE_CPUS (_COMM_PAGE_START_ADDRESS+0x034) /* uint8_t number of active CPUs (hw.activecpu) */ #define _COMM_PAGE_PHYSICAL_CPUS (_COMM_PAGE_START_ADDRESS+0x035) /* uint8_t number of physical CPUs (hw.physicalcpu_max) */ @@ -204,7 +205,9 @@ _NumCPUs( void ) #define _COMM_PAGE_ATM_DIAGNOSTIC_CONFIG (_COMM_PAGE_START_ADDRESS+0x48) /* uint32_t export "atm_diagnostic_config" to userspace */ #define _COMM_PAGE_DTRACE_DOF_ENABLED (_COMM_PAGE_START_ADDRESS+0x04C) /* uint8_t 0 if userspace DOF disable, 1 if enabled */ -#define _COMM_PAGE_UNUSED2 (_COMM_PAGE_START_ADDRESS+0x04D) /* [0x4D,0x50) unused */ +#define _COMM_PAGE_KERNEL_PAGE_SHIFT (_COMM_PAGE_START_ADDRESS+0x04D) /* uint8_t kernel vm page shift. 
COMM_PAGE_VERSION >= 14 */ +#define _COMM_PAGE_USER_PAGE_SHIFT_64 (_COMM_PAGE_START_ADDRESS+0x04E) /* uint8_t user vm page shift. COMM_PAGE_VERSION >= 14 */ +#define _COMM_PAGE_UNUSED2 (_COMM_PAGE_START_ADDRESS+0x04F) /* 0x4F unused */ #define _COMM_PAGE_TIME_DATA_START (_COMM_PAGE_START_ADDRESS+0x050) /* base of offsets below (_NT_SCALE etc) */ #define _COMM_PAGE_NT_TSC_BASE (_COMM_PAGE_START_ADDRESS+0x050) /* used by nanotime() */ @@ -226,7 +229,7 @@ _NumCPUs( void ) #define _COMM_PAGE_NEWTIMEOFDAY_DATA (_COMM_PAGE_START_ADDRESS+0x0D0) /* used by gettimeofday(). Currently, sizeof(new_commpage_timeofday_data_t) = 40 */ /* Resume packed values to the next cacheline */ -#define _COMM_PAGE_DYLD_SYSTEM_FLAGS (_COMM_PAGE_START_ADDRESS+0x100) /* uint64_t export kern.dyld_system_flags to userspace */ +#define _COMM_PAGE_DYLD_FLAGS (_COMM_PAGE_START_ADDRESS+0x100) /* uint64_t export kern.dyld_system_flags to userspace */ #define _COMM_PAGE_END (_COMM_PAGE_START_ADDRESS+0xfff) /* end of common page */ @@ -287,6 +290,12 @@ _NumCPUs( void ) #define _COMM_PAGE_COMPARE_AND_SWAP32B (_COMM_PAGE_START_ADDRESS+0xf80) /* compare-and-swap word w barrier */ #define _COMM_PAGE_COMPARE_AND_SWAP64B (_COMM_PAGE_START_ADDRESS+0xfc0) /* compare-and-swap doubleword w barrier */ +/* + * _COMM_PAGE_USER_PAGE_SHIFT32 and _COMM_PAGE_USER_PAGE_SHIFT64 are the same on x86. + * But both defined to maintain compatability with the arm commpage. + */ +#define _COMM_PAGE_USER_PAGE_SHIFT_32 _COMM_PAGE_USER_PAGE_SHIFT_64 + #ifdef __ASSEMBLER__ #ifdef __COMM_PAGE_SYMBOLS diff --git a/osfmk/i386/cpu_data.h b/osfmk/i386/cpu_data.h index 9f0e07fe8..10d235517 100644 --- a/osfmk/i386/cpu_data.h +++ b/osfmk/i386/cpu_data.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2019 Apple Inc. All rights reserved. + * Copyright (c) 2000-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -199,6 +199,7 @@ typedef struct cpu_data { struct pal_cpu_data cpu_pal_data; /* PAL-specific data */ #define cpu_pd cpu_pal_data /* convenience alias */ struct cpu_data *cpu_this; /* pointer to myself */ + vm_offset_t cpu_pcpu_base; thread_t cpu_active_thread; thread_t cpu_nthread; int cpu_number; /* Logical CPU */ @@ -224,7 +225,6 @@ typedef struct cpu_data { boolean_t cpu_fixed_pmcs_enabled; #endif /* !MONOTONIC */ rtclock_timer_t rtclock_timer; - uint64_t quantum_timer_deadline; volatile addr64_t cpu_active_cr3 __attribute((aligned(64))); union { volatile uint32_t cpu_tlb_invalid; @@ -262,20 +262,10 @@ typedef struct cpu_data { uint16_t cpu_tlb_gen_counts_global[MAX_CPUS]; struct processor *cpu_processor; -#if NCOPY_WINDOWS > 0 - struct cpu_pmap *cpu_pmap; -#endif struct real_descriptor *cpu_ldtp; struct cpu_desc_table *cpu_desc_tablep; cpu_desc_index_t cpu_desc_index; int cpu_ldt; -#if NCOPY_WINDOWS > 0 - vm_offset_t cpu_copywindow_base; - uint64_t *cpu_copywindow_pdp; - - vm_offset_t cpu_physwindow_base; - uint64_t *cpu_physwindow_ptep; -#endif #define HWINTCNT_SIZE 256 uint32_t cpu_hwIntCnt[HWINTCNT_SIZE]; /* Interrupt counts */ @@ -313,6 +303,12 @@ typedef struct cpu_data { uint64_t cpu_rtime_total; uint64_t cpu_ixtime; uint64_t cpu_idle_exits; + /* + * Note that the cacheline-copy mechanism uses the cpu_rtimes field in the shadow CPU + * structures to temporarily stash the code cacheline that includes the instruction + * pointer at the time of the fault (this field is otherwise unused in the shadow + * CPU structures). 
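User code consumes these commpage fields by loading fixed offsets from the commpage base; the page is mapped read-only into every process and no system call is involved. A hedged user-space sketch: the base literal below is believed to match the x86_64 _COMM_PAGE_START_ADDRESS but should be taken from cpu_capabilities.h rather than trusted, and the 0x022/0x04E offsets mirror the table above (0x04E exists only for commpage version >= 14):

    #include <stdint.h>
    #include <stdio.h>

    /* Assumed x86_64 commpage base; verify against cpu_capabilities.h. */
    #define COMM_PAGE_BASE             0x00007fffffe00000ULL
    #define COMM_PAGE_NCPUS            (COMM_PAGE_BASE + 0x022)   /* uint8_t */
    #define COMM_PAGE_USER_PAGE_SHIFT  (COMM_PAGE_BASE + 0x04E)   /* uint8_t, version >= 14 */

    int
    main(void)
    {
        uint8_t ncpus = *(volatile uint8_t *)(uintptr_t)COMM_PAGE_NCPUS;
        uint8_t shift = *(volatile uint8_t *)(uintptr_t)COMM_PAGE_USER_PAGE_SHIFT;

        printf("ncpus=%u user page size=%u\n", ncpus, 1u << shift);
        return 0;
    }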
+ */ uint64_t cpu_rtimes[CPU_RTIME_BINS]; uint64_t cpu_itimes[CPU_ITIME_BINS]; #if !MONOTONIC @@ -362,76 +358,31 @@ typedef struct cpu_data { uint64_t cpu_pcid_last_cr3; #endif boolean_t cpu_rendezvous_in_progress; +#if CST_DEMOTION_DEBUG + /* Count of thread wakeups issued by this processor */ + uint64_t cpu_wakeups_issued_total; +#endif +#if DEBUG || DEVELOPMENT + uint64_t tsc_sync_delta; +#endif } cpu_data_t; extern cpu_data_t *cpu_data_ptr[]; -/* Macro to generate inline bodies to retrieve per-cpu data fields. */ -#if defined(__clang__) -#define GS_RELATIVE volatile __attribute__((address_space(256))) -#ifndef offsetof -#define offsetof(TYPE, MEMBER) __builtin_offsetof(TYPE,MEMBER) +/* + * __SEG_GS marks %gs-relative operations: + * https://clang.llvm.org/docs/LanguageExtensions.html#memory-references-to-specified-segments + * https://gcc.gnu.org/onlinedocs/gcc/Named-Address-Spaces.html#x86-Named-Address-Spaces + */ +#if defined(__SEG_GS) +// __seg_gs exists +#elif defined(__clang__) +#define __seg_gs __attribute__((address_space(256))) +#else +#error use a compiler that supports address spaces or __seg_gs #endif -#define CPU_DATA_GET(member, type) \ - cpu_data_t GS_RELATIVE *cpu_data = \ - (cpu_data_t GS_RELATIVE *)0UL; \ - type ret; \ - ret = cpu_data->member; \ - return ret; - -#define CPU_DATA_GET_INDEX(member, index, type) \ - cpu_data_t GS_RELATIVE *cpu_data = \ - (cpu_data_t GS_RELATIVE *)0UL; \ - type ret; \ - ret = cpu_data->member[index]; \ - return ret; - -#define CPU_DATA_SET(member, value) \ - cpu_data_t GS_RELATIVE *cpu_data = \ - (cpu_data_t GS_RELATIVE *)0UL; \ - cpu_data->member = value; - -#define CPU_DATA_XCHG(member, value, type) \ - cpu_data_t GS_RELATIVE *cpu_data = \ - (cpu_data_t GS_RELATIVE *)0UL; \ - type ret; \ - ret = cpu_data->member; \ - cpu_data->member = value; \ - return ret; - -#else /* !defined(__clang__) */ - -#ifndef offsetof -#define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER) -#endif /* offsetof */ -#define CPU_DATA_GET(member, type) \ - type ret; \ - __asm__ volatile ("mov %%gs:%P1,%0" \ - : "=r" (ret) \ - : "i" (offsetof(cpu_data_t,member))); \ - return ret; - -#define CPU_DATA_GET_INDEX(member, index, type) \ - type ret; \ - __asm__ volatile ("mov %%gs:(%1),%0" \ - : "=r" (ret) \ - : "r" (offsetof(cpu_data_t,member[index]))); \ - return ret; - -#define CPU_DATA_SET(member, value) \ - __asm__ volatile ("mov %0,%%gs:%P1" \ - : \ - : "r" (value), "i" (offsetof(cpu_data_t,member))); - -#define CPU_DATA_XCHG(member, value, type) \ - type ret; \ - __asm__ volatile ("xchg %0,%%gs:%P1" \ - : "=r" (ret) \ - : "i" (offsetof(cpu_data_t,member)), "0" (value)); \ - return ret; - -#endif /* !defined(__clang__) */ +#define CPU_DATA() ((cpu_data_t __seg_gs *)0UL) /* * Everyone within the osfmk part of the kernel can use the fast @@ -458,13 +409,13 @@ extern cpu_data_t *cpu_data_ptr[]; static inline thread_t get_active_thread_volatile(void) { - CPU_DATA_GET(cpu_active_thread, thread_t) + return CPU_DATA()->cpu_active_thread; } static inline __attribute__((const)) thread_t get_active_thread(void) { - CPU_DATA_GET(cpu_active_thread, thread_t) + return CPU_DATA()->cpu_active_thread; } #define current_thread_fast() get_active_thread() @@ -476,28 +427,33 @@ get_active_thread(void) static inline int get_preemption_level(void) { - CPU_DATA_GET(cpu_preemption_level, int) + return CPU_DATA()->cpu_preemption_level; } static inline int get_interrupt_level(void) { - CPU_DATA_GET(cpu_interrupt_level, int) + return CPU_DATA()->cpu_interrupt_level; } static inline 
int get_cpu_number(void) { - CPU_DATA_GET(cpu_number, int) + return CPU_DATA()->cpu_number; +} +static inline vm_offset_t +get_current_percpu_base(void) +{ + return CPU_DATA()->cpu_pcpu_base; } static inline int get_cpu_phys_number(void) { - CPU_DATA_GET(cpu_phys_number, int) + return CPU_DATA()->cpu_phys_number; } static inline cpu_data_t * current_cpu_datap(void) { - CPU_DATA_GET(cpu_this, cpu_data_t *); + return CPU_DATA()->cpu_this; } /* @@ -584,6 +540,7 @@ rbtrace_bt(uint64_t *rets, int maxframes, cpu_data_t *cdata, uint64_t frameptr, } } +__attribute__((noinline)) static inline void pltrace_internal(boolean_t enable) { @@ -607,7 +564,7 @@ pltrace_internal(boolean_t enable) cdata->cpu_plri = cplrecord; - rbtrace_bt(plbts, MAX_TRACE_BTFRAMES - 1, cdata, (uint64_t)__builtin_frame_address(0), true); + rbtrace_bt(plbts, MAX_TRACE_BTFRAMES - 1, cdata, (uint64_t)__builtin_frame_address(0), false); } extern int plctrace_enabled; @@ -647,7 +604,7 @@ static inline uint32_t traptrace_start(int vecnum, uint64_t ipc, uint64_t sabs, uint64_t frameptr) { cpu_data_t *cdata; - int cpu_num, nextidx; + unsigned int cpu_num, nextidx; traptrace_entry_t *cur_traptrace_ring; if (__improbable(traptrace_enabled == 0 || traptrace_generators == 0)) { @@ -656,10 +613,10 @@ traptrace_start(int vecnum, uint64_t ipc, uint64_t sabs, uint64_t frameptr) assert(ml_get_interrupts_enabled() == FALSE); cdata = current_cpu_datap(); - cpu_num = cdata->cpu_number; - nextidx = traptrace_next[cpu_num]; + cpu_num = (unsigned int)cdata->cpu_number; + nextidx = (unsigned int)traptrace_next[cpu_num]; /* prevent nested interrupts from clobbering this record */ - traptrace_next[cpu_num] = ((nextidx + 1) >= traptrace_entries_per_cpu) ? 0 : (nextidx + 1); + traptrace_next[cpu_num] = (int)(((nextidx + 1) >= (unsigned int)traptrace_entries_per_cpu) ? 
0 : (nextidx + 1)); cur_traptrace_ring = traptrace_ring[cpu_num]; @@ -676,7 +633,7 @@ traptrace_start(int vecnum, uint64_t ipc, uint64_t sabs, uint64_t frameptr) assert(nextidx <= 0xFFFF); - return ((unsigned)cpu_num << 16) | nextidx; + return (uint32_t)((cpu_num << 16) | nextidx); } static inline void @@ -691,7 +648,7 @@ traptrace_end(uint32_t index, uint64_t eabs) #endif /* DEVELOPMENT || DEBUG */ -static inline void +__header_always_inline void pltrace(boolean_t plenable) { #if DEVELOPMENT || DEBUG @@ -708,16 +665,9 @@ disable_preemption_internal(void) { assert(get_preemption_level() >= 0); - os_compiler_barrier(release); -#if defined(__clang__) - cpu_data_t GS_RELATIVE *cpu_data = (cpu_data_t GS_RELATIVE *)0UL; - cpu_data->cpu_preemption_level++; -#else - __asm__ volatile ("incl %%gs:%P0" - : - : "i" (offsetof(cpu_data_t, cpu_preemption_level))); -#endif - os_compiler_barrier(acquire); + os_compiler_barrier(); + CPU_DATA()->cpu_preemption_level++; + os_compiler_barrier(); pltrace(FALSE); } @@ -726,22 +676,11 @@ enable_preemption_internal(void) { assert(get_preemption_level() > 0); pltrace(TRUE); - os_compiler_barrier(release); -#if defined(__clang__) - cpu_data_t GS_RELATIVE *cpu_data = (cpu_data_t GS_RELATIVE *)0UL; - if (0 == --cpu_data->cpu_preemption_level) { + os_compiler_barrier(); + if (0 == --CPU_DATA()->cpu_preemption_level) { kernel_preempt_check(); } -#else - __asm__ volatile ("decl %%gs:%P0 \n\t" - "jne 1f \n\t" - "call _kernel_preempt_check \n\t" - "1:" - : /* no outputs */ - : "i" (offsetof(cpu_data_t, cpu_preemption_level)) - : "eax", "ecx", "edx", "cc", "memory"); -#endif - os_compiler_barrier(acquire); + os_compiler_barrier(); } static inline void @@ -750,17 +689,9 @@ enable_preemption_no_check(void) assert(get_preemption_level() > 0); pltrace(TRUE); - os_compiler_barrier(release); -#if defined(__clang__) - cpu_data_t GS_RELATIVE *cpu_data = (cpu_data_t GS_RELATIVE *)0UL; - cpu_data->cpu_preemption_level--; -#else - __asm__ volatile ("decl %%gs:%P0" - : /* no outputs */ - : "i" (offsetof(cpu_data_t, cpu_preemption_level)) - : "cc", "memory"); -#endif - os_compiler_barrier(acquire); + os_compiler_barrier(); + CPU_DATA()->cpu_preemption_level--; + os_compiler_barrier(); } static inline void diff --git a/osfmk/i386/cpu_number.h b/osfmk/i386/cpu_number.h index d58c4e982..bc2e120bb 100644 --- a/osfmk/i386/cpu_number.h +++ b/osfmk/i386/cpu_number.h @@ -69,14 +69,6 @@ /* Use a function to do this less directly. 
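The CPU_DATA() rework above leans on the compiler's x86 named address spaces: a pointer qualified with __seg_gs is dereferenced relative to %gs, so plain C member accesses replace the old per-field inline-assembly macros. A minimal standalone sketch (the struct is illustrative, not the kernel's cpu_data_t):

    #include <stdint.h>

    #ifndef __SEG_GS
    #define __seg_gs __attribute__((address_space(256)))   /* clang spelling, as in the diff */
    #endif

    struct pcpu {
        struct pcpu *self;     /* offset 0 */
        int          number;   /* offset 8 */
    };

    /* Compiles to a single %gs-relative load (roughly "movl %gs:8, %eax");
     * no inline assembly and no offsetof bookkeeping required. */
    static inline int
    my_cpu_number(void)
    {
        return ((struct pcpu __seg_gs *)0UL)->number;
    }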
*/ extern int cpu_number(void); -#ifdef MACH_KERNEL_PRIVATE -#include - -/* Get the cpu number directly from the pre-processor data area */ -#define cpu_number() get_cpu_number() - -#endif /* !MACH_KERNEL_PRIVATE */ - #endif /* KERNEL_PRIVATE */ #endif /* _I386_CPU_NUMBER_H_ */ diff --git a/osfmk/i386/cpu_threads.c b/osfmk/i386/cpu_threads.c index 890a6aa1f..f9317b13a 100644 --- a/osfmk/i386/cpu_threads.c +++ b/osfmk/i386/cpu_threads.c @@ -26,7 +26,7 @@ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #include -#include +#include #include #include #include @@ -91,7 +91,8 @@ x86_cache_alloc(void) int i; if (x86_caches == NULL) { - cache = kalloc(sizeof(x86_cpu_cache_t) + (MAX_CPUS * sizeof(x86_lcpu_t *))); + cache = zalloc_permanent(sizeof(x86_cpu_cache_t) + + (MAX_CPUS * sizeof(x86_lcpu_t *)), ZALIGN(x86_cpu_cache_t)); if (cache == NULL) { return NULL; } @@ -101,7 +102,6 @@ x86_cache_alloc(void) cache->next = NULL; } - bzero(cache, sizeof(x86_cpu_cache_t)); cache->next = NULL; cache->maxcpus = MAX_CPUS; for (i = 0; i < cache->maxcpus; i += 1) { @@ -378,14 +378,12 @@ x86_core_alloc(int cpu) simple_unlock(&x86_topo_lock); } else { simple_unlock(&x86_topo_lock); - core = kalloc(sizeof(x86_core_t)); + core = zalloc_permanent_type(x86_core_t); if (core == NULL) { - panic("x86_core_alloc() kalloc of x86_core_t failed!\n"); + panic("x86_core_alloc() alloc of x86_core_t failed!\n"); } } - bzero((void *) core, sizeof(x86_core_t)); - core->pcore_num = cpup->cpu_phys_number / topoParms.nPThreadsPerCore; core->lcore_num = core->pcore_num % topoParms.nPCoresPerPackage; @@ -526,14 +524,12 @@ x86_die_alloc(int cpu) simple_unlock(&x86_topo_lock); } else { simple_unlock(&x86_topo_lock); - die = kalloc(sizeof(x86_die_t)); + die = zalloc_permanent_type(x86_die_t); if (die == NULL) { - panic("x86_die_alloc() kalloc of x86_die_t failed!\n"); + panic("x86_die_alloc() alloc of x86_die_t failed!\n"); } } - bzero((void *) die, sizeof(x86_die_t)); - die->pdie_num = cpup->cpu_phys_number / topoParms.nPThreadsPerDie; die->ldie_num = num_dies; @@ -569,14 +565,12 @@ x86_package_alloc(int cpu) simple_unlock(&x86_topo_lock); } else { simple_unlock(&x86_topo_lock); - pkg = kalloc(sizeof(x86_pkg_t)); + pkg = zalloc_permanent_type(x86_pkg_t); if (pkg == NULL) { - panic("x86_package_alloc() kalloc of x86_pkg_t failed!\n"); + panic("x86_package_alloc() alloc of x86_pkg_t failed!\n"); } } - bzero((void *) pkg, sizeof(x86_pkg_t)); - pkg->ppkg_num = cpup->cpu_phys_number / topoParms.nPThreadsPerPackage; pkg->lpkg_num = topoParms.nPackages; diff --git a/osfmk/i386/cpu_topology.c b/osfmk/i386/cpu_topology.c index 7396b1b5f..1ed64e3df 100644 --- a/osfmk/i386/cpu_topology.c +++ b/osfmk/i386/cpu_topology.c @@ -95,6 +95,12 @@ cpu_topology_sort(int ncpus) assert(cpu_number() == 0); assert(cpu_datap(0)->cpu_number == 0); + uint32_t cpus_per_pset = 0; + +#if DEVELOPMENT || DEBUG + PE_parse_boot_argn("cpus_per_pset", &cpus_per_pset, sizeof(cpus_per_pset)); +#endif + /* Lights out for this */ istate = ml_set_interrupts_enabled(FALSE); @@ -166,8 +172,8 @@ cpu_topology_sort(int ncpus) * for their LLC cache. Each affinity set possesses a processor set * into which each logical processor is added. 
*/ - TOPO_DBG("cpu_topology_start() creating affinity sets:\n"); - for (i = 0; i < ncpus; i++) { + TOPO_DBG("cpu_topology_start() creating affinity sets:ncpus=%d max_cpus=%d\n", ncpus, machine_info.max_cpus); + for (i = 0; i < machine_info.max_cpus; i++) { cpu_data_t *cpup = cpu_datap(i); x86_lcpu_t *lcpup = cpu_to_lcpu(i); x86_cpu_cache_t *LLC_cachep; @@ -176,7 +182,7 @@ cpu_topology_sort(int ncpus) LLC_cachep = lcpup->caches[topoParms.LLCDepth]; assert(LLC_cachep->type == CPU_CACHE_TYPE_UNIF); aset = find_cache_affinity(LLC_cachep); - if (aset == NULL) { + if ((aset == NULL) || ((cpus_per_pset != 0) && (i % cpus_per_pset) == 0)) { aset = (x86_affinity_set_t *) kalloc(sizeof(*aset)); if (aset == NULL) { panic("cpu_topology_start() failed aset alloc"); @@ -210,6 +216,17 @@ cpu_topology_sort(int ncpus) processor_set_primary(cpup->cpu_processor, lprim); } } + + if (machine_info.max_cpus < machine_info.logical_cpu_max) { + /* boot-args cpus=n is set, so adjust max numbers to match */ + int logical_max = machine_info.max_cpus; + int physical_max = logical_max; + if (machine_info.logical_cpu_max != machine_info.physical_cpu_max) { + physical_max = (logical_max + 1) / 2; + } + machine_info.logical_cpu_max = logical_max; + machine_info.physical_cpu_max = physical_max; + } } /* We got a request to start a CPU. Check that this CPU is within the diff --git a/osfmk/i386/cpuid.c b/osfmk/i386/cpuid.c index 2187c5938..06ad4090d 100644 --- a/osfmk/i386/cpuid.c +++ b/osfmk/i386/cpuid.c @@ -33,6 +33,7 @@ #include #include +#include int force_tecs_at_idle; int tecs_mode_supported; @@ -212,6 +213,8 @@ static cpuid_cache_descriptor_t intel_cpuid_leaf2_descriptor_table[] = { #define INTEL_LEAF2_DESC_NUM (sizeof(intel_cpuid_leaf2_descriptor_table) / \ sizeof(cpuid_cache_descriptor_t)) +boolean_t cpuid_tsx_disabled = false; /* true if XNU disabled TSX */ +boolean_t cpuid_tsx_supported = false; static void do_cwas(i386_cpu_info_t *cpuinfo, boolean_t on_slave); static void cpuid_do_precpuid_was(void); @@ -258,13 +261,27 @@ do_cwas(i386_cpu_info_t *cpuinfo, boolean_t on_slave) * Workaround for reclaiming perf counter 3 due to TSX memory ordering erratum. * This workaround does not support being forcibly set (since an MSR must be * enumerated, lest we #GP when forced to access it.) + * + * Note that if disabling TSX is supported, disablement is prefered over forcing + * TSX transactions to abort. 
*/ - if (cpuid_wa_required(CPU_INTEL_TSXFA) == CWA_ON) { + if (cpuid_wa_required(CPU_INTEL_TSXDA) == CWA_ON) { + /* This must be executed on all logical processors */ + wrmsr64(MSR_IA32_TSX_CTRL, MSR_IA32_TSXCTRL_TSX_CPU_CLEAR | MSR_IA32_TSXCTRL_RTM_DISABLE); + } else if (cpuid_wa_required(CPU_INTEL_TSXFA) == CWA_ON) { /* This must be executed on all logical processors */ wrmsr64(MSR_IA32_TSX_FORCE_ABORT, rdmsr64(MSR_IA32_TSX_FORCE_ABORT) | MSR_IA32_TSXFA_RTM_FORCE_ABORT); } + if (((wa_reqd = cpuid_wa_required(CPU_INTEL_SRBDS)) & CWA_ON) != 0 && + ((wa_reqd & CWA_FORCE_ON) == CWA_ON || + (cpuinfo->cpuid_leaf7_extfeatures & CPUID_LEAF7_EXTFEATURE_SRBDS_CTRL) != 0)) { + /* This must be executed on all logical processors */ + uint64_t mcuoptctrl = rdmsr64(MSR_IA32_MCU_OPT_CTRL); + mcuoptctrl |= MSR_IA32_MCUOPTCTRL_RNGDS_MITG_DIS; + wrmsr64(MSR_IA32_MCU_OPT_CTRL, mcuoptctrl); + } if (on_slave) { return; @@ -279,7 +296,7 @@ do_cwas(i386_cpu_info_t *cpuinfo, boolean_t on_slave) force_tecs_at_idle = 1; } - /*FALLTHROUGH*/ + OS_FALLTHROUGH; case CWA_ON: tecs_mode_supported = 1; break; @@ -326,7 +343,7 @@ cpuid_set_cache_info( i386_cpu_info_t * info_p ) if ((cpuid_result[j] >> 31) == 1) { /* bit31 is validity */ continue; } - ((uint32_t *) info_p->cache_info)[j] = cpuid_result[j]; + ((uint32_t *)(void *)info_p->cache_info)[j] = cpuid_result[j]; } /* first byte gives number of cpuid calls to get all descriptors */ for (i = 1; i < info_p->cache_info[0]; i++) { @@ -338,7 +355,7 @@ cpuid_set_cache_info( i386_cpu_info_t * info_p ) if ((cpuid_result[j] >> 31) == 1) { continue; } - ((uint32_t *) info_p->cache_info)[4 * i + j] = + ((uint32_t *)(void *)info_p->cache_info)[4 * i + j] = cpuid_result[j]; } } @@ -811,6 +828,8 @@ cpuid_set_generic_info(i386_cpu_info_t *info_p) info_p->cpuid_leaf7_features = quad(reg[ecx], reg[ebx]); info_p->cpuid_leaf7_extfeatures = reg[edx]; + cpuid_tsx_supported = (reg[ebx] & (CPUID_LEAF7_FEATURE_HLE | CPUID_LEAF7_FEATURE_RTM)) != 0; + DBG(" Feature Leaf7:\n"); DBG(" EBX : 0x%x\n", reg[ebx]); DBG(" ECX : 0x%x\n", reg[ecx]); @@ -882,6 +901,11 @@ cpuid_set_cpufamily(i386_cpu_info_t *info_p) case CPUID_MODEL_KABYLAKE_DT: cpufamily = CPUFAMILY_INTEL_KABYLAKE; break; + case CPUID_MODEL_ICELAKE: + case CPUID_MODEL_ICELAKE_H: + case CPUID_MODEL_ICELAKE_DT: + cpufamily = CPUFAMILY_INTEL_ICELAKE; + break; } break; } @@ -938,6 +962,9 @@ cpuid_set_info(void) * (which determines whether SMT/Hyperthreading is active). */ + /* + * Not all VMMs emulate MSR_CORE_THREAD_COUNT (0x35). + */ if (0 != (info_p->cpuid_features & CPUID_FEATURE_VMM) && PE_parse_boot_argn("-nomsr35h", NULL, 0)) { info_p->core_count = 1; @@ -951,6 +978,11 @@ cpuid_set_info(void) info_p->thread_count = info_p->cpuid_logical_per_package; break; case CPUFAMILY_INTEL_WESTMERE: { + /* + * This should be the same as Nehalem but an A0 silicon bug returns + * invalid data in the top 12 bits. Hence, we use only bits [19..16] + * rather than [31..16] for core count - which actually can't exceed 8. 
+ */ uint64_t msr = rdmsr64(MSR_CORE_THREAD_COUNT); if (0 == msr) { /* Provide a non-zero default for some VMMs */ @@ -983,6 +1015,9 @@ cpuid_set_info(void) info_p->cpuid_model_string = ""; /* deprecated */ + /* Init CPU LBRs */ + i386_lbr_init(info_p, true); + do_cwas(info_p, FALSE); } @@ -1302,13 +1337,42 @@ cpuid_leaf7_extfeatures(void) return cpuid_info()->cpuid_leaf7_extfeatures; } +const char * +cpuid_vmm_family_string(void) +{ + switch (cpuid_vmm_info()->cpuid_vmm_family) { + case CPUID_VMM_FAMILY_NONE: + return "None"; + + case CPUID_VMM_FAMILY_VMWARE: + return "VMWare"; + + case CPUID_VMM_FAMILY_PARALLELS: + return "Parallels"; + + case CPUID_VMM_FAMILY_HYVE: + return "xHyve"; + + case CPUID_VMM_FAMILY_HVF: + return "HVF"; + + case CPUID_VMM_FAMILY_KVM: + return "KVM"; + + case CPUID_VMM_FAMILY_UNKNOWN: + /*FALLTHROUGH*/ + default: + return "Unknown VMM"; + } +} + static i386_vmm_info_t *_cpuid_vmm_infop = NULL; static i386_vmm_info_t _cpuid_vmm_info; static void cpuid_init_vmm_info(i386_vmm_info_t *info_p) { - uint32_t reg[4]; + uint32_t reg[4], maxbasic_regs[4]; uint32_t max_vmm_leaf; bzero(info_p, sizeof(*info_p)); @@ -1319,8 +1383,27 @@ cpuid_init_vmm_info(i386_vmm_info_t *info_p) DBG("cpuid_init_vmm_info(%p)\n", info_p); + /* + * Get the highest basic leaf value, then save the cpuid details for that leaf + * for comparison with the [ostensible] VMM leaf. + */ + cpuid_fn(0, reg); + cpuid_fn(reg[eax], maxbasic_regs); + /* do cpuid 0x40000000 to get VMM vendor */ cpuid_fn(0x40000000, reg); + + /* + * If leaf 0x40000000 is non-existent, cpuid will return the values as + * if the highest basic leaf was requested, so compare to those values + * we just retrieved to see if no vmm is present. + */ + if (bcmp(reg, maxbasic_regs, sizeof(reg)) == 0) { + info_p->cpuid_vmm_family = CPUID_VMM_FAMILY_NONE; + DBG(" vmm_vendor : NONE\n"); + return; + } + max_vmm_leaf = reg[eax]; bcopy((char *)®[ebx], &info_p->cpuid_vmm_vendor[0], 4); bcopy((char *)®[ecx], &info_p->cpuid_vmm_vendor[4], 4); @@ -1330,9 +1413,18 @@ cpuid_init_vmm_info(i386_vmm_info_t *info_p) if (0 == strcmp(info_p->cpuid_vmm_vendor, CPUID_VMM_ID_VMWARE)) { /* VMware identification string: kb.vmware.com/kb/1009458 */ info_p->cpuid_vmm_family = CPUID_VMM_FAMILY_VMWARE; - } else if (0 == strcmp(info_p->cpuid_vmm_vendor, CPUID_VMM_ID_PARALLELS)) { + } else if (0 == bcmp(info_p->cpuid_vmm_vendor, CPUID_VMM_ID_PARALLELS, 12)) { /* Parallels identification string */ info_p->cpuid_vmm_family = CPUID_VMM_FAMILY_PARALLELS; + } else if (0 == bcmp(info_p->cpuid_vmm_vendor, CPUID_VMM_ID_HYVE, 12)) { + /* bhyve/xhyve identification string */ + info_p->cpuid_vmm_family = CPUID_VMM_FAMILY_HYVE; + } else if (0 == bcmp(info_p->cpuid_vmm_vendor, CPUID_VMM_ID_HVF, 12)) { + /* HVF identification string */ + info_p->cpuid_vmm_family = CPUID_VMM_FAMILY_HVF; + } else if (0 == bcmp(info_p->cpuid_vmm_vendor, CPUID_VMM_ID_KVM, 12)) { + /* KVM identification string */ + info_p->cpuid_vmm_family = CPUID_VMM_FAMILY_KVM; } else { info_p->cpuid_vmm_family = CPUID_VMM_FAMILY_UNKNOWN; } @@ -1380,6 +1472,7 @@ cpuid_wa_required(cpu_wa_e wa) static uint64_t bootarg_cpu_wa_enables = 0; static uint64_t bootarg_cpu_wa_disables = 0; static int bootargs_overrides_processed = 0; + uint32_t reg[4]; if (!bootargs_overrides_processed) { if (!PE_parse_boot_argn("cwae", &bootarg_cpu_wa_enables, sizeof(bootarg_cpu_wa_enables))) { @@ -1425,6 +1518,11 @@ cpuid_wa_required(cpu_wa_e wa) break; case CPU_INTEL_TSXFA: + /* + * Note that if TSX was disabled in cpuid_do_precpuid_was(), 
the cached cpuid + * info will indicate that RTM is *not* supported and this workaround will not + * be enabled. + */ /* * Otherwise, if the CPU supports both TSX(HLE) and FORCE_ABORT, return that * the workaround should be enabled. @@ -1435,6 +1533,45 @@ cpuid_wa_required(cpu_wa_e wa) } break; + case CPU_INTEL_TSXDA: + /* + * Since this workaround might be requested before cpuid_set_info() is complete, + * we need to invoke cpuid directly when looking for the required bits. + */ + cpuid_fn(0x7, reg); + if (reg[edx] & CPUID_LEAF7_EXTFEATURE_ACAPMSR) { + uint64_t archcap_msr = rdmsr64(MSR_IA32_ARCH_CAPABILITIES); + /* + * If this CPU supports TSX (HLE being the proxy for TSX detection) AND it does + * not include a hardware fix for TAA and it supports the TSX_CTRL MSR, disable TSX entirely. + * (Note this can be overridden (above) if the cwad boot-arg's value has bit 2 set.) + */ + if ((reg[ebx] & CPUID_LEAF7_FEATURE_HLE) != 0 && + (archcap_msr & (MSR_IA32_ARCH_CAPABILITIES_TAA_NO | MSR_IA32_ARCH_CAPABILITIES_TSX_CTRL)) + == MSR_IA32_ARCH_CAPABILITIES_TSX_CTRL) { + return CWA_ON; + } + } + break; + + case CPU_INTEL_SRBDS: + /* + * SRBDS mitigations are enabled by default. CWA_ON returned here indicates + * the caller should disable the mitigation. Mitigations should be disabled + * at least for CPUs that advertise MDS_NO *and* (either TAA_NO is set OR TSX + * has been disabled). + */ + if ((info_p->cpuid_leaf7_extfeatures & CPUID_LEAF7_EXTFEATURE_SRBDS_CTRL) != 0) { + if ((info_p->cpuid_leaf7_extfeatures & CPUID_LEAF7_EXTFEATURE_ACAPMSR) != 0) { + uint64_t archcap_msr = rdmsr64(MSR_IA32_ARCH_CAPABILITIES); + if ((archcap_msr & MSR_IA32_ARCH_CAPABILITIES_MDS_NO) != 0 && + ((archcap_msr & MSR_IA32_ARCH_CAPABILITIES_TAA_NO) != 0 || + cpuid_tsx_disabled)) { + return CWA_ON; + } + } + } + break; default: break; @@ -1452,4 +1589,10 @@ cpuid_do_precpuid_was(void) * that data as well. 
*/ + /* Note the TSX disablement, we do not support force-on since it depends on MSRs being present */ + if (cpuid_wa_required(CPU_INTEL_TSXDA) == CWA_ON) { + /* This must be executed on all logical processors */ + wrmsr64(MSR_IA32_TSX_CTRL, MSR_IA32_TSXCTRL_TSX_CPU_CLEAR | MSR_IA32_TSXCTRL_RTM_DISABLE); + cpuid_tsx_disabled = true; + } } diff --git a/osfmk/i386/cpuid.h b/osfmk/i386/cpuid.h index b63bd71bb..577bf6167 100644 --- a/osfmk/i386/cpuid.h +++ b/osfmk/i386/cpuid.h @@ -50,8 +50,11 @@ #define CPUID_VID_INTEL "GenuineIntel" #define CPUID_VID_AMD "AuthenticAMD" -#define CPUID_VMM_ID_VMWARE "VMwareVMware" +#define CPUID_VMM_ID_VMWARE "VMwareVMware" #define CPUID_VMM_ID_PARALLELS "Parallels\0\0\0" +#define CPUID_VMM_ID_HYVE "bhyve bhyve " +#define CPUID_VMM_ID_HVF "HVFHVFHVFHVF" +#define CPUID_VMM_ID_KVM "KVMKVMKVM\0\0\0" #define CPUID_STRING_UNKNOWN "Unknown CPU Typ" @@ -271,10 +274,21 @@ #define CPUID_MODEL_KABYLAKE_ULT 0x8E #define CPUID_MODEL_KABYLAKE_ULX 0x8E #define CPUID_MODEL_KABYLAKE_DT 0x9E +#define CPUID_MODEL_ICELAKE 0x7E +#define CPUID_MODEL_ICELAKE_ULT 0x7E +#define CPUID_MODEL_ICELAKE_ULX 0x7E +#define CPUID_MODEL_ICELAKE_DT 0x7D +#define CPUID_MODEL_ICELAKE_H 0x9F + +#define CPUID_VMM_FAMILY_NONE 0x0 +#define CPUID_VMM_FAMILY_UNKNOWN 0x1 +#define CPUID_VMM_FAMILY_VMWARE 0x2 +#define CPUID_VMM_FAMILY_PARALLELS 0x3 +#define CPUID_VMM_FAMILY_HYVE 0x4 +#define CPUID_VMM_FAMILY_HVF 0x5 +#define CPUID_VMM_FAMILY_KVM 0x6 + -#define CPUID_VMM_FAMILY_UNKNOWN 0x0 -#define CPUID_VMM_FAMILY_VMWARE 0x1 -#define CPUID_VMM_FAMILY_PARALLELS 0x2 #ifndef ASSEMBLER #include @@ -382,7 +396,7 @@ typedef struct { } cpuid_tsc_leaf_t; /* Physical CPU info - this is exported out of the kernel (kexts), so be wary of changes */ -typedef struct { +typedef struct i386_cpu_info { char cpuid_vendor[16]; char cpuid_brand_string[48]; const char *cpuid_model_string; @@ -475,7 +489,9 @@ typedef struct { typedef enum { CPU_INTEL_SEGCHK = 1, - CPU_INTEL_TSXFA = 2 + CPU_INTEL_TSXFA = 2, + CPU_INTEL_TSXDA = 4, + CPU_INTEL_SRBDS = 8 } cpu_wa_e; typedef enum { @@ -494,6 +510,7 @@ is_xeon_sp(uint8_t platid) if (platid != PLATID_MAYBE_XEON_SP) { return 0; } + boolean_t intrs = ml_set_interrupts_enabled(FALSE); outl(cfgAdr, XeonCapID5); uint32_t cap5reg = inl(cfgDat); @@ -535,13 +552,14 @@ extern uint32_t cpuid_cpufamily(void); extern i386_cpu_info_t *cpuid_info(void); extern void cpuid_set_info(void); +extern boolean_t cpuid_vmm_present(void); #ifdef MACH_KERNEL_PRIVATE -extern boolean_t cpuid_vmm_present(void); extern i386_vmm_info_t *cpuid_vmm_info(void); extern uint32_t cpuid_vmm_family(void); extern cwa_classifier_e cpuid_wa_required(cpu_wa_e wa); extern void cpuid_do_was(void); +extern const char *cpuid_vmm_family_string(void); #endif #ifdef __cplusplus diff --git a/osfmk/i386/endian.h b/osfmk/i386/endian.h index a83ba637b..487b3ed85 100644 --- a/osfmk/i386/endian.h +++ b/osfmk/i386/endian.h @@ -55,7 +55,7 @@ static __inline__ unsigned short ntohs(unsigned short w_int) { - return (w_int << 8) | (w_int >> 8); + return (unsigned short)((w_int << 8) | (w_int >> 8)); } #endif diff --git a/osfmk/i386/fpu.c b/osfmk/i386/fpu.c index 2e90ed752..e960ee4ef 100644 --- a/osfmk/i386/fpu.c +++ b/osfmk/i386/fpu.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2019 Apple Inc. All rights reserved. + * Copyright (c) 2000-2020 Apple Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -150,18 +150,18 @@ fxsave64(struct x86_fx_thread_state *a) #define IS_VALID_XSTATE(x) ((x) == FP || (x) == AVX || (x) == AVX512) -zone_t ifps_zone[] = { +SECURITY_READ_ONLY_LATE(zone_t) ifps_zone[] = { [FP] = NULL, [AVX] = NULL, [AVX512] = NULL }; -static uint32_t fp_state_size[] = { +static const uint32_t fp_state_size[] = { [FP] = sizeof(struct x86_fx_thread_state), [AVX] = sizeof(struct x86_avx_thread_state), [AVX512] = sizeof(struct x86_avx512_thread_state) }; -static const char *xstate_name[] = { +static const char *const xstate_name[] = { [UNDEFINED] = "UNDEFINED", [FP] = "FP", [AVX] = "AVX", @@ -507,22 +507,8 @@ init_fpu(void) static void * fp_state_alloc(xstate_t xs) { - struct x86_fx_thread_state *ifps; - assert(ifps_zone[xs] != NULL); - ifps = zalloc(ifps_zone[xs]); - -#if DEBUG - if (!(ALIGNED(ifps, 64))) { - panic("fp_state_alloc: %p, %u, %p, %u", - ifps, (unsigned) ifps_zone[xs]->elem_size, - (void *) ifps_zone[xs]->free_elements, - (unsigned) ifps_zone[xs]->alloc_size); - } -#endif - bzero(ifps, fp_state_size[xs]); - - return ifps; + return zalloc_flags(ifps_zone[xs], Z_WAITOK | Z_ZERO); } static inline void @@ -647,31 +633,19 @@ fpu_module_init(void) fpu_default); } - /* We explicitly choose an allocation size of 13 pages = 64 * 832 - * to eliminate waste for the 832 byte sized - * AVX XSAVE register save area. - */ - ifps_zone[fpu_default] = zinit(fp_state_size[fpu_default], - thread_max * fp_state_size[fpu_default], - 64 * fp_state_size[fpu_default], - "x86 fpsave state"); - /* To maintain the required alignment, disable * zone debugging for this zone as that appends * 16 bytes to each element. */ - zone_change(ifps_zone[fpu_default], Z_ALIGNMENT_REQUIRED, TRUE); + ifps_zone[fpu_default] = zone_create("x86 fpsave state", + fp_state_size[fpu_default], ZC_ALIGNMENT_REQUIRED | ZC_ZFREE_CLEARMEM); /* * If AVX512 is supported, create a separate savearea zone. - * with allocation size: 19 pages = 32 * 2668 */ if (fpu_capability == AVX512) { - ifps_zone[AVX512] = zinit(fp_state_size[AVX512], - thread_max * fp_state_size[AVX512], - 32 * fp_state_size[AVX512], - "x86 avx512 save state"); - zone_change(ifps_zone[AVX512], Z_ALIGNMENT_REQUIRED, TRUE); + ifps_zone[AVX512] = zone_create("x86 avx512 save state", + fp_state_size[AVX512], ZC_ALIGNMENT_REQUIRED | ZC_ZFREE_CLEARMEM); } /* Determine MXCSR reserved bits and configure initial FPU state*/ diff --git a/osfmk/i386/genassym.c b/osfmk/i386/genassym.c index b691ae36b..e28bb452e 100644 --- a/osfmk/i386/genassym.c +++ b/osfmk/i386/genassym.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2019 Apple Inc. All rights reserved. + * Copyright (c) 2000-2020 Apple Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -169,11 +169,8 @@ main( DECLARE("TH_PCB_ISS", offsetof(struct thread, machine.iss)); DECLARE("TH_PCB_IDS", offsetof(struct thread, machine.ids)); DECLARE("TH_PCB_FPS", offsetof(struct thread, machine.ifps)); -#if NCOPY_WINDOWS > 0 - DECLARE("TH_COPYIO_STATE", offsetof(struct thread, machine.copyio_state)); - DECLARE("WINDOWS_CLEAN", WINDOWS_CLEAN); -#endif DECLARE("TH_RWLOCK_COUNT", offsetof(struct thread, rwlock_count)); + DECLARE("TH_TMP_ALLOC_CNT", offsetof(struct thread, t_temp_alloc_count)); DECLARE("MAP_PMAP", offsetof(struct _vm_map, pmap)); @@ -294,7 +291,6 @@ main( DECLARE("ASM_COMM_PAGE32_BASE_ADDRESS", _COMM_PAGE32_BASE_ADDRESS); DECLARE("ASM_COMM_PAGE32_START_ADDRESS", _COMM_PAGE32_START_ADDRESS); - DECLARE("ASM_COMM_PAGE_SCHED_GEN", _COMM_PAGE_SCHED_GEN); DECLARE("KERNEL_PML4_INDEX", KERNEL_PML4_INDEX); DECLAREULL("KERNEL_BASE", KERNEL_BASE); @@ -417,6 +413,10 @@ main( offsetof(cpu_data_t, cd_estack)); DECLARE("CPU_DSHADOW", offsetof(cpu_data_t, cd_shadow)); +#if DEVELOPMENT || DEBUG + DECLARE("CPU_RTIMES", + offsetof(cpu_data_t, cpu_rtimes[0])); +#endif DECLARE("enaExpTrace", enaExpTrace); DECLARE("enaUsrFCall", enaUsrFCall); @@ -479,21 +479,21 @@ main( offsetof(struct timer, tstamp)); DECLARE("THREAD_TIMER", - offsetof(struct processor, processor_data.thread_timer)); + offsetof(struct processor, thread_timer)); DECLARE("KERNEL_TIMER", - offsetof(struct processor, processor_data.kernel_timer)); + offsetof(struct processor, kernel_timer)); DECLARE("SYSTEM_TIMER", offsetof(struct thread, system_timer)); DECLARE("USER_TIMER", offsetof(struct thread, user_timer)); DECLARE("SYSTEM_STATE", - offsetof(struct processor, processor_data.system_state)); + offsetof(struct processor, system_state)); DECLARE("USER_STATE", - offsetof(struct processor, processor_data.user_state)); + offsetof(struct processor, user_state)); DECLARE("IDLE_STATE", - offsetof(struct processor, processor_data.idle_state)); + offsetof(struct processor, idle_state)); DECLARE("CURRENT_STATE", - offsetof(struct processor, processor_data.current_state)); + offsetof(struct processor, current_state)); DECLARE("OnProc", OnProc); diff --git a/osfmk/i386/hibernate_i386.c b/osfmk/i386/hibernate_i386.c index 2c7a177f1..abfe74a22 100644 --- a/osfmk/i386/hibernate_i386.c +++ b/osfmk/i386/hibernate_i386.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2004-2012 Apple Inc. All rights reserved. + * Copyright (c) 2004-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -25,6 +25,10 @@ * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ +/*! + * i386/x86_64-specific functions required to support hibernation entry, and also to + * support hibernation exit after wired pages have already been restored. + */ #include #include @@ -41,6 +45,7 @@ #include #include +#include #include #include #include @@ -113,6 +118,7 @@ hibernate_page_list_allocate(boolean_t log) case kEfiACPIMemoryNVS: case kEfiPalCode: non_os_pagecount += num; + OS_FALLTHROUGH; // OS used dram case kEfiLoaderCode: @@ -292,3 +298,8 @@ hibernate_vm_locks_are_safe(void) assert(FALSE == ml_get_interrupts_enabled()); return hibernate_vm_locks_safe; } + +void +pal_hib_write_hook(void) +{ +} diff --git a/osfmk/i386/hibernate_restore.c b/osfmk/i386/hibernate_restore.c index f8b0bb195..8ec82e9f3 100644 --- a/osfmk/i386/hibernate_restore.c +++ b/osfmk/i386/hibernate_restore.c @@ -25,11 +25,16 @@ * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ +/*! 
+ * i386/x86_64-specific functions required to support hibernation resume. + */ + #include #include #include +#include -#include +#include extern pd_entry_t BootPTD[2048]; @@ -71,13 +76,15 @@ pal_hib_map(uintptr_t virt, uint64_t phys) case BITMAP_AREA: case IMAGE_AREA: case IMAGE2_AREA: + case SCRATCH_AREA: + case WKDM_AREA: break; default: asm("cli;hlt;"); break; } - if (phys < IMAGE2_AREA) { + if (phys < WKDM_AREA) { // first 4Gb is all mapped, // and do not expect source areas to cross 4Gb return phys; @@ -97,12 +104,28 @@ pal_hib_map(uintptr_t virt, uint64_t phys) } void -hibernateRestorePALState(uint32_t *arg) +pal_hib_restore_pal_state(uint32_t *arg) { (void)arg; } void -pal_hib_patchup(void) +pal_hib_resume_init(__unused pal_hib_ctx_t *ctx, __unused hibernate_page_list_t *map, __unused uint32_t *nextFree) +{ +} + +void +pal_hib_restored_page(__unused pal_hib_ctx_t *ctx, __unused pal_hib_restore_stage_t stage, __unused ppnum_t ppnum) +{ +} + +void +pal_hib_patchup(__unused pal_hib_ctx_t *ctx) +{ +} + +void +pal_hib_decompress_page(void *src, void *dst, void *scratch, unsigned int compressedSize) { + WKdm_decompress_new((WK_word*)src, (WK_word*)dst, (WK_word*)scratch, compressedSize); } diff --git a/osfmk/i386/hw_defs.h b/osfmk/i386/hw_defs.h index 945123b9b..1f4d0abfd 100644 --- a/osfmk/i386/hw_defs.h +++ b/osfmk/i386/hw_defs.h @@ -42,4 +42,9 @@ #define pmStatus 0x00 #define msrTSC 0x10 +#define cfgAdr 0xCF8 +#define cfgDat 0xCFC + +#define XeonCapID5 (0x80000000 | (1 << 16) | (30 << 11) | (3 << 8) | 0x98) + #endif /* _I386_HW_DEFS_H_ */ diff --git a/osfmk/i386/i386_init.c b/osfmk/i386/i386_init.c index 8c1662eea..12cdef33a 100644 --- a/osfmk/i386/i386_init.c +++ b/osfmk/i386/i386_init.c @@ -99,9 +99,14 @@ #include #include #include /* LcksOpts */ +#include #if DEBUG #include #endif +extern void xcpm_bootstrap(void); +#if DEVELOPMENT || DEBUG +#include +#endif #if MONOTONIC #include @@ -109,12 +114,17 @@ #include -#if DEBUG -#define DBG(x ...) kprintf(x) +#if DEBUG || DEVELOPMENT +#define DBG(x, ...) kprintf(x, ##__VA_ARGS__) +#define dyldLogFunc(x, ...) kprintf(x, ##__VA_ARGS__) #else #define DBG(x ...) 
#endif +#include +#include + + int debug_task; int early_boot = 1; @@ -148,6 +158,19 @@ int kernPhysPML4EntryCount; ppnum_t released_PT_ppn = 0; uint32_t released_PT_cnt = 0; +#if DEVELOPMENT || DEBUG +int panic_on_cacheline_mismatch = -1; +char panic_on_trap_procname[64]; +uint32_t panic_on_trap_mask; +#endif +bool last_branch_support_enabled; +int insn_copyin_count; +#if DEVELOPMENT || DEBUG +#define DEFAULT_INSN_COPYIN_COUNT x86_INSTRUCTION_STATE_MAX_INSN_BYTES +#else +#define DEFAULT_INSN_COPYIN_COUNT 192 +#endif + char *physfree; void idt64_remap(void); @@ -546,17 +569,105 @@ __attribute__((aligned(PAGE_SIZE))) = { }; static void -vstart_idt_init(void) +vstart_idt_init(boolean_t master) { x86_64_desc_register_t vstart_idt = { sizeof(master_boot_idt64), master_boot_idt64 }; - fix_desc64(master_boot_idt64, 32); + if (master) { + fix_desc64(master_boot_idt64, 32); + } lidt((void *)&vstart_idt); } +extern void *collection_base_pointers[KCNumKinds]; + +kern_return_t +i386_slide_individual_kext(kernel_mach_header_t *mh, uintptr_t slide) +{ + int ret = kernel_collection_slide(mh, (const void **) (void *)collection_base_pointers); + if (ret != 0) { + printf("Sliding pageable kc was stopped\n"); + return KERN_FAILURE; + } + + kernel_collection_adjust_fileset_entry_addrs(mh, slide); + return KERN_SUCCESS; +} + +kern_return_t +i386_slide_kext_collection_mh_addrs(kernel_mach_header_t *mh, uintptr_t slide, bool adjust_mach_headers) +{ + int ret = kernel_collection_slide(mh, (const void **) (void *)collection_base_pointers); + if (ret != KERN_SUCCESS) { + printf("Kernel Collection slide was stopped with value %d\n", ret); + return KERN_FAILURE; + } + + kernel_collection_adjust_mh_addrs(mh, slide, adjust_mach_headers, + NULL, NULL, NULL, NULL, NULL, NULL, NULL); + + return KERN_SUCCESS; +} + +static void +i386_slide_and_rebase_image(uintptr_t kstart_addr) +{ + extern uintptr_t kc_highest_nonlinkedit_vmaddr; + kernel_mach_header_t *k_mh, *kc_mh = NULL; + kernel_segment_command_t *seg; + uintptr_t slide; + + k_mh = &_mh_execute_header; + /* + * If we're not booting an MH_FILESET, we don't need to slide + * anything because EFI has done that for us. When booting an + * MH_FILESET, EFI will slide the kernel proper, but not the kexts. + * Below, we infer the slide by comparing the slid address of the + * kernel's mach-o header and the unslid vmaddr of the first segment + * of the mach-o (which is assumed to always point to the mach-o + * header). + */ + if (!kernel_mach_header_is_in_fileset(k_mh)) { + DBG("[MH] kcgen-style KC\n"); + return; + } + + /* + * The kernel is part of a MH_FILESET kernel collection: determine slide + * based on first segment's mach-o vmaddr. + */ + seg = (kernel_segment_command_t *)((uintptr_t)k_mh + sizeof(*k_mh)); + assert(seg->cmd == LC_SEGMENT_KERNEL); + slide = (uintptr_t)k_mh - seg->vmaddr; + DBG("[MH] Sliding new-style KC: %llu\n", (unsigned long long)slide); + + /* + * The kernel collection mach-o header should be the start address + * passed to us by EFI.
+ */ + kc_mh = (kernel_mach_header_t *)(kstart_addr); + assert(kc_mh->filetype == MH_FILESET); + + PE_set_kc_header(KCKindPrimary, kc_mh, slide); + + /* + * rebase/slide all the kexts in the collection + * (EFI should have already rebased the kernel) + */ + kernel_collection_slide(kc_mh, (const void **) (void *)collection_base_pointers); + + + /* + * Now adjust the vmaddr fields of all mach-o headers + * and symbols in this MH_FILESET + */ + kernel_collection_adjust_mh_addrs(kc_mh, slide, false, + NULL, NULL, NULL, NULL, NULL, NULL, &kc_highest_nonlinkedit_vmaddr); +} + /* * vstart() is called in the natural mode (64bit for K64, 32 for K32) * on a set of bootstrap pagetables which use large, 2MB pages to map @@ -586,11 +697,12 @@ vstart(vm_offset_t boot_args_start) postcode(VSTART_ENTRY); + /* + * Set-up temporary trap handlers during page-table set-up. + */ + if (is_boot_cpu) { - /* - * Set-up temporary trap handlers during page-table set-up. - */ - vstart_idt_init(); + vstart_idt_init(TRUE); postcode(VSTART_IDT_INIT); /* @@ -621,6 +733,18 @@ vstart(vm_offset_t boot_args_start) &kernelBootArgs->ksize, &kernelBootArgs->kaddr); DBG("SMBIOS mem sz 0x%llx\n", kernelBootArgs->PhysicalMemorySize); + DBG("KC_hdrs_vaddr %p\n", (void *)kernelBootArgs->KC_hdrs_vaddr); + + if (kernelBootArgs->Version >= 2 && kernelBootArgs->Revision >= 1 && + kernelBootArgs->KC_hdrs_vaddr != 0) { + /* + * slide the header addresses in all mach-o segments and sections, and + * perform any new-style chained-fixup sliding for kexts, as necessary. + * Note that efiboot has already loaded the kernel and all LC_SEGMENT_64s + * that correspond to the kexts present in the primary KC, into slid addresses. + */ + i386_slide_and_rebase_image((uintptr_t)ml_static_ptovirt(kernelBootArgs->KC_hdrs_vaddr)); + } /* * Setup boot args given the physical start address. @@ -667,15 +791,28 @@ vstart(vm_offset_t boot_args_start) * via i386_init_slave() */ } else { + /* Slave CPUs should use the basic IDT until i386_init_slave() */ + vstart_idt_init(FALSE); + /* Switch to kernel's page tables (from the Boot PTs) */ set_cr3_raw((uintptr_t)ID_MAP_VTOP(IdlePML4)); + /* Find our logical cpu number */ - cpu = lapic_to_cpu[(LAPIC_READ(ID) >> LAPIC_ID_SHIFT) & LAPIC_ID_MASK]; + cpu = lapic_to_cpu[lapic_safe_apicid()]; #if DEBUG gsbase = rdmsr64(MSR_IA32_GS_BASE); #endif cpu_desc_load(cpu_datap(cpu)); - DBG("CPU: %d, GSBASE initial value: 0x%llx\n", cpu, gsbase); +#if DEBUG + DBG("CPU: %d, GSBASE initial value: 0x%llx\n", cpu, (unsigned long long)gsbase); +#endif + + /* + * Before we can discover our local APIC ID, we need to potentially + * initialize X2APIC, if it's enabled and firmware started us with + * the APIC in legacy mode. 
+ */ + lapic_init_slave(); } early_boot = 0; @@ -707,7 +844,7 @@ i386_init(void) pal_i386_init(); tsc_init(); - rtclock_early_init(); /* mach_absolute_time() now functionsl */ + rtclock_early_init(); /* mach_absolute_time() now functional */ kernel_debug_string_early("i386_init"); pstate_trace(); @@ -719,9 +856,8 @@ i386_init(void) master_cpu = 0; - lck_mod_init(); - - printf_init(); /* Init this in case we need debugger */ + kernel_debug_string_early("kernel_startup_bootstrap"); + kernel_startup_bootstrap(); /* * Initialize the timer callout world @@ -732,19 +868,54 @@ i386_init(void) postcode(CPU_INIT_D); - panic_init(); /* Init this in case we need debugger */ - /* setup debugging output if one has been chosen */ - kernel_debug_string_early("PE_init_kprintf"); - PE_init_kprintf(FALSE); - - kernel_debug_string_early("kernel_early_bootstrap"); - kernel_early_bootstrap(); + kernel_startup_initialize_upto(STARTUP_SUB_KPRINTF); + kprintf("kprintf initialized\n"); if (!PE_parse_boot_argn("diag", &dgWork.dgFlags, sizeof(dgWork.dgFlags))) { dgWork.dgFlags = 0; } + if (PE_parse_boot_argn("insn_capcnt", &insn_copyin_count, sizeof(insn_copyin_count))) { + /* + * Enforce max and min values (allowing 0 to disable copying completely) + * for the instruction copyin count + */ + if (insn_copyin_count > x86_INSTRUCTION_STATE_MAX_INSN_BYTES || + (insn_copyin_count != 0 && insn_copyin_count < 64)) { + insn_copyin_count = DEFAULT_INSN_COPYIN_COUNT; + } + } else { + insn_copyin_count = DEFAULT_INSN_COPYIN_COUNT; + } + +#if DEVELOPMENT || DEBUG + if (!PE_parse_boot_argn("panic_clmismatch", &panic_on_cacheline_mismatch, + sizeof(panic_on_cacheline_mismatch))) { + panic_on_cacheline_mismatch = 0; + } + + if (!PE_parse_boot_argn("panic_on_trap_procname", &panic_on_trap_procname[0], + sizeof(panic_on_trap_procname))) { + panic_on_trap_procname[0] = 0; + } + + if (!PE_parse_boot_argn("panic_on_trap_mask", &panic_on_trap_mask, + sizeof(panic_on_trap_mask))) { + if (panic_on_trap_procname[0] != 0) { + panic_on_trap_mask = DEFAULT_PANIC_ON_TRAP_MASK; + } else { + panic_on_trap_mask = 0; + } + } +#endif + /* But allow that to be overridden via boot-arg: */ + if (!PE_parse_boot_argn("lbr_support", &last_branch_support_enabled, + sizeof(last_branch_support_enabled))) { + /* Disable LBR support by default due to its high context switch overhead */ + last_branch_support_enabled = false; + } + serialmode = 0; if (PE_parse_boot_argn("serial", &serialmode, sizeof(serialmode))) { /* We want a serial keyboard and/or console */ @@ -778,6 +949,8 @@ i386_init(void) maxmemtouse = ((uint64_t)maxmem) * MB; } + max_cpus_from_firmware = acpi_count_enabled_logical_processors(); + if (PE_parse_boot_argn("cpus", &cpus, sizeof(cpus))) { if ((0 < cpus) && (cpus < max_ncpus)) { max_ncpus = cpus; @@ -823,6 +996,7 @@ i386_init(void) kernel_debug_string_early("power_management_init"); power_management_init(); + xcpm_bootstrap(); #if MONOTONIC mt_cpu_up(cpu_datap(0)); @@ -862,7 +1036,15 @@ do_init_slave(boolean_t fast_restart) #endif LAPIC_INIT(); - lapic_configure(); + /* + * Note that the true argument here does not necessarily mean we're + * here from a resume (this code path is also executed on boot). + * The implementation of lapic_configure checks to see if the + * state variable has been initialized, as it would be before + * sleep. If it has not been, it's construed as an indicator of + * first boot. 
+ */ + lapic_configure(true); LAPIC_DUMP(); LAPIC_CPU_MAP_DUMP(); @@ -873,6 +1055,9 @@ do_init_slave(boolean_t fast_restart) #endif /* update CPU microcode and apply CPU workarounds */ ucode_update_wake_and_apply_cpu_was(); + + /* Enable LBRs on non-boot CPUs */ + i386_lbr_init(cpuid_info(), false); } else { init_param = FAST_SLAVE_INIT; } @@ -926,7 +1111,6 @@ i386_init_slave_fast(void) do_init_slave(TRUE); } -#include /* TODO: Evaluate global PTEs for the double-mapped translations */ diff --git a/osfmk/i386/i386_timer.c b/osfmk/i386/i386_timer.c index 1d56ea08e..e4ad6d311 100644 --- a/osfmk/i386/i386_timer.c +++ b/osfmk/i386/i386_timer.c @@ -59,7 +59,7 @@ uint32_t spurious_timers; /* - * Event timer interrupt. + * Event timer interrupt. * * XXX a drawback of this implementation is that events serviced earlier must not set deadlines * that occur before the entire chain completes. @@ -67,83 +67,104 @@ uint32_t spurious_timers; * XXX a better implementation would use a set of generic callouts and iterate over them */ void -timer_intr(int user_mode, - uint64_t rip) +timer_intr(int user_mode, uint64_t rip) { - uint64_t abstime; - rtclock_timer_t *mytimer; - cpu_data_t *pp; - int64_t latency; - uint64_t pmdeadline; - boolean_t timer_processed = FALSE; + uint64_t orig_abstime, abstime; + rtclock_timer_t *mytimer; + cpu_data_t *pp; + uint64_t pmdeadline; + uint64_t min_deadline = EndOfAllTime; + uint64_t run_deadline = EndOfAllTime; + bool timer_processed = false; pp = current_cpu_datap(); - SCHED_STATS_TIMER_POP(current_processor()); + SCHED_STATS_INC(timer_pop_count); - abstime = mach_absolute_time(); /* Get the time now */ + orig_abstime = abstime = mach_absolute_time(); - /* has a pending clock timer expired? */ - mytimer = &pp->rtclock_timer; /* Point to the event timer */ - - if ((timer_processed = ((mytimer->deadline <= abstime) || - (abstime >= (mytimer->queue.earliest_soft_deadline))))) { + /* + * Has a pending clock timer expired? + */ + mytimer = &pp->rtclock_timer; + timer_processed = (mytimer->deadline <= abstime || + abstime >= mytimer->queue.earliest_soft_deadline); + if (timer_processed) { + uint64_t rtclock_deadline = MAX(mytimer->deadline, mytimer->when_set); /* - * Log interrupt service latency (-ve value expected by tool) - * a non-PM event is expected next. - * The requested deadline may be earlier than when it was set - * - use MAX to avoid reporting bogus latencies. - */ - latency = (int64_t) (abstime - MAX(mytimer->deadline, - mytimer->when_set)); - /* Log zero timer latencies when opportunistically processing - * coalesced timers. + * When opportunistically processing coalesced timers, don't factor + * their latency into the trace event. */ - if (latency < 0) { - TCOAL_DEBUG(0xEEEE0000, abstime, mytimer->queue.earliest_soft_deadline, abstime - mytimer->queue.earliest_soft_deadline, 0, 0); - latency = 0; + if (abstime > rtclock_deadline) { + TCOAL_DEBUG(0xEEEE0000, abstime, + mytimer->queue.earliest_soft_deadline, + abstime - mytimer->queue.earliest_soft_deadline, 0, 0); + } else { + min_deadline = rtclock_deadline; } - KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, - DECR_TRAP_LATENCY | DBG_FUNC_NONE, - -latency, - ((user_mode != 0) ? 
rip : VM_KERNEL_UNSLIDE(rip)), - user_mode, 0, 0); - - mytimer->has_expired = TRUE; /* Remember that we popped */ + mytimer->has_expired = TRUE; mytimer->deadline = timer_queue_expire(&mytimer->queue, abstime); mytimer->has_expired = FALSE; - /* Get the time again since we ran a bit */ + /* + * Get a more up-to-date current time after expiring the timer queue. + */ abstime = mach_absolute_time(); mytimer->when_set = abstime; } - /* is it time for power management state change? */ - if ((pmdeadline = pmCPUGetDeadline(pp)) && (pmdeadline <= abstime)) { - KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, - DECR_PM_DEADLINE | DBG_FUNC_START, - 0, 0, 0, 0, 0); - pmCPUDeadline(pp); - KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, - DECR_PM_DEADLINE | DBG_FUNC_END, - 0, 0, 0, 0, 0); - timer_processed = TRUE; - abstime = mach_absolute_time(); /* Get the time again since we ran a bit */ + /* + * Has a per-CPU running timer expired? + */ + run_deadline = running_timers_expire(pp->cpu_processor, abstime); + if (run_deadline != EndOfAllTime) { + if (run_deadline < min_deadline) { + min_deadline = run_deadline; + } + timer_processed = true; + abstime = mach_absolute_time(); } - uint64_t quantum_deadline = pp->quantum_timer_deadline; - /* is it the quantum timer expiration? */ - if ((quantum_deadline <= abstime) && (quantum_deadline > 0)) { - pp->quantum_timer_deadline = 0; - quantum_timer_expire(abstime); + /* + * Log the timer latency *before* the power management events. + */ + if (__probable(timer_processed)) { + /* + * Log the maximum interrupt service latency experienced by a timer. + */ + int64_t latency = min_deadline == EndOfAllTime ? 0 : + (int64_t)(abstime - min_deadline); + /* + * Log interrupt service latency (-ve value expected by tool) + * a non-PM event is expected next. + * The requested deadline may be earlier than when it was set + * - use MAX to avoid reporting bogus latencies. + */ + KDBG_RELEASE(DECR_TRAP_LATENCY, -latency, + user_mode != 0 ? rip : VM_KERNEL_UNSLIDE(rip), user_mode); } - /* schedule our next deadline */ + /* + * Is it time for power management state change? + */ + if ((pmdeadline = pmCPUGetDeadline(pp)) && (pmdeadline <= abstime)) { + KDBG_RELEASE(DECR_PM_DEADLINE | DBG_FUNC_START); + pmCPUDeadline(pp); + KDBG_RELEASE(DECR_PM_DEADLINE | DBG_FUNC_END); + timer_processed = true; + /* + * XXX Nothing below needs an updated abstime, so omit the update. + */ + } + + /* + * Schedule the next deadline. + */ x86_lcpu()->rtcDeadline = EndOfAllTime; timer_resync_deadlines(); - if (__improbable(timer_processed == FALSE)) { + if (__improbable(!timer_processed)) { spurious_timers++; } } @@ -170,18 +191,6 @@ timer_set_deadline(uint64_t deadline) splx(s); } -void -quantum_timer_set_deadline(uint64_t deadline) -{ - cpu_data_t *pp; - /* We should've only come into this path with interrupts disabled */ - assert(ml_get_interrupts_enabled() == FALSE); - - pp = current_cpu_datap(); - pp->quantum_timer_deadline = deadline; - timer_resync_deadlines(); -} - /* * Re-evaluate the outstanding deadlines and select the most proximate. 
* @@ -192,7 +201,6 @@ timer_resync_deadlines(void) { uint64_t deadline = EndOfAllTime; uint64_t pmdeadline; - uint64_t quantum_deadline; rtclock_timer_t *mytimer; spl_t s = splclock(); cpu_data_t *pp; @@ -221,14 +229,11 @@ timer_resync_deadlines(void) deadline = pmdeadline; } - /* If we have the quantum timer setup, check that */ - quantum_deadline = pp->quantum_timer_deadline; - if ((quantum_deadline > 0) && - (quantum_deadline < deadline)) { - deadline = quantum_deadline; + uint64_t run_deadline = running_timers_deadline(pp->cpu_processor); + if (run_deadline < deadline) { + deadline = run_deadline; } - /* * Go and set the "pop" event. */ @@ -237,7 +242,7 @@ timer_resync_deadlines(void) /* Record non-PM deadline for latency tool */ if (decr != 0 && deadline != pmdeadline) { uint64_t queue_count = 0; - if (deadline != quantum_deadline) { + if (deadline != run_deadline) { /* * For non-quantum timer put the queue count * in the tracepoint. diff --git a/osfmk/i386/i386_vm_init.c b/osfmk/i386/i386_vm_init.c index a4edd4259..beba092e1 100644 --- a/osfmk/i386/i386_vm_init.c +++ b/osfmk/i386/i386_vm_init.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003-2012 Apple Inc. All rights reserved. + * Copyright (c) 2003-2019 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -76,6 +76,7 @@ #include #include #include +#include #include #include #include @@ -84,11 +85,14 @@ #include #include +#define P2ROUNDUP(x, align) (-(-(x) & -(align))) vm_size_t mem_size = 0; pmap_paddr_t first_avail = 0;/* first after page tables */ -uint64_t max_mem; /* Size of physical memory (bytes), adjusted by maxmem */ +uint64_t max_mem; /* Size of physical memory minus carveouts (bytes), adjusted by maxmem */ +uint64_t max_mem_actual; /* Actual size of physical memory (bytes) adjusted by + * the maxmem boot-arg */ uint64_t mem_actual; uint64_t sane_size = 0; /* Memory size for defaults calculations */ @@ -356,7 +360,17 @@ i386_vm_init(uint64_t maxmem, segSizeConst = segCONST->vmsize; econst = sconst + segSizeConst; - assert(((sconst | econst) & PAGE_MASK) == 0); + kc_format_t kc_format = KCFormatUnknown; + + /* XXX: FIXME_IN_dyld: For new-style kernel caches, the ending address of __DATA_CONST may not be page-aligned */ + if (PE_get_primary_kc_format(&kc_format) && kc_format == KCFormatFileset) { + /* Round up the end */ + econst = P2ROUNDUP(econst, PAGE_SIZE); + edata = P2ROUNDUP(edata, PAGE_SIZE); + } else { + assert(((sconst | econst) & PAGE_MASK) == 0); + assert(((sdata | edata) & PAGE_MASK) == 0); + } DPRINTF("segTEXTB = %p\n", (void *) segTEXTB); DPRINTF("segDATAB = %p\n", (void *) segDATAB); @@ -384,8 +398,38 @@ i386_vm_init(uint64_t maxmem, vm_prelink_einfo = segPRELINKINFOB + segSizePRELINKINFO; vm_slinkedit = segLINKB; vm_elinkedit = segLINKB + segSizeLINK; + + /* + * In the fileset world, we want to be able to (un)slide addresses from + * the kernel or any of the kexts (e.g., for kernel logging metadata + * passed between the kernel and logd in userspace). VM_KERNEL_UNSLIDE + * (via VM_KERNEL_IS_SLID) should apply to the addresses in the range + * from the first basement address to the last boot kc address. 
+ * + * ^ + * : + * | + * vm_kernel_slid_top - --------------------------------------------- + * | + * : + * : Boot kc (kexts in the boot kc here) + * : - - - - - - - - - - - - - - - - - - - - - - - + * : + * : + * | Boot kc (kernel here) + * - --------------------------------------------- + * | + * : + * | Basement (kexts in pageable and aux kcs here) + * vm_kernel_slid_base - --------------------------------------------- + * 0 + */ + vm_kernel_slid_base = vm_kext_base + vm_kernel_slide; - vm_kernel_slid_top = vm_prelink_einfo; + vm_kernel_slid_top = (kc_format == KCFormatFileset) ? + vm_slinkedit : vm_prelink_einfo; + + vm_page_kernelcache_count = (unsigned int) (atop_64(vm_kernel_top - vm_kernel_base)); vm_set_page_size(); @@ -676,7 +720,7 @@ i386_vm_init(uint64_t maxmem, sane_size = mem_actual; /* - * We cap at KERNEL_MAXMEM bytes (currently 32GB for K32, 96GB for K64). + * We cap at KERNEL_MAXMEM bytes (currently 1536GB). * Unless overriden by the maxmem= boot-arg * -- which is a non-zero maxmem argument to this function. */ @@ -738,6 +782,7 @@ i386_vm_init(uint64_t maxmem, mem_size = (vm_size_t)sane_size; } max_mem = sane_size; + max_mem_actual = sane_size; kprintf("Physical memory %llu MB\n", sane_size / MB); diff --git a/osfmk/i386/lapic.h b/osfmk/i386/lapic.h index 9f06f52bc..43bd2db87 100644 --- a/osfmk/i386/lapic.h +++ b/osfmk/i386/lapic.h @@ -247,9 +247,11 @@ typedef uint32_t lapic_timer_count_t; (LAPIC_READ_OFFSET(ISR_BASE,(base+LAPIC_##src##_INTERRUPT)/32) \ & (1 <<((base + LAPIC_##src##_INTERRUPT)%32))) +extern uint32_t lapic_safe_apicid(void); +extern void lapic_init_slave(void); extern void lapic_init(void); -extern void lapic_configure(void); -extern void lapic_shutdown(void); +extern void lapic_configure(bool for_wake); +extern void lapic_shutdown(bool for_sleep); extern void lapic_smm_restore(void); extern boolean_t lapic_probe(void); extern void lapic_dump(void); diff --git a/osfmk/i386/lapic_native.c b/osfmk/i386/lapic_native.c index bf5e61a3c..82acb30f6 100644 --- a/osfmk/i386/lapic_native.c +++ b/osfmk/i386/lapic_native.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2011 Apple Inc. All rights reserved. + * Copyright (c) 2008-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -84,6 +84,14 @@ static unsigned lapic_error_count_threshold = 5; static boolean_t lapic_dont_panic = FALSE; int lapic_max_interrupt_cpunum = 0; +typedef enum { + APIC_MODE_UNKNOWN = 0, + APIC_MODE_XAPIC = 1, + APIC_MODE_X2APIC = 2 +} apic_mode_t; + +static apic_mode_t apic_mode_before_sleep = APIC_MODE_UNKNOWN; + #ifdef MP_DEBUG void lapic_cpu_map_dump(void) @@ -108,13 +116,12 @@ lapic_cpu_map_dump(void) #endif /* MP_DEBUG */ static void -legacy_init(void) +map_local_apic(void) { + vm_map_offset_t lapic_vbase64; int result; kern_return_t kr; vm_map_entry_t entry; - vm_map_offset_t lapic_vbase64; - /* Establish a map to the local apic */ if (lapic_vbase == 0) { lapic_vbase64 = (vm_offset_t)vm_map_min(kernel_map); @@ -150,7 +157,23 @@ legacy_init(void) assert(kr == KERN_SUCCESS); } +} + +static void +legacy_init(void) +{ + uint32_t lo, hi; + rdmsr(MSR_IA32_APIC_BASE, lo, hi); + if ((lo & MSR_IA32_APIC_BASE_EXTENDED) != 0) { + /* + * If we're already in x2APIC mode, we MUST disable the local APIC + * before transitioning back to legacy APIC mode. 
+ */ + lo &= ~(MSR_IA32_APIC_BASE_ENABLE | MSR_IA32_APIC_BASE_EXTENDED); + wrmsr64(MSR_IA32_APIC_BASE, ((uint64_t)hi) << 32 | lo); + wrmsr64(MSR_IA32_APIC_BASE, ((uint64_t)hi) << 32 | lo | MSR_IA32_APIC_BASE_ENABLE); + } /* * Set flat delivery model, logical processor id * This should already be the default set. @@ -193,7 +216,7 @@ static lapic_ops_table_t legacy_ops = { legacy_write_icr }; -static boolean_t is_x2apic = FALSE; +boolean_t is_x2apic = FALSE; static void x2apic_init(void) @@ -245,6 +268,99 @@ static lapic_ops_table_t x2apic_ops = { x2apic_write_icr }; +/* + * Used by APs to determine their APIC IDs; assumes master CPU has initialized + * the local APIC interfaces. + */ +uint32_t +lapic_safe_apicid(void) +{ + uint32_t lo; + uint32_t hi; + boolean_t is_lapic_enabled, is_local_x2apic; + + rdmsr(MSR_IA32_APIC_BASE, lo, hi); + is_lapic_enabled = (lo & MSR_IA32_APIC_BASE_ENABLE) != 0; + is_local_x2apic = (lo & MSR_IA32_APIC_BASE_EXTENDED) != 0; + + if (is_lapic_enabled && is_local_x2apic) { + return x2apic_read(ID); + } else if (is_lapic_enabled) { + return (*LAPIC_MMIO(ID) >> LAPIC_ID_SHIFT) & LAPIC_ID_MASK; + } else { + panic("Unknown Local APIC state!"); + /*NORETURN*/ + } +} + +static void +lapic_reinit(bool for_wake) +{ + uint32_t lo; + uint32_t hi; + boolean_t is_boot_processor; + boolean_t is_lapic_enabled; + boolean_t is_local_x2apic; + + rdmsr(MSR_IA32_APIC_BASE, lo, hi); + is_boot_processor = (lo & MSR_IA32_APIC_BASE_BSP) != 0; + is_lapic_enabled = (lo & MSR_IA32_APIC_BASE_ENABLE) != 0; + is_local_x2apic = (lo & MSR_IA32_APIC_BASE_EXTENDED) != 0; + + /* + * If we're configured for x2apic mode and we're being asked to transition + * to legacy APIC mode, OR if we're in legacy APIC mode and we're being + * asked to transition to x2apic mode, call LAPIC_INIT(). + */ + if ((!is_local_x2apic && is_x2apic) || (is_local_x2apic && !is_x2apic)) { + LAPIC_INIT(); + /* Now re-read after LAPIC_INIT() */ + rdmsr(MSR_IA32_APIC_BASE, lo, hi); + is_lapic_enabled = (lo & MSR_IA32_APIC_BASE_ENABLE) != 0; + is_local_x2apic = (lo & MSR_IA32_APIC_BASE_EXTENDED) != 0; + } + + if ((!is_lapic_enabled && !is_local_x2apic)) { + panic("Unexpected local APIC state\n"); + } + + /* + * If we did not select the same APIC mode as we had before sleep, flag + * that as an error (and panic on debug/development kernels). Note that + * we might get here with for_wake == true for the first boot case. In + * that case, apic_mode_before_sleep will be UNKNOWN (since we haven't + * slept yet), so we do not need to do any APIC checks. + */ + if (for_wake && + ((apic_mode_before_sleep == APIC_MODE_XAPIC && !is_lapic_enabled) || + (apic_mode_before_sleep == APIC_MODE_X2APIC && !is_local_x2apic))) { + kprintf("Inconsistent APIC state after wake (was %d before sleep, " + "now is %d)", apic_mode_before_sleep, + is_lapic_enabled ? APIC_MODE_XAPIC : APIC_MODE_X2APIC); +#if DEBUG || DEVELOPMENT + kprintf("HALTING.\n"); + /* + * Unfortunately, we cannot safely panic here because the + * executing CPU might not be fully initialized. The best + * we can do is just print a message to the console and + * halt. 
+ */ + asm volatile ("cli; hlt;" ::: "memory"); +#endif + } +} + +void +lapic_init_slave(void) +{ + lapic_reinit(false); +#if DEBUG || DEVELOPMENT + if (rdmsr64(MSR_IA32_APIC_BASE) & MSR_IA32_APIC_BASE_BSP) { + panic("Calling lapic_init_slave() on the boot processor\n"); + } +#endif +} + void lapic_init(void) { @@ -258,7 +374,7 @@ lapic_init(void) is_boot_processor = (lo & MSR_IA32_APIC_BASE_BSP) != 0; is_lapic_enabled = (lo & MSR_IA32_APIC_BASE_ENABLE) != 0; is_x2apic = (lo & MSR_IA32_APIC_BASE_EXTENDED) != 0; - lapic_pbase = (lo & MSR_IA32_APIC_BASE_BASE); + lapic_pbase = (lo & MSR_IA32_APIC_BASE_BASE); kprintf("MSR_IA32_APIC_BASE 0x%llx %s %s mode %s\n", lapic_pbase, is_lapic_enabled ? "enabled" : "disabled", is_x2apic ? "extended" : "legacy", @@ -272,13 +388,30 @@ lapic_init(void) * Unless overriden by boot-arg. */ if (!is_x2apic && (cpuid_features() & CPUID_FEATURE_x2APIC)) { - PE_parse_boot_argn("-x2apic", &is_x2apic, sizeof(is_x2apic)); + /* + * If no x2apic boot-arg was set and if we're running under a VMM, + * autoenable x2APIC mode. + */ + if (PE_parse_boot_argn("x2apic", &is_x2apic, sizeof(is_x2apic)) == FALSE && + cpuid_vmm_info()->cpuid_vmm_family != CPUID_VMM_FAMILY_NONE) { + is_x2apic = TRUE; + } kprintf("x2APIC supported %s be enabled\n", is_x2apic ? "and will" : "but will not"); } lapic_ops = is_x2apic ? &x2apic_ops : &legacy_ops; + if (lapic_pbase != 0) { + /* + * APs might need to consult the local APIC via the MMIO interface + * to get their APIC IDs. + */ + map_local_apic(); + } else if (!is_x2apic) { + panic("Local APIC physical address was not set."); + } + LAPIC_INIT(); kprintf("ID: 0x%x LDR: 0x%x\n", LAPIC_READ(ID), LAPIC_READ(LDR)); @@ -289,7 +422,7 @@ lapic_init(void) /* Set up the lapic_id <-> cpu_number map and add this boot processor */ lapic_cpu_map_init(); - lapic_cpu_map((LAPIC_READ(ID) >> LAPIC_ID_SHIFT) & LAPIC_ID_MASK, 0); + lapic_cpu_map(lapic_safe_apicid(), 0); current_cpu_datap()->cpu_phys_number = cpu_to_lapic[0]; kprintf("Boot cpu local APIC id 0x%x\n", cpu_to_lapic[0]); } @@ -348,7 +481,7 @@ lapic_dump(void) (LAPIC_READ(lvt)&LAPIC_LVT_IP_PLRITY_LOW)? "Low " : "High" kprintf("LAPIC %d at %p version 0x%x\n", - (LAPIC_READ(ID) >> LAPIC_ID_SHIFT) & LAPIC_ID_MASK, + lapic_safe_apicid(), (void *) lapic_vbase, LAPIC_READ(VERSION) & LAPIC_VERSION_MASK); kprintf("Priorities: Task 0x%x Arbitration 0x%x Processor 0x%x\n", @@ -473,12 +606,16 @@ lapic_probe(void) } void -lapic_shutdown(void) +lapic_shutdown(bool for_sleep) { uint32_t lo; uint32_t hi; uint32_t value; + if (for_sleep == true) { + apic_mode_before_sleep = (is_x2apic ? APIC_MODE_X2APIC : APIC_MODE_XAPIC); + } + /* Shutdown if local APIC was enabled by OS */ if (lapic_os_enabled == FALSE) { return; @@ -521,7 +658,7 @@ cpu_can_exit(int cpu) } void -lapic_configure(void) +lapic_configure(bool for_wake) { int value; @@ -538,6 +675,12 @@ lapic_configure(void) } } + /* + * Reinitialize the APIC (handles the case where we're configured to use the X2APIC + * but firmware configured the Legacy APIC): + */ + lapic_reinit(for_wake); + /* Accept all */ LAPIC_WRITE(TPR, 0); @@ -895,9 +1038,12 @@ lapic_send_ipi(int cpu, int vector) state = ml_set_interrupts_enabled(FALSE); - /* Wait for pending outgoing send to complete */ - while (LAPIC_READ_ICR() & LAPIC_ICR_DS_PENDING) { - cpu_pause(); + /* X2APIC's ICR doesn't have a pending bit. 
*/ + if (!is_x2apic) { + /* Wait for pending outgoing send to complete */ + while (LAPIC_READ_ICR() & LAPIC_ICR_DS_PENDING) { + cpu_pause(); + } } LAPIC_WRITE_ICR(cpu_to_lapic[cpu], vector | LAPIC_ICR_DM_FIXED); diff --git a/osfmk/i386/locks.h b/osfmk/i386/locks.h index 760a167cf..736f7a4df 100644 --- a/osfmk/i386/locks.h +++ b/osfmk/i386/locks.h @@ -37,11 +37,6 @@ #include -extern unsigned int LcksOpts; -#if DEVELOPMENT || DEBUG -extern unsigned int LckDisablePreemptCheck; -#endif - #define enaLkDeb 0x00000001 /* Request debug in default attribute */ #define enaLkStat 0x00000002 /* Request statistic in default attribute */ #define disLkRWPrio 0x00000004 /* Disable RW lock priority promotion */ @@ -269,8 +264,8 @@ static_assert(sizeof(lck_rw_t) == LCK_RW_T_SIZE); #if LOCK_PRIVATE -#define disable_preemption_for_thread(t) ((cpu_data_t GS_RELATIVE *)0UL)->cpu_preemption_level++ -#define preemption_disabled_for_thread(t) (((cpu_data_t GS_RELATIVE *)0UL)->cpu_preemption_level > 0) +#define disable_preemption_for_thread(t) disable_preemption_internal() +#define preemption_disabled_for_thread(t) (get_preemption_level() > 0) #define LCK_MTX_THREAD_TO_STATE(t) ((uintptr_t)t) #define PLATFORM_LCK_ILOCK 0 diff --git a/osfmk/i386/locks_i386.c b/osfmk/i386/locks_i386.c index 25b329345..98d11ec50 100644 --- a/osfmk/i386/locks_i386.c +++ b/osfmk/i386/locks_i386.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2019 Apple Inc. All rights reserved. + * Copyright (c) 2000-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -67,7 +67,7 @@ #include #include -#include +#include #include #include #include @@ -112,12 +112,6 @@ #define ANY_LOCK_DEBUG (USLOCK_DEBUG || LOCK_DEBUG || MUTEX_DEBUG) -unsigned int LcksOpts = 0; - -#if DEVELOPMENT || DEBUG -unsigned int LckDisablePreemptCheck = 0; -#endif - /* Forwards */ #if USLOCK_DEBUG @@ -155,6 +149,18 @@ typedef void *pc_t; #endif /* lint */ #endif /* USLOCK_DEBUG */ +ZONE_VIEW_DEFINE(ZV_LCK_SPIN, "lck_spin", + KHEAP_ID_DEFAULT, sizeof(lck_spin_t)); + +ZONE_VIEW_DEFINE(ZV_LCK_MTX, "lck_mtx", + KHEAP_ID_DEFAULT, sizeof(lck_mtx_t)); + +ZONE_VIEW_DEFINE(ZV_LCK_MTX_EXT, "lck_mtx_ext", + KHEAP_ID_DEFAULT, sizeof(lck_mtx_ext_t)); + +ZONE_VIEW_DEFINE(ZV_LCK_RW, "lck_rw", + KHEAP_ID_DEFAULT, sizeof(lck_rw_t)); + /* * atomic exchange API is a low level abstraction of the operations * to atomically read, modify, and write a pointer. This abstraction works @@ -262,12 +268,10 @@ lck_spin_alloc_init( lck_grp_t *grp, lck_attr_t *attr) { - lck_spin_t *lck; - - if ((lck = (lck_spin_t *)kalloc(sizeof(lck_spin_t))) != 0) { - lck_spin_init(lck, grp, attr); - } + lck_spin_t *lck; + lck = zalloc(ZV_LCK_SPIN); + lck_spin_init(lck, grp, attr); return lck; } @@ -280,7 +284,7 @@ lck_spin_free( lck_grp_t *grp) { lck_spin_destroy(lck, grp); - kfree(lck, sizeof(lck_spin_t)); + zfree(ZV_LCK_SPIN, lck); } /* @@ -612,7 +616,10 @@ void (usimple_lock_try_lock_loop)(usimple_lock_t l LCK_GRP_ARG(lck_grp_t *grp)) { - usimple_lock_try_lock_mp_signal_safe_loop_deadline(l, ULLONG_MAX, grp); + /* When the lock is not contended, grab the lock and go. 
*/ + if (!simple_lock_try(l, grp)) { + usimple_lock_try_lock_mp_signal_safe_loop_deadline(l, ULLONG_MAX, grp); + } } unsigned @@ -622,9 +629,16 @@ int LCK_GRP_ARG(lck_grp_t *grp)) { uint64_t deadline; - uint64_t base_at = mach_absolute_time(); + uint64_t base_at; uint64_t duration_at; + /* Fast track for uncontended locks */ + if (simple_lock_try(l, grp)) { + return 1; + } + + base_at = mach_absolute_time(); + nanoseconds_to_absolutetime(duration, &duration_at); deadline = base_at + duration_at; if (deadline < base_at) { @@ -742,7 +756,7 @@ usld_lock_post( usimple_lock_t l, pc_t pc) { - int mycpu; + unsigned int mycpu; char caller[] = "successful usimple_lock"; @@ -759,11 +773,13 @@ usld_lock_post( caller, l); } - mycpu = cpu_number(); + mycpu = (unsigned int)cpu_number(); + assert(mycpu <= UCHAR_MAX); + l->debug.lock_thread = (void *)current_thread(); l->debug.state |= USLOCK_TAKEN; l->debug.lock_pc = pc; - l->debug.lock_cpu = mycpu; + l->debug.lock_cpu = (unsigned char)mycpu; } @@ -780,7 +796,7 @@ usld_unlock( usimple_lock_t l, pc_t pc) { - int mycpu; + unsigned int mycpu; char caller[] = "usimple_unlock"; @@ -789,6 +805,7 @@ usld_unlock( } mycpu = cpu_number(); + assert(mycpu <= UCHAR_MAX); if (!(l->debug.state & USLOCK_TAKEN)) { panic("%s: lock 0x%p hasn't been taken", @@ -809,7 +826,7 @@ usld_unlock( l->debug.lock_thread = INVALID_PC; l->debug.state &= ~USLOCK_TAKEN; l->debug.unlock_pc = pc; - l->debug.unlock_cpu = mycpu; + l->debug.unlock_cpu = (unsigned char)mycpu; } @@ -845,7 +862,7 @@ usld_lock_try_post( usimple_lock_t l, pc_t pc) { - int mycpu; + unsigned int mycpu; char caller[] = "successful usimple_lock_try"; if (!usld_lock_common_checks(l, caller)) { @@ -862,10 +879,12 @@ usld_lock_try_post( } mycpu = cpu_number(); + assert(mycpu <= UCHAR_MAX); + l->debug.lock_thread = (void *) current_thread(); l->debug.state |= USLOCK_TAKEN; l->debug.lock_pc = pc; - l->debug.lock_cpu = mycpu; + l->debug.lock_cpu = (unsigned char)mycpu; } #endif /* USLOCK_DEBUG */ @@ -877,13 +896,10 @@ lck_rw_alloc_init( lck_grp_t *grp, lck_attr_t *attr) { - lck_rw_t *lck; - - if ((lck = (lck_rw_t *)kalloc(sizeof(lck_rw_t))) != 0) { - bzero(lck, sizeof(lck_rw_t)); - lck_rw_init(lck, grp, attr); - } + lck_rw_t *lck; + lck = zalloc_flags(ZV_LCK_RW, Z_WAITOK | Z_ZERO); + lck_rw_init(lck, grp, attr); return lck; } @@ -896,7 +912,7 @@ lck_rw_free( lck_grp_t *grp) { lck_rw_destroy(lck, grp); - kfree(lck, sizeof(lck_rw_t)); + zfree(ZV_LCK_RW, lck); } /* @@ -1623,6 +1639,29 @@ lck_rw_lock_shared_gen( #endif } +#define LCK_RW_LOCK_EXCLUSIVE_TAS(lck) (atomic_test_and_set32(&(lck)->data, \ + (LCK_RW_SHARED_MASK | LCK_RW_WANT_EXCL | LCK_RW_WANT_UPGRADE | LCK_RW_INTERLOCK), \ + LCK_RW_WANT_EXCL, memory_order_acquire_smp, FALSE)) + +/* + * Routine: lck_rw_lock_exclusive_check_contended + */ + +bool +lck_rw_lock_exclusive_check_contended(lck_rw_t *lock) +{ + bool contended = false; + current_thread()->rwlock_count++; + if (LCK_RW_LOCK_EXCLUSIVE_TAS(lock)) { +#if CONFIG_DTRACE + LOCKSTAT_RECORD(LS_LCK_RW_LOCK_EXCL_ACQUIRE, lock, DTRACE_RW_EXCL); +#endif /* CONFIG_DTRACE */ + } else { + contended = true; + lck_rw_lock_exclusive_gen(lock); + } + return contended; +} /* * Routine: lck_rw_lock_exclusive @@ -1632,9 +1671,7 @@ void lck_rw_lock_exclusive(lck_rw_t *lock) { current_thread()->rwlock_count++; - if (atomic_test_and_set32(&lock->data, - (LCK_RW_SHARED_MASK | LCK_RW_WANT_EXCL | LCK_RW_WANT_UPGRADE | LCK_RW_INTERLOCK), - LCK_RW_WANT_EXCL, memory_order_acquire_smp, FALSE)) { + if (LCK_RW_LOCK_EXCLUSIVE_TAS(lock)) { #if 
CONFIG_DTRACE LOCKSTAT_RECORD(LS_LCK_RW_LOCK_EXCL_ACQUIRE, lock, DTRACE_RW_EXCL); #endif /* CONFIG_DTRACE */ @@ -2138,10 +2175,6 @@ kdp_lck_rw_lock_is_acquired_exclusive(lck_rw_t *lck) * on acquire. */ -#ifdef MUTEX_ZONE -extern zone_t lck_mtx_zone; -#endif - /* * Routine: lck_mtx_alloc_init */ @@ -2150,16 +2183,10 @@ lck_mtx_alloc_init( lck_grp_t *grp, lck_attr_t *attr) { - lck_mtx_t *lck; -#ifdef MUTEX_ZONE - if ((lck = (lck_mtx_t *)zalloc(lck_mtx_zone)) != 0) { - lck_mtx_init(lck, grp, attr); - } -#else - if ((lck = (lck_mtx_t *)kalloc(sizeof(lck_mtx_t))) != 0) { - lck_mtx_init(lck, grp, attr); - } -#endif + lck_mtx_t *lck; + + lck = zalloc(ZV_LCK_MTX); + lck_mtx_init(lck, grp, attr); return lck; } @@ -2172,11 +2199,7 @@ lck_mtx_free( lck_grp_t *grp) { lck_mtx_destroy(lck, grp); -#ifdef MUTEX_ZONE - zfree(lck_mtx_zone, lck); -#else - kfree(lck, sizeof(lck_mtx_t)); -#endif + zfree(ZV_LCK_MTX, lck); } /* @@ -2224,11 +2247,10 @@ lck_mtx_init( } if ((lck_attr->lck_attr_val) & LCK_ATTR_DEBUG) { - if ((lck_ext = (lck_mtx_ext_t *)kalloc(sizeof(lck_mtx_ext_t))) != 0) { - lck_mtx_ext_init(lck_ext, grp, lck_attr); - lck->lck_mtx_tag = LCK_MTX_TAG_INDIRECT; - lck->lck_mtx_ptr = lck_ext; - } + lck_ext = zalloc(ZV_LCK_MTX_EXT); + lck_mtx_ext_init(lck_ext, grp, lck_attr); + lck->lck_mtx_tag = LCK_MTX_TAG_INDIRECT; + lck->lck_mtx_ptr = lck_ext; } else { lck->lck_mtx_owner = 0; lck->lck_mtx_state = 0; @@ -2312,7 +2334,7 @@ lck_mtx_destroy( lck_mtx_lock_mark_destroyed(lck, indirect); if (indirect) { - kfree(lck->lck_mtx_ptr, sizeof(lck_mtx_ext_t)); + zfree(ZV_LCK_MTX_EXT, lck->lck_mtx_ptr); } lck_grp_lckcnt_decr(grp, LCK_TYPE_MTX); lck_grp_deallocate(grp); @@ -2684,7 +2706,8 @@ try_again: lck_grp_mtx_update_direct_wait((struct _lck_mtx_ext_*)lock); } - /* just fall through case LCK_MTX_SPINWAIT_SPUN */ + /* just fall through case LCK_MTX_SPINWAIT_SPUN */ + OS_FALLTHROUGH; case LCK_MTX_SPINWAIT_SPUN_HIGH_THR: case LCK_MTX_SPINWAIT_SPUN_OWNER_NOT_CORE: case LCK_MTX_SPINWAIT_SPUN_NO_WINDOW_CONTENTION: diff --git a/osfmk/i386/locks_i386_opt.c b/osfmk/i386/locks_i386_opt.c index 60fceb2a4..14968bc1b 100644 --- a/osfmk/i386/locks_i386_opt.c +++ b/osfmk/i386/locks_i386_opt.c @@ -31,7 +31,6 @@ #include #include -#include #include #include #include @@ -71,6 +70,7 @@ */ #if DEVELOPMENT || DEBUG +TUNABLE(bool, LckDisablePreemptCheck, "-disable_mtx_chk", false); /* * If one or more simplelocks are currently held by a thread, diff --git a/osfmk/i386/machine_check.c b/osfmk/i386/machine_check.c index fd565d785..7041c4439 100644 --- a/osfmk/i386/machine_check.c +++ b/osfmk/i386/machine_check.c @@ -26,7 +26,7 @@ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -#include +#include #include #include #include @@ -183,12 +183,11 @@ mca_cpu_alloc(cpu_data_t *cdp) */ mca_state_size = sizeof(mca_state_t) + sizeof(mca_mci_bank_t) * mca_error_bank_count; - cdp->cpu_mca_state = kalloc(mca_state_size); + cdp->cpu_mca_state = zalloc_permanent(mca_state_size, ZALIGN_PTR); if (cdp->cpu_mca_state == NULL) { printf("mca_cpu_alloc() failed for cpu %d\n", cdp->cpu_number); return; } - bzero((void *) cdp->cpu_mca_state, mca_state_size); /* * If the boot processor is yet have its allocation made, diff --git a/osfmk/i386/machine_routines.c b/osfmk/i386/machine_routines.c index e27b01b22..6357bdb8d 100644 --- a/osfmk/i386/machine_routines.c +++ b/osfmk/i386/machine_routines.c @@ -40,6 +40,7 @@ #include #include +#include #include #include #include @@ -54,6 +55,7 @@ #include #include #include +#include #if KPC #include #endif @@ -71,8 +73,6 @@ 
extern void wakeup(void *); -static int max_cpus_initialized = 0; - uint64_t LockTimeOut; uint64_t TLBTimeOut; uint64_t LockTimeOutTSC; @@ -92,7 +92,9 @@ uint32_t ml_timer_eager_evaluations; uint64_t ml_timer_eager_evaluation_max; static boolean_t ml_timer_evaluation_in_progress = FALSE; - +LCK_GRP_DECLARE(max_cpus_grp, "max_cpus"); +LCK_MTX_DECLARE(max_cpus_lock, &max_cpus_grp); +static int max_cpus_initialized = 0; #define MAX_CPUS_SET 0x1 #define MAX_CPUS_WAIT 0x2 @@ -142,6 +144,42 @@ ml_static_slide( return VM_KERNEL_SLIDE(vaddr); } +/* + * base must be page-aligned, and size must be a multiple of PAGE_SIZE + */ +kern_return_t +ml_static_verify_page_protections( + uint64_t base, uint64_t size, vm_prot_t prot) +{ + vm_prot_t pageprot; + uint64_t offset; + + DBG("ml_static_verify_page_protections: vaddr 0x%llx sz 0x%llx prot 0x%x\n", base, size, prot); + + /* + * base must be within the static bounds, defined to be: + * (vm_kernel_stext, kc_highest_nonlinkedit_vmaddr) + */ +#if DEVELOPMENT || DEBUG || KASAN + assert(kc_highest_nonlinkedit_vmaddr > 0 && base > vm_kernel_stext && base < kc_highest_nonlinkedit_vmaddr); +#else /* On release kernels, assume this is a protection mismatch failure. */ + if (kc_highest_nonlinkedit_vmaddr == 0 || base < vm_kernel_stext || base >= kc_highest_nonlinkedit_vmaddr) { + return KERN_FAILURE; + } +#endif + + for (offset = 0; offset < size; offset += PAGE_SIZE) { + if (pmap_get_prot(kernel_pmap, base + offset, &pageprot) == KERN_FAILURE) { + return KERN_FAILURE; + } + if ((pageprot & prot) != prot) { + return KERN_FAILURE; + } + } + + return KERN_SUCCESS; +} + vm_offset_t ml_static_unslide( vm_offset_t vaddr) @@ -214,6 +252,18 @@ ml_static_mfree( #endif } +/* Change page protections for addresses previously loaded by efiboot */ +kern_return_t +ml_static_protect(vm_offset_t vmaddr, vm_size_t size, vm_prot_t prot) +{ + boolean_t NX = !!!(prot & VM_PROT_EXECUTE), ro = !!!(prot & VM_PROT_WRITE); + + assert(prot & VM_PROT_READ); + + pmap_mark_range(kernel_pmap, vmaddr, size, NX, ro); + + return KERN_SUCCESS; +} /* virtual to physical on wired pages */ vm_offset_t @@ -434,8 +484,6 @@ ml_install_interrupt_handler( (IOInterruptHandler) handler, refCon); (void) ml_set_interrupts_enabled(current_state); - - initialize_screen(NULL, kPEAcquireScreen); } @@ -502,34 +550,18 @@ register_cpu( if (this_cpu_datap->lcpu.core == NULL) { goto failed; } - -#if NCOPY_WINDOWS > 0 - this_cpu_datap->cpu_pmap = pmap_cpu_alloc(boot_cpu); - if (this_cpu_datap->cpu_pmap == NULL) { - goto failed; - } -#endif - - this_cpu_datap->cpu_processor = cpu_processor_alloc(boot_cpu); - if (this_cpu_datap->cpu_processor == NULL) { - goto failed; - } - /* - * processor_init() deferred to topology start - * because "slot numbers" a.k.a. logical processor numbers - * are not yet finalized. - */ } + /* + * processor_init() deferred to topology start + * because "slot numbers" a.k.a. logical processor numbers + * are not yet finalized. 
+ */ *processor_out = this_cpu_datap->cpu_processor; return KERN_SUCCESS; failed: - cpu_processor_free(this_cpu_datap->cpu_processor); -#if NCOPY_WINDOWS > 0 - pmap_cpu_free(this_cpu_datap->cpu_pmap); -#endif console_cpu_free(this_cpu_datap->cpu_console_buf); #if KPC kpc_unregister_cpu(this_cpu_datap); @@ -657,12 +689,22 @@ ml_cpu_get_info(ml_cpu_info_t *cpu_infop) } } -void -ml_init_max_cpus(unsigned long max_cpus) +int +ml_early_cpu_max_number(void) { - boolean_t current_state; + int n = max_ncpus; - current_state = ml_set_interrupts_enabled(FALSE); + assert(startup_phase >= STARTUP_SUB_TUNABLES); + if (max_cpus_from_firmware) { + n = MIN(n, max_cpus_from_firmware); + } + return n - 1; +} + +void +ml_set_max_cpus(unsigned int max_cpus) +{ + lck_mtx_lock(&max_cpus_lock); if (max_cpus_initialized != MAX_CPUS_SET) { if (max_cpus > 0 && max_cpus <= MAX_CPUS) { /* @@ -674,34 +716,25 @@ ml_init_max_cpus(unsigned long max_cpus) machine_info.max_cpus = (integer_t)MIN(max_cpus, max_ncpus); } if (max_cpus_initialized == MAX_CPUS_WAIT) { - wakeup((event_t)&max_cpus_initialized); + thread_wakeup((event_t) &max_cpus_initialized); } max_cpus_initialized = MAX_CPUS_SET; } - (void) ml_set_interrupts_enabled(current_state); + lck_mtx_unlock(&max_cpus_lock); } -int -ml_get_max_cpus(void) +unsigned int +ml_wait_max_cpus(void) { - boolean_t current_state; - - current_state = ml_set_interrupts_enabled(FALSE); - if (max_cpus_initialized != MAX_CPUS_SET) { + lck_mtx_lock(&max_cpus_lock); + while (max_cpus_initialized != MAX_CPUS_SET) { max_cpus_initialized = MAX_CPUS_WAIT; - assert_wait((event_t)&max_cpus_initialized, THREAD_UNINT); - (void)thread_block(THREAD_CONTINUE_NULL); + lck_mtx_sleep(&max_cpus_lock, LCK_SLEEP_DEFAULT, &max_cpus_initialized, THREAD_UNINT); } - (void) ml_set_interrupts_enabled(current_state); + lck_mtx_unlock(&max_cpus_lock); return machine_info.max_cpus; } -boolean_t -ml_wants_panic_trap_to_debugger(void) -{ - return FALSE; -} - void ml_panic_trap_to_debugger(__unused const char *panic_format_str, __unused va_list *panic_args, @@ -713,6 +746,76 @@ ml_panic_trap_to_debugger(__unused const char *panic_format_str, return; } +static uint64_t +virtual_timeout_inflate64(unsigned int vti, uint64_t timeout, uint64_t max_timeout) +{ + if (vti >= 64) { + return max_timeout; + } + + if ((timeout << vti) >> vti != timeout) { + return max_timeout; + } + + if ((timeout << vti) > max_timeout) { + return max_timeout; + } + + return timeout << vti; +} + +static uint32_t +virtual_timeout_inflate32(unsigned int vti, uint32_t timeout, uint32_t max_timeout) +{ + if (vti >= 32) { + return max_timeout; + } + + if ((timeout << vti) >> vti != timeout) { + return max_timeout; + } + + return timeout << vti; +} + +/* + * Some timeouts are later adjusted or used in calculations setting + * other values. In order to avoid overflow, cap the max timeout as + * 2^47ns (~39 hours). + */ +static const uint64_t max_timeout_ns = 1ULL << 47; + +/* + * Inflate a timeout in absolutetime. + */ +static uint64_t +virtual_timeout_inflate_abs(unsigned int vti, uint64_t timeout) +{ + uint64_t max_timeout; + nanoseconds_to_absolutetime(max_timeout_ns, &max_timeout); + return virtual_timeout_inflate64(vti, timeout, max_timeout); +} + +/* + * Inflate a value in TSC ticks. + */ +static uint64_t +virtual_timeout_inflate_tsc(unsigned int vti, uint64_t timeout) +{ + const uint64_t max_timeout = tmrCvt(max_timeout_ns, tscFCvtn2t); + return virtual_timeout_inflate64(vti, timeout, max_timeout); +} + +/* + * Inflate a timeout in microseconds. 
+ */ +static uint32_t +virtual_timeout_inflate_us(unsigned int vti, uint64_t timeout) +{ + const uint32_t max_timeout = ~0; + return virtual_timeout_inflate32(vti, timeout, max_timeout); +} + /* * Routine: ml_init_lock_timeout * Function: @@ -804,35 +907,39 @@ ml_init_lock_timeout(void) virtualized = ((cpuid_features() & CPUID_FEATURE_VMM) != 0); if (virtualized) { - int vti; + unsigned int vti; if (!PE_parse_boot_argn("vti", &vti, sizeof(vti))) { vti = 6; } printf("Timeouts adjusted for virtualization (<<%d)\n", vti); kprintf("Timeouts adjusted for virtualization (<<%d):\n", vti); -#define VIRTUAL_TIMEOUT_INFLATE64(_timeout) \ -MACRO_BEGIN \ - kprintf("%24s: 0x%016llx ", #_timeout, _timeout); \ - _timeout <<= vti; \ - kprintf("-> 0x%016llx\n", _timeout); \ +#define VIRTUAL_TIMEOUT_INFLATE_ABS(_timeout) \ +MACRO_BEGIN \ + kprintf("%24s: 0x%016llx ", #_timeout, _timeout); \ + _timeout = virtual_timeout_inflate_abs(vti, _timeout); \ + kprintf("-> 0x%016llx\n", _timeout); \ +MACRO_END + +#define VIRTUAL_TIMEOUT_INFLATE_TSC(_timeout) \ +MACRO_BEGIN \ + kprintf("%24s: 0x%016llx ", #_timeout, _timeout); \ + _timeout = virtual_timeout_inflate_tsc(vti, _timeout); \ + kprintf("-> 0x%016llx\n", _timeout); \ MACRO_END -#define VIRTUAL_TIMEOUT_INFLATE32(_timeout) \ -MACRO_BEGIN \ - kprintf("%24s: 0x%08x ", #_timeout, _timeout); \ - if ((_timeout <> vti == _timeout) \ - _timeout <<= vti; \ - else \ - _timeout = ~0; /* cap rather than overflow */ \ - kprintf("-> 0x%08x\n", _timeout); \ +#define VIRTUAL_TIMEOUT_INFLATE_US(_timeout) \ +MACRO_BEGIN \ + kprintf("%24s: 0x%08x ", #_timeout, _timeout); \ + _timeout = virtual_timeout_inflate_us(vti, _timeout); \ + kprintf("-> 0x%08x\n", _timeout); \ MACRO_END - VIRTUAL_TIMEOUT_INFLATE32(LockTimeOutUsec); - VIRTUAL_TIMEOUT_INFLATE64(LockTimeOut); - VIRTUAL_TIMEOUT_INFLATE64(LockTimeOutTSC); - VIRTUAL_TIMEOUT_INFLATE64(TLBTimeOut); - VIRTUAL_TIMEOUT_INFLATE64(MutexSpin); - VIRTUAL_TIMEOUT_INFLATE64(low_MutexSpin); - VIRTUAL_TIMEOUT_INFLATE64(reportphyreaddelayabs); + VIRTUAL_TIMEOUT_INFLATE_US(LockTimeOutUsec); + VIRTUAL_TIMEOUT_INFLATE_ABS(LockTimeOut); + VIRTUAL_TIMEOUT_INFLATE_TSC(LockTimeOutTSC); + VIRTUAL_TIMEOUT_INFLATE_ABS(TLBTimeOut); + VIRTUAL_TIMEOUT_INFLATE_ABS(MutexSpin); + VIRTUAL_TIMEOUT_INFLATE_ABS(low_MutexSpin); + VIRTUAL_TIMEOUT_INFLATE_ABS(reportphyreaddelayabs); } interrupt_latency_tracker_setup(); @@ -856,7 +963,7 @@ ml_delay_should_spin(uint64_t interval) return (interval < delay_spin_threshold) ? 
TRUE : FALSE; } -uint32_t yield_delay_us = 0; +TUNABLE(uint32_t, yield_delay_us, "yield_delay_us", 0); void ml_delay_on_yield(void) @@ -1071,11 +1178,11 @@ ml_entropy_collect(void) assert(cpu_number() == master_cpu); /* update buffer pointer cyclically */ - ep = EntropyData.buffer + (EntropyData.sample_count & ENTROPY_BUFFER_INDEX_MASK); + ep = EntropyData.buffer + (EntropyData.sample_count & EntropyData.buffer_index_mask); EntropyData.sample_count += 1; rdtsc_nofence(tsc_lo, tsc_hi); - *ep = ror32(*ep, 9) ^ tsc_lo; + *ep = (ror32(*ep, 9) & EntropyData.ror_mask) ^ tsc_lo; } uint64_t @@ -1121,14 +1228,12 @@ static boolean_t ml_quiescing; void ml_set_is_quiescing(boolean_t quiescing) { - assert(FALSE == ml_get_interrupts_enabled()); ml_quiescing = quiescing; } boolean_t ml_is_quiescing(void) { - assert(FALSE == ml_get_interrupts_enabled()); return ml_quiescing; } @@ -1137,3 +1242,50 @@ ml_get_booter_memory_size(void) { return 0; } + +void +machine_lockdown(void) +{ + x86_64_protect_data_const(); +} + +bool +ml_cpu_can_exit(__unused int cpu_id) +{ + return true; +} + +void +ml_cpu_init_state(void) +{ +} + +void +ml_cpu_begin_state_transition(__unused int cpu_id) +{ +} + +void +ml_cpu_end_state_transition(__unused int cpu_id) +{ +} + +void +ml_cpu_begin_loop(void) +{ +} + +void +ml_cpu_end_loop(void) +{ +} + +size_t +ml_get_vm_reserved_regions(bool vm_is64bit, struct vm_reserved_region **regions) +{ +#pragma unused(vm_is64bit) + assert(regions != NULL); + + *regions = NULL; + return 0; +} diff --git a/osfmk/i386/machine_routines.h b/osfmk/i386/machine_routines.h index 4f605d378..cedcdb7de 100644 --- a/osfmk/i386/machine_routines.h +++ b/osfmk/i386/machine_routines.h @@ -98,6 +98,12 @@ void ml_static_mfree( vm_offset_t, vm_size_t); +kern_return_t +ml_static_protect( + vm_offset_t start, + vm_size_t size, + vm_prot_t new_prot); + /* boot memory allocation */ vm_offset_t ml_static_malloc( vm_size_t size); @@ -105,6 +111,10 @@ vm_offset_t ml_static_malloc( vm_offset_t ml_static_slide( vm_offset_t vaddr); +kern_return_t +ml_static_verify_page_protections( + uint64_t base, uint64_t size, vm_prot_t prot); + vm_offset_t ml_static_unslide( vm_offset_t vaddr); @@ -122,9 +132,9 @@ boolean_t ml_validate_nofault( uint64_t ml_cpu_cache_size(unsigned int level); uint64_t ml_cpu_cache_sharing(unsigned int level); -/* Initialize the maximum number of CPUs */ -void ml_init_max_cpus( - unsigned long max_cpus); +/* Set the maximum number of CPUs */ +void ml_set_max_cpus( + unsigned int max_cpus); extern void ml_cpu_up(void); extern void ml_cpu_down(void); @@ -161,9 +171,6 @@ void plctrace_disable(void); /* Warm up a CPU to receive an interrupt */ kern_return_t ml_interrupt_prewarm(uint64_t deadline); -/* Check if the machine layer wants to intercept a panic call */ -boolean_t ml_wants_panic_trap_to_debugger(void); - /* Machine layer routine for intercepting panics */ void ml_panic_trap_to_debugger(const char *panic_format_str, va_list *panic_args, @@ -302,6 +309,10 @@ struct ml_cpu_info { typedef struct ml_cpu_info ml_cpu_info_t; +typedef enum { + CLUSTER_TYPE_SMP, +} cluster_type_t; + /* Get processor info */ void ml_cpu_get_info(ml_cpu_info_t *ml_cpu_info); @@ -315,8 +326,8 @@ void ml_thread_policy( #define MACHINE_NETWORK_WORKLOOP 0x00000001 #define MACHINE_NETWORK_NETISR 0x00000002 -/* Return the maximum number of CPUs set by ml_init_max_cpus() */ -int ml_get_max_cpus( +/* Return the maximum number of CPUs set by ml_set_max_cpus(), blocking if necessary */ +unsigned int ml_wait_max_cpus( void); /* @@ -368,6 
+379,7 @@ __private_extern__ void ml_phys_write_data(uint64_t paddr, unsigned long long data, int size); __private_extern__ uintptr_t pmap_verify_noncacheable(uintptr_t vaddr); +void machine_lockdown(void); #endif /* MACH_KERNEL_PRIVATE */ #ifdef XNU_KERNEL_PRIVATE @@ -388,6 +400,19 @@ void ml_gpu_stat_update(uint64_t); uint64_t ml_gpu_stat(thread_t); boolean_t ml_recent_wake(void); +#ifdef MACH_KERNEL_PRIVATE +struct i386_cpu_info; +struct machine_thread; +/* LBR support */ +void i386_lbr_init(struct i386_cpu_info *info_p, bool is_master); +void i386_switch_lbrs(thread_t old, thread_t new); +int i386_lbr_native_state_to_mach_thread_state(struct machine_thread *pcb, last_branch_state_t *machlbrp); +void i386_lbr_synch(thread_t thr); +void i386_lbr_enable(void); +void i386_lbr_disable(void); +extern bool last_branch_support_enabled; +#endif + #define ALL_CORES_RECOMMENDED (~(uint64_t)0) extern void sched_usercontrol_update_recommended_cores(uint64_t recommended_cores); @@ -402,5 +427,8 @@ extern uint32_t phywritepanic; extern uint64_t tracephyreaddelayabs; extern uint64_t tracephywritedelayabs; +void ml_hibernate_active_pre(void); +void ml_hibernate_active_post(void); + #endif /* XNU_KERNEL_PRIVATE */ #endif /* _I386_MACHINE_ROUTINES_H_ */ diff --git a/osfmk/i386/misc_protos.h b/osfmk/i386/misc_protos.h index c12a9aaf7..59507742b 100644 --- a/osfmk/i386/misc_protos.h +++ b/osfmk/i386/misc_protos.h @@ -47,11 +47,6 @@ extern void i386_vm_init( boolean_t, struct boot_args *); -#if NCOPY_WINDOWS > 0 -extern void cpu_userwindow_init(int); -extern void cpu_physwindow_init(int); -#endif - extern void machine_startup(void); extern void get_root_device(void); @@ -59,7 +54,6 @@ extern void picinit(void); extern void interrupt_processor( int cpu); extern void mp_probe_cpus(void); -extern void panic_io_port_read(void); extern void remote_kdb(void); extern void clear_kdb_intr(void); @@ -104,10 +98,6 @@ extern void cache_flush_page_phys(ppnum_t pa); extern void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count); extern void dcache_incoherent_io_store64(addr64_t pa, unsigned int count); - -extern processor_t cpu_processor_alloc(boolean_t is_boot_cpu); -extern void cpu_processor_free(processor_t proc); - extern void sysclk_gettime_interrupts_disabled( mach_timespec_t *cur_time); @@ -160,4 +150,8 @@ extern void pstate_trace(void); extern void mp_interrupt_watchdog(void); +extern kern_return_t i386_slide_individual_kext(kernel_mach_header_t *mh, uintptr_t slide); + +extern kern_return_t i386_slide_kext_collection_mh_addrs(kernel_mach_header_t *mh, uintptr_t slide, bool adjust_mach_headers); + #endif /* _I386_MISC_PROTOS_H_ */ diff --git a/osfmk/i386/mp.c b/osfmk/i386/mp.c index b6654cc39..ee5461366 100644 --- a/osfmk/i386/mp.c +++ b/osfmk/i386/mp.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2018 Apple Inc. All rights reserved. + * Copyright (c) 2000-2020 Apple Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -48,7 +48,7 @@ #include #include #include -#include +#include #include #include @@ -124,14 +124,14 @@ boolean_t mp_interrupt_watchdog_enabled = TRUE; uint32_t mp_interrupt_watchdog_events = 0; #endif -decl_simple_lock_data(, debugger_callback_lock); +SIMPLE_LOCK_DECLARE(debugger_callback_lock, 0); struct debugger_callback *debugger_callback = NULL; -decl_lck_mtx_data(static, mp_cpu_boot_lock); -lck_mtx_ext_t mp_cpu_boot_lock_ext; +static LCK_GRP_DECLARE(smp_lck_grp, "i386_smp"); +static LCK_MTX_EARLY_DECLARE(mp_cpu_boot_lock, &smp_lck_grp); /* Variables needed for MP rendezvous. */ -decl_simple_lock_data(, mp_rv_lock); +SIMPLE_LOCK_DECLARE(mp_rv_lock, 0); static void (*mp_rv_setup_func)(void *arg); static void (*mp_rv_action_func)(void *arg); static void (*mp_rv_teardown_func)(void *arg); @@ -163,8 +163,7 @@ static void (*mp_bc_action_func)(void *arg); static void *mp_bc_func_arg; static int mp_bc_ncpus; static volatile long mp_bc_count; -decl_lck_mtx_data(static, mp_bc_lock); -lck_mtx_ext_t mp_bc_lock_ext; +static LCK_MTX_EARLY_DECLARE(mp_bc_lock, &smp_lck_grp); static volatile int debugger_cpu = -1; volatile long NMIPI_acks = 0; volatile long NMI_count = 0; @@ -186,9 +185,6 @@ void i386_start_cpu(int lapic_id, int cpu_num); void i386_send_NMI(int cpu); void NMIPI_enable(boolean_t); -static lck_grp_t smp_lck_grp; -static lck_grp_attr_t smp_lck_grp_attr; - #define NUM_CPU_WARM_CALLS 20 struct timer_call cpu_warm_call_arr[NUM_CPU_WARM_CALLS]; queue_head_t cpu_warm_call_list; @@ -201,7 +197,7 @@ typedef struct cpu_warm_data { } *cpu_warm_data_t; static void cpu_prewarm_init(void); -static void cpu_warm_timer_call_func(call_entry_param_t p0, call_entry_param_t p1); +static void cpu_warm_timer_call_func(timer_call_param_t p0, timer_call_param_t p1); static void _cpu_warm_setup(void *arg); static timer_call_t grab_warm_timer_call(void); static void free_warm_timer_call(timer_call_t call); @@ -209,12 +205,6 @@ static void free_warm_timer_call(timer_call_t call); void smp_init(void) { - simple_lock_init(&mp_rv_lock, 0); - simple_lock_init(&debugger_callback_lock, 0); - lck_grp_attr_setdefault(&smp_lck_grp_attr); - lck_grp_init(&smp_lck_grp, "i386_smp", &smp_lck_grp_attr); - lck_mtx_init_ext(&mp_cpu_boot_lock, &mp_cpu_boot_lock_ext, &smp_lck_grp, LCK_ATTR_NULL); - lck_mtx_init_ext(&mp_bc_lock, &mp_bc_lock_ext, &smp_lck_grp, LCK_ATTR_NULL); console_init(); if (!i386_smp_init(LAPIC_NMI_INTERRUPT, NMIInterruptHandler, @@ -410,11 +400,18 @@ start_cpu(void *arg) tsc_delta = tsc_target - tsc_starter; kprintf("TSC sync for cpu %d: 0x%016llx delta 0x%llx (%lld)\n", psip->target_cpu, tsc_target, tsc_delta, tsc_delta); +#if DEBUG || DEVELOPMENT + /* + * Stash the delta for inspection later, since we can no + * longer print/log it with interrupts disabled. 
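Editor's note: in the mp.c hunks above, the SMP locks move from runtime setup in smp_init() (simple_lock_init, lck_grp_init, lck_mtx_init_ext) to compile-time SIMPLE_LOCK_DECLARE / LCK_MTX_EARLY_DECLARE declarations, so smp_init() loses all of its lock plumbing. Below is a rough userspace analogue of that shift using pthreads; it is only an analogy, since pthreads has no notion of xnu lock groups.

/* Rough pthread analogue of moving lock setup from an init function to a
 * static declaration. Not xnu code. */
#include <pthread.h>
#include <stdio.h>

/* Before: a mutex that had to be initialized by hand in an init routine:
 *   static pthread_mutex_t boot_lock;
 *   void smp_init(void) { pthread_mutex_init(&boot_lock, NULL); ... }
 * After: the declaration carries the initialization, and the init routine
 * shrinks to its real work. */
static pthread_mutex_t boot_lock = PTHREAD_MUTEX_INITIALIZER;

static void
smp_init_analogue(void)
{
    /* no lock setup needed here any more */
    pthread_mutex_lock(&boot_lock);
    printf("boot work under a statically declared lock\n");
    pthread_mutex_unlock(&boot_lock);
}

int
main(void)
{
    smp_init_analogue();
    return 0;
}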
+ */ + cpu_datap(psip->target_cpu)->tsc_sync_delta = tsc_delta; +#endif if (ABS(tsc_delta) > (int64_t) TSC_sync_margin) { #if DEBUG panic( #else - printf( + kprintf( #endif "Unsynchronized TSC for cpu %d: " "0x%016llx, delta 0x%llx\n", @@ -503,7 +500,7 @@ cpu_signal_handler(x86_saved_state_t *regs) int my_cpu; volatile int *my_word; - SCHED_STATS_IPI(current_processor()); + SCHED_STATS_INC(ipi_count); my_cpu = cpu_number(); my_word = &cpu_data_ptr[my_cpu]->cpu_signals; @@ -1175,7 +1172,7 @@ mp_cpus_call_cpu_init(int cpu) simple_lock_init(&cqp->lock, 0); queue_init(&cqp->queue); for (i = 0; i < MP_CPUS_CALL_BUFS_PER_CPU; i++) { - callp = (mp_call_t *) kalloc(sizeof(mp_call_t)); + callp = zalloc_permanent_type(mp_call_t); mp_call_free(callp); } @@ -1502,8 +1499,7 @@ mp_cpus_kick(cpumask_t cpus) mp_safe_spin_lock(&x86_topo_lock); for (cpu = 0; cpu < (cpu_t) real_ncpus; cpu++) { - if ((cpu == (cpu_t) cpu_number()) - || ((cpu_to_cpumask(cpu) & cpus) == 0) + if (((cpu_to_cpumask(cpu) & cpus) == 0) || !cpu_is_running(cpu)) { continue; } @@ -1945,6 +1941,12 @@ cpu_number(void) return get_cpu_number(); } +vm_offset_t +current_percpu_base(void) +{ + return get_current_percpu_base(); +} + static void cpu_prewarm_init() { @@ -1991,8 +1993,8 @@ free_warm_timer_call(timer_call_t call) */ static void cpu_warm_timer_call_func( - call_entry_param_t p0, - __unused call_entry_param_t p1) + timer_call_param_t p0, + __unused timer_call_param_t p1) { free_warm_timer_call((timer_call_t)p0); return; diff --git a/osfmk/i386/mp.h b/osfmk/i386/mp.h index 43e8085e0..1b07a8eda 100644 --- a/osfmk/i386/mp.h +++ b/osfmk/i386/mp.h @@ -92,6 +92,7 @@ __END_DECLS extern unsigned int real_ncpus; /* real number of cpus */ extern unsigned int max_ncpus; /* max number of cpus */ +extern unsigned int max_cpus_from_firmware; /* actual max cpus, from firmware (ACPI) */ decl_simple_lock_data(extern, kdb_lock); /* kdb lock */ __BEGIN_DECLS diff --git a/osfmk/i386/mp_desc.c b/osfmk/i386/mp_desc.c index ac756a7f9..f56db9d51 100644 --- a/osfmk/i386/mp_desc.c +++ b/osfmk/i386/mp_desc.c @@ -58,8 +58,8 @@ */ #include -#include #include +#include #include #include #include @@ -188,9 +188,12 @@ cpu_data_t *cpu_data_master = &scdatas[0]; cpu_data_t *cpu_data_ptr[MAX_CPUS] = {[0] = &scdatas[0] }; +SECURITY_READ_ONLY_LATE(struct percpu_base) percpu_base; + decl_simple_lock_data(, ncpus_lock); /* protects real_ncpus */ unsigned int real_ncpus = 1; unsigned int max_ncpus = MAX_CPUS; +unsigned int max_cpus_from_firmware = 0; extern void hi64_sysenter(void); extern void hi64_syscall(void); @@ -535,6 +538,36 @@ cpu_syscall_init(cpu_data_t *cdp) extern vm_offset_t dyn_dblmap(vm_offset_t, vm_offset_t); uint64_t ldt_alias_offset; +__startup_func +static void +cpu_data_startup_init(void) +{ + int flags = KMA_GUARD_FIRST | KMA_GUARD_LAST | KMA_PERMANENT | + KMA_ZERO | KMA_KOBJECT; + uint32_t cpus = max_cpus_from_firmware; + vm_size_t size = percpu_section_size() * cpus; + kern_return_t kr; + + percpu_base.size = percpu_section_size(); + if (cpus == 0) { + panic("percpu: max_cpus_from_firmware not yet initialized"); + } + if (cpus == 1) { + percpu_base.start = VM_MAX_KERNEL_ADDRESS; + return; + } + + kr = kmem_alloc_flags(kernel_map, &percpu_base.start, + round_page(size) + 2 * PAGE_SIZE, VM_KERN_MEMORY_CPU, flags); + if (kr != KERN_SUCCESS) { + panic("percpu: kmem_alloc failed (%d)", kr); + } + + percpu_base.start += PAGE_SIZE - percpu_section_start(); + percpu_base.end = percpu_base.start + size - 1; +} +STARTUP(PERCPU, STARTUP_RANK_FIRST, 
cpu_data_startup_init); + cpu_data_t * cpu_data_alloc(boolean_t is_boot_cpu) { @@ -546,10 +579,7 @@ cpu_data_alloc(boolean_t is_boot_cpu) cdp = cpu_datap(0); if (cdp->cpu_processor == NULL) { simple_lock_init(&ncpus_lock, 0); - cdp->cpu_processor = cpu_processor_alloc(TRUE); -#if NCOPY_WINDOWS > 0 - cdp->cpu_pmap = pmap_cpu_alloc(TRUE); -#endif + cdp->cpu_processor = PERCPU_GET_MASTER(processor); } return cdp; } @@ -572,6 +602,9 @@ cpu_data_alloc(boolean_t is_boot_cpu) cdp->cpu_this = cdp; cdp->cpu_number = cnum; cdp->cd_shadow = &cpshadows[cnum]; + cdp->cpu_pcpu_base = percpu_base.start + (cnum - 1) * percpu_section_size(); + cdp->cpu_processor = PERCPU_GET_WITH_BASE(cdp->cpu_pcpu_base, processor); + /* * Allocate interrupt stack: */ @@ -739,93 +772,6 @@ valid_user_segment_selectors(uint16_t cs, valid_user_data_selector(gs); } -#if NCOPY_WINDOWS > 0 - -static vm_offset_t user_window_base = 0; - -void -cpu_userwindow_init(int cpu) -{ - cpu_data_t *cdp = cpu_data_ptr[cpu]; - vm_offset_t user_window; - vm_offset_t vaddr; - int num_cpus; - - num_cpus = ml_get_max_cpus(); - - if (cpu >= num_cpus) { - panic("cpu_userwindow_init: cpu > num_cpus"); - } - - if (user_window_base == 0) { - if (vm_allocate(kernel_map, &vaddr, - (NBPDE * NCOPY_WINDOWS * num_cpus) + NBPDE, - VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_KERN_MEMORY_CPU)) != KERN_SUCCESS) { - panic("cpu_userwindow_init: " - "couldn't allocate user map window"); - } - - /* - * window must start on a page table boundary - * in the virtual address space - */ - user_window_base = (vaddr + (NBPDE - 1)) & ~(NBPDE - 1); - - /* - * get rid of any allocation leading up to our - * starting boundary - */ - vm_deallocate(kernel_map, vaddr, user_window_base - vaddr); - - /* - * get rid of tail that we don't need - */ - user_window = user_window_base + - (NBPDE * NCOPY_WINDOWS * num_cpus); - - vm_deallocate(kernel_map, user_window, - (vaddr + - ((NBPDE * NCOPY_WINDOWS * num_cpus) + NBPDE)) - - user_window); - } - - user_window = user_window_base + (cpu * NCOPY_WINDOWS * NBPDE); - - cdp->cpu_copywindow_base = user_window; - /* - * Abuse this pdp entry, the pdp now actually points to - * an array of copy windows addresses. - */ - cdp->cpu_copywindow_pdp = pmap_pde(kernel_pmap, user_window); -} - -void -cpu_physwindow_init(int cpu) -{ - cpu_data_t *cdp = cpu_data_ptr[cpu]; - vm_offset_t phys_window = cdp->cpu_physwindow_base; - - if (phys_window == 0) { - if (vm_allocate(kernel_map, &phys_window, - PAGE_SIZE, VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_KERN_MEMORY_CPU)) - != KERN_SUCCESS) { - panic("cpu_physwindow_init: " - "couldn't allocate phys map window"); - } - - /* - * make sure the page that encompasses the - * pte pointer we're interested in actually - * exists in the page table - */ - pmap_expand(kernel_pmap, phys_window, PMAP_EXPAND_OPTIONS_NONE); - - cdp->cpu_physwindow_base = phys_window; - cdp->cpu_physwindow_ptep = vtopte(phys_window); - } -} -#endif /* NCOPY_WINDOWS > 0 */ - /* * Allocate a new interrupt stack for the boot processor from the * heap rather than continue to use the statically allocated space. 
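Editor's note: cpu_data_startup_init() above reserves one percpu slice per CPU reported by firmware, brackets the reservation with guard pages, and later hands each secondary CPU a base of percpu_base.start + (cnum - 1) * percpu_section_size(). The standalone sketch below just replays that layout arithmetic; the sizes and the kmem_alloc result are invented, and the kernel's additional bias by percpu_section_start() is ignored here.

/* Standalone sketch of the percpu layout carved out by cpu_data_startup_init():
 *   [guard page][cpu1 slice][cpu2 slice]...[guard page]
 * The boot CPU keeps the statically linked section, so secondaries index
 * from 1. All constants are illustrative. */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE      4096ULL
#define SECTION_SIZE   1536ULL     /* stand-in for percpu_section_size() */

static uint64_t
round_page(uint64_t x)
{
    return (x + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
}

int
main(void)
{
    unsigned ncpus = 4;                         /* stand-in for max_cpus_from_firmware */
    uint64_t size  = SECTION_SIZE * ncpus;      /* sized per firmware CPU count */
    uint64_t va    = 0x100000000ULL;            /* pretend kmem_alloc result */
    uint64_t alloc = round_page(size) + 2 * PAGE_SIZE;  /* plus leading/trailing guard pages */
    uint64_t start = va + PAGE_SIZE;            /* first byte past the leading guard page */

    printf("reserve 0x%llx bytes; slices start at 0x%llx\n",
        (unsigned long long)alloc, (unsigned long long)start);

    for (unsigned cnum = 1; cnum < ncpus; cnum++) {
        uint64_t base = start + (uint64_t)(cnum - 1) * SECTION_SIZE;
        printf("cpu %u percpu base: 0x%llx\n", cnum, (unsigned long long)base);
    }
    return 0;
}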
diff --git a/osfmk/i386/mp_native.c b/osfmk/i386/mp_native.c index 1ba36090a..57c604228 100644 --- a/osfmk/i386/mp_native.c +++ b/osfmk/i386/mp_native.c @@ -59,7 +59,7 @@ i386_smp_init(int nmi_vector, i386_intr_func_t nmi_handler, int ipi_vector, i386 } lapic_init(); - lapic_configure(); + lapic_configure(false); lapic_set_intr_func(nmi_vector, nmi_handler); lapic_set_intr_func(ipi_vector, ipi_handler); diff --git a/osfmk/i386/mtrr.c b/osfmk/i386/mtrr.c index a72c09700..db86e8906 100644 --- a/osfmk/i386/mtrr.c +++ b/osfmk/i386/mtrr.c @@ -27,7 +27,7 @@ */ #include -#include +#include #include #include #include @@ -288,8 +288,8 @@ mtrr_init(void) /* allocate storage for variable ranges (can block?) */ if (mtrr_state.var_count) { mtrr_state.var_range = (mtrr_var_range_t *) - kalloc(sizeof(mtrr_var_range_t) * - mtrr_state.var_count); + zalloc_permanent(sizeof(mtrr_var_range_t) * + mtrr_state.var_count, ZALIGN(mtrr_var_range_t)); if (mtrr_state.var_range == NULL) { mtrr_state.var_count = 0; } diff --git a/osfmk/i386/pal_hibernate.h b/osfmk/i386/pal_hibernate.h index 32697bdf2..bb5f63f1a 100644 --- a/osfmk/i386/pal_hibernate.h +++ b/osfmk/i386/pal_hibernate.h @@ -25,9 +25,15 @@ * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ +/** + * i386/x86_64 specific definitions for hibernation platform abstraction layer. + */ + #ifndef _I386_PAL_HIBERNATE_H #define _I386_PAL_HIBERNATE_H +__BEGIN_DECLS + #define HIB_MAP_SIZE (2*I386_LPGBYTES) #define DEST_COPY_AREA (4*GB - HIB_MAP_SIZE) /*4GB - 2*2m */ #define SRC_COPY_AREA (DEST_COPY_AREA - HIB_MAP_SIZE) @@ -35,13 +41,26 @@ #define BITMAP_AREA (COPY_PAGE_AREA - HIB_MAP_SIZE) #define IMAGE_AREA (BITMAP_AREA - HIB_MAP_SIZE) #define IMAGE2_AREA (IMAGE_AREA - HIB_MAP_SIZE) +#define SCRATCH_AREA (IMAGE2_AREA - HIB_MAP_SIZE) +#define WKDM_AREA (SCRATCH_AREA - HIB_MAP_SIZE) #define HIB_BASE segHIBB #define HIB_ENTRYPOINT acpi_wake_prot_entry -uintptr_t pal_hib_map(uintptr_t v, uint64_t p); -void hibernateRestorePALState(uint32_t *src); -void pal_hib_patchup(void); -#define PAL_HIBERNATE_MAGIC_1 0xfeedfacedeadbeef -#define PAL_HIBERNATE_MAGIC_2 0x41b312133714 +/*! + * @typedef pal_hib_map_type_t + * @discussion Parameter to pal_hib_map used to signify which memory region to map. + */ +typedef uintptr_t pal_hib_map_type_t; + +/*! + * @struct pal_hib_ctx + * @discussion x86_64-specific PAL context; see pal_hib_ctx_t for details. + */ +struct pal_hib_ctx { + char reserved; +}; + +__END_DECLS + #endif /* _I386_PAL_HIBERNATE_H */ diff --git a/osfmk/i386/pal_routines.h b/osfmk/i386/pal_routines.h index bfb34d31b..35d58108f 100644 --- a/osfmk/i386/pal_routines.h +++ b/osfmk/i386/pal_routines.h @@ -145,6 +145,7 @@ void pal_preemption_assert(void); /* Include a PAL-specific header, too, for xnu-internal overrides */ #include +#define PAL_XCPM_PROPERTY_VALUE 3 extern boolean_t virtualized; #define PAL_VIRTUALIZED_PROPERTY_VALUE 4 @@ -155,7 +156,7 @@ static inline void pal_get_resource_property(const char **property_name, int *property_value) { *property_name = PAL_AICPM_PROPERTY_NAME; - *property_value = PAL_AICPM_PROPERTY_VALUE; + *property_value = PAL_XCPM_PROPERTY_VALUE; if (virtualized) { *property_value = PAL_VIRTUALIZED_PROPERTY_VALUE; } diff --git a/osfmk/i386/panic_hooks.c b/osfmk/i386/panic_hooks.c index af8fa615e..df0f842b1 100644 --- a/osfmk/i386/panic_hooks.c +++ b/osfmk/i386/panic_hooks.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014 Apple Inc. All rights reserved. + * Copyright (c) 2014-2020 Apple Inc. All rights reserved. 
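Editor's note: the pal_hibernate.h hunk above extends the chain of fixed hibernation mapping windows with SCRATCH_AREA and WKDM_AREA, each sitting HIB_MAP_SIZE below the previous one, counting down from just under 4GB. The sketch below prints that layout under the assumption that I386_LPGBYTES is the 2MB large-page size (so HIB_MAP_SIZE is 4MB, matching the header's own "4GB - 2*2m" comment) and that every window, including the ones whose definitions are not shown in this hunk, descends by exactly one HIB_MAP_SIZE; treat the output as illustrative only.

/* Sketch of the descending hibernation window layout in pal_hibernate.h.
 * Assumes I386_LPGBYTES == 2MB and a uniform HIB_MAP_SIZE step. */
#include <stdint.h>
#include <stdio.h>

#define GB              (1ULL << 30)
#define I386_LPGBYTES   (2ULL << 20)
#define HIB_MAP_SIZE    (2 * I386_LPGBYTES)

int
main(void)
{
    const char *names[] = {
        "DEST_COPY_AREA", "SRC_COPY_AREA", "COPY_PAGE_AREA", "BITMAP_AREA",
        "IMAGE_AREA", "IMAGE2_AREA", "SCRATCH_AREA", "WKDM_AREA",
    };
    uint64_t area = 4 * GB - HIB_MAP_SIZE;      /* DEST_COPY_AREA */

    for (unsigned i = 0; i < sizeof(names) / sizeof(names[0]); i++) {
        printf("%-16s 0x%llx\n", names[i], (unsigned long long)area);
        area -= HIB_MAP_SIZE;                   /* each window sits just below the last */
    }
    return 0;
}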
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -146,6 +146,7 @@ panic_dump_mem(const void *addr, int len) switch (n % 3) { case 1: panic_dump_buf[n++] = 0; + OS_FALLTHROUGH; case 2: panic_dump_buf[n++] = 0; } diff --git a/osfmk/i386/panic_notify.c b/osfmk/i386/panic_notify.c new file mode 100644 index 000000000..169058e15 --- /dev/null +++ b/osfmk/i386/panic_notify.c @@ -0,0 +1,77 @@ +/* + * Copyright (c) 2000-2020 Apple Inc. All rights reserved. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. + * + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ + */ + +#include +#include +#include +#include +#include + +/* + * An I/O port to issue a read from, in the event of a panic. + * Useful for triggering logic analyzers. + */ +static uint16_t panic_io_port = 0; + +/* + * Similar to the panic_io_port, the pvpanic_io_port is used to notify + * interested parties (in this case the host/hypervisor), that a panic + * has occurred. + * Where it differs from panic_io_port is that it is written and read + * according to the pvpanic specification: + * https://raw.githubusercontent.com/qemu/qemu/master/docs/specs/pvpanic.txt + */ +static uint16_t pvpanic_io_port = 0; + +void +panic_notify_init(void) +{ + (void) PE_parse_boot_argn("panic_io_port", &panic_io_port, sizeof(panic_io_port)); + + /* + * XXX + * Defer reading the notifcation bit until panic time. This maintains + * backwards compatibility with Apple's QEMU. Once backwards + * compatibilty is no longer needed the check should be performed here + * before setting pvpanic_io_port. + */ + (void) PE_parse_boot_argn("pvpanic_io_port", &pvpanic_io_port, sizeof(pvpanic_io_port)); +} + +void +panic_notify(void) +{ + if (panic_io_port != 0) { + (void) inb(panic_io_port); + } + + if (pvpanic_io_port != 0 && + (inb(pvpanic_io_port) & PVPANIC_NOTIFICATION_BIT) != 0) { + outb(pvpanic_io_port, PVPANIC_NOTIFICATION_BIT); + } +} diff --git a/osfmk/i386/panic_notify.h b/osfmk/i386/panic_notify.h new file mode 100644 index 000000000..edf1c2db6 --- /dev/null +++ b/osfmk/i386/panic_notify.h @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2020 Apple Inc. All rights reserved. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). 
You may not use this file except in + * compliance with the License. The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. + * + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ + */ + +#ifndef _I386_PANIC_NOTIFY_H +#define _I386_PANIC_NOTIFY_H + +#define PVPANIC_NOTIFICATION_BIT (1 << 0) + +extern void panic_notify_init(void); +extern void panic_notify(void); + +#endif /* _I386_PANIC_NOTIFY_H */ diff --git a/osfmk/i386/pcb.c b/osfmk/i386/pcb.c index 1f154580b..603fb0008 100644 --- a/osfmk/i386/pcb.c +++ b/osfmk/i386/pcb.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2016 Apple Inc. All rights reserved. + * Copyright (c) 2000-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -125,8 +125,11 @@ unsigned int _MachineStateCount[] = { [x86_PAGEIN_STATE] = x86_PAGEIN_STATE_COUNT }; -zone_t iss_zone; /* zone for saved_state area */ -zone_t ids_zone; /* zone for debug_state area */ +ZONE_DECLARE(iss_zone, "x86_64 saved state", + sizeof(x86_saved_state_t), ZC_NONE); + +ZONE_DECLARE(ids_zone, "x86_64 debug state", + sizeof(x86_debug_state64_t), ZC_NONE); /* Forward */ @@ -664,8 +667,13 @@ set_thread_state64(thread_t thread, void *state, int full) if (full == TRUE) { ts = &((x86_thread_full_state64_t *)state)->ss64; + if (!valid_user_code_selector(((x86_thread_full_state64_t *)ts)->ss64.cs)) { + return KERN_INVALID_ARGUMENT; + } } else { ts = (x86_thread_state64_t *)state; + // In this case, ts->cs exists but is ignored, and + // CS is always set to USER_CS below instead. } pal_register_cache_state(thread, DIRTY); @@ -1722,6 +1730,71 @@ machine_thread_get_state( *count = x86_PAGEIN_STATE_COUNT; break; } + + case x86_INSTRUCTION_STATE: + { + if (*count < x86_INSTRUCTION_STATE_COUNT) { + return KERN_INVALID_ARGUMENT; + } + + x86_instruction_state_t *state = (void *)tstate; + x86_instruction_state_t *src_state = THREAD_TO_PCB(thr_act)->insn_state; + + if (src_state != 0 && (src_state->insn_stream_valid_bytes > 0 || src_state->out_of_synch)) { +#if DEVELOPMENT || DEBUG + extern int insnstream_force_cacheline_mismatch; +#endif + size_t byte_count = (src_state->insn_stream_valid_bytes > x86_INSTRUCTION_STATE_MAX_INSN_BYTES) + ? 
x86_INSTRUCTION_STATE_MAX_INSN_BYTES : src_state->insn_stream_valid_bytes; + if (byte_count > 0) { + bcopy(src_state->insn_bytes, state->insn_bytes, byte_count); + } + state->insn_offset = src_state->insn_offset; + state->insn_stream_valid_bytes = byte_count; +#if DEVELOPMENT || DEBUG + state->out_of_synch = src_state->out_of_synch || insnstream_force_cacheline_mismatch; + insnstream_force_cacheline_mismatch = 0; /* One-shot, reset after use */ + + if (state->out_of_synch) { + bcopy(&src_state->insn_cacheline[0], &state->insn_cacheline[0], + x86_INSTRUCTION_STATE_CACHELINE_SIZE); + } else { + bzero(&state->insn_cacheline[0], x86_INSTRUCTION_STATE_CACHELINE_SIZE); + } +#else + state->out_of_synch = src_state->out_of_synch; +#endif + *count = x86_INSTRUCTION_STATE_COUNT; + } else { + *count = 0; + } + break; + } + + case x86_LAST_BRANCH_STATE: + { + boolean_t istate; + + if (!last_branch_support_enabled || *count < x86_LAST_BRANCH_STATE_COUNT) { + return KERN_INVALID_ARGUMENT; + } + + istate = ml_set_interrupts_enabled(FALSE); + /* If the current thread is asking for its own LBR data, synch the LBRs first */ + if (thr_act == current_thread()) { + i386_lbr_synch(thr_act); + } + ml_set_interrupts_enabled(istate); + + if (i386_lbr_native_state_to_mach_thread_state(THREAD_TO_PCB(thr_act), (last_branch_state_t *)tstate) < 0) { + *count = 0; + return KERN_INVALID_ARGUMENT; + } + + *count = x86_LAST_BRANCH_STATE_COUNT; + break; + } + default: return KERN_INVALID_ARGUMENT; } @@ -1948,16 +2021,6 @@ machine_set_current_thread(thread_t thread) void machine_thread_init(void) { - iss_zone = zinit(sizeof(x86_saved_state_t), - thread_max * sizeof(x86_saved_state_t), - THREAD_CHUNK * sizeof(x86_saved_state_t), - "x86_64 saved state"); - - ids_zone = zinit(sizeof(x86_debug_state64_t), - thread_max * sizeof(x86_debug_state64_t), - THREAD_CHUNK * sizeof(x86_debug_state64_t), - "x86_64 debug state"); - fpu_module_init(); } diff --git a/osfmk/i386/pcb_native.c b/osfmk/i386/pcb_native.c index 81b4dfcd2..6960a022e 100644 --- a/osfmk/i386/pcb_native.c +++ b/osfmk/i386/pcb_native.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2019 Apple Inc. All rights reserved. + * Copyright (c) 2000-2020 Apple Inc. All rights reserved. 
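Editor's note: the x86_INSTRUCTION_STATE branch above clamps the captured instruction-stream length to the fixed buffer size before copying, then reports the clamped count back through the state it returns. The sketch below isolates that clamp-then-copy step; the buffer size and names are stand-ins, not the kernel structures.

/* Standalone sketch of the clamp-then-copy step used when returning the
 * captured instruction stream. Names and sizes are stand-ins. */
#include <stdio.h>
#include <string.h>

#define MAX_INSN_BYTES 64   /* stand-in for x86_INSTRUCTION_STATE_MAX_INSN_BYTES */

static size_t
copy_insn_stream(const unsigned char *src, size_t valid_bytes,
    unsigned char dst[MAX_INSN_BYTES])
{
    /* Never copy more than the destination buffer can hold. */
    size_t byte_count = (valid_bytes > MAX_INSN_BYTES) ? MAX_INSN_BYTES : valid_bytes;
    if (byte_count > 0) {
        memcpy(dst, src, byte_count);
    }
    return byte_count;      /* the caller reports this as the valid length */
}

int
main(void)
{
    unsigned char src[128] = { 0x90, 0x90, 0xc3 };  /* nop; nop; ret; ... */
    unsigned char dst[MAX_INSN_BYTES];

    printf("copied %zu of 128 bytes\n", copy_insn_stream(src, sizeof(src), dst));
    printf("copied %zu of 3 bytes\n", copy_insn_stream(src, 3, dst));
    return 0;
}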
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -64,7 +64,6 @@ #include #include -#include #include #include #include @@ -114,11 +113,326 @@ ASSERT_IS_16BYTE_MULTIPLE_SIZEOF(x86_saved_state_t); extern zone_t iss_zone; /* zone for saved_state area */ extern zone_t ids_zone; /* zone for debug_state area */ extern int tecs_mode_supported; +extern boolean_t cpuid_tsx_supported; + +bool lbr_need_tsx_workaround = false; int force_thread_policy_tecs; +struct lbr_group { + uint32_t msr_from; + uint32_t msr_to; + uint32_t msr_info; +}; + +struct cpu_lbrs { + uint32_t lbr_count; + struct lbr_group msr_lbrs[X86_MAX_LBRS]; +}; + +const struct cpu_lbrs *cpu_lbr_setp = NULL; +int cpu_lbr_type; + +const struct cpu_lbrs nhm_cpu_lbrs = { + 16 /* LBR count */, + { + { 0x680 /* FROM_0 */, 0x6c0 /* TO_0 */, 0 /* INFO_0 */ }, + { 0x681 /* FROM_1 */, 0x6c1 /* TO_1 */, 0 /* INFO_1 */ }, + { 0x682 /* FROM_2 */, 0x6c2 /* TO_2 */, 0 /* INFO_2 */ }, + { 0x683 /* FROM_3 */, 0x6c3 /* TO_3 */, 0 /* INFO_3 */ }, + { 0x684 /* FROM_4 */, 0x6c4 /* TO_4 */, 0 /* INFO_4 */ }, + { 0x685 /* FROM_5 */, 0x6c5 /* TO_5 */, 0 /* INFO_5 */ }, + { 0x686 /* FROM_6 */, 0x6c6 /* TO_6 */, 0 /* INFO_6 */ }, + { 0x687 /* FROM_7 */, 0x6c7 /* TO_7 */, 0 /* INFO_7 */ }, + { 0x688 /* FROM_8 */, 0x6c8 /* TO_8 */, 0 /* INFO_8 */ }, + { 0x689 /* FROM_9 */, 0x6c9 /* TO_9 */, 0 /* INFO_9 */ }, + { 0x68A /* FROM_10 */, 0x6ca /* TO_10 */, 0 /* INFO_10 */ }, + { 0x68B /* FROM_11 */, 0x6cb /* TO_11 */, 0 /* INFO_11 */ }, + { 0x68C /* FROM_12 */, 0x6cc /* TO_12 */, 0 /* INFO_12 */ }, + { 0x68D /* FROM_13 */, 0x6cd /* TO_13 */, 0 /* INFO_13 */ }, + { 0x68E /* FROM_14 */, 0x6ce /* TO_14 */, 0 /* INFO_14 */ }, + { 0x68F /* FROM_15 */, 0x6cf /* TO_15 */, 0 /* INFO_15 */ } + } +}, + skl_cpu_lbrs = { + 32 /* LBR count */, + { + { 0x680 /* FROM_0 */, 0x6c0 /* TO_0 */, 0xdc0 /* INFO_0 */ }, + { 0x681 /* FROM_1 */, 0x6c1 /* TO_1 */, 0xdc1 /* INFO_1 */ }, + { 0x682 /* FROM_2 */, 0x6c2 /* TO_2 */, 0xdc2 /* INFO_2 */ }, + { 0x683 /* FROM_3 */, 0x6c3 /* TO_3 */, 0xdc3 /* INFO_3 */ }, + { 0x684 /* FROM_4 */, 0x6c4 /* TO_4 */, 0xdc4 /* INFO_4 */ }, + { 0x685 /* FROM_5 */, 0x6c5 /* TO_5 */, 0xdc5 /* INFO_5 */ }, + { 0x686 /* FROM_6 */, 0x6c6 /* TO_6 */, 0xdc6 /* INFO_6 */ }, + { 0x687 /* FROM_7 */, 0x6c7 /* TO_7 */, 0xdc7 /* INFO_7 */ }, + { 0x688 /* FROM_8 */, 0x6c8 /* TO_8 */, 0xdc8 /* INFO_8 */ }, + { 0x689 /* FROM_9 */, 0x6c9 /* TO_9 */, 0xdc9 /* INFO_9 */ }, + { 0x68A /* FROM_10 */, 0x6ca /* TO_10 */, 0xdca /* INFO_10 */ }, + { 0x68B /* FROM_11 */, 0x6cb /* TO_11 */, 0xdcb /* INFO_11 */ }, + { 0x68C /* FROM_12 */, 0x6cc /* TO_12 */, 0xdcc /* INFO_12 */ }, + { 0x68D /* FROM_13 */, 0x6cd /* TO_13 */, 0xdcd /* INFO_13 */ }, + { 0x68E /* FROM_14 */, 0x6ce /* TO_14 */, 0xdce /* INFO_14 */ }, + { 0x68F /* FROM_15 */, 0x6cf /* TO_15 */, 0xdcf /* INFO_15 */ }, + { 0x690 /* FROM_16 */, 0x6d0 /* TO_16 */, 0xdd0 /* INFO_16 */ }, + { 0x691 /* FROM_17 */, 0x6d1 /* TO_17 */, 0xdd1 /* INFO_17 */ }, + { 0x692 /* FROM_18 */, 0x6d2 /* TO_18 */, 0xdd2 /* INFO_18 */ }, + { 0x693 /* FROM_19 */, 0x6d3 /* TO_19 */, 0xdd3 /* INFO_19 */ }, + { 0x694 /* FROM_20 */, 0x6d4 /* TO_20 */, 0xdd4 /* INFO_20 */ }, + { 0x695 /* FROM_21 */, 0x6d5 /* TO_21 */, 0xdd5 /* INFO_21 */ }, + { 0x696 /* FROM_22 */, 0x6d6 /* TO_22 */, 0xdd6 /* INFO_22 */ }, + { 0x697 /* FROM_23 */, 0x6d7 /* TO_23 */, 0xdd7 /* INFO_23 */ }, + { 0x698 /* FROM_24 */, 0x6d8 /* TO_24 */, 0xdd8 /* INFO_24 */ }, + { 0x699 /* FROM_25 */, 0x6d9 /* TO_25 */, 0xdd9 /* INFO_25 */ }, + { 0x69a /* FROM_26 */, 0x6da /* TO_26 */, 0xdda /* 
INFO_26 */ }, + { 0x69b /* FROM_27 */, 0x6db /* TO_27 */, 0xddb /* INFO_27 */ }, + { 0x69c /* FROM_28 */, 0x6dc /* TO_28 */, 0xddc /* INFO_28 */ }, + { 0x69d /* FROM_29 */, 0x6dd /* TO_29 */, 0xddd /* INFO_29 */ }, + { 0x69e /* FROM_30 */, 0x6de /* TO_30 */, 0xdde /* INFO_30 */ }, + { 0x69f /* FROM_31 */, 0x6df /* TO_31 */, 0xddf /* INFO_31 */ } + } +}; + +void +i386_lbr_disable(void) +{ + /* Enable LBRs */ + wrmsr64(MSR_IA32_DEBUGCTLMSR, rdmsr64(MSR_IA32_DEBUGCTLMSR) & ~DEBUGCTL_LBR_ENA); +} + +/* + * Disable ASAN for i386_lbr_enable and i386_lbr_init, otherwise we get a KASAN panic + * because the shadow map is not been initialized when these functions are called in + * early boot. + */ +void __attribute__((no_sanitize("address"))) +i386_lbr_enable(void) +{ + if (last_branch_support_enabled) { + /* Enable LBRs */ + wrmsr64(MSR_IA32_DEBUGCTLMSR, rdmsr64(MSR_IA32_DEBUGCTLMSR) | DEBUGCTL_LBR_ENA); + } +} + +void __attribute__((no_sanitize("address"))) +i386_lbr_init(i386_cpu_info_t *info_p, bool is_master) +{ + if (!last_branch_support_enabled) { + i386_lbr_disable(); + return; + } + + if (is_master) { + /* All NHM+ CPUs support PERF_CAPABILITIES, so no need to check cpuid for its presence */ + cpu_lbr_type = PERFCAP_LBR_TYPE(rdmsr64(MSR_IA32_PERF_CAPABILITIES)); + + switch (info_p->cpuid_cpufamily) { + case CPUFAMILY_INTEL_NEHALEM: + case CPUFAMILY_INTEL_WESTMERE: + /* NHM family shares an LBR_SELECT MSR for both logical CPUs per core */ + cpu_lbr_setp = &nhm_cpu_lbrs; + break; + + case CPUFAMILY_INTEL_SANDYBRIDGE: + case CPUFAMILY_INTEL_IVYBRIDGE: + /* SNB+ has dedicated LBR_SELECT MSRs for each logical CPU per core */ + cpu_lbr_setp = &nhm_cpu_lbrs; + break; + + case CPUFAMILY_INTEL_HASWELL: + case CPUFAMILY_INTEL_BROADWELL: + lbr_need_tsx_workaround = cpuid_tsx_supported ? false : true; + cpu_lbr_setp = &nhm_cpu_lbrs; + break; + + case CPUFAMILY_INTEL_SKYLAKE: + case CPUFAMILY_INTEL_KABYLAKE: + case CPUFAMILY_INTEL_ICELAKE: + cpu_lbr_setp = &skl_cpu_lbrs; + break; + + default: + panic("Unknown CPU family"); + } + } + + /* Configure LBR_SELECT for CPL > 0 records only */ + wrmsr64(MSR_IA32_LBR_SELECT, LBR_SELECT_CPL_EQ_0); + + /* Enable LBRs */ + wrmsr64(MSR_IA32_DEBUGCTLMSR, rdmsr64(MSR_IA32_DEBUGCTLMSR) | DEBUGCTL_LBR_ENA); +} + +int +i386_lbr_native_state_to_mach_thread_state(pcb_t pcb, last_branch_state_t *machlbrp) +{ + int last_entry; + int i, j, lbr_tos; + uint64_t from_rip, to_rip; +#define LBR_SENTINEL_KERNEL_MODE (0x66726d6b65726e6cULL /* "frmkernl" */ ) + + machlbrp->lbr_count = cpu_lbr_setp->lbr_count; + lbr_tos = pcb->lbrs.lbr_tos & (X86_MAX_LBRS - 1); + last_entry = (lbr_tos == (cpu_lbr_setp->lbr_count - 1)) ? 0 : (lbr_tos + 1); + + switch (cpu_lbr_type) { + case PERFCAP_LBR_TYPE_MISPRED: /* NHM */ + + machlbrp->lbr_supported_tsx = 0; + machlbrp->lbr_supported_cycle_count = 0; + for (j = 0, i = lbr_tos;; (i = (i == 0) ? (cpu_lbr_setp->lbr_count - 1) : (i - 1)), j++) { + to_rip = pcb->lbrs.lbrs[i].to_rip; + machlbrp->lbrs[j].to_ip = (to_rip > VM_MAX_USER_PAGE_ADDRESS) ? LBR_SENTINEL_KERNEL_MODE : to_rip; + from_rip = LBR_TYPE_MISPRED_FROMRIP(pcb->lbrs.lbrs[i].from_rip); + machlbrp->lbrs[j].from_ip = (from_rip > VM_MAX_USER_PAGE_ADDRESS) ? 
LBR_SENTINEL_KERNEL_MODE : from_rip; + machlbrp->lbrs[j].mispredict = LBR_TYPE_MISPRED_MISPREDICT(pcb->lbrs.lbrs[i].from_rip); + machlbrp->lbrs[j].tsx_abort = machlbrp->lbrs[j].in_tsx = 0; /* Not Supported */ + if (i == last_entry) { + break; + } + } + break; + + case PERFCAP_LBR_TYPE_TSXINFO: /* HSW/BDW */ + + machlbrp->lbr_supported_tsx = cpuid_tsx_supported ? 1 : 0; + machlbrp->lbr_supported_cycle_count = 0; + for (j = 0, i = lbr_tos;; (i = (i == 0) ? (cpu_lbr_setp->lbr_count - 1) : (i - 1)), j++) { + to_rip = pcb->lbrs.lbrs[i].to_rip; + machlbrp->lbrs[j].to_ip = (to_rip > VM_MAX_USER_PAGE_ADDRESS) ? LBR_SENTINEL_KERNEL_MODE : to_rip; + + from_rip = LBR_TYPE_TSXINFO_FROMRIP(pcb->lbrs.lbrs[i].from_rip); + machlbrp->lbrs[j].from_ip = (from_rip > VM_MAX_USER_PAGE_ADDRESS) ? LBR_SENTINEL_KERNEL_MODE : from_rip; + machlbrp->lbrs[j].mispredict = LBR_TYPE_TSXINFO_MISPREDICT(pcb->lbrs.lbrs[i].from_rip); + if (cpuid_tsx_supported) { + machlbrp->lbrs[j].tsx_abort = LBR_TYPE_TSXINFO_TSX_ABORT(pcb->lbrs.lbrs[i].from_rip); + machlbrp->lbrs[j].in_tsx = LBR_TYPE_TSXINFO_IN_TSX(pcb->lbrs.lbrs[i].from_rip); + } else { + machlbrp->lbrs[j].tsx_abort = 0; + machlbrp->lbrs[j].in_tsx = 0; + } + if (i == last_entry) { + break; + } + } + break; + + case PERFCAP_LBR_TYPE_EIP_WITH_LBRINFO: /* SKL+ */ + + machlbrp->lbr_supported_tsx = cpuid_tsx_supported ? 1 : 0; + machlbrp->lbr_supported_cycle_count = 1; + for (j = 0, i = lbr_tos;; (i = (i == 0) ? (cpu_lbr_setp->lbr_count - 1) : (i - 1)), j++) { + from_rip = pcb->lbrs.lbrs[i].from_rip; + machlbrp->lbrs[j].from_ip = (from_rip > VM_MAX_USER_PAGE_ADDRESS) ? LBR_SENTINEL_KERNEL_MODE : from_rip; + to_rip = pcb->lbrs.lbrs[i].to_rip; + machlbrp->lbrs[j].to_ip = (to_rip > VM_MAX_USER_PAGE_ADDRESS) ? LBR_SENTINEL_KERNEL_MODE : to_rip; + machlbrp->lbrs[j].mispredict = LBR_TYPE_EIP_WITH_LBRINFO_MISPREDICT(pcb->lbrs.lbrs[i].info); + machlbrp->lbrs[j].tsx_abort = LBR_TYPE_EIP_WITH_LBRINFO_TSX_ABORT(pcb->lbrs.lbrs[i].info); + machlbrp->lbrs[j].in_tsx = LBR_TYPE_EIP_WITH_LBRINFO_IN_TSX(pcb->lbrs.lbrs[i].info); + machlbrp->lbrs[j].cycle_count = LBR_TYPE_EIP_WITH_LBRINFO_CYC_COUNT(pcb->lbrs.lbrs[i].info); + if (i == last_entry) { + break; + } + } + break; + + default: +#if DEBUG || DEVELOPMENT + panic("Unknown LBR format: %d!", cpu_lbr_type); + /*NOTREACHED*/ +#else + return -1; +#endif + } + + return 0; +} + +void +i386_lbr_synch(thread_t thr) +{ + pcb_t old_pcb = THREAD_TO_PCB(thr); + int i; + + /* First, save current LBRs to the old thread's PCB */ + if (cpu_lbr_setp->msr_lbrs[0].msr_info != 0) { + for (i = 0; i < cpu_lbr_setp->lbr_count; i++) { + old_pcb->lbrs.lbrs[i].from_rip = rdmsr64(cpu_lbr_setp->msr_lbrs[i].msr_from); + old_pcb->lbrs.lbrs[i].to_rip = rdmsr64(cpu_lbr_setp->msr_lbrs[i].msr_to); + old_pcb->lbrs.lbrs[i].info = rdmsr64(cpu_lbr_setp->msr_lbrs[i].msr_info); + } + } else { + for (i = 0; i < cpu_lbr_setp->lbr_count; i++) { + old_pcb->lbrs.lbrs[i].from_rip = rdmsr64(cpu_lbr_setp->msr_lbrs[i].msr_from); + old_pcb->lbrs.lbrs[i].to_rip = rdmsr64(cpu_lbr_setp->msr_lbrs[i].msr_to); + } + } + + /* Finally, save the TOS */ + old_pcb->lbrs.lbr_tos = rdmsr64(MSR_IA32_LASTBRANCH_TOS); +} + void -act_machine_switch_pcb(__unused thread_t old, thread_t new) +i386_switch_lbrs(thread_t old, thread_t new) +{ + pcb_t new_pcb; + int i; + bool save_old = (old != NULL && old->task != kernel_task); + bool restore_new = (new->task != kernel_task); + + if (!save_old && !restore_new) { + return; + } + + assert(cpu_lbr_setp != NULL); + + new_pcb = THREAD_TO_PCB(new); + + 
i386_lbr_disable(); + + if (save_old) { + i386_lbr_synch(old); + } + + if (restore_new) { + /* Now restore the new threads's LBRs */ + if (cpu_lbr_setp->msr_lbrs[0].msr_info != 0) { + for (i = 0; i < cpu_lbr_setp->lbr_count; i++) { + wrmsr64(cpu_lbr_setp->msr_lbrs[i].msr_from, new_pcb->lbrs.lbrs[i].from_rip); + wrmsr64(cpu_lbr_setp->msr_lbrs[i].msr_to, new_pcb->lbrs.lbrs[i].to_rip); + wrmsr64(cpu_lbr_setp->msr_lbrs[i].msr_info, new_pcb->lbrs.lbrs[i].info); + } + } else { + if (lbr_need_tsx_workaround) { + for (i = 0; i < cpu_lbr_setp->lbr_count; i++) { + /* + * If TSX has been disabled, the hardware expects those two bits to be sign + * extensions of bit 47 (even though it didn't return them that way via the rdmsr!) + */ +#define BIT_47 (1ULL << 47) + wrmsr64(cpu_lbr_setp->msr_lbrs[i].msr_from, + new_pcb->lbrs.lbrs[i].from_rip | + ((new_pcb->lbrs.lbrs[i].from_rip & BIT_47) ? 0x6000000000000000ULL : 0)); + wrmsr64(cpu_lbr_setp->msr_lbrs[i].msr_to, + new_pcb->lbrs.lbrs[i].to_rip | + ((new_pcb->lbrs.lbrs[i].to_rip & BIT_47) ? 0x6000000000000000ULL : 0)); + } + } else { + for (i = 0; i < cpu_lbr_setp->lbr_count; i++) { + wrmsr64(cpu_lbr_setp->msr_lbrs[i].msr_from, new_pcb->lbrs.lbrs[i].from_rip); + wrmsr64(cpu_lbr_setp->msr_lbrs[i].msr_to, new_pcb->lbrs.lbrs[i].to_rip); + } + } + } + + /* Lastly, restore the new threads's TOS */ + wrmsr64(MSR_IA32_LASTBRANCH_TOS, new_pcb->lbrs.lbr_tos); + } + + i386_lbr_enable(); +} + +void +act_machine_switch_pcb(thread_t old, thread_t new) { pcb_t pcb = THREAD_TO_PCB(new); cpu_data_t *cdp = current_cpu_datap(); @@ -231,6 +545,10 @@ act_machine_switch_pcb(__unused thread_t old, thread_t new) cdp->cpu_curthread_do_segchk = new->machine.mthr_do_segchk; + if (last_branch_support_enabled) { + i386_switch_lbrs(old, new); + } + /* * Set the thread`s LDT or LDT entry. */ @@ -247,12 +565,6 @@ act_machine_switch_pcb(__unused thread_t old, thread_t new) user_ldt_set(new); cdp->cpu_curtask_has_ldt = 1; } - - /* - * Bump the scheduler generation count in the commpage. - * This can be read by user code to detect its preemption. 
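Editor's note: the TSX-disabled restore path above ORs 0x6000000000000000 into the saved LBR FROM/TO values whenever bit 47 is set, because (per the in-line comment) the hardware expects those bits to be sign extensions of bit 47 even though rdmsr did not report them that way. The standalone sketch below reproduces only that bit manipulation so the transformation can be eyeballed; it makes no claim about which MSR bits those are beyond what the diff states.

/* Sketch of the BIT_47 fix-up applied before restoring LBR MSRs when TSX is
 * disabled: if bit 47 of the saved address is set, OR in the constant the
 * hardware expects as its sign extension. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define BIT_47              (1ULL << 47)
#define TSX_SIGN_EXT_BITS   0x6000000000000000ULL

static uint64_t
lbr_restore_value(uint64_t saved)
{
    return saved | ((saved & BIT_47) ? TSX_SIGN_EXT_BITS : 0);
}

int
main(void)
{
    uint64_t low_addr  = 0x00007fff12345678ULL;  /* bit 47 clear: unchanged */
    uint64_t high_addr = 0x0000800000001234ULL;  /* bit 47 set: bits ORed in */

    printf("0x%016" PRIx64 " -> 0x%016" PRIx64 "\n", low_addr, lbr_restore_value(low_addr));
    printf("0x%016" PRIx64 " -> 0x%016" PRIx64 "\n", high_addr, lbr_restore_value(high_addr));
    return 0;
}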
- */ - commpage_sched_gen_inc(); } kern_return_t @@ -354,14 +666,7 @@ machine_thread_create( { pcb_t pcb = THREAD_TO_PCB(thread); -#if NCOPY_WINDOWS > 0 - inval_copy_windows(thread); - - thread->machine.physwindow_pte = 0; - thread->machine.physwindow_busy = 0; -#endif - - if (__improbable(force_thread_policy_tecs)) { + if ((task->t_flags & TF_TECS) || __improbable(force_thread_policy_tecs)) { thread->machine.mthr_do_segchk = 1; } else { thread->machine.mthr_do_segchk = 0; @@ -388,6 +693,8 @@ machine_thread_create( bzero((char *)pcb->iss, sizeof(x86_saved_state_t)); + bzero(&pcb->lbrs, sizeof(x86_lbrs_t)); + if (task_has_64Bit_addr(task)) { pcb->iss->flavor = x86_SAVED_STATE64; @@ -421,6 +728,12 @@ machine_thread_create( } + pcb->insn_state_copyin_failure_errorcode = 0; + if (pcb->insn_state != 0) { /* Reinit for new thread */ + bzero(pcb->insn_state, sizeof(x86_instruction_state_t)); + pcb->insn_state->insn_stream_valid_bytes = -1; + } + return KERN_SUCCESS; } @@ -451,6 +764,12 @@ machine_thread_destroy( zfree(ids_zone, pcb->ids); pcb->ids = NULL; } + + if (pcb->insn_state != 0) { + kfree(pcb->insn_state, sizeof(x86_instruction_state_t)); + pcb->insn_state = 0; + } + pcb->insn_state_copyin_failure_errorcode = 0; } kern_return_t diff --git a/osfmk/i386/phys.c b/osfmk/i386/phys.c index 3cdae0971..130c8aec3 100644 --- a/osfmk/i386/phys.c +++ b/osfmk/i386/phys.c @@ -209,14 +209,6 @@ ml_copy_phys(addr64_t src64, addr64_t dst64, vm_size_t bytes) int err = 0; mp_disable_preemption(); -#if NCOPY_WINDOWS > 0 - mapwindow_t *src_map, *dst_map; - /* We rely on MTRRs here */ - src_map = pmap_get_mapwindow((pt_entry_t)(INTEL_PTE_VALID | ((pmap_paddr_t)src64 & PG_FRAME) | INTEL_PTE_REF)); - dst_map = pmap_get_mapwindow((pt_entry_t)(INTEL_PTE_VALID | INTEL_PTE_RW | ((pmap_paddr_t)dst64 & PG_FRAME) | INTEL_PTE_REF | INTEL_PTE_MOD)); - src = (void *) ((uintptr_t)src_map->prv_CADDR | ((uint32_t)src64 & INTEL_OFFMASK)); - dst = (void *) ((uintptr_t)dst_map->prv_CADDR | ((uint32_t)dst64 & INTEL_OFFMASK)); -#elif defined(__x86_64__) addr64_t debug_pa = 0; /* If either destination or source are outside the @@ -252,7 +244,6 @@ ml_copy_phys(addr64_t src64, addr64_t dst64, vm_size_t bytes) kprintf("Remapping debugger physical window at %p to 0x%llx\n", (void *)debugger_window_kva, debug_pa); #endif } -#endif /* ensure we stay within a page */ if (((((uint32_t)src64 & (I386_PGBYTES - 1)) + bytes) > I386_PGBYTES) || ((((uint32_t)dst64 & (I386_PGBYTES - 1)) + bytes) > I386_PGBYTES)) { panic("ml_copy_phys spans pages, src: 0x%llx, dst: 0x%llx", src64, dst64); @@ -280,10 +271,6 @@ ml_copy_phys(addr64_t src64, addr64_t dst64, vm_size_t bytes) break; } -#if NCOPY_WINDOWS > 0 - pmap_put_mapwindow(src_map); - pmap_put_mapwindow(dst_map); -#endif mp_enable_preemption(); return err; diff --git a/osfmk/i386/pmCPU.c b/osfmk/i386/pmCPU.c index d0f1040fe..038ae1313 100644 --- a/osfmk/i386/pmCPU.c +++ b/osfmk/i386/pmCPU.c @@ -109,7 +109,7 @@ machine_idle(void) uint64_t ctime, rtime, itime; #if CST_DEMOTION_DEBUG processor_t cproc = my_cpu->cpu_processor; - uint64_t cwakeups = PROCESSOR_DATA(cproc, wakeups_issued_total); + uint64_t cwakeups = my_cpu->cpu_wakeups_issued_total; #endif /* CST_DEMOTION_DEBUG */ uint64_t esdeadline, ehdeadline; boolean_t do_process_pending_timers = FALSE; @@ -223,7 +223,7 @@ machine_idle_exit: TCOAL_DEBUG(0xBBBB0000 | DBG_FUNC_END, ctime, esdeadline, idle_pending_timers_processed, 0, 0); } #if CST_DEMOTION_DEBUG - uint64_t nwakeups = PROCESSOR_DATA(cproc, wakeups_issued_total); + uint64_t nwakeups = 
my_cpu->cpu_wakeups_issued_total; if ((nwakeups == cwakeups) && (topoParms.nLThreadsPerPackage == my_cpu->lcpu.package->num_idle)) { KERNEL_DEBUG_CONSTANT(0xceaa0000, cwakeups, 0, 0, 0, 0); diff --git a/osfmk/i386/pmap.h b/osfmk/i386/pmap.h index 06f61e536..34a434d7d 100644 --- a/osfmk/i386/pmap.h +++ b/osfmk/i386/pmap.h @@ -416,6 +416,7 @@ extern boolean_t pmap_ept_support_ad; #define PTE_VALID_MASK(is_ept) ((is_ept) ? (INTEL_EPT_READ | INTEL_EPT_WRITE | INTEL_EPT_EX) : INTEL_PTE_VALID) #define PTE_READ(is_ept) ((is_ept) ? INTEL_EPT_READ : INTEL_PTE_VALID) #define PTE_WRITE(is_ept) ((is_ept) ? INTEL_EPT_WRITE : INTEL_PTE_WRITE) +#define PTE_IS_EXECUTABLE(is_ept, pte) ((is_ept) ? (((pte) & INTEL_EPT_EX) != 0) : (((pte) & INTEL_PTE_NX) == 0)) #define PTE_PS INTEL_PTE_PS #define PTE_COMPRESSED INTEL_PTE_COMPRESSED #define PTE_COMPRESSED_ALT INTEL_PTE_COMPRESSED_ALT @@ -547,6 +548,7 @@ struct pmap { task_map_t pm_task_map; boolean_t pagezero_accessible; + boolean_t pm_vm_map_cs_enforced; /* is vm_map cs_enforced? */ #define PMAP_PCID_MAX_CPUS MAX_CPUS /* Must be a multiple of 8 */ pcid_t pmap_pcid_cpus[PMAP_PCID_MAX_CPUS]; volatile uint8_t pmap_pcid_coherency_vector[PMAP_PCID_MAX_CPUS]; @@ -584,35 +586,6 @@ is_ept_pmap(pmap_t p) void hv_ept_pmap_create(void **ept_pmap, void **eptp); -#if NCOPY_WINDOWS > 0 -#define PMAP_PDPT_FIRST_WINDOW 0 -#define PMAP_PDPT_NWINDOWS 4 -#define PMAP_PDE_FIRST_WINDOW (PMAP_PDPT_NWINDOWS) -#define PMAP_PDE_NWINDOWS 4 -#define PMAP_PTE_FIRST_WINDOW (PMAP_PDE_FIRST_WINDOW + PMAP_PDE_NWINDOWS) -#define PMAP_PTE_NWINDOWS 4 - -#define PMAP_NWINDOWS_FIRSTFREE (PMAP_PTE_FIRST_WINDOW + PMAP_PTE_NWINDOWS) -#define PMAP_WINDOW_SIZE 8 -#define PMAP_NWINDOWS (PMAP_NWINDOWS_FIRSTFREE + PMAP_WINDOW_SIZE) - -typedef struct { - pt_entry_t *prv_CMAP; - caddr_t prv_CADDR; -} mapwindow_t; - -typedef struct cpu_pmap { - int pdpt_window_index; - int pde_window_index; - int pte_window_index; - mapwindow_t mapwindow[PMAP_NWINDOWS]; -} cpu_pmap_t; - - -extern mapwindow_t *pmap_get_mapwindow(pt_entry_t pentry); -extern void pmap_put_mapwindow(mapwindow_t *map); -#endif - typedef struct pmap_memory_regions { ppnum_t base; /* first page of this region */ ppnum_t alloc_up; /* pages below this one have been "stolen" */ @@ -723,16 +696,15 @@ extern int pmap_list_resident_pages( vm_offset_t *listp, int space); extern void x86_filter_TLB_coherency_interrupts(boolean_t); + +extern void +pmap_mark_range(pmap_t npmap, uint64_t sv, uint64_t nxrosz, boolean_t NX, + boolean_t ro); + /* * Get cache attributes (as pagetable bits) for the specified phys page */ extern unsigned pmap_get_cache_attributes(ppnum_t, boolean_t is_ept); -#if NCOPY_WINDOWS > 0 -extern struct cpu_pmap *pmap_cpu_alloc( - boolean_t is_boot_cpu); -extern void pmap_cpu_free( - struct cpu_pmap *cp); -#endif extern kern_return_t pmap_map_block( pmap_t pmap, @@ -745,17 +717,21 @@ extern kern_return_t pmap_map_block( extern void invalidate_icache(vm_offset_t addr, unsigned cnt, int phys); extern void flush_dcache(vm_offset_t addr, unsigned count, int phys); -extern ppnum_t pmap_find_phys(pmap_t map, addr64_t va); +extern pmap_paddr_t pmap_find_pa(pmap_t map, addr64_t va); +extern ppnum_t pmap_find_phys(pmap_t map, addr64_t va); +extern ppnum_t pmap_find_phys_nofault(pmap_t pmap, addr64_t va); + +extern kern_return_t pmap_get_prot(pmap_t pmap, addr64_t va, vm_prot_t *protp); extern void pmap_cpu_init(void); extern void pmap_disable_NX(pmap_t pmap); -extern void pt_fake_zone_init(int); -extern void pt_fake_zone_info(int *, vm_size_t *, 
vm_size_t *, vm_size_t *, vm_size_t *, - uint64_t *, int *, int *, int *); extern void pmap_pagetable_corruption_msg_log(int (*)(const char * fmt, ...)__printflike(1, 2)); extern void x86_64_protect_data_const(void); + +extern uint64_t pmap_commpage_size_min(pmap_t pmap); + /* * Macros for speed. */ @@ -778,28 +754,15 @@ extern void x86_64_protect_data_const(void); #define PMAP_DEACTIVATE_MAP(map, thread) #endif -#if NCOPY_WINDOWS > 0 #define PMAP_SWITCH_USER(th, new_map, my_cpu) { \ spl_t spl; \ \ spl = splhigh(); \ - PMAP_DEACTIVATE_MAP(th->map, th); \ - th->map = new_map; \ - PMAP_ACTIVATE_MAP(th->map, th); \ - splx(spl); \ - inval_copy_windows(th); \ -} -#else -#define PMAP_SWITCH_USER(th, new_map, my_cpu) { \ - spl_t spl; \ - \ - spl = splhigh(); \ - PMAP_DEACTIVATE_MAP(th->map, th, my_cpu); \ + PMAP_DEACTIVATE_MAP(th->map, th, my_cpu); \ th->map = new_map; \ PMAP_ACTIVATE_MAP(th->map, th, my_cpu); \ splx(spl); \ } -#endif /* * Marking the current cpu's cr3 inactive is achieved by setting its lsb. diff --git a/osfmk/i386/pmap_internal.h b/osfmk/i386/pmap_internal.h index 5928bda3f..f9db3a737 100644 --- a/osfmk/i386/pmap_internal.h +++ b/osfmk/i386/pmap_internal.h @@ -270,7 +270,7 @@ typedef struct pv_hashed_entry { #define PV_HASHED_ENTRY_NULL ((pv_hashed_entry_t)0) -#define PVE_VA(pve) ((pve)->va_and_flags & ~PAGE_MASK) +#define PVE_VA(pve) ((pve)->va_and_flags & (vm_map_offset_t)~PAGE_MASK) #define PVE_FLAGS(pve) ((pve)->va_and_flags & PAGE_MASK) #define PVE_IS_ALTACCT 0x001 #define PVE_IS_ALTACCT_PAGE(pve) \ @@ -354,7 +354,7 @@ PV_HASHED_FREE_LIST(pv_hashed_entry_t pvh_eh, pv_hashed_entry_t pvh_et, int pv_c simple_lock(&pv_hashed_free_list_lock, LCK_GRP_NULL); pvh_et->qlink.next = (queue_entry_t)pv_hashed_free_list; pv_hashed_free_list = pvh_eh; - pv_hashed_free_count += pv_cnt; + pv_hashed_free_count += (uint32_t)pv_cnt; simple_unlock(&pv_hashed_free_list_lock); } @@ -387,7 +387,7 @@ PV_HASHED_KERN_FREE_LIST(pv_hashed_entry_t pvh_eh, pv_hashed_entry_t pvh_et, int simple_lock(&pv_hashed_kern_free_list_lock, LCK_GRP_NULL); pvh_et->qlink.next = (queue_entry_t)pv_hashed_kern_free_list; pv_hashed_kern_free_list = pvh_eh; - pv_hashed_kern_free_count += pv_cnt; + pv_hashed_kern_free_count += (uint32_t)pv_cnt; simple_unlock(&pv_hashed_kern_free_list_lock); } @@ -426,11 +426,11 @@ pmap_pv_throttle(__unused pmap_t p) #define IS_MANAGED_PAGE(x) \ ((unsigned int)(x) <= last_managed_page && \ - (pmap_phys_attributes[x] & PHYS_MANAGED)) + ((unsigned long long)pmap_phys_attributes[x] & PHYS_MANAGED)) #define IS_INTERNAL_PAGE(x) \ - (IS_MANAGED_PAGE(x) && (pmap_phys_attributes[x] & PHYS_INTERNAL)) + (IS_MANAGED_PAGE(x) && ((unsigned long long)pmap_phys_attributes[x] & PHYS_INTERNAL)) #define IS_REUSABLE_PAGE(x) \ - (IS_MANAGED_PAGE(x) && (pmap_phys_attributes[x] & PHYS_REUSABLE)) + (IS_MANAGED_PAGE(x) && ((unsigned long long)pmap_phys_attributes[x] & PHYS_REUSABLE)) #define IS_ALTACCT_PAGE(x, pve) \ (IS_MANAGED_PAGE((x)) && \ (PVE_IS_ALTACCT_PAGE((pve)))) @@ -562,7 +562,7 @@ pmap_pvh_unlink(pv_hashed_entry_t pvh) { pv_hashed_entry_t curh; pv_hashed_entry_t *pprevh; - int pvhash_idx; + uint32_t pvhash_idx; CHK_NPVHASH(); pvhash_idx = pvhashidx(pvh->pmap, PVE_VA(pvh)); @@ -595,7 +595,7 @@ pv_hash_add(pv_hashed_entry_t pvh_e, pv_rooted_entry_t pv_h) { pv_hashed_entry_t *hashp; - int pvhash_idx; + uint32_t pvhash_idx; CHK_NPVHASH(); pvhash_idx = pvhashidx(pvh_e->pmap, PVE_VA(pvh_e)); @@ -615,7 +615,7 @@ pv_hash_add(pv_hashed_entry_t pvh_e, static inline void pv_hash_remove(pv_hashed_entry_t pvh_e) { 
- int pvhash_idx; + uint32_t pvhash_idx; CHK_NPVHASH(); pvhash_idx = pvhashidx(pvh_e->pmap, PVE_VA(pvh_e)); @@ -885,7 +885,7 @@ pmap_pv_remove(pmap_t pmap, pv_hashed_entry_t pvh_e; pv_rooted_entry_t pv_h; pv_hashed_entry_t *pprevh; - int pvhash_idx; + uint32_t pvhash_idx; uint32_t pv_cnt; ppnum_t ppn; @@ -1013,7 +1013,7 @@ pmap_pv_is_altacct( { pv_hashed_entry_t pvh_e; pv_rooted_entry_t pv_h; - int pvhash_idx; + uint32_t pvhash_idx; boolean_t is_altacct; pvh_e = PV_HASHED_ENTRY_NULL; @@ -1052,29 +1052,28 @@ pmap_pv_is_altacct( return is_altacct; } -extern int pt_fake_zone_index; static inline void PMAP_ZINFO_PALLOC(pmap_t pmap, vm_size_t bytes) { - pmap_ledger_credit(pmap, task_ledgers.tkm_private, bytes); + pmap_ledger_credit(pmap, task_ledgers.tkm_private, (ledger_amount_t)bytes); } static inline void PMAP_ZINFO_PFREE(pmap_t pmap, vm_size_t bytes) { - pmap_ledger_debit(pmap, task_ledgers.tkm_private, bytes); + pmap_ledger_debit(pmap, task_ledgers.tkm_private, (ledger_amount_t)bytes); } static inline void PMAP_ZINFO_SALLOC(pmap_t pmap, vm_size_t bytes) { - pmap_ledger_credit(pmap, task_ledgers.tkm_shared, bytes); + pmap_ledger_credit(pmap, task_ledgers.tkm_shared, (ledger_amount_t)bytes); } static inline void PMAP_ZINFO_SFREE(pmap_t pmap, vm_size_t bytes) { - pmap_ledger_debit(pmap, task_ledgers.tkm_shared, bytes); + pmap_ledger_debit(pmap, task_ledgers.tkm_shared, (ledger_amount_t)bytes); } extern boolean_t pmap_initialized;/* Has pmap_init completed? */ diff --git a/osfmk/i386/pmap_x86_common.c b/osfmk/i386/pmap_x86_common.c index eae2bf321..b37111a7c 100644 --- a/osfmk/i386/pmap_x86_common.c +++ b/osfmk/i386/pmap_x86_common.c @@ -40,7 +40,7 @@ void pmap_remove_range( pt_entry_t *spte, pt_entry_t *epte); -void pmap_remove_range_options( +static void pmap_remove_range_options( pmap_t pmap, vm_map_offset_t va, pt_entry_t *spte, @@ -61,9 +61,23 @@ uint32_t pmap_update_clear_pte_count; * on a NBPDE boundary. */ -/* These symbols may be referenced directly by VM */ -uint64_t pmap_nesting_size_min = NBPDE; -uint64_t pmap_nesting_size_max = 0 - (uint64_t)NBPDE; +uint64_t +pmap_shared_region_size_min(__unused pmap_t pmap) +{ + return NBPDE; +} + +uint64_t +pmap_commpage_size_min(__unused pmap_t pmap) +{ + return NBPDE; +} + +uint64_t +pmap_nesting_size_max(__unused pmap_t pmap) +{ + return 0llu - (uint64_t)NBPDE; +} /* * kern_return_t pmap_nest(grand, subord, va_start, size) @@ -71,7 +85,6 @@ uint64_t pmap_nesting_size_max = 0 - (uint64_t)NBPDE; * grand = the pmap that we will nest subord into * subord = the pmap that goes into the grand * va_start = start of range in pmap to be inserted - * nstart = start of range in pmap nested pmap * size = Size of nest area (up to 16TB) * * Inserts a pmap into another. This is used to implement shared segments. 
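The hunk above turns the old pmap_nesting_size_min/pmap_nesting_size_max globals into per-pmap accessor functions returning NBPDE-derived values, and the pmap_nest()/pmap_unnest() callers later in this file test alignment by masking against pmap_shared_region_size_min(grand) - 1. A minimal sketch of that power-of-two alignment test follows; nest_args_aligned() is a hypothetical helper written only for illustration, and only pmap_shared_region_size_min() and the pmap_t/addr64_t types come from this patch and its headers.

/*
 * Sketch only: the alignment-check pattern used by pmap_nest()/pmap_unnest().
 * pmap_shared_region_size_min() returns NBPDE, a power of two, so
 * (x & (size - 1)) == 0 exactly when x is size-aligned.
 * nest_args_aligned() is hypothetical and not part of the patch.
 */
static inline boolean_t
nest_args_aligned(pmap_t grand, addr64_t va_start, uint64_t size)
{
	uint64_t mask = pmap_shared_region_size_min(grand) - 1;

	return ((va_start & mask) == 0 && (size & mask) == 0) ? TRUE : FALSE;
}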
@@ -90,9 +103,9 @@ uint64_t pmap_nesting_size_max = 0 - (uint64_t)NBPDE; */ kern_return_t -pmap_nest(pmap_t grand, pmap_t subord, addr64_t va_start, addr64_t nstart, uint64_t size) +pmap_nest(pmap_t grand, pmap_t subord, addr64_t va_start, uint64_t size) { - vm_map_offset_t vaddr, nvaddr; + vm_map_offset_t vaddr; pd_entry_t *pde, *npde; unsigned int i; uint64_t num_pde; @@ -100,9 +113,8 @@ pmap_nest(pmap_t grand, pmap_t subord, addr64_t va_start, addr64_t nstart, uint6 assert(!is_ept_pmap(grand)); assert(!is_ept_pmap(subord)); - if ((size & (pmap_nesting_size_min - 1)) || - (va_start & (pmap_nesting_size_min - 1)) || - (nstart & (pmap_nesting_size_min - 1)) || + if ((size & (pmap_shared_region_size_min(grand) - 1)) || + (va_start & (pmap_shared_region_size_min(grand) - 1)) || ((size >> 28) > 65536)) { /* Max size we can nest is 16TB */ return KERN_INVALID_VALUE; } @@ -111,15 +123,11 @@ pmap_nest(pmap_t grand, pmap_t subord, addr64_t va_start, addr64_t nstart, uint6 panic("pmap_nest: size is invalid - %016llX\n", size); } - if (va_start != nstart) { - panic("pmap_nest: va_start(0x%llx) != nstart(0x%llx)\n", va_start, nstart); - } - PMAP_TRACE(PMAP_CODE(PMAP__NEST) | DBG_FUNC_START, VM_KERNEL_ADDRHIDE(grand), VM_KERNEL_ADDRHIDE(subord), VM_KERNEL_ADDRHIDE(va_start)); - nvaddr = (vm_map_offset_t)nstart; + vaddr = (vm_map_offset_t)va_start; num_pde = size >> PDESHIFT; PMAP_LOCK_EXCLUSIVE(subord); @@ -127,28 +135,28 @@ pmap_nest(pmap_t grand, pmap_t subord, addr64_t va_start, addr64_t nstart, uint6 subord->pm_shared = TRUE; for (i = 0; i < num_pde;) { - if (((nvaddr & PDPTMASK) == 0) && (num_pde - i) >= NPDEPG) { - npde = pmap64_pdpt(subord, nvaddr); + if (((vaddr & PDPTMASK) == 0) && (num_pde - i) >= NPDEPG) { + npde = pmap64_pdpt(subord, vaddr); while (0 == npde || ((*npde & INTEL_PTE_VALID) == 0)) { PMAP_UNLOCK_EXCLUSIVE(subord); - pmap_expand_pdpt(subord, nvaddr, PMAP_EXPAND_OPTIONS_NONE); + pmap_expand_pdpt(subord, vaddr, PMAP_EXPAND_OPTIONS_NONE); PMAP_LOCK_EXCLUSIVE(subord); - npde = pmap64_pdpt(subord, nvaddr); + npde = pmap64_pdpt(subord, vaddr); } *npde |= INTEL_PDPTE_NESTED; - nvaddr += NBPDPT; + vaddr += NBPDPT; i += (uint32_t)NPDEPG; } else { - npde = pmap_pde(subord, nvaddr); + npde = pmap_pde(subord, vaddr); while (0 == npde || ((*npde & INTEL_PTE_VALID) == 0)) { PMAP_UNLOCK_EXCLUSIVE(subord); - pmap_expand(subord, nvaddr, PMAP_EXPAND_OPTIONS_NONE); + pmap_expand(subord, vaddr, PMAP_EXPAND_OPTIONS_NONE); PMAP_LOCK_EXCLUSIVE(subord); - npde = pmap_pde(subord, nvaddr); + npde = pmap_pde(subord, vaddr); } - nvaddr += NBPDE; + vaddr += NBPDE; i++; } } @@ -232,8 +240,8 @@ pmap_unnest(pmap_t grand, addr64_t vaddr, uint64_t size) PMAP_TRACE(PMAP_CODE(PMAP__UNNEST) | DBG_FUNC_START, VM_KERNEL_ADDRHIDE(grand), VM_KERNEL_ADDRHIDE(vaddr)); - if ((size & (pmap_nesting_size_min - 1)) || - (vaddr & (pmap_nesting_size_min - 1))) { + if ((size & (pmap_shared_region_size_min(grand) - 1)) || + (vaddr & (pmap_shared_region_size_min(grand) - 1))) { panic("pmap_unnest(%p,0x%llx,0x%llx): unaligned...\n", grand, vaddr, size); } @@ -316,6 +324,51 @@ pmap_adjust_unnest_parameters(pmap_t p, vm_map_offset_t *s, vm_map_offset_t *e) return rval; } +pmap_paddr_t +pmap_find_pa(pmap_t pmap, addr64_t va) +{ + pt_entry_t *ptp; + pd_entry_t *pdep; + pd_entry_t pde; + pt_entry_t pte; + boolean_t is_ept, locked = FALSE; + pmap_paddr_t pa = 0; + + is_ept = is_ept_pmap(pmap); + + if ((pmap != kernel_pmap) && not_in_kdp) { + PMAP_LOCK_EXCLUSIVE(pmap); + locked = TRUE; + } else { + mp_disable_preemption(); + } + + if 
(os_ref_get_count(&pmap->ref_count) == 0) { + goto pfp_exit; + } + + pdep = pmap_pde(pmap, va); + + if ((pdep != PD_ENTRY_NULL) && ((pde = *pdep) & PTE_VALID_MASK(is_ept))) { + if (pde & PTE_PS) { + pa = pte_to_pa(pde) + (va & I386_LPGMASK); + } else { + ptp = pmap_pte(pmap, va); + if ((PT_ENTRY_NULL != ptp) && (((pte = *ptp) & PTE_VALID_MASK(is_ept)) != 0)) { + pa = pte_to_pa(pte) + (va & PAGE_MASK); + } + } + } +pfp_exit: + if (locked) { + PMAP_UNLOCK_EXCLUSIVE(pmap); + } else { + mp_enable_preemption(); + } + + return pa; +} + /* * pmap_find_phys returns the (4K) physical page number containing a * given virtual address in a given pmap. @@ -325,13 +378,43 @@ pmap_adjust_unnest_parameters(pmap_t p, vm_map_offset_t *s, vm_map_offset_t *e) */ ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va) +{ + ppnum_t ppn = 0; + pmap_paddr_t pa = 0; + + pa = pmap_find_pa(pmap, va); + ppn = (ppnum_t) i386_btop(pa); + + return ppn; +} + +ppnum_t +pmap_find_phys_nofault(pmap_t pmap, addr64_t va) +{ + if ((pmap == kernel_pmap) || + ((current_thread()->map) && (pmap == vm_map_pmap(current_thread()->map)))) { + return pmap_find_phys(pmap, va); + } + return 0; +} + +/* + * pmap_get_prot returns the equivalent Vm page protections + * set on a given address, 'va'. This function is used in the + * ml_static_verify_page_protections() routine which is used + * by the kext loading code to validate that the TEXT segment + * of a kext is mapped executable. + */ +kern_return_t +pmap_get_prot(pmap_t pmap, addr64_t va, vm_prot_t *protp) { pt_entry_t *ptp; pd_entry_t *pdep; - ppnum_t ppn = 0; pd_entry_t pde; pt_entry_t pte; boolean_t is_ept, locked = FALSE; + kern_return_t retval = KERN_FAILURE; + vm_prot_t prot = 0; is_ept = is_ept_pmap(pmap); @@ -350,15 +433,31 @@ pmap_find_phys(pmap_t pmap, addr64_t va) if ((pdep != PD_ENTRY_NULL) && ((pde = *pdep) & PTE_VALID_MASK(is_ept))) { if (pde & PTE_PS) { - ppn = (ppnum_t) i386_btop(pte_to_pa(pde)); - ppn += (ppnum_t) ptenum(va); + prot = VM_PROT_READ; + + if (pde & PTE_WRITE(is_ept)) { + prot |= VM_PROT_WRITE; + } + if (PTE_IS_EXECUTABLE(is_ept, pde)) { + prot |= VM_PROT_EXECUTE; + } + retval = KERN_SUCCESS; } else { ptp = pmap_pte(pmap, va); if ((PT_ENTRY_NULL != ptp) && (((pte = *ptp) & PTE_VALID_MASK(is_ept)) != 0)) { - ppn = (ppnum_t) i386_btop(pte_to_pa(pte)); + prot = VM_PROT_READ; + + if (pte & PTE_WRITE(is_ept)) { + prot |= VM_PROT_WRITE; + } + if (PTE_IS_EXECUTABLE(is_ept, pte)) { + prot |= VM_PROT_EXECUTE; + } + retval = KERN_SUCCESS; } } } + pfp_exit: if (locked) { PMAP_UNLOCK_EXCLUSIVE(pmap); @@ -366,7 +465,11 @@ pfp_exit: mp_enable_preemption(); } - return ppn; + if (protp) { + *protp = prot; + } + + return retval; } /* @@ -505,6 +608,21 @@ PTE_LOCK_UNLOCK(pt_entry_t *lpte) __c11_atomic_fetch_and((_Atomic pt_entry_t *)lpte, ~PTE_LOCK(0), memory_order_release_smp); } +kern_return_t +pmap_enter_options_addr( + pmap_t pmap, + vm_map_address_t v, + pmap_paddr_t pa, + vm_prot_t prot, + vm_prot_t fault_type, + unsigned int flags, + boolean_t wired, + unsigned int options, + __unused void *arg) +{ + return pmap_enter_options(pmap, v, intel_btop(pa), prot, fault_type, flags, wired, options, arg); +} + kern_return_t pmap_enter_options( pmap_t pmap, @@ -1216,7 +1334,7 @@ pmap_remove_range( PMAP_OPTIONS_REMOVE); } -void +static void pmap_remove_range_options( pmap_t pmap, vm_map_offset_t start_vaddr, @@ -1252,6 +1370,7 @@ pmap_remove_range_options( ledgers_compressed = 0; ledgers_alt_internal = 0; ledgers_alt_compressed = 0; + /* invalidate the PTEs first to "freeze" them */ 
for (cpte = spte, vaddr = start_vaddr; cpte < epte; @@ -1406,6 +1525,14 @@ check_pte_for_compressed_marker: } pvh_cnt++; } + /* We can encounter at most 'num_found' PTEs for this level + * Fewer may be encountered if some were replaced by + * compressed markers. No new valid PTEs can be created + * since the pmap lock is held exclusively. + */ + if (num_removed == num_found) { + break; + } } /* for loop */ if (pvh_eh != PV_HASHED_ENTRY_NULL) { @@ -1420,11 +1547,13 @@ update_counts: panic("pmap_remove_range: resident_count"); } #endif - pmap_ledger_debit(pmap, task_ledgers.phys_mem, machine_ptob(num_removed)); - PMAP_STATS_ASSERTF((pmap->stats.resident_count >= num_removed, - "pmap=%p num_removed=%d stats.resident_count=%d", - pmap, num_removed, pmap->stats.resident_count)); - OSAddAtomic(-num_removed, &pmap->stats.resident_count); + if (num_removed) { + pmap_ledger_debit(pmap, task_ledgers.phys_mem, machine_ptob(num_removed)); + PMAP_STATS_ASSERTF((pmap->stats.resident_count >= num_removed, + "pmap=%p num_removed=%d stats.resident_count=%d", + pmap, num_removed, pmap->stats.resident_count)); + OSAddAtomic(-num_removed, &pmap->stats.resident_count); + } if (pmap != kernel_pmap) { PMAP_STATS_ASSERTF((pmap->stats.external >= stats_external, @@ -1454,6 +1583,7 @@ update_counts: OSAddAtomic64(-stats_compressed, &pmap->stats.compressed); } /* update ledgers */ + if (ledgers_internal) { pmap_ledger_debit(pmap, task_ledgers.internal, @@ -1474,12 +1604,11 @@ update_counts: task_ledgers.alternate_accounting_compressed, machine_ptob(ledgers_alt_compressed)); } - pmap_ledger_debit(pmap, - task_ledgers.phys_footprint, - machine_ptob((ledgers_internal - - ledgers_alt_internal) + - (ledgers_compressed - - ledgers_alt_compressed))); + + uint64_t net_debit = (ledgers_internal - ledgers_alt_internal) + (ledgers_compressed - ledgers_alt_compressed); + if (net_debit) { + pmap_ledger_debit(pmap, task_ledgers.phys_footprint, machine_ptob(net_debit)); + } } #if TESTING @@ -1490,9 +1619,11 @@ update_counts: PMAP_STATS_ASSERTF((pmap->stats.wired_count >= num_unwired, "pmap=%p num_unwired=%d stats.wired_count=%d", pmap, num_unwired, pmap->stats.wired_count)); - OSAddAtomic(-num_unwired, &pmap->stats.wired_count); - pmap_ledger_debit(pmap, task_ledgers.wired_mem, machine_ptob(num_unwired)); + if (num_unwired != 0) { + OSAddAtomic(-num_unwired, &pmap->stats.wired_count); + pmap_ledger_debit(pmap, task_ledgers.wired_mem, machine_ptob(num_unwired)); + } return; } @@ -1512,7 +1643,7 @@ pmap_remove( { pmap_remove_options(map, s64, e64, PMAP_OPTIONS_REMOVE); } -#define PLCHECK_THRESHOLD (8) +#define PLCHECK_THRESHOLD (2) void pmap_remove_options( @@ -1586,13 +1717,13 @@ pmap_remove_options( if ((s64 < e64) && (traverse_count++ > PLCHECK_THRESHOLD)) { if (deadline == 0) { - deadline = rdtsc64() + max_preemption_latency_tsc; + deadline = rdtsc64_nofence() + max_preemption_latency_tsc; } else { - if (rdtsc64() > deadline) { + if (rdtsc64_nofence() > deadline) { PMAP_UNLOCK_EXCLUSIVE(map); __builtin_ia32_pause(); PMAP_LOCK_EXCLUSIVE(map); - deadline = rdtsc64() + max_preemption_latency_tsc; + deadline = rdtsc64_nofence() + max_preemption_latency_tsc; } } } @@ -2552,6 +2683,26 @@ done: return KERN_SUCCESS; } +void +pmap_set_vm_map_cs_enforced( + pmap_t pmap, + bool new_value) +{ + PMAP_LOCK_EXCLUSIVE(pmap); + pmap->pm_vm_map_cs_enforced = new_value; + PMAP_UNLOCK_EXCLUSIVE(pmap); +} +extern int cs_process_enforcement_enable; +bool +pmap_get_vm_map_cs_enforced( + pmap_t pmap) +{ + if (cs_process_enforcement_enable) { + return 
true; + } + return pmap->pm_vm_map_cs_enforced; +} + void pmap_set_jit_entitled(__unused pmap_t pmap) { @@ -2560,13 +2711,20 @@ pmap_set_jit_entitled(__unused pmap_t pmap) } bool -pmap_has_prot_policy(__unused vm_prot_t prot) +pmap_get_jit_entitled(__unused pmap_t pmap) +{ + /* The x86 pmap layer does not care if a map is using JIT. */ + return false; +} + +bool +pmap_has_prot_policy(__unused pmap_t pmap, __unused bool translated_allow_execute, __unused vm_prot_t prot) { /* * The x86 pmap layer does not apply any policy to any protection * types. */ - return FALSE; + return false; } uint64_t @@ -2576,7 +2734,7 @@ pmap_release_pages_fast(void) } void -pmap_trim(__unused pmap_t grand, __unused pmap_t subord, __unused addr64_t vstart, __unused addr64_t nstart, __unused uint64_t size) +pmap_trim(__unused pmap_t grand, __unused pmap_t subord, __unused addr64_t vstart, __unused uint64_t size) { return; } @@ -2607,10 +2765,11 @@ pmap_ledger_free(ledger_t ledger) __func__, ledger); } -size_t -pmap_dump_page_tables(pmap_t pmap __unused, void *bufp __unused, void *buf_end __unused) +kern_return_t +pmap_dump_page_tables(pmap_t pmap __unused, void *bufp __unused, void *buf_end __unused, + unsigned int level_mask __unused, size_t *bytes_copied __unused) { - return (size_t)-1; + return KERN_NOT_SUPPORTED; } void * @@ -2624,3 +2783,18 @@ void pmap_unmap_compressor_page(ppnum_t pn __unused, void *kva __unused) { } + +bool +pmap_clear_refmod_range_options( + pmap_t pmap __unused, + vm_map_address_t start __unused, + vm_map_address_t end __unused, + unsigned int mask __unused, + unsigned int options __unused) +{ + /* + * x86 doesn't have ranged tlbi instructions, and we already have + * the pmap_flush_context. This operation isn't implemented. + */ + return false; +} diff --git a/osfmk/i386/proc_reg.h b/osfmk/i386/proc_reg.h index c6b8f0be9..db965a0db 100644 --- a/osfmk/i386/proc_reg.h +++ b/osfmk/i386/proc_reg.h @@ -197,6 +197,16 @@ */ #define CONFIG_THREAD_GROUPS 0 +/* + * MAX_PSETS allows the scheduler to create statically sized + * scheduling data structures (such as an array of processor sets, clutch + * buckets in Edge scheduler etc.). + * + * + */ +#define MAX_PSETS 64 +#define MAX_CPUS 64 + #ifndef ASSEMBLER #include @@ -460,6 +470,26 @@ stac(void) __asm__ volatile("rdpmc" : "=a" (lo), "=d" (hi) : "c" (counter)) #ifdef XNU_KERNEL_PRIVATE + +#define X86_MAX_LBRS 32 +struct x86_lbr_record { + /* + * Note that some CPUs convey extra info in the upper bits of the from/to fields, + * whereas others convey that information in the LBR_INFO companion MSRs. + * The proper info will be extracted based on the CPU family detected at runtime + * when LBR thread state is requested. 
+ */ + uint64_t from_rip; + uint64_t to_rip; + uint64_t info; +}; + +typedef struct x86_lbrs { + uint64_t lbr_tos; + struct x86_lbr_record lbrs[X86_MAX_LBRS]; +} x86_lbrs_t; + + extern void do_mfence(void); #define mfence() do_mfence() #endif @@ -495,6 +525,14 @@ rdtsc64(void) return ((hi) << 32) | (lo); } +static inline uint64_t +rdtsc64_nofence(void) +{ + uint64_t lo, hi; + rdtsc_nofence(lo, hi); + return ((hi) << 32) | (lo); +} + static inline uint64_t rdtscp64(uint32_t *aux) { @@ -558,12 +596,21 @@ __END_DECLS #define MSR_IA32_ARCH_CAPABILITIES_L1DF_NO (1ULL << 3) #define MSR_IA32_ARCH_CAPABILITIES_SSB_NO (1ULL << 4) #define MSR_IA32_ARCH_CAPABILITIES_MDS_NO (1ULL << 5) +#define MSR_IA32_ARCH_CAPABILITIES_IFU_NO (1ULL << 6) /* This CPU is not susceptible to the instruction-fetch erratum */ +#define MSR_IA32_ARCH_CAPABILITIES_TSX_CTRL (1ULL << 7) /* This CPU supports the TSX_CTRL MSR */ +#define MSR_IA32_ARCH_CAPABILITIES_TAA_NO (1ULL << 8) /* This CPU is not susceptible to TAA */ #define MSR_IA32_TSX_FORCE_ABORT 0x10f #define MSR_IA32_TSXFA_RTM_FORCE_ABORT (1ULL << 0) /* Bit 0 */ #define MSR_IA32_BBL_CR_CTL 0x119 +#define MSR_IA32_TSX_CTRL 0x122 +#define MSR_IA32_TSXCTRL_RTM_DISABLE (1ULL << 0) /* Bit 0 */ +#define MSR_IA32_TSXCTRL_TSX_CPU_CLEAR (1ULL << 1) /* Bit 1 */ + +#define MSR_IA32_MCU_OPT_CTRL 0x123 +#define MSR_IA32_MCUOPTCTRL_RNGDS_MITG_DIS (1ULL << 0) /* Bit 0 */ #define MSR_IA32_SYSENTER_CS 0x174 #define MSR_IA32_SYSENTER_ESP 0x175 @@ -584,12 +631,33 @@ __END_DECLS #define MSR_IA32_CLOCK_MODULATION 0x19a #define MSR_IA32_MISC_ENABLE 0x1a0 - - #define MSR_IA32_PACKAGE_THERM_STATUS 0x1b1 #define MSR_IA32_PACKAGE_THERM_INTERRUPT 0x1b2 +#define MSR_IA32_LBR_SELECT 0x1c8 +#define LBR_SELECT_CPL_EQ_0 (1ULL) /* R/W When set, do not capture branches ending in ring 0 */ +#define LBR_SELECT_CPL_NEQ_0 (1ULL << 1) /* R/W When set, do not capture branches ending in ring >0 */ +#define LBR_SELECT_JCC (1ULL << 2) /* R/W When set, do not capture conditional branches */ +#define LBR_SELECT_NEAR_REL_CALL (1ULL << 3) /* R/W When set, do not capture near relative calls */ +#define LBR_SELECT_NEAR_IND_CALL (1ULL << 4) /* R/W When set, do not capture near indirect calls */ +#define LBR_SELECT_NEAR_RET (1ULL << 5) /* R/W When set, do not capture near returns */ +#define LBR_SELECT_NEAR_IND_JMP (1ULL << 6) /* R/W When set, do not capture near indirect jumps except near indirect calls and near returns */ +#define LBR_SELECT_NEAR_REL_JMP (1ULL << 7) /* R/W When set, do not capture near relative jumps except near relative calls. 
*/ +#define LBR_SELECT_FAR_BRANCH (1ULL << 8) /* R/W When set, do not capture far branches */ +#define LBR_SELECT_HSW_EN_CALLSTACK1 (1ULL << 9) /* Enable LBR stack to use LIFO filtering to capture Call stack profile */ + +#define MSR_IA32_LASTBRANCH_TOS 0x1c9 + +/* LBR INFO MSR fields (SKL and later) */ +/* Same fields can be used for HSW in the FROM_x LBR MSRs */ +#define MSR_IA32_LBRINFO_TSX_ABORT (1ULL << 61) +#define MSR_IA32_LBRINFO_IN_TSX (1ULL << 62) +#define MSR_IA32_LBRINFO_MISPREDICT (1ULL << 63) +#define MSR_IA32_LBRINFO_CYCLECNT_MASK (0xFFFFULL) + #define MSR_IA32_DEBUGCTLMSR 0x1d9 +#define DEBUGCTL_LBR_ENA (1U) + #define MSR_IA32_LASTBRANCHFROMIP 0x1db #define MSR_IA32_LASTBRANCHTOIP 0x1dc #define MSR_IA32_LASTINTFROMIP 0x1dd @@ -615,6 +683,28 @@ __END_DECLS #define MSR_IA32_PERF_FIXED_CTR0 0x309 +#define MSR_IA32_PERF_CAPABILITIES 0x345 +#define PERFCAP_LBR_FMT_MASK (0x3f) +#define PERFCAP_LBR_TYPE(msrval) ((msrval) & PERFCAP_LBR_FMT_MASK) +#define PERFCAP_LBR_TYPE_MISPRED 3 /* NHM */ +#define PERFCAP_LBR_TYPE_TSXINFO 4 /* HSW/BDW */ +#define PERFCAP_LBR_TYPE_EIP_WITH_LBRINFO 5 /* SKL+ */ +/* Types 6 & 7 are for Goldmont and Goldmont Plus, respectively */ + +#define LBR_TYPE_MISPRED_FROMRIP(from_rip) (((from_rip) & 0xFFFFFFFFFFFFULL) | (((from_rip) & (1ULL << 47)) ? 0xFFFF000000000000ULL : 0)) +#define LBR_TYPE_MISPRED_MISPREDICT(from_rip) (((from_rip) & MSR_IA32_LBRINFO_MISPREDICT) ? 1 : 0) + +#define LBR_TYPE_TSXINFO_FROMRIP(from_rip) (LBR_TYPE_MISPRED_FROMRIP(from_rip)) +#define LBR_TYPE_TSXINFO_MISPREDICT(from_rip) (((from_rip) & MSR_IA32_LBRINFO_MISPREDICT) ? 1 : 0) +#define LBR_TYPE_TSXINFO_TSX_ABORT(from_rip) (((from_rip) & MSR_IA32_LBRINFO_TSX_ABORT) ? 1 : 0) +#define LBR_TYPE_TSXINFO_IN_TSX(from_rip) (((from_rip) & MSR_IA32_LBRINFO_IN_TSX) ? 
1 : 0) + +#define LBR_TYPE_EIP_WITH_LBRINFO_MISPREDICT(lbrinfo) LBR_TYPE_TSXINFO_MISPREDICT(lbrinfo) +#define LBR_TYPE_EIP_WITH_LBRINFO_TSX_ABORT(lbrinfo) LBR_TYPE_TSXINFO_TSX_ABORT(lbrinfo) +#define LBR_TYPE_EIP_WITH_LBRINFO_IN_TSX(lbrinfo) LBR_TYPE_TSXINFO_IN_TSX(lbrinfo) +#define LBR_TYPE_EIP_WITH_LBRINFO_CYC_COUNT(lbrinfo) ((lbrinfo) & 0xFFFFULL) + + #define MSR_IA32_PERF_FIXED_CTR_CTRL 0x38D #define MSR_IA32_PERF_GLOBAL_STATUS 0x38E #define MSR_IA32_PERF_GLOBAL_CTRL 0x38F diff --git a/osfmk/i386/rtclock.c b/osfmk/i386/rtclock.c index bc6fa6524..08aed1af1 100644 --- a/osfmk/i386/rtclock.c +++ b/osfmk/i386/rtclock.c @@ -75,6 +75,11 @@ int rtclock_init(void); uint64_t tsc_rebase_abs_time = 0; +volatile uint64_t gAcpiLastSleepTscBase = 0; +volatile uint64_t gAcpiLastSleepNanoBase = 0; +volatile uint64_t gAcpiLastWakeTscBase = 0; +volatile uint64_t gAcpiLastWakeNanoBase = 0; + static void rtc_set_timescale(uint64_t cycles); static uint64_t rtc_export_speed(uint64_t cycles); @@ -148,7 +153,14 @@ _rtc_nanotime_init(pal_rtc_nanotime_t *rntp, uint64_t base) void rtc_nanotime_init(uint64_t base) { + gAcpiLastSleepTscBase = pal_rtc_nanotime_info.tsc_base; + gAcpiLastSleepNanoBase = pal_rtc_nanotime_info.ns_base; + _rtc_nanotime_init(&pal_rtc_nanotime_info, base); + + gAcpiLastWakeTscBase = pal_rtc_nanotime_info.tsc_base; + gAcpiLastWakeNanoBase = pal_rtc_nanotime_info.ns_base; + rtc_nanotime_set_commpage(&pal_rtc_nanotime_info); } diff --git a/osfmk/i386/seg.h b/osfmk/i386/seg.h index c456b17ef..eed66408b 100644 --- a/osfmk/i386/seg.h +++ b/osfmk/i386/seg.h @@ -58,6 +58,7 @@ #ifndef __ASSEMBLER__ #include #include +#include #include /* diff --git a/osfmk/i386/smp.h b/osfmk/i386/smp.h index 7a99b1793..69336fdf7 100644 --- a/osfmk/i386/smp.h +++ b/osfmk/i386/smp.h @@ -29,7 +29,4 @@ #ifndef _I386_SMP_H_ #define _I386_SMP_H_ -/* x86_64 kernels are always built SMP, even if only 1 CPU is active */ -#define __SMP__ 1 - #endif /* _I386_SMP_H_ */ diff --git a/osfmk/i386/startup64.c b/osfmk/i386/startup64.c deleted file mode 100644 index 2a363f7bc..000000000 --- a/osfmk/i386/startup64.c +++ /dev/null @@ -1,265 +0,0 @@ -/* - * Copyright (c) 2006-2012 Apple Inc. All rights reserved. - * - * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * - * This file contains Original Code and/or Modifications of Original Code - * as defined in and that are subject to the Apple Public Source License - * Version 2.0 (the 'License'). You may not use this file except in - * compliance with the License. The rights granted to you under the License - * may not be used to create, or enable the creation or redistribution of, - * unlawful or unlicensed copies of an Apple operating system, or to - * circumvent, violate, or enable the circumvention or violation of, any - * terms of an Apple operating system software license agreement. - * - * Please obtain a copy of the License at - * http://www.opensource.apple.com/apsl/ and read it before using this file. - * - * The Original Code and all software distributed under the License are - * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER - * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, - * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. - * Please see the License for the specific language governing rights and - * limitations under the License. 
- * - * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ - */ - -#include - -#include - -#include -#include -#include - -#include -#include - -#include -#include -#include -#include -#include -#include -#include - -#include -#include - -#include /* prototyping */ -#include - -#include -#include -#include -#include -#include -#include - -#include - -#include - -#include - -#if DEBUG -extern void dump_regs64(void); -extern void dump_gdt(void *); -extern void dump_ldt(void *); -extern void dump_idt(void *); -extern void dump_tss(void *); -extern void dump_frame32(x86_saved_state32_t *sp); -extern void dump_frame64(x86_saved_state64_t *sp); -extern void dump_frame(x86_saved_state_t *sp); - -void -dump_frame(x86_saved_state_t *sp) -{ - if (is_saved_state32(sp)) { - dump_frame32(&sp->ss_32); - } else if (is_saved_state64(sp)) { - dump_frame64(&sp->ss_64); - } else { - kprintf("dump_frame(%p) unknown type %d\n", sp, sp->flavor); - } -} - -void -dump_frame32(x86_saved_state32_t *sp) -{ - unsigned int i; - uint32_t *ip = (uint32_t *) sp; - - kprintf("dump_frame32(%p):\n", sp); - - for (i = 0; - i < sizeof(x86_saved_state32_t) / sizeof(uint32_t); - i++, ip++) { - kprintf("%p: 0x%08x\n", ip, *ip); - } - - kprintf("sp->gs: 0x%08x\n", sp->gs); - kprintf("sp->fs: 0x%08x\n", sp->fs); - kprintf("sp->es: 0x%08x\n", sp->es); - kprintf("sp->ds: 0x%08x\n", sp->ds); - kprintf("sp->edi: 0x%08x\n", sp->edi); - kprintf("sp->esi: 0x%08x\n", sp->esi); - kprintf("sp->ebp: 0x%08x\n", sp->ebp); - kprintf("sp->cr2: 0x%08x\n", sp->cr2); - kprintf("sp->ebx: 0x%08x\n", sp->ebx); - kprintf("sp->edx: 0x%08x\n", sp->edx); - kprintf("sp->ecx: 0x%08x\n", sp->ecx); - kprintf("sp->eax: 0x%08x\n", sp->eax); - kprintf("sp->trapno: 0x%08x\n", sp->eax); - kprintf("sp->eip: 0x%08x\n", sp->eip); - kprintf("sp->cs: 0x%08x\n", sp->cs); - kprintf("sp->efl: 0x%08x\n", sp->efl); - kprintf("sp->uesp: 0x%08x\n", sp->uesp); - kprintf("sp->ss: 0x%08x\n", sp->ss); - - postcode(0x99); -} - -void -dump_frame64(x86_saved_state64_t *sp) -{ - unsigned int i; - uint64_t *ip = (uint64_t *) sp; - - kprintf("dump_frame64(%p):\n", sp); - - for (i = 0; - i < sizeof(x86_saved_state64_t) / sizeof(uint64_t); - i++, ip++) { - kprintf("%p: 0x%016llx\n", ip, *ip); - } - - kprintf("sp->isf.trapno: 0x%08x\n", sp->isf.trapno); - kprintf("sp->isf.trapfn: 0x%016llx\n", sp->isf.trapfn); - kprintf("sp->isf.err: 0x%016llx\n", sp->isf.err); - kprintf("sp->isf.rip: 0x%016llx\n", sp->isf.rip); - kprintf("sp->isf.cs: 0x%016llx\n", sp->isf.cs); - kprintf("sp->isf.rflags: 0x%016llx\n", sp->isf.rflags); - kprintf("sp->isf.rsp: 0x%016llx\n", sp->isf.rsp); - kprintf("sp->isf.ss: 0x%016llx\n", sp->isf.ss); - - kprintf("sp->fs: 0x%016x\n", sp->fs); - kprintf("sp->gs: 0x%016x\n", sp->gs); - kprintf("sp->rax: 0x%016llx\n", sp->rax); - kprintf("sp->rcx: 0x%016llx\n", sp->rcx); - kprintf("sp->rbx: 0x%016llx\n", sp->rbx); - kprintf("sp->rbp: 0x%016llx\n", sp->rbp); - kprintf("sp->r11: 0x%016llx\n", sp->r11); - kprintf("sp->r12: 0x%016llx\n", sp->r12); - kprintf("sp->r13: 0x%016llx\n", sp->r13); - kprintf("sp->r14: 0x%016llx\n", sp->r14); - kprintf("sp->r15: 0x%016llx\n", sp->r15); - kprintf("sp->cr2: 0x%016llx\n", sp->cr2); - kprintf("sp->r9: 0x%016llx\n", sp->r9); - kprintf("sp->r8: 0x%016llx\n", sp->r8); - kprintf("sp->r10: 0x%016llx\n", sp->r10); - kprintf("sp->rdx: 0x%016llx\n", sp->rdx); - kprintf("sp->rsi: 0x%016llx\n", sp->rsi); - kprintf("sp->rdi: 0x%016llx\n", sp->rdi); - - postcode(0x98); -} - -void -dump_gdt(void *gdtp) -{ - unsigned int i; - uint32_t *ip = (uint32_t *) gdtp; - - 
kprintf("GDT:\n"); - for (i = 0; i < GDTSZ; i++, ip += 2) { - kprintf("%p: 0x%08x\n", ip + 0, *(ip + 0)); - kprintf("%p: 0x%08x\n", ip + 1, *(ip + 1)); - } -} - -void -dump_ldt(void *ldtp) -{ - unsigned int i; - uint32_t *ip = (uint32_t *) ldtp; - - kprintf("LDT:\n"); - for (i = 0; i < LDTSZ_MIN; i++, ip += 2) { - kprintf("%p: 0x%08x\n", ip + 0, *(ip + 0)); - kprintf("%p: 0x%08x\n", ip + 1, *(ip + 1)); - } -} - -void -dump_idt(void *idtp) -{ - unsigned int i; - uint32_t *ip = (uint32_t *) idtp; - - kprintf("IDT64:\n"); - for (i = 0; i < 16; i++, ip += 4) { - kprintf("%p: 0x%08x\n", ip + 0, *(ip + 0)); - kprintf("%p: 0x%08x\n", ip + 1, *(ip + 1)); - kprintf("%p: 0x%08x\n", ip + 2, *(ip + 2)); - kprintf("%p: 0x%08x\n", ip + 3, *(ip + 3)); - } -} - -void -dump_tss(void *tssp) -{ - unsigned int i; - uint32_t *ip = (uint32_t *) tssp; - - kprintf("TSS64:\n"); - for (i = 0; i < sizeof(master_ktss64) / sizeof(uint32_t); i++, ip++) { - kprintf("%p: 0x%08x\n", ip + 0, *(ip + 0)); - } -} - -void -dump_regs64(void) -{ -#define SNAP_REG(reg) \ - uint64_t reg; \ - __asm__ volatile("mov %%" #reg ", %0" : "=m" (reg)) - -#define KPRINT_REG(reg) \ - kprintf("%3s: %p\n", #reg, (void *) reg) - - SNAP_REG(rsp); - SNAP_REG(rbp); - SNAP_REG(rax); - SNAP_REG(rbx); - SNAP_REG(rcx); - SNAP_REG(rdx); - SNAP_REG(rsi); - SNAP_REG(rdi); - SNAP_REG(r8); - SNAP_REG(r9); - SNAP_REG(r10); - SNAP_REG(r11); - SNAP_REG(r12); - SNAP_REG(r13); - SNAP_REG(r14); - - KPRINT_REG(rsp); - KPRINT_REG(rbp); - KPRINT_REG(rax); - KPRINT_REG(rbx); - KPRINT_REG(rcx); - KPRINT_REG(rdx); - KPRINT_REG(rsi); - KPRINT_REG(rdi); - KPRINT_REG(r8); - KPRINT_REG(r9); - KPRINT_REG(r10); - KPRINT_REG(r11); - KPRINT_REG(r12); - KPRINT_REG(r13); - KPRINT_REG(r14); -} -#endif /* DEBUG */ diff --git a/osfmk/i386/thread.h b/osfmk/i386/thread.h index 1c1e8e926..8ae53e486 100644 --- a/osfmk/i386/thread.h +++ b/osfmk/i386/thread.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2019 Apple Inc. All rights reserved. + * Copyright (c) 2000-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -143,23 +143,16 @@ struct machine_thread { #define OnProc 0x1 #define CopyIOActive 0x2 /* Checked to ensure DTrace actions do not re-enter copyio(). */ uint64_t thread_gpu_ns; -#if NCOPY_WINDOWS > 0 - struct { - user_addr_t user_base; - } copy_window[NCOPY_WINDOWS]; - int nxt_window; - int copyio_state; -#define WINDOWS_DIRTY 0 -#define WINDOWS_CLEAN 1 -#define WINDOWS_CLOSED 2 -#define WINDOWS_OPENED 3 - uint64_t physwindow_pte; - int physwindow_busy; -#endif - uint32_t last_xcpm_ttd; uint8_t last_xcpm_index; int mthr_do_segchk; + int insn_state_copyin_failure_errorcode; /* If insn_state is 0, this may hold the reason */ + x86_instruction_state_t *insn_state; +#if DEVELOPMENT || DEBUG + /* first byte specifies the offset of the instruction at the time of capture */ + uint8_t insn_cacheline[65]; /* XXX: Hard-coded cacheline size */ +#endif + x86_lbrs_t lbrs; }; typedef struct machine_thread *pcb_t; diff --git a/osfmk/i386/trap.c b/osfmk/i386/trap.c index 4231b1f26..4c74342a2 100644 --- a/osfmk/i386/trap.c +++ b/osfmk/i386/trap.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2018 Apple Inc. All rights reserved. + * Copyright (c) 2000-2020 Apple Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -68,7 +68,7 @@ #include #include #include -#include /* panic_io_port_read() */ +#include #include #include @@ -101,6 +101,7 @@ #include #include #include +#include #if CONFIG_MCA #include #endif @@ -112,20 +113,38 @@ extern void throttle_lowpri_io(int); extern void kprint_state(x86_saved_state64_t *saved_state); +#if DEVELOPMENT || DEBUG +int insnstream_force_cacheline_mismatch = 0; +extern int panic_on_cacheline_mismatch; +extern char panic_on_trap_procname[]; +extern uint32_t panic_on_trap_mask; +#endif + +extern int insn_copyin_count; /* * Forward declarations */ static void panic_trap(x86_saved_state64_t *saved_state, uint32_t pl, kern_return_t fault_result) __dead2; static void set_recovery_ip(x86_saved_state64_t *saved_state, vm_offset_t ip); +#if DEVELOPMENT || DEBUG +static __attribute__((noinline)) void copy_instruction_stream(thread_t thread, uint64_t rip, int trap_code, bool inspect_cacheline); +#else +static __attribute__((noinline)) void copy_instruction_stream(thread_t thread, uint64_t rip, int trap_code); +#endif #if CONFIG_DTRACE /* See */ perfCallback tempDTraceTrapHook = NULL; /* Pointer to DTrace fbt trap hook routine */ extern boolean_t dtrace_tally_fault(user_addr_t); +extern boolean_t dtrace_handle_trap(int, x86_saved_state_t *); #endif +#ifdef MACH_BSD +extern char * proc_name_address(void *p); +#endif /* MACH_BSD */ + extern boolean_t pmap_smep_enabled; extern boolean_t pmap_smap_enabled; @@ -366,7 +385,7 @@ interrupt(x86_saved_state_t *state) (user_mode ? rip : VM_KERNEL_UNSLIDE(rip)), user_mode, itype, 0); - SCHED_STATS_INTERRUPT(current_processor()); + SCHED_STATS_INC(interrupt_count); #if CONFIG_TELEMETRY if (telemetry_needs_record) { @@ -509,9 +528,6 @@ kernel_trap( vm_prot_t prot; struct recovery *rp; vm_offset_t kern_ip; -#if NCOPY_WINDOWS > 0 - int fault_in_copy_window = -1; -#endif int is_user; int trap_pl = get_preemption_level(); @@ -549,6 +565,14 @@ kernel_trap( goto common_return; } } + + /* Handle traps originated from probe context. */ + if (thread != THREAD_NULL && thread->t_dtrace_inprobe) { + if (dtrace_handle_trap(type, state)) { + goto common_return; + } + } + #endif /* CONFIG_DTRACE */ /* @@ -579,79 +603,50 @@ kernel_trap( */ map = kernel_map; - if (__probable(thread != THREAD_NULL && thread->map != kernel_map)) { -#if NCOPY_WINDOWS > 0 - vm_offset_t copy_window_base; - vm_offset_t kvaddr; - int window_index; + if (__probable((thread != THREAD_NULL) && (thread->map != kernel_map) && + (vaddr < VM_MAX_USER_PAGE_ADDRESS))) { + /* fault occurred in userspace */ + map = thread->map; + + /* Intercept a potential Supervisor Mode Execute + * Protection fault. These criteria identify + * both NX faults and SMEP faults, but both + * are fatal. We avoid checking PTEs (racy). + * (The VM could just redrive a SMEP fault, hence + * the intercept). + */ + if (__improbable((code == (T_PF_PROT | T_PF_EXECUTE)) && + (pmap_smep_enabled) && (saved_state->isf.rip == vaddr))) { + goto debugger_entry; + } - kvaddr = (vm_offset_t)vaddr; /* - * must determine if fault occurred in - * the copy window while pre-emption is - * disabled for this processor so that - * we only need to look at the window - * associated with this processor + * Additionally check for SMAP faults... + * which are characterized by page-present and + * the AC bit unset (i.e. not from copyin/out path). 
*/ - copy_window_base = current_cpu_datap()->cpu_copywindow_base; - - if (kvaddr >= copy_window_base && kvaddr < (copy_window_base + (NBPDE * NCOPY_WINDOWS))) { - window_index = (int)((kvaddr - copy_window_base) / NBPDE); - - if (thread->machine.copy_window[window_index].user_base != (user_addr_t)-1) { - kvaddr -= (copy_window_base + (NBPDE * window_index)); - vaddr = thread->machine.copy_window[window_index].user_base + kvaddr; - - map = thread->map; - fault_in_copy_window = window_index; - } + if (__improbable(code & T_PF_PROT && + pmap_smap_enabled && + (saved_state->isf.rflags & EFL_AC) == 0)) { + goto debugger_entry; } -#else - if (__probable(vaddr < VM_MAX_USER_PAGE_ADDRESS)) { - /* fault occurred in userspace */ - map = thread->map; - - /* Intercept a potential Supervisor Mode Execute - * Protection fault. These criteria identify - * both NX faults and SMEP faults, but both - * are fatal. We avoid checking PTEs (racy). - * (The VM could just redrive a SMEP fault, hence - * the intercept). - */ - if (__improbable((code == (T_PF_PROT | T_PF_EXECUTE)) && - (pmap_smep_enabled) && (saved_state->isf.rip == vaddr))) { - goto debugger_entry; - } - /* - * Additionally check for SMAP faults... - * which are characterized by page-present and - * the AC bit unset (i.e. not from copyin/out path). - */ - if (__improbable(code & T_PF_PROT && - pmap_smap_enabled && - (saved_state->isf.rflags & EFL_AC) == 0)) { - goto debugger_entry; - } - - /* - * If we're not sharing cr3 with the user - * and we faulted in copyio, - * then switch cr3 here and dismiss the fault. - */ - if (no_shared_cr3 && - (thread->machine.specFlags & CopyIOActive) && - map->pmap->pm_cr3 != get_cr3_base()) { - pmap_assert(current_cpu_datap()->cpu_pmap_pcid_enabled == FALSE); - set_cr3_raw(map->pmap->pm_cr3); - return; - } - if (__improbable(vaddr < PAGE_SIZE) && - ((thread->machine.specFlags & CopyIOActive) == 0)) { - goto debugger_entry; - } + /* + * If we're not sharing cr3 with the user + * and we faulted in copyio, + * then switch cr3 here and dismiss the fault. + */ + if (no_shared_cr3 && + (thread->machine.specFlags & CopyIOActive) && + map->pmap->pm_cr3 != get_cr3_base()) { + pmap_assert(current_cpu_datap()->cpu_pmap_pcid_enabled == FALSE); + set_cr3_raw(map->pmap->pm_cr3); + return; + } + if (__improbable(vaddr < PAGE_SIZE) && + ((thread->machine.specFlags & CopyIOActive) == 0)) { + goto debugger_entry; } -#endif } } @@ -722,14 +717,6 @@ kernel_trap( THREAD_UNINT, NULL, 0); if (result == KERN_SUCCESS) { -#if NCOPY_WINDOWS > 0 - if (fault_in_copy_window != -1) { - ml_set_interrupts_enabled(FALSE); - copy_window_fault(thread, map, - fault_in_copy_window); - (void) ml_set_interrupts_enabled(intr); - } -#endif /* NCOPY_WINDOWS > 0 */ goto common_return; } /* @@ -759,12 +746,13 @@ FALL_THROUGH: thread->recover = 0; goto common_return; } - /* - * Unanticipated page-fault errors in kernel - * should not happen. - * - * fall through... - */ + /* + * Unanticipated page-fault errors in kernel + * should not happen. + * + * fall through... + */ + OS_FALLTHROUGH; default: /* * Exception 15 is reserved but some chips may generate it @@ -823,7 +811,7 @@ panic_trap(x86_saved_state64_t *regs, uint32_t pl, kern_return_t fault_result) * Issue an I/O port read if one has been requested - this is an * event logic analyzers can use as a trigger point. 
*/ - panic_io_port_read(); + panic_notify(); kprintf("CPU %d panic trap number 0x%x, rip 0x%016llx\n", cpu_number(), regs->isf.trapno, regs->isf.rip); @@ -900,7 +888,9 @@ user_trap( kern_return_t kret; user_addr_t rip; unsigned long dr6 = 0; /* 32 bit for i386, 64 bit for x86_64 */ + int current_cpu = cpu_number(); #if DEVELOPMENT || DEBUG + bool inspect_cacheline = false; uint32_t traptrace_index; #endif assert((is_saved_state32(saved_state) && !thread_is_64bit_addr(thread)) || @@ -912,7 +902,7 @@ user_trap( regs = saved_state64(saved_state); /* Record cpu where state was captured */ - regs->isf.cpu = cpu_number(); + regs->isf.cpu = current_cpu; type = regs->isf.trapno; err = (int)regs->isf.err & 0xffff; @@ -927,7 +917,7 @@ user_trap( regs = saved_state32(saved_state); /* Record cpu where state was captured */ - regs->cpu = cpu_number(); + regs->cpu = current_cpu; type = regs->trapno; err = regs->err & 0xffff; @@ -938,14 +928,34 @@ user_trap( #endif } +#if DEVELOPMENT || DEBUG + /* + * Copy the cacheline of code into the thread's instruction stream save area + * before enabling interrupts (the assumption is that we have not otherwise faulted or + * trapped since the original cache line stores). If the saved code is not valid, + * we'll catch it below when we process the copyin() for unhandled faults. + */ + if (type == T_PAGE_FAULT || type == T_INVALID_OPCODE || type == T_GENERAL_PROTECTION) { +#define CACHELINE_SIZE 64 + THREAD_TO_PCB(thread)->insn_cacheline[CACHELINE_SIZE] = (uint8_t)(rip & (CACHELINE_SIZE - 1)); + bcopy(&cpu_shadowp(current_cpu)->cpu_rtimes[0], + &THREAD_TO_PCB(thread)->insn_cacheline[0], + sizeof(THREAD_TO_PCB(thread)->insn_cacheline) - 1); + inspect_cacheline = true; + } +#endif - if ((type == T_DEBUG) && thread->machine.ids) { - unsigned long clear = 0; - /* Stash and clear this processor's DR6 value, in the event - * this was a debug register match - */ - __asm__ volatile ("mov %%db6, %0" : "=r" (dr6)); - __asm__ volatile ("mov %0, %%db6" : : "r" (clear)); + if (type == T_DEBUG) { + if (thread->machine.ids) { + unsigned long clear = 0; + /* Stash and clear this processor's DR6 value, in the event + * this was a debug register match + */ + __asm__ volatile ("mov %%db6, %0" : "=r" (dr6)); + __asm__ volatile ("mov %0, %%db6" : : "r" (clear)); + } + /* [Re]Enable LBRs *BEFORE* enabling interrupts to ensure we hit the right CPU */ + i386_lbr_enable(); } pal_sti(); @@ -1171,19 +1181,243 @@ user_trap( panic("Unexpected user trap, type %d", type); } + if (exc != 0) { + uint16_t cs; + boolean_t intrs; + + if (is_saved_state64(saved_state)) { + cs = saved_state64(saved_state)->isf.cs; + } else { + cs = saved_state32(saved_state)->cs; + } + + if (last_branch_support_enabled) { + intrs = ml_set_interrupts_enabled(FALSE); + /* + * This is a bit racy (it's possible for this thread to migrate to another CPU, then + * migrate back, but that seems rather rare in practice), but good enough to ensure + * the LBRs are saved before proceeding with exception/signal dispatch. + */ + if (current_cpu == cpu_number()) { + i386_lbr_synch(thread); + } + ml_set_interrupts_enabled(intrs); + } + + /* + * Do not try to copyin from the instruction stream if the page fault was due + * to an access to rip and was unhandled. + * Do not deal with cases when %cs != USER[64]_CS + * And of course there's no need to copy the instruction stream if the boot-arg + * was set to 0. 
+ */ + if (insn_copyin_count > 0 && + (cs == USER64_CS || cs == USER_CS) && (type != T_PAGE_FAULT || vaddr != rip)) { #if DEVELOPMENT || DEBUG - if (traptrace_index != TRAPTRACE_INVALID_INDEX) { - traptrace_end(traptrace_index, mach_absolute_time()); - } + copy_instruction_stream(thread, rip, type, inspect_cacheline); +#else + copy_instruction_stream(thread, rip, type); #endif + } - if (exc != 0) { +#if DEVELOPMENT || DEBUG + if (traptrace_index != TRAPTRACE_INVALID_INDEX) { + traptrace_end(traptrace_index, mach_absolute_time()); + } +#endif /* * Note: Codepaths that directly return from user_trap() have pending * ASTs processed in locore */ i386_exception(exc, code, subcode); /* NOTREACHED */ + } else { +#if DEVELOPMENT || DEBUG + if (traptrace_index != TRAPTRACE_INVALID_INDEX) { + traptrace_end(traptrace_index, mach_absolute_time()); + } +#endif + } +} + +/* + * Copyin up to x86_INSTRUCTION_STATE_MAX_INSN_BYTES bytes from the page that includes `rip`, + * ensuring that we stay on the same page, clipping the start or end, as needed. + * Add the clipped amount back at the start or end, depending on where it fits. + * Consult the variable populated by the boot-arg `insn_capcnt' + */ +static __attribute__((noinline)) void +copy_instruction_stream(thread_t thread, uint64_t rip, int __unused trap_code +#if DEVELOPMENT || DEBUG + , bool inspect_cacheline +#endif + ) +{ +#if x86_INSTRUCTION_STATE_MAX_INSN_BYTES > 4096 +#error x86_INSTRUCTION_STATE_MAX_INSN_BYTES cannot exceed a page in size. +#endif + pcb_t pcb = THREAD_TO_PCB(thread); + vm_map_offset_t pagemask = ~vm_map_page_mask(current_map()); + vm_map_offset_t rip_page = rip & pagemask; + vm_map_offset_t start_addr; + vm_map_offset_t insn_offset; + vm_map_offset_t end_addr = rip + (insn_copyin_count / 2); + void *stack_buffer; + int copyin_err = 0; +#if defined(MACH_BSD) && (DEVELOPMENT || DEBUG) + void *procname; +#endif + +#if DEVELOPMENT || DEBUG + assert(insn_copyin_count <= x86_INSTRUCTION_STATE_MAX_INSN_BYTES); +#else + if (insn_copyin_count > x86_INSTRUCTION_STATE_MAX_INSN_BYTES || + insn_copyin_count < 64 /* CACHELINE_SIZE */) { + return; + } +#endif + +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Walloca" + stack_buffer = __builtin_alloca(insn_copyin_count); +#pragma clang diagnostic pop + + if (rip >= (insn_copyin_count / 2)) { + start_addr = rip - (insn_copyin_count / 2); + } else { + start_addr = 0; + } + + if (start_addr < rip_page) { + insn_offset = (insn_copyin_count / 2) - (rip_page - start_addr); + end_addr += (rip_page - start_addr); + start_addr = rip_page; + } else if (end_addr >= (rip_page + (~pagemask + 1))) { + start_addr -= (end_addr - (rip_page + (~pagemask + 1))); /* Adjust start address backward */ + /* Adjust instruction offset due to start address change */ + insn_offset = (insn_copyin_count / 2) + (end_addr - (rip_page + (~pagemask + 1))); + end_addr = rip_page + (~pagemask + 1); /* clip to the start of the next page (non-inclusive */ + } else { + insn_offset = insn_copyin_count / 2; + } + + disable_preemption(); /* Prevent copyin from faulting in the instruction stream */ + if ( +#if DEVELOPMENT || DEBUG + (insnstream_force_cacheline_mismatch < 2) && +#endif + ((end_addr > start_addr) && (copyin_err = copyin(start_addr, stack_buffer, end_addr - start_addr)) == 0)) { + enable_preemption(); + + if (pcb->insn_state == 0) { + pcb->insn_state = kalloc(sizeof(x86_instruction_state_t)); + } + + if (pcb->insn_state != 0) { + bcopy(stack_buffer, pcb->insn_state->insn_bytes, end_addr - start_addr); 
+ bzero(&pcb->insn_state->insn_bytes[end_addr - start_addr], + insn_copyin_count - (end_addr - start_addr)); + + pcb->insn_state->insn_stream_valid_bytes = (int)(end_addr - start_addr); + pcb->insn_state->insn_offset = (int)insn_offset; + +#if DEVELOPMENT || DEBUG + /* Now try to validate the cacheline we read at early-fault time matches the code + * copied in. Before we do that, we have to make sure the buffer contains a valid + * cacheline by looking for the 2 sentinel values written in the event the cacheline + * could not be copied. + */ +#define CACHELINE_DATA_NOT_PRESENT 0xdeadc0debeefcafeULL +#define CACHELINE_MASK (CACHELINE_SIZE - 1) + + if (inspect_cacheline && + (*(uint64_t *)(uintptr_t)&pcb->insn_cacheline[0] != CACHELINE_DATA_NOT_PRESENT && + *(uint64_t *)(uintptr_t)&pcb->insn_cacheline[8] != CACHELINE_DATA_NOT_PRESENT)) { + /* + * The position of the cacheline in the instruction buffer is at offset + * insn_offset - (rip & CACHELINE_MASK) + */ + if (__improbable((rip & CACHELINE_MASK) > insn_offset)) { + printf("thread %p code cacheline @ %p clipped wrt copied-in code (offset %d)\n", + thread, (void *)(rip & ~CACHELINE_MASK), (int)(rip & CACHELINE_MASK)); + } else if (bcmp(&pcb->insn_state->insn_bytes[insn_offset - (rip & CACHELINE_MASK)], + &pcb->insn_cacheline[0], CACHELINE_SIZE) != 0 + || insnstream_force_cacheline_mismatch + ) { +#if x86_INSTRUCTION_STATE_CACHELINE_SIZE != CACHELINE_SIZE +#error cacheline size mismatch +#endif + bcopy(&pcb->insn_cacheline[0], &pcb->insn_state->insn_cacheline[0], + x86_INSTRUCTION_STATE_CACHELINE_SIZE); + /* Mark the instruction stream as being out-of-synch */ + pcb->insn_state->out_of_synch = 1; + + printf("thread %p code cacheline @ %p mismatches with copied-in code [trap 0x%x]\n", + thread, (void *)(rip & ~CACHELINE_MASK), trap_code); + for (int i = 0; i < 8; i++) { + printf("\t[%d] cl=0x%08llx vs. 
ci=0x%08llx\n", i, *(uint64_t *)(uintptr_t)&pcb->insn_cacheline[i * 8], + *(uint64_t *)(uintptr_t)&pcb->insn_state->insn_bytes[(i * 8) + insn_offset - (rip & CACHELINE_MASK)]); + } + if (panic_on_cacheline_mismatch) { + panic("Cacheline mismatch while processing unhandled exception."); + } + } else { + printf("thread %p code cacheline @ %p DOES match with copied-in code\n", + thread, (void *)(rip & ~CACHELINE_MASK)); + pcb->insn_state->out_of_synch = 0; + } + } else if (inspect_cacheline) { + printf("thread %p could not capture code cacheline at fault IP %p [offset %d]\n", + (void *)thread, (void *)rip, (int)(insn_offset - (rip & CACHELINE_MASK))); + pcb->insn_state->out_of_synch = 0; + } +#else + pcb->insn_state->out_of_synch = 0; +#endif /* DEVELOPMENT || DEBUG */ + +#if defined(MACH_BSD) && (DEVELOPMENT || DEBUG) + if (panic_on_trap_procname[0] != 0) { + char procnamebuf[65] = {0}; + + if (thread->task->bsd_info != NULL) { + procname = proc_name_address(thread->task->bsd_info); + strlcpy(procnamebuf, procname, sizeof(procnamebuf)); + + if (strcasecmp(panic_on_trap_procname, procnamebuf) == 0 && + ((1U << trap_code) & panic_on_trap_mask) != 0) { + panic("Panic requested on trap type 0x%x for process `%s'", trap_code, + panic_on_trap_procname); + /*NORETURN*/ + } + } + } +#endif /* MACH_BSD && (DEVELOPMENT || DEBUG) */ + } + } else { + enable_preemption(); + + pcb->insn_state_copyin_failure_errorcode = copyin_err; +#if DEVELOPMENT || DEBUG + if (inspect_cacheline && pcb->insn_state == 0) { + pcb->insn_state = kalloc(sizeof(x86_instruction_state_t)); + } + if (pcb->insn_state != 0) { + pcb->insn_state->insn_stream_valid_bytes = 0; + pcb->insn_state->insn_offset = 0; + + if (inspect_cacheline && + (*(uint64_t *)(uintptr_t)&pcb->insn_cacheline[0] != CACHELINE_DATA_NOT_PRESENT && + *(uint64_t *)(uintptr_t)&pcb->insn_cacheline[8] != CACHELINE_DATA_NOT_PRESENT)) { + /* + * We can still copy the cacheline into the instruction state structure + * if it contains valid data + */ + pcb->insn_state->out_of_synch = 1; + bcopy(&pcb->insn_cacheline[0], &pcb->insn_state->insn_cacheline[0], + x86_INSTRUCTION_STATE_CACHELINE_SIZE); + } + } +#endif /* DEVELOPMENT || DEBUG */ } } diff --git a/osfmk/i386/trap.h b/osfmk/i386/trap.h index 5601e64f6..2753f1883 100644 --- a/osfmk/i386/trap.h +++ b/osfmk/i386/trap.h @@ -112,10 +112,19 @@ #define T_PF_RSVD 0x8 /* reserved bit set to 1 */ #define T_PF_EXECUTE 0x10 /* instruction fetch when NX */ +#if defined(MACH_KERNEL_PRIVATE) + #if !defined(ASSEMBLER) && defined(MACH_KERNEL) #include +#define DEFAULT_PANIC_ON_TRAP_MASK ((1U << T_INVALID_OPCODE) | \ + (1U << T_GENERAL_PROTECTION) | \ + (1U << T_PAGE_FAULT) | \ + (1U << T_SEGMENT_NOT_PRESENT) | \ + (1U << T_STACK_FAULT)) + + extern void i386_exception( int exc, mach_exception_code_t code, @@ -158,4 +167,6 @@ extern boolean_t kdp_i386_trap( #endif /* MACH_KDP */ #endif /* !ASSEMBLER && MACH_KERNEL */ +#endif /* MACH_KERNEL_PRIVATE */ + #endif /* _I386_TRAP_H_ */ diff --git a/osfmk/i386/trap_native.c b/osfmk/i386/trap_native.c index da7e55e20..33c9d9408 100644 --- a/osfmk/i386/trap_native.c +++ b/osfmk/i386/trap_native.c @@ -65,7 +65,7 @@ #include #include #include -#include /* panic_io_port_read() */ +#include #include #include @@ -130,7 +130,7 @@ panic_64(x86_saved_state_t *sp, __unused int pc, __unused const char *msg, boole * Issue an I/O port read if one has been requested - this is an * event logic analyzers can use as a trigger point. 
*/ - panic_io_port_read(); + panic_notify(); /* diff --git a/osfmk/i386/tsc.c b/osfmk/i386/tsc.c index cd0aeb554..879b7df72 100644 --- a/osfmk/i386/tsc.c +++ b/osfmk/i386/tsc.c @@ -99,10 +99,10 @@ EFI_get_frequency(const char *prop) { uint64_t frequency = 0; DTEntry entry; - void *value; + void const *value; unsigned int size; - if (DTLookupEntry(0, "/efi/platform", &entry) != kSuccess) { + if (SecureDTLookupEntry(0, "/efi/platform", &entry) != kSuccess) { kprintf("EFI_get_frequency: didn't find /efi/platform\n"); return 0; } @@ -110,20 +110,20 @@ EFI_get_frequency(const char *prop) /* * While we're here, see if EFI published an initial TSC value. */ - if (DTGetProperty(entry, "InitialTSC", &value, &size) == kSuccess) { + if (SecureDTGetProperty(entry, "InitialTSC", &value, &size) == kSuccess) { if (size == sizeof(uint64_t)) { - tsc_at_boot = *(uint64_t *) value; + tsc_at_boot = *(uint64_t const *) value; kprintf("EFI_get_frequency: read InitialTSC: %llu\n", tsc_at_boot); } } - if (DTGetProperty(entry, prop, &value, &size) != kSuccess) { + if (SecureDTGetProperty(entry, prop, &value, &size) != kSuccess) { kprintf("EFI_get_frequency: property %s not found\n", prop); return 0; } if (size == sizeof(uint64_t)) { - frequency = *(uint64_t *) value; + frequency = *(uint64_t const *) value; kprintf("EFI_get_frequency: read %s value: %llu\n", prop, frequency); } @@ -141,8 +141,8 @@ tsc_init(void) boolean_t N_by_2_bus_ratio = FALSE; if (cpuid_vmm_present()) { - kprintf("VMM vendor %u TSC frequency %u KHz bus frequency %u KHz\n", - cpuid_vmm_info()->cpuid_vmm_family, + kprintf("VMM vendor %s TSC frequency %u KHz bus frequency %u KHz\n", + cpuid_vmm_family_string(), cpuid_vmm_info()->cpuid_vmm_tsc_frequency, cpuid_vmm_info()->cpuid_vmm_bus_frequency); @@ -166,6 +166,7 @@ tsc_init(void) switch (cpuid_cpufamily()) { case CPUFAMILY_INTEL_KABYLAKE: + case CPUFAMILY_INTEL_ICELAKE: case CPUFAMILY_INTEL_SKYLAKE: { /* * SkyLake and later has an Always Running Timer (ART) providing @@ -320,3 +321,31 @@ tsc_get_info(tscInfo_t *info) info->flex_ratio_min = flex_ratio_min; info->flex_ratio_max = flex_ratio_max; } + +#if DEVELOPMENT || DEBUG +void +cpu_data_tsc_sync_deltas_string(char *buf, uint32_t buflen, + uint32_t start_cpu, uint32_t end_cpu) +{ + int cnt; + uint32_t offset = 0; + + if (start_cpu >= real_ncpus || end_cpu >= real_ncpus) { + if (buflen >= 1) { + buf[0] = 0; + } + return; + } + + for (uint32_t curcpu = start_cpu; curcpu <= end_cpu; curcpu++) { + cnt = snprintf(buf + offset, buflen - offset, "0x%llx ", cpu_datap(curcpu)->tsc_sync_delta); + if (cnt < 0 || (offset + (unsigned) cnt >= buflen)) { + break; + } + offset += cnt; + } + if (offset >= 1) { + buf[offset - 1] = 0; /* Clip the final, trailing space */ + } +} +#endif /* DEVELOPMENT || DEBUG */ diff --git a/osfmk/i386/tsc.h b/osfmk/i386/tsc.h index cd8429b85..936f4ea47 100644 --- a/osfmk/i386/tsc.h +++ b/osfmk/i386/tsc.h @@ -78,6 +78,12 @@ typedef struct tscInfo tscInfo_t; extern void tsc_get_info(tscInfo_t *info); extern void tsc_init(void); + +#if DEVELOPMENT || DEBUG +extern void cpu_data_tsc_sync_deltas_string(char *buf, uint32_t buflen, + uint32_t start_cpu, uint32_t end_cpu); +#endif + #endif /* ASSEMBLER */ #endif /* _I386_TSC_H_ */ #endif /* KERNEL_PRIVATE */ diff --git a/osfmk/i386/ucode.c b/osfmk/i386/ucode.c index 9c10adb4d..4c5d43d09 100644 --- a/osfmk/i386/ucode.c +++ b/osfmk/i386/ucode.c @@ -37,7 +37,8 @@ #include #include #include -#include // mp_cpus_call +#include // mp_*_preemption +#include // mp_cpus_call #include #include 
#include // cpu_number diff --git a/osfmk/i386/vmx/vmx_cpu.c b/osfmk/i386/vmx/vmx_cpu.c index f2beaaab3..efd2ff662 100644 --- a/osfmk/i386/vmx/vmx_cpu.c +++ b/osfmk/i386/vmx/vmx_cpu.c @@ -283,7 +283,7 @@ static boolean_t vmx_globally_available(void) { unsigned int i; - unsigned int ncpus = ml_get_max_cpus(); + unsigned int ncpus = ml_wait_max_cpus(); boolean_t available = TRUE; for (i = 0; i < ncpus; i++) { diff --git a/osfmk/ipc/flipc.c b/osfmk/ipc/flipc.c index a8b265d23..2c0977b22 100644 --- a/osfmk/ipc/flipc.c +++ b/osfmk/ipc/flipc.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015-2016 Apple Inc. All rights reserved. + * Copyright (c) 2015-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -58,9 +58,8 @@ /*** FLIPC Internal Implementation (private to flipc.c) ***/ - -zone_t flipc_port_zone; - +ZONE_DECLARE(flipc_port_zone, "flipc ports", + sizeof(struct flipc_port), ZC_NOENCRYPT); /* Get the mnl_name associated with local ipc_port . * Returns MNL_NAME_NULL if is invalid or not a flipc port. @@ -353,27 +352,6 @@ flipc_cmd_ack(flipc_ack_msg_t fmsg, /*** FLIPC Node Managment Functions (called by mach node layer) ***/ -/* The mach node layer calls flipc_init() once before it calls any other - * flipc entry points. Returns KERN_SUCCESS on success; otherwise flipc - * is not initialized and cannot be used. - */ -kern_return_t -flipc_init(void) -{ - /* Create zone for flipc ports. - * TODO: Pick a better max value than ipc_port_max>>4 - */ - flipc_port_zone = zinit(sizeof(struct flipc_port), - (ipc_port_max >> 4) * sizeof(struct flipc_port), - sizeof(struct flipc_port), - "flipc ports"); - - zone_change(flipc_port_zone, Z_CALLERACCT, FALSE); - zone_change(flipc_port_zone, Z_NOENCRYPT, TRUE); - return KERN_SUCCESS; -} - - /* flipc_node_prepare() is called by mach node layer when a remote node is * registered by a link driver, or when the bootstrap port changes for the * local node. This is the flipc layer's opportunity to initialize per-node diff --git a/osfmk/ipc/flipc.h b/osfmk/ipc/flipc.h index a5049dac4..d924f9cf9 100644 --- a/osfmk/ipc/flipc.h +++ b/osfmk/ipc/flipc.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015-2016 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2015-2020 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -77,12 +77,6 @@ typedef struct flipc_port { extern mach_node_id_t localnode_id; // This node's FLIPC id. -/* The mach node layer calls flipc_init() once before it calls any other - * flipc entry points. Returns KERN_SUCCESS on success; otherwise flipc - * is not initialized and cannot be used. - */ -kern_return_t flipc_init(void); - /* flipc_node_prepare() is called by mach node layer when a remote node is * registered by a link driver. This is the flipc layer's opportunity to * convert it to a flipc port and hook it into any appropriate structures. 
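The flipc hunks above drop the runtime flipc_init()/zinit() zone setup in favor of the compile-time ZONE_DECLARE() pattern used throughout this release (the ZC_NOENCRYPT flag taking over for the removed zone_change(..., Z_NOENCRYPT, TRUE) call). A minimal sketch of that pattern is shown below, assuming a hypothetical example_port structure and zone; ZONE_DECLARE, zalloc and zfree are taken from the hunks in this patch, everything else is illustrative.

/*
 * Sketch only: statically declared zone in place of a zinit() call inside
 * an init routine. struct example_port and example_port_zone are hypothetical.
 */
struct example_port {
	uint32_t ep_refs;
	uint64_t ep_name;
};

/* Registered automatically at boot; no explicit init entry point is needed. */
ZONE_DECLARE(example_port_zone, "example ports",
    sizeof(struct example_port), ZC_NOENCRYPT);

static struct example_port *
example_port_alloc(void)
{
	struct example_port *ep = zalloc(example_port_zone);

	ep->ep_refs = 1;
	ep->ep_name = 0;
	return ep;
}

static void
example_port_free(struct example_port *ep)
{
	zfree(example_port_zone, ep);
}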
diff --git a/osfmk/ipc/ipc_entry.h b/osfmk/ipc/ipc_entry.h index dab496ef8..5601b84c8 100644 --- a/osfmk/ipc/ipc_entry.h +++ b/osfmk/ipc/ipc_entry.h @@ -100,11 +100,16 @@ #define IPC_ENTRY_DIST_BITS 12 #define IPC_ENTRY_DIST_MAX ((1 << IPC_ENTRY_DIST_BITS) - 1) +#ifdef __LP64__ +#define IPC_ENTRY_INDEX_BITS 32 +#define IPC_ENTRY_INDEX_MAX (UINT32_MAX) +#else #define IPC_ENTRY_INDEX_BITS 20 #define IPC_ENTRY_INDEX_MAX ((1 << IPC_ENTRY_INDEX_BITS) - 1) +#endif struct ipc_entry { - struct ipc_object *ie_object; + struct ipc_object *XNU_PTRAUTH_SIGNED_PTR("ipc_entry.ie_object") ie_object; ipc_entry_bits_t ie_bits; uint32_t ie_dist : IPC_ENTRY_DIST_BITS; mach_port_index_t ie_index : IPC_ENTRY_INDEX_BITS; diff --git a/osfmk/ipc/ipc_eventlink.c b/osfmk/ipc/ipc_eventlink.c new file mode 100644 index 000000000..87a31f784 --- /dev/null +++ b/osfmk/ipc/ipc_eventlink.c @@ -0,0 +1,1161 @@ +/* + * Copyright (c) 2000-2020 Apple Inc. All rights reserved. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. + * + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. 
+ * + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ + */ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +static ZONE_DECLARE(ipc_eventlink_zone, "ipc_eventlink", + sizeof(struct ipc_eventlink_base), ZC_NONE); + +os_refgrp_decl(static, ipc_eventlink_refgrp, "eventlink", NULL); + +#if DEVELOPMENT || DEBUG +static queue_head_t ipc_eventlink_list = QUEUE_HEAD_INITIALIZER(ipc_eventlink_list); +static LCK_GRP_DECLARE(ipc_eventlink_dev_lock_grp, "ipc_eventlink_dev_lock"); +static LCK_SPIN_DECLARE(global_ipc_eventlink_lock, &ipc_eventlink_dev_lock_grp); + +#define global_ipc_eventlink_lock() \ + lck_spin_lock_grp(&global_ipc_eventlink_lock, &ipc_eventlink_dev_lock_grp) +#define global_ipc_eventlink_lock_try() \ + lck_spin_try_lock_grp(&global_ipc_eventlink_lock, &ipc_eventlink_dev_lock_grp) +#define global_ipc_eventlink_unlock() \ + lck_spin_unlock(&global_ipc_eventlink_lock) + +#endif /* DEVELOPMENT || DEBUG */ + +/* Forward declarations */ +static struct ipc_eventlink_base * +ipc_eventlink_alloc(void); + +static void +ipc_eventlink_initialize( + struct ipc_eventlink_base *ipc_eventlink_base); + +static kern_return_t +ipc_eventlink_destroy_internal( + struct ipc_eventlink *ipc_eventlink); + +static kern_return_t +ipc_eventlink_signal( + struct ipc_eventlink *ipc_eventlink); + +static uint64_t +ipc_eventlink_signal_wait_until_trap_internal( + mach_port_name_t wait_port, + mach_port_name_t signal_port, + uint64_t count, + mach_eventlink_signal_wait_option_t el_option, + kern_clock_id_t clock_id, + uint64_t deadline); + +static kern_return_t +ipc_eventlink_signal_wait_internal( + struct ipc_eventlink *wait_eventlink, + struct ipc_eventlink *signal_eventlink, + uint64_t deadline, + uint64_t *count, + ipc_eventlink_option_t eventlink_option); + +static kern_return_t +ipc_eventlink_convert_wait_result(int wait_result); + +static kern_return_t +ipc_eventlink_signal_internal_locked( + struct ipc_eventlink *signal_eventlink, + ipc_eventlink_option_t eventlink_option); + +static kern_return_t +convert_port_to_eventlink_locked( + ipc_port_t port, + struct ipc_eventlink **ipc_eventlink_ptr); + +static kern_return_t +port_name_to_eventlink( + mach_port_name_t name, + struct ipc_eventlink **ipc_eventlink_ptr); + +/* + * Name: ipc_eventlink_alloc + * + * Description: Allocates an ipc_eventlink struct and initializes it. + * + * Args: None. + * + * Returns: + * ipc_eventlink_base on Success. + */ +static struct ipc_eventlink_base * +ipc_eventlink_alloc(void) +{ + struct ipc_eventlink_base *ipc_eventlink_base = IPC_EVENTLINK_BASE_NULL; + ipc_eventlink_base = zalloc(ipc_eventlink_zone); + + ipc_eventlink_initialize(ipc_eventlink_base); + +#if DEVELOPMENT || DEBUG + /* Add ipc_eventlink to global list */ + global_ipc_eventlink_lock(); + queue_enter(&ipc_eventlink_list, ipc_eventlink_base, + struct ipc_eventlink_base *, elb_global_elm); + global_ipc_eventlink_unlock(); +#endif + return ipc_eventlink_base; +} + +/* + * Name: ipc_eventlink_initialize + * + * Description: Initializes ipc eventlink struct. + * + * Args: ipc eventlink base. + * + * Returns: + * KERN_SUCCESS on Success. 
+ */ +static void +ipc_eventlink_initialize( + struct ipc_eventlink_base *ipc_eventlink_base) +{ + int i; + kern_return_t kr; + + kr = waitq_init(&ipc_eventlink_base->elb_waitq, SYNC_POLICY_DISABLE_IRQ); + assert(kr == KERN_SUCCESS); + + /* Initialize the count to 2, refs for each ipc eventlink port */ + os_ref_init_count(&ipc_eventlink_base->elb_ref_count, &ipc_eventlink_refgrp, 2); + ipc_eventlink_base->elb_active = TRUE; + ipc_eventlink_base->elb_type = IPC_EVENTLINK_TYPE_NO_COPYIN; + + for (i = 0; i < 2; i++) { + struct ipc_eventlink *ipc_eventlink = &(ipc_eventlink_base->elb_eventlink[i]); + + ipc_eventlink->el_port = ipc_kobject_alloc_port((ipc_kobject_t)ipc_eventlink, + IKOT_EVENTLINK, IPC_KOBJECT_ALLOC_MAKE_SEND | IPC_KOBJECT_ALLOC_NSREQUEST); + /* ipc_kobject_alloc_port never fails */ + ipc_eventlink->el_thread = THREAD_NULL; + ipc_eventlink->el_sync_counter = 0; + ipc_eventlink->el_wait_counter = UINT64_MAX; + ipc_eventlink->el_base = ipc_eventlink_base; + } +} + +/* + * Name: mach_eventlink_create + * + * Description: Allocates an ipc_eventlink struct and initializes it. + * + * Args: + * task : task port of the process + * mach_eventlink_create_option_t: option + * eventlink_port_pair: eventlink port array + * + * Returns: + * KERN_SUCCESS on Success. + */ +kern_return_t +mach_eventlink_create( + task_t task, + mach_eventlink_create_option_t elc_option, + eventlink_port_pair_t eventlink_port_pair) +{ + int i; + struct ipc_eventlink_base *ipc_eventlink_base; + + if (task == TASK_NULL || task != current_task() || + elc_option != MELC_OPTION_NO_COPYIN) { + return KERN_INVALID_ARGUMENT; + } + + ipc_eventlink_base = ipc_eventlink_alloc(); + + for (i = 0; i < 2; i++) { + eventlink_port_pair[i] = ipc_eventlink_base->elb_eventlink[i].el_port; + } + + return KERN_SUCCESS; +} + +/* + * Name: mach_eventlink_destroy + * + * Description: Destroy an ipc_eventlink, wakeup all threads. + * + * Args: + * eventlink: eventlink + * + * Returns: + * KERN_SUCCESS on Success. + */ +kern_return_t +mach_eventlink_destroy( + struct ipc_eventlink *ipc_eventlink) +{ + ipc_eventlink_destroy_internal(ipc_eventlink); + + /* mach_eventlink_destroy should succeed for terminated eventlink */ + return KERN_SUCCESS; +} + +/* + * Name: ipc_eventlink_destroy_internal + * + * Description: Destroy an ipc_eventlink, wakeup all threads. + * + * Args: + * eventlink: eventlink + * + * Returns: + * KERN_SUCCESS on Success. 
+ */ +static kern_return_t +ipc_eventlink_destroy_internal( + struct ipc_eventlink *ipc_eventlink) +{ + spl_t s; + int i; + struct ipc_eventlink_base *ipc_eventlink_base; + thread_t associated_thread[2] = {}; + ipc_port_t ipc_eventlink_port = IPC_PORT_NULL; + ipc_port_t ipc_eventlink_port_remote = IPC_PORT_NULL; + + if (ipc_eventlink == IPC_EVENTLINK_NULL) { + return KERN_TERMINATED; + } + + s = splsched(); + ipc_eventlink_lock(ipc_eventlink); + + ipc_eventlink_base = ipc_eventlink->el_base; + + /* Check if the eventlink is active */ + if (!ipc_eventlink_active(ipc_eventlink)) { + ipc_eventlink_unlock(ipc_eventlink); + splx(s); + return KERN_TERMINATED; + } + + for (i = 0; i < 2; i++) { + struct ipc_eventlink *temp_ipc_eventlink = &ipc_eventlink_base->elb_eventlink[i]; + + /* Wakeup threads sleeping on eventlink */ + if (temp_ipc_eventlink->el_thread) { + associated_thread[i] = temp_ipc_eventlink->el_thread; + temp_ipc_eventlink->el_thread = THREAD_NULL; + + ipc_eventlink_signal_internal_locked(temp_ipc_eventlink, + IPC_EVENTLINK_FORCE_WAKEUP); + } + + /* Only destroy the port on which destroy was called */ + if (temp_ipc_eventlink == ipc_eventlink) { + ipc_eventlink_port = temp_ipc_eventlink->el_port; + assert(ipc_eventlink_port != IPC_PORT_NULL); + } else { + /* Do not destory the remote port, else eventlink_destroy will fail */ + ipc_eventlink_port_remote = temp_ipc_eventlink->el_port; + assert(ipc_eventlink_port_remote != IPC_PORT_NULL); + /* + * Take a reference on the remote port, since it could go + * away after eventlink lock is dropped. + */ + ip_reference(ipc_eventlink_port_remote); + } + assert(temp_ipc_eventlink->el_port != IPC_PORT_NULL); + temp_ipc_eventlink->el_port = IPC_PORT_NULL; + } + + /* Mark the eventlink as inactive */ + ipc_eventlink_base->elb_active = FALSE; + + ipc_eventlink_unlock(ipc_eventlink); + splx(s); + + /* Destroy the local eventlink port */ + ipc_port_dealloc_kernel(ipc_eventlink_port); + /* Drops port reference */ + + /* Clear the remote eventlink port without destroying it */ + ip_lock(ipc_eventlink_port_remote); + if (ip_active(ipc_eventlink_port_remote)) { + ipc_kobject_set_atomically(ipc_eventlink_port_remote, IKO_NULL, IKOT_EVENTLINK); + } + ip_unlock(ipc_eventlink_port_remote); + ip_release(ipc_eventlink_port_remote); + + for (i = 0; i < 2; i++) { + if (associated_thread[i] != THREAD_NULL && + associated_thread[i] != THREAD_ASSOCIATE_WILD) { + thread_deallocate(associated_thread[i]); + } + + /* Drop the eventlink reference given to port */ + ipc_eventlink_deallocate(ipc_eventlink); + } + return KERN_SUCCESS; +} + +/* + * Name: mach_eventlink_associate + * + * Description: Associate a thread to eventlink. + * + * Args: + * eventlink: eventlink + * thread: thread needs to be associated + * copyin_addr_wait: copyin addr for wait + * copyin_mask_wait: copyin mask for wait + * copyin_addr_signal: copyin addr for signal + * copyin_mask_signal: copyin mask for signal + * mach_eventlink_associate_option_t: option for eventlink associate + * + * Returns: + * KERN_SUCCESS on Success. 
+ */ +kern_return_t +mach_eventlink_associate( + struct ipc_eventlink *ipc_eventlink, + thread_t thread, + mach_vm_address_t copyin_addr_wait, + uint64_t copyin_mask_wait, + mach_vm_address_t copyin_addr_signal, + uint64_t copyin_mask_signal, + mach_eventlink_associate_option_t ela_option) +{ + spl_t s; + + if (ipc_eventlink == IPC_EVENTLINK_NULL) { + return KERN_TERMINATED; + } + + if (copyin_addr_wait != 0 || copyin_mask_wait != 0 || + copyin_addr_signal != 0 || copyin_mask_signal != 0) { + return KERN_INVALID_ARGUMENT; + } + + if ((thread == NULL && ela_option == MELA_OPTION_NONE) || + (thread != NULL && ela_option == MELA_OPTION_ASSOCIATE_ON_WAIT)) { + return KERN_INVALID_ARGUMENT; + } + + s = splsched(); + ipc_eventlink_lock(ipc_eventlink); + + /* Check if eventlink is terminated */ + if (!ipc_eventlink_active(ipc_eventlink)) { + ipc_eventlink_unlock(ipc_eventlink); + splx(s); + return KERN_TERMINATED; + } + + if (ipc_eventlink->el_thread != NULL) { + ipc_eventlink_unlock(ipc_eventlink); + splx(s); + return KERN_NAME_EXISTS; + } + + if (ela_option == MELA_OPTION_ASSOCIATE_ON_WAIT) { + ipc_eventlink->el_thread = THREAD_ASSOCIATE_WILD; + } else { + thread_reference(thread); + ipc_eventlink->el_thread = thread; + } + + ipc_eventlink_unlock(ipc_eventlink); + splx(s); + return KERN_SUCCESS; +} + +/* + * Name: mach_eventlink_disassociate + * + * Description: Disassociate a thread from eventlink. + * Wake up the associated thread if blocked on eventlink. + * + * Args: + * eventlink: eventlink + * mach_eventlink_option_t: option for eventlink disassociate + * + * Returns: + * KERN_SUCCESS on Success. + */ +kern_return_t +mach_eventlink_disassociate( + struct ipc_eventlink *ipc_eventlink, + mach_eventlink_disassociate_option_t eld_option) +{ + spl_t s; + thread_t thread; + + if (ipc_eventlink == IPC_EVENTLINK_NULL) { + return KERN_TERMINATED; + } + + if (eld_option != MELD_OPTION_NONE) { + return KERN_INVALID_ARGUMENT; + } + + s = splsched(); + ipc_eventlink_lock(ipc_eventlink); + + /* Check if eventlink is terminated */ + if (!ipc_eventlink_active(ipc_eventlink)) { + ipc_eventlink_unlock(ipc_eventlink); + splx(s); + return KERN_TERMINATED; + } + + if (ipc_eventlink->el_thread == NULL) { + ipc_eventlink_unlock(ipc_eventlink); + splx(s); + return KERN_INVALID_ARGUMENT; + } + + thread = ipc_eventlink->el_thread; + ipc_eventlink->el_thread = NULL; + + /* wake up the thread if blocked */ + ipc_eventlink_signal_internal_locked(ipc_eventlink, + IPC_EVENTLINK_FORCE_WAKEUP); + + ipc_eventlink_unlock(ipc_eventlink); + splx(s); + + if (thread != THREAD_ASSOCIATE_WILD) { + thread_deallocate(thread); + } + return KERN_SUCCESS; +} + +/* + * Name: mach_eventlink_signal_trap + * + * Description: Increment the sync count of eventlink and + * wake up the thread waiting if sync counter is greater + * than wake counter. + * + * Args: + * eventlink: eventlink + * + * Returns: + * uint64_t: Contains count and error codes. 
+ */ +uint64_t +mach_eventlink_signal_trap( + mach_port_name_t port, + uint64_t signal_count __unused) +{ + struct ipc_eventlink *ipc_eventlink; + kern_return_t kr; + uint64_t retval = 0; + + kr = port_name_to_eventlink(port, &ipc_eventlink); + if (kr == KERN_SUCCESS) { + /* Signal the remote side of the eventlink */ + kr = ipc_eventlink_signal(eventlink_remote_side(ipc_eventlink)); + + /* Deallocate ref returned by port_name_to_eventlink */ + ipc_eventlink_deallocate(ipc_eventlink); + } + + retval = encode_eventlink_count_and_error(0, kr); + return retval; +} + +/* + * Name: ipc_eventlink_signal + * + * Description: Increment the sync count of eventlink and + * wake up the thread waiting if sync counter is greater + * than wake counter. + * + * Args: + * eventlink: eventlink + * + * Returns: + * KERN_SUCCESS on Success. + */ +static kern_return_t +ipc_eventlink_signal( + struct ipc_eventlink *ipc_eventlink) +{ + kern_return_t kr; + spl_t s; + + if (ipc_eventlink == IPC_EVENTLINK_NULL) { + return KERN_INVALID_ARGUMENT; + } + + s = splsched(); + ipc_eventlink_lock(ipc_eventlink); + + /* Check if eventlink is terminated */ + if (!ipc_eventlink_active(ipc_eventlink)) { + ipc_eventlink_unlock(ipc_eventlink); + splx(s); + return KERN_TERMINATED; + } + + kr = ipc_eventlink_signal_internal_locked(ipc_eventlink, + IPC_EVENTLINK_NONE); + + ipc_eventlink_unlock(ipc_eventlink); + splx(s); + + if (kr == KERN_NOT_WAITING) { + kr = KERN_SUCCESS; + } + + return kr; +} + +/* + * Name: mach_eventlink_wait_until_trap + * + * Description: Wait until local signal count exceeds the + * specified count or deadline passes. + * + * Args: + * wait_port: eventlink port for wait + * count_ptr: signal count to wait on + * el_option: eventlink option + * clock_id: clock id + * deadline: deadline in mach_absolute_time + * + * Returns: + * uint64_t: contains count and error codes + */ +uint64_t +mach_eventlink_wait_until_trap( + mach_port_name_t eventlink_port, + uint64_t wait_count, + mach_eventlink_signal_wait_option_t option, + kern_clock_id_t clock_id, + uint64_t deadline) +{ + return ipc_eventlink_signal_wait_until_trap_internal( + eventlink_port, + MACH_PORT_NULL, + wait_count, + option, + clock_id, + deadline); +} + +/* + * Name: mach_eventlink_signal_wait_until + * + * Description: Signal the opposite side of the + * eventlink and wait until local signal count exceeds the + * specified count or deadline passes. + * + * Args: + * wait_port: eventlink port for wait + * count_ptr: signal count to wait on + * el_option: eventlink option + * clock_id: clock id + * deadline: deadline in mach_absolute_time + * + * Returns: + * uint64_t: contains count and error codes + */ +uint64_t +mach_eventlink_signal_wait_until_trap( + mach_port_name_t eventlink_port, + uint64_t wait_count, + uint64_t signal_count __unused, + mach_eventlink_signal_wait_option_t option, + kern_clock_id_t clock_id, + uint64_t deadline) +{ + return ipc_eventlink_signal_wait_until_trap_internal( + eventlink_port, + eventlink_port, + wait_count, + option, + clock_id, + deadline); +} + +/* + * Name: ipc_eventlink_signal_wait_until_trap_internal + * + * Description: Signal the opposite side of the + * eventlink and wait until local signal count exceeds the + * specified count or deadline passes. 
+ * + * Args: + * wait_port: eventlink port for wait + * signal_port: eventlink port for signal + * count: signal count to wait on + * el_option: eventlink option + * clock_id: clock id + * deadline: deadline in mach_absolute_time + * + * Returns: + * uint64_t: contains signal count and error codes + */ +static uint64_t +ipc_eventlink_signal_wait_until_trap_internal( + mach_port_name_t wait_port, + mach_port_name_t signal_port, + uint64_t count, + mach_eventlink_signal_wait_option_t el_option, + kern_clock_id_t clock_id, + uint64_t deadline) +{ + struct ipc_eventlink *wait_ipc_eventlink = IPC_EVENTLINK_NULL; + struct ipc_eventlink *signal_ipc_eventlink = IPC_EVENTLINK_NULL; + kern_return_t kr; + ipc_eventlink_option_t ipc_eventlink_option = IPC_EVENTLINK_NONE; + + if (clock_id != KERN_CLOCK_MACH_ABSOLUTE_TIME) { + return encode_eventlink_count_and_error(count, KERN_INVALID_ARGUMENT); + } + + kr = port_name_to_eventlink(wait_port, &wait_ipc_eventlink); + if (kr == KERN_SUCCESS) { + assert(wait_ipc_eventlink != IPC_EVENTLINK_NULL); + + /* Get the remote side of eventlink for signal */ + if (signal_port != MACH_PORT_NULL) { + signal_ipc_eventlink = eventlink_remote_side(wait_ipc_eventlink); + } + + if (el_option & MELSW_OPTION_NO_WAIT) { + ipc_eventlink_option |= IPC_EVENTLINK_NO_WAIT; + } + + kr = ipc_eventlink_signal_wait_internal(wait_ipc_eventlink, + signal_ipc_eventlink, deadline, + &count, ipc_eventlink_option); + + /* release ref returned by port_name_to_eventlink */ + ipc_eventlink_deallocate(wait_ipc_eventlink); + } + return encode_eventlink_count_and_error(count, kr); +} + +/* + * Name: ipc_eventlink_signal_wait_internal + * + * Description: Signal the opposite side of the + * eventlink and wait until local signal count exceeds the + * specified count or deadline passes. + * + * Args: + * wait_eventlink: eventlink for wait + * signal_eventlink: eventlink for signal + * deadline: deadline in mach_absolute_time + * count_ptr: signal count to wait on + * el_option: eventlink option + * + * Returns: + * KERN_SUCCESS on Success. + * signal count is returned implicitly in count arg. 
+ */ +static kern_return_t +ipc_eventlink_signal_wait_internal( + struct ipc_eventlink *wait_eventlink, + struct ipc_eventlink *signal_eventlink, + uint64_t deadline, + uint64_t *count, + ipc_eventlink_option_t eventlink_option) +{ + spl_t s; + kern_return_t kr = KERN_ALREADY_WAITING; + thread_t self = current_thread(); + struct ipc_eventlink_base *ipc_eventlink_base = wait_eventlink->el_base; + thread_t handoff_thread = THREAD_NULL; + thread_handoff_option_t handoff_option = THREAD_HANDOFF_NONE; + uint64_t old_signal_count; + wait_result_t wr; + + s = splsched(); + ipc_eventlink_lock(wait_eventlink); + + /* Check if eventlink is terminated */ + if (!ipc_eventlink_active(wait_eventlink)) { + kr = KERN_TERMINATED; + goto unlock; + } + + /* Check if waiting thread is associated to eventlink */ + if (wait_eventlink->el_thread != THREAD_ASSOCIATE_WILD && + wait_eventlink->el_thread != self) { + kr = KERN_INVALID_ARGUMENT; + goto unlock; + } + + /* Check if thread already waiting for associate on wait case */ + if (wait_eventlink->el_thread == THREAD_ASSOCIATE_WILD && + wait_eventlink->el_wait_counter != UINT64_MAX) { + kr = KERN_INVALID_ARGUMENT; + goto unlock; + } + + /* Check if the signal count exceeds the count provided */ + if (*count < wait_eventlink->el_sync_counter) { + *count = wait_eventlink->el_sync_counter; + kr = KERN_SUCCESS; + } else if (eventlink_option & IPC_EVENTLINK_NO_WAIT) { + /* Check if no block was passed */ + *count = wait_eventlink->el_sync_counter; + kr = KERN_OPERATION_TIMED_OUT; + } else { + /* Update the wait counter and add thread to waitq */ + wait_eventlink->el_wait_counter = *count; + old_signal_count = wait_eventlink->el_sync_counter; + + thread_set_pending_block_hint(self, kThreadWaitEventlink); + (void)waitq_assert_wait64_locked( + &ipc_eventlink_base->elb_waitq, + CAST_EVENT64_T(wait_eventlink), + THREAD_ABORTSAFE, + TIMEOUT_URGENCY_USER_NORMAL, + deadline, TIMEOUT_NO_LEEWAY, + self); + + eventlink_option |= IPC_EVENTLINK_HANDOFF; + } + + /* Check if we need to signal the other side of eventlink */ + if (signal_eventlink != IPC_EVENTLINK_NULL) { + kern_return_t signal_kr; + signal_kr = ipc_eventlink_signal_internal_locked(signal_eventlink, + eventlink_option); + + if (signal_kr == KERN_NOT_WAITING) { + assert(self->handoff_thread == THREAD_NULL); + } + } + + if (kr != KERN_ALREADY_WAITING) { + goto unlock; + } + + if (self->handoff_thread) { + handoff_thread = self->handoff_thread; + self->handoff_thread = THREAD_NULL; + handoff_option = THREAD_HANDOFF_SETRUN_NEEDED; + } + + ipc_eventlink_unlock(wait_eventlink); + splx(s); + + wr = thread_handoff_deallocate(handoff_thread, handoff_option); + kr = ipc_eventlink_convert_wait_result(wr); + + assert(self->handoff_thread == THREAD_NULL); + + /* Increment the count value if eventlink_signal was called */ + if (kr == KERN_SUCCESS) { + *count += 1; + } else { + *count = old_signal_count; + } + + return kr; + +unlock: + ipc_eventlink_unlock(wait_eventlink); + splx(s); + assert(self->handoff_thread == THREAD_NULL); + + return kr; +} + +/* + * Name: ipc_eventlink_convert_wait_result + * + * Description: Convert wait result to return value + * for wait trap. + * + * Args: + * wait_result: result from thread handoff + * + * Returns: + * KERN_SUCCESS on Success. 
+ */ +static kern_return_t +ipc_eventlink_convert_wait_result(int wait_result) +{ + switch (wait_result) { + case THREAD_AWAKENED: + return KERN_SUCCESS; + + case THREAD_TIMED_OUT: + return KERN_OPERATION_TIMED_OUT; + + case THREAD_INTERRUPTED: + return KERN_ABORTED; + + case THREAD_RESTART: + return KERN_TERMINATED; + + default: + panic("ipc_eventlink_wait_block\n"); + return KERN_FAILURE; + } +} + +/* + * Name: ipc_eventlink_signal_internal_locked + * + * Description: Increment the sync count of eventlink and + * wake up the thread waiting if sync counter is greater + * than wake counter. + * + * Args: + * eventlink: eventlink + * ipc_eventlink_option_t: options + * + * Returns: + * KERN_SUCCESS on Success. + */ +static kern_return_t +ipc_eventlink_signal_internal_locked( + struct ipc_eventlink *signal_eventlink, + ipc_eventlink_option_t eventlink_option) +{ + kern_return_t kr = KERN_NOT_WAITING; + struct ipc_eventlink_base *ipc_eventlink_base = signal_eventlink->el_base; + + if (eventlink_option & IPC_EVENTLINK_FORCE_WAKEUP) { + /* Adjust the wait counter */ + signal_eventlink->el_wait_counter = UINT64_MAX; + + kr = waitq_wakeup64_all_locked( + &ipc_eventlink_base->elb_waitq, + CAST_EVENT64_T(signal_eventlink), + THREAD_RESTART, NULL, + WAITQ_ALL_PRIORITIES, + WAITQ_KEEP_LOCKED); + return kr; + } + + /* Increment the eventlink sync count */ + signal_eventlink->el_sync_counter++; + + /* Check if thread needs to be woken up */ + if (signal_eventlink->el_sync_counter > signal_eventlink->el_wait_counter) { + waitq_options_t wq_option = (eventlink_option & IPC_EVENTLINK_HANDOFF) ? + WQ_OPTION_HANDOFF : WQ_OPTION_NONE; + + /* Adjust the wait counter */ + signal_eventlink->el_wait_counter = UINT64_MAX; + + kr = waitq_wakeup64_one_locked( + &ipc_eventlink_base->elb_waitq, + CAST_EVENT64_T(signal_eventlink), + THREAD_AWAKENED, NULL, + WAITQ_ALL_PRIORITIES, + WAITQ_KEEP_LOCKED, + wq_option); + } + + return kr; +} + +/* + * Name: ipc_eventlink_reference + * + * Description: Increment ref on ipc eventlink struct + * + * Args: + * eventlink: eventlink + * + * Returns: None + */ +void +ipc_eventlink_reference( + struct ipc_eventlink *ipc_eventlink) +{ + os_ref_retain(&ipc_eventlink->el_base->elb_ref_count); +} + +/* + * Name: ipc_eventlink_deallocate + * + * Description: Decrement ref on ipc eventlink struct + * + * Args: + * eventlink: eventlink + * + * Returns: None + */ +void +ipc_eventlink_deallocate( + struct ipc_eventlink *ipc_eventlink) +{ + if (ipc_eventlink == IPC_EVENTLINK_NULL) { + return; + } + + struct ipc_eventlink_base *ipc_eventlink_base = ipc_eventlink->el_base; + + if (os_ref_release(&ipc_eventlink_base->elb_ref_count) > 0) { + return; + } + + assert(!ipc_eventlink_active(ipc_eventlink)); + +#if DEVELOPMENT || DEBUG + /* Remove ipc_eventlink to global list */ + global_ipc_eventlink_lock(); + queue_remove(&ipc_eventlink_list, ipc_eventlink_base, + struct ipc_eventlink_base *, elb_global_elm); + global_ipc_eventlink_unlock(); +#endif + zfree(ipc_eventlink_zone, ipc_eventlink_base); +} + +/* + * Name: convert_port_to_eventlink + * + * Description: Convert from a port name in the current + * space to an ipc eventlink. Produces an ipc eventlink ref, + * which may be null. + * + * Args: + * mach_port_t: eventlink port + * + * Returns: + * ipc_eventlink on Success. 
+ */ +struct ipc_eventlink * +convert_port_to_eventlink( + mach_port_t port) +{ + struct ipc_eventlink *ipc_eventlink = IPC_EVENTLINK_NULL; + + if (IP_VALID(port)) { + ip_lock(port); + convert_port_to_eventlink_locked(port, &ipc_eventlink); + ip_unlock(port); + } + + return ipc_eventlink; +} + +/* + * Name: convert_port_to_eventlink_locked + * + * Description: Convert from a port name in the current + * space to an ipc eventlink. Produces an ipc eventlink ref, + * which may be null. + * + * Args: + * mach_port_name_t: eventlink port name + * ipc_eventlink_ptr: pointer to return ipc_eventlink. + * + * Returns: + * KERN_SUCCESS on Success. + * KERN_TERMINATED on inactive eventlink. + */ +static kern_return_t +convert_port_to_eventlink_locked( + ipc_port_t port, + struct ipc_eventlink **ipc_eventlink_ptr) +{ + kern_return_t kr = KERN_INVALID_CAPABILITY; + struct ipc_eventlink *ipc_eventlink = IPC_EVENTLINK_NULL; + + if (ip_active(port) && + ip_kotype(port) == IKOT_EVENTLINK) { + ipc_eventlink = (struct ipc_eventlink *)port->ip_kobject; + + if (ipc_eventlink) { + ipc_eventlink_reference(ipc_eventlink); + kr = KERN_SUCCESS; + } else { + kr = KERN_TERMINATED; + } + } + + *ipc_eventlink_ptr = ipc_eventlink; + return kr; +} + +/* + * Name: port_name_to_eventlink + * + * Description: Convert from a port name in the current + * space to an ipc eventlink. Produces an ipc eventlink ref, + * which may be null. + * + * Args: + * mach_port_name_t: eventlink port name + * ipc_eventlink_ptr: ptr to pass eventlink struct + * + * Returns: + * KERN_SUCCESS on Success. + */ +static kern_return_t +port_name_to_eventlink( + mach_port_name_t name, + struct ipc_eventlink **ipc_eventlink_ptr) +{ + ipc_port_t kern_port; + kern_return_t kr; + + if (!MACH_PORT_VALID(name)) { + *ipc_eventlink_ptr = IPC_EVENTLINK_NULL; + return KERN_INVALID_NAME; + } + + kr = ipc_port_translate_send(current_space(), name, &kern_port); + if (kr != KERN_SUCCESS) { + *ipc_eventlink_ptr = IPC_EVENTLINK_NULL; + return kr; + } + /* have the port locked */ + assert(IP_VALID(kern_port)); + + kr = convert_port_to_eventlink_locked(kern_port, ipc_eventlink_ptr); + ip_unlock(kern_port); + + return kr; +} + +/* + * Name: ipc_eventlink_notify + * + * Description: Destroy an ipc_eventlink, wakeup all threads. + * + * Args: + * msg: msg contaning eventlink port + * + * Returns: + * None. + */ +void +ipc_eventlink_notify( + mach_msg_header_t *msg) +{ + kern_return_t kr; + mach_no_senders_notification_t *notification = (void *)msg; + ipc_port_t port = notification->not_header.msgh_remote_port; + struct ipc_eventlink *ipc_eventlink; + + if (!ip_active(port)) { + return; + } + + /* Get ipc_eventlink reference */ + ip_lock(port); + + /* Make sure port is still active */ + if (!ip_active(port)) { + ip_unlock(port); + return; + } + + convert_port_to_eventlink_locked(port, &ipc_eventlink); + ip_unlock(port); + + kr = ipc_eventlink_destroy_internal(ipc_eventlink); + if (kr == KERN_TERMINATED) { + /* eventlink is already inactive, destroy the port */ + ipc_port_dealloc_kernel(port); + } + + /* Drop the reference returned by convert_port_to_eventlink_locked */ + ipc_eventlink_deallocate(ipc_eventlink); +} + +#define WAITQ_TO_EVENTLINK(wq) ((struct ipc_eventlink_base *) ((uintptr_t)(wq) - offsetof(struct ipc_eventlink_base, elb_waitq))) + +/* + * Name: kdp_eventlink_find_owner + * + * Description: Find who will signal the waiting thread. 
+ * + * Args: + * waitq: eventlink waitq + * wait_event: eventlink wait event + * waitinfo: waitinfo struct + * + * Returns: + * None. + */ +void +kdp_eventlink_find_owner( + struct waitq *waitq, + event64_t event, + thread_waitinfo_t *waitinfo) +{ + assert(waitinfo->wait_type == kThreadWaitEventlink); + waitinfo->owner = 0; + waitinfo->context = 0; + + if (waitq_held(waitq)) { + return; + } + + struct ipc_eventlink_base *ipc_eventlink_base = WAITQ_TO_EVENTLINK(waitq); + + if (event == CAST_EVENT64_T(&ipc_eventlink_base->elb_eventlink[0])) { + /* Use the other end of eventlink for signal thread */ + if (ipc_eventlink_base->elb_eventlink[1].el_thread != THREAD_ASSOCIATE_WILD) { + waitinfo->owner = thread_tid(ipc_eventlink_base->elb_eventlink[1].el_thread); + } else { + waitinfo->owner = 0; + } + } else if (event == CAST_EVENT64_T(&ipc_eventlink_base->elb_eventlink[1])) { + /* Use the other end of eventlink for signal thread */ + if (ipc_eventlink_base->elb_eventlink[0].el_thread != THREAD_ASSOCIATE_WILD) { + waitinfo->owner = thread_tid(ipc_eventlink_base->elb_eventlink[0].el_thread); + } else { + waitinfo->owner = 0; + } + } + + return; +} diff --git a/osfmk/ipc/ipc_eventlink.h b/osfmk/ipc/ipc_eventlink.h new file mode 100644 index 000000000..970d52939 --- /dev/null +++ b/osfmk/ipc/ipc_eventlink.h @@ -0,0 +1,142 @@ +/* + * Copyright (c) 2000-2019 Apple Computer, Inc. All rights reserved. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. + * + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. 
+ * + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ + */ + +#ifndef _IPC_IPC_EVENTLINK_H_ +#define _IPC_IPC_EVENTLINK_H_ + +#ifdef MACH_KERNEL_PRIVATE + +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include + +#include +#include +#include +#include +#include + +__options_decl(ipc_eventlink_option_t, uint64_t, { + IPC_EVENTLINK_NONE = 0, + IPC_EVENTLINK_NO_WAIT = 0x1, + IPC_EVENTLINK_HANDOFF = 0x2, + IPC_EVENTLINK_FORCE_WAKEUP = 0x4, +}); + +__options_decl(ipc_eventlink_type_t, uint8_t, { + IPC_EVENTLINK_TYPE_NO_COPYIN = 0x1, + IPC_EVENTLINK_TYPE_WITH_COPYIN = 0x2, +}); + +#define THREAD_ASSOCIATE_WILD ((struct thread *) -1) + +struct ipc_eventlink_base; + +struct ipc_eventlink { + ipc_port_t el_port; /* Port for eventlink object */ + thread_t el_thread; /* Thread associated with eventlink object */ + struct ipc_eventlink_base *el_base; /* eventlink base struct */ + uint64_t el_sync_counter; /* Sync counter for wait/ signal */ + uint64_t el_wait_counter; /* Counter passed in eventlink wait */ +}; + +struct ipc_eventlink_base { + struct ipc_eventlink elb_eventlink[2]; /* Eventlink pair */ + struct waitq elb_waitq; /* waitq */ + os_refcnt_t elb_ref_count; /* ref count for eventlink */ + uint32_t elb_active:1, + elb_type:8; +#if DEVELOPMENT || DEBUG + queue_chain_t elb_global_elm; /* Global list of eventlinks */ +#endif +}; + +#define IPC_EVENTLINK_BASE_NULL ((struct ipc_eventlink_base *)NULL) +#define ipc_eventlink_active(eventlink) ((eventlink)->el_base->elb_active == TRUE) + +#define eventlink_remote_side(eventlink) ((eventlink) == &((eventlink)->el_base->elb_eventlink[0]) ? \ + &((eventlink)->el_base->elb_eventlink[1]) : &((eventlink)->el_base->elb_eventlink[0])) + +#define ipc_eventlink_lock(eventlink) waitq_lock(&(eventlink)->el_base->elb_waitq) +#define ipc_eventlink_unlock(eventlink) waitq_unlock(&(eventlink)->el_base->elb_waitq) + +void ipc_eventlink_init(void); + +/* Function declarations */ +void +ipc_eventlink_init(void); + +struct ipc_eventlink * +convert_port_to_eventlink( + mach_port_t port); + +void +ipc_eventlink_reference( + struct ipc_eventlink *ipc_eventlink); + +void +ipc_eventlink_deallocate( + struct ipc_eventlink *ipc_eventlink); + +void +ipc_eventlink_notify( + mach_msg_header_t *msg); + +uint64_t + mach_eventlink_signal_trap( + mach_port_name_t port, + uint64_t signal_count __unused); + +uint64_t +mach_eventlink_wait_until_trap( + mach_port_name_t eventlink_port, + uint64_t wait_count, + mach_eventlink_signal_wait_option_t option, + kern_clock_id_t clock_id, + uint64_t deadline); + +uint64_t + mach_eventlink_signal_wait_until_trap( + mach_port_name_t eventlink_port, + uint64_t wait_count, + uint64_t signal_count __unused, + mach_eventlink_signal_wait_option_t option, + kern_clock_id_t clock_id, + uint64_t deadline); + +#endif /* MACH_KERNEL_PRIVATE */ +#endif /* _IPC_IPC_EVENTLINK_H_ */ diff --git a/osfmk/ipc/ipc_hash.c b/osfmk/ipc/ipc_hash.c index 0721448e1..efe06fd22 100644 --- a/osfmk/ipc/ipc_hash.c +++ b/osfmk/ipc/ipc_hash.c @@ -65,7 +65,6 @@ #include #include -#include #include #include #include diff --git a/osfmk/ipc/ipc_importance.c b/osfmk/ipc/ipc_importance.c index 2b7391cd6..47440d332 100644 --- a/osfmk/ipc/ipc_importance.c +++ b/osfmk/ipc/ipc_importance.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013 Apple Inc. All rights reserved. + * Copyright (c) 2013-2020 Apple Inc. All rights reserved. 
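The eventlink code above boils down to two counters per side: el_sync_counter counts signals delivered to that side, and el_wait_counter records the count a blocked waiter passed in (UINT64_MAX when nobody is waiting). A single-threaded model of just those comparisons is sketched below, with all locking, handoff, deadlines, and THREAD_ASSOCIATE_WILD handling stripped out and all names invented.

#include <stdbool.h>
#include <stdint.h>

struct el_side {
	uint64_t sync_counter;  /* signals delivered to this side */
	uint64_t wait_counter;  /* count a waiter is blocked on, or UINT64_MAX */
};

/* Signal: bump the counter; report whether a blocked waiter must be woken. */
static bool
el_signal(struct el_side *s)
{
	s->sync_counter++;
	if (s->sync_counter > s->wait_counter) {
		s->wait_counter = UINT64_MAX;   /* waiter no longer blocked */
		return true;                    /* wake it up */
	}
	return false;
}

/* Wait: returns true if the caller may proceed without blocking. */
static bool
el_wait_precheck(struct el_side *s, uint64_t count)
{
	if (count < s->sync_counter) {
		return true;                    /* enough signals already arrived */
	}
	s->wait_counter = count;            /* otherwise record what we wait on */
	return false;
}

In the kernel these checks run under the shared waitq lock of the eventlink pair, and the wakeup path can hand the processor directly to the woken thread (WQ_OPTION_HANDOFF).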
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -36,7 +36,6 @@ #include #include #include -#include #include #include #include @@ -70,14 +69,8 @@ static boolean_t ipc_importance_delayed_drop_call_requested = FALSE; /* * Importance Voucher Attribute Manager */ +static LCK_SPIN_DECLARE_ATTR(ipc_importance_lock_data, &ipc_lck_grp, &ipc_lck_attr); -static lck_spin_t ipc_importance_lock_data; /* single lock for now */ - - -#define ipc_importance_lock_init() \ - lck_spin_init(&ipc_importance_lock_data, &ipc_lck_grp, &ipc_lck_attr) -#define ipc_importance_lock_destroy() \ - lck_spin_destroy(&ipc_importance_lock_data, &ipc_lck_grp) #define ipc_importance_lock() \ lck_spin_lock_grp(&ipc_importance_lock_data, &ipc_lck_grp) #define ipc_importance_lock_try() \ @@ -151,13 +144,14 @@ ipc_importance_counter_init(ipc_importance_elem_t elem) #endif #if DEVELOPMENT || DEBUG -static queue_head_t global_iit_alloc_queue; +static queue_head_t global_iit_alloc_queue = + QUEUE_HEAD_INITIALIZER(global_iit_alloc_queue); #endif -/* TODO: remove this varibale when interactive daemon audit is complete */ -boolean_t ipc_importance_interactive_receiver = FALSE; - -static zone_t ipc_importance_task_zone; +static ZONE_DECLARE(ipc_importance_task_zone, "ipc task importance", + sizeof(struct ipc_importance_task), ZC_NOENCRYPT); +static ZONE_DECLARE(ipc_importance_inherit_zone, "ipc importance inherit", + sizeof(struct ipc_importance_inherit), ZC_NOENCRYPT); static zone_t ipc_importance_inherit_zone; static ipc_voucher_attr_control_t ipc_importance_control; @@ -1945,7 +1939,7 @@ retry: first_pass = FALSE; /* Need to make one - may race with others (be prepared to drop) */ - task_elem = (ipc_importance_task_t)zalloc(ipc_importance_task_zone); + task_elem = zalloc_flags(ipc_importance_task_zone, Z_WAITOK | Z_ZERO); if (IIT_NULL == task_elem) { goto retry; } @@ -1953,21 +1947,6 @@ retry: task_elem->iit_bits = IIE_TYPE_TASK | 2; /* one for task, one for return/made */ task_elem->iit_made = (made) ? 
1 : 0; task_elem->iit_task = task; /* take actual ref when we're sure */ - task_elem->iit_updateq = NULL; - task_elem->iit_receiver = 0; - task_elem->iit_denap = 0; - task_elem->iit_donor = 0; - task_elem->iit_live_donor = 0; - task_elem->iit_updatepolicy = 0; - task_elem->iit_reserved = 0; - task_elem->iit_filelocks = 0; - task_elem->iit_updatetime = 0; - task_elem->iit_transitions = 0; - task_elem->iit_assertcnt = 0; - task_elem->iit_externcnt = 0; - task_elem->iit_externdrop = 0; - task_elem->iit_legacy_externcnt = 0; - task_elem->iit_legacy_externdrop = 0; #if IIE_REF_DEBUG ipc_importance_counter_init(&task_elem->iit_elem); #endif @@ -3831,34 +3810,8 @@ ipc_importance_manager_release( void ipc_importance_init(void) { - natural_t ipc_importance_max = (task_max + thread_max) * 2; - char temp_buf[26]; kern_return_t kr; - if (PE_parse_boot_argn("imp_interactive_receiver", temp_buf, sizeof(temp_buf))) { - ipc_importance_interactive_receiver = TRUE; - } - - ipc_importance_task_zone = zinit(sizeof(struct ipc_importance_task), - ipc_importance_max * sizeof(struct ipc_importance_task), - sizeof(struct ipc_importance_task), - "ipc task importance"); - zone_change(ipc_importance_task_zone, Z_NOENCRYPT, TRUE); - - ipc_importance_inherit_zone = zinit(sizeof(struct ipc_importance_inherit), - ipc_importance_max * sizeof(struct ipc_importance_inherit), - sizeof(struct ipc_importance_inherit), - "ipc importance inherit"); - zone_change(ipc_importance_inherit_zone, Z_NOENCRYPT, TRUE); - - -#if DEVELOPMENT || DEBUG - queue_init(&global_iit_alloc_queue); -#endif - - /* initialize global locking */ - ipc_importance_lock_init(); - kr = ipc_register_well_known_mach_voucher_attr_manager(&ipc_importance_manager, (mach_voucher_attr_value_handle_t)0, MACH_VOUCHER_ATTR_KEY_IMPORTANCE, diff --git a/osfmk/ipc/ipc_init.c b/osfmk/ipc/ipc_init.c index a03871cb8..b6ff1fc9e 100644 --- a/osfmk/ipc/ipc_init.c +++ b/osfmk/ipc/ipc_init.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2012 Apple Inc. All rights reserved. + * Copyright (c) 2000-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -106,177 +106,82 @@ #include #include #include +#include #include /* NDR_record */ -vm_map_t ipc_kernel_map; -vm_size_t ipc_kernel_map_size = 1024 * 1024; +#define IPC_KERNEL_MAP_SIZE (1024 * 1024) +SECURITY_READ_ONLY_LATE(vm_map_t) ipc_kernel_map; /* values to limit physical copy out-of-line memory descriptors */ -vm_map_t ipc_kernel_copy_map; +SECURITY_READ_ONLY_LATE(vm_map_t) ipc_kernel_copy_map; #define IPC_KERNEL_COPY_MAP_SIZE (8 * 1024 * 1024) -vm_size_t ipc_kernel_copy_map_size = IPC_KERNEL_COPY_MAP_SIZE; -vm_size_t ipc_kmsg_max_vm_space = ((IPC_KERNEL_COPY_MAP_SIZE * 7) / 8); +const vm_size_t ipc_kmsg_max_vm_space = ((IPC_KERNEL_COPY_MAP_SIZE * 7) / 8); /* * values to limit inline message body handling * avoid copyin/out limits - even after accounting for maximum descriptor expansion. 
*/ #define IPC_KMSG_MAX_SPACE (64 * 1024 * 1024) /* keep in sync with COPYSIZELIMIT_PANIC */ -vm_size_t ipc_kmsg_max_body_space = ((IPC_KMSG_MAX_SPACE * 3) / 4 - MAX_TRAILER_SIZE); +const vm_size_t ipc_kmsg_max_body_space = ((IPC_KMSG_MAX_SPACE * 3) / 4 - MAX_TRAILER_SIZE); -int ipc_space_max; -int ipc_port_max; -int ipc_pset_max; -int prioritize_launch = 1; -int enforce_strict_reply = 0; +LCK_GRP_DECLARE(ipc_lck_grp, "ipc"); +LCK_ATTR_DECLARE(ipc_lck_attr, 0, 0); - -lck_grp_t ipc_lck_grp; -lck_attr_t ipc_lck_attr; - -static lck_grp_attr_t ipc_lck_grp_attr; +/* + * XXX tunable, belongs in mach.message.h + */ +#define MSG_OOL_SIZE_SMALL_MAX (2*PAGE_SIZE) +SECURITY_READ_ONLY_LATE(vm_size_t) msg_ool_size_small; /* - * Routine: ipc_bootstrap + * Routine: ipc_init * Purpose: - * Initialization needed before the kernel task - * can be created. + * Final initialization */ - -void -ipc_bootstrap(void) +__startup_func +static void +ipc_init(void) { kern_return_t kr; - int prioritize_launch_bootarg; - int strict_reply_bootarg; - - lck_grp_attr_setdefault(&ipc_lck_grp_attr); - lck_grp_init(&ipc_lck_grp, "ipc", &ipc_lck_grp_attr); - lck_attr_setdefault(&ipc_lck_attr); - - ipc_port_multiple_lock_init(); - - ipc_port_timestamp_data = 0; - - /* all IPC zones should be exhaustible */ - - ipc_space_zone = zinit(sizeof(struct ipc_space), - ipc_space_max * sizeof(struct ipc_space), - sizeof(struct ipc_space), - "ipc spaces"); - zone_change(ipc_space_zone, Z_NOENCRYPT, TRUE); - - /* - * populate all port(set) zones - */ - ipc_object_zones[IOT_PORT] = - zinit(sizeof(struct ipc_port), - ipc_port_max * sizeof(struct ipc_port), - sizeof(struct ipc_port), - "ipc ports"); - /* cant charge callers for port allocations (references passed) */ - zone_change(ipc_object_zones[IOT_PORT], Z_CALLERACCT, FALSE); - zone_change(ipc_object_zones[IOT_PORT], Z_NOENCRYPT, TRUE); - zone_change(ipc_object_zones[IOT_PORT], Z_CLEARMEMORY, TRUE); - - ipc_object_zones[IOT_PORT_SET] = - zinit(sizeof(struct ipc_pset), - ipc_pset_max * sizeof(struct ipc_pset), - sizeof(struct ipc_pset), - "ipc port sets"); - zone_change(ipc_object_zones[IOT_PORT_SET], Z_NOENCRYPT, TRUE); - zone_change(ipc_object_zones[IOT_PORT_SET], Z_CLEARMEMORY, TRUE); - - /* - * Create the basic ipc_kmsg_t zone (the one we also cache) - * elements at the processor-level to avoid the locking. 
- */ - ipc_kmsg_zone = zinit(IKM_SAVED_KMSG_SIZE, - ipc_port_max * MACH_PORT_QLIMIT_DEFAULT * - IKM_SAVED_KMSG_SIZE, - IKM_SAVED_KMSG_SIZE, - "ipc kmsgs"); - zone_change(ipc_kmsg_zone, Z_CALLERACCT, FALSE); - zone_change(ipc_kmsg_zone, Z_CACHING_ENABLED, TRUE); + vm_offset_t min; /* create special spaces */ kr = ipc_space_create_special(&ipc_space_kernel); assert(kr == KERN_SUCCESS); - kr = ipc_space_create_special(&ipc_space_reply); assert(kr == KERN_SUCCESS); /* initialize modules with hidden data structures */ -#if MACH_ASSERT - ipc_port_debug_init(); -#endif - ipc_kobject_init(); - ipc_table_init(); - ipc_voucher_init(); - #if IMPORTANCE_INHERITANCE ipc_importance_init(); #endif - - semaphore_init(); - mk_timer_init(); - host_notify_init(); - #if CONFIG_ARCADE arcade_init(); #endif - suid_cred_init(); - - if (PE_parse_boot_argn("prioritize_launch", &prioritize_launch_bootarg, sizeof(prioritize_launch_bootarg))) { - prioritize_launch = !!prioritize_launch_bootarg; - } - if (PE_parse_boot_argn("ipc_strict_reply", &strict_reply_bootarg, sizeof(strict_reply_bootarg))) { - enforce_strict_reply = !!strict_reply_bootarg; - } -} - -/* - * XXX tunable, belongs in mach.message.h - */ -#define MSG_OOL_SIZE_SMALL_MAX (2*PAGE_SIZE) -vm_size_t msg_ool_size_small; - -/* - * Routine: ipc_init - * Purpose: - * Final initialization of the IPC system. - */ - -void -ipc_init(void) -{ - kern_return_t retval; - vm_offset_t min; - - retval = kmem_suballoc(kernel_map, &min, ipc_kernel_map_size, + kr = kmem_suballoc(kernel_map, &min, IPC_KERNEL_MAP_SIZE, TRUE, (VM_FLAGS_ANYWHERE), VM_MAP_KERNEL_FLAGS_NONE, VM_KERN_MEMORY_IPC, &ipc_kernel_map); - if (retval != KERN_SUCCESS) { + if (kr != KERN_SUCCESS) { panic("ipc_init: kmem_suballoc of ipc_kernel_map failed"); } - retval = kmem_suballoc(kernel_map, &min, ipc_kernel_copy_map_size, + kr = kmem_suballoc(kernel_map, &min, IPC_KERNEL_COPY_MAP_SIZE, TRUE, (VM_FLAGS_ANYWHERE), VM_MAP_KERNEL_FLAGS_NONE, VM_KERN_MEMORY_IPC, &ipc_kernel_copy_map); - if (retval != KERN_SUCCESS) { + if (kr != KERN_SUCCESS) { panic("ipc_init: kmem_suballoc of ipc_kernel_copy_map failed"); } @@ -294,12 +199,11 @@ ipc_init(void) } else { msg_ool_size_small = MSG_OOL_SIZE_SMALL_MAX; } - /* account for overhead to avoid spilling over a page */ - msg_ool_size_small -= cpy_kdata_hdr_sz; ipc_host_init(); ux_handler_init(); } +STARTUP(MACH_IPC, STARTUP_RANK_LAST, ipc_init); /* diff --git a/osfmk/ipc/ipc_init.h b/osfmk/ipc/ipc_init.h index d41f4f03e..91b95d73c 100644 --- a/osfmk/ipc/ipc_init.h +++ b/osfmk/ipc/ipc_init.h @@ -112,20 +112,10 @@ #ifndef _IPC_IPC_INIT_H_ #define _IPC_IPC_INIT_H_ -extern int ipc_space_max; -extern int ipc_port_max; -extern int ipc_pset_max; - /* * Exported interfaces */ -/* IPC initialization needed before creation of kernel task */ -extern void ipc_bootstrap(void); - -/* Remaining IPC initialization (not thread based) */ -extern void ipc_init(void); - /* IPC initialization dependent on thread call support */ extern void ipc_thread_call_init(void); diff --git a/osfmk/ipc/ipc_kmsg.c b/osfmk/ipc/ipc_kmsg.c index 95ff1fb29..e128f7138 100644 --- a/osfmk/ipc/ipc_kmsg.c +++ b/osfmk/ipc/ipc_kmsg.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2007 Apple Inc. All rights reserved. + * Copyright (c) 2000-2020 Apple Inc. All rights reserved. 
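The ipc_init.c rewrite above also illustrates the newer startup conventions used across this patch: boot-args move from PE_parse_boot_argn() into TUNABLE(), lock groups and attributes become LCK_GRP_DECLARE()/LCK_ATTR_DECLARE(), late-initialized globals are marked SECURITY_READ_ONLY_LATE, and the init routine registers itself with STARTUP() instead of being called from a hand-maintained sequence. A compressed sketch of the shape, with invented names; the MACH_IPC/STARTUP_RANK_LAST pair is simply copied from this file, and a real subsystem would pick its own subsystem and rank.

#include <kern/startup.h>
#include <kern/locks.h>
#include <libkern/section_keywords.h>

/* boot-arg "example_feature" parsed once during early boot */
static TUNABLE(bool, example_feature_enabled, "example_feature", false);

LCK_GRP_DECLARE(example_lck_grp, "example");

/* written once during startup, effectively read-only afterwards */
SECURITY_READ_ONLY_LATE(uint32_t) example_limit;

__startup_func
static void
example_init(void)
{
	example_limit = example_feature_enabled ? 64 : 16;
}
STARTUP(MACH_IPC, STARTUP_RANK_LAST, example_init);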
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -93,6 +93,7 @@ #include #include #include +#include #include @@ -139,6 +140,13 @@ #include #include +#include + +#include +#if __has_feature(ptrauth_calls) +#include +#endif + #pragma pack(4) typedef struct{ @@ -178,6 +186,310 @@ typedef union{ // END LP64 fixes +#if __has_feature(ptrauth_calls) +typedef uintptr_t ikm_sig_scratch_t; + +static void +ikm_init_sig( + __unused ipc_kmsg_t kmsg, + ikm_sig_scratch_t *scratchp) +{ + *scratchp = OS_PTRAUTH_DISCRIMINATOR("kmsg.ikm_signature"); +} + +static void +ikm_chunk_sig( + ipc_kmsg_t kmsg, + void *data, + size_t len, + ikm_sig_scratch_t *scratchp) +{ + int ptrauth_flags; + void *trailerp; + + /* + * if we happen to be doing the trailer chunk, + * diversify with the ptrauth-ed trailer pointer - + * as that is unchanging for the kmsg + */ + trailerp = (void *) + ((vm_offset_t)kmsg->ikm_header + + mach_round_msg(kmsg->ikm_header->msgh_size)); + + ptrauth_flags = (data == trailerp) ? PTRAUTH_ADDR_DIVERSIFY : 0; + *scratchp = ptrauth_utils_sign_blob_generic(data, len, *scratchp, ptrauth_flags); +} + +static uintptr_t +ikm_finalize_sig( + __unused ipc_kmsg_t kmsg, + ikm_sig_scratch_t *scratchp) +{ + return *scratchp; +} + +#elif defined(CRYPTO_SHA2) && !defined(__x86_64__) + +typedef SHA256_CTX ikm_sig_scratch_t; + +static void +ikm_init_sig( + __unused ipc_kmsg_t kmsg, + ikm_sig_scratch_t *scratchp) +{ + SHA256_Init(scratchp); + SHA256_Update(scratchp, &vm_kernel_addrhash_salt_ext, sizeof(uint64_t)); +} + +static void +ikm_chunk_sig( + __unused ipc_kmsg_t kmsg, + void *data, + size_t len, + ikm_sig_scratch_t *scratchp) +{ + SHA256_Update(scratchp, data, len); +} + +static uintptr_t +ikm_finalize_sig( + __unused ipc_kmsg_t kmsg, + ikm_sig_scratch_t *scratchp) +{ + uintptr_t sha_digest[SHA256_DIGEST_LENGTH / sizeof(uintptr_t)]; + + SHA256_Final((uint8_t *)sha_digest, scratchp); + + /* + * Only use one uintptr_t sized part of result for space and compat reasons. + * Truncation is better than XOR'ing the chunks together in hopes of higher + * entropy - because of its lower risk of collisions. 
+ */ + return *sha_digest; +} + +#else +/* Stubbed out implementation (for __x86_64__ for now) */ + +typedef uintptr_t ikm_sig_scratch_t; + +static void +ikm_init_sig( + __unused ipc_kmsg_t kmsg, + ikm_sig_scratch_t *scratchp) +{ + *scratchp = 0; +} + +static void +ikm_chunk_sig( + __unused ipc_kmsg_t kmsg, + __unused void *data, + __unused size_t len, + __unused ikm_sig_scratch_t *scratchp) +{ + return; +} + +static uintptr_t +ikm_finalize_sig( + __unused ipc_kmsg_t kmsg, + ikm_sig_scratch_t *scratchp) +{ + return *scratchp; +} + +#endif + +static void +ikm_header_sig( + ipc_kmsg_t kmsg, + ikm_sig_scratch_t *scratchp) +{ + mach_msg_size_t dsc_count; + mach_msg_base_t base; + boolean_t complex; + + /* take a snapshot of the message header/body-count */ + base.header = *kmsg->ikm_header; + complex = ((base.header.msgh_bits & MACH_MSGH_BITS_COMPLEX) != 0); + if (complex) { + dsc_count = ((mach_msg_body_t *)(kmsg->ikm_header + 1))->msgh_descriptor_count; + } else { + dsc_count = 0; + } + base.body.msgh_descriptor_count = dsc_count; + + /* compute sig of a copy of the header with all varying bits masked off */ + base.header.msgh_bits &= MACH_MSGH_BITS_USER; + base.header.msgh_bits &= ~MACH_MSGH_BITS_VOUCHER_MASK; + ikm_chunk_sig(kmsg, &base, sizeof(mach_msg_base_t), scratchp); +} + +static void +ikm_trailer_sig( + ipc_kmsg_t kmsg, + ikm_sig_scratch_t *scratchp) +{ + mach_msg_max_trailer_t *trailerp; + + /* Add sig of the trailer contents */ + trailerp = (mach_msg_max_trailer_t *) + ((vm_offset_t)kmsg->ikm_header + + mach_round_msg(kmsg->ikm_header->msgh_size)); + ikm_chunk_sig(kmsg, trailerp, sizeof(*trailerp), scratchp); +} + +/* Compute the signature for the body bits of a message */ +static void +ikm_body_sig( + ipc_kmsg_t kmsg, + ikm_sig_scratch_t *scratchp) +{ + mach_msg_descriptor_t *kern_dsc; + mach_msg_size_t dsc_count; + mach_msg_body_t *body; + mach_msg_size_t i; + + if ((kmsg->ikm_header->msgh_bits & MACH_MSGH_BITS_COMPLEX) == 0) { + return; + } + body = (mach_msg_body_t *) (kmsg->ikm_header + 1); + dsc_count = body->msgh_descriptor_count; + + if (dsc_count == 0) { + return; + } + + kern_dsc = (mach_msg_descriptor_t *) (body + 1); + + /* Compute the signature for the whole descriptor array */ + ikm_chunk_sig(kmsg, kern_dsc, sizeof(*kern_dsc) * dsc_count, scratchp); + + /* look for descriptor contents that need a signature */ + for (i = 0; i < dsc_count; i++) { + switch (kern_dsc[i].type.type) { + case MACH_MSG_PORT_DESCRIPTOR: + case MACH_MSG_GUARDED_PORT_DESCRIPTOR: + case MACH_MSG_OOL_VOLATILE_DESCRIPTOR: + case MACH_MSG_OOL_DESCRIPTOR: + break; + + case MACH_MSG_OOL_PORTS_DESCRIPTOR: { + mach_msg_ool_ports_descriptor_t *ports_dsc; + + /* Compute sig for the port/object pointers */ + ports_dsc = (mach_msg_ool_ports_descriptor_t *)&kern_dsc[i]; + ikm_chunk_sig(kmsg, ports_dsc->address, ports_dsc->count * sizeof(ipc_object_t), scratchp); + break; + } + default: { + panic("ipc_kmsg_body_sig: invalid message descriptor"); + } + } + } +} + +static void +ikm_sign( + ipc_kmsg_t kmsg) +{ + ikm_sig_scratch_t scratch; + uintptr_t sig; + + zone_require(ipc_kmsg_zone, kmsg); + + ikm_init_sig(kmsg, &scratch); + + ikm_header_sig(kmsg, &scratch); +#if IKM_PARTIAL_SIG + /* save off partial signature for just header */ + sig = ikm_finalize_sig(kmsg, &scratch); + kmsg->ikm_header_sig = sig; +#endif + + ikm_trailer_sig(kmsg, &scratch); +#if IKM_PARTIAL_SIG + /* save off partial signature for header+trailer */ + sig = ikm_finalize_sig(kmsg, &scratch); + kmsg->ikm_headtrail_sig = sig; +#endif + + 
ikm_body_sig(kmsg, &scratch); + sig = ikm_finalize_sig(kmsg, &scratch); + kmsg->ikm_signature = sig; +} + +unsigned int ikm_signature_failures; +unsigned int ikm_signature_failure_id; +#if (DEVELOPMENT || DEBUG) +unsigned int ikm_signature_panic_disable; +unsigned int ikm_signature_header_failures; +unsigned int ikm_signature_trailer_failures; +#endif + +static void +ikm_validate_sig( + ipc_kmsg_t kmsg) +{ + ikm_sig_scratch_t scratch; + uintptr_t expected; + uintptr_t sig; + char *str; + + zone_require(ipc_kmsg_zone, kmsg); + + ikm_init_sig(kmsg, &scratch); + + ikm_header_sig(kmsg, &scratch); +#if IKM_PARTIAL_SIG + /* Do partial evaluation of just the header signature */ + sig = ikm_finalize_sig(kmsg, &scratch); + expected = kmsg->ikm_header_sig; + if (sig != expected) { + ikm_signature_header_failures++; + str = "header"; + goto failure; + } +#endif + + ikm_trailer_sig(kmsg, &scratch); +#if IKM_PARTIAL_SIG + /* Do partial evaluation of header+trailer signature */ + sig = ikm_finalize_sig(kmsg, &scratch); + expected = kmsg->ikm_headtrail_sig; + if (sig != expected) { + ikm_signature_trailer_failures++; + str = "trailer"; + goto failure; + } +#endif + + ikm_body_sig(kmsg, &scratch); + sig = ikm_finalize_sig(kmsg, &scratch); + + expected = kmsg->ikm_signature; + if (sig != expected) { + ikm_signature_failures++; + str = "full"; + +#if IKM_PARTIAL_SIG +failure: +#endif + { + mach_msg_id_t id = kmsg->ikm_header->msgh_id; + + ikm_signature_failure_id = id; +#if (DEVELOPMENT || DEBUG) + if (ikm_signature_panic_disable) { + return; + } +#endif + panic("ikm_validate_sig: %s signature mismatch: kmsg=0x%p, id=%d, sig=0x%zx (expected 0x%zx)", + str, kmsg, id, sig, expected); + } + } +} #if DEBUG_MSGS_K64 extern void ipc_pset_print64( @@ -509,8 +821,8 @@ ipc_msg_print_untyped64( extern vm_map_t ipc_kernel_copy_map; extern vm_size_t ipc_kmsg_max_space; -extern vm_size_t ipc_kmsg_max_vm_space; -extern vm_size_t ipc_kmsg_max_body_space; +extern const vm_size_t ipc_kmsg_max_vm_space; +extern const vm_size_t ipc_kmsg_max_body_space; extern vm_size_t msg_ool_size_small; #define MSG_OOL_SIZE_SMALL msg_ool_size_small @@ -822,6 +1134,7 @@ ipc_kmsg_trace_send(ipc_kmsg_t kmsg, if (is_task_64bit) { msg_size -= 16; } + break; default: break; } @@ -862,7 +1175,8 @@ ipc_kmsg_trace_send(ipc_kmsg_t kmsg, #endif /* zone for cached ipc_kmsg_t structures */ -zone_t ipc_kmsg_zone; +ZONE_DECLARE(ipc_kmsg_zone, "ipc kmsgs", IKM_SAVED_KMSG_SIZE, ZC_CACHING); +static TUNABLE(bool, enforce_strict_reply, "ipc_strict_reply", false); /* * Forward declarations @@ -890,8 +1204,6 @@ mach_msg_return_t ipc_kmsg_copyin_body( mach_msg_option_t *optionp); -extern int enforce_strict_reply; - static void ipc_kmsg_link_reply_context_locked( ipc_port_t reply_port, @@ -922,6 +1234,32 @@ ipc_kmsg_validate_reply_context_locked( * require locking. */ +/* + * Routine: ikm_set_header + * Purpose: + * Set the header (and data) pointers for a message. If the + * message is small, the data pointer is NULL and all the + * data resides within the fixed + * the cache, that is best. Otherwise, allocate a new one. + * Conditions: + * Nothing locked. 
+ */ +static void +ikm_set_header( + ipc_kmsg_t kmsg, + void *data, + mach_msg_size_t mtsize) +{ + if (data) { + kmsg->ikm_data = data; + kmsg->ikm_header = (mach_msg_header_t *)(data + kmsg->ikm_size - mtsize); + } else { + assert(kmsg->ikm_size == IKM_SAVED_MSG_SIZE); + kmsg->ikm_header = (mach_msg_header_t *) + ((vm_offset_t)(kmsg + 1) + kmsg->ikm_size - mtsize); + } +} + /* * Routine: ipc_kmsg_alloc * Purpose: @@ -936,6 +1274,7 @@ ipc_kmsg_alloc( { mach_msg_size_t max_expanded_size; ipc_kmsg_t kmsg; + void *data; /* * LP64support - @@ -972,18 +1311,18 @@ ipc_kmsg_alloc( max_expanded_size = msg_and_trailer_size; } + kmsg = (ipc_kmsg_t)zalloc(ipc_kmsg_zone); + if (max_expanded_size < IKM_SAVED_MSG_SIZE) { max_expanded_size = IKM_SAVED_MSG_SIZE; /* round up for ikm_cache */ - } - if (max_expanded_size == IKM_SAVED_MSG_SIZE) { - kmsg = (ipc_kmsg_t)zalloc(ipc_kmsg_zone); - } else { - kmsg = (ipc_kmsg_t)kalloc(ikm_plus_overhead(max_expanded_size)); + data = NULL; + } else if (max_expanded_size > IKM_SAVED_MSG_SIZE) { + data = kheap_alloc(KHEAP_DATA_BUFFERS, max_expanded_size, Z_WAITOK); } if (kmsg != IKM_NULL) { ikm_init(kmsg, max_expanded_size); - ikm_set_header(kmsg, msg_and_trailer_size); + ikm_set_header(kmsg, data, msg_and_trailer_size); } return kmsg; @@ -1019,25 +1358,33 @@ ipc_kmsg_free( * mark it not in use. If the port isn't already dead, then * leave the message associated with it. Otherwise, free it. */ - port = ikm_prealloc_inuse_port(kmsg); - if (port != IP_NULL) { - ip_lock(port); - ikm_prealloc_clear_inuse(kmsg, port); - if (ip_active(port) && (port->ip_premsg == kmsg)) { - assert(IP_PREALLOC(port)); + if (size == IKM_SAVED_MSG_SIZE) { + if ((void *)kmsg->ikm_header < (void *)(kmsg + 1) || + (void *)kmsg->ikm_header >= (void *)(kmsg + 1) + IKM_SAVED_MSG_SIZE) { + panic("ipc_kmsg_free"); + } + port = ikm_prealloc_inuse_port(kmsg); + if (port != IP_NULL) { + ip_lock(port); + ikm_prealloc_clear_inuse(kmsg, port); + if (ip_active(port) && (port->ip_premsg == kmsg)) { + assert(IP_PREALLOC(port)); + ip_unlock(port); + ip_release(port); + return; + } ip_unlock(port); - ip_release(port); - return; + ip_release(port); /* May be last reference */ } - ip_unlock(port); - ip_release(port); /* May be last reference */ - } - - if (kmsg->ikm_size == IKM_SAVED_MSG_SIZE) { - zfree(ipc_kmsg_zone, kmsg); - return; + } else { + void *data = kmsg->ikm_data; + if ((void *)kmsg->ikm_header < data || + (void *)kmsg->ikm_header >= data + size) { + panic("ipc_kmsg_free"); + } + kheap_free(KHEAP_DATA_BUFFERS, data, size); } - kfree(kmsg, ikm_plus_overhead(size)); + zfree(ipc_kmsg_zone, kmsg); } @@ -1087,7 +1434,7 @@ ipc_kmsg_enqueue_qos( { ipc_kmsg_t first = queue->ikmq_base; ipc_kmsg_t prev; - mach_msg_priority_t override; + mach_msg_qos_t qos_ovr; if (first == IKM_NULL) { /* insert a first message */ @@ -1105,10 +1452,10 @@ ipc_kmsg_enqueue_qos( prev->ikm_next = kmsg; /* apply QoS overrides towards the head */ - override = kmsg->ikm_qos_override; + qos_ovr = kmsg->ikm_qos_override; while (prev != kmsg && - override > prev->ikm_qos_override) { - prev->ikm_qos_override = override; + qos_ovr > prev->ikm_qos_override) { + prev->ikm_qos_override = qos_ovr; prev = prev->ikm_prev; } @@ -1131,16 +1478,16 @@ ipc_kmsg_enqueue_qos( boolean_t ipc_kmsg_override_qos( - ipc_kmsg_queue_t queue, + ipc_kmsg_queue_t queue, ipc_kmsg_t kmsg, - mach_msg_priority_t override) + mach_msg_qos_t qos_ovr) { ipc_kmsg_t first = queue->ikmq_base; ipc_kmsg_t cur = kmsg; /* apply QoS overrides towards the head */ - while (override > 
cur->ikm_qos_override) { - cur->ikm_qos_override = override; + while (qos_ovr > cur->ikm_qos_override) { + cur->ikm_qos_override = qos_ovr; if (cur == first) { return TRUE; } @@ -1562,8 +1909,8 @@ ipc_kmsg_t ipc_kmsg_prealloc(mach_msg_size_t size) { #if defined(__LP64__) - if (size > MACH_MSG_SIZE_MAX - LEGACY_HEADER_SIZE_DELTA) { - return IKM_NULL; + if (size > IKM_SAVED_MSG_SIZE - LEGACY_HEADER_SIZE_DELTA) { + panic("ipc_kmsg_prealloc"); } size += LEGACY_HEADER_SIZE_DELTA; @@ -1693,6 +2040,7 @@ ipc_kmsg_get( * the cases where no implicit data is requested. */ trailer = (mach_msg_max_trailer_t *) ((vm_offset_t)kmsg->ikm_header + size); + bzero(trailer, sizeof(*trailer)); trailer->msgh_sender = current_thread()->task->sec_token; trailer->msgh_audit = current_thread()->task->audit_token; trailer->msgh_trailer_type = MACH_MSG_TRAILER_FORMAT_0; @@ -1776,7 +2124,7 @@ ipc_kmsg_get_from_kernel( return MACH_SEND_TOO_LARGE; } ikm_prealloc_set_inuse(kmsg, dest_port); - ikm_set_header(kmsg, msg_and_trailer_size); + ikm_set_header(kmsg, NULL, msg_and_trailer_size); ip_unlock(dest_port); } else { kmsg = ipc_kmsg_alloc(msg_and_trailer_size); @@ -1799,6 +2147,7 @@ ipc_kmsg_get_from_kernel( */ trailer = (mach_msg_max_trailer_t *) ((vm_offset_t)kmsg->ikm_header + size); + bzero(trailer, sizeof(*trailer)); trailer->msgh_sender = KERNEL_SECURITY_TOKEN; trailer->msgh_audit = KERNEL_AUDIT_TOKEN; trailer->msgh_trailer_type = MACH_MSG_TRAILER_FORMAT_0; @@ -1939,6 +2288,9 @@ retry: return MACH_MSG_SUCCESS; } + /* sign the reply message */ + ikm_sign(kmsg); + /* restart the KMSG_INFO tracing for the reply message */ KDBG(MACHDBG_CODE(DBG_MACH_IPC, MACH_IPC_KMSG_INFO) | DBG_FUNC_START); port = kmsg->ikm_header->msgh_remote_port; @@ -2185,7 +2537,7 @@ ipc_kmsg_put_to_kernel( ipc_kmsg_free(kmsg); } -static mach_msg_priority_t +static pthread_priority_compact_t ipc_get_current_thread_priority(void) { thread_t thread = current_thread(); @@ -2197,33 +2549,43 @@ ipc_get_current_thread_priority(void) qos = thread_user_promotion_qos_for_pri(thread->base_pri); relpri = 0; } - return (mach_msg_priority_t)_pthread_priority_make_from_thread_qos(qos, relpri, 0); + return _pthread_priority_make_from_thread_qos(qos, relpri, 0); } static kern_return_t ipc_kmsg_set_qos( ipc_kmsg_t kmsg, mach_msg_option_t options, - mach_msg_priority_t override) + mach_msg_priority_t priority) { kern_return_t kr; ipc_port_t special_reply_port = kmsg->ikm_header->msgh_local_port; ipc_port_t dest_port = kmsg->ikm_header->msgh_remote_port; - kr = ipc_get_pthpriority_from_kmsg_voucher(kmsg, &kmsg->ikm_qos); - if (kr != KERN_SUCCESS) { - if (options & MACH_SEND_PROPAGATE_QOS) { - kmsg->ikm_qos = ipc_get_current_thread_priority(); - } else { - kmsg->ikm_qos = MACH_MSG_PRIORITY_UNSPECIFIED; + if ((options & MACH_SEND_OVERRIDE) && + !mach_msg_priority_is_pthread_priority(priority)) { + mach_msg_qos_t qos = mach_msg_priority_qos(priority); + int relpri = mach_msg_priority_relpri(priority); + mach_msg_qos_t ovr = mach_msg_priority_overide_qos(priority); + + kmsg->ikm_ppriority = _pthread_priority_make_from_thread_qos(qos, relpri, 0); + kmsg->ikm_qos_override = MAX(qos, ovr); + } else { + kr = ipc_get_pthpriority_from_kmsg_voucher(kmsg, &kmsg->ikm_ppriority); + if (kr != KERN_SUCCESS) { + if (options & MACH_SEND_PROPAGATE_QOS) { + kmsg->ikm_ppriority = ipc_get_current_thread_priority(); + } else { + kmsg->ikm_ppriority = MACH_MSG_PRIORITY_UNSPECIFIED; + } } - } - kmsg->ikm_qos_override = kmsg->ikm_qos; - if (options & MACH_SEND_OVERRIDE) { - 
pthread_priority_t pp = _pthread_priority_normalize_for_ipc(override); - if (pp > kmsg->ikm_qos) { - kmsg->ikm_qos_override = (mach_msg_priority_t)pp; + if (options & MACH_SEND_OVERRIDE) { + mach_msg_qos_t qos = _pthread_priority_thread_qos(kmsg->ikm_ppriority); + mach_msg_qos_t ovr = _pthread_priority_thread_qos(priority); + kmsg->ikm_qos_override = MAX(qos, ovr); + } else { + kmsg->ikm_qos_override = _pthread_priority_thread_qos(kmsg->ikm_ppriority); } } @@ -2457,7 +2819,7 @@ mach_msg_return_t ipc_kmsg_copyin_header( ipc_kmsg_t kmsg, ipc_space_t space, - mach_msg_priority_t override, + mach_msg_priority_t priority, mach_msg_option_t *optionp) { mach_msg_header_t *msg = kmsg->ikm_header; @@ -2833,7 +3195,7 @@ ipc_kmsg_copyin_header( needboost = ipc_port_request_sparm(dport, dest_name, dest_entry->ie_request, *optionp, - override); + priority); if (needboost == FALSE) { ip_unlock(dport); } @@ -2841,7 +3203,7 @@ ipc_kmsg_copyin_header( ipc_port_request_sparm(dport, dest_name, dest_entry->ie_request, *optionp, - override); + priority); ip_unlock(dport); #endif /* IMPORTANCE_INHERITANCE */ } else { @@ -2896,7 +3258,7 @@ ipc_kmsg_copyin_header( msg->msgh_local_port = ip_object_to_port(reply_port); /* capture the qos value(s) for the kmsg */ - ipc_kmsg_set_qos(kmsg, *optionp, override); + ipc_kmsg_set_qos(kmsg, *optionp, priority); if (release_port != IP_NULL) { ip_release(release_port); @@ -2973,7 +3335,7 @@ invalid_dest: static mach_msg_descriptor_t * ipc_kmsg_copyin_port_descriptor( - volatile mach_msg_port_descriptor_t *dsc, + mach_msg_port_descriptor_t *dsc, mach_msg_legacy_port_descriptor_t *user_dsc_in, ipc_space_t space, ipc_object_t dest, @@ -2981,16 +3343,16 @@ ipc_kmsg_copyin_port_descriptor( mach_msg_option_t *optionp, mach_msg_return_t *mr) { - volatile mach_msg_legacy_port_descriptor_t *user_dsc = user_dsc_in; + mach_msg_legacy_port_descriptor_t user_dsc = *user_dsc_in; mach_msg_type_name_t user_disp; mach_msg_type_name_t result_disp; mach_port_name_t name; ipc_object_t object; - user_disp = user_dsc->disposition; + user_disp = user_dsc.disposition; result_disp = ipc_object_copyin_type(user_disp); - name = (mach_port_name_t)user_dsc->name; + name = (mach_port_name_t)user_dsc.name; if (MACH_PORT_VALID(name)) { kern_return_t kr = ipc_object_copyin(space, name, user_disp, &object, 0, NULL, kmsg->ikm_flags); if (kr != KERN_SUCCESS) { @@ -3118,6 +3480,7 @@ ipc_kmsg_copyin_ool_descriptor( } dsc->address = (void *)*copy; } + return user_dsc; } @@ -3221,7 +3584,7 @@ ipc_kmsg_copyin_ool_ports_descriptor( } if (deallocate) { - (void) mach_vm_deallocate(map, addr, (mach_vm_size_t)ports_length); + (void) mach_vm_deallocate(map, addr, (mach_vm_size_t)names_length); } objects = (ipc_object_t *) data; @@ -3332,6 +3695,7 @@ ipc_kmsg_copyin_guarded_port_descriptor( #if __LP64__ dsc->pad_end = 0; // debug, unnecessary #endif + return user_dsc; } @@ -3655,8 +4019,7 @@ clean_message: * Nothing locked. * Returns: * MACH_MSG_SUCCESS Successful copyin. - * MACH_SEND_INVALID_HEADER - * Illegal value in the message header bits. + * MACH_SEND_INVALID_HEADER Illegal value in the message header bits. * MACH_SEND_INVALID_DEST Can't copyin destination port. * MACH_SEND_INVALID_REPLY Can't copyin reply port. * MACH_SEND_INVALID_MEMORY Can't grab out-of-line memory. 
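The ipc_kmsg_enqueue_qos() and ipc_kmsg_override_qos() hunks above both walk the per-port message queue from a given kmsg toward the head, raising ikm_qos_override until they reach a message that already carries an equal or higher override, and report whether the head itself was raised so the caller can refresh the push on the destination port. Because each propagation leaves the overrides non-decreasing toward the head, the walk can stop at the first message it fails to raise. Below is a minimal stand-alone C sketch of that walk, assuming a plain (non-circular) doubly linked queue and an integer QoS scale; the model_kmsg and model_override_qos names are hypothetical and exist only for this illustration — this is not kernel code from the patch.

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for struct ipc_kmsg: only the queue links and the override. */
struct model_kmsg {
	struct model_kmsg *next;          /* toward the tail */
	struct model_kmsg *prev;          /* toward the head */
	unsigned int       qos_override;  /* models ikm_qos_override */
};

/*
 * Model of the "apply QoS overrides towards the head" walk.
 * Returns true when the head message itself was raised, which is the
 * cue for the caller to refresh the push on the destination port.
 */
static bool
model_override_qos(struct model_kmsg *first, struct model_kmsg *kmsg,
    unsigned int qos_ovr)
{
	struct model_kmsg *cur = kmsg;

	while (qos_ovr > cur->qos_override) {
		cur->qos_override = qos_ovr;
		if (cur == first) {
			return true;
		}
		cur = cur->prev;
	}
	return false;
}

int
main(void)
{
	/* head (a) <- b <- tail (c), with overrides 3, 2, 1 */
	struct model_kmsg a = { .qos_override = 3 };
	struct model_kmsg b = { .prev = &a, .qos_override = 2 };
	struct model_kmsg c = { .prev = &b, .qos_override = 1 };
	a.next = &b;
	b.next = &c;

	bool head_raised = model_override_qos(&a, &c, 4);
	printf("head raised: %d, overrides now %u %u %u\n",
	    head_raised, a.qos_override, b.qos_override, c.qos_override);
	return 0;
}

Running the sketch raises every override on the path to 4 and reports that the head was raised — the case in which the kernel routine returns TRUE and the enqueue/override callers go on to update the port's turnstile push.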
@@ -3670,19 +4033,35 @@ ipc_kmsg_copyin( ipc_kmsg_t kmsg, ipc_space_t space, vm_map_t map, - mach_msg_priority_t override, + mach_msg_priority_t priority, mach_msg_option_t *optionp) { mach_msg_return_t mr; + mach_port_name_t dest_name = CAST_MACH_PORT_TO_NAME(kmsg->ikm_header->msgh_remote_port); kmsg->ikm_header->msgh_bits &= MACH_MSGH_BITS_USER; - mr = ipc_kmsg_copyin_header(kmsg, space, override, optionp); + mr = ipc_kmsg_copyin_header(kmsg, space, priority, optionp); if (mr != MACH_MSG_SUCCESS) { return mr; } + /* Get the message filter policy if the task and port support filtering */ + mach_msg_filter_id fid = 0; + if (ip_enforce_msg_filtering(kmsg->ikm_header->msgh_remote_port) && + task_get_filter_msg_flag(current_task())) { + /* port label is yet to be supported */ + boolean_t allow_kmsg = mach_msg_fetch_filter_policy(NULL, kmsg->ikm_header->msgh_id, &fid); + if (!allow_kmsg) { + mach_port_guard_exception(dest_name, 0, 0, kGUARD_EXC_MSG_FILTERED); + /* no descriptors have been copied in yet */ + ipc_kmsg_clean_partial(kmsg, 0, NULL, 0, 0); + return MACH_SEND_MSG_FILTERED; + } + kmsg->ikm_filter_policy_id = fid; + } + KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_IPC, MACH_IPC_MSG_SEND) | DBG_FUNC_NONE, VM_KERNEL_ADDRPERM((uintptr_t)kmsg), (uintptr_t)kmsg->ikm_header->msgh_bits, @@ -3698,21 +4077,25 @@ ipc_kmsg_copyin( kmsg->ikm_voucher, kmsg->ikm_header->msgh_id); - if ((kmsg->ikm_header->msgh_bits & MACH_MSGH_BITS_COMPLEX) == 0) { - return MACH_MSG_SUCCESS; + if (kmsg->ikm_header->msgh_bits & MACH_MSGH_BITS_COMPLEX) { + mr = ipc_kmsg_copyin_body( kmsg, space, map, optionp); + + /* unreachable if !DEBUG */ + __unreachable_ok_push + if (DEBUG_KPRINT_SYSCALL_PREDICATE(DEBUG_KPRINT_SYSCALL_IPC_MASK)) { + kprintf("body:\n"); + uint32_t i; + for (i = 0; i * 4 < (kmsg->ikm_header->msgh_size - sizeof(mach_msg_header_t)); i++) { + kprintf("%.4x\n", ((uint32_t *)(kmsg->ikm_header + 1))[i]); + } + } + __unreachable_ok_pop } - mr = ipc_kmsg_copyin_body( kmsg, space, map, optionp); - /* unreachable if !DEBUG */ - __unreachable_ok_push - if (DEBUG_KPRINT_SYSCALL_PREDICATE(DEBUG_KPRINT_SYSCALL_IPC_MASK)) { - kprintf("body:\n"); - uint32_t i; - for (i = 0; i * 4 < (kmsg->ikm_header->msgh_size - sizeof(mach_msg_header_t)); i++) { - kprintf("%.4x\n", ((uint32_t *)(kmsg->ikm_header + 1))[i]); - } + /* Sign the message contents */ + if (mr == MACH_MSG_SUCCESS) { + ikm_sign(kmsg); } - __unreachable_ok_pop return mr; } @@ -3768,35 +4151,33 @@ ipc_kmsg_copyin_from_kernel( ipc_object_copyin_type(lname))); kmsg->ikm_header->msgh_bits = bits; - if ((bits & MACH_MSGH_BITS_COMPLEX) == 0) { - return MACH_MSG_SUCCESS; - } } - /* - * Check if the remote port accepts ports in the body. - */ - if (dest->ip_no_grant) { - mach_msg_descriptor_t *saddr; - mach_msg_body_t *body; - mach_msg_type_number_t i, count; - - body = (mach_msg_body_t *) (kmsg->ikm_header + 1); - saddr = (mach_msg_descriptor_t *) (body + 1); - count = body->msgh_descriptor_count; - - for (i = 0; i < count; i++, saddr++) { - switch (saddr->type.type) { - case MACH_MSG_PORT_DESCRIPTOR: - case MACH_MSG_OOL_PORTS_DESCRIPTOR: - case MACH_MSG_GUARDED_PORT_DESCRIPTOR: - /* no descriptors have been copied in yet */ - ipc_kmsg_clean_partial(kmsg, 0, NULL, 0, 0); - return MACH_SEND_NO_GRANT_DEST; + if (bits & MACH_MSGH_BITS_COMPLEX) { + /* + * Check if the remote port accepts ports in the body. 
+ */ + if (dest->ip_no_grant) { + mach_msg_descriptor_t *saddr; + mach_msg_body_t *body; + mach_msg_type_number_t i, count; + + body = (mach_msg_body_t *) (kmsg->ikm_header + 1); + saddr = (mach_msg_descriptor_t *) (body + 1); + count = body->msgh_descriptor_count; + + for (i = 0; i < count; i++, saddr++) { + switch (saddr->type.type) { + case MACH_MSG_PORT_DESCRIPTOR: + case MACH_MSG_OOL_PORTS_DESCRIPTOR: + case MACH_MSG_GUARDED_PORT_DESCRIPTOR: + /* no descriptors have been copied in yet */ + ipc_kmsg_clean_partial(kmsg, 0, NULL, 0, 0); + return MACH_SEND_NO_GRANT_DEST; + } } } - } - { + mach_msg_descriptor_t *saddr; mach_msg_body_t *body; mach_msg_type_number_t i, count; @@ -3915,6 +4296,10 @@ ipc_kmsg_copyin_from_kernel( } } } + + /* Add the signature to the message */ + ikm_sign(kmsg); + return MACH_MSG_SUCCESS; } @@ -3957,33 +4342,30 @@ ipc_kmsg_copyin_from_kernel_legacy( ipc_object_copyin_type(lname))); kmsg->ikm_header->msgh_bits = bits; - if ((bits & MACH_MSGH_BITS_COMPLEX) == 0) { - return MACH_MSG_SUCCESS; - } } - if (dest->ip_no_grant) { - mach_msg_descriptor_t *saddr; - mach_msg_body_t *body; - mach_msg_type_number_t i, count; - - body = (mach_msg_body_t *) (kmsg->ikm_header + 1); - saddr = (mach_msg_descriptor_t *) (body + 1); - count = body->msgh_descriptor_count; - - for (i = 0; i < count; i++, saddr++) { - switch (saddr->type.type) { - case MACH_MSG_PORT_DESCRIPTOR: - case MACH_MSG_OOL_PORTS_DESCRIPTOR: - case MACH_MSG_GUARDED_PORT_DESCRIPTOR: - /* no descriptors have been copied in yet */ - ipc_kmsg_clean_partial(kmsg, 0, NULL, 0, 0); - return MACH_SEND_NO_GRANT_DEST; + if (bits & MACH_MSGH_BITS_COMPLEX) { + if (dest->ip_no_grant) { + mach_msg_descriptor_t *saddr; + mach_msg_body_t *body; + mach_msg_type_number_t i, count; + + body = (mach_msg_body_t *) (kmsg->ikm_header + 1); + saddr = (mach_msg_descriptor_t *) (body + 1); + count = body->msgh_descriptor_count; + + for (i = 0; i < count; i++, saddr++) { + switch (saddr->type.type) { + case MACH_MSG_PORT_DESCRIPTOR: + case MACH_MSG_OOL_PORTS_DESCRIPTOR: + case MACH_MSG_GUARDED_PORT_DESCRIPTOR: + /* no descriptors have been copied in yet */ + ipc_kmsg_clean_partial(kmsg, 0, NULL, 0, 0); + return MACH_SEND_NO_GRANT_DEST; + } } } - } - { mach_msg_legacy_descriptor_t *saddr; mach_msg_descriptor_t *daddr; mach_msg_body_t *body; @@ -4154,6 +4536,9 @@ ipc_kmsg_copyin_from_kernel_legacy( } } } + + ikm_sign(kmsg); + return MACH_MSG_SUCCESS; } #endif /* IKM_SUPPORT_LEGACY */ @@ -4997,6 +5382,7 @@ ipc_kmsg_copyout_guarded_port_descriptor( return (mach_msg_descriptor_t *)dest_dsc; } + /* * Routine: ipc_kmsg_copyout_body * Purpose: @@ -5048,7 +5434,7 @@ ipc_kmsg_copyout_body( sdsc_count = 0; } - /* Now process the descriptors */ + /* Now process the descriptors - in reverse order */ for (i = dsc_count - 1; i >= 0; i--) { switch (kern_dsc[i].type.type) { case MACH_MSG_PORT_DESCRIPTOR: @@ -5167,6 +5553,8 @@ ipc_kmsg_copyout( { mach_msg_return_t mr; + ikm_validate_sig(kmsg); + mr = ipc_kmsg_copyout_header(kmsg, space, option); if (mr != MACH_MSG_SUCCESS) { return mr; @@ -5224,6 +5612,8 @@ ipc_kmsg_copyout_pseudo( /* Set ith_knote to ITH_KNOTE_PSEUDO */ current_thread()->ith_knote = ITH_KNOTE_PSEUDO; + ikm_validate_sig(kmsg); + assert(IO_VALID(dest)); #if 0 @@ -5283,6 +5673,8 @@ ipc_kmsg_copyout_dest( mach_msg_type_name_t voucher_type; mach_port_name_t dest_name, reply_name, voucher_name; + ikm_validate_sig(kmsg); + mbits = kmsg->ikm_header->msgh_bits; dest = ip_to_object(kmsg->ikm_header->msgh_remote_port); reply = 
ip_to_object(kmsg->ikm_header->msgh_local_port); @@ -5360,6 +5752,8 @@ ipc_kmsg_copyout_to_kernel( mach_msg_type_name_t reply_type; mach_port_name_t dest_name; + ikm_validate_sig(kmsg); + dest = ip_to_object(kmsg->ikm_header->msgh_remote_port); reply = kmsg->ikm_header->msgh_local_port; dest_type = MACH_MSGH_BITS_REMOTE(kmsg->ikm_header->msgh_bits); @@ -5419,6 +5813,8 @@ ipc_kmsg_copyout_to_kernel_legacy( mach_msg_type_name_t reply_type; mach_port_name_t dest_name; + ikm_validate_sig(kmsg); + dest = ip_to_object(kmsg->ikm_header->msgh_remote_port); reply = kmsg->ikm_header->msgh_local_port; dest_type = MACH_MSGH_BITS_REMOTE(kmsg->ikm_header->msgh_bits); @@ -5574,8 +5970,20 @@ ipc_kmsg_munge_trailer(mach_msg_max_trailer_t *in, void *_out, boolean_t is64bit #endif /* __arm64__ */ mach_msg_trailer_size_t +ipc_kmsg_trailer_size( + mach_msg_option_t option, + __unused thread_t thread) +{ + if (!(option & MACH_RCV_TRAILER_MASK)) { + return MACH_MSG_TRAILER_MINIMUM_SIZE; + } else { + return REQUESTED_TRAILER_SIZE(thread_is_64bit_addr(thread), option); + } +} + +void ipc_kmsg_add_trailer(ipc_kmsg_t kmsg, ipc_space_t space __unused, - mach_msg_option_t option, thread_t thread, + mach_msg_option_t option, __unused thread_t thread, mach_port_seqno_t seqno, boolean_t minimal_trailer, mach_vm_offset_t context) { @@ -5602,7 +6010,7 @@ ipc_kmsg_add_trailer(ipc_kmsg_t kmsg, ipc_space_t space __unused, #endif /* __arm64__ */ if (!(option & MACH_RCV_TRAILER_MASK)) { - return trailer->msgh_trailer_size; + return; } trailer->msgh_seqno = seqno; @@ -5614,7 +6022,7 @@ ipc_kmsg_add_trailer(ipc_kmsg_t kmsg, ipc_space_t space __unused, } if (GET_RCV_ELEMENTS(option) >= MACH_RCV_TRAILER_AV) { - trailer->msgh_ad = 0; + trailer->msgh_ad = kmsg->ikm_filter_policy_id; } /* @@ -5631,8 +6039,7 @@ done: #ifdef __arm64__ ipc_kmsg_munge_trailer(trailer, real_trailer_out, thread_is_64bit_addr(thread)); #endif /* __arm64__ */ - - return trailer->msgh_trailer_size; + return; } mach_msg_header_t * diff --git a/osfmk/ipc/ipc_kmsg.h b/osfmk/ipc/ipc_kmsg.h index 68b7c4016..5a562adf8 100644 --- a/osfmk/ipc/ipc_kmsg.h +++ b/osfmk/ipc/ipc_kmsg.h @@ -82,10 +82,14 @@ #include #include -typedef uint32_t ipc_kmsg_flags_t; +typedef uint16_t ipc_kmsg_flags_t; #define IPC_KMSG_FLAGS_ALLOW_IMMOVABLE_SEND 0x1 /* Dest port contains an immovable send right */ +#if (DEVELOPMENT || DEBUG) +#define IKM_PARTIAL_SIG 1 /* Keep partial message signatures for better debug */ +#endif + /* * This structure is only the header for a kmsg buffer; * the actual buffer is normally larger. 
The rest of the buffer @@ -103,21 +107,32 @@ typedef uint32_t ipc_kmsg_flags_t; struct ipc_kmsg { mach_msg_size_t ikm_size; - ipc_kmsg_flags_t ikm_flags; + uint32_t ikm_ppriority; /* pthread priority of this kmsg */ struct ipc_kmsg *ikm_next; /* next message on port/discard queue */ struct ipc_kmsg *ikm_prev; /* prev message on port/discard queue */ - mach_msg_header_t *ikm_header; - ipc_port_t ikm_prealloc; /* port we were preallocated from */ - ipc_port_t ikm_voucher; /* voucher port carried */ - mach_msg_priority_t ikm_qos; /* qos of this kmsg */ - mach_msg_priority_t ikm_qos_override; /* qos override on this kmsg */ + union { + ipc_port_t XNU_PTRAUTH_SIGNED_PTR("kmsg.ikm_prealloc") ikmu_prealloc; /* port we were preallocated from */ + void *XNU_PTRAUTH_SIGNED_PTR("kmsg.ikm_data") ikmu_data; + } ikm_u; + mach_msg_header_t *XNU_PTRAUTH_SIGNED_PTR("kmsg.ikm_header") ikm_header; + ipc_port_t XNU_PTRAUTH_SIGNED_PTR("kmsg.ikm_voucher") ikm_voucher; /* voucher port carried */ struct ipc_importance_elem *ikm_importance; /* inherited from */ queue_chain_t ikm_inheritance; /* inherited from link */ struct turnstile *ikm_turnstile; /* send turnstile for ikm_prealloc port */ #if MACH_FLIPC struct mach_node *ikm_node; /* Originating node - needed for ack */ #endif +#if IKM_PARTIAL_SIG + uintptr_t ikm_header_sig; /* sig for just the header */ + uintptr_t ikm_headtrail_sig;/* sif for header and trailer */ +#endif + uintptr_t ikm_signature; /* sig for all kernel-processed data */ + ipc_kmsg_flags_t ikm_flags; + mach_msg_qos_t ikm_qos_override; /* qos override on this kmsg */ + mach_msg_filter_id ikm_filter_policy_id; /* Sandbox-specific policy id used for message filtering */ }; +#define ikm_prealloc ikm_u.ikmu_prealloc +#define ikm_data ikm_u.ikmu_data #if defined(__i386__) || defined(__arm__) #define IKM_SUPPORT_LEGACY 1 @@ -167,35 +182,44 @@ MACRO_END #define ikm_flipc_init(kmsg) #endif +#if IKM_PARTIAL_SIG #define ikm_init(kmsg, size) \ MACRO_BEGIN \ (kmsg)->ikm_size = (size); \ (kmsg)->ikm_flags = 0; \ (kmsg)->ikm_prealloc = IP_NULL; \ + (kmsg)->ikm_data = NULL; \ (kmsg)->ikm_voucher = IP_NULL; \ (kmsg)->ikm_importance = IIE_NULL; \ + (kmsg)->ikm_filter_policy_id = 0; \ + (kmsg)->ikm_header_sig = 0; \ + (kmsg)->ikm_headtrail_sig = 0; \ + (kmsg)->ikm_signature = 0; \ ikm_qos_init(kmsg); \ ikm_flipc_init(kmsg); \ assert((kmsg)->ikm_prev = (kmsg)->ikm_next = IKM_BOGUS); \ MACRO_END - -#define ikm_qos_init(kmsg) \ -MACRO_BEGIN \ - (kmsg)->ikm_qos = MACH_MSG_PRIORITY_UNSPECIFIED; \ - (kmsg)->ikm_qos_override = MACH_MSG_PRIORITY_UNSPECIFIED; \ -MACRO_END - -#define ikm_check_init(kmsg, size) \ -MACRO_BEGIN \ - assert((kmsg)->ikm_size == (size)); \ - assert((kmsg)->ikm_prev == IKM_BOGUS); \ - assert((kmsg)->ikm_next == IKM_BOGUS); \ +#else +#define ikm_init(kmsg, size) \ +MACRO_BEGIN \ + (kmsg)->ikm_size = (size); \ + (kmsg)->ikm_flags = 0; \ + (kmsg)->ikm_prealloc = IP_NULL; \ + (kmsg)->ikm_data = NULL; \ + (kmsg)->ikm_voucher = IP_NULL; \ + (kmsg)->ikm_importance = IIE_NULL; \ + (kmsg)->ikm_filter_policy_id = 0; \ + (kmsg)->ikm_signature = 0; \ + ikm_qos_init(kmsg); \ + ikm_flipc_init(kmsg); \ + assert((kmsg)->ikm_prev = (kmsg)->ikm_next = IKM_BOGUS); \ MACRO_END +#endif -#define ikm_set_header(kmsg, mtsize) \ +#define ikm_qos_init(kmsg) \ MACRO_BEGIN \ - (kmsg)->ikm_header = (mach_msg_header_t *) \ - ((vm_offset_t)((kmsg) + 1) + (kmsg)->ikm_size - (mtsize)); \ + (kmsg)->ikm_ppriority = MACH_MSG_PRIORITY_UNSPECIFIED; \ + (kmsg)->ikm_qos_override = THREAD_QOS_UNSPECIFIED; \ MACRO_END struct 
ipc_kmsg_queue { @@ -230,7 +254,7 @@ extern boolean_t ipc_kmsg_enqueue_qos( extern boolean_t ipc_kmsg_override_qos( ipc_kmsg_queue_t queue, ipc_kmsg_t kmsg, - mach_msg_priority_t override); + mach_msg_qos_t qos_ovr); /* Dequeue and return a kmsg */ extern ipc_kmsg_t ipc_kmsg_dequeue( @@ -323,7 +347,7 @@ extern void ipc_kmsg_put_to_kernel( extern mach_msg_return_t ipc_kmsg_copyin_header( ipc_kmsg_t kmsg, ipc_space_t space, - mach_msg_priority_t override, + mach_msg_priority_t priority, mach_msg_option_t *optionp); /* Copyin port rights and out-of-line memory from a user message */ @@ -331,7 +355,7 @@ extern mach_msg_return_t ipc_kmsg_copyin( ipc_kmsg_t kmsg, ipc_space_t space, vm_map_t map, - mach_msg_priority_t override, + mach_msg_priority_t priority, mach_msg_option_t *optionp); /* Copyin port rights and out-of-line memory from a kernel message */ @@ -404,6 +428,8 @@ extern void ipc_kmsg_copyout_to_kernel_legacy( #endif extern mach_msg_trailer_size_t +ipc_kmsg_trailer_size(mach_msg_option_t option, thread_t thread); +extern void ipc_kmsg_add_trailer(ipc_kmsg_t kmsg, ipc_space_t space, mach_msg_option_t option, thread_t thread, mach_port_seqno_t seqno, boolean_t minimal_trailer, diff --git a/osfmk/ipc/ipc_mqueue.c b/osfmk/ipc/ipc_mqueue.c index 7626a1ad2..d715828ab 100644 --- a/osfmk/ipc/ipc_mqueue.c +++ b/osfmk/ipc/ipc_mqueue.c @@ -643,10 +643,10 @@ ipc_mqueue_send( * The message queue is not locked. * The caller holds a reference on the message queue. */ -extern void +void ipc_mqueue_override_send( ipc_mqueue_t mqueue, - mach_msg_priority_t override) + mach_msg_qos_t qos_ovr) { boolean_t __unused full_queue_empty = FALSE; @@ -657,7 +657,7 @@ ipc_mqueue_override_send( if (imq_full(mqueue)) { ipc_kmsg_t first = ipc_kmsg_queue_first(&mqueue->imq_messages); - if (first && ipc_kmsg_override_qos(&mqueue->imq_messages, first, override)) { + if (first && ipc_kmsg_override_qos(&mqueue->imq_messages, first, qos_ovr)) { ipc_object_t object = imq_to_object(mqueue); assert(io_otype(object) == IOT_PORT); ipc_port_t port = ip_object_to_port(object); @@ -969,8 +969,9 @@ ipc_mqueue_receive_results(wait_result_t saved_wait_result) if (option & MACH_RCV_LARGE) { return; } - + return; case MACH_MSG_SUCCESS: + return; case MACH_PEEK_READY: return; @@ -1755,7 +1756,7 @@ ipc_mqueue_set_qlimit( mqueue->imq_msgcount++; /* give it to the awakened thread */ } } - mqueue->imq_qlimit = qlimit; + mqueue->imq_qlimit = (uint16_t)qlimit; imq_unlock(mqueue); } diff --git a/osfmk/ipc/ipc_mqueue.h b/osfmk/ipc/ipc_mqueue.h index f982ba677..98ea22434 100644 --- a/osfmk/ipc/ipc_mqueue.h +++ b/osfmk/ipc/ipc_mqueue.h @@ -267,7 +267,7 @@ extern mach_msg_return_t ipc_mqueue_preflight_send( /* Set a [send-possible] override on the mqueue */ extern void ipc_mqueue_override_send( ipc_mqueue_t mqueue, - mach_msg_priority_t override); + mach_msg_qos_t qos_ovr); /* Deliver message to message queue or waiting receiver */ extern void ipc_mqueue_post( diff --git a/osfmk/ipc/ipc_notify.c b/osfmk/ipc/ipc_notify.c index 9744b2b62..c415f0b3b 100644 --- a/osfmk/ipc/ipc_notify.c +++ b/osfmk/ipc/ipc_notify.c @@ -158,8 +158,6 @@ void ipc_notify_send_once( ipc_port_t port) { - ipc_port_adjust_special_reply_port(port, IPC_PORT_ADJUST_RESET_BOOSTRAP_CHECKIN); - (void)mach_notify_send_once(port); /* send-once right consumed */ } diff --git a/osfmk/ipc/ipc_object.c b/osfmk/ipc/ipc_object.c index adeef2d72..5086568f6 100644 --- a/osfmk/ipc/ipc_object.c +++ b/osfmk/ipc/ipc_object.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2007 Apple Inc. 
All rights reserved. + * Copyright (c) 2000-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -94,7 +94,16 @@ #include -zone_t ipc_object_zones[IOT_NUMBER]; +SECURITY_READ_ONLY_LATE(zone_t) ipc_object_zones[IOT_NUMBER]; + +ZONE_INIT(&ipc_object_zones[IOT_PORT], "ipc ports", sizeof(struct ipc_port), + ZC_NOENCRYPT | ZC_CACHING | ZC_ZFREE_CLEARMEM | ZC_NOSEQUESTER, + ZONE_ID_IPC_PORT, NULL); + +ZONE_INIT(&ipc_object_zones[IOT_PORT_SET], "ipc port sets", + sizeof(struct ipc_pset), + ZC_NOENCRYPT | ZC_ZFREE_CLEARMEM | ZC_NOSEQUESTER, + ZONE_ID_IPC_PORT_SET, NULL); /* * Routine: ipc_object_reference @@ -495,8 +504,13 @@ void ipc_object_validate( ipc_object_t object) { - int otype = (io_otype(object) == IOT_PORT_SET) ? IOT_PORT_SET : IOT_PORT; - zone_require(object, ipc_object_zones[otype]); + if (io_otype(object) != IOT_PORT_SET) { + zone_id_require(ZONE_ID_IPC_PORT, + sizeof(struct ipc_port), object); + } else { + zone_id_require(ZONE_ID_IPC_PORT_SET, + sizeof(struct ipc_pset), object); + } } /* @@ -974,22 +988,6 @@ ipc_object_copyout( io_unlock(object); ipc_entry_dealloc(space, name, entry); is_write_unlock(space); - - switch (msgt_name) { - case MACH_MSG_TYPE_PORT_SEND_ONCE: - ipc_port_release_sonce(ip_object_to_port(object)); - break; - case MACH_MSG_TYPE_PORT_SEND: - ipc_port_release_send(ip_object_to_port(object)); - break; - default: - /* - * We don't allow labeling of "kobjects" with receive - * rights at user-space or port-sets. So, if we get this far, - * something went VERY wrong. - */ - panic("ipc_object_copyout: bad port label check failure"); - } return KERN_INVALID_CAPABILITY; } @@ -1094,17 +1092,6 @@ ipc_object_copyout_name( io_unlock(object); ipc_entry_dealloc(space, name, entry); is_write_unlock(space); - - switch (msgt_name) { - case MACH_MSG_TYPE_PORT_SEND_ONCE: - ipc_port_release_sonce(ip_object_to_port(object)); - break; - case MACH_MSG_TYPE_PORT_SEND: - ipc_port_release_send(ip_object_to_port(object)); - break; - default: - panic("ipc_object_copyout_name: bad port label check failure"); - } return KERN_INVALID_CAPABILITY; } diff --git a/osfmk/ipc/ipc_object.h b/osfmk/ipc/ipc_object.h index 4ca1ad542..83021dd24 100644 --- a/osfmk/ipc/ipc_object.h +++ b/osfmk/ipc/ipc_object.h @@ -198,7 +198,7 @@ extern boolean_t io_lock_try( * and zfree modifies that to point to the next free zone element. */ #define IO_MAX_REFERENCES \ - (unsigned)(~0 ^ (1U << (sizeof(int)*BYTE_SIZE - 1))) + (unsigned)(~0U ^ (1U << (sizeof(int)*BYTE_SIZE - 1))) static inline void io_reference(ipc_object_t io) @@ -333,7 +333,7 @@ extern kern_return_t ipc_object_copyin( ipc_object_t *objectp, mach_port_context_t context, mach_msg_guard_flags_t *guard_flags, - uint32_t kmsg_flags); + uint16_t kmsg_flags); /* Copyin a naked capability from the kernel */ extern void ipc_object_copyin_from_kernel( diff --git a/osfmk/ipc/ipc_port.c b/osfmk/ipc/ipc_port.c index b8cddf28a..8ba9fcf0a 100644 --- a/osfmk/ipc/ipc_port.c +++ b/osfmk/ipc/ipc_port.c @@ -69,7 +69,6 @@ * Functions to manipulate IPC ports. 
*/ -#include #include #include @@ -93,15 +92,17 @@ #include #include #include +#include #include #include -decl_lck_spin_data(, ipc_port_multiple_lock_data); -ipc_port_timestamp_t ipc_port_timestamp_data; -int ipc_portbt; -extern int prioritize_launch; +static TUNABLE(bool, prioritize_launch, "prioritize_launch", true); +TUNABLE_WRITEABLE(int, ipc_portbt, "ipc_portbt", false); + +LCK_SPIN_DECLARE_ATTR(ipc_port_multiple_lock_data, &ipc_lck_grp, &ipc_lck_attr); +ipc_port_timestamp_t ipc_port_timestamp_data; #if MACH_ASSERT void ipc_port_init_debug( @@ -368,8 +369,8 @@ ipc_port_request_sparm( ipc_port_t port, __assert_only mach_port_name_t name, ipc_port_request_index_t index, - mach_msg_option_t option, - mach_msg_priority_t override) + mach_msg_option_t option, + mach_msg_priority_t priority) { if (index != IE_REQ_NONE) { ipc_port_request_t ipr, table; @@ -389,7 +390,15 @@ ipc_port_request_sparm( if (option & MACH_SEND_OVERRIDE) { /* apply override to message queue */ - ipc_mqueue_override_send(&port->ip_messages, override); + mach_msg_qos_t qos_ovr; + if (mach_msg_priority_is_pthread_priority(priority)) { + qos_ovr = _pthread_priority_thread_qos(priority); + } else { + qos_ovr = mach_msg_priority_overide_qos(priority); + } + if (qos_ovr) { + ipc_mqueue_override_send(&port->ip_messages, qos_ovr); + } } #if IMPORTANCE_INHERITANCE @@ -674,6 +683,11 @@ ipc_port_init( port->ip_immovable_send = 0; port->ip_impcount = 0; + if (flags & IPC_PORT_INIT_FILTER_MESSAGE) { + port->ip_object.io_bits |= IP_BIT_FILTER_MSG; + } + + port->ip_tg_block_tracking = (flags & IPC_PORT_INIT_TG_BLOCK_TRACKING) != 0; port->ip_specialreply = (flags & IPC_PORT_INIT_SPECIAL_REPLY) != 0; port->ip_sync_link_state = PORT_SYNC_LINK_ANY; port->ip_sync_bootstrap_checkin = 0; @@ -1342,11 +1356,43 @@ ipc_port_watchport_elem(ipc_port_t port) static inline struct task_watchport_elem * ipc_port_update_watchport_elem(ipc_port_t port, struct task_watchport_elem *we) { + assert(!port->ip_specialreply); struct task_watchport_elem *old_we = ipc_port_watchport_elem(port); port->ip_messages.imq_wait_queue.waitq_tspriv = we; return old_we; } +/* + * Routine: ipc_special_reply_stash_pid_locked + * Purpose: + * Set the pid of process that copied out send once right to special reply port. + * + * Conditions: + * port locked + */ +static inline void +ipc_special_reply_stash_pid_locked(ipc_port_t port, int pid) +{ + assert(port->ip_specialreply); + port->ip_messages.imq_wait_queue.waitq_priv_pid = pid; + return; +} + +/* + * Routine: ipc_special_reply_get_pid_locked + * Purpose: + * Get the pid of process that copied out send once right to special reply port. + * + * Conditions: + * port locked + */ +int +ipc_special_reply_get_pid_locked(ipc_port_t port) +{ + assert(port->ip_specialreply); + return port->ip_messages.imq_wait_queue.waitq_priv_pid; +} + /* * Update the recv turnstile inheritor for a port. 
* @@ -1596,6 +1642,7 @@ ipc_port_link_special_reply_port( boolean_t sync_bootstrap_checkin) { boolean_t drop_turnstile_ref = FALSE; + boolean_t special_reply = FALSE; /* Check if dest_port needs a turnstile */ ipc_port_send_turnstile_prepare(dest_port); @@ -1604,12 +1651,14 @@ ipc_port_link_special_reply_port( ip_lock(special_reply_port); imq_lock(&special_reply_port->ip_messages); - if (sync_bootstrap_checkin && special_reply_port->ip_specialreply) { + special_reply = special_reply_port->ip_specialreply; + + if (sync_bootstrap_checkin && special_reply) { special_reply_port->ip_sync_bootstrap_checkin = 1; } /* Check if we need to drop the acquired turnstile ref on dest port */ - if (!special_reply_port->ip_specialreply || + if (!special_reply || special_reply_port->ip_sync_link_state != PORT_SYNC_LINK_ANY || special_reply_port->ip_sync_inheritor_port != IPC_PORT_NULL) { drop_turnstile_ref = TRUE; @@ -1623,6 +1672,15 @@ ipc_port_link_special_reply_port( imq_unlock(&special_reply_port->ip_messages); ip_unlock(special_reply_port); + if (special_reply) { + /* + * For special reply ports, if the destination port is + * marked with the thread group blocked tracking flag, + * callout to the performance controller. + */ + ipc_port_thread_group_blocked(dest_port); + } + if (drop_turnstile_ref) { ipc_port_send_turnstile_complete(dest_port); } @@ -1630,6 +1688,68 @@ ipc_port_link_special_reply_port( return; } +/* + * Routine: ipc_port_thread_group_blocked + * Purpose: + * Call thread_group_blocked callout if the port + * has ip_tg_block_tracking bit set and the thread + * has not made this callout already. + * + * Conditions: + * Nothing is locked. + */ +void +ipc_port_thread_group_blocked(ipc_port_t port __unused) +{ +#if CONFIG_THREAD_GROUPS + bool port_tg_block_tracking = false; + thread_t self = current_thread(); + + if (self->thread_group == NULL || + (self->options & TH_OPT_IPC_TG_BLOCKED)) { + return; + } + + port_tg_block_tracking = port->ip_tg_block_tracking; + if (!port_tg_block_tracking) { + return; + } + + machine_thread_group_blocked(self->thread_group, NULL, + PERFCONTROL_CALLOUT_BLOCKING_TG_RENDER_SERVER, self); + + self->options |= TH_OPT_IPC_TG_BLOCKED; +#endif +} + +/* + * Routine: ipc_port_thread_group_unblocked + * Purpose: + * Call thread_group_unblocked callout if the + * thread had previously made a thread_group_blocked + * callout before (indicated by TH_OPT_IPC_TG_BLOCKED + * flag on the thread). + * + * Conditions: + * Nothing is locked. + */ +void +ipc_port_thread_group_unblocked(void) +{ +#if CONFIG_THREAD_GROUPS + thread_t self = current_thread(); + + if (!(self->options & TH_OPT_IPC_TG_BLOCKED)) { + return; + } + + machine_thread_group_unblocked(self->thread_group, NULL, + PERFCONTROL_CALLOUT_BLOCKING_TG_RENDER_SERVER, self); + + self->options &= ~TH_OPT_IPC_TG_BLOCKED; +#endif +} + #if DEVELOPMENT || DEBUG inline void ipc_special_reply_port_bits_reset(ipc_port_t special_reply_port) @@ -1777,6 +1897,20 @@ not_special: break; } + /* + * Stash (or unstash) the server's PID in the ip_sorights field of the + * special reply port, so that stackshot can later retrieve who the client + * is blocked on. 
+ */ + if (special_reply_port->ip_sync_link_state == PORT_SYNC_LINK_PORT && + sync_link_state == PORT_SYNC_LINK_NO_LINKAGE) { + ipc_special_reply_stash_pid_locked(special_reply_port, pid_from_task(current_task())); + } else if (special_reply_port->ip_sync_link_state == PORT_SYNC_LINK_NO_LINKAGE && + sync_link_state == PORT_SYNC_LINK_ANY) { + /* If we are resetting the special reply port, remove the stashed pid. */ + ipc_special_reply_stash_pid_locked(special_reply_port, 0); + } + special_reply_port->ip_sync_link_state = sync_link_state; switch (sync_link_state) { @@ -1866,8 +2000,7 @@ ipc_port_adjust_sync_link_state_locked( case PORT_SYNC_LINK_RCV_THREAD: /* deallocate the thread reference for the inheritor */ thread_deallocate_safe(port->ip_messages.imq_inheritor_thread_ref); - /* Fall through */ - + OS_FALLTHROUGH; default: klist_init(&port->ip_messages.imq_klist); } @@ -2083,6 +2216,10 @@ ipc_port_clear_watchport_elem_internal( ip_lock_held(port); imq_held(&port->ip_messages); + if (port->ip_specialreply) { + return NULL; + } + return ipc_port_update_watchport_elem(port, NULL); } @@ -2824,7 +2961,8 @@ kdp_mqueue_send_find_owner(struct waitq * waitq, __assert_only event64_t event, turnstile = waitq_to_turnstile(waitq); ipc_port_t port = (ipc_port_t)turnstile->ts_proprietor; /* we are blocking on send */ - assert(kdp_is_in_zone(port, "ipc ports")); + + zone_id_require(ZONE_ID_IPC_PORT, sizeof(struct ipc_port), port); waitinfo->owner = 0; waitinfo->context = VM_KERNEL_UNSLIDE_OR_PERM(port); @@ -2888,7 +3026,8 @@ kdp_mqueue_recv_find_owner(struct waitq * waitq, __assert_only event64_t event, waitinfo->owner = 0; if (imq_is_set(mqueue)) { /* we are waiting on a port set */ ipc_pset_t set = ips_from_mq(mqueue); - assert(kdp_is_in_zone(set, "ipc port sets")); + + zone_id_require(ZONE_ID_IPC_PORT_SET, sizeof(struct ipc_pset), set); /* Reset wait type to specify waiting on port set receive */ waitinfo->wait_type = kThreadWaitPortSetReceive; @@ -2899,7 +3038,8 @@ kdp_mqueue_recv_find_owner(struct waitq * waitq, __assert_only event64_t event, /* There is no specific owner "at the other end" of a port set, so leave unset. */ } else { ipc_port_t port = ip_from_mq(mqueue); - assert(kdp_is_in_zone(port, "ipc ports")); + + zone_id_require(ZONE_ID_IPC_PORT, sizeof(struct ipc_port), port); waitinfo->context = VM_KERNEL_UNSLIDE_OR_PERM(port); if (ip_lock_held_kdp(port)) { @@ -2926,8 +3066,8 @@ kdp_mqueue_recv_find_owner(struct waitq * waitq, __assert_only event64_t event, * deallocation is intercepted via io_free. */ #if 0 -queue_head_t port_alloc_queue; -lck_spin_t port_alloc_queue_lock; +queue_head_t port_alloc_queue = QUEUE_HEAD_INITIALIZER(port_alloc_queue); +LCK_SPIN_DECLARE(port_alloc_queue_lock, &ipc_lck_grp, &ipc_lck_attr); #endif unsigned long port_count = 0; @@ -2944,23 +3084,6 @@ int db_port_walk( unsigned int ref_search, unsigned int ref_target); -/* - * Initialize global state needed for run-time - * port debugging. 
- */ -void -ipc_port_debug_init(void) -{ -#if 0 - queue_init(&port_alloc_queue); - lck_spin_init(&port_alloc_queue_lock, &ipc_lck_grp, &ipc_lck_attr); -#endif - - if (!PE_parse_boot_argn("ipc_portbt", &ipc_portbt, sizeof(ipc_portbt))) { - ipc_portbt = 0; - } -} - #ifdef MACH_BSD extern int proc_pid(struct proc*); #endif /* MACH_BSD */ diff --git a/osfmk/ipc/ipc_port.h b/osfmk/ipc/ipc_port.h index b9acef764..6af6e0673 100644 --- a/osfmk/ipc/ipc_port.h +++ b/osfmk/ipc/ipc_port.h @@ -122,14 +122,15 @@ struct ipc_port { struct ipc_mqueue ip_messages; union { - struct ipc_space *receiver; - struct ipc_port *destination; + struct ipc_space * receiver; + struct ipc_port * destination; ipc_port_timestamp_t timestamp; } data; + /* update host_request_notification if this union is changed */ union { - ipc_kobject_t kobject; - ipc_kobject_label_t kolabel; + ipc_kobject_t XNU_PTRAUTH_SIGNED_PTR("ipc_port.kobject") kobject; + ipc_kobject_label_t XNU_PTRAUTH_SIGNED_PTR("ipc_port.kolabel") kolabel; ipc_importance_task_t imp_task; ipc_port_t sync_inheritor_port; struct knote *sync_inheritor_knote; @@ -158,7 +159,8 @@ struct ipc_port { ip_immovable_receive:1, /* the receive right cannot be moved out of a space, until it is destroyed */ ip_no_grant:1, /* Port wont accept complex messages containing (ool) port descriptors */ ip_immovable_send:1, /* No send(once) rights to this port can be moved out of a space */ - ip_impcount:18; /* number of importance donations in nested queue */ + ip_tg_block_tracking:1, /* Track blocking relationship between thread groups during sync IPC */ + ip_impcount:17; /* number of importance donations in nested queue */ mach_port_mscount_t ip_mscount; mach_port_rights_t ip_srights; @@ -290,6 +292,8 @@ MACRO_END #define ip_full_kernel(port) imq_full_kernel(&(port)->ip_messages) #define ip_full(port) imq_full(&(port)->ip_messages) +/* Bits reserved in IO_BITS_PORT_INFO are defined here */ + /* * JMM - Preallocation flag * This flag indicates that there is a message buffer preallocated for this @@ -314,6 +318,13 @@ MACRO_BEGIN \ (port)->ip_premsg = IKM_NULL; \ MACRO_END +/* + * This flag indicates that the port has opted into message filtering based + * on a policy defined in the Sandbox. 
+ */ +#define IP_BIT_FILTER_MSG 0x00001000 +#define ip_enforce_msg_filtering(port) (((port)->ip_object.io_bits & IP_BIT_FILTER_MSG) != 0) + /* JMM - address alignment/packing for LP64 */ struct ipc_port_request { union { @@ -355,9 +366,6 @@ extern lck_attr_t ipc_lck_attr; extern lck_spin_t ipc_port_multiple_lock_data; -#define ipc_port_multiple_lock_init() \ - lck_spin_init(&ipc_port_multiple_lock_data, &ipc_lck_grp, &ipc_lck_attr) - #define ipc_port_multiple_lock() \ lck_spin_lock_grp(&ipc_port_multiple_lock_data, &ipc_lck_grp) @@ -458,7 +466,7 @@ extern boolean_t ipc_port_request_sparm( mach_port_name_t name, ipc_port_request_index_t index, mach_msg_option_t option, - mach_msg_priority_t override); + mach_msg_priority_t priority); /* Make a port-deleted request */ extern void ipc_port_pdrequest( @@ -479,10 +487,12 @@ extern boolean_t ipc_port_clear_receiver( boolean_t should_destroy); __options_decl(ipc_port_init_flags_t, uint32_t, { - IPC_PORT_INIT_NONE = 0x00000000, - IPC_PORT_INIT_MAKE_SEND_RIGHT = 0x00000001, - IPC_PORT_INIT_MESSAGE_QUEUE = 0x00000002, - IPC_PORT_INIT_SPECIAL_REPLY = 0x00000004, + IPC_PORT_INIT_NONE = 0x00000000, + IPC_PORT_INIT_MAKE_SEND_RIGHT = 0x00000001, + IPC_PORT_INIT_MESSAGE_QUEUE = 0x00000002, + IPC_PORT_INIT_SPECIAL_REPLY = 0x00000004, + IPC_PORT_INIT_FILTER_MESSAGE = 0x00000008, + IPC_PORT_INIT_TG_BLOCK_TRACKING = 0x00000010, }); /* Initialize a newly-allocated port */ @@ -660,6 +670,11 @@ extern mach_port_name_t ipc_port_copyout_send( ipc_port_t sright, ipc_space_t space); +extern void ipc_port_thread_group_blocked( + ipc_port_t port); + +extern void ipc_port_thread_group_unblocked(void); + #endif /* MACH_KERNEL_PRIVATE */ #if KERNEL_PRIVATE @@ -712,9 +727,6 @@ extern void ipc_port_dealloc_special( /* Track low-level port deallocation */ extern void ipc_port_track_dealloc( ipc_port_t port); - -/* Initialize general port debugging state */ -extern void ipc_port_debug_init(void); #endif /* MACH_ASSERT */ extern void ipc_port_recv_update_inheritor(ipc_port_t port, @@ -725,6 +737,9 @@ extern void ipc_port_send_update_inheritor(ipc_port_t port, struct turnstile *turnstile, turnstile_update_flags_t flags); +extern int +ipc_special_reply_get_pid_locked(ipc_port_t port); + #define ipc_port_alloc_kernel() \ ipc_port_alloc_special(ipc_space_kernel, IPC_PORT_INIT_NONE) #define ipc_port_dealloc_kernel(port) \ diff --git a/osfmk/ipc/ipc_pset.c b/osfmk/ipc/ipc_pset.c index e73364b48..7f247520f 100644 --- a/osfmk/ipc/ipc_pset.c +++ b/osfmk/ipc/ipc_pset.c @@ -77,6 +77,7 @@ #include #include +#include /* * Routine: ipc_pset_alloc @@ -414,8 +415,7 @@ static int filt_machport_adjust_qos(struct knote *kn, ipc_kmsg_t first) { if (kn->kn_sfflags & MACH_RCV_MSG) { - int qos = _pthread_priority_thread_qos(first->ikm_qos_override); - return FILTER_ADJUST_EVENT_QOS(qos); + return FILTER_ADJUST_EVENT_QOS(first->ikm_qos_override); } return 0; } @@ -1206,8 +1206,8 @@ filt_machportprocess(struct knote *kn, struct kevent_qos_s *kev) * QoS values in the continuation save area on successful receive. */ if (kev->fflags == MACH_MSG_SUCCESS) { - kev->ext[2] = ((uint64_t)self->ith_qos << 32) | - (uint64_t)self->ith_qos_override; + kev->ext[2] = ((uint64_t)self->ith_ppriority << 32) | + _pthread_priority_make_from_thread_qos(self->ith_qos_override, 0, 0); } return FILTER_ACTIVE; diff --git a/osfmk/ipc/ipc_space.c b/osfmk/ipc/ipc_space.c index 3d2a0dc13..82eeb6a90 100644 --- a/osfmk/ipc/ipc_space.c +++ b/osfmk/ipc/ipc_space.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2004 Apple Computer, Inc. 
All rights reserved. + * Copyright (c) 2000-2020 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -92,7 +92,9 @@ #define NUM_SEQ_ENTRIES 8 #endif -zone_t ipc_space_zone; +ZONE_DECLARE(ipc_space_zone, "ipc spaces", + sizeof(struct ipc_space), ZC_NOENCRYPT); + ipc_space_t ipc_space_kernel; ipc_space_t ipc_space_reply; diff --git a/osfmk/ipc/ipc_space.h b/osfmk/ipc/ipc_space.h index 8a4466a4d..9b611f963 100644 --- a/osfmk/ipc/ipc_space.h +++ b/osfmk/ipc/ipc_space.h @@ -118,9 +118,9 @@ struct ipc_space { ipc_entry_num_t is_table_size; /* current size of table */ ipc_entry_num_t is_table_hashed;/* count of hashed elements */ ipc_entry_num_t is_table_free; /* count of free elements */ - ipc_entry_t is_table; /* an array of entries */ - struct ipc_table_size *is_table_next; /* info for larger table */ - task_t is_task; /* associated task */ + ipc_entry_t XNU_PTRAUTH_SIGNED_PTR("ipc_space.is_table") is_table; /* an array of entries */ + struct ipc_table_size * XNU_PTRAUTH_SIGNED_PTR("ipc_space.is_table_next") is_table_next; /* info for larger table */ + task_t XNU_PTRAUTH_SIGNED_PTR("ipc_space.is_task") is_task; /* associated task */ ipc_label_t is_label; /* [private] mandatory access label */ ipc_entry_num_t is_low_mod; /* lowest modified entry during growth */ ipc_entry_num_t is_high_mod; /* highest modified entry during growth */ @@ -154,7 +154,7 @@ static inline void is_done_growing(ipc_space_t is) { assert(is_growing(is)); - OSBitAndAtomic(~IS_GROWING, &is->is_bits); + OSBitAndAtomic((ipc_space_refs_t)~IS_GROWING, &is->is_bits); } extern zone_t ipc_space_zone; diff --git a/osfmk/ipc/ipc_table.c b/osfmk/ipc/ipc_table.c index f903101ff..603b6e3ff 100644 --- a/osfmk/ipc/ipc_table.c +++ b/osfmk/ipc/ipc_table.c @@ -71,11 +71,11 @@ #include #include -ipc_table_size_t ipc_table_entries = NULL; -unsigned int ipc_table_entries_size = CONFIG_IPC_TABLE_ENTRIES_STEPS; +#define IPC_TABLE_ENTRIES_SIZE CONFIG_IPC_TABLE_ENTRIES_STEPS +SECURITY_READ_ONLY_LATE(struct ipc_table_size) ipc_table_entries[IPC_TABLE_ENTRIES_SIZE]; -ipc_table_size_t ipc_table_requests; -unsigned int ipc_table_requests_size = 64; +#define IPC_TABLE_REQUESTS_SIZE 64 +SECURITY_READ_ONLY_LATE(struct ipc_table_size) ipc_table_requests[IPC_TABLE_REQUESTS_SIZE]; static void ipc_table_fill( @@ -119,38 +119,30 @@ ipc_table_fill( } } -void +__startup_func +static void ipc_table_init(void) { - ipc_table_entries = (ipc_table_size_t) - kalloc(sizeof(struct ipc_table_size) * - ipc_table_entries_size); - assert(ipc_table_entries != ITS_NULL); - - ipc_table_fill(ipc_table_entries, ipc_table_entries_size - 1, + ipc_table_fill(ipc_table_entries, IPC_TABLE_ENTRIES_SIZE - 1, 16, sizeof(struct ipc_entry)); /* the last two elements should have the same size */ - ipc_table_entries[ipc_table_entries_size - 1].its_size = - ipc_table_entries[ipc_table_entries_size - 2].its_size; + ipc_table_entries[IPC_TABLE_ENTRIES_SIZE - 1].its_size = + ipc_table_entries[IPC_TABLE_ENTRIES_SIZE - 2].its_size; /* make sure the robin hood hashing in ipc hash will work */ - assert(ipc_table_entries[ipc_table_entries_size - 1].its_size <= + assert(ipc_table_entries[IPC_TABLE_ENTRIES_SIZE - 1].its_size <= IPC_ENTRY_INDEX_MAX); - ipc_table_requests = (ipc_table_size_t) - kalloc(sizeof(struct ipc_table_size) * - ipc_table_requests_size); - assert(ipc_table_requests != ITS_NULL); - - ipc_table_fill(ipc_table_requests, ipc_table_requests_size - 1, + ipc_table_fill(ipc_table_requests, IPC_TABLE_REQUESTS_SIZE - 1, 2, sizeof(struct 
ipc_port_request)); /* the last element should have zero size */ - ipc_table_requests[ipc_table_requests_size - 1].its_size = 0; + ipc_table_requests[IPC_TABLE_REQUESTS_SIZE - 1].its_size = 0; } +STARTUP(MACH_IPC, STARTUP_RANK_FIRST, ipc_table_init); /* @@ -164,10 +156,8 @@ ipc_table_init(void) unsigned int ipc_table_max_entries(void) { - if (!ipc_table_entries || ipc_table_entries_size < 2) { - return 0; - } - return (unsigned int)ipc_table_entries[ipc_table_entries_size - 1].its_size; + static_assert(IPC_TABLE_ENTRIES_SIZE >= 1); + return (unsigned int)ipc_table_entries[IPC_TABLE_ENTRIES_SIZE - 1].its_size; } @@ -182,10 +172,8 @@ ipc_table_max_entries(void) unsigned int ipc_table_max_requests(void) { - if (!ipc_table_requests || ipc_table_requests_size < 2) { - return 0; - } - return (unsigned int)ipc_table_requests[ipc_table_requests_size - 2].its_size; + static_assert(IPC_TABLE_REQUESTS_SIZE >= 2); + return (unsigned int)ipc_table_requests[IPC_TABLE_REQUESTS_SIZE - 2].its_size; } diff --git a/osfmk/ipc/ipc_table.h b/osfmk/ipc/ipc_table.h index 28e79b356..3c67dbba3 100644 --- a/osfmk/ipc/ipc_table.h +++ b/osfmk/ipc/ipc_table.h @@ -105,11 +105,8 @@ struct ipc_table_size { ipc_table_elems_t its_size; /* number of elements in table */ }; -extern ipc_table_size_t ipc_table_entries; -extern ipc_table_size_t ipc_table_requests; - -/* Initialize IPC capabilities table storage */ -extern void ipc_table_init(void); +extern struct ipc_table_size ipc_table_entries[]; +extern struct ipc_table_size ipc_table_requests[]; /* * Note that ipc_table_alloc and ipc_table_free diff --git a/osfmk/ipc/ipc_voucher.c b/osfmk/ipc/ipc_voucher.c index bc739faa3..d37ce1b37 100644 --- a/osfmk/ipc/ipc_voucher.c +++ b/osfmk/ipc/ipc_voucher.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013 Apple Inc. All rights reserved. + * Copyright (c) 2013-2020 Apple Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -50,8 +50,16 @@ */ uint32_t ipc_voucher_trace_contents = 0; -static zone_t ipc_voucher_zone; -static zone_t ipc_voucher_attr_control_zone; +static SECURITY_READ_ONLY_LATE(zone_t) ipc_voucher_zone; +static ZONE_DECLARE(ipc_voucher_attr_control_zone, "ipc voucher attr controls", + sizeof(struct ipc_voucher_attr_control), ZC_NOENCRYPT | ZC_ZFREE_CLEARMEM); + +ZONE_INIT(&ipc_voucher_zone, "ipc vouchers", sizeof(struct ipc_voucher), + ZC_NOENCRYPT | ZC_ZFREE_CLEARMEM | ZC_NOSEQUESTER, + ZONE_ID_IPC_VOUCHERS, NULL); + +#define voucher_require(v) \ + zone_id_require(ZONE_ID_IPC_VOUCHERS, sizeof(struct ipc_voucher), v) /* * Voucher hash table @@ -60,11 +68,9 @@ static zone_t ipc_voucher_attr_control_zone; #define IV_HASH_BUCKET(x) ((x) % IV_HASH_BUCKETS) static queue_head_t ivht_bucket[IV_HASH_BUCKETS]; -static lck_spin_t ivht_lock_data; +static LCK_SPIN_DECLARE_ATTR(ivht_lock_data, &ipc_lck_grp, &ipc_lck_attr); static uint32_t ivht_count = 0; -#define ivht_lock_init() \ - lck_spin_init(&ivht_lock_data, &ipc_lck_grp, &ipc_lck_attr) #define ivht_lock_destroy() \ lck_spin_destroy(&ivht_lock_data, &ipc_lck_grp) #define ivht_lock() \ @@ -83,10 +89,8 @@ static uint32_t ivht_count = 0; */ static iv_index_t ivgt_keys_in_use = MACH_VOUCHER_ATTR_KEY_NUM_WELL_KNOWN; static ipc_voucher_global_table_element iv_global_table[MACH_VOUCHER_ATTR_KEY_NUM_WELL_KNOWN]; -static lck_spin_t ivgt_lock_data; +static LCK_SPIN_DECLARE_ATTR(ivgt_lock_data, &ipc_lck_grp, &ipc_lck_attr); -#define ivgt_lock_init() \ - lck_spin_init(&ivgt_lock_data, &ipc_lck_grp, &ipc_lck_attr) #define ivgt_lock_destroy() \ lck_spin_destroy(&ivgt_lock_data, &ipc_lck_grp) #define ivgt_lock() \ @@ -192,44 +196,16 @@ ipc_voucher_prepare_processing_recipe( ipc_voucher_attr_manager_flags flags, int *need_processing); -#if defined(MACH_VOUCHER_ATTR_KEY_USER_DATA) || defined(MACH_VOUCHER_ATTR_KEY_TEST) -void user_data_attr_manager_init(void); -#endif - -void +__startup_func +static void ipc_voucher_init(void) { - natural_t ipc_voucher_max = (task_max + thread_max) * 2; - natural_t attr_manager_max = MACH_VOUCHER_ATTR_KEY_NUM_WELL_KNOWN; - iv_index_t i; - - ipc_voucher_zone = zinit(sizeof(struct ipc_voucher), - ipc_voucher_max * sizeof(struct ipc_voucher), - sizeof(struct ipc_voucher), - "ipc vouchers"); - zone_change(ipc_voucher_zone, Z_NOENCRYPT, TRUE); - zone_change(ipc_voucher_zone, Z_CLEARMEMORY, TRUE); - - ipc_voucher_attr_control_zone = zinit(sizeof(struct ipc_voucher_attr_control), - attr_manager_max * sizeof(struct ipc_voucher_attr_control), - sizeof(struct ipc_voucher_attr_control), - "ipc voucher attr controls"); - zone_change(ipc_voucher_attr_control_zone, Z_NOENCRYPT, TRUE); - zone_change(ipc_voucher_attr_control_zone, Z_CLEARMEMORY, TRUE); - /* initialize voucher hash */ - ivht_lock_init(); - for (i = 0; i < IV_HASH_BUCKETS; i++) { + for (iv_index_t i = 0; i < IV_HASH_BUCKETS; i++) { queue_init(&ivht_bucket[i]); } - - /* initialize global table locking */ - ivgt_lock_init(); - -#if defined(MACH_VOUCHER_ATTR_KEY_USER_DATA) || defined(MACH_VOUCHER_ATTR_KEY_TEST) - user_data_attr_manager_init(); -#endif } +STARTUP(MACH_IPC, STARTUP_RANK_FIRST, ipc_voucher_init); ipc_voucher_t iv_alloc(iv_index_t entries) @@ -382,20 +358,27 @@ unsafe_convert_port_to_voucher( return (uintptr_t)IV_NULL; } - uintptr_t voucher = (uintptr_t)port->ip_kobject; - /* * No need to lock because we have a reference on the * port, and if it is a true voucher port, that reference * keeps the voucher bound to the port (and 
active). */ if (ip_kotype(port) == IKOT_VOUCHER) { - return voucher; + return (uintptr_t)port->ip_kobject; } } return (uintptr_t)IV_NULL; } +static ipc_voucher_t +ip_get_voucher(ipc_port_t port) +{ + ipc_voucher_t voucher = (ipc_voucher_t)ip_get_kobject(port); + require_ip_active(port); + voucher_require(voucher); + return voucher; +} + /* * Routine: convert_port_to_voucher * Purpose: @@ -410,22 +393,13 @@ ipc_voucher_t convert_port_to_voucher( ipc_port_t port) { - if (IP_VALID(port)) { - zone_require(port, ipc_object_zones[IOT_PORT]); - ipc_voucher_t voucher = (ipc_voucher_t) ip_get_kobject(port); - + if (IP_VALID(port) && ip_kotype(port) == IKOT_VOUCHER) { /* * No need to lock because we have a reference on the * port, and if it is a true voucher port, that reference * keeps the voucher bound to the port (and active). */ - if (ip_kotype(port) != IKOT_VOUCHER) { - return IV_NULL; - } - - require_ip_active(port); - - zone_require(voucher, ipc_voucher_zone); + ipc_voucher_t voucher = ip_get_voucher(port); ipc_voucher_reference(voucher); return voucher; } @@ -492,13 +466,11 @@ ipc_voucher_notify(mach_msg_header_t *msg) { mach_no_senders_notification_t *notification = (void *)msg; ipc_port_t port = notification->not_header.msgh_remote_port; - ipc_voucher_t voucher = (ipc_voucher_t)ip_get_kobject(port); + ipc_voucher_t voucher = ip_get_voucher(port); - require_ip_active(port); assert(IKOT_VOUCHER == ip_kotype(port)); /* consume the reference donated by convert_voucher_to_port */ - zone_require(voucher, ipc_voucher_zone); ipc_voucher_release(voucher); } @@ -512,7 +484,7 @@ convert_voucher_to_port(ipc_voucher_t voucher) return IP_NULL; } - zone_require(voucher, ipc_voucher_zone); + voucher_require(voucher); assert(os_ref_get_count(&voucher->iv_refs) > 0); /* @@ -520,7 +492,7 @@ convert_voucher_to_port(ipc_voucher_t voucher) * if this is the first send right */ if (!ipc_kobject_make_send_lazy_alloc_port(&voucher->iv_port, - (ipc_kobject_t)voucher, IKOT_VOUCHER)) { + (ipc_kobject_t)voucher, IKOT_VOUCHER, false, 0)) { ipc_voucher_release(voucher); } return voucher->iv_port; @@ -676,7 +648,6 @@ convert_port_to_voucher_attr_control( ipc_port_t port) { if (IP_VALID(port)) { - zone_require(port, ipc_object_zones[IOT_PORT]); ipc_voucher_attr_control_t ivac = (ipc_voucher_attr_control_t) ip_get_kobject(port); /* @@ -690,7 +661,7 @@ convert_port_to_voucher_attr_control( } require_ip_active(port); - zone_require(ivac, ipc_voucher_attr_control_zone); + zone_require(ipc_voucher_attr_control_zone, ivac); ivac_reference(ivac); return ivac; } @@ -715,7 +686,6 @@ ipc_voucher_attr_control_notify(mach_msg_header_t *msg) /* release the reference donated by convert_voucher_attr_control_to_port */ ivac = (ipc_voucher_attr_control_t)ip_get_kobject(port); - zone_require(ivac, ipc_voucher_attr_control_zone); ivac_release(ivac); } @@ -729,14 +699,14 @@ convert_voucher_attr_control_to_port(ipc_voucher_attr_control_t control) return IP_NULL; } - zone_require(control, ipc_voucher_attr_control_zone); + zone_require(ipc_voucher_attr_control_zone, control); /* * make a send right and donate our reference for * ipc_voucher_attr_control_notify if this is the first send right */ if (!ipc_kobject_make_send_lazy_alloc_port(&control->ivac_port, - (ipc_kobject_t)control, IKOT_VOUCHER_ATTR_CONTROL)) { + (ipc_kobject_t)control, IKOT_VOUCHER_ATTR_CONTROL, false, 0)) { ivac_release(control); } return control->ivac_port; @@ -1517,7 +1487,7 @@ ipc_execute_voucher_recipe_command( } break; } - /* fall thru for single key redemption */ + 
OS_FALLTHROUGH; /* fall thru for single key redemption */ /* * DEFAULT: @@ -2647,7 +2617,7 @@ ipc_get_pthpriority_from_kmsg_voucher( return KERN_FAILURE; } - pthread_priority_voucher = (ipc_voucher_t)ip_get_kobject(kmsg->ikm_voucher); + pthread_priority_voucher = ip_get_voucher(kmsg->ikm_voucher); kr = mach_voucher_extract_attr_recipe(pthread_priority_voucher, MACH_VOUCHER_ATTR_KEY_PTHPRIORITY, content_data, @@ -2692,7 +2662,7 @@ ipc_voucher_send_preprocessing(ipc_kmsg_t kmsg) } /* setup recipe for preprocessing of all the attributes. */ - pre_processed_voucher = (ipc_voucher_t)ip_get_kobject(kmsg->ikm_voucher); + pre_processed_voucher = ip_get_voucher(kmsg->ikm_voucher); kr = ipc_voucher_prepare_processing_recipe(pre_processed_voucher, (mach_voucher_attr_raw_recipe_array_t)recipes, @@ -2741,7 +2711,7 @@ ipc_voucher_receive_postprocessing( } /* setup recipe for auto redeem of all the attributes. */ - sent_voucher = (ipc_voucher_t)ip_get_kobject(kmsg->ikm_voucher); + sent_voucher = ip_get_voucher(kmsg->ikm_voucher); kr = ipc_voucher_prepare_processing_recipe(sent_voucher, (mach_voucher_attr_raw_recipe_array_t)recipes, @@ -2918,10 +2888,8 @@ typedef struct user_data_value_element *user_data_element_t; #define USER_DATA_HASH_BUCKET(x) ((x) % USER_DATA_HASH_BUCKETS) static queue_head_t user_data_bucket[USER_DATA_HASH_BUCKETS]; -static lck_spin_t user_data_lock_data; +static LCK_SPIN_DECLARE_ATTR(user_data_lock_data, &ipc_lck_grp, &ipc_lck_attr); -#define user_data_lock_init() \ - lck_spin_init(&user_data_lock_data, &ipc_lck_grp, &ipc_lck_attr) #define user_data_lock_destroy() \ lck_spin_destroy(&user_data_lock_data, &ipc_lck_grp) #define user_data_lock() \ @@ -3254,48 +3222,36 @@ user_data_release( panic("Voucher user-data manager released"); } -static int user_data_manager_inited = 0; - -void -user_data_attr_manager_init() +__startup_func +static void +user_data_attr_manager_init(void) { kern_return_t kr; #if defined(MACH_VOUCHER_ATTR_KEY_USER_DATA) - if ((user_data_manager_inited & 0x1) != 0x1) { - kr = ipc_register_well_known_mach_voucher_attr_manager(&user_data_manager, - (mach_voucher_attr_value_handle_t)0, - MACH_VOUCHER_ATTR_KEY_USER_DATA, - &user_data_control); - if (KERN_SUCCESS != kr) { - printf("Voucher user-data manager register(USER-DATA) returned %d", kr); - } else { - user_data_manager_inited |= 0x1; - } + kr = ipc_register_well_known_mach_voucher_attr_manager(&user_data_manager, + (mach_voucher_attr_value_handle_t)0, + MACH_VOUCHER_ATTR_KEY_USER_DATA, + &user_data_control); + if (KERN_SUCCESS != kr) { + printf("Voucher user-data manager register(USER-DATA) returned %d", kr); } #endif #if defined(MACH_VOUCHER_ATTR_KEY_TEST) - if ((user_data_manager_inited & 0x2) != 0x2) { - kr = ipc_register_well_known_mach_voucher_attr_manager(&user_data_manager, - (mach_voucher_attr_value_handle_t)0, - MACH_VOUCHER_ATTR_KEY_TEST, - &test_control); - if (KERN_SUCCESS != kr) { - printf("Voucher user-data manager register(TEST) returned %d", kr); - } else { - user_data_manager_inited |= 0x2; - } + kr = ipc_register_well_known_mach_voucher_attr_manager(&user_data_manager, + (mach_voucher_attr_value_handle_t)0, + MACH_VOUCHER_ATTR_KEY_TEST, + &test_control); + if (KERN_SUCCESS != kr) { + printf("Voucher user-data manager register(TEST) returned %d", kr); } #endif #if defined(MACH_VOUCHER_ATTR_KEY_USER_DATA) || defined(MACH_VOUCHER_ATTR_KEY_TEST) - int i; - - for (i = 0; i < USER_DATA_HASH_BUCKETS; i++) { + for (int i = 0; i < USER_DATA_HASH_BUCKETS; i++) { queue_init(&user_data_bucket[i]); } - - 
user_data_lock_init(); #endif } +STARTUP(MACH_IPC, STARTUP_RANK_FIRST, user_data_attr_manager_init); -#endif /* MACH_DEBUG */ +#endif /* MACH_VOUCHER_ATTR_KEY_USER_DATA || MACH_VOUCHER_ATTR_KEY_TEST */ diff --git a/osfmk/ipc/ipc_voucher.h b/osfmk/ipc/ipc_voucher.h index 248fda4c4..55efcf30e 100644 --- a/osfmk/ipc/ipc_voucher.h +++ b/osfmk/ipc/ipc_voucher.h @@ -45,8 +45,6 @@ extern lck_grp_t ipc_lck_grp; extern lck_attr_t ipc_lck_attr; -extern void ipc_voucher_init(void); - /* some shorthand for longer types */ typedef mach_voucher_attr_value_handle_t iv_value_handle_t; typedef mach_voucher_attr_value_reference_t iv_value_refs_t; diff --git a/osfmk/ipc/mach_debug.c b/osfmk/ipc/mach_debug.c index baa3b96d2..903fc3bde 100644 --- a/osfmk/ipc/mach_debug.c +++ b/osfmk/ipc/mach_debug.c @@ -158,8 +158,8 @@ mach_port_get_srights( #if !MACH_IPC_DEBUG kern_return_t -mach_port_space_info( - __unused ipc_space_t space, +mach_port_space_info_from_user( + __unused mach_port_t port, __unused ipc_info_space_t *infop, __unused ipc_info_name_array_t *tablep, __unused mach_msg_type_number_t *tableCntp, @@ -168,7 +168,17 @@ mach_port_space_info( { return KERN_FAILURE; } + #else +kern_return_t +mach_port_space_info( + ipc_space_t space, + ipc_info_space_t *infop, + ipc_info_name_array_t *tablep, + mach_msg_type_number_t *tableCntp, + __unused ipc_info_tree_name_array_t *treep, + __unused mach_msg_type_number_t *treeCntp); + kern_return_t mach_port_space_info( ipc_space_t space, @@ -306,6 +316,29 @@ mach_port_space_info( *treeCntp = 0; return KERN_SUCCESS; } + +kern_return_t +mach_port_space_info_from_user( + mach_port_t port, + ipc_info_space_t *infop, + ipc_info_name_array_t *tablep, + mach_msg_type_number_t *tableCntp, + __unused ipc_info_tree_name_array_t *treep, + __unused mach_msg_type_number_t *treeCntp) +{ + kern_return_t kr; + + ipc_space_t space = convert_port_to_space_check_type(port, NULL, TASK_FLAVOR_READ, FALSE); + + if (space == IPC_SPACE_NULL) { + return KERN_INVALID_ARGUMENT; + } + + kr = mach_port_space_info(space, infop, tablep, tableCntp, treep, treeCntp); + + ipc_space_release(space); + return kr; +} #endif /* MACH_IPC_DEBUG */ /* @@ -453,16 +486,34 @@ mach_port_dnrequest_info( #if !MACH_IPC_DEBUG kern_return_t -mach_port_kobject_description( - __unused ipc_space_t space, +mach_port_kobject_from_user( + __unused mach_port_t port, + __unused mach_port_name_t name, + __unused natural_t *typep, + __unused mach_vm_address_t *addrp) +{ + return KERN_FAILURE; +} + +kern_return_t +mach_port_kobject_description_from_user( + __unused mach_port_t port, __unused mach_port_name_t name, __unused natural_t *typep, __unused mach_vm_address_t *addrp, - __unused kobject_description_t desc) + __unused kobject_description_t des) { return KERN_FAILURE; } #else +kern_return_t +mach_port_kobject_description( + ipc_space_t space, + mach_port_name_t name, + natural_t *typep, + mach_vm_address_t *addrp, + kobject_description_t desc); + kern_return_t mach_port_kobject_description( ipc_space_t space, @@ -475,6 +526,7 @@ mach_port_kobject_description( ipc_port_t port; kern_return_t kr; mach_vm_address_t kaddr; + io_object_t obj = NULL; if (space == IS_NULL) { return KERN_INVALID_TASK; @@ -505,17 +557,10 @@ mach_port_kobject_description( *typep = (unsigned int) ip_kotype(port); kaddr = (mach_vm_address_t)ip_get_kobject(port); *addrp = 0; -#if (DEVELOPMENT || DEBUG) - if (kaddr && ip_is_kobject(port)) { - *addrp = VM_KERNEL_UNSLIDE_OR_PERM(kaddr); - } -#endif - io_object_t obj = NULL; - natural_t kotype = 
ip_kotype(port); if (desc) { *desc = '\0'; - switch (kotype) { + switch (ip_kotype(port)) { case IKOT_IOKIT_OBJECT: case IKOT_IOKIT_CONNECT: case IKOT_IOKIT_IDENT: @@ -528,6 +573,9 @@ mach_port_kobject_description( break; } } +#if (DEVELOPMENT || DEBUG) + *addrp = VM_KERNEL_UNSLIDE_OR_PERM(kaddr); +#endif ip_unlock(port); @@ -538,7 +586,13 @@ mach_port_kobject_description( return KERN_SUCCESS; } -#endif /* MACH_IPC_DEBUG */ + +kern_return_t +mach_port_kobject( + ipc_space_t space, + mach_port_name_t name, + natural_t *typep, + mach_vm_address_t *addrp); kern_return_t mach_port_kobject( @@ -550,6 +604,40 @@ mach_port_kobject( return mach_port_kobject_description(space, name, typep, addrp, NULL); } +kern_return_t +mach_port_kobject_description_from_user( + mach_port_t port, + mach_port_name_t name, + natural_t *typep, + mach_vm_address_t *addrp, + kobject_description_t desc) +{ + kern_return_t kr; + + ipc_space_t space = convert_port_to_space_check_type(port, NULL, TASK_FLAVOR_READ, FALSE); + + if (space == IPC_SPACE_NULL) { + return KERN_INVALID_ARGUMENT; + } + + kr = mach_port_kobject_description(space, name, typep, addrp, desc); + + ipc_space_release(space); + return kr; +} + +kern_return_t +mach_port_kobject_from_user( + mach_port_t port, + mach_port_name_t name, + natural_t *typep, + mach_vm_address_t *addrp) +{ + return mach_port_kobject_description_from_user(port, name, typep, addrp, NULL); +} + +#endif /* MACH_IPC_DEBUG */ + /* * Routine: mach_port_kernel_object [Legacy kernel call] * Purpose: @@ -570,8 +658,8 @@ mach_port_kobject( #if !MACH_IPC_DEBUG kern_return_t -mach_port_kernel_object( - __unused ipc_space_t space, +mach_port_kernel_object_from_user( + __unused mach_port_t port, __unused mach_port_name_t name, __unused unsigned int *typep, __unused unsigned int *addrp) @@ -579,6 +667,13 @@ mach_port_kernel_object( return KERN_FAILURE; } #else +kern_return_t +mach_port_kernel_object( + ipc_space_t space, + mach_port_name_t name, + unsigned int *typep, + unsigned int *addrp); + kern_return_t mach_port_kernel_object( ipc_space_t space, @@ -593,6 +688,27 @@ mach_port_kernel_object( *addrp = (unsigned int) addr; return kr; } + +kern_return_t +mach_port_kernel_object_from_user( + mach_port_t port, + mach_port_name_t name, + unsigned int *typep, + unsigned int *addrp) +{ + kern_return_t kr; + + ipc_space_t space = convert_port_to_space_check_type(port, NULL, TASK_FLAVOR_READ, FALSE); + + if (space == IPC_SPACE_NULL) { + return KERN_INVALID_ARGUMENT; + } + + kr = mach_port_kernel_object(space, name, typep, addrp); + + ipc_space_release(space); + return kr; +} #endif /* MACH_IPC_DEBUG */ #if (DEVELOPMENT || DEBUG) diff --git a/osfmk/ipc/mach_kernelrpc.c b/osfmk/ipc/mach_kernelrpc.c index 603f841d1..66263ba91 100644 --- a/osfmk/ipc/mach_kernelrpc.c +++ b/osfmk/ipc/mach_kernelrpc.c @@ -38,6 +38,14 @@ #include #include +kern_return_t +mach_port_get_attributes( + ipc_space_t space, + mach_port_name_t name, + int flavor, + mach_port_info_t info, + mach_msg_type_number_t *count); + int _kernelrpc_mach_vm_allocate_trap(struct _kernelrpc_mach_vm_allocate_trap_args *args) { @@ -188,25 +196,6 @@ done: return rv; } -int -_kernelrpc_mach_port_destroy_trap(struct _kernelrpc_mach_port_destroy_args *args) -{ - task_t task = port_name_to_task(args->target); - int rv = MACH_SEND_INVALID_DEST; - - if (task != current_task()) { - goto done; - } - - rv = mach_port_destroy(task->itk_space, args->name); - -done: - if (task) { - task_deallocate(task); - } - return rv; -} - int 
_kernelrpc_mach_port_deallocate_trap(struct _kernelrpc_mach_port_deallocate_args *args) { @@ -313,7 +302,7 @@ done: int _kernelrpc_mach_port_get_attributes_trap(struct _kernelrpc_mach_port_get_attributes_args *args) { - task_inspect_t task = port_name_to_task_inspect(args->target); + task_inspect_t task = port_name_to_task_read_no_eval(args->target); int rv = MACH_SEND_INVALID_DEST; mach_msg_type_number_t count; @@ -323,7 +312,7 @@ _kernelrpc_mach_port_get_attributes_trap(struct _kernelrpc_mach_port_get_attribu // MIG does not define the type or size of the mach_port_info_t out array // anywhere, so derive them from the field in the generated reply struct -#define MACH_PORT_INFO_OUT (((__Reply__mach_port_get_attributes_t*)NULL)->port_info_out) +#define MACH_PORT_INFO_OUT (((__Reply__mach_port_get_attributes_from_user_t*)NULL)->port_info_out) #define MACH_PORT_INFO_STACK_LIMIT 80 // current size is 68 == 17 * sizeof(integer_t) _Static_assert(sizeof(MACH_PORT_INFO_OUT) < MACH_PORT_INFO_STACK_LIMIT, "mach_port_info_t has grown significantly, reevaluate stack usage"); @@ -573,51 +562,49 @@ host_create_mach_voucher_trap(struct host_create_mach_voucher_args *args) ipc_voucher_t new_voucher = IV_NULL; ipc_port_t voucher_port = IPC_PORT_NULL; mach_port_name_t voucher_name = 0; - kern_return_t kr = 0; + kern_return_t kr = KERN_SUCCESS; if (host == HOST_NULL) { return MACH_SEND_INVALID_DEST; } - if (args->recipes_size < 0) { return KERN_INVALID_ARGUMENT; - } else if (args->recipes_size > MACH_VOUCHER_ATTR_MAX_RAW_RECIPE_ARRAY_SIZE) { + } + if (args->recipes_size > MACH_VOUCHER_ATTR_MAX_RAW_RECIPE_ARRAY_SIZE) { return MIG_ARRAY_TOO_LARGE; } - if (args->recipes_size < MACH_VOUCHER_TRAP_STACK_LIMIT) { - /* keep small recipes on the stack for speed */ - uint8_t krecipes[args->recipes_size]; - if (copyin(CAST_USER_ADDR_T(args->recipes), (void *)krecipes, args->recipes_size)) { - kr = KERN_MEMORY_ERROR; - goto done; - } - kr = host_create_mach_voucher(host, krecipes, args->recipes_size, &new_voucher); - } else { - uint8_t *krecipes = kalloc((vm_size_t)args->recipes_size); - if (!krecipes) { - kr = KERN_RESOURCE_SHORTAGE; - goto done; - } + /* keep small recipes on the stack for speed */ + uint8_t buf[MACH_VOUCHER_TRAP_STACK_LIMIT]; + uint8_t *krecipes = buf; - if (copyin(CAST_USER_ADDR_T(args->recipes), (void *)krecipes, args->recipes_size)) { - kfree(krecipes, (vm_size_t)args->recipes_size); - kr = KERN_MEMORY_ERROR; - goto done; + if (args->recipes_size > MACH_VOUCHER_TRAP_STACK_LIMIT) { + krecipes = kheap_alloc(KHEAP_TEMP, args->recipes_size, Z_WAITOK); + if (krecipes == NULL) { + return KERN_RESOURCE_SHORTAGE; } - - kr = host_create_mach_voucher(host, krecipes, args->recipes_size, &new_voucher); - kfree(krecipes, (vm_size_t)args->recipes_size); } - if (kr == 0) { - voucher_port = convert_voucher_to_port(new_voucher); - voucher_name = ipc_port_copyout_send(voucher_port, current_space()); + if (copyin(CAST_USER_ADDR_T(args->recipes), (void *)krecipes, args->recipes_size)) { + kr = KERN_MEMORY_ERROR; + goto done; + } - kr = copyout(&voucher_name, args->voucher, sizeof(voucher_name)); + kr = host_create_mach_voucher(host, krecipes, args->recipes_size, &new_voucher); + if (kr != KERN_SUCCESS) { + goto done; } + voucher_port = convert_voucher_to_port(new_voucher); + voucher_name = ipc_port_copyout_send(voucher_port, current_space()); + + kr = copyout(&voucher_name, args->voucher, sizeof(voucher_name)); + done: + if (args->recipes_size > MACH_VOUCHER_TRAP_STACK_LIMIT) { + kheap_free(KHEAP_TEMP, krecipes, 
args->recipes_size); + } + return kr; } @@ -641,51 +628,40 @@ mach_voucher_extract_attr_recipe_trap(struct mach_voucher_extract_attr_recipe_ar return MACH_SEND_INVALID_DEST; } + /* keep small recipes on the stack for speed */ + uint8_t buf[MACH_VOUCHER_TRAP_STACK_LIMIT]; + uint8_t *krecipe = buf; mach_msg_type_number_t max_sz = sz; - if (sz < MACH_VOUCHER_TRAP_STACK_LIMIT) { - /* keep small recipes on the stack for speed */ - uint8_t krecipe[sz]; - bzero(krecipe, sz); - if (copyin(CAST_USER_ADDR_T(args->recipe), (void *)krecipe, sz)) { - kr = KERN_MEMORY_ERROR; - goto done; - } - kr = mach_voucher_extract_attr_recipe(voucher, args->key, - (mach_voucher_attr_raw_recipe_t)krecipe, &sz); - assert(sz <= max_sz); - - if (kr == KERN_SUCCESS && sz > 0) { - kr = copyout(krecipe, CAST_USER_ADDR_T(args->recipe), sz); - } - } else { - uint8_t *krecipe = kalloc((vm_size_t)max_sz); + if (max_sz > MACH_VOUCHER_TRAP_STACK_LIMIT) { + krecipe = kheap_alloc(KHEAP_TEMP, max_sz, Z_WAITOK); if (!krecipe) { - kr = KERN_RESOURCE_SHORTAGE; - goto done; + return KERN_RESOURCE_SHORTAGE; } + } - if (copyin(CAST_USER_ADDR_T(args->recipe), (void *)krecipe, sz)) { - kfree(krecipe, (vm_size_t)max_sz); - kr = KERN_MEMORY_ERROR; - goto done; - } + if (copyin(CAST_USER_ADDR_T(args->recipe), (void *)krecipe, max_sz)) { + kr = KERN_MEMORY_ERROR; + goto done; + } - kr = mach_voucher_extract_attr_recipe(voucher, args->key, - (mach_voucher_attr_raw_recipe_t)krecipe, &sz); - assert(sz <= max_sz); + kr = mach_voucher_extract_attr_recipe(voucher, args->key, + (mach_voucher_attr_raw_recipe_t)krecipe, &sz); + assert(sz <= max_sz); - if (kr == KERN_SUCCESS && sz > 0) { - kr = copyout(krecipe, CAST_USER_ADDR_T(args->recipe), sz); - } - kfree(krecipe, (vm_size_t)max_sz); + if (kr == KERN_SUCCESS && sz > 0) { + kr = copyout(krecipe, CAST_USER_ADDR_T(args->recipe), sz); } - if (kr == KERN_SUCCESS) { kr = copyout(&sz, args->recipe_size, sizeof(sz)); } + done: + if (max_sz > MACH_VOUCHER_TRAP_STACK_LIMIT) { + kheap_free(KHEAP_TEMP, krecipe, max_sz); + } + ipc_voucher_release(voucher); return kr; } diff --git a/osfmk/ipc/mach_msg.c b/osfmk/ipc/mach_msg.c index 01a5531cf..0ed0d9332 100644 --- a/osfmk/ipc/mach_msg.c +++ b/osfmk/ipc/mach_msg.c @@ -88,10 +88,10 @@ #include #include #include -#include #include #include #include +#include #include @@ -158,11 +158,11 @@ mach_msg_receive_results_complete(ipc_object_t object); const security_token_t KERNEL_SECURITY_TOKEN = KERNEL_SECURITY_TOKEN_VALUE; const audit_token_t KERNEL_AUDIT_TOKEN = KERNEL_AUDIT_TOKEN_VALUE; -mach_msg_format_0_trailer_t trailer_template = { - /* mach_msg_trailer_type_t */ MACH_MSG_TRAILER_FORMAT_0, - /* mach_msg_trailer_size_t */ MACH_MSG_TRAILER_MINIMUM_SIZE, - /* mach_port_seqno_t */ 0, - /* security_token_t */ KERNEL_SECURITY_TOKEN_VALUE +const mach_msg_max_trailer_t trailer_template = { + .msgh_trailer_type = MACH_MSG_TRAILER_FORMAT_0, + .msgh_trailer_size = MACH_MSG_TRAILER_MINIMUM_SIZE, + .msgh_sender = KERNEL_SECURITY_TOKEN_VALUE, + .msgh_audit = KERNEL_AUDIT_TOKEN_VALUE }; /* @@ -197,7 +197,7 @@ mach_msg_send( mach_msg_option_t option, mach_msg_size_t send_size, mach_msg_timeout_t send_timeout, - mach_msg_priority_t override) + mach_msg_priority_t priority) { ipc_space_t space = current_space(); vm_map_t map = current_map(); @@ -245,12 +245,13 @@ mach_msg_send( * the cases where no implicit data is requested. 
*/ trailer = (mach_msg_max_trailer_t *) ((vm_offset_t)kmsg->ikm_header + send_size); + bzero(trailer, sizeof(*trailer)); trailer->msgh_sender = current_thread()->task->sec_token; trailer->msgh_audit = current_thread()->task->audit_token; trailer->msgh_trailer_type = MACH_MSG_TRAILER_FORMAT_0; trailer->msgh_trailer_size = MACH_MSG_TRAILER_MINIMUM_SIZE; - mr = ipc_kmsg_copyin(kmsg, space, map, override, &option); + mr = ipc_kmsg_copyin(kmsg, space, map, priority, &option); if (mr != MACH_MSG_SUCCESS) { ipc_kmsg_free(kmsg); @@ -322,6 +323,7 @@ mach_msg_receive_results( mach_port_seqno_t seqno = self->ith_seqno; mach_msg_trailer_size_t trailer_size; + mach_vm_address_t context; mach_msg_size_t size = 0; /* @@ -391,17 +393,20 @@ mach_msg_receive_results( /* auto redeem the voucher in the message */ ipc_voucher_receive_postprocessing(kmsg, option); - trailer_size = ipc_kmsg_add_trailer(kmsg, space, option, self, seqno, FALSE, - kmsg->ikm_header->msgh_remote_port->ip_context); + /* Save destination port context for the trailer before copyout */ + context = kmsg->ikm_header->msgh_remote_port->ip_context; mr = ipc_kmsg_copyout(kmsg, space, map, MACH_MSG_BODY_NULL, option); + trailer_size = ipc_kmsg_trailer_size(option, self); + if (mr != MACH_MSG_SUCCESS) { /* already received importance, so have to undo that here */ ipc_importance_unreceive(kmsg, option); /* if we had a body error copyout what we have, otherwise a simple header/trailer */ if ((mr & ~MACH_MSG_MASK) == MACH_RCV_BODY_ERROR) { + ipc_kmsg_add_trailer(kmsg, space, option, self, seqno, FALSE, context); if (ipc_kmsg_put(kmsg, option, rcv_addr, rcv_size, trailer_size, &size) == MACH_RCV_INVALID_DATA) { mr = MACH_RCV_INVALID_DATA; } @@ -413,8 +418,9 @@ mach_msg_receive_results( } } else { /* capture ksmg QoS values to the thread continuation state */ - self->ith_qos = kmsg->ikm_qos; + self->ith_ppriority = kmsg->ikm_ppriority; self->ith_qos_override = kmsg->ikm_qos_override; + ipc_kmsg_add_trailer(kmsg, space, option, self, seqno, FALSE, context); mr = ipc_kmsg_put(kmsg, option, rcv_addr, rcv_size, trailer_size, &size); } @@ -484,6 +490,7 @@ mach_msg_receive_continue(void) mach_msg_return_t mr; thread_t self = current_thread(); + ipc_port_thread_group_unblocked(); if (self->ith_state == MACH_PEEK_READY) { mr = MACH_PEEK_READY; } else { @@ -513,7 +520,7 @@ mach_msg_overwrite_trap( mach_msg_size_t rcv_size = args->rcv_size; mach_port_name_t rcv_name = args->rcv_name; mach_msg_timeout_t msg_timeout = args->timeout; - mach_msg_priority_t override = args->override; + mach_msg_priority_t priority = args->priority; mach_vm_address_t rcv_msg_addr = args->rcv_msg; __unused mach_port_seqno_t temp_seqno = 0; @@ -542,12 +549,12 @@ mach_msg_overwrite_trap( 0, 0, 0); - mr = ipc_kmsg_copyin(kmsg, space, map, override, &option); + mr = ipc_kmsg_copyin(kmsg, space, map, priority, &option); if (mr != MACH_MSG_SUCCESS) { ipc_kmsg_free(kmsg); KDBG(MACHDBG_CODE(DBG_MACH_IPC, MACH_IPC_KMSG_INFO) | DBG_FUNC_END, mr); - return mr; + goto end; } mr = ipc_kmsg_send(kmsg, option, msg_timeout); @@ -556,7 +563,7 @@ mach_msg_overwrite_trap( mr |= ipc_kmsg_copyout_pseudo(kmsg, space, map, MACH_MSG_BODY_NULL); (void) ipc_kmsg_put(kmsg, option, msg_addr, send_size, 0, NULL); KDBG(MACHDBG_CODE(DBG_MACH_IPC, MACH_IPC_KMSG_INFO) | DBG_FUNC_END, mr); - return mr; + goto end; } } @@ -568,7 +575,7 @@ mach_msg_overwrite_trap( mr = ipc_mqueue_copyin(space, rcv_name, &mqueue, &object); if (mr != MACH_MSG_SUCCESS) { - return mr; + goto end; } /* hold ref for object */ @@ -577,10 
+584,10 @@ mach_msg_overwrite_trap( special_reply_port = ip_object_to_port(object); /* link the special reply port to the destination */ mr = mach_msg_rcv_link_special_reply_port(special_reply_port, - (mach_port_name_t)override); + (mach_port_name_t)priority); if (mr != MACH_MSG_SUCCESS) { io_release(object); - return mr; + goto end; } } @@ -601,10 +608,13 @@ mach_msg_overwrite_trap( if ((option & MACH_RCV_TIMEOUT) && msg_timeout == 0) { thread_poll_yield(self); } - return mach_msg_receive_results(NULL); + mr = mach_msg_receive_results(NULL); + goto end; } - return MACH_MSG_SUCCESS; +end: + ipc_port_thread_group_unblocked(); + return mr; } /* @@ -748,6 +758,7 @@ msg_receive_error( mach_vm_address_t context; mach_msg_trailer_size_t trailer_size; mach_msg_max_trailer_t *trailer; + thread_t self = current_thread(); context = kmsg->ikm_header->msgh_remote_port->ip_context; @@ -764,13 +775,13 @@ msg_receive_error( ((vm_offset_t)kmsg->ikm_header + mach_round_msg(sizeof(mach_msg_header_t))); kmsg->ikm_header->msgh_size = sizeof(mach_msg_header_t); - bcopy((char *)&trailer_template, + bcopy((const char *)&trailer_template, (char *)trailer, sizeof(trailer_template)); - trailer_size = ipc_kmsg_add_trailer(kmsg, space, - option, current_thread(), seqno, - TRUE, context); + trailer_size = ipc_kmsg_trailer_size(option, self); + ipc_kmsg_add_trailer(kmsg, space, option, self, + seqno, TRUE, context); /* * Copy the message to user space and return the size @@ -783,3 +794,41 @@ msg_receive_error( return MACH_MSG_SUCCESS; } } + +static mach_msg_fetch_filter_policy_cbfunc_t mach_msg_fetch_filter_policy_callback = NULL; + +kern_return_t +mach_msg_filter_register_callback( + const struct mach_msg_filter_callbacks *callbacks) +{ + if (callbacks == NULL) { + return KERN_INVALID_ARGUMENT; + } + + if (callbacks->version >= MACH_MSG_FILTER_CALLBACKS_VERSION_0) { + if (mach_msg_fetch_filter_policy_callback != NULL) { + return KERN_FAILURE; + } + mach_msg_fetch_filter_policy_callback = callbacks->fetch_filter_policy; + } + + return KERN_SUCCESS; +} + +/* This function should only be called if the task and port allow message filtering */ +boolean_t +mach_msg_fetch_filter_policy( + void *port_label, + mach_msg_id_t msgh_id, + mach_msg_filter_id *fid) +{ + boolean_t ret = TRUE; + + if (mach_msg_fetch_filter_policy_callback == NULL) { + *fid = MACH_MSG_FILTER_POLICY_ALLOW; + return true; + } + ret = mach_msg_fetch_filter_policy_callback(current_task(), port_label, msgh_id, fid); + + return ret; +} diff --git a/osfmk/ipc/mach_port.c b/osfmk/ipc/mach_port.c index af47faa37..86d9ddf2a 100644 --- a/osfmk/ipc/mach_port.c +++ b/osfmk/ipc/mach_port.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2007 Apple Inc. All rights reserved. + * Copyright (c) 2000-2020 Apple Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -82,7 +82,6 @@ #include #include #include -#include #include #include #include @@ -98,11 +97,19 @@ #include #include #include +#include +#include #if IMPORTANCE_INHERITANCE #include #endif +kern_return_t mach_port_get_attributes(ipc_space_t space, mach_port_name_t name, + int flavor, mach_port_info_t info, mach_msg_type_number_t *count); +kern_return_t mach_port_get_context(ipc_space_t space, mach_port_name_t name, + mach_vm_address_t *context); +kern_return_t mach_port_get_set_status(ipc_space_t space, mach_port_name_t name, + mach_port_name_t **members, mach_msg_type_number_t *membersCnt); /* Zeroed template of qos flags */ @@ -921,7 +928,7 @@ mach_port_get_refs( switch (right) { case MACH_PORT_RIGHT_SEND_ONCE: assert(urefs == 1); - /* fall-through */ + OS_FALLTHROUGH; case MACH_PORT_RIGHT_PORT_SET: case MACH_PORT_RIGHT_RECEIVE: @@ -1236,6 +1243,25 @@ mach_port_get_context( return KERN_SUCCESS; } +kern_return_t +mach_port_get_context_from_user( + mach_port_t port, + mach_port_name_t name, + mach_vm_address_t *context) +{ + kern_return_t kr; + + ipc_space_t space = convert_port_to_space_check_type(port, NULL, TASK_FLAVOR_READ, FALSE); + + if (space == IPC_SPACE_NULL) { + return KERN_INVALID_ARGUMENT; + } + + kr = mach_port_get_context(space, name, context); + + ipc_space_release(space); + return kr; +} /* * Routine: mach_port_set_context [kernel call] @@ -1419,6 +1445,27 @@ mach_port_get_set_status( return KERN_SUCCESS; } +kern_return_t +mach_port_get_set_status_from_user( + mach_port_t port, + mach_port_name_t name, + mach_port_name_t **members, + mach_msg_type_number_t *membersCnt) +{ + kern_return_t kr; + + ipc_space_t space = convert_port_to_space_check_type(port, NULL, TASK_FLAVOR_READ, FALSE); + + if (space == IPC_SPACE_NULL) { + return KERN_INVALID_ARGUMENT; + } + + kr = mach_port_get_set_status(space, name, members, membersCnt); + + ipc_space_release(space); + return kr; +} + /* * Routine: mach_port_move_member [kernel call] * Purpose: @@ -1854,8 +1901,6 @@ mach_port_get_status_helper( return; } - - kern_return_t mach_port_get_attributes( ipc_space_t space, @@ -1977,6 +2022,28 @@ mach_port_get_attributes( return KERN_SUCCESS; } +kern_return_t +mach_port_get_attributes_from_user( + mach_port_t port, + mach_port_name_t name, + int flavor, + mach_port_info_t info, + mach_msg_type_number_t *count) +{ + kern_return_t kr; + + ipc_space_t space = convert_port_to_space_check_type(port, NULL, TASK_FLAVOR_READ, FALSE); + + if (space == IPC_SPACE_NULL) { + return KERN_INVALID_ARGUMENT; + } + + kr = mach_port_get_attributes(space, name, flavor, info, count); + + ipc_space_release(space); + return kr; +} + kern_return_t mach_port_set_attributes( ipc_space_t space, @@ -2434,6 +2501,7 @@ mach_port_guard_ast(thread_t t, case kGUARD_EXC_INCORRECT_GUARD: case kGUARD_EXC_IMMOVABLE: case kGUARD_EXC_STRICT_REPLY: + case kGUARD_EXC_MSG_FILTERED: task_exception_notify(EXC_GUARD, code, subcode); task_bsdtask_kill(task); break; @@ -2511,6 +2579,30 @@ mach_port_construct( init_flags |= IPC_PORT_INIT_MAKE_SEND_RIGHT; } + if (options->flags & MPO_FILTER_MSG) { + init_flags |= IPC_PORT_INIT_FILTER_MESSAGE; + } + + if (options->flags & MPO_TG_BLOCK_TRACKING) { + /* Check the task role to allow only TASK_GRAPHICS_SERVER to set this option */ + if (proc_get_effective_task_policy(current_task(), + TASK_POLICY_ROLE) != TASK_GRAPHICS_SERVER) { + return KERN_DENIED; + } + + /* + * Check the work interval port passed in to make sure it is the render server type. 
+ * Since the creation of the render server work interval is privileged, this check + * acts as a guard to make sure only the render server is setting the thread group + * blocking behavior on the port. + */ + mach_port_name_t wi_port_name = options->work_interval_port; + if (work_interval_port_type_render_server(wi_port_name) == false) { + return KERN_INVALID_ARGUMENT; + } + init_flags |= IPC_PORT_INIT_TG_BLOCK_TRACKING; + } + /* Allocate a new port in the IPC space */ kr = ipc_port_alloc(space, init_flags, name, &port); if (kr != KERN_SUCCESS) { diff --git a/osfmk/kdp/kdp.c b/osfmk/kdp/kdp.c index 6cb2ee5a8..1a5459468 100644 --- a/osfmk/kdp/kdp.c +++ b/osfmk/kdp/kdp.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2012 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -37,6 +37,7 @@ #include #include +#include #include #include /* bcopy */ @@ -50,36 +51,36 @@ #include #include -#define DO_ALIGN 1 /* align all packet data accesses */ +#define DO_ALIGN 1 /* align all packet data accesses */ #define KDP_TEST_HARNESS 0 -#if KDP_TEST_HARNESS -#define dprintf(x) kprintf x +#if KDP_TEST_HARNESS +#define dprintf(x) kprintf x #else #define dprintf(x) #endif -static kdp_dispatch_t - dispatch_table[KDP_INVALID_REQUEST-KDP_CONNECT] = - { -/* 0 */ kdp_connect, -/* 1 */ kdp_disconnect, -/* 2 */ kdp_hostinfo, -/* 3 */ kdp_version, -/* 4 */ kdp_maxbytes, -/* 5 */ kdp_readmem, -/* 6 */ kdp_writemem, -/* 7 */ kdp_readregs, -/* 8 */ kdp_writeregs, +SECURITY_READ_ONLY_EARLY(static kdp_dispatch_t) +dispatch_table[KDP_INVALID_REQUEST - KDP_CONNECT] = +{ +/* 0 */ kdp_connect, +/* 1 */ kdp_disconnect, +/* 2 */ kdp_hostinfo, +/* 3 */ kdp_version, +/* 4 */ kdp_maxbytes, +/* 5 */ kdp_readmem, +/* 6 */ kdp_writemem, +/* 7 */ kdp_readregs, +/* 8 */ kdp_writeregs, /* 9 */ kdp_unknown, /* A */ kdp_unknown, -/* B */ kdp_suspend, -/* C */ kdp_resumecpus, -/* D */ kdp_unknown, +/* B */ kdp_suspend, +/* C */ kdp_resumecpus, +/* D */ kdp_unknown, /* E */ kdp_unknown, /* F */ kdp_breakpoint_set, /*10 */ kdp_breakpoint_remove, -/*11 */ kdp_regions, +/*11 */ kdp_regions, /*12 */ kdp_reattach, /*13 */ kdp_reboot, /*14 */ kdp_readmem64, @@ -94,9 +95,9 @@ static kdp_dispatch_t /*1D */ kdp_readmsr64, /*1E */ kdp_writemsr64, /*1F */ kdp_dumpinfo, - }; - -kdp_glob_t kdp; +}; + +kdp_glob_t kdp; #define MAX_BREAKPOINTS 100 @@ -104,14 +105,14 @@ kdp_glob_t kdp; * Version 11 of the KDP Protocol adds support for 64-bit wide memory * addresses (read/write and breakpoints) as well as a dedicated * kernelversion request. Version 12 adds read/writing of physical - * memory with 64-bit wide memory addresses. + * memory with 64-bit wide memory addresses. 
*/ #define KDP_VERSION 12 typedef struct{ - mach_vm_address_t address; - uint32_t bytesused; - uint8_t oldbytes[MAX_BREAKINSN_BYTES]; + mach_vm_address_t address; + uint32_t bytesused; + uint8_t oldbytes[MAX_BREAKINSN_BYTES]; } kdp_breakpoint_record_t; static kdp_breakpoint_record_t breakpoint_list[MAX_BREAKPOINTS]; @@ -122,841 +123,870 @@ int noresume_on_disconnect = 0; kdp_error_t kdp_set_breakpoint_internal( - mach_vm_address_t address - ); + mach_vm_address_t address + ); kdp_error_t kdp_remove_breakpoint_internal( - mach_vm_address_t address - ); + mach_vm_address_t address + ); boolean_t kdp_packet( - unsigned char *pkt, - int *len, - unsigned short *reply_port -) + unsigned char *pkt, + int *len, + unsigned short *reply_port + ) { - static unsigned aligned_pkt[1538/sizeof(unsigned)+1]; // max ether pkt - kdp_pkt_t *rd = (kdp_pkt_t *)&aligned_pkt; - size_t plen = *len; - kdp_req_t req; - boolean_t ret; + static unsigned aligned_pkt[1538 / sizeof(unsigned) + 1];// max ether pkt + kdp_pkt_t *rd = (kdp_pkt_t *)&aligned_pkt; + size_t plen = *len; + kdp_req_t req; + boolean_t ret; #if DO_ALIGN - if (plen > sizeof(aligned_pkt)) { - printf("kdp_packet bad len %lu\n", plen); - return FALSE; - } - bcopy((char *)pkt, (char *)rd, plen); + if (plen > sizeof(aligned_pkt)) { + printf("kdp_packet bad len %lu\n", plen); + return FALSE; + } + bcopy((char *)pkt, (char *)rd, plen); #else - rd = (kdp_pkt_t *)pkt; + rd = (kdp_pkt_t *)pkt; #endif - if (plen < sizeof (rd->hdr) || rd->hdr.len != plen) { - printf("kdp_packet bad len pkt %lu hdr %d\n", plen, rd->hdr.len); - - return (FALSE); - } - - if (rd->hdr.is_reply) { - printf("kdp_packet reply recvd req %x seq %x\n", - rd->hdr.request, rd->hdr.seq); - - return (FALSE); - } - - req = rd->hdr.request; - if (req >= KDP_INVALID_REQUEST) { - printf("kdp_packet bad request %x len %d seq %x key %x\n", - rd->hdr.request, rd->hdr.len, rd->hdr.seq, rd->hdr.key); + if (plen < sizeof(rd->hdr) || rd->hdr.len != plen) { + printf("kdp_packet bad len pkt %lu hdr %d\n", plen, rd->hdr.len); + + return FALSE; + } + + if (rd->hdr.is_reply) { + printf("kdp_packet reply recvd req %x seq %x\n", + rd->hdr.request, rd->hdr.seq); + + return FALSE; + } + + req = rd->hdr.request; + if (req >= KDP_INVALID_REQUEST) { + printf("kdp_packet bad request %x len %d seq %x key %x\n", + rd->hdr.request, rd->hdr.len, rd->hdr.seq, rd->hdr.key); + + return FALSE; + } - return (FALSE); - } - - ret = ((*dispatch_table[req - KDP_CONNECT])(rd, len, reply_port)); + ret = ((*dispatch_table[req - KDP_CONNECT])(rd, len, reply_port)); #if DO_ALIGN - bcopy((char *)rd, (char *) pkt, *len); + bcopy((char *)rd, (char *) pkt, *len); #endif - return ret; + return ret; } static boolean_t kdp_unknown( - kdp_pkt_t *pkt, - __unused int *len, - __unused unsigned short *reply_port -) + kdp_pkt_t *pkt, + __unused int *len, + __unused unsigned short *reply_port + ) { - kdp_pkt_t *rd = (kdp_pkt_t *)pkt; + kdp_pkt_t *rd = (kdp_pkt_t *)pkt; - printf("kdp_unknown request %x len %d seq %x key %x\n", - rd->hdr.request, rd->hdr.len, rd->hdr.seq, rd->hdr.key); + printf("kdp_unknown request %x len %d seq %x key %x\n", + rd->hdr.request, rd->hdr.len, rd->hdr.seq, rd->hdr.key); - return (FALSE); + return FALSE; } static boolean_t kdp_connect( - kdp_pkt_t *pkt, - int *len, - unsigned short *reply_port -) + kdp_pkt_t *pkt, + int *len, + unsigned short *reply_port + ) { - kdp_connect_req_t *rq = &pkt->connect_req; - size_t plen = *len; - kdp_connect_reply_t *rp = &pkt->connect_reply; - uint16_t rport, eport; - uint32_t key; - 
uint8_t seq; - - if (plen < sizeof (*rq)) - return (FALSE); - - dprintf(("kdp_connect seq %x greeting %s\n", rq->hdr.seq, rq->greeting)); - - rport = rq->req_reply_port; - eport = rq->exc_note_port; - key = rq->hdr.key; - seq = rq->hdr.seq; - if (kdp.is_conn) { - if ((seq == kdp.conn_seq) && /* duplicate request */ - (rport == kdp.reply_port) && - (eport == kdp.exception_port) && - (key == kdp.session_key)) - rp->error = KDPERR_NO_ERROR; - else - rp->error = KDPERR_ALREADY_CONNECTED; - } - else { - kdp.reply_port = rport; - kdp.exception_port = eport; - kdp.is_conn = TRUE; - kdp.conn_seq = seq; - kdp.session_key = key; - - rp->error = KDPERR_NO_ERROR; - } - - rp->hdr.is_reply = 1; - rp->hdr.len = sizeof (*rp); - - *reply_port = rport; - *len = rp->hdr.len; - - if (current_debugger == KDP_CUR_DB) - active_debugger=1; - - return (TRUE); + kdp_connect_req_t *rq = &pkt->connect_req; + size_t plen = *len; + kdp_connect_reply_t *rp = &pkt->connect_reply; + uint16_t rport, eport; + uint32_t key; + uint8_t seq; + + if (plen < sizeof(*rq)) { + return FALSE; + } + + dprintf(("kdp_connect seq %x greeting %s\n", rq->hdr.seq, rq->greeting)); + + rport = rq->req_reply_port; + eport = rq->exc_note_port; + key = rq->hdr.key; + seq = rq->hdr.seq; + if (kdp.is_conn) { + if ((seq == kdp.conn_seq) && /* duplicate request */ + (rport == kdp.reply_port) && + (eport == kdp.exception_port) && + (key == kdp.session_key)) { + rp->error = KDPERR_NO_ERROR; + } else { + rp->error = KDPERR_ALREADY_CONNECTED; + } + } else { + kdp.reply_port = rport; + kdp.exception_port = eport; + kdp.is_conn = TRUE; + kdp.conn_seq = seq; + kdp.session_key = key; + + rp->error = KDPERR_NO_ERROR; + } + + rp->hdr.is_reply = 1; + rp->hdr.len = sizeof(*rp); + + *reply_port = rport; + *len = rp->hdr.len; + + if (current_debugger == KDP_CUR_DB) { + active_debugger = 1; + } + + return TRUE; } static boolean_t kdp_disconnect( - kdp_pkt_t *pkt, - int *len, - unsigned short *reply_port -) + kdp_pkt_t *pkt, + int *len, + unsigned short *reply_port + ) { - kdp_disconnect_req_t *rq = &pkt->disconnect_req; - size_t plen = *len; - kdp_disconnect_reply_t *rp = &pkt->disconnect_reply; - - if (plen < sizeof (*rq)) - return (FALSE); - - if (!kdp.is_conn) - return (FALSE); - - dprintf(("kdp_disconnect\n")); - - *reply_port = kdp.reply_port; - - kdp.reply_port = kdp.exception_port = 0; - kdp.is_halted = kdp.is_conn = FALSE; - kdp.exception_seq = kdp.conn_seq = 0; - kdp.session_key = 0; - - if (debugger_panic_str != NULL) - reattach_wait = 1; + kdp_disconnect_req_t *rq = &pkt->disconnect_req; + size_t plen = *len; + kdp_disconnect_reply_t *rp = &pkt->disconnect_reply; - if (noresume_on_disconnect == 1) { - reattach_wait = 1; - noresume_on_disconnect = 0; - } - - rp->hdr.is_reply = 1; - rp->hdr.len = sizeof (*rp); - - *len = rp->hdr.len; - - if (current_debugger == KDP_CUR_DB) - active_debugger=0; - - return (TRUE); + if (plen < sizeof(*rq)) { + return FALSE; + } + + if (!kdp.is_conn) { + return FALSE; + } + + dprintf(("kdp_disconnect\n")); + + *reply_port = kdp.reply_port; + + kdp.reply_port = kdp.exception_port = 0; + kdp.is_halted = kdp.is_conn = FALSE; + kdp.exception_seq = kdp.conn_seq = 0; + kdp.session_key = 0; + + if (debugger_panic_str != NULL) { + reattach_wait = 1; + } + + if (noresume_on_disconnect == 1) { + reattach_wait = 1; + noresume_on_disconnect = 0; + } + + rp->hdr.is_reply = 1; + rp->hdr.len = sizeof(*rp); + + *len = rp->hdr.len; + + if (current_debugger == KDP_CUR_DB) { + active_debugger = 0; + } + + return TRUE; } static boolean_t 
kdp_reattach( - kdp_pkt_t *pkt, - int *len, - unsigned short *reply_port -) + kdp_pkt_t *pkt, + int *len, + unsigned short *reply_port + ) { - kdp_reattach_req_t *rq = &pkt->reattach_req; + kdp_reattach_req_t *rq = &pkt->reattach_req; - kdp.is_conn = TRUE; - kdp_disconnect(pkt, len, reply_port); - *reply_port = rq->req_reply_port; - reattach_wait = 1; - return (TRUE); + kdp.is_conn = TRUE; + kdp_disconnect(pkt, len, reply_port); + *reply_port = rq->req_reply_port; + reattach_wait = 1; + return TRUE; } static boolean_t kdp_hostinfo( - kdp_pkt_t *pkt, - int *len, - unsigned short *reply_port -) + kdp_pkt_t *pkt, + int *len, + unsigned short *reply_port + ) { - kdp_hostinfo_req_t *rq = &pkt->hostinfo_req; - size_t plen = *len; - kdp_hostinfo_reply_t *rp = &pkt->hostinfo_reply; + kdp_hostinfo_req_t *rq = &pkt->hostinfo_req; + size_t plen = *len; + kdp_hostinfo_reply_t *rp = &pkt->hostinfo_reply; - if (plen < sizeof (*rq)) - return (FALSE); + if (plen < sizeof(*rq)) { + return FALSE; + } - dprintf(("kdp_hostinfo\n")); + dprintf(("kdp_hostinfo\n")); - rp->hdr.is_reply = 1; - rp->hdr.len = sizeof (*rp); + rp->hdr.is_reply = 1; + rp->hdr.len = sizeof(*rp); - kdp_machine_hostinfo(&rp->hostinfo); + kdp_machine_hostinfo(&rp->hostinfo); - *reply_port = kdp.reply_port; - *len = rp->hdr.len; - - return (TRUE); + *reply_port = kdp.reply_port; + *len = rp->hdr.len; + + return TRUE; } static boolean_t kdp_kernelversion( - kdp_pkt_t *pkt, - int *len, - unsigned short *reply_port -) + kdp_pkt_t *pkt, + int *len, + unsigned short *reply_port + ) { - kdp_kernelversion_req_t *rq = &pkt->kernelversion_req; - size_t plen = *len; - kdp_kernelversion_reply_t *rp = &pkt->kernelversion_reply; - size_t slen; - - if (plen < sizeof (*rq)) - return (FALSE); - - rp->hdr.is_reply = 1; - rp->hdr.len = sizeof (*rp); - - dprintf(("kdp_kernelversion\n")); - slen = strlcpy(rp->version, kdp_kernelversion_string, MAX_KDP_DATA_SIZE); - - rp->hdr.len += slen + 1; /* strlcpy returns the amount copied with NUL */ - - *reply_port = kdp.reply_port; - *len = rp->hdr.len; - - return (TRUE); + kdp_kernelversion_req_t *rq = &pkt->kernelversion_req; + size_t plen = *len; + kdp_kernelversion_reply_t *rp = &pkt->kernelversion_reply; + size_t slen; + + if (plen < sizeof(*rq)) { + return FALSE; + } + + rp->hdr.is_reply = 1; + rp->hdr.len = sizeof(*rp); + + dprintf(("kdp_kernelversion\n")); + slen = strlcpy(rp->version, kdp_kernelversion_string, MAX_KDP_DATA_SIZE); + + rp->hdr.len += slen + 1; /* strlcpy returns the amount copied with NUL */ + + *reply_port = kdp.reply_port; + *len = rp->hdr.len; + + return TRUE; } static boolean_t kdp_suspend( - kdp_pkt_t *pkt, - int *len, - unsigned short *reply_port -) + kdp_pkt_t *pkt, + int *len, + unsigned short *reply_port + ) { - kdp_suspend_req_t *rq = &pkt->suspend_req; - size_t plen = *len; - kdp_suspend_reply_t *rp = &pkt->suspend_reply; + kdp_suspend_req_t *rq = &pkt->suspend_req; + size_t plen = *len; + kdp_suspend_reply_t *rp = &pkt->suspend_reply; + + if (plen < sizeof(*rq)) { + return FALSE; + } + + rp->hdr.is_reply = 1; + rp->hdr.len = sizeof(*rp); - if (plen < sizeof (*rq)) - return (FALSE); + dprintf(("kdp_suspend\n")); - rp->hdr.is_reply = 1; - rp->hdr.len = sizeof (*rp); + kdp.is_halted = TRUE; - dprintf(("kdp_suspend\n")); + *reply_port = kdp.reply_port; + *len = rp->hdr.len; - kdp.is_halted = TRUE; - - *reply_port = kdp.reply_port; - *len = rp->hdr.len; - - return (TRUE); + return TRUE; } static boolean_t kdp_resumecpus( - kdp_pkt_t *pkt, - int *len, - unsigned short *reply_port -) + 
kdp_pkt_t *pkt, + int *len, + unsigned short *reply_port + ) { - kdp_resumecpus_req_t *rq = &pkt->resumecpus_req; - size_t plen = *len; - kdp_resumecpus_reply_t *rp = &pkt->resumecpus_reply; - - if (plen < sizeof (*rq)) - return (FALSE); - - rp->hdr.is_reply = 1; - rp->hdr.len = sizeof (*rp); - - dprintf(("kdp_resumecpus %x\n", rq->cpu_mask)); - - kdp.is_halted = FALSE; - - *reply_port = kdp.reply_port; - *len = rp->hdr.len; - - return (TRUE); + kdp_resumecpus_req_t *rq = &pkt->resumecpus_req; + size_t plen = *len; + kdp_resumecpus_reply_t *rp = &pkt->resumecpus_reply; + + if (plen < sizeof(*rq)) { + return FALSE; + } + + rp->hdr.is_reply = 1; + rp->hdr.len = sizeof(*rp); + + dprintf(("kdp_resumecpus %x\n", rq->cpu_mask)); + + kdp.is_halted = FALSE; + + *reply_port = kdp.reply_port; + *len = rp->hdr.len; + + return TRUE; } static boolean_t kdp_writemem( - kdp_pkt_t *pkt, - int *len, - unsigned short *reply_port -) + kdp_pkt_t *pkt, + int *len, + unsigned short *reply_port + ) { - kdp_writemem_req_t *rq = &pkt->writemem_req; - size_t plen = *len; - kdp_writemem_reply_t *rp = &pkt->writemem_reply; - mach_vm_size_t cnt; - - if (plen < sizeof (*rq)) - return (FALSE); - - if (rq->nbytes > MAX_KDP_DATA_SIZE) - rp->error = KDPERR_BAD_NBYTES; - else { - dprintf(("kdp_writemem addr %x size %d\n", rq->address, rq->nbytes)); - cnt = kdp_machine_vm_write((caddr_t)rq->data, (mach_vm_address_t)rq->address, rq->nbytes); - rp->error = KDPERR_ACCESS(rq->nbytes, cnt); - dprintf((" cnt %lld error %d\n", cnt, rp->error)); - } - - rp->hdr.is_reply = 1; - rp->hdr.len = sizeof (*rp); - - *reply_port = kdp.reply_port; - *len = rp->hdr.len; - - return (TRUE); + kdp_writemem_req_t *rq = &pkt->writemem_req; + size_t plen = *len; + kdp_writemem_reply_t *rp = &pkt->writemem_reply; + mach_vm_size_t cnt; + + if (plen < sizeof(*rq)) { + return FALSE; + } + + if (rq->nbytes > MAX_KDP_DATA_SIZE) { + rp->error = KDPERR_BAD_NBYTES; + } else { + dprintf(("kdp_writemem addr %x size %d\n", rq->address, rq->nbytes)); + cnt = kdp_machine_vm_write((caddr_t)rq->data, (mach_vm_address_t)rq->address, rq->nbytes); + rp->error = KDPERR_ACCESS(rq->nbytes, cnt); + dprintf((" cnt %lld error %d\n", cnt, rp->error)); + } + + rp->hdr.is_reply = 1; + rp->hdr.len = sizeof(*rp); + + *reply_port = kdp.reply_port; + *len = rp->hdr.len; + + return TRUE; } static boolean_t kdp_writemem64( - kdp_pkt_t *pkt, - int *len, - unsigned short *reply_port -) + kdp_pkt_t *pkt, + int *len, + unsigned short *reply_port + ) { - kdp_writemem64_req_t *rq = &pkt->writemem64_req; - size_t plen = *len; - kdp_writemem64_reply_t *rp = &pkt->writemem64_reply; - mach_vm_size_t cnt; - - if (plen < sizeof (*rq)) - return (FALSE); - - if (rq->nbytes > MAX_KDP_DATA_SIZE) - rp->error = KDPERR_BAD_NBYTES; - else { - dprintf(("kdp_writemem64 addr %llx size %d\n", rq->address, rq->nbytes)); - cnt = kdp_machine_vm_write((caddr_t)rq->data, (mach_vm_address_t)rq->address, (mach_vm_size_t)rq->nbytes); - rp->error = KDPERR_ACCESS(rq->nbytes, cnt); - dprintf((" cnt %lld error %d\n", cnt, rp->error)); - } - - rp->hdr.is_reply = 1; - rp->hdr.len = sizeof (*rp); - - *reply_port = kdp.reply_port; - *len = rp->hdr.len; - - return (TRUE); + kdp_writemem64_req_t *rq = &pkt->writemem64_req; + size_t plen = *len; + kdp_writemem64_reply_t *rp = &pkt->writemem64_reply; + mach_vm_size_t cnt; + + if (plen < sizeof(*rq)) { + return FALSE; + } + + if (rq->nbytes > MAX_KDP_DATA_SIZE) { + rp->error = KDPERR_BAD_NBYTES; + } else { + dprintf(("kdp_writemem64 addr %llx size %d\n", rq->address, 
rq->nbytes)); + cnt = kdp_machine_vm_write((caddr_t)rq->data, (mach_vm_address_t)rq->address, (mach_vm_size_t)rq->nbytes); + rp->error = KDPERR_ACCESS(rq->nbytes, cnt); + dprintf((" cnt %lld error %d\n", cnt, rp->error)); + } + + rp->hdr.is_reply = 1; + rp->hdr.len = sizeof(*rp); + + *reply_port = kdp.reply_port; + *len = rp->hdr.len; + + return TRUE; } static boolean_t kdp_writephysmem64( - kdp_pkt_t *pkt, - int *len, - unsigned short *reply_port -) + kdp_pkt_t *pkt, + int *len, + unsigned short *reply_port + ) { - kdp_writephysmem64_req_t *rq = &pkt->writephysmem64_req; - size_t plen = *len; - kdp_writephysmem64_reply_t *rp = &pkt->writephysmem64_reply; - mach_vm_size_t cnt; - unsigned int size; - - if (plen < sizeof (*rq)) - return (FALSE); - - size = rq->nbytes; - if (size > MAX_KDP_DATA_SIZE) - rp->error = KDPERR_BAD_NBYTES; - else { - dprintf(("kdp_writephysmem64 addr %llx size %d\n", rq->address, size)); - cnt = kdp_machine_phys_write(rq, rq->data, rq->lcpu); - rp->error = KDPERR_ACCESS(size, cnt); - dprintf((" cnt %lld error %d\n", cnt, rp->error)); - } - - rp->hdr.is_reply = 1; - rp->hdr.len = sizeof (*rp); - - *reply_port = kdp.reply_port; - *len = rp->hdr.len; - - return (TRUE); + kdp_writephysmem64_req_t *rq = &pkt->writephysmem64_req; + size_t plen = *len; + kdp_writephysmem64_reply_t *rp = &pkt->writephysmem64_reply; + mach_vm_size_t cnt; + unsigned int size; + + if (plen < sizeof(*rq)) { + return FALSE; + } + + size = rq->nbytes; + if (size > MAX_KDP_DATA_SIZE) { + rp->error = KDPERR_BAD_NBYTES; + } else { + dprintf(("kdp_writephysmem64 addr %llx size %d\n", rq->address, size)); + cnt = kdp_machine_phys_write(rq, rq->data, rq->lcpu); + rp->error = KDPERR_ACCESS(size, cnt); + dprintf((" cnt %lld error %d\n", cnt, rp->error)); + } + + rp->hdr.is_reply = 1; + rp->hdr.len = sizeof(*rp); + + *reply_port = kdp.reply_port; + *len = rp->hdr.len; + + return TRUE; } static boolean_t kdp_readmem( - kdp_pkt_t *pkt, - int *len, - unsigned short *reply_port -) + kdp_pkt_t *pkt, + int *len, + unsigned short *reply_port + ) { - kdp_readmem_req_t *rq = &pkt->readmem_req; - size_t plen = *len; - kdp_readmem_reply_t *rp = &pkt->readmem_reply; - mach_vm_size_t cnt; - unsigned int size; - - if (plen < sizeof (*rq)) - return (FALSE); - - rp->hdr.is_reply = 1; - rp->hdr.len = sizeof (*rp); - - size = rq->nbytes; - if (size > MAX_KDP_DATA_SIZE) - rp->error = KDPERR_BAD_NBYTES; - else { - dprintf(("kdp_readmem addr %x size %d\n", rq->address, size)); - cnt = kdp_machine_vm_read((mach_vm_address_t)rq->address, (caddr_t)rp->data, rq->nbytes); - rp->error = KDPERR_ACCESS(size, cnt); - dprintf((" cnt %lld error %d\n", cnt, rp->error)); - - rp->hdr.len += cnt; - } - - *reply_port = kdp.reply_port; - *len = rp->hdr.len; - - return (TRUE); + kdp_readmem_req_t *rq = &pkt->readmem_req; + size_t plen = *len; + kdp_readmem_reply_t *rp = &pkt->readmem_reply; + mach_vm_size_t cnt; + unsigned int size; + + if (plen < sizeof(*rq)) { + return FALSE; + } + + rp->hdr.is_reply = 1; + rp->hdr.len = sizeof(*rp); + + size = rq->nbytes; + if (size > MAX_KDP_DATA_SIZE) { + rp->error = KDPERR_BAD_NBYTES; + } else { + dprintf(("kdp_readmem addr %x size %d\n", rq->address, size)); + cnt = kdp_machine_vm_read((mach_vm_address_t)rq->address, (caddr_t)rp->data, rq->nbytes); + rp->error = KDPERR_ACCESS(size, cnt); + dprintf((" cnt %lld error %d\n", cnt, rp->error)); + + rp->hdr.len += cnt; + } + + *reply_port = kdp.reply_port; + *len = rp->hdr.len; + + return TRUE; } static boolean_t kdp_readmem64( - kdp_pkt_t *pkt, - int *len, - 
unsigned short *reply_port -) + kdp_pkt_t *pkt, + int *len, + unsigned short *reply_port + ) { - kdp_readmem64_req_t *rq = &pkt->readmem64_req; - size_t plen = *len; - kdp_readmem64_reply_t *rp = &pkt->readmem64_reply; - mach_vm_size_t cnt; - unsigned int size; - - if (plen < sizeof (*rq)) - return (FALSE); - - rp->hdr.is_reply = 1; - rp->hdr.len = sizeof (*rp); - - size = rq->nbytes; - if (size > MAX_KDP_DATA_SIZE) - rp->error = KDPERR_BAD_NBYTES; - else { - dprintf(("kdp_readmem64 addr %llx size %d\n", rq->address, size)); - cnt = kdp_machine_vm_read((mach_vm_address_t)rq->address, (caddr_t)rp->data, rq->nbytes); - rp->error = KDPERR_ACCESS(size, cnt); - dprintf((" cnt %lld error %d\n", cnt, rp->error)); - - rp->hdr.len += cnt; - } - - *reply_port = kdp.reply_port; - *len = rp->hdr.len; - - return (TRUE); + kdp_readmem64_req_t *rq = &pkt->readmem64_req; + size_t plen = *len; + kdp_readmem64_reply_t *rp = &pkt->readmem64_reply; + mach_vm_size_t cnt; + unsigned int size; + + if (plen < sizeof(*rq)) { + return FALSE; + } + + rp->hdr.is_reply = 1; + rp->hdr.len = sizeof(*rp); + + size = rq->nbytes; + if (size > MAX_KDP_DATA_SIZE) { + rp->error = KDPERR_BAD_NBYTES; + } else { + dprintf(("kdp_readmem64 addr %llx size %d\n", rq->address, size)); + cnt = kdp_machine_vm_read((mach_vm_address_t)rq->address, (caddr_t)rp->data, rq->nbytes); + rp->error = KDPERR_ACCESS(size, cnt); + dprintf((" cnt %lld error %d\n", cnt, rp->error)); + + rp->hdr.len += cnt; + } + + *reply_port = kdp.reply_port; + *len = rp->hdr.len; + + return TRUE; } static boolean_t kdp_readphysmem64( - kdp_pkt_t *pkt, - int *len, - unsigned short *reply_port -) + kdp_pkt_t *pkt, + int *len, + unsigned short *reply_port + ) { - kdp_readphysmem64_req_t *rq = &pkt->readphysmem64_req; - size_t plen = *len; - kdp_readphysmem64_reply_t *rp = &pkt->readphysmem64_reply; - mach_vm_size_t cnt; - unsigned int size; - - if (plen < sizeof (*rq)) - return (FALSE); - - rp->hdr.is_reply = 1; - rp->hdr.len = sizeof (*rp); - - size = rq->nbytes; - if (size > MAX_KDP_DATA_SIZE) - rp->error = KDPERR_BAD_NBYTES; - else { - dprintf(("kdp_readphysmem64 addr %llx size %d\n", rq->address, size)); - cnt = kdp_machine_phys_read(rq, rp->data, rq->lcpu); - rp->error = KDPERR_ACCESS(size, cnt); - dprintf((" cnt %lld error %d\n", cnt, rp->error)); - - rp->hdr.len += cnt; - } - - *reply_port = kdp.reply_port; - *len = rp->hdr.len; - - return (TRUE); + kdp_readphysmem64_req_t *rq = &pkt->readphysmem64_req; + size_t plen = *len; + kdp_readphysmem64_reply_t *rp = &pkt->readphysmem64_reply; + mach_vm_size_t cnt; + unsigned int size; + + if (plen < sizeof(*rq)) { + return FALSE; + } + + rp->hdr.is_reply = 1; + rp->hdr.len = sizeof(*rp); + + size = rq->nbytes; + if (size > MAX_KDP_DATA_SIZE) { + rp->error = KDPERR_BAD_NBYTES; + } else { + dprintf(("kdp_readphysmem64 addr %llx size %d\n", rq->address, size)); + cnt = kdp_machine_phys_read(rq, rp->data, rq->lcpu); + rp->error = KDPERR_ACCESS(size, cnt); + dprintf((" cnt %lld error %d\n", cnt, rp->error)); + + rp->hdr.len += cnt; + } + + *reply_port = kdp.reply_port; + *len = rp->hdr.len; + + return TRUE; } static boolean_t kdp_maxbytes( - kdp_pkt_t *pkt, - int *len, - unsigned short *reply_port -) + kdp_pkt_t *pkt, + int *len, + unsigned short *reply_port + ) { - kdp_maxbytes_req_t *rq = &pkt->maxbytes_req; - size_t plen = *len; - kdp_maxbytes_reply_t *rp = &pkt->maxbytes_reply; + kdp_maxbytes_req_t *rq = &pkt->maxbytes_req; + size_t plen = *len; + kdp_maxbytes_reply_t *rp = &pkt->maxbytes_reply; + + if (plen < 
sizeof(*rq)) { + return FALSE; + } - if (plen < sizeof (*rq)) - return (FALSE); + rp->hdr.is_reply = 1; + rp->hdr.len = sizeof(*rp); - rp->hdr.is_reply = 1; - rp->hdr.len = sizeof (*rp); + dprintf(("kdp_maxbytes\n")); - dprintf(("kdp_maxbytes\n")); + rp->max_bytes = MAX_KDP_DATA_SIZE; - rp->max_bytes = MAX_KDP_DATA_SIZE; + *reply_port = kdp.reply_port; + *len = rp->hdr.len; - *reply_port = kdp.reply_port; - *len = rp->hdr.len; - - return (TRUE); + return TRUE; } static boolean_t kdp_version( - kdp_pkt_t *pkt, - int *len, - unsigned short *reply_port -) + kdp_pkt_t *pkt, + int *len, + unsigned short *reply_port + ) { - kdp_version_req_t *rq = &pkt->version_req; - size_t plen = *len; - kdp_version_reply_t *rp = &pkt->version_reply; - - if (plen < sizeof (*rq)) - return (FALSE); - - rp->hdr.is_reply = 1; - rp->hdr.len = sizeof (*rp); - - dprintf(("kdp_version\n")); - - rp->version = KDP_VERSION; - if (!(kdp_flag & KDP_BP_DIS)) - rp->feature = KDP_FEATURE_BP; - else - rp->feature = 0; - - *reply_port = kdp.reply_port; - *len = rp->hdr.len; - - return (TRUE); + kdp_version_req_t *rq = &pkt->version_req; + size_t plen = *len; + kdp_version_reply_t *rp = &pkt->version_reply; + + if (plen < sizeof(*rq)) { + return FALSE; + } + + rp->hdr.is_reply = 1; + rp->hdr.len = sizeof(*rp); + + dprintf(("kdp_version\n")); + + rp->version = KDP_VERSION; + if (!(kdp_flag & KDP_BP_DIS)) { + rp->feature = KDP_FEATURE_BP; + } else { + rp->feature = 0; + } + + *reply_port = kdp.reply_port; + *len = rp->hdr.len; + + return TRUE; } static boolean_t kdp_regions( - kdp_pkt_t *pkt, - int *len, - unsigned short *reply_port -) + kdp_pkt_t *pkt, + int *len, + unsigned short *reply_port + ) { - kdp_regions_req_t *rq = &pkt->regions_req; - size_t plen = *len; - kdp_regions_reply_t *rp = &pkt->regions_reply; - kdp_region_t *r; + kdp_regions_req_t *rq = &pkt->regions_req; + size_t plen = *len; + kdp_regions_reply_t *rp = &pkt->regions_reply; + kdp_region_t *r; + + if (plen < sizeof(*rq)) { + return FALSE; + } - if (plen < sizeof (*rq)) - return (FALSE); + rp->hdr.is_reply = 1; + rp->hdr.len = sizeof(*rp); + + dprintf(("kdp_regions\n")); - rp->hdr.is_reply = 1; - rp->hdr.len = sizeof (*rp); + r = rp->regions; + rp->nregions = 0; - dprintf(("kdp_regions\n")); + r->address = 0; + r->nbytes = 0xffffffff; - r = rp->regions; - rp->nregions = 0; + r->protection = VM_PROT_ALL; r++; rp->nregions++; - r->address = 0; - r->nbytes = 0xffffffff; + rp->hdr.len += rp->nregions * sizeof(kdp_region_t); + + *reply_port = kdp.reply_port; + *len = rp->hdr.len; - r->protection = VM_PROT_ALL; r++; rp->nregions++; - - rp->hdr.len += rp->nregions * sizeof (kdp_region_t); - - *reply_port = kdp.reply_port; - *len = rp->hdr.len; - - return (TRUE); + return TRUE; } static boolean_t kdp_writeregs( - kdp_pkt_t *pkt, - int *len, - unsigned short *reply_port -) + kdp_pkt_t *pkt, + int *len, + unsigned short *reply_port + ) { - kdp_writeregs_req_t *rq = &pkt->writeregs_req; - size_t plen = *len; - int size; - kdp_writeregs_reply_t *rp = &pkt->writeregs_reply; - - if (plen < sizeof (*rq)) - return (FALSE); - - size = rq->hdr.len - (unsigned)sizeof(kdp_hdr_t) - (unsigned)sizeof(unsigned int); - rp->error = kdp_machine_write_regs(rq->cpu, rq->flavor, rq->data, &size); - - rp->hdr.is_reply = 1; - rp->hdr.len = sizeof (*rp); - - *reply_port = kdp.reply_port; - *len = rp->hdr.len; - - return (TRUE); + kdp_writeregs_req_t *rq = &pkt->writeregs_req; + size_t plen = *len; + int size; + kdp_writeregs_reply_t *rp = &pkt->writeregs_reply; + + if (plen < sizeof(*rq)) { + 
return FALSE; + } + + size = rq->hdr.len - (unsigned)sizeof(kdp_hdr_t) - (unsigned)sizeof(unsigned int); + rp->error = kdp_machine_write_regs(rq->cpu, rq->flavor, rq->data, &size); + + rp->hdr.is_reply = 1; + rp->hdr.len = sizeof(*rp); + + *reply_port = kdp.reply_port; + *len = rp->hdr.len; + + return TRUE; } static boolean_t kdp_readregs( - kdp_pkt_t *pkt, - int *len, - unsigned short *reply_port -) + kdp_pkt_t *pkt, + int *len, + unsigned short *reply_port + ) { - kdp_readregs_req_t *rq = &pkt->readregs_req; - size_t plen = *len; - kdp_readregs_reply_t *rp = &pkt->readregs_reply; - int size; - - if (plen < sizeof (*rq)) - return (FALSE); - - rp->hdr.is_reply = 1; - rp->hdr.len = sizeof (*rp); - - rp->error = kdp_machine_read_regs(rq->cpu, rq->flavor, rp->data, &size); - rp->hdr.len += size; - - *reply_port = kdp.reply_port; - *len = rp->hdr.len; - - return (TRUE); + kdp_readregs_req_t *rq = &pkt->readregs_req; + size_t plen = *len; + kdp_readregs_reply_t *rp = &pkt->readregs_reply; + int size; + + if (plen < sizeof(*rq)) { + return FALSE; + } + + rp->hdr.is_reply = 1; + rp->hdr.len = sizeof(*rp); + + rp->error = kdp_machine_read_regs(rq->cpu, rq->flavor, rp->data, &size); + rp->hdr.len += size; + + *reply_port = kdp.reply_port; + *len = rp->hdr.len; + + return TRUE; } -boolean_t +boolean_t kdp_breakpoint_set( - kdp_pkt_t *pkt, - int *len, - unsigned short *reply_port -) + kdp_pkt_t *pkt, + int *len, + unsigned short *reply_port + ) { - kdp_breakpoint_req_t *rq = &pkt->breakpoint_req; + kdp_breakpoint_req_t *rq = &pkt->breakpoint_req; kdp_breakpoint_reply_t *rp = &pkt->breakpoint_reply; - size_t plen = *len; - kdp_error_t kerr; - - if (plen < sizeof (*rq)) - return (FALSE); - + size_t plen = *len; + kdp_error_t kerr; + + if (plen < sizeof(*rq)) { + return FALSE; + } + dprintf(("kdp_breakpoint_set %x\n", rq->address)); kerr = kdp_set_breakpoint_internal((mach_vm_address_t)rq->address); - - rp->error = kerr; - + + rp->error = kerr; + rp->hdr.is_reply = 1; - rp->hdr.len = sizeof (*rp); + rp->hdr.len = sizeof(*rp); *reply_port = kdp.reply_port; *len = rp->hdr.len; - - return (TRUE); + + return TRUE; } -boolean_t +boolean_t kdp_breakpoint64_set( - kdp_pkt_t *pkt, - int *len, - unsigned short *reply_port -) + kdp_pkt_t *pkt, + int *len, + unsigned short *reply_port + ) { - kdp_breakpoint64_req_t *rq = &pkt->breakpoint64_req; + kdp_breakpoint64_req_t *rq = &pkt->breakpoint64_req; kdp_breakpoint64_reply_t *rp = &pkt->breakpoint64_reply; - size_t plen = *len; - kdp_error_t kerr; - - if (plen < sizeof (*rq)) - return (FALSE); - + size_t plen = *len; + kdp_error_t kerr; + + if (plen < sizeof(*rq)) { + return FALSE; + } + dprintf(("kdp_breakpoint64_set %llx\n", rq->address)); kerr = kdp_set_breakpoint_internal((mach_vm_address_t)rq->address); - - rp->error = kerr; - + + rp->error = kerr; + rp->hdr.is_reply = 1; - rp->hdr.len = sizeof (*rp); + rp->hdr.len = sizeof(*rp); *reply_port = kdp.reply_port; *len = rp->hdr.len; - - return (TRUE); + + return TRUE; } -boolean_t +boolean_t kdp_breakpoint_remove( - kdp_pkt_t *pkt, - int *len, - unsigned short *reply_port -) + kdp_pkt_t *pkt, + int *len, + unsigned short *reply_port + ) { - kdp_breakpoint_req_t *rq = &pkt->breakpoint_req; + kdp_breakpoint_req_t *rq = &pkt->breakpoint_req; kdp_breakpoint_reply_t *rp = &pkt->breakpoint_reply; - size_t plen = *len; - kdp_error_t kerr; - if (plen < sizeof (*rq)) - return (FALSE); - + size_t plen = *len; + kdp_error_t kerr; + if (plen < sizeof(*rq)) { + return FALSE; + } + dprintf(("kdp_breakpoint_remove %x\n", 
rq->address)); kerr = kdp_remove_breakpoint_internal((mach_vm_address_t)rq->address); - - rp->error = kerr; - + + rp->error = kerr; + rp->hdr.is_reply = 1; - rp->hdr.len = sizeof (*rp); + rp->hdr.len = sizeof(*rp); *reply_port = kdp.reply_port; *len = rp->hdr.len; - - return (TRUE); + + return TRUE; } -boolean_t +boolean_t kdp_breakpoint64_remove( - kdp_pkt_t *pkt, - int *len, - unsigned short *reply_port -) + kdp_pkt_t *pkt, + int *len, + unsigned short *reply_port + ) { - kdp_breakpoint64_req_t *rq = &pkt->breakpoint64_req; + kdp_breakpoint64_req_t *rq = &pkt->breakpoint64_req; kdp_breakpoint64_reply_t *rp = &pkt->breakpoint64_reply; - size_t plen = *len; - kdp_error_t kerr; - - if (plen < sizeof (*rq)) - return (FALSE); - + size_t plen = *len; + kdp_error_t kerr; + + if (plen < sizeof(*rq)) { + return FALSE; + } + dprintf(("kdp_breakpoint64_remove %llx\n", rq->address)); kerr = kdp_remove_breakpoint_internal((mach_vm_address_t)rq->address); - - rp->error = kerr; - + + rp->error = kerr; + rp->hdr.is_reply = 1; - rp->hdr.len = sizeof (*rp); + rp->hdr.len = sizeof(*rp); *reply_port = kdp.reply_port; *len = rp->hdr.len; - - return (TRUE); + + return TRUE; } kdp_error_t kdp_set_breakpoint_internal( - mach_vm_address_t address -) + mach_vm_address_t address + ) { - - uint8_t breakinstr[MAX_BREAKINSN_BYTES], oldinstr[MAX_BREAKINSN_BYTES]; - uint32_t breakinstrsize = sizeof(breakinstr); - mach_vm_size_t cnt; - int i; - + uint8_t breakinstr[MAX_BREAKINSN_BYTES], oldinstr[MAX_BREAKINSN_BYTES]; + uint32_t breakinstrsize = sizeof(breakinstr); + mach_vm_size_t cnt; + int i; + kdp_machine_get_breakinsn(breakinstr, &breakinstrsize); - - if(breakpoints_initialized == 0) - { - for(i=0;(i < MAX_BREAKPOINTS); breakpoint_list[i].address=0, i++); + + if (breakpoints_initialized == 0) { + for (i = 0; (i < MAX_BREAKPOINTS); breakpoint_list[i].address = 0, i++) { + ; + } breakpoints_initialized++; - } - + } + cnt = kdp_machine_vm_read(address, (caddr_t)&oldinstr, (mach_vm_size_t)breakinstrsize); - + if (0 == memcmp(oldinstr, breakinstr, breakinstrsize)) { printf("A trap was already set at that address, not setting new breakpoint\n"); - + return KDPERR_BREAKPOINT_ALREADY_SET; } - - for(i=0;(i < MAX_BREAKPOINTS) && (breakpoint_list[i].address != 0); i++); - + + for (i = 0; (i < MAX_BREAKPOINTS) && (breakpoint_list[i].address != 0); i++) { + ; + } + if (i == MAX_BREAKPOINTS) { return KDPERR_MAX_BREAKPOINTS; } - + breakpoint_list[i].address = address; memcpy(breakpoint_list[i].oldbytes, oldinstr, breakinstrsize); breakpoint_list[i].bytesused = breakinstrsize; - + cnt = kdp_machine_vm_write((caddr_t)&breakinstr, address, breakinstrsize); - + return KDPERR_NO_ERROR; } kdp_error_t kdp_remove_breakpoint_internal( - mach_vm_address_t address -) + mach_vm_address_t address + ) { - mach_vm_size_t cnt; - int i; - - for(i=0;(i < MAX_BREAKPOINTS) && (breakpoint_list[i].address != address); i++); - - if (i == MAX_BREAKPOINTS) - { - return KDPERR_BREAKPOINT_NOT_FOUND; + mach_vm_size_t cnt; + int i; + + for (i = 0; (i < MAX_BREAKPOINTS) && (breakpoint_list[i].address != address); i++) { + ; } - + + if (i == MAX_BREAKPOINTS) { + return KDPERR_BREAKPOINT_NOT_FOUND; + } + breakpoint_list[i].address = 0; cnt = kdp_machine_vm_write((caddr_t)&breakpoint_list[i].oldbytes, address, breakpoint_list[i].bytesused); - + return KDPERR_NO_ERROR; } @@ -965,21 +995,19 @@ kdp_remove_all_breakpoints(void) { int i; boolean_t breakpoint_found = FALSE; - - if (breakpoints_initialized) - { - for(i=0;i < MAX_BREAKPOINTS; i++) - { - if 
(breakpoint_list[i].address) - { + + if (breakpoints_initialized) { + for (i = 0; i < MAX_BREAKPOINTS; i++) { + if (breakpoint_list[i].address) { kdp_machine_vm_write((caddr_t)&(breakpoint_list[i].oldbytes), (mach_vm_address_t)breakpoint_list[i].address, (mach_vm_size_t)breakpoint_list[i].bytesused); breakpoint_found = TRUE; breakpoint_list[i].address = 0; } } - - if (breakpoint_found) + + if (breakpoint_found) { printf("kdp_remove_all_breakpoints: found extant breakpoints, removing them.\n"); + } } return breakpoint_found; } @@ -987,169 +1015,175 @@ kdp_remove_all_breakpoints(void) boolean_t kdp_reboot( __unused kdp_pkt_t *pkt, - __unused int *len, + __unused int *len, __unused unsigned short *reply_port -) + ) { dprintf(("kdp_reboot\n")); kdp_machine_reboot(); - - return (TRUE); // no, not really, we won't return + + return TRUE; // no, not really, we won't return } static boolean_t kdp_readioport( - kdp_pkt_t *pkt, - int *len, - unsigned short *reply_port - ) + kdp_pkt_t *pkt, + int *len, + unsigned short *reply_port + ) { kdp_readioport_req_t *rq = &pkt->readioport_req; kdp_readioport_reply_t *rp = &pkt->readioport_reply; size_t plen = *len; - if (plen < sizeof (*rq)) - return (FALSE); - + if (plen < sizeof(*rq)) { + return FALSE; + } + rp->hdr.is_reply = 1; - rp->hdr.len = sizeof (*rp); - - if (rq->nbytes > MAX_KDP_DATA_SIZE) + rp->hdr.len = sizeof(*rp); + + if (rq->nbytes > MAX_KDP_DATA_SIZE) { rp->error = KDPERR_BAD_NBYTES; - else { + } else { #if KDP_TEST_HARNESS - uint16_t addr = rq->address; + uint16_t addr = rq->address; #endif uint16_t size = rq->nbytes; dprintf(("kdp_readioport addr %x size %d\n", addr, size)); rp->error = kdp_machine_ioport_read(rq, rp->data, rq->lcpu); - if (rp->error == KDPERR_NO_ERROR) + if (rp->error == KDPERR_NO_ERROR) { rp->hdr.len += size; + } } - + *reply_port = kdp.reply_port; *len = rp->hdr.len; - - return (TRUE); + + return TRUE; } static boolean_t kdp_writeioport( - kdp_pkt_t *pkt, - int *len, - unsigned short *reply_port - ) + kdp_pkt_t *pkt, + int *len, + unsigned short *reply_port + ) { kdp_writeioport_req_t *rq = &pkt->writeioport_req; kdp_writeioport_reply_t *rp = &pkt->writeioport_reply; - size_t plen = *len; - - if (plen < sizeof (*rq)) - return (FALSE); - - if (rq->nbytes > MAX_KDP_DATA_SIZE) + size_t plen = *len; + + if (plen < sizeof(*rq)) { + return FALSE; + } + + if (rq->nbytes > MAX_KDP_DATA_SIZE) { rp->error = KDPERR_BAD_NBYTES; - else { - dprintf(("kdp_writeioport addr %x size %d\n", rq->address, - rq->nbytes)); - + } else { + dprintf(("kdp_writeioport addr %x size %d\n", rq->address, + rq->nbytes)); + rp->error = kdp_machine_ioport_write(rq, rq->data, rq->lcpu); } - + rp->hdr.is_reply = 1; - rp->hdr.len = sizeof (*rp); - + rp->hdr.len = sizeof(*rp); + *reply_port = kdp.reply_port; *len = rp->hdr.len; - - return (TRUE); + + return TRUE; } static boolean_t kdp_readmsr64( - kdp_pkt_t *pkt, - int *len, - unsigned short *reply_port -) + kdp_pkt_t *pkt, + int *len, + unsigned short *reply_port + ) { kdp_readmsr64_req_t *rq = &pkt->readmsr64_req; kdp_readmsr64_reply_t *rp = &pkt->readmsr64_reply; size_t plen = *len; - if (plen < sizeof (*rq)) - return (FALSE); - + if (plen < sizeof(*rq)) { + return FALSE; + } + rp->hdr.is_reply = 1; - rp->hdr.len = sizeof (*rp); - + rp->hdr.len = sizeof(*rp); + dprintf(("kdp_readmsr64 lcpu %x addr %x\n", rq->lcpu, rq->address)); rp->error = kdp_machine_msr64_read(rq, rp->data, rq->lcpu); - if (rp->error == KDPERR_NO_ERROR) + if (rp->error == KDPERR_NO_ERROR) { rp->hdr.len += sizeof(uint64_t); - + } + 
*reply_port = kdp.reply_port; *len = rp->hdr.len; - - return (TRUE); + + return TRUE; } static boolean_t kdp_writemsr64( - kdp_pkt_t *pkt, - int *len, - unsigned short *reply_port - ) + kdp_pkt_t *pkt, + int *len, + unsigned short *reply_port + ) { kdp_writemsr64_req_t *rq = &pkt->writemsr64_req; kdp_writemsr64_reply_t *rp = &pkt->writemsr64_reply; - size_t plen = *len; - - if (plen < sizeof (*rq)) - return (FALSE); - - dprintf(("kdp_writemsr64 lcpu %x addr %x\n", rq->lcpu, rq->address)); + size_t plen = *len; + + if (plen < sizeof(*rq)) { + return FALSE; + } + + dprintf(("kdp_writemsr64 lcpu %x addr %x\n", rq->lcpu, rq->address)); rp->error = kdp_machine_msr64_write(rq, rq->data, rq->lcpu); - + rp->hdr.is_reply = 1; - rp->hdr.len = sizeof (*rp); - + rp->hdr.len = sizeof(*rp); + *reply_port = kdp.reply_port; *len = rp->hdr.len; - - return (TRUE); + + return TRUE; } static boolean_t kdp_dumpinfo( - kdp_pkt_t *pkt, - int *len, - unsigned short *reply_port - ) + kdp_pkt_t *pkt, + int *len, + unsigned short *reply_port + ) { kdp_dumpinfo_req_t *rq = &pkt->dumpinfo_req; kdp_dumpinfo_reply_t *rp = &pkt->dumpinfo_reply; - size_t plen = *len; - - if (plen < sizeof (*rq)) - return (FALSE); - + size_t plen = *len; + + if (plen < sizeof(*rq)) { + return FALSE; + } + dprintf(("kdp_dumpinfo file=%s destip=%s routerip=%s\n", rq->name, rq->destip, rq->routerip)); rp->hdr.is_reply = 1; - rp->hdr.len = sizeof (*rp); - - if ((rq->type & KDP_DUMPINFO_MASK) != KDP_DUMPINFO_GETINFO) { - kdp_set_dump_info(rq->type, rq->name, rq->destip, rq->routerip, - rq->port); - } + rp->hdr.len = sizeof(*rp); - /* gather some stats for reply */ - kdp_get_dump_info(rp); + if ((rq->type & KDP_DUMPINFO_MASK) != KDP_DUMPINFO_GETINFO) { + kdp_set_dump_info(rq->type, rq->name, rq->destip, rq->routerip, + rq->port); + } + + /* gather some stats for reply */ + kdp_get_dump_info(rp); *reply_port = kdp.reply_port; *len = rp->hdr.len; - - return (TRUE); -} + return TRUE; +} diff --git a/osfmk/kdp/kdp_core.c b/osfmk/kdp/kdp_core.c index 0033e27cb..07adb9b56 100644 --- a/osfmk/kdp/kdp_core.c +++ b/osfmk/kdp/kdp_core.c @@ -1,8 +1,8 @@ /* - * Copyright (c) 2015-2017 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2015-2019 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -54,7 +54,7 @@ #include #endif /* defined(__x86_64__) */ -#if CONFIG_EMBEDDED +#if defined(__arm__) || defined(__arm64__) #include #include #include @@ -66,17 +66,17 @@ #if !defined(ROUNDDOWN) #define ROUNDDOWN(a, b) ((a) & ~((b) - 1)) #endif -#endif /* CONFIG_EMBEDDED */ +#endif /* defined(__arm__) || defined(__arm64__) */ typedef int (*pmap_traverse_callback)(vm_map_offset_t start, - vm_map_offset_t end, - void *context); + vm_map_offset_t end, + void *context); extern int pmap_traverse_present_mappings(pmap_t pmap, - vm_map_offset_t start, - vm_map_offset_t end, - pmap_traverse_callback callback, - void *context); + vm_map_offset_t start, + vm_map_offset_t end, + pmap_traverse_callback callback, + void *context); static int kern_dump_save_summary(void *refcon, core_save_summary_cb callback, void *context); static int kern_dump_save_seg_descriptions(void *refcon, core_save_segment_descriptions_cb callback, void *context); @@ -86,34 +86,33 @@ static int kern_dump_save_segment_data(void *refcon, core_save_segment_data_cb c static int kern_dump_pmap_traverse_preflight_callback(vm_map_offset_t start, - vm_map_offset_t end, - void *context); + vm_map_offset_t end, + void *context); static int kern_dump_pmap_traverse_send_segdesc_callback(vm_map_offset_t start, - vm_map_offset_t end, - void *context); + vm_map_offset_t end, + void *context); static int kern_dump_pmap_traverse_send_segdata_callback(vm_map_offset_t start, - vm_map_offset_t end, - void *context); + vm_map_offset_t end, + void *context); struct kdp_core_out_vars; -typedef int (*kern_dump_output_proc)(unsigned int request, char *corename, - uint64_t length, void *panic_data); - -struct kdp_core_out_vars -{ - kern_dump_output_proc outproc; - z_output_func zoutput; - size_t zipped; - uint64_t totalbytes; - uint64_t lastpercent; - IOReturn error; - unsigned outremain; - unsigned outlen; - unsigned writes; - Bytef * outbuf; +typedef int (*kern_dump_output_proc)(unsigned int request, char *corename, + uint64_t length, void *panic_data); + +struct kdp_core_out_vars { + kern_dump_output_proc outproc; + z_output_func zoutput; + size_t zipped; + uint64_t totalbytes; + uint64_t lastpercent; + IOReturn error; + unsigned outremain; + unsigned outlen; + unsigned writes; + Bytef * outbuf; }; extern uint32_t kdp_crashdump_pkt_size; @@ -121,11 +120,11 @@ extern uint32_t kdp_crashdump_pkt_size; static vm_offset_t kdp_core_zmem; static size_t kdp_core_zsize; static size_t kdp_core_zoffset; -static z_stream kdp_core_zs; +static z_stream kdp_core_zs; static uint64_t kdp_core_total_size; static uint64_t kdp_core_total_size_sent_uncomp; -#if CONFIG_EMBEDDED +#if defined(__arm__) || defined(__arm64__) struct xnu_hw_shmem_dbg_command_info *hwsd_info = NULL; #define KDP_CORE_HW_SHMEM_DBG_NUM_BUFFERS 2 @@ -140,15 +139,15 @@ struct xnu_hw_shmem_dbg_command_info *hwsd_info = NULL; #define OPTIMAL_ASTRIS_READSIZE 4064 struct kdp_hw_shmem_dbg_buf_elm { - vm_offset_t khsd_buf; - uint32_t khsd_data_length; - STAILQ_ENTRY(kdp_hw_shmem_dbg_buf_elm) khsd_elms; + vm_offset_t khsd_buf; + uint32_t khsd_data_length; + STAILQ_ENTRY(kdp_hw_shmem_dbg_buf_elm) khsd_elms; }; static STAILQ_HEAD(, kdp_hw_shmem_dbg_buf_elm) free_hw_shmem_dbg_bufs = - STAILQ_HEAD_INITIALIZER(free_hw_shmem_dbg_bufs); + STAILQ_HEAD_INITIALIZER(free_hw_shmem_dbg_bufs); static STAILQ_HEAD(, kdp_hw_shmem_dbg_buf_elm) hw_shmem_dbg_bufs_to_flush = - STAILQ_HEAD_INITIALIZER(hw_shmem_dbg_bufs_to_flush); + 
STAILQ_HEAD_INITIALIZER(hw_shmem_dbg_bufs_to_flush); static struct kdp_hw_shmem_dbg_buf_elm *currently_filling_buf = NULL; static struct kdp_hw_shmem_dbg_buf_elm *currently_flushing_buf = NULL; @@ -160,7 +159,7 @@ static uint64_t kdp_hw_shmem_dbg_contact_deadline = 0; static uint64_t kdp_hw_shmem_dbg_contact_deadline_interval = 0; #define KDP_HW_SHMEM_DBG_TIMEOUT_DEADLINE_SECS 30 -#endif /* CONFIG_EMBEDDED */ +#endif /* defined(__arm__) || defined(__arm64__) */ static boolean_t kern_dump_successful = FALSE; @@ -173,16 +172,19 @@ struct mach_core_fileheader kdp_core_header = { }; uint64_t kdp_core_ramdisk_addr = 0; uint64_t kdp_core_ramdisk_size = 0; -boolean_t kdp_has_polled_corefile(void) +boolean_t +kdp_has_polled_corefile(void) { - return (NULL != gIOPolledCoreFileVars); + return NULL != gIOPolledCoreFileVars; } -kern_return_t kdp_polled_corefile_error(void) +kern_return_t +kdp_polled_corefile_error(void) { - return gIOPolledCoreFileOpenRet; + return gIOPolledCoreFileOpenRet; } -#if CONFIG_EMBEDDED + +#if defined(__arm__) || defined(__arm64__) /* * Whenever we start a coredump, make sure the buffers * are all on the free queue and the state is as expected. @@ -190,7 +192,7 @@ kern_return_t kdp_polled_corefile_error(void) * a previous coredump attempt failed. */ static void -kern_dump_hw_shmem_dbg_reset() +kern_dump_hw_shmem_dbg_reset(void) { struct kdp_hw_shmem_dbg_buf_elm *cur_elm = NULL, *tmp_elm = NULL; @@ -238,7 +240,7 @@ kern_dump_hw_shmem_dbg_reset() * buffer back to the free queue. */ static int -kern_dump_hw_shmem_dbg_process_buffers() +kern_dump_hw_shmem_dbg_process_buffers(void) { FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, sizeof(*hwsd_info)); if (hwsd_info->xhsdci_status == XHSDCI_COREDUMP_ERROR) { @@ -247,7 +249,7 @@ kern_dump_hw_shmem_dbg_process_buffers() } else if (hwsd_info->xhsdci_status == XHSDCI_COREDUMP_BUF_EMPTY) { if (hwsd_info->xhsdci_seq_no != (kdp_hw_shmem_dbg_seq_no + 1)) { kern_coredump_log(NULL, "Detected stale/invalid seq num. Expected: %d, received %d\n", - (kdp_hw_shmem_dbg_seq_no + 1), hwsd_info->xhsdci_seq_no); + (kdp_hw_shmem_dbg_seq_no + 1), hwsd_info->xhsdci_seq_no); hwsd_info->xhsdci_status = XHSDCI_COREDUMP_ERROR; FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, sizeof(*hwsd_info)); return -1; @@ -276,7 +278,7 @@ kern_dump_hw_shmem_dbg_process_buffers() } kdp_hw_shmem_dbg_contact_deadline = mach_absolute_time() + - kdp_hw_shmem_dbg_contact_deadline_interval; + kdp_hw_shmem_dbg_contact_deadline_interval; return 0; } else if (mach_absolute_time() > kdp_hw_shmem_dbg_contact_deadline) { @@ -298,7 +300,7 @@ kern_dump_hw_shmem_dbg_process_buffers() * if it is non-zero (an error). */ static int -kern_dump_hw_shmem_dbg_get_buffer() +kern_dump_hw_shmem_dbg_get_buffer(void) { int ret = 0; @@ -325,7 +327,7 @@ kern_dump_hw_shmem_dbg_get_buffer() */ static int kern_dump_hw_shmem_dbg_buffer_proc(unsigned int request, __unused char *corename, - uint64_t length, void * data) + uint64_t length, void * data) { int ret = 0; @@ -341,7 +343,7 @@ kern_dump_hw_shmem_dbg_buffer_proc(unsigned int request, __unused char *corename * before setting the connection status to done. 
*/ while (!STAILQ_EMPTY(&hw_shmem_dbg_bufs_to_flush) || - currently_flushing_buf != NULL) { + currently_flushing_buf != NULL) { ret = kern_dump_hw_shmem_dbg_process_buffers(); if (ret) { return ret; @@ -355,7 +357,7 @@ kern_dump_hw_shmem_dbg_buffer_proc(unsigned int request, __unused char *corename */ if (hwsd_info->xhsdci_seq_no < kdp_hw_shmem_dbg_seq_no) { kern_coredump_log(NULL, "EOF Flush: Detected stale/invalid seq num. Expected: %d, received %d\n", - kdp_hw_shmem_dbg_seq_no, hwsd_info->xhsdci_seq_no); + kdp_hw_shmem_dbg_seq_no, hwsd_info->xhsdci_seq_no); return -1; } @@ -404,9 +406,9 @@ kern_dump_hw_shmem_dbg_buffer_proc(unsigned int request, __unused char *corename assert(kdp_hw_shmem_dbg_bufsize >= currently_filling_buf->khsd_data_length); bytes_to_copy = MIN(bytes_remaining, kdp_hw_shmem_dbg_bufsize - - currently_filling_buf->khsd_data_length); + currently_filling_buf->khsd_data_length); bcopy(data, (void *)(currently_filling_buf->khsd_buf + currently_filling_buf->khsd_data_length), - bytes_to_copy); + bytes_to_copy); currently_filling_buf->khsd_data_length += bytes_to_copy; @@ -429,74 +431,73 @@ kern_dump_hw_shmem_dbg_buffer_proc(unsigned int request, __unused char *corename return ret; } -#endif /* CONFIG_EMBEDDED */ +#endif /* defined(__arm__) || defined(__arm64__) */ -static IOReturn -kern_dump_disk_proc(unsigned int request, __unused char *corename, - uint64_t length, void * data) +static IOReturn +kern_dump_disk_proc(unsigned int request, __unused char *corename, + uint64_t length, void * data) { - uint64_t noffset; - uint32_t err = kIOReturnSuccess; - - switch (request) - { - case KDP_WRQ: - err = IOPolledFileSeek(gIOPolledCoreFileVars, 0); - if (kIOReturnSuccess != err) { - kern_coredump_log(NULL, "IOPolledFileSeek(gIOPolledCoreFileVars, 0) returned 0x%x\n", err); - break; - } - err = IOPolledFilePollersOpen(gIOPolledCoreFileVars, kIOPolledBeforeSleepState, false); - break; - - case KDP_SEEK: - noffset = *((uint64_t *) data); - err = IOPolledFileWrite(gIOPolledCoreFileVars, 0, 0, NULL); - if (kIOReturnSuccess != err) { - kern_coredump_log(NULL, "IOPolledFileWrite (during seek) returned 0x%x\n", err); - break; - } - err = IOPolledFileSeek(gIOPolledCoreFileVars, noffset); - if (kIOReturnSuccess != err) { - kern_coredump_log(NULL, "IOPolledFileSeek(0x%llx) returned 0x%x\n", noffset, err); - } - break; - - case KDP_DATA: - err = IOPolledFileWrite(gIOPolledCoreFileVars, data, length, NULL); - if (kIOReturnSuccess != err) { - kern_coredump_log(NULL, "IOPolledFileWrite(gIOPolledCoreFileVars, %p, 0x%llx, NULL) returned 0x%x\n", - data, length, err); - break; - } - break; - -#if CONFIG_EMBEDDED + uint64_t noffset; + uint32_t err = kIOReturnSuccess; + + switch (request) { + case KDP_WRQ: + err = IOPolledFileSeek(gIOPolledCoreFileVars, 0); + if (kIOReturnSuccess != err) { + kern_coredump_log(NULL, "IOPolledFileSeek(gIOPolledCoreFileVars, 0) returned 0x%x\n", err); + break; + } + err = IOPolledFilePollersOpen(gIOPolledCoreFileVars, kIOPolledBeforeSleepState, false); + break; + + case KDP_SEEK: + noffset = *((uint64_t *) data); + err = IOPolledFileWrite(gIOPolledCoreFileVars, 0, 0, NULL); + if (kIOReturnSuccess != err) { + kern_coredump_log(NULL, "IOPolledFileWrite (during seek) returned 0x%x\n", err); + break; + } + err = IOPolledFileSeek(gIOPolledCoreFileVars, noffset); + if (kIOReturnSuccess != err) { + kern_coredump_log(NULL, "IOPolledFileSeek(0x%llx) returned 0x%x\n", noffset, err); + } + break; + + case KDP_DATA: + err = IOPolledFileWrite(gIOPolledCoreFileVars, data, length, 
NULL); + if (kIOReturnSuccess != err) { + kern_coredump_log(NULL, "IOPolledFileWrite(gIOPolledCoreFileVars, %p, 0x%llx, NULL) returned 0x%x\n", + data, length, err); + break; + } + break; + +#if defined(__arm__) || defined(__arm64__) /* Only supported on embedded by the underlying polled mode driver */ case KDP_FLUSH: - err = IOPolledFileFlush(gIOPolledCoreFileVars); - if (kIOReturnSuccess != err) { - kern_coredump_log(NULL, "IOPolledFileFlush() returned 0x%x\n", err); - break; - } - break; -#endif + err = IOPolledFileFlush(gIOPolledCoreFileVars); + if (kIOReturnSuccess != err) { + kern_coredump_log(NULL, "IOPolledFileFlush() returned 0x%x\n", err); + break; + } + break; +#endif /* defined(__arm__) || defined(__arm64__) */ - case KDP_EOF: - err = IOPolledFileWrite(gIOPolledCoreFileVars, 0, 0, NULL); - if (kIOReturnSuccess != err) { - kern_coredump_log(NULL, "IOPolledFileWrite (during EOF) returned 0x%x\n", err); - break; - } - err = IOPolledFilePollersClose(gIOPolledCoreFileVars, kIOPolledBeforeSleepState); - if (kIOReturnSuccess != err) { - kern_coredump_log(NULL, "IOPolledFilePollersClose (during EOF) returned 0x%x\n", err); - break; - } - break; - } - - return (err); + case KDP_EOF: + err = IOPolledFileWrite(gIOPolledCoreFileVars, 0, 0, NULL); + if (kIOReturnSuccess != err) { + kern_coredump_log(NULL, "IOPolledFileWrite (during EOF) returned 0x%x\n", err); + break; + } + err = IOPolledFilePollersClose(gIOPolledCoreFileVars, kIOPolledBeforeSleepState); + if (kIOReturnSuccess != err) { + kern_coredump_log(NULL, "IOPolledFilePollersClose (during EOF) returned 0x%x\n", err); + break; + } + break; + } + + return err; } /* @@ -505,22 +506,22 @@ kern_dump_disk_proc(unsigned int request, __unused char *corename, static int kdp_core_zoutput(z_streamp strm, Bytef *buf, unsigned len) { - struct kdp_core_out_vars * vars = (typeof(vars)) strm->opaque; - IOReturn ret; + struct kdp_core_out_vars * vars = (typeof(vars))strm->opaque; + IOReturn ret; - vars->zipped += len; + vars->zipped += len; - if (vars->error >= 0) - { - if ((ret = (*vars->outproc)(KDP_DATA, NULL, len, buf)) != kIOReturnSuccess) - { - kern_coredump_log(NULL, "(kdp_core_zoutput) outproc(KDP_DATA, NULL, 0x%x, %p) returned 0x%x\n", + if (vars->error >= 0) { + if ((ret = (*vars->outproc)(KDP_DATA, NULL, len, buf)) != kIOReturnSuccess) { + kern_coredump_log(NULL, "(kdp_core_zoutput) outproc(KDP_DATA, NULL, 0x%x, %p) returned 0x%x\n", len, buf, ret); - vars->error = ret; + vars->error = ret; + } + if (!buf && !len) { + kern_coredump_log(NULL, "100.."); + } } - if (!buf && !len) kern_coredump_log(NULL, "100.."); - } - return (len); + return len; } /* @@ -529,153 +530,160 @@ kdp_core_zoutput(z_streamp strm, Bytef *buf, unsigned len) static int kdp_core_zoutputbuf(z_streamp strm, Bytef *inbuf, unsigned inlen) { - struct kdp_core_out_vars * vars = (typeof(vars)) strm->opaque; - unsigned remain; - IOReturn ret; - unsigned chunk; - boolean_t flush; - - remain = inlen; - vars->zipped += inlen; - flush = (!inbuf && !inlen); - - while ((vars->error >= 0) && (remain || flush)) - { - chunk = vars->outremain; - if (chunk > remain) chunk = remain; - if (!inbuf) bzero(&vars->outbuf[vars->outlen - vars->outremain], chunk); - else - { - bcopy(inbuf, &vars->outbuf[vars->outlen - vars->outremain], chunk); - inbuf += chunk; - } - vars->outremain -= chunk; - remain -= chunk; - - if (vars->outremain && !flush) break; - if ((ret = (*vars->outproc)(KDP_DATA, NULL, - vars->outlen - vars->outremain, - vars->outbuf)) != kIOReturnSuccess) - { - 
kern_coredump_log(NULL, "(kdp_core_zoutputbuf) outproc(KDP_DATA, NULL, 0x%x, %p) returned 0x%x\n", + struct kdp_core_out_vars * vars = (typeof(vars))strm->opaque; + unsigned remain; + IOReturn ret; + unsigned chunk; + boolean_t flush; + + remain = inlen; + vars->zipped += inlen; + flush = (!inbuf && !inlen); + + while ((vars->error >= 0) && (remain || flush)) { + chunk = vars->outremain; + if (chunk > remain) { + chunk = remain; + } + if (!inbuf) { + bzero(&vars->outbuf[vars->outlen - vars->outremain], chunk); + } else { + bcopy(inbuf, &vars->outbuf[vars->outlen - vars->outremain], chunk); + inbuf += chunk; + } + vars->outremain -= chunk; + remain -= chunk; + + if (vars->outremain && !flush) { + break; + } + if ((ret = (*vars->outproc)(KDP_DATA, NULL, + vars->outlen - vars->outremain, + vars->outbuf)) != kIOReturnSuccess) { + kern_coredump_log(NULL, "(kdp_core_zoutputbuf) outproc(KDP_DATA, NULL, 0x%x, %p) returned 0x%x\n", (vars->outlen - vars->outremain), vars->outbuf, ret); - vars->error = ret; - } - if (flush) - { - kern_coredump_log(NULL, "100.."); - flush = false; + vars->error = ret; + } + if (flush) { + kern_coredump_log(NULL, "100.."); + flush = false; + } + vars->outremain = vars->outlen; } - vars->outremain = vars->outlen; - } - return (inlen); + return inlen; } static int kdp_core_zinput(z_streamp strm, Bytef *buf, unsigned size) { - struct kdp_core_out_vars * vars = (typeof(vars)) strm->opaque; - uint64_t percent, total_in = 0; - unsigned len; + struct kdp_core_out_vars * vars = (typeof(vars))strm->opaque; + uint64_t percent, total_in = 0; + unsigned len; - len = strm->avail_in; - if (len > size) len = size; - if (len == 0) return 0; + len = strm->avail_in; + if (len > size) { + len = size; + } + if (len == 0) { + return 0; + } - if (strm->next_in != (Bytef *) strm) memcpy(buf, strm->next_in, len); - else bzero(buf, len); - strm->adler = z_crc32(strm->adler, buf, len); + if (strm->next_in != (Bytef *) strm) { + memcpy(buf, strm->next_in, len); + } else { + bzero(buf, len); + } + strm->adler = z_crc32(strm->adler, buf, len); - strm->avail_in -= len; - strm->next_in += len; - strm->total_in += len; + strm->avail_in -= len; + strm->next_in += len; + strm->total_in += len; - if (0 == (511 & vars->writes++)) - { - total_in = strm->total_in; - kdp_core_total_size_sent_uncomp = strm->total_in; + if (0 == (511 & vars->writes++)) { + total_in = strm->total_in; + kdp_core_total_size_sent_uncomp = strm->total_in; - percent = (total_in * 100) / vars->totalbytes; - if ((percent - vars->lastpercent) >= 10) - { - vars->lastpercent = percent; - kern_coredump_log(NULL, "%lld..\n", percent); + percent = (total_in * 100) / vars->totalbytes; + if ((percent - vars->lastpercent) >= 10) { + vars->lastpercent = percent; + kern_coredump_log(NULL, "%lld..\n", percent); + } } - } - return (int)len; + return (int)len; } static IOReturn kdp_core_stream_output_chunk(struct kdp_core_out_vars * vars, unsigned length, void * data) { - z_stream * zs; - int zr; - boolean_t flush; - - zs = &kdp_core_zs; - - if (kdp_corezip_disabled) - { - (*vars->zoutput)(zs, data, length); - } - else - { + z_stream * zs; + int zr; + boolean_t flush; - flush = (!length && !data); - zr = Z_OK; + zs = &kdp_core_zs; - assert(!zs->avail_in); + if (kdp_corezip_disabled) { + (*vars->zoutput)(zs, data, length); + } else { + flush = (!length && !data); + zr = Z_OK; + + assert(!zs->avail_in); + + while (vars->error >= 0) { + if (!zs->avail_in && !flush) { + if (!length) { + break; + } + zs->next_in = data ? 
data : (Bytef *) zs /* zero marker */; + zs->avail_in = length; + length = 0; + } + if (!zs->avail_out) { + zs->next_out = (Bytef *) zs; + zs->avail_out = UINT32_MAX; + } + zr = deflate(zs, flush ? Z_FINISH : Z_NO_FLUSH); + if (Z_STREAM_END == zr) { + break; + } + if (zr != Z_OK) { + kern_coredump_log(NULL, "ZERR %d\n", zr); + vars->error = zr; + } + } - while (vars->error >= 0) - { - if (!zs->avail_in && !flush) - { - if (!length) break; - zs->next_in = data ? data : (Bytef *) zs /* zero marker */; - zs->avail_in = length; - length = 0; - } - if (!zs->avail_out) - { - zs->next_out = (Bytef *) zs; - zs->avail_out = UINT32_MAX; - } - zr = deflate(zs, flush ? Z_FINISH : Z_NO_FLUSH); - if (Z_STREAM_END == zr) break; - if (zr != Z_OK) - { - kern_coredump_log(NULL, "ZERR %d\n", zr); - vars->error = zr; - } + if (flush) { + (*vars->zoutput)(zs, NULL, 0); + } } - if (flush) (*vars->zoutput)(zs, NULL, 0); - } - - return (vars->error); + return vars->error; } kern_return_t kdp_core_output(void *kdp_core_out_vars, uint64_t length, void * data) { - IOReturn err; - unsigned int chunk; - enum { kMaxZLibChunk = 1024*1024*1024 }; - struct kdp_core_out_vars *vars = (struct kdp_core_out_vars *)kdp_core_out_vars; - - do - { - if (length <= kMaxZLibChunk) chunk = (typeof(chunk)) length; - else chunk = kMaxZLibChunk; - err = kdp_core_stream_output_chunk(vars, chunk, data); - - length -= chunk; - if (data) data = (void *) (((uintptr_t) data) + chunk); - } - while (length && (kIOReturnSuccess == err)); - - return (err); + IOReturn err; + unsigned int chunk; + enum { kMaxZLibChunk = 1024 * 1024 * 1024 }; + struct kdp_core_out_vars *vars = (struct kdp_core_out_vars *)kdp_core_out_vars; + + do{ + if (length <= kMaxZLibChunk) { + chunk = (typeof(chunk))length; + } else { + chunk = kMaxZLibChunk; + } + err = kdp_core_stream_output_chunk(vars, chunk, data); + + length -= chunk; + if (data) { + data = (void *) (((uintptr_t) data) + chunk); + } + }while (length && (kIOReturnSuccess == err)); + + return err; } #if defined(__arm__) || defined(__arm64__) @@ -688,212 +696,201 @@ extern vm_size_t c_buffers_size; ppnum_t kernel_pmap_present_mapping(uint64_t vaddr, uint64_t * pvincr, uintptr_t * pvphysaddr) { - ppnum_t ppn = 0; - uint64_t vincr = PAGE_SIZE_64; - - assert(!(vaddr & PAGE_MASK_64)); - - /* VA ranges to exclude */ - if (vaddr == c_buffers) - { - /* compressor data */ - ppn = 0; - vincr = c_buffers_size; - } - else if (vaddr == kdp_core_zmem) - { - /* zlib working memory */ - ppn = 0; - vincr = kdp_core_zsize; - } - else if ((kdp_core_ramdisk_addr != 0) && (vaddr == kdp_core_ramdisk_addr)) - { - ppn = 0; - vincr = kdp_core_ramdisk_size; - } - else + ppnum_t ppn = 0; + uint64_t vincr = PAGE_SIZE_64; + + assert(!(vaddr & PAGE_MASK_64)); + + /* VA ranges to exclude */ + if (vaddr == c_buffers) { + /* compressor data */ + ppn = 0; + vincr = c_buffers_size; + } else if (vaddr == kdp_core_zmem) { + /* zlib working memory */ + ppn = 0; + vincr = kdp_core_zsize; + } else if ((kdp_core_ramdisk_addr != 0) && (vaddr == kdp_core_ramdisk_addr)) { + ppn = 0; + vincr = kdp_core_ramdisk_size; + } else #if defined(__arm64__) && defined(CONFIG_XNUPOST) - if (vaddr == _COMM_HIGH_PAGE64_BASE_ADDRESS) - { - /* not readable */ - ppn = 0; - vincr = _COMM_PAGE_AREA_LENGTH; - } - else + if (vaddr == _COMM_HIGH_PAGE64_BASE_ADDRESS) { + /* not readable */ + ppn = 0; + vincr = _COMM_PAGE_AREA_LENGTH; + } else #endif /* defined(__arm64__) */ #if defined(__arm__) || defined(__arm64__) - if (vaddr == phystokv(avail_start)) - { - /* physical 
memory map */ - ppn = 0; - vincr = (avail_end - avail_start); - } - else + if (vaddr == phystokv(avail_start)) { + /* physical memory map */ + ppn = 0; + vincr = (avail_end - avail_start); + } else #endif /* defined(__arm__) || defined(__arm64__) */ - ppn = pmap_find_phys(kernel_pmap, vaddr); + { + ppn = (pvphysaddr != NULL ? + pmap_find_phys(kernel_pmap, vaddr) : + pmap_find_phys_nofault(kernel_pmap, vaddr)); + } - *pvincr = round_page_64(vincr); + *pvincr = round_page_64(vincr); - if (ppn && pvphysaddr) - { - uint64_t phys = ptoa_64(ppn); - if (physmap_enclosed(phys)) { - *pvphysaddr = phystokv(phys); - } else { - ppn = 0; + if (ppn && pvphysaddr) { + uint64_t phys = ptoa_64(ppn); + if (physmap_enclosed(phys)) { + *pvphysaddr = phystokv(phys); + } else { + ppn = 0; + } } - } - return (ppn); + return ppn; } int pmap_traverse_present_mappings(pmap_t __unused pmap, - vm_map_offset_t start, - vm_map_offset_t end, - pmap_traverse_callback callback, - void *context) + vm_map_offset_t start, + vm_map_offset_t end, + pmap_traverse_callback callback, + void *context) { - IOReturn ret; - vm_map_offset_t vcurstart, vcur; - uint64_t vincr = 0; - vm_map_offset_t debug_start = trunc_page((vm_map_offset_t) debug_buf_base); - vm_map_offset_t debug_end = round_page((vm_map_offset_t) (debug_buf_base + debug_buf_size)); + IOReturn ret; + vm_map_offset_t vcurstart, vcur; + uint64_t vincr = 0; + vm_map_offset_t debug_start = trunc_page((vm_map_offset_t) debug_buf_base); + vm_map_offset_t debug_end = round_page((vm_map_offset_t) (debug_buf_base + debug_buf_size)); #if defined(XNU_TARGET_OS_BRIDGE) - vm_map_offset_t macos_panic_start = trunc_page((vm_map_offset_t) macos_panic_base); - vm_map_offset_t macos_panic_end = round_page((vm_map_offset_t) (macos_panic_base + macos_panic_size)); + vm_map_offset_t macos_panic_start = trunc_page((vm_map_offset_t) macos_panic_base); + vm_map_offset_t macos_panic_end = round_page((vm_map_offset_t) (macos_panic_base + macos_panic_size)); #endif - boolean_t lastvavalid; + boolean_t lastvavalid; #if defined(__arm__) || defined(__arm64__) - vm_page_t m = VM_PAGE_NULL; + vm_page_t m = VM_PAGE_NULL; #endif #if defined(__x86_64__) - assert(!is_ept_pmap(pmap)); + assert(!is_ept_pmap(pmap)); #endif - /* Assumes pmap is locked, or being called from the kernel debugger */ - - if (start > end) return (KERN_INVALID_ARGUMENT); + /* Assumes pmap is locked, or being called from the kernel debugger */ - ret = KERN_SUCCESS; - lastvavalid = FALSE; - for (vcur = vcurstart = start; (ret == KERN_SUCCESS) && (vcur < end); ) { - ppnum_t ppn = 0; + if (start > end) { + return KERN_INVALID_ARGUMENT; + } + + ret = KERN_SUCCESS; + lastvavalid = FALSE; + for (vcur = vcurstart = start; (ret == KERN_SUCCESS) && (vcur < end);) { + ppnum_t ppn = 0; #if defined(__arm__) || defined(__arm64__) - /* We're at the start of the physmap, so pull out the pagetable pages that - * are accessed through that region.*/ - if (vcur == phystokv(avail_start) && vm_object_lock_try_shared(&pmap_object_store)) - m = (vm_page_t)vm_page_queue_first(&pmap_object_store.memq); + /* We're at the start of the physmap, so pull out the pagetable pages that + * are accessed through that region.*/ + if (vcur == phystokv(avail_start) && vm_object_lock_try_shared(&pmap_object_store)) { + m = (vm_page_t)vm_page_queue_first(&pmap_object_store.memq); + } - if (m != VM_PAGE_NULL) - { - vm_map_offset_t vprev = vcur; - ppn = (ppnum_t)atop(avail_end); - while (!vm_page_queue_end(&pmap_object_store.memq, (vm_page_queue_entry_t)m)) - { - /* Ignore 
pages that come from the static region and have already been dumped.*/ - if (VM_PAGE_GET_PHYS_PAGE(m) >= atop(avail_start)) - { - ppn = VM_PAGE_GET_PHYS_PAGE(m); - break; - } - m = (vm_page_t)vm_page_queue_next(&m->vmp_listq); - } - vincr = PAGE_SIZE_64; - if (ppn == atop(avail_end)) - { - vm_object_unlock(&pmap_object_store); - m = VM_PAGE_NULL; - // avail_end is not a valid physical address, - // so phystokv(avail_end) may not produce the expected result. - vcur = phystokv(avail_start) + (avail_end - avail_start); - } else { - m = (vm_page_t)vm_page_queue_next(&m->vmp_listq); - vcur = phystokv(ptoa(ppn)); - } - if (vcur != vprev) - { - ret = callback(vcurstart, vprev, context); - lastvavalid = FALSE; - } - } - if (m == VM_PAGE_NULL) - ppn = kernel_pmap_present_mapping(vcur, &vincr, NULL); + if (m != VM_PAGE_NULL) { + vm_map_offset_t vprev = vcur; + ppn = (ppnum_t)atop(avail_end); + while (!vm_page_queue_end(&pmap_object_store.memq, (vm_page_queue_entry_t)m)) { + /* Ignore pages that come from the static region and have already been dumped.*/ + if (VM_PAGE_GET_PHYS_PAGE(m) >= atop(avail_start)) { + ppn = VM_PAGE_GET_PHYS_PAGE(m); + break; + } + m = (vm_page_t)vm_page_queue_next(&m->vmp_listq); + } + vincr = PAGE_SIZE_64; + if (ppn == atop(avail_end)) { + vm_object_unlock(&pmap_object_store); + m = VM_PAGE_NULL; + // avail_end is not a valid physical address, + // so phystokv(avail_end) may not produce the expected result. + vcur = phystokv(avail_start) + (avail_end - avail_start); + } else { + m = (vm_page_t)vm_page_queue_next(&m->vmp_listq); + vcur = phystokv(ptoa(ppn)); + } + if (vcur != vprev) { + ret = callback(vcurstart, vprev, context); + lastvavalid = FALSE; + } + } + if (m == VM_PAGE_NULL) { + ppn = kernel_pmap_present_mapping(vcur, &vincr, NULL); + } #else /* defined(__arm__) || defined(__arm64__) */ - ppn = kernel_pmap_present_mapping(vcur, &vincr, NULL); + ppn = kernel_pmap_present_mapping(vcur, &vincr, NULL); #endif - if (ppn != 0) - { - if (((vcur < debug_start) || (vcur >= debug_end)) - && !(pmap_valid_page(ppn) || bootloader_valid_page(ppn)) + if (ppn != 0) { + if (((vcur < debug_start) || (vcur >= debug_end)) + && !(pmap_valid_page(ppn) || bootloader_valid_page(ppn)) #if defined(XNU_TARGET_OS_BRIDGE) - // include the macOS panic region if it's mapped - && ((vcur < macos_panic_start) || (vcur >= macos_panic_end)) + // include the macOS panic region if it's mapped + && ((vcur < macos_panic_start) || (vcur >= macos_panic_end)) #endif - ) - { - /* not something we want */ - ppn = 0; - } - } + ) { + /* not something we want */ + ppn = 0; + } + } - if (ppn != 0) { - if (!lastvavalid) { - /* Start of a new virtual region */ - vcurstart = vcur; - lastvavalid = TRUE; - } - } else { - if (lastvavalid) { - /* end of a virtual region */ - ret = callback(vcurstart, vcur, context); - lastvavalid = FALSE; - } + if (ppn != 0) { + if (!lastvavalid) { + /* Start of a new virtual region */ + vcurstart = vcur; + lastvavalid = TRUE; + } + } else { + if (lastvavalid) { + /* end of a virtual region */ + ret = callback(vcurstart, vcur, context); + lastvavalid = FALSE; + } #if defined(__x86_64__) - /* Try to skip by 2MB if possible */ - if ((vcur & PDMASK) == 0) { - pd_entry_t *pde; - pde = pmap_pde(pmap, vcur); - if (0 == pde || ((*pde & INTEL_PTE_VALID) == 0)) { - /* Make sure we wouldn't overflow */ - if (vcur < (end - NBPD)) { - vincr = NBPD; - } - } - } + /* Try to skip by 2MB if possible */ + if ((vcur & PDMASK) == 0) { + pd_entry_t *pde; + pde = pmap_pde(pmap, vcur); + if (0 == pde || 
((*pde & INTEL_PTE_VALID) == 0)) { + /* Make sure we wouldn't overflow */ + if (vcur < (end - NBPD)) { + vincr = NBPD; + } + } + } #endif /* defined(__x86_64__) */ + } + vcur += vincr; + } + + if ((ret == KERN_SUCCESS) && lastvavalid) { + /* send previous run */ + ret = callback(vcurstart, vcur, context); } - vcur += vincr; - } - - if ((ret == KERN_SUCCESS) && lastvavalid) { - /* send previous run */ - ret = callback(vcurstart, vcur, context); - } #if KASAN - if (ret == KERN_SUCCESS) { - ret = kasan_traverse_mappings(callback, context); - } + if (ret == KERN_SUCCESS) { + ret = kasan_traverse_mappings(callback, context); + } #endif - return (ret); + return ret; } -struct kern_dump_preflight_context -{ +struct kern_dump_preflight_context { uint32_t region_count; uint64_t dumpable_bytes; }; int kern_dump_pmap_traverse_preflight_callback(vm_map_offset_t start, - vm_map_offset_t end, - void *context) + vm_map_offset_t end, + void *context) { struct kern_dump_preflight_context *kdc = (struct kern_dump_preflight_context *)context; IOReturn ret = KERN_SUCCESS; @@ -901,20 +898,19 @@ kern_dump_pmap_traverse_preflight_callback(vm_map_offset_t start, kdc->region_count++; kdc->dumpable_bytes += (end - start); - return (ret); + return ret; } -struct kern_dump_send_seg_desc_context -{ +struct kern_dump_send_seg_desc_context { core_save_segment_descriptions_cb callback; void *context; }; int kern_dump_pmap_traverse_send_segdesc_callback(vm_map_offset_t start, - vm_map_offset_t end, - void *context) + vm_map_offset_t end, + void *context) { struct kern_dump_send_seg_desc_context *kds_context = (struct kern_dump_send_seg_desc_context *)context; uint64_t seg_start = (uint64_t) start; @@ -923,16 +919,15 @@ kern_dump_pmap_traverse_send_segdesc_callback(vm_map_offset_t start, return kds_context->callback(seg_start, seg_end, kds_context->context); } -struct kern_dump_send_segdata_context -{ +struct kern_dump_send_segdata_context { core_save_segment_data_cb callback; void *context; }; int kern_dump_pmap_traverse_send_segdata_callback(vm_map_offset_t start, - vm_map_offset_t end, - void *context) + vm_map_offset_t end, + void *context) { struct kern_dump_send_segdata_context *kds_context = (struct kern_dump_send_segdata_context *)context; @@ -944,13 +939,14 @@ kern_dump_save_summary(__unused void *refcon, core_save_summary_cb callback, voi { struct kern_dump_preflight_context kdc_preflight = { }; uint64_t thread_state_size = 0, thread_count = 0; + vm_map_offset_t vstart = kdp_core_start_addr(); kern_return_t ret; ret = pmap_traverse_present_mappings(kernel_pmap, - VM_MIN_KERNEL_AND_KEXT_ADDRESS, - VM_MAX_KERNEL_ADDRESS, - kern_dump_pmap_traverse_preflight_callback, - &kdc_preflight); + vstart, + VM_MAX_KERNEL_ADDRESS, + kern_dump_pmap_traverse_preflight_callback, + &kdc_preflight); if (ret != KERN_SUCCESS) { kern_coredump_log(context, "save_summary: pmap traversal failed: %d\n", ret); return ret; @@ -959,13 +955,14 @@ kern_dump_save_summary(__unused void *refcon, core_save_summary_cb callback, voi kern_collectth_state_size(&thread_count, &thread_state_size); ret = callback(kdc_preflight.region_count, kdc_preflight.dumpable_bytes, - thread_count, thread_state_size, 0, context); + thread_count, thread_state_size, 0, context); return ret; } static int kern_dump_save_seg_descriptions(__unused void *refcon, core_save_segment_descriptions_cb callback, void *context) { + vm_map_offset_t vstart = kdp_core_start_addr(); kern_return_t ret; struct kern_dump_send_seg_desc_context kds_context; @@ -973,10 +970,10 @@ 
kern_dump_save_seg_descriptions(__unused void *refcon, core_save_segment_descrip kds_context.context = context; ret = pmap_traverse_present_mappings(kernel_pmap, - VM_MIN_KERNEL_AND_KEXT_ADDRESS, - VM_MAX_KERNEL_ADDRESS, - kern_dump_pmap_traverse_send_segdesc_callback, - &kds_context); + vstart, + VM_MAX_KERNEL_ADDRESS, + kern_dump_pmap_traverse_send_segdesc_callback, + &kds_context); if (ret != KERN_SUCCESS) { kern_coredump_log(context, "save_seg_desc: pmap traversal failed: %d\n", ret); return ret; @@ -996,7 +993,7 @@ kern_dump_save_thread_state(__unused void *refcon, void *buf, core_save_thread_s if (thread_state_size > 0) { void * iter = NULL; do { - kern_collectth_state (current_thread(), buf, thread_state_size, &iter); + kern_collectth_state(current_thread(), buf, thread_state_size, &iter); ret = callback(buf, context); if (ret != KERN_SUCCESS) { @@ -1017,6 +1014,7 @@ kern_dump_save_sw_vers(__unused void *refcon, core_save_sw_vers_cb callback, voi static int kern_dump_save_segment_data(__unused void *refcon, core_save_segment_data_cb callback, void *context) { + vm_map_offset_t vstart = kdp_core_start_addr(); kern_return_t ret; struct kern_dump_send_segdata_context kds_context; @@ -1024,8 +1022,8 @@ kern_dump_save_segment_data(__unused void *refcon, core_save_segment_data_cb cal kds_context.context = context; ret = pmap_traverse_present_mappings(kernel_pmap, - VM_MIN_KERNEL_AND_KEXT_ADDRESS, - VM_MAX_KERNEL_ADDRESS, kern_dump_pmap_traverse_send_segdata_callback, &kds_context); + vstart, + VM_MAX_KERNEL_ADDRESS, kern_dump_pmap_traverse_send_segdata_callback, &kds_context); if (ret != KERN_SUCCESS) { kern_coredump_log(context, "save_seg_data: pmap traversal failed: %d\n", ret); return ret; @@ -1079,14 +1077,14 @@ kern_dump_update_header(struct kdp_core_out_vars *outvars) foffset = 0; if ((ret = (outvars->outproc)(KDP_SEEK, NULL, sizeof(foffset), &foffset)) != kIOReturnSuccess) { kern_coredump_log(NULL, "(kern_dump_update_header) outproc(KDP_SEEK, NULL, %lu, %p) foffset = 0x%llx returned 0x%x\n", - sizeof(foffset), &foffset, foffset, ret); + sizeof(foffset), &foffset, foffset, ret); return ret; } if ((ret = (outvars->outproc)(KDP_DATA, NULL, sizeof(kdp_core_header), &kdp_core_header)) != kIOReturnSuccess) { kern_coredump_log(NULL, "(kern_dump_update_header) outproc(KDP_DATA, NULL, %lu, %p) returned 0x%x\n", - sizeof(kdp_core_header), &kdp_core_header, ret); - return ret; + sizeof(kdp_core_header), &kdp_core_header, ret); + return ret; } if ((ret = (outvars->outproc)(KDP_DATA, NULL, 0, NULL)) != kIOReturnSuccess) { @@ -1094,12 +1092,12 @@ kern_dump_update_header(struct kdp_core_out_vars *outvars) return ret; } -#if CONFIG_EMBEDDED +#if defined(__arm__) || defined(__arm64__) if ((ret = (outvars->outproc)(KDP_FLUSH, NULL, 0, NULL)) != kIOReturnSuccess) { kern_coredump_log(NULL, "(kern_dump_update_header) outproc explicit flush returned 0x%x\n", ret); return ret; } -#endif +#endif /* defined(__arm__) || defined(__arm64__) */ return KERN_SUCCESS; } @@ -1117,7 +1115,7 @@ kern_dump_record_file(void *kdp_core_out_vars, const char *filename, uint64_t fi kdp_core_header.files[kdp_core_header.num_files].gzip_offset = file_offset; kdp_core_header.files[kdp_core_header.num_files].gzip_length = outvars->zipped; strncpy((char *)&kdp_core_header.files[kdp_core_header.num_files].core_name, filename, - MACH_CORE_FILEHEADER_NAMELEN); + MACH_CORE_FILEHEADER_NAMELEN); kdp_core_header.files[kdp_core_header.num_files].core_name[MACH_CORE_FILEHEADER_NAMELEN - 1] = '\0'; kdp_core_header.num_files++; 
kdp_core_header.signature = MACH_CORE_FILEHEADER_SIGNATURE; @@ -1138,7 +1136,7 @@ kern_dump_seek_to_next_file(void *kdp_core_out_vars, uint64_t next_file_offset) if ((ret = (outvars->outproc)(KDP_SEEK, NULL, sizeof(next_file_offset), &next_file_offset)) != kIOReturnSuccess) { kern_coredump_log(NULL, "(kern_dump_seek_to_next_file) outproc(KDP_SEEK, NULL, %lu, %p) foffset = 0x%llx returned 0x%x\n", - sizeof(next_file_offset), &next_file_offset, next_file_offset, ret); + sizeof(next_file_offset), &next_file_offset, next_file_offset, ret); } return ret; @@ -1149,8 +1147,8 @@ do_kern_dump(kern_dump_output_proc outproc, enum kern_dump_type kd_variant) { struct kdp_core_out_vars outvars = { }; - char *log_start = NULL, *buf = NULL; - size_t existing_log_size = 0, new_log_len = 0; + char *coredump_log_start = NULL, *buf = NULL; + size_t reserved_debug_logsize = 0, prior_debug_logsize = 0; uint64_t foffset = 0; int ret = 0; boolean_t output_opened = FALSE, dump_succeeded = TRUE; @@ -1159,21 +1157,21 @@ do_kern_dump(kern_dump_output_proc outproc, enum kern_dump_type kd_variant) * Record the initial panic log buffer length so we can dump the coredump log * and panic log to disk */ - log_start = debug_buf_ptr; -#if CONFIG_EMBEDDED + coredump_log_start = debug_buf_ptr; +#if defined(__arm__) || defined(__arm64__) assert(panic_info->eph_other_log_offset != 0); assert(panic_info->eph_panic_log_len != 0); /* Include any data from before the panic log as well */ - existing_log_size = (panic_info->eph_panic_log_offset - sizeof(struct embedded_panic_header)) + - panic_info->eph_panic_log_len + panic_info->eph_other_log_len; -#else /* CONFIG_EMBEDDED */ + prior_debug_logsize = (panic_info->eph_panic_log_offset - sizeof(struct embedded_panic_header)) + + panic_info->eph_panic_log_len + panic_info->eph_other_log_len; +#else /* defined(__arm__) || defined(__arm64__) */ if (panic_info->mph_panic_log_offset != 0) { - existing_log_size = (panic_info->mph_panic_log_offset - sizeof(struct macos_panic_header)) + - panic_info->mph_panic_log_len + panic_info->mph_other_log_len; + prior_debug_logsize = (panic_info->mph_panic_log_offset - sizeof(struct macos_panic_header)) + + panic_info->mph_panic_log_len + panic_info->mph_other_log_len; } -#endif /* CONFIG_EMBEDDED */ +#endif /* defined(__arm__) || defined(__arm64__) */ - assert (existing_log_size <= debug_buf_size); + assert(prior_debug_logsize <= debug_buf_size); if ((kd_variant == KERN_DUMP_DISK) || (kd_variant == KERN_DUMP_STACKSHOT_DISK)) { /* Open the file for output */ @@ -1191,22 +1189,23 @@ do_kern_dump(kern_dump_output_proc outproc, enum kern_dump_type kd_variant) if ((kd_variant == KERN_DUMP_DISK) || (kd_variant == KERN_DUMP_STACKSHOT_DISK)) { outvars.zoutput = kdp_core_zoutput; + reserved_debug_logsize = prior_debug_logsize + KERN_COREDUMP_MAXDEBUGLOGSIZE; /* Space for file header, panic log, core log */ - foffset = (KERN_COREDUMP_HEADERSIZE + existing_log_size + KERN_COREDUMP_MAXDEBUGLOGSIZE + - KERN_COREDUMP_BEGIN_FILEBYTES_ALIGN - 1) & ~(KERN_COREDUMP_BEGIN_FILEBYTES_ALIGN - 1); + foffset = ((KERN_COREDUMP_HEADERSIZE + reserved_debug_logsize + (KERN_COREDUMP_BEGIN_FILEBYTES_ALIGN - 1)) \ + & ~(KERN_COREDUMP_BEGIN_FILEBYTES_ALIGN - 1)); kdp_core_header.log_offset = KERN_COREDUMP_HEADERSIZE; /* Seek the calculated offset (we'll scrollback later to flush the logs and header) */ if ((ret = (*outproc)(KDP_SEEK, NULL, sizeof(foffset), &foffset)) != kIOReturnSuccess) { kern_coredump_log(NULL, "(do_kern_dump seek begin) outproc(KDP_SEEK, NULL, %lu, %p) foffset = 
0x%llx returned 0x%x\n", - sizeof(foffset), &foffset, foffset, ret); + sizeof(foffset), &foffset, foffset, ret); dump_succeeded = FALSE; goto exit; } } else if (kd_variant == KERN_DUMP_NET) { assert((kdp_core_zoffset + kdp_crashdump_pkt_size) <= kdp_core_zsize); outvars.zoutput = kdp_core_zoutputbuf; -#if CONFIG_EMBEDDED +#if defined(__arm__) || defined(__arm64__) } else { /* KERN_DUMP_HW_SHMEM_DBG */ outvars.zoutput = kdp_core_zoutput; kern_dump_hw_shmem_dbg_reset(); @@ -1218,7 +1217,7 @@ do_kern_dump(kern_dump_output_proc outproc, enum kern_dump_type kd_variant) #endif kern_coredump_log(NULL, "%s", (kd_variant == KERN_DUMP_DISK) ? "Writing local cores..." : - "Transmitting kernel state, please wait:\n"); + "Transmitting kernel state, please wait:\n"); #if defined(__x86_64__) @@ -1230,7 +1229,7 @@ do_kern_dump(kern_dump_output_proc outproc, enum kern_dump_type kd_variant) dump_succeeded = FALSE; } else if ((ret = kdp_core_output(&outvars, panic_stackshot_len, (void *)panic_stackshot_buf)) != KERN_SUCCESS) { kern_coredump_log(NULL, "Failed to write panic stackshot to file, kdp_coreoutput(outvars, %lu, %p) returned 0x%x\n", - panic_stackshot_len, (void *) panic_stackshot_buf, ret); + panic_stackshot_len, (void *) panic_stackshot_buf, ret); dump_succeeded = FALSE; } else if ((ret = kdp_core_output(&outvars, 0, NULL)) != KERN_SUCCESS) { kern_coredump_log(NULL, "Failed to flush stackshot data : kdp_core_output(%p, 0, NULL) returned 0x%x\n", &outvars, ret); @@ -1265,63 +1264,71 @@ do_kern_dump(kern_dump_output_proc outproc, enum kern_dump_type kd_variant) } if (kd_variant == KERN_DUMP_DISK) { + assert(reserved_debug_logsize != 0); + size_t remaining_debug_logspace = reserved_debug_logsize; + /* Write the debug log -- first seek to the end of the corefile header */ foffset = KERN_COREDUMP_HEADERSIZE; if ((ret = (*outproc)(KDP_SEEK, NULL, sizeof(foffset), &foffset)) != kIOReturnSuccess) { kern_coredump_log(NULL, "(do_kern_dump seek logfile) outproc(KDP_SEEK, NULL, %lu, %p) foffset = 0x%llx returned 0x%x\n", - sizeof(foffset), &foffset, foffset, ret); + sizeof(foffset), &foffset, foffset, ret); dump_succeeded = FALSE; goto exit; } - new_log_len = debug_buf_ptr - log_start; - if (new_log_len > KERN_COREDUMP_MAXDEBUGLOGSIZE) { - new_log_len = KERN_COREDUMP_MAXDEBUGLOGSIZE; - } - - /* This data is after the panic stackshot, we need to write it separately */ -#if CONFIG_EMBEDDED - existing_log_size -= panic_info->eph_other_log_len; + /* First flush the data from just the paniclog */ + size_t initial_log_length = 0; +#if defined(__arm__) || defined(__arm64__) + initial_log_length = (panic_info->eph_panic_log_offset - sizeof(struct embedded_panic_header)) + + panic_info->eph_panic_log_len; #else - if (existing_log_size) { - existing_log_size -= panic_info->mph_other_log_len; + if (panic_info->mph_panic_log_offset != 0) { + initial_log_length = (panic_info->mph_panic_log_offset - sizeof(struct macos_panic_header)) + + panic_info->mph_panic_log_len; } #endif - /* - * Write out the paniclog (from the beginning of the debug - * buffer until the start of the stackshot) - */ buf = debug_buf_base; - if ((ret = (*outproc)(KDP_DATA, NULL, existing_log_size, buf)) != kIOReturnSuccess) { - kern_coredump_log(NULL, "(do_kern_dump paniclog) outproc(KDP_DATA, NULL, %lu, %p) returned 0x%x\n", - existing_log_size, buf, ret); - dump_succeeded = FALSE; - goto exit; + if ((ret = (*outproc)(KDP_DATA, NULL, initial_log_length, buf)) != kIOReturnSuccess) { + kern_coredump_log(NULL, "(do_kern_dump paniclog) outproc(KDP_DATA, 
NULL, %lu, %p) returned 0x%x\n", + initial_log_length, buf, ret); + dump_succeeded = FALSE; + goto exit; } + remaining_debug_logspace -= initial_log_length; + + /* Next include any log data from after the stackshot (the beginning of the 'other' log). */ +#if defined(__arm__) || defined(__arm64__) + buf = (char *)(((char *)panic_info) + (uintptr_t) panic_info->eph_other_log_offset); +#else /* - * The next part of the log we're interested in is the beginning of the 'other' log. - * Include any data after the panic stackshot but before we started the coredump log - * (see above) + * There may be no paniclog if we're doing a coredump after a call to Debugger() on x86 if debugger_is_panic was + * configured to FALSE based on the boot-args. In that case just start from where the debug buffer was when + * we began taking a coredump. */ -#if CONFIG_EMBEDDED - buf = (char *)(((char *)panic_info) + (uintptr_t) panic_info->eph_other_log_offset); - new_log_len += panic_info->eph_other_log_len; -#else /* CONFIG_EMBEDDED */ - buf = (char *)(((char *)panic_info) + (uintptr_t) panic_info->mph_other_log_offset); - new_log_len += panic_info->mph_other_log_len; -#endif /* CONFIG_EMBEDDED */ + if (panic_info->mph_other_log_offset != 0) { + buf = (char *)(((char *)panic_info) + (uintptr_t) panic_info->mph_other_log_offset); + } else { + buf = coredump_log_start; + } +#endif + assert(debug_buf_ptr >= buf); + + size_t other_log_length = debug_buf_ptr - buf; + if (other_log_length > remaining_debug_logspace) { + other_log_length = remaining_debug_logspace; + } /* Write the coredump log */ - if ((ret = (*outproc)(KDP_DATA, NULL, new_log_len, buf)) != kIOReturnSuccess) { + if ((ret = (*outproc)(KDP_DATA, NULL, other_log_length, buf)) != kIOReturnSuccess) { kern_coredump_log(NULL, "(do_kern_dump coredump log) outproc(KDP_DATA, NULL, %lu, %p) returned 0x%x\n", - new_log_len, buf, ret); + other_log_length, buf, ret); dump_succeeded = FALSE; goto exit; } - kdp_core_header.log_length = existing_log_size + new_log_len; + kdp_core_header.log_length = initial_log_length + other_log_length; kern_dump_update_header(&outvars); } @@ -1333,23 +1340,23 @@ exit: } /* If applicable, update the panic header and flush it so we update the CRC */ -#if CONFIG_EMBEDDED +#if defined(__arm__) || defined(__arm64__) panic_info->eph_panic_flags |= (dump_succeeded ? EMBEDDED_PANIC_HEADER_FLAG_COREDUMP_COMPLETE : - EMBEDDED_PANIC_HEADER_FLAG_COREDUMP_FAILED); + EMBEDDED_PANIC_HEADER_FLAG_COREDUMP_FAILED); paniclog_flush(); #else if (panic_info->mph_panic_log_offset != 0) { panic_info->mph_panic_flags |= (dump_succeeded ? MACOS_PANIC_HEADER_FLAG_COREDUMP_COMPLETE : - MACOS_PANIC_HEADER_FLAG_COREDUMP_FAILED); + MACOS_PANIC_HEADER_FLAG_COREDUMP_FAILED); paniclog_flush(); } #endif - return (dump_succeeded ? 0 : -1); + return dump_succeeded ? 
0 : -1; } boolean_t -dumped_kernel_core() +dumped_kernel_core(void) { return kern_dump_successful; } @@ -1363,11 +1370,17 @@ kern_dump(enum kern_dump_type kd_variant) kasan_disable(); #endif if ((kd_variant == KERN_DUMP_DISK) || (kd_variant == KERN_DUMP_STACKSHOT_DISK)) { - if (dumped_local) return (0); - if (local_dump_in_progress) return (-1); + if (dumped_local) { + return 0; + } + if (local_dump_in_progress) { + return -1; + } local_dump_in_progress = TRUE; -#if CONFIG_EMBEDDED - hwsd_info->xhsdci_status = XHSDCI_STATUS_KERNEL_BUSY; +#if defined(__arm__) || defined(__arm64__) + if (hwsd_info != NULL) { + hwsd_info->xhsdci_status = XHSDCI_STATUS_KERNEL_BUSY; + } #endif ret = do_kern_dump(&kern_dump_disk_proc, kd_variant); if (ret == 0) { @@ -1377,7 +1390,7 @@ kern_dump(enum kern_dump_type kd_variant) } return ret; -#if CONFIG_EMBEDDED +#if defined(__arm__) || defined(__arm64__) } else if (kd_variant == KERN_DUMP_HW_SHMEM_DBG) { ret = do_kern_dump(&kern_dump_hw_shmem_dbg_buffer_proc, KERN_DUMP_HW_SHMEM_DBG); if (ret == 0) { @@ -1394,10 +1407,14 @@ kern_dump(enum kern_dump_type kd_variant) } } -#if CONFIG_EMBEDDED +#if defined(__arm__) || defined(__arm64__) void -panic_spin_shmcon() +panic_spin_shmcon(void) { + if (!PE_i_can_has_debugger(NULL)) { + return; + } + if (hwsd_info == NULL) { kern_coredump_log(NULL, "handshake structure not initialized\n"); return; @@ -1405,7 +1422,7 @@ panic_spin_shmcon() kern_coredump_log(NULL, "\nPlease go to https://panic.apple.com to report this panic\n"); kern_coredump_log(NULL, "Waiting for hardware shared memory debugger, handshake structure is at virt: %p, phys %p\n", - hwsd_info, (void *)kvtophys((vm_offset_t)hwsd_info)); + hwsd_info, (void *)kvtophys((vm_offset_t)hwsd_info)); hwsd_info->xhsdci_status = XHSDCI_STATUS_KERNEL_READY; hwsd_info->xhsdci_seq_no = 0; @@ -1418,32 +1435,34 @@ panic_spin_shmcon() } if ((hwsd_info->xhsdci_status == XHSDCI_COREDUMP_REMOTE_DONE) || - (hwsd_info->xhsdci_status == XHSDCI_COREDUMP_ERROR)) { + (hwsd_info->xhsdci_status == XHSDCI_COREDUMP_ERROR)) { hwsd_info->xhsdci_status = XHSDCI_STATUS_KERNEL_READY; hwsd_info->xhsdci_seq_no = 0; FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, sizeof(*hwsd_info)); } } } -#endif /* CONFIG_EMBEDDED */ +#endif /* defined(__arm__) || defined(__arm64__) */ static void * kdp_core_zalloc(void * __unused ref, u_int items, u_int size) { - void * result; + void * result; - result = (void *) (kdp_core_zmem + kdp_core_zoffset); - kdp_core_zoffset += ~31L & (31 + (items * size)); // 32b align for vector crc - assert(kdp_core_zoffset <= kdp_core_zsize); + result = (void *) (kdp_core_zmem + kdp_core_zoffset); + kdp_core_zoffset += ~31L & (31 + (items * size)); // 32b align for vector crc + assert(kdp_core_zoffset <= kdp_core_zsize); - return (result); + return result; } static void -kdp_core_zfree(void * __unused ref, void * __unused ptr) {} +kdp_core_zfree(void * __unused ref, void * __unused ptr) +{ +} -#if CONFIG_EMBEDDED +#if defined(__arm__) || defined(__arm64__) #define LEVEL Z_BEST_SPEED #define NETBUF 0 #else @@ -1457,26 +1476,28 @@ kdp_core_init(void) int wbits = 12; int memlevel = 3; kern_return_t kr; -#if CONFIG_EMBEDDED +#if defined(__arm__) || defined(__arm64__) int i = 0; vm_offset_t kdp_core_hw_shmem_buf = 0; struct kdp_hw_shmem_dbg_buf_elm *cur_elm = NULL; cache_info_t *cpuid_cache_info = NULL; -#endif +#endif /* defined(__arm__) || defined(__arm64__) */ kern_coredump_callback_config core_config = { }; - if (kdp_core_zs.zalloc) return; + if (kdp_core_zs.zalloc) { + return; + } 
kdp_core_zsize = round_page(NETBUF + zlib_deflate_memory_size(wbits, memlevel)); printf("kdp_core zlib memory 0x%lx\n", kdp_core_zsize); kr = kmem_alloc(kernel_map, &kdp_core_zmem, kdp_core_zsize, VM_KERN_MEMORY_DIAG); - assert (KERN_SUCCESS == kr); + assert(KERN_SUCCESS == kr); kdp_core_zoffset = 0; kdp_core_zs.zalloc = kdp_core_zalloc; kdp_core_zs.zfree = kdp_core_zfree; if (deflateInit2(&kdp_core_zs, LEVEL, Z_DEFLATED, - wbits + 16 /*gzip mode*/, memlevel, Z_DEFAULT_STRATEGY)) { + wbits + 16 /*gzip mode*/, memlevel, Z_DEFAULT_STRATEGY)) { /* Allocation failed */ bzero(&kdp_core_zs, sizeof(kdp_core_zs)); kdp_core_zoffset = 0; @@ -1495,18 +1516,23 @@ kdp_core_init(void) kr = kern_register_xnu_coredump_helper(&core_config); assert(KERN_SUCCESS == kr); -#if CONFIG_EMBEDDED +#if defined(__arm__) || defined(__arm64__) if (!PE_consistent_debug_enabled()) { return; } + if (!PE_i_can_has_debugger(NULL)) { + return; + } + /* * We need to allocate physically contiguous memory since astris isn't capable * of doing address translations while the CPUs are running. */ kdp_hw_shmem_dbg_bufsize = KDP_CORE_HW_SHMEM_DBG_TOTAL_BUF_SIZE; - kr = kmem_alloc_contig(kernel_map, &kdp_core_hw_shmem_buf, kdp_hw_shmem_dbg_bufsize, VM_MAP_PAGE_MASK(kernel_map), - 0, 0, KMA_KOBJECT, VM_KERN_MEMORY_DIAG); + kr = kmem_alloc_contig(kernel_map, &kdp_core_hw_shmem_buf, + kdp_hw_shmem_dbg_bufsize, VM_MAP_PAGE_MASK(kernel_map), + 0, 0, KMA_KOBJECT, VM_KERN_MEMORY_DIAG); assert(KERN_SUCCESS == kr); /* @@ -1537,7 +1563,7 @@ kdp_core_init(void) STAILQ_INIT(&hw_shmem_dbg_bufs_to_flush); for (i = 0; i < KDP_CORE_HW_SHMEM_DBG_NUM_BUFFERS; i++) { - cur_elm = kalloc(sizeof(*cur_elm)); + cur_elm = zalloc_permanent_type(typeof(*cur_elm)); assert(cur_elm != NULL); cur_elm->khsd_buf = kdp_core_hw_shmem_buf; @@ -1549,11 +1575,11 @@ kdp_core_init(void) } nanoseconds_to_absolutetime(KDP_HW_SHMEM_DBG_TIMEOUT_DEADLINE_SECS * NSEC_PER_SEC, - &kdp_hw_shmem_dbg_contact_deadline_interval); + &kdp_hw_shmem_dbg_contact_deadline_interval); PE_consistent_debug_register(kDbgIdAstrisConnection, kvtophys((vm_offset_t) hwsd_info), sizeof(pmap_paddr_t)); PE_consistent_debug_register(kDbgIdAstrisConnectionVers, CUR_XNU_HWSDCI_STRUCT_VERS, sizeof(uint32_t)); -#endif /* CONFIG_EMBEDDED */ +#endif /* defined(__arm__) || defined(__arm64__) */ } #endif /* CONFIG_KDP_INTERACTIVE_DEBUGGING */ diff --git a/osfmk/kdp/kdp_core.h b/osfmk/kdp/kdp_core.h index 1b93c16e9..86e11aa0d 100644 --- a/osfmk/kdp/kdp_core.h +++ b/osfmk/kdp/kdp_core.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2003-2019 Apple Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -85,7 +85,7 @@ struct corehdr { #define CORE_REMOTE_PORT 1069 /* hardwired, we can't really query the services file */ -#if CONFIG_EMBEDDED +#if defined(__arm__) || defined(__arm64__) /* * xnu shared memory hardware debugger support * @@ -122,7 +122,7 @@ struct xnu_hw_shmem_dbg_command_info { void panic_spin_shmcon(void); -#endif /* CONFIG_EMBEDDED */ +#endif /* defined(__arm__) || defined(__arm64__) */ void kdp_panic_dump(void); void begin_panic_transfer(void); @@ -134,7 +134,7 @@ void kdp_get_dump_info(kdp_dumpinfo_reply_t *rp); enum kern_dump_type { KERN_DUMP_DISK, /* local, on device core dump */ KERN_DUMP_NET, /* kdp network core dump */ -#if CONFIG_EMBEDDED +#if defined(__arm__) || defined(__arm64__) KERN_DUMP_HW_SHMEM_DBG, /* coordinated hardware shared memory debugger core dump */ #endif KERN_DUMP_STACKSHOT_DISK, /* local, stackshot on device coredump */ diff --git a/osfmk/kdp/kdp_dyld.h b/osfmk/kdp/kdp_dyld.h index b363b896c..289c5d4d0 100644 --- a/osfmk/kdp/kdp_dyld.h +++ b/osfmk/kdp/kdp_dyld.h @@ -132,4 +132,9 @@ struct user64_dyld_all_image_infos { /* the following fields are only in version 16 (macOS 10.13, iOS 12.0) and later */ user64_addr_t compact_dyld_image_info_addr; user64_size_t compact_dyld_image_info_size; + uint32_t platform; + /* the following fields are only in version 17 (macOS 10.16) and later */ + uint32_t aotInfoArrayCount; + user64_addr_t aotInfoArray; + uint64_t aotTimestamp; }; diff --git a/osfmk/kdp/kdp_internal.h b/osfmk/kdp/kdp_internal.h index 356429b67..7fb3e852f 100644 --- a/osfmk/kdp/kdp_internal.h +++ b/osfmk/kdp/kdp_internal.h @@ -109,7 +109,8 @@ kdp_exception_ack( extern void kdp_panic( - const char *msg + const char *fmt, + ... ); extern @@ -208,3 +209,6 @@ int int kdp_machine_msr64_write(kdp_writemsr64_req_t *, caddr_t /* data */, uint16_t /* lcpu */); + +vm_map_offset_t +kdp_core_start_addr(void); diff --git a/osfmk/kdp/kdp_serial.c b/osfmk/kdp/kdp_serial.c index c4e013d52..d30275a9f 100644 --- a/osfmk/kdp/kdp_serial.c +++ b/osfmk/kdp/kdp_serial.c @@ -49,7 +49,7 @@ kdp_serial_out(unsigned char byte, void (*outFunc)(char)) outFunc(SKDP_ESC_CHAR); byte = ~byte; } - outFunc(byte); + outFunc((char)byte); } void @@ -62,10 +62,10 @@ kdp_serialize_packet(unsigned char *packet, unsigned int len, void (*outFunc)(ch // insert the CRC between back to back STARTs which is compatible with old clients crc = (uint32_t) z_crc32(0, packet, len); outFunc(SKDP_START_CHAR); - kdp_serial_out((crc >> 0), outFunc); - kdp_serial_out((crc >> 8), outFunc); - kdp_serial_out((crc >> 16), outFunc); - kdp_serial_out((crc >> 24), outFunc); + kdp_serial_out((unsigned char)(crc >> 0), outFunc); + kdp_serial_out((unsigned char)(crc >> 8), outFunc); + kdp_serial_out((unsigned char)(crc >> 16), outFunc); + kdp_serial_out((unsigned char)(crc >> 24), outFunc); outFunc(SKDP_START_CHAR); for (index = 0; index < len; index++) { diff --git a/osfmk/kdp/kdp_udp.c b/osfmk/kdp/kdp_udp.c index 260f10ddb..c33882f8d 100644 --- a/osfmk/kdp/kdp_udp.c +++ b/osfmk/kdp/kdp_udp.c @@ -100,8 +100,8 @@ static u_short ip_id; /* ip packet ctr, for ids */ * UDP protocol implementation. * Per RFC 768, August, 1980. 
*/ -#define UDP_TTL 60 /* deflt time to live for UDP packets */ -static int udp_ttl = UDP_TTL; +#define UDP_TTL 60 /* default time to live for UDP packets */ +static u_char udp_ttl = UDP_TTL; static unsigned char exception_seq; struct kdp_ipovly { @@ -280,7 +280,7 @@ static boolean_t save_ip_in_nvram = FALSE; static volatile boolean_t panicd_specified = FALSE; static boolean_t router_specified = FALSE; static boolean_t corename_specified = FALSE; -static unsigned int panicd_port = CORE_REMOTE_PORT; +static unsigned short panicd_port = CORE_REMOTE_PORT; static struct kdp_ether_addr etherbroadcastaddr = {.ether_addr_octet = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}}; @@ -296,7 +296,7 @@ static boolean_t flag_dont_abort_panic_dump = FALSE; static boolean_t flag_arp_resolved = FALSE; static unsigned int panic_timeout = 100000; -static unsigned int last_panic_port = CORE_REMOTE_PORT; +static unsigned short last_panic_port = CORE_REMOTE_PORT; #define KDP_THROTTLE_VALUE (10ULL * NSEC_PER_SEC) @@ -415,19 +415,11 @@ kdp_register_send_receive( kdp_send_t send, kdp_receive_t receive) { - unsigned int debug = 0; + unsigned int debug = debug_boot_arg; - PE_parse_boot_argn("debug", &debug, sizeof(debug)); - -#if defined(__arm__) || defined(__arm64__) - { - uint32_t debug_flags; - - if (!PE_i_can_has_debugger(&debug_flags)) { - debug = 0; - } + if (!kernel_debugging_allowed()) { + return; } -#endif if (!debug) { return; @@ -540,9 +532,10 @@ ip_sum( } sum = (high << 8) + low; - sum = (sum >> 16) + (sum & 65535); + sum = (sum >> 16) + (sum & USHRT_MAX); + sum = (sum > USHRT_MAX) ? sum - USHRT_MAX : sum; - return sum > 65535 ? sum - 65535 : sum; + return (unsigned short)sum; } static void @@ -558,7 +551,12 @@ kdp_reply( struct kdp_ether_header *eh = NULL; if (!pkt.input) { - kdp_panic("kdp_reply"); + kdp_panic("kdp_reply: no input packet"); + } + + /* Packet size cannot be larger than the static space allocated for it. */ + if (pkt.len > KDP_MAXPACKET) { + kdp_panic("kdp_send: packet too large (%d > %u)", pkt.len, KDP_MAXPACKET); } pkt.off -= (unsigned int)sizeof(struct kdp_udpiphdr); @@ -585,7 +583,7 @@ kdp_reply( #else ip = (struct kdp_ip *)&pkt.data[pkt.off]; #endif - ip->ip_len = htons(sizeof(struct kdp_udpiphdr) + pkt.len); + ip->ip_len = htons((ushort_t)(sizeof(struct kdp_udpiphdr) + pkt.len)); ip->ip_v = IPVERSION; ip->ip_id = htons(ip_id++); ip->ip_hl = sizeof(struct kdp_ip) >> 2; @@ -609,7 +607,6 @@ kdp_reply( pkt.len += (unsigned int)sizeof(struct kdp_ether_header); // save reply for possible retransmission - assert(pkt.len <= KDP_MAXPACKET); if (!sideband) { bcopy((char *)&pkt, (char *)&saved_reply, sizeof(saved_reply)); } @@ -632,7 +629,12 @@ kdp_send( struct kdp_ether_header *eh; if (pkt.input) { - kdp_panic("kdp_send"); + kdp_panic("kdp_send: no input packet"); + } + + /* Packet size cannot be larger than the static space allocated for it. 
*/ + if (pkt.len > KDP_MAXPACKET) { + kdp_panic("kdp_send: packet too large (%d > %u)", pkt.len, KDP_MAXPACKET); } pkt.off -= (unsigned int)sizeof(struct kdp_udpiphdr); @@ -658,7 +660,7 @@ kdp_send( #else ip = (struct kdp_ip *)&pkt.data[pkt.off]; #endif - ip->ip_len = htons(sizeof(struct kdp_udpiphdr) + pkt.len); + ip->ip_len = htons((ushort_t)(sizeof(struct kdp_udpiphdr) + pkt.len)); ip->ip_v = IPVERSION; ip->ip_id = htons(ip_id++); ip->ip_hl = sizeof(struct kdp_ip) >> 2; @@ -1241,10 +1243,6 @@ kdp_connection_wait(void) printf("\nWaiting for remote debugger connection.\n"); kprintf("\nWaiting for remote debugger connection.\n"); -#ifdef ARM - printf("\nPlease go to https://panic.apple.com to report this panic\n"); -#endif - if (reattach_wait == 0) { if ((kdp_flag & KDP_GETC_ENA) && (0 != kdp_getc())) { printf("Options..... Type\n"); @@ -1476,7 +1474,11 @@ again: kdp_sync_cache(); - if (reattach_wait == 1) { +#if defined(__x86_64__) + /* We only support returning from KDP on x86 */ + if (reattach_wait == 1) +#endif + { goto again; } @@ -1506,7 +1508,7 @@ create_panic_header(unsigned int request, const char *corename, struct kdp_ether_header *eh; struct corehdr *coreh; const char *mode = "octet"; - char modelen = strlen(mode) + 1; + size_t modelen = strlen(mode) + 1; size_t fmask_size = sizeof(KDP_FEATURE_MASK_STRING) + sizeof(kdp_crashdump_feature_mask); @@ -1536,7 +1538,7 @@ create_panic_header(unsigned int request, const char *corename, #else ip = (struct kdp_ip *)&pkt.data[pkt.off]; #endif - ip->ip_len = htons(sizeof(struct kdp_udpiphdr) + pkt.len); + ip->ip_len = htons((ushort_t)(sizeof(struct kdp_udpiphdr) + pkt.len)); ip->ip_v = IPVERSION; ip->ip_id = htons(ip_id++); ip->ip_hl = sizeof(struct kdp_ip) >> 2; @@ -1555,35 +1557,36 @@ create_panic_header(unsigned int request, const char *corename, coreh->th_opcode = htons((u_short)request); if (request == KDP_WRQ) { - char *cp; - size_t length_remaining = (sizeof(pkt.data) - pkt.off), bytes_filled = 0; - - cp = coreh->th_u.tu_rpl; - bytes_filled = strlcpy(cp, corename, length_remaining); - cp += bytes_filled; - *cp++ = '\0'; - /* account for the extra NULL character that has been added historically */ - length_remaining -= (bytes_filled + 1); - - bytes_filled = strlcpy(cp, mode, length_remaining); - cp += bytes_filled; - *cp++ = '\0'; - /* account for the extra NULL character that has been added historically */ - length_remaining -= (bytes_filled + 1); - - bytes_filled = strlcpy(cp, KDP_FEATURE_MASK_STRING, length_remaining); - cp += bytes_filled; - *cp++ = '\0'; - /* account for the extra NULL character that has been added historically */ - length_remaining -= (bytes_filled + 1); + char *cp = coreh->th_u.tu_rpl; + /* Calculate available string space (remaining space after accounting for mandatory components). */ + size_t length_remaining = (sizeof(pkt.data) - pkt.off - offsetof(struct corehdr, th_u) + - sizeof(kdp_crashdump_feature_mask) - sizeof(kdp_crashdump_pkt_size)); + + /* account for the extra NULL characters that have been added historically */ + int len = snprintf(cp, length_remaining, "%s%c%s%c%s", corename, '\0', mode, '\0', KDP_FEATURE_MASK_STRING); + if (len < 0) { + kdb_printf("Unable to create core header packet.\n"); + return NULL; + } else if (len >= length_remaining) { + kdb_printf("dumpinfo does not fit into KDP packet.\n"); + return NULL; + } + cp += len; + /* Append feature flags. The value is already converted with htonl in startup code. 
*/ bcopy(&kdp_crashdump_feature_mask, cp, sizeof(kdp_crashdump_feature_mask)); - kdp_crashdump_pkt_size = KDP_LARGE_CRASHDUMP_PKT_SIZE; cp += sizeof(kdp_crashdump_feature_mask); - length_remaining -= sizeof(kdp_crashdump_feature_mask); - PE_parse_boot_argn("kdp_crashdump_pkt_size", &kdp_crashdump_pkt_size, sizeof(kdp_crashdump_pkt_size)); - *(uint32_t *)cp = htonl(kdp_crashdump_pkt_size); + /* Override default packet size from boot arguments (if present). */ + kdp_crashdump_pkt_size = KDP_LARGE_CRASHDUMP_PKT_SIZE; + if (PE_parse_boot_argn("kdp_crashdump_pkt_size", &kdp_crashdump_pkt_size, sizeof(kdp_crashdump_pkt_size)) && + (kdp_crashdump_pkt_size > KDP_LARGE_CRASHDUMP_PKT_SIZE)) { + kdp_crashdump_pkt_size = KDP_LARGE_CRASHDUMP_PKT_SIZE; + kdb_printf("kdp_crashdump_pkt_size is too large. Reverting to %d\n", kdp_crashdump_pkt_size); + } + + uint32_t pktsz = htonl(kdp_crashdump_pkt_size); + bcopy(&pktsz, cp, sizeof(uint32_t)); } else { coreh->th_block = htonl((unsigned int) block); } @@ -1688,6 +1691,10 @@ TRANSMIT_RETRY: } th = create_panic_header(request, corename, (unsigned)length, panic_block); + if (th == NULL) { + printf("Unable to get panic header.\n"); + return -4; + } if (request == KDP_DATA) { /* as all packets are kdp_crashdump_pkt_size in length, the last packet @@ -1814,7 +1821,7 @@ isdigit(char c) static int kdp_get_xnu_version(char *versionbuf) { - char *versionpos; + const char *versionpos; char vstr[20]; int retval = -1; char *vptr; @@ -1867,8 +1874,11 @@ kdp_set_dump_info(const uint32_t flags, const char *filename, corename_specified = FALSE; } - if (port) { - panicd_port = port; + /* Accept only valid UDP port numbers. */ + if (port && port <= USHRT_MAX) { + panicd_port = (unsigned short)port; + } else { + kdb_printf("kdp_set_dump_info: Skipping invalid panicd port %d (using %d)\n", port, panicd_port); } /* on a disconnect, should we stay in KDP or not? */ @@ -2150,7 +2160,7 @@ kdp_serial_receive(void *rpkt, unsigned int *rpkt_len, unsigned int timeout) if (readkar >= 0) { unsigned char *packet; // printf("got char %02x\n", readkar); - if ((packet = kdp_unserialize_packet(readkar, rpkt_len))) { + if ((packet = kdp_unserialize_packet((unsigned char)readkar, rpkt_len))) { memcpy(rpkt, packet, *rpkt_len); return; } @@ -2208,6 +2218,7 @@ kdp_init(void) strlcpy(kdp_kernelversion_string, version, sizeof(kdp_kernelversion_string)); /* Relies on platform layer calling panic_init() before kdp_init() */ + assert(startup_phase >= STARTUP_SUB_TUNABLES); if (kernel_uuid_string[0] != '\0') { /* * Update kdp_kernelversion_string with our UUID @@ -2248,25 +2259,25 @@ kdp_init(void) boolean_t kdp_match_name_found = PE_parse_boot_argn("kdp_match_name", kdpname, sizeof(kdpname)); boolean_t kdp_not_serial = kdp_match_name_found ? 
(strncmp(kdpname, "serial", sizeof(kdpname))) : TRUE; -#if CONFIG_EMBEDDED +#if defined(__arm__) || defined(__arm64__) //respect any custom debugger boot-args if (kdp_match_name_found && kdp_not_serial) { return; } -#else /* CONFIG_EMBEDDED */ +#else /* defined(__arm__) || defined(__arm64__) */ // serial must be explicitly requested if (!kdp_match_name_found || kdp_not_serial) { return; } -#endif /* CONFIG_EMBEDDED */ +#endif /* defined(__arm__) || defined(__arm64__) */ -#if CONFIG_EMBEDDED +#if defined(__arm__) || defined(__arm64__) if (kdp_not_serial && PE_consistent_debug_enabled() && debug_boot_arg) { return; } else { printf("Serial requested, consistent debug disabled or debug boot arg not present, configuring debugging over serial\n"); } -#endif /* CONFIG_EMBEDDED */ +#endif /* defined(__arm__) || defined(__arm64__) */ kprintf("Initializing serial KDP\n"); @@ -2385,16 +2396,15 @@ kdp_raise_exception( ) #endif { -#if CONFIG_EMBEDDED - assert(PE_i_can_has_debugger(NULL)); +#if defined(__arm__) || defined(__arm64__) + assert(kernel_debugging_allowed()); #endif #if CONFIG_KDP_INTERACTIVE_DEBUGGING - kdp_debugger_loop(exception, code, subcode, saved_state); #else /* CONFIG_KDP_INTERACTIVE_DEBUGGING */ - assert(current_debugger != KDP_CUR_DB); + assert(current_debugger != KDP_CUR_DB); panic_spin_forever(); #endif /* CONFIG_KDP_INTERACTIVE_DEBUGGING */ } diff --git a/osfmk/kdp/ml/arm/kdp_machdep.c b/osfmk/kdp/ml/arm/kdp_machdep.c index 25da7d706..42b755900 100644 --- a/osfmk/kdp/ml/arm/kdp_machdep.c +++ b/osfmk/kdp/ml/arm/kdp_machdep.c @@ -37,7 +37,6 @@ #include #include #include -#include #include #include @@ -60,16 +59,15 @@ int machine_trace_thread(thread_t thread, char * tracebound, int nframes, boolean_t user_p, - boolean_t trace_fp, uint32_t * thread_trace_flags); int machine_trace_thread64(thread_t thread, char * tracepos, char * tracebound, int nframes, boolean_t user_p, - boolean_t trace_fp, uint32_t * thread_trace_flags, - uint64_t *sp); + uint64_t *sp, + vm_offset_t fp); void kdp_trap(unsigned int, struct arm_saved_state * saved_state); @@ -259,9 +257,16 @@ kdp_machine_hostinfo(kdp_hostinfo_t * hostinfo) __attribute__((noreturn)) void -kdp_panic(const char * msg) +kdp_panic(const char * fmt, ...) { - printf("kdp panic: %s\n", msg); + char kdp_fmt[256]; + va_list args; + + va_start(args, fmt); + (void) snprintf(kdp_fmt, sizeof(kdp_fmt), "kdp panic: %s", fmt); + vprintf(kdp_fmt, args); + va_end(args); + while (1) { } ; @@ -391,12 +396,11 @@ machine_trace_thread(thread_t thread, char * tracebound, int nframes, boolean_t user_p, - boolean_t trace_fp, uint32_t * thread_trace_flags) { uint32_align2_t * tracebuf = (uint32_align2_t *)tracepos; - vm_size_t framesize = (trace_fp ? 
2 : 1) * sizeof(uint32_t); + vm_size_t framesize = sizeof(uint32_t); vm_offset_t stacklimit = 0; vm_offset_t stacklimit_bottom = 0; @@ -424,9 +428,6 @@ machine_trace_thread(thread_t thread, /* Fake up a stack frame for the PC */ *tracebuf++ = (uint32_t)get_saved_state_pc(state); - if (trace_fp) { - *tracebuf++ = (uint32_t)get_saved_state_sp(state); - } framecount++; bt_vm_map = thread->task->map; } else { @@ -464,9 +465,6 @@ machine_trace_thread(thread_t thread, for (; framecount < nframes; framecount++) { *tracebuf++ = prevlr; - if (trace_fp) { - *tracebuf++ = (uint32_t)fp; - } /* Invalid frame */ if (!fp) { @@ -573,28 +571,26 @@ machine_trace_thread64(thread_t thread, char * tracebound, int nframes, boolean_t user_p, - boolean_t trace_fp, uint32_t * thread_trace_flags, - uint64_t *sp_out) + uint64_t *sp_out, + vm_offset_t fp) { #pragma unused(sp_out) #if defined(__arm__) -#pragma unused(thread, tracepos, tracebound, nframes, user_p, trace_fp, thread_trace_flags) +#pragma unused(thread, tracepos, tracebound, nframes, user_p, thread_trace_flags, fp) return 0; #elif defined(__arm64__) uint64_t * tracebuf = (uint64_t *)tracepos; - vm_size_t framesize = (trace_fp ? 2 : 1) * sizeof(uint64_t); + vm_size_t framesize = sizeof(uint64_t); vm_offset_t stacklimit = 0; vm_offset_t stacklimit_bottom = 0; int framecount = 0; - vm_offset_t fp = 0; vm_offset_t pc = 0; vm_offset_t sp = 0; vm_offset_t prevfp = 0; uint64_t prevlr = 0; - struct arm_saved_state * state; vm_offset_t kern_virt_addr = 0; vm_map_t bt_vm_map = VM_MAP_NULL; @@ -608,32 +604,41 @@ machine_trace_thread64(thread_t thread, if (user_p) { /* Examine the user savearea */ - state = thread->machine.upcb; + struct arm_saved_state * state = thread->machine.upcb; stacklimit = (is_64bit_addr) ? MACH_VM_MAX_ADDRESS : VM_MAX_ADDRESS; stacklimit_bottom = (is_64bit_addr) ? 
MACH_VM_MIN_ADDRESS : VM_MIN_ADDRESS; /* Fake up a stack frame for the PC */ *tracebuf++ = get_saved_state_pc(state); - if (trace_fp) { - *tracebuf++ = get_saved_state_sp(state); - } framecount++; bt_vm_map = thread->task->map; + + /* Get the frame pointer */ + if (fp == 0) { + fp = get_saved_state_fp(state); + } + + /* Fill in the current link register */ + prevlr = get_saved_state_lr(state); + pc = get_saved_state_pc(state); + sp = get_saved_state_sp(state); } else { /* kstackptr may not always be there, so recompute it */ - state = &thread_get_kernel_state(thread)->machine.ss; + struct arm_kernel_saved_state * state = &thread_get_kernel_state(thread)->machine.ss; stacklimit = VM_MAX_KERNEL_ADDRESS; stacklimit_bottom = VM_MIN_KERNEL_ADDRESS; bt_vm_map = kernel_map; - } - /* Get the frame pointer */ - fp = get_saved_state_fp(state); + /* Get the frame pointer */ + if (fp == 0) { + fp = state->fp; + } - /* Fill in the current link register */ - prevlr = get_saved_state_lr(state); - pc = get_saved_state_pc(state); - sp = get_saved_state_sp(state); + /* Fill in the current link register */ + prevlr = state->lr; + pc = state->pc; + sp = state->sp; + } if (!user_p && !prevlr && !fp && !sp && !pc) { return 0; @@ -645,9 +650,6 @@ machine_trace_thread64(thread_t thread, for (; framecount < nframes; framecount++) { *tracebuf++ = prevlr; - if (trace_fp) { - *tracebuf++ = fp; - } /* Invalid frame */ if (!fp) { diff --git a/osfmk/kdp/ml/arm/kdp_vm.c b/osfmk/kdp/ml/arm/kdp_vm.c index 7a26f78dd..c9ed0df44 100644 --- a/osfmk/kdp/ml/arm/kdp_vm.c +++ b/osfmk/kdp/ml/arm/kdp_vm.c @@ -61,7 +61,6 @@ kdp_vtophys( vm_offset_t va) { pmap_paddr_t pa; - ppnum_t pp; /* Ensure that the provided va resides within the provided pmap range. */ if (!pmap || ((pmap != kernel_pmap) && ((va < pmap->min) || (va >= pmap->max)))) { @@ -74,15 +73,10 @@ kdp_vtophys( return 0; /* Just return if no translation */ } - pp = pmap_find_phys(pmap, va); /* Get the page number */ - if (!pp) { - return 0; /* Just return if no translation */ - } - pa = ((pmap_paddr_t) pp << PAGE_SHIFT) | (va & PAGE_MASK); /* Insert page offset */ + pa = pmap_find_pa(pmap, va); /* Get the physical address */ return pa; } - /* * kdp_machine_vm_read * @@ -334,24 +328,37 @@ kern_collectth_state(thread_t thread __unused, void *buffer, uint64_t size, void return; } - if ((cpudatap == NULL) || (cpudatap->cpu_processor == NULL) || (cpudatap->cpu_processor->active_thread == NULL)) { + processor_t processor = PERCPU_GET_RELATIVE(processor, cpu_data, cpudatap); + if ((cpudatap == NULL) || (processor->active_thread == NULL)) { bzero(state, hdr->count * sizeof(uint32_t)); return; } - vm_offset_t kstackptr = (vm_offset_t) cpudatap->cpu_processor->active_thread->machine.kstackptr; - arm_saved_state_t *saved_state = (arm_saved_state_t *) kstackptr; - #if defined(__arm64__) - - state->fp = saved_state->ss_64.fp; - state->lr = saved_state->ss_64.lr; - state->sp = saved_state->ss_64.sp; - state->pc = saved_state->ss_64.pc; - state->cpsr = saved_state->ss_64.cpsr; - bcopy(&saved_state->ss_64.x[0], &state->x[0], sizeof(state->x)); + void *kpcb = processor->active_thread->machine.kpcb; + if (kpcb != NULL) { + arm_saved_state_t *saved_state = (arm_saved_state_t *)kpcb; + + state->fp = saved_state->ss_64.fp; + state->lr = saved_state->ss_64.lr; + state->sp = saved_state->ss_64.sp; + state->pc = saved_state->ss_64.pc; + state->cpsr = saved_state->ss_64.cpsr; + bcopy(&saved_state->ss_64.x[0], &state->x[0], sizeof(state->x)); + } else { + vm_offset_t kstackptr = (vm_offset_t) 
processor->active_thread->machine.kstackptr; + arm_kernel_saved_state_t *saved_state = (arm_kernel_saved_state_t *) kstackptr; + + state->fp = saved_state->fp; + state->lr = saved_state->lr; + state->sp = saved_state->sp; + state->pc = saved_state->pc; + state->cpsr = saved_state->cpsr; + } #else /* __arm64__ */ + vm_offset_t kstackptr = (vm_offset_t) processor->active_thread->machine.kstackptr; + arm_saved_state_t *saved_state = (arm_saved_state_t *) kstackptr; state->lr = saved_state->lr; state->sp = saved_state->sp; @@ -361,3 +368,25 @@ kern_collectth_state(thread_t thread __unused, void *buffer, uint64_t size, void #endif /* !__arm64__ */ } + +/* + * kdp_core_start_addr + * + * return the address where the kernel core file starts + * + * The kernel start address is VM_MIN_KERNEL_AND_KEXT_ADDRESS + * unless the physical aperture has been relocated below + * VM_MIN_KERNEL_AND_KEXT_ADDRESS as in the case of + * ARM_LARGE_MEMORY systems + * + */ +vm_map_offset_t +kdp_core_start_addr() +{ +#if defined(__arm64__) + extern const vm_map_address_t physmap_base; + return MIN(physmap_base, VM_MIN_KERNEL_AND_KEXT_ADDRESS); +#else /* !defined(__arm64__) */ + return VM_MIN_KERNEL_AND_KEXT_ADDRESS; +#endif /* !defined(__arm64__) */ +} diff --git a/osfmk/kdp/ml/i386/kdp_x86_common.c b/osfmk/kdp/ml/i386/kdp_x86_common.c index 934bce867..96eb2dd93 100644 --- a/osfmk/kdp/ml/i386/kdp_x86_common.c +++ b/osfmk/kdp/ml/i386/kdp_x86_common.c @@ -78,14 +78,8 @@ kdp_vtophys( vm_offset_t va) { pmap_paddr_t pa; - ppnum_t pp; - pp = pmap_find_phys(pmap, va); - if (!pp) { - return 0; - } - - pa = ((pmap_paddr_t)pp << PAGE_SHIFT) | (va & PAGE_MASK); + pa = pmap_find_pa(pmap, va); return pa; } diff --git a/osfmk/kdp/ml/x86_64/kdp_machdep.c b/osfmk/kdp/ml/x86_64/kdp_machdep.c index 0dc052a3e..f7d619d4a 100644 --- a/osfmk/kdp/ml/x86_64/kdp_machdep.c +++ b/osfmk/kdp/ml/x86_64/kdp_machdep.c @@ -44,7 +44,6 @@ #include #include #include -#include #define KDP_TEST_HARNESS 0 #if KDP_TEST_HARNESS @@ -274,10 +273,18 @@ kdp_machine_hostinfo( void kdp_panic( - const char *msg + const char *fmt, + ... ) { - kprintf("kdp panic: %s\n", msg); + char kdp_fmt[256]; + va_list args; + + va_start(args, fmt); + (void) snprintf(kdp_fmt, sizeof(kdp_fmt), "kdp panic: %s", fmt); + vprintf(kdp_fmt, args); + va_end(args); + __asm__ volatile ("hlt"); } @@ -463,11 +470,10 @@ machine_trace_thread(thread_t thread, char * tracebound, int nframes, boolean_t user_p, - boolean_t trace_fp, uint32_t * thread_trace_flags) { uint32_t * tracebuf = (uint32_t *)tracepos; - uint32_t framesize = (trace_fp ? 2 : 1) * sizeof(uint32_t); + uint32_t framesize = sizeof(uint32_t); uint32_t fence = 0; uint32_t stackptr = 0; @@ -495,9 +501,6 @@ machine_trace_thread(thread_t thread, for (framecount = 0; framecount < nframes; framecount++) { *tracebuf++ = prev_eip; - if (trace_fp) { - *tracebuf++ = stackptr; - } /* Invalid frame, or hit fence */ if (!stackptr || (stackptr == fence)) { @@ -562,12 +565,12 @@ machine_trace_thread64(thread_t thread, char * tracebound, int nframes, boolean_t user_p, - boolean_t trace_fp, uint32_t * thread_trace_flags, - uint64_t *sp) + uint64_t *sp, + vm_offset_t fp) { uint64_t * tracebuf = (uint64_t *)tracepos; - unsigned framesize = (trace_fp ? 
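Both the arm and x86_64 kdp_panic() hunks above apply the same conversion from a fixed-string routine to a printf-style variadic one: the caller's format string is folded into a prefixed format with snprintf, and the untouched va_list is then handed to vprintf. A small standalone sketch of that prefix-then-forward pattern; userspace stdio stands in for the kernel console path, and panic_like is an illustrative name only:

	#include <stdarg.h>
	#include <stdio.h>

	/* Build "kdp panic: <caller fmt>" once, then let vprintf consume the
	 * caller's variadic arguments against the combined format. */
	static void
	panic_like(const char *fmt, ...)
	{
		char prefixed_fmt[256];
		va_list args;

		va_start(args, fmt);
		(void) snprintf(prefixed_fmt, sizeof(prefixed_fmt), "kdp panic: %s", fmt);
		vprintf(prefixed_fmt, args);
		va_end(args);
	}

	/* panic_like("packet too large (%d > %u)\n", 2048, 1536)
	 * prints: kdp panic: packet too large (2048 > 1536) */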
2 : 1) * sizeof(addr64_t); + unsigned framesize = sizeof(addr64_t); uint32_t fence = 0; addr64_t stackptr = 0; @@ -583,13 +586,17 @@ machine_trace_thread64(thread_t thread, x86_saved_state64_t *iss64; iss64 = USER_REGS64(thread); prev_rip = iss64->isf.rip; - stackptr = iss64->rbp; + if (fp == 0) { + stackptr = iss64->rbp; + } bt_vm_map = thread->task->map; if (sp && user_p) { *sp = iss64->isf.rsp; } } else { - stackptr = STACK_IKS(thread->kernel_stack)->k_rbp; + if (fp == 0) { + stackptr = STACK_IKS(thread->kernel_stack)->k_rbp; + } prev_rip = STACK_IKS(thread->kernel_stack)->k_rip; prev_rip = VM_KERNEL_UNSLIDE(prev_rip); bt_vm_map = kernel_map; @@ -597,9 +604,6 @@ machine_trace_thread64(thread_t thread, for (framecount = 0; framecount < nframes; framecount++) { *tracebuf++ = prev_rip; - if (trace_fp) { - *tracebuf++ = stackptr; - } if (!stackptr || (stackptr == fence)) { break; diff --git a/osfmk/kdp/ml/x86_64/kdp_vm.c b/osfmk/kdp/ml/x86_64/kdp_vm.c index 8b102b787..7b237bb44 100644 --- a/osfmk/kdp/ml/x86_64/kdp_vm.c +++ b/osfmk/kdp/ml/x86_64/kdp_vm.c @@ -175,3 +175,15 @@ kdp_dump_trap( kdp_raise_exception(EXC_BAD_ACCESS, 0, 0, kdp.saved_state); return 0; } + +/* + * kdp_core_start_addr + * + * return the address where the kernel core file starts + * + */ +vm_map_offset_t +kdp_core_start_addr() +{ + return VM_MIN_KERNEL_AND_KEXT_ADDRESS; +} diff --git a/osfmk/kdp/processor_core.c b/osfmk/kdp/processor_core.c index d0c41d90a..41737a553 100644 --- a/osfmk/kdp/processor_core.c +++ b/osfmk/kdp/processor_core.c @@ -29,7 +29,7 @@ #include #include #include -#include +#include #include #include #include @@ -127,7 +127,7 @@ kern_register_coredump_helper_internal(int kern_coredump_config_vers, const kern } #endif - core_helper = kalloc(sizeof(*core_helper)); + core_helper = zalloc_permanent_type(struct kern_coredump_core); core_helper->kcc_next = NULL; core_helper->kcc_refcon = refcon; if (xnu_callback) { @@ -739,10 +739,10 @@ kern_coredump_log(void *context, const char *string, ...) 
va_list coredump_log_args; va_start(coredump_log_args, string); - _doprnt(string, &coredump_log_args, consdebug_putc, 0); + _doprnt(string, &coredump_log_args, consdebug_putc, 16); va_end(coredump_log_args); -#if CONFIG_EMBEDDED +#if defined(__arm__) || defined(__arm64__) paniclog_flush(); #endif } diff --git a/osfmk/kern/Makefile b/osfmk/kern/Makefile index 9f53a26ec..7d2ddfc5e 100644 --- a/osfmk/kern/Makefile +++ b/osfmk/kern/Makefile @@ -16,6 +16,7 @@ DATAFILES = \ PRIVATE_DATAFILES = \ arithmetic_128.h \ block_hint.h \ + cambria_layout.h \ cs_blobs.h \ debug.h \ ecc.h \ @@ -35,7 +36,6 @@ EXPORT_FILES = \ backtrace.h \ bits.h \ btlog.h \ - call_entry.h \ circle_queue.h \ clock.h \ coalition.h \ @@ -44,6 +44,7 @@ EXPORT_FILES = \ energy_perf.h \ extmod_statistics.h \ hv_support.h \ + hv_support_kext.h \ ipc_mig.h \ ipc_misc.h \ kalloc.h \ @@ -64,6 +65,7 @@ EXPORT_FILES = \ processor.h \ queue.h \ mpsc_queue.h \ + percpu.h \ priority_queue.h \ sched_prim.h \ sfi.h \ @@ -79,10 +81,14 @@ EXPORT_FILES = \ work_interval.h \ zalloc.h + +# Installs header file for Apple internal use for kernel extensions - +# $(DSTROOT)/System/Library/Frameworks/Kernel.framework/PrivateHeaders PRIVATE_EXPORT_FILES = \ build_config.h \ mach_node_link.h \ - copyout_shim.h + copyout_shim.h \ + mach_filter.h XNU_ONLY_EXPORTS = \ arcade.h \ diff --git a/osfmk/kern/affinity.c b/osfmk/kern/affinity.c index c04c05f30..078a642ae 100644 --- a/osfmk/kern/affinity.c +++ b/osfmk/kern/affinity.c @@ -84,13 +84,13 @@ static affinity_set_t affinity_set_remove(affinity_set_t aset, thread_t thread); * has a single pset, and last-processor affinity is * more important than pset affinity. */ -#if CONFIG_EMBEDDED +#if !defined(XNU_TARGET_OS_OSX) boolean_t affinity_sets_enabled = FALSE; int affinity_sets_mapping = 0; -#else /* !CONFIG_EMBEDDED */ +#else /* !defined(XNU_TARGET_OS_OSX) */ boolean_t affinity_sets_enabled = TRUE; int affinity_sets_mapping = 1; -#endif /* !CONFIG_EMBEDDED */ +#endif /* !defined(XNU_TARGET_OS_OSX) */ boolean_t thread_affinity_is_supported(void) @@ -533,14 +533,14 @@ affinity_set_find(affinity_space_t space, uint32_t tag) static void affinity_set_place(affinity_space_t aspc, affinity_set_t new_aset) { - unsigned int num_cpu_asets = ml_get_max_affinity_sets(); - unsigned int set_occupancy[num_cpu_asets]; - unsigned int i; - unsigned int i_least_occupied; + unsigned short set_occupancy[MAX_CPUS] = { 0 }; + unsigned num_cpu_asets = ml_get_max_affinity_sets(); + unsigned i_least_occupied; affinity_set_t aset; - for (i = 0; i < num_cpu_asets; i++) { - set_occupancy[i] = 0; + if (__improbable(num_cpu_asets > MAX_CPUS)) { + // If this triggers then the array needs to be made bigger. + panic("num_cpu_asets = %d > %d too big in %s\n", num_cpu_asets, MAX_CPUS, __FUNCTION__); } /* @@ -568,7 +568,7 @@ affinity_set_place(affinity_space_t aspc, affinity_set_t new_aset) } else { i_least_occupied = (unsigned int)(((uintptr_t)aspc % 127) % num_cpu_asets); } - for (i = 0; i < num_cpu_asets; i++) { + for (unsigned i = 0; i < num_cpu_asets; i++) { unsigned int j = (i_least_occupied + i) % num_cpu_asets; if (set_occupancy[j] == 0) { i_least_occupied = j; diff --git a/osfmk/kern/arcade.c b/osfmk/kern/arcade.c index 8bd54f11d..30edda823 100644 --- a/osfmk/kern/arcade.c +++ b/osfmk/kern/arcade.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019 Apple Inc. All rights reserved. + * Copyright (c) 2019-2020 Apple Inc. All rights reserved. 
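Two hunks in this section, the hardware shared-memory buffer list in kdp_core_init() and the coredump helper registration in kern_register_coredump_helper_internal() above, replace kalloc() of never-freed bookkeeping structures with zalloc_permanent_type(), which hands back zero-filled storage that lives for the lifetime of the kernel. A self-contained sketch of the pattern; the struct and function names are hypothetical, and the macro at the top is a userspace stand-in for the real allocator (which in xnu comes from the zone allocator headers):

	#include <stdlib.h>

	/* Stand-in so the sketch compiles outside the kernel tree; the real
	 * zalloc_permanent_type() returns zero-filled, never-freed memory. */
	#define zalloc_permanent_type(type) ((type *)calloc(1, sizeof(type)))

	struct example_registration {
		struct example_registration *next;
		void                        *refcon;
	};

	static struct example_registration *registrations;

	static void
	register_example(void *refcon)
	{
		struct example_registration *reg =
		    zalloc_permanent_type(struct example_registration);

		/* No matching free: the object is created once (at boot or first
		 * registration) and kept for the lifetime of the system. */
		reg->refcon = refcon;
		reg->next = registrations;
		registrations = reg;
	}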
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -39,7 +39,6 @@ #include #include -#include #include #include #include @@ -93,9 +92,8 @@ arcade_prepare(task_t task, thread_t thread) thread_ast_set(thread, AST_ARCADE); } -static lck_grp_attr_t *arcade_upcall_lck_grp_attr; -static lck_grp_t *arcade_upcall_lck_grp; -static lck_mtx_t arcade_upcall_mutex; +static LCK_GRP_DECLARE(arcade_upcall_lck_grp, "arcade_upcall"); +static LCK_MTX_DECLARE(arcade_upcall_mutex, &arcade_upcall_lck_grp); static ipc_port_t arcade_upcall_port = IP_NULL; static boolean_t arcade_upcall_refresh_in_progress = FALSE; @@ -106,14 +104,10 @@ arcade_init(void) { ipc_port_t port; - arcade_upcall_lck_grp_attr = lck_grp_attr_alloc_init(); - arcade_upcall_lck_grp = lck_grp_alloc_init("arcade_upcall", arcade_upcall_lck_grp_attr); - lck_mtx_init(&arcade_upcall_mutex, arcade_upcall_lck_grp, NULL); - /* Initialize the global arcade_register kobject and associated port */ port = ipc_kobject_alloc_port((ipc_kobject_t)&arcade_register_global, IKOT_ARCADE_REG, IPC_KOBJECT_ALLOC_MAKE_SEND); - arcade_register_global.ar_port = port; + os_atomic_store(&arcade_register_global.ar_port, port, release); } arcade_register_t @@ -207,16 +201,9 @@ arcade_upcall_refresh(uint64_t deadline) arcade_upcall_port = IP_NULL; } -#if 0 if (host_get_fairplayd_port(host_priv_self(), &fairplayd_port) != KERN_SUCCESS) { panic("arcade_upcall_refresh(get fairplayd)"); } -#else - /* Temporary hack because launchd is rejecting the other special port number */ - if (host_get_unfreed_port(host_priv_self(), &fairplayd_port) != KERN_SUCCESS) { - panic("arcade_upcall_refresh(get fairplayd)"); - } -#endif /* If no valid fairplayd port registered, we're done */ if (!IP_VALID(fairplayd_port)) { @@ -343,7 +330,7 @@ restart: switch (kr) { case MACH_SEND_INVALID_DEST: vm_map_copy_discard(copy); - /* fall thru */ + OS_FALLTHROUGH; case MIG_SERVER_DIED: goto restart; case KERN_SUCCESS: diff --git a/osfmk/kern/audit_sessionport.c b/osfmk/kern/audit_sessionport.c index a3e20dcc1..991941f57 100644 --- a/osfmk/kern/audit_sessionport.c +++ b/osfmk/kern/audit_sessionport.c @@ -59,7 +59,7 @@ audit_session_mksend(struct auditinfo_addr *aia_p, ipc_port_t *sessionport) { audit_session_aiaref(aia_p); if (!ipc_kobject_make_send_lazy_alloc_port(sessionport, - (ipc_kobject_t)aia_p, IKOT_AU_SESSIONPORT)) { + (ipc_kobject_t)aia_p, IKOT_AU_SESSIONPORT, false, 0)) { audit_session_aiaunref(aia_p); } diff --git a/osfmk/kern/backtrace.c b/osfmk/kern/backtrace.c index 3de320d9f..205fbc52d 100644 --- a/osfmk/kern/backtrace.c +++ b/osfmk/kern/backtrace.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016 Apple Inc. All rights reserved. + * Copyright (c) 2016-2019 Apple Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -31,6 +31,7 @@ #include #include +#include #include #include #include @@ -49,7 +50,7 @@ unsigned int __attribute__((noinline)) backtrace(uintptr_t *bt, unsigned int max_frames, bool *was_truncated_out) { return backtrace_frame(bt, max_frames, __builtin_frame_address(0), - was_truncated_out); + was_truncated_out); } /* @@ -239,7 +240,7 @@ backtrace_interrupted(uintptr_t *bt, unsigned int max_frames, } return backtrace_frame(bt + 1, max_frames - 1, (void *)fp, - was_truncated_out) + 1; + was_truncated_out) + 1; } unsigned int @@ -247,12 +248,12 @@ backtrace_user(uintptr_t *bt, unsigned int max_frames, int *error_out, bool *user_64_out, bool *was_truncated_out) { return backtrace_thread_user(current_thread(), bt, max_frames, - error_out, user_64_out, was_truncated_out); + error_out, user_64_out, was_truncated_out, true); } unsigned int backtrace_thread_user(void *thread, uintptr_t *bt, unsigned int max_frames, - int *error_out, bool *user_64_out, bool *was_truncated_out) + int *error_out, bool *user_64_out, bool *was_truncated_out, __unused bool faults_permitted) { bool user_64; uintptr_t pc = 0, fp = 0, next_fp = 0; @@ -263,6 +264,7 @@ backtrace_thread_user(void *thread, uintptr_t *bt, unsigned int max_frames, assert(bt != NULL); assert(max_frames > 0); + assert((max_frames == 1) || (faults_permitted == true)); #if defined(__x86_64__) @@ -270,7 +272,6 @@ backtrace_thread_user(void *thread, uintptr_t *bt, unsigned int max_frames, #define INVALID_USER_FP(FP) ((FP) == 0 || !IS_USERADDR64_CANONICAL((FP))) x86_saved_state_t *state = get_user_regs(thread); - if (!state) { return EINVAL; } @@ -286,9 +287,6 @@ backtrace_thread_user(void *thread, uintptr_t *bt, unsigned int max_frames, #elif defined(__arm64__) - /* ARM expects stack frames to be aligned to 16 bytes */ -#define INVALID_USER_FP(FP) ((FP) == 0 || ((FP) & 0x3UL) != 0UL) - struct arm_saved_state *state = get_user_regs(thread); if (!state) { return EINVAL; @@ -298,6 +296,12 @@ backtrace_thread_user(void *thread, uintptr_t *bt, unsigned int max_frames, pc = get_saved_state_pc(state); fp = get_saved_state_fp(state); + + /* ARM expects stack frames to be aligned to 16 bytes */ +#define INVALID_USER_FP(FP) ((FP) == 0 || ((FP) & 0x3UL) != 0UL) + + + #elif defined(__arm__) /* ARM expects stack frames to be aligned to 16 bytes */ diff --git a/osfmk/kern/backtrace.h b/osfmk/kern/backtrace.h index 4123482e1..08ecd569b 100644 --- a/osfmk/kern/backtrace.h +++ b/osfmk/kern/backtrace.h @@ -138,7 +138,8 @@ unsigned int backtrace_user(uintptr_t *bt, unsigned int btlen, int *error, * @see backtrace_user */ unsigned int backtrace_thread_user(void *thread, uintptr_t *bt, - unsigned int btlen, int *error, bool *user64, bool *was_truncated); + unsigned int btlen, int *error, bool *user64, bool *was_truncated, + bool faults_permitted); __END_DECLS diff --git a/osfmk/kern/bits.h b/osfmk/kern/bits.h index 00dbc4b78..89012eb5a 100644 --- a/osfmk/kern/bits.h +++ b/osfmk/kern/bits.h @@ -31,16 +31,24 @@ #ifndef __BITS_H__ #define __BITS_H__ +#ifdef KERNEL #include #include +#else +#include +#include +#define kalloc(x) malloc(x) +#define kfree(x, y) free(x) +#endif #include #include +#include typedef unsigned int uint; #define BIT(b) (1ULL << (b)) -#define mask(width) (width >= 64 ? -1 : (BIT(width) - 1)) +#define mask(width) (width >= 64 ? 
-1ULL : (BIT(width) - 1)) #define extract(x, shift, width) ((((uint64_t)(x)) >> (shift)) & mask(width)) #define bits(x, hi, lo) extract((x), (lo), (hi) - (lo) + 1) @@ -130,7 +138,7 @@ bit_next(uint64_t bitmap, int previous_bit) inline static int lsb_first(uint64_t bitmap) { - return __builtin_ffsll(bitmap) - 1; + return __builtin_ffsll((long long)bitmap) - 1; } /* Returns the least significant '1' bit that is more significant than previous_bit, @@ -205,7 +213,39 @@ bitmap_zero(bitmap_t *map, uint nbits) inline static bitmap_t * bitmap_full(bitmap_t *map, uint nbits) { - return (bitmap_t *)memset((void *)map, ~0, BITMAP_SIZE(nbits)); + uint i; + + for (i = 0; i < bitmap_index(nbits - 1); i++) { + map[i] = ~((uint64_t)0); + } + + uint nbits_filled = i * 64; + + if (nbits > nbits_filled) { + map[i] = mask(nbits - nbits_filled); + } + + return map; +} + +inline static bool +bitmap_is_full(bitmap_t *map, uint nbits) +{ + uint i; + + for (i = 0; i < bitmap_index(nbits - 1); i++) { + if (map[i] != ~((uint64_t)0)) { + return false; + } + } + + uint nbits_filled = i * 64; + + if (nbits > nbits_filled) { + return map[i] == mask(nbits - nbits_filled); + } + + return true; } inline static bitmap_t * @@ -251,7 +291,7 @@ atomic_bitmap_clear(_Atomic bitmap_t *map, uint n, int mem_order) } inline static bool -bitmap_test(bitmap_t *map, uint n) +bitmap_test(const bitmap_t *map, uint n) { return bit_test(map[bitmap_index(n)], bitmap_bit(n)); } @@ -269,6 +309,42 @@ bitmap_first(bitmap_t *map, uint nbits) return -1; } +inline static void +bitmap_not(bitmap_t *out, const bitmap_t *in, uint nbits) +{ + for (uint i = 0; i <= bitmap_index(nbits - 1); i++) { + out[i] = ~in[i]; + } +} + +inline static void +bitmap_and(bitmap_t *out, const bitmap_t *in1, const bitmap_t *in2, uint nbits) +{ + for (uint i = 0; i <= bitmap_index(nbits - 1); i++) { + out[i] = in1[i] & in2[i]; + } +} + +inline static void +bitmap_and_not(bitmap_t *out, const bitmap_t *in1, const bitmap_t *in2, uint nbits) +{ + for (uint i = 0; i <= bitmap_index(nbits - 1); i++) { + out[i] = in1[i] & ~in2[i]; + } +} + +inline static bool +bitmap_equal(const bitmap_t *in1, const bitmap_t *in2, uint nbits) +{ + for (uint i = 0; i <= bitmap_index(nbits - 1); i++) { + if (in1[i] != in2[i]) { + return false; + } + } + + return true; +} + inline static int bitmap_and_not_mask_first(bitmap_t *map, bitmap_t *mask, uint nbits) { @@ -283,7 +359,7 @@ bitmap_and_not_mask_first(bitmap_t *map, bitmap_t *mask, uint nbits) } inline static int -bitmap_lsb_first(bitmap_t *map, uint nbits) +bitmap_lsb_first(const bitmap_t *map, uint nbits) { for (uint i = 0; i <= bitmap_index(nbits - 1); i++) { if (map[i] == 0) { @@ -296,7 +372,7 @@ bitmap_lsb_first(bitmap_t *map, uint nbits) } inline static int -bitmap_next(bitmap_t *map, uint prev) +bitmap_next(const bitmap_t *map, uint prev) { if (prev == 0) { return -1; @@ -319,7 +395,7 @@ bitmap_next(bitmap_t *map, uint prev) } inline static int -bitmap_lsb_next(bitmap_t *map, uint nbits, uint prev) +bitmap_lsb_next(const bitmap_t *map, uint nbits, uint prev) { if ((prev + 1) >= nbits) { return -1; diff --git a/osfmk/kern/block_hint.h b/osfmk/kern/block_hint.h index a28e09f22..f512457c2 100644 --- a/osfmk/kern/block_hint.h +++ b/osfmk/kern/block_hint.h @@ -49,6 +49,7 @@ typedef enum thread_snapshot_wait_flags { kThreadWaitWorkloopSyncWait = 0x10, kThreadWaitOnProcess = 0x11, kThreadWaitSleepWithInheritor = 0x12, + kThreadWaitEventlink = 0x13, kThreadWaitCompressor = 0x14, } __attribute__((packed)) block_hint_t; @@ -57,8 +58,8 @@ 
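The bits.h rework above changes bitmap_full() from memset-ing every word to all-ones into a loop that sets only the low nbits bits, masking the final partial word with mask(nbits - nbits_filled); keeping the bits above nbits clear is what lets the new bitmap_is_full() test for fullness with plain equality. A small worked example under that reading; example_bitmap_full and the simplified macros below are standalone stand-ins for the header's versions:

	#include <assert.h>
	#include <stdint.h>

	typedef uint64_t bitmap_t;

	#define BIT(b)           (1ULL << (b))
	#define mask(width)      ((width) >= 64 ? ~0ULL : (BIT(width) - 1))
	#define bitmap_index(n)  ((n) >> 6)

	/* Mirrors the reworked bitmap_full(): whole words of ones, then a masked tail. */
	static void
	example_bitmap_full(bitmap_t *map, unsigned nbits)
	{
		unsigned i;
		for (i = 0; i < bitmap_index(nbits - 1); i++) {
			map[i] = ~0ULL;
		}
		unsigned filled = i * 64;
		if (nbits > filled) {
			map[i] = mask(nbits - filled);
		}
	}

	int
	main(void)
	{
		bitmap_t map[2];
		example_bitmap_full(map, 70);   /* 70 bits across two 64-bit words */
		assert(map[0] == ~0ULL);        /* bits 0..63 set */
		assert(map[1] == mask(6));      /* only bits 64..69 set: 0x3f */
		return 0;
	}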
_Static_assert(sizeof(block_hint_t) <= sizeof(short), #ifdef XNU_KERNEL_PRIVATE +struct turnstile; struct waitq; -struct stackshot_thread_waitinfo; typedef struct stackshot_thread_waitinfo thread_waitinfo_t; /* Used for stackshot_thread_waitinfo_unsafe */ @@ -74,6 +75,7 @@ extern void kdp_workloop_sync_wait_find_owner(thread_t thread, event64_t event, extern void kdp_wait4_find_process(thread_t thread, event64_t event, thread_waitinfo_t *waitinfo); extern void kdp_sleep_with_inheritor_find_owner(struct waitq * waitq, __unused event64_t event, thread_waitinfo_t * waitinfo); extern void kdp_turnstile_fill_tsinfo(struct turnstile *ts, thread_turnstileinfo_t *tsinfo); +void kdp_eventlink_find_owner(struct waitq *waitq, event64_t event, thread_waitinfo_t *waitinfo); #endif /* XNU_KERNEL_PRIVATE */ diff --git a/osfmk/kern/bsd_kern.c b/osfmk/kern/bsd_kern.c index e89c8cfb8..cb46e621b 100644 --- a/osfmk/kern/bsd_kern.c +++ b/osfmk/kern/bsd_kern.c @@ -45,6 +45,7 @@ #include #include #include +#include #if MONOTONIC #include @@ -90,6 +91,7 @@ extern void psignal(void *, int); void * get_bsdtask_info(task_t t) { + proc_require(t->bsd_info, PROC_REQUIRE_ALLOW_NULL | PROC_REQUIRE_ALLOW_KERNPROC); return t->bsd_info; } @@ -107,7 +109,12 @@ task_bsdtask_kill(task_t t) void * get_bsdthreadtask_info(thread_t th) { - return th->task != TASK_NULL ? th->task->bsd_info : NULL; + void *bsd_info = NULL; + + if (th->task) { + bsd_info = get_bsdtask_info(th->task); + } + return bsd_info; } /* @@ -375,10 +382,6 @@ swap_task_map(task_t task, thread_t thread, vm_map_t map) mp_enable_preemption(); task_unlock(task); -#if defined(__x86_64__) && NCOPY_WINDOWS > 0 - inval_copy_windows(thread); -#endif - return old_map; } @@ -425,26 +428,39 @@ get_task_resident_max(task_t task) return (uint64_t)pmap_resident_max(map->pmap) * PAGE_SIZE_64; } +/* + * Get the balance for a given field in the task ledger. + * Returns 0 if the entry is invalid. 
+ */ +static uint64_t +get_task_ledger_balance(task_t task, int entry) +{ + ledger_amount_t balance = 0; + + ledger_get_balance(task->ledger, entry, &balance); + return balance; +} + uint64_t get_task_purgeable_size(task_t task) { kern_return_t ret; - ledger_amount_t credit, debit; + ledger_amount_t balance = 0; uint64_t volatile_size = 0; - ret = ledger_get_entries(task->ledger, task_ledgers.purgeable_volatile, &credit, &debit); + ret = ledger_get_balance(task->ledger, task_ledgers.purgeable_volatile, &balance); if (ret != KERN_SUCCESS) { return 0; } - volatile_size += (credit - debit); + volatile_size += balance; - ret = ledger_get_entries(task->ledger, task_ledgers.purgeable_volatile_compressed, &credit, &debit); + ret = ledger_get_balance(task->ledger, task_ledgers.purgeable_volatile_compressed, &balance); if (ret != KERN_SUCCESS) { return 0; } - volatile_size += (credit - debit); + volatile_size += balance; return volatile_size; } @@ -455,15 +471,7 @@ get_task_purgeable_size(task_t task) uint64_t get_task_phys_footprint(task_t task) { - kern_return_t ret; - ledger_amount_t credit, debit; - - ret = ledger_get_entries(task->ledger, task_ledgers.phys_footprint, &credit, &debit); - if (KERN_SUCCESS == ret) { - return credit - debit; - } - - return 0; + return get_task_ledger_balance(task, task_ledgers.phys_footprint); } #if CONFIG_LEDGER_INTERVAL_MAX @@ -524,66 +532,84 @@ get_task_phys_footprint_limit(task_t task) uint64_t get_task_internal(task_t task) { - kern_return_t ret; - ledger_amount_t credit, debit; + return get_task_ledger_balance(task, task_ledgers.internal); +} - ret = ledger_get_entries(task->ledger, task_ledgers.internal, &credit, &debit); - if (KERN_SUCCESS == ret) { - return credit - debit; - } +uint64_t +get_task_internal_compressed(task_t task) +{ + return get_task_ledger_balance(task, task_ledgers.internal_compressed); +} - return 0; +uint64_t +get_task_purgeable_nonvolatile(task_t task) +{ + return get_task_ledger_balance(task, task_ledgers.purgeable_nonvolatile); } uint64_t -get_task_internal_compressed(task_t task) +get_task_purgeable_nonvolatile_compressed(task_t task) { - kern_return_t ret; - ledger_amount_t credit, debit; + return get_task_ledger_balance(task, task_ledgers.purgeable_nonvolatile_compressed); +} - ret = ledger_get_entries(task->ledger, task_ledgers.internal_compressed, &credit, &debit); - if (KERN_SUCCESS == ret) { - return credit - debit; - } +uint64_t +get_task_alternate_accounting(task_t task) +{ + return get_task_ledger_balance(task, task_ledgers.alternate_accounting); +} - return 0; +uint64_t +get_task_alternate_accounting_compressed(task_t task) +{ + return get_task_ledger_balance(task, task_ledgers.alternate_accounting_compressed); } uint64_t -get_task_purgeable_nonvolatile(task_t task) +get_task_page_table(task_t task) { - kern_return_t ret; - ledger_amount_t credit, debit; + return get_task_ledger_balance(task, task_ledgers.page_table); +} - ret = ledger_get_entries(task->ledger, task_ledgers.purgeable_nonvolatile, &credit, &debit); - if (KERN_SUCCESS == ret) { - return credit - debit; - } +#if CONFIG_FREEZE +uint64_t +get_task_frozen_to_swap(task_t task) +{ + return get_task_ledger_balance(task, task_ledgers.frozen_to_swap); +} +#endif /* CONFIG_FREEZE */ - return 0; +uint64_t +get_task_iokit_mapped(task_t task) +{ + return get_task_ledger_balance(task, task_ledgers.iokit_mapped); } uint64_t -get_task_purgeable_nonvolatile_compressed(task_t task) +get_task_network_nonvolatile(task_t task) { - kern_return_t ret; - ledger_amount_t credit, 
debit; + return get_task_ledger_balance(task, task_ledgers.network_nonvolatile); +} - ret = ledger_get_entries(task->ledger, task_ledgers.purgeable_nonvolatile_compressed, &credit, &debit); - if (KERN_SUCCESS == ret) { - return credit - debit; - } +uint64_t +get_task_network_nonvolatile_compressed(task_t task) +{ + return get_task_ledger_balance(task, task_ledgers.network_nonvolatile_compressed); +} - return 0; +uint64_t +get_task_wired_mem(task_t task) +{ + return get_task_ledger_balance(task, task_ledgers.wired_mem); } uint64_t -get_task_alternate_accounting(task_t task) +get_task_tagged_footprint(task_t task) { kern_return_t ret; ledger_amount_t credit, debit; - ret = ledger_get_entries(task->ledger, task_ledgers.alternate_accounting, &credit, &debit); + ret = ledger_get_entries(task->ledger, task_ledgers.tagged_footprint, &credit, &debit); if (KERN_SUCCESS == ret) { return credit - debit; } @@ -592,12 +618,12 @@ get_task_alternate_accounting(task_t task) } uint64_t -get_task_alternate_accounting_compressed(task_t task) +get_task_tagged_footprint_compressed(task_t task) { kern_return_t ret; ledger_amount_t credit, debit; - ret = ledger_get_entries(task->ledger, task_ledgers.alternate_accounting_compressed, &credit, &debit); + ret = ledger_get_entries(task->ledger, task_ledgers.tagged_footprint_compressed, &credit, &debit); if (KERN_SUCCESS == ret) { return credit - debit; } @@ -606,12 +632,12 @@ get_task_alternate_accounting_compressed(task_t task) } uint64_t -get_task_page_table(task_t task) +get_task_media_footprint(task_t task) { kern_return_t ret; ledger_amount_t credit, debit; - ret = ledger_get_entries(task->ledger, task_ledgers.page_table, &credit, &debit); + ret = ledger_get_entries(task->ledger, task_ledgers.media_footprint, &credit, &debit); if (KERN_SUCCESS == ret) { return credit - debit; } @@ -620,12 +646,12 @@ get_task_page_table(task_t task) } uint64_t -get_task_iokit_mapped(task_t task) +get_task_media_footprint_compressed(task_t task) { kern_return_t ret; ledger_amount_t credit, debit; - ret = ledger_get_entries(task->ledger, task_ledgers.iokit_mapped, &credit, &debit); + ret = ledger_get_entries(task->ledger, task_ledgers.media_footprint_compressed, &credit, &debit); if (KERN_SUCCESS == ret) { return credit - debit; } @@ -634,12 +660,12 @@ get_task_iokit_mapped(task_t task) } uint64_t -get_task_network_nonvolatile(task_t task) +get_task_graphics_footprint(task_t task) { kern_return_t ret; ledger_amount_t credit, debit; - ret = ledger_get_entries(task->ledger, task_ledgers.network_nonvolatile, &credit, &debit); + ret = ledger_get_entries(task->ledger, task_ledgers.graphics_footprint, &credit, &debit); if (KERN_SUCCESS == ret) { return credit - debit; } @@ -647,13 +673,14 @@ get_task_network_nonvolatile(task_t task) return 0; } + uint64_t -get_task_network_nonvolatile_compressed(task_t task) +get_task_graphics_footprint_compressed(task_t task) { kern_return_t ret; ledger_amount_t credit, debit; - ret = ledger_get_entries(task->ledger, task_ledgers.network_nonvolatile_compressed, &credit, &debit); + ret = ledger_get_entries(task->ledger, task_ledgers.graphics_footprint_compressed, &credit, &debit); if (KERN_SUCCESS == ret) { return credit - debit; } @@ -662,12 +689,12 @@ get_task_network_nonvolatile_compressed(task_t task) } uint64_t -get_task_wired_mem(task_t task) +get_task_neural_footprint(task_t task) { kern_return_t ret; ledger_amount_t credit, debit; - ret = ledger_get_entries(task->ledger, task_ledgers.wired_mem, &credit, &debit); + ret = 
ledger_get_entries(task->ledger, task_ledgers.neural_footprint, &credit, &debit); if (KERN_SUCCESS == ret) { return credit - debit; } @@ -675,14 +702,13 @@ get_task_wired_mem(task_t task) return 0; } - uint64_t -get_task_cpu_time(task_t task) +get_task_neural_footprint_compressed(task_t task) { kern_return_t ret; ledger_amount_t credit, debit; - ret = ledger_get_entries(task->ledger, task_ledgers.cpu_time, &credit, &debit); + ret = ledger_get_entries(task->ledger, task_ledgers.neural_footprint_compressed, &credit, &debit); if (KERN_SUCCESS == ret) { return credit - debit; } @@ -690,6 +716,12 @@ get_task_cpu_time(task_t task) return 0; } +uint64_t +get_task_cpu_time(task_t task) +{ + return get_task_ledger_balance(task, task_ledgers.cpu_time); +} + uint32_t get_task_loadTag(task_t task) { @@ -734,7 +766,7 @@ vm_map_size_t get_vmmap_size( vm_map_t map) { - return map->size; + return vm_map_adjusted_size(map); } #if CONFIG_COREDUMP @@ -930,7 +962,7 @@ fill_taskprocinfo(task_t task, struct proc_taskinfo_internal * ptinfo) map = (task == kernel_task)? kernel_map: task->map; - ptinfo->pti_virtual_size = map->size; + ptinfo->pti_virtual_size = vm_map_adjusted_size(map); ptinfo->pti_resident_size = (mach_vm_size_t)(pmap_resident_count(map->pmap)) * PAGE_SIZE_64; @@ -1103,12 +1135,10 @@ fill_task_rusage(task_t task, rusage_info_current *ri) ri->ri_system_time = powerinfo.total_system; ri->ri_runnable_time = runnable_time; - ledger_get_balance(task->ledger, task_ledgers.phys_footprint, - (ledger_amount_t *)&ri->ri_phys_footprint); + ri->ri_phys_footprint = get_task_phys_footprint(task); ledger_get_balance(task->ledger, task_ledgers.phys_mem, (ledger_amount_t *)&ri->ri_resident_size); - ledger_get_balance(task->ledger, task_ledgers.wired_mem, - (ledger_amount_t *)&ri->ri_wired_size); + ri->ri_wired_size = get_task_wired_mem(task); ri->ri_pageins = task->pageins; diff --git a/osfmk/kern/btlog.c b/osfmk/kern/btlog.c index 93f6e3117..718f5b044 100644 --- a/osfmk/kern/btlog.c +++ b/osfmk/kern/btlog.c @@ -29,6 +29,7 @@ #include #include #include +#include #include #include #include @@ -131,9 +132,6 @@ struct btlog { */ }; -extern boolean_t vm_kernel_ready; -extern boolean_t kmem_alloc_ready; - #define lookup_btrecord(btlog, index) \ ((btlog_record_t *)(btlog->btrecords + index * btlog->btrecord_size)) @@ -239,7 +237,8 @@ btlog_create(size_t numrecords, size_t btrecord_size = 0; uintptr_t free_elem = 0, next_free_elem = 0; - if (vm_kernel_ready && !kmem_alloc_ready) { + if (startup_phase >= STARTUP_SUB_VM_KERNEL && + startup_phase < STARTUP_SUB_KMEM_ALLOC) { return NULL; } @@ -280,7 +279,7 @@ btlog_create(size_t numrecords, numrecords = MIN(BTLOG_MAX_RECORDS, (buffersize_needed - sizeof(btlog_t)) / btrecord_size); - if (kmem_alloc_ready) { + if (__probable(startup_phase >= STARTUP_SUB_KMEM_ALLOC)) { ret = kmem_alloc(kernel_map, &buffer, buffersize_needed, VM_KERN_MEMORY_DIAG); if (ret != KERN_SUCCESS) { return NULL; diff --git a/osfmk/kern/call_entry.h b/osfmk/kern/call_entry.h deleted file mode 100644 index 979e57a8f..000000000 --- a/osfmk/kern/call_entry.h +++ /dev/null @@ -1,161 +0,0 @@ -/* - * Copyright (c) 1993-1995, 1999-2008 Apple Inc. All rights reserved. - * - * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * - * This file contains Original Code and/or Modifications of Original Code - * as defined in and that are subject to the Apple Public Source License - * Version 2.0 (the 'License'). You may not use this file except in - * compliance with the License. 
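The bsd_kern.c hunks above collapse a long run of near-identical accessors, each calling ledger_get_entries() and returning credit - debit, into one static helper built on ledger_get_balance() that reports 0 for an invalid entry. A self-contained model of that consolidation; the "fake_" and "_model" names are stand-ins, while ledger_get_balance() and the task_ledgers fields themselves come from the hunks above:

	#include <stdint.h>

	typedef int64_t ledger_amount_t;

	struct fake_task {
		ledger_amount_t ledger_balance[8];
		int             ledger_valid[8];
	};

	/* One helper replaces many copies of the credit/debit boilerplate;
	 * invalid entries simply report 0. */
	static uint64_t
	get_task_ledger_balance_model(struct fake_task *task, int entry)
	{
		ledger_amount_t balance = 0;

		if (entry >= 0 && entry < 8 && task->ledger_valid[entry]) {
			balance = task->ledger_balance[entry];
		}
		return (uint64_t)balance;
	}

	/* Each accessor then shrinks to a one-liner; index 3 is purely illustrative. */
	static uint64_t
	get_task_phys_footprint_model(struct fake_task *task)
	{
		return get_task_ledger_balance_model(task, 3);
	}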
The rights granted to you under the License - * may not be used to create, or enable the creation or redistribution of, - * unlawful or unlicensed copies of an Apple operating system, or to - * circumvent, violate, or enable the circumvention or violation of, any - * terms of an Apple operating system software license agreement. - * - * Please obtain a copy of the License at - * http://www.opensource.apple.com/apsl/ and read it before using this file. - * - * The Original Code and all software distributed under the License are - * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER - * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, - * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. - * Please see the License for the specific language governing rights and - * limitations under the License. - * - * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ - */ -/* - * Declarations for generic call outs. - */ - -#ifndef _KERN_CALL_ENTRY_H_ -#define _KERN_CALL_ENTRY_H_ - -#ifdef XNU_KERNEL_PRIVATE -#include - -#if !CONFIG_EMBEDDED -#define TIMER_TRACE 1 -#endif - -typedef void *call_entry_param_t; -typedef void (*call_entry_func_t)( - call_entry_param_t param0, - call_entry_param_t param1); - -typedef struct call_entry { - queue_chain_t q_link; - queue_head_t *queue; - call_entry_func_t func; - call_entry_param_t param0; - call_entry_param_t param1; - uint64_t deadline; -#if TIMER_TRACE - uint64_t entry_time; -#endif -} call_entry_data_t; - -typedef struct call_entry *call_entry_t; - -#ifdef MACH_KERNEL_PRIVATE - -#define call_entry_setup(entry, pfun, p0) \ -MACRO_BEGIN \ - (entry)->func = (call_entry_func_t)(pfun); \ - (entry)->param0 = (call_entry_param_t)(p0); \ - (entry)->queue = NULL; \ - (entry)->deadline = 0; \ - queue_chain_init((entry)->q_link); \ -MACRO_END - -#define qe(x) ((queue_entry_t)(x)) -#define CE(x) ((call_entry_t)(x)) - -static __inline__ queue_head_t * -call_entry_enqueue_tail( - call_entry_t entry, - queue_t queue) -{ - queue_t old_queue = entry->queue; - - if (old_queue != NULL) { - re_queue_tail(queue, &entry->q_link); - } else { - enqueue_tail(queue, &entry->q_link); - } - - entry->queue = queue; - - return old_queue; -} - -static __inline__ queue_head_t * -call_entry_dequeue( - call_entry_t entry) -{ - queue_t old_queue = entry->queue; - - if (old_queue != NULL) { - (void)remque(qe(entry)); - - entry->queue = NULL; - } - return old_queue; -} - -static __inline__ queue_head_t * -call_entry_enqueue_deadline( - call_entry_t entry, - queue_head_t *queue, - uint64_t deadline) -{ - queue_t old_queue = entry->queue; - call_entry_t current; - - if (old_queue != queue || entry->deadline < deadline) { - if (old_queue == NULL) { - current = CE(queue_first(queue)); - } else if (old_queue != queue) { - (void)remque(qe(entry)); - current = CE(queue_first(queue)); - } else { - current = CE(queue_next(qe(entry))); - (void)remque(qe(entry)); - } - - while (TRUE) { - if (queue_end(queue, qe(current)) || - deadline < current->deadline) { - current = CE(queue_prev(qe(current))); - break; - } - - current = CE(queue_next(qe(current))); - } - insque(qe(entry), qe(current)); - } else if (deadline < entry->deadline) { - current = CE(queue_prev(qe(entry))); - - (void)remque(qe(entry)); - - while (TRUE) { - if (queue_end(queue, qe(current)) || - current->deadline <= deadline) { - break; - } - - current = CE(queue_prev(qe(current))); - } - insque(qe(entry), qe(current)); - } - 
entry->queue = queue; - entry->deadline = deadline; - - return old_queue; -} -#endif /* MACH_KERNEL_PRIVATE */ - -#endif /* XNU_KERNEL_PRIVATE */ - -#endif /* _KERN_CALL_ENTRY_H_ */ diff --git a/osfmk/kern/cambria_layout.h b/osfmk/kern/cambria_layout.h new file mode 100644 index 000000000..e6dd2d4a9 --- /dev/null +++ b/osfmk/kern/cambria_layout.h @@ -0,0 +1,41 @@ +/* + * Copyright (c) 2019 Apple Inc. All rights reserved. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. + * + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ + */ + +#ifndef KERN_CAMBRIA_LAYOUT_H +#define KERN_CAMBRIA_LAYOUT_H + +/* + * xnu's current understanding of Cambria's structure layout. Cambria + * should include static_asserts to check that these values are accurate. + */ + +#define KCAMBRIA_THCTX_RSP_OFFSET (96) +#define KCAMBRIA_THCTX_RBP_OFFSET (104) +#define KCAMBRIA_THCTX_LR_OFFSET (344) + +#endif /* !defined(KERN_CAMBRIA_LAYOUT_H) */ diff --git a/osfmk/kern/clock.c b/osfmk/kern/clock.c index 101678d7b..1264c6661 100644 --- a/osfmk/kern/clock.c +++ b/osfmk/kern/clock.c @@ -86,16 +86,30 @@ #include #include +#if HIBERNATION && HAS_CONTINUOUS_HWCLOCK +// On ARM64, the hwclock keeps ticking across a normal S2R so we use it to reset the +// system clock after a normal wake. However, on hibernation we cut power to the hwclock, +// so we have to add an offset to the hwclock to compute continuous_time after hibernate resume. 
+uint64_t hwclock_conttime_offset = 0; +#endif /* HIBERNATION && HAS_CONTINUOUS_HWCLOCK */ + +#if HIBERNATION_USES_LEGACY_CLOCK || !HAS_CONTINUOUS_HWCLOCK +#define ENABLE_LEGACY_CLOCK_CODE 1 +#endif /* HIBERNATION_USES_LEGACY_CLOCK || !HAS_CONTINUOUS_HWCLOCK */ + +#if HIBERNATION_USES_LEGACY_CLOCK +#include +#endif /* HIBERNATION_USES_LEGACY_CLOCK */ + uint32_t hz_tick_interval = 1; -#if !HAS_CONTINUOUS_HWCLOCK +#if ENABLE_LEGACY_CLOCK_CODE static uint64_t has_monotonic_clock = 0; -#endif +#endif /* ENABLE_LEGACY_CLOCK_CODE */ + +SIMPLE_LOCK_DECLARE(clock_lock, 0); -decl_simple_lock_data(, clock_lock); -lck_grp_attr_t * settime_lock_grp_attr; -lck_grp_t * settime_lock_grp; -lck_attr_t * settime_lock_attr; -lck_mtx_t settime_lock; +static LCK_GRP_DECLARE(settime_lock_grp, "settime"); +static LCK_MTX_DECLARE(settime_lock, &settime_lock_grp); #define clock_lock() \ simple_lock(&clock_lock, LCK_GRP_NULL) @@ -103,9 +117,6 @@ lck_mtx_t settime_lock; #define clock_unlock() \ simple_unlock(&clock_lock) -#define clock_lock_init() \ - simple_lock_init(&clock_lock, 0) - #ifdef kdp_simple_lock_is_acquired boolean_t kdp_clock_is_locked() @@ -236,7 +247,7 @@ bintime2nsclock(const struct bintime *_bt, clock_sec_t *secs, clock_usec_t *nano *nanosecs = ((uint64_t)NSEC_PER_SEC * (uint32_t)(_bt->frac >> 32)) >> 32; } -#if !defined(HAS_CONTINUOUS_HWCLOCK) +#if ENABLE_LEGACY_CLOCK_CODE static __inline void bintime2absolutetime(const struct bintime *_bt, uint64_t *abs) { @@ -253,7 +264,7 @@ struct latched_time { extern int kernel_sysctlbyname(const char *name, void *oldp, size_t *oldlenp, void *newp, size_t newlen); -#endif +#endif /* ENABLE_LEGACY_CLOCK_CODE */ /* * Time of day (calendar) variables. * @@ -274,9 +285,9 @@ static struct clock_calend { struct bintime offset; /* cumulative offset expressed in (sec, 64 bits frac of a second) */ struct bintime bintime; /* cumulative offset (it includes bootime) expressed in (sec, 64 bits frac of a second) */ struct bintime boottime; /* boot time expressed in (sec, 64 bits frac of a second) */ -#if !HAS_CONTINUOUS_HWCLOCK +#if ENABLE_LEGACY_CLOCK_CODE struct bintime basesleep; -#endif +#endif /* ENABLE_LEGACY_CLOCK_CODE */ } clock_calend; static uint64_t ticks_per_sec; /* ticks in a second (expressed in abs time) */ @@ -346,13 +357,6 @@ MACRO_END void clock_config(void) { - clock_lock_init(); - - settime_lock_grp_attr = lck_grp_attr_alloc_init(); - settime_lock_grp = lck_grp_alloc_init("settime grp", settime_lock_grp_attr); - settime_lock_attr = lck_attr_alloc_init(); - lck_mtx_init(&settime_lock, settime_lock_grp, settime_lock_attr); - clock_oldconfig(); ntp_init(); @@ -487,7 +491,7 @@ get_scale_factors_from_adj(int64_t adjustment, uint64_t* tick_scale_x, uint64_t* * Keep it as additional adjustment for the next sec. */ frac = (adjustment > 0)? ((uint32_t) adjustment) : -((uint32_t) (-adjustment)); - *s_adj_nsx = (frac > 0)? frac << 32 : -((-frac) << 32); + *s_adj_nsx = (frac > 0)? 
((uint64_t) frac) << 32 : -(((uint64_t) (-frac)) << 32); return; } @@ -1031,7 +1035,7 @@ clock_initialize_calendar(void) clock_usec_t utc_offset_microsecs; spl_t s; struct bintime bt; -#if !HAS_CONTINUOUS_HWCLOCK +#if ENABLE_LEGACY_CLOCK_CODE struct bintime monotonic_bt; struct latched_time monotonic_time; uint64_t monotonic_usec_total; @@ -1039,12 +1043,12 @@ clock_initialize_calendar(void) clock_usec_t microsys2, monotonic_usec; size_t size; -#endif +#endif /* ENABLE_LEGACY_CLOCK_CODE */ //Get the UTC time and corresponding sys time PEGetUTCTimeOfDay(&secs, &microsecs); clock_get_system_microtime(&sys, &microsys); -#if !HAS_CONTINUOUS_HWCLOCK +#if ENABLE_LEGACY_CLOCK_CODE /* * If the platform has a monotonic clock, use kern.monotonicclock_usecs * to estimate the sleep/wake time, otherwise use the UTC time to estimate @@ -1060,7 +1064,7 @@ clock_initialize_calendar(void) absolutetime_to_microtime(monotonic_time.mach_time, &sys2, &microsys2); os_log(OS_LOG_DEFAULT, "%s system has monotonic clock\n", __func__); } -#endif +#endif /* ENABLE_LEGACY_CLOCK_CODE */ s = splclock(); clock_lock(); @@ -1111,7 +1115,7 @@ clock_initialize_calendar(void) clock_calend.s_scale_ns = NSEC_PER_SEC; clock_calend.s_adj_nsx = 0; -#if !HAS_CONTINUOUS_HWCLOCK +#if ENABLE_LEGACY_CLOCK_CODE if (has_monotonic_clock) { monotonic_sec = monotonic_usec_total / (clock_sec_t)USEC_PER_SEC; monotonic_usec = monotonic_usec_total % (clock_usec_t)USEC_PER_SEC; @@ -1124,7 +1128,7 @@ clock_initialize_calendar(void) // set the baseleep as the difference between monotonic clock - sys clock_calend.basesleep = monotonic_bt; } -#endif +#endif /* ENABLE_LEGACY_CLOCK_CODE */ commpage_update_mach_continuous_time(mach_absolutetime_asleep); #if DEVELOPMENT || DEBUG @@ -1173,8 +1177,8 @@ scale_sleep_time(void) bintime_add(&clock_calend.bintime, &sleep_time); } -void -clock_wakeup_calendar(void) +static void +clock_wakeup_calendar_hwclock(void) { spl_t s; @@ -1184,22 +1188,23 @@ clock_wakeup_calendar(void) commpage_disable_timestamp(); uint64_t abstime = mach_absolute_time(); - uint64_t total_sleep_time = ml_get_hwclock() - abstime; + uint64_t total_sleep_time = mach_continuous_time() - abstime; mach_absolutetime_last_sleep = total_sleep_time - mach_absolutetime_asleep; mach_absolutetime_asleep = total_sleep_time; scale_sleep_time(); - KERNEL_DEBUG_CONSTANT( - MACHDBG_CODE(DBG_MACH_CLOCK, MACH_EPOCH_CHANGE) | DBG_FUNC_NONE, - (uintptr_t) mach_absolutetime_last_sleep, - (uintptr_t) mach_absolutetime_asleep, - (uintptr_t) (mach_absolutetime_last_sleep >> 32), - (uintptr_t) (mach_absolutetime_asleep >> 32), - 0); + KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_CLOCK, MACH_EPOCH_CHANGE), + (uintptr_t)mach_absolutetime_last_sleep, + (uintptr_t)mach_absolutetime_asleep, + (uintptr_t)(mach_absolutetime_last_sleep >> 32), + (uintptr_t)(mach_absolutetime_asleep >> 32)); commpage_update_mach_continuous_time(mach_absolutetime_asleep); +#if HIBERNATION + commpage_update_mach_continuous_time_hw_offset(hwclock_conttime_offset); +#endif adjust_cont_time_thread_calls(); clock_unlock(); @@ -1212,10 +1217,12 @@ #endif } -#else /* HAS_CONTINUOUS_HWCLOCK */ +#endif /* HAS_CONTINUOUS_HWCLOCK */ -void -clock_wakeup_calendar(void) +#if ENABLE_LEGACY_CLOCK_CODE + +static void +clock_wakeup_calendar_legacy(void) { clock_sec_t wake_sys_sec; clock_usec_t wake_sys_usec; @@ -1397,13 +1404,11 @@ clock_wakeup_calendar(void) } } done: - KERNEL_DEBUG_CONSTANT( - MACHDBG_CODE(DBG_MACH_CLOCK, MACH_EPOCH_CHANGE) | DBG_FUNC_NONE, - (uintptr_t) mach_absolutetime_last_sleep, -
(uintptr_t) mach_absolutetime_asleep, - (uintptr_t) (mach_absolutetime_last_sleep >> 32), - (uintptr_t) (mach_absolutetime_asleep >> 32), - 0); + KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_CLOCK, MACH_EPOCH_CHANGE), + (uintptr_t)mach_absolutetime_last_sleep, + (uintptr_t)mach_absolutetime_asleep, + (uintptr_t)(mach_absolutetime_last_sleep >> 32), + (uintptr_t)(mach_absolutetime_asleep >> 32)); commpage_update_mach_continuous_time(mach_absolutetime_asleep); adjust_cont_time_thread_calls(); @@ -1429,7 +1434,26 @@ done: #endif } -#endif /* !HAS_CONTINUOUS_HWCLOCK */ +#endif /* ENABLE_LEGACY_CLOCK_CODE */ + +void +clock_wakeup_calendar(void) +{ +#if HAS_CONTINUOUS_HWCLOCK +#if HIBERNATION_USES_LEGACY_CLOCK + if (gIOHibernateState) { + // if we're resuming from hibernation, we have to take the legacy wakeup path + return clock_wakeup_calendar_legacy(); + } +#endif /* HIBERNATION_USES_LEGACY_CLOCK */ + // use the hwclock wakeup path + return clock_wakeup_calendar_hwclock(); +#elif ENABLE_LEGACY_CLOCK_CODE + return clock_wakeup_calendar_legacy(); +#else +#error "can't determine which clock code to run" +#endif +} /* * clock_get_boottime_nanotime: @@ -1504,6 +1528,7 @@ mach_wait_until_trap( uint64_t deadline = args->deadline; wait_result_t wresult; + wresult = assert_wait_deadline_with_leeway((event_t)mach_wait_until_trap, THREAD_ABORTSAFE, TIMEOUT_URGENCY_USER_NORMAL, deadline, 0); if (wresult == THREAD_WAITING) { @@ -1624,6 +1649,20 @@ clock_interval_to_deadline( } } +void +nanoseconds_to_deadline( + uint64_t interval, + uint64_t *result) +{ + uint64_t abstime; + + nanoseconds_to_absolutetime(interval, &abstime); + + if (os_add_overflow(mach_absolute_time(), abstime, result)) { + *result = UINT64_MAX; + } +} + void clock_absolutetime_interval_to_deadline( uint64_t abstime, @@ -1683,7 +1722,9 @@ clock_deadline_for_periodic_event( uint64_t mach_continuous_time(void) { -#if HAS_CONTINUOUS_HWCLOCK +#if HIBERNATION && HAS_CONTINUOUS_HWCLOCK + return ml_get_hwclock() + hwclock_conttime_offset; +#elif HAS_CONTINUOUS_HWCLOCK return ml_get_hwclock(); #else while (1) { @@ -1703,7 +1744,7 @@ uint64_t mach_continuous_approximate_time(void) { #if HAS_CONTINUOUS_HWCLOCK - return ml_get_hwclock(); + return mach_continuous_time(); #else while (1) { uint64_t read1 = mach_absolutetime_asleep; diff --git a/osfmk/kern/clock.h b/osfmk/kern/clock.h index d6d9b82ab..44cd5533d 100644 --- a/osfmk/kern/clock.h +++ b/osfmk/kern/clock.h @@ -225,6 +225,10 @@ extern void clock_interval_to_deadline( uint32_t scale_factor, uint64_t *result); +extern void nanoseconds_to_deadline( + uint64_t interval, + uint64_t *result); + extern void clock_interval_to_absolutetime_interval( uint32_t interval, uint32_t scale_factor, @@ -269,6 +273,9 @@ extern uint64_t continuoustime_to_absolutetime( extern uint64_t mach_absolutetime_asleep; extern uint64_t mach_absolutetime_last_sleep; +#if HIBERNATION && HAS_CONTINUOUS_HWCLOCK +extern uint64_t hwclock_conttime_offset; +#endif #ifdef KERNEL_PRIVATE diff --git a/osfmk/kern/clock_oldops.c b/osfmk/kern/clock_oldops.c index 3ec6264d8..6ae3475bc 100644 --- a/osfmk/kern/clock_oldops.c +++ b/osfmk/kern/clock_oldops.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2019 Apple Inc. All rights reserved. + * Copyright (c) 2000-2020 Apple Inc. All rights reserved. 
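As a usage sketch for the nanoseconds_to_deadline() helper added above (illustrative only; the thread call and its setup are assumed rather than taken from this patch), a caller converts a nanosecond interval into an absolute-time deadline relative to now, with the helper clamping the result to UINT64_MAX if the addition would overflow:

#include <kern/clock.h>
#include <kern/thread_call.h>

/* Hypothetical caller: arm a previously allocated thread call ~250 ms out. */
static void
example_arm_callout(thread_call_t callout)
{
	uint64_t deadline;

	/* 250 ms expressed in nanoseconds; clamps to UINT64_MAX on overflow. */
	nanoseconds_to_deadline(250 * NSEC_PER_MSEC, &deadline);
	thread_call_enter_delayed(callout, deadline);
}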
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -97,11 +97,12 @@ typedef struct alarm alarm_data_t; /* local data declarations */ decl_simple_lock_data(static, alarm_lock); /* alarm synchronization */ -static struct zone *alarm_zone; /* zone for user alarms */ +/* zone for user alarms */ +static ZONE_DECLARE(alarm_zone, "alarms", sizeof(struct alarm), ZC_NONE); static struct alarm *alrmfree; /* alarm free list pointer */ static struct alarm *alrmdone; /* alarm done list pointer */ static struct alarm *alrmlist; -static long alrm_seqno; /* uniquely identifies alarms */ +static long alrm_seqno; /* uniquely identifies alarms */ static thread_call_data_t alarm_done_call; static timer_call_data_t alarm_expire_timer; @@ -237,26 +238,16 @@ clock_oldinit(void) void clock_service_create(void) { - clock_t clock; - int i; - /* * Initialize ipc clock services. */ - for (i = 0; i < clock_count; i++) { - clock = &clock_list[i]; + for (int i = 0; i < clock_count; i++) { + clock_t clock = &clock_list[i]; if (clock->cl_ops) { ipc_clock_init(clock); ipc_clock_enable(clock); } } - - /* - * Perform miscellaneous late - * initialization. - */ - i = sizeof(struct alarm); - alarm_zone = zinit(i, (4096 / i) * i, 10 * i, "alarms"); } /* diff --git a/osfmk/kern/coalition.c b/osfmk/kern/coalition.c index 025a2c3f1..e77ca4e68 100644 --- a/osfmk/kern/coalition.c +++ b/osfmk/kern/coalition.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019 Apple Inc. All rights reserved. + * Copyright (c) 2019-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -33,7 +33,6 @@ #include #include #include -#include #include #include /* for TASK_CHUNK */ #if MONOTONIC @@ -87,12 +86,10 @@ extern int proc_selfpid(void); int unrestrict_coalition_syscalls; int merge_adaptive_coalitions; -lck_attr_t coalitions_lck_attr; -lck_grp_t coalitions_lck_grp; -lck_grp_attr_t coalitions_lck_grp_attr; +LCK_GRP_DECLARE(coalitions_lck_grp, "coalition"); /* coalitions_list_lock protects coalition_count, coalitions queue, next_coalition_id. */ -decl_lck_mtx_data(static, coalitions_list_lock); +static LCK_MTX_DECLARE(coalitions_list_lock, &coalitions_lck_grp); static uint64_t coalition_count; static uint64_t coalition_next_id = 1; static queue_head_t coalitions_q; @@ -100,8 +97,6 @@ static queue_head_t coalitions_q; coalition_t init_coalition[COALITION_NUM_TYPES]; coalition_t corpse_coalition[COALITION_NUM_TYPES]; -zone_t coalition_zone; - static const char * coal_type_str(int type) { @@ -231,6 +226,9 @@ struct i_resource_coalition { * when the member tasks' resource usage changes. */ ledger_t resource_monitor_ledger; +#if CONFIG_PHYS_WRITE_ACCT + uint64_t fs_metadata_writes; +#endif /* CONFIG_PHYS_WRITE_ACCT */ }; /* @@ -326,6 +324,9 @@ static const struct coalition_type }, }; +ZONE_DECLARE(coalition_zone, "coalitions", + sizeof(struct coalition), ZC_NOENCRYPT | ZC_ZFREE_CLEARMEM); + #define coal_call(coal, func, ...) \ (s_coalition_types[(coal)->type].func)(coal, ## __VA_ARGS__) @@ -340,11 +341,11 @@ static const struct coalition_type * On non-embedded platforms, since not all coalitions have jetsam coalitions * track focal counts on the resource coalition. 
*/ -#if CONFIG_EMBEDDED +#if !XNU_TARGET_OS_OSX #define COALITION_FOCAL_TASKS_ACCOUNTING COALITION_TYPE_JETSAM -#else /* CONFIG_EMBEDDED */ +#else /* !XNU_TARGET_OS_OSX */ #define COALITION_FOCAL_TASKS_ACCOUNTING COALITION_TYPE_RESOURCE -#endif /* CONFIG_EMBEDDED */ +#endif /* !XNU_TARGET_OS_OSX */ /* @@ -635,11 +636,14 @@ i_coal_resource_remove_task(coalition_t coal, task_t task) ledger_rollup(cr->ledger, task->ledger); cr->bytesread += task->task_io_stats->disk_reads.size; cr->byteswritten += task->task_io_stats->total_io.size - task->task_io_stats->disk_reads.size; -#if !CONFIG_EMBEDDED +#if defined(__x86_64__) cr->gpu_time += task_gpu_utilisation(task); -#else +#endif /* defined(__x86_64__) */ + +#if defined(__arm__) || defined(__arm64__) cr->energy += task_energy(task); -#endif +#endif /* defined(__arm__) || defined(__arm64__) */ + cr->logical_immediate_writes += task->task_writes_counters_internal.task_immediate_writes; cr->logical_deferred_writes += task->task_writes_counters_internal.task_deferred_writes; cr->logical_invalidated_writes += task->task_writes_counters_internal.task_invalidated_writes; @@ -648,6 +652,9 @@ i_coal_resource_remove_task(coalition_t coal, task_t task) cr->logical_deferred_writes_to_external += task->task_writes_counters_external.task_deferred_writes; cr->logical_invalidated_writes_to_external += task->task_writes_counters_external.task_invalidated_writes; cr->logical_metadata_writes_to_external += task->task_writes_counters_external.task_metadata_writes; +#if CONFIG_PHYS_WRITE_ACCT + cr->fs_metadata_writes += task->task_fs_metadata_writes; +#endif /* CONFIG_PHYS_WRITE_ACCT */ cr->cpu_ptime += task_cpu_ptime(task); task_update_cpu_time_qos_stats(task, cr->cpu_time_eqos, cr->cpu_time_rqos); #if MONOTONIC @@ -703,6 +710,10 @@ i_coal_resource_iterate_tasks(coalition_t coal, void *ctx, void (*callback)(coal callback(coal, ctx, t); } +#if CONFIG_PHYS_WRITE_ACCT +extern uint64_t kernel_pm_writes; +#endif /* CONFIG_PHYS_WRITE_ACCT */ + kern_return_t coalition_resource_usage_internal(coalition_t coal, struct coalition_resource_usage *cru_out) { @@ -745,6 +756,9 @@ coalition_resource_usage_internal(coalition_t coal, struct coalition_resource_us uint64_t logical_deferred_writes_to_external = coal->r.logical_deferred_writes_to_external; uint64_t logical_invalidated_writes_to_external = coal->r.logical_invalidated_writes_to_external; uint64_t logical_metadata_writes_to_external = coal->r.logical_metadata_writes_to_external; +#if CONFIG_PHYS_WRITE_ACCT + uint64_t fs_metadata_writes = coal->r.fs_metadata_writes; +#endif /* CONFIG_PHYS_WRITE_ACCT */ int64_t cpu_time_billed_to_me = 0; int64_t cpu_time_billed_to_others = 0; int64_t energy_billed_to_me = 0; @@ -774,11 +788,14 @@ coalition_resource_usage_internal(coalition_t coal, struct coalition_resource_us ledger_rollup(sum_ledger, task->ledger); bytesread += task->task_io_stats->disk_reads.size; byteswritten += task->task_io_stats->total_io.size - task->task_io_stats->disk_reads.size; -#if !CONFIG_EMBEDDED +#if defined(__x86_64__) gpu_time += task_gpu_utilisation(task); -#else +#endif /* defined(__x86_64__) */ + +#if defined(__arm__) || defined(__arm64__) energy += task_energy(task); -#endif +#endif /* defined(__arm__) || defined(__arm64__) */ + logical_immediate_writes += task->task_writes_counters_internal.task_immediate_writes; logical_deferred_writes += task->task_writes_counters_internal.task_deferred_writes; logical_invalidated_writes += task->task_writes_counters_internal.task_invalidated_writes; @@ -787,6 +804,9 @@ 
coalition_resource_usage_internal(coalition_t coal, struct coalition_resource_us logical_deferred_writes_to_external += task->task_writes_counters_external.task_deferred_writes; logical_invalidated_writes_to_external += task->task_writes_counters_external.task_invalidated_writes; logical_metadata_writes_to_external += task->task_writes_counters_external.task_metadata_writes; +#if CONFIG_PHYS_WRITE_ACCT + fs_metadata_writes += task->task_fs_metadata_writes; +#endif /* CONFIG_PHYS_WRITE_ACCT */ cpu_ptime += task_cpu_ptime(task); task_update_cpu_time_qos_stats(task, cpu_time_eqos, cpu_time_rqos); @@ -867,6 +887,11 @@ coalition_resource_usage_internal(coalition_t coal, struct coalition_resource_us cru_out->logical_deferred_writes_to_external = logical_deferred_writes_to_external; cru_out->logical_invalidated_writes_to_external = logical_invalidated_writes_to_external; cru_out->logical_metadata_writes_to_external = logical_metadata_writes_to_external; +#if CONFIG_PHYS_WRITE_ACCT + cru_out->fs_metadata_writes = fs_metadata_writes; +#else + cru_out->fs_metadata_writes = 0; +#endif /* CONFIG_PHYS_WRITE_ACCT */ cru_out->cpu_ptime = cpu_ptime; cru_out->cpu_time_eqos_len = COALITION_NUM_THREAD_QOS_TYPES; memcpy(cru_out->cpu_time_eqos, cpu_time_eqos, sizeof(cru_out->cpu_time_eqos)); @@ -875,6 +900,17 @@ coalition_resource_usage_internal(coalition_t coal, struct coalition_resource_us ledger_dereference(sum_ledger); sum_ledger = LEDGER_NULL; +#if CONFIG_PHYS_WRITE_ACCT + // kernel_pm_writes are only recorded under kernel_task coalition + if (coalition_id(coal) == COALITION_ID_KERNEL) { + cru_out->pm_writes = kernel_pm_writes; + } else { + cru_out->pm_writes = 0; + } +#else + cru_out->pm_writes = 0; +#endif /* CONFIG_PHYS_WRITE_ACCT */ + if (last_became_nonempty_time) { time_nonempty += mach_absolute_time() - last_became_nonempty_time; } @@ -899,6 +935,26 @@ i_coal_jetsam_init(coalition_t coal, boolean_t privileged) queue_head_init(coal->j.services); queue_head_init(coal->j.other); +#if CONFIG_THREAD_GROUPS + switch (coal->role) { + case COALITION_ROLE_SYSTEM: + coal->j.thread_group = thread_group_find_by_id_and_retain(THREAD_GROUP_SYSTEM); + break; + case COALITION_ROLE_BACKGROUND: + coal->j.thread_group = thread_group_find_by_id_and_retain(THREAD_GROUP_BACKGROUND); + break; + case COALITION_ROLE_ADAPTIVE: + if (merge_adaptive_coalitions) { + coal->j.thread_group = thread_group_find_by_id_and_retain(THREAD_GROUP_ADAPTIVE); + } else { + coal->j.thread_group = thread_group_create_and_retain(); + } + break; + default: + coal->j.thread_group = thread_group_create_and_retain(); + } + assert(coal->j.thread_group != NULL); +#endif return KERN_SUCCESS; } @@ -913,6 +969,12 @@ i_coal_jetsam_dealloc(__unused coalition_t coal) assert(queue_empty(&coal->j.other)); assert(coal->j.leader == TASK_NULL); +#if CONFIG_THREAD_GROUPS + /* disassociate from the thread group */ + assert(coal->j.thread_group != NULL); + thread_group_release(coal->j.thread_group); + coal->j.thread_group = NULL; +#endif } static kern_return_t @@ -1007,7 +1069,7 @@ i_coal_jetsam_set_taskrole(coalition_t coal, task_t task, int role) panic("%s: task %p attempting to set role %d " "in coalition %p to which it does not belong!", __func__, task, role, coal); } - /* fall through */ + OS_FALLTHROUGH; case COALITION_TASKROLE_UNDEF: coal_dbg("setting PID:%d as UNDEF in %lld", task_pid(task), coal->id); @@ -1099,10 +1161,12 @@ i_coal_jetsam_iterate_tasks(coalition_t coal, void *ctx, void (*callback)(coalit * Condition: coalitions_list_lock must be 
UNLOCKED. */ kern_return_t -coalition_create_internal(int type, int role, boolean_t privileged, coalition_t *out) +coalition_create_internal(int type, int role, boolean_t privileged, coalition_t *out, uint64_t *coalition_id) { kern_return_t kr; struct coalition *new_coal; + uint64_t cid; + uint32_t ctype; if (type < 0 || type > COALITION_TYPE_MAX) { return KERN_INVALID_ARGUMENT; @@ -1132,18 +1196,32 @@ coalition_create_internal(int type, int role, boolean_t privileged, coalition_t new_coal->should_notify = 1; #endif - lck_mtx_init(&new_coal->lock, &coalitions_lck_grp, &coalitions_lck_attr); + lck_mtx_init(&new_coal->lock, &coalitions_lck_grp, LCK_ATTR_NULL); lck_mtx_lock(&coalitions_list_lock); new_coal->id = coalition_next_id++; coalition_count++; enqueue_tail(&coalitions_q, &new_coal->coalitions); +#if CONFIG_THREAD_GROUPS + KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_COALITION, MACH_COALITION_NEW), + new_coal->id, new_coal->type, + (new_coal->type == COALITION_TYPE_JETSAM && new_coal->j.thread_group) ? + thread_group_get_id(new_coal->j.thread_group) : 0); + +#else KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_COALITION, MACH_COALITION_NEW), new_coal->id, new_coal->type); +#endif + cid = new_coal->id; + ctype = new_coal->type; lck_mtx_unlock(&coalitions_list_lock); - coal_dbg("id:%llu, type:%s", new_coal->id, coal_type_str(new_coal->type)); + coal_dbg("id:%llu, type:%s", cid, coal_type_str(ctype)); + + if (coalition_id != NULL) { + *coalition_id = cid; + } *out = new_coal; return KERN_SUCCESS; @@ -1180,8 +1258,15 @@ coalition_release(coalition_t coal) assert(coal->reaped); assert(coal->focal_task_count == 0); assert(coal->nonfocal_task_count == 0); +#if CONFIG_THREAD_GROUPS + KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_COALITION, MACH_COALITION_FREE), + coal->id, coal->type, + coal->type == COALITION_TYPE_JETSAM ? 
+ coal->j.thread_group : 0); +#else KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_COALITION, MACH_COALITION_FREE), coal->id, coal->type); +#endif coal_call(coal, dealloc); @@ -1319,6 +1404,7 @@ coalition_find_and_activate_by_id(uint64_t cid) uint64_t coalition_id(coalition_t coal) { + assert(coal != COALITION_NULL); return coal->id; } @@ -1473,6 +1559,73 @@ coalition_set_efficient(coalition_t coal) coalition_unlock(coal); } +#if CONFIG_THREAD_GROUPS +struct thread_group * +task_coalition_get_thread_group(task_t task) +{ + coalition_t coal = task->coalition[COALITION_TYPE_JETSAM]; + /* return system thread group for non-jetsam coalitions */ + if (coal == COALITION_NULL) { + return init_coalition[COALITION_TYPE_JETSAM]->j.thread_group; + } + return coal->j.thread_group; +} + + +struct thread_group * +kdp_coalition_get_thread_group(coalition_t coal) +{ + if (coal->type != COALITION_TYPE_JETSAM) { + return NULL; + } + assert(coal->j.thread_group != NULL); + return coal->j.thread_group; +} + +struct thread_group * +coalition_get_thread_group(coalition_t coal) +{ + if (coal->type != COALITION_TYPE_JETSAM) { + return NULL; + } + assert(coal->j.thread_group != NULL); + return thread_group_retain(coal->j.thread_group); +} + +void +coalition_set_thread_group(coalition_t coal, struct thread_group *tg) +{ + assert(coal != COALITION_NULL); + assert(tg != NULL); + + if (coal->type != COALITION_TYPE_JETSAM) { + return; + } + struct thread_group *old_tg = coal->j.thread_group; + assert(old_tg != NULL); + coal->j.thread_group = tg; + + KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_COALITION, MACH_COALITION_THREAD_GROUP_SET), + coal->id, coal->type, thread_group_get_id(tg)); + + thread_group_release(old_tg); +} + +void +task_coalition_thread_group_focal_update(task_t task) +{ + assert(task->coalition[COALITION_FOCAL_TASKS_ACCOUNTING] != COALITION_NULL); + thread_group_flags_update_lock(); + uint32_t focal_count = task_coalition_focal_count(task); + if (focal_count) { + thread_group_set_flags_locked(task_coalition_get_thread_group(task), THREAD_GROUP_FLAGS_UI_APP); + } else { + thread_group_clear_flags_locked(task_coalition_get_thread_group(task), THREAD_GROUP_FLAGS_UI_APP); + } + thread_group_flags_update_unlock(); +} + +#endif void coalition_for_each_task(coalition_t coal, void *ctx, @@ -1896,12 +2049,6 @@ coalitions_init(void) int i; const struct coalition_type *ctype; - coalition_zone = zinit( - sizeof(struct coalition), - CONFIG_COALITION_MAX * sizeof(struct coalition), - COALITION_CHUNK * sizeof(struct coalition), - "coalitions"); - zone_change(coalition_zone, Z_NOENCRYPT, TRUE); queue_head_init(coalitions_q); if (!PE_parse_boot_argn("unrestrict_coalition_syscalls", &unrestrict_coalition_syscalls, @@ -1914,11 +2061,6 @@ coalitions_init(void) merge_adaptive_coalitions = 0; } - lck_grp_attr_setdefault(&coalitions_lck_grp_attr); - lck_grp_init(&coalitions_lck_grp, "coalition", &coalitions_lck_grp_attr); - lck_attr_setdefault(&coalitions_lck_attr); - lck_mtx_init(&coalitions_list_lock, &coalitions_lck_grp, &coalitions_lck_attr); - init_task_ledgers(); init_coalition_ledgers(); @@ -1936,12 +2078,15 @@ coalitions_init(void) if (!ctype->has_default) { continue; } - kr = coalition_create_internal(ctype->type, COALITION_ROLE_SYSTEM, TRUE, &init_coalition[ctype->type]); + kr = coalition_create_internal(ctype->type, COALITION_ROLE_SYSTEM, TRUE, &init_coalition[ctype->type], NULL); if (kr != KERN_SUCCESS) { panic("%s: could not create init %s coalition: kr:%d", __func__, coal_type_str(i), kr); } - kr = 
coalition_create_internal(ctype->type, COALITION_ROLE_SYSTEM, FALSE, &corpse_coalition[ctype->type]); + if (i == COALITION_TYPE_RESOURCE) { + assert(COALITION_ID_KERNEL == init_coalition[ctype->type]->id); + } + kr = coalition_create_internal(ctype->type, COALITION_ROLE_SYSTEM, FALSE, &corpse_coalition[ctype->type], NULL); if (kr != KERN_SUCCESS) { panic("%s: could not create corpse %s coalition: kr:%d", __func__, coal_type_str(i), kr); @@ -2269,7 +2414,7 @@ coalition_get_sort_list(coalition_t coal, int sort_order, queue_t list, switch (sort_order) { case COALITION_SORT_DEFAULT: sort_array[0].usr_order = 0; - /* fall-through */ + OS_FALLTHROUGH; case COALITION_SORT_MEM_ASC: case COALITION_SORT_MEM_DEC: sort_array[0].bytes = get_task_phys_footprint(coal->j.leader); @@ -2296,7 +2441,7 @@ coalition_get_sort_list(coalition_t coal, int sort_order, queue_t list, switch (sort_order) { case COALITION_SORT_DEFAULT: sort_array[ntasks].usr_order = 0; - /* fall-through */ + OS_FALLTHROUGH; case COALITION_SORT_MEM_ASC: case COALITION_SORT_MEM_DEC: sort_array[ntasks].bytes = get_task_phys_footprint(task); diff --git a/osfmk/kern/coalition.h b/osfmk/kern/coalition.h index 5afbf004b..9d6fb4937 100644 --- a/osfmk/kern/coalition.h +++ b/osfmk/kern/coalition.h @@ -138,7 +138,7 @@ kern_return_t coalition_request_terminate_internal(coalition_t coal); * KERN_RESOURCE_SHORTAGE Unable to allocate kernel resources for a * new coalition. */ -kern_return_t coalition_create_internal(int type, int role, boolean_t privileged, coalition_t *out); +kern_return_t coalition_create_internal(int type, int role, boolean_t privileged, coalition_t *out, uint64_t *cid); boolean_t coalition_term_requested(coalition_t coal); boolean_t coalition_is_terminated(coalition_t coal); diff --git a/osfmk/kern/cpu_quiesce.c b/osfmk/kern/cpu_quiesce.c index 57c43f5b4..2ca5f67f0 100644 --- a/osfmk/kern/cpu_quiesce.c +++ b/osfmk/kern/cpu_quiesce.c @@ -35,7 +35,7 @@ #include #include -#include +#include #include #include @@ -77,6 +77,13 @@ static _Atomic checkin_mask_t cpu_quiescing_checkin_state; static uint64_t cpu_checkin_last_commit; +struct cpu_quiesce { + cpu_quiescent_state_t state; + uint64_t last_checkin; +}; + +static struct cpu_quiesce PERCPU_DATA(cpu_quiesce); + #define CPU_CHECKIN_MIN_INTERVAL_US 4000 /* 4ms */ #define CPU_CHECKIN_MIN_INTERVAL_MAX_US USEC_PER_SEC /* 1s */ static uint64_t cpu_checkin_min_interval; @@ -182,16 +189,16 @@ cpu_quiescent_counter_needs_commit(checkin_mask_t state) void cpu_quiescent_counter_join(__unused uint64_t ctime) { - processor_t processor = current_processor(); - __assert_only int cpuid = processor->cpu_id; + struct cpu_quiesce *st = PERCPU_GET(cpu_quiesce); + __assert_only int cpuid = cpu_number(); - assert(processor->cpu_quiesce_state == CPU_QUIESCE_COUNTER_NONE || - processor->cpu_quiesce_state == CPU_QUIESCE_COUNTER_LEFT); + assert(st->state == CPU_QUIESCE_COUNTER_NONE || + st->state == CPU_QUIESCE_COUNTER_LEFT); assert((os_atomic_load(&cpu_quiescing_checkin_state, relaxed) & (cpu_expected_bit(cpuid) | cpu_checked_in_bit(cpuid))) == 0); - processor->cpu_quiesce_state = CPU_QUIESCE_COUNTER_PENDING_JOIN; + st->state = CPU_QUIESCE_COUNTER_PENDING_JOIN; /* * Mark the processor to call cpu_quiescent_counter_ast before it @@ -207,10 +214,10 @@ cpu_quiescent_counter_join(__unused uint64_t ctime) void cpu_quiescent_counter_ast(void) { - processor_t processor = current_processor(); - int cpuid = processor->cpu_id; + struct cpu_quiesce *st = PERCPU_GET(cpu_quiesce); + int cpuid = cpu_number(); - 
assert(processor->cpu_quiesce_state == CPU_QUIESCE_COUNTER_PENDING_JOIN); + assert(st->state == CPU_QUIESCE_COUNTER_PENDING_JOIN); /* We had better not already be joined. */ assert((os_atomic_load(&cpu_quiescing_checkin_state, relaxed) & @@ -231,8 +238,8 @@ cpu_quiescent_counter_ast(void) * its expected bit. */ - processor->cpu_quiesce_state = CPU_QUIESCE_COUNTER_JOINED; - processor->cpu_quiesce_last_checkin = mach_absolute_time(); + st->state = CPU_QUIESCE_COUNTER_JOINED; + st->last_checkin = mach_absolute_time(); checkin_mask_t old_mask, new_mask; os_atomic_rmw_loop(&cpu_quiescing_checkin_state, old_mask, new_mask, acquire, { @@ -258,23 +265,23 @@ cpu_quiescent_counter_ast(void) void cpu_quiescent_counter_leave(uint64_t ctime) { - processor_t processor = current_processor(); - int cpuid = processor->cpu_id; + struct cpu_quiesce *st = PERCPU_GET(cpu_quiesce); + int cpuid = cpu_number(); - assert(processor->cpu_quiesce_state == CPU_QUIESCE_COUNTER_JOINED || - processor->cpu_quiesce_state == CPU_QUIESCE_COUNTER_PENDING_JOIN); + assert(st->state == CPU_QUIESCE_COUNTER_JOINED || + st->state == CPU_QUIESCE_COUNTER_PENDING_JOIN); /* We no longer need the cpu_quiescent_counter_ast callback to be armed */ ast_off(AST_UNQUIESCE); - if (processor->cpu_quiesce_state == CPU_QUIESCE_COUNTER_PENDING_JOIN) { + if (st->state == CPU_QUIESCE_COUNTER_PENDING_JOIN) { /* We never actually joined, so we don't have to do the work to leave. */ - processor->cpu_quiesce_state = CPU_QUIESCE_COUNTER_LEFT; + st->state = CPU_QUIESCE_COUNTER_LEFT; return; } /* Leaving can't be deferred, even if we're within the min interval */ - processor->cpu_quiesce_last_checkin = ctime; + st->last_checkin = ctime; checkin_mask_t mask = cpu_checked_in_bit(cpuid) | cpu_expected_bit(cpuid); @@ -283,7 +290,7 @@ cpu_quiescent_counter_leave(uint64_t ctime) assert((orig_state & cpu_expected_bit(cpuid))); - processor->cpu_quiesce_state = CPU_QUIESCE_COUNTER_LEFT; + st->state = CPU_QUIESCE_COUNTER_LEFT; if (cpu_quiescent_counter_needs_commit(orig_state)) { /* @@ -311,22 +318,22 @@ cpu_quiescent_counter_leave(uint64_t ctime) void cpu_quiescent_counter_checkin(uint64_t ctime) { - processor_t processor = current_processor(); - int cpuid = processor->cpu_id; + struct cpu_quiesce *st = PERCPU_GET(cpu_quiesce); + int cpuid = cpu_number(); - assert(processor->cpu_quiesce_state != CPU_QUIESCE_COUNTER_NONE); + assert(st->state != CPU_QUIESCE_COUNTER_NONE); /* If we're not joined yet, we don't need to check in */ - if (__probable(processor->cpu_quiesce_state != CPU_QUIESCE_COUNTER_JOINED)) { + if (__probable(st->state != CPU_QUIESCE_COUNTER_JOINED)) { return; } /* If we've checked in recently, we don't need to check in yet. 
*/ - if (__probable((ctime - processor->cpu_quiesce_last_checkin) <= cpu_checkin_min_interval)) { + if (__probable((ctime - st->last_checkin) <= cpu_checkin_min_interval)) { return; } - processor->cpu_quiesce_last_checkin = ctime; + st->last_checkin = ctime; checkin_mask_t state = os_atomic_load(&cpu_quiescing_checkin_state, relaxed); @@ -361,10 +368,10 @@ cpu_quiescent_counter_checkin(uint64_t ctime) void cpu_quiescent_counter_assert_ast(void) { - processor_t processor = current_processor(); - int cpuid = processor->cpu_id; + struct cpu_quiesce *st = PERCPU_GET(cpu_quiesce); + int cpuid = cpu_number(); - assert(processor->cpu_quiesce_state == CPU_QUIESCE_COUNTER_JOINED); + assert(st->state == CPU_QUIESCE_COUNTER_JOINED); checkin_mask_t state = os_atomic_load(&cpu_quiescing_checkin_state, relaxed); assert((state & cpu_expected_bit(cpuid))); diff --git a/osfmk/kern/cs_blobs.h b/osfmk/kern/cs_blobs.h index 5a26da667..899a6a27f 100644 --- a/osfmk/kern/cs_blobs.h +++ b/osfmk/kern/cs_blobs.h @@ -29,6 +29,8 @@ #ifndef _KERN_CODESIGN_H_ #define _KERN_CODESIGN_H_ +#include + /* code signing attributes of a process */ #define CS_VALID 0x00000001 /* dynamically valid */ #define CS_ADHOC 0x00000002 /* ad hoc signed */ @@ -48,10 +50,11 @@ #define CS_ENTITLEMENTS_VALIDATED 0x00004000 /* code signature permits restricted entitlements */ #define CS_NVRAM_UNRESTRICTED 0x00008000 /* has com.apple.rootless.restricted-nvram-variables.heritable entitlement */ -#define CS_RUNTIME 0x00010000 /* Apply hardened runtime policies */ +#define CS_RUNTIME 0x00010000 /* Apply hardened runtime policies */ +#define CS_LINKER_SIGNED 0x00020000 /* Automatically signed by the linker */ #define CS_ALLOWED_MACHO (CS_ADHOC | CS_HARD | CS_KILL | CS_CHECK_EXPIRATION | \ - CS_RESTRICT | CS_ENFORCEMENT | CS_REQUIRE_LV | CS_RUNTIME) + CS_RESTRICT | CS_ENFORCEMENT | CS_REQUIRE_LV | CS_RUNTIME | CS_LINKER_SIGNED) #define CS_EXEC_SET_HARD 0x00100000 /* set CS_HARD on any exec'ed process */ #define CS_EXEC_SET_KILL 0x00200000 /* set CS_KILL on any exec'ed process */ @@ -72,11 +75,11 @@ /* executable segment flags */ -#define CS_EXECSEG_MAIN_BINARY 0x1 /* executable segment denotes main binary */ +#define CS_EXECSEG_MAIN_BINARY 0x1 /* executable segment denotes main binary */ #define CS_EXECSEG_ALLOW_UNSIGNED 0x10 /* allow unsigned pages (for debugging) */ -#define CS_EXECSEG_DEBUGGER 0x20 /* main binary is debugger */ -#define CS_EXECSEG_JIT 0x40 /* JIT enabled */ -#define CS_EXECSEG_SKIP_LV 0x80 /* OBSOLETE: skip library validation */ +#define CS_EXECSEG_DEBUGGER 0x20 /* main binary is debugger */ +#define CS_EXECSEG_JIT 0x40 /* JIT enabled */ +#define CS_EXECSEG_SKIP_LV 0x80 /* OBSOLETE: skip library validation */ #define CS_EXECSEG_CAN_LOAD_CDHASH 0x100 /* can bless cdhash for execution */ #define CS_EXECSEG_CAN_EXEC_CDHASH 0x200 /* can execute blessed cdhash */ @@ -97,6 +100,8 @@ enum { CS_SUPPORTSTEAMID = 0x20200, CS_SUPPORTSCODELIMIT64 = 0x20300, CS_SUPPORTSEXECSEG = 0x20400, + CS_SUPPORTSRUNTIME = 0x20500, + CS_SUPPORTSLINKAGE = 0x20600, CSSLOT_CODEDIRECTORY = 0, /* slot index for CodeDirectory */ CSSLOT_INFOSLOT = 1, @@ -135,6 +140,10 @@ enum { CS_SIGNER_TYPE_UNKNOWN = 0, CS_SIGNER_TYPE_LEGACYVPN = 5, CS_SIGNER_TYPE_MAC_APP_STORE = 6, + + CS_SUPPL_SIGNER_TYPE_UNKNOWN = 0, + CS_SUPPL_SIGNER_TYPE_TRUSTCACHE = 7, + CS_SUPPL_SIGNER_TYPE_LOCAL = 8, }; #define KERNEL_HAVE_CS_CODEDIRECTORY 1 @@ -179,6 +188,19 @@ typedef struct __CodeDirectory { uint64_t execSegLimit; /* limit of executable segment */ uint64_t execSegFlags; /* 
executable segment flags */ char end_withExecSeg[0]; + /* Version 0x20500 */ + uint32_t runtime; + uint32_t preEncryptOffset; + char end_withPreEncryptOffset[0]; + + /* Version 0x20600 */ + uint8_t linkageHashType; + uint8_t linkageTruncated; + uint16_t spare4; + uint32_t linkageOffset; + uint32_t linkageSize; + char end_withLinkage[0]; + /* followed by dynamic content as located by offset fields above */ } CS_CodeDirectory diff --git a/osfmk/kern/debug.c b/osfmk/kern/debug.c index 8578f687a..0cb5ea810 100644 --- a/osfmk/kern/debug.c +++ b/osfmk/kern/debug.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2019 Apple Inc. All rights reserved. + * Copyright (c) 2000-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -62,6 +62,7 @@ #include #include #include +#include #include #include #include @@ -71,7 +72,7 @@ #include #include #include -#include +#include #include #include #include @@ -102,10 +103,11 @@ #include #include #include +#include #include -#if CONFIG_EMBEDDED +#if defined(__arm__) || defined(__arm64__) #include /* For gPanicBase */ #include #include @@ -117,14 +119,19 @@ extern volatile struct xnu_hw_shmem_dbg_command_info *hwsd_info; extern int vsnprintf(char *, size_t, const char *, va_list); #endif +#if CONFIG_CSR +#include +#endif + +extern int IODTGetLoaderInfo( const char *key, void **infoAddr, int *infosize ); + unsigned int halt_in_debugger = 0; unsigned int current_debugger = 0; unsigned int active_debugger = 0; unsigned int panicDebugging = FALSE; -unsigned int kdebug_serial = FALSE; unsigned int kernel_debugger_entry_count = 0; -#if !defined (__x86_64__) +#if defined(__arm__) || defined(__arm64__) struct additional_panic_data_buffer *panic_data_buffers = NULL; #endif @@ -147,24 +154,46 @@ struct additional_panic_data_buffer *panic_data_buffers = NULL; #define panic_stop() panic_spin_forever() #endif -#define CPUDEBUGGEROP PROCESSOR_DATA(current_processor(), debugger_state).db_current_op -#define CPUDEBUGGERMSG PROCESSOR_DATA(current_processor(), debugger_state).db_message -#define CPUPANICSTR PROCESSOR_DATA(current_processor(), debugger_state).db_panic_str -#define CPUPANICARGS PROCESSOR_DATA(current_processor(), debugger_state).db_panic_args -#define CPUPANICOPTS PROCESSOR_DATA(current_processor(), debugger_state).db_panic_options -#define CPUPANICDATAPTR PROCESSOR_DATA(current_processor(), debugger_state).db_panic_data_ptr -#define CPUDEBUGGERSYNC PROCESSOR_DATA(current_processor(), debugger_state).db_proceed_on_sync_failure -#define CPUDEBUGGERCOUNT PROCESSOR_DATA(current_processor(), debugger_state).db_entry_count -#define CPUDEBUGGERRET PROCESSOR_DATA(current_processor(), debugger_state).db_op_return -#define CPUPANICCALLER PROCESSOR_DATA(current_processor(), debugger_state).db_panic_caller +struct debugger_state { + uint64_t db_panic_options; + debugger_op db_current_op; + boolean_t db_proceed_on_sync_failure; + const char *db_message; + const char *db_panic_str; + va_list *db_panic_args; + void *db_panic_data_ptr; + unsigned long db_panic_caller; + /* incremented whenever we panic or call Debugger (current CPU panic level) */ + uint32_t db_entry_count; + kern_return_t db_op_return; +}; +static struct debugger_state PERCPU_DATA(debugger_state); + +/* __pure2 is correct if this function is called with preemption disabled */ +static inline __pure2 struct debugger_state * +current_debugger_state(void) +{ + return PERCPU_GET(debugger_state); +} + +#define CPUDEBUGGEROP current_debugger_state()->db_current_op +#define CPUDEBUGGERMSG 
current_debugger_state()->db_message +#define CPUPANICSTR current_debugger_state()->db_panic_str +#define CPUPANICARGS current_debugger_state()->db_panic_args +#define CPUPANICOPTS current_debugger_state()->db_panic_options +#define CPUPANICDATAPTR current_debugger_state()->db_panic_data_ptr +#define CPUDEBUGGERSYNC current_debugger_state()->db_proceed_on_sync_failure +#define CPUDEBUGGERCOUNT current_debugger_state()->db_entry_count +#define CPUDEBUGGERRET current_debugger_state()->db_op_return +#define CPUPANICCALLER current_debugger_state()->db_panic_caller #if DEVELOPMENT || DEBUG -#define DEBUGGER_DEBUGGING_NESTED_PANIC_IF_REQUESTED(requested) \ -MACRO_BEGIN \ - if (requested) { \ - volatile int *badpointer = (int *)4; \ - *badpointer = 0; \ - } \ +#define DEBUGGER_DEBUGGING_NESTED_PANIC_IF_REQUESTED(requested) \ +MACRO_BEGIN \ + if (requested) { \ + volatile int *badpointer = (int *)4; \ + *badpointer = 0; \ + } \ MACRO_END #endif /* DEVELOPMENT || DEBUG */ @@ -184,29 +213,23 @@ void panic_spin_forever(void) __dead2; extern kern_return_t do_stackshot(void); extern void PE_panic_hook(const char*); -#if CONFIG_NONFATAL_ASSERTS -int mach_assert = 1; -#endif - #define NESTEDDEBUGGERENTRYMAX 5 static unsigned int max_debugger_entry_count = NESTEDDEBUGGERENTRYMAX; -#if CONFIG_EMBEDDED +#if defined(__arm__) || defined(__arm64__) #define DEBUG_BUF_SIZE (4096) -#define KDBG_TRACE_PANIC_FILENAME "/var/log/panic.trace" -#else -#define DEBUG_BUF_SIZE ((3 * PAGE_SIZE) + offsetof(struct macos_panic_header, mph_data)) -/* EXTENDED_DEBUG_BUF_SIZE definition is now in debug.h */ -static_assert(((EXTENDED_DEBUG_BUF_SIZE % PANIC_FLUSH_BOUNDARY) == 0), "Extended debug buf size must match SMC alignment requirements"); -#define KDBG_TRACE_PANIC_FILENAME "/var/tmp/panic.trace" -#endif -/* debug_buf is directly linked with iBoot panic region for embedded targets */ -#if CONFIG_EMBEDDED +/* debug_buf is directly linked with iBoot panic region for arm targets */ char *debug_buf_base = NULL; char *debug_buf_ptr = NULL; unsigned int debug_buf_size = 0; -#else + +SECURITY_READ_ONLY_LATE(boolean_t) kdp_explicitly_requested = FALSE; +#else /* defined(__arm__) || defined(__arm64__) */ +#define DEBUG_BUF_SIZE ((3 * PAGE_SIZE) + offsetof(struct macos_panic_header, mph_data)) +/* EXTENDED_DEBUG_BUF_SIZE definition is now in debug.h */ +static_assert(((EXTENDED_DEBUG_BUF_SIZE % PANIC_FLUSH_BOUNDARY) == 0), "Extended debug buf size must match SMC alignment requirements"); + char debug_buf[DEBUG_BUF_SIZE]; struct macos_panic_header *panic_info = (struct macos_panic_header *)debug_buf; char *debug_buf_base = (debug_buf + offsetof(struct macos_panic_header, mph_data)); @@ -220,6 +243,12 @@ char *debug_buf_ptr = (debug_buf + offsetof(struct macos_panic_header, mph_data) unsigned int debug_buf_size = (DEBUG_BUF_SIZE - offsetof(struct macos_panic_header, mph_data)); boolean_t extended_debug_log_enabled = FALSE; +#endif /* defined(__arm__) || defined(__arm64__) */ + +#if defined(XNU_TARGET_OS_OSX) +#define KDBG_TRACE_PANIC_FILENAME "/var/tmp/panic.trace" +#else +#define KDBG_TRACE_PANIC_FILENAME "/var/log/panic.trace" #endif /* Debugger state */ @@ -235,6 +264,14 @@ boolean_t kernelcache_uuid_valid = FALSE; uuid_t kernelcache_uuid; uuid_string_t kernelcache_uuid_string; +boolean_t pageablekc_uuid_valid = FALSE; +uuid_t pageablekc_uuid; +uuid_string_t pageablekc_uuid_string; + +boolean_t auxkc_uuid_valid = FALSE; +uuid_t auxkc_uuid; +uuid_string_t auxkc_uuid_string; + /* * By default we treat Debugger() the same as calls to 
panic(), unless * we have debug boot-args present and the DB_KERN_DUMP_ON_NMI *NOT* set. @@ -244,11 +281,7 @@ uuid_string_t kernelcache_uuid_string; */ static boolean_t debugger_is_panic = TRUE; -#if DEVELOPMENT || DEBUG -boolean_t debug_boot_arg_inited = FALSE; -#endif - -SECURITY_READ_ONLY_LATE(unsigned int) debug_boot_arg; +TUNABLE(unsigned int, debug_boot_arg, "debug", 0); char kernel_uuid_string[37]; /* uuid_string_t */ char kernelcache_uuid_string[37]; /* uuid_string_t */ @@ -271,7 +304,23 @@ SECURITY_READ_ONLY_LATE(vm_offset_t) phys_carveout = 0; SECURITY_READ_ONLY_LATE(uintptr_t) phys_carveout_pa = 0; SECURITY_READ_ONLY_LATE(size_t) phys_carveout_size = 0; -void +boolean_t +kernel_debugging_allowed(void) +{ +#if XNU_TARGET_OS_OSX +#if CONFIG_CSR + if (csr_check(CSR_ALLOW_KERNEL_DEBUGGER) != 0) { + return FALSE; + } +#endif /* CONFIG_CSR */ + return TRUE; +#else /* XNU_TARGET_OS_OSX */ + return PE_i_can_has_debugger(NULL); +#endif /* XNU_TARGET_OS_OSX */ +} + +__startup_func +static void panic_init(void) { unsigned long uuidlen = 0; @@ -283,49 +332,37 @@ panic_init(void) uuid_unparse_upper(*(uuid_t *)uuid, kernel_uuid_string); } -#if CONFIG_NONFATAL_ASSERTS - if (!PE_parse_boot_argn("assertions", &mach_assert, sizeof(mach_assert))) { - mach_assert = 1; - } -#endif - /* - * Initialize the value of the debug boot-arg + * Take the value of the debug boot-arg into account */ - debug_boot_arg = 0; -#if ((CONFIG_EMBEDDED && MACH_KDP) || defined(__x86_64__)) - if (PE_parse_boot_argn("debug", &debug_boot_arg, sizeof(debug_boot_arg))) { -#if DEVELOPMENT || DEBUG +#if MACH_KDP + if (kernel_debugging_allowed() && debug_boot_arg) { if (debug_boot_arg & DB_HALT) { halt_in_debugger = 1; } -#endif -#if CONFIG_EMBEDDED +#if defined(__arm__) || defined(__arm64__) if (debug_boot_arg & DB_NMI) { panicDebugging = TRUE; } #else panicDebugging = TRUE; -#if KDEBUG_MOJO_TRACE - if (debug_boot_arg & DB_PRT_KDEBUG) { - kdebug_serial = TRUE; - } -#endif -#endif /* CONFIG_EMBEDDED */ +#endif /* defined(__arm__) || defined(__arm64__) */ } if (!PE_parse_boot_argn("nested_panic_max", &max_debugger_entry_count, sizeof(max_debugger_entry_count))) { max_debugger_entry_count = NESTEDDEBUGGERENTRYMAX; } -#endif /* ((CONFIG_EMBEDDED && MACH_KDP) || defined(__x86_64__)) */ +#if defined(__arm__) || defined(__arm64__) + char kdpname[80]; -#if DEVELOPMENT || DEBUG - debug_boot_arg_inited = TRUE; -#endif + kdp_explicitly_requested = PE_parse_boot_argn("kdp_match_name", kdpname, sizeof(kdpname)); +#endif /* defined(__arm__) || defined(__arm64__) */ + +#endif /* MACH_KDP */ -#if !CONFIG_EMBEDDED +#if defined (__x86_64__) /* * By default we treat Debugger() the same as calls to panic(), unless * we have debug boot-args present and the DB_KERN_DUMP_ON_NMI *NOT* set. @@ -339,6 +376,7 @@ panic_init(void) } #endif } +STARTUP(TUNABLES, STARTUP_RANK_MIDDLE, panic_init); #if defined (__x86_64__) void @@ -349,14 +387,12 @@ extended_debug_log_init(void) * Allocate an extended panic log buffer that has space for the panic * stackshot at the end. Update the debug buf pointers appropriately * to point at this new buffer. - */ - char *new_debug_buf = kalloc(EXTENDED_DEBUG_BUF_SIZE); - /* + * * iBoot pre-initializes the panic region with the NULL character. We set this here * so we can accurately calculate the CRC for the region without needing to flush the * full region over SMC. 
*/ - memset(new_debug_buf, '\0', EXTENDED_DEBUG_BUF_SIZE); + char *new_debug_buf = kalloc_flags(EXTENDED_DEBUG_BUF_SIZE, Z_WAITOK | Z_ZERO); panic_info = (struct macos_panic_header *)new_debug_buf; debug_buf_ptr = debug_buf_base = (new_debug_buf + offsetof(struct macos_panic_header, mph_data)); @@ -378,7 +414,7 @@ extended_debug_log_init(void) void debug_log_init(void) { -#if CONFIG_EMBEDDED +#if defined(__arm__) || defined(__arm64__) if (!gPanicBase) { printf("debug_log_init: Error!! gPanicBase is still not initialized\n"); return; @@ -447,7 +483,7 @@ phys_carveout_init(void) } static void -DebuggerLock() +DebuggerLock(void) { int my_cpu = cpu_number(); int debugger_exp_cpu = DEBUGGER_NO_CPU; @@ -465,7 +501,7 @@ DebuggerLock() } static void -DebuggerUnlock() +DebuggerUnlock(void) { assert(atomic_load_explicit(&debugger_cpu, memory_order_relaxed) == cpu_number()); @@ -485,9 +521,9 @@ DebuggerUnlock() static kern_return_t DebuggerHaltOtherCores(boolean_t proceed_on_failure) { -#if CONFIG_EMBEDDED +#if defined(__arm__) || defined(__arm64__) return DebuggerXCallEnter(proceed_on_failure); -#else /* CONFIG_EMBEDDED */ +#else /* defined(__arm__) || defined(__arm64__) */ #pragma unused(proceed_on_failure) mp_kdp_enter(proceed_on_failure); return KERN_SUCCESS; @@ -495,11 +531,11 @@ DebuggerHaltOtherCores(boolean_t proceed_on_failure) } static void -DebuggerResumeOtherCores() +DebuggerResumeOtherCores(void) { -#if CONFIG_EMBEDDED +#if defined(__arm__) || defined(__arm64__) DebuggerXCallReturn(); -#else /* CONFIG_EMBEDDED */ +#else /* defined(__arm__) || defined(__arm64__) */ mp_kdp_exit(); #endif } @@ -535,8 +571,8 @@ DebuggerSaveState(debugger_op db_op, const char *db_message, const char *db_pani } /* - * Save the requested debugger state/action into the current processor's processor_data - * and trap to the debugger. + * Save the requested debugger state/action into the current processor's + * percu state and trap to the debugger. */ kern_return_t DebuggerTrapWithState(debugger_op db_op, const char *db_message, const char *db_panic_str, @@ -550,6 +586,13 @@ DebuggerTrapWithState(debugger_op db_op, const char *db_message, const char *db_ db_panic_options, db_panic_data_ptr, db_proceed_on_sync_failure, db_panic_caller); + /* + * On ARM this generates an uncategorized exception -> sleh code -> + * DebuggerCall -> kdp_trap -> handle_debugger_trap + * So that is how XNU ensures that only one core can panic. + * The rest of the cores are halted by IPI if possible; if that + * fails it will fall back to dbgwrap. 
+ */ TRAP_DEBUGGER; ret = CPUDEBUGGERRET; @@ -567,6 +610,8 @@ Assert( ) { #if CONFIG_NONFATAL_ASSERTS + static TUNABLE(bool, mach_assert, "assertions", true); + if (!mach_assert) { kprintf("%s:%d non-fatal Assertion: %s", file, line, expression); return; @@ -576,6 +621,11 @@ Assert( panic_plain("%s:%d Assertion failed: %s", file, line, expression); } +boolean_t +debug_is_current_cpu_in_panic_state(void) +{ + return current_debugger_state()->db_entry_count > 0; +} void Debugger(const char *message) @@ -599,7 +649,11 @@ DebuggerWithContext(unsigned int reason, void *ctx, const char *message, static boolean_t in_panic_kprintf = FALSE; /* Notify any listeners that we've started a panic */ - PEHaltRestart(kPEPanicBegin); + uint32_t panic_details = 0; + if (debugger_options_mask & DEBUGGER_OPTION_PANICLOGANDREBOOT) { + panic_details |= kPanicDetailsForcePowerOff; + } + PEHaltRestartInternal(kPEPanicBegin, panic_details); if (!in_panic_kprintf) { in_panic_kprintf = TRUE; @@ -615,6 +669,9 @@ DebuggerWithContext(unsigned int reason, void *ctx, const char *message, panic_spin_forever(); } + /* Handle any necessary platform specific actions before we proceed */ + PEInitiatePanic(); + #if DEVELOPMENT || DEBUG DEBUGGER_DEBUGGING_NESTED_PANIC_IF_REQUESTED((debugger_options_mask & DEBUGGER_OPTION_RECURPANIC_ENTRY)); #endif @@ -685,7 +742,7 @@ kdp_callouts(kdp_event_t event) } } -#if !defined (__x86_64__) +#if defined(__arm__) || defined(__arm64__) /* * Register an additional buffer with data to include in the panic log * @@ -723,13 +780,13 @@ register_additional_panic_data_buffer(const char *producer_name, void *buf, int return; } -#endif /* !defined (__x86_64__) */ +#endif /* defined(__arm__) || defined(__arm64__) */ /* * An overview of the xnu panic path: * * Several panic wrappers (panic(), panic_with_options(), etc.) all funnel into panic_trap_to_debugger(). - * panic_trap_to_debugger() sets the panic state in the current processor's processor_data_t prior + * panic_trap_to_debugger() sets the panic state in the current processor's debugger_state prior * to trapping into the debugger. Once we trap to the debugger, we end up in handle_debugger_trap() * which tries to acquire the panic lock by atomically swapping the current CPU number into debugger_cpu. 
* debugger_cpu acts as a synchronization point, from which the winning CPU can halt the other cores and @@ -801,10 +858,7 @@ panic_trap_to_debugger(const char *panic_format_str, va_list *panic_args, unsign mmiotrace_enabled = 0; #endif - if (ml_wants_panic_trap_to_debugger()) { - ml_panic_trap_to_debugger(panic_format_str, panic_args, reason, ctx, panic_options_mask, panic_caller); - __builtin_trap(); - } + ml_panic_trap_to_debugger(panic_format_str, panic_args, reason, ctx, panic_options_mask, panic_caller); CPUDEBUGGERCOUNT++; @@ -812,7 +866,11 @@ panic_trap_to_debugger(const char *panic_format_str, va_list *panic_args, unsign static boolean_t in_panic_kprintf = FALSE; /* Notify any listeners that we've started a panic */ - PEHaltRestart(kPEPanicBegin); + uint32_t panic_details = 0; + if (panic_options_mask & DEBUGGER_OPTION_PANICLOGANDREBOOT) { + panic_details |= kPanicDetailsForcePowerOff; + } + PEHaltRestartInternal(kPEPanicBegin, panic_details); if (!in_panic_kprintf) { in_panic_kprintf = TRUE; @@ -828,6 +886,9 @@ panic_trap_to_debugger(const char *panic_format_str, va_list *panic_args, unsign panic_spin_forever(); } + /* Handle any necessary platform specific actions before we proceed */ + PEInitiatePanic(); + #if DEVELOPMENT || DEBUG DEBUGGER_DEBUGGING_NESTED_PANIC_IF_REQUESTED((panic_options_mask & DEBUGGER_OPTION_RECURPANIC_ENTRY)); #endif @@ -896,9 +957,9 @@ panic_spin_forever(void) static void kdp_machine_reboot_type(unsigned int type, uint64_t debugger_flags) { - printf("Attempting system restart..."); + printf("Attempting system restart...\n"); if ((type == kPEPanicRestartCPU) && (debugger_flags & DEBUGGER_OPTION_SKIP_PANICEND_CALLOUTS)) { - PEHaltRestart(kPEPanicRestartCPUNoPanicEndCallouts); + PEHaltRestart(kPEPanicRestartCPUNoCallouts); } else { PEHaltRestart(type); } @@ -950,7 +1011,11 @@ debugger_collect_diagnostics(unsigned int exception, unsigned int code, unsigned * panicking. Only do this for Debugger() calls if we're treating * Debugger() calls like panic(). */ - PEHaltRestart(kPEPanicBegin); + uint32_t panic_details = 0; + if (debugger_panic_options & DEBUGGER_OPTION_PANICLOGANDREBOOT) { + panic_details |= kPanicDetailsForcePowerOff; + } + PEHaltRestartInternal(kPEPanicBegin, panic_details); /* * Set the begin pointer in the panic log structure. 
We key off of this @@ -1034,17 +1099,25 @@ debugger_collect_diagnostics(unsigned int exception, unsigned int code, unsigned if (debug_boot_arg & (DB_KERN_DUMP_ON_PANIC | DB_KERN_DUMP_ON_NMI)) { paniclog_append_noflush("skipping local kernel core because core file could not be opened prior to panic (error : 0x%x)", kdp_polled_corefile_error()); -#if CONFIG_EMBEDDED +#if defined(__arm__) || defined(__arm64__) panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_COREDUMP_FAILED; paniclog_flush(); -#else /* CONFIG_EMBEDDED */ +#else /* defined(__arm__) || defined(__arm64__) */ if (panic_info->mph_panic_log_offset != 0) { panic_info->mph_panic_flags |= MACOS_PANIC_HEADER_FLAG_COREDUMP_FAILED; paniclog_flush(); } -#endif /* CONFIG_EMBEDDED */ +#endif /* defined(__arm__) || defined(__arm64__) */ } - } else { + } +#if XNU_MONITOR + else if ((pmap_get_cpu_data()->ppl_state == PPL_STATE_PANIC) && (debug_boot_arg & (DB_KERN_DUMP_ON_PANIC | DB_KERN_DUMP_ON_NMI))) { + paniclog_append_noflush("skipping local kernel core because the PPL is in PANIC state"); + panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_COREDUMP_FAILED; + paniclog_flush(); + } +#endif /* XNU_MONITOR */ + else { int ret = -1; #if defined (__x86_64__) @@ -1082,7 +1155,11 @@ debugger_collect_diagnostics(unsigned int exception, unsigned int code, unsigned } /* If KDP is configured, try to trap to the debugger */ +#if defined(__arm__) || defined(__arm64__) + if (kdp_explicitly_requested && (current_debugger != NO_CUR_DB)) { +#else if (current_debugger != NO_CUR_DB) { +#endif kdp_raise_exception(exception, code, subcode, state); /* * Only return if we entered via Debugger and it's safe to return @@ -1096,12 +1173,12 @@ debugger_collect_diagnostics(unsigned int exception, unsigned int code, unsigned } } -#if CONFIG_EMBEDDED - if (panicDebugging) { - /* If panic debugging is configured, spin for astris to connect */ +#if defined(__arm__) || defined(__arm64__) + if (PE_i_can_has_debugger(NULL) && panicDebugging) { + /* If panic debugging is configured and we're on a dev fused device, spin for astris to connect */ panic_spin_shmcon(); } -#endif /* CONFIG_EMBEDDED */ +#endif /* defined(__arm__) || defined(__arm64__) */ #endif /* CONFIG_KDP_INTERACTIVE_DEBUGGING */ if (!panicDebugging) { @@ -1145,6 +1222,9 @@ handle_debugger_trap(unsigned int exception, unsigned int code, unsigned int sub /* Update the global panic/debugger nested entry level */ kernel_debugger_entry_count = CPUDEBUGGERCOUNT; + if (kernel_debugger_entry_count > 0) { + console_suspend(); + } /* * TODO: Should we do anything special for nested panics here? i.e. 
if we've trapped more than twice @@ -1357,24 +1437,49 @@ unpackA(char *inbuf, uint32_t length) } #endif /* defined (__x86_64__) */ -extern void *proc_name_address(void *p); +extern char *proc_name_address(void *); +extern char *proc_longname_address(void *); -static void +__private_extern__ void panic_display_process_name(void) { - /* because of scoping issues len(p_comm) from proc_t is hard coded here */ - char proc_name[17] = "Unknown"; + proc_name_t proc_name = {}; task_t ctask = 0; void *cbsd_info = 0; + vm_size_t size; - if (ml_nofault_copy((vm_offset_t)&current_thread()->task, (vm_offset_t) &ctask, sizeof(task_t)) == sizeof(task_t)) { - if (ml_nofault_copy((vm_offset_t)&ctask->bsd_info, (vm_offset_t)&cbsd_info, sizeof(cbsd_info)) == sizeof(cbsd_info)) { - if (cbsd_info && (ml_nofault_copy((vm_offset_t) proc_name_address(cbsd_info), (vm_offset_t) &proc_name, sizeof(proc_name)) > 0)) { - proc_name[sizeof(proc_name) - 1] = '\0'; - } + size = ml_nofault_copy((vm_offset_t)&current_thread()->task, + (vm_offset_t)&ctask, sizeof(task_t)); + if (size != sizeof(task_t)) { + goto out; + } + + size = ml_nofault_copy((vm_offset_t)&ctask->bsd_info, + (vm_offset_t)&cbsd_info, sizeof(cbsd_info)); + if (size != sizeof(cbsd_info)) { + goto out; + } + + if (cbsd_info == NULL) { + goto out; + } + + size = ml_nofault_copy((vm_offset_t)proc_longname_address(cbsd_info), + (vm_offset_t)&proc_name, sizeof(proc_name)); + + if (size == 0 || proc_name[0] == '\0') { + size = ml_nofault_copy((vm_offset_t)proc_name_address(cbsd_info), + (vm_offset_t)&proc_name, + MIN(sizeof(command_t), sizeof(proc_name))); + if (size > 0) { + proc_name[size - 1] = '\0'; + } } - paniclog_append_noflush("\nBSD process name corresponding to current thread: %s\n", proc_name); + +out: + proc_name[sizeof(proc_name) - 1] = '\0'; + paniclog_append_noflush("\nProcess name corresponding to current thread: %s\n", + proc_name[0] != '\0' ?
proc_name : "Unknown"); } unsigned @@ -1422,92 +1527,34 @@ panic_display_kernel_uuid(void) void panic_display_kernel_aslr(void) { - if (vm_kernel_slide) { - paniclog_append_noflush("Kernel slide: 0x%016lx\n", (unsigned long) vm_kernel_slide); - paniclog_append_noflush("Kernel text base: %p\n", (void *) vm_kernel_stext); - } -} + kc_format_t kc_format; -void -panic_display_hibb(void) -{ -#if defined(__i386__) || defined (__x86_64__) - paniclog_append_noflush("__HIB text base: %p\n", (void *) vm_hib_base); -#endif -} + PE_get_primary_kc_format(&kc_format); -static void -panic_display_uptime(void) -{ - uint64_t uptime; - absolutetime_to_nanoseconds(mach_absolute_time(), &uptime); - - paniclog_append_noflush("\nSystem uptime in nanoseconds: %llu\n", uptime); -} + if (kc_format == KCFormatFileset) { + void *kch = PE_get_kc_header(KCKindPrimary); -static void -panic_display_disk_errors(void) -{ - if (panic_disk_error_description[0]) { - panic_disk_error_description[sizeof(panic_disk_error_description) - 1] = '\0'; - paniclog_append_noflush("Root disk errors: \"%s\"\n", panic_disk_error_description); + paniclog_append_noflush("KernelCache slide: 0x%016lx\n", (unsigned long) vm_kernel_slide); + paniclog_append_noflush("KernelCache base: %p\n", (void*) kch); + paniclog_append_noflush("Kernel slide: 0x%016lx\n", vm_kernel_stext - (unsigned long)kch + vm_kernel_slide); + } else if (vm_kernel_slide) { + paniclog_append_noflush("Kernel slide: 0x%016lx\n", (unsigned long) vm_kernel_slide); } -} - -static void -panic_display_shutdown_status(void) -{ -#if defined(__i386__) || defined(__x86_64__) - paniclog_append_noflush("System shutdown begun: %s\n", IOPMRootDomainGetWillShutdown() ? "YES" : "NO"); - if (gIOPolledCoreFileMode == kIOPolledCoreFileModeNotInitialized) { - paniclog_append_noflush("Panic diags file unavailable, panic occurred prior to initialization\n"); - } else if (gIOPolledCoreFileMode != kIOPolledCoreFileModeDisabled) { - /* - * If we haven't marked the corefile as explicitly disabled, and we've made it past initialization, then we know the current - * system was configured to use disk based diagnostics at some point. - */ - paniclog_append_noflush("Panic diags file available: %s (0x%x)\n", (gIOPolledCoreFileMode != kIOPolledCoreFileModeClosed) ? "YES" : "NO", kdp_polled_corefile_error()); + paniclog_append_noflush("Kernel text base: %p\n", (void *) vm_kernel_stext); +#if defined(__arm64__) + if (kc_format == KCFormatFileset) { + extern vm_offset_t segTEXTEXECB; + paniclog_append_noflush("Kernel text exec base: 0x%016lx\n", (unsigned long)segTEXTEXECB); } #endif } -extern const char version[]; -extern char osversion[]; - -static volatile uint32_t config_displayed = 0; - -__private_extern__ void -panic_display_system_configuration(boolean_t launchd_exit) +void +panic_display_hibb(void) { - if (!launchd_exit) { - panic_display_process_name(); - } - if (OSCompareAndSwap(0, 1, &config_displayed)) { - char buf[256]; - if (!launchd_exit && strlcpy(buf, PE_boot_args(), sizeof(buf))) { - paniclog_append_noflush("Boot args: %s\n", buf); - } - paniclog_append_noflush("\nMac OS version:\n%s\n", - (osversion[0] != 0) ? 
osversion : "Not yet set"); - paniclog_append_noflush("\nKernel version:\n%s\n", version); - panic_display_kernel_uuid(); - if (!launchd_exit) { - panic_display_kernel_aslr(); - panic_display_hibb(); - panic_display_pal_info(); - } - panic_display_model_name(); - panic_display_disk_errors(); - panic_display_shutdown_status(); - if (!launchd_exit) { - panic_display_uptime(); - panic_display_zprint(); -#if CONFIG_ZLEAKS - panic_display_ztrace(); -#endif /* CONFIG_ZLEAKS */ - kext_dump_panic_lists(&paniclog_append_noflush); - } - } +#if defined(__i386__) || defined (__x86_64__) + paniclog_append_noflush("__HIB text base: %p\n", (void *) vm_hib_base); +#endif } extern unsigned int stack_total; @@ -1523,33 +1570,39 @@ extern mach_memory_info_t *panic_kext_memory_info; extern vm_size_t panic_kext_memory_size; __private_extern__ void -panic_display_zprint() +panic_display_zprint(void) { if (panic_include_zprint == TRUE) { - unsigned int i; struct zone zone_copy; paniclog_append_noflush("%-20s %10s %10s\n", "Zone Name", "Cur Size", "Free Size"); - for (i = 0; i < num_zones; i++) { - if (ml_nofault_copy((vm_offset_t)(&zone_array[i]), (vm_offset_t)&zone_copy, sizeof(struct zone)) == sizeof(struct zone)) { - if (zone_copy.cur_size > (1024 * 1024)) { - paniclog_append_noflush("%-20s %10lu %10lu\n", zone_copy.zone_name, (uintptr_t)zone_copy.cur_size, (uintptr_t)(zone_copy.countfree * zone_copy.elem_size)); + zone_index_foreach(i) { + if (ml_nofault_copy((vm_offset_t)&zone_array[i], + (vm_offset_t)&zone_copy, sizeof(struct zone)) == sizeof(struct zone)) { + if (zone_copy.page_count > atop(1024 * 1024)) { + paniclog_append_noflush("%-8s%-20s %10llu %10lu\n", + zone_heap_name(&zone_copy), + zone_copy.z_name, ptoa_64(zone_copy.page_count), + (uintptr_t)zone_size_free(&zone_copy)); } } } - paniclog_append_noflush("%-20s %10lu\n", "Kernel Stacks", (uintptr_t)(kernel_stack_size * stack_total)); - + paniclog_append_noflush("%-20s %10lu\n", "Kernel Stacks", + (uintptr_t)(kernel_stack_size * stack_total)); #if defined (__x86_64__) - paniclog_append_noflush("%-20s %10lu\n", "PageTables", (uintptr_t)(PAGE_SIZE * inuse_ptepages_count)); + paniclog_append_noflush("%-20s %10lu\n", "PageTables", + (uintptr_t)ptoa(inuse_ptepages_count)); #endif + paniclog_append_noflush("%-20s %10lu\n", "Kalloc.Large", + (uintptr_t)kalloc_large_total); - paniclog_append_noflush("%-20s %10lu\n", "Kalloc.Large", (uintptr_t)kalloc_large_total); if (panic_kext_memory_info) { mach_memory_info_t *mem_info = panic_kext_memory_info; paniclog_append_noflush("\n%-5s %10s\n", "Kmod", "Size"); - for (i = 0; i < (panic_kext_memory_size / sizeof(mach_zone_info_t)); i++) { - if (((mem_info[i].flags & VM_KERN_SITE_TYPE) == VM_KERN_SITE_KMOD) && (mem_info[i].size > (1024 * 1024))) { + for (uint32_t i = 0; i < (panic_kext_memory_size / sizeof(mach_zone_info_t)); i++) { + if (((mem_info[i].flags & VM_KERN_SITE_TYPE) == VM_KERN_SITE_KMOD) && + (mem_info[i].size > (1024 * 1024))) { paniclog_append_noflush("%-5lld %10lld\n", mem_info[i].site, mem_info[i].size); } } @@ -1559,7 +1612,7 @@ panic_display_zprint() #if CONFIG_ECC_LOGGING __private_extern__ void -panic_display_ecc_errors() +panic_display_ecc_errors(void) { uint32_t count = ecc_log_get_correction_count(); @@ -1643,19 +1696,37 @@ kern_feature_override(uint32_t fmask) boolean_t on_device_corefile_enabled(void) { - assert(debug_boot_arg_inited); + assert(startup_phase >= STARTUP_SUB_TUNABLES); #if CONFIG_KDP_INTERACTIVE_DEBUGGING - if ((debug_boot_arg != 0) && !(debug_boot_arg & 
DB_DISABLE_LOCAL_CORE)) { + if (debug_boot_arg == 0) { + return FALSE; + } + if (debug_boot_arg & DB_DISABLE_LOCAL_CORE) { + return FALSE; + } +#if !XNU_TARGET_OS_OSX + /* + * outside of macOS, if there's a debug boot-arg set and local + * cores aren't explicitly disabled, we always write a corefile. + */ + return TRUE; +#else /* !XNU_TARGET_OS_OSX */ + /* + * on macOS, if corefiles on panic are requested and local cores + * aren't disabled we write a local core. + */ + if (debug_boot_arg & (DB_KERN_DUMP_ON_NMI | DB_KERN_DUMP_ON_PANIC)) { return TRUE; } -#endif +#endif /* !XNU_TARGET_OS_OSX */ +#endif /* CONFIG_KDP_INTERACTIVE_DEBUGGING */ return FALSE; } boolean_t panic_stackshot_to_disk_enabled(void) { - assert(debug_boot_arg_inited); + assert(startup_phase >= STARTUP_SUB_TUNABLES); #if defined(__x86_64__) if (PEGetCoprocessorVersion() < kCoprocessorVersion2) { /* Only enabled on pre-Gibraltar machines where it hasn't been disabled explicitly */ @@ -1668,3 +1739,28 @@ panic_stackshot_to_disk_enabled(void) #endif return FALSE; } + +#if DEBUG || DEVELOPMENT +const char * +sysctl_debug_get_preoslog(size_t *size) +{ + int result = 0; + void *preoslog_pa = NULL; + int preoslog_size = 0; + + result = IODTGetLoaderInfo("preoslog", &preoslog_pa, &preoslog_size); + if (result || preoslog_pa == NULL || preoslog_size == 0) { + kprintf("Couldn't obtain preoslog region: result = %d, preoslog_pa = %p, preoslog_size = %d\n", result, preoslog_pa, preoslog_size); + *size = 0; + return NULL; + } + + /* + * Beware: + * On release builds, we would need to call IODTFreeLoaderInfo("preoslog", preoslog_pa, preoslog_size) to free the preoslog buffer. + * On Development & Debug builds, we retain the buffer so it can be extracted from coredumps. + */ + *size = preoslog_size; + return (char *)(ml_static_ptovirt((vm_offset_t)(preoslog_pa))); +} +#endif /* DEBUG || DEVELOPMENT */ diff --git a/osfmk/kern/debug.h b/osfmk/kern/debug.h index 981eb3619..fe3d5eed2 100644 --- a/osfmk/kern/debug.h +++ b/osfmk/kern/debug.h @@ -223,17 +223,20 @@ enum generic_snapshot_flags { #define VM_PRESSURE_TIME_WINDOW 5 /* seconds */ -enum { +__options_decl(stackshot_flags_t, uint64_t, { STACKSHOT_GET_DQ = 0x01, STACKSHOT_SAVE_LOADINFO = 0x02, STACKSHOT_GET_GLOBAL_MEM_STATS = 0x04, STACKSHOT_SAVE_KEXT_LOADINFO = 0x08, - STACKSHOT_GET_MICROSTACKSHOT = 0x10, - STACKSHOT_GLOBAL_MICROSTACKSHOT_ENABLE = 0x20, - STACKSHOT_GLOBAL_MICROSTACKSHOT_DISABLE = 0x40, - STACKSHOT_SET_MICROSTACKSHOT_MARK = 0x80, + /* + * 0x10, 0x20, 0x40 and 0x80 are reserved. + * + * See microstackshot_flags_t whose members used to be part of this + * declaration. 
+ */ STACKSHOT_ACTIVE_KERNEL_THREADS_ONLY = 0x100, STACKSHOT_GET_BOOT_PROFILE = 0x200, + STACKSHOT_DO_COMPRESS = 0x400, STACKSHOT_SAVE_IMP_DONATION_PIDS = 0x2000, STACKSHOT_SAVE_IN_KERNEL_BUFFER = 0x4000, STACKSHOT_RETRIEVE_EXISTING_BUFFER = 0x8000, @@ -257,13 +260,23 @@ enum { STACKSHOT_INSTRS_CYCLES = 0x8000000, STACKSHOT_ASID = 0x10000000, STACKSHOT_PAGE_TABLES = 0x20000000, -}; + STACKSHOT_DISABLE_LATENCY_INFO = 0x40000000, +}); + +__options_decl(microstackshot_flags_t, uint32_t, { + STACKSHOT_GET_MICROSTACKSHOT = 0x10, + STACKSHOT_GLOBAL_MICROSTACKSHOT_ENABLE = 0x20, + STACKSHOT_GLOBAL_MICROSTACKSHOT_DISABLE = 0x40, + STACKSHOT_SET_MICROSTACKSHOT_MARK = 0x80, +}); #define STACKSHOT_THREAD_SNAPSHOT_MAGIC 0xfeedface #define STACKSHOT_TASK_SNAPSHOT_MAGIC 0xdecafbad #define STACKSHOT_MEM_AND_IO_SNAPSHOT_MAGIC 0xbfcabcde #define STACKSHOT_MICRO_SNAPSHOT_MAGIC 0x31c54011 +#define STACKSHOT_PAGETABLES_MASK_ALL ~0 + #define KF_INITIALIZED (0x1) #define KF_SERIAL_OVRD (0x2) #define KF_PMAPV_OVRD (0x4) @@ -320,6 +333,8 @@ struct embedded_panic_header { #define EMBEDDED_PANIC_HEADER_FLAG_BUTTON_RESET_PANIC 0x80 #define EMBEDDED_PANIC_HEADER_FLAG_COPROC_INITIATED_PANIC 0x100 #define EMBEDDED_PANIC_HEADER_FLAG_COREDUMP_FAILED 0x200 +#define EMBEDDED_PANIC_HEADER_FLAG_COMPRESS_FAILED 0x400 +#define EMBEDDED_PANIC_HEADER_FLAG_STACKSHOT_DATA_COMPRESSED 0x800 #define EMBEDDED_PANIC_HEADER_CURRENT_VERSION 2 #define EMBEDDED_PANIC_MAGIC 0x46554E4B /* FUNK */ @@ -353,6 +368,7 @@ struct macos_panic_header { #define MACOS_PANIC_HEADER_FLAG_COREDUMP_COMPLETE 0x100 #define MACOS_PANIC_HEADER_FLAG_COREDUMP_FAILED 0x200 #define MACOS_PANIC_HEADER_FLAG_STACKSHOT_KERNEL_ONLY 0x400 +#define MACOS_PANIC_HEADER_FLAG_STACKSHOT_FAILED_COMPRESS 0x800 /* * Any change to the below structure should mirror the structure defined in MacEFIFirmware @@ -469,7 +485,7 @@ enum { * dump. */ #define DB_NMI_BTN_ENA 0x8000 /* Enable button to directly trigger NMI */ -#define DB_PRT_KDEBUG 0x10000 /* kprintf KDEBUG traces */ +/* 0x10000 was DB_PRT_KDEBUG (kprintf kdebug events), feature removed */ #define DB_DISABLE_LOCAL_CORE 0x20000 /* ignore local kernel core dump support */ #define DB_DISABLE_GZIP_CORE 0x40000 /* don't gzip kernel core dumps */ #define DB_DISABLE_CROSS_PANIC 0x80000 /* x86 only - don't trigger cross panics. Only @@ -507,7 +523,7 @@ __BEGIN_DECLS #define LINE_NUMBER(x) __STRINGIFY(x) #define PANIC_LOCATION __FILE__ ":" LINE_NUMBER(__LINE__) -#if CONFIG_EMBEDDED +#if defined(__arm__) || defined(__arm64__) #define panic(ex, ...) 
({ \ __asm__("" ::: "memory"); \ (panic)(# ex, ## __VA_ARGS__); \ @@ -525,7 +541,7 @@ void panic_with_options(unsigned int reason, void *ctx, void Debugger(const char * message); void populate_model_name(char *); -#if !defined (__x86_64__) +#if defined(__arm__) || defined(__arm64__) /* Note that producer_name and buf should never be de-allocated as we reference these during panic */ void register_additional_panic_data_buffer(const char *producer_name, void *buf, int len); #endif @@ -572,6 +588,7 @@ void panic_stackshot_reset_state(void); * @param size the size of the buffer * @param flags flags to be passed to the stackshot * @param delta_since_timestamp start time for delta period + * @param pagetable_mask if pagetable dumping is set in flags, the mask of page table levels to dump * @bytes_traced a pointer to be filled with the length of the stackshot * */ @@ -579,8 +596,8 @@ void panic_stackshot_reset_state(void); extern "C" { #endif kern_return_t -stack_snapshot_from_kernel(int pid, void *buf, uint32_t size, uint32_t flags, - uint64_t delta_since_timestamp, unsigned *bytes_traced); +stack_snapshot_from_kernel(int pid, void *buf, uint32_t size, uint64_t flags, + uint64_t delta_since_timestamp, uint32_t pagetable_mask, unsigned *bytes_traced); /* * Returns whether on device corefiles are enabled based on the build @@ -598,11 +615,11 @@ boolean_t panic_stackshot_to_disk_enabled(void); } #endif -#if !CONFIG_EMBEDDED +#if defined(__x86_64__) extern char debug_buf[]; extern boolean_t coprocessor_paniclog_flush; extern boolean_t extended_debug_log_enabled; -#endif /* !CONFIG_EMBEDDED */ +#endif /* defined(__x86_64__) */ extern char *debug_buf_base; @@ -617,14 +634,19 @@ extern size_t panic_disk_error_description_size; extern unsigned char *kernel_uuid; extern unsigned int debug_boot_arg; -#if DEVELOPMENT || DEBUG -extern boolean_t debug_boot_arg_inited; -#endif extern boolean_t kernelcache_uuid_valid; extern uuid_t kernelcache_uuid; extern uuid_string_t kernelcache_uuid_string; +extern boolean_t pageablekc_uuid_valid; +extern uuid_t pageablekc_uuid; +extern uuid_string_t pageablekc_uuid_string; + +extern boolean_t auxkc_uuid_valid; +extern uuid_t auxkc_uuid; +extern uuid_string_t auxkc_uuid_string; + #ifdef __cplusplus extern "C" { #endif @@ -644,7 +666,6 @@ extern unsigned int active_debugger; extern unsigned int kernel_debugger_entry_count; extern unsigned int panicDebugging; -extern unsigned int kdebug_serial; extern const char *debugger_panic_str; @@ -653,8 +674,7 @@ extern unsigned int debug_buf_size; extern void debug_log_init(void); extern void debug_putc(char); - -extern void panic_init(void); +extern boolean_t debug_is_current_cpu_in_panic_state(void); /* * Initialize the physical carveout requested with the `phys_carveout_mb` @@ -666,7 +686,7 @@ extern void phys_carveout_init(void); extern uintptr_t phys_carveout_pa; extern size_t phys_carveout_size; - +extern boolean_t kernel_debugging_allowed(void); #if defined (__x86_64__) extern void extended_debug_log_init(void); @@ -684,12 +704,12 @@ extern size_t panic_stackshot_len; void SavePanicInfo(const char *message, void *panic_data, uint64_t panic_options); void paniclog_flush(void); -void panic_display_system_configuration(boolean_t launchd_exit); void panic_display_zprint(void); void panic_display_kernel_aslr(void); void panic_display_hibb(void); void panic_display_model_name(void); void panic_display_kernel_uuid(void); +void panic_display_process_name(void); #if CONFIG_ZLEAKS void panic_display_ztrace(void); #endif /* CONFIG_ZLEAKS 
*/ @@ -751,8 +771,7 @@ zone_leaks_scan(uintptr_t * instances, uint32_t count, uint32_t zoneSize, uint32 } #endif -extern boolean_t -kdp_is_in_zone(void *addr, const char *zone_name); +const char *sysctl_debug_get_preoslog(size_t *size); #endif /* DEBUG || DEVELOPMENT */ #endif /* XNU_KERNEL_PRIVATE */ diff --git a/osfmk/kern/ecc.h b/osfmk/kern/ecc.h index 4e13838d0..18c4b6f1f 100644 --- a/osfmk/kern/ecc.h +++ b/osfmk/kern/ecc.h @@ -46,7 +46,6 @@ extern kern_return_t ecc_log_record_event(const struct ecc_event *ev); #endif #ifdef XNU_KERNEL_PRIVATE -extern void ecc_log_init(void); extern kern_return_t ecc_log_get_next_event(struct ecc_event *ev); extern uint32_t ecc_log_get_correction_count(void); #endif diff --git a/osfmk/kern/ecc_logging.c b/osfmk/kern/ecc_logging.c index eb227cb6e..35d258372 100644 --- a/osfmk/kern/ecc_logging.c +++ b/osfmk/kern/ecc_logging.c @@ -44,18 +44,10 @@ struct ecc_event ecc_data[ECC_EVENT_BUFFER_COUNT]; static uint32_t ecc_data_next_read; static uint32_t ecc_data_next_write; static boolean_t ecc_data_empty = TRUE; // next read == next write : empty or full? -static lck_grp_t *ecc_data_lock_group; -static lck_spin_t ecc_data_lock; +static LCK_GRP_DECLARE(ecc_data_lock_group, "ecc-data"); +static LCK_SPIN_DECLARE(ecc_data_lock, &ecc_data_lock_group); static uint32_t ecc_correction_count; -void -ecc_log_init() -{ - ecc_data_lock_group = lck_grp_alloc_init("ecc-data", NULL); - lck_spin_init(&ecc_data_lock, ecc_data_lock_group, NULL); - OSMemoryBarrier(); -} - uint32_t ecc_log_get_correction_count() { diff --git a/osfmk/kern/exception.c b/osfmk/kern/exception.c index 4cbd102c1..91c688db5 100644 --- a/osfmk/kern/exception.c +++ b/osfmk/kern/exception.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2020 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -87,6 +87,8 @@ #include #include +#include + #include #include @@ -457,6 +459,7 @@ exception_triage_thread( lck_mtx_t *mutex; kern_return_t kr = KERN_FAILURE; + assert(exception != EXC_RPC_ALERT); /* @@ -528,6 +531,7 @@ out: * Returns: * KERN_SUCCESS if exception is handled by any of the handlers. */ +int debug4k_panic_on_exception = 0; kern_return_t exception_triage( exception_type_t exception, @@ -535,6 +539,12 @@ exception_triage( mach_msg_type_number_t codeCnt) { thread_t thread = current_thread(); + if (VM_MAP_PAGE_SIZE(thread->task->map) < PAGE_SIZE) { + DEBUG4K_EXC("thread %p task %p map %p exception %d codes 0x%llx 0x%llx \n", thread, thread->task, thread->task->map, exception, code[0], code[1]); + if (debug4k_panic_on_exception) { + panic("DEBUG4K %s:%d thread %p task %p map %p exception %d codes 0x%llx 0x%llx \n", __FUNCTION__, __LINE__, thread, thread->task, thread->task->map, exception, code[0], code[1]); + } + } #if __has_feature(ptrauth_calls) /* * If it is a ptrauth violation, then check if the task has the TF_PAC_EXC_FATAL diff --git a/osfmk/kern/exception.h b/osfmk/kern/exception.h index 163994656..ef530a63d 100644 --- a/osfmk/kern/exception.h +++ b/osfmk/kern/exception.h @@ -43,11 +43,11 @@ * There are arrays of these maintained at the activation, task, and host. 
*/ struct exception_action { - struct ipc_port *port; /* exception port */ + struct ipc_port * XNU_PTRAUTH_SIGNED_PTR("exception_action.port") port; /* exception port */ thread_state_flavor_t flavor; /* state flavor to send */ exception_behavior_t behavior; /* exception type to raise */ boolean_t privileged; /* survives ipc_task_reset */ - struct label *label; /* MAC label associated with action */ + struct label * XNU_PTRAUTH_SIGNED_PTR("exception_action.label") label; /* MAC label associated with action */ }; /* Initialize global state needed for exceptions. */ diff --git a/osfmk/kern/gzalloc.c b/osfmk/kern/gzalloc.c index 465fed0fe..ff7dec6bc 100644 --- a/osfmk/kern/gzalloc.c +++ b/osfmk/kern/gzalloc.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2012 Apple Inc. All rights reserved. + * Copyright (c) 2000-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -66,8 +66,6 @@ * gzalloc_zscale= specify size multiplier for the dedicated gzalloc submap */ -#include - #include #include #include @@ -80,8 +78,7 @@ #include #include #include -#include -#include +#include #include #include @@ -96,7 +93,6 @@ #include #include -extern boolean_t vm_kernel_ready, kmem_ready; boolean_t gzalloc_mode = FALSE; uint32_t pdzalloc_count, pdzfree_count; @@ -136,60 +132,55 @@ extern zone_t vm_page_zone; static zone_t gztrackzone = NULL; static char gznamedzone[MAX_ZONE_NAME] = ""; -void -gzalloc_reconfigure(__unused zone_t z) -{ - /* Nothing for now */ -} - boolean_t gzalloc_enabled(void) { return gzalloc_mode; } -static inline boolean_t -gzalloc_tracked(zone_t z) -{ - return gzalloc_mode && - (((z->elem_size >= gzalloc_min) && (z->elem_size <= gzalloc_max)) || (z == gztrackzone)) && - (z->gzalloc_exempt == 0); -} - void gzalloc_zone_init(zone_t z) { - if (gzalloc_mode) { - bzero(&z->gz, sizeof(z->gz)); + if (gzalloc_mode == 0) { + return; + } - if (track_this_zone(z->zone_name, gznamedzone)) { - gztrackzone = z; - } + bzero(&z->gz, sizeof(z->gz)); + + if (track_this_zone(z->z_name, gznamedzone)) { + gztrackzone = z; + } - if (gzfc_size && - gzalloc_tracked(z)) { - vm_size_t gzfcsz = round_page(sizeof(*z->gz.gzfc) * gzfc_size); + if (!z->gzalloc_exempt) { + z->gzalloc_tracked = (z == gztrackzone) || + ((zone_elem_size(z) >= gzalloc_min) && (zone_elem_size(z) <= gzalloc_max)); + } - /* If the VM/kmem system aren't yet configured, carve - * out the free element cache structure directly from the - * gzalloc_reserve supplied by the pmap layer. - */ - if (!kmem_ready) { - if (gzalloc_reserve_size < gzfcsz) { - panic("gzalloc reserve exhausted"); - } + if (gzfc_size && z->gzalloc_tracked) { + vm_size_t gzfcsz = round_page(sizeof(*z->gz.gzfc) * gzfc_size); + kern_return_t kr; - z->gz.gzfc = (vm_offset_t *)gzalloc_reserve; - gzalloc_reserve += gzfcsz; - gzalloc_reserve_size -= gzfcsz; - } else { - kern_return_t kr; + /* If the VM/kmem system aren't yet configured, carve + * out the free element cache structure directly from the + * gzalloc_reserve supplied by the pmap layer. 
+ */ + if (__improbable(startup_phase < STARTUP_SUB_KMEM)) { + if (gzalloc_reserve_size < gzfcsz) { + panic("gzalloc reserve exhausted"); + } - if ((kr = kernel_memory_allocate(kernel_map, (vm_offset_t *)&z->gz.gzfc, gzfcsz, 0, KMA_KOBJECT, VM_KERN_MEMORY_OSFMK)) != KERN_SUCCESS) { - panic("zinit/gzalloc: kernel_memory_allocate failed (%d) for 0x%lx bytes", kr, (unsigned long) gzfcsz); - } + z->gz.gzfc = (vm_offset_t *)gzalloc_reserve; + gzalloc_reserve += gzfcsz; + gzalloc_reserve_size -= gzfcsz; + bzero(z->gz.gzfc, gzfcsz); + } else { + kr = kernel_memory_allocate(kernel_map, + (vm_offset_t *)&z->gz.gzfc, gzfcsz, 0, + KMA_KOBJECT | KMA_ZERO, VM_KERN_MEMORY_OSFMK); + if (kr != KERN_SUCCESS) { + panic("%s: kernel_memory_allocate failed (%d) for 0x%lx bytes", + __func__, kr, (unsigned long)gzfcsz); } - bzero((void *)z->gz.gzfc, gzfcsz); } } } @@ -198,63 +189,63 @@ gzalloc_zone_init(zone_t z) void gzalloc_empty_free_cache(zone_t zone) { - if (__improbable(gzalloc_tracked(zone))) { - kern_return_t kr; - int freed_elements = 0; - vm_offset_t free_addr = 0; - vm_offset_t rounded_size = round_page(zone->elem_size + GZHEADER_SIZE); - vm_offset_t gzfcsz = round_page(sizeof(*zone->gz.gzfc) * gzfc_size); - vm_offset_t gzfc_copy; - - kr = kmem_alloc(kernel_map, &gzfc_copy, gzfcsz, VM_KERN_MEMORY_OSFMK); - if (kr != KERN_SUCCESS) { - panic("gzalloc_empty_free_cache: kmem_alloc: 0x%x", kr); - } + kern_return_t kr; + int freed_elements = 0; + vm_offset_t free_addr = 0; + vm_offset_t rounded_size = round_page(zone_elem_size(zone) + GZHEADER_SIZE); + vm_offset_t gzfcsz = round_page(sizeof(*zone->gz.gzfc) * gzfc_size); + vm_offset_t gzfc_copy; - /* Reset gzalloc_data. */ - lock_zone(zone); - memcpy((void *)gzfc_copy, (void *)zone->gz.gzfc, gzfcsz); - bzero((void *)zone->gz.gzfc, gzfcsz); - zone->gz.gzfc_index = 0; - unlock_zone(zone); + assert(zone->gzalloc_tracked); // the caller is responsible for checking - /* Free up all the cached elements. */ - for (uint32_t index = 0; index < gzfc_size; index++) { - free_addr = ((vm_offset_t *)gzfc_copy)[index]; - if (free_addr && free_addr >= gzalloc_map_min && free_addr < gzalloc_map_max) { - kr = vm_map_remove( - gzalloc_map, - free_addr, - free_addr + rounded_size + (1 * PAGE_SIZE), - VM_MAP_REMOVE_KUNWIRE); - if (kr != KERN_SUCCESS) { - panic("gzalloc_empty_free_cache: vm_map_remove: %p, 0x%x", (void *)free_addr, kr); - } - OSAddAtomic64((SInt32)rounded_size, &gzalloc_freed); - OSAddAtomic64(-((SInt32) (rounded_size - zone->elem_size)), &gzalloc_wasted); + kr = kmem_alloc(kernel_map, &gzfc_copy, gzfcsz, VM_KERN_MEMORY_OSFMK); + if (kr != KERN_SUCCESS) { + panic("gzalloc_empty_free_cache: kmem_alloc: 0x%x", kr); + } - freed_elements++; + /* Reset gzalloc_data. */ + lock_zone(zone); + memcpy((void *)gzfc_copy, (void *)zone->gz.gzfc, gzfcsz); + bzero((void *)zone->gz.gzfc, gzfcsz); + zone->gz.gzfc_index = 0; + unlock_zone(zone); + + /* Free up all the cached elements. */ + for (uint32_t index = 0; index < gzfc_size; index++) { + free_addr = ((vm_offset_t *)gzfc_copy)[index]; + if (free_addr && free_addr >= gzalloc_map_min && free_addr < gzalloc_map_max) { + kr = vm_map_remove(gzalloc_map, free_addr, + free_addr + rounded_size + (1 * PAGE_SIZE), + VM_MAP_REMOVE_KUNWIRE); + if (kr != KERN_SUCCESS) { + panic("gzalloc_empty_free_cache: vm_map_remove: %p, 0x%x", (void *)free_addr, kr); } - } - /* - * TODO: Consider freeing up zone->gz.gzfc as well if it didn't come from the gzalloc_reserve pool. - * For now we're reusing this buffer across zdestroy's. 
We would have to allocate it again on a - * subsequent zinit() as well. - */ - - /* Decrement zone counters. */ - lock_zone(zone); - zone->count -= freed_elements; - zone->cur_size -= (freed_elements * rounded_size); - unlock_zone(zone); + OSAddAtomic64((SInt32)rounded_size, &gzalloc_freed); + OSAddAtomic64(-((SInt32) (rounded_size - zone_elem_size(zone))), &gzalloc_wasted); - kmem_free(kernel_map, gzfc_copy, gzfcsz); + freed_elements++; + } } + /* + * TODO: Consider freeing up zone->gz.gzfc as well if it didn't come from the gzalloc_reserve pool. + * For now we're reusing this buffer across zdestroy's. We would have to allocate it again on a + * subsequent zinit() as well. + */ + + /* Decrement zone counters. */ + lock_zone(zone); + zone->countfree += freed_elements; + zone->page_count -= freed_elements; + unlock_zone(zone); + + kmem_free(kernel_map, gzfc_copy, gzfcsz); } -void +__startup_func +static void gzalloc_configure(void) { +#if !KASAN_ZALLOC char temp_buf[16]; if (PE_parse_boot_argn("-gzalloc_mode", temp_buf, sizeof(temp_buf))) { @@ -321,7 +312,9 @@ gzalloc_configure(void) gzalloc_reserve_size = GZALLOC_RESERVE_SIZE_DEFAULT; gzalloc_reserve = (vm_offset_t) pmap_steal_memory(gzalloc_reserve_size); } +#endif } +STARTUP(PMAP_STEAL, STARTUP_RANK_FIRST, gzalloc_configure); void gzalloc_init(vm_size_t max_zonemap_size) @@ -338,68 +331,74 @@ gzalloc_init(vm_size_t max_zonemap_size) &gzalloc_map); if (retval != KERN_SUCCESS) { - panic("zone_init: kmem_suballoc(gzalloc_map, 0x%lx, %u) failed", max_zonemap_size, gzalloc_zonemap_scale); + panic("zone_init: kmem_suballoc(gzalloc_map, 0x%lx, %u) failed", + max_zonemap_size, gzalloc_zonemap_scale); } gzalloc_map_max = gzalloc_map_min + (max_zonemap_size * gzalloc_zonemap_scale); } } vm_offset_t -gzalloc_alloc(zone_t zone, boolean_t canblock) +gzalloc_alloc(zone_t zone, zone_stats_t zstats, zalloc_flags_t flags) { vm_offset_t addr = 0; - if (__improbable(gzalloc_tracked(zone))) { - if (get_preemption_level() != 0) { - if (canblock == TRUE) { - pdzalloc_count++; - } else { - return 0; - } + assert(zone->gzalloc_tracked); // the caller is responsible for checking + + if (get_preemption_level() != 0) { + if (flags & Z_NOWAIT) { + return 0; } + pdzalloc_count++; + } - vm_offset_t rounded_size = round_page(zone->elem_size + GZHEADER_SIZE); - vm_offset_t residue = rounded_size - zone->elem_size; - vm_offset_t gzaddr = 0; - gzhdr_t *gzh, *gzhcopy = NULL; + bool kmem_ready = (startup_phase >= STARTUP_SUB_KMEM); + vm_offset_t rounded_size = round_page(zone_elem_size(zone) + GZHEADER_SIZE); + vm_offset_t residue = rounded_size - zone_elem_size(zone); + vm_offset_t gzaddr = 0; + gzhdr_t *gzh, *gzhcopy = NULL; - if (!kmem_ready || (vm_page_zone == ZONE_NULL)) { - /* Early allocations are supplied directly from the - * reserve. - */ - if (gzalloc_reserve_size < (rounded_size + PAGE_SIZE)) { - panic("gzalloc reserve exhausted"); - } - gzaddr = gzalloc_reserve; - /* No guard page for these early allocations, just - * waste an additional page. 
- */ - gzalloc_reserve += rounded_size + PAGE_SIZE; - gzalloc_reserve_size -= rounded_size + PAGE_SIZE; - OSAddAtomic64((SInt32) (rounded_size), &gzalloc_early_alloc); - } else { - kern_return_t kr = kernel_memory_allocate(gzalloc_map, - &gzaddr, rounded_size + (1 * PAGE_SIZE), - 0, KMA_KOBJECT | KMA_ATOMIC | gzalloc_guard, - VM_KERN_MEMORY_OSFMK); - if (kr != KERN_SUCCESS) { - panic("gzalloc: kernel_memory_allocate for size 0x%llx failed with %d", (uint64_t)rounded_size, kr); - } + if (!kmem_ready || (vm_page_zone == ZONE_NULL)) { + /* Early allocations are supplied directly from the + * reserve. + */ + if (gzalloc_reserve_size < (rounded_size + PAGE_SIZE)) { + panic("gzalloc reserve exhausted"); } - - if (gzalloc_uf_mode) { - gzaddr += PAGE_SIZE; - /* The "header" becomes a "footer" in underflow - * mode. - */ - gzh = (gzhdr_t *) (gzaddr + zone->elem_size); - addr = gzaddr; - gzhcopy = (gzhdr_t *) (gzaddr + rounded_size - sizeof(gzhdr_t)); - } else { - gzh = (gzhdr_t *) (gzaddr + residue - GZHEADER_SIZE); - addr = (gzaddr + residue); + gzaddr = gzalloc_reserve; + /* No guard page for these early allocations, just + * waste an additional page. + */ + gzalloc_reserve += rounded_size + PAGE_SIZE; + gzalloc_reserve_size -= rounded_size + PAGE_SIZE; + OSAddAtomic64((SInt32) (rounded_size), &gzalloc_early_alloc); + } else { + kern_return_t kr = kernel_memory_allocate(gzalloc_map, + &gzaddr, rounded_size + (1 * PAGE_SIZE), + 0, KMA_KOBJECT | KMA_ATOMIC | gzalloc_guard, + VM_KERN_MEMORY_OSFMK); + if (kr != KERN_SUCCESS) { + panic("gzalloc: kernel_memory_allocate for size 0x%llx failed with %d", + (uint64_t)rounded_size, kr); } + } + if (gzalloc_uf_mode) { + gzaddr += PAGE_SIZE; + /* The "header" becomes a "footer" in underflow + * mode. + */ + gzh = (gzhdr_t *) (gzaddr + zone_elem_size(zone)); + addr = gzaddr; + gzhcopy = (gzhdr_t *) (gzaddr + rounded_size - sizeof(gzhdr_t)); + } else { + gzh = (gzhdr_t *) (gzaddr + residue - GZHEADER_SIZE); + addr = (gzaddr + residue); + } + + if (zone->zfree_clear_mem) { + bzero((void *)gzaddr, rounded_size); + } else { /* Fill with a pattern on allocation to trap uninitialized * data use. Since the element size may be "rounded up" * by higher layers such as the kalloc layer, this may @@ -411,173 +410,182 @@ gzalloc_alloc(zone_t zone, boolean_t canblock) * prefixed to the allocation. */ memset((void *)gzaddr, gzalloc_fill_pattern, rounded_size); + } - gzh->gzone = (kmem_ready && vm_page_zone) ? zone : GZDEADZONE; - gzh->gzsize = (uint32_t) zone->elem_size; - gzh->gzsig = GZALLOC_SIGNATURE; + gzh->gzone = (kmem_ready && vm_page_zone) ? 
zone : GZDEADZONE; + gzh->gzsize = (uint32_t)zone_elem_size(zone); + gzh->gzsig = GZALLOC_SIGNATURE; - /* In underflow detection mode, stash away a copy of the - * metadata at the edge of the allocated range, for - * retrieval by gzalloc_element_size() - */ - if (gzhcopy) { - *gzhcopy = *gzh; - } + /* In underflow detection mode, stash away a copy of the + * metadata at the edge of the allocated range, for + * retrieval by gzalloc_element_size() + */ + if (gzhcopy) { + *gzhcopy = *gzh; + } - lock_zone(zone); - assert(zone->zone_valid); - zone->count++; - zone->sum_count++; - zone->cur_size += rounded_size; - unlock_zone(zone); + lock_zone(zone); + assert(zone->z_self == zone); + zone->countfree--; + zone->page_count += 1; + zpercpu_get(zstats)->zs_mem_allocated += rounded_size; +#if ZALLOC_DETAILED_STATS + zpercpu_get(zstats)->zs_mem_wasted += rounded_size - zone_elem_size(zone); +#endif /* ZALLOC_DETAILED_STATS */ + unlock_zone(zone); + + OSAddAtomic64((SInt32) rounded_size, &gzalloc_allocated); + OSAddAtomic64((SInt32) (rounded_size - zone_elem_size(zone)), &gzalloc_wasted); - OSAddAtomic64((SInt32) rounded_size, &gzalloc_allocated); - OSAddAtomic64((SInt32) (rounded_size - zone->elem_size), &gzalloc_wasted); - } return addr; } -boolean_t -gzalloc_free(zone_t zone, void *addr) +void +gzalloc_free(zone_t zone, zone_stats_t zstats, void *addr) { - boolean_t gzfreed = FALSE; kern_return_t kr; - if (__improbable(gzalloc_tracked(zone))) { - gzhdr_t *gzh; - vm_offset_t rounded_size = round_page(zone->elem_size + GZHEADER_SIZE); - vm_offset_t residue = rounded_size - zone->elem_size; - vm_offset_t saddr; - vm_offset_t free_addr = 0; + assert(zone->gzalloc_tracked); // the caller is responsible for checking - if (gzalloc_uf_mode) { - gzh = (gzhdr_t *)((vm_offset_t)addr + zone->elem_size); - saddr = (vm_offset_t) addr - PAGE_SIZE; - } else { - gzh = (gzhdr_t *)((vm_offset_t)addr - GZHEADER_SIZE); - saddr = ((vm_offset_t)addr) - residue; - } + gzhdr_t *gzh; + vm_offset_t rounded_size = round_page(zone_elem_size(zone) + GZHEADER_SIZE); + vm_offset_t residue = rounded_size - zone_elem_size(zone); + vm_offset_t saddr; + vm_offset_t free_addr = 0; - if ((saddr & PAGE_MASK) != 0) { - panic("gzalloc_free: invalid address supplied: %p (adjusted: 0x%lx) for zone with element sized 0x%lx\n", addr, saddr, zone->elem_size); - } + if (gzalloc_uf_mode) { + gzh = (gzhdr_t *)((vm_offset_t)addr + zone_elem_size(zone)); + saddr = (vm_offset_t) addr - PAGE_SIZE; + } else { + gzh = (gzhdr_t *)((vm_offset_t)addr - GZHEADER_SIZE); + saddr = ((vm_offset_t)addr) - residue; + } - if (gzfc_size) { - if (gzalloc_dfree_check) { - uint32_t gd; + if ((saddr & PAGE_MASK) != 0) { + panic("%s: invalid address supplied: " + "%p (adjusted: 0x%lx) for zone with element sized 0x%lx\n", + __func__, addr, saddr, zone_elem_size(zone)); + } - lock_zone(zone); - assert(zone->zone_valid); - for (gd = 0; gd < gzfc_size; gd++) { - if (zone->gz.gzfc[gd] == saddr) { - panic("gzalloc: double free detected, freed address: 0x%lx, current free cache index: %d, freed index: %d", saddr, zone->gz.gzfc_index, gd); - } - } - unlock_zone(zone); + if (gzfc_size && gzalloc_dfree_check) { + lock_zone(zone); + assert(zone->z_self == zone); + for (uint32_t gd = 0; gd < gzfc_size; gd++) { + if (zone->gz.gzfc[gd] != saddr) { + continue; } + panic("%s: double free detected, freed address: 0x%lx, " + "current free cache index: %d, freed index: %d", + __func__, saddr, zone->gz.gzfc_index, gd); } + unlock_zone(zone); + } - if (gzalloc_consistency_checks) { - if 
(gzh->gzsig != GZALLOC_SIGNATURE) { - panic("GZALLOC signature mismatch for element %p, expected 0x%x, found 0x%x", addr, GZALLOC_SIGNATURE, gzh->gzsig); - } - - if (gzh->gzone != zone && (gzh->gzone != GZDEADZONE)) { - panic("%s: Mismatched zone or under/overflow, current zone: %p, recorded zone: %p, address: %p", __FUNCTION__, zone, gzh->gzone, (void *)addr); - } - /* Partially redundant given the zone check, but may flag header corruption */ - if (gzh->gzsize != zone->elem_size) { - panic("Mismatched zfree or under/overflow for zone %p, recorded size: 0x%x, element size: 0x%x, address: %p\n", zone, gzh->gzsize, (uint32_t) zone->elem_size, (void *)addr); - } - - char *gzc, *checkstart, *checkend; - if (gzalloc_uf_mode) { - checkstart = (char *) ((uintptr_t) gzh + sizeof(gzh)); - checkend = (char *) ((((vm_offset_t)addr) & ~PAGE_MASK) + PAGE_SIZE); - } else { - checkstart = (char *) trunc_page_64(addr); - checkend = (char *)gzh; - } - - for (gzc = checkstart; gzc < checkend; gzc++) { - if (*gzc != gzalloc_fill_pattern) { - panic("GZALLOC: detected over/underflow, byte at %p, element %p, contents 0x%x from 0x%lx byte sized zone (%s) doesn't match fill pattern (%c)", gzc, addr, *gzc, zone->elem_size, zone->zone_name, gzalloc_fill_pattern); - } - } + if (gzalloc_consistency_checks) { + if (gzh->gzsig != GZALLOC_SIGNATURE) { + panic("GZALLOC signature mismatch for element %p, " + "expected 0x%x, found 0x%x", + addr, GZALLOC_SIGNATURE, gzh->gzsig); } - if (!kmem_ready || gzh->gzone == GZDEADZONE) { - /* For now, just leak frees of early allocations - * performed before kmem is fully configured. - * They don't seem to get freed currently; - * consider ml_static_mfree in the future. - */ - OSAddAtomic64((SInt32) (rounded_size), &gzalloc_early_free); - return TRUE; + if (gzh->gzone != zone && (gzh->gzone != GZDEADZONE)) { + panic("%s: Mismatched zone or under/overflow, " + "current zone: %p, recorded zone: %p, address: %p", + __func__, zone, gzh->gzone, (void *)addr); + } + /* Partially redundant given the zone check, but may flag header corruption */ + if (gzh->gzsize != zone_elem_size(zone)) { + panic("Mismatched zfree or under/overflow for zone %p, " + "recorded size: 0x%x, element size: 0x%x, address: %p", + zone, gzh->gzsize, (uint32_t)zone_elem_size(zone), (void *)addr); } - if (get_preemption_level() != 0) { - pdzfree_count++; + char *gzc, *checkstart, *checkend; + if (gzalloc_uf_mode) { + checkstart = (char *) ((uintptr_t) gzh + sizeof(gzh)); + checkend = (char *) ((((vm_offset_t)addr) & ~PAGE_MASK) + PAGE_SIZE); + } else { + checkstart = (char *) trunc_page_64(addr); + checkend = (char *)gzh; } - if (gzfc_size) { - /* Either write protect or unmap the newly freed - * allocation - */ - kr = vm_map_protect( - gzalloc_map, - saddr, - saddr + rounded_size + (1 * PAGE_SIZE), - gzalloc_prot, - FALSE); - if (kr != KERN_SUCCESS) { - panic("%s: vm_map_protect: %p, 0x%x", __FUNCTION__, (void *)saddr, kr); + for (gzc = checkstart; gzc < checkend; gzc++) { + if (*gzc == gzalloc_fill_pattern) { + continue; } - } else { - free_addr = saddr; + panic("%s: detected over/underflow, byte at %p, element %p, " + "contents 0x%x from 0x%lx byte sized zone (%s%s) " + "doesn't match fill pattern (%c)", + __func__, gzc, addr, *gzc, zone_elem_size(zone), + zone_heap_name(zone), zone->z_name, gzalloc_fill_pattern); } + } - lock_zone(zone); - assert(zone->zone_valid); + if ((startup_phase < STARTUP_SUB_KMEM) || gzh->gzone == GZDEADZONE) { + /* For now, just leak frees of early allocations + * performed before kmem 
is fully configured. + * They don't seem to get freed currently; + * consider ml_static_mfree in the future. + */ + OSAddAtomic64((SInt32) (rounded_size), &gzalloc_early_free); + return; + } - /* Insert newly freed element into the protected free element - * cache, and rotate out the LRU element. + if (get_preemption_level() != 0) { + pdzfree_count++; + } + + if (gzfc_size) { + /* Either write protect or unmap the newly freed + * allocation */ - if (gzfc_size) { - if (zone->gz.gzfc_index >= gzfc_size) { - zone->gz.gzfc_index = 0; - } - free_addr = zone->gz.gzfc[zone->gz.gzfc_index]; - zone->gz.gzfc[zone->gz.gzfc_index++] = saddr; + kr = vm_map_protect(gzalloc_map, saddr, + saddr + rounded_size + (1 * PAGE_SIZE), + gzalloc_prot, FALSE); + if (kr != KERN_SUCCESS) { + panic("%s: vm_map_protect: %p, 0x%x", __func__, (void *)saddr, kr); } + } else { + free_addr = saddr; + } + + lock_zone(zone); + assert(zone->z_self == zone); - if (free_addr) { - zone->count--; - zone->cur_size -= rounded_size; + /* Insert newly freed element into the protected free element + * cache, and rotate out the LRU element. + */ + if (gzfc_size) { + if (zone->gz.gzfc_index >= gzfc_size) { + zone->gz.gzfc_index = 0; } + free_addr = zone->gz.gzfc[zone->gz.gzfc_index]; + zone->gz.gzfc[zone->gz.gzfc_index++] = saddr; + } - unlock_zone(zone); + if (free_addr) { + zone->countfree++; + zone->page_count -= 1; + } - if (free_addr) { - // TODO: consider using physical reads to check for - // corruption while on the protected freelist - // (i.e. physical corruption) - kr = vm_map_remove( - gzalloc_map, - free_addr, - free_addr + rounded_size + (1 * PAGE_SIZE), - VM_MAP_REMOVE_KUNWIRE); - if (kr != KERN_SUCCESS) { - panic("gzfree: vm_map_remove: %p, 0x%x", (void *)free_addr, kr); - } - // TODO: sysctl-ize for quick reference - OSAddAtomic64((SInt32)rounded_size, &gzalloc_freed); - OSAddAtomic64(-((SInt32) (rounded_size - zone->elem_size)), &gzalloc_wasted); - } + zpercpu_get(zstats)->zs_mem_freed += rounded_size; + unlock_zone(zone); - gzfreed = TRUE; + if (free_addr) { + // TODO: consider using physical reads to check for + // corruption while on the protected freelist + // (i.e. 
physical corruption) + kr = vm_map_remove(gzalloc_map, free_addr, + free_addr + rounded_size + (1 * PAGE_SIZE), + VM_MAP_REMOVE_KUNWIRE); + if (kr != KERN_SUCCESS) { + panic("gzfree: vm_map_remove: %p, 0x%x", (void *)free_addr, kr); + } + // TODO: sysctl-ize for quick reference + OSAddAtomic64((SInt32)rounded_size, &gzalloc_freed); + OSAddAtomic64(-((SInt32) (rounded_size - zone_elem_size(zone))), + &gzalloc_wasted); } - return gzfreed; } boolean_t @@ -594,7 +602,8 @@ gzalloc_element_size(void *gzaddr, zone_t *z, vm_size_t *gzsz) if (vmef == FALSE) { panic("GZALLOC: unable to locate map entry for %p\n", (void *)a); } - assertf(gzvme->vme_atomic != 0, "GZALLOC: VM map entry inconsistency, vme: %p, start: %llu end: %llu", gzvme, gzvme->vme_start, gzvme->vme_end); + assertf(gzvme->vme_atomic != 0, "GZALLOC: VM map entry inconsistency, " + "vme: %p, start: %llu end: %llu", gzvme, gzvme->vme_start, gzvme->vme_end); /* Locate the gzalloc metadata adjoining the element */ if (gzalloc_uf_mode == TRUE) { @@ -625,11 +634,12 @@ gzalloc_element_size(void *gzaddr, zone_t *z, vm_size_t *gzsz) } if (gzh->gzsig != GZALLOC_SIGNATURE) { - panic("GZALLOC signature mismatch for element %p, expected 0x%x, found 0x%x", (void *)a, GZALLOC_SIGNATURE, gzh->gzsig); + panic("GZALLOC signature mismatch for element %p, expected 0x%x, found 0x%x", + (void *)a, GZALLOC_SIGNATURE, gzh->gzsig); } - *gzsz = gzh->gzone->elem_size; - if (__improbable((gzalloc_tracked(gzh->gzone)) == FALSE)) { + *gzsz = zone_elem_size(gzh->gzone); + if (__improbable(!gzh->gzone->gzalloc_tracked)) { panic("GZALLOC: zone mismatch (%p)\n", gzh->gzone); } diff --git a/osfmk/kern/host.c b/osfmk/kern/host.c index 20c95b23e..526bfce31 100644 --- a/osfmk/kern/host.c +++ b/osfmk/kern/host.c @@ -109,6 +109,9 @@ #include +vm_statistics64_data_t PERCPU_DATA(vm_stat); +uint64_t PERCPU_DATA(vm_page_grab_count); + host_data_t realhost; vm_extmod_statistics_data_t host_extmod_statistics; @@ -159,6 +162,8 @@ host_processors(host_priv_t host_priv, processor_array_t * out_array, mach_msg_t return KERN_SUCCESS; } +extern int sched_allow_NO_SMT_threads; + kern_return_t host_info(host_t host, host_flavor_t flavor, host_info_t info, mach_msg_type_number_t * count) { @@ -169,7 +174,7 @@ host_info(host_t host, host_flavor_t flavor, host_info_t info, mach_msg_type_num switch (flavor) { case HOST_BASIC_INFO: { host_basic_info_t basic_info; - int master_id; + int master_id = master_processor->cpu_id; /* * Basic information about this host. 
@@ -181,15 +186,19 @@ host_info(host_t host, host_flavor_t flavor, host_info_t info, mach_msg_type_num basic_info = (host_basic_info_t)info; basic_info->memory_size = machine_info.memory_size; + basic_info->cpu_type = slot_type(master_id); + basic_info->cpu_subtype = slot_subtype(master_id); basic_info->max_cpus = machine_info.max_cpus; #if defined(__x86_64__) - basic_info->avail_cpus = processor_avail_count_user; + if (sched_allow_NO_SMT_threads && current_task()->t_flags & TF_NO_SMT) { + basic_info->avail_cpus = primary_processor_avail_count_user; + } else { + basic_info->avail_cpus = processor_avail_count_user; + } #else basic_info->avail_cpus = processor_avail_count; #endif - master_id = master_processor->cpu_id; - basic_info->cpu_type = slot_type(master_id); - basic_info->cpu_subtype = slot_subtype(master_id); + if (*count >= HOST_BASIC_INFO_COUNT) { basic_info->cpu_threadtype = slot_threadtype(master_id); @@ -201,6 +210,7 @@ host_info(host_t host, host_flavor_t flavor, host_info_t info, mach_msg_type_num basic_info->logical_cpu = machine_info.logical_cpu; #endif basic_info->logical_cpu_max = machine_info.logical_cpu_max; + basic_info->max_mem = machine_info.max_mem; *count = HOST_BASIC_INFO_COUNT; @@ -359,6 +369,7 @@ host_info(host_t host, host_flavor_t flavor, host_info_t info, mach_msg_type_num user_arch_info->cpu_subtype = slot_subtype(master_id); #endif + *count = HOST_PREFERRED_USER_ARCH_COUNT; return KERN_SUCCESS; @@ -373,8 +384,6 @@ kern_return_t host_statistics(host_t host, host_flavor_t flavor, host_info_t inf kern_return_t host_statistics(host_t host, host_flavor_t flavor, host_info_t info, mach_msg_type_number_t * count) { - uint32_t i; - if (host == HOST_NULL) { return KERN_INVALID_HOST; } @@ -397,8 +406,6 @@ host_statistics(host_t host, host_flavor_t flavor, host_info_t info, mach_msg_ty } case HOST_VM_INFO: { - processor_t processor; - vm_statistics64_t stat; vm_statistics64_data_t host_vm_stat; vm_statistics_t stat32; mach_msg_type_number_t original_count; @@ -407,27 +414,18 @@ host_statistics(host_t host, host_flavor_t flavor, host_info_t info, mach_msg_ty return KERN_FAILURE; } - processor = processor_list; - stat = &PROCESSOR_DATA(processor, vm_stat); - host_vm_stat = *stat; - - if (processor_count > 1) { - simple_lock(&processor_list_lock, LCK_GRP_NULL); - - while ((processor = processor->processor_list) != NULL) { - stat = &PROCESSOR_DATA(processor, vm_stat); - - host_vm_stat.zero_fill_count += stat->zero_fill_count; - host_vm_stat.reactivations += stat->reactivations; - host_vm_stat.pageins += stat->pageins; - host_vm_stat.pageouts += stat->pageouts; - host_vm_stat.faults += stat->faults; - host_vm_stat.cow_faults += stat->cow_faults; - host_vm_stat.lookups += stat->lookups; - host_vm_stat.hits += stat->hits; - } - - simple_unlock(&processor_list_lock); + host_vm_stat = *PERCPU_GET_MASTER(vm_stat); + + percpu_foreach_secondary(stat, vm_stat) { + vm_statistics64_data_t data = *stat; + host_vm_stat.zero_fill_count += data.zero_fill_count; + host_vm_stat.reactivations += data.reactivations; + host_vm_stat.pageins += data.pageins; + host_vm_stat.pageouts += data.pageouts; + host_vm_stat.faults += data.faults; + host_vm_stat.cow_faults += data.cow_faults; + host_vm_stat.lookups += data.lookups; + host_vm_stat.hits += data.hits; } stat32 = (vm_statistics_t)info; @@ -436,11 +434,7 @@ host_statistics(host_t host, host_flavor_t flavor, host_info_t info, mach_msg_ty stat32->active_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_active_count); if (vm_page_local_q) { - for 
(i = 0; i < vm_page_local_q_count; i++) { - struct vpl * lq; - - lq = &vm_page_local_q[i].vpl_un.vpl; - + zpercpu_foreach(lq, vm_page_local_q) { stat32->active_count += VM_STATISTICS_TRUNCATE_TO_32_BIT(lq->vpl_count); } } @@ -496,7 +490,7 @@ host_statistics(host_t host, host_flavor_t flavor, host_info_t info, mach_msg_ty MACRO_BEGIN cpu_load_info->cpu_ticks[(state)] += (uint32_t)(ticks / hz_tick_interval); \ MACRO_END #define GET_TICKS_VALUE_FROM_TIMER(processor, state, timer) \ - MACRO_BEGIN GET_TICKS_VALUE(state, timer_grab(&PROCESSOR_DATA(processor, timer))); \ + MACRO_BEGIN GET_TICKS_VALUE(state, timer_grab(&(processor)->timer)); \ MACRO_END cpu_load_info = (host_cpu_load_info_t)info; @@ -522,11 +516,11 @@ host_statistics(host_t host, host_flavor_t flavor, host_info_t info, mach_msg_ty GET_TICKS_VALUE_FROM_TIMER(processor, CPU_STATE_USER, system_state); } - idle_state = &PROCESSOR_DATA(processor, idle_state); + idle_state = &processor->idle_state; idle_time_snapshot1 = timer_grab(idle_state); idle_time_tstamp1 = idle_state->tstamp; - if (PROCESSOR_DATA(processor, current_state) != idle_state) { + if (processor->current_state != idle_state) { /* Processor is non-idle, so idle timer should be accurate */ GET_TICKS_VALUE_FROM_TIMER(processor, CPU_STATE_IDLE, idle_state); } else if ((idle_time_snapshot1 != (idle_time_snapshot2 = timer_grab(idle_state))) || @@ -593,8 +587,8 @@ extern uint32_t c_segment_pages_compressed; uint64_t host_statistics_time_window; -static lck_mtx_t host_statistics_lck; -static lck_grp_t* host_statistics_lck_grp; +static LCK_GRP_DECLARE(host_statistics_lck_grp, "host_statistics"); +static LCK_MTX_DECLARE(host_statistics_lck, &host_statistics_lck_grp); #define HOST_VM_INFO64_REV0 0 #define HOST_VM_INFO64_REV1 1 @@ -644,8 +638,6 @@ static struct host_stats_cache g_host_stats_cache[NUM_HOST_INFO_DATA_TYPES] = { void host_statistics_init(void) { - host_statistics_lck_grp = lck_grp_alloc_init("host_statistics", LCK_GRP_ATTR_NULL); - lck_mtx_init(&host_statistics_lck, host_statistics_lck_grp, LCK_ATTR_NULL); nanoseconds_to_absolutetime((HOST_STATISTICS_TIME_WINDOW * NSEC_PER_SEC), &host_statistics_time_window); } @@ -798,126 +790,113 @@ out: return rate_limited; } -kern_return_t host_statistics64(host_t host, host_flavor_t flavor, host_info_t info, mach_msg_type_number_t * count); - kern_return_t -host_statistics64(host_t host, host_flavor_t flavor, host_info64_t info, mach_msg_type_number_t * count) +vm_stats(void *info, unsigned int *count) { - uint32_t i; + vm_statistics64_data_t host_vm_stat; + mach_msg_type_number_t original_count; + unsigned int local_q_internal_count; + unsigned int local_q_external_count; - if (host == HOST_NULL) { - return KERN_INVALID_HOST; + if (*count < HOST_VM_INFO64_REV0_COUNT) { + return KERN_FAILURE; } - switch (flavor) { - case HOST_VM_INFO64: /* We were asked to get vm_statistics64 */ - { - processor_t processor; - vm_statistics64_t stat; - vm_statistics64_data_t host_vm_stat; - mach_msg_type_number_t original_count; - unsigned int local_q_internal_count; - unsigned int local_q_external_count; - - if (*count < HOST_VM_INFO64_REV0_COUNT) { - return KERN_FAILURE; - } - - processor = processor_list; - stat = &PROCESSOR_DATA(processor, vm_stat); - host_vm_stat = *stat; - - if (processor_count > 1) { - simple_lock(&processor_list_lock, LCK_GRP_NULL); - - while ((processor = processor->processor_list) != NULL) { - stat = &PROCESSOR_DATA(processor, vm_stat); - - host_vm_stat.zero_fill_count += stat->zero_fill_count; - 
host_vm_stat.reactivations += stat->reactivations; - host_vm_stat.pageins += stat->pageins; - host_vm_stat.pageouts += stat->pageouts; - host_vm_stat.faults += stat->faults; - host_vm_stat.cow_faults += stat->cow_faults; - host_vm_stat.lookups += stat->lookups; - host_vm_stat.hits += stat->hits; - host_vm_stat.compressions += stat->compressions; - host_vm_stat.decompressions += stat->decompressions; - host_vm_stat.swapins += stat->swapins; - host_vm_stat.swapouts += stat->swapouts; - } - - simple_unlock(&processor_list_lock); - } - - stat = (vm_statistics64_t)info; - - stat->free_count = vm_page_free_count + vm_page_speculative_count; - stat->active_count = vm_page_active_count; - - local_q_internal_count = 0; - local_q_external_count = 0; - if (vm_page_local_q) { - for (i = 0; i < vm_page_local_q_count; i++) { - struct vpl * lq; - - lq = &vm_page_local_q[i].vpl_un.vpl; - - stat->active_count += lq->vpl_count; - local_q_internal_count += lq->vpl_internal_count; - local_q_external_count += lq->vpl_external_count; - } + host_vm_stat = *PERCPU_GET_MASTER(vm_stat); + + percpu_foreach_secondary(stat, vm_stat) { + vm_statistics64_data_t data = *stat; + host_vm_stat.zero_fill_count += data.zero_fill_count; + host_vm_stat.reactivations += data.reactivations; + host_vm_stat.pageins += data.pageins; + host_vm_stat.pageouts += data.pageouts; + host_vm_stat.faults += data.faults; + host_vm_stat.cow_faults += data.cow_faults; + host_vm_stat.lookups += data.lookups; + host_vm_stat.hits += data.hits; + host_vm_stat.compressions += data.compressions; + host_vm_stat.decompressions += data.decompressions; + host_vm_stat.swapins += data.swapins; + host_vm_stat.swapouts += data.swapouts; + } + + vm_statistics64_t stat = (vm_statistics64_t)info; + + stat->free_count = vm_page_free_count + vm_page_speculative_count; + stat->active_count = vm_page_active_count; + + local_q_internal_count = 0; + local_q_external_count = 0; + if (vm_page_local_q) { + zpercpu_foreach(lq, vm_page_local_q) { + stat->active_count += lq->vpl_count; + local_q_internal_count += lq->vpl_internal_count; + local_q_external_count += lq->vpl_external_count; } - stat->inactive_count = vm_page_inactive_count; + } + stat->inactive_count = vm_page_inactive_count; #if CONFIG_EMBEDDED - stat->wire_count = vm_page_wire_count; + stat->wire_count = vm_page_wire_count; #else - stat->wire_count = vm_page_wire_count + vm_page_throttled_count + vm_lopage_free_count; + stat->wire_count = vm_page_wire_count + vm_page_throttled_count + vm_lopage_free_count; #endif - stat->zero_fill_count = host_vm_stat.zero_fill_count; - stat->reactivations = host_vm_stat.reactivations; - stat->pageins = host_vm_stat.pageins; - stat->pageouts = host_vm_stat.pageouts; - stat->faults = host_vm_stat.faults; - stat->cow_faults = host_vm_stat.cow_faults; - stat->lookups = host_vm_stat.lookups; - stat->hits = host_vm_stat.hits; + stat->zero_fill_count = host_vm_stat.zero_fill_count; + stat->reactivations = host_vm_stat.reactivations; + stat->pageins = host_vm_stat.pageins; + stat->pageouts = host_vm_stat.pageouts; + stat->faults = host_vm_stat.faults; + stat->cow_faults = host_vm_stat.cow_faults; + stat->lookups = host_vm_stat.lookups; + stat->hits = host_vm_stat.hits; - stat->purgeable_count = vm_page_purgeable_count; - stat->purges = vm_page_purged_count; + stat->purgeable_count = vm_page_purgeable_count; + stat->purges = vm_page_purged_count; - stat->speculative_count = vm_page_speculative_count; + stat->speculative_count = vm_page_speculative_count; - /* - * Fill in extra info 
added in later revisions of the - * vm_statistics data structure. Fill in only what can fit - * in the data structure the caller gave us ! + /* + * Fill in extra info added in later revisions of the + * vm_statistics data structure. Fill in only what can fit + * in the data structure the caller gave us ! + */ + original_count = *count; + *count = HOST_VM_INFO64_REV0_COUNT; /* rev0 already filled in */ + if (original_count >= HOST_VM_INFO64_REV1_COUNT) { + /* rev1 added "throttled count" */ + stat->throttled_count = vm_page_throttled_count; + /* rev1 added "compression" info */ + stat->compressor_page_count = VM_PAGE_COMPRESSOR_COUNT; + stat->compressions = host_vm_stat.compressions; + stat->decompressions = host_vm_stat.decompressions; + stat->swapins = host_vm_stat.swapins; + stat->swapouts = host_vm_stat.swapouts; + /* rev1 added: + * "external page count" + * "anonymous page count" + * "total # of pages (uncompressed) held in the compressor" */ - original_count = *count; - *count = HOST_VM_INFO64_REV0_COUNT; /* rev0 already filled in */ - if (original_count >= HOST_VM_INFO64_REV1_COUNT) { - /* rev1 added "throttled count" */ - stat->throttled_count = vm_page_throttled_count; - /* rev1 added "compression" info */ - stat->compressor_page_count = VM_PAGE_COMPRESSOR_COUNT; - stat->compressions = host_vm_stat.compressions; - stat->decompressions = host_vm_stat.decompressions; - stat->swapins = host_vm_stat.swapins; - stat->swapouts = host_vm_stat.swapouts; - /* rev1 added: - * "external page count" - * "anonymous page count" - * "total # of pages (uncompressed) held in the compressor" - */ - stat->external_page_count = (vm_page_pageable_external_count + local_q_external_count); - stat->internal_page_count = (vm_page_pageable_internal_count + local_q_internal_count); - stat->total_uncompressed_pages_in_compressor = c_segment_pages_compressed; - *count = HOST_VM_INFO64_REV1_COUNT; - } + stat->external_page_count = (vm_page_pageable_external_count + local_q_external_count); + stat->internal_page_count = (vm_page_pageable_internal_count + local_q_internal_count); + stat->total_uncompressed_pages_in_compressor = c_segment_pages_compressed; + *count = HOST_VM_INFO64_REV1_COUNT; + } - return KERN_SUCCESS; + return KERN_SUCCESS; +} + +kern_return_t host_statistics64(host_t host, host_flavor_t flavor, host_info_t info, mach_msg_type_number_t * count); + +kern_return_t +host_statistics64(host_t host, host_flavor_t flavor, host_info64_t info, mach_msg_type_number_t * count) +{ + if (host == HOST_NULL) { + return KERN_INVALID_HOST; } + switch (flavor) { + case HOST_VM_INFO64: /* We were asked to get vm_statistics64 */ + return vm_stats(info, count); + case HOST_EXTMOD_INFO64: /* We were asked to get vm_statistics64 */ { vm_extmod_statistics_t out_extmod_statistics; @@ -1014,18 +993,11 @@ set_sched_stats_active(boolean_t active) uint64_t get_pages_grabbed_count(void) { - processor_t processor; uint64_t pages_grabbed_count = 0; - simple_lock(&processor_list_lock, LCK_GRP_NULL); - - processor = processor_list; - - while (processor) { - pages_grabbed_count += PROCESSOR_DATA(processor, page_grab_count); - processor = processor->processor_list; + percpu_foreach(count, vm_page_grab_count) { + pages_grabbed_count += *count; } - simple_unlock(&processor_list_lock); return pages_grabbed_count; } @@ -1034,50 +1006,52 @@ get_pages_grabbed_count(void) kern_return_t get_sched_statistics(struct _processor_statistics_np * out, uint32_t * count) { - processor_t processor; + uint32_t pos = 0; if (!sched_stats_active) { 
return KERN_FAILURE; } - simple_lock(&processor_list_lock, LCK_GRP_NULL); + percpu_foreach_base(pcpu_base) { + struct sched_statistics stats; + processor_t processor; - if (*count < (processor_count + 1) * sizeof(struct _processor_statistics_np)) { /* One for RT */ - simple_unlock(&processor_list_lock); - return KERN_FAILURE; - } + pos += sizeof(struct _processor_statistics_np); + if (pos > *count) { + return KERN_FAILURE; + } - processor = processor_list; - while (processor) { - struct processor_sched_statistics * stats = &processor->processor_data.sched_stats; + stats = *PERCPU_GET_WITH_BASE(pcpu_base, sched_stats); + processor = PERCPU_GET_WITH_BASE(pcpu_base, processor); out->ps_cpuid = processor->cpu_id; - out->ps_csw_count = stats->csw_count; - out->ps_preempt_count = stats->preempt_count; - out->ps_preempted_rt_count = stats->preempted_rt_count; - out->ps_preempted_by_rt_count = stats->preempted_by_rt_count; - out->ps_rt_sched_count = stats->rt_sched_count; - out->ps_interrupt_count = stats->interrupt_count; - out->ps_ipi_count = stats->ipi_count; - out->ps_timer_pop_count = stats->timer_pop_count; + out->ps_csw_count = stats.csw_count; + out->ps_preempt_count = stats.preempt_count; + out->ps_preempted_rt_count = stats.preempted_rt_count; + out->ps_preempted_by_rt_count = stats.preempted_by_rt_count; + out->ps_rt_sched_count = stats.rt_sched_count; + out->ps_interrupt_count = stats.interrupt_count; + out->ps_ipi_count = stats.ipi_count; + out->ps_timer_pop_count = stats.timer_pop_count; out->ps_runq_count_sum = SCHED(processor_runq_stats_count_sum)(processor); - out->ps_idle_transitions = stats->idle_transitions; - out->ps_quantum_timer_expirations = stats->quantum_timer_expirations; + out->ps_idle_transitions = stats.idle_transitions; + out->ps_quantum_timer_expirations = stats.quantum_timer_expirations; out++; - processor = processor->processor_list; } - *count = (uint32_t)(processor_count * sizeof(struct _processor_statistics_np)); - - simple_unlock(&processor_list_lock); - /* And include RT Queue information */ + pos += sizeof(struct _processor_statistics_np); + if (pos > *count) { + return KERN_FAILURE; + } + bzero(out, sizeof(*out)); out->ps_cpuid = (-1); out->ps_runq_count_sum = SCHED(rt_runq_count_sum)(); out++; - *count += (uint32_t)sizeof(struct _processor_statistics_np); + + *count = pos; return KERN_SUCCESS; } @@ -1258,6 +1232,8 @@ is_valid_host_special_port(int id) ((id <= HOST_LAST_SPECIAL_KERNEL_PORT) || (id > HOST_MAX_SPECIAL_KERNEL_PORT)); } +extern void * XNU_PTRAUTH_SIGNED_PTR("initproc") initproc; + /* * Kernel interface for setting a special port. */ @@ -1278,7 +1254,7 @@ kernel_set_special_port(host_priv_t host_priv, int id, ipc_port_t port) host_lock(host_priv); old_port = host_priv->special[id]; - if ((id == HOST_AMFID_PORT) && (task_pid(current_task()) != 1)) { + if ((id == HOST_AMFID_PORT) && (current_task()->bsd_info != initproc)) { host_unlock(host_priv); return KERN_NO_ACCESS; } @@ -1323,7 +1299,7 @@ kernel_get_special_port(host_priv_t host_priv, int id, ipc_port_t * portp) * routine; use kernel_set_special_port() instead. 
*/ kern_return_t -host_set_special_port(host_priv_t host_priv, int id, ipc_port_t port) +host_set_special_port_from_user(host_priv_t host_priv, int id, ipc_port_t port) { if (host_priv == HOST_PRIV_NULL || id <= HOST_MAX_SPECIAL_KERNEL_PORT || id > HOST_MAX_SPECIAL_PORT) { return KERN_INVALID_ARGUMENT; @@ -1333,6 +1309,16 @@ host_set_special_port(host_priv_t host_priv, int id, ipc_port_t port) return KERN_NO_ACCESS; } + return host_set_special_port(host_priv, id, port); +} + +kern_return_t +host_set_special_port(host_priv_t host_priv, int id, ipc_port_t port) +{ + if (host_priv == HOST_PRIV_NULL || id <= HOST_MAX_SPECIAL_KERNEL_PORT || id > HOST_MAX_SPECIAL_PORT) { + return KERN_INVALID_ARGUMENT; + } + #if CONFIG_MACF if (mac_task_check_set_host_special_port(current_task(), id, port) != 0) { return KERN_NO_ACCESS; @@ -1352,10 +1338,8 @@ host_set_special_port(host_priv_t host_priv, int id, ipc_port_t port) */ kern_return_t -host_get_special_port(host_priv_t host_priv, __unused int node, int id, ipc_port_t * portp) +host_get_special_port_from_user(host_priv_t host_priv, __unused int node, int id, ipc_port_t * portp) { - ipc_port_t port; - if (host_priv == HOST_PRIV_NULL || id == HOST_SECURITY_PORT || id > HOST_MAX_SPECIAL_PORT || id < HOST_MIN_SPECIAL_PORT) { return KERN_INVALID_ARGUMENT; } @@ -1363,14 +1347,25 @@ host_get_special_port(host_priv_t host_priv, __unused int node, int id, ipc_port task_t task = current_task(); if (task && task_is_driver(task) && id > HOST_MAX_SPECIAL_KERNEL_PORT) { /* allow HID drivers to get the sysdiagnose port for keychord handling */ - if (IOTaskHasEntitlement(task, kIODriverKitHIDFamilyEventServiceEntitlementKey) && - id == HOST_SYSDIAGNOSE_PORT) { + if (id == HOST_SYSDIAGNOSE_PORT && + IOTaskHasEntitlement(task, kIODriverKitHIDFamilyEventServiceEntitlementKey)) { goto get_special_port; } return KERN_NO_ACCESS; } - get_special_port: + return host_get_special_port(host_priv, node, id, portp); +} + +kern_return_t +host_get_special_port(host_priv_t host_priv, __unused int node, int id, ipc_port_t * portp) +{ + ipc_port_t port; + + if (host_priv == HOST_PRIV_NULL || id == HOST_SECURITY_PORT || id > HOST_MAX_SPECIAL_PORT || id < HOST_MIN_SPECIAL_PORT) { + return KERN_INVALID_ARGUMENT; + } + host_lock(host_priv); port = realhost.special[id]; *portp = ipc_port_copy_send(port); @@ -1434,7 +1429,7 @@ host_set_atm_diagnostic_flag(host_t host, uint32_t diagnostic_flag) kern_return_t host_set_multiuser_config_flags(host_priv_t host_priv, uint32_t multiuser_config) { -#if CONFIG_EMBEDDED +#if !defined(XNU_TARGET_OS_OSX) if (host_priv == HOST_PRIV_NULL) { return KERN_INVALID_ARGUMENT; } diff --git a/osfmk/kern/host.h b/osfmk/kern/host.h index 8ada4462a..ea288aa66 100644 --- a/osfmk/kern/host.h +++ b/osfmk/kern/host.h @@ -78,7 +78,7 @@ struct host { decl_lck_mtx_data(, lock); /* lock to protect exceptions */ - ipc_port_t special[HOST_MAX_SPECIAL_PORT + 1]; + ipc_port_t XNU_PTRAUTH_SIGNED_PTR("host.special") special[HOST_MAX_SPECIAL_PORT + 1]; struct exception_action exc_actions[EXC_TYPES_COUNT]; }; @@ -106,6 +106,10 @@ typedef struct { extern expired_task_statistics_t dead_task_statistics; +extern kern_return_t host_set_special_port(host_priv_t host_priv, int id, ipc_port_t port); +extern kern_return_t host_get_special_port(host_priv_t host_priv, + __unused int node, int id, ipc_port_t * portp); + #endif /* MACH_KERNEL_PRIVATE */ /* @@ -117,7 +121,6 @@ __BEGIN_DECLS extern host_t host_self(void); extern host_priv_t host_priv_self(void); extern host_security_t 
host_security_self(void); - __END_DECLS #endif /* _KERN_HOST_H_ */ diff --git a/osfmk/kern/host_notify.c b/osfmk/kern/host_notify.c index 36175a0a2..024a9b1c6 100644 --- a/osfmk/kern/host_notify.c +++ b/osfmk/kern/host_notify.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003-2019 Apple Inc. All rights reserved. + * Copyright (c) 2003-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -37,46 +37,33 @@ #include "mach/host_notify_reply.h" -decl_lck_mtx_data(, host_notify_lock); +struct host_notify_entry { + queue_chain_t entries; + ipc_port_t port; +}; +typedef struct host_notify_entry *host_notify_t; + +LCK_GRP_DECLARE(host_notify_lock_grp, "host_notify"); +LCK_MTX_EARLY_DECLARE(host_notify_lock, &host_notify_lock_grp); -lck_mtx_ext_t host_notify_lock_ext; -lck_grp_t host_notify_lock_grp; -lck_attr_t host_notify_lock_attr; -static lck_grp_attr_t host_notify_lock_grp_attr; -static zone_t host_notify_zone; +static ZONE_DECLARE(host_notify_zone, "host_notify", + sizeof(struct host_notify_entry), ZC_NONE); -static queue_head_t host_notify_queue[HOST_NOTIFY_TYPE_MAX + 1]; +static queue_head_t host_notify_queue[HOST_NOTIFY_TYPE_MAX + 1]; static mach_msg_id_t host_notify_replyid[HOST_NOTIFY_TYPE_MAX + 1] = { HOST_CALENDAR_CHANGED_REPLYID, HOST_CALENDAR_SET_REPLYID }; -struct host_notify_entry { - queue_chain_t entries; - ipc_port_t port; -}; - -typedef struct host_notify_entry *host_notify_t; - -void +__startup_func +static void host_notify_init(void) { - int i; - - for (i = 0; i <= HOST_NOTIFY_TYPE_MAX; i++) { + for (int i = 0; i <= HOST_NOTIFY_TYPE_MAX; i++) { queue_init(&host_notify_queue[i]); } - - lck_grp_attr_setdefault(&host_notify_lock_grp_attr); - lck_grp_init(&host_notify_lock_grp, "host_notify", &host_notify_lock_grp_attr); - lck_attr_setdefault(&host_notify_lock_attr); - - lck_mtx_init_ext(&host_notify_lock, &host_notify_lock_ext, &host_notify_lock_grp, &host_notify_lock_attr); - - i = sizeof(struct host_notify_entry); - host_notify_zone = - zinit(i, (4096 * i), (16 * i), "host_notify"); } +STARTUP(MACH_IPC, STARTUP_RANK_FIRST, host_notify_init); kern_return_t host_request_notification( @@ -106,7 +93,8 @@ host_request_notification( lck_mtx_lock(&host_notify_lock); ip_lock(port); - if (!ip_active(port) || port->ip_tempowner || ip_kotype(port) != IKOT_NONE) { + if (!ip_active(port) || port->ip_tempowner || port->ip_specialreply || + ip_is_kolabeled(port) || ip_kotype(port) != IKOT_NONE) { ip_unlock(port); lck_mtx_unlock(&host_notify_lock); diff --git a/osfmk/kern/host_notify.h b/osfmk/kern/host_notify.h index adfffc5e9..6aa5016ab 100644 --- a/osfmk/kern/host_notify.h +++ b/osfmk/kern/host_notify.h @@ -46,8 +46,6 @@ void host_notify_port_destroy( void host_notify_calendar_change(void); void host_notify_calendar_set(void); -void host_notify_init(void); - #endif /* MACH_KERNEL_PRIVATE */ #endif /* _KERN_HOST_NOTIFY_H_ */ diff --git a/osfmk/kern/host_statistics.h b/osfmk/kern/host_statistics.h index aa7e4a5da..9d21a4a4c 100644 --- a/osfmk/kern/host_statistics.h +++ b/osfmk/kern/host_statistics.h @@ -40,19 +40,23 @@ #include #include -#include +#include +#include extern uint64_t get_pages_grabbed_count(void); -#define VM_STAT_INCR(event) \ -MACRO_BEGIN \ - OSAddAtomic64(1, (SInt64 *) (&(PROCESSOR_DATA(current_processor(), vm_stat).event))); \ +PERCPU_DECL(vm_statistics64_data_t, vm_stat); +PERCPU_DECL(uint64_t, vm_page_grab_count); + +#define VM_STAT_INCR(event) \ +MACRO_BEGIN \ + os_atomic_inc(&PERCPU_GET(vm_stat)->event, relaxed); \ MACRO_END -#define 
VM_STAT_INCR_BY(event, amount) \ -MACRO_BEGIN \ - OSAddAtomic64((amount), (SInt64 *) (&(PROCESSOR_DATA(current_processor(), vm_stat).event))); \ +#define VM_STAT_INCR_BY(event, amount) \ +MACRO_BEGIN \ + os_atomic_add(&PERCPU_GET(vm_stat)->event, amount, relaxed); \ MACRO_END #endif /* _KERN_HOST_STATISTICS_H_ */ diff --git a/osfmk/kern/hv_support.h b/osfmk/kern/hv_support.h index a945a18b9..e1dcf768d 100644 --- a/osfmk/kern/hv_support.h +++ b/osfmk/kern/hv_support.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013 Apple Inc. All rights reserved. + * Copyright (c) 2018 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -33,54 +33,13 @@ extern "C" { #endif -#include -#include - -typedef enum { - HV_DEBUG_STATE -} hv_volatile_state_t; - -typedef enum { - HV_TASK_TRAP = 0, - HV_THREAD_TRAP = 1 -} hv_trap_type_t; - -typedef kern_return_t (*hv_trap_t) (void *target, uint64_t arg); - -typedef struct { - const hv_trap_t *traps; - unsigned trap_count; -} hv_trap_table_t; - -typedef struct { - void (*dispatch)(void *vcpu); - void (*preempt)(void *vcpu); - void (*suspend)(void); - void (*thread_destroy)(void *vcpu); - void (*task_destroy)(void *vm); - void (*volatile_state)(void *vcpu, int state); - void (*memory_pressure)(void); -} hv_callbacks_t; - -extern hv_callbacks_t hv_callbacks; -extern int hv_support_available; +#if defined(__x86_64__) +#include +#else +#error unsupported arch +#endif -extern void hv_support_init(void); -extern int hv_get_support(void); -extern void hv_set_task_target(void *target); -extern void hv_set_thread_target(void *target); -extern void *hv_get_task_target(void); -extern void *hv_get_thread_target(void); -extern int hv_get_volatile_state(hv_volatile_state_t state); -extern kern_return_t hv_set_traps(hv_trap_type_t trap_type, - const hv_trap_t *traps, unsigned trap_count); -extern void hv_release_traps(hv_trap_type_t trap_type); -extern kern_return_t hv_set_callbacks(hv_callbacks_t callbacks); -extern void hv_release_callbacks(void); -extern void hv_suspend(void); -extern kern_return_t hv_task_trap(uint64_t index, uint64_t arg); -extern kern_return_t hv_thread_trap(uint64_t index, uint64_t arg); -extern boolean_t hv_ast_pending(void); +extern int hv_disable; #if defined(__cplusplus) } diff --git a/osfmk/kern/hv_support.c b/osfmk/kern/hv_support_kext.c similarity index 85% rename from osfmk/kern/hv_support.c rename to osfmk/kern/hv_support_kext.c index 74a06ea76..ca9054202 100644 --- a/osfmk/kern/hv_support.c +++ b/osfmk/kern/hv_support_kext.c @@ -32,6 +32,7 @@ #include #include #include +#include #if defined(__x86_64__) && CONFIG_VMX #include @@ -41,6 +42,8 @@ int hv_support_available = 0; +int hv_disable = 0; + /* callbacks for tasks/threads with associated hv objects */ hv_callbacks_t hv_callbacks = { .dispatch = NULL, /* thread is being dispatched for execution */ @@ -64,8 +67,8 @@ static hv_trap_table_t hv_trap_table[] = { }; static int hv_callbacks_enabled = 0; -static lck_grp_t *hv_support_lck_grp = NULL; -static lck_mtx_t *hv_support_lck_mtx = NULL; +static LCK_GRP_DECLARE(hv_support_lck_grp, "hv_support"); +static LCK_MTX_DECLARE(hv_support_lck_mtx, &hv_support_lck_grp); /* hv_support boot initialization */ void @@ -74,12 +77,6 @@ hv_support_init(void) #if defined(__x86_64__) && CONFIG_VMX hv_support_available = vmx_hv_support(); #endif - - hv_support_lck_grp = lck_grp_alloc_init("hv_support", LCK_GRP_ATTR_NULL); - assert(hv_support_lck_grp); - - hv_support_lck_mtx = lck_mtx_alloc_init(hv_support_lck_grp, LCK_ATTR_NULL); - 
assert(hv_support_lck_mtx); } /* returns true if hv_support is available on this machine */ @@ -141,14 +138,14 @@ hv_set_traps(hv_trap_type_t trap_type, const hv_trap_t *traps, hv_trap_table_t *trap_table = &hv_trap_table[trap_type]; kern_return_t kr = KERN_FAILURE; - lck_mtx_lock(hv_support_lck_mtx); + lck_mtx_lock(&hv_support_lck_mtx); if (trap_table->trap_count == 0) { trap_table->traps = traps; OSMemoryBarrier(); trap_table->trap_count = trap_count; kr = KERN_SUCCESS; } - lck_mtx_unlock(hv_support_lck_mtx); + lck_mtx_unlock(&hv_support_lck_mtx); return kr; } @@ -159,11 +156,11 @@ hv_release_traps(hv_trap_type_t trap_type) { hv_trap_table_t *trap_table = &hv_trap_table[trap_type]; - lck_mtx_lock(hv_support_lck_mtx); + lck_mtx_lock(&hv_support_lck_mtx); trap_table->trap_count = 0; OSMemoryBarrier(); trap_table->traps = NULL; - lck_mtx_unlock(hv_support_lck_mtx); + lck_mtx_unlock(&hv_support_lck_mtx); } /* register callbacks for certain task/thread events for tasks/threads with @@ -173,13 +170,13 @@ hv_set_callbacks(hv_callbacks_t callbacks) { kern_return_t kr = KERN_FAILURE; - lck_mtx_lock(hv_support_lck_mtx); + lck_mtx_lock(&hv_support_lck_mtx); if (hv_callbacks_enabled == 0) { hv_callbacks = callbacks; hv_callbacks_enabled = 1; kr = KERN_SUCCESS; } - lck_mtx_unlock(hv_support_lck_mtx); + lck_mtx_unlock(&hv_support_lck_mtx); return kr; } @@ -188,19 +185,18 @@ hv_set_callbacks(hv_callbacks_t callbacks) void hv_release_callbacks(void) { - lck_mtx_lock(hv_support_lck_mtx); + lck_mtx_lock(&hv_support_lck_mtx); hv_callbacks = (hv_callbacks_t) { .dispatch = NULL, .preempt = NULL, .suspend = NULL, .thread_destroy = NULL, .task_destroy = NULL, - .volatile_state = NULL, - .memory_pressure = NULL + .volatile_state = NULL }; hv_callbacks_enabled = 0; - lck_mtx_unlock(hv_support_lck_mtx); + lck_mtx_unlock(&hv_support_lck_mtx); } /* system suspend notification */ @@ -235,5 +231,23 @@ hv_thread_trap(uint64_t index, uint64_t arg) boolean_t hv_ast_pending(void) { - return current_cpu_datap()->cpu_pending_ast & (AST_APC | AST_BSD); + return current_cpu_datap()->cpu_pending_ast != 0; +} + +void __attribute__((__noreturn__)) +hv_port_notify(mach_msg_header_t *msg __unused) +{ + panic("%s: not supported in this configuration", __func__); +} + +void +hv_trace_guest_enter(uint32_t vcpu_id, uint64_t *vcpu_regs) +{ + DTRACE_HV2(guest__enter, uint32_t, vcpu_id, uint64_t *, vcpu_regs); +} + +void +hv_trace_guest_exit(uint32_t vcpu_id, uint64_t *vcpu_regs) +{ + DTRACE_HV2(guest__exit, uint32_t, vcpu_id, uint64_t *, vcpu_regs); } diff --git a/osfmk/kern/hv_support_kext.h b/osfmk/kern/hv_support_kext.h new file mode 100644 index 000000000..0b7fa64d1 --- /dev/null +++ b/osfmk/kern/hv_support_kext.h @@ -0,0 +1,94 @@ +/* + * Copyright (c) 2013 Apple Inc. All rights reserved. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. 
+ * + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ + */ + +#ifndef _KERN_HV_SUPPORT_KEXT_H_ +#define _KERN_HV_SUPPORT_KEXT_H_ + +#if defined(__cplusplus) +extern "C" { +#endif + +#include +#include +#include + +typedef enum { + HV_DEBUG_STATE +} hv_volatile_state_t; + +typedef enum { + HV_TASK_TRAP = 0, + HV_THREAD_TRAP = 1 +} hv_trap_type_t; + +typedef kern_return_t (*hv_trap_t) (void *target, uint64_t arg); + +typedef struct { + const hv_trap_t *traps; + unsigned trap_count; +} hv_trap_table_t; + +typedef struct { + void (*dispatch)(void *vcpu); + void (*preempt)(void *vcpu); + void (*suspend)(void); + void (*thread_destroy)(void *vcpu); + void (*task_destroy)(void *vm); + void (*volatile_state)(void *vcpu, int state); + void (*memory_pressure)(void); +} hv_callbacks_t; + +extern hv_callbacks_t hv_callbacks; +extern int hv_support_available; + +extern void hv_support_init(void); +extern int hv_get_support(void); +extern void hv_set_task_target(void *target); +extern void hv_set_thread_target(void *target); +extern void *hv_get_task_target(void); +extern void *hv_get_thread_target(void); +extern int hv_get_volatile_state(hv_volatile_state_t state); +extern kern_return_t hv_set_traps(hv_trap_type_t trap_type, + const hv_trap_t *traps, unsigned trap_count); +extern void hv_release_traps(hv_trap_type_t trap_type); +extern kern_return_t hv_set_callbacks(hv_callbacks_t callbacks); +extern void hv_release_callbacks(void); +extern void hv_suspend(void); +extern kern_return_t hv_task_trap(uint64_t index, uint64_t arg); +extern kern_return_t hv_thread_trap(uint64_t index, uint64_t arg); +extern boolean_t hv_ast_pending(void); +extern void hv_port_notify(mach_msg_header_t *msg); + +extern void hv_trace_guest_enter(uint32_t vcpu_id, uint64_t *vcpu_regs); +extern void hv_trace_guest_exit(uint32_t vcpu_id, uint64_t *vcpu_regs); + +#if defined(__cplusplus) +} +#endif + +#endif /* _KERN_HV_SUPPORT_KEXT_H_ */ diff --git a/osfmk/kern/ipc_host.c b/osfmk/kern/ipc_host.c index 7cf4b903b..427bcee12 100644 --- a/osfmk/kern/ipc_host.c +++ b/osfmk/kern/ipc_host.c @@ -93,7 +93,6 @@ ref_pset_port_locked( */ extern lck_grp_t host_notify_lock_grp; -extern lck_attr_t host_notify_lock_attr; void ipc_host_init(void) @@ -101,7 +100,7 @@ ipc_host_init(void) ipc_port_t port; int i; - lck_mtx_init(&realhost.lock, &host_notify_lock_grp, &host_notify_lock_attr); + lck_mtx_init(&realhost.lock, &host_notify_lock_grp, LCK_ATTR_NULL); /* * Allocate and set up the two host ports. diff --git a/osfmk/kern/ipc_kobject.c b/osfmk/kern/ipc_kobject.c index c942f3141..9a3e468b2 100644 --- a/osfmk/kern/ipc_kobject.c +++ b/osfmk/kern/ipc_kobject.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2016 Apple Inc. All rights reserved. + * Copyright (c) 2000-2020 Apple Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -105,6 +105,7 @@ #include #include +#include #include #include @@ -146,12 +147,17 @@ #include #include +#if HYPERVISOR +#include +#endif + #include #include extern char *proc_name_address(void *p); -extern int proc_pid(void *p); +struct proc; +extern int proc_pid(struct proc *p); /* * Routine: ipc_kobject_notify @@ -167,6 +173,7 @@ typedef struct { mach_msg_id_t num; mig_routine_t routine; int size; + int kobjidx; #if MACH_COUNTERS mach_counter_t callcount; #endif @@ -175,17 +182,21 @@ typedef struct { #define MAX_MIG_ENTRIES 1031 #define MIG_HASH(x) (x) +#define KOBJ_IDX_NOT_SET (-1) + #ifndef max #define max(a, b) (((a) > (b)) ? (a) : (b)) #endif /* max */ -static mig_hash_t mig_buckets[MAX_MIG_ENTRIES]; -static int mig_table_max_displ; -static mach_msg_size_t mig_reply_size = sizeof(mig_reply_error_t); +static SECURITY_READ_ONLY_LATE(mig_hash_t) mig_buckets[MAX_MIG_ENTRIES]; +static SECURITY_READ_ONLY_LATE(int) mig_table_max_displ; +SECURITY_READ_ONLY_LATE(int) mach_kobj_count; /* count of total number of kobjects */ -static zone_t ipc_kobject_label_zone; +static ZONE_DECLARE(ipc_kobject_label_zone, "ipc kobject labels", + sizeof(struct ipc_kobject_label), ZC_NONE); -const struct mig_subsystem *mig_e[] = { +__startup_data +static const struct mig_subsystem *mig_e[] = { (const struct mig_subsystem *)&mach_vm_subsystem, (const struct mig_subsystem *)&mach_port_subsystem, (const struct mig_subsystem *)&mach_host_subsystem, @@ -222,6 +233,7 @@ const struct mig_subsystem *mig_e[] = { #if CONFIG_ARCADE (const struct mig_subsystem *)&arcade_register_subsystem, #endif + (const struct mig_subsystem *)&mach_eventlink_subsystem, }; static void @@ -260,31 +272,65 @@ mig_init(void) } else { mig_buckets[pos].size = mig_e[i]->maxsize; } + mig_buckets[pos].kobjidx = KOBJ_IDX_NOT_SET; mig_table_max_displ = max(howmany, mig_table_max_displ); + mach_kobj_count++; } } } - printf("mig_table_max_displ = %d\n", mig_table_max_displ); + printf("mig_table_max_displ = %d mach_kobj_count = %d\n", + mig_table_max_displ, mach_kobj_count); } +STARTUP(MACH_IPC, STARTUP_RANK_FIRST, mig_init); /* - * Routine: ipc_kobject_init - * Purpose: - * Deliver notifications to kobjects that care about them. + * Do a hash table lookup for given msgh_id. Return 0 + * if not found. */ -void -ipc_kobject_init(void) +static mig_hash_t * +find_mig_hash_entry(int msgh_id) +{ + unsigned int i = (unsigned int)MIG_HASH(msgh_id); + int max_iter = mig_table_max_displ; + mig_hash_t *ptr; + + do { + ptr = &mig_buckets[i++ % MAX_MIG_ENTRIES]; + } while (msgh_id != ptr->num && ptr->num && --max_iter); + + if (!ptr->routine || msgh_id != ptr->num) { + ptr = (mig_hash_t *)0; + } else { +#if MACH_COUNTERS + ptr->callcount++; +#endif + } + + return ptr; +} + +/* + * Routine: ipc_kobject_set_kobjidx + * Purpose: + * Set the index for the kobject filter + * mask for a given message ID. 
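The kobject index recorded here feeds the per-task MIG filter mask consulted later in ipc_kobject_server(): a set bit lets the routine dispatch without a mac_task_kobj_msg_evaluate() callout, a clear bit defers to the MAC policy. The self-contained sketch below models just that bitmap check; the bit helpers stand in for the kernel's bitstr_* macros, and the count and index values are made up for illustration.

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

#define EXAMPLE_KOBJ_COUNT 64          /* the kernel computes this in mig_init() */
#define KOBJ_IDX_NOT_SET   (-1)

/* Stand-ins for the kernel's bitstr_set()/bitstr_test(): one bit per kobject routine. */
static void
bit_set(uint8_t *mask, int idx)
{
    mask[idx / 8] |= (uint8_t)(1u << (idx % 8));
}

static bool
bit_test(const uint8_t *mask, int idx)
{
    return (mask[idx / 8] >> (idx % 8)) & 1u;
}

int
main(void)
{
    uint8_t filter_mask[(EXAMPLE_KOBJ_COUNT + 7) / 8] = { 0 };

    /* Suppose index 3 was assigned to some msgh_id (the kernel does this via
     * ipc_kobject_set_kobjidx()) and the filter allows that routine outright. */
    int idx = 3;
    bit_set(filter_mask, idx);

    /* The per-message decision: clear bit -> ask the policy; set bit, or an
     * unfiltered routine (idx == KOBJ_IDX_NOT_SET) -> dispatch directly. */
    if (idx != KOBJ_IDX_NOT_SET && !bit_test(filter_mask, idx)) {
        printf("index %d: ask the MAC policy before dispatching\n", idx);
    } else {
        printf("index %d: dispatch without a policy callout\n", idx);
    }
    return 0;
}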
+ */ +kern_return_t +ipc_kobject_set_kobjidx( + int msgh_id, + int index) { - int label_max = CONFIG_TASK_MAX + CONFIG_THREAD_MAX + 1000 /* UEXT estimate */; + mig_hash_t *ptr = find_mig_hash_entry(msgh_id); + + if (ptr == (mig_hash_t *)0) { + return KERN_INVALID_ARGUMENT; + } - mig_init(); + assert(index < mach_kobj_count); + ptr->kobjidx = index; - ipc_kobject_label_zone = - zinit(sizeof(struct ipc_kobject_label), - label_max * sizeof(struct ipc_kobject_label), - sizeof(struct ipc_kobject_label), - "ipc kobject labels"); + return KERN_SUCCESS; } /* @@ -306,7 +352,7 @@ ipc_kobject_server( ipc_kmsg_t reply; kern_return_t kr; ipc_port_t replyp = IPC_PORT_NULL; - mach_msg_format_0_trailer_t *trailer; + mach_msg_max_trailer_t *trailer; mig_hash_t *ptr; task_t task = TASK_NULL; uint32_t exec_token; @@ -329,26 +375,15 @@ ipc_kobject_server( goto msgdone; } } - /* - * Find corresponding mig_hash entry if any - */ - { - unsigned int i = (unsigned int)MIG_HASH(request_msgh_id); - int max_iter = mig_table_max_displ; - do { - ptr = &mig_buckets[i++ % MAX_MIG_ENTRIES]; - } while (request_msgh_id != ptr->num && ptr->num && --max_iter); + /* Find corresponding mig_hash entry, if any */ + ptr = find_mig_hash_entry(request_msgh_id); - if (!ptr->routine || request_msgh_id != ptr->num) { - ptr = (mig_hash_t *)0; - reply_size = mig_reply_size; - } else { - reply_size = ptr->size; -#if MACH_COUNTERS - ptr->callcount++; -#endif - } + /* Get the reply_size. */ + if (ptr == (mig_hash_t *)0) { + reply_size = sizeof(mig_reply_error_t); + } else { + reply_size = ptr->size; } /* round up for trailer size */ @@ -401,12 +436,36 @@ ipc_kobject_server( * Check if the port is a task port, if its a task port then * snapshot the task exec token before the mig routine call. */ - if (ikot == IKOT_TASK) { - task = convert_port_to_task_with_exec_token(port, &exec_token); + if (ikot == IKOT_TASK_CONTROL) { + task = convert_port_to_task_with_exec_token(port, &exec_token, TRUE); } +#if CONFIG_MACF + int idx = ptr->kobjidx; + task_t curtask = current_task(); + uint8_t *filter_mask = curtask->mach_kobj_filter_mask; + + /* Check kobject mig filter mask, if exists. */ + if (__improbable(filter_mask != NULL && idx != KOBJ_IDX_NOT_SET && + !bitstr_test(filter_mask, idx))) { + /* Not in filter mask, evaluate policy. 
*/ + if (mac_task_kobj_msg_evaluate != NULL) { + kr = mac_task_kobj_msg_evaluate(get_bsdtask_info(curtask), + request_msgh_id, idx); + if (kr != KERN_SUCCESS) { + ((mig_reply_error_t *) reply->ikm_header)->RetCode = kr; + goto skip_kobjcall; + } + } + } +#endif /* CONFIG_MACF */ + (*ptr->routine)(request->ikm_header, reply->ikm_header); +#if CONFIG_MACF +skip_kobjcall: +#endif + /* Check if the exec token changed during the mig routine */ if (task != TASK_NULL) { if (exec_token != task->exec_token) { @@ -580,10 +639,11 @@ msgdone: reply = new_reply; } - trailer = (mach_msg_format_0_trailer_t *) + trailer = (mach_msg_max_trailer_t *) ((vm_offset_t)reply->ikm_header + (int)reply->ikm_header->msgh_size); - + bzero(trailer, sizeof(*trailer)); trailer->msgh_sender = KERNEL_SECURITY_TOKEN; + trailer->msgh_audit = KERNEL_AUDIT_TOKEN; trailer->msgh_trailer_type = MACH_MSG_TRAILER_FORMAT_0; trailer->msgh_trailer_size = MACH_MSG_TRAILER_MINIMUM_SIZE; @@ -759,17 +819,41 @@ boolean_t ipc_kobject_make_send_lazy_alloc_port( ipc_port_t *port_store, ipc_kobject_t kobject, - ipc_kobject_type_t type) + ipc_kobject_type_t type, + boolean_t __ptrauth_only should_ptrauth, + uint64_t __ptrauth_only ptrauth_discriminator) { - ipc_port_t port, previous; + ipc_port_t port, previous, __ptrauth_only port_addr; boolean_t rc = FALSE; port = os_atomic_load(port_store, dependency); +#if __has_feature(ptrauth_calls) + /* If we're on a ptrauth system and this port is signed, authenticate and strip the pointer */ + if (should_ptrauth && IP_VALID(port)) { + port = ptrauth_auth_data(port, + ptrauth_key_process_independent_data, + ptrauth_blend_discriminator(port_store, ptrauth_discriminator)); + } +#endif // __has_feature(ptrauth_calls) + if (!IP_VALID(port)) { port = ipc_kobject_alloc_port(kobject, type, IPC_KOBJECT_ALLOC_MAKE_SEND | IPC_KOBJECT_ALLOC_NSREQUEST); - if (os_atomic_cmpxchgv(port_store, IP_NULL, port, &previous, release)) { + +#if __has_feature(ptrauth_calls) + if (should_ptrauth) { + port_addr = ptrauth_sign_unauthenticated(port, + ptrauth_key_process_independent_data, + ptrauth_blend_discriminator(port_store, ptrauth_discriminator)); + } else { + port_addr = port; + } +#else + port_addr = port; +#endif // __has_feature(ptrauth_calls) + + if (os_atomic_cmpxchgv(port_store, IP_NULL, port_addr, &previous, release)) { return TRUE; } @@ -952,6 +1036,11 @@ ipc_kobject_label_check( return TRUE; } + /* Never OK to copyout the receive right for a labeled kobject */ + if (msgt_name == MACH_MSG_TYPE_PORT_RECEIVE) { + panic("ipc_kobject_label_check: attempted receive right copyout for labeled kobject"); + } + labelp = port->ip_kolabel; return (labelp->ikol_label & space->is_label) == labelp->ikol_label; } @@ -998,7 +1087,11 @@ ipc_kobject_notify( semaphore_notify(request_header); return TRUE; - case IKOT_TASK: + case IKOT_EVENTLINK: + ipc_eventlink_notify(request_header); + return TRUE; + + case IKOT_TASK_CONTROL: task_port_notify(request_header); return TRUE; @@ -1035,10 +1128,22 @@ ipc_kobject_notify( case IKOT_WORK_INTERVAL: work_interval_port_notify(request_header); return TRUE; - + case IKOT_TASK_READ: + case IKOT_TASK_INSPECT: + task_port_with_flavor_notify(request_header); + return TRUE; + case IKOT_THREAD_READ: + case IKOT_THREAD_INSPECT: + thread_port_with_flavor_notify(request_header); + return TRUE; case IKOT_SUID_CRED: suid_cred_notify(request_header); return TRUE; +#if HYPERVISOR + case IKOT_HYPERVISOR: + hv_port_notify(request_header); + return TRUE; +#endif } break; diff --git a/osfmk/kern/ipc_kobject.h 
b/osfmk/kern/ipc_kobject.h index 03014d1dc..02614a531 100644 --- a/osfmk/kern/ipc_kobject.h +++ b/osfmk/kern/ipc_kobject.h @@ -89,17 +89,17 @@ typedef natural_t ipc_kobject_type_t; -#define IKOT_NONE 0 -#define IKOT_THREAD 1 -#define IKOT_TASK 2 -#define IKOT_HOST 3 +#define IKOT_NONE 0 +#define IKOT_THREAD_CONTROL 1 +#define IKOT_TASK_CONTROL 2 +#define IKOT_HOST 3 #define IKOT_HOST_PRIV 4 #define IKOT_PROCESSOR 5 -#define IKOT_PSET 6 +#define IKOT_PSET 6 #define IKOT_PSET_NAME 7 -#define IKOT_TIMER 8 +#define IKOT_TIMER 8 #define IKOT_PAGING_REQUEST 9 -#define IKOT_MIG 10 +#define IKOT_MIG 10 #define IKOT_MEMORY_OBJECT 11 #define IKOT_XMM_PAGER 12 #define IKOT_XMM_KERNEL 13 @@ -107,20 +107,20 @@ typedef natural_t ipc_kobject_type_t; #define IKOT_UND_REPLY 15 #define IKOT_HOST_NOTIFY 16 #define IKOT_HOST_SECURITY 17 -#define IKOT_LEDGER 18 +#define IKOT_LEDGER 18 #define IKOT_MASTER_DEVICE 19 #define IKOT_TASK_NAME 20 #define IKOT_SUBSYSTEM 21 #define IKOT_IO_DONE_QUEUE 22 #define IKOT_SEMAPHORE 23 #define IKOT_LOCK_SET 24 -#define IKOT_CLOCK 25 +#define IKOT_CLOCK 25 #define IKOT_CLOCK_CTRL 26 #define IKOT_IOKIT_IDENT 27 #define IKOT_NAMED_ENTRY 28 #define IKOT_IOKIT_CONNECT 29 #define IKOT_IOKIT_OBJECT 30 -#define IKOT_UPL 31 +#define IKOT_UPL 31 #define IKOT_MEM_OBJ_CONTROL 32 #define IKOT_AU_SESSIONPORT 33 #define IKOT_FILEPORT 34 @@ -132,21 +132,31 @@ typedef natural_t ipc_kobject_type_t; #define IKOT_UX_HANDLER 40 #define IKOT_UEXT_OBJECT 41 #define IKOT_ARCADE_REG 42 - +#define IKOT_EVENTLINK 43 +#define IKOT_TASK_INSPECT 44 +#define IKOT_TASK_READ 45 +#define IKOT_THREAD_INSPECT 46 +#define IKOT_THREAD_READ 47 #define IKOT_SUID_CRED 48 +#define IKOT_HYPERVISOR 49 /* * Add new entries here and adjust IKOT_UNKNOWN. * Please keep ipc/ipc_object.c:ikot_print_array up to date. */ -#define IKOT_UNKNOWN 49 /* magic catchall */ +#define IKOT_UNKNOWN 50 /* magic catchall */ #define IKOT_MAX_TYPE (IKOT_UNKNOWN+1) /* # of IKOT_ types */ +/* set the bitstring index for kobject */ +extern kern_return_t ipc_kobject_set_kobjidx( + int msgid, + int index); + #ifdef MACH_KERNEL_PRIVATE struct ipc_kobject_label { ipc_label_t ikol_label; /* [private] mandatory access label */ - ipc_kobject_t ikol_kobject; /* actual kobject address */ + ipc_kobject_t XNU_PTRAUTH_SIGNED_PTR("ipc_kobject_label.ikol_kobject") ikol_kobject; /* actual kobject address */ }; /* initialization of kobject subsystem */ @@ -200,7 +210,9 @@ extern ipc_port_t ipc_kobject_alloc_labeled_port( extern boolean_t ipc_kobject_make_send_lazy_alloc_port( ipc_port_t *port_store, ipc_kobject_t kobject, - ipc_kobject_type_t type) __result_use_check; + ipc_kobject_type_t type, + boolean_t should_ptrauth, + uint64_t ptrauth_discriminator) __result_use_check; /* Makes a send right, lazily allocating a kobject port, arming for no-senders, never fails */ extern boolean_t ipc_kobject_make_send_lazy_alloc_labeled_port( diff --git a/osfmk/kern/ipc_mig.c b/osfmk/kern/ipc_mig.c index 6896e3793..d47764b90 100644 --- a/osfmk/kern/ipc_mig.c +++ b/osfmk/kern/ipc_mig.c @@ -485,31 +485,15 @@ kernel_mach_msg_rpc( } } - /* - * Check to see how much of the message/trailer can be received. - * We chose the maximum trailer that will fit, since we don't - * have options telling us which trailer elements the caller needed. 
- */ - if (rcv_size >= kmsg->ikm_header->msgh_size) { - mach_msg_format_0_trailer_t *trailer = (mach_msg_format_0_trailer_t *) - ((vm_offset_t)kmsg->ikm_header + kmsg->ikm_header->msgh_size); + mach_msg_format_0_trailer_t *trailer = (mach_msg_format_0_trailer_t *) + ((vm_offset_t)kmsg->ikm_header + kmsg->ikm_header->msgh_size); - if (rcv_size >= kmsg->ikm_header->msgh_size + MAX_TRAILER_SIZE) { - /* Enough room for a maximum trailer */ - trailer->msgh_trailer_size = MAX_TRAILER_SIZE; - } else if (rcv_size < kmsg->ikm_header->msgh_size + - trailer->msgh_trailer_size) { - /* no room for even the basic (default) trailer */ - trailer->msgh_trailer_size = 0; - } - assert(trailer->msgh_trailer_type == MACH_MSG_TRAILER_FORMAT_0); - rcv_size = kmsg->ikm_header->msgh_size + trailer->msgh_trailer_size; - mr = MACH_MSG_SUCCESS; - } else { - mr = MACH_RCV_TOO_LARGE; + /* must be able to receive message proper */ + if (rcv_size < kmsg->ikm_header->msgh_size) { + ipc_kmsg_destroy(kmsg); + return MACH_RCV_TOO_LARGE; } - /* * We want to preserve rights and memory in reply! * We don't have to put them anywhere; just leave them @@ -524,6 +508,27 @@ kernel_mach_msg_rpc( #else ipc_kmsg_copyout_to_kernel(kmsg, ipc_space_reply); #endif + + /* Determine what trailer bits we can receive (as no option specified) */ + if (rcv_size < kmsg->ikm_header->msgh_size + MACH_MSG_TRAILER_MINIMUM_SIZE) { + rcv_size = kmsg->ikm_header->msgh_size; + } else { + if (rcv_size >= kmsg->ikm_header->msgh_size + MAX_TRAILER_SIZE) { + /* + * Enough room for a maximum trailer. + * JMM - we really should set the expected receiver-set fields: + * (seqno, context, filterid, etc...) but nothing currently + * expects them anyway. + */ + trailer->msgh_trailer_size = MAX_TRAILER_SIZE; + } else { + assert(trailer->msgh_trailer_size == MACH_MSG_TRAILER_MINIMUM_SIZE); + } + rcv_size = kmsg->ikm_header->msgh_size + trailer->msgh_trailer_size; + } + assert(trailer->msgh_trailer_type == MACH_MSG_TRAILER_FORMAT_0); + mr = MACH_MSG_SUCCESS; + ipc_kmsg_put_to_kernel(msg, kmsg, rcv_size); return mr; } @@ -645,7 +650,7 @@ mach_msg_overwrite( mach_msg_size_t rcv_size, mach_port_name_t rcv_name, __unused mach_msg_timeout_t msg_timeout, - mach_msg_priority_t override, + mach_msg_priority_t priority, __unused mach_msg_header_t *rcv_msg, __unused mach_msg_size_t rcv_msg_size) { @@ -696,12 +701,13 @@ mach_msg_overwrite( * the cases where no implicit data is requested. 
*/ max_trailer = (mach_msg_max_trailer_t *) ((vm_offset_t)kmsg->ikm_header + send_size); + bzero(max_trailer, sizeof(*max_trailer)); max_trailer->msgh_sender = current_thread()->task->sec_token; max_trailer->msgh_audit = current_thread()->task->audit_token; max_trailer->msgh_trailer_type = MACH_MSG_TRAILER_FORMAT_0; max_trailer->msgh_trailer_size = MACH_MSG_TRAILER_MINIMUM_SIZE; - mr = ipc_kmsg_copyin(kmsg, space, map, override, &option); + mr = ipc_kmsg_copyin(kmsg, space, map, priority, &option); if (mr != MACH_MSG_SUCCESS) { ipc_kmsg_free(kmsg); @@ -718,6 +724,7 @@ mach_msg_overwrite( if (option & MACH_RCV_MSG) { thread_t self = current_thread(); + mach_vm_address_t context; do { ipc_object_t object; @@ -749,8 +756,7 @@ mach_msg_overwrite( return mr; } - trailer_size = ipc_kmsg_add_trailer(kmsg, space, option, current_thread(), seqno, TRUE, - kmsg->ikm_header->msgh_remote_port->ip_context); + trailer_size = ipc_kmsg_trailer_size(option, self); if (rcv_size < (kmsg->ikm_header->msgh_size + trailer_size)) { ipc_kmsg_copyout_dest(kmsg, space); @@ -759,9 +765,14 @@ mach_msg_overwrite( return MACH_RCV_TOO_LARGE; } + /* Save destination port context for the trailer before copyout */ + context = kmsg->ikm_header->msgh_remote_port->ip_context; + mr = ipc_kmsg_copyout(kmsg, space, map, MACH_MSG_BODY_NULL, option); + if (mr != MACH_MSG_SUCCESS) { if ((mr & ~MACH_MSG_MASK) == MACH_RCV_BODY_ERROR) { + ipc_kmsg_add_trailer(kmsg, space, option, self, seqno, TRUE, context); ipc_kmsg_put_to_kernel(msg, kmsg, kmsg->ikm_header->msgh_size + trailer_size); } else { @@ -772,7 +783,7 @@ mach_msg_overwrite( return mr; } - + ipc_kmsg_add_trailer(kmsg, space, option, self, seqno, TRUE, context); (void) memcpy((void *) msg, (const void *) kmsg->ikm_header, kmsg->ikm_header->msgh_size + trailer_size); ipc_kmsg_free(kmsg); @@ -1025,7 +1036,7 @@ convert_mig_object_to_port( * if this is the first send right */ if (!ipc_kobject_make_send_lazy_alloc_port(&mig_object->port, - (ipc_kobject_t) mig_object, IKOT_MIG)) { + (ipc_kobject_t) mig_object, IKOT_MIG, false, 0)) { mig_object_deallocate(mig_object); } diff --git a/osfmk/kern/ipc_sync.c b/osfmk/kern/ipc_sync.c index cd1dd1afd..643c38fbb 100644 --- a/osfmk/kern/ipc_sync.c +++ b/osfmk/kern/ipc_sync.c @@ -140,7 +140,7 @@ convert_semaphore_to_port(semaphore_t semaphore) * semaphore_notify if this is the first send right */ if (!ipc_kobject_make_send_lazy_alloc_port(&semaphore->port, - (ipc_kobject_t) semaphore, IKOT_SEMAPHORE)) { + (ipc_kobject_t) semaphore, IKOT_SEMAPHORE, false, 0)) { semaphore_dereference(semaphore); } return semaphore->port; diff --git a/osfmk/kern/ipc_tt.c b/osfmk/kern/ipc_tt.c index 44c93bae2..7a7d3b783 100644 --- a/osfmk/kern/ipc_tt.c +++ b/osfmk/kern/ipc_tt.c @@ -102,16 +102,28 @@ #include #endif -#if CONFIG_EMBEDDED && !SECURE_KERNEL +#if !defined(XNU_TARGET_OS_OSX) && !SECURE_KERNEL extern int cs_relax_platform_task_ports; #endif +extern boolean_t IOTaskHasEntitlement(task_t, const char *); + /* forward declarations */ -task_t convert_port_to_locked_task(ipc_port_t port); +task_t convert_port_to_locked_task(ipc_port_t port, boolean_t eval); task_inspect_t convert_port_to_locked_task_inspect(ipc_port_t port); +task_read_t convert_port_to_locked_task_read(ipc_port_t port); +static task_read_t convert_port_to_task_read_locked(ipc_port_t port); +static kern_return_t port_allowed_with_task_flavor(int which, mach_task_flavor_t flavor); +static kern_return_t port_allowed_with_thread_flavor(int which, mach_thread_flavor_t flavor); +static 
task_inspect_t convert_port_to_task_inspect_locked(ipc_port_t port); static void ipc_port_bind_special_reply_port_locked(ipc_port_t port); static kern_return_t ipc_port_unbind_special_reply_port(thread_t thread, boolean_t unbind_active_port); kern_return_t task_conversion_eval(task_t caller, task_t victim); +static ipc_space_t convert_port_to_space_no_eval(ipc_port_t port); +static task_t convert_port_to_task_no_eval(ipc_port_t port); +static thread_t convert_port_to_thread_no_eval(ipc_port_t port); +static ipc_port_t convert_task_to_port_with_flavor(task_t task, mach_task_flavor_t flavor); +static ipc_port_t convert_thread_to_port_with_flavor(thread_t thread, mach_thread_flavor_t flavor); /* * Routine: ipc_task_init @@ -132,6 +144,7 @@ ipc_task_init( ipc_space_t space; ipc_port_t kport; ipc_port_t nport; + kern_return_t kr; int i; @@ -144,6 +157,7 @@ ipc_task_init( space->is_task = task; kport = ipc_port_alloc_kernel(); + if (kport == IP_NULL) { panic("ipc_task_init"); } @@ -154,17 +168,22 @@ ipc_task_init( } itk_lock_init(task); - task->itk_self = kport; - task->itk_nself = nport; - task->itk_resume = IP_NULL; /* Lazily allocated on-demand */ + task->itk_self[TASK_FLAVOR_CONTROL] = kport; + task->itk_self[TASK_FLAVOR_NAME] = nport; + + /* Lazily allocated on-demand */ + task->itk_self[TASK_FLAVOR_INSPECT] = IP_NULL; + task->itk_self[TASK_FLAVOR_READ] = IP_NULL; + task->itk_resume = IP_NULL; + if (task_is_a_corpse_fork(task)) { /* * No sender's notification for corpse would not * work with a naked send right in kernel. */ - task->itk_sself = IP_NULL; + task->itk_settable_self = IP_NULL; } else { - task->itk_sself = ipc_port_make_send(kport); + task->itk_settable_self = ipc_port_make_send(kport); } task->itk_debug_control = IP_NULL; task->itk_space = space; @@ -202,7 +221,7 @@ ipc_task_init( } } else { itk_lock(parent); - assert(parent->itk_self != IP_NULL); + assert(parent->itk_self[TASK_FLAVOR_CONTROL] != IP_NULL); /* inherit registered ports */ @@ -259,16 +278,27 @@ ipc_task_enable( { ipc_port_t kport; ipc_port_t nport; + ipc_port_t iport; + ipc_port_t rdport; itk_lock(task); - kport = task->itk_self; + kport = task->itk_self[TASK_FLAVOR_CONTROL]; if (kport != IP_NULL) { - ipc_kobject_set(kport, (ipc_kobject_t) task, IKOT_TASK); + ipc_kobject_set(kport, (ipc_kobject_t) task, IKOT_TASK_CONTROL); } - nport = task->itk_nself; + nport = task->itk_self[TASK_FLAVOR_NAME]; if (nport != IP_NULL) { ipc_kobject_set(nport, (ipc_kobject_t) task, IKOT_TASK_NAME); } + iport = task->itk_self[TASK_FLAVOR_INSPECT]; + if (iport != IP_NULL) { + ipc_kobject_set(iport, (ipc_kobject_t) task, IKOT_TASK_INSPECT); + } + rdport = task->itk_self[TASK_FLAVOR_READ]; + if (rdport != IP_NULL) { + ipc_kobject_set(rdport, (ipc_kobject_t) task, IKOT_TASK_READ); + } + itk_unlock(task); } @@ -286,17 +316,27 @@ ipc_task_disable( { ipc_port_t kport; ipc_port_t nport; + ipc_port_t iport; + ipc_port_t rdport; ipc_port_t rport; itk_lock(task); - kport = task->itk_self; + kport = task->itk_self[TASK_FLAVOR_CONTROL]; if (kport != IP_NULL) { ipc_kobject_set(kport, IKO_NULL, IKOT_NONE); } - nport = task->itk_nself; + nport = task->itk_self[TASK_FLAVOR_NAME]; if (nport != IP_NULL) { ipc_kobject_set(nport, IKO_NULL, IKOT_NONE); } + iport = task->itk_self[TASK_FLAVOR_INSPECT]; + if (iport != IP_NULL) { + ipc_kobject_set(iport, IKO_NULL, IKOT_NONE); + } + rdport = task->itk_self[TASK_FLAVOR_READ]; + if (rdport != IP_NULL) { + ipc_kobject_set(rdport, IKO_NULL, IKOT_NONE); + } rport = task->itk_resume; if (rport != IP_NULL) { @@ -332,22 
+372,30 @@ ipc_task_terminate( { ipc_port_t kport; ipc_port_t nport; + ipc_port_t iport; + ipc_port_t rdport; ipc_port_t rport; int i; itk_lock(task); - kport = task->itk_self; + kport = task->itk_self[TASK_FLAVOR_CONTROL]; if (kport == IP_NULL) { /* the task is already terminated (can this happen?) */ itk_unlock(task); return; } - task->itk_self = IP_NULL; + task->itk_self[TASK_FLAVOR_CONTROL] = IP_NULL; + + rdport = task->itk_self[TASK_FLAVOR_READ]; + task->itk_self[TASK_FLAVOR_READ] = IP_NULL; - nport = task->itk_nself; + iport = task->itk_self[TASK_FLAVOR_INSPECT]; + task->itk_self[TASK_FLAVOR_INSPECT] = IP_NULL; + + nport = task->itk_self[TASK_FLAVOR_NAME]; assert(nport != IP_NULL); - task->itk_nself = IP_NULL; + task->itk_self[TASK_FLAVOR_NAME] = IP_NULL; rport = task->itk_resume; task->itk_resume = IP_NULL; @@ -356,8 +404,8 @@ ipc_task_terminate( /* release the naked send rights */ - if (IP_VALID(task->itk_sself)) { - ipc_port_release_send(task->itk_sself); + if (IP_VALID(task->itk_settable_self)) { + ipc_port_release_send(task->itk_settable_self); } for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) { @@ -402,6 +450,12 @@ ipc_task_terminate( /* destroy the kernel ports */ ipc_port_dealloc_kernel(kport); ipc_port_dealloc_kernel(nport); + if (iport != IP_NULL) { + ipc_port_dealloc_kernel(iport); + } + if (rdport != IP_NULL) { + ipc_port_dealloc_kernel(rdport); + } if (rport != IP_NULL) { ipc_port_dealloc_kernel(rport); } @@ -414,8 +468,8 @@ ipc_task_terminate( * Purpose: * Reset a task's IPC state to protect it when * it enters an elevated security context. The - * task name port can remain the same - since - * it represents no specific privilege. + * task name port can remain the same - since it + * represents no specific privilege. * Conditions: * Nothing locked. The task must be suspended. * (Or the current thread must be in the task.) @@ -427,6 +481,8 @@ ipc_task_reset( { ipc_port_t old_kport, new_kport; ipc_port_t old_sself; + ipc_port_t old_rdport; + ipc_port_t old_iport; ipc_port_t old_exc_actions[EXC_TYPES_COUNT]; int i; @@ -435,12 +491,14 @@ ipc_task_reset( struct label *unset_label = mac_exc_create_label(); #endif - new_kport = ipc_kobject_alloc_port((ipc_kobject_t)task, IKOT_TASK, + new_kport = ipc_kobject_alloc_port((ipc_kobject_t)task, IKOT_TASK_CONTROL, IPC_KOBJECT_ALLOC_MAKE_SEND); itk_lock(task); - old_kport = task->itk_self; + old_kport = task->itk_self[TASK_FLAVOR_CONTROL]; + old_rdport = task->itk_self[TASK_FLAVOR_READ]; + old_iport = task->itk_self[TASK_FLAVOR_INSPECT]; if (old_kport == IP_NULL) { /* the task is already terminated (can this happen?) 
*/ @@ -453,8 +511,8 @@ ipc_task_reset( return; } - old_sself = task->itk_sself; - task->itk_sself = task->itk_self = new_kport; + old_sself = task->itk_settable_self; + task->itk_settable_self = task->itk_self[TASK_FLAVOR_CONTROL] = new_kport; /* Set the old kport to IKOT_NONE and update the exec token while under the port lock */ ip_lock(old_kport); @@ -462,6 +520,10 @@ ipc_task_reset( task->exec_token += 1; ip_unlock(old_kport); + /* Reset the read and inspect flavors of task port */ + task->itk_self[TASK_FLAVOR_READ] = IP_NULL; + task->itk_self[TASK_FLAVOR_INSPECT] = IP_NULL; + for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) { old_exc_actions[i] = IP_NULL; @@ -501,8 +563,14 @@ ipc_task_reset( } }/* for */ - /* destroy the kernel port */ + /* destroy all task port flavors */ ipc_port_dealloc_kernel(old_kport); + if (old_rdport != IP_NULL) { + ipc_port_dealloc_kernel(old_rdport); + } + if (old_iport != IP_NULL) { + ipc_port_dealloc_kernel(old_iport); + } } /* @@ -519,10 +587,12 @@ ipc_thread_init( { ipc_port_t kport; - kport = ipc_kobject_alloc_port((ipc_kobject_t)thread, IKOT_THREAD, + kport = ipc_kobject_alloc_port((ipc_kobject_t)thread, IKOT_THREAD_CONTROL, IPC_KOBJECT_ALLOC_MAKE_SEND); - thread->ith_sself = thread->ith_self = kport; + thread->ith_settable_self = thread->ith_self[THREAD_FLAVOR_CONTROL] = kport; + thread->ith_self[THREAD_FLAVOR_INSPECT] = IP_NULL; + thread->ith_self[THREAD_FLAVOR_READ] = IP_NULL; thread->ith_special_reply_port = NULL; thread->exc_actions = NULL; @@ -568,16 +638,33 @@ ipc_thread_destroy_exc_actions( } } +/* + * Routine: ipc_thread_disable + * Purpose: + * Clean up and destroy a thread's IPC state. + * Conditions: + * Thread locked. + */ void ipc_thread_disable( thread_t thread) { - ipc_port_t kport = thread->ith_self; + ipc_port_t kport = thread->ith_self[THREAD_FLAVOR_CONTROL]; + ipc_port_t iport = thread->ith_self[THREAD_FLAVOR_INSPECT]; + ipc_port_t rdport = thread->ith_self[THREAD_FLAVOR_READ]; if (kport != IP_NULL) { ipc_kobject_set(kport, IKO_NULL, IKOT_NONE); } + if (iport != IP_NULL) { + ipc_kobject_set(iport, IKO_NULL, IKOT_NONE); + } + + if (rdport != IP_NULL) { + ipc_kobject_set(rdport, IKO_NULL, IKOT_NONE); + } + /* unbind the thread special reply port */ if (IP_VALID(thread->ith_special_reply_port)) { ipc_port_unbind_special_reply_port(thread, TRUE); @@ -596,27 +683,34 @@ void ipc_thread_terminate( thread_t thread) { - ipc_port_t kport = thread->ith_self; + ipc_port_t kport = IP_NULL; + ipc_port_t iport = IP_NULL; + ipc_port_t rdport = IP_NULL; + ipc_port_t ith_rpc_reply = IP_NULL; - if (kport != IP_NULL) { - int i; + thread_mtx_lock(thread); - if (IP_VALID(thread->ith_sself)) { - ipc_port_release_send(thread->ith_sself); + kport = thread->ith_self[THREAD_FLAVOR_CONTROL]; + iport = thread->ith_self[THREAD_FLAVOR_INSPECT]; + rdport = thread->ith_self[THREAD_FLAVOR_READ]; + + if (kport != IP_NULL) { + if (IP_VALID(thread->ith_settable_self)) { + ipc_port_release_send(thread->ith_settable_self); } - thread->ith_sself = thread->ith_self = IP_NULL; + thread->ith_settable_self = thread->ith_self[THREAD_FLAVOR_CONTROL] = IP_NULL; + thread->ith_self[THREAD_FLAVOR_INSPECT] = IP_NULL; + thread->ith_self[THREAD_FLAVOR_READ] = IP_NULL; if (thread->exc_actions != NULL) { - for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) { + for (int i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) { if (IP_VALID(thread->exc_actions[i].port)) { ipc_port_release_send(thread->exc_actions[i].port); } } ipc_thread_destroy_exc_actions(thread); } - - 
ipc_port_dealloc_kernel(kport); } #if IMPORTANCE_INHERITANCE @@ -624,12 +718,23 @@ ipc_thread_terminate( #endif assert(ipc_kmsg_queue_empty(&thread->ith_messages)); + ith_rpc_reply = thread->ith_rpc_reply; + thread->ith_rpc_reply = IP_NULL; - if (thread->ith_rpc_reply != IP_NULL) { - ipc_port_dealloc_reply(thread->ith_rpc_reply); - } + thread_mtx_unlock(thread); - thread->ith_rpc_reply = IP_NULL; + if (kport != IP_NULL) { + ipc_port_dealloc_kernel(kport); + } + if (iport != IP_NULL) { + ipc_port_dealloc_kernel(iport); + } + if (rdport != IP_NULL) { + ipc_port_dealloc_kernel(rdport); + } + if (ith_rpc_reply != IP_NULL) { + ipc_port_dealloc_reply(ith_rpc_reply); + } } /* @@ -637,9 +742,10 @@ ipc_thread_terminate( * Purpose: * Reset the IPC state for a given Mach thread when * its task enters an elevated security context. - * Both the thread port and its exception ports have + * All flavors of thread port and its exception ports have * to be reset. Its RPC reply port cannot have any - * rights outstanding, so it should be fine. + * rights outstanding, so it should be fine. The thread + * inspect and read port are set to NULL. * Conditions: * Nothing locked. */ @@ -650,21 +756,25 @@ ipc_thread_reset( { ipc_port_t old_kport, new_kport; ipc_port_t old_sself; + ipc_port_t old_rdport; + ipc_port_t old_iport; ipc_port_t old_exc_actions[EXC_TYPES_COUNT]; boolean_t has_old_exc_actions = FALSE; - int i; + int i; #if CONFIG_MACF struct label *new_label = mac_exc_create_label(); #endif - new_kport = ipc_kobject_alloc_port((ipc_kobject_t)thread, IKOT_THREAD, + new_kport = ipc_kobject_alloc_port((ipc_kobject_t)thread, IKOT_THREAD_CONTROL, IPC_KOBJECT_ALLOC_MAKE_SEND); thread_mtx_lock(thread); - old_kport = thread->ith_self; - old_sself = thread->ith_sself; + old_kport = thread->ith_self[THREAD_FLAVOR_CONTROL]; + old_rdport = thread->ith_self[THREAD_FLAVOR_READ]; + old_iport = thread->ith_self[THREAD_FLAVOR_INSPECT]; + old_sself = thread->ith_settable_self; if (old_kport == IP_NULL && thread->inspection == FALSE) { /* the is already terminated (can this happen?) 
*/ @@ -677,10 +787,19 @@ ipc_thread_reset( return; } - thread->ith_sself = thread->ith_self = new_kport; + thread->ith_settable_self = thread->ith_self[THREAD_FLAVOR_CONTROL] = new_kport; + thread->ith_self[THREAD_FLAVOR_READ] = IP_NULL; + thread->ith_self[THREAD_FLAVOR_INSPECT] = IP_NULL; + if (old_kport != IP_NULL) { ipc_kobject_set(old_kport, IKO_NULL, IKOT_NONE); } + if (old_rdport != IP_NULL) { + ipc_kobject_set(old_rdport, IKO_NULL, IKOT_NONE); + } + if (old_iport != IP_NULL) { + ipc_kobject_set(old_iport, IKO_NULL, IKOT_NONE); + } /* * Only ports that were set by root-owned processes @@ -723,6 +842,12 @@ ipc_thread_reset( if (old_kport != IP_NULL) { ipc_port_dealloc_kernel(old_kport); } + if (old_rdport != IP_NULL) { + ipc_port_dealloc_kernel(old_rdport); + } + if (old_iport != IP_NULL) { + ipc_port_dealloc_kernel(old_iport); + } /* unbind the thread special reply port */ if (IP_VALID(thread->ith_special_reply_port)) { @@ -752,9 +877,9 @@ retrieve_task_self_fast( assert(task == current_task()); itk_lock(task); - assert(task->itk_self != IP_NULL); + assert(task->itk_self[TASK_FLAVOR_CONTROL] != IP_NULL); - if ((port = task->itk_sself) == task->itk_self) { + if ((port = task->itk_settable_self) == task->itk_self[TASK_FLAVOR_CONTROL]) { /* no interposing */ sright = ipc_port_copy_send(port); assert(sright == port); @@ -789,9 +914,9 @@ retrieve_thread_self_fast( thread_mtx_lock(thread); - assert(thread->ith_self != IP_NULL); + assert(thread->ith_self[THREAD_FLAVOR_CONTROL] != IP_NULL); - if ((port = thread->ith_sself) == thread->ith_self) { + if ((port = thread->ith_settable_self) == thread->ith_self[THREAD_FLAVOR_CONTROL]) { /* no interposing */ sright = ipc_port_copy_send(port); assert(sright == port); @@ -1000,37 +1125,135 @@ ipc_port_unbind_special_reply_port( kern_return_t thread_get_special_port( - thread_t thread, - int which, - ipc_port_t *portp) + thread_inspect_t thread, + int which, + ipc_port_t *portp); + +kern_return_t +static +thread_get_special_port_internal( + thread_inspect_t thread, + int which, + ipc_port_t *portp, + mach_thread_flavor_t flavor) { - kern_return_t result = KERN_SUCCESS; - ipc_port_t *whichp; + kern_return_t kr; + ipc_port_t port; if (thread == THREAD_NULL) { return KERN_INVALID_ARGUMENT; } + if ((kr = port_allowed_with_thread_flavor(which, flavor)) != KERN_SUCCESS) { + return kr; + } + + thread_mtx_lock(thread); + if (!thread->active) { + thread_mtx_unlock(thread); + return KERN_FAILURE; + } + switch (which) { case THREAD_KERNEL_PORT: - whichp = &thread->ith_sself; + port = ipc_port_copy_send(thread->ith_settable_self); + thread_mtx_unlock(thread); + break; + + case THREAD_READ_PORT: + case THREAD_INSPECT_PORT: + thread_mtx_unlock(thread); + mach_thread_flavor_t current_flavor = (which == THREAD_READ_PORT) ? 
+ THREAD_FLAVOR_READ : THREAD_FLAVOR_INSPECT; + /* convert_thread_to_port_with_flavor consumes a thread reference */ + thread_reference(thread); + port = convert_thread_to_port_with_flavor(thread, current_flavor); break; default: + thread_mtx_unlock(thread); return KERN_INVALID_ARGUMENT; } - thread_mtx_lock(thread); + *portp = port; - if (thread->active) { - *portp = ipc_port_copy_send(*whichp); - } else { - result = KERN_FAILURE; + return KERN_SUCCESS; +} + +kern_return_t +thread_get_special_port( + thread_inspect_t thread, + int which, + ipc_port_t *portp) +{ + return thread_get_special_port_internal(thread, which, portp, THREAD_FLAVOR_CONTROL); +} + +kern_return_t +thread_get_special_port_from_user( + mach_port_t port, + int which, + ipc_port_t *portp) +{ + ipc_kobject_type_t kotype; + kern_return_t kr; + + thread_t thread = convert_port_to_thread_check_type(port, &kotype, THREAD_FLAVOR_INSPECT, FALSE); + + if (thread == THREAD_NULL) { + return KERN_INVALID_ARGUMENT; } - thread_mtx_unlock(thread); + switch (kotype) { + case IKOT_THREAD_CONTROL: + kr = thread_get_special_port_internal(thread, which, portp, THREAD_FLAVOR_CONTROL); + break; + case IKOT_THREAD_READ: + kr = thread_get_special_port_internal(thread, which, portp, THREAD_FLAVOR_READ); + break; + case IKOT_THREAD_INSPECT: + kr = thread_get_special_port_internal(thread, which, portp, THREAD_FLAVOR_INSPECT); + break; + default: + panic("strange kobject type"); + break; + } - return result; + thread_deallocate(thread); + return kr; +} + +static kern_return_t +port_allowed_with_thread_flavor( + int which, + mach_thread_flavor_t flavor) +{ + switch (flavor) { + case THREAD_FLAVOR_CONTROL: + return KERN_SUCCESS; + + case THREAD_FLAVOR_READ: + + switch (which) { + case THREAD_READ_PORT: + case THREAD_INSPECT_PORT: + return KERN_SUCCESS; + default: + return KERN_INVALID_CAPABILITY; + } + + case THREAD_FLAVOR_INSPECT: + + switch (which) { + case THREAD_INSPECT_PORT: + return KERN_SUCCESS; + default: + return KERN_INVALID_CAPABILITY; + } + + default: + return KERN_INVALID_CAPABILITY; + } } /* @@ -1042,10 +1265,11 @@ thread_get_special_port( * Nothing locked. If successful, consumes * the supplied send right. * Returns: - * KERN_SUCCESS Changed the special port. - * KERN_INVALID_ARGUMENT The thread is null. - * KERN_FAILURE The thread is dead. - * KERN_INVALID_ARGUMENT Invalid special port. + * KERN_SUCCESS Changed the special port. + * KERN_INVALID_ARGUMENT The thread is null. + * KERN_FAILURE The thread is dead. + * KERN_INVALID_ARGUMENT Invalid special port. + * KERN_NO_ACCESS Restricted access to set port. */ kern_return_t @@ -1063,7 +1287,17 @@ thread_set_special_port( switch (which) { case THREAD_KERNEL_PORT: - whichp = &thread->ith_sself; +#if CONFIG_CSR + if (csr_check(CSR_ALLOW_KERNEL_DEBUGGER) != 0) { + /* + * Only allow setting of thread-self + * special port from user-space when SIP is + * disabled (for Mach-on-Mach emulation). 
+ */ + return KERN_NO_ACCESS; + } +#endif + whichp = &thread->ith_settable_self; break; default: @@ -1106,27 +1340,49 @@ kern_return_t task_get_special_port( task_t task, int which, - ipc_port_t *portp) + ipc_port_t *portp); + +static kern_return_t +task_get_special_port_internal( + task_t task, + int which, + ipc_port_t *portp, + mach_task_flavor_t flavor) { + kern_return_t kr; ipc_port_t port; if (task == TASK_NULL) { return KERN_INVALID_ARGUMENT; } + if ((kr = port_allowed_with_task_flavor(which, flavor)) != KERN_SUCCESS) { + return kr; + } + itk_lock(task); - if (task->itk_self == IP_NULL) { + if (task->itk_self[TASK_FLAVOR_CONTROL] == IP_NULL) { itk_unlock(task); return KERN_FAILURE; } switch (which) { case TASK_KERNEL_PORT: - port = ipc_port_copy_send(task->itk_sself); + port = ipc_port_copy_send(task->itk_settable_self); break; + case TASK_READ_PORT: + case TASK_INSPECT_PORT: + itk_unlock(task); + mach_task_flavor_t current_flavor = (which == TASK_READ_PORT) ? + TASK_FLAVOR_READ : TASK_FLAVOR_INSPECT; + /* convert_task_to_port_with_flavor consumes a task reference */ + task_reference(task); + port = convert_task_to_port_with_flavor(task, current_flavor); + goto copyout; + case TASK_NAME_PORT: - port = ipc_port_make_send(task->itk_nself); + port = ipc_port_make_send(task->itk_self[TASK_FLAVOR_NAME]); break; case TASK_HOST_PORT: @@ -1153,12 +1409,92 @@ task_get_special_port( itk_unlock(task); return KERN_INVALID_ARGUMENT; } + itk_unlock(task); +copyout: *portp = port; return KERN_SUCCESS; } +kern_return_t +task_get_special_port( + task_t task, + int which, + ipc_port_t *portp) +{ + return task_get_special_port_internal(task, which, portp, TASK_FLAVOR_CONTROL); +} + +kern_return_t +task_get_special_port_from_user( + mach_port_t port, + int which, + ipc_port_t *portp) +{ + ipc_kobject_type_t kotype; + kern_return_t kr; + + task_t task = convert_port_to_task_check_type(port, &kotype, TASK_FLAVOR_INSPECT, FALSE); + + if (task == TASK_NULL) { + return KERN_INVALID_ARGUMENT; + } + + switch (kotype) { + case IKOT_TASK_CONTROL: + kr = task_get_special_port_internal(task, which, portp, TASK_FLAVOR_CONTROL); + break; + case IKOT_TASK_READ: + kr = task_get_special_port_internal(task, which, portp, TASK_FLAVOR_READ); + break; + case IKOT_TASK_INSPECT: + kr = task_get_special_port_internal(task, which, portp, TASK_FLAVOR_INSPECT); + break; + default: + panic("strange kobject type"); + break; + } + + task_deallocate(task); + return kr; +} + +static kern_return_t +port_allowed_with_task_flavor( + int which, + mach_task_flavor_t flavor) +{ + switch (flavor) { + case TASK_FLAVOR_CONTROL: + return KERN_SUCCESS; + + case TASK_FLAVOR_READ: + + switch (which) { + case TASK_READ_PORT: + case TASK_INSPECT_PORT: + case TASK_NAME_PORT: + return KERN_SUCCESS; + default: + return KERN_INVALID_CAPABILITY; + } + + case TASK_FLAVOR_INSPECT: + + switch (which) { + case TASK_INSPECT_PORT: + case TASK_NAME_PORT: + return KERN_SUCCESS; + default: + return KERN_INVALID_CAPABILITY; + } + + default: + return KERN_INVALID_CAPABILITY; + } +} + /* * Routine: task_set_special_port [kernel call] * Purpose: @@ -1232,71 +1568,74 @@ task_set_special_port_internal( int which, ipc_port_t port) { - ipc_port_t *whichp; - ipc_port_t old; + ipc_port_t old = IP_NULL; + kern_return_t rc = KERN_INVALID_ARGUMENT; if (task == TASK_NULL) { - return KERN_INVALID_ARGUMENT; + goto out; + } + + itk_lock(task); + if (task->itk_self[TASK_FLAVOR_CONTROL] == IP_NULL) { + rc = KERN_FAILURE; + goto out_unlock; } switch (which) { case 
TASK_KERNEL_PORT: - whichp = &task->itk_sself; + old = task->itk_settable_self; + task->itk_settable_self = port; break; case TASK_HOST_PORT: - whichp = &task->itk_host; + old = task->itk_host; + task->itk_host = port; break; case TASK_BOOTSTRAP_PORT: - whichp = &task->itk_bootstrap; + old = task->itk_bootstrap; + task->itk_bootstrap = port; break; + /* Never allow overwrite of seatbelt port */ case TASK_SEATBELT_PORT: - whichp = &task->itk_seatbelt; + if (IP_VALID(task->itk_seatbelt)) { + rc = KERN_NO_ACCESS; + goto out_unlock; + } + task->itk_seatbelt = port; break; + /* Never allow overwrite of the task access port */ case TASK_ACCESS_PORT: - whichp = &task->itk_task_access; + if (IP_VALID(task->itk_task_access)) { + rc = KERN_NO_ACCESS; + goto out_unlock; + } + task->itk_task_access = port; break; case TASK_DEBUG_CONTROL_PORT: - whichp = &task->itk_debug_control; + old = task->itk_debug_control; + task->itk_debug_control = port; break; default: - return KERN_INVALID_ARGUMENT; + rc = KERN_INVALID_ARGUMENT; + goto out_unlock; }/* switch */ - itk_lock(task); - if (task->itk_self == IP_NULL) { - itk_unlock(task); - return KERN_FAILURE; - } - - /* Never allow overwrite of seatbelt, or task access ports */ - switch (which) { - case TASK_SEATBELT_PORT: - case TASK_ACCESS_PORT: - if (IP_VALID(*whichp)) { - itk_unlock(task); - return KERN_NO_ACCESS; - } - break; - default: - break; - } + rc = KERN_SUCCESS; - old = *whichp; - *whichp = port; +out_unlock: itk_unlock(task); if (IP_VALID(old)) { ipc_port_release_send(old); } - return KERN_SUCCESS; +out: + return rc; } - /* * Routine: mach_ports_register [kernel call] * Purpose: @@ -1344,7 +1683,7 @@ mach_ports_register( } itk_lock(task); - if (task->itk_self == IP_NULL) { + if (task->itk_self[TASK_FLAVOR_CONTROL] == IP_NULL) { itk_unlock(task); return KERN_INVALID_ARGUMENT; } @@ -1420,7 +1759,7 @@ mach_ports_lookup( } itk_lock(task); - if (task->itk_self == IP_NULL) { + if (task->itk_self[TASK_FLAVOR_CONTROL] == IP_NULL) { itk_unlock(task); kfree(memory, size); @@ -1445,8 +1784,6 @@ mach_ports_lookup( return KERN_SUCCESS; } -extern zone_t task_zone; - kern_return_t task_conversion_eval(task_t caller, task_t victim) { @@ -1470,11 +1807,11 @@ task_conversion_eval(task_t caller, task_t victim) return KERN_INVALID_SECURITY; } - zone_require(victim, task_zone); + task_require(victim); -#if CONFIG_EMBEDDED +#if !defined(XNU_TARGET_OS_OSX) /* - * On embedded platforms, only a platform binary can resolve the task port + * On platforms other than macOS, only a platform binary can resolve the task port * of another platform binary. */ if ((victim->t_flags & TF_PLATFORM) && !(caller->t_flags & TF_PLATFORM)) { @@ -1488,7 +1825,7 @@ task_conversion_eval(task_t caller, task_t victim) } #endif /* SECURE_KERNEL */ } -#endif /* CONFIG_EMBEDDED */ +#endif /* !defined(XNU_TARGET_OS_OSX) */ return KERN_SUCCESS; } @@ -1503,7 +1840,7 @@ task_conversion_eval(task_t caller, task_t victim) * Nothing locked, blocking OK. 
*/ task_t -convert_port_to_locked_task(ipc_port_t port) +convert_port_to_locked_task(ipc_port_t port, boolean_t eval) { int try_failed_count = 0; @@ -1512,14 +1849,14 @@ convert_port_to_locked_task(ipc_port_t port) task_t task; ip_lock(port); - if (!ip_active(port) || (ip_kotype(port) != IKOT_TASK)) { + if (!ip_active(port) || (ip_kotype(port) != IKOT_TASK_CONTROL)) { ip_unlock(port); return TASK_NULL; } task = (task_t) ip_get_kobject(port); assert(task != TASK_NULL); - if (task_conversion_eval(ct, task)) { + if (eval && task_conversion_eval(ct, task)) { ip_unlock(port); return TASK_NULL; } @@ -1558,7 +1895,9 @@ convert_port_to_locked_task_inspect(ipc_port_t port) task_inspect_t task; ip_lock(port); - if (!ip_active(port) || (ip_kotype(port) != IKOT_TASK)) { + if (!ip_active(port) || (ip_kotype(port) != IKOT_TASK_CONTROL && + ip_kotype(port) != IKOT_TASK_READ && + ip_kotype(port) != IKOT_TASK_INSPECT)) { ip_unlock(port); return TASK_INSPECT_NULL; } @@ -1580,28 +1919,70 @@ convert_port_to_locked_task_inspect(ipc_port_t port) return TASK_INSPECT_NULL; } +/* + * Routine: convert_port_to_locked_task_read + * Purpose: + * Internal helper routine to convert from a port to a locked + * task read right. Used by internal routines that try to convert from a + * task read port to a reference on some task related object. + * Conditions: + * Nothing locked, blocking OK. + */ +task_read_t +convert_port_to_locked_task_read(ipc_port_t port) +{ + int try_failed_count = 0; + + while (IP_VALID(port)) { + task_read_t task; + + ip_lock(port); + if (!ip_active(port) || (ip_kotype(port) != IKOT_TASK_CONTROL && + ip_kotype(port) != IKOT_TASK_READ)) { + ip_unlock(port); + return TASK_READ_NULL; + } + task = (task_read_t)port->ip_kobject; + assert(task != TASK_READ_NULL); + /* + * Normal lock ordering puts task_lock() before ip_lock(). + * Attempt out-of-order locking here. + */ + if (task_lock_try((task_t)task)) { + ip_unlock(port); + return task; + } + try_failed_count++; + + ip_unlock(port); + mutex_pause(try_failed_count); + } + return TASK_READ_NULL; +} + static task_t convert_port_to_task_locked( ipc_port_t port, - uint32_t *exec_token) + uint32_t *exec_token, + boolean_t eval) { task_t task = TASK_NULL; ip_lock_held(port); require_ip_active(port); - if (ip_kotype(port) == IKOT_TASK) { - task_t ct = current_task(); + if (ip_kotype(port) == IKOT_TASK_CONTROL) { task = (task_t) ip_get_kobject(port); assert(task != TASK_NULL); - if (task_conversion_eval(ct, task)) { + if (eval && task_conversion_eval(current_task(), task)) { return TASK_NULL; } if (exec_token) { *exec_token = task->exec_token; } + task_reference_internal(task); } @@ -1621,14 +2002,15 @@ convert_port_to_task_locked( task_t convert_port_to_task_with_exec_token( ipc_port_t port, - uint32_t *exec_token) + uint32_t *exec_token, + boolean_t eval) { task_t task = TASK_NULL; if (IP_VALID(port)) { ip_lock(port); if (ip_active(port)) { - task = convert_port_to_task_locked(port, exec_token); + task = convert_port_to_task_locked(port, exec_token, eval); } ip_unlock(port); } @@ -1649,43 +2031,126 @@ task_t convert_port_to_task( ipc_port_t port) { - return convert_port_to_task_with_exec_token(port, NULL); + return convert_port_to_task_with_exec_token(port, NULL, TRUE); } - /* - * Routine: convert_port_to_task_name + * Routine: convert_port_to_task_no_eval * Purpose: - * Convert from a port to a task name. - * Doesn't consume the port ref; produces a task name ref, + * Convert from a port to a task, skips task_conversion_eval. 
+ * Doesn't consume the port ref; produces a task ref, * which may be null. * Conditions: * Nothing locked. */ -task_name_t -convert_port_to_task_name( +static task_t +convert_port_to_task_no_eval( ipc_port_t port) { - task_name_t task = TASK_NULL; + return convert_port_to_task_with_exec_token(port, NULL, FALSE); +} + +/* + * Routine: convert_port_to_task_name + * Purpose: + * Convert from a port to a task name. + * Doesn't consume the port ref; produces a task name ref, + * which may be null. + * Conditions: + * Nothing locked. + */ + +static task_name_t +convert_port_to_task_name_locked( + ipc_port_t port) +{ + task_name_t task = TASK_NAME_NULL; + + ip_lock_held(port); + require_ip_active(port); + + if (ip_kotype(port) == IKOT_TASK_CONTROL || + ip_kotype(port) == IKOT_TASK_READ || + ip_kotype(port) == IKOT_TASK_INSPECT || + ip_kotype(port) == IKOT_TASK_NAME) { + task = (task_name_t) ip_get_kobject(port); + assert(task != TASK_NAME_NULL); + + task_reference_internal(task); + } + + return task; +} + +task_name_t +convert_port_to_task_name( + ipc_port_t port) +{ + task_name_t task = TASK_NULL; if (IP_VALID(port)) { ip_lock(port); + if (ip_active(port)) { + task = convert_port_to_task_name_locked(port); + } + ip_unlock(port); + } - if (ip_active(port) && - (ip_kotype(port) == IKOT_TASK || - ip_kotype(port) == IKOT_TASK_NAME)) { - task = (task_name_t) ip_get_kobject(port); - assert(task != TASK_NAME_NULL); + return task; +} - task_reference_internal(task); - } +/* + * Routine: convert_port_to_task_policy + * Purpose: + * Convert from a port to a task. + * Doesn't consume the port ref; produces a task ref, + * which may be null. + * If the port is being used with task_port_set(), any task port + * type other than TASK_CONTROL requires an entitlement. If the + * port is being used with task_port_get(), TASK_NAME requires an + * entitlement. + * Conditions: + * Nothing locked. + */ +static task_t +convert_port_to_task_policy(ipc_port_t port, boolean_t set) +{ + task_t task = TASK_NULL; + task_t ctask = current_task(); - ip_unlock(port); + if (!IP_VALID(port)) { + return TASK_NULL; + } + + task = set ? 
+ convert_port_to_task(port) : + convert_port_to_task_inspect(port); + + if (task == TASK_NULL && + IOTaskHasEntitlement(ctask, "com.apple.private.task_policy")) { + task = convert_port_to_task_name(port); + } + + if (task_conversion_eval(ctask, task) != KERN_SUCCESS) { + task_deallocate(task); + return TASK_NULL; } return task; } +task_policy_set_t +convert_port_to_task_policy_set(ipc_port_t port) +{ + return convert_port_to_task_policy(port, true); +} + +task_policy_get_t +convert_port_to_task_policy_get(ipc_port_t port) +{ + return convert_port_to_task_policy(port, false); +} + static task_inspect_t convert_port_to_task_inspect_locked( ipc_port_t port) @@ -1695,7 +2160,9 @@ convert_port_to_task_inspect_locked( ip_lock_held(port); require_ip_active(port); - if (ip_kotype(port) == IKOT_TASK) { + if (ip_kotype(port) == IKOT_TASK_CONTROL || + ip_kotype(port) == IKOT_TASK_READ || + ip_kotype(port) == IKOT_TASK_INSPECT) { task = (task_inspect_t) ip_get_kobject(port); assert(task != TASK_INSPECT_NULL); @@ -1705,6 +2172,246 @@ convert_port_to_task_inspect_locked( return task; } +static task_read_t +convert_port_to_task_read_locked( + ipc_port_t port) +{ + task_read_t task = TASK_READ_NULL; + + ip_lock_held(port); + require_ip_active(port); + + if (ip_kotype(port) == IKOT_TASK_CONTROL || + ip_kotype(port) == IKOT_TASK_READ) { + task_t ct = current_task(); + task = (task_t)port->ip_kobject; + + assert(task != TASK_READ_NULL); + + if (task_conversion_eval(ct, task)) { + return TASK_READ_NULL; + } + + task_reference_internal(task); + } + + return task; +} + +/* + * Routine: convert_port_to_task_check_type + * Purpose: + * Convert from a port to a task based on port's type. + * Doesn't consume the port ref; produces a task ref, + * which may be null. + * Arguments: + * port: The port that we do conversion on + * kotype: Returns the IKOT_TYPE of the port, if translation succeeded + * at_most: The lowest capability flavor allowed. In mach_task_flavor_t, + * the higher the flavor number, the lesser the capability, hence the name. + * eval_check: Whether to run task_conversion_eval check during the conversion. + * For backward compatibility, some interfaces do not run conversion + * eval on IKOT_TASK_CONTROL. + * Conditions: + * Nothing locked. + * Returns: + * task_t and port's type, if translation succeeded; + * TASK_NULL and IKOT_NONE, if translation failed + */ +task_t +convert_port_to_task_check_type( + ipc_port_t port, + ipc_kobject_type_t *kotype, + mach_task_flavor_t at_most, + boolean_t eval_check) +{ + task_t task = TASK_NULL; + ipc_kobject_type_t type = IKOT_NONE; + + if (!IP_VALID(port) || !ip_active(port)) { + goto out; + } + + switch (ip_kotype(port)) { + case IKOT_TASK_CONTROL: + task = eval_check ?
convert_port_to_task(port) : convert_port_to_task_no_eval(port); + if (task != TASK_NULL) { + type = IKOT_TASK_CONTROL; + } + break; + case IKOT_TASK_READ: + if (at_most >= TASK_FLAVOR_READ) { + task = convert_port_to_task_read(port); + if (task != TASK_READ_NULL) { + type = IKOT_TASK_READ; + } + } + break; + case IKOT_TASK_INSPECT: + if (at_most >= TASK_FLAVOR_INSPECT) { + task = convert_port_to_task_inspect(port); + if (task != TASK_INSPECT_NULL) { + type = IKOT_TASK_INSPECT; + } + } + break; + case IKOT_TASK_NAME: + if (at_most >= TASK_FLAVOR_NAME) { + task = convert_port_to_task_name(port); + if (task != TASK_NAME_NULL) { + type = IKOT_TASK_NAME; + } + } + break; + default: + break; + } + +out: + if (kotype) { + *kotype = type; + } + return task; +} + +/* + * Routine: convert_port_to_thread_check_type + * Purpose: + * Convert from a port to a thread based on port's type. + * Doesn't consume the port ref; produces a thread ref, + * which may be null. + * This conversion routine is _ONLY_ supposed to be used + * by thread_get_special_port. + * Arguments: + * port: The port that we do conversion on + * kotype: Returns the IKOT_TYPE of the port, if translation succeeded + * at_most: The lowest capability flavor allowed. In mach_thread_flavor_t, + * the higher the flavor number, the lesser the capability, hence the name. + * eval_check: Whether to run task_conversion_eval check during the conversion. + * For backward compatibility, some interfaces do not run + * conversion eval on IKOT_THREAD_CONTROL. + * Conditions: + * Nothing locked. + * Returns: + * thread_t and port's type, if translation succeeded; + * THREAD_NULL and IKOT_NONE, if translation failed + */ +thread_t +convert_port_to_thread_check_type( + ipc_port_t port, + ipc_kobject_type_t *kotype, + mach_thread_flavor_t at_most, + boolean_t eval_check) +{ + thread_t thread = THREAD_NULL; + ipc_kobject_type_t type = IKOT_NONE; + + if (!IP_VALID(port) || !ip_active(port)) { + goto out; + } + + switch (ip_kotype(port)) { + case IKOT_THREAD_CONTROL: + thread = eval_check ? convert_port_to_thread(port) : convert_port_to_thread_no_eval(port); + if (thread != THREAD_NULL) { + type = IKOT_THREAD_CONTROL; + } + break; + case IKOT_THREAD_READ: + if (at_most >= THREAD_FLAVOR_READ) { + thread = convert_port_to_thread_read(port); + if (thread != THREAD_READ_NULL) { + type = IKOT_THREAD_READ; + } + } + break; + case IKOT_THREAD_INSPECT: + if (at_most >= THREAD_FLAVOR_INSPECT) { + thread = convert_port_to_thread_inspect(port); + if (thread != THREAD_INSPECT_NULL) { + type = IKOT_THREAD_INSPECT; + } + } + break; + default: + break; + } + +out: + if (kotype) { + *kotype = type; + } + return thread; +} + +/* + * Routine: convert_port_to_space_check_type + * Purpose: + * Convert from a port to a space based on port's type. + * Doesn't consume the port ref; produces a space ref, + * which may be null. + * Arguments: + * port: The port that we do conversion on + * kotype: Returns the IKOT_TYPE of the port, if translation succeeded + * at_most: The lowest capability flavor allowed. In mach_task_flavor_t, + * the higher the flavor number, the lesser the capability, hence the name. + * eval_check: Whether to run task_conversion_eval check during the conversion. + * For backward compatibility, some interfaces do not run + * conversion eval on IKOT_TASK_CONTROL. + * Conditions: + * Nothing locked. 
+ * Returns: + * ipc_space_t and port's type, if translation succeeded; + * IPC_SPACE_NULL and IKOT_NONE, if translation failed + */ +ipc_space_t +convert_port_to_space_check_type( + ipc_port_t port, + ipc_kobject_type_t *kotype, + mach_task_flavor_t at_most, + boolean_t eval_check) +{ + ipc_space_t space = IPC_SPACE_NULL; + ipc_kobject_type_t type = IKOT_NONE; + + if (!IP_VALID(port) || !ip_active(port)) { + goto out; + } + + switch (ip_kotype(port)) { + case IKOT_TASK_CONTROL: + space = eval_check ? convert_port_to_space(port) : convert_port_to_space_no_eval(port); + if (space != IPC_SPACE_NULL) { + type = IKOT_TASK_CONTROL; + } + break; + case IKOT_TASK_READ: + if (at_most >= TASK_FLAVOR_READ) { + space = convert_port_to_space_read(port); + if (space != IPC_SPACE_READ_NULL) { + type = IKOT_TASK_READ; + } + } + break; + case IKOT_TASK_INSPECT: + if (at_most >= TASK_FLAVOR_INSPECT) { + space = convert_port_to_space_inspect(port); + if (space != IPC_SPACE_INSPECT_NULL) { + type = IKOT_TASK_INSPECT; + } + } + break; + default: + break; + } + +out: + if (kotype) { + *kotype = type; + } + return space; +} + /* * Routine: convert_port_to_task_inspect * Purpose: @@ -1731,6 +2438,32 @@ convert_port_to_task_inspect( return task; } +/* + * Routine: convert_port_to_task_read + * Purpose: + * Convert from a port to a task read right + * Doesn't consume the port ref; produces a task ref, + * which may be null. + * Conditions: + * Nothing locked. + */ +task_read_t +convert_port_to_task_read( + ipc_port_t port) +{ + task_read_t task = TASK_READ_NULL; + + if (IP_VALID(port)) { + ip_lock(port); + if (ip_active(port)) { + task = convert_port_to_task_read_locked(port); + } + ip_unlock(port); + } + + return task; +} + /* * Routine: convert_port_to_task_suspension_token * Purpose: @@ -1764,7 +2497,7 @@ convert_port_to_task_suspension_token( } /* - * Routine: convert_port_to_space + * Routine: convert_port_to_space_with_flavor * Purpose: * Convert from a port to a space. * Doesn't consume the port ref; produces a space ref, @@ -1772,14 +2505,29 @@ convert_port_to_task_suspension_token( * Conditions: * Nothing locked. */ -ipc_space_t -convert_port_to_space( - ipc_port_t port) +static ipc_space_t +convert_port_to_space_with_flavor( + ipc_port_t port, + mach_task_flavor_t flavor, + boolean_t eval) { ipc_space_t space; task_t task; - task = convert_port_to_locked_task(port); + switch (flavor) { + case TASK_FLAVOR_CONTROL: + task = convert_port_to_locked_task(port, eval); + break; + case TASK_FLAVOR_READ: + task = convert_port_to_locked_task_read(port); + break; + case TASK_FLAVOR_INSPECT: + task = convert_port_to_locked_task_inspect(port); + break; + default: + task = TASK_NULL; + break; + } if (task == TASK_NULL) { return IPC_SPACE_NULL; @@ -1796,41 +2544,36 @@ convert_port_to_space( return space; } -/* - * Routine: convert_port_to_space_inspect - * Purpose: - * Convert from a port to a space inspect right. - * Doesn't consume the port ref; produces a space inspect ref, - * which may be null. - * Conditions: - * Nothing locked. 
- */ -ipc_space_inspect_t -convert_port_to_space_inspect( +ipc_space_t +convert_port_to_space( ipc_port_t port) { - ipc_space_inspect_t space; - task_inspect_t task; - - task = convert_port_to_locked_task_inspect(port); + return convert_port_to_space_with_flavor(port, TASK_FLAVOR_CONTROL, TRUE); +} - if (task == TASK_INSPECT_NULL) { - return IPC_SPACE_INSPECT_NULL; - } +static ipc_space_t +convert_port_to_space_no_eval( + ipc_port_t port) +{ + return convert_port_to_space_with_flavor(port, TASK_FLAVOR_CONTROL, FALSE); +} - if (!task->active) { - task_unlock(task); - return IPC_SPACE_INSPECT_NULL; - } +ipc_space_read_t +convert_port_to_space_read( + ipc_port_t port) +{ + return convert_port_to_space_with_flavor(port, TASK_FLAVOR_READ, TRUE); +} - space = (ipc_space_inspect_t)task->itk_space; - is_reference((ipc_space_t)space); - task_unlock((task_t)task); - return space; +ipc_space_inspect_t +convert_port_to_space_inspect( + ipc_port_t port) +{ + return convert_port_to_space_with_flavor(port, TASK_FLAVOR_INSPECT, TRUE); } /* - * Routine: convert_port_to_map + * Routine: convert_port_to_map_with_flavor * Purpose: * Convert from a port to a map. * Doesn't consume the port ref; produces a map ref, @@ -1839,14 +2582,28 @@ convert_port_to_space_inspect( * Nothing locked. */ -vm_map_t -convert_port_to_map( - ipc_port_t port) +static vm_map_t +convert_port_to_map_with_flavor( + ipc_port_t port, + mach_task_flavor_t flavor) { task_t task; vm_map_t map; - task = convert_port_to_locked_task(port); + switch (flavor) { + case TASK_FLAVOR_CONTROL: + task = convert_port_to_locked_task(port, TRUE); + break; + case TASK_FLAVOR_READ: + task = convert_port_to_locked_task_read(port); + break; + case TASK_FLAVOR_INSPECT: + task = convert_port_to_locked_task_inspect(port); + break; + default: + task = TASK_NULL; + break; + } if (task == TASK_NULL) { return VM_MAP_NULL; @@ -1858,11 +2615,45 @@ convert_port_to_map( } map = task->map; + if (map->pmap == kernel_pmap) { + if (flavor == TASK_FLAVOR_CONTROL) { + panic("userspace has control access to a " + "kernel map %p through task %p", map, task); + } + if (task != kernel_task) { + panic("userspace has access to a " + "kernel map %p through task %p", map, task); + } + } else { + pmap_require(map->pmap); + } + vm_map_reference_swap(map); task_unlock(task); return map; } +vm_map_read_t +convert_port_to_map( + ipc_port_t port) +{ + return convert_port_to_map_with_flavor(port, TASK_FLAVOR_CONTROL); +} + +vm_map_read_t +convert_port_to_map_read( + ipc_port_t port) +{ + return convert_port_to_map_with_flavor(port, TASK_FLAVOR_READ); +} + +vm_map_inspect_t +convert_port_to_map_inspect( + ipc_port_t port) +{ + return convert_port_to_map_with_flavor(port, TASK_FLAVOR_INSPECT); +} + /* * Routine: convert_port_to_thread @@ -1877,14 +2668,15 @@ convert_port_to_map( static thread_t convert_port_to_thread_locked( ipc_port_t port, - port_to_thread_options_t options) + port_to_thread_options_t options, + boolean_t eval) { thread_t thread = THREAD_NULL; ip_lock_held(port); require_ip_active(port); - if (ip_kotype(port) == IKOT_THREAD) { + if (ip_kotype(port) == IKOT_THREAD_CONTROL) { thread = (thread_t) ip_get_kobject(port); assert(thread != THREAD_NULL); @@ -1900,7 +2692,7 @@ convert_port_to_thread_locked( } } else { /* Use task conversion rules for thread control conversions */ - if (task_conversion_eval(current_task(), thread->task) != KERN_SUCCESS) { + if (eval && task_conversion_eval(current_task(), thread->task) != KERN_SUCCESS) { return THREAD_NULL; } } @@ -1915,12 +2707,126 
@@ thread_t convert_port_to_thread( ipc_port_t port) { - thread_t thread = THREAD_NULL; + thread_t thread = THREAD_NULL; + + if (IP_VALID(port)) { + ip_lock(port); + if (ip_active(port)) { + thread = convert_port_to_thread_locked(port, PORT_TO_THREAD_NONE, TRUE); + } + ip_unlock(port); + } + + return thread; +} + +static thread_t +convert_port_to_thread_no_eval( + ipc_port_t port) +{ + thread_t thread = THREAD_NULL; + + if (IP_VALID(port)) { + ip_lock(port); + if (ip_active(port)) { + thread = convert_port_to_thread_locked(port, PORT_TO_THREAD_NONE, FALSE); + } + ip_unlock(port); + } + + return thread; +} + +/* + * Routine: convert_port_to_thread_inspect + * Purpose: + * Convert from a port to a thread inspect right + * Doesn't consume the port ref; produces a thread ref, + * which may be null. + * Conditions: + * Nothing locked. + */ +static thread_inspect_t +convert_port_to_thread_inspect_locked( + ipc_port_t port) +{ + thread_inspect_t thread = THREAD_INSPECT_NULL; + + ip_lock_held(port); + require_ip_active(port); + + if (ip_kotype(port) == IKOT_THREAD_CONTROL || + ip_kotype(port) == IKOT_THREAD_READ || + ip_kotype(port) == IKOT_THREAD_INSPECT) { + thread = (thread_inspect_t)port->ip_kobject; + assert(thread != THREAD_INSPECT_NULL); + thread_reference_internal((thread_t)thread); + } + + return thread; +} + +thread_inspect_t +convert_port_to_thread_inspect( + ipc_port_t port) +{ + thread_inspect_t thread = THREAD_INSPECT_NULL; + + if (IP_VALID(port)) { + ip_lock(port); + if (ip_active(port)) { + thread = convert_port_to_thread_inspect_locked(port); + } + ip_unlock(port); + } + + return thread; +} + +/* + * Routine: convert_port_to_thread_read + * Purpose: + * Convert from a port to a thread read right + * Doesn't consume the port ref; produces a thread ref, + * which may be null. + * Conditions: + * Nothing locked. + */ +static thread_read_t +convert_port_to_thread_read_locked( + ipc_port_t port) +{ + thread_read_t thread = THREAD_READ_NULL; + + ip_lock_held(port); + require_ip_active(port); + + if (ip_kotype(port) == IKOT_THREAD_CONTROL || + ip_kotype(port) == IKOT_THREAD_READ) { + thread = (thread_read_t) ip_get_kobject(port); + assert(thread != THREAD_READ_NULL); + + /* Use task conversion rules for thread control conversions */ + if (task_conversion_eval(current_task(), thread->task) != KERN_SUCCESS) { + return THREAD_READ_NULL; + } + + thread_reference_internal((thread_t)thread); + } + + return thread; +} + +thread_read_t +convert_port_to_thread_read( + ipc_port_t port) +{ + thread_read_t thread = THREAD_READ_NULL; if (IP_VALID(port)) { ip_lock(port); if (ip_active(port)) { - thread = convert_port_to_thread_locked(port, PORT_TO_THREAD_NONE); + thread = convert_port_to_thread_read_locked(port); } ip_unlock(port); } @@ -1928,59 +2834,80 @@ convert_port_to_thread( return thread; } + /* - * Routine: convert_port_to_thread_inspect + * Routine: convert_thread_to_port_with_flavor * Purpose: - * Convert from a port to a thread inspection right - * Doesn't consume the port ref; produces a thread ref, - * which may be null. + * Convert from a thread to a port of given flavor. + * Consumes a thread ref; produces a naked send right + * which may be invalid. * Conditions: * Nothing locked. 
*/ -thread_inspect_t -convert_port_to_thread_inspect( - ipc_port_t port) +static ipc_port_t +convert_thread_to_port_with_flavor( + thread_t thread, + mach_thread_flavor_t flavor) { - thread_inspect_t thread = THREAD_INSPECT_NULL; + ipc_port_t port = IP_NULL; - if (IP_VALID(port)) { - ip_lock(port); + thread_mtx_lock(thread); - if (ip_active(port) && - ip_kotype(port) == IKOT_THREAD) { - thread = (thread_inspect_t) ip_get_kobject(port); - assert(thread != THREAD_INSPECT_NULL); - thread_reference_internal((thread_t)thread); + if (thread->ith_self[THREAD_FLAVOR_CONTROL] == IP_NULL) { + goto exit; + } + + if (flavor == THREAD_FLAVOR_CONTROL) { + port = ipc_port_make_send(thread->ith_self[flavor]); + } else { + if (!thread->active) { + goto exit; } - ip_unlock(port); + ipc_kobject_type_t kotype = (flavor == THREAD_FLAVOR_READ) ? IKOT_THREAD_READ : IKOT_THREAD_INSPECT; + /* + * Claim a send right on the thread read/inspect port, and request a no-senders + * notification on that port (if none outstanding). A thread reference is not + * donated here even though the ports are created lazily because it doesn't own the + * kobject that it points to. Threads manage their lifetime explicitly and + * have to synchronize with each other, between the task/thread terminating and the + * send-once notification firing, and this is done under the thread mutex + * rather than with atomics. + */ + (void)ipc_kobject_make_send_lazy_alloc_port(&thread->ith_self[flavor], (ipc_kobject_t)thread, + kotype, false, 0); + port = thread->ith_self[flavor]; } - return thread; +exit: + thread_mtx_unlock(thread); + thread_deallocate(thread); + return port; } -/* - * Routine: convert_thread_inspect_to_port - * Purpose: - * Convert from a thread inspect reference to a port. - * Consumes a thread ref; - * As we never export thread inspect ports, always - * creates a NULL port. - * Conditions: - * Nothing locked. - */ +ipc_port_t +convert_thread_to_port( + thread_t thread) +{ + return convert_thread_to_port_with_flavor(thread, THREAD_FLAVOR_CONTROL); +} + +ipc_port_t +convert_thread_read_to_port(thread_read_t thread) +{ + return convert_thread_to_port_with_flavor(thread, THREAD_FLAVOR_READ); +} ipc_port_t convert_thread_inspect_to_port(thread_inspect_t thread) { - thread_deallocate(thread); - return IP_NULL; + return convert_thread_to_port_with_flavor(thread, THREAD_FLAVOR_INSPECT); } /* * Routine: port_name_to_thread * Purpose: - * Convert from a port name to an thread reference + * Convert from a port name to a thread reference * A name of MACH_PORT_NULL is valid for the null thread. * Conditions: * Nothing locked. @@ -1997,7 +2924,7 @@ port_name_to_thread( if (MACH_PORT_VALID(name)) { kr = ipc_port_translate_send(current_space(), name, &kport); if (kr == KERN_SUCCESS) { - thread = convert_port_to_thread_locked(kport, options); + thread = convert_port_to_thread_locked(kport, options, TRUE); ip_unlock(kport); } } @@ -2005,6 +2932,14 @@ port_name_to_thread( return thread; } +/* + * Routine: port_name_to_task + * Purpose: + * Convert from a port name to a task reference + * A name of MACH_PORT_NULL is valid for the null task. + * Conditions: + * Nothing locked. 
+ */ task_t port_name_to_task( mach_port_name_t name) @@ -2016,13 +2951,84 @@ port_name_to_task( if (MACH_PORT_VALID(name)) { kr = ipc_port_translate_send(current_space(), name, &kport); if (kr == KERN_SUCCESS) { - task = convert_port_to_task_locked(kport, NULL); + task = convert_port_to_task_locked(kport, NULL, TRUE); ip_unlock(kport); } } return task; } +/* + * Routine: port_name_to_task_read + * Purpose: + * Convert from a port name to a task reference + * A name of MACH_PORT_NULL is valid for the null task. + * Conditions: + * Nothing locked. + */ +task_read_t +port_name_to_task_read( + mach_port_name_t name) +{ + ipc_port_t kport; + kern_return_t kr; + task_read_t tr = TASK_READ_NULL; + + if (MACH_PORT_VALID(name)) { + kr = ipc_port_translate_send(current_space(), name, &kport); + if (kr == KERN_SUCCESS) { + tr = convert_port_to_task_read_locked(kport); + ip_unlock(kport); + } + } + return tr; +} + +/* + * Routine: port_name_to_task_read_no_eval + * Purpose: + * Convert from a port name to a task reference + * A name of MACH_PORT_NULL is valid for the null task. + * It doesn't run the task_conversion_eval check if the port + * is of type IKOT_TASK_CONTROL. + * Conditions: + * Nothing locked. + */ +task_read_t +port_name_to_task_read_no_eval( + mach_port_name_t name) +{ + ipc_port_t kport; + kern_return_t kr; + task_read_t tr = TASK_READ_NULL; + + if (MACH_PORT_VALID(name)) { + kr = ipc_port_translate_send(current_space(), name, &kport); + if (kr == KERN_SUCCESS) { + switch (ip_kotype(kport)) { + case IKOT_TASK_CONTROL: + tr = convert_port_to_task_locked(kport, NULL, FALSE); + break; + case IKOT_TASK_READ: + tr = convert_port_to_task_read_locked(kport); + break; + default: + break; + } + ip_unlock(kport); + } + } + return tr; +} + +/* + * Routine: port_name_to_task_inspect + * Purpose: + * Convert from a port name to a task reference + * A name of MACH_PORT_NULL is valid for the null task. + * Conditions: + * Nothing locked. + */ task_inspect_t port_name_to_task_inspect( mach_port_name_t name) @@ -2041,6 +3047,32 @@ port_name_to_task_inspect( return ti; } +/* + * Routine: port_name_to_task_name + * Purpose: + * Convert from a port name to a task reference + * A name of MACH_PORT_NULL is valid for the null task. + * Conditions: + * Nothing locked. + */ +task_name_t +port_name_to_task_name( + mach_port_name_t name) +{ + ipc_port_t kport; + kern_return_t kr; + task_name_t tn = TASK_NAME_NULL; + + if (MACH_PORT_VALID(name)) { + kr = ipc_port_translate_send(current_space(), name, &kport); + if (kr == KERN_SUCCESS) { + tn = convert_port_to_task_name_locked(kport); + ip_unlock(kport); + } + } + return tn; +} + /* * Routine: port_name_to_host * Purpose: @@ -2068,52 +3100,83 @@ port_name_to_host( } /* - * Routine: convert_task_to_port + * Routine: convert_task_to_port_with_flavor * Purpose: - * Convert from a task to a port. + * Convert from a task to a port of given flavor. * Consumes a task ref; produces a naked send right * which may be invalid. * Conditions: * Nothing locked.
*/ - -ipc_port_t -convert_task_to_port( - task_t task) +static ipc_port_t +convert_task_to_port_with_flavor( + task_t task, + mach_task_flavor_t flavor) { - ipc_port_t port; + ipc_port_t port = IP_NULL; + ipc_kobject_type_t kotype = IKOT_NONE; itk_lock(task); - if (task->itk_self != IP_NULL) { - port = ipc_port_make_send(task->itk_self); - } else { - port = IP_NULL; + switch (flavor) { + case TASK_FLAVOR_CONTROL: + case TASK_FLAVOR_NAME: + port = ipc_port_make_send(task->itk_self[flavor]); + break; + /* + * Claim a send right on the task read/inspect port, and request a no-senders + * notification on that port (if none outstanding). A task reference is + * deliberately not donated here because ipc_kobject_make_send_lazy_alloc_port + * is used only for convenience and these ports don't control the lifecycle of + * the task kobject. Instead, the task's itk_lock is used to synchronize the + * handling of the no-senders notification with the task termination. + */ + case TASK_FLAVOR_READ: + case TASK_FLAVOR_INSPECT: + if (task->itk_self[TASK_FLAVOR_CONTROL] == IP_NULL) { + /* task is either disabled or terminated */ + goto exit; + } + kotype = (flavor == TASK_FLAVOR_READ) ? IKOT_TASK_READ : IKOT_TASK_INSPECT; + (void)ipc_kobject_make_send_lazy_alloc_port((ipc_port_t *) &task->itk_self[flavor], + (ipc_kobject_t)task, kotype, true, OS_PTRAUTH_DISCRIMINATOR("task.itk_self")); + port = task->itk_self[flavor]; + + break; } +exit: itk_unlock(task); - task_deallocate(task); return port; } -/* - * Routine: convert_task_inspect_to_port - * Purpose: - * Convert from a task inspect reference to a port. - * Consumes a task ref; - * As we never export task inspect ports, always - * creates a NULL port. - * Conditions: - * Nothing locked. - */ +ipc_port_t +convert_task_to_port( + task_t task) +{ + return convert_task_to_port_with_flavor(task, TASK_FLAVOR_CONTROL); +} + +ipc_port_t +convert_task_read_to_port( + task_read_t task) +{ + return convert_task_to_port_with_flavor(task, TASK_FLAVOR_READ); +} + ipc_port_t convert_task_inspect_to_port( task_inspect_t task) { - task_deallocate(task); + return convert_task_to_port_with_flavor(task, TASK_FLAVOR_INSPECT); +} - return IP_NULL; +ipc_port_t +convert_task_name_to_port( + task_name_t task) +{ + return convert_task_to_port_with_flavor(task, TASK_FLAVOR_NAME); } /* @@ -2155,80 +3218,37 @@ convert_task_suspension_token_to_port( return port; } - -/* - * Routine: convert_task_name_to_port - * Purpose: - * Convert from a task name ref to a port. - * Consumes a task name ref; produces a naked send right - * which may be invalid. - * Conditions: - * Nothing locked. - */ - -ipc_port_t -convert_task_name_to_port( - task_name_t task_name) -{ - ipc_port_t port; - - itk_lock(task_name); - if (task_name->itk_nself != IP_NULL) { - port = ipc_port_make_send(task_name->itk_nself); - } else { - port = IP_NULL; - } - itk_unlock(task_name); - - task_name_deallocate(task_name); - return port; -} - /* - * Routine: convert_thread_to_port + * Routine: space_deallocate * Purpose: - * Convert from a thread to a port. - * Consumes an thread ref; produces a naked send right - * which may be invalid. + * Deallocate a space ref produced by convert_port_to_space. * Conditions: * Nothing locked. 
*/ -ipc_port_t -convert_thread_to_port( - thread_t thread) +void +space_deallocate( + ipc_space_t space) { - ipc_port_t port; - - thread_mtx_lock(thread); - - if (thread->ith_self != IP_NULL) { - port = ipc_port_make_send(thread->ith_self); - } else { - port = IP_NULL; + if (space != IS_NULL) { + is_release(space); } - - thread_mtx_unlock(thread); - - thread_deallocate(thread); - - return port; } /* - * Routine: space_deallocate + * Routine: space_read_deallocate * Purpose: - * Deallocate a space ref produced by convert_port_to_space. + * Deallocate a space read ref produced by convert_port_to_space_read. * Conditions: * Nothing locked. */ void -space_deallocate( - ipc_space_t space) +space_read_deallocate( + ipc_space_read_t space) { - if (space != IS_NULL) { - is_release(space); + if (space != IS_INSPECT_NULL) { + is_release((ipc_space_t)space); } } @@ -2249,6 +3269,7 @@ space_inspect_deallocate( } } + /* * Routine: thread/task_set_exception_ports [kernel call] * Purpose: @@ -2303,6 +3324,7 @@ thread_set_exception_ports( } } + /* * Check the validity of the thread_state_flavor by calling the * VALID_THREAD_STATE_FLAVOR architecture dependent macro defined in @@ -2398,6 +3420,7 @@ task_set_exception_ports( } } + /* * Check the validity of the thread_state_flavor by calling the * VALID_THREAD_STATE_FLAVOR architecture dependent macro defined in @@ -2413,7 +3436,7 @@ task_set_exception_ports( itk_lock(task); - if (task->itk_self == IP_NULL) { + if (task->itk_self[TASK_FLAVOR_CONTROL] == IP_NULL) { itk_unlock(task); return KERN_FAILURE; @@ -2523,6 +3546,7 @@ thread_swap_exception_ports( } } + if (new_flavor != 0 && !VALID_THREAD_STATE_FLAVOR(new_flavor)) { return KERN_INVALID_ARGUMENT; } @@ -2535,7 +3559,9 @@ thread_swap_exception_ports( if (!thread->active) { thread_mtx_unlock(thread); - +#if CONFIG_MACF + mac_exc_free_label(new_label); +#endif return KERN_FAILURE; } @@ -2644,6 +3670,7 @@ task_swap_exception_ports( } } + if (new_flavor != 0 && !VALID_THREAD_STATE_FLAVOR(new_flavor)) { return KERN_INVALID_ARGUMENT; } @@ -2654,9 +3681,11 @@ task_swap_exception_ports( itk_lock(task); - if (task->itk_self == IP_NULL) { + if (task->itk_self[TASK_FLAVOR_CONTROL] == IP_NULL) { itk_unlock(task); - +#if CONFIG_MACF + mac_exc_free_label(new_label); +#endif return KERN_FAILURE; } @@ -2738,6 +3767,15 @@ task_swap_exception_ports( * Illegal mask bit set. * KERN_FAILURE The thread is dead. 
*/ +kern_return_t +thread_get_exception_ports( + thread_t thread, + exception_mask_t exception_mask, + exception_mask_array_t masks, + mach_msg_type_number_t *CountCnt, + exception_port_array_t ports, + exception_behavior_array_t behaviors, + thread_state_flavor_array_t flavors); kern_return_t thread_get_exception_ports( @@ -2809,6 +3847,40 @@ done: return KERN_SUCCESS; } +kern_return_t +thread_get_exception_ports_from_user( + mach_port_t port, + exception_mask_t exception_mask, + exception_mask_array_t masks, + mach_msg_type_number_t *CountCnt, + exception_port_array_t ports, + exception_behavior_array_t behaviors, + thread_state_flavor_array_t flavors) +{ + kern_return_t kr; + + thread_t thread = convert_port_to_thread_check_type(port, NULL, THREAD_FLAVOR_CONTROL, FALSE); + + if (thread == THREAD_NULL) { + return KERN_INVALID_ARGUMENT; + } + + kr = thread_get_exception_ports(thread, exception_mask, masks, CountCnt, ports, behaviors, flavors); + + thread_deallocate(thread); + return kr; +} + +kern_return_t +task_get_exception_ports( + task_t task, + exception_mask_t exception_mask, + exception_mask_array_t masks, + mach_msg_type_number_t *CountCnt, + exception_port_array_t ports, + exception_behavior_array_t behaviors, + thread_state_flavor_array_t flavors); + kern_return_t task_get_exception_ports( task_t task, @@ -2831,7 +3903,7 @@ task_get_exception_ports( itk_lock(task); - if (task->itk_self == IP_NULL) { + if (task->itk_self[TASK_FLAVOR_CONTROL] == IP_NULL) { itk_unlock(task); return KERN_FAILURE; @@ -2873,3 +3945,27 @@ task_get_exception_ports( return KERN_SUCCESS; } + +kern_return_t +task_get_exception_ports_from_user( + mach_port_t port, + exception_mask_t exception_mask, + exception_mask_array_t masks, + mach_msg_type_number_t *CountCnt, + exception_port_array_t ports, + exception_behavior_array_t behaviors, + thread_state_flavor_array_t flavors) +{ + kern_return_t kr; + + task_t task = convert_port_to_task_check_type(port, NULL, TASK_FLAVOR_CONTROL, FALSE); + + if (task == TASK_NULL) { + return KERN_INVALID_ARGUMENT; + } + + kr = task_get_exception_ports(task, exception_mask, masks, CountCnt, ports, behaviors, flavors); + + task_deallocate(task); + return kr; +} diff --git a/osfmk/kern/ipc_tt.h b/osfmk/kern/ipc_tt.h index 5ad86b999..2ebe7e289 100644 --- a/osfmk/kern/ipc_tt.h +++ b/osfmk/kern/ipc_tt.h @@ -123,14 +123,47 @@ extern ipc_port_t retrieve_task_self_fast( extern ipc_port_t retrieve_thread_self_fast( thread_t thread); +/* Check port's type, and convert to correct task type */ +extern task_t convert_port_to_task_check_type( + ipc_port_t port, + ipc_kobject_type_t *kotype, + mach_task_flavor_t at_most, + int eval_check); + +/* Check port's type, and convert to correct thread type */ +extern thread_t convert_port_to_thread_check_type( + ipc_port_t port, + ipc_kobject_type_t *kotype, + mach_thread_flavor_t at_most, + int eval_check); + +/* Check port's type, and convert to correct ipc_space type */ +extern ipc_space_t convert_port_to_space_check_type( + ipc_port_t port, + ipc_kobject_type_t *kotype, + mach_task_flavor_t at_most, + int eval_check); + /* Convert from a port to a task name */ extern task_name_t convert_port_to_task_name( ipc_port_t port); +/* Convert from a port to a task for task_policy_set(). */ +extern task_policy_set_t convert_port_to_task_policy_set( + ipc_port_t port); + +/* Convert from a port to a task for task_policy_get(). 
*/ +extern task_policy_get_t convert_port_to_task_policy_get( + ipc_port_t port); + /* Convert from a port to a task inspect */ extern task_inspect_t convert_port_to_task_inspect( ipc_port_t port); +/* Convert from a port to a task read */ +extern task_read_t convert_port_to_task_read( + ipc_port_t port); + /* Convert from a port to a task */ extern task_t convert_port_to_task( ipc_port_t port); @@ -138,12 +171,22 @@ extern task_t convert_port_to_task( extern task_t convert_port_to_task_with_exec_token( ipc_port_t port, - uint32_t *exec_token); + uint32_t *exec_token, + boolean_t eval); extern task_t port_name_to_task( mach_port_name_t name); -extern task_t port_name_to_task_inspect( +extern task_read_t port_name_to_task_read( + mach_port_name_t name); + +extern task_read_t port_name_to_task_read_no_eval( + mach_port_name_t name); + +extern task_inspect_t port_name_to_task_inspect( + mach_port_name_t name); + +extern task_t port_name_to_task_name( mach_port_name_t name); extern host_t port_name_to_host( @@ -156,6 +199,10 @@ extern boolean_t ref_task_port_locked( extern ipc_space_t convert_port_to_space( ipc_port_t port); +/* Convert from a port to a space inspection right */ +extern ipc_space_read_t convert_port_to_space_read( + ipc_port_t port); + /* Convert from a port to a space inspection right */ extern ipc_space_inspect_t convert_port_to_space_inspect( ipc_port_t port); @@ -167,6 +214,14 @@ extern boolean_t ref_space_port_locked( extern vm_map_t convert_port_to_map( ipc_port_t port); +/* Convert from a port to a map read */ +extern vm_map_read_t convert_port_to_map_read( + ipc_port_t port); + +/* Convert from a port to a map inspect */ +extern vm_map_inspect_t convert_port_to_map_inspect( + ipc_port_t port); + /* Convert from a port to a thread */ extern thread_t convert_port_to_thread( ipc_port_t port); @@ -175,6 +230,10 @@ extern thread_t convert_port_to_thread( extern thread_inspect_t convert_port_to_thread_inspect( ipc_port_t port); +/* Convert from a port to a thread read */ +extern thread_read_t convert_port_to_thread_read( + ipc_port_t port); + __options_decl(port_to_thread_options_t, uint32_t, { PORT_TO_THREAD_NONE = 0x0000, PORT_TO_THREAD_IN_CURRENT_TASK = 0x0001, @@ -189,6 +248,9 @@ extern thread_t port_name_to_thread( extern void space_deallocate( ipc_space_t space); +extern void space_read_deallocate( + ipc_space_read_t space); + extern void space_inspect_deallocate( ipc_space_inspect_t space); diff --git a/osfmk/kern/kalloc.c b/osfmk/kern/kalloc.c index 31a5ec30f..6c4127fa6 100644 --- a/osfmk/kern/kalloc.c +++ b/osfmk/kern/kalloc.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2011 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2020 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -64,34 +64,44 @@ * to be used by the kernel to manage dynamic memory fast. 
*/ -#include - #include #include #include #include #include -#include +#include #include #include +#include #include #include #include -#include #include #include +#include -#ifdef MACH_BSD -zone_t kalloc_zone(vm_size_t); -#endif +/* #define KALLOC_DEBUG 1 */ #define KALLOC_MAP_SIZE_MIN (16 * 1024 * 1024) #define KALLOC_MAP_SIZE_MAX (128 * 1024 * 1024) -vm_map_t kalloc_map; -vm_size_t kalloc_max; -vm_size_t kalloc_max_prerounded; -vm_size_t kalloc_kernmap_size; /* size of kallocs that can come from kernel map */ + +static SECURITY_READ_ONLY_LATE(vm_offset_t) kalloc_map_min; +static SECURITY_READ_ONLY_LATE(vm_offset_t) kalloc_map_max; +static SECURITY_READ_ONLY_LATE(vm_size_t) kalloc_max; +SECURITY_READ_ONLY_LATE(vm_size_t) kalloc_max_prerounded; +/* size of kallocs that can come from kernel map */ +SECURITY_READ_ONLY_LATE(vm_size_t) kalloc_kernmap_size; +SECURITY_READ_ONLY_LATE(vm_map_t) kalloc_map; +#if DEBUG || DEVELOPMENT +static TUNABLE(bool, kheap_temp_debug, "kheap_temp_debug", false); + +#define KHT_BT_COUNT 14 +struct kheap_temp_header { + queue_chain_t kht_hdr_link; + uintptr_t kht_hdr_pcs[KHT_BT_COUNT]; +}; +#endif /* how many times we couldn't allocate out of kalloc_map and fell back to kernel_map */ unsigned long kalloc_fallback_count; @@ -102,31 +112,13 @@ vm_size_t kalloc_large_max; vm_size_t kalloc_largest_allocated = 0; uint64_t kalloc_large_sum; -int kalloc_fake_zone_index = -1; /* index of our fake zone in statistics arrays */ - -vm_offset_t kalloc_map_min; -vm_offset_t kalloc_map_max; +LCK_GRP_DECLARE(kalloc_lck_grp, "kalloc.large"); +LCK_SPIN_DECLARE(kalloc_lock, &kalloc_lck_grp); -#ifdef MUTEX_ZONE -/* - * Diagnostic code to track mutexes separately rather than via the 2^ zones - */ -zone_t lck_mtx_zone; -#endif - -static void -KALLOC_ZINFO_SALLOC(vm_size_t bytes) -{ - thread_t thr = current_thread(); - ledger_debit(thr->t_ledger, task_ledgers.tkm_shared, bytes); -} +#define kalloc_spin_lock() lck_spin_lock(&kalloc_lock) +#define kalloc_unlock() lck_spin_unlock(&kalloc_lock) -static void -KALLOC_ZINFO_SFREE(vm_size_t bytes) -{ - thread_t thr = current_thread(); - ledger_credit(thr->t_ledger, task_ledgers.tkm_shared, bytes); -} +#pragma mark initialization /* * All allocations of size less than kalloc_max are rounded to the next nearest @@ -144,13 +136,16 @@ KALLOC_ZINFO_SFREE(vm_size_t bytes) * from kernel map rather than kalloc_map. */ -#define KALLOC_MINALIGN (1 << KALLOC_LOG2_MINALIGN) #define KiB(x) (1024 * (x)) /* - * The k_zone_config table defines the configuration of zones on various platforms. + * The k_zone_cfg table defines the configuration of zones on various platforms. * The currently defined list of zones and their per-CPU caching behavior are as - * follows (X:zone not present; N:zone present no cpu-caching; Y:zone present with cpu-caching): + * follows + * + * X:zone not present + * N:zone present no cpu-caching + * Y:zone present with cpu-caching * * Size macOS(64-bit) embedded(32-bit) embedded(64-bit) *-------- ---------------- ---------------- ---------------- @@ -196,14 +191,19 @@ KALLOC_ZINFO_SFREE(vm_size_t bytes) * 32768 X X N * */ -static const struct kalloc_zone_config { +struct kalloc_zone_cfg { bool kzc_caching; - int kzc_size; + uint32_t kzc_size; const char *kzc_name; -} k_zone_config[] = { -#define KZC_ENTRY(SIZE, caching) { .kzc_caching = (caching), .kzc_size = (SIZE), .kzc_name = "kalloc." 
#SIZE } +}; +static SECURITY_READ_ONLY_LATE(struct kalloc_zone_cfg) k_zone_cfg[] = { +#define KZC_ENTRY(SIZE, caching) { \ + .kzc_caching = (caching), \ + .kzc_size = (SIZE), \ + .kzc_name = "kalloc." #SIZE \ +} -#if CONFIG_EMBEDDED +#if !defined(XNU_TARGET_OS_OSX) #if KALLOC_MINSIZE == 16 && KALLOC_LOG2_MINALIGN == 4 /* Zone config for embedded 64-bit platforms */ @@ -280,7 +280,7 @@ static const struct kalloc_zone_config { #error missing or invalid zone size parameters for kalloc #endif -#else /* CONFIG_EMBEDDED */ +#else /* !defined(XNU_TARGET_OS_OSX) */ /* Zone config for macOS 64-bit platforms */ KZC_ENTRY(16, true), @@ -311,61 +311,197 @@ static const struct kalloc_zone_config { KZC_ENTRY(12288, false), KZC_ENTRY(16384, false) -#endif /* CONFIG_EMBEDDED */ +#endif /* !defined(XNU_TARGET_OS_OSX) */ #undef KZC_ENTRY }; -#define MAX_K_ZONE (int)(sizeof(k_zone_config) / sizeof(k_zone_config[0])) +#define MAX_K_ZONE(kzc) (uint32_t)(sizeof(kzc) / sizeof(kzc[0])) /* * Many kalloc() allocations are for small structures containing a few - * pointers and longs - the k_zone_dlut[] direct lookup table, indexed by + * pointers and longs - the dlut[] direct lookup table, indexed by * size normalized to the minimum alignment, finds the right zone index * for them in one dereference. */ -#define INDEX_ZDLUT(size) \ - (((size) + KALLOC_MINALIGN - 1) / KALLOC_MINALIGN) -#define N_K_ZDLUT (2048 / KALLOC_MINALIGN) -/* covers sizes [0 .. 2048 - KALLOC_MINALIGN] */ -#define MAX_SIZE_ZDLUT ((N_K_ZDLUT - 1) * KALLOC_MINALIGN) +#define INDEX_ZDLUT(size) (((size) + KALLOC_MINALIGN - 1) / KALLOC_MINALIGN) +#define MAX_SIZE_ZDLUT ((KALLOC_DLUT_SIZE - 1) * KALLOC_MINALIGN) + +static SECURITY_READ_ONLY_LATE(zone_t) k_zone_default[MAX_K_ZONE(k_zone_cfg)]; +static SECURITY_READ_ONLY_LATE(zone_t) k_zone_data_buffers[MAX_K_ZONE(k_zone_cfg)]; +static SECURITY_READ_ONLY_LATE(zone_t) k_zone_kext[MAX_K_ZONE(k_zone_cfg)]; + +#if VM_MAX_TAG_ZONES +#if __LP64__ +static_assert(VM_MAX_TAG_ZONES >= + MAX_K_ZONE(k_zone_cfg) + MAX_K_ZONE(k_zone_cfg) + MAX_K_ZONE(k_zone_cfg)); +#else +static_assert(VM_MAX_TAG_ZONES >= MAX_K_ZONE(k_zone_cfg)); +#endif +#endif -static int8_t k_zone_dlut[N_K_ZDLUT]; /* table of indices into k_zone[] */ +const char * const kalloc_heap_names[] = { + [KHEAP_ID_NONE] = "", + [KHEAP_ID_DEFAULT] = "default.", + [KHEAP_ID_DATA_BUFFERS] = "data.", + [KHEAP_ID_KEXT] = "kext.", +}; /* - * If there's no hit in the DLUT, then start searching from k_zindex_start. 
+ * Default kalloc heap configuration */ -static int k_zindex_start; +static SECURITY_READ_ONLY_LATE(struct kheap_zones) kalloc_zones_default = { + .cfg = k_zone_cfg, + .heap_id = KHEAP_ID_DEFAULT, + .k_zone = k_zone_default, + .max_k_zone = MAX_K_ZONE(k_zone_cfg) +}; +SECURITY_READ_ONLY_LATE(struct kalloc_heap) KHEAP_DEFAULT[1] = { + { + .kh_zones = &kalloc_zones_default, + .kh_name = "default.", + .kh_heap_id = KHEAP_ID_DEFAULT, + } +}; -static zone_t k_zone[MAX_K_ZONE]; +KALLOC_HEAP_DEFINE(KHEAP_TEMP, "temp allocations", KHEAP_ID_DEFAULT); -/* #define KALLOC_DEBUG 1 */ -/* forward declarations */ +/* + * Bag of bytes heap configuration + */ +static SECURITY_READ_ONLY_LATE(struct kheap_zones) kalloc_zones_data_buffers = { + .cfg = k_zone_cfg, + .heap_id = KHEAP_ID_DATA_BUFFERS, + .k_zone = k_zone_data_buffers, + .max_k_zone = MAX_K_ZONE(k_zone_cfg) +}; +SECURITY_READ_ONLY_LATE(struct kalloc_heap) KHEAP_DATA_BUFFERS[1] = { + { + .kh_zones = &kalloc_zones_data_buffers, + .kh_name = "data.", + .kh_heap_id = KHEAP_ID_DATA_BUFFERS, + } +}; -lck_grp_t kalloc_lck_grp; -lck_mtx_t kalloc_lock; -#define kalloc_spin_lock() lck_mtx_lock_spin(&kalloc_lock) -#define kalloc_unlock() lck_mtx_unlock(&kalloc_lock) +/* + * Kext heap configuration + */ +static SECURITY_READ_ONLY_LATE(struct kheap_zones) kalloc_zones_kext = { + .cfg = k_zone_cfg, + .heap_id = KHEAP_ID_KEXT, + .k_zone = k_zone_kext, + .max_k_zone = MAX_K_ZONE(k_zone_cfg) +}; +SECURITY_READ_ONLY_LATE(struct kalloc_heap) KHEAP_KEXT[1] = { + { + .kh_zones = &kalloc_zones_kext, + .kh_name = "kext.", + .kh_heap_id = KHEAP_ID_KEXT, + } +}; +KALLOC_HEAP_DEFINE(KERN_OS_MALLOC, "kern_os_malloc", KHEAP_ID_KEXT); -/* OSMalloc local data declarations */ -static -queue_head_t OSMalloc_tag_list; +/* + * Initialize kalloc heap: Create zones, generate direct lookup table and + * do a quick test on lookups + */ +__startup_func +static void +kalloc_zones_init(struct kheap_zones *zones) +{ + struct kalloc_zone_cfg *cfg = zones->cfg; + zone_t *k_zone = zones->k_zone; + vm_size_t size; -lck_grp_t *OSMalloc_tag_lck_grp; -lck_mtx_t OSMalloc_tag_lock; + /* + * Allocate a zone for each size we are going to handle. + */ + for (uint32_t i = 0; i < zones->max_k_zone && + (size = cfg[i].kzc_size) < kalloc_max; i++) { + zone_create_flags_t flags = ZC_KASAN_NOREDZONE | + ZC_KASAN_NOQUARANTINE | ZC_KALLOC_HEAP; + if (cfg[i].kzc_caching) { + flags |= ZC_CACHING; + } -#define OSMalloc_tag_spin_lock() lck_mtx_lock_spin(&OSMalloc_tag_lock) -#define OSMalloc_tag_unlock() lck_mtx_unlock(&OSMalloc_tag_lock) + k_zone[i] = zone_create_ext(cfg[i].kzc_name, size, flags, + ZONE_ID_ANY, ^(zone_t z){ + z->kalloc_heap = zones->heap_id; + }); + /* + * Set the updated elem size back to the config + */ + cfg[i].kzc_size = k_zone[i]->z_elem_size; + } + /* + * Count all the "raw" views for zones in the heap. + */ + zone_view_count += zones->max_k_zone; + + /* + * Build the Direct LookUp Table for small allocations + * As k_zone_cfg is shared between the heaps the + * Direct LookUp Table is also shared and doesn't need to + * be rebuilt per heap. 
+ */ + size = 0; + for (int i = 0; i <= KALLOC_DLUT_SIZE; i++, size += KALLOC_MINALIGN) { + uint8_t zindex = 0; + + while ((vm_size_t)(cfg[zindex].kzc_size) < size) { + zindex++; + } + + if (i == KALLOC_DLUT_SIZE) { + zones->k_zindex_start = zindex; + break; + } + zones->dlut[i] = zindex; + } -/* OSMalloc forward declarations */ -void OSMalloc_init(void); -void OSMalloc_Tagref(OSMallocTag tag); -void OSMalloc_Tagrele(OSMallocTag tag); +#ifdef KALLOC_DEBUG + printf("kalloc_init: k_zindex_start %d\n", zones->k_zindex_start); + + /* + * Do a quick synthesis to see how well/badly we can + * find-a-zone for a given size. + * Useful when debugging/tweaking the array of zone sizes. + * Cache misses probably more critical than compare-branches! + */ + for (uint32_t i = 0; i < zones->max_k_zone; i++) { + vm_size_t testsize = (vm_size_t)(cfg[i].kzc_size - 1); + int compare = 0; + uint8_t zindex; + + if (testsize < MAX_SIZE_ZDLUT) { + compare += 1; /* 'if' (T) */ + + long dindex = INDEX_ZDLUT(testsize); + zindex = (int)zones->dlut[dindex]; + } else if (testsize < kalloc_max_prerounded) { + compare += 2; /* 'if' (F), 'if' (T) */ + + zindex = zones->k_zindex_start; + while ((vm_size_t)(cfg[zindex].kzc_size) < testsize) { + zindex++; + compare++; /* 'while' (T) */ + } + compare++; /* 'while' (F) */ + } else { + break; /* not zone-backed */ + } + zone_t z = k_zone[zindex]; + printf("kalloc_init: req size %4lu: %8s.%16s took %d compare%s\n", + (unsigned long)testsize, kalloc_heap_names[zones->heap_id], + z->z_name, compare, compare == 1 ? "" : "s"); + } +#endif +} /* * Initialize the memory allocator. This should be called only @@ -375,13 +511,13 @@ void OSMalloc_Tagrele(OSMallocTag tag); * This initializes all of the zones. */ -void -kalloc_init( - void) +__startup_func +static void +kalloc_init(void) { kern_return_t retval; vm_offset_t min; - vm_size_t size, kalloc_map_size; + vm_size_t kalloc_map_size; vm_map_kernel_flags_t vmk_flags; /* @@ -402,11 +538,8 @@ kalloc_init( vmk_flags.vmkf_permanent = TRUE; retval = kmem_suballoc(kernel_map, &min, kalloc_map_size, - FALSE, - (VM_FLAGS_ANYWHERE), - vmk_flags, - VM_KERN_MEMORY_KALLOC, - &kalloc_map); + FALSE, VM_FLAGS_ANYWHERE, vmk_flags, + VM_KERN_MEMORY_KALLOC, &kalloc_map); if (retval != KERN_SUCCESS) { panic("kalloc_init: kmem_suballoc failed"); @@ -415,7 +548,8 @@ kalloc_init( kalloc_map_min = min; kalloc_map_max = min + kalloc_map_size - 1; - kalloc_max = (k_zone_config[MAX_K_ZONE - 1].kzc_size << 1); + struct kheap_zones *khz_default = &kalloc_zones_default; + kalloc_max = (khz_default->cfg[khz_default->max_k_zone - 1].kzc_size << 1); if (kalloc_max < KiB(16)) { kalloc_max = KiB(16); } @@ -426,141 +560,99 @@ kalloc_init( kalloc_kernmap_size = (kalloc_max * 16) + 1; kalloc_largest_allocated = kalloc_kernmap_size; - /* - * Allocate a zone for each size we are going to handle. - */ - for (int i = 0; i < MAX_K_ZONE && (size = k_zone_config[i].kzc_size) < kalloc_max; i++) { - k_zone[i] = zinit(size, size, size, k_zone_config[i].kzc_name); + /* Initialize kalloc default heap */ + kalloc_zones_init(&kalloc_zones_default); - /* - * Don't charge the caller for the allocation, as we aren't sure how - * the memory will be handled. 
- */ - zone_change(k_zone[i], Z_CALLERACCT, FALSE); -#if VM_MAX_TAG_ZONES - if (zone_tagging_on) { - zone_change(k_zone[i], Z_TAGS_ENABLED, TRUE); - } -#endif - zone_change(k_zone[i], Z_KASAN_QUARANTINE, FALSE); - if (k_zone_config[i].kzc_caching) { - zone_change(k_zone[i], Z_CACHING_ENABLED, TRUE); - } + /* Initialize kalloc data buffers heap */ + if (ZSECURITY_OPTIONS_SUBMAP_USER_DATA & zsecurity_options) { + kalloc_zones_init(&kalloc_zones_data_buffers); + } else { + *KHEAP_DATA_BUFFERS = *KHEAP_DEFAULT; } - /* - * Build the Direct LookUp Table for small allocations - */ - size = 0; - for (int i = 0; i <= N_K_ZDLUT; i++, size += KALLOC_MINALIGN) { - int zindex = 0; - - while ((vm_size_t)k_zone_config[zindex].kzc_size < size) { - zindex++; - } - - if (i == N_K_ZDLUT) { - k_zindex_start = zindex; - break; - } - k_zone_dlut[i] = (int8_t)zindex; + /* Initialize kalloc kext heap */ + if (ZSECURITY_OPTIONS_SEQUESTER_KEXT_KALLOC & zsecurity_options) { + kalloc_zones_init(&kalloc_zones_kext); + } else { + *KHEAP_KEXT = *KHEAP_DEFAULT; } +} +STARTUP(ZALLOC, STARTUP_RANK_THIRD, kalloc_init); -#ifdef KALLOC_DEBUG - printf("kalloc_init: k_zindex_start %d\n", k_zindex_start); - /* - * Do a quick synthesis to see how well/badly we can - * find-a-zone for a given size. - * Useful when debugging/tweaking the array of zone sizes. - * Cache misses probably more critical than compare-branches! - */ - for (int i = 0; i < MAX_K_ZONE; i++) { - vm_size_t testsize = (vm_size_t)k_zone_config[i].kzc_size - 1; - int compare = 0; - int zindex; +#pragma mark accessors - if (testsize < MAX_SIZE_ZDLUT) { - compare += 1; /* 'if' (T) */ +static void +KALLOC_ZINFO_SALLOC(vm_size_t bytes) +{ + thread_t thr = current_thread(); + ledger_debit_thread(thr, thr->t_ledger, task_ledgers.tkm_shared, bytes); +} - long dindex = INDEX_ZDLUT(testsize); - zindex = (int)k_zone_dlut[dindex]; - } else if (testsize < kalloc_max_prerounded) { - compare += 2; /* 'if' (F), 'if' (T) */ +static void +KALLOC_ZINFO_SFREE(vm_size_t bytes) +{ + thread_t thr = current_thread(); + ledger_credit_thread(thr, thr->t_ledger, task_ledgers.tkm_shared, bytes); +} - zindex = k_zindex_start; - while ((vm_size_t)k_zone_config[zindex].kzc_size < testsize) { - zindex++; - compare++; /* 'while' (T) */ - } - compare++; /* 'while' (F) */ - } else { - break; /* not zone-backed */ - } - zone_t z = k_zone[zindex]; - printf("kalloc_init: req size %4lu: %11s took %d compare%s\n", - (unsigned long)testsize, z->zone_name, compare, - compare == 1 ? "" : "s"); +static inline vm_map_t +kalloc_map_for_addr(vm_address_t addr) +{ + if (addr >= kalloc_map_min && addr < kalloc_map_max) { + return kalloc_map; } -#endif - - lck_grp_init(&kalloc_lck_grp, "kalloc.large", LCK_GRP_ATTR_NULL); - lck_mtx_init(&kalloc_lock, &kalloc_lck_grp, LCK_ATTR_NULL); - OSMalloc_init(); -#ifdef MUTEX_ZONE - lck_mtx_zone = zinit(sizeof(struct _lck_mtx_), 1024 * 256, 4096, "lck_mtx"); -#endif + return kernel_map; } -/* - * Given an allocation size, return the kalloc zone it belongs to. - * Direct LookUp Table variant. - */ -static __inline zone_t -get_zone_dlut(vm_size_t size) +static inline vm_map_t +kalloc_map_for_size(vm_size_t size) { - long dindex = INDEX_ZDLUT(size); - int zindex = (int)k_zone_dlut[dindex]; - return k_zone[zindex]; + if (size < kalloc_kernmap_size) { + return kalloc_map; + } + return kernel_map; } -/* As above, but linear search k_zone_config[] for the next zone that fits. 
*/ - -static __inline zone_t -get_zone_search(vm_size_t size, int zindex) +zone_t +kalloc_heap_zone_for_size(kalloc_heap_t kheap, vm_size_t size) { - assert(size < kalloc_max_prerounded); + struct kheap_zones *khz = kheap->kh_zones; - while ((vm_size_t)k_zone_config[zindex].kzc_size < size) { - zindex++; + if (size < MAX_SIZE_ZDLUT) { + uint32_t zindex = khz->dlut[INDEX_ZDLUT(size)]; + return khz->k_zone[zindex]; } - assert(zindex < MAX_K_ZONE && - (vm_size_t)k_zone_config[zindex].kzc_size < kalloc_max); + if (size < kalloc_max_prerounded) { + uint32_t zindex = khz->k_zindex_start; + while (khz->cfg[zindex].kzc_size < size) { + zindex++; + } + assert(zindex < khz->max_k_zone); + return khz->k_zone[zindex]; + } - return k_zone[zindex]; + return ZONE_NULL; } static vm_size_t -vm_map_lookup_kalloc_entry_locked( - vm_map_t map, - void *addr) +vm_map_lookup_kalloc_entry_locked(vm_map_t map, void *addr) { - boolean_t ret; - vm_map_entry_t vm_entry = NULL; + vm_map_entry_t vm_entry = NULL; - ret = vm_map_lookup_entry(map, (vm_map_offset_t)addr, &vm_entry); - if (!ret) { - panic("Attempting to lookup/free an address not allocated via kalloc! (vm_map_lookup_entry() failed map: %p, addr: %p)\n", - map, addr); + if (!vm_map_lookup_entry(map, (vm_map_offset_t)addr, &vm_entry)) { + panic("address %p not allocated via kalloc, map %p", + addr, map); } if (vm_entry->vme_start != (vm_map_offset_t)addr) { - panic("Attempting to lookup/free the middle of a kalloc'ed element! (map: %p, addr: %p, entry: %p)\n", - map, addr, vm_entry); + panic("address %p inside vm entry %p [%p:%p), map %p", + addr, vm_entry, (void *)vm_entry->vme_start, + (void *)vm_entry->vme_end, map); } if (!vm_entry->vme_atomic) { - panic("Attempting to lookup/free an address not managed by kalloc! 
(map: %p, addr: %p, entry: %p)\n", - map, addr, vm_entry); + panic("address %p not managed by kalloc (entry %p, map %p)", + addr, vm_entry, map); } return vm_entry->vme_end - vm_entry->vme_start; } @@ -578,21 +670,17 @@ kalloc_size(void *addr) } #else vm_size_t -kalloc_size( - void *addr) +kalloc_size(void *addr) { - vm_map_t map; - vm_size_t size; + vm_map_t map; + vm_size_t size; size = zone_element_size(addr, NULL); if (size) { return size; } - if (((vm_offset_t)addr >= kalloc_map_min) && ((vm_offset_t)addr < kalloc_map_max)) { - map = kalloc_map; - } else { - map = kernel_map; - } + + map = kalloc_map_for_addr((vm_offset_t)addr); vm_map_lock_read(map); size = vm_map_lookup_kalloc_entry_locked(map, addr); vm_map_unlock_read(map); @@ -601,241 +689,404 @@ kalloc_size( #endif vm_size_t -kalloc_bucket_size( - vm_size_t size) +kalloc_bucket_size(vm_size_t size) { - zone_t z; - vm_map_t map; + zone_t z = kalloc_heap_zone_for_size(KHEAP_DEFAULT, size); + vm_map_t map = kalloc_map_for_size(size); - if (size < MAX_SIZE_ZDLUT) { - z = get_zone_dlut(size); - return z->elem_size; + if (z) { + return zone_elem_size(z); } + return vm_map_round_page(size, VM_MAP_PAGE_MASK(map)); +} - if (size < kalloc_max_prerounded) { - z = get_zone_search(size, k_zindex_start); - return z->elem_size; - } +#pragma mark kalloc - if (size >= kalloc_kernmap_size) { - map = kernel_map; - } else { - map = kalloc_map; +void +kheap_temp_leak_panic(thread_t self) +{ +#if DEBUG || DEVELOPMENT + if (__improbable(kheap_temp_debug)) { + struct kheap_temp_header *hdr = qe_dequeue_head(&self->t_temp_alloc_list, + struct kheap_temp_header, kht_hdr_link); + + panic_plain("KHEAP_TEMP leak on thread %p (%d), allocated at:\n" + " %#016lx\n" " %#016lx\n" " %#016lx\n" " %#016lx\n" + " %#016lx\n" " %#016lx\n" " %#016lx\n" " %#016lx\n" + " %#016lx\n" " %#016lx\n" " %#016lx\n" " %#016lx\n" + " %#016lx\n" " %#016lx\n", + self, self->t_temp_alloc_count, + hdr->kht_hdr_pcs[0], hdr->kht_hdr_pcs[1], + hdr->kht_hdr_pcs[2], hdr->kht_hdr_pcs[3], + hdr->kht_hdr_pcs[4], hdr->kht_hdr_pcs[5], + hdr->kht_hdr_pcs[6], hdr->kht_hdr_pcs[7], + hdr->kht_hdr_pcs[8], hdr->kht_hdr_pcs[9], + hdr->kht_hdr_pcs[10], hdr->kht_hdr_pcs[11], + hdr->kht_hdr_pcs[12], hdr->kht_hdr_pcs[13]); } - - return vm_map_round_page(size, VM_MAP_PAGE_MASK(map)); + panic("KHEAP_TEMP leak on thread %p (%d) " + "(boot with kheap_temp_debug=1 to debug)", + self, self->t_temp_alloc_count); +#else /* !DEBUG && !DEVELOPMENT */ + panic("KHEAP_TEMP leak on thread %p (%d)", + self, self->t_temp_alloc_count); +#endif /* !DEBUG && !DEVELOPMENT */ } -#if KASAN_KALLOC -vm_size_t -(kfree_addr)(void *addr) +__abortlike +static void +kheap_temp_overuse_panic(thread_t self) { - vm_size_t origsz = kalloc_size(addr); - kfree(addr, origsz); - return origsz; + panic("too many KHEAP_TEMP allocations in flight: %d", + self->t_temp_alloc_count); } -#else -vm_size_t -(kfree_addr)( - void *addr) -{ - vm_map_t map; - vm_size_t size = 0; - kern_return_t ret; - zone_t z; - size = zone_element_size(addr, &z); - if (size) { - DTRACE_VM3(kfree, vm_size_t, -1, vm_size_t, z->elem_size, void*, addr); - zfree(z, addr); - return size; +__attribute__((noinline)) +static struct kalloc_result +kalloc_large( + kalloc_heap_t kheap, + vm_size_t req_size, + vm_size_t size, + zalloc_flags_t flags, + vm_allocation_site_t *site) +{ + int kma_flags = KMA_ATOMIC | KMA_KOBJECT; + vm_tag_t tag = VM_KERN_MEMORY_KALLOC; + vm_map_t alloc_map; + vm_offset_t addr; + + if (flags & Z_NOFAIL) { + panic("trying to kalloc(Z_NOFAIL) with a large 
size (%zd)", + (size_t)size); + } + /* kmem_alloc could block so we return if noblock */ + if (flags & Z_NOWAIT) { + return (struct kalloc_result){ }; } - if (((vm_offset_t)addr >= kalloc_map_min) && ((vm_offset_t)addr < kalloc_map_max)) { - map = kalloc_map; - } else { - map = kernel_map; + if (flags & Z_NOPAGEWAIT) { + kma_flags |= KMA_NOPAGEWAIT; } - if ((vm_offset_t)addr < VM_MIN_KERNEL_AND_KEXT_ADDRESS) { - panic("kfree on an address not in the kernel & kext address range! addr: %p\n", addr); + if (flags & Z_ZERO) { + kma_flags |= KMA_ZERO; } - vm_map_lock(map); - size = vm_map_lookup_kalloc_entry_locked(map, addr); - ret = vm_map_remove_locked(map, - vm_map_trunc_page((vm_map_offset_t)addr, - VM_MAP_PAGE_MASK(map)), - vm_map_round_page((vm_map_offset_t)addr + size, - VM_MAP_PAGE_MASK(map)), - VM_MAP_REMOVE_KUNWIRE); - if (ret != KERN_SUCCESS) { - panic("vm_map_remove_locked() failed for kalloc vm_entry! addr: %p, map: %p ret: %d\n", - addr, map, ret); - } - vm_map_unlock(map); - DTRACE_VM3(kfree, vm_size_t, -1, vm_size_t, size, void*, addr); - - kalloc_spin_lock(); - assert(kalloc_large_total >= size); - kalloc_large_total -= size; - kalloc_large_inuse--; - kalloc_unlock(); - - KALLOC_ZINFO_SFREE(size); - return size; -} +#if KASAN_KALLOC + /* large allocation - use guard pages instead of small redzones */ + size = round_page(req_size + 2 * PAGE_SIZE); + assert(size >= MAX_SIZE_ZDLUT && size >= kalloc_max_prerounded); +#else + size = round_page(size); #endif -void * -kalloc_canblock( - vm_size_t *psize, - boolean_t canblock, - vm_allocation_site_t *site) -{ - zone_t z; - vm_size_t size; - void *addr; - vm_tag_t tag; + alloc_map = kalloc_map_for_size(size); - tag = VM_KERN_MEMORY_KALLOC; - size = *psize; + if (site) { + tag = vm_tag_alloc(site); + } -#if KASAN_KALLOC - /* expand the allocation to accomodate redzones */ - vm_size_t req_size = size; - size = kasan_alloc_resize(req_size); -#endif + if (kmem_alloc_flags(alloc_map, &addr, size, tag, kma_flags) != KERN_SUCCESS) { + if (alloc_map != kernel_map) { + if (kalloc_fallback_count++ == 0) { + printf("%s: falling back to kernel_map\n", __func__); + } + if (kmem_alloc_flags(kernel_map, &addr, size, tag, kma_flags) != KERN_SUCCESS) { + addr = 0; + } + } else { + addr = 0; + } + } - if (size < MAX_SIZE_ZDLUT) { - z = get_zone_dlut(size); - } else if (size < kalloc_max_prerounded) { - z = get_zone_search(size, k_zindex_start); - } else { + if (addr != 0) { + kalloc_spin_lock(); /* - * If size is too large for a zone, then use kmem_alloc. - * (We use kmem_alloc instead of kmem_alloc_kobject so that - * krealloc can use kmem_realloc.) 
+ * Thread-safe version of the workaround for 4740071 + * (a double FREE()) */ - vm_map_t alloc_map; + if (size > kalloc_largest_allocated) { + kalloc_largest_allocated = size; + } + + kalloc_large_inuse++; + assert(kalloc_large_total + size >= kalloc_large_total); /* no wrap around */ + kalloc_large_total += size; + kalloc_large_sum += size; - /* kmem_alloc could block so we return if noblock */ - if (!canblock) { - return NULL; + if (kalloc_large_total > kalloc_large_max) { + kalloc_large_max = kalloc_large_total; } + kalloc_unlock(); + + KALLOC_ZINFO_SALLOC(size); + } #if KASAN_KALLOC - /* large allocation - use guard pages instead of small redzones */ - size = round_page(req_size + 2 * PAGE_SIZE); - assert(size >= MAX_SIZE_ZDLUT && size >= kalloc_max_prerounded); + /* fixup the return address to skip the redzone */ + addr = kasan_alloc(addr, size, req_size, PAGE_SIZE); + /* + * Initialize buffer with unique pattern only if memory + * wasn't expected to be zeroed. + */ + if (!(flags & Z_ZERO)) { + kasan_leak_init(addr, req_size); + } #else - size = round_page(size); + req_size = size; #endif - if (size >= kalloc_kernmap_size) { - alloc_map = kernel_map; - } else { - alloc_map = kalloc_map; - } + if (addr && kheap == KHEAP_TEMP) { + thread_t self = current_thread(); - if (site) { - tag = vm_tag_alloc(site); + if (self->t_temp_alloc_count++ > UINT16_MAX) { + kheap_temp_overuse_panic(self); } - - if (kmem_alloc_flags(alloc_map, (vm_offset_t *)&addr, size, tag, KMA_ATOMIC) != KERN_SUCCESS) { - if (alloc_map != kernel_map) { - if (kalloc_fallback_count++ == 0) { - printf("%s: falling back to kernel_map\n", __func__); - } - if (kmem_alloc_flags(kernel_map, (vm_offset_t *)&addr, size, tag, KMA_ATOMIC) != KERN_SUCCESS) { - addr = NULL; - } - } else { - addr = NULL; - } +#if DEBUG || DEVELOPMENT + if (__improbable(kheap_temp_debug)) { + struct kheap_temp_header *hdr = (void *)addr; + enqueue_head(&self->t_temp_alloc_list, + &hdr->kht_hdr_link); + backtrace(hdr->kht_hdr_pcs, KHT_BT_COUNT, NULL); + req_size -= sizeof(struct kheap_temp_header); + addr += sizeof(struct kheap_temp_header); } +#endif /* DEBUG || DEVELOPMENT */ + } - if (addr != NULL) { - kalloc_spin_lock(); - /* - * Thread-safe version of the workaround for 4740071 - * (a double FREE()) - */ - if (size > kalloc_largest_allocated) { - kalloc_largest_allocated = size; - } - - kalloc_large_inuse++; - assert(kalloc_large_total + size >= kalloc_large_total); /* no wrap around */ - kalloc_large_total += size; - kalloc_large_sum += size; - - if (kalloc_large_total > kalloc_large_max) { - kalloc_large_max = kalloc_large_total; - } + DTRACE_VM3(kalloc, vm_size_t, size, vm_size_t, req_size, void*, addr); + return (struct kalloc_result){ .addr = (void *)addr, .size = req_size }; +} - kalloc_unlock(); +struct kalloc_result +kalloc_ext( + kalloc_heap_t kheap, + vm_size_t req_size, + zalloc_flags_t flags, + vm_allocation_site_t *site) +{ + vm_tag_t tag = VM_KERN_MEMORY_KALLOC; + vm_size_t size; + void *addr; + zone_t z; - KALLOC_ZINFO_SALLOC(size); +#if DEBUG || DEVELOPMENT + if (__improbable(kheap_temp_debug)) { + if (kheap == KHEAP_TEMP) { + req_size += sizeof(struct kheap_temp_header); } + } +#endif /* DEBUG || DEVELOPMENT */ + + /* + * Kasan for kalloc heaps will put the redzones *inside* + * the allocation, and hence augment its size. + * + * kalloc heaps do not use zone_t::kasan_redzone. 
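A caller-side sketch of the struct kalloc_result contract used by kalloc_ext(); the request size, the helper name and the reported bucket size are illustrative assumptions, not fixed values.

static void *
example_alloc(vm_size_t *out_size)
{
	VM_ALLOC_SITE_STATIC(0, 0);
	struct kalloc_result kr;

	kr = kalloc_ext(KHEAP_DEFAULT, 37, Z_WAITOK | Z_ZERO, &site);
	if (kr.addr == NULL) {
		return NULL;
	}
	/* kr.size is the usable size of the allocation: the element size of the
	 * zone the request landed in, or the page-rounded size for large ones */
	*out_size = kr.size;
	return kr.addr;
}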
+ */ #if KASAN_KALLOC - /* fixup the return address to skip the redzone */ - addr = (void *)kasan_alloc((vm_offset_t)addr, size, req_size, PAGE_SIZE); + size = kasan_alloc_resize(req_size); #else - *psize = size; + size = req_size; #endif - DTRACE_VM3(kalloc, vm_size_t, size, vm_size_t, *psize, void*, addr); - return addr; + z = kalloc_heap_zone_for_size(kheap, size); + if (__improbable(z == ZONE_NULL)) { + return kalloc_large(kheap, req_size, size, flags, site); } + #ifdef KALLOC_DEBUG - if (size > z->elem_size) { - panic("%s: z %p (%s) but requested size %lu", __func__, - z, z->zone_name, (unsigned long)size); + if (size > zone_elem_size(z)) { + panic("%s: z %p (%s%s) but requested size %lu", __func__, z, + kalloc_heap_names[kheap->kh_zones->heap_id], z->z_name, + (unsigned long)size); } #endif - - assert(size <= z->elem_size); + assert(size <= zone_elem_size(z)); #if VM_MAX_TAG_ZONES if (z->tags && site) { tag = vm_tag_alloc(site); - if (!canblock && !vm_allocation_zone_totals[tag]) { + if ((flags & (Z_NOWAIT | Z_NOPAGEWAIT)) && !vm_allocation_zone_totals[tag]) { tag = VM_KERN_MEMORY_KALLOC; } } #endif - - addr = zalloc_canblock_tag(z, canblock, size, tag); + addr = zalloc_ext(z, kheap->kh_stats ?: z->z_stats, + flags | Z_VM_TAG(tag), zone_elem_size(z) - size); #if KASAN_KALLOC - /* fixup the return address to skip the redzone */ - addr = (void *)kasan_alloc((vm_offset_t)addr, z->elem_size, req_size, KASAN_GUARD_SIZE); - - /* For KASan, the redzone lives in any additional space, so don't - * expand the allocation. */ + addr = (void *)kasan_alloc((vm_offset_t)addr, zone_elem_size(z), + req_size, KASAN_GUARD_SIZE); #else - *psize = z->elem_size; + req_size = zone_elem_size(z); #endif - DTRACE_VM3(kalloc, vm_size_t, size, vm_size_t, *psize, void*, addr); - return addr; + if (addr && kheap == KHEAP_TEMP) { + thread_t self = current_thread(); + + if (self->t_temp_alloc_count++ > UINT16_MAX) { + kheap_temp_overuse_panic(self); + } +#if DEBUG || DEVELOPMENT + if (__improbable(kheap_temp_debug)) { + struct kheap_temp_header *hdr = (void *)addr; + enqueue_head(&self->t_temp_alloc_list, + &hdr->kht_hdr_link); + backtrace(hdr->kht_hdr_pcs, KHT_BT_COUNT, NULL); + req_size -= sizeof(struct kheap_temp_header); + addr += sizeof(struct kheap_temp_header); + } +#endif /* DEBUG || DEVELOPMENT */ + } + + DTRACE_VM3(kalloc, vm_size_t, size, vm_size_t, req_size, void*, addr); + return (struct kalloc_result){ .addr = addr, .size = req_size }; } void * -kalloc_external( - vm_size_t size); +kalloc_external(vm_size_t size); void * -kalloc_external( - vm_size_t size) +kalloc_external(vm_size_t size) { - return kalloc_tag_bt(size, VM_KERN_MEMORY_KALLOC); + return kheap_alloc_tag_bt(KHEAP_KEXT, size, Z_WAITOK, VM_KERN_MEMORY_KALLOC); } -void -(kfree)( - void *data, - vm_size_t size) + +#pragma mark kfree + +__attribute__((noinline)) +static void +kfree_large(vm_offset_t addr, vm_size_t size) { + vm_map_t map = kalloc_map_for_addr(addr); + kern_return_t ret; + vm_offset_t end; + + if (addr < VM_MIN_KERNEL_AND_KEXT_ADDRESS || + os_add_overflow(addr, size, &end) || + end > VM_MAX_KERNEL_ADDRESS) { + panic("kfree: address range (%p, %ld) doesn't belong to the kernel", + (void *)addr, (uintptr_t)size); + } + + if (size == 0) { + vm_map_lock(map); + size = vm_map_lookup_kalloc_entry_locked(map, (void *)addr); + ret = vm_map_remove_locked(map, + vm_map_trunc_page(addr, VM_MAP_PAGE_MASK(map)), + vm_map_round_page(addr + size, VM_MAP_PAGE_MASK(map)), + VM_MAP_REMOVE_KUNWIRE); + if (ret != KERN_SUCCESS) { + panic("kfree: 
vm_map_remove_locked() failed for " + "addr: %p, map: %p ret: %d", (void *)addr, map, ret); + } + vm_map_unlock(map); + } else { + size = round_page(size); + + if (size > kalloc_largest_allocated) { + panic("kfree: size %lu > kalloc_largest_allocated %lu", + (uintptr_t)size, (uintptr_t)kalloc_largest_allocated); + } + kmem_free(map, addr, size); + } + + kalloc_spin_lock(); + + assert(kalloc_large_total >= size); + kalloc_large_total -= size; + kalloc_large_inuse--; + + kalloc_unlock(); + +#if !KASAN_KALLOC + DTRACE_VM3(kfree, vm_size_t, size, vm_size_t, size, void*, addr); +#endif + + KALLOC_ZINFO_SFREE(size); + return; +} + +__abortlike +static void +kfree_heap_confusion_panic(kalloc_heap_t kheap, void *data, size_t size, zone_t z) +{ + if (z->kalloc_heap == KHEAP_ID_NONE) { + panic("kfree: addr %p, size %zd found in regular zone '%s%s'", + data, size, zone_heap_name(z), z->z_name); + } else { + panic("kfree: addr %p, size %zd found in heap %s* instead of %s*", + data, size, zone_heap_name(z), + kalloc_heap_names[kheap->kh_heap_id]); + } +} + +__abortlike +static void +kfree_size_confusion_panic(zone_t z, void *data, size_t size, size_t zsize) +{ + if (z) { + panic("kfree: addr %p, size %zd found in zone '%s%s' " + "with elem_size %zd", + data, size, zone_heap_name(z), z->z_name, zsize); + } else { + panic("kfree: addr %p, size %zd not found in any zone", + data, size); + } +} + +__abortlike +static void +kfree_size_invalid_panic(void *data, size_t size) +{ + panic("kfree: addr %p trying to free with nonsensical size %zd", + data, size); +} + +__abortlike +static void +krealloc_size_invalid_panic(void *data, size_t size) +{ + panic("krealloc: addr %p trying to free with nonsensical size %zd", + data, size); +} + +__abortlike +static void +kfree_temp_imbalance_panic(void *data, size_t size) +{ + panic("kfree: KHEAP_TEMP allocation imbalance freeing addr %p, size %zd", + data, size); +} + +/* used to implement kheap_free_addr() */ +#define KFREE_UNKNOWN_SIZE ((vm_size_t)~0) +#define KFREE_ABSURD_SIZE \ + ((VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_AND_KEXT_ADDRESS) / 2) + +static void +kfree_ext(kalloc_heap_t kheap, void *data, vm_size_t size) +{ + zone_stats_t zs = NULL; zone_t z; + vm_size_t zsize; + + if (__improbable(data == NULL)) { + return; + } + + if (kheap == KHEAP_TEMP) { + assert(size != KFREE_UNKNOWN_SIZE); + if (current_thread()->t_temp_alloc_count-- == 0) { + kfree_temp_imbalance_panic(data, size); + } +#if DEBUG || DEVELOPMENT + if (__improbable(kheap_temp_debug)) { + size += sizeof(struct kheap_temp_header); + data -= sizeof(struct kheap_temp_header); + remqueue(&((struct kheap_temp_header *)data)->kht_hdr_link); + } +#endif /* DEBUG || DEVELOPMENT */ + } #if KASAN_KALLOC /* @@ -843,6 +1094,9 @@ void * quarantine. `data` may then point to a different allocation. 
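A rough sketch of the two freeing paths that kfree_ext() in this hunk distinguishes with the KFREE_UNKNOWN_SIZE sentinel; the sizes and variable names are hypothetical.

static void
example_free_paths(void)
{
	void *buf  = kheap_alloc(KHEAP_DEFAULT, 128, Z_WAITOK);
	void *blob = kheap_alloc(KHEAP_DEFAULT, 128, Z_WAITOK);

	/* size known at the call site: validated against the zone element size */
	kheap_free(KHEAP_DEFAULT, buf, 128);

	/* size unknown: kfree_ext() is entered with KFREE_UNKNOWN_SIZE and
	 * recovers it from zone_element_size(), or from the kalloc VM map
	 * entry for large allocations */
	kheap_free_addr(KHEAP_DEFAULT, blob);
}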
*/ vm_size_t user_size = size; + if (size == KFREE_UNKNOWN_SIZE) { + user_size = size = kalloc_size(data); + } kasan_check_free((vm_address_t)data, size, KASAN_HEAP_KALLOC); data = (void *)kasan_dealloc((vm_address_t)data, &size); kasan_free(&data, &size, KASAN_HEAP_KALLOC, NULL, user_size, true); @@ -851,88 +1105,195 @@ void } #endif - if (size < MAX_SIZE_ZDLUT) { - z = get_zone_dlut(size); - } else if (size < kalloc_max_prerounded) { - z = get_zone_search(size, k_zindex_start); - } else { - /* if size was too large for a zone, then use kmem_free */ - - vm_map_t alloc_map = kernel_map; - size = round_page(size); + if (size >= kalloc_max_prerounded && size != KFREE_UNKNOWN_SIZE) { + return kfree_large((vm_offset_t)data, size); + } - if ((((vm_offset_t) data) >= kalloc_map_min) && (((vm_offset_t) data) <= kalloc_map_max)) { - alloc_map = kalloc_map; - } - if (size > kalloc_largest_allocated) { - panic("kfree: size %lu > kalloc_largest_allocated %lu", (unsigned long)size, (unsigned long)kalloc_largest_allocated); + zsize = zone_element_size(data, &z); + if (size == KFREE_UNKNOWN_SIZE) { + if (zsize == 0) { + return kfree_large((vm_offset_t)data, 0); } - kmem_free(alloc_map, (vm_offset_t)data, size); - kalloc_spin_lock(); - - assert(kalloc_large_total >= size); - kalloc_large_total -= size; - kalloc_large_inuse--; + size = zsize; + } else if (size > zsize) { + kfree_size_confusion_panic(z, data, size, zsize); + } - kalloc_unlock(); + if (kheap != KHEAP_ANY) { + if (kheap->kh_heap_id != z->kalloc_heap) { + kfree_heap_confusion_panic(kheap, data, size, z); + } + zs = kheap->kh_stats; + } else if (z->kalloc_heap != KHEAP_ID_DEFAULT && + z->kalloc_heap != KHEAP_ID_KEXT) { + kfree_heap_confusion_panic(kheap, data, size, z); + } #if !KASAN_KALLOC - DTRACE_VM3(kfree, vm_size_t, size, vm_size_t, size, void*, data); + DTRACE_VM3(kfree, vm_size_t, size, vm_size_t, zsize, void*, data); #endif + zfree_ext(z, zs ?: z->z_stats, data); +} - KALLOC_ZINFO_SFREE(size); - return; +void +(kfree)(void *addr, vm_size_t size) +{ + if (size > KFREE_ABSURD_SIZE) { + kfree_size_invalid_panic(addr, size); } + kfree_ext(KHEAP_ANY, addr, size); +} - /* free to the appropriate zone */ -#ifdef KALLOC_DEBUG - if (size > z->elem_size) { - panic("%s: z %p (%s) but requested size %lu", __func__, - z, z->zone_name, (unsigned long)size); +void +(kheap_free)(kalloc_heap_t kheap, void *addr, vm_size_t size) +{ + if (size > KFREE_ABSURD_SIZE) { + kfree_size_invalid_panic(addr, size); } -#endif - assert(size <= z->elem_size); -#if !KASAN_KALLOC - DTRACE_VM3(kfree, vm_size_t, size, vm_size_t, z->elem_size, void*, data); -#endif - zfree(z, data); + kfree_ext(kheap, addr, size); } -#ifdef MACH_BSD -zone_t -kalloc_zone( - vm_size_t size) +void +(kheap_free_addr)(kalloc_heap_t kheap, void *addr) { - if (size < MAX_SIZE_ZDLUT) { - return get_zone_dlut(size); + kfree_ext(kheap, addr, KFREE_UNKNOWN_SIZE); +} + +static struct kalloc_result +_krealloc_ext( + kalloc_heap_t kheap, + void *addr, + vm_size_t old_size, + vm_size_t new_size, + zalloc_flags_t flags, + vm_allocation_site_t *site) +{ + vm_size_t old_bucket_size, new_bucket_size, min_size; + struct kalloc_result kr; + + if (new_size == 0) { + kfree_ext(kheap, addr, old_size); + return (struct kalloc_result){ }; } - if (size <= kalloc_max) { - return get_zone_search(size, k_zindex_start); + + if (addr == NULL) { + return kalloc_ext(kheap, new_size, flags, site); } - return ZONE_NULL; -} + + /* + * Find out the size of the bucket in which the new sized allocation + * would land. 
If it matches the bucket of the original allocation, + * simply return the same address. + */ + new_bucket_size = kalloc_bucket_size(new_size); + if (old_size == KFREE_UNKNOWN_SIZE) { + old_size = old_bucket_size = kalloc_size(addr); + } else { + old_bucket_size = kalloc_bucket_size(old_size); + } + min_size = MIN(old_size, new_size); + + if (old_bucket_size == new_bucket_size) { + kr.addr = addr; +#if KASAN_KALLOC + kr.size = new_size; +#else + kr.size = new_bucket_size; #endif + } else { + kr = kalloc_ext(kheap, new_size, flags & ~Z_ZERO, site); + if (kr.addr == NULL) { + return kr; + } + memcpy(kr.addr, addr, min_size); + kfree_ext(kheap, addr, old_size); + } + if ((flags & Z_ZERO) && kr.size > min_size) { + bzero(kr.addr + min_size, kr.size - min_size); + } + return kr; +} + +struct kalloc_result +krealloc_ext( + kalloc_heap_t kheap, + void *addr, + vm_size_t old_size, + vm_size_t new_size, + zalloc_flags_t flags, + vm_allocation_site_t *site) +{ + if (old_size > KFREE_ABSURD_SIZE) { + krealloc_size_invalid_panic(addr, old_size); + } + return _krealloc_ext(kheap, addr, old_size, new_size, flags, site); +} + +struct kalloc_result +kheap_realloc_addr( + kalloc_heap_t kheap, + void *addr, + vm_size_t size, + zalloc_flags_t flags, + vm_allocation_site_t *site) +{ + return _krealloc_ext(kheap, addr, KFREE_UNKNOWN_SIZE, size, flags, site); +} + +__startup_func void -OSMalloc_init( - void) +kheap_startup_init(kalloc_heap_t kheap) { - queue_init(&OSMalloc_tag_list); + struct kheap_zones *zones; + + switch (kheap->kh_heap_id) { + case KHEAP_ID_DEFAULT: + zones = KHEAP_DEFAULT->kh_zones; + break; + case KHEAP_ID_DATA_BUFFERS: + zones = KHEAP_DATA_BUFFERS->kh_zones; + break; + case KHEAP_ID_KEXT: + zones = KHEAP_KEXT->kh_zones; + break; + default: + panic("kalloc_heap_startup_init: invalid KHEAP_ID: %d", + kheap->kh_heap_id); + } + + kheap->kh_heap_id = zones->heap_id; + kheap->kh_zones = zones; + kheap->kh_stats = zalloc_percpu_permanent_type(struct zone_stats); + kheap->kh_next = zones->views; + zones->views = kheap; - OSMalloc_tag_lck_grp = lck_grp_alloc_init("OSMalloc_tag", LCK_GRP_ATTR_NULL); - lck_mtx_init(&OSMalloc_tag_lock, OSMalloc_tag_lck_grp, LCK_ATTR_NULL); + zone_view_count += 1; } +#pragma mark OSMalloc +/* + * This is a deprecated interface, here only for legacy reasons. + * There is no internal variant of any of these symbols on purpose. 
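A hedged sketch of the bucket-matching behaviour implemented by _krealloc_ext() above; whether two sizes share a kalloc bucket depends on the zone configuration, so the specific sizes here are only illustrative.

static void
example_grow(void)
{
	void *p = kheap_alloc(KHEAP_DEFAULT, 40, Z_WAITOK);

	/* 40 -> 60 will typically stay in the same bucket: same pointer back */
	p = kheap_realloc(KHEAP_DEFAULT, p, 40, 60, Z_WAITOK);

	/* 60 -> 200 crosses a bucket boundary: new allocation, memcpy, free */
	p = kheap_realloc(KHEAP_DEFAULT, p, 60, 200, Z_WAITOK);

	kheap_free(KHEAP_DEFAULT, p, 200);
}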
+ */ +#define OSMallocDeprecated +#include + +static KALLOC_HEAP_DEFINE(OSMALLOC, "osmalloc", KHEAP_ID_KEXT); +static queue_head_t OSMalloc_tag_list = QUEUE_HEAD_INITIALIZER(OSMalloc_tag_list); +static LCK_GRP_DECLARE(OSMalloc_tag_lck_grp, "OSMalloc_tag"); +static LCK_SPIN_DECLARE(OSMalloc_tag_lock, &OSMalloc_tag_lck_grp); + +#define OSMalloc_tag_spin_lock() lck_spin_lock(&OSMalloc_tag_lock) +#define OSMalloc_tag_unlock() lck_spin_unlock(&OSMalloc_tag_lock) + +extern typeof(OSMalloc_Tagalloc) OSMalloc_Tagalloc_external; OSMallocTag -OSMalloc_Tagalloc( - const char *str, - uint32_t flags) +OSMalloc_Tagalloc_external(const char *str, uint32_t flags) { - OSMallocTag OSMTag; - - OSMTag = (OSMallocTag)kalloc(sizeof(*OSMTag)); + OSMallocTag OSMTag; - bzero((void *)OSMTag, sizeof(*OSMTag)); + OSMTag = kheap_alloc(OSMALLOC, sizeof(*OSMTag), Z_WAITOK | Z_ZERO); if (flags & OSMT_PAGEABLE) { OSMTag->OSMT_attr = OSMT_ATTR_PAGEABLE; @@ -949,69 +1310,75 @@ OSMalloc_Tagalloc( return OSMTag; } -void -OSMalloc_Tagref( - OSMallocTag tag) +static void +OSMalloc_Tagref(OSMallocTag tag) { if (!((tag->OSMT_state & OSMT_VALID_MASK) == OSMT_VALID)) { - panic("OSMalloc_Tagref():'%s' has bad state 0x%08X\n", tag->OSMT_name, tag->OSMT_state); + panic("OSMalloc_Tagref():'%s' has bad state 0x%08X\n", + tag->OSMT_name, tag->OSMT_state); } os_atomic_inc(&tag->OSMT_refcnt, relaxed); } -void -OSMalloc_Tagrele( - OSMallocTag tag) +static void +OSMalloc_Tagrele(OSMallocTag tag) { if (!((tag->OSMT_state & OSMT_VALID_MASK) == OSMT_VALID)) { - panic("OSMalloc_Tagref():'%s' has bad state 0x%08X\n", tag->OSMT_name, tag->OSMT_state); + panic("OSMalloc_Tagref():'%s' has bad state 0x%08X\n", + tag->OSMT_name, tag->OSMT_state); } - if (os_atomic_dec(&tag->OSMT_refcnt, relaxed) == 0) { - if (os_atomic_cmpxchg(&tag->OSMT_state, OSMT_VALID | OSMT_RELEASED, OSMT_VALID | OSMT_RELEASED, acq_rel)) { - OSMalloc_tag_spin_lock(); - (void)remque((queue_entry_t)tag); - OSMalloc_tag_unlock(); - kfree(tag, sizeof(*tag)); - } else { - panic("OSMalloc_Tagrele():'%s' has refcnt 0\n", tag->OSMT_name); - } + if (os_atomic_dec(&tag->OSMT_refcnt, relaxed) != 0) { + return; + } + + if (os_atomic_cmpxchg(&tag->OSMT_state, + OSMT_VALID | OSMT_RELEASED, OSMT_VALID | OSMT_RELEASED, acq_rel)) { + OSMalloc_tag_spin_lock(); + (void)remque((queue_entry_t)tag); + OSMalloc_tag_unlock(); + kheap_free(OSMALLOC, tag, sizeof(*tag)); + } else { + panic("OSMalloc_Tagrele():'%s' has refcnt 0\n", tag->OSMT_name); } } +extern typeof(OSMalloc_Tagfree) OSMalloc_Tagfree_external; void -OSMalloc_Tagfree( - OSMallocTag tag) +OSMalloc_Tagfree_external(OSMallocTag tag) { - if (!os_atomic_cmpxchg(&tag->OSMT_state, OSMT_VALID, OSMT_VALID | OSMT_RELEASED, acq_rel)) { - panic("OSMalloc_Tagfree():'%s' has bad state 0x%08X \n", tag->OSMT_name, tag->OSMT_state); + if (!os_atomic_cmpxchg(&tag->OSMT_state, + OSMT_VALID, OSMT_VALID | OSMT_RELEASED, acq_rel)) { + panic("OSMalloc_Tagfree():'%s' has bad state 0x%08X \n", + tag->OSMT_name, tag->OSMT_state); } if (os_atomic_dec(&tag->OSMT_refcnt, relaxed) == 0) { OSMalloc_tag_spin_lock(); (void)remque((queue_entry_t)tag); OSMalloc_tag_unlock(); - kfree(tag, sizeof(*tag)); + kheap_free(OSMALLOC, tag, sizeof(*tag)); } } +extern typeof(OSMalloc) OSMalloc_external; void * -OSMalloc( - uint32_t size, - OSMallocTag tag) +OSMalloc_external( + uint32_t size, OSMallocTag tag) { - void *addr = NULL; + void *addr = NULL; kern_return_t kr; OSMalloc_Tagref(tag); - if ((tag->OSMT_attr & OSMT_PAGEABLE) - && (size & ~PAGE_MASK)) { - if ((kr = 
kmem_alloc_pageable_external(kernel_map, (vm_offset_t *)&addr, size)) != KERN_SUCCESS) { + if ((tag->OSMT_attr & OSMT_PAGEABLE) && (size & ~PAGE_MASK)) { + if ((kr = kmem_alloc_pageable_external(kernel_map, + (vm_offset_t *)&addr, size)) != KERN_SUCCESS) { addr = NULL; } } else { - addr = kalloc_tag_bt((vm_size_t)size, VM_KERN_MEMORY_KALLOC); + addr = kheap_alloc_tag_bt(OSMALLOC, size, + Z_WAITOK, VM_KERN_MEMORY_KALLOC); } if (!addr) { @@ -1021,10 +1388,9 @@ OSMalloc( return addr; } +extern typeof(OSMalloc_nowait) OSMalloc_nowait_external; void * -OSMalloc_nowait( - uint32_t size, - OSMallocTag tag) +OSMalloc_nowait_external(uint32_t size, OSMallocTag tag) { void *addr = NULL; @@ -1034,7 +1400,8 @@ OSMalloc_nowait( OSMalloc_Tagref(tag); /* XXX: use non-blocking kalloc for now */ - addr = kalloc_noblock_tag_bt((vm_size_t)size, VM_KERN_MEMORY_KALLOC); + addr = kheap_alloc_tag_bt(OSMALLOC, (vm_size_t)size, + Z_NOWAIT, VM_KERN_MEMORY_KALLOC); if (addr == NULL) { OSMalloc_Tagrele(tag); } @@ -1042,10 +1409,9 @@ OSMalloc_nowait( return addr; } +extern typeof(OSMalloc_noblock) OSMalloc_noblock_external; void * -OSMalloc_noblock( - uint32_t size, - OSMallocTag tag) +OSMalloc_noblock_external(uint32_t size, OSMallocTag tag) { void *addr = NULL; @@ -1054,7 +1420,8 @@ OSMalloc_noblock( } OSMalloc_Tagref(tag); - addr = kalloc_noblock_tag_bt((vm_size_t)size, VM_KERN_MEMORY_KALLOC); + addr = kheap_alloc_tag_bt(OSMALLOC, (vm_size_t)size, + Z_NOWAIT, VM_KERN_MEMORY_KALLOC); if (addr == NULL) { OSMalloc_Tagrele(tag); } @@ -1062,25 +1429,85 @@ OSMalloc_noblock( return addr; } +extern typeof(OSFree) OSFree_external; void -OSFree( - void *addr, - uint32_t size, - OSMallocTag tag) +OSFree_external(void *addr, uint32_t size, OSMallocTag tag) { if ((tag->OSMT_attr & OSMT_PAGEABLE) && (size & ~PAGE_MASK)) { kmem_free(kernel_map, (vm_offset_t)addr, size); } else { - kfree(addr, size); + kheap_free(OSMALLOC, addr, size); } OSMalloc_Tagrele(tag); } -uint32_t -OSMalloc_size( - void *addr) +#pragma mark kern_os_malloc + +void * +kern_os_malloc_external(size_t size); +void * +kern_os_malloc_external(size_t size) { - return (uint32_t)kalloc_size(addr); + if (size == 0) { + return NULL; + } + + return kheap_alloc_tag_bt(KERN_OS_MALLOC, size, Z_WAITOK | Z_ZERO, + VM_KERN_MEMORY_LIBKERN); +} + +void +kern_os_free_external(void *addr); +void +kern_os_free_external(void *addr) +{ + kheap_free_addr(KERN_OS_MALLOC, addr); +} + +void * +kern_os_realloc_external(void *addr, size_t nsize); +void * +kern_os_realloc_external(void *addr, size_t nsize) +{ + VM_ALLOC_SITE_STATIC(VM_TAG_BT, VM_KERN_MEMORY_LIBKERN); + + return kheap_realloc_addr(KERN_OS_MALLOC, addr, nsize, + Z_WAITOK | Z_ZERO, &site).addr; +} + +void +kern_os_zfree(zone_t zone, void *addr, vm_size_t size) +{ + if (zsecurity_options & ZSECURITY_OPTIONS_STRICT_IOKIT_FREE + || zone_owns(zone, addr)) { + zfree(zone, addr); + } else { + /* + * Third party kexts might not know about the operator new + * and be allocated from the KEXT heap + */ + printf("kern_os_zfree: kheap_free called for object from zone %s\n", + zone->z_name); + kheap_free(KHEAP_KEXT, addr, size); + } +} + +void +kern_os_kfree(void *addr, vm_size_t size) +{ + if (zsecurity_options & ZSECURITY_OPTIONS_STRICT_IOKIT_FREE) { + kheap_free(KHEAP_DEFAULT, addr, size); + } else { + /* + * Third party kexts may not know about newly added operator + * default new/delete. If they call new for any iokit object + * it will end up coming from the KEXT heap. 
If these objects + * are freed by calling release() or free(), the internal + * version of operator delete is called and the kernel ends + * up freeing the object to the DEFAULT heap. + */ + kheap_free(KHEAP_ANY, addr, size); + } } diff --git a/osfmk/kern/kalloc.h b/osfmk/kern/kalloc.h index 0a1d56917..f4fe20e33 100644 --- a/osfmk/kern/kalloc.h +++ b/osfmk/kern/kalloc.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2020 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -61,146 +61,347 @@ #include #include -#include #include +#include __BEGIN_DECLS #if XNU_KERNEL_PRIVATE -extern void * -kalloc_canblock( - vm_size_t * size, - boolean_t canblock, - vm_allocation_site_t * site); +/*! + * @typedef kalloc_heap_t + * + * @abstract + * A kalloc heap view represents a sub-accounting context + * for a given kalloc heap. + */ +typedef struct kalloc_heap { + struct kheap_zones *kh_zones; + zone_stats_t kh_stats; + const char *kh_name; + struct kalloc_heap *kh_next; + zone_kheap_id_t kh_heap_id; +} *kalloc_heap_t; + +/*! + * @macro KALLOC_HEAP_DECLARE + * + * @abstract + * (optionally) declare a kalloc heap view in a header. + * + * @discussion + * Unlike kernel zones, new full blown heaps cannot be instantiated. + * However new accounting views of the base heaps can be made. + */ +#define KALLOC_HEAP_DECLARE(var) \ + extern struct kalloc_heap var[1] -extern vm_size_t -kalloc_size( - void * addr); +/** + * @const KHEAP_ANY + * + * @brief + * A value that represents either the default or kext heap for codepaths that + * need to allow @c kheap_free() to either one. + * + * @discussion + * When the memory provenance is not known, this value can be used to free + * memory indiscriminately. + * + * Note: code using this constant can likely be used as a gadget to free + * arbitrary memory and its use is strongly discouraged. + */ +#define KHEAP_ANY ((struct kalloc_heap *)NULL) -extern vm_size_t -kfree_addr( - void * addr); +/** + * @const KHEAP_DATA_BUFFERS + * + * @brief + * The builtin heap for bags of pure bytes. + * + * @discussion + * This set of kalloc zones should contain pure bags of bytes with no pointers + * or length/offset fields. + * + * The zones forming the heap aren't sequestered from each other, however the + * entire heap lives in a different submap from any other kernel allocation. + * + * The main motivation behind this separation is due to the fact that a lot of + * these objects have been used by attackers to spray the heap to make it more + * predictable while exploiting use-after-frees or overflows. + * + * Common attributes that make these objects useful for spraying includes + * control of: + * - Data in allocation + * - Time of alloc and free (lifetime) + * - Size of allocation + */ +KALLOC_HEAP_DECLARE(KHEAP_DATA_BUFFERS); -extern vm_size_t -kalloc_bucket_size( - vm_size_t size); +/** + * @const KHEAP_KEXT + * + * @brief + * The builtin heap for allocations made by kexts. + * + * @discussion + * This set of kalloc zones should contain allocations from kexts and the + * individual zones in this heap are sequestered. + */ +KALLOC_HEAP_DECLARE(KHEAP_KEXT); -#define kalloc(size) \ - ({ VM_ALLOC_SITE_STATIC(0, 0); \ - vm_size_t tsize = (size); \ - kalloc_canblock(&tsize, TRUE, &site); }) +/** + * @const KHEAP_DEFAULT + * + * @brief + * The builtin default core kernel kalloc heap. 
+ * + * @discussion + * This set of kalloc zones should contain other objects that don't have their + * own security mitigations. The individual zones are themselves sequestered. + */ +KALLOC_HEAP_DECLARE(KHEAP_DEFAULT); -#define kalloc_tag(size, itag) \ - ({ VM_ALLOC_SITE_STATIC(0, (itag)); \ - vm_size_t tsize = (size); \ - kalloc_canblock(&tsize, TRUE, &site); }) +/** + * @const KHEAP_TEMP + * + * @brief + * A heap that represents allocations that are always done in "scope" of + * a thread. + * + * @discussion + * Allocations in this heap must be allocated and freed "in scope", which means: + * - the thread that did the allocation will be the one doing the free, + * - allocations will be freed by the time the thread returns to userspace. + * + * This is an alias on the @c KHEAP_DEFAULT heap with added checks. + */ +KALLOC_HEAP_DECLARE(KHEAP_TEMP); -#define kalloc_tag_bt(size, itag) \ - ({ VM_ALLOC_SITE_STATIC(VM_TAG_BT, (itag)); \ - vm_size_t tsize = (size); \ - kalloc_canblock(&tsize, TRUE, &site); }) +/*! + * @macro KALLOC_HEAP_DEFINE + * + * @abstract + * Defines a given kalloc heap view and what it points to. + * + * @discussion + * Kalloc heaps are views over one of the pre-defined builtin heaps + * (such as @c KHEAP_DATA_BUFFERS or @c KHEAP_DEFAULT). Instantiating + * a new one allows for accounting of allocations through this view. + * + * Kalloc heap views are initialized during the @c STARTUP_SUB_ZALLOC phase, + * as the last rank. If views on zones are created, these must have been + * created before this stage. + * + * @param var the name for the zone view. + * @param name a string describing the zone view. + * @param heap_id a @c KHEAP_ID_* constant. + */ +#define KALLOC_HEAP_DEFINE(var, name, heap_id) \ + SECURITY_READ_ONLY_LATE(struct kalloc_heap) var[1] = { { \ + .kh_name = name, \ + .kh_heap_id = heap_id, \ + } }; \ + STARTUP_ARG(ZALLOC, STARTUP_RANK_LAST, kheap_startup_init, var) -#define kalloc_noblock(size) \ - ({ VM_ALLOC_SITE_STATIC(0, 0); \ - vm_size_t tsize = (size); \ - kalloc_canblock(&tsize, FALSE, &site); }) +#define kalloc(size) \ + kheap_alloc(KHEAP_DEFAULT, size, Z_WAITOK) -#define kalloc_noblock_tag(size, itag) \ - ({ VM_ALLOC_SITE_STATIC(0, (itag)); \ - vm_size_t tsize = (size); \ - kalloc_canblock(&tsize, FALSE, &site); }) +#define kalloc_flags(size, flags) \ + kheap_alloc(KHEAP_DEFAULT, size, flags) -#define kalloc_noblock_tag_bt(size, itag) \ - ({ VM_ALLOC_SITE_STATIC(VM_TAG_BT, (itag)); \ - vm_size_t tsize = (size); \ - kalloc_canblock(&tsize, FALSE, &site); }) +#define kalloc_tag(size, itag) \ + kheap_alloc_tag(KHEAP_DEFAULT, size, Z_WAITOK, itag) +#define kalloc_tag_bt(size, itag) \ + kheap_alloc_tag_bt(KHEAP_DEFAULT, size, Z_WAITOK, itag) -/* these versions update the size reference with the actual size allocated */ +#define krealloc(elem, old_size, new_size, flags) \ + kheap_realloc(KHEAP_DEFAULT, elem, old_size, new_size, flags) -#define kallocp(size) \ - ({ VM_ALLOC_SITE_STATIC(0, 0); \ - kalloc_canblock((size), TRUE, &site); }) +/* + * These versions allow specifying the kalloc heap to allocate memory + * from + */ +#define kheap_alloc(kalloc_heap, size, flags) \ + ({ VM_ALLOC_SITE_STATIC(0, 0); \ + kalloc_ext(kalloc_heap, size, flags, &site).addr; }) -#define kallocp_tag(size, itag) \ - ({ VM_ALLOC_SITE_STATIC(0, (itag)); \ - kalloc_canblock((size), TRUE, &site); }) +#define kheap_alloc_tag(kalloc_heap, size, flags, itag) \ + ({ VM_ALLOC_SITE_STATIC(0, (itag)); \ + kalloc_ext(kalloc_heap, size, flags, &site).addr; }) -#define 
kallocp_tag_bt(size, itag) \ - ({ VM_ALLOC_SITE_STATIC(VM_TAG_BT, (itag)); \ - kalloc_canblock((size), TRUE, &site); }) +#define kheap_alloc_tag_bt(kalloc_heap, size, flags, itag) \ + ({ VM_ALLOC_SITE_STATIC(VM_TAG_BT, (itag)); \ + kalloc_ext(kalloc_heap, size, flags, &site).addr; }) -#define kallocp_noblock(size) \ - ({ VM_ALLOC_SITE_STATIC(0, 0); \ - kalloc_canblock((size), FALSE, &site); }) +#define kheap_realloc(kalloc_heap, elem, old_size, new_size, flags) \ + ({ VM_ALLOC_SITE_STATIC(0, 0); \ + krealloc_ext(kalloc_heap, elem, old_size, new_size, flags, &site).addr; }) -#define kallocp_noblock_tag_bt(size, itag) \ - ({ VM_ALLOC_SITE_STATIC(VM_TAG_BT, (itag)); \ - kalloc_canblock((size), FALSE, &site); }) - - - -extern void kfree(void *data, - vm_size_t size); - -#define kfree(data, size) \ -_Pragma("clang diagnostic push") \ -_Pragma("clang diagnostic ignored \"-Wshadow\"") \ - do { \ - _Static_assert(sizeof (data) == sizeof (void *) || sizeof (data) == sizeof (mach_vm_address_t), "data is not a pointer"); \ - void *__tmp_addr = (void *) data; \ - vm_size_t __tmp_size = size; \ - data = (__typeof__(data)) NULL; \ - (kfree)(__tmp_addr, __tmp_size); \ - } while (0) \ -_Pragma("clang diagnostic pop") - -#define kfree_addr(addr) \ -_Pragma("clang diagnostic push") \ -_Pragma("clang diagnostic ignored \"-Wshadow\"") \ - do { \ - _Static_assert(sizeof (addr) == sizeof (void *) || sizeof (addr) == sizeof (mach_vm_address_t), "addr is not a pointer"); \ - void *__tmp_addr = (void *) addr; \ - addr = (__typeof__(addr)) NULL; \ - (kfree_addr)(__tmp_addr); \ - } while (0) \ -_Pragma("clang diagnostic pop") +extern void +kfree( + void *data, + vm_size_t size); -#else /* XNU_KERNEL_PRIVATE */ +extern void +kheap_free( + kalloc_heap_t heap, + void *data, + vm_size_t size); -extern void *kalloc(vm_size_t size) __attribute__((alloc_size(1))); +__abortlike +extern void +kheap_temp_leak_panic(thread_t self); -extern void *kalloc_noblock(vm_size_t size) __attribute__((alloc_size(1))); +#else /* XNU_KERNEL_PRIVATE */ + +extern void *kalloc(vm_size_t size) __attribute__((alloc_size(1))); -extern void kfree(void *data, - vm_size_t size); +extern void kfree(void *data, vm_size_t size); #endif /* !XNU_KERNEL_PRIVATE */ +#pragma mark implementation details +#if XNU_KERNEL_PRIVATE +#pragma GCC visibility push(hidden) -__END_DECLS +/* Used by kern_os_* and operator new */ +KALLOC_HEAP_DECLARE(KERN_OS_MALLOC); + +extern void kheap_startup_init( + kalloc_heap_t heap); -#ifdef MACH_KERNEL_PRIVATE -extern void kalloc_init(void); +/* + * This type is used so that kalloc_internal has good calling conventions + * for callers who want to cheaply both know the allocated address + * and the actual size of the allocation. 
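A sketch of the KHEAP_TEMP lifetime rule documented above: allocation and free happen on the same thread, within the same kernel entry, before returning to userspace. The function, the copyin() usage and the error handling are illustrative only.

static int
example_syscall_scratch(user_addr_t uaddr, size_t len)
{
	void *tmp = kheap_alloc(KHEAP_TEMP, len, Z_WAITOK);
	int   err;

	if (tmp == NULL) {
		return ENOMEM;
	}
	err = copyin(uaddr, tmp, len);
	/* ... use the scratch buffer ... */

	/* must happen before this thread goes back to userspace, otherwise
	 * kheap_temp_leak_panic() fires on the unbalanced t_temp_alloc_count */
	kheap_free(KHEAP_TEMP, tmp, len);
	return err;
}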
+ */ +struct kalloc_result { + void *addr; + vm_size_t size; +}; + +extern struct kalloc_result +kalloc_ext( + kalloc_heap_t kheap, + vm_size_t size, + zalloc_flags_t flags, + vm_allocation_site_t *site); + +extern struct kalloc_result +krealloc_ext( + kalloc_heap_t kheap, + void *addr, + vm_size_t old_size, + vm_size_t new_size, + zalloc_flags_t flags, + vm_allocation_site_t *site); + +extern struct kalloc_result +kheap_realloc_addr( + kalloc_heap_t kheap, + void *addr, + vm_size_t new_size, + zalloc_flags_t flags, + vm_allocation_site_t *site); -extern void kalloc_fake_zone_init( int ); -extern void kalloc_fake_zone_info( - int *count, - vm_size_t *cur_size, - vm_size_t *max_size, - vm_size_t *elem_size, - vm_size_t *alloc_size, - uint64_t *sum_size, - int *collectable, - int *exhaustable, - int *caller_acct); +/* these versions update the size reference with the actual size allocated */ + +static inline void * +kallocp_ext( + kalloc_heap_t kheap, + vm_size_t *size, + zalloc_flags_t flags, + vm_allocation_site_t *site) +{ + struct kalloc_result kar = kalloc_ext(kheap, *size, flags, site); + *size = kar.size; + return kar.addr; +} + +#define kallocp(sizep) \ + ({ VM_ALLOC_SITE_STATIC(0, 0); \ + kallocp_ext(KHEAP_DEFAULT, sizep, Z_WAITOK, &site); }) + +#define kallocp_tag(sizep, itag) \ + ({ VM_ALLOC_SITE_STATIC(0, (itag)); \ + kallocp_ext(KHEAP_DEFAULT, sizep, Z_WAITOK, &site); }) + +#define kallocp_tag_bt(sizep, itag) \ + ({ VM_ALLOC_SITE_STATIC(VM_TAG_BT, (itag)); \ + kallocp_ext(KHEAP_DEFAULT, sizep, Z_WAITOK, &site); }) + +extern vm_size_t +kalloc_size( + void *addr); + +extern void +kheap_free_addr( + kalloc_heap_t heap, + void *addr); + +extern vm_size_t +kalloc_bucket_size( + vm_size_t size); + +/* + * These macros set "elem" to NULL on free. + * + * Note: all values passed to k*free() might be in the element to be freed, + * temporaries must be taken, and the resetting to be done prior to free. 
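Why the note about temporaries matters, assuming a hypothetical object that stores its own allocation size: the macros below capture heap, element and size into locals before clearing the caller's pointer, so a size expression that dereferences the object being freed stays well-defined.

struct example_blob {
	vm_size_t eb_size;          /* allocation size kept inside the object */
	char      eb_data[];
};

static void
example_blob_free(struct example_blob *blob)
{
	/* `blob->eb_size` is evaluated before `blob` is set to NULL and before
	 * the memory is handed back to (kheap_free)() */
	kheap_free(KHEAP_DEFAULT, blob, blob->eb_size);
	assert(blob == NULL);
}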
+ */ +#define kfree(elem, size) ({ \ + _Static_assert(sizeof(elem) == sizeof(void *), "elem isn't pointer sized"); \ + __auto_type __kfree_eptr = &(elem); \ + __auto_type __kfree_elem = *__kfree_eptr; \ + __auto_type __kfree_size = (size); \ + *__kfree_eptr = (__typeof__(__kfree_elem))NULL; \ + (kfree)((void *)__kfree_elem, __kfree_size); \ +}) + +#define kheap_free(heap, elem, size) ({ \ + _Static_assert(sizeof(elem) == sizeof(void *), "elem isn't pointer sized"); \ + __auto_type __kfree_heap = (heap); \ + __auto_type __kfree_eptr = &(elem); \ + __auto_type __kfree_elem = *__kfree_eptr; \ + __auto_type __kfree_size = (size); \ + *__kfree_eptr = (__typeof__(__kfree_elem))NULL; \ + (kheap_free)(__kfree_heap, (void *)__kfree_elem, __kfree_size); \ +}) + +#define kheap_free_addr(heap, elem) ({ \ + _Static_assert(sizeof(elem) == sizeof(void *), "elem isn't pointer sized"); \ + __auto_type __kfree_heap = (heap); \ + __auto_type __kfree_eptr = &(elem); \ + __auto_type __kfree_elem = *__kfree_eptr; \ + *__kfree_eptr = (__typeof__(__kfree_elem))NULL; \ + (kheap_free_addr)(__kfree_heap, (void *)__kfree_elem); \ +}) + +extern zone_t +kalloc_heap_zone_for_size( + kalloc_heap_t heap, + vm_size_t size); extern vm_size_t kalloc_max_prerounded; extern vm_size_t kalloc_large_total; -#endif /* MACH_KERNEL_PRIVATE */ +extern void +kern_os_kfree( + void *addr, + vm_size_t size); + +#pragma GCC visibility pop +#endif /* XNU_KERNEL_PRIVATE */ + +extern void +kern_os_zfree( + zone_t zone, + void *addr, + vm_size_t size); + +__END_DECLS #endif /* _KERN_KALLOC_H_ */ diff --git a/osfmk/kern/kcdata.h b/osfmk/kern/kcdata.h index f00a3be8f..f2eaf624c 100644 --- a/osfmk/kern/kcdata.h +++ b/osfmk/kern/kcdata.h @@ -190,6 +190,52 @@ * kcdata_add_type_definition(kcdata_p, KCTYPE_SAMPLE_DISK_IO_STATS, "sample_disk_io_stats", * &disk_io_stats_def[0], sizeof(disk_io_stats_def)/sizeof(struct kcdata_subtype_descriptor)); * + * Feature description: Compression + * -------------------- + * In order to avoid keeping large amunt of memory reserved for a panic stackshot, kcdata has support + * for compressing the buffer in a streaming fashion. New data pushed to the kcdata buffer will be + * automatically compressed using an algorithm selected by the API user (currently, we only support + * pass-through and zlib, in the future we plan to add WKDM support, see: 57913859). + * + * To start using compression, call: + * kcdata_init_compress(kcdata_p, hdr_tag, memcpy_f, comp_type); + * where: + * `kcdata_p` is the kcdata buffer that will be used + * `hdr_tag` is the usual header tag denoting what type of kcdata buffer this will be + * `memcpy_f` a memcpy(3) function to use to copy into the buffer, optional. + * `compy_type` is the compression type, see KCDCT_ZLIB for an example. + * + * Once compression is initialized: + * (1) all self-describing APIs will automatically compress + * (2) you can now use the following APIs to compress data into the buffer: + * (None of the following will compress unless kcdata_init_compress() has been called) + * + * - kcdata_push_data(kcdata_descriptor_t data, uint32_t type, uint32_t size, const void *input_data) + * Pushes the buffer of kctype @type at[@input_data, @input_data + @size] + * into the kcdata buffer @data, compressing if needed. 
+ * + * - kcdata_push_array(kcdata_descriptor_t data, uint32_t type_of_element, + * uint32_t size_of_element, uint32_t count, const void *input_data) + * Pushes the array found at @input_data, with element type @type_of_element, where + * each element is of size @size_of_element and there are @count elements into the kcdata buffer + * at @data. + * + * - kcdata_compression_window_open/close(kcdata_descriptor_t data) + * In case the data you are trying to push to the kcdata buffer @data is difficult to predict, + * you can open a "compression window". Between an open and a close, no compression will be done. + * Once you clsoe the window, the underlying compression algorithm will compress the data into the buffer + * and automatically rewind the current end marker of the kcdata buffer. + * There is an ASCII art in kern_cdata.c to aid the reader in understanding + * this. + * + * - kcdata_finish_compression(kcdata_descriptor_t data) + * Must be called at the end to flush any underlying buffers used by the compression algorithms. + * This function will also add some statistics about the compression to the buffer which helps with + * decompressing later. + * + * Once you are done with the kcdata buffer, call kcdata_deinit_compress to + * free any buffers that may have been allocated internal to the compression + * algorithm. */ @@ -401,6 +447,7 @@ struct kcdata_type_definition { #define KCDATA_TYPE_PID 0x36u /* int32_t */ #define KCDATA_TYPE_PROCNAME 0x37u /* char * */ #define KCDATA_TYPE_NESTED_KCDATA 0x38u /* nested kcdata buffer */ +#define KCDATA_TYPE_LIBRARY_AOTINFO 0x39u /* struct user64_dyld_aot_info */ #define KCDATA_TYPE_BUFFER_END 0xF19158EDu @@ -410,16 +457,18 @@ struct kcdata_type_definition { * numbers are byteswaps of each other */ -#define KCDATA_BUFFER_BEGIN_CRASHINFO 0xDEADF157u /* owner: corpses/task_corpse.h */ - /* type-range: 0x800 - 0x8ff */ -#define KCDATA_BUFFER_BEGIN_STACKSHOT 0x59a25807u /* owner: sys/stackshot.h */ - /* type-range: 0x900 - 0x93f */ -#define KCDATA_BUFFER_BEGIN_DELTA_STACKSHOT 0xDE17A59Au /* owner: sys/stackshot.h */ - /* type-range: 0x940 - 0x9ff */ -#define KCDATA_BUFFER_BEGIN_OS_REASON 0x53A20900u /* owner: sys/reason.h */ - /* type-range: 0x1000-0x103f */ -#define KCDATA_BUFFER_BEGIN_XNUPOST_CONFIG 0x1e21c09fu /* owner: osfmk/tests/kernel_tests.c */ - /* type-range: 0x1040-0x105f */ +#define KCDATA_BUFFER_BEGIN_CRASHINFO 0xDEADF157u /* owner: corpses/task_corpse.h */ + /* type-range: 0x800 - 0x8ff */ +#define KCDATA_BUFFER_BEGIN_STACKSHOT 0x59a25807u /* owner: sys/stackshot.h */ + /* type-range: 0x900 - 0x93f */ +#define KCDATA_BUFFER_BEGIN_COMPRESSED 0x434f4d50u /* owner: sys/stackshot.h */ + /* type-range: 0x900 - 0x93f */ +#define KCDATA_BUFFER_BEGIN_DELTA_STACKSHOT 0xDE17A59Au /* owner: sys/stackshot.h */ + /* type-range: 0x940 - 0x9ff */ +#define KCDATA_BUFFER_BEGIN_OS_REASON 0x53A20900u /* owner: sys/reason.h */ + /* type-range: 0x1000-0x103f */ +#define KCDATA_BUFFER_BEGIN_XNUPOST_CONFIG 0x1e21c09fu /* owner: osfmk/tests/kernel_tests.c */ + /* type-range: 0x1040-0x105f */ /* next type range number available 0x1060 */ /**************** definitions for XNUPOST *********************/ @@ -477,6 +526,12 @@ struct kcdata_type_definition { #define STACKSHOT_KCTYPE_SYS_SHAREDCACHE_LAYOUT 0x927u /* same as KCDATA_TYPE_LIBRARY_LOADINFO64 */ #define STACKSHOT_KCTYPE_THREAD_DISPATCH_QUEUE_LABEL 0x928u /* dispatch queue label */ #define STACKSHOT_KCTYPE_THREAD_TURNSTILEINFO 0x929u /* struct stackshot_thread_turnstileinfo */ +#define 
STACKSHOT_KCTYPE_TASK_CPU_ARCHITECTURE 0x92au /* struct stackshot_cpu_architecture */ +#define STACKSHOT_KCTYPE_LATENCY_INFO 0x92bu /* struct stackshot_latency_collection */ +#define STACKSHOT_KCTYPE_LATENCY_INFO_TASK 0x92cu /* struct stackshot_latency_task */ +#define STACKSHOT_KCTYPE_LATENCY_INFO_THREAD 0x92du /* struct stackshot_latency_thread */ +#define STACKSHOT_KCTYPE_LOADINFO64_TEXT_EXEC 0x92eu /* TEXT_EXEC load info -- same as KCDATA_TYPE_LIBRARY_LOADINFO64 */ +#define STACKSHOT_KCTYPE_AOTCACHE_LOADINFO 0x92fu /* struct dyld_aot_cache_uuid_info */ #define STACKSHOT_KCTYPE_TASK_DELTA_SNAPSHOT 0x940u /* task_delta_snapshot_v2 */ #define STACKSHOT_KCTYPE_THREAD_DELTA_SNAPSHOT 0x941u /* thread_delta_snapshot_v* */ @@ -508,6 +563,13 @@ struct dyld_uuid_info_64_v2 { uint64_t imageSlidBaseAddress; /* slid base address of image */ }; +struct dyld_aot_cache_uuid_info { + uint64_t x86SlidBaseAddress; /* slid base address of x86 shared cache */ + uuid_t x86UUID; /* UUID of x86 shared cache */ + uint64_t aotSlidBaseAddress; /* slide base address of aot cache */ + uuid_t aotUUID; /* UUID of aot shared cache */ +}; + struct user32_dyld_uuid_info { uint32_t imageLoadAddress; /* base address image is mapped into */ uuid_t imageUUID; /* UUID of image */ @@ -518,6 +580,15 @@ struct user64_dyld_uuid_info { uuid_t imageUUID; /* UUID of image */ }; +#define DYLD_AOT_IMAGE_KEY_SIZE 32 + +struct user64_dyld_aot_info { + uint64_t x86LoadAddress; + uint64_t aotLoadAddress; + uint64_t aotImageSize; + uint8_t aotImageKey[DYLD_AOT_IMAGE_KEY_SIZE]; +}; + enum task_snapshot_flags { /* k{User,Kernel}64_p (values 0x1 and 0x2) are defined in generic_snapshot_flags */ kTaskRsrcFlagged = 0x4, // In the EXC_RESOURCE danger zone? @@ -546,6 +617,7 @@ enum task_snapshot_flags { /* 0x2000000 unused */ kTaskIsDirtyTracked = 0x4000000, kTaskAllowIdleExit = 0x8000000, + kTaskIsTranslated = 0x10000000, }; enum thread_snapshot_flags { @@ -823,10 +895,12 @@ typedef struct stackshot_thread_turnstileinfo { uint64_t turnstile_context; /* Associated data (either thread id, or workq addr) */ uint8_t turnstile_priority; uint8_t number_of_hops; -#define STACKSHOT_TURNSTILE_STATUS_UNKNOWN (1 << 0) /* The final inheritor is unknown (bug?) */ -#define STACKSHOT_TURNSTILE_STATUS_LOCKED_WAITQ (1 << 1) /* A waitq was found to be locked */ -#define STACKSHOT_TURNSTILE_STATUS_WORKQUEUE (1 << 2) /* The final inheritor is a workqueue */ -#define STACKSHOT_TURNSTILE_STATUS_THREAD (1 << 3) /* The final inheritor is a thread */ +#define STACKSHOT_TURNSTILE_STATUS_UNKNOWN 0x01 /* The final inheritor is unknown (bug?) 
*/ +#define STACKSHOT_TURNSTILE_STATUS_LOCKED_WAITQ 0x02 /* A waitq was found to be locked */ +#define STACKSHOT_TURNSTILE_STATUS_WORKQUEUE 0x04 /* The final inheritor is a workqueue */ +#define STACKSHOT_TURNSTILE_STATUS_THREAD 0x08 /* The final inheritor is a thread */ +#define STACKSHOT_TURNSTILE_STATUS_BLOCKED_ON_TASK 0x10 /* blocked on task, dind't find thread */ +#define STACKSHOT_TURNSTILE_STATUS_HELD_IPLOCK 0x20 /* the ip_lock was held */ uint64_t turnstile_flags; } __attribute__((packed)) thread_turnstileinfo_t; @@ -838,12 +912,52 @@ typedef struct stackshot_thread_turnstileinfo { #define STACKSHOT_WAITOWNER_THREQUESTED (UINT64_MAX - 6) /* workloop waiting for a new worker thread */ #define STACKSHOT_WAITOWNER_SUSPENDED (UINT64_MAX - 7) /* workloop is suspended */ +struct stackshot_cpu_architecture { + int32_t cputype; + int32_t cpusubtype; +} __attribute__((packed)); struct stack_snapshot_stacktop { uint64_t sp; uint8_t stack_contents[8]; }; +/* only collected if STACKSHOT_COLLECTS_LATENCY_INFO is set to !0 */ +struct stackshot_latency_collection { + uint64_t latency_version; + uint64_t setup_latency; + uint64_t total_task_iteration_latency; + uint64_t total_terminated_task_iteration_latency; +} __attribute__((packed)); + +/* only collected if STACKSHOT_COLLECTS_LATENCY_INFO is set to !0 */ +struct stackshot_latency_task { + uint64_t task_uniqueid; + uint64_t setup_latency; + uint64_t task_thread_count_loop_latency; + uint64_t task_thread_data_loop_latency; + uint64_t cur_tsnap_latency; + uint64_t pmap_latency; + uint64_t bsd_proc_ids_latency; + uint64_t misc_latency; + uint64_t misc2_latency; + uint64_t end_latency; +} __attribute__((packed)); + +/* only collected if STACKSHOT_COLLECTS_LATENCY_INFO is set to !0 */ +struct stackshot_latency_thread { + uint64_t thread_id; + uint64_t cur_thsnap1_latency; + uint64_t dispatch_serial_latency; + uint64_t dispatch_label_latency; + uint64_t cur_thsnap2_latency; + uint64_t thread_name_latency; + uint64_t sur_times_latency; + uint64_t user_stack_latency; + uint64_t kernel_stack_latency; + uint64_t misc_latency; +} __attribute__((packed)); + /**************** definitions for crashinfo *********************/ @@ -913,8 +1027,15 @@ struct crashinfo_proc_uniqidentifierinfo { #define TASK_CRASHINFO_LEDGER_WIRED_MEM 0x82A /* uint64_t */ #define TASK_CRASHINFO_PROC_PERSONA_ID 0x82B /* uid_t */ #define TASK_CRASHINFO_MEMORY_LIMIT_INCREASE 0x82C /* uint32_t */ - - +#define TASK_CRASHINFO_LEDGER_TAGGED_FOOTPRINT 0x82D /* uint64_t */ +#define TASK_CRASHINFO_LEDGER_TAGGED_FOOTPRINT_COMPRESSED 0x82E /* uint64_t */ +#define TASK_CRASHINFO_LEDGER_MEDIA_FOOTPRINT 0x82F /* uint64_t */ +#define TASK_CRASHINFO_LEDGER_MEDIA_FOOTPRINT_COMPRESSED 0x830 /* uint64_t */ +#define TASK_CRASHINFO_LEDGER_GRAPHICS_FOOTPRINT 0x831 /* uint64_t */ +#define TASK_CRASHINFO_LEDGER_GRAPHICS_FOOTPRINT_COMPRESSED 0x832 /* uint64_t */ +#define TASK_CRASHINFO_LEDGER_NEURAL_FOOTPRINT 0x833 /* uint64_t */ +#define TASK_CRASHINFO_LEDGER_NEURAL_FOOTPRINT_COMPRESSED 0x834 /* uint64_t */ +#define TASK_CRASHINFO_MEMORYSTATUS_EFFECTIVE_PRIORITY 0x835 /* int32_t */ #define TASK_CRASHINFO_END KCDATA_TYPE_BUFFER_END diff --git a/osfmk/kern/kern_cdata.c b/osfmk/kern/kern_cdata.c index 91c29df10..c70c6f4f0 100644 --- a/osfmk/kern/kern_cdata.c +++ b/osfmk/kern/kern_cdata.c @@ -39,6 +39,35 @@ #include static kern_return_t kcdata_get_memory_addr_with_flavor(kcdata_descriptor_t data, uint32_t type, uint32_t size, uint64_t flags, mach_vm_address_t *user_addr); +static size_t 
kcdata_get_memory_size_for_data(uint32_t size); +static kern_return_t kcdata_compress_chunk_with_flags(kcdata_descriptor_t data, uint32_t type, const void *input_data, uint32_t input_size, uint64_t flags); +static kern_return_t kcdata_compress_chunk(kcdata_descriptor_t data, uint32_t type, const void *input_data, uint32_t input_size); +static kern_return_t kcdata_write_compression_stats(kcdata_descriptor_t data); +static kern_return_t kcdata_get_compression_stats(kcdata_descriptor_t data, uint64_t *totalout, uint64_t *totalin); + +/* + * zlib will need to store its metadata and this value is indifferent from the + * window bits and other zlib internals + */ +#define ZLIB_METADATA_SIZE 1440 + +/* #define kcdata_debug_printf printf */ +#define kcdata_debug_printf(...) ; + +#pragma pack(push, 4) + +/* Internal structs for convenience */ +struct _uint64_with_description_data { + char desc[KCDATA_DESC_MAXLEN]; + uint64_t data; +}; + +struct _uint32_with_description_data { + char desc[KCDATA_DESC_MAXLEN]; + uint32_t data; +}; + +#pragma pack(pop) /* * Estimates how large of a buffer that should be allocated for a buffer that will contain @@ -53,11 +82,24 @@ kcdata_estimate_required_buffer_size(uint32_t num_items, uint32_t payload_size) /* * In the worst case each item will need (KCDATA_ALIGNMENT_SIZE - 1) padding */ - uint32_t max_padding_bytes = num_items * (KCDATA_ALIGNMENT_SIZE - 1); - uint32_t item_description_bytes = num_items * sizeof(struct kcdata_item); - uint32_t begin_and_end_marker_bytes = 2 * sizeof(struct kcdata_item); + uint32_t max_padding_bytes = 0; + uint32_t max_padding_with_item_description_bytes = 0; + uint32_t estimated_required_buffer_size = 0; + const uint32_t begin_and_end_marker_bytes = 2 * sizeof(struct kcdata_item); + + if (os_mul_overflow(num_items, KCDATA_ALIGNMENT_SIZE - 1, &max_padding_bytes)) { + panic("%s: Overflow in required buffer size estimate", __func__); + } + + if (os_mul_and_add_overflow(num_items, sizeof(struct kcdata_item), max_padding_bytes, &max_padding_with_item_description_bytes)) { + panic("%s: Overflow in required buffer size estimate", __func__); + } - return max_padding_bytes + item_description_bytes + begin_and_end_marker_bytes + payload_size; + if (os_add3_overflow(max_padding_with_item_description_bytes, begin_and_end_marker_bytes, payload_size, &estimated_required_buffer_size)) { + panic("%s: Overflow in required buffer size estimate", __func__); + } + + return estimated_required_buffer_size; } kcdata_descriptor_t @@ -65,15 +107,15 @@ kcdata_memory_alloc_init(mach_vm_address_t buffer_addr_p, unsigned data_type, un { kcdata_descriptor_t data = NULL; mach_vm_address_t user_addr = 0; + uint16_t clamped_flags = (uint16_t) flags; - data = kalloc(sizeof(struct kcdata_descriptor)); + data = kalloc_flags(sizeof(struct kcdata_descriptor), Z_WAITOK | Z_ZERO); if (data == NULL) { return NULL; } - bzero(data, sizeof(struct kcdata_descriptor)); data->kcd_addr_begin = buffer_addr_p; data->kcd_addr_end = buffer_addr_p; - data->kcd_flags = (flags & KCFLAG_USE_COPYOUT)? KCFLAG_USE_COPYOUT : KCFLAG_USE_MEMCOPY; + data->kcd_flags = (clamped_flags & KCFLAG_USE_COPYOUT) ? 
clamped_flags : clamped_flags | KCFLAG_USE_MEMCOPY; data->kcd_length = size; /* Initialize the BEGIN header */ @@ -89,6 +131,7 @@ kern_return_t kcdata_memory_static_init(kcdata_descriptor_t data, mach_vm_address_t buffer_addr_p, unsigned data_type, unsigned size, unsigned flags) { mach_vm_address_t user_addr = 0; + uint16_t clamped_flags = (uint16_t) flags; if (data == NULL) { return KERN_INVALID_ARGUMENT; @@ -96,7 +139,7 @@ kcdata_memory_static_init(kcdata_descriptor_t data, mach_vm_address_t buffer_add bzero(data, sizeof(struct kcdata_descriptor)); data->kcd_addr_begin = buffer_addr_p; data->kcd_addr_end = buffer_addr_p; - data->kcd_flags = (flags & KCFLAG_USE_COPYOUT)? KCFLAG_USE_COPYOUT : KCFLAG_USE_MEMCOPY; + data->kcd_flags = (clamped_flags & KCFLAG_USE_COPYOUT) ? clamped_flags : clamped_flags | KCFLAG_USE_MEMCOPY; data->kcd_length = size; /* Initialize the BEGIN header */ @@ -120,6 +163,27 @@ kcdata_memory_get_used_bytes(kcdata_descriptor_t kcd) return ((uint64_t)kcd->kcd_addr_end - (uint64_t)kcd->kcd_addr_begin) + sizeof(struct kcdata_item); } +uint64_t +kcdata_memory_get_uncompressed_bytes(kcdata_descriptor_t kcd) +{ + kern_return_t kr; + + assert(kcd != NULL); + if (kcd->kcd_flags & KCFLAG_USE_COMPRESSION) { + uint64_t totalout, totalin; + + kr = kcdata_get_compression_stats(kcd, &totalout, &totalin); + if (kr == KERN_SUCCESS) { + return totalin; + } else { + return 0; + } + } else { + /* If compression wasn't used, get the number of bytes used */ + return kcdata_memory_get_used_bytes(kcd); + } +} + /* * Free up the memory associated with kcdata */ @@ -138,7 +202,663 @@ kcdata_memory_destroy(kcdata_descriptor_t data) return KERN_SUCCESS; } +/* Used by zlib to allocate space in its metadata section */ +static void * +kcdata_compress_zalloc(void *opaque, u_int items, u_int size) +{ + void *result; + struct kcdata_compress_descriptor *cd = opaque; + int alloc_size = ~31L & (31 + (items * size)); + + result = (void *)(cd->kcd_cd_base + cd->kcd_cd_offset); + if ((uintptr_t) result + alloc_size > (uintptr_t) cd->kcd_cd_base + cd->kcd_cd_maxoffset) { + result = Z_NULL; + } else { + cd->kcd_cd_offset += alloc_size; + } + + kcdata_debug_printf("%s: %d * %d = %d => %p\n", __func__, items, size, items * size, result); + + return result; +} + +/* Used by zlib to free previously allocated space in its metadata section */ +static void +kcdata_compress_zfree(void *opaque, void *ptr) +{ + (void) opaque; + (void) ptr; + + kcdata_debug_printf("%s: ptr %p\n", __func__, ptr); + + /* + * Since the buffers we are using are temporary, we don't worry about + * freeing memory for now. Besides, testing has shown that zlib only calls + * this at the end, near deflateEnd() or a Z_FINISH deflate() call. 
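For orientation, a sketch pairing the overflow-checked estimator above with kcdata_memory_alloc_init(); the item count, payload size, backing allocation, buffer tag and flag value are illustrative assumptions.

static kcdata_descriptor_t
example_crashinfo_buffer(void)
{
	uint32_t est = kcdata_estimate_required_buffer_size(16, 2048);
	void    *buf = kheap_alloc(KHEAP_DEFAULT, est, Z_WAITOK | Z_ZERO);

	if (buf == NULL) {
		return NULL;
	}
	/* `est` covers 16 item headers, their alignment padding, the begin/end
	 * markers and 2 KiB of payload */
	return kcdata_memory_alloc_init((mach_vm_address_t)(uintptr_t)buf,
	    KCDATA_BUFFER_BEGIN_CRASHINFO, est, KCFLAG_USE_MEMCOPY);
}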
+ */ +} + +/* Used to initialize the selected compression algorithm's internal state (if any) */ +static kern_return_t +kcdata_init_compress_state(kcdata_descriptor_t data, void (*memcpy_f)(void *, const void *, size_t), uint64_t type, mach_vm_address_t totalout_addr, mach_vm_address_t totalin_addr) +{ + kern_return_t ret = KERN_SUCCESS; + size_t size; + int wbits = 12, memlevel = 3; + struct kcdata_compress_descriptor *cd = &data->kcd_comp_d; + + cd->kcd_cd_memcpy_f = memcpy_f; + cd->kcd_cd_compression_type = type; + cd->kcd_cd_totalout_addr = totalout_addr; + cd->kcd_cd_totalin_addr = totalin_addr; + + switch (type) { + case KCDCT_ZLIB: + /* allocate space for the metadata used by zlib */ + size = round_page(ZLIB_METADATA_SIZE + zlib_deflate_memory_size(wbits, memlevel)); + kcdata_debug_printf("%s: size = %zu kcd_length: %d\n", __func__, size, data->kcd_length); + kcdata_debug_printf("%s: kcd buffer [%p - %p]\n", __func__, (void *) data->kcd_addr_begin, (void *) data->kcd_addr_begin + data->kcd_length); + + if (4 * size > data->kcd_length) { + return KERN_INSUFFICIENT_BUFFER_SIZE; + } + + cd->kcd_cd_zs.avail_in = 0; + cd->kcd_cd_zs.next_in = NULL; + cd->kcd_cd_zs.avail_out = 0; + cd->kcd_cd_zs.next_out = NULL; + cd->kcd_cd_zs.opaque = cd; + cd->kcd_cd_zs.zalloc = kcdata_compress_zalloc; + cd->kcd_cd_zs.zfree = kcdata_compress_zfree; + cd->kcd_cd_base = (void *) data->kcd_addr_begin + data->kcd_length - size; + data->kcd_length -= size; + cd->kcd_cd_offset = 0; + cd->kcd_cd_maxoffset = size; + cd->kcd_cd_flags = 0; + + kcdata_debug_printf("%s: buffer [%p - %p]\n", __func__, cd->kcd_cd_base, cd->kcd_cd_base + size); + + if (deflateInit2(&cd->kcd_cd_zs, Z_BEST_SPEED, Z_DEFLATED, wbits, memlevel, Z_DEFAULT_STRATEGY) != Z_OK) { + kcdata_debug_printf("EMERGENCY: deflateInit2 failed!\n"); + ret = KERN_INVALID_ARGUMENT; + } + break; + default: + panic("kcdata_init_compress_state: invalid compression type: %d", (int) type); + } + + return ret; +} + + +/* + * Turn on the compression logic for kcdata + */ +kern_return_t +kcdata_init_compress(kcdata_descriptor_t data, int hdr_tag, void (*memcpy_f)(void *, const void *, size_t), uint64_t type) +{ + kern_return_t kr; + mach_vm_address_t user_addr, totalout_addr, totalin_addr; + struct _uint64_with_description_data save_data; + const uint64_t size_req = sizeof(save_data); + + assert(data && (data->kcd_flags & KCFLAG_USE_COMPRESSION) == 0); + + /* reset the compression descriptor */ + bzero(&data->kcd_comp_d, sizeof(struct kcdata_compress_descriptor)); + + /* add the header information */ + kcdata_add_uint64_with_description(data, type, "kcd_c_type"); + + /* reserve space to write total out */ + bzero(&save_data, size_req); + strlcpy(&(save_data.desc[0]), "kcd_c_totalout", sizeof(save_data.desc)); + kr = kcdata_get_memory_addr(data, KCDATA_TYPE_UINT64_DESC, size_req, &totalout_addr); + if (kr != KERN_SUCCESS) { + return kr; + } + memcpy((void *)totalout_addr, &save_data, size_req); + + /* space for total in */ + bzero(&save_data, size_req); + strlcpy(&(save_data.desc[0]), "kcd_c_totalin", sizeof(save_data.desc)); + kr = kcdata_get_memory_addr(data, KCDATA_TYPE_UINT64_DESC, size_req, &totalin_addr); + if (kr != KERN_SUCCESS) { + return kr; + } + memcpy((void *)totalin_addr, &save_data, size_req); + + /* add the inner buffer */ + kcdata_get_memory_addr(data, hdr_tag, 0, &user_addr); + + /* save the flag */ + data->kcd_flags |= KCFLAG_USE_COMPRESSION; + + /* initialize algorithm specific state */ + kr = kcdata_init_compress_state(data, memcpy_f, type, 
totalout_addr + offsetof(struct _uint64_with_description_data, data), totalin_addr + offsetof(struct _uint64_with_description_data, data)); + if (kr != KERN_SUCCESS) { + kcdata_debug_printf("%s: failed to initialize compression state!\n", __func__); + return kr; + } + + return KERN_SUCCESS; +} + +static inline +int +kcdata_zlib_translate_kcd_cf_flag(enum kcdata_compression_flush flush) +{ + switch (flush) { + case KCDCF_NO_FLUSH: return Z_NO_FLUSH; + case KCDCF_SYNC_FLUSH: return Z_SYNC_FLUSH; + case KCDCF_FINISH: return Z_FINISH; + default: panic("invalid kcdata_zlib_translate_kcd_cf_flag flag"); + } +} + +static inline +int +kcdata_zlib_translate_kcd_cf_expected_ret(enum kcdata_compression_flush flush) +{ + switch (flush) { + case KCDCF_NO_FLUSH: /* fall through */ + case KCDCF_SYNC_FLUSH: return Z_OK; + case KCDCF_FINISH: return Z_STREAM_END; + default: panic("invalid kcdata_zlib_translate_kcd_cf_expected_ret flag"); + } +} + +/* Called by kcdata_do_compress() when the configured compression algorithm is zlib */ +static kern_return_t +kcdata_do_compress_zlib(kcdata_descriptor_t data, void *inbuffer, + size_t insize, void *outbuffer, size_t outsize, size_t *wrote, + enum kcdata_compression_flush flush) +{ + struct kcdata_compress_descriptor *cd = &data->kcd_comp_d; + z_stream *zs = &cd->kcd_cd_zs; + int expected_ret, ret; + + zs->next_out = outbuffer; + zs->avail_out = (unsigned int) outsize; + zs->next_in = inbuffer; + zs->avail_in = (unsigned int) insize; + ret = deflate(zs, kcdata_zlib_translate_kcd_cf_flag(flush)); + if (zs->avail_in != 0 || zs->avail_out <= 0) { + return KERN_INSUFFICIENT_BUFFER_SIZE; + } + + expected_ret = kcdata_zlib_translate_kcd_cf_expected_ret(flush); + if (ret != expected_ret) { + /* + * Should only fail with catastrophic, unrecoverable cases (i.e., + * corrupted z_stream, or incorrect configuration) + */ + panic("zlib kcdata compression ret = %d\n", ret); + } + + kcdata_debug_printf("%s: %p (%zu) <- %p (%zu); flush: %d; ret = %ld\n", + __func__, outbuffer, outsize, inbuffer, insize, flush, outsize - zs->avail_out); + if (wrote) { + *wrote = outsize - zs->avail_out; + } + return KERN_SUCCESS; +} + +/* + * Compress the buffer at @inbuffer (of size @insize) into the kcdata buffer + * @outbuffer (of size @outsize). Flush based on the @flush parameter. + * + * Returns KERN_SUCCESS on success, or KERN_INSUFFICIENT_BUFFER_SIZE if + * @outsize isn't sufficient. Also, writes the number of bytes written in the + * @outbuffer to @wrote. 
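For readers less familiar with zlib's streaming interface, the deflate() call pattern that kcdata_do_compress_zlib() drives incrementally reduces, in the single-buffer case, to the following self-contained sketch (illustrative user-space code, reusing the windowBits/memLevel values chosen by kcdata_init_compress_state() above):

    #include <stdint.h>
    #include <string.h>
    #include <zlib.h>

    /* Compress src into dst in one call; returns bytes written, or 0 on failure. */
    static size_t
    compress_once(const void *src, size_t srclen, void *dst, size_t dstlen)
    {
        z_stream zs;
        size_t wrote = 0;

        memset(&zs, 0, sizeof(zs));
        /* Z_BEST_SPEED, windowBits 12 and memLevel 3 mirror the values above. */
        if (deflateInit2(&zs, Z_BEST_SPEED, Z_DEFLATED, 12, 3,
            Z_DEFAULT_STRATEGY) != Z_OK) {
            return 0;
        }

        zs.next_in   = (Bytef *)(uintptr_t)src;
        zs.avail_in  = (uInt)srclen;
        zs.next_out  = (Bytef *)dst;
        zs.avail_out = (uInt)dstlen;

        /* Z_FINISH corresponds to KCDCF_FINISH: flush and terminate the stream. */
        if (deflate(&zs, Z_FINISH) == Z_STREAM_END) {
            wrote = dstlen - zs.avail_out;
        }
        deflateEnd(&zs);
        return wrote;
    }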
+ */ +static kern_return_t +kcdata_do_compress(kcdata_descriptor_t data, void *inbuffer, size_t insize, + void *outbuffer, size_t outsize, size_t *wrote, enum kcdata_compression_flush flush) +{ + struct kcdata_compress_descriptor *cd = &data->kcd_comp_d; + + assert(data->kcd_flags & KCFLAG_USE_COMPRESSION); + + kcdata_debug_printf("%s: %p (%zu) <- %p (%zu); flush: %d\n", + __func__, outbuffer, outsize, inbuffer, insize, flush); + + /* don't compress if we are in a window */ + if (cd->kcd_cd_flags & KCD_CD_FLAG_IN_MARK || data->kcd_comp_d.kcd_cd_compression_type == KCDCT_NONE) { + assert(cd->kcd_cd_memcpy_f); + if (outsize >= insize) { + cd->kcd_cd_memcpy_f(outbuffer, inbuffer, insize); + if (wrote) { + *wrote = insize; + } + return KERN_SUCCESS; + } else { + return KERN_INSUFFICIENT_BUFFER_SIZE; + } + } + + switch (data->kcd_comp_d.kcd_cd_compression_type) { + case KCDCT_ZLIB: + return kcdata_do_compress_zlib(data, inbuffer, insize, outbuffer, outsize, wrote, flush); + default: + panic("invalid compression type 0x%llx in kcdata_do_compress", data->kcd_comp_d.kcd_cd_compression_type); + } +} + +static size_t +kcdata_compression_bound_zlib(kcdata_descriptor_t data, size_t size) +{ + struct kcdata_compress_descriptor *cd = &data->kcd_comp_d; + z_stream *zs = &cd->kcd_cd_zs; + + return (size_t) deflateBound(zs, (unsigned long) size); +} + + +/* + * returns the worst-case, maximum length of the compressed data when + * compressing a buffer of size @size using the configured algorithm. + */ +static size_t +kcdata_compression_bound(kcdata_descriptor_t data, size_t size) +{ + switch (data->kcd_comp_d.kcd_cd_compression_type) { + case KCDCT_ZLIB: + return kcdata_compression_bound_zlib(data, size); + case KCDCT_NONE: + return size; + default: + panic("%s: unknown compression method", __func__); + } +} + +/* + * kcdata_compress_chunk_with_flags: + * Compress buffer found at @input_data (length @input_size) to the kcdata + * buffer described by @data. This method will construct the kcdata_item_t + * required by parsers using the type information @type and flags @flags. + * + * Returns KERN_SUCCESS when successful. Currently, asserts on failure. + */ +kern_return_t +kcdata_compress_chunk_with_flags(kcdata_descriptor_t data, uint32_t type, const void *input_data, uint32_t input_size, uint64_t kcdata_flags) +{ + assert(data); + assert((data->kcd_flags & KCFLAG_USE_COMPRESSION)); + assert(input_data); + struct kcdata_item info; + char padding_data[16] = {0}; + struct kcdata_compress_descriptor *cd = &data->kcd_comp_d; + size_t wrote = 0; + kern_return_t kr; + + kcdata_debug_printf("%s: type: %d input_data: %p (%d) kcdata_flags: 0x%llx\n", + __func__, type, input_data, input_size, kcdata_flags); + + /* + * first, get memory space. The uncompressed size must fit in the remained + * of the kcdata buffer, in case the compression algorithm doesn't actually + * compress the data at all. 
+ */ + size_t total_uncompressed_size = kcdata_compression_bound(data, (size_t) kcdata_get_memory_size_for_data(input_size)); + if (total_uncompressed_size > data->kcd_length || + data->kcd_length - total_uncompressed_size < data->kcd_addr_end - data->kcd_addr_begin) { + kcdata_debug_printf("%s: insufficient buffer size: kcd_length => %d e-b=> %lld our size: %zu\n", + __func__, data->kcd_length, data->kcd_addr_end - data->kcd_addr_begin, total_uncompressed_size); + return KERN_INSUFFICIENT_BUFFER_SIZE; + } + uint32_t padding = kcdata_calc_padding(input_size); + assert(padding < sizeof(padding_data)); + + void *space_start = (void *) data->kcd_addr_end; + void *space_ptr = space_start; + + /* create the output stream */ + size_t total_uncompressed_space_remaining = total_uncompressed_size; + + /* create the info data */ + bzero(&info, sizeof(info)); + info.type = type; + info.size = input_size + padding; + info.flags = kcdata_flags; + + /* + * The next possibly three compresses are needed separately because of the + * scatter-gather nature of this operation. The kcdata item header (info) + * and padding are on the stack, while the actual data is somewhere else. + * */ + + /* create the input stream for info & compress */ + enum kcdata_compression_flush flush = (padding || input_size) ? KCDCF_NO_FLUSH : + cd->kcd_cd_flags & KCD_CD_FLAG_FINALIZE ? KCDCF_FINISH : + KCDCF_SYNC_FLUSH; + kr = kcdata_do_compress(data, &info, sizeof(info), space_ptr, total_uncompressed_space_remaining, &wrote, flush); + if (kr != KERN_SUCCESS) { + return kr; + } + kcdata_debug_printf("%s: first wrote = %zu\n", __func__, wrote); + space_ptr += wrote; + total_uncompressed_space_remaining -= wrote; + + /* If there is input provided, compress that here */ + if (input_size) { + flush = padding ? KCDCF_NO_FLUSH : + cd->kcd_cd_flags & KCD_CD_FLAG_FINALIZE ? KCDCF_FINISH : + KCDCF_SYNC_FLUSH; + kr = kcdata_do_compress(data, (void *) (uintptr_t) input_data, input_size, space_ptr, total_uncompressed_space_remaining, &wrote, flush); + if (kr != KERN_SUCCESS) { + return kr; + } + kcdata_debug_printf("%s: 2nd wrote = %zu\n", __func__, wrote); + space_ptr += wrote; + total_uncompressed_space_remaining -= wrote; + } + + /* If the item and its data require padding to maintain alignment, + * "compress" that into the output buffer. */ + if (padding) { + /* write the padding */ + kr = kcdata_do_compress(data, padding_data, padding, space_ptr, total_uncompressed_space_remaining, &wrote, + cd->kcd_cd_flags & KCD_CD_FLAG_FINALIZE ? KCDCF_FINISH : KCDCF_SYNC_FLUSH); + if (kr != KERN_SUCCESS) { + return kr; + } + kcdata_debug_printf("%s: 3rd wrote = %zu\n", __func__, wrote); + if (wrote == 0) { + return KERN_FAILURE; + } + space_ptr += wrote; + total_uncompressed_space_remaining -= wrote; + } + + assert((size_t)(space_ptr - space_start) <= total_uncompressed_size); + + /* move the end marker forward */ + data->kcd_addr_end = (mach_vm_address_t) (space_start + (total_uncompressed_size - total_uncompressed_space_remaining)); + + return KERN_SUCCESS; +} + +/* + * kcdata_compress_chunk: + * Like kcdata_compress_chunk_with_flags(), but uses the default set of kcdata flags, + * i.e. padding and also saves the amount of padding bytes. 
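Whether a chunk is compressed or copied straight through, what ultimately lands in the buffer is a kcdata item header followed by the payload padded out to the item alignment. A rough, self-contained model of that uncompressed layout (field names are stand-ins for struct kcdata_item, and it assumes the 16-byte item alignment implied by the 16-byte padding buffer above):

    #include <stdint.h>
    #include <string.h>

    struct item_hdr {          /* stand-in for struct kcdata_item */
        uint32_t type;
        uint32_t size;         /* payload size, including padding */
        uint64_t flags;
    };

    static uint32_t
    pad_to_16(uint32_t size)
    {
        return (uint32_t)(-size & 0xfU);  /* bytes to reach a 16-byte boundary */
    }

    /* Lay out one uncompressed chunk at dst; returns the number of bytes emitted. */
    static size_t
    emit_chunk(void *dst, uint32_t type, const void *payload, uint32_t size,
        uint64_t flags)
    {
        struct item_hdr hdr = {
            .type = type, .size = size + pad_to_16(size), .flags = flags,
        };
        char *p = dst;

        memcpy(p, &hdr, sizeof(hdr));      p += sizeof(hdr);
        memcpy(p, payload, size);          p += size;
        memset(p, 0, pad_to_16(size));     p += pad_to_16(size);
        return (size_t)(p - (char *)dst);
    }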
+ * + * Returns are the same as in kcdata_compress_chunk_with_flags() + */ +kern_return_t +kcdata_compress_chunk(kcdata_descriptor_t data, uint32_t type, const void *input_data, uint32_t input_size) +{ + /* these flags are for kcdata - store that the struct is padded and store the amount of padding bytes */ + uint64_t flags = (KCDATA_FLAGS_STRUCT_PADDING_MASK & kcdata_calc_padding(input_size)) | KCDATA_FLAGS_STRUCT_HAS_PADDING; + return kcdata_compress_chunk_with_flags(data, type, input_data, input_size, flags); +} + +kern_return_t +kcdata_push_data(kcdata_descriptor_t data, uint32_t type, uint32_t size, const void *input_data) +{ + if (data->kcd_flags & KCFLAG_USE_COMPRESSION) { + return kcdata_compress_chunk(data, type, input_data, size); + } else { + kern_return_t ret; + mach_vm_address_t uaddr = 0; + ret = kcdata_get_memory_addr(data, type, size, &uaddr); + if (ret != KERN_SUCCESS) { + return ret; + } + + kcdata_memcpy(data, uaddr, input_data, size); + return KERN_SUCCESS; + } +} + +kern_return_t +kcdata_push_array(kcdata_descriptor_t data, uint32_t type_of_element, uint32_t size_of_element, uint32_t count, const void *input_data) +{ + uint64_t flags = type_of_element; + flags = (flags << 32) | count; + uint32_t total_size = count * size_of_element; + uint32_t pad = kcdata_calc_padding(total_size); + + if (data->kcd_flags & KCFLAG_USE_COMPRESSION) { + return kcdata_compress_chunk_with_flags(data, KCDATA_TYPE_ARRAY_PAD0 | pad, input_data, total_size, flags); + } else { + kern_return_t ret; + mach_vm_address_t uaddr = 0; + ret = kcdata_get_memory_addr_with_flavor(data, KCDATA_TYPE_ARRAY_PAD0 | pad, total_size, flags, &uaddr); + if (ret != KERN_SUCCESS) { + return ret; + } + + kcdata_memcpy(data, uaddr, input_data, total_size); + return KERN_SUCCESS; + } +} + +/* A few words on how window compression works: + * + * This is how the buffer looks when the window is opened: + * + * X---------------------------------------------------------------------X + * | | | + * | Filled with stackshot data | Zero bytes | + * | | | + * X---------------------------------------------------------------------X + * ^ + * \ - kcd_addr_end + * + * Opening a window will save the current kcd_addr_end to kcd_cd_mark_begin. + * + * Any kcdata_* operation will then push data to the buffer like normal. (If + * you call any compressing functions they will pass-through, i.e. no + * compression will be done) Once the window is closed, the following takes + * place: + * + * X---------------------------------------------------------------------X + * | | | | | + * | Existing data | New data | Scratch buffer | | + * | | | | | + * X---------------------------------------------------------------------X + * ^ ^ ^ + * | | | + * \ -kcd_cd_mark_begin | | + * | | + * \ - kcd_addr_end | + * | + * kcd_addr_end + (kcd_addr_end - kcd_cd_mark_begin) - / + * + * (1) The data between kcd_cd_mark_begin and kcd_addr_end is fed to the + * compression algorithm to compress to the scratch buffer. + * (2) The scratch buffer's contents are copied into the area denoted "New + * data" above. Effectively overwriting the uncompressed data with the + * compressed one. 
+ * (3) kcd_addr_end is then rewound to kcd_cd_mark_begin + sizeof_compressed_data + */ + +/* Record the state, and restart compression from this later */ +void +kcdata_compression_window_open(kcdata_descriptor_t data) +{ + struct kcdata_compress_descriptor *cd = &data->kcd_comp_d; + assert((cd->kcd_cd_flags & KCD_CD_FLAG_IN_MARK) == 0); + + if (data->kcd_flags & KCFLAG_USE_COMPRESSION) { + cd->kcd_cd_flags |= KCD_CD_FLAG_IN_MARK; + cd->kcd_cd_mark_begin = data->kcd_addr_end; + } +} + +/* Compress the region between the mark and the current end */ +kern_return_t +kcdata_compression_window_close(kcdata_descriptor_t data) +{ + struct kcdata_compress_descriptor *cd = &data->kcd_comp_d; + uint64_t total_size, max_size; + void *space_start, *space_ptr; + size_t total_uncompressed_space_remaining, wrote = 0; + kern_return_t kr; + + if ((data->kcd_flags & KCFLAG_USE_COMPRESSION) == 0) { + return KERN_SUCCESS; + } + + assert(cd->kcd_cd_flags & KCD_CD_FLAG_IN_MARK); + + if (data->kcd_addr_end == (mach_vm_address_t) cd->kcd_cd_mark_begin) { + /* clear the window marker and return, this is a no-op */ + cd->kcd_cd_flags &= ~KCD_CD_FLAG_IN_MARK; + return KERN_SUCCESS; + } + + assert(cd->kcd_cd_mark_begin < data->kcd_addr_end); + total_size = data->kcd_addr_end - (uint64_t) cd->kcd_cd_mark_begin; + max_size = (uint64_t) kcdata_compression_bound(data, total_size); + kcdata_debug_printf("%s: total_size = %lld\n", __func__, total_size); + + /* + * first, get memory space. The uncompressed size must fit in the remained + * of the kcdata buffer, in case the compression algorithm doesn't actually + * compress the data at all. + */ + if (max_size > data->kcd_length || + data->kcd_length - max_size < data->kcd_addr_end - data->kcd_addr_begin) { + kcdata_debug_printf("%s: insufficient buffer size: kcd_length => %d e-b=> %lld our size: %lld\n", + __func__, data->kcd_length, data->kcd_addr_end - data->kcd_addr_begin, max_size); + return KERN_INSUFFICIENT_BUFFER_SIZE; + } + + /* clear the window marker */ + cd->kcd_cd_flags &= ~KCD_CD_FLAG_IN_MARK; + + space_start = (void *) data->kcd_addr_end; + space_ptr = space_start; + total_uncompressed_space_remaining = (unsigned int) max_size; + kr = kcdata_do_compress(data, (void *) cd->kcd_cd_mark_begin, total_size, space_ptr, + total_uncompressed_space_remaining, &wrote, KCDCF_SYNC_FLUSH); + if (kr != KERN_SUCCESS) { + return kr; + } + kcdata_debug_printf("%s: first wrote = %zu\n", __func__, wrote); + if (wrote == 0) { + return KERN_FAILURE; + } + space_ptr += wrote; + total_uncompressed_space_remaining -= wrote; + + assert((size_t)(space_ptr - space_start) <= max_size); + + /* copy to the original location */ + kcdata_memcpy(data, cd->kcd_cd_mark_begin, space_start, (uint32_t) (max_size - total_uncompressed_space_remaining)); + + /* rewind the end marker */ + data->kcd_addr_end = cd->kcd_cd_mark_begin + (max_size - total_uncompressed_space_remaining); + + return KERN_SUCCESS; +} + +static kern_return_t +kcdata_get_compression_stats_zlib(kcdata_descriptor_t data, uint64_t *totalout, uint64_t *totalin) +{ + struct kcdata_compress_descriptor *cd = &data->kcd_comp_d; + z_stream *zs = &cd->kcd_cd_zs; + assert((cd->kcd_cd_flags & KCD_CD_FLAG_IN_MARK) == 0); + + *totalout = (uint64_t) zs->total_out; + *totalin = (uint64_t) zs->total_in; + + return KERN_SUCCESS; +} + +static kern_return_t +kcdata_get_compression_stats(kcdata_descriptor_t data, uint64_t *totalout, uint64_t *totalin) +{ + kern_return_t kr; + + switch (data->kcd_comp_d.kcd_cd_compression_type) { + case KCDCT_ZLIB: 
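Putting the window pieces together, a writer that needs to emit a variable-length array through the compressor typically brackets the raw writes with a window, roughly as in the kernel-side sketch below (kern_cdata.h context assumed; the item type parameter is illustrative and error handling is trimmed):

    /*
     * Sketch: stream `count' 64-bit values into the kcdata buffer when
     * compression may be enabled.  While the window is open, the array is
     * written uncompressed past kcd_addr_end; closing the window compresses
     * the marked region in place and rewinds kcd_addr_end.
     */
    static kern_return_t
    push_u64_array_windowed(kcdata_descriptor_t kcd, uint32_t item_type,
        const uint64_t *vals, uint32_t count)
    {
        mach_vm_address_t out_addr = 0;
        kern_return_t kr;

        kcdata_compression_window_open(kcd);

        kr = kcdata_get_memory_addr_for_array(kcd, item_type,
            sizeof(uint64_t), count, &out_addr);
        if (kr != KERN_SUCCESS) {
            /* callers such as the stackshot code abandon the buffer here */
            return kr;
        }
        for (uint32_t i = 0; i < count; i++) {
            ((uint64_t *)(uintptr_t)out_addr)[i] = vals[i];
        }

        return kcdata_compression_window_close(kcd);
    }

Once every chunk has been pushed, the teardown order this patch suggests is kcd_finalize_compression() (so the final chunk is flushed with KCDCF_FINISH), then kcdata_finish_compression(), which records the kcd_c_totalout/kcd_c_totalin counters and releases the zlib state.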
+ kr = kcdata_get_compression_stats_zlib(data, totalout, totalin); + break; + case KCDCT_NONE: + kr = KERN_SUCCESS; + break; + default: + panic("invalid compression flag 0x%llx in kcdata_write_compression_stats", (data->kcd_comp_d.kcd_cd_compression_type)); + } + + return kr; +} + +kern_return_t +kcdata_write_compression_stats(kcdata_descriptor_t data) +{ + kern_return_t kr; + uint64_t totalout, totalin; + + kr = kcdata_get_compression_stats(data, &totalout, &totalin); + if (kr != KERN_SUCCESS) { + return kr; + } + + *(uint64_t *)data->kcd_comp_d.kcd_cd_totalout_addr = totalout; + *(uint64_t *)data->kcd_comp_d.kcd_cd_totalin_addr = totalin; + + return kr; +} + +static kern_return_t +kcdata_finish_compression_zlib(kcdata_descriptor_t data) +{ + struct kcdata_compress_descriptor *cd = &data->kcd_comp_d; + z_stream *zs = &cd->kcd_cd_zs; + + /* + * macOS on x86 w/ coprocessor ver. 2 and later context: Stackshot compression leaves artifacts + * in the panic buffer which interferes with CRC checks. The CRC is calculated here over the full + * buffer but only the portion with valid panic data is sent to iBoot via the SMC. When iBoot + * calculates the CRC to compare with the value in the header it uses a zero-filled buffer. + * The stackshot compression leaves non-zero bytes behind so those must be cleared prior to the CRC calculation. + * + * All other contexts: The stackshot compression artifacts are present in its panic buffer but the CRC check + * is done on the same buffer for the before and after calculation so there's nothing functionally + * broken. The same buffer cleanup is done here for completeness' sake. + * From rdar://problem/64381661 + */ + + void* stackshot_end = (char*)data->kcd_addr_begin + kcdata_memory_get_used_bytes(data); + uint32_t zero_fill_size = data->kcd_length - kcdata_memory_get_used_bytes(data); + bzero(stackshot_end, zero_fill_size); + + if (deflateEnd(zs) == Z_OK) { + return KERN_SUCCESS; + } else { + return KERN_FAILURE; + } +} + +kern_return_t +kcdata_finish_compression(kcdata_descriptor_t data) +{ + kcdata_write_compression_stats(data); + + switch (data->kcd_comp_d.kcd_cd_compression_type) { + case KCDCT_ZLIB: + data->kcd_length += data->kcd_comp_d.kcd_cd_maxoffset; + return kcdata_finish_compression_zlib(data); + case KCDCT_NONE: + return KERN_SUCCESS; + default: + panic("invalid compression type 0x%llxin kcdata_finish_compression", data->kcd_comp_d.kcd_cd_compression_type); + } +} + +void +kcd_finalize_compression(kcdata_descriptor_t data) +{ + if (data->kcd_flags & KCFLAG_USE_COMPRESSION) { + data->kcd_comp_d.kcd_cd_flags |= KCD_CD_FLAG_FINALIZE; + } +} /* * Routine: kcdata_get_memory_addr @@ -205,6 +925,9 @@ kcdata_get_memory_addr_with_flavor( return KERN_INVALID_ARGUMENT; } + assert(((data->kcd_flags & KCFLAG_USE_COMPRESSION) && (data->kcd_comp_d.kcd_cd_flags & KCD_CD_FLAG_IN_MARK)) + || ((data->kcd_flags & KCFLAG_USE_COMPRESSION) == 0)); + bzero(&info, sizeof(info)); info.type = type; info.size = size; @@ -213,7 +936,7 @@ kcdata_get_memory_addr_with_flavor( /* check available memory, including trailer size for KCDATA_TYPE_BUFFER_END */ if (total_size + sizeof(info) > data->kcd_length || data->kcd_length - (total_size + sizeof(info)) < data->kcd_addr_end - data->kcd_addr_begin) { - return KERN_RESOURCE_SHORTAGE; + return KERN_INSUFFICIENT_BUFFER_SIZE; } kr = kcdata_memcpy(data, data->kcd_addr_end, &info, sizeof(info)); @@ -241,6 +964,16 @@ kcdata_get_memory_addr_with_flavor( } } +/* Routine: kcdata_get_memory_size_for_data + * Desc: returns the amount of 
memory that is required to store the information + * in kcdata + */ +static size_t +kcdata_get_memory_size_for_data(uint32_t size) +{ + return size + kcdata_calc_padding(size) + sizeof(struct kcdata_item); +} + /* * Routine: kcdata_get_memory_addr_for_array * Desc: get memory address in the userspace memory for corpse info @@ -291,16 +1024,25 @@ kcdata_add_container_marker( { mach_vm_address_t user_addr; kern_return_t kr; + uint32_t data_size; + assert(header_type == KCDATA_TYPE_CONTAINER_END || header_type == KCDATA_TYPE_CONTAINER_BEGIN); - uint32_t data_size = (header_type == KCDATA_TYPE_CONTAINER_BEGIN)? sizeof(uint32_t): 0; - kr = kcdata_get_memory_addr_with_flavor(data, header_type, data_size, identifier, &user_addr); - if (kr != KERN_SUCCESS) { - return kr; - } - if (data_size) { - kr = kcdata_memcpy(data, user_addr, &container_type, data_size); + data_size = (header_type == KCDATA_TYPE_CONTAINER_BEGIN)? sizeof(uint32_t): 0; + + if (!(data->kcd_flags & KCFLAG_USE_COMPRESSION)) { + kr = kcdata_get_memory_addr_with_flavor(data, header_type, data_size, identifier, &user_addr); + if (kr != KERN_SUCCESS) { + return kr; + } + + if (data_size) { + kr = kcdata_memcpy(data, user_addr, &container_type, data_size); + } + } else { + kr = kcdata_compress_chunk_with_flags(data, header_type, &container_type, data_size, identifier); } + return kr; } @@ -423,21 +1165,6 @@ kcdata_add_type_definition( return kr; } -#pragma pack(4) - -/* Internal structs for convenience */ -struct _uint64_with_description_data { - char desc[KCDATA_DESC_MAXLEN]; - uint64_t data; -}; - -struct _uint32_with_description_data { - char desc[KCDATA_DESC_MAXLEN]; - uint32_t data; -}; - -#pragma pack() - kern_return_t kcdata_add_uint64_with_description(kcdata_descriptor_t data_desc, uint64_t data, const char * description) { @@ -454,6 +1181,11 @@ kcdata_add_uint64_with_description(kcdata_descriptor_t data_desc, uint64_t data, strlcpy(&(save_data.desc[0]), description, sizeof(save_data.desc)); save_data.data = data; + if (data_desc->kcd_flags & KCFLAG_USE_COMPRESSION) { + /* allocate space for the output */ + return kcdata_compress_chunk(data_desc, KCDATA_TYPE_UINT64_DESC, &save_data, size_req); + } + kr = kcdata_get_memory_addr(data_desc, KCDATA_TYPE_UINT64_DESC, size_req, &user_addr); if (kr != KERN_SUCCESS) { return kr; @@ -488,10 +1220,16 @@ kcdata_add_uint32_with_description( strlcpy(&(save_data.desc[0]), description, sizeof(save_data.desc)); save_data.data = data; + if (data_desc->kcd_flags & KCFLAG_USE_COMPRESSION) { + /* allocate space for the output */ + return kcdata_compress_chunk(data_desc, KCDATA_TYPE_UINT32_DESC, &save_data, size_req); + } + kr = kcdata_get_memory_addr(data_desc, KCDATA_TYPE_UINT32_DESC, size_req, &user_addr); if (kr != KERN_SUCCESS) { return kr; } + if (data_desc->kcd_flags & KCFLAG_USE_COPYOUT) { if (copyout(&save_data, user_addr, size_req)) { return KERN_NO_ACCESS; @@ -499,6 +1237,7 @@ kcdata_add_uint32_with_description( } else { memcpy((void *)user_addr, &save_data, size_req); } + return KERN_SUCCESS; } diff --git a/osfmk/kern/kern_cdata.h b/osfmk/kern/kern_cdata.h index 398e8a122..42a584ca6 100644 --- a/osfmk/kern/kern_cdata.h +++ b/osfmk/kern/kern_cdata.h @@ -31,6 +31,9 @@ #include #include +#ifdef XNU_KERNEL_PRIVATE +#include +#endif /* * Do not use these macros! @@ -55,20 +58,68 @@ #define kcdata_get_data_with_desc(buf, desc, data) kcdata_iter_get_data_with_desc(KCDATA_ITEM_ITER(buf),desc,data,NULL) /* Do not use these macros! 
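The container-marker change earlier in this hunk routes the BEGIN/END markers through kcdata_compress_chunk_with_flags() whenever compression is enabled, so callers keep the usual bracketing pattern unchanged. A kernel-side sketch of that pattern, assuming the argument order used by the function's existing callers (header type, container type, identifier) and an illustrative container type:

    static kern_return_t
    emit_task_container(kcdata_descriptor_t kcd, uint64_t task_uniqueid)
    {
        kern_return_t kr;

        kr = kcdata_add_container_marker(kcd, KCDATA_TYPE_CONTAINER_BEGIN,
            STACKSHOT_KCCONTAINER_TASK, task_uniqueid);
        if (kr != KERN_SUCCESS) {
            return kr;
        }

        /* ... push the per-task items here ... */

        return kcdata_add_container_marker(kcd, KCDATA_TYPE_CONTAINER_END,
            STACKSHOT_KCCONTAINER_TASK, task_uniqueid);
    }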
*/ -#ifdef KERNEL +__options_decl(kcd_compression_type_t, uint64_t, { + KCDCT_NONE = 0x00, + KCDCT_ZLIB = 0x01, +}); +#ifdef KERNEL #ifdef XNU_KERNEL_PRIVATE -/* Structure to save information about corpse data */ +__options_decl(kcd_cd_flag_t, uint64_t, { + KCD_CD_FLAG_IN_MARK = 0x01, + KCD_CD_FLAG_FINALIZE = 0x02, +}); + +/* Structure to save zstream and other compression metadata */ +struct kcdata_compress_descriptor { + z_stream kcd_cd_zs; + void *kcd_cd_base; + uint64_t kcd_cd_offset; + size_t kcd_cd_maxoffset; + uint64_t kcd_cd_mark_begin; + kcd_cd_flag_t kcd_cd_flags; + kcd_compression_type_t kcd_cd_compression_type; + void (*kcd_cd_memcpy_f)(void *, const void *, size_t); + mach_vm_address_t kcd_cd_totalout_addr; + mach_vm_address_t kcd_cd_totalin_addr; +}; + +/* + * Various, compression algorithm agnostic flags for controlling writes to the + * output buffer. + */ +enum kcdata_compression_flush { + /* + * Hint that no flush is needed because more data is expected. Doesn't + * guarantee that no data will be written to the output buffer, since the + * underlying algorithm may decide that it's running out of space and may + * flush to the output buffer. + */ + KCDCF_NO_FLUSH, + /* + * Hint to flush all internal buffers to the output buffers. + */ + KCDCF_SYNC_FLUSH, + /* + * Hint that this is going to be the last call to the compression function, + * so flush all output buffers and mark state as finished. + */ + KCDCF_FINISH, +}; + +/* Structure to save information about kcdata */ struct kcdata_descriptor { uint32_t kcd_length; uint16_t kcd_flags; #define KCFLAG_USE_MEMCOPY 0x0 #define KCFLAG_USE_COPYOUT 0x1 #define KCFLAG_NO_AUTO_ENDBUFFER 0x2 +#define KCFLAG_USE_COMPRESSION 0x4 uint16_t kcd_user_flags; /* reserved for subsystems using kcdata */ mach_vm_address_t kcd_addr_begin; mach_vm_address_t kcd_addr_end; + struct kcdata_compress_descriptor kcd_comp_d; }; typedef struct kcdata_descriptor * kcdata_descriptor_t; @@ -92,6 +143,14 @@ kern_return_t kcdata_undo_add_container_begin(kcdata_descriptor_t data); kern_return_t kcdata_write_buffer_end(kcdata_descriptor_t data); void *kcdata_memory_get_begin_addr(kcdata_descriptor_t data); +kern_return_t kcdata_init_compress(kcdata_descriptor_t, int hdr_tag, void (*memcpy_f)(void *, const void *, size_t), uint64_t type); +kern_return_t kcdata_push_data(kcdata_descriptor_t data, uint32_t type, uint32_t size, const void *input_data); +kern_return_t kcdata_push_array(kcdata_descriptor_t data, uint32_t type_of_element, uint32_t size_of_element, uint32_t count, const void *input_data); +kern_return_t kcdata_compress_memory_addr(kcdata_descriptor_t data, void *ptr); +kern_return_t kcdata_finish_compression(kcdata_descriptor_t data); +void kcdata_compression_window_open(kcdata_descriptor_t data); +kern_return_t kcdata_compression_window_close(kcdata_descriptor_t data); +void kcd_finalize_compression(kcdata_descriptor_t data); #else /* XNU_KERNEL_PRIVATE */ @@ -101,6 +160,7 @@ typedef void * kcdata_descriptor_t; uint32_t kcdata_estimate_required_buffer_size(uint32_t num_items, uint32_t payload_size); uint64_t kcdata_memory_get_used_bytes(kcdata_descriptor_t kcd); +uint64_t kcdata_memory_get_uncompressed_bytes(kcdata_descriptor_t kcd); kern_return_t kcdata_memcpy(kcdata_descriptor_t data, mach_vm_address_t dst_addr, const void * src_addr, uint32_t size); kern_return_t kcdata_bzero(kcdata_descriptor_t data, mach_vm_address_t dst_addr, uint32_t size); kern_return_t kcdata_get_memory_addr(kcdata_descriptor_t data, uint32_t type, uint32_t size, 
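Taken together with the new prototypes above, a subsystem that wants a compressed kcdata stream creates the descriptor as usual and then switches it into compressed mode. The sketch below is illustrative only; the call sequence mirrors the stackshot changes later in this patch, and it assumes kcdata_memory_alloc_init() returns NULL on failure:

    static kcdata_descriptor_t
    make_compressed_kcdata(void *buf, uint32_t bufsize)
    {
        kcdata_descriptor_t kcd;

        /* the outer buffer carries the compressed begin tag ... */
        kcd = kcdata_memory_alloc_init((mach_vm_address_t)(uintptr_t)buf,
            KCDATA_BUFFER_BEGIN_COMPRESSED, bufsize,
            KCFLAG_USE_MEMCOPY | KCFLAG_NO_AUTO_ENDBUFFER);
        if (kcd == NULL) {
            return NULL;
        }

        /* ... while the inner, uncompressed stream keeps the normal begin tag */
        if (kcdata_init_compress(kcd, KCDATA_BUFFER_BEGIN_STACKSHOT,
            stackshot_memcpy, KCDCT_ZLIB) != KERN_SUCCESS) {
            kcdata_memory_destroy(kcd);
            return NULL;
        }
        return kcd;
    }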
mach_vm_address_t * user_addr); diff --git a/osfmk/kern/kern_stackshot.c b/osfmk/kern/kern_stackshot.c index cdb62018c..6edec68be 100644 --- a/osfmk/kern/kern_stackshot.c +++ b/osfmk/kern/kern_stackshot.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013-2017 Apple Inc. All rights reserved. + * Copyright (c) 2013-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -49,8 +49,10 @@ #include /* bcopy */ +#include #include #include +#include #include #include #include @@ -64,15 +66,14 @@ #include #include #include +#include #if defined(__x86_64__) #include #include #endif -#if CONFIG_EMBEDDED -#include /* For gPanicBase/gPanicBase */ -#endif +#include #if MONOTONIC #include @@ -80,6 +81,12 @@ #include +#if DEBUG || DEVELOPMENT +# define STACKSHOT_COLLECTS_LATENCY_INFO 1 +#else +# define STACKSHOT_COLLECTS_LATENCY_INFO 0 +#endif /* DEBUG || DEVELOPMENT */ + extern unsigned int not_in_kdp; @@ -91,13 +98,18 @@ extern addr64_t kdp_vtophys(pmap_t pmap, addr64_t va); int kdp_snapshot = 0; static kern_return_t stack_snapshot_ret = 0; static uint32_t stack_snapshot_bytes_traced = 0; +static uint32_t stack_snapshot_bytes_uncompressed = 0; +#if STACKSHOT_COLLECTS_LATENCY_INFO +static bool collect_latency_info = true; +#endif static kcdata_descriptor_t stackshot_kcdata_p = NULL; static void *stack_snapshot_buf; static uint32_t stack_snapshot_bufsize; int stack_snapshot_pid; -static uint32_t stack_snapshot_flags; +static uint64_t stack_snapshot_flags; static uint64_t stack_snapshot_delta_since_timestamp; +static uint32_t stack_snapshot_pagetable_mask; static boolean_t panic_stackshot; static boolean_t stack_enable_faulting = FALSE; @@ -111,19 +123,21 @@ int kernel_stackshot_buf_size = 0; void * stackshot_snapbuf = NULL; /* Used by stack_snapshot2 (to be removed) */ +#if INTERRUPT_MASKED_DEBUG +extern boolean_t interrupt_masked_debug; +#endif + __private_extern__ void stackshot_init( void ); static boolean_t memory_iszero(void *addr, size_t size); -#if CONFIG_TELEMETRY -kern_return_t stack_microstackshot(user_addr_t tracebuf, uint32_t tracebuf_size, uint32_t flags, int32_t *retval); -#endif uint32_t get_stackshot_estsize(uint32_t prev_size_hint); kern_return_t kern_stack_snapshot_internal(int stackshot_config_version, void *stackshot_config, size_t stackshot_config_size, boolean_t stackshot_from_user); kern_return_t do_stackshot(void *); -void kdp_snapshot_preflight(int pid, void * tracebuf, uint32_t tracebuf_size, uint32_t flags, kcdata_descriptor_t data_p, uint64_t since_timestamp); +void kdp_snapshot_preflight(int pid, void * tracebuf, uint32_t tracebuf_size, uint64_t flags, kcdata_descriptor_t data_p, uint64_t since_timestamp, uint32_t pagetable_mask); boolean_t stackshot_thread_is_idle_worker_unsafe(thread_t thread); -static int kdp_stackshot_kcdata_format(int pid, uint32_t trace_flags, uint32_t *pBytesTraced); +static int kdp_stackshot_kcdata_format(int pid, uint64_t trace_flags, uint32_t *pBytesTraced, uint32_t *pBytesUncompressed); uint32_t kdp_stack_snapshot_bytes_traced(void); +uint32_t kdp_stack_snapshot_bytes_uncompressed(void); static void kdp_mem_and_io_snapshot(struct mem_and_io_snapshot *memio_snap); static boolean_t kdp_copyin(vm_map_t map, uint64_t uaddr, void *dest, size_t size, boolean_t try_fault, uint32_t *kdp_fault_result); static int kdp_copyin_string(task_t task, uint64_t addr, char *buf, int buf_sz, boolean_t try_fault, uint32_t *kdp_fault_results); @@ -139,10 +153,15 @@ static void stackshot_coalition_jetsam_count(void *arg, int i, coali static void 
stackshot_coalition_jetsam_snapshot(void *arg, int i, coalition_t coal); #endif /* CONFIG_COALITIONS */ +#if CONFIG_THREAD_GROUPS +static void stackshot_thread_group_count(void *arg, int i, struct thread_group *tg); +static void stackshot_thread_group_snapshot(void *arg, int i, struct thread_group *tg); +#endif /* CONFIG_THREAD_GROUPS */ extern uint32_t workqueue_get_pwq_state_kdp(void *proc); -extern int proc_pid(void *p); +struct proc; +extern int proc_pid(struct proc *p); extern uint64_t proc_uniqueid(void *p); extern uint64_t proc_was_throttled(void *p); extern uint64_t proc_did_throttle(void *p); @@ -152,6 +171,7 @@ static uint64_t proc_did_throttle_from_task(task_t task); extern void proc_name_kdp(task_t task, char * buf, int size); extern int proc_threadname_kdp(void * uth, char * buf, size_t size); extern void proc_starttime_kdp(void * p, uint64_t * tv_sec, uint64_t * tv_usec, uint64_t * abstime); +extern void proc_archinfo_kdp(void* p, cpu_type_t* cputype, cpu_subtype_t* cpusubtype); extern boolean_t proc_binary_uuid_kdp(task_t task, uuid_t uuid); extern int memorystatus_get_pressure_status_kdp(void); extern void memorystatus_proc_flags_unsafe(void * v, boolean_t *is_dirty, boolean_t *is_dirty_tracked, boolean_t *allow_idle_exit); @@ -186,7 +206,7 @@ vm_offset_t machine_trace_thread_get_kva(vm_offset_t cur_target_addr, vm_map_t m vm_offset_t kdp_find_phys(vm_map_t map, vm_offset_t target_addr, boolean_t try_fault, uint32_t *kdp_fault_results); static size_t stackshot_strlcpy(char *dst, const char *src, size_t maxlen); -static void stackshot_memcpy(void *dst, const void *src, size_t len); +void stackshot_memcpy(void *dst, const void *src, size_t len); /* Clears caching information used by the above validation routine * (in case the current map has been changed or cleared). @@ -214,10 +234,8 @@ static boolean_t validate_next_addr = TRUE; /* * Stackshot locking and other defines. 
*/ -static lck_grp_t *stackshot_subsys_lck_grp; -static lck_grp_attr_t *stackshot_subsys_lck_grp_attr; -static lck_attr_t *stackshot_subsys_lck_attr; -static lck_mtx_t stackshot_subsys_mutex; +static LCK_GRP_DECLARE(stackshot_subsys_lck_grp, "stackshot_subsys_lock"); +static LCK_MTX_DECLARE(stackshot_subsys_mutex, &stackshot_subsys_lck_grp); #define STACKSHOT_SUBSYS_LOCK() lck_mtx_lock(&stackshot_subsys_mutex) #define STACKSHOT_SUBSYS_TRY_LOCK() lck_mtx_try_lock(&stackshot_subsys_mutex) @@ -227,6 +245,7 @@ static lck_mtx_t stackshot_subsys_mutex; #define SANE_TRACEBUF_SIZE (8ULL * 1024ULL * 1024ULL) #define TRACEBUF_SIZE_PER_GB (1024ULL * 1024ULL) +#define GIGABYTES (1024ULL * 1024ULL * 1024ULL) SECURITY_READ_ONLY_LATE(static uint32_t) max_tracebuf_size = SANE_TRACEBUF_SIZE; @@ -254,18 +273,10 @@ stackshot_init( void ) { mach_timebase_info_data_t timebase; - stackshot_subsys_lck_grp_attr = lck_grp_attr_alloc_init(); - - stackshot_subsys_lck_grp = lck_grp_alloc_init("stackshot_subsys_lock", stackshot_subsys_lck_grp_attr); - - stackshot_subsys_lck_attr = lck_attr_alloc_init(); - - lck_mtx_init(&stackshot_subsys_mutex, stackshot_subsys_lck_grp, stackshot_subsys_lck_attr); - clock_timebase_info(&timebase); fault_stats.sfs_system_max_fault_time = ((KDP_FAULT_PATH_MAX_TIME_PER_STACKSHOT_NSECS * timebase.denom) / timebase.numer); - max_tracebuf_size = MAX(max_tracebuf_size, (ROUNDUP(max_mem, (1024ULL * 1024ULL * 1024ULL)) / TRACEBUF_SIZE_PER_GB)); + max_tracebuf_size = MAX(max_tracebuf_size, ((ROUNDUP(max_mem, GIGABYTES) / GIGABYTES) * TRACEBUF_SIZE_PER_GB)); PE_parse_boot_argn("stackshot_maxsz", &max_tracebuf_size, sizeof(max_tracebuf_size)); } @@ -335,7 +346,7 @@ stackshot_trap() kern_return_t -stack_snapshot_from_kernel(int pid, void *buf, uint32_t size, uint32_t flags, uint64_t delta_since_timestamp, unsigned *bytes_traced) +stack_snapshot_from_kernel(int pid, void *buf, uint32_t size, uint64_t flags, uint64_t delta_since_timestamp, uint32_t pagetable_mask, unsigned *bytes_traced) { kern_return_t error = KERN_SUCCESS; boolean_t istate; @@ -377,7 +388,8 @@ stack_snapshot_from_kernel(int pid, void *buf, uint32_t size, uint32_t flags, ui istate = ml_set_interrupts_enabled(FALSE); /* Preload trace parameters*/ - kdp_snapshot_preflight(pid, buf, size, flags, &kcdata, delta_since_timestamp); + kdp_snapshot_preflight(pid, buf, size, flags, &kcdata, + delta_since_timestamp, pagetable_mask); /* * Trap to the debugger to obtain a coherent stack snapshot; this populates @@ -442,17 +454,6 @@ stack_microstackshot(user_addr_t tracebuf, uint32_t tracebuf_size, uint32_t flag goto unlock_exit; } - if (flags & STACKSHOT_GET_BOOT_PROFILE) { - if (tracebuf_size > SANE_BOOTPROFILE_TRACEBUF_SIZE) { - error = KERN_INVALID_ARGUMENT; - goto unlock_exit; - } - - bytes_traced = tracebuf_size; - error = bootprofile_gather(tracebuf, &bytes_traced); - *retval = (int)bytes_traced; - } - unlock_exit: STACKSHOT_SUBSYS_UNLOCK(); exit: @@ -470,9 +471,18 @@ get_stackshot_estsize(uint32_t prev_size_hint) vm_size_t thread_total; vm_size_t task_total; uint32_t estimated_size; + size_t est_thread_size = sizeof(struct thread_snapshot); + size_t est_task_size = sizeof(struct task_snapshot) + TASK_UUID_AVG_SIZE; - thread_total = (threads_count * sizeof(struct thread_snapshot)); - task_total = (tasks_count * (sizeof(struct task_snapshot) + TASK_UUID_AVG_SIZE)); +#if STACKSHOT_COLLECTS_LATENCY_INFO + if (collect_latency_info) { + est_thread_size += sizeof(struct stackshot_latency_thread); + est_task_size += sizeof(struct 
stackshot_latency_task); + } +#endif + + thread_total = (threads_count * est_thread_size); + task_total = (tasks_count * est_task_size); estimated_size = (uint32_t) VM_MAP_ROUND_PAGE((thread_total + task_total + STACKSHOT_SUPP_SIZE), PAGE_MASK); if (estimated_size < prev_size_hint) { @@ -544,9 +554,10 @@ kern_stack_snapshot_internal(int stackshot_config_version, void *stackshot_confi uint64_t out_buffer_addr; uint64_t out_size_addr; int pid = -1; - uint32_t flags; + uint64_t flags; uint64_t since_timestamp; uint32_t size_hint = 0; + uint32_t pagetable_mask = STACKSHOT_PAGETABLES_MASK_ALL; if (stackshot_config == NULL) { return KERN_INVALID_ARGUMENT; @@ -574,6 +585,13 @@ kern_stack_snapshot_internal(int stackshot_config_version, void *stackshot_confi if (config->sc_size <= max_tracebuf_size) { size_hint = config->sc_size; } + /* + * Retain the pre-sc_pagetable_mask behavior of STACKSHOT_PAGE_TABLES, + * dump every level if the pagetable_mask is not set + */ + if (flags & STACKSHOT_PAGE_TABLES && config->sc_pagetable_mask) { + pagetable_mask = config->sc_pagetable_mask; + } break; default: return KERN_NOT_SUPPORTED; @@ -587,6 +605,11 @@ kern_stack_snapshot_internal(int stackshot_config_version, void *stackshot_confi if (flags & (STACKSHOT_TRYLOCK | STACKSHOT_SAVE_IN_KERNEL_BUFFER | STACKSHOT_FROM_PANIC)) { return KERN_NO_ACCESS; } +#if !DEVELOPMENT && !DEBUG + if (flags & (STACKSHOT_DO_COMPRESS)) { + return KERN_NO_ACCESS; + } +#endif } else { if (!(flags & STACKSHOT_SAVE_IN_KERNEL_BUFFER)) { return KERN_NOT_SUPPORTED; @@ -597,6 +620,12 @@ kern_stack_snapshot_internal(int stackshot_config_version, void *stackshot_confi return KERN_NOT_SUPPORTED; } + /* Compresssed delta stackshots or page dumps are not yet supported */ + if (((flags & STACKSHOT_COLLECT_DELTA_SNAPSHOT) || (flags & STACKSHOT_PAGE_TABLES)) + && (flags & STACKSHOT_DO_COMPRESS)) { + return KERN_NOT_SUPPORTED; + } + /* * If we're not saving the buffer in the kernel pointer, we need a place to copy into. */ @@ -671,13 +700,27 @@ kern_stack_snapshot_internal(int stackshot_config_version, void *stackshot_confi } - uint32_t hdr_tag = (flags & STACKSHOT_COLLECT_DELTA_SNAPSHOT) ? KCDATA_BUFFER_BEGIN_DELTA_STACKSHOT : KCDATA_BUFFER_BEGIN_STACKSHOT; + uint32_t hdr_tag = (flags & STACKSHOT_COLLECT_DELTA_SNAPSHOT) ? KCDATA_BUFFER_BEGIN_DELTA_STACKSHOT + : (flags & STACKSHOT_DO_COMPRESS) ? KCDATA_BUFFER_BEGIN_COMPRESSED + : KCDATA_BUFFER_BEGIN_STACKSHOT; kcdata_p = kcdata_memory_alloc_init((mach_vm_address_t)stackshotbuf, hdr_tag, stackshotbuf_size, KCFLAG_USE_MEMCOPY | KCFLAG_NO_AUTO_ENDBUFFER); stackshot_duration_outer = NULL; uint64_t time_start = mach_absolute_time(); + /* if compression was requested, allocate the extra zlib scratch area */ + if (flags & STACKSHOT_DO_COMPRESS) { + hdr_tag = (flags & STACKSHOT_COLLECT_DELTA_SNAPSHOT) ? KCDATA_BUFFER_BEGIN_DELTA_STACKSHOT + : KCDATA_BUFFER_BEGIN_STACKSHOT; + error = kcdata_init_compress(kcdata_p, hdr_tag, stackshot_memcpy, KCDCT_ZLIB); + if (error != KERN_SUCCESS) { + os_log(OS_LOG_DEFAULT, "failed to initialize compression: %d!\n", + (int) error); + goto error_exit; + } + } + /* * Disable interrupts and save the current interrupt state. */ @@ -686,7 +729,8 @@ kern_stack_snapshot_internal(int stackshot_config_version, void *stackshot_confi /* * Load stackshot parameters. 
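For in-kernel callers, the widened interface shown earlier in this file now takes 64-bit flags plus a page-table level mask. A sketch of a caller using it (the flag combination, pid convention, and buffer handling here are illustrative only):

    static kern_return_t
    grab_kernel_stackshot(void *buf, uint32_t bufsize, unsigned *bytes_out)
    {
        uint64_t flags = STACKSHOT_SAVE_LOADINFO | STACKSHOT_SAVE_KEXT_LOADINFO;

        return stack_snapshot_from_kernel(-1 /* illustrative: all processes */,
            buf, bufsize, flags,
            0 /* not a delta snapshot */,
            STACKSHOT_PAGETABLES_MASK_ALL /* unused without STACKSHOT_PAGE_TABLES */,
            bytes_out);
    }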
*/ - kdp_snapshot_preflight(pid, stackshotbuf, stackshotbuf_size, flags, kcdata_p, since_timestamp); + kdp_snapshot_preflight(pid, stackshotbuf, stackshotbuf_size, flags, kcdata_p, since_timestamp, + pagetable_mask); error = stackshot_trap(); @@ -778,8 +822,8 @@ error_exit: * Cache stack snapshot parameters in preparation for a trace. */ void -kdp_snapshot_preflight(int pid, void * tracebuf, uint32_t tracebuf_size, uint32_t flags, - kcdata_descriptor_t data_p, uint64_t since_timestamp) +kdp_snapshot_preflight(int pid, void * tracebuf, uint32_t tracebuf_size, uint64_t flags, + kcdata_descriptor_t data_p, uint64_t since_timestamp, uint32_t pagetable_mask) { uint64_t microsecs = 0, secs = 0; clock_get_calendar_microtime((clock_sec_t *)&secs, (clock_usec_t *)µsecs); @@ -790,6 +834,7 @@ kdp_snapshot_preflight(int pid, void * tracebuf, uint32_t tracebuf_size, uint32_ stack_snapshot_bufsize = tracebuf_size; stack_snapshot_flags = flags; stack_snapshot_delta_since_timestamp = since_timestamp; + stack_snapshot_pagetable_mask = pagetable_mask; panic_stackshot = ((flags & STACKSHOT_FROM_PANIC) != 0); @@ -798,6 +843,7 @@ kdp_snapshot_preflight(int pid, void * tracebuf, uint32_t tracebuf_size, uint32_ stackshot_kcdata_p = data_p; stack_snapshot_bytes_traced = 0; + stack_snapshot_bytes_uncompressed = 0; } void @@ -818,6 +864,12 @@ kdp_stack_snapshot_bytes_traced(void) return stack_snapshot_bytes_traced; } +uint32_t +kdp_stack_snapshot_bytes_uncompressed(void) +{ + return stack_snapshot_bytes_uncompressed; +} + static boolean_t memory_iszero(void *addr, size_t size) { @@ -914,17 +966,19 @@ static kern_return_t kcdata_record_shared_cache_info(kcdata_descriptor_t kcd, task_t task, unaligned_u64 *task_snap_ss_flags) { kern_return_t error = KERN_SUCCESS; - mach_vm_address_t out_addr = 0; uint64_t shared_cache_slide = 0; uint64_t shared_cache_base_address = 0; uint32_t kdp_fault_results = 0; + struct dyld_uuid_info_64_v2 shared_cache_data = {0}; + assert(task_snap_ss_flags != NULL); if (task->shared_region && ml_validate_nofault((vm_offset_t)task->shared_region, sizeof(struct vm_shared_region))) { struct vm_shared_region *sr = task->shared_region; shared_cache_base_address = sr->sr_base_address + sr->sr_first_mapping; + } else { *task_snap_ss_flags |= kTaskSharedRegionInfoUnavailable; goto error_exit; @@ -935,22 +989,21 @@ kcdata_record_shared_cache_info(kcdata_descriptor_t kcd, task_t task, unaligned_ goto error_exit; } + /* - * No refcounting here, but we are in debugger - * context, so that should be safe. + * No refcounting here, but we are in debugger context, so that should be safe. 
*/ - shared_cache_slide = task->shared_region->sr_slide_info.slide; + shared_cache_slide = task->shared_region->sr_slide; if (task->shared_region == init_task_shared_region) { /* skip adding shared cache info -- it's the same as the system level one */ goto error_exit; } - kcd_exit_on_error(kcdata_get_memory_addr(kcd, STACKSHOT_KCTYPE_SHAREDCACHE_LOADINFO, sizeof(struct dyld_uuid_info_64_v2), &out_addr)); - struct dyld_uuid_info_64_v2 *shared_cache_data = (struct dyld_uuid_info_64_v2 *)out_addr; - shared_cache_data->imageLoadAddress = shared_cache_slide; - stackshot_memcpy(shared_cache_data->imageUUID, task->shared_region->sr_uuid, sizeof(task->shared_region->sr_uuid)); - shared_cache_data->imageSlidBaseAddress = shared_cache_base_address; + shared_cache_data.imageLoadAddress = shared_cache_slide; + stackshot_memcpy(&shared_cache_data.imageUUID, task->shared_region->sr_uuid, sizeof(task->shared_region->sr_uuid)); + shared_cache_data.imageSlidBaseAddress = shared_cache_base_address; + kcd_exit_on_error(kcdata_push_data(kcd, STACKSHOT_KCTYPE_SHAREDCACHE_LOADINFO, sizeof(struct dyld_uuid_info_64_v2), &shared_cache_data)); error_exit: if (kdp_fault_results & KDP_FAULT_RESULT_PAGED_OUT) { @@ -969,7 +1022,7 @@ error_exit: } static kern_return_t -kcdata_record_uuid_info(kcdata_descriptor_t kcd, task_t task, uint32_t trace_flags, boolean_t have_pmap, unaligned_u64 *task_snap_ss_flags) +kcdata_record_uuid_info(kcdata_descriptor_t kcd, task_t task, uint64_t trace_flags, boolean_t have_pmap, unaligned_u64 *task_snap_ss_flags) { boolean_t save_loadinfo_p = ((trace_flags & STACKSHOT_SAVE_LOADINFO) != 0); boolean_t save_kextloadinfo_p = ((trace_flags & STACKSHOT_SAVE_KEXT_LOADINFO) != 0); @@ -983,6 +1036,7 @@ kcdata_record_uuid_info(kcdata_descriptor_t kcd, task_t task, uint32_t trace_fla uint64_t uuid_info_timestamp = 0; uint32_t kdp_fault_results = 0; + assert(task_snap_ss_flags != NULL); int task_pid = pid_from_task(task); @@ -999,6 +1053,7 @@ kcdata_record_uuid_info(kcdata_descriptor_t kcd, task_t task, uint32_t trace_fla if (task_image_infos.version >= DYLD_ALL_IMAGE_INFOS_TIMESTAMP_MINIMUM_VERSION) { uuid_info_timestamp = task_image_infos.timestamp; } + } } else { struct user32_dyld_all_image_infos task_image_infos; @@ -1020,6 +1075,8 @@ kcdata_record_uuid_info(kcdata_descriptor_t kcd, task_t task, uint32_t trace_fla if (!uuid_info_addr) { uuid_info_count = 0; } + + } if (have_pmap && task_pid == 0) { @@ -1035,6 +1092,9 @@ kcdata_record_uuid_info(kcdata_descriptor_t kcd, task_t task, uint32_t trace_fla uint32_t uuid_info_size = (uint32_t)(task_64bit_addr ? 
sizeof(struct user64_dyld_uuid_info) : sizeof(struct user32_dyld_uuid_info)); uint32_t uuid_info_array_size = 0; + /* Open a compression window to avoid overflowing the stack */ + kcdata_compression_window_open(kcd); + /* If we found some UUID information, first try to copy it in -- this will only be non-zero if we had a pmap above */ if (uuid_info_count > 0) { uuid_info_array_size = uuid_info_count * uuid_info_size; @@ -1073,31 +1133,56 @@ kcdata_record_uuid_info(kcdata_descriptor_t kcd, task_t task, uint32_t trace_fla stackshot_memcpy(&uuid_info->imageLoadAddress, &image_load_address, sizeof(image_load_address)); } } + + kcd_exit_on_error(kcdata_compression_window_close(kcd)); } else if (task_pid == 0 && uuid_info_count > 0 && uuid_info_count < MAX_LOADINFOS) { uintptr_t image_load_address; do { -#if CONFIG_EMBEDDED +#if defined(__arm__) || defined(__arm64__) if (kernelcache_uuid_valid && !save_kextloadinfo_p) { - kcd_exit_on_error(kcdata_get_memory_addr(kcd, STACKSHOT_KCTYPE_KERNELCACHE_LOADINFO, sizeof(struct dyld_uuid_info_64), &out_addr)); - struct dyld_uuid_info_64 *kc_uuid = (struct dyld_uuid_info_64 *)out_addr; - kc_uuid->imageLoadAddress = VM_MIN_KERNEL_AND_KEXT_ADDRESS; - stackshot_memcpy(&kc_uuid->imageUUID, &kernelcache_uuid, sizeof(uuid_t)); + struct dyld_uuid_info_64 kc_uuid = {0}; + kc_uuid.imageLoadAddress = VM_MIN_KERNEL_AND_KEXT_ADDRESS; + stackshot_memcpy(&kc_uuid.imageUUID, &kernelcache_uuid, sizeof(uuid_t)); + kcd_exit_on_error(kcdata_push_data(kcd, STACKSHOT_KCTYPE_KERNELCACHE_LOADINFO, sizeof(struct dyld_uuid_info_64), &kc_uuid)); break; } -#endif /* CONFIG_EMBEDDED */ +#endif /* defined(__arm__) || defined(__arm64__) */ if (!kernel_uuid || !ml_validate_nofault((vm_offset_t)kernel_uuid, sizeof(uuid_t))) { /* Kernel UUID not found or inaccessible */ break; } - kcd_exit_on_error(kcdata_get_memory_addr_for_array( - kcd, (sizeof(kernel_uuid_info) == sizeof(struct user64_dyld_uuid_info)) ? KCDATA_TYPE_LIBRARY_LOADINFO64 - : KCDATA_TYPE_LIBRARY_LOADINFO, - sizeof(kernel_uuid_info), uuid_info_count, &out_addr)); + uint32_t uuid_type = KCDATA_TYPE_LIBRARY_LOADINFO; + if ((sizeof(kernel_uuid_info) == sizeof(struct user64_dyld_uuid_info))) { + uuid_type = KCDATA_TYPE_LIBRARY_LOADINFO64; +#if defined(__arm64__) + kc_format_t primary_kc_type = KCFormatUnknown; + if (PE_get_primary_kc_format(&primary_kc_type) && (primary_kc_type == KCFormatFileset)) { + /* return TEXT_EXEC based load information on arm devices running with fileset kernelcaches */ + uuid_type = STACKSHOT_KCTYPE_LOADINFO64_TEXT_EXEC; + } +#endif + } + + /* + * The element count of the array can vary - avoid overflowing the + * stack by opening a window. 
+ */ + kcdata_compression_window_open(kcd); + kcd_exit_on_error(kcdata_get_memory_addr_for_array(kcd, uuid_type, + sizeof(kernel_uuid_info), uuid_info_count, &out_addr)); kernel_uuid_info *uuid_info_array = (kernel_uuid_info *)out_addr; + image_load_address = (uintptr_t)VM_KERNEL_UNSLIDE(vm_kernel_stext); +#if defined(__arm64__) + if (uuid_type == STACKSHOT_KCTYPE_LOADINFO64_TEXT_EXEC) { + /* If we're reporting TEXT_EXEC load info, populate the TEXT_EXEC base instead */ + extern vm_offset_t segTEXTEXECB; + image_load_address = (uintptr_t)VM_KERNEL_UNSLIDE(segTEXTEXECB); + } +#endif uuid_info_array[0].imageLoadAddress = image_load_address; stackshot_memcpy(&uuid_info_array[0].imageUUID, kernel_uuid, sizeof(uuid_t)); @@ -1108,10 +1193,17 @@ kcdata_record_uuid_info(kcdata_descriptor_t kcd, task_t task, uint32_t trace_fla uint32_t kexti; for (kexti = 0; kexti < gLoadedKextSummaries->numSummaries; kexti++) { image_load_address = (uintptr_t)VM_KERNEL_UNSLIDE(gLoadedKextSummaries->summaries[kexti].address); +#if defined(__arm64__) + if (uuid_type == STACKSHOT_KCTYPE_LOADINFO64_TEXT_EXEC) { + /* If we're reporting TEXT_EXEC load info, populate the TEXT_EXEC base instead */ + image_load_address = (uintptr_t)VM_KERNEL_UNSLIDE(gLoadedKextSummaries->summaries[kexti].text_exec_address); + } +#endif uuid_info_array[kexti + 1].imageLoadAddress = image_load_address; stackshot_memcpy(&uuid_info_array[kexti + 1].imageUUID, &gLoadedKextSummaries->summaries[kexti].uuid, sizeof(uuid_t)); } } + kcd_exit_on_error(kcdata_compression_window_close(kcd)); } while (0); } @@ -1140,6 +1232,8 @@ kcdata_record_task_iostats(kcdata_descriptor_t kcd, task_t task) /* I/O Statistics if any counters are non zero */ assert(IO_NUM_PRIORITIES == STACKSHOT_IO_NUM_PRIORITIES); if (task->task_io_stats && !memory_iszero(task->task_io_stats, sizeof(struct io_stat_info))) { + /* struct io_stats_snapshot is quite large - avoid overflowing the stack. 
*/ + kcdata_compression_window_open(kcd); kcd_exit_on_error(kcdata_get_memory_addr(kcd, STACKSHOT_KCTYPE_IOSTATS, sizeof(struct io_stats_snapshot), &out_addr)); struct io_stats_snapshot *_iostat = (struct io_stats_snapshot *)out_addr; _iostat->ss_disk_reads_count = task->task_io_stats->disk_reads.count; @@ -1158,8 +1252,10 @@ kcdata_record_task_iostats(kcdata_descriptor_t kcd, task_t task) _iostat->ss_io_priority_count[i] = task->task_io_stats->io_priority[i].count; _iostat->ss_io_priority_size[i] = task->task_io_stats->io_priority[i].size; } + kcd_exit_on_error(kcdata_compression_window_close(kcd)); } + error_exit: return error; } @@ -1168,23 +1264,38 @@ error_exit: static kern_return_t kcdata_record_task_instrs_cycles(kcdata_descriptor_t kcd, task_t task) { - uint64_t instrs = 0, cycles = 0; - mt_stackshot_task(task, &instrs, &cycles); + struct instrs_cycles_snapshot instrs_cycles = {0}; + uint64_t ics_instructions; + uint64_t ics_cycles; - kern_return_t error = KERN_SUCCESS; - mach_vm_address_t out_addr = 0; - kcd_exit_on_error(kcdata_get_memory_addr(kcd, STACKSHOT_KCTYPE_INSTRS_CYCLES, sizeof(struct instrs_cycles_snapshot), &out_addr)); - struct instrs_cycles_snapshot *instrs_cycles = (struct instrs_cycles_snapshot *)out_addr; - instrs_cycles->ics_instructions = instrs; - instrs_cycles->ics_cycles = cycles; + mt_stackshot_task(task, &ics_instructions, &ics_cycles); + instrs_cycles.ics_instructions = ics_instructions; + instrs_cycles.ics_cycles = ics_cycles; -error_exit: - return error; + return kcdata_push_data(kcd, STACKSHOT_KCTYPE_INSTRS_CYCLES, sizeof(instrs_cycles), &instrs_cycles); } #endif /* MONOTONIC */ static kern_return_t -kcdata_record_task_snapshot(kcdata_descriptor_t kcd, task_t task, uint32_t trace_flags, boolean_t have_pmap, unaligned_u64 **task_snap_ss_flags) +kcdata_record_task_cpu_architecture(kcdata_descriptor_t kcd, task_t task) +{ + struct stackshot_cpu_architecture cpu_architecture = {0}; + int32_t cputype; + int32_t cpusubtype; + + proc_archinfo_kdp(task->bsd_info, &cputype, &cpusubtype); + cpu_architecture.cputype = cputype; + cpu_architecture.cpusubtype = cpusubtype; + + return kcdata_push_data(kcd, STACKSHOT_KCTYPE_TASK_CPU_ARCHITECTURE, sizeof(struct stackshot_cpu_architecture), &cpu_architecture); +} + +static kern_return_t +#if STACKSHOT_COLLECTS_LATENCY_INFO +kcdata_record_task_snapshot(kcdata_descriptor_t kcd, task_t task, uint64_t trace_flags, boolean_t have_pmap, unaligned_u64 task_snap_ss_flags, struct stackshot_latency_task *latency_info) +#else +kcdata_record_task_snapshot(kcdata_descriptor_t kcd, task_t task, uint64_t trace_flags, boolean_t have_pmap, unaligned_u64 task_snap_ss_flags) +#endif /* STACKSHOT_COLLECTS_LATENCY_INFO */ { boolean_t collect_delta_stackshot = ((trace_flags & STACKSHOT_COLLECT_DELTA_SNAPSHOT) != 0); boolean_t collect_iostats = !collect_delta_stackshot && !(trace_flags & STACKSHOT_NO_IO_STATS); @@ -1200,20 +1311,31 @@ kcdata_record_task_snapshot(kcdata_descriptor_t kcd, task_t task, uint32_t trace kern_return_t error = KERN_SUCCESS; mach_vm_address_t out_addr = 0; struct task_snapshot_v2 * cur_tsnap = NULL; - - assert(task_snap_ss_flags != NULL); +#if STACKSHOT_COLLECTS_LATENCY_INFO + latency_info->cur_tsnap_latency = mach_absolute_time(); +#endif /* STACKSHOT_COLLECTS_LATENCY_INFO */ int task_pid = pid_from_task(task); uint64_t task_uniqueid = get_task_uniqueid(task); uint64_t proc_starttime_secs = 0; + if (task_pid && (task_did_exec_internal(task) || task_is_exec_copy_internal(task))) { + /* + * if this task is a transit 
task from another one, show the pid as + * negative + */ + task_pid = 0 - task_pid; + } + + /* the task_snapshot_v2 struct is large - avoid overflowing the stack */ + kcdata_compression_window_open(kcd); kcd_exit_on_error(kcdata_get_memory_addr(kcd, STACKSHOT_KCTYPE_TASK_SNAPSHOT, sizeof(struct task_snapshot_v2), &out_addr)); cur_tsnap = (struct task_snapshot_v2 *)out_addr; bzero(cur_tsnap, sizeof(*cur_tsnap)); cur_tsnap->ts_unique_pid = task_uniqueid; cur_tsnap->ts_ss_flags = kcdata_get_task_ss_flags(task); - *task_snap_ss_flags = (unaligned_u64 *)&cur_tsnap->ts_ss_flags; + cur_tsnap->ts_ss_flags |= task_snap_ss_flags; cur_tsnap->ts_user_time_in_terminated_threads = task->total_user_time; cur_tsnap->ts_system_time_in_terminated_threads = task->total_system_time; @@ -1232,50 +1354,68 @@ kcdata_record_task_snapshot(kcdata_descriptor_t kcd, task_t task, uint32_t trace LATENCY_QOS_TIER_UNSPECIFIED : ((0xFF << 16) | task->effective_policy.tep_latency_qos); cur_tsnap->ts_pid = task_pid; + /* Add the BSD process identifiers */ + if (task_pid != -1 && task->bsd_info != NULL) { + proc_name_kdp(task, cur_tsnap->ts_p_comm, sizeof(cur_tsnap->ts_p_comm)); + } else { + cur_tsnap->ts_p_comm[0] = '\0'; +#if IMPORTANCE_INHERITANCE && (DEVELOPMENT || DEBUG) + if (task->task_imp_base != NULL) { + stackshot_strlcpy(cur_tsnap->ts_p_comm, &task->task_imp_base->iit_procname[0], + MIN((int)sizeof(task->task_imp_base->iit_procname), (int)sizeof(cur_tsnap->ts_p_comm))); + } +#endif /* IMPORTANCE_INHERITANCE && (DEVELOPMENT || DEBUG) */ + } + + kcd_exit_on_error(kcdata_compression_window_close(kcd)); + +#if CONFIG_COALITIONS + if (task_pid != -1 && task->bsd_info != NULL && + ((trace_flags & STACKSHOT_SAVE_JETSAM_COALITIONS) && (task->coalition[COALITION_TYPE_JETSAM] != NULL))) { + uint64_t jetsam_coal_id = coalition_id(task->coalition[COALITION_TYPE_JETSAM]); + kcd_exit_on_error(kcdata_push_data(kcd, STACKSHOT_KCTYPE_JETSAM_COALITION, sizeof(jetsam_coal_id), &jetsam_coal_id)); + } +#endif /* CONFIG_COALITIONS */ + #if __arm__ || __arm64__ if (collect_asid && have_pmap) { uint32_t asid = PMAP_VASID(task->map->pmap); - kcd_exit_on_error(kcdata_get_memory_addr(kcd, STACKSHOT_KCTYPE_ASID, sizeof(uint32_t), &out_addr)); - stackshot_memcpy((void*)out_addr, &asid, sizeof(asid)); + kcd_exit_on_error(kcdata_push_data(kcd, STACKSHOT_KCTYPE_ASID, sizeof(asid), &asid)); } #endif + +#if STACKSHOT_COLLECTS_LATENCY_INFO + latency_info->cur_tsnap_latency = mach_absolute_time() - latency_info->cur_tsnap_latency; + latency_info->pmap_latency = mach_absolute_time(); +#endif /* STACKSHOT_COLLECTS_LATENCY_INFO */ + if (collect_pagetables && have_pmap) { #if INTERRUPT_MASKED_DEBUG // pagetable dumps can be large; reset the interrupt timeout to avoid a panic ml_spin_debug_clear_self(); #endif - size_t bytes_dumped = pmap_dump_page_tables(task->map->pmap, kcd_end_address(kcd), kcd_max_address(kcd)); - if (bytes_dumped == 0) { - error = KERN_INSUFFICIENT_BUFFER_SIZE; - goto error_exit; - } else if (bytes_dumped == (size_t)-1) { - error = KERN_NOT_SUPPORTED; + size_t bytes_dumped = 0; + error = pmap_dump_page_tables(task->map->pmap, kcd_end_address(kcd), kcd_max_address(kcd), stack_snapshot_pagetable_mask, &bytes_dumped); + if (error != KERN_SUCCESS) { goto error_exit; } else { + /* Variable size array - better not have it on the stack. 
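The latency bookkeeping sprinkled through this function follows one idiom: record mach_absolute_time() when a phase starts, then overwrite the same field with the difference when it ends, leaving an elapsed time in mach absolute-time units. In isolation (illustrative helper, not part of the patch):

    #include <mach/mach_time.h>   /* mach_absolute_time(); in-kernel it comes from kern/clock.h */

    /* Time one phase; the result is in absolute-time units, not nanoseconds. */
    static uint64_t
    measure_phase(void (*work)(void))
    {
        uint64_t t = mach_absolute_time();
        work();
        return mach_absolute_time() - t;
    }

Converting those units to nanoseconds uses the mach_timebase_info numer/denom pair, the same conversion stackshot_init() applies (in the other direction) to its fault-time limit.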
*/ + kcdata_compression_window_open(kcd); kcd_exit_on_error(kcdata_get_memory_addr_for_array(kcd, STACKSHOT_KCTYPE_PAGE_TABLES, sizeof(uint64_t), (uint32_t)(bytes_dumped / sizeof(uint64_t)), &out_addr)); + kcd_exit_on_error(kcdata_compression_window_close(kcd)); } } - /* Add the BSD process identifiers */ - if (task_pid != -1 && task->bsd_info != NULL) { - proc_name_kdp(task, cur_tsnap->ts_p_comm, sizeof(cur_tsnap->ts_p_comm)); -#if CONFIG_COALITIONS - if ((trace_flags & STACKSHOT_SAVE_JETSAM_COALITIONS) && (task->coalition[COALITION_TYPE_JETSAM] != NULL)) { - uint64_t jetsam_coal_id = coalition_id(task->coalition[COALITION_TYPE_JETSAM]); - kcd_exit_on_error(kcdata_get_memory_addr(kcd, STACKSHOT_KCTYPE_JETSAM_COALITION, sizeof(jetsam_coal_id), &out_addr)); - stackshot_memcpy((void*)out_addr, &jetsam_coal_id, sizeof(jetsam_coal_id)); - } -#endif /* CONFIG_COALITIONS */ - } else { - cur_tsnap->ts_p_comm[0] = '\0'; -#if IMPORTANCE_INHERITANCE && (DEVELOPMENT || DEBUG) - if (task->task_imp_base != NULL) { - stackshot_strlcpy(cur_tsnap->ts_p_comm, &task->task_imp_base->iit_procname[0], - MIN((int)sizeof(task->task_imp_base->iit_procname), (int)sizeof(cur_tsnap->ts_p_comm))); - } -#endif /* IMPORTANCE_INHERITANCE && (DEVELOPMENT || DEBUG) */ - } +#if STACKSHOT_COLLECTS_LATENCY_INFO + latency_info->pmap_latency = mach_absolute_time() - latency_info->pmap_latency; + latency_info->bsd_proc_ids_latency = mach_absolute_time(); +#endif /* STACKSHOT_COLLECTS_LATENCY_INFO */ + +#if STACKSHOT_COLLECTS_LATENCY_INFO + latency_info->bsd_proc_ids_latency = mach_absolute_time() - latency_info->bsd_proc_ids_latency; + latency_info->end_latency = mach_absolute_time(); +#endif /* STACKSHOT_COLLECTS_LATENCY_INFO */ if (collect_iostats) { kcd_exit_on_error(kcdata_record_task_iostats(kcd, task)); @@ -1287,12 +1427,18 @@ kcdata_record_task_snapshot(kcdata_descriptor_t kcd, task_t task, uint32_t trace } #endif /* MONOTONIC */ + kcd_exit_on_error(kcdata_record_task_cpu_architecture(kcd, task)); + +#if STACKSHOT_COLLECTS_LATENCY_INFO + latency_info->end_latency = mach_absolute_time() - latency_info->end_latency; +#endif /* STACKSHOT_COLLECTS_LATENCY_INFO */ + error_exit: return error; } static kern_return_t -kcdata_record_task_delta_snapshot(kcdata_descriptor_t kcd, task_t task, uint32_t trace_flags, boolean_t have_pmap, unaligned_u64 **task_snap_ss_flags) +kcdata_record_task_delta_snapshot(kcdata_descriptor_t kcd, task_t task, uint64_t trace_flags, boolean_t have_pmap, unaligned_u64 task_snap_ss_flags) { #if !MONOTONIC #pragma unused(trace_flags) @@ -1309,7 +1455,6 @@ kcdata_record_task_delta_snapshot(kcdata_descriptor_t kcd, task_t task, uint32_t #endif /* MONOTONIC */ uint64_t task_uniqueid = get_task_uniqueid(task); - assert(task_snap_ss_flags != NULL); kcd_exit_on_error(kcdata_get_memory_addr(kcd, STACKSHOT_KCTYPE_TASK_DELTA_SNAPSHOT, sizeof(struct task_delta_snapshot_v2), &out_addr)); @@ -1317,7 +1462,7 @@ kcdata_record_task_delta_snapshot(kcdata_descriptor_t kcd, task_t task, uint32_t cur_tsnap->tds_unique_pid = task_uniqueid; cur_tsnap->tds_ss_flags = kcdata_get_task_ss_flags(task); - *task_snap_ss_flags = (unaligned_u64 *)&cur_tsnap->tds_ss_flags; + cur_tsnap->tds_ss_flags |= task_snap_ss_flags; cur_tsnap->tds_user_time_in_terminated_threads = task->total_user_time; cur_tsnap->tds_system_time_in_terminated_threads = task->total_system_time; @@ -1388,18 +1533,22 @@ error_exit: static kern_return_t kcdata_record_thread_snapshot( - kcdata_descriptor_t kcd, thread_t thread, task_t task, uint32_t trace_flags, 
boolean_t have_pmap, boolean_t thread_on_core) + kcdata_descriptor_t kcd, thread_t thread, task_t task, uint64_t trace_flags, boolean_t have_pmap, boolean_t thread_on_core) { boolean_t dispatch_p = ((trace_flags & STACKSHOT_GET_DQ) != 0); boolean_t active_kthreads_only_p = ((trace_flags & STACKSHOT_ACTIVE_KERNEL_THREADS_ONLY) != 0); - boolean_t trace_fp_p = false; boolean_t collect_delta_stackshot = ((trace_flags & STACKSHOT_COLLECT_DELTA_SNAPSHOT) != 0); boolean_t collect_iostats = !collect_delta_stackshot && !(trace_flags & STACKSHOT_NO_IO_STATS); #if MONOTONIC boolean_t collect_instrs_cycles = ((trace_flags & STACKSHOT_INSTRS_CYCLES) != 0); #endif /* MONOTONIC */ - kern_return_t error = KERN_SUCCESS; + +#if STACKSHOT_COLLECTS_LATENCY_INFO + struct stackshot_latency_thread latency_info; + latency_info.cur_thsnap1_latency = mach_absolute_time(); +#endif /* STACKSHOT_COLLECTS_LATENCY_INFO */ + mach_vm_address_t out_addr = 0; int saved_count = 0; @@ -1424,6 +1573,12 @@ kcdata_record_thread_snapshot( cur_thread_snap->ths_voucher_identifier = 0; } +#if STACKSHOT_COLLECTS_LATENCY_INFO + latency_info.cur_thsnap1_latency = mach_absolute_time() - latency_info.cur_thsnap1_latency; + latency_info.dispatch_serial_latency = mach_absolute_time(); + latency_info.dispatch_label_latency = 0; +#endif /* STACKSHOT_COLLECTS_LATENCY_INFO */ + cur_thread_snap->ths_dqserialnum = 0; if (dispatch_p && (task != kernel_task) && (task->active) && have_pmap) { uint64_t dqkeyaddr = thread_dispatchqaddr(thread); @@ -1439,6 +1594,11 @@ kcdata_record_thread_snapshot( cur_thread_snap->ths_dqserialnum = dqserialnum; } +#if STACKSHOT_COLLECTS_LATENCY_INFO + latency_info.dispatch_serial_latency = mach_absolute_time() - latency_info.dispatch_serial_latency; + latency_info.dispatch_label_latency = mach_absolute_time(); +#endif /* STACKSHOT_COLLECTS_LATENCY_INFO */ + /* try copying in the queue label */ uint64_t label_offs = get_task_dispatchqueue_label_offset(task); if (label_offs) { @@ -1459,10 +1619,20 @@ kcdata_record_thread_snapshot( } } } +#if STACKSHOT_COLLECTS_LATENCY_INFO + latency_info.dispatch_label_latency = mach_absolute_time() - latency_info.dispatch_label_latency; +#endif /* STACKSHOT_COLLECTS_LATENCY_INFO */ } } } +#if STACKSHOT_COLLECTS_LATENCY_INFO + if ((cur_thread_snap->ths_ss_flags & kHasDispatchSerial) == 0) { + latency_info.dispatch_serial_latency = 0; + } + latency_info.cur_thsnap2_latency = mach_absolute_time(); +#endif /* STACKSHOT_COLLECTS_LATENCY_INFO */ + tval = safe_grab_timer_value(&thread->user_timer); cur_thread_snap->ths_user_time = tval; tval = safe_grab_timer_value(&thread->system_timer); @@ -1515,7 +1685,7 @@ kcdata_record_thread_snapshot( cur_thread_snap->ths_rqos = thread->requested_policy.thrp_qos; cur_thread_snap->ths_rqos_override = MAX(thread->requested_policy.thrp_qos_override, thread->requested_policy.thrp_qos_workq_override); - cur_thread_snap->ths_io_tier = proc_get_effective_thread_policy(thread, TASK_POLICY_IO); + cur_thread_snap->ths_io_tier = (uint8_t) proc_get_effective_thread_policy(thread, TASK_POLICY_IO); cur_thread_snap->ths_thread_t = VM_KERNEL_UNSLIDE_OR_PERM(thread); static_assert(sizeof(thread->effective_policy) == sizeof(uint64_t)); @@ -1523,6 +1693,11 @@ kcdata_record_thread_snapshot( cur_thread_snap->ths_requested_policy = *(unaligned_u64 *) &thread->requested_policy; cur_thread_snap->ths_effective_policy = *(unaligned_u64 *) &thread->effective_policy; +#if STACKSHOT_COLLECTS_LATENCY_INFO + latency_info.cur_thsnap2_latency = mach_absolute_time() - 
latency_info.cur_thsnap2_latency; + latency_info.thread_name_latency = mach_absolute_time(); +#endif /* STACKSHOT_COLLECTS_LATENCY_INFO */ + /* if there is thread name then add to buffer */ cur_thread_name[0] = '\0'; proc_threadname_kdp(thread->uthread, cur_thread_name, STACKSHOT_MAX_THREAD_NAME_SIZE); @@ -1531,6 +1706,11 @@ kcdata_record_thread_snapshot( stackshot_memcpy((void *)out_addr, (void *)cur_thread_name, sizeof(cur_thread_name)); } +#if STACKSHOT_COLLECTS_LATENCY_INFO + latency_info.thread_name_latency = mach_absolute_time() - latency_info.thread_name_latency; + latency_info.sur_times_latency = mach_absolute_time(); +#endif /* STACKSHOT_COLLECTS_LATENCY_INFO */ + /* record system, user, and runnable times */ time_value_t user_time, system_time, runnable_time; thread_read_times(thread, &user_time, &system_time, &runnable_time); @@ -1542,6 +1722,11 @@ kcdata_record_thread_snapshot( .runnable_usec = (uint64_t)runnable_time.seconds * USEC_PER_SEC + runnable_time.microseconds, }; +#if STACKSHOT_COLLECTS_LATENCY_INFO + latency_info.sur_times_latency = mach_absolute_time() - latency_info.sur_times_latency; + latency_info.user_stack_latency = mach_absolute_time(); +#endif /* STACKSHOT_COLLECTS_LATENCY_INFO */ + /* Trace user stack, if any */ if (!active_kthreads_only_p && task->active && thread->task->map != kernel_map) { uint32_t thread_snapshot_flags = 0; @@ -1550,12 +1735,15 @@ kcdata_record_thread_snapshot( if (is_64bit_data) { uint64_t sp = 0; out_addr = (mach_vm_address_t)kcd_end_address(kcd); + + uintptr_t fp = 0; + + saved_count = machine_trace_thread64(thread, (char *)out_addr, (char *)kcd_max_address(kcd), MAX_FRAMES, TRUE, - trace_fp_p, &thread_snapshot_flags, &sp); + &thread_snapshot_flags, &sp, fp); if (saved_count > 0) { - int frame_size = trace_fp_p ? sizeof(struct stack_snapshot_frame64) : sizeof(uint64_t); - kcd_exit_on_error(kcdata_get_memory_addr_for_array(kcd, trace_fp_p ? STACKSHOT_KCTYPE_USER_STACKFRAME64 - : STACKSHOT_KCTYPE_USER_STACKLR64, + int frame_size = sizeof(uint64_t); + kcd_exit_on_error(kcdata_get_memory_addr_for_array(kcd, STACKSHOT_KCTYPE_USER_STACKLR64, frame_size, saved_count / frame_size, &out_addr)); cur_thread_snap->ths_ss_flags |= kUser64_p; } @@ -1571,15 +1759,14 @@ kcdata_record_thread_snapshot( memcpy(stacktop->stack_contents, (void*) kern_virt_addr, 8); } } -#endif +#endif /* __x86_64__ */ } else { out_addr = (mach_vm_address_t)kcd_end_address(kcd); - saved_count = machine_trace_thread(thread, (char *)out_addr, (char *)kcd_max_address(kcd), MAX_FRAMES, TRUE, trace_fp_p, + saved_count = machine_trace_thread(thread, (char *)out_addr, (char *)kcd_max_address(kcd), MAX_FRAMES, TRUE, &thread_snapshot_flags); if (saved_count > 0) { - int frame_size = trace_fp_p ? sizeof(struct stack_snapshot_frame32) : sizeof(uint32_t); - kcd_exit_on_error(kcdata_get_memory_addr_for_array(kcd, trace_fp_p ? STACKSHOT_KCTYPE_USER_STACKFRAME - : STACKSHOT_KCTYPE_USER_STACKLR, + int frame_size = sizeof(uint32_t); + kcd_exit_on_error(kcdata_get_memory_addr_for_array(kcd, STACKSHOT_KCTYPE_USER_STACKLR, frame_size, saved_count / frame_size, &out_addr)); } } @@ -1589,6 +1776,11 @@ kcdata_record_thread_snapshot( } } +#if STACKSHOT_COLLECTS_LATENCY_INFO + latency_info.user_stack_latency = mach_absolute_time() - latency_info.user_stack_latency; + latency_info.kernel_stack_latency = mach_absolute_time(); +#endif /* STACKSHOT_COLLECTS_LATENCY_INFO */ + /* Call through to the machine specific trace routines * Frames are added past the snapshot header. 
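
With the trace_fp_p path removed above, stackshot no longer records {return address, frame pointer} pairs; a user or kernel backtrace is stored as a flat array of return addresses (the STACKSHOT_KCTYPE_*_STACKLR* types), which is why frame_size collapses to sizeof(uint64_t) or sizeof(uint32_t). For illustration only, a hypothetical kcdata consumer would walk such an item roughly as follows; item_payload and item_size are assumed names for the item's data pointer and byte length, not identifiers from the patch:

    /* Hypothetical consumer-side sketch; not part of the patch. */
    const uint64_t *lrs = (const uint64_t *)item_payload;       /* STACKSHOT_KCTYPE_USER_STACKLR64 payload */
    uint32_t nframes = (uint32_t)(item_size / sizeof(uint64_t)); /* one 64-bit return address per frame */
    for (uint32_t i = 0; i < nframes; i++) {
            printf("frame %2u: 0x%016llx\n", i, (unsigned long long)lrs[i]);
    }
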
*/ @@ -1596,24 +1788,23 @@ kcdata_record_thread_snapshot( uint32_t thread_snapshot_flags = 0; #if defined(__LP64__) out_addr = (mach_vm_address_t)kcd_end_address(kcd); - saved_count = machine_trace_thread64(thread, (char *)out_addr, (char *)kcd_max_address(kcd), MAX_FRAMES, FALSE, trace_fp_p, - &thread_snapshot_flags, NULL); + saved_count = machine_trace_thread64(thread, (char *)out_addr, (char *)kcd_max_address(kcd), MAX_FRAMES, FALSE, + &thread_snapshot_flags, NULL, 0); if (saved_count > 0) { - int frame_size = trace_fp_p ? sizeof(struct stack_snapshot_frame64) : sizeof(uint64_t); + int frame_size = sizeof(uint64_t); cur_thread_snap->ths_ss_flags |= kKernel64_p; - kcd_exit_on_error(kcdata_get_memory_addr_for_array(kcd, trace_fp_p ? STACKSHOT_KCTYPE_KERN_STACKFRAME64 - : STACKSHOT_KCTYPE_KERN_STACKLR64, + kcd_exit_on_error(kcdata_get_memory_addr_for_array(kcd, STACKSHOT_KCTYPE_KERN_STACKLR64, frame_size, saved_count / frame_size, &out_addr)); } #else out_addr = (mach_vm_address_t)kcd_end_address(kcd); - saved_count = machine_trace_thread(thread, (char *)out_addr, (char *)kcd_max_address(kcd), MAX_FRAMES, FALSE, trace_fp_p, + saved_count = machine_trace_thread(thread, (char *)out_addr, (char *)kcd_max_address(kcd), MAX_FRAMES, FALSE, &thread_snapshot_flags); if (saved_count > 0) { - int frame_size = trace_fp_p ? sizeof(struct stack_snapshot_frame32) : sizeof(uint32_t); + int frame_size = sizeof(uint32_t); kcd_exit_on_error( - kcdata_get_memory_addr_for_array(kcd, trace_fp_p ? STACKSHOT_KCTYPE_KERN_STACKFRAME : STACKSHOT_KCTYPE_KERN_STACKLR, - frame_size, saved_count / frame_size, &out_addr)); + kcdata_get_memory_addr_for_array(kcd, STACKSHOT_KCTYPE_KERN_STACKLR, frame_size, + saved_count / frame_size, &out_addr)); } #endif if (thread_snapshot_flags != 0) { @@ -1621,6 +1812,18 @@ kcdata_record_thread_snapshot( } } +#if STACKSHOT_COLLECTS_LATENCY_INFO + latency_info.kernel_stack_latency = mach_absolute_time() - latency_info.kernel_stack_latency; + latency_info.misc_latency = mach_absolute_time(); +#endif /* STACKSHOT_COLLECTS_LATENCY_INFO */ + +#if CONFIG_THREAD_GROUPS + if (trace_flags & STACKSHOT_THREAD_GROUP) { + uint64_t thread_group_id = thread->thread_group ? 
thread_group_get_id(thread->thread_group) : 0; + kcd_exit_on_error(kcdata_get_memory_addr(kcd, STACKSHOT_KCTYPE_THREAD_GROUP, sizeof(thread_group_id), &out_addr)); + stackshot_memcpy((void*)out_addr, &thread_group_id, sizeof(uint64_t)); + } +#endif /* CONFIG_THREAD_GROUPS */ if (collect_iostats) { kcd_exit_on_error(kcdata_record_thread_iostats(kcd, thread)); @@ -1638,6 +1841,13 @@ kcdata_record_thread_snapshot( } #endif /* MONOTONIC */ +#if STACKSHOT_COLLECTS_LATENCY_INFO + latency_info.misc_latency = mach_absolute_time() - latency_info.misc_latency; + if (collect_latency_info) { + kcd_exit_on_error(kcdata_push_data(kcd, STACKSHOT_KCTYPE_LATENCY_INFO_THREAD, sizeof(latency_info), &latency_info)); + } +#endif /* STACKSHOT_COLLECTS_LATENCY_INFO */ + error_exit: return error; } @@ -1681,7 +1891,7 @@ kcdata_record_thread_delta_snapshot(struct thread_delta_snapshot_v3 * cur_thread cur_thread_snap->tds_rqos = thread->requested_policy.thrp_qos; cur_thread_snap->tds_rqos_override = MAX(thread->requested_policy.thrp_qos_override, thread->requested_policy.thrp_qos_workq_override); - cur_thread_snap->tds_io_tier = proc_get_effective_thread_policy(thread, TASK_POLICY_IO); + cur_thread_snap->tds_io_tier = (uint8_t) proc_get_effective_thread_policy(thread, TASK_POLICY_IO); static_assert(sizeof(thread->effective_policy) == sizeof(uint64_t)); static_assert(sizeof(thread->requested_policy) == sizeof(uint64_t)); @@ -1709,7 +1919,7 @@ enum thread_classification { }; static enum thread_classification -classify_thread(thread_t thread, boolean_t * thread_on_core_p, uint32_t trace_flags) +classify_thread(thread_t thread, boolean_t * thread_on_core_p, uint64_t trace_flags) { boolean_t collect_delta_stackshot = ((trace_flags & STACKSHOT_COLLECT_DELTA_SNAPSHOT) != 0); @@ -1733,7 +1943,7 @@ classify_thread(thread_t thread, boolean_t * thread_on_core_p, uint32_t trace_fl struct stackshot_context { int pid; - uint32_t trace_flags; + uint64_t trace_flags; }; static kern_return_t @@ -1744,7 +1954,6 @@ kdp_stackshot_record_task(struct stackshot_context *ctx, task_t task) boolean_t collect_delta_stackshot = ((ctx->trace_flags & STACKSHOT_COLLECT_DELTA_SNAPSHOT) != 0); boolean_t save_owner_info = ((ctx->trace_flags & STACKSHOT_THREAD_WAITINFO) != 0); - kern_return_t error = KERN_SUCCESS; mach_vm_address_t out_addr = 0; int saved_count = 0; @@ -1752,15 +1961,18 @@ kdp_stackshot_record_task(struct stackshot_context *ctx, task_t task) int task_pid = 0; uint64_t task_uniqueid = 0; int num_delta_thread_snapshots = 0; - int num_nonrunnable_threads = 0; int num_waitinfo_threads = 0; int num_turnstileinfo_threads = 0; uint64_t task_start_abstime = 0; - boolean_t task_delta_stackshot = FALSE; boolean_t have_map = FALSE, have_pmap = FALSE; boolean_t some_thread_ran = FALSE; - unaligned_u64 *task_snap_ss_flags = NULL; + unaligned_u64 task_snap_ss_flags = 0; + +#if STACKSHOT_COLLECTS_LATENCY_INFO + struct stackshot_latency_task latency_info; + latency_info.setup_latency = mach_absolute_time(); +#endif /* STACKSHOT_COLLECTS_LATENCY_INFO */ #if INTERRUPT_MASKED_DEBUG && MONOTONIC uint64_t task_begin_cpu_cycle_count = 0; @@ -1780,19 +1992,22 @@ kdp_stackshot_record_task(struct stackshot_context *ctx, task_t task) task_pid = pid_from_task(task); task_uniqueid = get_task_uniqueid(task); - if (!task->active || task_is_a_corpse(task)) { + if (!task->active || task_is_a_corpse(task) || task_is_a_corpse_fork(task)) { /* - * Not interested in terminated tasks without threads, and - * at the moment, stackshot can't handle a task without a name. 
+ * Not interested in terminated tasks without threads. */ if (queue_empty(&task->threads) || task_pid == -1) { return KERN_SUCCESS; } } - if (collect_delta_stackshot) { - proc_starttime_kdp(task->bsd_info, NULL, NULL, &task_start_abstime); - } + /* All PIDs should have the MSB unset */ + assert((task_pid & (1ULL << 31)) == 0); + +#if STACKSHOT_COLLECTS_LATENCY_INFO + latency_info.setup_latency = mach_absolute_time() - latency_info.setup_latency; + latency_info.task_uniqueid = task_uniqueid; +#endif /* STACKSHOT_COLLECTS_LATENCY_INFO */ /* Trace everything, unless a process was specified */ if ((ctx->pid == -1) || (ctx->pid == task_pid)) { @@ -1800,29 +2015,100 @@ kdp_stackshot_record_task(struct stackshot_context *ctx, task_t task) kcd_exit_on_error(kcdata_add_container_marker(stackshot_kcdata_p, KCDATA_TYPE_CONTAINER_BEGIN, STACKSHOT_KCCONTAINER_TASK, task_uniqueid)); - if (!collect_delta_stackshot || (task_start_abstime == 0) || - (task_start_abstime > stack_snapshot_delta_since_timestamp)) { - kcd_exit_on_error(kcdata_record_task_snapshot(stackshot_kcdata_p, task, ctx->trace_flags, have_pmap, &task_snap_ss_flags)); + if (collect_delta_stackshot) { + /* + * For delta stackshots we need to know if a thread from this task has run since the + * previous timestamp to decide whether we're going to record a full snapshot and UUID info. + */ + thread_t thread = THREAD_NULL; + queue_iterate(&task->threads, thread, thread_t, task_threads) + { + if ((thread == NULL) || !ml_validate_nofault((vm_offset_t)thread, sizeof(struct thread))) { + error = KERN_FAILURE; + goto error_exit; + } + + if (active_kthreads_only_p && thread->kernel_stack == 0) { + continue; + } + + boolean_t thread_on_core; + enum thread_classification thread_classification = classify_thread(thread, &thread_on_core, ctx->trace_flags); + + switch (thread_classification) { + case tc_full_snapshot: + some_thread_ran = TRUE; + break; + case tc_delta_snapshot: + num_delta_thread_snapshots++; + break; + } + } + } + + if (collect_delta_stackshot) { + proc_starttime_kdp(task->bsd_info, NULL, NULL, &task_start_abstime); + } + + /* Next record any relevant UUID info and store the task snapshot */ + if (!collect_delta_stackshot || + (task_start_abstime == 0) || + (task_start_abstime > stack_snapshot_delta_since_timestamp) || + some_thread_ran) { + /* + * Collect full task information in these scenarios: + * + * 1) a full stackshot + * 2) a delta stackshot where the task started after the previous full stackshot + * 3) a delta stackshot where any thread from the task has run since the previous full stackshot + * + * because the task may have exec'ed, changing its name, architecture, load info, etc + */ + + kcd_exit_on_error(kcdata_record_shared_cache_info(stackshot_kcdata_p, task, &task_snap_ss_flags)); + kcd_exit_on_error(kcdata_record_uuid_info(stackshot_kcdata_p, task, ctx->trace_flags, have_pmap, &task_snap_ss_flags)); +#if STACKSHOT_COLLECTS_LATENCY_INFO + kcd_exit_on_error(kcdata_record_task_snapshot(stackshot_kcdata_p, task, ctx->trace_flags, have_pmap, task_snap_ss_flags, &latency_info)); +#else + kcd_exit_on_error(kcdata_record_task_snapshot(stackshot_kcdata_p, task, ctx->trace_flags, have_pmap, task_snap_ss_flags)); +#endif /* STACKSHOT_COLLECTS_LATENCY_INFO */ } else { - task_delta_stackshot = TRUE; - kcd_exit_on_error(kcdata_record_task_delta_snapshot(stackshot_kcdata_p, task, ctx->trace_flags, have_pmap, &task_snap_ss_flags)); + kcd_exit_on_error(kcdata_record_task_delta_snapshot(stackshot_kcdata_p, task, ctx->trace_flags, 
have_pmap, task_snap_ss_flags)); + } + +#if STACKSHOT_COLLECTS_LATENCY_INFO + latency_info.misc_latency = mach_absolute_time(); +#endif /* STACKSHOT_COLLECTS_LATENCY_INFO */ + + struct thread_delta_snapshot_v3 * delta_snapshots = NULL; + int current_delta_snapshot_index = 0; + if (num_delta_thread_snapshots > 0) { + kcd_exit_on_error(kcdata_get_memory_addr_for_array(stackshot_kcdata_p, STACKSHOT_KCTYPE_THREAD_DELTA_SNAPSHOT, + sizeof(struct thread_delta_snapshot_v3), + num_delta_thread_snapshots, &out_addr)); + delta_snapshots = (struct thread_delta_snapshot_v3 *)out_addr; } - /* Iterate over task threads */ + +#if STACKSHOT_COLLECTS_LATENCY_INFO + latency_info.task_thread_count_loop_latency = mach_absolute_time(); +#endif + /* + * Iterate over the task threads to save thread snapshots and determine + * how much space we need for waitinfo and turnstile info + */ thread_t thread = THREAD_NULL; queue_iterate(&task->threads, thread, thread_t, task_threads) { - uint64_t thread_uniqueid; - if ((thread == NULL) || !ml_validate_nofault((vm_offset_t)thread, sizeof(struct thread))) { error = KERN_FAILURE; goto error_exit; } + uint64_t thread_uniqueid; if (active_kthreads_only_p && thread->kernel_stack == 0) { continue; } - thread_uniqueid = thread_tid(thread); boolean_t thread_on_core; @@ -1833,24 +2119,28 @@ kdp_stackshot_record_task(struct stackshot_context *ctx, task_t task) /* add thread marker */ kcd_exit_on_error(kcdata_add_container_marker(stackshot_kcdata_p, KCDATA_TYPE_CONTAINER_BEGIN, STACKSHOT_KCCONTAINER_THREAD, thread_uniqueid)); - kcd_exit_on_error( - kcdata_record_thread_snapshot(stackshot_kcdata_p, thread, task, ctx->trace_flags, have_pmap, thread_on_core)); + + /* thread snapshot can be large, including strings, avoid overflowing the stack. 
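
The restructured control flow above is easier to see outside diff form. Condensed into a single predicate (a restatement for readability, not code from the patch), a task gets a full snapshot plus shared-cache and UUID info when any of the listed conditions holds, and only a delta task snapshot otherwise:

    /* Restated sketch of the decision in kdp_stackshot_record_task; names as in the diff. */
    boolean_t record_full_task_snapshot =
        !collect_delta_stackshot ||                                    /* 1) not a delta stackshot            */
        (task_start_abstime == 0) ||                                   /*    (no start time available)        */
        (task_start_abstime > stack_snapshot_delta_since_timestamp) || /* 2) task started after the last full */
        some_thread_ran;                                               /* 3) a thread ran since the last full */
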
*/ + kcdata_compression_window_open(stackshot_kcdata_p); + + kcd_exit_on_error(kcdata_record_thread_snapshot(stackshot_kcdata_p, thread, task, ctx->trace_flags, have_pmap, thread_on_core)); + + kcd_exit_on_error(kcdata_compression_window_close(stackshot_kcdata_p)); /* mark end of thread snapshot data */ kcd_exit_on_error(kcdata_add_container_marker(stackshot_kcdata_p, KCDATA_TYPE_CONTAINER_END, STACKSHOT_KCCONTAINER_THREAD, thread_uniqueid)); - - some_thread_ran = TRUE; break; - case tc_delta_snapshot: - num_delta_thread_snapshots++; + kcd_exit_on_error(kcdata_record_thread_delta_snapshot(&delta_snapshots[current_delta_snapshot_index++], thread, thread_on_core)); break; } - /* We want to report owner information regardless of whether a thread + /* + * We want to report owner information regardless of whether a thread * has changed since the last delta, whether it's a normal stackshot, - * or whether it's nonrunnable */ + * or whether it's nonrunnable + */ if (save_owner_info) { if (stackshot_thread_has_valid_waitinfo(thread)) { num_waitinfo_threads++; @@ -1861,91 +2151,84 @@ kdp_stackshot_record_task(struct stackshot_context *ctx, task_t task) } } } +#if STACKSHOT_COLLECTS_LATENCY_INFO + latency_info.task_thread_count_loop_latency = mach_absolute_time() - latency_info.task_thread_count_loop_latency; +#endif /* STACKSHOT_COLLECTS_LATENCY_INFO */ - struct thread_delta_snapshot_v3 * delta_snapshots = NULL; - int current_delta_snapshot_index = 0; - - if (num_delta_thread_snapshots > 0) { - kcd_exit_on_error(kcdata_get_memory_addr_for_array(stackshot_kcdata_p, STACKSHOT_KCTYPE_THREAD_DELTA_SNAPSHOT, - sizeof(struct thread_delta_snapshot_v3), - num_delta_thread_snapshots, &out_addr)); - delta_snapshots = (struct thread_delta_snapshot_v3 *)out_addr; - } - - uint64_t * nonrunnable_tids = NULL; - - if (num_nonrunnable_threads > 0) { - kcd_exit_on_error(kcdata_get_memory_addr_for_array(stackshot_kcdata_p, STACKSHOT_KCTYPE_NONRUNNABLE_TIDS, - sizeof(uint64_t), num_nonrunnable_threads, &out_addr)); - nonrunnable_tids = (uint64_t *)out_addr; - } thread_waitinfo_t *thread_waitinfo = NULL; thread_turnstileinfo_t *thread_turnstileinfo = NULL; int current_waitinfo_index = 0; int current_turnstileinfo_index = 0; + /* allocate space for the wait and turnstil info */ + if (num_waitinfo_threads > 0 || num_turnstileinfo_threads > 0) { + /* thread waitinfo and turnstileinfo can be quite large, avoid overflowing the stack */ + kcdata_compression_window_open(stackshot_kcdata_p); + + if (num_waitinfo_threads > 0) { + kcd_exit_on_error(kcdata_get_memory_addr_for_array(stackshot_kcdata_p, STACKSHOT_KCTYPE_THREAD_WAITINFO, + sizeof(thread_waitinfo_t), num_waitinfo_threads, &out_addr)); + thread_waitinfo = (thread_waitinfo_t *)out_addr; + } - if (num_waitinfo_threads > 0) { - kcd_exit_on_error(kcdata_get_memory_addr_for_array(stackshot_kcdata_p, STACKSHOT_KCTYPE_THREAD_WAITINFO, - sizeof(thread_waitinfo_t), num_waitinfo_threads, &out_addr)); - thread_waitinfo = (thread_waitinfo_t *)out_addr; - } - - if (num_turnstileinfo_threads > 0) { - /* get space for the turnstile info */ - kcd_exit_on_error(kcdata_get_memory_addr_for_array(stackshot_kcdata_p, STACKSHOT_KCTYPE_THREAD_TURNSTILEINFO, - sizeof(thread_turnstileinfo_t), num_turnstileinfo_threads, &out_addr)); - thread_turnstileinfo = (thread_turnstileinfo_t *)out_addr; + if (num_turnstileinfo_threads > 0) { + /* get space for the turnstile info */ + kcd_exit_on_error(kcdata_get_memory_addr_for_array(stackshot_kcdata_p, STACKSHOT_KCTYPE_THREAD_TURNSTILEINFO, + 
sizeof(thread_turnstileinfo_t), num_turnstileinfo_threads, &out_addr)); + thread_turnstileinfo = (thread_turnstileinfo_t *)out_addr; + } } - if (num_delta_thread_snapshots > 0 || num_nonrunnable_threads > 0 || - num_waitinfo_threads > 0 || num_turnstileinfo_threads > 0) { - queue_iterate(&task->threads, thread, thread_t, task_threads) - { - if (active_kthreads_only_p && thread->kernel_stack == 0) { - continue; - } +#if STACKSHOT_COLLECTS_LATENCY_INFO + latency_info.misc_latency = mach_absolute_time() - latency_info.misc_latency; + latency_info.task_thread_data_loop_latency = mach_absolute_time(); +#endif /* STACKSHOT_COLLECTS_LATENCY_INFO */ - /* If we want owner info, we should capture it regardless of its classification */ - if (save_owner_info) { - if (stackshot_thread_has_valid_waitinfo(thread)) { - stackshot_thread_wait_owner_info( - thread, - &thread_waitinfo[current_waitinfo_index++]); - } + /* Iterate over the task's threads to save the wait and turnstile info */ + queue_iterate(&task->threads, thread, thread_t, task_threads) + { + uint64_t thread_uniqueid; - if (stackshot_thread_has_valid_turnstileinfo(thread)) { - stackshot_thread_turnstileinfo( - thread, - &thread_turnstileinfo[current_turnstileinfo_index++]); - } - } + if (active_kthreads_only_p && thread->kernel_stack == 0) { + continue; + } - boolean_t thread_on_core; - enum thread_classification thread_classification = classify_thread(thread, &thread_on_core, ctx->trace_flags); + thread_uniqueid = thread_tid(thread); - switch (thread_classification) { - case tc_full_snapshot: - /* full thread snapshot captured above */ - continue; + /* If we want owner info, we should capture it regardless of its classification */ + if (save_owner_info) { + if (stackshot_thread_has_valid_waitinfo(thread)) { + stackshot_thread_wait_owner_info( + thread, + &thread_waitinfo[current_waitinfo_index++]); + } - case tc_delta_snapshot: - kcd_exit_on_error(kcdata_record_thread_delta_snapshot(&delta_snapshots[current_delta_snapshot_index++], - thread, thread_on_core)); - break; + if (stackshot_thread_has_valid_turnstileinfo(thread)) { + stackshot_thread_turnstileinfo( + thread, + &thread_turnstileinfo[current_turnstileinfo_index++]); } } + } + +#if STACKSHOT_COLLECTS_LATENCY_INFO + latency_info.task_thread_data_loop_latency = mach_absolute_time() - latency_info.task_thread_data_loop_latency; + latency_info.misc2_latency = mach_absolute_time(); +#endif /* STACKSHOT_COLLECTS_LATENCY_INFO */ #if DEBUG || DEVELOPMENT - if (current_delta_snapshot_index != num_delta_thread_snapshots) { - panic("delta thread snapshot count mismatch while capturing snapshots for task %p. expected %d, found %d", task, - num_delta_thread_snapshots, current_delta_snapshot_index); - } - if (current_waitinfo_index != num_waitinfo_threads) { - panic("thread wait info count mismatch while capturing snapshots for task %p. expected %d, found %d", task, - num_waitinfo_threads, current_waitinfo_index); - } + if (current_delta_snapshot_index != num_delta_thread_snapshots) { + panic("delta thread snapshot count mismatch while capturing snapshots for task %p. expected %d, found %d", task, + num_delta_thread_snapshots, current_delta_snapshot_index); + } + if (current_waitinfo_index != num_waitinfo_threads) { + panic("thread wait info count mismatch while capturing snapshots for task %p. 
expected %d, found %d", task, + num_waitinfo_threads, current_waitinfo_index); + } #endif + + if (num_waitinfo_threads > 0 || num_turnstileinfo_threads > 0) { + kcd_exit_on_error(kcdata_compression_window_close(stackshot_kcdata_p)); } #if IMPORTANCE_INHERITANCE @@ -1958,52 +2241,56 @@ kdp_stackshot_record_task(struct stackshot_context *ctx, task_t task) saved_count = task_importance_list_pids(task, TASK_IMP_LIST_DONATING_PIDS, (void *)kcd_end_address(stackshot_kcdata_p), TASK_IMP_WALK_LIMIT); if (saved_count > 0) { + /* Variable size array - better not have it on the stack. */ + kcdata_compression_window_open(stackshot_kcdata_p); kcd_exit_on_error(kcdata_get_memory_addr_for_array(stackshot_kcdata_p, STACKSHOT_KCTYPE_DONATING_PIDS, sizeof(int32_t), saved_count, &out_addr)); + kcd_exit_on_error(kcdata_compression_window_close(stackshot_kcdata_p)); } } #endif - if (!collect_delta_stackshot || (num_delta_thread_snapshots != task->thread_count) || !task_delta_stackshot) { - /* - * Collect shared cache info and UUID info in these scenarios - * 1) a full stackshot - * 2) a delta stackshot where the task started after the previous full stackshot OR - * any thread from the task has run since the previous full stackshot - */ - - kcd_exit_on_error(kcdata_record_shared_cache_info(stackshot_kcdata_p, task, task_snap_ss_flags)); - kcd_exit_on_error(kcdata_record_uuid_info(stackshot_kcdata_p, task, ctx->trace_flags, have_pmap, task_snap_ss_flags)); - } - #if INTERRUPT_MASKED_DEBUG && MONOTONIC if (!panic_stackshot) { kcd_exit_on_error(kcdata_add_uint64_with_description(stackshot_kcdata_p, (mt_cur_cpu_cycles() - task_begin_cpu_cycle_count), "task_cpu_cycle_count")); } #endif + +#if STACKSHOT_COLLECTS_LATENCY_INFO + latency_info.misc2_latency = mach_absolute_time() - latency_info.misc2_latency; + if (collect_latency_info) { + kcd_exit_on_error(kcdata_push_data(stackshot_kcdata_p, STACKSHOT_KCTYPE_LATENCY_INFO_TASK, sizeof(latency_info), &latency_info)); + } +#endif /* STACKSHOT_COLLECTS_LATENCY_INFO */ + /* mark end of task snapshot data */ kcd_exit_on_error(kcdata_add_container_marker(stackshot_kcdata_p, KCDATA_TYPE_CONTAINER_END, STACKSHOT_KCCONTAINER_TASK, task_uniqueid)); } + error_exit: return error; } static kern_return_t -kdp_stackshot_kcdata_format(int pid, uint32_t trace_flags, uint32_t * pBytesTraced) +kdp_stackshot_kcdata_format(int pid, uint64_t trace_flags, uint32_t * pBytesTraced, uint32_t * pBytesUncompressed) { kern_return_t error = KERN_SUCCESS; mach_vm_address_t out_addr = 0; uint64_t abs_time = 0, abs_time_end = 0; - uint64_t *abs_time_addr = NULL; uint64_t system_state_flags = 0; task_t task = TASK_NULL; mach_timebase_info_data_t timebase = {0, 0}; uint32_t length_to_copy = 0, tmp32 = 0; abs_time = mach_absolute_time(); + uint64_t last_task_start_time = 0; + +#if STACKSHOT_COLLECTS_LATENCY_INFO + struct stackshot_latency_collection latency_info; +#endif #if INTERRUPT_MASKED_DEBUG && MONOTONIC uint64_t stackshot_begin_cpu_cycle_count = 0; @@ -2013,15 +2300,20 @@ kdp_stackshot_kcdata_format(int pid, uint32_t trace_flags, uint32_t * pBytesTrac } #endif +#if STACKSHOT_COLLECTS_LATENCY_INFO + collect_latency_info = trace_flags & STACKSHOT_DISABLE_LATENCY_INFO ? 
false : true; +#endif + /* process the flags */ boolean_t collect_delta_stackshot = ((trace_flags & STACKSHOT_COLLECT_DELTA_SNAPSHOT) != 0); boolean_t use_fault_path = ((trace_flags & (STACKSHOT_ENABLE_UUID_FAULTING | STACKSHOT_ENABLE_BT_FAULTING)) != 0); stack_enable_faulting = (trace_flags & (STACKSHOT_ENABLE_BT_FAULTING)); -#if CONFIG_EMBEDDED - /* KEXTs can't be described by just a base address on embedded */ - trace_flags &= ~(STACKSHOT_SAVE_KEXT_LOADINFO); -#endif + /* Currently we only support returning explicit KEXT load info on fileset kernels */ + kc_format_t primary_kc_type = KCFormatUnknown; + if (PE_get_primary_kc_format(&primary_kc_type) && (primary_kc_type != KCFormatFileset)) { + trace_flags &= ~(STACKSHOT_SAVE_KEXT_LOADINFO); + } struct stackshot_context ctx = {}; ctx.trace_flags = trace_flags; @@ -2047,59 +2339,62 @@ kdp_stackshot_kcdata_format(int pid, uint32_t trace_flags, uint32_t * pBytesTrac /* begin saving data into the buffer */ *pBytesTraced = 0; - kcd_exit_on_error(kcdata_add_uint32_with_description(stackshot_kcdata_p, trace_flags, "stackshot_in_flags")); + if (pBytesUncompressed) { + *pBytesUncompressed = 0; + } + kcd_exit_on_error(kcdata_add_uint64_with_description(stackshot_kcdata_p, trace_flags, "stackshot_in_flags")); kcd_exit_on_error(kcdata_add_uint32_with_description(stackshot_kcdata_p, (uint32_t)pid, "stackshot_in_pid")); kcd_exit_on_error(kcdata_add_uint64_with_description(stackshot_kcdata_p, system_state_flags, "system_state_flags")); + if (trace_flags & STACKSHOT_PAGE_TABLES) { + kcd_exit_on_error(kcdata_add_uint32_with_description(stackshot_kcdata_p, stack_snapshot_pagetable_mask, "stackshot_pagetable_mask")); + } + +#if STACKSHOT_COLLECTS_LATENCY_INFO + latency_info.setup_latency = mach_absolute_time(); +#endif /* STACKSHOT_COLLECTS_LATENCY_INFO */ #if CONFIG_JETSAM tmp32 = memorystatus_get_pressure_status_kdp(); - kcd_exit_on_error(kcdata_get_memory_addr(stackshot_kcdata_p, STACKSHOT_KCTYPE_JETSAM_LEVEL, sizeof(uint32_t), &out_addr)); - stackshot_memcpy((void *)out_addr, &tmp32, sizeof(tmp32)); + kcd_exit_on_error(kcdata_push_data(stackshot_kcdata_p, STACKSHOT_KCTYPE_JETSAM_LEVEL, sizeof(uint32_t), &tmp32)); #endif if (!collect_delta_stackshot) { tmp32 = THREAD_POLICY_INTERNAL_STRUCT_VERSION; - kcd_exit_on_error(kcdata_get_memory_addr(stackshot_kcdata_p, STACKSHOT_KCTYPE_THREAD_POLICY_VERSION, sizeof(uint32_t), &out_addr)); - stackshot_memcpy((void *)out_addr, &tmp32, sizeof(tmp32)); + kcd_exit_on_error(kcdata_push_data(stackshot_kcdata_p, STACKSHOT_KCTYPE_THREAD_POLICY_VERSION, sizeof(uint32_t), &tmp32)); tmp32 = PAGE_SIZE; - kcd_exit_on_error(kcdata_get_memory_addr(stackshot_kcdata_p, STACKSHOT_KCTYPE_KERN_PAGE_SIZE, sizeof(uint32_t), &out_addr)); - stackshot_memcpy((void *)out_addr, &tmp32, sizeof(tmp32)); + kcd_exit_on_error(kcdata_push_data(stackshot_kcdata_p, STACKSHOT_KCTYPE_KERN_PAGE_SIZE, sizeof(uint32_t), &tmp32)); /* save boot-args and osversion string */ length_to_copy = MIN((uint32_t)(strlen(version) + 1), OSVERSIZE); - kcd_exit_on_error(kcdata_get_memory_addr(stackshot_kcdata_p, STACKSHOT_KCTYPE_OSVERSION, length_to_copy, &out_addr)); - stackshot_strlcpy((char*)out_addr, &version[0], length_to_copy); + kcd_exit_on_error(kcdata_push_data(stackshot_kcdata_p, STACKSHOT_KCTYPE_OSVERSION, length_to_copy, (const void *)version)); + length_to_copy = MIN((uint32_t)(strlen(PE_boot_args()) + 1), BOOT_LINE_LENGTH); - kcd_exit_on_error(kcdata_get_memory_addr(stackshot_kcdata_p, STACKSHOT_KCTYPE_BOOTARGS, length_to_copy, &out_addr)); - 
stackshot_strlcpy((char*)out_addr, PE_boot_args(), length_to_copy); + kcd_exit_on_error(kcdata_push_data(stackshot_kcdata_p, STACKSHOT_KCTYPE_BOOTARGS, length_to_copy, PE_boot_args())); - kcd_exit_on_error(kcdata_get_memory_addr(stackshot_kcdata_p, KCDATA_TYPE_TIMEBASE, sizeof(timebase), &out_addr)); - stackshot_memcpy((void *)out_addr, &timebase, sizeof(timebase)); + kcd_exit_on_error(kcdata_push_data(stackshot_kcdata_p, KCDATA_TYPE_TIMEBASE, sizeof(timebase), &timebase)); } else { - kcd_exit_on_error(kcdata_get_memory_addr(stackshot_kcdata_p, STACKSHOT_KCTYPE_DELTA_SINCE_TIMESTAMP, sizeof(uint64_t), &out_addr)); - stackshot_memcpy((void*)out_addr, &stack_snapshot_delta_since_timestamp, sizeof(stack_snapshot_delta_since_timestamp)); + kcd_exit_on_error(kcdata_push_data(stackshot_kcdata_p, STACKSHOT_KCTYPE_DELTA_SINCE_TIMESTAMP, sizeof(uint64_t), &stack_snapshot_delta_since_timestamp)); } - kcd_exit_on_error(kcdata_get_memory_addr(stackshot_kcdata_p, KCDATA_TYPE_MACH_ABSOLUTE_TIME, sizeof(uint64_t), &out_addr)); - abs_time_addr = (uint64_t *)out_addr; - stackshot_memcpy((void *)abs_time_addr, &abs_time, sizeof(uint64_t)); + kcd_exit_on_error(kcdata_push_data(stackshot_kcdata_p, KCDATA_TYPE_MACH_ABSOLUTE_TIME, sizeof(uint64_t), &abs_time)); - kcd_exit_on_error(kcdata_get_memory_addr(stackshot_kcdata_p, KCDATA_TYPE_USECS_SINCE_EPOCH, sizeof(uint64_t), &out_addr)); - stackshot_memcpy((void *)out_addr, &stackshot_microsecs, sizeof(uint64_t)); + kcd_exit_on_error(kcdata_push_data(stackshot_kcdata_p, KCDATA_TYPE_USECS_SINCE_EPOCH, sizeof(uint64_t), &stackshot_microsecs)); /* record system level shared cache load info (if available) */ if (!collect_delta_stackshot && init_task_shared_region && ml_validate_nofault((vm_offset_t)init_task_shared_region, sizeof(struct vm_shared_region))) { - struct dyld_uuid_info_64_v2 *sys_shared_cache_info = NULL; - kcd_exit_on_error(kcdata_get_memory_addr(stackshot_kcdata_p, STACKSHOT_KCTYPE_SHAREDCACHE_LOADINFO, - sizeof(struct dyld_uuid_info_64_v2), &out_addr)); - sys_shared_cache_info = (struct dyld_uuid_info_64_v2 *)out_addr; + struct dyld_uuid_info_64_v2 sys_shared_cache_info = {0}; + + stackshot_memcpy(sys_shared_cache_info.imageUUID, &init_task_shared_region->sr_uuid, sizeof(init_task_shared_region->sr_uuid)); + sys_shared_cache_info.imageLoadAddress = + init_task_shared_region->sr_slide; + sys_shared_cache_info.imageSlidBaseAddress = + init_task_shared_region->sr_slide + init_task_shared_region->sr_base_address; - stackshot_memcpy(sys_shared_cache_info->imageUUID, &init_task_shared_region->sr_uuid, sizeof(init_task_shared_region->sr_uuid)); - sys_shared_cache_info->imageLoadAddress = init_task_shared_region->sr_slide_info.slide; - sys_shared_cache_info->imageSlidBaseAddress = init_task_shared_region->sr_slide_info.slide + init_task_shared_region->sr_base_address; + kcd_exit_on_error(kcdata_push_data(stackshot_kcdata_p, STACKSHOT_KCTYPE_SHAREDCACHE_LOADINFO, + sizeof(struct dyld_uuid_info_64_v2), &sys_shared_cache_info)); if (trace_flags & STACKSHOT_COLLECT_SHAREDCACHE_LAYOUT) { /* @@ -2109,71 +2404,142 @@ kdp_stackshot_kcdata_format(int pid, uint32_t trace_flags, uint32_t * pBytesTrac if (init_task_shared_region->sr_images && ml_validate_nofault((vm_offset_t)init_task_shared_region->sr_images, (init_task_shared_region->sr_images_count * sizeof(struct dyld_uuid_info_64)))) { assert(init_task_shared_region->sr_images_count != 0); - kcd_exit_on_error(kcdata_get_memory_addr_for_array(stackshot_kcdata_p, STACKSHOT_KCTYPE_SYS_SHAREDCACHE_LAYOUT, - sizeof(struct 
dyld_uuid_info_64), - init_task_shared_region->sr_images_count, &out_addr)); - stackshot_memcpy((void*)out_addr, init_task_shared_region->sr_images, - (init_task_shared_region->sr_images_count * sizeof(struct dyld_uuid_info_64))); + kcd_exit_on_error(kcdata_push_array(stackshot_kcdata_p, STACKSHOT_KCTYPE_SYS_SHAREDCACHE_LAYOUT, sizeof(struct dyld_uuid_info_64), init_task_shared_region->sr_images_count, init_task_shared_region->sr_images)); } } } /* Add requested information first */ if (trace_flags & STACKSHOT_GET_GLOBAL_MEM_STATS) { - kcd_exit_on_error(kcdata_get_memory_addr(stackshot_kcdata_p, STACKSHOT_KCTYPE_GLOBAL_MEM_STATS, sizeof(struct mem_and_io_snapshot), &out_addr)); - kdp_mem_and_io_snapshot((struct mem_and_io_snapshot *)out_addr); + struct mem_and_io_snapshot mais = {0}; + kdp_mem_and_io_snapshot(&mais); + kcd_exit_on_error(kcdata_push_data(stackshot_kcdata_p, STACKSHOT_KCTYPE_GLOBAL_MEM_STATS, sizeof(mais), &mais)); } -#if CONFIG_COALITIONS - int num_coalitions = 0; - struct jetsam_coalition_snapshot *coalitions = NULL; +#if CONFIG_THREAD_GROUPS + struct thread_group_snapshot_v2 *thread_groups = NULL; + int num_thread_groups = 0; #if INTERRUPT_MASKED_DEBUG && MONOTONIC - uint64_t coalition_begin_cpu_cycle_count = 0; + uint64_t thread_group_begin_cpu_cycle_count = 0; - if (!panic_stackshot && (trace_flags & STACKSHOT_SAVE_JETSAM_COALITIONS)) { - coalition_begin_cpu_cycle_count = mt_cur_cpu_cycles(); + if (!panic_stackshot && (trace_flags & STACKSHOT_THREAD_GROUP)) { + thread_group_begin_cpu_cycle_count = mt_cur_cpu_cycles(); } #endif - /* Iterate over coalitions */ - if (trace_flags & STACKSHOT_SAVE_JETSAM_COALITIONS) { - if (coalition_iterate_stackshot(stackshot_coalition_jetsam_count, &num_coalitions, COALITION_TYPE_JETSAM) != KERN_SUCCESS) { - trace_flags &= ~(STACKSHOT_SAVE_JETSAM_COALITIONS); + + /* Iterate over thread group names */ + if (trace_flags & STACKSHOT_THREAD_GROUP) { + /* Variable size array - better not have it on the stack. 
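
Many of the hunks above make the same mechanical change: fixed-size header items that used to be written in two steps (reserve space with kcdata_get_memory_addr, then stackshot_memcpy into the reserved slot) are now emitted with a single kcdata_push_data call, which hands a finished value to the kcdata layer and presumably lets that layer decide where the bytes land once the output may be compressed. The shape of the change, shown with the jetsam-level item as the example (sketch only):

    /* before: reserve, then copy into the reserved slot */
    kcd_exit_on_error(kcdata_get_memory_addr(stackshot_kcdata_p, STACKSHOT_KCTYPE_JETSAM_LEVEL,
        sizeof(uint32_t), &out_addr));
    stackshot_memcpy((void *)out_addr, &tmp32, sizeof(tmp32));

    /* after: push the finished value in one call */
    kcd_exit_on_error(kcdata_push_data(stackshot_kcdata_p, STACKSHOT_KCTYPE_JETSAM_LEVEL,
        sizeof(uint32_t), &tmp32));
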
*/ + kcdata_compression_window_open(stackshot_kcdata_p); + + if (thread_group_iterate_stackshot(stackshot_thread_group_count, &num_thread_groups) != KERN_SUCCESS) { + trace_flags &= ~(STACKSHOT_THREAD_GROUP); } - } - if (trace_flags & STACKSHOT_SAVE_JETSAM_COALITIONS) { - if (num_coalitions > 0) { - kcd_exit_on_error(kcdata_get_memory_addr_for_array(stackshot_kcdata_p, STACKSHOT_KCTYPE_JETSAM_COALITION_SNAPSHOT, sizeof(struct jetsam_coalition_snapshot), num_coalitions, &out_addr)); - coalitions = (struct jetsam_coalition_snapshot*)out_addr; + + if (num_thread_groups > 0) { + kcd_exit_on_error(kcdata_get_memory_addr_for_array(stackshot_kcdata_p, STACKSHOT_KCTYPE_THREAD_GROUP_SNAPSHOT, sizeof(struct thread_group_snapshot_v2), num_thread_groups, &out_addr)); + thread_groups = (struct thread_group_snapshot_v2 *)out_addr; } - if (coalition_iterate_stackshot(stackshot_coalition_jetsam_snapshot, coalitions, COALITION_TYPE_JETSAM) != KERN_SUCCESS) { + if (thread_group_iterate_stackshot(stackshot_thread_group_snapshot, thread_groups) != KERN_SUCCESS) { error = KERN_FAILURE; goto error_exit; } + + kcd_exit_on_error(kcdata_compression_window_close(stackshot_kcdata_p)); } + #if INTERRUPT_MASKED_DEBUG && MONOTONIC - if (!panic_stackshot && (coalition_begin_cpu_cycle_count != 0)) { - kcd_exit_on_error(kcdata_add_uint64_with_description(stackshot_kcdata_p, (mt_cur_cpu_cycles() - coalition_begin_cpu_cycle_count), - "coalitions_cpu_cycle_count")); + if (!panic_stackshot && (thread_group_begin_cpu_cycle_count != 0)) { + kcd_exit_on_error(kcdata_add_uint64_with_description(stackshot_kcdata_p, (mt_cur_cpu_cycles() - thread_group_begin_cpu_cycle_count), + "thread_groups_cpu_cycle_count")); } #endif #else - trace_flags &= ~(STACKSHOT_SAVE_JETSAM_COALITIONS); -#endif /* CONFIG_COALITIONS */ - trace_flags &= ~(STACKSHOT_THREAD_GROUP); +#endif /* CONFIG_THREAD_GROUPS */ + +#if STACKSHOT_COLLECTS_LATENCY_INFO + latency_info.setup_latency = mach_absolute_time() - latency_info.setup_latency; + latency_info.total_task_iteration_latency = mach_absolute_time(); +#endif /* STACKSHOT_COLLECTS_LATENCY_INFO */ /* Iterate over tasks */ queue_iterate(&tasks, task, task_t, tasks) { + if (collect_delta_stackshot) { + uint64_t abstime; + proc_starttime_kdp(task->bsd_info, NULL, NULL, &abstime); + + if (abstime > last_task_start_time) { + last_task_start_time = abstime; + } + } + error = kdp_stackshot_record_task(&ctx, task); if (error) { goto error_exit; } } + + +#if STACKSHOT_COLLECTS_LATENCY_INFO + latency_info.total_task_iteration_latency = mach_absolute_time() - latency_info.total_task_iteration_latency; +#endif /* STACKSHOT_COLLECTS_LATENCY_INFO */ + +#if CONFIG_COALITIONS + /* Don't collect jetsam coalition data in delta stakshots - these don't change */ + if (!collect_delta_stackshot || (last_task_start_time > stack_snapshot_delta_since_timestamp)) { + int num_coalitions = 0; + struct jetsam_coalition_snapshot *coalitions = NULL; + +#if INTERRUPT_MASKED_DEBUG && MONOTONIC + uint64_t coalition_begin_cpu_cycle_count = 0; + + if (!panic_stackshot && (trace_flags & STACKSHOT_SAVE_JETSAM_COALITIONS)) { + coalition_begin_cpu_cycle_count = mt_cur_cpu_cycles(); + } +#endif + + /* Iterate over coalitions */ + if (trace_flags & STACKSHOT_SAVE_JETSAM_COALITIONS) { + if (coalition_iterate_stackshot(stackshot_coalition_jetsam_count, &num_coalitions, COALITION_TYPE_JETSAM) != KERN_SUCCESS) { + trace_flags &= ~(STACKSHOT_SAVE_JETSAM_COALITIONS); + } + } + if (trace_flags & STACKSHOT_SAVE_JETSAM_COALITIONS) { + if (num_coalitions > 0) { 
+ /* Variable size array - better not have it on the stack. */ + kcdata_compression_window_open(stackshot_kcdata_p); + kcd_exit_on_error(kcdata_get_memory_addr_for_array(stackshot_kcdata_p, STACKSHOT_KCTYPE_JETSAM_COALITION_SNAPSHOT, sizeof(struct jetsam_coalition_snapshot), num_coalitions, &out_addr)); + coalitions = (struct jetsam_coalition_snapshot*)out_addr; + + if (coalition_iterate_stackshot(stackshot_coalition_jetsam_snapshot, coalitions, COALITION_TYPE_JETSAM) != KERN_SUCCESS) { + error = KERN_FAILURE; + goto error_exit; + } + + kcd_exit_on_error(kcdata_compression_window_close(stackshot_kcdata_p)); + } + } +#if INTERRUPT_MASKED_DEBUG && MONOTONIC + if (!panic_stackshot && (coalition_begin_cpu_cycle_count != 0)) { + kcd_exit_on_error(kcdata_add_uint64_with_description(stackshot_kcdata_p, (mt_cur_cpu_cycles() - coalition_begin_cpu_cycle_count), + "coalitions_cpu_cycle_count")); + } +#endif + } +#else + trace_flags &= ~(STACKSHOT_SAVE_JETSAM_COALITIONS); +#endif /* CONFIG_COALITIONS */ + +#if STACKSHOT_COLLECTS_LATENCY_INFO + latency_info.total_terminated_task_iteration_latency = mach_absolute_time(); +#endif /* STACKSHOT_COLLECTS_LATENCY_INFO */ + /* * Iterate over the tasks in the terminated tasks list. We only inspect * tasks that have a valid bsd_info pointer where P_LPEXIT is NOT set. @@ -2191,25 +2557,41 @@ kdp_stackshot_kcdata_format(int pid, uint32_t trace_flags, uint32_t * pBytesTrac } } + +#if STACKSHOT_COLLECTS_LATENCY_INFO + latency_info.total_terminated_task_iteration_latency = mach_absolute_time() - latency_info.total_terminated_task_iteration_latency; +#endif /* STACKSHOT_COLLECTS_LATENCY_INFO */ + if (use_fault_path) { - kcd_exit_on_error(kcdata_get_memory_addr(stackshot_kcdata_p, STACKSHOT_KCTYPE_STACKSHOT_FAULT_STATS, - sizeof(struct stackshot_fault_stats), &out_addr)); - stackshot_memcpy((void*)out_addr, &fault_stats, sizeof(struct stackshot_fault_stats)); + kcdata_push_data(stackshot_kcdata_p, STACKSHOT_KCTYPE_STACKSHOT_FAULT_STATS, + sizeof(struct stackshot_fault_stats), &fault_stats); + } + +#if STACKSHOT_COLLECTS_LATENCY_INFO + if (collect_latency_info) { + latency_info.latency_version = 1; + kcd_exit_on_error(kcdata_push_data(stackshot_kcdata_p, STACKSHOT_KCTYPE_LATENCY_INFO, sizeof(latency_info), &latency_info)); } +#endif /* STACKSHOT_COLLECTS_LATENCY_INFO */ /* update timestamp of the stackshot */ abs_time_end = mach_absolute_time(); #if DEVELOPMENT || DEBUG - kcd_exit_on_error(kcdata_get_memory_addr(stackshot_kcdata_p, STACKSHOT_KCTYPE_STACKSHOT_DURATION, - sizeof(struct stackshot_duration), &out_addr)); - struct stackshot_duration * stackshot_duration = (struct stackshot_duration *)out_addr; - stackshot_duration->stackshot_duration = (abs_time_end - abs_time); - stackshot_duration->stackshot_duration_outer = 0; - stackshot_duration_outer = (unaligned_u64 *)&stackshot_duration->stackshot_duration_outer; + struct stackshot_duration stackshot_duration; + stackshot_duration.stackshot_duration = (abs_time_end - abs_time); + stackshot_duration.stackshot_duration_outer = 0; + + if ((trace_flags & STACKSHOT_DO_COMPRESS) == 0) { + kcd_exit_on_error(kcdata_get_memory_addr(stackshot_kcdata_p, STACKSHOT_KCTYPE_STACKSHOT_DURATION, + sizeof(struct stackshot_duration), &out_addr)); + struct stackshot_duration *duration_p = (void *) out_addr; + stackshot_memcpy(duration_p, &stackshot_duration, sizeof(*duration_p)); + stackshot_duration_outer = (unaligned_u64 *)&duration_p->stackshot_duration_outer; + } else { + kcd_exit_on_error(kcdata_push_data(stackshot_kcdata_p, 
STACKSHOT_KCTYPE_STACKSHOT_DURATION, sizeof(stackshot_duration), &stackshot_duration)); + stackshot_duration_outer = NULL; + } #endif - stackshot_memcpy((void *)abs_time_addr, &abs_time_end, sizeof(uint64_t)); - - kcd_exit_on_error(kcdata_add_uint32_with_description(stackshot_kcdata_p, trace_flags, "stackshot_out_flags")); #if INTERRUPT_MASKED_DEBUG && MONOTONIC if (!panic_stackshot) { @@ -2218,20 +2600,32 @@ kdp_stackshot_kcdata_format(int pid, uint32_t trace_flags, uint32_t * pBytesTrac } #endif + kcd_finalize_compression(stackshot_kcdata_p); + kcd_exit_on_error(kcdata_add_uint64_with_description(stackshot_kcdata_p, trace_flags, "stackshot_out_flags")); + kcd_exit_on_error(kcdata_write_buffer_end(stackshot_kcdata_p)); /* === END of populating stackshot data === */ *pBytesTraced = (uint32_t) kcdata_memory_get_used_bytes(stackshot_kcdata_p); + *pBytesUncompressed = (uint32_t) kcdata_memory_get_uncompressed_bytes(stackshot_kcdata_p); + error_exit: #if INTERRUPT_MASKED_DEBUG - if (!panic_stackshot) { + if (trace_flags & STACKSHOT_DO_COMPRESS) { + ml_spin_debug_clear_self(); + } +#if defined(STACKSHOT_INTERRUPTS_MASKED_CHECK_DISABLED) + ml_spin_debug_clear_self(); +#endif + + if (!panic_stackshot && interrupt_masked_debug) { /* * Try to catch instances where stackshot takes too long BEFORE returning from * the debugger */ - ml_check_interrupts_disabled_duration(current_thread()); + ml_check_stackshot_interrupt_disabled_duration(current_thread()); } #endif @@ -2271,29 +2665,12 @@ kdp_mem_and_io_snapshot(struct mem_and_io_snapshot *memio_snap) unsigned int pages_wanted; kern_return_t kErr; - processor_t processor; - vm_statistics64_t stat; - vm_statistics64_data_t host_vm_stat; - - processor = processor_list; - stat = &PROCESSOR_DATA(processor, vm_stat); - host_vm_stat = *stat; + uint64_t compressions = 0; + uint64_t decompressions = 0; - if (processor_count > 1) { - /* - * processor_list may be in the process of changing as we are - * attempting a stackshot. Ordinarily it will be lock protected, - * but it is not safe to lock in the context of the debugger. - * Fortunately we never remove elements from the processor list, - * and only add to to the end of the list, so we SHOULD be able - * to walk it. If we ever want to truly tear down processors, - * this will have to change. 
- */ - while ((processor = processor->processor_list) != NULL) { - stat = &PROCESSOR_DATA(processor, vm_stat); - host_vm_stat.compressions += stat->compressions; - host_vm_stat.decompressions += stat->decompressions; - } + percpu_foreach(stat, vm_stat) { + compressions += stat->compressions; + decompressions += stat->decompressions; } memio_snap->snapshot_magic = STACKSHOT_MEM_AND_IO_SNAPSHOT_MAGIC; @@ -2306,8 +2683,8 @@ kdp_mem_and_io_snapshot(struct mem_and_io_snapshot *memio_snap) memio_snap->throttled_pages = vm_page_throttled_count; memio_snap->busy_buffer_count = count_busy_buffers(); memio_snap->filebacked_pages = vm_page_pageable_external_count; - memio_snap->compressions = (uint32_t)host_vm_stat.compressions; - memio_snap->decompressions = (uint32_t)host_vm_stat.decompressions; + memio_snap->compressions = (uint32_t)compressions; + memio_snap->decompressions = (uint32_t)decompressions; memio_snap->compressor_size = VM_PAGE_COMPRESSOR_COUNT; kErr = mach_vm_pressure_monitor(FALSE, VM_PRESSURE_TIME_WINDOW, &pages_reclaimed, &pages_wanted); @@ -2325,7 +2702,7 @@ kdp_mem_and_io_snapshot(struct mem_and_io_snapshot *memio_snap) void stackshot_memcpy(void *dst, const void *src, size_t len) { -#if CONFIG_EMBEDDED +#if defined(__arm__) || defined(__arm64__) if (panic_stackshot) { uint8_t *dest_bytes = (uint8_t *)dst; const uint8_t *src_bytes = (const uint8_t *)src; @@ -2352,6 +2729,17 @@ stackshot_strlcpy(char *dst, const char *src, size_t maxlen) return srclen; } +static inline void +kdp_extract_page_mask_and_size(vm_map_t map, int *effective_page_mask, int *effective_page_size) +{ + if (VM_MAP_PAGE_SHIFT(map) < PAGE_SHIFT) { + *effective_page_mask = VM_MAP_PAGE_MASK(map); + *effective_page_size = VM_MAP_PAGE_SIZE(map); + } else { + *effective_page_mask = PAGE_MASK; + *effective_page_size = PAGE_SIZE; + } +} /* * Returns the physical address of the specified map:target address, @@ -2363,11 +2751,14 @@ kdp_find_phys(vm_map_t map, vm_offset_t target_addr, boolean_t try_fault, uint32 vm_offset_t cur_phys_addr; unsigned cur_wimg_bits; uint64_t fault_start_time = 0; + int effective_page_mask, effective_page_size; if (map == VM_MAP_NULL) { return 0; } + kdp_extract_page_mask_and_size(map, &effective_page_mask, &effective_page_size); + cur_phys_addr = kdp_vtophys(map->pmap, target_addr); if (!pmap_valid_page((ppnum_t) atop(cur_phys_addr))) { if (!try_fault || fault_stats.sfs_stopped_faulting) { @@ -2383,14 +2774,14 @@ kdp_find_phys(vm_map_t map, vm_offset_t target_addr, boolean_t try_fault, uint32 * vm map and try a lightweight fault. Update fault path usage stats. 
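
The new kdp_extract_page_mask_and_size helper matters because a task's VM map can use a smaller page size than the kernel (for example, a 4 KB user map under a 16 KB kernel page size); clamping copy runs with the kernel's PAGE_MASK could then assume more contiguity than the single page that was actually translated. An illustrative sketch of how the helper is used by the copy paths in this patch (the example page sizes in the comment are hypothetical):

    int effective_page_mask, effective_page_size;
    kdp_extract_page_mask_and_size(map, &effective_page_mask, &effective_page_size);

    /* e.g. a 4 KB user map under a 16 KB kernel: effective_page_size is 4096 here, so at most
     * the remainder of the translated 4 KB page is treated as physically contiguous. */
    uint64_t src_rem = effective_page_size - (phys_src & effective_page_mask);
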
*/ fault_start_time = mach_absolute_time(); - cur_phys_addr = kdp_lightweight_fault(map, (target_addr & ~PAGE_MASK)); + cur_phys_addr = kdp_lightweight_fault(map, (target_addr & ~effective_page_mask)); fault_stats.sfs_time_spent_faulting += (mach_absolute_time() - fault_start_time); if ((fault_stats.sfs_time_spent_faulting >= fault_stats.sfs_system_max_fault_time) && !panic_stackshot) { fault_stats.sfs_stopped_faulting = (uint8_t) TRUE; } - cur_phys_addr += (target_addr & PAGE_MASK); + cur_phys_addr += (target_addr & effective_page_mask); if (!pmap_valid_page((ppnum_t) atop(cur_phys_addr))) { if (kdp_fault_results) { @@ -2433,20 +2824,24 @@ kdp_copyin_word( } } -int -kdp_copyin_string( +static int +kdp_copyin_string_slowpath( task_t task, uint64_t addr, char *buf, int buf_sz, boolean_t try_fault, uint32_t *kdp_fault_results) { int i; uint64_t validated = 0, valid_from; uint64_t phys_src, phys_dest; + int effective_page_mask, effective_page_size; + vm_map_t map = task->map; + + kdp_extract_page_mask_and_size(map, &effective_page_mask, &effective_page_size); for (i = 0; i < buf_sz; i++) { if (validated == 0) { valid_from = i; - phys_src = kdp_find_phys(task->map, addr + i, try_fault, kdp_fault_results); + phys_src = kdp_find_phys(map, addr + i, try_fault, kdp_fault_results); phys_dest = kvtophys((vm_offset_t)&buf[i]); - uint64_t src_rem = PAGE_SIZE - (phys_src & PAGE_MASK); + uint64_t src_rem = effective_page_size - (phys_src & effective_page_mask); uint64_t dst_rem = PAGE_SIZE - (phys_dest & PAGE_MASK); if (phys_src && phys_dest) { validated = MIN(src_rem, dst_rem); @@ -2473,13 +2868,35 @@ kdp_copyin_string( return -1; } +int +kdp_copyin_string( + task_t task, uint64_t addr, char *buf, int buf_sz, boolean_t try_fault, uint32_t *kdp_fault_results) +{ + /* try to opportunistically copyin 32 bytes, most strings should fit */ + char optbuffer[32]; + boolean_t res; + + bzero(optbuffer, sizeof(optbuffer)); + res = kdp_copyin(task->map, addr, optbuffer, sizeof(optbuffer), try_fault, kdp_fault_results); + if (res == FALSE || strnlen(optbuffer, sizeof(optbuffer)) == sizeof(optbuffer)) { + /* try the slowpath */ + return kdp_copyin_string_slowpath(task, addr, buf, buf_sz, try_fault, kdp_fault_results); + } + + /* success */ + return (int) strlcpy(buf, optbuffer, buf_sz) + 1; +} + boolean_t kdp_copyin(vm_map_t map, uint64_t uaddr, void *dest, size_t size, boolean_t try_fault, uint32_t *kdp_fault_results) { size_t rem = size; char *kvaddr = dest; + int effective_page_mask, effective_page_size; + + kdp_extract_page_mask_and_size(map, &effective_page_mask, &effective_page_size); -#if CONFIG_EMBEDDED +#if defined(__arm__) || defined(__arm64__) /* Identify if destination buffer is in panic storage area */ if (panic_stackshot && ((vm_offset_t)dest >= gPanicBase) && ((vm_offset_t)dest < (gPanicBase + gPanicSize))) { if (((vm_offset_t)dest + size) > (gPanicBase + gPanicSize)) { @@ -2491,21 +2908,21 @@ kdp_copyin(vm_map_t map, uint64_t uaddr, void *dest, size_t size, boolean_t try_ while (rem) { uint64_t phys_src = kdp_find_phys(map, uaddr, try_fault, kdp_fault_results); uint64_t phys_dest = kvtophys((vm_offset_t)kvaddr); - uint64_t src_rem = PAGE_SIZE - (phys_src & PAGE_MASK); + uint64_t src_rem = effective_page_size - (phys_src & effective_page_mask); uint64_t dst_rem = PAGE_SIZE - (phys_dest & PAGE_MASK); size_t cur_size = (uint32_t) MIN(src_rem, dst_rem); cur_size = MIN(cur_size, rem); if (phys_src && phys_dest) { -#if CONFIG_EMBEDDED +#if defined(__arm__) || defined(__arm64__) /* - * On embedded the panic 
buffer is mapped as device memory and doesn't allow + * On arm devices the panic buffer is mapped as device memory and doesn't allow * unaligned accesses. To prevent these, we copy over bytes individually here. */ if (panic_stackshot) { stackshot_memcpy(kvaddr, (const void *)phystokv(phys_src), cur_size); } else -#endif /* CONFIG_EMBEDDED */ +#endif /* defined(__arm__) || defined(__arm64__) */ bcopy_phys(phys_src, phys_dest, cur_size); } else { break; @@ -2527,7 +2944,12 @@ do_stackshot(void *context) stack_snapshot_ret = kdp_stackshot_kcdata_format(stack_snapshot_pid, stack_snapshot_flags, - &stack_snapshot_bytes_traced); + &stack_snapshot_bytes_traced, + &stack_snapshot_bytes_uncompressed); + + if (stack_snapshot_ret == KERN_SUCCESS && stack_snapshot_flags & STACKSHOT_DO_COMPRESS) { + kcdata_finish_compression(stackshot_kcdata_p); + } kdp_snapshot--; return stack_snapshot_ret; @@ -2653,6 +3075,12 @@ stackshot_coalition_jetsam_snapshot(void *arg, int i, coalition_t coal) jcs->jcs_flags |= kCoalitionPrivileged; } +#if CONFIG_THREAD_GROUPS + struct thread_group *thread_group = kdp_coalition_get_thread_group(coal); + if (thread_group) { + jcs->jcs_thread_group = thread_group_get_id(thread_group); + } +#endif /* CONFIG_THREAD_GROUPS */ leader = kdp_coalition_get_leader(coal); if (leader) { @@ -2663,6 +3091,27 @@ stackshot_coalition_jetsam_snapshot(void *arg, int i, coalition_t coal) } #endif /* CONFIG_COALITIONS */ +#if CONFIG_THREAD_GROUPS +static void +stackshot_thread_group_count(void *arg, int i, struct thread_group *tg) +{ +#pragma unused(i, tg) + unsigned int *n = (unsigned int*)arg; + (*n)++; +} + +static void +stackshot_thread_group_snapshot(void *arg, int i, struct thread_group *tg) +{ + struct thread_group_snapshot_v2 *thread_groups = (struct thread_group_snapshot_v2 *)arg; + struct thread_group_snapshot_v2 *tgs = &thread_groups[i]; + uint64_t flags = kdp_thread_group_get_flags(tg); + tgs->tgs_id = thread_group_get_id(tg); + stackshot_memcpy(tgs->tgs_name, thread_group_get_name(tg), THREAD_GROUP_MAXNAME); + tgs->tgs_flags = ((flags & THREAD_GROUP_FLAGS_EFFICIENT) ? kThreadGroupEfficient : 0) | + ((flags & THREAD_GROUP_FLAGS_UI_APP) ? kThreadGroupUIApp : 0); +} +#endif /* CONFIG_THREAD_GROUPS */ /* Determine if a thread has waitinfo that stackshot can provide */ static int @@ -2753,6 +3202,9 @@ stackshot_thread_wait_owner_info(thread_t thread, thread_waitinfo_t *waitinfo) case kThreadWaitSleepWithInheritor: kdp_sleep_with_inheritor_find_owner(thread->waitq, thread->wait_event, waitinfo); break; + case kThreadWaitEventlink: + kdp_eventlink_find_owner(thread->waitq, thread->wait_event, waitinfo); + break; case kThreadWaitCompressor: kdp_compressor_busy_find_owner(thread->wait_event, waitinfo); break; diff --git a/osfmk/kern/kern_types.h b/osfmk/kern/kern_types.h index 3e9f12902..aee8081b1 100644 --- a/osfmk/kern/kern_types.h +++ b/osfmk/kern/kern_types.h @@ -57,7 +57,7 @@ typedef struct wait_queue *wait_queue_t; #define WAIT_QUEUE_NULL ((wait_queue_t) 0) #define SIZEOF_WAITQUEUE sizeof(struct wait_queue) -typedef vm_offset_t ipc_kobject_t; +typedef void * ipc_kobject_t; #define IKO_NULL ((ipc_kobject_t) 0) #endif /* KERNEL_PRIVATE */ @@ -276,16 +276,27 @@ typedef enum perfcontrol_event { } perfcontrol_event; /* - * Flags for the sched_perfcontrol_csw_t & sched_perfcontrol_state_update_t + * Flags for the sched_perfcontrol_csw_t, sched_perfcontrol_state_update_t + * & sched_perfcontrol_thread_group_blocked_t/sched_perfcontrol_thread_group_unblocked_t * callouts. 
* Currently defined flags are: - * PERFCONTROL_CALLOUT_WAKE_UNSAFE - Flag to indicate its unsafe to - * do a wakeup as part of this callout. If this is set, it - * indicates that the scheduler holds a spinlock which might be needed - * in the wakeup path. In that case CLPC should do a thread_call - * instead of a direct wakeup to run their workloop thread. + * + * PERFCONTROL_CALLOUT_WAKE_UNSAFE: Flag to indicate its unsafe to + * do a wakeup as part of this callout. If this is set, it + * indicates that the scheduler holds a spinlock which might be needed + * in the wakeup path. In that case CLPC should do a thread_call + * instead of a direct wakeup to run their workloop thread. + * + * PERFCONTROL_CALLOUT_BLOCKING_TG_RENDER_SERVER: Flag to indicate + * that the render server thread group is blocking/unblocking progress + * of another thread group. The render server thread group is well + * known to CLPC, so XNU simply passes this flag instead of taking + * a reference on it. It is illegal to pass both the TG identity and + * this flag in the callout; this flag should only be set with the + * blocking/unblocking TG being NULL. */ -#define PERFCONTROL_CALLOUT_WAKE_UNSAFE 0x1 +#define PERFCONTROL_CALLOUT_WAKE_UNSAFE (0x1) +#define PERFCONTROL_CALLOUT_BLOCKING_TG_RENDER_SERVER (0x2) /* * Enum to define the perfcontrol class for thread. @@ -310,8 +321,37 @@ typedef enum perfcontrol_class { PERFCONTROL_CLASS_UI = 7, /* Above UI Thread */ PERFCONTROL_CLASS_ABOVEUI = 8, + /* Maximum class */ + PERFCONTROL_CLASS_MAX = 9, } perfcontrol_class_t; +/* + * struct sched_clutch_edge + * + * Represents an edge from one cluster to another in the Edge Scheduler. + * An edge has the following properties: + * - Edge Weight: A value which indicates the likelihood of migrating threads + * across that edge. The actual unit of the edge weight is in (usecs) of + * scheduling delay. + * - Migration Allowed: Bit indicating if migrations are allowed across this + * edge from src to dst. + * - Steal Allowed: Bit indicating whether the dst cluster is allowed to steal + * across that edge when a processor in that cluster goes idle. + * + * These values can be modified by CLPC for better load balancing, thermal + * mitigations etc. + */ +typedef union sched_clutch_edge { + struct { + uint32_t + /* boolean_t */ sce_migration_allowed : 1, + /* boolean_t */ sce_steal_allowed : 1, + _reserved : 30; + uint32_t sce_migration_weight; + }; + uint64_t sce_edge_packed; +} sched_clutch_edge; + #endif /* KERNEL_PRIVATE */ #endif /* _KERN_KERN_TYPES_H_ */ diff --git a/osfmk/kern/kext_alloc.c b/osfmk/kern/kext_alloc.c index 3d5702d35..5a689341f 100644 --- a/osfmk/kern/kext_alloc.c +++ b/osfmk/kern/kext_alloc.c @@ -38,6 +38,7 @@ #include #include +#include #include #define KASLR_IOREG_DEBUG 0 @@ -60,6 +61,7 @@ static mach_vm_offset_t kext_post_boot_base = 0; * kernel's text segment. To ensure this happens, we snag 2GB of kernel VM * as early as possible for kext allocations. */ +__startup_func void kext_alloc_init(void) { @@ -131,6 +133,31 @@ kext_alloc_init(void) #endif /* CONFIG_KEXT_BASEMENT */ } +/* + * Get a vm addr in the kext submap where a kext + * collection of given size could be mapped. 
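
The sched_clutch_edge union above packs an edge's tunables into one 64-bit value (sce_edge_packed), so the scheduler and CLPC can hand an edge around, or update it, as a single word. A small illustrative sketch of building and unpacking an edge; the field names come from the union above, but the specific values are made up:

    /* Illustrative only; values hypothetical. */
    sched_clutch_edge edge = {
            .sce_migration_allowed = 1,     /* threads may migrate across this edge         */
            .sce_steal_allowed     = 0,     /* the destination cluster may not steal        */
            .sce_migration_weight  = 50,    /* tolerate roughly 50 usec of scheduling delay */
    };
    uint64_t packed = edge.sce_edge_packed; /* the whole edge as one 64-bit word */

    sched_clutch_edge copy = { .sce_edge_packed = packed };
    boolean_t may_migrate = copy.sce_migration_allowed;
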
+ */ +vm_offset_t +get_address_from_kext_map(vm_size_t fsize) +{ + vm_offset_t addr = 0; + kern_return_t ret; + + ret = kext_alloc(&addr, fsize, false); + assert(ret == KERN_SUCCESS); + + if (ret != KERN_SUCCESS) { + return 0; + } + + kext_free(addr, fsize); + + addr += VM_MAP_PAGE_SIZE(g_kext_map); + addr = vm_map_trunc_page(addr, + VM_MAP_PAGE_MASK(g_kext_map)); + return addr; +} + kern_return_t kext_alloc(vm_offset_t *_addr, vm_size_t size, boolean_t fixed) { @@ -143,6 +170,20 @@ kext_alloc(vm_offset_t *_addr, vm_size_t size, boolean_t fixed) int flags = (fixed) ? VM_FLAGS_FIXED : VM_FLAGS_ANYWHERE; #if CONFIG_KEXT_BASEMENT + kc_format_t kcformat; + if (PE_get_primary_kc_format(&kcformat) && kcformat == KCFormatFileset) { + /* + * There is no need for a kext basement when booting with the + * new MH_FILESET format kext collection. + */ + rval = mach_vm_allocate_kernel(g_kext_map, &addr, size, flags, VM_KERN_MEMORY_KEXT); + if (rval != KERN_SUCCESS) { + printf("vm_allocate failed - %d\n", rval); + goto finish; + } + goto check_reachable; + } + /* Allocate the kext virtual memory * 10608884 - use mach_vm_map since we want VM_FLAGS_ANYWHERE allocated past * kext_post_boot_base (when possible). mach_vm_allocate will always @@ -167,6 +208,7 @@ kext_alloc(vm_offset_t *_addr, vm_size_t size, boolean_t fixed) printf("mach_vm_map failed - %d\n", rval); goto finish; } +check_reachable: #else rval = mach_vm_allocate_kernel(g_kext_map, &addr, size, flags, VM_KERN_MEMORY_KEXT); if (rval != KERN_SUCCESS) { @@ -200,3 +242,27 @@ kext_free(vm_offset_t addr, vm_size_t size) rval = mach_vm_deallocate(g_kext_map, addr, size); assert(rval == KERN_SUCCESS); } + +kern_return_t +kext_receipt(void **addrp, size_t *sizep) +{ + if (addrp == NULL || sizep == NULL) { + return KERN_FAILURE; + } + + kernel_mach_header_t *kc = PE_get_kc_header(KCKindAuxiliary); + if (kc == NULL) { + return KERN_FAILURE; + } + + size_t size; + void *addr = getsectdatafromheader(kc, + kReceiptInfoSegment, kAuxKCReceiptSection, &size); + if (addr == NULL) { + return KERN_FAILURE; + } + + *addrp = addr; + *sizep = size; + return KERN_SUCCESS; +} diff --git a/osfmk/kern/kext_alloc.h b/osfmk/kern/kext_alloc.h index 79bbbb8d3..a629bec2e 100644 --- a/osfmk/kern/kext_alloc.h +++ b/osfmk/kern/kext_alloc.h @@ -33,12 +33,16 @@ __BEGIN_DECLS +vm_offset_t get_address_from_kext_map(vm_size_t fsize); + void kext_alloc_init(void); kern_return_t kext_alloc(vm_offset_t *addr, vm_size_t size, boolean_t fixed); void kext_free(vm_offset_t addr, vm_size_t size); +kern_return_t kext_receipt(void **addrp, size_t *sizep); + __END_DECLS #endif /* _KEXT_ALLOC_H_ */ diff --git a/osfmk/kern/kpc.h b/osfmk/kern/kpc.h index b59a37b7d..743e0d2bd 100644 --- a/osfmk/kern/kpc.h +++ b/osfmk/kern/kpc.h @@ -96,15 +96,9 @@ extern bool kpc_supported; /* bootstrap */ extern void kpc_init(void); -/* common initialization */ -extern void kpc_common_init(void); - /* Architecture specific initialisation */ extern void kpc_arch_init(void); -/* Thread counting initialization */ -extern void kpc_thread_init(void); - /* Get the bitmask of available classes */ extern uint32_t kpc_get_classes(void); @@ -306,26 +300,35 @@ struct kpc_get_counters_remote { uint64_t *buf; }; -extern int kpc_get_all_cpus_counters(uint32_t classes, int *curcpu, uint64_t *buf); -extern int kpc_get_curcpu_counters(uint32_t classes, int *curcpu, uint64_t *buf); -extern int kpc_get_fixed_counters(uint64_t *counterv); -extern int kpc_get_configurable_counters(uint64_t *counterv, uint64_t pmc_mask); -extern 
boolean_t kpc_is_running_fixed(void); -extern boolean_t kpc_is_running_configurable(uint64_t pmc_mask); -extern uint32_t kpc_fixed_count(void); -extern uint32_t kpc_configurable_count(void); -extern uint32_t kpc_fixed_config_count(void); -extern uint32_t kpc_configurable_config_count(uint64_t pmc_mask); -extern uint32_t kpc_rawpmu_config_count(void); -extern int kpc_get_fixed_config(kpc_config_t *configv); -extern int kpc_get_configurable_config(kpc_config_t *configv, uint64_t pmc_mask); -extern int kpc_get_rawpmu_config(kpc_config_t *configv); -extern uint64_t kpc_fixed_max(void); -extern uint64_t kpc_configurable_max(void); -extern int kpc_set_config_arch(struct kpc_config_remote *mp_config); -extern int kpc_set_period_arch(struct kpc_config_remote *mp_config); -extern void kpc_sample_kperf(uint32_t actionid); -extern int kpc_set_running_arch(struct kpc_running_remote *mp_config); +int kpc_get_all_cpus_counters(uint32_t classes, int *curcpu, uint64_t *buf); +int kpc_get_curcpu_counters(uint32_t classes, int *curcpu, uint64_t *buf); +int kpc_get_fixed_counters(uint64_t *counterv); +int kpc_get_configurable_counters(uint64_t *counterv, uint64_t pmc_mask); +boolean_t kpc_is_running_fixed(void); +boolean_t kpc_is_running_configurable(uint64_t pmc_mask); +uint32_t kpc_fixed_count(void); +uint32_t kpc_configurable_count(void); +uint32_t kpc_fixed_config_count(void); +uint32_t kpc_configurable_config_count(uint64_t pmc_mask); +uint32_t kpc_rawpmu_config_count(void); +int kpc_get_fixed_config(kpc_config_t *configv); +int kpc_get_configurable_config(kpc_config_t *configv, uint64_t pmc_mask); +int kpc_get_rawpmu_config(kpc_config_t *configv); +uint64_t kpc_fixed_max(void); +uint64_t kpc_configurable_max(void); +int kpc_set_config_arch(struct kpc_config_remote *mp_config); +int kpc_set_period_arch(struct kpc_config_remote *mp_config); + +__options_decl(kperf_kpc_flags_t, uint16_t, { + KPC_KERNEL_PC = 0x01, + KPC_KERNEL_COUNTING = 0x02, + KPC_USER_COUNTING = 0x04, +}); + +void kpc_sample_kperf(uint32_t actionid, uint32_t counter, uint64_t config, + uint64_t count, uintptr_t pc, kperf_kpc_flags_t flags); + +int kpc_set_running_arch(struct kpc_running_remote *mp_config); /* diff --git a/osfmk/kern/kpc_common.c b/osfmk/kern/kpc_common.c index 1208bc2b8..d1f9fef21 100644 --- a/osfmk/kern/kpc_common.c +++ b/osfmk/kern/kpc_common.c @@ -52,9 +52,8 @@ uint32_t kpc_actionid[KPC_MAX_COUNTERS]; COUNTERBUF_SIZE_PER_CPU) /* locks */ -static lck_grp_attr_t *kpc_config_lckgrp_attr = NULL; -static lck_grp_t *kpc_config_lckgrp = NULL; -static lck_mtx_t kpc_config_lock; +static LCK_GRP_DECLARE(kpc_config_lckgrp, "kpc"); +static LCK_MTX_DECLARE(kpc_config_lock, &kpc_config_lckgrp); /* state specifying if all counters have been requested by kperf */ static boolean_t force_all_ctrs = FALSE; @@ -70,12 +69,19 @@ static bool kpc_calling_pm = false; boolean_t kpc_context_switch_active = FALSE; bool kpc_supported = true; -void -kpc_common_init(void) +static uint64_t * +kpc_percpu_alloc(void) { - kpc_config_lckgrp_attr = lck_grp_attr_alloc_init(); - kpc_config_lckgrp = lck_grp_alloc_init("kpc", kpc_config_lckgrp_attr); - lck_mtx_init(&kpc_config_lock, kpc_config_lckgrp, LCK_ATTR_NULL); + return kheap_alloc_tag(KHEAP_DATA_BUFFERS, COUNTERBUF_SIZE_PER_CPU, + Z_WAITOK | Z_ZERO, VM_KERN_MEMORY_DIAG); +} + +static void +kpc_percpu_free(uint64_t *buf) +{ + if (buf) { + kheap_free(KHEAP_DATA_BUFFERS, buf, COUNTERBUF_SIZE_PER_CPU); + } } boolean_t @@ -98,24 +104,19 @@ kpc_register_cpu(struct cpu_data *cpu_data) * allocate the memory 
here. */ - if ((cpu_data->cpu_kpc_buf[0] = kalloc(COUNTERBUF_SIZE_PER_CPU)) == NULL) { + if ((cpu_data->cpu_kpc_buf[0] = kpc_percpu_alloc()) == NULL) { goto error; } - if ((cpu_data->cpu_kpc_buf[1] = kalloc(COUNTERBUF_SIZE_PER_CPU)) == NULL) { + if ((cpu_data->cpu_kpc_buf[1] = kpc_percpu_alloc()) == NULL) { goto error; } - if ((cpu_data->cpu_kpc_shadow = kalloc(COUNTERBUF_SIZE_PER_CPU)) == NULL) { + if ((cpu_data->cpu_kpc_shadow = kpc_percpu_alloc()) == NULL) { goto error; } - if ((cpu_data->cpu_kpc_reload = kalloc(COUNTERBUF_SIZE_PER_CPU)) == NULL) { + if ((cpu_data->cpu_kpc_reload = kpc_percpu_alloc()) == NULL) { goto error; } - memset(cpu_data->cpu_kpc_buf[0], 0, COUNTERBUF_SIZE_PER_CPU); - memset(cpu_data->cpu_kpc_buf[1], 0, COUNTERBUF_SIZE_PER_CPU); - memset(cpu_data->cpu_kpc_shadow, 0, COUNTERBUF_SIZE_PER_CPU); - memset(cpu_data->cpu_kpc_reload, 0, COUNTERBUF_SIZE_PER_CPU); - /* success */ return TRUE; @@ -129,19 +130,19 @@ kpc_unregister_cpu(struct cpu_data *cpu_data) { assert(cpu_data); if (cpu_data->cpu_kpc_buf[0] != NULL) { - kfree(cpu_data->cpu_kpc_buf[0], COUNTERBUF_SIZE_PER_CPU); + kpc_percpu_free(cpu_data->cpu_kpc_buf[0]); cpu_data->cpu_kpc_buf[0] = NULL; } if (cpu_data->cpu_kpc_buf[1] != NULL) { - kfree(cpu_data->cpu_kpc_buf[1], COUNTERBUF_SIZE_PER_CPU); + kpc_percpu_free(cpu_data->cpu_kpc_buf[1]); cpu_data->cpu_kpc_buf[1] = NULL; } if (cpu_data->cpu_kpc_shadow != NULL) { - kfree(cpu_data->cpu_kpc_shadow, COUNTERBUF_SIZE_PER_CPU); + kpc_percpu_free(cpu_data->cpu_kpc_shadow); cpu_data->cpu_kpc_shadow = NULL; } if (cpu_data->cpu_kpc_reload != NULL) { - kfree(cpu_data->cpu_kpc_reload, COUNTERBUF_SIZE_PER_CPU); + kpc_percpu_free(cpu_data->cpu_kpc_reload); cpu_data->cpu_kpc_reload = NULL; } } @@ -311,7 +312,7 @@ kpc_get_curcpu_counters(uint32_t classes, int *curcpu, uint64_t *buf) /* grab counters and CPU number as close as possible */ if (curcpu) { - *curcpu = current_processor()->cpu_id; + *curcpu = cpu_number(); } if (classes & KPC_CLASS_FIXED_MASK) { @@ -359,7 +360,7 @@ int kpc_get_shadow_counters(boolean_t all_cpus, uint32_t classes, int *curcpu, uint64_t *buf) { - int curcpu_id = current_processor()->cpu_id; + int curcpu_id = cpu_number(); uint32_t cfg_count = kpc_configurable_count(), offset = 0; uint64_t pmc_mask = 0ULL; boolean_t enabled; @@ -368,7 +369,7 @@ kpc_get_shadow_counters(boolean_t all_cpus, uint32_t classes, enabled = ml_set_interrupts_enabled(FALSE); - curcpu_id = current_processor()->cpu_id; + curcpu_id = cpu_number(); if (curcpu) { *curcpu = curcpu_id; } @@ -534,30 +535,27 @@ kpc_get_counterbuf_size(void) uint64_t * kpc_counterbuf_alloc(void) { - uint64_t *buf = NULL; - - buf = kalloc_tag(COUNTERBUF_SIZE, VM_KERN_MEMORY_DIAG); - if (buf) { - bzero(buf, COUNTERBUF_SIZE); - } - - return buf; + return kheap_alloc_tag(KHEAP_DATA_BUFFERS, COUNTERBUF_SIZE, + Z_WAITOK | Z_ZERO, VM_KERN_MEMORY_DIAG); } void kpc_counterbuf_free(uint64_t *buf) { if (buf) { - kfree(buf, COUNTERBUF_SIZE); + kheap_free(KHEAP_DATA_BUFFERS, buf, COUNTERBUF_SIZE); } } void -kpc_sample_kperf(uint32_t actionid) +kpc_sample_kperf(uint32_t actionid, uint32_t counter, uint64_t config, + uint64_t count, uintptr_t pc, kperf_kpc_flags_t flags) { struct kperf_sample sbuf; - BUF_DATA(PERF_KPC_HNDLR | DBG_FUNC_START); + uint64_t desc = config | (uint64_t)counter << 32 | (uint64_t)flags << 48; + + BUF_DATA(PERF_KPC_HNDLR | DBG_FUNC_START, desc, count, pc); thread_t thread = current_thread(); task_t task = get_threadtask(thread); @@ -836,7 +834,7 @@ kpc_release_pm_counters(void) uint8_t 
kpc_popcount(uint64_t value) { - return __builtin_popcountll(value); + return (uint8_t)__builtin_popcountll(value); } uint64_t diff --git a/osfmk/kern/kpc_thread.c b/osfmk/kern/kpc_thread.c index a2f1fe6a0..b28a7e81c 100644 --- a/osfmk/kern/kpc_thread.c +++ b/osfmk/kern/kpc_thread.c @@ -57,17 +57,8 @@ boolean_t kpc_off_cpu_active = FALSE; static uint32_t kpc_thread_classes = 0; static uint32_t kpc_thread_classes_count = 0; -static lck_grp_attr_t *kpc_thread_lckgrp_attr = NULL; -static lck_grp_t *kpc_thread_lckgrp = NULL; -static lck_mtx_t kpc_thread_lock; - -void -kpc_thread_init(void) -{ - kpc_thread_lckgrp_attr = lck_grp_attr_alloc_init(); - kpc_thread_lckgrp = lck_grp_alloc_init("kpc", kpc_thread_lckgrp_attr); - lck_mtx_init(&kpc_thread_lock, kpc_thread_lckgrp, LCK_ATTR_NULL); -} +static LCK_GRP_DECLARE(kpc_thread_lckgrp, "kpc thread"); +static LCK_MTX_DECLARE(kpc_thread_lock, &kpc_thread_lckgrp); uint32_t kpc_get_thread_counting(void) diff --git a/osfmk/kern/ledger.c b/osfmk/kern/ledger.c index e905ee666..4f9606843 100644 --- a/osfmk/kern/ledger.c +++ b/osfmk/kern/ledger.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2010-2018 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2010-2020 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -96,7 +96,7 @@ struct entry_template { struct ledger_callback *et_callback; }; -lck_grp_t ledger_lck_grp; +LCK_GRP_DECLARE(ledger_lck_grp, "ledger"); os_refgrp_decl(static, ledger_refgrp, "ledger", NULL); /* @@ -174,12 +174,6 @@ nsecs_to_abstime(uint64_t nsecs) return abstime; } -void -ledger_init(void) -{ - lck_grp_init(&ledger_lck_grp, "ledger", LCK_GRP_ATTR_NULL); -} - ledger_template_t ledger_template_create(const char *name) { @@ -371,9 +365,7 @@ ledger_template_complete(ledger_template_t template) { size_t ledger_size; ledger_size = sizeof(struct ledger) + (template->lt_cnt * sizeof(struct ledger_entry)); - template->lt_zone = zinit(ledger_size, CONFIG_TASK_MAX * ledger_size, - ledger_size, - template->lt_name); + template->lt_zone = zone_create(template->lt_name, ledger_size, ZC_NONE); template->lt_initialized = true; } @@ -659,12 +651,12 @@ ledger_refill(uint64_t now, ledger_t ledger, int entry) due = balance; } - assertf(due >= 0, "now=%llu, ledger=%p, entry=%d, balance=%lld, due=%lld", now, ledger, entry, balance, due); - - OSAddAtomic64(due, &le->le_debit); - - assert(le->le_debit >= 0); - + if (due < 0 && (le->le_flags & LF_PANIC_ON_NEGATIVE)) { + assertf(due >= 0, "now=%llu, ledger=%p, entry=%d, balance=%lld, due=%lld", now, ledger, entry, balance, due); + } else { + OSAddAtomic64(due, &le->le_debit); + assert(le->le_debit >= 0); + } /* * If we've completely refilled the pool, set the refill time to now. 
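The kpc_percpu_alloc()/kpc_percpu_free() helpers above capture the allocation pattern this patch applies throughout: buffers move from the general kalloc pool into the KHEAP_DATA_BUFFERS kernel heap, and the separate clear-after-allocate step folds into the Z_ZERO flag. Reduced to its essentials, the conversion looks like this (error handling elided; the ledger info buffers below get the same treatment):

    /* Before: untyped allocation followed by an explicit clear. */
    uint64_t *buf = kalloc(COUNTERBUF_SIZE_PER_CPU);
    if (buf != NULL) {
        memset(buf, 0, COUNTERBUF_SIZE_PER_CPU);
    }
    /* ... */
    kfree(buf, COUNTERBUF_SIZE_PER_CPU);

    /* After: typed heap, zeroing flag, and a VM tag at the call site;
     * the free names the same heap and size. */
    buf = kheap_alloc_tag(KHEAP_DATA_BUFFERS, COUNTERBUF_SIZE_PER_CPU,
        Z_WAITOK | Z_ZERO, VM_KERN_MEMORY_DIAG);
    /* ... */
    kheap_free(KHEAP_DATA_BUFFERS, buf, COUNTERBUF_SIZE_PER_CPU);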
* Otherwise set it to the time at which it last should have been @@ -1675,7 +1667,8 @@ ledger_template_info(void **buf, int *len) if (*len > l->l_size) { *len = l->l_size; } - lti = kalloc((*len) * sizeof(struct ledger_template_info)); + lti = kheap_alloc(KHEAP_DATA_BUFFERS, + (*len) * sizeof(struct ledger_template_info), Z_WAITOK); if (lti == NULL) { return ENOMEM; } @@ -1732,7 +1725,8 @@ ledger_get_task_entry_info_multiple(task_t task, void **buf, int *len) if (*len > l->l_size) { *len = l->l_size; } - lei = kalloc((*len) * sizeof(struct ledger_entry_info)); + lei = kheap_alloc(KHEAP_DATA_BUFFERS, + (*len) * sizeof(struct ledger_entry_info), Z_WAITOK); if (lei == NULL) { return ENOMEM; } diff --git a/osfmk/kern/ledger.h b/osfmk/kern/ledger.h index e3a2ec2e6..9fdc93f84 100644 --- a/osfmk/kern/ledger.h +++ b/osfmk/kern/ledger.h @@ -139,8 +139,6 @@ typedef struct ledger_template *ledger_template_t; typedef void (*ledger_callback_t)(int warning, const void * param0, const void *param1); -extern void ledger_init(void); - extern ledger_template_t ledger_template_create(const char *name); extern ledger_template_t ledger_template_copy(ledger_template_t template, const char *name); extern void ledger_template_dereference(ledger_template_t template); diff --git a/osfmk/kern/lock_group.h b/osfmk/kern/lock_group.h index e677ded1a..04cda5f18 100644 --- a/osfmk/kern/lock_group.h +++ b/osfmk/kern/lock_group.h @@ -31,17 +31,21 @@ #include #include -#define LCK_GRP_NULL (lck_grp_t *)NULL +__BEGIN_DECLS -typedef unsigned int lck_type_t; +#define LCK_GRP_NULL (lck_grp_t *)NULL -#define LCK_TYPE_SPIN 1 -#define LCK_TYPE_MTX 2 -#define LCK_TYPE_RW 3 +typedef enum lck_type { + LCK_TYPE_SPIN, + LCK_TYPE_MTX, + LCK_TYPE_RW, + LCK_TYPE_TICKET +} lck_type_t; #if XNU_KERNEL_PRIVATE - +#include #include + /* * Arguments wrapped in LCK_GRP_ARG() will be elided * when LOCK_STATS is not set. 
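ledger_template_complete() above also shows the zinit() to zone_create() conversion that recurs in this patch (the link-table slab zone in ltable.c below gets the same change). The old maximum-size and allocation-granule arguments disappear; sizing policy is left to the zone subsystem. The two calls from that function, side by side:

    /* Before: element size, maximum pool size, allocation granule, name. */
    template->lt_zone = zinit(ledger_size,
        CONFIG_TASK_MAX * ledger_size, ledger_size, template->lt_name);

    /* After: name, element size, creation flags. */
    template->lt_zone = zone_create(template->lt_name, ledger_size, ZC_NONE);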
@@ -71,15 +75,18 @@ typedef struct _lck_grp_stat_ { typedef struct _lck_grp_stats_ { #if LOCK_STATS - lck_grp_stat_t lgss_spin_held; - lck_grp_stat_t lgss_spin_miss; - lck_grp_stat_t lgss_spin_spin; + lck_grp_stat_t lgss_spin_held; + lck_grp_stat_t lgss_spin_miss; + lck_grp_stat_t lgss_spin_spin; + lck_grp_stat_t lgss_ticket_held; + lck_grp_stat_t lgss_ticket_miss; + lck_grp_stat_t lgss_ticket_spin; #endif /* LOCK_STATS */ - lck_grp_stat_t lgss_mtx_held; - lck_grp_stat_t lgss_mtx_direct_wait; - lck_grp_stat_t lgss_mtx_miss; - lck_grp_stat_t lgss_mtx_wait; + lck_grp_stat_t lgss_mtx_held; + lck_grp_stat_t lgss_mtx_direct_wait; + lck_grp_stat_t lgss_mtx_miss; + lck_grp_stat_t lgss_mtx_wait; } lck_grp_stats_t; #define LCK_GRP_MAX_NAME 64 @@ -88,6 +95,7 @@ typedef struct _lck_grp_ { queue_chain_t lck_grp_link; os_refcnt_t lck_grp_refcnt; uint32_t lck_grp_spincnt; + uint32_t lck_grp_ticketcnt; uint32_t lck_grp_mtxcnt; uint32_t lck_grp_rwcnt; uint32_t lck_grp_attr; @@ -99,65 +107,107 @@ typedef struct _lck_grp_ { typedef struct _lck_grp_ lck_grp_t; #endif /* XNU_KERNEL_PRIVATE */ +#define LCK_GRP_ATTR_STAT 0x1 +#define LCK_GRP_ATTR_TIME_STAT 0x2 -#ifdef MACH_KERNEL_PRIVATE +#ifdef XNU_KERNEL_PRIVATE typedef struct _lck_grp_attr_ { uint32_t grp_attr_val; } lck_grp_attr_t; -extern lck_grp_attr_t LockDefaultGroupAttr; +struct lck_grp_attr_startup_spec { + lck_grp_attr_t *grp_attr; + uint32_t grp_attr_set_flags; + uint32_t grp_attr_clear_flags; +}; -#define LCK_GRP_ATTR_STAT 0x1 -#define LCK_GRP_ATTR_TIME_STAT 0x2 +struct lck_grp_startup_spec { + lck_grp_t *grp; + const char *grp_name; + lck_grp_attr_t *grp_attr; +}; + +extern void lck_grp_attr_startup_init( + struct lck_grp_attr_startup_spec *spec); + +extern void lck_grp_startup_init( + struct lck_grp_startup_spec *spec); + +/* + * Auto-initializing lock group declarations + * ----------------------------------------- + * + * Use LCK_GRP_DECLARE to declare an automatically initialized group. + * + * Unless you need to configure your lock groups in very specific ways, + * there is no point creating explicit lock group attributes. If however + * you do need to tune the group, then LCK_GRP_DECLARE_ATTR can be used + * and takes an extra lock group attr argument previously declared with + * LCK_GRP_ATTR_DECLARE. 
+ */ +#define LCK_GRP_ATTR_DECLARE(var, set_flags, clear_flags) \ + SECURITY_READ_ONLY_LATE(lck_grp_attr_t) var; \ + static __startup_data struct lck_grp_attr_startup_spec \ + __startup_lck_grp_attr_spec_ ## var = { &var, set_flags, clear_flags }; \ + STARTUP_ARG(LOCKS_EARLY, STARTUP_RANK_SECOND, lck_grp_attr_startup_init, \ + &__startup_lck_grp_attr_spec_ ## var) + +#define LCK_GRP_DECLARE_ATTR(var, name, attr) \ + __PLACE_IN_SECTION("__DATA,__lock_grp") lck_grp_t var; \ + static __startup_data struct lck_grp_startup_spec \ + __startup_lck_grp_spec_ ## var = { &var, name, attr }; \ + STARTUP_ARG(LOCKS_EARLY, STARTUP_RANK_THIRD, lck_grp_startup_init, \ + &__startup_lck_grp_spec_ ## var) + +#define LCK_GRP_DECLARE(var, name) \ + LCK_GRP_DECLARE_ATTR(var, name, LCK_GRP_ATTR_NULL); #else typedef struct __lck_grp_attr__ lck_grp_attr_t; -#endif /* MACH_KERNEL_PRIVATE */ +#endif /* XNU_KERNEL_PRIVATE */ #define LCK_GRP_ATTR_NULL (lck_grp_attr_t *)NULL -__BEGIN_DECLS - extern lck_grp_attr_t *lck_grp_attr_alloc_init( void); -extern void lck_grp_attr_setdefault( - lck_grp_attr_t *attr); +extern void lck_grp_attr_setdefault( + lck_grp_attr_t *attr); -extern void lck_grp_attr_setstat( - lck_grp_attr_t *attr); +extern void lck_grp_attr_setstat( + lck_grp_attr_t *attr); -extern void lck_grp_attr_free( - lck_grp_attr_t *attr); +extern void lck_grp_attr_free( + lck_grp_attr_t *attr); -extern lck_grp_t *lck_grp_alloc_init( +extern lck_grp_t *lck_grp_alloc_init( const char* grp_name, lck_grp_attr_t *attr); -extern void lck_grp_free( +extern void lck_grp_free( lck_grp_t *grp); -__END_DECLS - #ifdef MACH_KERNEL_PRIVATE -extern void lck_grp_init( +extern void lck_grp_init( lck_grp_t *grp, const char* grp_name, - lck_grp_attr_t *attr); + lck_grp_attr_t *attr); -extern void lck_grp_reference( +extern void lck_grp_reference( lck_grp_t *grp); -extern void lck_grp_deallocate( +extern void lck_grp_deallocate( lck_grp_t *grp); -extern void lck_grp_lckcnt_incr( +extern void lck_grp_lckcnt_incr( lck_grp_t *grp, lck_type_t lck_type); -extern void lck_grp_lckcnt_decr( +extern void lck_grp_lckcnt_decr( lck_grp_t *grp, lck_type_t lck_type); #endif /* MACH_KERNEL_PRIVATE */ +__END_DECLS + #endif /* _KERN_LOCK_GROUP_H */ diff --git a/osfmk/kern/lock_stat.h b/osfmk/kern/lock_stat.h index e89732566..0214122e5 100644 --- a/osfmk/kern/lock_stat.h +++ b/osfmk/kern/lock_stat.h @@ -54,64 +54,70 @@ /* * DTrace lockstat probe definitions * - * Spinlocks */ -#define LS_LCK_SPIN_LOCK_ACQUIRE 0 -#define LS_LCK_SPIN_LOCK_SPIN 1 -#define LS_LCK_SPIN_UNLOCK_RELEASE 2 -/* - * Mutexes can also have interlock-spin events, which are - * unique to our lock implementation. - */ -#define LS_LCK_MTX_LOCK_ACQUIRE 3 -#define LS_LCK_MTX_LOCK_BLOCK 5 -#define LS_LCK_MTX_LOCK_SPIN 6 -#define LS_LCK_MTX_LOCK_ILK_SPIN 7 -#define LS_LCK_MTX_TRY_LOCK_ACQUIRE 8 -#define LS_LCK_MTX_TRY_SPIN_LOCK_ACQUIRE 9 -#define LS_LCK_MTX_UNLOCK_RELEASE 10 - -#define LS_LCK_MTX_LOCK_SPIN_ACQUIRE 39 -/* - * Provide a parallel set for indirect mutexes - */ -#define LS_LCK_MTX_EXT_LOCK_ACQUIRE 17 -#define LS_LCK_MTX_EXT_LOCK_BLOCK 18 -#define LS_LCK_MTX_EXT_LOCK_SPIN 19 -#define LS_LCK_MTX_EXT_LOCK_ILK_SPIN 20 -#define LS_LCK_MTX_TRY_EXT_LOCK_ACQUIRE 21 -#define LS_LCK_MTX_EXT_UNLOCK_RELEASE 22 - -/* - * Reader-writer locks support a blocking upgrade primitive, as - * well as the possibility of spinning on the interlock. 
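LCK_GRP_DECLARE and its _ATTR variant above replace the common pattern of allocating and initializing a lock group from a subsystem init routine, as several files in this patch (kpc_common.c, kpc_thread.c, ledger.c) demonstrate. A sketch of a hypothetical subsystem making the same conversion; all names are illustrative:

    /* Before: group created at runtime from an init function. */
    static lck_grp_t *my_grp;

    static void
    my_subsystem_locks_init(void)
    {
        my_grp = lck_grp_alloc_init("my subsystem", LCK_GRP_ATTR_NULL);
    }

    /* After: a single self-registering declaration; lck_grp_startup_init()
     * runs for it automatically in the LOCKS_EARLY startup phase. */
    static LCK_GRP_DECLARE(my_grp_auto, "my subsystem");

    /* When the group needs tuning, declare an attribute and use the
     * _ATTR variant instead. */
    LCK_GRP_ATTR_DECLARE(my_grp_attr, LCK_GRP_ATTR_STAT, 0);
    static LCK_GRP_DECLARE_ATTR(my_grp_stats, "my subsystem (stats)", &my_grp_attr);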
- */ -#define LS_LCK_RW_LOCK_SHARED_ACQUIRE 23 -#define LS_LCK_RW_LOCK_SHARED_BLOCK 24 -#define LS_LCK_RW_LOCK_SHARED_SPIN 25 - -#define LS_LCK_RW_LOCK_EXCL_ACQUIRE 26 -#define LS_LCK_RW_LOCK_EXCL_BLOCK 27 -#define LS_LCK_RW_LOCK_EXCL_SPIN 28 - -#define LS_LCK_RW_DONE_RELEASE 29 - -#define LS_LCK_RW_TRY_LOCK_SHARED_ACQUIRE 30 -#define LS_LCK_RW_TRY_LOCK_SHARED_SPIN 31 - -#define LS_LCK_RW_TRY_LOCK_EXCL_ACQUIRE 32 -#define LS_LCK_RW_TRY_LOCK_EXCL_ILK_SPIN 33 - -#define LS_LCK_RW_LOCK_SHARED_TO_EXCL_UPGRADE 34 -#define LS_LCK_RW_LOCK_SHARED_TO_EXCL_SPIN 35 -#define LS_LCK_RW_LOCK_SHARED_TO_EXCL_BLOCK 36 - -#define LS_LCK_RW_LOCK_EXCL_TO_SHARED_DOWNGRADE 37 -#define LS_LCK_RW_LOCK_EXCL_TO_SHARED_ILK_SPIN 38 - -#define LS_NPROBES 40 -#define LS_LCK_INVALID LS_NPROBES +enum lockstat_probe_id { + /* Spinlocks */ + LS_LCK_SPIN_LOCK_ACQUIRE, + LS_LCK_SPIN_LOCK_SPIN, + LS_LCK_SPIN_UNLOCK_RELEASE, + + /* + * Mutexes can also have interlock-spin events, which are + * unique to our lock implementation. + */ + LS_LCK_MTX_LOCK_ACQUIRE, + LS_LCK_MTX_LOCK_BLOCK, + LS_LCK_MTX_LOCK_SPIN, + LS_LCK_MTX_LOCK_ILK_SPIN, + LS_LCK_MTX_TRY_LOCK_ACQUIRE, + LS_LCK_MTX_TRY_SPIN_LOCK_ACQUIRE, + LS_LCK_MTX_UNLOCK_RELEASE, + LS_LCK_MTX_LOCK_SPIN_ACQUIRE, + + /* + * Provide a parallel set for indirect mutexes + */ + LS_LCK_MTX_EXT_LOCK_ACQUIRE, + LS_LCK_MTX_EXT_LOCK_BLOCK, + LS_LCK_MTX_EXT_LOCK_SPIN, + LS_LCK_MTX_EXT_LOCK_ILK_SPIN, + LS_LCK_MTX_EXT_UNLOCK_RELEASE, + + /* + * Reader-writer locks support a blocking upgrade primitive, as + * well as the possibility of spinning on the interlock. + */ + LS_LCK_RW_LOCK_SHARED_ACQUIRE, + LS_LCK_RW_LOCK_SHARED_BLOCK, + LS_LCK_RW_LOCK_SHARED_SPIN, + + LS_LCK_RW_LOCK_EXCL_ACQUIRE, + LS_LCK_RW_LOCK_EXCL_BLOCK, + LS_LCK_RW_LOCK_EXCL_SPIN, + + LS_LCK_RW_DONE_RELEASE, + + LS_LCK_RW_TRY_LOCK_SHARED_ACQUIRE, + LS_LCK_RW_TRY_LOCK_SHARED_SPIN, + + LS_LCK_RW_TRY_LOCK_EXCL_ACQUIRE, + LS_LCK_RW_TRY_LOCK_EXCL_ILK_SPIN, + + LS_LCK_RW_LOCK_SHARED_TO_EXCL_UPGRADE, + LS_LCK_RW_LOCK_SHARED_TO_EXCL_SPIN, + LS_LCK_RW_LOCK_SHARED_TO_EXCL_BLOCK, + + LS_LCK_RW_LOCK_EXCL_TO_SHARED_DOWNGRADE, + LS_LCK_RW_LOCK_EXCL_TO_SHARED_ILK_SPIN, + + /* Ticket lock */ + LS_LCK_TICKET_LOCK_ACQUIRE, + LS_LCK_TICKET_LOCK_RELEASE, + LS_LCK_TICKET_LOCK_SPIN, + + LS_NPROBES +}; #if CONFIG_DTRACE extern uint32_t lockstat_probemap[LS_NPROBES]; @@ -158,23 +164,27 @@ lck_grp_stat_disable(lck_grp_stat_t *stat) } #if MACH_KERNEL_PRIVATE -#if LOCK_STATS - static inline void lck_grp_inc_stats(lck_grp_t *grp, lck_grp_stat_t *stat) { +#pragma unused(grp) if (__improbable(stat->lgs_enablings)) { +#if ATOMIC_STAT_UPDATES uint64_t val = os_atomic_inc_orig(&stat->lgs_count, relaxed); -#if CONFIG_DTRACE +#else + uint64_t val = stat->lgs_count++; +#endif /* ATOMIC_STAT_UPDATES */ +#if CONFIG_DTRACE && LOCK_STATS if (__improbable(stat->lgs_limit && (val % (stat->lgs_limit)) == 0)) { lockprof_invoke(grp, stat, val); } #else #pragma unused(val) -#endif /* CONFIG_DTRACE */ +#endif /* CONFIG_DTRACE && LOCK_STATS */ } } +#if LOCK_STATS static inline void lck_grp_inc_time_stats(lck_grp_t *grp, lck_grp_stat_t *stat, uint64_t time) { @@ -259,17 +269,68 @@ lck_grp_spin_spin_enabled(void *lock LCK_GRP_ARG(lck_grp_t *grp)) return enabled; } -static void inline -lck_grp_mtx_inc_stats( - uint64_t* stat) +static inline void +lck_grp_ticket_update_held(void *lock LCK_GRP_ARG(lck_grp_t *grp)) { -#if ATOMIC_STAT_UPDATES - os_atomic_inc(stat, relaxed); -#else - *stat = (*stat)++; -#endif /* ATOMIC_STAT_UPDATES */ +#pragma unused(lock) +#if CONFIG_DTRACE + 
LOCKSTAT_RECORD(LS_LCK_TICKET_LOCK_ACQUIRE, lock, (uintptr_t)LCK_GRP_PROBEARG(grp)); +#endif +#if LOCK_STATS + if (!grp) { + return; + } + lck_grp_stat_t *stat = &grp->lck_grp_stats.lgss_ticket_held; + lck_grp_inc_stats(grp, stat); +#endif /* LOCK_STATS */ +} + +static inline void +lck_grp_ticket_update_miss(void *lock LCK_GRP_ARG(lck_grp_t *grp)) +{ +#pragma unused(lock) +#if LOCK_STATS + if (!grp) { + return; + } + lck_grp_stat_t *stat = &grp->lck_grp_stats.lgss_ticket_miss; + lck_grp_inc_stats(grp, stat); +#endif /* LOCK_STATS */ +} + +static inline boolean_t +lck_grp_ticket_spin_enabled(void *lock LCK_GRP_ARG(lck_grp_t *grp)) +{ +#pragma unused(lock) + boolean_t enabled = FALSE; +#if CONFIG_DTRACE + enabled |= lockstat_probemap[LS_LCK_TICKET_LOCK_SPIN] != 0; +#endif /* CONFIG_DTRACE */ +#if LOCK_STATS + enabled |= (grp && grp->lck_grp_stats.lgss_ticket_spin.lgs_enablings); +#endif /* LOCK_STATS */ + return enabled; +} + +static inline void +lck_grp_ticket_update_spin(void *lock LCK_GRP_ARG(lck_grp_t *grp), uint64_t time) +{ +#pragma unused(lock, time) +#if CONFIG_DTRACE + if (time > dtrace_spin_threshold) { + LOCKSTAT_RECORD(LS_LCK_TICKET_LOCK_SPIN, lock, time LCK_GRP_ARG((uintptr_t)grp)); + } +#endif /* CONFIG_DTRACE */ +#if LOCK_STATS + if (!grp) { + return; + } + lck_grp_stat_t *stat = &grp->lck_grp_stats.lgss_ticket_spin; + lck_grp_inc_time_stats(grp, stat, time); +#endif /* LOCK_STATS */ } + static void inline lck_grp_mtx_update_miss( struct _lck_mtx_ext_ *lock, @@ -279,8 +340,9 @@ lck_grp_mtx_update_miss( #if LOG_FIRST_MISS_ALONE if ((*first_miss & 1) == 0) { #endif /* LOG_FIRST_MISS_ALONE */ - uint64_t* stat = &lock->lck_mtx_grp->lck_grp_stats.lgss_mtx_miss.lgs_count; - lck_grp_mtx_inc_stats(stat); + lck_grp_t *grp = lock->lck_mtx_grp; + lck_grp_stat_t *stat = &grp->lck_grp_stats.lgss_mtx_miss; + lck_grp_inc_stats(grp, stat); #if LOG_FIRST_MISS_ALONE *first_miss |= 1; @@ -292,8 +354,9 @@ static void inline lck_grp_mtx_update_direct_wait( struct _lck_mtx_ext_ *lock) { - uint64_t* stat = &lock->lck_mtx_grp->lck_grp_stats.lgss_mtx_direct_wait.lgs_count; - lck_grp_mtx_inc_stats(stat); + lck_grp_t *grp = lock->lck_mtx_grp; + lck_grp_stat_t *stat = &grp->lck_grp_stats.lgss_mtx_direct_wait; + lck_grp_inc_stats(grp, stat); } static void inline @@ -305,9 +368,9 @@ lck_grp_mtx_update_wait( #if LOG_FIRST_MISS_ALONE if ((*first_miss & 2) == 0) { #endif /* LOG_FIRST_MISS_ALONE */ - uint64_t* stat = &lock->lck_mtx_grp->lck_grp_stats.lgss_mtx_wait.lgs_count; - lck_grp_mtx_inc_stats(stat); - + lck_grp_t *grp = lock->lck_mtx_grp; + lck_grp_stat_t *stat = &grp->lck_grp_stats.lgss_mtx_wait; + lck_grp_inc_stats(grp, stat); #if LOG_FIRST_MISS_ALONE *first_miss |= 2; } @@ -318,8 +381,10 @@ static void inline lck_grp_mtx_update_held( struct _lck_mtx_ext_ *lock) { - uint64_t* stat = &lock->lck_mtx_grp->lck_grp_stats.lgss_mtx_held.lgs_count; - lck_grp_mtx_inc_stats(stat); + lck_grp_t *grp = lock->lck_mtx_grp; + lck_grp_stat_t *stat = &grp->lck_grp_stats.lgss_mtx_held; + lck_grp_inc_stats(grp, stat); } + #endif /* MACH_KERNEL_PRIVATE */ #endif /* _KERN_LOCKSTAT_H */ diff --git a/osfmk/kern/locks.c b/osfmk/kern/locks.c index 78aee369c..aa28feb62 100644 --- a/osfmk/kern/locks.c +++ b/osfmk/kern/locks.c @@ -66,7 +66,7 @@ #include #include #include -#include +#include #include #include #include @@ -103,11 +103,23 @@ static lck_mtx_ext_t lck_grp_lock_ext; SECURITY_READ_ONLY_LATE(boolean_t) spinlock_timeout_panic = TRUE; +/* Obtain "lcks" options:this currently controls lock statistics */ +TUNABLE(uint32_t, 
LcksOpts, "lcks", 0); + +ZONE_VIEW_DEFINE(ZV_LCK_GRP_ATTR, "lck_grp_attr", + KHEAP_ID_DEFAULT, sizeof(lck_grp_attr_t)); + +ZONE_VIEW_DEFINE(ZV_LCK_GRP, "lck_grp", + KHEAP_ID_DEFAULT, sizeof(lck_grp_t)); + +ZONE_VIEW_DEFINE(ZV_LCK_ATTR, "lck_attr", + KHEAP_ID_DEFAULT, sizeof(lck_attr_t)); + lck_grp_attr_t LockDefaultGroupAttr; -lck_grp_t LockCompatGroup; -lck_attr_t LockDefaultLckAttr; +lck_grp_t LockCompatGroup; +lck_attr_t LockDefaultLckAttr; -#if CONFIG_DTRACE && __SMP__ +#if CONFIG_DTRACE #if defined (__x86_64__) uint64_t dtrace_spin_threshold = 500; // 500ns #elif defined(__arm__) || defined(__arm64__) @@ -125,28 +137,10 @@ unslide_for_kdebug(void* object) } } -/* - * Routine: lck_mod_init - */ - -void -lck_mod_init( - void) +__startup_func +static void +lck_mod_init(void) { - /* - * Obtain "lcks" options:this currently controls lock statistics - */ - if (!PE_parse_boot_argn("lcks", &LcksOpts, sizeof(LcksOpts))) { - LcksOpts = 0; - } - - -#if (DEVELOPMENT || DEBUG) && defined(__x86_64__) - if (!PE_parse_boot_argn("-disable_mtx_chk", &LckDisablePreemptCheck, sizeof(LckDisablePreemptCheck))) { - LckDisablePreemptCheck = 0; - } -#endif /* (DEVELOPMENT || DEBUG) && defined(__x86_64__) */ - queue_init(&lck_grp_queue); /* @@ -158,6 +152,7 @@ lck_mod_init( (void) strncpy(LockCompatGroup.lck_grp_name, "Compatibility APIs", LCK_GRP_MAX_NAME); LockCompatGroup.lck_grp_attr = LCK_ATTR_NONE; + if (LcksOpts & enaLkStat) { LockCompatGroup.lck_grp_attr |= LCK_GRP_ATTR_STAT; } @@ -175,6 +170,7 @@ lck_mod_init( lck_mtx_init_ext(&lck_grp_lock, &lck_grp_lock_ext, &LockCompatGroup, &LockDefaultLckAttr); } +STARTUP(LOCKS_EARLY, STARTUP_RANK_FIRST, lck_mod_init); /* * Routine: lck_grp_attr_alloc_init @@ -186,10 +182,8 @@ lck_grp_attr_alloc_init( { lck_grp_attr_t *attr; - if ((attr = (lck_grp_attr_t *)kalloc(sizeof(lck_grp_attr_t))) != 0) { - lck_grp_attr_setdefault(attr); - } - + attr = zalloc(ZV_LCK_GRP_ATTR); + lck_grp_attr_setdefault(attr); return attr; } @@ -218,6 +212,7 @@ void lck_grp_attr_setstat( lck_grp_attr_t *attr) { +#pragma unused(attr) os_atomic_or(&attr->grp_attr_val, LCK_GRP_ATTR_STAT, relaxed); } @@ -230,7 +225,7 @@ void lck_grp_attr_free( lck_grp_attr_t *attr) { - kfree(attr, sizeof(lck_grp_attr_t)); + zfree(ZV_LCK_GRP_ATTR, attr); } @@ -245,10 +240,8 @@ lck_grp_alloc_init( { lck_grp_t *grp; - if ((grp = (lck_grp_t *)kalloc(sizeof(lck_grp_t))) != 0) { - lck_grp_init(grp, grp_name, attr); - } - + grp = zalloc(ZV_LCK_GRP); + lck_grp_init(grp, grp_name, attr); return grp; } @@ -289,8 +282,9 @@ lck_grp_init(lck_grp_t * grp, const char * grp_name, lck_grp_attr_t * attr) lck_grp_stat_enable(&stats->lgss_mtx_held); lck_grp_stat_enable(&stats->lgss_mtx_miss); lck_grp_stat_enable(&stats->lgss_mtx_direct_wait); + lck_grp_stat_enable(&stats->lgss_mtx_wait); } - if (grp->lck_grp_attr * LCK_GRP_ATTR_TIME_STAT) { + if (grp->lck_grp_attr & LCK_GRP_ATTR_TIME_STAT) { #if LOCK_STATS lck_grp_stats_t *stats = &grp->lck_grp_stats; lck_grp_stat_enable(&stats->lgss_spin_spin); @@ -345,7 +339,7 @@ lck_grp_deallocate( return; } - kfree(grp, sizeof(lck_grp_t)); + zfree(ZV_LCK_GRP, grp); } /* @@ -369,6 +363,9 @@ lck_grp_lckcnt_incr( case LCK_TYPE_RW: lckcnt = &grp->lck_grp_rwcnt; break; + case LCK_TYPE_TICKET: + lckcnt = &grp->lck_grp_ticketcnt; + break; default: return panic("lck_grp_lckcnt_incr(): invalid lock type: %d\n", lck_type); } @@ -398,6 +395,9 @@ lck_grp_lckcnt_decr( case LCK_TYPE_RW: lckcnt = &grp->lck_grp_rwcnt; break; + case LCK_TYPE_TICKET: + lckcnt = &grp->lck_grp_ticketcnt; + break; default: 
panic("lck_grp_lckcnt_decr(): invalid lock type: %d\n", lck_type); return; @@ -417,10 +417,8 @@ lck_attr_alloc_init( { lck_attr_t *attr; - if ((attr = (lck_attr_t *)kalloc(sizeof(lck_attr_t))) != 0) { - lck_attr_setdefault(attr); - } - + attr = zalloc(ZV_LCK_ATTR); + lck_attr_setdefault(attr); return attr; } @@ -491,7 +489,7 @@ void lck_attr_free( lck_attr_t *attr) { - kfree(attr, sizeof(lck_attr_t)); + zfree(ZV_LCK_ATTR, attr); } /* @@ -505,7 +503,6 @@ hw_lock_init(hw_lock_t lock) ordered_store_hw(lock, 0); } -#if __SMP__ static inline bool hw_lock_trylock_contended(hw_lock_t lock, uintptr_t newval) { @@ -590,12 +587,10 @@ hw_lock_lock_contended(hw_lock_t lock, uintptr_t data, uint64_t timeout, boolean } return 0; } -#endif // __SMP__ void * hw_wait_while_equals(void **address, void *current) { -#if __SMP__ void *v; uint64_t end = 0; @@ -622,10 +617,6 @@ hw_wait_while_equals(void **address, void *current) panic("Wait while equals timeout @ *%p == %p", address, v); } } -#else // !__SMP__ - panic("Value at %p is %p", address, current); - __builtin_unreachable(); -#endif // !__SMP__ } static inline void @@ -634,7 +625,6 @@ hw_lock_lock_internal(hw_lock_t lock, thread_t thread LCK_GRP_ARG(lck_grp_t *grp uintptr_t state; state = LCK_MTX_THREAD_TO_STATE(thread) | PLATFORM_LCK_ILOCK; -#if __SMP__ #if LOCK_PRETEST if (ordered_load_hw(lock)) { goto contended; @@ -648,12 +638,6 @@ contended: #endif // LOCK_PRETEST hw_lock_lock_contended(lock, state, 0, spinlock_timeout_panic LCK_GRP_ARG(grp)); end: -#else // __SMP__ - if (lock->lock_data) { - panic("Spinlock held %p", lock); - } - lock->lock_data = state; -#endif // __SMP__ lck_grp_spin_update_held(lock LCK_GRP_ARG(grp)); return; @@ -706,7 +690,6 @@ int thread = current_thread(); disable_preemption_for_thread(thread); state = LCK_MTX_THREAD_TO_STATE(thread) | PLATFORM_LCK_ILOCK; -#if __SMP__ #if LOCK_PRETEST if (ordered_load_hw(lock)) { goto contended; @@ -721,13 +704,6 @@ contended: #endif // LOCK_PRETEST success = hw_lock_lock_contended(lock, state, timeout, FALSE LCK_GRP_ARG(grp)); end: -#else // __SMP__ - (void)timeout; - if (ordered_load_hw(lock) == 0) { - ordered_store_hw(lock, state); - success = 1; - } -#endif // __SMP__ if (success) { lck_grp_spin_update_held(lock LCK_GRP_ARG(grp)); } @@ -744,7 +720,6 @@ hw_lock_try_internal(hw_lock_t lock, thread_t thread LCK_GRP_ARG(lck_grp_t *grp) { int success = 0; -#if __SMP__ #if LOCK_PRETEST if (ordered_load_hw(lock)) { goto failed; @@ -752,12 +727,6 @@ hw_lock_try_internal(hw_lock_t lock, thread_t thread LCK_GRP_ARG(lck_grp_t *grp) #endif // LOCK_PRETEST success = os_atomic_cmpxchg(&lock->lock_data, 0, LCK_MTX_THREAD_TO_STATE(thread) | PLATFORM_LCK_ILOCK, acquire); -#else - if (lock->lock_data == 0) { - lock->lock_data = LCK_MTX_THREAD_TO_STATE(thread) | PLATFORM_LCK_ILOCK; - success = 1; - } -#endif // __SMP__ #if LOCK_PRETEST failed: @@ -836,34 +805,20 @@ hw_lock_held(hw_lock_t lock) return ordered_load_hw(lock) != 0; } -#if __SMP__ static unsigned int hw_lock_bit_to_contended(hw_lock_bit_t *lock, uint32_t mask, uint32_t timeout LCK_GRP_ARG(lck_grp_t *grp)); -#endif static inline unsigned int hw_lock_bit_to_internal(hw_lock_bit_t *lock, unsigned int bit, uint32_t timeout LCK_GRP_ARG(lck_grp_t *grp)) { unsigned int success = 0; uint32_t mask = (1 << bit); -#if !__SMP__ - uint32_t state; -#endif -#if __SMP__ if (__improbable(!hw_atomic_test_and_set32(lock, mask, mask, memory_order_acquire, FALSE))) { success = hw_lock_bit_to_contended(lock, mask, timeout LCK_GRP_ARG(grp)); } else { success = 1; } 
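The lck_mod_init() conversion above is an instance of the new TUNABLE/STARTUP pattern: boot-arg parsing that used to live in a manually called init routine becomes a declarative tunable, and the routine itself registers for a startup phase. A hypothetical subsystem doing the same; the boot-arg name and the phase chosen here are illustrative:

    /* Before: explicit parsing from an init function someone must call. */
    static uint32_t my_opts;

    static void
    my_mod_init(void)
    {
        if (!PE_parse_boot_argn("my_opts", &my_opts, sizeof(my_opts))) {
            my_opts = 0;
        }
    }

    /* After: the tunable names its type, variable, boot-arg and default,
     * and the init routine schedules itself; by the time it runs, the
     * boot-arg value has already been filled in during the TUNABLES phase. */
    TUNABLE(uint32_t, my_opts_new, "my_opts", 0);

    __startup_func
    static void
    my_mod_init_new(void)
    {
        if (my_opts_new != 0) {
            /* react to the boot-arg */
        }
    }
    STARTUP(LOCKS_EARLY, STARTUP_RANK_FIRST, my_mod_init_new);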
-#else // __SMP__ - (void)timeout; - state = ordered_load_bit(lock); - if (!(mask & state)) { - ordered_store_bit(lock, state | mask); - success = 1; - } -#endif // __SMP__ if (success) { lck_grp_spin_update_held(lock LCK_GRP_ARG(grp)); @@ -880,7 +835,6 @@ int return hw_lock_bit_to_internal(lock, bit, timeout LCK_GRP_ARG(grp)); } -#if __SMP__ static unsigned int NOINLINE hw_lock_bit_to_contended(hw_lock_bit_t *lock, uint32_t mask, uint32_t timeout LCK_GRP_ARG(lck_grp_t *grp)) { @@ -921,7 +875,6 @@ end: return 1; } -#endif // __SMP__ void (hw_lock_bit)(hw_lock_bit_t * lock, unsigned int bit LCK_GRP_ARG(lck_grp_t *grp)) @@ -929,11 +882,7 @@ void if (hw_lock_bit_to(lock, bit, LOCK_PANIC_TIMEOUT, LCK_GRP_PROBEARG(grp))) { return; } -#if __SMP__ panic("hw_lock_bit(): timed out (%p)", lock); -#else - panic("hw_lock_bit(): interlock held (%p)", lock); -#endif } void @@ -945,11 +894,7 @@ void if (hw_lock_bit_to_internal(lock, bit, LOCK_PANIC_TIMEOUT LCK_GRP_ARG(grp))) { return; } -#if __SMP__ panic("hw_lock_bit_nopreempt(): timed out (%p)", lock); -#else - panic("hw_lock_bit_nopreempt(): interlock held (%p)", lock); -#endif } unsigned @@ -957,22 +902,11 @@ int (hw_lock_bit_try)(hw_lock_bit_t * lock, unsigned int bit LCK_GRP_ARG(lck_grp_t *grp)) { uint32_t mask = (1 << bit); -#if !__SMP__ - uint32_t state; -#endif boolean_t success = FALSE; _disable_preemption(); -#if __SMP__ // TODO: consider weak (non-looping) atomic test-and-set success = hw_atomic_test_and_set32(lock, mask, mask, memory_order_acquire, FALSE); -#else - state = ordered_load_bit(lock); - if (!(mask & state)) { - ordered_store_bit(lock, state | mask); - success = TRUE; - } -#endif // __SMP__ if (!success) { _enable_preemption(); } @@ -988,19 +922,11 @@ static inline void hw_unlock_bit_internal(hw_lock_bit_t *lock, unsigned int bit) { uint32_t mask = (1 << bit); -#if !__SMP__ - uint32_t state; -#endif -#if __SMP__ os_atomic_andnot(lock, mask, release); #if __arm__ set_event(); #endif -#else // __SMP__ - state = ordered_load_bit(lock); - ordered_store_bit(lock, state & ~mask); -#endif // __SMP__ #if CONFIG_DTRACE LOCKSTAT_RECORD(LS_LCK_SPIN_UNLOCK_RELEASE, lock, bit); #endif @@ -3251,3 +3177,64 @@ lck_mtx_gate_assert(__assert_only lck_mtx_t *lock, gate_t *gate, int flags) gate_assert(gate, flags); } + +#pragma mark - LCK_*_DECLARE support + +__startup_func +void +lck_grp_attr_startup_init(struct lck_grp_attr_startup_spec *sp) +{ + lck_grp_attr_t *attr = sp->grp_attr; + lck_grp_attr_setdefault(attr); + attr->grp_attr_val |= sp->grp_attr_set_flags; + attr->grp_attr_val &= ~sp->grp_attr_clear_flags; +} + +__startup_func +void +lck_grp_startup_init(struct lck_grp_startup_spec *sp) +{ + lck_grp_init(sp->grp, sp->grp_name, sp->grp_attr); +} + +__startup_func +void +lck_attr_startup_init(struct lck_attr_startup_spec *sp) +{ + lck_attr_t *attr = sp->lck_attr; + lck_attr_setdefault(attr); + attr->lck_attr_val |= sp->lck_attr_set_flags; + attr->lck_attr_val &= ~sp->lck_attr_clear_flags; +} + +__startup_func +void +lck_spin_startup_init(struct lck_spin_startup_spec *sp) +{ + lck_spin_init(sp->lck, sp->lck_grp, sp->lck_attr); +} + +__startup_func +void +lck_mtx_startup_init(struct lck_mtx_startup_spec *sp) +{ + if (sp->lck_ext) { + lck_mtx_init_ext(sp->lck, sp->lck_ext, sp->lck_grp, sp->lck_attr); + } else { + lck_mtx_init(sp->lck, sp->lck_grp, sp->lck_attr); + } +} + +__startup_func +void +lck_rw_startup_init(struct lck_rw_startup_spec *sp) +{ + lck_rw_init(sp->lck, sp->lck_grp, sp->lck_attr); +} + +__startup_func +void 
+usimple_lock_startup_init(struct usimple_lock_startup_spec *sp) +{ + simple_lock_init(sp->lck, sp->lck_init_arg); +} diff --git a/osfmk/kern/locks.h b/osfmk/kern/locks.h index 51c1da4c1..e8475383c 100644 --- a/osfmk/kern/locks.h +++ b/osfmk/kern/locks.h @@ -29,21 +29,15 @@ #ifndef _KERN_LOCKS_H_ #define _KERN_LOCKS_H_ -#include -#include -#include -#include -#include -#include -#include +#include +#include +#include +#include +#include +#include +#include -#ifdef MACH_KERNEL_PRIVATE -#include - -extern void lck_mod_init( - void); - -#endif +__BEGIN_DECLS typedef unsigned int lck_sleep_action_t; @@ -63,40 +57,44 @@ typedef unsigned int lck_wake_action_t; #define LCK_WAKE_DEFAULT 0x00 /* If waiters are present, transfer their push to the wokenup thread */ #define LCK_WAKE_DO_NOT_TRANSFER_PUSH 0x01 /* Do not transfer waiters push when waking up */ -#ifdef MACH_KERNEL_PRIVATE +#ifdef XNU_KERNEL_PRIVATE +#include + typedef struct _lck_attr_ { unsigned int lck_attr_val; } lck_attr_t; extern lck_attr_t LockDefaultLckAttr; -#define LCK_ATTR_NONE 0 - -#define LCK_ATTR_DEBUG 0x00000001 +#define LCK_ATTR_NONE 0 +#define LCK_ATTR_DEBUG 0x00000001 #define LCK_ATTR_RW_SHARED_PRIORITY 0x00010000 - -#else +#else /* !XNU_KERNEL_PRIVATE */ typedef struct __lck_attr__ lck_attr_t; -#endif +#endif /* !XNU_KERNEL_PRIVATE */ #define LCK_ATTR_NULL (lck_attr_t *)NULL -__BEGIN_DECLS - -extern lck_attr_t *lck_attr_alloc_init( - void); +extern lck_attr_t *lck_attr_alloc_init(void); -extern void lck_attr_setdefault( +extern void lck_attr_setdefault( lck_attr_t *attr); -extern void lck_attr_setdebug( +extern void lck_attr_setdebug( lck_attr_t *attr); -extern void lck_attr_cleardebug( +extern void lck_attr_cleardebug( lck_attr_t *attr); #ifdef XNU_KERNEL_PRIVATE +#if __x86_64__ +/* + * Extended mutexes are only implemented on x86_64 + */ +#define HAS_EXT_MUTEXES 1 +#endif /* __x86_64__ */ + typedef union { uint16_t tcurnext; struct { @@ -110,127 +108,148 @@ typedef struct { uintptr_t lck_owner; } lck_ticket_t; -void lck_ticket_init(lck_ticket_t *tlock); +void lck_ticket_init(lck_ticket_t *tlock, lck_grp_t *grp); + +#if LOCK_STATS +void lck_ticket_lock(lck_ticket_t *tlock, lck_grp_t *grp); +#else void lck_ticket_lock(lck_ticket_t *tlock); +#define lck_ticket_lock(tlock, grp) lck_ticket_lock(tlock) +#endif /* LOCK_STATS */ + void lck_ticket_unlock(lck_ticket_t *tlock); void lck_ticket_assert_owned(lck_ticket_t *tlock); -extern void lck_attr_rw_shared_priority( +extern void lck_attr_rw_shared_priority( lck_attr_t *attr); #endif -extern void lck_attr_free( +extern void lck_attr_free( lck_attr_t *attr); #define decl_lck_spin_data(class, name) class lck_spin_t name -extern lck_spin_t *lck_spin_alloc_init( +extern lck_spin_t *lck_spin_alloc_init( lck_grp_t *grp, lck_attr_t *attr); -extern void lck_spin_init( +extern void lck_spin_init( lck_spin_t *lck, lck_grp_t *grp, lck_attr_t *attr); -extern void lck_spin_lock( +extern void lck_spin_lock( lck_spin_t *lck); -extern void lck_spin_lock_grp( +extern void lck_spin_lock_grp( lck_spin_t *lck, lck_grp_t *grp); -extern void lck_spin_unlock( +extern void lck_spin_unlock( lck_spin_t *lck); -extern void lck_spin_destroy( +extern void lck_spin_destroy( lck_spin_t *lck, lck_grp_t *grp); -extern void lck_spin_free( +extern void lck_spin_free( lck_spin_t *lck, lck_grp_t *grp); extern wait_result_t lck_spin_sleep( - lck_spin_t *lck, + lck_spin_t *lck, lck_sleep_action_t lck_sleep_action, - event_t event, + event_t event, wait_interrupt_t interruptible); extern wait_result_t 
lck_spin_sleep_grp( - lck_spin_t *lck, + lck_spin_t *lck, lck_sleep_action_t lck_sleep_action, - event_t event, + event_t event, wait_interrupt_t interruptible, lck_grp_t *grp); extern wait_result_t lck_spin_sleep_deadline( - lck_spin_t *lck, + lck_spin_t *lck, lck_sleep_action_t lck_sleep_action, - event_t event, + event_t event, wait_interrupt_t interruptible, - uint64_t deadline); + uint64_t deadline); #ifdef KERNEL_PRIVATE -extern void lck_spin_lock_nopreempt( lck_spin_t *lck); -extern void lck_spin_lock_nopreempt_grp( lck_spin_t *lck, lck_grp_t *grp); +extern void lck_spin_lock_nopreempt( + lck_spin_t *lck); + +extern void lck_spin_lock_nopreempt_grp( + lck_spin_t *lck, lck_grp_t *grp); -extern void lck_spin_unlock_nopreempt( lck_spin_t *lck); +extern void lck_spin_unlock_nopreempt( + lck_spin_t *lck); + +extern boolean_t lck_spin_try_lock_grp( + lck_spin_t *lck, + lck_grp_t *grp); -extern boolean_t lck_spin_try_lock_grp( lck_spin_t *lck, lck_grp_t *grp); +extern boolean_t lck_spin_try_lock( + lck_spin_t *lck); -extern boolean_t lck_spin_try_lock( lck_spin_t *lck); +extern boolean_t lck_spin_try_lock_nopreempt( + lck_spin_t *lck); -extern boolean_t lck_spin_try_lock_nopreempt( lck_spin_t *lck); -extern boolean_t lck_spin_try_lock_nopreempt_grp( lck_spin_t *lck, lck_grp_t *grp); +extern boolean_t lck_spin_try_lock_nopreempt_grp( + lck_spin_t *lck, + lck_grp_t *grp); /* NOT SAFE: To be used only by kernel debugger to avoid deadlock. */ -extern boolean_t kdp_lck_spin_is_acquired( lck_spin_t *lck); +extern boolean_t kdp_lck_spin_is_acquired( + lck_spin_t *lck); struct _lck_mtx_ext_; -extern void lck_mtx_init_ext(lck_mtx_t *lck, struct _lck_mtx_ext_ *lck_ext, - lck_grp_t *grp, lck_attr_t *attr); +extern void lck_mtx_init_ext( + lck_mtx_t *lck, + struct _lck_mtx_ext_ *lck_ext, + lck_grp_t *grp, + lck_attr_t *attr); #endif - #define decl_lck_mtx_data(class, name) class lck_mtx_t name -extern lck_mtx_t *lck_mtx_alloc_init( +extern lck_mtx_t *lck_mtx_alloc_init( lck_grp_t *grp, lck_attr_t *attr); -extern void lck_mtx_init( +extern void lck_mtx_init( lck_mtx_t *lck, lck_grp_t *grp, lck_attr_t *attr); -extern void lck_mtx_lock( +extern void lck_mtx_lock( lck_mtx_t *lck); -extern void lck_mtx_unlock( +extern void lck_mtx_unlock( lck_mtx_t *lck); -extern void lck_mtx_destroy( +extern void lck_mtx_destroy( lck_mtx_t *lck, lck_grp_t *grp); -extern void lck_mtx_free( +extern void lck_mtx_free( lck_mtx_t *lck, lck_grp_t *grp); extern wait_result_t lck_mtx_sleep( - lck_mtx_t *lck, + lck_mtx_t *lck, lck_sleep_action_t lck_sleep_action, - event_t event, + event_t event, wait_interrupt_t interruptible); extern wait_result_t lck_mtx_sleep_deadline( - lck_mtx_t *lck, + lck_mtx_t *lck, lck_sleep_action_t lck_sleep_action, - event_t event, + event_t event, wait_interrupt_t interruptible, - uint64_t deadline); + uint64_t deadline); #ifdef KERNEL_PRIVATE /* @@ -822,46 +841,46 @@ extern int lck_mtx_test_mtx_contended_loop_time(int iter, char* buf #endif #ifdef KERNEL_PRIVATE -extern boolean_t lck_mtx_try_lock( +extern boolean_t lck_mtx_try_lock( lck_mtx_t *lck); -extern void mutex_pause(uint32_t); +extern void mutex_pause(uint32_t); -extern void lck_mtx_yield( +extern void lck_mtx_yield( lck_mtx_t *lck); -extern boolean_t lck_mtx_try_lock_spin( +extern boolean_t lck_mtx_try_lock_spin( lck_mtx_t *lck); -extern void lck_mtx_lock_spin( +extern void lck_mtx_lock_spin( lck_mtx_t *lck); -extern boolean_t kdp_lck_mtx_lock_spin_is_acquired( +extern boolean_t kdp_lck_mtx_lock_spin_is_acquired( lck_mtx_t *lck); -extern 
void lck_mtx_convert_spin( +extern void lck_mtx_convert_spin( lck_mtx_t *lck); -extern void lck_mtx_lock_spin_always( +extern void lck_mtx_lock_spin_always( lck_mtx_t *lck); -extern boolean_t lck_mtx_try_lock_spin_always( +extern boolean_t lck_mtx_try_lock_spin_always( lck_mtx_t *lck); #define lck_mtx_unlock_always(l) lck_mtx_unlock(l) -extern void lck_spin_assert( +extern void lck_spin_assert( lck_spin_t *lck, - unsigned int type); + unsigned int type); -extern boolean_t kdp_lck_rw_lock_is_acquired_exclusive( +extern boolean_t kdp_lck_rw_lock_is_acquired_exclusive( lck_rw_t *lck); #endif /* KERNEL_PRIVATE */ -extern void lck_mtx_assert( +extern void lck_mtx_assert( lck_mtx_t *lck, - unsigned int type); + unsigned int type); #if MACH_ASSERT #define LCK_MTX_ASSERT(lck, type) lck_mtx_assert((lck),(type)) @@ -883,8 +902,6 @@ extern void lck_mtx_assert( #define LCK_RW_ASSERT_DEBUG(lck, type) #endif /* DEBUG */ -__END_DECLS - #define LCK_ASSERT_OWNED 1 #define LCK_ASSERT_NOTOWNED 2 @@ -893,23 +910,23 @@ __END_DECLS #ifdef MACH_KERNEL_PRIVATE struct turnstile; -extern void lck_mtx_lock_wait( +extern void lck_mtx_lock_wait( lck_mtx_t *lck, thread_t holder, struct turnstile **ts); -extern int lck_mtx_lock_acquire( +extern int lck_mtx_lock_acquire( lck_mtx_t *lck, struct turnstile *ts); -extern boolean_t lck_mtx_unlock_wakeup( +extern boolean_t lck_mtx_unlock_wakeup( lck_mtx_t *lck, thread_t holder); -extern boolean_t lck_mtx_ilk_unlock( +extern boolean_t lck_mtx_ilk_unlock( lck_mtx_t *lck); -extern boolean_t lck_mtx_ilk_try_lock( +extern boolean_t lck_mtx_ilk_try_lock( lck_mtx_t *lck); extern void lck_mtx_wakeup_adjust_pri(thread_t thread, integer_t priority); @@ -930,39 +947,51 @@ typedef unsigned int lck_rw_type_t; #define LCK_RW_ASSERT_NOTHELD 0x04 #endif -__BEGIN_DECLS - -extern lck_rw_t *lck_rw_alloc_init( +extern lck_rw_t *lck_rw_alloc_init( lck_grp_t *grp, lck_attr_t *attr); -extern void lck_rw_init( +extern void lck_rw_init( lck_rw_t *lck, lck_grp_t *grp, lck_attr_t *attr); -extern void lck_rw_lock( +extern void lck_rw_lock( lck_rw_t *lck, - lck_rw_type_t lck_rw_type); + lck_rw_type_t lck_rw_type); -extern void lck_rw_unlock( +extern void lck_rw_unlock( lck_rw_t *lck, - lck_rw_type_t lck_rw_type); + lck_rw_type_t lck_rw_type); -extern void lck_rw_lock_shared( +extern void lck_rw_lock_shared( lck_rw_t *lck); -extern void lck_rw_unlock_shared( +extern void lck_rw_unlock_shared( lck_rw_t *lck); -extern boolean_t lck_rw_lock_yield_shared( +extern boolean_t lck_rw_lock_yield_shared( lck_rw_t *lck, - boolean_t force_yield); + boolean_t force_yield); -extern void lck_rw_lock_exclusive( +extern void lck_rw_lock_exclusive( + lck_rw_t *lck); +/* + * Grabs the lock exclusive. + * Returns true iff the thread spun or blocked while attempting to + * acquire the lock. + * + * Note that the return value is ONLY A HEURISTIC w.r.t. the lock's + * contention. + * + * This routine IS EXPERIMENTAL. + * It's only used for the vm object lock, and use for other subsystems + * is UNSUPPORTED. + */ +extern bool lck_rw_lock_exclusive_check_contended( lck_rw_t *lck); -extern void lck_rw_unlock_exclusive( +extern void lck_rw_unlock_exclusive( lck_rw_t *lck); #ifdef XNU_KERNEL_PRIVATE @@ -971,7 +1000,7 @@ extern void lck_rw_unlock_exclusive( * read-write locks do not have a concept of ownership, so lck_rw_assert() * merely asserts that someone is holding the lock, not necessarily the caller. 
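lck_rw_lock_exclusive_check_contended() above grabs the lock exclusive just like lck_rw_lock_exclusive(), but additionally reports whether the acquiring thread had to spin or block. Per its comment the only supported client is the VM object lock, so the following is purely an illustrative sketch of the intended shape of a caller (it uses the LCK_RW_DECLARE macro added later in this header):

    static LCK_GRP_DECLARE(my_rw_grp, "my rw lock");
    static LCK_RW_DECLARE(my_rw_lock, &my_rw_grp);

    static void
    my_rw_use(void)
    {
        bool contended = lck_rw_lock_exclusive_check_contended(&my_rw_lock);

        /* 'contended' is a heuristic: it only means this acquisition spun
         * or blocked, not that the lock is contended in general. */
        if (contended) {
            /* feed the hint into a later policy decision */
        }

        lck_rw_unlock_exclusive(&my_rw_lock);
    }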
*/ -extern void lck_rw_assert( +extern void lck_rw_assert( lck_rw_t *lck, unsigned int type); @@ -987,45 +1016,152 @@ extern lck_rw_type_t lck_rw_done( lck_rw_t *lck); #endif -extern void lck_rw_destroy( +extern void lck_rw_destroy( lck_rw_t *lck, lck_grp_t *grp); -extern void lck_rw_free( +extern void lck_rw_free( lck_rw_t *lck, lck_grp_t *grp); extern wait_result_t lck_rw_sleep( - lck_rw_t *lck, + lck_rw_t *lck, lck_sleep_action_t lck_sleep_action, - event_t event, + event_t event, wait_interrupt_t interruptible); extern wait_result_t lck_rw_sleep_deadline( - lck_rw_t *lck, + lck_rw_t *lck, lck_sleep_action_t lck_sleep_action, - event_t event, + event_t event, wait_interrupt_t interruptible, - uint64_t deadline); + uint64_t deadline); -extern boolean_t lck_rw_lock_shared_to_exclusive( +extern boolean_t lck_rw_lock_shared_to_exclusive( lck_rw_t *lck); -extern void lck_rw_lock_exclusive_to_shared( +extern void lck_rw_lock_exclusive_to_shared( lck_rw_t *lck); -extern boolean_t lck_rw_try_lock( +extern boolean_t lck_rw_try_lock( lck_rw_t *lck, - lck_rw_type_t lck_rw_type); + lck_rw_type_t lck_rw_type); #ifdef KERNEL_PRIVATE -extern boolean_t lck_rw_try_lock_shared( +extern boolean_t lck_rw_try_lock_shared( lck_rw_t *lck); -extern boolean_t lck_rw_try_lock_exclusive( +extern boolean_t lck_rw_try_lock_exclusive( lck_rw_t *lck); + #endif +#if XNU_KERNEL_PRIVATE + +struct lck_attr_startup_spec { + lck_attr_t *lck_attr; + uint32_t lck_attr_set_flags; + uint32_t lck_attr_clear_flags; +}; + +struct lck_spin_startup_spec { + lck_spin_t *lck; + lck_grp_t *lck_grp; + lck_attr_t *lck_attr; +}; + +struct lck_mtx_startup_spec { + lck_mtx_t *lck; + struct _lck_mtx_ext_ *lck_ext; + lck_grp_t *lck_grp; + lck_attr_t *lck_attr; +}; + +struct lck_rw_startup_spec { + lck_rw_t *lck; + lck_grp_t *lck_grp; + lck_attr_t *lck_attr; +}; + +extern void lck_attr_startup_init( + struct lck_attr_startup_spec *spec); + +extern void lck_spin_startup_init( + struct lck_spin_startup_spec *spec); + +extern void lck_mtx_startup_init( + struct lck_mtx_startup_spec *spec); + +extern void lck_rw_startup_init( + struct lck_rw_startup_spec *spec); + +/* + * Auto-initializing locks declarations + * ------------------------------------ + * + * Unless you need to configure your locks in very specific ways, + * there is no point creating explicit lock attributes. For most + * static locks, these declaration macros can be used: + * + * - LCK_SPIN_DECLARE for spinlocks, + * - LCK_MTX_EARLY_DECLARE for mutexes initialized before memory + * allocations are possible, + * - LCK_MTX_DECLARE for mutexes, + * - LCK_RW_DECLARE for reader writer locks. + * + * For cases when some particular attributes need to be used, + * these come in *_ATTR variants that take a variable declared with + * LCK_ATTR_DECLARE as an argument. 
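The lck_ticket_* interface declared earlier in this header now takes a lock group: lck_ticket_init() always receives one, and lck_ticket_lock() only consumes it when LOCK_STATS is built in (otherwise the macro drops the argument). A minimal sketch of a hypothetical user; names are illustrative:

    static LCK_GRP_DECLARE(my_ticket_grp, "my ticket lock");
    static lck_ticket_t my_ticket_lock;

    static void
    my_ticket_setup(void)
    {
        lck_ticket_init(&my_ticket_lock, &my_ticket_grp);
    }

    static void
    my_ticket_use(void)
    {
        lck_ticket_lock(&my_ticket_lock, &my_ticket_grp);
        lck_ticket_assert_owned(&my_ticket_lock);
        /* ... critical section ... */
        lck_ticket_unlock(&my_ticket_lock);
    }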
+ */ +#define LCK_ATTR_DECLARE(var, set_flags, clear_flags) \ + SECURITY_READ_ONLY_LATE(lck_attr_t) var; \ + static __startup_data struct lck_attr_startup_spec \ + __startup_lck_attr_spec_ ## var = { &var, set_flags, clear_flags }; \ + STARTUP_ARG(LOCKS_EARLY, STARTUP_RANK_SECOND, lck_attr_startup_init, \ + &__startup_lck_attr_spec_ ## var) + +#define LCK_SPIN_DECLARE_ATTR(var, grp, attr) \ + lck_spin_t var; \ + static __startup_data struct lck_spin_startup_spec \ + __startup_lck_spin_spec_ ## var = { &var, grp, attr }; \ + STARTUP_ARG(LOCKS_EARLY, STARTUP_RANK_FOURTH, lck_spin_startup_init, \ + &__startup_lck_spin_spec_ ## var) + +#define LCK_SPIN_DECLARE(var, grp) \ + LCK_SPIN_DECLARE_ATTR(var, grp, LCK_ATTR_NULL) + +#define LCK_MTX_DECLARE_ATTR(var, grp, attr) \ + lck_mtx_t var; \ + static __startup_data struct lck_mtx_startup_spec \ + __startup_lck_mtx_spec_ ## var = { &var, NULL, grp, attr }; \ + STARTUP_ARG(LOCKS, STARTUP_RANK_FIRST, lck_mtx_startup_init, \ + &__startup_lck_mtx_spec_ ## var) + +#define LCK_MTX_DECLARE(var, grp) \ + LCK_MTX_DECLARE_ATTR(var, grp, LCK_ATTR_NULL) + +#define LCK_MTX_EARLY_DECLARE_ATTR(var, grp, attr) \ + lck_mtx_ext_t var ## _ext; \ + lck_mtx_t var; \ + static __startup_data struct lck_mtx_startup_spec \ + __startup_lck_mtx_spec_ ## var = { &var, &var ## _ext, grp, attr }; \ + STARTUP_ARG(LOCKS_EARLY, STARTUP_RANK_FOURTH, lck_mtx_startup_init, \ + &__startup_lck_mtx_spec_ ## var) + +#define LCK_MTX_EARLY_DECLARE(var, grp) \ + LCK_MTX_EARLY_DECLARE_ATTR(var, grp, LCK_ATTR_NULL) + +#define LCK_RW_DECLARE_ATTR(var, grp, attr) \ + lck_rw_t var; \ + static __startup_data struct lck_rw_startup_spec \ + __startup_lck_rw_spec_ ## var = { &var, grp, attr }; \ + STARTUP_ARG(LOCKS_EARLY, STARTUP_RANK_FOURTH, lck_rw_startup_init, \ + &__startup_lck_rw_spec_ ## var) + +#define LCK_RW_DECLARE(var, grp) \ + LCK_RW_DECLARE_ATTR(var, grp, LCK_ATTR_NULL) + +#endif /* XNU_KERNEL_PRIVATE */ __END_DECLS diff --git a/osfmk/kern/ltable.c b/osfmk/kern/ltable.c index 7c5b79a24..fb4f482cc 100644 --- a/osfmk/kern/ltable.c +++ b/osfmk/kern/ltable.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016 Apple Inc. All rights reserved. + * Copyright (c) 2016-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -27,6 +27,7 @@ */ #include #include +#include #include #include #include @@ -35,7 +36,7 @@ #include -#define P2ROUNDUP(x, align) (-(-((uint32_t)(x)) & -(align))) +#define P2ROUNDUP(x, align) (-(-((uintptr_t)(x)) & -((uintptr_t)align))) #define ROUNDDOWN(x, y) (((x)/(y))*(y)) /* ---------------------------------------------------------------------- @@ -44,17 +45,34 @@ * * ---------------------------------------------------------------------- */ -vm_size_t g_lt_max_tbl_size; -static lck_grp_t g_lt_lck_grp; - /* default VA space for link tables (zone allocated) */ #define DEFAULT_MAX_TABLE_SIZE P2ROUNDUP(8 * 1024 * 1024, PAGE_SIZE) +TUNABLE(vm_size_t, g_lt_max_tbl_size, "lt_tbl_size", 0); +LCK_GRP_DECLARE(g_lt_lck_grp, "link_table_locks"); + #if DEVELOPMENT || DEBUG /* global for lldb macros */ uint64_t g_lt_idx_max = LT_IDX_MAX; #endif +__startup_func +static void +ltable_startup_tunables_init(void) +{ + // make sure that if a boot-arg was passed, g_lt_max_tbl_size + // is a PAGE_SIZE multiple. + // + // Also set the default for platforms where PAGE_SIZE + // isn't a compile time constant. 
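The declaration macros above cover the remaining lock types. The EARLY mutex variant differs from the plain one in two ways visible in the macro bodies: it embeds a lck_mtx_ext_t next to the lock, and it is initialized in the LOCKS_EARLY phase rather than LOCKS. A sketch of a hypothetical set of static locks; names are illustrative:

    static LCK_GRP_DECLARE(my_grp, "my subsystem");

    /* Ordinary mutex: initialized during STARTUP(LOCKS, STARTUP_RANK_FIRST). */
    static LCK_MTX_DECLARE(my_mtx, &my_grp);

    /* Early mutex: usable before the LOCKS phase runs. */
    LCK_MTX_EARLY_DECLARE(my_early_mtx, &my_grp);

    /* Spin and reader-writer locks follow the same pattern. */
    static LCK_SPIN_DECLARE(my_spin, &my_grp);
    static LCK_RW_DECLARE(my_rw, &my_grp);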
+ if (g_lt_max_tbl_size == 0) { + g_lt_max_tbl_size = (typeof(g_lt_max_tbl_size))DEFAULT_MAX_TABLE_SIZE; + } else { + g_lt_max_tbl_size = round_page(g_lt_max_tbl_size); + } +} +STARTUP(TUNABLES, STARTUP_RANK_MIDDLE, ltable_startup_tunables_init); + /* construct a link table element from an offset and mask into a slab */ #define lt_elem_ofst_slab(slab, slab_msk, ofst) \ @@ -151,31 +169,6 @@ lt_elem_set_type(struct lt_elem *elem, int type) } -/** - * ltable_bootstrap: bootstrap a link table - * - * Called once at system boot - */ -void -ltable_bootstrap(void) -{ - static int s_is_bootstrapped = 0; - - uint32_t tmp32 = 0; - - if (s_is_bootstrapped) { - return; - } - s_is_bootstrapped = 1; - - g_lt_max_tbl_size = DEFAULT_MAX_TABLE_SIZE; - if (PE_parse_boot_argn("lt_tbl_size", &tmp32, sizeof(tmp32)) == TRUE) { - g_lt_max_tbl_size = (vm_size_t)P2ROUNDUP(tmp32, PAGE_SIZE); - } - - lck_grp_init(&g_lt_lck_grp, "link_table_locks", LCK_GRP_ATTR_NULL); -} - /** * ltable_init: initialize a link table with given parameters * @@ -246,7 +239,7 @@ ltable_init(struct link_table *table, const char *name, /* initialize the table's slab zone (for table growth) */ ltdbg("Initializing %s zone: slab:%d (%d,0x%x) max:%ld", name, slab_sz, slab_shift, slab_msk, max_tbl_sz); - slab_zone = zinit(slab_sz, max_tbl_sz, slab_sz, name); + slab_zone = zone_create(name, slab_sz, ZC_NONE); assert(slab_zone != ZONE_NULL); /* allocate the first slab and populate it */ @@ -371,8 +364,9 @@ ltable_grow(struct link_table *table, uint32_t min_free) /* allocate another slab */ slab = (struct lt_elem *)zalloc(table->slab_zone); if (slab == NULL) { - panic("Can't allocate a %s table (%p) slab from zone:%p", - table->slab_zone->zone_name, table, table->slab_zone); + panic("Can't allocate a %s%s table (%p) slab from zone:%p", + zone_heap_name(table->slab_zone), zone_name(table->slab_zone), + table, table->slab_zone); } memset(slab, 0, table->slab_sz); diff --git a/osfmk/kern/ltable.h b/osfmk/kern/ltable.h index bc05894c3..a5b8a1b67 100644 --- a/osfmk/kern/ltable.h +++ b/osfmk/kern/ltable.h @@ -163,15 +163,6 @@ struct link_table { #endif } __attribute__((aligned(8))); - -/** - * ltable_bootstrap: bootstrap a link table - * - * Called once at system boot - */ -extern void ltable_bootstrap(void); - - /** * ltable_init: initialize a link table with given parameters * diff --git a/osfmk/kern/mach_filter.h b/osfmk/kern/mach_filter.h new file mode 100644 index 000000000..26b00e0bf --- /dev/null +++ b/osfmk/kern/mach_filter.h @@ -0,0 +1,61 @@ +/* + * Copyright (c) 2020 Apple Computer, Inc. All rights reserved. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. + * + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. 
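One easy-to-miss change at the top of ltable.c: P2ROUNDUP now casts through uintptr_t instead of uint32_t, so the rounding is done at full pointer width. With the old definition, a vm_size_t above 4 GiB would have been truncated before rounding on LP64; with the new one the computation is exact. An illustrative check, assuming LP64 and any power-of-two PAGE_SIZE:

    vm_size_t sz      = (1ULL << 33) + 1;          /* 8 GiB + 1 byte */
    vm_size_t rounded = P2ROUNDUP(sz, PAGE_SIZE);
    assert(rounded == (1ULL << 33) + PAGE_SIZE);    /* rounds up, no truncation */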
+ * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ + */ + +#ifndef _KERN_MACH_FILTER_H_ +#define _KERN_MACH_FILTER_H_ + +#if KERNEL_PRIVATE + +#include +#include + +/* Sandbox-specific calls for task based message filtering */ +typedef boolean_t (*mach_msg_fetch_filter_policy_cbfunc_t) (struct task *task, void *portlabel, + mach_msg_id_t msgid, mach_msg_filter_id *fpid); + +struct mach_msg_filter_callbacks { + unsigned int version; + const mach_msg_fetch_filter_policy_cbfunc_t fetch_filter_policy; +}; + +#define MACH_MSG_FILTER_CALLBACKS_VERSION_0 (0) /* up-to fetch_filter_policy */ +#define MACH_MSG_FILTER_CALLBACKS_CURRENT MACH_MSG_FILTER_CALLBACKS_VERSION_0 + +__BEGIN_DECLS + +int mach_msg_filter_register_callback(const struct mach_msg_filter_callbacks *callbacks); + +__END_DECLS + +#endif /* KERNEL_PRIVATE */ + +#if XNU_KERNEL_PRIVATE +boolean_t mach_msg_fetch_filter_policy(void *portlabel, mach_msg_id_t msgh_id, mach_msg_filter_id *fid); +#endif /* XNU_KERNEL_PRIVATE */ + +#endif /* _KERN_MACH_FILTER_H_ */ diff --git a/osfmk/kern/mach_node.c b/osfmk/kern/mach_node.c index d0d03cf62..223140ee4 100644 --- a/osfmk/kern/mach_node.c +++ b/osfmk/kern/mach_node.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015-2016 Apple Inc. All rights reserved. + * Copyright (c) 2015-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -73,21 +73,19 @@ mach_node_t localnode; // This /*** Private to mach_node layer ***/ -static int mach_nodes_to_publish; -static mach_node_t mach_node_table[MACH_NODES_MAX]; -static lck_spin_t mach_node_table_lock_data; +static int mach_nodes_to_publish; +static mach_node_t mach_node_table[MACH_NODES_MAX]; +static LCK_SPIN_DECLARE_ATTR(mach_node_table_lock_data, + &ipc_lck_grp, &ipc_lck_attr); #define MACH_NODE_TABLE_LOCK() lck_spin_lock(&mach_node_table_lock_data) #define MACH_NODE_TABLE_UNLOCK() lck_spin_unlock(&mach_node_table_lock_data) -#define MACH_NODE_TABLE_LOCK_INIT() lck_spin_init(&mach_node_table_lock_data, \ - &ipc_lck_grp, &ipc_lck_attr) static volatile SInt64 mnl_name_next; -static queue_head_t mnl_name_table[MNL_NAME_TABLE_SIZE]; -static lck_spin_t mnl_name_table_lock_data; +static queue_head_t mnl_name_table[MNL_NAME_TABLE_SIZE]; +static LCK_SPIN_DECLARE_ATTR(mnl_name_table_lock_data, + &ipc_lck_grp, &ipc_lck_attr); #define MNL_NAME_TABLE_LOCK() lck_spin_lock(&mnl_name_table_lock_data) #define MNL_NAME_TABLE_UNLOCK() lck_spin_unlock(&mnl_name_table_lock_data) -#define MNL_NAME_TABLE_LOCK_INIT() lck_spin_init(&mnl_name_table_lock_data, \ - &ipc_lck_grp, &ipc_lck_attr) static void mach_node_init(void); static void mnl_name_table_init(void); @@ -113,14 +111,12 @@ mach_node_init(void) localnode_id, MACH_NODES_MAX); mach_node_table_init(); mnl_name_table_init(); - flipc_init(); } // TODO: else block until init is finished (init completion race) } void mach_node_table_init(void) { - MACH_NODE_TABLE_LOCK_INIT(); MACH_NODE_TABLE_LOCK(); /* Start with an enpty node table. 
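
The mach_filter.h header introduced above defines the hook a message-filtering policy module (the comment names Sandbox) registers with the kernel. A sketch of a registration follows; the callback body, the filter-id value and the meaning of the boolean return are assumptions rather than documented behaviour:

static boolean_t
my_fetch_filter_policy(struct task *task, void *portlabel,
    mach_msg_id_t msgid, mach_msg_filter_id *fpid)
{
	(void)task;
	(void)portlabel;
	(void)msgid;
	*fpid = 0;      /* hypothetical "no specific policy" id */
	return TRUE;    /* assumed to mean *fpid was filled in */
}

static const struct mach_msg_filter_callbacks my_filter_callbacks = {
	.version = MACH_MSG_FILTER_CALLBACKS_VERSION_0,
	.fetch_filter_policy = my_fetch_filter_policy,
};

static void
my_filter_module_init(void)
{
	if (mach_msg_filter_register_callback(&my_filter_callbacks) != 0) {
		/* registration rejected; a zero return is assumed to mean success */
	}
}
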
*/ @@ -134,7 +130,7 @@ mach_node_table_init(void) MACH_NODE_TABLE_UNLOCK(); /* Set up localnode's struct */ - bzero(localnode, sizeof(localnode)); + bzero(localnode, sizeof(*localnode)); localnode->info.datamodel = LOCAL_DATA_MODEL; localnode->info.byteorder = OSHostByteOrder(); localnode->info.proto_vers_min = MNL_PROTOCOL_V1; @@ -471,7 +467,6 @@ mnl_name_free(mnl_name_t name __unused) void mnl_name_table_init(void) { - MNL_NAME_TABLE_LOCK_INIT(); MNL_NAME_TABLE_LOCK(); // Set the first name to this node's bootstrap name diff --git a/osfmk/kern/machine.c b/osfmk/kern/machine.c index c90a8ea6d..248790205 100644 --- a/osfmk/kern/machine.c +++ b/osfmk/kern/machine.c @@ -90,6 +90,7 @@ #include #include +#include #if HIBERNATION #include @@ -101,7 +102,7 @@ extern void (*dtrace_cpu_state_changed_hook)(int, boolean_t); #endif #if defined(__x86_64__) -#include +#include #include #endif @@ -133,32 +134,27 @@ processor_up( { processor_set_t pset; spl_t s; - boolean_t pset_online = false; s = splsched(); init_ast_check(processor); pset = processor->processor_set; pset_lock(pset); - if (pset->online_processor_count == 0) { - /* About to bring the first processor of a pset online */ - pset_online = true; - } + ++pset->online_processor_count; pset_update_processor_state(pset, processor, PROCESSOR_RUNNING); os_atomic_inc(&processor_avail_count, relaxed); if (processor->is_recommended) { os_atomic_inc(&processor_avail_count_user, relaxed); + SCHED(pset_made_schedulable)(processor, pset, false); } - commpage_update_active_cpus(); - if (pset_online) { - /* New pset is coming up online; callout to the - * scheduler in case it wants to adjust runqs. - */ - SCHED(pset_made_schedulable)(processor, pset, true); - /* pset lock dropped */ - } else { - pset_unlock(pset); + if (processor->processor_primary == processor) { + os_atomic_inc(&primary_processor_avail_count, relaxed); + if (processor->is_recommended) { + os_atomic_inc(&primary_processor_avail_count_user, relaxed); + } } + commpage_update_active_cpus(); + pset_unlock(pset); ml_cpu_up(); splx(s); @@ -214,6 +210,7 @@ processor_shutdown( processor_set_t pset; spl_t s; + ml_cpu_begin_state_transition(processor->cpu_id); s = splsched(); pset = processor->processor_set; pset_lock(pset); @@ -223,10 +220,22 @@ processor_shutdown( */ pset_unlock(pset); splx(s); + ml_cpu_end_state_transition(processor->cpu_id); return KERN_SUCCESS; } + if (!ml_cpu_can_exit(processor->cpu_id)) { + /* + * Failure if disallowed by arch code. + */ + pset_unlock(pset); + splx(s); + ml_cpu_end_state_transition(processor->cpu_id); + + return KERN_FAILURE; + } + if (processor->state == PROCESSOR_START) { /* * Failure if currently being started. 
@@ -254,10 +263,12 @@ processor_shutdown( if (processor->state == PROCESSOR_SHUTDOWN) { pset_unlock(pset); splx(s); + ml_cpu_end_state_transition(processor->cpu_id); return KERN_SUCCESS; } + ml_broadcast_cpu_event(CPU_EXIT_REQUESTED, processor->cpu_id); pset_update_processor_state(pset, processor, PROCESSOR_SHUTDOWN); pset_unlock(pset); @@ -265,6 +276,8 @@ processor_shutdown( splx(s); cpu_exit_wait(processor->cpu_id); + ml_cpu_end_state_transition(processor->cpu_id); + ml_broadcast_cpu_event(CPU_EXITED, processor->cpu_id); return KERN_SUCCESS; } @@ -314,6 +327,12 @@ processor_doshutdown( if (processor->is_recommended) { os_atomic_dec(&processor_avail_count_user, relaxed); } + if (processor->processor_primary == processor) { + os_atomic_dec(&primary_processor_avail_count, relaxed); + if (processor->is_recommended) { + os_atomic_dec(&primary_processor_avail_count_user, relaxed); + } + } commpage_update_active_cpus(); SCHED(processor_queue_shutdown)(processor); /* pset lock dropped */ @@ -357,6 +376,7 @@ processor_offline( assert(ml_get_interrupts_enabled() == FALSE); assert(self->continuation == NULL); assert(processor->processor_offlined == false); + assert(processor->running_timers_active == false); bool enforce_quiesce_safety = gEnforceQuiesceSafety; @@ -425,7 +445,7 @@ processor_offline_intstack( assert(processor == current_processor()); assert(processor->active_thread == current_thread()); - timer_stop(PROCESSOR_DATA(processor, current_state), processor->last_dispatch); + timer_stop(processor->current_state, processor->last_dispatch); cpu_quiescent_counter_leave(processor->last_dispatch); @@ -528,7 +548,7 @@ ml_io_read(uintptr_t vaddr, int size) (void)ml_set_interrupts_enabled(istate); if (phyreadpanic && (machine_timeout_suspended() == FALSE)) { - panic_io_port_read(); + panic_notify(); panic("Read from IO vaddr 0x%lx paddr 0x%lx took %llu ns, " "result: 0x%llx (start: %llu, end: %llu), ceiling: %llu", vaddr, paddr, (eabs - sabs), result, sabs, eabs, @@ -645,7 +665,7 @@ ml_io_write(uintptr_t vaddr, uint64_t val, int size) (void)ml_set_interrupts_enabled(istate); if (phywritepanic && (machine_timeout_suspended() == FALSE)) { - panic_io_port_read(); + panic_notify(); panic("Write to IO vaddr %p paddr %p val 0x%llx took %llu ns," " (start: %llu, end: %llu), ceiling: %llu", (void *)vaddr, (void *)paddr, val, (eabs - sabs), sabs, eabs, @@ -700,3 +720,49 @@ ml_io_write64(uintptr_t vaddr, uint64_t val) { ml_io_write(vaddr, val, 8); } + +struct cpu_callback_chain_elem { + cpu_callback_t fn; + void *param; + struct cpu_callback_chain_elem *next; +}; + +static struct cpu_callback_chain_elem *cpu_callback_chain; +static LCK_GRP_DECLARE(cpu_callback_chain_lock_grp, "cpu_callback_chain"); +static LCK_SPIN_DECLARE(cpu_callback_chain_lock, &cpu_callback_chain_lock_grp); + +void +cpu_event_register_callback(cpu_callback_t fn, void *param) +{ + struct cpu_callback_chain_elem *new_elem; + + new_elem = zalloc_permanent_type(struct cpu_callback_chain_elem); + if (!new_elem) { + panic("can't allocate cpu_callback_chain_elem"); + } + + lck_spin_lock(&cpu_callback_chain_lock); + new_elem->next = cpu_callback_chain; + new_elem->fn = fn; + new_elem->param = param; + os_atomic_store(&cpu_callback_chain, new_elem, release); + lck_spin_unlock(&cpu_callback_chain_lock); +} + +__attribute__((noreturn)) +void +cpu_event_unregister_callback(__unused cpu_callback_t fn) +{ + panic("Unfortunately, cpu_event_unregister_callback is unimplemented."); +} + +void +ml_broadcast_cpu_event(enum cpu_event event, unsigned int 
cpu_or_cluster) +{ + struct cpu_callback_chain_elem *cursor; + + cursor = os_atomic_load(&cpu_callback_chain, dependency); + for (; cursor != NULL; cursor = cursor->next) { + cursor->fn(cursor->param, event, cpu_or_cluster); + } +} diff --git a/osfmk/kern/machine.h b/osfmk/kern/machine.h index 2c285e5c4..724cf19cc 100644 --- a/osfmk/kern/machine.h +++ b/osfmk/kern/machine.h @@ -164,5 +164,12 @@ extern void machine_switch_perfcontrol_state_update(perfcontrol_event event, uint32_t flags, thread_t thread); +#if CONFIG_THREAD_GROUPS +extern void machine_thread_group_init(struct thread_group *tg); +extern void machine_thread_group_deinit(struct thread_group *tg); +extern void machine_thread_group_flags_update(struct thread_group *tg, uint32_t flags); +extern void machine_thread_group_blocked(struct thread_group *tg_blocked, struct thread_group *tg_blocking, uint32_t flags, thread_t blocked_thread); +extern void machine_thread_group_unblocked(struct thread_group *tg_unblocked, struct thread_group *tg_unblocking, uint32_t flags, thread_t unblocked_thread); +#endif #endif /* _KERN_MACHINE_H_ */ diff --git a/osfmk/kern/misc_protos.h b/osfmk/kern/misc_protos.h index 21742d664..13ee8946e 100644 --- a/osfmk/kern/misc_protos.h +++ b/osfmk/kern/misc_protos.h @@ -144,14 +144,10 @@ extern int copyoutmsg( user_addr_t user_addr, mach_msg_size_t nbytes); -/* Invalidate copy window(s) cache */ -extern void inval_copy_windows(thread_t); -extern void copy_window_fault(thread_t, vm_map_t, int); - extern int sscanf(const char *input, const char *fmt, ...) __scanflike(2, 3); /* sprintf() is being deprecated. Please use snprintf() instead. */ -extern integer_t sprintf(char *buf, const char *fmt, ...) __deprecated; +extern integer_t sprintf(char *buf, const char *fmt, ...) __printflike(2, 3) __deprecated; extern int printf(const char *format, ...) __printflike(1, 2); extern int vprintf(const char *format, va_list ap); @@ -176,12 +172,10 @@ extern int kdb_log(const char *format, ...) __printflike(1, 2); extern int kdb_printf_unbuffered(const char *format, ...) __printflike(1, 2); -extern void printf_init(void); - extern int snprintf(char *, size_t, const char *, ...) __printflike(3, 4); extern int scnprintf(char *, size_t, const char *, ...) __printflike(3, 4); -extern void log(int level, char *fmt, ...); +extern void log(int level, char *fmt, ...) __printflike(2, 3); void _doprnt( @@ -265,7 +259,9 @@ extern kern_return_t kernel_get_special_port( user_addr_t get_useraddr(void); /* symbol lookup */ +#ifndef __cplusplus struct kmod_info_t; +#endif extern uint64_t early_random(void); diff --git a/osfmk/kern/mk_timer.c b/osfmk/kern/mk_timer.c index 730d876b1..abf6e3be1 100644 --- a/osfmk/kern/mk_timer.c +++ b/osfmk/kern/mk_timer.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2020 Apple Computer, Inc. All rights reserved. 
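
The cpu_event_register_callback()/ml_broadcast_cpu_event() pair added above gives interested subsystems a hook around CPU state transitions, such as the CPU_EXIT_REQUESTED and CPU_EXITED notifications now raised from processor_shutdown(). A sketch of a client, assuming cpu_callback_t matches the three-argument dispatch in ml_broadcast_cpu_event() and using hypothetical my_subsys names:

static void
my_cpu_event_cb(void *param, enum cpu_event event, unsigned int cpu_or_cluster)
{
	(void)param;

	switch (event) {
	case CPU_EXIT_REQUESTED:
		/* quiesce per-CPU state for cpu_or_cluster before it goes offline */
		break;
	case CPU_EXITED:
		/* the CPU has finished exiting */
		break;
	default:
		break;
	}
}

static void
my_subsys_cpu_init(void)
{
	/*
	 * Registration is one-way: the chain element comes from
	 * zalloc_permanent_type() and cpu_event_unregister_callback() panics.
	 */
	cpu_event_register_callback(my_cpu_event_cb, NULL);
}
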
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -47,7 +47,17 @@ #include #include -static zone_t mk_timer_zone; +struct mk_timer { + decl_simple_lock_data(, lock); + thread_call_data_t mkt_thread_call; + uint32_t is_dead:1, + is_armed:1; + int active; + ipc_port_t port; +}; + +static ZONE_DECLARE(mk_timer_zone, "mk_timer", + sizeof(struct mk_timer), ZC_NOENCRYPT); static mach_port_qos_t mk_timer_qos = { .name = FALSE, @@ -63,7 +73,7 @@ mach_port_name_t mk_timer_create_trap( __unused struct mk_timer_create_trap_args *args) { - mk_timer_t timer; + struct mk_timer* timer; ipc_space_t myspace = current_space(); mach_port_name_t name = MACH_PORT_NULL; ipc_port_init_flags_t init_flags; @@ -71,12 +81,12 @@ mk_timer_create_trap( kern_return_t result; /* Allocate and initialize local state of a timer object */ - timer = (mk_timer_t)zalloc(mk_timer_zone); + timer = (struct mk_timer*)zalloc(mk_timer_zone); if (timer == NULL) { return MACH_PORT_NULL; } simple_lock_init(&timer->lock, 0); - thread_call_setup(&timer->call_entry, mk_timer_expire, timer); + thread_call_setup(&timer->mkt_thread_call, mk_timer_expire, timer); timer->is_armed = timer->is_dead = FALSE; timer->active = 0; @@ -114,11 +124,11 @@ void mk_timer_port_destroy( ipc_port_t port) { - mk_timer_t timer = NULL; + struct mk_timer* timer = NULL; ip_lock(port); if (ip_kotype(port) == IKOT_TIMER) { - timer = (mk_timer_t) ip_get_kobject(port); + timer = (struct mk_timer*) ip_get_kobject(port); assert(timer != NULL); ipc_kobject_set_atomically(port, IKO_NULL, IKOT_NONE); simple_lock(&timer->lock, LCK_GRP_NULL); @@ -127,7 +137,7 @@ mk_timer_port_destroy( ip_unlock(port); if (timer != NULL) { - if (thread_call_cancel(&timer->call_entry)) { + if (thread_call_cancel(&timer->mkt_thread_call)) { timer->active--; } timer->is_armed = FALSE; @@ -145,25 +155,12 @@ mk_timer_port_destroy( } } -void -mk_timer_init(void) -{ - int s = sizeof(mk_timer_data_t); - - assert(!(mk_timer_zone != NULL)); - - mk_timer_zone = zinit(s, (4096 * s), (16 * s), "mk_timer"); - - zone_change(mk_timer_zone, Z_NOENCRYPT, TRUE); -} - static void mk_timer_expire( void *p0, __unused void *p1) { - mk_timer_t timer = p0; - ipc_port_t port; + struct mk_timer* timer = p0; simple_lock(&timer->lock, LCK_GRP_NULL); @@ -173,7 +170,7 @@ mk_timer_expire( return; } - port = timer->port; + ipc_port_t port = timer->port; assert(port != IP_NULL); assert(timer->active == 1); @@ -236,6 +233,7 @@ mk_timer_destroy_trap( if (ip_kotype(port) == IKOT_TIMER) { ip_unlock(port); + /* TODO: this should be mach_port_mod_refs */ result = mach_port_destroy(myspace, name); } else { ip_unlock(port); @@ -262,7 +260,7 @@ mk_timer_destroy_trap( static kern_return_t mk_timer_arm_trap_internal(mach_port_name_t name, uint64_t expire_time, uint64_t mk_leeway, uint64_t mk_timer_flags) { - mk_timer_t timer; + struct mk_timer* timer; ipc_space_t myspace = current_space(); ipc_port_t port; kern_return_t result; @@ -273,7 +271,8 @@ mk_timer_arm_trap_internal(mach_port_name_t name, uint64_t expire_time, uint64_t } if (ip_kotype(port) == IKOT_TIMER) { - timer = (mk_timer_t) ip_get_kobject(port); + + timer = (struct mk_timer*) ip_get_kobject(port); assert(timer != NULL); simple_lock(&timer->lock, LCK_GRP_NULL); @@ -295,12 +294,12 @@ mk_timer_arm_trap_internal(mach_port_name_t name, uint64_t expire_time, uint64_t } if (!thread_call_enter_delayed_with_leeway( - &timer->call_entry, NULL, + &timer->mkt_thread_call, NULL, expire_time, mk_leeway, tcflags)) { timer->active++; } } else { - if 
(!thread_call_enter1(&timer->call_entry, NULL)) { + if (!thread_call_enter1(&timer->mkt_thread_call, NULL)) { timer->active++; } } @@ -346,7 +345,7 @@ mk_timer_cancel_trap( mach_port_name_t name = args->name; mach_vm_address_t result_time_addr = args->result_time; uint64_t armed_time = 0; - mk_timer_t timer; + struct mk_timer* timer; ipc_space_t myspace = current_space(); ipc_port_t port; kern_return_t result; @@ -357,15 +356,15 @@ mk_timer_cancel_trap( } if (ip_kotype(port) == IKOT_TIMER) { - timer = (mk_timer_t) ip_get_kobject(port); + timer = (struct mk_timer*) ip_get_kobject(port); assert(timer != NULL); simple_lock(&timer->lock, LCK_GRP_NULL); assert(timer->port == port); ip_unlock(port); if (timer->is_armed) { - armed_time = timer->call_entry.tc_call.deadline; - if (thread_call_cancel(&timer->call_entry)) { + armed_time = thread_call_get_armed_deadline(&timer->mkt_thread_call); + if (thread_call_cancel(&timer->mkt_thread_call)) { timer->active--; } timer->is_armed = FALSE; @@ -377,10 +376,8 @@ mk_timer_cancel_trap( result = KERN_INVALID_ARGUMENT; } - if (result == KERN_SUCCESS) { - if (result_time_addr != 0 && - copyout((void *)&armed_time, result_time_addr, - sizeof(armed_time)) != 0) { + if (result == KERN_SUCCESS && result_time_addr != 0) { + if (copyout((void *)&armed_time, result_time_addr, sizeof(armed_time)) != 0) { result = KERN_FAILURE; } } diff --git a/osfmk/kern/mk_timer.h b/osfmk/kern/mk_timer.h index 01af9ed5a..e8081de03 100644 --- a/osfmk/kern/mk_timer.h +++ b/osfmk/kern/mk_timer.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2020 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -40,23 +40,7 @@ #ifdef MACH_KERNEL_PRIVATE #include -#include - -struct mk_timer { - decl_simple_lock_data(, lock); - thread_call_data_t call_entry; - uint32_t is_dead:1, - is_armed:1; - int active; - ipc_port_t port; -}; - -typedef struct mk_timer *mk_timer_t, mk_timer_data_t; - -void mk_timer_port_destroy( - ipc_port_t port); - -void mk_timer_init(void); +void mk_timer_port_destroy(ipc_port_t port); #endif /* MACH_KERNEL_PRIVATE */ diff --git a/osfmk/kern/monotonic.h b/osfmk/kern/monotonic.h index 9b744407b..ec2129ca6 100644 --- a/osfmk/kern/monotonic.h +++ b/osfmk/kern/monotonic.h @@ -107,13 +107,6 @@ void mt_sleep(void); */ void mt_wake_per_core(void); -#if __ARM_CLUSTER_COUNT__ -/* - * Called when a cluster is initialized. - */ -void mt_cluster_init(void); -#endif /* __ARM_CLUSTER_COUNT__ */ - /* * "Up-call" to the Mach layer to update counters from a PMI. */ diff --git a/osfmk/kern/mpqueue.h b/osfmk/kern/mpqueue.h index 0c966c67b..1420801ba 100644 --- a/osfmk/kern/mpqueue.h +++ b/osfmk/kern/mpqueue.h @@ -6,12 +6,15 @@ __BEGIN_DECLS #ifdef MACH_KERNEL_PRIVATE +#include + /*----------------------------------------------------------------*/ /* * Define macros for queues with locks. 
*/ struct mpqueue_head { struct queue_entry head; /* header for queue */ + struct priority_queue_deadline_min mpq_pqhead; uint64_t earliest_soft_deadline; uint64_t count; lck_mtx_t lock_data; @@ -22,9 +25,6 @@ struct mpqueue_head { typedef struct mpqueue_head mpqueue_head_t; -#define round_mpq(size) (size) - - #if defined(__i386__) || defined(__x86_64__) #define mpqueue_init(q, lck_grp, lck_attr) \ @@ -36,6 +36,7 @@ MACRO_BEGIN \ lck_attr); \ (q)->earliest_soft_deadline = UINT64_MAX; \ (q)->count = 0; \ + priority_queue_init(&(q)->mpq_pqhead); \ MACRO_END #else @@ -46,6 +47,7 @@ MACRO_BEGIN \ lck_mtx_init(&(q)->lock_data, \ lck_grp, \ lck_attr); \ + priority_queue_init(&(q)->mpq_pqhead); \ MACRO_END #endif diff --git a/osfmk/kern/mpsc_queue.c b/osfmk/kern/mpsc_queue.c index 4784b0dc1..1b47d8b4e 100644 --- a/osfmk/kern/mpsc_queue.c +++ b/osfmk/kern/mpsc_queue.c @@ -125,9 +125,22 @@ static void _mpsc_queue_thread_continue(void *param, wait_result_t wr __unused) { mpsc_daemon_queue_t dq = param; + mpsc_daemon_queue_kind_t kind = dq->mpd_kind; + thread_t self = dq->mpd_thread; + + __builtin_assume(self != THREAD_NULL); + + if (kind == MPSC_QUEUE_KIND_THREAD_CRITICAL) { + self->options |= TH_OPT_SYSTEM_CRITICAL; + } assert(dq->mpd_thread == current_thread()); - _mpsc_daemon_queue_drain(dq, dq->mpd_thread); + _mpsc_daemon_queue_drain(dq, self); + + if (kind == MPSC_QUEUE_KIND_THREAD_CRITICAL) { + self->options &= ~TH_OPT_SYSTEM_CRITICAL; + } + thread_block_parameter(_mpsc_queue_thread_continue, dq); } @@ -234,14 +247,9 @@ static void _mpsc_daemon_queue_drain(mpsc_daemon_queue_t dq, thread_t self) { mpsc_daemon_invoke_fn_t invoke = dq->mpd_invoke; - mpsc_daemon_queue_kind_t kind = dq->mpd_kind; mpsc_queue_chain_t head, cur, tail; mpsc_daemon_queue_state_t st; - if (kind == MPSC_QUEUE_KIND_THREAD_CRITICAL) { - self->options |= TH_OPT_SYSTEM_CRITICAL; - } - again: /* * Most of the time we're woken up because we're dirty, @@ -308,10 +316,6 @@ again: /* dereferencing `dq` past this point is unsafe */ - if (kind == MPSC_QUEUE_KIND_THREAD_CRITICAL) { - self->options &= ~TH_OPT_SYSTEM_CRITICAL; - } - if (__improbable(st & MPSC_QUEUE_STATE_CANCELED)) { thread_wakeup(&dq->mpd_state); if (self) { diff --git a/osfmk/kern/mpsc_queue.h b/osfmk/kern/mpsc_queue.h index a2a6218ec..4f186ee26 100644 --- a/osfmk/kern/mpsc_queue.h +++ b/osfmk/kern/mpsc_queue.h @@ -464,11 +464,11 @@ typedef enum mpsc_daemon_queue_kind { * @brief * Internal type, not to be used by clients. */ -typedef enum mpsc_daemon_queue_state { +__options_decl(mpsc_daemon_queue_state_t, uint32_t, { MPSC_QUEUE_STATE_DRAINING = 0x0001, MPSC_QUEUE_STATE_WAKEUP = 0x0002, MPSC_QUEUE_STATE_CANCELED = 0x0004, -} mpsc_daemon_queue_state_t; +}); struct mpsc_daemon_queue { mpsc_daemon_queue_kind_t mpd_kind; diff --git a/osfmk/kern/percpu.h b/osfmk/kern/percpu.h new file mode 100644 index 000000000..4b2ddfaf8 --- /dev/null +++ b/osfmk/kern/percpu.h @@ -0,0 +1,285 @@ +/* + * Copyright (c) 2020 Apple Inc. All rights reserved. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. 
The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. + * + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ + */ + +#ifndef _KERN_PERCPU_H_ +#define _KERN_PERCPU_H_ + +#include + +__BEGIN_DECLS + +#if XNU_KERNEL_PRIVATE +#include +#include + +#pragma GCC visibility push(hidden) + +/*! + * @macro PERCPU_DECL + * + * @abstract + * Declares a per-CPU variable in a header. + * + * @param type_t the per-CPU variable type + * @param name the per-CPU variable name + */ +#define PERCPU_DECL(type_t, name) \ + extern type_t __PERCPU_NAME(name) + +/*! + * @macro PERCPU_DATA + * + * @abstract + * Defines a per-CPU variable in a translation unit. + * + * @discussion + * @c PERCPU_DECL can be used in headers to export the variable to clients. + * + * By default, per-cpu data is 0-initialized. Per-CPU data is allocated during + * the STARTUP_SUB_KMEM_ALLOC phase and can be initialized with a STARTUP + * callback in any later phase. + * + * Usage is: + * + * [ static ] type PERCPU_DATA(name); + * + * + * @param name the per-CPU variable name + */ +#define PERCPU_DATA(name) \ + __percpu __PERCPU_NAME(name) = {0} + +/*! + * @macro PERCPU_GET + * + * @abstract + * Gets a pointer to the per-CPU instance of the variable for the processor the + * code is currently running on. + * + * @discussion + * It is expected that preemption or interrupts are disabled when this is used, + * as a context-switch might move the current thread to another CPU. + * + * It is also valid in code that wasn't already disabling preemption and cares + * about code-gen size a lot to use this outside of a preemption-disabled + * section provided that the data is modified using atomics. + * + * Note that if several per-CPU pointers are acquired in short succession, + * @c PERCPU_GET_WITH_BASE can be used to avoid the repeated calls to + * @c current_percpu_base() which the compiler wont't elide. + * + * @param name the per-CPU variable name + */ +#define PERCPU_GET(name) \ + __PERCPU_CAST(name, current_percpu_base() + __PERCPU_ADDR(name)) + +/*! + * @function current_percpu_base() + * + * @abstract + * Returns an offset that can be passed to @c PERCPU_GET_WITH_BASE(). + * + * @see PERCPU_GET() for conditions of use. + */ +extern vm_offset_t current_percpu_base(void); + +/*! + * @macro PERCPU_GET_MASTER + * + * @abstract + * Gets a pointer to the master per-CPU instance of the variable. + * + * @param base the per-CPU base to use + * @param name the per-CPU variable name + */ +#define PERCPU_GET_MASTER(name) \ + (&__PERCPU_NAME(name)) + +/*! + * @macro PERCPU_GET_WITH_BASE + * + * @abstract + * Gets a pointer to the per-CPU instance of the variable for the specified + * base. 
+ * + * @param base the per-CPU base to use + * @param name the per-CPU variable name + */ +#define PERCPU_GET_WITH_BASE(base, name) \ + __PERCPU_CAST(name, base + __PERCPU_ADDR(name)) + +/*! + * @macro PERCPU_GET_RELATIVE + * + * @abstract + * Gets a pointer to the per-CPU instance of a variable relative to another + * known one. + * + * @description + * When a per-CPU slot address is known, but the caller doesn't know the base + * from which it was derived, then this allows to compute another per-CPU slot + * address for a different variable but for the same CPU, without any loads. + * + * @param name the per-CPU variable name + * @param other the other per-CPU variable name + * @param ptr a pointer to the other variable slot + */ +#define PERCPU_GET_RELATIVE(name, other, ptr) ({ \ + __PERCPU_TYPE(other) __other_ptr = (ptr); /* type check */ \ + vm_offset_t __offs = __PERCPU_ADDR(name) - __PERCPU_ADDR(other); \ + __PERCPU_CAST(name, (vm_address_t)__other_ptr + __offs); \ +}) + +/*! + * @macro percpu_foreach_base() + * + * @abstract + * Enumerates all Per-CPU variable bases. + * + * @param it the name of the iterator + */ +#define percpu_foreach_base(it) \ + for (vm_offset_t it = 0, \ + __next_ ## it = percpu_base.start, \ + __end_ ## it = percpu_base.end; \ + \ + it <= __end_ ## it; \ + \ + it = __next_ ## it, \ + __next_ ## it += percpu_section_size()) + +/*! + * @macro percpu_foreach() + * + * @abstract + * Enumerates all Per-CPU variable instances. + * + * @param it the name of the iterator + * @param name the per-CPU variable name + */ +#define percpu_foreach(it, name) \ + for (__PERCPU_TYPE(name) it, \ + __base_ ## it = NULL, \ + __next_ ## it = (typeof(it))percpu_base.start, \ + __end_ ## it = (typeof(it))percpu_base.end; \ + \ + (it = (typeof(it))(__PERCPU_ADDR(name) + (vm_address_t)__base_ ## it), \ + __base_ ## it <= __end_ ## it); \ + \ + __base_ ## it = __next_ ## it, \ + __next_ ## it = (typeof(it))((vm_address_t)__base_ ## it + percpu_section_size())) + +/*! + * @macro percpu_foreach_secondary_base() + * + * @abstract + * Enumerates all Per-CPU variable bases, skipping the master slot. + * + * @param it the name of the iterator + */ +#define percpu_foreach_secondary_base(it) \ + for (vm_offset_t it = percpu_base.start, __end_ ## it = percpu_base.end; \ + it <= __end_ ## it; it += percpu_section_size()) + +/*! + * @macro percpu_foreach_secondary() + * + * @abstract + * Enumerates all Per-CPU variable instances, skipping the master slot. + * + * @param it the name of the iterator + * @param name the per-CPU variable name + */ +#define percpu_foreach_secondary(it, name) \ + for (__PERCPU_TYPE(name) it, \ + __base_ ## it = (typeof(it))percpu_base.start, \ + __end_ ## it = (typeof(it))percpu_base.end; \ + \ + (it = (typeof(it))(__PERCPU_ADDR(name) + (vm_address_t)__base_ ## it), \ + __base_ ## it <= __end_ ## it); \ + \ + __base_ ## it = (typeof(it))((vm_address_t)__base_ ## it + percpu_section_size())) + +#pragma mark - implementation details + +/* + * Below this point are implementation details that should not be used directly, + * except by the macros above, or architecture specific code. 
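
To make the macros above concrete, here is a small per-CPU counter sketch (the my_stat names are illustrative): writers pin the current CPU by disabling preemption across the PERCPU_GET() dereference, and a reader sums every CPU's slot with percpu_foreach():

static uint64_t PERCPU_DATA(my_stat_hits);

static void
my_stat_record_hit(void)
{
	/* keep the thread on one CPU across the PERCPU_GET() dereference */
	disable_preemption();
	(*PERCPU_GET(my_stat_hits))++;
	enable_preemption();
}

static uint64_t
my_stat_read_all(void)
{
	uint64_t total = 0;

	/* visits the master slot and every secondary CPU's slot */
	percpu_foreach(it, my_stat_hits) {
		total += *it;
	}
	return total;
}
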
+ */ + +#define __percpu __attribute__((section("__DATA, __percpu"))) +#define __PERCPU_NAME(name) percpu_slot_ ## name +#define __PERCPU_ADDR(name) ((vm_offset_t)&__PERCPU_NAME(name)) +#define __PERCPU_TYPE(name) typeof(&__PERCPU_NAME(name)) +#define __PERCPU_CAST(name, expr) ((__PERCPU_TYPE(name))(expr)) + +/* + * Note for implementors: + * + * A `base` represents a pointer in the percpu allocation offset by + * `percpu_section_start()` so that PERCPU_GET() is a single addition. + * + * percpu_base.end is inclusive, so that percpu_foreach() and + * percpu_foreach_base() can do a `<=` comparison. + * + * Because the first base is `0` (because the master CPU is using the static + * percpu section), it allows for the compiler to know that for the first + * iteration the comparison is always true. + */ +extern struct percpu_base { + vm_address_t start; + vm_address_t end; + vm_offset_t size; +} percpu_base; + +static __pure2 inline vm_offset_t +percpu_section_start(void) +{ + extern char __percpu_section_start[] __SECTION_START_SYM("__DATA", "__percpu"); + return (vm_offset_t)__percpu_section_start; +} + +static __pure2 inline vm_offset_t +percpu_section_end(void) +{ + extern char __percpu_section_end[] __SECTION_END_SYM("__DATA", "__percpu"); + return (vm_offset_t)__percpu_section_end; +} + +static __pure2 inline vm_size_t +percpu_section_size(void) +{ + return percpu_section_end() - percpu_section_start(); +} + +#pragma GCC visibility pop +#endif /* XNU_KERNEL_PRIVATE */ + +__END_DECLS + +#endif /* _KERN_PERCPU_H_ */ diff --git a/osfmk/kern/policy_internal.h b/osfmk/kern/policy_internal.h index 094113569..fc1b4b6f4 100644 --- a/osfmk/kern/policy_internal.h +++ b/osfmk/kern/policy_internal.h @@ -84,7 +84,7 @@ extern kern_return_t task_importance(task_t task, integer_t importance); #define TASK_POLICY_DARWIN_BG_IOPOL IMP_TASK_POLICY_DARWIN_BG_IOPOL /* task-only attributes */ -#define TASK_POLICY_TAL IMP_TASK_POLICY_TAL +/* unused was: IMP_TASK_POLICY_TAL */ #define TASK_POLICY_BOOST IMP_TASK_POLICY_BOOST #define TASK_POLICY_ROLE IMP_TASK_POLICY_ROLE /* unused 0x2B */ @@ -128,12 +128,12 @@ extern void proc_set_thread_policy_with_tid(task_t task, uint64_t tid, int categ extern boolean_t thread_has_qos_policy(thread_t thread); extern kern_return_t thread_remove_qos_policy(thread_t thread); -extern int proc_darwin_role_to_task_role(int darwin_role, int* task_role); -extern int proc_task_role_to_darwin_role(int task_role); +extern int proc_darwin_role_to_task_role(int darwin_role, task_role_t* task_role); +extern int proc_task_role_to_darwin_role(task_role_t task_role); /* Functions used by kern_exec.c */ extern void task_set_main_thread_qos(task_t task, thread_t main_thread); -extern void proc_set_task_spawnpolicy(task_t task, thread_t thread, int apptype, int qos_clamp, int role, +extern void proc_set_task_spawnpolicy(task_t task, thread_t thread, int apptype, int qos_clamp, task_role_t role, ipc_port_t * portwatch_ports, uint32_t portwatch_count); extern void proc_inherit_task_role(task_t new_task, task_t old_task); @@ -183,17 +183,17 @@ extern void thread_set_workq_pri(thread_t thread, thread_qos_t qos, integer_t pr extern uint8_t thread_workq_pri_for_qos(thread_qos_t qos) __pure2; extern thread_qos_t thread_workq_qos_for_pri(int priority); -extern int +extern thread_qos_t task_get_default_manager_qos(task_t task); extern void proc_thread_qos_deallocate(thread_t thread); extern int task_clear_cpuusage(task_t task, int cpumon_entitled); -#if CONFIG_EMBEDDED +#if CONFIG_TASKWATCH /* Taskwatch 
related external BSD interface */ extern int proc_lf_pidbind(task_t curtask, uint64_t tid, task_t target_task, int bind); -#endif /* CONFIG_EMBEDDED */ +#endif /* CONFIG_TASKWATCH */ /* Importance inheritance functions not under IMPORTANCE_INHERITANCE */ extern void task_importance_mark_donor(task_t task, boolean_t donating); @@ -225,7 +225,7 @@ extern boolean_t proc_task_is_tal(task_t task); extern int proc_get_task_ruse_cpu(task_t task, uint32_t *policyp, uint8_t *percentagep, uint64_t *intervalp, uint64_t *deadlinep); -extern int proc_set_task_ruse_cpu(task_t task, uint32_t policy, uint8_t percentage, +extern int proc_set_task_ruse_cpu(task_t task, uint16_t policy, uint8_t percentage, uint64_t interval, uint64_t deadline, int cpumon_entitled); extern int task_suspend_cpumon(task_t task); extern int task_resume_cpumon(task_t task); @@ -300,8 +300,8 @@ extern void thread_policy_update_locked(thread_t thread, task_pend_token_t pend_ extern void thread_policy_update_complete_unlocked(thread_t task, task_pend_token_t pend_token); typedef struct { - int qos_pri[THREAD_QOS_LAST]; - int qos_iotier[THREAD_QOS_LAST]; + int16_t qos_pri[THREAD_QOS_LAST]; + int16_t qos_iotier[THREAD_QOS_LAST]; uint32_t qos_through_qos[THREAD_QOS_LAST]; uint32_t qos_latency_qos[THREAD_QOS_LAST]; } qos_policy_params_t; @@ -328,14 +328,13 @@ extern void thread_policy_create(thread_t thread); extern boolean_t task_is_daemon(task_t task); extern boolean_t task_is_app(task_t task); -#if CONFIG_EMBEDDED +#if CONFIG_TASKWATCH /* Taskwatch related external interface */ extern void thead_remove_taskwatch(thread_t thread); extern void task_removewatchers(task_t task); -extern void task_watch_init(void); typedef struct task_watcher task_watch_t; -#endif /* CONFIG_EMBEDDED */ +#endif /* CONFIG_TASKWATCH */ #if IMPORTANCE_INHERITANCE extern boolean_t task_is_marked_importance_donor(task_t task); @@ -354,7 +353,7 @@ extern boolean_t task_is_marked_importance_denap_receiver(task_t task); extern void proc_init_cpumon_params(void); extern void thread_policy_init(void); -int task_compute_main_thread_qos(task_t task); +thread_qos_t task_compute_main_thread_qos(task_t task); /* thread policy internals */ extern void thread_policy_reset(thread_t thread); diff --git a/osfmk/kern/printf.c b/osfmk/kern/printf.c index 0feea0aeb..898086d8e 100644 --- a/osfmk/kern/printf.c +++ b/osfmk/kern/printf.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2019 Apple Inc. All rights reserved. + * Copyright (c) 2000-2020 Apple Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -267,6 +267,10 @@ __doprnt( } numeric_type = INT; int nprinted = 0; + if (radix < 2 || radix > 36) { + radix = 10; + } + while ((c = *fmt) != '\0') { if (c != '%') { (*putc)(c, arg); @@ -443,7 +447,7 @@ __doprnt( } case 'c': - c = va_arg(argp, int); + c = (char)va_arg(argp, int); (*putc)(c, arg); nprinted++; break; @@ -503,6 +507,7 @@ __doprnt( case 'o': truncate = _doprnt_truncates; + OS_FALLTHROUGH; case 'O': base = 8; goto print_unsigned; @@ -532,13 +537,14 @@ __doprnt( } case 'd': + case 'i': truncate = _doprnt_truncates; base = 10; goto print_signed; case 'u': truncate = _doprnt_truncates; - /* FALLTHROUGH */ + OS_FALLTHROUGH; case 'U': base = 10; goto print_unsigned; @@ -548,7 +554,7 @@ __doprnt( if (sizeof(int) < sizeof(void *)) { long_long = 1; } - /* FALLTHROUGH */ + OS_FALLTHROUGH; case 'x': truncate = _doprnt_truncates; base = 16; @@ -561,14 +567,14 @@ __doprnt( case 'r': truncate = _doprnt_truncates; - /* FALLTHROUGH */ + OS_FALLTHROUGH; case 'R': base = radix; goto print_signed; case 'n': truncate = _doprnt_truncates; - /* FALLTHROUGH */ + OS_FALLTHROUGH; case 'N': base = radix; goto print_unsigned; @@ -740,7 +746,7 @@ dummy_putc(int ch, void *arg) * already panicing), so we'll just do nothing instead of crashing. */ if (real_putc) { - real_putc(ch); + real_putc((char)ch); } } @@ -770,32 +776,44 @@ _doprnt_log( boolean_t new_printf_cpu_number = FALSE; #endif /* MP_PRINTF */ -decl_simple_lock_data(, printf_lock); -decl_simple_lock_data(, bsd_log_spinlock); - -lck_grp_t oslog_stream_lock_grp; -decl_lck_spin_data(, oslog_stream_lock); -void oslog_lock_init(void); +SIMPLE_LOCK_DECLARE(bsd_log_spinlock, 0); -extern void bsd_log_init(void); -void bsd_log_lock(void); +bool bsd_log_lock(bool); +void bsd_log_lock_safe(void); void bsd_log_unlock(void); -void -printf_init(void) +/* + * Locks OS log lock and returns true if successful, false otherwise. Locking + * always succeeds in a safe context but may block. Locking in an unsafe context + * never blocks but fails if someone else is already holding the lock. + * + * A caller is responsible to decide whether the context is safe or not. + * + * As a rule of thumb following cases are *not* considered safe: + * - Interrupts are disabled + * - Pre-emption is disabled + * - When in a debugger + * - During a panic + */ +bool +bsd_log_lock(bool safe) { - /* - * Lock is only really needed after the first thread is created. - */ - simple_lock_init(&printf_lock, 0); - simple_lock_init(&bsd_log_spinlock, 0); - bsd_log_init(); + if (!safe) { + assert(!oslog_is_safe()); + return simple_lock_try(&bsd_log_spinlock, LCK_GRP_NULL); + } + simple_lock(&bsd_log_spinlock, LCK_GRP_NULL); + return true; } +/* + * Locks OS log lock assuming the context is safe. See bsd_log_lock() comment + * for details. 
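
The comment above spells out which contexts count as unsafe for taking the BSD log lock. A caller that might be in such a context passes safe == false and drops the record on contention instead of blocking; a rough sketch (the helper name is illustrative):

static void
my_log_append(bool in_safe_context)
{
	if (bsd_log_lock(in_safe_context)) {
		/* ... copy the record into the BSD message buffer ... */
		bsd_log_unlock();
	}
	/* else: unsafe context and the lock was contended, so the record is dropped */
}
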
+ */ void -bsd_log_lock(void) +bsd_log_lock_safe(void) { - simple_lock(&bsd_log_spinlock, LCK_GRP_NULL); + (void) bsd_log_lock(true); } void @@ -804,13 +822,6 @@ bsd_log_unlock(void) simple_unlock(&bsd_log_spinlock); } -void -oslog_lock_init(void) -{ - lck_grp_init(&oslog_stream_lock_grp, "oslog stream", LCK_GRP_ATTR_NULL); - lck_spin_init(&oslog_stream_lock, &oslog_stream_lock_grp, LCK_ATTR_NULL); -} - /* derived from boot_gets */ void safe_gets( @@ -818,12 +829,12 @@ safe_gets( int maxlen) { char *lp; - int c; + char c; char *strmax = str + maxlen - 1; /* allow space for trailing 0 */ lp = str; for (;;) { - c = cngetc(); + c = (char)cngetc(); switch (c) { case '\n': case '\r': @@ -983,7 +994,7 @@ paniclog_append_noflush(const char *fmt, ...) va_list listp; va_start(listp, fmt); - _doprnt_log(fmt, &listp, consdebug_putc, 16); + _doprnt_log(fmt, &listp, consdebug_putc_unbuffered, 16); va_end(listp); return 0; @@ -998,7 +1009,7 @@ kdb_printf(const char *fmt, ...) _doprnt_log(fmt, &listp, consdebug_putc, 16); va_end(listp); -#if CONFIG_EMBEDDED +#if defined(__arm__) || defined(__arm64__) paniclog_flush(); #endif @@ -1014,7 +1025,7 @@ kdb_log(const char *fmt, ...) _doprnt(fmt, &listp, consdebug_log, 16); va_end(listp); -#if CONFIG_EMBEDDED +#if defined(__arm__) || defined(__arm64__) paniclog_flush(); #endif @@ -1030,15 +1041,14 @@ kdb_printf_unbuffered(const char *fmt, ...) _doprnt(fmt, &listp, consdebug_putc_unbuffered, 16); va_end(listp); -#if CONFIG_EMBEDDED +#if defined(__arm__) || defined(__arm64__) paniclog_flush(); #endif return 0; } -#if !CONFIG_EMBEDDED - +#if CONFIG_VSPRINTF static void copybyte(int c, void *arg) { @@ -1049,7 +1059,7 @@ copybyte(int c, void *arg) * the inside pointer. */ char** p = arg; /* cast outside pointer */ - **p = c; /* store character */ + **p = (char)c; /* store character */ (*p)++; /* increment inside pointer */ } @@ -1070,4 +1080,4 @@ sprintf(char *buf, const char *fmt, ...) *copybyte_str = '\0'; return (int)strlen(buf); } -#endif /* !CONFIG_EMBEDDED */ +#endif /* CONFIG_VSPRINTF */ diff --git a/osfmk/kern/priority.c b/osfmk/kern/priority.c index 5ac1ce756..abdfa868e 100644 --- a/osfmk/kern/priority.c +++ b/osfmk/kern/priority.c @@ -110,7 +110,7 @@ thread_quantum_expire( KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_QUANTUM_EXPIRED) | DBG_FUNC_START, 0, 0, 0, 0, 0); - SCHED_STATS_QUANTUM_TIMER_EXPIRATION(processor); + SCHED_STATS_INC(quantum_timer_expirations); /* * We bill CPU time to both the individual thread and its task. @@ -131,12 +131,12 @@ thread_quantum_expire( (thread->quantum_remaining - thread->t_deduct_bank_ledger_time)); } thread->t_deduct_bank_ledger_time = 0; - ctime = mach_absolute_time(); #ifdef CONFIG_MACH_APPROXIMATE_TIME commpage_update_mach_approximate_time(ctime); #endif + sched_update_pset_avg_execution_time(processor->processor_set, thread->quantum_remaining, ctime, thread->th_sched_bucket); #if MONOTONIC mt_sched_update(thread); @@ -201,8 +201,8 @@ thread_quantum_expire( * during privilege transitions, synthesize an event now. 
*/ if (!thread->precise_user_kernel_time) { - timer_update(PROCESSOR_DATA(processor, current_state), ctime); - timer_update(PROCESSOR_DATA(processor, thread_timer), ctime); + timer_update(processor->current_state, ctime); + timer_update(processor->thread_timer, ctime); timer_update(&thread->runnable_timer, ctime); } @@ -233,8 +233,7 @@ thread_quantum_expire( ast_propagate(thread); thread_unlock(thread); - - timer_call_quantum_timer_enter(&processor->quantum_timer, thread, + running_timer_enter(processor, RUNNING_TIMER_QUANTUM, thread, processor->quantum_end, ctime); /* Tell platform layer that we are still running this thread */ @@ -279,11 +278,11 @@ sched_set_thread_base_priority(thread_t thread, int priority) } int old_base_pri = thread->base_pri; - thread->req_base_pri = priority; + thread->req_base_pri = (int16_t)priority; if (thread->sched_flags & TH_SFLAG_BASE_PRI_FROZEN) { priority = MAX(priority, old_base_pri); } - thread->base_pri = priority; + thread->base_pri = (int16_t)priority; if ((thread->state & TH_RUN) == TH_RUN) { assert(thread->last_made_runnable_time != THREAD_NOT_RUNNABLE); @@ -335,11 +334,11 @@ sched_set_kernel_thread_priority(thread_t thread, int new_priority) if (new_priority > thread->max_priority) { new_priority = thread->max_priority; } -#if CONFIG_EMBEDDED +#if !defined(XNU_TARGET_OS_OSX) if (new_priority < MAXPRI_THROTTLE) { new_priority = MAXPRI_THROTTLE; } -#endif /* CONFIG_EMBEDDED */ +#endif /* !defined(XNU_TARGET_OS_OSX) */ thread->importance = new_priority - thread->task_priority; @@ -370,10 +369,10 @@ thread_recompute_sched_pri(thread_t thread, set_sched_pri_options_t options) uint32_t sched_flags = thread->sched_flags; sched_mode_t sched_mode = thread->sched_mode; - int priority = thread->base_pri; + int16_t priority = thread->base_pri; if (sched_mode == TH_MODE_TIMESHARE) { - priority = SCHED(compute_timeshare_priority)(thread); + priority = (int16_t)SCHED(compute_timeshare_priority)(thread); } if (sched_flags & TH_SFLAG_DEPRESS) { @@ -428,6 +427,9 @@ sched_default_quantum_expire(thread_t thread __unused) */ } +int smt_timeshare_enabled = 1; +int smt_sched_bonus_16ths = 8; + #if defined(CONFIG_SCHED_TIMESHARE_CORE) /* @@ -458,7 +460,11 @@ lightweight_update_priority(thread_t thread) * resources. 
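
The smt_timeshare_enabled and smt_sched_bonus_16ths globals introduced above feed the usage accounting in the hunks that follow: a timeshare thread that opted out of SMT is charged delta + ((delta * smt_sched_bonus_16ths) >> 4) rather than delta. With the default value of 8, that works out to

    charged = delta + (delta * 8) / 16 = 1.5 * delta

so a no-SMT thread accumulates sched_usage, and therefore decays in priority, half again as fast as an ordinary timeshare thread.
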
*/ if (thread->pri_shift < INT8_MAX) { - thread->sched_usage += delta; + if (thread_no_smt(thread) && smt_timeshare_enabled) { + thread->sched_usage += (delta + ((delta * smt_sched_bonus_16ths) >> 4)); + } else { + thread->sched_usage += delta; + } } thread->cpu_delta += delta; @@ -531,19 +537,25 @@ const struct shift_data sched_decay_shifts[SCHED_DECAY_TICKS] = { extern int sched_pri_decay_band_limit; -/* Only use the decay floor logic on embedded non-clutch schedulers */ -#if CONFIG_EMBEDDED && !CONFIG_SCHED_CLUTCH +/* Only use the decay floor logic on non-macOS and non-clutch schedulers */ +#if !defined(XNU_TARGET_OS_OSX) && !CONFIG_SCHED_CLUTCH int sched_compute_timeshare_priority(thread_t thread) { - int decay_amount = (thread->sched_usage >> thread->pri_shift); + int decay_amount; int decay_limit = sched_pri_decay_band_limit; if (thread->base_pri > BASEPRI_FOREGROUND) { decay_limit += (thread->base_pri - BASEPRI_FOREGROUND); } + if (thread->pri_shift == INT8_MAX) { + decay_amount = 0; + } else { + decay_amount = (thread->sched_usage >> thread->pri_shift); + } + if (decay_amount > decay_limit) { decay_amount = decay_limit; } @@ -564,13 +576,17 @@ sched_compute_timeshare_priority(thread_t thread) return priority; } -#else /* CONFIG_EMBEDDED && !CONFIG_SCHED_CLUTCH */ +#else /* !defined(XNU_TARGET_OS_OSX) && !CONFIG_SCHED_CLUTCH */ int sched_compute_timeshare_priority(thread_t thread) { /* start with base priority */ - int priority = thread->base_pri - (thread->sched_usage >> thread->pri_shift); + int priority = thread->base_pri; + + if (thread->pri_shift != INT8_MAX) { + priority -= (thread->sched_usage >> thread->pri_shift); + } if (priority < MINPRI_USER) { priority = MINPRI_USER; @@ -581,7 +597,7 @@ sched_compute_timeshare_priority(thread_t thread) return priority; } -#endif /* CONFIG_EMBEDDED && !CONFIG_SCHED_CLUTCH */ +#endif /* !defined(XNU_TARGET_OS_OSX) && !CONFIG_SCHED_CLUTCH */ /* * can_update_priority @@ -635,7 +651,11 @@ update_priority( * determine if the system was in a contended state. */ if (thread->pri_shift < INT8_MAX) { - thread->sched_usage += delta; + if (thread_no_smt(thread) && smt_timeshare_enabled) { + thread->sched_usage += (delta + ((delta * smt_sched_bonus_16ths) >> 4)); + } else { + thread->sched_usage += delta; + } } thread->cpu_usage += delta + thread->cpu_delta; @@ -729,6 +749,26 @@ sched_decr_bucket(sched_bucket_t bucket) os_atomic_dec(&sched_run_buckets[bucket], relaxed); } +static void +sched_add_bucket(sched_bucket_t bucket, uint8_t run_weight) +{ + assert(bucket >= TH_BUCKET_FIXPRI && + bucket <= TH_BUCKET_SHARE_BG); + + os_atomic_add(&sched_run_buckets[bucket], run_weight, relaxed); +} + +static void +sched_sub_bucket(sched_bucket_t bucket, uint8_t run_weight) +{ + assert(bucket >= TH_BUCKET_FIXPRI && + bucket <= TH_BUCKET_SHARE_BG); + + assert(os_atomic_load(&sched_run_buckets[bucket], relaxed) > 0); + + os_atomic_sub(&sched_run_buckets[bucket], run_weight, relaxed); +} + uint32_t sched_run_incr(thread_t thread) { @@ -753,6 +793,35 @@ sched_run_decr(thread_t thread) return new_count; } +uint32_t +sched_smt_run_incr(thread_t thread) +{ + assert((thread->state & (TH_RUN | TH_IDLE)) == TH_RUN); + + uint8_t run_weight = (thread_no_smt(thread) && smt_timeshare_enabled) ? 
2 : 1; + thread->sched_saved_run_weight = run_weight; + + uint32_t new_count = os_atomic_add(&sched_run_buckets[TH_BUCKET_RUN], run_weight, relaxed); + + sched_add_bucket(thread->th_sched_bucket, run_weight); + + return new_count; +} + +uint32_t +sched_smt_run_decr(thread_t thread) +{ + assert((thread->state & (TH_RUN | TH_IDLE)) != TH_RUN); + + uint8_t run_weight = thread->sched_saved_run_weight; + + sched_sub_bucket(thread->th_sched_bucket, run_weight); + + uint32_t new_count = os_atomic_sub(&sched_run_buckets[TH_BUCKET_RUN], run_weight, relaxed); + + return new_count; +} + void sched_update_thread_bucket(thread_t thread) { @@ -793,6 +862,46 @@ sched_update_thread_bucket(thread_t thread) } } +void +sched_smt_update_thread_bucket(thread_t thread) +{ + sched_bucket_t old_bucket = thread->th_sched_bucket; + sched_bucket_t new_bucket = TH_BUCKET_RUN; + + switch (thread->sched_mode) { + case TH_MODE_FIXED: + case TH_MODE_REALTIME: + new_bucket = TH_BUCKET_FIXPRI; + break; + + case TH_MODE_TIMESHARE: + if (thread->base_pri > BASEPRI_DEFAULT) { + new_bucket = TH_BUCKET_SHARE_FG; + } else if (thread->base_pri > BASEPRI_UTILITY) { + new_bucket = TH_BUCKET_SHARE_DF; + } else if (thread->base_pri > MAXPRI_THROTTLE) { + new_bucket = TH_BUCKET_SHARE_UT; + } else { + new_bucket = TH_BUCKET_SHARE_BG; + } + break; + + default: + panic("unexpected mode: %d", thread->sched_mode); + break; + } + + if (old_bucket != new_bucket) { + thread->th_sched_bucket = new_bucket; + thread->pri_shift = sched_pri_shifts[new_bucket]; + + if ((thread->state & (TH_RUN | TH_IDLE)) == TH_RUN) { + sched_sub_bucket(old_bucket, thread->sched_saved_run_weight); + sched_add_bucket(new_bucket, thread->sched_saved_run_weight); + } + } +} + /* * Set the thread's true scheduling mode * Called with thread mutex and thread locked @@ -816,6 +925,18 @@ sched_set_thread_mode(thread_t thread, sched_mode_t new_mode) break; } +#if CONFIG_SCHED_AUTO_JOIN + /* + * Realtime threads might have auto-joined a work interval based on + * make runnable relationships. If such an RT thread is now being demoted + * to non-RT, unjoin the thread from the work interval. + */ + if ((thread->sched_flags & TH_SFLAG_THREAD_GROUP_AUTO_JOIN) && (new_mode != TH_MODE_REALTIME)) { + assert((thread->sched_mode == TH_MODE_REALTIME) || (thread->th_work_interval_flags & TH_WORK_INTERVAL_FLAGS_AUTO_JOIN_LEAK)); + work_interval_auto_join_demote(thread); + } +#endif /* CONFIG_SCHED_AUTO_JOIN */ + thread->sched_mode = new_mode; SCHED(update_thread_bucket)(thread); diff --git a/osfmk/kern/priority_queue.c b/osfmk/kern/priority_queue.c deleted file mode 100644 index 85ee093ae..000000000 --- a/osfmk/kern/priority_queue.c +++ /dev/null @@ -1,102 +0,0 @@ -/* - * Copyright (c) 2018 Apple Inc. All rights reserved. - * - * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * - * This file contains Original Code and/or Modifications of Original Code - * as defined in and that are subject to the Apple Public Source License - * Version 2.0 (the 'License'). You may not use this file except in - * compliance with the License. The rights granted to you under the License - * may not be used to create, or enable the creation or redistribution of, - * unlawful or unlicensed copies of an Apple operating system, or to - * circumvent, violate, or enable the circumvention or violation of, any - * terms of an Apple operating system software license agreement. - * - * Please obtain a copy of the License at - * http://www.opensource.apple.com/apsl/ and read it before using this file. 
- * - * The Original Code and all software distributed under the License are - * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER - * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, - * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. - * Please see the License for the specific language governing rights and - * limitations under the License. - * - * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ - */ - -#include -#include - -#ifdef __LP64__ -static_assert(PRIORITY_QUEUE_ENTRY_CHILD_BITS >= VM_KERNEL_POINTER_SIGNIFICANT_BITS, - "Priority Queue child pointer packing failed"); -#endif - -priority_queue_entry_t -pqueue_pair_meld(priority_queue_entry_t elt, priority_queue_compare_fn_t cmp_fn) -{ - priority_queue_entry_t pq_meld_result = NULL; - priority_queue_entry_t pair_list = NULL; - - assert(elt); // caller needs to check this. - - /* Phase 1: */ - /* Split the list into a set of pairs going front to back. */ - /* Hook these pairs onto an intermediary list in reverse order of traversal.*/ - - do { - /* Consider two elements at a time for pairing */ - priority_queue_entry_t pair_item_a = elt; - priority_queue_entry_t pair_item_b = elt->next; - if (pair_item_b == NULL) { - /* Odd number of elements in the list; link the odd element */ - /* as it is on the intermediate list. */ - pair_item_a->prev = pair_list; - pair_list = pair_item_a; - break; - } - /* Found two elements to pair up */ - elt = pair_item_b->next; - priority_queue_entry_t pair = pqueue_merge(pair_item_a, pair_item_b, cmp_fn); - /* Link the pair onto the intermediary list */ - pair->prev = pair_list; - pair_list = pair; - } while (elt != NULL); - - /* Phase 2: Merge all the pairs in the pair_list */ - do { - elt = pair_list->prev; - pq_meld_result = pqueue_merge(pq_meld_result, pair_list, cmp_fn); - pair_list = elt; - } while (pair_list != NULL); - - return pq_meld_result; -} - -void -pqueue_destroy(struct priority_queue *q, size_t offset, - void (^callback)(void *e)) -{ - assert(callback != NULL); - priority_queue_entry_t head = pqueue_unpack_root(q); - priority_queue_entry_t tail = head; - - while (head != NULL) { - priority_queue_entry_t child_list = pqueue_entry_unpack_child(head); - if (child_list) { - tail->next = child_list; - while (tail->next) { - tail = tail->next; - } - } - - priority_queue_entry_t elt = head; - head = head->next; - callback((void *)elt - offset); - } - - /* poison the queue now that it's destroyed */ - q->pq_root_packed = ~0UL; -} diff --git a/osfmk/kern/priority_queue.h b/osfmk/kern/priority_queue.h index fc35f70a3..7b7aea50d 100644 --- a/osfmk/kern/priority_queue.h +++ b/osfmk/kern/priority_queue.h @@ -29,12 +29,17 @@ #ifndef _KERN_PRIORITY_QUEUE_H_ #define _KERN_PRIORITY_QUEUE_H_ -#include +#if KERNEL +#include #include #include +#endif +#include #include +#pragma GCC visibility push(hidden) + __BEGIN_DECLS /* @@ -42,60 +47,49 @@ __BEGIN_DECLS * * Reference Papers: * - A Back-to-Basics Empirical Study of Priority Queues (https://arxiv.org/abs/1403.0252) - * - The Pairing Heap: A New Form of Self-Adjusting Heap (https://www.cs.cmu.edu/~sleator/papers/pairing-heaps.pdf) - * - * The XNU implementation is a basic version of the pairing heap. It allows for O(1) insertion and amortized O(log n) - * deletion. It is not a stable data structure since adding stability would need more pointers and hence more memory. 
- * - * The implementation supports two types of key storage: - * - * Type 1: PRIORITY_QUEUE_GENERIC_KEY + * - The Pairing Heap: A New Form of Self-Adjusting Heap + * (https://www.cs.cmu.edu/~sleator/papers/pairing-heaps.pdf) * - * This flag is useful when the priorities are either larger than 8-bits or the node comparision needs - * extra information other than the priority. The nodes do not store the priorities themselves and on - * comparision, callout to the comparator (of type priority_queue_compare_fn_t) provided as part of - * initialization. + * The XNU implementation is a basic version of the pairing heap. + * It allows for O(1) insertion and amortized O(log n) deletion. * - * Sample Initialization: + * It is not a stable data structure by default since adding stability would + * need more pointers and hence more memory. * - * { - * static struct priority_queue pq; - * priority_queue_init(pq, PRIORITY_QUEUE_MAX_HEAP | PRIORITY_QUEUE_GENERIC_KEY); - * } + * Type of queues * - * For this type, all insertions, priority_increase, priority_decrease must pass PRIORITY_QUEUE_KEY_NONE - * as the priority key field. + * There are several types of priority queues, with types named: * - * Type 2: PRIORITY_QUEUE_BUILTIN_KEY + * struct priority_queue__ * - * This type is useful when the priorities need to be stored within the data structure itself. - * Each node in the priority queue maintains a 8-bit priority key. + * In the rest of this header, `struct priority_queue` is used as + * a generic type to mean any priority_queue type. * - * Sample Initialization: - * { - * static struct priority_queue pq; - * priority_queue_init(pq, PRIORITY_QUEUE_MAX_HEAP | PRIORITY_QUEUE_BUILTIN_KEY); - * } + * min/max refers to whether the priority queue is a min or a max heap. * + * the subtype can be: * - * Min / Max Heap: + * - sched, in which case the key is built in the linkage and assumed to + * be a scheduler priority. * - * The semantics of Min/Max heap are not used by the implementation, it assumes that the comparison block - * that is passed to the insertion / removal / ... macros provides the right ordering. - * - * However for human readability purposes, whether this heap is a MIN or MAX heap is passed - * at initialization time, and will influence whether accessors like priority_queue_min - * or priority_queue_max can be used. + * - sched_stable, in which case the key is a combination of: + * * a scheduler priority + * * whether the entry was preempted or not + * * a timestamp. * + * - generic, in which case a comparison function must be passed to + * the priority_queue_init. * * Element Linkage: * * Both types use a common queue head and linkage pattern. * The head of a priority queue is declared as: * - * struct priority_queue pq_head; + * struct priority_queue__ pq_head; + * + * Elements in this queue are linked together using one of the struct + * priority_queue_entry_ objects embedded within a structure: * - * Elements in this queue are linked together using struct priority_queue_entry objects embedded within a structure: * struct some_data { * int field1; * int field2; @@ -106,34 +100,24 @@ __BEGIN_DECLS * }; * struct some_data is referred to as the queue "element" * - * This method uses the next, prev and child pointers of the struct priority_queue_entry linkage object embedded in a - * queue element to point to other elements in the queue. The head of the priority queue (the priority_queue - * object) will point to the root of the pairing heap (NULL if heap is empty). 
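
The new naming scheme described above is what mpqueue.h relies on earlier in this patch: the timer queue embeds a struct priority_queue_deadline_min and initializes it with a bare priority_queue_init(). A minimal sketch of declaring such a queue for a hypothetical timer-like object; only the declaration and init shown here are taken from this patch, insertion and removal go through priority queue API not visible in these hunks:

struct my_timer {
	struct priority_queue_entry_deadline mt_pqlink; /* linkage; the key is its deadline field */
	/* ... payload ... */
};

/* a min-heap ordered by deadline, as used for mpqueue_head's mpq_pqhead */
static struct priority_queue_deadline_min my_timer_pq;

static void
my_timer_pq_init(void)
{
	priority_queue_init(&my_timer_pq);
}
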
This method allows multiple chains - * through a given object, by embedding multiple priority_queue_entry objects in the structure, while simultaneously - * providing fast removal and insertion into the heap using only priority_queue_entry object pointers. + * This method uses the next, prev and child pointers of the struct + * priority_queue_entry linkage object embedded in a queue element to + * point to other elements in the queue. The head of the priority queue + * (the priority_queue object) will point to the root of the pairing + * heap (NULL if heap is empty). This method allows multiple chains + * through a given object, by embedding multiple priority_queue_entry + * objects in the structure, while simultaneously providing fast removal + * and insertion into the heap using only priority_queue_entry object + * pointers. */ /* * Priority keys maintained by the data structure. - * Since the priority is packed in the node itself, it restricts keys to be 8-bits only. + * Since the priority is packed in the node itself, it restricts keys to be 16-bits only. */ #define PRIORITY_QUEUE_KEY_NONE 0 -typedef uint8_t priority_queue_key_t; - -/* - * Flags passed to priority_queue_init() - * - * One key type must be picked (default is BUILTIN_KEY) - * Min or Max heap must be picked (default is MAX_HEAP) - */ -typedef enum priority_queue_flags { - PRIORITY_QUEUE_BUILTIN_KEY = 0x0, - PRIORITY_QUEUE_GENERIC_KEY = 0x1, - PRIORITY_QUEUE_MAX_HEAP = 0x0, - PRIORITY_QUEUE_MIN_HEAP = 0x2, -#define PRIORITY_QUEUE_BUILTIN_MAX_HEAP (PRIORITY_QUEUE_MAX_HEAP | PRIORITY_QUEUE_BUILTIN_KEY) -} priority_queue_flags_t; +typedef uint16_t priority_queue_key_t; #ifdef __LP64__ @@ -144,29 +128,73 @@ typedef enum priority_queue_flags { * implementation. The idea is to define the packed location as a long and * for unpacking simply cast it to a full pointer which sign extends it. 
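As a concrete illustration of the linkage pattern and entry types described above (the element type, field names, and head name below are assumptions made up for this sketch, not identifiers from the patch):

    /*
     * A max-heap keyed on scheduler priority: the head is a
     * struct priority_queue_sched_max and each element embeds a
     * struct priority_queue_entry_sched as its heap linkage.
     */
    struct my_sched_elem {
            int                                 field1;
            struct priority_queue_entry_sched   link;      /* heap linkage */
    };

    static struct priority_queue_sched_max my_sched_heap =
        PRIORITY_QUEUE_INITIALIZER;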
*/ -#define PRIORITY_QUEUE_ENTRY_CHILD_BITS 56 -#define PRIORITY_QUEUE_ENTRY_KEY_BITS 8 +#define PRIORITY_QUEUE_ENTRY_CHILD_BITS 48 +#define PRIORITY_QUEUE_ENTRY_KEY_BITS 16 typedef struct priority_queue_entry { - struct priority_queue_entry *next; - struct priority_queue_entry *prev; - long key: PRIORITY_QUEUE_ENTRY_KEY_BITS; + struct priority_queue_entry *next; + struct priority_queue_entry *prev; + long __key: PRIORITY_QUEUE_ENTRY_KEY_BITS; long child: PRIORITY_QUEUE_ENTRY_CHILD_BITS; } *priority_queue_entry_t; +typedef struct priority_queue_entry_deadline { + struct priority_queue_entry_deadline *next; + struct priority_queue_entry_deadline *prev; + long __key: PRIORITY_QUEUE_ENTRY_KEY_BITS; + long child: PRIORITY_QUEUE_ENTRY_CHILD_BITS; + uint64_t deadline; +} *priority_queue_entry_deadline_t; + +typedef struct priority_queue_entry_sched { + struct priority_queue_entry_sched *next; + struct priority_queue_entry_sched *prev; + long key: PRIORITY_QUEUE_ENTRY_KEY_BITS; + long child: PRIORITY_QUEUE_ENTRY_CHILD_BITS; +} *priority_queue_entry_sched_t; + +typedef struct priority_queue_entry_stable { + struct priority_queue_entry_stable *next; + struct priority_queue_entry_stable *prev; + long key: PRIORITY_QUEUE_ENTRY_KEY_BITS; + long child: PRIORITY_QUEUE_ENTRY_CHILD_BITS; + uint64_t stamp; +} *priority_queue_entry_stable_t; + #else /* __LP64__ */ +typedef struct priority_queue_entry { + struct priority_queue_entry *next; + struct priority_queue_entry *prev; + long child; +} *priority_queue_entry_t; + +typedef struct priority_queue_entry_deadline { + struct priority_queue_entry_deadline *next; + struct priority_queue_entry_deadline *prev; + long child; + uint64_t deadline; +} *priority_queue_entry_deadline_t; + /* * For 32-bit platforms, use an extra field to store the key since child pointer packing * is not an option. The child is maintained as a long to use the same packing/unpacking * routines that work for 64-bit platforms. */ -typedef struct priority_queue_entry { - struct priority_queue_entry *next; - struct priority_queue_entry *prev; +typedef struct priority_queue_entry_sched { + struct priority_queue_entry_sched *next; + struct priority_queue_entry_sched *prev; long child; priority_queue_key_t key; -} *priority_queue_entry_t; +} *priority_queue_entry_sched_t; + +typedef struct priority_queue_entry_stable { + struct priority_queue_entry_stable *next; + struct priority_queue_entry_stable *prev; + long child; + priority_queue_key_t key; + uint64_t stamp; +} *priority_queue_entry_stable_t; #endif /* __LP64__ */ @@ -180,57 +208,81 @@ typedef struct priority_queue_entry { typedef int (^priority_queue_compare_fn_t)(struct priority_queue_entry *e1, struct priority_queue_entry *e2); -/* - * Standard comparision routines for max and min heap. - * Must be used with PRIORITY_QUEUE_BUILTIN_KEY only. - */ -static inline int -priority_queue_element_builtin_key_compare(priority_queue_entry_t e1, priority_queue_entry_t e2) -{ - return (int)e2->key - (int)e1->key; -} +#define priority_heap_compare_ints(a, b) ((a) < (b) ? 1 : -1) #define priority_heap_make_comparator(name1, name2, type, field, ...) 
\ - (^int(priority_queue_entry_t __e1, priority_queue_entry_t __e2){ \ - type *name1 = pqe_element_fast(__e1, type, field); \ - type *name2 = pqe_element_fast(__e2, type, field); \ - __VA_ARGS__; \ + (^int(priority_queue_entry_t __e1, priority_queue_entry_t __e2){ \ + type *name1 = pqe_element_fast(__e1, type, field); \ + type *name2 = pqe_element_fast(__e2, type, field); \ + __VA_ARGS__; \ }) -#define PRIORITY_QUEUE_SCHED_PRI_MAX_HEAP_COMPARE \ - (^int(priority_queue_entry_t e1, priority_queue_entry_t e2){ \ - return -priority_queue_element_builtin_key_compare(e1, e2); \ - }) +/* + * Type for any priority queue, only used for documentation purposes. + */ +struct priority_queue; -#define PRIORITY_QUEUE_SCHED_PRI_MIN_HEAP_COMPARE \ - (^int(priority_queue_entry_t e1, priority_queue_entry_t e2){ \ - return priority_queue_element_builtin_key_compare(e1, e2); \ - }) +/* + * Type of generic heaps + */ +struct priority_queue_min { + struct priority_queue_entry *pq_root; + priority_queue_compare_fn_t pq_cmp_fn; +}; +struct priority_queue_max { + struct priority_queue_entry *pq_root; + priority_queue_compare_fn_t pq_cmp_fn; +}; /* - * Helper routines for packing/unpacking the child pointer in heap nodes. - * On 64-bit platforms, these routines rely on the fact that the sign extension - * for the lower 56-bits of a kernel pointer results in the real pointer. The trick - * works for NULL pointers as well. - * */ -#define pqueue_entry_pack_child(qe, child_ptr) ((qe)->child = (long)(child_ptr)) -#define pqueue_entry_unpack_child(qe) ((struct priority_queue_entry *)((qe)->child)) + * Type of deadline heaps + */ +struct priority_queue_deadline_min { + struct priority_queue_entry_deadline *pq_root; +}; +struct priority_queue_deadline_max { + struct priority_queue_entry_deadline *pq_root; +}; /* - * Priority queue head structure. - * Stores the comparision function using pointer packing. The remaining bit is used - * for type of the queue. + * Type of scheduler priority based heaps */ -struct priority_queue { +struct priority_queue_sched_min { + struct priority_queue_entry_sched *pq_root; +}; +struct priority_queue_sched_max { + struct priority_queue_entry_sched *pq_root; +}; + /* - * we pack priority_queue_flags_t in the least significant two bits - * of the root pointer. 
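A minimal sketch of initializing one of the generic heaps above with a block comparator built via priority_heap_make_comparator (the struct my_cost_elem type, its cost/link fields, and the head name are assumptions for the example; the convention that a positive return keeps the first argument closer to the root is taken from the pairing-heap merge shown earlier in this diff):

    struct my_cost_elem {
            uint64_t                      cost;      /* ordering key */
            struct priority_queue_entry   link;      /* heap linkage */
    };

    static struct priority_queue_min my_costq;

    static void
    my_costq_init(void)
    {
            /* smaller cost sorts toward the root of the min-heap */
            priority_queue_init(&my_costq,
                priority_heap_make_comparator(e1, e2, struct my_cost_elem, link,
                    return priority_heap_compare_ints(e1->cost, e2->cost)));
    }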
+ * Type of scheduler priority based stable heaps */ -#define PRIORITY_QUEUE_ROOT_FLAGS_MASK (3ul) -#define PRIORITY_QUEUE_ROOT_POINTER_MASK (~PRIORITY_QUEUE_ROOT_FLAGS_MASK) - unsigned long pq_root_packed; +struct priority_queue_sched_stable_min { + struct priority_queue_entry_stable *pq_root; +}; +struct priority_queue_sched_stable_max { + struct priority_queue_entry_stable *pq_root; }; +#pragma mark generic interface + +#define PRIORITY_QUEUE_INITIALIZER { .pq_root = NULL } + +#define __pqueue_overloadable __attribute__((overloadable)) + +#define priority_queue_is_min_heap(pq) _Generic(pq, \ + struct priority_queue_min *: true, \ + struct priority_queue_max *: false, \ + struct priority_queue_deadline_min *: true, \ + struct priority_queue_deadline_max *: false, \ + struct priority_queue_sched_min *: true, \ + struct priority_queue_sched_max *: false, \ + struct priority_queue_sched_stable_min *: true, \ + struct priority_queue_sched_stable_max *: false) + +#define priority_queue_is_max_heap(pq) \ + (!priority_queue_is_min_heap(pq)) + /* * Macro: pqe_element_fast * Function: @@ -265,572 +317,392 @@ struct priority_queue { * Returns: * containing qe */ -#define pqe_element(qe, type, field) ({ \ - priority_queue_entry_t _tmp_entry = (qe); \ - _tmp_entry ? pqe_element_fast(_tmp_entry, type, field) : ((type *)NULL); \ +#define pqe_element(qe, type, field) ({ \ + __auto_type _tmp_entry = (qe); \ + _tmp_entry ? pqe_element_fast(_tmp_entry, type, field) : ((type *)NULL);\ }) -#define pqueue_has_generic_keys(p) \ - (((p)->pq_root_packed & PRIORITY_QUEUE_GENERIC_KEY) != 0) - -#define pqueue_has_builtin_keys(p) \ - (((p)->pq_root_packed & PRIORITY_QUEUE_GENERIC_KEY) == 0) - -#define pqueue_is_min_heap(p) \ - (((p)->pq_root_packed & PRIORITY_QUEUE_MIN_HEAP) != 0) - -#define pqueue_is_max_heap(p) \ - (((p)->pq_root_packed & PRIORITY_QUEUE_MIN_HEAP) == 0) - /* - * Macro: pqueue_pack_root - * Function: - * Pack the root pointer of the head. - * Header: - * pqueue_pack_root(q, root_ptr) - * q - * root_ptr + * Priority Queue functionality routines */ -#define pqueue_pack_root(q, root_ptr) \ -MACRO_BEGIN \ - uintptr_t __flags = (q)->pq_root_packed & PRIORITY_QUEUE_ROOT_FLAGS_MASK; \ - (q)->pq_root_packed = (uintptr_t)(root_ptr) | __flags; \ -MACRO_END /* - * Macro: pqueue_unpack_root + * Macro: priority_queue_empty * Function: - * Unpack the root pointer from the head of the priority queue. + * Tests whether a priority queue is empty. * Header: - * pqueue_unpack_root(q) - * q - * Returns: - * + * boolean_t priority_queue_empty(pq) + * pq */ -#define pqueue_unpack_root(q) \ - ((priority_queue_entry_t)((q)->pq_root_packed & PRIORITY_QUEUE_ROOT_POINTER_MASK)) +#define priority_queue_empty(pq) ((pq)->pq_root == NULL) /* - * Macro: pqueue_list_remove + * Macro: priority_queue_init * Function: - * Helper routine to remove an element from the list at its level + * Initialize a . 
* Header: - * pqueue_list_remove(elt) - * elt + * priority_queue_init(pq) + * pq + * (optional) comparator function * Returns: * None */ -static inline void -pqueue_list_remove(priority_queue_entry_t elt) -{ - assert(elt->prev != NULL); - /* Check if elt is head of list at its level; */ - /* If yes, make the next node the head at that level */ - /* Else, remove elt from the list at that level */ - if (pqueue_entry_unpack_child(elt->prev) == elt) { - pqueue_entry_pack_child(elt->prev, elt->next); - } else { - elt->prev->next = elt->next; - } - /* Update prev for next element in list */ - if (elt->next != NULL) { - elt->next->prev = elt->prev; - } -} - -/* - * Macro: pqueue_merge - * Function: - * Helper routine to merge two subtrees of the heap to form a single tree and - * maintain the parent > child invariant. If the two keys are equal, the current - * implementation makes the first subtree the parent and the second one the child. - * Header: - * pqueue_merge(subtree_a, subtree_b, cmp_fn) - * subtree_a - * subtree_b - * comparator function - * Returns: - * pointing to root of the merged tree - */ -static inline priority_queue_entry_t -pqueue_merge(priority_queue_entry_t subtree_a, priority_queue_entry_t subtree_b, - priority_queue_compare_fn_t cmp_fn) -{ - priority_queue_entry_t merge_result = NULL; - if (subtree_a == NULL) { - merge_result = subtree_b; - } else if (subtree_b == NULL || (subtree_a == subtree_b)) { - merge_result = subtree_a; - } else { - priority_queue_entry_t parent = subtree_a; - priority_queue_entry_t child = subtree_b; - if (cmp_fn(subtree_a, subtree_b) < 0) { - parent = subtree_b; - child = subtree_a; - } - /* Insert the child as the first element in the parent's child list */ - child->next = pqueue_entry_unpack_child(parent); - child->prev = parent; - if (pqueue_entry_unpack_child(parent) != NULL) { - pqueue_entry_unpack_child(parent)->prev = child; - } - /* Create the parent child relationship */ - pqueue_entry_pack_child(parent, child); - parent->next = NULL; - parent->prev = NULL; - merge_result = parent; - } - return merge_result; -} +__pqueue_overloadable +extern void +priority_queue_init(struct priority_queue *pq, ...); /* - * Macro: pqueue_pair_meld - * Function: - * Helper routine to splitwise pair a set of subtrees on a list at a given level and then - * meld them together to form a new tree while maintaining the invariant parent > child. - * - * The caller must check the element is non NULL. - * - * Header: - * pqueue_pair_meld(elt, cmp_fn) - * elt - * comparator function - * Returns: - * pointing to root of the melded tree - */ -priority_queue_entry_t -pqueue_pair_meld(priority_queue_entry_t e, priority_queue_compare_fn_t cmp_fn); - -/* - * Macro: pqueue_update_key + * Macro: priority_queue_entry_init * Function: - * Helper routine to update the key for a node in the heap. Note that the priority keys are only - * maintained for the PRIORITY_QUEUE_BUILTIN_KEY type of priority queue. For PRIORITY_QUEUE_GENERIC_KEY, - * this routine does nothing. 
+ * Initialize a priority_queue_entry_t * Header: - * pqueue_update_key(que, elt, new_key) - * que - * elt - * new_key + * priority_queue_entry_init(qe) + * qe * Returns: * None */ -static inline void -pqueue_update_key(struct priority_queue *que, priority_queue_entry_t elt, - priority_queue_key_t new_key) -{ - if (pqueue_has_builtin_keys(que)) { - assert(new_key <= UINT8_MAX); - elt->key = new_key; - } else { - assert(new_key == PRIORITY_QUEUE_KEY_NONE); - } -} - -/* - * Macro: pqueue_remove_root - * Function: - * Helper routine to remove the root element in a priority queue. - * Header: - * pqueue_remove_root(que, cmp_fn) - * que - * old_root - * comparator function - * Returns: - * old_root - */ -static inline priority_queue_entry_t -pqueue_remove_root(struct priority_queue *que, priority_queue_entry_t old_root, - priority_queue_compare_fn_t cmp_fn) -{ - priority_queue_entry_t new_root = pqueue_entry_unpack_child(old_root); - if (new_root) { - new_root = pqueue_pair_meld(new_root, cmp_fn); - } - pqueue_pack_root(que, new_root); - return old_root; -} - -/* - * Macro: pqueue_remove_non_root - * Function: - * Helper routine to remove a non root element in a priority queue. - * Header: - * pqueue_remove_non_root(que, cmp_fn) - * que - * elt - * comparator function - * Returns: - * elt - */ -static inline priority_queue_entry_t -pqueue_remove_non_root(struct priority_queue *que, priority_queue_entry_t elt, - priority_queue_compare_fn_t cmp_fn) -{ - priority_queue_entry_t child, new_root; - - /* To remove a non-root element with children levels, */ - /* - Remove element from its current level iist */ - /* - Pairwise split all the elements in the child level list */ - /* - Meld all these splits (right-to-left) to form new subtree */ - /* - Merge the root subtree with the newly formed subtree */ - pqueue_list_remove(elt); - - child = pqueue_entry_unpack_child(elt); - if (child) { - child = pqueue_pair_meld(child, cmp_fn); - new_root = pqueue_merge(pqueue_unpack_root(que), child, cmp_fn); - pqueue_pack_root(que, new_root); - } - - return elt; -} +#define priority_queue_entry_init(qe) \ + __builtin_bzero(qe, sizeof(*(qe))) /* - * Macro: pqueue_destroy + * Macro: priority_queue_destroy * Function: * Destroy a priority queue safely. This routine accepts a callback * to handle any cleanup for elements in the priority queue. The queue does * not maintain its invariants while getting destroyed. The priority queue and * the linkage nodes need to be re-initialized before re-using them. - * - * Note: the offset is the offset to the linkage inside the elements - * That are linked inside the priority heap, because pqueue_destroy - * can't use pqe_element. * Header: - * pqueue_destroy(q, offset, callback) - * q - * offset + * priority_queue_destroy(pq, type, field, callback) + * pq * callback for each element * * Returns: * None */ -void - pqueue_destroy(struct priority_queue *q, size_t offset, - void (^callback)(void *e)); - -/* - * Priority Queue functionality routines - */ +#define priority_queue_destroy(pq, type, field, callback) \ +MACRO_BEGIN \ + void (^__callback)(type *) = (callback); /* type check */ \ + _priority_queue_destroy(pq, offsetof(type, field), \ + (void (^)(void *))(__callback)); \ +MACRO_END /* - * Macro: priority_queue_empty + * Macro: priority_queue_min * Function: - * Tests whether a priority queue is empty. + * Lookup the minimum in a min-priority queue. 
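A sketch of tearing down such a heap with the priority_queue_destroy macro above; the block callback runs for every element still linked in the heap (my_costq and struct my_cost_elem are the assumed names from the earlier sketch, and my_cost_elem_free is a hypothetical cleanup routine):

    static void
    my_costq_teardown(void)
    {
            priority_queue_destroy(&my_costq, struct my_cost_elem, link,
                ^(struct my_cost_elem *e) {
                    my_cost_elem_free(e);   /* hypothetical per-element cleanup */
            });
            /* the heap and its linkages must be re-initialized before reuse */
    }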
+ * * Header: - * boolean_t priority_queue_empty(q) - * q + * priority_queue_min(pq, type, field) + * pq + * type of element in priority queue + * chain field in (*) + * Returns: + * root element */ -#define priority_queue_empty(q) (pqueue_unpack_root((q)) == NULL) +#define priority_queue_min(pq, type, field) ({ \ + static_assert(priority_queue_is_min_heap(pq), "queue is min heap"); \ + pqe_element((pq)->pq_root, type, field); \ +}) /* - * Macro: priority_queue_entry_key + * Macro: priority_queue_max * Function: - * Returns the priority queue entry key for an element on a PRIORITY_QUEUE_BUILTIN_KEY - * queue. It should not be called for an element on a PRIORITY_QUEUE_GENERIC_KEY queue. + * Lookup the maximum element in a max-priority queue. + * * Header: - * priority_queue_key_t priority_queue_entry_key(q, elt) - * q - * elt + * priority_queue_max(pq, type, field) + * pq + * type of element in priority queue + * chain field in (*) + * Returns: + * root element */ -#define priority_queue_entry_key(q, elt) ({ \ - assert(pqueue_has_builtin_keys(q)); \ - (priority_queue_key_t)((elt)->key); \ +#define priority_queue_max(pq, type, field) ({ \ + static_assert(priority_queue_is_max_heap(pq), "queue is max heap"); \ + pqe_element((pq)->pq_root, type, field); \ }) /* - * Macro: priority_queue_init + * Macro: priority_queue_insert * Function: - * Initialze a by setting the flags - * Valid flags are: - * - PRIORITY_QUEUE_BUILTIN_KEY or PRIORITY_QUEUE_GENERIC_KEY - * - PRIORITY_QUEUE_MAX_HEAP or PRIORITY_QUEUE_MIN_HEAP + * Insert an element into the priority queue + * + * The caller must have set the key prio to insertion + * * Header: - * priority_queue_init(q, cmp_fn, queue_type) - * q - * queue_flags + * priority_queue_insert(pq, elt, new_key) + * pq + * elt * Returns: - * None + * Whether the inserted element became the new root */ -#define priority_queue_init(q, flags) \ -MACRO_BEGIN \ - pqueue_pack_root((q), NULL); \ - (q)->pq_root_packed = (flags); \ -MACRO_END +extern bool +priority_queue_insert(struct priority_queue *pq, + struct priority_queue_entry *elt) __pqueue_overloadable; /* - * Macro: priority_queue_entry_init + * Macro: priority_queue_remove_min * Function: - * Initialze a priority_queue_entry_t + * Remove the minimum element in a min-heap priority queue. * Header: - * priority_queue_entry_init(qe) - * qe + * priority_queue_remove_min(pq, type, field) + * pq + * type of element in priority queue + * chain field in (*) * Returns: - * None + * max element */ -#define priority_queue_entry_init(qe) \ -MACRO_BEGIN \ - (qe)->next = NULL; \ - (qe)->prev = NULL; \ - pqueue_entry_pack_child((qe), NULL); \ - (qe)->key = PRIORITY_QUEUE_KEY_NONE; \ -MACRO_END +#define priority_queue_remove_min(pq, type, field) ({ \ + static_assert(priority_queue_is_min_heap(pq), "queue is min heap"); \ + pqe_element(_priority_queue_remove_root(pq), type, field); \ +}) /* - * Macro: priority_queue_insert + * Macro: priority_queue_remove_max * Function: - * Insert an element into the priority queue + * Remove the maximum element in a max-heap priority queue. 
* Header: - * priority_queue_insert(que, elt, new_key, cmp_fn) - * que - * elt - * new_key - * comparator function + * priority_queue_remove_max(pq, type, field) + * pq + * type of element in priority queue + * chain field in (*) * Returns: - * Whether the inserted element became the new root + * max element */ -static inline boolean_t -priority_queue_insert(struct priority_queue *que, priority_queue_entry_t elt, - priority_queue_key_t new_key, priority_queue_compare_fn_t cmp_fn) -{ - priority_queue_entry_t new_root; - - pqueue_update_key(que, elt, new_key); - new_root = pqueue_merge(pqueue_unpack_root(que), elt, cmp_fn); - pqueue_pack_root(que, new_root); - return new_root == elt; -} +#define priority_queue_remove_max(pq, type, field) ({ \ + static_assert(priority_queue_is_max_heap(pq), "queue is max heap"); \ + pqe_element(_priority_queue_remove_root(pq), type, field); \ +}) /* * Macro: priority_queue_remove * Function: * Removes an element from the priority queue * Header: - * priority_queue_remove(que, elt, cmp_fn) - * que + * priority_queue_remove(pq, elt) + * pq * elt - * comparator function * Returns: * Whether the removed element was the root */ -static inline boolean_t -priority_queue_remove(struct priority_queue *que, priority_queue_entry_t elt, - priority_queue_compare_fn_t cmp_fn) -{ - if (elt == pqueue_unpack_root(que)) { - pqueue_remove_root(que, elt, cmp_fn); - priority_queue_entry_init(elt); - return TRUE; - } else { - pqueue_remove_non_root(que, elt, cmp_fn); - priority_queue_entry_init(elt); - return FALSE; - } -} +extern bool +priority_queue_remove(struct priority_queue *pq, + struct priority_queue_entry *elt) __pqueue_overloadable; + /* - * Macro: priority_queue_entry_decrease - * - * WARNING: - * This function is badly named for a min-heap, as it means the element - * moves toward the root, which happens if the key value became smaller. + * Macro: priority_queue_entry_decreased * * Function: - * Decrease the priority of an element in the priority queue. Since the heap invariant is to always - * have the maximum element at the root, the most efficient way to implement this is to remove - * the element and re-insert it into the heap. + * Signal the priority queue that the entry priority has decreased. + * + * The new value for the element priority must have been set + * prior to calling this function. * - * For PRIORITY_QUEUE_BUILTIN_KEY, the new_key is passed into this routine since the priority is - * maintained by the data structure. For PRIORITY_QUEUE_GENERIC_KEY, the caller must update the priority - * in the element and then call this routine. For the new_key field, it must pass PRIORITY_QUEUE_KEY_NONE. * Header: - * priority_queue_entry_decrease(que, elt, new_key, cmp_fn) - * que + * priority_queue_entry_decreased(pq, elt) + * pq * elt - * new_key - * comparator function * Returns: * Whether the update caused the root or its key to change. 
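Putting the insertion and removal interface above together for a deadline heap (all names are assumptions for the sketch; per the note above, the key, which for this entry type is the deadline field, is set before calling priority_queue_insert):

    struct my_timer {
            struct priority_queue_entry_deadline   link;   /* heap linkage + key */
    };

    static struct priority_queue_deadline_min my_timers =
        PRIORITY_QUEUE_INITIALIZER;

    static bool
    my_timer_arm(struct my_timer *t, uint64_t deadline)
    {
            priority_queue_entry_init(&t->link);
            t->link.deadline = deadline;        /* key set prior to insertion */
            return priority_queue_insert(&my_timers, &t->link);
    }

    static struct my_timer *
    my_timer_pop(void)
    {
            /* caller is assumed to have checked priority_queue_empty() */
            return priority_queue_remove_min(&my_timers, struct my_timer, link);
    }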
*/ -static inline boolean_t -priority_queue_entry_decrease(struct priority_queue *que, priority_queue_entry_t elt, - priority_queue_key_t new_key, priority_queue_compare_fn_t cmp_fn) -{ - boolean_t was_root = priority_queue_remove(que, elt, cmp_fn); - /* Insert it back in the heap; insertion also causes the priority update in the element */ - priority_queue_insert(que, elt, new_key, cmp_fn); - return was_root; -} +extern bool +priority_queue_entry_decreased(struct priority_queue *pq, + struct priority_queue_entry *elt) __pqueue_overloadable; /* - * Macro: priority_queue_entry_increase - * - * WARNING: - * This function is badly named for a min-heap, as it means the element - * moves away from the root, which happens if the key value became larger. + * Macro: priority_queue_entry_increased * * Function: - * Increase the priority of an element in the priority queue. If the root is being increased, no change - * to the data structure is needed. For elements at any other level, unhook it from that level and - * re-merge it. + * Signal the priority queue that the entry priority has increased. + * + * The new value for the element priority must have been set + * prior to calling this function. * - * For PRIORITY_QUEUE_BUILTIN_KEY, the new_key is passed into this routine since the priority is - * maintained by the data structure. For PRIORITY_QUEUE_GENERIC_KEY, the caller must update the priority - * in the element and then call this routine. For the new_key field, it must pass PRIORITY_QUEUE_KEY_NONE. * Header: - * priority_queue_entry_increase(que, elt, new_key, cmp_fn) - * que + * priority_queue_entry_increased(pq, elt, new_key) + * pq * elt - * new_key - * comparator function * Returns: * Whether the update caused the root or its key to change. */ -static inline boolean_t -priority_queue_entry_increase(struct priority_queue *que, priority_queue_entry_t elt, - priority_queue_key_t new_key, priority_queue_compare_fn_t cmp_fn) -{ - if (elt == pqueue_unpack_root(que)) { - pqueue_update_key(que, elt, new_key); - return TRUE; - } - - /* Remove the element from its current level list */ - pqueue_list_remove(elt); - /* Re-insert the element into the heap with a merge */ - return priority_queue_insert(que, elt, new_key, cmp_fn); -} +extern bool +priority_queue_entry_increased(struct priority_queue *pq, + struct priority_queue_entry *elt) __pqueue_overloadable; -/* - * Min/Max nodes lookup and removal routines - * Since the data structure is unaware of the type of heap being constructed, it provides both the min - * and max variants of the lookup and removal routines. Both variants do the exact same operation and - * it is up to the callers to call the right variant which makes semantic sense for the type of heap. - */ -/* - * Macro: priority_queue_max - * Function: - * Lookup the max element in a priority queue. It simply returns the root of the - * priority queue. - * Header: - * priority_queue_max(q, type, field) - * q - * type of element in priority queue - * chain field in (*) - * Returns: - * max element - */ -#define priority_queue_max(q, type, field) ({ \ - assert(pqueue_is_max_heap(q)); \ - pqe_element(pqueue_unpack_root(q), type, field); \ -}) +#pragma mark priority_queue_sched_* -/* - * Macro: priority_queue_min - * Function: - * Lookup the min element in a priority queue. It simply returns the root of the - * priority queue. 
- * Header: - * priority_queue_min(q, type, field) - * q - * type of element in priority queue - * chain field in (*) - * Returns: - * min element - */ -#define priority_queue_min(q, type, field) ({ \ - assert(pqueue_is_min_heap(q)); \ - pqe_element(pqueue_unpack_root(q), type, field); \ -}) +__enum_decl(priority_queue_entry_sched_modifier_t, uint8_t, { + PRIORITY_QUEUE_ENTRY_NONE = 0, + PRIORITY_QUEUE_ENTRY_PREEMPTED = 1, +}); + +#define priority_queue_is_sched_heap(pq) _Generic(pq, \ + struct priority_queue_sched_min *: true, \ + struct priority_queue_sched_max *: true, \ + struct priority_queue_sched_stable_min *: true, \ + struct priority_queue_sched_stable_max *: true, \ + default: false) /* - * Macro: priority_queue_max_key + * Macro: priority_queue_entry_set_sched_pri + * * Function: - * Lookup the max key in a priority queue. + * Sets the scheduler priority on an entry supporting this concept. + * + * The priority is expected to fit on 8 bits. + * An optional sorting modifier. + * * Header: - * priority_queue_max_key(q) - * q - * Returns: - * max key + * priority_queue_entry_set_sched_pri(pq, elt, pri, modifier) + * pq + * elt + * pri + * modifier */ -#define priority_queue_max_key(q) ({ \ - assert(pqueue_is_max_heap(q)); \ - priority_queue_entry_key(q, pqueue_unpack_root(q)); \ -}) +#define priority_queue_entry_set_sched_pri(pq, elt, pri, modifier) \ +MACRO_BEGIN \ + static_assert(priority_queue_is_sched_heap(pq), "is a sched heap"); \ + (elt)->key = (priority_queue_key_t)(((pri) << 8) + (modifier)); \ +MACRO_END /* - * Macro: priority_queue_min_key + * Macro: priority_queue_entry_sched_pri + * * Function: - * Lookup the min key in a priority queue. + * Return the scheduler priority on an entry supporting this + * concept. + * * Header: - * priority_queue_min_key(q) - * q + * priority_queue_entry_sched_pri(pq, elt) + * pq + * elt + * * Returns: - * min key + * The scheduler priority of this entry */ -#define priority_queue_min_key(q) ({ \ - assert(pqueue_is_min_heap(q)); \ - priority_queue_entry_key(pqueue_unpack_root(q)); \ +#define priority_queue_entry_sched_pri(pq, elt) ({ \ + static_assert(priority_queue_is_sched_heap(pq), "is a sched heap"); \ + (priority_queue_key_t)((elt)->key >> 8); \ }) /* - * Macro: priority_queue_remove_max + * Macro: priority_queue_entry_sched_modifier + * * Function: - * Remove the max element in a priority queue. - * Uses the priority_queue_remove() routine to actually do the removal. + * Return the scheduler modifier on an entry supporting this + * concept. + * * Header: - * priority_queue_remove_max(q, type, field) - * q - * type of element in priority queue - * chain field in (*) + * priority_queue_entry_sched_modifier(pq, elt) + * pq + * elt + * * Returns: - * max element + * The scheduler priority of this entry */ -#define priority_queue_remove_max(q, type, field, cmp_fn) ({ \ - assert(pqueue_is_max_heap(q)); \ - pqe_element(pqueue_remove_root(q, pqueue_unpack_root(q), cmp_fn), type, field); \ +#define priority_queue_entry_sched_modifier(pq, elt) ({ \ + static_assert(priority_queue_is_sched_heap(pq), "is a sched heap"); \ + (priority_queue_entry_sched_modifier_t)(elt)->key; \ }) /* - * Macro: priority_queue_remove_min + * Macro: priority_queue_min_sched_pri + * * Function: - * Remove the min element in a priority queue. - * Uses the priority_queue_remove() routine to actually do the removal. + * Return the scheduler priority of the minimum element + * of a scheduler priority queue. 
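A short sketch of the sched-priority key packing above, reusing the sched max-heap from the earlier example (my_sched_heap and struct my_sched_elem are assumed names; the priority is expected to fit in 8 bits, and the modifier occupies the low byte of the 16-bit key):

    static bool
    my_sched_heap_add(struct my_sched_elem *e, int pri)
    {
            priority_queue_entry_init(&e->link);
            priority_queue_entry_set_sched_pri(&my_sched_heap, &e->link,
                pri, PRIORITY_QUEUE_ENTRY_NONE);    /* pri must fit in 8 bits */
            return priority_queue_insert(&my_sched_heap, &e->link);
    }

    /*
     * The root's priority can then be read back with
     * priority_queue_max_sched_pri(&my_sched_heap).
     */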
+ * * Header: - * priority_queue_remove_min(q, type, field) - * q - * type of element in priority queue - * chain field in (*) + * priority_queue_min_sched_pri(pq) + * pq + * * Returns: - * min element + * The scheduler priority of this entry */ -#define priority_queue_remove_min(q, type, field, cmp_fn) ({ \ - assert(pqueue_is_min_heap(q)); \ - pqe_element(pqueue_remove_root(q, pqueue_unpack_root(q), cmp_fn), type, field); \ +#define priority_queue_min_sched_pri(pq) ({ \ + static_assert(priority_queue_is_min_heap(pq), "queue is min heap"); \ + priority_queue_entry_sched_pri(pq, (pq)->pq_root); \ }) /* - * Macro: priority_queue_destroy + * Macro: priority_queue_max_sched_pri + * * Function: - * Destroy a priority queue safely. This routine accepts a callback - * to handle any cleanup for elements in the priority queue. The queue does - * not maintain its invariants while getting destroyed. The priority queue and - * the linkage nodes need to be re-initialized before re-using them. + * Return the scheduler priority of the maximum element + * of a scheduler priority queue. + * * Header: - * priority_queue_destroy(q, type, field, callback) - * q - * type of element in priority queue - * chain field in (*) - * callback for each element + * priority_queue_max_sched_pri(pq) + * pq * * Returns: - * None + * The scheduler priority of this entry */ -#define priority_queue_destroy(q, type, field, callback, ...) \ - pqueue_destroy(q, offsetof(type, field), callback, ##__VA_ARGS__) +#define priority_queue_max_sched_pri(pq) ({ \ + static_assert(priority_queue_is_max_heap(pq), "queue is max heap"); \ + priority_queue_entry_sched_pri(pq, (pq)->pq_root); \ +}) + + +#pragma mark implementation details + +#define PRIORITY_QUEUE_MAKE_BASE(pqueue_t, pqelem_t) \ + \ +__pqueue_overloadable extern void \ +_priority_queue_destroy(pqueue_t pq, uintptr_t offset, void (^cb)(void *)); \ + \ +__pqueue_overloadable extern bool \ +priority_queue_insert(pqueue_t que, pqelem_t elt); \ + \ +__pqueue_overloadable extern pqelem_t \ +_priority_queue_remove_root(pqueue_t que); \ + \ +__pqueue_overloadable extern bool \ +priority_queue_remove(pqueue_t que, pqelem_t elt); \ + \ +__pqueue_overloadable extern bool \ +priority_queue_entry_decreased(pqueue_t que, pqelem_t elt); \ + \ +__pqueue_overloadable extern bool \ +priority_queue_entry_increased(pqueue_t que, pqelem_t elt) + +#define PRIORITY_QUEUE_MAKE(pqueue_t, pqelem_t) \ +__pqueue_overloadable \ +static inline void \ +priority_queue_init(pqueue_t que) \ +{ \ + __builtin_bzero(que, sizeof(*que)); \ +} \ + \ +PRIORITY_QUEUE_MAKE_BASE(pqueue_t, pqelem_t) + +#define PRIORITY_QUEUE_MAKE_CB(pqueue_t, pqelem_t) \ +__pqueue_overloadable \ +static inline void \ +priority_queue_init(pqueue_t pq, priority_queue_compare_fn_t cmp_fn) \ +{ \ + pq->pq_root = NULL; \ + pq->pq_cmp_fn = cmp_fn; \ +} \ + \ +PRIORITY_QUEUE_MAKE_BASE(pqueue_t, pqelem_t) + +PRIORITY_QUEUE_MAKE_CB(struct priority_queue_min *, priority_queue_entry_t); +PRIORITY_QUEUE_MAKE_CB(struct priority_queue_max *, priority_queue_entry_t); + +PRIORITY_QUEUE_MAKE(struct priority_queue_deadline_min *, priority_queue_entry_deadline_t); +PRIORITY_QUEUE_MAKE(struct priority_queue_deadline_max *, priority_queue_entry_deadline_t); + +PRIORITY_QUEUE_MAKE(struct priority_queue_sched_min *, priority_queue_entry_sched_t); +PRIORITY_QUEUE_MAKE(struct priority_queue_sched_max *, priority_queue_entry_sched_t); + +PRIORITY_QUEUE_MAKE(struct priority_queue_sched_stable_min *, priority_queue_entry_stable_t); +PRIORITY_QUEUE_MAKE(struct 
priority_queue_sched_stable_max *, priority_queue_entry_stable_t); __END_DECLS +#pragma GCC visibility pop + #endif /* _KERN_PRIORITY_QUEUE_H_ */ diff --git a/osfmk/kern/processor.c b/osfmk/kern/processor.c index 85c506f04..c5d47ff84 100644 --- a/osfmk/kern/processor.c +++ b/osfmk/kern/processor.c @@ -67,16 +67,20 @@ #include #include #include +#include +#include +#include #include #include #include #include #include #include -#include -#include +#include +#if KPERF +#include +#endif /* KPERF */ #include -#include #include @@ -93,34 +97,45 @@ #include struct processor_set pset0; -struct pset_node pset_node0; -decl_simple_lock_data(static, pset_node_lock); - -lck_grp_t pset_lck_grp; - -queue_head_t tasks; -queue_head_t terminated_tasks; /* To be used ONLY for stackshot. */ -queue_head_t corpse_tasks; -int tasks_count; -int terminated_tasks_count; -queue_head_t threads; -int threads_count; -decl_lck_mtx_data(, tasks_threads_lock); -decl_lck_mtx_data(, tasks_corpse_lock); - -processor_t processor_list; -unsigned int processor_count; -static processor_t processor_list_tail; -decl_simple_lock_data(, processor_list_lock); - -uint32_t processor_avail_count; -uint32_t processor_avail_count_user; - -processor_t master_processor; +struct pset_node pset_node0; + +static SIMPLE_LOCK_DECLARE(pset_node_lock, 0); +LCK_GRP_DECLARE(pset_lck_grp, "pset"); + +queue_head_t tasks; +queue_head_t terminated_tasks; /* To be used ONLY for stackshot. */ +queue_head_t corpse_tasks; +int tasks_count; +int terminated_tasks_count; +queue_head_t threads; +int threads_count; +LCK_GRP_DECLARE(task_lck_grp, "task"); +LCK_ATTR_DECLARE(task_lck_attr, 0, 0); +LCK_MTX_DECLARE_ATTR(tasks_threads_lock, &task_lck_grp, &task_lck_attr); +LCK_MTX_DECLARE_ATTR(tasks_corpse_lock, &task_lck_grp, &task_lck_attr); + +processor_t processor_list; +unsigned int processor_count; +static processor_t processor_list_tail; +SIMPLE_LOCK_DECLARE(processor_list_lock, 0); + +uint32_t processor_avail_count; +uint32_t processor_avail_count_user; +uint32_t primary_processor_avail_count; +uint32_t primary_processor_avail_count_user; + int master_cpu = 0; -boolean_t sched_stats_active = FALSE; +struct processor PERCPU_DATA(processor); processor_t processor_array[MAX_SCHED_CPUS] = { 0 }; +processor_set_t pset_array[MAX_PSETS] = { 0 }; + +static timer_call_func_t running_timer_funcs[] = { + [RUNNING_TIMER_QUANTUM] = thread_quantum_expire, + [RUNNING_TIMER_KPERF] = kperf_timer_expire, +}; +static_assert(sizeof(running_timer_funcs) / sizeof(running_timer_funcs[0]) + == RUNNING_TIMER_MAX, "missing running timer function"); #if defined(CONFIG_XNUPOST) kern_return_t ipi_test(void); @@ -158,10 +173,6 @@ int sched_enable_smt = 1; void processor_bootstrap(void) { - lck_grp_init(&pset_lck_grp, "pset", LCK_GRP_ATTR_NULL); - - simple_lock_init(&pset_node_lock, 0); - pset_node0.psets = &pset0; pset_init(&pset0, &pset_node0); @@ -170,10 +181,6 @@ processor_bootstrap(void) queue_init(&threads); queue_init(&corpse_tasks); - simple_lock_init(&processor_list_lock, 0); - - master_processor = cpu_to_processor(master_cpu); - processor_init(master_processor, master_cpu, &pset0); } @@ -184,26 +191,25 @@ processor_bootstrap(void) */ void processor_init( - processor_t processor, - int cpu_id, - processor_set_t pset) + processor_t processor, + int cpu_id, + processor_set_t pset) { spl_t s; + assert(cpu_id < MAX_SCHED_CPUS); + processor->cpu_id = cpu_id; + if (processor != master_processor) { /* Scheduler state for master_processor initialized in sched_init() */ 
SCHED(processor_init)(processor); } - assert(cpu_id < MAX_SCHED_CPUS); - processor->state = PROCESSOR_OFF_LINE; processor->active_thread = processor->startup_thread = processor->idle_thread = THREAD_NULL; processor->processor_set = pset; processor_state_update_idle(processor); processor->starting_pri = MINPRI; - processor->cpu_id = cpu_id; - timer_call_setup(&processor->quantum_timer, thread_quantum_expire, processor); processor->quantum_end = UINT64_MAX; processor->deadline = UINT64_MAX; processor->first_timeslice = FALSE; @@ -213,11 +219,18 @@ processor_init( processor->is_SMT = false; processor->is_recommended = true; processor->processor_self = IP_NULL; - processor_data_init(processor); processor->processor_list = NULL; - processor->cpu_quiesce_state = CPU_QUIESCE_COUNTER_NONE; - processor->cpu_quiesce_last_checkin = 0; processor->must_idle = false; + processor->running_timers_active = false; + for (int i = 0; i < RUNNING_TIMER_MAX; i++) { + timer_call_setup(&processor->running_timers[i], + running_timer_funcs[i], processor); + running_timer_clear(processor, i); + } + + timer_init(&processor->idle_state); + timer_init(&processor->system_state); + timer_init(&processor->user_state); s = splsched(); pset_lock(pset); @@ -246,6 +259,8 @@ processor_init( simple_unlock(&processor_list_lock); } +bool system_is_SMT = false; + void processor_set_primary( processor_t processor, @@ -267,9 +282,16 @@ processor_set_primary( primary->is_SMT = TRUE; processor->is_SMT = TRUE; + if (!system_is_SMT) { + system_is_SMT = true; + } + processor_set_t pset = processor->processor_set; spl_t s = splsched(); pset_lock(pset); + if (!pset->is_SMT) { + pset->is_SMT = true; + } bit_clear(pset->primary_map, processor->cpu_id); pset_unlock(pset); splx(s); @@ -283,16 +305,59 @@ processor_pset( return processor->processor_set; } +#if CONFIG_SCHED_EDGE + +cluster_type_t +pset_type_for_id(uint32_t cluster_id) +{ + return pset_array[cluster_id]->pset_type; +} + +/* + * Processor foreign threads + * + * With the Edge scheduler, each pset maintains a bitmap of processors running threads + * which are foreign to the pset/cluster. A thread is defined as foreign for a cluster + * if its of a different type than its preferred cluster type (E/P). The bitmap should + * be updated every time a new thread is assigned to run on a processor. + * + * This bitmap allows the Edge scheduler to quickly find CPUs running foreign threads + * for rebalancing. + */ +static void +processor_state_update_running_foreign(processor_t processor, thread_t thread) +{ + cluster_type_t current_processor_type = pset_type_for_id(processor->processor_set->pset_cluster_id); + cluster_type_t thread_type = pset_type_for_id(sched_edge_thread_preferred_cluster(thread)); + + /* Update the bitmap for the pset only for unbounded non-RT threads. 
*/ + if ((processor->current_pri < BASEPRI_RTQUEUES) && (thread->bound_processor == PROCESSOR_NULL) && (current_processor_type != thread_type)) { + bit_set(processor->processor_set->cpu_running_foreign, processor->cpu_id); + } else { + bit_clear(processor->processor_set->cpu_running_foreign, processor->cpu_id); + } +} +#else /* CONFIG_SCHED_EDGE */ +static void +processor_state_update_running_foreign(__unused processor_t processor, __unused thread_t thread) +{ +} +#endif /* CONFIG_SCHED_EDGE */ + void processor_state_update_idle(processor_t processor) { processor->current_pri = IDLEPRI; processor->current_sfi_class = SFI_CLASS_KERNEL; processor->current_recommended_pset_type = PSET_SMP; +#if CONFIG_THREAD_GROUPS + processor->current_thread_group = NULL; +#endif processor->current_perfctl_class = PERFCONTROL_CLASS_IDLE; processor->current_urgency = THREAD_URGENCY_NONE; processor->current_is_NO_SMT = false; processor->current_is_bound = false; + os_atomic_store(&processor->processor_set->cpu_running_buckets[processor->cpu_id], TH_BUCKET_SCHED_MAX, relaxed); } void @@ -301,25 +366,30 @@ processor_state_update_from_thread(processor_t processor, thread_t thread) processor->current_pri = thread->sched_pri; processor->current_sfi_class = thread->sfi_class; processor->current_recommended_pset_type = recommended_pset_type(thread); + processor_state_update_running_foreign(processor, thread); + /* Since idle and bound threads are not tracked by the edge scheduler, ignore when those threads go on-core */ + sched_bucket_t bucket = ((thread->state & TH_IDLE) || (thread->bound_processor != PROCESSOR_NULL)) ? TH_BUCKET_SCHED_MAX : thread->th_sched_bucket; + os_atomic_store(&processor->processor_set->cpu_running_buckets[processor->cpu_id], bucket, relaxed); + +#if CONFIG_THREAD_GROUPS + processor->current_thread_group = thread_group_get(thread); +#endif processor->current_perfctl_class = thread_get_perfcontrol_class(thread); processor->current_urgency = thread_get_urgency(thread, NULL, NULL); -#if DEBUG || DEVELOPMENT - processor->current_is_NO_SMT = (thread->sched_flags & TH_SFLAG_NO_SMT) || (thread->task->t_flags & TF_NO_SMT); -#else - processor->current_is_NO_SMT = (thread->sched_flags & TH_SFLAG_NO_SMT); -#endif + processor->current_is_NO_SMT = thread_no_smt(thread); processor->current_is_bound = thread->bound_processor != PROCESSOR_NULL; } void processor_state_update_explicit(processor_t processor, int pri, sfi_class_id_t sfi_class, - pset_cluster_type_t pset_type, perfcontrol_class_t perfctl_class, thread_urgency_t urgency) + pset_cluster_type_t pset_type, perfcontrol_class_t perfctl_class, thread_urgency_t urgency, sched_bucket_t bucket) { processor->current_pri = pri; processor->current_sfi_class = sfi_class; processor->current_recommended_pset_type = pset_type; processor->current_perfctl_class = perfctl_class; processor->current_urgency = urgency; + os_atomic_store(&processor->processor_set->cpu_running_buckets[processor->cpu_id], bucket, relaxed); } pset_node_t @@ -337,7 +407,7 @@ pset_create( return processor_pset(master_processor); } - processor_set_t *prev, pset = kalloc(sizeof(*pset)); + processor_set_t *prev, pset = zalloc_permanent_type(struct processor_set); if (pset != PROCESSOR_SET_NULL) { pset_init(pset, node); @@ -358,7 +428,7 @@ pset_create( } /* - * Find processor set in specified node with specified cluster_id. + * Find processor set with specified cluster_id. * Returns default_pset if not found. 
*/ processor_set_t @@ -378,7 +448,7 @@ pset_find( } pset = pset->pset_list; } - } while ((node = node->node_list) != NULL); + } while (pset == NULL && (node = node->node_list) != NULL); simple_unlock(&pset_node_lock); if (pset == NULL) { return default_pset; @@ -386,6 +456,7 @@ pset_find( return pset; } + /* * Initialize the given processor_set structure. */ @@ -394,20 +465,32 @@ pset_init( processor_set_t pset, pset_node_t node) { + static uint32_t pset_count = 0; + if (pset != &pset0) { - /* Scheduler state for pset0 initialized in sched_init() */ + /* + * Scheduler runqueue initialization for non-boot psets. + * This initialization for pset0 happens in sched_init(). + */ SCHED(pset_init)(pset); SCHED(rt_init)(pset); } pset->online_processor_count = 0; pset->load_average = 0; + bzero(&pset->pset_load_average, sizeof(pset->pset_load_average)); +#if CONFIG_SCHED_EDGE + bzero(&pset->pset_execution_time, sizeof(pset->pset_execution_time)); +#endif /* CONFIG_SCHED_EDGE */ pset->cpu_set_low = pset->cpu_set_hi = 0; pset->cpu_set_count = 0; pset->last_chosen = -1; pset->cpu_bitmask = 0; pset->recommended_bitmask = 0; pset->primary_map = 0; + pset->realtime_map = 0; + pset->cpu_running_foreign = 0; + for (uint i = 0; i < PROCESSOR_STATE_LEN; i++) { pset->cpu_state_map[i] = 0; } @@ -422,12 +505,24 @@ pset_init( pset->pset_name_self = IP_NULL; pset->pset_list = PROCESSOR_SET_NULL; pset->node = node; - pset->pset_cluster_type = PSET_SMP; - pset->pset_cluster_id = 0; + + /* + * The pset_cluster_type & pset_cluster_id for all psets + * on the platform are initialized as part of the SCHED(init). + * That works well for small cluster platforms; for large cluster + * count systems, it might be cleaner to do all the setup + * dynamically in SCHED(pset_init). + * + * + */ + pset->is_SMT = false; simple_lock(&pset_node_lock, LCK_GRP_NULL); - node->pset_count++; + pset->pset_id = pset_count++; + bit_set(node->pset_map, pset->pset_id); simple_unlock(&pset_node_lock); + + pset_array[pset->pset_id] = pset; } kern_return_t @@ -529,18 +624,18 @@ processor_info( cpu_load_info = (processor_cpu_load_info_t) info; if (precise_user_kernel_time) { cpu_load_info->cpu_ticks[CPU_STATE_USER] = - (uint32_t)(timer_grab(&PROCESSOR_DATA(processor, user_state)) / hz_tick_interval); + (uint32_t)(timer_grab(&processor->user_state) / hz_tick_interval); cpu_load_info->cpu_ticks[CPU_STATE_SYSTEM] = - (uint32_t)(timer_grab(&PROCESSOR_DATA(processor, system_state)) / hz_tick_interval); + (uint32_t)(timer_grab(&processor->system_state) / hz_tick_interval); } else { - uint64_t tval = timer_grab(&PROCESSOR_DATA(processor, user_state)) + - timer_grab(&PROCESSOR_DATA(processor, system_state)); + uint64_t tval = timer_grab(&processor->user_state) + + timer_grab(&processor->system_state); cpu_load_info->cpu_ticks[CPU_STATE_USER] = (uint32_t)(tval / hz_tick_interval); cpu_load_info->cpu_ticks[CPU_STATE_SYSTEM] = 0; } - idle_state = &PROCESSOR_DATA(processor, idle_state); + idle_state = &processor->idle_state; idle_time_snapshot1 = timer_grab(idle_state); idle_time_tstamp1 = idle_state->tstamp; @@ -553,7 +648,7 @@ processor_info( * have evidence that the timer is being updated * concurrently, we consider its value up-to-date. 
*/ - if (PROCESSOR_DATA(processor, current_state) != idle_state) { + if (processor->current_state != idle_state) { cpu_load_info->cpu_ticks[CPU_STATE_IDLE] = (uint32_t)(idle_time_snapshot1 / hz_tick_interval); } else if ((idle_time_snapshot1 != (idle_time_snapshot2 = timer_grab(idle_state))) || @@ -629,12 +724,14 @@ processor_start( scheduler_disable = true; } + ml_cpu_begin_state_transition(processor->cpu_id); s = splsched(); pset = processor->processor_set; pset_lock(pset); if (processor->state != PROCESSOR_OFF_LINE) { pset_unlock(pset); splx(s); + ml_cpu_end_state_transition(processor->cpu_id); return KERN_FAILURE; } @@ -654,6 +751,7 @@ processor_start( pset_update_processor_state(pset, processor, PROCESSOR_OFF_LINE); pset_unlock(pset); splx(s); + ml_cpu_end_state_transition(processor->cpu_id); return result; } @@ -673,6 +771,7 @@ processor_start( pset_update_processor_state(pset, processor, PROCESSOR_OFF_LINE); pset_unlock(pset); splx(s); + ml_cpu_end_state_transition(processor->cpu_id); return result; } @@ -693,6 +792,7 @@ processor_start( ipc_processor_init(processor); } + ml_broadcast_cpu_event(CPU_BOOT_REQUESTED, processor->cpu_id); result = cpu_start(processor->cpu_id); if (result != KERN_SUCCESS) { s = splsched(); @@ -700,6 +800,7 @@ processor_start( pset_update_processor_state(pset, processor, PROCESSOR_OFF_LINE); pset_unlock(pset); splx(s); + ml_cpu_end_state_transition(processor->cpu_id); return result; } @@ -709,6 +810,8 @@ processor_start( } ipc_processor_enable(processor); + ml_cpu_end_state_transition(processor->cpu_id); + ml_broadcast_cpu_event(CPU_ACTIVE, processor->cpu_id); return KERN_SUCCESS; } @@ -1072,7 +1175,7 @@ processor_set_policy_disable( * * Common internals for processor_set_{threads,tasks} */ -kern_return_t +static kern_return_t processor_set_things( processor_set_t pset, void **thing_list, @@ -1310,17 +1413,17 @@ processor_set_things( return KERN_SUCCESS; } - /* * processor_set_tasks: * * List all tasks in the processor set. */ -kern_return_t -processor_set_tasks( +static kern_return_t +processor_set_tasks_internal( processor_set_t pset, task_array_t *task_list, - mach_msg_type_number_t *count) + mach_msg_type_number_t *count, + int flavor) { kern_return_t ret; mach_msg_type_number_t i; @@ -1331,12 +1434,66 @@ processor_set_tasks( } /* do the conversion that Mig should handle */ - for (i = 0; i < *count; i++) { - (*task_list)[i] = (task_t)convert_task_to_port((*task_list)[i]); + switch (flavor) { + case TASK_FLAVOR_CONTROL: + for (i = 0; i < *count; i++) { + (*task_list)[i] = (task_t)convert_task_to_port((*task_list)[i]); + } + break; + case TASK_FLAVOR_READ: + for (i = 0; i < *count; i++) { + (*task_list)[i] = (task_t)convert_task_read_to_port((*task_list)[i]); + } + break; + case TASK_FLAVOR_INSPECT: + for (i = 0; i < *count; i++) { + (*task_list)[i] = (task_t)convert_task_inspect_to_port((*task_list)[i]); + } + break; + case TASK_FLAVOR_NAME: + for (i = 0; i < *count; i++) { + (*task_list)[i] = (task_t)convert_task_name_to_port((*task_list)[i]); + } + break; + default: + return KERN_INVALID_ARGUMENT; } + return KERN_SUCCESS; } +kern_return_t +processor_set_tasks( + processor_set_t pset, + task_array_t *task_list, + mach_msg_type_number_t *count) +{ + return processor_set_tasks_internal(pset, task_list, count, TASK_FLAVOR_CONTROL); +} + +/* + * processor_set_tasks_with_flavor: + * + * Based on flavor, return task/inspect/read port to all tasks in the processor set. 
+ */ +kern_return_t +processor_set_tasks_with_flavor( + processor_set_t pset, + mach_task_flavor_t flavor, + task_array_t *task_list, + mach_msg_type_number_t *count) +{ + switch (flavor) { + case TASK_FLAVOR_CONTROL: + case TASK_FLAVOR_READ: + case TASK_FLAVOR_INSPECT: + case TASK_FLAVOR_NAME: + return processor_set_tasks_internal(pset, task_list, count, flavor); + default: + return KERN_INVALID_ARGUMENT; + } +} + /* * processor_set_threads: * @@ -1351,7 +1508,7 @@ processor_set_threads( { return KERN_FAILURE; } -#elif defined(CONFIG_EMBEDDED) +#elif !defined(XNU_TARGET_OS_OSX) kern_return_t processor_set_threads( __unused processor_set_t pset, @@ -1419,39 +1576,119 @@ pset_reference( return; } +#if CONFIG_THREAD_GROUPS -#if CONFIG_SCHED_CLUTCH - -/* - * The clutch scheduler decides the recommendation of a thread based - * on its thread group's properties and recommendations. The only thread - * level property it looks at is the bucket for the thread to implement - * the policy of not running Utility & BG buckets on the P-cores. Any - * other policy being added to this routine might need to be reflected - * in places such as sched_clutch_hierarchy_thread_pset() & - * sched_clutch_migrate_thread_group() which rely on getting the recommendations - * right. - * - * Note: The current implementation does not support TH_SFLAG_ECORE_ONLY & - * TH_SFLAG_PCORE_ONLY flags which are used for debugging utilities. A similar - * version of that functionality can be implemented by putting these flags - * on a thread group instead of individual thread basis. - * - */ pset_cluster_type_t -recommended_pset_type(thread_t thread) +thread_group_pset_recommendation(__unused struct thread_group *tg, __unused cluster_type_t recommendation) { - (void)thread; +#if __AMP__ + switch (recommendation) { + case CLUSTER_TYPE_SMP: + default: + /* + * In case of SMP recommendations, check if the thread + * group has special flags which restrict it to the E + * cluster. 
+ */ + if (thread_group_smp_restricted(tg)) { + return PSET_AMP_E; + } + return PSET_AMP_P; + case CLUSTER_TYPE_E: + return PSET_AMP_E; + case CLUSTER_TYPE_P: + return PSET_AMP_P; + } +#else /* __AMP__ */ return PSET_SMP; +#endif /* __AMP__ */ } -#else /* CONFIG_SCHED_CLUTCH */ +#endif pset_cluster_type_t recommended_pset_type(thread_t thread) { +#if CONFIG_THREAD_GROUPS && __AMP__ + if (thread == THREAD_NULL) { + return PSET_AMP_E; + } + + if (thread->sched_flags & TH_SFLAG_ECORE_ONLY) { + return PSET_AMP_E; + } else if (thread->sched_flags & TH_SFLAG_PCORE_ONLY) { + return PSET_AMP_P; + } + + if (thread->base_pri <= MAXPRI_THROTTLE) { + if (os_atomic_load(&sched_perfctl_policy_bg, relaxed) != SCHED_PERFCTL_POLICY_FOLLOW_GROUP) { + return PSET_AMP_E; + } + } else if (thread->base_pri <= BASEPRI_UTILITY) { + if (os_atomic_load(&sched_perfctl_policy_util, relaxed) != SCHED_PERFCTL_POLICY_FOLLOW_GROUP) { + return PSET_AMP_E; + } + } + +#if DEVELOPMENT || DEBUG + extern bool system_ecore_only; + extern processor_set_t pcore_set; + if (system_ecore_only) { + if (thread->task->pset_hint == pcore_set) { + return PSET_AMP_P; + } + return PSET_AMP_E; + } +#endif + + struct thread_group *tg = thread_group_get(thread); + cluster_type_t recommendation = thread_group_recommendation(tg); + switch (recommendation) { + case CLUSTER_TYPE_SMP: + default: + if (thread->task == kernel_task) { + return PSET_AMP_E; + } + return PSET_AMP_P; + case CLUSTER_TYPE_E: + return PSET_AMP_E; + case CLUSTER_TYPE_P: + return PSET_AMP_P; + } +#else (void)thread; return PSET_SMP; +#endif +} + +#if CONFIG_THREAD_GROUPS && __AMP__ + +void +sched_perfcontrol_inherit_recommendation_from_tg(perfcontrol_class_t perfctl_class, boolean_t inherit) +{ + sched_perfctl_class_policy_t sched_policy = inherit ? 
SCHED_PERFCTL_POLICY_FOLLOW_GROUP : SCHED_PERFCTL_POLICY_RESTRICT_E; + + KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_AMP_PERFCTL_POLICY_CHANGE) | DBG_FUNC_NONE, perfctl_class, sched_policy, 0, 0); + + switch (perfctl_class) { + case PERFCONTROL_CLASS_UTILITY: + os_atomic_store(&sched_perfctl_policy_util, sched_policy, relaxed); + break; + case PERFCONTROL_CLASS_BACKGROUND: + os_atomic_store(&sched_perfctl_policy_bg, sched_policy, relaxed); + break; + default: + panic("perfctl_class invalid"); + break; + } +} + +#elif defined(__arm64__) + +/* Define a stub routine since this symbol is exported on all arm64 platforms */ +void +sched_perfcontrol_inherit_recommendation_from_tg(__unused perfcontrol_class_t perfctl_class, __unused boolean_t inherit) +{ } -#endif /* CONFIG_SCHED_CLUTCH */ +#endif /* defined(__arm64__) */ diff --git a/osfmk/kern/processor.h b/osfmk/kern/processor.h index faac9b224..698deb50b 100644 --- a/osfmk/kern/processor.h +++ b/osfmk/kern/processor.h @@ -77,13 +77,14 @@ #include #include #include +#include #include #include #include +#include #include -#include -#include #include +#include #include #include @@ -153,45 +154,83 @@ typedef enum { #endif } pset_cluster_type_t; +#if __AMP__ + +typedef enum { + SCHED_PERFCTL_POLICY_DEFAULT, /* static policy: set at boot */ + SCHED_PERFCTL_POLICY_FOLLOW_GROUP, /* dynamic policy: perfctl_class follows thread group across amp clusters */ + SCHED_PERFCTL_POLICY_RESTRICT_E, /* dynamic policy: limits perfctl_class to amp e cluster */ +} sched_perfctl_class_policy_t; + +extern _Atomic sched_perfctl_class_policy_t sched_perfctl_policy_util; +extern _Atomic sched_perfctl_class_policy_t sched_perfctl_policy_bg; + +#endif /* __AMP__ */ + typedef bitmap_t cpumap_t; +#if __arm64__ + +/* + * pset_execution_time_t + * + * The pset_execution_time_t type is used to maintain the average + * execution time of threads on a pset. Since the avg. execution time is + * updated from contexts where the pset lock is not held, it uses a + * double-wide RMW loop to update these values atomically. 
+ */ +typedef union { + struct { + uint64_t pset_avg_thread_execution_time; + uint64_t pset_execution_time_last_update; + }; + unsigned __int128 pset_execution_time_packed; +} pset_execution_time_t; + +#endif /* __arm64__ */ + struct processor_set { + int pset_id; int online_processor_count; - int load_average; - int cpu_set_low, cpu_set_hi; int cpu_set_count; int last_chosen; + + uint64_t load_average; + uint64_t pset_load_average[TH_BUCKET_SCHED_MAX]; + uint64_t pset_load_last_update; cpumap_t cpu_bitmask; cpumap_t recommended_bitmask; cpumap_t cpu_state_map[PROCESSOR_STATE_LEN]; cpumap_t primary_map; + cpumap_t realtime_map; + cpumap_t cpu_running_foreign; + sched_bucket_t cpu_running_buckets[MAX_CPUS]; + #define SCHED_PSET_TLOCK (1) -#if __SMP__ #if defined(SCHED_PSET_TLOCK) /* TODO: reorder struct for temporal cache locality */ __attribute__((aligned(128))) lck_ticket_t sched_lock; #else /* SCHED_PSET_TLOCK*/ __attribute__((aligned(128))) lck_spin_t sched_lock; /* lock for above */ #endif /* SCHED_PSET_TLOCK*/ -#endif #if defined(CONFIG_SCHED_TRADITIONAL) || defined(CONFIG_SCHED_MULTIQ) struct run_queue pset_runq; /* runq for this processor set */ #endif struct rt_queue rt_runq; /* realtime runq for this processor set */ #if CONFIG_SCHED_CLUTCH - struct sched_clutch_root pset_clutch_root; /* clutch hierarchy root */ + struct sched_clutch_root pset_clutch_root; /* clutch hierarchy root */ #endif /* CONFIG_SCHED_CLUTCH */ #if defined(CONFIG_SCHED_TRADITIONAL) - int pset_runq_bound_count; + int pset_runq_bound_count; /* # of threads in runq bound to any processor in pset */ #endif /* CPUs that have been sent an unacknowledged remote AST for scheduling purposes */ - cpumap_t pending_AST_URGENT_cpu_mask; - cpumap_t pending_AST_PREEMPT_cpu_mask; + cpumap_t pending_AST_URGENT_cpu_mask; + cpumap_t pending_AST_PREEMPT_cpu_mask; #if defined(CONFIG_SCHED_DEFERRED_AST) /* * A separate mask, for ASTs that we may be able to cancel. This is dependent on @@ -204,9 +243,9 @@ struct processor_set { * of spurious ASTs in the system, and let processors spend longer periods in * IDLE. */ - cpumap_t pending_deferred_AST_cpu_mask; + cpumap_t pending_deferred_AST_cpu_mask; #endif - cpumap_t pending_spill_cpu_mask; + cpumap_t pending_spill_cpu_mask; struct ipc_port * pset_self; /* port for operations */ struct ipc_port * pset_name_self; /* port for information */ @@ -214,95 +253,154 @@ struct processor_set { processor_set_t pset_list; /* chain of associated psets */ pset_node_t node; uint32_t pset_cluster_id; + + /* + * Currently the scheduler uses a mix of pset_cluster_type_t & cluster_type_t + * for recommendations etc. It might be useful to unify these as a single type. 
+ */ pset_cluster_type_t pset_cluster_type; + cluster_type_t pset_type; + +#if CONFIG_SCHED_EDGE + bitmap_t foreign_psets[BITMAP_LEN(MAX_PSETS)]; + sched_clutch_edge sched_edges[MAX_PSETS]; + pset_execution_time_t pset_execution_time[TH_BUCKET_SCHED_MAX]; +#endif /* CONFIG_SCHED_EDGE */ + bool is_SMT; /* pset contains SMT processors */ }; extern struct processor_set pset0; +typedef bitmap_t pset_map_t; + struct pset_node { processor_set_t psets; /* list of associated psets */ - uint32_t pset_count; /* count of associated psets */ - pset_node_t nodes; /* list of associated subnodes */ - pset_node_t node_list; /* chain of associated nodes */ + pset_node_t nodes; /* list of associated subnodes */ + pset_node_t node_list; /* chain of associated nodes */ + + pset_node_t parent; - pset_node_t parent; + pset_map_t pset_map; /* map of associated psets */ + _Atomic pset_map_t pset_idle_map; /* psets with at least one IDLE CPU */ + _Atomic pset_map_t pset_idle_primary_map; /* psets with at least one IDLE primary CPU */ + _Atomic pset_map_t pset_non_rt_map; /* psets with at least one available CPU not running a realtime thread */ + _Atomic pset_map_t pset_non_rt_primary_map;/* psets with at least one available primary CPU not running a realtime thread */ }; extern struct pset_node pset_node0; -extern queue_head_t tasks, terminated_tasks, threads, corpse_tasks; /* Terminated tasks are ONLY for stackshot */ -extern int tasks_count, terminated_tasks_count, threads_count; +extern queue_head_t tasks, threads, corpse_tasks; +extern int tasks_count, terminated_tasks_count, threads_count; decl_lck_mtx_data(extern, tasks_threads_lock); decl_lck_mtx_data(extern, tasks_corpse_lock); +/* + * The terminated tasks queue should only be inspected elsewhere by stackshot. + */ +extern queue_head_t terminated_tasks; + struct processor { processor_state_t state; /* See above */ bool is_SMT; bool is_recommended; - struct thread *active_thread; /* thread running on processor */ - struct thread *idle_thread; /* this processor's idle thread. */ - struct thread *startup_thread; + bool current_is_NO_SMT; /* cached TH_SFLAG_NO_SMT of current thread */ + bool current_is_bound; /* current thread is bound to this processor */ + struct thread *active_thread; /* thread running on processor */ + struct thread *idle_thread; /* this processor's idle thread. */ + struct thread *startup_thread; processor_set_t processor_set; /* assigned set */ + /* + * XXX All current_* fields should be grouped together, as they're + * updated at the same time. + */ int current_pri; /* priority of current thread */ sfi_class_id_t current_sfi_class; /* SFI class of current thread */ perfcontrol_class_t current_perfctl_class; /* Perfcontrol class for current thread */ - pset_cluster_type_t current_recommended_pset_type; /* Cluster type recommended for current thread */ + /* + * The cluster type recommended for the current thread. 
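As a sketch of how the per-node maps added above (pset_idle_map, pset_idle_primary_map, pset_non_rt_map, pset_non_rt_primary_map) can be consumed, the snippet below scans for a cluster advertising an idle primary CPU without taking any pset lock. The helper name is made up and the bare uint64_t map is a stand-in for the kernel's bitmap_t-based pset_map_t; in the kernel the hint is advisory, so the chosen pset still has to be locked and re-validated before dispatching to it.

#include <stdatomic.h>
#include <stdint.h>
#include <strings.h>    /* ffsll() */

typedef uint64_t pset_map_t;    /* stand-in for the kernel's pset_map_t */

/* Return the id of some pset currently advertising an idle primary CPU, or -1. */
static int
pick_idle_primary_pset(_Atomic pset_map_t *pset_idle_primary_map)
{
    pset_map_t map = atomic_load_explicit(pset_idle_primary_map, memory_order_relaxed);

    /* ffsll() is 1-based; bit N of the map corresponds to pset_id N. */
    return map ? ffsll((long long)map) - 1 : -1;
}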
+ */ + pset_cluster_type_t current_recommended_pset_type; thread_urgency_t current_urgency; /* cached urgency of current thread */ - bool current_is_NO_SMT; /* cached TH_SFLAG_NO_SMT of current thread */ - bool current_is_bound; /* current thread is bound to this processor */ +#if CONFIG_SCHED_TRADITIONAL + int runq_bound_count; /* # of threads bound to this processor */ +#endif /* CONFIG_SCHED_TRADITIONAL */ + +#if CONFIG_THREAD_GROUPS + struct thread_group *current_thread_group; /* thread_group of current thread */ +#endif int starting_pri; /* priority of current thread as it was when scheduled */ int cpu_id; /* platform numeric id */ - cpu_quiescent_state_t cpu_quiesce_state; - uint64_t cpu_quiesce_last_checkin; - timer_call_data_t quantum_timer; /* timer for quantum expiration */ - uint64_t quantum_end; /* time when current quantum ends */ - uint64_t last_dispatch; /* time of last dispatch */ + uint64_t quantum_end; /* time when current quantum ends */ + uint64_t last_dispatch; /* time of last dispatch */ - uint64_t kperf_last_sample_time; /* time of last kperf sample */ +#if KPERF + uint64_t kperf_last_sample_time; /* time of last kperf sample */ +#endif /* KPERF */ - uint64_t deadline; /* current deadline */ + uint64_t deadline; /* for next realtime thread */ bool first_timeslice; /* has the quantum expired since context switch */ - bool processor_offlined; /* has the processor been explicitly processor_offline'ed */ + + bool processor_offlined; /* has the processor been explicitly processor_offline'ed */ bool must_idle; /* Needs to be forced idle as next selected thread is allowed on this processor */ - processor_t processor_primary; /* pointer to primary processor for - * secondary SMT processors, or a pointer - * to ourselves for primaries or non-SMT */ - processor_t processor_secondary; + bool running_timers_active; /* whether the running timers should fire */ + struct timer_call running_timers[RUNNING_TIMER_MAX]; -#if defined(CONFIG_SCHED_TRADITIONAL) || defined(CONFIG_SCHED_MULTIQ) +#if CONFIG_SCHED_TRADITIONAL || CONFIG_SCHED_MULTIQ struct run_queue runq; /* runq for this processor */ -#endif +#endif /* CONFIG_SCHED_TRADITIONAL || CONFIG_SCHED_MULTIQ */ -#if defined(CONFIG_SCHED_TRADITIONAL) - int runq_bound_count; /* # of threads bound to this processor */ -#endif -#if defined(CONFIG_SCHED_GRRR) - struct grrr_run_queue grrr_runq; /* Group Ratio Round-Robin runq */ -#endif - struct ipc_port * processor_self; /* port for operations */ +#if CONFIG_SCHED_GRRR + struct grrr_run_queue grrr_runq; /* Group Ratio Round-Robin runq */ +#endif /* CONFIG_SCHED_GRRR */ - processor_t processor_list; /* all existing processors */ - processor_data_t processor_data; /* per-processor data */ + /* + * Pointer to primary processor for secondary SMT processors, or a + * pointer to ourselves for primaries or non-SMT. 
+ */ + processor_t processor_primary; + processor_t processor_secondary; + struct ipc_port *processor_self; /* port for operations */ + + processor_t processor_list; /* all existing processors */ + + /* Processor state statistics */ + timer_data_t idle_state; + timer_data_t system_state; + timer_data_t user_state; + + timer_t current_state; /* points to processor's idle, system, or user state timer */ + + /* Thread execution timers */ + timer_t thread_timer; /* points to current thread's user or system timer */ + timer_t kernel_timer; /* points to current thread's system_timer */ + + uint64_t timer_call_ttd; /* current timer call time-to-deadline */ }; -extern processor_t processor_list; +extern processor_t processor_list; decl_simple_lock_data(extern, processor_list_lock); -#define MAX_SCHED_CPUS 64 /* Maximum number of CPUs supported by the scheduler. bits.h:bitmap_*() macros need to be used to support greater than 64 */ +/* + * Maximum number of CPUs supported by the scheduler. bits.h bitmap macros + * need to be used to support greater than 64. + */ +#define MAX_SCHED_CPUS 64 extern processor_t processor_array[MAX_SCHED_CPUS]; /* array indexed by cpuid */ +extern processor_set_t pset_array[MAX_PSETS]; /* array indexed by pset_id */ extern uint32_t processor_avail_count; extern uint32_t processor_avail_count_user; +extern uint32_t primary_processor_avail_count; +extern uint32_t primary_processor_avail_count_user; -extern processor_t master_processor; - -extern boolean_t sched_stats_active; +#define master_processor PERCPU_GET_MASTER(processor) +PERCPU_DECL(struct processor, processor); extern processor_t current_processor(void); @@ -310,10 +408,9 @@ extern processor_t current_processor(void); extern lck_grp_t pset_lck_grp; -#if __SMP__ #if defined(SCHED_PSET_TLOCK) -#define pset_lock_init(p) lck_ticket_init(&(p)->sched_lock) -#define pset_lock(p) lck_ticket_lock(&(p)->sched_lock) +#define pset_lock_init(p) lck_ticket_init(&(p)->sched_lock, &pset_lck_grp) +#define pset_lock(p) lck_ticket_lock(&(p)->sched_lock, &pset_lck_grp) #define pset_unlock(p) lck_ticket_unlock(&(p)->sched_lock) #define pset_assert_locked(p) lck_ticket_assert_owned(&(p)->sched_lock) #else /* SCHED_PSET_TLOCK*/ @@ -323,26 +420,12 @@ extern lck_grp_t pset_lck_grp; #define pset_assert_locked(p) LCK_SPIN_ASSERT(&(p)->sched_lock, LCK_ASSERT_OWNED) #endif /*!SCHED_PSET_TLOCK*/ -#define rt_lock_lock(p) simple_lock(&SCHED(rt_runq)(p)->rt_lock, &pset_lck_grp) -#define rt_lock_unlock(p) simple_unlock(&SCHED(rt_runq)(p)->rt_lock) -#define rt_lock_init(p) simple_lock_init(&SCHED(rt_runq)(p)->rt_lock, 0) -#else -#define pset_lock(p) do { (void)p; } while(0) -#define pset_unlock(p) do { (void)p; } while(0) -#define pset_lock_init(p) do { (void)p; } while(0) -#define pset_assert_locked(p) do { (void)p; } while(0) - -#define rt_lock_lock(p) do { (void)p; } while(0) -#define rt_lock_unlock(p) do { (void)p; } while(0) -#define rt_lock_init(p) do { (void)p; } while(0) -#endif /* SMP */ - extern void processor_bootstrap(void); extern void processor_init( processor_t processor, - int cpu_id, - processor_set_t processor_set); + int cpu_id, + processor_set_t processor_set); extern void processor_set_primary( processor_t processor, @@ -352,12 +435,13 @@ extern kern_return_t processor_shutdown( processor_t processor); extern kern_return_t processor_start_from_user( - processor_t processor); + processor_t processor); extern kern_return_t processor_exit_from_user( - processor_t processor); + processor_t processor); -kern_return_t 
-sched_processor_enable(processor_t processor, boolean_t enable); +extern kern_return_t sched_processor_enable( + processor_t processor, + boolean_t enable); extern void processor_queue_shutdown( processor_t processor); @@ -368,45 +452,46 @@ extern void processor_queue_shutdown( extern processor_set_t processor_pset( processor_t processor); -extern pset_node_t pset_node_root(void); +extern pset_node_t pset_node_root(void); extern processor_set_t pset_create( pset_node_t node); extern void pset_init( processor_set_t pset, - pset_node_t node); + pset_node_t node); + +extern processor_set_t pset_find( + uint32_t cluster_id, + processor_set_t default_pset); -extern processor_set_t pset_find( - uint32_t cluster_id, - processor_set_t default_pset); extern kern_return_t processor_info_count( - processor_flavor_t flavor, + processor_flavor_t flavor, mach_msg_type_number_t *count); #define pset_deallocate(x) #define pset_reference(x) -extern void machine_run_count( - uint32_t count); +extern void machine_run_count( + uint32_t count); -extern processor_t machine_choose_processor( +extern processor_t machine_choose_processor( processor_set_t pset, - processor_t processor); + processor_t processor); #define next_pset(p) (((p)->pset_list != PROCESSOR_SET_NULL)? (p)->pset_list: (p)->node->psets) #define PSET_THING_TASK 0 #define PSET_THING_THREAD 1 -extern kern_return_t processor_set_things( - processor_set_t pset, - void **thing_list, - mach_msg_type_number_t *count, - int type); - -extern pset_cluster_type_t recommended_pset_type(thread_t thread); +extern pset_cluster_type_t recommended_pset_type( + thread_t thread); +#if CONFIG_THREAD_GROUPS +extern pset_cluster_type_t thread_group_pset_recommendation( + struct thread_group *tg, + cluster_type_t recommendation); +#endif /* CONFIG_THREAD_GROUPS */ inline static bool pset_is_recommended(processor_set_t pset) @@ -414,21 +499,63 @@ pset_is_recommended(processor_set_t pset) return (pset->recommended_bitmask & pset->cpu_bitmask) != 0; } -extern void processor_state_update_idle(processor_t processor); -extern void processor_state_update_from_thread(processor_t processor, thread_t thread); -extern void processor_state_update_explicit(processor_t processor, int pri, - sfi_class_id_t sfi_class, pset_cluster_type_t pset_type, - perfcontrol_class_t perfctl_class, thread_urgency_t urgency); +extern void processor_state_update_idle( + processor_t processor); + +extern void processor_state_update_from_thread( + processor_t processor, + thread_t thread); + +extern void processor_state_update_explicit( + processor_t processor, + int pri, + sfi_class_id_t sfi_class, + pset_cluster_type_t pset_type, + perfcontrol_class_t perfctl_class, + thread_urgency_t urgency, + sched_bucket_t bucket); #define PSET_LOAD_NUMERATOR_SHIFT 16 #define PSET_LOAD_FRACTIONAL_SHIFT 4 +#if CONFIG_SCHED_EDGE + +extern cluster_type_t pset_type_for_id(uint32_t cluster_id); + +/* + * The Edge scheduler uses average scheduling latency as the metric for making + * thread migration decisions. One component of avg scheduling latency is the load + * average on the cluster. + * + * Load Average Fixed Point Arithmetic + * + * The load average is maintained as a 24.8 fixed point arithmetic value for precision. + * When multiplied by the average execution time, it needs to be rounded up (based on + * the most significant bit of the fractional part) for better accuracy. After rounding + * up, the whole number part of the value is used as the actual load value for + * migrate/steal decisions. 
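The 24.8 fixed-point scheme described here can be checked with a small stand-alone calculation. The constants mirror the SCHED_PSET_LOAD_EWMA_* macros defined just below; the sample load and execution-time values are invented.

#include <stdint.h>
#include <stdio.h>

#define EWMA_FRACTION_BITS  8
#define EWMA_ROUND_BIT      (1u << (EWMA_FRACTION_BITS - 1))

int
main(void)
{
    uint64_t load_average_fixed = (2 << EWMA_FRACTION_BITS) | 0x90;  /* 2.5625 in 24.8 */
    uint64_t avg_execution_time = 40;                                /* hypothetical per-thread cost */

    /* Round to the nearest whole load value, then weight by the average execution time. */
    uint64_t load = (load_average_fixed + EWMA_ROUND_BIT) >> EWMA_FRACTION_BITS;       /* -> 3 */
    printf("weighted load metric = %llu\n",
        (unsigned long long)(load * avg_execution_time));                              /* -> 120 */
    return 0;
}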
+ */ +#define SCHED_PSET_LOAD_EWMA_FRACTION_BITS 8 +#define SCHED_PSET_LOAD_EWMA_ROUND_BIT (1 << (SCHED_PSET_LOAD_EWMA_FRACTION_BITS - 1)) +#define SCHED_PSET_LOAD_EWMA_FRACTION_MASK ((1 << SCHED_PSET_LOAD_EWMA_FRACTION_BITS) - 1) + +inline static int +sched_get_pset_load_average(processor_set_t pset, sched_bucket_t sched_bucket) +{ + return (int)(((pset->pset_load_average[sched_bucket] + SCHED_PSET_LOAD_EWMA_ROUND_BIT) >> SCHED_PSET_LOAD_EWMA_FRACTION_BITS) * + pset->pset_execution_time[sched_bucket].pset_avg_thread_execution_time); +} + +#else /* CONFIG_SCHED_EDGE */ inline static int -sched_get_pset_load_average(processor_set_t pset) +sched_get_pset_load_average(processor_set_t pset, __unused sched_bucket_t sched_bucket) { - return pset->load_average >> (PSET_LOAD_NUMERATOR_SHIFT - PSET_LOAD_FRACTIONAL_SHIFT); + return (int)pset->load_average >> (PSET_LOAD_NUMERATOR_SHIFT - PSET_LOAD_FRACTIONAL_SHIFT); } -extern void sched_update_pset_load_average(processor_set_t pset); +#endif /* CONFIG_SCHED_EDGE */ + +extern void sched_update_pset_load_average(processor_set_t pset, uint64_t curtime); +extern void sched_update_pset_avg_execution_time(processor_set_t pset, uint64_t delta, uint64_t curtime, sched_bucket_t sched_bucket); inline static void pset_update_processor_state(processor_set_t pset, processor_t processor, uint new_state) @@ -436,7 +563,7 @@ pset_update_processor_state(processor_set_t pset, processor_t processor, uint ne pset_assert_locked(pset); uint old_state = processor->state; - uint cpuid = processor->cpu_id; + uint cpuid = (uint)processor->cpu_id; assert(processor->processor_set == pset); assert(bit_test(pset->cpu_bitmask, cpuid)); @@ -450,11 +577,57 @@ pset_update_processor_state(processor_set_t pset, processor_t processor, uint ne bit_set(pset->cpu_state_map[new_state], cpuid); if ((old_state == PROCESSOR_RUNNING) || (new_state == PROCESSOR_RUNNING)) { - sched_update_pset_load_average(pset); + sched_update_pset_load_average(pset, 0); if (new_state == PROCESSOR_RUNNING) { assert(processor == current_processor()); } } + if ((old_state == PROCESSOR_IDLE) || (new_state == PROCESSOR_IDLE)) { + if (new_state == PROCESSOR_IDLE) { + bit_clear(pset->realtime_map, cpuid); + } + + pset_node_t node = pset->node; + + if (bit_count(node->pset_map) == 1) { + /* Node has only a single pset, so skip node pset map updates */ + return; + } + + if (new_state == PROCESSOR_IDLE) { + if (processor->processor_primary == processor) { + if (!bit_test(atomic_load(&node->pset_non_rt_primary_map), pset->pset_id)) { + atomic_bit_set(&node->pset_non_rt_primary_map, pset->pset_id, memory_order_relaxed); + } + if (!bit_test(atomic_load(&node->pset_idle_primary_map), pset->pset_id)) { + atomic_bit_set(&node->pset_idle_primary_map, pset->pset_id, memory_order_relaxed); + } + } + if (!bit_test(atomic_load(&node->pset_non_rt_map), pset->pset_id)) { + atomic_bit_set(&node->pset_non_rt_map, pset->pset_id, memory_order_relaxed); + } + if (!bit_test(atomic_load(&node->pset_idle_map), pset->pset_id)) { + atomic_bit_set(&node->pset_idle_map, pset->pset_id, memory_order_relaxed); + } + } else { + cpumap_t idle_map = pset->cpu_state_map[PROCESSOR_IDLE]; + if (idle_map == 0) { + /* No more IDLE CPUs */ + if (bit_test(atomic_load(&node->pset_idle_map), pset->pset_id)) { + atomic_bit_clear(&node->pset_idle_map, pset->pset_id, memory_order_relaxed); + } + } + if (processor->processor_primary == processor) { + idle_map &= pset->primary_map; + if (idle_map == 0) { + /* No more IDLE primary CPUs */ + if 
(bit_test(atomic_load(&node->pset_idle_primary_map), pset->pset_id)) { + atomic_bit_clear(&node->pset_idle_primary_map, pset->pset_id, memory_order_relaxed); + } + } + } + } + } } #else /* MACH_KERNEL_PRIVATE */ @@ -462,10 +635,10 @@ pset_update_processor_state(processor_set_t pset, processor_t processor, uint ne __BEGIN_DECLS extern void pset_deallocate( - processor_set_t pset); + processor_set_t pset); extern void pset_reference( - processor_set_t pset); + processor_set_t pset); __END_DECLS @@ -478,7 +651,6 @@ extern processor_t cpu_to_processor(int cpu); extern kern_return_t enable_smt_processors(bool enable); -extern boolean_t processor_in_panic_context(processor_t processor); __END_DECLS #endif /* KERNEL_PRIVATE */ diff --git a/osfmk/kern/processor_data.h b/osfmk/kern/processor_data.h deleted file mode 100644 index bee13ded0..000000000 --- a/osfmk/kern/processor_data.h +++ /dev/null @@ -1,159 +0,0 @@ -/* - * Copyright (c) 2003-2009 Apple Inc. All rights reserved. - * - * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * - * This file contains Original Code and/or Modifications of Original Code - * as defined in and that are subject to the Apple Public Source License - * Version 2.0 (the 'License'). You may not use this file except in - * compliance with the License. The rights granted to you under the License - * may not be used to create, or enable the creation or redistribution of, - * unlawful or unlicensed copies of an Apple operating system, or to - * circumvent, violate, or enable the circumvention or violation of, any - * terms of an Apple operating system software license agreement. - * - * Please obtain a copy of the License at - * http://www.opensource.apple.com/apsl/ and read it before using this file. - * - * The Original Code and all software distributed under the License are - * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER - * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, - * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. - * Please see the License for the specific language governing rights and - * limitations under the License. - * - * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ - */ -/* - * Machine independent per processor data. - */ - -#ifndef _KERN_PROCESSOR_DATA_H_ -#define _KERN_PROCESSOR_DATA_H_ - -/* - * #include kern/processor.h instead of this file. 
- */ - -#ifdef XNU_KERNEL_PRIVATE - -#ifdef MACH_KERNEL_PRIVATE - -#include -#include -#include - -struct processor_sched_statistics { - uint32_t csw_count; - uint32_t preempt_count; - uint32_t preempted_rt_count; - uint32_t preempted_by_rt_count; - uint32_t rt_sched_count; - uint32_t interrupt_count; - uint32_t ipi_count; - uint32_t timer_pop_count; - uint32_t idle_transitions; - uint32_t quantum_timer_expirations; -}; - -struct processor_data { - /* Processor state statistics */ - timer_data_t idle_state; - timer_data_t system_state; - timer_data_t user_state; - - timer_t current_state; /* points to processor's idle, system, or user state timer */ - - /* Thread execution timers */ - timer_t thread_timer; /* points to current thread's user or system timer */ - timer_t kernel_timer; /* points to current thread's system_timer */ - - /* Kernel stack cache */ - struct stack_cache { - vm_offset_t free; - unsigned int count; - } stack_cache; - - /* VM event counters */ - vm_statistics64_data_t vm_stat; - - /* waitq prepost cache */ -#define WQP_CACHE_MAX 50 - struct wqp_cache { - uint64_t head; - unsigned int avail; - } wqp_cache; - - int start_color; - unsigned long page_grab_count; - void *free_pages; - struct processor_sched_statistics sched_stats; - uint64_t timer_call_ttd; /* current timer call time-to-deadline */ - uint64_t wakeups_issued_total; /* Count of thread wakeups issued - * by this processor - */ - struct debugger_state { - debugger_op db_current_op; - const char *db_message; - const char *db_panic_str; - va_list *db_panic_args; - uint64_t db_panic_options; - void *db_panic_data_ptr; - boolean_t db_proceed_on_sync_failure; - uint32_t db_entry_count; /* incremented whenever we panic or call Debugger (current CPU panic level) */ - kern_return_t db_op_return; - unsigned long db_panic_caller; - } debugger_state; -}; - -typedef struct processor_data processor_data_t; - -#define PROCESSOR_DATA(processor, member) \ - (processor)->processor_data.member - -extern void processor_data_init( - processor_t processor); - -#define SCHED_STATS_INTERRUPT(p) \ -MACRO_BEGIN \ - if (__builtin_expect(sched_stats_active, 0)) { \ - (p)->processor_data.sched_stats.interrupt_count++; \ - } \ -MACRO_END - -#define SCHED_STATS_TIMER_POP(p) \ -MACRO_BEGIN \ - if (__builtin_expect(sched_stats_active, 0)) { \ - (p)->processor_data.sched_stats.timer_pop_count++; \ - } \ -MACRO_END - -#define SCHED_STATS_IPI(p) \ -MACRO_BEGIN \ - if (__builtin_expect(sched_stats_active, 0)) { \ - (p)->processor_data.sched_stats.ipi_count++; \ - } \ -MACRO_END - -#define SCHED_STATS_CPU_IDLE_START(p) \ -MACRO_BEGIN \ - if (__builtin_expect(sched_stats_active, 0)) { \ - (p)->processor_data.sched_stats.idle_transitions++; \ - } \ -MACRO_END - -#define SCHED_STATS_QUANTUM_TIMER_EXPIRATION(p) \ -MACRO_BEGIN \ - if (__builtin_expect(sched_stats_active, 0)) { \ - (p)->processor_data.sched_stats.quantum_timer_expirations++; \ - } \ -MACRO_END - -#endif /* MACH_KERNEL_PRIVATE */ - -extern boolean_t processor_in_panic_context(processor_t processor); - -#endif /* XNU_KERNEL_PRIVATE */ - -#endif /* _KERN_PROCESSOR_DATA_H_ */ diff --git a/osfmk/kern/queue.h b/osfmk/kern/queue.h index 68032cbe7..ac758f79e 100644 --- a/osfmk/kern/queue.h +++ b/osfmk/kern/queue.h @@ -360,14 +360,7 @@ static __inline__ void remque( queue_entry_t elt) { - queue_entry_t next_elt, prev_elt; - - __QUEUE_ELT_VALIDATE(elt); - next_elt = elt->next; - prev_elt = elt->prev; /* next_elt may equal prev_elt (and the queue head) if elt was the only element */ - 
next_elt->prev = prev_elt; - prev_elt->next = next_elt; - __DEQUEUE_ELT_CLEANUP(elt); + remqueue(elt); } /* @@ -572,8 +565,34 @@ re_queue_tail(queue_t que, queue_entry_t elt) _tmp_element; \ }) +/* Peek at the next element, or return NULL if the next element is head (indicating queue_end) */ +#define qe_queue_next(head, element, type, field) ({ \ + queue_entry_t _tmp_entry = queue_next(&(element)->field); \ + type *_tmp_element = (type*) NULL; \ + if (_tmp_entry != (queue_entry_t) head) \ + _tmp_element = qe_element(_tmp_entry, type, field); \ + _tmp_element; \ +}) + +/* Peek at the prev element, or return NULL if the prev element is head (indicating queue_end) */ +#define qe_queue_prev(head, element, type, field) ({ \ + queue_entry_t _tmp_entry = queue_prev(&(element)->field); \ + type *_tmp_element = (type*) NULL; \ + if (_tmp_entry != (queue_entry_t) head) \ + _tmp_element = qe_element(_tmp_entry, type, field); \ + _tmp_element; \ +}) + #endif /* XNU_KERNEL_PRIVATE */ +/* + * Macro: QUEUE_HEAD_INITIALIZER() + * Function: + * Static queue head initializer + */ +#define QUEUE_HEAD_INITIALIZER(name) \ + { &name, &name } + /* * Macro: queue_init * Function: diff --git a/osfmk/kern/remote_time.c b/osfmk/kern/remote_time.c index 0fa8aa491..d792a2927 100644 --- a/osfmk/kern/remote_time.c +++ b/osfmk/kern/remote_time.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017 Apple Inc. All rights reserved. + * Copyright (c) 2017-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -40,14 +40,17 @@ #include #include +LCK_GRP_DECLARE(bt_lck_grp, "bridge timestamp"); +LCK_SPIN_DECLARE(bt_spin_lock, &bt_lck_grp); +LCK_SPIN_DECLARE(bt_ts_conversion_lock, &bt_lck_grp); +LCK_SPIN_DECLARE(bt_maintenance_lock, &bt_lck_grp); + #if CONFIG_MACH_BRIDGE_SEND_TIME uint32_t bt_enable_flag = 0; -lck_spin_t *bt_maintenance_lock = NULL; _Atomic uint32_t bt_init_flag = 0; void mach_bridge_timer_maintenance(void); -void mach_bridge_timer_init(void); uint32_t mach_bridge_timer_enable(uint32_t new_value, int change); /* @@ -64,28 +67,14 @@ mach_bridge_timer_maintenance(void) return; } - lck_spin_lock(bt_maintenance_lock); + lck_spin_lock(&bt_maintenance_lock); if (!bt_enable_flag) { goto done; } mach_bridge_send_timestamp(0); done: - lck_spin_unlock(bt_maintenance_lock); -} - -/* - * This function should be called only once from the callback - * registration function - */ -void -mach_bridge_timer_init(void) -{ - assert(!os_atomic_load(&bt_init_flag, relaxed)); - /* Initialize the lock */ - static lck_grp_t *bt_lck_grp = NULL; - bt_lck_grp = lck_grp_alloc_init("bridgetimestamp", LCK_GRP_ATTR_NULL); - bt_maintenance_lock = lck_spin_alloc_init(bt_lck_grp, NULL); + lck_spin_unlock(&bt_maintenance_lock); } /* @@ -98,12 +87,12 @@ mach_bridge_timer_enable(uint32_t new_value, int change) { uint32_t current_value = 0; assert(os_atomic_load(&bt_init_flag, relaxed)); - lck_spin_lock(bt_maintenance_lock); + lck_spin_lock(&bt_maintenance_lock); if (change) { bt_enable_flag = new_value; } current_value = bt_enable_flag; - lck_spin_unlock(bt_maintenance_lock); + lck_spin_unlock(&bt_maintenance_lock); return current_value; } @@ -118,7 +107,6 @@ mach_bridge_timer_enable(uint32_t new_value, int change) */ void mach_bridge_add_timestamp(uint64_t remote_timestamp, uint64_t local_timestamp); void bt_calibration_thread_start(void); -lck_spin_t *ts_conversion_lock = NULL; void bt_params_add(struct bt_params *params); /* function called by sysctl */ @@ -128,14 +116,13 @@ struct bt_params bt_params_get_latest(void); * 
Platform specific bridge time receiving interface. * These variables should be exported by the platform specific time receiving code. */ -extern lck_spin_t *bt_spin_lock; extern _Atomic uint32_t bt_init_flag; static uint64_t received_local_timestamp = 0; static uint64_t received_remote_timestamp = 0; /* * Buffer the previous timestamp pairs and rate - * It is protected by the ts_conversion_lock + * It is protected by the bt_ts_conversion_lock */ #define BT_PARAMS_COUNT 10 static struct bt_params bt_params_hist[BT_PARAMS_COUNT] = {}; @@ -144,7 +131,7 @@ static int bt_params_idx = -1; void bt_params_add(struct bt_params *params) { - lck_spin_assert(ts_conversion_lock, LCK_ASSERT_OWNED); + lck_spin_assert(&bt_ts_conversion_lock, LCK_ASSERT_OWNED); bt_params_idx = (bt_params_idx + 1) % BT_PARAMS_COUNT; bt_params_hist[bt_params_idx] = *params; @@ -154,7 +141,7 @@ bt_params_add(struct bt_params *params) static inline struct bt_params* bt_params_find(uint64_t local_ts) { - lck_spin_assert(ts_conversion_lock, LCK_ASSERT_OWNED); + lck_spin_assert(&bt_ts_conversion_lock, LCK_ASSERT_OWNED); int idx = bt_params_idx; if (idx < 0) { @@ -176,7 +163,7 @@ bt_params_find(uint64_t local_ts) static inline struct bt_params bt_params_get_latest_locked(void) { - lck_spin_assert(ts_conversion_lock, LCK_ASSERT_OWNED); + lck_spin_assert(&bt_ts_conversion_lock, LCK_ASSERT_OWNED); struct bt_params latest_params = {}; if (bt_params_idx >= 0) { @@ -193,9 +180,9 @@ bt_params_get_latest(void) /* Check if ts_converison_lock has been initialized */ if (os_atomic_load(&bt_init_flag, acquire)) { - lck_spin_lock(ts_conversion_lock); + lck_spin_lock(&bt_ts_conversion_lock); latest_params = bt_params_get_latest_locked(); - lck_spin_unlock(ts_conversion_lock); + lck_spin_unlock(&bt_ts_conversion_lock); } return latest_params; } @@ -206,7 +193,7 @@ bt_params_get_latest(void) void mach_bridge_add_timestamp(uint64_t remote_timestamp, uint64_t local_timestamp) { - lck_spin_assert(bt_spin_lock, LCK_ASSERT_OWNED); + lck_spin_assert(&bt_spin_lock, LCK_ASSERT_OWNED); /* sleep/wake might return the same mach_absolute_time as the previous timestamp pair */ if ((received_local_timestamp == local_timestamp) || @@ -225,7 +212,7 @@ mach_bridge_compute_rate(uint64_t new_local_ts, uint64_t new_remote_ts, { int64_t rdiff = (int64_t)new_remote_ts - (int64_t)old_remote_ts; int64_t ldiff = (int64_t)new_local_ts - (int64_t)old_local_ts; - double calc_rate = ((double)rdiff) / ldiff; + double calc_rate = ((double)rdiff) / (double)ldiff; return calc_rate; } @@ -260,7 +247,7 @@ bt_calibration_thread(void) static uint64_t ts_pair_mismatch = 0; static uint32_t ts_pair_mismatch_reset_count = 0; spl_t s = splsched(); - lck_spin_lock(bt_spin_lock); + lck_spin_lock(&bt_spin_lock); if (!received_remote_timestamp) { if (PE_parse_boot_argn("rt_ini_count", &max_initial_sample_count, sizeof(uint32_t)) == TRUE) { @@ -329,12 +316,12 @@ recalculate: prev_local_ts = prev_received_local_ts; prev_remote_ts = prev_received_remote_ts; } - lck_spin_unlock(bt_spin_lock); + lck_spin_unlock(&bt_spin_lock); splx(s); struct bt_params bt_params = {}; - lck_spin_lock(ts_conversion_lock); + lck_spin_lock(&bt_ts_conversion_lock); if (reset) { if (skip_reset_count > 0) { KDBG(MACHDBG_CODE(DBG_MACH_CLOCK, MACH_BRIDGE_SKIP_TS), curr_local_ts, curr_remote_ts, @@ -361,7 +348,7 @@ recalculate: if (bt_params_idx >= 0) { bt_params_snapshot = bt_params_hist[bt_params_idx]; } - lck_spin_unlock(ts_conversion_lock); + lck_spin_unlock(&bt_ts_conversion_lock); if (bt_params_snapshot.rate == 0.0) 
{ /* * The rate should never be 0 because we always expect a reset/wake @@ -376,7 +363,7 @@ recalculate: ts_pair_mismatch_reset_count = 0; KDBG(MACHDBG_CODE(DBG_MACH_CLOCK, MACH_BRIDGE_RESET_TS), curr_local_ts, curr_remote_ts, 3); s = splsched(); - lck_spin_lock(bt_spin_lock); + lck_spin_lock(&bt_spin_lock); goto block; } @@ -406,7 +393,7 @@ recalculate: ts_pair_mismatch_reset_count++; KDBG(MACHDBG_CODE(DBG_MACH_CLOCK, MACH_BRIDGE_RESET_TS), curr_local_ts, curr_remote_ts, 4); s = splsched(); - lck_spin_lock(bt_spin_lock); + lck_spin_lock(&bt_spin_lock); goto block; } } @@ -417,7 +404,7 @@ recalculate: KDBG(MACHDBG_CODE(DBG_MACH_CLOCK, MACH_BRIDGE_OBSV_RATE), *(uint64_t *)((void *)&observed_rate)); ts_pair_mismatch = ts_pair_mismatch > 0 ? (ts_pair_mismatch - 1) : 0; s = splsched(); - lck_spin_lock(bt_spin_lock); + lck_spin_lock(&bt_spin_lock); goto block; } if (initial_sample_count <= MIN_INITIAL_SAMPLE_COUNT) { @@ -438,7 +425,7 @@ recalculate: * This ensures that we always use the same parameters to compute remote * timestamp for a given local timestamp. */ - lck_spin_lock(ts_conversion_lock); + lck_spin_lock(&bt_ts_conversion_lock); absolutetime_to_nanoseconds(mach_absolute_time(), &bt_params.base_local_ts); bt_params.base_remote_ts = mach_bridge_compute_timestamp(bt_params.base_local_ts, &bt_params_snapshot); bt_params.rate = new_rate; @@ -449,10 +436,10 @@ recalculate: bt_params.base_remote_ts, *(uint64_t *)((void *)&bt_params.rate)); skip_reset: - lck_spin_unlock(ts_conversion_lock); + lck_spin_unlock(&bt_ts_conversion_lock); s = splsched(); - lck_spin_lock(bt_spin_lock); + lck_spin_lock(&bt_spin_lock); /* Check if a new timestamp pair was received */ if (received_local_timestamp != curr_local_abs) { recalculate_count++; @@ -460,7 +447,7 @@ skip_reset: } block: assert_wait((event_t)bt_params_hist, THREAD_UNINT); - lck_spin_unlock(bt_spin_lock); + lck_spin_unlock(&bt_spin_lock); splx(s); thread_block((thread_continue_t)bt_calibration_thread); } @@ -521,7 +508,7 @@ mach_bridge_remote_time(uint64_t local_timestamp) uint64_t remote_timestamp = 0; - lck_spin_lock(ts_conversion_lock); + lck_spin_lock(&bt_ts_conversion_lock); uint64_t now = mach_absolute_time(); if (!local_timestamp) { local_timestamp = now; @@ -537,7 +524,7 @@ mach_bridge_remote_time(uint64_t local_timestamp) struct bt_params params = bt_params_get_latest_locked(); remote_timestamp = mach_bridge_compute_timestamp(local_timestamp, ¶ms); #endif /* defined(XNU_TARGET_OS_BRIDGE) */ - lck_spin_unlock(ts_conversion_lock); + lck_spin_unlock(&bt_ts_conversion_lock); KDBG(MACHDBG_CODE(DBG_MACH_CLOCK, MACH_BRIDGE_REMOTE_TIME), local_timestamp, remote_timestamp, now); return remote_timestamp; diff --git a/osfmk/kern/remote_time.h b/osfmk/kern/remote_time.h index 020e845b4..8bbc4f715 100644 --- a/osfmk/kern/remote_time.h +++ b/osfmk/kern/remote_time.h @@ -69,6 +69,14 @@ mach_bridge_compute_timestamp(uint64_t local_ts_ns, struct bt_params *params) } uint64_t mach_bridge_remote_time(uint64_t); + +#if XNU_KERNEL_PRIVATE +#include +extern lck_spin_t bt_maintenance_lock; +extern lck_spin_t bt_spin_lock; +extern lck_spin_t bt_ts_conversion_lock; +#endif + __END_DECLS #endif /* REMOTE_TIME_H */ diff --git a/osfmk/kern/restartable.c b/osfmk/kern/restartable.c index c4e0a7aa0..1ecd4a57d 100644 --- a/osfmk/kern/restartable.c +++ b/osfmk/kern/restartable.c @@ -87,8 +87,8 @@ struct restartable_ranges { #endif static queue_head_t rr_hash[RR_HASH_SIZE]; -lck_spin_t rr_spinlock; -lck_grp_t rr_lock_grp; +LCK_GRP_DECLARE(rr_lock_grp, "restartable 
ranges"); +LCK_SPIN_DECLARE(rr_spinlock, &rr_lock_grp); #define rr_lock() lck_spin_lock_grp(&rr_spinlock, &rr_lock_grp) #define rr_unlock() lck_spin_unlock(&rr_spinlock); @@ -365,8 +365,6 @@ thread_reset_pcs_ast(thread_t thread) void restartable_init(void) { - lck_grp_init(&rr_lock_grp, "restartable ranges", LCK_GRP_ATTR_NULL); - lck_spin_init(&rr_spinlock, &rr_lock_grp, LCK_ATTR_NULL); for (size_t i = 0; i < RR_HASH_SIZE; i++) { queue_head_init(rr_hash[i]); } @@ -387,6 +385,7 @@ task_restartable_ranges_register( return KERN_FAILURE; } + kr = _ranges_validate(task, ranges, count); if (kr == KERN_SUCCESS) { diff --git a/osfmk/kern/sched.h b/osfmk/kern/sched.h index be8727dd0..cd4ab0825 100644 --- a/osfmk/kern/sched.h +++ b/osfmk/kern/sched.h @@ -74,7 +74,6 @@ #include #include #include -#include #include #define NRQS_MAX (128) /* maximum number of priority levels */ @@ -250,9 +249,6 @@ rq_bitmap_clear(bitmap_t *map, u_int n) struct rt_queue { _Atomic int count; /* # of threads total */ queue_head_t queue; /* all runnable RT threads */ -#if __SMP__ - decl_simple_lock_data(, rt_lock); -#endif struct runq_stats runq_stats; }; typedef struct rt_queue *rt_queue_t; @@ -346,7 +342,6 @@ extern uint32_t default_timeshare_constraint; extern uint32_t max_rt_quantum, min_rt_quantum; extern int default_preemption_rate; -extern int default_bg_preemption_rate; #if defined(CONFIG_SCHED_TIMESHARE_CORE) @@ -389,6 +384,7 @@ extern void compute_pmap_gc_throttle( #if defined(CONFIG_SCHED_TIMESHARE_CORE) #define MAX_LOAD (NRQS - 1) +#define SCHED_PRI_SHIFT_MAX ((8 * sizeof(uint32_t)) - 1) extern uint32_t sched_pri_shifts[TH_BUCKET_MAX]; extern uint32_t sched_fixed_shift; extern int8_t sched_load_shifts[NRQS]; @@ -414,6 +410,10 @@ extern uint32_t sched_run_incr(thread_t thread); extern uint32_t sched_run_decr(thread_t thread); extern void sched_update_thread_bucket(thread_t thread); +extern uint32_t sched_smt_run_incr(thread_t thread); +extern uint32_t sched_smt_run_decr(thread_t thread); +extern void sched_smt_update_thread_bucket(thread_t thread); + #define SCHED_DECAY_TICKS 32 struct shift_data { int shift1; @@ -424,11 +424,13 @@ struct shift_data { * thread_timer_delta macro takes care of both thread timers. 
*/ #define thread_timer_delta(thread, delta) \ -MACRO_BEGIN \ - (delta) = (typeof(delta))timer_delta(&(thread)->system_timer, \ - &(thread)->system_timer_save); \ - (delta) += (typeof(delta))timer_delta(&(thread)->user_timer, \ - &(thread)->user_timer_save); \ +MACRO_BEGIN \ + (delta) = (typeof(delta))timer_delta(&(thread)->system_timer, \ + &(thread)->system_timer_save); \ + (delta) += (typeof(delta))timer_delta(&(thread)->user_timer, \ + &(thread)->user_timer_save); \ MACRO_END +extern bool system_is_SMT; + #endif /* _KERN_SCHED_H_ */ diff --git a/osfmk/kern/sched_amp.c b/osfmk/kern/sched_amp.c index 50c381008..2f06bca90 100644 --- a/osfmk/kern/sched_amp.c +++ b/osfmk/kern/sched_amp.c @@ -119,6 +119,7 @@ const struct sched_dispatch_table sched_amp_dispatch = { .steal_thread_enabled = sched_amp_steal_thread_enabled, .steal_thread = sched_amp_steal_thread, .compute_timeshare_priority = sched_compute_timeshare_priority, + .choose_node = sched_amp_choose_node, .choose_processor = sched_amp_choose_processor, .processor_enqueue = sched_amp_processor_enqueue, .processor_queue_shutdown = sched_amp_processor_queue_shutdown, @@ -486,7 +487,7 @@ sched_amp_steal_thread(processor_set_t pset) assert(nset != pset); - if (sched_get_pset_load_average(nset) >= sched_amp_steal_threshold(nset, spill_pending)) { + if (sched_get_pset_load_average(nset, 0) >= sched_amp_steal_threshold(nset, spill_pending)) { pset_unlock(pset); pset = nset; @@ -494,12 +495,12 @@ sched_amp_steal_thread(processor_set_t pset) pset_lock(pset); /* Allow steal if load average still OK, no idle cores, and more threads on runq than active cores DISPATCHING */ - if ((sched_get_pset_load_average(pset) >= sched_amp_steal_threshold(pset, spill_pending)) && + if ((sched_get_pset_load_average(pset, 0) >= sched_amp_steal_threshold(pset, spill_pending)) && (pset->pset_runq.count > bit_count(pset->cpu_state_map[PROCESSOR_DISPATCHING])) && (bit_count(pset->recommended_bitmask & pset->cpu_state_map[PROCESSOR_IDLE]) == 0)) { thread = run_queue_dequeue(&pset->pset_runq, SCHED_HEADQ); KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_AMP_STEAL) | DBG_FUNC_NONE, spill_pending, 0, 0, 0); - sched_update_pset_load_average(pset); + sched_update_pset_load_average(pset, 0); } } @@ -630,6 +631,7 @@ sched_amp_choose_processor(processor_set_t pset, processor_t processor, thread_t processor_set_t nset = pset; bool choose_pcores; + again: choose_pcores = pcores_recommended(thread); @@ -722,7 +724,7 @@ extern void sysctl_thread_bind_cluster_type(char cluster_type); void sysctl_thread_bind_cluster_type(char cluster_type) { - thread_bind_cluster_type(cluster_type); + thread_bind_cluster_type(current_thread(), cluster_type, false); } extern char sysctl_get_task_cluster_type(void); diff --git a/osfmk/kern/sched_amp_common.c b/osfmk/kern/sched_amp_common.c index 1158090d9..68ad551ce 100644 --- a/osfmk/kern/sched_amp_common.c +++ b/osfmk/kern/sched_amp_common.c @@ -44,6 +44,7 @@ #include #include #include +#include #if __AMP__ @@ -85,18 +86,12 @@ sched_amp_init(void) pcore_set->pset_cluster_type = PSET_AMP_P; pcore_set->pset_cluster_id = 1; -#if !CONFIG_SCHED_CLUTCH - /* - * For non-clutch scheduler, allow system to be e-core only. - * Clutch scheduler support for this feature needs to be implemented. 
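The steal gate in sched_amp_steal_thread() above combines three checks: load at or above the steal threshold, no idle recommended cores, and more runnable threads than dispatching cores. Purely as a readability aid, the condition can be restated as one predicate; this reuses the fields and helpers that appear in the hunk, so it is an illustration rather than kernel code and is not compilable outside the xnu tree.

/* May a thread be stolen from this pset? (condensed restatement) */
static bool
amp_pset_steal_allowed(processor_set_t pset, bool spill_pending)
{
    bool over_threshold = sched_get_pset_load_average(pset, 0) >=
        sched_amp_steal_threshold(pset, spill_pending);
    bool no_idle_recommended_cores =
        bit_count(pset->recommended_bitmask & pset->cpu_state_map[PROCESSOR_IDLE]) == 0;
    bool runq_backlog =
        pset->pset_runq.count > bit_count(pset->cpu_state_map[PROCESSOR_DISPATCHING]);

    return over_threshold && no_idle_recommended_cores && runq_backlog;
}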
- */ #if DEVELOPMENT || DEBUG if (PE_parse_boot_argn("enable_skstsct", NULL, 0)) { system_ecore_only = true; } #endif /* DEVELOPMENT || DEBUG */ -#endif /* !CONFIG_SCHED_CLUTCH */ sched_timeshare_init(); } @@ -113,6 +108,17 @@ int sched_amp_spill_steal = 1; int sched_amp_spill_deferred_ipi = 1; int sched_amp_pcores_preempt_immediate_ipi = 1; +/* + * sched_perfcontrol_inherit_recommendation_from_tg changes amp + * scheduling policy away from default and allows policy to be + * modified at run-time. + * + * Once modified from default, the policy toggles between "follow + * thread group" and "restrict to e". + */ + +_Atomic sched_perfctl_class_policy_t sched_perfctl_policy_util = SCHED_PERFCTL_POLICY_DEFAULT; +_Atomic sched_perfctl_class_policy_t sched_perfctl_policy_bg = SCHED_PERFCTL_POLICY_DEFAULT; /* * sched_amp_spill_threshold() @@ -242,12 +248,9 @@ should_spill_to_ecores(processor_set_t nset, thread_t thread) return false; } -#if !CONFIG_SCHED_CLUTCH - /* Per-thread P-core scheduling support needs to be implemented for clutch scheduler */ if (thread->sched_flags & TH_SFLAG_PCORE_ONLY) { return false; } -#endif /* !CONFIG_SCHED_CLUTCH */ if (thread->sched_pri >= BASEPRI_RTQUEUES) { /* Never spill realtime threads */ @@ -259,7 +262,7 @@ should_spill_to_ecores(processor_set_t nset, thread_t thread) return false; } - if ((sched_get_pset_load_average(nset) >= sched_amp_spill_threshold(nset)) && /* There is already a load on P cores */ + if ((sched_get_pset_load_average(nset, 0) >= sched_amp_spill_threshold(nset)) && /* There is already a load on P cores */ pset_should_accept_spilled_thread(ecore_set, thread->sched_pri)) { /* There are lower priority E cores */ return true; } @@ -461,19 +464,32 @@ sched_amp_qos_max_parallelism(int qos, uint64_t options) } /* - * The current AMP scheduler policy is not run - * background and utility threads on the P-Cores. + * The default AMP scheduler policy is to run utility and bg + * threads on E-Cores only. Run-time policy adjustment unlocks + * the ability of utility and bg threads to be scheduled based on + * run-time conditions. */ switch (qos) { case THREAD_QOS_UTILITY: + return (os_atomic_load(&sched_perfctl_policy_util, relaxed) == SCHED_PERFCTL_POLICY_DEFAULT) ? ecount : (ecount + pcount); case THREAD_QOS_BACKGROUND: case THREAD_QOS_MAINTENANCE: - return ecount; + return (os_atomic_load(&sched_perfctl_policy_bg, relaxed) == SCHED_PERFCTL_POLICY_DEFAULT) ?
ecount : (ecount + pcount); default: return ecount + pcount; } } +pset_node_t +sched_amp_choose_node(thread_t thread) +{ + if (recommended_pset_type(thread) == PSET_AMP_P) { + return pcore_set->node; + } else { + return ecore_set->node; + } +} + /* * sched_amp_rt_runq() */ @@ -512,17 +528,15 @@ sched_amp_rt_queue_shutdown(processor_t processor) queue_init(&tqueue); - rt_lock_lock(pset); - while (rt_runq_count(pset) > 0) { thread = qe_dequeue_head(&pset->rt_runq.queue, struct thread, runq_links); thread->runq = PROCESSOR_NULL; - SCHED_STATS_RUNQ_CHANGE(&pset->rt_runq.runq_stats, pset->rt_runq.count); + SCHED_STATS_RUNQ_CHANGE(&pset->rt_runq.runq_stats, + os_atomic_load(&pset->rt_runq.count, relaxed)); rt_runq_count_decr(pset); enqueue_tail(&tqueue, &thread->runq_links); } - rt_lock_unlock(pset); - sched_update_pset_load_average(pset); + sched_update_pset_load_average(pset, 0); pset_unlock(pset); qe_foreach_element_safe(thread, &tqueue, runq_links) { @@ -552,7 +566,7 @@ sched_amp_rt_runq_scan(sched_update_scan_context_t scan_context) spl_t s = splsched(); do { while (pset != NULL) { - rt_lock_lock(pset); + pset_lock(pset); qe_foreach_element_safe(thread, &pset->rt_runq.queue, runq_links) { if (thread->last_made_runnable_time < scan_context->earliest_rt_make_runnable_time) { @@ -560,7 +574,7 @@ sched_amp_rt_runq_scan(sched_update_scan_context_t scan_context) } } - rt_lock_unlock(pset); + pset_unlock(pset); pset = pset->pset_list; } diff --git a/osfmk/kern/sched_amp_common.h b/osfmk/kern/sched_amp_common.h index e29cf07f3..2c3a5b525 100644 --- a/osfmk/kern/sched_amp_common.h +++ b/osfmk/kern/sched_amp_common.h @@ -76,6 +76,8 @@ int64_t sched_amp_rt_runq_count_sum(void); uint32_t sched_amp_qos_max_parallelism(int qos, uint64_t options); void sched_amp_bounce_thread_group_from_ecores(processor_set_t pset, struct thread_group *tg); +pset_node_t sched_amp_choose_node(thread_t thread); + #endif /* __AMP__ */ #endif /* _KERN_SCHED_AMP_COMMON_H_ */ diff --git a/osfmk/kern/sched_average.c b/osfmk/kern/sched_average.c index a6a855c9f..f74e8b960 100644 --- a/osfmk/kern/sched_average.c +++ b/osfmk/kern/sched_average.c @@ -292,7 +292,13 @@ compute_averages(uint64_t stdelta) /* Update the global pri_shifts based on the latest values */ for (uint32_t i = TH_BUCKET_SHARE_FG; i <= TH_BUCKET_SHARE_BG; i++) { uint32_t bucket_load = SCHED_LOAD_EWMA_UNSCALE(sched_load[i]); - sched_pri_shifts[i] = sched_fixed_shift - sched_load_shifts[bucket_load]; + uint32_t shift = sched_fixed_shift - sched_load_shifts[bucket_load]; + + if (shift > SCHED_PRI_SHIFT_MAX) { + sched_pri_shifts[i] = INT8_MAX; + } else { + sched_pri_shifts[i] = shift; + } } /* diff --git a/osfmk/kern/sched_clutch.c b/osfmk/kern/sched_clutch.c index 172efd303..40fb7319d 100644 --- a/osfmk/kern/sched_clutch.c +++ b/osfmk/kern/sched_clutch.c @@ -46,9 +46,9 @@ #include #include -#if __AMP__ +#if CONFIG_SCHED_EDGE #include -#endif /* __AMP__ */ +#endif /* CONFIG_SCHED_EDGE */ #if CONFIG_SCHED_CLUTCH @@ -56,16 +56,28 @@ /* Root level hierarchy management */ static void sched_clutch_root_init(sched_clutch_root_t, processor_set_t); -static void sched_clutch_root_bucket_init(sched_clutch_root_bucket_t, sched_bucket_t); +static void sched_clutch_root_bucket_init(sched_clutch_root_bucket_t, sched_bucket_t, bool); static void sched_clutch_root_pri_update(sched_clutch_root_t); -static sched_clutch_root_bucket_t sched_clutch_root_highest_root_bucket(sched_clutch_root_t, uint64_t); static void sched_clutch_root_urgency_inc(sched_clutch_root_t, thread_t); static void 
sched_clutch_root_urgency_dec(sched_clutch_root_t, thread_t); +__enum_decl(sched_clutch_highest_root_bucket_type_t, uint32_t, { + SCHED_CLUTCH_HIGHEST_ROOT_BUCKET_NONE = 0, + SCHED_CLUTCH_HIGHEST_ROOT_BUCKET_UNBOUND_ONLY = 1, + SCHED_CLUTCH_HIGHEST_ROOT_BUCKET_ALL = 2, +}); + +static sched_clutch_root_bucket_t sched_clutch_root_highest_root_bucket(sched_clutch_root_t, uint64_t, sched_clutch_highest_root_bucket_type_t); + +#if CONFIG_SCHED_EDGE +/* Support for foreign threads on AMP platforms */ +static boolean_t sched_clutch_root_foreign_empty(sched_clutch_root_t); +static thread_t sched_clutch_root_highest_foreign_thread_remove(sched_clutch_root_t); +#endif /* CONFIG_SCHED_EDGE */ + /* Root bucket level hierarchy management */ static uint64_t sched_clutch_root_bucket_deadline_calculate(sched_clutch_root_bucket_t, uint64_t); static void sched_clutch_root_bucket_deadline_update(sched_clutch_root_bucket_t, sched_clutch_root_t, uint64_t); -static int sched_clutch_root_bucket_pri_compare(sched_clutch_root_bucket_t, sched_clutch_root_bucket_t); /* Options for clutch bucket ordering in the runq */ __options_decl(sched_clutch_bucket_options_t, uint32_t, { @@ -84,45 +96,43 @@ static void sched_clutch_bucket_hierarchy_remove(sched_clutch_root_t, sched_clut static boolean_t sched_clutch_bucket_runnable(sched_clutch_bucket_t, sched_clutch_root_t, uint64_t, sched_clutch_bucket_options_t); static boolean_t sched_clutch_bucket_update(sched_clutch_bucket_t, sched_clutch_root_t, uint64_t, sched_clutch_bucket_options_t); static void sched_clutch_bucket_empty(sched_clutch_bucket_t, sched_clutch_root_t, uint64_t, sched_clutch_bucket_options_t); - -static void sched_clutch_bucket_cpu_usage_update(sched_clutch_bucket_t, uint64_t); -static void sched_clutch_bucket_cpu_blocked_update(sched_clutch_bucket_t, uint64_t); static uint8_t sched_clutch_bucket_pri_calculate(sched_clutch_bucket_t, uint64_t); -static sched_clutch_bucket_t sched_clutch_root_bucket_highest_clutch_bucket(sched_clutch_root_bucket_t); + +/* Clutch bucket group level properties management */ +static void sched_clutch_bucket_group_cpu_usage_update(sched_clutch_bucket_group_t, uint64_t); +static void sched_clutch_bucket_group_cpu_adjust(sched_clutch_bucket_group_t, uint8_t); +static void sched_clutch_bucket_group_timeshare_update(sched_clutch_bucket_group_t, sched_clutch_bucket_t, uint64_t); +static uint8_t sched_clutch_bucket_group_pending_ageout(sched_clutch_bucket_group_t, uint64_t); +static uint32_t sched_clutch_bucket_group_run_count_inc(sched_clutch_bucket_group_t); +static uint32_t sched_clutch_bucket_group_run_count_dec(sched_clutch_bucket_group_t); +static uint8_t sched_clutch_bucket_group_interactivity_score_calculate(sched_clutch_bucket_group_t, uint64_t); /* Clutch timeshare properties updates */ static uint32_t sched_clutch_run_bucket_incr(sched_clutch_t, sched_bucket_t); static uint32_t sched_clutch_run_bucket_decr(sched_clutch_t, sched_bucket_t); -static void sched_clutch_bucket_cpu_adjust(sched_clutch_bucket_t); -static void sched_clutch_bucket_timeshare_update(sched_clutch_bucket_t); -static boolean_t sched_thread_sched_pri_promoted(thread_t); + /* Clutch membership management */ static boolean_t sched_clutch_thread_insert(sched_clutch_root_t, thread_t, integer_t); static void sched_clutch_thread_remove(sched_clutch_root_t, thread_t, uint64_t, sched_clutch_bucket_options_t); -static thread_t sched_clutch_thread_highest(sched_clutch_root_t); +static thread_t sched_clutch_thread_highest_remove(sched_clutch_root_t); /* Clutch 
properties updates */ static uint32_t sched_clutch_root_urgency(sched_clutch_root_t); static uint32_t sched_clutch_root_count_sum(sched_clutch_root_t); static int sched_clutch_root_priority(sched_clutch_root_t); +static sched_clutch_bucket_t sched_clutch_root_bucket_highest_clutch_bucket(sched_clutch_root_bucket_t); +static boolean_t sched_thread_sched_pri_promoted(thread_t); -#if __AMP__ +#if CONFIG_SCHED_EDGE /* System based routines */ -static bool sched_clutch_pset_available(processor_set_t); -#endif /* __AMP__ */ +static bool sched_edge_pset_available(processor_set_t); +static uint32_t sched_edge_thread_bound_cluster_id(thread_t); +#endif /* CONFIG_SCHED_EDGE */ /* Helper debugging routines */ static inline void sched_clutch_hierarchy_locked_assert(sched_clutch_root_t); - - -/* - * Global priority queue comparator routine for root buckets. The - * routine implements the priority queue as a minimum deadline queue - * to achieve EDF scheduling. - */ -priority_queue_compare_fn_t sched_clutch_root_bucket_compare; - +extern processor_set_t pset_array[MAX_PSETS]; /* * Special markers for buckets that have invalid WCELs/quantums etc. @@ -180,6 +190,17 @@ static uint64_t sched_clutch_root_bucket_warp[TH_BUCKET_SCHED_MAX] = {0}; * (combined with the root level bucket quantums) restricts how much * the lower priority levels can preempt the higher priority threads. */ + +#if XNU_TARGET_OS_OSX +static uint32_t sched_clutch_thread_quantum_us[TH_BUCKET_SCHED_MAX] = { + 10000, /* FIXPRI (10ms) */ + 10000, /* FG (10ms) */ + 10000, /* IN (10ms) */ + 10000, /* DF (10ms) */ + 4000, /* UT (4ms) */ + 2000 /* BG (2ms) */ +}; +#else /* XNU_TARGET_OS_OSX */ static uint32_t sched_clutch_thread_quantum_us[TH_BUCKET_SCHED_MAX] = { 10000, /* FIXPRI (10ms) */ 10000, /* FG (10ms) */ @@ -188,12 +209,9 @@ static uint32_t sched_clutch_thread_quantum_us[TH_BUCKET_SCHED_MAX] = { 4000, /* UT (4ms) */ 2000 /* BG (2ms) */ }; -static uint64_t sched_clutch_thread_quantum[TH_BUCKET_SCHED_MAX] = {0}; +#endif /* XNU_TARGET_OS_OSX */ -enum sched_clutch_state { - SCHED_CLUTCH_STATE_EMPTY = 0, - SCHED_CLUTCH_STATE_RUNNABLE, -}; +static uint64_t sched_clutch_thread_quantum[TH_BUCKET_SCHED_MAX] = {0}; /* * sched_clutch_us_to_abstime() @@ -213,6 +231,9 @@ sched_clutch_us_to_abstime(uint32_t *us_vals, uint64_t *abstime_vals) } } +/* Clutch/Edge Scheduler Debugging support */ +#define SCHED_CLUTCH_DBG_THR_COUNT_PACK(a, b, c) ((uint64_t)c | ((uint64_t)b << 16) | ((uint64_t)a << 32)) + #if DEVELOPMENT || DEBUG /* @@ -268,30 +289,13 @@ sched_clutch_thr_count_dec( } } -#if __AMP__ - /* - * sched_clutch_pset_available() - * - * Routine to determine if a pset is available for scheduling. + * The clutch scheduler attempts to ageout the CPU usage of clutch bucket groups + * based on the amount of time they have been pending and the load at that + * scheduling bucket level. Since the clutch bucket groups are global (i.e. span + * multiple clusters, its important to keep the load also as a global counter. 
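SCHED_CLUTCH_DBG_THR_COUNT_PACK() above packs three 16-bit thread counts into a single 64-bit tracepoint argument. A stand-alone sketch of the packing, together with the unpacking a trace consumer would perform; the macro name and sample counts here are illustrative.

#include <stdint.h>
#include <stdio.h>

#define THR_COUNT_PACK(a, b, c) ((uint64_t)(c) | ((uint64_t)(b) << 16) | ((uint64_t)(a) << 32))

int
main(void)
{
    uint64_t packed = THR_COUNT_PACK(7, 3, 12);   /* three hypothetical counts */

    printf("a=%llu b=%llu c=%llu\n",
        (unsigned long long)((packed >> 32) & 0xffff),
        (unsigned long long)((packed >> 16) & 0xffff),
        (unsigned long long)(packed & 0xffff));   /* prints a=7 b=3 c=12 */
    return 0;
}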
*/ -static bool -sched_clutch_pset_available(processor_set_t pset) -{ - /* Check if cluster has none of the CPUs available */ - if (pset->online_processor_count == 0) { - return false; - } - - /* Check if the cluster is not recommended by CLPC */ - if (!pset_is_recommended(pset)) { - return false; - } - - return true; -} - -#endif /* __AMP__ */ +static uint32_t _Atomic sched_clutch_global_bucket_load[TH_BUCKET_SCHED_MAX]; /* * sched_clutch_root_init() @@ -307,24 +311,31 @@ sched_clutch_root_init( root_clutch->scr_priority = NOPRI; root_clutch->scr_urgency = 0; root_clutch->scr_pset = pset; +#if CONFIG_SCHED_EDGE + root_clutch->scr_cluster_id = pset->pset_cluster_id; +#else /* CONFIG_SCHED_EDGE */ + root_clutch->scr_cluster_id = 0; +#endif /* CONFIG_SCHED_EDGE */ /* Initialize the queue which maintains all runnable clutch_buckets for timesharing purposes */ queue_init(&root_clutch->scr_clutch_buckets); - /* Initialize the queue which maintains all runnable foreign clutch buckets */ - queue_init(&root_clutch->scr_foreign_buckets); + /* Initialize the priority queue which maintains all runnable foreign clutch buckets */ + priority_queue_init(&root_clutch->scr_foreign_buckets); + bzero(&root_clutch->scr_cumulative_run_count, sizeof(root_clutch->scr_cumulative_run_count)); + bitmap_zero(root_clutch->scr_bound_runnable_bitmap, TH_BUCKET_SCHED_MAX); + bitmap_zero(root_clutch->scr_bound_warp_available, TH_BUCKET_SCHED_MAX); + priority_queue_init(&root_clutch->scr_bound_root_buckets); /* Initialize the bitmap and priority queue of runnable root buckets */ - sched_clutch_root_bucket_compare = priority_heap_make_comparator(a, b, struct sched_clutch_root_bucket, scrb_pqlink, { - return (a->scrb_deadline < b->scrb_deadline) ? 1 : ((a->scrb_deadline == b->scrb_deadline) ? 0 : -1); - }); - priority_queue_init(&root_clutch->scr_root_buckets, PRIORITY_QUEUE_GENERIC_KEY | PRIORITY_QUEUE_MIN_HEAP); - bitmap_zero(root_clutch->scr_runnable_bitmap, TH_BUCKET_SCHED_MAX); - bitmap_zero(root_clutch->scr_warp_available, TH_BUCKET_SCHED_MAX); + priority_queue_init(&root_clutch->scr_unbound_root_buckets); + bitmap_zero(root_clutch->scr_unbound_runnable_bitmap, TH_BUCKET_SCHED_MAX); + bitmap_zero(root_clutch->scr_unbound_warp_available, TH_BUCKET_SCHED_MAX); /* Initialize all the root buckets */ for (uint32_t i = 0; i < TH_BUCKET_SCHED_MAX; i++) { - sched_clutch_root_bucket_init(&root_clutch->scr_buckets[i], i); + sched_clutch_root_bucket_init(&root_clutch->scr_unbound_buckets[i], i, false); + sched_clutch_root_bucket_init(&root_clutch->scr_bound_buckets[i], i, true); } } @@ -463,115 +474,226 @@ sched_clutch_bucket_runq_rotate( static void sched_clutch_root_bucket_init( sched_clutch_root_bucket_t root_bucket, - sched_bucket_t bucket) + sched_bucket_t bucket, + bool bound_root_bucket) { root_bucket->scrb_bucket = bucket; - sched_clutch_bucket_runq_init(&root_bucket->scrb_clutch_buckets); + if (bound_root_bucket) { + /* For bound root buckets, initialize the bound thread runq. */ + root_bucket->scrb_bound = true; + run_queue_init(&root_bucket->scrb_bound_thread_runq); + } else { + /* + * The unbounded root buckets contain a runq of runnable clutch buckets + * which then hold the runnable threads. 
+ */ + root_bucket->scrb_bound = false; + sched_clutch_bucket_runq_init(&root_bucket->scrb_clutch_buckets); + } priority_queue_entry_init(&root_bucket->scrb_pqlink); - root_bucket->scrb_deadline = SCHED_CLUTCH_INVALID_TIME_64; + root_bucket->scrb_pqlink.deadline = SCHED_CLUTCH_INVALID_TIME_64; root_bucket->scrb_warped_deadline = 0; root_bucket->scrb_warp_remaining = sched_clutch_root_bucket_warp[root_bucket->scrb_bucket]; + root_bucket->scrb_starvation_avoidance = false; + root_bucket->scrb_starvation_ts = 0; } /* - * sched_clutch_root_bucket_pri_compare() - * - * Routine to compare root buckets based on the highest runnable clutch - * bucket priorities in the root buckets. - */ -static int -sched_clutch_root_bucket_pri_compare( - sched_clutch_root_bucket_t a, - sched_clutch_root_bucket_t b) -{ - sched_clutch_bucket_t a_highest = sched_clutch_root_bucket_highest_clutch_bucket(a); - sched_clutch_bucket_t b_highest = sched_clutch_root_bucket_highest_clutch_bucket(b); - return (a_highest->scb_priority > b_highest->scb_priority) ? - 1 : ((a_highest->scb_priority == b_highest->scb_priority) ? 0 : -1); -} - -/* - * sched_clutch_root_select_aboveui() - * * Special case scheduling for Above UI bucket. * * AboveUI threads are typically system critical threads that need low latency * which is why they are handled specially. * * Since the priority range for AboveUI and FG Timeshare buckets overlap, it is - * important to maintain some native priority order between those buckets. The policy - * implemented here is to compare the highest clutch buckets of both buckets; if the + * important to maintain some native priority order between those buckets. For unbounded + * root buckets, the policy is to compare the highest clutch buckets of both buckets; if the * Above UI bucket is higher, schedule it immediately. Otherwise fall through to the - * deadline based scheduling which should pickup the timeshare buckets. + * deadline based scheduling which should pickup the timeshare buckets. For the bound + * case, the policy simply compares the priority of the highest runnable threads in + * the above UI and timeshare buckets. * * The implementation allows extremely low latency CPU access for Above UI threads * while supporting the use case of high priority timeshare threads contending with * lower priority fixed priority threads. */ -static boolean_t -sched_clutch_root_select_aboveui( + + +/* + * sched_clutch_root_unbound_select_aboveui() + * + * Routine to determine if the above UI unbounded bucket should be selected for execution. 
+ */ +static bool +sched_clutch_root_unbound_select_aboveui( sched_clutch_root_t root_clutch) { - if (bitmap_test(root_clutch->scr_runnable_bitmap, TH_BUCKET_FIXPRI)) { - sched_clutch_root_bucket_t root_bucket_aboveui = &root_clutch->scr_buckets[TH_BUCKET_FIXPRI]; - sched_clutch_root_bucket_t root_bucket_sharefg = &root_clutch->scr_buckets[TH_BUCKET_SHARE_FG]; - - if (!bitmap_test(root_clutch->scr_runnable_bitmap, TH_BUCKET_SHARE_FG)) { + if (bitmap_test(root_clutch->scr_unbound_runnable_bitmap, TH_BUCKET_FIXPRI)) { + sched_clutch_root_bucket_t root_bucket_aboveui = &root_clutch->scr_unbound_buckets[TH_BUCKET_FIXPRI]; + sched_clutch_root_bucket_t root_bucket_sharefg = &root_clutch->scr_unbound_buckets[TH_BUCKET_SHARE_FG]; + if (!bitmap_test(root_clutch->scr_unbound_runnable_bitmap, TH_BUCKET_SHARE_FG)) { /* If the timeshare FG bucket is not runnable, pick the aboveUI bucket for scheduling */ return true; } - if (sched_clutch_root_bucket_pri_compare(root_bucket_aboveui, root_bucket_sharefg) >= 0) { - /* If the aboveUI bucket has a higher native clutch bucket priority, schedule it */ + sched_clutch_bucket_t clutch_bucket_aboveui = sched_clutch_root_bucket_highest_clutch_bucket(root_bucket_aboveui); + sched_clutch_bucket_t clutch_bucket_sharefg = sched_clutch_root_bucket_highest_clutch_bucket(root_bucket_sharefg); + if (clutch_bucket_aboveui->scb_priority >= clutch_bucket_sharefg->scb_priority) { return true; } } return false; } +/* + * sched_clutch_root_bound_select_aboveui() + * + * Routine to determine if the above UI bounded bucket should be selected for execution. + */ +static bool +sched_clutch_root_bound_select_aboveui( + sched_clutch_root_t root_clutch) +{ + sched_clutch_root_bucket_t root_bucket_aboveui = &root_clutch->scr_bound_buckets[TH_BUCKET_FIXPRI]; + sched_clutch_root_bucket_t root_bucket_sharefg = &root_clutch->scr_bound_buckets[TH_BUCKET_SHARE_FG]; + if (root_bucket_aboveui->scrb_bound_thread_runq.count == 0) { + return false; + } + return root_bucket_aboveui->scrb_bound_thread_runq.highq >= root_bucket_sharefg->scrb_bound_thread_runq.highq; +} /* * sched_clutch_root_highest_root_bucket() * * Main routine to find the highest runnable root level bucket. * This routine is called from performance sensitive contexts; so it is - * crucial to keep this O(1). - * + * crucial to keep this O(1). The options parameter determines if + * the selection logic should look at unbounded threads only (for + * cross-cluster stealing operations) or both bounded and unbounded + * threads (for selecting next thread for execution on current cluster). */ static sched_clutch_root_bucket_t sched_clutch_root_highest_root_bucket( sched_clutch_root_t root_clutch, - uint64_t timestamp) + uint64_t timestamp, + sched_clutch_highest_root_bucket_type_t type) { sched_clutch_hierarchy_locked_assert(root_clutch); - if (bitmap_lsb_first(root_clutch->scr_runnable_bitmap, TH_BUCKET_SCHED_MAX) == -1) { + int highest_runnable_bucket = -1; + if (type == SCHED_CLUTCH_HIGHEST_ROOT_BUCKET_UNBOUND_ONLY) { + highest_runnable_bucket = bitmap_lsb_first(root_clutch->scr_unbound_runnable_bitmap, TH_BUCKET_SCHED_MAX); + } else { + int highest_unbound_runnable_bucket = bitmap_lsb_first(root_clutch->scr_unbound_runnable_bitmap, TH_BUCKET_SCHED_MAX); + int highest_bound_runnable_bucket = bitmap_lsb_first(root_clutch->scr_bound_runnable_bitmap, TH_BUCKET_SCHED_MAX); + highest_runnable_bucket = (highest_bound_runnable_bucket != -1) ? ((highest_unbound_runnable_bucket != -1) ? 
MIN(highest_bound_runnable_bucket, highest_unbound_runnable_bucket) : highest_bound_runnable_bucket) : highest_unbound_runnable_bucket; + } + + if (highest_runnable_bucket == -1) { return NULL; } - if (sched_clutch_root_select_aboveui(root_clutch)) { - return &root_clutch->scr_buckets[TH_BUCKET_FIXPRI]; + /* Above UI root bucket selection (see comment above for more details on this special case handling) */ + bool unbound_aboveui = sched_clutch_root_unbound_select_aboveui(root_clutch); + if (type == SCHED_CLUTCH_HIGHEST_ROOT_BUCKET_UNBOUND_ONLY) { + if (unbound_aboveui) { + return &root_clutch->scr_unbound_buckets[TH_BUCKET_FIXPRI]; + } + /* Fall through to selecting a timeshare root bucket */ + } else { + bool bound_aboveui = sched_clutch_root_bound_select_aboveui(root_clutch); + sched_clutch_root_bucket_t unbound_aboveui_root_bucket = &root_clutch->scr_unbound_buckets[TH_BUCKET_FIXPRI]; + sched_clutch_root_bucket_t bound_aboveui_root_bucket = &root_clutch->scr_bound_buckets[TH_BUCKET_FIXPRI]; + + if (unbound_aboveui && bound_aboveui) { + /* + * In this scenario both the bounded and unbounded above UI buckets are runnable; choose based on the + * highest runnable priority in both the buckets. + * */ + int bound_aboveui_pri = root_clutch->scr_bound_buckets[TH_BUCKET_FIXPRI].scrb_bound_thread_runq.highq; + sched_clutch_bucket_t clutch_bucket = sched_clutch_root_bucket_highest_clutch_bucket(unbound_aboveui_root_bucket); + int unbound_aboveui_pri = priority_queue_max_sched_pri(&clutch_bucket->scb_clutchpri_prioq); + return (bound_aboveui_pri >= unbound_aboveui_pri) ? bound_aboveui_root_bucket : unbound_aboveui_root_bucket; + } + if (unbound_aboveui) { + return unbound_aboveui_root_bucket; + } + if (bound_aboveui) { + return bound_aboveui_root_bucket; + } + /* Fall through to selecting a timeshare root bucket */ } /* - * Above UI bucket is not runnable or has a low priority clutch bucket; use the earliest deadline model - * to schedule threads. The idea is that as the timeshare buckets use CPU, they will drop their - * interactivity score and allow low priority AboveUI clutch buckets to be scheduled. + * Above UI bucket is not runnable or has a low priority runnable thread; use the + * earliest deadline model to schedule threads. The idea is that as the timeshare + * buckets use CPU, they will drop their interactivity score/sched priority and + * allow the low priority AboveUI buckets to be scheduled. 
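A minimal sketch of the earliest-deadline choice between the bound and unbound root buckets described above; the struct and function names here are stand-ins, only the comparison mirrors the patch:

#include <stddef.h>
#include <stdint.h>

struct root_bucket {
	uint64_t deadline;	/* absolute deadline; a smaller value is more urgent */
};

/*
 * Pick the earlier-deadline candidate; either side may be NULL when it has
 * no runnable bucket. Ties go to the bound bucket, as in the patch.
 */
static struct root_bucket *
edf_pick(struct root_bucket *bound, struct root_bucket *unbound)
{
	if (bound && unbound) {
		return (bound->deadline <= unbound->deadline) ? bound : unbound;
	}
	return bound ? bound : unbound;
}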
*/ /* Find the earliest deadline bucket */ - sched_clutch_root_bucket_t edf_bucket = priority_queue_min(&root_clutch->scr_root_buckets, struct sched_clutch_root_bucket, scrb_pqlink); - + sched_clutch_root_bucket_t edf_bucket = NULL; sched_clutch_root_bucket_t warp_bucket = NULL; int warp_bucket_index = -1; -evaluate_warp_buckets: - /* Check if any higher runnable buckets have warp available */ - warp_bucket_index = bitmap_lsb_first(root_clutch->scr_warp_available, TH_BUCKET_SCHED_MAX); + +evaluate_root_buckets: + if (type == SCHED_CLUTCH_HIGHEST_ROOT_BUCKET_UNBOUND_ONLY) { + edf_bucket = priority_queue_min(&root_clutch->scr_unbound_root_buckets, struct sched_clutch_root_bucket, scrb_pqlink); + } else { + sched_clutch_root_bucket_t unbound_bucket = priority_queue_min(&root_clutch->scr_unbound_root_buckets, struct sched_clutch_root_bucket, scrb_pqlink); + sched_clutch_root_bucket_t bound_bucket = priority_queue_min(&root_clutch->scr_bound_root_buckets, struct sched_clutch_root_bucket, scrb_pqlink); + if (bound_bucket && unbound_bucket) { + /* If bound and unbound root buckets are runnable, select the one with the earlier deadline */ + edf_bucket = (bound_bucket->scrb_pqlink.deadline <= unbound_bucket->scrb_pqlink.deadline) ? bound_bucket : unbound_bucket; + } else { + edf_bucket = (bound_bucket) ? bound_bucket : unbound_bucket; + } + } + /* + * Check if any of the buckets have warp available. The implementation only allows root buckets to warp ahead of + * buckets of the same type (i.e. bound/unbound). The reason for doing that is because warping is a concept that + * makes sense between root buckets of the same type since its effectively a scheduling advantage over a lower + * QoS root bucket. + */ + bitmap_t *warp_available_bitmap = (edf_bucket->scrb_bound) ? (root_clutch->scr_bound_warp_available) : (root_clutch->scr_unbound_warp_available); + warp_bucket_index = bitmap_lsb_first(warp_available_bitmap, TH_BUCKET_SCHED_MAX); if ((warp_bucket_index == -1) || (warp_bucket_index >= edf_bucket->scrb_bucket)) { - /* No higher buckets have warp available; choose the edf bucket and replenish its warp */ - sched_clutch_root_bucket_deadline_update(edf_bucket, root_clutch, timestamp); - edf_bucket->scrb_warp_remaining = sched_clutch_root_bucket_warp[edf_bucket->scrb_bucket]; - edf_bucket->scrb_warped_deadline = SCHED_CLUTCH_ROOT_BUCKET_WARP_UNUSED; - bitmap_set(root_clutch->scr_warp_available, edf_bucket->scrb_bucket); + /* No higher buckets have warp left; best choice is the EDF based bucket */ + if (edf_bucket->scrb_starvation_avoidance) { + /* + * Indicates that the earliest deadline bucket is in starvation avoidance mode. Check to see if the + * starvation avoidance window is still open and return this bucket if it is. + * + * The starvation avoidance window is calculated based on the quantum of threads at that bucket and + * the number of CPUs in the cluster. The idea is to basically provide one quantum worth of starvation + * avoidance across all CPUs. 
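A rough sketch of the starvation-avoidance window arithmetic described in the comment above; the quantum value and CPU count are plain parameters here rather than the kernel's per-bucket tables:

#include <stdbool.h>
#include <stdint.h>

/*
 * The window is one thread quantum spread across the CPUs of the cluster;
 * the bucket keeps getting picked while the window is still open.
 */
static bool
starvation_window_open(uint64_t now, uint64_t starvation_ts,
    uint64_t bucket_quantum, uint32_t online_cpus)
{
	uint64_t window = bucket_quantum / online_cpus;
	return now < (starvation_ts + window);
}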
+ */ + uint64_t starvation_window = sched_clutch_thread_quantum[edf_bucket->scrb_bucket] / root_clutch->scr_pset->online_processor_count; + if (timestamp < (edf_bucket->scrb_starvation_ts + starvation_window)) { + return edf_bucket; + } else { + /* Starvation avoidance window is over; update deadline and re-evaluate EDF */ + edf_bucket->scrb_starvation_avoidance = false; + edf_bucket->scrb_starvation_ts = 0; + sched_clutch_root_bucket_deadline_update(edf_bucket, root_clutch, timestamp); + } + goto evaluate_root_buckets; + } + + /* Looks like the EDF bucket is not in starvation avoidance mode; check if it should be */ + if (highest_runnable_bucket < edf_bucket->scrb_bucket) { + /* Since a higher bucket is runnable, it indicates that the EDF bucket should be in starvation avoidance */ + edf_bucket->scrb_starvation_avoidance = true; + edf_bucket->scrb_starvation_ts = timestamp; + } else { + /* EDF bucket is being selected in the natural order; update deadline and reset warp */ + sched_clutch_root_bucket_deadline_update(edf_bucket, root_clutch, timestamp); + edf_bucket->scrb_warp_remaining = sched_clutch_root_bucket_warp[edf_bucket->scrb_bucket]; + edf_bucket->scrb_warped_deadline = SCHED_CLUTCH_ROOT_BUCKET_WARP_UNUSED; + if (edf_bucket->scrb_bound) { + bitmap_set(root_clutch->scr_bound_warp_available, edf_bucket->scrb_bucket); + } else { + bitmap_set(root_clutch->scr_unbound_warp_available, edf_bucket->scrb_bucket); + } + } return edf_bucket; } @@ -579,7 +701,7 @@ evaluate_warp_buckets: * Looks like there is a root bucket which is higher in the natural priority * order than edf_bucket and might have some warp remaining. */ - warp_bucket = &root_clutch->scr_buckets[warp_bucket_index]; + warp_bucket = (edf_bucket->scrb_bound) ? &root_clutch->scr_bound_buckets[warp_bucket_index] : &root_clutch->scr_unbound_buckets[warp_bucket_index]; if (warp_bucket->scrb_warped_deadline == SCHED_CLUTCH_ROOT_BUCKET_WARP_UNUSED) { /* Root bucket has not used any of its warp; set a deadline to expire its warp and return it */ warp_bucket->scrb_warped_deadline = timestamp + warp_bucket->scrb_warp_remaining; @@ -597,8 +719,12 @@ evaluate_warp_buckets: * warp bucket selection logic. 
*/ warp_bucket->scrb_warp_remaining = 0; - bitmap_clear(root_clutch->scr_warp_available, warp_bucket->scrb_bucket); - goto evaluate_warp_buckets; + if (warp_bucket->scrb_bound) { + bitmap_clear(root_clutch->scr_bound_warp_available, warp_bucket->scrb_bucket); + } else { + bitmap_clear(root_clutch->scr_unbound_warp_available, warp_bucket->scrb_bucket); + } + goto evaluate_root_buckets; } /* @@ -637,13 +763,16 @@ sched_clutch_root_bucket_deadline_update( /* The algorithm never uses the deadlines for scheduling TH_BUCKET_FIXPRI bucket */ return; } - uint64_t old_deadline = root_bucket->scrb_deadline; + + uint64_t old_deadline = root_bucket->scrb_pqlink.deadline; uint64_t new_deadline = sched_clutch_root_bucket_deadline_calculate(root_bucket, timestamp); - assert(old_deadline <= new_deadline); + if (__improbable(old_deadline > new_deadline)) { + panic("old_deadline (%llu) > new_deadline (%llu); root_bucket (%d); timestamp (%llu)", old_deadline, new_deadline, root_bucket->scrb_bucket, timestamp); + } if (old_deadline != new_deadline) { - root_bucket->scrb_deadline = new_deadline; - /* Since the priority queue is a min-heap, use the decrease routine even though the deadline has a larger value now */ - priority_queue_entry_decrease(&root_clutch->scr_root_buckets, &root_bucket->scrb_pqlink, PRIORITY_QUEUE_KEY_NONE, sched_clutch_root_bucket_compare); + root_bucket->scrb_pqlink.deadline = new_deadline; + struct priority_queue_deadline_min *prioq = (root_bucket->scrb_bound) ? &root_clutch->scr_bound_root_buckets : &root_clutch->scr_unbound_root_buckets; + priority_queue_entry_increased(prioq, &root_bucket->scrb_pqlink); } } @@ -660,21 +789,28 @@ sched_clutch_root_bucket_runnable( uint64_t timestamp) { /* Mark the root bucket as runnable */ - bitmap_set(root_clutch->scr_runnable_bitmap, root_bucket->scrb_bucket); - KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, MACHDBG_CODE(DBG_MACH_SCHED_CLUTCH, MACH_SCHED_CLUTCH_ROOT_BUCKET_STATE) | DBG_FUNC_NONE, - root_bucket->scrb_bucket, SCHED_CLUTCH_STATE_RUNNABLE, 0, 0, 0); + bitmap_t *runnable_bitmap = (root_bucket->scrb_bound) ? root_clutch->scr_bound_runnable_bitmap : root_clutch->scr_unbound_runnable_bitmap; + bitmap_set(runnable_bitmap, root_bucket->scrb_bucket); if (root_bucket->scrb_bucket == TH_BUCKET_FIXPRI) { /* Since the TH_BUCKET_FIXPRI bucket is not scheduled based on deadline, nothing more needed here */ return; } - root_bucket->scrb_deadline = sched_clutch_root_bucket_deadline_calculate(root_bucket, timestamp); - priority_queue_insert(&root_clutch->scr_root_buckets, &root_bucket->scrb_pqlink, PRIORITY_QUEUE_KEY_NONE, sched_clutch_root_bucket_compare); - + if (root_bucket->scrb_starvation_avoidance == false) { + /* + * Only update the deadline if the bucket was not in starvation avoidance mode. If the bucket was in + * starvation avoidance and its window has expired, the highest root bucket selection logic will notice + * that and fix it up. + */ + root_bucket->scrb_pqlink.deadline = sched_clutch_root_bucket_deadline_calculate(root_bucket, timestamp); + } + struct priority_queue_deadline_min *prioq = (root_bucket->scrb_bound) ? &root_clutch->scr_bound_root_buckets : &root_clutch->scr_unbound_root_buckets; + priority_queue_insert(prioq, &root_bucket->scrb_pqlink); if (root_bucket->scrb_warp_remaining) { /* Since the bucket has some warp remaining and its now runnable, mark it as available for warp */ - bitmap_set(root_clutch->scr_warp_available, root_bucket->scrb_bucket); + bitmap_t *warp_bitmap = (root_bucket->scrb_bound) ? 
root_clutch->scr_bound_warp_available : root_clutch->scr_unbound_warp_available; + bitmap_set(warp_bitmap, root_bucket->scrb_bucket); } } @@ -690,18 +826,20 @@ sched_clutch_root_bucket_empty( sched_clutch_root_t root_clutch, uint64_t timestamp) { - bitmap_clear(root_clutch->scr_runnable_bitmap, root_bucket->scrb_bucket); - KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, MACHDBG_CODE(DBG_MACH_SCHED_CLUTCH, MACH_SCHED_CLUTCH_ROOT_BUCKET_STATE) | DBG_FUNC_NONE, - root_bucket->scrb_bucket, SCHED_CLUTCH_STATE_EMPTY, 0, 0, 0); + bitmap_t *runnable_bitmap = (root_bucket->scrb_bound) ? root_clutch->scr_bound_runnable_bitmap : root_clutch->scr_unbound_runnable_bitmap; + bitmap_clear(runnable_bitmap, root_bucket->scrb_bucket); if (root_bucket->scrb_bucket == TH_BUCKET_FIXPRI) { /* Since the TH_BUCKET_FIXPRI bucket is not scheduled based on deadline, nothing more needed here */ return; } - priority_queue_remove(&root_clutch->scr_root_buckets, &root_bucket->scrb_pqlink, sched_clutch_root_bucket_compare); + struct priority_queue_deadline_min *prioq = (root_bucket->scrb_bound) ? &root_clutch->scr_bound_root_buckets : &root_clutch->scr_unbound_root_buckets; + priority_queue_remove(prioq, &root_bucket->scrb_pqlink); + + bitmap_t *warp_bitmap = (root_bucket->scrb_bound) ? root_clutch->scr_bound_warp_available : root_clutch->scr_unbound_warp_available; + bitmap_clear(warp_bitmap, root_bucket->scrb_bucket); - bitmap_clear(root_clutch->scr_warp_available, root_bucket->scrb_bucket); if (root_bucket->scrb_warped_deadline > timestamp) { /* * For root buckets that were using the warp, check if the warp @@ -720,44 +858,68 @@ sched_clutch_root_bucket_empty( } } +static int +sched_clutch_global_bucket_load_get( + sched_bucket_t bucket) +{ + return (int)os_atomic_load(&sched_clutch_global_bucket_load[bucket], relaxed); +} + /* * sched_clutch_root_pri_update() * * The root level priority is used for thread selection and preemption * logic. + * + * The logic uses the same decision as thread selection for deciding between the + * above UI and timeshare buckets. If one of the timesharing buckets have to be + * used for priority calculation, the logic is slightly different from thread + * selection, because thread selection considers deadlines, warps etc. to + * decide the most optimal bucket at a given timestamp. Since the priority + * value is used for preemption decisions only, it needs to be based on the + * highest runnable thread available in the timeshare domain. This logic can + * be made more sophisticated if there are cases of unnecessary preemption + * being seen in workloads. 
*/ static void sched_clutch_root_pri_update( sched_clutch_root_t root_clutch) { sched_clutch_hierarchy_locked_assert(root_clutch); - if (bitmap_lsb_first(root_clutch->scr_runnable_bitmap, TH_BUCKET_SCHED_MAX) == -1) { - /* No runnable root buckets */ - root_clutch->scr_priority = NOPRI; - assert(root_clutch->scr_urgency == 0); - return; + int16_t root_bound_pri = NOPRI; + int16_t root_unbound_pri = NOPRI; + + if (bitmap_lsb_first(root_clutch->scr_bound_runnable_bitmap, TH_BUCKET_SCHED_MAX) == -1) { + goto root_pri_update_unbound; } - sched_clutch_root_bucket_t root_bucket = NULL; - /* Special case for AboveUI (uses same logic as thread selection) */ - if (sched_clutch_root_select_aboveui(root_clutch)) { - root_bucket = &root_clutch->scr_buckets[TH_BUCKET_FIXPRI]; + sched_clutch_root_bucket_t root_bucket_bound = NULL; + if (sched_clutch_root_bound_select_aboveui(root_clutch)) { + root_bucket_bound = &root_clutch->scr_bound_buckets[TH_BUCKET_FIXPRI]; } else { - /* - * AboveUI bucket is not runnable or has a low clutch bucket priority, - * select the next runnable root bucket in natural priority order. This logic - * is slightly different from thread selection, because thread selection - * considers deadlines, warps etc. to decide the most optimal bucket at a - * given timestamp. Since the priority value is used for preemption decisions - * only, it needs to be based on the highest runnable thread available in - * the timeshare domain. - */ - int root_bucket_index = bitmap_lsb_next(root_clutch->scr_runnable_bitmap, TH_BUCKET_SCHED_MAX, TH_BUCKET_FIXPRI); + int root_bucket_index = bitmap_lsb_next(root_clutch->scr_bound_runnable_bitmap, TH_BUCKET_SCHED_MAX, TH_BUCKET_FIXPRI); + assert(root_bucket_index != -1); + root_bucket_bound = &root_clutch->scr_bound_buckets[root_bucket_index]; + } + root_bound_pri = root_bucket_bound->scrb_bound_thread_runq.highq; + +root_pri_update_unbound: + if (bitmap_lsb_first(root_clutch->scr_unbound_runnable_bitmap, TH_BUCKET_SCHED_MAX) == -1) { + goto root_pri_update_complete; + } + sched_clutch_root_bucket_t root_bucket_unbound = NULL; + if (sched_clutch_root_unbound_select_aboveui(root_clutch)) { + root_bucket_unbound = &root_clutch->scr_unbound_buckets[TH_BUCKET_FIXPRI]; + } else { + int root_bucket_index = bitmap_lsb_next(root_clutch->scr_unbound_runnable_bitmap, TH_BUCKET_SCHED_MAX, TH_BUCKET_FIXPRI); assert(root_bucket_index != -1); - root_bucket = &root_clutch->scr_buckets[root_bucket_index]; + root_bucket_unbound = &root_clutch->scr_unbound_buckets[root_bucket_index]; } /* For the selected root bucket, find the highest priority clutch bucket */ - sched_clutch_bucket_t clutch_bucket = sched_clutch_root_bucket_highest_clutch_bucket(root_bucket); - root_clutch->scr_priority = priority_queue_max_key(&clutch_bucket->scb_clutchpri_prioq); + sched_clutch_bucket_t clutch_bucket = sched_clutch_root_bucket_highest_clutch_bucket(root_bucket_unbound); + root_unbound_pri = priority_queue_max_sched_pri(&clutch_bucket->scb_clutchpri_prioq); + +root_pri_update_complete: + root_clutch->scr_priority = MAX(root_bound_pri, root_unbound_pri); } /* @@ -805,48 +967,81 @@ sched_clutch_root_urgency_dec( * * The second level of scheduling is the clutch bucket level scheduling * which tries to schedule thread groups within root_buckets. Each - * clutch represents a thread group and a clutch_bucket represents + * clutch represents a thread group and a clutch_bucket_group represents * threads at a particular sched_bucket within that thread group. 
The - * goal of this level of scheduling is to allow interactive thread + * clutch_bucket_group contains a clutch_bucket per cluster on the system + * where it holds the runnable threads destined for execution on that + * cluster. + * + * The goal of this level of scheduling is to allow interactive thread * groups low latency access to the CPU. It also provides slight * scheduling preference for App and unrestricted thread groups. * * The clutch bucket scheduling algorithm measures an interactivity - * score for all clutch buckets. The interactivity score is based + * score for all clutch bucket groups. The interactivity score is based * on the ratio of the CPU used and the voluntary blocking of threads - * within the clutch bucket. The algorithm is very close to the ULE + * within the clutch bucket group. The algorithm is very close to the ULE * scheduler on FreeBSD in terms of calculations. The interactivity * score provides an interactivity boost in the range of * [0:SCHED_CLUTCH_BUCKET_INTERACTIVE_PRI * 2] which allows interactive * thread groups to win over CPU spinners. + * + * The interactivity score of the clutch bucket group is combined with the + * highest base/promoted priority of threads in the clutch bucket to form + * the overall priority of the clutch bucket. */ /* Priority boost range for interactivity */ -#define SCHED_CLUTCH_BUCKET_INTERACTIVE_PRI_DEFAULT (8) -uint8_t sched_clutch_bucket_interactive_pri = SCHED_CLUTCH_BUCKET_INTERACTIVE_PRI_DEFAULT; +#define SCHED_CLUTCH_BUCKET_GROUP_INTERACTIVE_PRI_DEFAULT (8) +uint8_t sched_clutch_bucket_group_interactive_pri = SCHED_CLUTCH_BUCKET_GROUP_INTERACTIVE_PRI_DEFAULT; /* window to scale the cpu usage and blocked values (currently 500ms). Its the threshold of used+blocked */ -uint64_t sched_clutch_bucket_adjust_threshold = 0; -#define SCHED_CLUTCH_BUCKET_ADJUST_THRESHOLD_USECS (500000) +uint64_t sched_clutch_bucket_group_adjust_threshold = 0; +#define SCHED_CLUTCH_BUCKET_GROUP_ADJUST_THRESHOLD_USECS (500000) /* The ratio to scale the cpu/blocked time per window */ -#define SCHED_CLUTCH_BUCKET_ADJUST_RATIO (10) - -/* rate at which interactivity score is recalculated. This keeps the score smooth in terms of extremely bursty behavior */ -uint64_t sched_clutch_bucket_interactivity_delta = 0; -#define SCHED_CLUTCH_BUCKET_INTERACTIVITY_DELTA_USECS_DEFAULT (25000) +#define SCHED_CLUTCH_BUCKET_GROUP_ADJUST_RATIO (10) /* * In order to allow App thread groups some preference over daemon thread - * groups, the App clutch_buckets get a 8 point boost. The boost value should + * groups, the App clutch_buckets get a priority boost. The boost value should * be chosen such that badly behaved apps are still penalized over well - * behaved interactive daemon clutch_buckets. + * behaved interactive daemons. 
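The boost feeds into the overall clutch bucket priority together with the highest thread priority in the bucket and the interactivity score, as described earlier. A hedged sketch of that composition, using a stand-in enum and the boost values from the table that follows:

#include <stdint.h>

enum tg_pri { TG_PRI_LOW, TG_PRI_MED, TG_PRI_HIGH, TG_PRI_MAX };
static const uint8_t tg_pri_boost[TG_PRI_MAX] = { 0, 2, 4 };

/*
 * clutch bucket priority = highest (base or promoted) thread priority
 * + thread group boost + interactivity score (0 .. 2 * interactive_pri).
 */
static uint8_t
clutch_bucket_pri(uint8_t max_thread_pri, enum tg_pri pri, uint8_t interactivity_score)
{
	return (uint8_t)(max_thread_pri + tg_pri_boost[pri] + interactivity_score);
}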
*/ -#define SCHED_CLUTCH_BUCKET_PRI_BOOST_DEFAULT (8) -uint8_t sched_clutch_bucket_pri_boost = SCHED_CLUTCH_BUCKET_PRI_BOOST_DEFAULT; +static uint8_t sched_clutch_bucket_group_pri_boost[SCHED_CLUTCH_TG_PRI_MAX] = { + [SCHED_CLUTCH_TG_PRI_LOW] = 0, + [SCHED_CLUTCH_TG_PRI_MED] = 2, + [SCHED_CLUTCH_TG_PRI_HIGH] = 4, +}; /* Initial value for voluntary blocking time for the clutch_bucket */ -#define SCHED_CLUTCH_BUCKET_BLOCKED_TS_INVALID (uint32_t)(~0) +#define SCHED_CLUTCH_BUCKET_GROUP_BLOCKED_TS_INVALID (uint64_t)(~0) + +/* Value indicating the clutch bucket is not pending execution */ +#define SCHED_CLUTCH_BUCKET_GROUP_PENDING_INVALID ((uint64_t)(~0)) + +/* + * Thread group CPU starvation avoidance + * + * In heavily CPU contended scenarios, it is possible that some thread groups + * which have a low interactivity score do not get CPU time at all. In order to + * resolve that, the scheduler tries to ageout the CPU usage of the clutch + * bucket group when it has been pending execution for a certain time as defined + * by the sched_clutch_bucket_group_pending_delta_us values below. + * + * The values chosen here are very close to the WCEL values for each sched bucket. + * These values are multiplied by the load average of the relevant root bucket to + * provide an estimate of the actual clutch bucket load. + */ +static uint32_t sched_clutch_bucket_group_pending_delta_us[TH_BUCKET_SCHED_MAX] = { + SCHED_CLUTCH_INVALID_TIME_32, /* FIXPRI */ + 10000, /* FG */ + 37500, /* IN */ + 75000, /* DF */ + 150000, /* UT */ + 250000, /* BG */ +}; +static uint64_t sched_clutch_bucket_group_pending_delta[TH_BUCKET_SCHED_MAX] = {0}; /* * sched_clutch_bucket_init() @@ -856,7 +1051,7 @@ uint8_t sched_clutch_bucket_pri_boost = SCHED_CLUTCH_BUCKET_PRI_BOOST_DEFAULT; static void sched_clutch_bucket_init( sched_clutch_bucket_t clutch_bucket, - sched_clutch_t clutch, + sched_clutch_bucket_group_t clutch_bucket_group, sched_bucket_t bucket) { bzero(clutch_bucket, sizeof(struct sched_clutch_bucket)); @@ -864,22 +1059,49 @@ sched_clutch_bucket_init( clutch_bucket->scb_bucket = bucket; /* scb_priority will be recalculated when a thread is inserted in the clutch bucket */ clutch_bucket->scb_priority = 0; +#if CONFIG_SCHED_EDGE + clutch_bucket->scb_foreign = false; + priority_queue_entry_init(&clutch_bucket->scb_foreignlink); +#endif /* CONFIG_SCHED_EDGE */ + clutch_bucket->scb_group = clutch_bucket_group; + clutch_bucket->scb_root = NULL; + priority_queue_init(&clutch_bucket->scb_clutchpri_prioq); + priority_queue_init(&clutch_bucket->scb_thread_runq); + queue_init(&clutch_bucket->scb_thread_timeshare_queue); +} + +/* + * sched_clutch_bucket_group_init() + * + * Initializer for clutch bucket groups. 
+ */ +static void +sched_clutch_bucket_group_init( + sched_clutch_bucket_group_t clutch_bucket_group, + sched_clutch_t clutch, + sched_bucket_t bucket) +{ + bzero(clutch_bucket_group, sizeof(struct sched_clutch_bucket_group)); + clutch_bucket_group->scbg_bucket = bucket; + clutch_bucket_group->scbg_clutch = clutch; + for (int i = 0; i < MAX_PSETS; i++) { + sched_clutch_bucket_init(&clutch_bucket_group->scbg_clutch_buckets[i], clutch_bucket_group, bucket); + } + os_atomic_store(&clutch_bucket_group->scbg_timeshare_tick, 0, relaxed); + os_atomic_store(&clutch_bucket_group->scbg_pri_shift, INT8_MAX, relaxed); + os_atomic_store(&clutch_bucket_group->scbg_preferred_cluster, pset0.pset_cluster_id, relaxed); /* * All thread groups should be initialized to be interactive; this allows the newly launched * thread groups to fairly compete with already running thread groups. */ - clutch_bucket->scb_interactivity_score = (sched_clutch_bucket_interactive_pri * 2); - clutch_bucket->scb_foreign = false; - - os_atomic_store(&clutch_bucket->scb_timeshare_tick, 0, relaxed); - os_atomic_store(&clutch_bucket->scb_pri_shift, INT8_MAX, relaxed); - - clutch_bucket->scb_interactivity_ts = 0; - clutch_bucket->scb_blocked_ts = SCHED_CLUTCH_BUCKET_BLOCKED_TS_INVALID; - clutch_bucket->scb_clutch = clutch; - clutch_bucket->scb_root = NULL; - priority_queue_init(&clutch_bucket->scb_clutchpri_prioq, PRIORITY_QUEUE_BUILTIN_KEY | PRIORITY_QUEUE_MAX_HEAP); - run_queue_init(&clutch_bucket->scb_runq); + clutch_bucket_group->scbg_interactivity_data.scct_count = (sched_clutch_bucket_group_interactive_pri * 2); + clutch_bucket_group->scbg_interactivity_data.scct_timestamp = 0; + os_atomic_store(&clutch_bucket_group->scbg_cpu_data.cpu_data.scbcd_cpu_blocked, (clutch_cpu_data_t)sched_clutch_bucket_group_adjust_threshold, relaxed); +#if !__LP64__ + lck_spin_init(&clutch_bucket_group->scbg_stats_lock, &pset_lck_grp, NULL); +#endif /* !__LP64__ */ + clutch_bucket_group->scbg_blocked_data.scct_timestamp = SCHED_CLUTCH_BUCKET_GROUP_BLOCKED_TS_INVALID; + clutch_bucket_group->scbg_pending_data.scct_timestamp = SCHED_CLUTCH_BUCKET_GROUP_PENDING_INVALID; } /* @@ -896,7 +1118,7 @@ sched_clutch_init_with_thread_group( /* Initialize all the clutch buckets */ for (uint32_t i = 0; i < TH_BUCKET_SCHED_MAX; i++) { - sched_clutch_bucket_init(&(clutch->sc_clutch_buckets[i]), clutch, i); + sched_clutch_bucket_group_init(&(clutch->sc_clutch_groups[i]), clutch, i); } /* Grouping specific fields */ @@ -916,34 +1138,136 @@ sched_clutch_destroy( assert(os_atomic_load(&clutch->sc_thr_count, relaxed) == 0); } -#if __AMP__ +#if CONFIG_SCHED_EDGE /* - * sched_clutch_bucket_foreign() + * The current edge scheduler still relies on globals for E & P clusters. It uses these + * globals for the following operations: + * - Sysctl support for configuring edges + * - Edge scheduler initialization * - * Identifies if the clutch bucket is a foreign (not recommended for) this - * hierarchy. This is possible due to the recommended hierarchy/pset not - * available for scheduling currently. + * These should be removed for multi-cluster platforms once a clear policy for the above + * operations is defined. 
+ * */ -static boolean_t -sched_clutch_bucket_foreign(sched_clutch_root_t root_clutch, sched_clutch_bucket_t clutch_bucket) -{ - assert(clutch_bucket->scb_thr_count > 0); - if (!sched_clutch_pset_available(root_clutch->scr_pset)) { - /* Even though the pset was not available for scheduling, threads - * are being put in its runq (this might be due to the other pset - * being turned off and this being the master processor pset). - * Mark the clutch bucket as foreign so that when the other - * pset becomes available, it moves the clutch bucket accordingly. - */ - return true; +static uint32_t ecore_cluster_id = 0; +static uint32_t pcore_cluster_id = 1; + +/* + * Edge Scheduler Preferred Cluster Mechanism + * + * In order to have better control over various QoS buckets within a thread group, the Edge + * scheduler allows CLPC to specify a preferred cluster for each QoS level in a TG. These + * preferences are stored at the sched_clutch_bucket_group level since that represents all + * threads at a particular QoS level within a sched_clutch. For any lookup of preferred + * cluster, the logic always goes back to the preference stored at the clutch_bucket_group. + */ + +static uint32_t +sched_edge_clutch_bucket_group_preferred_cluster(sched_clutch_bucket_group_t clutch_bucket_group) +{ + return os_atomic_load(&clutch_bucket_group->scbg_preferred_cluster, relaxed); +} + +static uint32_t +sched_clutch_bucket_preferred_cluster(sched_clutch_bucket_t clutch_bucket) +{ + return sched_edge_clutch_bucket_group_preferred_cluster(clutch_bucket->scb_group); +} + +uint32_t +sched_edge_thread_preferred_cluster(thread_t thread) +{ + if (SCHED_CLUTCH_THREAD_CLUSTER_BOUND(thread)) { + /* For threads bound to a specific cluster, return the bound cluster id */ + return sched_edge_thread_bound_cluster_id(thread); + } + + sched_clutch_t clutch = sched_clutch_for_thread(thread); + sched_clutch_bucket_group_t clutch_bucket_group = &clutch->sc_clutch_groups[thread->th_sched_bucket]; + return sched_edge_clutch_bucket_group_preferred_cluster(clutch_bucket_group); +} + +/* + * Edge Scheduler Foreign Bucket Support + * + * In the Edge Scheduler, each cluster maintains a priority queue of clutch buckets containing + * threads that are not native to the cluster. A clutch bucket is considered native if its + * preferred cluster has the same type as the cluster its enqueued in. The foreign clutch + * bucket priority queue is used for rebalance operations to get threads back to their native + * cluster quickly. + * + * It is possible to make this policy even more aggressive by considering all clusters that + * are not the preferred cluster as the foreign cluster, but that would mean a lot of thread + * migrations which might have performance implications. 
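A small sketch of the foreign/native test described above; cluster types are reduced to a stand-in enum, and the kernel's pset_type_for_id() lookup is assumed to have already been done by the caller:

#include <stdbool.h>

typedef enum { CLUSTER_TYPE_E, CLUSTER_TYPE_P } cluster_type_t;

/*
 * A clutch bucket is native when its preferred cluster has the same type as
 * the cluster it is enqueued on; otherwise it is foreign and becomes a
 * candidate for rebalancing back to its native cluster.
 */
static bool
clutch_bucket_is_foreign(cluster_type_t preferred_type, cluster_type_t enqueued_type)
{
	return preferred_type != enqueued_type;
}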
+ */ + +static void +sched_clutch_bucket_mark_native(sched_clutch_bucket_t clutch_bucket, sched_clutch_root_t root_clutch) +{ + if (clutch_bucket->scb_foreign) { + clutch_bucket->scb_foreign = false; + priority_queue_remove(&root_clutch->scr_foreign_buckets, &clutch_bucket->scb_foreignlink); + } +} + +static void +sched_clutch_bucket_mark_foreign(sched_clutch_bucket_t clutch_bucket, sched_clutch_root_t root_clutch) +{ + if (!clutch_bucket->scb_foreign) { + clutch_bucket->scb_foreign = true; + priority_queue_entry_set_sched_pri(&root_clutch->scr_foreign_buckets, &clutch_bucket->scb_foreignlink, clutch_bucket->scb_priority, 0); + priority_queue_insert(&root_clutch->scr_foreign_buckets, &clutch_bucket->scb_foreignlink); + } +} + +/* + * Edge Scheduler Cumulative Load Average + * + * The Edge scheduler maintains a per-QoS/scheduling bucket load average for + * making thread migration decisions. The per-bucket load is maintained as a + * cumulative count since higher scheduling buckets impact load on lower buckets + * for thread migration decisions. + * + */ + +static void +sched_edge_cluster_cumulative_count_incr(sched_clutch_root_t root_clutch, sched_bucket_t bucket) +{ + switch (bucket) { + case TH_BUCKET_FIXPRI: os_atomic_inc(&root_clutch->scr_cumulative_run_count[TH_BUCKET_FIXPRI], relaxed); OS_FALLTHROUGH; + case TH_BUCKET_SHARE_FG: os_atomic_inc(&root_clutch->scr_cumulative_run_count[TH_BUCKET_SHARE_FG], relaxed); OS_FALLTHROUGH; + case TH_BUCKET_SHARE_IN: os_atomic_inc(&root_clutch->scr_cumulative_run_count[TH_BUCKET_SHARE_IN], relaxed); OS_FALLTHROUGH; + case TH_BUCKET_SHARE_DF: os_atomic_inc(&root_clutch->scr_cumulative_run_count[TH_BUCKET_SHARE_DF], relaxed); OS_FALLTHROUGH; + case TH_BUCKET_SHARE_UT: os_atomic_inc(&root_clutch->scr_cumulative_run_count[TH_BUCKET_SHARE_UT], relaxed); OS_FALLTHROUGH; + case TH_BUCKET_SHARE_BG: os_atomic_inc(&root_clutch->scr_cumulative_run_count[TH_BUCKET_SHARE_BG], relaxed); break; + default: + panic("Unexpected sched_bucket passed to sched_edge_cluster_cumulative_count_incr()"); + } +} + +static void +sched_edge_cluster_cumulative_count_decr(sched_clutch_root_t root_clutch, sched_bucket_t bucket) +{ + switch (bucket) { + case TH_BUCKET_FIXPRI: os_atomic_dec(&root_clutch->scr_cumulative_run_count[TH_BUCKET_FIXPRI], relaxed); OS_FALLTHROUGH; + case TH_BUCKET_SHARE_FG: os_atomic_dec(&root_clutch->scr_cumulative_run_count[TH_BUCKET_SHARE_FG], relaxed); OS_FALLTHROUGH; + case TH_BUCKET_SHARE_IN: os_atomic_dec(&root_clutch->scr_cumulative_run_count[TH_BUCKET_SHARE_IN], relaxed); OS_FALLTHROUGH; + case TH_BUCKET_SHARE_DF: os_atomic_dec(&root_clutch->scr_cumulative_run_count[TH_BUCKET_SHARE_DF], relaxed); OS_FALLTHROUGH; + case TH_BUCKET_SHARE_UT: os_atomic_dec(&root_clutch->scr_cumulative_run_count[TH_BUCKET_SHARE_UT], relaxed); OS_FALLTHROUGH; + case TH_BUCKET_SHARE_BG: os_atomic_dec(&root_clutch->scr_cumulative_run_count[TH_BUCKET_SHARE_BG], relaxed); break; + default: + panic("Unexpected sched_bucket passed to sched_edge_cluster_cumulative_count_decr()"); } - thread_t thread = run_queue_peek(&clutch_bucket->scb_runq); - pset_cluster_type_t pset_type = recommended_pset_type(thread); - return pset_type != root_clutch->scr_pset->pset_cluster_type; } -#endif /* __AMP__ */ +uint16_t +sched_edge_cluster_cumulative_count(sched_clutch_root_t root_clutch, sched_bucket_t bucket) +{ + return os_atomic_load(&root_clutch->scr_cumulative_run_count[bucket], relaxed); +} + +#endif /* CONFIG_SCHED_EDGE */ /* * sched_clutch_bucket_hierarchy_insert() @@ -963,14 +1287,14 
@@ sched_clutch_bucket_hierarchy_insert( /* Enqueue the timeshare clutch buckets into the global runnable clutch_bucket list; used for sched tick operations */ enqueue_tail(&root_clutch->scr_clutch_buckets, &clutch_bucket->scb_listlink); } -#if __AMP__ +#if CONFIG_SCHED_EDGE /* Check if the bucket is a foreign clutch bucket and add it to the foreign buckets list */ - if (sched_clutch_bucket_foreign(root_clutch, clutch_bucket)) { - clutch_bucket->scb_foreign = true; - enqueue_tail(&root_clutch->scr_foreign_buckets, &clutch_bucket->scb_foreignlink); + uint32_t preferred_cluster = sched_clutch_bucket_preferred_cluster(clutch_bucket); + if (pset_type_for_id(preferred_cluster) != pset_type_for_id(root_clutch->scr_cluster_id)) { + sched_clutch_bucket_mark_foreign(clutch_bucket, root_clutch); } -#endif /* __AMP__ */ - sched_clutch_root_bucket_t root_bucket = &root_clutch->scr_buckets[bucket]; +#endif /* CONFIG_SCHED_EDGE */ + sched_clutch_root_bucket_t root_bucket = &root_clutch->scr_unbound_buckets[bucket]; /* If this is the first clutch bucket in the root bucket, insert the root bucket into the root priority queue */ if (sched_clutch_bucket_runq_empty(&root_bucket->scrb_clutch_buckets)) { @@ -980,8 +1304,7 @@ sched_clutch_bucket_hierarchy_insert( /* Insert the clutch bucket into the root bucket run queue with order based on options */ sched_clutch_bucket_runq_enqueue(&root_bucket->scrb_clutch_buckets, clutch_bucket, options); os_atomic_store(&clutch_bucket->scb_root, root_clutch, relaxed); - KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, MACHDBG_CODE(DBG_MACH_SCHED_CLUTCH, MACH_SCHED_CLUTCH_TG_BUCKET_STATE) | DBG_FUNC_NONE, - thread_group_get_id(clutch_bucket->scb_clutch->sc_tg), clutch_bucket->scb_bucket, SCHED_CLUTCH_STATE_RUNNABLE, clutch_bucket->scb_priority, 0); + os_atomic_inc(&sched_clutch_global_bucket_load[bucket], relaxed); } /* @@ -1002,26 +1325,21 @@ sched_clutch_bucket_hierarchy_remove( /* Remove the timeshare clutch bucket from the globally runnable clutch_bucket list */ remqueue(&clutch_bucket->scb_listlink); } -#if __AMP__ - if (clutch_bucket->scb_foreign) { - clutch_bucket->scb_foreign = false; - remqueue(&clutch_bucket->scb_foreignlink); - } -#endif /* __AMP__ */ +#if CONFIG_SCHED_EDGE + sched_clutch_bucket_mark_native(clutch_bucket, root_clutch); +#endif /* CONFIG_SCHED_EDGE */ - sched_clutch_root_bucket_t root_bucket = &root_clutch->scr_buckets[bucket]; + sched_clutch_root_bucket_t root_bucket = &root_clutch->scr_unbound_buckets[bucket]; /* Remove the clutch bucket from the root bucket priority queue */ sched_clutch_bucket_runq_remove(&root_bucket->scrb_clutch_buckets, clutch_bucket); os_atomic_store(&clutch_bucket->scb_root, NULL, relaxed); - clutch_bucket->scb_blocked_ts = timestamp; - KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, MACHDBG_CODE(DBG_MACH_SCHED_CLUTCH, MACH_SCHED_CLUTCH_TG_BUCKET_STATE) | DBG_FUNC_NONE, - thread_group_get_id(clutch_bucket->scb_clutch->sc_tg), clutch_bucket->scb_bucket, SCHED_CLUTCH_STATE_EMPTY, 0, 0); /* If the root bucket priority queue is now empty, remove it from the root priority queue */ if (sched_clutch_bucket_runq_empty(&root_bucket->scrb_clutch_buckets)) { sched_clutch_root_bucket_empty(root_bucket, root_clutch, timestamp); } + os_atomic_dec(&sched_clutch_global_bucket_load[bucket], relaxed); } /* @@ -1038,9 +1356,9 @@ sched_clutch_bucket_base_pri( sched_clutch_bucket_t clutch_bucket) { uint8_t clutch_boost = 0; - assert(clutch_bucket->scb_runq.count != 0); + assert(priority_queue_empty(&clutch_bucket->scb_thread_runq) == false); - sched_clutch_t 
clutch = clutch_bucket->scb_clutch; + sched_clutch_t clutch = clutch_bucket->scb_group->scbg_clutch; /* * Since the clutch bucket can contain threads that are members of the group due @@ -1048,85 +1366,45 @@ sched_clutch_bucket_base_pri( * the entire clutch bucket should be based on the highest thread (promoted or base) * in the clutch bucket. */ - uint8_t max_pri = priority_queue_empty(&clutch_bucket->scb_clutchpri_prioq) ? 0 : priority_queue_max_key(&clutch_bucket->scb_clutchpri_prioq); - - /* - * For all AboveUI clutch buckets and clutch buckets for thread groups that - * havent been specified as SCHED_CLUTCH_TG_PRI_LOW, give a priority boost - */ - if ((clutch_bucket->scb_bucket == TH_BUCKET_FIXPRI) || - (os_atomic_load(&clutch->sc_tg_priority, relaxed) != SCHED_CLUTCH_TG_PRI_LOW)) { - clutch_boost = sched_clutch_bucket_pri_boost; + uint8_t max_pri = 0; + if (!priority_queue_empty(&clutch_bucket->scb_clutchpri_prioq)) { + max_pri = priority_queue_max_sched_pri(&clutch_bucket->scb_clutchpri_prioq); } + + sched_clutch_tg_priority_t tg_pri = os_atomic_load(&clutch->sc_tg_priority, relaxed); + clutch_boost = sched_clutch_bucket_group_pri_boost[tg_pri]; return max_pri + clutch_boost; } /* - * sched_clutch_bucket_interactivity_score_calculate() + * sched_clutch_interactivity_from_cpu_data() * - * Routine to calculate the interactivity score for the clutch bucket. The - * interactivity score is based on the ratio of CPU used by all threads in - * the bucket and the blocked time of the bucket as a whole. + * Routine to calculate the interactivity score of a clutch bucket group from its CPU usage */ static uint8_t -sched_clutch_bucket_interactivity_score_calculate( - sched_clutch_bucket_t clutch_bucket, - uint64_t timestamp) +sched_clutch_interactivity_from_cpu_data(sched_clutch_bucket_group_t clutch_bucket_group) { - if (clutch_bucket->scb_bucket == TH_BUCKET_FIXPRI) { - /* - * Since the root bucket selection algorithm for Above UI looks at clutch bucket - * priorities, make sure all AboveUI buckets are marked interactive. - */ - assert(clutch_bucket->scb_interactivity_score == (2 * sched_clutch_bucket_interactive_pri)); - return clutch_bucket->scb_interactivity_score; - } - - if (clutch_bucket->scb_interactivity_ts == 0) { - /* - * This indicates a newly initialized clutch bucket; return the default interactivity score - * and update timestamp. - */ - clutch_bucket->scb_interactivity_ts = timestamp; - return clutch_bucket->scb_interactivity_score; - } - - if (timestamp < (clutch_bucket->scb_interactivity_ts + sched_clutch_bucket_interactivity_delta)) { - return clutch_bucket->scb_interactivity_score; - } - - /* Check if the clutch bucket accounting needs to be scaled */ - sched_clutch_bucket_cpu_adjust(clutch_bucket); - clutch_bucket->scb_interactivity_ts = timestamp; - sched_clutch_bucket_cpu_data_t scb_cpu_data; - scb_cpu_data.scbcd_cpu_data_packed = os_atomic_load_wide(&clutch_bucket->scb_cpu_data.scbcd_cpu_data_packed, relaxed); + scb_cpu_data.scbcd_cpu_data_packed = os_atomic_load_wide(&clutch_bucket_group->scbg_cpu_data.scbcd_cpu_data_packed, relaxed); clutch_cpu_data_t cpu_used = scb_cpu_data.cpu_data.scbcd_cpu_used; clutch_cpu_data_t cpu_blocked = scb_cpu_data.cpu_data.scbcd_cpu_blocked; + uint8_t interactive_score = 0; - /* - * In extremely CPU contended cases, it is possible that the clutch bucket has been runnable - * for a long time but none of its threads have been picked up for execution. In that case, both - * the CPU used and blocked would be 0. 
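A standalone sketch of the ULE-style ratio used below, with the default interactive priority of 8; the previous score is passed in as a parameter because this stateless helper cannot read the stored count:

#include <stdint.h>

#define INTERACTIVE_PRI 8	/* mirrors the default interactive priority */

/*
 * Interactivity score in [0 : 2 * INTERACTIVE_PRI], derived from the ratio
 * of voluntary blocking to CPU usage (both in the same time unit).
 */
static uint8_t
interactivity_score(uint64_t cpu_used, uint64_t cpu_blocked, uint8_t prev_score)
{
	if (cpu_used == 0 && cpu_blocked == 0) {
		/* no history accumulated yet; keep the previous score */
		return prev_score;
	}
	if (cpu_blocked > cpu_used) {
		/* interactive: blocked for longer than it ran */
		return (uint8_t)(INTERACTIVE_PRI +
		    (INTERACTIVE_PRI * (cpu_blocked - cpu_used)) / cpu_blocked);
	}
	/* CPU bound: the score decays toward 0 as usage dominates */
	return (uint8_t)((INTERACTIVE_PRI * cpu_blocked) / cpu_used);
}

For example, 10ms used against 30ms blocked gives 8 + 8 * 20 / 30 = 13, while 30ms used against 10ms blocked gives 8 * 10 / 30 = 2.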
- */ if ((cpu_blocked == 0) && (cpu_used == 0)) { - return clutch_bucket->scb_interactivity_score; + return (uint8_t)clutch_bucket_group->scbg_interactivity_data.scct_count; } - /* * For all timeshare buckets, calculate the interactivity score of the bucket * and add it to the base priority */ - uint8_t interactive_score = 0; if (cpu_blocked > cpu_used) { /* Interactive clutch_bucket case */ - interactive_score = sched_clutch_bucket_interactive_pri + - ((sched_clutch_bucket_interactive_pri * (cpu_blocked - cpu_used)) / cpu_blocked); + interactive_score = sched_clutch_bucket_group_interactive_pri + + ((sched_clutch_bucket_group_interactive_pri * (cpu_blocked - cpu_used)) / cpu_blocked); } else { /* Non-interactive clutch_bucket case */ - interactive_score = ((sched_clutch_bucket_interactive_pri * cpu_blocked) / cpu_used); + interactive_score = ((sched_clutch_bucket_group_interactive_pri * cpu_blocked) / cpu_used); } - clutch_bucket->scb_interactivity_score = interactive_score; return interactive_score; } @@ -1138,7 +1416,6 @@ sched_clutch_bucket_interactivity_score_calculate( * of the clutch bucket and applies an interactivity score boost to the * highly responsive clutch buckets. */ - static uint8_t sched_clutch_bucket_pri_calculate( sched_clutch_bucket_t clutch_bucket, @@ -1150,12 +1427,12 @@ sched_clutch_bucket_pri_calculate( } uint8_t base_pri = sched_clutch_bucket_base_pri(clutch_bucket); - uint8_t interactive_score = sched_clutch_bucket_interactivity_score_calculate(clutch_bucket, timestamp); + uint8_t interactive_score = sched_clutch_bucket_group_interactivity_score_calculate(clutch_bucket->scb_group, timestamp); assert(((uint64_t)base_pri + interactive_score) <= UINT8_MAX); uint8_t pri = base_pri + interactive_score; KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, MACHDBG_CODE(DBG_MACH_SCHED_CLUTCH, MACH_SCHED_CLUTCH_TG_BUCKET_PRI) | DBG_FUNC_NONE, - thread_group_get_id(clutch_bucket->scb_clutch->sc_tg), clutch_bucket->scb_bucket, pri, interactive_score, 0); + thread_group_get_id(clutch_bucket->scb_group->scbg_clutch->sc_tg), clutch_bucket->scb_bucket, pri, interactive_score, 0); return pri; } @@ -1190,11 +1467,12 @@ sched_clutch_bucket_runnable( sched_clutch_bucket_options_t options) { sched_clutch_hierarchy_locked_assert(root_clutch); - sched_clutch_bucket_cpu_blocked_update(clutch_bucket, timestamp); + /* Since the clutch bucket became newly runnable, update its pending timestamp */ clutch_bucket->scb_priority = sched_clutch_bucket_pri_calculate(clutch_bucket, timestamp); sched_clutch_bucket_hierarchy_insert(root_clutch, clutch_bucket, clutch_bucket->scb_bucket, timestamp, options); + /* Update the timesharing properties of this clutch_bucket; also done every sched_tick */ - sched_clutch_bucket_timeshare_update(clutch_bucket); + sched_clutch_bucket_group_timeshare_update(clutch_bucket->scb_group, clutch_bucket, timestamp); int16_t root_old_pri = root_clutch->scr_priority; sched_clutch_root_pri_update(root_clutch); return root_clutch->scr_priority > root_old_pri; @@ -1217,7 +1495,7 @@ sched_clutch_bucket_update( { sched_clutch_hierarchy_locked_assert(root_clutch); uint64_t new_pri = sched_clutch_bucket_pri_calculate(clutch_bucket, timestamp); - sched_clutch_bucket_runq_t bucket_runq = &root_clutch->scr_buckets[clutch_bucket->scb_bucket].scrb_clutch_buckets; + sched_clutch_bucket_runq_t bucket_runq = &root_clutch->scr_unbound_buckets[clutch_bucket->scb_bucket].scrb_clutch_buckets; if (new_pri == clutch_bucket->scb_priority) { /* * If SCHED_CLUTCH_BUCKET_OPTIONS_SAMEPRI_RR is specified, 
move the clutch bucket @@ -1230,7 +1508,18 @@ sched_clutch_bucket_update( return false; } sched_clutch_bucket_runq_remove(bucket_runq, clutch_bucket); +#if CONFIG_SCHED_EDGE + if (clutch_bucket->scb_foreign) { + priority_queue_remove(&root_clutch->scr_foreign_buckets, &clutch_bucket->scb_foreignlink); + } +#endif /* CONFIG_SCHED_EDGE */ clutch_bucket->scb_priority = new_pri; +#if CONFIG_SCHED_EDGE + if (clutch_bucket->scb_foreign) { + priority_queue_entry_set_sched_pri(&root_clutch->scr_foreign_buckets, &clutch_bucket->scb_foreignlink, clutch_bucket->scb_priority, 0); + priority_queue_insert(&root_clutch->scr_foreign_buckets, &clutch_bucket->scb_foreignlink); + } +#endif /* CONFIG_SCHED_EDGE */ sched_clutch_bucket_runq_enqueue(bucket_runq, clutch_bucket, options); int16_t root_old_pri = root_clutch->scr_priority; @@ -1268,98 +1557,91 @@ sched_clutch_cpu_usage_update( thread_t thread, uint64_t delta) { - if (!SCHED_CLUTCH_THREAD_ELIGIBLE(thread)) { + if (!SCHED_CLUTCH_THREAD_ELIGIBLE(thread) || SCHED_CLUTCH_THREAD_CLUSTER_BOUND(thread)) { return; } + sched_clutch_t clutch = sched_clutch_for_thread(thread); - sched_clutch_bucket_t clutch_bucket = &(clutch->sc_clutch_buckets[thread->th_sched_bucket]); - sched_clutch_bucket_cpu_usage_update(clutch_bucket, delta); + sched_clutch_bucket_group_t clutch_bucket_group = &(clutch->sc_clutch_groups[thread->th_sched_bucket]); + sched_clutch_bucket_group_cpu_usage_update(clutch_bucket_group, delta); } /* - * sched_clutch_bucket_cpu_usage_update() + * sched_clutch_bucket_group_cpu_usage_update() * * Routine to update the CPU usage of the clutch_bucket. */ static void -sched_clutch_bucket_cpu_usage_update( - sched_clutch_bucket_t clutch_bucket, +sched_clutch_bucket_group_cpu_usage_update( + sched_clutch_bucket_group_t clutch_bucket_group, uint64_t delta) { - if (clutch_bucket->scb_bucket == TH_BUCKET_FIXPRI) { + if (clutch_bucket_group->scbg_bucket == TH_BUCKET_FIXPRI) { /* Since Above UI bucket has maximum interactivity score always, nothing to do here */ return; } - - /* - * The CPU usage should not overflow the clutch_cpu_data_t type. Since the usage is used to - * calculate interactivity score, it is safe to restrict it to CLUTCH_CPU_DATA_MAX. - */ - delta = MIN(delta, CLUTCH_CPU_DATA_MAX); - os_atomic_add_orig(&(clutch_bucket->scb_cpu_data.cpu_data.scbcd_cpu_used), (clutch_cpu_data_t)delta, relaxed); + delta = MIN(delta, sched_clutch_bucket_group_adjust_threshold); + os_atomic_add(&(clutch_bucket_group->scbg_cpu_data.cpu_data.scbcd_cpu_used), (clutch_cpu_data_t)delta, relaxed); } /* - * sched_clutch_bucket_cpu_blocked_update() + * sched_clutch_bucket_group_cpu_pending_adjust() * - * Routine to update CPU blocked time for clutch_bucket. + * Routine to calculate the adjusted CPU usage value based on the pending intervals. The calculation is done + * such that one "pending interval" provides one point improvement in interactivity score. 
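A sketch of the pending-interval adjustment implemented right below; the helper name is hypothetical and the numbers afterwards are only illustrative:

#include <stdint.h>

#define INTERACTIVE_PRI 8

/*
 * Age out CPU usage for a group that has been pending for `pending_intervals`
 * intervals; each interval is meant to buy roughly one point of
 * interactivity score.
 */
static uint64_t
cpu_used_after_ageout(uint64_t cpu_used, uint64_t cpu_blocked, uint8_t pending_intervals)
{
	if (pending_intervals == 0) {
		return cpu_used;
	}
	if (cpu_blocked < cpu_used) {
		/* CPU bound case: shrink usage so blocked/used improves gradually */
		return (INTERACTIVE_PRI * cpu_blocked * cpu_used) /
		    ((INTERACTIVE_PRI * cpu_blocked) + (cpu_used * pending_intervals));
	}
	/* interactive case: subtract the equivalent usage directly */
	uint64_t adjust = (cpu_blocked * pending_intervals) / INTERACTIVE_PRI;
	return (adjust > cpu_used) ? 0 : (cpu_used - adjust);
}

With cpu_used = 400, cpu_blocked = 100 and one pending interval, the usage drops from 400 to 8 * 100 * 400 / (800 + 400) = 266, which moves the score from 8 * 100 / 400 = 2 to 8 * 100 / 266 = 3, i.e. about one point per interval as intended.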
*/ -static void -sched_clutch_bucket_cpu_blocked_update( - sched_clutch_bucket_t clutch_bucket, - uint64_t timestamp) +static inline uint64_t +sched_clutch_bucket_group_cpu_pending_adjust( + uint64_t cpu_used, + uint64_t cpu_blocked, + uint8_t pending_intervals) { - if ((clutch_bucket->scb_bucket == TH_BUCKET_FIXPRI) || - (clutch_bucket->scb_blocked_ts == SCHED_CLUTCH_BUCKET_BLOCKED_TS_INVALID)) { - /* For Above UI bucket and a newly initialized clutch bucket, nothing to do here */ - return; - } - - uint64_t blocked_time = timestamp - clutch_bucket->scb_blocked_ts; - if (blocked_time > sched_clutch_bucket_adjust_threshold) { - blocked_time = sched_clutch_bucket_adjust_threshold; + uint64_t cpu_used_adjusted = 0; + if (cpu_blocked < cpu_used) { + cpu_used_adjusted = (sched_clutch_bucket_group_interactive_pri * cpu_blocked * cpu_used); + cpu_used_adjusted = cpu_used_adjusted / ((sched_clutch_bucket_group_interactive_pri * cpu_blocked) + (cpu_used * pending_intervals)); + } else { + uint64_t adjust_factor = (cpu_blocked * pending_intervals) / sched_clutch_bucket_group_interactive_pri; + cpu_used_adjusted = (adjust_factor > cpu_used) ? 0 : (cpu_used - adjust_factor); } - - /* - * The CPU blocked should not overflow the clutch_cpu_data_t type. Since the blocked is used to - * calculate interactivity score, it is safe to restrict it to CLUTCH_CPU_DATA_MAX. - */ - blocked_time = MIN(blocked_time, CLUTCH_CPU_DATA_MAX); - clutch_cpu_data_t __assert_only cpu_blocked_orig = os_atomic_add_orig(&(clutch_bucket->scb_cpu_data.cpu_data.scbcd_cpu_blocked), (clutch_cpu_data_t)blocked_time, relaxed); - /* The blocked time is scaled every so often, it should never overflow */ - assert(blocked_time <= (CLUTCH_CPU_DATA_MAX - cpu_blocked_orig)); + return cpu_used_adjusted; } /* - * sched_clutch_bucket_cpu_adjust() + * sched_clutch_bucket_group_cpu_adjust() * * Routine to scale the cpu usage and blocked time once the sum gets bigger - * than sched_clutch_bucket_adjust_threshold. Allows the values to remain + * than sched_clutch_bucket_group_adjust_threshold. Allows the values to remain * manageable and maintain the same ratio while allowing clutch buckets to * adjust behavior and reflect in the interactivity score in a reasonable - * amount of time. + * amount of time. Also adjusts the CPU usage based on pending_intervals + * which allows ageout of CPU to avoid starvation in highly contended scenarios. */ static void -sched_clutch_bucket_cpu_adjust( - sched_clutch_bucket_t clutch_bucket) +sched_clutch_bucket_group_cpu_adjust( + sched_clutch_bucket_group_t clutch_bucket_group, + uint8_t pending_intervals) { sched_clutch_bucket_cpu_data_t old_cpu_data = {}; sched_clutch_bucket_cpu_data_t new_cpu_data = {}; - do { - old_cpu_data.scbcd_cpu_data_packed = os_atomic_load_wide(&clutch_bucket->scb_cpu_data.scbcd_cpu_data_packed, relaxed); + os_atomic_rmw_loop(&clutch_bucket_group->scbg_cpu_data.scbcd_cpu_data_packed, old_cpu_data.scbcd_cpu_data_packed, new_cpu_data.scbcd_cpu_data_packed, relaxed, { clutch_cpu_data_t cpu_used = old_cpu_data.cpu_data.scbcd_cpu_used; clutch_cpu_data_t cpu_blocked = old_cpu_data.cpu_data.scbcd_cpu_blocked; - if ((cpu_used + cpu_blocked) < sched_clutch_bucket_adjust_threshold) { - return; - } - /* - * The accumulation of CPU used and blocked is past the threshold; scale it - * down to lose old history. 
- */ - new_cpu_data.cpu_data.scbcd_cpu_used = cpu_used / SCHED_CLUTCH_BUCKET_ADJUST_RATIO; - new_cpu_data.cpu_data.scbcd_cpu_blocked = cpu_blocked / SCHED_CLUTCH_BUCKET_ADJUST_RATIO; - } while (!os_atomic_cmpxchg(&clutch_bucket->scb_cpu_data.scbcd_cpu_data_packed, old_cpu_data.scbcd_cpu_data_packed, new_cpu_data.scbcd_cpu_data_packed, relaxed)); + if ((pending_intervals == 0) && (cpu_used + cpu_blocked) < sched_clutch_bucket_group_adjust_threshold) { + /* No changes to the CPU used and blocked values */ + os_atomic_rmw_loop_give_up(); + } + if ((cpu_used + cpu_blocked) >= sched_clutch_bucket_group_adjust_threshold) { + /* Only keep the recent CPU history to better indicate how this TG has been behaving */ + cpu_used = cpu_used / SCHED_CLUTCH_BUCKET_GROUP_ADJUST_RATIO; + cpu_blocked = cpu_blocked / SCHED_CLUTCH_BUCKET_GROUP_ADJUST_RATIO; + } + /* Use the shift passed in to ageout the CPU usage */ + cpu_used = (clutch_cpu_data_t)sched_clutch_bucket_group_cpu_pending_adjust(cpu_used, cpu_blocked, pending_intervals); + new_cpu_data.cpu_data.scbcd_cpu_used = cpu_used; + new_cpu_data.cpu_data.scbcd_cpu_blocked = cpu_blocked; + }); } /* @@ -1400,9 +1682,8 @@ sched_clutch_run_bucket_incr( sched_bucket_t bucket) { assert(bucket != TH_BUCKET_RUN); - sched_clutch_bucket_t clutch_bucket = &(clutch->sc_clutch_buckets[bucket]); - uint32_t result = os_atomic_inc(&(clutch_bucket->scb_run_count), relaxed); - return result; + sched_clutch_bucket_group_t clutch_bucket_group = &(clutch->sc_clutch_groups[bucket]); + return sched_clutch_bucket_group_run_count_inc(clutch_bucket_group); } /* @@ -1429,20 +1710,25 @@ sched_clutch_run_bucket_decr( sched_bucket_t bucket) { assert(bucket != TH_BUCKET_RUN); - sched_clutch_bucket_t clutch_bucket = &(clutch->sc_clutch_buckets[bucket]); - uint32_t result = os_atomic_dec(&(clutch_bucket->scb_run_count), relaxed); - return result; + sched_clutch_bucket_group_t clutch_bucket_group = &(clutch->sc_clutch_groups[bucket]); + return sched_clutch_bucket_group_run_count_dec(clutch_bucket_group); } /* - * sched_clutch_bucket_timeshare_update() + * sched_clutch_bucket_group_timeshare_update() * - * Routine to update the load and priority shift for the clutch_bucket every - * sched_tick. For runnable clutch_buckets, the sched tick handling code - * iterates the clutch buckets and calls this routine. For all others, the - * clutch_bucket maintains a "last updated schedtick" parameter. As threads - * become runnable in the clutch bucket, if this value is outdated, the load - * and shifts are updated. + * Routine to update the load and priority shift for the clutch_bucket_group + * every sched_tick. For multi-cluster platforms, each QoS level will have multiple + * clutch buckets with runnable threads in them. So it is important to maintain + * the timesharing information at the clutch_bucket_group level instead of + * individual clutch buckets (because the algorithm is trying to timeshare all + * threads at the same QoS irrespective of which hierarchy they are enqueued in). + * + * The routine is called from the sched tick handling code to make sure this value + * is updated at least once every sched tick. For clutch bucket groups which have + * not been runnable for very long, the clutch_bucket_group maintains a "last + * updated schedtick" parameter. As threads become runnable in the clutch bucket group, + * if this value is outdated, the load and shifts are updated. * * Possible optimization: * - The current algorithm samples the load every sched tick (125ms). 
@@ -1452,26 +1738,39 @@ sched_clutch_run_bucket_decr( * shift calculation. */ static void -sched_clutch_bucket_timeshare_update( - sched_clutch_bucket_t clutch_bucket) +sched_clutch_bucket_group_timeshare_update( + sched_clutch_bucket_group_t clutch_bucket_group, + sched_clutch_bucket_t clutch_bucket, + uint64_t ctime) { - if (clutch_bucket->scb_bucket < TH_BUCKET_SHARE_FG) { + if (clutch_bucket_group->scbg_bucket < TH_BUCKET_SHARE_FG) { + /* No timesharing needed for fixed priority Above UI threads */ return; } /* - * Update the timeshare parameters for the clutch bucket if they havent been updated - * in this tick. + * Update the timeshare parameters for the clutch bucket group + * if they havent been updated in this tick. */ - uint32_t bucket_sched_ts = os_atomic_load(&clutch_bucket->scb_timeshare_tick, relaxed); + uint32_t sched_ts = os_atomic_load(&clutch_bucket_group->scbg_timeshare_tick, relaxed); uint32_t current_sched_ts = sched_tick; - if (bucket_sched_ts != current_sched_ts) { - os_atomic_store(&clutch_bucket->scb_timeshare_tick, current_sched_ts, relaxed); - uint32_t bucket_load = (os_atomic_load(&clutch_bucket->scb_run_count, relaxed) / processor_avail_count); - bucket_load = MIN(bucket_load, NRQS - 1); - uint32_t pri_shift = sched_fixed_shift - sched_load_shifts[bucket_load]; - os_atomic_store(&clutch_bucket->scb_pri_shift, pri_shift, relaxed); + if (sched_ts < current_sched_ts) { + os_atomic_store(&clutch_bucket_group->scbg_timeshare_tick, current_sched_ts, relaxed); + /* NCPU wide workloads should not experience decay */ + uint64_t bucket_group_run_count = os_atomic_load_wide(&clutch_bucket_group->scbg_blocked_data.scct_count, relaxed) - 1; + uint32_t bucket_group_load = (uint32_t)(bucket_group_run_count / processor_avail_count); + bucket_group_load = MIN(bucket_group_load, NRQS - 1); + uint32_t pri_shift = sched_fixed_shift - sched_load_shifts[bucket_group_load]; + /* Ensure that the pri_shift value is reasonable */ + pri_shift = (pri_shift > SCHED_PRI_SHIFT_MAX) ? INT8_MAX : pri_shift; + os_atomic_store(&clutch_bucket_group->scbg_pri_shift, pri_shift, relaxed); } + + /* + * Update the clutch bucket priority; this allows clutch buckets that have been pending + * for a long time to get an updated interactivity score. + */ + sched_clutch_bucket_update(clutch_bucket, clutch_bucket->scb_root, ctime, SCHED_CLUTCH_BUCKET_OPTIONS_NONE); } /* @@ -1505,7 +1804,10 @@ sched_clutch_thread_clutch_update( thread->sched_usage += cpu_delta; } thread->cpu_delta += cpu_delta; - sched_clutch_bucket_cpu_usage_update(&(old_clutch->sc_clutch_buckets[thread->th_sched_bucket]), cpu_delta); + if (!SCHED_CLUTCH_THREAD_CLUSTER_BOUND(thread)) { + sched_clutch_bucket_group_t clutch_bucket_group = &(old_clutch->sc_clutch_groups[thread->th_sched_bucket]); + sched_clutch_bucket_group_cpu_usage_update(clutch_bucket_group, cpu_delta); + } } if (new_clutch) { @@ -1515,6 +1817,397 @@ sched_clutch_thread_clutch_update( /* Thread Insertion/Removal/Selection routines */ +#if CONFIG_SCHED_EDGE + +/* + * Edge Scheduler Bound Thread Support + * + * The edge scheduler allows threads to be bound to specific clusters. The scheduler + * maintains a separate runq on the clutch root to hold these bound threads. These + * bound threads count towards the root priority and thread count, but are ignored + * for thread migration/steal decisions. Bound threads that are enqueued in the + * separate runq have the th_bound_cluster_enqueued flag set to allow easy + * removal. 
+ * + * Bound Threads Timesharing + * The bound threads share the timesharing properties of the clutch bucket group they are + * part of. They contribute to the load and use priority shifts/decay values from the + * clutch bucket group. + */ + +static boolean_t +sched_edge_bound_thread_insert( + sched_clutch_root_t root_clutch, + thread_t thread, + integer_t options) +{ + /* Update the clutch runnable count and priority */ + sched_clutch_thr_count_inc(&root_clutch->scr_thr_count); + sched_clutch_root_bucket_t root_bucket = &root_clutch->scr_bound_buckets[thread->th_sched_bucket]; + if (root_bucket->scrb_bound_thread_runq.count == 0) { + sched_clutch_root_bucket_runnable(root_bucket, root_clutch, mach_absolute_time()); + } + + assert((thread->th_bound_cluster_enqueued) == false); + run_queue_enqueue(&root_bucket->scrb_bound_thread_runq, thread, options); + thread->th_bound_cluster_enqueued = true; + + int16_t root_old_pri = root_clutch->scr_priority; + sched_clutch_root_pri_update(root_clutch); + return root_clutch->scr_priority > root_old_pri; +} + +static void +sched_edge_bound_thread_remove( + sched_clutch_root_t root_clutch, + thread_t thread) +{ + sched_clutch_root_bucket_t root_bucket = &root_clutch->scr_bound_buckets[thread->th_sched_bucket]; + assert((thread->th_bound_cluster_enqueued) == true); + run_queue_remove(&root_bucket->scrb_bound_thread_runq, thread); + thread->th_bound_cluster_enqueued = false; + + /* Update the clutch runnable count and priority */ + sched_clutch_thr_count_dec(&root_clutch->scr_thr_count); + if (root_bucket->scrb_bound_thread_runq.count == 0) { + sched_clutch_root_bucket_empty(root_bucket, root_clutch, mach_absolute_time()); + } + sched_clutch_root_pri_update(root_clutch); +} + +#endif /* CONFIG_SCHED_EDGE */ + +/* + * sched_clutch_thread_bound_lookup() + * + * Routine to lookup the highest priority runnable thread in a bounded root bucket. + */ +static thread_t +sched_clutch_thread_bound_lookup( + __unused sched_clutch_root_t root_clutch, + sched_clutch_root_bucket_t root_bucket) +{ + return run_queue_peek(&root_bucket->scrb_bound_thread_runq); +} + +/* + * Clutch Bucket Group Thread Counts and Pending time calculation + * + * The pending time on the clutch_bucket_group allows the scheduler to track if it + * needs to ageout the CPU usage because the clutch_bucket_group has been pending for + * a very long time. The pending time is set to the timestamp as soon as a thread becomes + * runnable. When a thread is picked up for execution from this clutch_bucket_group, the + * pending time is advanced to the time of thread selection. + * + * Since threads for a clutch bucket group can be added or removed from multiple CPUs + * simulataneously, it is important that the updates to thread counts and pending timestamps + * happen atomically. The implementation relies on the following aspects to make that work + * as expected: + * - The clutch scheduler would be deployed on single cluster platforms where the pset lock + * is held when threads are added/removed and pending timestamps are updated + * - The edge scheduler would have support for double wide 128 bit atomics which allow the + * thread count and pending timestamp to be updated atomically. + * + * Clutch bucket group interactivity timestamp and score updates also rely on the properties + * above to atomically update the interactivity score for a clutch bucket group. 
+ */ + +#if CONFIG_SCHED_EDGE + +static void +sched_clutch_bucket_group_thr_count_inc( + sched_clutch_bucket_group_t clutch_bucket_group, + uint64_t timestamp) +{ + sched_clutch_counter_time_t old_pending_data; + sched_clutch_counter_time_t new_pending_data; + os_atomic_rmw_loop(&clutch_bucket_group->scbg_pending_data.scct_packed, old_pending_data.scct_packed, new_pending_data.scct_packed, relaxed, { + new_pending_data.scct_count = old_pending_data.scct_count + 1; + new_pending_data.scct_timestamp = old_pending_data.scct_timestamp; + if (old_pending_data.scct_count == 0) { + new_pending_data.scct_timestamp = timestamp; + } + }); +} + +static void +sched_clutch_bucket_group_thr_count_dec( + sched_clutch_bucket_group_t clutch_bucket_group, + uint64_t timestamp) +{ + sched_clutch_counter_time_t old_pending_data; + sched_clutch_counter_time_t new_pending_data; + os_atomic_rmw_loop(&clutch_bucket_group->scbg_pending_data.scct_packed, old_pending_data.scct_packed, new_pending_data.scct_packed, relaxed, { + new_pending_data.scct_count = old_pending_data.scct_count - 1; + if (new_pending_data.scct_count == 0) { + new_pending_data.scct_timestamp = SCHED_CLUTCH_BUCKET_GROUP_PENDING_INVALID; + } else { + new_pending_data.scct_timestamp = timestamp; + } + }); +} + +static uint8_t +sched_clutch_bucket_group_pending_ageout( + sched_clutch_bucket_group_t clutch_bucket_group, + uint64_t timestamp) +{ + int bucket_load = sched_clutch_global_bucket_load_get(clutch_bucket_group->scbg_bucket); + sched_clutch_counter_time_t old_pending_data; + sched_clutch_counter_time_t new_pending_data; + uint8_t cpu_usage_shift = 0; + + os_atomic_rmw_loop(&clutch_bucket_group->scbg_pending_data.scct_packed, old_pending_data.scct_packed, new_pending_data.scct_packed, relaxed, { + cpu_usage_shift = 0; + uint64_t old_pending_ts = old_pending_data.scct_timestamp; + bool old_update = (old_pending_ts >= timestamp); + bool no_pending_time = (old_pending_ts == SCHED_CLUTCH_BUCKET_GROUP_PENDING_INVALID); + bool no_bucket_load = (bucket_load == 0); + if (old_update || no_pending_time || no_bucket_load) { + os_atomic_rmw_loop_give_up(); + } + + /* Calculate the time the clutch bucket group has been pending */ + uint64_t pending_delta = timestamp - old_pending_ts; + uint64_t interactivity_delta = sched_clutch_bucket_group_pending_delta[clutch_bucket_group->scbg_bucket] * bucket_load; + if (pending_delta < interactivity_delta) { + os_atomic_rmw_loop_give_up(); + } + cpu_usage_shift = (pending_delta / interactivity_delta); + new_pending_data.scct_timestamp = old_pending_ts + (cpu_usage_shift * interactivity_delta); + new_pending_data.scct_count = old_pending_data.scct_count; + }); + return cpu_usage_shift; +} + +static uint8_t +sched_clutch_bucket_group_interactivity_score_calculate( + sched_clutch_bucket_group_t clutch_bucket_group, + uint64_t timestamp) +{ + if (clutch_bucket_group->scbg_bucket == TH_BUCKET_FIXPRI) { + /* + * Since the root bucket selection algorithm for Above UI looks at clutch bucket + * priorities, make sure all AboveUI buckets are marked interactive. 
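The ageout arithmetic above can be read as: one "interactivity interval" is the per-bucket pending delta scaled by the current bucket load, and the CPU usage is shifted once per full interval the group has been left pending. For example, with a 10ms per-bucket delta and a bucket load of 3, a group pending for 65ms ages out by 65 / 30 = 2 shifts and its pending timestamp advances by 60ms. A condensed sketch of just that arithmetic (the invalid-timestamp sentinel and the atomic update loop are elided):

#include <stdint.h>

static uint8_t
example_pending_ageout(uint64_t now, uint64_t *pending_ts,
    uint64_t per_bucket_pending_delta, int bucket_load)
{
	if (bucket_load == 0 || *pending_ts >= now) {
		return 0;
	}
	uint64_t interactivity_delta = per_bucket_pending_delta * (uint64_t)bucket_load;
	uint64_t pending_delta = now - *pending_ts;
	if (pending_delta < interactivity_delta) {
		return 0;
	}
	uint8_t shift = (uint8_t)(pending_delta / interactivity_delta);
	*pending_ts += (uint64_t)shift * interactivity_delta;
	return shift;
}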
+ */ + assert(clutch_bucket_group->scbg_interactivity_data.scct_count == (2 * sched_clutch_bucket_group_interactive_pri)); + return (uint8_t)clutch_bucket_group->scbg_interactivity_data.scct_count; + } + /* Check if the clutch bucket group CPU usage needs to be aged out due to pending time */ + uint8_t pending_intervals = sched_clutch_bucket_group_pending_ageout(clutch_bucket_group, timestamp); + /* Adjust CPU stats based on the calculated shift and to make sure only recent behavior is used */ + sched_clutch_bucket_group_cpu_adjust(clutch_bucket_group, pending_intervals); + uint8_t interactivity_score = sched_clutch_interactivity_from_cpu_data(clutch_bucket_group); + sched_clutch_counter_time_t old_interactivity_data; + sched_clutch_counter_time_t new_interactivity_data; + + bool score_updated = os_atomic_rmw_loop(&clutch_bucket_group->scbg_interactivity_data.scct_packed, old_interactivity_data.scct_packed, new_interactivity_data.scct_packed, relaxed, { + if (old_interactivity_data.scct_timestamp >= timestamp) { + os_atomic_rmw_loop_give_up(); + } + new_interactivity_data.scct_timestamp = timestamp; + if (old_interactivity_data.scct_timestamp != 0) { + new_interactivity_data.scct_count = interactivity_score; + } + }); + if (score_updated) { + return (uint8_t)new_interactivity_data.scct_count; + } else { + return (uint8_t)old_interactivity_data.scct_count; + } +} + +#else /* CONFIG_SCHED_EDGE */ + +/* + * For the clutch scheduler, atomicity is ensured by making sure all operations + * are happening under the pset lock of the only cluster present on the platform. + */ +static void +sched_clutch_bucket_group_thr_count_inc( + sched_clutch_bucket_group_t clutch_bucket_group, + uint64_t timestamp) +{ + sched_clutch_hierarchy_locked_assert(&pset0.pset_clutch_root); + if (clutch_bucket_group->scbg_pending_data.scct_count == 0) { + clutch_bucket_group->scbg_pending_data.scct_timestamp = timestamp; + } + clutch_bucket_group->scbg_pending_data.scct_count++; +} + +static void +sched_clutch_bucket_group_thr_count_dec( + sched_clutch_bucket_group_t clutch_bucket_group, + uint64_t timestamp) +{ + sched_clutch_hierarchy_locked_assert(&pset0.pset_clutch_root); + clutch_bucket_group->scbg_pending_data.scct_count--; + if (clutch_bucket_group->scbg_pending_data.scct_count == 0) { + clutch_bucket_group->scbg_pending_data.scct_timestamp = SCHED_CLUTCH_BUCKET_GROUP_PENDING_INVALID; + } else { + clutch_bucket_group->scbg_pending_data.scct_timestamp = timestamp; + } +} + +static uint8_t +sched_clutch_bucket_group_pending_ageout( + sched_clutch_bucket_group_t clutch_bucket_group, + uint64_t timestamp) +{ + sched_clutch_hierarchy_locked_assert(&pset0.pset_clutch_root); + int bucket_load = sched_clutch_global_bucket_load_get(clutch_bucket_group->scbg_bucket); + uint64_t old_pending_ts = clutch_bucket_group->scbg_pending_data.scct_timestamp; + bool old_update = (old_pending_ts >= timestamp); + bool no_pending_time = (old_pending_ts == SCHED_CLUTCH_BUCKET_GROUP_PENDING_INVALID); + bool no_bucket_load = (bucket_load == 0); + if (old_update || no_pending_time || no_bucket_load) { + return 0; + } + uint64_t pending_delta = timestamp - old_pending_ts; + uint64_t interactivity_delta = sched_clutch_bucket_group_pending_delta[clutch_bucket_group->scbg_bucket] * bucket_load; + if (pending_delta < interactivity_delta) { + return 0; + } + uint8_t cpu_usage_shift = (pending_delta / interactivity_delta); + clutch_bucket_group->scbg_pending_data.scct_timestamp = old_pending_ts + (cpu_usage_shift * interactivity_delta); + return 
cpu_usage_shift; +} + +static uint8_t +sched_clutch_bucket_group_interactivity_score_calculate( + sched_clutch_bucket_group_t clutch_bucket_group, + uint64_t timestamp) +{ + sched_clutch_hierarchy_locked_assert(&pset0.pset_clutch_root); + if (clutch_bucket_group->scbg_bucket == TH_BUCKET_FIXPRI) { + /* + * Since the root bucket selection algorithm for Above UI looks at clutch bucket + * priorities, make sure all AboveUI buckets are marked interactive. + */ + assert(clutch_bucket_group->scbg_interactivity_data.scct_count == (2 * sched_clutch_bucket_group_interactive_pri)); + return (uint8_t)clutch_bucket_group->scbg_interactivity_data.scct_count; + } + /* Check if the clutch bucket group CPU usage needs to be aged out due to pending time */ + uint8_t pending_intervals = sched_clutch_bucket_group_pending_ageout(clutch_bucket_group, timestamp); + /* Adjust CPU stats based on the calculated shift and to make sure only recent behavior is used */ + sched_clutch_bucket_group_cpu_adjust(clutch_bucket_group, pending_intervals); + uint8_t interactivity_score = sched_clutch_interactivity_from_cpu_data(clutch_bucket_group); + if (timestamp > clutch_bucket_group->scbg_interactivity_data.scct_timestamp) { + clutch_bucket_group->scbg_interactivity_data.scct_count = interactivity_score; + clutch_bucket_group->scbg_interactivity_data.scct_timestamp = timestamp; + return interactivity_score; + } else { + return (uint8_t)clutch_bucket_group->scbg_interactivity_data.scct_count; + } +} + +#endif /* CONFIG_SCHED_EDGE */ + +/* + * Clutch Bucket Group Run Count and Blocked Time Accounting + * + * The clutch bucket group maintains the number of runnable/running threads in the group. + * Since the blocked time of the clutch bucket group is based on this count, it is + * important to make sure the blocking timestamp and the run count are updated atomically. 
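In other words, blocked time is credited only across complete idle periods of the group: the timestamp is armed when the last runnable thread leaves and consumed when the first one returns, and the credited interval is capped so a single long sleep cannot dominate the interactivity score. A small sketch of that bookkeeping with the synchronization elided (the sentinel name is a stand-in for the kernel's):

#include <stdint.h>

#define EXAMPLE_BLOCKED_TS_INVALID  UINT64_MAX

/* Called on the 0 -> 1 run count transition: fold the completed blocked
 * interval into the group's blocked-time accumulator, capped at the
 * adjust threshold. */
static void
example_blocked_interval_end(uint64_t *blocked_since, uint64_t now,
    uint64_t adjust_threshold, uint64_t *cpu_blocked_accum)
{
	if (*blocked_since == EXAMPLE_BLOCKED_TS_INVALID) {
		return;
	}
	uint64_t blocked_time = now - *blocked_since;
	if (blocked_time > adjust_threshold) {
		blocked_time = adjust_threshold;
	}
	*cpu_blocked_accum += blocked_time;
	*blocked_since = EXAMPLE_BLOCKED_TS_INVALID;
}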
+ * + * Since the run count increments happen without any pset locks held, the scheduler makes + * these updates atomic in the following way: + * - On 64-bit platforms, it uses double wide atomics to update the count & timestamp + * - On 32-bit platforms, it uses a lock to synchronize the count & timestamp update + */ + +#if !__LP64__ + +static uint32_t +sched_clutch_bucket_group_run_count_inc( + sched_clutch_bucket_group_t clutch_bucket_group) +{ + uint64_t blocked_time = 0; + uint64_t updated_run_count = 0; + + lck_spin_lock(&clutch_bucket_group->scbg_stats_lock); + if ((clutch_bucket_group->scbg_blocked_data.scct_timestamp != SCHED_CLUTCH_BUCKET_GROUP_BLOCKED_TS_INVALID) && + (clutch_bucket_group->scbg_blocked_data.scct_count == 0)) { + /* Run count is transitioning from 0 to 1; calculate blocked time and add it to CPU data */ + blocked_time = mach_absolute_time() - clutch_bucket_group->scbg_blocked_data.scct_timestamp; + clutch_bucket_group->scbg_blocked_data.scct_timestamp = SCHED_CLUTCH_BUCKET_GROUP_BLOCKED_TS_INVALID; + } + clutch_bucket_group->scbg_blocked_data.scct_count = clutch_bucket_group->scbg_blocked_data.scct_count + 1; + updated_run_count = clutch_bucket_group->scbg_blocked_data.scct_count; + lck_spin_unlock(&clutch_bucket_group->scbg_stats_lock); + + blocked_time = MIN(blocked_time, sched_clutch_bucket_group_adjust_threshold); + os_atomic_add(&(clutch_bucket_group->scbg_cpu_data.cpu_data.scbcd_cpu_blocked), (clutch_cpu_data_t)blocked_time, relaxed); + return (uint32_t)updated_run_count; +} + +static uint32_t +sched_clutch_bucket_group_run_count_dec( + sched_clutch_bucket_group_t clutch_bucket_group) +{ + uint64_t updated_run_count = 0; + + lck_spin_lock(&clutch_bucket_group->scbg_stats_lock); + clutch_bucket_group->scbg_blocked_data.scct_count = clutch_bucket_group->scbg_blocked_data.scct_count - 1; + if (clutch_bucket_group->scbg_blocked_data.scct_count == 0) { + /* Run count is transitioning from 1 to 0; start the blocked timer */ + clutch_bucket_group->scbg_blocked_data.scct_timestamp = mach_absolute_time(); + } + updated_run_count = clutch_bucket_group->scbg_blocked_data.scct_count; + lck_spin_unlock(&clutch_bucket_group->scbg_stats_lock); + return (uint32_t)updated_run_count; +} + +#else /* !__LP64__ */ + +static uint32_t +sched_clutch_bucket_group_run_count_inc( + sched_clutch_bucket_group_t clutch_bucket_group) +{ + sched_clutch_counter_time_t old_blocked_data; + sched_clutch_counter_time_t new_blocked_data; + + bool update_blocked_time = false; + os_atomic_rmw_loop(&clutch_bucket_group->scbg_blocked_data.scct_packed, old_blocked_data.scct_packed, new_blocked_data.scct_packed, relaxed, { + new_blocked_data.scct_count = old_blocked_data.scct_count + 1; + new_blocked_data.scct_timestamp = old_blocked_data.scct_timestamp; + update_blocked_time = false; + if (old_blocked_data.scct_count == 0) { + new_blocked_data.scct_timestamp = SCHED_CLUTCH_BUCKET_GROUP_BLOCKED_TS_INVALID; + update_blocked_time = true; + } + }); + if (update_blocked_time && (old_blocked_data.scct_timestamp != SCHED_CLUTCH_BUCKET_GROUP_BLOCKED_TS_INVALID)) { + uint64_t ctime = mach_absolute_time(); + if (ctime > old_blocked_data.scct_timestamp) { + uint64_t blocked_time = ctime - old_blocked_data.scct_timestamp; + blocked_time = MIN(blocked_time, sched_clutch_bucket_group_adjust_threshold); + os_atomic_add(&(clutch_bucket_group->scbg_cpu_data.cpu_data.scbcd_cpu_blocked), (clutch_cpu_data_t)blocked_time, relaxed); + } + } + return (uint32_t)new_blocked_data.scct_count; +} + +static uint32_t 
+sched_clutch_bucket_group_run_count_dec( + sched_clutch_bucket_group_t clutch_bucket_group) +{ + sched_clutch_counter_time_t old_blocked_data; + sched_clutch_counter_time_t new_blocked_data; + + uint64_t ctime = mach_absolute_time(); + os_atomic_rmw_loop(&clutch_bucket_group->scbg_blocked_data.scct_packed, old_blocked_data.scct_packed, new_blocked_data.scct_packed, relaxed, { + new_blocked_data.scct_count = old_blocked_data.scct_count - 1; + new_blocked_data.scct_timestamp = old_blocked_data.scct_timestamp; + if (new_blocked_data.scct_count == 0) { + new_blocked_data.scct_timestamp = ctime; + } + }); + return (uint32_t)new_blocked_data.scct_count; +} + +#endif /* !__LP64__ */ + /* * sched_clutch_thread_insert() * @@ -1531,25 +2224,55 @@ sched_clutch_thread_insert( boolean_t result = FALSE; sched_clutch_hierarchy_locked_assert(root_clutch); +#if CONFIG_SCHED_EDGE + sched_edge_cluster_cumulative_count_incr(root_clutch, thread->th_sched_bucket); + /* + * Check if the thread is bound and is being enqueued in its desired bound cluster. + * One scenario where a bound thread might not be getting enqueued in the bound cluster + * hierarchy would be if the thread is "soft" bound and the bound cluster is + * de-recommended. In that case, the thread should be treated as an unbound + * thread. + */ + if (SCHED_CLUTCH_THREAD_CLUSTER_BOUND(thread) && (sched_edge_thread_bound_cluster_id(thread) == root_clutch->scr_cluster_id)) { + return sched_edge_bound_thread_insert(root_clutch, thread, options); + } +#endif /* CONFIG_SCHED_EDGE */ sched_clutch_t clutch = sched_clutch_for_thread(thread); assert(thread->thread_group == clutch->sc_tg); uint64_t current_timestamp = mach_absolute_time(); - sched_clutch_bucket_t clutch_bucket = &(clutch->sc_clutch_buckets[thread->th_sched_bucket]); + sched_clutch_bucket_group_t clutch_bucket_group = &(clutch->sc_clutch_groups[thread->th_sched_bucket]); + sched_clutch_bucket_t clutch_bucket = &(clutch_bucket_group->scbg_clutch_buckets[root_clutch->scr_cluster_id]); assert((clutch_bucket->scb_root == NULL) || (clutch_bucket->scb_root == root_clutch)); - /* Insert thread into the clutch_bucket runq using sched_pri */ - run_queue_enqueue(&clutch_bucket->scb_runq, thread, options); + /* + * Thread linkage in clutch_bucket + * + * A thread has a few linkages within the clutch bucket: + * - A stable priority queue linkage which is the main runqueue (based on sched_pri) for the clutch bucket + * - A regular priority queue linkage which is based on thread's base/promoted pri (used for clutch bucket priority calculation) + * - A queue linkage used for timesharing operations of threads at the scheduler tick + */ + + /* Insert thread into the clutch_bucket stable priority runqueue using sched_pri */ + thread->th_clutch_runq_link.stamp = current_timestamp; + priority_queue_entry_set_sched_pri(&clutch_bucket->scb_thread_runq, &thread->th_clutch_runq_link, thread->sched_pri, + (options & SCHED_TAILQ) ? PRIORITY_QUEUE_ENTRY_NONE : PRIORITY_QUEUE_ENTRY_PREEMPTED); + priority_queue_insert(&clutch_bucket->scb_thread_runq, &thread->th_clutch_runq_link); + + /* Insert thread into clutch_bucket priority queue based on the promoted or base priority */ + priority_queue_entry_set_sched_pri(&clutch_bucket->scb_clutchpri_prioq, &thread->th_clutch_pri_link, + sched_thread_sched_pri_promoted(thread) ? 
thread->sched_pri : thread->base_pri, false); + priority_queue_insert(&clutch_bucket->scb_clutchpri_prioq, &thread->th_clutch_pri_link); + + /* Insert thread into timesharing queue of the clutch bucket */ + enqueue_tail(&clutch_bucket->scb_thread_timeshare_queue, &thread->th_clutch_timeshare_link); + /* Increment the urgency counter for the root if necessary */ sched_clutch_root_urgency_inc(root_clutch, thread); - /* Insert thread into clutch_bucket priority queue based on the promoted or base priority */ - priority_queue_insert(&clutch_bucket->scb_clutchpri_prioq, &thread->sched_clutchpri_link, - sched_thread_sched_pri_promoted(thread) ? thread->sched_pri : thread->base_pri, - PRIORITY_QUEUE_SCHED_PRI_MAX_HEAP_COMPARE); os_atomic_inc(&clutch->sc_thr_count, relaxed); - KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, MACHDBG_CODE(DBG_MACH_SCHED_CLUTCH, MACH_SCHED_CLUTCH_THREAD_STATE) | DBG_FUNC_NONE, - thread_group_get_id(clutch_bucket->scb_clutch->sc_tg), clutch_bucket->scb_bucket, thread_tid(thread), SCHED_CLUTCH_STATE_RUNNABLE, 0); + sched_clutch_bucket_group_thr_count_inc(clutch_bucket->scb_group, current_timestamp); /* Enqueue the clutch into the hierarchy (if needed) and update properties; pick the insertion order based on thread options */ sched_clutch_bucket_options_t scb_options = (options & SCHED_HEADQ) ? SCHED_CLUTCH_BUCKET_OPTIONS_HEADQ : SCHED_CLUTCH_BUCKET_OPTIONS_TAILQ; @@ -1562,6 +2285,10 @@ sched_clutch_thread_insert( sched_clutch_thr_count_inc(&root_clutch->scr_thr_count); result = sched_clutch_bucket_update(clutch_bucket, root_clutch, current_timestamp, scb_options); } + + KDBG(MACHDBG_CODE(DBG_MACH_SCHED_CLUTCH, MACH_SCHED_CLUTCH_THR_COUNT) | DBG_FUNC_NONE, + root_clutch->scr_cluster_id, thread_group_get_id(clutch_bucket->scb_group->scbg_clutch->sc_tg), clutch_bucket->scb_bucket, + SCHED_CLUTCH_DBG_THR_COUNT_PACK(root_clutch->scr_thr_count, os_atomic_load(&clutch->sc_thr_count, relaxed), clutch_bucket->scb_thr_count)); return result; } @@ -1580,25 +2307,33 @@ sched_clutch_thread_remove( sched_clutch_bucket_options_t options) { sched_clutch_hierarchy_locked_assert(root_clutch); +#if CONFIG_SCHED_EDGE + sched_edge_cluster_cumulative_count_decr(root_clutch, thread->th_sched_bucket); + if (thread->th_bound_cluster_enqueued) { + sched_edge_bound_thread_remove(root_clutch, thread); + return; + } +#endif /* CONFIG_SCHED_EDGE */ sched_clutch_t clutch = sched_clutch_for_thread(thread); assert(thread->thread_group == clutch->sc_tg); assert(thread->runq != PROCESSOR_NULL); - sched_clutch_bucket_t clutch_bucket = &(clutch->sc_clutch_buckets[thread->th_sched_bucket]); + sched_clutch_bucket_group_t clutch_bucket_group = &(clutch->sc_clutch_groups[thread->th_sched_bucket]); + sched_clutch_bucket_t clutch_bucket = &(clutch_bucket_group->scbg_clutch_buckets[root_clutch->scr_cluster_id]); assert(clutch_bucket->scb_root == root_clutch); /* Decrement the urgency counter for the root if necessary */ sched_clutch_root_urgency_dec(root_clutch, thread); /* Remove thread from the clutch_bucket */ - run_queue_remove(&clutch_bucket->scb_runq, thread); + priority_queue_remove(&clutch_bucket->scb_thread_runq, &thread->th_clutch_runq_link); + remqueue(&thread->th_clutch_timeshare_link); + thread->runq = PROCESSOR_NULL; - priority_queue_remove(&clutch_bucket->scb_clutchpri_prioq, &thread->sched_clutchpri_link, - PRIORITY_QUEUE_SCHED_PRI_MAX_HEAP_COMPARE); - KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, MACHDBG_CODE(DBG_MACH_SCHED_CLUTCH, MACH_SCHED_CLUTCH_THREAD_STATE) | DBG_FUNC_NONE, - 
thread_group_get_id(clutch_bucket->scb_clutch->sc_tg), clutch_bucket->scb_bucket, thread_tid(thread), SCHED_CLUTCH_STATE_EMPTY, 0); + priority_queue_remove(&clutch_bucket->scb_clutchpri_prioq, &thread->th_clutch_pri_link); /* Update counts at various levels of the hierarchy */ os_atomic_dec(&clutch->sc_thr_count, relaxed); + sched_clutch_bucket_group_thr_count_dec(clutch_bucket->scb_group, current_timestamp); sched_clutch_thr_count_dec(&root_clutch->scr_thr_count); sched_clutch_thr_count_dec(&clutch_bucket->scb_thr_count); @@ -1608,10 +2343,38 @@ sched_clutch_thread_remove( } else { sched_clutch_bucket_update(clutch_bucket, root_clutch, current_timestamp, options); } + KDBG(MACHDBG_CODE(DBG_MACH_SCHED_CLUTCH, MACH_SCHED_CLUTCH_THR_COUNT) | DBG_FUNC_NONE, + root_clutch->scr_cluster_id, thread_group_get_id(clutch_bucket->scb_group->scbg_clutch->sc_tg), clutch_bucket->scb_bucket, + SCHED_CLUTCH_DBG_THR_COUNT_PACK(root_clutch->scr_thr_count, os_atomic_load(&clutch->sc_thr_count, relaxed), clutch_bucket->scb_thr_count)); +} + +/* + * sched_clutch_thread_unbound_lookup() + * + * Routine to find the highest unbound thread in the root clutch. + * Helps find threads easily for steal/migrate scenarios in the + * Edge scheduler. + */ +static thread_t +sched_clutch_thread_unbound_lookup( + sched_clutch_root_t root_clutch, + sched_clutch_root_bucket_t root_bucket) +{ + sched_clutch_hierarchy_locked_assert(root_clutch); + + /* Find the highest priority clutch bucket in this root bucket */ + sched_clutch_bucket_t clutch_bucket = sched_clutch_root_bucket_highest_clutch_bucket(root_bucket); + assert(clutch_bucket != NULL); + + /* Find the highest priority runnable thread in this clutch bucket */ + thread_t thread = priority_queue_max(&clutch_bucket->scb_thread_runq, struct thread, th_clutch_runq_link); + KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, MACHDBG_CODE(DBG_MACH_SCHED_CLUTCH, MACH_SCHED_CLUTCH_THREAD_SELECT) | DBG_FUNC_NONE, + thread_tid(thread), thread_group_get_id(clutch_bucket->scb_group->scbg_clutch->sc_tg), clutch_bucket->scb_bucket, 0, 0); + return thread; } /* - * sched_clutch_thread_highest() + * sched_clutch_thread_highest_remove() * * Routine to find and remove the highest priority thread * from the sched clutch hierarchy. The algorithm looks at the @@ -1620,36 +2383,27 @@ sched_clutch_thread_remove( * pset lock held. 
*/ static thread_t -sched_clutch_thread_highest( +sched_clutch_thread_highest_remove( sched_clutch_root_t root_clutch) { sched_clutch_hierarchy_locked_assert(root_clutch); uint64_t current_timestamp = mach_absolute_time(); - /* Select the highest priority root bucket */ - sched_clutch_root_bucket_t root_bucket = sched_clutch_root_highest_root_bucket(root_clutch, current_timestamp); + sched_clutch_root_bucket_t root_bucket = sched_clutch_root_highest_root_bucket(root_clutch, current_timestamp, SCHED_CLUTCH_HIGHEST_ROOT_BUCKET_ALL); if (root_bucket == NULL) { return THREAD_NULL; } - /* Since a thread is being picked from this root bucket, update its deadline */ - sched_clutch_root_bucket_deadline_update(root_bucket, root_clutch, current_timestamp); - - /* Find the highest priority clutch bucket in this root bucket */ - sched_clutch_bucket_t clutch_bucket = sched_clutch_root_bucket_highest_clutch_bucket(root_bucket); - assert(clutch_bucket != NULL); - - /* Find the highest priority runnable thread in this clutch bucket */ - thread_t thread = run_queue_peek(&clutch_bucket->scb_runq); - assert(thread != NULL); - /* Remove and return the thread from the hierarchy; also round robin the clutch bucket if the priority remains unchanged */ - sched_clutch_thread_remove(root_clutch, thread, current_timestamp, SCHED_CLUTCH_BUCKET_OPTIONS_SAMEPRI_RR); - KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, MACHDBG_CODE(DBG_MACH_SCHED_CLUTCH, MACH_SCHED_CLUTCH_THREAD_SELECT) | DBG_FUNC_NONE, - thread_tid(thread), thread_group_get_id(clutch_bucket->scb_clutch->sc_tg), clutch_bucket->scb_bucket, 0, 0); - return thread; + thread_t highest_thread = THREAD_NULL; + if (root_bucket->scrb_bound) { + highest_thread = sched_clutch_thread_bound_lookup(root_clutch, root_bucket); + } else { + highest_thread = sched_clutch_thread_unbound_lookup(root_clutch, root_bucket); + } + sched_clutch_thread_remove(root_clutch, highest_thread, current_timestamp, SCHED_CLUTCH_BUCKET_OPTIONS_SAMEPRI_RR); + return highest_thread; } - /* High level global accessor routines */ /* @@ -1706,6 +2460,42 @@ sched_clutch_root_count( return root_clutch->scr_thr_count; } +#if CONFIG_SCHED_EDGE + +/* + * sched_clutch_root_foreign_empty() + * + * Routine to check if the foreign clutch bucket priority list is empty for a cluster. + */ +static boolean_t +sched_clutch_root_foreign_empty( + sched_clutch_root_t root_clutch) +{ + return priority_queue_empty(&root_clutch->scr_foreign_buckets); +} + +/* + * sched_clutch_root_highest_foreign_thread_remove() + * + * Routine to return the thread in the highest priority clutch bucket in a cluster. + * Must be called with the pset for the cluster locked. 
+ */ +static thread_t +sched_clutch_root_highest_foreign_thread_remove( + sched_clutch_root_t root_clutch) +{ + thread_t thread = THREAD_NULL; + if (priority_queue_empty(&root_clutch->scr_foreign_buckets)) { + return thread; + } + sched_clutch_bucket_t clutch_bucket = priority_queue_max(&root_clutch->scr_foreign_buckets, struct sched_clutch_bucket, scb_foreignlink); + thread = priority_queue_max(&clutch_bucket->scb_thread_runq, struct thread, th_clutch_runq_link); + sched_clutch_thread_remove(root_clutch, thread, mach_absolute_time(), 0); + return thread; +} + +#endif /* CONFIG_SCHED_EDGE */ + /* * sched_clutch_thread_pri_shift() * @@ -1720,12 +2510,12 @@ sched_clutch_thread_pri_shift( sched_bucket_t bucket) { if (!SCHED_CLUTCH_THREAD_ELIGIBLE(thread)) { - return UINT8_MAX; + return INT8_MAX; } assert(bucket != TH_BUCKET_RUN); sched_clutch_t clutch = sched_clutch_for_thread(thread); - sched_clutch_bucket_t clutch_bucket = &(clutch->sc_clutch_buckets[bucket]); - return os_atomic_load(&clutch_bucket->scb_pri_shift, relaxed); + sched_clutch_bucket_group_t clutch_bucket_group = &(clutch->sc_clutch_groups[bucket]); + return os_atomic_load(&clutch_bucket_group->scbg_pri_shift, relaxed); } #pragma mark -- Clutch Scheduler Algorithm @@ -1733,9 +2523,6 @@ sched_clutch_thread_pri_shift( static void sched_clutch_init(void); -static void -sched_clutch_timebase_init(void); - static thread_t sched_clutch_steal_thread(processor_set_t pset); @@ -1800,7 +2587,7 @@ sched_clutch_update_thread_bucket(thread_t thread); const struct sched_dispatch_table sched_clutch_dispatch = { .sched_name = "clutch", .init = sched_clutch_init, - .timebase_init = sched_clutch_timebase_init, + .timebase_init = sched_timeshare_timebase_init, .processor_init = sched_clutch_processor_init, .pset_init = sched_clutch_pset_init, .maintenance_continuation = sched_timeshare_maintenance_continue, @@ -1808,6 +2595,7 @@ const struct sched_dispatch_table sched_clutch_dispatch = { .steal_thread_enabled = sched_steal_thread_enabled, .steal_thread = sched_clutch_steal_thread, .compute_timeshare_priority = sched_compute_timeshare_priority, + .choose_node = sched_choose_node, .choose_processor = choose_processor, .processor_enqueue = sched_clutch_processor_enqueue, .processor_queue_shutdown = sched_clutch_processor_queue_shutdown, @@ -1832,11 +2620,11 @@ const struct sched_dispatch_table sched_clutch_dispatch = { .thread_avoid_processor = sched_clutch_thread_avoid_processor, .processor_balance = sched_SMT_balance, - .rt_runq = sched_rtglobal_runq, - .rt_init = sched_rtglobal_init, - .rt_queue_shutdown = sched_rtglobal_queue_shutdown, - .rt_runq_scan = sched_rtglobal_runq_scan, - .rt_runq_count_sum = sched_rtglobal_runq_count_sum, + .rt_runq = sched_rtlocal_runq, + .rt_init = sched_rtlocal_init, + .rt_queue_shutdown = sched_rtlocal_queue_shutdown, + .rt_runq_scan = sched_rtlocal_runq_scan, + .rt_runq_count_sum = sched_rtlocal_runq_count_sum, .qos_max_parallelism = sched_qos_max_parallelism, .check_spill = sched_check_spill, @@ -1902,33 +2690,26 @@ sched_clutch_pset_init(processor_set_t pset) sched_clutch_root_init(&pset->pset_clutch_root, pset); } +static void +sched_clutch_tunables_init(void) +{ + sched_clutch_us_to_abstime(sched_clutch_root_bucket_wcel_us, sched_clutch_root_bucket_wcel); + sched_clutch_us_to_abstime(sched_clutch_root_bucket_warp_us, sched_clutch_root_bucket_warp); + sched_clutch_us_to_abstime(sched_clutch_thread_quantum_us, sched_clutch_thread_quantum); + 
clock_interval_to_absolutetime_interval(SCHED_CLUTCH_BUCKET_GROUP_ADJUST_THRESHOLD_USECS, + NSEC_PER_USEC, &sched_clutch_bucket_group_adjust_threshold); + assert(sched_clutch_bucket_group_adjust_threshold <= CLUTCH_CPU_DATA_MAX); + sched_clutch_us_to_abstime(sched_clutch_bucket_group_pending_delta_us, sched_clutch_bucket_group_pending_delta); +} + static void sched_clutch_init(void) { - if (!PE_parse_boot_argn("sched_clutch_bucket_interactive_pri", &sched_clutch_bucket_interactive_pri, sizeof(sched_clutch_bucket_interactive_pri))) { - sched_clutch_bucket_interactive_pri = SCHED_CLUTCH_BUCKET_INTERACTIVE_PRI_DEFAULT; - } - if (!PE_parse_boot_argn("sched_clutch_bucket_pri_boost", &sched_clutch_bucket_pri_boost, sizeof(sched_clutch_bucket_pri_boost))) { - sched_clutch_bucket_pri_boost = SCHED_CLUTCH_BUCKET_PRI_BOOST_DEFAULT; + if (!PE_parse_boot_argn("sched_clutch_bucket_group_interactive_pri", &sched_clutch_bucket_group_interactive_pri, sizeof(sched_clutch_bucket_group_interactive_pri))) { + sched_clutch_bucket_group_interactive_pri = SCHED_CLUTCH_BUCKET_GROUP_INTERACTIVE_PRI_DEFAULT; } sched_timeshare_init(); -} - -static void -sched_clutch_timebase_init(void) -{ - sched_timeshare_timebase_init(); - sched_clutch_us_to_abstime(sched_clutch_root_bucket_wcel_us, sched_clutch_root_bucket_wcel); - sched_clutch_us_to_abstime(sched_clutch_root_bucket_warp_us, sched_clutch_root_bucket_warp); - sched_clutch_us_to_abstime(sched_clutch_thread_quantum_us, sched_clutch_thread_quantum); - clock_interval_to_absolutetime_interval(SCHED_CLUTCH_BUCKET_ADJUST_THRESHOLD_USECS, - NSEC_PER_USEC, &sched_clutch_bucket_adjust_threshold); - - uint32_t interactivity_delta = 0; - if (!PE_parse_boot_argn("sched_clutch_bucket_interactivity_delta_usecs", &interactivity_delta, sizeof(interactivity_delta))) { - interactivity_delta = SCHED_CLUTCH_BUCKET_INTERACTIVITY_DELTA_USECS_DEFAULT; - } - clock_interval_to_absolutetime_interval(interactivity_delta, NSEC_PER_USEC, &sched_clutch_bucket_interactivity_delta); + sched_clutch_tunables_init(); } static thread_t @@ -1962,7 +2743,7 @@ sched_clutch_choose_thread( thread_t thread = THREAD_NULL; if (choose_from_boundq == false) { sched_clutch_root_t pset_clutch_root = sched_clutch_processor_root_clutch(processor); - thread = sched_clutch_thread_highest(pset_clutch_root); + thread = sched_clutch_thread_highest_remove(pset_clutch_root); } else { thread = run_queue_dequeue(bound_runq, SCHED_HEADQ); } @@ -2088,7 +2869,7 @@ sched_clutch_processor_queue_shutdown(processor_t processor) queue_init(&tqueue); while (sched_clutch_root_count(pset_clutch_root) > 0) { - thread = sched_clutch_thread_highest(pset_clutch_root); + thread = sched_clutch_thread_highest_remove(pset_clutch_root); enqueue_tail(&tqueue, &thread->runq_links); } @@ -2096,11 +2877,8 @@ sched_clutch_processor_queue_shutdown(processor_t processor) qe_foreach_element_safe(thread, &tqueue, runq_links) { remqueue(&thread->runq_links); - thread_lock(thread); - thread_setrun(thread, SCHED_TAILQ); - thread_unlock(thread); } } @@ -2142,31 +2920,9 @@ sched_clutch_processor_queue_remove( } static thread_t -sched_clutch_steal_thread(processor_set_t pset) +sched_clutch_steal_thread(__unused processor_set_t pset) { - processor_set_t nset, cset = pset; - thread_t thread; - - do { - sched_clutch_root_t pset_clutch_root = &cset->pset_clutch_root; - if (sched_clutch_root_count(pset_clutch_root) > 0) { - thread = sched_clutch_thread_highest(pset_clutch_root); - pset_unlock(cset); - return thread; - } - - nset = next_pset(cset); - - if (nset 
!= pset) { - pset_unlock(cset); - - cset = nset; - pset_lock(cset); - } - } while (nset != pset); - - pset_unlock(cset); - + /* Thread stealing is not enabled for single cluster clutch scheduler platforms */ return THREAD_NULL; } @@ -2213,31 +2969,44 @@ sched_clutch_thread_update_scan(sched_update_scan_context_t scan_context) thread_update_process_threads(); } while (restart_needed); - pset = &pset0; + pset_node_t node = &pset_node0; + pset = node->psets; do { do { - s = splsched(); - pset_lock(pset); - - if (sched_clutch_root_count(&pset->pset_clutch_root) > 0) { - queue_t clutch_bucket_list = &pset->pset_clutch_root.scr_clutch_buckets; - sched_clutch_bucket_t clutch_bucket; - qe_foreach_element(clutch_bucket, clutch_bucket_list, scb_listlink) { - sched_clutch_bucket_timeshare_update(clutch_bucket); - restart_needed = runq_scan(&clutch_bucket->scb_runq, scan_context); - if (restart_needed) { - break; + restart_needed = FALSE; + while (pset != NULL) { + s = splsched(); + pset_lock(pset); + + if (sched_clutch_root_count(&pset->pset_clutch_root) > 0) { + for (sched_bucket_t bucket = TH_BUCKET_SHARE_FG; bucket < TH_BUCKET_SCHED_MAX; bucket++) { + restart_needed = runq_scan(&pset->pset_clutch_root.scr_bound_buckets[bucket].scrb_bound_thread_runq, scan_context); + if (restart_needed) { + break; + } } + queue_t clutch_bucket_list = &pset->pset_clutch_root.scr_clutch_buckets; + sched_clutch_bucket_t clutch_bucket; + qe_foreach_element(clutch_bucket, clutch_bucket_list, scb_listlink) { + sched_clutch_bucket_group_timeshare_update(clutch_bucket->scb_group, clutch_bucket, scan_context->sched_tick_last_abstime); + restart_needed = sched_clutch_timeshare_scan(&clutch_bucket->scb_thread_timeshare_queue, clutch_bucket->scb_thr_count, scan_context); + } + } + + pset_unlock(pset); + splx(s); + + if (restart_needed) { + break; } + pset = pset->pset_list; } - pset_unlock(pset); - splx(s); if (restart_needed) { break; } - } while ((pset = pset->pset_list) != NULL); + } while (((node = node->node_list) != NULL) && ((pset = node->psets) != NULL)); /* Ok, we now have a collection of candidates -- fix them. */ thread_update_process_threads(); @@ -2343,7 +3112,6 @@ sched_clutch_update_thread_bucket(thread_t thread) sched_bucket_t old_bucket = thread->th_sched_bucket; sched_bucket_t new_bucket = TH_BUCKET_RUN; assert(thread->runq == PROCESSOR_NULL); - int pri = (sched_thread_sched_pri_promoted(thread)) ? thread->sched_pri : thread->base_pri; switch (thread->sched_mode) { @@ -2374,7 +3142,6 @@ sched_clutch_update_thread_bucket(thread_t thread) thread->th_sched_bucket = new_bucket; thread->pri_shift = sched_clutch_thread_pri_shift(thread, new_bucket); - /* * Since this is called after the thread has been removed from the runq, * only the run counts need to be updated. 
The re-insert into the runq @@ -2386,62 +3153,78 @@ sched_clutch_update_thread_bucket(thread_t thread) } } -#if __AMP__ +#if CONFIG_SCHED_EDGE /* Implementation of the AMP version of the clutch scheduler */ +static void +sched_edge_init(void); + static thread_t -sched_clutch_amp_steal_thread(processor_set_t pset); +sched_edge_processor_idle(processor_set_t pset); static ast_t -sched_clutch_amp_processor_csw_check(processor_t processor); +sched_edge_processor_csw_check(processor_t processor); static boolean_t -sched_clutch_amp_processor_queue_has_priority(processor_t processor, int priority, boolean_t gte); +sched_edge_processor_queue_has_priority(processor_t processor, int priority, boolean_t gte); static boolean_t -sched_clutch_amp_processor_queue_empty(processor_t processor); +sched_edge_processor_queue_empty(processor_t processor); static thread_t -sched_clutch_amp_choose_thread(processor_t processor, int priority, ast_t reason); +sched_edge_choose_thread(processor_t processor, int priority, ast_t reason); static void -sched_clutch_amp_processor_queue_shutdown(processor_t processor); +sched_edge_processor_queue_shutdown(processor_t processor); static processor_t -sched_clutch_amp_choose_processor(processor_set_t pset, processor_t processor, thread_t thread); +sched_edge_choose_processor(processor_set_t pset, processor_t processor, thread_t thread); static bool -sched_clutch_amp_thread_avoid_processor(processor_t processor, thread_t thread); +sched_edge_thread_avoid_processor(processor_t processor, thread_t thread); -static bool -sched_clutch_amp_thread_should_yield(processor_t processor, thread_t thread); +static void +sched_edge_balance(processor_t cprocessor, processor_set_t cpset); static void -sched_clutch_migrate_foreign_buckets(processor_t processor, processor_set_t dst_pset, boolean_t drop_lock); +sched_edge_check_spill(processor_set_t pset, thread_t thread); + +static bool +sched_edge_thread_should_yield(processor_t processor, thread_t thread); static void -sched_clutch_amp_thread_group_recommendation_change(struct thread_group *tg, cluster_type_t new_recommendation); +sched_edge_pset_made_schedulable(processor_t processor, processor_set_t dst_pset, boolean_t drop_lock); + +static bool +sched_edge_steal_thread_enabled(processor_set_t pset); + +static sched_ipi_type_t +sched_edge_ipi_policy(processor_t dst, thread_t thread, boolean_t dst_idle, sched_ipi_event_t event); + +static uint32_t +sched_edge_qos_max_parallelism(int qos, uint64_t options); -const struct sched_dispatch_table sched_clutch_amp_dispatch = { - .sched_name = "clutch_amp", - .init = sched_amp_init, - .timebase_init = sched_clutch_timebase_init, +const struct sched_dispatch_table sched_edge_dispatch = { + .sched_name = "edge", + .init = sched_edge_init, + .timebase_init = sched_timeshare_timebase_init, .processor_init = sched_clutch_processor_init, .pset_init = sched_clutch_pset_init, .maintenance_continuation = sched_timeshare_maintenance_continue, - .choose_thread = sched_clutch_amp_choose_thread, - .steal_thread_enabled = sched_amp_steal_thread_enabled, - .steal_thread = sched_clutch_amp_steal_thread, + .choose_thread = sched_edge_choose_thread, + .steal_thread_enabled = sched_edge_steal_thread_enabled, + .steal_thread = sched_edge_processor_idle, .compute_timeshare_priority = sched_compute_timeshare_priority, - .choose_processor = sched_clutch_amp_choose_processor, + .choose_node = sched_choose_node, + .choose_processor = sched_edge_choose_processor, .processor_enqueue = sched_clutch_processor_enqueue, - 
.processor_queue_shutdown = sched_clutch_amp_processor_queue_shutdown, + .processor_queue_shutdown = sched_edge_processor_queue_shutdown, .processor_queue_remove = sched_clutch_processor_queue_remove, - .processor_queue_empty = sched_clutch_amp_processor_queue_empty, + .processor_queue_empty = sched_edge_processor_queue_empty, .priority_is_urgent = priority_is_urgent, - .processor_csw_check = sched_clutch_amp_processor_csw_check, - .processor_queue_has_priority = sched_clutch_amp_processor_queue_has_priority, + .processor_csw_check = sched_edge_processor_csw_check, + .processor_queue_has_priority = sched_edge_processor_queue_has_priority, .initial_quantum_size = sched_clutch_initial_quantum_size, .initial_thread_sched_mode = sched_clutch_initial_thread_sched_mode, .can_update_priority = can_update_priority, @@ -2455,8 +3238,8 @@ const struct sched_dispatch_table sched_clutch_amp_dispatch = { .multiple_psets_enabled = TRUE, .sched_groups_enabled = FALSE, .avoid_processor_enabled = TRUE, - .thread_avoid_processor = sched_clutch_amp_thread_avoid_processor, - .processor_balance = sched_amp_balance, + .thread_avoid_processor = sched_edge_thread_avoid_processor, + .processor_balance = sched_edge_balance, .rt_runq = sched_amp_rt_runq, .rt_init = sched_amp_rt_init, @@ -2464,52 +3247,225 @@ const struct sched_dispatch_table sched_clutch_amp_dispatch = { .rt_runq_scan = sched_amp_rt_runq_scan, .rt_runq_count_sum = sched_amp_rt_runq_count_sum, - .qos_max_parallelism = sched_amp_qos_max_parallelism, - .check_spill = sched_amp_check_spill, - .ipi_policy = sched_amp_ipi_policy, - .thread_should_yield = sched_clutch_amp_thread_should_yield, + .qos_max_parallelism = sched_edge_qos_max_parallelism, + .check_spill = sched_edge_check_spill, + .ipi_policy = sched_edge_ipi_policy, + .thread_should_yield = sched_edge_thread_should_yield, .run_count_incr = sched_clutch_run_incr, .run_count_decr = sched_clutch_run_decr, .update_thread_bucket = sched_clutch_update_thread_bucket, - .pset_made_schedulable = sched_clutch_migrate_foreign_buckets, - .thread_group_recommendation_change = sched_clutch_amp_thread_group_recommendation_change, + .pset_made_schedulable = sched_edge_pset_made_schedulable, + .thread_group_recommendation_change = NULL, }; -extern processor_set_t ecore_set; -extern processor_set_t pcore_set; +static struct processor_set pset1; +static struct pset_node pset_node1; +static bitmap_t sched_edge_available_pset_bitmask[BITMAP_LEN(MAX_PSETS)]; + +/* + * sched_edge_pset_available() + * + * Routine to determine if a pset is available for scheduling. + */ +static bool +sched_edge_pset_available(processor_set_t pset) +{ + return bitmap_test(sched_edge_available_pset_bitmask, pset->pset_cluster_id); +} + +/* + * sched_edge_thread_bound_cluster_id() + * + * Routine to determine which cluster a particular thread is bound to. Uses + * the sched_flags on the thread to map back to a specific cluster id. + * + * + */ +static uint32_t +sched_edge_thread_bound_cluster_id(thread_t thread) +{ + assert(SCHED_CLUTCH_THREAD_CLUSTER_BOUND(thread)); + if (thread->sched_flags & TH_SFLAG_ECORE_ONLY) { + return (pset_array[0]->pset_type == CLUSTER_TYPE_E) ? 0 : 1; + } else { + return (pset_array[0]->pset_type == CLUSTER_TYPE_P) ? 
0 : 1; + } +} + +/* Forward declaration for some thread migration routines */ +static boolean_t sched_edge_foreign_runnable_thread_available(processor_set_t pset); +static boolean_t sched_edge_foreign_running_thread_available(processor_set_t pset); +static processor_set_t sched_edge_steal_candidate(processor_set_t pset); +static processor_set_t sched_edge_migrate_candidate(processor_set_t preferred_pset, thread_t thread, processor_set_t locked_pset, bool switch_pset_locks); + +/* + * sched_edge_config_set() + * + * Support to update an edge configuration. Typically used by CLPC to affect thread migration + * policies in the scheduler. + */ +static void +sched_edge_config_set(uint32_t src_cluster, uint32_t dst_cluster, sched_clutch_edge edge_config) +{ + sched_clutch_edge *edge = &pset_array[src_cluster]->sched_edges[dst_cluster]; + edge->sce_edge_packed = edge_config.sce_edge_packed; +} + +/* + * sched_edge_config_get() + * + * Support to get an edge configuration. Typically used by CLPC to query edge configs to decide + * if it needs to update edges. + */ +static sched_clutch_edge +sched_edge_config_get(uint32_t src_cluster, uint32_t dst_cluster) +{ + return pset_array[src_cluster]->sched_edges[dst_cluster]; +} + +#if DEVELOPMENT || DEBUG + +/* + * Helper Routines for edge scheduler sysctl configuration + * + * The current support is limited to dual cluster AMP platforms. + * + */ + +kern_return_t +sched_edge_sysctl_configure_e_to_p(uint64_t edge_config) +{ + pset_array[ecore_cluster_id]->sched_edges[pcore_cluster_id].sce_edge_packed = edge_config; + return KERN_SUCCESS; +} + +kern_return_t +sched_edge_sysctl_configure_p_to_e(uint64_t edge_config) +{ + pset_array[pcore_cluster_id]->sched_edges[ecore_cluster_id].sce_edge_packed = edge_config; + return KERN_SUCCESS; +} + +sched_clutch_edge +sched_edge_e_to_p(void) +{ + return sched_edge_config_get(ecore_cluster_id, pcore_cluster_id); +} + +sched_clutch_edge +sched_edge_p_to_e(void) +{ + return sched_edge_config_get(pcore_cluster_id, ecore_cluster_id); +} + +#endif /* DEVELOPMENT || DEBUG */ + +/* + * sched_edge_matrix_set() + * + * Routine to update various edges in the cluster edge matrix. The edge_changes_bitmap + * indicates which edges need to be updated. Both the edge_matrix & edge_changes_bitmap + * are MAX_PSETS * MAX_PSETS matrices flattened into a single dimensional array. + */ +void +sched_edge_matrix_set(sched_clutch_edge *edge_matrix, bool *edge_changes_bitmap, __unused uint64_t flags, uint64_t matrix_order) +{ + uint32_t edge_index = 0; + for (uint32_t src_cluster = 0; src_cluster < matrix_order; src_cluster++) { + for (uint32_t dst_cluster = 0; dst_cluster < matrix_order; dst_cluster++) { + if (edge_changes_bitmap[edge_index]) { + sched_edge_config_set(src_cluster, dst_cluster, edge_matrix[edge_index]); + } + edge_index++; + } + } +} + +/* + * sched_edge_matrix_get() + * + * Routine to retrieve various edges in the cluster edge matrix. The edge_request_bitmap + * indicates which edges need to be retrieved. Both the edge_matrix & edge_request_bitmap + * are MAX_PSETS * MAX_PSETS matrices flattened into a single dimensional array. 
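The sched_edge_matrix_set()/sched_edge_matrix_get() interfaces treat the per-cluster edges as one row-major array of matrix_order x matrix_order entries, so the edge from src_cluster to dst_cluster lives at index src_cluster * matrix_order + dst_cluster. As a hedged illustration of how a policy caller might use this interface on a two-cluster system (the helper name and the weight value are made up; only the edges actually changed are flagged in the changes bitmap):

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical caller, assuming matrix_order == 2. */
static void
example_allow_e_to_p_migration(uint32_t e_id, uint32_t p_id)
{
	sched_clutch_edge edge_matrix[4];
	bool edge_request[4] = { true, true, true, true };
	sched_edge_matrix_get(edge_matrix, edge_request, 0, 2);

	bool edge_changes[4] = { false, false, false, false };
	uint32_t idx = e_id * 2 + p_id;
	edge_matrix[idx].sce_migration_allowed = 1;
	edge_matrix[idx].sce_migration_weight = 32;   /* illustrative weight */
	edge_changes[idx] = true;
	sched_edge_matrix_set(edge_matrix, edge_changes, 0, 2);
}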
+ */ +void +sched_edge_matrix_get(sched_clutch_edge *edge_matrix, bool *edge_request_bitmap, __unused uint64_t flags, uint64_t matrix_order) +{ + uint32_t edge_index = 0; + for (uint32_t src_cluster = 0; src_cluster < matrix_order; src_cluster++) { + for (uint32_t dst_cluster = 0; dst_cluster < matrix_order; dst_cluster++) { + if (edge_request_bitmap[edge_index]) { + edge_matrix[edge_index] = sched_edge_config_get(src_cluster, dst_cluster); + } + edge_index++; + } + } +} + +/* + * sched_edge_init() + * + * Routine to initialize the data structures for the Edge scheduler. The current implementation + * relies on this being enabled for a dual cluster AMP system. Once a better design for MAX_PSETS, + * edge config etc. is defined, it should be made more generic to handle the multi-cluster + * platorms. + * + */ +static void +sched_edge_init(void) +{ + processor_set_t ecore_set = &pset0; + processor_set_t pcore_set = &pset1; + + if (ml_get_boot_cluster() == CLUSTER_TYPE_P) { + /* If the device boots on a P-cluster, fixup the IDs etc. */ + pcore_set = &pset0; + ecore_set = &pset1; + bitmap_set(sched_edge_available_pset_bitmask, pcore_cluster_id); + } else { + bitmap_set(sched_edge_available_pset_bitmask, ecore_cluster_id); + } + + ecore_set->pset_cluster_type = PSET_AMP_E; + ecore_set->pset_cluster_id = ecore_cluster_id; + + pcore_set->pset_cluster_type = PSET_AMP_P; + pcore_set->pset_cluster_id = pcore_cluster_id; + + pset_init(&pset1, &pset_node1); + pset_node1.psets = &pset1; + pset_node0.node_list = &pset_node1; + + pset_array[ecore_cluster_id] = ecore_set; + pset_array[ecore_cluster_id]->pset_type = CLUSTER_TYPE_E; + bitmap_set(pset_array[ecore_cluster_id]->foreign_psets, pcore_cluster_id); + + sched_edge_config_set(ecore_cluster_id, ecore_cluster_id, (sched_clutch_edge){.sce_migration_weight = 0, .sce_migration_allowed = 0, .sce_steal_allowed = 0}); + sched_edge_config_set(ecore_cluster_id, pcore_cluster_id, (sched_clutch_edge){.sce_migration_weight = 0, .sce_migration_allowed = 0, .sce_steal_allowed = 0}); + + pset_array[pcore_cluster_id] = pcore_set; + pset_array[pcore_cluster_id]->pset_type = CLUSTER_TYPE_P; + bitmap_set(pset_array[pcore_cluster_id]->foreign_psets, ecore_cluster_id); + + sched_edge_config_set(pcore_cluster_id, pcore_cluster_id, (sched_clutch_edge){.sce_migration_weight = 0, .sce_migration_allowed = 0, .sce_steal_allowed = 0}); + sched_edge_config_set(pcore_cluster_id, ecore_cluster_id, (sched_clutch_edge){.sce_migration_weight = 64, .sce_migration_allowed = 1, .sce_steal_allowed = 1}); + + sched_timeshare_init(); + sched_clutch_tunables_init(); +} static thread_t -sched_clutch_amp_choose_thread( +sched_edge_choose_thread( processor_t processor, int priority, __unused ast_t reason) { - processor_set_t pset = processor->processor_set; - bool spill_pending = false; - int spill_pri = -1; - - if (pset == ecore_set && bit_test(pset->pending_spill_cpu_mask, processor->cpu_id)) { - spill_pending = true; - spill_pri = sched_clutch_root_priority(&pcore_set->pset_clutch_root); - } - int clutch_pri = sched_clutch_root_priority(sched_clutch_processor_root_clutch(processor)); run_queue_t bound_runq = sched_clutch_bound_runq(processor); boolean_t choose_from_boundq = false; if ((bound_runq->highq < priority) && - (clutch_pri < priority) && - (spill_pri < priority)) { - return THREAD_NULL; - } - - if ((spill_pri > bound_runq->highq) && - (spill_pri > clutch_pri)) { - /* - * There is a higher priority thread on the P-core runq, - * so returning THREAD_NULL here will cause 
thread_select() - * to call sched_clutch_amp_steal_thread() to try to get it. - */ + (clutch_pri < priority)) { return THREAD_NULL; } @@ -2520,7 +3476,7 @@ sched_clutch_amp_choose_thread( thread_t thread = THREAD_NULL; if (choose_from_boundq == false) { sched_clutch_root_t pset_clutch_root = sched_clutch_processor_root_clutch(processor); - thread = sched_clutch_thread_highest(pset_clutch_root); + thread = sched_clutch_thread_highest_remove(pset_clutch_root); } else { thread = run_queue_dequeue(bound_runq, SCHED_HEADQ); } @@ -2528,32 +3484,67 @@ sched_clutch_amp_choose_thread( } static boolean_t -sched_clutch_amp_processor_queue_empty(processor_t processor) +sched_edge_processor_queue_empty(processor_t processor) { - processor_set_t pset = processor->processor_set; - bool spill_pending = bit_test(pset->pending_spill_cpu_mask, processor->cpu_id); - return (sched_clutch_root_count(sched_clutch_processor_root_clutch(processor)) == 0) && - (sched_clutch_bound_runq(processor)->count == 0) && - !spill_pending; + (sched_clutch_bound_runq(processor)->count == 0); +} + +static void +sched_edge_check_spill(__unused processor_set_t pset, __unused thread_t thread) +{ + assert(thread->bound_processor == PROCESSOR_NULL); } +__options_decl(sched_edge_thread_yield_reason_t, uint32_t, { + SCHED_EDGE_YIELD_RUNQ_NONEMPTY = 0x0, + SCHED_EDGE_YIELD_FOREIGN_RUNNABLE = 0x1, + SCHED_EDGE_YIELD_FOREIGN_RUNNING = 0x2, + SCHED_EDGE_YIELD_STEAL_POSSIBLE = 0x3, + SCHED_EDGE_YIELD_DISALLOW = 0x4, +}); + static bool -sched_clutch_amp_thread_should_yield(processor_t processor, thread_t thread) +sched_edge_thread_should_yield(processor_t processor, __unused thread_t thread) { - if (!sched_clutch_amp_processor_queue_empty(processor) || (rt_runq_count(processor->processor_set) > 0)) { + if (!sched_edge_processor_queue_empty(processor) || (rt_runq_count(processor->processor_set) > 0)) { + KDBG(MACHDBG_CODE(DBG_MACH_SCHED_CLUTCH, MACH_SCHED_EDGE_SHOULD_YIELD) | DBG_FUNC_NONE, + thread_tid(thread), processor->processor_set->pset_cluster_id, 0, SCHED_EDGE_YIELD_RUNQ_NONEMPTY); return true; } - if ((processor->processor_set->pset_cluster_type == PSET_AMP_E) && (recommended_pset_type(thread) == PSET_AMP_P)) { - return sched_clutch_root_count(&pcore_set->pset_clutch_root) > 0; + /* + * The yield logic should follow the same logic that steal_thread () does. The + * thread_should_yield() is effectively trying to quickly check that if the + * current thread gave up CPU, is there any other thread that would execute + * on this CPU. So it needs to provide the same answer as the steal_thread()/ + * processor Idle logic. 
+ */ + if (sched_edge_foreign_runnable_thread_available(processor->processor_set)) { + KDBG(MACHDBG_CODE(DBG_MACH_SCHED_CLUTCH, MACH_SCHED_EDGE_SHOULD_YIELD) | DBG_FUNC_NONE, + thread_tid(thread), processor->processor_set->pset_cluster_id, 0, SCHED_EDGE_YIELD_FOREIGN_RUNNABLE); + return true; + } + if (sched_edge_foreign_running_thread_available(processor->processor_set)) { + KDBG(MACHDBG_CODE(DBG_MACH_SCHED_CLUTCH, MACH_SCHED_EDGE_SHOULD_YIELD) | DBG_FUNC_NONE, + thread_tid(thread), processor->processor_set->pset_cluster_id, 0, SCHED_EDGE_YIELD_FOREIGN_RUNNING); + return true; + } + + processor_set_t steal_candidate = sched_edge_steal_candidate(processor->processor_set); + if (steal_candidate != NULL) { + KDBG(MACHDBG_CODE(DBG_MACH_SCHED_CLUTCH, MACH_SCHED_EDGE_SHOULD_YIELD) | DBG_FUNC_NONE, + thread_tid(thread), processor->processor_set->pset_cluster_id, 0, SCHED_EDGE_YIELD_STEAL_POSSIBLE); + return true; } + KDBG(MACHDBG_CODE(DBG_MACH_SCHED_CLUTCH, MACH_SCHED_EDGE_SHOULD_YIELD) | DBG_FUNC_NONE, thread_tid(thread), processor->processor_set->pset_cluster_id, + 0, SCHED_EDGE_YIELD_DISALLOW); return false; } static ast_t -sched_clutch_amp_processor_csw_check(processor_t processor) +sched_edge_processor_csw_check(processor_t processor) { boolean_t has_higher; int pri; @@ -2563,21 +3554,7 @@ sched_clutch_amp_processor_csw_check(processor_t processor) assert(processor->active_thread != NULL); - processor_set_t pset = processor->processor_set; - bool spill_pending = false; - int spill_pri = -1; - int spill_urgency = 0; - - if (pset == ecore_set && bit_test(pset->pending_spill_cpu_mask, processor->cpu_id)) { - spill_pending = true; - spill_pri = sched_clutch_root_priority(&pcore_set->pset_clutch_root); - spill_urgency = sched_clutch_root_urgency(&pcore_set->pset_clutch_root); - } - pri = MAX(clutch_pri, bound_runq->highq); - if (spill_pending) { - pri = MAX(pri, spill_pri); - } if (processor->first_timeslice) { has_higher = (pri > processor->current_pri); @@ -2594,10 +3571,6 @@ sched_clutch_amp_processor_csw_check(processor_t processor) return AST_PREEMPT | AST_URGENT; } - if (spill_urgency > 0) { - return AST_PREEMPT | AST_URGENT; - } - return AST_PREEMPT; } @@ -2605,25 +3578,13 @@ sched_clutch_amp_processor_csw_check(processor_t processor) } static boolean_t -sched_clutch_amp_processor_queue_has_priority(processor_t processor, +sched_edge_processor_queue_has_priority(processor_t processor, int priority, boolean_t gte) { - bool spill_pending = false; - int spill_pri = -1; - processor_set_t pset = processor->processor_set; - - if (pset == ecore_set && bit_test(pset->pending_spill_cpu_mask, processor->cpu_id)) { - spill_pending = true; - spill_pri = sched_clutch_root_priority(&pcore_set->pset_clutch_root); - } run_queue_t bound_runq = sched_clutch_bound_runq(processor); int qpri = MAX(sched_clutch_root_priority(sched_clutch_processor_root_clutch(processor)), bound_runq->highq); - if (spill_pending) { - qpri = MAX(qpri, spill_pri); - } - if (gte) { return qpri >= priority; } else { @@ -2631,56 +3592,8 @@ sched_clutch_amp_processor_queue_has_priority(processor_t processor, } } -/* - * sched_clutch_hierarchy_thread_pset() - * - * Routine to determine where a thread should be enqueued based on its - * recommendation if this is the first runnable thread in the clutch_bucket - * or its clutch bucket's hierarchy membership. 
- */ -static processor_set_t -sched_clutch_hierarchy_thread_pset(thread_t thread) -{ - if (SCHED_CLUTCH_THREAD_ELIGIBLE(thread) == false) { - return (recommended_pset_type(thread) == PSET_AMP_P) ? pcore_set : ecore_set; - } - - sched_clutch_t clutch = sched_clutch_for_thread(thread); - sched_clutch_bucket_t clutch_bucket = &(clutch->sc_clutch_buckets[thread->th_sched_bucket]); - sched_clutch_root_t scb_root = os_atomic_load(&clutch_bucket->scb_root, relaxed); - if (scb_root) { - /* Clutch bucket is already runnable, return the pset hierarchy its part of */ - return scb_root->scr_pset; - } - return (recommended_pset_type(thread) == PSET_AMP_E) ? ecore_set : pcore_set; -} - -/* - * sched_clutch_thread_pset_recommended() - * - * Routine to determine if the thread should be placed on the provided pset. - * The routine first makes sure the cluster is available for scheduling. If - * it is available, it looks at the thread's recommendation. Called - * with the pset lock held. - */ -static bool -sched_clutch_thread_pset_recommended(thread_t thread, processor_set_t pset) -{ - if (!sched_clutch_pset_available(pset)) { - return false; - } - - /* At this point, all clusters should be available and recommended */ - if (sched_clutch_hierarchy_thread_pset(thread) != pset) { - return false; - } - - return true; -} - - static void -sched_clutch_amp_processor_queue_shutdown(processor_t processor) +sched_edge_processor_queue_shutdown(processor_t processor) { processor_set_t pset = processor->processor_set; sched_clutch_root_t pset_clutch_root = sched_clutch_processor_root_clutch(processor); @@ -2693,9 +3606,11 @@ sched_clutch_amp_processor_queue_shutdown(processor_t processor) return; } + bitmap_clear(sched_edge_available_pset_bitmask, pset->pset_cluster_id); + queue_init(&tqueue); while (sched_clutch_root_count(pset_clutch_root) > 0) { - thread = sched_clutch_thread_highest(pset_clutch_root); + thread = sched_clutch_thread_highest_remove(pset_clutch_root); enqueue_tail(&tqueue, &thread->runq_links); } pset_unlock(pset); @@ -2708,333 +3623,987 @@ sched_clutch_amp_processor_queue_shutdown(processor_t processor) } } -static thread_t -sched_clutch_amp_steal_thread(processor_set_t pset) +/* + * sched_edge_cluster_load_metric() + * + * The load metric for a cluster is a measure of the average scheduling latency + * experienced by threads on that cluster. It is a product of the average number + * of threads in the runqueue and the average execution time for threads. The metric + * has special values in the following cases: + * - UINT32_MAX: If the cluster is not available for scheduling, its load is set to + * the maximum value to disallow any threads to migrate to this cluster. + * - 0: If there are idle CPUs in the cluster or an empty runqueue; this allows threads + * to be spread across the platform quickly for ncpu wide workloads. 
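A toy model of the load metric described above, with the two special cases called out; the real implementation derives the value from sched_get_pset_load_average() for the given scheduling bucket, so the arithmetic here is only meant to show the shape of the metric:

#include <stdbool.h>
#include <stdint.h>

static uint32_t
example_cluster_load_metric(bool cluster_available, uint32_t avg_runq_depth,
    uint32_t avg_execution_time_us)
{
	if (!cluster_available) {
		return UINT32_MAX;   /* never migrate threads here */
	}
	if (avg_runq_depth == 0) {
		return 0;            /* idle capacity: let work spread here quickly */
	}
	/* expected wait ~ threads queued per CPU x average execution time */
	return avg_runq_depth * avg_execution_time_us;
}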
+ */ +static uint32_t +sched_edge_cluster_load_metric(processor_set_t pset, sched_bucket_t sched_bucket) { - thread_t thread = THREAD_NULL; - processor_set_t nset = pset; - - if (pcore_set->online_processor_count == 0) { - /* Nothing to steal from */ - goto out; - } - - if (pset->pset_cluster_type == PSET_AMP_P) { - /* P cores don't steal from E cores */ - goto out; + if (sched_edge_pset_available(pset) == false) { + return UINT32_MAX; } + return (uint32_t)sched_get_pset_load_average(pset, sched_bucket); +} - processor_t processor = current_processor(); - assert(pset == processor->processor_set); - - bool spill_pending = bit_test(pset->pending_spill_cpu_mask, processor->cpu_id); - bit_clear(pset->pending_spill_cpu_mask, processor->cpu_id); +/* + * + * Edge Scheduler Steal/Rebalance logic + * + * = Generic scheduler logic = + * + * The SCHED(steal_thread) scheduler callout is invoked when the processor does not + * find any thread for execution in its runqueue. The aim of the steal operation + * is to find other threads running/runnable in other clusters which should be + * executed here. + * + * If the steal callout does not return a thread, the thread_select() logic calls + * SCHED(processor_balance) callout which is supposed to IPI other CPUs to rebalance + * threads and idle out the current CPU. + * + * = SCHED(steal_thread) for Edge Scheduler = + * + * The edge scheduler hooks into sched_edge_processor_idle() for steal_thread. This + * routine tries to do the following operations in order: + * (1) Find foreign runnnable threads in non-native cluster + * runqueues (sched_edge_foreign_runnable_thread_remove()) + * (2) Check if foreign threads are running on the non-native + * clusters (sched_edge_foreign_running_thread_available()) + * - If yes, return THREAD_NULL for the steal callout and + * perform rebalancing as part of SCHED(processor_balance) i.e. sched_edge_balance() + * (3) Steal a thread from another cluster based on edge + * weights (sched_edge_steal_thread()) + * + * = SCHED(processor_balance) for Edge Scheduler = + * + * If steal_thread did not return a thread for the processor, use + * sched_edge_balance() to rebalance foreign running threads and idle out this CPU. + * + * = Clutch Bucket Preferred Cluster Overrides = + * + * Since these operations (just like thread migrations on enqueue) + * move threads across clusters, they need support for handling clutch + * bucket group level preferred cluster recommendations. + * For (1), a clutch bucket will be in the foreign runnable queue based + * on the clutch bucket group preferred cluster. + * For (2), the running thread will set the bit on the processor based + * on its preferred cluster type. + * For (3), the edge configuration would prevent threads from being stolen + * in the wrong direction. + * + * = SCHED(thread_should_yield) = + * The thread_should_yield() logic needs to have the same logic as sched_edge_processor_idle() + * since that is expecting the same answer as if thread_select() was called on a core + * with an empty runqueue. + */ - nset = pcore_set; +static bool +sched_edge_steal_thread_enabled(__unused processor_set_t pset) +{ + /* + * For edge scheduler, the gating for steal is being done by sched_edge_steal_candidate() + */ + return true; +} - assert(nset != pset); +static processor_set_t +sched_edge_steal_candidate(processor_set_t pset) +{ + /* + * Edge Scheduler Optimization + * + * Investigate a better policy for stealing. 
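The steal/rebalance comment above fixes a strict order of checks for a CPU that runs out of local work. A condensed illustration of that decision order, using only the helper routines declared in this file; this is a sketch of the described policy, not the body of sched_edge_processor_idle():

typedef enum {
	EXAMPLE_EDGE_IDLE_RUN_FOREIGN_RUNNABLE,  /* (1) pull a foreign runnable thread over */
	EXAMPLE_EDGE_IDLE_REBALANCE_RUNNING,     /* (2) go idle; processor_balance IPIs the other cluster */
	EXAMPLE_EDGE_IDLE_STEAL,                 /* (3) steal based on edge weights vs. cluster load */
	EXAMPLE_EDGE_IDLE_GO_IDLE,               /* nothing qualifies; actually idle */
} example_edge_idle_action_t;

static example_edge_idle_action_t
example_edge_idle_action(processor_set_t pset)
{
	if (sched_edge_foreign_runnable_thread_available(pset)) {
		return EXAMPLE_EDGE_IDLE_RUN_FOREIGN_RUNNABLE;
	}
	if (sched_edge_foreign_running_thread_available(pset)) {
		return EXAMPLE_EDGE_IDLE_REBALANCE_RUNNING;
	}
	if (sched_edge_steal_candidate(pset) != NULL) {
		return EXAMPLE_EDGE_IDLE_STEAL;
	}
	return EXAMPLE_EDGE_IDLE_GO_IDLE;
}

Note that this is the same cascade sched_edge_thread_should_yield() walks earlier, which is exactly why the comment insists the yield and idle/steal paths must give the same answer.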
The current implementation looks + * at all the incoming weights for the pset that just became idle and sees which + * clusters have loads > edge weights. It is effectively trying to simulate a + * overload migration as if a thread had become runnable on the candidate cluster. + * + * The logic today bails as soon as it finds a cluster where the cluster load is + * greater than the edge weight. This helps the check to be quick which is useful + * for sched_edge_thread_should_yield() which uses this. Maybe it should have a + * more advanced version for the actual steal operation which looks for the + * maximum delta etc. + */ + processor_set_t target_pset = NULL; + uint32_t dst_cluster_id = pset->pset_cluster_id; - if (sched_get_pset_load_average(nset) >= sched_amp_steal_threshold(nset, spill_pending)) { - pset_unlock(pset); + for (int cluster_id = 0; cluster_id < MAX_PSETS; cluster_id++) { + processor_set_t candidate_pset = pset_array[cluster_id]; - pset = nset; + if (candidate_pset == pset) { + continue; + } - pset_lock(pset); + sched_clutch_edge *incoming_edge = &pset_array[cluster_id]->sched_edges[dst_cluster_id]; + if (incoming_edge->sce_steal_allowed == false) { + continue; + } - /* Allow steal if load average still OK, no idle cores, and more threads on runq than active cores DISPATCHING */ - if ((sched_get_pset_load_average(pset) >= sched_amp_steal_threshold(pset, spill_pending)) && - ((int)sched_clutch_root_count(&pset->pset_clutch_root) > bit_count(pset->cpu_state_map[PROCESSOR_DISPATCHING])) && - (bit_count(pset->recommended_bitmask & pset->cpu_state_map[PROCESSOR_IDLE]) == 0)) { - thread = sched_clutch_thread_highest(&pset->pset_clutch_root); - KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_AMP_STEAL) | DBG_FUNC_NONE, spill_pending, 0, 0, 0); - sched_update_pset_load_average(pset); + uint32_t incoming_weight = incoming_edge->sce_migration_weight; + int highest_runnable_bucket = bitmap_lsb_first(candidate_pset->pset_clutch_root.scr_unbound_runnable_bitmap, TH_BUCKET_SCHED_MAX); + if (highest_runnable_bucket == -1) { + /* Candidate cluster runq is empty */ + continue; + } + /* Use the load metrics for highest runnable bucket since that would be stolen next */ + uint32_t candidate_load = sched_edge_cluster_load_metric(candidate_pset, (sched_bucket_t)highest_runnable_bucket); + if (candidate_load > incoming_weight) { + /* Only steal from the candidate if its load is higher than the incoming edge and it has runnable threads */ + target_pset = candidate_pset; + break; } } -out: - pset_unlock(pset); - return thread; + return target_pset; } -/* Return true if this thread should not continue running on this processor */ -static bool -sched_clutch_amp_thread_avoid_processor(processor_t processor, thread_t thread) +static boolean_t +sched_edge_foreign_runnable_thread_available(processor_set_t pset) { - if (processor->processor_set->pset_cluster_type == PSET_AMP_E) { - if (sched_clutch_thread_pset_recommended(thread, pcore_set)) { - return true; + /* Find all the clusters that are foreign for this cluster */ + bitmap_t *foreign_pset_bitmap = pset->foreign_psets; + for (int cluster = bitmap_first(foreign_pset_bitmap, MAX_PSETS); cluster >= 0; cluster = bitmap_next(foreign_pset_bitmap, cluster)) { + /* + * For each cluster, see if there are any runnable foreign threads. + * This check is currently being done without the pset lock to make it cheap for + * the common case. 
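+ *
+ * [Editorial note, not part of the original change] Because this scan is lockless it
+ * can race with threads being dequeued, so callers should treat the answer as a
+ * hint; sched_edge_foreign_runnable_thread_remove() below re-validates under the
+ * target pset lock before actually removing a thread.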
+ */ + processor_set_t target_pset = pset_array[cluster]; + if (sched_edge_pset_available(target_pset) == false) { + continue; } - } else if (processor->processor_set->pset_cluster_type == PSET_AMP_P) { - if (!sched_clutch_thread_pset_recommended(thread, pcore_set)) { + + if (!sched_clutch_root_foreign_empty(&target_pset->pset_clutch_root)) { return true; } } - return false; } -static processor_t -sched_clutch_amp_choose_processor(processor_set_t pset, processor_t processor, thread_t thread) +static thread_t +sched_edge_foreign_runnable_thread_remove(processor_set_t pset, uint64_t ctime) { - /* Bound threads don't call this function */ - assert(thread->bound_processor == PROCESSOR_NULL); + thread_t thread = THREAD_NULL; - processor_set_t nset; - processor_t chosen_processor = PROCESSOR_NULL; + /* Find all the clusters that are foreign for this cluster */ + bitmap_t *foreign_pset_bitmap = pset->foreign_psets; + for (int cluster = bitmap_first(foreign_pset_bitmap, MAX_PSETS); cluster >= 0; cluster = bitmap_next(foreign_pset_bitmap, cluster)) { + /* + * For each cluster, see if there are any runnable foreign threads. + * This check is currently being done without the pset lock to make it cheap for + * the common case. + */ + processor_set_t target_pset = pset_array[cluster]; + if (sched_edge_pset_available(target_pset) == false) { + continue; + } -select_pset: - nset = (pset == ecore_set) ? pcore_set : ecore_set; - if (!sched_clutch_pset_available(pset)) { - /* If the current pset is not available for scheduling, just use the other pset */ - pset_unlock(pset); - pset_lock(nset); - goto select_processor; + if (sched_clutch_root_foreign_empty(&target_pset->pset_clutch_root)) { + continue; + } + /* + * Looks like there are runnable foreign threads in the hierarchy; lock the pset + * and get the highest priority thread. + */ + pset_lock(target_pset); + if (sched_edge_pset_available(target_pset)) { + thread = sched_clutch_root_highest_foreign_thread_remove(&target_pset->pset_clutch_root); + sched_update_pset_load_average(target_pset, ctime); + } + pset_unlock(target_pset); + + /* + * Edge Scheduler Optimization + * + * The current implementation immediately returns as soon as it finds a foreign + * runnable thread. This could be enhanced to look at highest priority threads + * from all foreign clusters and pick the highest amongst them. That would need + * some form of global state across psets to make that kind of a check cheap. 
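+ *
+ * [Editorial note, speculative] One cheap form of such global state could be a
+ * per-pset "highest runnable foreign priority" hint published alongside the other
+ * pset load data, letting this loop pick the best donor cluster without taking
+ * every pset lock. This only restates the optimization idea above; it is not
+ * something the change implements.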
+ */ + if (thread != THREAD_NULL) { + KDBG(MACHDBG_CODE(DBG_MACH_SCHED_CLUTCH, MACH_SCHED_EDGE_REBAL_RUNNABLE) | DBG_FUNC_NONE, thread_tid(thread), pset->pset_cluster_id, target_pset->pset_cluster_id, 0); + break; + } + /* Looks like the thread escaped after the check but before the pset lock was taken; continue the search */ } - /* Check if the thread is recommended to run on this pset */ - if (sched_clutch_thread_pset_recommended(thread, pset)) { - nset = pset; - goto select_processor; - } else { - /* pset not recommended; try the other pset */ - pset_unlock(pset); - pset_lock(nset); - pset = nset; - goto select_pset; + return thread; +} + +static boolean_t +sched_edge_foreign_running_thread_available(processor_set_t pset) +{ + bitmap_t *foreign_pset_bitmap = pset->foreign_psets; + for (int cluster = bitmap_first(foreign_pset_bitmap, MAX_PSETS); cluster >= 0; cluster = bitmap_next(foreign_pset_bitmap, cluster)) { + /* Skip the pset if its not schedulable */ + processor_set_t target_pset = pset_array[cluster]; + if (sched_edge_pset_available(target_pset) == false) { + continue; + } + + uint64_t running_foreign_bitmap = target_pset->cpu_state_map[PROCESSOR_RUNNING] & target_pset->cpu_running_foreign; + if (lsb_first(running_foreign_bitmap) != -1) { + /* Found a non-native CPU running a foreign thread; rebalance is needed */ + return true; + } } + return false; +} -select_processor: - if (!sched_clutch_pset_available(nset)) { +static thread_t +sched_edge_steal_thread(processor_set_t pset) +{ + thread_t thread = THREAD_NULL; + processor_set_t steal_from_pset = sched_edge_steal_candidate(pset); + if (steal_from_pset) { /* - * It looks like both psets are not available due to some - * reason. In that case, just use the master processor's - * pset for scheduling. + * sched_edge_steal_candidate() has found a pset which is ideal to steal from. + * Lock the pset and select the highest thread in that runqueue. */ - if (master_processor->processor_set != nset) { - pset_unlock(nset); - nset = master_processor->processor_set; - pset_lock(nset); + pset_lock(steal_from_pset); + if (bitmap_first(steal_from_pset->pset_clutch_root.scr_unbound_runnable_bitmap, TH_BUCKET_SCHED_MAX) != -1) { + uint64_t current_timestamp = mach_absolute_time(); + sched_clutch_root_bucket_t root_bucket = sched_clutch_root_highest_root_bucket(&steal_from_pset->pset_clutch_root, current_timestamp, SCHED_CLUTCH_HIGHEST_ROOT_BUCKET_UNBOUND_ONLY); + thread = sched_clutch_thread_unbound_lookup(&steal_from_pset->pset_clutch_root, root_bucket); + sched_clutch_thread_remove(&steal_from_pset->pset_clutch_root, thread, current_timestamp, SCHED_CLUTCH_BUCKET_OPTIONS_SAMEPRI_RR); + KDBG(MACHDBG_CODE(DBG_MACH_SCHED_CLUTCH, MACH_SCHED_EDGE_STEAL) | DBG_FUNC_NONE, thread_tid(thread), pset->pset_cluster_id, steal_from_pset->pset_cluster_id, 0); + sched_update_pset_load_average(steal_from_pset, current_timestamp); } + /* + * Edge Scheduler Optimization + * Maybe this needs to circle around if the steal candidate did not have any threads by + * by the time the pset lock was taken. + */ + pset_unlock(steal_from_pset); } - chosen_processor = choose_processor(nset, processor, thread); - assert(chosen_processor->processor_set == nset); - return chosen_processor; + return thread; +} + +/* + * sched_edge_processor_idle() + * + * The routine is the implementation for steal_thread() for the Edge scheduler. 
+ */
+static thread_t
+sched_edge_processor_idle(processor_set_t pset)
+{
+	thread_t thread = THREAD_NULL;
+
+	uint64_t ctime = mach_absolute_time();
+
+	/* Each of the operations acquires the lock for the pset it targets */
+	pset_unlock(pset);
+
+	/* Find highest priority runnable thread on all non-native clusters */
+	thread = sched_edge_foreign_runnable_thread_remove(pset, ctime);
+	if (thread != THREAD_NULL) {
+		return thread;
+	}
+
+	/* Find foreign running threads to rebalance; the actual rebalance is done in sched_edge_balance() */
+	boolean_t rebalance_needed = sched_edge_foreign_running_thread_available(pset);
+	if (rebalance_needed) {
+		return THREAD_NULL;
+	}
+
+	/* No foreign threads found; find a thread to steal from a pset based on weights/loads etc. */
+	thread = sched_edge_steal_thread(pset);
+	return thread;
+}
+
+/* Return true if this thread should not continue running on this processor */
+static bool
+sched_edge_thread_avoid_processor(processor_t processor, thread_t thread)
+{
+	processor_set_t preferred_pset = pset_array[sched_edge_thread_preferred_cluster(thread)];
+	/*
+	 * For long running parallel workloads, it is important to rebalance threads across
+	 * E/P clusters so that they make equal forward progress. This is achieved through
+	 * threads expiring their quantum on the non-preferred cluster type and explicitly
+	 * rebalancing to the preferred cluster runqueue.
+	 *
+	 * For multi-cluster platforms, it might be useful to move the thread in case its
+	 * preferred pset is idle now.
+	 */
+	if (processor->processor_set->pset_type != preferred_pset->pset_type) {
+		return true;
+	}
+	/* If thread already running on preferred cluster, do not avoid */
+	if (processor->processor_set == preferred_pset) {
+		return false;
+	}
+	/*
+	 * The thread is running on a processor that is of the same type as the
+	 * preferred pset, but is not the actual preferred pset. In that case
+	 * look at edge weights to see if this thread should continue execution
+	 * here or go back to its preferred cluster.
+	 *
+	 * This logic needs to ensure that the current thread is not counted against
+	 * the load average for the current pset otherwise it would always end up avoiding
+	 * the current cluster.
+ */ + processor_set_t chosen_pset = sched_edge_migrate_candidate(preferred_pset, thread, processor->processor_set, false); + return chosen_pset != processor->processor_set; +} + +static void +sched_edge_balance(__unused processor_t cprocessor, processor_set_t cpset) +{ + assert(cprocessor == current_processor()); + pset_unlock(cpset); + + uint64_t ast_processor_map = 0; + sched_ipi_type_t ipi_type[MAX_CPUS] = {SCHED_IPI_NONE}; + + bitmap_t *foreign_pset_bitmap = cpset->foreign_psets; + for (int cluster = bitmap_first(foreign_pset_bitmap, MAX_PSETS); cluster >= 0; cluster = bitmap_next(foreign_pset_bitmap, cluster)) { + /* Skip the pset if its not schedulable */ + processor_set_t target_pset = pset_array[cluster]; + if (sched_edge_pset_available(target_pset) == false) { + continue; + } + + pset_lock(target_pset); + uint64_t cpu_running_foreign_map = (target_pset->cpu_running_foreign & target_pset->cpu_state_map[PROCESSOR_RUNNING]); + for (int cpuid = lsb_first(cpu_running_foreign_map); cpuid >= 0; cpuid = lsb_next(cpu_running_foreign_map, cpuid)) { + processor_t target_cpu = processor_array[cpuid]; + ipi_type[target_cpu->cpu_id] = sched_ipi_action(target_cpu, NULL, false, SCHED_IPI_EVENT_REBALANCE); + if (ipi_type[cpuid] != SCHED_IPI_NONE) { + bit_set(ast_processor_map, cpuid); + } + } + pset_unlock(target_pset); + } + + for (int cpuid = lsb_first(ast_processor_map); cpuid >= 0; cpuid = lsb_next(ast_processor_map, cpuid)) { + processor_t ast_processor = processor_array[cpuid]; + sched_ipi_perform(ast_processor, ipi_type[cpuid]); + KDBG(MACHDBG_CODE(DBG_MACH_SCHED_CLUTCH, MACH_SCHED_EDGE_REBAL_RUNNING) | DBG_FUNC_NONE, 0, cprocessor->cpu_id, cpuid, 0); + } +} + +/* + * sched_edge_migrate_edges_evaluate() + * + * Routine to find the candidate for thread migration based on edge weights. + * + * Returns the most ideal cluster for execution of this thread based on outgoing edges of the preferred pset. Can + * return preferred_pset if its the most ideal destination for this thread. + */ +static processor_set_t +sched_edge_migrate_edges_evaluate(processor_set_t preferred_pset, uint32_t preferred_cluster_load, thread_t thread) +{ + processor_set_t selected_pset = preferred_pset; + uint32_t preferred_cluster_id = preferred_pset->pset_cluster_id; + cluster_type_t preferred_cluster_type = pset_type_for_id(preferred_cluster_id); + + /* Look at edge deltas with other clusters to find the ideal migration candidate */ + sched_clutch_edge *edge = preferred_pset->sched_edges; + uint32_t max_edge_delta = 0; + + /* + * Edge Scheduler Optimization + * + * For really large cluster count systems, it might make sense to optimize the + * clusters iterated by using bitmaps and skipping over clusters that are not + * available for scheduling or have migration disabled from this cluster. + */ + for (uint32_t cluster_id = 0; cluster_id < MAX_PSETS; cluster_id++) { + processor_set_t dst_pset = pset_array[cluster_id]; + if (cluster_id == preferred_cluster_id) { + continue; + } + + if (edge[cluster_id].sce_migration_allowed == false) { + continue; + } + + uint32_t dst_load = sched_edge_cluster_load_metric(dst_pset, thread->th_sched_bucket); + if (dst_load > preferred_cluster_load) { + continue; + } + + /* + * Fast path for idle dst cluster + * + * For extremely parallel workloads, it is important to load up + * all clusters as quickly as possible. This short-circuit allows + * that. + * + * + * For multi-cluster platforms, the loop should start with the homogeneous + * clusters first. 
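+ *
+ * [Editorial example, illustrative numbers only] Suppose the preferred cluster
+ * reports load 90 and a candidate reports load 40 with an outgoing
+ * sce_migration_weight of 30. The candidate is neither idle nor more loaded than
+ * the preferred cluster, so the code below computes edge_delta = 90 - 40 = 50;
+ * since 50 >= 30 the candidate remains eligible and wins unless another cluster
+ * shows a larger delta (ties are broken in favor of homogeneous clusters).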
+ */ + if (dst_load == 0) { + selected_pset = dst_pset; + break; + } + + uint32_t edge_delta = preferred_cluster_load - dst_load; + if (edge_delta < edge[cluster_id].sce_migration_weight) { + continue; + } + + if (edge_delta < max_edge_delta) { + continue; + } + + if (edge_delta == max_edge_delta) { + /* If the edge delta is the same as the max delta, make sure a homogeneous cluster is picked */ + boolean_t selected_homogeneous = (pset_type_for_id(selected_pset->pset_cluster_id) == preferred_cluster_type); + boolean_t candidate_homogeneous = (pset_type_for_id(dst_pset->pset_cluster_id) == preferred_cluster_type); + if (selected_homogeneous || !candidate_homogeneous) { + continue; + } + } + /* dst_pset seems to be the best candidate for migration */ + max_edge_delta = edge_delta; + selected_pset = dst_pset; + } + return selected_pset; } /* - * AMP Clutch Scheduler Thread Migration + * sched_edge_candidate_alternative() * - * For the AMP version of the clutch scheduler the thread is always scheduled via its - * thread group. So it is important to make sure that the thread group is part of the - * correct processor set hierarchy. In order to do that, the clutch scheduler moves - * all eligble clutch buckets to the correct hierarchy when the recommendation of a - * thread group is changed by CLPC. + * Routine to find an alternative cluster from candidate_cluster_bitmap since the + * selected_pset is not available for execution. The logic tries to prefer homogeneous + * clusters over heterogeneous clusters since this is typically used in thread + * placement decisions. */ +_Static_assert(MAX_PSETS <= 64, "Unable to fit maximum number of psets in uint64_t bitmask"); +static processor_set_t +sched_edge_candidate_alternative(processor_set_t selected_pset, uint64_t candidate_cluster_bitmap) +{ + /* + * It looks like the most ideal pset is not available for scheduling currently. + * Try to find a homogeneous cluster that is still available. + */ + bitmap_t *foreign_clusters = selected_pset->foreign_psets; + uint64_t available_native_clusters = ~(foreign_clusters[0]) & candidate_cluster_bitmap; + int available_cluster_id = lsb_first(available_native_clusters); + if (available_cluster_id == -1) { + /* Looks like none of the homogeneous clusters are available; pick the first available cluster */ + available_cluster_id = bit_first(candidate_cluster_bitmap); + } + assert(available_cluster_id != -1); + return pset_array[available_cluster_id]; +} /* - * sched_clutch_recommended_pset() + * sched_edge_switch_pset_lock() * - * Routine to decide which hierarchy the thread group should be in based on the - * recommendation and other thread group and system properties. This routine is - * used to determine if thread group migration is necessary and should mimic the - * logic in sched_clutch_thread_pset_recommended() & recommended_pset_type(). + * Helper routine for sched_edge_migrate_candidate() which switches pset locks (if needed) based on + * switch_pset_locks. + * Returns the newly locked pset after the switch. 
*/ static processor_set_t -sched_clutch_recommended_pset(sched_clutch_t sched_clutch, cluster_type_t recommendation) +sched_edge_switch_pset_lock(processor_set_t selected_pset, processor_set_t locked_pset, bool switch_pset_locks) { - if (!sched_clutch_pset_available(pcore_set)) { - return ecore_set; + if (!switch_pset_locks) { + return locked_pset; } + if (selected_pset != locked_pset) { + pset_unlock(locked_pset); + pset_lock(selected_pset); + return selected_pset; + } else { + return locked_pset; + } +} - if (!sched_clutch_pset_available(ecore_set)) { - return pcore_set; +/* + * sched_edge_migrate_candidate() + * + * Routine to find an appropriate cluster for scheduling a thread. The routine looks at the properties of + * the thread and the preferred cluster to determine the best available pset for scheduling. + * + * The switch_pset_locks parameter defines whether the routine should switch pset locks to provide an + * accurate scheduling decision. This mode is typically used when choosing a pset for scheduling a thread since the + * decision has to be synchronized with another CPU changing the recommendation of clusters available + * on the system. If this parameter is set to false, this routine returns the best effort indication of + * the cluster the thread should be scheduled on. It is typically used in fast path contexts (such as + * SCHED(thread_avoid_processor) to determine if there is a possibility of scheduling this thread on a + * more appropriate cluster. + * + * Routine returns the most ideal cluster for scheduling. If switch_pset_locks is set, it ensures that the + * resultant pset lock is held. + */ +static processor_set_t +sched_edge_migrate_candidate(processor_set_t preferred_pset, thread_t thread, processor_set_t locked_pset, bool switch_pset_locks) +{ + __kdebug_only uint32_t preferred_cluster_id = preferred_pset->pset_cluster_id; + processor_set_t selected_pset = preferred_pset; + + if (SCHED_CLUTCH_THREAD_CLUSTER_BOUND(thread)) { + /* For bound threads always recommend the cluster its bound to */ + selected_pset = pset_array[sched_edge_thread_bound_cluster_id(thread)]; + locked_pset = sched_edge_switch_pset_lock(selected_pset, locked_pset, switch_pset_locks); + if (sched_edge_pset_available(selected_pset) || (SCHED_CLUTCH_THREAD_CLUSTER_BOUND_SOFT(thread) == false)) { + /* + * If the bound cluster is not available, check if the thread is soft bound. For soft bound threads, + * fall through to the regular cluster selection logic which handles unavailable clusters + * appropriately. If the thread is hard bound, then return the bound cluster always. + */ + return selected_pset; + } + } + + uint64_t candidate_cluster_bitmap = mask(MAX_PSETS); + if (thread->sched_pri >= BASEPRI_RTQUEUES) { + /* For realtime threads, try and schedule them on the preferred pset always */ + goto migrate_candidate_available_check; + } + + /* + * If a thread is being rebalanced for achieving equal progress of parallel workloads, + * it needs to end up on the preferred runqueue. 
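+ *
+ * [Editorial note, not part of the original change] That case is detected below by
+ * requiring both AST_REBALANCE and AST_QUANTUM in thread->reason, i.e. the thread
+ * expired its quantum on a non-preferred cluster as described in
+ * sched_edge_thread_avoid_processor(); such a thread skips the edge-weight
+ * evaluation and goes straight to the preferred-pset availability check.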
+ */ + uint32_t preferred_cluster_load = sched_edge_cluster_load_metric(preferred_pset, thread->th_sched_bucket); + boolean_t amp_rebalance = (thread->reason & (AST_REBALANCE | AST_QUANTUM)) == (AST_REBALANCE | AST_QUANTUM); + if ((preferred_cluster_load == 0) || amp_rebalance) { + goto migrate_candidate_available_check; + } + + /* Look at edge weights to decide the most ideal migration candidate for this thread */ + selected_pset = sched_edge_migrate_edges_evaluate(preferred_pset, preferred_cluster_load, thread); + +migrate_candidate_available_check: + locked_pset = sched_edge_switch_pset_lock(selected_pset, locked_pset, switch_pset_locks); + if (sched_edge_pset_available(selected_pset) == true) { + if (selected_pset != preferred_pset) { + KDBG(MACHDBG_CODE(DBG_MACH_SCHED_CLUTCH, MACH_SCHED_EDGE_CLUSTER_OVERLOAD) | DBG_FUNC_NONE, thread_tid(thread), preferred_cluster_id, selected_pset->pset_cluster_id, preferred_cluster_load); + } + return selected_pset; + } + /* Looks like selected_pset is not available for scheduling; remove it from candidate_cluster_bitmap */ + bitmap_clear(&candidate_cluster_bitmap, selected_pset->pset_cluster_id); + if (__improbable(bitmap_first(&candidate_cluster_bitmap, MAX_PSETS) == -1)) { + /* + * None of the clusters are available for scheduling; this situation should be rare but if it happens, + * simply return the boot cluster. + */ + selected_pset = &pset0; + locked_pset = sched_edge_switch_pset_lock(selected_pset, locked_pset, switch_pset_locks); + if (selected_pset != preferred_pset) { + KDBG(MACHDBG_CODE(DBG_MACH_SCHED_CLUTCH, MACH_SCHED_EDGE_CLUSTER_OVERLOAD) | DBG_FUNC_NONE, thread_tid(thread), preferred_cluster_id, selected_pset->pset_cluster_id, preferred_cluster_load); + } + return selected_pset; } + /* Try and find an alternative for the selected pset */ + selected_pset = sched_edge_candidate_alternative(selected_pset, candidate_cluster_bitmap); + goto migrate_candidate_available_check; +} + +static processor_t +sched_edge_choose_processor(processor_set_t pset, processor_t processor, thread_t thread) +{ + /* Bound threads don't call this function */ + assert(thread->bound_processor == PROCESSOR_NULL); + processor_t chosen_processor = PROCESSOR_NULL; /* - * If all clusters are available and recommended, use the recommendation - * to decide which cluster to use. + * sched_edge_preferred_pset() returns the preferred pset for a given thread. + * It should take the passed in "pset" as a hint which represents the recency metric for + * pset selection logic. */ - pset_cluster_type_t type = thread_group_pset_recommendation(sched_clutch->sc_tg, recommendation); - return (type == PSET_AMP_E) ? ecore_set : pcore_set; + processor_set_t preferred_pset = pset_array[sched_edge_thread_preferred_cluster(thread)]; + processor_set_t chosen_pset = preferred_pset; + /* + * If the preferred pset is overloaded, find a pset which is the best candidate to migrate + * threads to. sched_edge_migrate_candidate() returns the preferred pset + * if it has capacity; otherwise finds the best candidate pset to migrate this thread to. + * + * + * It might be useful to build a recency metric for the thread for multiple clusters and + * factor that into the migration decisions. 
+ */ + chosen_pset = sched_edge_migrate_candidate(preferred_pset, thread, pset, true); + chosen_processor = choose_processor(chosen_pset, processor, thread); + assert(chosen_processor->processor_set == chosen_pset); + return chosen_processor; } +/* + * sched_edge_clutch_bucket_threads_drain() + * + * Drains all the runnable threads which are not restricted to the root_clutch (due to clutch + * bucket overrides etc.) into a local thread queue. + */ static void -sched_clutch_bucket_threads_drain(sched_clutch_bucket_t clutch_bucket, sched_clutch_root_t root_clutch, queue_t clutch_threads) +sched_edge_clutch_bucket_threads_drain(sched_clutch_bucket_t clutch_bucket, sched_clutch_root_t root_clutch, queue_t clutch_threads) { - uint16_t thread_count = clutch_bucket->scb_thr_count; - thread_t thread; + thread_t thread = THREAD_NULL; uint64_t current_timestamp = mach_approximate_time(); - while (thread_count > 0) { - thread = run_queue_peek(&clutch_bucket->scb_runq); + qe_foreach_element_safe(thread, &clutch_bucket->scb_thread_timeshare_queue, th_clutch_timeshare_link) { sched_clutch_thread_remove(root_clutch, thread, current_timestamp, SCHED_CLUTCH_BUCKET_OPTIONS_NONE); enqueue_tail(clutch_threads, &thread->runq_links); - thread_count--; } +} - /* - * This operation should have drained the clutch bucket and pulled it out of the - * hierarchy. - */ - assert(clutch_bucket->scb_thr_count == 0); - assert(clutch_bucket->scb_root == NULL); +/* + * sched_edge_run_drained_threads() + * + * Makes all drained threads in a local queue runnable. + */ +static void +sched_edge_run_drained_threads(queue_t clutch_threads) +{ + thread_t thread; + /* Now setrun all the threads in the local queue */ + qe_foreach_element_safe(thread, clutch_threads, runq_links) { + remqueue(&thread->runq_links); + thread_lock(thread); + thread_setrun(thread, SCHED_TAILQ); + thread_unlock(thread); + } +} + +/* + * sched_edge_update_preferred_cluster() + * + * Routine to update the preferred cluster for QoS buckets within a thread group. + * The buckets to be updated are specifed as a bitmap (clutch_bucket_modify_bitmap). + */ +static void +sched_edge_update_preferred_cluster( + sched_clutch_t sched_clutch, + bitmap_t *clutch_bucket_modify_bitmap, + uint32_t *tg_bucket_preferred_cluster) +{ + for (int bucket = bitmap_first(clutch_bucket_modify_bitmap, TH_BUCKET_SCHED_MAX); bucket >= 0; bucket = bitmap_next(clutch_bucket_modify_bitmap, bucket)) { + os_atomic_store(&sched_clutch->sc_clutch_groups[bucket].scbg_preferred_cluster, tg_bucket_preferred_cluster[bucket], relaxed); + } } /* - * sched_clutch_migrate_thread_group() + * sched_edge_migrate_thread_group_runnable_threads() * - * Routine to implement the migration of threads when the thread group + * Routine to implement the migration of threads on a cluster when the thread group * recommendation is updated. The migration works using a 2-phase * algorithm. * - * Phase 1: With the source pset (determined by sched_clutch_recommended_pset) - * locked, drain all the runnable threads into a local queue and update the TG - * recommendation. + * Phase 1: With the pset lock held, check the recommendation of the clutch buckets. + * For each clutch bucket, if it needs to be migrated immediately, drain the threads + * into a local thread queue. Otherwise mark the clutch bucket as native/foreign as + * appropriate. + * + * Phase 2: After unlocking the pset, drain all the threads from the local thread + * queue and mark them runnable which should land them in the right hierarchy. 
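+ *
+ * [Editorial note, not part of the original change] The two phases mirror the
+ * drain/setrun split in sched_edge_clutch_bucket_threads_drain() and
+ * sched_edge_run_drained_threads() above: threads are pulled onto a local queue
+ * while the pset lock is held, and thread_setrun() is only called after that lock
+ * is dropped, since re-enqueueing a thread may need to take a different pset's lock.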
+ * + * The routine assumes that the preferences for the clutch buckets/clutch bucket + * groups have already been updated by the caller. * - * Phase 2: Call thread_setrun() on all the drained threads. Since the TG recommendation - * has been updated, these should all end up in the right hierarchy. + * - Called with the pset locked and interrupts disabled. + * - Returns with the pset unlocked. */ static void -sched_clutch_migrate_thread_group(sched_clutch_t sched_clutch, cluster_type_t new_recommendation) +sched_edge_migrate_thread_group_runnable_threads( + sched_clutch_t sched_clutch, + sched_clutch_root_t root_clutch, + bitmap_t *clutch_bucket_modify_bitmap, + __unused uint32_t *tg_bucket_preferred_cluster, + bool migrate_immediately) { - thread_t thread; - - /* If the thread group is empty, just update the recommendation */ - if (os_atomic_load(&sched_clutch->sc_thr_count, relaxed) == 0) { - thread_group_update_recommendation(sched_clutch->sc_tg, new_recommendation); - return; - } - - processor_set_t dst_pset = sched_clutch_recommended_pset(sched_clutch, new_recommendation); - processor_set_t src_pset = (dst_pset == pcore_set) ? ecore_set : pcore_set; - + /* Queue to hold threads that have been drained from clutch buckets to be migrated */ queue_head_t clutch_threads; queue_init(&clutch_threads); - /* Interrupts need to be disabled to make sure threads wont become runnable during the - * migration and attempt to grab the pset/thread locks. - */ - spl_t s = splsched(); - - pset_lock(src_pset); - for (sched_bucket_t bucket = TH_BUCKET_FIXPRI; bucket < TH_BUCKET_SCHED_MAX; bucket++) { - sched_clutch_bucket_t clutch_bucket = &(sched_clutch->sc_clutch_buckets[bucket]); + for (int bucket = bitmap_first(clutch_bucket_modify_bitmap, TH_BUCKET_SCHED_MAX); bucket >= 0; bucket = bitmap_next(clutch_bucket_modify_bitmap, bucket)) { + /* Get the clutch bucket for this cluster and sched bucket */ + sched_clutch_bucket_group_t clutch_bucket_group = &(sched_clutch->sc_clutch_groups[bucket]); + sched_clutch_bucket_t clutch_bucket = &(clutch_bucket_group->scbg_clutch_buckets[root_clutch->scr_cluster_id]); sched_clutch_root_t scb_root = os_atomic_load(&clutch_bucket->scb_root, relaxed); - if ((scb_root == NULL) || (scb_root->scr_pset == dst_pset)) { + if (scb_root == NULL) { /* Clutch bucket not runnable or already in the right hierarchy; nothing to do here */ + assert(clutch_bucket->scb_thr_count == 0); continue; } - assert(scb_root->scr_pset == src_pset); - /* Now remove all the threads from the runq so that thread->runq is set correctly */ - sched_clutch_bucket_threads_drain(clutch_bucket, scb_root, &clutch_threads); + assert(scb_root == root_clutch); + uint32_t clutch_bucket_preferred_cluster = sched_clutch_bucket_preferred_cluster(clutch_bucket); + + if (migrate_immediately) { + /* + * For transitions where threads need to be migrated immediately, drain the threads into a + * local queue unless we are looking at the clutch buckets for the newly recommended + * cluster. 
+ */ + if (root_clutch->scr_cluster_id != clutch_bucket_preferred_cluster) { + sched_edge_clutch_bucket_threads_drain(clutch_bucket, scb_root, &clutch_threads); + } else { + sched_clutch_bucket_mark_native(clutch_bucket, root_clutch); + } + } else { + /* Check if this cluster is the same type as the newly recommended cluster */ + boolean_t homogeneous_cluster = (pset_type_for_id(root_clutch->scr_cluster_id) == pset_type_for_id(clutch_bucket_preferred_cluster)); + /* + * If threads do not have to be migrated immediately, just change the native/foreign + * flag on the clutch bucket. + */ + if (homogeneous_cluster) { + sched_clutch_bucket_mark_native(clutch_bucket, root_clutch); + } else { + sched_clutch_bucket_mark_foreign(clutch_bucket, root_clutch); + } + } + } + + pset_unlock(root_clutch->scr_pset); + sched_edge_run_drained_threads(&clutch_threads); +} + +/* + * sched_edge_migrate_thread_group_running_threads() + * + * Routine to find all running threads of a thread group on a specific cluster + * and IPI them if they need to be moved immediately. + */ +static void +sched_edge_migrate_thread_group_running_threads( + sched_clutch_t sched_clutch, + sched_clutch_root_t root_clutch, + __unused bitmap_t *clutch_bucket_modify_bitmap, + uint32_t *tg_bucket_preferred_cluster, + bool migrate_immediately) +{ + if (migrate_immediately == false) { + /* If CLPC has recommended not to move threads immediately, nothing to do here */ + return; } /* - * Now that all the clutch buckets have been drained, update the TG recommendation. - * This operation needs to be done with the pset lock held to make sure that anyone - * coming in before the migration started would get the original pset as the root - * of this sched_clutch and attempt to hold the src_pset lock. Once the TG changes, - * all threads that are becoming runnable would find the clutch bucket empty and - * the TG recommendation would coax them to enqueue it in the new recommended - * hierarchy. This effectively synchronizes with other threads calling - * thread_setrun() and trying to decide which pset the thread/clutch_bucket - * belongs in. + * Edge Scheduler Optimization + * + * When the system has a large number of clusters and cores, it might be useful to + * narrow down the iteration by using a thread running bitmap per clutch. */ - thread_group_update_recommendation(sched_clutch->sc_tg, new_recommendation); - pset_unlock(src_pset); + uint64_t ast_processor_map = 0; + sched_ipi_type_t ipi_type[MAX_CPUS] = {SCHED_IPI_NONE}; - /* Now setrun all the threads in the local queue */ - qe_foreach_element_safe(thread, &clutch_threads, runq_links) { - remqueue(&thread->runq_links); - thread_lock(thread); - thread_setrun(thread, SCHED_TAILQ); - thread_unlock(thread); + uint64_t running_map = root_clutch->scr_pset->cpu_state_map[PROCESSOR_RUNNING]; + /* + * Iterate all CPUs and look for the ones running threads from this thread group and are + * not restricted to the specific cluster (due to overrides etc.) 
+ */ + for (int cpuid = lsb_first(running_map); cpuid >= 0; cpuid = lsb_next(running_map, cpuid)) { + processor_t src_processor = processor_array[cpuid]; + boolean_t expected_tg = (src_processor->current_thread_group == sched_clutch->sc_tg); + sched_bucket_t processor_sched_bucket = src_processor->processor_set->cpu_running_buckets[cpuid]; + boolean_t non_preferred_cluster = tg_bucket_preferred_cluster[processor_sched_bucket] != root_clutch->scr_cluster_id; + + if (expected_tg && non_preferred_cluster) { + ipi_type[cpuid] = sched_ipi_action(src_processor, NULL, false, SCHED_IPI_EVENT_REBALANCE); + if (ipi_type[cpuid] != SCHED_IPI_NONE) { + bit_set(ast_processor_map, cpuid); + } else if (src_processor == current_processor()) { + ast_on(AST_PREEMPT); + bit_set(root_clutch->scr_pset->pending_AST_PREEMPT_cpu_mask, cpuid); + } + } } - splx(s); + /* Perform all the IPIs */ + if (bit_first(ast_processor_map) != -1) { + for (int cpuid = lsb_first(ast_processor_map); cpuid >= 0; cpuid = lsb_next(ast_processor_map, cpuid)) { + processor_t ast_processor = processor_array[cpuid]; + sched_ipi_perform(ast_processor, ipi_type[cpuid]); + } + KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_AMP_RECOMMENDATION_CHANGE) | DBG_FUNC_NONE, thread_group_get_id(sched_clutch->sc_tg), ast_processor_map, 0, 0); + } } -static void -sched_clutch_amp_thread_group_recommendation_change(struct thread_group *tg, cluster_type_t new_recommendation) +/* + * sched_edge_tg_preferred_cluster_change() + * + * Routine to handle changes to a thread group's recommendation. In the Edge Scheduler, the preferred cluster + * is specified on a per-QoS basis within a thread group. The routine updates the preferences and performs + * thread migrations based on the policy specified by CLPC. + * tg_bucket_preferred_cluster is an array of size TH_BUCKET_SCHED_MAX which specifies the new preferred cluster + * for each QoS within the thread group. + */ +void +sched_edge_tg_preferred_cluster_change(struct thread_group *tg, uint32_t *tg_bucket_preferred_cluster, sched_perfcontrol_preferred_cluster_options_t options) { + sched_clutch_t clutch = sched_clutch_for_thread_group(tg); /* - * For the clutch scheduler, the change in recommendation moves the thread group - * to the right hierarchy. sched_clutch_migrate_thread_group() is also responsible - * for updating the recommendation of the thread group. + * In order to optimize the processing, create a bitmap which represents all QoS buckets + * for which the preferred cluster has changed. 
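+ *
+ * [Editorial example] If, say, only the UT bucket of the thread group moves from
+ * cluster 0 to cluster 1, only that bucket's bit is set and the per-cluster passes
+ * below update and migrate just the UT clutch buckets, leaving the other QoS
+ * buckets untouched.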
*/ - sched_clutch_migrate_thread_group(&tg->tg_sched_clutch, new_recommendation); - - if (new_recommendation != CLUSTER_TYPE_P) { + bitmap_t clutch_bucket_modify_bitmap[BITMAP_LEN(TH_BUCKET_SCHED_MAX)] = {0}; + for (sched_bucket_t bucket = TH_BUCKET_FIXPRI; bucket < TH_BUCKET_SCHED_MAX; bucket++) { + uint32_t old_preferred_cluster = sched_edge_clutch_bucket_group_preferred_cluster(&clutch->sc_clutch_groups[bucket]); + uint32_t new_preferred_cluster = tg_bucket_preferred_cluster[bucket]; + if (old_preferred_cluster != new_preferred_cluster) { + bitmap_set(clutch_bucket_modify_bitmap, bucket); + } + } + if (bitmap_lsb_first(clutch_bucket_modify_bitmap, TH_BUCKET_SCHED_MAX) == -1) { + /* No changes in any clutch buckets; nothing to do here */ return; } - sched_amp_bounce_thread_group_from_ecores(ecore_set, tg); + for (uint32_t cluster_id = 0; cluster_id < MAX_PSETS; cluster_id++) { + processor_set_t pset = pset_array[cluster_id]; + spl_t s = splsched(); + pset_lock(pset); + /* + * The first operation is to update the preferred cluster for all QoS buckets within the + * thread group so that any future threads becoming runnable would see the new preferred + * cluster value. + */ + sched_edge_update_preferred_cluster(clutch, clutch_bucket_modify_bitmap, tg_bucket_preferred_cluster); + /* + * Currently iterates all clusters looking for running threads for a TG to be migrated. Can be optimized + * by keeping a per-clutch bitmap of clusters running threads for a particular TG. + * + * + */ + /* Migrate all running threads of the TG on this cluster based on options specified by CLPC */ + sched_edge_migrate_thread_group_running_threads(clutch, &pset->pset_clutch_root, clutch_bucket_modify_bitmap, + tg_bucket_preferred_cluster, (options & SCHED_PERFCONTROL_PREFERRED_CLUSTER_MIGRATE_RUNNING)); + /* Migrate all runnable threads of the TG in this cluster's hierarchy based on options specified by CLPC */ + sched_edge_migrate_thread_group_runnable_threads(clutch, &pset->pset_clutch_root, clutch_bucket_modify_bitmap, + tg_bucket_preferred_cluster, (options & SCHED_PERFCONTROL_PREFERRED_CLUSTER_MIGRATE_RUNNABLE)); + /* sched_edge_migrate_thread_group_runnable_threads() returns with pset unlocked */ + splx(s); + } } /* - * sched_clutch_migrate_foreign_buckets() + * sched_edge_pset_made_schedulable() * * Routine to migrate all the clutch buckets which are not in their recommended - * pset hierarchy now that a new pset has become runnable. The algorithm is - * similar to sched_clutch_migrate_thread_group(). + * pset hierarchy now that a new pset has become runnable. Its possible that this + * routine is called when the pset is already marked schedulable. * - * Invoked with the newly recommended pset lock held and interrupts disabled. + * Invoked with the pset lock held and interrupts disabled. */ static void -sched_clutch_migrate_foreign_buckets(__unused processor_t processor, processor_set_t dst_pset, boolean_t drop_lock) +sched_edge_pset_made_schedulable(__unused processor_t processor, processor_set_t dst_pset, boolean_t drop_lock) { - thread_t thread; - processor_set_t src_pset = (dst_pset == pcore_set) ? ecore_set : pcore_set; - - if (!sched_clutch_pset_available(dst_pset)) { - /* - * It is possible that some state about the pset changed, - * but its still not available for scheduling. Nothing to - * do here in that case. 
- */ + if (bitmap_test(sched_edge_available_pset_bitmask, dst_pset->pset_cluster_id)) { + /* Nothing to do here since pset is already marked schedulable */ if (drop_lock) { pset_unlock(dst_pset); } return; } - pset_unlock(dst_pset); - queue_head_t clutch_threads; - queue_init(&clutch_threads); - sched_clutch_root_t src_root = &src_pset->pset_clutch_root; - - pset_lock(src_pset); - queue_t clutch_bucket_list = &src_pset->pset_clutch_root.scr_foreign_buckets; - - if (sched_clutch_root_count(src_root) == 0) { - /* No threads present in this hierarchy */ - pset_unlock(src_pset); - goto migration_complete; - } + bitmap_set(sched_edge_available_pset_bitmask, dst_pset->pset_cluster_id); - sched_clutch_bucket_t clutch_bucket; - qe_foreach_element_safe(clutch_bucket, clutch_bucket_list, scb_foreignlink) { - sched_clutch_root_t scb_root = os_atomic_load(&clutch_bucket->scb_root, relaxed); - assert(scb_root->scr_pset == src_pset); - /* Now remove all the threads from the runq so that thread->runq is set correctly */ - sched_clutch_bucket_threads_drain(clutch_bucket, scb_root, &clutch_threads); - assert(clutch_bucket->scb_foreign == false); - } - pset_unlock(src_pset); - - /* Now setrun all the threads in the local queue */ - qe_foreach_element_safe(thread, &clutch_threads, runq_links) { - remqueue(&thread->runq_links); + thread_t thread = sched_edge_processor_idle(dst_pset); + if (thread != THREAD_NULL) { thread_lock(thread); thread_setrun(thread, SCHED_TAILQ); thread_unlock(thread); } -migration_complete: if (!drop_lock) { pset_lock(dst_pset); } } -#endif /* __AMP__ */ +extern int sched_amp_spill_deferred_ipi; +extern int sched_amp_pcores_preempt_immediate_ipi; + +int sched_edge_migrate_ipi_immediate = 1; + +sched_ipi_type_t +sched_edge_ipi_policy(processor_t dst, thread_t thread, boolean_t dst_idle, sched_ipi_event_t event) +{ + processor_set_t pset = dst->processor_set; + assert(bit_test(pset->pending_AST_URGENT_cpu_mask, dst->cpu_id) == false); + assert(dst != current_processor()); + + boolean_t deferred_ipi_supported = false; +#if defined(CONFIG_SCHED_DEFERRED_AST) + deferred_ipi_supported = true; +#endif /* CONFIG_SCHED_DEFERRED_AST */ + + switch (event) { + case SCHED_IPI_EVENT_SPILL: + /* For Spill event, use deferred IPIs if sched_amp_spill_deferred_ipi set */ + if (deferred_ipi_supported && sched_amp_spill_deferred_ipi) { + return sched_ipi_deferred_policy(pset, dst, event); + } + break; + case SCHED_IPI_EVENT_PREEMPT: + /* For preemption, the default policy is to use deferred IPIs + * for Non-RT P-core preemption. Override that behavior if + * sched_amp_pcores_preempt_immediate_ipi is set + */ + if (thread && thread->sched_pri < BASEPRI_RTQUEUES) { + if (sched_edge_migrate_ipi_immediate) { + /* + * For workloads that are going wide, it might be useful use Immediate IPI to + * wakeup the idle CPU if the scheduler estimates that the preferred pset will + * be busy for the deferred IPI timeout. The Edge Scheduler uses the avg execution + * latency on the preferred pset as an estimate of busyness. + * + * + */ + processor_set_t preferred_pset = pset_array[sched_edge_thread_preferred_cluster(thread)]; + if ((preferred_pset->pset_execution_time[thread->th_sched_bucket].pset_avg_thread_execution_time * NSEC_PER_USEC) >= ml_cpu_signal_deferred_get_timer()) { + return dst_idle ? SCHED_IPI_IDLE : SCHED_IPI_IMMEDIATE; + } + } + if (sched_amp_pcores_preempt_immediate_ipi && (pset_type_for_id(pset->pset_cluster_id) == CLUSTER_TYPE_P)) { + return dst_idle ? 
SCHED_IPI_IDLE : SCHED_IPI_IMMEDIATE; + } + } + break; + default: + break; + } + /* Default back to the global policy for all other scenarios */ + return sched_ipi_policy(dst, thread, dst_idle, event); +} + +/* + * sched_edge_qos_max_parallelism() + */ +uint32_t +sched_edge_qos_max_parallelism(int qos, uint64_t options) +{ + uint32_t ecount = 0; + uint32_t pcount = 0; + + for (int cluster_id = 0; cluster_id < MAX_PSETS; cluster_id++) { + processor_set_t pset = pset_array[cluster_id]; + if (pset_type_for_id(cluster_id) == CLUSTER_TYPE_P) { + pcount += pset->cpu_set_count; + } else { + ecount += pset->cpu_set_count; + } + } + + if (options & QOS_PARALLELISM_REALTIME) { + /* For realtime threads on AMP, we would want them + * to limit the width to just the P-cores since we + * do not spill/rebalance for RT threads. + */ + return pcount; + } + + /* + * The Edge scheduler supports per-QoS recommendations for thread groups. + * This enables lower QoS buckets (such as UT) to be scheduled on all + * CPUs on the system. + * + * The only restriction is for BG/Maintenance QoS classes for which the + * performance controller would never recommend execution on the P-cores. + * If that policy changes in the future, this value should be changed. + */ + switch (qos) { + case THREAD_QOS_BACKGROUND: + case THREAD_QOS_MAINTENANCE: + return ecount; + default: + return ecount + pcount; + } +} + + + +#endif /* CONFIG_SCHED_EDGE */ #endif /* CONFIG_SCHED_CLUTCH */ diff --git a/osfmk/kern/sched_clutch.h b/osfmk/kern/sched_clutch.h index 5600eb378..f353a5e7a 100644 --- a/osfmk/kern/sched_clutch.h +++ b/osfmk/kern/sched_clutch.h @@ -47,9 +47,12 @@ * daemon thread groups which are marked "Efficient" on AMP * systems. */ -#define SCHED_CLUTCH_TG_PRI_LOW 0x0 -#define SCHED_CLUTCH_TG_PRI_MED 0x1 -#define SCHED_CLUTCH_TG_PRI_HIGH 0x2 +__enum_decl(sched_clutch_tg_priority_t, uint8_t, { + SCHED_CLUTCH_TG_PRI_LOW = 0, + SCHED_CLUTCH_TG_PRI_MED = 1, + SCHED_CLUTCH_TG_PRI_HIGH = 2, + SCHED_CLUTCH_TG_PRI_MAX = 3, +}); /* * For the current implementation, bound threads are not managed @@ -58,13 +61,22 @@ */ #define SCHED_CLUTCH_THREAD_ELIGIBLE(thread) ((thread->bound_processor) == PROCESSOR_NULL) +#if CONFIG_SCHED_EDGE +#define SCHED_CLUTCH_THREAD_CLUSTER_BOUND(thread) ((thread->sched_flags & (TH_SFLAG_ECORE_ONLY | TH_SFLAG_PCORE_ONLY)) != 0) +#define SCHED_CLUTCH_THREAD_CLUSTER_BOUND_SOFT(thread) ((thread->sched_flags & TH_SFLAG_BOUND_SOFT) != 0) + +#else /* CONFIG_SCHED_EDGE */ +#define SCHED_CLUTCH_THREAD_CLUSTER_BOUND(thread) (0) +#define SCHED_CLUTCH_THREAD_CLUSTER_BOUND_SOFT(thread) (0) +#endif /* CONFIG_SCHED_EDGE */ + /* * Clutch Bucket Runqueue Structure. 
*/ struct sched_clutch_bucket_runq { int scbrq_highq; - bitmap_t scbrq_bitmap[BITMAP_LEN(NRQS_MAX)]; int scbrq_count; + bitmap_t scbrq_bitmap[BITMAP_LEN(NRQS_MAX)]; circle_queue_head_t scbrq_queues[NRQS_MAX]; }; typedef struct sched_clutch_bucket_runq *sched_clutch_bucket_runq_t; @@ -94,16 +106,25 @@ typedef struct sched_clutch_bucket_runq *sched_clutch_bucket_runq_t; struct sched_clutch_root_bucket { /* (I) sched bucket represented by this root bucket */ uint8_t scrb_bucket; - /* (P) priority queue for all clutch buckets in this sched bucket */ - struct sched_clutch_bucket_runq scrb_clutch_buckets; + /* (I) Indicates the root bucket represents cluster bound threads */ + bool scrb_bound; + /* (P) Indicates if the root bucket is in starvation avoidance mode */ + bool scrb_starvation_avoidance; + + union { + /* (P) priority queue for all unbound clutch buckets in this sched bucket */ + struct sched_clutch_bucket_runq scrb_clutch_buckets; + /* (P) Runqueue for all bound threads part of this root bucket */ + struct run_queue scrb_bound_thread_runq; + }; /* (P) priority queue entry to use for enqueueing root bucket into root prioq */ - struct priority_queue_entry scrb_pqlink; - /* (P) ageout deadline for this root bucket */ - uint64_t scrb_deadline; + struct priority_queue_entry_deadline scrb_pqlink; /* (P) warped deadline for root bucket */ uint64_t scrb_warped_deadline; /* (P) warp remaining for root bucket */ uint64_t scrb_warp_remaining; + /* (P) timestamp for the start of the starvation avoidance window */ + uint64_t scrb_starvation_ts; }; typedef struct sched_clutch_root_bucket *sched_clutch_root_bucket_t; @@ -126,6 +147,7 @@ struct sched_clutch_root { /* (P) root level urgency; represents the urgency of the whole hierarchy for pre-emption purposes */ int16_t scr_urgency; + uint32_t scr_cluster_id; /* (I) processor set this hierarchy belongs to */ processor_set_t scr_pset; /* @@ -133,23 +155,37 @@ struct sched_clutch_root { * allows easy iteration in the sched tick based timesharing code */ queue_head_t scr_clutch_buckets; + /* - * (P) list of all runnable foreign buckets in this hierarchy; + * (P) priority queue of all runnable foreign buckets in this hierarchy; * used for tracking thread groups which need to be migrated when - * psets are available + * psets are available or rebalancing threads on CPU idle. 
*/ - queue_head_t scr_foreign_buckets; + struct priority_queue_sched_max scr_foreign_buckets; /* Root level bucket management */ - /* (P) bitmap of all runnable clutch_root_buckets; used for root pri calculation */ - bitmap_t scr_runnable_bitmap[BITMAP_LEN(TH_BUCKET_SCHED_MAX)]; - /* (P) bitmap of all runnable root buckets which have warps remaining */ - bitmap_t scr_warp_available[BITMAP_LEN(TH_BUCKET_SCHED_MAX)]; - /* (P) priority queue of all runnable clutch_root_buckets */ - struct priority_queue scr_root_buckets; - /* (P) storage for all possible clutch_root_buckets */ - struct sched_clutch_root_bucket scr_buckets[TH_BUCKET_SCHED_MAX]; + /* (P) bitmap of all runnable unbounded root buckets */ + bitmap_t scr_unbound_runnable_bitmap[BITMAP_LEN(TH_BUCKET_SCHED_MAX)]; + /* (P) bitmap of all runnable unbounded root buckets which have warps remaining */ + bitmap_t scr_unbound_warp_available[BITMAP_LEN(TH_BUCKET_SCHED_MAX)]; + /* (P) bitmap of all runnable bounded root buckets */ + bitmap_t scr_bound_runnable_bitmap[BITMAP_LEN(TH_BUCKET_SCHED_MAX)]; + /* (P) bitmap of all runnable bounded root buckets which have warps remaining */ + bitmap_t scr_bound_warp_available[BITMAP_LEN(TH_BUCKET_SCHED_MAX)]; + + /* (P) priority queue of all runnable unbounded root buckets in deadline order */ + struct priority_queue_deadline_min scr_unbound_root_buckets; + /* (P) priority queue of all bounded root buckets in deadline order */ + struct priority_queue_deadline_min scr_bound_root_buckets; + + /* (P) cumulative run counts at each bucket for load average calculation */ + uint16_t _Atomic scr_cumulative_run_count[TH_BUCKET_SCHED_MAX]; + + /* (P) storage for all unbound clutch_root_buckets */ + struct sched_clutch_root_bucket scr_unbound_buckets[TH_BUCKET_SCHED_MAX]; + /* (P) storage for all bound clutch_root_buckets */ + struct sched_clutch_root_bucket scr_bound_buckets[TH_BUCKET_SCHED_MAX]; }; typedef struct sched_clutch_root *sched_clutch_root_t; @@ -167,19 +203,19 @@ struct sched_clutch; * types used are different based on the platform. */ -#if __arm64__ +#if __LP64__ #define CLUTCH_CPU_DATA_MAX (UINT64_MAX) typedef uint64_t clutch_cpu_data_t; typedef unsigned __int128 clutch_cpu_data_wide_t; -#else /* __arm64__ */ +#else /* __LP64__ */ #define CLUTCH_CPU_DATA_MAX (UINT32_MAX) typedef uint32_t clutch_cpu_data_t; typedef uint64_t clutch_cpu_data_wide_t; -#endif /* __arm64__ */ +#endif /* __LP64__ */ typedef union sched_clutch_bucket_cpu_data { struct { @@ -195,61 +231,112 @@ typedef union sched_clutch_bucket_cpu_data { * struct sched_clutch_bucket * * A sched_clutch_bucket represents the set of threads for a thread - * group at a particular scheduling bucket. It maintains information - * about the CPU usage & blocking behavior of all threads part of - * the clutch_bucket and maintains the timesharing attributes for - * threads in its runq. It uses the decay based algorithm to timeshare - * among threads in the runq. + * group at a particular scheduling bucket in a specific cluster. + * It maintains information about the CPU usage & blocking behavior + * of all threads part of the clutch_bucket. It inherits the timeshare + * values from the clutch_bucket_group for decay and timesharing among + * threads in the clutch. + * + * Since the clutch bucket is a per thread group per-QoS entity it is + * important to keep its size small and the structure well aligned. 
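+ *
+ * [Editorial note, not part of the original change] Each thread group embeds
+ * TH_BUCKET_SCHED_MAX clutch bucket groups (see struct sched_clutch below) and
+ * each group in turn embeds MAX_PSETS of these per-cluster clutch buckets, so the
+ * number of sched_clutch_bucket instances per thread group scales as
+ * TH_BUCKET_SCHED_MAX * MAX_PSETS; hence the emphasis on size and alignment.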
*/ struct sched_clutch_bucket { +#if CONFIG_SCHED_EDGE + /* (P) flag to indicate if the bucket is a foreign bucket */ + bool scb_foreign; +#endif /* CONFIG_SCHED_EDGE */ /* (I) bucket for the clutch_bucket */ uint8_t scb_bucket; /* (P) priority of the clutch bucket */ uint8_t scb_priority; - /* (P) interactivity score of the clutch bucket */ - uint8_t scb_interactivity_score; - /* (P) flag to indicate if the bucket is a foreign bucket */ - bool scb_foreign; - - /* Properties used for timesharing threads in this clutch_bucket */ - /* (P) number of threads in this clutch_bucket; should match runq.count */ uint16_t scb_thr_count; - /* (A) run count (running + runnable) for this clutch_bucket */ - uint16_t _Atomic scb_run_count; - /* (A) sched tick when the clutch bucket load/shifts were updated */ - uint32_t _Atomic scb_timeshare_tick; - /* (A) priority shifts for threads in the clutch_bucket */ - uint32_t _Atomic scb_pri_shift; - /* (P) linkage for all clutch_buckets in a root bucket; used for tick operations */ - queue_chain_t scb_listlink; - -#if __AMP__ - /* (P) linkage for all "foreign" clutch buckets in the root clutch */ - queue_chain_t scb_foreignlink; -#endif /* __AMP__ */ - - /* (P) timestamp for the last time the interactivity score was updated */ - uint64_t scb_interactivity_ts; - /* (P) timestamp for the last time the clutch_bucket blocked */ - uint64_t scb_blocked_ts; - - /* (A) CPU usage information for the clutch bucket */ - sched_clutch_bucket_cpu_data_t scb_cpu_data; - /* (P) linkage for clutch_bucket in root_bucket runqueue */ - queue_chain_t scb_runqlink; - /* (I) clutch to which this clutch bucket belongs */ - struct sched_clutch *scb_clutch; + /* Pointer to the clutch bucket group this clutch bucket belongs to */ + struct sched_clutch_bucket_group *scb_group; /* (A) pointer to the root of the hierarchy this bucket is in */ struct sched_clutch_root *scb_root; /* (P) priority queue of threads based on their promoted/base priority */ - struct priority_queue scb_clutchpri_prioq; + struct priority_queue_sched_max scb_clutchpri_prioq; /* (P) runq of threads in clutch_bucket */ - struct run_queue scb_runq; + struct priority_queue_sched_stable_max scb_thread_runq; + + /* (P) linkage for all clutch_buckets in a root bucket; used for tick operations */ + queue_chain_t scb_listlink; + /* (P) linkage for clutch_bucket in root_bucket runqueue */ + queue_chain_t scb_runqlink; + /* (P) queue of threads for timesharing purposes */ + queue_head_t scb_thread_timeshare_queue; +#if CONFIG_SCHED_EDGE + /* (P) linkage for all "foreign" clutch buckets in the root clutch */ + struct priority_queue_entry_sched scb_foreignlink; +#endif /* CONFIG_SCHED_EDGE */ }; typedef struct sched_clutch_bucket *sched_clutch_bucket_t; +/* + * sched_clutch_counter_time_t + * + * Holds thread counts and a timestamp (typically for a clutch bucket group). + * Used to allow atomic updates to these fields. + */ +typedef union sched_clutch_counter_time { + struct { + uint64_t scct_count; + uint64_t scct_timestamp; + }; +#if __LP64__ + unsigned __int128 scct_packed; +#endif /* __LP64__ */ +} __attribute__((aligned(16))) sched_clutch_counter_time_t; + +/* + * struct sched_clutch_bucket_group + * + * It represents all the threads for a thread group at a particular + * QoS/Scheduling bucket. This structure also maintains the timesharing + * properties that are used for decay calculation for all threads in the + * thread group at the specific scheduling bucket. 
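+ *
+ * [Editorial note, not part of the original change] The scbg_blocked_data,
+ * scbg_pending_data and scbg_interactivity_data fields below use
+ * sched_clutch_counter_time_t, which packs a 64-bit count and a 64-bit timestamp
+ * into one 16-byte-aligned union so both can be updated together with a single
+ * 128-bit atomic where available; the !__LP64__ case falls back to the
+ * scbg_stats_lock spinlock instead.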
+ */ +struct sched_clutch_bucket_group { + /* (I) bucket for the clutch_bucket_group */ + uint8_t scbg_bucket; + /* (A) sched tick when the clutch bucket group load/shifts were updated */ + uint32_t _Atomic scbg_timeshare_tick; + /* (A) priority shifts for threads in the clutch_bucket_group */ + uint32_t _Atomic scbg_pri_shift; + /* (A) preferred cluster ID for clutch bucket */ + uint32_t _Atomic scbg_preferred_cluster; + /* (I) clutch to which this clutch bucket_group belongs */ + struct sched_clutch *scbg_clutch; +#if !__LP64__ + /* Lock for synchronizing updates to blocked data (only on platforms without 128-atomics) */ + lck_spin_t scbg_stats_lock; +#endif /* !__LP64__ */ + /* (A/L depending on arch) holds blcked timestamp and runnable/running count */ + sched_clutch_counter_time_t scbg_blocked_data; + /* (P/A depending on scheduler) holds pending timestamp and thread count */ + sched_clutch_counter_time_t scbg_pending_data; + /* (P/A depending on scheduler) holds interactivity timestamp and score */ + sched_clutch_counter_time_t scbg_interactivity_data; + /* (A) CPU usage information for the clutch bucket group */ + sched_clutch_bucket_cpu_data_t scbg_cpu_data; + + /* + * Edge Scheduler Optimization + * + * Currently the array is statically sized based on MAX_PSETS. + * If that definition does not exist (or has a large theoretical + * max value), this could be a dynamic array based on ml_topology_info* + * routines. + * + * + */ + /* Storage for all clutch buckets for a thread group at scbg_bucket */ + struct sched_clutch_bucket scbg_clutch_buckets[MAX_PSETS]; +}; +typedef struct sched_clutch_bucket_group *sched_clutch_bucket_group_t; + /* * struct sched_clutch @@ -270,14 +357,14 @@ struct sched_clutch { */ union { /* (A) priority specified by the thread grouping mechanism */ - uint8_t _Atomic sc_tg_priority; + sched_clutch_tg_priority_t _Atomic sc_tg_priority; }; union { /* (I) Pointer to thread group */ struct thread_group *sc_tg; }; /* (I) storage for all clutch_buckets for this clutch */ - struct sched_clutch_bucket sc_clutch_buckets[TH_BUCKET_SCHED_MAX]; + struct sched_clutch_bucket_group sc_clutch_groups[TH_BUCKET_SCHED_MAX]; }; typedef struct sched_clutch *sched_clutch_t; @@ -288,6 +375,7 @@ void sched_clutch_destroy(sched_clutch_t); /* Clutch thread membership management */ void sched_clutch_thread_clutch_update(thread_t, sched_clutch_t, sched_clutch_t); +uint32_t sched_edge_thread_preferred_cluster(thread_t); /* Clutch timesharing stats management */ uint32_t sched_clutch_thread_run_bucket_incr(thread_t, sched_bucket_t); @@ -300,6 +388,32 @@ uint32_t sched_clutch_root_count(sched_clutch_root_t); /* Grouping specific external routines */ extern sched_clutch_t sched_clutch_for_thread(thread_t); +extern sched_clutch_t sched_clutch_for_thread_group(struct thread_group *); + +#if CONFIG_SCHED_EDGE + +/* + * Getter and Setter for Edge configuration. Used by CLPC to affect thread migration behavior. 
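+ *
+ * [Editorial note, assumption] The matrix is passed as a flat array of
+ * matrix_order * matrix_order sched_clutch_edge entries, presumably indexed
+ * row-major as edge_matrix[src_cluster * matrix_order + dst_cluster], with the
+ * request/changes bitmaps selecting which entries CLPC wants read or applied.
+ * The exact indexing is not visible in this header, so treat this as an
+ * illustration rather than a contract.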
+ */ +void sched_edge_matrix_get(sched_clutch_edge *edge_matrix, bool *edge_request_bitmap, uint64_t flags, uint64_t matrix_order); +void sched_edge_matrix_set(sched_clutch_edge *edge_matrix, bool *edge_changes_bitmap, uint64_t flags, uint64_t matrix_order); +void sched_edge_tg_preferred_cluster_change(struct thread_group *tg, uint32_t *tg_bucket_preferred_cluster, sched_perfcontrol_preferred_cluster_options_t options); + +uint16_t sched_edge_cluster_cumulative_count(sched_clutch_root_t root_clutch, sched_bucket_t bucket); + +#if DEVELOPMENT || DEBUG +/* + * Sysctl support for dynamically configuring edge properties. + * + * + */ +kern_return_t sched_edge_sysctl_configure_e_to_p(uint64_t); +kern_return_t sched_edge_sysctl_configure_p_to_e(uint64_t); +sched_clutch_edge sched_edge_e_to_p(void); +sched_clutch_edge sched_edge_p_to_e(void); +#endif /* DEVELOPMENT || DEBUG */ + +#endif /* CONFIG_SCHED_EDGE */ #endif /* CONFIG_SCHED_CLUTCH */ diff --git a/osfmk/kern/sched_dualq.c b/osfmk/kern/sched_dualq.c index 0f04cd427..fb9241b0a 100644 --- a/osfmk/kern/sched_dualq.c +++ b/osfmk/kern/sched_dualq.c @@ -109,6 +109,7 @@ const struct sched_dispatch_table sched_dualq_dispatch = { .steal_thread_enabled = sched_steal_thread_enabled, .steal_thread = sched_dualq_steal_thread, .compute_timeshare_priority = sched_compute_timeshare_priority, + .choose_node = sched_choose_node, .choose_processor = choose_processor, .processor_enqueue = sched_dualq_processor_enqueue, .processor_queue_shutdown = sched_dualq_processor_queue_shutdown, @@ -133,19 +134,19 @@ const struct sched_dispatch_table sched_dualq_dispatch = { .thread_avoid_processor = sched_dualq_thread_avoid_processor, .processor_balance = sched_SMT_balance, - .rt_runq = sched_rtglobal_runq, - .rt_init = sched_rtglobal_init, - .rt_queue_shutdown = sched_rtglobal_queue_shutdown, - .rt_runq_scan = sched_rtglobal_runq_scan, - .rt_runq_count_sum = sched_rtglobal_runq_count_sum, + .rt_runq = sched_rtlocal_runq, + .rt_init = sched_rtlocal_init, + .rt_queue_shutdown = sched_rtlocal_queue_shutdown, + .rt_runq_scan = sched_rtlocal_runq_scan, + .rt_runq_count_sum = sched_rtlocal_runq_count_sum, .qos_max_parallelism = sched_qos_max_parallelism, .check_spill = sched_check_spill, .ipi_policy = sched_ipi_policy, .thread_should_yield = sched_thread_should_yield, - .run_count_incr = sched_run_incr, - .run_count_decr = sched_run_decr, - .update_thread_bucket = sched_update_thread_bucket, + .run_count_incr = sched_smt_run_incr, + .run_count_decr = sched_smt_run_decr, + .update_thread_bucket = sched_smt_update_thread_bucket, .pset_made_schedulable = sched_pset_made_schedulable, }; @@ -453,12 +454,15 @@ sched_dualq_steal_thread(processor_set_t pset) processor_set_t nset = next_pset(cset); thread_t thread; + /* Secondary processors on SMT systems never steal */ + assert(current_processor()->processor_primary == current_processor()); + while (nset != pset) { pset_unlock(cset); cset = nset; pset_lock(cset); - if (cset->pset_runq.count > 0) { + if (pset_has_stealable_threads(cset)) { /* Need task_restrict logic here */ thread = run_queue_dequeue(&cset->pset_runq, SCHED_HEADQ); pset_unlock(cset); diff --git a/osfmk/kern/sched_grrr.c b/osfmk/kern/sched_grrr.c index 5f663aba7..ddfa90ad7 100644 --- a/osfmk/kern/sched_grrr.c +++ b/osfmk/kern/sched_grrr.c @@ -201,6 +201,7 @@ const struct sched_dispatch_table sched_grrr_dispatch = { .steal_thread_enabled = sched_steal_thread_DISABLED, .steal_thread = sched_grrr_steal_thread, .compute_timeshare_priority = sched_grrr_compute_priority, + 
.choose_node = sched_choose_node, .choose_processor = sched_grrr_choose_processor, .processor_enqueue = sched_grrr_processor_enqueue, .processor_queue_shutdown = sched_grrr_processor_queue_shutdown, @@ -225,11 +226,11 @@ const struct sched_dispatch_table sched_grrr_dispatch = { .thread_avoid_processor = NULL, .processor_balance = sched_SMT_balance, - .rt_runq = sched_rtglobal_runq, - .rt_init = sched_rtglobal_init, - .rt_queue_shutdown = sched_rtglobal_queue_shutdown, - .rt_runq_scan = sched_rtglobal_runq_scan, - .rt_runq_count_sum = sched_rtglobal_runq_count_sum, + .rt_runq = sched_rtlocal_runq, + .rt_init = sched_rtlocal_init, + .rt_queue_shutdown = sched_rtlocal_queue_shutdown, + .rt_runq_scan = sched_rtlocal_runq_scan, + .rt_runq_count_sum = sched_rtlocal_runq_count_sum, .qos_max_parallelism = sched_qos_max_parallelism, .check_spill = sched_check_spill, @@ -558,17 +559,17 @@ grrr_priority_mapping_init(void) /* Map 0->0 up to 10->20 */ for (i = 0; i <= 10; i++) { - grrr_priority_mapping[i] = 2 * i; + grrr_priority_mapping[i] = (grrr_proportional_priority_t)(2 * i); } /* Map user priorities 11->33 up to 51 -> 153 */ for (i = 11; i <= 51; i++) { - grrr_priority_mapping[i] = 3 * i; + grrr_priority_mapping[i] = (grrr_proportional_priority_t)(3 * i); } /* Map high priorities 52->180 up to 127->255 */ for (i = 52; i <= 127; i++) { - grrr_priority_mapping[i] = 128 + i; + grrr_priority_mapping[i] = (grrr_proportional_priority_t)(128 + i); } for (i = 0; i < NUM_GRRR_PROPORTIONAL_PRIORITIES; i++) { @@ -581,7 +582,7 @@ grrr_priority_mapping_init(void) #endif /* Groups of 4 */ - grrr_group_mapping[i] = i >> 2; + grrr_group_mapping[i] = (grrr_group_index_t)(i >> 2); } } @@ -722,10 +723,6 @@ grrr_enqueue( gindex = grrr_group_mapping[gpriority]; group = &rq->groups[gindex]; -#if 0 - thread->grrr_deficit = 0; -#endif - if (group->count == 0) { /* Empty group, this is the first client */ enqueue_tail(&group->clients, (queue_entry_t)thread); diff --git a/osfmk/kern/sched_multiq.c b/osfmk/kern/sched_multiq.c index a96f7bd63..518a9a846 100644 --- a/osfmk/kern/sched_multiq.c +++ b/osfmk/kern/sched_multiq.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013 Apple Inc. All rights reserved. + * Copyright (c) 2013-2020 Apple Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -224,17 +224,14 @@ static integer_t drain_depth_limit; #define DEFAULT_DRAIN_CEILING BASEPRI_FOREGROUND static integer_t drain_ceiling; -static struct zone *sched_group_zone; +static ZONE_DECLARE(sched_group_zone, "sched groups", + sizeof(struct sched_group), ZC_NOENCRYPT | ZC_NOCALLOUT); static uint64_t num_sched_groups = 0; static queue_head_t sched_groups; -static lck_attr_t sched_groups_lock_attr; -static lck_grp_t sched_groups_lock_grp; -static lck_grp_attr_t sched_groups_lock_grp_attr; - -static lck_mtx_t sched_groups_lock; - +static LCK_GRP_DECLARE(sched_groups_lock_grp, "sched_groups"); +static LCK_MTX_DECLARE(sched_groups_lock, &sched_groups_lock_grp); static void sched_multiq_init(void); @@ -302,6 +299,7 @@ const struct sched_dispatch_table sched_multiq_dispatch = { .steal_thread_enabled = sched_steal_thread_DISABLED, .steal_thread = sched_multiq_steal_thread, .compute_timeshare_priority = sched_compute_timeshare_priority, + .choose_node = sched_choose_node, .choose_processor = choose_processor, .processor_enqueue = sched_multiq_processor_enqueue, .processor_queue_shutdown = sched_multiq_processor_queue_shutdown, @@ -326,11 +324,11 @@ const struct sched_dispatch_table sched_multiq_dispatch = { .thread_avoid_processor = sched_multiq_thread_avoid_processor, .processor_balance = sched_SMT_balance, - .rt_runq = sched_rtglobal_runq, - .rt_init = sched_rtglobal_init, - .rt_queue_shutdown = sched_rtglobal_queue_shutdown, - .rt_runq_scan = sched_rtglobal_runq_scan, - .rt_runq_count_sum = sched_rtglobal_runq_count_sum, + .rt_runq = sched_rtlocal_runq, + .rt_init = sched_rtlocal_init, + .rt_queue_shutdown = sched_rtlocal_queue_shutdown, + .rt_runq_scan = sched_rtlocal_runq_scan, + .rt_runq_count_sum = sched_rtlocal_runq_count_sum, .qos_max_parallelism = sched_qos_max_parallelism, .check_spill = sched_check_spill, @@ -367,22 +365,8 @@ sched_multiq_init(void) printf("multiq scheduler config: deep-drain %d, ceiling %d, depth limit %d, band limit %d, sanity check %d\n", deep_drain, drain_ceiling, drain_depth_limit, drain_band_limit, multiq_sanity_check); - sched_group_zone = zinit( - sizeof(struct sched_group), - task_max * sizeof(struct sched_group), - PAGE_SIZE, - "sched groups"); - - zone_change(sched_group_zone, Z_NOENCRYPT, TRUE); - zone_change(sched_group_zone, Z_NOCALLOUT, TRUE); - queue_init(&sched_groups); - lck_grp_attr_setdefault(&sched_groups_lock_grp_attr); - lck_grp_init(&sched_groups_lock_grp, "sched_groups", &sched_groups_lock_grp_attr); - lck_attr_setdefault(&sched_groups_lock_attr); - lck_mtx_init(&sched_groups_lock, &sched_groups_lock_grp, &sched_groups_lock_attr); - sched_timeshare_init(); } @@ -423,9 +407,9 @@ sched_group_create(void) run_queue_init(&sched_group->runq); - for (int i = 0; i < NRQS; i++) { + for (size_t i = 0; i < NRQS; i++) { sched_group->entries[i].runq = 0; - sched_group->entries[i].sched_pri = i; + sched_group->entries[i].sched_pri = (int16_t)i; } lck_mtx_lock(&sched_groups_lock); diff --git a/osfmk/kern/sched_prim.c b/osfmk/kern/sched_prim.c index 42e73b4f0..5d04d976e 100644 --- a/osfmk/kern/sched_prim.c +++ b/osfmk/kern/sched_prim.c @@ -125,6 +125,9 @@ #include #include +struct sched_statistics PERCPU_DATA(sched_stats); +bool sched_stats_active; + int rt_runq_count(processor_set_t pset) { @@ -144,19 +147,19 @@ rt_runq_count_decr(processor_set_t pset) } #define DEFAULT_PREEMPTION_RATE 100 /* (1/s) */ -int default_preemption_rate = DEFAULT_PREEMPTION_RATE; +TUNABLE(int, default_preemption_rate, "preempt", 
DEFAULT_PREEMPTION_RATE); #define DEFAULT_BG_PREEMPTION_RATE 400 /* (1/s) */ -int default_bg_preemption_rate = DEFAULT_BG_PREEMPTION_RATE; +TUNABLE(int, default_bg_preemption_rate, "bg_preempt", DEFAULT_BG_PREEMPTION_RATE); -#define MAX_UNSAFE_QUANTA 800 -int max_unsafe_quanta = MAX_UNSAFE_QUANTA; +#define MAX_UNSAFE_QUANTA 800 +TUNABLE(int, max_unsafe_quanta, "unsafe", MAX_UNSAFE_QUANTA); -#define MAX_POLL_QUANTA 2 -int max_poll_quanta = MAX_POLL_QUANTA; +#define MAX_POLL_QUANTA 2 +TUNABLE(int, max_poll_quanta, "poll", MAX_POLL_QUANTA); #define SCHED_POLL_YIELD_SHIFT 4 /* 1/16 */ -int sched_poll_yield_shift = SCHED_POLL_YIELD_SHIFT; +int sched_poll_yield_shift = SCHED_POLL_YIELD_SHIFT; uint64_t max_poll_computation; @@ -181,6 +184,8 @@ uint32_t default_timeshare_constraint; uint32_t max_rt_quantum; uint32_t min_rt_quantum; +uint32_t rt_constraint_threshold; + #if defined(CONFIG_SCHED_TIMESHARE_CORE) unsigned sched_tick; @@ -225,6 +230,7 @@ extern char *proc_name_address(struct proc *p); #endif /* __arm__ || __arm64__ */ uint64_t sched_one_second_interval; +boolean_t allow_direct_handoff = TRUE; /* Forwards */ @@ -304,6 +310,7 @@ static int cpu_throttle_enabled = 1; void sched_init(void) { + boolean_t direct_handoff = FALSE; kprintf("Scheduler: Default of %s\n", SCHED(sched_name)); if (!PE_parse_boot_argn("sched_pri_decay_limit", &sched_pri_decay_band_limit, sizeof(sched_pri_decay_band_limit))) { @@ -331,6 +338,10 @@ sched_init(void) SCHED(pset_init)(&pset0); SCHED(processor_init)(master_processor); + + if (PE_parse_boot_argn("direct_handoff", &direct_handoff, sizeof(direct_handoff))) { + allow_direct_handoff = direct_handoff; + } } void @@ -404,7 +415,7 @@ sched_timeshare_timebase_init(void) /* timeshare load calculation interval & deadline initialization */ clock_interval_to_absolutetime_interval(sched_load_compute_interval_us, NSEC_PER_USEC, &sched_load_compute_interval_abs); - sched_load_compute_deadline = sched_load_compute_interval_abs; + os_atomic_init(&sched_load_compute_deadline, sched_load_compute_interval_abs); /* * Compute conversion factor from usage to @@ -438,40 +449,11 @@ sched_timeshare_timebase_init(void) void pset_rt_init(processor_set_t pset) { - rt_lock_init(pset); - os_atomic_init(&pset->rt_runq.count, 0); queue_init(&pset->rt_runq.queue); memset(&pset->rt_runq.runq_stats, 0, sizeof pset->rt_runq.runq_stats); } -rt_queue_t -sched_rtglobal_runq(processor_set_t pset) -{ - (void)pset; - - return &pset0.rt_runq; -} - -void -sched_rtglobal_init(processor_set_t pset) -{ - if (pset == &pset0) { - return pset_rt_init(pset); - } - - /* Only pset0 rt_runq is used, so make it easy to detect - * buggy accesses to others. 
- */ - memset(&pset->rt_runq, 0xfd, sizeof pset->rt_runq); -} - -void -sched_rtglobal_queue_shutdown(processor_t processor) -{ - (void)processor; -} - static void sched_realtime_timebase_init(void) { @@ -487,6 +469,11 @@ sched_realtime_timebase_init(void) 50, 1000 * NSEC_PER_USEC, &abstime); assert((abstime >> 32) == 0 && (uint32_t)abstime != 0); max_rt_quantum = (uint32_t)abstime; + + /* constraint threshold for sending backup IPIs (4 ms) */ + clock_interval_to_absolutetime_interval(4, NSEC_PER_MSEC, &abstime); + assert((abstime >> 32) == 0 && (uint32_t)abstime != 0); + rt_constraint_threshold = (uint32_t)abstime; } void @@ -517,7 +504,7 @@ sched_steal_thread_DISABLED(processor_set_t pset) bool sched_steal_thread_enabled(processor_set_t pset) { - return pset->node->pset_count > 1; + return bit_count(pset->node->pset_map) > 1; } #if defined(CONFIG_SCHED_TIMESHARE_CORE) @@ -649,6 +636,9 @@ thread_unblock( thread->wait_timer_is_set = FALSE; } + boolean_t aticontext, pidle; + ml_get_power_state(&aticontext, &pidle); + /* * Update scheduling state: not waiting, * set running. @@ -670,6 +660,12 @@ thread_unblock( /* Update the runnable thread count */ new_run_count = SCHED(run_count_incr)(thread); + +#if CONFIG_SCHED_AUTO_JOIN + if (aticontext == FALSE && work_interval_should_propagate(cthread, thread)) { + work_interval_auto_join_propagate(cthread, thread); + } +#endif /*CONFIG_SCHED_AUTO_JOIN */ } else { /* * Either the thread is idling in place on another processor, @@ -683,7 +679,6 @@ thread_unblock( new_run_count = os_atomic_load(&sched_run_buckets[TH_BUCKET_RUN], relaxed); } - /* * Calculate deadline for real-time threads. */ @@ -708,13 +703,11 @@ thread_unblock( * DRK: consider removing the callout wakeup counters in the future * they're present for verification at the moment. */ - boolean_t aticontext, pidle; - ml_get_power_state(&aticontext, &pidle); if (__improbable(aticontext && !(thread_get_tag_internal(thread) & THREAD_TAG_CALLOUT))) { DTRACE_SCHED2(iwakeup, struct thread *, thread, struct proc *, thread->task->bsd_info); - uint64_t ttd = PROCESSOR_DATA(current_processor(), timer_call_ttd); + uint64_t ttd = current_processor()->timer_call_ttd; if (ttd) { if (ttd <= timer_deadline_tracking_bin_1) { @@ -748,8 +741,8 @@ thread_unblock( } if (thread_get_tag_internal(thread) & THREAD_TAG_CALLOUT) { - thread->callout_woken_from_icontext = aticontext; - thread->callout_woken_from_platform_idle = pidle; + thread->callout_woken_from_icontext = !!aticontext; + thread->callout_woken_from_platform_idle = !!pidle; thread->callout_woke_thread = FALSE; } @@ -769,6 +762,30 @@ thread_unblock( return ready_for_runq; } +/* + * Routine: thread_allowed_for_handoff + * Purpose: + * Check if the thread is allowed for handoff operation + * Conditions: + * thread lock held, IPC locks may be held. + * TODO: In future, do not allow handoff if threads have different cluster + * recommendations. 
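+ *
+ *	Handoff is only permitted when both the waker and the woken thread
+ *	are realtime (and allow_direct_handoff is set); in that case
+ *	thread_go() called with WQ_OPTION_HANDOFF stashes the thread in
+ *	self->handoff_thread instead of calling thread_setrun(), so the
+ *	waker can switch to it directly.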
+ */ +boolean_t +thread_allowed_for_handoff( + thread_t thread) +{ + thread_t self = current_thread(); + + if (allow_direct_handoff && + thread->sched_mode == TH_MODE_REALTIME && + self->sched_mode == TH_MODE_REALTIME) { + return TRUE; + } + + return FALSE; +} + /* * Routine: thread_go * Purpose: @@ -785,8 +802,11 @@ thread_unblock( kern_return_t thread_go( thread_t thread, - wait_result_t wresult) + wait_result_t wresult, + waitq_options_t option) { + thread_t self = current_thread(); + assert_thread_magic(thread); assert(thread->at_safe_point == FALSE); @@ -802,7 +822,14 @@ thread_go( backtrace(&thread->thread_wakeup_bt[0], (sizeof(thread->thread_wakeup_bt) / sizeof(uintptr_t)), NULL); #endif - thread_setrun(thread, SCHED_PREEMPT | SCHED_TAILQ); + if ((option & WQ_OPTION_HANDOFF) && + thread_allowed_for_handoff(thread)) { + thread_reference(thread); + assert(self->handoff_thread == NULL); + self->handoff_thread = thread; + } else { + thread_setrun(thread, SCHED_PREEMPT | SCHED_TAILQ); + } } return KERN_SUCCESS; @@ -1380,7 +1407,7 @@ clear_wait_internal( /* TODO: Can we instead assert TH_TERMINATE is not set? */ if ((thread->state & (TH_WAIT | TH_TERMINATE)) == TH_WAIT) { - return thread_go(thread, wresult); + return thread_go(thread, wresult, WQ_OPTION_NONE); } else { return KERN_NOT_WAITING; } @@ -1734,7 +1761,6 @@ sched_vm_group_maintenance(void) int sched_smt_balance = 1; #endif -#if __SMP__ /* Invoked with pset locked, returns with pset unlocked */ void sched_SMT_balance(processor_t cprocessor, processor_set_t cpset) @@ -1787,14 +1813,35 @@ smt_balance_exit: sched_ipi_perform(ast_processor, ipi_type); } } -#else -/* Invoked with pset locked, returns with pset unlocked */ -void -sched_SMT_balance(__unused processor_t cprocessor, processor_set_t cpset) + +static cpumap_t +pset_available_cpumap(processor_set_t pset) { - pset_unlock(cpset); + return (pset->cpu_state_map[PROCESSOR_IDLE] | pset->cpu_state_map[PROCESSOR_DISPATCHING] | pset->cpu_state_map[PROCESSOR_RUNNING]) & + pset->recommended_bitmask; +} + +static cpumap_t +pset_available_but_not_running_cpumap(processor_set_t pset) +{ + return (pset->cpu_state_map[PROCESSOR_IDLE] | pset->cpu_state_map[PROCESSOR_DISPATCHING]) & + pset->recommended_bitmask; +} + +bool +pset_has_stealable_threads(processor_set_t pset) +{ + pset_assert_locked(pset); + + cpumap_t avail_map = pset_available_but_not_running_cpumap(pset); + /* + * Secondary CPUs never steal, so allow stealing of threads if there are more threads than + * available primary CPUs + */ + avail_map &= pset->primary_map; + + return (pset->pset_runq.count > 0) && ((pset->pset_runq.count + rt_runq_count(pset)) > bit_count(avail_map)); } -#endif /* __SMP__ */ /* * Called with pset locked, on a processor that is committing to run a new thread @@ -1804,6 +1851,8 @@ sched_SMT_balance(__unused processor_t cprocessor, processor_set_t cpset) static void pset_commit_processor_to_new_thread(processor_set_t pset, processor_t processor, thread_t new_thread) { + pset_assert_locked(pset); + if (processor->state == PROCESSOR_DISPATCHING || processor->state == PROCESSOR_IDLE) { assert(current_thread() == processor->idle_thread); @@ -1817,12 +1866,54 @@ pset_commit_processor_to_new_thread(processor_set_t pset, processor_t processor, } processor_state_update_from_thread(processor, new_thread); + + if (new_thread->sched_pri >= BASEPRI_RTQUEUES) { + bit_set(pset->realtime_map, processor->cpu_id); + } else { + bit_clear(pset->realtime_map, processor->cpu_id); + } + + pset_node_t node = pset->node; + + if 
(bit_count(node->pset_map) == 1) { + /* Node has only a single pset, so skip node pset map updates */ + return; + } + + cpumap_t avail_map = pset_available_cpumap(pset); + + if (new_thread->sched_pri >= BASEPRI_RTQUEUES) { + if ((avail_map & pset->realtime_map) == avail_map) { + /* No more non-RT CPUs in this pset */ + atomic_bit_clear(&node->pset_non_rt_map, pset->pset_id, memory_order_relaxed); + } + avail_map &= pset->primary_map; + if ((avail_map & pset->realtime_map) == avail_map) { + /* No more non-RT primary CPUs in this pset */ + atomic_bit_clear(&node->pset_non_rt_primary_map, pset->pset_id, memory_order_relaxed); + } + } else { + if ((avail_map & pset->realtime_map) != avail_map) { + if (!bit_test(atomic_load(&node->pset_non_rt_map), pset->pset_id)) { + atomic_bit_set(&node->pset_non_rt_map, pset->pset_id, memory_order_relaxed); + } + } + avail_map &= pset->primary_map; + if ((avail_map & pset->realtime_map) != avail_map) { + if (!bit_test(atomic_load(&node->pset_non_rt_primary_map), pset->pset_id)) { + atomic_bit_set(&node->pset_non_rt_primary_map, pset->pset_id, memory_order_relaxed); + } + } + } } -static processor_t choose_processor_for_realtime_thread(processor_set_t pset); +static processor_t choose_processor_for_realtime_thread(processor_set_t pset, processor_t skip_processor, bool consider_secondaries); static bool all_available_primaries_are_running_realtime_threads(processor_set_t pset); +#if defined(__x86_64__) static bool these_processors_are_running_realtime_threads(processor_set_t pset, uint64_t these_map); +#endif static bool sched_ok_to_run_realtime_thread(processor_set_t pset, processor_t processor); +static bool processor_is_fast_track_candidate_for_realtime_thread(processor_set_t pset, processor_t processor); int sched_allow_rt_smt = 1; int sched_avoid_cpu0 = 1; @@ -1949,33 +2040,29 @@ restart: */ if (thread->sched_pri >= BASEPRI_RTQUEUES && processor->first_timeslice) { if (rt_runq_count(pset) > 0) { - rt_lock_lock(pset); + thread_t next_rt = qe_queue_first(&SCHED(rt_runq)(pset)->queue, struct thread, runq_links); - if (rt_runq_count(pset) > 0) { - thread_t next_rt = qe_queue_first(&SCHED(rt_runq)(pset)->queue, struct thread, runq_links); - - if (next_rt->realtime.deadline < processor->deadline && - (next_rt->bound_processor == PROCESSOR_NULL || - next_rt->bound_processor == processor)) { - /* The next RT thread is better, so pick it off the runqueue. */ - goto pick_new_rt_thread; - } + if (next_rt->realtime.deadline < processor->deadline && + (next_rt->bound_processor == PROCESSOR_NULL || + next_rt->bound_processor == processor)) { + /* The next RT thread is better, so pick it off the runqueue. */ + goto pick_new_rt_thread; } - - rt_lock_unlock(pset); } /* This is still the best RT thread to run. 
*/ processor->deadline = thread->realtime.deadline; - sched_update_pset_load_average(pset); + sched_update_pset_load_average(pset, 0); processor_t next_rt_processor = PROCESSOR_NULL; sched_ipi_type_t next_rt_ipi_type = SCHED_IPI_NONE; - if (rt_runq_count(pset) > 0) { - next_rt_processor = choose_processor_for_realtime_thread(pset); + if (rt_runq_count(pset) - bit_count(pset->pending_AST_URGENT_cpu_mask) > 0) { + next_rt_processor = choose_processor_for_realtime_thread(pset, processor, true); if (next_rt_processor) { + SCHED_DEBUG_CHOOSE_PROCESSOR_KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_CHOOSE_PROCESSOR) | DBG_FUNC_NONE, + (uintptr_t)0, (uintptr_t)-4, next_rt_processor->cpu_id, next_rt_processor->state, 0); if (next_rt_processor->state == PROCESSOR_IDLE) { pset_update_processor_state(pset, next_rt_processor, PROCESSOR_DISPATCHING); } @@ -1996,7 +2083,7 @@ restart: /* This thread is still the highest priority runnable (non-idle) thread */ processor->deadline = UINT64_MAX; - sched_update_pset_load_average(pset); + sched_update_pset_load_average(pset, 0); pset_unlock(pset); return thread; @@ -2014,63 +2101,58 @@ restart: /* OK, so we're not going to run the current thread. Look at the RT queue. */ bool ok_to_run_realtime_thread = sched_ok_to_run_realtime_thread(pset, processor); if ((rt_runq_count(pset) > 0) && ok_to_run_realtime_thread) { - rt_lock_lock(pset); + thread_t next_rt = qe_queue_first(&SCHED(rt_runq)(pset)->queue, struct thread, runq_links); - if ((rt_runq_count(pset) > 0) && ok_to_run_realtime_thread) { - thread_t next_rt = qe_queue_first(&SCHED(rt_runq)(pset)->queue, struct thread, runq_links); - - if (__probable((next_rt->bound_processor == PROCESSOR_NULL || - (next_rt->bound_processor == processor)))) { + if (__probable((next_rt->bound_processor == PROCESSOR_NULL || + (next_rt->bound_processor == processor)))) { pick_new_rt_thread: - new_thread = qe_dequeue_head(&SCHED(rt_runq)(pset)->queue, struct thread, runq_links); + new_thread = qe_dequeue_head(&SCHED(rt_runq)(pset)->queue, struct thread, runq_links); - new_thread->runq = PROCESSOR_NULL; - SCHED_STATS_RUNQ_CHANGE(&SCHED(rt_runq)(pset)->runq_stats, rt_runq_count(pset)); - rt_runq_count_decr(pset); + new_thread->runq = PROCESSOR_NULL; + SCHED_STATS_RUNQ_CHANGE(&SCHED(rt_runq)(pset)->runq_stats, rt_runq_count(pset)); + rt_runq_count_decr(pset); - processor->deadline = new_thread->realtime.deadline; + processor->deadline = new_thread->realtime.deadline; - pset_commit_processor_to_new_thread(pset, processor, new_thread); + pset_commit_processor_to_new_thread(pset, processor, new_thread); - rt_lock_unlock(pset); - sched_update_pset_load_average(pset); + sched_update_pset_load_average(pset, 0); - processor_t ast_processor = PROCESSOR_NULL; - processor_t next_rt_processor = PROCESSOR_NULL; - sched_ipi_type_t ipi_type = SCHED_IPI_NONE; - sched_ipi_type_t next_rt_ipi_type = SCHED_IPI_NONE; + processor_t ast_processor = PROCESSOR_NULL; + processor_t next_rt_processor = PROCESSOR_NULL; + sched_ipi_type_t ipi_type = SCHED_IPI_NONE; + sched_ipi_type_t next_rt_ipi_type = SCHED_IPI_NONE; - if (processor->processor_secondary != NULL) { - processor_t sprocessor = processor->processor_secondary; - if ((sprocessor->state == PROCESSOR_RUNNING) || (sprocessor->state == PROCESSOR_DISPATCHING)) { - ipi_type = sched_ipi_action(sprocessor, NULL, false, SCHED_IPI_EVENT_SMT_REBAL); - ast_processor = sprocessor; - } + if (processor->processor_secondary != NULL) { + processor_t sprocessor = processor->processor_secondary; + if 
((sprocessor->state == PROCESSOR_RUNNING) || (sprocessor->state == PROCESSOR_DISPATCHING)) { + ipi_type = sched_ipi_action(sprocessor, NULL, false, SCHED_IPI_EVENT_SMT_REBAL); + ast_processor = sprocessor; } - if (rt_runq_count(pset) > 0) { - next_rt_processor = choose_processor_for_realtime_thread(pset); - if (next_rt_processor) { - if (next_rt_processor->state == PROCESSOR_IDLE) { - pset_update_processor_state(pset, next_rt_processor, PROCESSOR_DISPATCHING); - } - next_rt_ipi_type = sched_ipi_action(next_rt_processor, NULL, false, SCHED_IPI_EVENT_PREEMPT); + } + if (rt_runq_count(pset) - bit_count(pset->pending_AST_URGENT_cpu_mask) > 0) { + next_rt_processor = choose_processor_for_realtime_thread(pset, processor, true); + if (next_rt_processor) { + SCHED_DEBUG_CHOOSE_PROCESSOR_KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_CHOOSE_PROCESSOR) | DBG_FUNC_NONE, + (uintptr_t)0, (uintptr_t)-5, next_rt_processor->cpu_id, next_rt_processor->state, 0); + if (next_rt_processor->state == PROCESSOR_IDLE) { + pset_update_processor_state(pset, next_rt_processor, PROCESSOR_DISPATCHING); } + next_rt_ipi_type = sched_ipi_action(next_rt_processor, NULL, false, SCHED_IPI_EVENT_PREEMPT); } - pset_unlock(pset); - - if (ast_processor) { - sched_ipi_perform(ast_processor, ipi_type); - } + } + pset_unlock(pset); - if (next_rt_processor) { - sched_ipi_perform(next_rt_processor, next_rt_ipi_type); - } + if (ast_processor) { + sched_ipi_perform(ast_processor, ipi_type); + } - return new_thread; + if (next_rt_processor) { + sched_ipi_perform(next_rt_processor, next_rt_ipi_type); } - } - rt_lock_unlock(pset); + return new_thread; + } } if (secondary_can_only_run_realtime_thread) { goto idle; @@ -2080,9 +2162,8 @@ pick_new_rt_thread: /* No RT threads, so let's look at the regular threads. */ if ((new_thread = SCHED(choose_thread)(processor, MINPRI, *reason)) != THREAD_NULL) { - sched_update_pset_load_average(pset); - pset_commit_processor_to_new_thread(pset, processor, new_thread); + sched_update_pset_load_average(pset, 0); processor_t ast_processor = PROCESSOR_NULL; sched_ipi_type_t ipi_type = SCHED_IPI_NONE; @@ -2107,8 +2188,7 @@ pick_new_rt_thread: goto idle; } -#if __SMP__ - if (SCHED(steal_thread_enabled)(pset)) { + if (SCHED(steal_thread_enabled)(pset) && (processor->processor_primary == processor)) { /* * No runnable threads, attempt to steal * from other processors. Returns with pset lock dropped. @@ -2147,7 +2227,6 @@ pick_new_rt_thread: goto restart; } } -#endif idle: /* @@ -2159,12 +2238,8 @@ idle: processor_state_update_idle(processor); } -#if __SMP__ /* Invoked with pset locked, returns with pset unlocked */ SCHED(processor_balance)(processor, pset); -#else - pset_unlock(pset); -#endif new_thread = processor->idle_thread; } while (new_thread == THREAD_NULL); @@ -2210,7 +2285,8 @@ thread_invoke( #endif #if defined(CONFIG_SCHED_TIMESHARE_CORE) - if ((thread->state & TH_IDLE) == 0) { + if (!((thread->state & TH_IDLE) != 0 || + ((reason & AST_HANDOFF) && self->sched_mode == TH_MODE_REALTIME))) { sched_timeshare_consider_maintenance(ctime); } #endif @@ -2297,14 +2373,14 @@ thread_invoke( self->last_run_time = ctime; processor_timer_switch_thread(ctime, &thread->system_timer); timer_update(&thread->runnable_timer, ctime); - PROCESSOR_DATA(processor, kernel_timer) = &thread->system_timer; + processor->kernel_timer = &thread->system_timer; /* * Since non-precise user/kernel time doesn't update the state timer * during privilege transitions, synthesize an event now. 
*/ if (!thread->precise_user_kernel_time) { - timer_update(PROCESSOR_DATA(processor, current_state), ctime); + timer_update(processor->current_state, ctime); } KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, @@ -2453,14 +2529,14 @@ need_stack: self->last_run_time = ctime; processor_timer_switch_thread(ctime, &thread->system_timer); timer_update(&thread->runnable_timer, ctime); - PROCESSOR_DATA(processor, kernel_timer) = &thread->system_timer; + processor->kernel_timer = &thread->system_timer; /* * Since non-precise user/kernel time doesn't update the state timer * during privilege transitions, synthesize an event now. */ if (!thread->precise_user_kernel_time) { - timer_update(PROCESSOR_DATA(processor, current_state), ctime); + timer_update(processor->current_state, ctime); } KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, @@ -2644,6 +2720,7 @@ thread_dispatch( thread_t self) { processor_t processor = self->last_processor; + bool was_idle = false; assert(processor == current_processor()); assert(self == current_thread_volatile()); @@ -2691,6 +2768,7 @@ thread_dispatch( } if (thread->state & TH_IDLE) { + was_idle = true; KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, MACHDBG_CODE(DBG_MACH_SCHED, MACH_DISPATCH) | DBG_FUNC_NONE, (uintptr_t)thread_tid(thread), 0, thread->state, @@ -2721,6 +2799,14 @@ thread_dispatch( (consumed - thread->t_deduct_bank_ledger_time)); } thread->t_deduct_bank_ledger_time = 0; + if (consumed > 0) { + /* + * This should never be negative, but in traces we are seeing some instances + * of consumed being negative. + * thread_dispatch() thread CPU consumed calculation sometimes results in negative value + */ + sched_update_pset_avg_execution_time(current_processor()->processor_set, consumed, processor->last_dispatch, thread->th_sched_bucket); + } } wake_lock(thread); @@ -2878,6 +2964,12 @@ thread_dispatch( new_run_count = SCHED(run_count_decr)(thread); +#if CONFIG_SCHED_AUTO_JOIN + if ((thread->sched_flags & TH_SFLAG_THREAD_GROUP_AUTO_JOIN) != 0) { + work_interval_auto_join_unwind(thread); + } +#endif /* CONFIG_SCHED_AUTO_JOIN */ + #if CONFIG_SCHED_SFI if (thread->reason & AST_SFI) { thread->wait_sfi_begin_time = processor->last_dispatch; @@ -2910,12 +3002,17 @@ thread_dispatch( } } } + /* + * The thread could have been added to the termination queue, so it's + * unsafe to use after this point. + */ + thread = THREAD_NULL; } int urgency = THREAD_URGENCY_NONE; uint64_t latency = 0; - /* Update (new) current thread and reprogram quantum timer */ + /* Update (new) current thread and reprogram running timers */ thread_lock(self); if (!(self->state & TH_IDLE)) { @@ -2954,15 +3051,23 @@ thread_dispatch( /* * Set up quantum timer and timeslice. */ - processor->quantum_end = processor->last_dispatch + self->quantum_remaining; - timer_call_quantum_timer_enter(&processor->quantum_timer, self, - processor->quantum_end, processor->last_dispatch); + processor->quantum_end = processor->last_dispatch + + self->quantum_remaining; + running_timer_setup(processor, RUNNING_TIMER_QUANTUM, self, + processor->quantum_end, processor->last_dispatch); + if (was_idle) { + /* + * kperf's running timer is active whenever the idle thread for a + * CPU is not running. 
+ */ + kperf_running_setup(processor, processor->last_dispatch); + } + running_timers_activate(processor); processor->first_timeslice = TRUE; } else { - timer_call_quantum_timer_cancel(&processor->quantum_timer); + running_timers_deactivate(processor); processor->first_timeslice = FALSE; - thread_tell_urgency(THREAD_URGENCY_NONE, 0, 0, 0, self); } @@ -3096,6 +3201,14 @@ thread_run( reason = AST_HANDOFF; } + /* + * If this thread hadn't been setrun'ed, it + * might not have a chosen processor, so give it one + */ + if (new_thread->chosen_processor == NULL) { + new_thread->chosen_processor = current_processor(); + } + self->continuation = continuation; self->parameter = parameter; @@ -3340,32 +3453,100 @@ run_queue_peek( } } -/* Assumes RT lock is not held, and acquires splsched/rt_lock itself */ +rt_queue_t +sched_rtlocal_runq(processor_set_t pset) +{ + return &pset->rt_runq; +} + void -sched_rtglobal_runq_scan(sched_update_scan_context_t scan_context) +sched_rtlocal_init(processor_set_t pset) { - spl_t s; + pset_rt_init(pset); +} + +void +sched_rtlocal_queue_shutdown(processor_t processor) +{ + processor_set_t pset = processor->processor_set; thread_t thread; + queue_head_t tqueue; - processor_set_t pset = &pset0; + pset_lock(pset); - s = splsched(); - rt_lock_lock(pset); + /* We only need to migrate threads if this is the last active or last recommended processor in the pset */ + if ((pset->online_processor_count > 0) && pset_is_recommended(pset)) { + pset_unlock(pset); + return; + } - qe_foreach_element_safe(thread, &pset->rt_runq.queue, runq_links) { - if (thread->last_made_runnable_time < scan_context->earliest_rt_make_runnable_time) { - scan_context->earliest_rt_make_runnable_time = thread->last_made_runnable_time; - } + queue_init(&tqueue); + + while (rt_runq_count(pset) > 0) { + thread = qe_dequeue_head(&pset->rt_runq.queue, struct thread, runq_links); + thread->runq = PROCESSOR_NULL; + SCHED_STATS_RUNQ_CHANGE(&pset->rt_runq.runq_stats, rt_runq_count(pset)); + rt_runq_count_decr(pset); + enqueue_tail(&tqueue, &thread->runq_links); + } + sched_update_pset_load_average(pset, 0); + pset_unlock(pset); + + qe_foreach_element_safe(thread, &tqueue, runq_links) { + remqueue(&thread->runq_links); + + thread_lock(thread); + + thread_setrun(thread, SCHED_TAILQ); + + thread_unlock(thread); } +} + +/* Assumes RT lock is not held, and acquires splsched/rt_lock itself */ +void +sched_rtlocal_runq_scan(sched_update_scan_context_t scan_context) +{ + thread_t thread; + + pset_node_t node = &pset_node0; + processor_set_t pset = node->psets; + + spl_t s = splsched(); + do { + while (pset != NULL) { + pset_lock(pset); + + qe_foreach_element_safe(thread, &pset->rt_runq.queue, runq_links) { + if (thread->last_made_runnable_time < scan_context->earliest_rt_make_runnable_time) { + scan_context->earliest_rt_make_runnable_time = thread->last_made_runnable_time; + } + } - rt_lock_unlock(pset); + pset_unlock(pset); + + pset = pset->pset_list; + } + } while (((node = node->node_list) != NULL) && ((pset = node->psets) != NULL)); splx(s); } int64_t -sched_rtglobal_runq_count_sum(void) +sched_rtlocal_runq_count_sum(void) { - return pset0.rt_runq.runq_stats.count_sum; + pset_node_t node = &pset_node0; + processor_set_t pset = node->psets; + int64_t count = 0; + + do { + while (pset != NULL) { + count += pset->rt_runq.runq_stats.count_sum; + + pset = pset->pset_list; + } + } while (((node = node->node_list) != NULL) && ((pset = node->psets) != NULL)); + + return count; } /* @@ -3380,7 +3561,7 @@ 
realtime_queue_insert(processor_t processor, processor_set_t pset, thread_t thre uint64_t deadline = thread->realtime.deadline; boolean_t preempt = FALSE; - rt_lock_lock(pset); + pset_assert_locked(pset); if (queue_empty(queue)) { enqueue_tail(queue, &thread->runq_links); @@ -3409,11 +3590,36 @@ realtime_queue_insert(processor_t processor, processor_set_t pset, thread_t thre SCHED_STATS_RUNQ_CHANGE(&SCHED(rt_runq)(pset)->runq_stats, rt_runq_count(pset)); rt_runq_count_incr(pset); - rt_lock_unlock(pset); - return preempt; } +#define MAX_BACKUP_PROCESSORS 7 +#if defined(__x86_64__) +#define DEFAULT_BACKUP_PROCESSORS 1 +#else +#define DEFAULT_BACKUP_PROCESSORS 0 +#endif + +int sched_rt_n_backup_processors = DEFAULT_BACKUP_PROCESSORS; + +int +sched_get_rt_n_backup_processors(void) +{ + return sched_rt_n_backup_processors; +} + +void +sched_set_rt_n_backup_processors(int n) +{ + if (n < 0) { + n = 0; + } else if (n > MAX_BACKUP_PROCESSORS) { + n = MAX_BACKUP_PROCESSORS; + } + + sched_rt_n_backup_processors = n; +} + /* * realtime_setrun: * @@ -3424,67 +3630,105 @@ realtime_queue_insert(processor_t processor, processor_set_t pset, thread_t thre */ static void realtime_setrun( - processor_t processor, + processor_t chosen_processor, thread_t thread) { - processor_set_t pset = processor->processor_set; + processor_set_t pset = chosen_processor->processor_set; pset_assert_locked(pset); ast_t preempt; - sched_ipi_type_t ipi_type = SCHED_IPI_NONE; + int n_backup = 0; - thread->chosen_processor = processor; + if (thread->realtime.constraint <= rt_constraint_threshold) { + n_backup = sched_rt_n_backup_processors; + } + assert((n_backup >= 0) && (n_backup <= MAX_BACKUP_PROCESSORS)); + + sched_ipi_type_t ipi_type[MAX_BACKUP_PROCESSORS + 1] = {}; + processor_t ipi_processor[MAX_BACKUP_PROCESSORS + 1] = {}; + + thread->chosen_processor = chosen_processor; /* */ assert(thread->bound_processor == PROCESSOR_NULL); - if (processor->current_pri < BASEPRI_RTQUEUES) { - preempt = (AST_PREEMPT | AST_URGENT); - } else if (thread->realtime.deadline < processor->deadline) { - preempt = (AST_PREEMPT | AST_URGENT); - } else { - preempt = AST_NONE; - } + realtime_queue_insert(chosen_processor, pset, thread); - realtime_queue_insert(processor, pset, thread); + processor_t processor = chosen_processor; + bool chosen_process_is_secondary = chosen_processor->processor_primary != chosen_processor; - ipi_type = SCHED_IPI_NONE; - if (preempt != AST_NONE) { - if (processor->state == PROCESSOR_IDLE) { - processor_state_update_from_thread(processor, thread); - processor->deadline = thread->realtime.deadline; - pset_update_processor_state(pset, processor, PROCESSOR_DISPATCHING); - if (processor == current_processor()) { - ast_on(preempt); - } else { - ipi_type = sched_ipi_action(processor, thread, true, SCHED_IPI_EVENT_PREEMPT); + int count = 0; + for (int i = 0; i <= n_backup; i++) { + if (i > 0) { + processor = choose_processor_for_realtime_thread(pset, chosen_processor, chosen_process_is_secondary); + if ((processor == PROCESSOR_NULL) || (sched_avoid_cpu0 && (processor->cpu_id == 0))) { + break; } - } else if (processor->state == PROCESSOR_DISPATCHING) { - if ((processor->current_pri < thread->sched_pri) || (processor->deadline > thread->realtime.deadline)) { + SCHED_DEBUG_CHOOSE_PROCESSOR_KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_CHOOSE_PROCESSOR) | DBG_FUNC_NONE, + (uintptr_t)thread_tid(thread), (uintptr_t)-3, processor->cpu_id, processor->state, 0); + } + ipi_type[i] = SCHED_IPI_NONE; + ipi_processor[i] = 
processor; + count++; + + if (processor->current_pri < BASEPRI_RTQUEUES) { + preempt = (AST_PREEMPT | AST_URGENT); + } else if (thread->realtime.deadline < processor->deadline) { + preempt = (AST_PREEMPT | AST_URGENT); + } else { + preempt = AST_NONE; + } + + if (preempt != AST_NONE) { + if (processor->state == PROCESSOR_IDLE) { processor_state_update_from_thread(processor, thread); processor->deadline = thread->realtime.deadline; - } - } else { - if (processor == current_processor()) { - ast_on(preempt); + pset_update_processor_state(pset, processor, PROCESSOR_DISPATCHING); + if (processor == current_processor()) { + ast_on(preempt); - if ((preempt & AST_URGENT) == AST_URGENT) { - bit_set(pset->pending_AST_URGENT_cpu_mask, processor->cpu_id); - } + if ((preempt & AST_URGENT) == AST_URGENT) { + bit_set(pset->pending_AST_URGENT_cpu_mask, processor->cpu_id); + } - if ((preempt & AST_PREEMPT) == AST_PREEMPT) { - bit_set(pset->pending_AST_PREEMPT_cpu_mask, processor->cpu_id); + if ((preempt & AST_PREEMPT) == AST_PREEMPT) { + bit_set(pset->pending_AST_PREEMPT_cpu_mask, processor->cpu_id); + } + } else { + ipi_type[i] = sched_ipi_action(processor, thread, true, SCHED_IPI_EVENT_PREEMPT); + } + } else if (processor->state == PROCESSOR_DISPATCHING) { + if ((processor->current_pri < thread->sched_pri) || (processor->deadline > thread->realtime.deadline)) { + processor_state_update_from_thread(processor, thread); + processor->deadline = thread->realtime.deadline; } } else { - ipi_type = sched_ipi_action(processor, thread, false, SCHED_IPI_EVENT_PREEMPT); + if (processor == current_processor()) { + ast_on(preempt); + + if ((preempt & AST_URGENT) == AST_URGENT) { + bit_set(pset->pending_AST_URGENT_cpu_mask, processor->cpu_id); + } + + if ((preempt & AST_PREEMPT) == AST_PREEMPT) { + bit_set(pset->pending_AST_PREEMPT_cpu_mask, processor->cpu_id); + } + } else { + ipi_type[i] = sched_ipi_action(processor, thread, false, SCHED_IPI_EVENT_PREEMPT); + } } + } else { + /* Selected processor was too busy, just keep thread enqueued and let other processors drain it naturally. */ } - } else { - /* Selected processor was too busy, just keep thread enqueued and let other processors drain it naturally. */ } pset_unlock(pset); - sched_ipi_perform(processor, ipi_type); + + assert((count > 0) && (count <= (n_backup + 1))); + for (int i = 0; i < count; i++) { + assert(ipi_processor[i] != PROCESSOR_NULL); + sched_ipi_perform(ipi_processor[i], ipi_type[i]); + } } @@ -3665,7 +3909,7 @@ processor_setrun( } SCHED(processor_enqueue)(processor, thread, options); - sched_update_pset_load_average(pset); + sched_update_pset_load_average(pset, 0); if (preempt != AST_NONE) { if (processor->state == PROCESSOR_IDLE) { @@ -3748,8 +3992,19 @@ choose_next_pset( return nset; } -/* - * choose_processor: +inline static processor_set_t +change_locked_pset(processor_set_t current_pset, processor_set_t new_pset) +{ + if (current_pset != new_pset) { + pset_unlock(current_pset); + pset_lock(new_pset); + } + + return new_pset; +} + +/* + * choose_processor: * * Choose a processor for the thread, beginning at * the pset. Accepts an optional processor hint in @@ -3805,8 +4060,6 @@ choose_processor( processor = PROCESSOR_NULL; } else if (!processor->is_recommended) { processor = PROCESSOR_NULL; - } else if ((thread->sched_pri >= BASEPRI_RTQUEUES) && !sched_ok_to_run_realtime_thread(pset, processor)) { - processor = PROCESSOR_NULL; } else { switch (processor->state) { case PROCESSOR_START: @@ -3823,7 +4076,11 @@ choose_processor( * idle processor. 
The platform layer had an opportunity to provide * the "least cost idle" processor above. */ - return processor; + if ((thread->sched_pri < BASEPRI_RTQUEUES) || processor_is_fast_track_candidate_for_realtime_thread(pset, processor)) { + return processor; + } + processor = PROCESSOR_NULL; + break; case PROCESSOR_RUNNING: case PROCESSOR_DISPATCHING: /* @@ -3832,7 +4089,7 @@ choose_processor( * to regain their previous executing processor. */ if ((thread->sched_pri >= BASEPRI_RTQUEUES) && - (processor->current_pri < BASEPRI_RTQUEUES)) { + processor_is_fast_track_candidate_for_realtime_thread(pset, processor)) { return processor; } @@ -3887,31 +4144,46 @@ choose_processor( lc_processor = processor; } - do { - int cpuid; + if (thread->sched_pri >= BASEPRI_RTQUEUES) { + pset_node_t node = pset->node; + int consider_secondaries = (!pset->is_SMT) || (bit_count(node->pset_map) == 1) || (node->pset_non_rt_primary_map == 0); + for (; consider_secondaries < 2; consider_secondaries++) { + pset = change_locked_pset(pset, starting_pset); + do { + processor = choose_processor_for_realtime_thread(pset, PROCESSOR_NULL, consider_secondaries); + if (processor) { + return processor; + } - if (thread->sched_pri >= BASEPRI_RTQUEUES) { - processor = choose_processor_for_realtime_thread(pset); - if (processor) { - return processor; - } - } else { - /* - * Choose an idle processor, in pset traversal order - */ + /* NRG Collect processor stats for furthest deadline etc. here */ - uint64_t idle_primary_map = (pset->cpu_state_map[PROCESSOR_IDLE] & - pset->primary_map & - pset->recommended_bitmask); + nset = next_pset(pset); - /* there shouldn't be a pending AST if the processor is idle */ - assert((idle_primary_map & pset->pending_AST_URGENT_cpu_mask) == 0); + if (nset != starting_pset) { + pset = change_locked_pset(pset, nset); + } + } while (nset != starting_pset); + } + /* Or we could just let it change to starting_pset in the loop above */ + pset = change_locked_pset(pset, starting_pset); + } - cpuid = lsb_first(idle_primary_map); - if (cpuid >= 0) { - processor = processor_array[cpuid]; - return processor; - } + do { + /* + * Choose an idle processor, in pset traversal order + */ + + uint64_t idle_primary_map = (pset->cpu_state_map[PROCESSOR_IDLE] & + pset->primary_map & + pset->recommended_bitmask); + + /* there shouldn't be a pending AST if the processor is idle */ + assert((idle_primary_map & pset->pending_AST_URGENT_cpu_mask) == 0); + + int cpuid = lsb_first(idle_primary_map); + if (cpuid >= 0) { + processor = processor_array[cpuid]; + return processor; } /* @@ -4098,10 +4370,7 @@ choose_processor( nset = next_pset(pset); if (nset != starting_pset) { - pset_unlock(pset); - - pset = nset; - pset_lock(pset); + pset = change_locked_pset(pset, nset); } } while (nset != starting_pset); @@ -4139,22 +4408,18 @@ choose_processor( * platforms, simply return the master_processor. */ fallback_processor = true; -#if CONFIG_SCHED_CLUTCH && __AMP__ +#if CONFIG_SCHED_EDGE processor = processor_array[lsb_first(starting_pset->primary_map)]; -#else /* CONFIG_SCHED_CLUTCH && __AMP__ */ +#else /* CONFIG_SCHED_EDGE */ processor = master_processor; -#endif /* CONFIG_SCHED_CLUTCH && __AMP__ */ +#endif /* CONFIG_SCHED_EDGE */ } /* * Check that the correct processor set is * returned locked. 
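 * (change_locked_pset() only drops the previously held pset lock and
 * takes the new pset's lock when the two psets actually differ.)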
*/ - if (pset != processor->processor_set) { - pset_unlock(pset); - pset = processor->processor_set; - pset_lock(pset); - } + pset = change_locked_pset(pset, processor->processor_set); /* * We must verify that the chosen processor is still available. @@ -4176,6 +4441,138 @@ choose_processor( return processor; } +/* + * Default implementation of SCHED(choose_node)() + * for single node systems + */ +pset_node_t +sched_choose_node(__unused thread_t thread) +{ + return &pset_node0; +} + +/* + * choose_starting_pset: + * + * Choose a starting processor set for the thread. + * May return a processor hint within the pset. + * + * Returns a starting processor set, to be used by + * choose_processor. + * + * The thread must be locked. The resulting pset is unlocked on return, + * and is chosen without taking any pset locks. + */ +processor_set_t +choose_starting_pset(pset_node_t node, thread_t thread, processor_t *processor_hint) +{ + processor_set_t pset; + processor_t processor = PROCESSOR_NULL; + + if (thread->affinity_set != AFFINITY_SET_NULL) { + /* + * Use affinity set policy hint. + */ + pset = thread->affinity_set->aset_pset; + } else if (thread->last_processor != PROCESSOR_NULL) { + /* + * Simple (last processor) affinity case. + */ + processor = thread->last_processor; + pset = processor->processor_set; + } else { + /* + * No Affinity case: + * + * Utilitize a per task hint to spread threads + * among the available processor sets. + * NRG this seems like the wrong thing to do. + * See also task->pset_hint = pset in thread_setrun() + */ + task_t task = thread->task; + + pset = task->pset_hint; + if (pset == PROCESSOR_SET_NULL) { + pset = current_processor()->processor_set; + } + + pset = choose_next_pset(pset); + } + + if (!bit_test(node->pset_map, pset->pset_id)) { + /* pset is not from this node so choose one that is */ + int id = lsb_first(node->pset_map); + assert(id >= 0); + pset = pset_array[id]; + } + + if (bit_count(node->pset_map) == 1) { + /* Only a single pset in this node */ + goto out; + } + + bool avoid_cpu0 = false; + +#if defined(__x86_64__) + if ((thread->sched_pri >= BASEPRI_RTQUEUES) && sched_avoid_cpu0) { + /* Avoid the pset containing cpu0 */ + avoid_cpu0 = true; + /* Assert that cpu0 is in pset0. I expect this to be true on __x86_64__ */ + assert(bit_test(pset_array[0]->cpu_bitmask, 0)); + } +#endif + + if (thread->sched_pri >= BASEPRI_RTQUEUES) { + pset_map_t rt_target_map = atomic_load(&node->pset_non_rt_primary_map); + if ((avoid_cpu0 && pset->pset_id == 0) || !bit_test(rt_target_map, pset->pset_id)) { + if (avoid_cpu0) { + rt_target_map = bit_ror64(rt_target_map, 1); + } + int rotid = lsb_first(rt_target_map); + if (rotid >= 0) { + int id = avoid_cpu0 ? ((rotid + 1) & 63) : rotid; + pset = pset_array[id]; + goto out; + } + } + if (!pset->is_SMT || !sched_allow_rt_smt) { + /* All psets are full of RT threads - fall back to choose processor to find the furthest deadline RT thread */ + goto out; + } + rt_target_map = atomic_load(&node->pset_non_rt_map); + if ((avoid_cpu0 && pset->pset_id == 0) || !bit_test(rt_target_map, pset->pset_id)) { + if (avoid_cpu0) { + rt_target_map = bit_ror64(rt_target_map, 1); + } + int rotid = lsb_first(rt_target_map); + if (rotid >= 0) { + int id = avoid_cpu0 ? 
((rotid + 1) & 63) : rotid; + pset = pset_array[id]; + goto out; + } + } + /* All psets are full of RT threads - fall back to choose processor to find the furthest deadline RT thread */ + } else { + pset_map_t idle_map = atomic_load(&node->pset_idle_map); + if (!bit_test(idle_map, pset->pset_id)) { + int next_idle_pset_id = lsb_first(idle_map); + if (next_idle_pset_id >= 0) { + pset = pset_array[next_idle_pset_id]; + } + } + } + +out: + if ((processor != PROCESSOR_NULL) && (processor->processor_set != pset)) { + processor = PROCESSOR_NULL; + } + if (processor != PROCESSOR_NULL) { + *processor_hint = processor; + } + + return pset; +} + /* * thread_setrun: * @@ -4207,59 +4604,23 @@ thread_setrun( assert(thread->runq == PROCESSOR_NULL); -#if __SMP__ if (thread->bound_processor == PROCESSOR_NULL) { /* * Unbound case. */ - if (thread->affinity_set != AFFINITY_SET_NULL) { - /* - * Use affinity set policy hint. - */ - pset = thread->affinity_set->aset_pset; - pset_lock(pset); + processor_t processor_hint = PROCESSOR_NULL; + pset_node_t node = SCHED(choose_node)(thread); + processor_set_t starting_pset = choose_starting_pset(node, thread, &processor_hint); - processor = SCHED(choose_processor)(pset, PROCESSOR_NULL, thread); - pset = processor->processor_set; + pset_lock(starting_pset); - SCHED_DEBUG_CHOOSE_PROCESSOR_KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_CHOOSE_PROCESSOR) | DBG_FUNC_NONE, - (uintptr_t)thread_tid(thread), (uintptr_t)-1, processor->cpu_id, processor->state, 0); - } else if (thread->last_processor != PROCESSOR_NULL) { - /* - * Simple (last processor) affinity case. - */ - processor = thread->last_processor; - pset = processor->processor_set; - pset_lock(pset); - processor = SCHED(choose_processor)(pset, processor, thread); - pset = processor->processor_set; - - SCHED_DEBUG_CHOOSE_PROCESSOR_KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_CHOOSE_PROCESSOR) | DBG_FUNC_NONE, - (uintptr_t)thread_tid(thread), thread->last_processor->cpu_id, processor->cpu_id, processor->state, 0); - } else { - /* - * No Affinity case: - * - * Utilitize a per task hint to spread threads - * among the available processor sets. 
- */ - task_t task = thread->task; - - pset = task->pset_hint; - if (pset == PROCESSOR_SET_NULL) { - pset = current_processor()->processor_set; - } - - pset = choose_next_pset(pset); - pset_lock(pset); - - processor = SCHED(choose_processor)(pset, PROCESSOR_NULL, thread); - pset = processor->processor_set; - task->pset_hint = pset; + processor = SCHED(choose_processor)(starting_pset, processor_hint, thread); + pset = processor->processor_set; + task_t task = thread->task; + task->pset_hint = pset; /* NRG this is done without holding the task lock */ - SCHED_DEBUG_CHOOSE_PROCESSOR_KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_CHOOSE_PROCESSOR) | DBG_FUNC_NONE, - (uintptr_t)thread_tid(thread), (uintptr_t)-1, processor->cpu_id, processor->state, 0); - } + SCHED_DEBUG_CHOOSE_PROCESSOR_KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_CHOOSE_PROCESSOR) | DBG_FUNC_NONE, + (uintptr_t)thread_tid(thread), (uintptr_t)-1, processor->cpu_id, processor->state, 0); } else { /* * Bound case: @@ -4273,13 +4634,6 @@ thread_setrun( SCHED_DEBUG_CHOOSE_PROCESSOR_KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_CHOOSE_PROCESSOR) | DBG_FUNC_NONE, (uintptr_t)thread_tid(thread), (uintptr_t)-2, processor->cpu_id, processor->state, 0); } -#else /* !__SMP__ */ - /* Only one processor to choose */ - assert(thread->bound_processor == PROCESSOR_NULL || thread->bound_processor == master_processor); - processor = master_processor; - pset = processor->processor_set; - pset_lock(pset); -#endif /* !__SMP__ */ /* * Dispatch the thread on the chosen processor. @@ -4373,7 +4727,6 @@ csw_check_locked( } } -#if __SMP__ /* * If the current thread is running on a processor that is no longer recommended, * urgently preempt it, at which point thread_select() should @@ -4382,14 +4735,12 @@ csw_check_locked( if (!processor->is_recommended) { return check_reason | AST_PREEMPT | AST_URGENT; } -#endif result = SCHED(processor_csw_check)(processor); if (result != AST_NONE) { return check_reason | result | (thread_eager_preemption(thread) ? AST_URGENT : AST_NONE); } -#if __SMP__ /* * Same for avoid-processor * @@ -4412,7 +4763,6 @@ csw_check_locked( processor->processor_primary != processor) { return check_reason | AST_PREEMPT; } -#endif if (thread->state & TH_SUSP) { return check_reason | AST_PREEMPT; @@ -4508,14 +4858,14 @@ ast_check(processor_t processor) void set_sched_pri( thread_t thread, - int new_priority, + int16_t new_priority, set_sched_pri_options_t options) { bool is_current_thread = (thread == current_thread()); bool removed_from_runq = false; bool lazy_update = ((options & SETPRI_LAZY) == SETPRI_LAZY); - int old_priority = thread->sched_pri; + int16_t old_priority = thread->sched_pri; /* If we're already at this priority, no need to mess with the runqueue */ if (new_priority == old_priority) { @@ -4636,17 +4986,25 @@ thread_run_queue_remove_for_handoff(thread_t thread) thread_lock(thread); /* - * Check that the thread is not bound - * to a different processor, and that realtime - * is not involved. + * Check that the thread is not bound to a different processor, + * NO_SMT flag is not set on the thread, cluster type of + * processor matches with thread if the thread is pinned to a + * particular cluster and that realtime is not involved. * - * Next, pull it off its run queue. If it - * doesn't come, it's not eligible. + * Next, pull it off its run queue. If it doesn't come, it's not eligible. 
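+ * (With __AMP__, the TH_SFLAG_PCORE_ONLY / TH_SFLAG_ECORE_ONLY checks below
+ * keep a cluster-pinned thread from being pulled for handoff on a processor
+ * of the wrong cluster type.)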
*/ - processor_t processor = current_processor(); - if (processor->current_pri < BASEPRI_RTQUEUES && thread->sched_pri < BASEPRI_RTQUEUES && - (thread->bound_processor == PROCESSOR_NULL || thread->bound_processor == processor)) { + if ((thread->bound_processor == PROCESSOR_NULL || thread->bound_processor == processor) + && (!thread_no_smt(thread)) + && (processor->current_pri < BASEPRI_RTQUEUES) + && (thread->sched_pri < BASEPRI_RTQUEUES) +#if __AMP__ + && ((!(thread->sched_flags & TH_SFLAG_PCORE_ONLY)) || + processor->processor_set->pset_cluster_type == PSET_AMP_P) + && ((!(thread->sched_flags & TH_SFLAG_ECORE_ONLY)) || + processor->processor_set->pset_cluster_type == PSET_AMP_E) +#endif /* __AMP__ */ + ) { if (thread_run_queue_remove(thread)) { pulled_thread = thread; } @@ -4657,6 +5015,58 @@ thread_run_queue_remove_for_handoff(thread_t thread) return pulled_thread; } +/* + * thread_prepare_for_handoff + * + * Make the thread ready for handoff. + * If the thread was runnable then pull it off the runq, if the thread could + * not be pulled, return NULL. + * + * If the thread was woken up from wait for handoff, make sure it is not bound to + * different processor. + * + * Called at splsched + * + * Returns the thread that was pulled or THREAD_NULL if no thread could be pulled. + * This may be different than the thread that was passed in. + */ +thread_t +thread_prepare_for_handoff(thread_t thread, thread_handoff_option_t option) +{ + thread_t pulled_thread = THREAD_NULL; + + if (option & THREAD_HANDOFF_SETRUN_NEEDED) { + processor_t processor = current_processor(); + thread_lock(thread); + + /* + * Check that the thread is not bound to a different processor, + * NO_SMT flag is not set on the thread and cluster type of + * processor matches with thread if the thread is pinned to a + * particular cluster. Call setrun instead if above conditions + * are not satisfied. 
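+	 * (THREAD_HANDOFF_SETRUN_NEEDED covers the thread_go()/WQ_OPTION_HANDOFF
+	 * case: the woken thread was stashed in self->handoff_thread and never
+	 * enqueued, so it must either be taken here or be given to
+	 * thread_setrun(); otherwise the thread is pulled off its run queue.)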
+ */ + if ((thread->bound_processor == PROCESSOR_NULL || thread->bound_processor == processor) + && (!thread_no_smt(thread)) +#if __AMP__ + && ((!(thread->sched_flags & TH_SFLAG_PCORE_ONLY)) || + processor->processor_set->pset_cluster_type == PSET_AMP_P) + && ((!(thread->sched_flags & TH_SFLAG_ECORE_ONLY)) || + processor->processor_set->pset_cluster_type == PSET_AMP_E) +#endif /* __AMP__ */ + ) { + pulled_thread = thread; + } else { + thread_setrun(thread, SCHED_PREEMPT | SCHED_TAILQ); + } + thread_unlock(thread); + } else { + pulled_thread = thread_run_queue_remove_for_handoff(thread); + } + + return pulled_thread; +} + /* * thread_run_queue_remove: * @@ -4708,7 +5118,7 @@ thread_run_queue_remove( processor_set_t pset = processor->processor_set; - rt_lock_lock(pset); + pset_lock(pset); if (thread->runq != PROCESSOR_NULL) { /* @@ -4725,7 +5135,7 @@ thread_run_queue_remove( removed = TRUE; } - rt_lock_unlock(pset); + pset_unlock(pset); return removed; } @@ -4872,12 +5282,13 @@ processor_idle( MACHDBG_CODE(DBG_MACH_SCHED, MACH_IDLE) | DBG_FUNC_START, (uintptr_t)thread_tid(thread), 0, 0, 0, 0); - SCHED_STATS_CPU_IDLE_START(processor); + SCHED_STATS_INC(idle_transitions); + assert(processor->running_timers_active == false); uint64_t ctime = mach_absolute_time(); - timer_switch(&PROCESSOR_DATA(processor, system_state), ctime, &PROCESSOR_DATA(processor, idle_state)); - PROCESSOR_DATA(processor, current_state) = &PROCESSOR_DATA(processor, idle_state); + timer_switch(&processor->system_state, ctime, &processor->idle_state); + processor->current_state = &processor->idle_state; cpu_quiescent_counter_leave(ctime); @@ -4949,8 +5360,8 @@ processor_idle( ctime = mach_absolute_time(); - timer_switch(&PROCESSOR_DATA(processor, idle_state), ctime, &PROCESSOR_DATA(processor, system_state)); - PROCESSOR_DATA(processor, current_state) = &PROCESSOR_DATA(processor, system_state); + timer_switch(&processor->idle_state, ctime, &processor->system_state); + processor->current_state = &processor->system_state; cpu_quiescent_counter_join(ctime); @@ -4969,6 +5380,8 @@ processor_idle( thread_t new_thread = thread_select(current_thread, processor, &reason); thread_unlock(current_thread); + assert(processor->running_timers_active == false); + KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, MACHDBG_CODE(DBG_MACH_SCHED, MACH_IDLE) | DBG_FUNC_END, (uintptr_t)thread_tid(thread), processor->state, (uintptr_t)thread_tid(new_thread), reason, 0); @@ -5071,7 +5484,7 @@ sched_startup(void) #endif /* __arm__ || __arm64__ */ result = kernel_thread_start_priority((thread_continue_t)sched_init_thread, - (void *)SCHED(maintenance_continuation), MAXPRI_KERNEL, &thread); + NULL, MAXPRI_KERNEL, &thread); if (result != KERN_SUCCESS) { panic("sched_startup"); } @@ -5148,6 +5561,7 @@ sched_timeshare_maintenance_continue(void) sched_tick_max_delta = MAX(sched_tick_delta, sched_tick_max_delta); } + scan_context.sched_tick_last_abstime = sched_tick_last_abstime; KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_MAINTENANCE) | DBG_FUNC_START, sched_tick_delta, late_time, 0, 0, 0); @@ -5280,7 +5694,7 @@ sched_timeshare_consider_maintenance(uint64_t ctime) #endif /* CONFIG_SCHED_TIMESHARE_CORE */ void -sched_init_thread(void (*continuation)(void)) +sched_init_thread(void) { thread_block(THREAD_CONTINUE_NULL); @@ -5290,7 +5704,7 @@ sched_init_thread(void (*continuation)(void)) sched_maintenance_thread = thread; - continuation(); + SCHED(maintenance_continuation)(); /*NOTREACHED*/ } @@ -5355,6 +5769,33 @@ thread_update_process_threads(void) 
thread_update_count = 0; } +static boolean_t +runq_scan_thread( + thread_t thread, + sched_update_scan_context_t scan_context) +{ + assert_thread_magic(thread); + + if (thread->sched_stamp != sched_tick && + thread->sched_mode == TH_MODE_TIMESHARE) { + if (thread_update_add_thread(thread) == FALSE) { + return TRUE; + } + } + + if (cpu_throttle_enabled && ((thread->sched_pri <= MAXPRI_THROTTLE) && (thread->base_pri <= MAXPRI_THROTTLE))) { + if (thread->last_made_runnable_time < scan_context->earliest_bg_make_runnable_time) { + scan_context->earliest_bg_make_runnable_time = thread->last_made_runnable_time; + } + } else { + if (thread->last_made_runnable_time < scan_context->earliest_normal_make_runnable_time) { + scan_context->earliest_normal_make_runnable_time = thread->last_made_runnable_time; + } + } + + return FALSE; +} + /* * Scan a runq for candidate threads. * @@ -5382,23 +5823,8 @@ runq_scan( cqe_foreach_element(thread, queue, runq_links) { assert(count > 0); - assert_thread_magic(thread); - - if (thread->sched_stamp != sched_tick && - thread->sched_mode == TH_MODE_TIMESHARE) { - if (thread_update_add_thread(thread) == FALSE) { - return TRUE; - } - } - - if (cpu_throttle_enabled && ((thread->sched_pri <= MAXPRI_THROTTLE) && (thread->base_pri <= MAXPRI_THROTTLE))) { - if (thread->last_made_runnable_time < scan_context->earliest_bg_make_runnable_time) { - scan_context->earliest_bg_make_runnable_time = thread->last_made_runnable_time; - } - } else { - if (thread->last_made_runnable_time < scan_context->earliest_normal_make_runnable_time) { - scan_context->earliest_normal_make_runnable_time = thread->last_made_runnable_time; - } + if (runq_scan_thread(thread, scan_context) == TRUE) { + return TRUE; } count--; } @@ -5407,6 +5833,33 @@ runq_scan( return FALSE; } +#if CONFIG_SCHED_CLUTCH + +boolean_t +sched_clutch_timeshare_scan( + queue_t thread_queue, + uint16_t thread_count, + sched_update_scan_context_t scan_context) +{ + if (thread_count == 0) { + return FALSE; + } + + thread_t thread; + qe_foreach_element_safe(thread, thread_queue, th_clutch_timeshare_link) { + if (runq_scan_thread(thread, scan_context) == TRUE) { + return TRUE; + } + thread_count--; + } + + assert(thread_count == 0); + return FALSE; +} + + +#endif /* CONFIG_SCHED_CLUTCH */ + #endif /* CONFIG_SCHED_TIMESHARE_CORE */ boolean_t @@ -5468,10 +5921,10 @@ thread_clear_eager_preempt(thread_t thread) void sched_stats_handle_csw(processor_t processor, int reasons, int selfpri, int otherpri) { - struct processor_sched_statistics *stats; + struct sched_statistics *stats; boolean_t to_realtime = FALSE; - stats = &processor->processor_data.sched_stats; + stats = PERCPU_GET_RELATIVE(sched_stats, processor, processor); stats->csw_count++; if (otherpri >= BASEPRI_REALTIME) { @@ -5884,8 +6337,6 @@ sched_update_recommended_cores(uint64_t recommended_cores) bit_set(recommended_cores, master_processor->cpu_id); /* add boot processor or we hang */ } - boolean_t pset_newly_recommended = false; - /* First set recommended cores */ pset_lock(pset); avail_count = 0; @@ -5894,15 +6345,11 @@ sched_update_recommended_cores(uint64_t recommended_cores) if (nset != pset) { pset_unlock(pset); pset = nset; - pset_newly_recommended = false; pset_lock(pset); } if (bit_test(recommended_cores, processor->cpu_id)) { processor->is_recommended = TRUE; - if (bit_first(pset->recommended_bitmask) == -1) { - pset_newly_recommended = true; - } bit_set(pset->recommended_bitmask, processor->cpu_id); if (processor->state == PROCESSOR_IDLE) { @@ -5912,8 +6359,6 @@ 
sched_update_recommended_cores(uint64_t recommended_cores) } if (processor->state != PROCESSOR_OFF_LINE) { avail_count++; - } - if (pset_newly_recommended) { SCHED(pset_made_schedulable)(processor, pset, false); } } @@ -6024,11 +6469,7 @@ int sched_allow_NO_SMT_threads = 1; bool thread_no_smt(thread_t thread) { -#if DEBUG || DEVELOPMENT return sched_allow_NO_SMT_threads && (thread->bound_processor == PROCESSOR_NULL) && ((thread->sched_flags & TH_SFLAG_NO_SMT) || (thread->task->t_flags & TF_NO_SMT)); -#else - return sched_allow_NO_SMT_threads && (thread->bound_processor == PROCESSOR_NULL) && (thread->sched_flags & TH_SFLAG_NO_SMT); -#endif } bool @@ -6058,32 +6499,186 @@ sched_perfcontrol_update_callback_deadline(uint64_t new_deadline) #endif /* __arm64__ */ +#if CONFIG_SCHED_EDGE + +#define SCHED_PSET_LOAD_EWMA_TC_NSECS 10000000u + +/* + * sched_edge_pset_running_higher_bucket() + * + * Routine to calculate cumulative running counts for each scheduling + * bucket. This effectively lets the load calculation calculate if a + * cluster is running any threads at a QoS lower than the thread being + * migrated etc. + */ + +static void +sched_edge_pset_running_higher_bucket(processor_set_t pset, uint32_t *running_higher) +{ + bitmap_t *active_map = &pset->cpu_state_map[PROCESSOR_RUNNING]; + + /* Edge Scheduler Optimization */ + for (int cpu = bitmap_first(active_map, MAX_CPUS); cpu >= 0; cpu = bitmap_next(active_map, cpu)) { + sched_bucket_t cpu_bucket = os_atomic_load(&pset->cpu_running_buckets[cpu], relaxed); + for (sched_bucket_t bucket = cpu_bucket; bucket < TH_BUCKET_SCHED_MAX; bucket++) { + running_higher[bucket]++; + } + } +} + +/* + * sched_update_pset_load_average() + * + * Updates the load average for each sched bucket for a cluster. + * This routine must be called with the pset lock held. + */ void -sched_update_pset_load_average(processor_set_t pset) +sched_update_pset_load_average(processor_set_t pset, uint64_t curtime) { -#if CONFIG_SCHED_CLUTCH - int non_rt_load = sched_clutch_root_count(&pset->pset_clutch_root); -#else /* CONFIG_SCHED_CLUTCH */ - int non_rt_load = pset->pset_runq.count; -#endif /* CONFIG_SCHED_CLUTCH */ + if (pset->online_processor_count == 0) { + /* Looks like the pset is not runnable any more; nothing to do here */ + return; + } + + /* + * Edge Scheduler Optimization + * + * See if more callers of this routine can pass in timestamps to avoid the + * mach_absolute_time() call here. 
+ */
+
+ if (!curtime) {
+ curtime = mach_absolute_time();
+ }
+ uint64_t last_update = os_atomic_load(&pset->pset_load_last_update, relaxed);
+ int64_t delta_ticks = curtime - last_update;
+ if (delta_ticks < 0) {
+ return;
+ }
+
+ uint64_t delta_nsecs = 0;
+ absolutetime_to_nanoseconds(delta_ticks, &delta_nsecs);
+
+ if (__improbable(delta_nsecs > UINT32_MAX)) {
+ delta_nsecs = UINT32_MAX;
+ }
+
+ uint32_t running_higher[TH_BUCKET_SCHED_MAX] = {0};
+ sched_edge_pset_running_higher_bucket(pset, running_higher);
+
+ for (sched_bucket_t sched_bucket = TH_BUCKET_FIXPRI; sched_bucket < TH_BUCKET_SCHED_MAX; sched_bucket++) {
+ uint64_t old_load_average = os_atomic_load(&pset->pset_load_average[sched_bucket], relaxed);
+ uint64_t old_load_average_factor = old_load_average * SCHED_PSET_LOAD_EWMA_TC_NSECS;
+ uint32_t current_runq_depth = (sched_edge_cluster_cumulative_count(&pset->pset_clutch_root, sched_bucket) + rt_runq_count(pset) + running_higher[sched_bucket]) / pset->online_processor_count;
+
+ /*
+ * For the new load average, multiply current_runq_depth by delta_nsecs (which results in a 32.0 value).
+ * Since we want to maintain the load average as a 24.8 fixed-point arithmetic value for precision, the
+ * new load average needs to be shifted before it can be added to the old load average.
+ */
+ uint64_t new_load_average_factor = (current_runq_depth * delta_nsecs) << SCHED_PSET_LOAD_EWMA_FRACTION_BITS;
+
+ /*
+ * For extremely parallel workloads, it is important that the load average on a cluster moves from zero to non-zero
+ * instantly to allow threads to be migrated to other (potentially idle) clusters quickly. Hence use the EWMA
+ * when the system is already loaded; otherwise for an idle system use the latest load average immediately.
+ */
+ int old_load_shifted = (int)((old_load_average + SCHED_PSET_LOAD_EWMA_ROUND_BIT) >> SCHED_PSET_LOAD_EWMA_FRACTION_BITS);
+ boolean_t load_uptick = (old_load_shifted == 0) && (current_runq_depth != 0);
+ boolean_t load_downtick = (old_load_shifted != 0) && (current_runq_depth == 0);
+ uint64_t load_average;
+ if (load_uptick || load_downtick) {
+ load_average = (current_runq_depth << SCHED_PSET_LOAD_EWMA_FRACTION_BITS);
+ } else {
+ /* Indicates a loaded system; use EWMA for load average calculation */
+ load_average = (old_load_average_factor + new_load_average_factor) / (delta_nsecs + SCHED_PSET_LOAD_EWMA_TC_NSECS);
+ }
+ os_atomic_store(&pset->pset_load_average[sched_bucket], load_average, relaxed);
+ KDBG(MACHDBG_CODE(DBG_MACH_SCHED_CLUTCH, MACH_SCHED_EDGE_LOAD_AVG) | DBG_FUNC_NONE, pset->pset_cluster_id, (load_average >> SCHED_PSET_LOAD_EWMA_FRACTION_BITS), load_average & SCHED_PSET_LOAD_EWMA_FRACTION_MASK, sched_bucket);
+ }
+ os_atomic_store(&pset->pset_load_last_update, curtime, relaxed);
+}
+
+void
+sched_update_pset_avg_execution_time(processor_set_t pset, uint64_t execution_time, uint64_t curtime, sched_bucket_t sched_bucket)
+{
+ pset_execution_time_t old_execution_time_packed, new_execution_time_packed;
+ uint64_t avg_thread_execution_time = 0;
+
+ os_atomic_rmw_loop(&pset->pset_execution_time[sched_bucket].pset_execution_time_packed,
+ old_execution_time_packed.pset_execution_time_packed,
+ new_execution_time_packed.pset_execution_time_packed, relaxed, {
+ uint64_t last_update = old_execution_time_packed.pset_execution_time_last_update;
+ int64_t delta_ticks = curtime - last_update;
+ if (delta_ticks < 0) {
+ /*
+ * It's possible that another CPU came in and updated the pset_execution_time
+ * before this CPU could do it.
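As a worked illustration of the fixed-point blend used by sched_update_pset_load_average() above, here is a standalone userspace sketch of the same arithmetic, not part of the patch: with the 10 ms time constant, a previous 24.8 load average of 2.0, a sampled per-CPU run-queue depth of 4, and 5 ms since the last update, the blended value comes out to 682/256, i.e. about 2.66.

#include <stdint.h>
#include <stdio.h>

#define EWMA_TC_NSECS  10000000ULL  /* 10 ms time constant, as in the patch */
#define FRACTION_BITS  8            /* 24.8 fixed-point load average */

/* Standalone model of the EWMA blend in sched_update_pset_load_average(). */
static uint64_t
ewma_blend(uint64_t old_avg, uint64_t runq_depth, uint64_t delta_nsecs)
{
	uint64_t old_factor = old_avg * EWMA_TC_NSECS;
	uint64_t new_factor = (runq_depth * delta_nsecs) << FRACTION_BITS;
	return (old_factor + new_factor) / (delta_nsecs + EWMA_TC_NSECS);
}

int
main(void)
{
	uint64_t old_avg = 2ULL << FRACTION_BITS;       /* 2.0 in 24.8 */
	uint64_t avg = ewma_blend(old_avg, 4, 5000000); /* depth 4, 5 ms later */
	printf("%llu (%llu.%02llu)\n", (unsigned long long)avg,
	    (unsigned long long)(avg >> FRACTION_BITS),
	    (unsigned long long)((avg & ((1u << FRACTION_BITS) - 1)) * 100 / 256));
	return 0;
}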
Since the average execution time is meant to + * be an approximate measure per cluster, ignore the older update. + */ + os_atomic_rmw_loop_give_up(return ); + } + uint64_t delta_nsecs = 0; + absolutetime_to_nanoseconds(delta_ticks, &delta_nsecs); + + uint64_t nanotime = 0; + absolutetime_to_nanoseconds(execution_time, &nanotime); + uint64_t execution_time_us = nanotime / NSEC_PER_USEC; + + uint64_t old_execution_time = (old_execution_time_packed.pset_avg_thread_execution_time * SCHED_PSET_LOAD_EWMA_TC_NSECS); + uint64_t new_execution_time = (execution_time_us * delta_nsecs); + + avg_thread_execution_time = (old_execution_time + new_execution_time) / (delta_nsecs + SCHED_PSET_LOAD_EWMA_TC_NSECS); + new_execution_time_packed.pset_avg_thread_execution_time = avg_thread_execution_time; + new_execution_time_packed.pset_execution_time_last_update = curtime; + }); + KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_PSET_AVG_EXEC_TIME) | DBG_FUNC_NONE, pset->pset_cluster_id, avg_thread_execution_time, sched_bucket); +} + +#else /* CONFIG_SCHED_EDGE */ +void +sched_update_pset_load_average(processor_set_t pset, __unused uint64_t curtime) +{ + int non_rt_load = pset->pset_runq.count; int load = ((bit_count(pset->cpu_state_map[PROCESSOR_RUNNING]) + non_rt_load + rt_runq_count(pset)) << PSET_LOAD_NUMERATOR_SHIFT); - int new_load_average = (pset->load_average + load) >> 1; + int new_load_average = ((int)pset->load_average + load) >> 1; pset->load_average = new_load_average; - #if (DEVELOPMENT || DEBUG) #if __AMP__ if (pset->pset_cluster_type == PSET_AMP_P) { - KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_PSET_LOAD_AVERAGE) | DBG_FUNC_NONE, sched_get_pset_load_average(pset), (bit_count(pset->cpu_state_map[PROCESSOR_RUNNING]) + pset->pset_runq.count + rt_runq_count(pset))); + KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_PSET_LOAD_AVERAGE) | DBG_FUNC_NONE, sched_get_pset_load_average(pset, 0), (bit_count(pset->cpu_state_map[PROCESSOR_RUNNING]) + pset->pset_runq.count + rt_runq_count(pset))); } #endif #endif } +void +sched_update_pset_avg_execution_time(__unused processor_set_t pset, __unused uint64_t execution_time, __unused uint64_t curtime, __unused sched_bucket_t sched_bucket) +{ +} +#endif /* CONFIG_SCHED_EDGE */ + +/* pset is locked */ +static bool +processor_is_fast_track_candidate_for_realtime_thread(processor_set_t pset, processor_t processor) +{ + int cpuid = processor->cpu_id; +#if defined(__x86_64__) + if (sched_avoid_cpu0 && (cpuid == 0)) { + return false; + } +#endif + + cpumap_t fasttrack_map = pset_available_cpumap(pset) & ~pset->pending_AST_URGENT_cpu_mask & ~pset->realtime_map; + + return bit_test(fasttrack_map, cpuid); +} + /* pset is locked */ static processor_t -choose_processor_for_realtime_thread(processor_set_t pset) +choose_processor_for_realtime_thread(processor_set_t pset, processor_t skip_processor, bool consider_secondaries) { #if defined(__x86_64__) bool avoid_cpu0 = sched_avoid_cpu0 && bit_test(pset->cpu_bitmask, 0); @@ -6091,68 +6686,78 @@ choose_processor_for_realtime_thread(processor_set_t pset) const bool avoid_cpu0 = false; #endif - uint64_t cpu_map = (pset->cpu_bitmask & pset->recommended_bitmask & ~pset->pending_AST_URGENT_cpu_mask); + cpumap_t cpu_map = pset_available_cpumap(pset) & ~pset->pending_AST_URGENT_cpu_mask & ~pset->realtime_map; + if (skip_processor) { + bit_clear(cpu_map, skip_processor->cpu_id); + } + + cpumap_t primary_map = cpu_map & pset->primary_map; if (avoid_cpu0) { - cpu_map = bit_ror64(cpu_map, 1); + primary_map = bit_ror64(primary_map, 1); } - for (int rotid = 
lsb_first(cpu_map); rotid >= 0; rotid = lsb_next(cpu_map, rotid)) { - int cpuid = avoid_cpu0 ? ((rotid + 1) & 63) : rotid; + int rotid = lsb_first(primary_map); + if (rotid >= 0) { + int cpuid = avoid_cpu0 ? ((rotid + 1) & 63) : rotid; processor_t processor = processor_array[cpuid]; - if (processor->processor_primary != processor) { - continue; - } - - if (processor->state == PROCESSOR_IDLE) { - return processor; - } - - if ((processor->state != PROCESSOR_RUNNING) && (processor->state != PROCESSOR_DISPATCHING)) { - continue; - } - - if (processor->current_pri >= BASEPRI_RTQUEUES) { - continue; - } - return processor; } - if (!sched_allow_rt_smt) { - return PROCESSOR_NULL; + if (!pset->is_SMT || !sched_allow_rt_smt || !consider_secondaries) { + goto out; } /* Consider secondary processors */ + cpumap_t secondary_map = cpu_map & ~pset->primary_map; if (avoid_cpu0) { /* Also avoid cpu1 */ - cpu_map = bit_ror64(cpu_map, 1); + secondary_map = bit_ror64(secondary_map, 2); } - for (int rotid = lsb_first(cpu_map); rotid >= 0; rotid = lsb_next(cpu_map, rotid)) { + rotid = lsb_first(secondary_map); + if (rotid >= 0) { int cpuid = avoid_cpu0 ? ((rotid + 2) & 63) : rotid; processor_t processor = processor_array[cpuid]; - if (processor->processor_primary == processor) { - continue; - } + return processor; + } - if (processor->state == PROCESSOR_IDLE) { - return processor; - } +out: + if (skip_processor) { + return PROCESSOR_NULL; + } - if ((processor->state != PROCESSOR_RUNNING) && (processor->state != PROCESSOR_DISPATCHING)) { - continue; - } + /* + * If we didn't find an obvious processor to choose, but there are still more CPUs + * not already running realtime threads than realtime threads in the realtime run queue, + * this thread belongs in this pset, so choose some other processor in this pset + * to ensure the thread is enqueued here. 
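The avoid_cpu0 handling in choose_processor_for_realtime_thread() relies on a rotate-then-reindex trick: rotating the candidate mask right by one moves CPU 0 up to bit 63, so the lowest set bit of the rotated mask is always some other CPU when one is available, and the winner is mapped back with ((rotid + 1) & 63). A small standalone sketch of just that trick follows; ror64() and lsb() stand in for the kernel's bit_ror64() and lsb_first().

#include <stdint.h>
#include <stdio.h>

/* Rotate a 64-bit mask right by n bits (models bit_ror64()), 0 < n < 64. */
static uint64_t ror64(uint64_t map, unsigned n) { return (map >> n) | (map << (64 - n)); }
/* Index of the least significant set bit, or -1 if empty (models lsb_first()). */
static int lsb(uint64_t map) { return map ? __builtin_ctzll(map) : -1; }

int
main(void)
{
	uint64_t cpu_map = 0x0B;            /* CPUs 0, 1 and 3 are candidates */
	int rotid = lsb(ror64(cpu_map, 1)); /* after rotation, CPU 0 lives at bit 63 */
	int cpuid = (rotid + 1) & 63;       /* map the rotated index back */
	printf("picked cpu %d\n", cpuid);   /* prints 1: CPU 0 is only chosen last */
	return 0;
}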
+ */ + cpumap_t non_realtime_map = pset_available_cpumap(pset) & pset->primary_map & ~pset->realtime_map; + if (bit_count(non_realtime_map) > rt_runq_count(pset)) { + cpu_map = non_realtime_map; + assert(cpu_map != 0); + int cpuid = bit_first(cpu_map); + assert(cpuid >= 0); + return processor_array[cpuid]; + } - if (processor->current_pri >= BASEPRI_RTQUEUES) { - continue; - } + if (!pset->is_SMT || !sched_allow_rt_smt || !consider_secondaries) { + goto skip_secondaries; + } - return processor; + non_realtime_map = pset_available_cpumap(pset) & ~pset->realtime_map; + if (bit_count(non_realtime_map) > rt_runq_count(pset)) { + cpu_map = non_realtime_map; + assert(cpu_map != 0); + int cpuid = bit_first(cpu_map); + assert(cpuid >= 0); + return processor_array[cpuid]; } +skip_secondaries: return PROCESSOR_NULL; } @@ -6160,44 +6765,19 @@ choose_processor_for_realtime_thread(processor_set_t pset) static bool all_available_primaries_are_running_realtime_threads(processor_set_t pset) { - return these_processors_are_running_realtime_threads(pset, pset->primary_map); + cpumap_t cpu_map = pset_available_cpumap(pset) & pset->primary_map & ~pset->realtime_map; + return rt_runq_count(pset) > bit_count(cpu_map); } +#if defined(__x86_64__) /* pset is locked */ static bool these_processors_are_running_realtime_threads(processor_set_t pset, uint64_t these_map) { - uint64_t cpu_map = (pset->cpu_bitmask & pset->recommended_bitmask) & these_map; - - for (int cpuid = lsb_first(cpu_map); cpuid >= 0; cpuid = lsb_next(cpu_map, cpuid)) { - processor_t processor = processor_array[cpuid]; - - if (processor->state == PROCESSOR_IDLE) { - return false; - } - - if (processor->state == PROCESSOR_DISPATCHING) { - return false; - } - - if (processor->state != PROCESSOR_RUNNING) { - /* - * All other processor states are considered unavailable to run - * realtime threads. In particular, we prefer an available secondary - * processor over the risk of leaving a realtime thread on the run queue - * while waiting for a processor in PROCESSOR_START state, - * which should anyway be a rare case. 
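Both the backup-processor fallback above and the rewritten all_available_primaries_are_running_realtime_threads() reduce to the same counting test: a pset can still absorb another realtime thread as long as it has more available CPUs that are not already running realtime work than it has realtime threads queued. A hedged standalone sketch of that predicate, with bit_count() and rt_runq_count() modeled by a popcount and a plain integer argument:

#include <stdbool.h>
#include <stdint.h>

/* True if the pset still has headroom for another realtime thread. */
static bool
pset_can_absorb_rt_thread(uint64_t available_map, uint64_t realtime_map, int rt_runq_len)
{
	uint64_t non_rt_cpus = available_map & ~realtime_map;
	return __builtin_popcountll(non_rt_cpus) > rt_runq_len;
}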
- */ - continue; - } - - if (processor->current_pri < BASEPRI_RTQUEUES) { - return false; - } - } - - return true; + cpumap_t cpu_map = pset_available_cpumap(pset) & these_map & ~pset->realtime_map; + return rt_runq_count(pset) > bit_count(cpu_map); } +#endif static bool sched_ok_to_run_realtime_thread(processor_set_t pset, processor_t processor) @@ -6209,7 +6789,7 @@ sched_ok_to_run_realtime_thread(processor_set_t pset, processor_t processor) } else if (sched_avoid_cpu0 && (processor->cpu_id == 1) && processor->is_SMT) { ok_to_run_realtime_thread = sched_allow_rt_smt && these_processors_are_running_realtime_threads(pset, ~0x2); } else if (processor->processor_primary != processor) { - ok_to_run_realtime_thread = sched_allow_rt_smt && all_available_primaries_are_running_realtime_threads(pset); + ok_to_run_realtime_thread = (sched_allow_rt_smt && all_available_primaries_are_running_realtime_threads(pset)); } #else (void)pset; @@ -6229,14 +6809,17 @@ sched_pset_made_schedulable(__unused processor_t processor, processor_set_t pset void thread_set_no_smt(bool set) { + if (!system_is_SMT) { + /* Not a machine that supports SMT */ + return; + } + thread_t thread = current_thread(); spl_t s = splsched(); thread_lock(thread); if (set) { thread->sched_flags |= TH_SFLAG_NO_SMT; - } else { - thread->sched_flags &= ~TH_SFLAG_NO_SMT; } thread_unlock(thread); splx(s); @@ -6248,27 +6831,48 @@ thread_get_no_smt(void) return current_thread()->sched_flags & TH_SFLAG_NO_SMT; } +extern void task_set_no_smt(task_t); +void +task_set_no_smt(task_t task) +{ + if (!system_is_SMT) { + /* Not a machine that supports SMT */ + return; + } + + if (task == TASK_NULL) { + task = current_task(); + } + + task_lock(task); + task->t_flags |= TF_NO_SMT; + task_unlock(task); +} + #if DEBUG || DEVELOPMENT extern void sysctl_task_set_no_smt(char no_smt); void sysctl_task_set_no_smt(char no_smt) { - thread_t thread = current_thread(); - task_t task = thread->task; + if (!system_is_SMT) { + /* Not a machine that supports SMT */ + return; + } + + task_t task = current_task(); + task_lock(task); if (no_smt == '1') { task->t_flags |= TF_NO_SMT; - } else { - task->t_flags &= ~TF_NO_SMT; } + task_unlock(task); } extern char sysctl_task_get_no_smt(void); char sysctl_task_get_no_smt(void) { - thread_t thread = current_thread(); - task_t task = thread->task; + task_t task = current_task(); if (task->t_flags & TF_NO_SMT) { return '1'; @@ -6279,14 +6883,15 @@ sysctl_task_get_no_smt(void) __private_extern__ void -thread_bind_cluster_type(char cluster_type) +thread_bind_cluster_type(thread_t thread, char cluster_type, bool soft_bound) { #if __AMP__ - thread_t thread = current_thread(); - spl_t s = splsched(); thread_lock(thread); - thread->sched_flags &= ~(TH_SFLAG_ECORE_ONLY | TH_SFLAG_PCORE_ONLY); + thread->sched_flags &= ~(TH_SFLAG_ECORE_ONLY | TH_SFLAG_PCORE_ONLY | TH_SFLAG_BOUND_SOFT); + if (soft_bound) { + thread->sched_flags |= TH_SFLAG_BOUND_SOFT; + } switch (cluster_type) { case 'e': case 'E': @@ -6302,8 +6907,12 @@ thread_bind_cluster_type(char cluster_type) thread_unlock(thread); splx(s); - thread_block(THREAD_CONTINUE_NULL); + if (thread == current_thread()) { + thread_block(THREAD_CONTINUE_NULL); + } #else /* __AMP__ */ + (void)thread; (void)cluster_type; + (void)soft_bound; #endif /* __AMP__ */ } diff --git a/osfmk/kern/sched_prim.h b/osfmk/kern/sched_prim.h index 9276e2563..fd768d26f 100644 --- a/osfmk/kern/sched_prim.h +++ b/osfmk/kern/sched_prim.h @@ -66,19 +66,23 @@ #ifndef _KERN_SCHED_PRIM_H_ #define _KERN_SCHED_PRIM_H_ 
+#include #include #include #include #include #include +#include #include -#include #include +extern int thread_get_current_cpuid(void); + #ifdef MACH_KERNEL_PRIVATE #include #include +#include /* Initialization */ extern void sched_init(void); @@ -89,13 +93,13 @@ extern void sched_timebase_init(void); extern void pset_rt_init(processor_set_t pset); -extern void sched_rtglobal_init(processor_set_t pset); +extern void sched_rtlocal_init(processor_set_t pset); -extern rt_queue_t sched_rtglobal_runq(processor_set_t pset); +extern rt_queue_t sched_rtlocal_runq(processor_set_t pset); -extern void sched_rtglobal_queue_shutdown(processor_t processor); +extern void sched_rtlocal_queue_shutdown(processor_t processor); -extern int64_t sched_rtglobal_runq_count_sum(void); +extern int64_t sched_rtlocal_runq_count_sum(void); extern void sched_check_spill(processor_set_t pset, thread_t thread); @@ -126,7 +130,13 @@ extern boolean_t thread_unblock( /* Unblock and dispatch thread */ extern kern_return_t thread_go( thread_t thread, - wait_result_t wresult); + wait_result_t wresult, + waitq_options_t option); + +/* Check if direct handoff is allowed */ +extern boolean_t +thread_allowed_for_handoff( + thread_t thread); /* Handle threads at context switch */ extern void thread_dispatch( @@ -162,7 +172,7 @@ __options_decl(set_sched_pri_options_t, uint32_t, { /* Set the current scheduled priority */ extern void set_sched_pri( thread_t thread, - int priority, + int16_t priority, set_sched_pri_options_t options); /* Set base priority of the specified thread */ @@ -198,7 +208,7 @@ extern void thread_recompute_sched_pri( set_sched_pri_options_t options); /* Periodic scheduler activity */ -extern void sched_init_thread(void (*)(void)); +extern void sched_init_thread(void); /* Perform sched_tick housekeeping activities */ extern boolean_t can_update_priority( @@ -250,6 +260,17 @@ extern processor_set_t task_choose_pset( extern processor_t thread_bind( processor_t processor); +extern bool pset_has_stealable_threads( + processor_set_t pset); + +extern processor_set_t choose_starting_pset( + pset_node_t node, + thread_t thread, + processor_t *processor_hint); + +extern pset_node_t sched_choose_node( + thread_t thread); + /* Choose the best processor to run a thread */ extern processor_t choose_processor( processor_set_t pset, @@ -286,10 +307,11 @@ struct sched_update_scan_context { uint64_t earliest_bg_make_runnable_time; uint64_t earliest_normal_make_runnable_time; uint64_t earliest_rt_make_runnable_time; + uint64_t sched_tick_last_abstime; }; typedef struct sched_update_scan_context *sched_update_scan_context_t; -extern void sched_rtglobal_runq_scan(sched_update_scan_context_t scan_context); +extern void sched_rtlocal_runq_scan(sched_update_scan_context_t scan_context); extern void sched_pset_made_schedulable( processor_t processor, @@ -344,6 +366,10 @@ extern boolean_t thread_update_add_thread(thread_t thread); extern void thread_update_process_threads(void); extern boolean_t runq_scan(run_queue_t runq, sched_update_scan_context_t scan_context); +#if CONFIG_SCHED_CLUTCH +extern boolean_t sched_clutch_timeshare_scan(queue_t thread_queue, uint16_t count, sched_update_scan_context_t scan_context); +#endif /* CONFIG_SCHED_CLUTCH */ + extern void sched_timeshare_init(void); extern void sched_timeshare_timebase_init(void); extern void sched_timeshare_maintenance_continue(void); @@ -384,6 +410,21 @@ __private_extern__ kern_return_t clear_wait_internal( thread_t thread, wait_result_t result); +struct sched_statistics { + 
uint32_t csw_count; + uint32_t preempt_count; + uint32_t preempted_rt_count; + uint32_t preempted_by_rt_count; + uint32_t rt_sched_count; + uint32_t interrupt_count; + uint32_t ipi_count; + uint32_t timer_pop_count; + uint32_t idle_transitions; + uint32_t quantum_timer_expirations; +}; +PERCPU_DECL(struct sched_statistics, sched_stats); +extern bool sched_stats_active; + extern void sched_stats_handle_csw( processor_t processor, int reasons, @@ -394,25 +435,30 @@ extern void sched_stats_handle_runq_change( struct runq_stats *stats, int old_count); +#define SCHED_STATS_INC(field) \ +MACRO_BEGIN \ + if (__improbable(sched_stats_active)) { \ + PERCPU_GET(sched_stats)->field++; \ + } \ +MACRO_END #if DEBUG -#define SCHED_STATS_CSW(processor, reasons, selfpri, otherpri) \ -do { \ - if (__builtin_expect(sched_stats_active, 0)) { \ - sched_stats_handle_csw((processor), \ - (reasons), (selfpri), (otherpri)); \ - } \ -} while (0) +#define SCHED_STATS_CSW(processor, reasons, selfpri, otherpri) \ +MACRO_BEGIN \ + if (__improbable(sched_stats_active)) { \ + sched_stats_handle_csw((processor), \ + (reasons), (selfpri), (otherpri)); \ + } \ +MACRO_END -#define SCHED_STATS_RUNQ_CHANGE(stats, old_count) \ -do { \ - if (__builtin_expect(sched_stats_active, 0)) { \ - sched_stats_handle_runq_change((stats), \ - (old_count)); \ - } \ -} while (0) +#define SCHED_STATS_RUNQ_CHANGE(stats, old_count) \ +MACRO_BEGIN \ + if (__improbable(sched_stats_active)) { \ + sched_stats_handle_runq_change((stats), (old_count)); \ + } \ +MACRO_END #else /* DEBUG */ @@ -422,20 +468,24 @@ do { \ #endif /* DEBUG */ extern uint32_t sched_debug_flags; -#define SCHED_DEBUG_FLAG_PLATFORM_TRACEPOINTS 0x00000001 +#define SCHED_DEBUG_FLAG_PLATFORM_TRACEPOINTS 0x00000001 #define SCHED_DEBUG_FLAG_CHOOSE_PROCESSOR_TRACEPOINTS 0x00000002 -#define SCHED_DEBUG_PLATFORM_KERNEL_DEBUG_CONSTANT(...) do { \ - if (__improbable(sched_debug_flags & SCHED_DEBUG_FLAG_PLATFORM_TRACEPOINTS)) { \ - KERNEL_DEBUG_CONSTANT(__VA_ARGS__); \ - } \ - } while(0) - -#define SCHED_DEBUG_CHOOSE_PROCESSOR_KERNEL_DEBUG_CONSTANT(...) do { \ - if (__improbable(sched_debug_flags & SCHED_DEBUG_FLAG_CHOOSE_PROCESSOR_TRACEPOINTS)) { \ - KERNEL_DEBUG_CONSTANT(__VA_ARGS__); \ - } \ - } while(0) +#define SCHED_DEBUG_PLATFORM_KERNEL_DEBUG_CONSTANT(...) \ +MACRO_BEGIN \ + if (__improbable(sched_debug_flags & \ + SCHED_DEBUG_FLAG_PLATFORM_TRACEPOINTS)) { \ + KERNEL_DEBUG_CONSTANT(__VA_ARGS__); \ + } \ +MACRO_END + +#define SCHED_DEBUG_CHOOSE_PROCESSOR_KERNEL_DEBUG_CONSTANT(...) 
\ +MACRO_BEGIN \ + if (__improbable(sched_debug_flags & \ + SCHED_DEBUG_FLAG_CHOOSE_PROCESSOR_TRACEPOINTS)) { \ + KERNEL_DEBUG_CONSTANT(__VA_ARGS__); \ + } \ +MACRO_END /* Tells if there are "active" RT threads in the system (provided by CPU PM) */ extern void active_rt_threads( @@ -454,7 +504,10 @@ __BEGIN_DECLS #ifdef XNU_KERNEL_PRIVATE -extern void thread_bind_cluster_type(char cluster_type); +extern void thread_bind_cluster_type(thread_t, char cluster_type, bool soft_bind); + +extern int sched_get_rt_n_backup_processors(void); +extern void sched_set_rt_n_backup_processors(int n); /* Toggles a global override to turn off CPU Throttling */ extern void sys_override_cpu_throttle(boolean_t enable_override); @@ -480,12 +533,20 @@ extern void thread_exception_return(void) __dead2; /* String declaring the name of the current scheduler */ extern char sched_string[SCHED_STRING_MAX_LENGTH]; +__options_decl(thread_handoff_option_t, uint32_t, { + THREAD_HANDOFF_NONE = 0, + THREAD_HANDOFF_SETRUN_NEEDED = 0x1, +}); + +/* Remove thread from its run queue */ +thread_t thread_prepare_for_handoff(thread_t thread, thread_handoff_option_t option); + /* Attempt to context switch to a specific runnable thread */ -extern wait_result_t thread_handoff_deallocate(thread_t thread); +extern wait_result_t thread_handoff_deallocate(thread_t thread, thread_handoff_option_t option); __attribute__((nonnull(1, 2))) extern void thread_handoff_parameter(thread_t thread, - thread_continue_t continuation, void *parameter) __dead2; + thread_continue_t continuation, void *parameter, thread_handoff_option_t) __dead2; extern struct waitq *assert_wait_queue(event_t event); @@ -581,13 +642,19 @@ extern boolean_t preemption_enabled(void); * a function pointer table. */ -#if !defined(CONFIG_SCHED_TRADITIONAL) && !defined(CONFIG_SCHED_PROTO) && !defined(CONFIG_SCHED_GRRR) && !defined(CONFIG_SCHED_MULTIQ) && !defined(CONFIG_SCHED_CLUTCH) +#if !defined(CONFIG_SCHED_TRADITIONAL) && !defined(CONFIG_SCHED_PROTO) && !defined(CONFIG_SCHED_GRRR) && !defined(CONFIG_SCHED_MULTIQ) && !defined(CONFIG_SCHED_CLUTCH) && !defined(CONFIG_SCHED_EDGE) #error Enable at least one scheduler algorithm in osfmk/conf/MASTER.XXX #endif #if __AMP__ + +#if CONFIG_SCHED_EDGE +extern const struct sched_dispatch_table sched_edge_dispatch; +#define SCHED(f) (sched_edge_dispatch.f) +#else /* CONFIG_SCHED_EDGE */ extern const struct sched_dispatch_table sched_amp_dispatch; #define SCHED(f) (sched_amp_dispatch.f) +#endif /* CONFIG_SCHED_EDGE */ #else /* __AMP__ */ @@ -634,6 +701,12 @@ struct sched_dispatch_table { */ int (*compute_timeshare_priority)(thread_t thread); + /* + * Pick the best node for a thread to run on. + */ + pset_node_t (*choose_node)( + thread_t thread); + /* * Pick the best processor for a thread (any kind of thread) to run on. 
*/ @@ -762,6 +835,10 @@ struct sched_dispatch_table { /* Routine to inform the scheduler when a new pset becomes schedulable */ void (*pset_made_schedulable)(processor_t processor, processor_set_t pset, boolean_t drop_lock); +#if CONFIG_THREAD_GROUPS + /* Routine to inform the scheduler when CLPC changes a thread group recommendation */ + void (*thread_group_recommendation_change)(struct thread_group *tg, cluster_type_t new_recommendation); +#endif }; #if defined(CONFIG_SCHED_TRADITIONAL) @@ -789,6 +866,11 @@ extern const struct sched_dispatch_table sched_grrr_dispatch; extern const struct sched_dispatch_table sched_clutch_dispatch; #endif +#if defined(CONFIG_SCHED_EDGE) +extern const struct sched_dispatch_table sched_edge_dispatch; +#endif + + #endif /* MACH_KERNEL_PRIVATE */ __END_DECLS diff --git a/osfmk/kern/sched_proto.c b/osfmk/kern/sched_proto.c index ce5d226f1..9693756fd 100644 --- a/osfmk/kern/sched_proto.c +++ b/osfmk/kern/sched_proto.c @@ -164,6 +164,7 @@ const struct sched_dispatch_table sched_proto_dispatch = { .steal_thread_enabled = sched_steal_thread_DISABLED, .steal_thread = sched_proto_steal_thread, .compute_timeshare_priority = sched_proto_compute_priority, + .choose_node = sched_choose_node, .choose_processor = sched_proto_choose_processor, .processor_enqueue = sched_proto_processor_enqueue, .processor_queue_shutdown = sched_proto_processor_queue_shutdown, @@ -188,11 +189,11 @@ const struct sched_dispatch_table sched_proto_dispatch = { .thread_avoid_processor = NULL, .processor_balance = sched_SMT_balance, - .rt_runq = sched_rtglobal_runq, - .rt_init = sched_rtglobal_init, - .rt_queue_shutdown = sched_rtglobal_queue_shutdown, - .rt_runq_scan = sched_rtglobal_runq_scan, - .rt_runq_count_sum = sched_rtglobal_runq_count_sum, + .rt_runq = sched_rtlocal_runq, + .rt_init = sched_rtlocal_init, + .rt_queue_shutdown = sched_rtlocal_queue_shutdown, + .rt_runq_scan = sched_rtlocal_runq_scan, + .rt_runq_count_sum = sched_rtlocal_runq_count_sum, .qos_max_parallelism = sched_qos_max_parallelism, .check_spill = sched_check_spill, diff --git a/osfmk/kern/sched_traditional.c b/osfmk/kern/sched_traditional.c index 3297904d0..930f9feab 100644 --- a/osfmk/kern/sched_traditional.c +++ b/osfmk/kern/sched_traditional.c @@ -143,6 +143,7 @@ const struct sched_dispatch_table sched_traditional_dispatch = { .steal_thread_enabled = sched_traditional_steal_thread_enabled, .steal_thread = sched_traditional_steal_thread, .compute_timeshare_priority = sched_compute_timeshare_priority, + .choose_node = sched_choose_node, .choose_processor = choose_processor, .processor_enqueue = sched_traditional_processor_enqueue, .processor_queue_shutdown = sched_traditional_processor_queue_shutdown, @@ -167,11 +168,11 @@ const struct sched_dispatch_table sched_traditional_dispatch = { .thread_avoid_processor = NULL, .processor_balance = sched_SMT_balance, - .rt_runq = sched_rtglobal_runq, - .rt_init = sched_rtglobal_init, - .rt_queue_shutdown = sched_rtglobal_queue_shutdown, - .rt_runq_scan = sched_rtglobal_runq_scan, - .rt_runq_count_sum = sched_rtglobal_runq_count_sum, + .rt_runq = sched_rtlocal_runq, + .rt_init = sched_rtlocal_init, + .rt_queue_shutdown = sched_rtlocal_queue_shutdown, + .rt_runq_scan = sched_rtlocal_runq_scan, + .rt_runq_count_sum = sched_rtlocal_runq_count_sum, .qos_max_parallelism = sched_qos_max_parallelism, .check_spill = sched_check_spill, @@ -194,6 +195,7 @@ const struct sched_dispatch_table sched_traditional_with_pset_runqueue_dispatch .steal_thread_enabled = sched_steal_thread_enabled, 
.steal_thread = sched_traditional_steal_thread, .compute_timeshare_priority = sched_compute_timeshare_priority, + .choose_node = sched_choose_node, .choose_processor = choose_processor, .processor_enqueue = sched_traditional_processor_enqueue, .processor_queue_shutdown = sched_traditional_processor_queue_shutdown, @@ -218,11 +220,11 @@ const struct sched_dispatch_table sched_traditional_with_pset_runqueue_dispatch .thread_avoid_processor = NULL, .processor_balance = sched_SMT_balance, - .rt_runq = sched_rtglobal_runq, - .rt_init = sched_rtglobal_init, - .rt_queue_shutdown = sched_rtglobal_queue_shutdown, - .rt_runq_scan = sched_rtglobal_runq_scan, - .rt_runq_count_sum = sched_rtglobal_runq_count_sum, + .rt_runq = sched_rtlocal_runq, + .rt_init = sched_rtlocal_init, + .rt_queue_shutdown = sched_rtlocal_queue_shutdown, + .rt_runq_scan = sched_rtlocal_runq_scan, + .rt_runq_count_sum = sched_rtlocal_runq_count_sum, .qos_max_parallelism = sched_qos_max_parallelism, .check_spill = sched_check_spill, diff --git a/osfmk/kern/sfi.c b/osfmk/kern/sfi.c index d9ee0728f..99a482107 100644 --- a/osfmk/kern/sfi.c +++ b/osfmk/kern/sfi.c @@ -30,8 +30,8 @@ #include #include #include +#include #include -#include #include #include #include @@ -131,23 +131,28 @@ typedef struct { static inline void _sfi_wait_cleanup(void); -#define SFI_CLASS_REGISTER(clsid, ledger_name) \ -static void __attribute__((noinline, noreturn)) \ +static void sfi_class_register(sfi_class_registration_t *); + +#define SFI_CLASS_REGISTER(clsid, ledger_name) \ + \ +static void __attribute__((noinline, noreturn)) \ SFI_ ## clsid ## _THREAD_IS_WAITING(void *arg __unused, wait_result_t wret __unused) \ -{ \ - _sfi_wait_cleanup(); \ - thread_exception_return(); \ -} \ - \ -_Static_assert(SFI_CLASS_ ## clsid < MAX_SFI_CLASS_ID, "Invalid ID"); \ - \ -__attribute__((section("__DATA,__sfi_class_reg"), used)) \ -static sfi_class_registration_t SFI_ ## clsid ## _registration = { \ - .class_id = SFI_CLASS_ ## clsid, \ - .class_continuation = SFI_ ## clsid ## _THREAD_IS_WAITING, \ - .class_name = "SFI_CLASS_" # clsid, \ - .class_ledger_name = "SFI_CLASS_" # ledger_name, \ -} +{ \ + _sfi_wait_cleanup(); \ + thread_exception_return(); \ +} \ + \ +static_assert(SFI_CLASS_ ## clsid < MAX_SFI_CLASS_ID, "Invalid ID"); \ + \ +static __startup_data sfi_class_registration_t \ +SFI_ ## clsid ## _registration = { \ + .class_id = SFI_CLASS_ ## clsid, \ + .class_continuation = SFI_ ## clsid ## _THREAD_IS_WAITING, \ + .class_name = "SFI_CLASS_" # clsid, \ + .class_ledger_name = "SFI_CLASS_" # ledger_name, \ +}; \ +STARTUP_ARG(TUNABLES, STARTUP_RANK_MIDDLE, \ + sfi_class_register, &SFI_ ## clsid ## _registration) /* SFI_CLASS_UNSPECIFIED not included here */ SFI_CLASS_REGISTER(MAINTENANCE, MAINTENANCE); @@ -188,7 +193,7 @@ struct sfi_class_state { /* Static configuration performed in sfi_early_init() */ struct sfi_class_state sfi_classes[MAX_SFI_CLASS_ID]; -int sfi_enabled_class_count; +int sfi_enabled_class_count; // protected by sfi_lock and used atomically static void sfi_timer_global_off( timer_call_param_t param0, @@ -198,51 +203,24 @@ static void sfi_timer_per_class_on( timer_call_param_t param0, timer_call_param_t param1); -static sfi_class_registration_t * -sfi_get_registration_data(unsigned long *count) -{ - unsigned long sectlen = 0; - void *sectdata; - - sectdata = getsectdatafromheader(&_mh_execute_header, "__DATA", "__sfi_class_reg", §len); - if (sectdata) { - if (sectlen % sizeof(sfi_class_registration_t) != 0) { - /* corrupt data? 
*/ - panic("__sfi_class_reg section has invalid size %lu", sectlen); - __builtin_unreachable(); - } - - *count = sectlen / sizeof(sfi_class_registration_t); - return (sfi_class_registration_t *)sectdata; - } else { - panic("__sfi_class_reg section not found"); - __builtin_unreachable(); - } -} - /* Called early in boot, when kernel is single-threaded */ -void -sfi_early_init(void) +__startup_func +static void +sfi_class_register(sfi_class_registration_t *reg) { - unsigned long i, count; - sfi_class_registration_t *registrations; - - registrations = sfi_get_registration_data(&count); - for (i = 0; i < count; i++) { - sfi_class_id_t class_id = registrations[i].class_id; + sfi_class_id_t class_id = reg->class_id; - assert(class_id < MAX_SFI_CLASS_ID); /* should be caught at compile-time */ - if (class_id < MAX_SFI_CLASS_ID) { - if (sfi_classes[class_id].continuation != NULL) { - panic("Duplicate SFI registration for class 0x%x", class_id); - } - sfi_classes[class_id].class_sfi_is_enabled = FALSE; - sfi_classes[class_id].class_in_on_phase = TRUE; - sfi_classes[class_id].continuation = registrations[i].class_continuation; - sfi_classes[class_id].class_name = registrations[i].class_name; - sfi_classes[class_id].class_ledger_name = registrations[i].class_ledger_name; - } + if (class_id >= MAX_SFI_CLASS_ID) { + panic("Invalid SFI class 0x%x", class_id); + } + if (sfi_classes[class_id].continuation != NULL) { + panic("Duplicate SFI registration for class 0x%x", class_id); } + sfi_classes[class_id].class_sfi_is_enabled = FALSE; + sfi_classes[class_id].class_in_on_phase = TRUE; + sfi_classes[class_id].continuation = reg->class_continuation; + sfi_classes[class_id].class_name = reg->class_name; + sfi_classes[class_id].class_ledger_name = reg->class_ledger_name; } void @@ -254,7 +232,7 @@ sfi_init(void) simple_lock_init(&sfi_lock, 0); timer_call_setup(&sfi_timer_call_entry, sfi_timer_global_off, NULL); sfi_window_is_set = FALSE; - sfi_enabled_class_count = 0; + os_atomic_init(&sfi_enabled_class_count, 0); sfi_is_enabled = FALSE; for (i = 0; i < MAX_SFI_CLASS_ID; i++) { @@ -507,7 +485,7 @@ sfi_set_window(uint64_t window_usecs) sfi_window_interval = interval; sfi_window_is_set = TRUE; - if (sfi_enabled_class_count == 0) { + if (os_atomic_load(&sfi_enabled_class_count, relaxed) == 0) { /* Can't program timer yet */ } else if (!sfi_is_enabled) { sfi_is_enabled = TRUE; @@ -660,7 +638,7 @@ sfi_set_class_offtime(sfi_class_id_t class_id, uint64_t offtime_usecs) /* We never re-program the per-class on-timer, but rather just let it expire naturally */ if (!sfi_classes[class_id].class_sfi_is_enabled) { - sfi_enabled_class_count++; + os_atomic_inc(&sfi_enabled_class_count, relaxed); } sfi_classes[class_id].off_time_usecs = offtime_usecs; sfi_classes[class_id].off_time_interval = interval; @@ -700,13 +678,13 @@ sfi_class_offtime_cancel(sfi_class_id_t class_id) /* We never re-program the per-class on-timer, but rather just let it expire naturally */ if (sfi_classes[class_id].class_sfi_is_enabled) { - sfi_enabled_class_count--; + os_atomic_dec(&sfi_enabled_class_count, relaxed); } sfi_classes[class_id].off_time_usecs = 0; sfi_classes[class_id].off_time_interval = 0; sfi_classes[class_id].class_sfi_is_enabled = FALSE; - if (sfi_enabled_class_count == 0) { + if (os_atomic_load(&sfi_enabled_class_count, relaxed) == 0) { sfi_is_enabled = FALSE; } @@ -766,6 +744,16 @@ sfi_thread_classify(thread_t thread) sched_mode_t thmode = thread->sched_mode; boolean_t focal = FALSE; + /* kernel threads never reach the user AST 
boundary, and are in a separate world for SFI */ + if (is_kernel_thread) { + return SFI_CLASS_KERNEL; + } + + /* no need to re-classify threads unless there is at least one enabled SFI class */ + if (os_atomic_load(&sfi_enabled_class_count, relaxed) == 0) { + return SFI_CLASS_OPTED_OUT; + } + int task_role = proc_get_effective_task_policy(task, TASK_POLICY_ROLE); int latency_qos = proc_get_effective_task_policy(task, TASK_POLICY_LATENCY_QOS); int managed_task = proc_get_effective_task_policy(task, TASK_POLICY_SFI_MANAGED); @@ -773,11 +761,6 @@ sfi_thread_classify(thread_t thread) int thread_qos = proc_get_effective_thread_policy(thread, TASK_POLICY_QOS); int thread_bg = proc_get_effective_thread_policy(thread, TASK_POLICY_DARWIN_BG); - /* kernel threads never reach the user AST boundary, and are in a separate world for SFI */ - if (is_kernel_thread) { - return SFI_CLASS_KERNEL; - } - if (thread_qos == THREAD_QOS_MAINTENANCE) { return SFI_CLASS_MAINTENANCE; } @@ -963,14 +946,12 @@ _sfi_wait_cleanup(void) */ if (self->wait_sfi_begin_time != 0) { -#if !CONFIG_EMBEDDED uint64_t made_runnable = os_atomic_load(&self->last_made_runnable_time, relaxed); int64_t sfi_wait_time = made_runnable - self->wait_sfi_begin_time; assert(sfi_wait_time >= 0); ledger_credit(self->task->ledger, task_ledgers.sfi_wait_times[current_sfi_wait_class], sfi_wait_time); -#endif /* !CONFIG_EMBEDDED */ self->wait_sfi_begin_time = 0; } diff --git a/osfmk/kern/sfi.h b/osfmk/kern/sfi.h index effa349ef..0fbd177ee 100644 --- a/osfmk/kern/sfi.h +++ b/osfmk/kern/sfi.h @@ -38,7 +38,6 @@ #include extern void sfi_init(void); -extern void sfi_early_init(void); extern sfi_class_id_t sfi_get_ledger_alias_for_class(sfi_class_id_t class_id); extern int sfi_ledger_entry_add(ledger_template_t template, sfi_class_id_t class_id); diff --git a/osfmk/kern/simple_lock.h b/osfmk/kern/simple_lock.h index d791567b0..67eb28971 100644 --- a/osfmk/kern/simple_lock.h +++ b/osfmk/kern/simple_lock.h @@ -76,7 +76,7 @@ #include #include -#ifdef MACH_KERNEL_PRIVATE +#ifdef MACH_KERNEL_PRIVATE #include #include @@ -142,8 +142,32 @@ extern void hw_lock_unlock_nopreempt( extern unsigned int hw_lock_held( hw_lock_t); -extern boolean_t hw_atomic_test_and_set32(uint32_t *target, uint32_t test_mask, uint32_t set_mask, enum memory_order ord, boolean_t wait); -#endif /* MACH_KERNEL_PRIVATE */ +extern boolean_t hw_atomic_test_and_set32( + uint32_t *target, + uint32_t test_mask, + uint32_t set_mask, + enum memory_order ord, + boolean_t wait); + +#endif /* MACH_KERNEL_PRIVATE */ +#if XNU_KERNEL_PRIVATE + +struct usimple_lock_startup_spec { + usimple_lock_t lck; + unsigned short lck_init_arg; +}; + +extern void usimple_lock_startup_init( + struct usimple_lock_startup_spec *spec); + +#define SIMPLE_LOCK_DECLARE(var, arg) \ + decl_simple_lock_data(, var); \ + static __startup_data struct usimple_lock_startup_spec \ + __startup_usimple_lock_spec_ ## var = { &var, arg }; \ + STARTUP_ARG(LOCKS_EARLY, STARTUP_RANK_FOURTH, usimple_lock_startup_init, \ + &__startup_usimple_lock_spec_ ## var) + +#endif /* XNU_KERNEL_PRIVATE */ __BEGIN_DECLS diff --git a/osfmk/kern/stack.c b/osfmk/kern/stack.c index 0481ddfdb..11830d79a 100644 --- a/osfmk/kern/stack.c +++ b/osfmk/kern/stack.c @@ -36,6 +36,7 @@ #include #include #include +#include #include #include #include @@ -68,10 +69,8 @@ static unsigned int stack_hiwat; unsigned int stack_total; /* current total count */ unsigned long long stack_allocs; /* total count of allocations */ -static int stack_fake_zone_index = -1; /* index in 
zone_info array */ - static unsigned int stack_free_target; -static int stack_free_delta; +static int stack_free_delta; static unsigned int stack_new_count; /* total new stack allocations */ @@ -82,6 +81,12 @@ vm_offset_t kernel_stack_size; vm_offset_t kernel_stack_mask; vm_offset_t kernel_stack_depth_max; +struct stack_cache { + vm_offset_t free; + unsigned int count; +}; +static struct stack_cache PERCPU_DATA(stack_cache); + /* * The next field is at the base of the stack, * so the low end is left unsullied. @@ -248,7 +253,7 @@ stack_free_stack( #endif s = splsched(); - cache = &PROCESSOR_DATA(current_processor(), stack_cache); + cache = PERCPU_GET(stack_cache); if (cache->count < STACK_CACHE_SIZE) { stack_next(stack) = cache->free; cache->free = stack; @@ -283,7 +288,7 @@ stack_alloc_try( struct stack_cache *cache; vm_offset_t stack; - cache = &PROCESSOR_DATA(current_processor(), stack_cache); + cache = PERCPU_GET(stack_cache); stack = cache->free; if (stack != 0) { cache->free = stack_next(stack); @@ -407,42 +412,6 @@ compute_stack_target( splx(s); } -void -stack_fake_zone_init(int zone_index) -{ - stack_fake_zone_index = zone_index; -} - -void -stack_fake_zone_info(int *count, - vm_size_t *cur_size, vm_size_t *max_size, vm_size_t *elem_size, vm_size_t *alloc_size, - uint64_t *sum_size, int *collectable, int *exhaustable, int *caller_acct) -{ - unsigned int total, hiwat, free; - unsigned long long all; - spl_t s; - - s = splsched(); - stack_lock(); - all = stack_allocs; - total = stack_total; - hiwat = stack_hiwat; - free = stack_free_count; - stack_unlock(); - splx(s); - - *count = total - free; - *cur_size = kernel_stack_size * total; - *max_size = kernel_stack_size * hiwat; - *elem_size = kernel_stack_size; - *alloc_size = kernel_stack_size; - *sum_size = all * kernel_stack_size; - - *collectable = 1; - *exhaustable = 0; - *caller_acct = 1; -} - /* OBSOLETE */ void stack_privilege( thread_t thread); @@ -504,13 +473,13 @@ processor_set_stack_usage( lck_mtx_unlock(&tasks_threads_lock); if (size != 0) { - kfree(addr, size); + kheap_free(KHEAP_TEMP, addr, size); } assert(size_needed > 0); size = size_needed; - addr = kalloc(size); + addr = kheap_alloc(KHEAP_TEMP, size, Z_WAITOK); if (addr == 0) { return KERN_RESOURCE_SHORTAGE; } @@ -544,7 +513,7 @@ processor_set_stack_usage( } if (size != 0) { - kfree(addr, size); + kheap_free(KHEAP_TEMP, addr, size); } *totalp = total; diff --git a/osfmk/kern/startup.c b/osfmk/kern/startup.c index 36a97ec7d..d2b50a60a 100644 --- a/osfmk/kern/startup.c +++ b/osfmk/kern/startup.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2018 Apple Inc. All rights reserved. + * Copyright (c) 2000-2020 Apple Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -101,6 +101,9 @@ #include #include #include +#if KPERF +#include +#endif /* KPERF */ #include #include #include @@ -113,6 +116,7 @@ #include #include #include +#include #include #include #include @@ -167,19 +171,11 @@ extern void vnguard_policy_init(void); #include -#if defined(__arm__) || defined(__arm64__) -#include // for arm_vm_prot_finalize -#endif - #include static void kernel_bootstrap_thread(void); static void load_context( thread_t thread); -#if (defined(__i386__) || defined(__x86_64__)) && NCOPY_WINDOWS > 0 -extern void cpu_userwindow_init(int); -extern void cpu_physwindow_init(int); -#endif #if CONFIG_ECC_LOGGING #include @@ -202,55 +198,175 @@ extern void bsd_scale_setup(int); extern unsigned int semaphore_max; extern void stackshot_init(void); extern void ktrace_init(void); -extern void oslog_init(void); /* * Running in virtual memory, on the interrupt stack. */ +extern struct startup_entry startup_entries[] +__SECTION_START_SYM(STARTUP_HOOK_SEGMENT, STARTUP_HOOK_SECTION); + +extern struct startup_entry startup_entries_end[] +__SECTION_END_SYM(STARTUP_HOOK_SEGMENT, STARTUP_HOOK_SECTION); + +static struct startup_entry *__startup_data startup_entry_cur = startup_entries; + +SECURITY_READ_ONLY_LATE(startup_subsystem_id_t) startup_phase = STARTUP_SUB_NONE; + extern int serverperfmode; +#if DEBUG || DEVELOPMENT +TUNABLE(startup_debug_t, startup_debug, "startup_debug", 0); +#endif + /* size of kernel trace buffer, disabled by default */ -unsigned int new_nkdbufs = 0; -unsigned int wake_nkdbufs = 0; -unsigned int write_trace_on_panic = 0; -unsigned int trace_wrap = 0; -boolean_t trace_serial = FALSE; -boolean_t early_boot_complete = FALSE; +TUNABLE(unsigned int, new_nkdbufs, "trace", 0); +TUNABLE(unsigned int, wake_nkdbufs, "trace_wake", 0); +TUNABLE(unsigned int, write_trace_on_panic, "trace_panic", 0); +TUNABLE(unsigned int, trace_wrap, "trace_wrap", 0); /* mach leak logging */ -int log_leaks = 0; +TUNABLE(int, log_leaks, "-l", 0); static inline void kernel_bootstrap_log(const char *message) { -// kprintf("kernel_bootstrap: %s\n", message); + if ((startup_debug & STARTUP_DEBUG_VERBOSE) && + startup_phase >= STARTUP_SUB_KPRINTF) { + kprintf("kernel_bootstrap: %s\n", message); + } kernel_debug_string_early(message); } static inline void kernel_bootstrap_thread_log(const char *message) { -// kprintf("kernel_bootstrap_thread: %s\n", message); + if ((startup_debug & STARTUP_DEBUG_VERBOSE) && + startup_phase >= STARTUP_SUB_KPRINTF) { + kprintf("kernel_bootstrap_thread: %s\n", message); + } kernel_debug_string_early(message); } +extern void +qsort(void *a, size_t n, size_t es, int (*cmp)(const void *, const void *)); + +__startup_func +static int +startup_entry_cmp(const void *e1, const void *e2) +{ + const struct startup_entry *a = e1; + const struct startup_entry *b = e2; + if (a->subsystem == b->subsystem) { + if (a->rank == b->rank) { + return 0; + } + return a->rank > b->rank ? 1 : -1; + } + return a->subsystem > b->subsystem ? 1 : -1; +} + +__startup_func void -kernel_early_bootstrap(void) +kernel_startup_bootstrap(void) { - /* serverperfmode is needed by timer setup */ - if (PE_parse_boot_argn("serverperfmode", &serverperfmode, sizeof(serverperfmode))) { - serverperfmode = 1; + /* + * Sort the various STARTUP() entries by subsystem/rank. 
+ */ + size_t n = startup_entries_end - startup_entries; + + if (n == 0) { + panic("Section %s,%s missing", + STARTUP_HOOK_SEGMENT, STARTUP_HOOK_SECTION); + } + if (((uintptr_t)startup_entries_end - (uintptr_t)startup_entries) % + sizeof(struct startup_entry)) { + panic("Section %s,%s has invalid size", + STARTUP_HOOK_SEGMENT, STARTUP_HOOK_SECTION); } -#if CONFIG_SCHED_SFI + qsort(startup_entries, n, sizeof(struct startup_entry), startup_entry_cmp); + /* - * Configure SFI classes + * Then initialize all tunables, and early locks */ - sfi_early_init(); -#endif + kernel_startup_initialize_upto(STARTUP_SUB_LOCKS_EARLY); +} + +__startup_func +extern void +kernel_startup_tunable_init(const struct startup_tunable_spec *spec) +{ + if (PE_parse_boot_argn(spec->name, spec->var_addr, spec->var_len)) { + if (spec->var_is_bool) { + /* make sure bool's are valued in {0, 1} */ + *(bool *)spec->var_addr = *(uint8_t *)spec->var_addr; + } + } +} + +static void +kernel_startup_log(startup_subsystem_id_t subsystem) +{ + static const char *names[] = { + [STARTUP_SUB_TUNABLES] = "tunables", + [STARTUP_SUB_LOCKS_EARLY] = "locks_early", + [STARTUP_SUB_KPRINTF] = "kprintf", + + [STARTUP_SUB_PMAP_STEAL] = "pmap_steal", + [STARTUP_SUB_VM_KERNEL] = "vm_kernel", + [STARTUP_SUB_KMEM] = "kmem", + [STARTUP_SUB_KMEM_ALLOC] = "kmem_alloc", + [STARTUP_SUB_ZALLOC] = "zalloc", + [STARTUP_SUB_PERCPU] = "percpu", + [STARTUP_SUB_LOCKS] = "locks", + + [STARTUP_SUB_CODESIGNING] = "codesigning", + [STARTUP_SUB_OSLOG] = "oslog", + [STARTUP_SUB_MACH_IPC] = "mach_ipc", + [STARTUP_SUB_EARLY_BOOT] = "early_boot", + + /* LOCKDOWN is special and its value won't fit here. */ + }; + static startup_subsystem_id_t logged = STARTUP_SUB_NONE; + + if (subsystem <= logged) { + return; + } + + if (subsystem < sizeof(names) / sizeof(names[0]) && names[subsystem]) { + kernel_bootstrap_log(names[subsystem]); + } + logged = subsystem; } +__startup_func +void +kernel_startup_initialize_upto(startup_subsystem_id_t upto) +{ + struct startup_entry *cur = startup_entry_cur; + + assert(startup_phase < upto); + + while (cur < startup_entries_end && cur->subsystem <= upto) { + if ((startup_debug & STARTUP_DEBUG_VERBOSE) && + startup_phase >= STARTUP_SUB_KPRINTF) { + kprintf("%s[%d, rank %d]: %p(%p)\n", __func__, + cur->subsystem, cur->rank, cur->func, cur->arg); + } + startup_phase = cur->subsystem - 1; + kernel_startup_log(cur->subsystem); + cur->func(cur->arg); + startup_entry_cur = ++cur; + } + kernel_startup_log(upto); + + if ((startup_debug & STARTUP_DEBUG_VERBOSE) && + upto >= STARTUP_SUB_KPRINTF) { + kprintf("%s: reached phase %d\n", __func__, upto); + } + startup_phase = upto; +} void kernel_bootstrap(void) @@ -261,32 +377,21 @@ kernel_bootstrap(void) printf("%s\n", version); /* log kernel version */ - if (PE_parse_boot_argn("-l", namep, sizeof(namep))) { /* leaks logging */ - log_leaks = 1; - } - - PE_parse_boot_argn("trace", &new_nkdbufs, sizeof(new_nkdbufs)); - PE_parse_boot_argn("trace_wake", &wake_nkdbufs, sizeof(wake_nkdbufs)); - PE_parse_boot_argn("trace_panic", &write_trace_on_panic, sizeof(write_trace_on_panic)); - PE_parse_boot_argn("trace_wrap", &trace_wrap, sizeof(trace_wrap)); - scale_setup(); kernel_bootstrap_log("vm_mem_bootstrap"); vm_mem_bootstrap(); - kernel_bootstrap_log("cs_init"); - cs_init(); - - kernel_bootstrap_log("vm_mem_init"); - vm_mem_init(); - machine_info.memory_size = (uint32_t)mem_size; +#if XNU_TARGET_OS_OSX + machine_info.max_mem = max_mem_actual; +#else machine_info.max_mem = max_mem; +#endif /* XNU_TARGET_OS_OSX 
*/ machine_info.major_version = version_major; machine_info.minor_version = version_minor; - oslog_init(); + kernel_startup_initialize_upto(STARTUP_SUB_OSLOG); #if KASAN kernel_bootstrap_log("kasan_late_init"); @@ -298,11 +403,6 @@ kernel_bootstrap(void) telemetry_init(); #endif -#if CONFIG_CSR - kernel_bootstrap_log("csr_init"); - csr_init(); -#endif - if (PE_i_can_has_debugger(NULL)) { if (PE_parse_boot_argn("-show_pointers", &namep, sizeof(namep))) { doprnt_hide_pointers = FALSE; @@ -322,22 +422,15 @@ kernel_bootstrap(void) kernel_bootstrap_log("sched_init"); sched_init(); - kernel_bootstrap_log("ltable_bootstrap"); - ltable_bootstrap(); - kernel_bootstrap_log("waitq_bootstrap"); waitq_bootstrap(); - kernel_bootstrap_log("ipc_bootstrap"); - ipc_bootstrap(); - #if CONFIG_MACF kernel_bootstrap_log("mac_policy_init"); mac_policy_init(); #endif - kernel_bootstrap_log("ipc_init"); - ipc_init(); + kernel_startup_initialize_upto(STARTUP_SUB_MACH_IPC); /* * As soon as the virtual memory system is up, we record @@ -358,11 +451,13 @@ kernel_bootstrap(void) kernel_bootstrap_log("clock_init"); clock_init(); - ledger_init(); - /* * Initialize the IPC, task, and thread subsystems. */ +#if CONFIG_THREAD_GROUPS + kernel_bootstrap_log("thread_group_init"); + thread_group_init(); +#endif #if CONFIG_COALITIONS kernel_bootstrap_log("coalitions_init"); @@ -406,6 +501,7 @@ kernel_bootstrap(void) host_statistics_init(); /* initialize exceptions */ + kernel_bootstrap_log("exception_init"); exception_init(); #if CONFIG_SCHED_SFI @@ -439,13 +535,11 @@ kernel_bootstrap(void) /*NOTREACHED*/ } -int kth_started = 0; - -vm_offset_t vm_kernel_addrperm; -vm_offset_t buf_kernel_addrperm; -vm_offset_t vm_kernel_addrperm_ext; -uint64_t vm_kernel_addrhash_salt; -uint64_t vm_kernel_addrhash_salt_ext; +SECURITY_READ_ONLY_LATE(vm_offset_t) vm_kernel_addrperm; +SECURITY_READ_ONLY_LATE(vm_offset_t) buf_kernel_addrperm; +SECURITY_READ_ONLY_LATE(vm_offset_t) vm_kernel_addrperm_ext; +SECURITY_READ_ONLY_LATE(uint64_t) vm_kernel_addrhash_salt; +SECURITY_READ_ONLY_LATE(uint64_t) vm_kernel_addrhash_salt_ext; /* * Now running in a thread. Kick off other services, @@ -456,7 +550,6 @@ kernel_bootstrap_thread(void) { processor_t processor = current_processor(); -#define kernel_bootstrap_thread_kprintf(x...) /* kprintf("kernel_bootstrap_thread: " x) */ kernel_bootstrap_thread_log("idle_thread_create"); /* * Create the idle processor thread. @@ -487,6 +580,13 @@ kernel_bootstrap_thread(void) kernel_bootstrap_thread_log("thread_call_initialize"); thread_call_initialize(); + /* + * Work interval subsystem initialization. + * Needs to be done once thread calls have been initialized. + */ + kernel_bootstrap_thread_log("work_interval_initialize"); + work_interval_subsystem_init(); + /* * Remain on current processor as * additional processors come online. @@ -517,16 +617,6 @@ kernel_bootstrap_thread(void) */ device_service_create(); - kth_started = 1; - -#if (defined(__i386__) || defined(__x86_64__)) && NCOPY_WINDOWS > 0 - /* - * Create and initialize the physical copy window for processor 0 - * This is required before starting kicking off IOKit. 
- */ - cpu_physwindow_init(0); -#endif - phys_carveout_init(); #if MACH_KDP @@ -542,11 +632,8 @@ kernel_bootstrap_thread(void) kpc_init(); #endif -#if CONFIG_ECC_LOGGING - ecc_log_init(); -#endif - #if HYPERVISOR + kernel_bootstrap_thread_log("hv_support_init"); hv_support_init(); #endif @@ -565,17 +652,17 @@ kernel_bootstrap_thread(void) char trace_typefilter[256] = {}; PE_parse_boot_arg_str("trace_typefilter", trace_typefilter, sizeof(trace_typefilter)); - kdebug_init(new_nkdbufs, trace_typefilter, trace_wrap); +#if KPERF + kperf_init(); +#endif /* KPERF */ + kdebug_init(new_nkdbufs, trace_typefilter, + (trace_wrap ? KDOPT_WRAPPING : 0) | KDOPT_ATBOOT); #ifdef MACH_BSD kernel_bootstrap_log("bsd_early_init"); bsd_early_init(); #endif -#if defined(__arm64__) - ml_lockdown_init(); -#endif - #ifdef IOKIT kernel_bootstrap_log("PE_init_iokit"); PE_init_iokit(); @@ -587,7 +674,7 @@ kernel_bootstrap_thread(void) * Past this point, kernel subsystems that expect to operate with * interrupts or preemption enabled may begin enforcement. */ - early_boot_complete = TRUE; + kernel_startup_initialize_upto(STARTUP_SUB_EARLY_BOOT); #if INTERRUPT_MASKED_DEBUG // Reset interrupts masked timeout before we enable interrupts @@ -595,21 +682,14 @@ kernel_bootstrap_thread(void) #endif (void) spllo(); /* Allow interruptions */ -#if (defined(__i386__) || defined(__x86_64__)) && NCOPY_WINDOWS > 0 /* - * Create and initialize the copy window for processor 0 - * This also allocates window space for all other processors. - * However, this is dependent on the number of processors - so this call - * must be after IOKit has been started because IOKit performs processor - * discovery. + * This will start displaying progress to the user, start as early as possible */ - cpu_userwindow_init(0); -#endif + initialize_screen(NULL, kPEAcquireScreen); /* * Initialize the shared region module. */ - vm_shared_region_init(); vm_commpage_init(); vm_commpage_text_init(); @@ -617,32 +697,28 @@ kernel_bootstrap_thread(void) kernel_bootstrap_log("mac_policy_initmach"); mac_policy_initmach(); #if CONFIG_VNGUARD + kernel_bootstrap_log("vnguard_policy_init"); vnguard_policy_init(); #endif #endif #if CONFIG_DTRACE + kernel_bootstrap_log("dtrace_early_init"); dtrace_early_init(); sdt_early_init(); #endif + kernel_startup_initialize_upto(STARTUP_SUB_LOCKDOWN); + /* * Get rid of segments used to bootstrap kext loading. This removes * the KLD, PRELINK symtab, LINKEDIT, and symtab segments/load commands. * Must be done prior to lockdown so that we can free (and possibly relocate) * the static KVA mappings used for the jettisoned bootstrap segments. */ + kernel_bootstrap_log("OSKextRemoveKextBootstrap"); OSKextRemoveKextBootstrap(); -#if defined(__arm__) || defined(__arm64__) -#if CONFIG_KERNEL_INTEGRITY - machine_lockdown_preflight(); -#endif - /* - * Finalize protections on statically mapped pages now that comm page mapping is established. - */ - arm_vm_prot_finalize(PE_state.bootArgs); -#endif /* * Initialize the globals used for permuting kernel @@ -662,8 +738,19 @@ kernel_bootstrap_thread(void) read_random(&vm_kernel_addrhash_salt, sizeof(vm_kernel_addrhash_salt)); read_random(&vm_kernel_addrhash_salt_ext, sizeof(vm_kernel_addrhash_salt_ext)); - vm_set_restrictions(); + /* No changes to kernel text and rodata beyond this point. 
*/ + kernel_bootstrap_log("machine_lockdown"); + machine_lockdown(); +#ifdef IOKIT + kernel_bootstrap_log("PE_lockdown_iokit"); + PE_lockdown_iokit(); +#endif + /* + * max_cpus must be nailed down by the time PE_lockdown_iokit() finishes, + * at the latest + */ + vm_set_restrictions(machine_info.max_cpus); #ifdef CONFIG_XNUPOST kern_return_t result = kernel_list_tests(); @@ -675,6 +762,10 @@ kernel_bootstrap_thread(void) #endif /* CONFIG_XNUPOST */ +#if KPERF + kperf_init_early(); +#endif + /* * Start the user bootstrap. */ @@ -682,10 +773,6 @@ kernel_bootstrap_thread(void) bsd_init(); #endif -#if defined (__x86_64__) - x86_64_protect_data_const(); -#endif - /* * Get rid of pages used for early boot tracing. @@ -694,7 +781,7 @@ kernel_bootstrap_thread(void) serial_keyboard_init(); /* Start serial keyboard if wanted */ - vm_page_init_local_q(); + vm_page_init_local_q(machine_info.max_cpus); thread_bind(PROCESSOR_NULL); @@ -823,21 +910,23 @@ load_context( processor->active_thread = thread; processor_state_update_explicit(processor, thread->sched_pri, - SFI_CLASS_KERNEL, PSET_SMP, thread_get_perfcontrol_class(thread), THREAD_URGENCY_NONE); + SFI_CLASS_KERNEL, PSET_SMP, thread_get_perfcontrol_class(thread), THREAD_URGENCY_NONE, + ((thread->state & TH_IDLE) || (thread->bound_processor != PROCESSOR_NULL)) ? TH_BUCKET_SCHED_MAX : thread->th_sched_bucket); processor->current_is_bound = thread->bound_processor != PROCESSOR_NULL; processor->current_is_NO_SMT = false; +#if CONFIG_THREAD_GROUPS + processor->current_thread_group = thread_group_get(thread); +#endif processor->starting_pri = thread->sched_pri; processor->deadline = UINT64_MAX; thread->last_processor = processor; - processor_up(processor); - processor->last_dispatch = mach_absolute_time(); timer_start(&thread->system_timer, processor->last_dispatch); - PROCESSOR_DATA(processor, thread_timer) = PROCESSOR_DATA(processor, kernel_timer) = &thread->system_timer; + processor->thread_timer = processor->kernel_timer = &thread->system_timer; - timer_start(&PROCESSOR_DATA(processor, system_state), processor->last_dispatch); - PROCESSOR_DATA(processor, current_state) = &PROCESSOR_DATA(processor, system_state); + timer_start(&processor->system_state, processor->last_dispatch); + processor->current_state = &processor->system_state; cpu_quiescent_counter_join(processor->last_dispatch); @@ -846,26 +935,19 @@ load_context( load_context_kprintf("machine_load_context\n"); -#if __arm__ || __arm64__ -#if __SMP__ - /* TODO: Should this be ordered? 
*/ - thread->machine.machine_thread_flags |= MACHINE_THREAD_FLAGS_ON_CPU; -#endif /* __SMP__ */ -#endif /* __arm__ || __arm64__ */ - machine_load_context(thread); /*NOTREACHED*/ } void -scale_setup() +scale_setup(void) { int scale = 0; #if defined(__LP64__) typeof(task_max) task_max_base = task_max; /* Raise limits for servers with >= 16G */ - if ((serverperfmode != 0) && ((uint64_t)sane_size >= (uint64_t)(16 * 1024 * 1024 * 1024ULL))) { + if ((serverperfmode != 0) && ((uint64_t)max_mem_actual >= (uint64_t)(16 * 1024 * 1024 * 1024ULL))) { scale = (int)((uint64_t)sane_size / (uint64_t)(8 * 1024 * 1024 * 1024ULL)); /* limit to 128 G */ if (scale > 16) { @@ -873,12 +955,12 @@ scale_setup() } task_max_base = 2500; /* Raise limits for machines with >= 3GB */ - } else if ((uint64_t)sane_size >= (uint64_t)(3 * 1024 * 1024 * 1024ULL)) { - if ((uint64_t)sane_size < (uint64_t)(8 * 1024 * 1024 * 1024ULL)) { + } else if ((uint64_t)max_mem_actual >= (uint64_t)(3 * 1024 * 1024 * 1024ULL)) { + if ((uint64_t)max_mem_actual < (uint64_t)(8 * 1024 * 1024 * 1024ULL)) { scale = 2; } else { /* limit to 64GB */ - scale = MIN(16, (int)((uint64_t)sane_size / (uint64_t)(4 * 1024 * 1024 * 1024ULL))); + scale = MIN(16, (int)((uint64_t)max_mem_actual / (uint64_t)(4 * 1024 * 1024 * 1024ULL))); } } @@ -892,9 +974,4 @@ scale_setup() #endif bsd_scale_setup(scale); - - ipc_space_max = SPACE_MAX; - ipc_port_max = PORT_MAX; - ipc_pset_max = SET_MAX; - semaphore_max = SEMAPHORE_MAX; } diff --git a/osfmk/kern/startup.h b/osfmk/kern/startup.h index 946454e83..bd574100a 100644 --- a/osfmk/kern/startup.h +++ b/osfmk/kern/startup.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2008 Apple Inc. All rights reserved. + * Copyright (c) 2000-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -34,43 +34,302 @@ #ifndef _KERN_STARTUP_H_ #define _KERN_STARTUP_H_ -#include +#include +#include + +#include + __BEGIN_DECLS +#pragma GCC visibility push(hidden) + +/*! + * @enum startup_subsystem_id_t + * + * @abstract + * Represents a stage of kernel initialization, and allows for subsystems + * to register initializers for a specific stage. + * + * @discussion + * Documentation of each subsystem initialization sequence exists in + * @file doc/startup.md. + */ +__enum_decl(startup_subsystem_id_t, uint32_t, { + STARTUP_SUB_NONE = 0, /**< reserved for the startup subsystem */ + + STARTUP_SUB_TUNABLES, /**< support for the tunables subsystem */ + STARTUP_SUB_LOCKS_EARLY, /**< early locking, before zalloc */ + STARTUP_SUB_KPRINTF, /**< kprintf initialization */ + + STARTUP_SUB_PMAP_STEAL, /**< to perform various pmap carveouts */ + STARTUP_SUB_VM_KERNEL, /**< once the kernel VM is ready */ + STARTUP_SUB_KMEM, /**< once kmem is ready */ + STARTUP_SUB_KMEM_ALLOC, /**< once kmem_alloc is ready */ + STARTUP_SUB_ZALLOC, /**< initialize zalloc and kalloc */ + STARTUP_SUB_PERCPU, /**< initialize the percpu subsystem */ + STARTUP_SUB_LOCKS, /**< various subsystem locks */ + + STARTUP_SUB_CODESIGNING, /**< codesigning subsystem */ + STARTUP_SUB_OSLOG, /**< oslog and kernel logging */ + STARTUP_SUB_MACH_IPC, /**< Mach IPC */ + STARTUP_SUB_EARLY_BOOT, /**< interrupts/preemption are turned on */ + + STARTUP_SUB_LOCKDOWN = ~0u, /**< reserved for the startup subsystem */ +}); + +/*! + * Stores the last subsystem to have been fully initialized; + */ +extern startup_subsystem_id_t startup_phase; + +/*! + * @enum startup_debug_t + * + * @abstract + * Flags set in the @c startup_debug global to configure startup debugging.
+ */ +__options_decl(startup_debug_t, uint32_t, { + STARTUP_DEBUG_NONE = 0x00000000, + STARTUP_DEBUG_VERBOSE = 0x00000001, +}); + +#if DEBUG || DEVELOPMENT +extern startup_debug_t startup_debug; +#else +#define startup_debug STARTUP_DEBUG_NONE +#endif + +/*! + * @enum startup_rank + * + * @abstract + * Specifies in which rank a given initializer runs within a given section + * to register initializers for a specific rank within the subsystem. + * + * @description + * A startup function, declared with @c STARTUP or @c STARTUP_ARG, can specify + * an rank within the subsystem they initialize. + * + * @c STARTUP_RANK_NTH(n) will let callbacks be run at stage @c n (0-based). + * + * @c STARTUP_RANK_FIRST, @c STARTUP_RANK_SECOND, @c STARTUP_RANK_THIRD and + * @c STARTUP_RANK_FOURTH are given as conveniency names for these. + * + * @c STARTUP_RANK_MIDDLE is a reserved value that will let startup functions + * run after all the @c STARTUP_RANK_NTH(n) ones have. + * + * @c STARTUP_RANK_NTH_LATE_NTH(n) will let callbacks be run then in @c n rank + * after the @c STARTUP_RANK_MIDDLE ones (0-based). + * + * @c STARTUP_RANK_LAST callbacks will run absolutely last after everything + * else did for this subsystem. + */ +__enum_decl(startup_rank_t, uint32_t, { +#define STARTUP_RANK_NTH(n) \ + (enum startup_rank)(n) + STARTUP_RANK_FIRST = 0, + STARTUP_RANK_SECOND = 1, + STARTUP_RANK_THIRD = 2, + STARTUP_RANK_FOURTH = 3, + + STARTUP_RANK_MIDDLE = 0x7fffffff, + +#define STARTUP_RANK_LATE_NTH(n) \ + (enum startup_rank)(STARTUP_RANK_MIDDLE + 1 + (n)) + + STARTUP_RANK_LAST = 0xffffffff, +}); + +#if KASAN +/* + * The use of weird sections that get unmapped confuse the hell out of kasan, + * so for KASAN leave things in regular __TEXT/__DATA segments + */ +#define STARTUP_CODE_SEGSECT "__TEXT,__text" +#define STARTUP_DATA_SEGSECT "__DATA,__init" +#define STARTUP_HOOK_SEGMENT "__DATA" +#define STARTUP_HOOK_SECTION "__init_entry_set" +#elif defined(__x86_64__) +/* Intel doesn't have a __BOOTDATA but doesn't protect __KLD */ +#define STARTUP_CODE_SEGSECT "__TEXT,__text" +#define STARTUP_DATA_SEGSECT "__KLD,__init" +#define STARTUP_HOOK_SEGMENT "__KLD" +#define STARTUP_HOOK_SECTION "__init_entry_set" +#else +/* arm protects __KLD early, so use __BOOTDATA for data */ +#define STARTUP_CODE_SEGSECT "__TEXT,__text" +#define STARTUP_DATA_SEGSECT "__BOOTDATA,__init" +#define STARTUP_HOOK_SEGMENT "__BOOTDATA" +#define STARTUP_HOOK_SECTION "__init_entry_set" +#endif + +/*! + * @macro __startup_func + * + * @abstract + * Attribute to place on functions used only during the kernel startup phase. + * + * @description + * Code marked with this attribute will be unmapped after kernel lockdown. + */ +#define __startup_func \ + __PLACE_IN_SECTION(STARTUP_CODE_SEGSECT) \ + __attribute__((noinline, visibility("hidden"))) + +/*! + * @macro __startup_data + * + * @abstract + * Attribute to place on globals used during the kernel startup phase. + * + * @description + * Data marked with this attribute will be unmapped after kernel lockdown. + */ +#define __startup_data \ + __PLACE_IN_SECTION(STARTUP_DATA_SEGSECT) + +/*! + * @macro STARTUP + * + * @abstract + * Declares a kernel startup callback. + */ +#define STARTUP(subsystem, rank, func) \ + __STARTUP(func, __LINE__, subsystem, rank, func) + +/*! + * @macro STARTUP_ARG + * + * @abstract + * Declares a kernel startup callback that takes an argument. + */ +#define STARTUP_ARG(subsystem, rank, func, arg) \ + __STARTUP_ARG(func, __LINE__, subsystem, rank, func, arg) + +/*! 
+ * @macro TUNABLE + * + * @abstract + * Declares a read-only kernel tunable that is read from a boot-arg with + * a default value, without further processing. + * + * @param type_t + * Should be an integer type or bool. + * + * @param var + * The name of the C variable to use for storage. + * + * @param key + * The name of the boot-arg to parse for initialization + * + * @param default_value + * The default value for the tunable if the boot-arg is absent. + */ +#define TUNABLE(type_t, var, key, default_value) \ + SECURITY_READ_ONLY_LATE(type_t) var = default_value; \ + __TUNABLE(type_t, var, key) + +/*! + * @macro TUNABLE_WRITEABLE + * + * @abstract + * Declares a writeable kernel tunable that is read from a boot-arg with + * a default value, without further processing. + * + * @param type_t + * Should be an integer type or bool. + * + * @param var + * The name of the C variable to use for storage. + * + * @param key + * The name of the boot-arg to parse for initialization + * + * @param default_value + * The default value for the tunable if the boot-arg is absent. + */ +#define TUNABLE_WRITEABLE(type_t, var, key, default_value) \ + type_t var = default_value; \ + __TUNABLE(type_t, var, key) + +#pragma mark - internals + +#define __TUNABLE(type_t, var, key) \ + static __startup_data char __startup_TUNABLES_name_ ## var[] = key; \ + static __startup_data struct startup_tunable_spec \ + __startup_TUNABLES_spec_ ## var = { \ + .name = __startup_TUNABLES_name_ ## var, \ + .var_addr = &var, \ + .var_len = sizeof(type_t), \ + .var_is_bool = __builtin_types_compatible_p(bool, type_t), \ + }; \ + __STARTUP_ARG(var, __LINE__, TUNABLES, STARTUP_RANK_FIRST, \ + kernel_startup_tunable_init, &__startup_TUNABLES_spec_ ## var) + + +#define __STARTUP1(name, line, subsystem, rank, func, a, b) \ + __PLACE_IN_SECTION(STARTUP_HOOK_SEGMENT "," STARTUP_HOOK_SECTION) \ + static const struct startup_entry \ + __startup_ ## subsystem ## _entry_ ## name ## _ ## line = { \ + STARTUP_SUB_ ## subsystem, \ + rank, (typeof(func(a))(*)(const void *))func, b, \ + } + +#define __STARTUP(name, line, subsystem, rank, func) \ + __STARTUP1(name, line, subsystem, rank, func, , NULL) + +#define __STARTUP_ARG(name, line, subsystem, rank, func, arg) \ + __STARTUP1(name, line, subsystem, rank, func, arg, arg) + +struct startup_entry { + startup_subsystem_id_t subsystem; + startup_rank_t rank; + void (*func)(const void *); + const void *arg; +}; + +struct startup_tunable_spec { + const char *name; + void *var_addr; + int var_len; + bool var_is_bool; +}; + /* * Kernel and machine startup declarations */ /* Initialize kernel */ -extern void kernel_early_bootstrap(void); +extern void kernel_startup_bootstrap(void); +extern void kernel_startup_initialize_upto(startup_subsystem_id_t upto); +extern void kernel_startup_tunable_init(const struct startup_tunable_spec *); extern void kernel_bootstrap(void); /* Initialize machine dependent stuff */ -extern void machine_init(void); +extern void machine_init(void); -extern void slave_main(void *machine_param); +extern void slave_main(void *machine_param); /* * The following must be implemented in machine dependent code. 
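Taken together, TUNABLE() and STARTUP() replace hand-written boot-arg parsing and the explicit *_init() calls removed from kernel_bootstrap() earlier in this patch. A hedged usage sketch; the boot-arg, variable, and initializer names below are hypothetical, and the ZALLOC subsystem and middle rank are chosen only for illustration:

    /* my_feature_enabled is parsed once from the "my_feature" boot-arg */
    TUNABLE(bool, my_feature_enabled, "my_feature", true);

    __startup_func
    static void
    my_feature_init(void)
    {
        if (my_feature_enabled) {
            /* one-time setup; this code is unmapped after lockdown */
        }
    }
    STARTUP(ZALLOC, STARTUP_RANK_MIDDLE, my_feature_init);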
*/ /* Slave cpu initialization */ -extern void slave_machine_init(void *machine_param); +extern void slave_machine_init(void *machine_param); /* Device subystem initialization */ -extern void device_service_create(void); +extern void device_service_create(void); #ifdef MACH_BSD /* BSD subsystem initialization */ -extern void bsd_init(void); +extern void bsd_init(void); extern void bsd_early_init(void); -/* codesigning subsystem initialization */ -extern void cs_init(void); - #endif /* MACH_BSD */ +#pragma GCC visibility pop + __END_DECLS #endif /* _KERN_STARTUP_H_ */ diff --git a/osfmk/kern/suid_cred.c b/osfmk/kern/suid_cred.c index b876d731c..c2a963d04 100644 --- a/osfmk/kern/suid_cred.c +++ b/osfmk/kern/suid_cred.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019 Apple Inc. All rights reserved. + * Copyright (c) 2019-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -61,8 +61,6 @@ struct ucred; extern int kauth_cred_issuser(struct ucred *); extern struct ucred *kauth_cred_get(void); -static struct zone *suid_cred_zone = NULL; - /* Data associated with the suid cred port. Consumed during posix_spawn(). */ struct suid_cred { ipc_port_t port; @@ -70,6 +68,9 @@ struct suid_cred { uint32_t uid; }; +static ZONE_DECLARE(suid_cred_zone, "suid_cred", + sizeof(struct suid_cred), ZC_NONE); + /* Allocs a new suid credential. The vnode reference will be owned by the newly * created suid_cred_t. */ static suid_cred_t @@ -142,7 +143,7 @@ convert_suid_cred_to_port(suid_cred_t sc) } if (!ipc_kobject_make_send_lazy_alloc_port(&sc->port, - (ipc_kobject_t) sc, IKOT_SUID_CRED)) { + (ipc_kobject_t) sc, IKOT_SUID_CRED, false, 0)) { suid_cred_free(sc); return IP_NULL; } @@ -190,13 +191,6 @@ suid_cred_verify(ipc_port_t port, struct vnode *vnode, uint32_t *uid) return ret; } -void -suid_cred_init(void) -{ - const size_t sc_size = sizeof(struct suid_cred); - suid_cred_zone = zinit(sc_size, 1024 * sc_size, 0, "suid_cred"); -} - kern_return_t task_create_suid_cred( task_t task, diff --git a/osfmk/kern/suid_cred.h b/osfmk/kern/suid_cred.h index ff057621a..06af66395 100644 --- a/osfmk/kern/suid_cred.h +++ b/osfmk/kern/suid_cred.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019 Apple Inc. All rights reserved. + * Copyright (c) 2019-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -38,8 +38,6 @@ struct vnode; extern ipc_port_t convert_suid_cred_to_port(suid_cred_t); -extern void suid_cred_init(void); - extern void suid_cred_notify(mach_msg_header_t *msg); extern int suid_cred_verify(ipc_port_t port, struct vnode *vnode, uint32_t *uid); diff --git a/osfmk/kern/sync_lock.c b/osfmk/kern/sync_lock.c index fe550ee59..4a0e9c05d 100644 --- a/osfmk/kern/sync_lock.c +++ b/osfmk/kern/sync_lock.c @@ -41,7 +41,6 @@ #include #include -#include #include #include #include diff --git a/osfmk/kern/sync_sema.c b/osfmk/kern/sync_sema.c index 062c9cb4b..31ea5caae 100644 --- a/osfmk/kern/sync_sema.c +++ b/osfmk/kern/sync_sema.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2009 Apple Inc. All rights reserved. + * Copyright (c) 2000-2020 Apple Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -63,8 +63,7 @@ static unsigned int semaphore_event; #define SEMAPHORE_EVENT CAST_EVENT64_T(&semaphore_event) -zone_t semaphore_zone; -unsigned int semaphore_max; +ZONE_DECLARE(semaphore_zone, "semaphores", sizeof(struct semaphore), ZC_NONE); os_refgrp_decl(static, sema_refgrp, "semaphore", NULL); @@ -111,7 +110,7 @@ semaphore_convert_wait_result( int wait_result); void -semaphore_wait_continue(void); +semaphore_wait_continue(void *arg __unused, wait_result_t wr); static kern_return_t semaphore_wait_internal( @@ -134,22 +133,6 @@ semaphore_deadline( return abstime; } -/* - * ROUTINE: semaphore_init [private] - * - * Initialize the semaphore mechanisms. - * Right now, we only need to initialize the semaphore zone. - */ -void -semaphore_init(void) -{ - semaphore_zone = zinit(sizeof(struct semaphore), - semaphore_max * sizeof(struct semaphore), - sizeof(struct semaphore), - "semaphores"); - zone_change(semaphore_zone, Z_NOENCRYPT, TRUE); -} - /* * Routine: semaphore_create * @@ -160,15 +143,14 @@ kern_return_t semaphore_create( task_t task, semaphore_t *new_semaphore, - int policy, - int value) + int policy, + int value) { - semaphore_t s = SEMAPHORE_NULL; + semaphore_t s = SEMAPHORE_NULL; kern_return_t kret; - *new_semaphore = SEMAPHORE_NULL; - if (task == TASK_NULL || value < 0 || policy > SYNC_POLICY_MAX) { + if (task == TASK_NULL || value < 0 || policy > SYNC_POLICY_MAX || policy < 0) { return KERN_INVALID_ARGUMENT; } @@ -410,12 +392,15 @@ semaphore_signal_internal( } if (semaphore->count < 0) { + waitq_options_t wq_option = (options & SEMAPHORE_THREAD_HANDOFF) ? + WQ_OPTION_HANDOFF : WQ_OPTION_NONE; kr = waitq_wakeup64_one_locked( &semaphore->waitq, SEMAPHORE_EVENT, THREAD_AWAKENED, NULL, WAITQ_ALL_PRIORITIES, - WAITQ_KEEP_LOCKED); + WAITQ_KEEP_LOCKED, + wq_option); if (kr == KERN_SUCCESS) { semaphore_unlock(semaphore); splx(spl_level); @@ -653,10 +638,9 @@ semaphore_convert_wait_result(int wait_result) * It returns directly to user space. 
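As with suid_cred_zone above, semaphore_zone is now registered at compile time with ZONE_DECLARE() instead of being created by a semaphore_init()/zinit() pair at boot. A generic sketch of the pattern, with my_zone and struct my_obj as placeholder names:

    static ZONE_DECLARE(my_zone, "my objects", sizeof(struct my_obj), ZC_NONE);

    static struct my_obj *
    my_obj_alloc(void)
    {
        /* the zone is usable once the zalloc startup phase has run;
         * elements are returned with zfree(my_zone, obj) */
        return zalloc_flags(my_zone, Z_WAITOK | Z_ZERO);
    }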
*/ void -semaphore_wait_continue(void) +semaphore_wait_continue(void *arg __unused, wait_result_t wr) { thread_t self = current_thread(); - int wait_result = self->wait_result; void (*caller_cont)(kern_return_t) = self->sth_continuation; assert(self->sth_waitsemaphore != SEMAPHORE_NULL); @@ -665,8 +649,9 @@ semaphore_wait_continue(void) semaphore_dereference(self->sth_signalsemaphore); } + assert(self->handoff_thread == THREAD_NULL); assert(caller_cont != (void (*)(kern_return_t))0); - (*caller_cont)(semaphore_convert_wait_result(wait_result)); + (*caller_cont)(semaphore_convert_wait_result(wr)); } /* @@ -694,6 +679,10 @@ semaphore_wait_internal( spl_level = splsched(); semaphore_lock(wait_semaphore); + thread_t self = current_thread(); + thread_t handoff_thread = THREAD_NULL; + thread_handoff_option_t handoff_option = THREAD_HANDOFF_NONE; + int semaphore_signal_options = SEMAPHORE_SIGNAL_PREPOST; if (!wait_semaphore->active) { kr = KERN_TERMINATED; @@ -703,8 +692,6 @@ semaphore_wait_internal( } else if (option & SEMAPHORE_TIMEOUT_NOBLOCK) { kr = KERN_OPERATION_TIMED_OUT; } else { - thread_t self = current_thread(); - wait_semaphore->count = -1; /* we don't keep an actual count */ thread_set_pending_block_hint(self, kThreadWaitSemaphore); @@ -715,6 +702,8 @@ semaphore_wait_internal( TIMEOUT_URGENCY_USER_NORMAL, deadline, TIMEOUT_NO_LEEWAY, self); + + semaphore_signal_options |= SEMAPHORE_THREAD_HANDOFF; } semaphore_unlock(wait_semaphore); splx(spl_level); @@ -732,10 +721,10 @@ semaphore_wait_internal( * our intention to wait above). */ signal_kr = semaphore_signal_internal(signal_semaphore, - THREAD_NULL, - SEMAPHORE_SIGNAL_PREPOST); + THREAD_NULL, semaphore_signal_options); if (signal_kr == KERN_NOT_WAITING) { + assert(self->handoff_thread == THREAD_NULL); signal_kr = KERN_SUCCESS; } else if (signal_kr == KERN_TERMINATED) { /* @@ -751,8 +740,7 @@ semaphore_wait_internal( * (most important) result. Otherwise, * return the KERN_TERMINATED status. */ - thread_t self = current_thread(); - + assert(self->handoff_thread == THREAD_NULL); clear_wait(self, THREAD_INTERRUPTED); kr = semaphore_convert_wait_result(self->wait_result); if (kr == KERN_ABORTED) { @@ -766,27 +754,34 @@ semaphore_wait_internal( * return now that we have signalled the signal semaphore. */ if (kr != KERN_ALREADY_WAITING) { + assert(self->handoff_thread == THREAD_NULL); return kr; } + if (self->handoff_thread) { + handoff_thread = self->handoff_thread; + self->handoff_thread = THREAD_NULL; + handoff_option = THREAD_HANDOFF_SETRUN_NEEDED; + } /* * Now, we can block. If the caller supplied a continuation * pointer of his own for after the block, block with the - * appropriate semaphore continuation. Thiswill gather the + * appropriate semaphore continuation. This will gather the * semaphore results, release references on the semaphore(s), * and then call the caller's continuation. 
*/ if (caller_cont) { - thread_t self = current_thread(); - self->sth_continuation = caller_cont; self->sth_waitsemaphore = wait_semaphore; self->sth_signalsemaphore = signal_semaphore; - wait_result = thread_block((thread_continue_t)semaphore_wait_continue); + + thread_handoff_parameter(handoff_thread, semaphore_wait_continue, + NULL, handoff_option); } else { - wait_result = thread_block(THREAD_CONTINUE_NULL); + wait_result = thread_handoff_deallocate(handoff_thread, handoff_option); } + assert(self->handoff_thread == THREAD_NULL); return semaphore_convert_wait_result(wait_result); } @@ -1210,7 +1205,8 @@ kdp_sema_find_owner(struct waitq * waitq, __assert_only event64_t event, thread_ { semaphore_t sem = WAITQ_TO_SEMA(waitq); assert(event == SEMAPHORE_EVENT); - assert(kdp_is_in_zone(sem, "semaphores")); + + zone_require(semaphore_zone, sem); waitinfo->context = VM_KERNEL_UNSLIDE_OR_PERM(sem->port); if (sem->owner) { diff --git a/osfmk/kern/sync_sema.h b/osfmk/kern/sync_sema.h index bb2f0a174..03b6d6d08 100644 --- a/osfmk/kern/sync_sema.h +++ b/osfmk/kern/sync_sema.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2007 Apple Inc. All rights reserved. + * Copyright (c) 2000-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -61,8 +61,6 @@ typedef struct semaphore { #define semaphore_lock(semaphore) waitq_lock(&(semaphore)->waitq) #define semaphore_unlock(semaphore) waitq_unlock(&(semaphore)->waitq) -extern void semaphore_init(void); - extern void semaphore_reference(semaphore_t semaphore); extern void semaphore_dereference(semaphore_t semaphore); extern void semaphore_destroy_all(task_t task); diff --git a/osfmk/kern/syscall_emulation.c b/osfmk/kern/syscall_emulation.c index b2de9dfb8..a7ce9c946 100644 --- a/osfmk/kern/syscall_emulation.c +++ b/osfmk/kern/syscall_emulation.c @@ -64,7 +64,6 @@ #include #include #include -#include #include #include diff --git a/osfmk/kern/syscall_subr.c b/osfmk/kern/syscall_subr.c index ef2316883..4d65fe2ae 100644 --- a/osfmk/kern/syscall_subr.c +++ b/osfmk/kern/syscall_subr.c @@ -334,21 +334,25 @@ thread_yield_with_continuation( __builtin_unreachable(); } - /* This function is called after an assert_wait(), therefore it must not * cause another wait until after the thread_run() or thread_block() * + * Following are the calling convention for thread ref deallocation. + * + * 1) If no continuation is provided, then thread ref is consumed. + * (thread_handoff_deallocate convention). + * + * 2) If continuation is provided with option THREAD_HANDOFF_SETRUN_NEEDED + * then thread ref is always consumed. * - * When called with a NULL continuation, the thread ref is consumed - * (thread_handoff_deallocate calling convention) else it is up to the - * continuation to do the cleanup (thread_handoff_parameter calling convention) - * and it instead doesn't return. + * 3) If continuation is provided with option THREAD_HANDOFF_NONE then thread + * ref is not consumed and it is upto the continuation to deallocate + * the thread reference. 
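In terms of call shapes, the three conventions above reduce to the following; thread is assumed to carry a +1 reference, and my_continuation/my_arg are placeholder names:

    /* (1) no continuation: the reference on thread is always consumed */
    wait_result_t wr = thread_handoff_deallocate(thread, THREAD_HANDOFF_NONE);

    /* (2) continuation with SETRUN_NEEDED: reference consumed here, call does not return */
    thread_handoff_parameter(thread, my_continuation, my_arg,
        THREAD_HANDOFF_SETRUN_NEEDED);

    /* (3) continuation without SETRUN_NEEDED: my_continuation must drop the reference */
    thread_handoff_parameter(thread, my_continuation, my_arg, THREAD_HANDOFF_NONE);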
*/ static wait_result_t thread_handoff_internal(thread_t thread, thread_continue_t continuation, - void *parameter) + void *parameter, thread_handoff_option_t option) { - thread_t deallocate_thread = THREAD_NULL; thread_t self = current_thread(); /* @@ -357,18 +361,19 @@ thread_handoff_internal(thread_t thread, thread_continue_t continuation, if (thread != THREAD_NULL) { spl_t s = splsched(); - thread_t pulled_thread = thread_run_queue_remove_for_handoff(thread); + thread_t pulled_thread = thread_prepare_for_handoff(thread, option); KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_THREAD_SWITCH) | DBG_FUNC_NONE, thread_tid(thread), thread->state, pulled_thread ? TRUE : FALSE, 0, 0); - if (pulled_thread != THREAD_NULL) { - if (continuation == NULL) { - /* We can't be dropping the last ref here */ - thread_deallocate_safe(thread); - } + /* Deallocate thread ref if needed */ + if (continuation == NULL || (option & THREAD_HANDOFF_SETRUN_NEEDED)) { + /* Use the safe version of thread deallocate */ + thread_deallocate_safe(thread); + } + if (pulled_thread != THREAD_NULL) { int result = thread_run(self, continuation, parameter, pulled_thread); splx(s); @@ -376,32 +381,25 @@ thread_handoff_internal(thread_t thread, thread_continue_t continuation, } splx(s); - - deallocate_thread = thread; - thread = THREAD_NULL; } int result = thread_block_parameter(continuation, parameter); - if (deallocate_thread != THREAD_NULL) { - thread_deallocate(deallocate_thread); - } - return result; } void thread_handoff_parameter(thread_t thread, thread_continue_t continuation, - void *parameter) + void *parameter, thread_handoff_option_t option) { - thread_handoff_internal(thread, continuation, parameter); + thread_handoff_internal(thread, continuation, parameter, option); panic("NULL continuation passed to %s", __func__); __builtin_unreachable(); } wait_result_t -thread_handoff_deallocate(thread_t thread) +thread_handoff_deallocate(thread_t thread, thread_handoff_option_t option) { - return thread_handoff_internal(thread, NULL, NULL); + return thread_handoff_internal(thread, NULL, NULL, option); } /* diff --git a/osfmk/kern/syscall_sw.c b/osfmk/kern/syscall_sw.c index 571530ed4..f6a6c7ad7 100644 --- a/osfmk/kern/syscall_sw.c +++ b/osfmk/kern/syscall_sw.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2005, 2020 Apple Computer, Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -119,7 +119,7 @@ const mach_trap_t mach_trap_table[MACH_TRAP_TABLE_COUNT] = { /* 14 */ MACH_TRAP(_kernelrpc_mach_vm_protect_trap, 5, 7, munge_wllww), /* 15 */ MACH_TRAP(_kernelrpc_mach_vm_map_trap, 6, 8, munge_wwllww), /* 16 */ MACH_TRAP(_kernelrpc_mach_port_allocate_trap, 3, 3, munge_www), -/* 17 */ MACH_TRAP(_kernelrpc_mach_port_destroy_trap, 2, 2, munge_ww), +/* 17 */ MACH_TRAP(kern_invalid, 0, 0, NULL), /* 18 */ MACH_TRAP(_kernelrpc_mach_port_deallocate_trap, 2, 2, munge_ww), /* 19 */ MACH_TRAP(_kernelrpc_mach_port_mod_refs_trap, 4, 4, munge_wwww), /* 20 */ MACH_TRAP(_kernelrpc_mach_port_move_member_trap, 3, 3, munge_www), @@ -166,7 +166,6 @@ const mach_trap_t mach_trap_table[MACH_TRAP_TABLE_COUNT] = { /* 61 */ MACH_TRAP(thread_switch, 3, 3, munge_www), /* 62 */ MACH_TRAP(clock_sleep_trap, 5, 5, munge_wwwww), /* 63 */ MACH_TRAP(kern_invalid, 0, 0, NULL), -/* traps 64 - 95 reserved (debo) */ /* 64 */ MACH_TRAP(kern_invalid, 0, 0, NULL), /* 65 */ MACH_TRAP(kern_invalid, 0, 0, NULL), /* 66 */ MACH_TRAP(kern_invalid, 0, 0, NULL), @@ -199,12 +198,11 @@ const mach_trap_t mach_trap_table[MACH_TRAP_TABLE_COUNT] = { /* 93 */ MACH_TRAP(mk_timer_arm_trap, 2, 3, munge_wl), /* 94 */ MACH_TRAP(mk_timer_cancel_trap, 2, 2, munge_ww), /* 95 */ MACH_TRAP(mk_timer_arm_leeway_trap, 4, 6, munge_wlll), -/* traps 64 - 95 reserved (debo) */ /* 96 */ MACH_TRAP(debug_control_port_for_pid, 3, 3, munge_www), /* 97 */ MACH_TRAP(kern_invalid, 0, 0, NULL), /* 98 */ MACH_TRAP(kern_invalid, 0, 0, NULL), /* 99 */ MACH_TRAP(kern_invalid, 0, 0, NULL), -/* traps 100-107 reserved for iokit (esb) */ +/* traps 100-107 reserved for IOKit */ /* 100 */ MACH_TRAP(iokit_user_client_trap, 8, 8, munge_wwwwwwww), /* 101 */ MACH_TRAP(kern_invalid, 0, 0, NULL), /* 102 */ MACH_TRAP(kern_invalid, 0, 0, NULL), @@ -213,7 +211,6 @@ const mach_trap_t mach_trap_table[MACH_TRAP_TABLE_COUNT] = { /* 105 */ MACH_TRAP(kern_invalid, 0, 0, NULL), /* 106 */ MACH_TRAP(kern_invalid, 0, 0, NULL), /* 107 */ MACH_TRAP(kern_invalid, 0, 0, NULL), -/* traps 108-127 unused */ /* 108 */ MACH_TRAP(kern_invalid, 0, 0, NULL), /* 109 */ MACH_TRAP(kern_invalid, 0, 0, NULL), /* 110 */ MACH_TRAP(kern_invalid, 0, 0, NULL), @@ -254,7 +251,7 @@ const char * mach_syscall_name_table[MACH_TRAP_TABLE_COUNT] = { /* 14 */ "_kernelrpc_mach_vm_protect_trap", /* 15 */ "_kernelrpc_mach_vm_map_trap", /* 16 */ "_kernelrpc_mach_port_allocate_trap", -/* 17 */ "_kernelrpc_mach_port_destroy_trap", +/* 17 */ "kern_invalid", /* 18 */ "_kernelrpc_mach_port_deallocate_trap", /* 19 */ "_kernelrpc_mach_port_mod_refs_trap", /* 20 */ "_kernelrpc_mach_port_move_member_trap", @@ -314,9 +311,9 @@ const char * mach_syscall_name_table[MACH_TRAP_TABLE_COUNT] = { /* 73 */ "kern_invalid", /* 74 */ "kern_invalid", /* 75 */ "kern_invalid", -/* 76 */ "kern_invalid", -/* 77 */ "_kernelrpc_mach_port_type_trap", -/* 78 */ "_kernelrpc_mach_port_request_notification_trap", +/* 76 */ "_kernelrpc_mach_port_type_trap", +/* 77 */ "_kernelrpc_mach_port_request_notification_trap", +/* 78 */ "kern_invalid", /* 79 */ "kern_invalid", /* 80 */ "kern_invalid", /* 81 */ "kern_invalid", diff --git a/osfmk/kern/task.c b/osfmk/kern/task.c index 6c1d1aa80..0a336a8f0 100644 --- a/osfmk/kern/task.c +++ b/osfmk/kern/task.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2019 Apple Inc. All rights reserved. + * Copyright (c) 2000-2020 Apple Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -167,10 +167,6 @@ #include -#if CONFIG_ATM -#include -#endif - #include /* picks up ledger.h */ #if CONFIG_MACF @@ -184,24 +180,36 @@ extern int kpc_force_all_ctrs(task_t, int); #endif SECURITY_READ_ONLY_LATE(task_t) kernel_task; -SECURITY_READ_ONLY_LATE(zone_t) task_zone; -lck_attr_t task_lck_attr; -lck_grp_t task_lck_grp; -lck_grp_attr_t task_lck_grp_attr; + +static SECURITY_READ_ONLY_LATE(zone_t) task_zone; +ZONE_INIT(&task_zone, "tasks", sizeof(struct task), + ZC_NOENCRYPT | ZC_ZFREE_CLEARMEM, + ZONE_ID_TASK, NULL); extern int exc_via_corpse_forking; extern int corpse_for_fatal_memkill; extern boolean_t proc_send_synchronous_EXC_RESOURCE(void *p); +extern void task_disown_frozen_csegs(task_t owner_task); /* Flag set by core audio when audio is playing. Used to stifle EXC_RESOURCE generation when active. */ int audio_active = 0; +/* + * structure for tracking zone usage + * Used either one per task/thread for all zones or . + */ +typedef struct zinfo_usage_store_t { + /* These fields may be updated atomically, and so must be 8 byte aligned */ + uint64_t alloc __attribute__((aligned(8))); /* allocation counter */ + uint64_t free __attribute__((aligned(8))); /* free counter */ +} zinfo_usage_store_t; + zinfo_usage_store_t tasks_tkm_private; zinfo_usage_store_t tasks_tkm_shared; /* A container to accumulate statistics for expired tasks */ expired_task_statistics_t dead_task_statistics; -lck_spin_t dead_task_statistics_lock; +LCK_SPIN_DECLARE_ATTR(dead_task_statistics_lock, &task_lck_grp, &task_lck_attr); ledger_template_t task_ledger_template = NULL; @@ -244,9 +252,9 @@ SECURITY_READ_ONLY_LATE(struct _task_ledger_indices) task_ledgers __attribute__( .neural_footprint_compressed = -1, .platform_idle_wakeups = -1, .interrupt_wakeups = -1, -#if !CONFIG_EMBEDDED +#if CONFIG_SCHED_SFI .sfi_wait_times = { 0 /* initialized at runtime */}, -#endif /* !CONFIG_EMBEDDED */ +#endif /* CONFIG_SCHED_SFI */ .cpu_time_billed_to_me = -1, .cpu_time_billed_to_others = -1, .physical_writes = -1, @@ -258,8 +266,15 @@ SECURITY_READ_ONLY_LATE(struct _task_ledger_indices) task_ledgers __attribute__( .pages_grabbed_iopl = -1, .pages_grabbed_upl = -1, #endif +#if CONFIG_FREEZE + .frozen_to_swap = -1, +#endif /* CONFIG_FREEZE */ .energy_billed_to_me = -1, - .energy_billed_to_others = -1}; + .energy_billed_to_others = -1, +#if CONFIG_PHYS_WRITE_ACCT + .fs_metadata_writes = -1, +#endif /* CONFIG_PHYS_WRITE_ACCT */ +}; /* System sleep state */ boolean_t tasks_suspend_state; @@ -301,12 +316,12 @@ extern kern_return_t thread_resume(thread_t thread); int task_wakeups_monitor_interval; /* In seconds. Time period over which wakeups rate is observed */ int task_wakeups_monitor_rate; /* In hz. Maximum allowable wakeups per task before EXC_RESOURCE is sent */ -int task_wakeups_monitor_ustackshots_trigger_pct; /* Percentage. Level at which we start gathering telemetry. */ +unsigned int task_wakeups_monitor_ustackshots_trigger_pct; /* Percentage. Level at which we start gathering telemetry. */ int disable_exc_resource; /* Global override to supress EXC_RESOURCE for resource monitor violations. 
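The 8-byte alignment called out in zinfo_usage_store_t is what makes lock-free updates of these counters safe on all supported architectures. A one-line sketch of such an update (the 256-byte size is an arbitrary example):

    /* account a 256-byte private kernel allocation to the global counter */
    os_atomic_add(&tasks_tkm_private.alloc, 256ULL, relaxed);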
*/ ledger_amount_t max_task_footprint = 0; /* Per-task limit on physical memory consumption in bytes */ -int max_task_footprint_warning_level = 0; /* Per-task limit warning percentage */ +unsigned int max_task_footprint_warning_level = 0; /* Per-task limit warning percentage */ int max_task_footprint_mb = 0; /* Per-task limit on physical memory consumption in megabytes */ /* I/O Monitor Limits */ @@ -336,15 +351,15 @@ int hwm_user_cores = 0; /* high watermark violations generate user core files */ #endif #ifdef MACH_BSD -extern uint32_t proc_platform(struct proc *); -extern uint32_t proc_sdk(struct proc *); +extern uint32_t proc_platform(const struct proc *); +extern uint32_t proc_min_sdk(struct proc *); extern void proc_getexecutableuuid(void *, unsigned char *, unsigned long); extern int proc_pid(struct proc *p); extern int proc_selfpid(void); extern struct proc *current_proc(void); extern char *proc_name_address(struct proc *p); extern uint64_t get_dispatchqueue_offset_from_proc(void *); -extern int kevent_proc_copy_uptrs(void *proc, uint64_t *buf, int bufsize); +extern int kevent_proc_copy_uptrs(void *proc, uint64_t *buf, uint32_t bufsize); extern void workq_proc_suspended(struct proc *p); extern void workq_proc_resumed(struct proc *p); @@ -353,7 +368,7 @@ extern void proc_memstat_terminated(struct proc* p, boolean_t set); extern void memorystatus_on_ledger_footprint_exceeded(int warning, boolean_t memlimit_is_active, boolean_t memlimit_is_fatal); extern void memorystatus_log_exception(const int max_footprint_mb, boolean_t memlimit_is_active, boolean_t memlimit_is_fatal); extern boolean_t memorystatus_allowed_vm_map_fork(task_t task); -extern uint64_t memorystatus_available_memory_internal(proc_t p); +extern uint64_t memorystatus_available_memory_internal(struct proc *p); #if DEVELOPMENT || DEBUG extern void memorystatus_abort_vm_map_fork(task_t); @@ -459,31 +474,6 @@ task_set_64bit( thread_mtx_lock(thread); machine_thread_switch_addrmode(thread); thread_mtx_unlock(thread); - -#if defined(__arm64__) - /* specifically, if running on H9 */ - if (thread == current_thread()) { - uint64_t arg1, arg2; - int urgency; - spl_t spl = splsched(); - /* - * This call tell that the current thread changed it's 32bitness. - * Other thread were no more on core when 32bitness was changed, - * but current_thread() is on core and the previous call to - * machine_thread_going_on_core() gave 32bitness which is now wrong. - * - * This is needed for bring-up, a different callback should be used - * in the future. 
- * - * TODO: Remove this callout when we no longer support 32-bit code on H9 - */ - thread_lock(thread); - urgency = thread_get_urgency(thread, &arg1, &arg2); - machine_thread_going_on_core(thread, urgency, 0, 0, mach_approximate_time()); - thread_unlock(thread); - splx(spl); - } -#endif /* defined(__arm64__) */ } #endif /* defined(__x86_64__) || defined(__arm64__) */ @@ -562,17 +552,6 @@ task_set_mach_header_address( task_unlock(task); } -void -task_atm_reset(__unused task_t task) -{ -#if CONFIG_ATM - if (task->atm_context != NULL) { - atm_task_descriptor_destroy(task->atm_context); - task->atm_context = NULL; - } -#endif -} - void task_bank_reset(__unused task_t task) { @@ -752,7 +731,7 @@ task_reference_internal(task_t task) void * bt[TASK_REF_BTDEPTH]; int numsaved = 0; - zone_require(task, task_zone); + task_require(task); os_ref_retain(&task->ref_count); numsaved = OSBacktrace(bt, TASK_REF_BTDEPTH); @@ -778,24 +757,6 @@ task_deallocate_internal(task_t task) void task_init(void) { - lck_grp_attr_setdefault(&task_lck_grp_attr); - lck_grp_init(&task_lck_grp, "task", &task_lck_grp_attr); - lck_attr_setdefault(&task_lck_attr); - lck_mtx_init(&tasks_threads_lock, &task_lck_grp, &task_lck_attr); - lck_mtx_init(&tasks_corpse_lock, &task_lck_grp, &task_lck_attr); - - task_zone = zinit( - sizeof(struct task), - task_max * sizeof(struct task), - TASK_CHUNK * sizeof(struct task), - "tasks"); - - zone_change(task_zone, Z_NOENCRYPT, TRUE); - -#if CONFIG_EMBEDDED - task_watch_init(); -#endif /* CONFIG_EMBEDDED */ - /* * Configure per-task memory limit. * The boot-arg is interpreted as Megabytes, @@ -944,6 +905,7 @@ task_init(void) #if defined(HAS_APPLE_PAC) kernel_task->rop_pid = KERNEL_ROP_ID; + kernel_task->jop_pid = ml_default_jop_pid(); // kernel_task never runs at EL0, but machine_thread_state_convert_from/to_user() relies on // disable_user_jop to be false for kernel threads (e.g. 
in exception delivery on thread_exception_daemon) ml_task_set_disable_user_jop(kernel_task, FALSE); @@ -951,7 +913,6 @@ task_init(void) vm_map_deallocate(kernel_task->map); kernel_task->map = kernel_map; - lck_spin_init(&dead_task_statistics_lock, &task_lck_grp, &task_lck_attr); } /* @@ -1123,6 +1084,9 @@ init_task_ledgers(void) task_ledgers.neural_nofootprint_compressed = ledger_entry_add(t, "neural_nofootprint_compressed", "physmem", "bytes"); task_ledgers.neural_footprint_compressed = ledger_entry_add(t, "neural_footprint_compressed", "physmem", "bytes"); +#if CONFIG_FREEZE + task_ledgers.frozen_to_swap = ledger_entry_add(t, "frozen_to_swap", "physmem", "bytes"); +#endif /* CONFIG_FREEZE */ task_ledgers.platform_idle_wakeups = ledger_entry_add(t, "platform_idle_wakeups", "power", "count"); @@ -1163,6 +1127,9 @@ init_task_ledgers(void) task_ledgers.physical_writes = ledger_entry_add(t, "physical_writes", "res", "bytes"); task_ledgers.logical_writes = ledger_entry_add(t, "logical_writes", "res", "bytes"); task_ledgers.logical_writes_to_external = ledger_entry_add(t, "logical_writes_to_external", "res", "bytes"); +#if CONFIG_PHYS_WRITE_ACCT + task_ledgers.fs_metadata_writes = ledger_entry_add(t, "fs_metadata_writes", "res", "bytes"); +#endif /* CONFIG_PHYS_WRITE_ACCT */ task_ledgers.energy_billed_to_me = ledger_entry_add(t, "energy_billed_to_me", "power", "nj"); task_ledgers.energy_billed_to_others = ledger_entry_add(t, "energy_billed_to_others", "power", "nj"); @@ -1186,6 +1153,9 @@ init_task_ledgers(void) (task_ledgers.tagged_footprint < 0) || (task_ledgers.tagged_nofootprint_compressed < 0) || (task_ledgers.tagged_footprint_compressed < 0) || +#if CONFIG_FREEZE + (task_ledgers.frozen_to_swap < 0) || +#endif /* CONFIG_FREEZE */ (task_ledgers.network_volatile < 0) || (task_ledgers.network_nonvolatile < 0) || (task_ledgers.network_volatile_compressed < 0) || @@ -1208,6 +1178,9 @@ init_task_ledgers(void) (task_ledgers.physical_writes < 0) || (task_ledgers.logical_writes < 0) || (task_ledgers.logical_writes_to_external < 0) || +#if CONFIG_PHYS_WRITE_ACCT + (task_ledgers.fs_metadata_writes < 0) || +#endif /* CONFIG_PHYS_WRITE_ACCT */ (task_ledgers.energy_billed_to_me < 0) || (task_ledgers.energy_billed_to_others < 0) ) { @@ -1231,6 +1204,7 @@ init_task_ledgers(void) ledger_track_credit_only(t, task_ledgers.pages_grabbed_iopl); ledger_track_credit_only(t, task_ledgers.pages_grabbed_upl); #endif + ledger_track_credit_only(t, task_ledgers.tagged_nofootprint); ledger_track_credit_only(t, task_ledgers.tagged_footprint); ledger_track_credit_only(t, task_ledgers.tagged_nofootprint_compressed); @@ -1266,6 +1240,9 @@ init_task_ledgers(void) ledger_panic_on_negative(t, task_ledgers.purgeable_nonvolatile); ledger_panic_on_negative(t, task_ledgers.purgeable_volatile_compressed); ledger_panic_on_negative(t, task_ledgers.purgeable_nonvolatile_compressed); +#if CONFIG_PHYS_WRITE_ACCT + ledger_panic_on_negative(t, task_ledgers.fs_metadata_writes); +#endif /* CONFIG_PHYS_WRITE_ACCT */ ledger_panic_on_negative(t, task_ledgers.tagged_nofootprint); ledger_panic_on_negative(t, task_ledgers.tagged_footprint); @@ -1343,9 +1320,11 @@ task_create_internal( #if defined(HAS_APPLE_PAC) ml_task_set_rop_pid(new_task, parent_task, inherit_memory); + ml_task_set_jop_pid(new_task, parent_task, inherit_memory); ml_task_set_disable_user_jop(new_task, inherit_memory ? 
parent_task->disable_user_jop : FALSE); #endif + new_task->ledger = ledger; #if defined(CONFIG_SCHED_MULTIQ) @@ -1388,9 +1367,6 @@ task_create_internal( new_task->restartable_ranges = NULL; new_task->task_exc_guard = 0; -#if CONFIG_ATM - new_task->atm_context = NULL; -#endif new_task->bank_context = NULL; #ifdef MACH_BSD @@ -1400,6 +1376,9 @@ task_create_internal( #if CONFIG_MACF new_task->crash_label = NULL; + + new_task->mach_trap_filter_mask = NULL; + new_task->mach_kobj_filter_mask = NULL; #endif #if CONFIG_MEMORYSTATUS @@ -1456,11 +1435,11 @@ task_create_internal( new_task->hv_task_target = NULL; #endif /* HYPERVISOR */ -#if CONFIG_EMBEDDED +#if CONFIG_TASKWATCH queue_init(&new_task->task_watchers); new_task->num_taskwatchers = 0; new_task->watchapplying = 0; -#endif /* CONFIG_EMBEDDED */ +#endif /* CONFIG_TASKWATCH */ new_task->mem_notify_reserved = 0; new_task->memlimit_attrs_reserved = 0; @@ -1478,6 +1457,15 @@ task_create_internal( shared_region = vm_shared_region_get(parent_task); vm_shared_region_set(new_task, shared_region); +#if __has_feature(ptrauth_calls) + /* use parent's shared_region_id */ + char *shared_region_id = task_get_vm_shared_region_id_and_jop_pid(parent_task, NULL); + if (shared_region_id != NULL) { + shared_region_key_alloc(shared_region_id, FALSE, 0); /* get a reference */ + } + task_set_shared_region_id(new_task, shared_region_id); +#endif /* __has_feature(ptrauth_calls) */ + if (task_has_64Bit_addr(parent_task)) { task_set_64Bit_addr(new_task); } @@ -1496,11 +1484,17 @@ task_create_internal( new_task->pset_hint = parent_task->pset_hint = task_choose_pset(parent_task); -#if DEBUG || DEVELOPMENT if (parent_task->t_flags & TF_NO_SMT) { new_task->t_flags |= TF_NO_SMT; } -#endif + + if (parent_task->t_flags & TF_TECS) { + new_task->t_flags |= TF_TECS; + } + + if (parent_task->t_flags & TF_FILTER_MSG) { + new_task->t_flags |= TF_FILTER_MSG; + } new_task->priority = BASEPRI_DEFAULT; new_task->max_priority = MAXPRI_USER; @@ -1539,9 +1533,9 @@ task_create_internal( } /* Allocate I/O Statistics */ - new_task->task_io_stats = (io_stat_info_t)kalloc(sizeof(struct io_stat_info)); + new_task->task_io_stats = kheap_alloc(KHEAP_DATA_BUFFERS, + sizeof(struct io_stat_info), Z_WAITOK | Z_ZERO); assert(new_task->task_io_stats != NULL); - bzero(new_task->task_io_stats, sizeof(struct io_stat_info)); bzero(&(new_task->cpu_time_eqos_stats), sizeof(new_task->cpu_time_eqos_stats)); bzero(&(new_task->cpu_time_rqos_stats), sizeof(new_task->cpu_time_rqos_stats)); @@ -1588,6 +1582,9 @@ task_create_internal( new_task->task_writes_counters_external.task_deferred_writes = 0; new_task->task_writes_counters_external.task_invalidated_writes = 0; new_task->task_writes_counters_external.task_metadata_writes = 0; +#if CONFIG_PHYS_WRITE_ACCT + new_task->task_fs_metadata_writes = 0; +#endif /* CONFIG_PHYS_WRITE_ACCT */ new_task->task_energy = 0; #if MONOTONIC @@ -1642,6 +1639,11 @@ task_create_internal( new_task->task_objects_disowned = FALSE; new_task->task_owned_objects = 0; queue_init(&new_task->task_objq); + +#if CONFIG_FREEZE + queue_init(&new_task->task_frozen_cseg_q); +#endif /* CONFIG_FREEZE */ + task_objq_lock_init(new_task); #if __arm64__ @@ -1729,6 +1731,9 @@ task_rollup_accounting_info(task_t to_task, task_t from_task) to_task->task_writes_counters_external.task_deferred_writes = from_task->task_writes_counters_external.task_deferred_writes; to_task->task_writes_counters_external.task_invalidated_writes = from_task->task_writes_counters_external.task_invalidated_writes; 
to_task->task_writes_counters_external.task_metadata_writes = from_task->task_writes_counters_external.task_metadata_writes; +#if CONFIG_PHYS_WRITE_ACCT + to_task->task_fs_metadata_writes = from_task->task_fs_metadata_writes; +#endif /* CONFIG_PHYS_WRITE_ACCT */ to_task->task_energy = from_task->task_energy; /* Skip ledger roll up for memory accounting entries */ @@ -1804,18 +1809,14 @@ task_deallocate( terminated_tasks_count--; lck_mtx_unlock(&tasks_threads_lock); - /* - * remove the reference on atm descriptor - */ - task_atm_reset(task); - /* * remove the reference on bank context */ task_bank_reset(task); if (task->task_io_stats) { - kfree(task->task_io_stats, sizeof(struct io_stat_info)); + kheap_free(KHEAP_DATA_BUFFERS, task->task_io_stats, + sizeof(struct io_stat_info)); } /* @@ -1918,7 +1919,8 @@ task_deallocate( task_crashinfo_destroy(task->corpse_info); task->corpse_info = NULL; if (corpse_info_kernel) { - kfree(corpse_info_kernel, CORPSEINFO_ALLOCATION_SIZE); + kheap_free(KHEAP_DATA_BUFFERS, corpse_info_kernel, + CORPSEINFO_ALLOCATION_SIZE); } } #endif @@ -1931,6 +1933,7 @@ task_deallocate( #endif assert(queue_empty(&task->task_objq)); + task_objq_lock_destroy(task); zfree(task_zone, task); } @@ -1947,6 +1950,28 @@ task_name_deallocate( return task_deallocate((task_t)task_name); } +/* + * task_policy_set_deallocate: + * + * Drop a reference on a task type. + */ +void +task_policy_set_deallocate(task_policy_set_t task_policy_set) +{ + return task_deallocate((task_t)task_policy_set); +} + +/* + * task_policy_get_deallocate: + * + * Drop a reference on a task type. + */ +void +task_policy_get_deallocate(task_policy_get_t task_policy_get) +{ + return task_deallocate((task_t)task_policy_get); +} + /* * task_inspect_deallocate: * @@ -1959,6 +1984,18 @@ task_inspect_deallocate( return task_deallocate((task_t)task_inspect); } +/* + * task_read_deallocate: + * + * Drop a reference on task read port. 
+ */ +void +task_read_deallocate( + task_read_t task_read) +{ + return task_deallocate((task_t)task_read); +} + /* * task_suspension_token_deallocate: * @@ -2017,12 +2054,12 @@ task_collect_crash_info( #endif task_unlock(task); - crash_data_kernel = (void *) kalloc(CORPSEINFO_ALLOCATION_SIZE); + crash_data_kernel = kheap_alloc(KHEAP_DATA_BUFFERS, + CORPSEINFO_ALLOCATION_SIZE, Z_WAITOK | Z_ZERO); if (crash_data_kernel == NULL) { kr = KERN_RESOURCE_SHORTAGE; goto out_no_lock; } - bzero(crash_data_kernel, CORPSEINFO_ALLOCATION_SIZE); crash_data_ptr = (mach_vm_offset_t) crash_data_kernel; /* Do not get a corpse ref for corpse fork */ @@ -2038,7 +2075,8 @@ task_collect_crash_info( task_unlock(task); kr = KERN_SUCCESS; } else { - kfree(crash_data_kernel, CORPSEINFO_ALLOCATION_SIZE); + kheap_free(KHEAP_DATA_BUFFERS, crash_data_kernel, + CORPSEINFO_ALLOCATION_SIZE); kr = KERN_FAILURE; } @@ -2046,7 +2084,8 @@ task_collect_crash_info( task_crashinfo_destroy(crash_data_release); } if (crash_data_kernel_release != NULL) { - kfree(crash_data_kernel_release, CORPSEINFO_ALLOCATION_SIZE); + kheap_free(KHEAP_DATA_BUFFERS, crash_data_kernel_release, + CORPSEINFO_ALLOCATION_SIZE); } } else { task_unlock(task); @@ -2284,7 +2323,7 @@ task_port_notify(mach_msg_header_t *msg) task_t task; require_ip_active(port); - assert(IKOT_TASK == ip_kotype(port)); + assert(IKOT_TASK_CONTROL == ip_kotype(port)); task = (task_t) ip_get_kobject(port); assert(task_is_a_corpse(task)); @@ -2296,6 +2335,70 @@ task_port_notify(mach_msg_header_t *msg) task_terminate_internal(task); } +/* + * task_port_with_flavor_notify + * + * Called whenever the Mach port system detects no-senders on + * the task inspect or read port. These ports are allocated lazily and + * should be deallocated here when there are no senders remaining. + */ +void +task_port_with_flavor_notify(mach_msg_header_t *msg) +{ + mach_no_senders_notification_t *notification = (void *)msg; + ipc_port_t port = notification->not_header.msgh_remote_port; + task_t task; + mach_task_flavor_t flavor; + ipc_kobject_type_t kotype; + + ip_lock(port); + if (port->ip_srights > 0) { + ip_unlock(port); + return; + } + task = (task_t)port->ip_kobject; + kotype = ip_kotype(port); + if (task != TASK_NULL) { + assert((IKOT_TASK_READ == kotype) || (IKOT_TASK_INSPECT == kotype)); + task_reference_internal(task); + } + ip_unlock(port); + + if (task == TASK_NULL) { + /* The task is exiting or disabled; it will eventually deallocate the port */ + return; + } + + itk_lock(task); + ip_lock(port); + require_ip_active(port); + /* + * Check for a stale no-senders notification. A call to any function + * that vends out send rights to this port could resurrect it between + * this notification being generated and actually being handled here. 
+ */ + if (port->ip_srights > 0) { + ip_unlock(port); + itk_unlock(task); + task_deallocate(task); + return; + } + + if (kotype == IKOT_TASK_READ) { + flavor = TASK_FLAVOR_READ; + } else { + flavor = TASK_FLAVOR_INSPECT; + } + assert(task->itk_self[flavor] == port); + task->itk_self[flavor] = IP_NULL; + port->ip_kobject = IKOT_NONE; + ip_unlock(port); + itk_unlock(task); + task_deallocate(task); + + ipc_port_dealloc_kernel(port); +} + /* * task_wait_till_threads_terminate_locked * @@ -2389,7 +2492,7 @@ task_duplicate_map_and_threads( est_knotes = kevent_proc_copy_uptrs(p, NULL, 0); if (est_knotes > 0) { buf_size = (est_knotes + 32) * sizeof(uint64_t); - buffer = (uint64_t *) kalloc(buf_size); + buffer = kheap_alloc(KHEAP_DATA_BUFFERS, buf_size, Z_WAITOK); num_knotes = kevent_proc_copy_uptrs(p, buffer, buf_size); if (num_knotes > est_knotes + 32) { num_knotes = est_knotes + 32; @@ -2400,13 +2503,14 @@ task_duplicate_map_and_threads( active_thread_count = task->active_thread_count; if (active_thread_count == 0) { if (buffer != NULL) { - kfree(buffer, buf_size); + kheap_free(KHEAP_DATA_BUFFERS, buffer, buf_size); } task_resume_internal(task); return KERN_FAILURE; } - thread_array = (thread_t *) kalloc(sizeof(thread_t) * active_thread_count); + thread_array = kheap_alloc(KHEAP_TEMP, + sizeof(thread_t) * active_thread_count, Z_WAITOK); /* Iterate all the threads and drop the task lock before calling thread_create_with_continuation */ task_lock(task); @@ -2470,7 +2574,7 @@ task_duplicate_map_and_threads( for (i = 0; i < array_count; i++) { thread_deallocate(thread_array[i]); } - kfree(thread_array, sizeof(thread_t) * active_thread_count); + kheap_free(KHEAP_TEMP, thread_array, sizeof(thread_t) * active_thread_count); if (kr == KERN_SUCCESS) { *thread_ret = thread_return; @@ -2482,7 +2586,7 @@ task_duplicate_map_and_threads( thread_deallocate(thread_return); } if (buffer != NULL) { - kfree(buffer, buf_size); + kheap_free(KHEAP_DATA_BUFFERS, buffer, buf_size); } } @@ -2495,6 +2599,9 @@ extern void task_set_can_use_secluded_mem_locked( boolean_t can_use_secluded_mem); #endif /* CONFIG_SECLUDED_MEMORY */ +#if MACH_ASSERT +int debug4k_panic_on_terminate = 0; +#endif /* MACH_ASSERT */ kern_return_t task_terminate_internal( task_t task) @@ -2617,13 +2724,13 @@ task_terminate_internal( // PR-17045188: Revisit implementation // task_partial_reap(task, pid); -#if CONFIG_EMBEDDED +#if CONFIG_TASKWATCH /* * remove all task watchers */ task_removewatchers(task); -#endif /* CONFIG_EMBEDDED */ +#endif /* CONFIG_TASKWATCH */ /* * Destroy all synchronizers owned by the task. 
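The buffer handling in the hunks above follows the conversion applied throughout task.c in this patch: raw kalloc()/bzero()/kfree() triples become typed kalloc-heap calls. The shape of the conversion, with buf and size as placeholders:

    /* before: buf = kalloc(size); bzero(buf, size); ... kfree(buf, size); */
    void *buf = kheap_alloc(KHEAP_DATA_BUFFERS, size, Z_WAITOK | Z_ZERO);
    if (buf != NULL) {
        /* ... use buf ... */
        kheap_free(KHEAP_DATA_BUFFERS, buf, size);
    }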
@@ -2683,6 +2790,12 @@ task_terminate_internal( strlcpy(procname, "", sizeof(procname)); } pmap_set_process(task->map->pmap, pid, procname); + if (vm_map_page_shift(task->map) < (int)PAGE_SHIFT) { + DEBUG4K_LIFE("map %p procname: %s\n", task->map, procname); + if (debug4k_panic_on_terminate) { + panic("DEBUG4K: %s:%d %d[%s] map %p\n", __FUNCTION__, __LINE__, pid, procname, task->map); + } + } #endif /* MACH_ASSERT */ vm_map_terminate(task->map); @@ -2690,6 +2803,9 @@ task_terminate_internal( /* release our shared region */ vm_shared_region_set(task, NULL); +#if __has_feature(ptrauth_calls) + task_set_shared_region_id(task, NULL); +#endif /* __has_feature(ptrauth_calls) */ lck_mtx_lock(&tasks_threads_lock); queue_remove(&tasks, task, task_t, tasks); @@ -2718,6 +2834,14 @@ task_terminate_internal( coalitions_remove_task(task); #endif +#if CONFIG_FREEZE + extern int vm_compressor_available; + if (VM_CONFIG_FREEZER_SWAP_IS_ACTIVE && vm_compressor_available) { + task_disown_frozen_csegs(task); + assert(queue_empty(&task->task_frozen_cseg_q)); + } +#endif /* CONFIG_FREEZE */ + /* * Get rid of the task active reference on itself. */ @@ -3080,11 +3204,12 @@ task_release( return KERN_SUCCESS; } -kern_return_t -task_threads( - task_t task, - thread_act_array_t *threads_out, - mach_msg_type_number_t *count) +static kern_return_t +task_threads_internal( + task_t task, + thread_act_array_t *threads_out, + mach_msg_type_number_t *count, + mach_thread_flavor_t flavor) { mach_msg_type_number_t actual; thread_t *thread_list; @@ -3093,12 +3218,12 @@ task_threads( void *addr; unsigned int i, j; + size = 0; addr = NULL; + if (task == TASK_NULL) { return KERN_INVALID_ARGUMENT; } - size = 0; addr = NULL; - for (;;) { task_lock(task); if (!task->active) { @@ -3188,14 +3313,74 @@ task_threads( /* do the conversion that Mig should handle */ - for (i = 0; i < actual; ++i) { - ((ipc_port_t *) thread_list)[i] = convert_thread_to_port(thread_list[i]); + switch (flavor) { + case THREAD_FLAVOR_CONTROL: + for (i = 0; i < actual; ++i) { + ((ipc_port_t *) thread_list)[i] = convert_thread_to_port(thread_list[i]); + } + break; + case THREAD_FLAVOR_READ: + for (i = 0; i < actual; ++i) { + ((ipc_port_t *) thread_list)[i] = convert_thread_read_to_port(thread_list[i]); + } + break; + case THREAD_FLAVOR_INSPECT: + for (i = 0; i < actual; ++i) { + ((ipc_port_t *) thread_list)[i] = convert_thread_inspect_to_port(thread_list[i]); + } + break; + default: + return KERN_INVALID_ARGUMENT; } } return KERN_SUCCESS; } +kern_return_t +task_threads( + task_t task, + thread_act_array_t *threads_out, + mach_msg_type_number_t *count) +{ + return task_threads_internal(task, threads_out, count, THREAD_FLAVOR_CONTROL); +} + + +kern_return_t +task_threads_from_user( + mach_port_t port, + thread_act_array_t *threads_out, + mach_msg_type_number_t *count) +{ + ipc_kobject_type_t kotype; + kern_return_t kr; + + task_t task = convert_port_to_task_check_type(port, &kotype, TASK_FLAVOR_INSPECT, FALSE); + + if (task == TASK_NULL) { + return KERN_INVALID_ARGUMENT; + } + + switch (kotype) { + case IKOT_TASK_CONTROL: + kr = task_threads_internal(task, threads_out, count, THREAD_FLAVOR_CONTROL); + break; + case IKOT_TASK_READ: + kr = task_threads_internal(task, threads_out, count, THREAD_FLAVOR_READ); + break; + case IKOT_TASK_INSPECT: + kr = task_threads_internal(task, threads_out, count, THREAD_FLAVOR_INSPECT); + break; + default: + panic("strange kobject type"); + break; + } + + task_deallocate(task); + return kr; +} + #define TASK_HOLD_NORMAL 0 #define 
TASK_HOLD_PIDSUSPEND 1 #define TASK_HOLD_LEGACY 2 @@ -3215,10 +3400,10 @@ place_task_hold( return KERN_SUCCESS; } - KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, - MACHDBG_CODE(DBG_MACH_IPC, MACH_TASK_SUSPEND) | DBG_FUNC_NONE, - task_pid(task), ((thread_t)queue_first(&task->threads))->thread_id, - task->user_stop_count, task->user_stop_count + 1, 0); + KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_IPC, MACH_TASK_SUSPEND), + task_pid(task), + task->thread_count > 0 ?((thread_t)queue_first(&task->threads))->thread_id : 0, + task->user_stop_count, task->user_stop_count + 1); #if MACH_ASSERT current_task()->suspends_outstanding++; @@ -3364,10 +3549,9 @@ task_suspend( * Claim a send right on the task resume port, and request a no-senders * notification on that port (if none outstanding). */ - (void)ipc_kobject_make_send_lazy_alloc_port(&task->itk_resume, - (ipc_kobject_t)task, IKOT_TASK_RESUME); + (void)ipc_kobject_make_send_lazy_alloc_port((ipc_port_t *) &task->itk_resume, + (ipc_kobject_t)task, IKOT_TASK_RESUME, true, OS_PTRAUTH_DISCRIMINATOR("task.itk_resume")); port = task->itk_resume; - task_unlock(task); /* @@ -4201,6 +4385,7 @@ task_disconnect_page_mappings(task_t task) */ extern void vm_wake_compactor_swapper(void); extern queue_head_t c_swapout_list_head; +extern struct freezer_context freezer_context_global; kern_return_t task_freeze( @@ -4235,6 +4420,8 @@ task_freeze( } task->changing_freeze_state = TRUE; + freezer_context_global.freezer_ctx_task = task; + task_unlock(task); kr = vm_map_freeze(task, @@ -4251,6 +4438,20 @@ task_freeze( if ((kr == KERN_SUCCESS) && (eval_only == FALSE)) { task->frozen = TRUE; + + freezer_context_global.freezer_ctx_task = NULL; + freezer_context_global.freezer_ctx_uncompressed_pages = 0; + + if (VM_CONFIG_FREEZER_SWAP_IS_ACTIVE) { + /* + * reset the counter tracking the # of swapped compressed pages + * because we are now done with this freeze session and task. + */ + + *dirty_count = (uint32_t) (freezer_context_global.freezer_ctx_swapped_bytes / PAGE_SIZE_64); /*used to track pageouts*/ + } + + freezer_context_global.freezer_ctx_swapped_bytes = 0; } task->changing_freeze_state = FALSE; @@ -4311,6 +4512,32 @@ task_thaw( return KERN_SUCCESS; } +void +task_update_frozen_to_swap_acct(task_t task, int64_t amount, freezer_acct_op_t op) +{ + /* + * We don't assert that the task lock is held because we call this + * routine from the decompression path and we won't be holding the + * task lock. However, since we are in the context of the task we are + * safe. + * In the case of the task_freeze path, we call it from behind the task + * lock but we don't need to because we have a reference on the proc + * being frozen. 
+ */ + + assert(task); + if (amount == 0) { + return; + } + + if (op == CREDIT_TO_SWAP) { + ledger_credit_nocheck(task->ledger, task_ledgers.frozen_to_swap, amount); + } else if (op == DEBIT_FROM_SWAP) { + ledger_debit_nocheck(task->ledger, task_ledgers.frozen_to_swap, amount); + } else { + panic("task_update_frozen_to_swap_acct: Invalid ledger op\n"); + } +} #endif /* CONFIG_FREEZE */ kern_return_t @@ -4375,29 +4602,14 @@ task_set_info( if (task == TASK_NULL) { return KERN_INVALID_ARGUMENT; } - switch (flavor) { #if CONFIG_ATM case TASK_TRACE_MEMORY_INFO: - { - if (task_info_count != TASK_TRACE_MEMORY_INFO_COUNT) { - return KERN_INVALID_ARGUMENT; - } - - assert(task_info_in != NULL); - task_trace_memory_info_t mem_info; - mem_info = (task_trace_memory_info_t) task_info_in; - kern_return_t kr = atm_register_trace_memory(task, - mem_info->user_memory_address, - mem_info->buffer_size); - return kr; - } - -#endif + return KERN_NOT_SUPPORTED; +#endif // CONFIG_ATM default: return KERN_INVALID_ARGUMENT; } - return KERN_SUCCESS; } int radar_20146450 = 1; @@ -4423,6 +4635,7 @@ task_info( return KERN_INVALID_ARGUMENT; } + switch (flavor) { case TASK_BASIC_INFO_32: case TASK_BASIC2_INFO_32: @@ -4443,7 +4656,7 @@ task_info( basic_info = (task_basic_info_32_t)task_info_out; map = (task == kernel_task)? kernel_map: task->map; - basic_info->virtual_size = (typeof(basic_info->virtual_size))map->size; + basic_info->virtual_size = (typeof(basic_info->virtual_size))vm_map_adjusted_size(map); if (flavor == TASK_BASIC2_INFO_32) { /* * The "BASIC2" flavor gets the maximum resident @@ -4489,7 +4702,7 @@ task_info( basic_info = (task_basic_info_64_2_t)task_info_out; map = (task == kernel_task)? kernel_map: task->map; - basic_info->virtual_size = map->size; + basic_info->virtual_size = vm_map_adjusted_size(map); basic_info->resident_size = (mach_vm_size_t)(pmap_resident_count(map->pmap)) * PAGE_SIZE_64; @@ -4528,7 +4741,7 @@ task_info( basic_info = (task_basic_info_64_t)task_info_out; map = (task == kernel_task)? kernel_map: task->map; - basic_info->virtual_size = map->size; + basic_info->virtual_size = vm_map_adjusted_size(map); basic_info->resident_size = (mach_vm_size_t)(pmap_resident_count(map->pmap)) * PAGE_SIZE_64; @@ -4568,7 +4781,7 @@ task_info( map = (task == kernel_task) ? 
kernel_map : task->map; - basic_info->virtual_size = map->size; + basic_info->virtual_size = vm_map_adjusted_size(map); basic_info->resident_size = (mach_vm_size_t)(pmap_resident_count(map->pmap)); @@ -4985,7 +5198,7 @@ task_info( uint32_t platform, sdk; p = current_proc(); platform = proc_platform(p); - sdk = proc_sdk(p); + sdk = proc_min_sdk(p); if (original_task_info_count > TASK_VM_INFO_REV2_COUNT && platform == PLATFORM_IOS && sdk != 0 && @@ -5028,7 +5241,7 @@ task_info( vm_map_lock_read(map); } - vm_info->virtual_size = (typeof(vm_info->virtual_size))map->size; + vm_info->virtual_size = (typeof(vm_info->virtual_size))vm_map_adjusted_size(map); vm_info->region_count = map->hdr.nentries; vm_info->page_size = vm_map_page_size(map); @@ -5406,11 +5619,11 @@ task_power_info_locked( info->total_system = task->total_system_time; runnable_time_sum = task->total_runnable_time; -#if CONFIG_EMBEDDED +#if defined(__arm__) || defined(__arm64__) if (infov2) { infov2->task_energy = task->task_energy; } -#endif +#endif /* defined(__arm__) || defined(__arm64__) */ if (ginfo) { ginfo->task_gpu_utilisation = task->task_gpu_ns; @@ -5435,11 +5648,11 @@ task_power_info_locked( info->task_timer_wakeups_bin_1 += thread->thread_timer_wakeups_bin_1; info->task_timer_wakeups_bin_2 += thread->thread_timer_wakeups_bin_2; -#if CONFIG_EMBEDDED +#if defined(__arm__) || defined(__arm64__) if (infov2) { infov2->task_energy += ml_energy_stat(thread); } -#endif +#endif /* defined(__arm__) || defined(__arm64__) */ tval = timer_grab(&thread->user_timer); info->total_user += tval; @@ -5485,7 +5698,7 @@ task_gpu_utilisation( task_t task) { uint64_t gpu_time = 0; -#if !CONFIG_EMBEDDED +#if defined(__x86_64__) thread_t thread; task_lock(task); @@ -5501,10 +5714,10 @@ task_gpu_utilisation( } task_unlock(task); -#else /* CONFIG_EMBEDDED */ +#else /* defined(__x86_64__) */ /* silence compiler warning */ (void)task; -#endif /* !CONFIG_EMBEDDED */ +#endif /* defined(__x86_64__) */ return gpu_time; } @@ -6299,7 +6512,7 @@ task_set_phys_footprint_limit_internal( */ ledger_set_limit(task->ledger, task_ledgers.phys_footprint, max_task_footprint ? max_task_footprint : LEDGER_LIMIT_INFINITY, - max_task_footprint ? max_task_footprint_warning_level : 0); + max_task_footprint ? (uint8_t)max_task_footprint_warning_level : 0); task_lock(task); task_set_memlimit_is_active(task, memlimit_is_active); @@ -6394,6 +6607,35 @@ task_set_thread_limit(task_t task, uint16_t thread_limit) } } +#if XNU_TARGET_OS_OSX +boolean_t +task_has_system_version_compat_enabled(task_t task) +{ + boolean_t enabled = FALSE; + + task_lock(task); + enabled = (task->t_flags & TF_SYS_VERSION_COMPAT); + task_unlock(task); + + return enabled; +} + +void +task_set_system_version_compat_enabled(task_t task, boolean_t enable_system_version_compat) +{ + assert(task == current_task()); + assert(task != kernel_task); + + task_lock(task); + if (enable_system_version_compat) { + task->t_flags |= TF_SYS_VERSION_COMPAT; + } else { + task->t_flags &= ~TF_SYS_VERSION_COMPAT; + } + task_unlock(task); +} +#endif /* XNU_TARGET_OS_OSX */ + /* * We need to export some functions to other components that * are currently implemented in macros within the osfmk @@ -6446,6 +6688,69 @@ task_pid(task_t task) return -1; } +#if __has_feature(ptrauth_calls) +/* + * Get the shared region id and jop signing key for the task. + * The function will allocate a kalloc buffer and return + * it to caller, the caller needs to free it. This is used + * for getting the information via task port. 
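Because the routine below returns a buffer allocated from KHEAP_DATA_BUFFERS, the caller owns it and must free it with the matching heap, as the comment above notes. A hypothetical caller sketch (illustrative only; the in-tree consumer is the task-port path):

/* Hypothetical caller: query and release a task's shared region id. */
static void
show_shared_region_id(task_t task)
{
	uint64_t jop_pid = 0;
	char *id = task_get_vm_shared_region_id_and_jop_pid(task, &jop_pid);

	if (id != NULL) {
		printf("shared region id %s, jop_pid 0x%llx\n",
		    id, (unsigned long long)jop_pid);
		kheap_free(KHEAP_DATA_BUFFERS, id, strlen(id) + 1);
	}
}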
+ */ +char * +task_get_vm_shared_region_id_and_jop_pid(task_t task, uint64_t *jop_pid) +{ + size_t len; + char *shared_region_id = NULL; + + task_lock(task); + if (task->shared_region_id == NULL) { + task_unlock(task); + return NULL; + } + len = strlen(task->shared_region_id) + 1; + + /* don't hold task lock while allocating */ + task_unlock(task); + shared_region_id = kheap_alloc(KHEAP_DATA_BUFFERS, len, Z_WAITOK); + task_lock(task); + + if (task->shared_region_id == NULL) { + task_unlock(task); + kheap_free(KHEAP_DATA_BUFFERS, shared_region_id, len); + return NULL; + } + assert(len == strlen(task->shared_region_id) + 1); /* should never change */ + strlcpy(shared_region_id, task->shared_region_id, len); + task_unlock(task); + + /* find key from its auth pager */ + if (jop_pid != NULL) { + *jop_pid = shared_region_find_key(shared_region_id); + } + + return shared_region_id; +} + +/* + * set the shared region id for a task + */ +void +task_set_shared_region_id(task_t task, char *id) +{ + char *old_id; + + task_lock(task); + old_id = task->shared_region_id; + task->shared_region_id = id; + task->shared_region_auth_remapped = FALSE; + task_unlock(task); + + /* free any pre-existing shared region id */ + if (old_id != NULL) { + shared_region_key_dealloc(old_id); + kheap_free(KHEAP_DATA_BUFFERS, old_id, strlen(old_id) + 1); + } +} +#endif /* __has_feature(ptrauth_calls) */ /* * This routine finds a thread in a task by its unique id @@ -6573,7 +6878,7 @@ task_wakeups_monitor_ctl(task_t task, uint32_t *flags, int32_t *rate_hz) #ifndef CONFIG_NOMONITORS ledger_set_limit(ledger, task_ledgers.interrupt_wakeups, *rate_hz * task_wakeups_monitor_interval, - task_wakeups_monitor_ustackshots_trigger_pct); + (uint8_t)task_wakeups_monitor_ustackshots_trigger_pct); ledger_set_period(ledger, task_ledgers.interrupt_wakeups, task_wakeups_monitor_interval * NSEC_PER_SEC); ledger_enable_callback(ledger, task_ledgers.interrupt_wakeups); #endif /* CONFIG_NOMONITORS */ @@ -6722,6 +7027,39 @@ global_update_logical_writes(int64_t io_delta, int64_t *global_write_count) return needs_telemetry; } +void +task_update_physical_writes(__unused task_t task, __unused task_physical_write_flavor_t flavor, __unused uint64_t io_size, __unused task_balance_flags_t flags) +{ +#if CONFIG_PHYS_WRITE_ACCT + if (!io_size) { + return; + } + + /* + * task == NULL means that we have to update kernel_task ledgers + */ + if (!task) { + task = kernel_task; + } + + KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_PHYS_WRITE_ACCT)) | DBG_FUNC_NONE, + task_pid(task), flavor, io_size, flags, 0); + DTRACE_IO4(physical_writes, struct task *, task, task_physical_write_flavor_t, flavor, uint64_t, io_size, task_balance_flags_t, flags); + + if (flags & TASK_BALANCE_CREDIT) { + if (flavor == TASK_PHYSICAL_WRITE_METADATA) { + OSAddAtomic64(io_size, (SInt64 *)&(task->task_fs_metadata_writes)); + ledger_credit_nocheck(task->ledger, task_ledgers.fs_metadata_writes, io_size); + } + } else if (flags & TASK_BALANCE_DEBIT) { + if (flavor == TASK_PHYSICAL_WRITE_METADATA) { + OSAddAtomic64(-1 * io_size, (SInt64 *)&(task->task_fs_metadata_writes)); + ledger_debit_nocheck(task->ledger, task_ledgers.fs_metadata_writes, io_size); + } + } +#endif /* CONFIG_PHYS_WRITE_ACCT */ +} + void task_update_logical_writes(task_t task, uint32_t io_size, int flags, void *vp) { @@ -7177,7 +7515,7 @@ task_set_could_use_secluded_mem( task_t task, boolean_t could_use_secluded_mem) { - task->task_could_use_secluded_mem = could_use_secluded_mem; + task->task_could_use_secluded_mem 
= !!could_use_secluded_mem; } void @@ -7185,7 +7523,7 @@ task_set_could_also_use_secluded_mem( task_t task, boolean_t could_also_use_secluded_mem) { - task->task_could_also_use_secluded_mem = could_also_use_secluded_mem; + task->task_could_also_use_secluded_mem = !!could_also_use_secluded_mem; } boolean_t @@ -7205,13 +7543,17 @@ task_can_use_secluded_mem( } /* - * If a single task is using more than some amount of - * memory, allow it to dip into secluded and also begin - * suppression of secluded memory until the tasks exits. + * If a single task is using more than some large amount of + * memory (i.e. secluded_shutoff_trigger) and is approaching + * its task limit, allow it to dip into secluded and begin + * suppression of rebuilding secluded memory until that task exits. */ if (is_alloc && secluded_shutoff_trigger != 0) { uint64_t phys_used = get_task_phys_footprint(task); - if (phys_used > secluded_shutoff_trigger) { + uint64_t limit = get_task_phys_footprint_limit(task); + if (phys_used > secluded_shutoff_trigger && + limit > secluded_shutoff_trigger && + phys_used > limit - secluded_shutoff_headroom) { start_secluded_suppression(task); return TRUE; } @@ -7342,8 +7684,9 @@ task_set_exc_guard_behavior( #if __arm64__ extern int legacy_footprint_entitlement_mode; -extern void memorystatus_act_on_legacy_footprint_entitlement(proc_t, boolean_t); -extern void memorystatus_act_on_ios13extended_footprint_entitlement(proc_t); +extern void memorystatus_act_on_legacy_footprint_entitlement(struct proc *, boolean_t); +extern void memorystatus_act_on_ios13extended_footprint_entitlement(struct proc *); + void task_set_legacy_footprint( @@ -7442,30 +7785,29 @@ task_set_memory_ownership_transfer( boolean_t value) { task_lock(task); - task->task_can_transfer_memory_ownership = value; + task->task_can_transfer_memory_ownership = !!value; task_unlock(task); } void -task_copy_vmobjects(task_t task, vm_object_query_t query, int len, int64_t* num) +task_copy_vmobjects(task_t task, vm_object_query_t query, size_t len, size_t *num) { vm_object_t find_vmo; - unsigned int i = 0; - unsigned int vmobj_limit = len / sizeof(vm_object_query_data_t); + size_t size = 0; task_objq_lock(task); if (query != NULL) { queue_iterate(&task->task_objq, find_vmo, vm_object_t, task_objq) { - vm_object_query_t p = &query[i]; + vm_object_query_t p = &query[size++]; - /* - * Clear the entire vm_object_query_t struct as we are using - * only the first 6 bits in the uint64_t bitfield for this - * anonymous struct member. - */ - bzero(p, sizeof(*p)); + /* make sure to not overrun */ + if (size * sizeof(vm_object_query_data_t) > len) { + --size; + break; + } + bzero(p, sizeof(*p)); p->object_id = (vm_object_id_t) VM_KERNEL_ADDRPERM(find_vmo); p->virtual_size = find_vmo->internal ? 
find_vmo->vo_size : 0; p->resident_size = find_vmo->resident_page_count * PAGE_SIZE; @@ -7480,22 +7822,131 @@ task_copy_vmobjects(task_t task, vm_object_query_t query, int len, int64_t* num) } else { p->compressed_size = 0; } - - i++; - - /* Make sure to not overrun */ - if (i == vmobj_limit) { - break; - } } } else { - i = task->task_owned_objects; + size = (size_t)task->task_owned_objects; } task_objq_unlock(task); - *num = i; + *num = size; +} + +void +task_set_filter_msg_flag( + task_t task, + boolean_t flag) +{ + assert(task != TASK_NULL); + + task_lock(task); + if (flag) { + task->t_flags |= TF_FILTER_MSG; + } else { + task->t_flags &= ~TF_FILTER_MSG; + } + task_unlock(task); +} + +boolean_t +task_get_filter_msg_flag( + task_t task) +{ + uint32_t flags = 0; + + if (!task) { + return false; + } + + flags = os_atomic_load(&task->t_flags, relaxed); + return (flags & TF_FILTER_MSG) ? TRUE : FALSE; +} +bool +task_is_exotic( + task_t task) +{ + if (task == TASK_NULL) { + return false; + } + return vm_map_is_exotic(get_task_map(task)); +} + +bool +task_is_alien( + task_t task) +{ + if (task == TASK_NULL) { + return false; + } + return vm_map_is_alien(get_task_map(task)); } + + +#if CONFIG_MACF +/* Set the filter mask for Mach traps. */ +void +mac_task_set_mach_filter_mask(task_t task, uint8_t *maskptr) +{ + assert(task); + + task->mach_trap_filter_mask = maskptr; +} + +/* Set the filter mask for kobject msgs. */ +void +mac_task_set_kobj_filter_mask(task_t task, uint8_t *maskptr) +{ + assert(task); + + task->mach_kobj_filter_mask = maskptr; +} + +/* Hook for mach trap/sc filter evaluation policy. */ +mac_task_mach_filter_cbfunc_t mac_task_mach_trap_evaluate = NULL; + +/* Hook for kobj message filter evaluation policy. */ +mac_task_kobj_filter_cbfunc_t mac_task_kobj_msg_evaluate = NULL; + +/* Set the callback hooks for the filtering policy. */ +int +mac_task_register_filter_callbacks( + const mac_task_mach_filter_cbfunc_t mach_cbfunc, + const mac_task_kobj_filter_cbfunc_t kobj_cbfunc) +{ + if (mach_cbfunc != NULL) { + if (mac_task_mach_trap_evaluate != NULL) { + return KERN_FAILURE; + } + mac_task_mach_trap_evaluate = mach_cbfunc; + } + if (kobj_cbfunc != NULL) { + if (mac_task_kobj_msg_evaluate != NULL) { + return KERN_FAILURE; + } + mac_task_kobj_msg_evaluate = kobj_cbfunc; + } + + return KERN_SUCCESS; +} +#endif /* CONFIG_MACF */ + +void +task_transfer_mach_filter_bits( + task_t new_task, + task_t old_task) +{ +#ifdef CONFIG_MACF + /* Copy mach trap and kernel object mask pointers to new task. */ + new_task->mach_trap_filter_mask = old_task->mach_trap_filter_mask; + new_task->mach_kobj_filter_mask = old_task->mach_kobj_filter_mask; +#endif + /* If filter message flag is set then set it in the new task. 
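The task_copy_vmobjects() rewrite above takes the buffer length in bytes, clamps the walk so it never writes past it, and reports the entry count through a size_t. A hedged caller sketch (the KHEAP_TEMP allocation and printing are assumptions for illustration; only fields visible in the hunk above are used):

/* Hypothetical caller of task_copy_vmobjects(). */
static void
dump_owned_vm_objects(task_t task)
{
	size_t count = 0;

	/* With query == NULL the routine only reports the current object count. */
	task_copy_vmobjects(task, NULL, 0, &count);
	if (count == 0) {
		return;
	}

	size_t len = count * sizeof(vm_object_query_data_t);
	vm_object_query_t query = kheap_alloc(KHEAP_TEMP, len, Z_WAITOK | Z_ZERO);
	if (query == NULL) {
		return;
	}

	/* The count may have grown since the first call; the routine clamps to len. */
	task_copy_vmobjects(task, query, len, &count);
	for (size_t i = 0; i < count; i++) {
		printf("object %llu: resident %llu bytes\n",
		    (unsigned long long)query[i].object_id,
		    (unsigned long long)query[i].resident_size);
	}
	kheap_free(KHEAP_TEMP, query, len);
}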
*/ + if (task_get_filter_msg_flag(old_task)) { + new_task->t_flags |= TF_FILTER_MSG; + } +} + + #if __has_feature(ptrauth_calls) #define PAC_EXCEPTION_ENTITLEMENT "com.apple.private.pac.exception" @@ -7527,3 +7978,27 @@ task_is_pac_exception_fatal( return (bool)(flags & TF_PAC_EXC_FATAL); } #endif /* __has_feature(ptrauth_calls) */ + +void +task_set_tecs(task_t task) +{ + if (task == TASK_NULL) { + task = current_task(); + } + + if (!machine_csv(CPUVN_CI)) { + return; + } + + LCK_MTX_ASSERT(&task->lock, LCK_MTX_ASSERT_NOTOWNED); + + task_lock(task); + + task->t_flags |= TF_TECS; + + thread_t thread; + queue_iterate(&task->threads, thread, thread_t, task_threads) { + machine_tecs(thread); + } + task_unlock(task); +} diff --git a/osfmk/kern/task.h b/osfmk/kern/task.h index df8c779dd..d74ddc935 100644 --- a/osfmk/kern/task.h +++ b/osfmk/kern/task.h @@ -130,10 +130,6 @@ #include #include -#ifdef CONFIG_ATM -#include -#endif - struct _cpu_time_qos_stats { uint64_t cpu_time_qos_default; uint64_t cpu_time_qos_maintenance; @@ -166,7 +162,7 @@ struct task { uint32_t vtimers; /* Miscellaneous */ - vm_map_t map; /* Address space description */ + vm_map_t XNU_PTRAUTH_SIGNED_PTR("task.map") map; /* Address space description */ queue_chain_t tasks; /* global list of tasks */ struct task_watchports *watchports; /* watchports passed in spawn */ turnstile_inheritor_t returnwait_inheritor; /* inheritor for task_wait */ @@ -190,8 +186,8 @@ struct task { integer_t user_stop_count; /* outstanding stops */ integer_t legacy_stop_count; /* outstanding legacy stops */ - integer_t priority; /* base priority for threads */ - integer_t max_priority; /* maximum priority for threads */ + int16_t priority; /* base priority for threads */ + int16_t max_priority; /* maximum priority for threads */ integer_t importance; /* priority offset (BSD 'nice' value) */ @@ -207,22 +203,25 @@ struct task { /* IPC structures */ decl_lck_mtx_data(, itk_lock_data); - struct ipc_port *itk_self; /* not a right, doesn't hold ref */ - struct ipc_port *itk_nself; /* not a right, doesn't hold ref */ - struct ipc_port *itk_sself; /* a send right */ + /* + * Different flavors of task port. 
+ * These flavors TASK_FLAVOR_* are defined in mach_types.h + */ + struct ipc_port * XNU_PTRAUTH_SIGNED_PTR("task.itk_self") itk_self[TASK_SELF_PORT_COUNT]; /* does not hold right */ + struct ipc_port * XNU_PTRAUTH_SIGNED_PTR("task.itk_settable_self") itk_settable_self; /* a send right */ struct exception_action exc_actions[EXC_TYPES_COUNT]; /* a send right each valid element */ - struct ipc_port *itk_host; /* a send right */ - struct ipc_port *itk_bootstrap; /* a send right */ - struct ipc_port *itk_seatbelt; /* a send right */ - struct ipc_port *itk_gssd; /* yet another send right */ - struct ipc_port *itk_debug_control; /* send right for debugmode communications */ - struct ipc_port *itk_task_access; /* and another send right */ - struct ipc_port *itk_resume; /* a receive right to resume this task */ - struct ipc_port *itk_registered[TASK_PORT_REGISTER_MAX]; + struct ipc_port * XNU_PTRAUTH_SIGNED_PTR("task.itk_host") itk_host; /* a send right */ + struct ipc_port * XNU_PTRAUTH_SIGNED_PTR("task.itk_bootstrap") itk_bootstrap; /* a send right */ + struct ipc_port * XNU_PTRAUTH_SIGNED_PTR("task.itk_seatbelt") itk_seatbelt; /* a send right */ + struct ipc_port * XNU_PTRAUTH_SIGNED_PTR("task.itk_gssd") itk_gssd; /* yet another send right */ + struct ipc_port * XNU_PTRAUTH_SIGNED_PTR("task.itk_debug_control") itk_debug_control; /* send right for debugmode communications */ + struct ipc_port * XNU_PTRAUTH_SIGNED_PTR("task.itk_task_access") itk_task_access; /* and another send right */ + struct ipc_port * XNU_PTRAUTH_SIGNED_PTR("task.itk_resume") itk_resume; /* a receive right to resume this task */ + struct ipc_port * XNU_PTRAUTH_SIGNED_PTR("task.itk_registered") itk_registered[TASK_PORT_REGISTER_MAX]; /* all send rights */ - struct ipc_space *itk_space; + struct ipc_space * XNU_PTRAUTH_SIGNED_PTR("task.itk_space") itk_space; ledger_t ledger; /* Synchronizer ownership information */ @@ -247,15 +246,21 @@ struct task { uint32_t ps_switch; /* total pset switches */ #ifdef MACH_BSD - void *bsd_info; + void * XNU_PTRAUTH_SIGNED_PTR("task.bsd_info") bsd_info; #endif kcdata_descriptor_t corpse_info; uint64_t crashed_thread_id; queue_chain_t corpse_tasks; #ifdef CONFIG_MACF struct label * crash_label; + uint8_t * mach_trap_filter_mask; /* Mach trap filter bitmask (len: mach_trap_count bits) */ + uint8_t * mach_kobj_filter_mask; /* Mach kobject filter bitmask (len: mach_kobj_count bits) */ #endif struct vm_shared_region *shared_region; +#if __has_feature(ptrauth_calls) + char *shared_region_id; /* determines which ptr auth key to use */ + bool shared_region_auth_remapped; /* authenticated sections ready for use */ +#endif /* __has_feature(ptrauth_calls) */ volatile uint32_t t_flags; /* general-purpose task flags protected by task_lock (TL) */ #define TF_NONE 0 #define TF_64B_ADDR 0x00000001 /* task has 64-bit addressing */ @@ -271,7 +276,10 @@ struct task { #define TF_CA_CLIENT_WI 0x00000800 /* task has CA_CLIENT work interval */ #define TF_DARKWAKE_MODE 0x00001000 /* task is in darkwake mode */ #define TF_NO_SMT 0x00002000 /* task threads must not be paired with SMT threads */ -#define TF_PAC_EXC_FATAL 0x00004000 /* task is marked a corpse if a PAC exception occurs */ +#define TF_FILTER_MSG 0x00004000 /* task calls into message filter callback before sending a message */ +#define TF_SYS_VERSION_COMPAT 0x00008000 /* shim task accesses to OS version data (macOS - app compatibility) */ +#define TF_PAC_EXC_FATAL 0x00010000 /* task is marked a corpse if a PAC exception occurs */ +#define TF_TECS 0x00020000 
/* task threads must enable CPU security */ /* * Task is running within a 64-bit address space. @@ -355,15 +363,12 @@ struct task { uint64_t rusage_cpu_perthr_interval; /* Per-thread CPU limit interval */ uint64_t rusage_cpu_deadline; thread_call_t rusage_cpu_callt; -#if CONFIG_EMBEDDED +#if CONFIG_TASKWATCH queue_head_t task_watchers; /* app state watcher threads */ int num_taskwatchers; int watchapplying; -#endif /* CONFIG_EMBEDDED */ +#endif /* CONFIG_TASKWATCH */ -#if CONFIG_ATM - struct atm_task_descriptor *atm_context; /* pointer to per task atm descriptor */ -#endif struct bank_task *bank_context; /* pointer to per task bank structure */ #if IMPORTANCE_INHERITANCE @@ -450,7 +455,7 @@ struct task { #endif #if HYPERVISOR - void *hv_task_target; /* hypervisor virtual machine object associated with this task */ + void * XNU_PTRAUTH_SIGNED_PTR("task.hv_task_target") hv_task_target; /* hypervisor virtual machine object associated with this task */ #endif /* HYPERVISOR */ #if CONFIG_SECLUDED_MEMORY @@ -467,6 +472,12 @@ struct task { mach_vm_address_t mach_header_vm_address; uint32_t loadTag; /* dext ID used for logging identity */ +#if CONFIG_FREEZE + queue_head_t task_frozen_cseg_q; /* queue of csegs frozen to NAND */ +#endif /* CONFIG_FREEZE */ +#if CONFIG_PHYS_WRITE_ACCT + uint64_t task_fs_metadata_writes; +#endif /* CONFIG_PHYS_WRITE_ACCT */ }; /* @@ -478,12 +489,19 @@ extern task_exc_guard_behavior_t task_exc_guard_default; extern kern_return_t task_violated_guard(mach_exception_code_t, mach_exception_subcode_t, void *); +static inline void +task_require(struct task *task) +{ + zone_id_require(ZONE_ID_TASK, sizeof(struct task), task); +} + #define task_lock(task) lck_mtx_lock(&(task)->lock) #define task_lock_assert_owned(task) LCK_MTX_ASSERT(&(task)->lock, LCK_MTX_ASSERT_OWNED) #define task_lock_try(task) lck_mtx_try_lock(&(task)->lock) #define task_unlock(task) lck_mtx_unlock(&(task)->lock) #define task_objq_lock_init(task) lck_mtx_init(&(task)->task_objq_lock, &vm_object_lck_grp, &vm_object_lck_attr) +#define task_objq_lock_destroy(task) lck_mtx_destroy(&(task)->task_objq_lock, &vm_object_lck_grp) #define task_objq_lock(task) lck_mtx_lock(&(task)->task_objq_lock) #define task_objq_lock_assert_owned(task) LCK_MTX_ASSERT(&(task)->task_objq_lock, LCK_MTX_ASSERT_OWNED) #define task_objq_lock_try(task) lck_mtx_try_lock(&(task)->task_objq_lock) @@ -501,15 +519,13 @@ extern kern_return_t #define TASK_REFERENCE_LEAK_DEBUG 0 -extern zone_t task_zone; - #if TASK_REFERENCE_LEAK_DEBUG extern void task_reference_internal(task_t task); extern os_ref_count_t task_deallocate_internal(task_t task); #else -#define task_reference_internal(task) \ -MACRO_BEGIN \ - zone_require(task, task_zone); \ +#define task_reference_internal(task) \ +MACRO_BEGIN \ + task_require(task); \ os_ref_retain(&(task)->ref_count); \ MACRO_END #define task_deallocate_internal(task) os_ref_release(&(task)->ref_count) @@ -612,6 +628,8 @@ __BEGIN_DECLS #ifdef KERNEL_PRIVATE extern boolean_t task_is_app_suspended(task_t task); +extern bool task_is_exotic(task_t task); +extern bool task_is_alien(task_t task); #endif #ifdef XNU_KERNEL_PRIVATE @@ -680,6 +698,16 @@ extern kern_return_t task_freeze( extern kern_return_t task_thaw( task_t task); +typedef enum { + CREDIT_TO_SWAP = 1, + DEBIT_FROM_SWAP = 2 +} freezer_acct_op_t; + +extern void task_update_frozen_to_swap_acct( + task_t task, + int64_t amount, + freezer_acct_op_t op); + #endif /* CONFIG_FREEZE */ /* Halt all other threads in the current task */ @@ -788,6 +816,7 @@ extern 
kern_return_t task_collect_crash_info( #endif int is_corpse_fork); void task_port_notify(mach_msg_header_t *msg); +void task_port_with_flavor_notify(mach_msg_header_t *msg); void task_wait_till_threads_terminate_locked(task_t task); /* JMM - should just be temporary (implementation in bsd_kern still) */ @@ -822,11 +851,23 @@ extern uint64_t get_task_alternate_accounting(task_t); extern uint64_t get_task_alternate_accounting_compressed(task_t); extern uint64_t get_task_memory_region_count(task_t); extern uint64_t get_task_page_table(task_t); +#if CONFIG_FREEZE +extern uint64_t get_task_frozen_to_swap(task_t); +#endif extern uint64_t get_task_network_nonvolatile(task_t); extern uint64_t get_task_network_nonvolatile_compressed(task_t); extern uint64_t get_task_wired_mem(task_t); extern uint32_t get_task_loadTag(task_t task); +extern uint64_t get_task_tagged_footprint(task_t task); +extern uint64_t get_task_tagged_footprint_compressed(task_t task); +extern uint64_t get_task_media_footprint(task_t task); +extern uint64_t get_task_media_footprint_compressed(task_t task); +extern uint64_t get_task_graphics_footprint(task_t task); +extern uint64_t get_task_graphics_footprint_compressed(task_t task); +extern uint64_t get_task_neural_footprint(task_t task); +extern uint64_t get_task_neural_footprint_compressed(task_t task); + extern kern_return_t task_convert_phys_footprint_limit(int, int *); extern kern_return_t task_set_phys_footprint_limit_internal(task_t, int, int *, boolean_t, boolean_t); extern kern_return_t task_get_phys_footprint_limit(task_t task, int *limit_mb); @@ -841,6 +882,10 @@ extern void task_mark_has_triggered_exc_resource(task_t task, boolean_t memlimit extern void task_set_thread_limit(task_t task, uint16_t thread_limit); +#if XNU_TARGET_OS_OSX +extern boolean_t task_has_system_version_compat_enabled(task_t task); +extern void task_set_system_version_compat_enabled(task_t task, boolean_t enable_system_version_compat); +#endif extern boolean_t is_kerneltask(task_t task); extern boolean_t is_corpsetask(task_t task); @@ -916,6 +961,12 @@ struct _task_ledger_indices { int pages_grabbed_iopl; int pages_grabbed_upl; #endif +#if CONFIG_FREEZE + int frozen_to_swap; +#endif /* CONFIG_FREEZE */ +#if CONFIG_PHYS_WRITE_ACCT + int fs_metadata_writes; +#endif /* CONFIG_PHYS_WRITE_ACCT */ }; extern struct _task_ledger_indices task_ledgers; @@ -941,7 +992,6 @@ extern void task_clear_return_wait(task_t task, uint32_t flags); extern void task_wait_to_return(void) __attribute__((noreturn)); extern event_t task_get_return_wait_event(task_t task); -extern void task_atm_reset(task_t task); extern void task_bank_reset(task_t task); extern void task_bank_init(task_t task); @@ -950,6 +1000,12 @@ extern void task_prep_arcade(task_t task, thread_t thread); #endif /* CONFIG_ARCADE */ extern int task_pid(task_t task); + +#if __has_feature(ptrauth_calls) +char *task_get_vm_shared_region_id_and_jop_pid(task_t task, uint64_t *); +void task_set_shared_region_id(task_t task, char *id); +#endif /* __has_feature(ptrauth_calls) */ + extern boolean_t task_has_assertions(task_t task); /* End task_policy */ @@ -961,13 +1017,20 @@ extern void task_set_message_app_suspended(task_t task, boolean_t enable); extern void task_copy_fields_for_exec(task_t dst_task, task_t src_task); -extern void task_copy_vmobjects(task_t task, vm_object_query_t query, int len, int64_t* num); +extern void task_copy_vmobjects(task_t task, vm_object_query_t query, size_t len, size_t *num); + +extern void task_set_filter_msg_flag(task_t task, 
boolean_t flag); +extern boolean_t task_get_filter_msg_flag(task_t task); + +extern void task_transfer_mach_filter_bits(task_t new_task, task_t old_mask); #if __has_feature(ptrauth_calls) extern bool task_is_pac_exception_fatal(task_t task); extern void task_set_pac_exception_fatal_flag(task_t task); #endif /*__has_feature(ptrauth_calls)*/ +extern void task_set_tecs(task_t task); + #endif /* XNU_KERNEL_PRIVATE */ #ifdef KERNEL_PRIVATE @@ -986,6 +1049,7 @@ extern boolean_t get_task_frozen(task_t); extern ipc_port_t convert_task_to_port(task_t); extern ipc_port_t convert_task_name_to_port(task_name_t); extern ipc_port_t convert_task_inspect_to_port(task_inspect_t); +extern ipc_port_t convert_task_read_to_port(task_read_t); extern ipc_port_t convert_task_suspension_token_to_port(task_suspension_token_t task); /* Convert from a port (in this case, an SO right to a task's resume port) to a task. */ @@ -999,6 +1063,17 @@ extern boolean_t task_suspension_notify(mach_msg_header_t *); #define TASK_WRITE_METADATA 0x8 extern void task_update_logical_writes(task_t task, uint32_t io_size, int flags, void *vp); +__enum_decl(task_balance_flags_t, uint8_t, { + TASK_BALANCE_CREDIT = 0x1, + TASK_BALANCE_DEBIT = 0x2, +}); + +__enum_decl(task_physical_write_flavor_t, uint8_t, { + TASK_PHYSICAL_WRITE_METADATA = 0x1, +}); +extern void task_update_physical_writes(task_t task, task_physical_write_flavor_t flavor, + uint64_t io_size, task_balance_flags_t flags); + #if CONFIG_SECLUDED_MEMORY extern void task_set_can_use_secluded_mem( task_t task, @@ -1041,9 +1116,18 @@ extern void task_deallocate( extern void task_name_deallocate( task_name_t task_name); +extern void task_policy_set_deallocate( + task_policy_set_t task_policy_set); + +extern void task_policy_get_deallocate( + task_policy_get_t task_policy_get); + extern void task_inspect_deallocate( task_inspect_t task_inspect); +extern void task_read_deallocate( + task_read_t task_read); + extern void task_suspension_token_deallocate( task_suspension_token_t token); @@ -1056,6 +1140,7 @@ extern void task_set_memory_ownership_transfer( task_t task, boolean_t value); + __END_DECLS #endif /* _KERN_TASK_H_ */ diff --git a/osfmk/kern/task_policy.c b/osfmk/kern/task_policy.c index e51b5b7bc..4423fd077 100644 --- a/osfmk/kern/task_policy.c +++ b/osfmk/kern/task_policy.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2016 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2020 Apple Computer, Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -48,10 +48,10 @@ #if CONFIG_TELEMETRY #include #endif -#if CONFIG_EMBEDDED +#if !defined(XNU_TARGET_OS_OSX) #include #include -#endif /* CONFIG_EMBEDDED */ +#endif /* !defined(XNU_TARGET_OS_OSX) */ #if IMPORTANCE_INHERITANCE #include @@ -139,7 +139,7 @@ static void proc_set_task_policy_locked(task_t task, int category, int flavor, int value, int value2); static void task_policy_update_locked(task_t task, task_pend_token_t pend_token); -static void task_policy_update_internal_locked(task_t task, boolean_t in_create, task_pend_token_t pend_token); +static void task_policy_update_internal_locked(task_t task, bool in_create, task_pend_token_t pend_token); /* For attributes that have two scalars as input/output */ static void proc_set_task_policy2(task_t task, int category, int flavor, int value1, int value2); @@ -167,7 +167,7 @@ static void task_action_cpuusage(thread_call_param_t param0, thread_call_param_t #ifdef MACH_BSD typedef struct proc * proc_t; -int proc_pid(void *proc); +int proc_pid(struct proc *proc); extern int proc_selfpid(void); extern char * proc_name_address(void *p); extern char * proc_best_name(proc_t proc); @@ -178,8 +178,7 @@ extern int proc_pidpathinfo_internal(proc_t p, uint64_t arg, #endif /* MACH_BSD */ -#if CONFIG_EMBEDDED -/* TODO: make CONFIG_TASKWATCH */ +#if CONFIG_TASKWATCH /* Taskwatch related helper functions */ static void set_thread_appbg(thread_t thread, int setbg, int importance); static void add_taskwatch_locked(task_t task, task_watch_t * twp); @@ -201,7 +200,7 @@ typedef struct thread_watchlist { int importance; /* importance to be restored if thread is being made active */ } thread_watchlist_t; -#endif /* CONFIG_EMBEDDED */ +#endif /* CONFIG_TASKWATCH */ extern int memorystatus_update_priority_for_appnap(proc_t p, boolean_t is_appnap); @@ -357,7 +356,7 @@ task_policy_set( return KERN_INVALID_ARGUMENT; } -#if CONFIG_EMBEDDED +#if !defined(XNU_TARGET_OS_OSX) /* On embedded, you can't modify your own role. */ if (current_task() == task) { return KERN_INVALID_ARGUMENT; @@ -454,14 +453,14 @@ task_policy_set( case TASK_SUPPRESSION_POLICY: { -#if CONFIG_EMBEDDED +#if !defined(XNU_TARGET_OS_OSX) /* * Suppression policy is not enabled for embedded * because apps aren't marked as denap receivers */ result = KERN_INVALID_ARGUMENT; break; -#else /* CONFIG_EMBEDDED */ +#else /* !defined(XNU_TARGET_OS_OSX) */ task_suppression_policy_t info = (task_suppression_policy_t)policy_info; @@ -516,7 +515,7 @@ task_policy_set( break; -#endif /* CONFIG_EMBEDDED */ +#endif /* !defined(XNU_TARGET_OS_OSX) */ } default: @@ -663,6 +662,7 @@ task_policy_get( info->flags |= (task_is_marked_importance_denap_receiver(task) ? TASK_DENAP_RECEIVER : 0); info->flags |= (task_is_marked_importance_donor(task) ? TASK_IMP_DONOR : 0); info->flags |= (task_is_marked_live_importance_donor(task) ? TASK_IMP_LIVE_DONOR : 0); + info->flags |= (get_task_pidsuspended(task) ? 
TASK_IS_PIDSUSPENDED : 0); info->imp_transitions = task->task_imp_base->iit_transitions; } else { info->imp_assertcnt = 0; @@ -753,7 +753,7 @@ task_policy_create(task_t task, task_t parent_task) task_pid(task), teffective_0(task), teffective_1(task), task->priority, 0); - task_policy_update_internal_locked(task, TRUE, NULL); + task_policy_update_internal_locked(task, true, NULL); KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_UPDATE, (IMP_UPDATE_TASK_CREATE | TASK_POLICY_TASK))) | DBG_FUNC_END, @@ -772,7 +772,7 @@ task_policy_update_locked(task_t task, task_pend_token_t pend_token) task_pid(task), teffective_0(task), teffective_1(task), task->priority, 0); - task_policy_update_internal_locked(task, FALSE, pend_token); + task_policy_update_internal_locked(task, false, pend_token); KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_UPDATE, TASK_POLICY_TASK)) | DBG_FUNC_END, @@ -793,7 +793,7 @@ task_policy_update_locked(task_t task, task_pend_token_t pend_token) */ static void -task_policy_update_internal_locked(task_t task, boolean_t in_create, task_pend_token_t pend_token) +task_policy_update_internal_locked(task_t task, bool in_create, task_pend_token_t pend_token) { /* * Step 1: @@ -817,8 +817,7 @@ task_policy_update_internal_locked(task_t task, boolean_t in_create, task_pend_t /* Set task qos clamp and ceiling */ next.tep_qos_clamp = requested.trp_qos_clamp; - if (requested.trp_apptype == TASK_APPTYPE_APP_DEFAULT || - requested.trp_apptype == TASK_APPTYPE_APP_TAL) { + if (requested.trp_apptype == TASK_APPTYPE_APP_DEFAULT) { switch (next.tep_role) { case TASK_FOREGROUND_APPLICATION: /* Foreground apps get urgent scheduler priority */ @@ -870,9 +869,17 @@ task_policy_update_internal_locked(task_t task, boolean_t in_create, task_pend_t } /* Calculate DARWIN_BG */ - boolean_t wants_darwinbg = FALSE; - boolean_t wants_all_sockets_bg = FALSE; /* Do I want my existing sockets to be bg */ - boolean_t wants_watchersbg = FALSE; /* Do I want my pidbound threads to be bg */ + bool wants_darwinbg = false; + bool wants_all_sockets_bg = false; /* Do I want my existing sockets to be bg */ + bool wants_watchersbg = false; /* Do I want my pidbound threads to be bg */ + bool adaptive_bg_only = false; /* This task is BG only because it's adaptive unboosted */ + + /* Adaptive daemons are DARWIN_BG unless boosted, and don't get network throttled. */ + if (requested.trp_apptype == TASK_APPTYPE_DAEMON_ADAPTIVE && + requested.trp_boosted == 0) { + wants_darwinbg = true; + adaptive_bg_only = true; + } /* * If DARWIN_BG has been requested at either level, it's engaged. 
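The rewritten block below folds unboosted adaptive daemons into the DARWIN_BG calculation and tracks that case separately as adaptive_bg_only, clearing it again whenever BG is wanted for any other reason. A standalone restatement of that decision, purely for readability (simplified types, not part of the patch):

/* Readability sketch of the wants_darwinbg / adaptive_bg_only decision below. */
static void
compute_darwinbg(bool explicit_bg_requested, bool is_adaptive_daemon,
    bool boosted, bool is_background_daemon, bool qos_clamped_to_bg,
    bool *wants_darwinbg, bool *adaptive_bg_only)
{
	*wants_darwinbg = false;
	*adaptive_bg_only = false;

	/* Unboosted adaptive daemons start out BG, and only for that reason. */
	if (is_adaptive_daemon && !boosted) {
		*wants_darwinbg = true;
		*adaptive_bg_only = true;
	}
	/* Any other reason for BG clears the "adaptive only" qualifier. */
	if (explicit_bg_requested || is_background_daemon || qos_clamped_to_bg) {
		*wants_darwinbg = true;
		*adaptive_bg_only = false;
	}
}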
@@ -882,39 +889,26 @@ task_policy_update_internal_locked(task_t task, boolean_t in_create, task_pend_t */ if (requested.trp_int_darwinbg || requested.trp_ext_darwinbg || next.tep_role == TASK_DARWINBG_APPLICATION) { - wants_watchersbg = wants_all_sockets_bg = wants_darwinbg = TRUE; - } - - /* - * Deprecated TAL implementation for TAL apptype - * Background TAL apps are throttled when TAL is enabled - */ - if (requested.trp_apptype == TASK_APPTYPE_APP_TAL && - requested.trp_role == TASK_BACKGROUND_APPLICATION && - requested.trp_tal_enabled == 1) { - next.tep_tal_engaged = 1; + wants_watchersbg = wants_all_sockets_bg = wants_darwinbg = true; + adaptive_bg_only = false; } - /* New TAL implementation based on TAL role alone, works for all apps */ - if ((requested.trp_apptype == TASK_APPTYPE_APP_DEFAULT || - requested.trp_apptype == TASK_APPTYPE_APP_TAL) && + /* Application launching in special Transparent App Lifecycle throttle mode */ + if (requested.trp_apptype == TASK_APPTYPE_APP_DEFAULT && requested.trp_role == TASK_THROTTLE_APPLICATION) { next.tep_tal_engaged = 1; } - /* Adaptive daemons are DARWIN_BG unless boosted, and don't get network throttled. */ - if (requested.trp_apptype == TASK_APPTYPE_DAEMON_ADAPTIVE && - requested.trp_boosted == 0) { - wants_darwinbg = TRUE; - } - /* Background daemons are always DARWIN_BG, no exceptions, and don't get network throttled. */ if (requested.trp_apptype == TASK_APPTYPE_DAEMON_BACKGROUND) { - wants_darwinbg = TRUE; + wants_darwinbg = true; + adaptive_bg_only = false; } - if (next.tep_qos_clamp == THREAD_QOS_BACKGROUND || next.tep_qos_clamp == THREAD_QOS_MAINTENANCE) { - wants_darwinbg = TRUE; + if (next.tep_qos_clamp == THREAD_QOS_BACKGROUND || + next.tep_qos_clamp == THREAD_QOS_MAINTENANCE) { + wants_darwinbg = true; + adaptive_bg_only = false; } /* Calculate side effects of DARWIN_BG */ @@ -934,20 +928,22 @@ task_policy_update_internal_locked(task_t task, boolean_t in_create, task_pend_t next.tep_watchers_bg = 1; } + next.tep_adaptive_bg = adaptive_bg_only; + /* Calculate low CPU priority */ - boolean_t wants_lowpri_cpu = FALSE; + boolean_t wants_lowpri_cpu = false; if (wants_darwinbg) { - wants_lowpri_cpu = TRUE; + wants_lowpri_cpu = true; } if (next.tep_tal_engaged) { - wants_lowpri_cpu = TRUE; + wants_lowpri_cpu = true; } if (requested.trp_sup_lowpri_cpu && requested.trp_boosted == 0) { - wants_lowpri_cpu = TRUE; + wants_lowpri_cpu = true; } if (wants_lowpri_cpu) { @@ -993,14 +989,14 @@ task_policy_update_internal_locked(task_t task, boolean_t in_create, task_pend_t } /* Calculate suppression-active flag */ - boolean_t appnap_transition = FALSE; + boolean_t appnap_transition = false; if (requested.trp_sup_active && requested.trp_boosted == 0) { next.tep_sup_active = 1; } if (task->effective_policy.tep_sup_active != next.tep_sup_active) { - appnap_transition = TRUE; + appnap_transition = true; } /* Calculate timer QOS */ @@ -1137,11 +1133,11 @@ task_policy_update_internal_locked(task_t task, boolean_t in_create, task_pend_t pend_token->tpt_update_timers = 1; } -#if CONFIG_EMBEDDED +#if CONFIG_TASKWATCH if (prev.tep_watchers_bg != next.tep_watchers_bg) { pend_token->tpt_update_watchers = 1; } -#endif /* CONFIG_EMBEDDED */ +#endif /* CONFIG_TASKWATCH */ if (prev.tep_live_donor != next.tep_live_donor) { pend_token->tpt_update_live_donor = 1; @@ -1152,7 +1148,7 @@ task_policy_update_internal_locked(task_t task, boolean_t in_create, task_pend_t * Update other subsystems as necessary if something has changed */ - boolean_t update_threads = FALSE, 
update_sfi = FALSE; + bool update_threads = false, update_sfi = false; /* * Check for the attributes that thread_policy_update_internal_locked() consults, @@ -1169,8 +1165,9 @@ task_policy_update_internal_locked(task_t task, boolean_t in_create, task_pend_t prev.tep_through_qos != next.tep_through_qos || prev.tep_lowpri_cpu != next.tep_lowpri_cpu || prev.tep_new_sockets_bg != next.tep_new_sockets_bg || - prev.tep_terminated != next.tep_terminated) { - update_threads = TRUE; + prev.tep_terminated != next.tep_terminated || + prev.tep_adaptive_bg != next.tep_adaptive_bg) { + update_threads = true; } /* @@ -1180,20 +1177,20 @@ task_policy_update_internal_locked(task_t task, boolean_t in_create, task_pend_t if (prev.tep_latency_qos != next.tep_latency_qos || prev.tep_role != next.tep_role || prev.tep_sfi_managed != next.tep_sfi_managed) { - update_sfi = TRUE; + update_sfi = true; } /* Reflect task role transitions into the coalition role counters */ if (prev.tep_role != next.tep_role) { if (task_policy_update_coalition_focal_tasks(task, prev.tep_role, next.tep_role, pend_token)) { - update_sfi = TRUE; + update_sfi = true; } } - boolean_t update_priority = FALSE; + bool update_priority = false; - int priority = BASEPRI_DEFAULT; - int max_priority = MAXPRI_USER; + int16_t priority = BASEPRI_DEFAULT; + int16_t max_priority = MAXPRI_USER; if (next.tep_lowpri_cpu) { priority = MAXPRI_THROTTLE; @@ -1218,7 +1215,7 @@ task_policy_update_internal_locked(task_t task, boolean_t in_create, task_pend_t priority += task->importance; if (task->effective_policy.tep_qos_clamp != THREAD_QOS_UNSPECIFIED) { - int qos_clamp_priority = thread_qos_policy_params.qos_pri[task->effective_policy.tep_qos_clamp]; + int16_t qos_clamp_priority = thread_qos_policy_params.qos_pri[task->effective_policy.tep_qos_clamp]; priority = MIN(priority, qos_clamp_priority); max_priority = MIN(max_priority, qos_clamp_priority); @@ -1239,7 +1236,7 @@ task_policy_update_internal_locked(task_t task, boolean_t in_create, task_pend_t /* update the scheduling priority for the task */ task->max_priority = max_priority; task->priority = priority; - update_priority = TRUE; + update_priority = true; } /* Loop over the threads in the task: @@ -1276,7 +1273,7 @@ task_policy_update_internal_locked(task_t task, boolean_t in_create, task_pend_t * [and optionally its live-donor status] * On macOS only. 
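The priority recomputation in the hunk above narrows priority and max_priority to int16_t and clamps both against the QoS clamp's priority. A hedged sketch of that clamping step (the real code checks the clamp against THREAD_QOS_UNSPECIFIED; a nonzero test stands in for it here):

/* Sketch of the QoS clamp applied to the task's scheduling priorities above. */
static void
apply_qos_clamp_priority(int16_t qos_clamp_priority,
    int16_t *priority, int16_t *max_priority)
{
	if (qos_clamp_priority != 0) {
		*priority = MIN(*priority, qos_clamp_priority);
		*max_priority = MIN(*max_priority, qos_clamp_priority);
	}
}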
*/ - if (appnap_transition == TRUE) { + if (appnap_transition) { if (task->effective_policy.tep_sup_active == 1) { memorystatus_update_priority_for_appnap(((proc_t) task->bsd_info), TRUE); } else { @@ -1371,11 +1368,11 @@ task_policy_update_complete_unlocked(task_t task, task_pend_token_t pend_token) ml_timer_evaluate(); } -#if CONFIG_EMBEDDED +#if CONFIG_TASKWATCH if (pend_token->tpt_update_watchers) { apply_appstate_watchers(task); } -#endif /* CONFIG_EMBEDDED */ +#endif /* CONFIG_TASKWATCH */ if (pend_token->tpt_update_live_donor) { task_importance_update_live_donor(task); @@ -1389,6 +1386,11 @@ task_policy_update_complete_unlocked(task_t task, task_pend_token_t pend_token) } #endif /* CONFIG_SCHED_SFI */ +#if CONFIG_THREAD_GROUPS + if (pend_token->tpt_update_tg_ui_flag) { + task_coalition_thread_group_focal_update(task); + } +#endif /* CONFIG_THREAD_GROUPS */ } /* @@ -1530,11 +1532,6 @@ proc_set_task_policy_locked(task_t task, /* Category: ATTRIBUTE */ - case TASK_POLICY_TAL: - assert(category == TASK_POLICY_ATTRIBUTE); - requested.trp_tal_enabled = value; - break; - case TASK_POLICY_BOOST: assert(category == TASK_POLICY_ATTRIBUTE); requested.trp_boosted = value; @@ -1631,7 +1628,7 @@ proc_get_task_policy(task_t task, } break; case TASK_POLICY_DARWIN_BG_IOPOL: - assert(category == TASK_POLICY_ATTRIBUTE); + assert(category == TASK_POLICY_INTERNAL); value = proc_tier_to_iopol(requested.trp_bg_iotier, 0); break; case TASK_POLICY_ROLE: @@ -1841,7 +1838,7 @@ proc_tier_to_iopol(int tier, int passive) } int -proc_darwin_role_to_task_role(int darwin_role, int* task_role) +proc_darwin_role_to_task_role(int darwin_role, task_role_t* task_role) { integer_t role = TASK_UNSPECIFIED; @@ -1877,7 +1874,7 @@ proc_darwin_role_to_task_role(int darwin_role, int* task_role) } int -proc_task_role_to_darwin_role(int task_role) +proc_task_role_to_darwin_role(task_role_t task_role) { switch (task_role) { case TASK_FOREGROUND_APPLICATION: @@ -1900,7 +1897,8 @@ proc_task_role_to_darwin_role(int task_role) /* TODO: remove this variable when interactive daemon audit period is over */ -extern boolean_t ipc_importance_interactive_receiver; +static TUNABLE(bool, ipc_importance_interactive_receiver, + "imp_interactive_receiver", false); /* * Called at process exec to initialize the apptype, qos clamp, and qos seed of a process @@ -1908,7 +1906,7 @@ extern boolean_t ipc_importance_interactive_receiver; * TODO: Make this function more table-driven instead of ad-hoc */ void -proc_set_task_spawnpolicy(task_t task, thread_t thread, int apptype, int qos_clamp, int role, +proc_set_task_spawnpolicy(task_t task, thread_t thread, int apptype, int qos_clamp, task_role_t role, ipc_port_t * portwatch_ports, uint32_t portwatch_count) { struct task_pend_token pend_token = {}; @@ -1919,18 +1917,17 @@ proc_set_task_spawnpolicy(task_t task, thread_t thread, int apptype, int qos_cla apptype, 0); switch (apptype) { - case TASK_APPTYPE_APP_TAL: case TASK_APPTYPE_APP_DEFAULT: /* Apps become donors via the 'live-donor' flag instead of the static donor flag */ task_importance_mark_donor(task, FALSE); task_importance_mark_live_donor(task, TRUE); task_importance_mark_receiver(task, FALSE); -#if CONFIG_EMBEDDED +#if !defined(XNU_TARGET_OS_OSX) task_importance_mark_denap_receiver(task, FALSE); #else - /* Apps are de-nap recievers on desktop for suppression behaviors */ + /* Apps are de-nap recievers on macOS for suppression behaviors */ task_importance_mark_denap_receiver(task, TRUE); -#endif /* CONFIG_EMBEDDED */ +#endif /* 
!defined(XNU_TARGET_OS_OSX) */ break; case TASK_APPTYPE_DAEMON_INTERACTIVE: @@ -2005,23 +2002,18 @@ proc_set_task_spawnpolicy(task_t task, thread_t thread, int apptype, int qos_cla task_lock(task); - if (apptype == TASK_APPTYPE_APP_TAL) { - /* TAL starts off enabled by default */ - task->requested_policy.trp_tal_enabled = 1; - } - if (apptype != TASK_APPTYPE_NONE) { task->requested_policy.trp_apptype = apptype; } -#if CONFIG_EMBEDDED +#if !defined(XNU_TARGET_OS_OSX) /* Remove this after launchd starts setting it properly */ if (apptype == TASK_APPTYPE_APP_DEFAULT && role == TASK_UNSPECIFIED) { task->requested_policy.trp_role = TASK_FOREGROUND_APPLICATION; } else #endif if (role != TASK_UNSPECIFIED) { - task->requested_policy.trp_role = role; + task->requested_policy.trp_role = (uint32_t)role; } if (qos_clamp != THREAD_QOS_UNSPECIFIED) { @@ -2057,17 +2049,17 @@ proc_inherit_task_role(task_t new_task, proc_set_task_policy(new_task, TASK_POLICY_ATTRIBUTE, TASK_POLICY_ROLE, role); } -extern void *initproc; +extern void * XNU_PTRAUTH_SIGNED_PTR("initproc") initproc; /* * Compute the default main thread qos for a task */ -int +thread_qos_t task_compute_main_thread_qos(task_t task) { - int primordial_qos = THREAD_QOS_UNSPECIFIED; + thread_qos_t primordial_qos = THREAD_QOS_UNSPECIFIED; - int qos_clamp = task->requested_policy.trp_qos_clamp; + thread_qos_t qos_clamp = task->requested_policy.trp_qos_clamp; switch (task->requested_policy.trp_apptype) { case TASK_APPTYPE_APP_TAL: @@ -2171,7 +2163,7 @@ proc_get_darwinbgstate(task_t task, uint32_t * flagsp) *flagsp |= PROC_FLAG_DARWINBG; } -#if CONFIG_EMBEDDED +#if !defined(XNU_TARGET_OS_OSX) if (task->requested_policy.trp_apptype == TASK_APPTYPE_DAEMON_BACKGROUND) { *flagsp |= PROC_FLAG_IOS_APPLEDAEMON; } @@ -2179,7 +2171,7 @@ proc_get_darwinbgstate(task_t task, uint32_t * flagsp) if (task->requested_policy.trp_apptype == TASK_APPTYPE_DAEMON_ADAPTIVE) { *flagsp |= PROC_FLAG_IOS_IMPPROMOTION; } -#endif /* CONFIG_EMBEDDED */ +#endif /* !defined(XNU_TARGET_OS_OSX) */ if (task->requested_policy.trp_apptype == TASK_APPTYPE_APP_DEFAULT || task->requested_policy.trp_apptype == TASK_APPTYPE_APP_TAL) { @@ -2481,7 +2473,7 @@ proc_init_cpumon_params(void) * CPU limit. All other types of notifications force task-wide scope for the limit. 
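Elsewhere in this patch, hand-rolled PE_parse_boot_argn() lookups give way to static TUNABLE declarations (ipc_importance_interactive_receiver above, cpumon_ustackshots_trigger_pct in the thread.c hunk near the end). A sketch of the pattern with a hypothetical boot-arg name:

/* Hypothetical TUNABLE following the pattern introduced by this patch. */
static TUNABLE(uint32_t, example_threshold, "example_threshold", 100);

static void
example_threshold_consumer(void)
{
	/* The tunable is populated from the boot-arg before this code runs. */
	if (example_threshold > 0) {
		printf("example_threshold is %u\n", example_threshold);
	}
}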
*/ int -proc_set_task_ruse_cpu(task_t task, uint32_t policy, uint8_t percentage, uint64_t interval, uint64_t deadline, +proc_set_task_ruse_cpu(task_t task, uint16_t policy, uint8_t percentage, uint64_t interval, uint64_t deadline, int cpumon_entitled) { int error = 0; @@ -2959,15 +2951,9 @@ task_action_cpuusage(thread_call_param_t param0, __unused thread_call_param_t pa * Routines for taskwatch and pidbind */ -#if CONFIG_EMBEDDED +#if CONFIG_TASKWATCH -lck_mtx_t task_watch_mtx; - -void -task_watch_init(void) -{ - lck_mtx_init(&task_watch_mtx, &task_lck_grp, &task_lck_attr); -} +LCK_MTX_DECLARE_ATTR(task_watch_mtx, &task_lck_grp, &task_lck_attr); static void task_watch_lock(void) @@ -3107,26 +3093,25 @@ retry: return; } - threadlist = (thread_watchlist_t *)kalloc(numwatchers * sizeof(thread_watchlist_t)); + threadlist = kheap_alloc(KHEAP_TEMP, + numwatchers * sizeof(thread_watchlist_t), Z_WAITOK | Z_ZERO); if (threadlist == NULL) { return; } - bzero(threadlist, numwatchers * sizeof(thread_watchlist_t)); - task_watch_lock(); /*serialize application of app state changes */ if (task->watchapplying != 0) { lck_mtx_sleep(&task_watch_mtx, LCK_SLEEP_DEFAULT, &task->watchapplying, THREAD_UNINT); task_watch_unlock(); - kfree(threadlist, numwatchers * sizeof(thread_watchlist_t)); + kheap_free(KHEAP_TEMP, threadlist, numwatchers * sizeof(thread_watchlist_t)); goto retry; } if (numwatchers != task->num_taskwatchers) { task_watch_unlock(); - kfree(threadlist, numwatchers * sizeof(thread_watchlist_t)); + kheap_free(KHEAP_TEMP, threadlist, numwatchers * sizeof(thread_watchlist_t)); goto retry; } @@ -3155,7 +3140,7 @@ retry: set_thread_appbg(threadlist[j].thread, setbg, threadlist[j].importance); thread_deallocate(threadlist[j].thread); } - kfree(threadlist, numwatchers * sizeof(thread_watchlist_t)); + kheap_free(KHEAP_TEMP, threadlist, numwatchers * sizeof(thread_watchlist_t)); task_watch_lock(); @@ -3189,66 +3174,34 @@ thead_remove_taskwatch(thread_t thread) void task_removewatchers(task_t task) { - int numwatchers = 0, i, j; - task_watch_t ** twplist = NULL; - task_watch_t * twp = NULL; - -retry: - if ((numwatchers = task->num_taskwatchers) == 0) { - return; - } - - twplist = (task_watch_t **)kalloc(numwatchers * sizeof(task_watch_t *)); - if (twplist == NULL) { - return; - } + queue_head_t queue; + task_watch_t *twp; - bzero(twplist, numwatchers * sizeof(task_watch_t *)); + queue_init(&queue); task_watch_lock(); - if (task->num_taskwatchers == 0) { - task_watch_unlock(); - goto out; - } - - if (numwatchers != task->num_taskwatchers) { - task_watch_unlock(); - kfree(twplist, numwatchers * sizeof(task_watch_t *)); - numwatchers = 0; - goto retry; - } - - i = 0; - while ((twp = (task_watch_t *)dequeue_head(&task->task_watchers)) != NULL) { - twplist[i] = twp; - task->num_taskwatchers--; + movqueue(&queue, &task->task_watchers); + queue_iterate(&queue, twp, task_watch_t *, tw_links) { /* * Since the linkage is removed and thead state cleanup is already set up, * remove the refernce from the thread. 
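The taskwatch hunks around this point replace kalloc()+bzero() scratch buffers with zeroed KHEAP_TEMP allocations in apply_appstate_watchers() and drop the scratch array entirely in task_removewatchers(). The temporary-heap pattern in isolation (the element type is illustrative):

/* Sketch of the KHEAP_TEMP scratch-buffer pattern used in this hunk. */
static void
kheap_temp_scratch_example(size_t nelem)
{
	size_t size = nelem * sizeof(uint64_t);
	uint64_t *scratch;

	scratch = kheap_alloc(KHEAP_TEMP, size, Z_WAITOK | Z_ZERO);
	if (scratch == NULL) {
		return;
	}
	/* ... use the zero-filled scratch buffer ... */
	kheap_free(KHEAP_TEMP, scratch, size);
}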
*/ twp->tw_thread->taskwatch = NULL; /* removed linkage, clear thread holding ref */ - i++; - if ((task->num_taskwatchers == 0) || (i > numwatchers)) { - break; - } } + task->num_taskwatchers = 0; task_watch_unlock(); - for (j = 0; j < i; j++) { - twp = twplist[j]; + while ((twp = qe_dequeue_head(&task->task_watchers, task_watch_t, tw_links)) != NULL) { /* remove thread and network bg */ set_thread_appbg(twp->tw_thread, 0, twp->tw_importance); thread_deallocate(twp->tw_thread); task_deallocate(twp->tw_task); kfree(twp, sizeof(task_watch_t)); } - -out: - kfree(twplist, numwatchers * sizeof(task_watch_t *)); } -#endif /* CONFIG_EMBEDDED */ +#endif /* CONFIG_TASKWATCH */ /* * Routines for importance donation/inheritance/boosting diff --git a/osfmk/kern/telemetry.c b/osfmk/kern/telemetry.c index 29595b721..b777052d4 100644 --- a/osfmk/kern/telemetry.c +++ b/osfmk/kern/telemetry.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012-2019 Apple Inc. All rights reserved. + * Copyright (c) 2012-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -60,7 +60,8 @@ #define TELEMETRY_DEBUG 0 -extern int proc_pid(void *); +struct proc; +extern int proc_pid(struct proc *); extern char *proc_name_address(void *p); extern uint64_t proc_uniqueid(void *p); extern uint64_t proc_was_throttled(void *p); @@ -116,9 +117,9 @@ struct micro_snapshot_buffer telemetry_buffer = { int telemetry_bytes_since_last_mark = -1; // How much data since buf was last marked? int telemetry_buffer_notify_at = 0; -lck_grp_t telemetry_lck_grp; -lck_mtx_t telemetry_mtx; -lck_mtx_t telemetry_pmi_mtx; +LCK_GRP_DECLARE(telemetry_lck_grp, "telemetry group"); +LCK_MTX_DECLARE(telemetry_mtx, &telemetry_lck_grp); +LCK_MTX_DECLARE(telemetry_pmi_mtx, &telemetry_lck_grp); #define TELEMETRY_LOCK() do { lck_mtx_lock(&telemetry_mtx); } while (0) #define TELEMETRY_TRY_SPIN_LOCK() lck_mtx_try_lock_spin(&telemetry_mtx) @@ -133,11 +134,8 @@ telemetry_init(void) kern_return_t ret; uint32_t telemetry_notification_leeway; - lck_grp_init(&telemetry_lck_grp, "telemetry group", LCK_GRP_ATTR_NULL); - lck_mtx_init(&telemetry_mtx, &telemetry_lck_grp, LCK_ATTR_NULL); - lck_mtx_init(&telemetry_pmi_mtx, &telemetry_lck_grp, LCK_ATTR_NULL); - - if (!PE_parse_boot_argn("telemetry_buffer_size", &telemetry_buffer.size, sizeof(telemetry_buffer.size))) { + if (!PE_parse_boot_argn("telemetry_buffer_size", + &telemetry_buffer.size, sizeof(telemetry_buffer.size))) { telemetry_buffer.size = TELEMETRY_DEFAULT_BUFFER_SIZE; } @@ -152,7 +150,8 @@ telemetry_init(void) } bzero((void *) telemetry_buffer.buffer, telemetry_buffer.size); - if (!PE_parse_boot_argn("telemetry_notification_leeway", &telemetry_notification_leeway, sizeof(telemetry_notification_leeway))) { + if (!PE_parse_boot_argn("telemetry_notification_leeway", + &telemetry_notification_leeway, sizeof(telemetry_notification_leeway))) { /* * By default, notify the user to collect the buffer when there is this much space left in the buffer. */ @@ -165,19 +164,21 @@ telemetry_init(void) } telemetry_buffer_notify_at = telemetry_buffer.size - telemetry_notification_leeway; - if (!PE_parse_boot_argn("telemetry_sample_rate", &telemetry_sample_rate, sizeof(telemetry_sample_rate))) { + if (!PE_parse_boot_argn("telemetry_sample_rate", + &telemetry_sample_rate, sizeof(telemetry_sample_rate))) { telemetry_sample_rate = TELEMETRY_DEFAULT_SAMPLE_RATE; } /* * To enable telemetry for all tasks, include "telemetry_sample_all_tasks=1" in boot-args. 
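telemetry.c above moves its lock group and mutexes from runtime lck_grp_init()/lck_mtx_init() calls to static LCK_GRP_DECLARE/LCK_MTX_DECLARE definitions, so the locks are valid before telemetry_init() runs; the bootprofile globals below get the same treatment. The pattern in isolation (names are illustrative):

/* Illustrative use of the static lock-declaration macros from this patch. */
LCK_GRP_DECLARE(example_lck_grp, "example group");
LCK_MTX_DECLARE(example_mtx, &example_lck_grp);

static uint64_t example_counter;

static void
example_locked_increment(void)
{
	lck_mtx_lock(&example_mtx);
	example_counter++;
	lck_mtx_unlock(&example_mtx);
}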
*/ - if (!PE_parse_boot_argn("telemetry_sample_all_tasks", &telemetry_sample_all_tasks, sizeof(telemetry_sample_all_tasks))) { -#if CONFIG_EMBEDDED && !(DEVELOPMENT || DEBUG) + if (!PE_parse_boot_argn("telemetry_sample_all_tasks", + &telemetry_sample_all_tasks, sizeof(telemetry_sample_all_tasks))) { +#if !defined(XNU_TARGET_OS_OSX) && !(DEVELOPMENT || DEBUG) telemetry_sample_all_tasks = FALSE; #else telemetry_sample_all_tasks = TRUE; -#endif /* CONFIG_EMBEDDED && !(DEVELOPMENT || DEBUG) */ +#endif /* !defined(XNU_TARGET_OS_OSX) && !(DEVELOPMENT || DEBUG) */ } kprintf("Telemetry: Sampling %stasks once per %u second%s\n", @@ -513,7 +514,7 @@ telemetry_take_sample(thread_t thread, uint8_t microsnapshot_flags, struct micro (copyin(shared_cache_base_address + sc_header_uuid_offset, (char *)&shared_cache_header.uuid, sizeof(shared_cache_header.uuid)) == 0)) { shared_cache_uuid_valid = 1; - shared_cache_slide = vm_shared_region_get_slide(sr); + shared_cache_slide = sr->sr_slide; } // vm_shared_region_get() gave us a reference on the shared region. vm_shared_region_deallocate(sr); @@ -565,7 +566,9 @@ telemetry_take_sample(thread_t thread, uint8_t microsnapshot_flags, struct micro char *uuid_info_array = NULL; if (uuid_info_count > 0) { - if ((uuid_info_array = (char *)kalloc(uuid_info_array_size)) == NULL) { + uuid_info_array = kheap_alloc(KHEAP_TEMP, + uuid_info_array_size, Z_WAITOK); + if (uuid_info_array == NULL) { return; } @@ -574,7 +577,7 @@ telemetry_take_sample(thread_t thread, uint8_t microsnapshot_flags, struct micro * It may be nonresident, in which case just fix up nloadinfos to 0 in the task snapshot. */ if (copyin(uuid_info_addr, uuid_info_array, uuid_info_array_size) != 0) { - kfree(uuid_info_array, uuid_info_array_size); + kheap_free(KHEAP_TEMP, uuid_info_array, uuid_info_array_size); uuid_info_array = NULL; uuid_info_array_size = 0; } @@ -702,6 +705,7 @@ copytobuffer: tsnap->ss_flags |= kTaskIsSuppressed; } + tsnap->latency_qos = task_grab_latency_qos(task); strlcpy(tsnap->p_comm, proc_name_address(p), sizeof(tsnap->p_comm)); @@ -870,7 +874,7 @@ cancel_sample: } if (uuid_info_array != NULL) { - kfree(uuid_info_array, uuid_info_array_size); + kheap_free(KHEAP_TEMP, uuid_info_array, uuid_info_array_size); } } @@ -1036,18 +1040,18 @@ out: #define BOOTPROFILE_MAX_BUFFER_SIZE (64*1024*1024) /* see also COPYSIZELIMIT_PANIC */ -vm_offset_t bootprofile_buffer = 0; -uint32_t bootprofile_buffer_size = 0; -uint32_t bootprofile_buffer_current_position = 0; -uint32_t bootprofile_interval_ms = 0; -uint32_t bootprofile_stackshot_flags = 0; -uint64_t bootprofile_interval_abs = 0; -uint64_t bootprofile_next_deadline = 0; -uint32_t bootprofile_all_procs = 0; -char bootprofile_proc_name[17]; +vm_offset_t bootprofile_buffer = 0; +uint32_t bootprofile_buffer_size = 0; +uint32_t bootprofile_buffer_current_position = 0; +uint32_t bootprofile_interval_ms = 0; +uint64_t bootprofile_stackshot_flags = 0; +uint64_t bootprofile_interval_abs = 0; +uint64_t bootprofile_next_deadline = 0; +uint32_t bootprofile_all_procs = 0; +char bootprofile_proc_name[17]; uint64_t bootprofile_delta_since_timestamp = 0; -lck_grp_t bootprofile_lck_grp; -lck_mtx_t bootprofile_mtx; +LCK_GRP_DECLARE(bootprofile_lck_grp, "bootprofile_group"); +LCK_MTX_DECLARE(bootprofile_mtx, &bootprofile_lck_grp); enum { @@ -1073,10 +1077,8 @@ bootprofile_init(void) kern_return_t ret; char type[32]; - lck_grp_init(&bootprofile_lck_grp, "bootprofile group", LCK_GRP_ATTR_NULL); - lck_mtx_init(&bootprofile_mtx, &bootprofile_lck_grp, LCK_ATTR_NULL); - 
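bootprofile_init() below keeps the PE_parse_boot_argn() idiom, in which each lookup falls back to a default when the boot-arg is absent, and simply reflows the calls. The idiom with a hypothetical boot-arg:

/* Hypothetical boot-arg lookup following the bootprofile_init() pattern. */
static uint32_t example_sample_period_ms;

static void
example_parse_boot_args(void)
{
	if (!PE_parse_boot_argn("example_sample_period_ms",
	    &example_sample_period_ms, sizeof(example_sample_period_ms))) {
		example_sample_period_ms = 0;   /* default: sampling disabled */
	}
}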
- if (!PE_parse_boot_argn("bootprofile_buffer_size", &bootprofile_buffer_size, sizeof(bootprofile_buffer_size))) { + if (!PE_parse_boot_argn("bootprofile_buffer_size", + &bootprofile_buffer_size, sizeof(bootprofile_buffer_size))) { bootprofile_buffer_size = 0; } @@ -1084,15 +1086,18 @@ bootprofile_init(void) bootprofile_buffer_size = BOOTPROFILE_MAX_BUFFER_SIZE; } - if (!PE_parse_boot_argn("bootprofile_interval_ms", &bootprofile_interval_ms, sizeof(bootprofile_interval_ms))) { + if (!PE_parse_boot_argn("bootprofile_interval_ms", + &bootprofile_interval_ms, sizeof(bootprofile_interval_ms))) { bootprofile_interval_ms = 0; } - if (!PE_parse_boot_argn("bootprofile_stackshot_flags", &bootprofile_stackshot_flags, sizeof(bootprofile_stackshot_flags))) { + if (!PE_parse_boot_argn("bootprofile_stackshot_flags", + &bootprofile_stackshot_flags, sizeof(bootprofile_stackshot_flags))) { bootprofile_stackshot_flags = 0; } - if (!PE_parse_boot_argn("bootprofile_proc_name", &bootprofile_proc_name, sizeof(bootprofile_proc_name))) { + if (!PE_parse_boot_argn("bootprofile_proc_name", + &bootprofile_proc_name, sizeof(bootprofile_proc_name))) { bootprofile_all_procs = 1; bootprofile_proc_name[0] = '\0'; } @@ -1123,7 +1128,8 @@ bootprofile_init(void) } bzero((void *) bootprofile_buffer, bootprofile_buffer_size); - kprintf("Boot profile: Sampling %s once per %u ms at %s\n", bootprofile_all_procs ? "all procs" : bootprofile_proc_name, bootprofile_interval_ms, + kprintf("Boot profile: Sampling %s once per %u ms at %s\n", + bootprofile_all_procs ? "all procs" : bootprofile_proc_name, bootprofile_interval_ms, bootprofile_type == kBootProfileStartTimerAtBoot ? "boot" : (bootprofile_type == kBootProfileStartTimerAtWake ? "wake" : "unknown")); timer_call_setup(&bootprofile_timer_call_entry, @@ -1191,9 +1197,9 @@ bootprofile_timer_call( /* initiate a stackshot with whatever portion of the buffer is left */ if (bootprofile_buffer_current_position < bootprofile_buffer_size) { - uint32_t flags = STACKSHOT_KCDATA_FORMAT | STACKSHOT_TRYLOCK | STACKSHOT_SAVE_LOADINFO + uint64_t flags = STACKSHOT_KCDATA_FORMAT | STACKSHOT_TRYLOCK | STACKSHOT_SAVE_LOADINFO | STACKSHOT_GET_GLOBAL_MEM_STATS; -#if !CONFIG_EMBEDDED +#if defined(XNU_TARGET_OS_OSX) flags |= STACKSHOT_SAVE_KEXT_LOADINFO; #endif @@ -1213,7 +1219,7 @@ bootprofile_timer_call( kern_return_t r = stack_snapshot_from_kernel( pid_to_profile, (void *)(bootprofile_buffer + bootprofile_buffer_current_position), bootprofile_buffer_size - bootprofile_buffer_current_position, - flags, bootprofile_delta_since_timestamp, &retbytes); + flags, bootprofile_delta_since_timestamp, 0, &retbytes); /* * We call with STACKSHOT_TRYLOCK because the stackshot lock is coarser diff --git a/osfmk/kern/thread.c b/osfmk/kern/thread.c index fde3ef327..63ee8528a 100644 --- a/osfmk/kern/thread.c +++ b/osfmk/kern/thread.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2019 Apple Inc. All rights reserved. + * Copyright (c) 2000-2020 Apple Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -166,12 +166,16 @@ #include #include -static struct zone *thread_zone; -static lck_grp_attr_t thread_lck_grp_attr; -lck_attr_t thread_lck_attr; -lck_grp_t thread_lck_grp; +#if CONFIG_MACF +#include +#endif + +LCK_GRP_DECLARE(thread_lck_grp, "thread"); + +ZONE_DECLARE(thread_zone, "threads", sizeof(struct thread), ZC_ZFREE_CLEARMEM); -struct zone *thread_qos_override_zone; +ZONE_DECLARE(thread_qos_override_zone, "thread qos override", + sizeof(struct thread_qos_override), ZC_NOENCRYPT); static struct mpsc_daemon_queue thread_stack_queue; static struct mpsc_daemon_queue thread_terminate_queue; @@ -188,7 +192,25 @@ struct thread_exception_elt { thread_t exception_thread; }; -static struct thread thread_template, init_thread; +static SECURITY_READ_ONLY_LATE(struct thread) thread_template = { +#if MACH_ASSERT + .thread_magic = THREAD_MAGIC, +#endif /* MACH_ASSERT */ + .wait_result = THREAD_WAITING, + .options = THREAD_ABORTSAFE, + .state = TH_WAIT | TH_UNINT, + .th_sched_bucket = TH_BUCKET_RUN, + .base_pri = BASEPRI_DEFAULT, + .realtime.deadline = UINT64_MAX, + .last_made_runnable_time = THREAD_NOT_RUNNABLE, + .last_basepri_change_time = THREAD_NOT_RUNNABLE, +#if defined(CONFIG_SCHED_TIMESHARE_CORE) + .pri_shift = INT8_MAX, +#endif + /* timers are initialized in thread_bootstrap */ +}; + +static struct thread init_thread; static void thread_deallocate_enqueue(thread_t thread); static void thread_deallocate_complete(thread_t thread); @@ -233,7 +255,9 @@ extern int exc_resource_threads_enabled; */ #define CPUMON_USTACKSHOTS_TRIGGER_DEFAULT_PCT 70 -int cpumon_ustackshots_trigger_pct; /* Percentage. Level at which we start gathering telemetry. */ +/* Percentage. Level at which we start gathering telemetry. */ +static TUNABLE(uint8_t, cpumon_ustackshots_trigger_pct, + "cpumon_ustackshots_trigger_pct", CPUMON_USTACKSHOTS_TRIGGER_DEFAULT_PCT); void __attribute__((noinline)) SENDING_NOTIFICATION__THIS_THREAD_IS_CONSUMING_TOO_MUCH_CPU(void); #if DEVELOPMENT || DEBUG void __attribute__((noinline)) SENDING_NOTIFICATION__TASK_HAS_TOO_MANY_THREADS(task_t, int); @@ -246,177 +270,42 @@ void __attribute__((noinline)) SENDING_NOTIFICATION__TASK_HAS_TOO_MANY_THREADS(t os_refgrp_decl(static, thread_refgrp, "thread", NULL); +static inline void +init_thread_from_template(thread_t thread) +{ + /* + * In general, struct thread isn't trivially-copyable, since it may + * contain pointers to thread-specific state. This may be enforced at + * compile time on architectures that store authed + diversified + * pointers in machine_thread. + * + * In this specific case, where we're initializing a new thread from a + * thread_template, we know all diversified pointers are NULL; these are + * safe to bitwise copy. + */ +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wnontrivial-memaccess" + memcpy(thread, &thread_template, sizeof(*thread)); +#pragma clang diagnostic pop +} + thread_t thread_bootstrap(void) { /* * Fill in a template thread for fast initialization. 
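The hunk above replaces a long run of field-by-field assignments with a read-only, designated-initializer template that every new thread is bitwise-copied from; the -Wnontrivial-memaccess suppression is justified in the comment because a fresh template carries no live diversified pointers. A self-contained sketch of the same pattern with a hypothetical structure:

#include <stdint.h>

struct example_obj {
	int      state;
	uint64_t deadline;
	/* all remaining fields default to zero */
};

/* Non-zero defaults live in a single read-only template ... */
static const struct example_obj example_template = {
	.state    = 1,
	.deadline = UINT64_MAX,
};

/* ... and each new object starts life as a copy of it. */
static void
example_obj_init(struct example_obj *obj)
{
	*obj = example_template;    /* plain struct copy; no live pointers inside */
}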
*/ - -#if MACH_ASSERT - thread_template.thread_magic = THREAD_MAGIC; -#endif /* MACH_ASSERT */ - - thread_template.runq = PROCESSOR_NULL; - - thread_template.reason = AST_NONE; - thread_template.at_safe_point = FALSE; - thread_template.wait_event = NO_EVENT64; - thread_template.waitq = NULL; - thread_template.wait_result = THREAD_WAITING; - thread_template.options = THREAD_ABORTSAFE; - thread_template.state = TH_WAIT | TH_UNINT; - thread_template.wake_active = FALSE; - thread_template.continuation = THREAD_CONTINUE_NULL; - thread_template.parameter = NULL; - - thread_template.importance = 0; - thread_template.sched_mode = TH_MODE_NONE; - thread_template.sched_flags = 0; - thread_template.saved_mode = TH_MODE_NONE; - thread_template.safe_release = 0; - thread_template.th_sched_bucket = TH_BUCKET_RUN; - - thread_template.sfi_class = SFI_CLASS_UNSPECIFIED; - thread_template.sfi_wait_class = SFI_CLASS_UNSPECIFIED; - - thread_template.active = 0; - thread_template.started = 0; - thread_template.static_param = 0; - thread_template.policy_reset = 0; - - thread_template.base_pri = BASEPRI_DEFAULT; - thread_template.sched_pri = 0; - thread_template.max_priority = 0; - thread_template.task_priority = 0; - thread_template.rwlock_count = 0; - thread_template.waiting_for_mutex = NULL; - - - thread_template.realtime.deadline = UINT64_MAX; - - thread_template.quantum_remaining = 0; - thread_template.last_run_time = 0; - thread_template.last_made_runnable_time = THREAD_NOT_RUNNABLE; - thread_template.last_basepri_change_time = THREAD_NOT_RUNNABLE; - thread_template.same_pri_latency = 0; - - thread_template.computation_metered = 0; - thread_template.computation_epoch = 0; - -#if defined(CONFIG_SCHED_TIMESHARE_CORE) - thread_template.sched_stamp = 0; - thread_template.pri_shift = INT8_MAX; - thread_template.sched_usage = 0; - thread_template.cpu_usage = thread_template.cpu_delta = 0; -#endif - thread_template.c_switch = thread_template.p_switch = thread_template.ps_switch = 0; - -#if MONOTONIC - memset(&thread_template.t_monotonic, 0, - sizeof(thread_template.t_monotonic)); -#endif /* MONOTONIC */ - - thread_template.bound_processor = PROCESSOR_NULL; - thread_template.last_processor = PROCESSOR_NULL; - - thread_template.sched_call = NULL; - timer_init(&thread_template.user_timer); timer_init(&thread_template.system_timer); timer_init(&thread_template.ptime); timer_init(&thread_template.runnable_timer); - thread_template.user_timer_save = 0; - thread_template.system_timer_save = 0; - thread_template.vtimer_user_save = 0; - thread_template.vtimer_prof_save = 0; - thread_template.vtimer_rlim_save = 0; - thread_template.vtimer_qos_save = 0; - -#if CONFIG_SCHED_SFI - thread_template.wait_sfi_begin_time = 0; -#endif - - thread_template.wait_timer_is_set = FALSE; - thread_template.wait_timer_active = 0; - - thread_template.depress_timer_active = 0; - - thread_template.recover = (vm_offset_t)NULL; - - thread_template.map = VM_MAP_NULL; -#if DEVELOPMENT || DEBUG - thread_template.pmap_footprint_suspended = FALSE; -#endif /* DEVELOPMENT || DEBUG */ - -#if CONFIG_DTRACE - thread_template.t_dtrace_predcache = 0; - thread_template.t_dtrace_vtime = 0; - thread_template.t_dtrace_tracing = 0; -#endif /* CONFIG_DTRACE */ - -#if KPERF - thread_template.kperf_ast = 0; - thread_template.kperf_pet_gen = 0; - thread_template.kperf_c_switch = 0; - thread_template.kperf_pet_cnt = 0; -#endif - -#if KPC - thread_template.kpc_buf = NULL; -#endif - -#if HYPERVISOR - thread_template.hv_thread_target = NULL; -#endif /* HYPERVISOR */ - 
-#if (DEVELOPMENT || DEBUG) - thread_template.t_page_creation_throttled_hard = 0; - thread_template.t_page_creation_throttled_soft = 0; -#endif /* DEVELOPMENT || DEBUG */ - thread_template.t_page_creation_throttled = 0; - thread_template.t_page_creation_count = 0; - thread_template.t_page_creation_time = 0; - - thread_template.affinity_set = NULL; - - thread_template.syscalls_unix = 0; - thread_template.syscalls_mach = 0; - - thread_template.t_ledger = LEDGER_NULL; - thread_template.t_threadledger = LEDGER_NULL; - thread_template.t_bankledger = LEDGER_NULL; - thread_template.t_deduct_bank_ledger_time = 0; - - thread_template.requested_policy = (struct thread_requested_policy) {}; - thread_template.effective_policy = (struct thread_effective_policy) {}; - - bzero(&thread_template.overrides, sizeof(thread_template.overrides)); - thread_template.kevent_overrides = 0; - - thread_template.iotier_override = THROTTLE_LEVEL_NONE; - thread_template.thread_io_stats = NULL; -#if CONFIG_EMBEDDED - thread_template.taskwatch = NULL; -#endif /* CONFIG_EMBEDDED */ - thread_template.thread_callout_interrupt_wakeups = thread_template.thread_callout_platform_idle_wakeups = 0; - - thread_template.thread_timer_wakeups_bin_1 = thread_template.thread_timer_wakeups_bin_2 = 0; - thread_template.callout_woken_from_icontext = thread_template.callout_woken_from_platform_idle = 0; - thread_template.guard_exc_fatal = 0; - - thread_template.thread_tag = 0; - - thread_template.ith_voucher_name = MACH_PORT_NULL; - thread_template.ith_voucher = IPC_VOUCHER_NULL; - - thread_template.th_work_interval = NULL; - - thread_template.decompressions = 0; - init_thread = thread_template; + init_thread_from_template(&init_thread); /* fiddle with init thread to skip asserts in set_sched_pri */ init_thread.sched_pri = MAXPRI_KERNEL; +#if DEBUG || DEVELOPMENT + queue_init(&init_thread.t_temp_alloc_list); +#endif /* DEBUG || DEVELOPMENT */ return &init_thread; } @@ -427,31 +316,9 @@ thread_machine_init_template(void) machine_thread_template_init(&thread_template); } -extern boolean_t allow_qos_policy_set; - void thread_init(void) { - thread_zone = zinit( - sizeof(struct thread), - thread_max * sizeof(struct thread), - THREAD_CHUNK * sizeof(struct thread), - "threads"); - - thread_qos_override_zone = zinit( - sizeof(struct thread_qos_override), - 4 * thread_max * sizeof(struct thread_qos_override), - PAGE_SIZE, - "thread qos override"); - zone_change(thread_qos_override_zone, Z_EXPAND, TRUE); - zone_change(thread_qos_override_zone, Z_COLLECT, TRUE); - zone_change(thread_qos_override_zone, Z_CALLERACCT, FALSE); - zone_change(thread_qos_override_zone, Z_NOENCRYPT, TRUE); - - lck_grp_attr_setdefault(&thread_lck_grp_attr); - lck_grp_init(&thread_lck_grp, "thread", &thread_lck_grp_attr); - lck_attr_setdefault(&thread_lck_attr); - stack_init(); thread_policy_init(); @@ -462,13 +329,6 @@ thread_init(void) */ machine_thread_init(); - if (!PE_parse_boot_argn("cpumon_ustackshots_trigger_pct", &cpumon_ustackshots_trigger_pct, - sizeof(cpumon_ustackshots_trigger_pct))) { - cpumon_ustackshots_trigger_pct = CPUMON_USTACKSHOTS_TRIGGER_DEFAULT_PCT; - } - - PE_parse_boot_argn("-qos-policy-allow", &allow_qos_policy_set, sizeof(allow_qos_policy_set)); - init_thread_ledgers(); } @@ -515,6 +375,10 @@ thread_terminate_self(void) task_t task; int threadcnt; + if (thread->t_temp_alloc_count) { + kheap_temp_leak_panic(thread); + } + pal_thread_terminate_self(thread); DTRACE_PROC(lwp__exit); @@ -535,9 +399,9 @@ thread_terminate_self(void) thread_unlock(thread); 
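thread_init() above can drop its zinit()/zone_change() and lock-group setup because the zones and lock group are now declared at compile time (see the ZONE_DECLARE and LCK_GRP_DECLARE lines earlier in this file). A minimal sketch of the compile-time zone style for a hypothetical object, assuming the zalloc_flags() interface from this xnu's zalloc:

struct example_obj {            /* hypothetical zone-backed object */
	uint64_t id;
	void    *payload;
};

ZONE_DECLARE(example_zone, "example objects",
    sizeof(struct example_obj), ZC_ZFREE_CLEARMEM);

static struct example_obj *
example_alloc(void)
{
	/* wait for memory if needed and hand back zero-filled storage */
	return zalloc_flags(example_zone, Z_WAITOK | Z_ZERO);
}

static void
example_free(struct example_obj *obj)
{
	zfree(example_zone, obj);
}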
splx(s); -#if CONFIG_EMBEDDED +#if CONFIG_TASKWATCH thead_remove_taskwatch(thread); -#endif /* CONFIG_EMBEDDED */ +#endif /* CONFIG_TASKWATCH */ work_interval_thread_terminate(thread); @@ -547,6 +411,8 @@ thread_terminate_self(void) thread_mtx_unlock(thread); + assert(thread->th_work_interval == NULL); + bank_swap_thread_bank_ledger(thread, NULL); if (kdebug_enable && bsd_hasthreadname(thread->uthread)) { @@ -564,6 +430,24 @@ thread_terminate_self(void) long dbg_arg2 = 0; kdbg_trace_data(thread->task->bsd_info, &dbg_arg1, &dbg_arg2); +#if MONOTONIC + if (kdebug_debugid_enabled(DBG_MT_INSTRS_CYCLES_THR_EXIT)) { + uint64_t counts[MT_CORE_NFIXED]; + uint64_t thread_user_time; + uint64_t thread_system_time; + thread_user_time = timer_grab(&thread->user_timer); + thread_system_time = timer_grab(&thread->system_timer); + mt_fixed_thread_counts(thread, counts); + KDBG_RELEASE(DBG_MT_INSTRS_CYCLES_THR_EXIT, +#ifdef MT_CORE_INSTRS + counts[MT_CORE_INSTRS], +#else /* defined(MT_CORE_INSTRS) */ + 0, +#endif/* !defined(MT_CORE_INSTRS) */ + counts[MT_CORE_CYCLES], + thread_system_time, thread_user_time); + } +#endif/* MONOTONIC */ KDBG_RELEASE(TRACE_DATA_THREAD_TERMINATE_PID, dbg_arg1, dbg_arg2); } @@ -586,6 +470,25 @@ thread_terminate_self(void) /* since we're the last thread in this process, trace out the command name too */ long args[4] = {}; kdbg_trace_string(thread->task->bsd_info, &args[0], &args[1], &args[2], &args[3]); +#if MONOTONIC + if (kdebug_debugid_enabled(DBG_MT_INSTRS_CYCLES_PROC_EXIT)) { + uint64_t counts[MT_CORE_NFIXED]; + uint64_t task_user_time; + uint64_t task_system_time; + mt_fixed_task_counts(task, counts); + /* since the thread time is not yet added to the task */ + task_user_time = task->total_user_time + timer_grab(&thread->user_timer); + task_system_time = task->total_system_time + timer_grab(&thread->system_timer); + KDBG_RELEASE((DBG_MT_INSTRS_CYCLES_PROC_EXIT), +#ifdef MT_CORE_INSTRS + counts[MT_CORE_INSTRS], +#else /* defined(MT_CORE_INSTRS) */ + 0, +#endif/* !defined(MT_CORE_INSTRS) */ + counts[MT_CORE_CYCLES], + task_system_time, task_user_time); + } +#endif/* MONOTONIC */ KDBG_RELEASE(TRACE_STRING_PROC_EXIT, args[0], args[1], args[2], args[3]); } @@ -690,9 +593,13 @@ thread_terminate_self(void) assert((thread->sched_flags & TH_SFLAG_RW_PROMOTED) == 0); assert((thread->sched_flags & TH_SFLAG_EXEC_PROMOTED) == 0); assert((thread->sched_flags & TH_SFLAG_PROMOTED) == 0); + assert((thread->sched_flags & TH_SFLAG_THREAD_GROUP_AUTO_JOIN) == 0); + assert(thread->th_work_interval_flags == TH_WORK_INTERVAL_FLAGS_NONE); assert(thread->kern_promotion_schedpri == 0); assert(thread->waiting_for_mutex == NULL); assert(thread->rwlock_count == 0); + assert(thread->handoff_thread == THREAD_NULL); + assert(thread->th_work_interval == NULL); thread_unlock(thread); /* splsched */ @@ -783,7 +690,8 @@ thread_deallocate_complete( } if (thread->thread_io_stats) { - kfree(thread->thread_io_stats, sizeof(struct io_stat_info)); + kheap_free(KHEAP_DATA_BUFFERS, thread->thread_io_stats, + sizeof(struct io_stat_info)); } if (thread->kernel_stack != 0) { @@ -815,6 +723,19 @@ thread_inspect_deallocate( return thread_deallocate((thread_t)thread_inspect); } +/* + * thread_read_deallocate: + * + * Drop a reference on thread read port. 
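The MONOTONIC blocks added above read the fixed performance counters at thread and process exit only when the matching kdebug tracepoint is enabled, so the cost is paid solely while tracing. A hedged sketch of that gate; the debugid parameter and the wrapper function are hypothetical:

static void
example_emit_exit_counters(uint32_t example_debugid, uint64_t instrs, uint64_t cycles)
{
	/* only touch the counters when someone is actually tracing this event */
	if (kdebug_debugid_enabled(example_debugid)) {
		KDBG_RELEASE(example_debugid, instrs, cycles);
	}
}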
+ */ +void +thread_read_deallocate( + thread_read_t thread_read) +{ + return thread_deallocate((thread_t)thread_read); +} + + /* * thread_exception_queue_invoke: * @@ -1165,10 +1086,13 @@ thread_create_internal( } if (new_thread != first_thread) { - *new_thread = thread_template; + init_thread_from_template(new_thread); } os_ref_init_count(&new_thread->ref_count, &thread_refgrp, 2); +#if DEBUG || DEVELOPMENT + queue_init(&new_thread->t_temp_alloc_list); +#endif /* DEBUG || DEVELOPMENT */ #ifdef MACH_BSD new_thread->uthread = uthread_alloc(parent_task, new_thread, (options & TH_OPTION_NOCRED) != 0); @@ -1206,25 +1130,28 @@ thread_create_internal( thread_lock_init(new_thread); wake_lock_init(new_thread); - lck_mtx_init(&new_thread->mutex, &thread_lck_grp, &thread_lck_attr); + lck_mtx_init(&new_thread->mutex, &thread_lck_grp, LCK_ATTR_NULL); ipc_thread_init(new_thread); new_thread->continuation = continuation; new_thread->parameter = parameter; new_thread->inheritor_flags = TURNSTILE_UPDATE_FLAGS_NONE; - priority_queue_init(&new_thread->sched_inheritor_queue, - PRIORITY_QUEUE_BUILTIN_MAX_HEAP); - priority_queue_init(&new_thread->base_inheritor_queue, - PRIORITY_QUEUE_BUILTIN_MAX_HEAP); + priority_queue_init(&new_thread->sched_inheritor_queue); + priority_queue_init(&new_thread->base_inheritor_queue); #if CONFIG_SCHED_CLUTCH - priority_queue_entry_init(&new_thread->sched_clutchpri_link); + priority_queue_entry_init(&new_thread->th_clutch_runq_link); + priority_queue_entry_init(&new_thread->th_clutch_pri_link); #endif /* CONFIG_SCHED_CLUTCH */ +#if CONFIG_SCHED_EDGE + new_thread->th_bound_cluster_enqueued = false; +#endif /* CONFIG_SCHED_EDGE */ + /* Allocate I/O Statistics structure */ - new_thread->thread_io_stats = (io_stat_info_t)kalloc(sizeof(struct io_stat_info)); + new_thread->thread_io_stats = kheap_alloc(KHEAP_DATA_BUFFERS, + sizeof(struct io_stat_info), Z_WAITOK | Z_ZERO); assert(new_thread->thread_io_stats != NULL); - bzero(new_thread->thread_io_stats, sizeof(struct io_stat_info)); #if KASAN kasan_init_thread(&new_thread->kasan_data); @@ -1239,6 +1166,8 @@ thread_create_internal( new_thread->decmp_upl = NULL; #endif /* CONFIG_IOSCHED */ + new_thread->thread_region_page_shift = 0; + #if DEVELOPMENT || DEBUG task_lock(parent_task); uint16_t thread_limit = parent_task->task_thread_limit; @@ -1282,7 +1211,8 @@ thread_create_internal( #endif /* MACH_BSD */ ipc_thread_disable(new_thread); ipc_thread_terminate(new_thread); - kfree(new_thread->thread_io_stats, sizeof(struct io_stat_info)); + kheap_free(KHEAP_DATA_BUFFERS, new_thread->thread_io_stats, + sizeof(struct io_stat_info)); lck_mtx_destroy(&new_thread->mutex, &thread_lck_grp); machine_thread_destroy(new_thread); zfree(thread_zone, new_thread); @@ -1340,17 +1270,20 @@ thread_create_internal( new_thread->max_priority = parent_task->max_priority; new_thread->task_priority = parent_task->priority; +#if CONFIG_THREAD_GROUPS + thread_group_init_thread(new_thread, parent_task); +#endif /* CONFIG_THREAD_GROUPS */ int new_priority = (priority < 0) ? parent_task->priority: priority; new_priority = (priority < 0)? 
parent_task->priority: priority; if (new_priority > new_thread->max_priority) { new_priority = new_thread->max_priority; } -#if CONFIG_EMBEDDED +#if !defined(XNU_TARGET_OS_OSX) if (new_priority < MAXPRI_THROTTLE) { new_priority = MAXPRI_THROTTLE; } -#endif /* CONFIG_EMBEDDED */ +#endif /* !defined(XNU_TARGET_OS_OSX) */ new_thread->importance = new_priority - new_thread->task_priority; @@ -1365,11 +1298,9 @@ thread_create_internal( #endif /* CONFIG_SCHED_CLUTCH */ #endif /* defined(CONFIG_SCHED_TIMESHARE_CORE) */ -#if CONFIG_EMBEDDED if (parent_task->max_priority <= MAXPRI_THROTTLE) { sched_thread_mode_demote(new_thread, TH_SFLAG_THROTTLED); } -#endif /* CONFIG_EMBEDDED */ thread_policy_create(new_thread); @@ -1392,6 +1323,8 @@ thread_create_internal( } new_thread->corpse_dup = FALSE; new_thread->turnstile = turnstile_alloc(); + + *out_thread = new_thread; if (kdebug_enable) { @@ -1443,6 +1376,13 @@ thread_create_internal2( return KERN_INVALID_ARGUMENT; } +#if CONFIG_MACF + if (from_user && current_task() != task && + mac_proc_check_remote_thread_create(task, -1, NULL, 0) != 0) { + return KERN_DENIED; + } +#endif + result = thread_create_internal(task, -1, continuation, NULL, TH_OPTION_NONE, &thread); if (result != KERN_SUCCESS) { return result; @@ -1573,6 +1513,13 @@ thread_create_running_internal2( return KERN_INVALID_ARGUMENT; } +#if CONFIG_MACF + if (from_user && current_task() != task && + mac_proc_check_remote_thread_create(task, flavor, new_state, new_state_count) != 0) { + return KERN_DENIED; + } +#endif + result = thread_create_internal(task, -1, (thread_continue_t)thread_bootstrap_return, NULL, TH_OPTION_NONE, &thread); @@ -1691,7 +1638,7 @@ kernel_thread_create( stack_alloc(thread); assert(thread->kernel_stack != 0); -#if CONFIG_EMBEDDED +#if !defined(XNU_TARGET_OS_OSX) if (priority > BASEPRI_KERNEL) #endif thread->reserved_stack = thread->kernel_stack; @@ -1849,7 +1796,7 @@ thread_info_internal( return KERN_INVALID_ARGUMENT; } - identifier_info = (thread_identifier_info_t) thread_info_out; + identifier_info = __IGNORE_WCASTALIGN((thread_identifier_info_t)thread_info_out); s = splsched(); thread_lock(thread); @@ -1947,7 +1894,7 @@ thread_info_internal( return KERN_SUCCESS; } else if (flavor == THREAD_EXTENDED_INFO) { thread_basic_info_data_t basic_info; - thread_extended_info_t extended_info = (thread_extended_info_t) thread_info_out; + thread_extended_info_t extended_info = __IGNORE_WCASTALIGN((thread_extended_info_t)thread_info_out); if (*thread_info_count < THREAD_EXTENDED_INFO_COUNT) { return KERN_INVALID_ARGUMENT; @@ -1991,7 +1938,7 @@ thread_info_internal( return KERN_INVALID_ARGUMENT; } - dbg_info = (thread_debug_info_internal_t) thread_info_out; + dbg_info = __IGNORE_WCASTALIGN((thread_debug_info_internal_t)thread_info_out); dbg_info->page_creation_count = thread->t_page_creation_count; *thread_info_count = THREAD_DEBUG_INFO_INTERNAL_COUNT; @@ -2057,7 +2004,7 @@ thread_get_runtime_self(void) /* Not interrupt safe, as the scheduler may otherwise update timer values underneath us */ interrupt_state = ml_set_interrupts_enabled(FALSE); processor = current_processor(); - timer_update(PROCESSOR_DATA(processor, thread_timer), mach_absolute_time()); + timer_update(processor->thread_timer, mach_absolute_time()); runtime = (timer_grab(&thread->user_timer) + timer_grab(&thread->system_timer)); ml_set_interrupts_enabled(interrupt_state); @@ -2607,7 +2554,7 @@ thread_get_cpulimit(int *action, uint8_t *percentage, uint64_t *interval_ns) * This calculation is the converse to the one in 
thread_set_cpulimit(). */ absolutetime_to_nanoseconds(abstime, &limittime); - *percentage = (limittime * 100ULL) / *interval_ns; + *percentage = (uint8_t)((limittime * 100ULL) / *interval_ns); assert(*percentage <= 100); if (thread->options & TH_OPT_PROC_CPULIMIT) { @@ -2891,6 +2838,9 @@ thread_set_voucher_name(mach_port_name_t voucher_name) thread_mtx_unlock(thread); bank_swap_thread_bank_ledger(thread, bankledger); +#if CONFIG_THREAD_GROUPS + thread_group_set_bank(thread, banktg); +#endif /* CONFIG_THREAD_GROUPS */ KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, MACHDBG_CODE(DBG_MACH_IPC, MACH_THREAD_SET_VOUCHER) | DBG_FUNC_NONE, @@ -2980,6 +2930,9 @@ thread_set_mach_voucher( thread_mtx_unlock(thread); bank_swap_thread_bank_ledger(thread, bankledger); +#if CONFIG_THREAD_GROUPS + thread_group_set_bank(thread, banktg); +#endif /* CONFIG_THREAD_GROUPS */ KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, MACHDBG_CODE(DBG_MACH_IPC, MACH_THREAD_SET_VOUCHER) | DBG_FUNC_NONE, @@ -3040,6 +2993,31 @@ thread_get_current_voucher_origin_pid( return kr; } +#if CONFIG_THREAD_GROUPS +/* + * Returns the current thread's voucher-carried thread group + * + * Reference is borrowed from this being the current voucher, so it does NOT + * return a reference to the group. + */ +struct thread_group * +thread_get_current_voucher_thread_group(thread_t thread) +{ + assert(thread == current_thread()); + + if (thread->ith_voucher == NULL) { + return NULL; + } + + ledger_t bankledger = NULL; + struct thread_group *banktg = NULL; + + bank_get_bank_ledger_thread_group_and_persona(thread->ith_voucher, &bankledger, &banktg, NULL); + + return banktg; +} + +#endif /* CONFIG_THREAD_GROUPS */ boolean_t thread_has_thread_name(thread_t th) @@ -3063,6 +3041,19 @@ thread_set_thread_name(thread_t th, const char* name) } } +void +thread_get_thread_name(thread_t th, char* name) +{ + if (!name) { + return; + } + if ((th) && (th->uthread)) { + bsd_getthreadname(th->uthread, name); + } else { + name[0] = '\0'; + } +} + void thread_set_honor_qlimit(thread_t thread) { @@ -3142,6 +3133,99 @@ thread_kern_get_kernel_maxpri(void) { return MAXPRI_KERNEL; } +/* + * thread_port_with_flavor_notify + * + * Called whenever the Mach port system detects no-senders on + * the thread inspect or read port. These ports are allocated lazily and + * should be deallocated here when there are no senders remaining. + */ +void +thread_port_with_flavor_notify(mach_msg_header_t *msg) +{ + mach_no_senders_notification_t *notification = (void *)msg; + ipc_port_t port = notification->not_header.msgh_remote_port; + thread_t thread; + mach_thread_flavor_t flavor; + ipc_kobject_type_t kotype; + + ip_lock(port); + if (port->ip_srights > 0) { + ip_unlock(port); + return; + } + thread = (thread_t)port->ip_kobject; + kotype = ip_kotype(port); + if (thread != THREAD_NULL) { + assert((IKOT_THREAD_READ == kotype) || (IKOT_THREAD_INSPECT == kotype)); + thread_reference_internal(thread); + } + ip_unlock(port); + + if (thread == THREAD_NULL) { + /* The thread is exiting or disabled; it will eventually deallocate the port */ + return; + } + + thread_mtx_lock(thread); + ip_lock(port); + require_ip_active(port); + /* + * Check for a stale no-senders notification. A call to any function + * that vends out send rights to this port could resurrect it between + * this notification being generated and actually being handled here. 
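For concreteness, the percentage conversion in thread_get_cpulimit() above works out as follows with illustrative numbers (a 250 ms CPU-time allowance over a 1 s interval); the wrapper function is hypothetical:

static uint8_t
example_cpulimit_percentage(void)
{
	uint64_t interval_ns = 1000000000ULL;   /* 1 s interval */
	uint64_t limittime   = 250000000ULL;    /* 250 ms, as produced by absolutetime_to_nanoseconds() */

	/* same expression as thread_get_cpulimit(); evaluates to 25 here */
	return (uint8_t)((limittime * 100ULL) / interval_ns);
}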
+ */ + if (port->ip_srights > 0) { + ip_unlock(port); + thread_mtx_unlock(thread); + thread_deallocate(thread); + return; + } + if (kotype == IKOT_THREAD_READ) { + flavor = THREAD_FLAVOR_READ; + } else { + flavor = THREAD_FLAVOR_INSPECT; + } + assert(thread->ith_self[flavor] == port); + thread->ith_self[flavor] = IP_NULL; + port->ip_kobject = IKOT_NONE; + ip_unlock(port); + thread_mtx_unlock(thread); + thread_deallocate(thread); + + ipc_port_dealloc_kernel(port); +} + +/* + * The 'thread_region_page_shift' is used by footprint + * to specify the page size that it will use to + * accomplish its accounting work on the task being + * inspected. Since footprint uses a thread for each + * task that it works on, we need to keep the page_shift + * on a per-thread basis. + */ + +int +thread_self_region_page_shift(void) +{ + /* + * Return the page shift that this thread + * would like to use for its accounting work. + */ + return current_thread()->thread_region_page_shift; +} + +void +thread_self_region_page_shift_set( + int pgshift) +{ + /* + * Set the page shift that this thread + * would like to use for its accounting work + * when dealing with a task. + */ + current_thread()->thread_region_page_shift = pgshift; +} #if CONFIG_DTRACE uint32_t @@ -3228,7 +3312,7 @@ dtrace_calc_thread_recent_vtime(thread_t thread) uint64_t abstime = mach_absolute_time(); timer_t timer; - timer = PROCESSOR_DATA(processor, thread_timer); + timer = processor->thread_timer; return timer_grab(&(thread->system_timer)) + timer_grab(&(thread->user_timer)) + (abstime - timer->tstamp); /* XXX need interrupts off to prevent missed time? */ diff --git a/osfmk/kern/thread.h b/osfmk/kern/thread.h index 380c3ad40..475f0c678 100644 --- a/osfmk/kern/thread.h +++ b/osfmk/kern/thread.h @@ -98,6 +98,17 @@ #include +#ifdef XNU_KERNEL_PRIVATE +/* Thread tags; for easy identification. */ +__options_closed_decl(thread_tag_t, uint16_t, { + THREAD_TAG_MAINTHREAD = 0x01, + THREAD_TAG_CALLOUT = 0x02, + THREAD_TAG_IOWORKLOOP = 0x04, + THREAD_TAG_PTHREAD = 0x10, + THREAD_TAG_WORKQUEUE = 0x20, +}); +#endif /* XNU_KERNEL_PRIVATE */ + #ifdef MACH_KERNEL_PRIVATE #include @@ -147,10 +158,18 @@ #include #endif /* MONOTONIC */ -#if CONFIG_EMBEDDED +#if CONFIG_TASKWATCH /* Taskwatch related. 
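thread_self_region_page_shift() and its setter above let the footprint inspection path choose the page size used for accounting on a per-thread basis. A hedged usage sketch of a save/set/restore discipline around one inspection pass; both that discipline and the 16 KB choice are assumptions, not taken from the patch:

static void
example_inspect_with_16k_pages(void)
{
	int saved = thread_self_region_page_shift();

	thread_self_region_page_shift_set(14);  /* 2^14 = 16 KB accounting pages */
	/* ... walk the target task's VM regions and account its footprint ... */
	thread_self_region_page_shift_set(saved);
}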
TODO: find this a better home */ typedef struct task_watcher task_watch_t; -#endif /* CONFIG_EMBEDDED */ +#endif /* CONFIG_TASKWATCH */ + +__options_decl(thread_work_interval_flags_t, uint32_t, { + TH_WORK_INTERVAL_FLAGS_NONE = 0x0, +#if CONFIG_SCHED_AUTO_JOIN + /* Flags to indicate status about work interval thread is currently part of */ + TH_WORK_INTERVAL_FLAGS_AUTO_JOIN_LEAK = 0x1, +#endif /* CONFIG_SCHED_AUTO_JOIN */ +}); struct thread { #if MACH_ASSERT @@ -175,7 +194,7 @@ struct thread { queue_chain_t runq_links; /* run queue links */ queue_chain_t wait_links; /* wait queue links */ struct mpsc_queue_chain mpsc_links; /* thread daemon mpsc links */ - struct priority_queue_entry wait_prioq_links; /* priority ordered waitq links */ + struct priority_queue_entry_sched wait_prioq_links; /* priority ordered waitq links */ }; event64_t wait_event; /* wait queue event */ @@ -183,27 +202,32 @@ struct thread { struct waitq *waitq; /* wait queue this thread is enqueued on */ struct turnstile *turnstile; /* thread's turnstile, protected by primitives interlock */ void *inheritor; /* inheritor of the primitive the thread will block on */ - struct priority_queue sched_inheritor_queue; /* Inheritor queue for kernel promotion */ - struct priority_queue base_inheritor_queue; /* Inheritor queue for user promotion */ + struct priority_queue_sched_max sched_inheritor_queue; /* Inheritor queue for kernel promotion */ + struct priority_queue_sched_max base_inheritor_queue; /* Inheritor queue for user promotion */ + +#if CONFIG_SCHED_EDGE + boolean_t th_bound_cluster_enqueued; +#endif /* CONFIG_SCHED_EDGE */ #if CONFIG_SCHED_CLUTCH /* * In the clutch scheduler, the threads are maintained in runqs at the clutch_bucket - * level (clutch_bucket defines a unique thread group and scheduling bucket pair). In - * order to determine the priority of the clutch bucket as a whole, it is necessary to - * find the highest thread in it. The thread could be present in the clutch bucket due - * to its base_pri or its promoted pri. This link is used to maintain that queue. + * level (clutch_bucket defines a unique thread group and scheduling bucket pair). 
The + * thread is linked via a couple of linkages in the clutch bucket: + * + * - A stable priority queue linkage which is the main runqueue (based on sched_pri) for the clutch bucket + * - A regular priority queue linkage which is based on thread's base/promoted pri (used for clutch bucket priority calculation) + * - A queue linkage used for timesharing operations of threads at the scheduler tick */ - struct priority_queue_entry sched_clutchpri_link; - + struct priority_queue_entry_stable th_clutch_runq_link; + struct priority_queue_entry_sched th_clutch_pri_link; + queue_chain_t th_clutch_timeshare_link; #endif /* CONFIG_SCHED_CLUTCH */ /* Data updated during assert_wait/thread_wakeup */ -#if __SMP__ decl_simple_lock_data(, sched_lock); /* scheduling lock (thread_lock()) */ decl_simple_lock_data(, wake_lock); /* for thread stop / wait (wake_lock()) */ -#endif - uint16_t options; /* options set by thread itself */ + uint16_t options; /* options set by thread itself */ #define TH_OPT_INTMASK 0x0003 /* interrupt / abort level */ #define TH_OPT_VMPRIV 0x0004 /* may allocate reserved memory */ #define TH_OPT_SYSTEM_CRITICAL 0x0010 /* Thread must always be allowed to run - even under heavy load */ @@ -215,56 +239,56 @@ struct thread { #define TH_OPT_HONOR_QLIMIT 0x0400 /* Thread will honor qlimit while sending mach_msg, regardless of MACH_SEND_ALWAYS */ #define TH_OPT_SEND_IMPORTANCE 0x0800 /* Thread will allow importance donation from kernel rpc */ #define TH_OPT_ZONE_PRIV 0x1000 /* Thread may use the zone replenish reserve */ - - bool wake_active; /* wake event on stop */ - bool at_safe_point; /* thread_abort_safely allowed */ - ast_t reason; /* why we blocked */ - uint32_t quantum_remaining; - wait_result_t wait_result; /* outcome of wait - - * may be examined by this thread - * WITHOUT locking */ +#define TH_OPT_IPC_TG_BLOCKED 0x2000 /* Thread blocked in sync IPC and has made the thread group blocked callout */ + + bool wake_active; /* wake event on stop */ + bool at_safe_point; /* thread_abort_safely allowed */ + uint8_t sched_saved_run_weight; + ast_t reason; /* why we blocked */ + uint32_t quantum_remaining; + wait_result_t wait_result; /* outcome of wait - + * may be examined by this thread + * WITHOUT locking */ thread_continue_t continuation; /* continue here next dispatch */ - void *parameter; /* continuation parameter */ + void *parameter; /* continuation parameter */ /* Data updated/used in thread_invoke */ - vm_offset_t kernel_stack; /* current kernel stack */ - vm_offset_t reserved_stack; /* reserved kernel stack */ + vm_offset_t kernel_stack; /* current kernel stack */ + vm_offset_t reserved_stack; /* reserved kernel stack */ #if KASAN struct kasan_thread_data kasan_data; #endif - #if CONFIG_KSANCOV - void *ksancov_data; + void *ksancov_data; #endif /* Thread state: */ - int state; + int state; /* * Thread states [bits or'ed] */ -#define TH_WAIT 0x01 /* queued for waiting */ -#define TH_SUSP 0x02 /* stopped or requested to stop */ -#define TH_RUN 0x04 /* running or on runq */ -#define TH_UNINT 0x08 /* waiting uninteruptibly */ -#define TH_TERMINATE 0x10 /* halted at termination */ -#define TH_TERMINATE2 0x20 /* added to termination queue */ -#define TH_WAIT_REPORT 0x40 /* the wait is using the sched_call, - * only set if TH_WAIT is also set */ -#define TH_IDLE 0x80 /* idling processor */ +#define TH_WAIT 0x01 /* queued for waiting */ +#define TH_SUSP 0x02 /* stopped or requested to stop */ +#define TH_RUN 0x04 /* running or on runq */ +#define TH_UNINT 0x08 /* waiting 
uninteruptibly */ +#define TH_TERMINATE 0x10 /* halted at termination */ +#define TH_TERMINATE2 0x20 /* added to termination queue */ +#define TH_WAIT_REPORT 0x40 /* the wait is using the sched_call, + * only set if TH_WAIT is also set */ +#define TH_IDLE 0x80 /* idling processor */ /* Scheduling information */ - sched_mode_t sched_mode; /* scheduling mode */ - sched_mode_t saved_mode; /* saved mode during forced mode demotion */ + sched_mode_t sched_mode; /* scheduling mode */ + sched_mode_t saved_mode; /* saved mode during forced mode demotion */ /* This thread's contribution to global sched counters */ - sched_bucket_t th_sched_bucket; - - sfi_class_id_t sfi_class; /* SFI class (XXX Updated on CSW/QE/AST) */ - sfi_class_id_t sfi_wait_class; /* Currently in SFI wait for this class, protected by sfi_lock */ + sched_bucket_t th_sched_bucket; + sfi_class_id_t sfi_class; /* SFI class (XXX Updated on CSW/QE/AST) */ + sfi_class_id_t sfi_wait_class; /* Currently in SFI wait for this class, protected by sfi_lock */ - uint32_t sched_flags; /* current flag bits */ + uint32_t sched_flags; /* current flag bits */ #define TH_SFLAG_NO_SMT 0x0001 /* On an SMT CPU, this thread must be scheduled alone */ #define TH_SFLAG_FAILSAFE 0x0002 /* fail-safe has tripped */ #define TH_SFLAG_THROTTLED 0x0004 /* throttled thread forced to timeshare mode (may be applied in addition to failsafe) */ @@ -290,97 +314,103 @@ struct thread { #define TH_SFLAG_EXEC_PROMOTED 0x8000 /* promote reason: thread is in an exec */ +#define TH_SFLAG_THREAD_GROUP_AUTO_JOIN 0x10000 /* thread has been auto-joined to thread group */ +#if __AMP__ +#define TH_SFLAG_BOUND_SOFT 0x20000 /* thread is soft bound to a cluster; can run anywhere if bound cluster unavailable */ +#endif /* __AMP__ */ /* 'promote reasons' that request a priority floor only, not a custom priority */ #define TH_SFLAG_PROMOTE_REASON_MASK (TH_SFLAG_RW_PROMOTED | TH_SFLAG_WAITQ_PROMOTED | TH_SFLAG_EXEC_PROMOTED) #define TH_SFLAG_RW_PROMOTED_BIT (10) /* 0x400 */ - int16_t sched_pri; /* scheduled (current) priority */ - int16_t base_pri; /* effective base priority (equal to req_base_pri unless TH_SFLAG_BASE_PRI_FROZEN) */ - int16_t req_base_pri; /* requested base priority */ - int16_t max_priority; /* copy of max base priority */ - int16_t task_priority; /* copy of task base priority */ - int16_t promotion_priority; /* priority thread is currently promoted to */ - -#if defined(CONFIG_SCHED_GRRR) -#if 0 - uint16_t grrr_deficit; /* fixed point (1/1000th quantum) fractional deficit */ -#endif -#endif + int16_t sched_pri; /* scheduled (current) priority */ + int16_t base_pri; /* effective base priority (equal to req_base_pri unless TH_SFLAG_BASE_PRI_FROZEN) */ + int16_t req_base_pri; /* requested base priority */ + int16_t max_priority; /* copy of max base priority */ + int16_t task_priority; /* copy of task base priority */ + int16_t promotion_priority; /* priority thread is currently promoted to */ - int iotier_override; /* atomic operations to set, cleared on ret to user */ - os_refcnt_t ref_count; /* number of references to me */ + int iotier_override; /* atomic operations to set, cleared on ret to user */ + os_refcnt_t ref_count; /* number of references to me */ - lck_mtx_t* waiting_for_mutex; /* points to mutex we're waiting for until we acquire it */ + lck_mtx_t* waiting_for_mutex; /* points to mutex we're waiting for until we acquire it */ - uint32_t rwlock_count; /* Number of lck_rw_t locks held by thread */ + uint32_t rwlock_count; /* Number of lck_rw_t locks held by 
thread */ + uint32_t t_temp_alloc_count; /* In flight temporary allocations */ +#if DEBUG || DEVELOPMENT + queue_head_t t_temp_alloc_list; +#endif /* DEBUG || DEVELOPMENT */ - integer_t importance; /* task-relative importance */ + integer_t importance; /* task-relative importance */ /* Priority depression expiration */ - integer_t depress_timer_active; + integer_t depress_timer_active; timer_call_data_t depress_timer; + /* real-time parameters */ - struct { /* see mach/thread_policy.h */ - uint32_t period; - uint32_t computation; - uint32_t constraint; - boolean_t preemptible; - uint64_t deadline; - } realtime; - - uint64_t last_run_time; /* time when thread was switched away from */ - uint64_t last_made_runnable_time; /* time when thread was unblocked or preempted */ - uint64_t last_basepri_change_time; /* time when thread was last changed in basepri while runnable */ - uint64_t same_pri_latency; + struct { /* see mach/thread_policy.h */ + uint32_t period; + uint32_t computation; + uint32_t constraint; + boolean_t preemptible; + uint64_t deadline; + } realtime; + + uint64_t last_run_time; /* time when thread was switched away from */ + uint64_t last_made_runnable_time; /* time when thread was unblocked or preempted */ + uint64_t last_basepri_change_time; /* time when thread was last changed in basepri while runnable */ + uint64_t same_pri_latency; #define THREAD_NOT_RUNNABLE (~0ULL) +#if CONFIG_THREAD_GROUPS + struct thread_group *thread_group; +#endif #if defined(CONFIG_SCHED_MULTIQ) - sched_group_t sched_group; + sched_group_t sched_group; #endif /* defined(CONFIG_SCHED_MULTIQ) */ /* Data used during setrun/dispatch */ timer_data_t system_timer; /* system mode timer */ - processor_t bound_processor; /* bound to a processor? */ - processor_t last_processor; /* processor last dispatched on */ - processor_t chosen_processor; /* Where we want to run this thread */ + processor_t bound_processor; /* bound to a processor? 
*/ + processor_t last_processor; /* processor last dispatched on */ + processor_t chosen_processor; /* Where we want to run this thread */ /* Fail-safe computation since last unblock or qualifying yield */ - uint64_t computation_metered; - uint64_t computation_epoch; - uint64_t safe_release; /* when to release fail-safe */ + uint64_t computation_metered; + uint64_t computation_epoch; + uint64_t safe_release; /* when to release fail-safe */ /* Call out from scheduler */ - void (*sched_call)(int type, thread_t thread); + void (*sched_call)(int type, thread_t thread); #if defined(CONFIG_SCHED_PROTO) - uint32_t runqueue_generation; /* last time runqueue was drained */ + uint32_t runqueue_generation; /* last time runqueue was drained */ #endif /* Statistics and timesharing calculations */ #if defined(CONFIG_SCHED_TIMESHARE_CORE) - natural_t sched_stamp; /* last scheduler tick */ - natural_t sched_usage; /* timesharing cpu usage [sched] */ - natural_t pri_shift; /* usage -> priority from pset */ - natural_t cpu_usage; /* instrumented cpu usage [%cpu] */ - natural_t cpu_delta; /* accumulated cpu_usage delta */ + natural_t sched_stamp; /* last scheduler tick */ + natural_t sched_usage; /* timesharing cpu usage [sched] */ + natural_t pri_shift; /* usage -> priority from pset */ + natural_t cpu_usage; /* instrumented cpu usage [%cpu] */ + natural_t cpu_delta; /* accumulated cpu_usage delta */ #endif /* CONFIG_SCHED_TIMESHARE_CORE */ - uint32_t c_switch; /* total context switches */ - uint32_t p_switch; /* total processor switches */ - uint32_t ps_switch; /* total pset switches */ + uint32_t c_switch; /* total context switches */ + uint32_t p_switch; /* total processor switches */ + uint32_t ps_switch; /* total pset switches */ integer_t mutex_count; /* total count of locks held */ /* Timing data structures */ - int precise_user_kernel_time; /* precise user/kernel enabled for this thread */ - timer_data_t user_timer; /* user mode timer */ - uint64_t user_timer_save; /* saved user timer value */ - uint64_t system_timer_save; /* saved system timer value */ - uint64_t vtimer_user_save; /* saved values for vtimers */ - uint64_t vtimer_prof_save; - uint64_t vtimer_rlim_save; - uint64_t vtimer_qos_save; + int precise_user_kernel_time; /* precise user/kernel enabled for this thread */ + timer_data_t user_timer; /* user mode timer */ + uint64_t user_timer_save; /* saved user timer value */ + uint64_t system_timer_save; /* saved system timer value */ + uint64_t vtimer_user_save; /* saved values for vtimers */ + uint64_t vtimer_prof_save; + uint64_t vtimer_rlim_save; + uint64_t vtimer_qos_save; timer_data_t ptime; /* time executing in P mode */ timer_data_t runnable_timer; /* time the thread is runnable (including running) */ @@ -394,12 +424,12 @@ struct thread { * Processor/cache affinity * - affinity_threads links task threads with the same affinity set */ - queue_chain_t affinity_threads; - affinity_set_t affinity_set; + queue_chain_t affinity_threads; + affinity_set_t affinity_set; -#if CONFIG_EMBEDDED - task_watch_t * taskwatch; /* task watch */ -#endif /* CONFIG_EMBEDDED */ +#if CONFIG_TASKWATCH + task_watch_t *taskwatch; /* task watch */ +#endif /* CONFIG_TASKWATCH */ /* Various bits of state to stash across a continuation, exclusive to the current thread block point */ union { @@ -417,8 +447,8 @@ struct thread { struct ipc_kmsg *kmsg; /* received message */ struct ipc_mqueue *peekq; /* mqueue to peek at */ struct { - mach_msg_priority_t qos; /* received message qos */ - mach_msg_priority_t oqos; /* 
override qos for message */ + uint32_t ppri; /* received message pthread_priority_t */ + mach_msg_qos_t oqos; /* override qos for message */ } received_qos; }; mach_msg_continue_t continuation; @@ -426,8 +456,8 @@ struct thread { struct { struct semaphore *waitsemaphore; /* semaphore ref */ struct semaphore *signalsemaphore; /* semaphore ref */ - int options; /* semaphore options */ - kern_return_t result; /* primary result */ + int options; /* semaphore options */ + kern_return_t result; /* primary result */ mach_msg_continue_t continuation; } sema; struct { @@ -438,11 +468,8 @@ struct thread { /* Only user threads can cause guard exceptions, only kernel threads can be thread call threads */ union { - /* Group and call this thread is working on behalf of */ - struct { - struct thread_call_group * thc_group; - struct thread_call * thc_call; /* debug only, may be deallocated */ - } thc_state; + /* Thread call thread's state structure, stored on its stack */ + struct thread_call_thread_state *thc_state; /* Structure to save information about guard exception */ struct { @@ -452,9 +479,9 @@ struct thread { }; /* Kernel holds on this thread */ - int16_t suspend_count; + int16_t suspend_count; /* User level suspensions */ - int16_t user_stop_count; + int16_t user_stop_count; /* IPC data structures */ #if IMPORTANCE_INHERITANCE @@ -464,19 +491,20 @@ struct thread { mach_port_t ith_rpc_reply; /* reply port for kernel RPCs */ /* Pending thread ast(s) */ - ast_t ast; + ast_t ast; /* Ast/Halt data structures */ - vm_offset_t recover; /* page fault recover(copyin/out) */ + vm_offset_t recover; /* page fault recover(copyin/out) */ - queue_chain_t threads; /* global list of all threads */ + queue_chain_t threads; /* global list of all threads */ /* Activation */ - queue_chain_t task_threads; + queue_chain_t task_threads; /* Task membership */ - struct task *task; - vm_map_t map; + struct task *task; + vm_map_t map; + thread_t handoff_thread; #if DEVELOPMENT || DEBUG bool pmap_footprint_suspended; #endif /* DEVELOPMENT || DEBUG */ @@ -499,33 +527,36 @@ struct thread { decl_lck_mtx_data(, mutex); - /* Ports associated with this thread */ - struct ipc_port *ith_self; /* not a right, doesn't hold ref */ - struct ipc_port *ith_sself; /* a send right */ - struct ipc_port *ith_special_reply_port; /* ref to special reply port */ + /* + * Different flavors of thread port. 
+ * These flavors THREAD_FLAVOR_* are defined in mach_types.h + */ + struct ipc_port *ith_self[THREAD_SELF_PORT_COUNT]; /* does not hold right */ + struct ipc_port *ith_settable_self; /* a send right */ + struct ipc_port *ith_special_reply_port; /* ref to special reply port */ struct exception_action *exc_actions; #ifdef MACH_BSD - void *uthread; + void *uthread; #endif #if CONFIG_DTRACE - uint16_t t_dtrace_flags; /* DTrace thread states */ + uint16_t t_dtrace_flags; /* DTrace thread states */ #define TH_DTRACE_EXECSUCCESS 0x01 - uint16_t t_dtrace_inprobe; /* Executing under dtrace_probe */ - uint32_t t_dtrace_predcache; /* DTrace per thread predicate value hint */ - int64_t t_dtrace_tracing; /* Thread time under dtrace_probe() */ - int64_t t_dtrace_vtime; + uint16_t t_dtrace_inprobe; /* Executing under dtrace_probe */ + uint32_t t_dtrace_predcache; /* DTrace per thread predicate value hint */ + int64_t t_dtrace_tracing; /* Thread time under dtrace_probe() */ + int64_t t_dtrace_vtime; #endif - clock_sec_t t_page_creation_time; - uint32_t t_page_creation_count; - uint32_t t_page_creation_throttled; + clock_sec_t t_page_creation_time; + uint32_t t_page_creation_count; + uint32_t t_page_creation_throttled; #if (DEVELOPMENT || DEBUG) - uint64_t t_page_creation_throttled_hard; - uint64_t t_page_creation_throttled_soft; + uint64_t t_page_creation_throttled_hard; + uint64_t t_page_creation_throttled_soft; #endif /* DEVELOPMENT || DEBUG */ - int t_pagein_error; /* for vm_fault(), holds error from vnop_pagein() */ + int t_pagein_error; /* for vm_fault(), holds error from vnop_pagein() */ #ifdef KPERF /* The high 8 bits are the number of frames to sample of a user callstack. */ @@ -546,22 +577,25 @@ struct thread { /* only go up to T_KPERF_ACTIONID_OFFSET - 1 */ #ifdef KPERF - uint32_t kperf_ast; - uint32_t kperf_pet_gen; /* last generation of PET that sampled this thread*/ - uint32_t kperf_c_switch; /* last dispatch detection */ - uint32_t kperf_pet_cnt; /* how many times a thread has been sampled by PET */ + uint32_t kperf_ast; + uint32_t kperf_pet_gen; /* last generation of PET that sampled this thread*/ + uint32_t kperf_c_switch; /* last dispatch detection */ + uint32_t kperf_pet_cnt; /* how many times a thread has been sampled by PET */ #endif #ifdef KPC /* accumulated performance counters for this thread */ - uint64_t *kpc_buf; + uint64_t *kpc_buf; #endif #if HYPERVISOR /* hypervisor virtual CPU object associated with this thread */ - void *hv_thread_target; + void *hv_thread_target; #endif /* HYPERVISOR */ + /*** Machine-dependent state ***/ + struct machine_thread machine; + /* Statistics accumulated per-thread and aggregated per-task */ uint32_t syscalls_unix; uint32_t syscalls_mach; @@ -571,15 +605,12 @@ struct thread { uint64_t t_deduct_bank_ledger_time; /* cpu time to be deducted from bank ledger */ uint64_t t_deduct_bank_ledger_energy; /* energy to be deducted from bank ledger */ - uint64_t thread_id; /*system wide unique thread-id*/ + uint64_t thread_id; /*system wide unique thread-id*/ #if MONOTONIC - struct mt_thread t_monotonic; + struct mt_thread t_monotonic; #endif /* MONOTONIC */ - /*** Machine-dependent state ***/ - struct machine_thread machine; - /* policy is protected by the thread mutex */ struct thread_requested_policy requested_policy; struct thread_effective_policy effective_policy; @@ -593,45 +624,51 @@ struct thread { user_addr_t override_resource; } *overrides; - uint32_t kevent_overrides; - uint8_t user_promotion_basepri; - uint8_t kern_promotion_schedpri; - _Atomic 
uint16_t kevent_ast_bits; + uint32_t kevent_overrides; + uint8_t user_promotion_basepri; + uint8_t kern_promotion_schedpri; + _Atomic uint16_t kevent_ast_bits; + + io_stat_info_t thread_io_stats; /* per-thread I/O statistics */ - io_stat_info_t thread_io_stats; /* per-thread I/O statistics */ + uint32_t thread_callout_interrupt_wakeups; + uint32_t thread_callout_platform_idle_wakeups; + uint32_t thread_timer_wakeups_bin_1; + uint32_t thread_timer_wakeups_bin_2; + thread_tag_t thread_tag; - uint32_t thread_callout_interrupt_wakeups; - uint32_t thread_callout_platform_idle_wakeups; - uint32_t thread_timer_wakeups_bin_1; - uint32_t thread_timer_wakeups_bin_2; - uint16_t thread_tag; /* * callout_* fields are only set for thread call threads whereas guard_exc_fatal is set * by user threads on themselves while taking a guard exception. So it's okay for them to * share this bitfield. */ - uint16_t callout_woken_from_icontext:1, + uint16_t + callout_woken_from_icontext:1, callout_woken_from_platform_idle:1, callout_woke_thread:1, guard_exc_fatal:1, thread_bitfield_unused:12; - mach_port_name_t ith_voucher_name; - ipc_voucher_t ith_voucher; + mach_port_name_t ith_voucher_name; + ipc_voucher_t ith_voucher; #if CONFIG_IOSCHED - void *decmp_upl; + void *decmp_upl; #endif /* CONFIG_IOSCHED */ /* work interval (if any) associated with the thread. Uses thread mutex */ struct work_interval *th_work_interval; + thread_work_interval_flags_t th_work_interval_flags; #if SCHED_TRACE_THREAD_WAKEUPS uintptr_t thread_wakeup_bt[64]; #endif turnstile_update_flags_t inheritor_flags; /* inheritor flags for inheritor field */ - block_hint_t pending_block_hint; - block_hint_t block_hint; /* What type of primitive last caused us to block. */ - integer_t decompressions; /* Per-thread decompressions counter to be added to per-task decompressions counter */ + block_hint_t pending_block_hint; + block_hint_t block_hint; /* What type of primitive last caused us to block. */ + integer_t decompressions; /* Per-thread decompressions counter to be added to per-task decompressions counter */ + int thread_region_page_shift; /* Page shift that this thread would like to use when */ + /* introspecting a task. This is currently being used */ + /* by footprint which uses a thread for each task being inspected. 
*/ }; #define ith_state saved.receive.state @@ -645,7 +682,7 @@ struct thread { #define ith_kmsg saved.receive.kmsg #define ith_peekq saved.receive.peekq #define ith_knote saved.receive.knote -#define ith_qos saved.receive.received_qos.qos +#define ith_ppriority saved.receive.received_qos.ppri #define ith_qos_override saved.receive.received_qos.oqos #define ith_seqno saved.receive.seqno @@ -698,6 +735,9 @@ extern void thread_deallocate( extern void thread_inspect_deallocate( thread_inspect_t thread); +extern void thread_read_deallocate( + thread_read_t thread); + extern void thread_terminate_self(void); extern kern_return_t thread_terminate_internal( @@ -741,7 +781,6 @@ extern boolean_t thread_is_active(thread_t thread); extern lck_grp_t thread_lck_grp; /* Locking for scheduler state, always acquired with interrupts disabled (splsched()) */ -#if __SMP__ #define thread_lock_init(th) simple_lock_init(&(th)->sched_lock, 0) #define thread_lock(th) simple_lock(&(th)->sched_lock, &thread_lck_grp) #define thread_unlock(th) simple_unlock(&(th)->sched_lock) @@ -749,15 +788,6 @@ extern lck_grp_t thread_lck_grp; #define wake_lock_init(th) simple_lock_init(&(th)->wake_lock, 0) #define wake_lock(th) simple_lock(&(th)->wake_lock, &thread_lck_grp) #define wake_unlock(th) simple_unlock(&(th)->wake_lock) -#else -#define thread_lock_init(th) do { (void)th; } while(0) -#define thread_lock(th) do { (void)th; } while(0) -#define thread_unlock(th) do { (void)th; } while(0) - -#define wake_lock_init(th) do { (void)th; } while(0) -#define wake_lock(th) do { (void)th; } while(0) -#define wake_unlock(th) do { (void)th; } while(0) -#endif #define thread_should_halt_fast(thread) (!(thread)->active) @@ -867,6 +897,7 @@ extern void machine_thread_init(void); extern void machine_thread_template_init(thread_t thr_template); + extern kern_return_t machine_thread_create( thread_t thread, task_t task); @@ -896,6 +927,7 @@ extern kern_return_t machine_thread_set_tsd_base( #define thread_mtx_lock(thread) lck_mtx_lock(&(thread)->mutex) #define thread_mtx_try(thread) lck_mtx_try_lock(&(thread)->mutex) #define thread_mtx_unlock(thread) lck_mtx_unlock(&(thread)->mutex) +#define thread_mtx_held(thread) lck_mtx_assert(&(thread)->mutex, LCK_MTX_ASSERT_OWNED) extern void thread_apc_ast(thread_t thread); @@ -906,23 +938,14 @@ void act_machine_sv_free(thread_t, int); vm_offset_t min_valid_stack_address(void); vm_offset_t max_valid_stack_address(void); -static inline uint16_t -thread_set_tag_internal(thread_t thread, uint16_t tag) -{ - return os_atomic_or_orig(&thread->thread_tag, tag, relaxed); -} - -static inline uint16_t -thread_get_tag_internal(thread_t thread) -{ - return thread->thread_tag; -} - extern bool thread_no_smt(thread_t thread); extern bool processor_active_thread_no_smt(processor_t processor); extern void thread_set_options(uint32_t thopt); +#if CONFIG_THREAD_GROUPS +struct thread_group *thread_get_current_voucher_thread_group(thread_t thread); +#endif /* CONFIG_THREAD_GROUPS */ #else /* MACH_KERNEL_PRIVATE */ @@ -993,19 +1016,24 @@ __BEGIN_DECLS #ifdef XNU_KERNEL_PRIVATE -/* - * Thread tags; for easy identification. 
- */ -#define THREAD_TAG_MAINTHREAD 0x1 -#define THREAD_TAG_CALLOUT 0x2 -#define THREAD_TAG_IOWORKLOOP 0x4 +uint16_t thread_set_tag(thread_t thread, uint16_t tag); +uint16_t thread_get_tag(thread_t thread); -#define THREAD_TAG_PTHREAD 0x10 -#define THREAD_TAG_WORKQUEUE 0x20 +#ifdef MACH_KERNEL_PRIVATE +static inline thread_tag_t +thread_set_tag_internal(thread_t thread, thread_tag_t tag) +{ + return os_atomic_or_orig(&thread->thread_tag, tag, relaxed); +} -uint16_t thread_set_tag(thread_t, uint16_t); -uint16_t thread_get_tag(thread_t); -uint64_t thread_last_run_time(thread_t); +static inline thread_tag_t +thread_get_tag_internal(thread_t thread) +{ + return thread->thread_tag; +} +#endif /* MACH_KERNEL_PRIVATE */ + +uint64_t thread_last_run_time(thread_t thread); extern kern_return_t thread_state_initialize( thread_t thread); @@ -1086,10 +1114,11 @@ extern void thread_setuserstack( thread_t thread, mach_vm_offset_t user_stack); -extern uint64_t thread_adjuserstack( +extern user_addr_t thread_adjuserstack( thread_t thread, int adjust); + extern void thread_setentrypoint( thread_t thread, mach_vm_offset_t entry); @@ -1176,7 +1205,6 @@ extern void uthread_cleanup_name(void *uthread); extern void uthread_cleanup(task_t, void *, void *); extern void uthread_zone_free(void *); extern void uthread_cred_free(void *); - extern void uthread_reset_proc_refcount(void *); #if PROC_REF_DEBUG extern int uthread_get_proc_refcount(void *); @@ -1274,40 +1302,44 @@ extern kern_return_t machine_thread_function_pointers_convert_from_user( user_addr_t *fptrs, uint32_t count); -/* Get a backtrace for a threads kernel or user stack (user_p), with pc and optionally - * frame pointer (getfp). Returns bytes added to buffer, and kThreadTruncatedBT in - * thread_trace_flags if a user page is not present after kdp_lightweight_fault() is - * called. +/* + * Get a backtrace for a threads kernel or user stack (user_p), using fp to start the + * backtrace if provided. + * + * Returns bytes added to buffer, and kThreadTruncatedBT in thread_trace_flags if a + * user page is not present after kdp_lightweight_fault() is * called. */ -extern int machine_trace_thread( +extern int machine_trace_thread( thread_t thread, char *tracepos, char *tracebound, int nframes, boolean_t user_p, - boolean_t getfp, uint32_t *thread_trace_flags); -extern int machine_trace_thread64(thread_t thread, +extern int machine_trace_thread64(thread_t thread, char *tracepos, char *tracebound, int nframes, boolean_t user_p, - boolean_t getfp, uint32_t *thread_trace_flags, - uint64_t *sp); + uint64_t *sp, + vm_offset_t fp); /* * Get the duration of the given thread's last wait. */ uint64_t thread_get_last_wait_duration(thread_t thread); -extern void thread_set_no_smt(bool set); extern bool thread_get_no_smt(void); #endif /* XNU_KERNEL_PRIVATE */ +#ifdef KERNEL_PRIVATE +extern void thread_set_no_smt(bool set); +#endif /* KERNEL_PRIVATE */ + /*! @function thread_has_thread_name * @abstract Checks if a thread has a name. * @discussion This function takes one input, a thread, and returns a boolean value indicating if that thread already has a name associated with it. @@ -1324,6 +1356,11 @@ extern boolean_t thread_has_thread_name(thread_t th); */ extern void thread_set_thread_name(thread_t th, const char* name); +#ifdef XNU_KERNEL_PRIVATE +extern void +thread_get_thread_name(thread_t th, char* name); +#endif /* XNU_KERNEL_PRIVATE */ + /*! @function kernel_thread_start * @abstract Create a kernel thread. 
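The thread-tag helpers redeclared above (thread_set_tag()/thread_get_tag(), backed by the thread_tag_t options near the top of thread.h) OR bits into a per-thread identification field, so tags accumulate rather than overwrite one another. A hedged usage sketch; the wrapper function and call site are hypothetical:

static void
example_tag_ioworkloop_thread(thread_t th)
{
	thread_set_tag(th, THREAD_TAG_IOWORKLOOP);
	assert(thread_get_tag(th) & THREAD_TAG_IOWORKLOOP);
}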
* @discussion This function takes three input parameters, namely reference to the function that the thread should execute, caller specified data and a reference which is used to return the newly created kernel thread. The function returns KERN_SUCCESS on success or an appropriate kernel code type indicating the error. It may be noted that the caller is responsible for explicitly releasing the reference to the created thread when no longer needed. This should be done by calling thread_deallocate(new_thread). @@ -1345,11 +1382,15 @@ void thread_set_honor_qlimit(thread_t thread); void thread_clear_honor_qlimit(thread_t thread); extern ipc_port_t convert_thread_to_port(thread_t); extern ipc_port_t convert_thread_inspect_to_port(thread_inspect_t); +extern ipc_port_t convert_thread_read_to_port(thread_read_t); extern boolean_t is_vm_privileged(void); extern boolean_t set_vm_privilege(boolean_t); extern kern_allocation_name_t thread_set_allocation_name(kern_allocation_name_t new_name); extern void *thread_iokit_tls_get(uint32_t index); extern void thread_iokit_tls_set(uint32_t index, void * data); +extern void thread_port_with_flavor_notify(mach_msg_header_t *msg); +extern int thread_self_region_page_shift(void); +extern void thread_self_region_page_shift_set(int pgshift); #endif /* KERNEL_PRIVATE */ __END_DECLS diff --git a/osfmk/kern/thread_act.c b/osfmk/kern/thread_act.c index 944d61d99..ccfb5eb3d 100644 --- a/osfmk/kern/thread_act.c +++ b/osfmk/kern/thread_act.c @@ -74,6 +74,7 @@ #include #include #include +#include #include @@ -643,6 +644,67 @@ thread_set_state_from_user( return thread_set_state_internal(thread, flavor, state, state_count, TRUE); } +kern_return_t +thread_convert_thread_state( + thread_t thread, + int direction, + thread_state_flavor_t flavor, + thread_state_t in_state, /* pointer to IN array */ + mach_msg_type_number_t in_state_count, + thread_state_t out_state, /* pointer to OUT array */ + mach_msg_type_number_t *out_state_count) /*IN/OUT*/ +{ + kern_return_t kr; + thread_t to_thread = THREAD_NULL; + thread_t from_thread = THREAD_NULL; + mach_msg_type_number_t state_count = in_state_count; + + if (direction != THREAD_CONVERT_THREAD_STATE_TO_SELF && + direction != THREAD_CONVERT_THREAD_STATE_FROM_SELF) { + return KERN_INVALID_ARGUMENT; + } + + if (thread == THREAD_NULL) { + return KERN_INVALID_ARGUMENT; + } + + if (state_count > *out_state_count) { + return KERN_INSUFFICIENT_BUFFER_SIZE; + } + + if (direction == THREAD_CONVERT_THREAD_STATE_FROM_SELF) { + to_thread = thread; + from_thread = current_thread(); + } else { + to_thread = current_thread(); + from_thread = thread; + } + + /* Authenticate and convert thread state to kernel representation */ + kr = machine_thread_state_convert_from_user(from_thread, flavor, + in_state, state_count); + + /* Return early if one of the thread was jop disabled while other wasn't */ + if (kr != KERN_SUCCESS) { + return kr; + } + + /* Convert thread state to target thread user representation */ + kr = machine_thread_state_convert_to_user(to_thread, flavor, + in_state, &state_count); + + if (kr == KERN_SUCCESS) { + if (state_count <= *out_state_count) { + memcpy(out_state, in_state, state_count * sizeof(uint32_t)); + *out_state_count = state_count; + } else { + kr = KERN_INSUFFICIENT_BUFFER_SIZE; + } + } + + return kr; +} + /* * Kernel-internal "thread" interfaces used outside this file: */ @@ -690,7 +752,6 @@ thread_state_initialize( return result; } - kern_return_t thread_dup( thread_t target) @@ -1002,6 +1063,7 @@ thread_apc_ast(thread_t 
thread) thread_mtx_unlock(thread); } + /* Prototype, see justification above */ kern_return_t act_set_state( diff --git a/osfmk/kern/thread_call.c b/osfmk/kern/thread_call.c index 7c8be9695..afe86a612 100644 --- a/osfmk/kern/thread_call.c +++ b/osfmk/kern/thread_call.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 1993-1995, 1999-2008 Apple Inc. All rights reserved. + * Copyright (c) 1993-1995, 1999-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -42,7 +42,6 @@ #include #include -#include #include #include @@ -54,8 +53,10 @@ #endif #include -static zone_t thread_call_zone; -static struct waitq daemon_waitq; +static ZONE_DECLARE(thread_call_zone, "thread_call", + sizeof(thread_call_data_t), ZC_NOENCRYPT); + +static struct waitq daemon_waitq; typedef enum { TCF_ABSOLUTE = 0, @@ -70,72 +71,75 @@ __options_decl(thread_call_group_flags_t, uint32_t, { }); static struct thread_call_group { + __attribute__((aligned(128))) lck_ticket_t tcg_lock; + const char * tcg_name; queue_head_t pending_queue; uint32_t pending_count; queue_head_t delayed_queues[TCF_COUNT]; + struct priority_queue_deadline_min delayed_pqueues[TCF_COUNT]; timer_call_data_t delayed_timers[TCF_COUNT]; timer_call_data_t dealloc_timer; struct waitq idle_waitq; + uint64_t idle_timestamp; uint32_t idle_count, active_count, blocked_count; uint32_t tcg_thread_pri; uint32_t target_thread_count; - uint64_t idle_timestamp; - thread_call_group_flags_t flags; + thread_call_group_flags_t tcg_flags; } thread_call_groups[THREAD_CALL_INDEX_MAX] = { [THREAD_CALL_INDEX_HIGH] = { .tcg_name = "high", .tcg_thread_pri = BASEPRI_PREEMPT_HIGH, .target_thread_count = 4, - .flags = TCG_NONE, + .tcg_flags = TCG_NONE, }, [THREAD_CALL_INDEX_KERNEL] = { .tcg_name = "kernel", .tcg_thread_pri = BASEPRI_KERNEL, .target_thread_count = 1, - .flags = TCG_PARALLEL, + .tcg_flags = TCG_PARALLEL, }, [THREAD_CALL_INDEX_USER] = { .tcg_name = "user", .tcg_thread_pri = BASEPRI_DEFAULT, .target_thread_count = 1, - .flags = TCG_PARALLEL, + .tcg_flags = TCG_PARALLEL, }, [THREAD_CALL_INDEX_LOW] = { .tcg_name = "low", .tcg_thread_pri = MAXPRI_THROTTLE, .target_thread_count = 1, - .flags = TCG_PARALLEL, + .tcg_flags = TCG_PARALLEL, }, [THREAD_CALL_INDEX_KERNEL_HIGH] = { .tcg_name = "kernel-high", .tcg_thread_pri = BASEPRI_PREEMPT, .target_thread_count = 2, - .flags = TCG_NONE, + .tcg_flags = TCG_NONE, }, [THREAD_CALL_INDEX_QOS_UI] = { .tcg_name = "qos-ui", .tcg_thread_pri = BASEPRI_FOREGROUND, .target_thread_count = 1, - .flags = TCG_NONE, + .tcg_flags = TCG_NONE, }, [THREAD_CALL_INDEX_QOS_IN] = { .tcg_name = "qos-in", .tcg_thread_pri = BASEPRI_USER_INITIATED, .target_thread_count = 1, - .flags = TCG_NONE, + .tcg_flags = TCG_NONE, }, [THREAD_CALL_INDEX_QOS_UT] = { .tcg_name = "qos-ut", .tcg_thread_pri = BASEPRI_UTILITY, .target_thread_count = 1, - .flags = TCG_NONE, + .tcg_flags = TCG_NONE, }, }; @@ -147,19 +151,41 @@ typedef struct thread_call_group *thread_call_group_t; #define THREAD_CALL_MACH_FACTOR_CAP 3 #define THREAD_CALL_GROUP_MAX_THREADS 500 -static boolean_t thread_call_daemon_awake; +struct thread_call_thread_state { + struct thread_call_group * thc_group; + struct thread_call * thc_call; /* debug only, may be deallocated */ + uint64_t thc_call_start; + uint64_t thc_call_soft_deadline; + uint64_t thc_call_hard_deadline; + uint64_t thc_call_pending_timestamp; + uint64_t thc_IOTES_invocation_timestamp; + thread_call_func_t thc_func; + thread_call_param_t thc_param0; + thread_call_param_t thc_param1; +}; + +static bool 
thread_call_daemon_awake = true; +/* + * This special waitq exists because the daemon thread + * might need to be woken while already holding a global waitq locked. + */ +static struct waitq daemon_waitq; + static thread_call_data_t internal_call_storage[INTERNAL_CALL_COUNT]; static queue_head_t thread_call_internal_queue; int thread_call_internal_queue_count = 0; static uint64_t thread_call_dealloc_interval_abs; -static __inline__ thread_call_t _internal_call_allocate(thread_call_func_t func, thread_call_param_t param0); -static __inline__ void _internal_call_release(thread_call_t call); -static __inline__ boolean_t _pending_call_enqueue(thread_call_t call, thread_call_group_t group); -static boolean_t _delayed_call_enqueue(thread_call_t call, thread_call_group_t group, +static void _internal_call_init(void); + +static thread_call_t _internal_call_allocate(thread_call_func_t func, thread_call_param_t param0); +static bool _is_internal_call(thread_call_t call); +static void _internal_call_release(thread_call_t call); +static bool _pending_call_enqueue(thread_call_t call, thread_call_group_t group, uint64_t now); +static bool _delayed_call_enqueue(thread_call_t call, thread_call_group_t group, uint64_t deadline, thread_call_flavor_t flavor); -static __inline__ boolean_t _call_dequeue(thread_call_t call, thread_call_group_t group); -static __inline__ void thread_call_wake(thread_call_group_t group); +static bool _call_dequeue(thread_call_t call, thread_call_group_t group); +static void thread_call_wake(thread_call_group_t group); static void thread_call_daemon(void *arg); static void thread_call_thread(thread_call_group_t group, wait_result_t wres); static void thread_call_dealloc_timer(timer_call_param_t p0, timer_call_param_t p1); @@ -167,7 +193,7 @@ static void thread_call_group_setup(thread_call_group_t grou static void sched_call_thread(int type, thread_t thread); static void thread_call_start_deallocate_timer(thread_call_group_t group); static void thread_call_wait_locked(thread_call_t call, spl_t s); -static boolean_t thread_call_wait_once_locked(thread_call_t call, spl_t s); +static bool thread_call_wait_once_locked(thread_call_t call, spl_t s); static boolean_t thread_call_enter_delayed_internal(thread_call_t call, thread_call_func_t alt_func, thread_call_param_t alt_param0, @@ -177,42 +203,204 @@ static boolean_t thread_call_enter_delayed_internal(thread_call_t /* non-static so dtrace can find it rdar://problem/31156135&31379348 */ extern void thread_call_delayed_timer(timer_call_param_t p0, timer_call_param_t p1); -lck_grp_t thread_call_lck_grp; -lck_mtx_t thread_call_lock_data; +LCK_GRP_DECLARE(thread_call_lck_grp, "thread_call"); -#define thread_call_lock_spin() \ - lck_mtx_lock_spin_always(&thread_call_lock_data) -#define thread_call_unlock() \ - lck_mtx_unlock_always(&thread_call_lock_data) +static void +thread_call_lock_spin(thread_call_group_t group) +{ + lck_ticket_lock(&group->tcg_lock, &thread_call_lck_grp); +} + +static void +thread_call_unlock(thread_call_group_t group) +{ + lck_ticket_unlock(&group->tcg_lock); +} -#define tc_deadline tc_call.deadline +static void __assert_only +thread_call_assert_locked(thread_call_group_t group) +{ + lck_ticket_assert_owned(&group->tcg_lock); +} -extern boolean_t mach_timer_coalescing_enabled; -static inline spl_t -disable_ints_and_lock(void) +static spl_t +disable_ints_and_lock(thread_call_group_t group) { spl_t s = splsched(); - thread_call_lock_spin(); + thread_call_lock_spin(group); return s; } -static inline void 
-enable_ints_and_unlock(spl_t s) +static void +enable_ints_and_unlock(thread_call_group_t group, spl_t s) { - thread_call_unlock(); + thread_call_unlock(group); splx(s); } -static inline boolean_t +/* Lock held */ +static thread_call_group_t +thread_call_get_group(thread_call_t call) +{ + thread_call_index_t index = call->tc_index; + + assert(index >= 0 && index < THREAD_CALL_INDEX_MAX); + + return &thread_call_groups[index]; +} + +/* Lock held */ +static thread_call_flavor_t +thread_call_get_flavor(thread_call_t call) +{ + return (call->tc_flags & THREAD_CALL_FLAG_CONTINUOUS) ? TCF_CONTINUOUS : TCF_ABSOLUTE; +} + +/* Lock held */ +static thread_call_flavor_t +thread_call_set_flavor(thread_call_t call, thread_call_flavor_t flavor) +{ + assert(flavor == TCF_CONTINUOUS || flavor == TCF_ABSOLUTE); + thread_call_flavor_t old_flavor = thread_call_get_flavor(call); + + if (old_flavor != flavor) { + if (flavor == TCF_CONTINUOUS) { + call->tc_flags |= THREAD_CALL_FLAG_CONTINUOUS; + } else { + call->tc_flags &= ~THREAD_CALL_FLAG_CONTINUOUS; + } + } + + return old_flavor; +} + +/* returns true if it was on a queue */ +static bool +thread_call_enqueue_tail( + thread_call_t call, + queue_t new_queue) +{ + queue_t old_queue = call->tc_queue; + + thread_call_group_t group = thread_call_get_group(call); + thread_call_flavor_t flavor = thread_call_get_flavor(call); + + if (old_queue != NULL && + old_queue != &group->delayed_queues[flavor]) { + panic("thread call (%p) on bad queue (old_queue: %p)", call, old_queue); + } + + if (old_queue == &group->delayed_queues[flavor]) { + priority_queue_remove(&group->delayed_pqueues[flavor], &call->tc_pqlink); + } + + if (old_queue == NULL) { + enqueue_tail(new_queue, &call->tc_qlink); + } else { + re_queue_tail(new_queue, &call->tc_qlink); + } + + call->tc_queue = new_queue; + + return old_queue != NULL; +} + +static queue_head_t * +thread_call_dequeue( + thread_call_t call) +{ + queue_t old_queue = call->tc_queue; + + thread_call_group_t group = thread_call_get_group(call); + thread_call_flavor_t flavor = thread_call_get_flavor(call); + + if (old_queue != NULL && + old_queue != &group->pending_queue && + old_queue != &group->delayed_queues[flavor]) { + panic("thread call (%p) on bad queue (old_queue: %p)", call, old_queue); + } + + if (old_queue == &group->delayed_queues[flavor]) { + priority_queue_remove(&group->delayed_pqueues[flavor], &call->tc_pqlink); + } + + if (old_queue != NULL) { + remqueue(&call->tc_qlink); + + call->tc_queue = NULL; + } + return old_queue; +} + +static queue_head_t * +thread_call_enqueue_deadline( + thread_call_t call, + thread_call_group_t group, + thread_call_flavor_t flavor, + uint64_t deadline) +{ + queue_t old_queue = call->tc_queue; + queue_t new_queue = &group->delayed_queues[flavor]; + + thread_call_flavor_t old_flavor = thread_call_set_flavor(call, flavor); + + if (old_queue != NULL && + old_queue != &group->pending_queue && + old_queue != &group->delayed_queues[old_flavor]) { + panic("thread call (%p) on bad queue (old_queue: %p)", call, old_queue); + } + + if (old_queue == new_queue) { + /* optimize the same-queue case to avoid a full re-insert */ + uint64_t old_deadline = call->tc_pqlink.deadline; + call->tc_pqlink.deadline = deadline; + + if (old_deadline < deadline) { + priority_queue_entry_increased(&group->delayed_pqueues[flavor], + &call->tc_pqlink); + } else { + priority_queue_entry_decreased(&group->delayed_pqueues[flavor], + &call->tc_pqlink); + } + } else { + if (old_queue == &group->delayed_queues[old_flavor]) { + 
priority_queue_remove(&group->delayed_pqueues[old_flavor], + &call->tc_pqlink); + } + + call->tc_pqlink.deadline = deadline; + + priority_queue_insert(&group->delayed_pqueues[flavor], &call->tc_pqlink); + } + + if (old_queue == NULL) { + enqueue_tail(new_queue, &call->tc_qlink); + } else if (old_queue != new_queue) { + re_queue_tail(new_queue, &call->tc_qlink); + } + + call->tc_queue = new_queue; + + return old_queue; +} + +uint64_t +thread_call_get_armed_deadline(thread_call_t call) +{ + return call->tc_pqlink.deadline; +} + + +static bool group_isparallel(thread_call_group_t group) { - return (group->flags & TCG_PARALLEL) != 0; + return (group->tcg_flags & TCG_PARALLEL) != 0; } -static boolean_t +static bool thread_call_group_should_add_thread(thread_call_group_t group) { if ((group->active_count + group->blocked_count + group->idle_count) >= THREAD_CALL_GROUP_MAX_THREADS) { @@ -221,17 +409,17 @@ thread_call_group_should_add_thread(thread_call_group_t group) group->active_count, group->blocked_count, group->idle_count); } - if (group_isparallel(group) == FALSE) { + if (group_isparallel(group) == false) { if (group->pending_count > 0 && group->active_count == 0) { - return TRUE; + return true; } - return FALSE; + return false; } if (group->pending_count > 0) { if (group->idle_count > 0) { - return FALSE; + return false; } uint32_t thread_count = group->active_count; @@ -248,41 +436,26 @@ thread_call_group_should_add_thread(thread_call_group_t group) (thread_count < group->target_thread_count) || ((group->pending_count > THREAD_CALL_ADD_RATIO * thread_count) && (sched_mach_factor < THREAD_CALL_MACH_FACTOR_CAP))) { - return TRUE; + return true; } } - return FALSE; -} - -/* Lock held */ -static inline thread_call_group_t -thread_call_get_group(thread_call_t call) -{ - thread_call_index_t index = call->tc_index; - - assert(index >= 0 && index < THREAD_CALL_INDEX_MAX); - - return &thread_call_groups[index]; -} - -/* Lock held */ -static inline thread_call_flavor_t -thread_call_get_flavor(thread_call_t call) -{ - return (call->tc_flags & THREAD_CALL_CONTINUOUS) ? TCF_CONTINUOUS : TCF_ABSOLUTE; + return false; } static void thread_call_group_setup(thread_call_group_t group) { + lck_ticket_init(&group->tcg_lock, &thread_call_lck_grp); + queue_init(&group->pending_queue); - queue_init(&group->delayed_queues[TCF_ABSOLUTE]); - queue_init(&group->delayed_queues[TCF_CONTINUOUS]); - /* TODO: Consolidate to one hard timer for each group */ - timer_call_setup(&group->delayed_timers[TCF_ABSOLUTE], thread_call_delayed_timer, group); - timer_call_setup(&group->delayed_timers[TCF_CONTINUOUS], thread_call_delayed_timer, group); + for (thread_call_flavor_t flavor = 0; flavor < TCF_COUNT; flavor++) { + queue_init(&group->delayed_queues[flavor]); + priority_queue_init(&group->delayed_pqueues[flavor]); + timer_call_setup(&group->delayed_timers[flavor], thread_call_delayed_timer, group); + } + timer_call_setup(&group->dealloc_timer, thread_call_dealloc_timer, group); /* Reverse the wait order so we re-use the most recently parked thread from the pool */ @@ -293,7 +466,7 @@ thread_call_group_setup(thread_call_group_t group) * Simple wrapper for creating threads bound to * thread call groups. 
*/ -static kern_return_t +static void thread_call_thread_create( thread_call_group_t group) { @@ -305,7 +478,7 @@ thread_call_thread_create( result = kernel_thread_start_priority((thread_continue_t)thread_call_thread, group, thread_pri, &thread); if (result != KERN_SUCCESS) { - return result; + panic("cannot create new thread call thread %d", result); } if (thread_pri <= BASEPRI_KERNEL) { @@ -324,7 +497,6 @@ thread_call_thread_create( thread_set_thread_name(thread, name); thread_deallocate(thread); - return KERN_SUCCESS; } /* @@ -336,14 +508,6 @@ thread_call_thread_create( void thread_call_initialize(void) { - int tc_size = sizeof(thread_call_data_t); - thread_call_zone = zinit(tc_size, 4096 * tc_size, 16 * tc_size, "thread_call"); - zone_change(thread_call_zone, Z_CALLERACCT, FALSE); - zone_change(thread_call_zone, Z_NOENCRYPT, TRUE); - - lck_grp_init(&thread_call_lck_grp, "thread_call", LCK_GRP_ATTR_NULL); - lck_mtx_init(&thread_call_lock_data, &thread_call_lck_grp, LCK_ATTR_NULL); - nanotime_to_absolutetime(0, THREAD_CALL_DEALLOC_INTERVAL_NS, &thread_call_dealloc_interval_abs); waitq_init(&daemon_waitq, SYNC_POLICY_DISABLE_IRQ | SYNC_POLICY_FIFO); @@ -351,20 +515,7 @@ thread_call_initialize(void) thread_call_group_setup(&thread_call_groups[i]); } - spl_t s = disable_ints_and_lock(); - - queue_init(&thread_call_internal_queue); - for ( - thread_call_t call = internal_call_storage; - call < &internal_call_storage[INTERNAL_CALL_COUNT]; - call++) { - enqueue_tail(&thread_call_internal_queue, &call->tc_call.q_link); - thread_call_internal_queue_count++; - } - - thread_call_daemon_awake = TRUE; - - enable_ints_and_unlock(s); + _internal_call_init(); thread_t thread; kern_return_t result; @@ -385,12 +536,35 @@ thread_call_setup( thread_call_param_t param0) { bzero(call, sizeof(*call)); - call_entry_setup((call_entry_t)call, func, param0); - /* Thread calls default to the HIGH group unless otherwise specified */ - call->tc_index = THREAD_CALL_INDEX_HIGH; + *call = (struct thread_call) { + .tc_func = func, + .tc_param0 = param0, + + /* + * Thread calls default to the HIGH group + * unless otherwise specified. + */ + .tc_index = THREAD_CALL_INDEX_HIGH, + }; +} + +static void +_internal_call_init(void) +{ + /* Function-only thread calls are only kept in the default HIGH group */ + thread_call_group_t group = &thread_call_groups[THREAD_CALL_INDEX_HIGH]; + + spl_t s = disable_ints_and_lock(group); + + queue_init(&thread_call_internal_queue); + + for (unsigned i = 0; i < INTERNAL_CALL_COUNT; i++) { + enqueue_tail(&thread_call_internal_queue, &internal_call_storage[i].tc_qlink); + thread_call_internal_queue_count++; + } - /* THREAD_CALL_ALLOC not set, memory owned by caller */ + enable_ints_and_unlock(group, s); } /* @@ -400,45 +574,63 @@ thread_call_setup( * * Called with thread_call_lock held. 
*/ -static __inline__ thread_call_t +static thread_call_t _internal_call_allocate(thread_call_func_t func, thread_call_param_t param0) { - thread_call_t call; + /* Function-only thread calls are only kept in the default HIGH group */ + thread_call_group_t group = &thread_call_groups[THREAD_CALL_INDEX_HIGH]; - if (queue_empty(&thread_call_internal_queue)) { - panic("_internal_call_allocate"); - } + spl_t s = disable_ints_and_lock(group); - call = qe_dequeue_head(&thread_call_internal_queue, struct thread_call, tc_call.q_link); + thread_call_t call = qe_dequeue_head(&thread_call_internal_queue, + struct thread_call, tc_qlink); + + if (call == NULL) { + panic("_internal_call_allocate: thread_call_internal_queue empty"); + } thread_call_internal_queue_count--; thread_call_setup(call, func, param0); call->tc_refs = 0; call->tc_flags = 0; /* THREAD_CALL_ALLOC not set, do not free back to zone */ + enable_ints_and_unlock(group, s); return call; } +/* Check if a call is internal and needs to be returned to the internal pool. */ +static bool +_is_internal_call(thread_call_t call) +{ + if (call >= internal_call_storage && + call < &internal_call_storage[INTERNAL_CALL_COUNT]) { + assert((call->tc_flags & THREAD_CALL_ALLOC) == 0); + return true; + } + return false; +} + /* * _internal_call_release: * * Release an internal callout entry which - * is no longer pending (or delayed). This is - * safe to call on a non-internal entry, in which - * case nothing happens. + * is no longer pending (or delayed). * * Called with thread_call_lock held. */ -static __inline__ void +static void _internal_call_release(thread_call_t call) { - if (call >= internal_call_storage && - call < &internal_call_storage[INTERNAL_CALL_COUNT]) { - assert((call->tc_flags & THREAD_CALL_ALLOC) == 0); - enqueue_head(&thread_call_internal_queue, &call->tc_call.q_link); - thread_call_internal_queue_count++; - } + assert(_is_internal_call(call)); + + thread_call_group_t group = thread_call_get_group(call); + + assert(group == &thread_call_groups[THREAD_CALL_INDEX_HIGH]); + thread_call_assert_locked(group); + + enqueue_head(&thread_call_internal_queue, &call->tc_qlink); + thread_call_internal_queue_count++; } /* @@ -452,39 +644,36 @@ _internal_call_release(thread_call_t call) * * Called with thread_call_lock held. 
*/ -static __inline__ boolean_t -_pending_call_enqueue(thread_call_t call, - thread_call_group_t group) +static bool +_pending_call_enqueue(thread_call_t call, + thread_call_group_t group, + uint64_t now) { if ((THREAD_CALL_ONCE | THREAD_CALL_RUNNING) == (call->tc_flags & (THREAD_CALL_ONCE | THREAD_CALL_RUNNING))) { - call->tc_deadline = 0; + call->tc_pqlink.deadline = 0; - uint32_t flags = call->tc_flags; + thread_call_flags_t flags = call->tc_flags; call->tc_flags |= THREAD_CALL_RESCHEDULE; - if ((flags & THREAD_CALL_RESCHEDULE) != 0) { - return TRUE; - } else { - return FALSE; - } + assert(call->tc_queue == NULL); + + return flags & THREAD_CALL_RESCHEDULE; } - queue_head_t *old_queue = call_entry_enqueue_tail(CE(call), &group->pending_queue); + call->tc_pending_timestamp = now; - if (old_queue == NULL) { + bool was_on_queue = thread_call_enqueue_tail(call, &group->pending_queue); + + if (!was_on_queue) { call->tc_submit_count++; - } else if (old_queue != &group->pending_queue && - old_queue != &group->delayed_queues[TCF_ABSOLUTE] && - old_queue != &group->delayed_queues[TCF_CONTINUOUS]) { - panic("tried to move a thread call (%p) between groups (old_queue: %p)", call, old_queue); } group->pending_count++; thread_call_wake(group); - return old_queue != NULL; + return was_on_queue; } /* @@ -499,7 +688,7 @@ _pending_call_enqueue(thread_call_t call, * * Called with thread_call_lock held. */ -static boolean_t +static bool _delayed_call_enqueue( thread_call_t call, thread_call_group_t group, @@ -508,32 +697,23 @@ _delayed_call_enqueue( { if ((THREAD_CALL_ONCE | THREAD_CALL_RUNNING) == (call->tc_flags & (THREAD_CALL_ONCE | THREAD_CALL_RUNNING))) { - call->tc_deadline = deadline; + call->tc_pqlink.deadline = deadline; - uint32_t flags = call->tc_flags; + thread_call_flags_t flags = call->tc_flags; call->tc_flags |= THREAD_CALL_RESCHEDULE; - if ((flags & THREAD_CALL_RESCHEDULE) != 0) { - return TRUE; - } else { - return FALSE; - } + assert(call->tc_queue == NULL); + thread_call_set_flavor(call, flavor); + + return flags & THREAD_CALL_RESCHEDULE; } - queue_head_t *old_queue = call_entry_enqueue_deadline(CE(call), - &group->delayed_queues[flavor], - deadline); + queue_head_t *old_queue = thread_call_enqueue_deadline(call, group, flavor, deadline); if (old_queue == &group->pending_queue) { group->pending_count--; } else if (old_queue == NULL) { call->tc_submit_count++; - } else if (old_queue == &group->delayed_queues[TCF_ABSOLUTE] || - old_queue == &group->delayed_queues[TCF_CONTINUOUS]) { - /* TODO: if it's in the other delayed queue, that might not be OK */ - // we did nothing, and that's fine - } else { - panic("tried to move a thread call (%p) between groups (old_queue: %p)", call, old_queue); } return old_queue != NULL; @@ -548,27 +728,24 @@ _delayed_call_enqueue( * * Called with thread_call_lock held. 
*/ -static __inline__ boolean_t +static bool _call_dequeue( thread_call_t call, thread_call_group_t group) { - queue_head_t *old_queue; + queue_head_t *old_queue = thread_call_dequeue(call); - old_queue = call_entry_dequeue(CE(call)); + if (old_queue == NULL) { + return false; + } - if (old_queue != NULL) { - assert(old_queue == &group->pending_queue || - old_queue == &group->delayed_queues[TCF_ABSOLUTE] || - old_queue == &group->delayed_queues[TCF_CONTINUOUS]); + call->tc_finish_count++; - call->tc_finish_count++; - if (old_queue == &group->pending_queue) { - group->pending_count--; - } + if (old_queue == &group->pending_queue) { + group->pending_count--; } - return old_queue != NULL; + return true; } /* @@ -595,22 +772,22 @@ _arm_delayed_call_timer(thread_call_t new_call, return false; } - thread_call_t call = qe_queue_first(&group->delayed_queues[flavor], struct thread_call, tc_call.q_link); + thread_call_t call = priority_queue_min(&group->delayed_pqueues[flavor], struct thread_call, tc_pqlink); /* We only need to change the hard timer if this new call is the first in the list */ if (new_call != NULL && new_call != call) { return false; } - assert((call->tc_soft_deadline != 0) && ((call->tc_soft_deadline <= call->tc_call.deadline))); + assert((call->tc_soft_deadline != 0) && ((call->tc_soft_deadline <= call->tc_pqlink.deadline))); uint64_t fire_at = call->tc_soft_deadline; if (flavor == TCF_CONTINUOUS) { - assert((call->tc_flags & THREAD_CALL_CONTINUOUS) == THREAD_CALL_CONTINUOUS); + assert(call->tc_flags & THREAD_CALL_FLAG_CONTINUOUS); fire_at = continuoustime_to_absolutetime(fire_at); } else { - assert((call->tc_flags & THREAD_CALL_CONTINUOUS) == 0); + assert((call->tc_flags & THREAD_CALL_FLAG_CONTINUOUS) == 0); } /* @@ -619,7 +796,7 @@ _arm_delayed_call_timer(thread_call_t new_call, * This is a valid coalescing behavior, but masks a possible window to * fire a timer instead of going idle. */ - uint64_t leeway = call->tc_call.deadline - call->tc_soft_deadline; + uint64_t leeway = call->tc_pqlink.deadline - call->tc_soft_deadline; timer_call_enter_with_leeway(&group->delayed_timers[flavor], (timer_call_param_t)flavor, fire_at, leeway, @@ -650,15 +827,17 @@ _cancel_func_from_queue(thread_call_func_t func, boolean_t call_removed = FALSE; thread_call_t call; - qe_foreach_element_safe(call, queue, tc_call.q_link) { - if (call->tc_call.func != func || - call->tc_call.param0 != param0) { + qe_foreach_element_safe(call, queue, tc_qlink) { + if (call->tc_func != func || + call->tc_param0 != param0) { continue; } _call_dequeue(call, group); - _internal_call_release(call); + if (_is_internal_call(call)) { + _internal_call_release(call); + } call_removed = TRUE; if (!remove_all) { @@ -716,6 +895,9 @@ thread_call_func_delayed_with_leeway( * * This iterates all of the pending or delayed thread calls in the group, * which is really inefficient. Switch to an allocated thread call instead. + * + * TODO: Give 'func' thread calls their own group, so this silliness doesn't + * affect the main 'high' group. 
*/ boolean_t thread_call_func_cancel( @@ -727,11 +909,11 @@ thread_call_func_cancel( assert(func != NULL); - spl_t s = disable_ints_and_lock(); - /* Function-only thread calls are only kept in the default HIGH group */ thread_call_group_t group = &thread_call_groups[THREAD_CALL_INDEX_HIGH]; + spl_t s = disable_ints_and_lock(group); + if (cancel_all) { /* exhaustively search every queue, and return true if any search found something */ result = _cancel_func_from_queue(func, param, group, cancel_all, &group->pending_queue) | @@ -744,7 +926,7 @@ thread_call_func_cancel( _cancel_func_from_queue(func, param, group, cancel_all, &group->delayed_queues[TCF_CONTINUOUS]); } - enable_ints_and_unlock(s); + enable_ints_and_unlock(group, s); return result; } @@ -881,11 +1063,13 @@ boolean_t thread_call_free( thread_call_t call) { - spl_t s = disable_ints_and_lock(); + thread_call_group_t group = thread_call_get_group(call); + + spl_t s = disable_ints_and_lock(group); - if (call->tc_call.queue != NULL || + if (call->tc_queue != NULL || ((call->tc_flags & THREAD_CALL_RESCHEDULE) != 0)) { - thread_call_unlock(); + thread_call_unlock(group); splx(s); return FALSE; @@ -901,7 +1085,7 @@ thread_call_free( thread_call_wait_once_locked(call, s); /* thread call lock has been unlocked */ } else { - enable_ints_and_unlock(s); + enable_ints_and_unlock(group, s); } if (refs == 0) { @@ -932,24 +1116,21 @@ thread_call_enter1( thread_call_t call, thread_call_param_t param1) { - boolean_t result = TRUE; - thread_call_group_t group; - - assert(call->tc_call.func != NULL); - + assert(call->tc_func != NULL); assert((call->tc_flags & THREAD_CALL_SIGNAL) == 0); - group = thread_call_get_group(call); + thread_call_group_t group = thread_call_get_group(call); + bool result = true; - spl_t s = disable_ints_and_lock(); + spl_t s = disable_ints_and_lock(group); - if (call->tc_call.queue != &group->pending_queue) { - result = _pending_call_enqueue(call, group); + if (call->tc_queue != &group->pending_queue) { + result = _pending_call_enqueue(call, group, mach_absolute_time()); } - call->tc_call.param1 = param1; + call->tc_param1 = param1; - enable_ints_and_unlock(s); + enable_ints_and_unlock(group, s); return result; } @@ -1020,33 +1201,31 @@ thread_call_enter_delayed_internal( uint64_t leeway, unsigned int flags) { - boolean_t result = TRUE; - thread_call_group_t group; - uint64_t now, sdeadline, slop; - uint32_t urgency; + uint64_t now, sdeadline; thread_call_flavor_t flavor = (flags & THREAD_CALL_CONTINUOUS) ? TCF_CONTINUOUS : TCF_ABSOLUTE; /* direct mapping between thread_call, timer_call, and timeout_urgency values */ - urgency = (flags & TIMEOUT_URGENCY_MASK); - - spl_t s = disable_ints_and_lock(); + uint32_t urgency = (flags & TIMEOUT_URGENCY_MASK); if (call == NULL) { /* allocate a structure out of internal storage, as a convenience for BSD callers */ call = _internal_call_allocate(alt_func, alt_param0); } - assert(call->tc_call.func != NULL); - group = thread_call_get_group(call); + assert(call->tc_func != NULL); + thread_call_group_t group = thread_call_get_group(call); + + spl_t s = disable_ints_and_lock(group); - /* TODO: assert that call is not enqueued before flipping the flag */ + /* + * kevent and IOTES let you change flavor for an existing timer, so we have to + * support flipping flavors for enqueued thread calls. 
+ */ if (flavor == TCF_CONTINUOUS) { now = mach_continuous_time(); - call->tc_flags |= THREAD_CALL_CONTINUOUS; } else { now = mach_absolute_time(); - call->tc_flags &= ~THREAD_CALL_CONTINUOUS; } call->tc_flags |= THREAD_CALL_DELAYED; @@ -1054,7 +1233,7 @@ thread_call_enter_delayed_internal( call->tc_soft_deadline = sdeadline = deadline; boolean_t ratelimited = FALSE; - slop = timer_call_slop(deadline, now, urgency, current_thread(), &ratelimited); + uint64_t slop = timer_call_slop(deadline, now, urgency, current_thread(), &ratelimited); if ((flags & THREAD_CALL_DELAY_LEEWAY) != 0 && leeway > slop) { slop = leeway; @@ -1067,26 +1246,26 @@ thread_call_enter_delayed_internal( } if (ratelimited) { - call->tc_flags |= TIMER_CALL_RATELIMITED; + call->tc_flags |= THREAD_CALL_RATELIMITED; } else { - call->tc_flags &= ~TIMER_CALL_RATELIMITED; + call->tc_flags &= ~THREAD_CALL_RATELIMITED; } - call->tc_call.param1 = param1; + call->tc_param1 = param1; call->tc_ttd = (sdeadline > now) ? (sdeadline - now) : 0; - result = _delayed_call_enqueue(call, group, deadline, flavor); + bool result = _delayed_call_enqueue(call, group, deadline, flavor); _arm_delayed_call_timer(call, group, flavor); #if CONFIG_DTRACE - DTRACE_TMR5(thread_callout__create, thread_call_func_t, call->tc_call.func, + DTRACE_TMR5(thread_callout__create, thread_call_func_t, call->tc_func, uint64_t, (deadline - sdeadline), uint64_t, (call->tc_ttd >> 32), (unsigned) (call->tc_ttd & 0xFFFFFFFF), call); #endif - enable_ints_and_unlock(s); + enable_ints_and_unlock(group, s); return result; } @@ -1095,30 +1274,32 @@ thread_call_enter_delayed_internal( * Remove a callout entry from the queue * Called with thread_call_lock held */ -static boolean_t +static bool thread_call_cancel_locked(thread_call_t call) { - boolean_t canceled = (0 != (THREAD_CALL_RESCHEDULE & call->tc_flags)); - call->tc_flags &= ~THREAD_CALL_RESCHEDULE; + bool canceled; + + if (call->tc_flags & THREAD_CALL_RESCHEDULE) { + call->tc_flags &= ~THREAD_CALL_RESCHEDULE; + canceled = true; - if (canceled) { /* if reschedule was set, it must not have been queued */ - assert(call->tc_call.queue == NULL); + assert(call->tc_queue == NULL); } else { - boolean_t do_cancel_callout = FALSE; + bool queue_head_changed = false; thread_call_flavor_t flavor = thread_call_get_flavor(call); thread_call_group_t group = thread_call_get_group(call); - if ((call->tc_call.deadline != 0) && - (call == qe_queue_first(&group->delayed_queues[flavor], struct thread_call, tc_call.q_link))) { - assert(call->tc_call.queue == &group->delayed_queues[flavor]); - do_cancel_callout = TRUE; + if (call->tc_pqlink.deadline != 0 && + call == priority_queue_min(&group->delayed_pqueues[flavor], struct thread_call, tc_pqlink)) { + assert(call->tc_queue == &group->delayed_queues[flavor]); + queue_head_changed = true; } canceled = _call_dequeue(call, group); - if (do_cancel_callout) { + if (queue_head_changed) { if (_arm_delayed_call_timer(NULL, group, flavor) == false) { timer_call_cancel(&group->delayed_timers[flavor]); } @@ -1126,7 +1307,7 @@ thread_call_cancel_locked(thread_call_t call) } #if CONFIG_DTRACE - DTRACE_TMR4(thread_callout__cancel, thread_call_func_t, call->tc_call.func, + DTRACE_TMR4(thread_callout__cancel, thread_call_func_t, call->tc_func, 0, (call->tc_ttd >> 32), (unsigned) (call->tc_ttd & 0xFFFFFFFF)); #endif @@ -1144,11 +1325,13 @@ thread_call_cancel_locked(thread_call_t call) boolean_t thread_call_cancel(thread_call_t call) { - spl_t s = disable_ints_and_lock(); + thread_call_group_t group = 
thread_call_get_group(call); + + spl_t s = disable_ints_and_lock(group); boolean_t result = thread_call_cancel_locked(call); - enable_ints_and_unlock(s); + enable_ints_and_unlock(group, s); return result; } @@ -1164,6 +1347,8 @@ thread_call_cancel(thread_call_t call) boolean_t thread_call_cancel_wait(thread_call_t call) { + thread_call_group_t group = thread_call_get_group(call); + if ((call->tc_flags & THREAD_CALL_ALLOC) == 0) { panic("thread_call_cancel_wait: can't wait on thread call whose storage I don't own"); } @@ -1172,12 +1357,15 @@ thread_call_cancel_wait(thread_call_t call) panic("unsafe thread_call_cancel_wait"); } - if (current_thread()->thc_state.thc_call == call) { + thread_t self = current_thread(); + + if ((thread_get_tag_internal(self) & THREAD_TAG_CALLOUT) && + self->thc_state && self->thc_state->thc_call == call) { panic("thread_call_cancel_wait: deadlock waiting on self from inside call: %p to function %p", - call, call->tc_call.func); + call, call->tc_func); } - spl_t s = disable_ints_and_lock(); + spl_t s = disable_ints_and_lock(group); boolean_t canceled = thread_call_cancel_locked(call); @@ -1201,7 +1389,7 @@ thread_call_cancel_wait(thread_call_t call) thread_call_wait_locked(call, s); /* thread call lock unlocked */ } else { - enable_ints_and_unlock(s); + enable_ints_and_unlock(group, s); } } @@ -1222,7 +1410,7 @@ thread_call_cancel_wait(thread_call_t call) * For high-priority group, only does wakeup/creation if there are no threads * running. */ -static __inline__ void +static void thread_call_wake( thread_call_group_t group) { @@ -1231,19 +1419,26 @@ thread_call_wake( * Traditional behavior: wake only if no threads running. */ if (group_isparallel(group) || group->active_count == 0) { - if (waitq_wakeup64_one(&group->idle_waitq, NO_EVENT64, - THREAD_AWAKENED, WAITQ_ALL_PRIORITIES) == KERN_SUCCESS) { - group->idle_count--; group->active_count++; + if (group->idle_count) { + __assert_only kern_return_t kr; - if (group->idle_count == 0 && (group->flags & TCG_DEALLOC_ACTIVE) == TCG_DEALLOC_ACTIVE) { + kr = waitq_wakeup64_one(&group->idle_waitq, NO_EVENT64, + THREAD_AWAKENED, WAITQ_ALL_PRIORITIES); + assert(kr == KERN_SUCCESS); + + group->idle_count--; + group->active_count++; + + if (group->idle_count == 0 && (group->tcg_flags & TCG_DEALLOC_ACTIVE) == TCG_DEALLOC_ACTIVE) { if (timer_call_cancel(&group->dealloc_timer) == TRUE) { - group->flags &= ~TCG_DEALLOC_ACTIVE; + group->tcg_flags &= ~TCG_DEALLOC_ACTIVE; } } } else { - if (!thread_call_daemon_awake && thread_call_group_should_add_thread(group)) { - thread_call_daemon_awake = TRUE; - waitq_wakeup64_one(&daemon_waitq, NO_EVENT64, + if (thread_call_group_should_add_thread(group) && + os_atomic_cmpxchg(&thread_call_daemon_awake, + false, true, relaxed)) { + waitq_wakeup64_all(&daemon_waitq, NO_EVENT64, THREAD_AWAKENED, WAITQ_ALL_PRIORITIES); } } @@ -1262,10 +1457,13 @@ sched_call_thread( { thread_call_group_t group; - group = thread->thc_state.thc_group; + assert(thread_get_tag_internal(thread) & THREAD_TAG_CALLOUT); + assert(thread->thc_state != NULL); + + group = thread->thc_state->thc_group; assert((group - &thread_call_groups[0]) < THREAD_CALL_INDEX_MAX); - thread_call_lock_spin(); + thread_call_lock_spin(group); switch (type) { case SCHED_CALL_BLOCK: @@ -1284,7 +1482,7 @@ sched_call_thread( break; } - thread_call_unlock(); + thread_call_unlock(group); } /* @@ -1293,17 +1491,15 @@ sched_call_thread( * anyone who might be waiting on this work item and frees it * if the client has so requested. 
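The rewritten thread_call_wake above pairs with thread_call_daemon_continue later in this patch: the wake side only issues a wakeup when it wins the false-to-true race on thread_call_daemon_awake via os_atomic_cmpxchg, and the daemon clears the flag before each scan and re-checks it before blocking, so redundant wakeups are cheap and none are lost. A minimal user-space sketch of that handshake, assuming C11 atomics and pthread primitives as stand-ins for the kernel waitq (all names below are hypothetical, not xnu symbols):

/* Sketch only: models the wake/daemon handshake, not the real waitq code. */
#include <stdatomic.h>
#include <stdbool.h>
#include <pthread.h>

static atomic_bool daemon_awake = true;
static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cv = PTHREAD_COND_INITIALIZER;

/* Producer side: only the false->true transition pays for a wakeup. */
static void
wake_daemon_if_needed(bool work_pending)
{
    bool expected = false;
    if (work_pending &&
        atomic_compare_exchange_strong(&daemon_awake, &expected, true)) {
        pthread_mutex_lock(&m);
        pthread_cond_broadcast(&cv);   /* analogous to waitq_wakeup64_all() */
        pthread_mutex_unlock(&m);
    }
}

/* Daemon side: clear the flag, scan for work, and only block once the
 * flag stays clear after a full scan (otherwise re-scan immediately). */
static void
daemon_loop_once(void (*scan_groups)(void))
{
    do {
        atomic_store(&daemon_awake, false);
        scan_groups();
    } while (atomic_load(&daemon_awake));

    pthread_mutex_lock(&m);
    while (!atomic_load(&daemon_awake)) {
        pthread_cond_wait(&cv, &m);    /* analogous to waitq_assert_wait64() + block */
    }
    pthread_mutex_unlock(&m);
}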
*/ -static boolean_t +static bool thread_call_finish(thread_call_t call, thread_call_group_t group, spl_t *s) { - uint64_t time; - uint32_t flags; - boolean_t signal; - boolean_t repend = FALSE; + assert(thread_call_get_group(call) == group); + + bool repend = false; + bool signal = call->tc_flags & THREAD_CALL_SIGNAL; call->tc_finish_count++; - flags = call->tc_flags; - signal = ((THREAD_CALL_SIGNAL & flags) != 0); if (!signal) { /* The thread call thread owns a ref until the call is finished */ @@ -1313,58 +1509,61 @@ thread_call_finish(thread_call_t call, thread_call_group_t group, spl_t *s) call->tc_refs--; } + thread_call_flags_t old_flags = call->tc_flags; call->tc_flags &= ~(THREAD_CALL_RESCHEDULE | THREAD_CALL_RUNNING | THREAD_CALL_WAIT); - if ((call->tc_refs != 0) && ((flags & THREAD_CALL_RESCHEDULE) != 0)) { - assert(flags & THREAD_CALL_ONCE); + if (call->tc_refs != 0 && (old_flags & THREAD_CALL_RESCHEDULE) != 0) { + assert(old_flags & THREAD_CALL_ONCE); thread_call_flavor_t flavor = thread_call_get_flavor(call); - if (THREAD_CALL_DELAYED & flags) { - time = mach_absolute_time(); + if (old_flags & THREAD_CALL_DELAYED) { + uint64_t now = mach_absolute_time(); if (flavor == TCF_CONTINUOUS) { - time = absolutetime_to_continuoustime(time); + now = absolutetime_to_continuoustime(now); } - if (call->tc_soft_deadline <= time) { - call->tc_flags &= ~(THREAD_CALL_DELAYED | TIMER_CALL_RATELIMITED); - call->tc_deadline = 0; + if (call->tc_soft_deadline <= now) { + /* The deadline has already expired, go straight to pending */ + call->tc_flags &= ~(THREAD_CALL_DELAYED | THREAD_CALL_RATELIMITED); + call->tc_pqlink.deadline = 0; } } - if (call->tc_deadline) { - _delayed_call_enqueue(call, group, call->tc_deadline, flavor); + + if (call->tc_pqlink.deadline) { + _delayed_call_enqueue(call, group, call->tc_pqlink.deadline, flavor); if (!signal) { _arm_delayed_call_timer(call, group, flavor); } } else if (signal) { call->tc_submit_count++; - repend = TRUE; + repend = true; } else { - _pending_call_enqueue(call, group); + _pending_call_enqueue(call, group, mach_absolute_time()); } } if (!signal && (call->tc_refs == 0)) { - if ((flags & THREAD_CALL_WAIT) != 0) { - panic("Someone waiting on a thread call that is scheduled for free: %p\n", call->tc_call.func); + if ((old_flags & THREAD_CALL_WAIT) != 0) { + panic("Someone waiting on a thread call that is scheduled for free: %p\n", call->tc_func); } assert(call->tc_finish_count == call->tc_submit_count); - enable_ints_and_unlock(*s); + enable_ints_and_unlock(group, *s); zfree(thread_call_zone, call); - *s = disable_ints_and_lock(); + *s = disable_ints_and_lock(group); } - if ((flags & THREAD_CALL_WAIT) != 0) { + if ((old_flags & THREAD_CALL_WAIT) != 0) { /* * Dropping lock here because the sched call for the * high-pri group can take the big lock from under * a thread lock. */ - thread_call_unlock(); + thread_call_unlock(group); thread_wakeup((event_t)call); - thread_call_lock_spin(); + thread_call_lock_spin(group); /* THREAD_CALL_SIGNAL call may have been freed */ } @@ -1379,10 +1578,11 @@ thread_call_finish(thread_call_t call, thread_call_group_t group, spl_t *s) * Note that the thread call object can be deallocated by the function if we do not control its storage. 
*/ static void __attribute__((noinline)) -thread_call_invoke(thread_call_func_t func, thread_call_param_t param0, thread_call_param_t param1, thread_call_t call) +thread_call_invoke(thread_call_func_t func, + thread_call_param_t param0, + thread_call_param_t param1, + __unused thread_call_t call) { - current_thread()->thc_state.thc_call = call; - #if DEVELOPMENT || DEBUG KERNEL_DEBUG_CONSTANT( MACHDBG_CODE(DBG_MACH_SCHED, MACH_CALLOUT) | DBG_FUNC_START, @@ -1408,8 +1608,6 @@ thread_call_invoke(thread_call_func_t func, thread_call_param_t param0, thread_c MACHDBG_CODE(DBG_MACH_SCHED, MACH_CALLOUT) | DBG_FUNC_END, VM_KERNEL_UNSLIDE(func), 0, 0, 0, 0); #endif /* DEVELOPMENT || DEBUG */ - - current_thread()->thc_state.thc_call = NULL; } /* @@ -1420,8 +1618,7 @@ thread_call_thread( thread_call_group_t group, wait_result_t wres) { - thread_t self = current_thread(); - boolean_t canwait; + thread_t self = current_thread(); if ((thread_get_tag_internal(self) & THREAD_TAG_CALLOUT) == 0) { (void)thread_set_tag_internal(self, THREAD_TAG_CALLOUT); @@ -1438,54 +1635,71 @@ thread_call_thread( panic("thread_terminate() returned?"); } - spl_t s = disable_ints_and_lock(); + spl_t s = disable_ints_and_lock(group); + + struct thread_call_thread_state thc_state = { .thc_group = group }; + self->thc_state = &thc_state; - self->thc_state.thc_group = group; thread_sched_call(self, sched_call_thread); while (group->pending_count > 0) { - thread_call_t call; - thread_call_func_t func; - thread_call_param_t param0, param1; - - call = qe_dequeue_head(&group->pending_queue, struct thread_call, tc_call.q_link); + thread_call_t call = qe_dequeue_head(&group->pending_queue, + struct thread_call, tc_qlink); assert(call != NULL); + group->pending_count--; + if (group->pending_count == 0) { + assert(queue_empty(&group->pending_queue)); + } - func = call->tc_call.func; - param0 = call->tc_call.param0; - param1 = call->tc_call.param1; + thread_call_func_t func = call->tc_func; + thread_call_param_t param0 = call->tc_param0; + thread_call_param_t param1 = call->tc_param1; - call->tc_call.queue = NULL; + call->tc_queue = NULL; - _internal_call_release(call); + if (_is_internal_call(call)) { + _internal_call_release(call); + } /* * Can only do wakeups for thread calls whose storage * we control. 
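The tag check at the top of thread_call_thread() below relies on the thread tag being a bitmask updated with an atomic OR (thread_set_tag_internal, earlier in this patch): concurrent setters cannot lose each other's bits, and the returned old value tells a caller whether its bit was already present. A small stand-alone sketch of the same pattern, assuming C11 atomics and hypothetical names:

/* Sketch only: atomic_ushort stands in for thread->thread_tag. */
#include <stdatomic.h>
#include <stdint.h>
#include <stdbool.h>

#define TAG_MAINTHREAD  0x1
#define TAG_CALLOUT     0x2

typedef atomic_ushort thread_tag_field_t;

/* Set one or more tag bits; the previous value lets a caller tell
 * whether it was the first to set a given bit. */
static uint16_t
tag_set(thread_tag_field_t *tags, uint16_t tag)
{
    return atomic_fetch_or_explicit(tags, tag, memory_order_relaxed);
}

static bool
tag_test(thread_tag_field_t *tags, uint16_t tag)
{
    return (atomic_load_explicit(tags, memory_order_relaxed) & tag) != 0;
}

/* Usage mirroring thread_call_thread(): tag the thread as a callout
 * worker once; later code keys off the tag before touching
 * callout-only per-thread state. */
static void
mark_callout_thread(thread_tag_field_t *tags)
{
    if (!tag_test(tags, TAG_CALLOUT)) {
        (void)tag_set(tags, TAG_CALLOUT);
    }
}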
*/ - if ((call->tc_flags & THREAD_CALL_ALLOC) != 0) { - canwait = TRUE; + bool needs_finish = false; + if (call->tc_flags & THREAD_CALL_ALLOC) { + needs_finish = true; call->tc_flags |= THREAD_CALL_RUNNING; call->tc_refs++; /* Delay free until we're done */ - } else { - canwait = FALSE; } - enable_ints_and_unlock(s); + thc_state.thc_call = call; + thc_state.thc_call_pending_timestamp = call->tc_pending_timestamp; + thc_state.thc_call_soft_deadline = call->tc_soft_deadline; + thc_state.thc_call_hard_deadline = call->tc_pqlink.deadline; + thc_state.thc_func = func; + thc_state.thc_param0 = param0; + thc_state.thc_param1 = param1; + thc_state.thc_IOTES_invocation_timestamp = 0; + + enable_ints_and_unlock(group, s); + + thc_state.thc_call_start = mach_absolute_time(); thread_call_invoke(func, param0, param1, call); + thc_state.thc_call = NULL; + if (get_preemption_level() != 0) { int pl = get_preemption_level(); panic("thread_call_thread: preemption_level %d, last callout %p(%p, %p)", pl, (void *)VM_KERNEL_UNSLIDE(func), param0, param1); } - s = disable_ints_and_lock(); + s = disable_ints_and_lock(group); - if (canwait) { - /* Frees if so desired */ + if (needs_finish) { + /* Release refcount, may free */ thread_call_finish(call, group, &s); } } @@ -1504,6 +1718,8 @@ thread_call_thread( self->callout_woken_from_platform_idle = FALSE; self->callout_woke_thread = FALSE; + self->thc_state = NULL; + if (group_isparallel(group)) { /* * For new style of thread group, thread always blocks. @@ -1518,7 +1734,7 @@ thread_call_thread( group->idle_timestamp = mach_absolute_time(); } - if (((group->flags & TCG_DEALLOC_ACTIVE) == 0) && + if (((group->tcg_flags & TCG_DEALLOC_ACTIVE) == 0) && ((group->active_count + group->idle_count) > group->target_thread_count)) { thread_call_start_deallocate_timer(group); } @@ -1529,7 +1745,7 @@ thread_call_thread( panic("kcall worker unable to assert wait?"); } - enable_ints_and_unlock(s); + enable_ints_and_unlock(group, s); thread_block_parameter((thread_continue_t)thread_call_thread, group); } else { @@ -1538,19 +1754,36 @@ thread_call_thread( waitq_assert_wait64(&group->idle_waitq, NO_EVENT64, THREAD_UNINT, 0); /* Interrupted means to exit */ - enable_ints_and_unlock(s); + enable_ints_and_unlock(group, s); thread_block_parameter((thread_continue_t)thread_call_thread, group); /* NOTREACHED */ } } - enable_ints_and_unlock(s); + enable_ints_and_unlock(group, s); thread_terminate(self); /* NOTREACHED */ } +void +thread_call_start_iotes_invocation(__assert_only thread_call_t call) +{ + thread_t self = current_thread(); + + if ((thread_get_tag_internal(self) & THREAD_TAG_CALLOUT) == 0) { + /* not a thread call thread, might be a workloop IOTES */ + return; + } + + assert(self->thc_state); + assert(self->thc_state->thc_call == call); + + self->thc_state->thc_IOTES_invocation_timestamp = mach_absolute_time(); +} + + /* * thread_call_daemon: walk list of groups, allocating * threads if appropriate (as determined by @@ -1559,36 +1792,34 @@ thread_call_thread( static void thread_call_daemon_continue(__unused void *arg) { - spl_t s = disable_ints_and_lock(); + do { + os_atomic_store(&thread_call_daemon_awake, false, relaxed); - /* Starting at zero happens to be high-priority first. */ - for (int i = 0; i < THREAD_CALL_INDEX_MAX; i++) { - thread_call_group_t group = &thread_call_groups[i]; - while (thread_call_group_should_add_thread(group)) { - group->active_count++; + /* Starting at zero happens to be high-priority first. 
*/ + for (int i = 0; i < THREAD_CALL_INDEX_MAX; i++) { + thread_call_group_t group = &thread_call_groups[i]; + + spl_t s = disable_ints_and_lock(group); - enable_ints_and_unlock(s); - - kern_return_t kr = thread_call_thread_create(group); - if (kr != KERN_SUCCESS) { - /* - * On failure, just pause for a moment and give up. - * We can try again later. - */ - delay(10000); /* 10 ms */ - s = disable_ints_and_lock(); - goto out; + while (thread_call_group_should_add_thread(group)) { + group->active_count++; + + enable_ints_and_unlock(group, s); + + thread_call_thread_create(group); + + s = disable_ints_and_lock(group); } - s = disable_ints_and_lock(); + enable_ints_and_unlock(group, s); } - } + } while (os_atomic_load(&thread_call_daemon_awake, relaxed)); -out: - thread_call_daemon_awake = FALSE; waitq_assert_wait64(&daemon_waitq, NO_EVENT64, THREAD_UNINT, 0); - enable_ints_and_unlock(s); + if (os_atomic_load(&thread_call_daemon_awake, relaxed)) { + clear_wait(current_thread(), THREAD_AWAKENED); + } thread_block_parameter((thread_continue_t)thread_call_daemon_continue, NULL); /* NOTREACHED */ @@ -1617,18 +1848,18 @@ thread_call_daemon( static void thread_call_start_deallocate_timer(thread_call_group_t group) { - __assert_only boolean_t already_enqueued; + __assert_only bool already_enqueued; assert(group->idle_count > 0); - assert((group->flags & TCG_DEALLOC_ACTIVE) == 0); + assert((group->tcg_flags & TCG_DEALLOC_ACTIVE) == 0); - group->flags |= TCG_DEALLOC_ACTIVE; + group->tcg_flags |= TCG_DEALLOC_ACTIVE; uint64_t deadline = group->idle_timestamp + thread_call_dealloc_interval_abs; already_enqueued = timer_call_enter(&group->dealloc_timer, deadline, 0); - assert(already_enqueued == FALSE); + assert(already_enqueued == false); } /* non-static so dtrace can find it rdar://problem/31156135&31379348 */ @@ -1640,10 +1871,8 @@ thread_call_delayed_timer(timer_call_param_t p0, timer_call_param_t p1) thread_call_t call; uint64_t now; - boolean_t restart; - boolean_t repend; - thread_call_lock_spin(); + thread_call_lock_spin(group); if (flavor == TCF_CONTINUOUS) { now = mach_continuous_time(); @@ -1653,69 +1882,61 @@ thread_call_delayed_timer(timer_call_param_t p0, timer_call_param_t p1) panic("invalid timer flavor: %d", flavor); } - do { - restart = FALSE; - qe_foreach_element_safe(call, &group->delayed_queues[flavor], tc_call.q_link) { - if (flavor == TCF_CONTINUOUS) { - assert((call->tc_flags & THREAD_CALL_CONTINUOUS) == THREAD_CALL_CONTINUOUS); - } else { - assert((call->tc_flags & THREAD_CALL_CONTINUOUS) == 0); - } + while ((call = priority_queue_min(&group->delayed_pqueues[flavor], + struct thread_call, tc_pqlink)) != NULL) { + assert(thread_call_get_group(call) == group); + assert(thread_call_get_flavor(call) == flavor); - /* - * if we hit a call that isn't yet ready to expire, - * then we're done for now - * TODO: The next timer in the list could have a larger leeway - * and therefore be ready to expire. - * Sort by deadline then by soft deadline to avoid this - */ - if (call->tc_soft_deadline > now) { - break; - } + /* + * if we hit a call that isn't yet ready to expire, + * then we're done for now + * TODO: The next timer in the list could have a larger leeway + * and therefore be ready to expire. + */ + if (call->tc_soft_deadline > now) { + break; + } - /* - * If we hit a rate-limited timer, don't eagerly wake it up. - * Wait until it reaches the end of the leeway window. - * - * TODO: What if the next timer is not rate-limited? 
- * Have a separate rate-limited queue to avoid this - */ - if ((call->tc_flags & THREAD_CALL_RATELIMITED) && - (call->tc_call.deadline > now) && - (ml_timer_forced_evaluation() == FALSE)) { - break; - } + /* + * If we hit a rate-limited timer, don't eagerly wake it up. + * Wait until it reaches the end of the leeway window. + * + * TODO: What if the next timer is not rate-limited? + * Have a separate rate-limited queue to avoid this + */ + if ((call->tc_flags & THREAD_CALL_RATELIMITED) && + (call->tc_pqlink.deadline > now) && + (ml_timer_forced_evaluation() == FALSE)) { + break; + } - if (THREAD_CALL_SIGNAL & call->tc_flags) { - __assert_only queue_head_t *old_queue; - old_queue = call_entry_dequeue(&call->tc_call); - assert(old_queue == &group->delayed_queues[flavor]); - - do { - thread_call_func_t func = call->tc_call.func; - thread_call_param_t param0 = call->tc_call.param0; - thread_call_param_t param1 = call->tc_call.param1; - - call->tc_flags |= THREAD_CALL_RUNNING; - thread_call_unlock(); - thread_call_invoke(func, param0, param1, call); - thread_call_lock_spin(); - - repend = thread_call_finish(call, group, NULL); - } while (repend); - - /* call may have been freed */ - restart = TRUE; - break; - } else { - _pending_call_enqueue(call, group); - } + if (THREAD_CALL_SIGNAL & call->tc_flags) { + __assert_only queue_head_t *old_queue; + old_queue = thread_call_dequeue(call); + assert(old_queue == &group->delayed_queues[flavor]); + + do { + thread_call_func_t func = call->tc_func; + thread_call_param_t param0 = call->tc_param0; + thread_call_param_t param1 = call->tc_param1; + + call->tc_flags |= THREAD_CALL_RUNNING; + + thread_call_unlock(group); + thread_call_invoke(func, param0, param1, call); + thread_call_lock_spin(group); + + /* finish may detect that the call has been re-pended */ + } while (thread_call_finish(call, group, NULL)); + /* call may have been freed by the finish */ + } else { + _pending_call_enqueue(call, group, now); } - } while (restart); + } _arm_delayed_call_timer(call, group, flavor); - thread_call_unlock(); + thread_call_unlock(group); } static void @@ -1725,7 +1946,7 @@ thread_call_delayed_timer_rescan(thread_call_group_t group, thread_call_t call; uint64_t now; - spl_t s = disable_ints_and_lock(); + spl_t s = disable_ints_and_lock(group); assert(ml_timer_forced_evaluation() == TRUE); @@ -1735,16 +1956,27 @@ thread_call_delayed_timer_rescan(thread_call_group_t group, now = mach_absolute_time(); } - qe_foreach_element_safe(call, &group->delayed_queues[flavor], tc_call.q_link) { + qe_foreach_element_safe(call, &group->delayed_queues[flavor], tc_qlink) { if (call->tc_soft_deadline <= now) { - _pending_call_enqueue(call, group); + _pending_call_enqueue(call, group, now); } else { - uint64_t skew = call->tc_call.deadline - call->tc_soft_deadline; - assert(call->tc_call.deadline >= call->tc_soft_deadline); + uint64_t skew = call->tc_pqlink.deadline - call->tc_soft_deadline; + assert(call->tc_pqlink.deadline >= call->tc_soft_deadline); /* * On a latency quality-of-service level change, * re-sort potentially rate-limited callout. The platform * layer determines which timers require this. + * + * This trick works by updating the deadline value to + * equal soft-deadline, effectively crushing away + * timer coalescing slop values for any armed + * timer in the queue. + * + * TODO: keep a hint on the timer to tell whether its inputs changed, so we + * only have to crush coalescing for timers that need it. 
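The expiry loop above replaces the old qe_foreach walk with repeated priority_queue_min() lookups: because the per-flavor queue is ordered by deadline, expiry can stop at the first entry whose soft deadline is still in the future and re-arm the hard timer for it. A toy stand-alone model of that drain shape, with a sorted array standing in for the priority queue and all names hypothetical:

/* Sketch only: models the "drain the head while it is due" shape. */
#include <stdint.h>
#include <stddef.h>
#include <stdbool.h>

struct toy_call {
    uint64_t soft_deadline;
    bool     pending;
};

/* 'calls' is sorted ascending by soft_deadline, standing in for
 * priority_queue_min() on the per-flavor deadline queue. Returns the
 * number of calls moved to the pending state. */
static size_t
drain_expired(struct toy_call *calls, size_t count, uint64_t now)
{
    size_t fired = 0;

    for (size_t i = 0; i < count; i++) {
        if (calls[i].soft_deadline > now) {
            /* Head of the queue is not due yet: the caller would
             * re-arm the hard timer for this deadline and stop. */
            break;
        }
        calls[i].pending = true;   /* analogous to _pending_call_enqueue() */
        fired++;
    }
    return fired;
}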
+ * + * TODO: Keep a separate queue of timers above the re-sort + * threshold, so we only have to look at those. */ if (timer_resort_threshold(skew)) { _call_dequeue(call, group); @@ -1755,15 +1987,16 @@ thread_call_delayed_timer_rescan(thread_call_group_t group, _arm_delayed_call_timer(NULL, group, flavor); - enable_ints_and_unlock(s); + enable_ints_and_unlock(group, s); } void thread_call_delayed_timer_rescan_all(void) { for (int i = 0; i < THREAD_CALL_INDEX_MAX; i++) { - thread_call_delayed_timer_rescan(&thread_call_groups[i], TCF_ABSOLUTE); - thread_call_delayed_timer_rescan(&thread_call_groups[i], TCF_CONTINUOUS); + for (thread_call_flavor_t flavor = 0; flavor < TCF_COUNT; flavor++) { + thread_call_delayed_timer_rescan(&thread_call_groups[i], flavor); + } } } @@ -1780,17 +2013,17 @@ thread_call_dealloc_timer( thread_call_group_t group = (thread_call_group_t)p0; uint64_t now; kern_return_t res; - boolean_t terminated = FALSE; + bool terminated = false; - thread_call_lock_spin(); + thread_call_lock_spin(group); - assert((group->flags & TCG_DEALLOC_ACTIVE) == TCG_DEALLOC_ACTIVE); + assert(group->tcg_flags & TCG_DEALLOC_ACTIVE); now = mach_absolute_time(); if (group->idle_count > 0) { if (now > group->idle_timestamp + thread_call_dealloc_interval_abs) { - terminated = TRUE; + terminated = true; group->idle_count--; res = waitq_wakeup64_one(&group->idle_waitq, NO_EVENT64, THREAD_INTERRUPTED, WAITQ_ALL_PRIORITIES); @@ -1800,7 +2033,7 @@ thread_call_dealloc_timer( } } - group->flags &= ~TCG_DEALLOC_ACTIVE; + group->tcg_flags &= ~TCG_DEALLOC_ACTIVE; /* * If we still have an excess of threads, schedule another @@ -1818,7 +2051,7 @@ thread_call_dealloc_timer( thread_call_start_deallocate_timer(group); } - thread_call_unlock(); + thread_call_unlock(group); } /* @@ -1834,15 +2067,17 @@ thread_call_dealloc_timer( * Takes the thread call lock locked, returns unlocked * This lets us avoid a spurious take/drop after waking up from thread_block */ -static boolean_t +static bool thread_call_wait_once_locked(thread_call_t call, spl_t s) { assert(call->tc_flags & THREAD_CALL_ALLOC); assert(call->tc_flags & THREAD_CALL_ONCE); + thread_call_group_t group = thread_call_get_group(call); + if ((call->tc_flags & THREAD_CALL_RUNNING) == 0) { - enable_ints_and_unlock(s); - return FALSE; + enable_ints_and_unlock(group, s); + return false; } /* call is running, so we have to wait for it */ @@ -1853,7 +2088,7 @@ thread_call_wait_once_locked(thread_call_t call, spl_t s) panic("Unable to assert wait: %d", res); } - enable_ints_and_unlock(s); + enable_ints_and_unlock(group, s); res = thread_block(THREAD_CONTINUE_NULL); if (res != THREAD_AWAKENED) { @@ -1861,7 +2096,7 @@ thread_call_wait_once_locked(thread_call_t call, spl_t s) } /* returns unlocked */ - return TRUE; + return true; } /* @@ -1888,14 +2123,19 @@ thread_call_wait_once(thread_call_t call) panic("unsafe thread_call_wait_once"); } - if (current_thread()->thc_state.thc_call == call) { + thread_t self = current_thread(); + + if ((thread_get_tag_internal(self) & THREAD_TAG_CALLOUT) && + self->thc_state && self->thc_state->thc_call == call) { panic("thread_call_wait_once: deadlock waiting on self from inside call: %p to function %p", - call, call->tc_call.func); + call, call->tc_func); } - spl_t s = disable_ints_and_lock(); + thread_call_group_t group = thread_call_get_group(call); + + spl_t s = disable_ints_and_lock(group); - boolean_t waited = thread_call_wait_once_locked(call, s); + bool waited = thread_call_wait_once_locked(call, s); /* thread call lock 
unlocked */ return waited; @@ -1913,32 +2153,31 @@ thread_call_wait_once(thread_call_t call) static void thread_call_wait_locked(thread_call_t call, spl_t s) { - uint64_t submit_count; - wait_result_t res; + thread_call_group_t group = thread_call_get_group(call); assert(call->tc_flags & THREAD_CALL_ALLOC); - submit_count = call->tc_submit_count; + uint64_t submit_count = call->tc_submit_count; while (call->tc_finish_count < submit_count) { call->tc_flags |= THREAD_CALL_WAIT; - res = assert_wait(call, THREAD_UNINT); + wait_result_t res = assert_wait(call, THREAD_UNINT); if (res != THREAD_WAITING) { panic("Unable to assert wait: %d", res); } - enable_ints_and_unlock(s); + enable_ints_and_unlock(group, s); res = thread_block(THREAD_CONTINUE_NULL); if (res != THREAD_AWAKENED) { panic("Awoken with %d?", res); } - s = disable_ints_and_lock(); + s = disable_ints_and_lock(group); } - enable_ints_and_unlock(s); + enable_ints_and_unlock(group, s); } /* @@ -1948,11 +2187,11 @@ thread_call_wait_locked(thread_call_t call, spl_t s) boolean_t thread_call_isactive(thread_call_t call) { - boolean_t active; + thread_call_group_t group = thread_call_get_group(call); - spl_t s = disable_ints_and_lock(); - active = (call->tc_submit_count > call->tc_finish_count); - enable_ints_and_unlock(s); + spl_t s = disable_ints_and_lock(group); + boolean_t active = (call->tc_submit_count > call->tc_finish_count); + enable_ints_and_unlock(group, s); return active; } @@ -1964,15 +2203,13 @@ thread_call_isactive(thread_call_t call) void adjust_cont_time_thread_calls(void) { - spl_t s = disable_ints_and_lock(); - for (int i = 0; i < THREAD_CALL_INDEX_MAX; i++) { thread_call_group_t group = &thread_call_groups[i]; + spl_t s = disable_ints_and_lock(group); /* only the continuous timers need to be re-armed */ _arm_delayed_call_timer(NULL, group, TCF_CONTINUOUS); + enable_ints_and_unlock(group, s); } - - enable_ints_and_unlock(s); } diff --git a/osfmk/kern/thread_call.h b/osfmk/kern/thread_call.h index f9730a78e..254ef28b8 100644 --- a/osfmk/kern/thread_call.h +++ b/osfmk/kern/thread_call.h @@ -342,9 +342,10 @@ __END_DECLS #ifdef MACH_KERNEL_PRIVATE -#include +#include +#include -typedef enum { +__enum_closed_decl(thread_call_index_t, uint16_t, { THREAD_CALL_INDEX_HIGH = 0, THREAD_CALL_INDEX_KERNEL = 1, THREAD_CALL_INDEX_USER = 2, @@ -354,29 +355,42 @@ typedef enum { THREAD_CALL_INDEX_QOS_IN = 6, THREAD_CALL_INDEX_QOS_UT = 7, THREAD_CALL_INDEX_MAX = 8, /* count of thread call indexes */ -} thread_call_index_t; +}); + +__options_closed_decl(thread_call_flags_t, uint16_t, { + THREAD_CALL_ALLOC = 0x0001, /* memory owned by thread_call.c */ + THREAD_CALL_WAIT = 0x0002, /* thread waiting for call to finish running */ + THREAD_CALL_DELAYED = 0x0004, /* deadline based */ + THREAD_CALL_RUNNING = 0x0008, /* currently executing on a thread */ + THREAD_CALL_SIGNAL = 0x0010, /* call from timer interrupt instead of thread */ + THREAD_CALL_ONCE = 0x0020, /* pend the enqueue if re-armed while running */ + THREAD_CALL_RESCHEDULE = 0x0040, /* enqueue is pending due to re-arm while running */ + THREAD_CALL_RATELIMITED = 0x0080, /* timer doesn't fire until slop+deadline */ + THREAD_CALL_FLAG_CONTINUOUS = 0x0100, /* deadline is in continuous time */ +}); struct thread_call { - struct call_entry tc_call; /* Must be first for queue macros */ - uint64_t tc_submit_count; - uint64_t tc_finish_count; - uint64_t tc_ttd; /* Time to deadline at creation */ - uint64_t tc_soft_deadline; - thread_call_index_t tc_index; - uint32_t tc_flags; - int32_t tc_refs; + 
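The reworked struct thread_call layout that follows keeps two distinct deadlines: the originally requested (soft) deadline and the leeway-adjusted deadline that is actually armed (stored in tc_pqlink.deadline and used as the priority-queue key). A minimal userspace sketch of that relationship for a rate-limited call, using illustrative toy_* names rather than the kernel's types:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Sketch only: a rate-limited call absorbs its full coalescing leeway, so the
 * deadline presented to the timer hardware is soft deadline + leeway, while
 * other calls keep the leeway as optional slop.
 */
struct toy_call {
    uint64_t soft_deadline;    /* originally requested fire time */
    uint64_t armed_deadline;   /* fire time after leeway is applied */
    bool     ratelimited;
};

static void
toy_call_arm(struct toy_call *c, uint64_t now, uint64_t interval,
    uint64_t leeway, bool ratelimited)
{
    c->soft_deadline  = now + interval;
    c->ratelimited    = ratelimited;
    c->armed_deadline = ratelimited ? c->soft_deadline + leeway
        : c->soft_deadline;
}

int
main(void)
{
    struct toy_call c;
    toy_call_arm(&c, 1000, 500, 200, true);
    printf("soft=%llu armed=%llu\n",
        (unsigned long long)c.soft_deadline,
        (unsigned long long)c.armed_deadline);
    return 0;
}

This mirrors why the delayed-timer expiry loop earlier in the patch skips a rate-limited call whose armed deadline is still in the future: the soft deadline may have passed, but the call is allowed to wait out its leeway window.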
/* Originally requested deadline */ + uint64_t tc_soft_deadline; + /* Deadline presented to hardware (post-leeway) stored in tc_pqlink.deadline */ + struct priority_queue_entry_deadline tc_pqlink; + /* Which queue head is this call enqueued on */ + queue_head_t *tc_queue; + queue_chain_t tc_qlink; + thread_call_index_t tc_index; + thread_call_flags_t tc_flags; + int32_t tc_refs; + /* Time to deadline at creation */ + uint64_t tc_ttd; + /* Timestamp of enqueue on pending queue */ + uint64_t tc_pending_timestamp; + thread_call_func_t tc_func; + thread_call_param_t tc_param0; + thread_call_param_t tc_param1; + uint64_t tc_submit_count; + uint64_t tc_finish_count; }; -#define THREAD_CALL_ALLOC 0x01 /* memory owned by thread_call.c */ -#define THREAD_CALL_WAIT 0x02 /* thread waiting for call to finish running */ -#define THREAD_CALL_DELAYED 0x04 /* deadline based */ -#define THREAD_CALL_RUNNING 0x08 /* currently executing on a thread */ -#define THREAD_CALL_SIGNAL 0x10 /* call from timer interrupt instead of thread */ -#define THREAD_CALL_ONCE 0x20 /* pend the enqueue if re-armed while running */ -#define THREAD_CALL_RESCHEDULE 0x40 /* enqueue is pending due to re-arm while running */ -#define THREAD_CALL_RATELIMITED TIMEOUT_URGENCY_RATELIMITED /* 0x80 */ -/* THREAD_CALL_CONTINUOUS 0x100 */ - typedef struct thread_call thread_call_data_t; extern void thread_call_initialize(void); @@ -387,6 +401,10 @@ extern void thread_call_setup( thread_call_param_t param0); extern void thread_call_delayed_timer_rescan_all(void); +extern uint64_t thread_call_get_armed_deadline(thread_call_t call); + +struct thread_call_thread_state; + #endif /* MACH_KERNEL_PRIVATE */ #ifdef XNU_KERNEL_PRIVATE @@ -426,7 +444,10 @@ extern boolean_t thread_call_func_cancel( /* * Called on the wake path to adjust the thread callouts running in mach_continuous_time */ -void adjust_cont_time_thread_calls(void); +extern void adjust_cont_time_thread_calls(void); + +/* called by IOTimerEventSource to track when the workloop lock has been taken */ +extern void thread_call_start_iotes_invocation(thread_call_t call); __END_DECLS diff --git a/osfmk/kern/thread_group.c b/osfmk/kern/thread_group.c index 49f212298..147925485 100644 --- a/osfmk/kern/thread_group.c +++ b/osfmk/kern/thread_group.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016 Apple Inc. All rights reserved. + * Copyright (c) 2016-2020 Apple Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -31,7 +31,6 @@ #include #include #include -#include #include #include #include @@ -42,16 +41,901 @@ #include #include +#if CONFIG_THREAD_GROUPS + +#define CACHELINE_SIZE (1 << MMU_CLINE) + +struct thread_group { + uint64_t tg_id; + char tg_name[THREAD_GROUP_MAXNAME]; + struct os_refcnt tg_refcount; + uint32_t tg_flags; + cluster_type_t tg_recommendation; + queue_chain_t tg_queue_chain; +#if CONFIG_SCHED_CLUTCH + struct sched_clutch tg_sched_clutch; +#endif /* CONFIG_SCHED_CLUTCH */ + // 16 bytes of padding here + uint8_t tg_machine_data[] __attribute__((aligned(CACHELINE_SIZE))); +} __attribute__((aligned(8))); + +static SECURITY_READ_ONLY_LATE(zone_t) tg_zone; +static uint32_t tg_count; +static queue_head_t tg_queue; +static LCK_GRP_DECLARE(tg_lck_grp, "thread_group"); +static LCK_MTX_DECLARE(tg_lock, &tg_lck_grp); +static LCK_SPIN_DECLARE(tg_flags_update_lock, &tg_lck_grp); + +static uint64_t tg_next_id = 0; +static uint32_t tg_size; +static uint32_t tg_machine_data_size; +static struct thread_group *tg_system; +static struct thread_group *tg_background; +static struct thread_group *tg_adaptive; +static struct thread_group *tg_vm; +static struct thread_group *tg_io_storage; +static struct thread_group *tg_perf_controller; +int tg_set_by_bankvoucher; + +static bool thread_group_retain_try(struct thread_group *tg); + +/* + * Initialize thread groups at boot + */ +void +thread_group_init(void) +{ + // Get thread group structure extension from EDT or boot-args (which can override EDT) + if (!PE_parse_boot_argn("kern.thread_group_extra_bytes", &tg_machine_data_size, sizeof(tg_machine_data_size))) { + if (!PE_get_default("kern.thread_group_extra_bytes", &tg_machine_data_size, sizeof(tg_machine_data_size))) { + tg_machine_data_size = 8; + } + } + + // Check if thread group can be set by voucher adoption from EDT or boot-args (which can override EDT) + if (!PE_parse_boot_argn("kern.thread_group_set_by_bankvoucher", &tg_set_by_bankvoucher, sizeof(tg_set_by_bankvoucher))) { + if (!PE_get_default("kern.thread_group_set_by_bankvoucher", &tg_set_by_bankvoucher, sizeof(tg_set_by_bankvoucher))) { + tg_set_by_bankvoucher = 1; + } + } + + tg_size = sizeof(struct thread_group) + tg_machine_data_size; + if (tg_size % CACHELINE_SIZE) { + tg_size += CACHELINE_SIZE - (tg_size % CACHELINE_SIZE); + } + tg_machine_data_size = tg_size - sizeof(struct thread_group); + // printf("tg_size=%d(%lu+%d)\n", tg_size, sizeof(struct thread_group), tg_machine_data_size); + assert(offsetof(struct thread_group, tg_machine_data) % CACHELINE_SIZE == 0); + tg_zone = zone_create("thread_groups", tg_size, ZC_NOENCRYPT | ZC_ALIGNMENT_REQUIRED); + + queue_head_init(tg_queue); + tg_system = thread_group_create_and_retain(); + thread_group_set_name(tg_system, "system"); + tg_background = thread_group_create_and_retain(); + thread_group_set_name(tg_background, "background"); + tg_adaptive = thread_group_create_and_retain(); + thread_group_set_name(tg_adaptive, "adaptive"); + tg_vm = thread_group_create_and_retain(); + thread_group_set_name(tg_vm, "VM"); + tg_io_storage = thread_group_create_and_retain(); + thread_group_set_name(tg_io_storage, "io storage"); + tg_perf_controller = thread_group_create_and_retain(); + thread_group_set_name(tg_perf_controller, "perf_controller"); + + /* + * If CLPC is disabled, it would recommend SMP for all thread groups. 
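The initialization path above rounds tg_size up to a whole number of cache lines so that the per-group tg_machine_data payload stays cache-line aligned. A standalone sketch of that rounding; the 64-byte line size is illustrative (the kernel derives it from MMU_CLINE):

#include <assert.h>
#include <stddef.h>

#define TOY_CACHELINE_SIZE 64u    /* illustrative; not MMU_CLINE */

/* Round a base structure size plus variable payload up to full cache lines. */
static size_t
toy_round_to_cacheline(size_t base, size_t extra)
{
    size_t total = base + extra;
    if (total % TOY_CACHELINE_SIZE) {
        total += TOY_CACHELINE_SIZE - (total % TOY_CACHELINE_SIZE);
    }
    assert(total % TOY_CACHELINE_SIZE == 0);
    return total;
}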
+ * In that mode, the scheduler would like to restrict the kernel thread + * groups to the E-cluster while all other thread groups are run on the + * P-cluster. To identify the kernel thread groups, mark them with a + * special flag THREAD_GROUP_FLAGS_SMP_RESTRICT which is looked at by + * recommended_pset_type(). + */ + tg_system->tg_flags |= THREAD_GROUP_FLAGS_SMP_RESTRICT; + tg_vm->tg_flags |= THREAD_GROUP_FLAGS_SMP_RESTRICT; + tg_io_storage->tg_flags |= THREAD_GROUP_FLAGS_SMP_RESTRICT; + tg_perf_controller->tg_flags |= THREAD_GROUP_FLAGS_SMP_RESTRICT; +} + +#if CONFIG_SCHED_CLUTCH +/* + * sched_clutch_for_thread + * + * The routine provides a back linkage from the thread to the + * sched_clutch it belongs to. This relationship is based on the + * thread group membership of the thread. Since that membership is + * changed from the thread context with the thread lock held, this + * linkage should be looked at only with the thread lock held or + * when the thread cannot be running (for eg. the thread is in the + * runq and being removed as part of thread_select(). + */ +sched_clutch_t +sched_clutch_for_thread(thread_t thread) +{ + assert(thread->thread_group != NULL); + return &(thread->thread_group->tg_sched_clutch); +} + +sched_clutch_t +sched_clutch_for_thread_group(struct thread_group *thread_group) +{ + return &(thread_group->tg_sched_clutch); +} + +/* + * Translate the TG flags to a priority boost for the sched_clutch. + * This priority boost will apply to the entire clutch represented + * by the thread group. + */ +static void +sched_clutch_update_tg_flags(sched_clutch_t clutch, uint8_t flags) +{ + sched_clutch_tg_priority_t sc_tg_pri = 0; + if (flags & THREAD_GROUP_FLAGS_UI_APP) { + sc_tg_pri = SCHED_CLUTCH_TG_PRI_HIGH; + } else if (flags & THREAD_GROUP_FLAGS_EFFICIENT) { + sc_tg_pri = SCHED_CLUTCH_TG_PRI_LOW; + } else { + sc_tg_pri = SCHED_CLUTCH_TG_PRI_MED; + } + os_atomic_store(&clutch->sc_tg_priority, sc_tg_pri, relaxed); +} + +#endif /* CONFIG_SCHED_CLUTCH */ + +/* + * Use a spinlock to protect all thread group flag updates. + * The lock should not have heavy contention since these flag updates should + * be infrequent. If this lock has contention issues, it should be changed to + * a per thread-group lock. + * + * The lock protects the flags field in the thread_group structure. It is also + * held while doing callouts to CLPC to reflect these flag changes. + */ + +void +thread_group_flags_update_lock(void) +{ + lck_spin_lock_grp(&tg_flags_update_lock, &tg_lck_grp); +} + +void +thread_group_flags_update_unlock(void) +{ + lck_spin_unlock(&tg_flags_update_lock); +} + +/* + * Inform platform code about already existing thread groups + * or ask it to free state for all thread groups + */ +void +thread_group_resync(boolean_t create) +{ + struct thread_group *tg; + + lck_mtx_lock(&tg_lock); + qe_foreach_element(tg, &tg_queue, tg_queue_chain) { + if (create) { + machine_thread_group_init(tg); + } else { + machine_thread_group_deinit(tg); + } + } + lck_mtx_unlock(&tg_lock); +} + +/* + * Create new thread group and add new reference to it. + */ +struct thread_group * +thread_group_create_and_retain(void) +{ + struct thread_group *tg; + + tg = (struct thread_group *)zalloc(tg_zone); + if (tg == NULL) { + panic("thread group zone over commit"); + } + assert((uintptr_t)tg % CACHELINE_SIZE == 0); + bzero(tg, sizeof(struct thread_group)); + +#if CONFIG_SCHED_CLUTCH + /* + * The clutch scheduler maintains a bunch of runqs per thread group. 
For + * each thread group it maintains a sched_clutch structure. The lifetime + * of that structure is tied directly to the lifetime of the thread group. + */ + sched_clutch_init_with_thread_group(&(tg->tg_sched_clutch), tg); + + /* + * Since the thread group flags are used to determine any priority promotions + * for the threads in the thread group, initialize them to 0. + */ + sched_clutch_update_tg_flags(&(tg->tg_sched_clutch), 0); + +#endif /* CONFIG_SCHED_CLUTCH */ + + lck_mtx_lock(&tg_lock); + tg->tg_id = tg_next_id++; + tg->tg_recommendation = CLUSTER_TYPE_SMP; // no recommendation yet + os_ref_init(&tg->tg_refcount, NULL); + tg_count++; + enqueue_tail(&tg_queue, &tg->tg_queue_chain); + lck_mtx_unlock(&tg_lock); + + // call machine layer init before this thread group becomes visible + machine_thread_group_init(tg); + + KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_THREAD_GROUP, MACH_THREAD_GROUP_NEW), tg->tg_id); + + return tg; +} + +/* + * Point newly created thread to its home thread group + */ +void +thread_group_init_thread(thread_t t, task_t task) +{ + struct thread_group *tg = task_coalition_get_thread_group(task); + t->thread_group = tg; + KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_THREAD_GROUP, MACH_THREAD_GROUP_SET), + THREAD_GROUP_INVALID, tg->tg_id, (uintptr_t)thread_tid(t)); +} + +/* + * Set thread group name + */ +void +thread_group_set_name(__unused struct thread_group *tg, __unused const char *name) +{ + if (name == NULL) { + return; + } + if (!thread_group_retain_try(tg)) { + return; + } + if (tg->tg_name[0] == '\0') { + strncpy(&tg->tg_name[0], name, THREAD_GROUP_MAXNAME); +#if defined(__LP64__) + KDBG(MACHDBG_CODE(DBG_MACH_THREAD_GROUP, MACH_THREAD_GROUP_NAME), + tg->tg_id, + *(uint64_t*)(void*)&tg->tg_name[0], + *(uint64_t*)(void*)&tg->tg_name[sizeof(uint64_t)] + ); +#else /* defined(__LP64__) */ + KDBG(MACHDBG_CODE(DBG_MACH_THREAD_GROUP, MACH_THREAD_GROUP_NAME), + tg->tg_id, + *(uint32_t*)(void*)&tg->tg_name[0], + *(uint32_t*)(void*)&tg->tg_name[sizeof(uint32_t)] + ); +#endif /* defined(__LP64__) */ + } + thread_group_release(tg); +} + +void +thread_group_set_flags(struct thread_group *tg, uint64_t flags) +{ + thread_group_flags_update_lock(); + thread_group_set_flags_locked(tg, flags); + thread_group_flags_update_unlock(); +} + +void +thread_group_clear_flags(struct thread_group *tg, uint64_t flags) +{ + thread_group_flags_update_lock(); + thread_group_clear_flags_locked(tg, flags); + thread_group_flags_update_unlock(); +} + +/* + * Set thread group flags and perform related actions. + * The tg_flags_update_lock should be held. + * Currently supported flags are: + * - THREAD_GROUP_FLAGS_EFFICIENT + * - THREAD_GROUP_FLAGS_UI_APP + */ + +void +thread_group_set_flags_locked(struct thread_group *tg, uint64_t flags) +{ + if ((flags & THREAD_GROUP_FLAGS_VALID) != flags) { + panic("thread_group_set_flags: Invalid flags %llu", flags); + } + + if ((tg->tg_flags & flags) == flags) { + return; + } + + __kdebug_only uint64_t old_flags = tg->tg_flags; + tg->tg_flags |= flags; + machine_thread_group_flags_update(tg, tg->tg_flags); +#if CONFIG_SCHED_CLUTCH + sched_clutch_update_tg_flags(&(tg->tg_sched_clutch), tg->tg_flags); +#endif /* CONFIG_SCHED_CLUTCH */ + KDBG(MACHDBG_CODE(DBG_MACH_THREAD_GROUP, MACH_THREAD_GROUP_FLAGS), + tg->tg_id, tg->tg_flags, old_flags); +} + +/* + * Clear thread group flags and perform related actions + * The tg_flags_update_lock should be held. 
+ * Currently supported flags are: + * - THREAD_GROUP_FLAGS_EFFICIENT + * - THREAD_GROUP_FLAGS_UI_APP + */ + +void +thread_group_clear_flags_locked(struct thread_group *tg, uint64_t flags) +{ + if ((flags & THREAD_GROUP_FLAGS_VALID) != flags) { + panic("thread_group_clear_flags: Invalid flags %llu", flags); + } + + if ((tg->tg_flags & flags) == 0) { + return; + } + + __kdebug_only uint64_t old_flags = tg->tg_flags; + tg->tg_flags &= ~flags; +#if CONFIG_SCHED_CLUTCH + sched_clutch_update_tg_flags(&(tg->tg_sched_clutch), tg->tg_flags); +#endif /* CONFIG_SCHED_CLUTCH */ + machine_thread_group_flags_update(tg, tg->tg_flags); + KDBG(MACHDBG_CODE(DBG_MACH_THREAD_GROUP, MACH_THREAD_GROUP_FLAGS), + tg->tg_id, tg->tg_flags, old_flags); +} + + + +/* + * Find thread group with specified name and put new reference to it. + */ +struct thread_group * +thread_group_find_by_name_and_retain(char *name) +{ + struct thread_group *result = NULL; + + if (name == NULL) { + return NULL; + } + + if (strncmp("system", name, THREAD_GROUP_MAXNAME) == 0) { + return thread_group_retain(tg_system); + } else if (strncmp("background", name, THREAD_GROUP_MAXNAME) == 0) { + return thread_group_retain(tg_background); + } else if (strncmp("adaptive", name, THREAD_GROUP_MAXNAME) == 0) { + return thread_group_retain(tg_adaptive); + } else if (strncmp("perf_controller", name, THREAD_GROUP_MAXNAME) == 0) { + return thread_group_retain(tg_perf_controller); + } + + struct thread_group *tg; + lck_mtx_lock(&tg_lock); + qe_foreach_element(tg, &tg_queue, tg_queue_chain) { + if (strncmp(tg->tg_name, name, THREAD_GROUP_MAXNAME) == 0 && + thread_group_retain_try(tg)) { + result = tg; + break; + } + } + lck_mtx_unlock(&tg_lock); + return result; +} + +/* + * Find thread group with specified ID and add new reference to it. + */ +struct thread_group * +thread_group_find_by_id_and_retain(uint64_t id) +{ + struct thread_group *tg = NULL; + struct thread_group *result = NULL; + + switch (id) { + case THREAD_GROUP_SYSTEM: + result = tg_system; + thread_group_retain(tg_system); + break; + case THREAD_GROUP_BACKGROUND: + result = tg_background; + thread_group_retain(tg_background); + break; + case THREAD_GROUP_ADAPTIVE: + result = tg_adaptive; + thread_group_retain(tg_adaptive); + break; + case THREAD_GROUP_VM: + result = tg_vm; + thread_group_retain(tg_vm); + break; + case THREAD_GROUP_IO_STORAGE: + result = tg_io_storage; + thread_group_retain(tg_io_storage); + break; + case THREAD_GROUP_PERF_CONTROLLER: + result = tg_perf_controller; + thread_group_retain(tg_perf_controller); + break; + default: + lck_mtx_lock(&tg_lock); + qe_foreach_element(tg, &tg_queue, tg_queue_chain) { + if (tg->tg_id == id && thread_group_retain_try(tg)) { + result = tg; + break; + } + } + lck_mtx_unlock(&tg_lock); + } + return result; +} + +/* + * Add new reference to specified thread group + */ +struct thread_group * +thread_group_retain(struct thread_group *tg) +{ + os_ref_retain(&tg->tg_refcount); + return tg; +} + +/* + * Similar to thread_group_retain, but fails for thread groups with a + * zero reference count. Returns true if retained successfully. 
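The try-retain described above must refuse to take a reference once the count has already reached zero, so a racing release/free cannot be resurrected. A minimal userspace sketch of that semantic with C11 atomics; this models the behavior os_ref_retain_try promises, not its actual implementation:

#include <stdatomic.h>
#include <stdbool.h>

struct toy_ref {
    _Atomic unsigned long count;
};

/* Returns true and increments the count only if the count was non-zero. */
static bool
toy_ref_retain_try(struct toy_ref *r)
{
    unsigned long old = atomic_load_explicit(&r->count, memory_order_relaxed);
    do {
        if (old == 0) {
            return false;   /* object is being torn down; don't reference it */
        }
    } while (!atomic_compare_exchange_weak_explicit(&r->count, &old, old + 1,
        memory_order_relaxed, memory_order_relaxed));
    return true;
}

The release side pairs with this the way thread_group_release does below: whoever drops the count to zero performs the teardown, and lookups that lose that race simply fail.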
+ */ +static bool +thread_group_retain_try(struct thread_group *tg) +{ + return os_ref_retain_try(&tg->tg_refcount); +} + +/* + * Drop a reference to specified thread group + */ +void +thread_group_release(struct thread_group *tg) +{ + if (os_ref_release(&tg->tg_refcount) == 0) { + lck_mtx_lock(&tg_lock); + tg_count--; + remqueue(&tg->tg_queue_chain); + lck_mtx_unlock(&tg_lock); + static_assert(THREAD_GROUP_MAXNAME >= (sizeof(uint64_t) * 2), "thread group name is too short"); + static_assert(__alignof(struct thread_group) >= __alignof(uint64_t), "thread group name is not 8 bytes aligned"); +#if defined(__LP64__) + KDBG(MACHDBG_CODE(DBG_MACH_THREAD_GROUP, MACH_THREAD_GROUP_NAME_FREE), + tg->tg_id, + *(uint64_t*)(void*)&tg->tg_name[0], + *(uint64_t*)(void*)&tg->tg_name[sizeof(uint64_t)] + ); +#else /* defined(__LP64__) */ + KDBG(MACHDBG_CODE(DBG_MACH_THREAD_GROUP, MACH_THREAD_GROUP_NAME_FREE), + tg->tg_id, + *(uint32_t*)(void*)&tg->tg_name[0], + *(uint32_t*)(void*)&tg->tg_name[sizeof(uint32_t)] + ); +#endif /* defined(__LP64__) */ + KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_THREAD_GROUP, MACH_THREAD_GROUP_FREE), tg->tg_id); +#if CONFIG_SCHED_CLUTCH + sched_clutch_destroy(&(tg->tg_sched_clutch)); +#endif /* CONFIG_SCHED_CLUTCH */ + machine_thread_group_deinit(tg); + zfree(tg_zone, tg); + } +} + +/* + * Get thread's current thread group + */ +inline struct thread_group * +thread_group_get(thread_t t) +{ + return t->thread_group; +} + +struct thread_group * +thread_group_get_home_group(thread_t t) +{ + return task_coalition_get_thread_group(t->task); +} + +#if CONFIG_SCHED_AUTO_JOIN + +/* + * thread_set_thread_group_auto_join() + * + * Sets the thread group of a thread based on auto-join rules. + * + * Preconditions: + * - Thread must not be part of a runq (freshly made runnable threads or terminating only) + * - Thread must be locked by the caller already + */ +static void +thread_set_thread_group_auto_join(thread_t t, struct thread_group *tg, __unused struct thread_group *old_tg) +{ + assert(t->runq == PROCESSOR_NULL); + t->thread_group = tg; + + /* + * If the thread group is being changed for the current thread, callout to + * CLPC to update the thread's information at that layer. This makes sure CLPC + * has consistent state when the current thread is going off-core. + */ + if (t == current_thread()) { + uint64_t ctime = mach_approximate_time(); + uint64_t arg1, arg2; + machine_thread_going_on_core(t, thread_get_urgency(t, &arg1, &arg2), 0, 0, ctime); + machine_switch_perfcontrol_state_update(THREAD_GROUP_UPDATE, ctime, PERFCONTROL_CALLOUT_WAKE_UNSAFE, t); + } +} + +#endif /* CONFIG_SCHED_AUTO_JOIN */ + +/* + * thread_set_thread_group_explicit() + * + * Sets the thread group of a thread based on default non auto-join rules. + * + * Preconditions: + * - Thread must be the current thread + * - Caller must not have the thread locked + * - Interrupts must be disabled + */ +static void +thread_set_thread_group_explicit(thread_t t, struct thread_group *tg, __unused struct thread_group *old_tg) +{ + assert(t == current_thread()); + /* + * In the clutch scheduler world, the runq membership of the thread + * is based on its thread group membership and its scheduling bucket. + * In order to synchronize with the priority (and therefore bucket) + * getting updated concurrently, it is important to perform the + * thread group change also under the thread lock. + */ + thread_lock(t); + t->thread_group = tg; + +#if CONFIG_SCHED_CLUTCH + sched_clutch_t old_clutch = (old_tg) ? 
&(old_tg->tg_sched_clutch) : NULL; + sched_clutch_t new_clutch = (tg) ? &(tg->tg_sched_clutch) : NULL; + if (SCHED_CLUTCH_THREAD_ELIGIBLE(t)) { + sched_clutch_thread_clutch_update(t, old_clutch, new_clutch); + } +#endif /* CONFIG_SCHED_CLUTCH */ + + thread_unlock(t); + + uint64_t ctime = mach_approximate_time(); + uint64_t arg1, arg2; + machine_thread_going_on_core(t, thread_get_urgency(t, &arg1, &arg2), 0, 0, ctime); + machine_switch_perfcontrol_state_update(THREAD_GROUP_UPDATE, ctime, 0, t); +} + +/* + * thread_set_thread_group() + * + * Overrides the current home thread group with an override group. However, + * an adopted work interval overrides the override. Does not take a reference + * on the group, so caller must guarantee group lifetime lasts as long as the + * group is set. + * + * The thread group is set according to a hierarchy: + * + * 1) work interval specified group (explicit API) + * 2) Auto-join thread group (wakeup tracking for special work intervals) + * 3) bank voucher carried group (implicitly set) + * 4) coalition default thread group (ambient) + */ +static void +thread_set_thread_group(thread_t t, struct thread_group *tg, bool auto_join) +{ + struct thread_group *home_tg = thread_group_get_home_group(t); + struct thread_group *old_tg = NULL; + + if (tg == NULL) { + /* when removing an override, revert to home group */ + tg = home_tg; + } + + spl_t s = splsched(); + + old_tg = t->thread_group; + + if (old_tg != tg) { + KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_THREAD_GROUP, MACH_THREAD_GROUP_SET), + t->thread_group ? t->thread_group->tg_id : 0, + tg->tg_id, (uintptr_t)thread_tid(t), home_tg->tg_id); + + /* + * Based on whether this is a change due to auto-join, the join does + * different things and has different expectations. + */ + if (auto_join) { +#if CONFIG_SCHED_AUTO_JOIN + /* + * set thread group with auto-join rules. This has the + * implicit assumption that the thread lock is already held. + * Also this could happen to any thread (current or thread + * being context switched). + */ + thread_set_thread_group_auto_join(t, tg, old_tg); +#else /* CONFIG_SCHED_AUTO_JOIN */ + panic("Auto-Join unsupported on this platform"); +#endif /* CONFIG_SCHED_AUTO_JOIN */ + } else { + /* + * set thread group with the explicit join rules. This has + * the implicit assumption that the thread is not locked. Also + * this would be done only to the current thread. + */ + thread_set_thread_group_explicit(t, tg, old_tg); + } + } + + splx(s); +} + +void +thread_group_set_bank(thread_t t, struct thread_group *tg) +{ + /* work interval group overrides any bank override group */ + if (t->th_work_interval) { + return; + } + + /* boot arg disables groups in bank */ + if (tg_set_by_bankvoucher == FALSE) { + return; + } + + thread_set_thread_group(t, tg, false); +} + +/* + * thread_set_work_interval_thread_group() + * + * Sets the thread's group to the work interval thread group. + * If auto_join == true, thread group is being overriden through scheduler + * auto-join policies. + * + * Preconditions for auto-join case: + * - t is not current_thread and t should be locked. + * - t should not be running on a remote core; thread context switching is a valid state for this. + */ +void +thread_set_work_interval_thread_group(thread_t t, struct thread_group *tg, bool auto_join) +{ + if (tg == NULL) { + /* + * when removing a work interval override, fall back + * to the current voucher override. 
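The four-level hierarchy described in the comment above reduces to "use the highest-priority non-NULL override, else the home group". A compact sketch of that selection, with placeholder names (these are not XNU symbols; the real code stores only the effective group on the thread rather than all four candidates):

struct toy_group;   /* opaque for the purposes of this sketch */

struct toy_thread {
    struct toy_group *work_interval_group;  /* 1) explicit work interval API */
    struct toy_group *auto_join_group;      /* 2) scheduler auto-join tracking */
    struct toy_group *voucher_group;        /* 3) adopted bank voucher */
    struct toy_group *home_group;           /* 4) coalition default (ambient) */
};

static struct toy_group *
toy_effective_group(const struct toy_thread *t)
{
    if (t->work_interval_group) {
        return t->work_interval_group;
    }
    if (t->auto_join_group) {
        return t->auto_join_group;
    }
    if (t->voucher_group) {
        return t->voucher_group;
    }
    return t->home_group;
}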
+ * + * In the auto_join case, the thread is already locked by the caller so + * its unsafe to get the thread group from the current voucher (since + * that might require taking task lock and ivac lock). However, the + * auto-join policy does not allow threads to switch thread groups based + * on voucher overrides. + * + * For the normal case, lookup the thread group from the currently adopted + * voucher and use that as the fallback tg. + */ + + if (auto_join == false) { + tg = thread_get_current_voucher_thread_group(t); + } + } + + thread_set_thread_group(t, tg, auto_join); +} + +inline cluster_type_t +thread_group_recommendation(struct thread_group *tg) +{ + if (tg == NULL) { + return CLUSTER_TYPE_SMP; + } else { + return tg->tg_recommendation; + } +} + +inline uint64_t +thread_group_get_id(struct thread_group *tg) +{ + return tg->tg_id; +} + +uint32_t +thread_group_count(void) +{ + return tg_count; +} + +/* + * Can only be called while tg cannot be destroyed + */ +inline const char* +thread_group_get_name(struct thread_group *tg) +{ + return tg->tg_name; +} + +inline void * +thread_group_get_machine_data(struct thread_group *tg) +{ + return &tg->tg_machine_data; +} + +inline uint32_t +thread_group_machine_data_size(void) +{ + return tg_machine_data_size; +} + +kern_return_t +thread_group_iterate_stackshot(thread_group_iterate_fn_t callout, void *arg) +{ + struct thread_group *tg; + int i = 0; + qe_foreach_element(tg, &tg_queue, tg_queue_chain) { + if (tg == NULL || !ml_validate_nofault((vm_offset_t)tg, sizeof(struct thread_group))) { + return KERN_FAILURE; + } + callout(arg, i, tg); + i++; + } + return KERN_SUCCESS; +} -#if CONFIG_EMBEDDED void thread_group_join_io_storage(void) { + struct thread_group *tg = thread_group_find_by_id_and_retain(THREAD_GROUP_IO_STORAGE); + assert(tg != NULL); + thread_set_thread_group(current_thread(), tg, false); +} + +void +thread_group_join_perf_controller(void) +{ + struct thread_group *tg = thread_group_find_by_id_and_retain(THREAD_GROUP_PERF_CONTROLLER); + assert(tg != NULL); + thread_set_thread_group(current_thread(), tg, false); +} + +void +thread_group_vm_add(void) +{ + assert(tg_vm != NULL); + thread_set_thread_group(current_thread(), thread_group_find_by_id_and_retain(THREAD_GROUP_VM), false); +} + +uint64_t +kdp_thread_group_get_flags(struct thread_group *tg) +{ + return tg->tg_flags; +} + +/* + * Returns whether the thread group is restricted to the E-cluster when CLPC is + * turned off. + */ +boolean_t +thread_group_smp_restricted(struct thread_group *tg) +{ + if (tg->tg_flags & THREAD_GROUP_FLAGS_SMP_RESTRICT) { + return true; + } else { + return false; + } +} + +void +thread_group_update_recommendation(struct thread_group *tg, cluster_type_t new_recommendation) +{ + /* + * Since the tg->tg_recommendation field is read by CPUs trying to determine + * where a thread/thread group needs to be placed, it is important to use + * atomic operations to update the recommendation. 
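A small sketch of the lock-free publication pattern used for the recommendation field: a relaxed atomic store and load guarantee readers on other CPUs never observe a torn value, even though no ordering with other state is implied. The enum and field names below are illustrative stand-ins for cluster_type_t and tg_recommendation:

#include <stdatomic.h>

enum toy_cluster { TOY_CLUSTER_SMP, TOY_CLUSTER_E, TOY_CLUSTER_P };

struct toy_group {
    _Atomic int recommendation;
};

static void
toy_recommend(struct toy_group *g, enum toy_cluster c)
{
    atomic_store_explicit(&g->recommendation, (int)c, memory_order_relaxed);
}

static enum toy_cluster
toy_recommendation(struct toy_group *g)
{
    return (enum toy_cluster)atomic_load_explicit(&g->recommendation,
        memory_order_relaxed);
}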
+ */ + os_atomic_store(&tg->tg_recommendation, new_recommendation, relaxed); } +#if CONFIG_SCHED_EDGE + +int sched_edge_restrict_ut = 1; +int sched_edge_restrict_bg = 1; + void -sched_perfcontrol_thread_group_recommend(void *machine_data __unused, cluster_type_t new_recommendation __unused) +sched_perfcontrol_thread_group_recommend(__unused void *machine_data, __unused cluster_type_t new_recommendation) { + struct thread_group *tg = (struct thread_group *)((uintptr_t)machine_data - offsetof(struct thread_group, tg_machine_data)); + /* + * CLUSTER_TYPE_SMP was used for some debugging support when CLPC dynamic control was turned off. + * In more recent implementations, CLPC simply recommends "P-spill" when dynamic control is turned off. So it should + * never be recommending CLUSTER_TYPE_SMP for thread groups. + */ + assert(new_recommendation != CLUSTER_TYPE_SMP); + /* + * The Edge scheduler expects preferred cluster recommendations for each QoS level within a TG. Until the new CLPC + * routine is being called, fake out the call from the old CLPC interface. + */ + uint32_t tg_bucket_preferred_cluster[TH_BUCKET_SCHED_MAX] = {0}; + /* + * For all buckets higher than UT, apply the recommendation to the thread group bucket + */ + for (sched_bucket_t bucket = TH_BUCKET_FIXPRI; bucket < TH_BUCKET_SHARE_UT; bucket++) { + tg_bucket_preferred_cluster[bucket] = (new_recommendation == pset_type_for_id(0)) ? 0 : 1; + } + /* For UT & BG QoS, set the recommendation only if they havent been restricted via sysctls */ + if (!sched_edge_restrict_ut) { + tg_bucket_preferred_cluster[TH_BUCKET_SHARE_UT] = (new_recommendation == pset_type_for_id(0)) ? 0 : 1; + } + if (!sched_edge_restrict_bg) { + tg_bucket_preferred_cluster[TH_BUCKET_SHARE_BG] = (new_recommendation == pset_type_for_id(0)) ? 0 : 1; + } + sched_perfcontrol_preferred_cluster_options_t options = 0; + if (new_recommendation == CLUSTER_TYPE_P) { + options |= SCHED_PERFCONTROL_PREFERRED_CLUSTER_MIGRATE_RUNNING; + } + sched_edge_tg_preferred_cluster_change(tg, tg_bucket_preferred_cluster, options); } -#endif /* CONFIG_EMBEDDED */ +void +sched_perfcontrol_edge_matrix_get(sched_clutch_edge *edge_matrix, bool *edge_request_bitmap, uint64_t flags, uint64_t matrix_order) +{ + sched_edge_matrix_get(edge_matrix, edge_request_bitmap, flags, matrix_order); +} + +void +sched_perfcontrol_edge_matrix_set(sched_clutch_edge *edge_matrix, bool *edge_changes_bitmap, uint64_t flags, uint64_t matrix_order) +{ + sched_edge_matrix_set(edge_matrix, edge_changes_bitmap, flags, matrix_order); +} + +void +sched_perfcontrol_thread_group_preferred_clusters_set(void *machine_data, uint32_t tg_preferred_cluster, + uint32_t overrides[PERFCONTROL_CLASS_MAX], sched_perfcontrol_preferred_cluster_options_t options) +{ + struct thread_group *tg = (struct thread_group *)((uintptr_t)machine_data - offsetof(struct thread_group, tg_machine_data)); + uint32_t tg_bucket_preferred_cluster[TH_BUCKET_SCHED_MAX] = { + [TH_BUCKET_FIXPRI] = (overrides[PERFCONTROL_CLASS_ABOVEUI] != SCHED_PERFCONTROL_PREFERRED_CLUSTER_OVERRIDE_NONE) ? overrides[PERFCONTROL_CLASS_ABOVEUI] : tg_preferred_cluster, + [TH_BUCKET_SHARE_FG] = (overrides[PERFCONTROL_CLASS_UI] != SCHED_PERFCONTROL_PREFERRED_CLUSTER_OVERRIDE_NONE) ? overrides[PERFCONTROL_CLASS_UI] : tg_preferred_cluster, + [TH_BUCKET_SHARE_IN] = (overrides[PERFCONTROL_CLASS_UI] != SCHED_PERFCONTROL_PREFERRED_CLUSTER_OVERRIDE_NONE) ? 
overrides[PERFCONTROL_CLASS_UI] : tg_preferred_cluster, + [TH_BUCKET_SHARE_DF] = (overrides[PERFCONTROL_CLASS_NONUI] != SCHED_PERFCONTROL_PREFERRED_CLUSTER_OVERRIDE_NONE) ? overrides[PERFCONTROL_CLASS_NONUI] : tg_preferred_cluster, + [TH_BUCKET_SHARE_UT] = (overrides[PERFCONTROL_CLASS_UTILITY] != SCHED_PERFCONTROL_PREFERRED_CLUSTER_OVERRIDE_NONE) ? overrides[PERFCONTROL_CLASS_UTILITY] : tg_preferred_cluster, + [TH_BUCKET_SHARE_BG] = (overrides[PERFCONTROL_CLASS_BACKGROUND] != SCHED_PERFCONTROL_PREFERRED_CLUSTER_OVERRIDE_NONE) ? overrides[PERFCONTROL_CLASS_BACKGROUND] : tg_preferred_cluster, + }; + sched_edge_tg_preferred_cluster_change(tg, tg_bucket_preferred_cluster, options); +} + +#else /* CONFIG_SCHED_EDGE */ + +void +sched_perfcontrol_thread_group_recommend(__unused void *machine_data, __unused cluster_type_t new_recommendation) +{ + struct thread_group *tg = (struct thread_group *)((uintptr_t)machine_data - offsetof(struct thread_group, tg_machine_data)); + SCHED(thread_group_recommendation_change)(tg, new_recommendation); +} + +void +sched_perfcontrol_edge_matrix_get(__unused sched_clutch_edge *edge_matrix, __unused bool *edge_request_bitmap, __unused uint64_t flags, __unused uint64_t matrix_order) +{ +} + +void +sched_perfcontrol_edge_matrix_set(__unused sched_clutch_edge *edge_matrix, __unused bool *edge_changes_bitmap, __unused uint64_t flags, __unused uint64_t matrix_order) +{ +} + +void +sched_perfcontrol_thread_group_preferred_clusters_set(__unused void *machine_data, __unused uint32_t tg_preferred_cluster, + __unused uint32_t overrides[PERFCONTROL_CLASS_MAX], __unused sched_perfcontrol_preferred_cluster_options_t options) +{ +} + +#endif /* CONFIG_SCHED_EDGE */ + +#endif /* CONFIG_THREAD_GROUPS */ diff --git a/osfmk/kern/thread_group.h b/osfmk/kern/thread_group.h index 3e5aaa61d..f18259bd7 100644 --- a/osfmk/kern/thread_group.h +++ b/osfmk/kern/thread_group.h @@ -36,6 +36,73 @@ struct thread_group; #include /* for proc_reg.h / CONFIG_THREAD_GROUPS */ +#ifndef CONFIG_THREAD_GROUPS +#error "The platform must define CONFIG_THREAD_GROUPS to 0 or 1" +#endif +#if CONFIG_THREAD_GROUPS +#include +#include + +#define THREAD_GROUP_MAX (CONFIG_TASK_MAX + 10) +#define THREAD_GROUP_MAXNAME (16) + +#define THREAD_GROUP_SYSTEM 0 // kernel (-VM) + launchd +#define THREAD_GROUP_BACKGROUND 1 // background daemons +#define THREAD_GROUP_ADAPTIVE 2 // adaptive daemons +#define THREAD_GROUP_VM 3 // kernel VM threads +#define THREAD_GROUP_IO_STORAGE 4 // kernel io storage threads +#define THREAD_GROUP_PERF_CONTROLLER 5 // kernel CLPC threads + +#define THREAD_GROUP_INVALID UINT64_MAX + +/* Thread group flags */ +#define THREAD_GROUP_FLAGS_EFFICIENT 0x1 +#define THREAD_GROUP_FLAGS_UI_APP 0x2 +#define THREAD_GROUP_FLAGS_SMP_RESTRICT 0x4 +#define THREAD_GROUP_FLAGS_VALID (THREAD_GROUP_FLAGS_EFFICIENT | THREAD_GROUP_FLAGS_UI_APP | THREAD_GROUP_FLAGS_SMP_RESTRICT) + +__BEGIN_DECLS + +void thread_group_init(void); +void thread_group_resync(boolean_t create); +struct thread_group *thread_group_create_and_retain(void); +void thread_group_init_thread(thread_t t, task_t task); +void thread_group_set_name(struct thread_group *tg, const char *name); +void thread_group_flags_update_lock(void); +void thread_group_flags_update_unlock(void); +void thread_group_set_flags(struct thread_group *tg, uint64_t flags); +void thread_group_clear_flags(struct thread_group *tg, uint64_t flags); +void thread_group_set_flags_locked(struct thread_group *tg, uint64_t flags); +void thread_group_clear_flags_locked(struct 
thread_group *tg, uint64_t flags); +struct thread_group *thread_group_find_by_name_and_retain(char *name); +struct thread_group *thread_group_find_by_id_and_retain(uint64_t id); +struct thread_group *thread_group_retain(struct thread_group *tg); +void thread_group_release(struct thread_group *tg); +struct thread_group *thread_group_get(thread_t t); +struct thread_group *thread_group_get_home_group(thread_t t); +void thread_group_set_bank(thread_t t, struct thread_group *tg); +uint64_t thread_group_get_id(struct thread_group *tg); +uint32_t thread_group_count(void); +const char * thread_group_get_name(struct thread_group *tg); +void * thread_group_get_machine_data(struct thread_group *tg); +uint32_t thread_group_machine_data_size(void); +cluster_type_t thread_group_recommendation(struct thread_group *tg); + +typedef void (*thread_group_iterate_fn_t)(void*, int, struct thread_group *); +kern_return_t thread_group_iterate_stackshot(thread_group_iterate_fn_t callout, void *arg); +uint64_t kdp_thread_group_get_flags(struct thread_group *); +boolean_t thread_group_smp_restricted(struct thread_group *tg); +void thread_group_update_recommendation(struct thread_group *tg, cluster_type_t new_recommendation); + +void thread_set_work_interval_thread_group(thread_t t, struct thread_group *tg, bool auto_join); + +#if XNU_KERNEL_PRIVATE +void thread_group_vm_add(void); +#endif + +__END_DECLS + +#endif /* CONFIG_THREAD_GROUPS */ #endif // _KERN_THREAD_GROUP_H_ diff --git a/osfmk/kern/thread_policy.c b/osfmk/kern/thread_policy.c index 3ba515bef..e82a67b72 100644 --- a/osfmk/kern/thread_policy.c +++ b/osfmk/kern/thread_policy.c @@ -137,10 +137,10 @@ static int proc_get_thread_policy_locked(thread_t thread, int category, int flavor, int* value2); static void -thread_policy_update_spinlocked(thread_t thread, boolean_t recompute_priority, task_pend_token_t pend_token); +thread_policy_update_spinlocked(thread_t thread, bool recompute_priority, task_pend_token_t pend_token); static void -thread_policy_update_internal_spinlocked(thread_t thread, boolean_t recompute_priority, task_pend_token_t pend_token); +thread_policy_update_internal_spinlocked(thread_t thread, bool recompute_priority, task_pend_token_t pend_token); void thread_policy_init(void) @@ -258,7 +258,7 @@ thread_qos_scaled_relative_priority(int qos, int qos_relprio) * flag set by -qos-policy-allow boot-arg to allow * testing thread qos policy from userspace */ -boolean_t allow_qos_policy_set = FALSE; +static TUNABLE(bool, allow_qos_policy_set, "-qos-policy-allow", false); kern_return_t thread_policy_set( @@ -276,7 +276,7 @@ thread_policy_set( return KERN_INVALID_ARGUMENT; } - if (allow_qos_policy_set == FALSE) { + if (!allow_qos_policy_set) { if (thread_is_static_param(thread)) { return KERN_POLICY_STATIC; } @@ -365,6 +365,8 @@ thread_policy_set_internal( } info = (thread_time_constraint_policy_t)policy_info; + + if (info->constraint < info->computation || info->computation > max_rt_quantum || info->computation < min_rt_quantum) { @@ -437,7 +439,7 @@ thread_policy_set_internal( return thread_affinity_set(thread, info->affinity_tag); } -#if CONFIG_EMBEDDED +#if !defined(XNU_TARGET_OS_OSX) case THREAD_BACKGROUND_POLICY: { thread_background_policy_t info; @@ -468,7 +470,7 @@ thread_policy_set_internal( break; } -#endif /* CONFIG_EMBEDDED */ +#endif /* !defined(XNU_TARGET_OS_OSX) */ case THREAD_THROUGHPUT_QOS_POLICY: { @@ -698,7 +700,7 @@ thread_workq_pri_for_qos(thread_qos_t qos) thread_qos_t thread_workq_qos_for_pri(int priority) { - int qos; + 
thread_qos_t qos; if (priority > thread_qos_policy_params.qos_pri[THREAD_QOS_USER_INTERACTIVE]) { // indicate that workq should map >UI threads to workq's // internal notation for above-UI work. @@ -1042,7 +1044,11 @@ thread_recompute_priority( * via THREAD_PRECEDENCE_POLICY. */ if (priority > thread->max_priority) { - priority = thread->max_priority; + if (thread->effective_policy.thep_promote_above_task) { + priority = MAX(thread->max_priority, thread->user_promotion_basepri); + } else { + priority = thread->max_priority; + } } else if (priority < MINPRI) { priority = MINPRI; } @@ -1068,12 +1074,12 @@ thread_recompute_priority( } } -#if CONFIG_EMBEDDED +#if !defined(XNU_TARGET_OS_OSX) /* No one can have a base priority less than MAXPRI_THROTTLE */ if (priority < MAXPRI_THROTTLE) { priority = MAXPRI_THROTTLE; } -#endif /* CONFIG_EMBEDDED */ +#endif /* !defined(XNU_TARGET_OS_OSX) */ sched_set_thread_base_priority(thread, priority); } @@ -1099,16 +1105,15 @@ thread_policy_update_tasklocked( __unused integer_t old_max_priority = thread->max_priority; - thread->task_priority = priority; - thread->max_priority = max_priority; + assert(priority >= INT16_MIN && priority <= INT16_MAX); + thread->task_priority = (int16_t)priority; + + assert(max_priority >= INT16_MIN && max_priority <= INT16_MAX); + thread->max_priority = (int16_t)max_priority; -#if CONFIG_EMBEDDED /* - * When backgrounding a thread, iOS has the semantic that - * realtime and fixed priority threads should be demoted - * to timeshare background threads. - * - * On OSX, realtime and fixed priority threads don't lose their mode. + * When backgrounding a thread, realtime and fixed priority threads + * should be demoted to timeshare background threads. * * TODO: Do this inside the thread policy update routine in order to avoid double * remove/reinsert for a runnable thread @@ -1118,9 +1123,8 @@ thread_policy_update_tasklocked( } else if ((max_priority > MAXPRI_THROTTLE) && (old_max_priority <= MAXPRI_THROTTLE)) { sched_thread_mode_undemote(thread, TH_SFLAG_THROTTLED); } -#endif /* CONFIG_EMBEDDED */ - thread_policy_update_spinlocked(thread, TRUE, pend_token); + thread_policy_update_spinlocked(thread, true, pend_token); thread_unlock(thread); splx(s); @@ -1265,6 +1269,7 @@ thread_policy_get( info->preemptible = TRUE; } + break; } @@ -1447,7 +1452,7 @@ thread_policy_create(thread_t thread) /* We pass a pend token but ignore it */ struct task_pend_token pend_token = {}; - thread_policy_update_internal_spinlocked(thread, TRUE, &pend_token); + thread_policy_update_internal_spinlocked(thread, true, &pend_token); KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_UPDATE, (IMP_UPDATE_TASK_CREATE | TASK_POLICY_THREAD))) | DBG_FUNC_END, @@ -1456,7 +1461,7 @@ thread_policy_create(thread_t thread) } static void -thread_policy_update_spinlocked(thread_t thread, boolean_t recompute_priority, task_pend_token_t pend_token) +thread_policy_update_spinlocked(thread_t thread, bool recompute_priority, task_pend_token_t pend_token) { KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_UPDATE, TASK_POLICY_THREAD) | DBG_FUNC_START), @@ -1484,7 +1489,7 @@ thread_policy_update_spinlocked(thread_t thread, boolean_t recompute_priority, t * Called with thread spinlock locked, task may be locked, thread mutex may be locked */ static void -thread_policy_update_internal_spinlocked(thread_t thread, boolean_t recompute_priority, +thread_policy_update_internal_spinlocked(thread_t thread, bool recompute_priority, task_pend_token_t pend_token) { /* @@ 
-1516,6 +1521,17 @@ thread_policy_update_internal_spinlocked(thread_t thread, boolean_t recompute_pr next_qos = MAX(requested.thrp_qos_workq_override, next_qos); } + if (task_effective.tep_darwinbg && task_effective.tep_adaptive_bg && + requested.thrp_qos_promote > THREAD_QOS_BACKGROUND) { + /* + * This thread is turnstile-boosted higher than the adaptive clamp + * by a synchronous waiter. Allow that to override the adaptive + * clamp temporarily for this thread only. + */ + next.thep_promote_above_task = true; + next_qos = requested.thrp_qos_promote; + } + next.thep_qos = next_qos; /* A task clamp will result in an effective QoS even when requested is UNSPECIFIED */ @@ -1534,14 +1550,12 @@ thread_policy_update_internal_spinlocked(thread_t thread, boolean_t recompute_pr next.thep_qos_promote = next.thep_qos; /* The ceiling only applies to threads that are in the QoS world */ + /* TODO: is it appropriate for this to limit a turnstile-boosted thread's QoS? */ if (task_effective.tep_qos_ceiling != THREAD_QOS_UNSPECIFIED && next.thep_qos != THREAD_QOS_UNSPECIFIED) { next.thep_qos = MIN(task_effective.tep_qos_ceiling, next.thep_qos); } - /* Apply the sync ipc qos override */ - assert(requested.thrp_qos_sync_ipc_override == THREAD_QOS_UNSPECIFIED); - /* * The QoS relative priority is only applicable when the original programmer's * intended (requested) QoS is in effect. When the QoS is clamped (e.g. @@ -1559,8 +1573,12 @@ thread_policy_update_internal_spinlocked(thread_t thread, boolean_t recompute_pr } /* Calculate DARWIN_BG */ - boolean_t wants_darwinbg = FALSE; - boolean_t wants_all_sockets_bg = FALSE; /* Do I want my existing sockets to be bg */ + bool wants_darwinbg = false; + bool wants_all_sockets_bg = false; /* Do I want my existing sockets to be bg */ + + if (task_effective.tep_darwinbg && !next.thep_promote_above_task) { + wants_darwinbg = true; + } /* * If DARWIN_BG has been requested at either level, it's engaged. @@ -1569,20 +1587,16 @@ thread_policy_update_internal_spinlocked(thread_t thread, boolean_t recompute_pr * after they're created */ if (requested.thrp_int_darwinbg || requested.thrp_ext_darwinbg) { - wants_all_sockets_bg = wants_darwinbg = TRUE; + wants_all_sockets_bg = wants_darwinbg = true; } if (requested.thrp_pidbind_bg) { - wants_all_sockets_bg = wants_darwinbg = TRUE; - } - - if (task_effective.tep_darwinbg) { - wants_darwinbg = TRUE; + wants_all_sockets_bg = wants_darwinbg = true; } if (next.thep_qos == THREAD_QOS_BACKGROUND || next.thep_qos == THREAD_QOS_MAINTENANCE) { - wants_darwinbg = TRUE; + wants_darwinbg = true; } /* Calculate side effects of DARWIN_BG */ @@ -1616,7 +1630,9 @@ thread_policy_update_internal_spinlocked(thread_t thread, boolean_t recompute_pr iopol = MAX(iopol, task_effective.tep_bg_iotier); } - iopol = MAX(iopol, task_effective.tep_io_tier); + if (!next.thep_promote_above_task) { + iopol = MAX(iopol, task_effective.tep_io_tier); + } /* Look up the associated IO tier value for the QoS class */ iopol = MAX(iopol, thread_qos_policy_params.qos_iotier[next.thep_qos]); @@ -1631,11 +1647,10 @@ thread_policy_update_internal_spinlocked(thread_t thread, boolean_t recompute_pr * the passive bit so that a thread doesn't end up stuck in its own throttle * window when the override goes away. 
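The override check in this hunk boils down to comparing the IO tier implied by the effective QoS against the tier the requested QoS would have used; when the effective tier is better (numerically lower), the override is considered active and the passive bit is set. A standalone sketch of that comparison, with a made-up tier table (tier 0 being the least throttled); the real values come from thread_qos_policy_params:

#include <stdbool.h>

enum {
    TOY_QOS_MAINT, TOY_QOS_BG, TOY_QOS_UT,
    TOY_QOS_LEGACY, TOY_QOS_UI, TOY_QOS_MAX
};

/* Placeholder tier mapping, not the kernel's qos_iotier table. */
static const int toy_qos_iotier[TOY_QOS_MAX] = {
    [TOY_QOS_MAINT]  = 3,
    [TOY_QOS_BG]     = 3,
    [TOY_QOS_UT]     = 1,
    [TOY_QOS_LEGACY] = 0,
    [TOY_QOS_UI]     = 0,
};

/* Active when the effective QoS lands in a better IO tier than requested. */
static bool
toy_qos_io_override_active(int effective_qos, int requested_qos)
{
    return toy_qos_iotier[effective_qos] < toy_qos_iotier[requested_qos];
}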
*/ - boolean_t qos_io_override_active = FALSE; - if (thread_qos_policy_params.qos_iotier[next.thep_qos] < - thread_qos_policy_params.qos_iotier[requested.thrp_qos]) { - qos_io_override_active = TRUE; - } + + int next_qos_iotier = thread_qos_policy_params.qos_iotier[next.thep_qos]; + int req_qos_iotier = thread_qos_policy_params.qos_iotier[requested.thrp_qos]; + bool qos_io_override_active = (next_qos_iotier < req_qos_iotier); /* Calculate Passive IO policy */ if (requested.thrp_ext_iopassive || @@ -1648,7 +1663,10 @@ thread_policy_update_internal_spinlocked(thread_t thread, boolean_t recompute_pr /* Calculate timer QOS */ uint32_t latency_qos = requested.thrp_latency_qos; - latency_qos = MAX(latency_qos, task_effective.tep_latency_qos); + if (!next.thep_promote_above_task) { + latency_qos = MAX(latency_qos, task_effective.tep_latency_qos); + } + latency_qos = MAX(latency_qos, thread_qos_policy_params.qos_latency_qos[next.thep_qos]); next.thep_latency_qos = latency_qos; @@ -1656,7 +1674,10 @@ thread_policy_update_internal_spinlocked(thread_t thread, boolean_t recompute_pr /* Calculate throughput QOS */ uint32_t through_qos = requested.thrp_through_qos; - through_qos = MAX(through_qos, task_effective.tep_through_qos); + if (!next.thep_promote_above_task) { + through_qos = MAX(through_qos, task_effective.tep_through_qos); + } + through_qos = MAX(through_qos, thread_qos_policy_params.qos_through_qos[next.thep_qos]); next.thep_through_qos = through_qos; @@ -1717,6 +1738,7 @@ thread_policy_update_internal_spinlocked(thread_t thread, boolean_t recompute_pr if (prev.thep_qos != next.thep_qos || prev.thep_qos_relprio != next.thep_qos_relprio || prev.thep_qos_ui_is_urgent != next.thep_qos_ui_is_urgent || + prev.thep_promote_above_task != next.thep_promote_above_task || prev.thep_terminated != next.thep_terminated || pend_token->tpt_force_recompute_pri == 1 || recompute_priority) { @@ -1860,7 +1882,7 @@ proc_set_thread_policy_spinlocked(thread_t thread, thread_set_requested_policy_spinlocked(thread, category, flavor, value, value2, pend_token); - thread_policy_update_spinlocked(thread, FALSE, pend_token); + thread_policy_update_spinlocked(thread, false, pend_token); KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(flavor, (category | TASK_POLICY_THREAD))) | DBG_FUNC_END, @@ -2541,16 +2563,16 @@ proc_thread_qos_add_override_internal(thread_t thread, /* since first_override_for_resource was TRUE */ override->override_contended_resource_count = 1; override->override_resource = resource; - override->override_resource_type = resource_type; + override->override_resource_type = (int16_t)resource_type; override->override_qos = THREAD_QOS_UNSPECIFIED; thread->overrides = override; } if (override) { if (override->override_qos == THREAD_QOS_UNSPECIFIED) { - override->override_qos = override_qos; + override->override_qos = (int16_t)override_qos; } else { - override->override_qos = MAX(override->override_qos, override_qos); + override->override_qos = MAX(override->override_qos, (int16_t)override_qos); } } @@ -2763,7 +2785,7 @@ task_set_main_thread_qos(task_t task, thread_t thread) thread_tid(thread), threquested_0(thread), threquested_1(thread), thread->requested_policy.thrp_qos, 0); - int primordial_qos = task_compute_main_thread_qos(task); + thread_qos_t primordial_qos = task_compute_main_thread_qos(task); proc_set_thread_policy_locked(thread, TASK_POLICY_ATTRIBUTE, TASK_POLICY_QOS_AND_RELPRIO, primordial_qos, 0, &pend_token); @@ -2784,10 +2806,10 @@ task_set_main_thread_qos(task_t task, thread_t 
thread) * Return a good guess at what the initial manager QoS will be * Dispatch can override this in userspace if it so chooses */ -int +thread_qos_t task_get_default_manager_qos(task_t task) { - int primordial_qos = task_compute_main_thread_qos(task); + thread_qos_t primordial_qos = task_compute_main_thread_qos(task); if (primordial_qos == THREAD_QOS_LEGACY) { primordial_qos = THREAD_QOS_USER_INITIATED; @@ -2806,7 +2828,7 @@ boolean_t thread_recompute_kernel_promotion_locked(thread_t thread) { boolean_t needs_update = FALSE; - int kern_promotion_schedpri = thread_get_inheritor_turnstile_sched_priority(thread); + uint8_t kern_promotion_schedpri = (uint8_t)thread_get_inheritor_turnstile_sched_priority(thread); /* * For now just assert that kern_promotion_schedpri <= MAXPRI_PROMOTE. @@ -2848,7 +2870,7 @@ thread_recompute_user_promotion_locked(thread_t thread) { boolean_t needs_update = FALSE; struct task_pend_token pend_token = {}; - int user_promotion_basepri = MIN(thread_get_inheritor_turnstile_base_priority(thread), MAXPRI_USER); + uint8_t user_promotion_basepri = MIN((uint8_t)thread_get_inheritor_turnstile_base_priority(thread), MAXPRI_USER); int old_base_pri = thread->base_pri; thread_qos_t qos_promotion; @@ -2903,7 +2925,7 @@ thread_recompute_user_promotion_locked(thread_t thread) thread_qos_t thread_user_promotion_qos_for_pri(int priority) { - int qos; + thread_qos_t qos; for (qos = THREAD_QOS_USER_INTERACTIVE; qos > THREAD_QOS_MAINTENANCE; qos--) { if (thread_qos_policy_params.qos_pri[qos] <= priority) { return qos; @@ -3085,7 +3107,7 @@ thread_get_requested_qos(thread_t thread, int *relpri) int relprio_value = 0; thread_qos_t qos; - qos = proc_get_thread_policy_locked(thread, TASK_POLICY_ATTRIBUTE, + qos = (thread_qos_t)proc_get_thread_policy_locked(thread, TASK_POLICY_ATTRIBUTE, TASK_POLICY_QOS_AND_RELPRIO, &relprio_value); if (relpri) { *relpri = -relprio_value; diff --git a/osfmk/kern/timer.c b/osfmk/kern/timer.c index 4be4464cb..695efa4c9 100644 --- a/osfmk/kern/timer.c +++ b/osfmk/kern/timer.c @@ -62,7 +62,9 @@ #include #include -#if CONFIG_EMBEDDED +#include + +#if CONFIG_SKIP_PRECISE_USER_KERNEL_TIME && !HAS_FAST_CNTVCT int precise_user_kernel_time = 0; #else int precise_user_kernel_time = 1; @@ -139,10 +141,10 @@ processor_timer_switch_thread(uint64_t tstamp, timer_t new_timer) timer_t timer; /* Update current timer. */ - timer = PROCESSOR_DATA(processor, thread_timer); + timer = processor->thread_timer; timer_advance(timer, tstamp - timer->tstamp); /* Start new timer. */ - PROCESSOR_DATA(processor, thread_timer) = new_timer; + processor->thread_timer = new_timer; new_timer->tstamp = tstamp; } diff --git a/osfmk/kern/timer_call.c b/osfmk/kern/timer_call.c index 06918edf5..4fa1d2412 100644 --- a/osfmk/kern/timer_call.c +++ b/osfmk/kern/timer_call.c @@ -36,7 +36,6 @@ #include #include #include -#include #include #include @@ -66,31 +65,16 @@ #define TIMER_KDEBUG_TRACE(x...) 
#endif - -lck_grp_t timer_call_lck_grp; -lck_attr_t timer_call_lck_attr; -lck_grp_attr_t timer_call_lck_grp_attr; - -lck_grp_t timer_longterm_lck_grp; -lck_attr_t timer_longterm_lck_attr; -lck_grp_attr_t timer_longterm_lck_grp_attr; +LCK_GRP_DECLARE(timer_call_lck_grp, "timer_call"); +LCK_GRP_DECLARE(timer_longterm_lck_grp, "timer_longterm"); /* Timer queue lock must be acquired with interrupts disabled (under splclock()) */ -#if __SMP__ #define timer_queue_lock_spin(queue) \ lck_mtx_lock_spin_always(&queue->lock_data) #define timer_queue_unlock(queue) \ lck_mtx_unlock_always(&queue->lock_data) -#else -#define timer_queue_lock_spin(queue) (void)1 -#define timer_queue_unlock(queue) (void)1 -#endif -#define QUEUE(x) ((queue_t)(x)) -#define MPQUEUE(x) ((mpqueue_head_t *)(x)) -#define TIMER_CALL(x) ((timer_call_t)(x)) -#define TCE(x) (&(x->call_entry)) /* * The longterm timer object is a global structure holding all timers * beyond the short-term, local timer queue threshold. The boot processor @@ -250,10 +234,6 @@ timer_call_init_abstime(void) void timer_call_init(void) { - lck_attr_setdefault(&timer_call_lck_attr); - lck_grp_attr_setdefault(&timer_call_lck_grp_attr); - lck_grp_init(&timer_call_lck_grp, "timer_call", &timer_call_lck_grp_attr); - timer_longterm_init(); timer_call_init_abstime(); } @@ -263,7 +243,7 @@ void timer_call_queue_init(mpqueue_head_t *queue) { DBG("timer_call_queue_init(%p)\n", queue); - mpqueue_init(queue, &timer_call_lck_grp, &timer_call_lck_attr); + mpqueue_init(queue, &timer_call_lck_grp, LCK_ATTR_NULL); } @@ -274,123 +254,158 @@ timer_call_setup( timer_call_param_t param0) { DBG("timer_call_setup(%p,%p,%p)\n", call, func, param0); - call_entry_setup(TCE(call), func, param0); - simple_lock_init(&(call)->lock, 0); - call->async_dequeue = FALSE; + + *call = (struct timer_call) { + .tc_func = func, + .tc_param0 = param0, + .tc_async_dequeue = false, + }; + + simple_lock_init(&(call)->tc_lock, 0); } -#if TIMER_ASSERT + +static mpqueue_head_t* +mpqueue_for_timer_call(timer_call_t entry) +{ + queue_t queue_entry_is_on = entry->tc_queue; + /* 'cast' the queue back to the orignal mpqueue */ + return __container_of(queue_entry_is_on, struct mpqueue_head, head); +} + + static __inline__ mpqueue_head_t * timer_call_entry_dequeue( timer_call_t entry) { - mpqueue_head_t *old_queue = MPQUEUE(TCE(entry)->queue); + mpqueue_head_t *old_mpqueue = mpqueue_for_timer_call(entry); - if (!hw_lock_held((hw_lock_t)&entry->lock)) { + /* The entry was always on a queue */ + assert(old_mpqueue != NULL); + +#if TIMER_ASSERT + if (!hw_lock_held((hw_lock_t)&entry->tc_lock)) { panic("_call_entry_dequeue() " "entry %p is not locked\n", entry); } + /* * XXX The queue lock is actually a mutex in spin mode * but there's no way to test for it being held * so we pretend it's a spinlock! 
*/ - if (!hw_lock_held((hw_lock_t)&old_queue->lock_data)) { + if (!hw_lock_held((hw_lock_t)&old_mpqueue->lock_data)) { panic("_call_entry_dequeue() " - "queue %p is not locked\n", old_queue); + "queue %p is not locked\n", old_mpqueue); + } +#endif /* TIMER_ASSERT */ + + if (old_mpqueue != timer_longterm_queue) { + priority_queue_remove(&old_mpqueue->mpq_pqhead, + &entry->tc_pqlink); } - call_entry_dequeue(TCE(entry)); - old_queue->count--; + remqueue(&entry->tc_qlink); - return old_queue; + entry->tc_queue = NULL; + + old_mpqueue->count--; + + return old_mpqueue; } static __inline__ mpqueue_head_t * timer_call_entry_enqueue_deadline( - timer_call_t entry, - mpqueue_head_t *queue, - uint64_t deadline) + timer_call_t entry, + mpqueue_head_t *new_mpqueue, + uint64_t deadline) { - mpqueue_head_t *old_queue = MPQUEUE(TCE(entry)->queue); + mpqueue_head_t *old_mpqueue = mpqueue_for_timer_call(entry); - if (!hw_lock_held((hw_lock_t)&entry->lock)) { +#if TIMER_ASSERT + if (!hw_lock_held((hw_lock_t)&entry->tc_lock)) { panic("_call_entry_enqueue_deadline() " "entry %p is not locked\n", entry); } + /* XXX More lock pretense: */ - if (!hw_lock_held((hw_lock_t)&queue->lock_data)) { + if (!hw_lock_held((hw_lock_t)&new_mpqueue->lock_data)) { panic("_call_entry_enqueue_deadline() " - "queue %p is not locked\n", queue); + "queue %p is not locked\n", new_mpqueue); } - if (old_queue != NULL && old_queue != queue) { + + if (old_mpqueue != NULL && old_mpqueue != new_mpqueue) { panic("_call_entry_enqueue_deadline() " - "old_queue %p != queue", old_queue); + "old_mpqueue %p != new_mpqueue", old_mpqueue); } +#endif /* TIMER_ASSERT */ - call_entry_enqueue_deadline(TCE(entry), QUEUE(queue), deadline); - -/* For efficiency, track the earliest soft deadline on the queue, so that - * fuzzy decisions can be made without lock acquisitions. - */ - timer_call_t thead = (timer_call_t)queue_first(&queue->head); - - queue->earliest_soft_deadline = thead->flags & TIMER_CALL_RATELIMITED ? 
TCE(thead)->deadline : thead->soft_deadline; + /* no longterm queue involved */ + assert(new_mpqueue != timer_longterm_queue); + assert(old_mpqueue != timer_longterm_queue); - if (old_queue) { - old_queue->count--; - } - queue->count++; + if (old_mpqueue == new_mpqueue) { + /* optimize the same-queue case to avoid a full re-insert */ + uint64_t old_deadline = entry->tc_pqlink.deadline; + entry->tc_pqlink.deadline = deadline; - return old_queue; -} - -#else - -static __inline__ mpqueue_head_t * -timer_call_entry_dequeue( - timer_call_t entry) -{ - mpqueue_head_t *old_queue = MPQUEUE(TCE(entry)->queue); + if (old_deadline < deadline) { + priority_queue_entry_increased(&new_mpqueue->mpq_pqhead, + &entry->tc_pqlink); + } else { + priority_queue_entry_decreased(&new_mpqueue->mpq_pqhead, + &entry->tc_pqlink); + } + } else { + if (old_mpqueue != NULL) { + priority_queue_remove(&old_mpqueue->mpq_pqhead, + &entry->tc_pqlink); - call_entry_dequeue(TCE(entry)); - old_queue->count--; + re_queue_tail(&new_mpqueue->head, &entry->tc_qlink); + } else { + enqueue_tail(&new_mpqueue->head, &entry->tc_qlink); + } - return old_queue; -} + entry->tc_queue = &new_mpqueue->head; + entry->tc_pqlink.deadline = deadline; -static __inline__ mpqueue_head_t * -timer_call_entry_enqueue_deadline( - timer_call_t entry, - mpqueue_head_t *queue, - uint64_t deadline) -{ - mpqueue_head_t *old_queue = MPQUEUE(TCE(entry)->queue); + priority_queue_insert(&new_mpqueue->mpq_pqhead, &entry->tc_pqlink); + } - call_entry_enqueue_deadline(TCE(entry), QUEUE(queue), deadline); /* For efficiency, track the earliest soft deadline on the queue, * so that fuzzy decisions can be made without lock acquisitions. */ - timer_call_t thead = (timer_call_t)queue_first(&queue->head); - queue->earliest_soft_deadline = thead->flags & TIMER_CALL_RATELIMITED ? TCE(thead)->deadline : thead->soft_deadline; + timer_call_t thead = priority_queue_min(&new_mpqueue->mpq_pqhead, struct timer_call, tc_pqlink); + + new_mpqueue->earliest_soft_deadline = thead->tc_flags & TIMER_CALL_RATELIMITED ? 
thead->tc_pqlink.deadline : thead->tc_soft_deadline; - if (old_queue) { - old_queue->count--; + if (old_mpqueue) { + old_mpqueue->count--; } - queue->count++; + new_mpqueue->count++; - return old_queue; + return old_mpqueue; } -#endif - static __inline__ void timer_call_entry_enqueue_tail( timer_call_t entry, mpqueue_head_t *queue) { - call_entry_enqueue_tail(TCE(entry), QUEUE(queue)); + /* entry is always dequeued before this call */ + assert(entry->tc_queue == NULL); + + /* + * this is only used for timer_longterm_queue, which is unordered + * and thus needs no priority queueing + */ + assert(queue == timer_longterm_queue); + + enqueue_tail(&queue->head, &entry->tc_qlink); + + entry->tc_queue = &queue->head; + queue->count++; return; } @@ -403,11 +418,17 @@ static __inline__ void timer_call_entry_dequeue_async( timer_call_t entry) { - mpqueue_head_t *old_queue = MPQUEUE(TCE(entry)->queue); - if (old_queue) { - old_queue->count--; - (void) remque(qe(entry)); - entry->async_dequeue = TRUE; + mpqueue_head_t *old_mpqueue = mpqueue_for_timer_call(entry); + if (old_mpqueue) { + old_mpqueue->count--; + + if (old_mpqueue != timer_longterm_queue) { + priority_queue_remove(&old_mpqueue->mpq_pqhead, + &entry->tc_pqlink); + } + + remqueue(&entry->tc_qlink); + entry->tc_async_dequeue = true; } return; } @@ -429,30 +450,27 @@ timer_call_enqueue_deadline_unlocked( timer_call_param_t param1, uint32_t callout_flags) { - call_entry_t entry = TCE(call); - mpqueue_head_t *old_queue; - DBG("timer_call_enqueue_deadline_unlocked(%p,%p,)\n", call, queue); - simple_lock(&call->lock, LCK_GRP_NULL); + simple_lock(&call->tc_lock, LCK_GRP_NULL); - old_queue = MPQUEUE(entry->queue); + mpqueue_head_t *old_queue = mpqueue_for_timer_call(call); if (old_queue != NULL) { timer_queue_lock_spin(old_queue); - if (call->async_dequeue) { + if (call->tc_async_dequeue) { /* collision (1c): timer already dequeued, clear flag */ #if TIMER_ASSERT TIMER_KDEBUG_TRACE(KDEBUG_TRACE, DECR_TIMER_ASYNC_DEQ | DBG_FUNC_NONE, VM_KERNEL_UNSLIDE_OR_PERM(call), - call->async_dequeue, - VM_KERNEL_UNSLIDE_OR_PERM(TCE(call)->queue), + call->tc_async_dequeue, + VM_KERNEL_UNSLIDE_OR_PERM(call->tc_queue), 0x1c, 0); timer_call_enqueue_deadline_unlocked_async1++; #endif - call->async_dequeue = FALSE; - entry->queue = NULL; + call->tc_async_dequeue = false; + call->tc_queue = NULL; } else if (old_queue != queue) { timer_call_entry_dequeue(call); #if TIMER_ASSERT @@ -470,14 +488,14 @@ timer_call_enqueue_deadline_unlocked( timer_queue_lock_spin(queue); } - call->soft_deadline = soft_deadline; - call->flags = callout_flags; - TCE(call)->param1 = param1; - call->ttd = ttd; + call->tc_soft_deadline = soft_deadline; + call->tc_flags = callout_flags; + call->tc_param1 = param1; + call->tc_ttd = ttd; timer_call_entry_enqueue_deadline(call, queue, deadline); timer_queue_unlock(queue); - simple_unlock(&call->lock); + simple_unlock(&call->tc_lock); return old_queue; } @@ -490,36 +508,35 @@ mpqueue_head_t * timer_call_dequeue_unlocked( timer_call_t call) { - call_entry_t entry = TCE(call); - mpqueue_head_t *old_queue; - DBG("timer_call_dequeue_unlocked(%p)\n", call); - simple_lock(&call->lock, LCK_GRP_NULL); - old_queue = MPQUEUE(entry->queue); + simple_lock(&call->tc_lock, LCK_GRP_NULL); + + mpqueue_head_t *old_queue = mpqueue_for_timer_call(call); + #if TIMER_ASSERT TIMER_KDEBUG_TRACE(KDEBUG_TRACE, DECR_TIMER_ASYNC_DEQ | DBG_FUNC_NONE, VM_KERNEL_UNSLIDE_OR_PERM(call), - call->async_dequeue, - VM_KERNEL_UNSLIDE_OR_PERM(TCE(call)->queue), + call->tc_async_dequeue, 
+ VM_KERNEL_UNSLIDE_OR_PERM(call->tc_queue), 0, 0); #endif if (old_queue != NULL) { timer_queue_lock_spin(old_queue); - if (call->async_dequeue) { + if (call->tc_async_dequeue) { /* collision (1c): timer already dequeued, clear flag */ #if TIMER_ASSERT TIMER_KDEBUG_TRACE(KDEBUG_TRACE, DECR_TIMER_ASYNC_DEQ | DBG_FUNC_NONE, VM_KERNEL_UNSLIDE_OR_PERM(call), - call->async_dequeue, - VM_KERNEL_UNSLIDE_OR_PERM(TCE(call)->queue), + call->tc_async_dequeue, + VM_KERNEL_UNSLIDE_OR_PERM(call->tc_queue), 0x1c, 0); timer_call_dequeue_unlocked_async1++; #endif - call->async_dequeue = FALSE; - entry->queue = NULL; + call->tc_async_dequeue = false; + call->tc_queue = NULL; } else { timer_call_entry_dequeue(call); } @@ -528,12 +545,12 @@ timer_call_dequeue_unlocked( } timer_queue_unlock(old_queue); } - simple_unlock(&call->lock); + simple_unlock(&call->tc_lock); return old_queue; } -static uint64_t -past_deadline_timer_handle(uint64_t deadline, uint64_t ctime) +uint64_t +timer_call_past_deadline_timer_handle(uint64_t deadline, uint64_t ctime) { uint64_t delta = (ctime - deadline); @@ -583,12 +600,6 @@ past_deadline_timer_handle(uint64_t deadline, uint64_t ctime) */ /* - * Inlines timer_call_entry_dequeue() and timer_call_entry_enqueue_deadline() - * cast between pointer types (mpqueue_head_t *) and (queue_t) so that - * we can use the call_entry_dequeue() and call_entry_enqueue_deadline() - * methods to operate on timer_call structs as if they are call_entry structs. - * These structures are identical except for their queue head pointer fields. - * * In the debug case, we assert that the timer call locking protocol * is being obeyed. */ @@ -609,7 +620,7 @@ timer_call_enter_internal( uint32_t urgency; uint64_t sdeadline, ttd; - assert(call->call_entry.func != NULL); + assert(call->tc_func != NULL); s = splclock(); sdeadline = deadline; @@ -636,7 +647,7 @@ timer_call_enter_internal( } if (__improbable(deadline < ctime)) { - deadline = past_deadline_timer_handle(deadline, ctime); + deadline = timer_call_past_deadline_timer_handle(deadline, ctime); sdeadline = deadline; } @@ -648,8 +659,8 @@ timer_call_enter_internal( ttd = sdeadline - ctime; #if CONFIG_DTRACE - DTRACE_TMR7(callout__create, timer_call_func_t, TCE(call)->func, - timer_call_param_t, TCE(call)->param0, uint32_t, flags, + DTRACE_TMR7(callout__create, timer_call_func_t, call->tc_func, + timer_call_param_t, call->tc_param0, uint32_t, flags, (deadline - sdeadline), (ttd >> 32), (unsigned) (ttd & 0xFFFFFFFF), call); #endif @@ -668,7 +679,7 @@ timer_call_enter_internal( } #if TIMER_TRACE - TCE(call)->entry_time = ctime; + call->tc_entry_time = ctime; #endif TIMER_KDEBUG_TRACE(KDEBUG_TRACE, @@ -716,81 +727,6 @@ timer_call_enter_with_leeway( return timer_call_enter_internal(call, param1, deadline, leeway, flags, ratelimited); } -boolean_t -timer_call_quantum_timer_enter( - timer_call_t call, - timer_call_param_t param1, - uint64_t deadline, - uint64_t ctime) -{ - assert(call->call_entry.func != NULL); - assert(ml_get_interrupts_enabled() == FALSE); - - uint32_t flags = TIMER_CALL_SYS_CRITICAL | TIMER_CALL_LOCAL; - - TIMER_KDEBUG_TRACE(KDEBUG_TRACE, DECR_TIMER_ENTER | DBG_FUNC_START, - VM_KERNEL_UNSLIDE_OR_PERM(call), - VM_KERNEL_ADDRHIDE(param1), deadline, - flags, 0); - - if (__improbable(deadline < ctime)) { - deadline = past_deadline_timer_handle(deadline, ctime); - } - - uint64_t ttd = deadline - ctime; -#if CONFIG_DTRACE - DTRACE_TMR7(callout__create, timer_call_func_t, TCE(call)->func, - timer_call_param_t, TCE(call)->param0, uint32_t, flags, 0, - 
(ttd >> 32), (unsigned) (ttd & 0xFFFFFFFF), call); -#endif - - quantum_timer_set_deadline(deadline); - TCE(call)->deadline = deadline; - TCE(call)->param1 = param1; - call->ttd = ttd; - call->flags = flags; - -#if TIMER_TRACE - TCE(call)->entry_time = ctime; -#endif - - TIMER_KDEBUG_TRACE(KDEBUG_TRACE, DECR_TIMER_ENTER | DBG_FUNC_END, - VM_KERNEL_UNSLIDE_OR_PERM(call), - 1, deadline, 0, 0); - - return true; -} - - -boolean_t -timer_call_quantum_timer_cancel( - timer_call_t call) -{ - assert(ml_get_interrupts_enabled() == FALSE); - - TIMER_KDEBUG_TRACE(KDEBUG_TRACE, - DECR_TIMER_CANCEL | DBG_FUNC_START, - VM_KERNEL_UNSLIDE_OR_PERM(call), TCE(call)->deadline, - 0, call->flags, 0); - - TCE(call)->deadline = 0; - quantum_timer_set_deadline(0); - - TIMER_KDEBUG_TRACE(KDEBUG_TRACE, - DECR_TIMER_CANCEL | DBG_FUNC_END, - VM_KERNEL_UNSLIDE_OR_PERM(call), 0, - TCE(call)->deadline - mach_absolute_time(), - TCE(call)->deadline - TCE(call)->entry_time, 0); - -#if CONFIG_DTRACE - DTRACE_TMR6(callout__cancel, timer_call_func_t, TCE(call)->func, - timer_call_param_t, TCE(call)->param0, uint32_t, call->flags, 0, - (call->ttd >> 32), (unsigned) (call->ttd & 0xFFFFFFFF)); -#endif - - return true; -} - boolean_t timer_call_cancel( timer_call_t call) @@ -803,35 +739,38 @@ timer_call_cancel( TIMER_KDEBUG_TRACE(KDEBUG_TRACE, DECR_TIMER_CANCEL | DBG_FUNC_START, VM_KERNEL_UNSLIDE_OR_PERM(call), - TCE(call)->deadline, call->soft_deadline, call->flags, 0); + call->tc_pqlink.deadline, call->tc_soft_deadline, call->tc_flags, 0); old_queue = timer_call_dequeue_unlocked(call); if (old_queue != NULL) { timer_queue_lock_spin(old_queue); - if (!queue_empty(&old_queue->head)) { - timer_queue_cancel(old_queue, TCE(call)->deadline, CE(queue_first(&old_queue->head))->deadline); - timer_call_t thead = (timer_call_t)queue_first(&old_queue->head); - old_queue->earliest_soft_deadline = thead->flags & TIMER_CALL_RATELIMITED ? TCE(thead)->deadline : thead->soft_deadline; + + timer_call_t new_head = priority_queue_min(&old_queue->mpq_pqhead, struct timer_call, tc_pqlink); + + if (new_head) { + timer_queue_cancel(old_queue, call->tc_pqlink.deadline, new_head->tc_pqlink.deadline); + old_queue->earliest_soft_deadline = new_head->tc_flags & TIMER_CALL_RATELIMITED ? 
new_head->tc_pqlink.deadline : new_head->tc_soft_deadline; } else { - timer_queue_cancel(old_queue, TCE(call)->deadline, UINT64_MAX); + timer_queue_cancel(old_queue, call->tc_pqlink.deadline, UINT64_MAX); old_queue->earliest_soft_deadline = UINT64_MAX; } + timer_queue_unlock(old_queue); } TIMER_KDEBUG_TRACE(KDEBUG_TRACE, DECR_TIMER_CANCEL | DBG_FUNC_END, VM_KERNEL_UNSLIDE_OR_PERM(call), VM_KERNEL_UNSLIDE_OR_PERM(old_queue), - TCE(call)->deadline - mach_absolute_time(), - TCE(call)->deadline - TCE(call)->entry_time, 0); + call->tc_pqlink.deadline - mach_absolute_time(), + call->tc_pqlink.deadline - call->tc_entry_time, 0); splx(s); #if CONFIG_DTRACE - DTRACE_TMR6(callout__cancel, timer_call_func_t, TCE(call)->func, - timer_call_param_t, TCE(call)->param0, uint32_t, call->flags, 0, - (call->ttd >> 32), (unsigned) (call->ttd & 0xFFFFFFFF)); -#endif + DTRACE_TMR6(callout__cancel, timer_call_func_t, call->tc_func, + timer_call_param_t, call->tc_param0, uint32_t, call->tc_flags, 0, + (call->tc_ttd >> 32), (unsigned) (call->tc_ttd & 0xFFFFFFFF)); +#endif /* CONFIG_DTRACE */ return old_queue != NULL; } @@ -852,11 +791,16 @@ timer_queue_shutdown( s = splclock(); - /* Note comma operator in while expression re-locking each iteration */ - while ((void)timer_queue_lock_spin(queue), !queue_empty(&queue->head)) { - call = TIMER_CALL(queue_first(&queue->head)); + while (TRUE) { + timer_queue_lock_spin(queue); + + call = qe_queue_first(&queue->head, struct timer_call, tc_qlink); + + if (call == NULL) { + break; + } - if (!simple_lock_try(&call->lock, LCK_GRP_NULL)) { + if (!simple_lock_try(&call->tc_lock, LCK_GRP_NULL)) { /* * case (2b) lock order inversion, dequeue and skip * Don't change the call_entry queue back-pointer @@ -868,15 +812,15 @@ timer_queue_shutdown( TIMER_KDEBUG_TRACE(KDEBUG_TRACE, DECR_TIMER_ASYNC_DEQ | DBG_FUNC_NONE, VM_KERNEL_UNSLIDE_OR_PERM(call), - call->async_dequeue, - VM_KERNEL_UNSLIDE_OR_PERM(TCE(call)->queue), + call->tc_async_dequeue, + VM_KERNEL_UNSLIDE_OR_PERM(call->tc_queue), 0x2b, 0); #endif timer_queue_unlock(queue); continue; } - boolean_t call_local = ((call->flags & TIMER_CALL_LOCAL) != 0); + boolean_t call_local = ((call->tc_flags & TIMER_CALL_LOCAL) != 0); /* remove entry from old queue */ timer_call_entry_dequeue(call); @@ -884,17 +828,17 @@ timer_queue_shutdown( if (call_local == FALSE) { /* and queue it on new, discarding LOCAL timers */ - new_queue = timer_queue_assign(TCE(call)->deadline); + new_queue = timer_queue_assign(call->tc_pqlink.deadline); timer_queue_lock_spin(new_queue); timer_call_entry_enqueue_deadline( - call, new_queue, TCE(call)->deadline); + call, new_queue, call->tc_pqlink.deadline); timer_queue_unlock(new_queue); } else { timer_queue_shutdown_discarded++; } assert(call_local == FALSE); - simple_unlock(&call->lock); + simple_unlock(&call->tc_lock); } timer_queue_unlock(queue); @@ -902,51 +846,6 @@ timer_queue_shutdown( } -void -quantum_timer_expire( - uint64_t deadline) -{ - processor_t processor = current_processor(); - timer_call_t call = TIMER_CALL(&(processor->quantum_timer)); - - if (__improbable(TCE(call)->deadline > deadline)) { - panic("CPU quantum timer deadlin out of sync with timer call deadline"); - } - - TIMER_KDEBUG_TRACE(KDEBUG_TRACE, - DECR_TIMER_EXPIRE | DBG_FUNC_NONE, - VM_KERNEL_UNSLIDE_OR_PERM(call), - TCE(call)->deadline, - TCE(call)->deadline, - TCE(call)->entry_time, 0); - - timer_call_func_t func = TCE(call)->func; - timer_call_param_t param0 = TCE(call)->param0; - timer_call_param_t param1 = TCE(call)->param1; - - 
TIMER_KDEBUG_TRACE(KDEBUG_TRACE, - DECR_TIMER_CALLOUT | DBG_FUNC_START, - VM_KERNEL_UNSLIDE_OR_PERM(call), VM_KERNEL_UNSLIDE(func), - VM_KERNEL_ADDRHIDE(param0), - VM_KERNEL_ADDRHIDE(param1), - 0); - -#if CONFIG_DTRACE - DTRACE_TMR7(callout__start, timer_call_func_t, func, - timer_call_param_t, param0, unsigned, call->flags, - 0, (call->ttd >> 32), - (unsigned) (call->ttd & 0xFFFFFFFF), call); -#endif - (*func)(param0, param1); - - TIMER_KDEBUG_TRACE(KDEBUG_TRACE, - DECR_TIMER_CALLOUT | DBG_FUNC_END, - VM_KERNEL_UNSLIDE_OR_PERM(call), VM_KERNEL_UNSLIDE(func), - VM_KERNEL_ADDRHIDE(param0), - VM_KERNEL_ADDRHIDE(param1), - 0); -} - static uint32_t timer_queue_expire_lock_skips; uint64_t timer_queue_expire_with_options( @@ -958,6 +857,14 @@ timer_queue_expire_with_options( uint32_t tc_iterations = 0; DBG("timer_queue_expire(%p,)\n", queue); + /* 'rescan' means look at every timer in the list, instead of + * early-exiting when the head of the list expires in the future. + * when 'rescan' is true, iterate by linked list instead of priority queue. + * + * TODO: if we keep a deadline ordered and soft-deadline ordered + * priority queue, then it's no longer necessary to do that + */ + uint64_t cur_deadline = deadline; timer_queue_lock_spin(queue); @@ -970,29 +877,33 @@ timer_queue_expire_with_options( } if (call == NULL) { - call = TIMER_CALL(queue_first(&queue->head)); + if (rescan == FALSE) { + call = priority_queue_min(&queue->mpq_pqhead, struct timer_call, tc_pqlink); + } else { + call = qe_queue_first(&queue->head, struct timer_call, tc_qlink); + } } - if (call->soft_deadline <= cur_deadline) { + if (call->tc_soft_deadline <= cur_deadline) { timer_call_func_t func; timer_call_param_t param0, param1; - TCOAL_DEBUG(0xDDDD0000, queue->earliest_soft_deadline, call->soft_deadline, 0, 0, 0); + TCOAL_DEBUG(0xDDDD0000, queue->earliest_soft_deadline, call->tc_soft_deadline, 0, 0, 0); TIMER_KDEBUG_TRACE(KDEBUG_TRACE, DECR_TIMER_EXPIRE | DBG_FUNC_NONE, VM_KERNEL_UNSLIDE_OR_PERM(call), - call->soft_deadline, - TCE(call)->deadline, - TCE(call)->entry_time, 0); + call->tc_soft_deadline, + call->tc_pqlink.deadline, + call->tc_entry_time, 0); - if ((call->flags & TIMER_CALL_RATELIMITED) && - (TCE(call)->deadline > cur_deadline)) { + if ((call->tc_flags & TIMER_CALL_RATELIMITED) && + (call->tc_pqlink.deadline > cur_deadline)) { if (rescan == FALSE) { break; } } - if (!simple_lock_try(&call->lock, LCK_GRP_NULL)) { + if (!simple_lock_try(&call->tc_lock, LCK_GRP_NULL)) { /* case (2b) lock inversion, dequeue and skip */ timer_queue_expire_lock_skips++; timer_call_entry_dequeue_async(call); @@ -1002,11 +913,11 @@ timer_queue_expire_with_options( timer_call_entry_dequeue(call); - func = TCE(call)->func; - param0 = TCE(call)->param0; - param1 = TCE(call)->param1; + func = call->tc_func; + param0 = call->tc_param0; + param1 = call->tc_param1; - simple_unlock(&call->lock); + simple_unlock(&call->tc_lock); timer_queue_unlock(queue); TIMER_KDEBUG_TRACE(KDEBUG_TRACE, @@ -1018,15 +929,15 @@ timer_queue_expire_with_options( #if CONFIG_DTRACE DTRACE_TMR7(callout__start, timer_call_func_t, func, - timer_call_param_t, param0, unsigned, call->flags, - 0, (call->ttd >> 32), - (unsigned) (call->ttd & 0xFFFFFFFF), call); + timer_call_param_t, param0, unsigned, call->tc_flags, + 0, (call->tc_ttd >> 32), + (unsigned) (call->tc_ttd & 0xFFFFFFFF), call); #endif /* Maintain time-to-deadline in per-processor data * structure for thread wakeup deadline statistics. 
*/ - uint64_t *ttdp = &(PROCESSOR_DATA(current_processor(), timer_call_ttd)); - *ttdp = call->ttd; + uint64_t *ttdp = ¤t_processor()->timer_call_ttd; + *ttdp = call->tc_ttd; (*func)(param0, param1); *ttdp = 0; #if CONFIG_DTRACE @@ -1046,8 +957,8 @@ timer_queue_expire_with_options( if (__probable(rescan == FALSE)) { break; } else { - int64_t skew = TCE(call)->deadline - call->soft_deadline; - assert(TCE(call)->deadline >= call->soft_deadline); + int64_t skew = call->tc_pqlink.deadline - call->tc_soft_deadline; + assert(call->tc_pqlink.deadline >= call->tc_soft_deadline); /* DRK: On a latency quality-of-service level change, * re-sort potentially rate-limited timers. The platform @@ -1060,16 +971,18 @@ timer_queue_expire_with_options( */ if (timer_resort_threshold(skew)) { - if (__probable(simple_lock_try(&call->lock, LCK_GRP_NULL))) { + if (__probable(simple_lock_try(&call->tc_lock, LCK_GRP_NULL))) { + /* TODO: don't need to dequeue before enqueue */ timer_call_entry_dequeue(call); - timer_call_entry_enqueue_deadline(call, queue, call->soft_deadline); - simple_unlock(&call->lock); + timer_call_entry_enqueue_deadline(call, queue, call->tc_soft_deadline); + simple_unlock(&call->tc_lock); call = NULL; } } if (call) { - call = TIMER_CALL(queue_next(qe(call))); - if (queue_end(&queue->head, qe(call))) { + call = qe_queue_next(&queue->head, call, struct timer_call, tc_qlink); + + if (call == NULL) { break; } } @@ -1077,10 +990,11 @@ timer_queue_expire_with_options( } } - if (!queue_empty(&queue->head)) { - call = TIMER_CALL(queue_first(&queue->head)); - cur_deadline = TCE(call)->deadline; - queue->earliest_soft_deadline = (call->flags & TIMER_CALL_RATELIMITED) ? TCE(call)->deadline: call->soft_deadline; + call = priority_queue_min(&queue->mpq_pqhead, struct timer_call, tc_pqlink); + + if (call) { + cur_deadline = call->tc_pqlink.deadline; + queue->earliest_soft_deadline = (call->tc_flags & TIMER_CALL_RATELIMITED) ? 
call->tc_pqlink.deadline: call->tc_soft_deadline; } else { queue->earliest_soft_deadline = cur_deadline = UINT64_MAX; } @@ -1141,45 +1055,45 @@ timer_queue_migrate(mpqueue_head_t *queue_from, mpqueue_head_t *queue_to) timer_queue_lock_spin(queue_to); - head_to = TIMER_CALL(queue_first(&queue_to->head)); - if (queue_empty(&queue_to->head)) { + head_to = priority_queue_min(&queue_to->mpq_pqhead, struct timer_call, tc_pqlink); + + if (head_to == NULL) { timers_migrated = -1; goto abort1; } timer_queue_lock_spin(queue_from); - if (queue_empty(&queue_from->head)) { + call = priority_queue_min(&queue_from->mpq_pqhead, struct timer_call, tc_pqlink); + + if (call == NULL) { timers_migrated = -2; goto abort2; } - call = TIMER_CALL(queue_first(&queue_from->head)); - if (TCE(call)->deadline < TCE(head_to)->deadline) { + if (call->tc_pqlink.deadline < head_to->tc_pqlink.deadline) { timers_migrated = 0; goto abort2; } /* perform scan for non-migratable timers */ - do { - if (call->flags & TIMER_CALL_LOCAL) { + qe_foreach_element(call, &queue_from->head, tc_qlink) { + if (call->tc_flags & TIMER_CALL_LOCAL) { timers_migrated = -3; goto abort2; } - call = TIMER_CALL(queue_next(qe(call))); - } while (!queue_end(&queue_from->head, qe(call))); + } /* migration loop itself -- both queues are locked */ - while (!queue_empty(&queue_from->head)) { - call = TIMER_CALL(queue_first(&queue_from->head)); - if (!simple_lock_try(&call->lock, LCK_GRP_NULL)) { + qe_foreach_element_safe(call, &queue_from->head, tc_qlink) { + if (!simple_lock_try(&call->tc_lock, LCK_GRP_NULL)) { /* case (2b) lock order inversion, dequeue only */ #ifdef TIMER_ASSERT TIMER_KDEBUG_TRACE(KDEBUG_TRACE, DECR_TIMER_ASYNC_DEQ | DBG_FUNC_NONE, VM_KERNEL_UNSLIDE_OR_PERM(call), - VM_KERNEL_UNSLIDE_OR_PERM(TCE(call)->queue), - VM_KERNEL_UNSLIDE_OR_PERM(call->lock.interlock.lock_data), + VM_KERNEL_UNSLIDE_OR_PERM(call->tc_queue), + 0, 0x2b, 0); #endif timer_queue_migrate_lock_skips++; @@ -1188,9 +1102,9 @@ timer_queue_migrate(mpqueue_head_t *queue_from, mpqueue_head_t *queue_to) } timer_call_entry_dequeue(call); timer_call_entry_enqueue_deadline( - call, queue_to, TCE(call)->deadline); + call, queue_to, call->tc_pqlink.deadline); timers_migrated++; - simple_unlock(&call->lock); + simple_unlock(&call->tc_lock); } queue_from->earliest_soft_deadline = UINT64_MAX; abort2: @@ -1228,18 +1142,14 @@ timer_queue_trace( DECR_TIMER_QUEUE | DBG_FUNC_START, queue->count, mach_absolute_time(), 0, 0, 0); - if (!queue_empty(&queue->head)) { - call = TIMER_CALL(queue_first(&queue->head)); - do { - TIMER_KDEBUG_TRACE(KDEBUG_TRACE, - DECR_TIMER_QUEUE | DBG_FUNC_NONE, - call->soft_deadline, - TCE(call)->deadline, - TCE(call)->entry_time, - VM_KERNEL_UNSLIDE(TCE(call)->func), - 0); - call = TIMER_CALL(queue_next(qe(call))); - } while (!queue_end(&queue->head, qe(call))); + qe_foreach_element(call, &queue->head, tc_qlink) { + TIMER_KDEBUG_TRACE(KDEBUG_TRACE, + DECR_TIMER_QUEUE | DBG_FUNC_NONE, + call->tc_soft_deadline, + call->tc_pqlink.deadline, + call->tc_entry_time, + VM_KERNEL_UNSLIDE(call->tc_func), + 0); } TIMER_KDEBUG_TRACE(KDEBUG_TRACE, @@ -1303,13 +1213,13 @@ timer_longterm_enqueue_unlocked(timer_call_t call, * whether an update is necessary. 
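/*
 * Illustrative sketch, not part of the patch: the queue walks above
 * (timer_queue_migrate, timer_queue_trace) are rewritten from open-coded
 * queue_first()/queue_next()/queue_end() loops to the qe_foreach_element()
 * and qe_foreach_element_safe() macros from kern/queue.h. The struct and
 * field names below are hypothetical; the _safe variant is the one to use
 * when the current element may be unlinked during the walk.
 */
struct example_elem {
	queue_chain_t   ee_link;        /* linkage field named in the macro's third argument */
	bool            ee_expired;
};

static void
example_prune(queue_head_t *head)
{
	struct example_elem *ee;

	qe_foreach_element_safe(ee, head, ee_link) {
		if (ee->ee_expired) {
			/* safe to unlink: the iterator has already advanced */
			remqueue(&ee->ee_link);
		}
	}
}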
*/ assert(!ml_get_interrupts_enabled()); - simple_lock(&call->lock, LCK_GRP_NULL); + simple_lock(&call->tc_lock, LCK_GRP_NULL); timer_queue_lock_spin(timer_longterm_queue); - TCE(call)->deadline = deadline; - TCE(call)->param1 = param1; - call->ttd = ttd; - call->soft_deadline = soft_deadline; - call->flags = callout_flags; + call->tc_pqlink.deadline = deadline; + call->tc_param1 = param1; + call->tc_ttd = ttd; + call->tc_soft_deadline = soft_deadline; + call->tc_flags = callout_flags; timer_call_entry_enqueue_tail(call, timer_longterm_queue); tlp->enqueues++; @@ -1325,7 +1235,7 @@ timer_longterm_enqueue_unlocked(timer_call_t call, update_required = TRUE; } timer_queue_unlock(timer_longterm_queue); - simple_unlock(&call->lock); + simple_unlock(&call->tc_lock); if (update_required) { /* @@ -1360,7 +1270,6 @@ void timer_longterm_scan(timer_longterm_t *tlp, uint64_t time_start) { - queue_entry_t qe; timer_call_t call; uint64_t threshold; uint64_t deadline; @@ -1384,19 +1293,16 @@ timer_longterm_scan(timer_longterm_t *tlp, timer_master_queue = timer_queue_cpu(master_cpu); timer_queue_lock_spin(timer_master_queue); - qe = queue_first(&timer_longterm_queue->head); - while (!queue_end(&timer_longterm_queue->head, qe)) { - call = TIMER_CALL(qe); - deadline = call->soft_deadline; - qe = queue_next(qe); - if (!simple_lock_try(&call->lock, LCK_GRP_NULL)) { + qe_foreach_element_safe(call, &timer_longterm_queue->head, tc_qlink) { + deadline = call->tc_soft_deadline; + if (!simple_lock_try(&call->tc_lock, LCK_GRP_NULL)) { /* case (2c) lock order inversion, dequeue only */ #ifdef TIMER_ASSERT TIMER_KDEBUG_TRACE(KDEBUG_TRACE, DECR_TIMER_ASYNC_DEQ | DBG_FUNC_NONE, VM_KERNEL_UNSLIDE_OR_PERM(call), - VM_KERNEL_UNSLIDE_OR_PERM(TCE(call)->queue), - VM_KERNEL_UNSLIDE_OR_PERM(call->lock.interlock.lock_data), + VM_KERNEL_UNSLIDE_OR_PERM(call->tc_queue), + 0, 0x2c, 0); #endif timer_call_entry_dequeue_async(call); @@ -1421,14 +1327,14 @@ timer_longterm_scan(timer_longterm_t *tlp, TIMER_KDEBUG_TRACE(KDEBUG_TRACE, DECR_TIMER_ESCALATE | DBG_FUNC_NONE, VM_KERNEL_UNSLIDE_OR_PERM(call), - TCE(call)->deadline, - TCE(call)->entry_time, - VM_KERNEL_UNSLIDE(TCE(call)->func), + call->tc_pqlink.deadline, + call->tc_entry_time, + VM_KERNEL_UNSLIDE(call->tc_func), 0); tlp->escalates++; timer_call_entry_dequeue(call); timer_call_entry_enqueue_deadline( - call, timer_master_queue, TCE(call)->deadline); + call, timer_master_queue, call->tc_pqlink.deadline); /* * A side-effect of the following call is to update * the actual hardware deadline if required. @@ -1440,7 +1346,7 @@ timer_longterm_scan(timer_longterm_t *tlp, tlp->threshold.call = call; } } - simple_unlock(&call->lock); + simple_unlock(&call->tc_lock); /* Abort scan if we're taking too long. 
*/ if (mach_absolute_time() > time_limit) { @@ -1590,12 +1496,7 @@ timer_longterm_init(void) tlp->threshold.preempted = TIMER_LONGTERM_NONE; tlp->threshold.deadline = TIMER_LONGTERM_NONE; - lck_attr_setdefault(&timer_longterm_lck_attr); - lck_grp_attr_setdefault(&timer_longterm_lck_grp_attr); - lck_grp_init(&timer_longterm_lck_grp, - "timer_longterm", &timer_longterm_lck_grp_attr); - mpqueue_init(&tlp->queue, - &timer_longterm_lck_grp, &timer_longterm_lck_attr); + mpqueue_init(&tlp->queue, &timer_longterm_lck_grp, LCK_ATTR_NULL); timer_call_setup(&tlp->threshold.timer, timer_longterm_callout, (timer_call_param_t) tlp); @@ -1654,7 +1555,6 @@ static void timer_master_scan(timer_longterm_t *tlp, uint64_t now) { - queue_entry_t qe; timer_call_t call; uint64_t threshold; uint64_t deadline; @@ -1669,15 +1569,12 @@ timer_master_scan(timer_longterm_t *tlp, timer_master_queue = timer_queue_cpu(master_cpu); timer_queue_lock_spin(timer_master_queue); - qe = queue_first(&timer_master_queue->head); - while (!queue_end(&timer_master_queue->head, qe)) { - call = TIMER_CALL(qe); - deadline = TCE(call)->deadline; - qe = queue_next(qe); - if ((call->flags & TIMER_CALL_LOCAL) != 0) { + qe_foreach_element_safe(call, &timer_master_queue->head, tc_qlink) { + deadline = call->tc_pqlink.deadline; + if ((call->tc_flags & TIMER_CALL_LOCAL) != 0) { continue; } - if (!simple_lock_try(&call->lock, LCK_GRP_NULL)) { + if (!simple_lock_try(&call->tc_lock, LCK_GRP_NULL)) { /* case (2c) lock order inversion, dequeue only */ timer_call_entry_dequeue_async(call); continue; @@ -1691,7 +1588,7 @@ timer_master_scan(timer_longterm_t *tlp, tlp->threshold.call = call; } } - simple_unlock(&call->lock); + simple_unlock(&call->tc_lock); } timer_queue_unlock(timer_master_queue); } @@ -1925,3 +1822,228 @@ timer_set_user_idle_level(int ilevel) return KERN_SUCCESS; } + +#pragma mark - running timers + +#define RUNNING_TIMER_FAKE_FLAGS (TIMER_CALL_SYS_CRITICAL | \ + TIMER_CALL_LOCAL) + +/* + * timer_call_trace_* functions mimic the tracing behavior from the normal + * timer_call subsystem, so tools continue to function. 
+ */ + +static void +timer_call_trace_enter_before(struct timer_call *call, uint64_t deadline, + uint32_t flags, uint64_t now) +{ +#pragma unused(call, deadline, flags, now) + TIMER_KDEBUG_TRACE(KDEBUG_TRACE, DECR_TIMER_ENTER | DBG_FUNC_START, + VM_KERNEL_UNSLIDE_OR_PERM(call), VM_KERNEL_ADDRHIDE(call->tc_param1), + deadline, flags, 0); +#if CONFIG_DTRACE + uint64_t ttd = deadline - now; + DTRACE_TMR7(callout__create, timer_call_func_t, call->tc_func, + timer_call_param_t, call->tc_param0, uint32_t, flags, 0, + (ttd >> 32), (unsigned int)(ttd & 0xFFFFFFFF), NULL); +#endif /* CONFIG_DTRACE */ + TIMER_KDEBUG_TRACE(KDEBUG_TRACE, DECR_TIMER_ENTER | DBG_FUNC_END, + VM_KERNEL_UNSLIDE_OR_PERM(call), 0, deadline, 0, 0); +} + +static void +timer_call_trace_enter_after(struct timer_call *call, uint64_t deadline) +{ +#pragma unused(call, deadline) + TIMER_KDEBUG_TRACE(KDEBUG_TRACE, DECR_TIMER_ENTER | DBG_FUNC_END, + VM_KERNEL_UNSLIDE_OR_PERM(call), 0, deadline, 0, 0); +} + +static void +timer_call_trace_cancel(struct timer_call *call) +{ +#pragma unused(call) + __unused uint64_t deadline = call->tc_pqlink.deadline; + TIMER_KDEBUG_TRACE(KDEBUG_TRACE, DECR_TIMER_CANCEL | DBG_FUNC_START, + VM_KERNEL_UNSLIDE_OR_PERM(call), deadline, 0, + call->tc_flags, 0); + TIMER_KDEBUG_TRACE(KDEBUG_TRACE, DECR_TIMER_CANCEL | DBG_FUNC_END, + VM_KERNEL_UNSLIDE_OR_PERM(call), 0, deadline - mach_absolute_time(), + deadline - call->tc_entry_time, 0); +#if CONFIG_DTRACE +#if TIMER_TRACE + uint64_t ttd = deadline - call->tc_entry_time; +#else + uint64_t ttd = UINT64_MAX; +#endif /* TIMER_TRACE */ + DTRACE_TMR6(callout__cancel, timer_call_func_t, call->tc_func, + timer_call_param_t, call->tc_param0, uint32_t, call->tc_flags, 0, + (ttd >> 32), (unsigned int)(ttd & 0xFFFFFFFF)); +#endif /* CONFIG_DTRACE */ +} + +static void +timer_call_trace_expire_entry(struct timer_call *call) +{ +#pragma unused(call) + TIMER_KDEBUG_TRACE(KDEBUG_TRACE, DECR_TIMER_CALLOUT | DBG_FUNC_START, + VM_KERNEL_UNSLIDE_OR_PERM(call), VM_KERNEL_UNSLIDE(call->tc_func), + VM_KERNEL_ADDRHIDE(call->tc_param0), + VM_KERNEL_ADDRHIDE(call->tc_param1), + 0); +#if CONFIG_DTRACE +#if TIMER_TRACE + uint64_t ttd = call->tc_pqlink.deadline - call->tc_entry_time; +#else /* TIMER_TRACE */ + uint64_t ttd = UINT64_MAX; +#endif /* TIMER_TRACE */ + DTRACE_TMR7(callout__start, timer_call_func_t, call->tc_func, + timer_call_param_t, call->tc_param0, unsigned, call->tc_flags, + 0, (ttd >> 32), (unsigned int)(ttd & 0xFFFFFFFF), NULL); +#endif /* CONFIG_DTRACE */ +} + +static void +timer_call_trace_expire_return(struct timer_call *call) +{ +#pragma unused(call) +#if CONFIG_DTRACE + DTRACE_TMR4(callout__end, timer_call_func_t, call->tc_func, + call->tc_param0, call->tc_param1, NULL); +#endif /* CONFIG_DTRACE */ + TIMER_KDEBUG_TRACE(KDEBUG_TRACE, DECR_TIMER_CALLOUT | DBG_FUNC_END, + VM_KERNEL_UNSLIDE_OR_PERM(call), + VM_KERNEL_UNSLIDE(call->tc_func), + VM_KERNEL_ADDRHIDE(call->tc_param0), + VM_KERNEL_ADDRHIDE(call->tc_param1), + 0); +} + +/* + * Set a new deadline for a running timer on this processor. 
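/*
 * Illustrative sketch, not part of the patch: the expected call pattern for
 * the per-processor running timers defined below. A dispatcher-like caller
 * stages the quantum deadline with running_timer_setup(), commits everything
 * staged with a single running_timers_sync(), and the timer-interrupt path
 * drains whatever is due with running_timers_expire(). The example_* names
 * and the NULL parameter are hypothetical.
 */
static void
example_begin_quantum(processor_t processor, uint64_t quantum_end, uint64_t now)
{
	/* stage the deadline; the decrementer is not reprogrammed yet */
	running_timer_setup(processor, RUNNING_TIMER_QUANTUM, NULL, quantum_end, now);

	/* ... stage any other running timers for this dispatch ... */

	/* one hardware reprogram for everything staged above */
	running_timers_sync();
}

static bool
example_timer_interrupt(processor_t processor)
{
	/* run the callback of any running timer at or past its deadline */
	return running_timers_expire(processor, mach_absolute_time());
}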
+ */ +void +running_timer_setup(processor_t processor, enum running_timer timer, + void *param, uint64_t deadline, uint64_t now) +{ + assert(timer < RUNNING_TIMER_MAX); + assert(ml_get_interrupts_enabled() == FALSE); + + struct timer_call *call = &processor->running_timers[timer]; + + timer_call_trace_enter_before(call, deadline, RUNNING_TIMER_FAKE_FLAGS, + now); + + if (__improbable(deadline < now)) { + deadline = timer_call_past_deadline_timer_handle(deadline, now); + } + + call->tc_pqlink.deadline = deadline; +#if TIMER_TRACE + call->tc_entry_time = now; +#endif /* TIMER_TRACE */ + call->tc_param1 = param; + + timer_call_trace_enter_after(call, deadline); +} + +void +running_timers_sync(void) +{ + timer_resync_deadlines(); +} + +void +running_timer_enter(processor_t processor, unsigned int timer, + void *param, uint64_t deadline, uint64_t now) +{ + running_timer_setup(processor, timer, param, deadline, now); + running_timers_sync(); +} + +/* + * Call the callback for any running timers that fired for this processor. + * Returns true if any timers were past their deadline. + */ +bool +running_timers_expire(processor_t processor, uint64_t now) +{ + bool expired = false; + + if (!processor->running_timers_active) { + return expired; + } + + for (int i = 0; i < RUNNING_TIMER_MAX; i++) { + struct timer_call *call = &processor->running_timers[i]; + + uint64_t deadline = call->tc_pqlink.deadline; + if (deadline > now) { + continue; + } + + expired = true; + timer_call_trace_expire_entry(call); + call->tc_func(call->tc_param0, call->tc_param1); + timer_call_trace_expire_return(call); + } + + return expired; +} + +void +running_timer_clear(processor_t processor, enum running_timer timer) +{ + struct timer_call *call = &processor->running_timers[timer]; + uint64_t deadline = call->tc_pqlink.deadline; + if (deadline == EndOfAllTime) { + return; + } + + call->tc_pqlink.deadline = EndOfAllTime; +#if TIMER_TRACE + call->tc_entry_time = 0; +#endif /* TIMER_TRACE */ + timer_call_trace_cancel(call); +} + +void +running_timer_cancel(processor_t processor, unsigned int timer) +{ + running_timer_clear(processor, timer); + running_timers_sync(); +} + +uint64_t +running_timers_deadline(processor_t processor) +{ + if (!processor->running_timers_active) { + return EndOfAllTime; + } + + uint64_t deadline = EndOfAllTime; + for (int i = 0; i < RUNNING_TIMER_MAX; i++) { + uint64_t candidate = + processor->running_timers[i].tc_pqlink.deadline; + if (candidate != 0 && candidate < deadline) { + deadline = candidate; + } + } + + return deadline; +} + +void +running_timers_activate(processor_t processor) +{ + processor->running_timers_active = true; + running_timers_sync(); +} + +void +running_timers_deactivate(processor_t processor) +{ + assert(processor->running_timers_active == true); + processor->running_timers_active = false; + running_timers_sync(); +} diff --git a/osfmk/kern/timer_call.h b/osfmk/kern/timer_call.h index a817ce088..37bf58b6f 100644 --- a/osfmk/kern/timer_call.h +++ b/osfmk/kern/timer_call.h @@ -25,8 +25,20 @@ * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ + /* - * Declarations for timer interrupt callouts. + * The timer_call system is responsible for manipulating timers that call + * callbacks at a given deadline (with or without some leeway for coalescing). + * + * Call timer_call_setup once on a timer_call structure to register the callback + * function and a context parameter that's passed to it (param0). 
+ * + * To arm the timer to fire at a deadline, call any of the timer_call_enter + * functions. If the function used accepts a parameter, it will be passed to + * the callback function when it fires. + * + * If the timer needs to be cancelled (like if the timer_call has been armed but + * now needs to be deallocated), call timer_call_cancel. */ #ifndef _KERN_TIMER_CALL_H_ @@ -37,37 +49,46 @@ #ifdef XNU_KERNEL_PRIVATE -#include #include #ifdef MACH_KERNEL_PRIVATE #include +#include #include extern boolean_t mach_timer_coalescing_enabled; extern void timer_call_queue_init(mpqueue_head_t *); +#endif /* MACH_KERNEL_PRIVATE */ + +#if XNU_TARGET_OS_OSX +#define TIMER_TRACE 1 #endif -/* - * NOTE: for now, bsd/dev/dtrace/dtrace_glue.c has its own definition - * of this data structure, and the two had better match. - */ +typedef void *timer_call_param_t; +typedef void (*timer_call_func_t)( + timer_call_param_t param0, + timer_call_param_t param1); + typedef struct timer_call { - struct call_entry call_entry; - decl_simple_lock_data(, lock); /* protects call_entry queue */ - uint64_t soft_deadline; - uint32_t flags; - boolean_t async_dequeue; /* this field is protected by - * call_entry queue's lock */ - uint64_t ttd; /* Time to deadline at creation */ + uint64_t tc_soft_deadline; + decl_simple_lock_data(, tc_lock); /* protects tc_queue */ + struct priority_queue_entry_deadline tc_pqlink; + queue_head_t *tc_queue; + queue_chain_t tc_qlink; + timer_call_func_t tc_func; + timer_call_param_t tc_param0; + timer_call_param_t tc_param1; + uint64_t tc_ttd; /* Time to deadline at creation */ +#if TIMER_TRACE + uint64_t tc_entry_time; +#endif + uint32_t tc_flags; + /* this field is locked by the lock in the object tc_queue points at */ + bool tc_async_dequeue; } timer_call_data_t, *timer_call_t; #define EndOfAllTime 0xFFFFFFFFFFFFFFFFULL -typedef void *timer_call_param_t; -typedef void (*timer_call_func_t)( - timer_call_param_t param0, - timer_call_param_t param1); /* * Flags to alter the default timer/timeout coalescing behavior @@ -125,20 +146,9 @@ extern boolean_t timer_call_enter_with_leeway( uint32_t flags, boolean_t ratelimited); -extern boolean_t timer_call_quantum_timer_enter( - timer_call_t call, - timer_call_param_t param1, - uint64_t deadline, - uint64_t ctime); - extern boolean_t timer_call_cancel( timer_call_t call); -extern boolean_t timer_call_quantum_timer_cancel( - timer_call_t call); - -extern void timer_call_init(void); - extern void timer_call_setup( timer_call_t call, timer_call_func_t func, @@ -173,6 +183,90 @@ typedef struct { } timer_coalescing_priority_params_t; extern timer_coalescing_priority_params_t tcoal_prio_params; +/* + * Initialize the timer call subsystem during system startup. + */ +extern void timer_call_init(void); + +#if MACH_KERNEL_PRIVATE + +/* + * Handle deadlines in the past. + */ +uint64_t timer_call_past_deadline_timer_handle(uint64_t deadline, + uint64_t ctime); + +/* + * Running timers are only active for a given CPU when a non-idle thread + * is running. + */ + +enum running_timer { + RUNNING_TIMER_QUANTUM, +#if KPERF + RUNNING_TIMER_KPERF, +#endif /* KPERF */ + RUNNING_TIMER_MAX, +}; + +/* + * Get the earliest active deadline for this processor. + */ +uint64_t running_timers_deadline(processor_t processor); + +/* + * Run the expire handler to process any timers past their deadline. Returns + * true if any timer was processed, and false otherwise. 
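/*
 * Illustrative sketch, not part of the patch: a kernel client of the
 * timer_call interface documented above. The example_* names, the callback
 * body, and the 10 ms interval are hypothetical; timer_call_setup(),
 * timer_call_enter_with_leeway(), and timer_call_cancel() are the calls
 * declared in this header, while TIMER_CALL_SYS_NORMAL and
 * nanoseconds_to_absolutetime() are assumed from the wider kernel.
 */
static struct timer_call example_call;

static void
example_callout(timer_call_param_t param0, timer_call_param_t param1)
{
	/* param0 was registered in timer_call_setup(); param1 in the enter call */
}

static void
example_arm(void *ctx)
{
	uint64_t interval, deadline;

	timer_call_setup(&example_call, example_callout, ctx);

	nanoseconds_to_absolutetime(10 * 1000 * 1000, &interval);      /* 10 ms */
	deadline = mach_absolute_time() + interval;

	/* no leeway, not rate-limited */
	timer_call_enter_with_leeway(&example_call, NULL, deadline, 0,
	    TIMER_CALL_SYS_NORMAL, FALSE);
}

static void
example_disarm(void)
{
	/* returns TRUE if the callout was still pending when cancelled */
	(void)timer_call_cancel(&example_call);
}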
+ */ +bool running_timers_expire(processor_t processor, uint64_t now); + +/* + * Set up a new deadline for the given running timer on the processor, but don't + * synchronize it with the hardware. A subsequent call to running_timers_sync + * is necessary. This allows thread_dispatch to batch all of the setup and only + * set the decrementer once. + */ +void running_timer_setup(processor_t processor, enum running_timer timer, + void *param, uint64_t deadline, uint64_t now); + +/* + * Synchronize the state of any running timers that have been set up with the + * hardware. + */ +void running_timers_sync(void); + +/* + * Enter a new deadline for the given running timer on the processor and put it + * into effect. + */ +void running_timer_enter(processor_t processor, enum running_timer timer, + void *param, uint64_t deadline, uint64_t now); + +/* + * Clear the deadline and parameters for the given running timer on the + * processor. + */ +void running_timer_clear(processor_t processor, enum running_timer timer); + +/* + * Cancel a running timer on the processor. + */ +void running_timer_cancel(processor_t processor, enum running_timer timer); + +/* + * Activate the running timers for the given, current processor. Should only be + * called by thread_dispatch. + */ +void running_timers_activate(processor_t processor); + +/* + * Deactivate the running timers for the given, current processor. Should only + * be called by thread_dispatch. + */ +void running_timers_deactivate(processor_t processor); + +#endif /* MACH_KERNEL_PRIVATE */ + #endif /* XNU_KERNEL_PRIVATE */ #endif /* _KERN_TIMER_CALL_H_ */ diff --git a/osfmk/kern/timer_queue.h b/osfmk/kern/timer_queue.h index cba8edd6d..b029dd258 100644 --- a/osfmk/kern/timer_queue.h +++ b/osfmk/kern/timer_queue.h @@ -150,9 +150,6 @@ extern uint64_t timer_queue_expire_with_options( uint64_t, boolean_t); -extern void quantum_timer_expire( - uint64_t deadline); - /* Shutdown a timer queue and reassign existing activities */ extern void timer_queue_shutdown( mpqueue_head_t *queue); @@ -180,8 +177,6 @@ extern void timer_queue_expire_local(void *arg); extern void timer_set_deadline(uint64_t deadline); -extern void quantum_timer_set_deadline(uint64_t deadline); - /* Migrate the local timer queue of a given cpu to the master cpu */ extern uint32_t timer_queue_migrate_cpu(int target_cpu); diff --git a/osfmk/kern/tlock.c b/osfmk/kern/tlock.c index 1c75a2ecc..f29bd10fa 100644 --- a/osfmk/kern/tlock.c +++ b/osfmk/kern/tlock.c @@ -32,6 +32,7 @@ #include #include #include +#include #include #if defined(__x86_64__) @@ -63,7 +64,7 @@ extern uint64_t TLockTimeOut; */ void -lck_ticket_init(lck_ticket_t *tlock) +lck_ticket_init(lck_ticket_t *tlock, lck_grp_t *grp) { memset(tlock, 0, sizeof(*tlock)); /* Current ticket size limit--tickets can be trivially expanded @@ -78,6 +79,11 @@ lck_ticket_init(lck_ticket_t *tlock) __assert_only uintptr_t tn = (uintptr_t) &tlocki->nticket; assert(((tcn & 3) == 0) && (tcn == tc) && (tn == (tc + 1))); + + if (grp) { + lck_grp_reference(grp); + lck_grp_lckcnt_incr(grp, LCK_TYPE_TICKET); + } } static void @@ -116,11 +122,20 @@ load_exclusive_acquire8(uint8_t *target) * Returns when the current ticket is observed equal to "mt" */ static void __attribute__((noinline)) -tlock_contended(uint8_t *tp, uint8_t mt, lck_ticket_t *tlock, thread_t cthread) +tlock_contended(uint8_t *tp, uint8_t mt, lck_ticket_t *tlock, thread_t cthread LCK_GRP_ARG(lck_grp_t *grp)) { uint8_t cticket; uint64_t etime = 0, ctime = 0, stime = 0; +#if CONFIG_DTRACE || 
LOCK_STATS + uint64_t begin = 0; + boolean_t stat_enabled = lck_grp_ticket_spin_enabled(tlock LCK_GRP_ARG(grp)); + + if (__improbable(stat_enabled)) { + begin = mach_absolute_time(); + } +#endif /* CONFIG_DTRACE || LOCK_STATS */ + assertf(tlock->lck_owner != (uintptr_t) cthread, "Recursive ticket lock, owner: %p, current thread: %p", (void *) tlock->lck_owner, (void *) cthread); for (;;) { @@ -136,6 +151,12 @@ tlock_contended(uint8_t *tp, uint8_t mt, lck_ticket_t *tlock, thread_t cthread) */ os_atomic_clear_exclusive(); tlock_mark_owned(tlock, cthread); +#if CONFIG_DTRACE || LOCK_STATS + lck_grp_ticket_update_miss(tlock LCK_GRP_ARG(grp)); + if (__improbable(stat_enabled)) { + lck_grp_ticket_update_spin(tlock LCK_GRP_ARG(grp), mach_absolute_time() - begin); + } +#endif /* CONFIG_DTRACE || LOCK_STATS */ return; } #else /* !WFE */ @@ -144,6 +165,12 @@ tlock_contended(uint8_t *tp, uint8_t mt, lck_ticket_t *tlock, thread_t cthread) #endif /* x64 */ if ((cticket = __c11_atomic_load((_Atomic uint8_t *) tp, __ATOMIC_SEQ_CST)) == mt) { tlock_mark_owned(tlock, cthread); +#if CONFIG_DTRACE || LOCK_STATS + if (__improbable(stat_enabled)) { + lck_grp_ticket_update_spin(tlock LCK_GRP_ARG(grp), mach_absolute_time() - begin); + } + lck_grp_ticket_update_miss(tlock LCK_GRP_ARG(grp)); +#endif /* CONFIG_DTRACE || LOCK_STATS */ return; } #endif /* !WFE */ @@ -166,7 +193,7 @@ tlock_contended(uint8_t *tp, uint8_t mt, lck_ticket_t *tlock, thread_t cthread) } void -lck_ticket_lock(lck_ticket_t *tlock) +(lck_ticket_lock)(lck_ticket_t * tlock LCK_GRP_ARG(lck_grp_t *grp)) { lck_ticket_internal *tlocki = &tlock->tu; thread_t cthread = current_thread(); @@ -181,10 +208,12 @@ lck_ticket_lock(lck_ticket_t *tlock) /* Contention? branch to out of line contended block */ if (__improbable(tlocka.cticket != tlocka.nticket)) { - return tlock_contended(&tlocki->cticket, tlocka.nticket, tlock, cthread); + return tlock_contended(&tlocki->cticket, tlocka.nticket, tlock, cthread LCK_GRP_ARG(grp)); } tlock_mark_owned(tlock, cthread); + + lck_grp_ticket_update_held(tlock LCK_GRP_ARG(grp)); } void @@ -212,6 +241,9 @@ lck_ticket_unlock(lck_ticket_t *tlock) set_event(); #endif // __arm__ #endif /* !x86_64 */ +#if CONFIG_DTRACE + LOCKSTAT_RECORD(LS_LCK_TICKET_LOCK_RELEASE, tlock); +#endif /* CONFIG_DTRACE */ enable_preemption(); } diff --git a/osfmk/kern/turnstile.c b/osfmk/kern/turnstile.c index e2b189dcb..baa08bf27 100644 --- a/osfmk/kern/turnstile.c +++ b/osfmk/kern/turnstile.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017 Apple Inc. All rights reserved. + * Copyright (c) 2017-2020 Apple Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -47,10 +47,10 @@ #include #include -static zone_t turnstiles_zone; -static int turnstile_max_hop; +static TUNABLE(int, turnstile_max_hop, "turnstile_max_hop", TURNSTILE_MAX_HOP_DEFAULT); +static ZONE_DECLARE(turnstiles_zone, "turnstiles", sizeof(struct turnstile), ZC_NONE); + static struct mpsc_daemon_queue turnstile_deallocate_queue; -#define MAX_TURNSTILES (thread_max) #define TURNSTILES_CHUNK (THREAD_CHUNK) /* Global table for turnstile promote policy for all type of turnstiles */ @@ -83,16 +83,10 @@ os_refgrp_decl(static, turnstile_refgrp, "turnstile", NULL); #if DEVELOPMENT || DEBUG static queue_head_t turnstiles_list; -static lck_spin_t global_turnstile_lock; -lck_grp_t turnstiles_dev_lock_grp; -lck_attr_t turnstiles_dev_lock_attr; -lck_grp_attr_t turnstiles_dev_lock_grp_attr; +static LCK_GRP_DECLARE(turnstiles_dev_lock_grp, "turnstile_dev_lock"); +static LCK_SPIN_DECLARE(global_turnstile_lock, &turnstiles_dev_lock_grp); -#define global_turnstiles_lock_init() \ - lck_spin_init(&global_turnstile_lock, &turnstiles_dev_lock_grp, &turnstiles_dev_lock_attr) -#define global_turnstiles_lock_destroy() \ - lck_spin_destroy(&global_turnstile_lock, &turnstiles_dev_lock_grp) #define global_turnstiles_lock() \ lck_spin_lock_grp(&global_turnstile_lock, &turnstiles_dev_lock_grp) #define global_turnstiles_lock_try() \ @@ -114,6 +108,8 @@ uint64_t thread_block_on_regular_waitq_count; /* Static function declarations */ static turnstile_type_t turnstile_get_type(struct turnstile *turnstile); +static bool +turnstile_is_send_turnstile(struct turnstile *turnstile); static uint32_t turnstile_get_gencount(struct turnstile *turnstile); static void @@ -214,6 +210,40 @@ turnstile_get_type(struct turnstile *turnstile) return (turnstile_type_t) type_and_gencount.ts_type; } +/* Only safe to be called from stackshot context */ +static bool +turnstile_is_send_turnstile(struct turnstile *turnstile) +{ + if (not_in_kdp) { + panic("turnstile_is_send_turnstile() called outside of kernel debugger context"); + } + + if (turnstile_get_type(turnstile) == TURNSTILE_SYNC_IPC) { + ipc_port_t port = (ipc_port_t) turnstile->ts_proprietor; + + return port_send_turnstile(port) == turnstile; + } + + return false; +} + +/* Only safe to be called from stackshot context */ +static bool +turnstile_is_receive_turnstile(struct turnstile *turnstile) +{ + if (not_in_kdp) { + panic("turnstile_is_receive_turnstile() called outside of kernel debugger context"); + } + + if (turnstile_get_type(turnstile) == TURNSTILE_SYNC_IPC) { + ipc_port_t port = (ipc_port_t) turnstile->ts_proprietor; + + return *port_rcv_turnstile_address(port) == turnstile; + } + + return false; +} + static uint32_t turnstile_get_gencount(struct turnstile *turnstile) { @@ -262,12 +292,10 @@ SECURITY_READ_ONLY_LATE(static struct turnstile_htable_bucket *)turnstile_htable /* Bucket locks for turnstile hashtable */ -lck_grp_t turnstiles_htable_lock_grp; -lck_attr_t turnstiles_htable_lock_attr; -lck_grp_attr_t turnstiles_htable_lock_grp_attr; +LCK_GRP_DECLARE(turnstiles_htable_lock_grp, "turnstiles_htable_locks"); #define turnstile_bucket_lock_init(bucket) \ - lck_spin_init(&bucket->ts_ht_bucket_lock, &turnstiles_htable_lock_grp, &turnstiles_htable_lock_attr) + lck_spin_init(&bucket->ts_ht_bucket_lock, &turnstiles_htable_lock_grp, LCK_ATTR_NULL) #define turnstile_bucket_lock(bucket) \ lck_spin_lock_grp(&bucket->ts_ht_bucket_lock, &turnstiles_htable_lock_grp) #define turnstile_bucket_unlock(bucket) \ @@ -297,18 +325,15 @@ 
turnstiles_hashtable_init(void) assert(ts_htable_buckets <= TURNSTILE_HTABLE_BUCKETS_MAX); uint32_t ts_htable_size = ts_htable_buckets * sizeof(struct turnstile_htable_bucket); - turnstile_htable_irq_safe = (struct turnstile_htable_bucket *)kalloc(ts_htable_size); + turnstile_htable_irq_safe = zalloc_permanent(ts_htable_size, ZALIGN_PTR); if (turnstile_htable_irq_safe == NULL) { panic("Turnstiles hash table memory allocation failed!"); } - turnstile_htable = (struct turnstile_htable_bucket *)kalloc(ts_htable_size); + turnstile_htable = zalloc_permanent(ts_htable_size, ZALIGN_PTR); if (turnstile_htable == NULL) { panic("Turnstiles hash table memory allocation failed!"); } - lck_grp_attr_setdefault(&turnstiles_htable_lock_grp_attr); - lck_grp_init(&turnstiles_htable_lock_grp, "turnstiles_htable_locks", &turnstiles_htable_lock_grp_attr); - lck_attr_setdefault(&turnstiles_htable_lock_attr); /* Initialize all the buckets of the hashtables */ for (uint32_t i = 0; i < ts_htable_buckets; i++) { @@ -766,28 +791,12 @@ turnstile_deallocate_queue_invoke(mpsc_queue_chain_t e, void turnstiles_init(void) { - turnstiles_zone = zinit(sizeof(struct turnstile), - MAX_TURNSTILES * sizeof(struct turnstile), - TURNSTILES_CHUNK * sizeof(struct turnstile), - "turnstiles"); - - if (!PE_parse_boot_argn("turnstile_max_hop", &turnstile_max_hop, sizeof(turnstile_max_hop))) { - turnstile_max_hop = TURNSTILE_MAX_HOP_DEFAULT; - } - turnstiles_hashtable_init(); thread_deallocate_daemon_register_queue(&turnstile_deallocate_queue, turnstile_deallocate_queue_invoke); #if DEVELOPMENT || DEBUG - /* Initialize the global turnstile locks and lock group */ - - lck_grp_attr_setdefault(&turnstiles_dev_lock_grp_attr); - lck_grp_init(&turnstiles_dev_lock_grp, "turnstiles_dev_lock", &turnstiles_dev_lock_grp_attr); - lck_attr_setdefault(&turnstiles_dev_lock_attr); - global_turnstiles_lock_init(); - queue_init(&turnstiles_list); /* Initialize turnstile test primitive */ @@ -796,7 +805,6 @@ turnstiles_init(void) tstile_test_prim_init(&test_prim_global_ts_kernel); tstile_test_prim_init(&test_prim_global_ts_kernel_hash); #endif - return; } /* @@ -857,8 +865,7 @@ turnstile_init(struct turnstile *turnstile) turnstile->ts_priority = 0; turnstile->ts_inheritor_flags = TURNSTILE_UPDATE_FLAGS_NONE; turnstile->ts_port_ref = 0; - priority_queue_init(&turnstile->ts_inheritor_queue, - PRIORITY_QUEUE_BUILTIN_MAX_HEAP); + priority_queue_init(&turnstile->ts_inheritor_queue); #if DEVELOPMENT || DEBUG turnstile->ts_thread = current_thread(); @@ -1460,8 +1467,9 @@ turnstile_need_thread_promotion_update( int thread_link_priority; boolean_t needs_update = FALSE; - thread_link_priority = priority_queue_entry_key(&(dst_turnstile->ts_waitq.waitq_prio_queue), - &(thread->wait_prioq_links)); + thread_link_priority = priority_queue_entry_sched_pri( + &dst_turnstile->ts_waitq.waitq_prio_queue, + &thread->wait_prioq_links); int priority = turnstile_compute_thread_push(dst_turnstile, thread); @@ -1482,20 +1490,20 @@ turnstile_need_thread_promotion_update( * Returns: whether the maximum priority of the queue changed. 
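/*
 * Illustrative sketch, not part of the patch: the sched-pri priority-queue
 * idiom the turnstile changes switch to. The key is stored on the entry with
 * priority_queue_entry_set_sched_pri() before the keyless insert; the insert
 * reports whether the maximum changed, and priority_queue_max() recovers the
 * owning element. The example_* struct and field names are hypothetical, and
 * the head/entry types are assumed from kern/priority_queue.h; the queue is
 * assumed to have been set up with priority_queue_init().
 */
struct example_owner {
	struct priority_queue_sched_max    eo_queue;    /* max-heap of waiters */
};

struct example_waiter {
	struct priority_queue_entry_sched  ew_link;
};

static bool
example_add_waiter(struct example_owner *o, struct example_waiter *w, int pri)
{
	priority_queue_entry_init(&w->ew_link);
	priority_queue_entry_set_sched_pri(&o->eo_queue, &w->ew_link,
	    (priority_queue_key_t)pri, false);

	/* returns true when this entry became the new maximum */
	return priority_queue_insert(&o->eo_queue, &w->ew_link);
}

static int
example_max_pri(struct example_owner *o)
{
	struct example_waiter *top = priority_queue_max(&o->eo_queue,
	    struct example_waiter, ew_link);

	return top ? (int)priority_queue_entry_sched_pri(&o->eo_queue, &top->ew_link) : 0;
}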
*/ static boolean_t -turnstile_priority_queue_update_entry_key(struct priority_queue *q, - priority_queue_entry_t elt, priority_queue_key_t pri) +turnstile_priority_queue_update_entry_key(struct priority_queue_sched_max *q, + priority_queue_entry_sched_t elt, priority_queue_key_t pri) { - priority_queue_key_t old_key = priority_queue_max_key(q); + priority_queue_key_t old_key = priority_queue_max_sched_pri(q); - if (priority_queue_entry_key(q, elt) < pri) { - if (priority_queue_entry_increase(q, elt, pri, - PRIORITY_QUEUE_SCHED_PRI_MAX_HEAP_COMPARE)) { - return old_key != priority_queue_max_key(q); + if (priority_queue_entry_sched_pri(q, elt) < pri) { + priority_queue_entry_set_sched_pri(q, elt, pri, false); + if (priority_queue_entry_increased(q, elt)) { + return old_key != priority_queue_max_sched_pri(q); } - } else if (priority_queue_entry_key(q, elt) > pri) { - if (priority_queue_entry_decrease(q, elt, pri, - PRIORITY_QUEUE_SCHED_PRI_MAX_HEAP_COMPARE)) { - return old_key != priority_queue_max_key(q); + } else if (priority_queue_entry_sched_pri(q, elt) > pri) { + priority_queue_entry_set_sched_pri(q, elt, pri, false); + if (priority_queue_entry_decreased(q, elt)) { + return old_key != priority_queue_max_sched_pri(q); } } @@ -1525,8 +1533,9 @@ turnstile_update_thread_promotion_locked( int priority = turnstile_compute_thread_push(dst_turnstile, thread); - thread_link_priority = priority_queue_entry_key(&(dst_turnstile->ts_waitq.waitq_prio_queue), - &(thread->wait_prioq_links)); + thread_link_priority = priority_queue_entry_sched_pri( + &dst_turnstile->ts_waitq.waitq_prio_queue, + &thread->wait_prioq_links); if (priority != thread_link_priority) { KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, @@ -1539,7 +1548,7 @@ turnstile_update_thread_promotion_locked( if (!turnstile_priority_queue_update_entry_key( &dst_turnstile->ts_waitq.waitq_prio_queue, - &thread->wait_prioq_links, priority)) { + &thread->wait_prioq_links, (priority_queue_key_t)priority)) { return FALSE; } @@ -1582,18 +1591,20 @@ thread_add_turnstile_promotion( case TURNSTILE_USER_PROMOTE: case TURNSTILE_USER_IPC_PROMOTE: - if (priority_queue_insert(&(thread->base_inheritor_queue), - &turnstile->ts_inheritor_links, turnstile->ts_priority, - PRIORITY_QUEUE_SCHED_PRI_MAX_HEAP_COMPARE)) { + priority_queue_entry_set_sched_pri(&thread->base_inheritor_queue, + &turnstile->ts_inheritor_links, turnstile->ts_priority, false); + if (priority_queue_insert(&thread->base_inheritor_queue, + &turnstile->ts_inheritor_links)) { needs_update = thread_recompute_user_promotion_locked(thread); } break; case TURNSTILE_KERNEL_PROMOTE: - if (priority_queue_insert(&(thread->sched_inheritor_queue), - &turnstile->ts_inheritor_links, turnstile->ts_priority, - PRIORITY_QUEUE_SCHED_PRI_MAX_HEAP_COMPARE)) { + priority_queue_entry_set_sched_pri(&thread->sched_inheritor_queue, + &turnstile->ts_inheritor_links, turnstile->ts_priority, false); + if (priority_queue_insert(&thread->sched_inheritor_queue, + &turnstile->ts_inheritor_links)) { needs_update = thread_recompute_kernel_promotion_locked(thread); } @@ -1648,16 +1659,14 @@ thread_remove_turnstile_promotion( switch (turnstile_promote_policy[turnstile_get_type(turnstile)]) { case TURNSTILE_USER_PROMOTE: case TURNSTILE_USER_IPC_PROMOTE: - if (priority_queue_remove(&(thread->base_inheritor_queue), - &turnstile->ts_inheritor_links, - PRIORITY_QUEUE_SCHED_PRI_MAX_HEAP_COMPARE)) { + if (priority_queue_remove(&thread->base_inheritor_queue, + &turnstile->ts_inheritor_links)) { needs_update = 
thread_recompute_user_promotion_locked(thread); } break; case TURNSTILE_KERNEL_PROMOTE: - if (priority_queue_remove(&(thread->sched_inheritor_queue), - &turnstile->ts_inheritor_links, - PRIORITY_QUEUE_SCHED_PRI_MAX_HEAP_COMPARE)) { + if (priority_queue_remove(&thread->sched_inheritor_queue, + &turnstile->ts_inheritor_links)) { needs_update = thread_recompute_kernel_promotion_locked(thread); } break; @@ -1701,12 +1710,14 @@ thread_needs_turnstile_promotion_update( switch (turnstile_promote_policy[turnstile_get_type(turnstile)]) { case TURNSTILE_USER_PROMOTE: case TURNSTILE_USER_IPC_PROMOTE: - turnstile_link_priority = priority_queue_entry_key(&(thread->base_inheritor_queue), - &(turnstile->ts_inheritor_links)); + turnstile_link_priority = priority_queue_entry_sched_pri( + &thread->base_inheritor_queue, + &turnstile->ts_inheritor_links); break; case TURNSTILE_KERNEL_PROMOTE: - turnstile_link_priority = priority_queue_entry_key(&(thread->sched_inheritor_queue), - &(turnstile->ts_inheritor_links)); + turnstile_link_priority = priority_queue_entry_sched_pri( + &thread->sched_inheritor_queue, + &turnstile->ts_inheritor_links); break; default: panic("turnstile promotion for type %d not yet implemented", turnstile_get_type(turnstile)); @@ -1740,7 +1751,9 @@ thread_update_turnstile_promotion_locked( switch (turnstile_promote_policy[turnstile_get_type(turnstile)]) { case TURNSTILE_USER_PROMOTE: case TURNSTILE_USER_IPC_PROMOTE: - turnstile_link_priority = priority_queue_entry_key(&(thread->base_inheritor_queue), &turnstile->ts_inheritor_links); + turnstile_link_priority = priority_queue_entry_sched_pri( + &thread->base_inheritor_queue, + &turnstile->ts_inheritor_links); if (turnstile_priority_queue_update_entry_key(&(thread->base_inheritor_queue), &turnstile->ts_inheritor_links, turnstile->ts_priority)) { @@ -1748,7 +1761,9 @@ thread_update_turnstile_promotion_locked( } break; case TURNSTILE_KERNEL_PROMOTE: - turnstile_link_priority = priority_queue_entry_key(&(thread->sched_inheritor_queue), &turnstile->ts_inheritor_links); + turnstile_link_priority = priority_queue_entry_sched_pri( + &thread->sched_inheritor_queue, + &turnstile->ts_inheritor_links); if (turnstile_priority_queue_update_entry_key(&(thread->sched_inheritor_queue), &turnstile->ts_inheritor_links, turnstile->ts_priority)) { @@ -1838,8 +1853,9 @@ thread_get_inheritor_turnstile_sched_priority(thread_t thread) struct turnstile, ts_inheritor_links); if (max_turnstile) { - return priority_queue_entry_key(&thread->sched_inheritor_queue, - &max_turnstile->ts_inheritor_links); + return priority_queue_entry_sched_pri( + &thread->sched_inheritor_queue, + &max_turnstile->ts_inheritor_links); } return 0; @@ -1865,8 +1881,9 @@ thread_get_inheritor_turnstile_base_priority(thread_t thread) struct turnstile, ts_inheritor_links); if (max_turnstile) { - return priority_queue_entry_key(&thread->base_inheritor_queue, - &max_turnstile->ts_inheritor_links); + return priority_queue_entry_sched_pri( + &thread->base_inheritor_queue, + &max_turnstile->ts_inheritor_links); } return 0; @@ -2037,8 +2054,9 @@ turnstile_need_turnstile_promotion_update( int src_turnstile_link_priority; boolean_t needs_update = FALSE; - src_turnstile_link_priority = priority_queue_entry_key(&(dst_turnstile->ts_inheritor_queue), - &(src_turnstile->ts_inheritor_links)); + src_turnstile_link_priority = priority_queue_entry_sched_pri( + &dst_turnstile->ts_inheritor_queue, + &src_turnstile->ts_inheritor_links); needs_update = (src_turnstile_link_priority == src_turnstile->ts_priority) ? 
FALSE : TRUE; return needs_update; @@ -2064,8 +2082,9 @@ turnstile_update_turnstile_promotion_locked( struct turnstile *src_turnstile) { int src_turnstile_link_priority; - src_turnstile_link_priority = priority_queue_entry_key(&(dst_turnstile->ts_inheritor_queue), - &(src_turnstile->ts_inheritor_links)); + src_turnstile_link_priority = priority_queue_entry_sched_pri( + &dst_turnstile->ts_inheritor_queue, + &src_turnstile->ts_inheritor_links); if (src_turnstile->ts_priority != src_turnstile_link_priority) { KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, @@ -2157,10 +2176,11 @@ turnstile_add_turnstile_promotion( VM_KERNEL_UNSLIDE_OR_PERM(src_turnstile), src_turnstile->ts_priority, 0, 0); - priority_queue_entry_init(&(src_turnstile->ts_inheritor_links)); + priority_queue_entry_init(&src_turnstile->ts_inheritor_links); + priority_queue_entry_set_sched_pri(&dst_turnstile->ts_inheritor_queue, + &src_turnstile->ts_inheritor_links, src_turnstile->ts_priority, false); if (priority_queue_insert(&dst_turnstile->ts_inheritor_queue, - &src_turnstile->ts_inheritor_links, src_turnstile->ts_priority, - PRIORITY_QUEUE_SCHED_PRI_MAX_HEAP_COMPARE)) { + &src_turnstile->ts_inheritor_links)) { /* Update dst turnstile priority */ needs_update = turnstile_recompute_priority_locked(dst_turnstile); } @@ -2207,8 +2227,7 @@ turnstile_remove_turnstile_promotion( 0, 0, 0); if (priority_queue_remove(&dst_turnstile->ts_inheritor_queue, - &src_turnstile->ts_inheritor_links, - PRIORITY_QUEUE_SCHED_PRI_MAX_HEAP_COMPARE)) { + &src_turnstile->ts_inheritor_links)) { /* Update dst turnstile priority */ needs_update = turnstile_recompute_priority_locked(dst_turnstile); } @@ -2300,10 +2319,11 @@ turnstile_waitq_add_thread_priority_queue( * queue by calling priority queue increase/decrease * operations. 
*/ - priority_queue_entry_init(&(thread->wait_prioq_links)); + priority_queue_entry_init(&thread->wait_prioq_links); + priority_queue_entry_set_sched_pri(&wq->waitq_prio_queue, + &thread->wait_prioq_links, priority, false); priority_queue_insert(&wq->waitq_prio_queue, - &thread->wait_prioq_links, priority, - PRIORITY_QUEUE_SCHED_PRI_MAX_HEAP_COMPARE); + &thread->wait_prioq_links); } /* @@ -2343,8 +2363,9 @@ turnstile_recompute_priority_locked( struct thread, wait_prioq_links); if (max_thread) { - thread_max_pri = priority_queue_entry_key(&turnstile->ts_waitq.waitq_prio_queue, - &max_thread->wait_prioq_links); + thread_max_pri = priority_queue_entry_sched_pri( + &turnstile->ts_waitq.waitq_prio_queue, + &max_thread->wait_prioq_links); } max_turnstile = priority_queue_max(&turnstile->ts_inheritor_queue, @@ -2352,12 +2373,13 @@ turnstile_recompute_priority_locked( if (max_turnstile) { assert(turnstile_promote_policy[turnstile_get_type(turnstile)] != TURNSTILE_KERNEL_PROMOTE); - turnstile_max_pri = priority_queue_entry_key(&turnstile->ts_inheritor_queue, - &max_turnstile->ts_inheritor_links); + turnstile_max_pri = priority_queue_entry_sched_pri( + &turnstile->ts_inheritor_queue, + &max_turnstile->ts_inheritor_links); } new_priority = max(thread_max_pri, turnstile_max_pri); - turnstile->ts_priority = new_priority; + turnstile->ts_priority = (uint8_t)new_priority; if (old_priority != new_priority) { KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, @@ -2447,8 +2469,9 @@ turnstile_workq_proprietor_of_max_turnstile( max_turnstile = priority_queue_max(&turnstile->ts_inheritor_queue, struct turnstile, ts_inheritor_links); if (max_turnstile) { - max_priority = priority_queue_entry_key(&turnstile->ts_inheritor_queue, - &max_turnstile->ts_inheritor_links); + max_priority = priority_queue_entry_sched_pri( + &turnstile->ts_inheritor_queue, + &max_turnstile->ts_inheritor_links); proprietor = max_turnstile->ts_proprietor; } @@ -2500,7 +2523,7 @@ turnstile_workloop_pusher_info( max_thread = priority_queue_max(&turnstile->ts_waitq.waitq_prio_queue, struct thread, wait_prioq_links); if (max_thread) { - max_thread_pri = priority_queue_entry_key( + max_thread_pri = priority_queue_entry_sched_pri( &turnstile->ts_waitq.waitq_prio_queue, &max_thread->wait_prioq_links); } @@ -2508,8 +2531,9 @@ turnstile_workloop_pusher_info( max_ts = priority_queue_max(&turnstile->ts_inheritor_queue, struct turnstile, ts_inheritor_links); if (max_ts) { - max_ts_pri = priority_queue_entry_key(&turnstile->ts_inheritor_queue, - &max_ts->ts_inheritor_links); + max_ts_pri = priority_queue_entry_sched_pri( + &turnstile->ts_inheritor_queue, + &max_ts->ts_inheritor_links); } /* @@ -3171,6 +3195,38 @@ kdp_turnstile_traverse_inheritor_chain(struct turnstile *ts, uint64_t *flags, ui *hops = *hops + 1; + /* + * If we found a send turnstile, try to get the task that the turnstile's + * port is in the ipc space of + */ + if (turnstile_is_send_turnstile(ts)) { + task_t dest_task = TASK_NULL; + ipc_port_t port = (ipc_port_t)ts->ts_proprietor; + + if (port && ip_active(port)) { + if (ip_lock_held_kdp(port)) { + *flags |= STACKSHOT_TURNSTILE_STATUS_HELD_IPLOCK; + + return 0; + } else { + if (port->ip_receiver_name != 0) { + if (port->ip_receiver) { + ipc_space_t space = (ipc_space_t) port->ip_receiver; + + dest_task = space->is_task; + } else { + return 0; + } + } + } + } + + if (dest_task != TASK_NULL) { + *flags |= STACKSHOT_TURNSTILE_STATUS_BLOCKED_ON_TASK; + return pid_from_task(dest_task); + } + } + if (ts->ts_inheritor_flags & 
TURNSTILE_INHERITOR_TURNSTILE) { return kdp_turnstile_traverse_inheritor_chain(ts->ts_inheritor, flags, hops); } @@ -3185,6 +3241,25 @@ kdp_turnstile_traverse_inheritor_chain(struct turnstile *ts, uint64_t *flags, ui return VM_KERNEL_UNSLIDE_OR_PERM(ts->ts_inheritor); } + if (turnstile_is_receive_turnstile(ts)) { + ipc_port_t port = (ipc_port_t)ts->ts_proprietor; + if (port && ip_active(port)) { + if (ip_lock_held_kdp(port)) { + *flags |= STACKSHOT_TURNSTILE_STATUS_HELD_IPLOCK; + return 0; + } + if (port->ip_specialreply) { + /* try getting the pid stored in the port */ + uint64_t pid_candidate = ipc_special_reply_get_pid_locked(port); + + if (pid_candidate) { + *flags |= STACKSHOT_TURNSTILE_STATUS_BLOCKED_ON_TASK; + return pid_candidate; + } + } + } + } + *flags |= STACKSHOT_TURNSTILE_STATUS_UNKNOWN; return 0; } @@ -3265,7 +3340,7 @@ tstile_test_prim_init(struct tstile_test_prim **test_prim_ptr) test_prim->ttprim_turnstile = TURNSTILE_NULL; test_prim->ttprim_owner = NULL; - lck_spin_init(&test_prim->ttprim_interlock, &turnstiles_dev_lock_grp, &turnstiles_dev_lock_attr); + lck_spin_init(&test_prim->ttprim_interlock, &turnstiles_dev_lock_grp, LCK_ATTR_NULL); test_prim->tt_prim_waiters = 0; *test_prim_ptr = test_prim; diff --git a/osfmk/kern/turnstile.h b/osfmk/kern/turnstile.h index 6050fa3d9..8233fa96d 100644 --- a/osfmk/kern/turnstile.h +++ b/osfmk/kern/turnstile.h @@ -323,9 +323,9 @@ struct turnstile { struct turnstile_list ts_free_turnstiles; /* turnstile free list (IL) */ SLIST_ENTRY(turnstile) ts_free_elm; /* turnstile free list element (IL) */ }; - struct priority_queue ts_inheritor_queue; /* Queue of turnstile with us as an inheritor (WL) */ + struct priority_queue_sched_max ts_inheritor_queue; /* Queue of turnstile with us as an inheritor (WL) */ union { - struct priority_queue_entry ts_inheritor_links; /* Inheritor queue links */ + struct priority_queue_entry_sched ts_inheritor_links; /* Inheritor queue links */ struct mpsc_queue_chain ts_deallocate_link; /* thread deallocate link */ }; SLIST_ENTRY(turnstile) ts_htable_link; /* linkage for turnstile in global hash table */ diff --git a/osfmk/kern/waitq.c b/osfmk/kern/waitq.c index e408f029b..1674f379a 100644 --- a/osfmk/kern/waitq.c +++ b/osfmk/kern/waitq.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015-2016 Apple Inc. All rights reserved. + * Copyright (c) 2015-2020 Apple Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -66,6 +66,7 @@ #include #include #include +#include #include #include #include @@ -122,9 +123,16 @@ static kern_return_t waitq_select_thread_locked(struct waitq *waitq, event64_t event, thread_t thread, spl_t *spl); -#define WAITQ_SET_MAX (task_max * 3) -static zone_t waitq_set_zone; +ZONE_DECLARE(waitq_set_zone, "waitq sets", + sizeof(struct waitq_set), ZC_NOENCRYPT); +/* waitq prepost cache */ +#define WQP_CACHE_MAX 50 +struct wqp_cache { + uint64_t head; + unsigned int avail; +}; +static struct wqp_cache PERCPU_DATA(wqp_cache); #define P2ROUNDUP(x, align) (-(-((uint32_t)(x)) & -(align))) #define ROUNDDOWN(x, y) (((x)/(y))*(y)) @@ -134,7 +142,7 @@ static zone_t waitq_set_zone; static __inline__ void waitq_grab_backtrace(uintptr_t bt[NWAITQ_BTFRAMES], int skip); #endif -lck_grp_t waitq_lck_grp; +LCK_GRP_DECLARE(waitq_lck_grp, "waitq"); #if __arm64__ @@ -744,7 +752,7 @@ wq_prepost_refill_cpu_cache(uint32_t nalloc) } disable_preemption(); - cache = &PROCESSOR_DATA(current_processor(), wqp_cache); + cache = PERCPU_GET(wqp_cache); /* check once more before putting these elements on the list */ if (cache->avail >= WQP_CACHE_MAX) { @@ -776,14 +784,14 @@ wq_prepost_ensure_free_space(void) struct wqp_cache *cache; if (g_min_free_cache == 0) { - g_min_free_cache = (WQP_CACHE_MAX * ml_get_max_cpus()); + g_min_free_cache = (WQP_CACHE_MAX * ml_wait_max_cpus()); } /* * Ensure that we always have a pool of per-CPU prepost elements */ disable_preemption(); - cache = &PROCESSOR_DATA(current_processor(), wqp_cache); + cache = PERCPU_GET(wqp_cache); free_elem = cache->avail; enable_preemption(); @@ -828,7 +836,7 @@ wq_prepost_alloc(int type, int nelem) * allocating RESERVED elements */ disable_preemption(); - cache = &PROCESSOR_DATA(current_processor(), wqp_cache); + cache = PERCPU_GET(wqp_cache); if (nelem <= (int)cache->avail) { struct lt_elem *first, *next = NULL; int nalloc = nelem; @@ -1048,7 +1056,7 @@ wq_prepost_release_rlist(struct wq_prepost *wqp) * if our cache is running low. 
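
As an illustrative aside (not part of the patch): the PROCESSOR_DATA -> PERCPU_GET conversions in this hunk all follow one discipline: disable preemption, fetch this CPU's wqp_cache slot, finish with it, then re-enable preemption before anything that might block. A minimal standalone C model of that ordering is below; wqp_cache_slot, cur_cpu and the no-op preemption stubs are toy stand-ins, not the kernel's PERCPU machinery.

#include <stdint.h>
#include <stdio.h>

#define WQP_CACHE_MAX 50
#define TOY_NCPUS     4

struct wqp_cache {
	uint64_t     head;
	unsigned int avail;
};

/* Toy stand-ins for the kernel's per-CPU plumbing. */
static struct wqp_cache wqp_cache_slot[TOY_NCPUS];
static unsigned int cur_cpu;                 /* pretend "current CPU" */
static void disable_preemption(void) { }     /* kernel: really pins the thread to this CPU */
static void enable_preemption(void) { }

static unsigned int
wqp_cache_available(void)
{
	unsigned int avail;

	disable_preemption();                                   /* stay on one CPU's slot */
	struct wqp_cache *cache = &wqp_cache_slot[cur_cpu];     /* kernel: PERCPU_GET(wqp_cache) */
	avail = cache->avail;
	enable_preemption();                                    /* drop the slot before anything that can block */
	return avail;
}

int
main(void)
{
	wqp_cache_slot[cur_cpu].avail = 7;
	printf("prepost elements cached on this CPU: %u (cap %d)\n",
	    wqp_cache_available(), WQP_CACHE_MAX);
	return 0;
}
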
*/ disable_preemption(); - cache = &PROCESSOR_DATA(current_processor(), wqp_cache); + cache = PERCPU_GET(wqp_cache); if (cache->avail < WQP_CACHE_MAX) { struct lt_elem *tmp = NULL; if (cache->head != LT_IDX_MAX) { @@ -1121,14 +1129,14 @@ restart: /* the caller wants to remove the only prepost here */ assert(wqp_id == wqset->wqset_prepost_id); wqset->wqset_prepost_id = 0; - /* fall through */ + OS_FALLTHROUGH; case WQ_ITERATE_CONTINUE: wq_prepost_put(wqp); ret = WQ_ITERATE_SUCCESS; break; case WQ_ITERATE_RESTART: wq_prepost_put(wqp); - /* fall through */ + OS_FALLTHROUGH; case WQ_ITERATE_DROPPED: goto restart; default: @@ -1196,7 +1204,7 @@ restart: goto next_prepost; case WQ_ITERATE_RESTART: wq_prepost_put(wqp); - /* fall-through */ + OS_FALLTHROUGH; case WQ_ITERATE_DROPPED: /* the callback dropped the ref to wqp: just restart */ goto restart; @@ -1882,8 +1890,7 @@ waitq_thread_remove(struct waitq *wq, VM_KERNEL_UNSLIDE_OR_PERM(waitq_to_turnstile(wq)), thread_tid(thread), 0, 0, 0); - priority_queue_remove(&wq->waitq_prio_queue, &thread->wait_prioq_links, - PRIORITY_QUEUE_SCHED_PRI_MAX_HEAP_COMPARE); + priority_queue_remove(&wq->waitq_prio_queue, &thread->wait_prioq_links); } else { remqueue(&(thread->wait_links)); } @@ -1901,8 +1908,6 @@ waitq_bootstrap(void) } wqdbg("Minimum free table elements: %d", tmp32); - lck_grp_init(&waitq_lck_grp, "waitq", LCK_GRP_ATTR_NULL); - /* * Determine the amount of memory we're willing to reserve for * the waitqueue hash table @@ -1953,12 +1958,6 @@ waitq_bootstrap(void) waitq_init(&global_waitqs[i], SYNC_POLICY_FIFO | SYNC_POLICY_DISABLE_IRQ); } - waitq_set_zone = zinit(sizeof(struct waitq_set), - WAITQ_SET_MAX * sizeof(struct waitq_set), - sizeof(struct waitq_set), - "waitq sets"); - zone_change(waitq_set_zone, Z_NOENCRYPT, TRUE); - /* initialize the global waitq link table */ wql_init(); @@ -2150,13 +2149,13 @@ waitq_select_walk_cb(struct waitq *waitq, void *ctx, */ disable_preemption(); - os_atomic_inc(hook, relaxed); + os_atomic_add(hook, (uint16_t)1, relaxed); waitq_set_unlock(wqset); waitq_set__CALLING_PREPOST_HOOK__(hook); /* Note: after this decrement, the wqset may be deallocated */ - os_atomic_dec(hook, relaxed); + os_atomic_add(hook, (uint16_t)-1, relaxed); enable_preemption(); return ret; } @@ -2297,8 +2296,7 @@ waitq_prioq_iterate_locked(struct waitq *safeq, struct waitq *waitq, if (remove_op) { thread = priority_queue_remove_max(&safeq->waitq_prio_queue, - struct thread, wait_prioq_links, - PRIORITY_QUEUE_SCHED_PRI_MAX_HEAP_COMPARE); + struct thread, wait_prioq_links); } else { /* For the peek operation, the only valid value for max_threads is 1 */ assert(max_threads == 1); @@ -3096,7 +3094,7 @@ waitq_wakeup64_all_locked(struct waitq *waitq, assert_thread_magic(thread); remqueue(&thread->wait_links); maybe_adjust_thread_pri(thread, priority, waitq); - ret = thread_go(thread, result); + ret = thread_go(thread, result, WQ_OPTION_NONE); assert(ret == KERN_SUCCESS); thread_unlock(thread); } @@ -3124,7 +3122,8 @@ waitq_wakeup64_one_locked(struct waitq *waitq, wait_result_t result, uint64_t *reserved_preposts, int priority, - waitq_lock_state_t lock_state) + waitq_lock_state_t lock_state, + waitq_options_t option) { thread_t thread; spl_t th_spl; @@ -3147,7 +3146,7 @@ waitq_wakeup64_one_locked(struct waitq *waitq, if (thread != THREAD_NULL) { maybe_adjust_thread_pri(thread, priority, waitq); - kern_return_t ret = thread_go(thread, result); + kern_return_t ret = thread_go(thread, result, option); assert(ret == KERN_SUCCESS); thread_unlock(thread); 
splx(th_spl); @@ -3198,7 +3197,7 @@ waitq_wakeup64_identify_locked(struct waitq *waitq, if (thread != THREAD_NULL) { kern_return_t __assert_only ret; - ret = thread_go(thread, result); + ret = thread_go(thread, result, WQ_OPTION_NONE); assert(ret == KERN_SUCCESS); } @@ -3252,7 +3251,7 @@ waitq_wakeup64_thread_locked(struct waitq *waitq, return KERN_NOT_WAITING; } - ret = thread_go(thread, result); + ret = thread_go(thread, result, WQ_OPTION_NONE); assert(ret == KERN_SUCCESS); thread_unlock(thread); splx(th_spl); @@ -3298,8 +3297,7 @@ waitq_init(struct waitq *waitq, int policy) waitq_lock_init(waitq); if (waitq_is_turnstile_queue(waitq)) { /* For turnstile, initialize it as a priority queue */ - priority_queue_init(&waitq->waitq_prio_queue, - PRIORITY_QUEUE_BUILTIN_MAX_HEAP); + priority_queue_init(&waitq->waitq_prio_queue); assert(waitq->waitq_fifo == 0); } else if (policy & SYNC_POLICY_TURNSTILE_PROXY) { waitq->waitq_ts = TURNSTILE_NULL; @@ -4946,7 +4944,7 @@ waitq_alloc_prepost_reservation(int nalloc, struct waitq *waitq, */ if (waitq) { disable_preemption(); - cache = &PROCESSOR_DATA(current_processor(), wqp_cache); + cache = PERCPU_GET(wqp_cache); if (nalloc <= (int)cache->avail) { goto do_alloc; } @@ -5546,7 +5544,7 @@ waitq_wakeup64_one(struct waitq *waitq, event64_t wake_event, /* waitq is locked upon return */ kr = waitq_wakeup64_one_locked(waitq, wake_event, result, - &reserved_preposts, priority, WAITQ_UNLOCK); + &reserved_preposts, priority, WAITQ_UNLOCK, WQ_OPTION_NONE); if (waitq_irq_safe(waitq)) { splx(spl); @@ -5638,7 +5636,7 @@ waitq_wakeup64_thread(struct waitq *waitq, waitq_unlock(waitq); if (ret == KERN_SUCCESS) { - ret = thread_go(thread, result); + ret = thread_go(thread, result, WQ_OPTION_NONE); assert(ret == KERN_SUCCESS); thread_unlock(thread); splx(th_spl); diff --git a/osfmk/kern/waitq.h b/osfmk/kern/waitq.h index efbdcc883..dd90a09ba 100644 --- a/osfmk/kern/waitq.h +++ b/osfmk/kern/waitq.h @@ -106,6 +106,11 @@ enum waitq_type { WQT_SET = 0x3, }; +__options_decl(waitq_options_t, uint32_t, { + WQ_OPTION_NONE = 0, + WQ_OPTION_HANDOFF = 1, +}); + #if CONFIG_WAITQ_STATS #define NWAITQ_BTFRAMES 5 struct wq_stats { @@ -154,11 +159,16 @@ struct waitq { uint64_t waitq_set_id; uint64_t waitq_prepost_id; union { - queue_head_t waitq_queue; /* queue of elements */ - struct priority_queue waitq_prio_queue; /* priority ordered queue of elements */ - struct { - struct turnstile *waitq_ts; /* turnstile for WQT_TSPROXY */ - void *waitq_tspriv; /* private field for clients use */ + queue_head_t waitq_queue; /* queue of elements - used for waitq not embedded in turnstile or ports */ + struct priority_queue_sched_max waitq_prio_queue; /* priority ordered queue of elements - used for waitqs embedded in turnstiles */ + struct { /* used for waitqs embedded in ports */ + struct turnstile *waitq_ts; /* used to store receive turnstile of the port */ + union { + void *waitq_tspriv; /* non special-reply port, used to store the watchport element for port used to store + * receive turnstile of the port */ + int waitq_priv_pid; /* special-reply port, used to store the pid that copies out the send once right of the + * special-reply port. 
*/ + }; }; }; }; @@ -276,7 +286,8 @@ extern kern_return_t waitq_wakeup64_one_locked(struct waitq *waitq, wait_result_t result, uint64_t *reserved_preposts, int priority, - waitq_lock_state_t lock_state); + waitq_lock_state_t lock_state, + waitq_options_t options); /* return identity of a thread awakened for a particular */ extern thread_t diff --git a/osfmk/kern/work_interval.c b/osfmk/kern/work_interval.c index ed14fe308..dd574a0b7 100644 --- a/osfmk/kern/work_interval.c +++ b/osfmk/kern/work_interval.c @@ -39,12 +39,87 @@ #include #include #include +#include #include #include +#include #include +/* + * With the introduction of auto-join work intervals, it is possible + * to change the work interval (and related thread group) of a thread in a + * variety of contexts (thread termination, context switch, thread mode + * change etc.). In order to clearly specify the policy expectation and + * the locking behavior, all calls to thread_set_work_interval() pass + * in a set of flags. + */ + +__options_decl(thread_work_interval_options_t, uint32_t, { + /* Change the work interval using the explicit join rules */ + THREAD_WI_EXPLICIT_JOIN_POLICY = 0x1, + /* Change the work interval using the auto-join rules */ + THREAD_WI_AUTO_JOIN_POLICY = 0x2, + /* Caller already holds the thread lock */ + THREAD_WI_THREAD_LOCK_HELD = 0x4, + /* Caller does not hold the thread lock */ + THREAD_WI_THREAD_LOCK_NEEDED = 0x8, + /* Change the work interval from the context switch path (thread may not be running or on a runq) */ + THREAD_WI_THREAD_CTX_SWITCH = 0x10, +}); + +static kern_return_t thread_set_work_interval(thread_t, struct work_interval *, thread_work_interval_options_t); + +#if CONFIG_SCHED_AUTO_JOIN +/* MPSC queue used to defer deallocate work intervals */ +static struct mpsc_daemon_queue work_interval_deallocate_queue; + +static void work_interval_deferred_release(struct work_interval *); + +/* + * Work Interval Auto-Join Status + * + * work_interval_auto_join_status_t represents the state of auto-join for a given work interval. + * It packs the following information: + * - A bit representing if a "finish" is deferred on the work interval + * - Count of number of threads auto-joined to the work interval + */ +#define WORK_INTERVAL_STATUS_DEFERRED_FINISH_MASK ((uint32_t)(1 << 31)) +#define WORK_INTERVAL_STATUS_AUTO_JOIN_COUNT_MASK ((uint32_t)(WORK_INTERVAL_STATUS_DEFERRED_FINISH_MASK - 1)) +#define WORK_INTERVAL_STATUS_AUTO_JOIN_COUNT_MAX WORK_INTERVAL_STATUS_AUTO_JOIN_COUNT_MASK +typedef uint32_t work_interval_auto_join_status_t; + +static inline bool __unused +work_interval_status_deferred_finish(work_interval_auto_join_status_t status) +{ + return (status & WORK_INTERVAL_STATUS_DEFERRED_FINISH_MASK) ? true : false; +} + +static inline uint32_t __unused +work_interval_status_auto_join_count(work_interval_auto_join_status_t status) +{ + return (uint32_t)(status & WORK_INTERVAL_STATUS_AUTO_JOIN_COUNT_MASK); +} + +/* + * struct work_interval_deferred_finish_state + * + * Contains the parameters of the finish operation which is being deferred. 
+ */ +struct work_interval_deferred_finish_state { + uint64_t instance_id; + uint64_t start; + uint64_t deadline; + uint64_t complexity; +}; + +struct work_interval_auto_join_info { + struct work_interval_deferred_finish_state deferred_finish_state; + work_interval_auto_join_status_t _Atomic status; +}; +#endif /* CONFIG_SCHED_AUTO_JOIN */ + /* * Work Interval structs * @@ -66,7 +141,7 @@ struct work_interval { uint64_t wi_id; - _Atomic uint32_t wi_ref_count; + struct os_refcnt wi_ref_count; uint32_t wi_create_flags; /* for debugging purposes only, does not hold a ref on port */ @@ -81,29 +156,321 @@ struct work_interval { uint32_t wi_creator_pid; int wi_creator_pidversion; +#if CONFIG_THREAD_GROUPS + struct thread_group *wi_group; /* holds +1 ref on group */ +#endif /* CONFIG_THREAD_GROUPS */ + +#if CONFIG_SCHED_AUTO_JOIN + /* Information related to auto-join and deferred finish for work interval */ + struct work_interval_auto_join_info wi_auto_join_info; + + /* + * Since the deallocation of auto-join work intervals + * can happen in the scheduler when the last thread in + * the WI blocks and the thread lock is held, the deallocation + * might have to be done on a separate thread. + */ + struct mpsc_queue_chain wi_deallocate_link; +#endif /* CONFIG_SCHED_AUTO_JOIN */ }; +#if CONFIG_SCHED_AUTO_JOIN + +/* + * work_interval_perform_deferred_finish() + * + * Perform a deferred finish for a work interval. The routine accepts the deferred_finish_state as an + * argument rather than looking at the work_interval since the deferred finish can race with another + * start-finish cycle. To address that, the caller ensures that it gets a consistent snapshot of the + * deferred state before calling this routine. This allows the racing start-finish cycle to overwrite + * the deferred state without issues. + */ +static inline void +work_interval_perform_deferred_finish(__unused struct work_interval_deferred_finish_state *deferred_finish_state, + __unused struct work_interval *work_interval, __unused thread_t thread) +{ + + KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_WI_DEFERRED_FINISH), + thread_tid(thread), thread_group_get_id(work_interval->wi_group)); +} + +/* + * work_interval_auto_join_increment() + * + * Routine to increment auto-join counter when a new thread is auto-joined to + * the work interval. + */ +static void +work_interval_auto_join_increment(struct work_interval *work_interval) +{ + struct work_interval_auto_join_info *join_info = &work_interval->wi_auto_join_info; + __assert_only work_interval_auto_join_status_t old_status = os_atomic_add_orig(&join_info->status, 1, relaxed); + assert(work_interval_status_auto_join_count(old_status) < WORK_INTERVAL_STATUS_AUTO_JOIN_COUNT_MAX); +} + +/* + * work_interval_auto_join_decrement() + * + * Routine to decrement the auto-join counter when a thread unjoins the work interval (due to + * blocking or termination). If this was the last auto-joined thread in the work interval and + * there was a deferred finish, performs the finish operation for the work interval. 
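
As an illustrative aside, the status word defined above packs a deferred-finish flag into bit 31 and the auto-joined thread count into the low 31 bits, so both facts can be read or updated with a single atomic access. A minimal standalone C sketch of the encoding follows; it reuses the mask values from the patch, while main() and the local helper names are illustrative only.

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint32_t work_interval_auto_join_status_t;

#define WORK_INTERVAL_STATUS_DEFERRED_FINISH_MASK  ((uint32_t)(1u << 31))
#define WORK_INTERVAL_STATUS_AUTO_JOIN_COUNT_MASK  ((uint32_t)(WORK_INTERVAL_STATUS_DEFERRED_FINISH_MASK - 1))

/* Same decoding as the kernel helpers, just outside the kernel. */
static bool
status_deferred_finish(work_interval_auto_join_status_t status)
{
	return (status & WORK_INTERVAL_STATUS_DEFERRED_FINISH_MASK) != 0;
}

static uint32_t
status_auto_join_count(work_interval_auto_join_status_t status)
{
	return status & WORK_INTERVAL_STATUS_AUTO_JOIN_COUNT_MASK;
}

int
main(void)
{
	/* Two threads auto-joined and a finish deferred: one 32-bit word holds both facts. */
	work_interval_auto_join_status_t status =
	    WORK_INTERVAL_STATUS_DEFERRED_FINISH_MASK | 2;

	assert(status_deferred_finish(status));
	assert(status_auto_join_count(status) == 2);
	printf("deferred finish: %d, auto-joined threads: %u\n",
	    (int)status_deferred_finish(status), status_auto_join_count(status));
	return 0;
}
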
+ */ +static void +work_interval_auto_join_decrement(struct work_interval *work_interval, thread_t thread) +{ + struct work_interval_auto_join_info *join_info = &work_interval->wi_auto_join_info; + work_interval_auto_join_status_t old_status, new_status; + struct work_interval_deferred_finish_state deferred_finish_state; + bool perform_finish; + + /* Update the auto-join count for the work interval atomically */ + os_atomic_rmw_loop(&join_info->status, old_status, new_status, acquire, { + perform_finish = false; + new_status = old_status; + assert(work_interval_status_auto_join_count(old_status) > 0); + new_status -= 1; + if (new_status == WORK_INTERVAL_STATUS_DEFERRED_FINISH_MASK) { + /* No auto-joined threads remaining and finish is deferred */ + new_status = 0; + perform_finish = true; + /* + * Its important to copy the deferred finish state here so that this works + * when racing with another start-finish cycle. + */ + deferred_finish_state = join_info->deferred_finish_state; + } + }); + + if (perform_finish == true) { + /* + * Since work_interval_perform_deferred_finish() calls down to + * the machine layer callout for finish which gets the thread + * group from the thread passed in here, it is important to + * make sure that the thread still has the work interval thread + * group here. + */ + assert(thread->thread_group == work_interval->wi_group); + work_interval_perform_deferred_finish(&deferred_finish_state, work_interval, thread); + } +} + +/* + * work_interval_auto_join_enabled() + * + * Helper routine to check if work interval has auto-join enabled. + */ +static inline bool +work_interval_auto_join_enabled(struct work_interval *work_interval) +{ + return (work_interval->wi_create_flags & WORK_INTERVAL_FLAG_ENABLE_AUTO_JOIN) != 0; +} + +/* + * work_interval_deferred_finish_enabled() + * + * Helper routine to check if work interval has deferred finish enabled. + */ +static inline bool __unused +work_interval_deferred_finish_enabled(struct work_interval *work_interval) +{ + return (work_interval->wi_create_flags & WORK_INTERVAL_FLAG_ENABLE_DEFERRED_FINISH) != 0; +} + +#endif /* CONFIG_SCHED_AUTO_JOIN */ + static inline void -wi_retain(struct work_interval *work_interval) +work_interval_retain(struct work_interval *work_interval) { - uint32_t old_count; - old_count = atomic_fetch_add_explicit(&work_interval->wi_ref_count, - 1, memory_order_relaxed); - assert(old_count > 0); + /* + * Even though wi_retain is called under a port lock, we have + * to use os_ref_retain instead of os_ref_retain_locked + * because wi_release is not synchronized. wi_release calls + * os_ref_release which is unsafe to pair with os_ref_retain_locked. + */ + os_ref_retain(&work_interval->wi_ref_count); } static inline void -wi_release(struct work_interval *work_interval) +work_interval_deallocate(struct work_interval *work_interval) +{ + KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_WORKGROUP, WORKGROUP_INTERVAL_DESTROY), + work_interval->wi_id); +#if CONFIG_THREAD_GROUPS + thread_group_release(work_interval->wi_group); + work_interval->wi_group = NULL; +#endif /* CONFIG_THREAD_GROUPS */ + kfree(work_interval, sizeof(struct work_interval)); +} + +/* + * work_interval_release() + * + * Routine to release a ref count on the work interval. If the refcount goes down + * to zero, the work interval needs to be de-allocated. + * + * For non auto-join work intervals, they are de-allocated in this context. 
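
A standalone sketch of the decrement logic above, using C11 atomics in place of os_atomic_rmw_loop(): after subtracting one auto-joined thread, a value equal to exactly the deferred-finish bit means the count hit zero while a finish was pending, so the status is cleared and the caller performs the deferred finish. The kernel version additionally snapshots deferred_finish_state inside the loop and asserts the count was nonzero; this toy omits the snapshot.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DEFERRED_FINISH_MASK ((uint32_t)(1u << 31))

/*
 * Simplified model: drop the auto-join count by one and detect "last thread
 * left while a finish was deferred", in which case the whole status word is
 * cleared and the caller must run the deferred finish now.
 */
static bool
auto_join_decrement(_Atomic uint32_t *status_p)
{
	uint32_t old_status = atomic_load_explicit(status_p, memory_order_relaxed);
	uint32_t new_status;
	bool perform_finish;

	do {
		perform_finish = false;
		new_status = old_status - 1;            /* one fewer auto-joined thread */
		if (new_status == DEFERRED_FINISH_MASK) {
			/* count reached zero with a finish pending */
			new_status = 0;
			perform_finish = true;
		}
	} while (!atomic_compare_exchange_weak_explicit(status_p, &old_status,
	    new_status, memory_order_acquire, memory_order_relaxed));

	return perform_finish;
}

int
main(void)
{
	_Atomic uint32_t status = DEFERRED_FINISH_MASK | 1;   /* one thread, finish deferred */

	printf("run deferred finish now? %s\n",
	    auto_join_decrement(&status) ? "yes" : "no");
	return 0;
}
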
+ * + * For auto-join work intervals, the de-allocation cannot be done from this context + * since that might need the kernel memory allocator lock. In that case, the + * deallocation is done via a thread-call based mpsc queue. + */ +static void +work_interval_release(struct work_interval *work_interval, __unused thread_work_interval_options_t options) +{ + if (os_ref_release(&work_interval->wi_ref_count) == 0) { +#if CONFIG_SCHED_AUTO_JOIN + if (options & THREAD_WI_THREAD_LOCK_HELD) { + work_interval_deferred_release(work_interval); + } else { + work_interval_deallocate(work_interval); + } +#else /* CONFIG_SCHED_AUTO_JOIN */ + work_interval_deallocate(work_interval); +#endif /* CONFIG_SCHED_AUTO_JOIN */ + } +} + +#if CONFIG_SCHED_AUTO_JOIN + +/* + * work_interval_deferred_release() + * + * Routine to enqueue the work interval on the deallocation mpsc queue. + */ +static void +work_interval_deferred_release(struct work_interval *work_interval) { - uint32_t old_count; - old_count = atomic_fetch_sub_explicit(&work_interval->wi_ref_count, - 1, memory_order_relaxed); - assert(old_count > 0); + mpsc_daemon_enqueue(&work_interval_deallocate_queue, + &work_interval->wi_deallocate_link, MPSC_QUEUE_NONE); +} + +/* + * work_interval_should_propagate() + * + * Main policy routine to decide if a thread should be auto-joined to + * another thread's work interval. The conditions are arranged such that + * the most common bailout condition are checked the earliest. This routine + * is called from the scheduler context; so it needs to be efficient and + * be careful when taking locks or performing wakeups. + */ +inline bool +work_interval_should_propagate(thread_t cthread, thread_t thread) +{ + /* Only allow propagation if the current thread has a work interval and the woken up thread does not */ + if ((cthread->th_work_interval == NULL) || (thread->th_work_interval != NULL)) { + return false; + } + + /* Only propagate work intervals which have auto-join enabled */ + if (work_interval_auto_join_enabled(cthread->th_work_interval) == false) { + return false; + } + + /* Work interval propagation is enabled for realtime threads only */ + if ((cthread->sched_mode != TH_MODE_REALTIME) || (thread->sched_mode != TH_MODE_REALTIME)) { + return false; + } - if (old_count == 1) { - kfree(work_interval, sizeof(struct work_interval)); + /* Work interval propagation only works for threads with the same home thread group */ + struct thread_group *thread_home_tg = thread_group_get_home_group(thread); + if (thread_group_get_home_group(cthread) != thread_home_tg) { + return false; } + + /* If woken up thread has adopted vouchers and other thread groups, it does not get propagation */ + if (thread->thread_group != thread_home_tg) { + return false; + } + + /* If either thread is inactive (in the termination path), do not propagate auto-join */ + if ((!cthread->active) || (!thread->active)) { + return false; + } + + return true; +} + +/* + * work_interval_auto_join_propagate() + * + * Routine to auto-join a thread into another thread's work interval + * + * Should only be invoked if work_interval_should_propagate() returns + * true. Also expects "from" thread to be current thread and "to" thread + * to be locked. 
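
Per the surrounding comments, the intended call pattern at wakeup is to ask work_interval_should_propagate() first and, only if it returns true, call work_interval_auto_join_propagate() with the woken thread locked. The toy model below restates the bailout checks in standalone C; struct toy_thread and its fields are illustrative stand-ins, and the real function also requires the waker's work interval to have auto-join enabled and compares actual thread group pointers.

#include <stdbool.h>
#include <stdio.h>

struct toy_thread {
	const void *work_interval;   /* NULL when not joined to a work interval */
	bool        realtime;        /* TH_MODE_REALTIME in the kernel */
	bool        active;          /* false once the thread is terminating */
	int         home_group;      /* stand-in for the home thread group */
	int         cur_group;       /* stand-in for the currently adopted group */
};

static bool
toy_should_propagate(const struct toy_thread *waker, const struct toy_thread *wakee)
{
	if (waker->work_interval == NULL || wakee->work_interval != NULL) {
		return false;        /* waker must have a WI, wakee must not */
	}
	if (!waker->realtime || !wakee->realtime) {
		return false;        /* propagation is for realtime threads only */
	}
	if (waker->home_group != wakee->home_group ||
	    wakee->cur_group != wakee->home_group) {
		return false;        /* same home group, and wakee not in an adopted group */
	}
	if (!waker->active || !wakee->active) {
		return false;        /* neither thread may be terminating */
	}
	return true;
}

int
main(void)
{
	struct toy_thread waker = { .work_interval = &waker, .realtime = true,
	                            .active = true, .home_group = 1, .cur_group = 1 };
	struct toy_thread wakee = { .work_interval = NULL, .realtime = true,
	                            .active = true, .home_group = 1, .cur_group = 1 };

	printf("auto-join propagates to wakee: %s\n",
	    toy_should_propagate(&waker, &wakee) ? "yes" : "no");
	return 0;
}
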
+ */ +void +work_interval_auto_join_propagate(thread_t from, thread_t to) +{ + assert(from == current_thread()); + work_interval_retain(from->th_work_interval); + work_interval_auto_join_increment(from->th_work_interval); + __assert_only kern_return_t kr = thread_set_work_interval(to, from->th_work_interval, + THREAD_WI_AUTO_JOIN_POLICY | THREAD_WI_THREAD_LOCK_HELD | THREAD_WI_THREAD_CTX_SWITCH); + assert(kr == KERN_SUCCESS); +} + +/* + * work_interval_auto_join_unwind() + * + * Routine to un-join an auto-joined work interval for a thread that is blocking. + * + * Expects thread to be locked. + */ +void +work_interval_auto_join_unwind(thread_t thread) +{ + __assert_only kern_return_t kr = thread_set_work_interval(thread, NULL, + THREAD_WI_AUTO_JOIN_POLICY | THREAD_WI_THREAD_LOCK_HELD | THREAD_WI_THREAD_CTX_SWITCH); + assert(kr == KERN_SUCCESS); +} + +/* + * work_interval_auto_join_demote() + * + * Routine to un-join an auto-joined work interval when a thread is changing from + * realtime to non-realtime scheduling mode. This could happen due to multiple + * reasons such as RT failsafe, thread backgrounding or thread termination. Also, + * the thread being demoted may not be the current thread. + * + * Expects thread to be locked. + */ +void +work_interval_auto_join_demote(thread_t thread) +{ + __assert_only kern_return_t kr = thread_set_work_interval(thread, NULL, + THREAD_WI_AUTO_JOIN_POLICY | THREAD_WI_THREAD_LOCK_HELD); + assert(kr == KERN_SUCCESS); +} + +static void +work_interval_deallocate_queue_invoke(mpsc_queue_chain_t e, + __assert_only mpsc_daemon_queue_t dq) +{ + struct work_interval *work_interval = NULL; + work_interval = mpsc_queue_element(e, struct work_interval, wi_deallocate_link); + assert(dq == &work_interval_deallocate_queue); + assert(os_ref_get_count(&work_interval->wi_ref_count) == 0); + work_interval_deallocate(work_interval); +} + +#endif /* CONFIG_SCHED_AUTO_JOIN */ + +void +work_interval_subsystem_init(void) +{ +#if CONFIG_SCHED_AUTO_JOIN + /* + * The work interval deallocation queue must be a thread call based queue + * because it is woken up from contexts where the thread lock is held. The + * only way to perform wakeups safely in those contexts is to wakeup a + * thread call which is guaranteed to be on a different waitq and would + * not hash onto the same global waitq which might be currently locked. + */ + mpsc_daemon_queue_init_with_thread_call(&work_interval_deallocate_queue, + work_interval_deallocate_queue_invoke, THREAD_CALL_PRIORITY_KERNEL); +#endif /* CONFIG_SCHED_AUTO_JOIN */ } /* @@ -131,7 +498,7 @@ work_interval_port_convert_locked(ipc_port_t port) work_interval = (struct work_interval *) ip_get_kobject(port); - wi_retain(work_interval); + work_interval_retain(work_interval); return work_interval; } @@ -241,44 +608,189 @@ work_interval_port_notify(mach_msg_header_t *msg) ip_unlock(port); ipc_port_dealloc_kernel(port); - wi_release(work_interval); + work_interval_release(work_interval, THREAD_WI_THREAD_LOCK_NEEDED); } /* + * work_interval_port_type() + * + * Converts a port name into the work interval object and returns its type. + * + * For invalid ports, it returns WORK_INTERVAL_TYPE_LAST (which is not a + * valid type for work intervals). 
+ */ +static uint32_t +work_interval_port_type(mach_port_name_t port_name) +{ + struct work_interval *work_interval = NULL; + kern_return_t kr; + uint32_t work_interval_type; + + if (port_name == MACH_PORT_NULL) { + return WORK_INTERVAL_TYPE_LAST; + } + + kr = port_name_to_work_interval(port_name, &work_interval); + if (kr != KERN_SUCCESS) { + return WORK_INTERVAL_TYPE_LAST; + } + /* work_interval has a +1 ref */ + + assert(work_interval != NULL); + work_interval_type = work_interval->wi_create_flags & WORK_INTERVAL_TYPE_MASK; + work_interval_release(work_interval, THREAD_WI_THREAD_LOCK_NEEDED); + return work_interval_type; +} + + +/* + * thread_set_work_interval() + * * Change thread's bound work interval to the passed-in work interval - * Consumes +1 ref on work_interval + * Consumes +1 ref on work_interval upon success. * * May also pass NULL to un-set work_interval on the thread - * * Will deallocate any old work interval on the thread + * Return error if thread does not satisfy requirements to join work interval + * + * For non auto-join work intervals, deallocate any old work interval on the thread + * For auto-join work intervals, the routine may wakeup the work interval deferred + * deallocation queue since thread locks might be currently held. */ -static void +static kern_return_t thread_set_work_interval(thread_t thread, - struct work_interval *work_interval) + struct work_interval *work_interval, thread_work_interval_options_t options) { - assert(thread == current_thread()); + /* All explicit work interval operations should always be from the current thread */ + if (options & THREAD_WI_EXPLICIT_JOIN_POLICY) { + assert(thread == current_thread()); + } + + /* All cases of needing the thread lock should be from explicit join scenarios */ + if (options & THREAD_WI_THREAD_LOCK_NEEDED) { + assert((options & THREAD_WI_EXPLICIT_JOIN_POLICY) != 0); + } + + /* For all cases of auto join must come in with the thread lock held */ + if (options & THREAD_WI_AUTO_JOIN_POLICY) { + assert((options & THREAD_WI_THREAD_LOCK_HELD) != 0); + } + + if (work_interval) { + uint32_t work_interval_type = work_interval->wi_create_flags & WORK_INTERVAL_TYPE_MASK; + + if ((work_interval_type == WORK_INTERVAL_TYPE_COREAUDIO) && + (thread->sched_mode != TH_MODE_REALTIME) && (thread->saved_mode != TH_MODE_REALTIME)) { + return KERN_INVALID_ARGUMENT; + } + } struct work_interval *old_th_wi = thread->th_work_interval; +#if CONFIG_SCHED_AUTO_JOIN + bool old_wi_auto_joined = ((thread->sched_flags & TH_SFLAG_THREAD_GROUP_AUTO_JOIN) != 0); + + spl_t s; + /* Take the thread lock if needed */ + if (options & THREAD_WI_THREAD_LOCK_NEEDED) { + s = splsched(); + thread_lock(thread); + } + + /* + * Work interval auto-join leak to non-RT threads. + * + * If thread might be running on a remote core and it's not in the context switch path (where + * thread is neither running, blocked or in the runq), its not possible to update the + * work interval & thread group remotely since its not possible to update CLPC for a remote + * core. This situation might happen when a thread is transitioning from realtime to + * non-realtime due to backgrounding etc., which would mean that non-RT threads would now + * be part of the work interval. + * + * Since there is no immediate mitigation to this issue, the policy is to set a new + * flag on the thread which indicates that such a "leak" has happened. This flag will + * be cleared when the remote thread eventually blocks and unjoins from the work interval. 
+ */ + bool thread_on_remote_core = ((thread != current_thread()) && (thread->state & TH_RUN) && (thread->runq == PROCESSOR_NULL)); + + if (thread_on_remote_core && ((options & THREAD_WI_THREAD_CTX_SWITCH) == 0)) { + assert((options & THREAD_WI_THREAD_LOCK_NEEDED) == 0); + os_atomic_or(&thread->th_work_interval_flags, TH_WORK_INTERVAL_FLAGS_AUTO_JOIN_LEAK, relaxed); + return KERN_SUCCESS; + } + + old_wi_auto_joined = ((thread->sched_flags & TH_SFLAG_THREAD_GROUP_AUTO_JOIN) != 0); + + if ((options & THREAD_WI_AUTO_JOIN_POLICY) || old_wi_auto_joined) { + __kdebug_only uint64_t old_tg_id = (old_th_wi) ? thread_group_get_id(old_th_wi->wi_group) : ~0; + __kdebug_only uint64_t new_tg_id = (work_interval) ? thread_group_get_id(work_interval->wi_group) : ~0; + KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_WI_AUTO_JOIN), + thread_tid(thread), old_tg_id, new_tg_id, options); + } + + if (old_wi_auto_joined) { + /* + * If thread was auto-joined to a work interval and is not realtime, make sure it + * happened due to the "leak" described above. + */ + if (thread->sched_mode != TH_MODE_REALTIME) { + assert((thread->th_work_interval_flags & TH_WORK_INTERVAL_FLAGS_AUTO_JOIN_LEAK) != 0); + } + + os_atomic_andnot(&thread->th_work_interval_flags, TH_WORK_INTERVAL_FLAGS_AUTO_JOIN_LEAK, relaxed); + work_interval_auto_join_decrement(old_th_wi, thread); + thread->sched_flags &= ~TH_SFLAG_THREAD_GROUP_AUTO_JOIN; + } + +#endif /* CONFIG_SCHED_AUTO_JOIN */ + + KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_WORKGROUP, WORKGROUP_INTERVAL_CHANGE), + thread_tid(thread), (old_th_wi ? old_th_wi->wi_id : 0), (work_interval ? work_interval->wi_id : 0), !!(options & THREAD_WI_AUTO_JOIN_POLICY)); /* transfer +1 ref to thread */ thread->th_work_interval = work_interval; +#if CONFIG_SCHED_AUTO_JOIN + + if ((options & THREAD_WI_AUTO_JOIN_POLICY) && work_interval) { + assert(work_interval_auto_join_enabled(work_interval) == true); + thread->sched_flags |= TH_SFLAG_THREAD_GROUP_AUTO_JOIN; + } + + if (options & THREAD_WI_THREAD_LOCK_NEEDED) { + thread_unlock(thread); + splx(s); + } +#endif /* CONFIG_SCHED_AUTO_JOIN */ + +#if CONFIG_THREAD_GROUPS + struct thread_group *new_tg = (work_interval) ? 
(work_interval->wi_group) : NULL; + thread_set_work_interval_thread_group(thread, new_tg, (options & THREAD_WI_AUTO_JOIN_POLICY)); +#endif /* CONFIG_THREAD_GROUPS */ if (old_th_wi != NULL) { - wi_release(old_th_wi); + work_interval_release(old_th_wi, options); } + + return KERN_SUCCESS; } -void +static kern_return_t +thread_set_work_interval_explicit_join(thread_t thread, struct work_interval *work_interval) +{ + assert(thread == current_thread()); + return thread_set_work_interval(thread, work_interval, THREAD_WI_EXPLICIT_JOIN_POLICY | THREAD_WI_THREAD_LOCK_NEEDED); +} + +kern_return_t work_interval_thread_terminate(thread_t thread) { + assert(thread == current_thread()); if (thread->th_work_interval != NULL) { - thread_set_work_interval(thread, NULL); + return thread_set_work_interval(thread, NULL, THREAD_WI_EXPLICIT_JOIN_POLICY | THREAD_WI_THREAD_LOCK_NEEDED); } + return KERN_SUCCESS; } - - kern_return_t kern_work_interval_notify(thread_t thread, struct kern_work_interval_args* kwi_args) { @@ -303,9 +815,12 @@ kern_work_interval_notify(thread_t thread, struct kern_work_interval_args* kwi_a spl_t s = splsched(); +#if CONFIG_THREAD_GROUPS + assert(work_interval->wi_group == thread->thread_group); +#endif /* CONFIG_THREAD_GROUPS */ uint64_t urgency_param1, urgency_param2; - kwi_args->urgency = thread_get_urgency(thread, &urgency_param1, &urgency_param2); + kwi_args->urgency = (uint16_t)thread_get_urgency(thread, &urgency_param1, &urgency_param2); splx(s); @@ -324,26 +839,23 @@ kern_work_interval_create(thread_t thread, { assert(thread == current_thread()); - if (thread->th_work_interval != NULL) { - /* already assigned a work interval */ - return KERN_FAILURE; - } - - struct work_interval *work_interval = kalloc(sizeof(*work_interval)); + uint32_t create_flags = create_params->wica_create_flags; - if (work_interval == NULL) { - panic("failed to allocate work_interval"); + if (((create_flags & WORK_INTERVAL_FLAG_JOINABLE) == 0) && + thread->th_work_interval != NULL) { + /* + * If the thread is doing a legacy combined create and join, + * it shouldn't already be part of a work interval. + * + * (Creating a joinable WI is allowed anytime.) + */ + return KERN_FAILURE; } - bzero(work_interval, sizeof(*work_interval)); - - uint64_t old_value = atomic_fetch_add_explicit(&unique_work_interval_id, 1, - memory_order_relaxed); - - uint64_t work_interval_id = old_value + 1; - - uint32_t create_flags = create_params->wica_create_flags; - + /* + * Check the validity of the create flags before allocating the work + * interval. + */ task_t creating_task = current_task(); if ((create_flags & WORK_INTERVAL_TYPE_MASK) == WORK_INTERVAL_TYPE_CA_CLIENT) { /* @@ -355,22 +867,88 @@ kern_work_interval_create(thread_t thread, return KERN_FAILURE; } if (!task_is_app(creating_task)) { +#if XNU_TARGET_OS_OSX + /* + * Soft-fail the case of a non-app pretending to be an + * app, by allowing it to press the buttons, but they're + * not actually connected to anything. + */ + create_flags |= WORK_INTERVAL_FLAG_IGNORED; +#else + /* + * On iOS, it's a hard failure to get your apptype + * wrong and then try to render something. 
+ */ return KERN_NOT_SUPPORTED; +#endif /* XNU_TARGET_OS_OSX */ } if (task_set_ca_client_wi(creating_task, true) == false) { return KERN_FAILURE; } } +#if CONFIG_SCHED_AUTO_JOIN + if (create_flags & WORK_INTERVAL_FLAG_ENABLE_AUTO_JOIN) { + uint32_t type = (create_flags & WORK_INTERVAL_TYPE_MASK); + if (type != WORK_INTERVAL_TYPE_COREAUDIO) { + return KERN_NOT_SUPPORTED; + } + if ((create_flags & WORK_INTERVAL_FLAG_GROUP) == 0) { + return KERN_NOT_SUPPORTED; + } + } + + if (create_flags & WORK_INTERVAL_FLAG_ENABLE_DEFERRED_FINISH) { + if ((create_flags & WORK_INTERVAL_FLAG_ENABLE_AUTO_JOIN) == 0) { + return KERN_NOT_SUPPORTED; + } + } +#endif /* CONFIG_SCHED_AUTO_JOIN */ + + struct work_interval *work_interval = kalloc_flags(sizeof(*work_interval), + Z_WAITOK | Z_ZERO); + assert(work_interval != NULL); + + uint64_t work_interval_id = os_atomic_inc(&unique_work_interval_id, relaxed); + *work_interval = (struct work_interval) { .wi_id = work_interval_id, - .wi_ref_count = 1, + .wi_ref_count = {}, .wi_create_flags = create_flags, .wi_creator_pid = pid_from_task(creating_task), .wi_creator_uniqueid = get_task_uniqueid(creating_task), .wi_creator_pidversion = get_task_version(creating_task), }; + os_ref_init(&work_interval->wi_ref_count, NULL); + + __kdebug_only uint64_t tg_id = 0; +#if CONFIG_THREAD_GROUPS + struct thread_group *tg; + if (create_flags & WORK_INTERVAL_FLAG_GROUP) { + /* create a new group for the interval to represent */ + char name[THREAD_GROUP_MAXNAME] = ""; + + snprintf(name, sizeof(name), "WI[%d] #%lld", + work_interval->wi_creator_pid, work_interval_id); + + tg = thread_group_create_and_retain(); + + thread_group_set_name(tg, name); + + work_interval->wi_group = tg; + } else { + /* the interval represents the thread's home group */ + tg = thread_group_get_home_group(thread); + thread_group_retain(tg); + + work_interval->wi_group = tg; + } + + /* Capture the tg_id for tracing purposes */ + tg_id = thread_group_get_id(work_interval->wi_group); + +#endif /* CONFIG_THREAD_GROUPS */ if (create_flags & WORK_INTERVAL_FLAG_JOINABLE) { mach_port_name_t name = MACH_PORT_NULL; @@ -394,11 +972,41 @@ kern_work_interval_create(thread_t thread, create_params->wica_port = name; } else { /* work_interval has a +1 ref, moves to the thread */ - thread_set_work_interval(thread, work_interval); + kern_return_t kr = thread_set_work_interval_explicit_join(thread, work_interval); + if (kr != KERN_SUCCESS) { + /* No other thread can join this work interval since it isn't + * JOINABLE so release the reference on work interval */ + work_interval_release(work_interval, THREAD_WI_THREAD_LOCK_NEEDED); + return kr; + } create_params->wica_port = MACH_PORT_NULL; } create_params->wica_id = work_interval_id; + + KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_WORKGROUP, WORKGROUP_INTERVAL_CREATE), + work_interval_id, create_flags, pid_from_task(creating_task), tg_id); + return KERN_SUCCESS; +} + +kern_return_t +kern_work_interval_get_flags_from_port(mach_port_name_t port_name, uint32_t *flags) +{ + assert(flags != NULL); + + kern_return_t kr; + struct work_interval *work_interval; + + kr = port_name_to_work_interval(port_name, &work_interval); + if (kr != KERN_SUCCESS) { + return kr; + } + + assert(work_interval != NULL); + *flags = work_interval->wi_create_flags; + + work_interval_release(work_interval, THREAD_WI_THREAD_LOCK_NEEDED); + return KERN_SUCCESS; } @@ -416,9 +1024,7 @@ kern_work_interval_destroy(thread_t thread, uint64_t work_interval_id) return KERN_INVALID_ARGUMENT; } - thread_set_work_interval(thread, 
NULL); - - return KERN_SUCCESS; + return thread_set_work_interval_explicit_join(thread, NULL); } kern_return_t @@ -430,8 +1036,7 @@ kern_work_interval_join(thread_t thread, if (port_name == MACH_PORT_NULL) { /* 'Un-join' the current work interval */ - thread_set_work_interval(thread, NULL); - return KERN_SUCCESS; + return thread_set_work_interval_explicit_join(thread, NULL); } kr = port_name_to_work_interval(port_name, &work_interval); @@ -442,9 +1047,22 @@ kern_work_interval_join(thread_t thread, assert(work_interval != NULL); - thread_set_work_interval(thread, work_interval); - - /* ref was consumed by passing it to the thread */ + kr = thread_set_work_interval_explicit_join(thread, work_interval); + /* ref was consumed by passing it to the thread in the successful case */ + if (kr != KERN_SUCCESS) { + work_interval_release(work_interval, THREAD_WI_THREAD_LOCK_NEEDED); + } + return kr; +} - return KERN_SUCCESS; +/* + * work_interval_port_type_render_server() + * + * Helper routine to determine if the port points to a + * WORK_INTERVAL_TYPE_CA_RENDER_SERVER work interval. + */ +bool +work_interval_port_type_render_server(mach_port_name_t port_name) +{ + return work_interval_port_type(port_name) == WORK_INTERVAL_TYPE_CA_RENDER_SERVER; } diff --git a/osfmk/kern/work_interval.h b/osfmk/kern/work_interval.h index baa2311ba..5f6677a84 100644 --- a/osfmk/kern/work_interval.h +++ b/osfmk/kern/work_interval.h @@ -53,7 +53,7 @@ struct kern_work_interval_args { struct kern_work_interval_create_args { uint64_t wica_id; /* out param */ - uint32_t wica_port; /* out param */ + mach_port_name_t wica_port; /* out param */ uint32_t wica_create_flags; }; @@ -64,6 +64,9 @@ struct kern_work_interval_create_args { extern kern_return_t kern_work_interval_create(thread_t thread, struct kern_work_interval_create_args *create_params); +extern kern_return_t +kern_work_interval_get_flags_from_port(mach_port_name_t port_name, uint32_t*flags); + extern kern_return_t kern_work_interval_destroy(thread_t thread, uint64_t work_interval_id); extern kern_return_t @@ -75,10 +78,17 @@ kern_work_interval_notify(thread_t thread, struct kern_work_interval_args* kwi_a #ifdef MACH_KERNEL_PRIVATE extern void work_interval_port_notify(mach_msg_header_t *msg); +void work_interval_subsystem_init(void); +bool work_interval_port_type_render_server(mach_port_name_t port_name); +#if CONFIG_SCHED_AUTO_JOIN +bool work_interval_should_propagate(thread_t cthread, thread_t thread); +void work_interval_auto_join_propagate(thread_t from, thread_t to); +void work_interval_auto_join_unwind(thread_t thread); +void work_interval_auto_join_demote(thread_t thread); +#endif /* CONFIG_SCHED_AUTO_JOIN */ -extern void work_interval_thread_terminate(thread_t thread); - +extern kern_return_t work_interval_thread_terminate(thread_t thread); #endif /* MACH_KERNEL_PRIVATE */ __END_DECLS diff --git a/osfmk/kern/zalloc.c b/osfmk/kern/zalloc.c index 3ee2ae14f..8853e5cb7 100644 --- a/osfmk/kern/zalloc.c +++ b/osfmk/kern/zalloc.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2019 Apple Inc. All rights reserved. + * Copyright (c) 2000-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -62,8 +62,8 @@ * Zone-based memory allocator. A zone is a collection of fixed size * data blocks for which quick allocation/deallocation is possible. 
*/ -#include +#define ZALLOC_ALLOW_DEPRECATED 1 #include #include #include @@ -74,6 +74,7 @@ #include #include +#include #include #include #include @@ -84,7 +85,7 @@ #include #include #include -#include +#include #include #include @@ -93,12 +94,15 @@ #include #include #include +#include /* C_SLOT_PACKED_PTR* */ #include #include #include /* ml_cpu_get_info */ +#include + #include #include #include @@ -106,28 +110,239 @@ #include +#if KASAN_ZALLOC +#define ZONE_ENABLE_LOGGING 0 +#elif DEBUG || DEVELOPMENT +#define ZONE_ENABLE_LOGGING 1 +#else +#define ZONE_ENABLE_LOGGING 0 +#endif + +extern void vm_pageout_garbage_collect(int collect); + +/* Returns pid of the task with the largest number of VM map entries. */ +extern pid_t find_largest_process_vm_map_entries(void); + +/* + * Callout to jetsam. If pid is -1, we wake up the memorystatus thread to do asynchronous kills. + * For any other pid we try to kill that process synchronously. + */ +extern boolean_t memorystatus_kill_on_zone_map_exhaustion(pid_t pid); + +extern zone_t vm_map_entry_zone; +extern zone_t vm_object_zone; +extern vm_offset_t kmapoff_kaddr; +extern unsigned int kmapoff_pgcnt; +extern unsigned int stack_total; +extern unsigned long long stack_allocs; + +/* + * The max # of elements in a chunk should fit into + * zone_page_metadata.free_count (uint16_t). + * + * Update this if the type of free_count changes. + */ +#define ZONE_CHUNK_MAXELEMENTS (UINT16_MAX) + +#define ZONE_PAGECOUNT_BITS 14 + +/* Zone elements must fit both a next pointer and a backup pointer */ +#define ZONE_MIN_ELEM_SIZE (2 * sizeof(vm_offset_t)) +#define ZONE_MAX_ALLOC_SIZE (32 * 1024) + +/* per-cpu zones are special because of counters */ +#define ZONE_MIN_PCPU_ELEM_SIZE (1 * sizeof(vm_offset_t)) + +struct zone_map_range { + vm_offset_t min_address; + vm_offset_t max_address; +}; + +struct zone_page_metadata { + /* The index of the zone this metadata page belongs to */ + zone_id_t zm_index; + + /* + * zm_secondary_page == 0: number of pages in this run + * zm_secondary_page == 1: offset to the chunk start + */ + uint16_t zm_page_count : ZONE_PAGECOUNT_BITS; + + /* Whether this page is part of a chunk run */ + uint16_t zm_percpu : 1; + uint16_t zm_secondary_page : 1; + + /* + * The start of the freelist can be maintained as a 16-bit + * offset instead of a pointer because the free elements would + * be at max ZONE_MAX_ALLOC_SIZE bytes away from the start + * of the allocation chunk. + * + * Offset from start of the allocation chunk to free element + * list head. + */ + uint16_t zm_freelist_offs; + + /* + * zm_secondary_page == 0: number of allocated elements in the chunk + * zm_secondary_page == 1: unused + * + * PAGE_METADATA_EMPTY_FREELIST indicates an empty freelist + */ + uint16_t zm_alloc_count; +#define PAGE_METADATA_EMPTY_FREELIST UINT16_MAX + + zone_pva_t zm_page_next; + zone_pva_t zm_page_prev; + + /* + * This is only for the sake of debuggers + */ +#define ZONE_FOREIGN_COOKIE 0x123456789abcdef + uint64_t zm_foreign_cookie[]; +}; + + +/* Align elements that use the zone page list to 32 byte boundaries. */ +#define ZONE_PAGE_FIRST_OFFSET(kind) ((kind) == ZONE_ADDR_NATIVE ? 0 : 32) + +static_assert(sizeof(struct zone_page_metadata) == 16, "validate packing"); + +static __security_const_late struct { + struct zone_map_range zi_map_range; + struct zone_map_range zi_general_range; + struct zone_map_range zi_meta_range; + struct zone_map_range zi_foreign_range; + + /* + * The metadata lives within the zi_meta_range address range. 
+ * + * The correct formula to find a metadata index is: + * absolute_page_index - page_index(zi_meta_range.min_address) + * + * And then this index is used to dereference zi_meta_range.min_address + * as a `struct zone_page_metadata` array. + * + * To avoid doing that substraction all the time in the various fast-paths, + * zi_array_base is offset by `page_index(zi_meta_range.min_address)` + * to avoid redoing that math all the time. + */ + struct zone_page_metadata *zi_array_base; +} zone_info; + /* * The zone_locks_grp allows for collecting lock statistics. * All locks are associated to this group in zinit. * Look at tools/lockstat for debugging lock contention. */ +LCK_GRP_DECLARE(zone_locks_grp, "zone_locks"); +LCK_MTX_EARLY_DECLARE(zone_metadata_region_lck, &zone_locks_grp); + +/* + * Exclude more than one concurrent garbage collection + */ +LCK_GRP_DECLARE(zone_gc_lck_grp, "zone_gc"); +LCK_MTX_EARLY_DECLARE(zone_gc_lock, &zone_gc_lck_grp); -lck_grp_t zone_locks_grp; -lck_grp_attr_t zone_locks_grp_attr; +boolean_t panic_include_zprint = FALSE; +mach_memory_info_t *panic_kext_memory_info = NULL; +vm_size_t panic_kext_memory_size = 0; /* - * ZONE_ALIAS_ADDR (deprecated) + * Protects zone_array, num_zones, num_zones_in_use, and + * zone_destroyed_bitmap */ +static SIMPLE_LOCK_DECLARE(all_zones_lock, 0); +static unsigned int num_zones_in_use; +unsigned int _Atomic num_zones; +SECURITY_READ_ONLY_LATE(unsigned int) zone_view_count; -#define from_zone_map(addr, size) \ - ((vm_offset_t)(addr) >= zone_map_min_address && \ - ((vm_offset_t)(addr) + size) >= zone_map_min_address && \ - ((vm_offset_t)(addr) + size) <= zone_map_max_address ) +#if KASAN_ZALLOC +#define MAX_ZONES 566 +#else /* !KASAN_ZALLOC */ +#define MAX_ZONES 402 +#endif/* !KASAN_ZALLOC */ +struct zone zone_array[MAX_ZONES]; + +/* Initialized in zone_bootstrap(), how many "copies" the per-cpu system does */ +static SECURITY_READ_ONLY_LATE(unsigned) zpercpu_early_count; + +/* Used to keep track of destroyed slots in the zone_array */ +static bitmap_t zone_destroyed_bitmap[BITMAP_LEN(MAX_ZONES)]; + +/* number of pages used by all zones */ +static long _Atomic zones_phys_page_count; + +/* number of zone mapped pages used by all zones */ +static long _Atomic zones_phys_page_mapped_count; + +#if CONFIG_ZALLOC_SEQUESTER +#define ZSECURITY_OPTIONS_SEQUESTER_DEFAULT ZSECURITY_OPTIONS_SEQUESTER +#else +#define ZSECURITY_OPTIONS_SEQUESTER_DEFAULT 0 +#endif +/* + * Turn ZSECURITY_OPTIONS_STRICT_IOKIT_FREE off on x86 so as not + * not break third party kexts that haven't yet been recompiled + * to use the new iokit macros. 
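
A small arithmetic sketch of the indexing trick described above: biasing the metadata array base by page_index(zi_meta_range.min_address) lets fast paths index it directly with an absolute page index instead of subtracting on every lookup. Everything below (toy_meta, the page numbers) is illustrative; the biased intermediate pointer is formally out of range in standard C, which is the same shortcut zi_array_base takes.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct toy_meta {
	uint64_t payload;
};

int
main(void)
{
	/* Pretend the metadata array covers pages starting at absolute page 100. */
	static struct toy_meta meta[8];
	struct toy_meta *meta_range_base = meta;   /* plays zi_meta_range.min_address */
	uintptr_t meta_min_page = 100;             /* plays page_index(zi_meta_range.min_address) */

	/*
	 * The biased base: indexing it with an absolute page index lands on the
	 * same slot without repeating the subtraction.
	 */
	struct toy_meta *array_base = meta_range_base - meta_min_page;

	uintptr_t page = 103;                      /* some absolute page index */

	assert(&meta_range_base[page - meta_min_page] == &array_base[page]);
	printf("both forms pick metadata slot %td\n",
	    &array_base[page] - meta_range_base);
	return 0;
}
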
+ */ +#if XNU_TARGET_OS_OSX && __x86_64__ +#define ZSECURITY_OPTIONS_STRICT_IOKIT_FREE_DEFAULT 0 +#else +#define ZSECURITY_OPTIONS_STRICT_IOKIT_FREE_DEFAULT \ + ZSECURITY_OPTIONS_STRICT_IOKIT_FREE +#endif + +#define ZSECURITY_DEFAULT ( \ + ZSECURITY_OPTIONS_SEQUESTER_DEFAULT | \ + ZSECURITY_OPTIONS_SUBMAP_USER_DATA | \ + ZSECURITY_OPTIONS_SEQUESTER_KEXT_KALLOC | \ + ZSECURITY_OPTIONS_STRICT_IOKIT_FREE_DEFAULT | \ + 0) +TUNABLE(zone_security_options_t, zsecurity_options, "zs", ZSECURITY_DEFAULT); + +#if VM_MAX_TAG_ZONES +/* enable tags for zones that ask for it */ +TUNABLE(bool, zone_tagging_on, "-zt", false); +#endif /* VM_MAX_TAG_ZONES */ + +#if DEBUG || DEVELOPMENT +TUNABLE(bool, zalloc_disable_copyio_check, "-no-copyio-zalloc-check", false); +__options_decl(zalloc_debug_t, uint32_t, { + ZALLOC_DEBUG_ZONEGC = 0x00000001, + ZALLOC_DEBUG_ZCRAM = 0x00000002, +}); + +TUNABLE(zalloc_debug_t, zalloc_debug, "zalloc_debug", 0); +#endif /* DEBUG || DEVELOPMENT */ +#if CONFIG_ZLEAKS +/* Making pointer scanning leaks detection possible for all zones */ +TUNABLE(bool, zone_leaks_scan_enable, "-zl", false); +#else +#define zone_leaks_scan_enable false +#endif + +/* + * Async allocation of zones + * This mechanism allows for bootstrapping an empty zone which is setup with + * non-blocking flags. The first call to zalloc_noblock() will kick off a thread_call + * to zalloc_async. We perform a zalloc() (which may block) and then an immediate free. + * This will prime the zone for the next use. + * + * Currently the thread_callout function (zalloc_async) will loop through all zones + * looking for any zone with async_pending set and do the work for it. + * + * NOTE: If the calling thread for zalloc_noblock is lower priority than thread_call, + * then zalloc_noblock to an empty zone may succeed. + */ +static void zalloc_async(thread_call_param_t p0, thread_call_param_t p1); +static thread_call_data_t call_async_alloc; +static void zcram_and_lock(zone_t zone, vm_offset_t newmem, vm_size_t size); /* * Zone Corruption Debugging * - * We use three techniques to detect modification of a zone element + * We use four techniques to detect modification of a zone element * after it's been freed. * * (1) Check the freelist next pointer for sanity. @@ -140,14 +355,18 @@ lck_grp_attr_t zone_locks_grp_attr; * no part of the element has been modified while it was on the freelist. * This will also help catch read-after-frees, as code will now dereference * 0xdeadbeef instead of a valid but freed pointer. + * (4) If the zfree_clear_mem flag is set clear the element on free and + * assert that it is still clear when alloc-ed. * * (1) and (2) occur for every allocation and free to a zone. * This is done to make it slightly more difficult for an attacker to * manipulate the freelist to behave in a specific way. * - * Poisoning (3) occurs periodically for every N frees (counted per-zone) - * and on every free for zones smaller than a cacheline. If -zp - * is passed as a boot arg, poisoning occurs for every free. + * Poisoning (3) occurs periodically for every N frees (counted per-zone). + * If -zp is passed as a boot arg, poisoning occurs for every free. + * + * Zeroing (4) is done for those zones that pass the ZC_ZFREE_CLEARMEM + * flag on creation or if the element size is less than one cacheline. 
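
To make techniques (3) and (4) above concrete: a freed element is filled with a recognizable pattern, and the pattern is verified when the element is next handed out, so a write to free memory is caught at allocation time. The standalone userspace sketch below assumes LP64 so the fill value matches ZP_POISON; unlike the kernel it poisons the whole element rather than skipping the freelist next and backup pointer words, and it reports instead of panicking.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define ZP_POISON 0xdeadbeefdeadbeefULL   /* LP64 fill pattern named above */

/* Technique (3): fill a freed element with the poison pattern. */
static void
poison_element(uint64_t *elem, size_t words)
{
	for (size_t i = 0; i < words; i++) {
		elem[i] = ZP_POISON;
	}
}

/*
 * On the next allocation, verify nothing wrote to the element while it sat
 * on the freelist.  The kernel panics on a mismatch; this sketch just reports.
 */
static bool
element_clean(const uint64_t *elem, size_t words)
{
	for (size_t i = 0; i < words; i++) {
		if (elem[i] != ZP_POISON) {
			return false;   /* modify-after-free detected */
		}
	}
	return true;
}

int
main(void)
{
	uint64_t elem[8];
	size_t words = sizeof(elem) / sizeof(elem[0]);

	poison_element(elem, words);
	printf("untouched element clean: %d\n", element_clean(elem, words));

	elem[3] = 42;   /* simulate a write to freed memory */
	printf("scribbled element clean: %d\n", element_clean(elem, words));
	return 0;
}
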
* * Performance slowdown is inversely proportional to the frequency of poisoning, * with a 4-5% hit around N=1, down to ~0.3% at N=16 and just "noise" at N=32 @@ -173,500 +392,693 @@ lck_grp_attr_t zone_locks_grp_attr; * */ -/* Returns TRUE if we rolled over the counter at factor */ -static inline boolean_t -sample_counter(volatile uint32_t * count_p, uint32_t factor) -{ - uint32_t old_count, new_count; - boolean_t rolled_over; +#define ZP_DEFAULT_SAMPLING_FACTOR 16 +#define ZP_DEFAULT_SCALE_FACTOR 4 - do { - new_count = old_count = *count_p; +/* + * set by zp-factor=N boot arg + * + * A zp_factor of 0 indicates zone poisoning is disabled and can also be set by + * passing the -no-zp boot-arg. + * + * A zp_factor of 1 indicates zone poisoning is on for all elements and can be + * set by passing the -zp boot-arg. + */ +static TUNABLE(uint32_t, zp_factor, "zp-factor", ZP_DEFAULT_SAMPLING_FACTOR); - if (++new_count >= factor) { - rolled_over = TRUE; - new_count = 0; - } else { - rolled_over = FALSE; - } - } while (!OSCompareAndSwap(old_count, new_count, count_p)); +/* set by zp-scale=N boot arg, scales zp_factor by zone size */ +static TUNABLE(uint32_t, zp_scale, "zp-scale", ZP_DEFAULT_SCALE_FACTOR); - return rolled_over; -} +/* initialized to a per-boot random value in zp_bootstrap */ +static SECURITY_READ_ONLY_LATE(uintptr_t) zp_poisoned_cookie; +static SECURITY_READ_ONLY_LATE(uintptr_t) zp_nopoison_cookie; +static SECURITY_READ_ONLY_LATE(uintptr_t) zp_min_size; +static SECURITY_READ_ONLY_LATE(uint64_t) zone_phys_mapped_max; -#if defined(__LP64__) -#define ZP_POISON 0xdeadbeefdeadbeef -#else -#define ZP_POISON 0xdeadbeef -#endif +static SECURITY_READ_ONLY_LATE(vm_map_t) zone_submaps[Z_SUBMAP_IDX_COUNT]; +static SECURITY_READ_ONLY_LATE(uint32_t) zone_last_submap_idx; -boolean_t zfree_poison_element(zone_t zone, vm_offset_t elem); -void zalloc_poison_element(boolean_t check_poison, zone_t zone, vm_offset_t addr); +static struct bool_gen zone_bool_gen; +static zone_t zone_find_largest(void); +static void zone_drop_free_elements(zone_t z); -#define ZP_DEFAULT_SAMPLING_FACTOR 16 -#define ZP_DEFAULT_SCALE_FACTOR 4 +#define submap_for_zone(z) zone_submaps[(z)->submap_idx] +#define MAX_SUBMAP_NAME 16 +/* Globals for random boolean generator for elements in free list */ +#define MAX_ENTROPY_PER_ZCRAM 4 + +#if CONFIG_ZCACHE /* - * A zp_factor of 0 indicates zone poisoning is disabled, - * however, we still poison zones smaller than zp_tiny_zone_limit (a cacheline). - * Passing the -no-zp boot-arg disables even this behavior. - * In all cases, we record and check the integrity of a backup pointer. + * Specifies a single zone to enable CPU caching for. 
+ * Can be set using boot-args: zcc_enable_for_zone_name= */ +static char cache_zone_name[MAX_ZONE_NAME]; +static TUNABLE(bool, zcc_kalloc, "zcc_kalloc", false); -/* set by zp-factor=N boot arg, zero indicates non-tiny poisoning disabled */ -#if DEBUG -#define DEFAULT_ZP_FACTOR (1) +__header_always_inline bool +zone_caching_enabled(zone_t z) +{ + return z->zcache.zcc_depot != NULL; +} #else -#define DEFAULT_ZP_FACTOR (0) -#endif -uint32_t zp_factor = DEFAULT_ZP_FACTOR; +__header_always_inline bool +zone_caching_enabled(zone_t z __unused) +{ + return false; +} +#endif /* CONFIG_ZCACHE */ -/* set by zp-scale=N boot arg, scales zp_factor by zone size */ -uint32_t zp_scale = 0; +#pragma mark Zone metadata -/* set in zp_init, zero indicates -no-zp boot-arg */ -vm_size_t zp_tiny_zone_limit = 0; +__enum_closed_decl(zone_addr_kind_t, bool, { + ZONE_ADDR_NATIVE, + ZONE_ADDR_FOREIGN, +}); -/* initialized to a per-boot random value in zp_init */ -uintptr_t zp_poisoned_cookie = 0; -uintptr_t zp_nopoison_cookie = 0; +static inline zone_id_t +zone_index(zone_t z) +{ + return (zone_id_t)(z - zone_array); +} -#if VM_MAX_TAG_ZONES -boolean_t zone_tagging_on; -#endif /* VM_MAX_TAG_ZONES */ +static inline bool +zone_has_index(zone_t z, zone_id_t zid) +{ + return zone_array + zid == z; +} -SECURITY_READ_ONLY_LATE(boolean_t) copyio_zalloc_check = TRUE; -static struct bool_gen zone_bool_gen; +static inline vm_size_t +zone_elem_count(zone_t zone, vm_size_t alloc_size, zone_addr_kind_t kind) +{ + if (kind == ZONE_ADDR_NATIVE) { + if (zone->percpu) { + return PAGE_SIZE / zone_elem_size(zone); + } + return alloc_size / zone_elem_size(zone); + } else { + assert(alloc_size == PAGE_SIZE); + return (PAGE_SIZE - ZONE_PAGE_FIRST_OFFSET(kind)) / zone_elem_size(zone); + } +} -/* - * initialize zone poisoning - * called from zone_bootstrap before any allocations are made from zalloc - */ -static inline void -zp_init(void) +__abortlike +static void +zone_metadata_corruption(zone_t zone, struct zone_page_metadata *meta, + const char *kind) { - char temp_buf[16]; + panic("zone metadata corruption: %s (meta %p, zone %s%s)", + kind, meta, zone_heap_name(zone), zone->z_name); +} - /* - * Initialize backup pointer random cookie for poisoned elements - * Try not to call early_random() back to back, it may return - * the same value if mach_absolute_time doesn't have sufficient time - * to tick over between calls. 
- * (This is only a problem on embedded devices) - */ - zp_poisoned_cookie = (uintptr_t) early_random(); +__abortlike +static void +zone_invalid_element_addr_panic(zone_t zone, vm_offset_t addr) +{ + panic("zone element pointer validation failed (addr: %p, zone %s%s)", + (void *)addr, zone_heap_name(zone), zone->z_name); +} - /* - * Always poison zones smaller than a cacheline, - * because it's pretty close to free - */ - ml_cpu_info_t cpu_info; - ml_cpu_get_info(&cpu_info); - zp_tiny_zone_limit = (vm_size_t) cpu_info.cache_line_size; +__abortlike +static void +zone_page_metadata_index_confusion_panic(zone_t zone, vm_offset_t addr, + struct zone_page_metadata *meta) +{ + panic("%p not in the expected zone %s%s (%d != %d)", + (void *)addr, zone_heap_name(zone), zone->z_name, + meta->zm_index, zone_index(zone)); +} - zp_factor = ZP_DEFAULT_SAMPLING_FACTOR; - zp_scale = ZP_DEFAULT_SCALE_FACTOR; +__abortlike +static void +zone_page_metadata_native_queue_corruption(zone_t zone, zone_pva_t *queue) +{ + panic("foreign metadata index %d enqueued in native head %p from zone %s%s", + queue->packed_address, queue, zone_heap_name(zone), + zone->z_name); +} - //TODO: Bigger permutation? - /* - * Permute the default factor +/- 1 to make it less predictable - * This adds or subtracts ~4 poisoned objects per 1000 frees. - */ - if (zp_factor != 0) { - uint32_t rand_bits = early_random() & 0x3; +__abortlike +static void +zone_page_metadata_list_corruption(zone_t zone, struct zone_page_metadata *meta) +{ + panic("metadata list corruption through element %p detected in zone %s%s", + meta, zone_heap_name(zone), zone->z_name); +} - if (rand_bits == 0x1) { - zp_factor += 1; - } else if (rand_bits == 0x2) { - zp_factor -= 1; - } - /* if 0x0 or 0x3, leave it alone */ - } +__abortlike +static void +zone_page_metadata_foreign_queue_corruption(zone_t zone, zone_pva_t *queue) +{ + panic("native metadata index %d enqueued in foreign head %p from zone %s%s", + queue->packed_address, queue, zone_heap_name(zone), zone->z_name); +} - /* -zp: enable poisoning for every alloc and free */ - if (PE_parse_boot_argn("-zp", temp_buf, sizeof(temp_buf))) { - zp_factor = 1; - } +__abortlike +static void +zone_page_metadata_foreign_confusion_panic(zone_t zone, vm_offset_t addr) +{ + panic("manipulating foreign address %p in a native-only zone %s%s", + (void *)addr, zone_heap_name(zone), zone->z_name); +} - /* -no-zp: disable poisoning completely even for tiny zones */ - if (PE_parse_boot_argn("-no-zp", temp_buf, sizeof(temp_buf))) { - zp_factor = 0; - zp_tiny_zone_limit = 0; - printf("Zone poisoning disabled\n"); - } +__abortlike __unused +static void +zone_invalid_foreign_addr_panic(zone_t zone, vm_offset_t addr) +{ + panic("addr %p being freed to foreign zone %s%s not from foreign range", + (void *)addr, zone_heap_name(zone), zone->z_name); +} - /* zp-factor=XXXX: override how often to poison freed zone elements */ - if (PE_parse_boot_argn("zp-factor", &zp_factor, sizeof(zp_factor))) { - printf("Zone poisoning factor override: %u\n", zp_factor); - } +__abortlike +static void +zone_page_meta_accounting_panic(zone_t zone, struct zone_page_metadata *meta, + const char *kind) +{ + panic("accounting mismatch (%s) for zone %s%s, meta %p", kind, + zone_heap_name(zone), zone->z_name, meta); +} - /* zp-scale=XXXX: override how much zone size scales zp-factor by */ - if (PE_parse_boot_argn("zp-scale", &zp_scale, sizeof(zp_scale))) { - printf("Zone poisoning scale factor override: %u\n", zp_scale); - } +__abortlike +static void 
+zone_accounting_panic(zone_t zone, const char *kind) +{ + panic("accounting mismatch (%s) for zone %s%s", kind, + zone_heap_name(zone), zone->z_name); +} - /* Initialize backup pointer random cookie for unpoisoned elements */ - zp_nopoison_cookie = (uintptr_t) early_random(); +__abortlike +static void +zone_nofail_panic(zone_t zone) +{ + panic("zalloc(Z_NOFAIL) can't be satisfied for zone %s%s (potential leak)", + zone_heap_name(zone), zone->z_name); +} -#if MACH_ASSERT - if (zp_poisoned_cookie == zp_nopoison_cookie) { - panic("early_random() is broken: %p and %p are not random\n", - (void *) zp_poisoned_cookie, (void *) zp_nopoison_cookie); - } +#if __arm64__ +// arm64 doesn't use ldp when I'd expect it to +#define zone_range_load(r, rmin, rmax) \ + asm("ldp %[rmin], %[rmax], [%[range]]" \ + : [rmin] "=r"(rmin), [rmax] "=r"(rmax) \ + : [range] "r"(r)) +#else +#define zone_range_load(r, rmin, rmax) \ + ({ rmin = (r)->min_address; rmax = (r)->max_address; }) #endif - /* - * Use the last bit in the backup pointer to hint poisoning state - * to backup_ptr_mismatch_panic. Valid zone pointers are aligned, so - * the low bits are zero. - */ - zp_poisoned_cookie |= (uintptr_t)0x1ULL; - zp_nopoison_cookie &= ~((uintptr_t)0x1ULL); +__header_always_inline bool +zone_range_contains(const struct zone_map_range *r, vm_offset_t addr, vm_offset_t size) +{ + vm_offset_t rmin, rmax; -#if defined(__LP64__) /* - * Make backup pointers more obvious in GDB for 64 bit - * by making OxFFFFFF... ^ cookie = 0xFACADE... - * (0xFACADE = 0xFFFFFF ^ 0x053521) - * (0xC0FFEE = 0xFFFFFF ^ 0x3f0011) - * The high 3 bytes of a zone pointer are always 0xFFFFFF, and are checked - * by the sanity check, so it's OK for that part of the cookie to be predictable. - * - * TODO: Use #defines, xors, and shifts + * The `&` is not a typo: we really expect the check to pass, + * so encourage the compiler to eagerly load and test without branches */ + zone_range_load(r, rmin, rmax); + return (addr >= rmin) & (addr + size >= rmin) & (addr + size <= rmax); +} - zp_poisoned_cookie &= 0x000000FFFFFFFFFF; - zp_poisoned_cookie |= 0x0535210000000000; /* 0xFACADE */ +__header_always_inline vm_size_t +zone_range_size(const struct zone_map_range *r) +{ + vm_offset_t rmin, rmax; - zp_nopoison_cookie &= 0x000000FFFFFFFFFF; - zp_nopoison_cookie |= 0x3f00110000000000; /* 0xC0FFEE */ -#endif + zone_range_load(r, rmin, rmax); + return rmax - rmin; } -/* - * These macros are used to keep track of the number - * of pages being used by the zone currently. The - * z->page_count is not protected by the zone lock. - */ -#define ZONE_PAGE_COUNT_INCR(z, count) \ -{ \ - OSAddAtomic64(count, &(z->page_count)); \ -} +#define from_zone_map(addr, size) \ + zone_range_contains(&zone_info.zi_map_range, (vm_offset_t)(addr), size) -#define ZONE_PAGE_COUNT_DECR(z, count) \ -{ \ - OSAddAtomic64(-count, &(z->page_count)); \ -} +#define from_general_submap(addr, size) \ + zone_range_contains(&zone_info.zi_general_range, (vm_offset_t)(addr), size) -vm_map_t zone_map = VM_MAP_NULL; +#define from_foreign_range(addr, size) \ + zone_range_contains(&zone_info.zi_foreign_range, (vm_offset_t)(addr), size) -/* for is_sane_zone_element and garbage collection */ +#define from_native_meta_map(addr) \ + zone_range_contains(&zone_info.zi_meta_range, (vm_offset_t)(addr), \ + sizeof(struct zone_page_metadata)) -vm_offset_t zone_map_min_address = 0; /* initialized in zone_init */ -vm_offset_t zone_map_max_address = 0; +#define zone_addr_kind(addr, size) \ + (from_zone_map(addr, size) ? 
ZONE_ADDR_NATIVE : ZONE_ADDR_FOREIGN) -/* Globals for random boolean generator for elements in free list */ -#define MAX_ENTROPY_PER_ZCRAM 4 - -/* VM region for all metadata structures */ -vm_offset_t zone_metadata_region_min = 0; -vm_offset_t zone_metadata_region_max = 0; -decl_lck_mtx_data(static, zone_metadata_region_lck); -lck_attr_t zone_metadata_lock_attr; -lck_mtx_ext_t zone_metadata_region_lck_ext; - -/* Helpful for walking through a zone's free element list. */ -struct zone_free_element { - struct zone_free_element *next; - /* ... */ - /* void *backup_ptr; */ -}; - -#if CONFIG_ZCACHE - -/* - * Decides whether per-cpu zone caching is to be enabled for all zones. - * Can be set to TRUE via the boot-arg '-zcache_all'. - */ -bool cache_all_zones = FALSE; - -/* - * Specifies a single zone to enable CPU caching for. - * Can be set using boot-args: zcc_enable_for_zone_name= - */ -static char cache_zone_name[MAX_ZONE_NAME]; - -static inline bool -zone_caching_enabled(zone_t z) +__header_always_inline bool +zone_pva_is_null(zone_pva_t page) { - return z->cpu_cache_enabled && !z->tags && !z->zleak_on; + return page.packed_address == 0; } -#endif /* CONFIG_ZCACHE */ - -/* - * Protects zone_array, num_zones, num_zones_in_use, and zone_empty_bitmap - */ -decl_simple_lock_data(, all_zones_lock); -unsigned int num_zones_in_use; -unsigned int num_zones; - -#if KASAN -#define MAX_ZONES 512 -#else /* !KASAN */ -#define MAX_ZONES 320 -#endif/* !KASAN */ -struct zone zone_array[MAX_ZONES]; - -/* Used to keep track of empty slots in the zone_array */ -bitmap_t zone_empty_bitmap[BITMAP_LEN(MAX_ZONES)]; - -#if DEBUG || DEVELOPMENT -/* - * Used for sysctl kern.run_zone_test which is not thread-safe. Ensure only one thread goes through at a time. - * Or we can end up with multiple test zones (if a second zinit() comes through before zdestroy()), which could lead us to - * run out of zones. - */ -decl_simple_lock_data(, zone_test_lock); -static boolean_t zone_test_running = FALSE; -static zone_t test_zone_ptr = NULL; -#endif /* DEBUG || DEVELOPMENT */ - -#define PAGE_METADATA_GET_ZINDEX(page_meta) \ - (page_meta->zindex) - -#define PAGE_METADATA_GET_ZONE(page_meta) \ - (&(zone_array[page_meta->zindex])) - -#define PAGE_METADATA_SET_ZINDEX(page_meta, index) \ - page_meta->zindex = (index); - -struct zone_page_metadata { - queue_chain_t pages; /* linkage pointer for metadata lists */ - - /* Union for maintaining start of element free list and real metadata (for multipage allocations) */ - union { - /* - * The start of the freelist can be maintained as a 32-bit offset instead of a pointer because - * the free elements would be at max ZONE_MAX_ALLOC_SIZE bytes away from the metadata. Offset - * from start of the allocation chunk to free element list head. - */ - uint32_t freelist_offset; - /* - * This field is used to lookup the real metadata for multipage allocations, where we mark the - * metadata for all pages except the first as "fake" metadata using MULTIPAGE_METADATA_MAGIC. - * Offset from this fake metadata to real metadata of allocation chunk (-ve offset). - */ - uint32_t real_metadata_offset; - }; +__header_always_inline bool +zone_pva_is_queue(zone_pva_t page) +{ + // actual kernel pages have the top bit set + return (int32_t)page.packed_address > 0; +} - /* - * For the first page in the allocation chunk, this represents the total number of free elements in - * the chunk. 
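/*
 * [Editor's note -- illustrative sketch, not part of the xnu patch]
 * A userspace model of the zone_pva_t packing used by the helpers around
 * here: a page-aligned kernel address is stored as a signed 32-bit page
 * number, so real kernel pages keep the top bit set (negative values), small
 * positive values remain available to encode queue heads, and zero stays the
 * "null" value.  Assumes an LP64 host; the page shift and sample address are
 * arbitrary and the ex_* names are stand-ins.
 */
#include <assert.h>
#include <stdint.h>

#define EX_PAGE_SHIFT 14

typedef struct { uint32_t packed_address; } ex_pva_t;

static ex_pva_t
ex_pva_from_addr(uintptr_t addr)
{
	/* a plain atop() would lose the sign bit; an arithmetic shift keeps it */
	return (ex_pva_t){ (uint32_t)((intptr_t)addr >> EX_PAGE_SHIFT) };
}

static uintptr_t
ex_pva_to_addr(ex_pva_t pva)
{
	/* sign-extend back so the high kernel address bits are restored */
	return (uintptr_t)(intptr_t)(int32_t)pva.packed_address << EX_PAGE_SHIFT;
}

static int
ex_pva_is_queue(ex_pva_t pva)
{
	/* kernel pages pack to negative values, queue-head indices stay positive */
	return (int32_t)pva.packed_address > 0;
}

int
main(void)
{
	uintptr_t kernel_page = 0xfffffff007ab4000ull;  /* arbitrary page-aligned VA */
	ex_pva_t pva = ex_pva_from_addr(kernel_page);

	assert(ex_pva_to_addr(pva) == kernel_page);
	assert(!ex_pva_is_queue(pva));
	assert(ex_pva_is_queue((ex_pva_t){ 3 }));       /* e.g. a queue-head index */
	return 0;
}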
- */ - uint16_t free_count; - unsigned zindex : ZINDEX_BITS; /* Zone index within the zone_array */ - unsigned page_count : PAGECOUNT_BITS; /* Count of pages within the allocation chunk */ -}; +__header_always_inline bool +zone_pva_is_equal(zone_pva_t pva1, zone_pva_t pva2) +{ + return pva1.packed_address == pva2.packed_address; +} -/* Macro to get page index (within zone_map) of page containing element */ -#define PAGE_INDEX_FOR_ELEMENT(element) \ - (((vm_offset_t)trunc_page(element) - zone_map_min_address) / PAGE_SIZE) +__header_always_inline void +zone_queue_set_head(zone_t z, zone_pva_t queue, zone_pva_t oldv, + struct zone_page_metadata *meta) +{ + zone_pva_t *queue_head = &((zone_pva_t *)zone_array)[queue.packed_address]; -/* Macro to get metadata structure given a page index in zone_map */ -#define PAGE_METADATA_FOR_PAGE_INDEX(index) \ - (zone_metadata_region_min + ((index) * sizeof(struct zone_page_metadata))) + if (!zone_pva_is_equal(*queue_head, oldv)) { + zone_page_metadata_list_corruption(z, meta); + } + *queue_head = meta->zm_page_next; +} -/* Macro to get index (within zone_map) for given metadata */ -#define PAGE_INDEX_FOR_METADATA(page_meta) \ - (((vm_offset_t)page_meta - zone_metadata_region_min) / sizeof(struct zone_page_metadata)) +__header_always_inline zone_pva_t +zone_queue_encode(zone_pva_t *headp) +{ + return (zone_pva_t){ (uint32_t)(headp - (zone_pva_t *)zone_array) }; +} -/* Macro to get page for given page index in zone_map */ -#define PAGE_FOR_PAGE_INDEX(index) \ - (zone_map_min_address + (PAGE_SIZE * (index))) +__header_always_inline zone_pva_t +zone_pva_from_addr(vm_address_t addr) +{ + // cannot use atop() because we want to maintain the sign bit + return (zone_pva_t){ (uint32_t)((intptr_t)addr >> PAGE_SHIFT) }; +} -/* Macro to get the actual metadata for a given address */ -#define PAGE_METADATA_FOR_ELEMENT(element) \ - (struct zone_page_metadata *)(PAGE_METADATA_FOR_PAGE_INDEX(PAGE_INDEX_FOR_ELEMENT(element))) +__header_always_inline vm_address_t +zone_pva_to_addr(zone_pva_t page) +{ + // cause sign extension so that we end up with the right address + return (vm_offset_t)(int32_t)page.packed_address << PAGE_SHIFT; +} -/* Magic value to indicate empty element free list */ -#define PAGE_METADATA_EMPTY_FREELIST ((uint32_t)(~0)) +__header_always_inline struct zone_page_metadata * +zone_pva_to_meta(zone_pva_t page, zone_addr_kind_t kind) +{ + if (kind == ZONE_ADDR_NATIVE) { + return &zone_info.zi_array_base[page.packed_address]; + } else { + return (struct zone_page_metadata *)zone_pva_to_addr(page); + } +} -vm_map_copy_t create_vm_map_copy(vm_offset_t start_addr, vm_size_t total_size, vm_size_t used_size); -boolean_t get_zone_info(zone_t z, mach_zone_name_t *zn, mach_zone_info_t *zi); -boolean_t is_zone_map_nearing_exhaustion(void); -extern void vm_pageout_garbage_collect(int collect); +__header_always_inline zone_pva_t +zone_pva_from_meta(struct zone_page_metadata *meta, zone_addr_kind_t kind) +{ + if (kind == ZONE_ADDR_NATIVE) { + uint32_t index = (uint32_t)(meta - zone_info.zi_array_base); + return (zone_pva_t){ index }; + } else { + return zone_pva_from_addr((vm_address_t)meta); + } +} -static inline void * -page_metadata_get_freelist(struct zone_page_metadata *page_meta) +__header_always_inline struct zone_page_metadata * +zone_meta_from_addr(vm_offset_t addr, zone_addr_kind_t kind) { - assert(PAGE_METADATA_GET_ZINDEX(page_meta) != MULTIPAGE_METADATA_MAGIC); - if (page_meta->freelist_offset == PAGE_METADATA_EMPTY_FREELIST) { - return NULL; + if (kind == 
ZONE_ADDR_NATIVE) { + return zone_pva_to_meta(zone_pva_from_addr(addr), kind); } else { - if (from_zone_map(page_meta, sizeof(struct zone_page_metadata))) { - return (void *)(PAGE_FOR_PAGE_INDEX(PAGE_INDEX_FOR_METADATA(page_meta)) + page_meta->freelist_offset); - } else { - return (void *)((vm_offset_t)page_meta + page_meta->freelist_offset); - } + return (struct zone_page_metadata *)trunc_page(addr); } } -static inline void -page_metadata_set_freelist(struct zone_page_metadata *page_meta, void *addr) +#define zone_native_meta_from_addr(addr) \ + zone_meta_from_addr((vm_offset_t)(addr), ZONE_ADDR_NATIVE) + +__header_always_inline vm_offset_t +zone_meta_to_addr(struct zone_page_metadata *meta, zone_addr_kind_t kind) { - assert(PAGE_METADATA_GET_ZINDEX(page_meta) != MULTIPAGE_METADATA_MAGIC); - if (addr == NULL) { - page_meta->freelist_offset = PAGE_METADATA_EMPTY_FREELIST; + if (kind == ZONE_ADDR_NATIVE) { + return ptoa((int)(meta - zone_info.zi_array_base)); } else { - if (from_zone_map(page_meta, sizeof(struct zone_page_metadata))) { - page_meta->freelist_offset = (uint32_t)((vm_offset_t)(addr) - PAGE_FOR_PAGE_INDEX(PAGE_INDEX_FOR_METADATA(page_meta))); - } else { - page_meta->freelist_offset = (uint32_t)((vm_offset_t)(addr) - (vm_offset_t)page_meta); - } + return (vm_offset_t)meta; } } -static inline struct zone_page_metadata * -page_metadata_get_realmeta(struct zone_page_metadata *page_meta) +__header_always_inline void +zone_meta_queue_push(zone_t z, zone_pva_t *headp, + struct zone_page_metadata *meta, zone_addr_kind_t kind) { - assert(PAGE_METADATA_GET_ZINDEX(page_meta) == MULTIPAGE_METADATA_MAGIC); - return (struct zone_page_metadata *)((vm_offset_t)page_meta - page_meta->real_metadata_offset); + zone_pva_t head = *headp; + zone_pva_t queue_pva = zone_queue_encode(headp); + struct zone_page_metadata *tmp; + + meta->zm_page_next = head; + if (!zone_pva_is_null(head)) { + tmp = zone_pva_to_meta(head, kind); + if (!zone_pva_is_equal(tmp->zm_page_prev, queue_pva)) { + zone_page_metadata_list_corruption(z, meta); + } + tmp->zm_page_prev = zone_pva_from_meta(meta, kind); + } + meta->zm_page_prev = queue_pva; + *headp = zone_pva_from_meta(meta, kind); } -static inline void -page_metadata_set_realmeta(struct zone_page_metadata *page_meta, struct zone_page_metadata *real_meta) +__header_always_inline struct zone_page_metadata * +zone_meta_queue_pop(zone_t z, zone_pva_t *headp, zone_addr_kind_t kind, + vm_offset_t *page_addrp) { - assert(PAGE_METADATA_GET_ZINDEX(page_meta) == MULTIPAGE_METADATA_MAGIC); - assert(PAGE_METADATA_GET_ZINDEX(real_meta) != MULTIPAGE_METADATA_MAGIC); - assert((vm_offset_t)page_meta > (vm_offset_t)real_meta); - vm_offset_t offset = (vm_offset_t)page_meta - (vm_offset_t)real_meta; - assert(offset <= UINT32_MAX); - page_meta->real_metadata_offset = (uint32_t)offset; + zone_pva_t head = *headp; + struct zone_page_metadata *meta = zone_pva_to_meta(head, kind); + vm_offset_t page_addr = zone_pva_to_addr(head); + struct zone_page_metadata *tmp; + + if (kind == ZONE_ADDR_NATIVE && !from_native_meta_map(meta)) { + zone_page_metadata_native_queue_corruption(z, headp); + } + if (kind == ZONE_ADDR_FOREIGN && from_zone_map(meta, sizeof(*meta))) { + zone_page_metadata_foreign_queue_corruption(z, headp); + } + + if (!zone_pva_is_null(meta->zm_page_next)) { + tmp = zone_pva_to_meta(meta->zm_page_next, kind); + if (!zone_pva_is_equal(tmp->zm_page_prev, head)) { + zone_page_metadata_list_corruption(z, meta); + } + tmp->zm_page_prev = meta->zm_page_prev; + } + *headp = 
meta->zm_page_next; + + *page_addrp = page_addr; + return meta; } -/* The backup pointer is stored in the last pointer-sized location in an element. */ -static inline vm_offset_t * -get_backup_ptr(vm_size_t elem_size, - vm_offset_t *element) +__header_always_inline void +zone_meta_requeue(zone_t z, zone_pva_t *headp, + struct zone_page_metadata *meta, zone_addr_kind_t kind) { - return (vm_offset_t *) ((vm_offset_t)element + elem_size - sizeof(vm_offset_t)); + zone_pva_t meta_pva = zone_pva_from_meta(meta, kind); + struct zone_page_metadata *tmp; + + if (!zone_pva_is_null(meta->zm_page_next)) { + tmp = zone_pva_to_meta(meta->zm_page_next, kind); + if (!zone_pva_is_equal(tmp->zm_page_prev, meta_pva)) { + zone_page_metadata_list_corruption(z, meta); + } + tmp->zm_page_prev = meta->zm_page_prev; + } + if (zone_pva_is_queue(meta->zm_page_prev)) { + zone_queue_set_head(z, meta->zm_page_prev, meta_pva, meta); + } else { + tmp = zone_pva_to_meta(meta->zm_page_prev, kind); + if (!zone_pva_is_equal(tmp->zm_page_next, meta_pva)) { + zone_page_metadata_list_corruption(z, meta); + } + tmp->zm_page_next = meta->zm_page_next; + } + + zone_meta_queue_push(z, headp, meta, kind); } /* * Routine to populate a page backing metadata in the zone_metadata_region. * Must be called without the zone lock held as it might potentially block. */ -static inline void -zone_populate_metadata_page(struct zone_page_metadata *page_meta) +static void +zone_meta_populate(struct zone_page_metadata *from, struct zone_page_metadata *to) { - vm_offset_t page_metadata_begin = trunc_page(page_meta); - vm_offset_t page_metadata_end = trunc_page((vm_offset_t)page_meta + sizeof(struct zone_page_metadata)); + vm_offset_t page_addr = trunc_page(from); - for (; page_metadata_begin <= page_metadata_end; page_metadata_begin += PAGE_SIZE) { -#if !KASAN + for (; page_addr < (vm_offset_t)to; page_addr += PAGE_SIZE) { +#if !KASAN_ZALLOC /* * This can race with another thread doing a populate on the same metadata * page, where we see an updated pmap but unmapped KASan shadow, causing a * fault in the shadow when we first access the metadata page. Avoid this * by always synchronizing on the zone_metadata_region lock with KASan. */ - if (pmap_find_phys(kernel_pmap, (vm_map_address_t)page_metadata_begin)) { + if (pmap_find_phys(kernel_pmap, page_addr)) { continue; } #endif - /* All updates to the zone_metadata_region are done under the zone_metadata_region_lck */ - lck_mtx_lock(&zone_metadata_region_lck); - if (0 == pmap_find_phys(kernel_pmap, (vm_map_address_t)page_metadata_begin)) { - kern_return_t __assert_only ret = kernel_memory_populate(zone_map, - page_metadata_begin, - PAGE_SIZE, - KMA_KOBJECT, - VM_KERN_MEMORY_OSFMK); - - /* should not fail with the given arguments */ - assert(ret == KERN_SUCCESS); + + for (;;) { + kern_return_t ret = KERN_SUCCESS; + + /* All updates to the zone_metadata_region are done under the zone_metadata_region_lck */ + lck_mtx_lock(&zone_metadata_region_lck); + if (0 == pmap_find_phys(kernel_pmap, page_addr)) { + ret = kernel_memory_populate(kernel_map, page_addr, + PAGE_SIZE, KMA_NOPAGEWAIT | KMA_KOBJECT | KMA_ZERO, + VM_KERN_MEMORY_OSFMK); + } + lck_mtx_unlock(&zone_metadata_region_lck); + + if (ret == KERN_SUCCESS) { + break; + } + + /* + * We can't pass KMA_NOPAGEWAIT under a global lock as it leads + * to bad system deadlocks, so if the allocation failed, + * we need to do the VM_PAGE_WAIT() outside of the lock. 
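/*
 * [Editor's note -- illustrative sketch, not part of the xnu patch]
 * The retry pattern used by zone_meta_populate() above, modeled with
 * pthreads: make only a non-blocking attempt while the global lock is held,
 * and if it fails for lack of pages, drop the lock, wait, and retry.  The
 * pthread mutex stands in for zone_metadata_region_lck, and ex_try_populate()
 * / ex_wait_for_pages() stand in for kernel_memory_populate() with
 * KMA_NOPAGEWAIT and for VM_PAGE_WAIT(); the simulated page pool is made up.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t ex_region_lock = PTHREAD_MUTEX_INITIALIZER;
static int ex_free_pages = 0;           /* simulated page pool, starts exhausted */
static bool ex_mapped = false;

/* non-blocking attempt: fails instead of waiting when no page is available */
static bool
ex_try_populate(void)
{
	if (ex_free_pages == 0) {
		return false;
	}
	ex_free_pages--;
	ex_mapped = true;
	return true;
}

/* stand-in for VM_PAGE_WAIT(): pretend the pageout daemon freed a page */
static void
ex_wait_for_pages(void)
{
	ex_free_pages = 1;
}

static void
ex_populate_page(void)
{
	for (;;) {
		bool ok = true;

		/* only non-blocking work happens while the global lock is held */
		pthread_mutex_lock(&ex_region_lock);
		if (!ex_mapped) {
			ok = ex_try_populate();
		}
		pthread_mutex_unlock(&ex_region_lock);

		if (ok) {
			return;
		}
		/* blocking for pages under the global lock could deadlock,
		 * so wait only after dropping it, then retry */
		ex_wait_for_pages();
	}
}

int
main(void)
{
	ex_populate_page();
	printf("mapped: %d\n", ex_mapped);
	return 0;
}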
+ */ + VM_PAGE_WAIT(); } - lck_mtx_unlock(&zone_metadata_region_lck); } - return; } -static inline uint16_t -get_metadata_alloc_count(struct zone_page_metadata *page_meta) +static inline bool +zone_allocated_element_offset_is_valid(zone_t zone, vm_offset_t addr, + vm_offset_t page, zone_addr_kind_t kind) { - assert(PAGE_METADATA_GET_ZINDEX(page_meta) != MULTIPAGE_METADATA_MAGIC); - struct zone *z = PAGE_METADATA_GET_ZONE(page_meta); - return (page_meta->page_count * PAGE_SIZE) / z->elem_size; + vm_offset_t offs = addr - page - ZONE_PAGE_FIRST_OFFSET(kind); + vm_offset_t esize = zone_elem_size(zone); + + if (esize & (esize - 1)) { /* not a power of 2 */ + return (offs % esize) == 0; + } else { + return (offs & (esize - 1)) == 0; + } } -/* - * Routine to lookup metadata for any given address. - * If init is marked as TRUE, this should be called without holding the zone lock - * since the initialization might block. - */ -static inline struct zone_page_metadata * -get_zone_page_metadata(struct zone_free_element *element, boolean_t init) +__attribute__((always_inline)) +static struct zone_page_metadata * +zone_allocated_element_resolve(zone_t zone, vm_offset_t addr, + vm_offset_t *pagep, zone_addr_kind_t *kindp) { - struct zone_page_metadata *page_meta = 0; + struct zone_page_metadata *meta; + zone_addr_kind_t kind; + vm_offset_t page; + vm_offset_t esize = zone_elem_size(zone); + + kind = zone_addr_kind(addr, esize); + page = trunc_page(addr); + meta = zone_meta_from_addr(addr, kind); - if (from_zone_map(element, sizeof(struct zone_free_element))) { - page_meta = (struct zone_page_metadata *)(PAGE_METADATA_FOR_ELEMENT(element)); - if (init) { - zone_populate_metadata_page(page_meta); + if (kind == ZONE_ADDR_NATIVE) { + if (meta->zm_secondary_page) { + if (meta->zm_percpu) { + zone_invalid_element_addr_panic(zone, addr); + } + page -= ptoa(meta->zm_page_count); + meta -= meta->zm_page_count; } + } else if (!zone->allows_foreign) { + zone_page_metadata_foreign_confusion_panic(zone, addr); +#if __LP64__ + } else if (!from_foreign_range(addr, esize)) { + zone_invalid_foreign_addr_panic(zone, addr); +#else + } else if (!pmap_kernel_va(addr)) { + zone_invalid_element_addr_panic(zone, addr); +#endif + } + + if (!zone_allocated_element_offset_is_valid(zone, addr, page, kind)) { + zone_invalid_element_addr_panic(zone, addr); + } + + if (!zone_has_index(zone, meta->zm_index)) { + zone_page_metadata_index_confusion_panic(zone, addr, meta); + } + + if (kindp) { + *kindp = kind; + } + if (pagep) { + *pagep = page; + } + return meta; +} + +__attribute__((always_inline)) +void +zone_allocated_element_validate(zone_t zone, vm_offset_t addr) +{ + zone_allocated_element_resolve(zone, addr, NULL, NULL); +} + +__header_always_inline vm_offset_t +zone_page_meta_get_freelist(zone_t zone, struct zone_page_metadata *meta, + vm_offset_t page) +{ + assert(!meta->zm_secondary_page); + if (meta->zm_freelist_offs == PAGE_METADATA_EMPTY_FREELIST) { + return 0; + } + + vm_size_t size = ptoa(meta->zm_percpu ? 
1 : meta->zm_page_count); + if (meta->zm_freelist_offs + zone_elem_size(zone) > size) { + zone_metadata_corruption(zone, meta, "freelist corruption"); + } + + return page + meta->zm_freelist_offs; +} + +__header_always_inline void +zone_page_meta_set_freelist(struct zone_page_metadata *meta, + vm_offset_t page, vm_offset_t addr) +{ + assert(!meta->zm_secondary_page); + if (addr) { + meta->zm_freelist_offs = (uint16_t)(addr - page); } else { - page_meta = (struct zone_page_metadata *)(trunc_page((vm_offset_t)element)); + meta->zm_freelist_offs = PAGE_METADATA_EMPTY_FREELIST; } - if (init) { - bzero((char *)page_meta, sizeof(struct zone_page_metadata)); +} + +static bool +zone_page_meta_is_sane_element(zone_t zone, struct zone_page_metadata *meta, + vm_offset_t page, vm_offset_t element, zone_addr_kind_t kind) +{ + if (element == 0) { + /* ends of the freelist are NULL */ + return true; } - return (PAGE_METADATA_GET_ZINDEX(page_meta) != MULTIPAGE_METADATA_MAGIC) ? page_meta : page_metadata_get_realmeta(page_meta); + if (element < page + ZONE_PAGE_FIRST_OFFSET(kind)) { + return false; + } + vm_size_t size = ptoa(meta->zm_percpu ? 1 : meta->zm_page_count); + if (element > page + size - zone_elem_size(zone)) { + return false; + } + return true; } -/* Routine to get the page for a given metadata */ -static inline vm_offset_t -get_zone_page(struct zone_page_metadata *page_meta) +/* Routine to get the size of a zone allocated address. + * If the address doesnt belong to the zone maps, returns 0. + */ +vm_size_t +zone_element_size(void *addr, zone_t *z) +{ + struct zone_page_metadata *meta; + struct zone *src_zone; + + if (from_zone_map(addr, sizeof(void *))) { + meta = zone_native_meta_from_addr(addr); + src_zone = &zone_array[meta->zm_index]; + if (z) { + *z = src_zone; + } + return zone_elem_size(src_zone); + } +#if CONFIG_GZALLOC + if (__improbable(gzalloc_enabled())) { + vm_size_t gzsize; + if (gzalloc_element_size(addr, z, &gzsize)) { + return gzsize; + } + } +#endif /* CONFIG_GZALLOC */ + + return 0; +} + +/* This function just formats the reason for the panics by redoing the checks */ +__abortlike +static void +zone_require_panic(zone_t zone, void *addr) { - if (from_zone_map(page_meta, sizeof(struct zone_page_metadata))) { - return (vm_offset_t)(PAGE_FOR_PAGE_INDEX(PAGE_INDEX_FOR_METADATA(page_meta))); + uint32_t zindex; + zone_t other; + + if (!from_zone_map(addr, zone_elem_size(zone))) { + panic("zone_require failed: address not in a zone (addr: %p)", addr); + } + + zindex = zone_native_meta_from_addr(addr)->zm_index; + other = &zone_array[zindex]; + if (zindex >= os_atomic_load(&num_zones, relaxed) || !other->z_self) { + panic("zone_require failed: invalid zone index %d " + "(addr: %p, expected: %s%s)", zindex, + addr, zone_heap_name(zone), zone->z_name); } else { - return (vm_offset_t)(trunc_page(page_meta)); + panic("zone_require failed: address in unexpected zone id %d (%s%s) " + "(addr: %p, expected: %s%s)", + zindex, zone_heap_name(other), other->z_name, + addr, zone_heap_name(zone), zone->z_name); } } +__abortlike +static void +zone_id_require_panic(zone_id_t zid, void *addr) +{ + zone_require_panic(&zone_array[zid], addr); +} + /* - * Routine to panic if a pointer is not mapped to an expected zone. + * Routines to panic if a pointer is not mapped to an expected zone. * This can be used as a means of pinning an object to the zone it is expected * to be a part of. 
Causes a panic if the address does not belong to any * specified zone, does not belong to any zone, has been freed and therefore * unmapped from the zone, or the pointer contains an uninitialized value that * does not belong to any zone. + * + * Note that this can only work with collectable zones without foreign pages. */ - void -zone_require(void *addr, zone_t expected_zone) +zone_require(zone_t zone, void *addr) { - struct zone_page_metadata *page_meta; - - if (!from_zone_map(addr, expected_zone->elem_size)) { - panic("Address not in a zone map for zone_require check (addr: %p)", addr); + if (__probable(from_general_submap(addr, zone_elem_size(zone)) && + (zone_has_index(zone, zone_native_meta_from_addr(addr)->zm_index)))) { + return; + } +#if CONFIG_GZALLOC + if (__probable(gzalloc_enabled())) { + return; } +#endif + zone_require_panic(zone, addr); +} - page_meta = PAGE_METADATA_FOR_ELEMENT(addr); - if (PAGE_METADATA_GET_ZINDEX(page_meta) == MULTIPAGE_METADATA_MAGIC) { - page_meta = page_metadata_get_realmeta(page_meta); +void +zone_id_require(zone_id_t zid, vm_size_t esize, void *addr) +{ + if (__probable(from_general_submap(addr, esize) && + (zid == zone_native_meta_from_addr(addr)->zm_index))) { + return; } - if (PAGE_METADATA_GET_ZINDEX(page_meta) != expected_zone->index) { - panic("Address not in expected zone for zone_require check (addr: %p, zone: %s)", - addr, expected_zone->zone_name); +#if CONFIG_GZALLOC + if (__probable(gzalloc_enabled())) { + return; } +#endif + zone_id_require_panic(zid, addr); } -/* - * ZTAGS - */ +bool +zone_owns(zone_t zone, void *addr) +{ + if (__probable(from_general_submap(addr, zone_elem_size(zone)) && + (zone_has_index(zone, zone_native_meta_from_addr(addr)->zm_index)))) { + return true; + } +#if CONFIG_GZALLOC + if (__probable(gzalloc_enabled())) { + return true; + } +#endif + return false; +} +#pragma mark ZTAGS #if VM_MAX_TAG_ZONES // for zones with tagging enabled: @@ -676,7 +1088,7 @@ zone_require(void *addr, zone_t expected_zone) // or two uint16_t tags if the page can only hold one or two elements #define ZTAGBASE(zone, element) \ - (&((uint32_t *)zone_tagbase_min)[atop((element) - zone_map_min_address)]) + (&((uint32_t *)zone_tagbase_min)[atop((element) - zone_info.zi_map_range.min_address)]) // pointer to the tag for an element #define ZTAG(zone, element) \ @@ -684,9 +1096,9 @@ zone_require(void *addr, zone_t expected_zone) vm_tag_t * result; \ if ((zone)->tags_inline) { \ result = (vm_tag_t *) ZTAGBASE((zone), (element)); \ - if ((page_mask & element) >= (zone)->elem_size) result++; \ + if ((page_mask & element) >= zone_elem_size(zone)) result++; \ } else { \ - result = &((vm_tag_t *)zone_tags_min)[ZTAGBASE((zone), (element))[0] + ((element) & page_mask) / (zone)->elem_size]; \ + result = &((vm_tag_t *)zone_tags_min)[ZTAGBASE((zone), (element))[0] + ((element) & page_mask) / zone_elem_size((zone))]; \ } \ result; \ }) @@ -704,7 +1116,8 @@ static vm_map_t zone_tags_map; // simple heap allocator for allocating the tags for new memory -decl_lck_mtx_data(, ztLock); /* heap lock */ +LCK_MTX_EARLY_DECLARE(ztLock, &zone_locks_grp); /* heap lock */ + enum{ ztFreeIndexCount = 8, ztFreeIndexMax = (ztFreeIndexCount - 1), @@ -948,15 +1361,14 @@ ztAlloc(zone_t zone, uint32_t count) return -1U; } +__startup_func static void -ztInit(vm_size_t max_zonemap_size, lck_grp_t * group) +zone_tagging_init(vm_size_t max_zonemap_size) { kern_return_t ret; vm_map_kernel_flags_t vmk_flags; uint32_t idx; - lck_mtx_init(&ztLock, group, LCK_ATTR_NULL); - // allocate 
submaps VM_KERN_MEMORY_DIAG zone_tagbase_map_size = atop(max_zonemap_size) * sizeof(uint32_t); @@ -1019,7 +1431,7 @@ ztMemoryAdd(zone_t zone, vm_offset_t mem, vm_size_t size) if (!zone->tags_inline) { // allocate tags - count = (uint32_t)(size / zone->elem_size); + count = (uint32_t)(size / zone_elem_size(zone)); blocks = ((count + ztTagsPerBlock - 1) / ztTagsPerBlock); block = ztAlloc(zone, blocks); if (-1U == block) { @@ -1034,7 +1446,8 @@ ztMemoryAdd(zone_t zone, vm_offset_t mem, vm_size_t size) // set tag base for each page block *= ztTagsPerBlock; for (idx = 0; idx < pages; idx++) { - tagbase[idx] = block + (uint32_t)((ptoa(idx) + (zone->elem_size - 1)) / zone->elem_size); + vm_offset_t esize = zone_elem_size(zone); + tagbase[idx] = block + (uint32_t)((ptoa(idx) + esize - 1) / esize); } } } @@ -1056,7 +1469,7 @@ ztMemoryRemove(zone_t zone, vm_offset_t mem, vm_size_t size) lck_mtx_lock(&ztLock); if (!zone->tags_inline) { - count = (uint32_t)(size / zone->elem_size); + count = (uint32_t)(size / zone_elem_size(zone)); blocks = ((count + ztTagsPerBlock - 1) / ztTagsPerBlock); assert(block != 0xFFFFFFFF); block /= ztTagsPerBlock; @@ -1069,189 +1482,204 @@ ztMemoryRemove(zone_t zone, vm_offset_t mem, vm_size_t size) uint32_t zone_index_from_tag_index(uint32_t tag_zone_index, vm_size_t * elem_size) { - zone_t z; - uint32_t idx; - simple_lock(&all_zones_lock, &zone_locks_grp); - for (idx = 0; idx < num_zones; idx++) { - z = &(zone_array[idx]); + zone_index_foreach(idx) { + zone_t z = &zone_array[idx]; if (!z->tags) { continue; } if (tag_zone_index != z->tag_zone_index) { continue; } - *elem_size = z->elem_size; - break; + + *elem_size = zone_elem_size(z); + simple_unlock(&all_zones_lock); + return idx; } simple_unlock(&all_zones_lock); - if (idx == num_zones) { - idx = -1U; - } - - return idx; + return -1U; } #endif /* VM_MAX_TAG_ZONES */ +#pragma mark zalloc helpers -/* Routine to get the size of a zone allocated address. If the address doesnt belong to the - * zone_map, returns 0. 
- */ -vm_size_t -zone_element_size(void *addr, zone_t *z) +const char * +zone_name(zone_t z) { - struct zone *src_zone; - if (from_zone_map(addr, sizeof(void *))) { - struct zone_page_metadata *page_meta = get_zone_page_metadata((struct zone_free_element *)addr, FALSE); - src_zone = PAGE_METADATA_GET_ZONE(page_meta); - if (z) { - *z = src_zone; - } - return src_zone->elem_size; - } else { -#if CONFIG_GZALLOC - vm_size_t gzsize; - if (gzalloc_element_size(addr, z, &gzsize)) { - return gzsize; - } -#endif /* CONFIG_GZALLOC */ + return z->z_name; +} - return 0; +const char * +zone_heap_name(zone_t z) +{ + if (__probable(z->kalloc_heap < KHEAP_ID_COUNT)) { + return kalloc_heap_names[z->kalloc_heap]; } + return "invalid"; } -#if DEBUG || DEVELOPMENT - -vm_size_t -zone_element_info(void *addr, vm_tag_t * ptag) +static inline vm_size_t +zone_submaps_approx_size(void) { - vm_size_t size = 0; - vm_tag_t tag = VM_KERN_MEMORY_NONE; - struct zone * src_zone; + vm_size_t size = 0; - if (from_zone_map(addr, sizeof(void *))) { - struct zone_page_metadata *page_meta = get_zone_page_metadata((struct zone_free_element *)addr, FALSE); - src_zone = PAGE_METADATA_GET_ZONE(page_meta); -#if VM_MAX_TAG_ZONES - if (__improbable(src_zone->tags)) { - tag = (ZTAG(src_zone, (vm_offset_t) addr)[0] >> 1); - } -#endif /* VM_MAX_TAG_ZONES */ - size = src_zone->elem_size; - } else { -#if CONFIG_GZALLOC - gzalloc_element_size(addr, NULL, &size); -#endif /* CONFIG_GZALLOC */ + for (unsigned idx = 0; idx <= zone_last_submap_idx; idx++) { + size += zone_submaps[idx]->size; } - *ptag = tag; + return size; } -#endif /* DEBUG || DEVELOPMENT */ +bool +zone_maps_owned(vm_address_t addr, vm_size_t size) +{ + return from_zone_map(addr, size); +} -/* - * Zone checking helper function. - * A pointer that satisfies these conditions is OK to be a freelist next pointer - * A pointer that doesn't satisfy these conditions indicates corruption - */ -static inline boolean_t -is_sane_zone_ptr(zone_t zone, - vm_offset_t addr, - size_t obj_size) +void +zone_map_sizes( + vm_map_size_t *psize, + vm_map_size_t *pfree, + vm_map_size_t *plargest_free) { - /* Must be aligned to pointer boundary */ - if (__improbable((addr & (sizeof(vm_offset_t) - 1)) != 0)) { - return FALSE; - } + vm_map_sizes(zone_submaps[Z_SUBMAP_IDX_GENERAL_MAP], psize, pfree, plargest_free); +} - /* Must be a kernel address */ - if (__improbable(!pmap_kernel_va(addr))) { - return FALSE; - } +vm_map_t +zone_submap(zone_t zone) +{ + return submap_for_zone(zone); +} - /* Must be from zone map if the zone only uses memory from the zone_map */ - /* - * TODO: Remove the zone->collectable check when every - * zone using foreign memory is properly tagged with allows_foreign +unsigned +zpercpu_count(void) +{ + return zpercpu_early_count; +} + +int +track_this_zone(const char *zonename, const char *logname) +{ + unsigned int len; + const char *zc = zonename; + const char *lc = logname; + + /* + * Compare the strings. We bound the compare by MAX_ZONE_NAME. */ - if (zone->collectable && !zone->allows_foreign) { - /* check if addr is from zone map */ - if (addr >= zone_map_min_address && - (addr + obj_size - 1) < zone_map_max_address) { - return TRUE; + + for (len = 1; len <= MAX_ZONE_NAME; zc++, lc++, len++) { + /* + * If the current characters don't match, check for a space in + * in the zone name and a corresponding period in the log name. + * If that's not there, then the strings don't match. 
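/*
 * [Editor's note -- illustrative sketch, not part of the xnu patch]
 * How the matching rule in track_this_zone() above behaves: a space in the
 * zone name is treated as equal to a '.' in the name supplied on the command
 * line, so a boot-arg written with dots can select a zone whose name contains
 * spaces.  The comparison loop is re-stated here in a standalone form, and
 * the zone names in main() are examples only.
 */
#include <stdbool.h>
#include <stdio.h>

#define EX_MAX_ZONE_NAME 32

static bool
ex_track_this_zone(const char *zonename, const char *logname)
{
	const char *zc = zonename, *lc = logname;

	for (unsigned len = 1; len <= EX_MAX_ZONE_NAME; zc++, lc++, len++) {
		/* a space in the zone name matches a '.' in the log name */
		if (*zc != *lc && !(*zc == ' ' && *lc == '.')) {
			return false;
		}
		if (*zc == '\0') {
			return true;    /* reached the end: full match */
		}
	}
	return false;
}

int
main(void)
{
	printf("%d\n", ex_track_this_zone("vm objects", "vm.objects")); /* 1 */
	printf("%d\n", ex_track_this_zone("vm objects", "vm_objects")); /* 0 */
	return 0;
}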
+ */ + + if (*zc != *lc && !(*zc == ' ' && *lc == '.')) { + break; } - return FALSE; + /* + * The strings are equal so far. If we're at the end, then it's a match. + */ + + if (*zc == '\0') { + return TRUE; + } } - return TRUE; + return FALSE; } -static inline boolean_t -is_sane_zone_page_metadata(zone_t zone, - vm_offset_t page_meta) -{ - /* NULL page metadata structures are invalid */ - if (page_meta == 0) { - return FALSE; - } - return is_sane_zone_ptr(zone, page_meta, sizeof(struct zone_page_metadata)); -} +#if DEBUG || DEVELOPMENT -static inline boolean_t -is_sane_zone_element(zone_t zone, - vm_offset_t addr) +vm_size_t +zone_element_info(void *addr, vm_tag_t * ptag) { - /* NULL is OK because it indicates the tail of the list */ - if (addr == 0) { - return TRUE; + vm_size_t size = 0; + vm_tag_t tag = VM_KERN_MEMORY_NONE; + struct zone_page_metadata *meta; + struct zone *src_zone; + + if (from_zone_map(addr, sizeof(void *))) { + meta = zone_native_meta_from_addr(addr); + src_zone = &zone_array[meta->zm_index]; +#if VM_MAX_TAG_ZONES + if (__improbable(src_zone->tags)) { + tag = (ZTAG(src_zone, (vm_offset_t) addr)[0] >> 1); + } +#endif /* VM_MAX_TAG_ZONES */ + size = zone_elem_size(src_zone); + } else { +#if CONFIG_GZALLOC + gzalloc_element_size(addr, NULL, &size); +#endif /* CONFIG_GZALLOC */ } - return is_sane_zone_ptr(zone, addr, zone->elem_size); + *ptag = tag; + return size; } +#endif /* DEBUG || DEVELOPMENT */ + /* Someone wrote to freed memory. */ -__dead2 -static inline void -zone_element_was_modified_panic(zone_t zone, - vm_offset_t element, - vm_offset_t found, - vm_offset_t expected, - vm_offset_t offset) -{ - panic("a freed zone element has been modified in zone %s: expected %p but found %p, bits changed %p, at offset %d of %d in element %p, cookies %p %p", - zone->zone_name, +__abortlike +static void +zone_element_was_modified_panic( + zone_t zone, + vm_offset_t element, + vm_offset_t found, + vm_offset_t expected, + vm_offset_t offset) +{ + panic("a freed zone element has been modified in zone %s%s: " + "expected %p but found %p, bits changed %p, " + "at offset %d of %d in element %p, cookies %p %p", + zone_heap_name(zone), + zone->z_name, (void *) expected, (void *) found, (void *) (expected ^ found), (uint32_t) offset, - (uint32_t) zone->elem_size, + (uint32_t) zone_elem_size(zone), (void *) element, (void *) zp_nopoison_cookie, (void *) zp_poisoned_cookie); } +/* The backup pointer is stored in the last pointer-sized location in an element. */ +__header_always_inline vm_offset_t * +get_backup_ptr(vm_size_t elem_size, vm_offset_t *element) +{ + return (vm_offset_t *)((vm_offset_t)element + elem_size - sizeof(vm_offset_t)); +} + /* * The primary and backup pointers don't match. * Determine which one was likely the corrupted pointer, find out what it * probably should have been, and panic. 
*/ -__dead2 +__abortlike static void -backup_ptr_mismatch_panic(zone_t zone, - vm_offset_t element, - vm_offset_t primary, - vm_offset_t backup) +backup_ptr_mismatch_panic( + zone_t zone, + struct zone_page_metadata *page_meta, + vm_offset_t page, + vm_offset_t element) { + vm_offset_t primary = *(vm_offset_t *)element; + vm_offset_t backup = *get_backup_ptr(zone_elem_size(zone), &element); vm_offset_t likely_backup; vm_offset_t likely_primary; + zone_addr_kind_t kind = zone_addr_kind(page, zone_elem_size(zone)); likely_primary = primary ^ zp_nopoison_cookie; boolean_t sane_backup; - boolean_t sane_primary = is_sane_zone_element(zone, likely_primary); - boolean_t element_was_poisoned = (backup & 0x1) ? TRUE : FALSE; + boolean_t sane_primary = zone_page_meta_is_sane_element(zone, page_meta, + page, likely_primary, kind); + boolean_t element_was_poisoned = (backup & 0x1); #if defined(__LP64__) /* We can inspect the tag in the upper bits for additional confirmation */ @@ -1264,11 +1692,11 @@ backup_ptr_mismatch_panic(zone_t zone, if (element_was_poisoned) { likely_backup = backup ^ zp_poisoned_cookie; - sane_backup = is_sane_zone_element(zone, likely_backup); } else { likely_backup = backup ^ zp_nopoison_cookie; - sane_backup = is_sane_zone_element(zone, likely_backup); } + sane_backup = zone_page_meta_is_sane_element(zone, page_meta, + page, likely_backup, kind); /* The primary is definitely the corrupted one */ if (!sane_primary && sane_backup) { @@ -1279,7 +1707,7 @@ backup_ptr_mismatch_panic(zone_t zone, if (sane_primary && !sane_backup) { zone_element_was_modified_panic(zone, element, backup, (likely_primary ^ (element_was_poisoned ? zp_poisoned_cookie : zp_nopoison_cookie)), - zone->elem_size - sizeof(vm_offset_t)); + zone_elem_size(zone) - sizeof(vm_offset_t)); } /* @@ -1297,271 +1725,293 @@ backup_ptr_mismatch_panic(zone_t zone, } /* - * Adds the element to the head of the zone's free list - * Keeps a backup next-pointer at the end of the element + * zone_sequestered_page_get + * z is locked */ -static inline void -free_to_zone(zone_t zone, - vm_offset_t element, - boolean_t poison) +static struct zone_page_metadata * +zone_sequestered_page_get(zone_t z, vm_offset_t *page) { - vm_offset_t old_head; - struct zone_page_metadata *page_meta; - - vm_offset_t *primary = (vm_offset_t *) element; - vm_offset_t *backup = get_backup_ptr(zone->elem_size, primary); + const zone_addr_kind_t kind = ZONE_ADDR_NATIVE; - page_meta = get_zone_page_metadata((struct zone_free_element *)element, FALSE); - assert(PAGE_METADATA_GET_ZONE(page_meta) == zone); - old_head = (vm_offset_t)page_metadata_get_freelist(page_meta); - - if (__improbable(!is_sane_zone_element(zone, old_head))) { - panic("zfree: invalid head pointer %p for freelist of zone %s\n", - (void *) old_head, zone->zone_name); - } - - if (__improbable(!is_sane_zone_element(zone, element))) { - panic("zfree: freeing invalid pointer %p to zone %s\n", - (void *) element, zone->zone_name); + if (!zone_pva_is_null(z->pages_sequester)) { + if (os_sub_overflow(z->sequester_page_count, z->alloc_pages, + &z->sequester_page_count)) { + zone_accounting_panic(z, "sequester_page_count wrap-around"); + } + return zone_meta_queue_pop(z, &z->pages_sequester, kind, page); } - if (__improbable(old_head == element)) { - panic("zfree: double free of %p to zone %s\n", - (void *) element, zone->zone_name); - } - /* - * Always write a redundant next pointer - * So that it is more difficult to forge, xor it with a random cookie - * A poisoned element is indicated by 
using zp_poisoned_cookie - * instead of zp_nopoison_cookie - */ + return NULL; +} - *backup = old_head ^ (poison ? zp_poisoned_cookie : zp_nopoison_cookie); +/* + * zone_sequestered_page_populate + * z is unlocked + * page_meta is invalid on failure + */ +static kern_return_t +zone_sequestered_page_populate(zone_t z, struct zone_page_metadata *page_meta, + vm_offset_t space, vm_size_t alloc_size, int zflags) +{ + kern_return_t retval; - /* - * Insert this element at the head of the free list. We also xor the - * primary pointer with the zp_nopoison_cookie to make sure a free - * element does not provide the location of the next free element directly. - */ - *primary = old_head ^ zp_nopoison_cookie; - page_metadata_set_freelist(page_meta, (struct zone_free_element *)element); - page_meta->free_count++; - if (zone->allows_foreign && !from_zone_map(element, zone->elem_size)) { - if (page_meta->free_count == 1) { - /* first foreign element freed on page, move from all_used */ - re_queue_tail(&zone->pages.any_free_foreign, &(page_meta->pages)); - } else { - /* no other list transitions */ - } - } else if (page_meta->free_count == get_metadata_alloc_count(page_meta)) { - /* whether the page was on the intermediate or all_used, queue, move it to free */ - re_queue_tail(&zone->pages.all_free, &(page_meta->pages)); - zone->count_all_free_pages += page_meta->page_count; - } else if (page_meta->free_count == 1) { - /* first free element on page, move from all_used */ - re_queue_tail(&zone->pages.intermediate, &(page_meta->pages)); + assert(alloc_size == ptoa(z->alloc_pages)); + retval = kernel_memory_populate(submap_for_zone(z), space, alloc_size, + zflags, VM_KERN_MEMORY_ZONE); + if (retval != KERN_SUCCESS) { + lock_zone(z); + zone_meta_queue_push(z, &z->pages_sequester, page_meta, ZONE_ADDR_NATIVE); + z->sequester_page_count += z->alloc_pages; + unlock_zone(z); } - zone->count--; - zone->countfree++; - -#if KASAN_ZALLOC - kasan_poison_range(element, zone->elem_size, ASAN_HEAP_FREED); -#endif + return retval; } +#pragma mark Zone poisoning/zeroing /* - * Removes an element from the zone's free list, returning 0 if the free list is empty. - * Verifies that the next-pointer and backup next-pointer are intact, - * and verifies that a poisoned element hasn't been modified. + * Initialize zone poisoning + * called from zone_bootstrap before any allocations are made from zalloc */ -static inline vm_offset_t -try_alloc_from_zone(zone_t zone, - vm_tag_t tag __unused, - boolean_t* check_poison) +__startup_func +static void +zp_bootstrap(void) { - vm_offset_t element; - struct zone_page_metadata *page_meta; + char temp_buf[16]; - *check_poison = FALSE; + /* + * Initialize backup pointer random cookie for poisoned elements + * Try not to call early_random() back to back, it may return + * the same value if mach_absolute_time doesn't have sufficient time + * to tick over between calls. 
+ * (This is only a problem on embedded devices) + */ + zp_poisoned_cookie = (uintptr_t) early_random(); - /* if zone is empty, bail */ - if (zone->allows_foreign && !queue_empty(&zone->pages.any_free_foreign)) { - page_meta = (struct zone_page_metadata *)queue_first(&zone->pages.any_free_foreign); - } else if (!queue_empty(&zone->pages.intermediate)) { - page_meta = (struct zone_page_metadata *)queue_first(&zone->pages.intermediate); - } else if (!queue_empty(&zone->pages.all_free)) { - page_meta = (struct zone_page_metadata *)queue_first(&zone->pages.all_free); - assert(zone->count_all_free_pages >= page_meta->page_count); - zone->count_all_free_pages -= page_meta->page_count; - } else { - return 0; + /* -zp: enable poisoning for every alloc and free */ + if (PE_parse_boot_argn("-zp", temp_buf, sizeof(temp_buf))) { + zp_factor = 1; } - /* Check if page_meta passes is_sane_zone_element */ - if (__improbable(!is_sane_zone_page_metadata(zone, (vm_offset_t)page_meta))) { - panic("zalloc: invalid metadata structure %p for freelist of zone %s\n", - (void *) page_meta, zone->zone_name); + + /* -no-zp: disable poisoning */ + if (PE_parse_boot_argn("-no-zp", temp_buf, sizeof(temp_buf))) { + zp_factor = 0; + printf("Zone poisoning disabled\n"); } - assert(PAGE_METADATA_GET_ZONE(page_meta) == zone); - element = (vm_offset_t)page_metadata_get_freelist(page_meta); - if (__improbable(!is_sane_zone_ptr(zone, element, zone->elem_size))) { - panic("zfree: invalid head pointer %p for freelist of zone %s\n", - (void *) element, zone->zone_name); + /* Initialize backup pointer random cookie for unpoisoned elements */ + zp_nopoison_cookie = (uintptr_t) early_random(); + +#if MACH_ASSERT + if (zp_poisoned_cookie == zp_nopoison_cookie) { + panic("early_random() is broken: %p and %p are not random\n", + (void *) zp_poisoned_cookie, (void *) zp_nopoison_cookie); } +#endif - vm_offset_t *primary = (vm_offset_t *) element; - vm_offset_t *backup = get_backup_ptr(zone->elem_size, primary); + /* + * Use the last bit in the backup pointer to hint poisoning state + * to backup_ptr_mismatch_panic. Valid zone pointers are aligned, so + * the low bits are zero. + */ + zp_poisoned_cookie |= (uintptr_t)0x1ULL; + zp_nopoison_cookie &= ~((uintptr_t)0x1ULL); +#if defined(__LP64__) /* - * Since the primary next pointer is xor'ed with zp_nopoison_cookie - * for obfuscation, retrieve the original value back + * Make backup pointers more obvious in GDB for 64 bit + * by making OxFFFFFF... ^ cookie = 0xFACADE... + * (0xFACADE = 0xFFFFFF ^ 0x053521) + * (0xC0FFEE = 0xFFFFFF ^ 0x3f0011) + * The high 3 bytes of a zone pointer are always 0xFFFFFF, and are checked + * by the sanity check, so it's OK for that part of the cookie to be predictable. + * + * TODO: Use #defines, xors, and shifts */ - vm_offset_t next_element = *primary ^ zp_nopoison_cookie; - vm_offset_t next_element_primary = *primary; - vm_offset_t next_element_backup = *backup; + + zp_poisoned_cookie &= 0x000000FFFFFFFFFF; + zp_poisoned_cookie |= 0x0535210000000000; /* 0xFACADE */ + + zp_nopoison_cookie &= 0x000000FFFFFFFFFF; + zp_nopoison_cookie |= 0x3f00110000000000; /* 0xC0FFEE */ +#endif /* - * backup_ptr_mismatch_panic will determine what next_element - * should have been, and print it appropriately + * Initialize zp_min_size to two cachelines. Elements smaller than this will + * be zero-ed. 
*/ - if (__improbable(!is_sane_zone_element(zone, next_element))) { - backup_ptr_mismatch_panic(zone, element, next_element_primary, next_element_backup); - } + ml_cpu_info_t cpu_info; + ml_cpu_get_info(&cpu_info); + zp_min_size = 2 * cpu_info.cache_line_size; +} - /* Check the backup pointer for the regular cookie */ - if (__improbable(next_element != (next_element_backup ^ zp_nopoison_cookie))) { - /* Check for the poisoned cookie instead */ - if (__improbable(next_element != (next_element_backup ^ zp_poisoned_cookie))) { - /* Neither cookie is valid, corruption has occurred */ - backup_ptr_mismatch_panic(zone, element, next_element_primary, next_element_backup); - } +inline uint32_t +zone_poison_count_init(zone_t zone) +{ + return zp_factor + (((uint32_t)zone_elem_size(zone)) >> zp_scale) ^ + (mach_absolute_time() & 0x7); +} + +#if ZALLOC_ENABLE_POISONING +static bool +zfree_poison_element(zone_t zone, uint32_t *zp_count, vm_offset_t elem) +{ + bool poison = false; + uint32_t zp_count_local; + assert(!zone->percpu); + if (zp_factor != 0) { /* - * Element was marked as poisoned, so check its integrity before using it. + * Poison the memory of every zp_count-th element before it ends up + * on the freelist to catch use-after-free and use of uninitialized + * memory. + * + * Every element is poisoned when zp_factor is set to 1. + * */ - *check_poison = TRUE; - } - - /* Make sure the page_meta is at the correct offset from the start of page */ - if (__improbable(page_meta != get_zone_page_metadata((struct zone_free_element *)element, FALSE))) { - panic("zalloc: Incorrect metadata %p found in zone %s page queue. Expected metadata: %p\n", - page_meta, zone->zone_name, get_zone_page_metadata((struct zone_free_element *)element, FALSE)); - } + zp_count_local = os_atomic_load(zp_count, relaxed); + if (__improbable(zp_count_local == 0 || zp_factor == 1)) { + poison = true; - /* Make sure next_element belongs to the same page as page_meta */ - if (next_element) { - if (__improbable(page_meta != get_zone_page_metadata((struct zone_free_element *)next_element, FALSE))) { - panic("zalloc: next element pointer %p for element %p points to invalid element for zone %s\n", - (void *)next_element, (void *)element, zone->zone_name); - } - } + os_atomic_store(zp_count, zone_poison_count_init(zone), relaxed); - /* Remove this element from the free list */ - page_metadata_set_freelist(page_meta, (struct zone_free_element *)next_element); - page_meta->free_count--; + /* memset_pattern{4|8} could help make this faster: */ + vm_offset_t *element_cursor = ((vm_offset_t *) elem); + vm_offset_t *end_cursor = (vm_offset_t *)(elem + zone_elem_size(zone)); - if (page_meta->free_count == 0) { - /* move to all used */ - re_queue_tail(&zone->pages.all_used, &(page_meta->pages)); - } else { - if (!zone->allows_foreign || from_zone_map(element, zone->elem_size)) { - if (get_metadata_alloc_count(page_meta) == page_meta->free_count + 1) { - /* remove from free, move to intermediate */ - re_queue_tail(&zone->pages.intermediate, &(page_meta->pages)); + for (; element_cursor < end_cursor; element_cursor++) { + *element_cursor = ZONE_POISON; } + } else { + os_atomic_store(zp_count, zp_count_local - 1, relaxed); + /* + * Zero first zp_min_size bytes of elements that aren't being poisoned. + * Element size is larger than zp_min_size in this path as elements + * that are smaller will always be zero-ed. 
+ */ + bzero((void *) elem, zp_min_size); } } - zone->countfree--; - zone->count++; - zone->sum_count++; + return poison; +} +#else +static bool +zfree_poison_element(zone_t zone, uint32_t *zp_count, vm_offset_t elem) +{ +#pragma unused(zone, zp_count, elem) + assert(!zone->percpu); + return false; +} +#endif -#if VM_MAX_TAG_ZONES - if (__improbable(zone->tags)) { - // set the tag with b0 clear so the block remains inuse - ZTAG(zone, element)[0] = (tag << 1); +__attribute__((always_inline)) +static bool +zfree_clear(zone_t zone, vm_offset_t addr, vm_size_t elem_size) +{ + assert(zone->zfree_clear_mem); + if (zone->percpu) { + zpercpu_foreach_cpu(i) { + bzero((void *)(addr + ptoa(i)), elem_size); + } + } else { + bzero((void *)addr, elem_size); } -#endif /* VM_MAX_TAG_ZONES */ - - -#if KASAN_ZALLOC - kasan_poison_range(element, zone->elem_size, ASAN_VALID); -#endif - return element; + return true; } /* - * End of zone poisoning - */ - -/* - * Zone info options + * Zero the element if zone has zfree_clear_mem flag set else poison + * the element if zp_count hits 0. */ -#define ZINFO_SLOTS MAX_ZONES /* for now */ - -zone_t zone_find_largest(void); +__attribute__((always_inline)) +bool +zfree_clear_or_poison(zone_t zone, uint32_t *zp_count, vm_offset_t addr) +{ + vm_size_t elem_size = zone_elem_size(zone); -/* - * Async allocation of zones - * This mechanism allows for bootstrapping an empty zone which is setup with - * non-blocking flags. The first call to zalloc_noblock() will kick off a thread_call - * to zalloc_async. We perform a zalloc() (which may block) and then an immediate free. - * This will prime the zone for the next use. - * - * Currently the thread_callout function (zalloc_async) will loop through all zones - * looking for any zone with async_pending set and do the work for it. - * - * NOTE: If the calling thread for zalloc_noblock is lower priority than thread_call, - * then zalloc_noblock to an empty zone may succeed. - */ -void zalloc_async( - thread_call_param_t p0, - thread_call_param_t p1); + if (zone->zfree_clear_mem) { + return zfree_clear(zone, addr, elem_size); + } -static thread_call_data_t call_async_alloc; + return zfree_poison_element(zone, zp_count, (vm_offset_t)addr); +} /* - * Align elements that use the zone page list to 32 byte boundaries. + * Clear out the old next pointer and backup to avoid leaking the zone + * poisoning cookie and so that only values on the freelist have a valid + * cookie. 
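Taken together, the free path either zeroes or poisons an element, and the allocation path later re-validates whichever choice was made (see zalloc_validate_element() below). A simplified round-trip sketch; real call sites additionally skip the freelist pointer and zcache canary words, and the element address here is hypothetical:

/*
 * Sketch: free-side choice and alloc-side validation for one element.
 * zp_count is the per-zone poisoning countdown.
 */
static void
sketch_free_then_validate(zone_t z, uint32_t *zp_count, vm_offset_t elem)
{
        /* On free: zero the element, or poison every zp_count-th one. */
        bool poisoned = zfree_clear_or_poison(z, zp_count, elem);

        /* ... the element sits on the freelist for a while ... */

        /* On alloc: verify it still looks cleared (or poisoned). */
        zalloc_validate_element(z, elem, zone_elem_size(z), poisoned);
}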
*/ -#define ZONE_ELEMENT_ALIGNMENT 32 +void +zone_clear_freelist_pointers(zone_t zone, vm_offset_t addr) +{ + vm_offset_t perm_value = 0; -#define zone_wakeup(zone) thread_wakeup((event_t)(zone)) -#define zone_sleep(zone) \ - (void) lck_mtx_sleep(&(zone)->lock, LCK_SLEEP_SPIN_ALWAYS, (event_t)(zone), THREAD_UNINT); + if (!zone->zfree_clear_mem) { + perm_value = ZONE_POISON; + } + vm_offset_t *primary = (vm_offset_t *) addr; + vm_offset_t *backup = get_backup_ptr(zone_elem_size(zone), primary); -#define lock_zone_init(zone) \ -MACRO_BEGIN \ - lck_attr_setdefault(&(zone)->lock_attr); \ - lck_mtx_init_ext(&(zone)->lock, &(zone)->lock_ext, \ - &zone_locks_grp, &(zone)->lock_attr); \ -MACRO_END + *primary = perm_value; + *backup = perm_value; +} -#define lock_try_zone(zone) lck_mtx_try_lock_spin(&zone->lock) +#if ZALLOC_ENABLE_POISONING +__abortlike +static void +zone_element_not_clear_panic(zone_t zone, void *addr) +{ + panic("Zone element %p was modified after free for zone %s%s: " + "Expected element to be cleared", addr, zone_heap_name(zone), + zone->z_name); +} /* - * Exclude more than one concurrent garbage collection + * Validate that the element was not tampered with while it was in the + * freelist. */ -decl_lck_mtx_data(, zone_gc_lock); - -lck_attr_t zone_gc_lck_attr; -lck_grp_t zone_gc_lck_grp; -lck_grp_attr_t zone_gc_lck_grp_attr; -lck_mtx_ext_t zone_gc_lck_ext; - -boolean_t zone_gc_allowed = TRUE; -boolean_t panic_include_zprint = FALSE; - -mach_memory_info_t *panic_kext_memory_info = NULL; -vm_size_t panic_kext_memory_size = 0; - -#define ZALLOC_DEBUG_ZONEGC 0x00000001 -#define ZALLOC_DEBUG_ZCRAM 0x00000002 - -#if DEBUG || DEVELOPMENT -static uint32_t zalloc_debug = 0; -#endif +void +zalloc_validate_element(zone_t zone, vm_offset_t addr, vm_size_t size, bool validate) +{ + if (zone->percpu) { + assert(zone->zfree_clear_mem); + zpercpu_foreach_cpu(i) { + if (memcmp_zero_ptr_aligned((void *)(addr + ptoa(i)), size)) { + zone_element_not_clear_panic(zone, (void *)(addr + ptoa(i))); + } + } + } else if (zone->zfree_clear_mem) { + if (memcmp_zero_ptr_aligned((void *)addr, size)) { + zone_element_not_clear_panic(zone, (void *)addr); + } + } else if (__improbable(validate)) { + const vm_offset_t *p = (vm_offset_t *)addr; + const vm_offset_t *end = (vm_offset_t *)(addr + size); + + for (; p < end; p++) { + if (*p != ZONE_POISON) { + zone_element_was_modified_panic(zone, addr, + *p, ZONE_POISON, (vm_offset_t)p - addr); + } + } + } else { + /* + * If element wasn't poisoned or entirely cleared, validate that the + * minimum bytes that were cleared on free haven't been corrupted. + * addr is advanced by ptr size as we have already validated and cleared + * the freelist pointer/zcache canary. + */ + if (memcmp_zero_ptr_aligned((void *) (addr + sizeof(vm_offset_t)), + zp_min_size - sizeof(vm_offset_t))) { + zone_element_not_clear_panic(zone, (void *)addr); + } + } +} +#endif /* ZALLOC_ENABLE_POISONING */ + +#pragma mark Zone Leak Detection /* * Zone leak debugging code @@ -1593,24 +2043,31 @@ static uint32_t zalloc_debug = 0; * corrupted to examine its history. This should lead to the source of the corruption. 
*/ -static boolean_t log_records_init = FALSE; -static int log_records; /* size of the log, expressed in number of records */ - -#define MAX_NUM_ZONES_ALLOWED_LOGGING 10 /* Maximum 10 zones can be logged at once */ - -static int max_num_zones_to_log = MAX_NUM_ZONES_ALLOWED_LOGGING; -static int num_zones_logged = 0; +/* Returns TRUE if we rolled over the counter at factor */ +__header_always_inline bool +sample_counter(volatile uint32_t *count_p, uint32_t factor) +{ + uint32_t old_count, new_count = 0; + if (count_p != NULL) { + os_atomic_rmw_loop(count_p, old_count, new_count, relaxed, { + new_count = old_count + 1; + if (new_count >= factor) { + new_count = 0; + } + }); + } -static char zone_name_to_log[MAX_ZONE_NAME] = ""; /* the zone name we're logging, if any */ + return new_count == 0; +} +#if ZONE_ENABLE_LOGGING /* Log allocations and frees to help debug a zone element corruption */ -boolean_t corruption_debug_flag = DEBUG; /* enabled by "-zc" boot-arg */ -/* Making pointer scanning leaks detection possible for all zones */ +TUNABLE(bool, corruption_debug_flag, "-zc", false); -#if DEBUG || DEVELOPMENT -boolean_t leak_scan_debug_flag = FALSE; /* enabled by "-zl" boot-arg */ -#endif /* DEBUG || DEVELOPMENT */ +#define MAX_NUM_ZONES_ALLOWED_LOGGING 10 /* Maximum 10 zones can be logged at once */ +static int max_num_zones_to_log = MAX_NUM_ZONES_ALLOWED_LOGGING; +static int num_zones_logged = 0; /* * The number of records in the log is configurable via the zrecs parameter in boot-args. Set this to @@ -1618,76 +2075,140 @@ boolean_t leak_scan_debug_flag = FALSE; /* enabled by "-zl" boot-ar * is the number of stacks suspected of leaking, we don't need many records. */ -#if defined(__LP64__) +#if defined(__LP64__) #define ZRECORDS_MAX 2560 /* Max records allowed in the log */ #else #define ZRECORDS_MAX 1536 /* Max records allowed in the log */ #endif #define ZRECORDS_DEFAULT 1024 /* default records in log if zrecs is not specificed in boot-args */ -/* - * Each record in the log contains a pointer to the zone element it refers to, - * and a small array to hold the pc's from the stack trace. A - * record is added to the log each time a zalloc() is done in the zone_of_interest. For leak debugging, - * the record is cleared when a zfree() is done. For corruption debugging, the log tracks both allocs and frees. - * If the log fills, old records are replaced as if it were a circular buffer. - */ +static TUNABLE(uint32_t, log_records, "zrecs", ZRECORDS_DEFAULT); +static void +zone_enable_logging(zone_t z) +{ + z->zlog_btlog = btlog_create(log_records, MAX_ZTRACE_DEPTH, + (corruption_debug_flag == FALSE) /* caller_will_remove_entries_for_element? */); -/* - * Decide if we want to log this zone by doing a string compare between a zone name and the name - * of the zone to log. Return true if the strings are equal, false otherwise. Because it's not - * possible to include spaces in strings passed in via the boot-args, a period in the logname will - * match a space in the zone name. - */ + if (z->zlog_btlog) { + printf("zone: logging started for zone %s%s\n", + zone_heap_name(z), z->z_name); + } else { + printf("zone: couldn't allocate memory for zrecords, turning off zleak logging\n"); + z->zone_logging = false; + } +} -int -track_this_zone(const char *zonename, const char *logname) +/** + * @function zone_setup_logging + * + * @abstract + * Optionally sets up a zone for logging. 
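sample_counter() above is the building block for rate-limited work: callers keep rolling a shared counter and only act on the allocation (or free) that wraps it. A minimal usage sketch with a hypothetical counter and factor:

/* Sketch: act on roughly 1 out of every `factor` events. */
static void
sketch_sampled_event(volatile uint32_t *counter, uint32_t factor)
{
        if (sample_counter(counter, factor)) {
                /*
                 * The counter just rolled over: do the expensive work here,
                 * e.g. capture a backtrace for leak detection.
                 */
        }
}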
+ * + * @discussion + * We recognized two boot-args: + * + * zlog= + * zrecs= + * + * The zlog arg is used to specify the zone name that should be logged, + * and zrecs is used to control the size of the log. + * + * If zrecs is not specified, a default value is used. + */ +static void +zone_setup_logging(zone_t z) { - unsigned int len; - const char *zc = zonename; - const char *lc = logname; + char zone_name[MAX_ZONE_NAME]; /* Temp. buffer for the zone name */ + char zlog_name[MAX_ZONE_NAME]; /* Temp. buffer to create the strings zlog1, zlog2 etc... */ + char zlog_val[MAX_ZONE_NAME]; /* the zone name we're logging, if any */ /* - * Compare the strings. We bound the compare by MAX_ZONE_NAME. + * Don't allow more than ZRECORDS_MAX records even if the user asked for more. + * + * This prevents accidentally hogging too much kernel memory + * and making the system unusable. */ + if (log_records > ZRECORDS_MAX) { + log_records = ZRECORDS_MAX; + } - for (len = 1; len <= MAX_ZONE_NAME; zc++, lc++, len++) { - /* - * If the current characters don't match, check for a space in - * in the zone name and a corresponding period in the log name. - * If that's not there, then the strings don't match. - */ + /* + * Append kalloc heap name to zone name (if zone is used by kalloc) + */ + snprintf(zone_name, MAX_ZONE_NAME, "%s%s", zone_heap_name(z), z->z_name); - if (*zc != *lc && !(*zc == ' ' && *lc == '.')) { + /* zlog0 isn't allowed. */ + for (int i = 1; i <= max_num_zones_to_log; i++) { + snprintf(zlog_name, MAX_ZONE_NAME, "zlog%d", i); + + if (PE_parse_boot_argn(zlog_name, zlog_val, sizeof(zlog_val)) && + track_this_zone(zone_name, zlog_val)) { + z->zone_logging = true; + num_zones_logged++; break; } + } - /* - * The strings are equal so far. If we're at the end, then it's a match. - */ - - if (*zc == '\0') { - return TRUE; - } + /* + * Backwards compat. with the old boot-arg used to specify single zone + * logging i.e. zlog Needs to happen after the newer zlogn checks + * because the prefix will match all the zlogn + * boot-args. + */ + if (!z->zone_logging && + PE_parse_boot_argn("zlog", zlog_val, sizeof(zlog_val)) && + track_this_zone(zone_name, zlog_val)) { + z->zone_logging = true; + num_zones_logged++; } - return FALSE; + + /* + * If we want to log a zone, see if we need to allocate buffer space for + * the log. + * + * Some vm related zones are zinit'ed before we can do a kmem_alloc, so + * we have to defer allocation in that case. + * + * zone_init() will finish the job. + * + * If we want to log one of the VM related zones that's set up early on, + * we will skip allocation of the log until zinit is called again later + * on some other zone. + */ + if (z->zone_logging && startup_phase >= STARTUP_SUB_KMEM_ALLOC) { + zone_enable_logging(z); + } } +/* + * Each record in the log contains a pointer to the zone element it refers to, + * and a small array to hold the pc's from the stack trace. A + * record is added to the log each time a zalloc() is done in the zone_of_interest. For leak debugging, + * the record is cleared when a zfree() is done. For corruption debugging, the log tracks both allocs and frees. + * If the log fills, old records are replaced as if it were a circular buffer. + */ + + +/* + * Decide if we want to log this zone by doing a string compare between a zone name and the name + * of the zone to log. Return true if the strings are equal, false otherwise. 
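In practice the logging machinery above is driven entirely by boot-args. A hypothetical configuration that logs two zones, enlarges the record buffer, and enables corruption checking might look like this (the zone names are only examples; a period in the argument matches a space in the zone name):

        zlog1=vm.objects zlog2=ipc.ports zrecs=2048 -zc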
Because it's not + * possible to include spaces in strings passed in via the boot-args, a period in the logname will + * match a space in the zone name. + */ /* * Test if we want to log this zalloc/zfree event. We log if this is the zone we're interested in and * the buffer for the records has been allocated. */ -#define DO_LOGGING(z) (z->zone_logging == TRUE && z->zlog_btlog) - -extern boolean_t kmem_alloc_ready; +#define DO_LOGGING(z) (z->zlog_btlog != NULL) +#else /* !ZONE_ENABLE_LOGGING */ +#define DO_LOGGING(z) 0 +#endif /* !ZONE_ENABLE_LOGGING */ #if CONFIG_ZLEAKS -#pragma mark - -#pragma mark Zone Leak Detection /* * The zone leak detector, abbreviated 'zleak', keeps track of a subset of the currently outstanding @@ -1733,8 +2254,6 @@ unsigned int z_trace_recorded = 0; /* Times zleak_log returned false due to not being able to acquire the lock */ unsigned int z_total_conflicts = 0; - -#pragma mark struct zallocation /* * Structure for keeping track of an allocation * An allocation bucket is in use if its element is not NULL @@ -1761,14 +2280,13 @@ static struct ztrace* ztraces; struct ztrace* top_ztrace; /* Lock to protect zallocations, ztraces, and top_ztrace from concurrent modification. */ -static lck_spin_t zleak_lock; -static lck_attr_t zleak_lock_attr; -static lck_grp_t zleak_lock_grp; -static lck_grp_attr_t zleak_lock_grp_attr; +LCK_GRP_DECLARE(zleak_lock_grp, "zleak_lock"); +LCK_SPIN_DECLARE(zleak_lock, &zleak_lock_grp); /* * Initializes the zone leak monitor. Called from zone_init() */ +__startup_func static void zleak_init(vm_size_t max_zonemap_size) { @@ -1821,19 +2339,11 @@ zleak_init(vm_size_t max_zonemap_size) } } - /* allocate the zleak_lock */ - lck_grp_attr_setdefault(&zleak_lock_grp_attr); - lck_grp_init(&zleak_lock_grp, "zleak_lock", &zleak_lock_grp_attr); - lck_attr_setdefault(&zleak_lock_attr); - lck_spin_init(&zleak_lock, &zleak_lock_grp, &zleak_lock_attr); - if (zleak_enable_flag) { zleak_state = ZLEAK_STATE_ENABLED; } } -#if CONFIG_ZLEAKS - /* * Support for kern.zleak.active sysctl - a simplified * version of the zleak_state variable. @@ -1850,9 +2360,6 @@ get_zleak_state(void) return 0; } -#endif - - kern_return_t zleak_activate(void) { @@ -2050,6 +2557,7 @@ zleak_log(uintptr_t* bt, * Free the allocation record and release the stacktrace. * This should be as fast as possible because it will be called for every free. 
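With the logging plumbing reduced to DO_LOGGING(), the hot paths only pay for a cheap NULL check on zlog_btlog before doing any expensive work. A hedged sketch of the intended call-site shape; the helper name is illustrative and the surrounding alloc/free code is not shown:

static void
sketch_log_event(zone_t zone)
{
        if (__improbable(DO_LOGGING(zone))) {
                /*
                 * Capture a backtrace and record this alloc or free against
                 * the element in zone->zlog_btlog (see the btlog interfaces).
                 */
        }
}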
*/ +__attribute__((noinline)) static void zleak_free(uintptr_t addr, vm_size_t allocation_size) @@ -2156,420 +2664,690 @@ hashaddr(uintptr_t pt, uint32_t max_size) } /* End of all leak-detection code */ -#pragma mark - - -#define ZONE_MAX_ALLOC_SIZE (32 * 1024) -#define ZONE_ALLOC_FRAG_PERCENT(alloc_size, ele_size) (((alloc_size % ele_size) * 100) / alloc_size) +#pragma mark zone creation, configuration, destruction -/* Used to manage copying in of new zone names */ -static vm_offset_t zone_names_start; -static vm_offset_t zone_names_next; - -static vm_size_t -compute_element_size(vm_size_t requested_size) +static zone_t +zone_init_defaults(zone_id_t zid) { - vm_size_t element_size = requested_size; + zone_t z = &zone_array[zid]; - /* Zone elements must fit both a next pointer and a backup pointer */ - vm_size_t minimum_element_size = sizeof(vm_offset_t) * 2; - if (element_size < minimum_element_size) { - element_size = minimum_element_size; - } + z->page_count_max = ~0u; + z->collectable = true; + z->expandable = true; + z->submap_idx = Z_SUBMAP_IDX_GENERAL_MAP; - /* - * Round element size to a multiple of sizeof(pointer) - * This also enforces that allocations will be aligned on pointer boundaries - */ - element_size = ((element_size - 1) + sizeof(vm_offset_t)) - - ((element_size - 1) % sizeof(vm_offset_t)); + simple_lock_init(&z->lock, 0); - return element_size; + return z; } -#if KASAN_ZALLOC +static bool +zone_is_initializing(zone_t z) +{ + return !z->z_self && !z->destroyed; +} -/* - * Called from zinit(). - * - * Fixes up the zone's element size to incorporate the redzones. - */ static void -kasan_update_element_size_for_redzone( - zone_t zone, /* the zone that needs to be updated */ - vm_size_t *size, /* requested zone element size */ - vm_size_t *max, /* maximum memory to use */ - const char *name) /* zone name */ -{ - /* Expand the zone allocation size to include the redzones. For page-multiple - * zones add a full guard page because they likely require alignment. kalloc - * and fakestack handles its own KASan state, so ignore those zones. */ - /* XXX: remove this when zinit_with_options() is a thing */ - const char *kalloc_name = "kalloc."; - const char *fakestack_name = "fakestack."; - if (strncmp(name, kalloc_name, strlen(kalloc_name)) == 0) { - zone->kasan_redzone = 0; - } else if (strncmp(name, fakestack_name, strlen(fakestack_name)) == 0) { - zone->kasan_redzone = 0; +zone_set_max(zone_t z, vm_size_t max) +{ +#if KASAN_ZALLOC + if (z->kasan_redzone) { + /* + * Adjust the max memory for the kasan redzones + */ + max += (max / z->pcpu_elem_size) * z->kasan_redzone * 2; + } +#endif + if (max < z->percpu ? 1 : z->alloc_pages) { + max = z->percpu ? 1 : z->alloc_pages; } else { - if ((*size % PAGE_SIZE) != 0) { - zone->kasan_redzone = KASAN_GUARD_SIZE; - } else { - zone->kasan_redzone = PAGE_SIZE; - } - *max = (*max / *size) * (*size + zone->kasan_redzone * 2); - *size += zone->kasan_redzone * 2; + max = atop(round_page(max)); } + z->page_count_max = max; } -/* - * Called from zalloc_internal() to fix up the address of the newly - * allocated element. - * - * Returns the element address skipping over the redzone on the left. 
- */ -static vm_offset_t -kasan_fixup_allocated_element_address( - zone_t zone, /* the zone the element belongs to */ - vm_offset_t addr) /* address of the element, including the redzone */ +void +zone_set_submap_idx(zone_t zone, unsigned int sub_map_idx) { - /* Fixup the return address to skip the redzone */ - if (zone->kasan_redzone) { - addr = kasan_alloc(addr, zone->elem_size, - zone->elem_size - 2 * zone->kasan_redzone, zone->kasan_redzone); + if (!zone_is_initializing(zone)) { + panic("%s: called after zone_create()", __func__); } - return addr; + if (sub_map_idx > zone_last_submap_idx) { + panic("zone_set_submap_idx(%d) > %d", sub_map_idx, zone_last_submap_idx); + } + zone->submap_idx = sub_map_idx; } -/* - * Called from zfree() to add the element being freed to the KASan quarantine. - * - * Returns true if the newly-freed element made it into the quarantine without - * displacing another, false otherwise. In the latter case, addrp points to the - * address of the displaced element, which will be freed by the zone. - */ -static bool -kasan_quarantine_freed_element( - zone_t *zonep, /* the zone the element is being freed to */ - void **addrp) /* address of the element being freed */ +void +zone_set_noexpand( + zone_t zone, + vm_size_t max) { - zone_t zone = *zonep; - void *addr = *addrp; - - /* - * Resize back to the real allocation size and hand off to the KASan - * quarantine. `addr` may then point to a different allocation, if the - * current element replaced another in the quarantine. The zone then - * takes ownership of the swapped out free element. - */ - vm_size_t usersz = zone->elem_size - 2 * zone->kasan_redzone; - vm_size_t sz = usersz; - - if (addr && zone->kasan_redzone) { - kasan_check_free((vm_address_t)addr, usersz, KASAN_HEAP_ZALLOC); - addr = (void *)kasan_dealloc((vm_address_t)addr, &sz); - assert(sz == zone->elem_size); - } - if (addr && zone->kasan_quarantine) { - kasan_free(&addr, &sz, KASAN_HEAP_ZALLOC, zonep, usersz, true); - if (!addr) { - return TRUE; - } + if (!zone_is_initializing(zone)) { + panic("%s: called after zone_create()", __func__); } - *addrp = addr; - return FALSE; + zone->expandable = false; + zone_set_max(zone, max); } -#endif /* KASAN_ZALLOC */ +void +zone_set_exhaustible( + zone_t zone, + vm_size_t max) +{ + if (!zone_is_initializing(zone)) { + panic("%s: called after zone_create()", __func__); + } + zone->expandable = false; + zone->exhaustible = true; + zone_set_max(zone, max); +} -/* - * zinit initializes a new zone. The zone data structures themselves - * are stored in a zone, which is initially a static structure that - * is initialized by zone_init. +/** + * @function zone_create_find + * + * @abstract + * Finds an unused zone for the given name and element size. + * + * @param name the zone name + * @param size the element size (including redzones, ...) + * @param flags the flags passed to @c zone_create* + * @param zid the desired zone ID or ZONE_ID_ANY + * + * @returns a zone to initialize further. 
*/ - -zone_t -zinit( - vm_size_t size, /* the size of an element */ - vm_size_t max, /* maximum memory to use */ - vm_size_t alloc, /* allocation size */ - const char *name) /* a name for the zone */ +static zone_t +zone_create_find( + const char *name, + vm_size_t size, + zone_create_flags_t flags, + zone_id_t zid) { - zone_t z; - - size = compute_element_size(size); + zone_id_t nzones; + zone_t z; simple_lock(&all_zones_lock, &zone_locks_grp); - assert(num_zones < MAX_ZONES); - assert(num_zones_in_use <= num_zones); - - /* If possible, find a previously zdestroy'ed zone in the zone_array that we can reuse instead of initializing a new zone. */ - for (int index = bitmap_first(zone_empty_bitmap, MAX_ZONES); - index >= 0 && index < (int)num_zones; - index = bitmap_next(zone_empty_bitmap, index)) { - z = &(zone_array[index]); + nzones = (zone_id_t)os_atomic_load(&num_zones, relaxed); + assert(num_zones_in_use <= nzones && nzones < MAX_ZONES); + if (__improbable(nzones < ZONE_ID__FIRST_DYNAMIC)) { /* - * If the zone name and the element size are the same, we can just reuse the old zone struct. - * Otherwise hand out a new zone from the zone_array. + * The first time around, make sure the reserved zone IDs + * have an initialized lock as zone_index_foreach() will + * enumerate them. */ - if (!strcmp(z->zone_name, name)) { - vm_size_t old_size = z->elem_size; -#if KASAN_ZALLOC - old_size -= z->kasan_redzone * 2; -#endif - if (old_size == size) { - /* Clear the empty bit for this zone, increment num_zones_in_use, and mark the zone as valid again. */ - bitmap_clear(zone_empty_bitmap, index); - num_zones_in_use++; - z->zone_valid = TRUE; - z->zone_destruction = FALSE; - - /* All other state is already set up since the zone was previously in use. Return early. */ - simple_unlock(&all_zones_lock); - return z; - } + while (nzones < ZONE_ID__FIRST_DYNAMIC) { + zone_init_defaults(nzones++); } - } - /* If we're here, it means we didn't find a zone above that we could simply reuse. Set up a new zone. */ + os_atomic_store(&num_zones, nzones, release); + } - /* Clear the empty bit for the new zone */ - bitmap_clear(zone_empty_bitmap, num_zones); + if (zid != ZONE_ID_ANY) { + if (zid >= ZONE_ID__FIRST_DYNAMIC) { + panic("zone_create: invalid desired zone ID %d for %s", + zid, name); + } + if (flags & ZC_DESTRUCTIBLE) { + panic("zone_create: ID %d (%s) must be permanent", zid, name); + } + if (zone_array[zid].z_self) { + panic("zone_create: creating zone ID %d (%s) twice", zid, name); + } + z = &zone_array[zid]; + } else { + if (flags & ZC_DESTRUCTIBLE) { + /* + * If possible, find a previously zdestroy'ed zone in the + * zone_array that we can reuse. + */ + for (int i = bitmap_first(zone_destroyed_bitmap, MAX_ZONES); + i >= 0; i = bitmap_next(zone_destroyed_bitmap, i)) { + z = &zone_array[i]; - z = &(zone_array[num_zones]); - z->index = num_zones; + /* + * If the zone name and the element size are the + * same, we can just reuse the old zone struct. + */ + if (strcmp(z->z_name, name) || zone_elem_size(z) != size) { + continue; + } + bitmap_clear(zone_destroyed_bitmap, i); + z->destroyed = false; + z->z_self = z; + zid = (zone_id_t)i; + goto out; + } + } - num_zones++; - num_zones_in_use++; + zid = nzones++; + z = zone_init_defaults(zid); - /* - * Initialize the zone lock here before dropping the all_zones_lock. Otherwise we could race with - * zalloc_async() and try to grab the zone lock before it has been initialized, causing a panic. 
- */ - lock_zone_init(z); + /* + * The release barrier pairs with the acquire in + * zone_index_foreach() and makes sure that enumeration loops + * always see an initialized zone lock. + */ + os_atomic_store(&num_zones, nzones, release); + } +out: + num_zones_in_use++; simple_unlock(&all_zones_lock); -#if KASAN_ZALLOC - kasan_update_element_size_for_redzone(z, &size, &max, name); -#endif - - max = round_page(max); - - vm_size_t best_alloc = PAGE_SIZE; + return z; +} - if ((size % PAGE_SIZE) == 0) { - /* zero fragmentation by definition */ - best_alloc = size; - } else { - vm_size_t alloc_size; - for (alloc_size = (2 * PAGE_SIZE); alloc_size <= ZONE_MAX_ALLOC_SIZE; alloc_size += PAGE_SIZE) { - if (ZONE_ALLOC_FRAG_PERCENT(alloc_size, size) < ZONE_ALLOC_FRAG_PERCENT(best_alloc, size)) { - best_alloc = alloc_size; - } - } +__abortlike +static void +zone_create_panic(const char *name, const char *f1, const char *f2) +{ + panic("zone_create: creating zone %s: flag %s and %s are incompatible", + name, f1, f2); +} +#define zone_create_assert_not_both(name, flags, current_flag, forbidden_flag) \ + if ((flags) & forbidden_flag) { \ + zone_create_panic(name, #current_flag, #forbidden_flag); \ } - alloc = best_alloc; - if (max && (max < alloc)) { - max = alloc; - } - - z->free_elements = NULL; - queue_init(&z->pages.any_free_foreign); - queue_init(&z->pages.all_free); - queue_init(&z->pages.intermediate); - queue_init(&z->pages.all_used); - z->cur_size = 0; - z->page_count = 0; - z->max_size = max; - z->elem_size = size; - z->alloc_size = alloc; - z->count = 0; - z->countfree = 0; - z->count_all_free_pages = 0; - z->sum_count = 0LL; - z->doing_alloc_without_vm_priv = FALSE; - z->doing_alloc_with_vm_priv = FALSE; - z->exhaustible = FALSE; - z->collectable = TRUE; - z->allows_foreign = FALSE; - z->expandable = TRUE; - z->waiting = FALSE; - z->async_pending = FALSE; - z->caller_acct = TRUE; - z->noencrypt = FALSE; - z->no_callout = FALSE; - z->async_prio_refill = FALSE; - z->gzalloc_exempt = FALSE; - z->alignment_required = FALSE; - z->zone_replenishing = FALSE; - z->prio_refill_count = 0; - z->zone_replenish_thread = NULL; - z->zp_count = 0; - z->kasan_quarantine = TRUE; - z->zone_valid = TRUE; - z->zone_destruction = FALSE; - z->cpu_cache_enabled = FALSE; - z->clear_memory = FALSE; - -#if CONFIG_ZLEAKS - z->zleak_capture = 0; - z->zleak_on = FALSE; -#endif /* CONFIG_ZLEAKS */ - +/* + * Adjusts the size of the element based on minimum size, alignment + * and kasan redzones + */ +static vm_size_t +zone_elem_adjust_size( + const char *name __unused, + vm_size_t elem_size, + zone_create_flags_t flags, + vm_size_t *redzone __unused) +{ + vm_size_t size; /* - * If the VM is ready to handle kmem_alloc requests, copy the zone name passed in. - * - * Else simply maintain a pointer to the name string. The only zones we'll actually have - * to do this for would be the VM-related zones that are created very early on before any - * kexts can be loaded (unloaded). So we should be fine with just a pointer in this case. 
+ * Adjust element size for minimum size and pointer alignment */ - if (kmem_alloc_ready) { - size_t len = MIN(strlen(name) + 1, MACH_ZONE_NAME_MAX_LEN); - - if (zone_names_start == 0 || ((zone_names_next - zone_names_start) + len) > PAGE_SIZE) { - printf("zalloc: allocating memory for zone names buffer\n"); - kern_return_t retval = kmem_alloc_kobject(kernel_map, &zone_names_start, - PAGE_SIZE, VM_KERN_MEMORY_OSFMK); - if (retval != KERN_SUCCESS) { - panic("zalloc: zone_names memory allocation failed"); - } - bzero((char *)zone_names_start, PAGE_SIZE); - zone_names_next = zone_names_start; - } - - strlcpy((char *)zone_names_next, name, len); - z->zone_name = (char *)zone_names_next; - zone_names_next += len; - } else { - z->zone_name = name; + size = (elem_size + sizeof(vm_offset_t) - 1) & -sizeof(vm_offset_t); + if (((flags & ZC_PERCPU) == 0) && size < ZONE_MIN_ELEM_SIZE) { + size = ZONE_MIN_ELEM_SIZE; } +#if KASAN_ZALLOC /* - * Check for and set up zone leak detection if requested via boot-args. We recognized two - * boot-args: - * - * zlog= - * zrecs= + * Expand the zone allocation size to include the redzones. * - * The zlog arg is used to specify the zone name that should be logged, and zrecs is used to - * control the size of the log. If zrecs is not specified, a default value is used. + * For page-multiple zones add a full guard page because they + * likely require alignment. */ - - if (num_zones_logged < max_num_zones_to_log) { - int i = 1; /* zlog0 isn't allowed. */ - boolean_t zone_logging_enabled = FALSE; - char zlog_name[MAX_ZONE_NAME] = ""; /* Temp. buffer to create the strings zlog1, zlog2 etc... */ - - while (i <= max_num_zones_to_log) { - snprintf(zlog_name, MAX_ZONE_NAME, "zlog%d", i); - - if (PE_parse_boot_argn(zlog_name, zone_name_to_log, sizeof(zone_name_to_log)) == TRUE) { - if (track_this_zone(z->zone_name, zone_name_to_log)) { - if (z->zone_valid) { - z->zone_logging = TRUE; - zone_logging_enabled = TRUE; - num_zones_logged++; - break; - } - } - } - i++; + vm_size_t redzone_tmp; + if (flags & (ZC_KASAN_NOREDZONE | ZC_PERCPU)) { + redzone_tmp = 0; + } else if ((size & PAGE_MASK) == 0) { + if (size != PAGE_SIZE && (flags & ZC_ALIGNMENT_REQUIRED)) { + panic("zone_create: zone %s can't provide more than PAGE_SIZE" + "alignment", name); } - - if (zone_logging_enabled == FALSE) { - /* - * Backwards compat. with the old boot-arg used to specify single zone logging i.e. zlog - * Needs to happen after the newer zlogn checks because the prefix will match all the zlogn - * boot-args. 
- */ - if (PE_parse_boot_argn("zlog", zone_name_to_log, sizeof(zone_name_to_log)) == TRUE) { - if (track_this_zone(z->zone_name, zone_name_to_log)) { - if (z->zone_valid) { - z->zone_logging = TRUE; - zone_logging_enabled = TRUE; - num_zones_logged++; - } - } + redzone_tmp = PAGE_SIZE; + } else if (flags & ZC_ALIGNMENT_REQUIRED) { + redzone_tmp = 0; + } else { + redzone_tmp = KASAN_GUARD_SIZE; + } + size += redzone_tmp * 2; + if (redzone) { + *redzone = redzone_tmp; + } +#endif + return size; +} + +/* + * Returns the allocation chunk size that has least framentation + */ +static vm_size_t +zone_get_min_alloc_granule( + vm_size_t elem_size, + zone_create_flags_t flags) +{ + vm_size_t alloc_granule = PAGE_SIZE; + if (flags & ZC_PERCPU) { + alloc_granule = PAGE_SIZE * zpercpu_count(); + if (PAGE_SIZE % elem_size > 256) { + panic("zone_create: per-cpu zone has too much fragmentation"); + } + } else if ((elem_size & PAGE_MASK) == 0) { + /* zero fragmentation by definition */ + alloc_granule = elem_size; + } else if (alloc_granule % elem_size == 0) { + /* zero fragmentation by definition */ + } else { + vm_size_t frag = (alloc_granule % elem_size) * 100 / alloc_granule; + vm_size_t alloc_tmp = PAGE_SIZE; + while ((alloc_tmp += PAGE_SIZE) <= ZONE_MAX_ALLOC_SIZE) { + vm_size_t frag_tmp = (alloc_tmp % elem_size) * 100 / alloc_tmp; + if (frag_tmp < frag) { + frag = frag_tmp; + alloc_granule = alloc_tmp; } } + } + return alloc_granule; +} - if (log_records_init == FALSE && zone_logging_enabled == TRUE) { - if (PE_parse_boot_argn("zrecs", &log_records, sizeof(log_records)) == TRUE) { - /* - * Don't allow more than ZRECORDS_MAX records even if the user asked for more. - * This prevents accidentally hogging too much kernel memory and making the system - * unusable. - */ +vm_size_t +zone_get_foreign_alloc_size( + const char *name __unused, + vm_size_t elem_size, + zone_create_flags_t flags, + uint16_t min_pages) +{ + vm_size_t adjusted_size = zone_elem_adjust_size(name, elem_size, flags, + NULL); + vm_size_t alloc_granule = zone_get_min_alloc_granule(adjusted_size, + flags); + vm_size_t min_size = min_pages * PAGE_SIZE; + /* + * Round up min_size to a multiple of alloc_granule + */ + return ((min_size + alloc_granule - 1) / alloc_granule) + * alloc_granule; +} - log_records = MIN(ZRECORDS_MAX, log_records); - log_records_init = TRUE; - } else { - log_records = ZRECORDS_DEFAULT; - log_records_init = TRUE; - } - } +zone_t +zone_create_ext( + const char *name, + vm_size_t size, + zone_create_flags_t flags, + zone_id_t desired_zid, + void (^extra_setup)(zone_t)) +{ + vm_size_t alloc; + vm_size_t redzone; + zone_t z; + + if (size > ZONE_MAX_ALLOC_SIZE) { + panic("zone_create: element size too large: %zd", (size_t)size); + } + + size = zone_elem_adjust_size(name, size, flags, &redzone); + /* + * Allocate the zone slot, return early if we found an older match. + */ + z = zone_create_find(name, size, flags, desired_zid); + if (__improbable(z->z_self)) { + /* We found a zone to reuse */ + return z; + } + + /* + * Initialize the zone properly. + */ + + /* + * If the kernel is post lockdown, copy the zone name passed in. + * Else simply maintain a pointer to the name string as it can only + * be a core XNU zone (no unloadable kext exists before lockdown). 
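As a worked example of the granule search in zone_get_min_alloc_granule() above: assume 4 KiB pages and a hypothetical 1,200-byte element. A single page wastes 4096 % 1200 = 496 bytes (12%), two pages waste 992 bytes (12%), three pages waste 288 bytes (2%), and five pages waste only 80 bytes (0% after integer division), so the loop settles on a five-page (20 KiB) chunk holding 17 elements.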
+ */ + if (startup_phase >= STARTUP_SUB_LOCKDOWN) { + size_t nsz = MIN(strlen(name) + 1, MACH_ZONE_NAME_MAX_LEN); + char *buf = zalloc_permanent(nsz, ZALIGN_NONE); + strlcpy(buf, name, nsz); + z->z_name = buf; + } else { + z->z_name = name; + } + /* + * If zone_init() hasn't run yet, the permanent zones do not exist. + * We can limp along without properly initialized stats for a while, + * zone_init() will rebuild the missing stats when it runs. + */ + if (__probable(zone_array[ZONE_ID_PERCPU_PERMANENT].z_self)) { + z->z_stats = zalloc_percpu_permanent_type(struct zone_stats); + } + + alloc = zone_get_min_alloc_granule(size, flags); + + if (flags & ZC_KALLOC_HEAP) { + size_t rem = (alloc % size) / (alloc / size); /* - * If we want to log a zone, see if we need to allocate buffer space for the log. Some vm related zones are - * zinit'ed before we can do a kmem_alloc, so we have to defer allocation in that case. kmem_alloc_ready is set to - * TRUE once enough of the VM system is up and running to allow a kmem_alloc to work. If we want to log one - * of the VM related zones that's set up early on, we will skip allocation of the log until zinit is called again - * later on some other zone. So note we may be allocating a buffer to log a zone other than the one being initialized - * right now. + * Try to grow the elements size and spread them more if the remaining + * space is large enough. */ - if (kmem_alloc_ready) { - zone_t curr_zone = NULL; - unsigned int max_zones = 0, zone_idx = 0; + size += rem & ~(KALLOC_MINALIGN - 1); + } - simple_lock(&all_zones_lock, &zone_locks_grp); - max_zones = num_zones; - simple_unlock(&all_zones_lock); + z->pcpu_elem_size = z->z_elem_size = (uint16_t)size; + z->alloc_pages = (uint16_t)atop(alloc); +#if KASAN_ZALLOC + z->kasan_redzone = redzone; + if (strncmp(name, "fakestack.", sizeof("fakestack.") - 1) == 0) { + z->kasan_fakestacks = true; + } +#endif - for (zone_idx = 0; zone_idx < max_zones; zone_idx++) { - curr_zone = &(zone_array[zone_idx]); + /* + * Handle KPI flags + */ +#if __LP64__ + if (flags & ZC_SEQUESTER) { + z->va_sequester = true; + } +#endif + /* ZC_CACHING applied after all configuration is done */ - if (!curr_zone->zone_valid) { - continue; - } + if (flags & ZC_PERCPU) { + /* + * ZC_CACHING is disallowed because it uses per-cpu zones for its + * implementation and it would be circular. These allocations are + * also quite expensive, so caching feels dangerous memory wise too. + * + * ZC_ZFREE_CLEARMEM is forced because per-cpu zones allow for + * pointer-sized allocations which poisoning doesn't support. 
+ */ + zone_create_assert_not_both(name, flags, ZC_PERCPU, ZC_CACHING); + zone_create_assert_not_both(name, flags, ZC_PERCPU, ZC_ALLOW_FOREIGN); + z->percpu = true; + z->gzalloc_exempt = true; + z->zfree_clear_mem = true; + z->pcpu_elem_size *= zpercpu_count(); + } + if (flags & ZC_ZFREE_CLEARMEM) { + z->zfree_clear_mem = true; + } + if (flags & ZC_NOGC) { + z->collectable = false; + } + if (flags & ZC_NOENCRYPT) { + z->noencrypt = true; + } + if (flags & ZC_ALIGNMENT_REQUIRED) { + z->alignment_required = true; + } + if (flags & ZC_NOGZALLOC) { + z->gzalloc_exempt = true; + } + if (flags & ZC_NOCALLOUT) { + z->no_callout = true; + } + if (flags & ZC_DESTRUCTIBLE) { + zone_create_assert_not_both(name, flags, ZC_DESTRUCTIBLE, ZC_CACHING); + zone_create_assert_not_both(name, flags, ZC_DESTRUCTIBLE, ZC_ALLOW_FOREIGN); + z->destructible = true; + } - /* - * We work with the zone unlocked here because we could end up needing the zone lock to - * enable logging for this zone e.g. need a VM object to allocate memory to enable logging for the - * VM objects zone. - * - * We don't expect these zones to be needed at this early a time in boot and so take this chance. - */ - if (curr_zone->zone_logging && curr_zone->zlog_btlog == NULL) { - curr_zone->zlog_btlog = btlog_create(log_records, MAX_ZTRACE_DEPTH, (corruption_debug_flag == FALSE) /* caller_will_remove_entries_for_element? */); - - if (curr_zone->zlog_btlog) { - printf("zone: logging started for zone %s\n", curr_zone->zone_name); - } else { - printf("zone: couldn't allocate memory for zrecords, turning off zleak logging\n"); - curr_zone->zone_logging = FALSE; - } - } - } - } + /* + * Handle Internal flags + */ + if (flags & ZC_ALLOW_FOREIGN) { + z->allows_foreign = true; + } + if ((ZSECURITY_OPTIONS_SUBMAP_USER_DATA & zsecurity_options) && + (flags & ZC_DATA_BUFFERS)) { + z->submap_idx = Z_SUBMAP_IDX_BAG_OF_BYTES_MAP; + } + if (flags & ZC_KASAN_NOQUARANTINE) { + z->kasan_noquarantine = true; + } + /* ZC_KASAN_NOREDZONE already handled */ + + /* + * Then if there's extra tuning, do it + */ + if (extra_setup) { + extra_setup(z); } -#if CONFIG_GZALLOC - gzalloc_zone_init(z); + /* + * Configure debugging features + */ +#if CONFIG_GZALLOC + gzalloc_zone_init(z); /* might set z->gzalloc_tracked */ +#endif +#if ZONE_ENABLE_LOGGING + if (!z->gzalloc_tracked && num_zones_logged < max_num_zones_to_log) { + /* + * Check for and set up zone leak detection if requested via boot-args. + * might set z->zone_logging + */ + zone_setup_logging(z); + } +#endif /* ZONE_ENABLE_LOGGING */ +#if VM_MAX_TAG_ZONES + if (!z->gzalloc_tracked && z->kalloc_heap && zone_tagging_on) { + static int tag_zone_index; + vm_offset_t esize = zone_elem_size(z); + z->tags = true; + z->tags_inline = (((page_size + esize - 1) / esize) <= + (sizeof(uint32_t) / sizeof(uint16_t))); + z->tag_zone_index = os_atomic_inc_orig(&tag_zone_index, relaxed); + assert(z->tag_zone_index < VM_MAX_TAG_ZONES); + } #endif -#if CONFIG_ZCACHE - /* Check if boot-arg specified it should have a cache */ - if (cache_all_zones || track_this_zone(name, cache_zone_name)) { - zone_change(z, Z_CACHING_ENABLED, TRUE); + /* + * Finally, fixup properties based on security policies, boot-args, ... 
+ */ + if ((ZSECURITY_OPTIONS_SUBMAP_USER_DATA & zsecurity_options) && + z->kalloc_heap == KHEAP_ID_DATA_BUFFERS) { + z->submap_idx = Z_SUBMAP_IDX_BAG_OF_BYTES_MAP; + } +#if __LP64__ + if ((ZSECURITY_OPTIONS_SEQUESTER & zsecurity_options) && + (flags & ZC_NOSEQUESTER) == 0 && + z->submap_idx == Z_SUBMAP_IDX_GENERAL_MAP) { + z->va_sequester = true; } #endif + /* + * Always clear zone elements smaller than a cacheline, + * because it's pretty close to free. + */ + if (size <= zp_min_size) { + z->zfree_clear_mem = true; + } + if (zp_factor != 0 && !z->zfree_clear_mem) { + z->zp_count = zone_poison_count_init(z); + } + +#if CONFIG_ZCACHE + if ((flags & ZC_NOCACHING) == 0) { + /* + * Append kalloc heap name to zone name (if zone is used by kalloc) + */ + char temp_zone_name[MAX_ZONE_NAME] = ""; + snprintf(temp_zone_name, MAX_ZONE_NAME, "%s%s", zone_heap_name(z), z->z_name); + + /* Check if boot-arg specified it should have a cache */ + if (track_this_zone(temp_zone_name, cache_zone_name)) { + flags |= ZC_CACHING; + } else if (zcc_kalloc && z->kalloc_heap) { + flags |= ZC_CACHING; + } + } + if ((flags & ZC_CACHING) && + !z->tags && !z->zone_logging && !z->gzalloc_tracked) { + zcache_init(z); + } +#endif /* CONFIG_ZCACHE */ + + lock_zone(z); + z->z_self = z; + unlock_zone(z); + + return z; +} + +__startup_func +void +zone_create_startup(struct zone_create_startup_spec *spec) +{ + *spec->z_var = zone_create_ext(spec->z_name, spec->z_size, + spec->z_flags, spec->z_zid, spec->z_setup); +} + +/* + * The 4 first field of a zone_view and a zone alias, so that the zone_or_view_t + * union works. trust but verify. + */ +#define zalloc_check_zov_alias(f1, f2) \ + static_assert(offsetof(struct zone, f1) == offsetof(struct zone_view, f2)) +zalloc_check_zov_alias(z_self, zv_zone); +zalloc_check_zov_alias(z_stats, zv_stats); +zalloc_check_zov_alias(z_name, zv_name); +zalloc_check_zov_alias(z_views, zv_next); +#undef zalloc_check_zov_alias + +__startup_func +void +zone_view_startup_init(struct zone_view_startup_spec *spec) +{ + struct kalloc_heap *heap = NULL; + zone_view_t zv = spec->zv_view; + zone_t z; + + switch (spec->zv_heapid) { + case KHEAP_ID_DEFAULT: + heap = KHEAP_DEFAULT; + break; + case KHEAP_ID_DATA_BUFFERS: + heap = KHEAP_DATA_BUFFERS; + break; + case KHEAP_ID_KEXT: + heap = KHEAP_KEXT; + break; + default: + heap = NULL; + } + + if (heap) { + z = kalloc_heap_zone_for_size(heap, spec->zv_size); + assert(z); + } else { + z = spec->zv_zone; + assert(spec->zv_size <= zone_elem_size(z)); + } + + zv->zv_zone = z; + zv->zv_stats = zalloc_percpu_permanent_type(struct zone_stats); + zv->zv_next = z->z_views; + if (z->z_views == NULL && z->kalloc_heap == KHEAP_ID_NONE) { + /* + * count the raw view for zones not in a heap, + * kalloc_heap_init() already counts it for its members. 
+ */ + zone_view_count += 2; + } else { + zone_view_count += 1; + } + z->z_views = zv; +} + +zone_t +zone_create( + const char *name, + vm_size_t size, + zone_create_flags_t flags) +{ + return zone_create_ext(name, size, flags, ZONE_ID_ANY, NULL); +} +zone_t +zinit( + vm_size_t size, /* the size of an element */ + vm_size_t max, /* maximum memory to use */ + vm_size_t alloc __unused, /* allocation size */ + const char *name) /* a name for the zone */ +{ + zone_t z = zone_create(name, size, ZC_DESTRUCTIBLE); + zone_set_max(z, max); return z; } +void +zdestroy(zone_t z) +{ + unsigned int zindex = zone_index(z); + + lock_zone(z); + + if (!z->destructible || zone_caching_enabled(z) || z->allows_foreign) { + panic("zdestroy: Zone %s%s isn't destructible", + zone_heap_name(z), z->z_name); + } + + if (!z->z_self || z->expanding_no_vm_priv || z->expanding_vm_priv || + z->async_pending || z->waiting) { + panic("zdestroy: Zone %s%s in an invalid state for destruction", + zone_heap_name(z), z->z_name); + } + +#if !KASAN_ZALLOC + /* + * Unset the valid bit. We'll hit an assert failure on further operations + * on this zone, until zinit() is called again. + * + * Leave the zone valid for KASan as we will see zfree's on quarantined free + * elements even after the zone is destroyed. + */ + z->z_self = NULL; +#endif + z->destroyed = true; + unlock_zone(z); + + /* Dump all the free elements */ + zone_drop_free_elements(z); + +#if CONFIG_GZALLOC + if (__improbable(z->gzalloc_tracked)) { + /* If the zone is gzalloc managed dump all the elements in the free cache */ + gzalloc_empty_free_cache(z); + } +#endif + + lock_zone(z); + + while (!zone_pva_is_null(z->pages_sequester)) { + struct zone_page_metadata *page_meta; + vm_offset_t free_addr; + + page_meta = zone_sequestered_page_get(z, &free_addr); + unlock_zone(z); + kmem_free(submap_for_zone(z), free_addr, ptoa(z->alloc_pages)); + lock_zone(z); + } + +#if !KASAN_ZALLOC + /* Assert that all counts are zero */ + if (z->countavail || z->countfree || zone_size_wired(z) || + z->allfree_page_count || z->sequester_page_count) { + panic("zdestroy: Zone %s%s isn't empty at zdestroy() time", + zone_heap_name(z), z->z_name); + } + + /* consistency check: make sure everything is indeed empty */ + assert(zone_pva_is_null(z->pages_any_free_foreign)); + assert(zone_pva_is_null(z->pages_all_used_foreign)); + assert(zone_pva_is_null(z->pages_all_free)); + assert(zone_pva_is_null(z->pages_intermediate)); + assert(zone_pva_is_null(z->pages_all_used)); + assert(zone_pva_is_null(z->pages_sequester)); +#endif + + unlock_zone(z); + + simple_lock(&all_zones_lock, &zone_locks_grp); + + assert(!bitmap_test(zone_destroyed_bitmap, zindex)); + /* Mark the zone as empty in the bitmap */ + bitmap_set(zone_destroyed_bitmap, zindex); + num_zones_in_use--; + assert(num_zones_in_use > 0); + + simple_unlock(&all_zones_lock); +} + +#pragma mark zone (re)fill, jetsam + /* * Dealing with zone allocations from the mach VM code. * @@ -2597,7 +3375,7 @@ zinit( * as the refill size on all platforms. * * When a refill zone drops to half that available, i.e. REFILL_SIZE / 2, - * zalloc_internal() will wake the replenish thread. The replenish thread runs + * zalloc_ext() will wake the replenish thread. The replenish thread runs * until at least REFILL_SIZE worth of free elements exist, before sleeping again. * In the meantime threads may continue to use the reserve until there are only REFILL_SIZE / 4 * elements left. 
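For callers, the net effect of the new interface is that zones are declared by name, element size and flags, while zinit() survives only as a thin compatibility wrapper around zone_create() and zone_set_max(). A hypothetical caller-side example (the struct and zone name are illustrative):

struct widget {
        uint64_t w_id;
        void    *w_owner;
};

/* A destructible zone whose elements are zeroed when freed. */
static zone_t widget_zone;

static void
widget_zone_setup(void)
{
        widget_zone = zone_create("example.widgets", sizeof(struct widget),
            ZC_DESTRUCTIBLE | ZC_ZFREE_CLEARMEM);
}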
Below that point only the replenish threads themselves and the GC @@ -2609,97 +3387,198 @@ static unsigned zone_replenish_wakeups_initiated; static unsigned zone_replenish_throttle_count; #define ZONE_REPLENISH_TARGET (16 * 1024) -static unsigned int zone_replenish_active = 0; /* count of zones currently replenishing */ -static unsigned int zone_replenish_max_threads = 0; -static lck_spin_t zone_replenish_lock; -static lck_attr_t zone_replenish_lock_attr; -static lck_grp_t zone_replenish_lock_grp; -static lck_grp_attr_t zone_replenish_lock_grp_attr; +static unsigned zone_replenish_active = 0; /* count of zones currently replenishing */ +static unsigned zone_replenish_max_threads = 0; -static void zone_replenish_thread(zone_t); +LCK_GRP_DECLARE(zone_replenish_lock_grp, "zone_replenish_lock"); +LCK_SPIN_DECLARE(zone_replenish_lock, &zone_replenish_lock_grp); -/* High priority VM privileged thread used to asynchronously refill a designated - * zone, such as the reserved VM map entry zone. - */ -__dead2 +__abortlike static void -zone_replenish_thread(zone_t z) +zone_replenish_panic(zone_t zone, kern_return_t kr) { - current_thread()->options |= (TH_OPT_VMPRIV | TH_OPT_ZONE_PRIV); + panic_include_zprint = TRUE; +#if CONFIG_ZLEAKS + if ((zleak_state & ZLEAK_STATE_ACTIVE)) { + panic_include_ztrace = TRUE; + } +#endif /* CONFIG_ZLEAKS */ + if (kr == KERN_NO_SPACE) { + zone_t zone_largest = zone_find_largest(); + panic("zalloc: zone map exhausted while allocating from zone %s%s, " + "likely due to memory leak in zone %s%s " + "(%lu total bytes, %d elements allocated)", + zone_heap_name(zone), zone->z_name, + zone_heap_name(zone_largest), zone_largest->z_name, + (unsigned long)zone_size_wired(zone_largest), + zone_count_allocated(zone_largest)); + } + panic("zalloc: %s%s (%d elements) retry fail %d", + zone_heap_name(zone), zone->z_name, + zone_count_allocated(zone), kr); +} + +static void +zone_replenish_locked(zone_t z, zalloc_flags_t flags, bool asynchronously) +{ + int kmaflags = KMA_KOBJECT | KMA_ZERO; + vm_offset_t space, alloc_size; + uint32_t retry = 0; + kern_return_t kr; + + if (z->noencrypt) { + kmaflags |= KMA_NOENCRYPT; + } + if (flags & Z_NOPAGEWAIT) { + kmaflags |= KMA_NOPAGEWAIT; + } + if (z->permanent) { + kmaflags |= KMA_PERMANENT; + } for (;;) { - lock_zone(z); - assert(z->zone_valid); - assert(z->zone_replenishing); - assert(z->prio_refill_count != 0); - while ((z->cur_size / z->elem_size) - z->count < z->prio_refill_count) { - assert(z->doing_alloc_without_vm_priv == FALSE); - assert(z->doing_alloc_with_vm_priv == FALSE); - assert(z->async_prio_refill == TRUE); + struct zone_page_metadata *page_meta = NULL; - unlock_zone(z); - int zflags = KMA_KOBJECT | KMA_NOPAGEWAIT; - vm_offset_t space, alloc_size; - kern_return_t kr; + /* + * Try to allocate our regular chunk of pages, + * unless the system is under massive pressure + * and we're looking for more than 2 pages. + */ + if (!z->percpu && z->alloc_pages > 2 && (vm_pool_low() || retry > 0)) { + alloc_size = round_page(zone_elem_size(z)); + } else { + alloc_size = ptoa(z->alloc_pages); + page_meta = zone_sequestered_page_get(z, &space); + } - if (vm_pool_low()) { - alloc_size = round_page(z->elem_size); - } else { - alloc_size = z->alloc_size; - } + unlock_zone(z); - if (z->noencrypt) { - zflags |= KMA_NOENCRYPT; +#if CONFIG_ZLEAKS + /* + * Do the zone leak activation here because zleak_activate() + * may block, and can't be done on the way out. 
+ */ + if (__improbable(zleak_state & ZLEAK_STATE_ENABLED)) { + if (!(zleak_state & ZLEAK_STATE_ACTIVE) && + zone_submaps_approx_size() >= zleak_global_tracking_threshold) { + kr = zleak_activate(); + if (kr != KERN_SUCCESS) { + printf("Failed to activate live zone leak debugging (%d).\n", kr); + } } + } +#endif /* CONFIG_ZLEAKS */ - if (z->clear_memory) { - zflags |= KMA_ZERO; - } + /* + * Trigger jetsams via the vm_pageout_garbage_collect thread if + * we're running out of zone memory + */ + if (is_zone_map_nearing_exhaustion()) { + thread_wakeup((event_t) &vm_pageout_garbage_collect); + } - /* Trigger jetsams via the vm_pageout_garbage_collect thread if we're running out of zone memory */ - if (is_zone_map_nearing_exhaustion()) { - thread_wakeup((event_t) &vm_pageout_garbage_collect); + if (page_meta) { + kr = zone_sequestered_page_populate(z, page_meta, space, + alloc_size, kmaflags); + } else { + if (z->submap_idx == Z_SUBMAP_IDX_GENERAL_MAP && z->kalloc_heap != KHEAP_ID_NONE) { + kmaflags |= KMA_KHEAP; } + kr = kernel_memory_allocate(submap_for_zone(z), + &space, alloc_size, 0, kmaflags, VM_KERN_MEMORY_ZONE); + } - kr = kernel_memory_allocate(zone_map, &space, alloc_size, 0, zflags, VM_KERN_MEMORY_ZONE); - - if (kr == KERN_SUCCESS) { - zcram(z, space, alloc_size); - } else if (kr == KERN_RESOURCE_SHORTAGE) { - VM_PAGE_WAIT(); - } else if (kr == KERN_NO_SPACE) { - kr = kernel_memory_allocate(kernel_map, &space, alloc_size, 0, zflags, VM_KERN_MEMORY_ZONE); - if (kr == KERN_SUCCESS) { - zcram(z, space, alloc_size); - } else { - assert_wait_timeout(&z->zone_replenish_thread, THREAD_UNINT, 1, 100 * NSEC_PER_USEC); - thread_block(THREAD_CONTINUE_NULL); - } - } +#if !__LP64__ + if (kr == KERN_NO_SPACE && z->allows_foreign) { + /* + * For zones allowing foreign pages, fallback to the kernel map + */ + kr = kernel_memory_allocate(kernel_map, &space, + alloc_size, 0, kmaflags, VM_KERN_MEMORY_ZONE); + } +#endif + + if (kr == KERN_SUCCESS) { + break; + } + if (flags & Z_NOPAGEWAIT) { lock_zone(z); - assert(z->zone_valid); - zone_replenish_loops++; + return; } - /* Wakeup any potentially throttled allocations. */ - thread_wakeup(z); + if (asynchronously) { + assert_wait_timeout(&z->prio_refill_count, + THREAD_UNINT, 1, 100 * NSEC_PER_USEC); + thread_block(THREAD_CONTINUE_NULL); + } else if (++retry == 3) { + zone_replenish_panic(z, kr); + } - assert_wait(&z->zone_replenish_thread, THREAD_UNINT); + lock_zone(z); + } - /* - * We finished refilling the zone, so decrement the active count - * and wake up any waiting GC threads. - */ - lck_spin_lock(&zone_replenish_lock); + zcram_and_lock(z, space, alloc_size); + +#if CONFIG_ZLEAKS + if (__improbable(zleak_state & ZLEAK_STATE_ACTIVE)) { + if (!z->zleak_on && + zone_size_wired(z) >= zleak_per_zone_tracking_threshold) { + z->zleak_on = true; + } + } +#endif /* CONFIG_ZLEAKS */ +} + +/* + * High priority VM privileged thread used to asynchronously refill a given zone. + * These are needed for data structures used by the lower level VM itself. The + * replenish thread maintains a reserve of elements, so that the VM will never + * block in the zone allocator. 
+ */ +__dead2 +static void +zone_replenish_thread(void *_z, wait_result_t __unused wr) +{ + zone_t z = _z; + + current_thread()->options |= (TH_OPT_VMPRIV | TH_OPT_ZONE_PRIV); + + for (;;) { + lock_zone(z); + assert(z->z_self == z); + assert(z->zone_replenishing); + assert(z->prio_refill_count != 0); + + while (z->countfree < z->prio_refill_count) { + assert(!z->expanding_no_vm_priv); + assert(!z->expanding_vm_priv); + + zone_replenish_locked(z, Z_WAITOK, true); + + assert(z->z_self == z); + zone_replenish_loops++; + } + + /* Wakeup any potentially throttled allocations. */ + thread_wakeup(z); + + assert_wait(&z->prio_refill_count, THREAD_UNINT); + + /* + * We finished refilling the zone, so decrement the active count + * and wake up any waiting GC threads. + */ + lck_spin_lock(&zone_replenish_lock); assert(zone_replenish_active > 0); if (--zone_replenish_active == 0) { thread_wakeup((event_t)&zone_replenish_active); } lck_spin_unlock(&zone_replenish_lock); - z->zone_replenishing = FALSE; + z->zone_replenishing = false; unlock_zone(z); + thread_block(THREAD_CONTINUE_NULL); zone_replenish_wakeups++; } @@ -2708,255 +3587,225 @@ zone_replenish_thread(zone_t z) void zone_prio_refill_configure(zone_t z) { - z->prio_refill_count = ZONE_REPLENISH_TARGET / z->elem_size; + thread_t th; + kern_return_t tres; + + lock_zone(z); + assert(!z->prio_refill_count && !z->destructible); + z->prio_refill_count = (uint16_t)(ZONE_REPLENISH_TARGET / zone_elem_size(z)); + z->zone_replenishing = true; + unlock_zone(z); - z->async_prio_refill = TRUE; - z->zone_replenishing = TRUE; lck_spin_lock(&zone_replenish_lock); ++zone_replenish_max_threads; ++zone_replenish_active; lck_spin_unlock(&zone_replenish_lock); OSMemoryBarrier(); - kern_return_t tres = kernel_thread_start_priority((thread_continue_t)zone_replenish_thread, z, MAXPRI_KERNEL, &z->zone_replenish_thread); + tres = kernel_thread_start_priority(zone_replenish_thread, z, + MAXPRI_KERNEL, &th); if (tres != KERN_SUCCESS) { panic("zone_prio_refill_configure, thread create: 0x%x", tres); } - thread_deallocate(z->zone_replenish_thread); -} - -void -zdestroy(zone_t z) -{ - unsigned int zindex; - - assert(z != NULL); - - lock_zone(z); - assert(z->zone_valid); - - /* Assert that the zone does not have any allocations in flight */ - assert(z->doing_alloc_without_vm_priv == FALSE); - assert(z->doing_alloc_with_vm_priv == FALSE); - assert(z->async_pending == FALSE); - assert(z->waiting == FALSE); - assert(z->async_prio_refill == FALSE); - -#if !KASAN_ZALLOC - /* - * Unset the valid bit. We'll hit an assert failure on further operations on this zone, until zinit() is called again. - * Leave the zone valid for KASan as we will see zfree's on quarantined free elements even after the zone is destroyed. - */ - z->zone_valid = FALSE; -#endif - z->zone_destruction = TRUE; - unlock_zone(z); - -#if CONFIG_ZCACHE - /* Drain the per-cpu caches if caching is enabled for the zone. 
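As a worked example of the reserve sizing in zone_prio_refill_configure(): with ZONE_REPLENISH_TARGET at 16 KiB and a hypothetical 256-byte element, prio_refill_count comes out to 64 elements. Per the refill policy described above, the replenish thread is woken once the free count drops below 32 (half the target), and ordinary allocations may keep drawing the reserve down to 16 elements before they are throttled in favour of the replenisher.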
*/ - if (zone_caching_enabled(z)) { - panic("zdestroy: Zone caching enabled for zone %s", z->zone_name); - } -#endif /* CONFIG_ZCACHE */ - - /* Dump all the free elements */ - drop_free_elements(z); - -#if CONFIG_GZALLOC - /* If the zone is gzalloc managed dump all the elements in the free cache */ - gzalloc_empty_free_cache(z); -#endif - - lock_zone(z); - -#if !KASAN_ZALLOC - /* Assert that all counts are zero */ - assert(z->count == 0); - assert(z->countfree == 0); - assert(z->cur_size == 0); - assert(z->page_count == 0); - assert(z->count_all_free_pages == 0); - - /* Assert that all queues except the foreign queue are empty. The zone allocator doesn't know how to free up foreign memory. */ - assert(queue_empty(&z->pages.all_used)); - assert(queue_empty(&z->pages.intermediate)); - assert(queue_empty(&z->pages.all_free)); -#endif - - zindex = z->index; - - unlock_zone(z); - - simple_lock(&all_zones_lock, &zone_locks_grp); - - assert(!bitmap_test(zone_empty_bitmap, zindex)); - /* Mark the zone as empty in the bitmap */ - bitmap_set(zone_empty_bitmap, zindex); - num_zones_in_use--; - assert(num_zones_in_use > 0); - - simple_unlock(&all_zones_lock); + thread_deallocate(th); } -/* Initialize the metadata for an allocation chunk */ -static inline void -zcram_metadata_init(vm_offset_t newmem, vm_size_t size, struct zone_page_metadata *chunk_metadata) +static void +zone_randomize_freelist(zone_t zone, struct zone_page_metadata *meta, + vm_offset_t size, zone_addr_kind_t kind, unsigned int *entropy_buffer) { - struct zone_page_metadata *page_metadata; + const vm_size_t elem_size = zone_elem_size(zone); + vm_offset_t left, right, head, base; + vm_offset_t element; + + left = ZONE_PAGE_FIRST_OFFSET(kind); + right = size - ((size - left) % elem_size); + head = 0; + base = zone_meta_to_addr(meta, kind); + + while (left < right) { + if (zone_leaks_scan_enable || __improbable(zone->tags) || + random_bool_gen_bits(&zone_bool_gen, entropy_buffer, MAX_ENTROPY_PER_ZCRAM, 1)) { + element = base + left; + left += elem_size; + } else { + right -= elem_size; + element = base + right; + } - /* The first page is the real metadata for this allocation chunk. 
We mark the others as fake metadata */ - size -= PAGE_SIZE; - newmem += PAGE_SIZE; + vm_offset_t *primary = (vm_offset_t *)element; + vm_offset_t *backup = get_backup_ptr(elem_size, primary); - for (; size > 0; newmem += PAGE_SIZE, size -= PAGE_SIZE) { - page_metadata = get_zone_page_metadata((struct zone_free_element *)newmem, TRUE); - assert(page_metadata != chunk_metadata); - PAGE_METADATA_SET_ZINDEX(page_metadata, MULTIPAGE_METADATA_MAGIC); - page_metadata_set_realmeta(page_metadata, chunk_metadata); - page_metadata->free_count = 0; + *primary = *backup = head ^ zp_nopoison_cookie; + head = element; } - return; -} - -static void -random_free_to_zone( - zone_t zone, - vm_offset_t newmem, - vm_offset_t first_element_offset, - int element_count, - unsigned int *entropy_buffer) -{ - vm_offset_t last_element_offset; - vm_offset_t element_addr; - vm_size_t elem_size; - int index; - - assert(element_count && element_count <= ZONE_CHUNK_MAXELEMENTS); - elem_size = zone->elem_size; - last_element_offset = first_element_offset + ((element_count * elem_size) - elem_size); - for (index = 0; index < element_count; index++) { - assert(first_element_offset <= last_element_offset); - if ( -#if DEBUG || DEVELOPMENT - leak_scan_debug_flag || __improbable(zone->tags) || -#endif /* DEBUG || DEVELOPMENT */ - random_bool_gen_bits(&zone_bool_gen, entropy_buffer, MAX_ENTROPY_PER_ZCRAM, 1)) { - element_addr = newmem + first_element_offset; - first_element_offset += elem_size; - } else { - element_addr = newmem + last_element_offset; - last_element_offset -= elem_size; - } - if (element_addr != (vm_offset_t)zone) { - zone->count++; /* compensate for free_to_zone */ - free_to_zone(zone, element_addr, FALSE); - } - zone->cur_size += elem_size; - } + meta->zm_freelist_offs = (uint16_t)(head - base); } /* * Cram the given memory into the specified zone. Update the zone page count accordingly. */ -void -zcram( - zone_t zone, - vm_offset_t newmem, - vm_size_t size) +static void +zcram_and_lock(zone_t zone, vm_offset_t newmem, vm_size_t size) { - vm_size_t elem_size; - boolean_t from_zm = FALSE; - int element_count; unsigned int entropy_buffer[MAX_ENTROPY_PER_ZCRAM] = { 0 }; + struct zone_page_metadata *meta; + zone_addr_kind_t kind; + uint32_t pg_count = (uint32_t)atop(size); + uint32_t zindex = zone_index(zone); + uint32_t free_count; + uint16_t empty_freelist_offs = PAGE_METADATA_EMPTY_FREELIST; /* Basic sanity checks */ assert(zone != ZONE_NULL && newmem != (vm_offset_t)0); - assert(!zone->collectable || zone->allows_foreign - || (from_zone_map(newmem, size))); - - elem_size = zone->elem_size; - - KDBG(MACHDBG_CODE(DBG_MACH_ZALLOC, ZALLOC_ZCRAM) | DBG_FUNC_START, zone->index, size); - - if (from_zone_map(newmem, size)) { - from_zm = TRUE; - } + assert((newmem & PAGE_MASK) == 0); + assert((size & PAGE_MASK) == 0); - if (!from_zm) { - /* We cannot support elements larger than page size for foreign memory because we - * put metadata on the page itself for each page of foreign memory. We need to do - * this in order to be able to reach the metadata when any element is freed - */ - assert((zone->allows_foreign == TRUE) && (zone->elem_size <= (PAGE_SIZE - sizeof(struct zone_page_metadata)))); - } + KDBG(MACHDBG_CODE(DBG_MACH_ZALLOC, ZALLOC_ZCRAM) | DBG_FUNC_START, + zindex, size); + kind = zone_addr_kind(newmem, size); #if DEBUG || DEVELOPMENT if (zalloc_debug & ZALLOC_DEBUG_ZCRAM) { - kprintf("zcram(%p[%s], 0x%lx%s, 0x%lx)\n", zone, zone->zone_name, - (unsigned long)newmem, from_zm ? 
"" : "[F]", (unsigned long)size); + kprintf("zcram(%p[%s%s], 0x%lx%s, 0x%lx)\n", zone, + zone_heap_name(zone), zone->z_name, (uintptr_t)newmem, + kind == ZONE_ADDR_FOREIGN ? "[F]" : "", (uintptr_t)size); } #endif /* DEBUG || DEVELOPMENT */ - ZONE_PAGE_COUNT_INCR(zone, (size / PAGE_SIZE)); - /* * Initialize the metadata for all pages. We dont need the zone lock * here because we are not manipulating any zone related state yet. + * + * This includes randomizing the freelists as the metadata isn't + * published yet. */ - struct zone_page_metadata *chunk_metadata; - size_t zone_page_metadata_size = sizeof(struct zone_page_metadata); + if (kind == ZONE_ADDR_NATIVE) { + /* + * We're being called by zfill, + * zone_replenish_thread or vm_page_more_fictitious, + * + * which will only either allocate a single page, or `alloc_pages` + * worth. + */ + assert(pg_count <= zone->alloc_pages); - assert((newmem & PAGE_MASK) == 0); - assert((size & PAGE_MASK) == 0); + /* + * Make sure the range of metadata entries we're about to init + * have proper physical backing, then initialize them. + */ + meta = zone_meta_from_addr(newmem, kind); + zone_meta_populate(meta, meta + pg_count); + + if (zone->permanent) { + empty_freelist_offs = 0; + } + + meta[0] = (struct zone_page_metadata){ + .zm_index = zindex, + .zm_page_count = pg_count, + .zm_percpu = zone->percpu, + .zm_freelist_offs = empty_freelist_offs, + }; + + for (uint32_t i = 1; i < pg_count; i++) { + meta[i] = (struct zone_page_metadata){ + .zm_index = zindex, + .zm_page_count = i, + .zm_percpu = zone->percpu, + .zm_secondary_page = true, + .zm_freelist_offs = empty_freelist_offs, + }; + } - chunk_metadata = get_zone_page_metadata((struct zone_free_element *)newmem, TRUE); - chunk_metadata->pages.next = NULL; - chunk_metadata->pages.prev = NULL; - page_metadata_set_freelist(chunk_metadata, 0); - PAGE_METADATA_SET_ZINDEX(chunk_metadata, zone->index); - chunk_metadata->free_count = 0; - assert((size / PAGE_SIZE) <= ZONE_CHUNK_MAXPAGES); - chunk_metadata->page_count = (unsigned)(size / PAGE_SIZE); + if (!zone->permanent) { + zone_randomize_freelist(zone, meta, + zone->percpu ? PAGE_SIZE : size, kind, entropy_buffer); + } + } else { + if (!zone->allows_foreign || !from_foreign_range(newmem, size)) { + panic("zcram_and_lock: foreign memory [%lx] being crammed is " + "outside of foreign range", (uintptr_t)newmem); + } - zcram_metadata_init(newmem, size, chunk_metadata); + /* + * We cannot support elements larger than page size for foreign + * memory because we put metadata on the page itself for each + * page of foreign memory. + * + * We need to do this in order to be able to reach the metadata + * when any element is freed. + */ + assert(!zone->percpu && !zone->permanent); + assert(zone_elem_size(zone) <= PAGE_SIZE - sizeof(struct zone_page_metadata)); + + bzero((void *)newmem, size); + + for (vm_offset_t offs = 0; offs < size; offs += PAGE_SIZE) { + meta = (struct zone_page_metadata *)(newmem + offs); + *meta = (struct zone_page_metadata){ + .zm_index = zindex, + .zm_page_count = 1, + .zm_freelist_offs = empty_freelist_offs, + }; + meta->zm_foreign_cookie[0] = ZONE_FOREIGN_COOKIE; + zone_randomize_freelist(zone, meta, PAGE_SIZE, kind, + entropy_buffer); + } + } #if VM_MAX_TAG_ZONES if (__improbable(zone->tags)) { - assert(from_zm); + assert(kind == ZONE_ADDR_NATIVE && !zone->percpu); ztMemoryAdd(zone, newmem, size); } #endif /* VM_MAX_TAG_ZONES */ + /* + * Insert the initialized pages / metadatas into the right lists. 
+ */ + lock_zone(zone); - assert(zone->zone_valid); - enqueue_tail(&zone->pages.all_used, &(chunk_metadata->pages)); + assert(zone->z_self == zone); - if (!from_zm) { - /* We cannot support elements larger than page size for foreign memory because we - * put metadata on the page itself for each page of foreign memory. We need to do - * this in order to be able to reach the metadata when any element is freed - */ + zone->page_count += pg_count; + if (zone->page_count_hwm < zone->page_count) { + zone->page_count_hwm = zone->page_count; + } + os_atomic_add(&zones_phys_page_count, pg_count, relaxed); - for (; size > 0; newmem += PAGE_SIZE, size -= PAGE_SIZE) { - vm_offset_t first_element_offset = 0; - if (zone_page_metadata_size % ZONE_ELEMENT_ALIGNMENT == 0) { - first_element_offset = zone_page_metadata_size; - } else { - first_element_offset = zone_page_metadata_size + (ZONE_ELEMENT_ALIGNMENT - (zone_page_metadata_size % ZONE_ELEMENT_ALIGNMENT)); - } - element_count = (unsigned int)((PAGE_SIZE - first_element_offset) / elem_size); - random_free_to_zone(zone, newmem, first_element_offset, element_count, entropy_buffer); + if (kind == ZONE_ADDR_NATIVE) { + os_atomic_add(&zones_phys_page_mapped_count, pg_count, relaxed); + if (zone->permanent) { + zone_meta_queue_push(zone, &zone->pages_intermediate, meta, kind); + } else { + zone_meta_queue_push(zone, &zone->pages_all_free, meta, kind); + zone->allfree_page_count += meta->zm_page_count; } + free_count = zone_elem_count(zone, size, kind); + zone->countfree += free_count; + zone->countavail += free_count; } else { - element_count = (unsigned int)(size / elem_size); - random_free_to_zone(zone, newmem, 0, element_count, entropy_buffer); + free_count = zone_elem_count(zone, PAGE_SIZE, kind); + for (vm_offset_t offs = 0; offs < size; offs += PAGE_SIZE) { + meta = (struct zone_page_metadata *)(newmem + offs); + zone_meta_queue_push(zone, &zone->pages_any_free_foreign, meta, kind); + zone->countfree += free_count; + zone->countavail += free_count; + } } - unlock_zone(zone); - KDBG(MACHDBG_CODE(DBG_MACH_ZALLOC, ZALLOC_ZCRAM) | DBG_FUNC_END, zone->index); + KDBG(MACHDBG_CODE(DBG_MACH_ZALLOC, ZALLOC_ZCRAM) | DBG_FUNC_END, zindex); +} + +void +zcram(zone_t zone, vm_offset_t newmem, vm_size_t size) +{ + zcram_and_lock(zone, newmem, size); + unlock_zone(zone); } /* @@ -2971,120 +3820,66 @@ zfill( int nelem) { kern_return_t kr; - vm_offset_t memory; + vm_offset_t memory; - vm_size_t alloc_size = zone->alloc_size; - vm_size_t elem_per_alloc = alloc_size / zone->elem_size; - vm_size_t nalloc = (nelem + elem_per_alloc - 1) / elem_per_alloc; - int zflags = KMA_KOBJECT; + vm_size_t alloc_size = ptoa(zone->alloc_pages); + vm_size_t nalloc_inc = zone_elem_count(zone, alloc_size, ZONE_ADDR_NATIVE); + vm_size_t nalloc = 0, goal = MAX(0, nelem); + int kmaflags = KMA_KOBJECT | KMA_ZERO; - if (zone->clear_memory) { - zflags |= KMA_ZERO; + if (zone->noencrypt) { + kmaflags |= KMA_NOENCRYPT; } - /* Don't mix-and-match zfill with foreign memory */ - assert(!zone->allows_foreign); + assert(!zone->allows_foreign && !zone->permanent); - /* Trigger jetsams via the vm_pageout_garbage_collect thread if we're running out of zone memory */ + /* + * Trigger jetsams via the vm_pageout_garbage_collect thread if we're + * running out of zone memory + */ if (is_zone_map_nearing_exhaustion()) { thread_wakeup((event_t) &vm_pageout_garbage_collect); } - kr = kernel_memory_allocate(zone_map, &memory, nalloc * alloc_size, 0, zflags, VM_KERN_MEMORY_ZONE); - if (kr != KERN_SUCCESS) { - 
printf("%s: kernel_memory_allocate() of %lu bytes failed\n", - __func__, (unsigned long)(nalloc * alloc_size)); - return 0; - } - - for (vm_size_t i = 0; i < nalloc; i++) { - zcram(zone, memory + i * alloc_size, alloc_size); - } - - return (int)(nalloc * elem_per_alloc); -} - -/* - * Initialize the "zone of zones" which uses fixed memory allocated - * earlier in memory initialization. zone_bootstrap is called - * before zone_init. - */ -void -zone_bootstrap(void) -{ - char temp_buf[16]; - -#if DEBUG || DEVELOPMENT - if (!PE_parse_boot_argn("zalloc_debug", &zalloc_debug, sizeof(zalloc_debug))) { - zalloc_debug = 0; - } -#endif /* DEBUG || DEVELOPMENT */ + if (zone->va_sequester) { + lock_zone(zone); - /* Set up zone element poisoning */ - zp_init(); + do { + struct zone_page_metadata *page_meta; + page_meta = zone_sequestered_page_get(zone, &memory); + if (NULL == page_meta) { + break; + } + unlock_zone(zone); - random_bool_init(&zone_bool_gen); + kr = zone_sequestered_page_populate(zone, page_meta, + memory, alloc_size, kmaflags); + if (KERN_SUCCESS != kr) { + goto out_nolock; + } - /* should zlog log to debug zone corruption instead of leaks? */ - if (PE_parse_boot_argn("-zc", temp_buf, sizeof(temp_buf))) { - corruption_debug_flag = TRUE; - } + zcram_and_lock(zone, memory, alloc_size); + nalloc += nalloc_inc; + } while (nalloc < goal); -#if DEBUG || DEVELOPMENT - /* should perform zone element size checking in copyin/copyout? */ - if (PE_parse_boot_argn("-no-copyio-zalloc-check", temp_buf, sizeof(temp_buf))) { - copyio_zalloc_check = FALSE; - } -#if VM_MAX_TAG_ZONES - /* enable tags for zones that ask for */ - if (PE_parse_boot_argn("-zt", temp_buf, sizeof(temp_buf))) { - zone_tagging_on = TRUE; - } -#endif /* VM_MAX_TAG_ZONES */ - /* disable element location randomization in a page */ - if (PE_parse_boot_argn("-zl", temp_buf, sizeof(temp_buf))) { - leak_scan_debug_flag = TRUE; + unlock_zone(zone); } -#endif - simple_lock_init(&all_zones_lock, 0); - - num_zones_in_use = 0; - num_zones = 0; - /* Mark all zones as empty */ - bitmap_full(zone_empty_bitmap, BITMAP_LEN(MAX_ZONES)); - zone_names_next = zone_names_start = 0; - -#if DEBUG || DEVELOPMENT - simple_lock_init(&zone_test_lock, 0); -#endif /* DEBUG || DEVELOPMENT */ - - thread_call_setup(&call_async_alloc, zalloc_async, NULL); - - /* initializing global lock group for zones */ - lck_grp_attr_setdefault(&zone_locks_grp_attr); - lck_grp_init(&zone_locks_grp, "zone_locks", &zone_locks_grp_attr); - - lck_attr_setdefault(&zone_metadata_lock_attr); - lck_mtx_init_ext(&zone_metadata_region_lck, &zone_metadata_region_lck_ext, &zone_locks_grp, &zone_metadata_lock_attr); - - lck_grp_attr_setdefault(&zone_replenish_lock_grp_attr); - lck_grp_init(&zone_replenish_lock_grp, "zone_replenish_lock", &zone_replenish_lock_grp_attr); - lck_attr_setdefault(&zone_replenish_lock_attr); - lck_spin_init(&zone_replenish_lock, &zone_replenish_lock_grp, &zone_replenish_lock_attr); +out_nolock: + while (nalloc < goal) { + kr = kernel_memory_allocate(submap_for_zone(zone), &memory, + alloc_size, 0, kmaflags, VM_KERN_MEMORY_ZONE); + if (kr != KERN_SUCCESS) { + printf("%s: kernel_memory_allocate() of %lu bytes failed\n", + __func__, (unsigned long)(nalloc * alloc_size)); + break; + } -#if CONFIG_ZCACHE - /* zcc_enable_for_zone_name=: enable per-cpu zone caching for . 
*/ - if (PE_parse_boot_arg_str("zcc_enable_for_zone_name", cache_zone_name, sizeof(cache_zone_name))) { - printf("zcache: caching enabled for zone %s\n", cache_zone_name); + zcram(zone, memory, alloc_size); + nalloc += nalloc_inc; } - /* -zcache_all: enable per-cpu zone caching for all zones, overrides 'zcc_enable_for_zone_name'. */ - if (PE_parse_boot_argn("-zcache_all", temp_buf, sizeof(temp_buf))) { - cache_all_zones = TRUE; - printf("zcache: caching enabled for all zones\n"); - } -#endif /* CONFIG_ZCACHE */ + return (int)nalloc; } /* @@ -3097,47 +3892,38 @@ zone_bootstrap(void) * Trigger zone-map-exhaustion jetsams if the zone map is X% full, where X=zone_map_jetsam_limit. * Can be set via boot-arg "zone_map_jetsam_limit". Set to 95% by default. */ -unsigned int zone_map_jetsam_limit = ZONE_MAP_JETSAM_LIMIT_DEFAULT; - -/* - * Returns pid of the task with the largest number of VM map entries. - */ -extern pid_t find_largest_process_vm_map_entries(void); - -/* - * Callout to jetsam. If pid is -1, we wake up the memorystatus thread to do asynchronous kills. - * For any other pid we try to kill that process synchronously. - */ -boolean_t memorystatus_kill_on_zone_map_exhaustion(pid_t pid); +TUNABLE_WRITEABLE(unsigned int, zone_map_jetsam_limit, "zone_map_jetsam_limit", + ZONE_MAP_JETSAM_LIMIT_DEFAULT); void get_zone_map_size(uint64_t *current_size, uint64_t *capacity) { - *current_size = zone_map->size; - *capacity = vm_map_max(zone_map) - vm_map_min(zone_map); + vm_offset_t phys_pages = os_atomic_load(&zones_phys_page_mapped_count, relaxed); + *current_size = ptoa_64(phys_pages); + *capacity = zone_phys_mapped_max; } void get_largest_zone_info(char *zone_name, size_t zone_name_len, uint64_t *zone_size) { zone_t largest_zone = zone_find_largest(); - strlcpy(zone_name, largest_zone->zone_name, zone_name_len); - *zone_size = largest_zone->cur_size; + + /* + * Append kalloc heap name to zone name (if zone is used by kalloc) + */ + snprintf(zone_name, zone_name_len, "%s%s", + zone_heap_name(largest_zone), largest_zone->z_name); + + *zone_size = zone_size_wired(largest_zone); } boolean_t is_zone_map_nearing_exhaustion(void) { - uint64_t size = zone_map->size; - uint64_t capacity = vm_map_max(zone_map) - vm_map_min(zone_map); - if (size > ((capacity * zone_map_jetsam_limit) / 100)) { - return TRUE; - } - return FALSE; + vm_offset_t phys_pages = os_atomic_load(&zones_phys_page_mapped_count, relaxed); + return ptoa_64(phys_pages) > (zone_phys_mapped_max * zone_map_jetsam_limit) / 100; } -extern zone_t vm_map_entry_zone; -extern zone_t vm_object_zone; #define VMENTRY_TO_VMOBJECT_COMPARISON_RATIO 98 @@ -3151,27 +3937,38 @@ kill_process_in_largest_zone(void) pid_t pid = -1; zone_t largest_zone = zone_find_largest(); - printf("zone_map_exhaustion: Zone map size %lld, capacity %lld [jetsam limit %d%%]\n", (uint64_t)zone_map->size, - (uint64_t)(vm_map_max(zone_map) - vm_map_min(zone_map)), zone_map_jetsam_limit); - printf("zone_map_exhaustion: Largest zone %s, size %lu\n", largest_zone->zone_name, (uintptr_t)largest_zone->cur_size); + printf("zone_map_exhaustion: Zone mapped %lld of %lld, used %lld, map size %lld, capacity %lld [jetsam limit %d%%]\n", + ptoa_64(os_atomic_load(&zones_phys_page_mapped_count, relaxed)), ptoa_64(zone_phys_mapped_max), + ptoa_64(os_atomic_load(&zones_phys_page_count, relaxed)), + (uint64_t)zone_submaps_approx_size(), + (uint64_t)zone_range_size(&zone_info.zi_map_range), + zone_map_jetsam_limit); + printf("zone_map_exhaustion: Largest zone %s%s, size %lu\n", 
zone_heap_name(largest_zone), + largest_zone->z_name, (uintptr_t)zone_size_wired(largest_zone)); /* - * We want to make sure we don't call this function from userspace. Or we could end up trying to synchronously kill the process + * We want to make sure we don't call this function from userspace. + * Or we could end up trying to synchronously kill the process * whose context we're in, causing the system to hang. */ assert(current_task() == kernel_task); /* - * If vm_object_zone is the largest, check to see if the number of elements in vm_map_entry_zone is comparable. If so, consider - * vm_map_entry_zone as the largest. This lets us target a specific process to jetsam to quickly recover from the zone map bloat. + * If vm_object_zone is the largest, check to see if the number of + * elements in vm_map_entry_zone is comparable. + * + * If so, consider vm_map_entry_zone as the largest. This lets us target + * a specific process to jetsam to quickly recover from the zone map + * bloat. */ if (largest_zone == vm_object_zone) { - unsigned int vm_object_zone_count = vm_object_zone->count; - unsigned int vm_map_entry_zone_count = vm_map_entry_zone->count; + unsigned int vm_object_zone_count = zone_count_allocated(vm_object_zone); + unsigned int vm_map_entry_zone_count = zone_count_allocated(vm_map_entry_zone); /* Is the VM map entries zone count >= 98% of the VM objects zone count? */ if (vm_map_entry_zone_count >= ((vm_object_zone_count * VMENTRY_TO_VMOBJECT_COMPARISON_RATIO) / 100)) { largest_zone = vm_map_entry_zone; - printf("zone_map_exhaustion: Picking VM map entries as the zone to target, size %lu\n", (uintptr_t)largest_zone->cur_size); + printf("zone_map_exhaustion: Picking VM map entries as the zone to target, size %lu\n", + (uintptr_t)zone_size_wired(largest_zone)); } } @@ -3179,330 +3976,520 @@ kill_process_in_largest_zone(void) if (largest_zone == vm_map_entry_zone) { pid = find_largest_process_vm_map_entries(); } else { - printf("zone_map_exhaustion: Nothing to do for the largest zone [%s]. Waking up memorystatus thread.\n", largest_zone->zone_name); + printf("zone_map_exhaustion: Nothing to do for the largest zone [%s%s]. " + "Waking up memorystatus thread.\n", zone_heap_name(largest_zone), + largest_zone->z_name); } if (!memorystatus_kill_on_zone_map_exhaustion(pid)) { printf("zone_map_exhaustion: Call to memorystatus failed, victim pid: %d\n", pid); } } -/* Global initialization of Zone Allocator. - * Runs after zone_bootstrap. +#pragma mark zalloc module init + +/* + * Initialize the "zone of zones" which uses fixed memory allocated + * earlier in memory initialization. zone_bootstrap is called + * before zone_init. 
*/ +__startup_func void -zone_init( - vm_size_t max_zonemap_size) -{ - kern_return_t retval; - vm_offset_t zone_min; - vm_offset_t zone_max; - vm_offset_t zone_metadata_space; - unsigned int zone_pages; - vm_map_kernel_flags_t vmk_flags; +zone_bootstrap(void) +{ + /* Validate struct zone_page_metadata expectations */ + if ((1U << ZONE_PAGECOUNT_BITS) < + atop(ZONE_MAX_ALLOC_SIZE) * sizeof(struct zone_page_metadata)) { + panic("ZONE_PAGECOUNT_BITS is not large enough to hold page counts"); + } -#if VM_MAX_TAG_ZONES - if (zone_tagging_on) { - ztInit(max_zonemap_size, &zone_locks_grp); + /* Validate struct zone_packed_virtual_address expectations */ + static_assert((intptr_t)VM_MIN_KERNEL_ADDRESS < 0, "the top bit must be 1"); + if (VM_KERNEL_POINTER_SIGNIFICANT_BITS - PAGE_SHIFT > 31) { + panic("zone_pva_t can't pack a kernel page address in 31 bits"); } -#endif - vmk_flags = VM_MAP_KERNEL_FLAGS_NONE; - vmk_flags.vmkf_permanent = TRUE; - retval = kmem_suballoc(kernel_map, &zone_min, max_zonemap_size, - FALSE, VM_FLAGS_ANYWHERE, vmk_flags, VM_KERN_MEMORY_ZONE, - &zone_map); + zpercpu_early_count = ml_early_cpu_max_number() + 1; - if (retval != KERN_SUCCESS) { - panic("zone_init: kmem_suballoc failed"); - } - zone_max = zone_min + round_page(max_zonemap_size); + /* Set up zone element poisoning */ + zp_bootstrap(); -#if CONFIG_GZALLOC - gzalloc_init(max_zonemap_size); -#endif + random_bool_init(&zone_bool_gen); /* - * Setup garbage collection information: + * the KASAN quarantine for kalloc doesn't understand heaps + * and trips the heap confusion panics. At the end of the day, + * all these security measures are double duty with KASAN. + * + * On 32bit kernels, these protections are just too expensive. */ - zone_map_min_address = zone_min; - zone_map_max_address = zone_max; +#if !defined(__LP64__) || KASAN_ZALLOC + zsecurity_options &= ~ZSECURITY_OPTIONS_SEQUESTER; + zsecurity_options &= ~ZSECURITY_OPTIONS_SUBMAP_USER_DATA; + zsecurity_options &= ~ZSECURITY_OPTIONS_SEQUESTER_KEXT_KALLOC; +#endif - zone_pages = (unsigned int)atop_kernel(zone_max - zone_min); - zone_metadata_space = round_page(zone_pages * sizeof(struct zone_page_metadata)); - retval = kernel_memory_allocate(zone_map, &zone_metadata_region_min, zone_metadata_space, - 0, KMA_KOBJECT | KMA_VAONLY | KMA_PERMANENT, VM_KERN_MEMORY_OSFMK); - if (retval != KERN_SUCCESS) { - panic("zone_init: zone_metadata_region initialization failed!"); - } - zone_metadata_region_max = zone_metadata_region_min + zone_metadata_space; + thread_call_setup(&call_async_alloc, zalloc_async, NULL); -#if defined(__LP64__) - /* - * ensure that any vm_page_t that gets created from - * the vm_page zone can be packed properly (see vm_page.h - * for the packing requirements - */ - if ((vm_page_t)(VM_PAGE_UNPACK_PTR(VM_PAGE_PACK_PTR(zone_metadata_region_max))) != (vm_page_t)zone_metadata_region_max) { - panic("VM_PAGE_PACK_PTR failed on zone_metadata_region_max - %p", (void *)zone_metadata_region_max); +#if CONFIG_ZCACHE + /* zcc_enable_for_zone_name=: enable per-cpu zone caching for . 
*/ + if (PE_parse_boot_arg_str("zcc_enable_for_zone_name", cache_zone_name, sizeof(cache_zone_name))) { + printf("zcache: caching enabled for zone %s\n", cache_zone_name); } +#endif /* CONFIG_ZCACHE */ +} - if ((vm_page_t)(VM_PAGE_UNPACK_PTR(VM_PAGE_PACK_PTR(zone_map_max_address))) != (vm_page_t)zone_map_max_address) { - panic("VM_PAGE_PACK_PTR failed on zone_map_max_address - %p", (void *)zone_map_max_address); - } +#if __LP64__ +#if CONFIG_EMBEDDED +#define ZONE_MAP_VIRTUAL_SIZE_LP64 (32ULL * 1024ULL * 1024 * 1024) +#else +#define ZONE_MAP_VIRTUAL_SIZE_LP64 (128ULL * 1024ULL * 1024 * 1024) #endif +#endif /* __LP64__ */ - lck_grp_attr_setdefault(&zone_gc_lck_grp_attr); - lck_grp_init(&zone_gc_lck_grp, "zone_gc", &zone_gc_lck_grp_attr); - lck_attr_setdefault(&zone_gc_lck_attr); - lck_mtx_init_ext(&zone_gc_lock, &zone_gc_lck_ext, &zone_gc_lck_grp, &zone_gc_lck_attr); +#define SINGLE_GUARD 16384 +#define MULTI_GUARD (3 * SINGLE_GUARD) -#if CONFIG_ZLEAKS - /* - * Initialize the zone leak monitor - */ - zleak_init(max_zonemap_size); -#endif /* CONFIG_ZLEAKS */ +#if __LP64__ +static inline vm_offset_t +zone_restricted_va_max(void) +{ + vm_offset_t compressor_max = VM_PACKING_MAX_PACKABLE(C_SLOT_PACKED_PTR); + vm_offset_t vm_page_max = VM_PACKING_MAX_PACKABLE(VM_PAGE_PACKED_PTR); -#if VM_MAX_TAG_ZONES - if (zone_tagging_on) { - vm_allocation_zones_init(); - } + return trunc_page(MIN(compressor_max, vm_page_max)); +} #endif - int jetsam_limit_temp = 0; - if (PE_parse_boot_argn("zone_map_jetsam_limit", &jetsam_limit_temp, sizeof(jetsam_limit_temp)) && - jetsam_limit_temp > 0 && jetsam_limit_temp <= 100) { - zone_map_jetsam_limit = jetsam_limit_temp; +__startup_func +static void +zone_tunables_fixup(void) +{ + if (zone_map_jetsam_limit == 0 || zone_map_jetsam_limit > 100) { + zone_map_jetsam_limit = ZONE_MAP_JETSAM_LIMIT_DEFAULT; } } +STARTUP(TUNABLES, STARTUP_RANK_MIDDLE, zone_tunables_fixup); -#pragma mark - -#pragma mark zalloc_canblock +__startup_func +static vm_size_t +zone_phys_size_max(void) +{ + mach_vm_size_t zsize; + vm_size_t zsizearg; -extern boolean_t early_boot_complete; + if (PE_parse_boot_argn("zsize", &zsizearg, sizeof(zsizearg))) { + zsize = zsizearg * (1024ULL * 1024); + } else { + zsize = sane_size >> 2; /* Set target zone size as 1/4 of physical memory */ +#if defined(__LP64__) + zsize += zsize >> 1; +#endif /* __LP64__ */ + } -void -zalloc_poison_element(boolean_t check_poison, zone_t zone, vm_offset_t addr) -{ - vm_offset_t inner_size = zone->elem_size; - if (__improbable(check_poison && addr)) { - vm_offset_t *element_cursor = ((vm_offset_t *) addr) + 1; - vm_offset_t *backup = get_backup_ptr(inner_size, (vm_offset_t *) addr); - - for (; element_cursor < backup; element_cursor++) { - if (__improbable(*element_cursor != ZP_POISON)) { - zone_element_was_modified_panic(zone, - addr, - *element_cursor, - ZP_POISON, - ((vm_offset_t)element_cursor) - addr); - } - } + if (zsize < CONFIG_ZONE_MAP_MIN) { + zsize = CONFIG_ZONE_MAP_MIN; /* Clamp to min */ + } + if (zsize > sane_size >> 1) { + zsize = sane_size >> 1; /* Clamp to half of RAM max */ + } + if (zsizearg == 0 && zsize > ZONE_MAP_MAX) { + /* if zsize boot-arg not present and zsize exceeds platform maximum, clip zsize */ + vm_size_t orig_zsize = zsize; + zsize = ZONE_MAP_MAX; + printf("NOTE: zonemap size reduced from 0x%lx to 0x%lx\n", + (uintptr_t)orig_zsize, (uintptr_t)zsize); } - if (addr) { - /* - * Clear out the old next pointer and backup to avoid leaking the cookie - * and so that only values on the freelist have a 
valid cookie - */ + assert((vm_size_t) zsize == zsize); + return (vm_size_t)trunc_page(zsize); +} + +__startup_func +static struct zone_map_range +zone_init_allocate_va(vm_offset_t *submap_min, vm_size_t size, bool guard) +{ + struct zone_map_range r; + kern_return_t kr; - vm_offset_t *primary = (vm_offset_t *) addr; - vm_offset_t *backup = get_backup_ptr(inner_size, primary); + if (guard) { + vm_map_offset_t addr = *submap_min; + vm_map_kernel_flags_t vmk_flags = VM_MAP_KERNEL_FLAGS_NONE; - *primary = ZP_POISON; - *backup = ZP_POISON; + vmk_flags.vmkf_permanent = TRUE; + kr = vm_map_enter(kernel_map, &addr, size, 0, + VM_FLAGS_FIXED, vmk_flags, VM_KERN_MEMORY_ZONE, kernel_object, + 0, FALSE, VM_PROT_NONE, VM_PROT_NONE, VM_INHERIT_DEFAULT); + *submap_min = (vm_offset_t)addr; + } else { + kr = kernel_memory_allocate(kernel_map, submap_min, size, + 0, KMA_KOBJECT | KMA_PAGEABLE | KMA_VAONLY, VM_KERN_MEMORY_ZONE); } + if (kr != KERN_SUCCESS) { + panic("zone_init_allocate_va(0x%lx:0x%zx) failed: %d", + (uintptr_t)*submap_min, (size_t)size, kr); + } + + r.min_address = *submap_min; + *submap_min += size; + r.max_address = *submap_min; + + return r; } -/* - * zalloc returns an element from the specified zone. +__startup_func +static void +zone_submap_init( + vm_offset_t *submap_min, + unsigned idx, + uint64_t zone_sub_map_numer, + uint64_t *remaining_denom, + vm_offset_t *remaining_size, + vm_size_t guard_size) +{ + vm_offset_t submap_start, submap_end; + vm_size_t submap_size; + vm_map_t submap; + kern_return_t kr; + + submap_size = trunc_page(zone_sub_map_numer * *remaining_size / + *remaining_denom); + submap_start = *submap_min; + submap_end = submap_start + submap_size; + +#if defined(__LP64__) + if (idx == Z_SUBMAP_IDX_VA_RESTRICTED_MAP) { + vm_offset_t restricted_va_max = zone_restricted_va_max(); + if (submap_end > restricted_va_max) { +#if DEBUG || DEVELOPMENT + printf("zone_init: submap[%d] clipped to %zdM of %zdM\n", idx, + (size_t)(restricted_va_max - submap_start) >> 20, + (size_t)submap_size >> 20); +#endif /* DEBUG || DEVELOPMENT */ + guard_size += submap_end - restricted_va_max; + *remaining_size -= submap_end - restricted_va_max; + submap_end = restricted_va_max; + submap_size = restricted_va_max - submap_start; + } + + vm_packing_verify_range("vm_compressor", + submap_start, submap_end, VM_PACKING_PARAMS(C_SLOT_PACKED_PTR)); + vm_packing_verify_range("vm_page", + submap_start, submap_end, VM_PACKING_PARAMS(VM_PAGE_PACKED_PTR)); + } +#endif /* defined(__LP64__) */ + + vm_map_kernel_flags_t vmk_flags = VM_MAP_KERNEL_FLAGS_NONE; + vmk_flags.vmkf_permanent = TRUE; + kr = kmem_suballoc(kernel_map, submap_min, submap_size, + FALSE, VM_FLAGS_FIXED, vmk_flags, + VM_KERN_MEMORY_ZONE, &submap); + if (kr != KERN_SUCCESS) { + panic("kmem_suballoc(kernel_map[%d] %p:%p) failed: %d", + idx, (void *)submap_start, (void *)submap_end, kr); + } + +#if DEBUG || DEVELOPMENT + printf("zone_init: submap[%d] %p:%p (%zuM)\n", + idx, (void *)submap_start, (void *)submap_end, + (size_t)submap_size >> 20); +#endif /* DEBUG || DEVELOPMENT */ + + zone_submaps[idx] = submap; + *submap_min = submap_end; + *remaining_size -= submap_size; + *remaining_denom -= zone_sub_map_numer; + + zone_init_allocate_va(submap_min, guard_size, true); +} + +/* Global initialization of Zone Allocator. + * Runs after zone_bootstrap. 
*/ -static void * -zalloc_internal( - zone_t zone, - boolean_t canblock, - boolean_t nopagewait, - vm_size_t -#if !VM_MAX_TAG_ZONES - __unused -#endif - reqsize, - vm_tag_t tag) +__startup_func +static void +zone_init(void) { - vm_offset_t addr = 0; - kern_return_t retval; - uintptr_t zbt[MAX_ZTRACE_DEPTH]; /* used in zone leak logging and zone leak detection */ - unsigned int numsaved = 0; - thread_t thr = current_thread(); - boolean_t check_poison = FALSE; - boolean_t set_doing_alloc_with_vm_priv = FALSE; - vm_size_t curr_free; - vm_size_t min_free; - vm_size_t resv_free; + vm_size_t zone_meta_size; + vm_size_t zone_map_size; + vm_size_t remaining_size; + vm_offset_t submap_min = 0; -#if CONFIG_ZLEAKS - uint32_t zleak_tracedepth = 0; /* log this allocation if nonzero */ -#endif /* CONFIG_ZLEAKS */ + if (ZSECURITY_OPTIONS_SUBMAP_USER_DATA & zsecurity_options) { + zone_last_submap_idx = Z_SUBMAP_IDX_BAG_OF_BYTES_MAP; + } else { + zone_last_submap_idx = Z_SUBMAP_IDX_GENERAL_MAP; + } + zone_phys_mapped_max = zone_phys_size_max(); + +#if __LP64__ + zone_map_size = ZONE_MAP_VIRTUAL_SIZE_LP64; +#else + zone_map_size = zone_phys_mapped_max; +#endif + zone_meta_size = round_page(atop(zone_map_size) * + sizeof(struct zone_page_metadata)); -#if KASAN /* - * KASan uses zalloc() for fakestack, which can be called anywhere. However, - * we make sure these calls can never block. + * Zone "map" setup: + * + * [ VA_RESTRICTED ] <-- LP64 only + * [ SINGLE_GUARD ] <-- LP64 only + * [ meta ] + * [ SINGLE_GUARD ] + * [ map ] \ for each extra map + * [ MULTI_GUARD ] / */ - boolean_t irq_safe = FALSE; - const char *fakestack_name = "fakestack."; - if (strncmp(zone->zone_name, fakestack_name, strlen(fakestack_name)) == 0) { - irq_safe = TRUE; - } -#elif MACH_ASSERT - /* In every other case, zalloc() from interrupt context is unsafe. */ - const boolean_t irq_safe = FALSE; + remaining_size = zone_map_size; +#if defined(__LP64__) + remaining_size -= SINGLE_GUARD; #endif + remaining_size -= zone_meta_size + SINGLE_GUARD; + remaining_size -= MULTI_GUARD * (zone_last_submap_idx - + Z_SUBMAP_IDX_GENERAL_MAP + 1); - assert(zone != ZONE_NULL); - assert(irq_safe || ml_get_interrupts_enabled() || ml_is_quiescing() || debug_mode_active() || !early_boot_complete); +#if VM_MAX_TAG_ZONES + if (zone_tagging_on) { + zone_tagging_init(zone_map_size); + } +#endif -#if CONFIG_GZALLOC - addr = gzalloc_alloc(zone, canblock); + uint64_t remaining_denom = 0; + uint64_t zone_sub_map_numer[Z_SUBMAP_IDX_COUNT] = { +#ifdef __LP64__ + [Z_SUBMAP_IDX_VA_RESTRICTED_MAP] = 20, +#endif /* defined(__LP64__) */ + [Z_SUBMAP_IDX_GENERAL_MAP] = 40, + [Z_SUBMAP_IDX_BAG_OF_BYTES_MAP] = 40, + }; + + for (unsigned idx = 0; idx <= zone_last_submap_idx; idx++) { +#if DEBUG || DEVELOPMENT + char submap_name[MAX_SUBMAP_NAME]; + snprintf(submap_name, MAX_SUBMAP_NAME, "submap%d", idx); + PE_parse_boot_argn(submap_name, &zone_sub_map_numer[idx], sizeof(uint64_t)); #endif + remaining_denom += zone_sub_map_numer[idx]; + } + /* - * If zone logging is turned on and this is the zone we're tracking, grab a backtrace. + * And now allocate the various pieces of VA and submaps. + * + * Make a first allocation of contiguous VA, that we'll deallocate, + * and we'll carve-out memory in that range again linearly. + * The kernel is stil single threaded at this stage. 
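+	 *
+	 * As the range is carved back out below, VM_PROT_NONE guard
+	 * reservations (SINGLE_GUARD / MULTI_GUARD) are left between the
+	 * metadata array and the submaps, so an overrun out of one piece
+	 * faults instead of corrupting its neighbour.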
*/ - if (__improbable(DO_LOGGING(zone))) { - numsaved = OSBacktrace((void*) zbt, MAX_ZTRACE_DEPTH); - } -#if CONFIG_ZLEAKS + struct zone_map_range *map_range = &zone_info.zi_map_range; + + *map_range = zone_init_allocate_va(&submap_min, zone_map_size, false); + submap_min = map_range->min_address; + kmem_free(kernel_map, submap_min, zone_map_size); + +#if defined(__LP64__) /* - * Zone leak detection: capture a backtrace every zleak_sample_factor - * allocations in this zone. + * Allocate `Z_SUBMAP_IDX_VA_RESTRICTED_MAP` first because its VA range + * can't go beyond RESTRICTED_VA_MAX for the vm_page_t packing to work. */ - if (__improbable(zone->zleak_on && sample_counter(&zone->zleak_capture, zleak_sample_factor) == TRUE)) { - /* Avoid backtracing twice if zone logging is on */ - if (numsaved == 0) { - zleak_tracedepth = backtrace(zbt, MAX_ZTRACE_DEPTH, NULL); - } else { - zleak_tracedepth = numsaved; - } - } -#endif /* CONFIG_ZLEAKS */ + zone_submap_init(&submap_min, Z_SUBMAP_IDX_VA_RESTRICTED_MAP, + zone_sub_map_numer[Z_SUBMAP_IDX_VA_RESTRICTED_MAP], &remaining_denom, + &remaining_size, SINGLE_GUARD); +#endif /* defined(__LP64__) */ -#if VM_MAX_TAG_ZONES - if (__improbable(zone->tags)) { - vm_tag_will_update_zone(tag, zone->tag_zone_index); + /* + * Allocate metadata array + */ + zone_info.zi_meta_range = + zone_init_allocate_va(&submap_min, zone_meta_size, true); + zone_init_allocate_va(&submap_min, SINGLE_GUARD, true); + + zone_info.zi_array_base = + (struct zone_page_metadata *)zone_info.zi_meta_range.min_address - + zone_pva_from_addr(map_range->min_address).packed_address; + + /* + * Allocate other submaps + */ + for (unsigned idx = Z_SUBMAP_IDX_GENERAL_MAP; idx <= zone_last_submap_idx; idx++) { + zone_submap_init(&submap_min, idx, zone_sub_map_numer[idx], + &remaining_denom, &remaining_size, MULTI_GUARD); } -#endif /* VM_MAX_TAG_ZONES */ -#if CONFIG_ZCACHE - if (__probable(addr == 0)) { - if (zone_caching_enabled(zone)) { - addr = zcache_alloc_from_cpu_cache(zone); - if (addr) { -#if KASAN_ZALLOC - addr = kasan_fixup_allocated_element_address(zone, addr); + vm_map_t general_map = zone_submaps[Z_SUBMAP_IDX_GENERAL_MAP]; + zone_info.zi_general_range.min_address = vm_map_min(general_map); + zone_info.zi_general_range.max_address = vm_map_max(general_map); + + assert(submap_min == map_range->max_address); + +#if CONFIG_GZALLOC + gzalloc_init(zone_map_size); #endif - if (__improbable(DO_LOGGING(zone) && addr)) { - btlog_add_entry(zone->zlog_btlog, (void *)addr, - ZOP_ALLOC, (void **)zbt, numsaved); - } - DTRACE_VM2(zalloc, zone_t, zone, void*, addr); - return (void *)addr; + + zone_create_flags_t kma_flags = ZC_NOCACHING | + ZC_NOGC | ZC_NOENCRYPT | ZC_NOGZALLOC | ZC_NOCALLOUT | + ZC_KASAN_NOQUARANTINE | ZC_KASAN_NOREDZONE; + + (void)zone_create_ext("vm.permanent", 1, kma_flags, + ZONE_ID_PERMANENT, ^(zone_t z){ + z->permanent = true; + z->z_elem_size = 1; + z->pcpu_elem_size = 1; +#if defined(__LP64__) + z->submap_idx = Z_SUBMAP_IDX_VA_RESTRICTED_MAP; +#endif + }); + (void)zone_create_ext("vm.permanent.percpu", 1, kma_flags | ZC_PERCPU, + ZONE_ID_PERCPU_PERMANENT, ^(zone_t z){ + z->permanent = true; + z->z_elem_size = 1; + z->pcpu_elem_size = zpercpu_count(); +#if defined(__LP64__) + z->submap_idx = Z_SUBMAP_IDX_VA_RESTRICTED_MAP; +#endif + }); + + /* + * Now fix the zones that are missing their zone stats + * we don't really know if zfree()s happened so our stats + * are slightly off for early boot. 
¯\_(ツ)_/¯ + */ + zone_index_foreach(idx) { + zone_t tz = &zone_array[idx]; + + if (tz->z_self) { + zone_stats_t zs = zalloc_percpu_permanent_type(struct zone_stats); + + zpercpu_get_cpu(zs, 0)->zs_mem_allocated += + (tz->countavail - tz->countfree) * + zone_elem_size(tz); + assert(tz->z_stats == NULL); + tz->z_stats = zs; +#if ZONE_ENABLE_LOGGING + if (tz->zone_logging && !tz->zlog_btlog) { + zone_enable_logging(tz); } +#endif } } -#endif /* CONFIG_ZCACHE */ - - lock_zone(zone); - assert(zone->zone_valid); +#if CONFIG_ZLEAKS /* - * Check if we need another thread to replenish the zone or - * if we have to wait for a replenish thread to finish. - * This is used for elements, like vm_map_entry, which are - * needed themselves to implement zalloc(). + * Initialize the zone leak monitor */ - if (addr == 0 && zone->async_prio_refill && zone->zone_replenish_thread) { - min_free = (zone->prio_refill_count * zone->elem_size) / 2; - resv_free = min_free / 2; - for (;;) { - curr_free = zone->cur_size - (zone->count * zone->elem_size); + zleak_init(zone_map_size); +#endif /* CONFIG_ZLEAKS */ - /* - * Nothing to do if there are plenty of elements. - */ - if (curr_free > min_free) { - break; - } +#if VM_MAX_TAG_ZONES + if (zone_tagging_on) { + vm_allocation_zones_init(); + } +#endif +} +STARTUP(ZALLOC, STARTUP_RANK_FIRST, zone_init); - /* - * Wakeup the replenish thread if not running. - */ - if (!zone->zone_replenishing) { - lck_spin_lock(&zone_replenish_lock); - assert(zone_replenish_active < zone_replenish_max_threads); - ++zone_replenish_active; - lck_spin_unlock(&zone_replenish_lock); - zone->zone_replenishing = TRUE; - zone_replenish_wakeups_initiated++; - thread_wakeup(&zone->zone_replenish_thread); - } +__startup_func +static void +zone_set_foreign_range( + vm_offset_t range_min, + vm_offset_t range_max) +{ + zone_info.zi_foreign_range.min_address = range_min; + zone_info.zi_foreign_range.max_address = range_max; +} - /* - * We'll let VM_PRIV threads to continue to allocate until the - * reserve drops to 25%. After that only TH_OPT_ZONE_PRIV threads - * may continue. - * - * TH_OPT_ZONE_PRIV threads are the GC thread and a replenish thread itself. - * Replenish threads *need* to use the reserve. GC threads need to - * get through the current allocation, but then will wait at a higher - * level after they've dropped any locks which would deadlock the - * replenish thread. - */ - if ((curr_free > resv_free && (thr->options & TH_OPT_VMPRIV)) || - (thr->options & TH_OPT_ZONE_PRIV)) { - break; - } +__startup_func +vm_offset_t +zone_foreign_mem_init(vm_size_t size) +{ + vm_offset_t mem = (vm_offset_t) pmap_steal_memory(size); + zone_set_foreign_range(mem, mem + size); + return mem; +} - /* - * Wait for the replenish threads to add more elements for us to allocate from. - */ - zone_replenish_throttle_count++; - unlock_zone(zone); - assert_wait_timeout(zone, THREAD_UNINT, 1, NSEC_PER_MSEC); - thread_block(THREAD_CONTINUE_NULL); - lock_zone(zone); +#pragma mark zalloc - assert(zone->zone_valid); +#if KASAN_ZALLOC +/* + * Called from zfree() to add the element being freed to the KASan quarantine. + * + * Returns true if the newly-freed element made it into the quarantine without + * displacing another, false otherwise. In the latter case, addrp points to the + * address of the displaced element, which will be freed by the zone. 
+ */ +static bool +kasan_quarantine_freed_element( + zone_t *zonep, /* the zone the element is being freed to */ + void **addrp) /* address of the element being freed */ +{ + zone_t zone = *zonep; + void *addr = *addrp; + + /* + * Resize back to the real allocation size and hand off to the KASan + * quarantine. `addr` may then point to a different allocation, if the + * current element replaced another in the quarantine. The zone then + * takes ownership of the swapped out free element. + */ + vm_size_t usersz = zone_elem_size(zone) - 2 * zone->kasan_redzone; + vm_size_t sz = usersz; + + if (addr && zone->kasan_redzone) { + kasan_check_free((vm_address_t)addr, usersz, KASAN_HEAP_ZALLOC); + addr = (void *)kasan_dealloc((vm_address_t)addr, &sz); + assert(sz == zone_elem_size(zone)); + } + if (addr && !zone->kasan_noquarantine) { + kasan_free(&addr, &sz, KASAN_HEAP_ZALLOC, zonep, usersz, true); + if (!addr) { + return TRUE; } } - - if (__probable(addr == 0)) { - addr = try_alloc_from_zone(zone, tag, &check_poison); + if (addr && zone->kasan_noquarantine) { + kasan_unpoison(addr, zone_elem_size(zone)); } + *addrp = addr; + return FALSE; +} - /* - * If we didn't wait for zone_replenish_thread to finish, ensure that we did successfully grab - * an element. Obviously we only need to assert this for zones that have a replenish thread configured. - * The value of (refill_level / 2) in the previous bit of code should have given us - * headroom even though this thread didn't wait. - */ - if ((thr->options & TH_OPT_ZONE_PRIV) && zone->async_prio_refill) { - assert(addr != 0); +#endif /* KASAN_ZALLOC */ + +static inline bool +zone_needs_async_refill(zone_t zone) +{ + if (zone->countfree != 0 || zone->async_pending || zone->no_callout) { + return false; } - while (addr == 0 && canblock) { + return zone->expandable || zone->page_count < zone->page_count_max; +} + +__attribute__((noinline)) +static void +zone_refill_synchronously_locked( + zone_t zone, + zalloc_flags_t flags) +{ + thread_t thr = current_thread(); + bool set_expanding_vm_priv = false; + zone_pva_t orig = zone->pages_intermediate; + + while ((flags & Z_NOWAIT) == 0 && (zone->permanent + ? zone_pva_is_equal(zone->pages_intermediate, orig) + : zone->countfree == 0)) { /* * zone is empty, try to expand it * - * Note that we now allow up to 2 threads (1 vm_privliged and 1 non-vm_privliged) - * to expand the zone concurrently... this is necessary to avoid stalling - * vm_privileged threads running critical code necessary to continue compressing/swapping - * pages (i.e. making new free pages) from stalling behind non-vm_privileged threads - * waiting to acquire free pages when the vm_page_free_count is below the + * Note that we now allow up to 2 threads (1 vm_privliged and + * 1 non-vm_privliged) to expand the zone concurrently... + * + * this is necessary to avoid stalling vm_privileged threads + * running critical code necessary to continue + * compressing/swapping pages (i.e. making new free pages) from + * stalling behind non-vm_privileged threads waiting to acquire + * free pages when the vm_page_free_count is below the * vm_page_free_reserved limit. 
*/ - if ((zone->doing_alloc_without_vm_priv || zone->doing_alloc_with_vm_priv) && - (((thr->options & TH_OPT_VMPRIV) == 0) || zone->doing_alloc_with_vm_priv)) { + if ((zone->expanding_no_vm_priv || zone->expanding_vm_priv) && + (((thr->options & TH_OPT_VMPRIV) == 0) || zone->expanding_vm_priv)) { /* * This is a non-vm_privileged thread and a non-vm_privileged or * a vm_privileged thread is already expanding the zone... @@ -3512,680 +4499,906 @@ zalloc_internal( * * In either case wait for a thread to finish, then try again. */ - zone->waiting = TRUE; - zone_sleep(zone); - } else { - vm_offset_t space; - vm_size_t alloc_size; - int retry = 0; - - if ((zone->cur_size + zone->elem_size) > - zone->max_size) { - if (zone->exhaustible) { - break; - } - if (zone->expandable) { - /* - * We're willing to overflow certain - * zones, but not without complaining. - * - * This is best used in conjunction - * with the collectable flag. What we - * want is an assurance we can get the - * memory back, assuming there's no - * leak. - */ - zone->max_size += (zone->max_size >> 1); - } else { - unlock_zone(zone); - - panic_include_zprint = TRUE; + zone->waiting = true; + assert_wait(zone, THREAD_UNINT); + unlock_zone(zone); + thread_block(THREAD_CONTINUE_NULL); + lock_zone(zone); + continue; + } + + if (zone->page_count >= zone->page_count_max) { + if (zone->exhaustible) { + break; + } + if (zone->expandable) { + /* + * If we're expandable, just don't go through this again. + */ + zone->page_count_max = ~0u; + } else { + unlock_zone(zone); + + panic_include_zprint = true; #if CONFIG_ZLEAKS - if (zleak_state & ZLEAK_STATE_ACTIVE) { - panic_include_ztrace = TRUE; - } -#endif /* CONFIG_ZLEAKS */ - panic("zalloc: zone \"%s\" empty.", zone->zone_name); + if (zleak_state & ZLEAK_STATE_ACTIVE) { + panic_include_ztrace = true; } +#endif /* CONFIG_ZLEAKS */ + panic("zalloc: zone \"%s\" empty.", zone->z_name); } + } + + /* + * It is possible that a BG thread is refilling/expanding the zone + * and gets pre-empted during that operation. That blocks all other + * threads from making progress leading to a watchdog timeout. To + * avoid that, boost the thread priority using the rwlock boost + */ + set_thread_rwlock_boost(); + + if ((thr->options & TH_OPT_VMPRIV)) { + zone->expanding_vm_priv = true; + set_expanding_vm_priv = true; + } else { + zone->expanding_no_vm_priv = true; + } + + zone_replenish_locked(zone, flags, false); + + if (set_expanding_vm_priv == true) { + zone->expanding_vm_priv = false; + } else { + zone->expanding_no_vm_priv = false; + } + + if (zone->waiting) { + zone->waiting = false; + thread_wakeup(zone); + } + clear_thread_rwlock_boost(); + + if (zone->countfree == 0) { + assert(flags & Z_NOPAGEWAIT); + break; + } + } + + if ((flags & (Z_NOWAIT | Z_NOPAGEWAIT)) && + zone_needs_async_refill(zone) && !vm_pool_low()) { + zone->async_pending = true; + unlock_zone(zone); + thread_call_enter(&call_async_alloc); + lock_zone(zone); + assert(zone->z_self == zone); + } +} + +__attribute__((noinline)) +static void +zone_refill_asynchronously_locked(zone_t zone) +{ + uint32_t min_free = zone->prio_refill_count / 2; + uint32_t resv_free = zone->prio_refill_count / 4; + thread_t thr = current_thread(); + + /* + * Nothing to do if there are plenty of elements. + */ + while (zone->countfree <= min_free) { + /* + * Wakeup the replenish thread if not running. 
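+		 *
+		 * The replenish thread parks on &zone->prio_refill_count
+		 * (see zone_replenish_thread()), so that is the event used
+		 * here.  zone_replenish_active counts threads currently
+		 * refilling; the matching decrement in zone_replenish_thread()
+		 * wakes anyone waiting on &zone_replenish_active (the GC path)
+		 * once all replenishing is done.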
+ */ + if (!zone->zone_replenishing) { + lck_spin_lock(&zone_replenish_lock); + assert(zone_replenish_active < zone_replenish_max_threads); + ++zone_replenish_active; + lck_spin_unlock(&zone_replenish_lock); + zone->zone_replenishing = true; + zone_replenish_wakeups_initiated++; + thread_wakeup(&zone->prio_refill_count); + } + + /* + * We'll let VM_PRIV threads to continue to allocate until the + * reserve drops to 25%. After that only TH_OPT_ZONE_PRIV threads + * may continue. + * + * TH_OPT_ZONE_PRIV threads are the GC thread and a replenish thread itself. + * Replenish threads *need* to use the reserve. GC threads need to + * get through the current allocation, but then will wait at a higher + * level after they've dropped any locks which would deadlock the + * replenish thread. + */ + if ((zone->countfree > resv_free && (thr->options & TH_OPT_VMPRIV)) || + (thr->options & TH_OPT_ZONE_PRIV)) { + break; + } + + /* + * Wait for the replenish threads to add more elements for us to allocate from. + */ + zone_replenish_throttle_count++; + unlock_zone(zone); + assert_wait_timeout(zone, THREAD_UNINT, 1, NSEC_PER_MSEC); + thread_block(THREAD_CONTINUE_NULL); + lock_zone(zone); + + assert(zone->z_self == zone); + } + + /* + * If we're here because of zone_gc(), we didn't wait for + * zone_replenish_thread to finish. So we need to ensure that + * we will successfully grab an element. + * + * zones that have a replenish thread configured. + * The value of (refill_level / 2) in the previous bit of code should have + * given us headroom even though this thread didn't wait. + */ + if (thr->options & TH_OPT_ZONE_PRIV) { + assert(zone->countfree != 0); + } +} + +#if ZONE_ENABLE_LOGGING || CONFIG_ZLEAKS +__attribute__((noinline)) +static void +zalloc_log_or_trace_leaks(zone_t zone, vm_offset_t addr) +{ + uintptr_t zbt[MAX_ZTRACE_DEPTH]; /* used in zone leak logging and zone leak detection */ + unsigned int numsaved = 0; + +#if ZONE_ENABLE_LOGGING + if (DO_LOGGING(zone)) { + numsaved = backtrace(zbt, MAX_ZTRACE_DEPTH, NULL); + btlog_add_entry(zone->zlog_btlog, (void *)addr, + ZOP_ALLOC, (void **)zbt, numsaved); + } +#endif + +#if CONFIG_ZLEAKS + /* + * Zone leak detection: capture a backtrace every zleak_sample_factor + * allocations in this zone. + */ + if (__improbable(zone->zleak_on)) { + if (sample_counter(&zone->zleak_capture, zleak_sample_factor)) { + /* Avoid backtracing twice if zone logging is on */ + if (numsaved == 0) { + numsaved = backtrace(zbt, MAX_ZTRACE_DEPTH, NULL); + } + /* Sampling can fail if another sample is happening at the same time in a different zone. */ + if (!zleak_log(zbt, addr, numsaved, zone_elem_size(zone))) { + /* If it failed, roll back the counter so we sample the next allocation instead. 
*/ + zone->zleak_capture = zleak_sample_factor; + } + } + } + + if (__improbable(zone_leaks_scan_enable && + !(zone_elem_size(zone) & (sizeof(uintptr_t) - 1)))) { + unsigned int count, idx; + /* Fill element, from tail, with backtrace in reverse order */ + if (numsaved == 0) { + numsaved = backtrace(zbt, MAX_ZTRACE_DEPTH, NULL); + } + count = (unsigned int)(zone_elem_size(zone) / sizeof(uintptr_t)); + if (count >= numsaved) { + count = numsaved - 1; + } + for (idx = 0; idx < count; idx++) { + ((uintptr_t *)addr)[count - 1 - idx] = zbt[idx + 1]; + } + } +#endif /* CONFIG_ZLEAKS */ +} + +static inline bool +zalloc_should_log_or_trace_leaks(zone_t zone, vm_size_t elem_size) +{ +#if ZONE_ENABLE_LOGGING + if (DO_LOGGING(zone)) { + return true; + } +#endif +#if CONFIG_ZLEAKS + /* + * Zone leak detection: capture a backtrace every zleak_sample_factor + * allocations in this zone. + */ + if (zone->zleak_on) { + return true; + } + if (zone_leaks_scan_enable && !(elem_size & (sizeof(uintptr_t) - 1))) { + return true; + } +#endif /* CONFIG_ZLEAKS */ + return false; +} +#endif /* ZONE_ENABLE_LOGGING || CONFIG_ZLEAKS */ +#if ZONE_ENABLE_LOGGING + +__attribute__((noinline)) +static void +zfree_log_trace(zone_t zone, vm_offset_t addr) +{ + /* + * See if we're doing logging on this zone. + * + * There are two styles of logging used depending on + * whether we're trying to catch a leak or corruption. + */ + if (__improbable(DO_LOGGING(zone))) { + if (corruption_debug_flag) { + uintptr_t zbt[MAX_ZTRACE_DEPTH]; + unsigned int numsaved; /* - * It is possible that a BG thread is refilling/expanding the zone - * and gets pre-empted during that operation. That blocks all other - * threads from making progress leading to a watchdog timeout. To - * avoid that, boost the thread priority using the rwlock boost + * We're logging to catch a corruption. + * + * Add a record of this zfree operation to log. + */ + numsaved = backtrace(zbt, MAX_ZTRACE_DEPTH, NULL); + btlog_add_entry(zone->zlog_btlog, (void *)addr, ZOP_FREE, + (void **)zbt, numsaved); + } else { + /* + * We're logging to catch a leak. + * + * Remove any record we might have for this element + * since it's being freed. Note that we may not find it + * if the buffer overflowed and that's OK. + * + * Since the log is of a limited size, old records get + * overwritten if there are more zallocs than zfrees. */ - set_thread_rwlock_boost(); + btlog_remove_entries_for_element(zone->zlog_btlog, (void *)addr); + } + } +} +#endif /* ZONE_ENABLE_LOGGING */ - if ((thr->options & TH_OPT_VMPRIV)) { - zone->doing_alloc_with_vm_priv = TRUE; - set_doing_alloc_with_vm_priv = TRUE; - } else { - zone->doing_alloc_without_vm_priv = TRUE; - } - unlock_zone(zone); +/* + * Removes an element from the zone's free list, returning 0 if the free list is empty. + * Verifies that the next-pointer and backup next-pointer are intact, + * and verifies that a poisoned element hasn't been modified. 
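+ *
+ * A free element stores its successor twice: the primary pointer at the
+ * start of the element, XOR'ed with zp_nopoison_cookie, and a backup copy
+ * at the other end of the element, XOR'ed with zp_nopoison_cookie if the
+ * element was left intact or with zp_poisoned_cookie if it was poisoned
+ * when freed.  Roughly:
+ *
+ *	[ next ^ nopoison | ... payload ... | next ^ (nopoison|poisoned) ]
+ *	  ^ primary                           ^ backup (get_backup_ptr())
+ *
+ * If the two copies disagree and the backup matches neither cookie, the
+ * element is treated as corrupted; if the backup matches the poisoned
+ * cookie, the element is re-validated before being handed out.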
+ */ +vm_offset_t +zalloc_direct_locked( + zone_t zone, + zalloc_flags_t flags __unused, + vm_size_t waste __unused) +{ + struct zone_page_metadata *page_meta; + zone_addr_kind_t kind = ZONE_ADDR_NATIVE; + vm_offset_t element, page, validate_bit = 0; - for (;;) { - int zflags = KMA_KOBJECT | KMA_NOPAGEWAIT; + /* if zone is empty, bail */ + if (!zone_pva_is_null(zone->pages_any_free_foreign)) { + kind = ZONE_ADDR_FOREIGN; + page_meta = zone_pva_to_meta(zone->pages_any_free_foreign, kind); + page = (vm_offset_t)page_meta; + } else if (!zone_pva_is_null(zone->pages_intermediate)) { + page_meta = zone_pva_to_meta(zone->pages_intermediate, kind); + page = zone_pva_to_addr(zone->pages_intermediate); + } else if (!zone_pva_is_null(zone->pages_all_free)) { + page_meta = zone_pva_to_meta(zone->pages_all_free, kind); + page = zone_pva_to_addr(zone->pages_all_free); + if (os_sub_overflow(zone->allfree_page_count, + page_meta->zm_page_count, &zone->allfree_page_count)) { + zone_accounting_panic(zone, "allfree_page_count wrap-around"); + } + } else { + zone_accounting_panic(zone, "countfree corruption"); + } - if (vm_pool_low() || retry >= 1) { - alloc_size = - round_page(zone->elem_size); - } else { - alloc_size = zone->alloc_size; - } + if (!zone_has_index(zone, page_meta->zm_index)) { + zone_page_metadata_index_confusion_panic(zone, page, page_meta); + } - if (zone->noencrypt) { - zflags |= KMA_NOENCRYPT; - } + element = zone_page_meta_get_freelist(zone, page_meta, page); - if (zone->clear_memory) { - zflags |= KMA_ZERO; - } + vm_offset_t *primary = (vm_offset_t *) element; + vm_offset_t *backup = get_backup_ptr(zone_elem_size(zone), primary); - /* Trigger jetsams via the vm_pageout_garbage_collect thread if we're running out of zone memory */ - if (is_zone_map_nearing_exhaustion()) { - thread_wakeup((event_t) &vm_pageout_garbage_collect); - } + /* + * since the primary next pointer is xor'ed with zp_nopoison_cookie + * for obfuscation, retrieve the original value back + */ + vm_offset_t next_element = *primary ^ zp_nopoison_cookie; + vm_offset_t next_element_primary = *primary; + vm_offset_t next_element_backup = *backup; - retval = kernel_memory_allocate(zone_map, &space, alloc_size, 0, zflags, VM_KERN_MEMORY_ZONE); - if (retval == KERN_SUCCESS) { -#if CONFIG_ZLEAKS - if ((zleak_state & (ZLEAK_STATE_ENABLED | ZLEAK_STATE_ACTIVE)) == ZLEAK_STATE_ENABLED) { - if (zone_map->size >= zleak_global_tracking_threshold) { - kern_return_t kr; - - kr = zleak_activate(); - if (kr != KERN_SUCCESS) { - printf("Failed to activate live zone leak debugging (%d).\n", kr); - } - } - } - - if ((zleak_state & ZLEAK_STATE_ACTIVE) && !(zone->zleak_on)) { - if (zone->cur_size > zleak_per_zone_tracking_threshold) { - zone->zleak_on = TRUE; - } - } -#endif /* CONFIG_ZLEAKS */ - zcram(zone, space, alloc_size); + /* + * backup_ptr_mismatch_panic will determine what next_element + * should have been, and print it appropriately + */ + if (!zone_page_meta_is_sane_element(zone, page_meta, page, next_element, kind)) { + backup_ptr_mismatch_panic(zone, page_meta, page, element); + } - break; - } else if (retval != KERN_RESOURCE_SHORTAGE) { - retry++; + /* Check the backup pointer for the regular cookie */ + if (__improbable(next_element_primary != next_element_backup)) { + /* Check for the poisoned cookie instead */ + if (__improbable(next_element != (next_element_backup ^ zp_poisoned_cookie))) { + /* Neither cookie is valid, corruption has occurred */ + backup_ptr_mismatch_panic(zone, page_meta, page, element); + } - if (retry == 
3) { - panic_include_zprint = TRUE; -#if CONFIG_ZLEAKS - if ((zleak_state & ZLEAK_STATE_ACTIVE)) { - panic_include_ztrace = TRUE; - } -#endif /* CONFIG_ZLEAKS */ - if (retval == KERN_NO_SPACE) { - zone_t zone_largest = zone_find_largest(); - panic("zalloc: zone map exhausted while allocating from zone %s, likely due to memory leak in zone %s (%lu total bytes, %d elements allocated)", - zone->zone_name, zone_largest->zone_name, - (unsigned long)zone_largest->cur_size, zone_largest->count); - } - panic("zalloc: \"%s\" (%d elements) retry fail %d", zone->zone_name, zone->count, retval); - } - } else { - break; - } - } - lock_zone(zone); - assert(zone->zone_valid); + /* + * Element was marked as poisoned, so check its integrity before using it. + */ + validate_bit = ZALLOC_ELEMENT_NEEDS_VALIDATION; + } else if (zone->zfree_clear_mem) { + validate_bit = ZALLOC_ELEMENT_NEEDS_VALIDATION; + } - if (set_doing_alloc_with_vm_priv == TRUE) { - zone->doing_alloc_with_vm_priv = FALSE; - } else { - zone->doing_alloc_without_vm_priv = FALSE; - } + /* Remove this element from the free list */ + zone_page_meta_set_freelist(page_meta, page, next_element); - if (zone->waiting) { - zone->waiting = FALSE; - zone_wakeup(zone); - } - clear_thread_rwlock_boost(); + if (kind == ZONE_ADDR_FOREIGN) { + if (next_element == 0) { + /* last foreign element allocated on page, move to all_used_foreign */ + zone_meta_requeue(zone, &zone->pages_all_used_foreign, page_meta, kind); + } + } else if (next_element == 0) { + zone_meta_requeue(zone, &zone->pages_all_used, page_meta, kind); + } else if (page_meta->zm_alloc_count == 0) { + /* remove from free, move to intermediate */ + zone_meta_requeue(zone, &zone->pages_intermediate, page_meta, kind); + } - addr = try_alloc_from_zone(zone, tag, &check_poison); - if (addr == 0 && - retval == KERN_RESOURCE_SHORTAGE) { - if (nopagewait == TRUE) { - break; /* out of the main while loop */ - } - unlock_zone(zone); + if (os_add_overflow(page_meta->zm_alloc_count, 1, + &page_meta->zm_alloc_count)) { + /* + * This will not catch a lot of errors, the proper check + * would be against the number of elements this run should + * have which is expensive to count. + * + * But zm_alloc_count is a 16 bit number which could + * theoretically be valuable to cause to wrap around, + * so catch this. + */ + zone_page_meta_accounting_panic(zone, page_meta, + "zm_alloc_count overflow"); + } + if (os_sub_overflow(zone->countfree, 1, &zone->countfree)) { + zone_accounting_panic(zone, "countfree wrap-around"); + } - VM_PAGE_WAIT(); - lock_zone(zone); - assert(zone->zone_valid); - } - } - if (addr == 0) { - addr = try_alloc_from_zone(zone, tag, &check_poison); +#if VM_MAX_TAG_ZONES + if (__improbable(zone->tags)) { + vm_tag_t tag = zalloc_flags_get_tag(flags); + // set the tag with b0 clear so the block remains inuse + ZTAG(zone, element)[0] = (vm_tag_t)(tag << 1); + vm_tag_update_zone_size(tag, zone->tag_zone_index, + zone_elem_size(zone), waste); + } +#endif /* VM_MAX_TAG_ZONES */ +#if KASAN_ZALLOC + if (zone->percpu) { + zpercpu_foreach_cpu(i) { + kasan_poison_range(element + ptoa(i), + zone_elem_size(zone), ASAN_VALID); } + } else { + kasan_poison_range(element, zone_elem_size(zone), ASAN_VALID); + } +#endif + + return element | validate_bit; +} + +/* + * zalloc returns an element from the specified zone. 
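+ *
+ * With Z_NOWAIT or Z_NOPAGEWAIT the refill path may give up rather than
+ * block, so callers passing those flags must tolerate a NULL return.
+ * Z_NOFAIL allocations never return NULL: if the zone cannot be refilled,
+ * zone_nofail_panic() is called instead.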
+ */ +void * +zalloc_ext( + zone_t zone, + zone_stats_t zstats, + zalloc_flags_t flags, + vm_size_t waste) +{ + vm_offset_t addr = 0; + vm_size_t elem_size = zone_elem_size(zone); + + /* + * KASan uses zalloc() for fakestack, which can be called anywhere. + * However, we make sure these calls can never block. + */ + assert(zone->kasan_fakestacks || + ml_get_interrupts_enabled() || + ml_is_quiescing() || + debug_mode_active() || + startup_phase < STARTUP_SUB_EARLY_BOOT); + + /* + * Make sure Z_NOFAIL was not obviously misused + */ + if ((flags & Z_NOFAIL) && !zone->prio_refill_count) { + assert(!zone->exhaustible && (flags & (Z_NOWAIT | Z_NOPAGEWAIT)) == 0); } -#if CONFIG_ZLEAKS - /* Zone leak detection: - * If we're sampling this allocation, add it to the zleaks hash table. +#if CONFIG_ZCACHE + /* + * Note: if zone caching is on, gzalloc and tags aren't used + * so we can always check this first */ - if (addr && zleak_tracedepth > 0) { - /* Sampling can fail if another sample is happening at the same time in a different zone. */ - if (!zleak_log(zbt, addr, zleak_tracedepth, zone->elem_size)) { - /* If it failed, roll back the counter so we sample the next allocation instead. */ - zone->zleak_capture = zleak_sample_factor; + if (zone_caching_enabled(zone)) { + addr = zcache_alloc_from_cpu_cache(zone, zstats, waste); + if (__probable(addr)) { + goto allocated_from_cache; } } -#endif /* CONFIG_ZLEAKS */ - +#endif /* CONFIG_ZCACHE */ - if ((addr == 0) && (!canblock || nopagewait) && (zone->async_pending == FALSE) && (zone->no_callout == FALSE) && (zone->exhaustible == FALSE) && (!vm_pool_low())) { - zone->async_pending = TRUE; - unlock_zone(zone); - thread_call_enter(&call_async_alloc); - lock_zone(zone); - assert(zone->zone_valid); - addr = try_alloc_from_zone(zone, tag, &check_poison); +#if CONFIG_GZALLOC + if (__improbable(zone->gzalloc_tracked)) { + addr = gzalloc_alloc(zone, zstats, flags); + goto allocated_from_gzalloc; } - +#endif /* CONFIG_GZALLOC */ #if VM_MAX_TAG_ZONES - if (__improbable(zone->tags) && addr) { - if (reqsize) { - reqsize = zone->elem_size - reqsize; + if (__improbable(zone->tags)) { + vm_tag_t tag = zalloc_flags_get_tag(flags); + if (tag == VM_KERN_MEMORY_NONE) { + /* + * zone views into heaps can lead to a site-less call + * and we fallback to KALLOC as a tag for those. + */ + tag = VM_KERN_MEMORY_KALLOC; + flags |= Z_VM_TAG(tag); } - vm_tag_update_zone_size(tag, zone->tag_zone_index, zone->elem_size, reqsize); + vm_tag_will_update_zone(tag, zone->tag_zone_index); } #endif /* VM_MAX_TAG_ZONES */ - unlock_zone(zone); + lock_zone(zone); + assert(zone->z_self == zone); - if (__improbable(DO_LOGGING(zone) && addr)) { - btlog_add_entry(zone->zlog_btlog, (void *)addr, ZOP_ALLOC, (void **)zbt, numsaved); + /* + * Check if we need another thread to replenish the zone or + * if we have to wait for a replenish thread to finish. + * This is used for elements, like vm_map_entry, which are + * needed themselves to implement zalloc(). 
+ */ + if (__improbable(zone->prio_refill_count && + zone->countfree <= zone->prio_refill_count / 2)) { + zone_refill_asynchronously_locked(zone); + } else if (__improbable(zone->countfree == 0)) { + zone_refill_synchronously_locked(zone, flags); + if (__improbable(zone->countfree == 0)) { + unlock_zone(zone); + if (__improbable(flags & Z_NOFAIL)) { + zone_nofail_panic(zone); + } + goto out_nomem; + } } - zalloc_poison_element(check_poison, zone, addr); - - if (addr) { -#if DEBUG || DEVELOPMENT - if (__improbable(leak_scan_debug_flag && !(zone->elem_size & (sizeof(uintptr_t) - 1)))) { - unsigned int count, idx; - /* Fill element, from tail, with backtrace in reverse order */ - if (numsaved == 0) { - numsaved = backtrace(zbt, MAX_ZTRACE_DEPTH, NULL); - } - count = (unsigned int)(zone->elem_size / sizeof(uintptr_t)); - if (count >= numsaved) { - count = numsaved - 1; - } - for (idx = 0; idx < count; idx++) { - ((uintptr_t *)addr)[count - 1 - idx] = zbt[idx + 1]; - } + addr = zalloc_direct_locked(zone, flags, waste); + if (__probable(zstats != NULL)) { + /* + * The few vm zones used before zone_init() runs do not have + * per-cpu stats yet + */ + int cpu = cpu_number(); + zpercpu_get_cpu(zstats, cpu)->zs_mem_allocated += elem_size; +#if ZALLOC_DETAILED_STATS + if (waste) { + zpercpu_get_cpu(zstats, cpu)->zs_mem_wasted += waste; } -#endif /* DEBUG || DEVELOPMENT */ +#endif /* ZALLOC_DETAILED_STATS */ } - TRACE_MACHLEAKS(ZALLOC_CODE, ZALLOC_CODE_2, zone->elem_size, addr); + unlock_zone(zone); + +#if ZALLOC_ENABLE_POISONING + bool validate = addr & ZALLOC_ELEMENT_NEEDS_VALIDATION; +#endif + addr &= ~ZALLOC_ELEMENT_NEEDS_VALIDATION; + zone_clear_freelist_pointers(zone, addr); +#if ZALLOC_ENABLE_POISONING + /* + * Note: percpu zones do not respect ZONE_MIN_ELEM_SIZE, + * so we will check the first word even if we just + * cleared it. + */ + zalloc_validate_element(zone, addr, elem_size - sizeof(vm_offset_t), + validate); +#endif /* ZALLOC_ENABLE_POISONING */ +allocated_from_cache: +#if ZONE_ENABLE_LOGGING || CONFIG_ZLEAKS + if (__improbable(zalloc_should_log_or_trace_leaks(zone, elem_size))) { + zalloc_log_or_trace_leaks(zone, addr); + } +#endif /* ZONE_ENABLE_LOGGING || CONFIG_ZLEAKS */ -#if KASAN_ZALLOC - addr = kasan_fixup_allocated_element_address(zone, addr); +#if CONFIG_GZALLOC +allocated_from_gzalloc: #endif +#if KASAN_ZALLOC + if (zone->kasan_redzone) { + addr = kasan_alloc(addr, elem_size, + elem_size - 2 * zone->kasan_redzone, zone->kasan_redzone); + elem_size -= 2 * zone->kasan_redzone; + } + /* + * Initialize buffer with unique pattern only if memory + * wasn't expected to be zeroed. 
+ */ + if (!zone->zfree_clear_mem && !(flags & Z_ZERO)) { + kasan_leak_init(addr, elem_size); + } +#endif /* KASAN_ZALLOC */ + if ((flags & Z_ZERO) && !zone->zfree_clear_mem) { + bzero((void *)addr, elem_size); + } - DTRACE_VM2(zalloc, zone_t, zone, void*, addr); + TRACE_MACHLEAKS(ZALLOC_CODE, ZALLOC_CODE_2, elem_size, addr); +out_nomem: + DTRACE_VM2(zalloc, zone_t, zone, void*, addr); return (void *)addr; } void * -zalloc(zone_t zone) +zalloc(union zone_or_view zov) { - return zalloc_internal(zone, TRUE, FALSE, 0, VM_KERN_MEMORY_NONE); + return zalloc_flags(zov, Z_WAITOK); } void * -zalloc_noblock(zone_t zone) +zalloc_noblock(union zone_or_view zov) { - return zalloc_internal(zone, FALSE, FALSE, 0, VM_KERN_MEMORY_NONE); + return zalloc_flags(zov, Z_NOWAIT); } void * -zalloc_nopagewait(zone_t zone) +zalloc_flags(union zone_or_view zov, zalloc_flags_t flags) { - return zalloc_internal(zone, TRUE, TRUE, 0, VM_KERN_MEMORY_NONE); + zone_t zone = zov.zov_view->zv_zone; + zone_stats_t zstats = zov.zov_view->zv_stats; + assert(!zone->percpu); + return zalloc_ext(zone, zstats, flags, 0); } void * -zalloc_canblock_tag(zone_t zone, boolean_t canblock, vm_size_t reqsize, vm_tag_t tag) +zalloc_percpu(union zone_or_view zov, zalloc_flags_t flags) { - return zalloc_internal(zone, canblock, FALSE, reqsize, tag); + zone_t zone = zov.zov_view->zv_zone; + zone_stats_t zstats = zov.zov_view->zv_stats; + assert(zone->percpu); + return (void *)__zpcpu_mangle(zalloc_ext(zone, zstats, flags, 0)); } -void * -zalloc_canblock(zone_t zone, boolean_t canblock) +static void * +_zalloc_permanent(zone_t zone, vm_size_t size, vm_offset_t mask) { - return zalloc_internal(zone, canblock, FALSE, 0, VM_KERN_MEMORY_NONE); + const zone_addr_kind_t kind = ZONE_ADDR_NATIVE; + struct zone_page_metadata *page_meta; + vm_offset_t offs, addr; + zone_pva_t pva; + + assert(ml_get_interrupts_enabled() || + ml_is_quiescing() || + debug_mode_active() || + startup_phase < STARTUP_SUB_EARLY_BOOT); + + size = (size + mask) & ~mask; + assert(size <= PAGE_SIZE); + + lock_zone(zone); + assert(zone->z_self == zone); + + for (;;) { + pva = zone->pages_intermediate; + while (!zone_pva_is_null(pva)) { + page_meta = zone_pva_to_meta(pva, kind); + if (page_meta->zm_freelist_offs + size <= PAGE_SIZE) { + goto found; + } + pva = page_meta->zm_page_next; + } + + zone_refill_synchronously_locked(zone, Z_WAITOK); + } + +found: + offs = (page_meta->zm_freelist_offs + mask) & ~mask; + page_meta->zm_freelist_offs = offs + size; + page_meta->zm_alloc_count += size; + zone->countfree -= size; + if (__probable(zone->z_stats)) { + zpercpu_get(zone->z_stats)->zs_mem_allocated += size; + } + + if (page_meta->zm_alloc_count >= PAGE_SIZE - sizeof(vm_offset_t)) { + zone_meta_requeue(zone, &zone->pages_all_used, page_meta, kind); + } + + unlock_zone(zone); + + addr = offs + zone_pva_to_addr(pva); + + DTRACE_VM2(zalloc, zone_t, zone, void*, addr); + return (void *)addr; } -void * -zalloc_attempt(zone_t zone) +static void * +_zalloc_permanent_large(size_t size, vm_offset_t mask) { - boolean_t check_poison = FALSE; - vm_offset_t addr = try_alloc_from_zone(zone, VM_KERN_MEMORY_NONE, &check_poison); - zalloc_poison_element(check_poison, zone, addr); + kern_return_t kr; + vm_offset_t addr; + + kr = kernel_memory_allocate(kernel_map, &addr, size, mask, + KMA_KOBJECT | KMA_PERMANENT | KMA_ZERO, + VM_KERN_MEMORY_KALLOC); + if (kr != 0) { + panic("zalloc_permanent: unable to allocate %zd bytes (%d)", + size, kr); + } return (void *)addr; } -void -zfree_direct(zone_t zone, 
vm_offset_t elem) +void * +zalloc_permanent(vm_size_t size, vm_offset_t mask) { - boolean_t poison = zfree_poison_element(zone, elem); - free_to_zone(zone, elem, poison); + if (size <= PAGE_SIZE) { + zone_t zone = &zone_array[ZONE_ID_PERMANENT]; + return _zalloc_permanent(zone, size, mask); + } + return _zalloc_permanent_large(size, mask); } +void * +zalloc_percpu_permanent(vm_size_t size, vm_offset_t mask) +{ + zone_t zone = &zone_array[ZONE_ID_PERCPU_PERMANENT]; + return (void *)__zpcpu_mangle(_zalloc_permanent(zone, size, mask)); +} void -zalloc_async( - __unused thread_call_param_t p0, - __unused thread_call_param_t p1) +zalloc_async(__unused thread_call_param_t p0, __unused thread_call_param_t p1) { - zone_t current_z = NULL; - unsigned int max_zones, i; - void *elt = NULL; - boolean_t pending = FALSE; - - simple_lock(&all_zones_lock, &zone_locks_grp); - max_zones = num_zones; - simple_unlock(&all_zones_lock); - for (i = 0; i < max_zones; i++) { - current_z = &(zone_array[i]); + zone_index_foreach(i) { + zone_t z = &zone_array[i]; - if (current_z->no_callout == TRUE) { + if (z->no_callout) { /* async_pending will never be set */ continue; } - lock_zone(current_z); - if (current_z->zone_valid && current_z->async_pending == TRUE) { - current_z->async_pending = FALSE; - pending = TRUE; - } - unlock_zone(current_z); - - if (pending == TRUE) { - elt = zalloc_canblock_tag(current_z, TRUE, 0, VM_KERN_MEMORY_OSFMK); - zfree(current_z, elt); - pending = FALSE; + lock_zone(z); + if (z->z_self && z->async_pending) { + z->async_pending = false; + zone_refill_synchronously_locked(z, Z_WAITOK); } + unlock_zone(z); } } /* - * zget returns an element from the specified zone - * and immediately returns nothing if there is nothing there. + * Adds the element to the head of the zone's free list + * Keeps a backup next-pointer at the end of the element */ -void * -zget( - zone_t zone) -{ - return zalloc_internal(zone, FALSE, TRUE, 0, VM_KERN_MEMORY_NONE); -} - -/* Keep this FALSE by default. Large memory machine run orders of magnitude - * slower in debug mode when true. 
Use debugger to enable if needed */ -/* static */ boolean_t zone_check = FALSE; - -static void -zone_check_freelist(zone_t zone, vm_offset_t elem) -{ - struct zone_free_element *this; - struct zone_page_metadata *thispage; - - if (zone->allows_foreign) { - for (thispage = (struct zone_page_metadata *)queue_first(&zone->pages.any_free_foreign); - !queue_end(&zone->pages.any_free_foreign, &(thispage->pages)); - thispage = (struct zone_page_metadata *)queue_next(&(thispage->pages))) { - for (this = page_metadata_get_freelist(thispage); - this != NULL; - this = this->next) { - if (!is_sane_zone_element(zone, (vm_address_t)this) || (vm_address_t)this == elem) { - panic("zone_check_freelist"); - } - } - } - } - for (thispage = (struct zone_page_metadata *)queue_first(&zone->pages.all_free); - !queue_end(&zone->pages.all_free, &(thispage->pages)); - thispage = (struct zone_page_metadata *)queue_next(&(thispage->pages))) { - for (this = page_metadata_get_freelist(thispage); - this != NULL; - this = this->next) { - if (!is_sane_zone_element(zone, (vm_address_t)this) || (vm_address_t)this == elem) { - panic("zone_check_freelist"); - } - } - } - for (thispage = (struct zone_page_metadata *)queue_first(&zone->pages.intermediate); - !queue_end(&zone->pages.intermediate, &(thispage->pages)); - thispage = (struct zone_page_metadata *)queue_next(&(thispage->pages))) { - for (this = page_metadata_get_freelist(thispage); - this != NULL; - this = this->next) { - if (!is_sane_zone_element(zone, (vm_address_t)this) || (vm_address_t)this == elem) { - panic("zone_check_freelist"); - } - } - } -} - -boolean_t -zfree_poison_element(zone_t zone, vm_offset_t elem) +void +zfree_direct_locked(zone_t zone, vm_offset_t element, bool poison) { - boolean_t poison = FALSE; - if (zp_factor != 0 || zp_tiny_zone_limit != 0) { - /* - * Poison the memory before it ends up on the freelist to catch - * use-after-free and use of uninitialized memory - * - * Always poison tiny zones' elements (limit is 0 if -no-zp is set) - * Also poison larger elements periodically - */ - - vm_offset_t inner_size = zone->elem_size; - - uint32_t sample_factor = zp_factor + (((uint32_t)inner_size) >> zp_scale); + struct zone_page_metadata *page_meta; + vm_offset_t page, old_head; + zone_addr_kind_t kind; + vm_size_t elem_size = zone_elem_size(zone); - if (inner_size <= zp_tiny_zone_limit) { - poison = TRUE; - } else if (zp_factor != 0 && sample_counter(&zone->zp_count, sample_factor) == TRUE) { - poison = TRUE; - } + vm_offset_t *primary = (vm_offset_t *) element; + vm_offset_t *backup = get_backup_ptr(elem_size, primary); - if (__improbable(poison)) { - /* memset_pattern{4|8} could help make this faster: */ - /* Poison everything but primary and backup */ - vm_offset_t *element_cursor = ((vm_offset_t *) elem) + 1; - vm_offset_t *backup = get_backup_ptr(inner_size, (vm_offset_t *)elem); + page_meta = zone_allocated_element_resolve(zone, element, &page, &kind); + old_head = zone_page_meta_get_freelist(zone, page_meta, page); - for (; element_cursor < backup; element_cursor++) { - *element_cursor = ZP_POISON; - } - } + if (__improbable(old_head == element)) { + panic("zfree: double free of %p to zone %s%s\n", + (void *) element, zone_heap_name(zone), zone->z_name); } - return poison; -} -void -(zfree)( - zone_t zone, - void *addr) -{ - vm_offset_t elem = (vm_offset_t) addr; - uintptr_t zbt[MAX_ZTRACE_DEPTH]; /* only used if zone logging is enabled via boot-args */ - unsigned int numsaved = 0; - boolean_t gzfreed = FALSE; - boolean_t poison = FALSE; 
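[Editor's note] The removed zfree_poison_element() above fills every word of a freed element except the primary (first word) and backup (last word) next-pointer slots with the ZP_POISON pattern, so use-after-free writes can be detected when the element is next handed out. A minimal standalone sketch of that fill loop follows; the constant value, function name, and types are simplified stand-ins, not the kernel's.

#include <stddef.h>
#include <stdint.h>

#define SKETCH_POISON ((uintptr_t)0xdeadbeefdeadbeefULL) /* stand-in for ZP_POISON */

/* Illustrative only: poison an element's payload, skipping the primary
 * (first word) and backup (last word) freelist-pointer slots. */
static void
sketch_poison_element(uintptr_t *element, size_t elem_size)
{
	uintptr_t *cursor = element + 1;                                     /* skip primary */
	uintptr_t *backup = (uintptr_t *)((char *)element + elem_size) - 1; /* last word */

	for (; cursor < backup; cursor++) {
		*cursor = SKETCH_POISON;
	}
}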
-#if VM_MAX_TAG_ZONES - vm_tag_t tag; -#endif /* VM_MAX_TAG_ZONES */ - assert(zone != ZONE_NULL); - DTRACE_VM2(zfree, zone_t, zone, void*, addr); -#if KASAN_ZALLOC - if (kasan_quarantine_freed_element(&zone, &addr)) { - return; +#if ZALLOC_ENABLE_POISONING + if (poison && elem_size < ZONE_MIN_ELEM_SIZE) { + assert(zone->percpu); + poison = false; } - elem = (vm_offset_t)addr; +#else + poison = false; #endif /* - * If zone logging is turned on and this is the zone we're tracking, grab a backtrace. + * Always write a redundant next pointer + * So that it is more difficult to forge, xor it with a random cookie + * A poisoned element is indicated by using zp_poisoned_cookie + * instead of zp_nopoison_cookie + */ + + *backup = old_head ^ (poison ? zp_poisoned_cookie : zp_nopoison_cookie); + + /* + * Insert this element at the head of the free list. We also xor the + * primary pointer with the zp_nopoison_cookie to make sure a free + * element does not provide the location of the next free element directly. */ + *primary = old_head ^ zp_nopoison_cookie; - if (__improbable(DO_LOGGING(zone) && corruption_debug_flag)) { - numsaved = OSBacktrace((void *)zbt, MAX_ZTRACE_DEPTH); +#if VM_MAX_TAG_ZONES + if (__improbable(zone->tags)) { + vm_tag_t tag = (ZTAG(zone, element)[0] >> 1); + // set the tag with b0 clear so the block remains inuse + ZTAG(zone, element)[0] = 0xFFFE; + vm_tag_update_zone_size(tag, zone->tag_zone_index, + -((int64_t)elem_size), 0); } +#endif /* VM_MAX_TAG_ZONES */ -#if MACH_ASSERT - /* Basic sanity checks */ - if (zone == ZONE_NULL || elem == (vm_offset_t)0) { - panic("zfree: NULL"); + zone_page_meta_set_freelist(page_meta, page, element); + if (os_sub_overflow(page_meta->zm_alloc_count, 1, + &page_meta->zm_alloc_count)) { + zone_page_meta_accounting_panic(zone, page_meta, + "alloc_count wrap-around"); } -#endif + zone->countfree++; -#if CONFIG_GZALLOC - gzfreed = gzalloc_free(zone, addr); -#endif + if (kind == ZONE_ADDR_FOREIGN) { + if (old_head == 0) { + /* first foreign element freed on page, move from all_used_foreign */ + zone_meta_requeue(zone, &zone->pages_any_free_foreign, page_meta, kind); + } + } else if (page_meta->zm_alloc_count == 0) { + /* whether the page was on the intermediate or all_used, queue, move it to free */ + zone_meta_requeue(zone, &zone->pages_all_free, page_meta, kind); + zone->allfree_page_count += page_meta->zm_page_count; + } else if (old_head == 0) { + /* first free element on page, move from all_used */ + zone_meta_requeue(zone, &zone->pages_intermediate, page_meta, kind); + } - if (!gzfreed) { - struct zone_page_metadata *page_meta = get_zone_page_metadata((struct zone_free_element *)addr, FALSE); - if (zone != PAGE_METADATA_GET_ZONE(page_meta)) { - panic("Element %p from zone %s caught being freed to wrong zone %s\n", addr, PAGE_METADATA_GET_ZONE(page_meta)->zone_name, zone->zone_name); +#if KASAN_ZALLOC + if (zone->percpu) { + zpercpu_foreach_cpu(i) { + kasan_poison_range(element + ptoa(i), elem_size, + ASAN_HEAP_FREED); } + } else { + kasan_poison_range(element, elem_size, ASAN_HEAP_FREED); } +#endif +} - TRACE_MACHLEAKS(ZFREE_CODE, ZFREE_CODE_2, zone->elem_size, (uintptr_t)addr); +void +zfree_ext(zone_t zone, zone_stats_t zstats, void *addr) +{ + vm_offset_t elem = (vm_offset_t)addr; + vm_size_t elem_size = zone_elem_size(zone); + bool poison = false; - if (__improbable(!gzfreed && zone->collectable && !zone->allows_foreign && - !from_zone_map(elem, zone->elem_size))) { - panic("zfree: non-allocated memory in collectable zone!"); - } + 
DTRACE_VM2(zfree, zone_t, zone, void*, addr); + TRACE_MACHLEAKS(ZFREE_CODE, ZFREE_CODE_2, elem_size, elem); - if (!gzfreed) { - poison = zfree_poison_element(zone, elem); +#if KASAN_ZALLOC + if (kasan_quarantine_freed_element(&zone, &addr)) { + return; } - /* - * See if we're doing logging on this zone. There are two styles of logging used depending on - * whether we're trying to catch a leak or corruption. See comments above in zalloc for details. + * kasan_quarantine_freed_element() might return a different + * {zone, addr} than the one being freed for kalloc heaps. + * + * Make sure we reload everything. */ + elem = (vm_offset_t)addr; + elem_size = zone_elem_size(zone); +#endif - if (__improbable(DO_LOGGING(zone))) { - if (corruption_debug_flag) { - /* - * We're logging to catch a corruption. Add a record of this zfree operation - * to log. - */ - btlog_add_entry(zone->zlog_btlog, (void *)addr, ZOP_FREE, (void **)zbt, numsaved); - } else { - /* - * We're logging to catch a leak. Remove any record we might have for this - * element since it's being freed. Note that we may not find it if the buffer - * overflowed and that's OK. Since the log is of a limited size, old records - * get overwritten if there are more zallocs than zfrees. - */ - btlog_remove_entries_for_element(zone->zlog_btlog, (void *)addr); - } +#if CONFIG_ZLEAKS + /* + * Zone leak detection: un-track the allocation + */ + if (__improbable(zone->zleak_on)) { + zleak_free(elem, elem_size); } +#endif /* CONFIG_ZLEAKS */ #if CONFIG_ZCACHE + /* + * Note: if zone caching is on, gzalloc and tags aren't used + * so we can always check this first + */ if (zone_caching_enabled(zone)) { - int __assert_only ret = zcache_free_to_cpu_cache(zone, addr); - assert(ret != FALSE); - return; + return zcache_free_to_cpu_cache(zone, zstats, (vm_offset_t)addr); } #endif /* CONFIG_ZCACHE */ - lock_zone(zone); - assert(zone->zone_valid); - - if (zone_check) { - zone_check_freelist(zone, elem); +#if CONFIG_GZALLOC + if (__improbable(zone->gzalloc_tracked)) { + return gzalloc_free(zone, zstats, addr); } +#endif /* CONFIG_GZALLOC */ - if (__probable(!gzfreed)) { -#if VM_MAX_TAG_ZONES - if (__improbable(zone->tags)) { - tag = (ZTAG(zone, elem)[0] >> 1); - // set the tag with b0 clear so the block remains inuse - ZTAG(zone, elem)[0] = 0xFFFE; - } -#endif /* VM_MAX_TAG_ZONES */ - free_to_zone(zone, elem, poison); +#if ZONE_ENABLE_LOGGING + if (__improbable(DO_LOGGING(zone))) { + zfree_log_trace(zone, elem); } +#endif /* ZONE_ENABLE_LOGGING */ - if (__improbable(zone->count < 0)) { - panic("zfree: zone count underflow in zone %s while freeing element %p, possible cause: double frees or freeing memory that did not come from this zone", - zone->zone_name, addr); + if (zone->zfree_clear_mem) { + poison = zfree_clear(zone, elem, elem_size); } -#if CONFIG_ZLEAKS - /* - * Zone leak detection: un-track the allocation - */ - if (zone->zleak_on) { - zleak_free(elem, zone->elem_size); + lock_zone(zone); + assert(zone->z_self == zone); + + if (!poison) { + poison = zfree_poison_element(zone, &zone->zp_count, elem); } -#endif /* CONFIG_ZLEAKS */ -#if VM_MAX_TAG_ZONES - if (__improbable(zone->tags) && __probable(!gzfreed)) { - vm_tag_update_zone_size(tag, zone->tag_zone_index, -((int64_t)zone->elem_size), 0); + if (__probable(zstats != NULL)) { + /* + * The few vm zones used before zone_init() runs do not have + * per-cpu stats yet + */ + zpercpu_get(zstats)->zs_mem_freed += elem_size; } -#endif /* VM_MAX_TAG_ZONES */ + + zfree_direct_locked(zone, elem, poison); 
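[Editor's note] zalloc_direct_locked() and zfree_direct_locked() above share one freelist encoding: the next pointer is stored twice (at the head and at the tail of the free element) and each copy is XORed with a cookie, with zp_poisoned_cookie doubling as the "this element's body was poisoned" marker. The following standalone sketch restates that encoding with simplified stand-in names; the kernel's real helpers and panic paths (backup_ptr_mismatch_panic, etc.) are omitted.

#include <stdbool.h>
#include <stdint.h>

static uintptr_t nopoison_cookie;  /* stand-in for zp_nopoison_cookie */
static uintptr_t poisoned_cookie;  /* stand-in for zp_poisoned_cookie */

/* Free path: store the obfuscated next pointer twice; the backup copy's
 * cookie also records whether the element body was poisoned. */
static void
sketch_freelist_push(uintptr_t *primary, uintptr_t *backup,
    uintptr_t next, bool poisoned)
{
	*backup  = next ^ (poisoned ? poisoned_cookie : nopoison_cookie);
	*primary = next ^ nopoison_cookie;
}

/* Alloc path: recover next from the primary copy, then cross-check the
 * backup. Returns false on corruption (where the kernel panics instead). */
static bool
sketch_freelist_pop(const uintptr_t *primary, const uintptr_t *backup,
    uintptr_t *next_out, bool *was_poisoned)
{
	uintptr_t next = *primary ^ nopoison_cookie;

	*was_poisoned = false;
	if (*primary != *backup) {
		if (next != (*backup ^ poisoned_cookie)) {
			return false;        /* neither cookie matches: freelist corrupted */
		}
		*was_poisoned = true;        /* element body must be validated before reuse */
	}
	*next_out = next;
	return true;
}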
unlock_zone(zone); } -/* Change a zone's flags. - * This routine must be called immediately after zinit. - */ void -zone_change( - zone_t zone, - unsigned int item, - boolean_t value) +(zfree)(union zone_or_view zov, void *addr) { - assert( zone != ZONE_NULL ); - assert( value == TRUE || value == FALSE ); - - switch (item) { - case Z_NOENCRYPT: - zone->noencrypt = value; - break; - case Z_EXHAUST: - zone->exhaustible = value; - break; - case Z_COLLECT: - zone->collectable = value; - break; - case Z_EXPAND: - zone->expandable = value; - break; - case Z_FOREIGN: - zone->allows_foreign = value; - break; - case Z_CALLERACCT: - zone->caller_acct = value; - break; - case Z_NOCALLOUT: - zone->no_callout = value; - break; - case Z_TAGS_ENABLED: -#if VM_MAX_TAG_ZONES - { - static int tag_zone_index; - zone->tags = TRUE; - zone->tags_inline = (((page_size + zone->elem_size - 1) / zone->elem_size) <= (sizeof(uint32_t) / sizeof(uint16_t))); - zone->tag_zone_index = OSAddAtomic(1, &tag_zone_index); - } -#endif /* VM_MAX_TAG_ZONES */ - break; - case Z_GZALLOC_EXEMPT: - zone->gzalloc_exempt = value; -#if CONFIG_GZALLOC - gzalloc_reconfigure(zone); -#endif - break; - case Z_ALIGNMENT_REQUIRED: - zone->alignment_required = value; -#if KASAN_ZALLOC - if (zone->kasan_redzone == KASAN_GUARD_SIZE) { - /* Don't disturb alignment with the redzone for zones with - * specific alignment requirements. */ - zone->elem_size -= zone->kasan_redzone * 2; - zone->kasan_redzone = 0; - } -#endif -#if CONFIG_GZALLOC - gzalloc_reconfigure(zone); -#endif - break; - case Z_KASAN_QUARANTINE: - zone->kasan_quarantine = value; - break; - case Z_CACHING_ENABLED: -#if CONFIG_ZCACHE - if (value == TRUE) { -#if CONFIG_GZALLOC - /* - * Per cpu zone caching should be - * disabled if gzalloc is enabled. - */ - if (gzalloc_enabled()) { - break; - } -#endif - if (zcache_ready()) { - zcache_init(zone); - } else { - zone->cpu_cache_enable_when_ready = TRUE; - } - } -#endif - break; - case Z_CLEARMEMORY: - zone->clear_memory = value; - break; - default: - panic("Zone_change: Wrong Item Type!"); - /* break; */ - } + zone_t zone = zov.zov_view->zv_zone; + zone_stats_t zstats = zov.zov_view->zv_stats; + assert(!zone->percpu); + zfree_ext(zone, zstats, addr); } -/* - * Return the expected number of free elements in the zone. - * This calculation will be incorrect if items are zfree'd that - * were never zalloc'd/zget'd. The correct way to stuff memory - * into a zone is by zcram. - */ - -integer_t -zone_free_count(zone_t zone) +void +zfree_percpu(union zone_or_view zov, void *addr) { - integer_t free_count; - - lock_zone(zone); - free_count = zone->countfree; - unlock_zone(zone); - - assert(free_count >= 0); - - return free_count; + zone_t zone = zov.zov_view->zv_zone; + zone_stats_t zstats = zov.zov_view->zv_stats; + assert(zone->percpu); + zfree_ext(zone, zstats, (void *)__zpcpu_demangle(addr)); } +#pragma mark vm integration, MIG routines + /* * Drops (i.e. frees) the elements in the all free pages queue of a zone. * Called by zone_gc() on each zone and when a zone is zdestroy()ed. 
*/ -void -drop_free_elements(zone_t z) +static void +zone_drop_free_elements(zone_t z) { - vm_size_t elt_size; + const zone_addr_kind_t kind = ZONE_ADDR_NATIVE; unsigned int total_freed_pages = 0; - struct zone_page_metadata *page_meta; - vm_address_t free_page_address; + struct zone_page_metadata *page_meta, *seq_meta; + vm_address_t page_addr; vm_size_t size_to_free; + vm_size_t free_count; + uint32_t page_count; current_thread()->options |= TH_OPT_ZONE_PRIV; lock_zone(z); - elt_size = z->elem_size; - - while (!queue_empty(&z->pages.all_free)) { + while (!zone_pva_is_null(z->pages_all_free)) { /* - * If any replenishment threads are running, defer to them, so that we don't deplete reserved zones. - * The timing of the check isn't super important, as there are enough reserves to allow freeing an - * extra page_meta. Hence, we can check without grabbing the lock every time through the loop. - * We do need the lock however to avoid missing a wakeup when we decide to block. + * If any replenishment threads are running, defer to them, + * so that we don't deplete reserved zones. + * + * The timing of the check isn't super important, as there are + * enough reserves to allow freeing an extra page_meta. + * + * Hence, we can check without grabbing the lock every time + * through the loop. We do need the lock however to avoid + * missing a wakeup when we decide to block. */ if (zone_replenish_active > 0) { lck_spin_lock(&zone_replenish_lock); @@ -4199,60 +5412,86 @@ drop_free_elements(zone_t z) } lck_spin_unlock(&zone_replenish_lock); } - page_meta = (struct zone_page_metadata *)queue_first(&z->pages.all_free); - assert(from_zone_map((vm_address_t)page_meta, sizeof(*page_meta))); /* foreign elements should be in any_free_foreign */ + + page_meta = zone_pva_to_meta(z->pages_all_free, kind); + page_count = page_meta->zm_page_count; + free_count = zone_elem_count(z, ptoa(page_count), kind); + /* - * Don't drain zones with async refill to below the refill threshold, - * as they need some reserve to function properly. + * Don't drain zones with async refill to below the refill + * threshold, as they need some reserve to function properly. 
*/ - if (!z->zone_destruction && - z->async_prio_refill && z->zone_replenish_thread && - (vm_size_t)(page_meta->free_count - z->countfree) < z->prio_refill_count) { + if (!z->destroyed && z->prio_refill_count && + (vm_size_t)(z->countfree - free_count) < z->prio_refill_count) { break; } - (void)dequeue_head(&z->pages.all_free); + zone_meta_queue_pop(z, &z->pages_all_free, kind, &page_addr); - assert(z->countfree >= page_meta->free_count); - z->countfree -= page_meta->free_count; + if (os_sub_overflow(z->countfree, free_count, &z->countfree)) { + zone_accounting_panic(z, "countfree wrap-around"); + } + if (os_sub_overflow(z->countavail, free_count, &z->countavail)) { + zone_accounting_panic(z, "countavail wrap-around"); + } + if (os_sub_overflow(z->allfree_page_count, page_count, + &z->allfree_page_count)) { + zone_accounting_panic(z, "allfree_page_count wrap-around"); + } + if (os_sub_overflow(z->page_count, page_count, &z->page_count)) { + zone_accounting_panic(z, "page_count wrap-around"); + } - assert(z->count_all_free_pages >= page_meta->page_count); - z->count_all_free_pages -= page_meta->page_count; + os_atomic_sub(&zones_phys_page_count, page_count, relaxed); + os_atomic_sub(&zones_phys_page_mapped_count, page_count, relaxed); - assert(z->cur_size >= page_meta->free_count * elt_size); - z->cur_size -= page_meta->free_count * elt_size; + bzero(page_meta, sizeof(*page_meta) * page_count); + seq_meta = page_meta; + page_meta = NULL; /* page_meta fields are zeroed, prevent reuse */ - ZONE_PAGE_COUNT_DECR(z, page_meta->page_count); unlock_zone(z); /* Free the pages for metadata and account for them */ - free_page_address = get_zone_page(page_meta); - total_freed_pages += page_meta->page_count; - size_to_free = page_meta->page_count * PAGE_SIZE; + total_freed_pages += page_count; + size_to_free = ptoa(page_count); #if KASAN_ZALLOC - kasan_poison_range(free_page_address, size_to_free, ASAN_VALID); + kasan_poison_range(page_addr, size_to_free, ASAN_VALID); #endif #if VM_MAX_TAG_ZONES if (z->tags) { - ztMemoryRemove(z, free_page_address, size_to_free); + ztMemoryRemove(z, page_addr, size_to_free); } #endif /* VM_MAX_TAG_ZONES */ - kmem_free(zone_map, free_page_address, size_to_free); + + if (z->va_sequester && z->alloc_pages == page_count) { + kernel_memory_depopulate(submap_for_zone(z), page_addr, + size_to_free, KMA_KOBJECT, VM_KERN_MEMORY_ZONE); + } else { + kmem_free(submap_for_zone(z), page_addr, size_to_free); + seq_meta = NULL; + } thread_yield_to_preemption(); + lock_zone(z); + + if (seq_meta) { + zone_meta_queue_push(z, &z->pages_sequester, seq_meta, kind); + z->sequester_page_count += page_count; + } } - if (z->zone_destruction) { - assert(queue_empty(&z->pages.all_free)); - assert(z->count_all_free_pages == 0); + if (z->destroyed) { + assert(zone_pva_is_null(z->pages_all_free)); + assert(z->allfree_page_count == 0); } unlock_zone(z); current_thread()->options &= ~TH_OPT_ZONE_PRIV; - #if DEBUG || DEVELOPMENT if (zalloc_debug & ZALLOC_DEBUG_ZONEGC) { - kprintf("zone_gc() of zone %s freed %lu elements, %d pages\n", z->zone_name, - (unsigned long)((total_freed_pages * PAGE_SIZE) / elt_size), total_freed_pages); + kprintf("zone_gc() of zone %s%s freed %lu elements, %d pages\n", + zone_heap_name(z), z->z_name, + (unsigned long)(ptoa(total_freed_pages) / z->pcpu_elem_size), + total_freed_pages); } #endif /* DEBUG || DEVELOPMENT */ } @@ -4269,10 +5508,6 @@ drop_free_elements(zone_t z) void zone_gc(boolean_t consider_jetsams) { - unsigned int max_zones; - zone_t z; - unsigned int i; - if 
(consider_jetsams) { kill_process_in_largest_zone(); /* @@ -4284,19 +5519,14 @@ zone_gc(boolean_t consider_jetsams) lck_mtx_lock(&zone_gc_lock); - simple_lock(&all_zones_lock, &zone_locks_grp); - max_zones = num_zones; - simple_unlock(&all_zones_lock); - #if DEBUG || DEVELOPMENT if (zalloc_debug & ZALLOC_DEBUG_ZONEGC) { kprintf("zone_gc() starting...\n"); } #endif /* DEBUG || DEVELOPMENT */ - for (i = 0; i < max_zones; i++) { - z = &(zone_array[i]); - assert(z != ZONE_NULL); + zone_index_foreach(i) { + zone_t z = &zone_array[i]; if (!z->collectable) { continue; @@ -4306,19 +5536,16 @@ zone_gc(boolean_t consider_jetsams) zcache_drain_depot(z); } #endif /* CONFIG_ZCACHE */ - if (queue_empty(&z->pages.all_free)) { + if (zone_pva_is_null(z->pages_all_free)) { continue; } - drop_free_elements(z); + zone_drop_free_elements(z); } lck_mtx_unlock(&zone_gc_lock); } -extern vm_offset_t kmapoff_kaddr; -extern unsigned int kmapoff_pgcnt; - /* * consider_zone_gc: * @@ -4328,19 +5555,19 @@ extern unsigned int kmapoff_pgcnt; void consider_zone_gc(boolean_t consider_jetsams) { - if (kmapoff_kaddr != 0) { - /* - * One-time reclaim of kernel_map resources we allocated in - * early boot. - */ - (void) vm_deallocate(kernel_map, - kmapoff_kaddr, kmapoff_pgcnt * PAGE_SIZE_64); - kmapoff_kaddr = 0; + /* + * One-time reclaim of kernel_map resources we allocated in + * early boot. + * + * Use atomic exchange in case multiple threads race into here. + */ + vm_offset_t deallocate_kaddr; + if (kmapoff_kaddr != 0 && + (deallocate_kaddr = os_atomic_xchg(&kmapoff_kaddr, 0, relaxed)) != 0) { + vm_deallocate(kernel_map, deallocate_kaddr, ptoa_64(kmapoff_pgcnt)); } - if (zone_gc_allowed) { - zone_gc(consider_jetsams); - } + zone_gc(consider_jetsams); } /* @@ -4349,7 +5576,7 @@ consider_zone_gc(boolean_t consider_jetsams) * Frees unused pages towards the end of the region, and zero'es out unused * space on the last page. 
*/ -vm_map_copy_t +static vm_map_copy_t create_vm_map_copy( vm_offset_t start_addr, vm_size_t total_size, @@ -4378,9 +5605,9 @@ create_vm_map_copy( return copy; } -boolean_t +static boolean_t get_zone_info( - zone_t z, + zone_t z, mach_zone_name_t *zn, mach_zone_info_t *zi) { @@ -4388,7 +5615,7 @@ get_zone_info( assert(z != ZONE_NULL); lock_zone(z); - if (!z->zone_valid) { + if (!z->z_self) { unlock_zone(z); return FALSE; } @@ -4396,22 +5623,34 @@ get_zone_info( unlock_zone(z); if (zn != NULL) { + /* + * Append kalloc heap name to zone name (if zone is used by kalloc) + */ + char temp_zone_name[MAX_ZONE_NAME] = ""; + snprintf(temp_zone_name, MAX_ZONE_NAME, "%s%s", + zone_heap_name(z), z->z_name); + /* assuming here the name data is static */ - (void) __nosan_strlcpy(zn->mzn_name, zcopy.zone_name, - strlen(zcopy.zone_name) + 1); + (void) __nosan_strlcpy(zn->mzn_name, temp_zone_name, + strlen(temp_zone_name) + 1); } if (zi != NULL) { - zi->mzi_count = (uint64_t)zcopy.count; - zi->mzi_cur_size = ptoa_64(zcopy.page_count); - zi->mzi_max_size = (uint64_t)zcopy.max_size; - zi->mzi_elem_size = (uint64_t)zcopy.elem_size; - zi->mzi_alloc_size = (uint64_t)zcopy.alloc_size; - zi->mzi_sum_size = zcopy.sum_count * zcopy.elem_size; - zi->mzi_exhaustible = (uint64_t)zcopy.exhaustible; - zi->mzi_collectable = 0; + *zi = (mach_zone_info_t) { + .mzi_count = zone_count_allocated(&zcopy), + .mzi_cur_size = ptoa_64(zcopy.page_count), + // max_size for zprint is now high-watermark of pages used + .mzi_max_size = ptoa_64(zcopy.page_count_hwm), + .mzi_elem_size = zcopy.pcpu_elem_size, + .mzi_alloc_size = ptoa_64(zcopy.alloc_pages), + .mzi_exhaustible = (uint64_t)zcopy.exhaustible, + }; + zpercpu_foreach(zs, zcopy.z_stats) { + zi->mzi_sum_size += zs->zs_mem_allocated; + } if (zcopy.collectable) { - SET_MZI_COLLECTABLE_BYTES(zi->mzi_collectable, ((uint64_t)zcopy.count_all_free_pages * PAGE_SIZE)); + SET_MZI_COLLECTABLE_BYTES(zi->mzi_collectable, + ptoa_64(zcopy.allfree_page_count)); SET_MZI_COLLECTABLE_FLAG(zi->mzi_collectable, TRUE); } } @@ -4487,9 +5726,7 @@ mach_memory_info( * We won't pick up any zones that are allocated later. 
*/ - simple_lock(&all_zones_lock, &zone_locks_grp); - max_zones = (unsigned int)(num_zones); - simple_unlock(&all_zones_lock); + max_zones = os_atomic_load(&num_zones, relaxed); names_size = round_page(max_zones * sizeof *names); kr = kmem_alloc_pageable(ipc_kernel_map, @@ -4570,7 +5807,6 @@ mach_zone_info_for_zone( mach_zone_name_t name, mach_zone_info_t *infop) { - unsigned int max_zones, i; zone_t zone_ptr; if (host == HOST_NULL) { @@ -4586,17 +5822,20 @@ mach_zone_info_for_zone( return KERN_INVALID_ARGUMENT; } - simple_lock(&all_zones_lock, &zone_locks_grp); - max_zones = (unsigned int)(num_zones); - simple_unlock(&all_zones_lock); - zone_ptr = ZONE_NULL; - for (i = 0; i < max_zones; i++) { + zone_index_foreach(i) { zone_t z = &(zone_array[i]); assert(z != ZONE_NULL); + /* + * Append kalloc heap name to zone name (if zone is used by kalloc) + */ + char temp_zone_name[MAX_ZONE_NAME] = ""; + snprintf(temp_zone_name, MAX_ZONE_NAME, "%s%s", + zone_heap_name(z), z->z_name); + /* Find the requested zone by name */ - if (track_this_zone(z->zone_name, name.mzn_name)) { + if (track_this_zone(temp_zone_name, name.mzn_name)) { zone_ptr = z; break; } @@ -4641,17 +5880,13 @@ mach_zone_info_for_largest_zone( uint64_t get_zones_collectable_bytes(void) { - unsigned int i, max_zones; uint64_t zones_collectable_bytes = 0; mach_zone_info_t zi; - simple_lock(&all_zones_lock, &zone_locks_grp); - max_zones = (unsigned int)(num_zones); - simple_unlock(&all_zones_lock); - - for (i = 0; i < max_zones; i++) { - if (get_zone_info(&(zone_array[i]), NULL, &zi)) { - zones_collectable_bytes += GET_MZI_COLLECTABLE_BYTES(zi.mzi_collectable); + zone_index_foreach(i) { + if (get_zone_info(&zone_array[i], NULL, &zi)) { + zones_collectable_bytes += + GET_MZI_COLLECTABLE_BYTES(zi.mzi_collectable); } } @@ -4664,7 +5899,7 @@ mach_zone_get_zlog_zones( mach_zone_name_array_t *namesp, mach_msg_type_number_t *namesCntp) { -#if DEBUG || DEVELOPMENT +#if ZONE_ENABLE_LOGGING unsigned int max_zones, logged_zones, i; kern_return_t kr; zone_t zone_ptr; @@ -4680,9 +5915,7 @@ mach_zone_get_zlog_zones( return KERN_INVALID_ARGUMENT; } - simple_lock(&all_zones_lock, &zone_locks_grp); - max_zones = (unsigned int)(num_zones); - simple_unlock(&all_zones_lock); + max_zones = os_atomic_load(&num_zones, relaxed); names_size = round_page(max_zones * sizeof *names); kr = kmem_alloc_pageable(ipc_kernel_map, @@ -4710,10 +5943,10 @@ mach_zone_get_zlog_zones( return KERN_SUCCESS; -#else /* DEBUG || DEVELOPMENT */ +#else /* ZONE_ENABLE_LOGGING */ #pragma unused(host, namesp, namesCntp) return KERN_FAILURE; -#endif /* DEBUG || DEVELOPMENT */ +#endif /* ZONE_ENABLE_LOGGING */ } kern_return_t @@ -4724,7 +5957,7 @@ mach_zone_get_btlog_records( mach_msg_type_number_t *recsCntp) { #if DEBUG || DEVELOPMENT - unsigned int max_zones, i, numrecs = 0; + unsigned int numrecs = 0; zone_btrecord_t *recs; kern_return_t kr; zone_t zone_ptr; @@ -4739,17 +5972,19 @@ mach_zone_get_btlog_records( return KERN_INVALID_ARGUMENT; } - simple_lock(&all_zones_lock, &zone_locks_grp); - max_zones = (unsigned int)(num_zones); - simple_unlock(&all_zones_lock); - zone_ptr = ZONE_NULL; - for (i = 0; i < max_zones; i++) { - zone_t z = &(zone_array[i]); - assert(z != ZONE_NULL); + zone_index_foreach(i) { + zone_t z = &zone_array[i]; + + /* + * Append kalloc heap name to zone name (if zone is used by kalloc) + */ + char temp_zone_name[MAX_ZONE_NAME] = ""; + snprintf(temp_zone_name, MAX_ZONE_NAME, "%s%s", + zone_heap_name(z), z->z_name); /* Find the requested zone by name */ - if 
(track_this_zone(z->zone_name, name.mzn_name)) { + if (track_this_zone(temp_zone_name, name.mzn_name)) { zone_ptr = z; break; } @@ -4807,9 +6042,8 @@ mach_memory_info_check(void) { mach_memory_info_t * memory_info; mach_memory_info_t * info; - zone_t zone; - unsigned int idx, num_info, max_zones; - vm_offset_t memory_info_addr; + unsigned int num_info; + vm_offset_t memory_info_addr; kern_return_t kr; size_t memory_info_size, memory_info_vmsize; uint64_t top_wired, zonestotal, total; @@ -4823,19 +6057,12 @@ mach_memory_info_check(void) memory_info = (mach_memory_info_t *) memory_info_addr; vm_page_diagnose(memory_info, num_info, 0); - simple_lock(&all_zones_lock, &zone_locks_grp); - max_zones = num_zones; - simple_unlock(&all_zones_lock); - top_wired = total = zonestotal = 0; - for (idx = 0; idx < max_zones; idx++) { - zone = &(zone_array[idx]); - assert(zone != ZONE_NULL); - lock_zone(zone); - zonestotal += ptoa_64(zone->page_count); - unlock_zone(zone); + zone_index_foreach(idx) { + zonestotal += zone_size_wired(&zone_array[idx]); } - for (idx = 0; idx < num_info; idx++) { + + for (uint32_t idx = 0; idx < num_info; idx++) { info = &memory_info[idx]; if (!info->size) { continue; @@ -4853,7 +6080,8 @@ mach_memory_info_check(void) } total += zonestotal; - printf("vm_page_diagnose_check %qd of %qd, zones %qd, short 0x%qx\n", total, top_wired, zonestotal, top_wired - total); + printf("vm_page_diagnose_check %qd of %qd, zones %qd, short 0x%qx\n", + total, top_wired, zonestotal, top_wired - total); kmem_free(kernel_map, memory_info_addr, memory_info_vmsize); @@ -4882,96 +6110,76 @@ mach_zone_force_gc( return KERN_SUCCESS; } -extern unsigned int stack_total; -extern unsigned long long stack_allocs; - zone_t zone_find_largest(void) { - unsigned int i; - unsigned int max_zones; - zone_t the_zone; - zone_t zone_largest; - - simple_lock(&all_zones_lock, &zone_locks_grp); - max_zones = num_zones; - simple_unlock(&all_zones_lock); - - zone_largest = &(zone_array[0]); - for (i = 0; i < max_zones; i++) { - the_zone = &(zone_array[i]); - if (the_zone->cur_size > zone_largest->cur_size) { - zone_largest = the_zone; + uint32_t largest_idx = 0; + vm_offset_t largest_size = zone_size_wired(&zone_array[0]); + + zone_index_foreach(i) { + vm_offset_t size = zone_size_wired(&zone_array[i]); + if (size > largest_size) { + largest_idx = i; + largest_size = size; } } - return zone_largest; -} -#if ZONE_DEBUG + return &zone_array[largest_idx]; +} -/* should we care about locks here ? */ +#pragma mark - tests +#if DEBUG || DEVELOPMENT -#define zone_in_use(z) ( z->count || z->free_elements \ - || !queue_empty(&z->pages.all_free) \ - || !queue_empty(&z->pages.intermediate) \ - || (z->allows_foreign && !queue_empty(&z->pages.any_free_foreign))) +/* + * Used for sysctl kern.run_zone_test which is not thread-safe. Ensure only one + * thread goes through at a time. Or we can end up with multiple test zones (if + * a second zinit() comes through before zdestroy()), which could lead us to + * run out of zones. 
+ */ +SIMPLE_LOCK_DECLARE(zone_test_lock, 0); +static boolean_t zone_test_running = FALSE; +static zone_t test_zone_ptr = NULL; +static uintptr_t * +zone_copy_allocations(zone_t z, uintptr_t *elems, bitmap_t *bits, + zone_pva_t page_index, zone_addr_kind_t kind) +{ + vm_offset_t free, first, end, page; + struct zone_page_metadata *meta; -#endif /* ZONE_DEBUG */ + while (!zone_pva_is_null(page_index)) { + page = zone_pva_to_addr(page_index); + meta = zone_pva_to_meta(page_index, kind); + end = page + ptoa(meta->zm_percpu ? 1 : meta->zm_page_count); + first = page + ZONE_PAGE_FIRST_OFFSET(kind); + bitmap_clear(bits, (uint32_t)((end - first) / zone_elem_size(z))); -/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + // construct bitmap of all freed elements + free = zone_page_meta_get_freelist(z, meta, page); + while (free) { + bitmap_set(bits, (uint32_t)((free - first) / zone_elem_size(z))); -#if DEBUG || DEVELOPMENT + // next free element + free = *(vm_offset_t *)free ^ zp_nopoison_cookie; + } -static uintptr_t * -zone_copy_all_allocations_inqueue(zone_t z, queue_head_t * queue, uintptr_t * elems) -{ - struct zone_page_metadata *page_meta; - vm_offset_t free, elements; - vm_offset_t idx, numElements, freeCount, bytesAvail, metaSize; - - queue_iterate(queue, page_meta, struct zone_page_metadata *, pages) - { - elements = get_zone_page(page_meta); - bytesAvail = ptoa(page_meta->page_count); - freeCount = 0; - if (z->allows_foreign && !from_zone_map(elements, z->elem_size)) { - metaSize = (sizeof(struct zone_page_metadata) + ZONE_ELEMENT_ALIGNMENT - 1) & ~(ZONE_ELEMENT_ALIGNMENT - 1); - bytesAvail -= metaSize; - elements += metaSize; - } - numElements = bytesAvail / z->elem_size; - // construct array of all possible elements - for (idx = 0; idx < numElements; idx++) { - elems[idx] = INSTANCE_PUT(elements + idx * z->elem_size); - } - // remove from the array all free elements - free = (vm_offset_t)page_metadata_get_freelist(page_meta); - while (free) { - // find idx of free element - for (idx = 0; (idx < numElements) && (elems[idx] != INSTANCE_PUT(free)); idx++) { + for (unsigned i = 0; first < end; i++, first += zone_elem_size(z)) { + if (!bitmap_test(bits, i)) { + *elems++ = INSTANCE_PUT(first); } - assert(idx < numElements); - // remove it - bcopy(&elems[idx + 1], &elems[idx], (numElements - (idx + 1)) * sizeof(elems[0])); - numElements--; - freeCount++; - // next free element - vm_offset_t *primary = (vm_offset_t *) free; - free = *primary ^ zp_nopoison_cookie; } - elems += numElements; - } + page_index = meta->zm_page_next; + } return elems; } kern_return_t zone_leaks(const char * zoneName, uint32_t nameLen, leak_site_proc proc, void * refCon) { - uintptr_t zbt[MAX_ZTRACE_DEPTH]; - zone_t zone; + uintptr_t zbt[MAX_ZTRACE_DEPTH]; + zone_t zone = NULL; uintptr_t * array; uintptr_t * next; uintptr_t element, bt; @@ -4979,48 +6187,53 @@ zone_leaks(const char * zoneName, uint32_t nameLen, leak_site_proc proc, void * uint32_t btidx, btcount, nobtcount, btfound; uint32_t elemSize; uint64_t maxElems; - unsigned int max_zones; kern_return_t kr; + bitmap_t *bits; - simple_lock(&all_zones_lock, &zone_locks_grp); - max_zones = num_zones; - simple_unlock(&all_zones_lock); - - for (idx = 0; idx < max_zones; idx++) { - if (!strncmp(zoneName, zone_array[idx].zone_name, nameLen)) { + zone_index_foreach(i) { + if (!strncmp(zoneName, zone_array[i].z_name, nameLen)) { + zone = &zone_array[i]; break; } } - if (idx >= max_zones) { + if (zone == NULL) { return 
KERN_INVALID_NAME; } - zone = &zone_array[idx]; - elemSize = (uint32_t) zone->elem_size; - maxElems = ptoa(zone->page_count) / elemSize; + elemSize = zone_elem_size(zone); + maxElems = (zone->countavail + 1) & ~1ul; - if ((zone->alloc_size % elemSize) - && !leak_scan_debug_flag) { + if ((ptoa(zone->percpu ? 1 : zone->alloc_pages) % elemSize) && + !zone_leaks_scan_enable) { return KERN_INVALID_CAPABILITY; } kr = kmem_alloc_kobject(kernel_map, (vm_offset_t *) &array, - maxElems * sizeof(uintptr_t), VM_KERN_MEMORY_DIAG); + maxElems * sizeof(uintptr_t) + BITMAP_LEN(ZONE_CHUNK_MAXELEMENTS), + VM_KERN_MEMORY_DIAG); if (KERN_SUCCESS != kr) { return kr; } + /* maxElems is a 2-multiple so we're always aligned */ + bits = CAST_DOWN_EXPLICIT(bitmap_t *, array + maxElems); + lock_zone(zone); next = array; - next = zone_copy_all_allocations_inqueue(zone, &zone->pages.any_free_foreign, next); - next = zone_copy_all_allocations_inqueue(zone, &zone->pages.intermediate, next); - next = zone_copy_all_allocations_inqueue(zone, &zone->pages.all_used, next); + next = zone_copy_allocations(zone, next, bits, + zone->pages_any_free_foreign, ZONE_ADDR_FOREIGN); + next = zone_copy_allocations(zone, next, bits, + zone->pages_all_used_foreign, ZONE_ADDR_FOREIGN); + next = zone_copy_allocations(zone, next, bits, + zone->pages_intermediate, ZONE_ADDR_NATIVE); + next = zone_copy_allocations(zone, next, bits, + zone->pages_all_used, ZONE_ADDR_NATIVE); count = (uint32_t)(next - array); unlock_zone(zone); - zone_leaks_scan(array, count, (uint32_t)zone->elem_size, &found); + zone_leaks_scan(array, count, zone_elem_size(zone), &found); assert(found <= count); for (idx = 0; idx < count; idx++) { @@ -5031,10 +6244,12 @@ zone_leaks(const char * zoneName, uint32_t nameLen, leak_site_proc proc, void * element = INSTANCE_PUT(element) & ~kInstanceFlags; } +#if ZONE_ENABLE_LOGGING if (zone->zlog_btlog && !corruption_debug_flag) { // btlog_copy_backtraces_for_elements will set kInstanceFlagReferenced on elements it found btlog_copy_backtraces_for_elements(zone->zlog_btlog, array, &count, elemSize, proc, refCon); } +#endif /* ZONE_ENABLE_LOGGING */ for (nobtcount = idx = 0; idx < count; idx++) { element = array[idx]; @@ -5047,7 +6262,7 @@ zone_leaks(const char * zoneName, uint32_t nameLen, leak_site_proc proc, void * element = INSTANCE_PUT(element) & ~kInstanceFlags; // see if we can find any backtrace left in the element - btcount = (typeof(btcount))(zone->elem_size / sizeof(uintptr_t)); + btcount = (typeof(btcount))(zone_elem_size(zone) / sizeof(uintptr_t)); if (btcount >= MAX_ZTRACE_DEPTH) { btcount = MAX_ZTRACE_DEPTH - 1; } @@ -5075,13 +6290,6 @@ zone_leaks(const char * zoneName, uint32_t nameLen, leak_site_proc proc, void * return KERN_SUCCESS; } -boolean_t -kdp_is_in_zone(void *addr, const char *zone_name) -{ - zone_t z; - return zone_element_size(addr, &z) && !strcmp(z->zone_name, zone_name); -} - boolean_t run_zone_test(void) { @@ -5110,9 +6318,9 @@ run_zone_test(void) } #if KASAN_ZALLOC - if (test_zone_ptr == NULL && zone_free_count(test_zone) != 0) { + if (test_zone_ptr == NULL && test_zone->countfree != 0) { #else - if (zone_free_count(test_zone) != 0) { + if (test_zone->countfree != 0) { #endif printf("run_zone_test: free count is not zero\n"); return FALSE; @@ -5140,6 +6348,90 @@ run_zone_test(void) printf("run_zone_test: Iteration %d successful\n", i); } while (i < max_iter); + /* test Z_VA_SEQUESTER */ + if (zsecurity_options & ZSECURITY_OPTIONS_SEQUESTER) { + int idx, num_allocs = 8; + vm_size_t elem_size = 2 * 
PAGE_SIZE / num_allocs; + void *allocs[num_allocs]; + vm_offset_t phys_pages = os_atomic_load(&zones_phys_page_count, relaxed); + vm_size_t zone_map_size = zone_range_size(&zone_info.zi_map_range); + + test_zone = zone_create("test_zone_sysctl", elem_size, + ZC_DESTRUCTIBLE | ZC_SEQUESTER); + if (test_zone == NULL) { + printf("run_zone_test: zinit() failed\n"); + return FALSE; + } + + for (idx = 0; idx < num_allocs; idx++) { + allocs[idx] = zalloc(test_zone); + assert(NULL != allocs[idx]); + printf("alloc[%d] %p\n", idx, allocs[idx]); + } + for (idx = 0; idx < num_allocs; idx++) { + zfree(test_zone, allocs[idx]); + } + assert(!zone_pva_is_null(test_zone->pages_all_free)); + + printf("vm_page_wire_count %d, vm_page_free_count %d, p to v %qd%%\n", + vm_page_wire_count, vm_page_free_count, + (100ULL * ptoa_64(phys_pages)) / zone_map_size); + zone_gc(FALSE); + printf("vm_page_wire_count %d, vm_page_free_count %d, p to v %qd%%\n", + vm_page_wire_count, vm_page_free_count, + (100ULL * ptoa_64(phys_pages)) / zone_map_size); + unsigned int allva = 0; + zone_index_foreach(zidx) { + zone_t z = &zone_array[zidx]; + lock_zone(z); + allva += z->page_count; + if (!z->sequester_page_count) { + unlock_zone(z); + continue; + } + unsigned count = 0; + uint64_t size; + zone_pva_t pg = z->pages_sequester; + struct zone_page_metadata *page_meta; + while (pg.packed_address) { + page_meta = zone_pva_to_meta(pg, ZONE_ADDR_NATIVE); + count += z->alloc_pages; + pg = page_meta->zm_page_next; + } + assert(count == z->sequester_page_count); + size = zone_size_wired(z); + if (!size) { + size = 1; + } + printf("%s%s: seq %d, res %d, %qd %%\n", + zone_heap_name(z), z->z_name, z->sequester_page_count, + z->page_count, zone_size_allocated(z) * 100ULL / size); + unlock_zone(z); + } + + printf("total va: %d\n", allva); + + assert(zone_pva_is_null(test_zone->pages_all_free)); + assert(!zone_pva_is_null(test_zone->pages_sequester)); + assert(2 == test_zone->sequester_page_count); + for (idx = 0; idx < num_allocs; idx++) { + assert(0 == pmap_find_phys(kernel_pmap, (addr64_t)(uintptr_t) allocs[idx])); + } + for (idx = 0; idx < num_allocs; idx++) { + allocs[idx] = zalloc(test_zone); + assert(allocs[idx]); + printf("alloc[%d] %p\n", idx, allocs[idx]); + } + assert(zone_pva_is_null(test_zone->pages_sequester)); + assert(0 == test_zone->sequester_page_count); + for (idx = 0; idx < num_allocs; idx++) { + zfree(test_zone, allocs[idx]); + } + zdestroy(test_zone); + } else { + printf("run_zone_test: skipping sequester test (not enabled)\n"); + } + printf("run_zone_test: Test passed\n"); simple_lock(&zone_test_lock, &zone_locks_grp); @@ -5149,4 +6441,54 @@ run_zone_test(void) return TRUE; } +/* + * Routines to test that zone garbage collection and zone replenish threads + * running at the same time don't cause problems. 
+ */ + +void +zone_gc_replenish_test(void) +{ + zone_gc(FALSE); +} + + +void +zone_alloc_replenish_test(void) +{ + zone_t z = NULL; + struct data { struct data *next; } *node, *list = NULL; + + /* + * Find a zone that has a replenish thread + */ + zone_index_foreach(i) { + z = &zone_array[i]; + if (z->prio_refill_count && + zone_elem_size(z) >= sizeof(struct data)) { + z = &zone_array[i]; + break; + } + } + if (z == NULL) { + printf("Couldn't find a replenish zone\n"); + return; + } + + for (uint32_t i = 0; i < 2000; ++i) { /* something big enough to go past replenishment */ + node = zalloc(z); + node->next = list; + list = node; + } + + /* + * release the memory we allocated + */ + while (list != NULL) { + node = list; + list = list->next; + zfree(z, node); + } +} + #endif /* DEBUG || DEVELOPMENT */ diff --git a/osfmk/kern/zalloc.h b/osfmk/kern/zalloc.h index 5e3caa6de..541de3bdd 100644 --- a/osfmk/kern/zalloc.h +++ b/osfmk/kern/zalloc.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2019 Apple Inc. All rights reserved. + * Copyright (c) 2000-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -72,407 +72,902 @@ #include #include -#ifdef MACH_KERNEL_PRIVATE +#if XNU_KERNEL_PRIVATE && !defined(ZALLOC_ALLOW_DEPRECATED) +#define __zalloc_deprecated(msg) __deprecated_msg(msg) +#else +#define __zalloc_deprecated(msg) +#endif -#include -#include -#include -#include -#include +__BEGIN_DECLS -#if KASAN -#include -#include -#endif +/*! + * @typedef zone_id_t + * + * @abstract + * The type for a zone ID. + */ +typedef uint16_t zone_id_t; + +/** + * @enum zone_create_flags_t + * + * @abstract + * Set of flags to pass to zone_create(). + * + * @discussion + * Some kernel-wide policies affect all possible created zones. + * Explicit @c ZC_* win over such policies. 
+ */ +__options_decl(zone_create_flags_t, uint64_t, { + /** The default value to pass to zone_create() */ + ZC_NONE = 0x00000000, + + /** Force the created zone to use VA sequestering */ + ZC_SEQUESTER = 0x00000001, + /** Force the created zone @b NOT to use VA sequestering */ + ZC_NOSEQUESTER = 0x00000002, + + /** Enable per-CPU zone caching for this zone */ + ZC_CACHING = 0x00000010, + /** Disable per-CPU zone caching for this zone */ + ZC_NOCACHING = 0x00000020, + + + /** Mark zone as a per-cpu zone */ + ZC_PERCPU = 0x01000000, + + /** Force the created zone to clear every allocation on free */ + ZC_ZFREE_CLEARMEM = 0x02000000, + + /** Mark zone as non collectable by zone_gc() */ + ZC_NOGC = 0x04000000, + + /** Do not encrypt this zone during hibernation */ + ZC_NOENCRYPT = 0x08000000, + + /** Type requires alignment to be preserved */ + ZC_ALIGNMENT_REQUIRED = 0x10000000, -#ifdef CONFIG_ZCACHE -#include + /** Do not track this zone when gzalloc is engaged */ + ZC_NOGZALLOC = 0x20000000, + + /** Don't asynchronously replenish the zone via callouts */ + ZC_NOCALLOUT = 0x40000000, + + /** Can be zdestroy()ed, not default unlike zinit() */ + ZC_DESTRUCTIBLE = 0x80000000, + +#ifdef XNU_KERNEL_PRIVATE + + /** This zone will back a kalloc heap */ + ZC_KALLOC_HEAP = 0x0800000000000000, + + /** This zone can be crammed with foreign pages */ + ZC_ALLOW_FOREIGN = 0x1000000000000000, + + /** This zone contains bytes / data buffers only */ + ZC_DATA_BUFFERS = 0x2000000000000000, + + /** Disable kasan quarantine for this zone */ + ZC_KASAN_NOQUARANTINE = 0x4000000000000000, + + /** Disable kasan redzones for this zone */ + ZC_KASAN_NOREDZONE = 0x8000000000000000, #endif +}); -#if CONFIG_GZALLOC -typedef struct gzalloc_data { - uint32_t gzfc_index; - vm_offset_t *gzfc; -} gzalloc_data_t; +/*! + * @union zone_or_view + * + * @abstract + * A type used for calls that admit both a zone or a zone view. + * + * @discussion + * @c zalloc() and @c zfree() and their variants can act on both + * zones and zone views. + */ +union zone_or_view { + struct zone_view *zov_view; + struct zone *zov_zone; +#ifdef __cplusplus + inline zone_or_view(struct zone_view *zv) : zov_view(zv) { + } + inline zone_or_view(struct zone *z) : zov_zone(z) { + } +#endif +}; +#ifdef __cplusplus +typedef union zone_or_view zone_or_view_t; +#else +typedef union zone_or_view zone_or_view_t __attribute__((transparent_union)); #endif -/* - * A zone is a collection of fixed size blocks for which there - * is fast allocation/deallocation access. Kernel routines can - * use zones to manage data structures dynamically, creating a zone - * for each type of data structure to be managed. 
- * - */ - -struct zone_free_element; -struct zone_page_metadata; - -struct zone { -#ifdef CONFIG_ZCACHE - struct zone_cache *zcache; -#endif /* CONFIG_ZCACHE */ - struct zone_free_element *free_elements; /* free elements directly linked */ - struct { - queue_head_t any_free_foreign; /* foreign pages crammed into zone */ - queue_head_t all_free; - queue_head_t intermediate; - queue_head_t all_used; - } pages; /* list of zone_page_metadata structs, which maintain per-page free element lists */ - int count; /* Number of elements used now */ - int countfree; /* Number of free elements */ - int count_all_free_pages; /* Number of pages collectable by GC */ - lck_attr_t lock_attr; /* zone lock attribute */ - decl_lck_mtx_data(, lock); /* zone lock */ - lck_mtx_ext_t lock_ext; /* placeholder for indirect mutex */ - vm_size_t cur_size; /* current memory utilization */ - vm_size_t max_size; /* how large can this zone grow */ - vm_size_t elem_size; /* size of an element */ - vm_size_t alloc_size; /* size used for more memory */ - uint64_t page_count __attribute__((aligned(8))); /* number of pages used by this zone */ - uint64_t sum_count; /* count of allocs (life of zone) */ - uint64_t - /* boolean_t */ exhaustible :1, /* (F) merely return if empty? */ - /* boolean_t */ collectable :1, /* (F) garbage collect empty pages */ - /* boolean_t */ expandable :1, /* (T) expand zone (with message)? */ - /* boolean_t */ allows_foreign :1, /* (F) allow non-zalloc space */ - /* boolean_t */ doing_alloc_without_vm_priv:1, /* is zone expanding now via a non-vm_privileged thread? */ - /* boolean_t */ doing_alloc_with_vm_priv:1, /* is zone expanding now via a vm_privileged thread? */ - /* boolean_t */ waiting :1, /* is thread waiting for expansion? */ - /* boolean_t */ async_pending :1, /* asynchronous allocation pending? */ - /* boolean_t */ zleak_on :1, /* Are we collecting allocation information? */ - /* boolean_t */ caller_acct :1, /* do we account allocation/free to the caller? */ - /* boolean_t */ noencrypt :1, - /* boolean_t */ no_callout :1, - /* boolean_t */ async_prio_refill :1, - /* boolean_t */ gzalloc_exempt :1, - /* boolean_t */ alignment_required :1, - /* boolean_t */ zone_logging :1, /* Enable zone logging for this zone. */ - /* boolean_t */ zone_replenishing :1, - /* boolean_t */ kasan_quarantine :1, - /* boolean_t */ tags :1, - /* boolean_t */ tags_inline :1, - /* future */ tag_zone_index :6, - /* boolean_t */ zone_valid :1, - /* boolean_t */ cpu_cache_enable_when_ready :1, - /* boolean_t */ cpu_cache_enabled :1, - /* boolean_t */ clear_memory :1, - /* boolean_t */ zone_destruction :1, - /* future */ _reserved :33; - - int index; /* index into zone_info arrays for this zone */ - const char *zone_name; /* a name for the zone */ +/*! + * @function zone_create + * + * @abstract + * Creates a zone with the specified parameters. + * + * @discussion + * A Zone is a slab allocator that returns objects of a given size very quickly. + * + * @param name the name for the new zone. + * @param size the size of the elements returned by this zone. + * @param flags a set of @c zone_create_flags_t flags. + * + * @returns the created zone, this call never fails. 
+ */ +extern zone_t zone_create( + const char *name, + vm_size_t size, + zone_create_flags_t flags); -#if CONFIG_ZLEAKS - uint32_t zleak_capture; /* per-zone counter for capturing every N allocations */ -#endif /* CONFIG_ZLEAKS */ - uint32_t zp_count; /* counter for poisoning every N frees */ - uint32_t prio_refill_count; /* if async_prio_refill, refill to this count */ - thread_t zone_replenish_thread; -#if CONFIG_GZALLOC - gzalloc_data_t gz; -#endif /* CONFIG_GZALLOC */ - -#if KASAN_ZALLOC - vm_size_t kasan_redzone; +/*! + * @function zdestroy + * + * @abstract + * Destroys a zone previously made with zone_create. + * + * @discussion + * Zones must have been made destructible for @c zdestroy() to be allowed, + * passing @c ZC_DESTRUCTIBLE at @c zone_create() time. + * + * @param zone the zone to destroy. + */ +extern void zdestroy( + zone_t zone); + +/*! + * @function zone_require + * + * @abstract + * Requires that a given pointer belong to the specified zone. + * + * @discussion + * The function panics if the check fails as it indicates that the kernel + * internals have been compromised. + * + * Note that zone_require() can only work with: + * - zones not allowing foreign memory + * - zones in the general submap. + * + * @param zone the zone the address needs to belong to. + * @param addr the element address to check. + */ +extern void zone_require( + zone_t zone, + void *addr); + +/*! + * @enum zalloc_flags_t + * + * @brief + * Flags that can be passed to @c zalloc_internal or @c zalloc_flags. + * + * @discussion + * It is encouraged that any callsite passing flags uses exactly one of: + * @c Z_WAITOK, @c Z_NOWAIT or @c Z_NOPAGEWAIT, the default being @c Z_WAITOK + * if nothing else was specified. + * + * If any @c Z_NO*WAIT flag is passed alongside @c Z_WAITOK, + * then @c Z_WAITOK is ignored. + * + * @const Z_WAITOK + * Means that it's OK for zalloc() to block to wait for memory; + * when Z_WAITOK is passed, zalloc will never return NULL. + * + * @const Z_NOWAIT + * Passing this flag means that zalloc is not allowed to ever block. + * + * @const Z_NOPAGEWAIT + * Passing this flag means that zalloc is allowed to wait due to lock + * contention, but will not wait for the VM to provide pages when + * under memory pressure. + * + * @const Z_ZERO + * Passing this flag means that the returned memory has been zeroed out. + * + * @const Z_NOFAIL + * Passing this flag means that the caller expects the allocation to always + * succeed. This will result in a panic if this assumption isn't correct. + * + * This flag is incompatible with @c Z_NOWAIT or @c Z_NOPAGEWAIT. It also can't + * be used on exhaustible zones. + * + #if XNU_KERNEL_PRIVATE + * + * @const Z_VM_TAG_MASK + * Represents bits in which a vm_tag_t for the allocation can be passed. + * (used by kalloc for the zone tagging debugging feature). + #endif + */ +__options_decl(zalloc_flags_t, uint32_t, { + // values smaller than 0xff are shared with the M_* flags from BSD MALLOC + Z_WAITOK = 0x0000, + Z_NOWAIT = 0x0001, + Z_NOPAGEWAIT = 0x0002, + Z_ZERO = 0x0004, + + Z_NOFAIL = 0x8000, +#if XNU_KERNEL_PRIVATE + /** used by kalloc to propagate vm tags for -zt */ + Z_VM_TAG_MASK = 0xffff0000, + +#define Z_VM_TAG_SHIFT 16 +#define Z_VM_TAG(tag) ((zalloc_flags_t)(tag) << Z_VM_TAG_SHIFT) #endif +}); + +/*! + * @function zalloc + * + * @abstract + * Allocates an element from a specified zone. + * + * @discussion + * If the zone isn't exhaustible and is expandable, this call never fails.
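Continuing the hypothetical "widget" sketch from above, here is how the flags combine with zalloc_flags(), zone_require() and zfree() (illustrative only):

    struct widget *w, *w2;

    /* Blocking allocation that is also zero-filled on return. */
    w = zalloc_flags(widget_zone, Z_WAITOK | Z_ZERO);

    /* From a context that must not block: this variant may return NULL. */
    w2 = zalloc_flags(widget_zone, Z_NOWAIT);
    if (w2 == NULL) {
        /* back off and retry later */
    }

    /* Panics unless `w` really was allocated from widget_zone. */
    zone_require(widget_zone, w);

    zfree(widget_zone, w);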
+ * + * @param zone_or_view the zone or zone view to allocate from + * + * @returns NULL or the allocated element + */ +extern void *zalloc( + zone_or_view_t zone_or_view); + +/*! + * @function zalloc_noblock + * + * @abstract + * Allocates an element from a specified zone, but never blocks. + * + * @discussion + * This call is suitable for preemptible code, however allocation + * isn't allowed from interrupt context. + * + * @param zone_or_view the zone or zone view to allocate from + * + * @returns NULL or the allocated element + */ +extern void *zalloc_noblock( + zone_or_view_t zone_or_view); + +/*! + * @function zalloc_flags() + * + * @abstract + * Allocates an element from a specified zone, with flags. + * + * @param zone_or_view the zone or zone view to allocate from + * @param flags a collection of @c zalloc_flags_t. + * + * @returns NULL or the allocated element + */ +extern void *zalloc_flags( + zone_or_view_t zone_or_view, + zalloc_flags_t flags); + +/*! + * @function zfree + * + * @abstract + * Frees an element allocated with @c zalloc*. + * + * @discussion + * If the element being freed doesn't belong to the specified zone, + * then this call will panic. + * + * @param zone_or_view the zone or zone view to free the element to. + * @param elem the element to free + */ +extern void zfree( + zone_or_view_t zone_or_view, + void *elem); + +/* deprecated KPIS */ + +__zalloc_deprecated("use zone_create()") +extern zone_t zinit( + vm_size_t size, /* the size of an element */ + vm_size_t maxmem, /* maximum memory to use */ + vm_size_t alloc, /* allocation size */ + const char *name); /* a name for the zone */ + +#ifdef XNU_KERNEL_PRIVATE +#pragma mark - XNU only interfaces +#include +#include + +#pragma GCC visibility push(hidden) + +#pragma mark XNU only: zalloc (extended) + +#define ZALIGN_NONE (sizeof(uint8_t) - 1) +#define ZALIGN_16 (sizeof(uint16_t) - 1) +#define ZALIGN_32 (sizeof(uint32_t) - 1) +#define ZALIGN_PTR (sizeof(void *) - 1) +#define ZALIGN_64 (sizeof(uint64_t) - 1) +#define ZALIGN(t) (_Alignof(t) - 1) + + +/*! + * @function zalloc_permanent() + * + * @abstract + * Allocates a permanent element from the permanent zone + * + * @discussion + * Memory returned by this function is always 0-initialized. + * Note that the size of this allocation can not be determined + * by zone_element_size so it should not be used for copyio. + * + * @param size the element size (must be smaller than PAGE_SIZE) + * @param align_mask the required alignment for this allocation + * + * @returns the allocated element + */ +extern void *zalloc_permanent( + vm_size_t size, + vm_offset_t align_mask); + +/*! + * @function zalloc_permanent_type() + * + * @abstract + * Allocates a permanent element of a given type with its natural alignment. + * + * @discussion + * Memory returned by this function is always 0-initialized. + * + * @param type_t the element type + * + * @returns the allocated element + */ +#define zalloc_permanent_type(type_t) \ + ((type_t *)zalloc_permanent(sizeof(type_t), ZALIGN(type_t))) + +#pragma mark XNU only: per-cpu allocations + +/*! + * @macro __zpercpu + * + * @abstract + * Annotation that helps denoting a per-cpu pointer that requires usage of + * @c zpercpu_*() for access. + */ +#define __zpercpu + +/*! + * @macro zpercpu_get_cpu() + * + * @abstract + * Get a pointer to a specific CPU slot of a given per-cpu variable. + * + * @param ptr the per-cpu pointer (returned by @c zalloc_percpu*()). 
+ * @param cpu the specified CPU number as returned by @c cpu_number() + * + * @returns the per-CPU slot for @c ptr for the specified CPU. + */ +#define zpercpu_get_cpu(ptr, cpu) \ + __zpcpu_cast(ptr, __zpcpu_demangle(ptr) + ptoa((unsigned)cpu)) + +/*! + * @macro zpercpu_get() + * + * @abstract + * Get a pointer to the current CPU slot of a given per-cpu variable. + * + * @param ptr the per-cpu pointer (returned by @c zalloc_percpu*()). + * + * @returns the per-CPU slot for @c ptr for the current CPU. + */ +#define zpercpu_get(ptr) \ + zpercpu_get_cpu(ptr, cpu_number()) + +/*! + * @macro zpercpu_foreach() + * + * @abstract + * Enumerate all per-CPU slots by address. + * + * @param it the name for the iterator + * @param ptr the per-cpu pointer (returned by @c zalloc_percpu*()). + */ +#define zpercpu_foreach(it, ptr) \ + for (typeof(ptr) it = zpercpu_get_cpu(ptr, 0), \ + __end_##it = zpercpu_get_cpu(ptr, zpercpu_count()); \ + it < __end_##it; it = __zpcpu_next(it)) + +/*! + * @macro zpercpu_foreach_cpu() + * + * @abstract + * Enumerate all per-CPU slots by CPU slot number. + * + * @param cpu the name for cpu number iterator. + */ +#define zpercpu_foreach_cpu(cpu) \ + for (unsigned cpu = 0; cpu < zpercpu_count(); cpu++) + +/*! + * @function zalloc_percpu() + * + * @abstract + * Allocates an element from a per-cpu zone. + * + * @discussion + * The returned pointer cannot be used directly and must be manipulated + * through the @c zpercpu_get*() interfaces. + * + * @param zone_or_view the zone or zone view to allocate from + * @param flags a collection of @c zalloc_flags_t. + * + * @returns NULL or the allocated element + */ +extern void *zalloc_percpu( + zone_or_view_t zone_or_view, + zalloc_flags_t flags); - btlog_t *zlog_btlog; /* zone logging structure to hold stacks and element references to those stacks. */ +/*! + * @function zfree_percpu() + * + * @abstract + * Frees an element previously allocated with @c zalloc_percpu(). + * + * @param zone_or_view the zone or zone view to free the element to. + * @param addr the address to free + */ +extern void zfree_percpu( + zone_or_view_t zone_or_view, + void *addr); + +/*! + * @function zalloc_percpu_permanent() + * + * @abstract + * Allocates a permanent percpu-element from the permanent percpu zone. + * + * @discussion + * Memory returned by this function is always 0-initialized. + * + * @param size the element size (must be smaller than PAGE_SIZE) + * @param align_mask the required alignment for this allocation + * + * @returns the allocated element + */ +extern void *zalloc_percpu_permanent( + vm_size_t size, + vm_offset_t align_mask); + +/*! + * @function zalloc_percpu_permanent_type() + * + * @abstract + * Allocates a permanent percpu-element from the permanent percpu zone of a given + * type with its natural alignment. + * + * @discussion + * Memory returned by this function is always 0-initialized. + * + * @param type_t the element type + * + * @returns the allocated element + */ +#define zalloc_percpu_permanent_type(type_t) \ + ((type_t *)zalloc_percpu_permanent(sizeof(type_t), ZALIGN(type_t))) + +#pragma mark XNU only: zone views + +/*! + * @enum zone_kheap_id_t + * + * @brief + * Enumerate a particular kalloc heap. + * + * @discussion + * More documentation about heaps is available in @c . + * + * @const KHEAP_ID_NONE + * This value denotes regular zones, not used by kalloc. + * + * @const KHEAP_ID_DEFAULT + * Indicates zones part of the KHEAP_DEFAULT heap. 
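A sketch of the per-CPU interfaces documented above; the "net_stats" type, the pcpu_zone argument (assumed to have been created with ZC_PERCPU) and the helper names are hypothetical:

    struct net_stats {
        uint64_t ns_rx;
        uint64_t ns_tx;
    };

    static struct net_stats *__zpercpu ns;

    static void
    net_stats_setup(zone_t pcpu_zone)
    {
        ns = zalloc_percpu(pcpu_zone, Z_WAITOK | Z_ZERO);
    }

    static void
    net_stats_count_rx(void)
    {
        /* Only touch the slot of the CPU we are running on
         * (a real caller would keep preemption disabled around this). */
        zpercpu_get(ns)->ns_rx++;
    }

    static uint64_t
    net_stats_total_rx(void)
    {
        uint64_t total = 0;

        /* Walk every CPU slot to aggregate the counters. */
        zpercpu_foreach(it, ns) {
            total += it->ns_rx;
        }
        return total;
    }

For boot-time state that is never freed, zalloc_percpu_permanent_type(struct net_stats) would be the single-allocation equivalent.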
+ * + * @const KHEAP_ID_DATA_BUFFERS + * Indicates zones part of the KHEAP_DATA_BUFFERS heap. + * + * @const KHEAP_ID_KEXT + * Indicates zones part of the KHEAP_KEXT heap. + */ +__enum_decl(zone_kheap_id_t, uint32_t, { + KHEAP_ID_NONE, + KHEAP_ID_DEFAULT, + KHEAP_ID_DATA_BUFFERS, + KHEAP_ID_KEXT, + +#define KHEAP_ID_COUNT (KHEAP_ID_KEXT + 1) +}); + +/*! + * @typedef zone_stats_t + * + * @abstract + * The opaque type for per-cpu zone stats that are accumulated per zone + * or per zone-view. + */ +typedef struct zone_stats *__zpercpu zone_stats_t; + +/*! + * @typedef zone_view_t + * + * @abstract + * A view on a zone for accounting purposes. + * + * @discussion + * A zone view uses the zone it references for the allocations backing store, + * but does the allocation accounting at the view level. + * + * These accounting are surfaced by @b zprint(1) and similar tools, + * which allow for cheap but finer grained understanding of allocations + * without any fragmentation cost. + * + * Zone views are protected by the kernel lockdown and can't be initialized + * dynamically. They must be created using @c ZONE_VIEW_DEFINE(). + */ +typedef struct zone_view *zone_view_t; +struct zone_view { + zone_t zv_zone; + zone_stats_t zv_stats; + const char *zv_name; + zone_view_t zv_next; }; -/* - * structure for tracking zone usage - * Used either one per task/thread for all zones or . +/*! + * @macro ZONE_VIEW_DECLARE + * + * @abstract + * (optionally) declares a zone view (in a header). + * + * @param var the name for the zone view. */ -typedef struct zinfo_usage_store_t { - /* These fields may be updated atomically, and so must be 8 byte aligned */ - uint64_t alloc __attribute__((aligned(8))); /* allocation counter */ - uint64_t free __attribute__((aligned(8))); /* free counter */ -} zinfo_usage_store_t; +#define ZONE_VIEW_DECLARE(var) \ + extern struct zone_view var[1] -/* - * For sysctl kern.zones_collectable_bytes used by memory_maintenance to check if a - * userspace reboot is needed. The only other way to query for this information - * is via mach_memory_info() which is unavailable on release kernels. +/*! + * @macro ZONE_VIEW_DEFINE + * + * @abstract + * Defines a given zone view and what it points to. + * + * @discussion + * Zone views can either share a pre-existing zone, + * or perform a lookup into a kalloc heap for the zone + * backing the bucket of the proper size. + * + * Zone views are initialized during the @c STARTUP_SUB_ZALLOC phase, + * as the last rank. If views on zones are created, these must have been + * created before this stage. + * + * @param var the name for the zone view. + * @param name a string describing the zone view. + * @param heap_or_zone a @c KHEAP_ID_* constant or a pointer to a zone. + * @param size the element size to be allocated from this view. */ -extern uint64_t get_zones_collectable_bytes(void); +#define ZONE_VIEW_DEFINE(var, name, heap_or_zone, size) \ + SECURITY_READ_ONLY_LATE(struct zone_view) var[1] = { { \ + .zv_name = name, \ + } }; \ + static __startup_data struct zone_view_startup_spec \ + __startup_zone_view_spec_ ## var = { var, { heap_or_zone }, size }; \ + STARTUP_ARG(ZALLOC, STARTUP_RANK_LAST, zone_view_startup_init, \ + &__startup_zone_view_spec_ ## var) -/* - * zone_gc also checks if the zone_map is getting close to full and triggers jetsams if needed, provided - * consider_jetsams is set to TRUE. To avoid deadlocks, we only pass a value of TRUE from within the - * vm_pageout_garbage_collect thread. 
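A sketch of ZONE_VIEW_DEFINE() as described above, assuming a hypothetical 64-byte "tag" allocation that should get its own accounting row while sharing the KHEAP_DEFAULT backing zone:

    /* Accounted separately in zprint(1), backed by the default kalloc heap. */
    ZONE_VIEW_DEFINE(tag_view, "hypothetical.tag", KHEAP_ID_DEFAULT, 64);

    static void *
    tag_alloc(void)
    {
        /* zalloc_flags() accepts either a zone or a zone view (zone_or_view_t). */
        return zalloc_flags(tag_view, Z_WAITOK | Z_ZERO);
    }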
- */ -extern void zone_gc(boolean_t consider_jetsams); -extern void consider_zone_gc(boolean_t consider_jetsams); -extern void drop_free_elements(zone_t z); - -/* Debug logging for zone-map-exhaustion jetsams. */ -extern void get_zone_map_size(uint64_t *current_size, uint64_t *capacity); -extern void get_largest_zone_info(char *zone_name, size_t zone_name_len, uint64_t *zone_size); - -/* Bootstrap zone module (create zone zone) */ -extern void zone_bootstrap(void); - -/* Init zone module */ -extern void zone_init( - vm_size_t map_size); - -/* Stack use statistics */ -extern void stack_fake_zone_init(int zone_index); -extern void stack_fake_zone_info( - int *count, - vm_size_t *cur_size, - vm_size_t *max_size, - vm_size_t *elem_size, - vm_size_t *alloc_size, - uint64_t *sum_size, - int *collectable, - int *exhaustable, - int *caller_acct); - -#if ZONE_DEBUG - -extern void zone_debug_enable( - zone_t z); - -extern void zone_debug_disable( - zone_t z); - -#define zone_debug_enabled(z) z->active_zones.next -#define ROUNDUP(x, y) ((((x)+(y)-1)/(y))*(y)) -#define ZONE_DEBUG_OFFSET ROUNDUP(sizeof(queue_chain_t),16) -#endif /* ZONE_DEBUG */ - -extern unsigned int num_zones; -extern struct zone zone_array[]; - -/* zindex and page_count must pack into 16 bits - * update tools/lldbmacros/memory.py:GetRealMetadata - * when these values change */ - -#define ZINDEX_BITS (10U) -#define PAGECOUNT_BITS (16U - ZINDEX_BITS) -#define MULTIPAGE_METADATA_MAGIC ((1UL << ZINDEX_BITS) - 1) -#define ZONE_CHUNK_MAXPAGES ((1UL << PAGECOUNT_BITS) - 1) -/* - * The max # of elements in a chunk should fit into zone_page_metadata.free_count (uint16_t). - * Update this if the type of free_count changes. +#pragma mark XNU only: zone creation (extended) + +/*! + * @enum zone_reserved_id_t + * + * @abstract + * Well known pre-registered zones, allowing use of zone_id_require() + * + * @discussion + * @c ZONE_ID__* aren't real zone IDs. + * + * @c ZONE_ID__ZERO reserves zone index 0 so that it can't be used, as 0 is too + * easy a value to produce (by malice or accident). + * + * @c ZONE_ID__FIRST_DYNAMIC is the first dynamic zone ID that can be used by + * @c zone_create(). */ -#define ZONE_CHUNK_MAXELEMENTS (UINT16_MAX) +__enum_decl(zone_reserved_id_t, zone_id_t, { + ZONE_ID__ZERO, + + ZONE_ID_PERMANENT, + ZONE_ID_PERCPU_PERMANENT, + + ZONE_ID_IPC_PORT, + ZONE_ID_IPC_PORT_SET, + ZONE_ID_IPC_VOUCHERS, + ZONE_ID_TASK, + ZONE_ID_PROC, + ZONE_ID_VM_MAP_COPY, + ZONE_ID_PMAP, + + ZONE_ID__FIRST_DYNAMIC, +}); + +/*! + * @const ZONE_ID_ANY + * The value to pass to @c zone_create_ext() to allocate a non pre-registered + * Zone ID. + */ +#define ZONE_ID_ANY ((zone_id_t)-1) -#endif /* MACH_KERNEL_PRIVATE */ +/**! + * @function zone_name + * + * @param zone the specified zone + * @returns the name of the specified zone. + */ +const char *zone_name( + zone_t zone); -__BEGIN_DECLS +/**! + * @function zone_heap_name + * + * @param zone the specified zone + * @returns the name of the heap this zone is part of, or "". + */ +const char *zone_heap_name( + zone_t zone); +/**! + * @function zone_submap + * + * @param zone the specified zone + * @returns the zone (sub)map this zone allocates from. 
+ */ +extern vm_map_t zone_submap( + zone_t zone); -/* Item definitions for zalloc/zinit/zone_change */ -#define Z_EXHAUST 1 /* Make zone exhaustible */ -#define Z_COLLECT 2 /* Make zone collectable */ -#define Z_EXPAND 3 /* Make zone expandable */ -#define Z_FOREIGN 4 /* Allow collectable zone to contain foreign elements */ -#define Z_CALLERACCT 5 /* Account alloc/free against the caller */ -#define Z_NOENCRYPT 6 /* Don't encrypt zone during hibernation */ -#define Z_NOCALLOUT 7 /* Don't asynchronously replenish the zone via callouts */ -#define Z_ALIGNMENT_REQUIRED 8 -#define Z_GZALLOC_EXEMPT 9 /* Not tracked in guard allocation mode */ -#define Z_KASAN_QUARANTINE 10 /* Allow zone elements to be quarantined on free */ -#ifdef XNU_KERNEL_PRIVATE -#define Z_TAGS_ENABLED 11 /* Store tags */ -#endif /* XNU_KERNEL_PRIVATE */ -#define Z_CACHING_ENABLED 12 /*enable and initialize per-cpu caches for the zone*/ -#define Z_CLEARMEMORY 13 /* Use KMA_ZERO on new allocations */ - -#ifdef XNU_KERNEL_PRIVATE - -extern vm_offset_t zone_map_min_address; -extern vm_offset_t zone_map_max_address; - -/* free an element with no regard for gzalloc, zleaks, or kasan*/ -extern void zfree_direct( zone_t zone, - vm_offset_t elem); - -/* attempts to allocate an element with no regard for gzalloc, zleaks, or kasan*/ -extern void * zalloc_attempt( zone_t zone); - -/* Non-waiting for memory version of zalloc */ -extern void * zalloc_nopagewait( - zone_t zone); +/*! + * @function zone_create_ext + * + * @abstract + * Creates a zone with the specified parameters. + * + * @discussion + * This is an extended version of @c zone_create(). + * + * @param name the name for the new zone. + * @param size the size of the elements returned by this zone. + * @param flags a set of @c zone_create_flags_t flags. + * @param desired_zid a @c zone_reserved_id_t value or @c ZONE_ID_ANY. + * + * @param extra_setup a block that can perform non trivial initialization + * on the zone before it is marked valid. + * This block can call advanced setups like: + * - zone_set_submap_idx() + * - zone_set_exhaustible() + * - zone_set_noexpand() + * + * @returns the created zone, this call never fails. + */ +extern zone_t zone_create_ext( + const char *name, + vm_size_t size, + zone_create_flags_t flags, + zone_id_t desired_zid, + void (^extra_setup)(zone_t)); + +/*! + * @macro ZONE_DECLARE + * + * @abstract + * Declares a zone variable to automatically initialize with the specified + * parameters. + * + * @param var the name of the variable to declare. + * @param name the name for the zone + * @param size the size of the elements returned by this zone. + * @param flags a set of @c zone_create_flags_t flags. + */ +#define ZONE_DECLARE(var, name, size, flags) \ + SECURITY_READ_ONLY_LATE(zone_t) var; \ + static_assert(((flags) & ZC_DESTRUCTIBLE) == 0); \ + static __startup_data struct zone_create_startup_spec \ + __startup_zone_spec_ ## var = { &var, name, size, flags, \ + ZONE_ID_ANY, NULL }; \ + STARTUP_ARG(ZALLOC, STARTUP_RANK_MIDDLE, zone_create_startup, \ + &__startup_zone_spec_ ## var) + +/*! + * @macro ZONE_INIT + * + * @abstract + * Initializes a given zone automatically during startup with the specified + * parameters. + * + * @param var the name of the variable to initialize. + * @param name the name for the zone + * @param size the size of the elements returned by this zone. + * @param flags a set of @c zone_create_flags_t flags. + * @param desired_zid a @c zone_reserved_id_t value or @c ZONE_ID_ANY. 
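A sketch of the ZONE_DECLARE() convenience documented above, for a hypothetical "foo" subsystem; the zone is registered during the ZALLOC startup phase, so it is usable by the time foo_alloc() can run:

    struct foo {
        uint64_t f_id;
        uint32_t f_state;
    };

    ZONE_DECLARE(foo_zone, "hypothetical.foo", sizeof(struct foo),
        ZC_ZFREE_CLEARMEM | ZC_NOGZALLOC);

    static struct foo *
    foo_alloc(void)
    {
        return zalloc_flags(foo_zone, Z_WAITOK | Z_ZERO);
    }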
+ * @param extra_setup a block that can perform non trivial initialization + * (@see @c zone_create_ext()). + */ +#define ZONE_INIT(var, name, size, flags, desired_zid, extra_setup) \ + __ZONE_INIT(__LINE__, var, name, size, flags, desired_zid, extra_setup) -/* selective version of zalloc */ -extern void * zalloc_canblock( - zone_t zone, - boolean_t canblock); +/*! + * @function zone_id_require + * + * @abstract + * Requires for a given pointer to belong to the specified zone, by ID and size. + * + * @discussion + * The function panics if the check fails as it indicates that the kernel + * internals have been compromised. + * + * This is a variant of @c zone_require() which: + * - isn't sensitive to @c zone_t::elem_size being compromised, + * - is slightly faster as it saves one load and a multiplication. + * + * @param zone_id the zone ID the address needs to belong to. + * @param elem_size the size of elements for this zone. + * @param addr the element address to check. + */ +extern void zone_id_require( + zone_id_t zone_id, + vm_size_t elem_size, + void *addr); -/* selective version of zalloc */ -extern void * zalloc_canblock_tag( +/* + * Zone submap indices + * + * Z_SUBMAP_IDX_VA_RESTRICTED_MAP (LP64) + * used to restrict VM allocations lower in the kernel VA space, + * for pointer packing + * + * Z_SUBMAP_IDX_GENERAL_MAP + * used for unrestricted allocations + * + * Z_SUBMAP_IDX_BAG_OF_BYTES_MAP + * used to sequester bags of bytes from all other allocations and allow VA reuse + * within the map + */ +#if !defined(__LP64__) +#define Z_SUBMAP_IDX_GENERAL_MAP 0 +#define Z_SUBMAP_IDX_BAG_OF_BYTES_MAP 1 +#define Z_SUBMAP_IDX_COUNT 2 +#else +#define Z_SUBMAP_IDX_VA_RESTRICTED_MAP 0 +#define Z_SUBMAP_IDX_GENERAL_MAP 1 +#define Z_SUBMAP_IDX_BAG_OF_BYTES_MAP 2 +#define Z_SUBMAP_IDX_COUNT 3 +#endif + +/* Change zone sub-map, to be called from the zone_create_ext() setup hook */ +extern void zone_set_submap_idx( zone_t zone, - boolean_t canblock, - vm_size_t reqsize, - vm_tag_t tag); + unsigned int submap_idx); -/* Get from zone free list */ -extern void * zget( - zone_t zone); +/* Make zone as non expandable, to be called from the zone_create_ext() setup hook */ +extern void zone_set_noexpand( + zone_t zone, + vm_size_t maxsize); -/* Fill zone with memory */ -extern void zcram( +/* Make zone exhaustible, to be called from the zone_create_ext() setup hook */ +extern void zone_set_exhaustible( zone_t zone, - vm_offset_t newmem, - vm_size_t size); + vm_size_t maxsize); /* Initially fill zone with specified number of elements */ -extern int zfill( +extern int zfill( zone_t zone, - int nelem); - -extern void zone_prio_refill_configure(zone_t); - -/* See above/top of file. Z_* definitions moved so they would be usable by kexts */ + int nelem); -/* Preallocate space for zone from zone map */ -extern void zprealloc( +/* Fill zone with memory */ +extern void zcram( zone_t zone, + vm_offset_t newmem, vm_size_t size); -extern integer_t zone_free_count( - zone_t zone); - -extern vm_size_t zone_element_size( - void *addr, - zone_t *z); +#pragma mark XNU only: misc & implementation details /* - * Structure for keeping track of a backtrace, used for leak detection. - * This is in the .h file because it is used during panic, see kern/debug.c - * A non-zero size indicates that the trace is in use. - */ -struct ztrace { - vm_size_t zt_size; /* How much memory are all the allocations referring to this trace taking up? 
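A sketch of zone_create_ext() with an extra_setup block that uses the setters above; the "gadget" names, the startup rank and the size cap are hypothetical choices for illustration:

    struct gadget {
        uint64_t g_id;
    };

    static SECURITY_READ_ONLY_LATE(zone_t) gadget_zone;

    __startup_func
    static void
    gadget_zone_startup(void)
    {
        gadget_zone = zone_create_ext("hypothetical.gadget",
            sizeof(struct gadget), ZC_ZFREE_CLEARMEM, ZONE_ID_ANY,
            ^(zone_t z) {
                /* These setters are only legal from the setup hook,
                 * before the zone is marked valid. */
                zone_set_submap_idx(z, Z_SUBMAP_IDX_GENERAL_MAP);
                zone_set_exhaustible(z, 64 * 1024);
        });
    }
    STARTUP(ZALLOC, STARTUP_RANK_LAST, gadget_zone_startup);

Where a zone is registered at one of the reserved IDs above, consumers can use the cheaper zone_id_require(zone_id, elem_size, addr) check instead of zone_require(), since it does not need to read the zone's element size.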
*/ - uint32_t zt_depth; /* depth of stack (0 to MAX_ZTRACE_DEPTH) */ - void* zt_stack[MAX_ZTRACE_DEPTH]; /* series of return addresses from OSBacktrace */ - uint32_t zt_collisions; /* How many times did a different stack land here while it was occupied? */ - uint32_t zt_hit_count; /* for determining effectiveness of hash function */ + * This macro sets "elem" to NULL on free. + * + * Note: all values passed to zfree() might be in the element to be freed, + * temporaries must be taken, and the resetting to be done prior to free. + */ +#define zfree(zone, elem) ({ \ + _Static_assert(sizeof(elem) == sizeof(void *), "elem isn't pointer sized"); \ + __auto_type __zfree_zone = (zone); \ + __auto_type __zfree_eptr = &(elem); \ + __auto_type __zfree_elem = *__zfree_eptr; \ + *__zfree_eptr = (__typeof__(__zfree_elem))NULL; \ + (zfree)(__zfree_zone, (void *)__zfree_elem); \ +}) + +struct zone_create_startup_spec { + zone_t *z_var; + const char *z_name; + vm_size_t z_size; + zone_create_flags_t z_flags; + zone_id_t z_zid; + void (^z_setup)(zone_t); }; -#if CONFIG_ZLEAKS - -/* support for the kern.zleak.* sysctls */ - -extern kern_return_t zleak_activate(void); -extern vm_size_t zleak_max_zonemap_size; -extern vm_size_t zleak_global_tracking_threshold; -extern vm_size_t zleak_per_zone_tracking_threshold; +extern void zone_create_startup( + struct zone_create_startup_spec *spec); + +#define __ZONE_INIT1(ns, var, name, size, flags, zid, setup) \ + static __startup_data struct zone_create_startup_spec \ + __startup_zone_spec_ ## ns = { var, name, size, flags, zid, setup }; \ + STARTUP_ARG(ZALLOC, STARTUP_RANK_MIDDLE, zone_create_startup, \ + &__startup_zone_spec_ ## ns) + +#define __ZONE_INIT(ns, var, name, size, flags, zid, setup) \ + __ZONE_INIT1(ns, var, name, size, flags, zid, setup) \ + +struct zone_view_startup_spec { + zone_view_t zv_view; + union { + zone_kheap_id_t zv_heapid; + zone_t zv_zone; + }; + vm_size_t zv_size; +}; -extern int get_zleak_state(void); +extern void zone_view_startup_init( + struct zone_view_startup_spec *spec); -#endif /* CONFIG_ZLEAKS */ -#ifndef VM_MAX_TAG_ZONES -#error MAX_TAG_ZONES -#endif +#if DEBUG || DEVELOPMENT +# if __LP64__ +# define ZPCPU_MANGLE_BIT (1ul << 63) +# else /* !__LP64__ */ +# define ZPCPU_MANGLE_BIT (1ul << 31) +# endif /* !__LP64__ */ +#else /* !(DEBUG || DEVELOPMENT) */ +# define ZPCPU_MANGLE_BIT 0ul +#endif /* !(DEBUG || DEVELOPMENT) */ -#if VM_MAX_TAG_ZONES +#define __zpcpu_mangle(ptr) (__zpcpu_addr(ptr) & ~ZPCPU_MANGLE_BIT) +#define __zpcpu_demangle(ptr) (__zpcpu_addr(ptr) | ZPCPU_MANGLE_BIT) +#define __zpcpu_addr(e) ((vm_address_t)(e)) +#define __zpcpu_cast(ptr, e) ((typeof(ptr))(e)) +#define __zpcpu_next(ptr) __zpcpu_cast(ptr, __zpcpu_addr(ptr) + PAGE_SIZE) -extern boolean_t zone_tagging_on; -extern uint32_t zone_index_from_tag_index(uint32_t tag_zone_index, vm_size_t * elem_size); +extern unsigned zpercpu_count(void) __pure2; -#endif /* VM_MAX_TAG_ZONES */ /* These functions used for leak detection both in zalloc.c and mbuf.c */ extern uintptr_t hash_mix(uintptr_t); extern uint32_t hashbacktrace(uintptr_t *, uint32_t, uint32_t); extern uint32_t hashaddr(uintptr_t, uint32_t); -#define lock_zone(zone) \ -MACRO_BEGIN \ - lck_mtx_lock_spin_always(&(zone)->lock); \ -MACRO_END - -#define unlock_zone(zone) \ -MACRO_BEGIN \ - lck_mtx_unlock(&(zone)->lock); \ -MACRO_END - -#if CONFIG_GZALLOC -void gzalloc_init(vm_size_t); -void gzalloc_zone_init(zone_t); -void gzalloc_configure(void); -void gzalloc_reconfigure(zone_t); -void 
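The kernel-private zfree() wrapper defined earlier in this section resets the caller's variable before invoking the real (zfree)(), so a dangling reuse turns into an obvious NULL dereference. A short sketch, reusing the hypothetical "widget" names:

    struct widget *w = zalloc_flags(widget_zone, Z_WAITOK);

    zfree(widget_zone, w);
    assert(w == NULL);      /* cleared by the macro, not by the callee */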
gzalloc_empty_free_cache(zone_t); -boolean_t gzalloc_enabled(void); - -vm_offset_t gzalloc_alloc(zone_t, boolean_t); -boolean_t gzalloc_free(zone_t, void *); -boolean_t gzalloc_element_size(void *, zone_t *, vm_size_t *); -#endif /* CONFIG_GZALLOC */ - -/* Callbacks for btlog lock/unlock */ -void zlog_btlog_lock(__unused void *); -void zlog_btlog_unlock(__unused void *); - -#ifdef MACH_KERNEL_PRIVATE -#define MAX_ZONE_NAME 32 /* max length of a zone name we can take from the boot-args */ -int track_this_zone(const char *zonename, const char *logname); -#endif - -#if DEBUG || DEVELOPMENT -extern boolean_t run_zone_test(void); -extern vm_size_t zone_element_info(void *addr, vm_tag_t * ptag); -#endif /* DEBUG || DEVELOPMENT */ - -#endif /* XNU_KERNEL_PRIVATE */ - -/* Allocate from zone */ -extern void * zalloc( - zone_t zone); - -/* Non-blocking version of zalloc */ -extern void * zalloc_noblock( - zone_t zone); +#if CONFIG_ZLEAKS +/* support for the kern.zleak.* sysctls */ -/* Free zone element */ -extern void zfree( - zone_t zone, - void *elem); +extern kern_return_t zleak_activate(void); +extern vm_size_t zleak_max_zonemap_size; +extern vm_size_t zleak_global_tracking_threshold; +extern vm_size_t zleak_per_zone_tracking_threshold; -#ifdef XNU_KERNEL_PRIVATE -#define zfree(zone, elem) \ -_Pragma("clang diagnostic push") \ -_Pragma("clang diagnostic ignored \"-Wshadow\"") \ - do { \ - _Static_assert(sizeof (elem) == sizeof (void *) || sizeof (elem) == sizeof (mach_vm_address_t), "elem is not a pointer"); \ - void *__tmp_addr = (void *) elem; \ - zone_t __tmp_zone = zone; \ - elem = (__typeof__(elem)) NULL; \ - (zfree)(__tmp_zone, __tmp_addr); \ - } while (0) \ -_Pragma("clang diagnostic pop") -#endif /* XNU_KERNEL_PRIVATE */ +extern int get_zleak_state(void); -/* Create zone */ -extern zone_t zinit( - vm_size_t size, /* the size of an element */ - vm_size_t maxmem, /* maximum memory to use */ - vm_size_t alloc, /* allocation size */ - const char *name); /* a name for the zone */ - -/* Change zone parameters */ -extern void zone_change( - zone_t zone, - unsigned int item, - boolean_t value); - -/* Destroy the zone */ -extern void zdestroy( - zone_t zone); +#endif /* CONFIG_ZLEAKS */ +#if DEBUG || DEVELOPMENT -#ifdef XNU_KERNEL_PRIVATE +extern boolean_t run_zone_test(void); +extern void zone_gc_replenish_test(void); +extern void zone_alloc_replenish_test(void); -/* Panic if a pointer is not mapped to the zone specified */ -extern void zone_require( - void *addr, - zone_t expected_zone); +#endif /* DEBUG || DEVELOPMENT */ +#pragma GCC visibility pop #endif /* XNU_KERNEL_PRIVATE */ __END_DECLS diff --git a/osfmk/kern/zalloc_internal.h b/osfmk/kern/zalloc_internal.h new file mode 100644 index 000000000..9fff60429 --- /dev/null +++ b/osfmk/kern/zalloc_internal.h @@ -0,0 +1,560 @@ +/* + * Copyright (c) 2000-2020 Apple Inc. All rights reserved. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. 
+ * + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ + +#ifndef _KERN_ZALLOC_INTERNAL_H_ +#define _KERN_ZALLOC_INTERNAL_H_ + +#include +#include +#include +#include +#include + +#include + +#if KASAN +#include +#include +/* + * Set to 0 to debug poisoning and ZC_ZFREE_CLEARMEM validation under kasan. + * Otherwise they are double-duty with what kasan already does. + */ +#define ZALLOC_ENABLE_POISONING 0 +#else /* !KASAN */ +#define ZALLOC_ENABLE_POISONING 1 +#endif /* !KASAN */ + +#if DEBUG || DEVELOPMENT +#define ZALLOC_DETAILED_STATS 1 +#else +#define ZALLOC_DETAILED_STATS 0 +#endif + +/*! + * @file + * + * @abstract + * Exposes some guts of zalloc to interact with the VM, debugging, copyio and + * kalloc subsystems. + */ + +__BEGIN_DECLS + +#pragma GCC visibility push(hidden) + +#if CONFIG_GZALLOC +typedef struct gzalloc_data { + uint32_t gzfc_index; + vm_offset_t *gzfc; +} gzalloc_data_t; +#endif + +/* + * A zone is a collection of fixed size blocks for which there + * is fast allocation/deallocation access. Kernel routines can + * use zones to manage data structures dynamically, creating a zone + * for each type of data structure to be managed. + * + */ + +/*! + * @typedef zone_pva_t + * + * @brief + * Type used to point to a page virtual address in the zone allocator. + * + * @description + * - Valid pages have the top bit set. + * - 0 represents the "NULL" page + * - non 0 values with the top bit cleared do not represent any valid page. + * the zone freelists use this space to encode "queue" addresses. + */ +typedef struct zone_packed_virtual_address { + uint32_t packed_address; +} zone_pva_t; + +/*! + * @struct zone_stats + * + * @abstract + * Per-cpu structure used for basic zone stats. + * + * @discussion + * The values aren't scaled for per-cpu zones. 
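The encoding rules above suggest predicates like the following; this is a hedged sketch only, the real helpers live in zalloc.c and may differ in detail:

    static inline bool
    zone_pva_is_null_sketch(zone_pva_t page)
    {
        /* 0 represents the "NULL" page */
        return page.packed_address == 0;
    }

    static inline bool
    zone_pva_is_valid_page_sketch(zone_pva_t page)
    {
        /* valid pages have the top bit set */
        return (page.packed_address & 0x80000000u) != 0;
    }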
+ */ +struct zone_stats { + uint64_t zs_mem_allocated; + uint64_t zs_mem_freed; +#if ZALLOC_DETAILED_STATS + uint64_t zs_mem_wasted; +#endif /* ZALLOC_DETAILED_STATS */ +}; + +struct zone { + /* + * Readonly / rarely written fields + */ + + /* + * The first 4 fields match a zone_view. + * + * z_self points back to the zone when the zone is initialized, + * or is NULL else. + */ + struct zone *z_self; + zone_stats_t z_stats; + const char *z_name; + struct zone_view *z_views; +#ifdef CONFIG_ZCACHE + struct zone_cache zcache; +#endif /* CONFIG_ZCACHE */ + + uint16_t alloc_pages; /* size used for more memory in pages */ + uint16_t z_elem_size; /* size of an element */ + uint16_t pcpu_elem_size; + uint16_t prio_refill_count; /* if !=0 , refill to this count */ + uint32_t page_count_max; /* how large can this zone grow */ + + uint32_t page_count_hwm; /* page_count high watermark */ + uint32_t page_count; /* number of pages used by this zone */ + uint32_t countavail; /* Number of elements available */ + + uint64_t + /* + * Lifecycle state (Mutable after creation) + */ + destroyed :1, /* zone is (being) destroyed */ + expanding_no_vm_priv:1, /* zone expanding via a non-vm_privileged thread */ + expanding_vm_priv :1, /* zone expanding via a vm_privileged thread */ + async_pending :1, /* asynchronous allocation pending? */ + waiting :1, /* is thread waiting for expansion? */ + zone_replenishing :1, + + /* + * Security sensitive configuration bits + */ + allows_foreign :1, /* allow non-zalloc space */ + destructible :1, /* zone can be zdestroy()ed */ + kalloc_heap :2, /* zone_kheap_id_t when part of a kalloc heap */ + noencrypt :1, /* do not encrypt pages when hibernating */ + submap_idx :2, /* a Z_SUBMAP_IDX_* value */ + va_sequester :1, /* page sequester: no VA reuse with other zones */ + zfree_clear_mem :1, /* clear memory of elements on free and assert on alloc */ + + /* + * Behavior configuration bits + */ + collectable :1, /* garbage collect empty pages */ + cpu_cache_enabled :1, + permanent :1, /* the zone allocations are permanent */ + exhaustible :1, /* merely return if empty? */ + expandable :1, /* expand zone (with message)? */ + no_callout :1, + percpu :1, /* the zone is percpu */ + + _reserved :26, + + /* + * Debugging features + */ + alignment_required :1, /* element alignment needs to be preserved */ + gzalloc_tracked :1, /* this zone is tracked by gzalloc */ + gzalloc_exempt :1, /* this zone doesn't participate with gzalloc */ + kasan_fakestacks :1, + kasan_noquarantine :1, /* whether to use the kasan quarantine */ + tag_zone_index :7, + tags :1, + tags_inline :1, + zleak_on :1, /* Are we collecting allocation information? */ + zone_logging :1; /* Enable zone logging for this zone. */ + + /* + * often mutated fields + */ + + decl_simple_lock_data(, lock); + + /* + * list of metadata structs, which maintain per-page free element lists + * + * Note: Due to the index packing in page metadata, + * these pointers can't be at the beginning of the zone struct. 
+ */ + zone_pva_t pages_any_free_foreign; /* foreign pages crammed into zone */ + zone_pva_t pages_all_used_foreign; + zone_pva_t pages_all_free; + zone_pva_t pages_intermediate; + zone_pva_t pages_all_used; + zone_pva_t pages_sequester; /* sequestered pages - allocated VA with no populated pages */ + + uint32_t zp_count; /* counter for poisoning every N frees */ + uint32_t countfree; /* Number of free elements */ + uint32_t allfree_page_count; /* Number of pages collectable by GC */ + uint32_t sequester_page_count; + +#if CONFIG_ZLEAKS + uint32_t zleak_capture; /* per-zone counter for capturing every N allocations */ +#endif +#if CONFIG_GZALLOC + gzalloc_data_t gz; +#endif +#if KASAN_ZALLOC + vm_size_t kasan_redzone; +#endif +#if DEBUG || DEVELOPMENT || CONFIG_ZLEAKS + /* zone logging structure to hold stacks and element references to those stacks. */ + btlog_t *zlog_btlog; +#endif +}; + + +__options_decl(zone_security_options_t, uint64_t, { + /* + * Zsecurity option to enable sequestering VA of zones + */ + ZSECURITY_OPTIONS_SEQUESTER = 0x00000001, + /* + * Zsecurity option to enable creating separate kalloc zones for + * bags of bytes + */ + ZSECURITY_OPTIONS_SUBMAP_USER_DATA = 0x00000004, + /* + * Zsecurity option to enable sequestering of kalloc zones used by + * kexts (KHEAP_KEXT heap) + */ + ZSECURITY_OPTIONS_SEQUESTER_KEXT_KALLOC = 0x00000008, + /* + * Zsecurity option to enable strict free of iokit objects to zone + * or heap they were allocated from. + */ + ZSECURITY_OPTIONS_STRICT_IOKIT_FREE = 0x00000010, +}); + +#define KALLOC_MINALIGN (1 << KALLOC_LOG2_MINALIGN) +#define KALLOC_DLUT_SIZE (2048 / KALLOC_MINALIGN) + +struct kheap_zones { + struct kalloc_zone_cfg *cfg; + struct kalloc_heap *views; + zone_kheap_id_t heap_id; + uint16_t max_k_zone; + uint8_t dlut[KALLOC_DLUT_SIZE]; /* table of indices into k_zone[] */ + uint8_t k_zindex_start; + /* If there's no hit in the DLUT, then start searching from k_zindex_start. */ + zone_t *k_zone; +}; + +extern zone_security_options_t zsecurity_options; +extern uint32_t _Atomic num_zones; +extern uint32_t zone_view_count; +extern struct zone zone_array[]; +extern lck_grp_t zone_locks_grp; +extern const char * const kalloc_heap_names[KHEAP_ID_COUNT]; + +#define zone_index_foreach(i) \ + for (uint32_t i = 1, num_zones_##i = os_atomic_load(&num_zones, acquire); \ + i < num_zones_##i; i++) + +__pure2 +static inline vm_offset_t +zone_elem_size(zone_t zone) +{ + return zone->z_elem_size; +} + +static inline uint32_t +zone_count_allocated(zone_t zone) +{ + return zone->countavail - zone->countfree; +} + +static inline vm_size_t +zone_size_wired(zone_t zone) +{ + /* + * this either require the zone lock, + * or to be used for statistics purposes only. + */ + return ptoa(os_atomic_load(&zone->page_count, relaxed)); +} + +static inline vm_size_t +zone_size_free(zone_t zone) +{ + return (vm_size_t)zone->pcpu_elem_size * zone->countfree; +} + +static inline vm_size_t +zone_size_allocated(zone_t zone) +{ + return (vm_size_t)zone->pcpu_elem_size * zone_count_allocated(zone); +} + +static inline vm_size_t +zone_size_wasted(zone_t zone) +{ + return zone_size_wired(zone) - + (vm_size_t)zone->pcpu_elem_size * zone->countavail; +} + +/* + * For sysctl kern.zones_collectable_bytes used by memory_maintenance to check if a + * userspace reboot is needed. The only other way to query for this information + * is via mach_memory_info() which is unavailable on release kernels. 
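A kernel-internal sketch of how the iteration macro and size helpers above can be combined, for example to aggregate statistics across all registered zones:

    vm_size_t wired = 0, used = 0;

    zone_index_foreach(i) {
        zone_t z = &zone_array[i];

        wired += zone_size_wired(z);        /* pages owned by the zone */
        used  += zone_size_allocated(z);    /* bytes handed out to callers */
    }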
+ */ +extern uint64_t get_zones_collectable_bytes(void); + +/* + * zone_gc also checks if the zone maps are getting close to full and triggers + * jetsams if needed, provided consider_jetsams is set to TRUE. + * + * To avoid deadlocks, we only pass a value of TRUE from within the + * vm_pageout_garbage_collect thread. + */ +extern void zone_gc(boolean_t consider_jetsams); +extern void consider_zone_gc(boolean_t consider_jetsams); + +/* Debug logging for zone-map-exhaustion jetsams. */ +extern void get_zone_map_size(uint64_t *current_size, uint64_t *capacity); +extern void get_largest_zone_info(char *zone_name, size_t zone_name_len, uint64_t *zone_size); + +/* Bootstrap zone module (create zone zone) */ +extern void zone_bootstrap(void); + +/* + * Steal memory from pmap (prior to initialization of zalloc) + * for the special vm zones that allow foreign memory and store + * the range so as to facilitate range checking in zfree/zcram. + */ +__startup_func +extern vm_offset_t zone_foreign_mem_init(vm_size_t size); + +/* + * Returns size (greater than min_pages) that is a multiple + * of the allocation granule for the zone. + */ +__startup_func +extern vm_size_t zone_get_foreign_alloc_size( + const char *name __unused, + vm_size_t elem_size, + zone_create_flags_t flags, + uint16_t min_pages); + +extern bool zone_maps_owned( + vm_address_t addr, + vm_size_t size); + +extern void zone_map_sizes( + vm_map_size_t *psize, + vm_map_size_t *pfree, + vm_map_size_t *plargest_free); + +extern boolean_t +is_zone_map_nearing_exhaustion(void); + +#if defined(__LP64__) +#define ZONE_POISON 0xdeadbeefdeadbeef +#else +#define ZONE_POISON 0xdeadbeef +#endif + +/* + * Used by zalloc_direct_locked() and zcache to mark elements that have been + * cleared or poisoned and need to be checked. + */ +#define ZALLOC_ELEMENT_NEEDS_VALIDATION ((vm_offset_t)1) + +static inline vm_tag_t +zalloc_flags_get_tag(zalloc_flags_t flags) +{ + return (vm_tag_t)((flags & Z_VM_TAG_MASK) >> Z_VM_TAG_SHIFT); +} + +extern void *zalloc_ext( + zone_t zone, + zone_stats_t zstats, + zalloc_flags_t flags, + vm_size_t wasted); + +extern void zfree_ext( + zone_t zone, + zone_stats_t zstats, + void *addr); + +/* free an element with no regard for gzalloc, zleaks, or kasan*/ +extern void zfree_direct_locked( + zone_t zone, + vm_offset_t elem, + bool poison); + +/* + * attempts to allocate an element with no regard for gzalloc, zleaks, or kasan + * returns an address possibly tagged with ZALLOC_ELEMENT_NEEDS_VALIDATION. + */ +extern vm_offset_t zalloc_direct_locked( + zone_t zone, + zalloc_flags_t flags, + vm_size_t waste); + +extern uint32_t zone_poison_count_init( + zone_t zone); + +extern bool zfree_clear_or_poison( + zone_t zone, + uint32_t *zp_count, + vm_address_t addr); + +extern void zone_clear_freelist_pointers( + zone_t zone, + vm_offset_t addr); + +#if ZALLOC_ENABLE_POISONING +extern void zalloc_validate_element( + zone_t zone, + vm_offset_t addr, + vm_size_t size, + bool validate); +#endif + +extern void zone_allocated_element_validate( + zone_t zone, + vm_offset_t addr); + +extern void zone_prio_refill_configure( + zone_t zone); + +extern vm_size_t zone_element_size( + void *addr, + zone_t *z); + +/*! + * @function zone_owns + * + * @abstract + * This function is a soft version of zone_require that checks if a given + * pointer belongs to the specified zone and should not be used outside + * allocator code. 
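A small round-trip sketch of the VM-tag packing used by kalloc's "-zt" tagging support: Z_VM_TAG() stores a vm_tag_t in the upper flag bits and zalloc_flags_get_tag() recovers it (VM_KERN_MEMORY_KALLOC is used here only as an example of an existing tag):

    zalloc_flags_t flags = Z_WAITOK | Z_VM_TAG(VM_KERN_MEMORY_KALLOC);

    assert(zalloc_flags_get_tag(flags) == VM_KERN_MEMORY_KALLOC);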
+ * + * @discussion + * Note that zone_owns() can only work with: + * - zones not allowing foreign memory + * - zones in the general submap. + * + * @param zone the zone the address needs to belong to. + * @param addr the element address to check. + */ +extern bool zone_owns( + zone_t zone, + void *addr); + +/* + * Structure for keeping track of a backtrace, used for leak detection. + * This is in the .h file because it is used during panic, see kern/debug.c + * A non-zero size indicates that the trace is in use. + */ +struct ztrace { + vm_size_t zt_size; /* How much memory are all the allocations referring to this trace taking up? */ + uint32_t zt_depth; /* depth of stack (0 to MAX_ZTRACE_DEPTH) */ + void* zt_stack[MAX_ZTRACE_DEPTH]; /* series of return addresses from OSBacktrace */ + uint32_t zt_collisions; /* How many times did a different stack land here while it was occupied? */ + uint32_t zt_hit_count; /* for determining effectiveness of hash function */ +}; + +#ifndef VM_MAX_TAG_ZONES +#error MAX_TAG_ZONES +#endif +#if VM_MAX_TAG_ZONES + +extern uint32_t zone_index_from_tag_index( + uint32_t tag_zone_index, + vm_size_t *elem_size); + +#endif /* VM_MAX_TAG_ZONES */ + +#define lock_zone(zone) simple_lock(&(zone)->lock, &zone_locks_grp) +#define unlock_zone(zone) simple_unlock(&(zone)->lock) + +#if CONFIG_GZALLOC +void gzalloc_init(vm_size_t); +void gzalloc_zone_init(zone_t); +void gzalloc_empty_free_cache(zone_t); +boolean_t gzalloc_enabled(void); + +vm_offset_t gzalloc_alloc(zone_t, zone_stats_t zstats, zalloc_flags_t flags); +void gzalloc_free(zone_t, zone_stats_t zstats, void *); +boolean_t gzalloc_element_size(void *, zone_t *, vm_size_t *); +#endif /* CONFIG_GZALLOC */ + +#define MAX_ZONE_NAME 32 /* max length of a zone name we can take from the boot-args */ +int track_this_zone(const char *zonename, const char *logname); + +#if DEBUG || DEVELOPMENT +extern boolean_t run_zone_test(void); +extern void zone_gc_replenish_test(void); +extern void zone_alloc_replenish_test(void); +extern vm_size_t zone_element_info(void *addr, vm_tag_t * ptag); +extern bool zalloc_disable_copyio_check; +#else +#define zalloc_disable_copyio_check false +#endif /* DEBUG || DEVELOPMENT */ + +#pragma GCC visibility pop + +__END_DECLS + +#endif /* _KERN_ZALLOC_INTERNAL_H_ */ diff --git a/osfmk/kern/zcache.c b/osfmk/kern/zcache.c index bd0a50dc8..f0889994d 100644 --- a/osfmk/kern/zcache.c +++ b/osfmk/kern/zcache.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017 Apple Inc. All rights reserved. + * Copyright (c) 2017-2020 Apple Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -30,212 +30,208 @@ #include #include #include +#include +#include +/* Size of array in magazine determined by boot-arg or default */ +TUNABLE(uint16_t, magazine_element_count, "zcc_magazine_element_count", 8); -#if defined(__i386__) || defined(__x86_64__) -#include -#endif - -#if defined (__arm__) || defined (__arm64__) -#include -#endif +/* Size of depot lists determined by boot-arg or default */ +TUNABLE(uint16_t, depot_element_count, "zcc_depot_element_count", 8); -#define DEFAULT_MAGAZINE_SIZE 8 /* Default number of elements for all magazines allocated from the magazine_zone */ -#define DEFAULT_DEPOT_SIZE 8 /* Default number of elements for the array zcc_depot_list */ -#define ZCC_MAX_CPU_CACHE_LINE_SIZE 64 /* We should use a platform specific macro for this in the future, right now this is the max cache line size for all platforms*/ +SECURITY_READ_ONLY_LATE(zone_t) magazine_zone; /* zone to allocate zcc_magazine structs from */ +SECURITY_READ_ONLY_LATE(uintptr_t) zcache_canary; /* Canary used for the caching layer to prevent UaF attacks */ -lck_grp_t zcache_locks_grp; /* lock group for depot_lock */ -zone_t magazine_zone; /* zone to allocate zcc_magazine structs from */ -uint16_t magazine_element_count = 0; /* Size of array in magazine determined by boot-arg or default */ -uint16_t depot_element_count = 0; /* Size of depot lists determined by boot-arg or default */ -bool zone_cache_ready = FALSE; /* Flag to check if zone caching has been set up by zcache_bootstrap */ -uintptr_t zcache_canary = 0; /* Canary used for the caching layer to prevent UaF attacks */ - -/* The zcc_magazine is used as a stack to store cached zone elements. These +/* + * The zcc_magazine is used as a stack to store cached zone elements. These * sets of elements can be moved around to perform bulk operations. */ struct zcc_magazine { uint32_t zcc_magazine_index; /* Used as a stack pointer to acess elements in the array */ uint32_t zcc_magazine_capacity; /* Number of pointers able to be stored in the zcc_elements array */ - void *zcc_elements[0]; /* Array of pointers to objects */ + vm_offset_t zcc_elements[0]; /* Array of pointers to objects */ }; -/* Each CPU will use one of these to store its elements +/* + * Each CPU will use one of these to store its elements */ struct zcc_per_cpu_cache { - struct zcc_magazine *current; /* Magazine from which we will always try to allocate from and free to first */ - struct zcc_magazine *previous; /* Dedicated magazine for a quick reload and to prevent thrashing wen we swap with the depot */ -} __attribute__((aligned(ZCC_MAX_CPU_CACHE_LINE_SIZE))); /* we want to align this to a cache line size so it does not thrash when multiple cpus want to access their caches in paralell */ - + /* Magazine from which we will always try to allocate from and free to first */ + struct zcc_magazine *current; + /* Dedicated magazine for a quick reload and to prevent thrashing wen we swap with the depot */ + struct zcc_magazine *previous; + /* Zcache poisoning count */ + uint32_t zp_count; +#if ZALLOC_DETAILED_STATS + uint64_t zcc_allocs; + uint64_t zcc_frees; +#endif /* ZALLOC_DETAILED_STATS */ +}; -/* - * The depot layer can be invalid while zone_gc() is draining it out. - * During that time, the CPU caches are active. For CPU magazine allocs and - * frees, the caching layer reaches directly into the zone allocator. 
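Since the magazine and depot sizes are now TUNABLE()s bound to the same boot-arg names that the removed PE_parse_boot_argn() calls used, existing overrides keep working; for example (illustrative boot-args line, assuming the usual boot-arg syntax):

    zcc_magazine_element_count=16 zcc_depot_element_count=4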
- */ -#define ZCACHE_DEPOT_INVALID -1 -#define zcache_depot_available(zcache) (zcache->zcc_depot_index != ZCACHE_DEPOT_INVALID) /* This is the basic struct to take care of cahing and is included within * the zone. */ -struct zone_cache { - lck_mtx_t zcc_depot_lock; /* Lock for the depot layer of caching */ - struct zcc_per_cpu_cache zcc_per_cpu_caches[MAX_CPUS]; /* An array of caches, one for each CPU */ - int zcc_depot_index; /* marks the point in the array where empty magazines begin */ - struct zcc_magazine *zcc_depot_list[0]; /* Stores full and empty magazines in the depot layer */ +struct zcc_depot { + /* marks the point in the array where empty magazines begin */ + int zcc_depot_index; + +#if ZALLOC_DETAILED_STATS + uint64_t zcc_swap; + uint64_t zcc_fill; + uint64_t zcc_drain; + uint64_t zcc_fail; + uint64_t zcc_gc; +#endif /* ZALLOC_DETAILED_STATS */ + + /* Stores full and empty magazines in the depot layer */ + struct zcc_magazine *zcc_depot_list[0]; }; - -void zcache_init_marked_zones(void); -bool zcache_mag_fill(zone_t zone, struct zcc_magazine *mag); -void zcache_mag_drain(zone_t zone, struct zcc_magazine *mag); -void zcache_mag_init(struct zcc_magazine *mag, int count); -void *zcache_mag_pop(struct zcc_magazine *mag); -void zcache_mag_push(struct zcc_magazine *mag, void *elem); -bool zcache_mag_has_space(struct zcc_magazine *mag); -bool zcache_mag_has_elements(struct zcc_magazine *mag); -void zcache_swap_magazines(struct zcc_magazine **a, struct zcc_magazine **b); -void zcache_mag_depot_swap_for_alloc(struct zone_cache *depot, struct zcc_per_cpu_cache *cache); -void zcache_mag_depot_swap_for_free(struct zone_cache *depot, struct zcc_per_cpu_cache *cache); -void zcache_mag_depot_swap(struct zone_cache *depot, struct zcc_per_cpu_cache *cache, boolean_t load_full); -void zcache_canary_add(zone_t zone, void *addr); -void zcache_canary_validate(zone_t zone, void *addr); +static bool zcache_mag_fill_locked(zone_t zone, struct zcc_magazine *mag); +static void zcache_mag_drain_locked(zone_t zone, struct zcc_magazine *mag); +static bool zcache_mag_has_space(struct zcc_magazine *mag); +static bool zcache_mag_has_elements(struct zcc_magazine *mag); +static void zcache_swap_magazines(struct zcc_magazine **a, struct zcc_magazine **b); +static void zcache_mag_depot_swap_for_alloc(struct zcc_depot *depot, struct zcc_per_cpu_cache *cache); +static void zcache_mag_depot_swap_for_free(struct zcc_depot *depot, struct zcc_per_cpu_cache *cache); +static void zcache_canary_add(zone_t zone, vm_offset_t addr); +#if ZALLOC_ENABLE_POISONING +static void zcache_validate_element(zone_t zone, vm_offset_t *addr, bool poison); +static void zcache_validate_and_clear_canary(zone_t zone, vm_offset_t *primary, vm_offset_t *backup); +#endif /* * zcache_ready * - * Description: returns whether or not the zone caches are ready to use + * Returns whether or not the zone caches are ready to use * */ -bool +static bool zcache_ready(void) { - return zone_cache_ready; -} - -/* - * zcache_init_marked_zones - * - * Description: Initializes all parts of the per-cpu caches for the list of - * marked zones once we are able to initalize caches. This should - * only be called once, and will be called during the time that the - * system is single threaded so we don't have to take the lock. 
- * - */ -void -zcache_init_marked_zones(void) -{ - unsigned int i; - for (i = 0; i < num_zones; i++) { - if (zone_array[i].cpu_cache_enable_when_ready) { - zcache_init(&zone_array[i]); - zone_array[i].cpu_cache_enable_when_ready = FALSE; - } - } + return magazine_zone != NULL; } /* * zcache_bootstrap * - * Description: initializes zone to allocate magazines from and sets - * magazine_element_count and depot_element_count from - * boot-args or default values + * Initializes zone to allocate magazines from and sets + * magazine_element_count and depot_element_count from + * boot-args or default values * */ -void +__startup_func +static void zcache_bootstrap(void) { - /* use boot-arg for custom magazine size*/ - if (!PE_parse_boot_argn("zcc_magazine_element_count", &magazine_element_count, sizeof(uint16_t))) { - magazine_element_count = DEFAULT_MAGAZINE_SIZE; - } - int magazine_size = sizeof(struct zcc_magazine) + magazine_element_count * sizeof(void *); - - magazine_zone = zinit(magazine_size, 100000 * magazine_size, magazine_size, "zcc_magazine_zone"); - - assert(magazine_zone != NULL); - - /* use boot-arg for custom depot size*/ - if (!PE_parse_boot_argn("zcc_depot_element_count", &depot_element_count, sizeof(uint16_t))) { - depot_element_count = DEFAULT_DEPOT_SIZE; - } - - lck_grp_init(&zcache_locks_grp, "zcc_depot_lock", LCK_GRP_ATTR_NULL); + zone_t magzone; /* Generate the canary value for zone caches */ zcache_canary = (uintptr_t) early_random(); - zone_cache_ready = TRUE; + magzone = zone_create("zcc_magazine_zone", magazine_size, + ZC_NOCACHING | ZC_ZFREE_CLEARMEM); + + /* + * This causes zcache_ready() to return true. + */ + os_atomic_store(&magazine_zone, magzone, compiler_acq_rel); - zcache_init_marked_zones(); + /* + * Now that we are initialized, we can enable zone caching for zones that + * were made before zcache_bootstrap() was called. + * + * The system is still single threaded so we don't need to take the lock. + */ + zone_index_foreach(i) { + if (zone_array[i].cpu_cache_enabled) { + zcache_init(&zone_array[i]); + } + } +} +STARTUP(ZALLOC, STARTUP_RANK_FOURTH, zcache_bootstrap); + +static struct zcc_magazine * +zcache_mag_alloc(void) +{ + struct zcc_magazine *mag = zalloc_flags(magazine_zone, Z_WAITOK); + mag->zcc_magazine_capacity = magazine_element_count; + return mag; } /* * zcache_init * - * Description: Initializes all parts of the per-cpu caches for a given zone + * Initializes all parts of the per-cpu caches for a given zone * - * Parameters: zone pointer to zone on which to iniitalize caching + * Parameters: + * zone pointer to zone on which to iniitalize caching * */ void zcache_init(zone_t zone) { - int i; /* used as index in for loops */ - vm_size_t total_size; /* Used for allocating the zone_cache struct with the proper size of depot list */ - struct zone_cache *temp_cache; /* Temporary variable to initialize a zone_cache before assigning to the specified zone */ + struct zcc_per_cpu_cache *pcpu_caches; + struct zcc_depot *depot; + vm_size_t size; - /* Allocate chunk of memory for all structs */ - total_size = sizeof(struct zone_cache) + (depot_element_count * sizeof(void *)); + /* + * If zcache hasn't been initialized yet, remember our decision, + * + * zcache_init() will be called again by zcache_bootstrap(), + * while the system is still single threaded, to build the missing caches. 
+ */ + if (!zcache_ready()) { + zone->cpu_cache_enabled = true; + return; + } - temp_cache = (struct zone_cache *) kalloc(total_size); + /* Allocate chunk of memory for all structs */ + size = sizeof(struct zcc_depot) + (depot_element_count * sizeof(void *)); + depot = zalloc_permanent(size, ZALIGN_PTR); + size = sizeof(struct zcc_per_cpu_cache); + pcpu_caches = zalloc_percpu_permanent(size, ZALIGN_PTR); /* Initialize a cache for every CPU */ - for (i = 0; i < MAX_CPUS; i++) { - temp_cache->zcc_per_cpu_caches[i].current = (struct zcc_magazine *)zalloc(magazine_zone); - temp_cache->zcc_per_cpu_caches[i].previous = (struct zcc_magazine *)zalloc(magazine_zone); - - assert(temp_cache->zcc_per_cpu_caches[i].current != NULL && temp_cache->zcc_per_cpu_caches[i].previous != NULL); - - zcache_mag_init(temp_cache->zcc_per_cpu_caches[i].current, magazine_element_count); - zcache_mag_init(temp_cache->zcc_per_cpu_caches[i].previous, magazine_element_count); + zpercpu_foreach(cache, pcpu_caches) { + cache->current = zcache_mag_alloc(); + cache->previous = zcache_mag_alloc(); + cache->zp_count = zone_poison_count_init(zone); } - /* Initialize the lock on the depot layer */ - lck_mtx_init(&(temp_cache->zcc_depot_lock), &zcache_locks_grp, LCK_ATTR_NULL); - /* Initialize empty magazines in the depot list */ - for (i = 0; i < depot_element_count; i++) { - temp_cache->zcc_depot_list[i] = (struct zcc_magazine *)zalloc(magazine_zone); - - assert(temp_cache->zcc_depot_list[i] != NULL); + for (int i = 0; i < depot_element_count; i++) { + depot->zcc_depot_list[i] = zcache_mag_alloc(); + } - zcache_mag_init(temp_cache->zcc_depot_list[i], magazine_element_count); + lock_zone(zone); + if (zone->zcache.zcc_depot) { + panic("allocating caches for zone %s twice", zone->z_name); } - temp_cache->zcc_depot_index = 0; + /* Make the initialization of the per-cpu magazines visible. */ + os_atomic_thread_fence(release); - lock_zone(zone); - zone->zcache = temp_cache; - /* Set flag to know caching is enabled */ - zone->cpu_cache_enabled = TRUE; + zone->zcache.zcc_depot = depot; + zone->zcache.zcc_pcpu = pcpu_caches; + zone->cpu_cache_enabled = true; unlock_zone(zone); - return; } /* * zcache_drain_depot * - * Description: Frees all the full magazines from the depot layer to the zone allocator as part - * of zone_gc(). The routine assumes that only one zone_gc() is in progress (zone_gc_lock - * ensures that) + * Frees all the full magazines from the depot layer to the zone allocator as part + * of zone_gc(). The routine assumes that only one zone_gc() is in progress (zone_gc_lock + * ensures that) * - * Parameters: zone pointer to zone for which the depot layer needs to be drained + * Parameters: + * zone pointer to zone for which the depot layer needs to be drained * * Returns: None * @@ -243,300 +239,259 @@ zcache_init(zone_t zone) void zcache_drain_depot(zone_t zone) { - struct zone_cache *zcache = zone->zcache; + struct zcc_depot *depot; int drain_depot_index = 0; - /* - * Grab the current depot list from the zone cache. If it has full magazines, - * mark the depot as invalid and drain it. 
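
zcache_init() now publishes the freshly built per-CPU caches with a release fence before storing the depot pointer, so anything that later observes the pointer also observes the initialized magazines. A userspace sketch of that publication idiom follows, using C11 stdatomic in place of the kernel's os_atomic primitives; the reader-side acquire load is an assumption about how a consumer would pair with it, not code from this patch.

#include <stdatomic.h>
#include <stdlib.h>

struct cache {
	int ready;                                       /* stand-in for the magazines */
};

static _Atomic(struct cache *) published_cache;      /* NULL until published */

static void
publish_cache(void)
{
	struct cache *c = malloc(sizeof(*c));
	c->ready = 1;                                    /* plain initialization */
	atomic_thread_fence(memory_order_release);       /* order init before publish */
	atomic_store_explicit(&published_cache, c, memory_order_relaxed);
}

static struct cache *
lookup_cache(void)
{
	return atomic_load_explicit(&published_cache, memory_order_acquire);
}

int
main(void)
{
	publish_cache();
	struct cache *c = lookup_cache();
	return (c != NULL && c->ready == 1) ? 0 : 1;
}
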
- */ - lck_mtx_lock_spin_always(&(zcache->zcc_depot_lock)); - if (!zcache_depot_available(zcache) || (zcache->zcc_depot_index == 0)) { - /* no full magazines in the depot or depot unavailable; nothing to drain here */ - lck_mtx_unlock(&(zcache->zcc_depot_lock)); - return; - } - drain_depot_index = zcache->zcc_depot_index; - /* Mark the depot as unavailable */ - zcache->zcc_depot_index = ZCACHE_DEPOT_INVALID; - lck_mtx_unlock(&(zcache->zcc_depot_lock)); - - /* Now drain the full magazines in the depot */ + lock_zone(zone); + depot = zone->zcache.zcc_depot; + drain_depot_index = depot->zcc_depot_index; for (int i = 0; i < drain_depot_index; i++) { - zcache_mag_drain(zone, zcache->zcc_depot_list[i]); + zcache_mag_drain_locked(zone, depot->zcc_depot_list[i]); } +#if ZALLOC_DETAILED_STATS + depot->zcc_gc += drain_depot_index; +#endif /* ZALLOC_DETAILED_STATS */ + depot->zcc_depot_index = 0; + unlock_zone(zone); +} - lck_mtx_lock_spin_always(&(zcache->zcc_depot_lock)); - /* Mark the depot as available again */ - zcache->zcc_depot_index = 0; - lck_mtx_unlock(&(zcache->zcc_depot_lock)); +__attribute__((noinline)) +static void +zcache_free_to_cpu_cache_slow(zone_t zone, struct zcc_per_cpu_cache *per_cpu_cache) +{ + struct zcc_depot *depot; + + lock_zone(zone); + depot = zone->zcache.zcc_depot; + if (depot->zcc_depot_index < depot_element_count) { + /* If able, rotate in a new empty magazine from the depot and retry */ + zcache_mag_depot_swap_for_free(depot, per_cpu_cache); + } else { + /* Free an entire magazine of elements */ + zcache_mag_drain_locked(zone, per_cpu_cache->current); +#if ZALLOC_DETAILED_STATS + depot->zcc_drain++; +#endif /* ZALLOC_DETAILED_STATS */ + } + unlock_zone(zone); } -/* - * zcache_free_to_cpu_cache - * - * Description: Checks per-cpu caches to free element there if possible - * - * Parameters: zone pointer to zone for which element comes from - * addr pointer to element to free - * - * Returns: TRUE if successfull, FALSE otherwise - * - * Precondition: check that caching is enabled for zone - */ -bool -zcache_free_to_cpu_cache(zone_t zone, void *addr) +void +zcache_free_to_cpu_cache(zone_t zone, zone_stats_t zstats, vm_offset_t addr) { - int curcpu; /* Current cpu is used to index into array of zcc_per_cpu_cache structs */ - struct zone_cache *zcache; /* local storage of the zone's cache */ - struct zcc_per_cpu_cache *per_cpu_cache; /* locally store the current per_cpu_cache */ + struct zcc_per_cpu_cache *per_cpu_cache; + vm_offset_t elem = addr; + int cpu; + + zone_allocated_element_validate(zone, elem); + + /* + * This is racy but we don't need zp_count to be accurate. + * This allows us to do the poisoning with preemption enabled. 
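
In the new free path above, the element address is OR'ed with ZALLOC_ELEMENT_NEEDS_VALIDATION while it sits in a magazine and masked off again on the way out, presumably workable because zone elements are at least pointer-aligned so the low bit is always clear. Here is a standalone sketch of that low-bit pointer-tagging idea; the flag value of 1 is an assumption for illustration, not the kernel's definition.

/* Sketch of low-bit pointer tagging; NEEDS_VALIDATION == 1 is illustrative. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define NEEDS_VALIDATION ((uintptr_t)1)

static uintptr_t
tag_element(void *elem, int needs_validation)
{
	uintptr_t addr = (uintptr_t)elem;
	assert((addr & NEEDS_VALIDATION) == 0);     /* alignment keeps the bit clear */
	return needs_validation ? (addr | NEEDS_VALIDATION) : addr;
}

static void *
untag_element(uintptr_t addr, int *needs_validation)
{
	*needs_validation = (int)(addr & NEEDS_VALIDATION);
	return (void *)(addr & ~NEEDS_VALIDATION);
}

int
main(void)
{
	long storage;                               /* stand-in for a zone element */
	uintptr_t tagged = tag_element(&storage, 1);
	int validate;
	void *elem = untag_element(tagged, &validate);
	printf("elem=%p validate=%d\n", elem, validate);
	return 0;
}
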
+ */ + per_cpu_cache = zpercpu_get(zone->zcache.zcc_pcpu); + if (zfree_clear_or_poison(zone, &per_cpu_cache->zp_count, elem)) { + addr |= ZALLOC_ELEMENT_NEEDS_VALIDATION; + } else { + zcache_canary_add(zone, elem); + } + +#if KASAN_ZALLOC + kasan_poison_range(elem, zone_elem_size(zone), ASAN_HEAP_FREED); +#endif disable_preemption(); - curcpu = current_processor()->cpu_id; - zcache = zone->zcache; - per_cpu_cache = &zcache->zcc_per_cpu_caches[curcpu]; + cpu = cpu_number(); + per_cpu_cache = zpercpu_get_cpu(zone->zcache.zcc_pcpu, cpu); if (zcache_mag_has_space(per_cpu_cache->current)) { /* If able, free into current magazine */ - goto free_to_current; } else if (zcache_mag_has_space(per_cpu_cache->previous)) { /* If able, swap current and previous magazine and retry */ zcache_swap_magazines(&per_cpu_cache->previous, &per_cpu_cache->current); - goto free_to_current; } else { - lck_mtx_lock_spin_always(&(zcache->zcc_depot_lock)); - if (zcache_depot_available(zcache) && (zcache->zcc_depot_index < depot_element_count)) { - /* If able, rotate in a new empty magazine from the depot and retry */ - zcache_mag_depot_swap_for_free(zcache, per_cpu_cache); - lck_mtx_unlock(&(zcache->zcc_depot_lock)); - goto free_to_current; - } - lck_mtx_unlock(&(zcache->zcc_depot_lock)); - /* Attempt to free an entire magazine of elements */ - zcache_mag_drain(zone, per_cpu_cache->current); - if (zcache_mag_has_space(per_cpu_cache->current)) { - goto free_to_current; - } + zcache_free_to_cpu_cache_slow(zone, per_cpu_cache); } - /* If not able to use cache return FALSE and fall through to zfree */ + struct zcc_magazine *mag = per_cpu_cache->current; + mag->zcc_elements[mag->zcc_magazine_index++] = addr; + zpercpu_get_cpu(zstats, cpu)->zs_mem_freed += zone_elem_size(zone); +#if ZALLOC_DETAILED_STATS + per_cpu_cache->zcc_frees++; +#endif /* ZALLOC_DETAILED_STATS */ + enable_preemption(); - return FALSE; +} -free_to_current: - assert(zcache_mag_has_space(per_cpu_cache->current)); - zcache_canary_add(zone, addr); - zcache_mag_push(per_cpu_cache->current, addr); +__attribute__((noinline)) +static bool +zcache_alloc_from_cpu_cache_slow(zone_t zone, struct zcc_per_cpu_cache *per_cpu_cache) +{ + struct zcc_depot *depot; -#if KASAN_ZALLOC - kasan_poison_range((vm_offset_t)addr, zone->elem_size, ASAN_HEAP_FREED); -#endif + lock_zone(zone); + depot = zone->zcache.zcc_depot; + if (depot->zcc_depot_index > 0) { + /* If able, rotate in a full magazine from the depot */ + zcache_mag_depot_swap_for_alloc(depot, per_cpu_cache); + } else if (zcache_mag_fill_locked(zone, per_cpu_cache->current)) { +#if ZALLOC_DETAILED_STATS + depot->zcc_fill++; +#endif /* ZALLOC_DETAILED_STATS */ + } else { +#if ZALLOC_DETAILED_STATS + depot->zcc_fail++; +#endif /* ZALLOC_DETAILED_STATS */ + /* If unable to allocate from cache return NULL and fall through to zalloc */ + unlock_zone(zone); + enable_preemption(); + return false; + } + unlock_zone(zone); - enable_preemption(); - return TRUE; + return true; } - -/* - * zcache_alloc_from_cpu_cache - * - * Description: Checks per-cpu caches to allocate element from there if possible - * - * Parameters: zone pointer to zone for which element will come from - * - * Returns: pointer to usable element - * - * Precondition: check that caching is enabled for zone - */ vm_offset_t -zcache_alloc_from_cpu_cache(zone_t zone) +zcache_alloc_from_cpu_cache(zone_t zone, zone_stats_t zstats, vm_size_t waste) { - int curcpu; /* Current cpu is used to index into array of zcc_per_cpu_cache structs */ - void *ret = NULL; /* 
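
The rewritten zcache_free_to_cpu_cache() tries the current magazine, then swaps in the previous one, and only then takes the locked slow path. The userspace model below captures that decision order with a tiny fixed-capacity magazine; per-CPU lookup, preemption control, poisoning, and statistics are all left out, and the names are illustrative.

/* Simplified two-magazine fast path, not xnu code. */
#include <stdbool.h>
#include <stdio.h>

#define MAG_CAPACITY 4

struct magazine {
	unsigned count;
	void    *elems[MAG_CAPACITY];
};

struct pcpu_cache {
	struct magazine *current;
	struct magazine *previous;
};

static bool
mag_has_space(struct magazine *mag)
{
	return mag->count < MAG_CAPACITY;
}

static void
swap_magazines(struct magazine **a, struct magazine **b)
{
	struct magazine *tmp = *a;
	*a = *b;
	*b = tmp;
}

static bool
cache_free(struct pcpu_cache *cache, void *elem)
{
	if (!mag_has_space(cache->current)) {
		if (mag_has_space(cache->previous)) {
			swap_magazines(&cache->current, &cache->previous);
		} else {
			return false;            /* caller takes the slow path */
		}
	}
	cache->current->elems[cache->current->count++] = elem;
	return true;
}

int
main(void)
{
	struct magazine a = { 0 }, b = { 0 };
	struct pcpu_cache cache = { .current = &a, .previous = &b };
	int items[16];
	unsigned cached = 0;
	for (int i = 0; i < 16; i++) {
		cached += cache_free(&cache, &items[i]) ? 1 : 0;
	}
	printf("cached %u of 16 frees before hitting the slow path\n", cached);
	return 0;
}
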
Points to the element which will be returned */ - struct zone_cache *zcache; /* local storage of the zone's cache */ - struct zcc_per_cpu_cache *per_cpu_cache; /* locally store the current per_cpu_cache */ + struct zcc_per_cpu_cache *per_cpu_cache; + int cpu; disable_preemption(); - curcpu = current_processor()->cpu_id; - zcache = zone->zcache; - per_cpu_cache = &zcache->zcc_per_cpu_caches[curcpu]; + cpu = cpu_number(); + per_cpu_cache = zpercpu_get_cpu(zone->zcache.zcc_pcpu, cpu); if (zcache_mag_has_elements(per_cpu_cache->current)) { /* If able, allocate from current magazine */ - goto allocate_from_current; } else if (zcache_mag_has_elements(per_cpu_cache->previous)) { /* If able, swap current and previous magazine and retry */ zcache_swap_magazines(&per_cpu_cache->previous, &per_cpu_cache->current); - goto allocate_from_current; - } else { - lck_mtx_lock_spin_always(&(zcache->zcc_depot_lock)); - if (zcache_depot_available(zcache) && (zcache->zcc_depot_index > 0)) { - /* If able, rotate in a full magazine from the depot */ - zcache_mag_depot_swap_for_alloc(zcache, per_cpu_cache); - lck_mtx_unlock(&(zcache->zcc_depot_lock)); - goto allocate_from_current; - } - lck_mtx_unlock(&(zcache->zcc_depot_lock)); - /* Attempt to allocate an entire magazine of elements */ - if (zcache_mag_fill(zone, per_cpu_cache->current)) { - goto allocate_from_current; - } + } else if (!zcache_alloc_from_cpu_cache_slow(zone, per_cpu_cache)) { + return (vm_offset_t)NULL; + } + + struct zcc_magazine *mag = per_cpu_cache->current; + vm_offset_t elem_size = zone_elem_size(zone); + uint32_t index = --mag->zcc_magazine_index; + vm_offset_t addr = mag->zcc_elements[index]; + mag->zcc_elements[index] = 0; + zpercpu_get_cpu(zstats, cpu)->zs_mem_allocated += elem_size; +#if ZALLOC_DETAILED_STATS + if (waste) { + zpercpu_get_cpu(zstats, cpu)->zs_mem_wasted += waste; } + per_cpu_cache->zcc_allocs++; +#else + (void)waste; +#endif /* ZALLOC_DETAILED_STATS */ - /* If unable to allocate from cache return NULL and fall through to zalloc */ enable_preemption(); - return (vm_offset_t) NULL; -allocate_from_current: - ret = zcache_mag_pop(per_cpu_cache->current); - assert(ret != NULL); - zcache_canary_validate(zone, ret); +#if ZALLOC_ENABLE_POISONING + bool validate = addr & ZALLOC_ELEMENT_NEEDS_VALIDATION; +#endif /* ZALLOC_ENABLE_POISONING */ + + addr &= ~ZALLOC_ELEMENT_NEEDS_VALIDATION; #if KASAN_ZALLOC - kasan_poison_range((vm_offset_t)ret, zone->elem_size, ASAN_VALID); + kasan_poison_range(addr, elem_size, ASAN_VALID); #endif +#if ZALLOC_ENABLE_POISONING + if (!validate) { + vm_offset_t backup = addr + elem_size - sizeof(vm_offset_t); + zcache_validate_and_clear_canary(zone, (vm_offset_t *)addr, + (vm_offset_t *)backup); + } + zalloc_validate_element(zone, addr, elem_size, validate); +#endif /* ZALLOC_ENABLE_POISONING */ - enable_preemption(); - return (vm_offset_t) ret; -} - - -/* - * zcache_mag_init - * - * Description: initializes fields in a zcc_magazine struct - * - * Parameters: mag pointer to magazine to initialize - * - */ -void -zcache_mag_init(struct zcc_magazine *mag, int count) -{ - mag->zcc_magazine_index = 0; - mag->zcc_magazine_capacity = count; + return addr; } /* - * zcache_mag_fill + * zcache_mag_fill_locked * - * Description: fills a magazine with as many elements as the zone can give - * without blocking to carve out more memory + * Fills a magazine with as many elements as the zone can give + * without blocking to carve out more memory * - * Parameters: zone zone from which to allocate - * mag pointer to 
magazine to fill + * Parameters: + * zone zone from which to allocate + * mag pointer to magazine to fill * * Return: True if able to allocate elements, false is mag is still empty */ -bool -zcache_mag_fill(zone_t zone, struct zcc_magazine *mag) +static bool +zcache_mag_fill_locked(zone_t zone, struct zcc_magazine *mag) { - assert(mag->zcc_magazine_index == 0); - void* elem = NULL; - uint32_t i; - lock_zone(zone); - for (i = mag->zcc_magazine_index; i < mag->zcc_magazine_capacity; i++) { - elem = zalloc_attempt(zone); - if (elem) { + uint32_t i = mag->zcc_magazine_index; + uint32_t end = mag->zcc_magazine_capacity; + vm_offset_t elem, addr; + + while (i < end && zone->countfree) { + addr = zalloc_direct_locked(zone, Z_NOWAIT, 0); + elem = addr & ~ZALLOC_ELEMENT_NEEDS_VALIDATION; + if (addr & ZALLOC_ELEMENT_NEEDS_VALIDATION) { + zone_clear_freelist_pointers(zone, elem); + } else { zcache_canary_add(zone, elem); - zcache_mag_push(mag, elem); + } #if KASAN_ZALLOC - kasan_poison_range((vm_offset_t)elem, zone->elem_size, ASAN_HEAP_FREED); + kasan_poison_range(elem, zone_elem_size(zone), ASAN_HEAP_FREED); #endif - } else { - break; - } - } - unlock_zone(zone); - if (i == 0) { - return FALSE; + mag->zcc_elements[i++] = addr; } - return TRUE; -} -/* - * zcache_mag_drain - * - * Description: frees all elements in a magazine - * - * Parameters: zone zone to which elements will be freed - * mag pointer to magazine to empty - * - */ -void -zcache_mag_drain(zone_t zone, struct zcc_magazine *mag) -{ - assert(mag->zcc_magazine_index == mag->zcc_magazine_capacity); - lock_zone(zone); - while (mag->zcc_magazine_index > 0) { - uint32_t index = --mag->zcc_magazine_index; - zcache_canary_validate(zone, mag->zcc_elements[index]); - zfree_direct(zone, (vm_offset_t)mag->zcc_elements[index]); - mag->zcc_elements[mag->zcc_magazine_index] = 0; - } - unlock_zone(zone); -} + mag->zcc_magazine_index = i; -/* - * zcache_mag_pop - * - * Description: removes last element from magazine in a stack pop fashion - * zcc_magazine_index represents the number of elements on the - * stack, so it the index of where to save the next element, when - * full, it will be 1 past the last index of the array - * - * Parameters: mag pointer to magazine from which to remove element - * - * Returns: pointer to element removed from magazine - * - * Precondition: must check that magazine is not empty before calling - */ -void * -zcache_mag_pop(struct zcc_magazine *mag) -{ - void *elem; - assert(zcache_mag_has_elements(mag)); - elem = mag->zcc_elements[--mag->zcc_magazine_index]; - /* Ensure pointer to element cannot be accessed after we pop it */ - mag->zcc_elements[mag->zcc_magazine_index] = NULL; - assert(elem != NULL); - return elem; + return i != 0; } - /* - * zcache_mag_push + * zcache_mag_drain_locked * - * Description: adds element to magazine and increments zcc_magazine_index - * zcc_magazine_index represents the number of elements on the - * stack, so it the index of where to save the next element, when - * full, it will be 1 past the last index of the array + * Frees all elements in a magazine * - * Parameters: mag pointer to magazine from which to remove element - * elem pointer to element to add + * Parameters: + * zone zone to which elements will be freed + * mag pointer to magazine to empty * - * Precondition: must check that magazine is not full before calling */ -void -zcache_mag_push(struct zcc_magazine *mag, void *elem) +static void +zcache_mag_drain_locked(zone_t zone, struct zcc_magazine *mag) { - 
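
zcache_mag_fill_locked() stops as soon as the magazine is full or the zone's freelist runs dry and reports whether it cached anything, while zcache_mag_drain_locked() hands every element back and resets the index. The self-contained model below mimics that behavior against a toy backing allocator, not the zone allocator; validation and poisoning are omitted.

/* Toy fill/drain model; the backing "allocator" is a fixed array. */
#include <stdbool.h>
#include <stdio.h>

#define MAG_CAPACITY 8

struct magazine {
	unsigned count;
	int     *elems[MAG_CAPACITY];
};

static int backing_store[32];
static unsigned backing_free = 5;        /* pretend only 5 elements are free */

static int *
backing_alloc(void)
{
	return backing_free ? &backing_store[--backing_free] : NULL;
}

static void
backing_release(int *elem)
{
	(void)elem;
	backing_free++;
}

static bool
mag_fill(struct magazine *mag)
{
	while (mag->count < MAG_CAPACITY) {
		int *elem = backing_alloc();
		if (elem == NULL) {
			break;
		}
		mag->elems[mag->count++] = elem;
	}
	return mag->count != 0;              /* false: the magazine stayed empty */
}

static void
mag_drain(struct magazine *mag)
{
	for (unsigned i = 0; i < mag->count; i++) {
		backing_release(mag->elems[i]);
		mag->elems[i] = NULL;
	}
	mag->count = 0;
}

int
main(void)
{
	struct magazine mag = { 0 };
	printf("filled: %s, count=%u\n", mag_fill(&mag) ? "yes" : "no", mag.count);
	mag_drain(&mag);
	printf("after drain, count=%u, backing_free=%u\n", mag.count, backing_free);
	return 0;
}
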
assert(zcache_mag_has_space(mag)); - mag->zcc_elements[mag->zcc_magazine_index++] = elem; + vm_offset_t elem, addr; + bool poison; + + for (uint32_t i = 0, end = mag->zcc_magazine_index; i < end; i++) { + addr = mag->zcc_elements[i]; + poison = addr & ZALLOC_ELEMENT_NEEDS_VALIDATION; + elem = addr & ~ZALLOC_ELEMENT_NEEDS_VALIDATION; + +#if ZALLOC_ENABLE_POISONING + zcache_validate_element(zone, (vm_offset_t *)elem, poison); +#endif /* ZALLOC_ENABLE_POISONING */ + zfree_direct_locked(zone, elem, poison); + mag->zcc_elements[i] = 0; + } + mag->zcc_magazine_index = 0; } /* * zcache_mag_has_space * - * Description: checks if magazine still has capacity + * Checks if magazine still has capacity * - * Parameters: mag pointer to magazine to check + * Parameters: + * mag pointer to magazine to check * * Returns: true if magazine is full * */ -bool +static bool zcache_mag_has_space(struct zcc_magazine *mag) { return mag->zcc_magazine_index < mag->zcc_magazine_capacity; @@ -546,14 +501,15 @@ zcache_mag_has_space(struct zcc_magazine *mag) /* * zcache_mag_has_elements * - * Description: checks if magazine is empty + * Checks if magazine is empty * - * Parameters: mag pointer to magazine to check + * Parameters: + * mag pointer to magazine to check * * Returns: true if magazine has no elements * */ -bool +static bool zcache_mag_has_elements(struct zcc_magazine *mag) { return mag->zcc_magazine_index > 0; @@ -563,12 +519,13 @@ zcache_mag_has_elements(struct zcc_magazine *mag) /* * zcache_swap_magazines * - * Description: Function which swaps two pointers of any type + * Function which swaps two pointers of any type * - * Parameters: a pointer to first pointer - * b pointer to second pointer + * Parameters: + * a pointer to first pointer + * b pointer to second pointer */ -void +static void zcache_swap_magazines(struct zcc_magazine **a, struct zcc_magazine **b) { struct zcc_magazine *temp = *a; @@ -580,87 +537,162 @@ zcache_swap_magazines(struct zcc_magazine **a, struct zcc_magazine **b) /* * zcache_mag_depot_swap_for_alloc * - * Description: Swaps a full magazine into the current position + * Swaps a full magazine into the current position * - * Parameters: zcache pointer to the zone_cache to access the depot - * cache pointer to the current per-cpu cache + * Parameters: + * depot pointer to the depot + * cache pointer to the current per-cpu cache * * Precondition: Check that the depot list has full elements */ -void -zcache_mag_depot_swap_for_alloc(struct zone_cache *zcache, struct zcc_per_cpu_cache *cache) +static void +zcache_mag_depot_swap_for_alloc(struct zcc_depot *depot, struct zcc_per_cpu_cache *cache) { /* Loads a full magazine from which we can allocate */ - assert(zcache_depot_available(zcache)); - assert(zcache->zcc_depot_index > 0); - zcache->zcc_depot_index--; - zcache_swap_magazines(&cache->current, &zcache->zcc_depot_list[zcache->zcc_depot_index]); + assert(depot->zcc_depot_index > 0); + depot->zcc_depot_index--; +#if ZALLOC_DETAILED_STATS + depot->zcc_swap++; +#endif /* ZALLOC_DETAILED_STATS */ + zcache_swap_magazines(&cache->current, &depot->zcc_depot_list[depot->zcc_depot_index]); } /* * zcache_mag_depot_swap_for_free * - * Description: Swaps an empty magazine into the current position + * Swaps an empty magazine into the current position * - * Parameters: zcache pointer to the zone_cache to access the depot - * cache pointer to the current per-cpu cache + * Parameters: + * depot pointer to the depot + * cache pointer to the current per-cpu cache * * Precondition: Check that the depot 
list has empty elements */ -void -zcache_mag_depot_swap_for_free(struct zone_cache *zcache, struct zcc_per_cpu_cache *cache) +static void +zcache_mag_depot_swap_for_free(struct zcc_depot *depot, struct zcc_per_cpu_cache *cache) { /* Loads an empty magazine into which we can free */ - assert(zcache_depot_available(zcache)); - assert(zcache->zcc_depot_index < depot_element_count); - zcache_swap_magazines(&cache->current, &zcache->zcc_depot_list[zcache->zcc_depot_index]); - zcache->zcc_depot_index++; + assert(depot->zcc_depot_index < depot_element_count); + zcache_swap_magazines(&cache->current, &depot->zcc_depot_list[depot->zcc_depot_index]); +#if ZALLOC_DETAILED_STATS + depot->zcc_swap++; +#endif /* ZALLOC_DETAILED_STATS */ + depot->zcc_depot_index++; } /* * zcache_canary_add * - * Description: Adds a canary to an element by putting zcache_canary at the first - * and last location of the element - * - * Parameters: zone zone for the element - * addr element address to add canary to + * Adds a canary to an element by putting zcache_canary at the first + * and last location of the element * + * Parameters: + * zone zone for the element + * addr element address to add canary to */ -void -zcache_canary_add(zone_t zone, void *element) +static void +zcache_canary_add(zone_t zone, vm_offset_t element) { +#if ZALLOC_ENABLE_POISONING vm_offset_t *primary = (vm_offset_t *)element; - vm_offset_t *backup = (vm_offset_t *)((vm_offset_t)primary + zone->elem_size - sizeof(vm_offset_t)); + vm_offset_t *backup = (vm_offset_t *)((vm_offset_t)primary + + zone_elem_size(zone) - sizeof(vm_offset_t)); *primary = *backup = (zcache_canary ^ (uintptr_t)element); +#else +#pragma unused(zone, element) +#endif +} + +#if ZALLOC_ENABLE_POISONING +__abortlike static void +zcache_validation_panic(zone_t zone, vm_offset_t *primary, vm_offset_t *backup, + vm_offset_t permutation) +{ + vm_offset_t primary_value = 0; + vm_offset_t backup_value = 0; + + if (permutation == zcache_canary) { + primary_value = *primary ^ (vm_offset_t)primary; + backup_value = *backup ^ (vm_offset_t)primary; + permutation = permutation ^ (vm_offset_t)primary; + } else { + primary_value = *primary; + backup_value = *backup; + } + if (primary_value != permutation) { + panic("Zone cache element was used after free! Element %p was corrupted at " + "beginning; Expected 0x%lx but found 0x%lx; canary 0x%lx; zone %p (%s%s)", + primary, (uintptr_t) permutation, (uintptr_t) *primary, zcache_canary, zone, + zone_heap_name(zone), zone->z_name); + } else { + panic("Zone cache element was used after free! Element %p was corrupted at end; " + "Expected 0x%lx but found 0x%lx; canary 0x%lx; zone %p (%s%s)", + primary, (uintptr_t) permutation, (uintptr_t) *backup, zcache_canary, zone, + zone_heap_name(zone), zone->z_name); + } } /* - * zcache_canary_validate + * zcache_validate_and_clear_canary + * + * Validates an element of the zone cache to make sure it still contains the zone + * caching canary and clears it. * - * Description: Validates an element of the zone cache to make sure it still contains the zone - * caching canary. 
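
zcache_canary_add() stores the boot-time secret XORed with the element's own address at both the first and last pointer-sized slots of a cached element, and validation recomputes the XOR before the element is handed out or returned to the zone. The sketch below reproduces that check in userspace; the constant canary value is purely illustrative, where the kernel derives zcache_canary from early_random().

/* Userspace XOR-canary sketch; the canary constant is made up. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static const uintptr_t cache_canary = 0x5aa5c33c;

static void
canary_add(void *element, size_t elem_size)
{
	uintptr_t *primary = element;
	uintptr_t *backup = (uintptr_t *)((uintptr_t)element + elem_size - sizeof(uintptr_t));
	*primary = *backup = cache_canary ^ (uintptr_t)element;
}

static int
canary_check(void *element, size_t elem_size)
{
	uintptr_t *primary = element;
	uintptr_t *backup = (uintptr_t *)((uintptr_t)element + elem_size - sizeof(uintptr_t));
	return ((*primary ^ (uintptr_t)element) == cache_canary) &&
	       ((*backup ^ (uintptr_t)element) == cache_canary);
}

int
main(void)
{
	size_t elem_size = 64;
	void *element = malloc(elem_size);
	canary_add(element, elem_size);
	printf("intact: %d\n", canary_check(element, elem_size));
	((char *)element)[0] ^= 0xff;                 /* simulate a use-after-free write */
	printf("after corruption: %d\n", canary_check(element, elem_size));
	free(element);
	return 0;
}
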
+ * Parameters: + * zone zone for the element + * primary addr of canary placed in front + * backup addr of canary placed at the back + */ +static void +zcache_validate_and_clear_canary(zone_t zone, vm_offset_t *primary, vm_offset_t *backup) +{ + vm_offset_t primary_value = (*primary ^ (uintptr_t)primary); + vm_offset_t backup_value = (*backup ^ (uintptr_t)primary); + + if (primary_value == zcache_canary && backup_value == zcache_canary) { + *primary = *backup = ZONE_POISON; + } else { + zcache_validation_panic(zone, primary, backup, zcache_canary); + } +} + +/* + * zcache_validate_element * - * Parameters: zone zone for the element - * addr element address to validate + * Validates the first and last pointer size of the element to ensure + * that they haven't been altered. This function is used when an + * element moves from cache to zone, therefore only validing the + * first and last pointer size (location of future freelist pointers). * + * Parameters: + * zone zone for the element + * element addr of element to validate + * poison has the element been poisoned */ -void -zcache_canary_validate(zone_t zone, void *element) +static void +zcache_validate_element(zone_t zone, vm_offset_t *element, bool poison) { vm_offset_t *primary = (vm_offset_t *)element; - vm_offset_t *backup = (vm_offset_t *)((vm_offset_t)primary + zone->elem_size - sizeof(vm_offset_t)); + vm_offset_t *backup = (vm_offset_t *)((vm_offset_t)primary + + zone_elem_size(zone) - sizeof(vm_offset_t)); - vm_offset_t primary_value = (*primary ^ (uintptr_t)element); - if (primary_value != zcache_canary) { - panic("Zone cache element was used after free! Element %p was corrupted at beginning; Expected %p but found %p; canary %p; zone %p (%s)", - element, (void *)(zcache_canary ^ (uintptr_t)element), (void *)(*primary), (void *)zcache_canary, zone, zone->zone_name); + if (zone->zfree_clear_mem) { + if (*primary == 0 && *backup == 0) { + return; + } else { + zcache_validation_panic(zone, primary, backup, 0); + } } - vm_offset_t backup_value = (*backup ^ (uintptr_t)element); - if (backup_value != zcache_canary) { - panic("Zone cache element was used after free! Element %p was corrupted at end; Expected %p but found %p; canary %p; zone %p (%s)", - element, (void *)(zcache_canary ^ (uintptr_t)element), (void *)(*backup), (void *)zcache_canary, zone, zone->zone_name); + if (__probable(!poison)) { + zcache_validate_and_clear_canary(zone, primary, backup); + } else { + if (*primary == ZONE_POISON && *backup == ZONE_POISON) { + return; + } else { + zcache_validation_panic(zone, primary, backup, ZONE_POISON); + } } } +#endif /* ZALLOC_ENABLE_POISONING */ diff --git a/osfmk/kern/zcache.h b/osfmk/kern/zcache_internal.h similarity index 74% rename from osfmk/kern/zcache.h rename to osfmk/kern/zcache_internal.h index 0c6fb19d5..2cafb525c 100644 --- a/osfmk/kern/zcache.h +++ b/osfmk/kern/zcache_internal.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017 Apple Inc. All rights reserved. + * Copyright (c) 2017-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -97,8 +97,8 @@ * try to allocate an entire magazine of elements or free an entire magazine of * elements at once. * - * Caching must be enabled explicitly, by calling zone_change() with the - * Z_CACHING_ENABLED flag, for every zone you want to cache elements for. Zones + * Caching must be enabled explicitly, by calling zone_create() with the + * ZC_CACHING flag, for every zone you want to cache elements for. 
Zones * which are good candidates for this are ones with highly contended zone locks. * * Some good potential candidates are kalloc.16, kalloc.48, Vm objects, VM map @@ -117,76 +117,90 @@ * depot will have N full and N empty magazines * (default 16 used if not specified) */ + +#ifndef _KERN_ZCACHE_H_ +#define _KERN_ZCACHE_H_ + #include +#include /* zone_stats_t */ #include +#if CONFIG_ZCACHE +#pragma GCC visibility push(hidden) -/* - * zcache_ready - * - * Description: returns whether or not the zone caches are ready to use - * - */ -bool zcache_ready(void); +__BEGIN_DECLS +struct zone_cache { + struct zcc_per_cpu_cache *__zpercpu zcc_pcpu; + struct zcc_depot *zcc_depot; +}; -/* - * zcache_bootstrap +/** + * @function zcache_init * - * Description: initializes zone to allocate magazines from + * @abstract + * Initializes all parts of the per-cpu caches for a given zone * - */ -void zcache_bootstrap(void); - - -/* - * zcache_init - * - * Description: Initializes all parts of the per-cpu caches for a given zone - * - * Parameters: zone pointer to zone on which to iniitalize caching + * @param zone pointer to zone on which to iniitalize caching * */ -void zcache_init(zone_t zone); +extern void zcache_init( + zone_t zone); -/* - * zcache_free_to_cpu_cache +/** + * @function zcache_free_to_cpu_cache() * - * Description: Checks per-cpu caches to free element there if possible + * @abstract + * Checks per-cpu caches to free element there if possible. * - * Parameters: zone pointer to zone for which element comes from - * addr pointer to element to free + * @discussion + * The caller is responsible for checking that caching is enabled for zone. * - * Returns: TRUE if successfull, FALSE otherwise - * - * Precondition: check that caching is enabled for zone + * @param zone pointer to zone for which element comes from + * @param zstats pointer to the per-cpu statistics to maintain + * @param addr adddress of the element to free */ -bool zcache_free_to_cpu_cache(zone_t zone, void *addr); +extern void zcache_free_to_cpu_cache( + zone_t zone, + zone_stats_t zstats, + vm_offset_t addr); -/* - * zcache_alloc_from_cpu_cache +/** + * @function zcache_alloc_from_cpu_cache * - * Description: Checks per-cpu caches to allocate element from there if possible + * @abstract + * Checks per-cpu caches to allocate element from there if possible * - * Parameters: zone pointer to zone for which element will come from + * @discussion + * The caller is responsible for checking that caching is enabled for zone. 
* - * Returns: pointer to usable element + * @param zone pointer to zone for which element will comes from + * @param zstats pointer to the per-cpu statistics to maintain + * @param waste amount of waste of this allocation (or 0) * - * Precondition: check that caching is enabled for zone + * @return pointer to usable element */ -vm_offset_t zcache_alloc_from_cpu_cache(zone_t zone); +extern vm_offset_t zcache_alloc_from_cpu_cache( + zone_t zone, + zone_stats_t zstats, + vm_size_t waste); -/* - * zcache_drain_depot - * - * Description: Frees all the full magazines from the depot layer to the zone allocator - * Invoked by zone_gc() - * - * Parameters: zone pointer to zone for which the depot layer needs to be drained +/** + * @function zcache_drain_depot * - * Returns: None + * @abstract + * Frees all the full magazines from the depot layer to the zone allocator + * Invoked by zone_gc() * + * @param zone pointer to zone for which the depot layer needs to be drained */ -void zcache_drain_depot(zone_t zone); +extern void zcache_drain_depot( + zone_t zone); + +__END_DECLS + +#pragma GCC visibility pop +#endif /* CONFIG_ZCACHE */ +#endif /* _KERN_ZCACHE_H_ */ diff --git a/osfmk/kextd/Makefile b/osfmk/kextd/Makefile index 624f8c42d..de30bc198 100644 --- a/osfmk/kextd/Makefile +++ b/osfmk/kextd/Makefile @@ -41,7 +41,7 @@ COMP_FILES = ${MIG_KUSRC} do_build_all:: $(COMP_FILES) ${MIG_KUSRC} : kextd_mach.defs - $(call makelog,$(ColorM)MIG$(Color0) $(ColorF)$@$(Color0)) + @$(LOG_MIG) $@ $(_v)${MIG} ${MIGFLAGS} ${MIGKUFLAGS} \ -user kextd_mach.c \ -header kextd_mach.h \ diff --git a/osfmk/kperf/Makefile b/osfmk/kperf/Makefile index 467e33774..4108baf90 100644 --- a/osfmk/kperf/Makefile +++ b/osfmk/kperf/Makefile @@ -11,7 +11,7 @@ EXPORT_ONLY_FILES = \ context.h \ kperf.h \ kperfbsd.h \ - kperf_timer.h \ + kptimer.h \ kdebug_trigger.h \ lazy.h \ pet.h diff --git a/osfmk/kperf/action.c b/osfmk/kperf/action.c index ae3951156..7c1814e51 100644 --- a/osfmk/kperf/action.c +++ b/osfmk/kperf/action.c @@ -50,7 +50,7 @@ #include #include #include -#include +#include #include #include #include @@ -230,7 +230,8 @@ kperf_sample_internal(struct kperf_sample *sbuf, } if (!task_only) { - context->cur_thread->kperf_pet_gen = kperf_pet_gen; + context->cur_thread->kperf_pet_gen = + os_atomic_load(&kppet_gencount, relaxed); } bool is_kernel = (context->cur_pid == 0); @@ -688,10 +689,7 @@ kperf_action_set_count(unsigned count) * more things, too. */ if (actionc == 0) { - int r; - if ((r = kperf_init())) { - return r; - } + kperf_setup(); } /* create a new array */ diff --git a/osfmk/kperf/arm/kperf_mp.c b/osfmk/kperf/arm/kperf_mp.c deleted file mode 100644 index 92bae56de..000000000 --- a/osfmk/kperf/arm/kperf_mp.c +++ /dev/null @@ -1,98 +0,0 @@ -/* - * Copyright (c) 2011-2016 Apple Computer, Inc. All rights reserved. - * - * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * - * This file contains Original Code and/or Modifications of Original Code - * as defined in and that are subject to the Apple Public Source License - * Version 2.0 (the 'License'). You may not use this file except in - * compliance with the License. The rights granted to you under the License - * may not be used to create, or enable the creation or redistribution of, - * unlawful or unlicensed copies of an Apple operating system, or to - * circumvent, violate, or enable the circumvention or violation of, any - * terms of an Apple operating system software license agreement. 
- * - * Please obtain a copy of the License at - * http://www.opensource.apple.com/apsl/ and read it before using this file. - * - * The Original Code and all software distributed under the License are - * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER - * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, - * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. - * Please see the License for the specific language governing rights and - * limitations under the License. - * - * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include /* xcall */ -#include - -void -kperf_signal_handler(unsigned int cpu_number) -{ - uint64_t cpu_mask = UINT64_C(1) << cpu_number; - - /* find all the timers that caused a signal */ - for (int i = 0; i < (int)kperf_timerc; i++) { - uint64_t pending_cpus; - struct kperf_timer *timer = &kperf_timerv[i]; - - pending_cpus = atomic_fetch_and_explicit(&timer->pending_cpus, - ~cpu_mask, memory_order_relaxed); - if (pending_cpus & cpu_mask) { - kperf_ipi_handler(timer); - } - } -} - -bool -kperf_mp_broadcast_other_running(struct kperf_timer *timer) -{ - int current_cpu = cpu_number(); - bool system_only_self = true; - int n_cpus = machine_info.logical_cpu_max; - - /* signal all running processors */ - for (int i = 0; i < n_cpus; i++) { - uint64_t i_bit = UINT64_C(1) << i; - processor_t processor = cpu_to_processor(i); - - /* do not IPI processors that are not scheduling threads */ - if (processor == PROCESSOR_NULL || - processor->state != PROCESSOR_RUNNING || - processor->active_thread == THREAD_NULL) { - continue; - } - - if (i == current_cpu) { - system_only_self = false; - continue; - } - - /* nor processors that have not responded to the last broadcast */ - uint64_t already_pending = atomic_fetch_or_explicit( - &timer->pending_cpus, i_bit, memory_order_relaxed); - if (already_pending & i_bit) { -#if DEVELOPMENT || DEBUG - atomic_fetch_add_explicit(&kperf_pending_ipis, 1, - memory_order_relaxed); -#endif /* DEVELOPMENT || DEBUG */ - continue; - } - - cpu_signal(cpu_datap(i), SIGPkptimer, NULL, NULL); - } - - return system_only_self; -} diff --git a/osfmk/kperf/buffer.h b/osfmk/kperf/buffer.h index 224240eb8..e8c5e40e7 100644 --- a/osfmk/kperf/buffer.h +++ b/osfmk/kperf/buffer.h @@ -169,15 +169,7 @@ extern int kperf_debug_level; /* BUF_DATA tracepoints are for logging actual kperf results. */ -#define BUF_DATA_INT(EVENTID, A0, A1, A2, A3) KERNEL_DEBUG_CONSTANT_IST(~KDEBUG_ENABLE_PPT, EVENTID, A0, A1, A2, A3, 0) - -#define BUF_DATA(EVENTID, ...) BUF_DATA_(EVENTID, ## __VA_ARGS__, 4, 3, 2, 1, 0) -#define BUF_DATA_(EVENTID, A1, A2, A3, A4, N_ARGS, ...) BUF_DATA##N_ARGS(EVENTID, A1, A2, A3, A4) -#define BUF_DATA0(EVENTID, A1, A2, A3, A4) BUF_DATA_INT(EVENTID, 0, 0, 0, 0) -#define BUF_DATA1(EVENTID, A1, A2, A3, A4) BUF_DATA_INT(EVENTID, A1, 0, 0, 0) -#define BUF_DATA2(EVENTID, A1, A2, A3, A4) BUF_DATA_INT(EVENTID, A1, A2, 0, 0) -#define BUF_DATA3(EVENTID, A1, A2, A3, A4) BUF_DATA_INT(EVENTID, A1, A2, A3, 0) -#define BUF_DATA4(EVENTID, A1, A2, A3, A4) BUF_DATA_INT(EVENTID, A1, A2, A3, A4) +#define BUF_DATA(EVENTID, ...) 
KDBG_RELEASE(EVENTID, ## __VA_ARGS__) /* * BUF_INFO tracepoints are for logging debugging information relevant to diff --git a/osfmk/kperf/callstack.c b/osfmk/kperf/callstack.c index b42389c91..a6a367c3d 100644 --- a/osfmk/kperf/callstack.c +++ b/osfmk/kperf/callstack.c @@ -31,6 +31,7 @@ #include #include #include +#include #include #include #include @@ -86,6 +87,7 @@ callstack_fixup_user(struct kp_ucallstack *cs, thread_t thread) cs->kpuc_frames[0] |= 1ULL; } + fixup_val = get_saved_state_lr(state); #else @@ -289,7 +291,7 @@ kperf_kcallstack_sample(struct kp_kcallstack *cs, struct kperf_context *context) cs->kpkc_flags |= CALLSTACK_KERNEL_WORDS; bool trunc = false; cs->kpkc_nframes = backtrace_interrupted( - cs->kpkc_word_frames, cs->kpkc_nframes - 1, &trunc); + cs->kpkc_word_frames, cs->kpkc_nframes - 1, &trunc); if (cs->kpkc_nframes != 0) { callstack_fixup_interrupted(cs); } @@ -341,7 +343,7 @@ kperf_ucallstack_sample(struct kp_ucallstack *cs, struct kperf_context *context) */ unsigned int maxnframes = cs->kpuc_nframes - 1; unsigned int nframes = backtrace_thread_user(thread, cs->kpuc_frames, - maxnframes, &error, &user64, &trunc); + maxnframes, &error, &user64, &trunc, true); cs->kpuc_nframes = MIN(maxnframes, nframes); /* @@ -451,7 +453,7 @@ kperf_ucallstack_pend(struct kperf_context * context, uint32_t depth, } kperf_ast_set_callstack_depth(context->cur_thread, depth); return kperf_ast_pend(context->cur_thread, T_KPERF_AST_CALLSTACK, - actionid); + actionid); } static kern_return_t diff --git a/osfmk/kperf/callstack.h b/osfmk/kperf/callstack.h index e4a0cd5e0..44779548d 100644 --- a/osfmk/kperf/callstack.h +++ b/osfmk/kperf/callstack.h @@ -46,6 +46,10 @@ #define CALLSTACK_CONTINUATION (1U << 5) /* the frames field is filled with uintptr_t, not uint64_t */ #define CALLSTACK_KERNEL_WORDS (1U << 6) +/* the frames come from a translated task */ +#define CALLSTACK_TRANSLATED (1U << 7) +/* the last frame could be the real PC */ +#define CALLSTACK_FIXUP_PC (1U << 8) struct kp_ucallstack { uint32_t kpuc_flags; diff --git a/osfmk/kperf/kdebug_trigger.c b/osfmk/kperf/kdebug_trigger.c index 885d8a606..4c85898ca 100644 --- a/osfmk/kperf/kdebug_trigger.c +++ b/osfmk/kperf/kdebug_trigger.c @@ -94,27 +94,18 @@ const static uint32_t debugid_masks[] = { /* UNSAFE */ #define DECODE_TYPE(TYPES, I) ((((uint8_t *)(TYPES))[(I) / 2] >> ((I) % 2) * 4) & 0xf) -int -kperf_kdebug_init(void) +void +kperf_kdebug_setup(void) { kperf_kdebug_filter = kalloc_tag(sizeof(*kperf_kdebug_filter), VM_KERN_MEMORY_DIAG); - if (kperf_kdebug_filter == NULL) { - return ENOMEM; - } bzero(kperf_kdebug_filter, sizeof(*kperf_kdebug_filter)); - - return 0; } void kperf_kdebug_reset(void) { - int err; - - if ((err = kperf_init())) { - return; - } + kperf_setup(); kperf_kdebug_action = 0; bzero(kperf_kdebug_filter, sizeof(*kperf_kdebug_filter)); @@ -153,9 +144,7 @@ kperf_kdebug_set_filter(user_addr_t user_filter, uint32_t user_size) uint32_t n_debugids_provided = 0; int err = 0; - if ((err = kperf_init())) { - return err; - } + kperf_setup(); n_debugids_provided = (uint32_t)KPERF_KDEBUG_N_DEBUGIDS(user_size); @@ -184,11 +173,7 @@ out: uint32_t kperf_kdebug_get_filter(struct kperf_kdebug_filter **filter) { - int err; - - if ((err = kperf_init())) { - return 0; - } + kperf_setup(); assert(filter != NULL); @@ -199,11 +184,7 @@ kperf_kdebug_get_filter(struct kperf_kdebug_filter **filter) int kperf_kdebug_set_n_debugids(uint32_t n_debugids_in) { - int err; - - if ((err = kperf_init())) { - return EINVAL; - } + kperf_setup(); if 
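
The BUF_DATA helpers deleted above relied on the classic argument-counting preprocessor trick: the caller's arguments push a trailing 4,3,2,1,0 list to the right so that one macro parameter lands on the real argument count, which then selects a zero-padding helper. A minimal standalone version of that pattern is shown here for reference; like the original, it depends on the GNU ", ## __VA_ARGS__" extension to handle the zero-argument case, and the names are made up.

/* Argument-counting macro sketch (GNU ## __VA_ARGS__ extension). */
#include <stdio.h>

#define EMIT(ID, A1, A2, A3, A4)  printf("event %d: %d %d %d %d\n", ID, A1, A2, A3, A4)

#define LOG0(ID, A1, A2, A3, A4)  EMIT(ID, 0, 0, 0, 0)
#define LOG1(ID, A1, A2, A3, A4)  EMIT(ID, A1, 0, 0, 0)
#define LOG2(ID, A1, A2, A3, A4)  EMIT(ID, A1, A2, 0, 0)
#define LOG3(ID, A1, A2, A3, A4)  EMIT(ID, A1, A2, A3, 0)
#define LOG4(ID, A1, A2, A3, A4)  EMIT(ID, A1, A2, A3, A4)

#define LOG_(ID, A1, A2, A3, A4, N, ...)  LOG##N(ID, A1, A2, A3, A4)
#define LOG(ID, ...)  LOG_(ID, ## __VA_ARGS__, 4, 3, 2, 1, 0)

int
main(void)
{
	LOG(10);              /* expands through LOG0 */
	LOG(11, 1, 2);        /* expands through LOG2 */
	LOG(12, 1, 2, 3, 4);  /* expands through LOG4 */
	return 0;
}
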
(n_debugids_in > KPERF_KDEBUG_DEBUGIDS_MAX) { return EINVAL; @@ -236,11 +217,7 @@ kperf_kdebug_get_action(void) static void kperf_kdebug_update(void) { - int err; - - if ((err = kperf_init())) { - return; - } + kperf_setup(); if (kperf_kdebug_action != 0 && kperf_kdebug_filter->n_debugids != 0) { diff --git a/osfmk/kperf/kdebug_trigger.h b/osfmk/kperf/kdebug_trigger.h index 6cfc254dc..8afad8213 100644 --- a/osfmk/kperf/kdebug_trigger.h +++ b/osfmk/kperf/kdebug_trigger.h @@ -42,7 +42,7 @@ struct kperf_kdebug_filter; (((FILTER_SIZE) <= (2 * sizeof(uint64_t))) ? 0 : \ (((FILTER_SIZE) - (2 * sizeof(uint64_t))) / sizeof(uint32_t))) -int kperf_kdebug_init(void); +void kperf_kdebug_setup(void); void kperf_kdebug_reset(void); boolean_t kperf_kdebug_should_trigger(uint32_t debugid); diff --git a/osfmk/kperf/kperf.c b/osfmk/kperf/kperf.c index 3bffd178c..77fc0cc46 100644 --- a/osfmk/kperf/kperf.c +++ b/osfmk/kperf/kperf.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2011-2018 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -25,6 +25,7 @@ * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ + #include /* port_name_to_task */ #include #include @@ -37,7 +38,7 @@ #include #include #include -#include +#include #include #include #include @@ -45,20 +46,19 @@ /* from libkern/libkern.h */ extern uint64_t strtouq(const char *, char **, int); -lck_grp_t kperf_lck_grp; - -/* IDs of threads on CPUs before starting the PET thread */ -uint64_t *kperf_tid_on_cpus = NULL; +LCK_GRP_DECLARE(kperf_lck_grp, "kperf"); /* one wired sample buffer per CPU */ static struct kperf_sample *intr_samplev; static unsigned int intr_samplec = 0; /* current sampling status */ -static unsigned sampling_status = KPERF_SAMPLING_OFF; +enum kperf_sampling kperf_status = KPERF_SAMPLING_OFF; -/* only init once */ -static boolean_t kperf_initted = FALSE; +/* + * Only set up kperf once. + */ +static bool kperf_is_setup = false; /* whether or not to callback to kperf on context switch */ boolean_t kperf_on_cpu_active = FALSE; @@ -77,80 +77,63 @@ kperf_intr_sample_buffer(void) return &(intr_samplev[ncpu]); } -/* setup interrupt sample buffers */ -int -kperf_init(void) +void +kperf_init_early(void) { - static lck_grp_attr_t lck_grp_attr; - - unsigned ncpus = 0; - int err; - - if (kperf_initted) { - return 0; + /* + * kperf allocates based on the number of CPUs and requires them to all be + * accounted for. 
+ */ + ml_wait_max_cpus(); + + boolean_t found_kperf = FALSE; + char kperf_config_str[64]; + found_kperf = PE_parse_boot_arg_str("kperf", kperf_config_str, sizeof(kperf_config_str)); + if (found_kperf && kperf_config_str[0] != '\0') { + kperf_kernel_configure(kperf_config_str); } +} - lck_grp_attr_setdefault(&lck_grp_attr); - lck_grp_init(&kperf_lck_grp, "kperf", &lck_grp_attr); - - ncpus = machine_info.logical_cpu_max; - - /* create buffers to remember which threads don't need to be sampled by PET */ - kperf_tid_on_cpus = kalloc_tag(ncpus * sizeof(*kperf_tid_on_cpus), - VM_KERN_MEMORY_DIAG); - if (kperf_tid_on_cpus == NULL) { - err = ENOMEM; - goto error; - } - bzero(kperf_tid_on_cpus, ncpus * sizeof(*kperf_tid_on_cpus)); - - /* create the interrupt buffers */ - intr_samplec = ncpus; - intr_samplev = kalloc_tag(ncpus * sizeof(*intr_samplev), - VM_KERN_MEMORY_DIAG); - if (intr_samplev == NULL) { - err = ENOMEM; - goto error; - } - bzero(intr_samplev, ncpus * sizeof(*intr_samplev)); +void +kperf_init(void) +{ + kptimer_init(); +} - /* create kdebug trigger filter buffers */ - if ((err = kperf_kdebug_init())) { - goto error; +void +kperf_setup(void) +{ + if (kperf_is_setup) { + return; } - kperf_initted = TRUE; - return 0; + intr_samplec = machine_info.logical_cpu_max; + size_t intr_samplev_size = intr_samplec * sizeof(*intr_samplev); + intr_samplev = kalloc_tag(intr_samplev_size, VM_KERN_MEMORY_DIAG); + memset(intr_samplev, 0, intr_samplev_size); -error: - if (intr_samplev) { - kfree(intr_samplev, ncpus * sizeof(*intr_samplev)); - intr_samplev = NULL; - intr_samplec = 0; - } + kperf_kdebug_setup(); - if (kperf_tid_on_cpus) { - kfree(kperf_tid_on_cpus, ncpus * sizeof(*kperf_tid_on_cpus)); - kperf_tid_on_cpus = NULL; - } - - return err; + kperf_is_setup = true; } void kperf_reset(void) { - /* turn off sampling first */ - (void)kperf_sampling_disable(); + /* + * Make sure samples aren't being taken before tearing everything down. + */ + (void)kperf_disable_sampling(); - /* cleanup miscellaneous configuration first */ kperf_lazy_reset(); (void)kperf_kdbg_cswitch_set(0); - (void)kperf_set_lightweight_pet(0); kperf_kdebug_reset(); + kptimer_reset(); + kppet_reset(); - /* timers, which require actions, first */ - kperf_timer_reset(); + /* + * Most of the other systems call into actions, so reset them last. 
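
kperf_init_early() now reads the "kperf" boot-arg and, when present, hands the string to kperf_kernel_configure(), which walks it with strtouq() and an end pointer (visible further down in this patch). The snippet below demonstrates only that end-pointer parsing idiom, using strtoull() in userspace; the two fields and their meanings are illustrative and not the full kperf config grammar.

/* End-pointer parsing idiom sketch; the config format is illustrative. */
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
	const char *config = "1,10000000";       /* e.g. "<samplers>,<period-ns>" */
	char *end = NULL;

	uint64_t samplers = strtoull(config, &end, 0);
	if (end == config) {
		fprintf(stderr, "bad sampler count\n");
		return 1;
	}
	config = end;
	if (*config != ',') {
		fprintf(stderr, "expected ','\n");
		return 1;
	}
	config++;

	uint64_t period_ns = strtoull(config, &end, 0);
	if (end == config) {
		fprintf(stderr, "bad period\n");
		return 1;
	}

	printf("samplers=%" PRIu64 " period=%" PRIu64 "ns\n", samplers, period_ns);
	return 0;
}
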
+ */ kperf_action_reset(); } @@ -179,7 +162,7 @@ kperf_kernel_configure(const char *config) pairs += 1; kperf_action_set_count(pairs); - kperf_timer_set_count(pairs); + kptimer_set_count(pairs); action_samplers = (uint32_t)strtouq(config, &end, 0); if (config == end) { @@ -204,19 +187,19 @@ kperf_kernel_configure(const char *config) nanoseconds_to_absolutetime(timer_period_ns, &timer_period); config = end; - kperf_timer_set_period(pairs - 1, timer_period); - kperf_timer_set_action(pairs - 1, pairs); + kptimer_set_period(pairs - 1, timer_period); + kptimer_set_action(pairs - 1, pairs); if (pet) { - kperf_timer_set_petid(pairs - 1); - kperf_set_lightweight_pet(1); + kptimer_set_pet_timerid(pairs - 1); + kppet_set_lightweight_pet(1); pet = false; } } while (*(config++) == ','); - int error = kperf_sampling_enable(); + int error = kperf_enable_sampling(); if (error) { - kprintf("kperf: cannot enable sampling at boot: %d", error); + printf("kperf: cannot enable sampling at boot: %d\n", error); } out: @@ -234,8 +217,8 @@ kperf_on_cpu_internal(thread_t thread, thread_continue_t continuation, int pid = task_pid(get_threadtask(thread)); BUF_DATA(PERF_TI_CSWITCH, thread_tid(thread), pid); } - if (kperf_lightweight_pet_active) { - kperf_pet_on_cpu(thread, continuation, starting_fp); + if (kppet_lightweight_active) { + kppet_on_cpu(thread, continuation, starting_fp); } if (kperf_lazy_wait_action != 0) { kperf_lazy_wait_sample(thread, continuation, starting_fp); @@ -246,62 +229,68 @@ void kperf_on_cpu_update(void) { kperf_on_cpu_active = kperf_kdebug_cswitch || - kperf_lightweight_pet_active || + kppet_lightweight_active || kperf_lazy_wait_action != 0; } -unsigned int -kperf_sampling_status(void) +bool +kperf_is_sampling(void) { - return sampling_status; + return kperf_status == KPERF_SAMPLING_ON; } int -kperf_sampling_enable(void) +kperf_enable_sampling(void) { - if (sampling_status == KPERF_SAMPLING_ON) { + if (kperf_status == KPERF_SAMPLING_ON) { return 0; } - if (sampling_status != KPERF_SAMPLING_OFF) { - panic("kperf: sampling was %d when asked to enable", sampling_status); + if (kperf_status != KPERF_SAMPLING_OFF) { + panic("kperf: sampling was %d when asked to enable", kperf_status); } /* make sure interrupt tables and actions are initted */ - if (!kperf_initted || (kperf_action_get_count() == 0)) { + if (!kperf_is_setup || (kperf_action_get_count() == 0)) { return ECANCELED; } - /* mark as running */ - sampling_status = KPERF_SAMPLING_ON; - kperf_lightweight_pet_active_update(); - - /* tell timers to enable */ - kperf_timer_go(); + kperf_status = KPERF_SAMPLING_ON; + kppet_lightweight_active_update(); + kptimer_start(); return 0; } int -kperf_sampling_disable(void) +kperf_disable_sampling(void) { - if (sampling_status != KPERF_SAMPLING_ON) { + if (kperf_status != KPERF_SAMPLING_ON) { return 0; } /* mark a shutting down */ - sampling_status = KPERF_SAMPLING_SHUTDOWN; + kperf_status = KPERF_SAMPLING_SHUTDOWN; /* tell timers to disable */ - kperf_timer_stop(); + kptimer_stop(); /* mark as off */ - sampling_status = KPERF_SAMPLING_OFF; - kperf_lightweight_pet_active_update(); + kperf_status = KPERF_SAMPLING_OFF; + kppet_lightweight_active_update(); return 0; } +void +kperf_timer_expire(void *param0, void * __unused param1) +{ + processor_t processor = param0; + int cpuid = processor->cpu_id; + + kptimer_expire(processor, cpuid, mach_absolute_time()); +} + boolean_t kperf_thread_get_dirty(thread_t thread) { diff --git a/osfmk/kperf/kperf.h b/osfmk/kperf/kperf.h index 31f87f6a6..905af51d3 100644 --- 
a/osfmk/kperf/kperf.h +++ b/osfmk/kperf/kperf.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011-2018 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2011-2019 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -51,27 +51,39 @@ void kperf_set_thread_ast(thread_t thread, uint32_t flags); boolean_t kperf_thread_get_dirty(thread_t thread); void kperf_thread_set_dirty(thread_t thread, boolean_t dirty); -/* possible states of kperf sampling */ -#define KPERF_SAMPLING_OFF (0) -#define KPERF_SAMPLING_ON (1) -#define KPERF_SAMPLING_SHUTDOWN (2) +/* + * Initialize the rest of kperf lazily, upon first use. May be called multiple times. + * The ktrace_lock must be held. + */ +void kperf_setup(void); /* - * Initialize kperf. Must be called before use and can be called multiple times. + * Configure kperf during boot and check the boot args. */ -extern int kperf_init(void); +extern void kperf_init_early(void); -/* get and set sampling status */ -extern unsigned kperf_sampling_status(void); -extern int kperf_sampling_enable(void); -extern int kperf_sampling_disable(void); +bool kperf_is_sampling(void); +int kperf_enable_sampling(void); +int kperf_disable_sampling(void); +int kperf_port_to_pid(mach_port_name_t portname); /* get a per-CPU sample buffer */ struct kperf_sample *kperf_intr_sample_buffer(void); +enum kperf_sampling { + KPERF_SAMPLING_OFF, + KPERF_SAMPLING_SHUTDOWN, + KPERF_SAMPLING_ON, +}; + +extern enum kperf_sampling kperf_status; + +#pragma mark - external callbacks + /* - * Callbacks into kperf from other systems. + * Set up kperf during system startup. */ +void kperf_init(void); /* * kperf AST handler @@ -81,10 +93,14 @@ struct kperf_sample *kperf_intr_sample_buffer(void); */ extern __attribute__((noinline)) void kperf_thread_ast_handler(thread_t thread); -/* update whether the callback is set */ +/* + * Update whether the on-CPU callback should be called. + */ void kperf_on_cpu_update(void); -/* for scheduler switching threads on */ +/* + * Should only be called by the scheduler when `thread` is switching on-CPU. + */ static inline void kperf_on_cpu(thread_t thread, thread_continue_t continuation, uintptr_t *starting_fp) @@ -98,7 +114,9 @@ kperf_on_cpu(thread_t thread, thread_continue_t continuation, } } -/* for scheduler switching threads off */ +/* + * Should only be called by the scheduler when `thread` is switching off-CPU. + */ static inline void kperf_off_cpu(thread_t thread) { @@ -110,7 +128,9 @@ kperf_off_cpu(thread_t thread) } } -/* for scheduler making threads runnable */ +/* + * Should only be called by the scheduler when `thread` is made runnable. + */ static inline void kperf_make_runnable(thread_t thread, int interrupt) { @@ -122,7 +142,18 @@ kperf_make_runnable(thread_t thread, int interrupt) } } -/* for interrupt handler epilogue */ +static inline void +kperf_running_setup(processor_t processor, uint64_t now) +{ + if (kperf_status == KPERF_SAMPLING_ON) { + extern void kptimer_running_setup(processor_t, uint64_t now); + kptimer_running_setup(processor, now); + } +} + +/* + * Should only be called by platform code at the end of each interrupt. 
+ */ static inline void kperf_interrupt(void) { @@ -131,11 +162,14 @@ kperf_interrupt(void) bool interrupt); if (__improbable(kperf_lazy_cpu_action != 0)) { - kperf_lazy_cpu_sample(current_thread(), 0, true); + kperf_lazy_cpu_sample(NULL, 0, true); } } -/* for kdebug on every traced event */ +/* + * Should only be called by kdebug when an event with `debugid` is emitted + * from the frame starting at `starting_fp`. + */ static inline void kperf_kdebug_callback(uint32_t debugid, uintptr_t *starting_fp) { @@ -147,6 +181,12 @@ kperf_kdebug_callback(uint32_t debugid, uintptr_t *starting_fp) } } +/* + * Should only be called by platform code to indicate kperf's per-CPU timer + * has expired on the current CPU `cpuid` at time `now`. + */ +void kperf_timer_expire(void *param0, void *param1); + /* * Used by ktrace to reset kperf. ktrace_lock must be held. */ @@ -157,11 +197,4 @@ extern void kperf_reset(void); */ void kperf_kernel_configure(const char *config); -/* given a task port, find out its pid */ -int kperf_port_to_pid(mach_port_name_t portname); - -#if DEVELOPMENT || DEBUG -extern _Atomic long long kperf_pending_ipis; -#endif /* DEVELOPMENT || DEBUG */ - #endif /* !defined(KPERF_H) */ diff --git a/osfmk/kperf/kperf_timer.c b/osfmk/kperf/kperf_timer.c deleted file mode 100644 index a6287f39c..000000000 --- a/osfmk/kperf/kperf_timer.c +++ /dev/null @@ -1,532 +0,0 @@ -/* - * Copyright (c) 2011 Apple Computer, Inc. All rights reserved. - * - * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * - * This file contains Original Code and/or Modifications of Original Code - * as defined in and that are subject to the Apple Public Source License - * Version 2.0 (the 'License'). You may not use this file except in - * compliance with the License. The rights granted to you under the License - * may not be used to create, or enable the creation or redistribution of, - * unlawful or unlicensed copies of an Apple operating system, or to - * circumvent, violate, or enable the circumvention or violation of, any - * terms of an Apple operating system software license agreement. - * - * Please obtain a copy of the License at - * http://www.opensource.apple.com/apsl/ and read it before using this file. - * - * The Original Code and all software distributed under the License are - * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER - * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, - * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. - * Please see the License for the specific language governing rights and - * limitations under the License. 
- * - * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ - */ - -/* Manage timers */ - -#include -#include /* current_thread() */ -#include -#include -#include -#include -#include - -#include -#if defined(__x86_64__) -#include -#endif /* defined(__x86_64__) */ - -#include -#include -#include -#include -#include -#include -#include -#include - -/* the list of timers */ -struct kperf_timer *kperf_timerv = NULL; -unsigned int kperf_timerc = 0; - -static unsigned int pet_timer_id = 999; - -#define KPERF_TMR_ACTION_MASK (0xff) -#define KPERF_TMR_ACTION(action_state) ((action_state) & KPERF_TMR_ACTION_MASK) -#define KPERF_TMR_ACTIVE (0x100) - -/* maximum number of timers we can construct */ -#define TIMER_MAX (16) - -static uint64_t min_period_abstime; -static uint64_t min_period_bg_abstime; -static uint64_t min_period_pet_abstime; -static uint64_t min_period_pet_bg_abstime; - -static uint64_t -kperf_timer_min_period_abstime(void) -{ - if (ktrace_background_active()) { - return min_period_bg_abstime; - } else { - return min_period_abstime; - } -} - -static uint64_t -kperf_timer_min_pet_period_abstime(void) -{ - if (ktrace_background_active()) { - return min_period_pet_bg_abstime; - } else { - return min_period_pet_abstime; - } -} - -static void -kperf_timer_schedule(struct kperf_timer *timer, uint64_t now) -{ - BUF_INFO(PERF_TM_SCHED, timer->period); - - /* if we re-programmed the timer to zero, just drop it */ - if (timer->period == 0) { - return; - } - - /* calculate deadline */ - uint64_t deadline = now + timer->period; - - /* re-schedule the timer, making sure we don't apply slop */ - timer_call_enter(&timer->tcall, deadline, TIMER_CALL_SYS_CRITICAL); -} - -static void -kperf_sample_cpu(struct kperf_timer *timer, bool system_sample, - bool only_system) -{ - assert(timer != NULL); - - /* Always cut a tracepoint to show a sample event occurred */ - BUF_DATA(PERF_TM_HNDLR | DBG_FUNC_START, 0); - - int ncpu = cpu_number(); - - struct kperf_sample *intbuf = kperf_intr_sample_buffer(); -#if DEVELOPMENT || DEBUG - intbuf->sample_time = mach_absolute_time(); -#endif /* DEVELOPMENT || DEBUG */ - - /* On a timer, we can see the "real" current thread */ - thread_t thread = current_thread(); - task_t task = get_threadtask(thread); - struct kperf_context ctx = { - .cur_thread = thread, - .cur_task = task, - .cur_pid = task_pid(task), - .trigger_type = TRIGGER_TYPE_TIMER, - .trigger_id = (unsigned int)(timer - kperf_timerv), - }; - - if (ctx.trigger_id == pet_timer_id && ncpu < machine_info.logical_cpu_max) { - kperf_tid_on_cpus[ncpu] = thread_tid(ctx.cur_thread); - } - - /* make sure sampling is on */ - unsigned int status = kperf_sampling_status(); - if (status == KPERF_SAMPLING_OFF) { - BUF_INFO(PERF_TM_HNDLR | DBG_FUNC_END, SAMPLE_OFF); - return; - } else if (status == KPERF_SAMPLING_SHUTDOWN) { - BUF_INFO(PERF_TM_HNDLR | DBG_FUNC_END, SAMPLE_SHUTDOWN); - return; - } - - /* call the action -- kernel-only from interrupt, pend user */ - int r = kperf_sample(intbuf, &ctx, timer->actionid, - SAMPLE_FLAG_PEND_USER | (system_sample ? SAMPLE_FLAG_SYSTEM : 0) | - (only_system ? 
SAMPLE_FLAG_ONLY_SYSTEM : 0)); - - /* end tracepoint is informational */ - BUF_INFO(PERF_TM_HNDLR | DBG_FUNC_END, r); - - (void)atomic_fetch_and_explicit(&timer->pending_cpus, - ~(UINT64_C(1) << ncpu), memory_order_relaxed); -} - -void -kperf_ipi_handler(void *param) -{ - kperf_sample_cpu((struct kperf_timer *)param, false, false); -} - -static void -kperf_timer_handler(void *param0, __unused void *param1) -{ - struct kperf_timer *timer = param0; - unsigned int ntimer = (unsigned int)(timer - kperf_timerv); - unsigned int ncpus = machine_info.logical_cpu_max; - bool system_only_self = true; - - uint32_t action_state = atomic_fetch_or(&timer->action_state, - KPERF_TMR_ACTIVE); - - uint32_t actionid = KPERF_TMR_ACTION(action_state); - if (actionid == 0) { - goto deactivate; - } - -#if DEVELOPMENT || DEBUG - timer->fire_time = mach_absolute_time(); -#endif /* DEVELOPMENT || DEBUG */ - - /* along the lines of do not ipi if we are all shutting down */ - if (kperf_sampling_status() == KPERF_SAMPLING_SHUTDOWN) { - goto deactivate; - } - - BUF_DATA(PERF_TM_FIRE, ntimer, ntimer == pet_timer_id, timer->period, - actionid); - - if (ntimer == pet_timer_id) { - kperf_pet_fire_before(); - - /* clean-up the thread-on-CPUs cache */ - bzero(kperf_tid_on_cpus, ncpus * sizeof(*kperf_tid_on_cpus)); - } - - /* - * IPI other cores only if the action has non-system samplers. - */ - if (kperf_action_has_non_system(actionid)) { - /* - * If the core that's handling the timer is not scheduling - * threads, only run system samplers. - */ - system_only_self = kperf_mp_broadcast_other_running(timer); - } - kperf_sample_cpu(timer, true, system_only_self); - - /* release the pet thread? */ - if (ntimer == pet_timer_id) { - /* PET mode is responsible for rearming the timer */ - kperf_pet_fire_after(); - } else { - /* - * FIXME: Get the current time from elsewhere. The next - * timer's period now includes the time taken to reach this - * point. This causes a bias towards longer sampling periods - * than requested. - */ - kperf_timer_schedule(timer, mach_absolute_time()); - } - -deactivate: - atomic_fetch_and(&timer->action_state, ~KPERF_TMR_ACTIVE); -} - -/* program the timer from the PET thread */ -void -kperf_timer_pet_rearm(uint64_t elapsed_ticks) -{ - struct kperf_timer *timer = NULL; - uint64_t period = 0; - uint64_t deadline; - - /* - * If the pet_timer_id is invalid, it has been disabled, so this should - * do nothing. - */ - if (pet_timer_id >= kperf_timerc) { - return; - } - - unsigned int status = kperf_sampling_status(); - /* do not reprogram the timer if it has been shutdown or sampling is off */ - if (status == KPERF_SAMPLING_OFF) { - BUF_INFO(PERF_PET_END, SAMPLE_OFF); - return; - } else if (status == KPERF_SAMPLING_SHUTDOWN) { - BUF_INFO(PERF_PET_END, SAMPLE_SHUTDOWN); - return; - } - - timer = &(kperf_timerv[pet_timer_id]); - - /* if we re-programmed the timer to zero, just drop it */ - if (!timer->period) { - return; - } - - /* subtract the time the pet sample took being careful not to underflow */ - if (timer->period > elapsed_ticks) { - period = timer->period - elapsed_ticks; - } - - /* make sure we don't set the next PET sample to happen too soon */ - if (period < min_period_pet_abstime) { - period = min_period_pet_abstime; - } - - /* we probably took so long in the PET thread, it makes sense to take - * the time again. 
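
The removed kperf_timer_pet_rearm() computes the next deadline by deducting the time the PET sample took from the configured period, guarding against underflow and clamping to a minimum so the timer never fires too soon. That arithmetic, isolated into a small function with made-up tick values:

/* Rearm arithmetic sketch; all values are abstract "ticks". */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t
pet_next_deadline(uint64_t now, uint64_t configured_period,
    uint64_t elapsed_ticks, uint64_t min_period)
{
	uint64_t period = 0;

	if (configured_period > elapsed_ticks) {
		period = configured_period - elapsed_ticks;   /* avoid underflow */
	}
	if (period < min_period) {
		period = min_period;                          /* don't fire too soon */
	}
	return now + period;
}

int
main(void)
{
	/* sampling took longer than the period: fall back to the minimum */
	printf("%" PRIu64 "\n", pet_next_deadline(1000, 250, 400, 100));  /* 1100 */
	/* normal case: the sample time is deducted from the next period */
	printf("%" PRIu64 "\n", pet_next_deadline(1000, 250, 50, 100));   /* 1200 */
	return 0;
}
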
- */ - deadline = mach_absolute_time() + period; - - BUF_INFO(PERF_PET_SCHED, timer->period, period, elapsed_ticks, deadline); - - /* re-schedule the timer, making sure we don't apply slop */ - timer_call_enter(&timer->tcall, deadline, TIMER_CALL_SYS_CRITICAL); - - return; -} - -/* turn on all the timers */ -void -kperf_timer_go(void) -{ - /* get the PET thread going */ - if (pet_timer_id < kperf_timerc) { - kperf_pet_config(kperf_timerv[pet_timer_id].actionid); - } - - uint64_t now = mach_absolute_time(); - - for (unsigned int i = 0; i < kperf_timerc; i++) { - struct kperf_timer *timer = &kperf_timerv[i]; - if (timer->period == 0) { - continue; - } - - atomic_store(&timer->action_state, - timer->actionid & KPERF_TMR_ACTION_MASK); - kperf_timer_schedule(timer, now); - } -} - -void -kperf_timer_stop(void) -{ - /* - * Determine which timers are running and store them in a bitset, while - * cancelling their timer call. - */ - uint64_t running_timers = 0; - for (unsigned int i = 0; i < kperf_timerc; i++) { - struct kperf_timer *timer = &kperf_timerv[i]; - if (timer->period == 0) { - continue; - } - - uint32_t action_state = atomic_fetch_and(&timer->action_state, - ~KPERF_TMR_ACTION_MASK); - if (action_state & KPERF_TMR_ACTIVE) { - bit_set(running_timers, i); - } - - timer_call_cancel(&timer->tcall); - } - - /* - * Wait for any running timers to finish their critical sections. - */ - for (unsigned int i = lsb_first(running_timers); i < kperf_timerc; - i = lsb_next(running_timers, i)) { - while (atomic_load(&kperf_timerv[i].action_state) != 0) { - delay(10); - } - } - - if (pet_timer_id < kperf_timerc) { - /* wait for PET to stop, too */ - kperf_pet_config(0); - } -} - -unsigned int -kperf_timer_get_petid(void) -{ - return pet_timer_id; -} - -int -kperf_timer_set_petid(unsigned int timerid) -{ - if (timerid < kperf_timerc) { - uint64_t min_period; - - min_period = kperf_timer_min_pet_period_abstime(); - if (kperf_timerv[timerid].period < min_period) { - kperf_timerv[timerid].period = min_period; - } - kperf_pet_config(kperf_timerv[timerid].actionid); - } else { - /* clear the PET trigger if it's a bogus ID */ - kperf_pet_config(0); - } - - pet_timer_id = timerid; - - return 0; -} - -int -kperf_timer_get_period(unsigned int timerid, uint64_t *period_abstime) -{ - if (timerid >= kperf_timerc) { - return EINVAL; - } - - *period_abstime = kperf_timerv[timerid].period; - return 0; -} - -int -kperf_timer_set_period(unsigned int timerid, uint64_t period_abstime) -{ - uint64_t min_period; - - if (timerid >= kperf_timerc) { - return EINVAL; - } - - if (pet_timer_id == timerid) { - min_period = kperf_timer_min_pet_period_abstime(); - } else { - min_period = kperf_timer_min_period_abstime(); - } - - if (period_abstime > 0 && period_abstime < min_period) { - period_abstime = min_period; - } - - kperf_timerv[timerid].period = period_abstime; - - /* FIXME: re-program running timers? 
*/ - - return 0; -} - -int -kperf_timer_get_action(unsigned int timerid, uint32_t *action) -{ - if (timerid >= kperf_timerc) { - return EINVAL; - } - - *action = kperf_timerv[timerid].actionid; - return 0; -} - -int -kperf_timer_set_action(unsigned int timerid, uint32_t action) -{ - if (timerid >= kperf_timerc) { - return EINVAL; - } - - kperf_timerv[timerid].actionid = action; - return 0; -} - -unsigned int -kperf_timer_get_count(void) -{ - return kperf_timerc; -} - -void -kperf_timer_reset(void) -{ - kperf_timer_set_petid(999); - kperf_set_pet_idle_rate(KPERF_PET_DEFAULT_IDLE_RATE); - kperf_set_lightweight_pet(0); - for (unsigned int i = 0; i < kperf_timerc; i++) { - kperf_timerv[i].period = 0; - kperf_timerv[i].actionid = 0; - atomic_store_explicit(&kperf_timerv[i].pending_cpus, 0, memory_order_relaxed); - } -} - -extern int -kperf_timer_set_count(unsigned int count) -{ - struct kperf_timer *new_timerv = NULL, *old_timerv = NULL; - unsigned int old_count; - - if (min_period_abstime == 0) { - nanoseconds_to_absolutetime(KP_MIN_PERIOD_NS, &min_period_abstime); - nanoseconds_to_absolutetime(KP_MIN_PERIOD_BG_NS, &min_period_bg_abstime); - nanoseconds_to_absolutetime(KP_MIN_PERIOD_PET_NS, &min_period_pet_abstime); - nanoseconds_to_absolutetime(KP_MIN_PERIOD_PET_BG_NS, - &min_period_pet_bg_abstime); - assert(min_period_abstime > 0); - } - - if (count == kperf_timerc) { - return 0; - } - if (count > TIMER_MAX) { - return EINVAL; - } - - /* TODO: allow shrinking? */ - if (count < kperf_timerc) { - return EINVAL; - } - - /* - * Make sure kperf is initialized when creating the array for the first - * time. - */ - if (kperf_timerc == 0) { - int r; - - /* main kperf */ - if ((r = kperf_init())) { - return r; - } - } - - /* - * Shut down any running timers since we will be messing with the timer - * call structures. - */ - kperf_timer_stop(); - - /* create a new array */ - new_timerv = kalloc_tag(count * sizeof(struct kperf_timer), - VM_KERN_MEMORY_DIAG); - if (new_timerv == NULL) { - return ENOMEM; - } - old_timerv = kperf_timerv; - old_count = kperf_timerc; - - if (old_timerv != NULL) { - bcopy(kperf_timerv, new_timerv, - kperf_timerc * sizeof(struct kperf_timer)); - } - - /* zero the new entries */ - bzero(&(new_timerv[kperf_timerc]), - (count - old_count) * sizeof(struct kperf_timer)); - - /* (re-)setup the timer call info for all entries */ - for (unsigned int i = 0; i < count; i++) { - timer_call_setup(&new_timerv[i].tcall, kperf_timer_handler, &new_timerv[i]); - } - - kperf_timerv = new_timerv; - kperf_timerc = count; - - if (old_timerv != NULL) { - kfree(old_timerv, old_count * sizeof(struct kperf_timer)); - } - - return 0; -} diff --git a/osfmk/kperf/kperf_timer.h b/osfmk/kperf/kperf_timer.h deleted file mode 100644 index b59e1f8a3..000000000 --- a/osfmk/kperf/kperf_timer.h +++ /dev/null @@ -1,111 +0,0 @@ -#ifndef KPERF_TIMER_H -#define KPERF_TIMER_H -/* - * Copyright (c) 2011 Apple Computer, Inc. All rights reserved. - * - * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * - * This file contains Original Code and/or Modifications of Original Code - * as defined in and that are subject to the Apple Public Source License - * Version 2.0 (the 'License'). You may not use this file except in - * compliance with the License. 
The rights granted to you under the License - * may not be used to create, or enable the creation or redistribution of, - * unlawful or unlicensed copies of an Apple operating system, or to - * circumvent, violate, or enable the circumvention or violation of, any - * terms of an Apple operating system software license agreement. - * - * Please obtain a copy of the License at - * http://www.opensource.apple.com/apsl/ and read it before using this file. - * - * The Original Code and all software distributed under the License are - * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER - * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, - * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. - * Please see the License for the specific language governing rights and - * limitations under the License. - * - * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ - */ - -#include -#include - -struct kperf_timer { - struct timer_call tcall; - uint64_t period; - _Atomic uint32_t action_state; - uint32_t actionid; - - /* - * A bitmap of CPUs that have a pending timer to service. On Intel, it - * allows the core responding to the timer interrupt to not queue up - * cross-calls on cores that haven't yet responded. On ARM, it allows - * the signal handler to multiplex simultaneous fires of different - * timers. - */ - _Atomic bitmap_t pending_cpus; - -#if DEVELOPMENT || DEBUG - uint64_t fire_time; -#endif /* DEVELOPMENT || DEBUG */ -}; - -extern struct kperf_timer *kperf_timerv; -extern unsigned int kperf_timerc; - -void kperf_timer_reprogram(void); -void kperf_timer_reprogram_all(void); - -void kperf_ipi_handler(void *param); - -// return values from the action -#define TIMER_REPROGRAM (0) -#define TIMER_STOP (1) - -#if defined(__x86_64__) - -#define KP_MIN_PERIOD_NS (20 * NSEC_PER_USEC) -#define KP_MIN_PERIOD_BG_NS (1 * NSEC_PER_MSEC) -#define KP_MIN_PERIOD_PET_NS (2 * NSEC_PER_MSEC) -#define KP_MIN_PERIOD_PET_BG_NS (5 * NSEC_PER_MSEC) - -#elif defined(__arm64__) - -#define KP_MIN_PERIOD_NS (50 * NSEC_PER_USEC) -#define KP_MIN_PERIOD_BG_NS (1 * NSEC_PER_MSEC) -#define KP_MIN_PERIOD_PET_NS (2 * NSEC_PER_MSEC) -#define KP_MIN_PERIOD_PET_BG_NS (10 * NSEC_PER_MSEC) - -#elif defined(__arm__) - -#define KP_MIN_PERIOD_NS (100 * NSEC_PER_USEC) -#define KP_MIN_PERIOD_BG_NS (10 * NSEC_PER_MSEC) -#define KP_MIN_PERIOD_PET_NS (2 * NSEC_PER_MSEC) -#define KP_MIN_PERIOD_PET_BG_NS (50 * NSEC_PER_MSEC) - -#else /* defined(__x86_64__) */ -#error "unsupported architecture" -#endif /* defined(__x86_64__) */ - -/* getters and setters on timers */ -unsigned kperf_timer_get_count(void); -int kperf_timer_set_count(unsigned int count); - -int kperf_timer_get_period(unsigned int timer, uint64_t *period); -int kperf_timer_set_period(unsigned int timer, uint64_t period); - -int kperf_timer_get_action(unsigned int timer, uint32_t *action); -int kperf_timer_set_action(unsigned int timer, uint32_t action); - -void kperf_timer_go(void); -void kperf_timer_stop(void); -void kperf_timer_reset(void); - -unsigned int kperf_timer_get_petid(void); -int kperf_timer_set_petid(unsigned int count); - -/* so PET thread can re-arm the timer */ -void kperf_timer_pet_rearm(uint64_t elapsed_ticks); - -#endif /* !defined(KPERF_TIMER_H) */ diff --git a/osfmk/kperf/kperfbsd.c b/osfmk/kperf/kperfbsd.c index c50ca6b5a..90a7ca435 100644 --- a/osfmk/kperf/kperfbsd.c +++ b/osfmk/kperf/kperfbsd.c @@ -26,7 +26,7 @@ * 
@APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -/* sysctl interface for paramters from user-land */ +/* sysctl interface for parameters from user-land */ #include #include @@ -42,7 +42,7 @@ #include #include #include -#include +#include #include #include @@ -162,7 +162,7 @@ kperf_sysctl_get_set_uint64(struct sysctl_req *req, value = get(); } - int error = sysctl_io_number(req, value, sizeof(value), &value, NULL); + int error = sysctl_io_number(req, (long long)value, sizeof(value), &value, NULL); if (error || !req->newptr) { return error; @@ -215,33 +215,29 @@ kperf_sysctl_get_set_unsigned_uint32(struct sysctl_req *req, static int sysctl_timer_period(struct sysctl_req *req) { - int error; uint64_t inputs[2] = {}; - assert(req != NULL); - if (req->newptr == USER_ADDR_NULL) { return EFAULT; } + int error = 0; if ((error = copyin(req->newptr, inputs, sizeof(inputs)))) { return error; } - unsigned int timer = (unsigned int)inputs[0]; uint64_t new_period = inputs[1]; if (req->oldptr != USER_ADDR_NULL) { uint64_t period_out = 0; - if ((error = kperf_timer_get_period(timer, &period_out))) { + if ((error = kptimer_get_period(timer, &period_out))) { return error; } inputs[1] = period_out; - return copyout(inputs, req->oldptr, sizeof(inputs)); } else { - return kperf_timer_set_period(timer, new_period); + return kptimer_set_period(timer, new_period); } } @@ -270,7 +266,7 @@ sysctl_action_filter(struct sysctl_req *req, bool is_task_t) return error; } - inputs[1] = filter_out; + inputs[1] = (uint64_t)filter_out; return copyout(inputs, req->oldptr, sizeof(inputs)); } else { int pid = is_task_t ? kperf_port_to_pid((mach_port_name_t)new_filter) @@ -352,20 +348,26 @@ sysctl_kdebug_filter(struct sysctl_req *req) } } +static uint32_t +kperf_sampling_get(void) +{ + return kperf_is_sampling(); +} + static int kperf_sampling_set(uint32_t sample_start) { if (sample_start) { - return kperf_sampling_enable(); + return kperf_enable_sampling(); } else { - return kperf_sampling_disable(); + return kperf_disable_sampling(); } } static int sysctl_sampling(struct sysctl_req *req) { - return kperf_sysctl_get_set_uint32(req, kperf_sampling_status, + return kperf_sysctl_get_set_uint32(req, kperf_sampling_get, kperf_sampling_set); } @@ -379,22 +381,22 @@ sysctl_action_count(struct sysctl_req *req) static int sysctl_timer_count(struct sysctl_req *req) { - return kperf_sysctl_get_set_uint32(req, kperf_timer_get_count, - kperf_timer_set_count); + return kperf_sysctl_get_set_uint32(req, kptimer_get_count, + kptimer_set_count); } static int sysctl_timer_action(struct sysctl_req *req) { - return kperf_sysctl_get_set_unsigned_uint32(req, kperf_timer_get_action, - kperf_timer_set_action); + return kperf_sysctl_get_set_unsigned_uint32(req, kptimer_get_action, + kptimer_set_action); } static int sysctl_timer_pet(struct sysctl_req *req) { - return kperf_sysctl_get_set_uint32(req, kperf_timer_get_petid, - kperf_timer_set_petid); + return kperf_sysctl_get_set_uint32(req, kptimer_get_pet_timerid, + kptimer_set_pet_timerid); } static int @@ -425,15 +427,15 @@ sysctl_kperf_reset(struct sysctl_req *req) static int sysctl_pet_idle_rate(struct sysctl_req *req) { - return kperf_sysctl_get_set_int(req, kperf_get_pet_idle_rate, - kperf_set_pet_idle_rate); + return kperf_sysctl_get_set_int(req, kppet_get_idle_rate, + kppet_set_idle_rate); } static int sysctl_lightweight_pet(struct sysctl_req *req) { - return kperf_sysctl_get_set_int(req, kperf_get_lightweight_pet, - kperf_set_lightweight_pet); + return kperf_sysctl_get_set_int(req, 
kppet_get_lightweight_pet, + kppet_set_lightweight_pet); } static int @@ -804,31 +806,13 @@ static int kperf_sysctl_limits SYSCTL_HANDLER_ARGS { #pragma unused(oidp, arg2) - enum kperf_limit_request type = (enum kperf_limit_request)arg1; - uint64_t limit = 0; - - switch (type) { - case REQ_LIM_PERIOD_NS: - limit = KP_MIN_PERIOD_NS; - break; - - case REQ_LIM_BG_PERIOD_NS: - limit = KP_MIN_PERIOD_BG_NS; - break; - - case REQ_LIM_PET_PERIOD_NS: - limit = KP_MIN_PERIOD_PET_NS; - break; - - case REQ_LIM_BG_PET_PERIOD_NS: - limit = KP_MIN_PERIOD_PET_BG_NS; - break; - - default: + enum kptimer_period_limit limit = (enum kptimer_period_limit)arg1; + if (limit >= KTPL_MAX) { return ENOENT; } - - return sysctl_io_number(req, limit, sizeof(limit), &limit, NULL); + uint64_t period = kptimer_minperiods_ns[limit]; + return sysctl_io_number(req, (long long)period, sizeof(period), &period, + NULL); } SYSCTL_PROC(_kperf_limits, OID_AUTO, timer_min_period_ns, diff --git a/osfmk/kperf/kptimer.c b/osfmk/kperf/kptimer.c new file mode 100644 index 000000000..567a52a64 --- /dev/null +++ b/osfmk/kperf/kptimer.c @@ -0,0 +1,734 @@ +/* + * Copyright (c) 2011-2018 Apple Computer, Inc. All rights reserved. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. + * + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ + */ + +/* + * This file manages the timers used for on-CPU samples and PET. + * + * Each timer configured by a tool is represented by a kptimer structure. + * The timer calls present in each structure are used to schedule CPU-local + * timers. As each timer fires, that CPU samples itself and schedules another + * timer to fire at the next deadline. The first timer to fire across all CPUs + * determines that deadline. This causes the timers to fire at a consistent + * cadence. + * + * Traditional PET uses a timer call to wake up its sampling thread and take + * on-CPU samples. + * + * Synchronization for start and stop is provided by the ktrace subsystem lock. + * Global state is stored in a single struct, to ease debugging. 
+ */ + +#include +#include /* current_thread() */ +#include +#include +#include +#include +#include +#include +#include + +#include +#if defined(__x86_64__) +#include +#endif /* defined(__x86_64__) */ + +#include +#include +#include +#include +#include +#include +#include + +#define KPTIMER_PET_INACTIVE (999) +#define KPTIMER_MAX (8) + +struct kptimer { + uint32_t kt_actionid; + uint64_t kt_period_abs; + /* + * The `kt_cur_deadline` field represents when the timer should next fire. + * It's used to synchronize between timers firing on each CPU. In the timer + * handler, each CPU will take the `kt_lock` and see if the + * `kt_cur_deadline` still needs to be updated for the timer fire. If so, + * it updates it and logs the timer fire event under the lock. + */ + lck_spin_t kt_lock; + uint64_t kt_cur_deadline; + +#if DEVELOPMENT || DEBUG + /* + * To be set by the timer leader as a debugging aid for timeouts, if kperf + * happens to be on-CPU when they occur. + */ + uint64_t kt_fire_time; +#endif /* DEVELOPMENT || DEBUG */ +}; + +static struct { + struct kptimer *g_timers; + uint64_t *g_cpu_deadlines; + unsigned int g_ntimers; + unsigned int g_pet_timerid; + + bool g_setup:1; + bool g_pet_active:1; + bool g_started:1; + + struct timer_call g_pet_timer; +} kptimer = { + .g_pet_timerid = KPTIMER_PET_INACTIVE, +}; + +SECURITY_READ_ONLY_LATE(static uint64_t) kptimer_minperiods_mtu[KTPL_MAX]; + +/* + * Enforce a minimum timer period to prevent interrupt storms. + */ +const uint64_t kptimer_minperiods_ns[KTPL_MAX] = { +#if defined(__x86_64__) + [KTPL_FG] = 20 * NSEC_PER_USEC, /* The minimum timer period in xnu, period. */ + [KTPL_BG] = 1 * NSEC_PER_MSEC, + [KTPL_FG_PET] = 2 * NSEC_PER_MSEC, + [KTPL_BG_PET] = 5 * NSEC_PER_MSEC, +#elif defined(__arm64__) + [KTPL_FG] = 50 * NSEC_PER_USEC, + [KTPL_BG] = 1 * NSEC_PER_MSEC, + [KTPL_FG_PET] = 2 * NSEC_PER_MSEC, + [KTPL_BG_PET] = 10 * NSEC_PER_MSEC, +#elif defined(__arm__) + [KTPL_FG] = 100 * NSEC_PER_USEC, + [KTPL_BG] = 10 * NSEC_PER_MSEC, + [KTPL_FG_PET] = 2 * NSEC_PER_MSEC, + [KTPL_BG_PET] = 50 * NSEC_PER_MSEC, +#else +#error unexpected architecture +#endif +}; + +static void kptimer_pet_handler(void * __unused param1, void * __unused param2); +static void kptimer_stop_curcpu(processor_t processor); + +void +kptimer_init(void) +{ + for (int i = 0; i < KTPL_MAX; i++) { + nanoseconds_to_absolutetime(kptimer_minperiods_ns[i], + &kptimer_minperiods_mtu[i]); + } +} + +static void +kptimer_set_cpu_deadline(int cpuid, int timerid, uint64_t deadline) +{ + kptimer.g_cpu_deadlines[(cpuid * KPTIMER_MAX) + timerid] = + deadline; +} + +static void +kptimer_setup(void) +{ + if (kptimer.g_setup) { + return; + } + static lck_grp_t kptimer_lock_grp; + lck_grp_init(&kptimer_lock_grp, "kptimer", LCK_GRP_ATTR_NULL); + + const size_t timers_size = KPTIMER_MAX * sizeof(struct kptimer); + kptimer.g_timers = kalloc_tag(timers_size, VM_KERN_MEMORY_DIAG); + assert(kptimer.g_timers != NULL); + memset(kptimer.g_timers, 0, timers_size); + for (int i = 0; i < KPTIMER_MAX; i++) { + lck_spin_init(&kptimer.g_timers[i].kt_lock, &kptimer_lock_grp, + LCK_ATTR_NULL); + } + + const size_t deadlines_size = machine_info.logical_cpu_max * KPTIMER_MAX * + sizeof(kptimer.g_cpu_deadlines[0]); + kptimer.g_cpu_deadlines = kalloc_tag(deadlines_size, VM_KERN_MEMORY_DIAG); + assert(kptimer.g_cpu_deadlines != NULL); + memset(kptimer.g_cpu_deadlines, 0, deadlines_size); + for (int i = 0; i < KPTIMER_MAX; i++) { + for (int j = 0; j < machine_info.logical_cpu_max; j++) { + kptimer_set_cpu_deadline(j, i, 
EndOfAllTime); + } + } + + timer_call_setup(&kptimer.g_pet_timer, kptimer_pet_handler, NULL); + + kptimer.g_setup = true; +} + +void +kptimer_reset(void) +{ + kptimer_stop(); + kptimer_set_pet_timerid(KPTIMER_PET_INACTIVE); + + for (unsigned int i = 0; i < kptimer.g_ntimers; i++) { + kptimer.g_timers[i].kt_period_abs = 0; + kptimer.g_timers[i].kt_actionid = 0; + for (int j = 0; j < machine_info.logical_cpu_max; j++) { + kptimer_set_cpu_deadline(j, i, EndOfAllTime); + } + } +} + +#pragma mark - deadline management + +static uint64_t +kptimer_get_cpu_deadline(int cpuid, int timerid) +{ + return kptimer.g_cpu_deadlines[(cpuid * KPTIMER_MAX) + timerid]; +} + +static void +kptimer_sample_curcpu(unsigned int actionid, unsigned int timerid, + uint32_t flags) +{ + struct kperf_sample *intbuf = kperf_intr_sample_buffer(); +#if DEVELOPMENT || DEBUG + intbuf->sample_time = mach_absolute_time(); +#endif /* DEVELOPMENT || DEBUG */ + + BUF_DATA(PERF_TM_HNDLR | DBG_FUNC_START); + + thread_t thread = current_thread(); + task_t task = get_threadtask(thread); + struct kperf_context ctx = { + .cur_thread = thread, + .cur_task = task, + .cur_pid = task_pid(task), + .trigger_type = TRIGGER_TYPE_TIMER, + .trigger_id = timerid, + }; + + (void)kperf_sample(intbuf, &ctx, actionid, + SAMPLE_FLAG_PEND_USER | flags); + + BUF_INFO(PERF_TM_HNDLR | DBG_FUNC_END); +} + +static void +kptimer_lock(struct kptimer *timer) +{ + lck_spin_lock(&timer->kt_lock); +} + +static void +kptimer_unlock(struct kptimer *timer) +{ + lck_spin_unlock(&timer->kt_lock); +} + +/* + * If the deadline expired in the past, find the next deadline to program, + * locked into the cadence provided by the period. + */ +static inline uint64_t +dead_reckon_deadline(uint64_t now, uint64_t deadline, uint64_t period) +{ + if (deadline < now) { + uint64_t time_since = now - deadline; + uint64_t extra_time = period - (time_since % period); + return now + extra_time; + } + return deadline; +} + +static uint64_t +kptimer_fire(struct kptimer *timer, unsigned int timerid, + uint64_t deadline, int __unused cpuid, uint64_t now) +{ + bool first = false; + uint64_t next_deadline = deadline + timer->kt_period_abs; + + /* + * It's not straightforward to replace this lock with a compare-exchange, + * since the PERF_TM_FIRE event must be emitted *before* any subsequent + * PERF_TM_HNDLR events, so tools can understand the handlers are responding + * to this timer fire. + */ + kptimer_lock(timer); + if (timer->kt_cur_deadline < next_deadline) { + first = true; + next_deadline = dead_reckon_deadline(now, next_deadline, + timer->kt_period_abs); + timer->kt_cur_deadline = next_deadline; + BUF_DATA(PERF_TM_FIRE, timerid, timerid == kptimer.g_pet_timerid, + timer->kt_period_abs, timer->kt_actionid); +#if DEVELOPMENT || DEBUG + /* + * Debugging aid to see the last time this timer fired. + */ + timer->kt_fire_time = mach_absolute_time(); +#endif /* DEVELOPMENT || DEBUG */ + if (timerid == kptimer.g_pet_timerid && kppet_get_lightweight_pet()) { + os_atomic_inc(&kppet_gencount, relaxed); + } + } else { + /* + * In case this CPU has missed several timer fires, get it back on track + * by synchronizing with the latest timer fire. + */ + next_deadline = timer->kt_cur_deadline; + } + kptimer_unlock(timer); + + if (!first && !kperf_action_has_non_system(timer->kt_actionid)) { + /* + * The first timer to fire will sample the system, so there's + * no need to run other timers if those are the only samplers + * for this action. 
+ */ + return next_deadline; + } + + kptimer_sample_curcpu(timer->kt_actionid, timerid, + first ? SAMPLE_FLAG_SYSTEM : 0); + + return next_deadline; +} + +/* + * Determine which of the timers fired. + */ +void +kptimer_expire(processor_t processor, int cpuid, uint64_t now) +{ + uint64_t min_deadline = UINT64_MAX; + + if (kperf_status != KPERF_SAMPLING_ON) { + if (kperf_status == KPERF_SAMPLING_SHUTDOWN) { + kptimer_stop_curcpu(processor); + return; + } else if (kperf_status == KPERF_SAMPLING_OFF) { + panic("kperf: timer fired at %llu, but sampling is disabled", now); + } else { + panic("kperf: unknown sampling state 0x%x", kperf_status); + } + } + + for (unsigned int i = 0; i < kptimer.g_ntimers; i++) { + struct kptimer *timer = &kptimer.g_timers[i]; + if (timer->kt_period_abs == 0) { + continue; + } + + uint64_t cpudeadline = kptimer_get_cpu_deadline(cpuid, i); + if (now > cpudeadline) { + uint64_t deadline = kptimer_fire(timer, i, cpudeadline, cpuid, now); + if (deadline == 0) { + kptimer_set_cpu_deadline(cpuid, i, EndOfAllTime); + } else { + kptimer_set_cpu_deadline(cpuid, i, deadline); + if (deadline < min_deadline) { + min_deadline = deadline; + } + } + } + } + if (min_deadline < UINT64_MAX) { + running_timer_enter(processor, RUNNING_TIMER_KPERF, NULL, + min_deadline, mach_absolute_time()); + } +} + +#pragma mark - start/stop + +static void +kptimer_broadcast(void (*fn)(void *)) +{ + ktrace_assert_lock_held(); + +#if defined(__x86_64__) + (void)mp_cpus_call(CPUMASK_ALL, ASYNC, fn, NULL); +#else /* defined(__x86_64__) */ + _Atomic uint32_t xcsync = 0; + cpu_broadcast_xcall((uint32_t *)&xcsync, TRUE /* include self */, fn, + &xcsync); +#endif /* !defined(__x86_64__) */ +} + +static void +kptimer_broadcast_ack(void *arg) +{ +#if defined(__x86_64__) +#pragma unused(arg) +#else /* defined(__x86_64__) */ + _Atomic uint32_t *xcsync = arg; + int pending = os_atomic_dec(xcsync, relaxed); + if (pending == 0) { + thread_wakeup(xcsync); + } +#endif /* !defined(__x86_64__) */ +} + +static void +kptimer_sample_pet_remote(void * __unused arg) +{ + if (!kperf_is_sampling()) { + return; + } + struct kptimer *timer = &kptimer.g_timers[kptimer.g_pet_timerid]; + kptimer_sample_curcpu(timer->kt_actionid, kptimer.g_pet_timerid, 0); +} + +#if !defined(__x86_64__) + +#include + +void kperf_signal_handler(void); +void +kperf_signal_handler(void) +{ + kptimer_sample_pet_remote(NULL); +} + +#endif /* !defined(__x86_64__) */ + +#include +_Atomic uint64_t mycounter = 0; + +static void +kptimer_broadcast_pet(void) +{ + atomic_fetch_add(&mycounter, 1); +#if defined(__x86_64__) + (void)mp_cpus_call(CPUMASK_OTHERS, NOSYNC, kptimer_sample_pet_remote, + NULL); +#else /* defined(__x86_64__) */ + int curcpu = cpu_number(); + for (int i = 0; i < machine_info.logical_cpu_max; i++) { + if (i != curcpu) { + cpu_signal(cpu_datap(i), SIGPkppet, NULL, NULL); + } + } +#endif /* !defined(__x86_64__) */ +} + +static void +kptimer_pet_handler(void * __unused param1, void * __unused param2) +{ + if (!kptimer.g_pet_active) { + return; + } + + struct kptimer *timer = &kptimer.g_timers[kptimer.g_pet_timerid]; + + BUF_DATA(PERF_TM_FIRE, kptimer.g_pet_timerid, 1, timer->kt_period_abs, + timer->kt_actionid); + + /* + * To get the on-CPU samples as close to this timer fire as possible, first + * broadcast to them to sample themselves. + */ + kptimer_broadcast_pet(); + + /* + * Wakeup the PET thread afterwards so it's not inadvertently sampled (it's a + * high-priority kernel thread). 
If the scheduler needs to IPI to run it, + that IPI will be handled after the IPIs issued during the broadcast. + */ + kppet_wake_thread(); + + /* + * Finally, sample this CPU, whose stacks and state have been preserved while + * running this handler. Make sure to include system measurements. + */ + kptimer_sample_curcpu(timer->kt_actionid, kptimer.g_pet_timerid, + SAMPLE_FLAG_SYSTEM); + + BUF_INFO(PERF_TM_FIRE | DBG_FUNC_END); + + /* + * The PET thread will re-arm the timer when it's done. + */ +} + +void +kptimer_pet_enter(uint64_t sampledur_abs) +{ + if (!kperf_is_sampling()) { + return; + } + + uint64_t period_abs = kptimer.g_timers[kptimer.g_pet_timerid].kt_period_abs; + uint64_t orig_period_abs = period_abs; + + if (period_abs > sampledur_abs) { + period_abs -= sampledur_abs; + } + period_abs = MAX(kptimer_min_period_abs(true), period_abs); + uint64_t deadline_abs = mach_absolute_time() + period_abs; + + BUF_INFO(PERF_PET_SCHED, orig_period_abs, period_abs, sampledur_abs, + deadline_abs); + + timer_call_enter(&kptimer.g_pet_timer, deadline_abs, TIMER_CALL_SYS_CRITICAL); +} + +static uint64_t +kptimer_earliest_deadline(processor_t processor, uint64_t now) +{ + uint64_t min_deadline = UINT64_MAX; + for (unsigned int i = 0; i < kptimer.g_ntimers; i++) { + struct kptimer *timer = &kptimer.g_timers[i]; + uint64_t cur_deadline = timer->kt_cur_deadline; + if (cur_deadline == 0) { + continue; + } + cur_deadline = dead_reckon_deadline(now, cur_deadline, + timer->kt_period_abs); + kptimer_set_cpu_deadline(processor->cpu_id, i, cur_deadline); + if (cur_deadline < min_deadline) { + min_deadline = cur_deadline; + } + } + return min_deadline; +} + +void kptimer_running_setup(processor_t processor, uint64_t now); +void +kptimer_running_setup(processor_t processor, uint64_t now) +{ + uint64_t deadline = kptimer_earliest_deadline(processor, now); + if (deadline < UINT64_MAX) { + running_timer_setup(processor, RUNNING_TIMER_KPERF, NULL, deadline, + now); + } +} + +static void +kptimer_start_remote(void *arg) +{ + processor_t processor = current_processor(); + uint64_t now = mach_absolute_time(); + uint64_t deadline = kptimer_earliest_deadline(processor, now); + if (deadline < UINT64_MAX) { + running_timer_enter(processor, RUNNING_TIMER_KPERF, NULL, deadline, + now); + } + kptimer_broadcast_ack(arg); +} + +static void +kptimer_stop_curcpu(processor_t processor) +{ + for (unsigned int i = 0; i < kptimer.g_ntimers; i++) { + kptimer_set_cpu_deadline(processor->cpu_id, i, EndOfAllTime); + } + running_timer_cancel(processor, RUNNING_TIMER_KPERF); +} + +static void +kptimer_stop_remote(void * __unused arg) +{ + assert(ml_get_interrupts_enabled() == FALSE); + kptimer_stop_curcpu(current_processor()); + kptimer_broadcast_ack(arg); +} + +void +kptimer_start(void) +{ + ktrace_assert_lock_held(); + + if (kptimer.g_started) { + return; + } + + uint64_t now = mach_absolute_time(); + unsigned int ntimers_active = 0; + kptimer.g_started = true; + for (unsigned int i = 0; i < kptimer.g_ntimers; i++) { + struct kptimer *timer = &kptimer.g_timers[i]; + if (timer->kt_period_abs == 0 || timer->kt_actionid == 0) { + /* + * No period or action means the timer is inactive. 
+ */ + continue; + } else if (!kppet_get_lightweight_pet() && + i == kptimer.g_pet_timerid) { + kptimer.g_pet_active = true; + timer_call_enter(&kptimer.g_pet_timer, now + timer->kt_period_abs, + TIMER_CALL_SYS_CRITICAL); + } else { + timer->kt_cur_deadline = now + timer->kt_period_abs; + ntimers_active++; + } + } + if (ntimers_active > 0) { + kptimer_broadcast(kptimer_start_remote); + } +} + +void +kptimer_stop(void) +{ + ktrace_assert_lock_held(); + + if (!kptimer.g_started) { + return; + } + + int intrs_en = ml_set_interrupts_enabled(FALSE); + + if (kptimer.g_pet_active) { + kptimer.g_pet_active = false; + timer_call_cancel(&kptimer.g_pet_timer); + } + kptimer.g_started = false; + kptimer_broadcast(kptimer_stop_remote); + for (unsigned int i = 0; i < kptimer.g_ntimers; i++) { + kptimer.g_timers[i].kt_cur_deadline = 0; + } + + ml_set_interrupts_enabled(intrs_en); +} + +#pragma mark - accessors + +int +kptimer_get_period(unsigned int timerid, uint64_t *period_abs) +{ + if (timerid >= kptimer.g_ntimers) { + return EINVAL; + } + *period_abs = kptimer.g_timers[timerid].kt_period_abs; + return 0; +} + +int +kptimer_set_period(unsigned int timerid, uint64_t period_abs) +{ + if (timerid >= kptimer.g_ntimers) { + return EINVAL; + } + if (kptimer.g_started) { + return EBUSY; + } + + bool pet = kptimer.g_pet_timerid == timerid; + uint64_t min_period = kptimer_min_period_abs(pet); + if (period_abs != 0 && period_abs < min_period) { + period_abs = min_period; + } + if (pet && !kppet_get_lightweight_pet()) { + kppet_config(kptimer.g_timers[timerid].kt_actionid); + } + + kptimer.g_timers[timerid].kt_period_abs = period_abs; + return 0; +} + +int +kptimer_get_action(unsigned int timerid, unsigned int *actionid) +{ + if (timerid >= kptimer.g_ntimers) { + return EINVAL; + } + *actionid = kptimer.g_timers[timerid].kt_actionid; + return 0; +} + +int +kptimer_set_action(unsigned int timerid, unsigned int actionid) +{ + if (timerid >= kptimer.g_ntimers) { + return EINVAL; + } + if (kptimer.g_started) { + return EBUSY; + } + + kptimer.g_timers[timerid].kt_actionid = actionid; + if (kptimer.g_pet_timerid == timerid && !kppet_get_lightweight_pet()) { + kppet_config(actionid); + } + return 0; +} + +unsigned int +kptimer_get_count(void) +{ + return kptimer.g_ntimers; +} + +int +kptimer_set_count(unsigned int count) +{ + kptimer_setup(); + if (kptimer.g_started) { + return EBUSY; + } + if (count > KPTIMER_MAX) { + return EINVAL; + } + kptimer.g_ntimers = count; + return 0; +} + +uint64_t +kptimer_min_period_abs(bool pet) +{ + enum kptimer_period_limit limit = 0; + if (ktrace_background_active()) { + limit = pet ? KTPL_BG_PET : KTPL_BG; + } else { + limit = pet ? KTPL_FG_PET : KTPL_FG; + } + return kptimer_minperiods_mtu[limit]; +} + +uint32_t +kptimer_get_pet_timerid(void) +{ + return kptimer.g_pet_timerid; +} + +int +kptimer_set_pet_timerid(uint32_t petid) +{ + if (kptimer.g_started) { + return EBUSY; + } + if (petid >= kptimer.g_ntimers) { + kppet_config(0); + } else { + kppet_config(kptimer.g_timers[petid].kt_actionid); + uint64_t period_abs = MAX(kptimer_min_period_abs(true), + kptimer.g_timers[petid].kt_period_abs); + kptimer.g_timers[petid].kt_period_abs = period_abs; + } + + kptimer.g_pet_timerid = petid; + + return 0; +} diff --git a/osfmk/kperf/kptimer.h b/osfmk/kperf/kptimer.h new file mode 100644 index 000000000..57211e73c --- /dev/null +++ b/osfmk/kperf/kptimer.h @@ -0,0 +1,151 @@ +/* + * Copyright (c) 2011-2018 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. + * + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ + */ + +#ifndef KPERF_KPTIMER_H +#define KPERF_KPTIMER_H + +/* + * kptimer is responsible for managing the kperf's on-CPU timers. These + * timers sample threads that are running on CPUs at a cadence determined by a + * specified period. When they fire, a handler runs the specified action and + * reprograms the timer to fire again. To get everything started or stopped, + * kptimer issues a broadcast IPI to modify kperf's multiplexed per-CPU timer, + * stored in the machine-dependent per-CPU structure. + * + * On-CPU timers are disabled when the CPU they've been programmed for goes idle + * to prevent waking up the idle CPU when it's not running anything interesting. + * This logic lives in the platform code that's responsible for entering and + * exiting idle. + * + * Traditional PET is configured here (since it's defined by identifying a timer + * to use for PET) but its mechanism is in osfmk/kperf/pet.c. Lightweight PET + * does use kptimer to increment its generation count, however. + */ + +/* + * The minimum allowed timer period depends on the type of client (foreground vs. + * background) and timer (on-CPU vs. PET). + */ +enum kptimer_period_limit { + KTPL_FG, + KTPL_BG, + KTPL_FG_PET, + KTPL_BG_PET, + KTPL_MAX, +}; + +/* + * The minimum timer periods allowed by kperf. There's no other mechanism + * to prevent interrupt storms due to kptimer. + */ +extern const uint64_t kptimer_minperiods_ns[KTPL_MAX]; + +/* + * Called from the kernel startup thread to set up kptimer. + */ +void kptimer_init(void); + +/* + * Return the minimum timer period in Mach time units. + */ +uint64_t kptimer_min_period_abs(bool pet); + +/* + * Return the number of timers available. + */ +unsigned int kptimer_get_count(void); + +/* + * Set the number of timers available to `count`. + * + * Returns 0 on success, and non-0 on error. + */ +int kptimer_set_count(unsigned int count); + +/* + * Return the period of the timer identified by `timerid` in `period_out`. + * + * Returns 0 on success, and non-0 on error. + */ +int kptimer_get_period(unsigned int timerid, uint64_t *period_out); + +/* + * Set the period of the timer identified by `timerid` to `period`. + * + * Returns non-zero on error, and zero otherwise. 
+ */ +int kptimer_set_period(unsigned int timerid, uint64_t period); + +/* + * Return the action of the timer identified by `timerid` in + * `actionid_out`. + */ +int kptimer_get_action(unsigned int timerid, uint32_t *actionid_out); + +/* + * Set the action of the timer identified by `timerid` to `actionid`. + */ +int kptimer_set_action(unsigned int timer, uint32_t actionid); + +/* + * Set the PET timer to the timer identified by `timerid`. + */ +int kptimer_set_pet_timerid(unsigned int timerid); + +/* + * Return the ID of the PET timer. + */ +unsigned int kptimer_get_pet_timerid(void); + +/* + * For PET to rearm its timer after its sampling thread took `sampledur_abs` + * to sample. + */ +void kptimer_pet_enter(uint64_t sampledur_abs); + +/* + * Start all active timers. The ktrace lock must be held. + */ +void kptimer_start(void); + +/* + * Stop all active timers, waiting for them to stop. The ktrace lock must be held. + */ +void kptimer_stop(void); + +/* + * To indicate the next timer has expired. + */ +void kptimer_expire(processor_t processor, int cpuid, uint64_t now); + +/* + * Reset the kptimer system. + */ +void kptimer_reset(void); + +#endif /* !defined(KPERF_KPTIMER_H) */ diff --git a/osfmk/kperf/lazy.c b/osfmk/kperf/lazy.c index 339a16644..ab24586a3 100644 --- a/osfmk/kperf/lazy.c +++ b/osfmk/kperf/lazy.c @@ -128,6 +128,9 @@ void kperf_lazy_cpu_sample(thread_t thread, unsigned int flags, bool interrupt) { assert(ml_get_interrupts_enabled() == FALSE); + if (!thread) { + thread = current_thread(); + } /* take a sample if this CPU's last sample time is beyond the threshold */ processor_t processor = current_processor(); diff --git a/osfmk/kperf/pet.c b/osfmk/kperf/pet.c index 09e73dc9a..d7cda41bf 100644 --- a/osfmk/kperf/pet.c +++ b/osfmk/kperf/pet.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011-2016 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2011-2018 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -26,38 +26,26 @@ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -/* all thread states code */ -#include -#include - -#include -#include -#include -#include -#include -#include -#include - -#include -#include - -/* action ID to call for each sample - * - * Address is used as the sync point for waiting. - */ -static unsigned int pet_action_id = 0; - -static lck_mtx_t *pet_lock; -static boolean_t pet_initted = FALSE; -static boolean_t pet_running = FALSE; - -/* number of callstack samples to skip for idle threads */ -static uint32_t pet_idle_rate = KPERF_PET_DEFAULT_IDLE_RATE; - /* - * Lightweight PET mode samples the system less-intrusively than normal PET + * Profile Every Thread (PET) provides a profile of all threads on the system + * when a timer fires. PET supports the "record waiting threads" mode in + * Instruments, and used to be called All Thread States (ATS). New tools should + * adopt the lightweight PET mode, which provides the same information, but with + * much less overhead. + * + * When traditional (non-lightweight) PET is active, a migrating timer call + * causes the PET thread to wake up. The timer handler also issues a broadcast + * IPI to the other CPUs, to provide a (somewhat) synchronized set of on-core + * samples. This is provided for backwards-compatibility with clients that + * expect on-core samples, when PET's timer was based off the on-core timers. 
+ Because PET sampling can take on the order of milliseconds, the PET thread + will enter a new timer deadline after it has finished sampling. This perturbs the + timer cadence by the duration of PET sampling, but it leaves the system to + work on non-profiling tasks for the duration of the timer period. + * + * Lightweight PET samples the system less-intrusively than normal PET * mode. Instead of iterating tasks and threads on each sample, it increments - * a global generation count, kperf_pet_gen, which is checked as threads are + * a global generation count, `kppet_gencount`, which is checked as threads are * context switched on-core. If the thread's local generation count is older * than the global generation, the thread samples itself. * @@ -76,96 +64,96 @@ static uint32_t pet_idle_rate = KPERF_PET_DEFAULT_IDLE_RATE; * | kperf_pet_switch_context * | * +--- PET timer fire, sample on-core threads A and B, - * increment kperf_pet_gen - */ -static boolean_t lightweight_pet = FALSE; - -/* - * Whether or not lightweight PET and sampling is active. + * increment kppet_gencount */ -boolean_t kperf_lightweight_pet_active = FALSE; - -uint32_t kperf_pet_gen = 0; - -static struct kperf_sample *pet_sample; -/* thread lifecycle */ - -static kern_return_t pet_init(void); -static void pet_start(void); -static void pet_stop(void); - -/* PET thread-only */ +#include +#include -static void pet_thread_loop(void *param, wait_result_t wr); -static void pet_thread_idle(void); -static void pet_thread_work_unit(void); +#include +#include +#include +#include +#include +#include +#include -/* listing things to sample */ +#include +#include +#if defined(__x86_64__) +#include +#endif /* defined(__x86_64__) */ -static task_array_t pet_tasks = NULL; -static vm_size_t pet_tasks_size = 0; -static vm_size_t pet_tasks_count = 0; +static LCK_MTX_DECLARE(kppet_mtx, &kperf_lck_grp); -static thread_array_t pet_threads = NULL; -static vm_size_t pet_threads_size = 0; -static vm_size_t pet_threads_count = 0; +static struct { + unsigned int g_actionid; + /* + * The idle rate controls how many sampling periods to skip if a thread + * is idle. + */ + uint32_t g_idle_rate; + bool g_setup:1; + bool g_lightweight:1; + struct kperf_sample *g_sample; -static kern_return_t pet_tasks_prepare(void); -static kern_return_t pet_tasks_prepare_internal(void); + thread_t g_sample_thread; -static kern_return_t pet_threads_prepare(task_t task); + /* + * Used by the PET thread to manage which threads and tasks to sample. 
+ */ + thread_t *g_threads; + unsigned int g_nthreads; + size_t g_threads_size; -/* sampling */ + task_t *g_tasks; + unsigned int g_ntasks; + size_t g_tasks_size; +} kppet = { + .g_actionid = 0, + .g_idle_rate = KPERF_PET_DEFAULT_IDLE_RATE, +}; -static void pet_sample_all_tasks(uint32_t idle_rate); -static void pet_sample_task(task_t task, uint32_t idle_rate); -static void pet_sample_thread(int pid, task_t task, thread_t thread, - uint32_t idle_rate); +bool kppet_lightweight_active = false; +_Atomic uint32_t kppet_gencount = 0; -/* functions called by other areas of kperf */ +static uint64_t kppet_sample_tasks(uint32_t idle_rate); +static void kppet_thread(void * param, wait_result_t wr); -void -kperf_pet_fire_before(void) +static void +kppet_lock_assert_owned(void) { - if (!pet_initted || !pet_running) { - return; - } - - if (lightweight_pet) { - BUF_INFO(PERF_PET_SAMPLE); - OSIncrementAtomic(&kperf_pet_gen); - } + lck_mtx_assert(&kppet_mtx, LCK_MTX_ASSERT_OWNED); } -void -kperf_pet_fire_after(void) +static void +kppet_lock(void) { - if (!pet_initted || !pet_running) { - return; - } + lck_mtx_lock(&kppet_mtx); +} - if (lightweight_pet) { - kperf_timer_pet_rearm(0); - } else { - thread_wakeup(&pet_action_id); - } +static void +kppet_unlock(void) +{ + lck_mtx_unlock(&kppet_mtx); } void -kperf_pet_on_cpu(thread_t thread, thread_continue_t continuation, +kppet_on_cpu(thread_t thread, thread_continue_t continuation, uintptr_t *starting_fp) { assert(thread != NULL); assert(ml_get_interrupts_enabled() == FALSE); - uint32_t actionid = pet_action_id; + uint32_t actionid = kppet.g_actionid; if (actionid == 0) { return; } - if (thread->kperf_pet_gen != kperf_pet_gen) { - BUF_VERB(PERF_PET_SAMPLE_THREAD | DBG_FUNC_START, kperf_pet_gen, thread->kperf_pet_gen); + if (thread->kperf_pet_gen != atomic_load(&kppet_gencount)) { + BUF_VERB(PERF_PET_SAMPLE_THREAD | DBG_FUNC_START, + atomic_load_explicit(&kppet_gencount, + memory_order_relaxed), thread->kperf_pet_gen); task_t task = get_threadtask(thread); struct kperf_context ctx = { @@ -192,183 +180,127 @@ kperf_pet_on_cpu(thread_t thread, thread_continue_t continuation, BUF_VERB(PERF_PET_SAMPLE_THREAD | DBG_FUNC_END); } else { - BUF_VERB(PERF_PET_SAMPLE_THREAD, kperf_pet_gen, thread->kperf_pet_gen); - } -} - -void -kperf_pet_config(unsigned int action_id) -{ - if (action_id == 0 && !pet_initted) { - return; - } - - kern_return_t kr = pet_init(); - if (kr != KERN_SUCCESS) { - return; - } - - lck_mtx_lock(pet_lock); - - BUF_INFO(PERF_PET_THREAD, 3, action_id); - - if (action_id == 0) { - pet_stop(); - } else { - pet_start(); + BUF_VERB(PERF_PET_SAMPLE_THREAD, + os_atomic_load(&kppet_gencount, relaxed), thread->kperf_pet_gen); } - - pet_action_id = action_id; - - lck_mtx_unlock(pet_lock); } -/* handle resource allocation */ +#pragma mark - state transitions -void -pet_start(void) +/* + * Lazily initialize PET. The PET thread never exits once PET has been used + * once. 
+ */ +static void +kppet_setup(void) { - lck_mtx_assert(pet_lock, LCK_MTX_ASSERT_OWNED); - - if (pet_running) { + if (kppet.g_setup) { return; } - pet_sample = kalloc(sizeof(struct kperf_sample)); - if (!pet_sample) { - return; + kern_return_t kr = kernel_thread_start(kppet_thread, NULL, + &kppet.g_sample_thread); + if (kr != KERN_SUCCESS) { + panic("kperf: failed to create PET thread %d", kr); } - pet_running = TRUE; + thread_set_thread_name(kppet.g_sample_thread, "kperf-pet-sampling"); + kppet.g_setup = true; } void -pet_stop(void) +kppet_config(unsigned int actionid) { - lck_mtx_assert(pet_lock, LCK_MTX_ASSERT_OWNED); - - if (!pet_initted) { + /* + * Resetting kperf shouldn't get the PET thread started. + */ + if (actionid == 0 && !kppet.g_setup) { return; } - if (pet_tasks != NULL) { - assert(pet_tasks_size != 0); - kfree(pet_tasks, pet_tasks_size); + kppet_setup(); - pet_tasks = NULL; - pet_tasks_size = 0; - pet_tasks_count = 0; - } + kppet_lock(); - if (pet_threads != NULL) { - assert(pet_threads_size != 0); - kfree(pet_threads, pet_threads_size); - - pet_threads = NULL; - pet_threads_size = 0; - pet_threads_count = 0; - } + kppet.g_actionid = actionid; - if (pet_sample != NULL) { - kfree(pet_sample, sizeof(struct kperf_sample)); - pet_sample = NULL; - } - - pet_running = FALSE; -} - -/* - * Lazily initialize PET. The PET thread never exits once PET has been used - * once. - */ -static kern_return_t -pet_init(void) -{ - if (pet_initted) { - return KERN_SUCCESS; - } - - /* make the sync point */ - pet_lock = lck_mtx_alloc_init(&kperf_lck_grp, NULL); - assert(pet_lock != NULL); - - /* create the thread */ - - BUF_INFO(PERF_PET_THREAD, 0); - thread_t t; - kern_return_t kr = kernel_thread_start(pet_thread_loop, NULL, &t); - if (kr != KERN_SUCCESS) { - lck_mtx_free(pet_lock, &kperf_lck_grp); - return kr; + if (actionid > 0) { + if (!kppet.g_sample) { + kppet.g_sample = kalloc_tag(sizeof(*kppet.g_sample), + VM_KERN_MEMORY_DIAG); + } + } else { + if (kppet.g_tasks) { + assert(kppet.g_tasks_size != 0); + kfree(kppet.g_tasks, kppet.g_tasks_size); + kppet.g_tasks = NULL; + kppet.g_tasks_size = 0; + kppet.g_ntasks = 0; + } + if (kppet.g_threads) { + assert(kppet.g_threads_size != 0); + kfree(kppet.g_threads, kppet.g_threads_size); + kppet.g_threads = NULL; + kppet.g_threads_size = 0; + kppet.g_nthreads = 0; + } + if (kppet.g_sample != NULL) { + kfree(kppet.g_sample, sizeof(*kppet.g_sample)); + kppet.g_sample = NULL; + } } - thread_set_thread_name(t, "kperf sampling"); - /* let the thread hold the only reference */ - thread_deallocate(t); - - pet_initted = TRUE; - - return KERN_SUCCESS; + kppet_unlock(); } -/* called by PET thread only */ - -static void -pet_thread_work_unit(void) +void +kppet_reset(void) { - pet_sample_all_tasks(pet_idle_rate); + kppet_config(0); + kppet_set_idle_rate(KPERF_PET_DEFAULT_IDLE_RATE); + kppet_set_lightweight_pet(0); } -static void -pet_thread_idle(void) +void +kppet_wake_thread(void) { - lck_mtx_assert(pet_lock, LCK_MTX_ASSERT_OWNED); - - do { - (void)lck_mtx_sleep(pet_lock, LCK_SLEEP_DEFAULT, &pet_action_id, - THREAD_UNINT); - } while (pet_action_id == 0); + thread_wakeup(&kppet); } __attribute__((noreturn)) static void -pet_thread_loop(void *param, wait_result_t wr) +kppet_thread(void * __unused param, wait_result_t __unused wr) { -#pragma unused(param, wr) - uint64_t work_unit_ticks; - - BUF_INFO(PERF_PET_THREAD, 1); + kppet_lock(); - lck_mtx_lock(pet_lock); for (;;) { BUF_INFO(PERF_PET_IDLE); - pet_thread_idle(); + + do { + (void)lck_mtx_sleep(&kppet_mtx, 
LCK_SLEEP_DEFAULT, &kppet, + THREAD_UNINT); + } while (kppet.g_actionid == 0); BUF_INFO(PERF_PET_RUN); - /* measure how long the work unit takes */ - work_unit_ticks = mach_absolute_time(); - pet_thread_work_unit(); - work_unit_ticks = mach_absolute_time() - work_unit_ticks; + uint64_t sampledur_abs = kppet_sample_tasks(kppet.g_idle_rate); - /* re-program the timer */ - kperf_timer_pet_rearm(work_unit_ticks); + kptimer_pet_enter(sampledur_abs); } } -/* sampling */ +#pragma mark - sampling static void -pet_sample_thread(int pid, task_t task, thread_t thread, uint32_t idle_rate) +kppet_sample_thread(int pid, task_t task, thread_t thread, uint32_t idle_rate) { - lck_mtx_assert(pet_lock, LCK_MTX_ASSERT_OWNED); + kppet_lock_assert_owned(); uint32_t sample_flags = SAMPLE_FLAG_IDLE_THREADS | SAMPLE_FLAG_THREAD_ONLY; BUF_VERB(PERF_PET_SAMPLE_THREAD | DBG_FUNC_START); - /* work out the context */ struct kperf_context ctx = { .cur_thread = thread, .cur_task = task, @@ -379,7 +311,7 @@ pet_sample_thread(int pid, task_t task, thread_t thread, uint32_t idle_rate) /* * Clean a dirty thread and skip callstack sample if the thread was not - * dirty and thread has skipped less than pet_idle_rate samples. + * dirty and thread had skipped less than `idle_rate` samples. */ if (thread_dirty) { kperf_thread_set_dirty(thread, FALSE); @@ -388,202 +320,183 @@ pet_sample_thread(int pid, task_t task, thread_t thread, uint32_t idle_rate) } thread->kperf_pet_cnt++; - kperf_sample(pet_sample, &ctx, pet_action_id, sample_flags); - kperf_sample_user(&pet_sample->usample, &ctx, pet_action_id, + kperf_sample(kppet.g_sample, &ctx, kppet.g_actionid, sample_flags); + kperf_sample_user(&kppet.g_sample->usample, &ctx, kppet.g_actionid, sample_flags); BUF_VERB(PERF_PET_SAMPLE_THREAD | DBG_FUNC_END); } static kern_return_t -pet_threads_prepare(task_t task) +kppet_threads_prepare(task_t task) { - lck_mtx_assert(pet_lock, LCK_MTX_ASSERT_OWNED); + kppet_lock_assert_owned(); vm_size_t threads_size_needed; - if (task == TASK_NULL) { - return KERN_INVALID_ARGUMENT; - } - for (;;) { task_lock(task); if (!task->active) { task_unlock(task); - return KERN_FAILURE; } - /* do we have the memory we need? */ + /* + * With the task locked, figure out if enough space has been allocated to + * contain all of the thread references. + */ threads_size_needed = task->thread_count * sizeof(thread_t); - if (threads_size_needed <= pet_threads_size) { + if (threads_size_needed <= kppet.g_threads_size) { break; } - /* not enough memory, unlock the task and increase allocation */ + /* + * Otherwise, allocate more and try again. 
+ */ task_unlock(task); - if (pet_threads_size != 0) { - kfree(pet_threads, pet_threads_size); + if (kppet.g_threads_size != 0) { + kfree(kppet.g_threads, kppet.g_threads_size); } assert(threads_size_needed > 0); - pet_threads_size = threads_size_needed; + kppet.g_threads_size = threads_size_needed; - pet_threads = kalloc(pet_threads_size); - if (pet_threads == NULL) { - pet_threads_size = 0; + kppet.g_threads = kalloc_tag(kppet.g_threads_size, VM_KERN_MEMORY_DIAG); + if (kppet.g_threads == NULL) { + kppet.g_threads_size = 0; return KERN_RESOURCE_SHORTAGE; } } - /* have memory and the task is locked and active */ thread_t thread; - pet_threads_count = 0; + kppet.g_nthreads = 0; queue_iterate(&(task->threads), thread, thread_t, task_threads) { thread_reference_internal(thread); - pet_threads[pet_threads_count++] = thread; + kppet.g_threads[kppet.g_nthreads++] = thread; } - /* can unlock task now that threads are referenced */ task_unlock(task); - return (pet_threads_count == 0) ? KERN_FAILURE : KERN_SUCCESS; + return (kppet.g_nthreads > 0) ? KERN_SUCCESS : KERN_FAILURE; } +/* + * Sample a `task`, using `idle_rate` to control whether idle threads need to be + * re-sampled. + * + * The task must be referenced. + */ static void -pet_sample_task(task_t task, uint32_t idle_rate) +kppet_sample_task(task_t task, uint32_t idle_rate) { - lck_mtx_assert(pet_lock, LCK_MTX_ASSERT_OWNED); + kppet_lock_assert_owned(); + assert(task != kernel_task); + if (task == kernel_task) { + return; + } BUF_VERB(PERF_PET_SAMPLE_TASK | DBG_FUNC_START); int pid = task_pid(task); - if (kperf_action_has_task(pet_action_id)) { + if (kperf_action_has_task(kppet.g_actionid)) { struct kperf_context ctx = { .cur_task = task, .cur_pid = pid, }; - kperf_sample(pet_sample, &ctx, pet_action_id, SAMPLE_FLAG_TASK_ONLY); + kperf_sample(kppet.g_sample, &ctx, kppet.g_actionid, + SAMPLE_FLAG_TASK_ONLY); } - if (!kperf_action_has_thread(pet_action_id)) { + if (!kperf_action_has_thread(kppet.g_actionid)) { BUF_VERB(PERF_PET_SAMPLE_TASK | DBG_FUNC_END); return; } - kern_return_t kr = KERN_SUCCESS; - /* * Suspend the task to see an atomic snapshot of all its threads. This - * is expensive, and disruptive. + * is expensive and disruptive. */ - bool needs_suspend = task != kernel_task; - if (needs_suspend) { - kr = task_suspend_internal(task); - if (kr != KERN_SUCCESS) { - BUF_VERB(PERF_PET_SAMPLE_TASK | DBG_FUNC_END, 1); - return; - } - needs_suspend = true; + kern_return_t kr = task_suspend_internal(task); + if (kr != KERN_SUCCESS) { + BUF_VERB(PERF_PET_SAMPLE_TASK | DBG_FUNC_END, 1); + return; } - kr = pet_threads_prepare(task); + kr = kppet_threads_prepare(task); if (kr != KERN_SUCCESS) { BUF_INFO(PERF_PET_ERROR, ERR_THREAD, kr); goto out; } - for (unsigned int i = 0; i < pet_threads_count; i++) { - thread_t thread = pet_threads[i]; + for (unsigned int i = 0; i < kppet.g_nthreads; i++) { + thread_t thread = kppet.g_threads[i]; assert(thread != THREAD_NULL); - /* - * Do not sample the thread if it was on a CPU when the timer fired. 
- */ - int cpu = 0; - for (cpu = 0; cpu < machine_info.logical_cpu_max; cpu++) { - if (kperf_tid_on_cpus[cpu] == thread_tid(thread)) { - break; - } - } + kppet_sample_thread(pid, task, thread, idle_rate); - /* the thread was not on a CPU */ - if (cpu == machine_info.logical_cpu_max) { - pet_sample_thread(pid, task, thread, idle_rate); - } - - thread_deallocate(pet_threads[i]); + thread_deallocate(kppet.g_threads[i]); } out: - if (needs_suspend) { - task_resume_internal(task); - } + task_resume_internal(task); - BUF_VERB(PERF_PET_SAMPLE_TASK | DBG_FUNC_END, pet_threads_count); + BUF_VERB(PERF_PET_SAMPLE_TASK | DBG_FUNC_END, kppet.g_nthreads); } +/* + * Store and reference all tasks on the system, so they can be safely inspected + * outside the `tasks_threads_lock`. + */ static kern_return_t -pet_tasks_prepare_internal(void) +kppet_tasks_prepare(void) { - lck_mtx_assert(pet_lock, LCK_MTX_ASSERT_OWNED); + kppet_lock_assert_owned(); - vm_size_t tasks_size_needed = 0; + vm_size_t size_needed = 0; for (;;) { lck_mtx_lock(&tasks_threads_lock); - /* do we have the memory we need? */ - tasks_size_needed = tasks_count * sizeof(task_t); - if (tasks_size_needed <= pet_tasks_size) { + /* + * With the lock held, break out of the lock/unlock loop if + * there's enough space to store all the tasks. + */ + size_needed = tasks_count * sizeof(task_t); + if (size_needed <= kppet.g_tasks_size) { break; } - /* unlock and allocate more memory */ + /* + * Otherwise, allocate more memory outside of the lock. + */ lck_mtx_unlock(&tasks_threads_lock); - /* grow task array */ - if (tasks_size_needed > pet_tasks_size) { - if (pet_tasks_size != 0) { - kfree(pet_tasks, pet_tasks_size); + if (size_needed > kppet.g_tasks_size) { + if (kppet.g_tasks_size != 0) { + kfree(kppet.g_tasks, kppet.g_tasks_size); } - assert(tasks_size_needed > 0); - pet_tasks_size = tasks_size_needed; + assert(size_needed > 0); + kppet.g_tasks_size = size_needed; - pet_tasks = (task_array_t)kalloc(pet_tasks_size); - if (pet_tasks == NULL) { - pet_tasks_size = 0; + kppet.g_tasks = kalloc_tag(kppet.g_tasks_size, VM_KERN_MEMORY_DIAG); + if (!kppet.g_tasks) { + kppet.g_tasks_size = 0; return KERN_RESOURCE_SHORTAGE; } } } - return KERN_SUCCESS; -} - -static kern_return_t -pet_tasks_prepare(void) -{ - lck_mtx_assert(pet_lock, LCK_MTX_ASSERT_OWNED); - - /* allocate space and take the tasks_threads_lock */ - kern_return_t kr = pet_tasks_prepare_internal(); - if (KERN_SUCCESS != kr) { - return kr; - } - lck_mtx_assert(&tasks_threads_lock, LCK_MTX_ASSERT_OWNED); - - /* make sure the tasks are not deallocated after dropping the lock */ - task_t task; - pet_tasks_count = 0; + task_t task = TASK_NULL; + kppet.g_ntasks = 0; queue_iterate(&tasks, task, task_t, tasks) { - if (task != kernel_task) { + bool eligible_task = task != kernel_task; + if (eligible_task) { task_reference_internal(task); - pet_tasks[pet_tasks_count++] = task; + kppet.g_tasks[kppet.g_ntasks++] = task; } } @@ -592,72 +505,72 @@ pet_tasks_prepare(void) return KERN_SUCCESS; } -static void -pet_sample_all_tasks(uint32_t idle_rate) +static uint64_t +kppet_sample_tasks(uint32_t idle_rate) { - lck_mtx_assert(pet_lock, LCK_MTX_ASSERT_OWNED); - assert(pet_action_id > 0); + kppet_lock_assert_owned(); + assert(kppet.g_actionid > 0); + + uint64_t start_abs = mach_absolute_time(); BUF_INFO(PERF_PET_SAMPLE | DBG_FUNC_START); - kern_return_t kr = pet_tasks_prepare(); + kern_return_t kr = kppet_tasks_prepare(); if (kr != KERN_SUCCESS) { BUF_INFO(PERF_PET_ERROR, ERR_TASK, kr); - BUF_INFO(PERF_PET_SAMPLE | 
DBG_FUNC_END, 0); - return; + BUF_INFO(PERF_PET_SAMPLE | DBG_FUNC_END); + return mach_absolute_time() - start_abs; } - for (unsigned int i = 0; i < pet_tasks_count; i++) { - task_t task = pet_tasks[i]; - - pet_sample_task(task, idle_rate); - } - - for (unsigned int i = 0; i < pet_tasks_count; i++) { - task_deallocate(pet_tasks[i]); + for (unsigned int i = 0; i < kppet.g_ntasks; i++) { + task_t task = kppet.g_tasks[i]; + assert(task != TASK_NULL); + kppet_sample_task(task, idle_rate); + task_deallocate(task); + kppet.g_tasks[i] = TASK_NULL; } - BUF_INFO(PERF_PET_SAMPLE | DBG_FUNC_END, pet_tasks_count); + BUF_INFO(PERF_PET_SAMPLE | DBG_FUNC_END, kppet.g_ntasks); + kppet.g_ntasks = 0; + return mach_absolute_time() - start_abs; } -/* support sysctls */ +#pragma mark - sysctl accessors int -kperf_get_pet_idle_rate(void) +kppet_get_idle_rate(void) { - return pet_idle_rate; + return kppet.g_idle_rate; } int -kperf_set_pet_idle_rate(int val) +kppet_set_idle_rate(int new_idle_rate) { - pet_idle_rate = val; - + kppet.g_idle_rate = new_idle_rate; return 0; } +void +kppet_lightweight_active_update(void) +{ + kppet_lightweight_active = (kperf_is_sampling() && kppet.g_lightweight); + kperf_on_cpu_update(); +} + int -kperf_get_lightweight_pet(void) +kppet_get_lightweight_pet(void) { - return lightweight_pet; + return kppet.g_lightweight; } int -kperf_set_lightweight_pet(int val) +kppet_set_lightweight_pet(int on) { - if (kperf_sampling_status() == KPERF_SAMPLING_ON) { + if (kperf_is_sampling()) { return EBUSY; } - lightweight_pet = (val == 1); - kperf_lightweight_pet_active_update(); - + kppet.g_lightweight = (on == 1); + kppet_lightweight_active_update(); return 0; } - -void -kperf_lightweight_pet_active_update(void) -{ - kperf_lightweight_pet_active = (kperf_sampling_status() && lightweight_pet); - kperf_on_cpu_update(); -} diff --git a/osfmk/kperf/pet.h b/osfmk/kperf/pet.h index 8f9d54279..6744c3791 100644 --- a/osfmk/kperf/pet.h +++ b/osfmk/kperf/pet.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2011-2018 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -27,29 +27,44 @@ */ #include +#include -#define KPERF_PET_DEFAULT_IDLE_RATE (15) +#define KPERF_PET_DEFAULT_IDLE_RATE 15 -extern boolean_t kperf_lightweight_pet_active; -extern uint32_t kperf_pet_gen; +extern bool kppet_lightweight_active; +extern _Atomic uint32_t kppet_gencount; -/* prepare PET to be able to fire action with given ID, or disable PET */ -void kperf_pet_config(unsigned int action_id); +/* + * If `actionid` is non-zero, set up PET to sample the action. Otherwise, + * disable PET. + */ +void kppet_config(unsigned int actionid); -/* fire off a PET sample, both before and after on-core samples */ -void kperf_pet_fire_before(void); -void kperf_pet_fire_after(void); +/* + * Reset PET back to its default settings. + */ +void kppet_reset(void); -/* notify PET of new threads switching on */ -void kperf_pet_on_cpu(thread_t thread, thread_continue_t continuation, +/* + * Notify PET that new threads are switching on-CPU. + */ +void kppet_on_cpu(thread_t thread, thread_continue_t continuation, uintptr_t *starting_frame); -/* get/set rate at which idle threads are sampled by PET */ -int kperf_get_pet_idle_rate(void); -int kperf_set_pet_idle_rate(int val); +/* + * Wake the PET thread from its timer handler. 
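+ *
+ * The walk itself (see kppet_sample_tasks in pet.c) suspends and resumes every
+ * eligible task, which can block, so the timer handler only signals the
+ * dedicated PET thread here instead of sampling inline.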
+ */ +void kppet_wake_thread(void); -/* get/set whether lightweight PET is enabled */ -int kperf_get_lightweight_pet(void); -int kperf_set_lightweight_pet(int val); +/* + * For configuring PET from the sysctl interface. + */ +int kppet_get_idle_rate(void); +int kppet_set_idle_rate(int new_idle_rate); +int kppet_get_lightweight_pet(void); +int kppet_set_lightweight_pet(int on); -void kperf_lightweight_pet_active_update(void); +/* + * Update whether lightweight PET is active when turning sampling on and off. + */ +void kppet_lightweight_active_update(void); diff --git a/osfmk/kperf/thread_samplers.c b/osfmk/kperf/thread_samplers.c index ca2d0c67d..867215fb1 100644 --- a/osfmk/kperf/thread_samplers.c +++ b/osfmk/kperf/thread_samplers.c @@ -91,12 +91,12 @@ kperf_thread_info_runmode_legacy(thread_t thread) kperf_state |= KPERF_TI_IDLE; } -#if !CONFIG_EMBEDDED +#if defined(XNU_TARGET_OS_OSX) /* on desktop, if state is blank, leave not idle set */ if (kperf_state == 0) { return TH_IDLE << 16; } -#endif /* !CONFIG_EMBEDDED */ +#endif /* defined(XNU_TARGET_OS_OSX) */ /* high two bytes are inverted mask, low two bytes are normal */ return ((~kperf_state & 0xffff) << 16) | (kperf_state & 0xffff); @@ -247,7 +247,10 @@ kperf_thread_snapshot_sample(struct kperf_thread_snapshot *thsn, } thsn->kpthsn_suspend_count = thread->suspend_count; - thsn->kpthsn_io_tier = proc_get_effective_thread_policy(thread, TASK_POLICY_IO); + /* + * Only have room for 8-bits in the trace event, so truncate here. + */ + thsn->kpthsn_io_tier = (uint8_t)proc_get_effective_thread_policy(thread, TASK_POLICY_IO); BUF_VERB(PERF_TI_SNAPSAMPLE | DBG_FUNC_END); } diff --git a/osfmk/kperf/x86_64/kperf_mp.c b/osfmk/kperf/x86_64/kperf_mp.c deleted file mode 100644 index 8cbb676e9..000000000 --- a/osfmk/kperf/x86_64/kperf_mp.c +++ /dev/null @@ -1,89 +0,0 @@ -/* - * Copyright (c) 2011-2016 Apple Computer, Inc. All rights reserved. - * - * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * - * This file contains Original Code and/or Modifications of Original Code - * as defined in and that are subject to the Apple Public Source License - * Version 2.0 (the 'License'). You may not use this file except in - * compliance with the License. The rights granted to you under the License - * may not be used to create, or enable the creation or redistribution of, - * unlawful or unlicensed copies of an Apple operating system, or to - * circumvent, violate, or enable the circumvention or violation of, any - * terms of an Apple operating system software license agreement. - * - * Please obtain a copy of the License at - * http://www.opensource.apple.com/apsl/ and read it before using this file. - * - * The Original Code and all software distributed under the License are - * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER - * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, - * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. - * Please see the License for the specific language governing rights and - * limitations under the License. 
- * - * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ - */ - -#include -#include -#include -#include -#include -#include -#include -#include - -bool -kperf_mp_broadcast_other_running(struct kperf_timer *trigger) -{ - int current_cpu = cpu_number(); - int ncpus = machine_info.logical_cpu_max; - bool system_only_self = true; - cpumask_t cpu_mask = 0; - - for (int i = 0; i < ncpus; i++) { - uint64_t i_bit = UINT64_C(1) << i; - processor_t processor = cpu_to_processor(i); - - /* do not IPI processors that are not scheduling threads */ - if (processor == PROCESSOR_NULL || - processor->state != PROCESSOR_RUNNING || - processor->active_thread == THREAD_NULL) { -#if DEVELOPMENT || DEBUG - BUF_VERB(PERF_TM_SKIPPED, i, - processor != PROCESSOR_NULL ? processor->state : 0, - processor != PROCESSOR_NULL ? processor->active_thread : 0); -#endif /* DEVELOPMENT || DEBUG */ - continue; - } - - /* don't run the handler on the current processor */ - if (i == current_cpu) { - system_only_self = false; - continue; - } - - /* nor processors that have not responded to the last IPI */ - uint64_t already_pending = atomic_fetch_or_explicit( - &trigger->pending_cpus, i_bit, - memory_order_relaxed); - if (already_pending & i_bit) { -#if DEVELOPMENT || DEBUG - BUF_VERB(PERF_TM_PENDING, i_bit, already_pending); - atomic_fetch_add_explicit(&kperf_pending_ipis, 1, - memory_order_relaxed); -#endif /* DEVELOPMENT || DEBUG */ - continue; - } - - cpu_mask |= cpu_to_cpumask(i); - } - - if (cpu_mask != 0) { - mp_cpus_call(cpu_mask, NOSYNC, kperf_ipi_handler, trigger); - } - - return system_only_self; -} diff --git a/osfmk/libsa/stdlib.h b/osfmk/libsa/stdlib.h index ad2788148..e1e516b52 100644 --- a/osfmk/libsa/stdlib.h +++ b/osfmk/libsa/stdlib.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -67,7 +67,6 @@ #endif extern int atoi(const char *); -extern int atoi_term(char *, char **); extern char *itoa(int, char *); extern void free(void *); diff --git a/osfmk/libsa/string.h b/osfmk/libsa/string.h index 1bcf828cc..a5a10300a 100644 --- a/osfmk/libsa/string.h +++ b/osfmk/libsa/string.h @@ -44,6 +44,10 @@ #include #endif +#if defined(KERNEL) +#include +#endif + #ifdef __cplusplus extern "C" { #endif @@ -66,27 +70,46 @@ extern void *memmove(void *, const void *, size_t); extern void *memset(void *, int, size_t); extern int memset_s(void *, size_t, int, size_t); +#ifdef XNU_KERNEL_PRIVATE +/* memcmp_zero_ptr_aligned() checks string s of n bytes contains all zeros. + * Address and size of the string s must be pointer-aligned. + * Return 0 if true, 1 otherwise. Also return 0 if n is 0. + */ +extern unsigned long memcmp_zero_ptr_aligned(const void *s, size_t n); +#endif + extern size_t strlen(const char *); extern size_t strnlen(const char *, size_t); -/* strcpy() is being deprecated. Please use strlcpy() instead. */ +/* strcpy() and strncpy() are deprecated. Please use strlcpy() instead. */ +__kpi_deprecated_arm64_macos_unavailable extern char *strcpy(char *, const char *) __deprecated; -extern char *strncpy(char *, const char *, size_t); -extern size_t strlcat(char *, const char *, size_t); -extern size_t strlcpy(char *, const char *, size_t); +__kpi_deprecated_arm64_macos_unavailable +extern char *strncpy(char *, const char *, size_t); -/* strcat() is being deprecated. Please use strlcat() instead. */ +/* strcat() and strncat() are deprecated. Please use strlcat() instead. 
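+ *
+ * A minimal sketch of the replacement pattern (illustrative only; buf, base
+ * and suffix are hypothetical):
+ *
+ *	char buf[64];
+ *	(void)strlcpy(buf, base, sizeof(buf));
+ *	if (strlcat(buf, suffix, sizeof(buf)) >= sizeof(buf)) {
+ *		... truncated, but buf is still NUL-terminated ...
+ *	}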
*/ +__kpi_deprecated_arm64_macos_unavailable extern char *strcat(char *, const char *) __deprecated; + +__kpi_deprecated_arm64_macos_unavailable extern char *strncat(char *, const char *, size_t); -/* strcmp() is being deprecated. Please use strncmp() instead. */ +/* strcmp() is deprecated. Please use strncmp() instead. */ +__kpi_deprecated_arm64_macos_unavailable extern int strcmp(const char *, const char *); + +extern size_t strlcpy(char *, const char *, size_t); +extern size_t strlcat(char *, const char *, size_t); extern int strncmp(const char *, const char *, size_t); extern int strcasecmp(const char *s1, const char *s2); extern int strncasecmp(const char *s1, const char *s2, size_t n); -extern char *strnstr(char *s, const char *find, size_t slen); +#ifdef XNU_KERNEL_PRIVATE +extern const char *strnstr(const char *s, const char *find, size_t slen); +#else +extern char *strnstr(const char *s, const char *find, size_t slen); +#endif extern char *strchr(const char *s, int c); #ifdef XNU_KERNEL_PRIVATE extern char *strrchr(const char *s, int c); @@ -133,8 +156,8 @@ __nochk_bcopy(const void *src, void *dest, size_t len) /* _FORTIFY_SOURCE disabled */ #else /* _chk macros */ -#ifdef XNU_KERNEL_PRIVATE -/* Stricter checking in xnu than kexts. When type is set to 1, __builtin_object_size +#if defined XNU_KERNEL_PRIVATE || defined(_FORTIFY_SOURCE_STRICT) +/* Stricter checking is optional for kexts. When type is set to 1, __builtin_object_size * returns the size of the closest surrounding sub-object, which would detect copying past * the end of a struct member. */ #define BOS_COPY_TYPE 1 diff --git a/osfmk/libsa/types.h b/osfmk/libsa/types.h index 9a9326fc7..d2aaeb6ac 100644 --- a/osfmk/libsa/types.h +++ b/osfmk/libsa/types.h @@ -47,6 +47,7 @@ #ifndef _MACH_TYPES_H_ #define _MACH_TYPES_H_ +#include #include "libsa/machine/types.h" #ifndef _SIZE_T diff --git a/osfmk/lockd/Makefile b/osfmk/lockd/Makefile index a2591e477..5db8c4bf4 100644 --- a/osfmk/lockd/Makefile +++ b/osfmk/lockd/Makefile @@ -42,7 +42,7 @@ COMP_FILES = ${MIG_KUSRC} do_build_all:: $(COMP_FILES) ${MIG_KUSRC} : lockd_mach.defs - $(call makelog,$(ColorM)MIG$(Color0) $(ColorF)$@$(Color0)) + @$(LOG_MIG) $@ $(_v)${MIG} ${MIGFLAGS} ${MIGKUFLAGS} \ -user lockd_mach.c \ -header lockd_mach.h \ diff --git a/osfmk/mach/Makefile b/osfmk/mach/Makefile index 310027b65..08d6d7511 100644 --- a/osfmk/mach/Makefile +++ b/osfmk/mach/Makefile @@ -50,6 +50,7 @@ MIG_DEFS = \ vm_map.defs MIG_PRIVATE_DEFS = \ + mach_eventlink.defs \ restartable.defs MACH_PRIVATE_DEFS = \ @@ -110,6 +111,7 @@ MIG_UUHDRS = \ MIGINCLUDES = ${MIG_UUHDRS} ${MIG_USHDRS} DATAFILES = \ + audit_triggers_types.h \ boolean.h \ clock_types.h \ dyld_kernel.h \ @@ -184,6 +186,7 @@ PRIVATE_DATAFILES = \ arcade_upcall.defs \ host_info.h \ ktrace_background.defs \ + mach_eventlink_types.h \ mach_host.defs \ mach_traps.h \ memory_object_types.h \ @@ -224,6 +227,7 @@ EXPORT_MI_LIST = \ coalition.h \ mach_interface.h \ resource_monitors.h \ + mach_eventlink_types.h \ sfi_class.h \ ${DATAFILES} @@ -236,7 +240,7 @@ ${MIGINCLUDES} : ${MIG_TYPES} ${MIG_UUHDRS} : \ %.h : %.defs - $(call makelog,$(ColorM)MIG$(Color0) $(ColorF)$@$(Color0)) + @$(LOG_MIG) $@ $(_v)$(MIG) $(MIGFLAGS) \ -server /dev/null \ -user /dev/null \ @@ -245,7 +249,7 @@ ${MIG_UUHDRS} : \ ${MIG_USHDRS} : \ %_server.h : %.defs - $(call makelog,$(ColorM)MIG$(Color0) $(ColorF)$@$(Color0)) + @$(LOG_MIG) $@ $(_v)$(MIG) $(MIGFLAGS) \ -server /dev/null \ -user /dev/null \ @@ -310,6 +314,7 @@ MIG_KSHDRS = \ arcade_register_server.h \ 
clock_server.h \ clock_priv_server.h \ + mach_eventlink_server.h \ exc_server.h \ host_priv_server.h \ host_security_server.h \ @@ -337,6 +342,7 @@ MIG_KSSRC = \ arcade_register_server.c \ clock_server.c \ clock_priv_server.c \ + mach_eventlink_server.c \ exc_server.c \ host_priv_server.c \ host_security_server.c \ @@ -384,7 +390,7 @@ ${COMP_FILES} : ${MIG_TYPES} ${MIG_KUSRC} : \ %_user.c : %.defs - $(call makelog,$(ColorM)MIG$(Color0) $(ColorF)$@$(Color0)) + @$(LOG_MIG) $@ $(_v)${MIG} ${MIGFLAGS} ${MIGKUFLAGS} \ -user $*_user.c \ -header $*.h \ @@ -394,7 +400,7 @@ ${MIG_KUSRC} : \ ${MIG_KSSRC}: \ %_server.c : %.defs - $(call makelog,$(ColorM)MIG$(Color0) $(ColorF)$@$(Color0)) + @$(LOG_MIG) $@ $(_v)${MIG} ${MIGFLAGS} ${MIGKSFLAGS} \ -user /dev/null \ -header /dev/null \ diff --git a/osfmk/mach/arm/Makefile b/osfmk/mach/arm/Makefile index 2edb04c4e..569b92786 100644 --- a/osfmk/mach/arm/Makefile +++ b/osfmk/mach/arm/Makefile @@ -11,7 +11,7 @@ VPATH := $(VPATH):$(SOURCE)/../../arm DATAFILES = \ boolean.h exception.h kern_return.h ndr_def.h \ processor_info.h rpc.h thread_state.h thread_status.h \ - vm_param.h vm_types.h \ + traps.h vm_param.h vm_types.h \ syscall_sw.h _structs.h sdt_isa.h INSTALL_MD_LIST = ${DATAFILES} diff --git a/osfmk/mach/arm/_structs.h b/osfmk/mach/arm/_structs.h index d5f4d864d..d3d084ea3 100644 --- a/osfmk/mach/arm/_structs.h +++ b/osfmk/mach/arm/_structs.h @@ -296,6 +296,25 @@ _STRUCT_ARM_THREAD_STATE64 ptrauth_key_process_independent_data, \ ptrauth_string_discriminator("fp")) : __p); }) +/* Strip ptr auth bits from pc, lr, sp and fp field of arm_thread_state64_t */ +#define __darwin_arm_thread_state64_ptrauth_strip(ts) \ + __extension__ ({ _STRUCT_ARM_THREAD_STATE64 *__tsp = &(ts); \ + __tsp->__opaque_pc = ((__tsp->__opaque_flags & \ + __DARWIN_ARM_THREAD_STATE64_FLAGS_NO_PTRAUTH) ? __tsp->__opaque_pc : \ + ptrauth_strip(__tsp->__opaque_pc, ptrauth_key_process_independent_code)); \ + __tsp->__opaque_lr = ((__tsp->__opaque_flags & \ + (__DARWIN_ARM_THREAD_STATE64_FLAGS_NO_PTRAUTH | \ + __DARWIN_ARM_THREAD_STATE64_FLAGS_IB_SIGNED_LR)) ? __tsp->__opaque_lr : \ + ptrauth_strip(__tsp->__opaque_lr, ptrauth_key_process_independent_code)); \ + __tsp->__opaque_sp = ((__tsp->__opaque_flags & \ + __DARWIN_ARM_THREAD_STATE64_FLAGS_NO_PTRAUTH) ? __tsp->__opaque_sp : \ + ptrauth_strip(__tsp->__opaque_sp, ptrauth_key_process_independent_data)); \ + __tsp->__opaque_fp = ((__tsp->__opaque_flags & \ + __DARWIN_ARM_THREAD_STATE64_FLAGS_NO_PTRAUTH) ? 
__tsp->__opaque_fp : \ + ptrauth_strip(__tsp->__opaque_fp, ptrauth_key_process_independent_data)); \ + __tsp->__opaque_flags |= \ + __DARWIN_ARM_THREAD_STATE64_FLAGS_NO_PTRAUTH; }) + #else /* __has_feature(ptrauth_calls) && defined(__LP64__) */ #if __DARWIN_OPAQUE_ARM_THREAD_STATE64 @@ -334,6 +353,9 @@ _STRUCT_ARM_THREAD_STATE64 /* Set fp field of arm_thread_state64_t to a data pointer value */ #define __darwin_arm_thread_state64_set_fp(ts, ptr) \ ((ts).__opaque_fp = (void*)(uintptr_t)(ptr)) +/* Strip ptr auth bits from pc, lr, sp and fp field of arm_thread_state64_t */ +#define __darwin_arm_thread_state64_ptrauth_strip(ts) \ + (void)(ts) #else /* __DARWIN_OPAQUE_ARM_THREAD_STATE64 */ #if __DARWIN_UNIX03 @@ -368,6 +390,9 @@ _STRUCT_ARM_THREAD_STATE64 /* Set fp field of arm_thread_state64_t to a data pointer value */ #define __darwin_arm_thread_state64_set_fp(ts, ptr) \ ((ts).__fp = (uintptr_t)(ptr)) +/* Strip ptr auth bits from pc, lr, sp and fp field of arm_thread_state64_t */ +#define __darwin_arm_thread_state64_ptrauth_strip(ts) \ + (void)(ts) #else /* __DARWIN_UNIX03 */ @@ -401,6 +426,9 @@ _STRUCT_ARM_THREAD_STATE64 /* Set fp field of arm_thread_state64_t to a data pointer value */ #define __darwin_arm_thread_state64_set_fp(ts, ptr) \ ((ts).fp = (uintptr_t)(ptr)) +/* Strip ptr auth bits from pc, lr, sp and fp field of arm_thread_state64_t */ +#define __darwin_arm_thread_state64_ptrauth_strip(ts) \ + (void)(ts) #endif /* __DARWIN_UNIX03 */ #endif /* __DARWIN_OPAQUE_ARM_THREAD_STATE64 */ @@ -537,7 +565,7 @@ _STRUCT_ARM_DEBUG_STATE /* ARM's arm_debug_state is ARM64's arm_legacy_debug_state */ #if __DARWIN_UNIX03 -#define _STRUCT_ARM_LEGACY_DEBUG_STATE struct arm_legacy_debug_state +#define _STRUCT_ARM_LEGACY_DEBUG_STATE struct __arm_legacy_debug_state _STRUCT_ARM_LEGACY_DEBUG_STATE { __uint32_t __bvr[16]; diff --git a/osfmk/mach/arm/exception.h b/osfmk/mach/arm/exception.h index 42a802824..e374b21d8 100644 --- a/osfmk/mach/arm/exception.h +++ b/osfmk/mach/arm/exception.h @@ -72,6 +72,7 @@ #define EXC_ARM_DA_DEBUG 0x102 /* Debug (watch/break) Fault */ #define EXC_ARM_SP_ALIGN 0x103 /* SP Alignment Fault */ #define EXC_ARM_SWP 0x104 /* SWP instruction */ +#define EXC_ARM_PAC_FAIL 0x105 /* PAC authentication failure */ /* * EXC_BREAKPOINT diff --git a/osfmk/mach/arm/sdt_isa.h b/osfmk/mach/arm/sdt_isa.h index 519e1d935..3ca2639e5 100644 --- a/osfmk/mach/arm/sdt_isa.h +++ b/osfmk/mach/arm/sdt_isa.h @@ -28,7 +28,7 @@ */ #ifndef _MACH_ARM_SDT_ISA_H -#define _MACH_ARM_SDT_ISA_H +#define _MACH_ARM_SDT_ISA_H /* * Only define when testing. 
This makes the calls into actual calls to @@ -44,122 +44,130 @@ * For the kernel, set an explicit global label so the symbol can be located */ #ifdef __arm__ -#define DTRACE_LAB(p, n) \ - "__dtrace_probe$" DTRACE_TOSTRING(%=__LINE__) DTRACE_STRINGIFY(_##p##___##n) - -#define DTRACE_LABEL(p, n) \ - ".pushsection __DATA, __data\n\t" \ - ".p2align 2\n\t" \ - ".globl " DTRACE_LAB(p, n) "\n\t" \ - DTRACE_LAB(p, n) ":" ".long 1f""\n\t" \ - ".popsection" "\n\t" \ - "1:" + +#define DTRACE_LABEL(p, n) \ + ".pushsection __DATA, __sdt_cstring, cstring_literals\n\t" \ + "1: .ascii \"" DTRACE_STRINGIFY(p##___) "\\0\"\n\t" \ + "2: .ascii \"" DTRACE_STRINGIFY(n) "\\0\"\n\t" \ + ".popsection" "\n\t" \ + ".pushsection __DATA, __sdt, regular, live_support\n\t" \ + ".p2align 2\n\t" \ + "l3_%=:\n\t" \ + ".long 4f""\n\t" \ + ".long 1b""\n\t" \ + ".long 2b""\n\t" \ + ".popsection" "\n\t" \ + "4:" #else /* __arm64__ */ -#define DTRACE_LAB(p, n) \ - "__dtrace_probe$" DTRACE_TOSTRING(%=__LINE__) DTRACE_STRINGIFY(_##p##___##n) - -#define DTRACE_LABEL(p, n) \ - ".pushsection __DATA, __data\n\t" \ - ".p2align 3\n\t" \ - ".globl " DTRACE_LAB(p, n) "\n\t" \ - DTRACE_LAB(p, n) ":" ".quad 1f""\n\t" \ - ".popsection" "\n\t" \ - "1:" + +#define DTRACE_LABEL(p, n) \ + ".pushsection __DATA, __sdt_cstring, cstring_literals\n\t" \ + "1: .ascii \"" DTRACE_STRINGIFY(p##___) "\\0\"\n\t" \ + "2: .ascii \"" DTRACE_STRINGIFY(n) "\\0\"\n\t" \ + ".popsection" "\n\t" \ + ".pushsection __DATA, __sdt, regular, live_support\n\t" \ + ".p2align 3\n\t" \ + "l3_%=:\n\t" \ + ".quad 4f""\n\t" \ + ".quad 1b""\n\t" \ + ".quad 2b""\n\t" \ + ".popsection" "\n\t" \ + "4:" #endif -#else /* !KERNEL */ -#define DTRACE_LABEL(p, n) \ +#else /* !KERNEL */ +#define DTRACE_LABEL(p, n) \ "__dtrace_probe$" DTRACE_TOSTRING(%=__LINE__) DTRACE_STRINGIFY(_##p##___##n) ":" "\n\t" -#endif /* !KERNEL */ +#endif /* !KERNEL */ #ifdef DTRACE_CALL_TEST -#define DTRACE_CALL(p,n) \ - DTRACE_LABEL(p,n) \ +#define DTRACE_CALL(p, n) \ + DTRACE_LABEL(p,n) \ DTRACE_CALL_INSN(p,n) -#else /* !DTRACE_CALL_TEST */ +#else /* !DTRACE_CALL_TEST */ -#define DTRACE_CALL(p,n) \ - DTRACE_LABEL(p,n) \ +#define DTRACE_CALL(p, n) \ + DTRACE_LABEL(p,n) \ DTRACE_NOPS -#endif /* !DTRACE_CALL_TEST */ +#endif /* !DTRACE_CALL_TEST */ #if defined(__arm__) -#define DTRACE_NOPS \ +#define DTRACE_NOPS \ "nop" "\n\t" -#define DTRACE_CALL_INSN(p,n) \ +#define DTRACE_CALL_INSN(p, n) \ "blx _dtracetest" DTRACE_STRINGIFY(_##p##_##n) "\n\t" #ifdef __thumb__ -#define DTRACE_ALLOC_STACK(n) \ +#define DTRACE_ALLOC_STACK(n) \ "sub sp, #" #n "\n\t" -#define DTRACE_DEALLOC_STACK(n) \ +#define DTRACE_DEALLOC_STACK(n) \ "add sp, #" #n "\n\t" #else -#define DTRACE_ALLOC_STACK(n) \ +#define DTRACE_ALLOC_STACK(n) \ "sub sp, sp, #" #n "\n\t" -#define DTRACE_DEALLOC_STACK(n) \ +#define DTRACE_DEALLOC_STACK(n) \ "add sp, sp, #" #n "\n\t" #endif -#define ARG1_EXTENT 1 -#define ARGS2_EXTENT 2 -#define ARGS3_EXTENT 3 -#define ARGS4_EXTENT 4 -#define ARGS5_EXTENT 5 -#define ARGS6_EXTENT 6 -#define ARGS7_EXTENT 7 -#define ARGS8_EXTENT 8 -#define ARGS9_EXTENT 9 -#define ARGS10_EXTENT 10 - -#define DTRACE_CALL0ARGS(provider, name) \ - asm volatile ( \ - DTRACE_CALL(provider, name) \ - "# eat trailing nl+tab from DTRACE_CALL" \ - : \ - : \ +#define ARG1_EXTENT 1 +#define ARGS2_EXTENT 2 +#define ARGS3_EXTENT 3 +#define ARGS4_EXTENT 4 +#define ARGS5_EXTENT 5 +#define ARGS6_EXTENT 6 +#define ARGS7_EXTENT 7 +#define ARGS8_EXTENT 8 +#define ARGS9_EXTENT 9 +#define ARGS10_EXTENT 10 + +#define DTRACE_CALL0ARGS(provider, name) \ + asm 
volatile ( \ + DTRACE_CALL(provider, name) \ + "# eat trailing nl+tab from DTRACE_CALL" \ + : \ + : \ ); -#define DTRACE_CALL1ARG(provider, name) \ - asm volatile ("ldr r0, [%0]" "\n\t" \ - DTRACE_CALL(provider, name) \ - : \ - : "l" (__dtrace_args) \ - : "memory", "r0" \ +#define DTRACE_CALL1ARG(provider, name) \ + asm volatile ("ldr r0, [%0]" "\n\t" \ + DTRACE_CALL(provider, name) \ + : \ + : "l" (__dtrace_args) \ + : "memory", "r0" \ ); -#define DTRACE_CALL2ARGS(provider, name) \ - asm volatile ("ldr r1, [%0, #4]" "\n\t" \ - "ldr r0, [%0]" "\n\t" \ - DTRACE_CALL(provider, name) \ - : \ - : "l" (__dtrace_args) \ - : "memory", "r0", "r1" \ +#define DTRACE_CALL2ARGS(provider, name) \ + asm volatile ("ldr r1, [%0, #4]" "\n\t" \ + "ldr r0, [%0]" "\n\t" \ + DTRACE_CALL(provider, name) \ + : \ + : "l" (__dtrace_args) \ + : "memory", "r0", "r1" \ ); -#define DTRACE_CALL3ARGS(provider, name) \ - asm volatile ("ldr r2, [%0, #8]" "\n\t" \ - "ldr r1, [%0, #4]" "\n\t" \ - "ldr r0, [%0]" "\n\t" \ - DTRACE_CALL(provider, name) \ - : \ - : "l" (__dtrace_args) \ - : "memory", "r0", "r1", "r2" \ +#define DTRACE_CALL3ARGS(provider, name) \ + asm volatile ("ldr r2, [%0, #8]" "\n\t" \ + "ldr r1, [%0, #4]" "\n\t" \ + "ldr r0, [%0]" "\n\t" \ + DTRACE_CALL(provider, name) \ + : \ + : "l" (__dtrace_args) \ + : "memory", "r0", "r1", "r2" \ ); -#define DTRACE_CALL4ARGS(provider, name) \ - asm volatile ("ldr r3, [%0, #12]" "\n\t" \ - "ldr r2, [%0, #8]" "\n\t" \ - "ldr r1, [%0, #4]" "\n\t" \ - "ldr r0, [%0]" "\n\t" \ - DTRACE_CALL(provider, name) \ - : \ - : "l" (__dtrace_args) \ - : "memory", "r0", "r1", "r2", "r3" \ +#define DTRACE_CALL4ARGS(provider, name) \ + asm volatile ("ldr r3, [%0, #12]" "\n\t" \ + "ldr r2, [%0, #8]" "\n\t" \ + "ldr r1, [%0, #4]" "\n\t" \ + "ldr r0, [%0]" "\n\t" \ + DTRACE_CALL(provider, name) \ + : \ + : "l" (__dtrace_args) \ + : "memory", "r0", "r1", "r2", "r3" \ ); /* @@ -167,274 +175,274 @@ * We currently apply this constraint to all ARM32 DTRACE_CALL macros; hence the * macros below will overallocate for some ABIs. 
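+ * For example, DTRACE_CALL5ARGS needs only 4 bytes of outgoing stack for its
+ * fifth argument, yet DTRACE_ALLOC_STACK(16) still carves out 16 bytes, and
+ * the nine- and ten-argument variants round up to 32.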
*/ -#define DTRACE_CALL5ARGS(provider, name) \ - asm volatile ( \ - DTRACE_ALLOC_STACK(16) \ - "ldr r0, [%0, #16]" "\n\t" \ - "str r0, [sp]" "\n\t" \ - "ldr r3, [%0, #12]" "\n\t" \ - "ldr r2, [%0, #8]" "\n\t" \ - "ldr r1, [%0, #4]" "\n\t" \ - "ldr r0, [%0]" "\n\t" \ - DTRACE_CALL(provider, name) \ - DTRACE_DEALLOC_STACK(16) \ - : \ - : "l" (__dtrace_args) \ - : "memory", "r0", "r1", "r2", "r3" \ +#define DTRACE_CALL5ARGS(provider, name) \ + asm volatile ( \ + DTRACE_ALLOC_STACK(16) \ + "ldr r0, [%0, #16]" "\n\t" \ + "str r0, [sp]" "\n\t" \ + "ldr r3, [%0, #12]" "\n\t" \ + "ldr r2, [%0, #8]" "\n\t" \ + "ldr r1, [%0, #4]" "\n\t" \ + "ldr r0, [%0]" "\n\t" \ + DTRACE_CALL(provider, name) \ + DTRACE_DEALLOC_STACK(16) \ + : \ + : "l" (__dtrace_args) \ + : "memory", "r0", "r1", "r2", "r3" \ ); -#define DTRACE_CALL6ARGS(provider, name) \ - asm volatile ( \ - DTRACE_ALLOC_STACK(16) \ - "ldr r1, [%0, #20]" "\n\t" \ - "ldr r0, [%0, #16]" "\n\t" \ - "str r1, [sp, #4]" "\n\t" \ - "str r0, [sp]" "\n\t" \ - "ldr r3, [%0, #12]" "\n\t" \ - "ldr r2, [%0, #8]" "\n\t" \ - "ldr r1, [%0, #4]" "\n\t" \ - "ldr r0, [%0]" "\n\t" \ - DTRACE_CALL(provider, name) \ - DTRACE_DEALLOC_STACK(16) \ - : \ - : "l" (__dtrace_args) \ - : "memory", "r0", "r1", "r2", "r3" \ +#define DTRACE_CALL6ARGS(provider, name) \ + asm volatile ( \ + DTRACE_ALLOC_STACK(16) \ + "ldr r1, [%0, #20]" "\n\t" \ + "ldr r0, [%0, #16]" "\n\t" \ + "str r1, [sp, #4]" "\n\t" \ + "str r0, [sp]" "\n\t" \ + "ldr r3, [%0, #12]" "\n\t" \ + "ldr r2, [%0, #8]" "\n\t" \ + "ldr r1, [%0, #4]" "\n\t" \ + "ldr r0, [%0]" "\n\t" \ + DTRACE_CALL(provider, name) \ + DTRACE_DEALLOC_STACK(16) \ + : \ + : "l" (__dtrace_args) \ + : "memory", "r0", "r1", "r2", "r3" \ ); -#define DTRACE_CALL7ARGS(provider, name) \ - asm volatile ( \ - DTRACE_ALLOC_STACK(16) \ - "ldr r2, [%0, #24]" "\n\t" \ - "ldr r1, [%0, #20]" "\n\t" \ - "ldr r0, [%0, #16]" "\n\t" \ - "str r2, [sp, #8]" "\n\t" \ - "str r1, [sp, #4]" "\n\t" \ - "str r0, [sp]" "\n\t" \ - "ldr r3, [%0, #12]" "\n\t" \ - "ldr r2, [%0, #8]" "\n\t" \ - "ldr r1, [%0, #4]" "\n\t" \ - "ldr r0, [%0]" "\n\t" \ - DTRACE_CALL(provider, name) \ - DTRACE_DEALLOC_STACK(16) \ - : \ - : "l" (__dtrace_args) \ - : "memory", "r0", "r1", "r2", "r3" \ +#define DTRACE_CALL7ARGS(provider, name) \ + asm volatile ( \ + DTRACE_ALLOC_STACK(16) \ + "ldr r2, [%0, #24]" "\n\t" \ + "ldr r1, [%0, #20]" "\n\t" \ + "ldr r0, [%0, #16]" "\n\t" \ + "str r2, [sp, #8]" "\n\t" \ + "str r1, [sp, #4]" "\n\t" \ + "str r0, [sp]" "\n\t" \ + "ldr r3, [%0, #12]" "\n\t" \ + "ldr r2, [%0, #8]" "\n\t" \ + "ldr r1, [%0, #4]" "\n\t" \ + "ldr r0, [%0]" "\n\t" \ + DTRACE_CALL(provider, name) \ + DTRACE_DEALLOC_STACK(16) \ + : \ + : "l" (__dtrace_args) \ + : "memory", "r0", "r1", "r2", "r3" \ ); -#define DTRACE_CALL8ARGS(provider, name) \ - asm volatile ( \ - DTRACE_ALLOC_STACK(16) \ - "ldr r3, [%0, #28]" "\n\t" \ - "ldr r2, [%0, #24]" "\n\t" \ - "ldr r1, [%0, #20]" "\n\t" \ - "ldr r0, [%0, #16]" "\n\t" \ - "str r3, [sp, #12]" "\n\t" \ - "str r2, [sp, #8]" "\n\t" \ - "str r1, [sp, #4]" "\n\t" \ - "str r0, [sp]" "\n\t" \ - "ldr r3, [%0, #12]" "\n\t" \ - "ldr r2, [%0, #8]" "\n\t" \ - "ldr r1, [%0, #4]" "\n\t" \ - "ldr r0, [%0]" "\n\t" \ - DTRACE_CALL(provider, name) \ - DTRACE_DEALLOC_STACK(16) \ - : \ - : "l" (__dtrace_args) \ - : "memory", "r0", "r1", "r2", "r3" \ +#define DTRACE_CALL8ARGS(provider, name) \ + asm volatile ( \ + DTRACE_ALLOC_STACK(16) \ + "ldr r3, [%0, #28]" "\n\t" \ + "ldr r2, [%0, #24]" "\n\t" \ + "ldr r1, [%0, #20]" "\n\t" \ + "ldr r0, [%0, #16]" "\n\t" \ + "str 
r3, [sp, #12]" "\n\t" \ + "str r2, [sp, #8]" "\n\t" \ + "str r1, [sp, #4]" "\n\t" \ + "str r0, [sp]" "\n\t" \ + "ldr r3, [%0, #12]" "\n\t" \ + "ldr r2, [%0, #8]" "\n\t" \ + "ldr r1, [%0, #4]" "\n\t" \ + "ldr r0, [%0]" "\n\t" \ + DTRACE_CALL(provider, name) \ + DTRACE_DEALLOC_STACK(16) \ + : \ + : "l" (__dtrace_args) \ + : "memory", "r0", "r1", "r2", "r3" \ ); -#define DTRACE_CALL9ARGS(provider, name) \ - asm volatile ( \ - DTRACE_ALLOC_STACK(32) \ - "ldr r0, [%0, #32]" "\n\t" \ - "str r0, [sp, #16]" "\n\t" \ - "ldr r3, [%0, #28]" "\n\t" \ - "ldr r2, [%0, #24]" "\n\t" \ - "ldr r1, [%0, #20]" "\n\t" \ - "ldr r0, [%0, #16]" "\n\t" \ - "str r3, [sp, #12]" "\n\t" \ - "str r2, [sp, #8]" "\n\t" \ - "str r1, [sp, #4]" "\n\t" \ - "str r0, [sp]" "\n\t" \ - "ldr r3, [%0, #12]" "\n\t" \ - "ldr r2, [%0, #8]" "\n\t" \ - "ldr r1, [%0, #4]" "\n\t" \ - "ldr r0, [%0]" "\n\t" \ - DTRACE_CALL(provider, name) \ - DTRACE_DEALLOC_STACK(32) \ - : \ - : "l" (__dtrace_args) \ - : "memory", "r0", "r1", "r2", "r3" \ +#define DTRACE_CALL9ARGS(provider, name) \ + asm volatile ( \ + DTRACE_ALLOC_STACK(32) \ + "ldr r0, [%0, #32]" "\n\t" \ + "str r0, [sp, #16]" "\n\t" \ + "ldr r3, [%0, #28]" "\n\t" \ + "ldr r2, [%0, #24]" "\n\t" \ + "ldr r1, [%0, #20]" "\n\t" \ + "ldr r0, [%0, #16]" "\n\t" \ + "str r3, [sp, #12]" "\n\t" \ + "str r2, [sp, #8]" "\n\t" \ + "str r1, [sp, #4]" "\n\t" \ + "str r0, [sp]" "\n\t" \ + "ldr r3, [%0, #12]" "\n\t" \ + "ldr r2, [%0, #8]" "\n\t" \ + "ldr r1, [%0, #4]" "\n\t" \ + "ldr r0, [%0]" "\n\t" \ + DTRACE_CALL(provider, name) \ + DTRACE_DEALLOC_STACK(32) \ + : \ + : "l" (__dtrace_args) \ + : "memory", "r0", "r1", "r2", "r3" \ ); -#define DTRACE_CALL10ARGS(provider, name) \ - asm volatile ( \ - DTRACE_ALLOC_STACK(32) \ - "ldr r1, [%0, #36]" "\n\t" \ - "ldr r0, [%0, #32]" "\n\t" \ - "str r1, [sp, #20]" "\n\t" \ - "str r0, [sp, #16]" "\n\t" \ - "ldr r3, [%0, #28]" "\n\t" \ - "ldr r2, [%0, #24]" "\n\t" \ - "ldr r1, [%0, #20]" "\n\t" \ - "ldr r0, [%0, #16]" "\n\t" \ - "str r3, [sp, #12]" "\n\t" \ - "str r2, [sp, #8]" "\n\t" \ - "str r1, [sp, #4]" "\n\t" \ - "str r0, [sp]" "\n\t" \ - "ldr r3, [%0, #12]" "\n\t" \ - "ldr r2, [%0, #8]" "\n\t" \ - "ldr r1, [%0, #4]" "\n\t" \ - "ldr r0, [%0]" "\n\t" \ - DTRACE_CALL(provider, name) \ - DTRACE_DEALLOC_STACK(32) \ - : \ - : "l" (__dtrace_args) \ - : "memory", "r0", "r1", "r2", "r3" \ +#define DTRACE_CALL10ARGS(provider, name) \ + asm volatile ( \ + DTRACE_ALLOC_STACK(32) \ + "ldr r1, [%0, #36]" "\n\t" \ + "ldr r0, [%0, #32]" "\n\t" \ + "str r1, [sp, #20]" "\n\t" \ + "str r0, [sp, #16]" "\n\t" \ + "ldr r3, [%0, #28]" "\n\t" \ + "ldr r2, [%0, #24]" "\n\t" \ + "ldr r1, [%0, #20]" "\n\t" \ + "ldr r0, [%0, #16]" "\n\t" \ + "str r3, [sp, #12]" "\n\t" \ + "str r2, [sp, #8]" "\n\t" \ + "str r1, [sp, #4]" "\n\t" \ + "str r0, [sp]" "\n\t" \ + "ldr r3, [%0, #12]" "\n\t" \ + "ldr r2, [%0, #8]" "\n\t" \ + "ldr r1, [%0, #4]" "\n\t" \ + "ldr r0, [%0]" "\n\t" \ + DTRACE_CALL(provider, name) \ + DTRACE_DEALLOC_STACK(32) \ + : \ + : "l" (__dtrace_args) \ + : "memory", "r0", "r1", "r2", "r3" \ ); #elif defined(__arm64__) -#define DTRACE_NOPS \ +#define DTRACE_NOPS \ "nop" "\n\t" -#define DTRACE_CALL_INSN(p,n) \ +#define DTRACE_CALL_INSN(p, n) \ "bl _dtracetest" DTRACE_STRINGIFY(_##p##_##n) "\n\t" -#define DTRACE_ALLOC_STACK(n) \ +#define DTRACE_ALLOC_STACK(n) \ "sub sp, sp, #" #n "\n\t" -#define DTRACE_DEALLOC_STACK(n) \ +#define DTRACE_DEALLOC_STACK(n) \ "add sp, sp, #" #n "\n\t" -#define ARG1_EXTENT 1 -#define ARGS2_EXTENT 2 -#define ARGS3_EXTENT 3 -#define ARGS4_EXTENT 4 
-#define ARGS5_EXTENT 5 -#define ARGS6_EXTENT 6 -#define ARGS7_EXTENT 7 -#define ARGS8_EXTENT 8 -#define ARGS9_EXTENT 9 -#define ARGS10_EXTENT 10 - -#define DTRACE_CALL0ARGS(provider, name) \ - asm volatile ( \ - DTRACE_CALL(provider, name) \ - "# eat trailing nl+tab from DTRACE_CALL" \ - : \ - : \ +#define ARG1_EXTENT 1 +#define ARGS2_EXTENT 2 +#define ARGS3_EXTENT 3 +#define ARGS4_EXTENT 4 +#define ARGS5_EXTENT 5 +#define ARGS6_EXTENT 6 +#define ARGS7_EXTENT 7 +#define ARGS8_EXTENT 8 +#define ARGS9_EXTENT 9 +#define ARGS10_EXTENT 10 + +#define DTRACE_CALL0ARGS(provider, name) \ + asm volatile ( \ + DTRACE_CALL(provider, name) \ + "# eat trailing nl+tab from DTRACE_CALL" \ + : \ + : \ ); -#define DTRACE_CALL1ARG(provider, name) \ - asm volatile ("ldr x0, [%0]" "\n\t" \ - DTRACE_CALL(provider, name) \ - : \ - : "r" (__dtrace_args) \ - : "memory", "x0" \ +#define DTRACE_CALL1ARG(provider, name) \ + asm volatile ("ldr x0, [%0]" "\n\t" \ + DTRACE_CALL(provider, name) \ + : \ + : "r" (__dtrace_args) \ + : "memory", "x0" \ ); -#define DTRACE_CALL2ARGS(provider, name) \ - asm volatile ("ldp x0, x1, [%0]" "\n\t" \ - DTRACE_CALL(provider, name) \ - : \ - : "r" (__dtrace_args) \ - : "memory", "x0", "x1" \ +#define DTRACE_CALL2ARGS(provider, name) \ + asm volatile ("ldp x0, x1, [%0]" "\n\t" \ + DTRACE_CALL(provider, name) \ + : \ + : "r" (__dtrace_args) \ + : "memory", "x0", "x1" \ ); -#define DTRACE_CALL3ARGS(provider, name) \ - asm volatile ("ldr x2, [%0, #16]" "\n\t" \ - "ldp x0, x1, [%0]" "\n\t" \ - DTRACE_CALL(provider, name) \ - : \ - : "r" (__dtrace_args) \ - : "memory", "x0", "x1", "x2" \ +#define DTRACE_CALL3ARGS(provider, name) \ + asm volatile ("ldr x2, [%0, #16]" "\n\t" \ + "ldp x0, x1, [%0]" "\n\t" \ + DTRACE_CALL(provider, name) \ + : \ + : "r" (__dtrace_args) \ + : "memory", "x0", "x1", "x2" \ ); -#define DTRACE_CALL4ARGS(provider, name) \ - asm volatile ("ldp x2, x3, [%0, #16]" "\n\t" \ - "ldp x0, x1, [%0]" "\n\t" \ - DTRACE_CALL(provider, name) \ - : \ - : "r" (__dtrace_args) \ - : "memory", "x0", "x1", "x2", "x3" \ +#define DTRACE_CALL4ARGS(provider, name) \ + asm volatile ("ldp x2, x3, [%0, #16]" "\n\t" \ + "ldp x0, x1, [%0]" "\n\t" \ + DTRACE_CALL(provider, name) \ + : \ + : "r" (__dtrace_args) \ + : "memory", "x0", "x1", "x2", "x3" \ ); -#define DTRACE_CALL5ARGS(provider, name) \ - asm volatile ("ldr x4, [%0, #32]" "\n\t" \ - "ldp x2, x3, [%0, #16]" "\n\t" \ - "ldp x0, x1, [%0]" "\n\t" \ - DTRACE_CALL(provider, name) \ - : \ - : "r" (__dtrace_args) \ - : "memory", "x0", "x1", "x2", "x3", "x4" \ +#define DTRACE_CALL5ARGS(provider, name) \ + asm volatile ("ldr x4, [%0, #32]" "\n\t" \ + "ldp x2, x3, [%0, #16]" "\n\t" \ + "ldp x0, x1, [%0]" "\n\t" \ + DTRACE_CALL(provider, name) \ + : \ + : "r" (__dtrace_args) \ + : "memory", "x0", "x1", "x2", "x3", "x4" \ ); -#define DTRACE_CALL6ARGS(provider, name) \ - asm volatile ("ldp x4, x5, [%0, #32]" "\n\t" \ - "ldp x2, x3, [%0, #16]" "\n\t" \ - "ldp x0, x1, [%0]" "\n\t" \ - DTRACE_CALL(provider, name) \ - : \ - : "r" (__dtrace_args) \ - : "memory", "x0", "x1", "x2", "x3", "x4", "x5" \ +#define DTRACE_CALL6ARGS(provider, name) \ + asm volatile ("ldp x4, x5, [%0, #32]" "\n\t" \ + "ldp x2, x3, [%0, #16]" "\n\t" \ + "ldp x0, x1, [%0]" "\n\t" \ + DTRACE_CALL(provider, name) \ + : \ + : "r" (__dtrace_args) \ + : "memory", "x0", "x1", "x2", "x3", "x4", "x5" \ ); -#define DTRACE_CALL7ARGS(provider, name) \ - asm volatile ("ldr x6, [%0, #48]" "\n\t" \ - "ldp x4, x5, [%0, #32]" "\n\t" \ - "ldp x2, x3, [%0, #16]" "\n\t" \ - "ldp x0, x1, [%0]" "\n\t" \ 
- DTRACE_CALL(provider, name) \ - : \ - : "r" (__dtrace_args) \ - : "memory", "x0", "x1", "x2", "x3", "x4", "x5", "x6" \ - ); - -#define DTRACE_CALL8ARGS(provider, name) \ - asm volatile ("ldp x6, x7, [%0, #48]" "\n\t" \ - "ldp x4, x5, [%0, #32]" "\n\t" \ - "ldp x2, x3, [%0, #16]" "\n\t" \ - "ldp x0, x1, [%0]" "\n\t" \ - DTRACE_CALL(provider, name) \ - : \ - : "r" (__dtrace_args) \ - : "memory", "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7" \ - ); +#define DTRACE_CALL7ARGS(provider, name) \ + asm volatile ("ldr x6, [%0, #48]" "\n\t" \ + "ldp x4, x5, [%0, #32]" "\n\t" \ + "ldp x2, x3, [%0, #16]" "\n\t" \ + "ldp x0, x1, [%0]" "\n\t" \ + DTRACE_CALL(provider, name) \ + : \ + : "r" (__dtrace_args) \ + : "memory", "x0", "x1", "x2", "x3", "x4", "x5", "x6" \ + ); + +#define DTRACE_CALL8ARGS(provider, name) \ + asm volatile ("ldp x6, x7, [%0, #48]" "\n\t" \ + "ldp x4, x5, [%0, #32]" "\n\t" \ + "ldp x2, x3, [%0, #16]" "\n\t" \ + "ldp x0, x1, [%0]" "\n\t" \ + DTRACE_CALL(provider, name) \ + : \ + : "r" (__dtrace_args) \ + : "memory", "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7" \ + ); /* Keep stack 16 byte aligned per ABI requirements */ -#define DTRACE_CALL9ARGS(provider, name) \ - asm volatile ( \ - DTRACE_ALLOC_STACK(16) \ - "ldr x0, [%0, #64]" "\n\t" \ - "str x0, [sp]" "\n\t" \ - "ldp x6, x7, [%0, #48]" "\n\t" \ - "ldp x4, x5, [%0, #32]" "\n\t" \ - "ldp x2, x3, [%0, #16]" "\n\t" \ - "ldp x0, x1, [%0]" "\n\t" \ - DTRACE_CALL(provider, name) \ - DTRACE_DEALLOC_STACK(16) \ - : \ - : "r" (__dtrace_args) \ - : "memory", "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7" \ - ); - -#define DTRACE_CALL10ARGS(provider, name) \ - asm volatile ( \ - DTRACE_ALLOC_STACK(16) \ - "ldp x0, x1, [%0, #64]" "\n\t" \ - "stp x0, x1, [sp]" "\n\t" \ - "ldp x6, x7, [%0, #48]" "\n\t" \ - "ldp x4, x5, [%0, #32]" "\n\t" \ - "ldp x2, x3, [%0, #16]" "\n\t" \ - "ldp x0, x1, [%0]" "\n\t" \ - DTRACE_CALL(provider, name) \ - DTRACE_DEALLOC_STACK(16) \ - : \ - : "r" (__dtrace_args) \ - : "memory", "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7" \ - ); +#define DTRACE_CALL9ARGS(provider, name) \ + asm volatile ( \ + DTRACE_ALLOC_STACK(16) \ + "ldr x0, [%0, #64]" "\n\t" \ + "str x0, [sp]" "\n\t" \ + "ldp x6, x7, [%0, #48]" "\n\t" \ + "ldp x4, x5, [%0, #32]" "\n\t" \ + "ldp x2, x3, [%0, #16]" "\n\t" \ + "ldp x0, x1, [%0]" "\n\t" \ + DTRACE_CALL(provider, name) \ + DTRACE_DEALLOC_STACK(16) \ + : \ + : "r" (__dtrace_args) \ + : "memory", "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7" \ + ); + +#define DTRACE_CALL10ARGS(provider, name) \ + asm volatile ( \ + DTRACE_ALLOC_STACK(16) \ + "ldp x0, x1, [%0, #64]" "\n\t" \ + "stp x0, x1, [sp]" "\n\t" \ + "ldp x6, x7, [%0, #48]" "\n\t" \ + "ldp x4, x5, [%0, #32]" "\n\t" \ + "ldp x2, x3, [%0, #16]" "\n\t" \ + "ldp x0, x1, [%0]" "\n\t" \ + DTRACE_CALL(provider, name) \ + DTRACE_DEALLOC_STACK(16) \ + : \ + : "r" (__dtrace_args) \ + : "memory", "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7" \ + ); #endif /* __arm__ */ -#endif /* _MACH_ARM_SDT_ISA_H */ +#endif /* _MACH_ARM_SDT_ISA_H */ diff --git a/osfmk/mach/arm/thread_status.h b/osfmk/mach/arm/thread_status.h index 30f2f097d..a52b770aa 100644 --- a/osfmk/mach/arm/thread_status.h +++ b/osfmk/mach/arm/thread_status.h @@ -58,6 +58,10 @@ // ARM_THREAD_STATE_LAST 8 /* legacy */ #define ARM_THREAD_STATE32 9 +#ifdef XNU_KERNEL_PRIVATE +#define X86_THREAD_STATE_NONE 13 /* i386/thread_status.h THREAD_STATE_NONE */ +#endif /* XNU_KERNEL_PRIVATE */ + /* API */ #define ARM_DEBUG_STATE32 14 #define ARM_DEBUG_STATE64 15 @@ -143,6 +147,9 @@ typedef 
_STRUCT_ARM_THREAD_STATE64 arm_thread_state64_t; /* Set fp field of arm_thread_state64_t to a data pointer value */ #define arm_thread_state64_set_fp(ts, ptr) \ __darwin_arm_thread_state64_set_fp(ts, ptr) +/* Strip ptr auth bits from pc, lr, sp and fp field of arm_thread_state64_t */ +#define arm_thread_state64_ptrauth_strip(ts) \ + __darwin_arm_thread_state64_ptrauth_strip(ts) #endif /* __DARWIN_C_LEVEL >= __DARWIN_C_FULL && defined(__arm64__) */ #endif /* !defined(KERNEL) */ @@ -357,13 +364,13 @@ get_saved_state_pc(const arm_saved_state_t *iss) static inline void add_saved_state_pc(arm_saved_state_t *iss, int diff) { - iss->pc += diff; + iss->pc += (unsigned int)diff; } static inline void set_saved_state_pc(arm_saved_state_t *iss, register_t pc) { - iss->pc = pc; + iss->pc = (typeof(iss->pc))pc; } static inline register_t @@ -393,7 +400,7 @@ set_saved_state_fp(arm_saved_state_t *iss, register_t fp) static inline register_t get_saved_state_lr(const arm_saved_state_t *iss) { - return iss->lr; + return (register_t)iss->lr; } static inline void @@ -502,6 +509,21 @@ struct arm_saved_state { typedef struct arm_saved_state arm_saved_state_t; +struct arm_kernel_saved_state { + uint64_t x[12]; /* General purpose registers x16-x28 */ + uint64_t fp; /* Frame pointer x29 */ + uint64_t lr; /* Link register x30 */ + uint64_t sp; /* Stack pointer x31 */ + uint64_t pc; /* Program counter */ + uint32_t cpsr; /* Current program status register */ + uint32_t reserved; /* Reserved padding */ +#if defined(HAS_APPLE_PAC) + uint64_t jophash; +#endif /* defined(HAS_APPLE_PAC) */ +} __attribute__((aligned(16))); + +typedef struct arm_kernel_saved_state arm_kernel_saved_state_t; + #if defined(XNU_KERNEL_PRIVATE) #if defined(HAS_APPLE_PAC) @@ -632,14 +654,14 @@ const_saved_state64(const arm_saved_state_t *iss) static inline register_t get_saved_state_pc(const arm_saved_state_t *iss) { - return is_saved_state32(iss) ? const_saved_state32(iss)->pc : const_saved_state64(iss)->pc; + return (register_t)(is_saved_state32(iss) ? const_saved_state32(iss)->pc : const_saved_state64(iss)->pc); } static inline void add_saved_state_pc(arm_saved_state_t *iss, int diff) { if (is_saved_state32(iss)) { - uint64_t pc = saved_state32(iss)->pc + diff; + uint64_t pc = saved_state32(iss)->pc + (uint32_t)diff; saved_state32(iss)->pc = CAST_ASSERT_SAFE(uint32_t, pc); } else { #if defined(XNU_KERNEL_PRIVATE) && defined(HAS_APPLE_PAC) @@ -650,7 +672,7 @@ add_saved_state_pc(arm_saved_state_t *iss, int diff) [diff] "r"(diff) ); #else - saved_state64(iss)->pc += diff; + saved_state64(iss)->pc += (unsigned long)diff; #endif /* defined(XNU_KERNEL_PRIVATE) && defined(HAS_APPLE_PAC) */ } } @@ -668,7 +690,7 @@ set_saved_state_pc(arm_saved_state_t *iss, register_t pc) [pc] "r"(pc) ); #else - saved_state64(iss)->pc = pc; + saved_state64(iss)->pc = (unsigned long)pc; #endif /* defined(XNU_KERNEL_PRIVATE) && defined(HAS_APPLE_PAC) */ } } @@ -676,7 +698,7 @@ set_saved_state_pc(arm_saved_state_t *iss, register_t pc) static inline register_t get_saved_state_sp(const arm_saved_state_t *iss) { - return is_saved_state32(iss) ? const_saved_state32(iss)->sp : const_saved_state64(iss)->sp; + return (register_t)(is_saved_state32(iss) ? 
const_saved_state32(iss)->sp : const_saved_state64(iss)->sp); } static inline void @@ -685,14 +707,14 @@ set_saved_state_sp(arm_saved_state_t *iss, register_t sp) if (is_saved_state32(iss)) { saved_state32(iss)->sp = CAST_ASSERT_SAFE(uint32_t, sp); } else { - saved_state64(iss)->sp = sp; + saved_state64(iss)->sp = (uint64_t)sp; } } static inline register_t get_saved_state_lr(const arm_saved_state_t *iss) { - return is_saved_state32(iss) ? const_saved_state32(iss)->lr : const_saved_state64(iss)->lr; + return (register_t)(is_saved_state32(iss) ? const_saved_state32(iss)->lr : const_saved_state64(iss)->lr); } static inline void @@ -708,7 +730,7 @@ set_saved_state_lr(arm_saved_state_t *iss, register_t lr) [lr] "r"(lr) ); #else - saved_state64(iss)->lr = lr; + saved_state64(iss)->lr = (unsigned long)lr; #endif /* defined(XNU_KERNEL_PRIVATE) && defined(HAS_APPLE_PAC) */ } } @@ -716,7 +738,7 @@ set_saved_state_lr(arm_saved_state_t *iss, register_t lr) static inline register_t get_saved_state_fp(const arm_saved_state_t *iss) { - return is_saved_state32(iss) ? const_saved_state32(iss)->r[7] : const_saved_state64(iss)->fp; + return (register_t)(is_saved_state32(iss) ? const_saved_state32(iss)->r[7] : const_saved_state64(iss)->fp); } static inline void @@ -725,7 +747,7 @@ set_saved_state_fp(arm_saved_state_t *iss, register_t fp) if (is_saved_state32(iss)) { saved_state32(iss)->r[7] = CAST_ASSERT_SAFE(uint32_t, fp); } else { - saved_state64(iss)->fp = fp; + saved_state64(iss)->fp = (uint64_t)fp; } } @@ -742,7 +764,7 @@ get_saved_state_reg(const arm_saved_state_t *iss, unsigned reg) return 0; } - return is_saved_state32(iss) ? (const_saved_state32(iss)->r[reg]) : (const_saved_state64(iss)->x[reg]); + return (register_t)(is_saved_state32(iss) ? (const_saved_state32(iss)->r[reg]) : (const_saved_state64(iss)->x[reg])); } static inline void @@ -774,10 +796,11 @@ set_saved_state_reg(arm_saved_state_t *iss, unsigned reg, register_t value) return; } #endif - saved_state64(iss)->x[reg] = value; + saved_state64(iss)->x[reg] = (uint64_t)value; } } + static inline uint32_t get_saved_state_cpsr(const arm_saved_state_t *iss) { @@ -829,7 +852,7 @@ set_saved_state_cpsr(arm_saved_state_t *iss, uint32_t cpsr) static inline register_t get_saved_state_far(const arm_saved_state_t *iss) { - return is_saved_state32(iss) ? const_saved_state32(iss)->far : const_saved_state64(iss)->far; + return (register_t)(is_saved_state32(iss) ? 
const_saved_state32(iss)->far : const_saved_state64(iss)->far); } static inline void @@ -838,7 +861,7 @@ set_saved_state_far(arm_saved_state_t *iss, register_t far) if (is_saved_state32(iss)) { saved_state32(iss)->far = CAST_ASSERT_SAFE(uint32_t, far); } else { - saved_state64(iss)->far = far; + saved_state64(iss)->far = (uint64_t)far; } } @@ -943,6 +966,12 @@ typedef struct arm_neon_saved_state arm_neon_saved_state_t; #define ns_32 uns.ns_32 #define ns_64 uns.ns_64 +struct arm_kernel_neon_saved_state { + uint64_t d[8]; + uint32_t fpcr; +}; +typedef struct arm_kernel_neon_saved_state arm_kernel_neon_saved_state_t; + static inline boolean_t is_neon_saved_state32(const arm_neon_saved_state_t *state) { @@ -978,6 +1007,12 @@ struct arm_context { }; typedef struct arm_context arm_context_t; +struct arm_kernel_context { + struct arm_kernel_saved_state ss; + struct arm_kernel_neon_saved_state ns; +}; +typedef struct arm_kernel_context arm_kernel_context_t; + extern void saved_state_to_thread_state64(const arm_saved_state_t*, arm_thread_state64_t*); extern void thread_state64_to_saved_state(const arm_thread_state64_t*, arm_saved_state_t*); diff --git a/osfmk/mach/arm/traps.h b/osfmk/mach/arm/traps.h new file mode 100644 index 000000000..b81e54944 --- /dev/null +++ b/osfmk/mach/arm/traps.h @@ -0,0 +1,32 @@ +/* + * Copyright (c) 2019 Apple Inc. All rights reserved. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. + * + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ + */ + +#pragma once + +#define MACH_ARM_TRAP_ABSTIME -3 +#define MACH_ARM_TRAP_CONTTIME -4 diff --git a/osfmk/mach/arm/vm_param.h b/osfmk/mach/arm/vm_param.h index 12939b6ee..c9d7b44f1 100644 --- a/osfmk/mach/arm/vm_param.h +++ b/osfmk/mach/arm/vm_param.h @@ -53,7 +53,7 @@ #ifdef __arm__ #define PAGE_SHIFT_CONST 12 #elif defined(__arm64__) -extern unsigned PAGE_SHIFT_CONST; +extern int PAGE_SHIFT_CONST; #else #error Unsupported arch #endif @@ -99,14 +99,15 @@ extern unsigned PAGE_SHIFT_CONST; #define PAGE_MIN_SIZE (1 << PAGE_MIN_SHIFT) #define PAGE_MIN_MASK (PAGE_MIN_SIZE-1) +#define VM_MAX_PAGE_ADDRESS MACH_VM_MAX_ADDRESS + #ifndef __ASSEMBLER__ #ifdef MACH_KERNEL_PRIVATE #define VM32_SUPPORT 1 #define VM32_MIN_ADDRESS ((vm32_offset_t) 0) -#define VM32_MAX_ADDRESS ((vm32_offset_t) (VM_MAX_PAGE_ADDRESS & 0xFFFFFFFF)) -#define VM_MAX_PAGE_ADDRESS VM_MAX_ADDRESS /* ARM64_TODO: ?? 
*/ +#define VM32_MAX_ADDRESS ((vm32_offset_t) (VM_MAX_ADDRESS & 0xFFFFFFFF)) /* * kalloc() parameters: @@ -136,6 +137,18 @@ extern unsigned PAGE_SHIFT_CONST; #error Unsupported arch #endif +#if defined (__arm__) +/* existing zone map size limit moved from osfmk/vm/vm_init.c */ +#define ZONE_MAP_MAX (1024 * 1024 * 1536) /* 1.5GB for 32bit systems */ +#elif defined(__arm64__) +/* + * Limits the physical pages in the zone map + */ +#define ZONE_MAP_MAX (31ULL << 30) /* 31GB for 64bit systems */ +#else +#error Unsupported arch +#endif + #endif #if defined (__arm__) @@ -154,12 +167,17 @@ extern unsigned PAGE_SHIFT_CONST; /* system-wide values */ #define MACH_VM_MIN_ADDRESS_RAW 0x0ULL +#if defined(PLATFORM_MacOSX) || defined(PLATFORM_DriverKit) +#define MACH_VM_MAX_ADDRESS_RAW 0x00007FFFFE000000ULL +#else #define MACH_VM_MAX_ADDRESS_RAW 0x0000000FC0000000ULL +#endif + #define MACH_VM_MIN_ADDRESS ((mach_vm_offset_t) MACH_VM_MIN_ADDRESS_RAW) #define MACH_VM_MAX_ADDRESS ((mach_vm_offset_t) MACH_VM_MAX_ADDRESS_RAW) -#else +#else /* defined(__arm64__) */ #error architecture not supported #endif @@ -169,7 +187,7 @@ extern unsigned PAGE_SHIFT_CONST; #ifdef KERNEL #if defined (__arm__) -#define VM_KERNEL_POINTER_SIGNIFICANT_BITS 32 +#define VM_KERNEL_POINTER_SIGNIFICANT_BITS 31 #define VM_MIN_KERNEL_ADDRESS ((vm_address_t) 0x80000000) #define VM_MAX_KERNEL_ADDRESS ((vm_address_t) 0xFFFEFFFF) #define VM_HIGH_KERNEL_WINDOW ((vm_address_t) 0xFFFE0000) @@ -178,15 +196,61 @@ extern unsigned PAGE_SHIFT_CONST; * The minimum and maximum kernel address; some configurations may * constrain the address space further. */ +#define TiB(x) ((0ULL + (x)) << 40) +#define GiB(x) ((0ULL + (x)) << 30) + +#if XNU_KERNEL_PRIVATE +#if defined(ARM_LARGE_MEMORY) +/* + * +-----------------------+--------+--------+------------------------+ + * | 0xffff_fe90_0000_0000 |-1472GB | 576GB | KASAN_SHADOW_MIN | + * | | | | VM_MAX_KERNEL_ADDRESS | + * +-----------------------+--------+--------+------------------------+ + * | 0xffff_fe10_0000_0000 |-1984GB | 64GB | PMAP_HEAP_RANGE_START | + * +-----------------------+--------+--------+------------------------+ + * | 0xffff_fe00_0700_4000 | | | VM_KERNEL_LINK_ADDRESS | + * +-----------------------+--------+--------+------------------------+ + * | 0xffff_fe00_0000_0000 | -2TB | 0GB | VM_MIN_KERNEL_ADDRESS | + * | | | | LOW_GLOBALS | + * +-----------------------+--------+--------+------------------------+ + */ +#define VM_KERNEL_POINTER_SIGNIFICANT_BITS 41 + +// Kernel VA space starts at -2TB +#define VM_MIN_KERNEL_ADDRESS ((vm_address_t) (0ULL - TiB(2))) + +// 1.25TB for static_memory_region, 512GB for kernel heap, 256GB for KASAN +#define VM_MAX_KERNEL_ADDRESS ((vm_address_t) (VM_MIN_KERNEL_ADDRESS + GiB(64) + GiB(512) - 1)) +#else // ARM_LARGE_MEMORY +/* + * +-----------------------+--------+--------+------------------------+ + * | 0xffff_fffc_0000_0000 | -16GB | 112GB | KASAN_SHADOW_MIN | + * | | | | VM_MAX_KERNEL_ADDRESS | + * +-----------------------+--------+--------+------------------------+ + * | 0xffff_fff0_0700_4000 | | | VM_KERNEL_LINK_ADDRESS | + * +-----------------------+--------+--------+------------------------+ + * | 0xffff_fff0_0000_0000 | -64GB | 64GB | LOW_GLOBALS | + * | | | | PMAP_HEAP_RANGE_START | <= H8 + * +-----------------------+--------+--------+------------------------+ + * | 0xffff_ffe0_0000_0000 | -128GB | 0GB | VM_MIN_KERNEL_ADDRESS | + * | | | | PMAP_HEAP_RANGE_START | >= H9 + * +-----------------------+--------+--------+------------------------+ + */ 
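+// 0xffff_ffe0_0000_0000 sits 128GB (2^37 bytes) below the top of the address
+// space, so kernel pointers vary only in their low 37 bits; the 2TB (2^41)
+// ARM_LARGE_MEMORY layout above likewise uses 41 significant bits.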
#define VM_KERNEL_POINTER_SIGNIFICANT_BITS 37 #define VM_MIN_KERNEL_ADDRESS ((vm_address_t) 0xffffffe000000000ULL) #define VM_MAX_KERNEL_ADDRESS ((vm_address_t) 0xfffffffbffffffffULL) +#endif // ARM_LARGE_MEMORY + +#else // !XNU_KERNEL_PRIVATE +// Inform kexts about largest possible kernel address space +#define VM_MIN_KERNEL_ADDRESS ((vm_address_t) (0ULL - TiB(2))) +#define VM_MAX_KERNEL_ADDRESS ((vm_address_t) 0xfffffffbffffffffULL) +#endif // XNU_KERNEL_PRIVATE #else #error architecture not supported #endif -#define VM_MIN_KERNEL_AND_KEXT_ADDRESS \ - VM_MIN_KERNEL_ADDRESS +#define VM_MIN_KERNEL_AND_KEXT_ADDRESS VM_MIN_KERNEL_ADDRESS #if __has_feature(ptrauth_calls) #include @@ -208,6 +272,15 @@ extern unsigned long gVirtBase, gPhysBase, gPhysSize; #define isphysmem(a) (((vm_address_t)(a) - gPhysBase) < gPhysSize) #define physmap_enclosed(a) isphysmem(a) +/* + * gPhysBase/Size only represent kernel-managed memory. These globals represent + * the actual DRAM base address and size as reported by iBoot through the device + * tree. + */ +#include +extern uint64_t gDramBase, gDramSize; +#define is_dram_addr(addr) (((uint64_t)(addr) - gDramBase) < gDramSize) + #if KASAN /* Increase the stack sizes to account for the redzones that get added to every * stack object. */ @@ -219,7 +292,15 @@ extern unsigned long gVirtBase, gPhysBase, gPhysSize; */ # define KERNEL_STACK_SIZE (2*4*4096) #else -# define KERNEL_STACK_SIZE (4*4096) +/* + * KERNEL_STACK_MULTIPLIER can be defined externally to get a larger + * kernel stack size. For example, adding "-DKERNEL_STACK_MULTIPLIER=2" + * helps avoid kernel stack overflows when compiling with "-O0". + */ +#ifndef KERNEL_STACK_MULTIPLIER +#define KERNEL_STACK_MULTIPLIER (1) +#endif /* KERNEL_STACK_MULTIPLIER */ +# define KERNEL_STACK_SIZE (4*4096*KERNEL_STACK_MULTIPLIER) #endif #define INTSTACK_SIZE (4*4096) @@ -243,7 +324,7 @@ extern unsigned long gVirtBase, gPhysBase, gPhysSize; #if defined (__arm__) #define VM_KERNEL_LINK_ADDRESS ((vm_address_t) 0x80000000) #elif defined (__arm64__) -#define VM_KERNEL_LINK_ADDRESS ((vm_address_t) 0xFFFFFFF007004000) +/* VM_KERNEL_LINK_ADDRESS defined in makedefs/MakeInc.def for arm64 platforms */ #else #error architecture not supported #endif diff --git a/osfmk/mach/audit_triggers.defs b/osfmk/mach/audit_triggers.defs index 1d6e279b6..acf750075 100644 --- a/osfmk/mach/audit_triggers.defs +++ b/osfmk/mach/audit_triggers.defs @@ -43,7 +43,15 @@ subsystem #include #include +import ; + +type string_t = c_string [*:1024]; + simpleroutine audit_triggers( - audit_port : mach_port_t; + audit_port : mach_port_t; in flags : int); +simpleroutine audit_analytics( + audit_port : mach_port_t; + in caller_id : string_t; + in caller_name : string_t); diff --git a/osfmk/mach/audit_triggers_types.h b/osfmk/mach/audit_triggers_types.h new file mode 100644 index 000000000..3ddf1e947 --- /dev/null +++ b/osfmk/mach/audit_triggers_types.h @@ -0,0 +1 @@ +typedef const char* string_t; diff --git a/osfmk/mach/coalition.h b/osfmk/mach/coalition.h index 82be1cf5a..6b32ba6c7 100644 --- a/osfmk/mach/coalition.h +++ b/osfmk/mach/coalition.h @@ -31,6 +31,8 @@ /* code shared by userspace and xnu */ +#define COALITION_SPAWN_ENTITLEMENT "com.apple.private.coalition-spawn" + #define COALITION_CREATE_FLAGS_MASK ((uint32_t)0xFF1) #define COALITION_CREATE_FLAGS_PRIVILEGED ((uint32_t)0x01) @@ -149,11 +151,16 @@ struct coalition_resource_usage { uint64_t cpu_time_eqos[COALITION_NUM_THREAD_QOS_TYPES]; uint64_t cpu_instructions; uint64_t cpu_cycles; + uint64_t 
fs_metadata_writes; + uint64_t pm_writes; }; #ifdef PRIVATE /* definitions shared by only xnu + Libsyscall */ +/* coalition id for kernel task */ +#define COALITION_ID_KERNEL 1 + /* Syscall flavors */ #define COALITION_OP_CREATE 1 #define COALITION_OP_TERMINATE 2 diff --git a/osfmk/mach/dyld_kernel_fixups.h b/osfmk/mach/dyld_kernel_fixups.h new file mode 100644 index 000000000..0d26fb0d0 --- /dev/null +++ b/osfmk/mach/dyld_kernel_fixups.h @@ -0,0 +1,610 @@ +/* + * Copyright (c) 2019 Apple Inc. All rights reserved. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. + * + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ + */ + +/* + * This file contains static dyld helper functions for + * exclusive use in platform startup code. + */ + +#include +#include + +#if defined(HAS_APPLE_PAC) +#include +#endif /* defined(HAS_APPLE_PAC) */ + +#ifndef dyldLogFunc +#define dyldLogFunc(msg, ...) kprintf(msg, ## __VA_ARGS__) +#endif + +#if 0 +#define dyldLogFunc(msg, ...) ({int _wait = 0; do { asm volatile ("yield" : "+r"(_wait) : ); } while(!_wait); }) +#endif +#define LogFixups 0 + +// cannot safely callout out to functions like strcmp before initial fixup +static inline int +strings_are_equal(const char* a, const char* b) +{ + while (*a && *b) { + if (*a != *b) { + return 0; + } + ++a; + ++b; + } + return *a == *b; +} + +/* + * Functions from dyld to rebase, fixup and sign the contents of MH_FILESET + * kernel collections. 
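+ *
+ * Each rebased location encodes the distance to the next fixup in the same
+ * page, so walk_chain() starts at the per-page offset recorded in
+ * dyld_chained_starts_in_segment and keeps stepping by fixup64.next (scaled
+ * by 4 bytes for DYLD_CHAINED_PTR_64_KERNEL_CACHE) until next is zero.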
+ */ + +union ChainedFixupPointerOnDisk { + uint64_t raw64; + struct dyld_chained_ptr_64_kernel_cache_rebase fixup64; +}; + +static uint64_t __unused +sign_pointer(struct dyld_chained_ptr_64_kernel_cache_rebase pointer __unused, + void *loc __unused, + uint64_t target __unused) +{ +#if HAS_APPLE_PAC + uint64_t discriminator = pointer.diversity; + if (pointer.addrDiv) { + if (discriminator) { + discriminator = __builtin_ptrauth_blend_discriminator(loc, discriminator); + } else { + discriminator = (uint64_t)(uintptr_t)loc; + } + } + switch (pointer.key) { + case 0: // IA + return (uint64_t)__builtin_ptrauth_sign_unauthenticated((void*)target, 0, discriminator); + case 1: // IB + return (uint64_t)__builtin_ptrauth_sign_unauthenticated((void*)target, 1, discriminator); + case 2: // DA + return (uint64_t)__builtin_ptrauth_sign_unauthenticated((void*)target, 2, discriminator); + case 3: // DB + return (uint64_t)__builtin_ptrauth_sign_unauthenticated((void*)target, 3, discriminator); + } +#endif + return target; +} + +static inline __attribute__((__always_inline__)) void +fixup_value(union ChainedFixupPointerOnDisk* fixupLoc __unused, + const struct dyld_chained_starts_in_segment* segInfo, + uintptr_t slide __unused, + const void* basePointers[KCNumKinds] __unused, + int* stop) +{ + if (LogFixups) { + dyldLogFunc("[LOG] kernel-fixups: fixup_value %p\n", fixupLoc); + } + switch (segInfo->pointer_format) { +#if __LP64__ + case DYLD_CHAINED_PTR_64_KERNEL_CACHE: + case DYLD_CHAINED_PTR_X86_64_KERNEL_CACHE: { + const void* baseAddress = basePointers[fixupLoc->fixup64.cacheLevel]; + if (baseAddress == 0) { + dyldLogFunc("Invalid cache level: %d\n", fixupLoc->fixup64.cacheLevel); + *stop = 1; + return; + } + uintptr_t slidValue = (uintptr_t)baseAddress + fixupLoc->fixup64.target; + if (LogFixups) { + dyldLogFunc("[LOG] kernel-fixups: slidValue %p (base=%p, target=%p)\n", (void*)slidValue, + (const void *)baseAddress, (void *)(uintptr_t)fixupLoc->fixup64.target); + } +#if HAS_APPLE_PAC + if (fixupLoc->fixup64.isAuth) { + slidValue = sign_pointer(fixupLoc->fixup64, fixupLoc, slidValue); + } +#else + if (fixupLoc->fixup64.isAuth) { + dyldLogFunc("Unexpected authenticated fixup\n"); + *stop = 1; + return; + } +#endif // HAS_APPLE_PAC + fixupLoc->raw64 = slidValue; + break; + } +#endif // __LP64__ + default: + dyldLogFunc("unsupported pointer chain format: 0x%04X", segInfo->pointer_format); + *stop = 1; + break; + } +} + +static inline __attribute__((__always_inline__)) int +walk_chain(const struct mach_header_64* mh, + const struct dyld_chained_starts_in_segment* segInfo, + uint32_t pageIndex, + uint16_t offsetInPage, + uintptr_t slide __unused, + const void* basePointers[KCNumKinds]) +{ + if (LogFixups) { + dyldLogFunc("[LOG] kernel-fixups: walk_chain page[%d]\n", pageIndex); + } + int stop = 0; + uintptr_t pageContentStart = (uintptr_t)mh + (uintptr_t)segInfo->segment_offset + + (pageIndex * segInfo->page_size); + union ChainedFixupPointerOnDisk* chain = (union ChainedFixupPointerOnDisk*)(pageContentStart + offsetInPage); + int chainEnd = 0; + if (LogFixups) { + dyldLogFunc("[LOG] kernel-fixups: segInfo->segment_offset 0x%llx\n", segInfo->segment_offset); + dyldLogFunc("[LOG] kernel-fixups: segInfo->segment_pagesize %d\n", segInfo->page_size); + dyldLogFunc("[LOG] kernel-fixups: segInfo pointer format %d\n", segInfo->pointer_format); + } + while (!stop && !chainEnd) { + // copy chain content, in case handler modifies location to final value + if (LogFixups) { + dyldLogFunc("[LOG] kernel-fixups: value of 
chain %p", chain); + } + union ChainedFixupPointerOnDisk chainContent __unused = *chain; + fixup_value(chain, segInfo, slide, basePointers, &stop); + if (!stop) { + switch (segInfo->pointer_format) { +#if __LP64__ + case DYLD_CHAINED_PTR_64_KERNEL_CACHE: + if (chainContent.fixup64.next == 0) { + chainEnd = 1; + } else { + if (LogFixups) { + dyldLogFunc("[LOG] kernel-fixups: chainContent fixup 64.next %d\n", chainContent.fixup64.next); + } + chain = (union ChainedFixupPointerOnDisk*)((uintptr_t)chain + chainContent.fixup64.next * 4); + } + break; + case DYLD_CHAINED_PTR_X86_64_KERNEL_CACHE: + if (chainContent.fixup64.next == 0) { + chainEnd = 1; + } else { + if (LogFixups) { + dyldLogFunc("[LOG] kernel-fixups: chainContent fixup x86 64.next %d\n", chainContent.fixup64.next); + } + chain = (union ChainedFixupPointerOnDisk*)((uintptr_t)chain + chainContent.fixup64.next); + } + break; +#endif // __LP64__ + default: + dyldLogFunc("unknown pointer format 0x%04X", segInfo->pointer_format); + stop = 1; + } + } + } + return stop; +} + +static inline __attribute__((__always_inline__)) int +kernel_collection_slide(const struct mach_header_64* mh, const void* basePointers[KCNumKinds]) +{ + // First find the slide and chained fixups load command + uint64_t textVMAddr = 0; + const struct linkedit_data_command* chainedFixups = 0; + uint64_t linkeditVMAddr = 0; + uint64_t linkeditFileOffset = 0; + + if (LogFixups) { + dyldLogFunc("[LOG] kernel-fixups: parsing load commands\n"); + } + + const struct load_command* startCmds = 0; + if (mh->magic == MH_MAGIC_64) { + startCmds = (struct load_command*)((uintptr_t)mh + sizeof(struct mach_header_64)); + } else if (mh->magic == MH_MAGIC) { + startCmds = (struct load_command*)((uintptr_t)mh + sizeof(struct mach_header)); + } else { + //const uint32_t* h = (uint32_t*)mh; + //diag.error("file does not start with MH_MAGIC[_64]: 0x%08X 0x%08X", h[0], h [1]); + return 1; // not a mach-o file + } + const struct load_command* const cmdsEnd = (struct load_command*)((uintptr_t)startCmds + mh->sizeofcmds); + const struct load_command* cmd = startCmds; + for (uint32_t i = 0; i < mh->ncmds; ++i) { + if (LogFixups) { + dyldLogFunc("[LOG] kernel-fixups: parsing load command %d with cmd=0x%x\n", i, cmd->cmd); + } + const struct load_command* nextCmd = (struct load_command*)((uintptr_t)cmd + cmd->cmdsize); + if (cmd->cmdsize < 8) { + //diag.error("malformed load command #%d of %d at %p with mh=%p, size (0x%X) too small", i, this->ncmds, cmd, this, cmd->cmdsize); + return 1; + } + if ((nextCmd > cmdsEnd) || (nextCmd < startCmds)) { + //diag.error("malformed load command #%d of %d at %p with mh=%p, size (0x%X) is too large, load commands end at %p", i, this->ncmds, cmd, this, cmd->cmdsize, cmdsEnd); + return 1; + } + if (cmd->cmd == LC_DYLD_CHAINED_FIXUPS) { + chainedFixups = (const struct linkedit_data_command*)cmd; + } else if (cmd->cmd == LC_SEGMENT_64) { + const struct segment_command_64* seg = (const struct segment_command_64*)(uintptr_t)cmd; + + if (LogFixups) { + dyldLogFunc("[LOG] kernel-fixups: segment name vm start and size: %s 0x%llx 0x%llx\n", + seg->segname, seg->vmaddr, seg->vmsize); + } + if (strings_are_equal(seg->segname, "__TEXT")) { + textVMAddr = seg->vmaddr; + } else if (strings_are_equal(seg->segname, "__LINKEDIT")) { + linkeditVMAddr = seg->vmaddr; + linkeditFileOffset = seg->fileoff; + } + } + cmd = nextCmd; + } + + uintptr_t slide = (uintptr_t)mh - (uintptr_t)textVMAddr; + + if (LogFixups) { + dyldLogFunc("[LOG] kernel-fixups: slide %lx\n", slide); + } + + 
if (chainedFixups == 0) { + return 0; + } + + if (LogFixups) { + dyldLogFunc("[LOG] kernel-fixups: found chained fixups %p\n", chainedFixups); + dyldLogFunc("[LOG] kernel-fixups: found linkeditVMAddr %p\n", (void*)linkeditVMAddr); + dyldLogFunc("[LOG] kernel-fixups: found linkeditFileOffset %p\n", (void*)linkeditFileOffset); + } + + // Now we have the chained fixups, walk it to apply all the rebases + uint64_t offsetInLinkedit = chainedFixups->dataoff - linkeditFileOffset; + uintptr_t linkeditStartAddr = (uintptr_t)linkeditVMAddr + slide; + if (LogFixups) { + dyldLogFunc("[LOG] kernel-fixups: offsetInLinkedit %llx\n", offsetInLinkedit); + dyldLogFunc("[LOG] kernel-fixups: linkeditStartAddr %p\n", (void*)linkeditStartAddr); + } + + const struct dyld_chained_fixups_header* fixupsHeader = (const struct dyld_chained_fixups_header*)(linkeditStartAddr + offsetInLinkedit); + const struct dyld_chained_starts_in_image* fixupStarts = (const struct dyld_chained_starts_in_image*)((uintptr_t)fixupsHeader + fixupsHeader->starts_offset); + if (LogFixups) { + dyldLogFunc("[LOG] kernel-fixups: fixupsHeader %p\n", fixupsHeader); + dyldLogFunc("[LOG] kernel-fixups: fixupStarts %p\n", fixupStarts); + } + + int stopped = 0; + for (uint32_t segIndex = 0; segIndex < fixupStarts->seg_count && !stopped; ++segIndex) { + if (LogFixups) { + dyldLogFunc("[LOG] kernel-fixups: segment %d\n", segIndex); + } + if (fixupStarts->seg_info_offset[segIndex] == 0) { + continue; + } + const struct dyld_chained_starts_in_segment* segInfo = (const struct dyld_chained_starts_in_segment*)((uintptr_t)fixupStarts + fixupStarts->seg_info_offset[segIndex]); + for (uint32_t pageIndex = 0; pageIndex < segInfo->page_count && !stopped; ++pageIndex) { + uint16_t offsetInPage = segInfo->page_start[pageIndex]; + if (offsetInPage == DYLD_CHAINED_PTR_START_NONE) { + continue; + } + if (offsetInPage & DYLD_CHAINED_PTR_START_MULTI) { + // FIXME: Implement this + return 1; + } else { + // one chain per page + if (walk_chain(mh, segInfo, pageIndex, offsetInPage, slide, basePointers)) { + stopped = 1; + } + } + } + } + + return stopped; +} + +/* + * Utility functions to adjust the load command vmaddrs in constituent MachO's + * of an MH_FILESET kernel collection. 
+ */ + +static void +kernel_collection_adjust_fileset_entry_addrs(struct mach_header_64 *mh, uintptr_t adj) +{ + struct load_command *lc; + struct segment_command_64 *seg, *linkedit_cmd = NULL; + struct symtab_command *symtab_cmd = NULL; + struct section_64 *sec; + uint32_t i, j; + + lc = (struct load_command *)((uintptr_t)mh + sizeof(*mh)); + for (i = 0; i < mh->ncmds; i++, + lc = (struct load_command *)((uintptr_t)lc + lc->cmdsize)) { + if (lc->cmd == LC_SYMTAB) { + symtab_cmd = (struct symtab_command *)lc; + continue; + } + if (lc->cmd != LC_SEGMENT_64) { + continue; + } + if (strcmp(((struct segment_command_64 *)(uintptr_t)lc)->segname, SEG_LINKEDIT) == 0) { + linkedit_cmd = ((struct segment_command_64 *)(uintptr_t)lc); + } + + seg = (struct segment_command_64 *)(uintptr_t)lc; + seg->vmaddr += adj; + /* slide/adjust every section in the segment */ + sec = (struct section_64 *)((uintptr_t)seg + sizeof(*seg)); + for (j = 0; j < seg->nsects; j++, sec++) { + sec->addr += adj; + } + } + + + if (symtab_cmd != NULL && linkedit_cmd != NULL) { + struct nlist_64 *sym; + uint32_t cnt = 0; + + if (LogFixups) { + dyldLogFunc("[LOG] Symbols:\n"); + dyldLogFunc("[LOG] nsyms: %d, symoff: 0x%x\n", symtab_cmd->nsyms, symtab_cmd->symoff); + } + + if (symtab_cmd->nsyms == 0) { + dyldLogFunc("[LOG] No symbols to relocate\n"); + } + + sym = (struct nlist_64 *)(linkedit_cmd->vmaddr + symtab_cmd->symoff - linkedit_cmd->fileoff); + + for (i = 0; i < symtab_cmd->nsyms; i++) { + if (sym[i].n_type & N_STAB) { + continue; + } + sym[i].n_value += adj; + cnt++; + } + if (LogFixups) { + dyldLogFunc("[LOG] KASLR: Relocated %d symbols\n", cnt); + } + } +} + +static void +kernel_collection_adjust_mh_addrs(struct mach_header_64 *kc_mh, uintptr_t adj, + bool pageable, uintptr_t *kc_lowest_vmaddr, uintptr_t *kc_highest_vmaddr, + uintptr_t *kc_lowest_ro_vmaddr, uintptr_t *kc_highest_ro_vmaddr, + uintptr_t *kc_lowest_rx_vmaddr, uintptr_t *kc_highest_rx_vmaddr, + uintptr_t *kc_highest_nle_vmaddr) +{ + assert(kc_mh->filetype == MH_FILESET); + + struct load_command *lc; + struct fileset_entry_command *fse; + struct segment_command_64 *seg; + struct section_64 *sec; + struct mach_header_64 *mh; + uintptr_t lowest_vmaddr = UINTPTR_MAX, highest_vmaddr = 0, highest_nle_vmaddr = 0; + uintptr_t lowest_ro_vmaddr = UINTPTR_MAX, highest_ro_vmaddr = 0; + uintptr_t lowest_rx_vmaddr = UINTPTR_MAX, highest_rx_vmaddr = 0; + uint32_t i, j; + int is_linkedit = 0; + + /* + * Slide (offset/adjust) every segment/section of every kext contained + * in this MH_FILESET mach-o. + */ + lc = (struct load_command *)((uintptr_t)kc_mh + sizeof(*kc_mh)); + for (i = 0; i < kc_mh->ncmds; i++, + lc = (struct load_command *)((uintptr_t)lc + lc->cmdsize)) { + if (lc->cmd == LC_FILESET_ENTRY) { + fse = (struct fileset_entry_command *)(uintptr_t)lc; + /* + * The fileset_entry contains a pointer to the mach-o + * of a kext (or the kernel). Slide/adjust this command, and + * then slide/adjust all the sub-commands in the mach-o. 
+ */ + if (LogFixups) { + dyldLogFunc("[MH] sliding %s", (char *)((uintptr_t)fse + + (uintptr_t)(fse->entry_id.offset))); + } + mh = (struct mach_header_64 *)((uintptr_t)fse->vmaddr + adj); + if (!pageable) { + /* + * Do not adjust mach headers of entries in pageable KC as that + * would pull those pages in prematurely + */ + kernel_collection_adjust_fileset_entry_addrs(mh, adj); + } + fse->vmaddr += adj; + } else if (lc->cmd == LC_SEGMENT_64) { + /* + * Slide/adjust all LC_SEGMENT_64 commands in the fileset + * (and any sections in those segments) + */ + seg = (struct segment_command_64 *)(uintptr_t)lc; + seg->vmaddr += adj; + sec = (struct section_64 *)((uintptr_t)seg + sizeof(*seg)); + for (j = 0; j < seg->nsects; j++, sec++) { + sec->addr += adj; + } + if (seg->vmsize == 0) { + continue; + } + /* + * Record vmaddr range covered by all non-empty segments in the + * kernel collection. + */ + if (seg->vmaddr < lowest_vmaddr) { + lowest_vmaddr = (uintptr_t)seg->vmaddr; + } + + is_linkedit = strings_are_equal(seg->segname, "__LINKEDIT"); + + if (seg->vmaddr + seg->vmsize > highest_vmaddr) { + highest_vmaddr = (uintptr_t)seg->vmaddr + (uintptr_t)seg->vmsize; + if (!is_linkedit) { + highest_nle_vmaddr = highest_vmaddr; + } + } + + if ((seg->maxprot & VM_PROT_WRITE) || is_linkedit) { + continue; + } + /* + * Record vmaddr range covered by non-empty read-only segments + * in the kernel collection (excluding LINKEDIT). + */ + if (seg->vmaddr < lowest_ro_vmaddr) { + lowest_ro_vmaddr = (uintptr_t)seg->vmaddr; + } + if (seg->vmaddr + seg->vmsize > highest_ro_vmaddr) { + highest_ro_vmaddr = (uintptr_t)seg->vmaddr + (uintptr_t)seg->vmsize; + } + + if (!(seg->maxprot & VM_PROT_EXECUTE)) { + continue; + } + /* + * Record vmaddr range covered by contiguous execute segments + * in the kernel collection. + */ + if (seg->vmaddr < lowest_rx_vmaddr && (lowest_rx_vmaddr <= seg->vmaddr + seg->vmsize || lowest_rx_vmaddr == UINTPTR_MAX)) { + lowest_rx_vmaddr = (uintptr_t)seg->vmaddr; + } + if (seg->vmaddr + seg->vmsize > highest_rx_vmaddr && (highest_rx_vmaddr >= seg->vmaddr || highest_rx_vmaddr == 0)) { + highest_rx_vmaddr = (uintptr_t)seg->vmaddr + (uintptr_t)seg->vmsize; + } + } + } + if (kc_lowest_vmaddr) { + *kc_lowest_vmaddr = lowest_vmaddr; + } + if (kc_highest_vmaddr) { + *kc_highest_vmaddr = highest_vmaddr; + } + if (kc_lowest_ro_vmaddr) { + *kc_lowest_ro_vmaddr = lowest_ro_vmaddr; + } + if (kc_highest_ro_vmaddr) { + *kc_highest_ro_vmaddr = highest_ro_vmaddr; + } + if (kc_lowest_rx_vmaddr) { + *kc_lowest_rx_vmaddr = lowest_rx_vmaddr; + } + if (kc_highest_rx_vmaddr) { + *kc_highest_rx_vmaddr = highest_rx_vmaddr; + } + if (kc_highest_nle_vmaddr) { + *kc_highest_nle_vmaddr = highest_nle_vmaddr; + } +} + +/* + * Rebaser functions for the traditional arm64e static kernelcache with + * threaded rebase. + */ + +static void +rebase_chain(uintptr_t chainStartAddress, uint64_t stepMultiplier, uintptr_t baseAddress __unused, uint64_t slide) +{ + uint64_t delta = 0; + uintptr_t address = chainStartAddress; + do { + uint64_t value = *(uint64_t*)address; + +#if HAS_APPLE_PAC + uint16_t diversity = (uint16_t)(value >> 32); + bool hasAddressDiversity = (value & (1ULL << 48)) != 0; + ptrauth_key key = (ptrauth_key)((value >> 49) & 0x3); +#endif + bool isAuthenticated = (value & (1ULL << 63)) != 0; + bool isRebase = (value & (1ULL << 62)) == 0; + if (isRebase) { + if (isAuthenticated) { + // The new value for a rebase is the low 32-bits of the threaded value plus the slide. 
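// Editor's note (annotation, not part of the patch): the bit layout this
// decoder assumes for a threaded-rebase entry, read off the masks and
// shifts used above and below:
//   bits  0..31  target    (low 32 bits, offset from the mach_header)
//   bits 32..47  diversity
//   bit  48      address-diversity flag
//   bits 49..50  ptrauth key (0=IA, 1=IB, 2=DA, 3=DB)
//   bits 51..61  delta to the next fixup, in units of stepMultiplier bytes
//   bit  62      0 = rebase, 1 = bind
//   bit  63      1 = authenticated
// Worked example: value 0x8000000000001000 is an authenticated IA rebase
// with diversity 0 and delta 0; it becomes baseAddress + 0x1000 + slide,
// is signed with key IA and discriminator 0, and the chain ends there.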
+ uint64_t newValue = (value & 0xFFFFFFFF) + slide; + // Add in the offset from the mach_header + newValue += baseAddress; +#if HAS_APPLE_PAC + // We have bits to merge in to the discriminator + uintptr_t discriminator = diversity; + if (hasAddressDiversity) { + // First calculate a new discriminator using the address of where we are trying to store the value + // Only blend if we have a discriminator + if (discriminator) { + discriminator = __builtin_ptrauth_blend_discriminator((void*)address, discriminator); + } else { + discriminator = address; + } + } + switch (key) { + case ptrauth_key_asia: + newValue = (uintptr_t)__builtin_ptrauth_sign_unauthenticated((void*)newValue, ptrauth_key_asia, discriminator); + break; + case ptrauth_key_asib: + newValue = (uintptr_t)__builtin_ptrauth_sign_unauthenticated((void*)newValue, ptrauth_key_asib, discriminator); + break; + case ptrauth_key_asda: + newValue = (uintptr_t)__builtin_ptrauth_sign_unauthenticated((void*)newValue, ptrauth_key_asda, discriminator); + break; + case ptrauth_key_asdb: + newValue = (uintptr_t)__builtin_ptrauth_sign_unauthenticated((void*)newValue, ptrauth_key_asdb, discriminator); + break; + } +#endif + *(uint64_t*)address = newValue; + } else { + // Regular pointer which needs to fit in 51-bits of value. + // C++ RTTI uses the top bit, so we'll allow the whole top-byte + // and the bottom 43-bits to be fit in to 51-bits. + uint64_t top8Bits = value & 0x0007F80000000000ULL; + uint64_t bottom43Bits = value & 0x000007FFFFFFFFFFULL; + uint64_t targetValue = (top8Bits << 13) | (((intptr_t)(bottom43Bits << 21) >> 21) & 0x00FFFFFFFFFFFFFF); + targetValue = targetValue + slide; + *(uint64_t*)address = targetValue; + } + } + + // The delta is bits [51..61] + // And bit 62 is to tell us if we are a rebase (0) or bind (1) + value &= ~(1ULL << 62); + delta = (value & 0x3FF8000000000000) >> 51; + address += delta * stepMultiplier; + } while (delta != 0); +} + +static bool __unused +rebase_threaded_starts(uint32_t *threadArrayStart, uint32_t *threadArrayEnd, + uintptr_t macho_header_addr, uintptr_t macho_header_vmaddr, size_t slide) +{ + uint32_t threadStartsHeader = *threadArrayStart; + uint64_t stepMultiplier = (threadStartsHeader & 1) == 1 ? 8 : 4; + for (uint32_t* threadOffset = threadArrayStart + 1; threadOffset != threadArrayEnd; ++threadOffset) { + if (*threadOffset == 0xFFFFFFFF) { + break; + } + rebase_chain(macho_header_addr + *threadOffset, stepMultiplier, macho_header_vmaddr, slide); + } + return true; +} diff --git a/osfmk/mach/host_priv.defs b/osfmk/mach/host_priv.defs index 83f601b5a..f04f5969b 100644 --- a/osfmk/mach/host_priv.defs +++ b/osfmk/mach/host_priv.defs @@ -215,7 +215,12 @@ routine kmod_control( * examples include the master device port. * There are a limited number of slots available for system servers. */ -routine host_get_special_port( +routine +#if KERNEL_SERVER && MACH_KERNEL_PRIVATE +host_get_special_port_from_user( +#else +host_get_special_port( +#endif host_priv : host_priv_t; node : int; which : int; @@ -225,7 +230,12 @@ routine host_get_special_port( * Set a given special port for the local node. * See host_get_special_port. 
*/ -routine host_set_special_port( +routine +#if KERNEL_SERVER && MACH_KERNEL_PRIVATE +host_set_special_port_from_user( +#else +host_set_special_port( +#endif host_priv : host_priv_t; which : int; port : mach_port_t); diff --git a/osfmk/mach/i386/_structs.h b/osfmk/mach/i386/_structs.h index c9cc8992c..2c79050b6 100644 --- a/osfmk/mach/i386/_structs.h +++ b/osfmk/mach/i386/_structs.h @@ -603,7 +603,48 @@ _STRUCT_X86_DEBUG_STATE32 unsigned int __dr6; unsigned int __dr7; }; + +#define _STRUCT_X86_INSTRUCTION_STATE struct __x86_instruction_state +_STRUCT_X86_INSTRUCTION_STATE +{ + int __insn_stream_valid_bytes; + int __insn_offset; + int __out_of_synch; /* + * non-zero when the cacheline that includes the insn_offset + * is replaced in the insn_bytes array due to a mismatch + * detected when comparing it with the same cacheline in memory + */ +#define _X86_INSTRUCTION_STATE_MAX_INSN_BYTES (2448 - 64 - 4) + __uint8_t __insn_bytes[_X86_INSTRUCTION_STATE_MAX_INSN_BYTES]; +#define _X86_INSTRUCTION_STATE_CACHELINE_SIZE 64 + __uint8_t __insn_cacheline[_X86_INSTRUCTION_STATE_CACHELINE_SIZE]; +}; + +#define _STRUCT_LAST_BRANCH_RECORD struct __last_branch_record +_STRUCT_LAST_BRANCH_RECORD +{ + __uint64_t __from_ip; + __uint64_t __to_ip; + __uint32_t __mispredict : 1, + __tsx_abort : 1, + __in_tsx : 1, + __cycle_count: 16, + __reserved : 13; +}; + +#define _STRUCT_LAST_BRANCH_STATE struct __last_branch_state +_STRUCT_LAST_BRANCH_STATE +{ + int __lbr_count; + __uint32_t __lbr_supported_tsx : 1, + __lbr_supported_cycle_count : 1, + __reserved : 30; +#define __LASTBRANCH_MAX 32 + _STRUCT_LAST_BRANCH_RECORD __lbrs[__LASTBRANCH_MAX]; +}; + #else /* !__DARWIN_UNIX03 */ + #define _STRUCT_X86_DEBUG_STATE32 struct x86_debug_state32 _STRUCT_X86_DEBUG_STATE32 { @@ -616,6 +657,45 @@ _STRUCT_X86_DEBUG_STATE32 unsigned int dr6; unsigned int dr7; }; + +#define _STRUCT_X86_INSTRUCTION_STATE struct __x86_instruction_state +_STRUCT_X86_INSTRUCTION_STATE +{ + int insn_stream_valid_bytes; + int insn_offset; + int out_of_synch; /* + * non-zero when the cacheline that includes the insn_offset + * is replaced in the insn_bytes array due to a mismatch + * detected when comparing it with the same cacheline in memory + */ +#define x86_INSTRUCTION_STATE_MAX_INSN_BYTES (2448 - 64 - 4) + __uint8_t insn_bytes[x86_INSTRUCTION_STATE_MAX_INSN_BYTES]; +#define x86_INSTRUCTION_STATE_CACHELINE_SIZE 64 + __uint8_t insn_cacheline[x86_INSTRUCTION_STATE_CACHELINE_SIZE]; +}; + +#define _STRUCT_LAST_BRANCH_RECORD struct __last_branch_record +_STRUCT_LAST_BRANCH_RECORD +{ + __uint64_t from_ip; + __uint64_t to_ip; + __uint32_t mispredict : 1, + tsx_abort : 1, + in_tsx : 1, + cycle_count: 16, + reserved : 13; +}; + +#define _STRUCT_LAST_BRANCH_STATE struct __last_branch_state +_STRUCT_LAST_BRANCH_STATE +{ + int lbr_count; + __uint32_t lbr_supported_tsx : 1, + lbr_supported_cycle_count : 1, + reserved : 30; +#define __LASTBRANCH_MAX 32 + _STRUCT_LAST_BRANCH_RECORD lbrs[__LASTBRANCH_MAX]; +}; #endif /* !__DARWIN_UNIX03 */ #define _STRUCT_X86_PAGEIN_STATE struct __x86_pagein_state diff --git a/osfmk/mach/i386/sdt_isa.h b/osfmk/mach/i386/sdt_isa.h index 961d9f638..3c2754db0 100644 --- a/osfmk/mach/i386/sdt_isa.h +++ b/osfmk/mach/i386/sdt_isa.h @@ -25,7 +25,7 @@ */ #ifndef _MACH_I386_SDT_ISA_H -#define _MACH_I386_SDT_ISA_H +#define _MACH_I386_SDT_ISA_H /* * Only define when testing. 
This makes the calls into actual calls to @@ -40,174 +40,203 @@ * For the kernel, set an explicit global label so the symbol can be located */ #ifdef __x86_64__ -#define DTRACE_LAB(p, n) \ - "__dtrace_probe$" DTRACE_TOSTRING(%=__LINE__) DTRACE_STRINGIFY(_##p##___##n) - -#define DTRACE_LABEL(p, n) \ - ".section __DATA, __data\n\t" \ - ".globl " DTRACE_LAB(p, n) "\n\t" \ - DTRACE_LAB(p, n) ":" ".quad 1f""\n\t" \ - ".text" "\n\t" \ - "1:" + +#define DTRACE_LABEL(p, n) \ + ".pushsection __DATA, __sdt_cstring, cstring_literals\n\t" \ + "1: .ascii \"" DTRACE_STRINGIFY(p##___) "\\0\"\n\t" \ + "2: .ascii \"" DTRACE_STRINGIFY(n) "\\0\"\n\t" \ + ".popsection" "\n\t" \ + ".pushsection __DATA, __sdt, regular, live_support\n\t" \ + ".p2align 3\n\t" \ + "l3_%=:\n\t" \ + ".quad 4f" "\n\t" \ + ".quad 1b" "\n\t" \ + ".quad 2b" "\n\t" \ + ".popsection" "\n\t" \ + "4:" #else -#define DTRACE_LAB(p, n) \ - "__dtrace_probe$" DTRACE_TOSTRING(%=__LINE__) DTRACE_STRINGIFY(_##p##___##n) - -#define DTRACE_LABEL(p, n) \ - ".section __DATA, __data\n\t" \ - ".globl " DTRACE_LAB(p, n) "\n\t" \ - DTRACE_LAB(p, n) ":" ".long 1f""\n\t" \ - ".text" "\n\t" \ - "1:" + +#define DTRACE_LABEL(p, n) \ + ".pushsection __DATA, __sdt_cstring, cstring_literals\n\t" \ + "1: .ascii \"" DATA_STRINGIFY(p##___) "\\0\"\n\t" \ + "2: .ascii \"" DATA_STRINGIFY(n) "\\0\"\n\t" \ + ".popsection" "\n\t" \ + ".pushsection __DATA, __sdt, regular, live_support\n\t" \ + ".p2align 3\n\t" \ + "l3_%=:\n\t" \ + ".long 4f""\n\t" \ + ".long 1b""\n\t" \ + ".long 2b""\n\t" \ + ".popsection" "\n\t" \ + "4:" #endif -#else /* !KERNEL */ -#define DTRACE_LABEL(p, n) \ +#else /* !KERNEL */ +#define DTRACE_LABEL(p, n) \ "__dtrace_probe$" DTRACE_TOSTRING(%=__LINE__) DTRACE_STRINGIFY(_##p##___##n) ":" "\n\t" -#endif /* !KERNEL */ +#endif /* !KERNEL */ #ifdef DTRACE_CALL_TEST -#define DTRACE_CALL(p,n) \ - DTRACE_LABEL(p,n) \ +#define DTRACE_CALL(p, n) \ + DTRACE_LABEL(p,n) \ DTRACE_CALL_INSN(p,n) #else -#define DTRACE_CALL(p,n) \ - DTRACE_LABEL(p,n) \ +#define DTRACE_CALL(p, n) \ + DTRACE_LABEL(p,n) \ DTRACE_NOPS #endif #ifdef __x86_64__ -#define DTRACE_NOPS \ - "nop" "\n\t" \ - "nop" "\n\t" \ - "nop" "\n\t" +#define DTRACE_NOPS \ + "nop" "\n\t" \ + "nop" "\n\t" \ + "nop" "\n\t" -#define DTRACE_CALL_INSN(p,n) \ +#define DTRACE_CALL_INSN(p, n) \ "call _dtracetest" DTRACE_STRINGIFY(_##p##_##n) "\n\t" -#define ARG1_EXTENT 1 -#define ARGS2_EXTENT 2 -#define ARGS3_EXTENT 3 -#define ARGS4_EXTENT 4 -#define ARGS5_EXTENT 5 -#define ARGS6_EXTENT 6 -#define ARGS7_EXTENT 7 -#define ARGS8_EXTENT 8 -#define ARGS9_EXTENT 9 -#define ARGS10_EXTENT 10 - -#define DTRACE_CALL0ARGS(provider, name) \ - asm volatile ( \ - DTRACE_CALL(provider, name) \ - : \ - : \ +#define ARG1_EXTENT 1 +#define ARGS2_EXTENT 2 +#define ARGS3_EXTENT 3 +#define ARGS4_EXTENT 4 +#define ARGS5_EXTENT 5 +#define ARGS6_EXTENT 6 +#define ARGS7_EXTENT 7 +#define ARGS8_EXTENT 8 +#define ARGS9_EXTENT 9 +#define ARGS10_EXTENT 10 + +#define DTRACE_CALL0ARGS(provider, name) \ + asm volatile ( \ + DTRACE_CALL(provider, name) \ + : \ + : \ + ); + +#define DTRACE_CALL1ARG(provider, name) \ + asm volatile ("movq\t0x0(%0),%%rdi" "\n\t" \ + DTRACE_CALL(provider, name) \ + : \ + : "r" (__dtrace_args) \ + : "memory", "rdi" \ ); -#define DTRACE_CALL1ARG(provider, name) \ - asm volatile ("movq\t0x0(%0),%%rdi" "\n\t" \ - DTRACE_CALL(provider, name) \ - : \ - : "r" (__dtrace_args) \ - : "memory", "rdi" \ +#define DTRACE_CALL2ARGS(provider, name) \ + asm volatile ("movq\t0x0(%0),%%rdi" "\n\t" \ + "movq\t0x8(%0),%%rsi" "\n\t" \ + 
DTRACE_CALL(provider, name) \ + : \ + : "r" (__dtrace_args) \ + : "memory", "rdi", "rsi" \ ); -#define DTRACE_CALL2ARGS(provider, name) \ - asm volatile ("movq\t0x0(%0),%%rdi" "\n\t" \ - "movq\t0x8(%0),%%rsi" "\n\t" \ - DTRACE_CALL(provider, name) \ - : \ - : "r" (__dtrace_args) \ - : "memory", "rdi", "rsi" \ +#define DTRACE_CALL3ARGS(provider, name) \ + asm volatile ("movq\t0x0(%0),%%rdi" "\n\t" \ + "movq\t0x8(%0),%%rsi" "\n\t" \ + "movq\t0x10(%0),%%rdx" "\n\t" \ + DTRACE_CALL(provider, name) \ + : \ + : "r" (__dtrace_args) \ + : "memory", "rdi", "rsi", "rdx" \ ); -#define DTRACE_CALL3ARGS(provider, name) \ - asm volatile ("movq\t0x0(%0),%%rdi" "\n\t" \ - "movq\t0x8(%0),%%rsi" "\n\t" \ - "movq\t0x10(%0),%%rdx" "\n\t" \ - DTRACE_CALL(provider, name) \ - : \ - : "r" (__dtrace_args) \ - : "memory", "rdi", "rsi", "rdx" \ +#define DTRACE_CALL4ARGS(provider, name) \ + asm volatile ("movq\t0x0(%0),%%rdi" "\n\t" \ + "movq\t0x8(%0),%%rsi" "\n\t" \ + "movq\t0x10(%0),%%rdx" "\n\t" \ + "movq\t0x18(%0),%%rcx" "\n\t" \ + DTRACE_CALL(provider, name) \ + : \ + : "r" (__dtrace_args) \ + : "memory", "rdi", "rsi", "rdx", "rcx" \ ); -#define DTRACE_CALL4ARGS(provider, name) \ - asm volatile ("movq\t0x0(%0),%%rdi" "\n\t" \ - "movq\t0x8(%0),%%rsi" "\n\t" \ - "movq\t0x10(%0),%%rdx" "\n\t" \ - "movq\t0x18(%0),%%rcx" "\n\t" \ - DTRACE_CALL(provider, name) \ - : \ - : "r" (__dtrace_args) \ - : "memory", "rdi", "rsi", "rdx", "rcx" \ +#define DTRACE_CALL5ARGS(provider, name) \ + asm volatile ("movq\t0x0(%0),%%rdi" "\n\t" \ + "movq\t0x8(%0),%%rsi" "\n\t" \ + "movq\t0x10(%0),%%rdx" "\n\t" \ + "movq\t0x18(%0),%%rcx" "\n\t" \ + "movq\t0x20(%0),%%r8" "\n\t" \ + DTRACE_CALL(provider, name) \ + : \ + : "r" (__dtrace_args) \ + : "memory", "rdi", "rsi", "rdx", "rcx", "r8" \ ); -#define DTRACE_CALL5ARGS(provider, name) \ - asm volatile ("movq\t0x0(%0),%%rdi" "\n\t" \ - "movq\t0x8(%0),%%rsi" "\n\t" \ - "movq\t0x10(%0),%%rdx" "\n\t" \ - "movq\t0x18(%0),%%rcx" "\n\t" \ - "movq\t0x20(%0),%%r8" "\n\t" \ - DTRACE_CALL(provider, name) \ - : \ - : "r" (__dtrace_args) \ - : "memory", "rdi", "rsi", "rdx", "rcx", "r8" \ +#define DTRACE_CALL6ARGS(provider, name) \ + asm volatile ("movq\t0x0(%0),%%rdi" "\n\t" \ + "movq\t0x8(%0),%%rsi" "\n\t" \ + "movq\t0x10(%0),%%rdx" "\n\t" \ + "movq\t0x18(%0),%%rcx" "\n\t" \ + "movq\t0x20(%0),%%r8" "\n\t" \ + "movq\t0x28(%0),%%r9" "\n\t" \ + DTRACE_CALL(provider, name) \ + : \ + : "r" (__dtrace_args) \ + : "memory", "rdi", "rsi", "rdx", "rcx", "r8", "r9" \ ); -#define DTRACE_CALL6ARGS(provider, name) \ - asm volatile ("movq\t0x0(%0),%%rdi" "\n\t" \ - "movq\t0x8(%0),%%rsi" "\n\t" \ - "movq\t0x10(%0),%%rdx" "\n\t" \ - "movq\t0x18(%0),%%rcx" "\n\t" \ - "movq\t0x20(%0),%%r8" "\n\t" \ - "movq\t0x28(%0),%%r9" "\n\t" \ - DTRACE_CALL(provider, name) \ - : \ - : "r" (__dtrace_args) \ - : "memory", "rdi", "rsi", "rdx", "rcx", "r8", "r9" \ +#define DTRACE_CALL7ARGS(provider, name) \ + asm volatile ("subq\t$0x8,%%rsp" "\n\t" \ + "movq\t0x0(%0),%%rdi" "\n\t" \ + "movq\t0x8(%0),%%rsi" "\n\t" \ + "movq\t0x10(%0),%%rdx" "\n\t" \ + "movq\t0x18(%0),%%rcx" "\n\t" \ + "movq\t0x20(%0),%%r8" "\n\t" \ + "movq\t0x28(%0),%%r9" "\n\t" \ + "movq\t0x30(%0),%%rax" "\n\t" \ + "movq\t%%rax,0x0(%%rsp)" "\n\t" \ + DTRACE_CALL(provider, name) \ + "addq\t$0x8,%%rsp" "\n\t" \ + : \ + : "r" (__dtrace_args) \ + : "memory", "rdi", "rsi", "rdx", "rcx", "r8", "r9", "rax" \ ); -#define DTRACE_CALL7ARGS(provider, name) \ - asm volatile ("subq\t$0x8,%%rsp" "\n\t" \ - "movq\t0x0(%0),%%rdi" "\n\t" \ - "movq\t0x8(%0),%%rsi" "\n\t" \ - 
"movq\t0x10(%0),%%rdx" "\n\t" \ - "movq\t0x18(%0),%%rcx" "\n\t" \ - "movq\t0x20(%0),%%r8" "\n\t" \ - "movq\t0x28(%0),%%r9" "\n\t" \ - "movq\t0x30(%0),%%rax" "\n\t" \ - "movq\t%%rax,0x0(%%rsp)" "\n\t" \ - DTRACE_CALL(provider, name) \ - "addq\t$0x8,%%rsp" "\n\t" \ - : \ - : "r" (__dtrace_args) \ - : "memory", "rdi", "rsi", "rdx", "rcx", "r8", "r9", "rax" \ +#define DTRACE_CALL8ARGS(provider, name) \ + asm volatile ("subq\t$0x10,%%rsp" "\n\t" \ + "movq\t0x0(%0),%%rdi" "\n\t" \ + "movq\t0x8(%0),%%rsi" "\n\t" \ + "movq\t0x10(%0),%%rdx" "\n\t" \ + "movq\t0x18(%0),%%rcx" "\n\t" \ + "movq\t0x20(%0),%%r8" "\n\t" \ + "movq\t0x28(%0),%%r9" "\n\t" \ + "movq\t0x30(%0),%%rax" "\n\t" \ + "movq\t%%rax,0x0(%%rsp)" "\n\t" \ + "movq\t0x38(%0),%%rax" "\n\t" \ + "movq\t%%rax,0x8(%%rsp)" "\n\t" \ + DTRACE_CALL(provider, name) \ + "addq\t$0x10,%%rsp" "\n\t" \ + : \ + : "r" (__dtrace_args) \ + : "memory", "rdi", "rsi", "rdx", "rcx", "r8", "r9", "rax" \ ); #endif // __x86_64__ #ifdef __i386__ -#define DTRACE_NOPS \ - "nop" "\n\t" \ - "leal 0(%%esi), %%esi" "\n\t" +#define DTRACE_NOPS \ + "nop" "\n\t" \ + "leal 0(%%esi), %%esi" "\n\t" -#define DTRACE_CALL_INSN(p,n) \ +#define DTRACE_CALL_INSN(p, n) \ "call _dtracetest" DTRACE_STRINGIFY(_##p##_##n) "\n\t" -#define ARG1_EXTENT 1 -#define ARGS2_EXTENT 2 -#define ARGS3_EXTENT 4 -#define ARGS4_EXTENT 4 -#define ARGS5_EXTENT 8 -#define ARGS6_EXTENT 8 -#define ARGS7_EXTENT 8 -#define ARGS8_EXTENT 8 -#define ARGS9_EXTENT 12 -#define ARGS10_EXTENT 12 +#define ARG1_EXTENT 1 +#define ARGS2_EXTENT 2 +#define ARGS3_EXTENT 4 +#define ARGS4_EXTENT 4 +#define ARGS5_EXTENT 8 +#define ARGS6_EXTENT 8 +#define ARGS7_EXTENT 8 +#define ARGS8_EXTENT 8 +#define ARGS9_EXTENT 12 +#define ARGS10_EXTENT 12 /* * Because this code is used in the kernel, we must not touch any floating point @@ -228,214 +257,214 @@ * The end result is that we only pipeline two loads/stores at a time. Blech. 
*/ -#define DTRACE_CALL0ARGS(provider, name) \ - asm volatile ( \ - DTRACE_CALL(provider, name) \ - "# eat trailing nl +tabfrom DTRACE_CALL" \ - : \ - : \ +#define DTRACE_CALL0ARGS(provider, name) \ + asm volatile ( \ + DTRACE_CALL(provider, name) \ + "# eat trailing nl +tabfrom DTRACE_CALL" \ + : \ + : \ ); -#define DTRACE_CALL1ARG(provider, name) \ - asm volatile ("subl\t$0x10,%%esp" "\n\t" \ - "movl\t0x0(%0),%%eax" "\n\t" \ - "movl\t%%eax,0x0(%%esp)" "\n\t" \ - DTRACE_CALL(provider, name) \ - "addl\t$0x10,%%esp" \ - : \ - : "r" (__dtrace_args) \ - : "memory", "eax" \ +#define DTRACE_CALL1ARG(provider, name) \ + asm volatile ("subl\t$0x10,%%esp" "\n\t" \ + "movl\t0x0(%0),%%eax" "\n\t" \ + "movl\t%%eax,0x0(%%esp)" "\n\t" \ + DTRACE_CALL(provider, name) \ + "addl\t$0x10,%%esp" \ + : \ + : "r" (__dtrace_args) \ + : "memory", "eax" \ ); -#define DTRACE_CALL2ARGS(provider, name) \ - asm volatile ("subl\t$0x10,%%esp" "\n\t" \ - "movl\t0x0(%0),%%eax" "\n\t" \ - "movl\t0x4(%0),%%edx" "\n\t" \ - "movl\t%%eax,0x0(%%esp)" "\n\t" \ - "movl\t%%edx,0x4(%%esp)" "\n\t" \ - DTRACE_CALL(provider, name) \ - "addl\t$0x10,%%esp" \ - : \ - : "r" (__dtrace_args) \ - : "memory", "eax", "edx" \ +#define DTRACE_CALL2ARGS(provider, name) \ + asm volatile ("subl\t$0x10,%%esp" "\n\t" \ + "movl\t0x0(%0),%%eax" "\n\t" \ + "movl\t0x4(%0),%%edx" "\n\t" \ + "movl\t%%eax,0x0(%%esp)" "\n\t" \ + "movl\t%%edx,0x4(%%esp)" "\n\t" \ + DTRACE_CALL(provider, name) \ + "addl\t$0x10,%%esp" \ + : \ + : "r" (__dtrace_args) \ + : "memory", "eax", "edx" \ ); -#define DTRACE_CALL3ARGS(provider, name) \ - asm volatile ("subl\t$0x10,%%esp" "\n\t" \ - "movl\t0x0(%0),%%eax" "\n\t" \ - "movl\t0x4(%0),%%edx" "\n\t" \ - "movl\t%%eax,0x0(%%esp)" "\n\t" \ - "movl\t%%edx,0x4(%%esp)" "\n\t" \ - "movl\t0x8(%0),%%eax" "\n\t" \ - "movl\t%%eax,0x8(%%esp)" "\n\t" \ - DTRACE_CALL(provider, name) \ - "addl\t$0x10,%%esp" \ - : \ - : "r" (__dtrace_args) \ - : "memory", "eax", "edx" \ +#define DTRACE_CALL3ARGS(provider, name) \ + asm volatile ("subl\t$0x10,%%esp" "\n\t" \ + "movl\t0x0(%0),%%eax" "\n\t" \ + "movl\t0x4(%0),%%edx" "\n\t" \ + "movl\t%%eax,0x0(%%esp)" "\n\t" \ + "movl\t%%edx,0x4(%%esp)" "\n\t" \ + "movl\t0x8(%0),%%eax" "\n\t" \ + "movl\t%%eax,0x8(%%esp)" "\n\t" \ + DTRACE_CALL(provider, name) \ + "addl\t$0x10,%%esp" \ + : \ + : "r" (__dtrace_args) \ + : "memory", "eax", "edx" \ ); -#define DTRACE_CALL4ARGS(provider, name) \ - asm volatile ("subl\t$0x10,%%esp" "\n\t" \ - "movl\t0x0(%0),%%eax" "\n\t" \ - "movl\t0x4(%0),%%edx" "\n\t" \ - "movl\t%%eax,0x0(%%esp)" "\n\t" \ - "movl\t%%edx,0x4(%%esp)" "\n\t" \ - "movl\t0x8(%0),%%eax" "\n\t" \ - "movl\t0xC(%0),%%edx" "\n\t" \ - "movl\t%%eax,0x8(%%esp)" "\n\t" \ - "movl\t%%edx,0xC(%%esp)" "\n\t" \ - DTRACE_CALL(provider, name) \ - "addl\t$0x10,%%esp" \ - : \ - : "r" (__dtrace_args) \ - : "memory", "eax", "edx" \ +#define DTRACE_CALL4ARGS(provider, name) \ + asm volatile ("subl\t$0x10,%%esp" "\n\t" \ + "movl\t0x0(%0),%%eax" "\n\t" \ + "movl\t0x4(%0),%%edx" "\n\t" \ + "movl\t%%eax,0x0(%%esp)" "\n\t" \ + "movl\t%%edx,0x4(%%esp)" "\n\t" \ + "movl\t0x8(%0),%%eax" "\n\t" \ + "movl\t0xC(%0),%%edx" "\n\t" \ + "movl\t%%eax,0x8(%%esp)" "\n\t" \ + "movl\t%%edx,0xC(%%esp)" "\n\t" \ + DTRACE_CALL(provider, name) \ + "addl\t$0x10,%%esp" \ + : \ + : "r" (__dtrace_args) \ + : "memory", "eax", "edx" \ ); -#define DTRACE_CALL5ARGS(provider, name) \ - asm volatile ("subl\t$0x20,%%esp" "\n\t" \ - "movl\t0x0(%0),%%eax" "\n\t" \ - "movl\t0x4(%0),%%edx" "\n\t" \ - "movl\t%%eax,0x0(%%esp)" "\n\t" \ - "movl\t%%edx,0x4(%%esp)" 
"\n\t" \ - "movl\t0x8(%0),%%eax" "\n\t" \ - "movl\t0xC(%0),%%edx" "\n\t" \ - "movl\t%%eax,0x8(%%esp)" "\n\t" \ - "movl\t%%edx,0xC(%%esp)" "\n\t" \ - "movl\t0x10(%0),%%eax" "\n\t" \ - "movl\t%%eax,0x10(%%esp)" "\n\t" \ - DTRACE_CALL(provider, name) \ - "addl\t$0x20,%%esp" \ - : \ - : "r" (__dtrace_args) \ - : "memory", "eax", "edx" \ +#define DTRACE_CALL5ARGS(provider, name) \ + asm volatile ("subl\t$0x20,%%esp" "\n\t" \ + "movl\t0x0(%0),%%eax" "\n\t" \ + "movl\t0x4(%0),%%edx" "\n\t" \ + "movl\t%%eax,0x0(%%esp)" "\n\t" \ + "movl\t%%edx,0x4(%%esp)" "\n\t" \ + "movl\t0x8(%0),%%eax" "\n\t" \ + "movl\t0xC(%0),%%edx" "\n\t" \ + "movl\t%%eax,0x8(%%esp)" "\n\t" \ + "movl\t%%edx,0xC(%%esp)" "\n\t" \ + "movl\t0x10(%0),%%eax" "\n\t" \ + "movl\t%%eax,0x10(%%esp)" "\n\t" \ + DTRACE_CALL(provider, name) \ + "addl\t$0x20,%%esp" \ + : \ + : "r" (__dtrace_args) \ + : "memory", "eax", "edx" \ ); -#define DTRACE_CALL6ARGS(provider, name) \ - asm volatile ("subl\t$0x20,%%esp" "\n\t" \ - "movl\t0x0(%0),%%eax" "\n\t" \ - "movl\t0x4(%0),%%edx" "\n\t" \ - "movl\t%%eax,0x0(%%esp)" "\n\t" \ - "movl\t%%edx,0x4(%%esp)" "\n\t" \ - "movl\t0x8(%0),%%eax" "\n\t" \ - "movl\t0xC(%0),%%edx" "\n\t" \ - "movl\t%%eax,0x8(%%esp)" "\n\t" \ - "movl\t%%edx,0xC(%%esp)" "\n\t" \ - "movl\t0x10(%0),%%eax" "\n\t" \ - "movl\t0x14(%0),%%edx" "\n\t" \ - "movl\t%%eax,0x10(%%esp)" "\n\t" \ - "movl\t%%edx,0x14(%%esp)" "\n\t" \ - DTRACE_CALL(provider, name) \ - "addl\t$0x20,%%esp" \ - : \ - : "r" (__dtrace_args) \ - : "memory", "eax", "edx" \ +#define DTRACE_CALL6ARGS(provider, name) \ + asm volatile ("subl\t$0x20,%%esp" "\n\t" \ + "movl\t0x0(%0),%%eax" "\n\t" \ + "movl\t0x4(%0),%%edx" "\n\t" \ + "movl\t%%eax,0x0(%%esp)" "\n\t" \ + "movl\t%%edx,0x4(%%esp)" "\n\t" \ + "movl\t0x8(%0),%%eax" "\n\t" \ + "movl\t0xC(%0),%%edx" "\n\t" \ + "movl\t%%eax,0x8(%%esp)" "\n\t" \ + "movl\t%%edx,0xC(%%esp)" "\n\t" \ + "movl\t0x10(%0),%%eax" "\n\t" \ + "movl\t0x14(%0),%%edx" "\n\t" \ + "movl\t%%eax,0x10(%%esp)" "\n\t" \ + "movl\t%%edx,0x14(%%esp)" "\n\t" \ + DTRACE_CALL(provider, name) \ + "addl\t$0x20,%%esp" \ + : \ + : "r" (__dtrace_args) \ + : "memory", "eax", "edx" \ ); -#define DTRACE_CALL7ARGS(provider, name) \ - asm volatile ("subl\t$0x20,%%esp" "\n\t" \ - "movl\t0x0(%0),%%eax" "\n\t" \ - "movl\t0x4(%0),%%edx" "\n\t" \ - "movl\t%%eax,0x0(%%esp)" "\n\t" \ - "movl\t%%edx,0x4(%%esp)" "\n\t" \ - "movl\t0x8(%0),%%eax" "\n\t" \ - "movl\t0xC(%0),%%edx" "\n\t" \ - "movl\t%%eax,0x8(%%esp)" "\n\t" \ - "movl\t%%edx,0xC(%%esp)" "\n\t" \ - "movl\t0x10(%0),%%eax" "\n\t" \ - "movl\t0x14(%0),%%edx" "\n\t" \ - "movl\t%%eax,0x10(%%esp)" "\n\t" \ - "movl\t%%edx,0x14(%%esp)" "\n\t" \ - "movl\t0x18(%0),%%eax" "\n\t" \ - "movl\t%%eax,0x18(%%esp)" "\n\t" \ - DTRACE_CALL(provider, name) \ - "addl\t$0x20,%%esp" \ - : \ - : "r" (__dtrace_args) \ - : "memory", "eax", "edx" \ +#define DTRACE_CALL7ARGS(provider, name) \ + asm volatile ("subl\t$0x20,%%esp" "\n\t" \ + "movl\t0x0(%0),%%eax" "\n\t" \ + "movl\t0x4(%0),%%edx" "\n\t" \ + "movl\t%%eax,0x0(%%esp)" "\n\t" \ + "movl\t%%edx,0x4(%%esp)" "\n\t" \ + "movl\t0x8(%0),%%eax" "\n\t" \ + "movl\t0xC(%0),%%edx" "\n\t" \ + "movl\t%%eax,0x8(%%esp)" "\n\t" \ + "movl\t%%edx,0xC(%%esp)" "\n\t" \ + "movl\t0x10(%0),%%eax" "\n\t" \ + "movl\t0x14(%0),%%edx" "\n\t" \ + "movl\t%%eax,0x10(%%esp)" "\n\t" \ + "movl\t%%edx,0x14(%%esp)" "\n\t" \ + "movl\t0x18(%0),%%eax" "\n\t" \ + "movl\t%%eax,0x18(%%esp)" "\n\t" \ + DTRACE_CALL(provider, name) \ + "addl\t$0x20,%%esp" \ + : \ + : "r" (__dtrace_args) \ + : "memory", "eax", "edx" \ ); -#define 
DTRACE_CALL8ARGS(provider, name) \ - asm volatile ("subl\t$0x20,%%esp" "\n\t" \ - "movl\t0x0(%0),%%eax" "\n\t" \ - "movl\t0x4(%0),%%edx" "\n\t" \ - "movl\t%%eax,0x0(%%esp)" "\n\t" \ - "movl\t%%edx,0x4(%%esp)" "\n\t" \ - "movl\t0x8(%0),%%eax" "\n\t" \ - "movl\t0xC(%0),%%edx" "\n\t" \ - "movl\t%%eax,0x8(%%esp)" "\n\t" \ - "movl\t%%edx,0xC(%%esp)" "\n\t" \ - "movl\t0x10(%0),%%eax" "\n\t" \ - "movl\t0x14(%0),%%edx" "\n\t" \ - "movl\t%%eax,0x10(%%esp)" "\n\t" \ - "movl\t%%edx,0x14(%%esp)" "\n\t" \ - "movl\t0x18(%0),%%eax" "\n\t" \ - "movl\t0x1C(%0),%%edx" "\n\t" \ - "movl\t%%eax,0x18(%%esp)" "\n\t" \ - "movl\t%%edx,0x1C(%%esp)" "\n\t" \ - DTRACE_CALL(provider, name) \ - "addl\t$0x20,%%esp" \ - : \ - : "r" (__dtrace_args) \ - : "memory", "eax", "edx" \ +#define DTRACE_CALL8ARGS(provider, name) \ + asm volatile ("subl\t$0x20,%%esp" "\n\t" \ + "movl\t0x0(%0),%%eax" "\n\t" \ + "movl\t0x4(%0),%%edx" "\n\t" \ + "movl\t%%eax,0x0(%%esp)" "\n\t" \ + "movl\t%%edx,0x4(%%esp)" "\n\t" \ + "movl\t0x8(%0),%%eax" "\n\t" \ + "movl\t0xC(%0),%%edx" "\n\t" \ + "movl\t%%eax,0x8(%%esp)" "\n\t" \ + "movl\t%%edx,0xC(%%esp)" "\n\t" \ + "movl\t0x10(%0),%%eax" "\n\t" \ + "movl\t0x14(%0),%%edx" "\n\t" \ + "movl\t%%eax,0x10(%%esp)" "\n\t" \ + "movl\t%%edx,0x14(%%esp)" "\n\t" \ + "movl\t0x18(%0),%%eax" "\n\t" \ + "movl\t0x1C(%0),%%edx" "\n\t" \ + "movl\t%%eax,0x18(%%esp)" "\n\t" \ + "movl\t%%edx,0x1C(%%esp)" "\n\t" \ + DTRACE_CALL(provider, name) \ + "addl\t$0x20,%%esp" \ + : \ + : "r" (__dtrace_args) \ + : "memory", "eax", "edx" \ ); -#define DTRACE_CALL9ARGS(provider, name) \ - asm volatile ("subl\t$0x30,%%esp" "\n\t" \ - "movl\t0x0(%0),%%eax" "\n\t" \ - "movl\t0x4(%0),%%edx" "\n\t" \ - "movl\t%%eax,0x0(%%esp)" "\n\t" \ - "movl\t%%edx,0x4(%%esp)" "\n\t" \ - "movl\t0x8(%0),%%eax" "\n\t" \ - "movl\t0xC(%0),%%edx" "\n\t" \ - "movl\t%%eax,0x8(%%esp)" "\n\t" \ - "movl\t%%edx,0xC(%%esp)" "\n\t" \ - "movl\t0x10(%0),%%eax" "\n\t" \ - "movl\t0x14(%0),%%edx" "\n\t" \ - "movl\t%%eax,0x10(%%esp)" "\n\t" \ - "movl\t%%edx,0x14(%%esp)" "\n\t" \ - "movl\t0x18(%0),%%eax" "\n\t" \ - "movl\t0x1C(%0),%%edx" "\n\t" \ - "movl\t%%eax,0x18(%%esp)" "\n\t" \ - "movl\t%%edx,0x1C(%%esp)" "\n\t" \ - "movl\t0x20(%0),%%eax" "\n\t" \ - "movl\t%%eax,0x20(%%esp)" "\n\t" \ - DTRACE_CALL(provider, name) \ - "addl\t$0x30,%%esp" \ - : \ - : "r" (__dtrace_args) \ - : "memory", "eax", "edx" \ +#define DTRACE_CALL9ARGS(provider, name) \ + asm volatile ("subl\t$0x30,%%esp" "\n\t" \ + "movl\t0x0(%0),%%eax" "\n\t" \ + "movl\t0x4(%0),%%edx" "\n\t" \ + "movl\t%%eax,0x0(%%esp)" "\n\t" \ + "movl\t%%edx,0x4(%%esp)" "\n\t" \ + "movl\t0x8(%0),%%eax" "\n\t" \ + "movl\t0xC(%0),%%edx" "\n\t" \ + "movl\t%%eax,0x8(%%esp)" "\n\t" \ + "movl\t%%edx,0xC(%%esp)" "\n\t" \ + "movl\t0x10(%0),%%eax" "\n\t" \ + "movl\t0x14(%0),%%edx" "\n\t" \ + "movl\t%%eax,0x10(%%esp)" "\n\t" \ + "movl\t%%edx,0x14(%%esp)" "\n\t" \ + "movl\t0x18(%0),%%eax" "\n\t" \ + "movl\t0x1C(%0),%%edx" "\n\t" \ + "movl\t%%eax,0x18(%%esp)" "\n\t" \ + "movl\t%%edx,0x1C(%%esp)" "\n\t" \ + "movl\t0x20(%0),%%eax" "\n\t" \ + "movl\t%%eax,0x20(%%esp)" "\n\t" \ + DTRACE_CALL(provider, name) \ + "addl\t$0x30,%%esp" \ + : \ + : "r" (__dtrace_args) \ + : "memory", "eax", "edx" \ ); -#define DTRACE_CALL10ARGS(provider, name) \ - asm volatile ("subl\t$0x30,%%esp" "\n\t" \ - "movl\t0x0(%0),%%eax" "\n\t" \ - "movl\t0x4(%0),%%edx" "\n\t" \ - "movl\t%%eax,0x0(%%esp)" "\n\t" \ - "movl\t%%edx,0x4(%%esp)" "\n\t" \ - "movl\t0x8(%0),%%eax" "\n\t" \ - "movl\t0xC(%0),%%edx" "\n\t" \ - "movl\t%%eax,0x8(%%esp)" "\n\t" \ - 
"movl\t%%edx,0xC(%%esp)" "\n\t" \ - "movl\t0x10(%0),%%eax" "\n\t" \ - "movl\t0x14(%0),%%edx" "\n\t" \ - "movl\t%%eax,0x10(%%esp)" "\n\t" \ - "movl\t%%edx,0x14(%%esp)" "\n\t" \ - "movl\t0x18(%0),%%eax" "\n\t" \ - "movl\t0x1C(%0),%%edx" "\n\t" \ - "movl\t%%eax,0x18(%%esp)" "\n\t" \ - "movl\t%%edx,0x1C(%%esp)" "\n\t" \ - "movl\t0x20(%0),%%eax" "\n\t" \ - "movl\t0x24(%0),%%edx" "\n\t" \ - "movl\t%%eax,0x20(%%esp)" "\n\t" \ - "movl\t%%edx,0x24(%%esp)" "\n\t" \ - DTRACE_CALL(provider, name) \ - "addl\t$0x30,%%esp" \ - : \ - : "r" (__dtrace_args) \ - : "memory", "eax", "edx" \ +#define DTRACE_CALL10ARGS(provider, name) \ + asm volatile ("subl\t$0x30,%%esp" "\n\t" \ + "movl\t0x0(%0),%%eax" "\n\t" \ + "movl\t0x4(%0),%%edx" "\n\t" \ + "movl\t%%eax,0x0(%%esp)" "\n\t" \ + "movl\t%%edx,0x4(%%esp)" "\n\t" \ + "movl\t0x8(%0),%%eax" "\n\t" \ + "movl\t0xC(%0),%%edx" "\n\t" \ + "movl\t%%eax,0x8(%%esp)" "\n\t" \ + "movl\t%%edx,0xC(%%esp)" "\n\t" \ + "movl\t0x10(%0),%%eax" "\n\t" \ + "movl\t0x14(%0),%%edx" "\n\t" \ + "movl\t%%eax,0x10(%%esp)" "\n\t" \ + "movl\t%%edx,0x14(%%esp)" "\n\t" \ + "movl\t0x18(%0),%%eax" "\n\t" \ + "movl\t0x1C(%0),%%edx" "\n\t" \ + "movl\t%%eax,0x18(%%esp)" "\n\t" \ + "movl\t%%edx,0x1C(%%esp)" "\n\t" \ + "movl\t0x20(%0),%%eax" "\n\t" \ + "movl\t0x24(%0),%%edx" "\n\t" \ + "movl\t%%eax,0x20(%%esp)" "\n\t" \ + "movl\t%%edx,0x24(%%esp)" "\n\t" \ + DTRACE_CALL(provider, name) \ + "addl\t$0x30,%%esp" \ + : \ + : "r" (__dtrace_args) \ + : "memory", "eax", "edx" \ ); #endif // __i386__ -#endif /* _MACH_I386_SDT_ISA_H */ +#endif /* _MACH_I386_SDT_ISA_H */ diff --git a/osfmk/mach/i386/thread_state.h b/osfmk/mach/i386/thread_state.h index 759489dcf..4cd90fa08 100644 --- a/osfmk/mach/i386/thread_state.h +++ b/osfmk/mach/i386/thread_state.h @@ -32,7 +32,7 @@ #ifndef _MACH_I386_THREAD_STATE_H_ #define _MACH_I386_THREAD_STATE_H_ -/* Size of maximum exported thread state in words */ +/* Size of maximum exported thread state in 32-bit words */ #define I386_THREAD_STATE_MAX (614) /* Size of biggest state possible */ #if defined (__i386__) || defined(__x86_64__) diff --git a/osfmk/mach/i386/thread_status.h b/osfmk/mach/i386/thread_status.h index 2744c0be6..da08806a0 100644 --- a/osfmk/mach/i386/thread_status.h +++ b/osfmk/mach/i386/thread_status.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2020 Apple Computer, Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -124,6 +124,8 @@ #define x86_AVX512_STATE (x86_AVX512_STATE32 + 2) #define x86_PAGEIN_STATE 22 #define x86_THREAD_FULL_STATE64 23 +#define x86_INSTRUCTION_STATE 24 +#define x86_LAST_BRANCH_STATE 25 /* * Largest state on this machine: @@ -158,6 +160,8 @@ (x == x86_AVX512_STATE64) || \ (x == x86_AVX512_STATE) || \ (x == x86_PAGEIN_STATE) || \ + (x == x86_INSTRUCTION_STATE) || \ + (x == x86_LAST_BRANCH_STATE) || \ (x == THREAD_STATE_NONE)) struct x86_state_hdr { @@ -262,6 +266,19 @@ typedef _STRUCT_X86_PAGEIN_STATE x86_pagein_state_t; #define X86_PAGEIN_STATE_COUNT x86_PAGEIN_STATE_COUNT +typedef _STRUCT_X86_INSTRUCTION_STATE x86_instruction_state_t; +#define x86_INSTRUCTION_STATE_COUNT \ + ((mach_msg_type_number_t)(sizeof(x86_instruction_state_t) / sizeof(int))) + +#define X86_INSTRUCTION_STATE_COUNT x86_INSTRUCTION_STATE_COUNT + +typedef _STRUCT_LAST_BRANCH_STATE last_branch_state_t; +#define x86_LAST_BRANCH_STATE_COUNT \ + ((mach_msg_type_number_t)(sizeof(last_branch_state_t) / sizeof(int))) + +#define X86_LAST_BRANCH_STATE_COUNT x86_LAST_BRANCH_STATE_COUNT + + /* * Combined thread, float and exception states */ diff --git a/osfmk/mach/i386/vm_param.h b/osfmk/mach/i386/vm_param.h index f140978d7..50f25ab78 100644 --- a/osfmk/mach/i386/vm_param.h +++ b/osfmk/mach/i386/vm_param.h @@ -90,11 +90,18 @@ #ifndef _MACH_I386_VM_PARAM_H_ #define _MACH_I386_VM_PARAM_H_ +#if !defined(KERNEL) && !defined(__ASSEMBLER__) + +#include +#endif + #define BYTE_SIZE 8 /* byte size in bits */ #define I386_PGBYTES 4096 /* bytes per 80386 page */ #define I386_PGSHIFT 12 /* bitshift for pages */ +#if defined(KERNEL) + #define PAGE_SIZE I386_PGBYTES #define PAGE_SHIFT I386_PGSHIFT #define PAGE_MASK (PAGE_SIZE - 1) @@ -131,7 +138,27 @@ ~(I386_PGBYTES-1)) #define i386_trunc_page(x) (((pmap_paddr_t)(x)) & ~(I386_PGBYTES-1)) +#else /* KERNEL */ + +#if !defined(__MAC_OS_X_VERSION_MIN_REQUIRED) || (__MAC_OS_X_VERSION_MIN_REQUIRED < 101600) +#define PAGE_SHIFT I386_PGSHIFT +#define PAGE_SIZE I386_PGBYTES +#define PAGE_MASK (PAGE_SIZE-1) +#else /* !defined(__MAC_OS_X_VERSION_MIN_REQUIRED) || (__MAC_OS_X_VERSION_MIN_REQUIRED < 101600) */ +#define PAGE_SHIFT vm_page_shift +#define PAGE_SIZE vm_page_size +#define PAGE_MASK vm_page_mask +#endif /* !defined(__MAC_OS_X_VERSION_MIN_REQUIRED) || (__MAC_OS_X_VERSION_MIN_REQUIRED < 101600) */ + +#define PAGE_MAX_SHIFT 14 +#define PAGE_MAX_SIZE (1 << PAGE_MAX_SHIFT) +#define PAGE_MAX_MASK (PAGE_MAX_SIZE-1) + +#define PAGE_MIN_SHIFT 12 +#define PAGE_MIN_SIZE (1 << PAGE_MIN_SHIFT) +#define PAGE_MIN_MASK (PAGE_MIN_SIZE-1) +#endif /* KERNEL */ #define VM_MIN_ADDRESS64 ((user_addr_t) 0x0000000000000000ULL) /* @@ -191,6 +218,14 @@ * We can't let VM allocate memory from there. 
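Editor's aside (not part of the header): a small standalone check of the kext-range arithmetic that the address-map comment and macros just below imply. The constants are taken from that table (VM_MIN/VM_MAX_KERNEL_ADDRESS) and from KEXT_ALLOC_MAX_OFFSET / VM_MIN_KERNEL_AND_KEXT_ADDRESS as defined in this hunk; the sample virtual address is hypothetical.

#include <assert.h>
#include <stdint.h>

int main(void)
{
	/* x86_64 values from the address-map comment and macros below. */
	const uint64_t vm_min_kernel      = 0xFFFFFF8000000000ULL;
	const uint64_t vm_max_kernel      = 0xFFFFFFFFFFFFEFFFULL;
	const uint64_t kext_alloc_max_off = 2ULL * 1024 * 1024 * 1024;  /* 2 GiB */

	/* VM_MIN_KERNEL_AND_KEXT_ADDRESS: kexts live in the 2 GiB below the kernel. */
	const uint64_t min_kernel_and_kext = vm_min_kernel - kext_alloc_max_off;
	assert(min_kernel_and_kext == 0xFFFFFF7F80000000ULL);

	/* VM_KERNEL_ADDRESS(va): the inclusive range check performed by the macro. */
	const uint64_t va = 0xFFFFFF8012345000ULL;  /* hypothetical kernel address */
	assert(va >= min_kernel_and_kext && va <= vm_max_kernel);
	return 0;
}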
*/ +/* + * +-----------------------+--------+--------+------------------------+ + * | 0xffff_ffff_ffff_efff | -4096 | ~512GB | VM_MAX_KERNEL_ADDRESS | + * +-----------------------+--------+--------+------------------------+ + * | 0xffff_ff80_0000_0000 | -512GB | 0GB | VM_MIN_KERNEL_ADDRESS | + * | | | | PMAP_HEAP_RANGE_START | + * +-----------------------+--------+--------+------------------------+ + */ #define KERNEL_IMAGE_TO_PHYS(x) (x) #define VM_KERNEL_POINTER_SIGNIFICANT_BITS 39 @@ -199,14 +234,15 @@ #define VM_MIN_KERNEL_AND_KEXT_ADDRESS (VM_MIN_KERNEL_ADDRESS - 0x80000000ULL) #define VM_MAX_KERNEL_ADDRESS ((vm_offset_t) 0xFFFFFFFFFFFFEFFFUL) #define VM_MAX_KERNEL_ADDRESS_EFI32 ((vm_offset_t) 0xFFFFFF80FFFFEFFFUL) -#define KEXT_ALLOC_MAX_OFFSET (2 * 1024 * 1024 * 1024UL) -#define KEXT_ALLOC_BASE(x) ((x) - KEXT_ALLOC_MAX_OFFSET) -#define KEXT_ALLOC_SIZE(x) (KEXT_ALLOC_MAX_OFFSET - (x)) +#define KEXT_ALLOC_MAX_OFFSET (2 * 1024 * 1024 * 1024UL) +#define KEXT_ALLOC_BASE(x) ((x) - KEXT_ALLOC_MAX_OFFSET) +#define KEXT_ALLOC_SIZE(x) (KEXT_ALLOC_MAX_OFFSET - (x)) #define VM_KERNEL_STRIP_PTR(_v) (_v) -#define VM_KERNEL_ADDRESS(va) ((((vm_address_t)(va))>=VM_MIN_KERNEL_AND_KEXT_ADDRESS) && \ - (((vm_address_t)(va))<=VM_MAX_KERNEL_ADDRESS)) +#define VM_KERNEL_ADDRESS(va) \ + (((vm_address_t)(va) >= VM_MIN_KERNEL_AND_KEXT_ADDRESS) && \ + ((vm_address_t)(va) <= VM_MAX_KERNEL_ADDRESS)) #define VM_MAP_MIN_ADDRESS MACH_VM_MIN_ADDRESS #define VM_MAP_MAX_ADDRESS MACH_VM_MAX_ADDRESS @@ -221,8 +257,16 @@ # define INTSTACK_SIZE (I386_PGBYTES*4) # define KERNEL_STACK_SIZE (I386_PGBYTES*6) #else +/* + * KERNEL_STACK_MULTIPLIER can be defined externally to get a larger + * kernel stack size. For example, adding "-DKERNEL_STACK_MULTIPLIER=2" + * helps avoid kernel stack overflows when compiling with "-O0". + */ +#ifndef KERNEL_STACK_MULTIPLIER +#define KERNEL_STACK_MULTIPLIER (1) +#endif /* KERNEL_STACK_MULTIPLIER */ # define INTSTACK_SIZE (I386_PGBYTES*4) -# define KERNEL_STACK_SIZE (I386_PGBYTES*4) +# define KERNEL_STACK_SIZE (I386_PGBYTES*4*KERNEL_STACK_MULTIPLIER) #endif #ifdef MACH_KERNEL_PRIVATE @@ -254,9 +298,6 @@ #define VM_MIN_KERNEL_LOADED_ADDRESS ((vm_offset_t) 0xFFFFFF8000000000UL) #define VM_MAX_KERNEL_LOADED_ADDRESS ((vm_offset_t) 0xFFFFFF801FFFFFFFUL) -#define NCOPY_WINDOWS 0 - - /* * Conversion between 80386 pages and VM pages @@ -284,6 +325,11 @@ #define IS_USERADDR64_CANONICAL(addr) \ ((addr) < (VM_MAX_USER_PAGE_ADDRESS)) +/* + * This now limits the physical pages in the zone map + */ +#define ZONE_MAP_MAX (64ULL << 30) /* 64GB */ + #endif /* MACH_KERNEL_PRIVATE */ #endif /* KERNEL_PRIVATE */ diff --git a/osfmk/mach/i386/vm_types.h b/osfmk/mach/i386/vm_types.h index e4442e30d..fe5a71dd4 100644 --- a/osfmk/mach/i386/vm_types.h +++ b/osfmk/mach/i386/vm_types.h @@ -70,7 +70,6 @@ #ifndef ASSEMBLER #include -#include #include /* @@ -133,8 +132,6 @@ typedef mach_vm_address_t mach_port_context_t; #ifdef MACH_KERNEL_PRIVATE -#ifdef VM32_SUPPORT - /* * These are types used internal to Mach to implement the * legacy 32-bit VM APIs published by the kernel. @@ -143,8 +140,6 @@ typedef uint32_t vm32_address_t; typedef uint32_t vm32_offset_t; typedef uint32_t vm32_size_t; -#endif /* VM32_SUPPORT */ - #endif /* MACH_KERNEL_PRIVATE */ #endif /* ASSEMBLER */ diff --git a/osfmk/mach/kern_return.h b/osfmk/mach/kern_return.h index cbc29d937..d32182600 100644 --- a/osfmk/mach/kern_return.h +++ b/osfmk/mach/kern_return.h @@ -323,6 +323,10 @@ /* The provided buffer is of insufficient size for the requested data. 
*/ +#define KERN_DENIED 53 +/* Denied by security policy + */ + #define KERN_RETURN_MAX 0x100 /* Maximum return value allowable */ diff --git a/osfmk/mach/kmod.h b/osfmk/mach/kmod.h index a4fba6d3a..2453a9bdd 100644 --- a/osfmk/mach/kmod.h +++ b/osfmk/mach/kmod.h @@ -176,6 +176,7 @@ extern void kmod_panic_dump(vm_offset_t * addr, unsigned int dump_cnt); */ #define KMOD_DTRACE_FORCE_INIT 0x01 #define KMOD_DTRACE_STATIC_KEXT 0x02 +#define KMOD_DTRACE_NO_KERNEL_SYMS 0x04 #endif /* CONFIG_DTRACE */ #endif /* KERNEL_PRIVATE */ diff --git a/osfmk/mach/mach_eventlink.defs b/osfmk/mach/mach_eventlink.defs new file mode 100644 index 000000000..28d611222 --- /dev/null +++ b/osfmk/mach/mach_eventlink.defs @@ -0,0 +1,61 @@ +/* + * Copyright (c) 2019 Apple Computer, Inc. All rights reserved. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. + * + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ + */ + +subsystem +#if KERNEL_SERVER + KernelServer +#endif /* KERNEL_SERVER */ + mach_eventlink 716200; + +#include +#include +#include + +routine mach_eventlink_create( + task : task_t; + option : mach_eventlink_create_option_t; + out eventlink_pair : eventlink_port_pair_t); + +routine mach_eventlink_destroy( + eventlink : eventlink_consume_ref_t); + + +routine mach_eventlink_associate( + eventlink : eventlink_t; + thread : thread_t; + copyin_addr_wait : mach_vm_address_t; + copyin_mask_wait : uint64_t; + copyin_addr_signal : mach_vm_address_t; + copyin_mask_signal : uint64_t; + option : mach_eventlink_associate_option_t); + +routine mach_eventlink_disassociate( + eventlink : eventlink_t; + option : mach_eventlink_disassociate_option_t); + + /* vim: set ft=c : */ diff --git a/osfmk/mach/mach_eventlink_types.h b/osfmk/mach/mach_eventlink_types.h new file mode 100644 index 000000000..49454d83a --- /dev/null +++ b/osfmk/mach/mach_eventlink_types.h @@ -0,0 +1,97 @@ +/* + * Copyright (c) 2019 Apple Computer, Inc. All rights reserved. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. 
The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. + * + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ + */ + +#ifndef _MACH_EVENTLINK_TYPES_H_ +#define _MACH_EVENTLINK_TYPES_H_ + +#include +#include + +__options_decl(kern_clock_id_t, uint32_t, { + KERN_CLOCK_MACH_ABSOLUTE_TIME = 1, +}); + +__options_decl(mach_eventlink_create_option_t, uint32_t, { + MELC_OPTION_NONE = 0, + MELC_OPTION_NO_COPYIN = 0x1, + MELC_OPTION_WITH_COPYIN = 0x2, +}); + +__options_decl(mach_eventlink_associate_option_t, uint32_t, { + MELA_OPTION_NONE = 0, + MELA_OPTION_ASSOCIATE_ON_WAIT = 0x1, +}); + +__options_decl(mach_eventlink_disassociate_option_t, uint32_t, { + MELD_OPTION_NONE = 0, +}); + +__options_decl(mach_eventlink_signal_wait_option_t, uint32_t, { + MELSW_OPTION_NONE = 0, + MELSW_OPTION_NO_WAIT = 0x1, +}); + +#define EVENTLINK_SIGNAL_COUNT_MASK 0xffffffffffffff +#define EVENTLINK_SIGNAL_ERROR_MASK 0xff +#define EVENTLINK_SIGNAL_ERROR_SHIFT 56 + +#define encode_eventlink_count_and_error(count, error) \ + (((count) & EVENTLINK_SIGNAL_COUNT_MASK) | ((((uint64_t)error) & EVENTLINK_SIGNAL_ERROR_MASK) << EVENTLINK_SIGNAL_ERROR_SHIFT)) + +#define decode_eventlink_count_from_retval(retval) \ + ((retval) & EVENTLINK_SIGNAL_COUNT_MASK) + +#define decode_eventlink_error_from_retval(retval) \ + ((kern_return_t)(((retval) >> EVENTLINK_SIGNAL_ERROR_SHIFT) & EVENTLINK_SIGNAL_ERROR_MASK)) + +#ifndef KERNEL +kern_return_t +mach_eventlink_signal( + mach_port_t eventlink_port, + uint64_t signal_count); + +kern_return_t +mach_eventlink_wait_until( + mach_port_t eventlink_port, + uint64_t *count_ptr, + mach_eventlink_signal_wait_option_t option, + kern_clock_id_t clock_id, + uint64_t deadline); + +kern_return_t +mach_eventlink_signal_wait_until( + mach_port_t eventlink_port, + uint64_t *count_ptr, + uint64_t signal_count, + mach_eventlink_signal_wait_option_t option, + kern_clock_id_t clock_id, + uint64_t deadline); + +#endif + +#endif /* _MACH_EVENTLINK_TYPES_H_ */ diff --git a/osfmk/mach/mach_param.h b/osfmk/mach/mach_param.h index fb4261301..f10d4d161 100644 --- a/osfmk/mach/mach_param.h +++ b/osfmk/mach/mach_param.h @@ -73,4 +73,10 @@ /* Number of watchport for task */ #define TASK_MAX_WATCHPORT_COUNT 32 +/* Number of different task port flavor */ +#define TASK_SELF_PORT_COUNT 4 + +/* Number of different thread port flavor */ +#define THREAD_SELF_PORT_COUNT 3 + #endif /* _MACH_MACH_PARAM_H_ */ diff --git a/osfmk/mach/mach_port.defs b/osfmk/mach/mach_port.defs index d62095ad7..969fad546 100644 --- a/osfmk/mach/mach_port.defs +++ b/osfmk/mach/mach_port.defs @@ -248,9 +248,14 @@ routine mach_port_set_mscount( * Only valid for port sets. 
Returns a list of * the members. */ - -routine mach_port_get_set_status( - task : ipc_space_inspect_t; +routine +#ifdef KERNEL_SERVER +mach_port_get_set_status_from_user( + port : mach_port_t; +#else +mach_port_get_set_status( + task : ipc_space_read_t; +#endif name : mach_port_name_t; out members : mach_port_name_array_t); @@ -354,9 +359,14 @@ routine mach_port_set_seqno( /* * Returns information about a port. */ - -routine mach_port_get_attributes( - task : ipc_space_inspect_t; +routine +#ifdef KERNEL_SERVER +mach_port_get_attributes_from_user( + port : mach_port_t; +#else +mach_port_get_attributes( + task : ipc_space_read_t; +#endif name : mach_port_name_t; flavor : mach_port_flavor_t; out port_info_out : mach_port_info_t, CountInOut); @@ -426,8 +436,14 @@ routine mach_port_get_srights( * This call is only valid on MACH_IPC_DEBUG kernels. * Otherwise, KERN_FAILURE is returned. */ -routine mach_port_space_info( - task : ipc_space_inspect_t; +routine +#ifdef KERNEL_SERVER +mach_port_space_info_from_user( + port : mach_port_t; +#else +mach_port_space_info( + space : ipc_space_read_t; +#endif out space_info : ipc_info_space_t; out table_info : ipc_info_name_array_t; out tree_info : ipc_info_tree_name_array_t); @@ -453,8 +469,14 @@ routine mach_port_dnrequest_info( * This interface is DEPRECATED in favor of the new * mach_port_kernel_object64() call (see below). */ -routine mach_port_kernel_object( - task : ipc_space_inspect_t; +routine +#ifdef KERNEL_SERVER +mach_port_kernel_object_from_user( + port : mach_port_t; +#else +mach_port_kernel_object( + task : ipc_space_read_t; +#endif name : mach_port_name_t; out object_type : unsigned; out object_addr : unsigned); @@ -503,9 +525,14 @@ routine mach_port_extract_member( * Only valid for receive rights. * Gets the context pointer for the port. */ - -routine mach_port_get_context( - task : ipc_space_inspect_t; +routine +#ifdef KERNEL_SERVER +mach_port_get_context_from_user( + port : mach_port_t; +#else +mach_port_get_context( + task : ipc_space_read_t; +#endif name : mach_port_name_t; #ifdef LIBSYSCALL_INTERFACE out context : mach_port_context_t @@ -535,8 +562,14 @@ routine mach_port_set_context( * This call is only valid on MACH_IPC_DEBUG kernels. * Otherwise, KERN_FAILURE is returned. */ -routine mach_port_kobject( - task : ipc_space_inspect_t; +routine +#ifdef KERNEL_SERVER +mach_port_kobject_from_user( + port : mach_port_t; +#else +mach_port_kobject( + task : ipc_space_read_t; +#endif name : mach_port_name_t; out object_type : natural_t; out object_addr : mach_vm_address_t); @@ -666,8 +699,14 @@ routine mach_port_swap_guard( * This call is only valid on MACH_IPC_DEBUG kernels. * Otherwise, KERN_FAILURE is returned. 
*/ -routine mach_port_kobject_description( - task : ipc_space_inspect_t; +routine +#ifdef KERNEL_SERVER +mach_port_kobject_description_from_user( + port : mach_port_t; +#else +mach_port_kobject_description( + task : ipc_space_read_t; +#endif name : mach_port_name_t; out object_type : natural_t; out object_addr : mach_vm_address_t; diff --git a/osfmk/mach/mach_traps.h b/osfmk/mach/mach_traps.h index 064514ebc..b7a9bdd1c 100644 --- a/osfmk/mach/mach_traps.h +++ b/osfmk/mach/mach_traps.h @@ -109,7 +109,7 @@ extern mach_msg_return_t mach_msg_overwrite_trap( mach_msg_size_t rcv_size, mach_port_name_t rcv_name, mach_msg_timeout_t timeout, - mach_msg_priority_t override, + mach_msg_priority_t priority, mach_msg_header_t *rcv_msg, mach_msg_size_t rcv_limit); @@ -191,12 +191,6 @@ extern kern_return_t _kernelrpc_mach_port_allocate_trap( mach_port_name_t *name ); - -extern kern_return_t _kernelrpc_mach_port_destroy_trap( - mach_port_name_t target, - mach_port_name_t name - ); - extern kern_return_t _kernelrpc_mach_port_deallocate_trap( mach_port_name_t target, mach_port_name_t name @@ -441,7 +435,7 @@ struct mach_msg_overwrite_trap_args { PAD_ARG_(mach_msg_size_t, rcv_size); PAD_ARG_(mach_port_name_t, rcv_name); PAD_ARG_(mach_msg_timeout_t, timeout); - PAD_ARG_(mach_msg_priority_t, override); + PAD_ARG_(mach_msg_priority_t, priority); PAD_ARG_8 PAD_ARG_(user_addr_t, rcv_msg); /* Unused on mach_msg_trap */ }; @@ -708,14 +702,6 @@ struct _kernelrpc_mach_port_allocate_args { extern kern_return_t _kernelrpc_mach_port_allocate_trap( struct _kernelrpc_mach_port_allocate_args *args); - -struct _kernelrpc_mach_port_destroy_args { - PAD_ARG_(mach_port_name_t, target); - PAD_ARG_(mach_port_name_t, name); -}; -extern kern_return_t _kernelrpc_mach_port_destroy_trap( - struct _kernelrpc_mach_port_destroy_args *args); - struct _kernelrpc_mach_port_deallocate_args { PAD_ARG_(mach_port_name_t, target); PAD_ARG_(mach_port_name_t, name); diff --git a/osfmk/mach/mach_types.defs b/osfmk/mach/mach_types.defs index 09613b3a8..25e03e784 100644 --- a/osfmk/mach/mach_types.defs +++ b/osfmk/mach/mach_types.defs @@ -119,6 +119,8 @@ type mach_port_info_t = array[*:17] of integer_t; type mach_msg_trailer_type_t = int; type mach_msg_trailer_info_t = array[*:68] of char; +type mach_task_flavor_t = int; + type task_t = mach_port_t #if KERNEL_SERVER intran: task_t convert_port_to_task(mach_port_t) @@ -135,6 +137,21 @@ type task_name_t = mach_port_t #endif /* KERNEL_SERVER */ ; +type task_policy_set_t = mach_port_t +#if KERNEL_SERVER + intran: task_policy_set_t convert_port_to_task_policy_set(mach_port_t) + destructor: task_policy_set_deallocate(task_policy_set_t) +#endif /* KERNEL_SERVER */ + ; + +type task_policy_get_t = mach_port_t +#if KERNEL_SERVER + intran: task_policy_get_t convert_port_to_task_policy_get(mach_port_t) + destructor: task_policy_get_deallocate(task_policy_get_t) +#endif /* KERNEL_SERVER */ + ; + + type task_inspect_t = mach_port_t #if KERNEL_SERVER intran: task_inspect_t convert_port_to_task_inspect(mach_port_t) @@ -143,6 +160,14 @@ type task_inspect_t = mach_port_t #endif /* KERNEL_SERVER */ ; +type task_read_t = mach_port_t +#if KERNEL_SERVER + intran: task_read_t convert_port_to_task_read(mach_port_t) + outtran: mach_port_t convert_task_read_to_port(task_read_t) + destructor: task_read_deallocate(task_read_t) +#endif /* KERNEL_SERVER */ + ; + type thread_t = mach_port_t #if KERNEL_SERVER intran: thread_t convert_port_to_thread(mach_port_t) @@ -159,6 +184,14 @@ type thread_inspect_t = mach_port_t #endif /* 
KERNEL_SERVER */ ; +type thread_read_t = mach_port_t +#if KERNEL_SERVER + intran: thread_read_t convert_port_to_thread_read(mach_port_t) + outtran: mach_port_t convert_thread_read_to_port(thread_read_t) + destructor: thread_read_deallocate(thread_read_t) +#endif /* KERNEL_SERVER */ + ; + type thread_act_t = mach_port_t #if KERNEL_SERVER intran: thread_act_t convert_port_to_thread(mach_port_t) @@ -205,6 +238,20 @@ type vm_map_t = mach_port_t #endif /* KERNEL_SERVER */ ; +type vm_map_inspect_t = mach_port_t +#if KERNEL_SERVER + intran: vm_map_inspect_t convert_port_to_map_inspect(mach_port_t) + destructor: vm_map_inspect_deallocate(vm_map_inspect_t) +#endif /* KERNEL_SERVER */ + ; + +type vm_map_read_t = mach_port_t +#if KERNEL_SERVER + intran: vm_map_read_t convert_port_to_map_read(mach_port_t) + destructor: vm_map_read_deallocate(vm_map_read_t) +#endif /* KERNEL_SERVER */ + ; + type vm_task_entry_t = mach_port_t cusertype: vm_map_t #if KERNEL_SERVER @@ -220,6 +267,13 @@ type ipc_space_t = mach_port_t #endif /* KERNEL_SERVER */ ; +type ipc_space_read_t = mach_port_t +#if KERNEL_SERVER + intran: ipc_space_read_t convert_port_to_space_read(mach_port_t) + destructor: space_read_deallocate(ipc_space_read_t) +#endif /* KERNEL_SERVER */ + ; + type ipc_space_inspect_t = mach_port_t #if KERNEL_SERVER intran: ipc_space_inspect_t convert_port_to_space_inspect(mach_port_t) @@ -552,6 +606,33 @@ type semaphore_consume_ref_t = mach_port_move_send_t #endif /* KERNEL_SERVER */ ; +#ifndef _MACH_MACH_EVENTLINK_TYPE_DEFS +#define _MACH_MACH_EVENTLINK_TYPE_DEFS + +type eventlink_t = mach_port_t + ctype: mach_port_t +#if KERNEL_SERVER + intran: ipc_eventlink_t convert_port_to_eventlink(mach_port_t) + destructor: ipc_eventlink_deallocate(ipc_eventlink_t) +#endif /* KERNEL_SERVER */ + ; + +type eventlink_consume_ref_t = mach_port_move_send_t + ctype: mach_port_t +#if KERNEL_SERVER + intran: ipc_eventlink_t convert_port_to_eventlink(mach_port_t) + destructor: ipc_eventlink_deallocate(ipc_eventlink_t) +#endif /* KERNEL_SERVER */ + ; + +type eventlink_port_pair_t = array[2] of mach_port_t; +type mach_eventlink_create_option_t = uint32_t; +type mach_eventlink_associate_option_t = uint32_t; +type mach_eventlink_disassociate_option_t = uint32_t; +type mach_eventlink_signal_wait_option_t = uint32_t; + +#endif /* _MACH_MACH_EVENTLINK_TYPE_DEFS */ + type lock_set_t = mach_port_t #if KERNEL_SERVER intran: lock_set_t convert_port_to_lock_set(mach_port_t) @@ -642,6 +723,7 @@ simport ; /* for ledger conversions */ simport ; /* for processor conversions */ simport ; /* for lock-set conversions */ simport ; /* for semaphore conversions */ +simport ; /* for eventlink conversions */ simport ; /* for memory object type conversions */ simport ; /* for vm_map conversions */ #if CONFIG_ARCADE diff --git a/osfmk/mach/mach_types.h b/osfmk/mach/mach_types.h index e46370cc8..bf5c680b2 100644 --- a/osfmk/mach/mach_types.h +++ b/osfmk/mach/mach_types.h @@ -118,9 +118,9 @@ * If we are in the kernel, then pick up the kernel definitions for * the basic mach types. 
*/ -typedef struct task *task_t, *task_name_t, *task_inspect_t, *task_suspension_token_t; -typedef struct thread *thread_t, *thread_act_t, *thread_inspect_t; -typedef struct ipc_space *ipc_space_t, *ipc_space_inspect_t; +typedef struct task *task_t, *task_name_t, *task_inspect_t, *task_read_t, *task_suspension_token_t, *task_policy_set_t, *task_policy_get_t; +typedef struct thread *thread_t, *thread_act_t, *thread_inspect_t, *thread_read_t; +typedef struct ipc_space *ipc_space_t, *ipc_space_read_t, *ipc_space_inspect_t; typedef struct coalition *coalition_t; typedef struct host *host_t; typedef struct host *host_priv_t; @@ -134,7 +134,9 @@ typedef struct alarm *alarm_t; typedef struct clock *clock_serv_t; typedef struct clock *clock_ctrl_t; typedef struct arcade_register *arcade_register_t; -typedef struct suid_cred *suid_cred_t; +typedef struct ipc_eventlink *ipc_eventlink_t; +typedef struct ipc_port *eventlink_port_pair_t[2]; +typedef struct suid_cred *suid_cred_t; /* * OBSOLETE: lock_set interfaces are obsolete. @@ -156,6 +158,8 @@ struct ledger; struct alarm; struct clock; struct arcade_register; +struct ipc_eventlink; +struct ipc_port; struct suid_cred; __END_DECLS @@ -170,12 +174,17 @@ __END_DECLS */ typedef mach_port_t task_t; typedef mach_port_t task_name_t; +typedef mach_port_t task_policy_set_t; +typedef mach_port_t task_policy_get_t; typedef mach_port_t task_inspect_t; +typedef mach_port_t task_read_t; typedef mach_port_t task_suspension_token_t; typedef mach_port_t thread_t; typedef mach_port_t thread_act_t; typedef mach_port_t thread_inspect_t; +typedef mach_port_t thread_read_t; typedef mach_port_t ipc_space_t; +typedef mach_port_t ipc_space_read_t; typedef mach_port_t ipc_space_inspect_t; typedef mach_port_t coalition_t; typedef mach_port_t host_t; @@ -191,6 +200,8 @@ typedef mach_port_t alarm_t; typedef mach_port_t clock_serv_t; typedef mach_port_t clock_ctrl_t; typedef mach_port_t arcade_register_t; +typedef mach_port_t ipc_eventlink_t; +typedef mach_port_t eventlink_port_pair_t[2]; typedef mach_port_t suid_cred_t; #endif /* KERNEL */ @@ -213,6 +224,7 @@ typedef exception_handler_t *exception_handler_array_t; typedef mach_port_t vm_task_entry_t; typedef mach_port_t io_master_t; typedef mach_port_t UNDServerRef; +typedef mach_port_t mach_eventlink_t; /* * Mig doesn't translate the components of an array. 
@@ -267,12 +279,15 @@ typedef uint32_t suid_cred_uid_t; #ifdef KERNEL #define TASK_NULL ((task_t) NULL) #define TASK_NAME_NULL ((task_name_t) NULL) -#define TASK_INSPECT_NULL ((task_inspect_t) NULL) +#define TASK_INSPECT_NULL ((task_inspect_t) NULL) +#define TASK_READ_NULL ((task_read_t) NULL) #define THREAD_NULL ((thread_t) NULL) #define THREAD_INSPECT_NULL ((thread_inspect_t)NULL) +#define THREAD_READ_NULL ((thread_read_t)NULL) #define TID_NULL ((uint64_t) NULL) #define THR_ACT_NULL ((thread_act_t) NULL) #define IPC_SPACE_NULL ((ipc_space_t) NULL) +#define IPC_SPACE_READ_NULL ((ipc_space_read_t) NULL) #define IPC_SPACE_INSPECT_NULL ((ipc_space_inspect_t) NULL) #define COALITION_NULL ((coalition_t) NULL) #define HOST_NULL ((host_t) NULL) @@ -287,16 +302,21 @@ typedef uint32_t suid_cred_uid_t; #define CLOCK_NULL ((clock_t) NULL) #define UND_SERVER_NULL ((UNDServerRef) NULL) #define ARCADE_REG_NULL ((arcade_register_t) NULL) -#define SUID_CRED_NULL ((suid_cred_t) NULL) +#define MACH_EVENTLINK_NULL ((mach_eventlink_t) 0) +#define IPC_EVENTLINK_NULL ((ipc_eventlink_t) NULL) +#define SUID_CRED_NULL ((suid_cred_t) NULL) #else #define TASK_NULL ((task_t) 0) #define TASK_NAME_NULL ((task_name_t) 0) -#define TASK_INSPECT_NULL ((task_inspect_t) 0) +#define TASK_INSPECT_NULL ((task_inspect_t) 0) +#define TASK_READ_NULL ((task_read_t) 0) #define THREAD_NULL ((thread_t) 0) #define THREAD_INSPECT_NULL ((thread_inspect_t) 0) +#define THREAD_READ_NULL ((thread_read_t) 0) #define TID_NULL ((uint64_t) 0) #define THR_ACT_NULL ((thread_act_t) 0) #define IPC_SPACE_NULL ((ipc_space_t) 0) +#define IPC_SPACE_READ_NULL ((ipc_space_read_t) 0) #define IPC_SPACE_INSPECT_NULL ((ipc_space_inspect_t) 0) #define COALITION_NULL ((coalition_t) 0) #define HOST_NULL ((host_t) 0) @@ -311,9 +331,27 @@ typedef uint32_t suid_cred_uid_t; #define CLOCK_NULL ((clock_t) 0) #define UND_SERVER_NULL ((UNDServerRef) 0) #define ARCADE_REG_NULL ((arcade_register_t) 0) -#define SUID_CRED_NULL ((suid_cred_t) 0) +#define MACH_EVENTLINK_NULL ((mach_eventlink_t) 0) +#define IPC_EVENTLINK_NULL ((ipc_eventlink_t) 0) +#define SUID_CRED_NULL ((suid_cred_t) 0) #endif +/* capability strictly _DECREASING_. + * not ordered the other way around because we want TASK_FLAVOR_CONTROL + * to be closest to the itk_lock. see task.h. 
+ */ +typedef unsigned int mach_task_flavor_t; +#define TASK_FLAVOR_CONTROL 0 /* a task_t */ +#define TASK_FLAVOR_READ 1 /* a task_read_t */ +#define TASK_FLAVOR_INSPECT 2 /* a task_inspect_t */ +#define TASK_FLAVOR_NAME 3 /* a task_name_t */ + +/* capability strictly _DECREASING_ */ +typedef unsigned int mach_thread_flavor_t; +#define THREAD_FLAVOR_CONTROL 0 /* a thread_t */ +#define THREAD_FLAVOR_READ 1 /* a thread_read_t */ +#define THREAD_FLAVOR_INSPECT 2 /* a thread_inspect_t */ + /* DEPRECATED */ typedef natural_t ledger_item_t; #define LEDGER_ITEM_INFINITY ((ledger_item_t) (~0)) diff --git a/osfmk/mach/mach_vm.defs b/osfmk/mach/mach_vm.defs index 88097761f..94f3db918 100644 --- a/osfmk/mach/mach_vm.defs +++ b/osfmk/mach/mach_vm.defs @@ -207,7 +207,7 @@ routine PREFIX(mach_vm_read) ( #else routine PREFIX(vm_read) ( #endif - target_task : vm_map_t; + target_task : vm_map_read_t; address : mach_vm_address_t; size : mach_vm_size_t; out data : pointer_t); @@ -221,9 +221,9 @@ routine mach_vm_read_list( #else routine vm_read_list( #endif - target_task : vm_map_t; - inout data_list : mach_vm_read_entry_t; - count : natural_t); + target_task : vm_map_read_t; + inout data_list : mach_vm_read_entry_t; + count : natural_t); /* * Writes the contents of the specified range of the @@ -274,7 +274,7 @@ routine mach_vm_read_overwrite( #else routine vm_read_overwrite( #endif - target_task : vm_map_t; + target_task : vm_map_read_t; address : mach_vm_address_t; size : mach_vm_size_t; data : mach_vm_address_t; @@ -405,18 +405,17 @@ routine mach_vm_page_query( #else routine vm_map_page_query( #endif - target_map :vm_map_t; + target_map :vm_map_read_t; offset :mach_vm_offset_t; out disposition :integer_t; out ref_count :integer_t); - #if !defined(_MACH_VM_PUBLISH_AS_LOCAL_) routine mach_vm_region_recurse( #else routine vm_region_recurse_64( #endif - target_task : vm_map_t; + target_task : vm_map_read_t; inout address : mach_vm_address_t; out size : mach_vm_size_t; inout nesting_depth : natural_t; @@ -438,12 +437,12 @@ routine mach_vm_region( #else routine vm_region_64( #endif - target_task : vm_map_t; - inout address : mach_vm_address_t; - out size : mach_vm_size_t; - flavor : vm_region_flavor_t; - out info : vm_region_info_t, CountInOut; - out object_name : memory_object_name_t = + target_task : vm_map_read_t; + inout address : mach_vm_address_t; + out size : mach_vm_size_t; + flavor : vm_region_flavor_t; + out info : vm_region_info_t, CountInOut; + out object_name : memory_object_name_t = MACH_MSG_TYPE_MOVE_SEND ctype: mach_port_t); @@ -491,7 +490,7 @@ routine PREFIX(vm_purgable_control) ( #if !defined(_MACH_VM_PUBLISH_AS_LOCAL_) routine mach_vm_page_info( - target_task : vm_map_t; + target_task : vm_map_read_t; address : mach_vm_address_t; flavor : vm_page_info_flavor_t; out info : vm_page_info_t, CountInOut); @@ -501,7 +500,7 @@ skip; #if !defined(_MACH_VM_PUBLISH_AS_LOCAL_) routine mach_vm_page_range_query( - target_map : vm_map_t; + target_map : vm_map_read_t; address : mach_vm_offset_t; size : mach_vm_size_t; dispositions : mach_vm_address_t; diff --git a/osfmk/mach/machine.h b/osfmk/mach/machine.h index 6865cee72..4b9fef9c5 100644 --- a/osfmk/mach/machine.h +++ b/osfmk/mach/machine.h @@ -162,6 +162,9 @@ __END_DECLS #define CPU_TYPE_POWERPC ((cpu_type_t) 18) #define CPU_TYPE_POWERPC64 (CPU_TYPE_POWERPC | CPU_ARCH_ABI64) /* skip ((cpu_type_t) 19) */ +/* skip ((cpu_type_t) 20 */ +/* skip ((cpu_type_t) 21 */ +/* skip ((cpu_type_t) 22 */ /* * Machine subtypes (these are defined here, instead of in a 
machine @@ -174,7 +177,14 @@ __END_DECLS */ #define CPU_SUBTYPE_MASK 0xff000000 /* mask for feature flags */ #define CPU_SUBTYPE_LIB64 0x80000000 /* 64 bit libraries */ +#define CPU_SUBTYPE_PTRAUTH_ABI 0x80000000 /* pointer authentication with versioned ABI */ +/* + * When selecting a slice, ANY will pick the slice with the best + * grading for the selected cpu_type_t, unlike the "ALL" subtypes, + * which are the slices that can run on any hardware for that cpu type. + */ +#define CPU_SUBTYPE_ANY ((cpu_subtype_t) -1) /* * Object files that are hand-crafted to run on any @@ -375,6 +385,9 @@ __END_DECLS /* CPU subtype feature flags for ptrauth on arm64e platforms */ #define CPU_SUBTYPE_ARM64_PTR_AUTH_MASK 0x0f000000 #define CPU_SUBTYPE_ARM64_PTR_AUTH_VERSION(x) (((x) & CPU_SUBTYPE_ARM64_PTR_AUTH_MASK) >> 24) +#ifdef PRIVATE +#define CPU_SUBTYPE_ARM64_PTR_AUTH_CURRENT_VERSION 0 +#endif /* PRIVATE */ /* * ARM64_32 subtypes @@ -409,6 +422,7 @@ __END_DECLS #define CPUFAMILY_INTEL_BROADWELL 0x582ed09c #define CPUFAMILY_INTEL_SKYLAKE 0x37fc219f #define CPUFAMILY_INTEL_KABYLAKE 0x0f817246 +#define CPUFAMILY_INTEL_ICELAKE 0x38435547 #define CPUFAMILY_ARM_9 0xe73283ae #define CPUFAMILY_ARM_11 0x8ff620d8 #define CPUFAMILY_ARM_XSCALE 0x53b005f5 @@ -423,9 +437,12 @@ __END_DECLS #define CPUFAMILY_ARM_HURRICANE 0x67ceee93 #define CPUFAMILY_ARM_MONSOON_MISTRAL 0xe81e7ef6 #define CPUFAMILY_ARM_VORTEX_TEMPEST 0x07d34b9f -#ifndef RC_HIDE_XNU_LIGHTNING #define CPUFAMILY_ARM_LIGHTNING_THUNDER 0x462504d2 -#endif /* !RC_HIDE_XNU_LIGHTNING */ + +#define CPUSUBFAMILY_UNKNOWN 0 +#define CPUSUBFAMILY_ARM_HP 1 +#define CPUSUBFAMILY_ARM_HG 2 +#define CPUSUBFAMILY_ARM_M 3 /* The following synonyms are deprecated: */ #define CPUFAMILY_INTEL_6_23 CPUFAMILY_INTEL_PENRYN diff --git a/osfmk/mach/machine/sdt.h b/osfmk/mach/machine/sdt.h index 9c24d5db7..c20c51e17 100644 --- a/osfmk/mach/machine/sdt.h +++ b/osfmk/mach/machine/sdt.h @@ -283,6 +283,10 @@ type3, arg3, type4, arg4, type5, arg5, type6, arg6, type7, arg7) \ DTRACE_PROBE7(__vminfo_, name, arg1, arg2, arg3, arg4, arg5, arg6, arg7) +#define DTRACE_VM8(name, type1, arg1, type2, arg2, \ + type3, arg3, type4, arg4, type5, arg5, type6, arg6, type7, arg7, type8, arg8) \ + DTRACE_PROBE8(__vminfo_, name, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8) + #define DTRACE_IP(name) \ DTRACE_PROBE(__ip_, name) @@ -444,6 +448,32 @@ #if PRIVATE #endif /* PRIVATE */ +#ifdef PRIVATE +#define DTRACE_HV(name) \ + DTRACE_PROBE(__hv_, name) + +#define DTRACE_HV1(name, type1, arg1) \ + DTRACE_PROBE1(__hv_, name, arg1) + +#define DTRACE_HV2(name, type1, arg1, type2, arg2) \ + DTRACE_PROBE2(__hv_, name, arg1, arg2) + +#define DTRACE_HV3(name, type1, arg1, type2, arg2, type3, arg3) \ + DTRACE_PROBE3(__hv_, name, arg1, arg2, arg3) + +#define DTRACE_HV4(name, type1, arg1, type2, arg2, type3, arg3, \ + type4, arg4) \ + DTRACE_PROBE4(__hv_, name, arg1, arg2, arg3, arg4) + +#define DTRACE_HV5(name, type1, arg1, type2, arg2, type3, arg3, \ + type4, arg4, type5, arg5) \ + DTRACE_PROBE5(__hv_, name, arg1, arg2, arg3, arg4, arg5) + +#define DTRACE_HV6(name, type1, arg1, type2, arg2, type3, arg3, \ + type4, arg4, type5, arg5, type6, arg6) \ + DTRACE_PROBE6(__hv_, name, arg1, arg2, arg3, arg4, arg5, arg6) +#endif /* PRIVATE */ + #endif /* KERNEL */ #endif /* _MACH_MACHINE_SYS_SDT_H */ diff --git a/osfmk/mach/machine/sdt_isa.h b/osfmk/mach/machine/sdt_isa.h index 7145e43d1..e29ea4085 100644 --- a/osfmk/mach/machine/sdt_isa.h +++ b/osfmk/mach/machine/sdt_isa.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2007 
Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifndef _MACH_MACHINE_SDT_ISA_H_ diff --git a/osfmk/mach/memory_object_types.h b/osfmk/mach/memory_object_types.h index 6c5cdd941..d59123748 100644 --- a/osfmk/mach/memory_object_types.h +++ b/osfmk/mach/memory_object_types.h @@ -436,7 +436,6 @@ typedef struct memory_object_attr_info memory_object_attr_info_data_t; #define MAX_UPL_TRANSFER (MAX_UPL_TRANSFER_BYTES / PAGE_SIZE) #endif - struct upl_page_info { ppnum_t phys_addr; /* physical page index number */ unsigned int @@ -447,9 +446,13 @@ struct upl_page_info { precious:1, /* must be cleaned, we have only copy */ device:1, /* no page data, mapped dev memory */ speculative:1, /* page is valid, but not yet accessed */ - cs_validated:1, /* CODE SIGNING: page was validated */ - cs_tainted:1, /* CODE SIGNING: page is tainted */ - cs_nx:1, /* CODE SIGNING: page is NX */ +#define VMP_CS_BITS 4 +#define VMP_CS_ALL_FALSE 0x0 +#define VMP_CS_ALL_TRUE 0xF + cs_validated:VMP_CS_BITS, /* CODE SIGNING: page was validated */ + cs_tainted:VMP_CS_BITS, /* CODE SIGNING: page is tainted */ + cs_nx:VMP_CS_BITS, /* CODE SIGNING: page is NX */ + needed:1, /* page should be left in cache on abort */ mark:1, /* a mark flag for the creator to use as they wish */ :0; /* force to long boundary */ @@ -713,13 +716,13 @@ typedef uint64_t upl_control_flags_t; /* modifier macros for upl_t */ #define UPL_SET_CS_VALIDATED(upl, index, value) \ - ((upl)[(index)].cs_validated = ((value) ? TRUE : FALSE)) + ((upl)[(index)].cs_validated = (value)) #define UPL_SET_CS_TAINTED(upl, index, value) \ - ((upl)[(index)].cs_tainted = ((value) ? TRUE : FALSE)) + ((upl)[(index)].cs_tainted = (value)) #define UPL_SET_CS_NX(upl, index, value) \ - ((upl)[(index)].cs_nx = ((value) ? 
TRUE : FALSE)) + ((upl)[(index)].cs_nx = (value)) #define UPL_SET_REPRIO_INFO(upl, index, blkno, len) \ ((upl)->upl_reprio_info[(index)]) = (((uint64_t)(blkno) & UPL_REPRIO_INFO_MASK) | \ diff --git a/osfmk/mach/message.h b/osfmk/mach/message.h index cb789b57b..9ffa0bbe9 100644 --- a/osfmk/mach/message.h +++ b/osfmk/mach/message.h @@ -80,6 +80,9 @@ #include #include #include +#if !KERNEL && PRIVATE +#include +#endif /* * The timeout mechanism uses mach_msg_timeout_t values, @@ -228,6 +231,93 @@ typedef unsigned int mach_msg_priority_t; #define MACH_MSG_PRIORITY_UNSPECIFIED (mach_msg_priority_t) 0 +#if PRIVATE +typedef uint8_t mach_msg_qos_t; // same as thread_qos_t +#define MACH_MSG_QOS_UNSPECIFIED 0 +#define MACH_MSG_QOS_MAINTENANCE 1 +#define MACH_MSG_QOS_BACKGROUND 2 +#define MACH_MSG_QOS_UTILITY 3 +#define MACH_MSG_QOS_DEFAULT 4 +#define MACH_MSG_QOS_USER_INITIATED 5 +#define MACH_MSG_QOS_USER_INTERACTIVE 6 +#define MACH_MSG_QOS_LAST 6 + +extern int mach_msg_priority_is_pthread_priority(mach_msg_priority_t pri); +extern mach_msg_priority_t mach_msg_priority_encode( + mach_msg_qos_t override_qos, + mach_msg_qos_t qos, + int relpri); +extern mach_msg_qos_t mach_msg_priority_overide_qos(mach_msg_priority_t pri); +extern mach_msg_qos_t mach_msg_priority_qos(mach_msg_priority_t pri); +extern int mach_msg_priority_relpri(mach_msg_priority_t pri); + +#if KERNEL || !TARGET_OS_SIMULATOR +static inline int +mach_msg_priority_is_pthread_priority_inline(mach_msg_priority_t pri) +{ + return (pri & 0xff) == 0xff; +} + +#define MACH_MSG_PRIORITY_RELPRI_SHIFT 8 +#define MACH_MSG_PRIORITY_RELPRI_MASK (0xff << MACH_MSG_PRIORITY_RELPRI_SHIFT) +#define MACH_MSG_PRIORITY_QOS_SHIFT 16 +#define MACH_MSG_PRIORITY_QOS_MASK (0xf << MACH_MSG_PRIORITY_QOS_SHIFT) +#define MACH_MSG_PRIORITY_OVERRIDE_SHIFT 20 +#define MACH_MSG_PRIORITY_OVERRIDE_MASK (0xf << MACH_MSG_PRIORITY_OVERRIDE_SHIFT) + +static inline mach_msg_priority_t +mach_msg_priority_encode_inline(mach_msg_qos_t override_qos, mach_msg_qos_t qos, int relpri) +{ + mach_msg_priority_t pri = 0; + if (qos > 0 && qos <= MACH_MSG_QOS_LAST) { + pri |= (uint32_t)(qos << MACH_MSG_PRIORITY_QOS_SHIFT); + pri |= (uint32_t)((uint8_t)(relpri - 1) << MACH_MSG_PRIORITY_RELPRI_SHIFT); + } + if (override_qos > 0 && override_qos <= MACH_MSG_QOS_LAST) { + pri |= (uint32_t)(override_qos << MACH_MSG_PRIORITY_OVERRIDE_SHIFT); + } + return pri; +} + +static inline mach_msg_qos_t +mach_msg_priority_overide_qos_inline(mach_msg_priority_t pri) +{ + pri &= MACH_MSG_PRIORITY_OVERRIDE_MASK; + pri >>= MACH_MSG_PRIORITY_OVERRIDE_SHIFT; + return (mach_msg_qos_t)(pri <= MACH_MSG_QOS_LAST ? pri : 0); +} + +static inline mach_msg_qos_t +mach_msg_priority_qos_inline(mach_msg_priority_t pri) +{ + pri &= MACH_MSG_PRIORITY_QOS_MASK; + pri >>= MACH_MSG_PRIORITY_QOS_SHIFT; + return (mach_msg_qos_t)(pri <= MACH_MSG_QOS_LAST ? pri : 0); +} + +static inline int +mach_msg_priority_relpri_inline(mach_msg_priority_t pri) +{ + if (mach_msg_priority_qos_inline(pri)) { + return (int8_t)(pri >> MACH_MSG_PRIORITY_RELPRI_SHIFT) + 1; + } + return 0; +} + +#define mach_msg_priority_is_pthread_priority(...) \ + mach_msg_priority_is_pthread_priority_inline(__VA_ARGS__) +#define mach_msg_priority_encode(...) \ + mach_msg_priority_encode_inline(__VA_ARGS__) +#define mach_msg_priority_overide_qos(...) \ + mach_msg_priority_overide_qos_inline(__VA_ARGS__) +#define mach_msg_priority_qos(...) \ + mach_msg_priority_qos_inline(__VA_ARGS__) +#define mach_msg_priority_relpri(...) 
\ + mach_msg_priority_relpri_inline(__VA_ARGS__) +#endif + +#endif // PRIVATE + typedef unsigned int mach_msg_type_name_t; #define MACH_MSG_TYPE_MOVE_RECEIVE 16 /* Must hold receive right */ @@ -552,6 +642,9 @@ typedef struct{ mach_port_name_t sender; } msg_labels_t; +typedef int mach_msg_filter_id; +#define MACH_MSG_FILTER_POLICY_ALLOW (mach_msg_filter_id)0 + /* * Trailer type to pass MAC policy label info as a mach message trailer. * @@ -564,7 +657,7 @@ typedef struct{ security_token_t msgh_sender; audit_token_t msgh_audit; mach_port_context_t msgh_context; - int msgh_ad; + mach_msg_filter_id msgh_ad; msg_labels_t msgh_labels; } mach_msg_mac_trailer_t; @@ -576,7 +669,7 @@ typedef struct{ security_token_t msgh_sender; audit_token_t msgh_audit; mach_port_context32_t msgh_context; - int msgh_ad; + mach_msg_filter_id msgh_ad; msg_labels_t msgh_labels; } mach_msg_mac_trailer32_t; @@ -587,7 +680,7 @@ typedef struct{ security_token_t msgh_sender; audit_token_t msgh_audit; mach_port_context64_t msgh_context; - int msgh_ad; + mach_msg_filter_id msgh_ad; msg_labels_t msgh_labels; } mach_msg_mac_trailer64_t; @@ -985,6 +1078,8 @@ typedef kern_return_t mach_msg_return_t; /* compatibility: no longer a returned error */ #define MACH_SEND_NO_GRANT_DEST 0x10000016 /* The destination port doesn't accept ports in body */ +#define MACH_SEND_MSG_FILTERED 0x10000017 +/* Message send was rejected by message filter */ #define MACH_RCV_IN_PROGRESS 0x10004001 /* Thread is waiting for receive. (Internal use only.) */ diff --git a/osfmk/mach/port.h b/osfmk/mach/port.h index 2eb09af2e..a70035edd 100644 --- a/osfmk/mach/port.h +++ b/osfmk/mach/port.h @@ -402,6 +402,9 @@ typedef struct mach_port_qos { #define MPO_STRICT 0x20 /* Apply strict guarding for port */ #define MPO_DENAP_RECEIVER 0x40 /* Mark the port as App de-nap receiver */ #define MPO_IMMOVABLE_RECEIVE 0x80 /* Mark the port as immovable; protected by the guard context */ +#define MPO_FILTER_MSG 0x100 /* Allow message filtering */ +#define MPO_TG_BLOCK_TRACKING 0x200 /* Track blocking relationship for thread group during sync IPC */ + /* * Structure to define optional attributes for a newly * constructed port. @@ -409,7 +412,10 @@ typedef struct mach_port_qos { typedef struct mach_port_options { uint32_t flags; /* Flags defining attributes for port */ mach_port_limits_t mpl; /* Message queue limit for port */ - uint64_t reserved[2]; /* Reserved */ + union { + uint64_t reserved[2]; /* Reserved */ + mach_port_name_t work_interval_port; /* Work interval port */ + }; }mach_port_options_t; typedef mach_port_options_t *mach_port_options_ptr_t; @@ -430,6 +436,7 @@ enum mach_port_guard_exception_codes { kGUARD_EXC_INCORRECT_GUARD = 1u << 4, kGUARD_EXC_IMMOVABLE = 1u << 5, kGUARD_EXC_STRICT_REPLY = 1u << 6, + kGUARD_EXC_MSG_FILTERED = 1u << 7, /* start of [optionally] non-fatal guards */ kGUARD_EXC_INVALID_RIGHT = 1u << 8, kGUARD_EXC_INVALID_NAME = 1u << 9, diff --git a/osfmk/mach/processor_set.defs b/osfmk/mach/processor_set.defs index dc0f40774..0eccbf145 100644 --- a/osfmk/mach/processor_set.defs +++ b/osfmk/mach/processor_set.defs @@ -158,4 +158,12 @@ routine processor_set_info( out host : host_t; out info_out : processor_set_info_t, CountInOut); +/* + * List all tasks(/inspect/read) in processor set based on flavor. 
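The mach_msg_priority_* helpers added to message.h above store the QoS in bits 16-19, the override QoS in bits 20-23, and the relative priority (biased by one) in bits 8-15, with a low byte of 0xff reserved for pthread priorities. A small round-trip sketch, assuming a build where the PRIVATE block is visible; the function name and the chosen values are illustrative:

#include <assert.h>
#include <mach/message.h>

static void
priority_roundtrip_example(void)
{
    /* Encode QoS USER_INITIATED with relative priority -5 and no override QoS. */
    mach_msg_priority_t pri = mach_msg_priority_encode(
        MACH_MSG_QOS_UNSPECIFIED, MACH_MSG_QOS_USER_INITIATED, -5);

    assert(mach_msg_priority_qos(pri) == MACH_MSG_QOS_USER_INITIATED);
    assert(mach_msg_priority_relpri(pri) == -5);
    assert(!mach_msg_priority_is_pthread_priority(pri));  /* low byte != 0xff */
}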
+ */ +routine processor_set_tasks_with_flavor( + processor_set : processor_set_t; + flavor : mach_task_flavor_t; + out task_list : task_array_t); + /* vim: set ft=c : */ diff --git a/osfmk/mach/semaphore.h b/osfmk/mach/semaphore.h index 6c31f21ea..efb9a28e6 100644 --- a/osfmk/mach/semaphore.h +++ b/osfmk/mach/semaphore.h @@ -133,6 +133,7 @@ __END_DECLS #define SEMAPHORE_USE_SAVED_RESULT 0x01000000 /* internal use only */ #define SEMAPHORE_SIGNAL_RELEASE 0x02000000 /* internal use only */ +#define SEMAPHORE_THREAD_HANDOFF 0x04000000 #endif /* PRIVATE */ diff --git a/osfmk/mach/shared_memory_server.h b/osfmk/mach/shared_memory_server.h index c4eeb4726..5a63b9174 100644 --- a/osfmk/mach/shared_memory_server.h +++ b/osfmk/mach/shared_memory_server.h @@ -120,14 +120,6 @@ typedef struct sf_mapping sf_mapping_t; * between dyld and the kernel. * */ -struct shared_file_mapping_np { - mach_vm_address_t sfm_address; - mach_vm_size_t sfm_size; - mach_vm_offset_t sfm_file_offset; - vm_prot_t sfm_max_prot; - vm_prot_t sfm_init_prot; -}; - struct shared_region_range_np { mach_vm_address_t srr_address; mach_vm_size_t srr_size; diff --git a/osfmk/mach/shared_region.h b/osfmk/mach/shared_region.h index f6efdcbc1..b1c309496 100644 --- a/osfmk/mach/shared_region.h +++ b/osfmk/mach/shared_region.h @@ -49,6 +49,9 @@ #define SHARED_REGION_NESTING_MIN_X86_64 0x0000000000200000ULL #define SHARED_REGION_NESTING_MAX_X86_64 0xFFFFFFFFFFE00000ULL +#ifdef XNU_KERNEL_PRIVATE +#endif + #define SHARED_REGION_BASE_PPC 0x90000000ULL #define SHARED_REGION_SIZE_PPC 0x20000000ULL #define SHARED_REGION_NESTING_BASE_PPC 0x90000000ULL @@ -82,8 +85,8 @@ #endif #define SHARED_REGION_BASE_ARM64 0x180000000ULL #define SHARED_REGION_SIZE_ARM64 0x100000000ULL -#define SHARED_REGION_NESTING_BASE_ARM64 0x180000000ULL -#define SHARED_REGION_NESTING_SIZE_ARM64 0x100000000ULL +#define SHARED_REGION_NESTING_BASE_ARM64 SHARED_REGION_BASE_ARM64 +#define SHARED_REGION_NESTING_SIZE_ARM64 SHARED_REGION_SIZE_ARM64 #define SHARED_REGION_NESTING_MIN_ARM64 ? #define SHARED_REGION_NESTING_MAX_ARM64 ? @@ -137,9 +140,12 @@ void post_sys_powersource(int); #endif /* KERNEL_PRIVATE */ /* - * All shared_region_* declarations are a private interface - * between dyld and the kernel. - * + * The shared_region_* declarations are a private interface between dyld and the kernel. + */ + +/* + * This is used by legacy shared_region_map_and_slide_np() interface. + * We build a shared_file_mapping_slide_np from this. 
*/ struct shared_file_mapping_np { mach_vm_address_t sfm_address; @@ -148,9 +154,42 @@ struct shared_file_mapping_np { vm_prot_t sfm_max_prot; vm_prot_t sfm_init_prot; }; -#define VM_PROT_COW 0x8 /* must not interfere with normal prot assignments */ -#define VM_PROT_ZF 0x10 /* must not interfere with normal prot assignments */ -#define VM_PROT_SLIDE 0x20 /* must not interfere with normal prot assignments */ + +struct shared_file_mapping_slide_np { + mach_vm_address_t sms_address; /* address at which to create mapping */ + mach_vm_size_t sms_size; /* size of region to map */ + mach_vm_offset_t sms_file_offset; /* offset into file to be mapped */ + user_addr_t sms_slide_size; /* size of data at sms_slide_start */ + user_addr_t sms_slide_start; /* address from which to get relocation data */ + vm_prot_t sms_max_prot; /* protections, plus flags, see below */ + vm_prot_t sms_init_prot; +}; +struct shared_file_np { + int sf_fd; /* file to be mapped into shared region */ + uint32_t sf_mappings_count; /* number of mappings */ + uint32_t sf_slide; /* distance in bytes of the slide */ +}; + +/* + * Extensions to sfm_max_prot that identify how to handle each mapping. + * These must not interfere with normal prot assignments. + * + * VM_PROT_COW - copy on write pages + * + * VM_PROT_ZF - zero fill pages + * + * VM_PROT_SLIDE - file pages which require relocation and, on arm64e, signing + * these will be unique per shared region. + * + * VM_PROT_NOAUTH - file pages which don't require signing. When combined + * with VM_PROT_SLIDE, pages are shareable across different + * shared regions which map the same file with the same relocation info. + */ +#define VM_PROT_COW 0x08 +#define VM_PROT_ZF 0x10 +#define VM_PROT_SLIDE 0x20 +#define VM_PROT_NOAUTH 0x40 +#define VM_PROT_TRANSLATED_ALLOW_EXECUTE 0x80 #ifndef KERNEL diff --git a/osfmk/mach/syscall_sw.h b/osfmk/mach/syscall_sw.h index f1e419809..dd9bd8404 100644 --- a/osfmk/mach/syscall_sw.h +++ b/osfmk/mach/syscall_sw.h @@ -89,7 +89,7 @@ kernel_trap(_kernelrpc_mach_vm_deallocate_trap,-12,5) /* 3 args, +2 for mach_vm_ kernel_trap(_kernelrpc_mach_vm_protect_trap,-14,7) /* 5 args, +2 for mach_vm_address_t and mach_vm_size_t */ kernel_trap(_kernelrpc_mach_vm_map_trap,-15,9) kernel_trap(_kernelrpc_mach_port_allocate_trap,-16,3) -kernel_trap(_kernelrpc_mach_port_destroy_trap,-17,2) +/* mach_port_destroy */ kernel_trap(_kernelrpc_mach_port_deallocate_trap,-18,2) kernel_trap(_kernelrpc_mach_port_mod_refs_trap,-19,4) kernel_trap(_kernelrpc_mach_port_move_member_trap,-20,3) diff --git a/osfmk/mach/task.defs b/osfmk/mach/task.defs index 8723a5255..9e82450c7 100644 --- a/osfmk/mach/task.defs +++ b/osfmk/mach/task.defs @@ -93,11 +93,20 @@ routine task_terminate( /* * Returns the set of threads belonging to the target task. + * [Polymorphic] This routine returns thread port with the same + * flavor as that of the task port passed in. */ -routine task_threads( +routine +#ifdef KERNEL_SERVER +task_threads_from_user( + port : mach_port_t; +#else +task_threads( target_task : task_inspect_t; +#endif out act_list : thread_act_array_t); + /* * Stash a handful of ports for the target task; child * tasks inherit this stash at task_create time. @@ -160,11 +169,18 @@ routine task_resume( * Returns the current value of the selected special port * associated with the target task. 
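For the new shared_region.h declarations above, a hedged sketch of how a dyld-style caller might describe one mapping whose pages need sliding but not signing, using struct shared_file_mapping_slide_np with the VM_PROT_SLIDE and VM_PROT_NOAUTH flag bits; all values are placeholders, and the interface that consumes an array of these mappings is not shown in this hunk:

#include <mach/shared_region.h>
#include <mach/vm_prot.h>

/* Illustrative only: one 16K read-only page that requires relocation. */
struct shared_file_mapping_slide_np example_mapping = {
    .sms_address     = 0x180004000ULL,   /* placeholder target address */
    .sms_size        = 0x4000,           /* size of the region to map */
    .sms_file_offset = 0x4000,           /* offset of the data in the file */
    .sms_slide_start = 0,                /* address of relocation data, 0 if none */
    .sms_slide_size  = 0,
    .sms_max_prot    = VM_PROT_READ | VM_PROT_SLIDE | VM_PROT_NOAUTH,
    .sms_init_prot   = VM_PROT_READ,
};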
*/ -routine task_get_special_port( +routine +#ifdef KERNEL_SERVER +task_get_special_port_from_user( + port : mach_port_t; +#else +task_get_special_port( task : task_inspect_t; +#endif which_port : int; out special_port : mach_port_t); + /* * Set one of the special ports associated with the * target task. @@ -223,8 +239,14 @@ routine task_set_exception_ports( /* * Lookup some of the old exception handlers for a task */ -routine task_get_exception_ports( - task : task_inspect_t; +routine +#if KERNEL_SERVER +task_get_exception_ports_from_user( + port : mach_port_t; +#else +task_get_exception_ports( + task : task_t; +#endif exception_mask : exception_mask_t; out masks : exception_mask_array_t; out old_handlers : exception_handler_array_t, SameCount; @@ -285,12 +307,12 @@ routine semaphore_destroy( */ routine task_policy_set( - task : task_t; + task : task_policy_set_t; flavor : task_policy_flavor_t; policy_info : task_policy_t); routine task_policy_get( - task : task_t; + task : task_policy_get_t; flavor : task_policy_flavor_t; out policy_info : task_policy_t, CountInOut; inout get_default : boolean_t); @@ -362,7 +384,7 @@ routine task_set_ras_pc( * Return zone info as seen/used by this task. */ routine task_zone_info( - target_task : task_t; + target_task : task_inspect_t; out names : mach_zone_name_array_t, Dealloc; out info : task_zone_info_array_t, @@ -392,7 +414,7 @@ routine task_assign_default( * Get current assignment for task. */ routine task_get_assignment( - task : task_t; + task : task_inspect_t; out assigned_set : processor_set_name_t); /* @@ -411,7 +433,7 @@ routine task_set_policy( * threads in the task as they are created. */ routine task_get_state( - task : task_t; + task : task_read_t; flavor : thread_state_flavor_t; out old_state : thread_state_t, CountInOut); @@ -440,11 +462,11 @@ routine task_resume2( suspend_token : task_suspension_token_t); routine task_purgable_info( - task : task_t; + task : task_inspect_t; out stats : task_purgable_info_t); routine task_get_mach_voucher( - task : task_t; + task : task_read_t; which : mach_voucher_selector_t; out voucher : ipc_voucher_t); @@ -463,7 +485,7 @@ routine task_generate_corpse( routine task_map_corpse_info( task :task_t; - corspe_task :task_t; + corspe_task :task_read_t; out kcd_addr_begin :vm_address_t; out kcd_size :uint32_t); @@ -476,7 +498,7 @@ routine task_unregister_dyld_image_infos( dyld_images :dyld_kernel_image_info_array_t); routine task_get_dyld_image_infos( - task :task_inspect_t; + task :task_read_t; out dyld_images :dyld_kernel_image_info_array_t); routine task_register_dyld_shared_cache_image_info( @@ -495,7 +517,7 @@ routine task_register_dyld_get_process_state( routine task_map_corpse_info_64( task :task_t; - corspe_task :task_t; + corspe_task :task_read_t; out kcd_addr_begin :mach_vm_address_t; out kcd_size :mach_vm_size_t); diff --git a/osfmk/mach/task_info.h b/osfmk/mach/task_info.h index 93fa357c9..2f2ef1ceb 100644 --- a/osfmk/mach/task_info.h +++ b/osfmk/mach/task_info.h @@ -439,7 +439,7 @@ typedef struct task_vm_info *task_vm_info_t; typedef struct vm_purgeable_info task_purgable_info_t; -#define TASK_TRACE_MEMORY_INFO 24 +#define TASK_TRACE_MEMORY_INFO 24 /* no longer supported */ struct task_trace_memory_info { uint64_t user_memory_address; /* address of start of trace memory buffer */ uint64_t buffer_size; /* size of buffer in bytes */ @@ -476,7 +476,7 @@ struct task_power_info_v2 { gpu_energy_data gpu_energy; #if defined(__arm__) || defined(__arm64__) uint64_t task_energy; -#endif +#endif /* 
defined(__arm__) || defined(__arm64__) */ uint64_t task_ptime; uint64_t task_pset_switches; }; diff --git a/osfmk/mach/task_policy.h b/osfmk/mach/task_policy.h index 6aa6e9180..0efcafb89 100644 --- a/osfmk/mach/task_policy.h +++ b/osfmk/mach/task_policy.h @@ -110,8 +110,7 @@ typedef integer_t *task_policy_t; #define TASK_BASE_LATENCY_QOS_POLICY 10 #define TASK_BASE_THROUGHPUT_QOS_POLICY 11 - -enum task_role { +typedef enum task_role { TASK_RENICED = -1, TASK_UNSPECIFIED = 0, TASK_FOREGROUND_APPLICATION = 1, @@ -122,9 +121,7 @@ enum task_role { TASK_NONUI_APPLICATION = 6, TASK_DEFAULT_APPLICATION = 7, TASK_DARWINBG_APPLICATION = 8, -}; - -typedef integer_t task_role_t; +} task_role_t; struct task_category_policy { task_role_t role; @@ -193,7 +190,9 @@ typedef struct task_qos_policy *task_qos_policy_t; * When they do, we will update TASK_POLICY_INTERNAL_STRUCT_VERSION. */ -#define TASK_POLICY_INTERNAL_STRUCT_VERSION 2 +#define TASK_POLICY_INTERNAL_STRUCT_VERSION 4 + +#define trp_tal_enabled trp_reserved /* trp_tal_enabled is unused, reuse its slot to grow trp_role */ struct task_requested_policy { uint64_t trp_int_darwinbg :1, /* marked as darwinbg via setpriority */ @@ -209,8 +208,7 @@ struct task_requested_policy { trp_apptype :3, /* What apptype did launchd tell us this was (inherited) */ trp_boosted :1, /* Has a non-zero importance assertion count */ - trp_role :4, /* task's system role */ - trp_tal_enabled :1, /* TAL mode is enabled */ + trp_role :5, /* task's system role */ trp_over_latency_qos :3, /* Timer latency QoS override */ trp_over_through_qos :3, /* Computation throughput QoS override */ trp_sfi_managed :1, /* SFI Managed task */ @@ -250,8 +248,9 @@ struct task_effective_policy { tep_live_donor :1, /* task is a live importance boost donor */ tep_qos_clamp :3, /* task qos clamp (applies to qos-disabled threads too) */ tep_qos_ceiling :3, /* task qos ceiling (applies to only qos-participating threads) */ + tep_adaptive_bg :1, /* task is bg solely due to the adaptive daemon clamp */ - tep_reserved :31; + tep_reserved :30; }; #endif /* PRIVATE */ @@ -336,7 +335,7 @@ typedef struct task_policy_state *task_policy_state_t; #define TASK_APPTYPE_DAEMON_ADAPTIVE 3 #define TASK_APPTYPE_DAEMON_BACKGROUND 4 #define TASK_APPTYPE_APP_DEFAULT 5 -#define TASK_APPTYPE_APP_TAL 6 +#define TASK_APPTYPE_APP_TAL 6 /* unused */ #define TASK_APPTYPE_DRIVER 7 /* task policy state flags */ @@ -344,6 +343,7 @@ typedef struct task_policy_state *task_policy_state_t; #define TASK_IMP_DONOR 0x00000002 #define TASK_IMP_LIVE_DONOR 0x00000004 #define TASK_DENAP_RECEIVER 0x00000008 +#define TASK_IS_PIDSUSPENDED 0x00000010 /* requested_policy */ #define POLICY_REQ_INT_DARWIN_BG 0x00000001 diff --git a/osfmk/mach/task_special_ports.h b/osfmk/mach/task_special_ports.h index d1e5ec465..a2840ed89 100644 --- a/osfmk/mach/task_special_ports.h +++ b/osfmk/mach/task_special_ports.h @@ -69,18 +69,19 @@ typedef int task_special_port_t; -#define TASK_KERNEL_PORT 1 /* Represents task to the outside - * world.*/ +#define TASK_KERNEL_PORT 1 /* The full task port for task. */ #define TASK_HOST_PORT 2 /* The host (priv) port for task. */ -#define TASK_NAME_PORT 3 /* the name (unpriv) port for task */ +#define TASK_NAME_PORT 3 /* The name port for task. */ #define TASK_BOOTSTRAP_PORT 4 /* Bootstrap environment for task. */ -/* - * Evolving and likely to change. - */ +#define TASK_INSPECT_PORT 5 /* The inspect port for task. */ + +#define TASK_READ_PORT 6 /* The read port for task. 
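Given the TASK_READ_PORT selector added to task_special_ports.h above, a minimal user-space sketch of fetching the read port for the current task, assuming task_get_special_port() accepts the new selector as the special-port list suggests; the function name is illustrative and error handling is elided:

#include <mach/mach.h>
#include <mach/task_special_ports.h>

static mach_port_t
get_own_task_read_port(void)
{
    mach_port_t read_port = MACH_PORT_NULL;
    kern_return_t kr = task_get_special_port(mach_task_self(),
                                             TASK_READ_PORT, &read_port);
    return (kr == KERN_SUCCESS) ? read_port : MACH_PORT_NULL;
}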
*/ + + #define TASK_SEATBELT_PORT 7 /* Seatbelt compiler/DEM port for task. */ diff --git a/osfmk/mach/thread_act.defs b/osfmk/mach/thread_act.defs index 205fff541..e7f20e54c 100644 --- a/osfmk/mach/thread_act.defs +++ b/osfmk/mach/thread_act.defs @@ -102,7 +102,7 @@ act_get_state_to_user( #else act_get_state( #endif - target_act : thread_act_t; + target_act : thread_read_t; flavor : int; out old_state : thread_state_t, CountInOut); @@ -136,7 +136,7 @@ thread_get_state_to_user( #else thread_get_state( #endif - target_act : thread_act_t; + target_act : thread_read_t; flavor : thread_state_flavor_t; out old_state : thread_state_t, CountInOut); @@ -214,11 +214,18 @@ thread_depress_abort( * Returns the current value of the selected special port * associated with the target thread. */ -routine thread_get_special_port( - thr_act : thread_act_t; +routine +#ifdef KERNEL_SERVER +thread_get_special_port_from_user( + port : mach_port_t; +#else +thread_get_special_port( + thr_act : thread_inspect_t; +#endif which_port : int; out special_port : mach_port_t); + /* * Set one of the special ports associated with the * target thread. @@ -249,8 +256,14 @@ routine thread_set_exception_ports( /* * Lookup some of the old exception handlers for a thread */ -routine thread_get_exception_ports( - thread : thread_inspect_t; +routine +#if KERNEL_SERVER +thread_get_exception_ports_from_user( + port : mach_port_t; +#else +thread_get_exception_ports( + thread : thread_act_t; +#endif exception_mask : exception_mask_t; out masks : exception_mask_array_t; out old_handlers : exception_handler_array_t, SameCount; @@ -337,7 +350,7 @@ routine thread_assign_default( * Get current assignment for thread. */ routine thread_get_assignment( - thread : thread_act_t; + thread : thread_inspect_t; out assigned_set : processor_set_name_t); /* @@ -351,7 +364,7 @@ routine PREFIX(thread_set_policy)( limit : policy_limit_t); routine thread_get_mach_voucher( - thr_act : thread_act_t; + thr_act : thread_read_t; which : mach_voucher_selector_t; out voucher : ipc_voucher_t); @@ -364,4 +377,14 @@ routine thread_swap_mach_voucher( new_voucher : ipc_voucher_t; inout old_voucher : ipc_voucher_t); +routine thread_convert_thread_state( + thread : thread_act_t; + direction : int; + flavor : thread_state_flavor_t; + in_state : thread_state_t; + out out_state : thread_state_t, CountInOut); + +#ifdef XNU_KERNEL_PRIVATE +#endif + /* vim: set ft=c : */ diff --git a/osfmk/mach/thread_info.h b/osfmk/mach/thread_info.h index dfe51dd7d..a20d3dd90 100644 --- a/osfmk/mach/thread_info.h +++ b/osfmk/mach/thread_info.h @@ -214,6 +214,7 @@ typedef struct io_stat_info *io_stat_info_t; #if KERNEL_PRIVATE __BEGIN_DECLS void thread_group_join_io_storage(void); +void thread_group_join_perf_controller(void); __END_DECLS #endif diff --git a/osfmk/mach/thread_policy.h b/osfmk/mach/thread_policy.h index b0c82bdc5..696e414b2 100644 --- a/osfmk/mach/thread_policy.h +++ b/osfmk/mach/thread_policy.h @@ -373,10 +373,7 @@ typedef struct thread_qos_policy *thread_qos_policy_t; * When they do, we will update THREAD_POLICY_INTERNAL_STRUCT_VERSION. 
*/ -#define THREAD_POLICY_INTERNAL_STRUCT_VERSION 5 - -// legacy names -#define thrp_qos_ipc_override thrp_qos_kevent_override +#define THREAD_POLICY_INTERNAL_STRUCT_VERSION 6 struct thread_requested_policy { uint64_t thrp_int_darwinbg :1, /* marked as darwinbg via setpriority */ @@ -395,11 +392,10 @@ struct thread_requested_policy { thrp_qos_promote :3, /* thread qos class from promotion */ thrp_qos_kevent_override:3, /* thread qos class from kevent override */ thrp_terminated :1, /* heading for termination */ - thrp_qos_sync_ipc_override:3, /* now unused */ thrp_qos_workq_override :3, /* thread qos class override (workq) */ thrp_qos_wlsvc_override :3, /* workloop servicer qos class override */ - thrp_reserved :23; + thrp_reserved :26; }; struct thread_effective_policy { @@ -416,8 +412,9 @@ struct thread_effective_policy { thep_qos :3, /* thread qos class */ thep_qos_relprio :4, /* thread qos relative priority (store as inverse, -10 -> 0xA) */ thep_qos_promote :3, /* thread qos class used for promotion */ + thep_promote_above_task :1, /* thread is promoted above task-level clamp */ - thep_reserved :40; + thep_reserved :39; }; #endif /* PRIVATE */ diff --git a/osfmk/mach/thread_special_ports.h b/osfmk/mach/thread_special_ports.h index 02199835a..7bb1bea5a 100644 --- a/osfmk/mach/thread_special_ports.h +++ b/osfmk/mach/thread_special_ports.h @@ -67,8 +67,11 @@ #ifndef _MACH_THREAD_SPECIAL_PORTS_H_ #define _MACH_THREAD_SPECIAL_PORTS_H_ -#define THREAD_KERNEL_PORT 1 /* Represents the thread to the outside - * world.*/ +#define THREAD_KERNEL_PORT 1 /* The full thread port for thread. */ + +#define THREAD_INSPECT_PORT 2 /* The inspect port for thread. */ + +#define THREAD_READ_PORT 3 /* The read port for thread. */ /* * Definitions for ease of use diff --git a/osfmk/mach/thread_status.h b/osfmk/mach/thread_status.h index a91b936eb..7ccb65d6a 100644 --- a/osfmk/mach/thread_status.h +++ b/osfmk/mach/thread_status.h @@ -94,4 +94,7 @@ typedef natural_t thread_state_data_t[THREAD_STATE_MAX]; typedef int thread_state_flavor_t; typedef thread_state_flavor_t *thread_state_flavor_array_t; +#define THREAD_CONVERT_THREAD_STATE_TO_SELF 1 +#define THREAD_CONVERT_THREAD_STATE_FROM_SELF 2 + #endif /* _MACH_THREAD_STATUS_H_ */ diff --git a/osfmk/mach/vm_param.h b/osfmk/mach/vm_param.h index deef9ffd8..9a518a2bb 100644 --- a/osfmk/mach/vm_param.h +++ b/osfmk/mach/vm_param.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2020 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -162,6 +162,11 @@ mach_vm_round_page_overflow(mach_vm_offset_t in, mach_vm_offset_t *out) #define round_page_64(x) (((uint64_t)(x) + PAGE_MASK_64) & ~((uint64_t)PAGE_MASK_64)) #define trunc_page_64(x) ((uint64_t)(x) & ~((uint64_t)PAGE_MASK_64)) +#define round_page_mask_32(x, mask) (((uint32_t)(x) + (mask)) & ~((uint32_t)(mask))) +#define trunc_page_mask_32(x, mask) ((uint32_t)(x) & ~((uint32_t)(mask))) +#define round_page_mask_64(x, mask) (((uint64_t)(x) + (mask)) & ~((uint64_t)(mask))) +#define trunc_page_mask_64(x, mask) ((uint64_t)(x) & ~((uint64_t)(mask))) + /* * Enable the following block to find uses of xxx_32 macros that should * be xxx_64. These macros only work in C code, not C++. 
The resulting @@ -260,6 +265,7 @@ extern uint64_t max_mem; /* 64-bit size of memory - limit #include extern uint64_t mem_actual; /* 64-bit size of memory - not limited by maxmem */ +extern uint64_t max_mem_actual; /* Size of physical memory adjusted by maxmem */ extern uint64_t sane_size; /* Memory size to use for defaults calculations */ extern addr64_t vm_last_addr; /* Highest kernel virtual address known to the VM system */ @@ -398,6 +404,237 @@ round_page_32(uint32_t x) return x; } + +/*! + * @typedef vm_packing_params_t + * + * @brief + * Data structure representing the packing parameters for a given packed pointer + * encoding. + * + * @discussion + * Several data structures wish to pack their pointers on less than 64bits + * on LP64 in order to save memory. + * + * Adopters are supposed to define 3 macros: + * - @c *_BITS: number of storage bits used for the packing, + * - @c *_SHIFT: number of non significant low bits (expected to be 0), + * - @c *_BASE: the base against which to encode. + * + * The encoding is a no-op when @c *_BITS is equal to @c __WORDSIZE and + * @c *_SHIFT is 0. + * + * + * The convenience macro @c VM_PACKING_PARAMS can be used to create + * a @c vm_packing_params_t structure out of those definitions. + * + * It is customary to declare a constant global per scheme for the sake + * of debuggers to be able to dynamically decide how to unpack various schemes. + * + * + * This uses 2 possible schemes (who both preserve @c NULL): + * + * 1. When the storage bits and shift are sufficiently large (strictly more than + * VM_KERNEL_POINTER_SIGNIFICANT_BITS), a sign-extension scheme can be used. + * + * This allows to represent any kernel pointer. + * + * 2. Else, a base-relative scheme can be used, typical bases are: + * + * - @c KERNEL_PMAP_HEAP_RANGE_START when only pointers to heap (zone) + * allocated objects need to be packed, + * + * - @c VM_MIN_KERNEL_AND_KEXT_ADDRESS when pointers to kernel globals also + * need this. + * + * When such an ecoding is used, @c zone_restricted_va_max() must be taught + * about it. + */ +typedef struct vm_packing_params { + vm_offset_t vmpp_base; + uint8_t vmpp_bits; + uint8_t vmpp_shift; + bool vmpp_base_relative; +} vm_packing_params_t; + + +/*! + * @macro VM_PACKING_IS_BASE_RELATIVE + * + * @brief + * Whether the packing scheme with those parameters will be base-relative. + */ +#define VM_PACKING_IS_BASE_RELATIVE(ns) \ + (ns##_BITS + ns##_SHIFT <= VM_KERNEL_POINTER_SIGNIFICANT_BITS) + + +/*! + * @macro VM_PACKING_PARAMS + * + * @brief + * Constructs a @c vm_packing_params_t structure based on the convention that + * macros with the @c _BASE, @c _BITS and @c _SHIFT suffixes have been defined + * to the proper values. + */ +#define VM_PACKING_PARAMS(ns) \ + (vm_packing_params_t){ \ + .vmpp_base = ns##_BASE, \ + .vmpp_bits = ns##_BITS, \ + .vmpp_shift = ns##_SHIFT, \ + .vmpp_base_relative = VM_PACKING_IS_BASE_RELATIVE(ns), \ + } + +/** + * @function vm_pack_pointer + * + * @brief + * Packs a pointer according to the specified parameters. + * + * @discussion + * The convenience @c VM_PACK_POINTER macro allows to synthesize + * the @c params argument. + * + * @param ptr The pointer to pack. + * @param params The encoding parameters. + * @returns The packed pointer. 
+ */ +static inline vm_offset_t +vm_pack_pointer(vm_offset_t ptr, vm_packing_params_t params) +{ + if (!params.vmpp_base_relative) { + return ptr >> params.vmpp_shift; + } + if (ptr) { + return (ptr - params.vmpp_base) >> params.vmpp_shift; + } + return (vm_offset_t)0; +} +#define VM_PACK_POINTER(ptr, ns) \ + vm_pack_pointer(ptr, VM_PACKING_PARAMS(ns)) + +/** + * @function vm_unpack_pointer + * + * @brief + * Unpacks a pointer packed with @c vm_pack_pointer(). + * + * @discussion + * The convenience @c VM_UNPACK_POINTER macro allows to synthesize + * the @c params argument. + * + * @param packed The packed value to decode. + * @param params The encoding parameters. + * @returns The unpacked pointer. + */ +static inline vm_offset_t +vm_unpack_pointer(vm_offset_t packed, vm_packing_params_t params) +{ + if (!params.vmpp_base_relative) { + intptr_t addr = (intptr_t)packed; + addr <<= __WORDSIZE - params.vmpp_bits; + addr >>= __WORDSIZE - params.vmpp_bits - params.vmpp_shift; + return (vm_offset_t)addr; + } + if (packed) { + return (packed << params.vmpp_shift) + params.vmpp_base; + } + return (vm_offset_t)0; +} +#define VM_UNPACK_POINTER(packed, ns) \ + vm_unpack_pointer(packed, VM_PACKING_PARAMS(ns)) + +/** + * @function vm_packing_max_packable + * + * @brief + * Returns the largest packable address for the given parameters. + * + * @discussion + * The convenience @c VM_PACKING_MAX_PACKABLE macro allows to synthesize + * the @c params argument. + * + * @param params The encoding parameters. + * @returns The largest packable pointer. + */ +static inline vm_offset_t +vm_packing_max_packable(vm_packing_params_t params) +{ + if (!params.vmpp_base_relative) { + return VM_MAX_KERNEL_ADDRESS; + } + + vm_offset_t ptr = params.vmpp_base + + (((1ul << params.vmpp_bits) - 1) << params.vmpp_shift); + + return ptr >= params.vmpp_base ? ptr : VM_MAX_KERNEL_ADDRESS; +} +#define VM_PACKING_MAX_PACKABLE(ns) \ + vm_packing_max_packable(VM_PACKING_PARAMS(ns)) + + +__abortlike +extern void +vm_packing_pointer_invalid(vm_offset_t ptr, vm_packing_params_t params); + +/** + * @function vm_verify_pointer_packable + * + * @brief + * Panics if the specified pointer cannot be packed with the specified + * parameters. + * + * @discussion + * The convenience @c VM_VERIFY_POINTER_PACKABLE macro allows to synthesize + * the @c params argument. + * + * The convenience @c VM_ASSERT_POINTER_PACKABLE macro allows to synthesize + * the @c params argument, and is erased when assertions are disabled. + * + * @param ptr The packed value to decode. + * @param params The encoding parameters. + */ +static inline void +vm_verify_pointer_packable(vm_offset_t ptr, vm_packing_params_t params) +{ + if (ptr & ((1ul << params.vmpp_shift) - 1)) { + vm_packing_pointer_invalid(ptr, params); + } + if (!params.vmpp_base_relative || ptr == 0) { + return; + } + if (ptr <= params.vmpp_base || ptr > vm_packing_max_packable(params)) { + vm_packing_pointer_invalid(ptr, params); + } +} +#define VM_VERIFY_POINTER_PACKABLE(ptr, ns) \ + vm_verify_pointer_packable(ptr, VM_PACKING_PARAMS(ns)) + +#if DEBUG || DEVELOPMENT +#define VM_ASSERT_POINTER_PACKABLE(ptr, ns) \ + VM_VERIFY_POINTER_PACKABLE(ptr, ns) +#else +#define VM_ASSERT_POINTER_PACKABLE(ptr, ns) ((void)(ptr)) +#endif + +/** + * @function vm_verify_pointer_range + * + * @brief + * Panics if some pointers in the specified range can't be packed with the + * specified parameters. + * + * @param subsystem The subsystem requiring the packing. + * @param min_address The smallest address of the range. 
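The @discussion for vm_packing_params_t above says adopters define *_BASE, *_BITS and *_SHIFT macros and keep a constant global per scheme. A sketch of a hypothetical adopter, packing 4-byte-aligned heap pointers into 32 bits of storage; the EXAMPLE_PACKED_PTR names are invented for illustration and are not part of the patch:

/* Hypothetical packing scheme for 4-byte-aligned zone-allocated pointers. */
#define EXAMPLE_PACKED_PTR_BASE   KERNEL_PMAP_HEAP_RANGE_START
#define EXAMPLE_PACKED_PTR_BITS   32
#define EXAMPLE_PACKED_PTR_SHIFT  2

/* Per-scheme constant, kept around so debuggers can decode packed values. */
static const vm_packing_params_t example_packed_ptr_params =
        VM_PACKING_PARAMS(EXAMPLE_PACKED_PTR);

static inline uint32_t
example_pack(vm_offset_t ptr)
{
    VM_ASSERT_POINTER_PACKABLE(ptr, EXAMPLE_PACKED_PTR);
    return (uint32_t)VM_PACK_POINTER(ptr, EXAMPLE_PACKED_PTR);
}

static inline vm_offset_t
example_unpack(uint32_t packed)
{
    return VM_UNPACK_POINTER(packed, EXAMPLE_PACKED_PTR);
}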
+ * @param max_address The largest address of the range. + * @param params The encoding parameters. + */ +extern void +vm_packing_verify_range( + const char *subsystem, + vm_offset_t min_address, + vm_offset_t max_address, + vm_packing_params_t params); + #endif /* XNU_KERNEL_PRIVATE */ extern vm_size_t page_size; diff --git a/osfmk/mach/vm_prot.h b/osfmk/mach/vm_prot.h index fa1eb0344..5e7100967 100644 --- a/osfmk/mach/vm_prot.h +++ b/osfmk/mach/vm_prot.h @@ -157,4 +157,12 @@ typedef int vm_prot_t; #define VM_PROT_STRIP_READ ((vm_prot_t) 0x80) #define VM_PROT_EXECUTE_ONLY (VM_PROT_EXECUTE|VM_PROT_STRIP_READ) +#ifdef PRIVATE +/* + * When using VM_PROT_COPY, fail instead of copying an executable mapping, + * since that could cause code-signing violations. + */ +#define VM_PROT_COPY_FAIL_IF_EXECUTABLE ((vm_prot_t)0x100) +#endif /* PRIVATE */ + #endif /* _MACH_VM_PROT_H_ */ diff --git a/osfmk/mach/vm_statistics.h b/osfmk/mach/vm_statistics.h index 267d5df2f..3de128669 100644 --- a/osfmk/mach/vm_statistics.h +++ b/osfmk/mach/vm_statistics.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2019 Apple Inc. All rights reserved. + * Copyright (c) 2000-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -66,8 +66,12 @@ #ifndef _MACH_VM_STATISTICS_H_ #define _MACH_VM_STATISTICS_H_ -#include +#ifdef __cplusplus +extern "C" { +#endif +#include +#include /* * vm_statistics @@ -167,6 +171,8 @@ struct vm_statistics64 { typedef struct vm_statistics64 *vm_statistics64_t; typedef struct vm_statistics64 vm_statistics64_data_t; +kern_return_t vm_stats(void *info, unsigned int *count); + /* * VM_STATISTICS_TRUNCATE_TO_32_BIT * @@ -383,12 +389,12 @@ typedef struct { vmkf_cs_enforcement:1, vmkf_nested_pmap:1, vmkf_no_copy_on_read:1, -#if !defined(CONFIG_EMBEDDED) vmkf_32bit_map_va:1, - __vmkf_unused:13; -#else - __vmkf_unused:14; -#endif + vmkf_copy_single_object:1, + vmkf_copy_pageable:1, + vmkf_copy_same_map:1, + vmkf_translated_allow_execute:1, + __vmkf_unused:9; } vm_map_kernel_flags_t; #define VM_MAP_KERNEL_FLAGS_NONE (vm_map_kernel_flags_t) { \ .vmkf_atomic_entry = 0, /* keep entry atomic (no coalescing) */ \ @@ -409,6 +415,11 @@ typedef struct { .vmkf_cs_enforcement = 0, /* new value for CS_ENFORCEMENT */ \ .vmkf_nested_pmap = 0, /* use a nested pmap */ \ .vmkf_no_copy_on_read = 0, /* do not use copy_on_read */ \ + .vmkf_32bit_map_va = 0, /* allocate in low 32-bits range */ \ + .vmkf_copy_single_object = 0, /* vm_map_copy only 1 VM object */ \ + .vmkf_copy_pageable = 0, /* vm_map_copy with pageable entries */ \ + .vmkf_copy_same_map = 0, /* vm_map_copy to remap in original map */ \ + .vmkf_translated_allow_execute = 0, /* allow execute in translated processes */ \ .__vmkf_unused = 0 \ } @@ -456,6 +467,7 @@ typedef struct { #define VM_MEMORY_MALLOC_NANO 11 #define VM_MEMORY_MALLOC_MEDIUM 12 +#define VM_MEMORY_MALLOC_PGUARD 13 #define VM_MEMORY_MACH_MSG 20 #define VM_MEMORY_IOKIT 21 @@ -616,6 +628,22 @@ typedef struct { /* memory allocated by CoreMedia for global image registration of frames */ #define VM_MEMORY_CM_REGWARP 101 +/* memory allocated by EmbeddedAcousticRecognition for speech decoder */ +#define VM_MEMORY_EAR_DECODER 102 + +/* CoreUI cached image data */ +#define VM_MEMORY_COREUI_CACHED_IMAGE_DATA 103 + +/* Reserve 230-239 for Rosetta */ +#define VM_MEMORY_ROSETTA 230 +#define VM_MEMORY_ROSETTA_THREAD_CONTEXT 231 +#define VM_MEMORY_ROSETTA_INDIRECT_BRANCH_MAP 232 +#define VM_MEMORY_ROSETTA_RETURN_STACK 233 +#define VM_MEMORY_ROSETTA_EXECUTABLE_HEAP 234 +#define 
VM_MEMORY_ROSETTA_USER_LDT 235 +#define VM_MEMORY_ROSETTA_ARENA 236 +#define VM_MEMORY_ROSETTA_10 239 + /* Reserve 240-255 for application */ #define VM_MEMORY_APPLICATION_SPECIFIC_1 240 #define VM_MEMORY_APPLICATION_SPECIFIC_16 255 @@ -659,8 +687,9 @@ typedef struct { #define VM_KERN_MEMORY_REASON 25 #define VM_KERN_MEMORY_SKYWALK 26 #define VM_KERN_MEMORY_LTABLE 27 +#define VM_KERN_MEMORY_HV 28 -#define VM_KERN_MEMORY_FIRST_DYNAMIC 28 +#define VM_KERN_MEMORY_FIRST_DYNAMIC 29 /* out of tags: */ #define VM_KERN_MEMORY_ANY 255 #define VM_KERN_MEMORY_COUNT 256 @@ -677,6 +706,7 @@ typedef struct { #define VM_KERN_SITE_HIDE 0x00000200 /* no zprint */ #define VM_KERN_SITE_NAMED 0x00000400 #define VM_KERN_SITE_ZONE 0x00000800 +#define VM_KERN_SITE_ZONE_VIEW 0x00001000 #define VM_KERN_COUNT_MANAGED 0 #define VM_KERN_COUNT_RESERVED 1 @@ -692,8 +722,15 @@ typedef struct { #define VM_KERN_COUNT_BOOT_STOLEN 10 -#define VM_KERN_COUNTER_COUNT 11 +/* The number of bytes from the kernel cache that are wired in memory */ +#define VM_KERN_COUNT_WIRED_STATIC_KERNELCACHE 11 + +#define VM_KERN_COUNTER_COUNT 12 #endif /* KERNEL_PRIVATE */ +#ifdef __cplusplus +} +#endif + #endif /* _MACH_VM_STATISTICS_H_ */ diff --git a/osfmk/mach/vm_types.h b/osfmk/mach/vm_types.h index 95eaafd5e..e29742be6 100644 --- a/osfmk/mach/vm_types.h +++ b/osfmk/mach/vm_types.h @@ -91,7 +91,7 @@ __END_DECLS #endif /* MACH_KERNEL_PRIVATE */ typedef struct pmap *pmap_t; -typedef struct _vm_map *vm_map_t; +typedef struct _vm_map *vm_map_t, *vm_map_read_t, *vm_map_inspect_t; typedef struct vm_object *vm_object_t; typedef struct vm_object_fault_info *vm_object_fault_info_t; @@ -100,14 +100,18 @@ typedef struct vm_object_fault_info *vm_object_fault_info_t; #else /* KERNEL_PRIVATE */ -typedef mach_port_t vm_map_t; +typedef mach_port_t vm_map_t, vm_map_read_t, vm_map_inspect_t; #endif /* KERNEL_PRIVATE */ #ifdef KERNEL #define VM_MAP_NULL ((vm_map_t) NULL) +#define VM_MAP_INSPECT_NULL ((vm_map_inspect_t) NULL) +#define VM_MAP_READ_NULL ((vm_map_read_t) NULL) #else #define VM_MAP_NULL ((vm_map_t) 0) +#define VM_MAP_INSPECT_NULL ((vm_map_inspect_t) 0) +#define VM_MAP_READ_NULL ((vm_map_read_t) 0) #endif /* @@ -131,7 +135,11 @@ typedef uint16_t vm_tag_t; #define VM_TAG_KMOD 0x0200 #if DEBUG || DEVELOPMENT -#define VM_MAX_TAG_ZONES 28 +#if __LP64__ +#define VM_MAX_TAG_ZONES 84 +#else +#define VM_MAX_TAG_ZONES 31 +#endif #else #define VM_MAX_TAG_ZONES 0 #endif @@ -178,7 +186,7 @@ typedef struct vm_allocation_site vm_allocation_site_t; static vm_allocation_site_t site __attribute__((section("__DATA, __data"))) \ = { .refcount = 2, .tag = (itag), .flags = (iflags) }; -extern int vmrtf_extract(uint64_t, boolean_t, int, void *, int *); +extern int vmrtf_extract(uint64_t, boolean_t, unsigned long, void *, unsigned long *); extern unsigned int vmrtfaultinfo_bufsz(void); #endif /* XNU_KERNEL_PRIVATE */ diff --git a/osfmk/machine/Makefile b/osfmk/machine/Makefile index 0e31820d1..27aebf9a2 100644 --- a/osfmk/machine/Makefile +++ b/osfmk/machine/Makefile @@ -10,7 +10,6 @@ PRIVATE_DATAFILES = \ cpu_capabilities.h KERNELFILES = \ - atomic_impl.h \ atomic.h \ config.h \ cpu_capabilities.h \ @@ -27,7 +26,8 @@ KERNELFILES = \ pal_routines.h \ pal_hibernate.h \ simple_lock.h \ - smp.h + smp.h \ + trap.h EXPORT_FILES = \ machine_remote_time.h diff --git a/osfmk/machine/atomic.h b/osfmk/machine/atomic.h index ab11c7004..149e36583 100644 --- a/osfmk/machine/atomic.h +++ b/osfmk/machine/atomic.h @@ -29,735 +29,7 @@ #ifndef _MACHINE_ATOMIC_H #define _MACHINE_ATOMIC_H 
-/* - * Internal implementation details are in a separate header - */ -#include - -/*! - * @file - * - * @brief - * This file defines nicer (terser and safer) wrappers for C11's . - * - * @discussion - * @see xnu.git::doc/atomics.md which provides more extensive documentation - * about this header. - * - * Note that some of the macros defined in this file may be overridden by - * architecture specific headers. - * - * All the os_atomic* functions take an operation ordering argument that can be: - * - C11 memory orders: relaxed, acquire, release, acq_rel or seq_cst which - * imply a memory fence on SMP machines, and always carry the matching - * compiler barrier semantics. - * - * - the os_atomic-specific `dependency` memory ordering that is used to - * document intent to a carry a data or address dependency. - * See doc/atomics.md for more information. - * - * - a compiler barrier: compiler_acquire, compiler_release, compiler_acq_rel - * without a corresponding memory fence. - */ - -/*! - * @function os_compiler_barrier - * - * @brief - * Provide a compiler barrier according to the specified ordering. - * - * @param m - * An optional ordering among `acquire`, `release` or `acq_rel` which defaults - * to `acq_rel` when not specified. - * These are equivalent to the `compiler_acquire`, `compiler_release` and - * `compiler_acq_rel` orderings taken by the os_atomic* functions - */ -#define os_compiler_barrier(b...) \ - atomic_signal_fence(_os_compiler_barrier_##b) - -/*! - * @function os_atomic_thread_fence - * - * @brief - * Memory fence which is elided in non-SMP mode, but always carries the - * corresponding compiler barrier. - * - * @param m - * The ordering for this fence. - */ -#define os_atomic_thread_fence(m) ({ \ - atomic_thread_fence(memory_order_##m##_smp); \ - atomic_signal_fence(memory_order_##m); \ -}) - -/*! - * @function os_atomic_init - * - * @brief - * Wrapper for C11 atomic_init() - * - * @discussion - * This initialization is not performed atomically, and so must only be used as - * part of object initialization before the object is made visible to other - * threads/cores. - * - * @param p - * A pointer to an atomic variable. - * - * @param v - * The value to initialize the variable with. - * - * @returns - * The value loaded from @a p. - */ -#define os_atomic_init(p, v) \ - atomic_init(_os_atomic_c11_atomic(p), v) - -/*! - * @function os_atomic_load_is_plain, os_atomic_store_is_plain - * - * @brief - * Return whether a relaxed atomic load (resp. store) to an atomic variable - * is implemented as a single plain load (resp. store) instruction. - * - * @discussion - * Non-relaxed loads/stores may involve additional memory fence instructions - * or more complex atomic instructions. - * - * This is a construct that can safely be used in static asserts. - * - * @param p - * A pointer to an atomic variable. - * - * @returns - * True when relaxed atomic loads (resp. stores) compile to a plain load - * (resp. store) instruction, false otherwise. - */ -#define os_atomic_load_is_plain(p) (sizeof(*(p)) <= sizeof(void *)) -#define os_atomic_store_is_plain(p) os_atomic_load_is_plain(p) - -/*! - * @function os_atomic_load - * - * @brief - * Wrapper for C11 atomic_load_explicit(), guaranteed to compile to a single - * plain load instruction (when @a m is `relaxed`). - * - * @param p - * A pointer to an atomic variable. - * - * @param m - * The ordering to use. - * - * @returns - * The value loaded from @a p. 
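os_atomic_load() above is most often paired with os_atomic_store() (documented just below): a pointer is published with a release store and consumed with an acquire load. A minimal sketch, with the type and function names invented for illustration:

    #include <machine/atomic.h>     /* provides the os_atomic interface */

    struct config;                  /* illustrative type */
    static struct config *current_config;

    static void
    publish_config(struct config *new_cfg)
    {
        /* release pairs with the acquire load in consume_config() */
        os_atomic_store(&current_config, new_cfg, release);
    }

    static struct config *
    consume_config(void)
    {
        return os_atomic_load(&current_config, acquire);
    }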
- */ -#define os_atomic_load(p, m) ({ \ - _Static_assert(os_atomic_load_is_plain(p), "Load is wide"); \ - _os_atomic_basetypeof(p) _r; \ - _os_compiler_barrier_before_atomic(m); \ - _r = atomic_load_explicit(_os_atomic_c11_atomic(p), \ - memory_order_##m##_smp); \ - _os_compiler_barrier_after_atomic(m); \ - _r; \ -}) - -/*! - * @function os_atomic_load_wide - * - * @brief - * Wrapper for C11 atomic_load_explicit(), which may be implemented by a - * compare-exchange loop for double-wide variables. - * - * @param p - * A pointer to an atomic variable. - * - * @param m - * The ordering to use. - * - * @returns - * The value loaded from @a p. - */ -#define os_atomic_load_wide(p, m) ({ \ - _os_atomic_basetypeof(p) _r; \ - _os_compiler_barrier_before_atomic(m); \ - _r = atomic_load_explicit(_os_atomic_c11_atomic(p), \ - memory_order_##m##_smp); \ - _os_compiler_barrier_after_atomic(m); \ - _r; \ -}) - -/*! - * @function os_atomic_store - * - * @brief - * Wrapper for C11 atomic_store_explicit(), guaranteed to compile to a single - * plain store instruction (when @a m is `relaxed`). - * - * @param p - * A pointer to an atomic variable. - * - * @param v - * The value to store. - * - * @param m - * The ordering to use. - * - * @returns - * The value stored at @a p. - */ -#define os_atomic_store(p, v, m) ({ \ - _Static_assert(os_atomic_store_is_plain(p), "Store is wide"); \ - _os_atomic_basetypeof(p) _v = (v); \ - _os_compiler_barrier_before_atomic(m); \ - atomic_store_explicit(_os_atomic_c11_atomic(p), _v, \ - memory_order_##m##_smp); \ - _os_compiler_barrier_after_atomic(m); \ - _v; \ -}) - -/*! - * @function os_atomic_store_wide - * - * @brief - * Wrapper for C11 atomic_store_explicit(), which may be implemented by a - * compare-exchange loop for double-wide variables. - * - * @param p - * A pointer to an atomic variable. - * - * @param v - * The value to store. - * - * @param m - * The ordering to use. - * - * @returns - * The value stored at @a p. - */ -#define os_atomic_store_wide(p, v, m) ({ \ - _os_atomic_basetypeof(p) _v = (v); \ - _os_compiler_barrier_before_atomic(m); \ - atomic_store_explicit(_os_atomic_c11_atomic(p), _v, \ - memory_order_##m##_smp); \ - _os_compiler_barrier_after_atomic(m); \ - _v; \ -}) - -/*! - * @function os_atomic_add, os_atomic_add_orig - * - * @brief - * Wrappers for C11 atomic_fetch_add_explicit(). - * - * @param p - * A pointer to an atomic variable. - * - * @param v - * The value to add. - * - * @param m - * The ordering to use. - * - * @returns - * os_atomic_add_orig returns the value of the variable before the atomic add, - * os_atomic_add returns the value of the variable after the atomic add. - */ -#define os_atomic_add_orig(p, v, m) _os_atomic_c11_op_orig(p, v, m, fetch_add) -#define os_atomic_add(p, v, m) _os_atomic_c11_op(p, v, m, fetch_add, +) - -/*! - * @function os_atomic_inc, os_atomic_inc_orig - * - * @brief - * Perform an atomic increment. - * - * @param p - * A pointer to an atomic variable. - * - * @param m - * The ordering to use. - * - * @returns - * os_atomic_inc_orig returns the value of the variable before the atomic increment, - * os_atomic_inc returns the value of the variable after the atomic increment. - */ -#define os_atomic_inc_orig(p, m) _os_atomic_c11_op_orig(p, 1, m, fetch_add) -#define os_atomic_inc(p, m) _os_atomic_c11_op(p, 1, m, fetch_add, +) - -/*! - * @function os_atomic_sub, os_atomic_sub_orig - * - * @brief - * Wrappers for C11 atomic_fetch_sub_explicit(). - * - * @param p - * A pointer to an atomic variable. 
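The arithmetic forms above are typically used for statistics and reference-style counters where only atomicity, not ordering, is required. A small sketch with invented names:

    static uint64_t pkts_seen;      /* illustrative statistics counter */

    static void
    record_packet(void)
    {
        os_atomic_inc(&pkts_seen, relaxed);
    }

    static uint64_t
    record_burst(uint32_t n)
    {
        /* the _orig form returns the value the counter had before the add */
        return os_atomic_add_orig(&pkts_seen, n, relaxed);
    }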
- * - * @param v - * The value to subtract. - * - * @param m - * The ordering to use. - * - * @returns - * os_atomic_sub_orig returns the value of the variable before the atomic subtract, - * os_atomic_sub returns the value of the variable after the atomic subtract. - */ -#define os_atomic_sub_orig(p, v, m) _os_atomic_c11_op_orig(p, v, m, fetch_sub) -#define os_atomic_sub(p, v, m) _os_atomic_c11_op(p, v, m, fetch_sub, -) - -/*! - * @function os_atomic_dec, os_atomic_dec_orig - * - * @brief - * Perform an atomic decrement. - * - * @param p - * A pointer to an atomic variable. - * - * @param m - * The ordering to use. - * - * @returns - * os_atomic_dec_orig returns the value of the variable before the atomic decrement, - * os_atomic_dec returns the value of the variable after the atomic decrement. - */ -#define os_atomic_dec_orig(p, m) _os_atomic_c11_op_orig(p, 1, m, fetch_sub) -#define os_atomic_dec(p, m) _os_atomic_c11_op(p, 1, m, fetch_sub, -) - -/*! - * @function os_atomic_and, os_atomic_and_orig - * - * @brief - * Wrappers for C11 atomic_fetch_and_explicit(). - * - * @param p - * A pointer to an atomic variable. - * - * @param v - * The value to and. - * - * @param m - * The ordering to use. - * - * @returns - * os_atomic_and_orig returns the value of the variable before the atomic and, - * os_atomic_and returns the value of the variable after the atomic and. - */ -#define os_atomic_and_orig(p, v, m) _os_atomic_c11_op_orig(p, v, m, fetch_and) -#define os_atomic_and(p, v, m) _os_atomic_c11_op(p, v, m, fetch_and, &) - -/*! - * @function os_atomic_andnot, os_atomic_andnot_orig - * - * @brief - * Wrappers for C11 atomic_fetch_and_explicit(p, ~value). - * - * @param p - * A pointer to an atomic variable. - * - * @param v - * The value whose complement to and. - * - * @param m - * The ordering to use. - * - * @returns - * os_atomic_andnot_orig returns the value of the variable before the atomic andnot, - * os_atomic_andnot returns the value of the variable after the atomic andnot. - */ -#define os_atomic_andnot_orig(p, v, m) _os_atomic_c11_op_orig(p, ~(v), m, fetch_and) -#define os_atomic_andnot(p, v, m) _os_atomic_c11_op(p, ~(v), m, fetch_and, &) - -/*! - * @function os_atomic_or, os_atomic_or_orig - * - * @brief - * Wrappers for C11 atomic_fetch_or_explicit(). - * - * @param p - * A pointer to an atomic variable. - * - * @param v - * The value to or. - * - * @param m - * The ordering to use. - * - * @returns - * os_atomic_or_orig returns the value of the variable before the atomic or, - * os_atomic_or returns the value of the variable after the atomic or. - */ -#define os_atomic_or_orig(p, v, m) _os_atomic_c11_op_orig(p, v, m, fetch_or) -#define os_atomic_or(p, v, m) _os_atomic_c11_op(p, v, m, fetch_or, |) - -/*! - * @function os_atomic_xor, os_atomic_xor_orig - * - * @brief - * Wrappers for C11 atomic_fetch_xor_explicit(). - * - * @param p - * A pointer to an atomic variable. - * - * @param v - * The value to xor. - * - * @param m - * The ordering to use. - * - * @returns - * os_atomic_xor_orig returns the value of the variable before the atomic xor, - * os_atomic_xor returns the value of the variable after the atomic xor. - */ -#define os_atomic_xor_orig(p, v, m) _os_atomic_c11_op_orig(p, v, m, fetch_xor) -#define os_atomic_xor(p, v, m) _os_atomic_c11_op(p, v, m, fetch_xor, ^) - -/*! - * @function os_atomic_min, os_atomic_min_orig - * - * @brief - * Wrappers for Clang's __atomic_fetch_min() - * - * @param p - * A pointer to an atomic variable. - * - * @param v - * The value to minimize. 
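The bitwise forms documented above are usually applied to flag words; os_atomic_andnot() in particular lets callers clear bits without writing the complement by hand. A sketch with invented flag and function names:

    #define WQ_RUNNING   0x1u       /* illustrative flag bits */
    #define WQ_DRAINING  0x2u

    static unsigned int wq_flags;

    static bool
    wq_start_drain(void)
    {
        /* the _orig form tells us whether the flag was already set */
        unsigned int prev = os_atomic_or_orig(&wq_flags, WQ_DRAINING, acquire);
        return (prev & WQ_DRAINING) == 0;
    }

    static void
    wq_clear_running(void)
    {
        os_atomic_andnot(&wq_flags, WQ_RUNNING, release);
    }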
- * - * @param m - * The ordering to use. - * - * @returns - * os_atomic_min_orig returns the value of the variable before the atomic min, - * os_atomic_min returns the value of the variable after the atomic min. - */ -#define os_atomic_min_orig(p, v, m) _os_atomic_clang_op_orig(p, v, m, fetch_min) -#define os_atomic_min(p, v, m) _os_atomic_clang_op(p, v, m, fetch_min, MIN) - -/*! - * @function os_atomic_max, os_atomic_max_orig - * - * @brief - * Wrappers for Clang's __atomic_fetch_max() - * - * @param p - * A pointer to an atomic variable. - * - * @param v - * The value to maximize. - * - * @param m - * The ordering to use. - * - * @returns - * os_atomic_max_orig returns the value of the variable before the atomic max, - * os_atomic_max returns the value of the variable after the atomic max. - */ -#define os_atomic_max_orig(p, v, m) _os_atomic_clang_op_orig(p, v, m, fetch_max) -#define os_atomic_max(p, v, m) _os_atomic_clang_op(p, v, m, fetch_max, MAX) - -/*! - * @function os_atomic_xchg - * - * @brief - * Wrapper for C11 atomic_exchange_explicit(). - * - * @param p - * A pointer to an atomic variable. - * - * @param v - * The value to exchange with. - * - * @param m - * The ordering to use. - * - * @returns - * The value of the variable before the exchange. - */ -#define os_atomic_xchg(p, v, m) _os_atomic_c11_op_orig(p, v, m, exchange) - -/*! - * @function os_atomic_cmpxchg - * - * @brief - * Wrapper for C11 atomic_compare_exchange_strong_explicit(). - * - * @discussion - * Loops around os_atomic_cmpxchg() may want to consider using the - * os_atomic_rmw_loop() construct instead to take advantage of the C11 weak - * compare-exchange operation. - * - * @param p - * A pointer to an atomic variable. - * - * @param e - * The value expected in the atomic variable. - * - * @param v - * The value to store if the atomic variable has the expected value @a e. - * - * @param m - * The ordering to use in case of success. - * The ordering in case of failure is always `relaxed`. - * - * @returns - * 0 if the compare-exchange failed. - * 1 if the compare-exchange succeeded. - */ -#define os_atomic_cmpxchg(p, e, v, m) ({ \ - _os_atomic_basetypeof(p) _r = (e); int _b; \ - _os_compiler_barrier_before_atomic(m); \ - _b = atomic_compare_exchange_strong_explicit(_os_atomic_c11_atomic(p), \ - &_r, v, memory_order_##m##_smp, memory_order_relaxed); \ - _os_compiler_barrier_after_atomic(m); \ - _b; \ -}) - -/*! - * @function os_atomic_cmpxchgv - * - * @brief - * Wrapper for C11 atomic_compare_exchange_strong_explicit(). - * - * @discussion - * Loops around os_atomic_cmpxchgv() may want to consider using the - * os_atomic_rmw_loop() construct instead to take advantage of the C11 weak - * compare-exchange operation. - * - * @param p - * A pointer to an atomic variable. - * - * @param e - * The value expected in the atomic variable. - * - * @param v - * The value to store if the atomic variable has the expected value @a e. - * - * @param g - * A pointer to a location that is filled with the value that was present in - * the atomic variable before the compare-exchange (whether successful or not). - * This can be used to redrive compare-exchange loops. - * - * @param m - * The ordering to use in case of success. - * The ordering in case of failure is always `relaxed`. - * - * @returns - * 0 if the compare-exchange failed. - * 1 if the compare-exchange succeeded. 
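A typical retry loop over os_atomic_cmpxchgv(), and the same operation written with os_atomic_rmw_loop() (documented just below), which the text above recommends for loops because it can use the weak compare-exchange. The counter and function names are invented for illustration:

    static uint32_t outstanding;    /* illustrative refcount-style counter */

    static bool
    try_retain(void)
    {
        uint32_t ov = os_atomic_load(&outstanding, relaxed);

        do {
            if (ov == 0) {
                return false;                   /* already drained */
            }
            /* on failure, cmpxchgv refills ov with the observed value */
        } while (!os_atomic_cmpxchgv(&outstanding, ov, ov + 1, &ov, acquire));

        return true;
    }

    static bool
    try_retain_rmw(void)
    {
        uint32_t ov, nv;

        return os_atomic_rmw_loop(&outstanding, ov, nv, acquire, {
            if (ov == 0) {
                /* abort the loop and the whole call */
                os_atomic_rmw_loop_give_up(return false);
            }
            nv = ov + 1;
        });
    }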
- */ -#define os_atomic_cmpxchgv(p, e, v, g, m) ({ \ - _os_atomic_basetypeof(p) _r = (e); int _b; \ - _os_compiler_barrier_before_atomic(m); \ - _b = atomic_compare_exchange_strong_explicit(_os_atomic_c11_atomic(p), \ - &_r, v, memory_order_##m##_smp, memory_order_relaxed); \ - _os_compiler_barrier_after_atomic(m); \ - *(g) = _r; _b; \ -}) - -/*! - * @function os_atomic_rmw_loop - * - * @brief - * Advanced read-modify-write construct to wrap compare-exchange loops. - * - * @param p - * A pointer to an atomic variable to be modified. - * - * @param ov - * The name of the variable that will contain the original value of the atomic - * variable (reloaded every iteration of the loop). - * - * @param nv - * The name of the variable that will contain the new value to compare-exchange - * the atomic variable to (typically computed from @a ov every iteration of the - * loop). - * - * @param m - * The ordering to use in case of success. - * The ordering in case of failure is always `relaxed`. - * - * @param ... - * Code block that validates the value of @p ov and computes the new value of - * @p nv that the atomic variable will be compare-exchanged to in an iteration - * of the loop. - * - * The loop can be aborted using os_atomic_rmw_loop_give_up(), e.g. when the - * value of @p ov is found to be "invalid" for the ovarall operation. - * `continue` cannot be used in this context. - * - * No stores to memory should be performed within the code block as it may cause - * LL/SC transactions used to implement compare-exchange to fail persistently. - * - * @returns - * 0 if the loop was aborted with os_atomic_rmw_loop_give_up(). - * 1 if the loop completed. - */ -#define os_atomic_rmw_loop(p, ov, nv, m, ...) ({ \ - int _result = 0; \ - typeof(p) _p = (p); \ - _os_compiler_barrier_before_atomic(m); \ - ov = atomic_load_explicit(_os_atomic_c11_atomic(_p), \ - memory_order_relaxed); \ - do { \ - __VA_ARGS__; \ - _result = atomic_compare_exchange_weak_explicit( \ - _os_atomic_c11_atomic(_p), &ov, nv, \ - memory_order_##m##_smp, memory_order_relaxed); \ - } while (__builtin_expect(!_result, 0)); \ - _os_compiler_barrier_after_atomic(m); \ - _result; \ - }) - -/*! - * @function os_atomic_rmw_loop_give_up - * - * @brief - * Abort an os_atomic_rmw_loop() loop. - * - * @param ... - * Optional code block to execute before the `break` out of the loop. May - * further alter the control flow (e.g. using `return`, `goto`, ...). - */ -#define os_atomic_rmw_loop_give_up(...) ({ __VA_ARGS__; break; }) - -/*! - * @typedef os_atomic_dependency_t - * - * @brief - * Type for dependency tokens that can be derived from loads with dependency - * and injected into various expressions. - * - * @warning - * The implementation of atomic dependencies makes painstakingly sure that the - * compiler doesn't know that os_atomic_dependency_t::__opaque_zero is always 0. - * - * Users of os_atomic_dependency_t MUST NOT test its value (even with an - * assert), as doing so would allow the compiler to reason about the value and - * elide its use to inject hardware dependencies (thwarting the entire purpose - * of the construct). - */ -typedef struct { unsigned long __opaque_zero; } os_atomic_dependency_t; - -/*! - * @const OS_ATOMIC_DEPENDENCY_NONE - * - * @brief - * A value to pass to functions that can carry dependencies, to indicate that - * no dependency should be carried. - */ -#define OS_ATOMIC_DEPENDENCY_NONE \ - ((os_atomic_dependency_t){ 0UL }) - -/*! 
- * @function os_atomic_make_dependency - * - * @brief - * Create a dependency token that can be injected into expressions to force a - * hardware dependency. - * - * @discussion - * This function is only useful for cases where the dependency needs to be used - * several times. - * - * os_atomic_load_with_dependency_on() and os_atomic_inject_dependency() are - * otherwise capable of automatically creating dependency tokens. - * - * @param v - * The result of: - * - an os_atomic_load(..., dependency), - * - an os_atomic_inject_dependency(), - * - an os_atomic_load_with_dependency_on(). - * - * Note that due to implementation limitations, the type of @p v must be - * register-sized, if necessary an explicit cast is required. - * - * @returns - * An os_atomic_dependency_t token that can be used to prolongate dependency - * chains. - * - * The token value is always 0, but the compiler must never be able to reason - * about that fact (c.f. os_atomic_dependency_t) - */ -#define os_atomic_make_dependency(v) \ - ((void)(v), OS_ATOMIC_DEPENDENCY_NONE) - -/*! - * @function os_atomic_inject_dependency - * - * @brief - * Inject a hardware dependency resulting from a `dependency` load into a - * specified pointer. - * - * @param p - * A pointer to inject the dependency into. - * - * @param e - * - a dependency token returned from os_atomic_make_dependency(), - * - * - OS_ATOMIC_DEPENDENCY_NONE, which turns this operation into a no-op, - * - * - any value accepted by os_atomic_make_dependency(). - * - * @returns - * A value equal to @a p but that prolongates the dependency chain rooted at - * @a e. - */ -#define os_atomic_inject_dependency(p, e) \ - ((typeof(*(p)) *)((p) + _os_atomic_auto_dependency(e).__opaque_zero)) - -/*! - * @function os_atomic_load_with_dependency_on - * - * @brief - * Load that prolongates the dependency chain rooted at `v`. - * - * @discussion - * This is shorthand for: - * - * - * os_atomic_load(os_atomic_inject_dependency(p, e), dependency) - * - * - * @param p - * A pointer to an atomic variable. - * - * @param e - * - a dependency token returned from os_atomic_make_dependency(), - * - * - OS_ATOMIC_DEPENDENCY_NONE, which turns this operation into a no-op, - * - * - any value accepted by os_atomic_make_dependency(). - * - * @returns - * The value loaded from @a p. - */ -#define os_atomic_load_with_dependency_on(p, e) \ - os_atomic_load(os_atomic_inject_dependency(p, e), dependency) - -/*! - * @const OS_ATOMIC_HAS_LLSC - * - * @brief - * Whether the platform has LL/SC features. - * - * @discussion - * When set, the os_atomic_*_exclusive() macros are defined. - */ -#define OS_ATOMIC_HAS_LLSC 0 - -/*! - * @const OS_ATOMIC_USE_LLSC - * - * @brief - * Whether os_atomic* use LL/SC internally. - * - * @discussion - * OS_ATOMIC_USE_LLSC implies OS_ATOMIC_HAS_LLSC. - */ -#define OS_ATOMIC_USE_LLSC 0 +#include #if defined (__x86_64__) #include "i386/atomic.h" diff --git a/osfmk/machine/atomic_impl.h b/osfmk/machine/atomic_impl.h deleted file mode 100644 index 9e646f80e..000000000 --- a/osfmk/machine/atomic_impl.h +++ /dev/null @@ -1,220 +0,0 @@ -/* - * Copyright (c) 2018 Apple Inc. All rights reserved. - * - * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * - * This file contains Original Code and/or Modifications of Original Code - * as defined in and that are subject to the Apple Public Source License - * Version 2.0 (the 'License'). You may not use this file except in - * compliance with the License. 
The rights granted to you under the License - * may not be used to create, or enable the creation or redistribution of, - * unlawful or unlicensed copies of an Apple operating system, or to - * circumvent, violate, or enable the circumvention or violation of, any - * terms of an Apple operating system software license agreement. - * - * Please obtain a copy of the License at - * http://www.opensource.apple.com/apsl/ and read it before using this file. - * - * The Original Code and all software distributed under the License are - * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER - * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, - * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. - * Please see the License for the specific language governing rights and - * limitations under the License. - * - * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ - */ - -/* - * This header provides some gory details to implement the - * interfaces. Nothing in this header should be called directly, no promise is - * made to keep this interface stable. - */ - -#ifndef _MACHINE_ATOMIC_H -#error "Do not include directly, use " -#endif - -#ifndef _MACHINE_ATOMIC_IMPL_H -#define _MACHINE_ATOMIC_IMPL_H - -#include -#include - -static inline int -memory_order_has_acquire(enum memory_order ord) -{ - switch (ord) { - case memory_order_consume: - case memory_order_acquire: - case memory_order_acq_rel: - case memory_order_seq_cst: - return 1; - default: - return 0; - } -} - -static inline int -memory_order_has_release(enum memory_order ord) -{ - switch (ord) { - case memory_order_release: - case memory_order_acq_rel: - case memory_order_seq_cst: - return 1; - default: - return 0; - } -} - -#if __SMP__ - -#define memory_order_relaxed_smp memory_order_relaxed -#define memory_order_compiler_acquire_smp memory_order_relaxed -#define memory_order_compiler_release_smp memory_order_relaxed -#define memory_order_compiler_acq_rel_smp memory_order_relaxed -#define memory_order_consume_smp memory_order_consume -#define memory_order_dependency_smp memory_order_acquire -#define memory_order_acquire_smp memory_order_acquire -#define memory_order_release_smp memory_order_release -#define memory_order_acq_rel_smp memory_order_acq_rel -#define memory_order_seq_cst_smp memory_order_seq_cst - -#else - -#define memory_order_relaxed_smp memory_order_relaxed -#define memory_order_compiler_acquire_smp memory_order_relaxed -#define memory_order_compiler_release_smp memory_order_relaxed -#define memory_order_compiler_acq_rel_smp memory_order_relaxed -#define memory_order_consume_smp memory_order_relaxed -#define memory_order_dependency_smp memory_order_relaxed -#define memory_order_acquire_smp memory_order_relaxed -#define memory_order_release_smp memory_order_relaxed -#define memory_order_acq_rel_smp memory_order_relaxed -#define memory_order_seq_cst_smp memory_order_relaxed - -#endif - -/* - * Hack needed for os_compiler_barrier() to work (including with empty argument) - */ -#define _os_compiler_barrier_relaxed memory_order_relaxed -#define _os_compiler_barrier_acquire memory_order_acquire -#define _os_compiler_barrier_release memory_order_release -#define _os_compiler_barrier_acq_rel memory_order_acq_rel -#define _os_compiler_barrier_ memory_order_acq_rel - -/* - * Mapping between compiler barrier/memory orders and: - * - compiler barriers before atomics ("rel_barrier") - * - compiler barriers after atomics 
("acq_barrier") - */ -#define _os_rel_barrier_relaxed memory_order_relaxed -#define _os_rel_barrier_compiler_acquire memory_order_relaxed -#define _os_rel_barrier_compiler_release memory_order_release -#define _os_rel_barrier_compiler_acq_rel memory_order_release -#define _os_rel_barrier_consume memory_order_relaxed -#define _os_rel_barrier_dependency memory_order_relaxed -#define _os_rel_barrier_acquire memory_order_relaxed -#define _os_rel_barrier_release memory_order_release -#define _os_rel_barrier_acq_rel memory_order_release -#define _os_rel_barrier_seq_cst memory_order_release - -#define _os_acq_barrier_relaxed memory_order_relaxed -#define _os_acq_barrier_compiler_acquire memory_order_acquire -#define _os_acq_barrier_compiler_release memory_order_relaxed -#define _os_acq_barrier_compiler_acq_rel memory_order_acquire -#define _os_acq_barrier_consume memory_order_acquire -#define _os_acq_barrier_dependency memory_order_acquire -#define _os_acq_barrier_acquire memory_order_acquire -#define _os_acq_barrier_release memory_order_relaxed -#define _os_acq_barrier_acq_rel memory_order_acquire -#define _os_acq_barrier_seq_cst memory_order_acquire - -#define _os_compiler_barrier_before_atomic(m) \ - atomic_signal_fence(_os_rel_barrier_##m) -#define _os_compiler_barrier_after_atomic(m) \ - atomic_signal_fence(_os_acq_barrier_##m) - -/* - * Mapping between compiler barrier/memmory orders and: - * - memory fences before atomics ("rel_fence") - * - memory fences after atomics ("acq_fence") - */ -#define _os_rel_fence_relaxed memory_order_relaxed -#define _os_rel_fence_compiler_acquire memory_order_relaxed -#define _os_rel_fence_compiler_release memory_order_release -#define _os_rel_fence_compiler_acq_rel memory_order_release -#define _os_rel_fence_consume memory_order_relaxed_smp -#define _os_rel_fence_dependency memory_order_relaxed_smp -#define _os_rel_fence_acquire memory_order_relaxed_smp -#define _os_rel_fence_release memory_order_release_smp -#define _os_rel_fence_acq_rel memory_order_release_smp -#define _os_rel_fence_seq_cst memory_order_release_smp - -#define _os_acq_fence_relaxed memory_order_relaxed -#define _os_acq_fence_compiler_acquire memory_order_relaxed -#define _os_acq_fence_compiler_release memory_order_relaxed -#define _os_acq_fence_compiler_acq_rel memory_order_relaxed -#define _os_acq_fence_consume memory_order_acquire_smp -#define _os_acq_fence_dependency memory_order_dependency_smp -#define _os_acq_fence_acquire memory_order_acquire_smp -#define _os_acq_fence_release memory_order_relaxed_smp -#define _os_acq_fence_acq_rel memory_order_acquire_smp -#define _os_acq_fence_seq_cst memory_order_acquire_smp - -#define _os_memory_fence_before_atomic(m) \ - atomic_thread_fence(_os_rel_fence_##m) -#define _os_memory_fence_after_atomic(m) \ - atomic_thread_fence(_os_acq_fence_##m) - -/* - * Misc. helpers - */ - -/* - * For this implementation, we make sure the compiler cannot coalesce any of the - * os_atomic calls by casting all atomic variables to `volatile _Atomic`. - * - * At the time this decision was taken, clang has been treating all `_Atomic` - * accesses as if qualified `volatile _Atomic`, so the cast below freezes that - * aspect of the codegen in time. - * - * When/if clang starts coalescing non-volatile _Atomics, we may decide to add - * coalescing orderings, e.g. {relaxed,acquire,release,acq_rel,seq_cst}_nv. 
- */ -#define _os_atomic_c11_atomic(p) \ - ((typeof(*(p)) volatile _Atomic *)(p)) - -#define _os_atomic_basetypeof(p) \ - typeof(atomic_load(_os_atomic_c11_atomic(p))) - -#define _os_atomic_op_orig(p, v, m, o) ({ \ - _os_atomic_basetypeof(p) _r; \ - _os_compiler_barrier_before_atomic(m); \ - _r = o(_os_atomic_c11_atomic(p), v, memory_order_##m##_smp); \ - _os_compiler_barrier_after_atomic(m); \ - _r; \ -}) - -#define _os_atomic_c11_op_orig(p, v, m, o) \ - _os_atomic_op_orig(p, v, m, atomic_##o##_explicit) - -#define _os_atomic_c11_op(p, v, m, o, op) \ - ({ typeof(v) _v = (v); _os_atomic_c11_op_orig(p, _v, m, o) op _v; }) - -#define _os_atomic_clang_op_orig(p, v, m, o) \ - _os_atomic_op_orig(p, v, m, __atomic_##o) - -#define _os_atomic_clang_op(p, v, m, o, op) \ - ({ typeof(v) _v = (v); _os_atomic_basetypeof(p) _r = \ - _os_atomic_clang_op_orig(p, _v, m, o); op(_r, _v); }) - -#define _os_atomic_auto_dependency(e) \ - _Generic(e, \ - os_atomic_dependency_t: (e), \ - default: os_atomic_make_dependency(e)) - -#endif /* _MACHINE_ATOMIC_IMPL_H */ diff --git a/osfmk/machine/machine_routines.h b/osfmk/machine/machine_routines.h index c12c8ee2b..40eae37e9 100644 --- a/osfmk/machine/machine_routines.h +++ b/osfmk/machine/machine_routines.h @@ -28,6 +28,8 @@ #ifndef _MACHINE_MACHINE_ROUTINES_H #define _MACHINE_MACHINE_ROUTINES_H +#include + #if defined (__i386__) || defined(__x86_64__) #include "i386/machine_routines.h" #elif defined (__arm__) || defined (__arm64__) @@ -36,4 +38,129 @@ #error architecture not supported #endif +__BEGIN_DECLS + +#ifdef XNU_KERNEL_PRIVATE +#pragma GCC visibility push(hidden) + +/*! + * @function ml_cpu_can_exit + * @brief Check whether the platform code allows |cpu_id| to be + * shut down at runtime. + * @return true if allowed, false otherwise + */ +bool ml_cpu_can_exit(int cpu_id); + +/*! + * @function ml_cpu_init_state + * @brief Needs to be called from schedulable context prior to using + * the ml_cpu_*_state_transition or ml_cpu_*_loop functions. + */ +void ml_cpu_init_state(void); + +/*! + * @function ml_cpu_begin_state_transition + * @brief Tell the platform code that processor_start() or + * processor_exit() is about to begin for |cpu_id|. This + * can block. + * @param cpu_id CPU that is (potentially) going up or down + */ +void ml_cpu_begin_state_transition(int cpu_id); + +/*! + * @function ml_cpu_end_state_transition + * @brief Tell the platform code that processor_start() or + * processor_exit() is finished for |cpu_id|. This + * can block. Can be called from a different thread from + * ml_cpu_begin_state_transition(). + * @param cpu_id CPU that is (potentially) going up or down + */ +void ml_cpu_end_state_transition(int cpu_id); + +/*! + * @function ml_cpu_begin_loop + * @brief Acquire a global lock that prevents processor_start() or + * processor_exit() from changing any CPU states for the + * duration of a loop. This can block. + */ +void ml_cpu_begin_loop(void); + +/*! + * @function ml_cpu_end_loop + * @brief Release the global lock acquired by ml_cpu_begin_loop(). + * Must be called from the same thread as ml_cpu_begin_loop(). + */ +void ml_cpu_end_loop(void); + +/*! + * @function ml_early_cpu_max_number() + * @brief Returns an early maximum cpu number the kernel will ever use. + * + * @return the maximum cpu number the kernel will ever use. + * + * @discussion + * The value returned by this function might be an over-estimate, + * but is more precise than @c MAX_CPUS. 
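One way these hooks compose: size a per-CPU table once from ml_early_cpu_max_number() and walk it under the ml_cpu_begin_loop()/ml_cpu_end_loop() bracket (after ml_cpu_init_state() has run) so that no CPU can come or go mid-walk. The table and function names below are invented for illustration:

    static unsigned int stat_ncpus;
    static uint64_t    *stat_counts;    /* one slot per possible CPU */

    static void
    stat_table_init(void)
    {
        stat_ncpus  = (unsigned int)ml_early_cpu_max_number() + 1;
        stat_counts = zalloc_permanent(stat_ncpus * sizeof(uint64_t),
            ZALIGN(uint64_t));
    }

    static uint64_t
    stat_table_sum(void)
    {
        uint64_t total = 0;

        ml_cpu_begin_loop();            /* block CPU state transitions */
        for (unsigned int i = 0; i < stat_ncpus; i++) {
            total += stat_counts[i];
        }
        ml_cpu_end_loop();

        return total;
    }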
+ * + * Unlike @c real_ncpus which is only initialized late in boot, + * this can be called during startup after the @c STARTUP_SUB_TUNABLES + * subsystem has been initialized. + */ +int ml_early_cpu_max_number(void); + +#pragma GCC visibility pop +#endif /* defined(XNU_KERNEL_PRIVATE) */ + +/*! + * @enum cpu_event + * @abstract Broadcast events allowing clients to hook CPU state transitions. + * @constant CPU_BOOT_REQUESTED Called from processor_start(); may block. + * @constant CPU_BOOTED Called from platform code on the newly-booted CPU; may not block. + * @constant CPU_ACTIVE Called from scheduler code; may block. + * @constant CLUSTER_ACTIVE Called from platform code; may not block. + * @constant CPU_EXIT_REQUESTED Called from processor_exit(); may block. + * @constant CPU_DOWN Called from platform code on the disabled CPU; may not block. + * @constant CLUSTER_EXIT_REQUESTED Called from platform code; may not block. + * @constant CPU_EXITED Called after CPU is stopped; may block. + */ +enum cpu_event { + CPU_BOOT_REQUESTED = 0, + CPU_BOOTED, + CPU_ACTIVE, + CLUSTER_ACTIVE, + CPU_EXIT_REQUESTED, + CPU_DOWN, + CLUSTER_EXIT_REQUESTED, + CPU_EXITED, +}; + +typedef bool (*cpu_callback_t)(void *param, enum cpu_event event, unsigned int cpu_or_cluster); + +/*! + * @function cpu_event_register_callback + * @abstract Register a function to be called on CPU state changes. + * @param fn Function to call on state change events. + * @param param Optional argument to be passed to the callback (e.g. object pointer). + */ +void cpu_event_register_callback(cpu_callback_t fn, void *param); + +/*! + * @function cpu_event_unregister_callback + * @abstract Unregister a previously-registered callback function. + * @param fn Function pointer previously passed to cpu_event_register_callback(). + */ +void cpu_event_unregister_callback(cpu_callback_t fn); + +#if XNU_KERNEL_PRIVATE +/*! + * @function ml_broadcast_cpu_event + * @abstract Internal XNU function used to broadcast CPU state changes to callers. + * @param event CPU event that is occurring. + * @param cpu_or_cluster Logical CPU ID of the core (or cluster) affected by the event. + */ +void ml_broadcast_cpu_event(enum cpu_event event, unsigned int cpu_or_cluster); +#endif + +__END_DECLS + #endif /* _MACHINE_MACHINE_ROUTINES_H */ diff --git a/osfmk/machine/pal_hibernate.h b/osfmk/machine/pal_hibernate.h index 4277f9939..9c321117c 100644 --- a/osfmk/machine/pal_hibernate.h +++ b/osfmk/machine/pal_hibernate.h @@ -25,15 +25,145 @@ * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ +/** + * Platform abstraction layer to support hibernation. + */ + #ifndef _MACHINE_PAL_HIBERNATE_H #define _MACHINE_PAL_HIBERNATE_H +#include + #if defined (__i386__) || defined(__x86_64__) #include "i386/pal_hibernate.h" #elif defined (__arm__) //#include "arm/pal_hibernate.h" +#elif defined(__arm64__) +#include "arm64/pal_hibernate.h" #else #error architecture not supported #endif +__BEGIN_DECLS + +/*! + * @typedef pal_hib_restore_stage_t + * @discussion hibernate_kernel_entrypoint restores data in multiple stages; this enum defines those stages. + */ +typedef enum { + pal_hib_restore_stage_dram_pages = 0, + pal_hib_restore_stage_preview_pages = 1, + pal_hib_restore_stage_handoff_data = 2, +} pal_hib_restore_stage_t; + +/*! + * @typedef pal_hib_ctx_t + * @discussion This type is used to pass context between pal_hib_resume_init, pal_hib_restored_page, and + * pal_hib_patchup during hibernation resume. 
The context is declared on the stack in + * hibernate_kernel_entrypoint, so it should be relatively small. During pal_hib_resume_init(), + * additional memory can be allocated with hibernate_page_list_grab if necessary. + */ +typedef struct pal_hib_ctx pal_hib_ctx_t; + +/*! + * @function __hib_assert + * @discussion Called when a fatal assertion has been detected during hibernation. Logs the + * expression string and loops indefinitely. + * + * @param file The source file in which the failed assertion occurred + * @param line The line number at which the failed assertion occurred + * @param expression A string describing the failed assertion + */ +void __hib_assert(const char *file, int line, const char *expression) __attribute__((noreturn)); +#define HIB_ASSERT(ex) \ + (__builtin_expect(!!((ex)), 1L) ? (void)0 : __hib_assert(__FILE__, __LINE__, # ex)) + +/*! + * @function pal_hib_map + * @discussion Given a map type and a physical address, return the corresponding virtual address. + * + * @param virt Which memory region to access + * @param phys The physical address to access + * + * @result The virtual address corresponding to this physical address. + */ +uintptr_t pal_hib_map(pal_hib_map_type_t virt, uint64_t phys); + +/*! + * @function pal_hib_restore_pal_state + * @discussion Callout to the platform abstraction layer to restore platform-specific data. + * + * @param src Pointer to platform-specific data + */ +void pal_hib_restore_pal_state(uint32_t *src); + +/*! + * @function pal_hib_init + * @discussion Platform-specific hibernation initialization. + */ +void pal_hib_init(void); + +/*! + * @function pal_hib_write_hook + * @discussion Platform-specific callout before the hibernation image is written. + */ +void pal_hib_write_hook(void); + +/*! + * @function pal_hib_resume_init + * @discussion Initialize the platform-specific hibernation resume context. Additional memory can + * be allocated with hibernate_page_list_grab if necessary + * + * @param palHibCtx Pointer to platform-specific hibernation resume context + * @param map map argument that can be passed to hibernate_page_list_grab + * @param nextFree nextFree argument that can be passed to hibernate_page_list_grab + */ +void pal_hib_resume_init(pal_hib_ctx_t *palHibCtx, hibernate_page_list_t *map, uint32_t *nextFree); + +/*! + * @function pal_hib_restored_page + * @discussion Inform the platform abstraction layer of a page that will be restored. + * + * @param palHibCtx Pointer to platform-specific hibernation resume context + * @param stage The stage of hibernation resume during which this page will be resumed + * @param ppnum The page number of the page that will be resumed. + */ +void pal_hib_restored_page(pal_hib_ctx_t *palHibCtx, pal_hib_restore_stage_t stage, ppnum_t ppnum); + +/*! + * @function pal_hib_patchup + * @discussion Allow the platform abstraction layer to perform post-restore fixups. + * + * @param palHibCtx Pointer to platform-specific hibernation resume context + */ +void pal_hib_patchup(pal_hib_ctx_t *palHibCtx); + +/*! + * @function pal_hib_teardown_pmap_structs + * @discussion Platform-specific function to return a range of memory that doesn't need to be saved during hibernation. + * + * @param unneeded_start Out parameter: the beginning of the unneeded range + * @param unneeded_end Out parameter: the end of the unneeded range + */ +void pal_hib_teardown_pmap_structs(addr64_t *unneeded_start, addr64_t *unneeded_end); + +/*! 
+ * @function pal_hib_rebuild_pmap_structs + * @discussion Platform-specific function to fix up the teardown done by pal_hib_teardown_pmap_structs. + */ +void pal_hib_rebuild_pmap_structs(void); + +/*! + * @function pal_hib_decompress_page + * @discussion Decompress a page of memory using WKdm + * + * @param src The compressed data + * @param dst A page-sized buffer to decompress into; must be page aligned + * @param scratch A page-sized scratch buffer to use during decompression + * @param compressedSize The number of bytes to decompress + */ +void pal_hib_decompress_page(void *src, void *dst, void *scratch, unsigned int compressedSize); + +__END_DECLS + #endif /* _MACHINE_PAL_HIBERNATE_H */ diff --git a/osfmk/machine/smp.h b/osfmk/machine/smp.h index 2714ba5a2..550065042 100644 --- a/osfmk/machine/smp.h +++ b/osfmk/machine/smp.h @@ -32,7 +32,10 @@ #if defined (__x86_64__) #include "i386/smp.h" #elif defined (__arm__) || defined (__arm64__) +#ifdef KERNEL_PRIVATE +/* arm/smp.h isn't installed into the public SDK. */ #include "arm/smp.h" +#endif /* KERNEL_PRIVATE */ #else #error architecture not supported #endif diff --git a/osfmk/prng/Makefile b/osfmk/prng/Makefile index d2234ec25..4f26863aa 100644 --- a/osfmk/prng/Makefile +++ b/osfmk/prng/Makefile @@ -7,6 +7,7 @@ include $(MakeInc_cmd) include $(MakeInc_def) EXPORT_ONLY_FILES = \ + entropy.h \ random.h EXPORT_MI_DIR = prng diff --git a/osfmk/prng/entropy.c b/osfmk/prng/entropy.c new file mode 100644 index 000000000..40a5e6ee7 --- /dev/null +++ b/osfmk/prng/entropy.c @@ -0,0 +1,70 @@ +/* + * Copyright (c) 2019 Apple Inc. All rights reserved. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. + * + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ + */ + +#include +#include +#include +#include +#include + +// Use a static buffer when the entropy collection boot arg is not present and before the +// RNG has been initialized. 
+static uint32_t entropy_buffer[ENTROPY_BUFFER_SIZE]; + +entropy_data_t EntropyData = { + .sample_count = 0, + .buffer = entropy_buffer, + .buffer_size = ENTROPY_BUFFER_SIZE, + .buffer_index_mask = ENTROPY_BUFFER_SIZE - 1, + .ror_mask = -1 +}; + +void +entropy_buffer_init(void) +{ + uint32_t ebsz = 0; + uint32_t *bp; + + if (PE_parse_boot_argn("ebsz", &ebsz, sizeof(ebsz))) { + if (((ebsz & (ebsz - 1)) != 0) || (ebsz < 32)) { + panic("entropy_buffer_init: entropy buffer size must be a power of 2 and >= 32\n"); + } + + register_entropy_sysctl(); + + bp = zalloc_permanent(sizeof(uint32_t) * ebsz, ZALIGN(uint32_t)); + + boolean_t interrupt_state = ml_set_interrupts_enabled(FALSE); + EntropyData.buffer = bp; + EntropyData.sample_count = 0; + EntropyData.buffer_size = sizeof(uint32_t) * ebsz; + EntropyData.buffer_index_mask = ebsz - 1; + EntropyData.ror_mask = 0; + ml_set_interrupts_enabled(interrupt_state); + } +} diff --git a/osfmk/prng/entropy.h b/osfmk/prng/entropy.h new file mode 100644 index 000000000..101c9343d --- /dev/null +++ b/osfmk/prng/entropy.h @@ -0,0 +1,94 @@ +/* + * Copyright (c) 2019 Apple Inc. All rights reserved. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. + * + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ + */ + +#ifndef _PRNG_ENTROPY_H_ +#define _PRNG_ENTROPY_H_ + +__BEGIN_DECLS + +#ifdef XNU_KERNEL_PRIVATE + +// The below three definitions are utilized when the kernel is in +// "normal" operation, that is when we are *not* interested in collecting +// entropy. + +// Indicates the number of bytes in the entropy buffer +#define ENTROPY_BUFFER_BYTE_SIZE 32 + +// Indicates the number of uint32_t's in the entropy buffer +#define ENTROPY_BUFFER_SIZE (ENTROPY_BUFFER_BYTE_SIZE / sizeof(uint32_t)) + +// Mask applied to EntropyData.sample_count to get an +// index suitable for storing the next sample in +// EntropyData.buffer. Note that ENTROPY_BUFFER_SIZE must be a power +// of two for the following mask calculation to be valid. +#define ENTROPY_BUFFER_INDEX_MASK (ENTROPY_BUFFER_SIZE - 1) + +typedef struct entropy_data { + /* + * TODO: Should sample_count be volatile? Are we exposed to any races that + * we care about if it is not? + */ + + // At 32 bits, this counter can overflow. Since we're primarily + // interested in the delta from one read to the next, we don't + // worry about this too much. 
+ uint32_t sample_count; + + // We point to either a static array when operating normally or + // a dynamically allocated array when we wish to collect entropy + // data. This decision is based on the presence of the boot + // argument "ebsz". + uint32_t *buffer; + + // The entropy buffer size in bytes. This must be a power of 2. + uint32_t buffer_size; + + // The mask used to index into the entropy buffer for storing + // the next entropy sample. + uint32_t buffer_index_mask; + + // The mask used to include the previous entropy buffer contents + // when updating the entropy buffer. When in entropy collection + // mode this is set to zero so that we can gather the raw entropy. + // In normal operation this is set to (uint32_t) -1. + uint32_t ror_mask; +} entropy_data_t; + +extern entropy_data_t EntropyData; + +/* Trace codes for DBG_SEC_KERNEL: */ +#define ENTROPY_READ(n) SECURITYDBG_CODE(DBG_SEC_KERNEL, n) /* n: 0 .. 3 */ + +#endif /* XNU_KERNEL_PRIVATE */ + +void entropy_buffer_init(void); + +__END_DECLS + +#endif /* _PRNG_ENTROPY_H_ */ diff --git a/osfmk/prng/prng_random.c b/osfmk/prng/prng_random.c index 4e000828b..31a59996b 100644 --- a/osfmk/prng/prng_random.c +++ b/osfmk/prng/prng_random.c @@ -35,6 +35,7 @@ #include #include #include +#include #include #include #include @@ -45,7 +46,7 @@ static struct cckprng_ctx *prng_ctx; static SECURITY_READ_ONLY_LATE(struct cckprng_funcs) prng_funcs; static SECURITY_READ_ONLY_LATE(int) prng_ready; -entropy_data_t EntropyData = {}; +extern entropy_data_t EntropyData; #define SEED_SIZE (SHA256_DIGEST_LENGTH) static uint8_t bootseed[SEED_SIZE]; @@ -146,7 +147,7 @@ static struct { .strictFIPS = 0, }}; -static void read_erandom(void * buf, uint32_t nbytes); +static void read_erandom(void * buf, size_t nbytes); /* * Return a uniformly distributed 64-bit random number. @@ -221,10 +222,10 @@ early_random(void) } static void -read_random_generate(uint8_t *buffer, u_int numbytes); +read_random_generate(uint8_t *buffer, size_t numbytes); static void -read_erandom(void * buf, uint32_t nbytes) +read_erandom(void * buf, size_t nbytes) { uint8_t * buffer_bytes = buf; size_t n; @@ -267,11 +268,13 @@ register_and_init_prng(struct cckprng_ctx *ctx, const struct cckprng_funcs *func assert(cpu_number() == master_cpu); assert(!prng_ready); + entropy_buffer_init(); + prng_ctx = ctx; prng_funcs = *funcs; uint64_t nonce = ml_get_timebase(); - prng_funcs.init(prng_ctx, MAX_CPUS, sizeof(EntropyData.buffer), EntropyData.buffer, &EntropyData.sample_count, sizeof(bootseed), bootseed, sizeof(nonce), &nonce); + prng_funcs.init(prng_ctx, MAX_CPUS, EntropyData.buffer_size, EntropyData.buffer, &EntropyData.sample_count, sizeof(bootseed), bootseed, sizeof(nonce), &nonce); prng_funcs.initgen(prng_ctx, master_cpu); prng_ready = 1; @@ -320,7 +323,7 @@ ensure_gsbase(void) } static void -read_random_generate(uint8_t *buffer, u_int numbytes) +read_random_generate(uint8_t *buffer, size_t numbytes) { ensure_gsbase(); diff --git a/osfmk/prng/random.h b/osfmk/prng/random.h index 61432793b..84424bd31 100644 --- a/osfmk/prng/random.h +++ b/osfmk/prng/random.h @@ -35,37 +35,8 @@ __BEGIN_DECLS #ifdef XNU_KERNEL_PRIVATE -#define ENTROPY_BUFFER_BYTE_SIZE 32 - -#define ENTROPY_BUFFER_SIZE (ENTROPY_BUFFER_BYTE_SIZE / sizeof(uint32_t)) - -// This mask can be applied to EntropyData.sample_count to get an -// index suitable for storing the next sample in -// EntropyData.buffer. Note that ENTROPY_BUFFER_SIZE must be a power -// of two for the following mask calculation to be valid. 
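To make the masks concrete: a producer of timing samples would typically derive the slot from sample_count and fold the new value into whatever is already there, with ror_mask deciding whether the previous contents survive (normal operation) or are discarded (raw collection via the ebsz boot-arg). This is an illustrative sketch only, not the actual interrupt-path sampling code:

    static void
    entropy_record_sample(uint32_t sample)
    {
        uint32_t i   = EntropyData.sample_count & EntropyData.buffer_index_mask;
        uint32_t old = EntropyData.buffer[i] & EntropyData.ror_mask;

        /* ror_mask == (uint32_t)-1: mix with prior contents; == 0: keep raw samples */
        EntropyData.buffer[i] = old ^ sample;
        EntropyData.sample_count += 1;
    }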
-#define ENTROPY_BUFFER_INDEX_MASK (ENTROPY_BUFFER_SIZE - 1) - -typedef struct entropy_data { - /* - * TODO: Should sample_count be volatile? Are we exposed to any races that - * we care about if it is not? - */ - - // At 32 bits, this counter can overflow. Since we're primarily - // interested in the delta from one read to the next, we don't - // worry about this too much. - uint32_t sample_count; - uint32_t buffer[ENTROPY_BUFFER_SIZE]; -} entropy_data_t; - -extern entropy_data_t EntropyData; - -/* Trace codes for DBG_SEC_KERNEL: */ -#define ENTROPY_READ(n) SECURITYDBG_CODE(DBG_SEC_KERNEL, n) /* n: 0 .. 3 */ - void random_cpu_init(int cpu); - #endif /* XNU_KERNEL_PRIVATE */ void register_and_init_prng(struct cckprng_ctx *ctx, const struct cckprng_funcs *funcs); diff --git a/osfmk/tests/bitmap_test.c b/osfmk/tests/bitmap_test.c index 8eb5c35cc..9d8d80b13 100644 --- a/osfmk/tests/bitmap_test.c +++ b/osfmk/tests/bitmap_test.c @@ -73,6 +73,7 @@ test_bitmap(void) for (uint i = 0; i < nbits; i++) { bitmap_set(map, i); } + assert(bitmap_is_full(map, nbits)); int expected_result = nbits - 1; for (int i = bitmap_first(map, nbits); i >= 0; i = bitmap_next(map, i)) { @@ -81,6 +82,21 @@ test_bitmap(void) } assert(expected_result == -1); + bitmap_zero(map, nbits); + + assert(bitmap_first(map, nbits) == -1); + assert(bitmap_lsb_first(map, nbits) == -1); + + bitmap_full(map, nbits); + assert(bitmap_is_full(map, nbits)); + + expected_result = nbits - 1; + for (int i = bitmap_first(map, nbits); i >= 0; i = bitmap_next(map, i)) { + assert(i == expected_result); + expected_result--; + } + assert(expected_result == -1); + expected_result = 0; for (int i = bitmap_lsb_first(map, nbits); i >= 0; i = bitmap_lsb_next(map, nbits, i)) { assert(i == expected_result); @@ -88,6 +104,13 @@ test_bitmap(void) } assert(expected_result == (int)nbits); + for (uint i = 0; i < nbits; i++) { + bitmap_clear(map, i); + assert(!bitmap_is_full(map, nbits)); + bitmap_set(map, i); + assert(bitmap_is_full(map, nbits)); + } + for (uint i = 0; i < nbits; i++) { bitmap_clear(map, i); } diff --git a/osfmk/tests/kernel_tests.c b/osfmk/tests/kernel_tests.c index 4748deb1c..da46869c7 100644 --- a/osfmk/tests/kernel_tests.c +++ b/osfmk/tests/kernel_tests.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019 Apple Inc. All rights reserved. + * Copyright (c) 2019-2020 Apple Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -35,7 +35,7 @@ #include #include #include -#include +#include #include #include #include @@ -44,7 +44,7 @@ #include #include #include -#include +#include #include #if !(DEVELOPMENT || DEBUG) @@ -63,13 +63,16 @@ void xnupost_reset_panic_widgets(void); kern_return_t zalloc_test(void); kern_return_t RandomULong_test(void); kern_return_t kcdata_api_test(void); -kern_return_t priority_queue_test(void); kern_return_t ts_kernel_primitive_test(void); kern_return_t ts_kernel_sleep_inheritor_test(void); kern_return_t ts_kernel_gate_test(void); kern_return_t ts_kernel_turnstile_chain_test(void); kern_return_t ts_kernel_timingsafe_bcmp_test(void); +#if __ARM_VFP__ +extern kern_return_t vfp_state_test(void); +#endif + extern kern_return_t kprintf_hhx_test(void); #if defined(__arm__) || defined(__arm64__) @@ -126,13 +129,16 @@ struct xnupost_test kernel_post_tests[] = {XNUPOST_TEST_CONFIG_BASIC(zalloc_test XNUPOST_TEST_CONFIG_BASIC(bitmap_post_test), //XNUPOST_TEST_CONFIG_TEST_PANIC(kcdata_api_assert_tests) XNUPOST_TEST_CONFIG_BASIC(test_thread_call), - XNUPOST_TEST_CONFIG_BASIC(priority_queue_test), XNUPOST_TEST_CONFIG_BASIC(ts_kernel_primitive_test), XNUPOST_TEST_CONFIG_BASIC(ts_kernel_sleep_inheritor_test), XNUPOST_TEST_CONFIG_BASIC(ts_kernel_gate_test), XNUPOST_TEST_CONFIG_BASIC(ts_kernel_turnstile_chain_test), XNUPOST_TEST_CONFIG_BASIC(ts_kernel_timingsafe_bcmp_test), - XNUPOST_TEST_CONFIG_BASIC(kprintf_hhx_test), }; + XNUPOST_TEST_CONFIG_BASIC(kprintf_hhx_test), +#if __ARM_VFP__ + XNUPOST_TEST_CONFIG_BASIC(vfp_state_test), +#endif + XNUPOST_TEST_CONFIG_BASIC(vm_tests), }; uint32_t kernel_post_tests_count = sizeof(kernel_post_tests) / sizeof(xnupost_test_data_t); @@ -205,7 +211,8 @@ xnupost_list_tests(xnupost_test_t test_list, uint32_t test_count) for (uint32_t i = 0; i < test_count; i++) { testp = &test_list[i]; if (testp->xt_test_num == 0) { - testp->xt_test_num = ++total_post_tests_count; + assert(total_post_tests_count < UINT16_MAX); + testp->xt_test_num = (uint16_t)++total_post_tests_count; } /* make sure the boot-arg based test run list is honored */ if (kernel_post_args & POSTARGS_CUSTOM_TEST_RUNLIST) { @@ -388,16 +395,17 @@ xnupost_reset_tests(xnupost_test_t test_list, uint32_t test_count) kern_return_t -zalloc_test() +zalloc_test(void) { zone_t test_zone; void * test_ptr; T_SETUPBEGIN; - test_zone = zinit(sizeof(uint64_t), 100 * sizeof(uint64_t), sizeof(uint64_t), "test_uint64_zone"); + test_zone = zone_create("test_uint64_zone", sizeof(uint64_t), + ZC_DESTRUCTIBLE); T_ASSERT_NOTNULL(test_zone, NULL); - T_ASSERT_EQ_INT(zone_free_count(test_zone), 0, NULL); + T_ASSERT_EQ_INT(test_zone->countfree, 0, NULL); T_SETUPEND; T_ASSERT_NOTNULL(test_ptr = zalloc(test_zone), NULL); @@ -427,129 +435,6 @@ compare_numbers_ascending(const void * a, const void * b) } } -/* - * Function used for comparison by qsort() - */ -static int -compare_numbers_descending(const void * a, const void * b) -{ - const uint32_t x = *(const uint32_t *)a; - const uint32_t y = *(const uint32_t *)b; - if (x > y) { - return -1; - } else if (x < y) { - return 1; - } else { - return 0; - } -} - -/* Node structure for the priority queue tests */ -struct priority_queue_test_node { - struct priority_queue_entry link; - priority_queue_key_t node_key; -}; - -static void -priority_queue_test_queue(struct priority_queue *pq, int type, - priority_queue_compare_fn_t cmp_fn) -{ - /* Configuration for the test */ -#define PRIORITY_QUEUE_NODES 7 - static uint32_t priority_list[] = { 20, 3, 7, 
6, 50, 2, 8}; - uint32_t increase_pri = 100; - uint32_t decrease_pri = 90; - struct priority_queue_test_node *result; - uint32_t key = 0; - boolean_t update_result = false; - - struct priority_queue_test_node *node = NULL; - /* Add all priorities to the first priority queue */ - for (int i = 0; i < PRIORITY_QUEUE_NODES; i++) { - node = kalloc(sizeof(struct priority_queue_test_node)); - T_ASSERT_NOTNULL(node, NULL); - - priority_queue_entry_init(&(node->link)); - node->node_key = priority_list[i]; - key = (type == PRIORITY_QUEUE_GENERIC_KEY) ? PRIORITY_QUEUE_KEY_NONE : priority_list[i]; - priority_queue_insert(pq, &(node->link), key, cmp_fn); - } - - T_ASSERT_NOTNULL(node, NULL); - key = (type == PRIORITY_QUEUE_GENERIC_KEY) ? node->node_key : priority_queue_entry_key(pq, &(node->link)); - T_ASSERT((key == node->node_key), "verify node stored key correctly"); - - /* Test the priority increase operation by updating the last node added (8) */ - T_ASSERT_NOTNULL(node, NULL); - node->node_key = increase_pri; - key = (type == PRIORITY_QUEUE_GENERIC_KEY) ? PRIORITY_QUEUE_KEY_NONE : node->node_key; - update_result = priority_queue_entry_increase(pq, &node->link, key, cmp_fn); - T_ASSERT((update_result == true), "increase key updated root"); - result = priority_queue_max(pq, struct priority_queue_test_node, link); - T_ASSERT((result->node_key == increase_pri), "verify priority_queue_entry_increase() operation"); - - - /* Test the priority decrease operation by updating the last node added */ - T_ASSERT((result == node), NULL); - node->node_key = decrease_pri; - key = (type == PRIORITY_QUEUE_GENERIC_KEY) ? PRIORITY_QUEUE_KEY_NONE : node->node_key; - update_result = priority_queue_entry_decrease(pq, &node->link, key, cmp_fn); - T_ASSERT((update_result == true), "decrease key updated root"); - result = priority_queue_max(pq, struct priority_queue_test_node, link); - T_ASSERT((result->node_key == decrease_pri), "verify priority_queue_entry_decrease() operation"); - - /* Update our local priority list as well */ - priority_list[PRIORITY_QUEUE_NODES - 1] = decrease_pri; - - /* Sort the local list in descending order */ - qsort(priority_list, PRIORITY_QUEUE_NODES, sizeof(priority_list[0]), compare_numbers_descending); - - /* Test the maximum operation by comparing max node with local list */ - result = priority_queue_max(pq, struct priority_queue_test_node, link); - T_ASSERT((result->node_key == priority_list[0]), "(heap (%u) == qsort (%u)) priority queue max node lookup", - (uint32_t)result->node_key, priority_list[0]); - - /* Remove all remaining elements and verify they match local list */ - for (int i = 0; i < PRIORITY_QUEUE_NODES; i++) { - result = priority_queue_remove_max(pq, struct priority_queue_test_node, link, cmp_fn); - T_ASSERT((result->node_key == priority_list[i]), "(heap (%u) == qsort (%u)) priority queue max node removal", - (uint32_t)result->node_key, priority_list[i]); - } - - priority_queue_destroy(pq, struct priority_queue_test_node, link, ^(void *n) { - kfree(n, sizeof(struct priority_queue_test_node)); - }); -} - -kern_return_t -priority_queue_test(void) -{ - /* - * Initialize two priority queues - * - One which uses the key comparator - * - Other which uses the node comparator - */ - static struct priority_queue pq; - static struct priority_queue pq_nodes; - - T_SETUPBEGIN; - - priority_queue_init(&pq, PRIORITY_QUEUE_BUILTIN_KEY | PRIORITY_QUEUE_MAX_HEAP); - priority_queue_init(&pq_nodes, PRIORITY_QUEUE_GENERIC_KEY | PRIORITY_QUEUE_MAX_HEAP); - - T_SETUPEND; - - 
priority_queue_test_queue(&pq, PRIORITY_QUEUE_BUILTIN_KEY, - PRIORITY_QUEUE_SCHED_PRI_MAX_HEAP_COMPARE); - - priority_queue_test_queue(&pq_nodes, PRIORITY_QUEUE_GENERIC_KEY, - priority_heap_make_comparator(a, b, struct priority_queue_test_node, link, { - return (a->node_key > b->node_key) ? 1 : ((a->node_key == b->node_key) ? 0 : -1); - })); - - return KERN_SUCCESS; -} - /* * Function to count number of bits that are set in a number. * It uses Side Addition using Magic Binary Numbers @@ -751,7 +636,7 @@ kcdata_api_test() char data[30] = "sample_disk_io_stats"; retval = kcdata_memory_static_init(&test_kc_data, (mach_vm_address_t)&data, KCDATA_BUFFER_BEGIN_CRASHINFO, sizeof(data), KCFLAG_USE_MEMCOPY); - T_ASSERT(retval == KERN_RESOURCE_SHORTAGE, "init with 30 bytes failed as expected with KERN_RESOURCE_SHORTAGE"); + T_ASSERT(retval == KERN_INSUFFICIENT_BUFFER_SIZE, "init with 30 bytes failed as expected with KERN_INSUFFICIENT_BUFFER_SIZE"); /* test with COPYOUT for 0x0 address. Should return KERN_NO_ACCESS */ retval = kcdata_memory_static_init(&test_kc_data, (mach_vm_address_t)0, KCDATA_BUFFER_BEGIN_CRASHINFO, PAGE_SIZE, @@ -809,7 +694,7 @@ kcdata_api_test() user_addr = 0xdeadbeef; bytes_used = kcdata_memory_get_used_bytes(&test_kc_data); retval = kcdata_get_memory_addr(&test_kc_data, KCDATA_TYPE_MACH_ABSOLUTE_TIME, PAGE_SIZE * 4, &user_addr); - T_ASSERT(retval == KERN_RESOURCE_SHORTAGE, "Allocating entry with size > buffer -> KERN_RESOURCE_SHORTAGE"); + T_ASSERT(retval == KERN_INSUFFICIENT_BUFFER_SIZE, "Allocating entry with size > buffer -> KERN_INSUFFICIENT_BUFFER_SIZE"); T_ASSERT(user_addr == 0xdeadbeef, "user_addr remained unaffected with failed kcdata_get_memory_addr"); T_ASSERT(bytes_used == kcdata_memory_get_used_bytes(&test_kc_data), "The data structure should be unaffected"); @@ -830,7 +715,7 @@ kcdata_api_test() T_ASSERT(retval == KERN_SUCCESS, "Array of 20 integers should be possible"); T_ASSERT(user_addr != 0xdeadbeef, "user_addr is updated as expected"); T_ASSERT((kcdata_memory_get_used_bytes(&test_kc_data) - bytes_used) >= 20 * sizeof(uint64_t), "memory allocation is in range"); - kcdata_iter_t iter = kcdata_iter(item_p, PAGE_SIZE - kcdata_memory_get_used_bytes(&test_kc_data)); + kcdata_iter_t iter = kcdata_iter(item_p, (unsigned long)(PAGE_SIZE - kcdata_memory_get_used_bytes(&test_kc_data))); T_ASSERT(kcdata_iter_array_elem_count(iter) == 20, "array count is 20"); /* FIXME add tests here for ranges of sizes and counts */ @@ -942,19 +827,19 @@ pmap_coredump_test(void) T_ASSERT_GE_ULONG(lowGlo.lgStaticAddr, gPhysBase, NULL); T_ASSERT_LE_ULONG(lowGlo.lgStaticAddr + lowGlo.lgStaticSize, first_avail, NULL); T_ASSERT_EQ_ULONG(lowGlo.lgLayoutMajorVersion, 3, NULL); - T_ASSERT_EQ_ULONG(lowGlo.lgLayoutMinorVersion, 0, NULL); + T_ASSERT_EQ_ULONG(lowGlo.lgLayoutMinorVersion, 2, NULL); T_ASSERT_EQ_ULONG(lowGlo.lgLayoutMagic, LOWGLO_LAYOUT_MAGIC, NULL); // check the constant values in lowGlo - T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemQ, ((uint64_t) &(pmap_object_store.memq)), NULL); + T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemQ, ((typeof(lowGlo.lgPmapMemQ)) & (pmap_object_store.memq)), NULL); T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemPageOffset, offsetof(struct vm_page_with_ppnum, vmp_phys_page), NULL); T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemChainOffset, offsetof(struct vm_page, vmp_listq), NULL); T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemPagesize, sizeof(struct vm_page), NULL); #if defined(__arm64__) - T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemFromArrayMask, VM_PACKED_FROM_VM_PAGES_ARRAY, NULL); - 
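/*
 * Illustrative sketch of the updated kcdata error contract exercised above:
 * initializing a static kcdata buffer too small to hold even the begin/end
 * framing now reports KERN_INSUFFICIENT_BUFFER_SIZE instead of
 * KERN_RESOURCE_SHORTAGE. The descriptor type name is assumed to match the
 * one used by the test's test_kc_data variable.
 */
static kern_return_t
kcdata_small_buffer_sketch(void)
{
	struct kcdata_descriptor kcd;
	char small_buf[30];
	kern_return_t kr;

	kr = kcdata_memory_static_init(&kcd, (mach_vm_address_t)&small_buf,
	    KCDATA_BUFFER_BEGIN_CRASHINFO, sizeof(small_buf), KCFLAG_USE_MEMCOPY);
	if (kr == KERN_INSUFFICIENT_BUFFER_SIZE) {
		/* expected: 30 bytes cannot hold the kcdata framing */
		return KERN_SUCCESS;
	}
	return kr;
}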
T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemPackedShift, VM_PACKED_POINTER_SHIFT, NULL); - T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemPackedBaseAddr, VM_MIN_KERNEL_AND_KEXT_ADDRESS, NULL); + T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemFromArrayMask, VM_PAGE_PACKED_FROM_ARRAY, NULL); + T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemPackedShift, VM_PAGE_PACKED_PTR_SHIFT, NULL); + T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemPackedBaseAddr, VM_PAGE_PACKED_PTR_BASE, NULL); #endif vm_object_lock_shared(&pmap_object_store); diff --git a/osfmk/tests/ktest_emit.c b/osfmk/tests/ktest_emit.c index 48bfb5b4b..ea8f293b3 100644 --- a/osfmk/tests/ktest_emit.c +++ b/osfmk/tests/ktest_emit.c @@ -67,7 +67,7 @@ ktest_emit_testbegin(const char * test_name) int ret; /* left trim the file path for readability */ - char *fname = strnstr((char *)(uintptr_t)ktest_current_file, "xnu", 100); + const char *fname = strnstr((char *)(uintptr_t)ktest_current_file, "xnu", 100); ret = snprintf(msg, size, @@ -95,7 +95,7 @@ ktest_emit_testskip(const char * skip_msg, va_list args) int size = sizeof(ktest_output_buf); int ret; - char *fname = strnstr((char *)(uintptr_t)ktest_current_file, "xnu", 100); + const char *fname = strnstr((char *)(uintptr_t)ktest_current_file, "xnu", 100); ret = snprintf(msg, size, @@ -125,7 +125,7 @@ ktest_emit_testend() int size = sizeof(ktest_output_buf); int ret; - char *fname = strnstr((char *)(uintptr_t)ktest_current_file, "xnu", 100); + const char *fname = strnstr((char *)(uintptr_t)ktest_current_file, "xnu", 100); ret = snprintf(msg, size, @@ -153,7 +153,7 @@ ktest_emit_log(const char * log_msg, va_list args) int size = sizeof(ktest_output_buf); int ret; - char *fname = strnstr((char *)(uintptr_t)ktest_current_file, "xnu", 100); + const char *fname = strnstr((char *)(uintptr_t)ktest_current_file, "xnu", 100); ret = snprintf(msg, size, @@ -185,7 +185,7 @@ ktest_emit_perfdata(const char * metric, const char * unit, double value, const int size = sizeof(ktest_output_buf); int ret; - char *fname = strnstr((char *)(uintptr_t)ktest_current_file, "xnu", 100); + const char *fname = strnstr((char *)(uintptr_t)ktest_current_file, "xnu", 100); ret = snprintf(msg, size, "\n[KTEST]\t" /* header */ @@ -214,7 +214,7 @@ ktest_emit_testcase(void) int size = sizeof(ktest_output_buf); int ret; - char *fname = strnstr((char *)(uintptr_t)ktest_current_file, "xnu", 100); + const char *fname = strnstr((char *)(uintptr_t)ktest_current_file, "xnu", 100); ret = snprintf(msg, size, diff --git a/osfmk/tests/pmap_tests.c b/osfmk/tests/pmap_tests.c index a12ca10c8..b4bdf2071 100644 --- a/osfmk/tests/pmap_tests.c +++ b/osfmk/tests/pmap_tests.c @@ -253,6 +253,25 @@ test_pmap_iommu_disconnect(void) pmap_disconnect(phys_page); assert(pmap_verify_free(phys_page)); + /* Phase 7: allocate contiguous memory and hand it to the shart */ + shart_more more_shart; + more_shart.nbytes = (PAGE_SIZE * 5) + 42; + more_shart.baseaddr = pmap_iommu_alloc_contiguous_pages(&iommu->super, more_shart.nbytes, 0, 0, VM_WIMG_DEFAULT); + assert(more_shart.baseaddr != 0); + + kr = pmap_iommu_ioctl(&iommu->super, SHART_IOCTL_MORE, &more_shart, sizeof(more_shart), NULL, 0); + assert(kr == KERN_SUCCESS); + assert(iommu->extra_memory == more_shart.baseaddr); + assert(iommu->extra_bytes == more_shart.nbytes); + + more_shart.baseaddr += PAGE_SIZE; + more_shart.nbytes -= PAGE_SIZE; + kr = pmap_iommu_ioctl(&iommu->super, SHART_IOCTL_MORE, &more_shart, sizeof(more_shart), NULL, 0); + assert(kr == KERN_NOT_SUPPORTED); + kr = KERN_SUCCESS; + assert(iommu->extra_memory == (more_shart.baseaddr - PAGE_SIZE)); + 
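/*
 * Illustrative sketch of the const-correct path trimming done in the
 * ktest_emit.c hunks above: strnstr() left-trims an absolute source path
 * down to its "xnu..." suffix, and the result is now held in a
 * const char * because the underlying string must not be modified.
 * The helper name and fallback are local to this sketch.
 */
static const char *
trim_source_path_sketch(const char *file)
{
	/* search at most the first 100 bytes for the project prefix */
	const char *fname = strnstr((char *)(uintptr_t)file, "xnu", 100);

	return (fname != NULL) ? fname : file;
}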
assert(iommu->extra_bytes == (more_shart.nbytes + PAGE_SIZE)); + cleanup: if (iommu != NULL) { @@ -273,6 +292,7 @@ cleanup: #endif } + kern_return_t test_pmap_extended(void) { diff --git a/osfmk/tests/ptrauth_data_tests.c b/osfmk/tests/ptrauth_data_tests.c new file mode 100644 index 000000000..a9c4c8bb5 --- /dev/null +++ b/osfmk/tests/ptrauth_data_tests.c @@ -0,0 +1,127 @@ +/* + * Copyright (c) 2020 Apple Inc. All rights reserved. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. + * + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ + */ + +#if DEVELOPMENT || DEBUG +#if __has_feature(ptrauth_calls) + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +kern_return_t ptrauth_data_tests(void); + +/* + * Given an existing PAC pointer (ptr), its declaration type (decl), the (key) + * used to sign it and the string discriminator (discr), extract the raw pointer + * along with the signature and compare it with one computed on the fly + * via ptrauth_sign_unauthenticated(). + * + * If the two mismatch, return an error and fail the test. + */ +#define VALIDATE_PTR(decl, ptr, key, discr) { \ + decl raw = *(decl *)&(ptr); \ + decl cmp = ptrauth_sign_unauthenticated(ptr, key, \ + ptrauth_blend_discriminator(&ptr, ptrauth_string_discriminator(discr))); \ + if (cmp != raw) { \ + printf("kern.run_pac_test: %s (%s) (discr=%s) is not signed as expected (%p vs %p)\n", #decl, #ptr, #discr, raw, cmp); \ + kr = KERN_INVALID_ADDRESS; \ + } \ +} + +/* + * Allocate the containing structure, and store a pointer to the desired member, + * which should be subject to pointer signing. 
+ */ +#define ALLOC_VALIDATE_DATA_PTR(structure, decl, member, discr) { \ + structure *tmp = kheap_alloc(KHEAP_TEMP, sizeof(structure), Z_WAITOK | Z_ZERO); \ + if (!tmp) return KERN_NO_SPACE; \ + tmp->member = (void*)0xffffffff41414141; \ + VALIDATE_DATA_PTR(decl, tmp->member, discr) \ + kheap_free(KHEAP_TEMP, tmp, sizeof(structure)); \ +} + +#define VALIDATE_DATA_PTR(decl, ptr, discr) VALIDATE_PTR(decl, ptr, ptrauth_key_process_independent_data, discr) + +/* + * Validate that a pointer that is supposed to be signed, is, and that the signature + * matches based on signing key, location and discriminator + */ +kern_return_t +ptrauth_data_tests(void) +{ + int kr = KERN_SUCCESS; + + /* task_t */ + ALLOC_VALIDATE_DATA_PTR(struct task, vm_map_t, map, "task.map"); + ALLOC_VALIDATE_DATA_PTR(struct task, struct ipc_port *, itk_self[0], "task.itk_self"); + ALLOC_VALIDATE_DATA_PTR(struct task, struct ipc_port *, itk_settable_self, "task.itk_settable_self"); + ALLOC_VALIDATE_DATA_PTR(struct task, struct ipc_port *, itk_host, "task.itk_host"); + ALLOC_VALIDATE_DATA_PTR(struct task, struct ipc_port *, itk_bootstrap, "task.itk_bootstrap"); + ALLOC_VALIDATE_DATA_PTR(struct task, struct ipc_port *, itk_debug_control, "task.itk_debug_control"); + ALLOC_VALIDATE_DATA_PTR(struct task, struct ipc_space *, itk_space, "task.itk_space"); + ALLOC_VALIDATE_DATA_PTR(struct task, void *, bsd_info, "task.bsd_info"); + ALLOC_VALIDATE_DATA_PTR(struct task, struct ipc_port *, itk_gssd, "task.itk_gssd"); + ALLOC_VALIDATE_DATA_PTR(struct task, struct ipc_port *, itk_task_access, "task.itk_task_access"); + ALLOC_VALIDATE_DATA_PTR(struct task, struct ipc_port *, itk_resume, "task.itk_resume"); + ALLOC_VALIDATE_DATA_PTR(struct task, struct ipc_port *, itk_seatbelt, "task.itk_seatbelt"); + + /* _vm_map */ + ALLOC_VALIDATE_DATA_PTR(struct _vm_map, pmap_t, pmap, "_vm_map.pmap"); + + /* pmap */ + ALLOC_VALIDATE_DATA_PTR(struct pmap, tt_entry_t *, tte, "pmap.tte"); + ALLOC_VALIDATE_DATA_PTR(struct pmap, pmap_t, nested_pmap, "pmap.nested_pmap"); + + /* ipc_port */ + ALLOC_VALIDATE_DATA_PTR(struct ipc_port, ipc_kobject_t, ip_kobject, "ipc_port.kobject"); + ALLOC_VALIDATE_DATA_PTR(struct ipc_port, ipc_kobject_label_t, ip_kolabel, "ipc_port.kolabel"); + + /* ipc_kobject_label */ + ALLOC_VALIDATE_DATA_PTR(struct ipc_kobject_label, ipc_kobject_t, ikol_kobject, "ipc_kobject_label.ikol_kobject"); + + /* ipc_entry */ + ALLOC_VALIDATE_DATA_PTR(struct ipc_entry, struct ipc_object *, ie_object, "ipc_entry.ie_object"); + + /* ipc_kmsg */ + ALLOC_VALIDATE_DATA_PTR(struct ipc_kmsg, struct ipc_port *, ikm_prealloc, "kmsg.ikm_prealloc"); + ALLOC_VALIDATE_DATA_PTR(struct ipc_kmsg, void *, ikm_data, "kmsg.ikm_data"); + ALLOC_VALIDATE_DATA_PTR(struct ipc_kmsg, mach_msg_header_t *, ikm_header, "kmsg.ikm_header"); + ALLOC_VALIDATE_DATA_PTR(struct ipc_kmsg, struct ipc_port *, ikm_voucher, "kmsg.ikm_voucher"); + + return kr; +} + +#endif /* __has_feature(ptrauth_calls) */ +#endif /* DEVELOPMENT || DEBUG */ diff --git a/osfmk/tests/vfp_state_test.c b/osfmk/tests/vfp_state_test.c new file mode 100644 index 000000000..6487f0581 --- /dev/null +++ b/osfmk/tests/vfp_state_test.c @@ -0,0 +1,247 @@ +/* + * Copyright (c) 2019 Apple Inc. All rights reserved. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. 
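/*
 * Illustrative sketch of the temporary-allocation pattern used by
 * ALLOC_VALIDATE_DATA_PTR() above: a containing structure is allocated from
 * KHEAP_TEMP just long enough to inspect one signed member, then freed.
 * The structure name below is hypothetical.
 */
struct ptr_check_box_sketch {
	void *member;
};

static kern_return_t
kheap_temp_alloc_sketch(void)
{
	struct ptr_check_box_sketch *tmp;

	tmp = kheap_alloc(KHEAP_TEMP, sizeof(*tmp), Z_WAITOK | Z_ZERO);
	if (tmp == NULL) {
		return KERN_NO_SPACE;
	}
	/* ... inspect the signature of tmp->member here ... */
	kheap_free(KHEAP_TEMP, tmp, sizeof(*tmp));
	return KERN_SUCCESS;
}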
The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. + * + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ + */ + +#if !(DEVELOPMENT || DEBUG) +#error "Testing is not enabled on RELEASE configurations" +#endif + +#include +#include +#include +#include +#include + +#define VFP_STATE_TEST_N_THREADS 4 +#define VFP_STATE_TEST_N_REGS 8 +#define VFP_STATE_TEST_N_ITER 100 +#define VFP_STATE_TEST_DELAY_USEC 10000 +#if __arm__ +#define VFP_STATE_TEST_NZCV_SHIFT 28 +#define VFP_STATE_TEST_NZCV_MAX 16 +#else +#define VFP_STATE_TEST_RMODE_STRIDE_SHIFT 20 +#define VFP_STATE_TEST_RMODE_STRIDE_MAX 16 +#endif + +#if __ARM_VFP__ +extern kern_return_t vfp_state_test(void); + +const uint64_t vfp_state_test_regs[VFP_STATE_TEST_N_REGS] = { + 0x6a4cac4427ab5658, 0x51200e9ebbe0c9d1, + 0xa94d20c2bbe367bc, 0xfee45035460927db, + 0x64f3f1f7e93d019f, 0x02a625f02b890a40, + 0xf5e42399d8480de8, 0xc38cdde520908d6b, +}; + +struct vfp_state_test_args { + uint64_t vfp_reg_rand; +#if __arm__ + uint32_t fp_control_mask; +#else + uint64_t fp_control_mask; +#endif + int result; + int *start_barrier; + int *end_barrier; +}; + +static void +wait_threads( + int* var, + int num) +{ + if (var != NULL) { + while (os_atomic_load(var, acquire) != num) { + assert_wait((event_t) var, THREAD_UNINT); + if (os_atomic_load(var, acquire) != num) { + (void) thread_block(THREAD_CONTINUE_NULL); + } else { + clear_wait(current_thread(), THREAD_AWAKENED); + } + } + } +} + +static void +wake_threads( + int* var) +{ + if (var) { + os_atomic_inc(var, relaxed); + thread_wakeup((event_t) var); + } +} + +static void +vfp_state_test_thread_routine(void *args, __unused wait_result_t wr) +{ + struct vfp_state_test_args *vfp_state_test_args = (struct vfp_state_test_args *)args; + uint64_t *vfp_regs, *vfp_regs_expected; + int retval; +#if __arm__ + uint32_t fp_control, fp_control_expected; +#else + uint64_t fp_control, fp_control_expected; +#endif + + vfp_state_test_args->result = -1; + + /* Allocate memory to store expected and actual VFP register values */ + vfp_regs = kalloc(sizeof(vfp_state_test_regs)); + if (vfp_regs == NULL) { + goto vfp_state_thread_kalloc1_failure; + } + + vfp_regs_expected = kalloc(sizeof(vfp_state_test_regs)); + if (vfp_regs_expected == NULL) { + goto vfp_state_thread_kalloc2_failure; + } + + /* Preload VFP registers with unique, per-thread patterns */ + bcopy(vfp_state_test_regs, vfp_regs_expected, sizeof(vfp_state_test_regs)); + for (int i = 0; i < VFP_STATE_TEST_N_REGS; i++) { + vfp_regs_expected[i] ^= vfp_state_test_args->vfp_reg_rand; + } + +#if __arm__ + asm volatile ("vldr d8, [%0, #0] \t\n vldr d9, [%0, #8] \t\n\ + vldr d10, [%0, #16] \t\n vldr d11, [%0, #24] \t\n\ + 
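/*
 * A minimal usage sketch of the counting barrier implemented by
 * wait_threads()/wake_threads() above: each worker atomically bumps the
 * shared counter and wakes any waiter, while the coordinating thread sleeps
 * on the counter's address until all workers have checked in. Assumes the
 * static helpers defined earlier in this file.
 */
static void
barrier_worker_checkin_sketch(int *barrier)
{
	wake_threads(barrier);            /* increment count and wake waiters */
}

static void
barrier_wait_for_all_sketch(int *barrier, int nworkers)
{
	wait_threads(barrier, nworkers);  /* block until count reaches nworkers */
}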
vldr d12, [%0, #32] \t\n vldr d13, [%0, #40] \t\n\ + vldr d14, [%0, #48] \t\n vldr d15, [%0, #56]" \ + : : "r"(vfp_regs_expected) : \ + "memory", "d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15"); + + /* + * Set FPSCR to a known value, so we can validate the save/restore path. + * Only touch NZCV flags, since 1) writing them does not have visible side-effects + * and 2) they're only set by the CPU as a result of executing an FP comparison, + * which do not exist in this function. + */ + asm volatile ("fmrx %0, fpscr" : "=r"(fp_control_expected)); + fp_control_expected |= vfp_state_test_args->fp_control_mask; + asm volatile ("fmxr fpscr, %0" : : "r"(fp_control_expected)); +#else + asm volatile ("ldr d8, [%0, #0] \t\n ldr d9, [%0, #8] \t\n\ + ldr d10, [%0, #16] \t\n ldr d11, [%0, #24] \t\n\ + ldr d12, [%0, #32] \t\n ldr d13, [%0, #40] \t\n\ + ldr d14, [%0, #48] \t\n ldr d15, [%0, #56]" \ + : : "r"(vfp_regs_expected) : \ + "memory", "d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15"); + + asm volatile ("mrs %0, fpcr" : "=r"(fp_control_expected)); + fp_control_expected |= vfp_state_test_args->fp_control_mask; + asm volatile ("msr fpcr, %0" : : "r"(fp_control_expected)); +#endif + + /* Make sure all threads start at roughly the same time */ + wake_threads(vfp_state_test_args->start_barrier); + wait_threads(vfp_state_test_args->start_barrier, VFP_STATE_TEST_N_THREADS); + + /* Check VFP registers against expected values, and go to sleep */ + for (int i = 0; i < VFP_STATE_TEST_N_ITER; i++) { + bzero(vfp_regs, sizeof(vfp_state_test_regs)); + +#if __arm__ + asm volatile ("vstr d8, [%0, #0] \t\n vstr d9, [%0, #8] \t\n\ + vstr d10, [%0, #16] \t\n vstr d11, [%0, #24] \t\n\ + vstr d12, [%0, #32] \t\n vstr d13, [%0, #40] \t\n\ + vstr d14, [%0, #48] \t\n vstr d15, [%0, #56]" \ + : : "r"(vfp_regs) : "memory"); + asm volatile ("fmrx %0, fpscr" : "=r"(fp_control)); +#else + asm volatile ("str d8, [%0, #0] \t\n str d9, [%0, #8] \t\n\ + str d10, [%0, #16] \t\n str d11, [%0, #24] \t\n\ + str d12, [%0, #32] \t\n str d13, [%0, #40] \t\n\ + str d14, [%0, #48] \t\n str d15, [%0, #56]" \ + : : "r"(vfp_regs) : "memory"); + asm volatile ("mrs %0, fpcr" : "=r"(fp_control)); +#endif + + retval = bcmp(vfp_regs, vfp_regs_expected, sizeof(vfp_state_test_regs)); + if ((retval != 0) || (fp_control != fp_control_expected)) { + goto vfp_state_thread_cmp_failure; + } + + delay(VFP_STATE_TEST_DELAY_USEC); + } + + vfp_state_test_args->result = 0; + +vfp_state_thread_cmp_failure: + kfree(vfp_regs_expected, sizeof(vfp_state_test_regs)); +vfp_state_thread_kalloc2_failure: + kfree(vfp_regs, sizeof(vfp_state_test_regs)); +vfp_state_thread_kalloc1_failure: + + /* Signal that the thread has finished, and terminate */ + wake_threads(vfp_state_test_args->end_barrier); + thread_terminate_self(); +} + +/* + * This test spawns N threads that preload unique values into + * callee-saved VFP registers and then repeatedly check them + * for correctness after waking up from delay() + */ +kern_return_t +vfp_state_test(void) +{ + thread_t vfp_state_thread[VFP_STATE_TEST_N_THREADS]; + struct vfp_state_test_args vfp_state_test_args[VFP_STATE_TEST_N_THREADS]; + kern_return_t retval; + int start_barrier = 0, end_barrier = 0; + + /* Spawn threads */ + for (int i = 0; i < VFP_STATE_TEST_N_THREADS; i++) { + vfp_state_test_args[i].start_barrier = &start_barrier; + vfp_state_test_args[i].end_barrier = &end_barrier; +#if __arm__ + vfp_state_test_args[i].fp_control_mask = (i % VFP_STATE_TEST_NZCV_MAX) << VFP_STATE_TEST_NZCV_SHIFT; +#else + 
vfp_state_test_args[i].fp_control_mask = (i % VFP_STATE_TEST_RMODE_STRIDE_MAX) << VFP_STATE_TEST_RMODE_STRIDE_SHIFT; +#endif + read_random(&vfp_state_test_args[i].vfp_reg_rand, sizeof(uint64_t)); + + retval = kernel_thread_start((thread_continue_t)vfp_state_test_thread_routine, + (void *)&vfp_state_test_args[i], + &vfp_state_thread[i]); + + T_EXPECT((retval == KERN_SUCCESS), "thread %d started", i); + } + + /* Wait for all threads to finish */ + wait_threads(&end_barrier, VFP_STATE_TEST_N_THREADS); + + /* Check if all threads completed successfully */ + for (int i = 0; i < VFP_STATE_TEST_N_THREADS; i++) { + T_EXPECT((vfp_state_test_args[i].result == 0), "thread %d finished", i); + } + + return KERN_SUCCESS; +} +#endif /* __ARM_VFP__ */ diff --git a/osfmk/tests/xnupost.h b/osfmk/tests/xnupost.h index 4d22f2639..1e3f3042f 100644 --- a/osfmk/tests/xnupost.h +++ b/osfmk/tests/xnupost.h @@ -125,7 +125,7 @@ kern_return_t xnupost_run_tests(xnupost_test_t test_list, uint32_t test_count); kern_return_t xnupost_list_tests(xnupost_test_t test_list, uint32_t test_count); kern_return_t xnupost_reset_tests(xnupost_test_t test_list, uint32_t test_count); -int xnupost_export_testdata(void * outp, uint32_t size, uint32_t * lenp); +int xnupost_export_testdata(void * outp, size_t size, uint32_t * lenp); uint32_t xnupost_get_estimated_testdata_size(void); kern_return_t kernel_do_post(void); diff --git a/osfmk/vm/bsd_vm.c b/osfmk/vm/bsd_vm.c index cedf45dbc..006350038 100644 --- a/osfmk/vm/bsd_vm.c +++ b/osfmk/vm/bsd_vm.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2019 Apple Inc. All rights reserved. + * Copyright (c) 2000-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -152,17 +152,14 @@ struct vnode * vnode_pager_lookup_vnode( /* forward */ memory_object_t); -zone_t vnode_pager_zone; - +ZONE_DECLARE(vnode_pager_zone, "vnode pager structures", + sizeof(struct vnode_pager), ZC_NOENCRYPT); #define VNODE_PAGER_NULL ((vnode_pager_t) 0) /* TODO: Should be set dynamically by vnode_pager_init() */ #define CLUSTER_SHIFT 1 -/* TODO: Should be set dynamically by vnode_pager_bootstrap() */ -#define MAX_VNODE 10000 - #if DEBUG int pagerdebug = 0; @@ -267,7 +264,7 @@ memory_object_control_uiomove( * We're modifying a code-signed * page: force revalidate */ - dst_page->vmp_cs_validated = FALSE; + dst_page->vmp_cs_validated = VMP_CS_ALL_FALSE; VM_PAGEOUT_DEBUG(vm_cs_validated_resets, 1); @@ -352,33 +349,6 @@ memory_object_control_uiomove( } -/* - * - */ -void -vnode_pager_bootstrap(void) -{ - vm_size_t size; - - size = (vm_size_t) sizeof(struct vnode_pager); - vnode_pager_zone = zinit(size, (vm_size_t) MAX_VNODE * size, - PAGE_SIZE, "vnode pager structures"); - zone_change(vnode_pager_zone, Z_CALLERACCT, FALSE); - zone_change(vnode_pager_zone, Z_NOENCRYPT, TRUE); - - -#if CONFIG_CODE_DECRYPTION - apple_protect_pager_bootstrap(); -#endif /* CONFIG_CODE_DECRYPTION */ - swapfile_pager_bootstrap(); -#if __arm64__ - fourk_pager_bootstrap(); -#endif /* __arm64__ */ - shared_region_pager_bootstrap(); - - return; -} - /* * */ @@ -458,6 +428,8 @@ vnode_pager_data_return( { vnode_pager_t vnode_object; + assertf(page_aligned(offset), "offset 0x%llx\n", offset); + vnode_object = vnode_pager_lookup(mem_obj); vnode_pager_cluster_write(vnode_object, offset, data_cnt, resid_offset, io_error, upl_flags); @@ -648,6 +620,8 @@ vnode_pager_data_request( vm_size_t size; uint32_t io_streaming = 0; + assertf(page_aligned(offset), "offset 0x%llx\n", offset); + vnode_object = vnode_pager_lookup(mem_obj); size = 
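/*
 * Illustrative sketch of the ZONE_DECLARE() pattern adopted for the vnode
 * pager zone above: the zone is declared and parameterized at compile time,
 * so the old vnode_pager_bootstrap() zinit()/zone_change() sequence is no
 * longer needed. The zone and structure names here are sketch-local.
 */
struct sketch_pager {
	int refcount;
};

ZONE_DECLARE(sketch_pager_zone, "sketch pager structures",
    sizeof(struct sketch_pager), ZC_NOENCRYPT);

static struct sketch_pager *
sketch_pager_alloc(void)
{
	/* zalloc() from a statically declared zone; no bootstrap call required */
	return zalloc(sketch_pager_zone);
}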
MAX_UPL_TRANSFER_BYTES; @@ -996,6 +970,7 @@ fill_procregioninfo(task_t task, uint64_t arg, struct proc_regioninfo_internal * vm_region_extended_info_data_t extended; vm_region_top_info_data_t top; boolean_t do_region_footprint; + int effective_page_shift, effective_page_size; task_lock(task); map = task->map; @@ -1003,6 +978,10 @@ fill_procregioninfo(task_t task, uint64_t arg, struct proc_regioninfo_internal * task_unlock(task); return 0; } + + effective_page_shift = vm_self_region_page_shift(map); + effective_page_size = (1 << effective_page_shift); + vm_map_reference(map); task_unlock(task); @@ -1046,19 +1025,19 @@ fill_procregioninfo(task_t task, uint64_t arg, struct proc_regioninfo_internal * pinfo->pri_user_wired_count = 0; pinfo->pri_user_tag = -1; pinfo->pri_pages_resident = - (uint32_t) (ledger_resident / PAGE_SIZE); + (uint32_t) (ledger_resident / effective_page_size); pinfo->pri_pages_shared_now_private = 0; pinfo->pri_pages_swapped_out = - (uint32_t) (ledger_compressed / PAGE_SIZE); + (uint32_t) (ledger_compressed / effective_page_size); pinfo->pri_pages_dirtied = - (uint32_t) (ledger_resident / PAGE_SIZE); + (uint32_t) (ledger_resident / effective_page_size); pinfo->pri_ref_count = 1; pinfo->pri_shadow_depth = 0; pinfo->pri_share_mode = SM_PRIVATE; pinfo->pri_private_pages_resident = - (uint32_t) (ledger_resident / PAGE_SIZE); + (uint32_t) (ledger_resident / effective_page_size); pinfo->pri_shared_pages_resident = 0; - pinfo->pri_obj_id = INFO_MAKE_FAKE_OBJECT_ID(map, task_ledgers.purgeable_nonvolatile); + pinfo->pri_obj_id = VM_OBJECT_ID_FAKE(map, task_ledgers.purgeable_nonvolatile); pinfo->pri_address = address; pinfo->pri_size = (uint64_t) (ledger_resident + ledger_compressed); diff --git a/osfmk/vm/device_vm.c b/osfmk/vm/device_vm.c index 377d1aacc..631cc4e37 100644 --- a/osfmk/vm/device_vm.c +++ b/osfmk/vm/device_vm.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2019 Apple Inc. All rights reserved. + * Copyright (c) 2000-2020 Apple Inc. All rights reserved. 
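/*
 * Illustrative sketch of the per-map page-size handling added to
 * fill_procregioninfo() above: ledger values are byte counts, and with mixed
 * page sizes they must be divided by the region's effective page size rather
 * than the global PAGE_SIZE. Uses vm_self_region_page_shift() as in the hunk.
 */
static uint32_t
ledger_bytes_to_pages_sketch(vm_map_t map, uint64_t ledger_bytes)
{
	int effective_page_shift = vm_self_region_page_shift(map);
	uint64_t effective_page_size = 1ULL << effective_page_shift;

	return (uint32_t)(ledger_bytes / effective_page_size);
}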
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -99,15 +99,14 @@ typedef struct device_pager { boolean_t is_mapped; } *device_pager_t; -lck_grp_t device_pager_lck_grp; -lck_grp_attr_t device_pager_lck_grp_attr; -lck_attr_t device_pager_lck_attr; +LCK_GRP_DECLARE(device_pager_lck_grp, "device_pager"); -#define device_pager_lock_init(pager) \ - lck_mtx_init(&(pager)->lock, \ - &device_pager_lck_grp, \ - &device_pager_lck_attr) -#define device_pager_lock_destroy(pager) \ +ZONE_DECLARE(device_pager_zone, "device node pager structures", + sizeof(struct device_pager), ZC_NONE); + +#define device_pager_lock_init(pager) \ + lck_mtx_init(&(pager)->lock, &device_pager_lck_grp, LCK_ATTR_NULL) +#define device_pager_lock_destroy(pager) \ lck_mtx_destroy(&(pager)->lock, &device_pager_lck_grp) #define device_pager_lock(pager) lck_mtx_lock(&(pager)->lock) #define device_pager_unlock(pager) lck_mtx_unlock(&(pager)->lock) @@ -119,38 +118,11 @@ device_pager_lookup( /* forward */ device_pager_t device_object_create(void); /* forward */ -zone_t device_pager_zone; - - #define DEVICE_PAGER_NULL ((device_pager_t) 0) - #define MAX_DNODE 10000 - - - -/* - * - */ -void -device_pager_bootstrap(void) -{ - vm_size_t size; - - size = (vm_size_t) sizeof(struct device_pager); - device_pager_zone = zinit(size, (vm_size_t) MAX_DNODE * size, - PAGE_SIZE, "device node pager structures"); - zone_change(device_pager_zone, Z_CALLERACCT, FALSE); - - lck_grp_attr_setdefault(&device_pager_lck_grp_attr); - lck_grp_init(&device_pager_lck_grp, "device_pager", &device_pager_lck_grp_attr); - lck_attr_setdefault(&device_pager_lck_attr); - - return; -} - /* * */ diff --git a/osfmk/vm/memory_object.c b/osfmk/vm/memory_object.c index 207ee0fbc..e4013fd82 100644 --- a/osfmk/vm/memory_object.c +++ b/osfmk/vm/memory_object.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2019 Apple Inc. All rights reserved. + * Copyright (c) 2000-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -105,7 +105,7 @@ #include memory_object_default_t memory_manager_default = MEMORY_OBJECT_DEFAULT_NULL; -decl_lck_mtx_data(, memory_manager_default_lock); +LCK_MTX_EARLY_DECLARE(memory_manager_default_lock, &vm_object_lck_grp); /* @@ -532,15 +532,24 @@ vm_object_update_extent( vm_object_offset_t next_offset = offset; memory_object_lock_result_t page_lock_result; memory_object_cluster_size_t data_cnt = 0; - struct vm_page_delayed_work dw_array[DEFAULT_DELAYED_WORK_LIMIT]; - struct vm_page_delayed_work *dwp; + struct vm_page_delayed_work dw_array; + struct vm_page_delayed_work *dwp, *dwp_start; + bool dwp_finish_ctx = TRUE; int dw_count; int dw_limit; int dirty_count; - dwp = &dw_array[0]; + dwp_start = dwp = NULL; dw_count = 0; dw_limit = DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT); + dwp_start = vm_page_delayed_work_get_ctx(); + if (dwp_start == NULL) { + dwp_start = &dw_array; + dw_limit = 1; + dwp_finish_ctx = FALSE; + } + dwp = dwp_start; + dirty_count = 0; for (; @@ -553,8 +562,8 @@ vm_object_update_extent( if (data_cnt) { if ((data_cnt >= MAX_UPL_TRANSFER_BYTES) || (next_offset != offset)) { if (dw_count) { - vm_page_do_delayed_work(object, VM_KERN_MEMORY_NONE, &dw_array[0], dw_count); - dwp = &dw_array[0]; + vm_page_do_delayed_work(object, VM_KERN_MEMORY_NONE, dwp_start, dw_count); + dwp = dwp_start; dw_count = 0; } LIST_REQ_PAGEOUT_PAGES(object, data_cnt, @@ -572,8 +581,8 @@ vm_object_update_extent( * End of a run of dirty/precious pages. 
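/*
 * Illustrative sketch of the statically declared lock group used by the
 * device pager above: LCK_GRP_DECLARE() removes the need for a bootstrap
 * routine calling lck_grp_attr_setdefault()/lck_grp_init(), and per-pager
 * mutexes are initialized against the group with LCK_ATTR_NULL.
 * Names below are local to the sketch.
 */
LCK_GRP_DECLARE(sketch_pager_lck_grp, "sketch_pager");

struct sketch_locked_pager {
	lck_mtx_t lock;
};

static void
sketch_pager_lock_setup(struct sketch_locked_pager *p)
{
	lck_mtx_init(&p->lock, &sketch_pager_lck_grp, LCK_ATTR_NULL);
}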
*/ if (dw_count) { - vm_page_do_delayed_work(object, VM_KERN_MEMORY_NONE, &dw_array[0], dw_count); - dwp = &dw_array[0]; + vm_page_do_delayed_work(object, VM_KERN_MEMORY_NONE, dwp_start, dw_count); + dwp = dwp_start; dw_count = 0; } LIST_REQ_PAGEOUT_PAGES(object, data_cnt, @@ -638,8 +647,8 @@ vm_object_update_extent( VM_PAGE_ADD_DELAYED_WORK(dwp, m, dw_count); if (dw_count >= dw_limit) { - vm_page_do_delayed_work(object, VM_KERN_MEMORY_NONE, &dw_array[0], dw_count); - dwp = &dw_array[0]; + vm_page_do_delayed_work(object, VM_KERN_MEMORY_NONE, dwp_start, dw_count); + dwp = dwp_start; dw_count = 0; } } @@ -655,13 +664,19 @@ vm_object_update_extent( * Clean any pages that have been saved. */ if (dw_count) { - vm_page_do_delayed_work(object, VM_KERN_MEMORY_NONE, &dw_array[0], dw_count); + vm_page_do_delayed_work(object, VM_KERN_MEMORY_NONE, dwp_start, dw_count); } if (data_cnt) { LIST_REQ_PAGEOUT_PAGES(object, data_cnt, paging_offset, offset_resid, io_errno, should_iosync); } + + if (dwp_start && dwp_finish_ctx) { + vm_page_delayed_work_finish_ctx(dwp_start); + dwp_start = dwp = NULL; + } + return retval; } @@ -757,9 +772,9 @@ vm_object_update( } } if ((copy_object != VM_OBJECT_NULL && update_cow) || (flags & MEMORY_OBJECT_DATA_SYNC)) { - vm_map_size_t i; - vm_map_size_t copy_size; - vm_map_offset_t copy_offset; + vm_object_offset_t i; + vm_object_size_t copy_size; + vm_object_offset_t copy_offset; vm_prot_t prot; vm_page_t page; vm_page_t top_page; @@ -771,8 +786,7 @@ vm_object_update( * translate offset with respect to shadow's offset */ copy_offset = (offset >= copy_object->vo_shadow_offset) ? - (vm_map_offset_t)(offset - copy_object->vo_shadow_offset) : - (vm_map_offset_t) 0; + (offset - copy_object->vo_shadow_offset) : 0; if (copy_offset > copy_object->vo_size) { copy_offset = copy_object->vo_size; @@ -784,7 +798,7 @@ vm_object_update( if (offset >= copy_object->vo_shadow_offset) { copy_size = size; } else if (size >= copy_object->vo_shadow_offset - offset) { - copy_size = size - (copy_object->vo_shadow_offset - offset); + copy_size = (size - (copy_object->vo_shadow_offset - offset)); } else { copy_size = 0; } @@ -870,7 +884,7 @@ RETRY_COW_OF_LOCK_REQUEST: /* success but no VM page: fail */ vm_object_paging_end(copy_object); vm_object_unlock(copy_object); - /*FALLTHROUGH*/ + OS_FALLTHROUGH; case VM_FAULT_MEMORY_ERROR: if (object != copy_object) { vm_object_deallocate(copy_object); @@ -1431,15 +1445,20 @@ memory_object_iopl_request( /* the callers parameter offset is defined to be the */ /* offset from beginning of named entry offset in object */ offset = offset + named_entry->offset; + offset += named_entry->data_offset; if (named_entry->is_sub_map || named_entry->is_copy) { return KERN_INVALID_ARGUMENT; } + if (!named_entry->is_object) { + return KERN_INVALID_ARGUMENT; + } named_entry_lock(named_entry); - object = named_entry->backing.object; + object = vm_named_entry_to_vm_object(named_entry); + assert(object != VM_OBJECT_NULL); vm_object_reference(object); named_entry_unlock(named_entry); } else if (ip_kotype(port) == IKOT_MEM_OBJ_CONTROL) { @@ -1503,6 +1522,8 @@ memory_object_upl_request( int tag) { vm_object_t object; + vm_tag_t vmtag = (vm_tag_t)tag; + assert(vmtag == tag); object = memory_object_control_to_vm_object(control); if (object == VM_OBJECT_NULL) { @@ -1516,7 +1537,7 @@ memory_object_upl_request( user_page_list, page_list_count, (upl_control_flags_t)(unsigned int) cntrl_flags, - tag); + vmtag); } /* @@ -1543,6 +1564,8 @@ memory_object_super_upl_request( int tag) { 
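/*
 * Illustrative sketch of the delayed-work context pattern adopted in
 * vm_object_update_extent() above: a heap-backed batch is preferred, with a
 * single-entry on-stack fallback when vm_page_delayed_work_get_ctx() cannot
 * allocate, and the context is only released if it actually came from the
 * allocator.
 */
static void
delayed_work_ctx_sketch(void)
{
	struct vm_page_delayed_work dw_array;
	struct vm_page_delayed_work *dwp_start;
	bool dwp_finish_ctx = true;
	int dw_limit = DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT);

	dwp_start = vm_page_delayed_work_get_ctx();
	if (dwp_start == NULL) {
		dwp_start = &dw_array;     /* degraded mode: one entry at a time */
		dw_limit = 1;
		dwp_finish_ctx = false;
	}

	/* ... accumulate up to dw_limit pages, flushing batches as needed ... */
	(void)dw_limit;

	if (dwp_finish_ctx) {
		vm_page_delayed_work_finish_ctx(dwp_start);
	}
}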
vm_object_t object; + vm_tag_t vmtag = (vm_tag_t)tag; + assert(vmtag == tag); object = memory_object_control_to_vm_object(control); if (object == VM_OBJECT_NULL) { @@ -1557,7 +1580,7 @@ memory_object_super_upl_request( user_page_list, page_list_count, (upl_control_flags_t)(unsigned int) cntrl_flags, - tag); + vmtag); } kern_return_t @@ -1748,15 +1771,6 @@ memory_manager_default_check(void) } } -__private_extern__ void -memory_manager_default_init(void) -{ - memory_manager_default = MEMORY_OBJECT_DEFAULT_NULL; - lck_mtx_init(&memory_manager_default_lock, &vm_object_lck_grp, &vm_object_lck_attr); -} - - - /* Allow manipulation of individual page state. This is actually part of */ /* the UPL regimen but takes place on the object rather than on a UPL */ @@ -1992,19 +2006,8 @@ memory_object_is_shared_cache( return object->object_is_shared_cache; } -static zone_t mem_obj_control_zone; - -__private_extern__ void -memory_object_control_bootstrap(void) -{ - int i; - - i = (vm_size_t) sizeof(struct memory_object_control); - mem_obj_control_zone = zinit(i, 8192 * i, 4096, "mem_obj_control"); - zone_change(mem_obj_control_zone, Z_CALLERACCT, FALSE); - zone_change(mem_obj_control_zone, Z_NOENCRYPT, TRUE); - return; -} +static ZONE_DECLARE(mem_obj_control_zone, "mem_obj_control", + sizeof(struct memory_object_control), ZC_NOENCRYPT); __private_extern__ memory_object_control_t memory_object_control_allocate( diff --git a/osfmk/vm/memory_object.h b/osfmk/vm/memory_object.h index cc4eba042..930a66023 100644 --- a/osfmk/vm/memory_object.h +++ b/osfmk/vm/memory_object.h @@ -74,11 +74,6 @@ memory_object_default_t memory_manager_default_reference(void); __private_extern__ kern_return_t memory_manager_default_check(void); -__private_extern__ -void memory_manager_default_init(void); - -__private_extern__ -void memory_object_control_bootstrap(void); __private_extern__ memory_object_control_t memory_object_control_allocate( vm_object_t object); diff --git a/osfmk/vm/pmap.h b/osfmk/vm/pmap.h index 873bae998..d97f5b5e7 100644 --- a/osfmk/vm/pmap.h +++ b/osfmk/vm/pmap.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2012 Apple Inc. All rights reserved. + * Copyright (c) 2000-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -76,6 +76,7 @@ #include + #ifdef KERNEL_PRIVATE /* @@ -193,6 +194,7 @@ extern pmap_t(pmap_kernel)(void); /* Return the kernel's pmap */ extern void pmap_reference(pmap_t pmap); /* Gain a reference. */ extern void pmap_destroy(pmap_t pmap); /* Release a reference. 
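/*
 * Illustrative sketch of the tag-narrowing check added to the UPL request
 * paths above: the external interface still carries the tag as an int, the
 * internal path takes a vm_tag_t, and the assert guarantees the conversion
 * did not truncate the value.
 */
static vm_tag_t
narrow_vm_tag_sketch(int tag)
{
	vm_tag_t vmtag = (vm_tag_t)tag;

	assert(vmtag == tag);   /* conversion must be value-preserving */
	return vmtag;
}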
*/ extern void pmap_switch(pmap_t); +extern void pmap_require(pmap_t pmap); #if MACH_ASSERT extern void pmap_set_process(pmap_t pmap, @@ -219,6 +221,16 @@ extern kern_return_t pmap_enter_options( boolean_t wired, unsigned int options, void *arg); +extern kern_return_t pmap_enter_options_addr( + pmap_t pmap, + vm_map_offset_t v, + pmap_paddr_t pa, + vm_prot_t prot, + vm_prot_t fault_type, + unsigned int flags, + boolean_t wired, + unsigned int options, + void *arg); extern void pmap_remove_some_phys( pmap_t pmap, @@ -447,8 +459,9 @@ extern kern_return_t(pmap_attribute)( /* Get/Set special memory #endif /* !PMAP_ENTER */ #ifndef PMAP_ENTER_OPTIONS -#define PMAP_ENTER_OPTIONS(pmap, virtual_address, page, protection, \ - fault_type, flags, wired, options, result) \ +#define PMAP_ENTER_OPTIONS(pmap, virtual_address, fault_phys_offset, \ + page, protection, \ + fault_type, flags, wired, options, result) \ MACRO_BEGIN \ pmap_t __pmap = (pmap); \ vm_page_t __page = (page); \ @@ -463,9 +476,12 @@ extern kern_return_t(pmap_attribute)( /* Get/Set special memory if (__page->vmp_reusable || __obj->all_reusable) { \ __extra_options |= PMAP_OPTIONS_REUSABLE; \ } \ - result = pmap_enter_options(__pmap, \ + result = pmap_enter_options_addr(__pmap, \ (virtual_address), \ - VM_PAGE_GET_PHYS_PAGE(__page), \ + (((pmap_paddr_t) \ + VM_PAGE_GET_PHYS_PAGE(__page) \ + << PAGE_SHIFT) \ + + fault_phys_offset), \ (protection), \ (fault_type), \ (flags), \ @@ -583,6 +599,22 @@ extern void pmap_clear_refmod(ppnum_t pn, unsigned int mask) #define VM_MEM_REFERENCED 0x02 /* Referenced bit */ extern void pmap_clear_refmod_options(ppnum_t pn, unsigned int mask, unsigned int options, void *); +/* + * Clears the reference and/or modified bits on a range of virtually + * contiguous pages. + * It returns true if the operation succeeded. If it returns false, + * nothing has been modified. + * This operation is only supported on some platforms, so callers MUST + * handle the case where it returns false. + */ +extern bool +pmap_clear_refmod_range_options( + pmap_t pmap, + vm_map_address_t start, + vm_map_address_t end, + unsigned int mask, + unsigned int options); + extern void pmap_flush_context_init(pmap_flush_context *); extern void pmap_flush(pmap_flush_context *); @@ -610,14 +642,14 @@ extern void(pmap_pageable)( vm_map_offset_t end, boolean_t pageable); +extern uint64_t pmap_shared_region_size_min(pmap_t map); -extern uint64_t pmap_nesting_size_min; -extern uint64_t pmap_nesting_size_max; +/* TODO: Completely remove pmap_nesting_size_max() */ +extern uint64_t pmap_nesting_size_max(pmap_t map); extern kern_return_t pmap_nest(pmap_t, pmap_t, addr64_t, - addr64_t, uint64_t); extern kern_return_t pmap_unnest(pmap_t, addr64_t, @@ -652,12 +684,33 @@ extern pmap_t kernel_pmap; /* The kernel's map */ /* N.B. 
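/*
 * Illustrative sketch of the address computation the revised
 * PMAP_ENTER_OPTIONS() macro performs above: with sub-page fault handling the
 * physical target passed to pmap_enter_options_addr() is the page's base
 * physical address plus the fault offset within that page. The parameter
 * types of this helper are sketch assumptions.
 */
static pmap_paddr_t
fault_phys_addr_sketch(ppnum_t phys_page, vm_map_offset_t fault_phys_offset)
{
	return ((pmap_paddr_t)phys_page << PAGE_SHIFT) + fault_phys_offset;
}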
These use the same numerical space as the PMAP_EXPAND_OPTIONS * definitions in i386/pmap_internal.h */ -#define PMAP_CREATE_64BIT 0x1 +#define PMAP_CREATE_64BIT 0x1 + #if __x86_64__ -#define PMAP_CREATE_EPT 0x2 + +#define PMAP_CREATE_EPT 0x2 #define PMAP_CREATE_KNOWN_FLAGS (PMAP_CREATE_64BIT | PMAP_CREATE_EPT) + +#else + +#define PMAP_CREATE_STAGE2 0 +#define PMAP_CREATE_DISABLE_JOP 0 +#if __ARM_MIXED_PAGE_SIZE__ +#define PMAP_CREATE_FORCE_4K_PAGES 0x8 +#else +#define PMAP_CREATE_FORCE_4K_PAGES 0 +#endif /* __ARM_MIXED_PAGE_SIZE__ */ +#if __arm64__ +#define PMAP_CREATE_X86_64 0 +#else +#define PMAP_CREATE_X86_64 0 #endif +/* Define PMAP_CREATE_KNOWN_FLAGS in terms of optional flags */ +#define PMAP_CREATE_KNOWN_FLAGS (PMAP_CREATE_64BIT | PMAP_CREATE_STAGE2 | PMAP_CREATE_DISABLE_JOP | PMAP_CREATE_FORCE_4K_PAGES | PMAP_CREATE_X86_64) + +#endif /* __x86_64__ */ + #define PMAP_OPTIONS_NOWAIT 0x1 /* don't block, return * KERN_RESOURCE_SHORTAGE * instead */ @@ -679,7 +732,11 @@ extern pmap_t kernel_pmap; /* The kernel's map */ #define PMAP_OPTIONS_PROTECT_IMMEDIATE 0x1000 /* allow protections to be * be upgraded */ #define PMAP_OPTIONS_CLEAR_WRITE 0x2000 - +#define PMAP_OPTIONS_TRANSLATED_ALLOW_EXECUTE 0x4000 /* Honor execute for translated processes */ +#if defined(__arm__) || defined(__arm64__) +#define PMAP_OPTIONS_FF_LOCKED 0x8000 +#define PMAP_OPTIONS_FF_WIRED 0x10000 +#endif #if !defined(__LP64__) extern vm_offset_t pmap_extract(pmap_t pmap, @@ -715,29 +772,35 @@ mach_vm_size_t pmap_query_resident(pmap_t pmap, vm_map_offset_t e, mach_vm_size_t *compressed_bytes_p); +extern void pmap_set_vm_map_cs_enforced(pmap_t pmap, bool new_value); +extern bool pmap_get_vm_map_cs_enforced(pmap_t pmap); + /* Inform the pmap layer that there is a JIT entry in this map. */ extern void pmap_set_jit_entitled(pmap_t pmap); +/* Ask the pmap layer if there is a JIT entry in this map. */ +extern bool pmap_get_jit_entitled(pmap_t pmap); + /* * Tell the pmap layer what range within the nested region the VM intends to * use. */ -extern void pmap_trim(pmap_t grand, pmap_t subord, addr64_t vstart, addr64_t nstart, uint64_t size); +extern void pmap_trim(pmap_t grand, pmap_t subord, addr64_t vstart, uint64_t size); /* - * Dump page table contents into the specified buffer. Returns the number of - * bytes copied, 0 if insufficient space, (size_t)-1 if unsupported. + * Dump page table contents into the specified buffer. Returns KERN_INSUFFICIENT_BUFFER_SIZE + * if insufficient space, KERN_NOT_SUPPORTED if unsupported in the current configuration. * This is expected to only be called from kernel debugger context, * so synchronization is not required. */ -extern size_t pmap_dump_page_tables(pmap_t pmap, void *bufp, void *buf_end); +extern kern_return_t pmap_dump_page_tables(pmap_t pmap, void *bufp, void *buf_end, unsigned int level_mask, size_t *bytes_copied); /* * Indicates if any special policy is applied to this protection by the pmap * layer. 
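/*
 * Illustrative sketch of a caller adapting to the revised
 * pmap_dump_page_tables() contract above: failures now come back as
 * kern_return_t values (KERN_INSUFFICIENT_BUFFER_SIZE / KERN_NOT_SUPPORTED)
 * and the byte count is returned through an out parameter. The level_mask
 * value of 0 meaning "all levels" is an assumption of this sketch.
 */
static kern_return_t
dump_kernel_page_tables_sketch(void *buf, void *buf_end, size_t *out_bytes)
{
	size_t bytes_copied = 0;
	kern_return_t kr;

	kr = pmap_dump_page_tables(kernel_pmap, buf, buf_end,
	    0 /* level_mask: sketch assumption */, &bytes_copied);
	if (kr == KERN_SUCCESS) {
		*out_bytes = bytes_copied;
	}
	return kr;
}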
*/ -bool pmap_has_prot_policy(vm_prot_t prot); +bool pmap_has_prot_policy(pmap_t pmap, bool translated_allow_execute, vm_prot_t prot); /* * Causes the pmap to return any available pages that it can return cheaply to @@ -777,6 +840,16 @@ struct pmap_legacy_trust_cache; extern kern_return_t pmap_load_legacy_trust_cache(struct pmap_legacy_trust_cache *trust_cache, const vm_size_t trust_cache_len); +typedef enum { + PMAP_TC_TYPE_PERSONALIZED, + PMAP_TC_TYPE_PDI, + PMAP_TC_TYPE_CRYPTEX, + PMAP_TC_TYPE_ENGINEERING, + PMAP_TC_TYPE_GLOBAL_FF00, + PMAP_TC_TYPE_GLOBAL_FF01, +} pmap_tc_type_t; + +#define PMAP_IMAGE4_TRUST_CACHE_HAS_TYPE 1 struct pmap_image4_trust_cache { // Filled by pmap layer. struct pmap_image4_trust_cache const *next; // linked list linkage @@ -785,6 +858,9 @@ struct pmap_image4_trust_cache { // Filled by caller. // data is either an image4, // or just the trust cache payload itself if the image4 manifest is external. + pmap_tc_type_t type; + size_t bnch_len; + uint8_t const bnch[48]; size_t data_len; uint8_t const data[]; }; @@ -799,8 +875,18 @@ typedef enum { PMAP_TC_TOO_BIG = -6, PMAP_TC_RESOURCE_SHORTAGE = -7, PMAP_TC_MANIFEST_TOO_BIG = -8, + PMAP_TC_MANIFEST_VIOLATION = -9, + PMAP_TC_PAYLOAD_VIOLATION = -10, + PMAP_TC_EXPIRED = -11, + PMAP_TC_CRYPTO_WRONG = -12, + PMAP_TC_OBJECT_WRONG = -13, + PMAP_TC_UNKNOWN_CALLER = -14, + PMAP_TC_UNKNOWN_FAILURE = -15, } pmap_tc_ret_t; +#define PMAP_HAS_LOCKDOWN_IMAGE4_SLAB 1 +extern void pmap_lockdown_image4_slab(vm_offset_t slab, vm_size_t slab_len, uint64_t flags); + extern pmap_tc_ret_t pmap_load_image4_trust_cache( struct pmap_image4_trust_cache *trust_cache, vm_size_t trust_cache_len, uint8_t const *img4_manifest, @@ -821,6 +907,12 @@ extern void pmap_ledger_alloc_init(size_t); extern ledger_t pmap_ledger_alloc(void); extern void pmap_ledger_free(ledger_t); +#if __arm64__ +extern bool pmap_is_exotic(pmap_t pmap); +#else /* __arm64__ */ +#define pmap_is_exotic(pmap) false +#endif /* __arm64__ */ + #endif /* KERNEL_PRIVATE */ #endif /* _VM_PMAP_H_ */ diff --git a/osfmk/vm/vm32_user.c b/osfmk/vm/vm32_user.c index fa529ec93..6aea0c17f 100644 --- a/osfmk/vm/vm32_user.c +++ b/osfmk/vm/vm32_user.c @@ -79,7 +79,6 @@ #include #include -#include #include #include #include diff --git a/osfmk/vm/vm_apple_protect.c b/osfmk/vm/vm_apple_protect.c index 1b7922574..3174fcdc5 100644 --- a/osfmk/vm/vm_apple_protect.c +++ b/osfmk/vm/vm_apple_protect.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019 Apple Inc. All rights reserved. + * Copyright (c) 2006-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -166,8 +166,9 @@ typedef struct apple_protect_pager { */ int apple_protect_pager_count = 0; /* number of pagers */ int apple_protect_pager_count_mapped = 0; /* number of unmapped pagers */ -queue_head_t apple_protect_pager_queue; -decl_lck_mtx_data(, apple_protect_pager_lock); +queue_head_t apple_protect_pager_queue = QUEUE_HEAD_INITIALIZER(apple_protect_pager_queue); +LCK_GRP_DECLARE(apple_protect_pager_lck_grp, "apple_protect"); +LCK_MTX_DECLARE(apple_protect_pager_lock, &apple_protect_pager_lck_grp); /* * Maximum number of unmapped pagers we're willing to keep around. 
@@ -183,10 +184,6 @@ int apple_protect_pager_num_trim_max = 0; int apple_protect_pager_num_trim_total = 0; -lck_grp_t apple_protect_pager_lck_grp; -lck_grp_attr_t apple_protect_pager_lck_grp_attr; -lck_attr_t apple_protect_pager_lck_attr; - /* internal prototypes */ apple_protect_pager_t apple_protect_pager_create( @@ -220,17 +217,6 @@ int apple_protect_pagerdebug = 0; #define PAGER_DEBUG(LEVEL, A) #endif - -void -apple_protect_pager_bootstrap(void) -{ - lck_grp_attr_setdefault(&apple_protect_pager_lck_grp_attr); - lck_grp_init(&apple_protect_pager_lck_grp, "apple_protect", &apple_protect_pager_lck_grp_attr); - lck_attr_setdefault(&apple_protect_pager_lck_attr); - lck_mtx_init(&apple_protect_pager_lock, &apple_protect_pager_lck_grp, &apple_protect_pager_lck_attr); - queue_init(&apple_protect_pager_queue); -} - /* * apple_protect_pager_init() * @@ -470,7 +456,7 @@ retry_src_fault: if (vm_page_wait(interruptible)) { goto retry_src_fault; } - /* fall thru */ + OS_FALLTHROUGH; case VM_FAULT_INTERRUPTED: retval = MACH_SEND_INTERRUPTED; goto done; @@ -478,7 +464,7 @@ retry_src_fault: /* success but no VM page: fail */ vm_object_paging_end(src_top_object); vm_object_unlock(src_top_object); - /*FALLTHROUGH*/ + OS_FALLTHROUGH; case VM_FAULT_MEMORY_ERROR: /* the page is not there ! */ if (error_code) { @@ -525,7 +511,7 @@ retry_src_fault: */ if (src_page_object->code_signed) { vm_page_validate_cs_mapped( - src_page, + src_page, PAGE_SIZE, 0, (const void *) src_vaddr); } /* @@ -717,7 +703,10 @@ done: } } else { boolean_t empty; - upl_commit_range(upl, 0, upl->size, + assertf(page_aligned(upl->u_offset) && page_aligned(upl->u_size), + "upl %p offset 0x%llx size 0x%x", + upl, upl->u_offset, upl->u_size); + upl_commit_range(upl, 0, upl->u_size, UPL_COMMIT_CS_VALIDATED | UPL_COMMIT_WRITTEN_BY_KERNEL, upl_pl, pl_count, &empty); } diff --git a/osfmk/vm/vm_compressor.c b/osfmk/vm/vm_compressor.c index 37a6f2844..92a53eb22 100644 --- a/osfmk/vm/vm_compressor.c +++ b/osfmk/vm/vm_compressor.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2013 Apple Inc. All rights reserved. + * Copyright (c) 2000-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -54,13 +54,20 @@ #include extern boolean_t vm_darkwake_mode; +extern zone_t vm_page_zone; #if DEVELOPMENT || DEBUG +/* sysctl defined in bsd/dev/arm64/sysctl.c */ int do_cseg_wedge_thread(void); int do_cseg_unwedge_thread(void); static event_t debug_cseg_wait_event = NULL; #endif /* DEVELOPMENT || DEBUG */ +#if CONFIG_FREEZE +bool freezer_incore_cseg_acct = TRUE; /* Only count incore compressed memory for jetsams. */ +void task_disown_frozen_csegs(task_t owner_task); +#endif /* CONFIG_FREEZE */ + #if POPCOUNT_THE_COMPRESSED_DATA boolean_t popcount_c_segs = TRUE; @@ -114,29 +121,22 @@ boolean_t validate_c_segs = TRUE; #if CONFIG_EMBEDDED #if CONFIG_FREEZE -int vm_compressor_mode = VM_PAGER_FREEZER_DEFAULT; - -void *freezer_chead; /* The chead used to track c_segs allocated for the exclusive use of holding just one task's compressed memory.*/ -char *freezer_compressor_scratch_buf = NULL; - -extern int c_freezer_swapout_page_count; /* This count keeps track of the # of compressed pages holding just one task's compressed memory on the swapout queue. This count is used during each freeze i.e. 
on a per-task basis.*/ - +int vm_compressor_mode = VM_PAGER_FREEZER_DEFAULT; +struct freezer_context freezer_context_global; #else /* CONFIG_FREEZE */ -int vm_compressor_mode = VM_PAGER_NOT_CONFIGURED; +int vm_compressor_mode = VM_PAGER_NOT_CONFIGURED; #endif /* CONFIG_FREEZE */ -int vm_scale = 1; - #else /* CONFIG_EMBEDDED */ int vm_compressor_mode = VM_PAGER_COMPRESSOR_WITH_SWAP; -int vm_scale = 16; #endif /* CONFIG_EMBEDDED */ +TUNABLE(uint32_t, vm_compression_limit, "vm_compression_limit", 0); int vm_compressor_is_active = 0; -int vm_compression_limit = 0; int vm_compressor_available = 0; +extern uint64_t vm_swap_get_max_configured_space(void); extern void vm_pageout_io_throttle(void); #if CHECKSUM_THE_DATA || CHECKSUM_THE_SWAP || CHECKSUM_THE_COMPRESSED_DATA @@ -183,11 +183,18 @@ union c_segu { uintptr_t c_segno; }; +#define C_SLOT_ASSERT_PACKABLE(ptr) \ + VM_ASSERT_POINTER_PACKABLE((vm_offset_t)(ptr), C_SLOT_PACKED_PTR); +#define C_SLOT_PACK_PTR(ptr) \ + VM_PACK_POINTER((vm_offset_t)(ptr), C_SLOT_PACKED_PTR) -#define C_SLOT_PACK_PTR(ptr) (((uintptr_t)ptr - (uintptr_t) KERNEL_PMAP_HEAP_RANGE_START) >> 2) -#define C_SLOT_UNPACK_PTR(cslot) ((uintptr_t)(cslot->c_packed_ptr << 2) + (uintptr_t) KERNEL_PMAP_HEAP_RANGE_START) +#define C_SLOT_UNPACK_PTR(cslot) \ + (c_slot_mapping_t)VM_UNPACK_POINTER((cslot)->c_packed_ptr, C_SLOT_PACKED_PTR) +/* for debugging purposes */ +SECURITY_READ_ONLY_EARLY(vm_packing_params_t) c_slot_packing_params = + VM_PACKING_PARAMS(C_SLOT_PACKED_PTR); uint32_t c_segment_count = 0; uint32_t c_segment_count_max = 0; @@ -257,7 +264,12 @@ uint32_t c_segment_svp_nonzero_decompressions; uint32_t c_segment_noncompressible_pages; -uint32_t c_segment_pages_compressed; +uint32_t c_segment_pages_compressed = 0; /* Tracks # of uncompressed pages fed into the compressor */ +#if CONFIG_FREEZE +int32_t c_segment_pages_compressed_incore = 0; /* Tracks # of uncompressed pages fed into the compressor that are in memory */ +uint32_t c_segments_incore_limit = 0; /* Tracks # of segments allowed to be in-core. 
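/*
 * Illustrative sketch of the pointer-packing round trip behind the
 * C_SLOT_PACK_PTR()/C_SLOT_UNPACK_PTR() macros above: a slot-mapping pointer
 * is checked for packability against the C_SLOT_PACKED_PTR parameters,
 * stored in the narrow c_packed_ptr field, and later expanded back to the
 * same address. The helper name is local to this sketch.
 */
static void
c_slot_packing_sketch(c_slot_t cs, c_slot_mapping_t slot_ptr)
{
	C_SLOT_ASSERT_PACKABLE(slot_ptr);            /* fits the packing params */
	cs->c_packed_ptr = C_SLOT_PACK_PTR(slot_ptr);
	assert(C_SLOT_UNPACK_PTR(cs) == slot_ptr);   /* lossless round trip */
}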
Based on compressor pool size */ +#endif /* CONFIG_FREEZE */ + uint32_t c_segment_pages_compressed_limit; uint32_t c_segment_pages_compressed_nearing_limit; uint32_t c_free_segno_head = (uint32_t)-1; @@ -274,12 +286,10 @@ uint32_t vm_compressor_catchup_threshold_divisor_overridden = 0; #define C_SEGMENTS_PER_PAGE (PAGE_SIZE / sizeof(union c_segu)) +LCK_GRP_DECLARE(vm_compressor_lck_grp, "vm_compressor"); +LCK_RW_DECLARE(c_master_lock, &vm_compressor_lck_grp); +LCK_MTX_DECLARE(c_list_lock_storage, &vm_compressor_lck_grp); -lck_grp_attr_t vm_compressor_lck_grp_attr; -lck_attr_t vm_compressor_lck_attr; -lck_grp_t vm_compressor_lck_grp; -lck_mtx_t *c_list_lock; -lck_rw_t c_master_lock; boolean_t decompressions_blocked = FALSE; zone_t compressor_segment_zone; @@ -393,11 +403,27 @@ vm_compressor_pages_compressed(void) boolean_t vm_compressor_low_on_space(void) { +#if CONFIG_FREEZE + uint64_t incore_seg_count; + uint32_t incore_compressed_pages; + if (freezer_incore_cseg_acct) { + incore_seg_count = c_segment_count - c_swappedout_count - c_swappedout_sparse_count; + incore_compressed_pages = c_segment_pages_compressed_incore; + } else { + incore_seg_count = c_segment_count; + incore_compressed_pages = c_segment_pages_compressed; + } + + if ((incore_compressed_pages > c_segment_pages_compressed_nearing_limit) || + (incore_seg_count > c_segments_nearing_limit)) { + return TRUE; + } +#else /* CONFIG_FREEZE */ if ((c_segment_pages_compressed > c_segment_pages_compressed_nearing_limit) || (c_segment_count > c_segments_nearing_limit)) { return TRUE; } - +#endif /* CONFIG_FREEZE */ return FALSE; } @@ -405,11 +431,27 @@ vm_compressor_low_on_space(void) boolean_t vm_compressor_out_of_space(void) { +#if CONFIG_FREEZE + uint64_t incore_seg_count; + uint32_t incore_compressed_pages; + if (freezer_incore_cseg_acct) { + incore_seg_count = c_segment_count - c_swappedout_count - c_swappedout_sparse_count; + incore_compressed_pages = c_segment_pages_compressed_incore; + } else { + incore_seg_count = c_segment_count; + incore_compressed_pages = c_segment_pages_compressed; + } + + if ((incore_compressed_pages >= c_segment_pages_compressed_limit) || + (incore_seg_count > c_segments_incore_limit)) { + return TRUE; + } +#else /* CONFIG_FREEZE */ if ((c_segment_pages_compressed >= c_segment_pages_compressed_limit) || (c_segment_count >= c_segments_limit)) { return TRUE; } - +#endif /* CONFIG_FREEZE */ return FALSE; } @@ -432,8 +474,11 @@ vm_wants_task_throttled(task_t task) #if DEVELOPMENT || DEBUG -boolean_t kill_on_no_paging_space = FALSE; /* On compressor/swap exhaustion, kill the largest process regardless of - * its chosen process policy. Controlled by a boot-arg of the same name. */ +/* + * On compressor/swap exhaustion, kill the largest process regardless of + * its chosen process policy. + */ +TUNABLE(bool, kill_on_no_paging_space, "-kill_on_no_paging_space", false); #endif /* DEVELOPMENT || DEBUG */ #if !CONFIG_EMBEDDED @@ -448,7 +493,7 @@ vm_compressor_take_paging_space_action(void) if (OSCompareAndSwap(0, 1, (UInt32 *)&no_paging_space_action_in_progress)) { if (no_paging_space_action()) { #if DEVELOPMENT || DEBUG - if (kill_on_no_paging_space == TRUE) { + if (kill_on_no_paging_space) { /* * Since we are choosing to always kill a process, we don't need the * "out of application memory" dialog box in this mode. 
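/*
 * Illustrative sketch of the freezer-aware accounting used by
 * vm_compressor_low_on_space()/vm_compressor_out_of_space() above: when
 * freezer_incore_cseg_acct is enabled, the limits are checked only against
 * segments that are actually resident, so swapped-out frozen segments no
 * longer push the system toward the in-core thresholds.
 */
static uint64_t
compressor_incore_seg_count_sketch(void)
{
	/* resident segments = all segments minus the swapped-out ones */
	return c_segment_count - c_swappedout_count - c_swappedout_sparse_count;
}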
And, hence we won't @@ -468,17 +513,6 @@ vm_compressor_take_paging_space_action(void) #endif /* !CONFIG_EMBEDDED */ -void -vm_compressor_init_locks(void) -{ - lck_grp_attr_setdefault(&vm_compressor_lck_grp_attr); - lck_grp_init(&vm_compressor_lck_grp, "vm_compressor", &vm_compressor_lck_grp_attr); - lck_attr_setdefault(&vm_compressor_lck_attr); - - lck_rw_init(&c_master_lock, &vm_compressor_lck_grp, &vm_compressor_lck_attr); -} - - void vm_decompressor_lock(void) { @@ -518,6 +552,9 @@ cslot_copy(c_slot_t cdst, c_slot_t csrc) #if defined(__arm__) || defined(__arm64__) cdst->c_codec = csrc->c_codec; #endif +#if __ARM_WKDM_POPCNT__ + cdst->c_inline_popcount = csrc->c_inline_popcount; +#endif } vm_map_t compressor_map; @@ -541,10 +578,6 @@ void vm_compressor_init(void) { thread_t thread; - struct c_slot cs_dummy; - c_slot_t cs = &cs_dummy; - int c_segment_min_size; - int c_segment_padded_size; int attempts = 1; kern_return_t retval = KERN_SUCCESS; vm_offset_t start_addr = 0; @@ -554,11 +587,11 @@ vm_compressor_init(void) vm_size_t c_compressed_record_sbuf_size = 0; #endif /* RECORD_THE_COMPRESSED_DATA */ -#if DEVELOPMENT || DEBUG +#if DEVELOPMENT || DEBUG || CONFIG_FREEZE char bootarg_name[32]; - if (PE_parse_boot_argn("-kill_on_no_paging_space", bootarg_name, sizeof(bootarg_name))) { - kill_on_no_paging_space = TRUE; - } +#endif /* DEVELOPMENT || DEBUG || CONFIG_FREEZE */ + +#if DEVELOPMENT || DEBUG if (PE_parse_boot_argn("-disable_cseg_write_protection", bootarg_name, sizeof(bootarg_name))) { write_protect_c_segs = FALSE; } @@ -582,27 +615,14 @@ vm_compressor_init(void) } #endif /* DEVELOPMENT || DEBUG */ - /* - * ensure that any pointer that gets created from - * the vm_page zone can be packed properly - */ - cs->c_packed_ptr = C_SLOT_PACK_PTR(zone_map_min_address); - - if (C_SLOT_UNPACK_PTR(cs) != (uintptr_t)zone_map_min_address) { - panic("C_SLOT_UNPACK_PTR failed on zone_map_min_address - %p", (void *)zone_map_min_address); - } - - cs->c_packed_ptr = C_SLOT_PACK_PTR(zone_map_max_address); - - if (C_SLOT_UNPACK_PTR(cs) != (uintptr_t)zone_map_max_address) { - panic("C_SLOT_UNPACK_PTR failed on zone_map_max_address - %p", (void *)zone_map_max_address); +#if CONFIG_FREEZE + if (PE_parse_boot_argn("-disable_freezer_cseg_acct", bootarg_name, sizeof(bootarg_name))) { + freezer_incore_cseg_acct = FALSE; } - +#endif /* CONFIG_FREEZE */ assert((C_SEGMENTS_PER_PAGE * sizeof(union c_segu)) == PAGE_SIZE); - PE_parse_boot_argn("vm_compression_limit", &vm_compression_limit, sizeof(vm_compression_limit)); - #ifdef CONFIG_EMBEDDED vm_compressor_minorcompact_threshold_divisor = 20; vm_compressor_majorcompact_threshold_divisor = 30; @@ -621,13 +641,6 @@ vm_compressor_init(void) vm_compressor_catchup_threshold_divisor = 50; } #endif - /* - * vm_page_init_lck_grp is now responsible for calling vm_compressor_init_locks - * c_master_lock needs to be available early so that "vm_page_find_contiguous" can - * use PAGE_REPLACEMENT_ALLOWED to coordinate with the compressor. 
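/*
 * Illustrative sketch of the TUNABLE() conversion seen above for
 * kill_on_no_paging_space and vm_compression_limit: the boot-arg is bound to
 * the variable at declaration time, so vm_compressor_init() no longer needs
 * explicit PE_parse_boot_argn() calls for them. The tunable declared below
 * is hypothetical and exists only for this sketch.
 */
TUNABLE(bool, sketch_verbose_compressor, "-sketch_verbose_compressor", false);

static void
tunable_usage_sketch(void)
{
	if (sketch_verbose_compressor) {
		printf("compressor: verbose diagnostics enabled via boot-arg\n");
	}
}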
- */ - - c_list_lock = lck_mtx_alloc_init(&vm_compressor_lck_grp, &vm_compressor_lck_attr); queue_init(&c_bad_list_head); queue_init(&c_age_list_head); @@ -644,7 +657,7 @@ vm_compressor_init(void) c_segments_available = 0; if (vm_compression_limit) { - compressor_pool_size = (uint64_t)vm_compression_limit * PAGE_SIZE_64; + compressor_pool_size = ptoa_64(vm_compression_limit); } compressor_pool_max_size = C_SEG_MAX_LIMIT; @@ -694,7 +707,7 @@ vm_compressor_init(void) * sitting in idle band via jetsams */ -#define COMPRESSOR_CAP_PERCENTAGE 30ULL +#define COMPRESSOR_CAP_PERCENTAGE 37ULL if (compressor_pool_max_size > max_mem) { compressor_pool_max_size = max_mem; @@ -727,11 +740,28 @@ try_again: c_segment_pages_compressed_limit = (c_segments_limit * (C_SEG_BUFSIZE / PAGE_SIZE) * compressor_pool_multiplier); if (c_segment_pages_compressed_limit < (uint32_t)(max_mem / PAGE_SIZE)) { - c_segment_pages_compressed_limit = (uint32_t)(max_mem / PAGE_SIZE); + if (!vm_compression_limit) { + c_segment_pages_compressed_limit = (uint32_t)(max_mem / PAGE_SIZE); + } } c_segment_pages_compressed_nearing_limit = (uint32_t)(((uint64_t)c_segment_pages_compressed_limit * 98ULL) / 100ULL); +#if CONFIG_FREEZE + /* + * Our in-core limits are based on the size of the compressor pool. + * The c_segments_nearing_limit is also based on the compressor pool + * size and calculated above. + */ + c_segments_incore_limit = c_segments_limit; + + if (freezer_incore_cseg_acct) { + /* + * Add enough segments to track all frozen c_segs that can be stored in swap. + */ + c_segments_limit += (uint32_t)(vm_swap_get_max_configured_space() / (vm_size_t)(C_SEG_ALLOCSIZE)); + } +#endif /* * Submap needs space for: * - c_segments @@ -764,25 +794,39 @@ try_again: kprintf("retrying creation of the compressor submap at 0x%llx bytes\n", compressor_pool_size); goto try_again; } - if (kernel_memory_allocate(compressor_map, (vm_offset_t *)(&c_segments), (sizeof(union c_segu) * c_segments_limit), 0, KMA_KOBJECT | KMA_VAONLY | KMA_PERMANENT, VM_KERN_MEMORY_COMPRESSOR) != KERN_SUCCESS) { + if (kernel_memory_allocate(compressor_map, (vm_offset_t *)(&c_segments), + (sizeof(union c_segu) * c_segments_limit), 0, + KMA_KOBJECT | KMA_VAONLY | KMA_PERMANENT, VM_KERN_MEMORY_COMPRESSOR) != KERN_SUCCESS) { panic("vm_compressor_init: kernel_memory_allocate failed - c_segments\n"); } - if (kernel_memory_allocate(compressor_map, &c_buffers, c_buffers_size, 0, KMA_COMPRESSOR | KMA_VAONLY | KMA_PERMANENT, VM_KERN_MEMORY_COMPRESSOR) != KERN_SUCCESS) { + if (kernel_memory_allocate(compressor_map, &c_buffers, c_buffers_size, 0, + KMA_COMPRESSOR | KMA_VAONLY | KMA_PERMANENT, VM_KERN_MEMORY_COMPRESSOR) != KERN_SUCCESS) { panic("vm_compressor_init: kernel_memory_allocate failed - c_buffers\n"); } - c_segment_min_size = sizeof(struct c_segment) + (C_SEG_SLOT_VAR_ARRAY_MIN_LEN * sizeof(struct c_slot)); + /* + * Pick a good size that will minimize fragmentation in zalloc + * by minimizing the fragmentation in a 16k run. + * + * C_SEG_SLOT_VAR_ARRAY_MIN_LEN is larger on 4k systems than 16k ones, + * making the fragmentation in a 4k page terrible. Using 16k for all + * systems matches zalloc() and will minimize fragmentation. 
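The sizing hunks just above make three related adjustments: an explicit vm_compression_limit boot-arg is now taken at face value (ptoa_64) instead of being silently raised to max_mem, the nearing-limit watermark remains 98% of the page limit, and under CONFIG_FREEZE the segment table is grown by enough entries to track every frozen segment that could live in swap while the in-core cap keeps the original value. A rough user-space rendering of that arithmetic; every input is invented for the example and the compressor_pool_multiplier factor is left out.

    #include <stdint.h>
    #include <stdio.h>

    int
    main(void)
    {
        /* Invented example inputs; the real values come from the running system. */
        const uint64_t page_size           = 16 * 1024;
        const uint64_t c_seg_bufsize       = 256 * 1024;                 /* C_SEG_BUFSIZE            */
        const uint64_t c_seg_allocsize     = c_seg_bufsize + page_size;  /* embedded C_SEG_ALLOCSIZE */
        const uint64_t max_configured_swap = 5ULL * 128 * 1024 * 1024;   /* 5 swapfiles x 128 MB     */

        uint64_t c_segments_limit = 4000;                                /* example table size       */
        uint64_t pages_limit      = c_segments_limit * (c_seg_bufsize / page_size);
        uint64_t pages_nearing    = pages_limit * 98 / 100;              /* warn at 98% of the cap   */

        /* Freezer accounting: the in-core cap keeps the old value, the table grows
         * so frozen-and-swapped segments can still be tracked. */
        uint64_t c_segments_incore_limit = c_segments_limit;
        c_segments_limit += max_configured_swap / c_seg_allocsize;

        printf("pages limit %llu (nearing %llu), segs incore %llu, segs total %llu\n",
            (unsigned long long)pages_limit, (unsigned long long)pages_nearing,
            (unsigned long long)c_segments_incore_limit, (unsigned long long)c_segments_limit);
        return 0;
    }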
+ */ + uint32_t c_segment_size = sizeof(struct c_segment) + (C_SEG_SLOT_VAR_ARRAY_MIN_LEN * sizeof(struct c_slot)); + uint32_t cnt = (16 << 10) / c_segment_size; + uint32_t frag = (16 << 10) % c_segment_size; - for (c_segment_padded_size = 128; c_segment_padded_size < c_segment_min_size; c_segment_padded_size = c_segment_padded_size << 1) { - ; - } + c_seg_fixed_array_len = C_SEG_SLOT_VAR_ARRAY_MIN_LEN; - compressor_segment_zone = zinit(c_segment_padded_size, c_segments_limit * c_segment_padded_size, PAGE_SIZE, "compressor_segment"); - zone_change(compressor_segment_zone, Z_CALLERACCT, FALSE); - zone_change(compressor_segment_zone, Z_NOENCRYPT, TRUE); + while (cnt * sizeof(struct c_slot) < frag) { + c_segment_size += sizeof(struct c_slot); + c_seg_fixed_array_len++; + frag -= cnt * sizeof(struct c_slot); + } - c_seg_fixed_array_len = (c_segment_padded_size - sizeof(struct c_segment)) / sizeof(struct c_slot); + compressor_segment_zone = zone_create("compressor_segment", + c_segment_size, ZC_NOENCRYPT | ZC_ZFREE_CLEARMEM); c_segments_busy = FALSE; @@ -792,41 +836,62 @@ try_again: { host_basic_info_data_t hinfo; mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT; + size_t bufsize; + char *buf; #define BSD_HOST 1 host_info((host_t)BSD_HOST, HOST_BASIC_INFO, (host_info_t)&hinfo, &count); compressor_cpus = hinfo.max_cpus; - compressor_scratch_bufs = kalloc_tag(compressor_cpus * vm_compressor_get_decode_scratch_size(), VM_KERN_MEMORY_COMPRESSOR); - kdp_compressor_scratch_buf = kalloc_tag(vm_compressor_get_decode_scratch_size(), VM_KERN_MEMORY_COMPRESSOR); + bufsize = PAGE_SIZE; + bufsize += compressor_cpus * vm_compressor_get_decode_scratch_size(); + bufsize += vm_compressor_get_decode_scratch_size(); +#if CONFIG_FREEZE + bufsize += vm_compressor_get_encode_scratch_size(); +#endif +#if RECORD_THE_COMPRESSED_DATA + bufsize += c_compressed_record_sbuf_size; +#endif + + if (kernel_memory_allocate(kernel_map, (vm_offset_t *)&buf, bufsize, + PAGE_MASK, KMA_KOBJECT | KMA_PERMANENT, VM_KERN_MEMORY_COMPRESSOR)) { + panic("vm_compressor_init: Unable to allocate %zd bytes", bufsize); + } /* * kdp_compressor_decompressed_page must be page aligned because we access - * it through the physical apperture by page number. kalloc() does not - * guarantee alignment. + * it through the physical apperture by page number. 
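The fragmentation comment above and the loop that follows it replace the old round-up-to-power-of-two zinit sizing: starting from the minimum element size, the fixed c_slot array inside struct c_segment is grown one slot at a time for as long as the leftover bytes of a 16 KiB zalloc run can pay for that extra slot in every element of the run. A runnable toy version of the same loop; the three sizes are invented purely so the loop iterates at least once and do not reflect the real structures.

    #include <stdint.h>
    #include <stdio.h>

    int
    main(void)
    {
        /* Invented sizes, for illustration only. */
        const uint32_t seg_header = 452;   /* stand-in for sizeof(struct c_segment)     */
        const uint32_t slot_size  = 8;     /* stand-in for sizeof(struct c_slot)        */
        uint32_t fixed_len        = 6;     /* stand-in for C_SEG_SLOT_VAR_ARRAY_MIN_LEN */

        uint32_t elem = seg_header + fixed_len * slot_size;
        uint32_t cnt  = (16 << 10) / elem;   /* elements per 16 KiB zalloc run */
        uint32_t frag = (16 << 10) % elem;   /* bytes left over in that run    */

        /* Grow every element by one slot while the run's leftover can pay for it. */
        while (cnt * slot_size < frag) {
            elem += slot_size;
            fixed_len++;
            frag -= cnt * slot_size;
        }
        printf("element %u bytes, %u inline slots, %u leftover bytes per run\n",
            elem, fixed_len, frag);
        return 0;
    }

With these inputs the loop runs once: 500-byte elements leave 384 spare bytes in a run of 32, which is enough to give each element one more 8-byte slot (508 bytes, 128 bytes of waste left).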
*/ - vm_offset_t addr; - if (kernel_memory_allocate(kernel_map, &addr, PAGE_SIZE, 0, KMA_KOBJECT, VM_KERN_MEMORY_COMPRESSOR) != KERN_SUCCESS) { - panic("vm_compressor_init: kernel_memory_allocate failed - kdp_compressor_decompressed_page\n"); - } - assert((addr & PAGE_MASK) == 0); - kdp_compressor_decompressed_page = (void *)addr; + kdp_compressor_decompressed_page = buf; kdp_compressor_decompressed_page_paddr = kvtophys((vm_offset_t)kdp_compressor_decompressed_page); kdp_compressor_decompressed_page_ppnum = (ppnum_t) atop(kdp_compressor_decompressed_page_paddr); - } + buf += PAGE_SIZE; + bufsize -= PAGE_SIZE; + + compressor_scratch_bufs = buf; + buf += compressor_cpus * vm_compressor_get_decode_scratch_size(); + bufsize -= compressor_cpus * vm_compressor_get_decode_scratch_size(); + + kdp_compressor_scratch_buf = buf; + buf += vm_compressor_get_decode_scratch_size(); + bufsize -= vm_compressor_get_decode_scratch_size(); + #if CONFIG_FREEZE - freezer_compressor_scratch_buf = kalloc_tag(vm_compressor_get_encode_scratch_size(), VM_KERN_MEMORY_COMPRESSOR); + freezer_context_global.freezer_ctx_compressor_scratch_buf = buf; + buf += vm_compressor_get_encode_scratch_size(); + bufsize -= vm_compressor_get_encode_scratch_size(); #endif #if RECORD_THE_COMPRESSED_DATA - if (kernel_memory_allocate(compressor_map, (vm_offset_t *)&c_compressed_record_sbuf, c_compressed_record_sbuf_size, 0, KMA_KOBJECT, VM_KERN_MEMORY_COMPRESSOR) != KERN_SUCCESS) { - panic("vm_compressor_init: kernel_memory_allocate failed - c_compressed_record_sbuf\n"); - } - - c_compressed_record_cptr = c_compressed_record_sbuf; - c_compressed_record_ebuf = c_compressed_record_sbuf + c_compressed_record_sbuf_size; + c_compressed_record_sbuf = buf; + c_compressed_record_cptr = buf; + c_compressed_record_ebuf = c_compressed_record_sbuf + c_compressed_record_sbuf_size; + buf += c_compressed_record_sbuf_size; + bufsize -= c_compressed_record_sbuf_size; #endif + assert(bufsize == 0); + } if (kernel_thread_start_priority((thread_continue_t)vm_compressor_swap_trigger_thread, NULL, BASEPRI_VM, &thread) != KERN_SUCCESS) { @@ -860,7 +925,7 @@ try_again: static void c_seg_validate(c_segment_t c_seg, boolean_t must_be_compact) { - int c_indx; + uint16_t c_indx; int32_t bytes_used; uint32_t c_rounded_size; uint32_t c_size; @@ -1150,6 +1215,94 @@ c_seg_wait_on_busy(c_segment_t c_seg) thread_block(THREAD_CONTINUE_NULL); } +#if CONFIG_FREEZE +/* + * We don't have the task lock held while updating the task's + * c_seg queues. We can do that because of the following restrictions: + * + * - SINGLE FREEZER CONTEXT: + * We 'insert' c_segs into the task list on the task_freeze path. + * There can only be one such freeze in progress and the task + * isn't disappearing because we have the VM map lock held throughout + * and we have a reference on the proc too. + * + * - SINGLE TASK DISOWN CONTEXT: + * We 'disown' c_segs of a task ONLY from the task_terminate context. So + * we don't need the task lock but we need the c_list_lock and the + * compressor master lock (shared). We also hold the individual + * c_seg locks (exclusive). + * + * If we either: + * - can't get the c_seg lock on a try, then we start again because maybe + * the c_seg is part of a compaction and might get freed. So we can't trust + * that linkage and need to restart our queue traversal. + * - OR, we run into a busy c_seg (say being swapped in or free-ing) we + * drop all locks again and wait and restart our queue traversal. 
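Immediately above, four separate allocations (the kdp decompression page, the per-CPU decode scratch buffers, the kdp scratch buffer and, when configured, the freezer and record buffers) are folded into one page-aligned kernel_memory_allocate whose result is carved up in place, finishing with assert(bufsize == 0) to prove the carve consumed exactly what was requested. A user-space analogue of the carving, with posix_memalign standing in for kernel_memory_allocate and all sizes invented:

    #include <assert.h>
    #include <stdio.h>
    #include <stdlib.h>

    int
    main(void)
    {
        /* Invented stand-ins for PAGE_SIZE, the CPU count and the scratch size. */
        const size_t page_size = 16 * 1024, ncpus = 8, scratch_size = 4 * 1024;

        size_t bufsize = page_size              /* kdp decompressed page  */
            + ncpus * scratch_size              /* per-CPU decode scratch */
            + scratch_size;                     /* kdp decode scratch     */

        char *buf = NULL;
        if (posix_memalign((void **)&buf, page_size, bufsize) != 0) {
            return 1;
        }

        char *kdp_page    = buf;  buf += page_size;             bufsize -= page_size;
        char *cpu_scratch = buf;  buf += ncpus * scratch_size;  bufsize -= ncpus * scratch_size;
        char *kdp_scratch = buf;  buf += scratch_size;          bufsize -= scratch_size;

        assert(bufsize == 0);   /* every byte of the single allocation is accounted for */
        printf("%p %p %p\n", (void *)kdp_page, (void *)cpu_scratch, (void *)kdp_scratch);
        free(kdp_page);
        return 0;
    }

Because the kdp page sits at offset zero of a page-aligned allocation, the alignment requirement noted in the comment is satisfied without a separate allocation.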
+ * + * - The new_owner_task below is currently only the kernel or NULL. + * + */ +void +c_seg_update_task_owner(c_segment_t c_seg, task_t new_owner_task) +{ + task_t owner_task = c_seg->c_task_owner; + uint64_t uncompressed_bytes = ((c_seg->c_slots_used) * PAGE_SIZE_64); + + LCK_MTX_ASSERT(c_list_lock, LCK_MTX_ASSERT_OWNED); + LCK_MTX_ASSERT(&c_seg->c_lock, LCK_MTX_ASSERT_OWNED); + + if (owner_task) { + task_update_frozen_to_swap_acct(owner_task, uncompressed_bytes, DEBIT_FROM_SWAP); + queue_remove(&owner_task->task_frozen_cseg_q, c_seg, + c_segment_t, c_task_list_next_cseg); + } + + if (new_owner_task) { + queue_enter(&new_owner_task->task_frozen_cseg_q, c_seg, + c_segment_t, c_task_list_next_cseg); + task_update_frozen_to_swap_acct(new_owner_task, uncompressed_bytes, CREDIT_TO_SWAP); + } + + c_seg->c_task_owner = new_owner_task; +} + +void +task_disown_frozen_csegs(task_t owner_task) +{ + c_segment_t c_seg = NULL, next_cseg = NULL; + +again: + PAGE_REPLACEMENT_DISALLOWED(TRUE); + lck_mtx_lock_spin_always(c_list_lock); + + for (c_seg = (c_segment_t) queue_first(&owner_task->task_frozen_cseg_q); + !queue_end(&owner_task->task_frozen_cseg_q, (queue_entry_t) c_seg); + c_seg = next_cseg) { + next_cseg = (c_segment_t) queue_next(&c_seg->c_task_list_next_cseg);; + + if (!lck_mtx_try_lock_spin_always(&c_seg->c_lock)) { + lck_mtx_unlock(c_list_lock); + PAGE_REPLACEMENT_DISALLOWED(FALSE); + goto again; + } + + if (c_seg->c_busy) { + lck_mtx_unlock(c_list_lock); + PAGE_REPLACEMENT_DISALLOWED(FALSE); + + c_seg_wait_on_busy(c_seg); + + goto again; + } + assert(c_seg->c_task_owner == owner_task); + c_seg_update_task_owner(c_seg, kernel_task); + lck_mtx_unlock_always(&c_seg->c_lock); + } + + lck_mtx_unlock(c_list_lock); + PAGE_REPLACEMENT_DISALLOWED(FALSE); +} +#endif /* CONFIG_FREEZE */ void c_seg_switch_state(c_segment_t c_seg, int new_state, boolean_t insert_head) @@ -1196,6 +1349,12 @@ c_seg_switch_state(c_segment_t c_seg, int new_state, boolean_t insert_head) case C_ON_SWAPOUT_Q: assert(new_state == C_ON_AGE_Q || new_state == C_IS_FREE || new_state == C_IS_EMPTY || new_state == C_ON_SWAPIO_Q); +#if CONFIG_FREEZE + if (c_seg->c_task_owner && (new_state != C_ON_SWAPIO_Q)) { + c_seg_update_task_owner(c_seg, NULL); + } +#endif /* CONFIG_FREEZE */ + queue_remove(&c_swapout_list_head, c_seg, c_segment_t, c_age_list); thread_wakeup((event_t)&compaction_swapper_running); c_swapout_count--; @@ -1405,20 +1564,25 @@ c_seg_free_locked(c_segment_t c_seg) c_seg_switch_state(c_seg, C_IS_FREE, FALSE); - lck_mtx_unlock_always(c_list_lock); - if (c_buffer) { pages_populated = (round_page_32(C_SEG_OFFSET_TO_BYTES(c_seg->c_populated_offset))) / PAGE_SIZE; c_seg->c_store.c_buffer = NULL; } else { +#if CONFIG_FREEZE + c_seg_update_task_owner(c_seg, NULL); +#endif /* CONFIG_FREEZE */ + c_seg->c_store.c_swap_handle = (uint64_t)-1; } lck_mtx_unlock_always(&c_seg->c_lock); + lck_mtx_unlock_always(c_list_lock); + if (c_buffer) { if (pages_populated) { - kernel_memory_depopulate(compressor_map, (vm_offset_t) c_buffer, pages_populated * PAGE_SIZE, KMA_COMPRESSOR); + kernel_memory_depopulate(compressor_map, (vm_offset_t)c_buffer, + pages_populated * PAGE_SIZE, KMA_COMPRESSOR, VM_KERN_MEMORY_COMPRESSOR); } } else if (c_swap_handle) { /* @@ -1453,7 +1617,8 @@ c_seg_free_locked(c_segment_t c_seg) lck_mtx_destroy(&c_seg->c_lock, &vm_compressor_lck_grp); if (c_seg->c_slot_var_array_len) { - kfree(c_seg->c_slot_var_array, sizeof(struct c_slot) * c_seg->c_slot_var_array_len); + kheap_free(KHEAP_DATA_BUFFERS, c_seg->c_slot_var_array, 
+ sizeof(struct c_slot) * c_seg->c_slot_var_array_len); } zfree(compressor_segment_zone, c_seg); @@ -1518,7 +1683,7 @@ c_seg_minor_compaction_and_unlock(c_segment_t c_seg, boolean_t clear_busy) uint32_t old_populated_offset; uint32_t c_rounded_size; uint32_t c_size; - int c_indx = 0; + uint16_t c_indx = 0; int i; c_slot_t c_dst; c_slot_t c_src; @@ -1569,7 +1734,7 @@ c_seg_minor_compaction_and_unlock(c_segment_t c_seg, boolean_t clear_busy) cslot_copy(c_dst, c_src); c_dst->c_offset = c_offset; - slot_ptr = (c_slot_mapping_t)C_SLOT_UNPACK_PTR(c_dst); + slot_ptr = C_SLOT_UNPACK_PTR(c_dst); slot_ptr->s_cindx = c_indx; c_offset += C_SEG_BYTES_TO_OFFSET(c_rounded_size); @@ -1594,7 +1759,8 @@ c_seg_minor_compaction_and_unlock(c_segment_t c_seg, boolean_t clear_busy) gc_size = C_SEG_OFFSET_TO_BYTES(old_populated_offset - c_seg->c_populated_offset); gc_ptr = &c_seg->c_store.c_buffer[c_seg->c_populated_offset]; - kernel_memory_depopulate(compressor_map, (vm_offset_t)gc_ptr, gc_size, KMA_COMPRESSOR); + kernel_memory_depopulate(compressor_map, (vm_offset_t)gc_ptr, gc_size, + KMA_COMPRESSOR, VM_KERN_MEMORY_COMPRESSOR); } #if DEVELOPMENT || DEBUG @@ -1633,12 +1799,14 @@ c_seg_alloc_nextslot(c_segment_t c_seg) newlen = oldlen * 2; } - new_slot_array = (struct c_slot *)kalloc(sizeof(struct c_slot) * newlen); + new_slot_array = kheap_alloc(KHEAP_DATA_BUFFERS, + sizeof(struct c_slot) * newlen, Z_WAITOK); lck_mtx_lock_spin_always(&c_seg->c_lock); if (old_slot_array) { - memcpy((char *)new_slot_array, (char *)old_slot_array, sizeof(struct c_slot) * oldlen); + memcpy(new_slot_array, old_slot_array, + sizeof(struct c_slot) * oldlen); } c_seg->c_slot_var_array_len = newlen; @@ -1647,7 +1815,8 @@ c_seg_alloc_nextslot(c_segment_t c_seg) lck_mtx_unlock_always(&c_seg->c_lock); if (old_slot_array) { - kfree(old_slot_array, sizeof(struct c_slot) * oldlen); + kheap_free(KHEAP_DATA_BUFFERS, old_slot_array, + sizeof(struct c_slot) * oldlen); } } } @@ -1815,7 +1984,7 @@ c_seg_major_compact( while (dst_slot < c_seg_dst->c_nextslot) { c_dst = C_SEG_SLOT_FROM_INDEX(c_seg_dst, dst_slot); - slot_ptr = (c_slot_mapping_t)C_SLOT_UNPACK_PTR(c_dst); + slot_ptr = C_SLOT_UNPACK_PTR(c_dst); /* would mean "empty slot", so use csegno+1 */ slot_ptr->s_cseg = c_seg_dst->c_mysegno + 1; slot_ptr->s_cindx = dst_slot++; @@ -2008,18 +2177,22 @@ compressor_needs_to_swap(void) lck_mtx_unlock_always(c_list_lock); if (age >= vm_ripe_target_age) { - return TRUE; + should_swap = TRUE; + goto check_if_low_space; } } if (VM_CONFIG_SWAP_IS_ACTIVE) { if (COMPRESSOR_NEEDS_TO_SWAP()) { - return TRUE; + should_swap = TRUE; + goto check_if_low_space; } if (VM_PAGE_Q_THROTTLED(&vm_pageout_queue_external) && vm_page_anonymous_count < (vm_page_inactive_count / 20)) { - return TRUE; + should_swap = TRUE; + goto check_if_low_space; } if (vm_page_free_count < (vm_page_free_reserved - (COMPRESSOR_FREE_RESERVED_LIMIT * 2))) { - return TRUE; + should_swap = TRUE; + goto check_if_low_space; } } compute_swapout_target_age(); @@ -2047,19 +2220,20 @@ compressor_needs_to_swap(void) should_swap = TRUE; } +check_if_low_space: + #if CONFIG_JETSAM if (should_swap || vm_compressor_low_on_space() == TRUE) { if (vm_compressor_thrashing_detected == FALSE) { vm_compressor_thrashing_detected = TRUE; - if (swapout_target_age || vm_compressor_low_on_space() == TRUE) { - if (swapout_target_age) { - /* The compressor is thrashing. */ - memorystatus_kill_on_VM_compressor_thrashing(TRUE /* async */); - } else { - /* The compressor is running low on space. 
*/ - memorystatus_kill_on_VM_compressor_space_shortage(TRUE /* async */); - } + if (swapout_target_age) { + /* The compressor is thrashing. */ + memorystatus_kill_on_VM_compressor_thrashing(TRUE /* async */); + compressor_thrashing_induced_jetsam++; + } else if (vm_compressor_low_on_space() == TRUE) { + /* The compressor is running low on space. */ + memorystatus_kill_on_VM_compressor_space_shortage(TRUE /* async */); compressor_thrashing_induced_jetsam++; } else { memorystatus_kill_on_FC_thrashing(TRUE /* async */); @@ -2077,6 +2251,10 @@ compressor_needs_to_swap(void) should_swap = FALSE; } +#else /* CONFIG_JETSAM */ + if (should_swap && vm_swap_low_on_space()) { + vm_compressor_take_paging_space_action(); + } #endif /* CONFIG_JETSAM */ if (should_swap == FALSE) { @@ -2486,7 +2664,7 @@ vm_compressor_swap_trigger_thread(void) * the tuneables until we are awakened via vm_pageout_scan * so that we are at a point where the vm_swapfile_open will * be operating on the correct directory (in case the default - * of /var/vm/ is overridden by the dymanic_pager + * of using the VM volume is overridden by the dynamic_pager) */ if (compaction_swapper_init_now) { vm_compaction_swapper_do_init(); @@ -2494,6 +2672,9 @@ vm_compressor_swap_trigger_thread(void) if (vm_pageout_state.vm_restricted_to_single_processor == TRUE) { thread_vm_bind_group_add(); } +#if CONFIG_THREAD_GROUPS + thread_group_vm_add(); +#endif thread_set_thread_name(current_thread(), "VM_cswap_trigger"); compaction_swapper_init_now = 0; } @@ -2699,6 +2880,8 @@ do_fastwake_warmup(queue_head_t *c_queue, boolean_t consider_all_cseg) } int min_csegs_per_major_compaction = DELAYED_COMPACTIONS_PER_PASS; +extern bool vm_swapout_thread_running; +extern boolean_t compressor_store_stop_compaction; void vm_compressor_compact_and_swap(boolean_t flush_all) @@ -2755,7 +2938,7 @@ vm_compressor_compact_and_swap(boolean_t flush_all) bytes_freed = 0; yield_after_considered_per_pass = MAX(min_csegs_per_major_compaction, DELAYED_COMPACTIONS_PER_PASS); - while (!queue_empty(&c_age_list_head) && compaction_swapper_abort == 0) { + while (!queue_empty(&c_age_list_head) && !compaction_swapper_abort && !compressor_store_stop_compaction) { if (hibernate_flushing == TRUE) { clock_sec_t sec; @@ -2783,9 +2966,13 @@ vm_compressor_compact_and_swap(boolean_t flush_all) break; } } - if (c_swapout_count >= C_SWAPOUT_LIMIT) { + if (!vm_swap_out_of_space() && c_swapout_count >= C_SWAPOUT_LIMIT) { assert_wait_timeout((event_t) &compaction_swapper_running, THREAD_INTERRUPTIBLE, 100, 1000 * NSEC_PER_USEC); + if (!vm_swapout_thread_running) { + thread_wakeup((event_t)&c_swapout_list_head); + } + lck_mtx_unlock_always(c_list_lock); VM_DEBUG_CONSTANT_EVENT(vm_compressor_compact_and_swap, VM_COMPRESSOR_COMPACT_AND_SWAP, DBG_FUNC_NONE, 1, c_swapout_count, 0, 0); @@ -2801,7 +2988,7 @@ vm_compressor_compact_and_swap(boolean_t flush_all) vm_compressor_age_swapped_in_segments(flush_all); - if (c_swapout_count >= C_SWAPOUT_LIMIT) { + if (!vm_swap_out_of_space() && c_swapout_count >= C_SWAPOUT_LIMIT) { /* * we timed out on the above thread_block * let's loop around and try again @@ -2824,12 +3011,6 @@ vm_compressor_compact_and_swap(boolean_t flush_all) needs_to_swap = compressor_needs_to_swap(); -#if !CONFIG_EMBEDDED - if (needs_to_swap == TRUE && vm_swap_low_on_space()) { - vm_compressor_take_paging_space_action(); - } -#endif /* !CONFIG_EMBEDDED */ - lck_mtx_lock_spin_always(c_list_lock); VM_DEBUG_CONSTANT_EVENT(vm_compressor_compact_and_swap, VM_COMPRESSOR_COMPACT_AND_SWAP, 
DBG_FUNC_NONE, 3, needs_to_swap, 0, 0); @@ -3037,11 +3218,14 @@ vm_compressor_compact_and_swap(boolean_t flush_all) lck_mtx_unlock_always(&c_seg->c_lock); if (c_swapout_count) { - lck_mtx_unlock_always(c_list_lock); - - thread_wakeup((event_t)&c_swapout_list_head); - - lck_mtx_lock_spin_always(c_list_lock); + /* + * We don't pause/yield here because we will either + * yield below or at the top of the loop with the + * assert_wait_timeout. + */ + if (!vm_swapout_thread_running) { + thread_wakeup((event_t)&c_swapout_list_head); + } } if (number_considered >= yield_after_considered_per_pass) { @@ -3124,8 +3308,19 @@ c_seg_allocate(c_segment_t *current_chead) } if (c_free_segno_head == (uint32_t)-1) { uint32_t c_segments_available_new; + uint32_t compressed_pages; - if (c_segments_available >= c_segments_limit || c_segment_pages_compressed >= c_segment_pages_compressed_limit) { +#if CONFIG_FREEZE + if (freezer_incore_cseg_acct) { + compressed_pages = c_segment_pages_compressed_incore; + } else { + compressed_pages = c_segment_pages_compressed; + } +#else + compressed_pages = c_segment_pages_compressed; +#endif /* CONFIG_FREEZE */ + + if (c_segments_available >= c_segments_limit || compressed_pages >= c_segment_pages_compressed_limit) { lck_mtx_unlock_always(c_list_lock); return NULL; @@ -3173,12 +3368,11 @@ c_seg_allocate(c_segment_t *current_chead) lck_mtx_unlock_always(c_list_lock); - c_seg = (c_segment_t)zalloc(compressor_segment_zone); - bzero((char *)c_seg, sizeof(struct c_segment)); + c_seg = zalloc_flags(compressor_segment_zone, Z_WAITOK | Z_ZERO); c_seg->c_store.c_buffer = (int32_t *)C_SEG_BUFFER_ADDRESS(c_segno); - lck_mtx_init(&c_seg->c_lock, &vm_compressor_lck_grp, &vm_compressor_lck_attr); + lck_mtx_init(&c_seg->c_lock, &vm_compressor_lck_grp, LCK_ATTR_NULL); c_seg->c_state = C_IS_EMPTY; c_seg->c_firstemptyslot = C_SLOT_MAX_INDEX; @@ -3261,7 +3455,8 @@ c_current_seg_filled(c_segment_t c_seg, c_segment_t *current_chead) compressor_map, (vm_offset_t) &c_seg->c_store.c_buffer[offset_to_depopulate], unused_bytes, - KMA_COMPRESSOR); + KMA_COMPRESSOR, + VM_KERN_MEMORY_COMPRESSOR); lck_mtx_lock_spin_always(&c_seg->c_lock); @@ -3292,7 +3487,7 @@ c_current_seg_filled(c_segment_t c_seg, c_segment_t *current_chead) #endif #if CONFIG_FREEZE - if (current_chead == (c_segment_t*)&freezer_chead && + if (current_chead == (c_segment_t*) &(freezer_context_global.freezer_ctx_chead) && VM_CONFIG_SWAP_IS_PRESENT && VM_CONFIG_FREEZER_SWAP_IS_ACTIVE #if DEVELOPMENT || DEBUG @@ -3323,17 +3518,28 @@ c_current_seg_filled(c_segment_t c_seg, c_segment_t *current_chead) * We'll need to fix this accounting as a start. */ assert(vm_darkwake_mode == FALSE); - c_freezer_swapout_page_count += (C_SEG_OFFSET_TO_BYTES(c_seg->c_populated_offset)) / PAGE_SIZE_64; + c_seg_update_task_owner(c_seg, freezer_context_global.freezer_ctx_task); + freezer_context_global.freezer_ctx_swapped_bytes += c_seg->c_bytes_used; } #endif /* CONFIG_FREEZE */ if (c_seg->c_state == C_ON_AGE_Q && C_SEG_UNUSED_BYTES(c_seg) >= PAGE_SIZE) { +#if CONFIG_FREEZE + assert(c_seg->c_task_owner == NULL); +#endif /* CONFIG_FREEZE */ c_seg_need_delayed_compaction(c_seg, TRUE); } lck_mtx_unlock_always(c_list_lock); if (c_seg->c_state == C_ON_SWAPOUT_Q) { + /* + * Darkwake and Freeze configs always + * wake up the swapout thread because + * the compactor thread that normally handles + * it may not be running as much in these + * configs. 
+ */ thread_wakeup((event_t)&c_swapout_list_head); } @@ -3431,7 +3637,7 @@ c_seg_swapin(c_segment_t c_seg, boolean_t force_minor_compaction, boolean_t age_ if (vm_swap_get(c_seg, f_offset, io_size) != KERN_SUCCESS) { PAGE_REPLACEMENT_DISALLOWED(TRUE); - kernel_memory_depopulate(compressor_map, addr, io_size, KMA_COMPRESSOR); + kernel_memory_depopulate(compressor_map, addr, io_size, KMA_COMPRESSOR, VM_KERN_MEMORY_COMPRESSOR); c_seg_swapin_requeue(c_seg, FALSE, TRUE, age_on_swapin_q); } else { @@ -3453,6 +3659,27 @@ c_seg_swapin(c_segment_t c_seg, boolean_t force_minor_compaction, boolean_t age_ c_seg_swapin_requeue(c_seg, TRUE, force_minor_compaction == TRUE ? FALSE : TRUE, age_on_swapin_q); +#if CONFIG_FREEZE + /* + * c_seg_swapin_requeue() returns with the c_seg lock held. + */ + if (!lck_mtx_try_lock_spin_always(c_list_lock)) { + assert(c_seg->c_busy); + + lck_mtx_unlock_always(&c_seg->c_lock); + lck_mtx_lock_spin_always(c_list_lock); + lck_mtx_lock_spin_always(&c_seg->c_lock); + } + + if (c_seg->c_task_owner) { + c_seg_update_task_owner(c_seg, NULL); + } + + lck_mtx_unlock_always(c_list_lock); + + OSAddAtomic(c_seg->c_slots_used, &c_segment_pages_compressed_incore); +#endif /* CONFIG_FREEZE */ + OSAddAtomic64(c_seg->c_bytes_used, &compressor_bytes_used); if (force_minor_compaction == TRUE) { @@ -3596,8 +3823,8 @@ retry: cs = C_SEG_SLOT_FROM_INDEX(c_seg, c_seg->c_nextslot); + C_SLOT_ASSERT_PACKABLE(slot_ptr); cs->c_packed_ptr = C_SLOT_PACK_PTR(slot_ptr); - assert(slot_ptr == (c_slot_mapping_t)C_SLOT_UNPACK_PTR(cs)); cs->c_offset = c_seg->c_nextoffset; @@ -3616,12 +3843,18 @@ retry: if (vm_compressor_algorithm() != VM_COMPRESSOR_DEFAULT_CODEC) { #if defined(__arm__) || defined(__arm64__) uint16_t ccodec = CINVALID; - + uint32_t inline_popcount; if (max_csize >= C_SEG_OFFSET_ALIGNMENT_BOUNDARY) { c_size = metacompressor((const uint8_t *) src, (uint8_t *) &c_seg->c_store.c_buffer[cs->c_offset], max_csize_adj, &ccodec, - scratch_buf, &incomp_copy); + scratch_buf, &incomp_copy, &inline_popcount); +#if __ARM_WKDM_POPCNT__ + cs->c_inline_popcount = inline_popcount; +#else + assert(inline_popcount == C_SLOT_NO_POPCOUNT); +#endif + #if C_SEG_OFFSET_ALIGNMENT_BOUNDARY > 4 if (c_size > max_csize_adj) { c_size = -1; @@ -3742,6 +3975,9 @@ sv_compression: OSAddAtomic64(PAGE_SIZE, &c_segment_input_bytes); OSAddAtomic(1, &c_segment_pages_compressed); +#if CONFIG_FREEZE + OSAddAtomic(1, &c_segment_pages_compressed_incore); +#endif /* CONFIG_FREEZE */ OSAddAtomic(1, &sample_period_compression_count); KERNEL_DEBUG(0xe0400000 | DBG_FUNC_END, *current_chead, c_size, c_segment_input_bytes, c_segment_compressed_bytes, 0); @@ -3802,7 +4038,7 @@ c_decompress_page(char *dst, volatile c_slot_mapping_t slot_ptr, int flags, int c_slot_t cs; c_segment_t c_seg; uint32_t c_segno; - int c_indx; + uint16_t c_indx; int c_rounded_size; uint32_t c_size; int retval = 0; @@ -3940,6 +4176,28 @@ bypass_busy_check: clock_nsec_t cur_ts_nsec; if (C_SEG_IS_ONDISK(c_seg)) { +#if CONFIG_FREEZE + if (freezer_incore_cseg_acct) { + if ((c_seg->c_slots_used + c_segment_pages_compressed_incore) >= c_segment_pages_compressed_nearing_limit) { + PAGE_REPLACEMENT_DISALLOWED(FALSE); + lck_mtx_unlock_always(&c_seg->c_lock); + + memorystatus_kill_on_VM_compressor_space_shortage(FALSE /* async */); + + goto ReTry; + } + + uint32_t incore_seg_count = c_segment_count - c_swappedout_count - c_swappedout_sparse_count; + if ((incore_seg_count + 1) >= c_segments_nearing_limit) { + PAGE_REPLACEMENT_DISALLOWED(FALSE); + 
lck_mtx_unlock_always(&c_seg->c_lock); + + memorystatus_kill_on_VM_compressor_space_shortage(FALSE /* async */); + + goto ReTry; + } + } +#endif /* CONFIG_FREEZE */ assert(kdp_mode == FALSE); retval = c_seg_swapin(c_seg, FALSE, TRUE); assert(retval == 0); @@ -4008,8 +4266,32 @@ bypass_busy_check: if (vm_compressor_algorithm() != VM_COMPRESSOR_DEFAULT_CODEC) { #if defined(__arm__) || defined(__arm64__) uint16_t c_codec = cs->c_codec; - metadecompressor((const uint8_t *) &c_seg->c_store.c_buffer[cs->c_offset], - (uint8_t *)dst, c_size, c_codec, (void *)scratch_buf); + uint32_t inline_popcount; + if (!metadecompressor((const uint8_t *) &c_seg->c_store.c_buffer[cs->c_offset], + (uint8_t *)dst, c_size, c_codec, (void *)scratch_buf, &inline_popcount)) { + retval = -1; + } else { +#if __ARM_WKDM_POPCNT__ + if (inline_popcount != cs->c_inline_popcount) { + /* + * The codec choice in compression and + * decompression must agree, so there + * should never be a disagreement in + * whether an inline population count + * was performed. + */ + assert(inline_popcount != C_SLOT_NO_POPCOUNT); + assert(cs->c_inline_popcount != C_SLOT_NO_POPCOUNT); + printf("decompression failure from physical region %llx+%05x: popcount mismatch (%d != %d)\n", + (unsigned long long)kvtophys((uintptr_t)&c_seg->c_store.c_buffer[cs->c_offset]), c_size, + inline_popcount, + cs->c_inline_popcount); + retval = -1; + } +#else + assert(inline_popcount == C_SLOT_NO_POPCOUNT); +#endif /* __ARM_WKDM_POPCNT__ */ + } #endif } else { #if defined(__arm64__) @@ -4052,6 +4334,31 @@ bypass_busy_check: OSAddAtomic(1, &sample_period_decompression_count); } } +#if CONFIG_FREEZE + else { + /* + * We are freeing an uncompressed page from this c_seg and so balance the ledgers. + */ + if (C_SEG_IS_ONDISK(c_seg)) { + /* + * The compression sweep feature will push out anonymous pages to disk + * without going through the freezer path and so those c_segs, while + * swapped out, won't have an owner. + */ + if (c_seg->c_task_owner) { + task_update_frozen_to_swap_acct(c_seg->c_task_owner, PAGE_SIZE_64, DEBIT_FROM_SWAP); + } + + /* + * We are freeing a page in swap without swapping it in. We bump the in-core + * count here to simulate a swapin of a page so that we can accurately + * decrement it below. 
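On __ARM_WKDM_POPCNT__ hardware the decompression hunk above no longer trusts the output blindly: if the population count recorded in the slot at compression time (c_inline_popcount) disagrees with the one the decompressor reports, the page is treated as a decompression failure and the physical address of the offending buffer is logged. The kernel obtains both counts from the WKdm codec itself; the stand-alone sketch below only illustrates the cross-check idea, using an ordinary byte-wise popcount and a deliberately flipped bit.

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Plain population count over a buffer -- illustration only; the kernel
     * gets this value from the compression codec rather than recomputing it. */
    static uint32_t
    buffer_popcount(const uint8_t *p, size_t len)
    {
        uint32_t bits = 0;
        for (size_t i = 0; i < len; i++) {
            bits += (uint32_t)__builtin_popcount(p[i]);
        }
        return bits;
    }

    int
    main(void)
    {
        uint8_t page[4096];
        memset(page, 0xA5, sizeof(page));

        uint32_t recorded = buffer_popcount(page, sizeof(page)); /* stored at compress time  */
        page[100] ^= 0x01;                                       /* simulated corruption     */
        uint32_t observed = buffer_popcount(page, sizeof(page)); /* recomputed at decompress */

        if (observed != recorded) {
            printf("popcount mismatch (%u != %u): treat as decompression failure\n",
                observed, recorded);
            return 1;
        }
        return 0;
    }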
+ */ + OSAddAtomic(1, &c_segment_pages_compressed_incore); + } + } +#endif /* CONFIG_FREEZE */ + if (flags & C_KEEP) { *zeroslot = 0; goto done; @@ -4071,6 +4378,10 @@ bypass_busy_check: } OSAddAtomic(-1, &c_segment_pages_compressed); +#if CONFIG_FREEZE + OSAddAtomic(-1, &c_segment_pages_compressed_incore); + assertf(c_segment_pages_compressed_incore >= 0, "-ve incore count %p 0x%x", c_seg, c_segment_pages_compressed_incore); +#endif /* CONFIG_FREEZE */ if (c_seg->c_state != C_ON_BAD_Q && !(C_SEG_IS_ONDISK(c_seg))) { /* @@ -4105,7 +4416,9 @@ bypass_busy_check: C_SEG_BUSY(c_seg); lck_mtx_unlock_always(&c_seg->c_lock); - kernel_memory_depopulate(compressor_map, (vm_offset_t) c_seg->c_store.c_buffer, pages_populated * PAGE_SIZE, KMA_COMPRESSOR); + kernel_memory_depopulate(compressor_map, + (vm_offset_t) c_seg->c_store.c_buffer, + pages_populated * PAGE_SIZE, KMA_COMPRESSOR, VM_KERN_MEMORY_COMPRESSOR); lck_mtx_lock_spin_always(&c_seg->c_lock); C_SEG_WAKEUP_DONE(c_seg); @@ -4192,6 +4505,10 @@ vm_compressor_get(ppnum_t pn, int *slot, int flags) c_segment_sv_hash_drop_ref(slot_ptr->s_cindx); OSAddAtomic(-1, &c_segment_pages_compressed); +#if CONFIG_FREEZE + OSAddAtomic(-1, &c_segment_pages_compressed_incore); + assertf(c_segment_pages_compressed_incore >= 0, "-ve incore count 0x%x", c_segment_pages_compressed_incore); +#endif /* CONFIG_FREEZE */ *slot = 0; } if (data) { @@ -4225,6 +4542,95 @@ vm_compressor_get(ppnum_t pn, int *slot, int flags) return retval; } +#if DEVELOPMENT || DEBUG + +void +vm_compressor_inject_error(int *slot) +{ + c_slot_mapping_t slot_ptr = (c_slot_mapping_t)slot; + + /* No error detection for single-value compression. */ + if (slot_ptr->s_cseg == C_SV_CSEG_ID) { + printf("%s(): cannot inject errors in SV-compressed pages\n", __func__ ); + return; + } + + /* s_cseg is actually "segno+1" */ + const uint32_t c_segno = slot_ptr->s_cseg - 1; + + assert(c_segno < c_segments_available); + assert(c_segments[c_segno].c_segno >= c_segments_available); + + const c_segment_t c_seg = c_segments[c_segno].c_seg; + + PAGE_REPLACEMENT_DISALLOWED(TRUE); + + lck_mtx_lock_spin_always(&c_seg->c_lock); + assert(c_seg->c_state != C_IS_EMPTY && c_seg->c_state != C_IS_FREE); + + const uint16_t c_indx = slot_ptr->s_cindx; + assert(c_indx < c_seg->c_nextslot); + + /* + * To safely make this segment temporarily writable, we need to mark + * the segment busy, which allows us to release the segment lock. + */ + while (c_seg->c_busy) { + c_seg_wait_on_busy(c_seg); + lck_mtx_lock_spin_always(&c_seg->c_lock); + } + C_SEG_BUSY(c_seg); + + bool already_writable = (c_seg->c_state == C_IS_FILLING); + if (!already_writable) { + /* + * Protection update must be performed preemptibly, so temporarily drop + * the lock. Having set c_busy will prevent most other concurrent + * operations. + */ + lck_mtx_unlock_always(&c_seg->c_lock); + C_SEG_MAKE_WRITEABLE(c_seg); + lck_mtx_lock_spin_always(&c_seg->c_lock); + } + + /* + * Once we've released the lock following our c_state == C_IS_FILLING check, + * c_current_seg_filled() can (re-)write-protect the segment. However, it + * will transition from C_IS_FILLING before releasing the c_seg lock, so we + * can detect this by re-checking after we've reobtained the lock. + */ + if (already_writable && c_seg->c_state != C_IS_FILLING) { + lck_mtx_unlock_always(&c_seg->c_lock); + C_SEG_MAKE_WRITEABLE(c_seg); + lck_mtx_lock_spin_always(&c_seg->c_lock); + already_writable = false; + /* Segment can't be freed while c_busy is set. 
*/ + assert(c_seg->c_state != C_IS_FILLING); + } + + c_slot_t cs = C_SEG_SLOT_FROM_INDEX(c_seg, c_indx); + int32_t *data = &c_seg->c_store.c_buffer[cs->c_offset]; + /* assume that the compressed data holds at least one int32_t */ + assert(UNPACK_C_SIZE(cs) > sizeof(*data)); + /* + * This bit is known to be in the payload of a MISS packet resulting from + * the pattern used in the test pattern from decompression_failure.c. + * Flipping it should result in many corrupted bits in the test page. + */ + data[0] ^= 0x00000100; + if (!already_writable) { + lck_mtx_unlock_always(&c_seg->c_lock); + C_SEG_WRITE_PROTECT(c_seg); + lck_mtx_lock_spin_always(&c_seg->c_lock); + } + + C_SEG_WAKEUP_DONE(c_seg); + lck_mtx_unlock_always(&c_seg->c_lock); + + PAGE_REPLACEMENT_DISALLOWED(FALSE); +} + +#endif /* DEVELOPMENT || DEBUG */ int vm_compressor_free(int *slot, int flags) @@ -4240,6 +4646,10 @@ vm_compressor_free(int *slot, int flags) if (slot_ptr->s_cseg == C_SV_CSEG_ID) { c_segment_sv_hash_drop_ref(slot_ptr->s_cindx); OSAddAtomic(-1, &c_segment_pages_compressed); +#if CONFIG_FREEZE + OSAddAtomic(-1, &c_segment_pages_compressed_incore); + assertf(c_segment_pages_compressed_incore >= 0, "-ve incore count 0x%x", c_segment_pages_compressed_incore); +#endif /* CONFIG_FREEZE */ *slot = 0; return 0; @@ -4282,7 +4692,7 @@ vm_compressor_transfer( { c_slot_mapping_t dst_slot, src_slot; c_segment_t c_seg; - int c_indx; + uint16_t c_indx; c_slot_t cs; src_slot = (c_slot_mapping_t) src_slot_p; @@ -4309,6 +4719,7 @@ Retry: c_indx = src_slot->s_cindx; cs = C_SEG_SLOT_FROM_INDEX(c_seg, c_indx); /* point the c_slot back to dst_slot instead of src_slot */ + C_SLOT_ASSERT_PACKABLE(dst_slot); cs->c_packed_ptr = C_SLOT_PACK_PTR(dst_slot); /* transfer */ *dst_slot_p = *src_slot_p; @@ -4367,7 +4778,7 @@ vm_compressor_relocate( uint16_t dst_slot; c_slot_t c_dst; c_slot_t c_src; - int c_indx; + uint16_t c_indx; c_segment_t c_seg_dst = NULL; c_segment_t c_seg_src = NULL; kern_return_t kr = KERN_SUCCESS; @@ -4415,17 +4826,21 @@ Relookup_src: lck_mtx_lock_spin_always(&c_seg_src->c_lock); - if (C_SEG_IS_ONDISK(c_seg_src)) { + if (C_SEG_IS_ON_DISK_OR_SOQ(c_seg_src) || + c_seg_src->c_state == C_IS_FILLING) { /* - * A "thaw" can mark a process as eligible for + * Skip this page if :- + * a) the src c_seg is already on-disk (or on its way there) + * A "thaw" can mark a process as eligible for * another freeze cycle without bringing any of * its swapped out c_segs back from disk (because * that is done on-demand). + * Or, this page may be mapped elsewhere in the task's map, + * and we may have marked it for swap already. * - * If the src c_seg we find for our pre-compressed - * data is already on-disk, then we are dealing - * with an app's data that is already packed and - * swapped out. Don't do anything. + * b) Or, the src c_seg is being filled by the compressor + * thread. We don't want the added latency of waiting for + * this c_seg in the freeze path and so we skip it. */ PAGE_REPLACEMENT_DISALLOWED(FALSE); @@ -4499,7 +4914,9 @@ Relookup_src: c_dst = C_SEG_SLOT_FROM_INDEX(c_seg_dst, c_seg_dst->c_nextslot); memcpy(&c_seg_dst->c_store.c_buffer[c_seg_dst->c_nextoffset], &c_seg_src->c_store.c_buffer[c_src->c_offset], c_size); -//is platform alignment actually necessary since wkdm aligns its output? + /* + * Is platform alignment actually necessary since wkdm aligns its output? 
+ */ c_rounded_size = (c_size + C_SEG_OFFSET_ALIGNMENT_MASK) & ~C_SEG_OFFSET_ALIGNMENT_MASK; cslot_copy(c_dst, c_src); @@ -4530,7 +4947,7 @@ Relookup_src: c_dst = C_SEG_SLOT_FROM_INDEX(c_seg_dst, dst_slot); PAGE_REPLACEMENT_ALLOWED(TRUE); - slot_ptr = (c_slot_mapping_t)C_SLOT_UNPACK_PTR(c_dst); + slot_ptr = C_SLOT_UNPACK_PTR(c_dst); /* would mean "empty slot", so use csegno+1 */ slot_ptr->s_cseg = c_seg_dst->c_mysegno + 1; slot_ptr->s_cindx = dst_slot; diff --git a/osfmk/vm/vm_compressor.h b/osfmk/vm/vm_compressor.h index c3b722952..9b4cf69eb 100644 --- a/osfmk/vm/vm_compressor.h +++ b/osfmk/vm/vm_compressor.h @@ -26,7 +26,6 @@ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -#include #include #include #include @@ -47,25 +46,21 @@ #define C_SEG_BUFSIZE (1024 * 256) #define C_SEG_MAX_PAGES (C_SEG_BUFSIZE / PAGE_SIZE) -#if CONFIG_EMBEDDED +#if !defined(__x86_64__) #define C_SEG_OFF_LIMIT (C_SEG_BYTES_TO_OFFSET((C_SEG_BUFSIZE - 512))) #define C_SEG_ALLOCSIZE (C_SEG_BUFSIZE + PAGE_SIZE) #else #define C_SEG_OFF_LIMIT (C_SEG_BYTES_TO_OFFSET((C_SEG_BUFSIZE - 128))) #define C_SEG_ALLOCSIZE (C_SEG_BUFSIZE) -#endif +#endif /* !defined(__x86_64__) */ #define C_SEG_MAX_POPULATE_SIZE (4 * PAGE_SIZE) -#if defined(__arm64__) - -#if DEVELOPMENT || DEBUG +#if defined(__arm64__) && (DEVELOPMENT || DEBUG) #if defined(PLATFORM_WatchOS) #define VALIDATE_C_SEGMENTS (1) #endif -#endif - -#endif +#endif /* defined(__arm64__) && (DEVELOPMENT || DEBUG) */ #if DEBUG || COMPRESSOR_INTEGRITY_CHECKS @@ -77,7 +72,7 @@ #define ENABLE_COMPRESSOR_CHECKS 0 #endif -#define CHECKSUM_THE_SWAP ENABLE_SWAP_CHECKS /* Debug swap data */ +#define CHECKSUM_THE_SWAP ENABLE_SWAP_CHECKS /* Debug swap data */ #define CHECKSUM_THE_DATA ENABLE_COMPRESSOR_CHECKS /* Debug compressor/decompressor data */ #define CHECKSUM_THE_COMPRESSED_DATA ENABLE_COMPRESSOR_CHECKS /* Debug compressor/decompressor compressed data */ @@ -87,20 +82,89 @@ #define RECORD_THE_COMPRESSED_DATA 0 -struct c_slot { - uint64_t c_offset:C_SEG_OFFSET_BITS, -#if defined(__arm64__) - c_size:14, - c_codec:1, - c_packed_ptr:33; -#elif defined(__arm__) - c_size:12, - c_codec:1, - c_packed_ptr:35; +/* + * The c_slot structure embeds a packed pointer to a c_slot_mapping + * (32bits) which we ideally want to span as much VA space as possible + * to not limit zalloc in how it sets itself up. 
+ */ +#if !defined(__LP64__) /* no packing */ +#define C_SLOT_PACKED_PTR_BITS 32 +#define C_SLOT_PACKED_PTR_SHIFT 0 +#define C_SLOT_PACKED_PTR_BASE 0 + +#define C_SLOT_C_SIZE_BITS 12 +#define C_SLOT_C_CODEC_BITS 1 +#define C_SLOT_C_POPCOUNT_BITS 0 +#define C_SLOT_C_PADDING_BITS 3 + +#elif __ARM_WKDM_POPCNT__ /* no packing */ +#define C_SLOT_PACKED_PTR_BITS 47 +#define C_SLOT_PACKED_PTR_SHIFT 0 +#define C_SLOT_PACKED_PTR_BASE ((uintptr_t)KERNEL_PMAP_HEAP_RANGE_START) + +#define C_SLOT_C_SIZE_BITS 14 +#define C_SLOT_C_CODEC_BITS 1 +#define C_SLOT_C_POPCOUNT_BITS 18 +#define C_SLOT_C_PADDING_BITS 0 + +#elif defined(__arm64__) /* 32G from the heap start */ +#define C_SLOT_PACKED_PTR_BITS 33 +#define C_SLOT_PACKED_PTR_SHIFT 2 +#define C_SLOT_PACKED_PTR_BASE ((uintptr_t)KERNEL_PMAP_HEAP_RANGE_START) + +#define C_SLOT_C_SIZE_BITS 14 +#define C_SLOT_C_CODEC_BITS 1 +#define C_SLOT_C_POPCOUNT_BITS 0 +#define C_SLOT_C_PADDING_BITS 0 + +#elif defined(__x86_64__) /* 256G from the heap start */ +#define C_SLOT_PACKED_PTR_BITS 36 +#define C_SLOT_PACKED_PTR_SHIFT 2 +#define C_SLOT_PACKED_PTR_BASE ((uintptr_t)KERNEL_PMAP_HEAP_RANGE_START) + +#define C_SLOT_C_SIZE_BITS 12 +#define C_SLOT_C_CODEC_BITS 0 /* not used */ +#define C_SLOT_C_POPCOUNT_BITS 0 +#define C_SLOT_C_PADDING_BITS 0 + #else - c_size:12, - c_packed_ptr:36; +#error vm_compressor parameters undefined for this architecture +#endif + +/* + * Popcounts needs to represent both 0 and full which requires + * (8 ^ C_SLOT_C_SIZE_BITS) + 1 values and (C_SLOT_C_SIZE_BITS + 4) bits. + * + * We us the (2 * (8 ^ C_SLOT_C_SIZE_BITS) - 1) value to mean "unknown". + */ +#define C_SLOT_NO_POPCOUNT ((16u << C_SLOT_C_SIZE_BITS) - 1) + +static_assert((C_SEG_OFFSET_BITS + C_SLOT_C_SIZE_BITS + + C_SLOT_C_CODEC_BITS + C_SLOT_C_POPCOUNT_BITS + + C_SLOT_C_PADDING_BITS + C_SLOT_PACKED_PTR_BITS) % 32 == 0); + +struct c_slot { + uint64_t c_offset:C_SEG_OFFSET_BITS; + uint64_t c_size:C_SLOT_C_SIZE_BITS; +#if C_SLOT_C_CODEC_BITS + uint64_t c_codec:C_SLOT_C_CODEC_BITS; +#endif +#if C_SLOT_C_POPCOUNT_BITS + /* + * This value may not agree with c_pop_cdata, as it may be the + * population count of the uncompressed data. + * + * This value must be C_SLOT_NO_POPCOUNT when the compression algorithm + * cannot provide it. 
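The constants above size struct c_slot per architecture: the back-pointer to the owning c_slot_mapping is stored as (address - KERNEL_PMAP_HEAP_RANGE_START) >> C_SLOT_PACKED_PTR_SHIFT in a field of C_SLOT_PACKED_PTR_BITS bits, and the static_assert confirms that all the bit-fields still add up to a whole number of 32-bit words. A small runnable sketch of the encode/decode arithmetic using the arm64 numbers (33 bits of payload, a 2-bit shift, hence a 32 GiB window above the base); the base address used here is invented.

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PTR_BITS   33
    #define PTR_SHIFT  2
    #define PTR_BASE   ((uintptr_t)0xfffffe0000000000ULL)   /* invented base */

    static uint64_t
    pack_ptr(uintptr_t p)
    {
        /* The mapping must sit inside the window and be 4-byte aligned. */
        assert(p >= PTR_BASE && ((p - PTR_BASE) & ((1u << PTR_SHIFT) - 1)) == 0);
        return (uint64_t)((p - PTR_BASE) >> PTR_SHIFT);
    }

    static uintptr_t
    unpack_ptr(uint64_t packed)
    {
        return (uintptr_t)(packed << PTR_SHIFT) + PTR_BASE;
    }

    int
    main(void)
    {
        uintptr_t p = PTR_BASE + 0x12345678;   /* some aligned offset into the window */
        uint64_t packed = pack_ptr(p);

        assert(packed < (1ULL << PTR_BITS));   /* fits the 33-bit field */
        assert(unpack_ptr(packed) == p);
        printf("%#llx packs to %#llx\n", (unsigned long long)p, (unsigned long long)packed);
        return 0;
    }

On a 32-bit kernel the pointer is stored unpacked, and the __ARM_WKDM_POPCNT__ layout widens the field to 47 bits with no shift to make room for the 18-bit inline popcount.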
+ */ + uint32_t c_inline_popcount:C_SLOT_C_POPCOUNT_BITS; #endif +#if C_SLOT_C_PADDING_BITS + uint64_t c_padding:C_SLOT_C_PADDING_BITS; +#endif + uint64_t c_packed_ptr:C_SLOT_PACKED_PTR_BITS; + + /* debugging fields, typically not present on release kernels */ #if CHECKSUM_THE_DATA unsigned int c_hash_data; #endif @@ -110,7 +174,7 @@ struct c_slot { #if POPCOUNT_THE_COMPRESSED_DATA unsigned int c_pop_cdata; #endif -}; +} __attribute__((packed, aligned(4))); #define C_IS_EMPTY 0 #define C_IS_FREE 1 @@ -130,6 +194,11 @@ struct c_segment { queue_chain_t c_age_list; queue_chain_t c_list; +#if CONFIG_FREEZE + queue_chain_t c_task_list_next_cseg; + task_t c_task_owner; +#endif /* CONFIG_FREEZE */ + #define C_SEG_MAX_LIMIT (1 << 20) /* this needs to track the size of c_mysegno */ uint32_t c_mysegno:20, c_busy:1, @@ -153,8 +222,6 @@ struct c_segment { uint32_t c_nextoffset; uint32_t c_populated_offset; - uint32_t c_swappedin_ts; - union { int32_t *c_buffer; uint64_t c_swap_handle; @@ -171,6 +238,7 @@ struct c_segment { #endif /* CHECKSUM_THE_SWAP */ thread_t c_busy_for_thread; + uint32_t c_swappedin_ts; int c_slot_var_array_len; struct c_slot *c_slot_var_array; @@ -299,6 +367,7 @@ void c_seg_free(c_segment_t); void c_seg_free_locked(c_segment_t); void c_seg_insert_into_age_q(c_segment_t); void c_seg_need_delayed_compaction(c_segment_t, boolean_t); +void c_seg_update_task_owner(c_segment_t, task_t); void vm_decompressor_lock(void); void vm_decompressor_unlock(void); @@ -312,7 +381,6 @@ int vm_wants_task_throttled(task_t); extern void vm_compaction_swapper_do_init(void); extern void vm_compressor_swap_init(void); -extern void vm_compressor_init_locks(void); extern lck_rw_t c_master_lock; #if ENCRYPTED_SWAP @@ -320,6 +388,7 @@ extern void vm_swap_decrypt(c_segment_t); #endif /* ENCRYPTED_SWAP */ extern int vm_swap_low_on_space(void); +extern int vm_swap_out_of_space(void); extern kern_return_t vm_swap_get(c_segment_t, uint64_t, uint64_t); extern void vm_swap_free(uint64_t); extern void vm_swap_consider_defragmenting(int); @@ -421,8 +490,8 @@ extern void kdp_compressor_busy_find_owner(event64_t wait_event, thread_waitinfo #define COMPRESSOR_FREE_RESERVED_LIMIT 128 #endif -uint32_t vm_compressor_get_encode_scratch_size(void); -uint32_t vm_compressor_get_decode_scratch_size(void); +uint32_t vm_compressor_get_encode_scratch_size(void) __pure2; +uint32_t vm_compressor_get_decode_scratch_size(void) __pure2; #define COMPRESSOR_SCRATCH_BUF_SIZE vm_compressor_get_encode_scratch_size() @@ -431,7 +500,8 @@ extern void c_compressed_record_init(void); extern void c_compressed_record_write(char *, int); #endif -extern lck_mtx_t *c_list_lock; +extern lck_mtx_t c_list_lock_storage; +#define c_list_lock (&c_list_lock_storage) #if DEVELOPMENT || DEBUG extern uint32_t vm_ktrace_enabled; diff --git a/osfmk/vm/vm_compressor_algorithms.c b/osfmk/vm/vm_compressor_algorithms.c index 4344f3ebe..fba9cb42b 100644 --- a/osfmk/vm/vm_compressor_algorithms.c +++ b/osfmk/vm/vm_compressor_algorithms.c @@ -237,8 +237,9 @@ WKdm_hv(uint32_t *wkbuf) #if defined(__arm64__) #endif -static inline void -WKdmD(WK_word* src_buf, WK_word* dest_buf, WK_word* scratch, unsigned int bytes) +static inline bool +WKdmD(WK_word* src_buf, WK_word* dest_buf, WK_word* scratch, unsigned int bytes, + __unused uint32_t *pop_count) { #if defined(__arm64__) #endif @@ -258,13 +259,15 @@ WKdmD(WK_word* src_buf, WK_word* dest_buf, WK_word* scratch, unsigned int bytes) #else /* !defined arm64 */ WKdm_decompress_new(src_buf, dest_buf, scratch, bytes); #endif + 
return true; } #if DEVELOPMENT || DEBUG int precompy, wkswhw; #endif static inline int -WKdmC(WK_word* src_buf, WK_word* dest_buf, WK_word* scratch, boolean_t *incomp_copy, unsigned int limit) +WKdmC(WK_word* src_buf, WK_word* dest_buf, WK_word* scratch, + boolean_t *incomp_copy, unsigned int limit, __unused uint32_t *pop_count) { (void)incomp_copy; int wkcval; @@ -290,12 +293,15 @@ WKdmC(WK_word* src_buf, WK_word* dest_buf, WK_word* scratch, boolean_t *incomp_c int -metacompressor(const uint8_t *in, uint8_t *cdst, int32_t outbufsz, uint16_t *codec, void *cscratchin, boolean_t *incomp_copy) +metacompressor(const uint8_t *in, uint8_t *cdst, int32_t outbufsz, uint16_t *codec, + void *cscratchin, boolean_t *incomp_copy, uint32_t *pop_count_p) { int sz = -1; int dowk = FALSE, dolz4 = FALSE, skiplz4 = FALSE; int insize = PAGE_SIZE; compressor_encode_scratch_t *cscratch = cscratchin; + /* Not all paths lead to an inline population count. */ + uint32_t pop_count = C_SLOT_NO_POPCOUNT; if (vm_compressor_current_codec == CMODE_WK) { dowk = TRUE; @@ -318,7 +324,7 @@ metacompressor(const uint8_t *in, uint8_t *cdst, int32_t outbufsz, uint16_t *cod if (dowk) { *codec = CCWK; VM_COMPRESSOR_STAT(compressor_stats.wk_compressions++); - sz = WKdmC(in, cdst, &cscratch->wkscratch[0], incomp_copy, outbufsz); + sz = WKdmC(in, cdst, &cscratch->wkscratch[0], incomp_copy, outbufsz, &pop_count); if (sz == -1) { VM_COMPRESSOR_STAT(compressor_stats.wk_compressed_bytes_total += PAGE_SIZE); @@ -367,15 +373,21 @@ lz4compress: } } cexit: + assert(pop_count_p != NULL); + *pop_count_p = pop_count; return sz; } -void -metadecompressor(const uint8_t *source, uint8_t *dest, uint32_t csize, uint16_t ccodec, void *compressor_dscratchin) +bool +metadecompressor(const uint8_t *source, uint8_t *dest, uint32_t csize, + uint16_t ccodec, void *compressor_dscratchin, uint32_t *pop_count_p) { int dolz4 = (ccodec == CCLZ4); int rval; compressor_decode_scratch_t *compressor_dscratch = compressor_dscratchin; + /* Not all paths lead to an inline population count. 
*/ + uint32_t pop_count = C_SLOT_NO_POPCOUNT; + bool success; if (dolz4) { rval = (int)lz4raw_decode_buffer(dest, PAGE_SIZE, source, csize, &compressor_dscratch->lz4decodestate[0]); @@ -386,14 +398,19 @@ metadecompressor(const uint8_t *source, uint8_t *dest, uint32_t csize, uint16_t #endif assertf(rval == PAGE_SIZE, "LZ4 decode: size != pgsize %d, header: 0x%x, 0x%x, 0x%x", rval, *d32, *(d32 + 1), *(d32 + 2)); + success = (rval == PAGE_SIZE); } else { assert(ccodec == CCWK); - WKdmD(source, dest, &compressor_dscratch->wkdecompscratch[0], csize); + success = WKdmD(source, dest, &compressor_dscratch->wkdecompscratch[0], csize, &pop_count); VM_DECOMPRESSOR_STAT(compressor_stats.wk_decompressions += 1); VM_DECOMPRESSOR_STAT(compressor_stats.wk_decompressed_bytes += csize); } + + assert(pop_count_p != NULL); + *pop_count_p = pop_count; + return success; } #pragma clang diagnostic pop diff --git a/osfmk/vm/vm_compressor_algorithms.h b/osfmk/vm/vm_compressor_algorithms.h index 163d393be..56aab0256 100644 --- a/osfmk/vm/vm_compressor_algorithms.h +++ b/osfmk/vm/vm_compressor_algorithms.h @@ -90,8 +90,10 @@ typedef struct { extern compressor_tuneables_t vmctune; -int metacompressor(const uint8_t *in, uint8_t *cdst, int32_t outbufsz, uint16_t *codec, void *cscratch, boolean_t *); -void metadecompressor(const uint8_t *source, uint8_t *dest, uint32_t csize, uint16_t ccodec, void *compressor_dscratch); +int metacompressor(const uint8_t *in, uint8_t *cdst, int32_t outbufsz, + uint16_t *codec, void *cscratch, boolean_t *, uint32_t *pop_count_p); +bool metadecompressor(const uint8_t *source, uint8_t *dest, uint32_t csize, + uint16_t ccodec, void *compressor_dscratch, uint32_t *pop_count_p); typedef enum { CCWK = 0, // must be 0 or 1 diff --git a/osfmk/vm/vm_compressor_backing_store.c b/osfmk/vm/vm_compressor_backing_store.c index 4874789d5..0c98c24a4 100644 --- a/osfmk/vm/vm_compressor_backing_store.c +++ b/osfmk/vm/vm_compressor_backing_store.c @@ -34,7 +34,18 @@ #include +LCK_GRP_DECLARE(vm_swap_data_lock_grp, "vm_swap_data"); +LCK_MTX_EARLY_DECLARE(vm_swap_data_lock, &vm_swap_data_lock_grp); + +#if defined(XNU_TARGET_OS_OSX) +/* + * launchd explicitly turns ON swap later during boot on macOS devices. + */ +boolean_t compressor_store_stop_compaction = TRUE; +#else boolean_t compressor_store_stop_compaction = FALSE; +#endif + boolean_t vm_swapfile_create_needed = FALSE; boolean_t vm_swapfile_gc_needed = FALSE; @@ -49,6 +60,7 @@ int vm_num_swap_files = 0; int vm_num_pinned_swap_files = 0; int vm_swapout_thread_processed_segments = 0; int vm_swapout_thread_awakened = 0; +bool vm_swapout_thread_running = FALSE; int vm_swapfile_create_thread_awakened = 0; int vm_swapfile_create_thread_running = 0; int vm_swapfile_gc_thread_awakened = 0; @@ -111,16 +123,19 @@ static void vm_swap_handle_delayed_trims(boolean_t); static void vm_swap_do_delayed_trim(struct swapfile *); static void vm_swap_wait_on_trim_handling_in_progress(void); +extern int vnode_getwithref(struct vnode* vp); boolean_t vm_swap_force_defrag = FALSE, vm_swap_force_reclaim = FALSE; #if CONFIG_EMBEDDED -#if DEVELOPMENT || DEBUG -#define VM_MAX_SWAP_FILE_NUM 100 -#else /* DEVELOPMENT || DEBUG */ +/* + * For CONFIG_FREEZE, we scale the c_segments_limit based on the + * number of swapfiles allowed. That increases wired memory overhead. + * So we want to keep the max swapfiles same on both DEV/RELEASE so + * that the memory overhead is similar for performance comparisons. 
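The comment above is the reason VM_MAX_SWAP_FILE_NUM no longer jumps to 100 on DEVELOPMENT kernels: with CONFIG_FREEZE the compressor's segment table is scaled by the configured swap space (the c_segments_limit adjustment earlier in this patch), so a larger swapfile cap translates directly into wired memory and would make DEVELOPMENT and RELEASE measurements incomparable. A back-of-the-envelope sketch of that scaling using the embedded constants from the headers further down; the 8-byte table entry is an assumption (union c_segu holds a single pointer).

    #include <stdint.h>
    #include <stdio.h>

    int
    main(void)
    {
        const uint64_t max_swapfile_size = 128ULL * 1024 * 1024;    /* embedded MAX_SWAP_FILE_SIZE  */
        const uint64_t c_seg_allocsize   = 256 * 1024 + 16 * 1024;  /* C_SEG_BUFSIZE + 16 KB page   */
        const uint64_t table_entry_size  = 8;                       /* assumed sizeof(union c_segu) */

        for (int nfiles = 1; nfiles <= 5; nfiles++) {
            uint64_t extra_segs = (nfiles * max_swapfile_size) / c_seg_allocsize;
            printf("%d swapfile(s): %llu extra c_segments entries (~%llu KB of table)\n",
                nfiles, (unsigned long long)extra_segs,
                (unsigned long long)(extra_segs * table_entry_size / 1024));
        }
        return 0;
    }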
+ */ #define VM_MAX_SWAP_FILE_NUM 5 -#endif /* DEVELOPMENT || DEBUG */ #define VM_SWAPFILE_DELAYED_TRIM_MAX 4 @@ -390,19 +405,8 @@ vm_compressor_swap_init() { thread_t thread = NULL; - lck_grp_attr_setdefault(&vm_swap_data_lock_grp_attr); - lck_grp_init(&vm_swap_data_lock_grp, - "vm_swap_data", - &vm_swap_data_lock_grp_attr); - lck_attr_setdefault(&vm_swap_data_lock_attr); - lck_mtx_init_ext(&vm_swap_data_lock, - &vm_swap_data_lock_ext, - &vm_swap_data_lock_grp, - &vm_swap_data_lock_attr); - queue_init(&swf_global_queue); - if (kernel_thread_start_priority((thread_continue_t)vm_swapout_thread, NULL, BASEPRI_VM, &thread) != KERN_SUCCESS) { panic("vm_swapout_thread: create failed"); @@ -501,8 +505,7 @@ vm_compaction_swapper_do_init(void) if (!compaction_swapper_inited) { namelen = (int)strlen(swapfilename) + SWAPFILENAME_INDEX_LEN + 1; - pathname = (char*)kalloc(namelen); - memset(pathname, 0, namelen); + pathname = kheap_alloc(KHEAP_TEMP, namelen, Z_WAITOK | Z_ZERO); snprintf(pathname, namelen, "%s%d", swapfilename, 0); vm_swapfile_open(pathname, &vp); @@ -541,7 +544,7 @@ vm_compaction_swapper_do_init(void) #endif vm_swapfile_close((uint64_t)pathname, vp); } - kfree(pathname, namelen); + kheap_free(KHEAP_TEMP, pathname, namelen); compaction_swapper_inited = 1; } @@ -583,6 +586,16 @@ int vm_swap_defragment_swapin = 0; int vm_swap_defragment_free = 0; int vm_swap_defragment_busy = 0; +#if CONFIG_FREEZE +extern uint32_t c_segment_pages_compressed_incore; +extern uint32_t c_segment_pages_compressed_nearing_limit; +extern uint32_t c_segment_count; +extern uint32_t c_segments_nearing_limit; + +boolean_t memorystatus_kill_on_VM_compressor_space_shortage(boolean_t); + +extern bool freezer_incore_cseg_acct; +#endif /* CONFIG_FREEZE */ static void vm_swap_defragment() @@ -636,6 +649,18 @@ vm_swap_defragment() } else { lck_mtx_unlock_always(c_list_lock); +#if CONFIG_FREEZE + if (freezer_incore_cseg_acct) { + if ((c_seg->c_slots_used + c_segment_pages_compressed_incore) >= c_segment_pages_compressed_nearing_limit) { + memorystatus_kill_on_VM_compressor_space_shortage(TRUE /* async */); + } + + uint32_t incore_seg_count = c_segment_count - c_swappedout_count - c_swappedout_sparse_count; + if ((incore_seg_count + 1) >= c_segments_nearing_limit) { + memorystatus_kill_on_VM_compressor_space_shortage(TRUE /* async */); + } + } +#endif /* CONFIG_FREEZE */ if (c_seg_swapin(c_seg, TRUE, FALSE) == 0) { lck_mtx_unlock_always(&c_seg->c_lock); } @@ -1031,7 +1056,7 @@ vm_swapout_complete_soc(struct swapout_io_completion *soc) lck_mtx_unlock_always(c_list_lock); - vm_swap_put_finish(soc->swp_swf, &soc->swp_f_offset, soc->swp_io_error); + vm_swap_put_finish(soc->swp_swf, &soc->swp_f_offset, soc->swp_io_error, TRUE /*drop iocount*/); vm_swapout_finish(soc->swp_c_seg, soc->swp_f_offset, soc->swp_c_size, kr); lck_mtx_lock_spin_always(c_list_lock); @@ -1057,8 +1082,10 @@ vm_swapout_thread(void) vm_swapout_thread_awakened++; lck_mtx_lock_spin_always(c_list_lock); + + vm_swapout_thread_running = TRUE; again: - while (!queue_empty(&c_swapout_list_head) && vm_swapout_soc_busy < vm_swapout_limit) { + while (!queue_empty(&c_swapout_list_head) && vm_swapout_soc_busy < vm_swapout_limit && !compressor_store_stop_compaction) { c_seg = (c_segment_t)queue_first(&c_swapout_list_head); lck_mtx_lock_spin_always(&c_seg->c_lock); @@ -1140,22 +1167,37 @@ c_seg_is_empty: lck_mtx_lock_spin_always(c_list_lock); - if ((soc = vm_swapout_find_done_soc())) { + while ((soc = vm_swapout_find_done_soc())) { vm_swapout_complete_soc(soc); } 
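Several functions below (vm_swap_get, vm_swap_put, vm_swap_do_delayed_trim and vm_swap_reclaim) gain the same guard: take an iocount on the swapfile vnode with vnode_getwithref() before issuing I/O and drop it with vnode_put() afterwards, so the vnode cannot be reclaimed while the I/O is in flight. The common shape is gathered here for readability; this is xnu KPI and not buildable outside the kernel tree, and swf, file_offset, addr and size are whatever the caller already holds.

    int error;

    if ((error = vnode_getwithref(swf->swp_vp)) != 0) {
        /* No iocount, no I/O: report the failure and let the caller bail out. */
        printf("swapfile I/O skipped: vnode_getwithref failed with %d\n", error);
    } else {
        error = vm_swapfile_io(swf->swp_vp, file_offset, addr,
            (int)(size / PAGE_SIZE_64), SWAP_READ, NULL);
        vnode_put(swf->swp_vp);   /* release the iocount once the I/O has completed */
    }

vm_swap_put() is the one exception to the symmetric pattern: when the write completes asynchronously the iocount is carried along with the swapout_io_completion and released later by vm_swap_put_finish(), hence that function's new drop_iocount argument.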
lck_mtx_unlock_always(c_list_lock); vm_swapout_thread_throttle_adjust(); - vm_pageout_io_throttle(); lck_mtx_lock_spin_always(c_list_lock); } - if ((soc = vm_swapout_find_done_soc())) { + while ((soc = vm_swapout_find_done_soc())) { vm_swapout_complete_soc(soc); + } + lck_mtx_unlock_always(c_list_lock); + + vm_pageout_io_throttle(); + + lck_mtx_lock_spin_always(c_list_lock); + + /* + * Recheck if we have some c_segs to wakeup + * post throttle. And, check to see if we + * have any more swapouts needed. + */ + if (vm_swapout_soc_done) { goto again; } + assert_wait((event_t)&c_swapout_list_head, THREAD_UNINT); + vm_swapout_thread_running = FALSE; + lck_mtx_unlock_always(c_list_lock); thread_block((thread_continue_t)vm_swapout_thread); @@ -1177,7 +1219,9 @@ vm_swapout_iodone(void *io_context, int error) soc->swp_io_error = error; vm_swapout_soc_done++; - thread_wakeup((event_t)&c_swapout_list_head); + if (!vm_swapout_thread_running) { + thread_wakeup((event_t)&c_swapout_list_head); + } lck_mtx_unlock_always(c_list_lock); } @@ -1189,7 +1233,8 @@ vm_swapout_finish(c_segment_t c_seg, uint64_t f_offset, uint32_t size, kern_retu PAGE_REPLACEMENT_DISALLOWED(TRUE); if (kr == KERN_SUCCESS) { - kernel_memory_depopulate(compressor_map, (vm_offset_t)c_seg->c_store.c_buffer, size, KMA_COMPRESSOR); + kernel_memory_depopulate(compressor_map, (vm_offset_t)c_seg->c_store.c_buffer, size, + KMA_COMPRESSOR, VM_KERN_MEMORY_COMPRESSOR); } #if ENCRYPTED_SWAP else { @@ -1221,11 +1266,26 @@ vm_swapout_finish(c_segment_t c_seg, uint64_t f_offset, uint32_t size, kern_retu if (c_seg->c_bytes_used) { OSAddAtomic64(-c_seg->c_bytes_used, &compressor_bytes_used); } + +#if CONFIG_FREEZE + /* + * Successful swapout. Decrement the in-core compressed pages count. + */ + OSAddAtomic(-(c_seg->c_slots_used), &c_segment_pages_compressed_incore); + assertf(c_segment_pages_compressed_incore >= 0, "-ve incore count %p 0x%x", c_seg, c_segment_pages_compressed_incore); +#endif /* CONFIG_FREEZE */ } else { if (c_seg->c_overage_swap == TRUE) { c_seg->c_overage_swap = FALSE; c_overage_swapped_count--; } + +#if CONFIG_FREEZE + if (c_seg->c_task_owner) { + c_seg_update_task_owner(c_seg, NULL); + } +#endif /* CONFIG_FREEZE */ + c_seg_switch_state(c_seg, C_ON_AGE_Q, FALSE); if (!c_seg->c_on_minorcompact_q && C_SEG_UNUSED_BYTES(c_seg) >= PAGE_SIZE) { @@ -1285,14 +1345,11 @@ vm_swap_create_file() if (swap_file_reuse == FALSE) { namelen = (int)strlen(swapfilename) + SWAPFILENAME_INDEX_LEN + 1; - swf = (struct swapfile*) kalloc(sizeof *swf); - memset(swf, 0, sizeof(*swf)); - + swf = kalloc_flags(sizeof *swf, Z_WAITOK | Z_ZERO); swf->swp_index = vm_num_swap_files + 1; swf->swp_pathlen = namelen; - swf->swp_path = (char*)kalloc(swf->swp_pathlen); - - memset(swf->swp_path, 0, namelen); + swf->swp_path = kheap_alloc(KHEAP_DATA_BUFFERS, swf->swp_pathlen, + Z_WAITOK | Z_ZERO); snprintf(swf->swp_path, namelen, "%s%d", swapfilename, vm_num_swap_files); } @@ -1301,7 +1358,7 @@ vm_swap_create_file() if (swf->swp_vp == NULL) { if (swap_file_reuse == FALSE) { - kfree(swf->swp_path, swf->swp_pathlen); + kheap_free(KHEAP_DATA_BUFFERS, swf->swp_path, swf->swp_pathlen); kfree(swf, sizeof *swf); } return FALSE; @@ -1328,11 +1385,11 @@ vm_swap_create_file() * Allocate a bitmap that describes the * number of segments held by this swapfile. 
*/ - swf->swp_bitmap = (uint8_t*)kalloc(num_bytes_for_bitmap); - memset(swf->swp_bitmap, 0, num_bytes_for_bitmap); + swf->swp_bitmap = kheap_alloc(KHEAP_DATA_BUFFERS, + num_bytes_for_bitmap, Z_WAITOK | Z_ZERO); - swf->swp_csegs = (c_segment_t *) kalloc(swf->swp_nsegs * sizeof(c_segment_t)); - memset(swf->swp_csegs, 0, (swf->swp_nsegs * sizeof(c_segment_t))); + swf->swp_csegs = kalloc_flags(swf->swp_nsegs * sizeof(c_segment_t), + Z_WAITOK | Z_ZERO); /* * passing a NULL trim_list into vnode_trim_list @@ -1384,14 +1441,14 @@ vm_swap_create_file() swf->swp_vp = NULL; if (swap_file_reuse == FALSE) { - kfree(swf->swp_path, swf->swp_pathlen); + kheap_free(KHEAP_DATA_BUFFERS, swf->swp_path, swf->swp_pathlen); kfree(swf, sizeof *swf); } } return swap_file_created; } - +extern void vnode_put(struct vnode* vp); kern_return_t vm_swap_get(c_segment_t c_seg, uint64_t f_offset, uint64_t size) { @@ -1418,7 +1475,13 @@ vm_swap_get(c_segment_t c_seg, uint64_t f_offset, uint64_t size) C_SEG_MAKE_WRITEABLE(c_seg); #endif file_offset = (f_offset & SWAP_SLOT_MASK); - retval = vm_swapfile_io(swf->swp_vp, file_offset, (uint64_t)c_seg->c_store.c_buffer, (int)(size / PAGE_SIZE_64), SWAP_READ, NULL); + + if ((retval = vnode_getwithref(swf->swp_vp)) != 0) { + printf("vm_swap_get: vnode_getwithref on swapfile failed with %d\n", retval); + } else { + retval = vm_swapfile_io(swf->swp_vp, file_offset, (uint64_t)c_seg->c_store.c_buffer, (int)(size / PAGE_SIZE_64), SWAP_READ, NULL); + vnode_put(swf->swp_vp); + } #if DEVELOPMENT || DEBUG C_SEG_WRITE_PROTECT(c_seg); @@ -1467,8 +1530,9 @@ vm_swap_put(vm_offset_t addr, uint64_t *f_offset, uint32_t size, c_segment_t c_s clock_sec_t sec; clock_nsec_t nsec; void *upl_ctx = NULL; + boolean_t drop_iocount = FALSE; - if (addr == 0 || f_offset == NULL) { + if (addr == 0 || f_offset == NULL || compressor_store_stop_compaction) { return KERN_FAILURE; } retry: @@ -1576,18 +1640,28 @@ issue_io: upl_ctx = (void *)&soc->swp_upl_ctx; } - error = vm_swapfile_io(swf->swp_vp, file_offset, addr, (int) (size / PAGE_SIZE_64), SWAP_WRITE, upl_ctx); + + if ((error = vnode_getwithref(swf->swp_vp)) != 0) { + printf("vm_swap_put: vnode_getwithref on swapfile failed with %d\n", error); + } else { + error = vm_swapfile_io(swf->swp_vp, file_offset, addr, (int) (size / PAGE_SIZE_64), SWAP_WRITE, upl_ctx); + drop_iocount = TRUE; + } if (error || upl_ctx == NULL) { - return vm_swap_put_finish(swf, f_offset, error); + return vm_swap_put_finish(swf, f_offset, error, drop_iocount); } return KERN_SUCCESS; } kern_return_t -vm_swap_put_finish(struct swapfile *swf, uint64_t *f_offset, int error) +vm_swap_put_finish(struct swapfile *swf, uint64_t *f_offset, int error, boolean_t drop_iocount) { + if (drop_iocount) { + vnode_put(swf->swp_vp); + } + lck_mtx_lock(&vm_swap_data_lock); swf->swp_io_count--; @@ -1729,6 +1803,11 @@ vm_swap_handle_delayed_trims(boolean_t force_now) * that file since vm_swap_reclaim will first process * all of the delayed trims associated with it */ + + if (compressor_store_stop_compaction == TRUE) { + return; + } + lck_mtx_lock(&vm_swap_data_lock); delayed_trim_handling_in_progress = TRUE; @@ -1767,6 +1846,16 @@ static void vm_swap_do_delayed_trim(struct swapfile *swf) { struct trim_list *tl, *tl_head; + int error; + + if (compressor_store_stop_compaction == TRUE) { + return; + } + + if ((error = vnode_getwithref(swf->swp_vp)) != 0) { + printf("vm_swap_do_delayed_trim: vnode_getwithref on swapfile failed with %d\n", error); + return; + } lck_mtx_lock(&vm_swap_data_lock); @@ -1778,6 +1867,8 
@@ vm_swap_do_delayed_trim(struct swapfile *swf) vnode_trim_list(swf->swp_vp, tl_head, TRUE); + (void) vnode_put(swf->swp_vp); + while ((tl = tl_head) != NULL) { unsigned int segidx = 0; unsigned int byte_for_segidx = 0; @@ -1966,19 +2057,27 @@ ReTry_for_cseg: lck_mtx_unlock_always(&c_seg->c_lock); - if (vm_swapfile_io(swf->swp_vp, f_offset, addr, (int)(c_size / PAGE_SIZE_64), SWAP_READ, NULL)) { - /* - * reading the data back in failed, so convert c_seg - * to a swapped in c_segment that contains no data - */ - c_seg_swapin_requeue(c_seg, FALSE, TRUE, FALSE); - /* - * returns with c_busy_swapping cleared - */ - + if (vnode_getwithref(swf->swp_vp)) { + printf("vm_swap_reclaim: vnode_getwithref on swapfile failed.\n"); vm_swap_get_failures++; goto swap_io_failed; + } else { + if (vm_swapfile_io(swf->swp_vp, f_offset, addr, (int)(c_size / PAGE_SIZE_64), SWAP_READ, NULL)) { + /* + * reading the data back in failed, so convert c_seg + * to a swapped in c_segment that contains no data + */ + c_seg_swapin_requeue(c_seg, FALSE, TRUE, FALSE); + /* + * returns with c_busy_swapping cleared + */ + vnode_put(swf->swp_vp); + vm_swap_get_failures++; + goto swap_io_failed; + } + vnode_put(swf->swp_vp); } + VM_STAT_INCR_BY(swapins, c_size >> PAGE_SHIFT); if (vm_swap_put(addr, &f_offset, c_size, c_seg, NULL)) { @@ -2047,7 +2146,8 @@ swap_io_failed: vm_swapfile_close((uint64_t)(swf->swp_path), swf->swp_vp); kfree(swf->swp_csegs, swf->swp_nsegs * sizeof(c_segment_t)); - kfree(swf->swp_bitmap, MAX((swf->swp_nsegs >> 3), 1)); + kheap_free(KHEAP_DATA_BUFFERS, swf->swp_bitmap, + MAX((swf->swp_nsegs >> 3), 1)); lck_mtx_lock(&vm_swap_data_lock); @@ -2098,6 +2198,12 @@ vm_swap_get_free_space(void) return vm_swap_get_total_space() - vm_swap_get_used_space(); } +uint64_t +vm_swap_get_max_configured_space(void) +{ + int num_swap_files = (vm_num_swap_files_config ? vm_num_swap_files_config : VM_MAX_SWAP_FILE_NUM); + return num_swap_files * MAX_SWAP_FILE_SIZE; +} int vm_swap_low_on_space(void) @@ -2118,6 +2224,21 @@ vm_swap_low_on_space(void) return 0; } +int +vm_swap_out_of_space(void) +{ + if ((vm_num_swap_files == vm_num_swap_files_config) && + ((vm_swapfile_total_segs_alloced - vm_swapfile_total_segs_used) < VM_SWAPOUT_LIMIT_MAX)) { + /* + * Last swapfile and we have only space for the + * last few swapouts. 
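The swapfile I/O and trim paths above now bracket every use of the swapfile vnode with vnode_getwithref()/vnode_put(), so the vnode cannot be reclaimed while an operation is in flight; vm_swap_put additionally threads the release through a drop_iocount flag into vm_swap_put_finish(). A minimal userspace sketch of the synchronous shape of that reference discipline follows (the types and helpers are hypothetical stand-ins, not XNU interfaces).

#include <stdio.h>

typedef struct { int iocount; int reclaiming; } swapfile_vp_t;

static int
vp_get(swapfile_vp_t *vp)            /* models vnode_getwithref() */
{
    if (vp->reclaiming) {
        return 5;                    /* vnode going away: caller must bail out */
    }
    vp->iocount++;
    return 0;
}

static void
vp_put(swapfile_vp_t *vp)            /* models vnode_put() */
{
    vp->iocount--;
}

static int
swapfile_do_io(swapfile_vp_t *vp)
{
    int error;

    if ((error = vp_get(vp)) != 0) {
        printf("swapfile_do_io: get failed with %d\n", error);
        return error;                /* no reference taken, nothing to drop */
    }
    /* ... vm_swapfile_io() / vnode_trim_list() would run here ... */
    vp_put(vp);                      /* reference held across the whole I/O */
    return 0;
}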
+ */ + return 1; + } + + return 0; +} + boolean_t vm_swap_files_pinned(void) { diff --git a/osfmk/vm/vm_compressor_backing_store.h b/osfmk/vm/vm_compressor_backing_store.h index abc0c3be2..c8a03a235 100644 --- a/osfmk/vm/vm_compressor_backing_store.h +++ b/osfmk/vm/vm_compressor_backing_store.h @@ -28,7 +28,6 @@ #include #include -#include #include #include #include @@ -40,15 +39,15 @@ #if CONFIG_EMBEDDED -#define MIN_SWAP_FILE_SIZE (64 * 1024 * 1024) +#define MIN_SWAP_FILE_SIZE (64 * 1024 * 1024ULL) -#define MAX_SWAP_FILE_SIZE (128 * 1024 * 1024) +#define MAX_SWAP_FILE_SIZE (128 * 1024 * 1024ULL) #else /* CONFIG_EMBEDDED */ -#define MIN_SWAP_FILE_SIZE (256 * 1024 * 1024) +#define MIN_SWAP_FILE_SIZE (256 * 1024 * 1024ULL) -#define MAX_SWAP_FILE_SIZE (1 * 1024 * 1024 * 1024) +#define MAX_SWAP_FILE_SIZE (1 * 1024 * 1024 * 1024ULL) #endif /* CONFIG_EMBEDDED */ @@ -59,8 +58,12 @@ #define SWAPFILE_RECLAIM_THRESHOLD_SEGS ((17 * (MAX_SWAP_FILE_SIZE / COMPRESSED_SWAP_CHUNK_SIZE)) / 10) #define SWAPFILE_RECLAIM_MINIMUM_SEGS ((13 * (MAX_SWAP_FILE_SIZE / COMPRESSED_SWAP_CHUNK_SIZE)) / 10) - +#if defined(XNU_TARGET_OS_OSX) +#define SWAP_FILE_NAME "/System/Volumes/VM/swapfile" +#else #define SWAP_FILE_NAME "/private/var/vm/swapfile" +#endif + #define SWAPFILENAME_LEN (int)(strlen(SWAP_FILE_NAME)) @@ -70,11 +73,6 @@ extern int vm_num_swap_files; struct swapfile; -lck_grp_attr_t vm_swap_data_lock_grp_attr; -lck_grp_t vm_swap_data_lock_grp; -lck_attr_t vm_swap_data_lock_attr; -lck_mtx_ext_t vm_swap_data_lock_ext; -lck_mtx_t vm_swap_data_lock; void vm_swap_init(void); boolean_t vm_swap_create_file(void); @@ -97,7 +95,7 @@ void vm_swapout_iodone(void *, int); static void vm_swapout_finish(c_segment_t, uint64_t, uint32_t, kern_return_t); -kern_return_t vm_swap_put_finish(struct swapfile *, uint64_t *, int); +kern_return_t vm_swap_put_finish(struct swapfile *, uint64_t *, int, boolean_t); kern_return_t vm_swap_put(vm_offset_t, uint64_t*, uint32_t, c_segment_t, struct swapout_io_completion *); void vm_swap_flush(void); @@ -106,6 +104,7 @@ void vm_swap_encrypt(c_segment_t); uint64_t vm_swap_get_total_space(void); uint64_t vm_swap_get_used_space(void); uint64_t vm_swap_get_free_space(void); +uint64_t vm_swap_get_max_configured_space(void); struct vnode; extern void vm_swapfile_open(const char *path, struct vnode **vp); diff --git a/osfmk/vm/vm_compressor_pager.c b/osfmk/vm/vm_compressor_pager.c index a0a93f882..a989ed0c2 100644 --- a/osfmk/vm/vm_compressor_pager.c +++ b/osfmk/vm/vm_compressor_pager.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019 Apple Inc. All rights reserved. + * Copyright (c) 2019-2020 Apple Inc. All rights reserved. 
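Two related details from the hunks above: vm_swap_get_max_configured_space() computes the swap ceiling as the configured number of swapfiles times MAX_SWAP_FILE_SIZE (falling back to VM_MAX_SWAP_FILE_NUM when no config is set), and the header now gives MIN/MAX_SWAP_FILE_SIZE a ULL suffix so exactly that kind of product is evaluated in 64-bit arithmetic. The standalone sketch below (fallback omitted) shows why the suffix matters: with the old int macro the product wraps in 32-bit arithmetic before it is ever widened.

#include <stdint.h>
#include <stdio.h>

#define MAX_SWAP_FILE_SIZE_INT  (1 * 1024 * 1024 * 1024)      /* int arithmetic, pre-patch shape */
#define MAX_SWAP_FILE_SIZE_ULL  (1 * 1024 * 1024 * 1024ULL)   /* 64-bit arithmetic, post-patch shape */

static uint64_t
swap_max_configured_space(unsigned int num_swap_files_config)
{
    /* mirrors vm_swap_get_max_configured_space(): files * per-file maximum */
    return num_swap_files_config * MAX_SWAP_FILE_SIZE_ULL;
}

int
main(void)
{
    unsigned int nfiles = 8;

    /* with the int macro the product wraps in 32-bit unsigned arithmetic */
    uint64_t wrapped = nfiles * MAX_SWAP_FILE_SIZE_INT;
    printf("int macro:  %llu\n", (unsigned long long)wrapped);   /* 0, not 8 GiB */
    printf("ULL macro:  %llu\n",
        (unsigned long long)swap_max_configured_space(nfiles));  /* 8589934592 */
    return 0;
}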
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -178,16 +178,14 @@ typedef struct compressor_pager { zone_t compressor_pager_zone; -lck_grp_t compressor_pager_lck_grp; -lck_grp_attr_t compressor_pager_lck_grp_attr; -lck_attr_t compressor_pager_lck_attr; +LCK_GRP_DECLARE(compressor_pager_lck_grp, "compressor_pager"); #define compressor_pager_lock(_cpgr_) \ lck_mtx_lock(&(_cpgr_)->cpgr_lock) #define compressor_pager_unlock(_cpgr_) \ lck_mtx_unlock(&(_cpgr_)->cpgr_lock) #define compressor_pager_lock_init(_cpgr_) \ - lck_mtx_init(&(_cpgr_)->cpgr_lock, &compressor_pager_lck_grp, &compressor_pager_lck_attr) + lck_mtx_init(&(_cpgr_)->cpgr_lock, &compressor_pager_lck_grp, LCK_ATTR_NULL) #define compressor_pager_lock_destroy(_cpgr_) \ lck_mtx_destroy(&(_cpgr_)->cpgr_lock, &compressor_pager_lck_grp) @@ -205,6 +203,35 @@ void compressor_pager_slot_lookup( memory_object_offset_t offset, compressor_slot_t **slot_pp); +#if defined(__LP64__) + +/* restricted VA zones for slots */ + +#define NUM_SLOTS_ZONES 3 + +static const size_t compressor_slots_zones_sizes[NUM_SLOTS_ZONES] = { + 16, + 64, + COMPRESSOR_SLOTS_CHUNK_SIZE +}; + +static const char * compressor_slots_zones_names[NUM_SLOTS_ZONES] = { + "compressor_slots.16", + "compressor_slots.64", + "compressor_slots.512" +}; + +static zone_t + compressor_slots_zones[NUM_SLOTS_ZONES]; + +#endif /* defined(__LP64__) */ + +static void +zfree_slot_array(compressor_slot_t *slots, size_t size); +static compressor_slot_t * +zalloc_slot_array(size_t size, zalloc_flags_t); + + kern_return_t compressor_memory_object_init( memory_object_t mem_obj, @@ -374,10 +401,10 @@ compressor_memory_object_deallocate( 0, NULL); pager->cpgr_slots.cpgr_islots[i] = NULL; - kfree(chunk, COMPRESSOR_SLOTS_CHUNK_SIZE); + zfree_slot_array(chunk, COMPRESSOR_SLOTS_CHUNK_SIZE); } } - kfree(pager->cpgr_slots.cpgr_islots, + kheap_free(KHEAP_DEFAULT, pager->cpgr_slots.cpgr_islots, num_chunks * sizeof(pager->cpgr_slots.cpgr_islots[0])); pager->cpgr_slots.cpgr_islots = NULL; } else if (pager->cpgr_num_slots > 2) { @@ -389,7 +416,7 @@ compressor_memory_object_deallocate( 0, NULL); pager->cpgr_slots.cpgr_dslots = NULL; - kfree(chunk, + zfree_slot_array(chunk, (pager->cpgr_num_slots * sizeof(pager->cpgr_slots.cpgr_dslots[0]))); } else { @@ -557,11 +584,12 @@ compressor_memory_object_create( num_chunks = (pager->cpgr_num_slots + COMPRESSOR_SLOTS_PER_CHUNK - 1) / COMPRESSOR_SLOTS_PER_CHUNK; if (num_chunks > 1) { - pager->cpgr_slots.cpgr_islots = kalloc(num_chunks * sizeof(pager->cpgr_slots.cpgr_islots[0])); - bzero(pager->cpgr_slots.cpgr_islots, num_chunks * sizeof(pager->cpgr_slots.cpgr_islots[0])); + pager->cpgr_slots.cpgr_islots = kheap_alloc(KHEAP_DEFAULT, + num_chunks * sizeof(pager->cpgr_slots.cpgr_islots[0]), + Z_WAITOK | Z_ZERO); } else if (pager->cpgr_num_slots > 2) { - pager->cpgr_slots.cpgr_dslots = kalloc(pager->cpgr_num_slots * sizeof(pager->cpgr_slots.cpgr_dslots[0])); - bzero(pager->cpgr_slots.cpgr_dslots, pager->cpgr_num_slots * sizeof(pager->cpgr_slots.cpgr_dslots[0])); + pager->cpgr_slots.cpgr_dslots = zalloc_slot_array(pager->cpgr_num_slots * + sizeof(pager->cpgr_slots.cpgr_dslots[0]), Z_WAITOK | Z_ZERO); } else { pager->cpgr_slots.cpgr_eslots[0] = 0; pager->cpgr_slots.cpgr_eslots[1] = 0; @@ -649,8 +677,8 @@ compressor_pager_slot_lookup( chunk = pager->cpgr_slots.cpgr_islots[chunk_idx]; if (chunk == NULL && do_alloc) { - t_chunk = kalloc(COMPRESSOR_SLOTS_CHUNK_SIZE); - bzero(t_chunk, COMPRESSOR_SLOTS_CHUNK_SIZE); + t_chunk = zalloc_slot_array(COMPRESSOR_SLOTS_CHUNK_SIZE, + 
Z_WAITOK | Z_ZERO); compressor_pager_lock(pager); @@ -672,7 +700,7 @@ compressor_pager_slot_lookup( compressor_pager_unlock(pager); if (t_chunk) { - kfree(t_chunk, COMPRESSOR_SLOTS_CHUNK_SIZE); + zfree_slot_array(t_chunk, COMPRESSOR_SLOTS_CHUNK_SIZE); } } if (chunk == NULL) { @@ -693,19 +721,68 @@ compressor_pager_slot_lookup( void vm_compressor_pager_init(void) { - lck_grp_attr_setdefault(&compressor_pager_lck_grp_attr); - lck_grp_init(&compressor_pager_lck_grp, "compressor_pager", &compressor_pager_lck_grp_attr); - lck_attr_setdefault(&compressor_pager_lck_attr); - - compressor_pager_zone = zinit(sizeof(struct compressor_pager), - 10000 * sizeof(struct compressor_pager), - 8192, "compressor_pager"); - zone_change(compressor_pager_zone, Z_CALLERACCT, FALSE); - zone_change(compressor_pager_zone, Z_NOENCRYPT, TRUE); + /* embedded slot pointers in compressor_pager get packed, so VA restricted */ + compressor_pager_zone = zone_create_ext("compressor_pager", + sizeof(struct compressor_pager), ZC_NOENCRYPT, + ZONE_ID_ANY, ^(zone_t z){ +#if defined(__LP64__) + zone_set_submap_idx(z, Z_SUBMAP_IDX_VA_RESTRICTED_MAP); +#else + (void)z; +#endif /* defined(__LP64__) */ + }); + +#if defined(__LP64__) + for (unsigned int idx = 0; idx < NUM_SLOTS_ZONES; idx++) { + compressor_slots_zones[idx] = zone_create_ext( + compressor_slots_zones_names[idx], + compressor_slots_zones_sizes[idx], ZC_NONE, + ZONE_ID_ANY, ^(zone_t z){ + zone_set_submap_idx(z, Z_SUBMAP_IDX_VA_RESTRICTED_MAP); + }); + } +#endif /* defined(__LP64__) */ vm_compressor_init(); } +static compressor_slot_t * +zalloc_slot_array(size_t size, zalloc_flags_t flags) +{ +#if defined(__LP64__) + compressor_slot_t *slots = NULL; + + assert(size <= COMPRESSOR_SLOTS_CHUNK_SIZE); + for (unsigned int idx = 0; idx < NUM_SLOTS_ZONES; idx++) { + if (size > compressor_slots_zones_sizes[idx]) { + continue; + } + slots = zalloc_flags(compressor_slots_zones[idx], flags); + break; + } + return slots; +#else /* defined(__LP64__) */ + return kheap_alloc(KHEAP_DATA_BUFFERS, size, flags); +#endif /* !defined(__LP64__) */ +} + +static void +zfree_slot_array(compressor_slot_t *slots, size_t size) +{ +#if defined(__LP64__) + assert(size <= COMPRESSOR_SLOTS_CHUNK_SIZE); + for (unsigned int idx = 0; idx < NUM_SLOTS_ZONES; idx++) { + if (size > compressor_slots_zones_sizes[idx]) { + continue; + } + zfree(compressor_slots_zones[idx], slots); + break; + } +#else /* defined(__LP64__) */ + kheap_free(KHEAP_DATA_BUFFERS, slots, size); +#endif /* !defined(__LP64__) */ +} + kern_return_t vm_compressor_pager_put( memory_object_t mem_obj, @@ -950,7 +1027,7 @@ vm_compressor_pager_reap_pages( &failures); if (failures == 0) { pager->cpgr_slots.cpgr_islots[i] = NULL; - kfree(chunk, COMPRESSOR_SLOTS_CHUNK_SIZE); + zfree_slot_array(chunk, COMPRESSOR_SLOTS_CHUNK_SIZE); } } } @@ -1176,3 +1253,29 @@ vm_compressor_pager_relocate( return vm_compressor_relocate(current_chead, slot_p); } #endif /* CONFIG_FREEZE */ + +#if DEVELOPMENT || DEBUG + +kern_return_t +vm_compressor_pager_inject_error(memory_object_t mem_obj, + memory_object_offset_t offset) +{ + kern_return_t result = KERN_FAILURE; + compressor_slot_t *slot_p; + compressor_pager_t pager; + + assert(mem_obj); + + compressor_pager_lookup(mem_obj, pager); + if (pager != NULL) { + compressor_pager_slot_lookup(pager, FALSE, offset, &slot_p); + if (slot_p != NULL && *slot_p != 0) { + vm_compressor_inject_error(slot_p); + result = KERN_SUCCESS; + } + } + + return result; +} + +#endif diff --git a/osfmk/vm/vm_compressor_pager.h 
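On LP64, the compressor slot arrays above now come from three dedicated zones (16, 64, and COMPRESSOR_SLOTS_CHUNK_SIZE bytes) created in a VA-restricted submap; zalloc_slot_array() picks the smallest class that fits the request and zfree_slot_array() recomputes the same class from the size so alloc and free always agree. A userspace model of that size-class selection, with 512 standing in for COMPRESSOR_SLOTS_CHUNK_SIZE and calloc standing in for a zeroing zone allocation:

#include <assert.h>
#include <stddef.h>
#include <stdlib.h>

#define NUM_SLOTS_CLASSES 3
static const size_t slots_class_sizes[NUM_SLOTS_CLASSES] = { 16, 64, 512 };

static size_t
slots_class_for(size_t size)
{
    assert(size <= slots_class_sizes[NUM_SLOTS_CLASSES - 1]);
    for (unsigned int idx = 0; idx < NUM_SLOTS_CLASSES; idx++) {
        if (size <= slots_class_sizes[idx]) {
            return slots_class_sizes[idx];   /* first class large enough */
        }
    }
    return 0; /* unreachable given the assert */
}

static void *
alloc_slot_array(size_t size)
{
    /* calloc stands in for zalloc_flags(zone, Z_WAITOK | Z_ZERO) */
    return calloc(1, slots_class_for(size));
}

static void
free_slot_array(void *slots, size_t size)
{
    (void)slots_class_for(size);   /* the real code re-derives the matching zone here */
    free(slots);
}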
b/osfmk/vm/vm_compressor_pager.h index 26ace8f15..65c60e112 100644 --- a/osfmk/vm/vm_compressor_pager.h +++ b/osfmk/vm/vm_compressor_pager.h @@ -127,6 +127,12 @@ extern kern_return_t vm_compressor_relocate(void **current_chead, int *src_slot_ extern void vm_compressor_finished_filling(void **current_chead); #endif /* CONFIG_FREEZE */ +#if DEVELOPMENT || DEBUG +extern kern_return_t vm_compressor_pager_inject_error(memory_object_t pager, + memory_object_offset_t offset); +extern void vm_compressor_inject_error(int *slot); +#endif /* DEVELOPMENT || DEBUG */ + #endif /* _VM_VM_COMPRESSOR_PAGER_H_ */ #endif /* XNU_KERNEL_PRIVATE */ diff --git a/osfmk/vm/vm_fault.c b/osfmk/vm/vm_fault.c index e558508d7..fb7ce2544 100644 --- a/osfmk/vm/vm_fault.c +++ b/osfmk/vm/vm_fault.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2018 Apple Inc. All rights reserved. + * Copyright (c) 2000-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -227,21 +227,22 @@ struct vmrtfr { } vmrtfrs; #define VMRTF_DEFAULT_BUFSIZE (4096) #define VMRTF_NUM_RECORDS_DEFAULT (VMRTF_DEFAULT_BUFSIZE / sizeof(vm_rtfault_record_t)) -int vmrtf_num_records = VMRTF_NUM_RECORDS_DEFAULT; +TUNABLE(int, vmrtf_num_records, "vm_rtfault_records", VMRTF_NUM_RECORDS_DEFAULT); static void vm_rtfrecord_lock(void); static void vm_rtfrecord_unlock(void); static void vm_record_rtfault(thread_t, uint64_t, vm_map_offset_t, int); -lck_spin_t vm_rtfr_slock; extern lck_grp_t vm_page_lck_grp_bucket; extern lck_attr_t vm_page_lck_attr; +LCK_SPIN_DECLARE_ATTR(vm_rtfr_slock, &vm_page_lck_grp_bucket, &vm_page_lck_attr); /* * Routine: vm_fault_init * Purpose: * Initialize our private data structures. */ +__startup_func void vm_fault_init(void) { @@ -264,8 +265,7 @@ vm_fault_init(void) if (PE_parse_boot_argn("vm_compressor", &vm_compressor_temp, sizeof(vm_compressor_temp))) { for (i = 0; i < VM_PAGER_MAX_MODES; i++) { - if (vm_compressor_temp > 0 && - ((vm_compressor_temp & (1 << i)) == vm_compressor_temp)) { + if (((vm_compressor_temp & (1 << i)) == vm_compressor_temp)) { need_default_val = FALSE; vm_compressor_mode = vm_compressor_temp; break; @@ -281,22 +281,25 @@ vm_fault_init(void) } printf("\"vm_compressor_mode\" is %d\n", vm_compressor_mode); - PE_parse_boot_argn("vm_protect_privileged_from_untrusted", &vm_protect_privileged_from_untrusted, sizeof(vm_protect_privileged_from_untrusted)); + PE_parse_boot_argn("vm_protect_privileged_from_untrusted", + &vm_protect_privileged_from_untrusted, + sizeof(vm_protect_privileged_from_untrusted)); } -void +__startup_func +static void vm_rtfault_record_init(void) { - PE_parse_boot_argn("vm_rtfault_records", &vmrtf_num_records, sizeof(vmrtf_num_records)); + size_t size; - assert(vmrtf_num_records >= 1); vmrtf_num_records = MAX(vmrtf_num_records, 1); - size_t kallocsz = vmrtf_num_records * sizeof(vm_rtfault_record_t); - vmrtfrs.vm_rtf_records = kalloc(kallocsz); - bzero(vmrtfrs.vm_rtf_records, kallocsz); + size = vmrtf_num_records * sizeof(vm_rtfault_record_t); + vmrtfrs.vm_rtf_records = zalloc_permanent(size, + ZALIGN(vm_rtfault_record_t)); vmrtfrs.vmrtfr_maxi = vmrtf_num_records - 1; - lck_spin_init(&vm_rtfr_slock, &vm_page_lck_grp_bucket, &vm_page_lck_attr); } +STARTUP(ZALLOC, STARTUP_RANK_MIDDLE, vm_rtfault_record_init); + /* * Routine: vm_fault_cleanup * Purpose: @@ -368,6 +371,12 @@ vm_fault_is_sequential( sequential = object->sequential; orig_sequential = sequential; + offset = vm_object_trunc_page(offset); + if (offset == last_alloc && behavior != VM_BEHAVIOR_RANDOM) { + /* 
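In the vm_fault.c hunk above, the rtfault record buffer moves from boot-arg parsing plus kalloc/bzero to a TUNABLE plus zalloc_permanent(), with the count clamped to at least one record and registration handled by the STARTUP() macro. The sketch below models only the clamping and sizing; the record layout is a hypothetical stand-in and malloc/memset replace zalloc_permanent() for illustration.

#include <stdlib.h>
#include <string.h>

typedef struct {
    unsigned long long fault_time;
    unsigned long long fault_addr;
} rtfault_record_t;                  /* illustrative fields only */

static rtfault_record_t *rtf_records;
static int rtf_max_index;

static void
rtfault_record_init(int num_records)   /* boot-arg value or the compiled-in default */
{
    size_t size;

    if (num_records < 1) {
        num_records = 1;             /* mirrors MAX(vmrtf_num_records, 1) */
    }
    size = (size_t)num_records * sizeof(rtfault_record_t);
    rtf_records = malloc(size);      /* zalloc_permanent() in the kernel */
    memset(rtf_records, 0, size);
    rtf_max_index = num_records - 1; /* mirrors vmrtfr_maxi */
}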
re-faulting in the same page: no change in behavior */ + return; + } + switch (behavior) { case VM_BEHAVIOR_RANDOM: /* @@ -497,12 +506,16 @@ vm_fault_deactivate_behind( #if TRACEFAULTPAGE dbgTrace(0xBEEF0018, (unsigned int) object, (unsigned int) vm_fault_deactivate_behind); /* (TEST/DEBUG) */ #endif - - if (object == kernel_object || vm_page_deactivate_behind == FALSE) { + if (object == kernel_object || vm_page_deactivate_behind == FALSE || (vm_object_trunc_page(offset) != offset)) { /* * Do not deactivate pages from the kernel object: they * are not intended to become pageable. * or we've disabled the deactivate behind mechanism + * or we are dealing with an offset that is not aligned to + * the system's PAGE_SIZE because in that case we will + * handle the deactivation on the aligned offset and, thus, + * the full PAGE_SIZE page once. This helps us avoid the redundant + * deactivates and the extra faults. */ return FALSE; } @@ -748,6 +761,37 @@ vm_fault_check(vm_object_t object, vm_page_t m, vm_page_t first_m, wait_interrup return VM_FAULT_SUCCESS; } +/* + * Clear the code signing bits on the given page_t + */ +static void +vm_fault_cs_clear(vm_page_t m) +{ + m->vmp_cs_validated = VMP_CS_ALL_FALSE; + m->vmp_cs_tainted = VMP_CS_ALL_FALSE; + m->vmp_cs_nx = VMP_CS_ALL_FALSE; +} + +/* + * Enqueues the given page on the throttled queue. + * The caller must hold the vm_page_queue_lock and it will be held on return. + */ +static void +vm_fault_enqueue_throttled_locked(vm_page_t m) +{ + LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED); + assert(!VM_PAGE_WIRED(m)); + + /* + * can't be on the pageout queue since we don't + * have a pager to try and clean to + */ + vm_page_queues_remove(m, TRUE); + vm_page_check_pageable_safe(m); + vm_page_queue_enter(&vm_page_queue_throttled, m, vmp_pageq); + m->vmp_q_state = VM_PAGE_ON_THROTTLED_Q; + vm_page_throttled_count++; +} /* * do the work to zero fill a page and @@ -779,12 +823,9 @@ vm_fault_zero_page(vm_page_t m, boolean_t no_zero_fill) * sending a program into this area. 
We * choose this approach for performance */ + vm_fault_cs_clear(m); m->vmp_pmapped = TRUE; - m->vmp_cs_validated = FALSE; - m->vmp_cs_tainted = FALSE; - m->vmp_cs_nx = FALSE; - if (no_zero_fill == TRUE) { my_fault = DBG_NZF_PAGE_FAULT; @@ -800,25 +841,13 @@ vm_fault_zero_page(vm_page_t m, boolean_t no_zero_fill) assert(!m->vmp_laundry); assert(object != kernel_object); //assert(m->vmp_pageq.next == 0 && m->vmp_pageq.prev == 0); - if (!VM_DYNAMIC_PAGING_ENABLED() && (object->purgable == VM_PURGABLE_DENY || object->purgable == VM_PURGABLE_NONVOLATILE || object->purgable == VM_PURGABLE_VOLATILE)) { vm_page_lockspin_queues(); - if (!VM_DYNAMIC_PAGING_ENABLED()) { - assert(!VM_PAGE_WIRED(m)); - - /* - * can't be on the pageout queue since we don't - * have a pager to try and clean to - */ - vm_page_queues_remove(m, TRUE); - vm_page_check_pageable_safe(m); - vm_page_queue_enter(&vm_page_queue_throttled, m, vmp_pageq); - m->vmp_q_state = VM_PAGE_ON_THROTTLED_Q; - vm_page_throttled_count++; + vm_fault_enqueue_throttled_locked(m); } vm_page_unlock_queues(); } @@ -1072,7 +1101,7 @@ vm_fault_page( m = *result_page; caller_lookup = FALSE; /* no longer valid after that */ } else { - m = vm_page_lookup(object, offset); + m = vm_page_lookup(object, vm_object_trunc_page(offset)); } #if TRACEFAULTPAGE dbgTrace(0xBEEF0004, (unsigned int) m, (unsigned int) object); /* (TEST/DEBUG) */ @@ -1313,7 +1342,7 @@ vm_fault_page( vm_object_lock(object); assert(object->ref_count > 0); - m = vm_page_lookup(object, offset); + m = vm_page_lookup(object, vm_object_trunc_page(offset)); if (m != VM_PAGE_NULL && m->vmp_cleaning) { PAGE_ASSERT_WAIT(m, interruptible); @@ -1440,9 +1469,11 @@ vm_fault_page( } if (fault_info && fault_info->batch_pmap_op == TRUE) { - vm_page_insert_internal(m, object, offset, VM_KERN_MEMORY_NONE, FALSE, TRUE, TRUE, FALSE, NULL); + vm_page_insert_internal(m, object, + vm_object_trunc_page(offset), + VM_KERN_MEMORY_NONE, FALSE, TRUE, TRUE, FALSE, NULL); } else { - vm_page_insert(m, object, offset); + vm_page_insert(m, object, vm_object_trunc_page(offset)); } } if (look_for_page) { @@ -1551,9 +1582,9 @@ vm_fault_page( m->vmp_absent = TRUE; if (fault_info && fault_info->batch_pmap_op == TRUE) { - vm_page_insert_internal(m, object, offset, VM_KERN_MEMORY_NONE, FALSE, TRUE, TRUE, FALSE, NULL); + vm_page_insert_internal(m, object, vm_object_trunc_page(offset), VM_KERN_MEMORY_NONE, FALSE, TRUE, TRUE, FALSE, NULL); } else { - vm_page_insert(m, object, offset); + vm_page_insert(m, object, vm_object_trunc_page(offset)); } } assert(m->vmp_busy); @@ -1754,7 +1785,7 @@ vm_fault_page( */ rc = memory_object_data_request( pager, - offset + object->paging_offset, + vm_object_trunc_page(offset) + object->paging_offset, PAGE_SIZE, access_required | wants_copy_flag, (memory_object_fault_info_t)fault_info); @@ -1897,7 +1928,7 @@ dont_look_for_page: return VM_FAULT_MEMORY_SHORTAGE; } - vm_page_insert(m, object, offset); + vm_page_insert(m, object, vm_object_trunc_page(offset)); } if (fault_info->mark_zf_absent && no_zero_fill == TRUE) { m->vmp_absent = TRUE; @@ -2068,7 +2099,7 @@ dont_look_for_page: * page we just copied into */ assert(copy_m->vmp_busy); - vm_page_insert(copy_m, object, offset); + vm_page_insert(copy_m, object, vm_object_trunc_page(offset)); SET_PAGE_DIRTY(copy_m, TRUE); m = copy_m; @@ -2079,7 +2110,7 @@ dont_look_for_page: * paging_in_progress to do that... 
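Several lookups and insertions above now pass the fault offset through vm_object_trunc_page() before touching the VM object, so a sub-page fault (for example a 4K fault against a 16K backing page) resolves to the single backing page while the intra-page remainder travels separately as a physical offset. The arithmetic is simple masking; 16K is just an example page size here.

#include <assert.h>
#include <stdint.h>

#define OBJ_PAGE_SIZE 16384ULL

static uint64_t
obj_trunc_page(uint64_t offset)
{
    return offset & ~(OBJ_PAGE_SIZE - 1);   /* round down to a page boundary */
}

int
main(void)
{
    uint64_t fault_offset = 5 * OBJ_PAGE_SIZE + 0x3000; /* lands mid-page */
    uint64_t page_offset  = obj_trunc_page(fault_offset);
    uint64_t phys_offset  = fault_offset - page_offset; /* sub-page remainder */

    assert(page_offset == 5 * OBJ_PAGE_SIZE);
    assert(phys_offset == 0x3000);
    return 0;
}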
*/ vm_object_paging_end(object); - vm_object_collapse(object, offset, TRUE); + vm_object_collapse(object, vm_object_trunc_page(offset), TRUE); vm_object_paging_begin(object); } else { *protection &= (~VM_PROT_WRITE); @@ -2143,6 +2174,7 @@ dont_look_for_page: * Does the page exist in the copy? */ copy_offset = first_offset - copy_object->vo_shadow_offset; + copy_offset = vm_object_trunc_page(copy_offset); if (copy_object->vo_size <= copy_offset) { /* @@ -2398,6 +2430,11 @@ backoff: } +extern int panic_on_cs_killed; +extern int proc_selfpid(void); +extern char *proc_name_address(void *p); +unsigned long cs_enter_tainted_rejected = 0; +unsigned long cs_enter_tainted_accepted = 0; /* * CODE SIGNING: @@ -2407,132 +2444,106 @@ backoff: * 3. the page belongs to a code-signed object * 4. the page has not been validated yet or has been mapped for write. */ -#define VM_FAULT_NEED_CS_VALIDATION(pmap, page, page_obj) \ - ((pmap) != kernel_pmap /*1*/ && \ - !(page)->vmp_cs_tainted /*2*/ && \ - (page_obj)->code_signed /*3*/ && \ - (!(page)->vmp_cs_validated || (page)->vmp_wpmapped /*4*/ )) - - -/* - * page queue lock must NOT be held - * m->vmp_object must be locked - * - * NOTE: m->vmp_object could be locked "shared" only if we are called - * from vm_fault() as part of a soft fault. If so, we must be - * careful not to modify the VM object in any way that is not - * legal under a shared lock... - */ -extern int panic_on_cs_killed; -extern int proc_selfpid(void); -extern char *proc_name_address(void *p); -unsigned long cs_enter_tainted_rejected = 0; -unsigned long cs_enter_tainted_accepted = 0; -kern_return_t -vm_fault_enter(vm_page_t m, - pmap_t pmap, - vm_map_offset_t vaddr, - vm_prot_t prot, - vm_prot_t caller_prot, - boolean_t wired, - boolean_t change_wiring, - vm_tag_t wire_tag, - vm_object_fault_info_t fault_info, - boolean_t *need_retry, - int *type_of_fault) +static bool +vm_fault_cs_need_validation( + pmap_t pmap, + vm_page_t page, + vm_object_t page_obj, + vm_map_size_t fault_page_size, + vm_map_offset_t fault_phys_offset) { - kern_return_t kr, pe_result; - boolean_t previously_pmapped = m->vmp_pmapped; - boolean_t must_disconnect = 0; - boolean_t map_is_switched, map_is_switch_protected; - boolean_t cs_violation; - int cs_enforcement_enabled; - vm_prot_t fault_type; - vm_object_t object; - boolean_t no_cache = fault_info->no_cache; - boolean_t cs_bypass = fault_info->cs_bypass; - int pmap_options = fault_info->pmap_options; - - fault_type = change_wiring ? VM_PROT_NONE : caller_prot; - object = VM_PAGE_OBJECT(m); - - vm_object_lock_assert_held(object); - -#if KASAN if (pmap == kernel_pmap) { - kasan_notify_address(vaddr, PAGE_SIZE); + /* 1 - not user space */ + return false; } -#endif - - LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_NOTOWNED); - - if (VM_PAGE_GET_PHYS_PAGE(m) == vm_page_guard_addr) { - assert(m->vmp_fictitious); - return KERN_SUCCESS; + if (!page_obj->code_signed) { + /* 3 - page does not belong to a code-signed object */ + return false; } - - if (*type_of_fault == DBG_ZERO_FILL_FAULT) { - vm_object_lock_assert_exclusive(object); - } else if ((fault_type & VM_PROT_WRITE) == 0 && - (!m->vmp_wpmapped -#if VM_OBJECT_ACCESS_TRACKING - || object->access_tracking -#endif /* VM_OBJECT_ACCESS_TRACKING */ - )) { - /* - * This is not a "write" fault, so we - * might not have taken the object lock - * exclusively and we might not be able - * to update the "wpmapped" bit in - * vm_fault_enter(). 
- * Let's just grant read access to - * the page for now and we'll - * soft-fault again if we need write - * access later... - */ - - /* This had better not be a JIT page. */ - if (!pmap_has_prot_policy(prot)) { - prot &= ~VM_PROT_WRITE; - } else { - assert(cs_bypass); + if (fault_page_size == PAGE_SIZE) { + /* looking at the whole page */ + assertf(fault_phys_offset == 0, + "fault_page_size 0x%llx fault_phys_offset 0x%llx\n", + (uint64_t)fault_page_size, + (uint64_t)fault_phys_offset); + if (page->vmp_cs_tainted == VMP_CS_ALL_TRUE) { + /* 2 - page is all tainted */ + return false; } - } - if (m->vmp_pmapped == FALSE) { - if (m->vmp_clustered) { - if (*type_of_fault == DBG_CACHE_HIT_FAULT) { - /* - * found it in the cache, but this - * is the first fault-in of the page (m->vmp_pmapped == FALSE) - * so it must have come in as part of - * a cluster... account 1 pagein against it - */ - if (object->internal) { - *type_of_fault = DBG_PAGEIND_FAULT; - } else { - *type_of_fault = DBG_PAGEINV_FAULT; - } - - VM_PAGE_COUNT_AS_PAGEIN(m); - } - VM_PAGE_CONSUME_CLUSTERED(m); + if (page->vmp_cs_validated == VMP_CS_ALL_TRUE && + !page->vmp_wpmapped) { + /* 4 - already fully validated and never mapped writable */ + return false; + } + } else { + /* looking at a specific sub-page */ + if (VMP_CS_TAINTED(page, fault_page_size, fault_phys_offset)) { + /* 2 - sub-page was already marked as tainted */ + return false; + } + if (VMP_CS_VALIDATED(page, fault_page_size, fault_phys_offset) && + !page->vmp_wpmapped) { + /* 4 - already validated and never mapped writable */ + return false; } } + /* page needs to be validated */ + return true; +} - if (*type_of_fault != DBG_COW_FAULT) { - DTRACE_VM2(as_fault, int, 1, (uint64_t *), NULL); - if (pmap == kernel_pmap) { - DTRACE_VM2(kernel_asflt, int, 1, (uint64_t *), NULL); - } +static bool +vm_fault_cs_page_immutable( + vm_page_t m, + vm_map_size_t fault_page_size, + vm_map_offset_t fault_phys_offset, + vm_prot_t prot __unused) +{ + if (VMP_CS_VALIDATED(m, fault_page_size, fault_phys_offset) + /*&& ((prot) & VM_PROT_EXECUTE)*/) { + return true; } + return false; +} - /* Validate code signature if necessary. */ +static bool +vm_fault_cs_page_nx( + vm_page_t m, + vm_map_size_t fault_page_size, + vm_map_offset_t fault_phys_offset) +{ + return VMP_CS_NX(m, fault_page_size, fault_phys_offset); +} + +/* + * Check if the page being entered into the pmap violates code signing. 
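vm_fault_cs_need_validation() above distinguishes whole-page faults (where the VMP_CS_ALL_* values mean every sub-page bit is set) from sub-page faults (where only the bit for the faulting 4K sub-page is consulted). The model below keeps one "validated" and one "tainted" bit per sub-page of a 16K page; the kernel-pmap and non-code-signed-object early exits are omitted, and the layout is illustrative, not the real vm_page_t fields.

#include <stdbool.h>
#include <stdint.h>

#define SUBPAGES_PER_PAGE 4u
#define CS_ALL_TRUE ((1u << SUBPAGES_PER_PAGE) - 1)   /* 0b1111 */

struct page_cs {
    uint8_t validated;   /* bit i: sub-page i validated */
    uint8_t tainted;     /* bit i: sub-page i tainted   */
    bool    wpmapped;    /* ever mapped writable        */
};

static bool
cs_need_validation(const struct page_cs *p, unsigned int subpage)
{
    uint8_t bit = (uint8_t)(1u << subpage);

    if (p->tainted & bit) {
        return false;            /* already known bad: nothing left to validate */
    }
    if ((p->validated & bit) && !p->wpmapped) {
        return false;            /* validated and never writable-mapped */
    }
    return true;                 /* needs (re)validation */
}

static bool
cs_page_fully_validated(const struct page_cs *p)
{
    return p->validated == CS_ALL_TRUE && !p->wpmapped;
}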
+ */ +static kern_return_t +vm_fault_cs_check_violation( + bool cs_bypass, + vm_object_t object, + vm_page_t m, + pmap_t pmap, + vm_prot_t prot, + vm_prot_t caller_prot, + vm_map_size_t fault_page_size, + vm_map_offset_t fault_phys_offset, + vm_object_fault_info_t fault_info, + bool map_is_switched, + bool map_is_switch_protected, + bool *cs_violation) +{ +#if !PMAP_CS +#pragma unused(caller_prot) +#pragma unused(fault_info) +#endif /* !PMAP_CS */ + int cs_enforcement_enabled; if (!cs_bypass && - VM_FAULT_NEED_CS_VALIDATION(pmap, m, object)) { + vm_fault_cs_need_validation(pmap, m, object, + fault_page_size, fault_phys_offset)) { vm_object_lock_assert_exclusive(object); - if (m->vmp_cs_validated) { + if (VMP_CS_VALIDATED(m, fault_page_size, fault_phys_offset)) { vm_cs_revalidates++; } @@ -2542,9 +2553,9 @@ vm_fault_enter(vm_page_t m, #if PMAP_CS if (fault_info->pmap_cs_associated && pmap_cs_enforced(pmap) && - !m->vmp_cs_validated && - !m->vmp_cs_tainted && - !m->vmp_cs_nx && + !VMP_CS_VALIDATED(m, fault_page_size, fault_phys_offset) && + !VMP_CS_TAINTED(m, fault_page_size, fault_phys_offset) && + !VMP_CS_NX(m, fault_page_size, fault_phys_offset) && (prot & VM_PROT_EXECUTE) && (caller_prot & VM_PROT_EXECUTE)) { /* @@ -2556,20 +2567,13 @@ vm_fault_enter(vm_page_t m, vm_cs_defer_to_pmap_cs++; } else { vm_cs_defer_to_pmap_cs_not++; - vm_page_validate_cs(m); + vm_page_validate_cs(m, fault_page_size, fault_phys_offset); } #else /* PMAP_CS */ - vm_page_validate_cs(m); + vm_page_validate_cs(m, fault_page_size, fault_phys_offset); #endif /* PMAP_CS */ } -#define page_immutable(m, prot) ((m)->vmp_cs_validated /*&& ((prot) & VM_PROT_EXECUTE)*/ ) -#define page_nx(m) ((m)->vmp_cs_nx) - - map_is_switched = ((pmap != vm_map_pmap(current_task()->map)) && - (pmap == vm_map_pmap(current_thread()->map))); - map_is_switch_protected = current_thread()->map->switch_protect; - /* If the map is switched, and is switch-protected, we must protect * some pages from being write-faulted: immutable pages because by * definition they may not be written, and executable pages because that @@ -2581,19 +2585,22 @@ vm_fault_enter(vm_page_t m, * PMAP_ENTER. 
*/ if (pmap == kernel_pmap) { - /* kernel fault: cs_process_enforcement() does not apply */ + /* kernel fault: cs_enforcement does not apply */ cs_enforcement_enabled = 0; } else { - cs_enforcement_enabled = cs_process_enforcement(NULL); + cs_enforcement_enabled = pmap_get_vm_map_cs_enforced(pmap); } if (cs_enforcement_enabled && map_is_switched && - map_is_switch_protected && page_immutable(m, prot) && + map_is_switch_protected && + vm_fault_cs_page_immutable(m, fault_page_size, fault_phys_offset, prot) && (prot & VM_PROT_WRITE)) { return KERN_CODESIGN_ERROR; } - if (cs_enforcement_enabled && page_nx(m) && (prot & VM_PROT_EXECUTE)) { + if (cs_enforcement_enabled && + vm_fault_cs_page_nx(m, fault_page_size, fault_phys_offset) && + (prot & VM_PROT_EXECUTE)) { if (cs_debug) { printf("page marked to be NX, not letting it be mapped EXEC\n"); } @@ -2612,14 +2619,14 @@ vm_fault_enter(vm_page_t m, */ if (cs_bypass) { /* code-signing is bypassed */ - cs_violation = FALSE; - } else if (m->vmp_cs_tainted) { + *cs_violation = FALSE; + } else if (VMP_CS_TAINTED(m, fault_page_size, fault_phys_offset)) { /* tainted page */ - cs_violation = TRUE; + *cs_violation = TRUE; } else if (!cs_enforcement_enabled) { /* no further code-signing enforcement */ - cs_violation = FALSE; - } else if (page_immutable(m, prot) && + *cs_violation = FALSE; + } else if (vm_fault_cs_page_immutable(m, fault_page_size, fault_phys_offset, prot) && ((prot & VM_PROT_WRITE) || m->vmp_wpmapped)) { /* @@ -2637,8 +2644,8 @@ vm_fault_enter(vm_page_t m, * another map later, we will disconnect it from this pmap so * we'll notice the change. */ - cs_violation = TRUE; - } else if (!m->vmp_cs_validated && + *cs_violation = TRUE; + } else if (!VMP_CS_VALIDATED(m, fault_page_size, fault_phys_offset) && (prot & VM_PROT_EXECUTE) #if PMAP_CS /* @@ -2650,128 +2657,255 @@ vm_fault_enter(vm_page_t m, && !(pmap_cs_enforced(pmap)) #endif /* PMAP_CS */ ) { - cs_violation = TRUE; + *cs_violation = TRUE; } else { - cs_violation = FALSE; + *cs_violation = FALSE; } + return KERN_SUCCESS; +} - if (cs_violation) { - /* We will have a tainted page. Have to handle the special case - * of a switched map now. If the map is not switched, standard - * procedure applies - call cs_invalid_page(). - * If the map is switched, the real owner is invalid already. - * There is no point in invalidating the switching process since - * it will not be executing from the map. So we don't call - * cs_invalid_page() in that case. */ - boolean_t reject_page, cs_killed; - if (map_is_switched) { - assert(pmap == vm_map_pmap(current_thread()->map)); - assert(!(prot & VM_PROT_WRITE) || (map_is_switch_protected == FALSE)); - reject_page = FALSE; - } else { - if (cs_debug > 5) { - printf("vm_fault: signed: %s validate: %s tainted: %s wpmapped: %s prot: 0x%x\n", - object->code_signed ? "yes" : "no", - m->vmp_cs_validated ? "yes" : "no", - m->vmp_cs_tainted ? "yes" : "no", - m->vmp_wpmapped ? "yes" : "no", - (int)prot); - } - reject_page = cs_invalid_page((addr64_t) vaddr, &cs_killed); +/* + * Handles a code signing violation by either rejecting the page or forcing a disconnect. + * @param must_disconnect This value will be set to true if the caller must disconnect + * this page. + * @return If this function does not return KERN_SUCCESS, the caller must abort the page fault. 
+ */ +static kern_return_t +vm_fault_cs_handle_violation( + vm_object_t object, + vm_page_t m, + pmap_t pmap, + vm_prot_t prot, + vm_map_offset_t vaddr, + vm_map_size_t fault_page_size, + vm_map_offset_t fault_phys_offset, + bool map_is_switched, + bool map_is_switch_protected, + bool *must_disconnect) +{ +#if !MACH_ASSERT +#pragma unused(pmap) +#pragma unused(map_is_switch_protected) +#endif /* !MACH_ASSERT */ + /* + * We will have a tainted page. Have to handle the special case + * of a switched map now. If the map is not switched, standard + * procedure applies - call cs_invalid_page(). + * If the map is switched, the real owner is invalid already. + * There is no point in invalidating the switching process since + * it will not be executing from the map. So we don't call + * cs_invalid_page() in that case. + */ + boolean_t reject_page, cs_killed; + kern_return_t kr; + if (map_is_switched) { + assert(pmap == vm_map_pmap(current_thread()->map)); + assert(!(prot & VM_PROT_WRITE) || (map_is_switch_protected == FALSE)); + reject_page = FALSE; + } else { + if (cs_debug > 5) { + printf("vm_fault: signed: %s validate: %s tainted: %s wpmapped: %s prot: 0x%x\n", + object->code_signed ? "yes" : "no", + VMP_CS_VALIDATED(m, fault_page_size, fault_phys_offset) ? "yes" : "no", + VMP_CS_TAINTED(m, fault_page_size, fault_phys_offset) ? "yes" : "no", + m->vmp_wpmapped ? "yes" : "no", + (int)prot); } + reject_page = cs_invalid_page((addr64_t) vaddr, &cs_killed); + } - if (reject_page) { - /* reject the invalid page: abort the page fault */ - int pid; - const char *procname; - task_t task; - vm_object_t file_object, shadow; - vm_object_offset_t file_offset; - char *pathname, *filename; - vm_size_t pathname_len, filename_len; - boolean_t truncated_path; + if (reject_page) { + /* reject the invalid page: abort the page fault */ + int pid; + const char *procname; + task_t task; + vm_object_t file_object, shadow; + vm_object_offset_t file_offset; + char *pathname, *filename; + vm_size_t pathname_len, filename_len; + boolean_t truncated_path; #define __PATH_MAX 1024 - struct timespec mtime, cs_mtime; - int shadow_depth; - os_reason_t codesigning_exit_reason = OS_REASON_NULL; - - kr = KERN_CODESIGN_ERROR; - cs_enter_tainted_rejected++; - - /* get process name and pid */ - procname = "?"; - task = current_task(); - pid = proc_selfpid(); - if (task->bsd_info != NULL) { - procname = proc_name_address(task->bsd_info); - } - - /* get file's VM object */ - file_object = object; - file_offset = m->vmp_offset; - for (shadow = file_object->shadow, - shadow_depth = 0; - shadow != VM_OBJECT_NULL; - shadow = file_object->shadow, - shadow_depth++) { - vm_object_lock_shared(shadow); - if (file_object != object) { - vm_object_unlock(file_object); + struct timespec mtime, cs_mtime; + int shadow_depth; + os_reason_t codesigning_exit_reason = OS_REASON_NULL; + + kr = KERN_CODESIGN_ERROR; + cs_enter_tainted_rejected++; + + /* get process name and pid */ + procname = "?"; + task = current_task(); + pid = proc_selfpid(); + if (task->bsd_info != NULL) { + procname = proc_name_address(task->bsd_info); + } + + /* get file's VM object */ + file_object = object; + file_offset = m->vmp_offset; + for (shadow = file_object->shadow, + shadow_depth = 0; + shadow != VM_OBJECT_NULL; + shadow = file_object->shadow, + shadow_depth++) { + vm_object_lock_shared(shadow); + if (file_object != object) { + vm_object_unlock(file_object); + } + file_offset += file_object->vo_shadow_offset; + file_object = shadow; + } + + mtime.tv_sec = 0; + 
mtime.tv_nsec = 0; + cs_mtime.tv_sec = 0; + cs_mtime.tv_nsec = 0; + + /* get file's pathname and/or filename */ + pathname = NULL; + filename = NULL; + pathname_len = 0; + filename_len = 0; + truncated_path = FALSE; + /* no pager -> no file -> no pathname, use "" in that case */ + if (file_object->pager != NULL) { + pathname = kheap_alloc(KHEAP_TEMP, __PATH_MAX * 2, Z_WAITOK); + if (pathname) { + pathname[0] = '\0'; + pathname_len = __PATH_MAX; + filename = pathname + pathname_len; + filename_len = __PATH_MAX; + + if (vnode_pager_get_object_name(file_object->pager, + pathname, + pathname_len, + filename, + filename_len, + &truncated_path) == KERN_SUCCESS) { + /* safety first... */ + pathname[__PATH_MAX - 1] = '\0'; + filename[__PATH_MAX - 1] = '\0'; + + vnode_pager_get_object_mtime(file_object->pager, + &mtime, + &cs_mtime); + } else { + kheap_free(KHEAP_TEMP, pathname, __PATH_MAX * 2); + pathname = NULL; + filename = NULL; + pathname_len = 0; + filename_len = 0; + truncated_path = FALSE; } - file_offset += file_object->vo_shadow_offset; - file_object = shadow; } + } + printf("CODE SIGNING: process %d[%s]: " + "rejecting invalid page at address 0x%llx " + "from offset 0x%llx in file \"%s%s%s\" " + "(cs_mtime:%lu.%ld %s mtime:%lu.%ld) " + "(signed:%d validated:%d tainted:%d nx:%d " + "wpmapped:%d dirty:%d depth:%d)\n", + pid, procname, (addr64_t) vaddr, + file_offset, + (pathname ? pathname : ""), + (truncated_path ? "/.../" : ""), + (truncated_path ? filename : ""), + cs_mtime.tv_sec, cs_mtime.tv_nsec, + ((cs_mtime.tv_sec == mtime.tv_sec && + cs_mtime.tv_nsec == mtime.tv_nsec) + ? "==" + : "!="), + mtime.tv_sec, mtime.tv_nsec, + object->code_signed, + VMP_CS_VALIDATED(m, fault_page_size, fault_phys_offset), + VMP_CS_TAINTED(m, fault_page_size, fault_phys_offset), + VMP_CS_NX(m, fault_page_size, fault_phys_offset), + m->vmp_wpmapped, + m->vmp_dirty, + shadow_depth); - mtime.tv_sec = 0; - mtime.tv_nsec = 0; - cs_mtime.tv_sec = 0; - cs_mtime.tv_nsec = 0; + /* + * We currently only generate an exit reason if cs_invalid_page directly killed a process. If cs_invalid_page + * did not kill the process (more the case on desktop), vm_fault_enter will not satisfy the fault and whether the + * process dies is dependent on whether there is a signal handler registered for SIGSEGV and how that handler + * will deal with the segmentation fault. + */ + if (cs_killed) { + KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE, + pid, OS_REASON_CODESIGNING, CODESIGNING_EXIT_REASON_INVALID_PAGE, 0, 0); - /* get file's pathname and/or filename */ - pathname = NULL; - filename = NULL; - pathname_len = 0; - filename_len = 0; - truncated_path = FALSE; - /* no pager -> no file -> no pathname, use "" in that case */ - if (file_object->pager != NULL) { - pathname = (char *)kalloc(__PATH_MAX * 2); - if (pathname) { - pathname[0] = '\0'; - pathname_len = __PATH_MAX; - filename = pathname + pathname_len; - filename_len = __PATH_MAX; - - if (vnode_pager_get_object_name(file_object->pager, - pathname, - pathname_len, - filename, - filename_len, - &truncated_path) == KERN_SUCCESS) { - /* safety first... 
*/ - pathname[__PATH_MAX - 1] = '\0'; - filename[__PATH_MAX - 1] = '\0'; - - vnode_pager_get_object_mtime(file_object->pager, - &mtime, - &cs_mtime); + codesigning_exit_reason = os_reason_create(OS_REASON_CODESIGNING, CODESIGNING_EXIT_REASON_INVALID_PAGE); + if (codesigning_exit_reason == NULL) { + printf("vm_fault_enter: failed to allocate codesigning exit reason\n"); + } else { + mach_vm_address_t data_addr = 0; + struct codesigning_exit_reason_info *ceri = NULL; + uint32_t reason_buffer_size_estimate = kcdata_estimate_required_buffer_size(1, sizeof(*ceri)); + + if (os_reason_alloc_buffer_noblock(codesigning_exit_reason, reason_buffer_size_estimate)) { + printf("vm_fault_enter: failed to allocate buffer for codesigning exit reason\n"); + } else { + if (KERN_SUCCESS == kcdata_get_memory_addr(&codesigning_exit_reason->osr_kcd_descriptor, + EXIT_REASON_CODESIGNING_INFO, sizeof(*ceri), &data_addr)) { + ceri = (struct codesigning_exit_reason_info *)data_addr; + static_assert(__PATH_MAX == sizeof(ceri->ceri_pathname)); + + ceri->ceri_virt_addr = vaddr; + ceri->ceri_file_offset = file_offset; + if (pathname) { + strncpy((char *)&ceri->ceri_pathname, pathname, sizeof(ceri->ceri_pathname)); + } else { + ceri->ceri_pathname[0] = '\0'; + } + if (filename) { + strncpy((char *)&ceri->ceri_filename, filename, sizeof(ceri->ceri_filename)); + } else { + ceri->ceri_filename[0] = '\0'; + } + ceri->ceri_path_truncated = (truncated_path ? 1 : 0); + ceri->ceri_codesig_modtime_secs = cs_mtime.tv_sec; + ceri->ceri_codesig_modtime_nsecs = cs_mtime.tv_nsec; + ceri->ceri_page_modtime_secs = mtime.tv_sec; + ceri->ceri_page_modtime_nsecs = mtime.tv_nsec; + ceri->ceri_object_codesigned = (object->code_signed); + ceri->ceri_page_codesig_validated = VMP_CS_VALIDATED(m, fault_page_size, fault_phys_offset); + ceri->ceri_page_codesig_tainted = VMP_CS_TAINTED(m, fault_page_size, fault_phys_offset); + ceri->ceri_page_codesig_nx = VMP_CS_NX(m, fault_page_size, fault_phys_offset); + ceri->ceri_page_wpmapped = (m->vmp_wpmapped); + ceri->ceri_page_slid = 0; + ceri->ceri_page_dirty = (m->vmp_dirty); + ceri->ceri_page_shadow_depth = shadow_depth; } else { - kfree(pathname, __PATH_MAX * 2); - pathname = NULL; - filename = NULL; - pathname_len = 0; - filename_len = 0; - truncated_path = FALSE; +#if DEBUG || DEVELOPMENT + panic("vm_fault_enter: failed to allocate kcdata for codesigning exit reason"); +#else + printf("vm_fault_enter: failed to allocate kcdata for codesigning exit reason\n"); +#endif /* DEBUG || DEVELOPMENT */ + /* Free the buffer */ + os_reason_alloc_buffer_noblock(codesigning_exit_reason, 0); } } } - printf("CODE SIGNING: process %d[%s]: " - "rejecting invalid page at address 0x%llx " + + set_thread_exit_reason(current_thread(), codesigning_exit_reason, FALSE); + } + if (panic_on_cs_killed && + object->object_is_shared_cache) { + char *tainted_contents; + vm_map_offset_t src_vaddr; + src_vaddr = (vm_map_offset_t) phystokv((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(m) << PAGE_SHIFT); + tainted_contents = kalloc(PAGE_SIZE); + bcopy((const char *)src_vaddr, tainted_contents, PAGE_SIZE); + printf("CODE SIGNING: tainted page %p phys 0x%x phystokv 0x%llx copied to %p\n", m, VM_PAGE_GET_PHYS_PAGE(m), (uint64_t)src_vaddr, tainted_contents); + panic("CODE SIGNING: process %d[%s]: " + "rejecting invalid page (phys#0x%x) at address 0x%llx " "from offset 0x%llx in file \"%s%s%s\" " "(cs_mtime:%lu.%ld %s mtime:%lu.%ld) " - "(signed:%d validated:%d tainted:%d nx:%d " + "(signed:%d validated:%d tainted:%d nx:%d" "wpmapped:%d dirty:%d 
depth:%d)\n", - pid, procname, (addr64_t) vaddr, + pid, procname, + VM_PAGE_GET_PHYS_PAGE(m), + (addr64_t) vaddr, file_offset, (pathname ? pathname : ""), (truncated_path ? "/.../" : ""), @@ -2783,177 +2917,135 @@ vm_fault_enter(vm_page_t m, : "!="), mtime.tv_sec, mtime.tv_nsec, object->code_signed, - m->vmp_cs_validated, - m->vmp_cs_tainted, - m->vmp_cs_nx, + VMP_CS_VALIDATED(m, fault_page_size, fault_phys_offset), + VMP_CS_TAINTED(m, fault_page_size, fault_phys_offset), + VMP_CS_NX(m, fault_page_size, fault_phys_offset), m->vmp_wpmapped, m->vmp_dirty, shadow_depth); + } + if (file_object != object) { + vm_object_unlock(file_object); + } + if (pathname_len != 0) { + kheap_free(KHEAP_TEMP, pathname, __PATH_MAX * 2); + pathname = NULL; + filename = NULL; + } + } else { + /* proceed with the invalid page */ + kr = KERN_SUCCESS; + if (!VMP_CS_VALIDATED(m, fault_page_size, fault_phys_offset) && + !object->code_signed) { /* - * We currently only generate an exit reason if cs_invalid_page directly killed a process. If cs_invalid_page - * did not kill the process (more the case on desktop), vm_fault_enter will not satisfy the fault and whether the - * process dies is dependent on whether there is a signal handler registered for SIGSEGV and how that handler - * will deal with the segmentation fault. + * This page has not been (fully) validated but + * does not belong to a code-signed object + * so it should not be forcefully considered + * as tainted. + * We're just concerned about it here because + * we've been asked to "execute" it but that + * does not mean that it should cause other + * accesses to fail. + * This happens when a debugger sets a + * breakpoint and we then execute code in + * that page. Marking the page as "tainted" + * would cause any inspection tool ("leaks", + * "vmmap", "CrashReporter", ...) to get killed + * due to code-signing violation on that page, + * even though they're just reading it and not + * executing from it. 
*/ - if (cs_killed) { - KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE, - pid, OS_REASON_CODESIGNING, CODESIGNING_EXIT_REASON_INVALID_PAGE, 0, 0); - - codesigning_exit_reason = os_reason_create(OS_REASON_CODESIGNING, CODESIGNING_EXIT_REASON_INVALID_PAGE); - if (codesigning_exit_reason == NULL) { - printf("vm_fault_enter: failed to allocate codesigning exit reason\n"); - } else { - mach_vm_address_t data_addr = 0; - struct codesigning_exit_reason_info *ceri = NULL; - uint32_t reason_buffer_size_estimate = kcdata_estimate_required_buffer_size(1, sizeof(*ceri)); - - if (os_reason_alloc_buffer_noblock(codesigning_exit_reason, reason_buffer_size_estimate)) { - printf("vm_fault_enter: failed to allocate buffer for codesigning exit reason\n"); - } else { - if (KERN_SUCCESS == kcdata_get_memory_addr(&codesigning_exit_reason->osr_kcd_descriptor, - EXIT_REASON_CODESIGNING_INFO, sizeof(*ceri), &data_addr)) { - ceri = (struct codesigning_exit_reason_info *)data_addr; - static_assert(__PATH_MAX == sizeof(ceri->ceri_pathname)); - - ceri->ceri_virt_addr = vaddr; - ceri->ceri_file_offset = file_offset; - if (pathname) { - strncpy((char *)&ceri->ceri_pathname, pathname, sizeof(ceri->ceri_pathname)); - } else { - ceri->ceri_pathname[0] = '\0'; - } - if (filename) { - strncpy((char *)&ceri->ceri_filename, filename, sizeof(ceri->ceri_filename)); - } else { - ceri->ceri_filename[0] = '\0'; - } - ceri->ceri_path_truncated = (truncated_path); - ceri->ceri_codesig_modtime_secs = cs_mtime.tv_sec; - ceri->ceri_codesig_modtime_nsecs = cs_mtime.tv_nsec; - ceri->ceri_page_modtime_secs = mtime.tv_sec; - ceri->ceri_page_modtime_nsecs = mtime.tv_nsec; - ceri->ceri_object_codesigned = (object->code_signed); - ceri->ceri_page_codesig_validated = (m->vmp_cs_validated); - ceri->ceri_page_codesig_tainted = (m->vmp_cs_tainted); - ceri->ceri_page_codesig_nx = (m->vmp_cs_nx); - ceri->ceri_page_wpmapped = (m->vmp_wpmapped); - ceri->ceri_page_slid = 0; - ceri->ceri_page_dirty = (m->vmp_dirty); - ceri->ceri_page_shadow_depth = shadow_depth; - } else { -#if DEBUG || DEVELOPMENT - panic("vm_fault_enter: failed to allocate kcdata for codesigning exit reason"); -#else - printf("vm_fault_enter: failed to allocate kcdata for codesigning exit reason\n"); -#endif /* DEBUG || DEVELOPMENT */ - /* Free the buffer */ - os_reason_alloc_buffer_noblock(codesigning_exit_reason, 0); - } - } - } - - set_thread_exit_reason(current_thread(), codesigning_exit_reason, FALSE); - } - if (panic_on_cs_killed && - object->object_is_shared_cache) { - char *tainted_contents; - vm_map_offset_t src_vaddr; - src_vaddr = (vm_map_offset_t) phystokv((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(m) << PAGE_SHIFT); - tainted_contents = kalloc(PAGE_SIZE); - bcopy((const char *)src_vaddr, tainted_contents, PAGE_SIZE); - printf("CODE SIGNING: tainted page %p phys 0x%x phystokv 0x%llx copied to %p\n", m, VM_PAGE_GET_PHYS_PAGE(m), (uint64_t)src_vaddr, tainted_contents); - panic("CODE SIGNING: process %d[%s]: " - "rejecting invalid page (phys#0x%x) at address 0x%llx " - "from offset 0x%llx in file \"%s%s%s\" " - "(cs_mtime:%lu.%ld %s mtime:%lu.%ld) " - "(signed:%d validated:%d tainted:%d nx:%d" - "wpmapped:%d dirty:%d depth:%d)\n", - pid, procname, - VM_PAGE_GET_PHYS_PAGE(m), - (addr64_t) vaddr, - file_offset, - (pathname ? pathname : ""), - (truncated_path ? "/.../" : ""), - (truncated_path ? filename : ""), - cs_mtime.tv_sec, cs_mtime.tv_nsec, - ((cs_mtime.tv_sec == mtime.tv_sec && - cs_mtime.tv_nsec == mtime.tv_nsec) - ? 
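The tail of the violation handler above only requests a pmap disconnect the first time a (sub-)page becomes tainted, since existing mappings must be forced to re-fault and observe the new state; subsequent faults on an already-tainted sub-page skip that work. A self-contained sketch of that idempotent transition (fields are illustrative):

#include <stdbool.h>
#include <stdint.h>

struct cs_state { uint8_t tainted; };   /* bit i: sub-page i tainted */

static void
cs_mark_tainted(struct cs_state *p, unsigned int subpage, bool *must_disconnect)
{
    uint8_t bit = (uint8_t)(1u << subpage);

    if (!(p->tainted & bit)) {
        /*
         * First taint of this sub-page: existing mappings in other pmaps
         * must be disconnected so they re-fault and see the new state.
         */
        *must_disconnect = true;
        p->tainted |= bit;
    }
}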
"==" - : "!="), - mtime.tv_sec, mtime.tv_nsec, - object->code_signed, - m->vmp_cs_validated, - m->vmp_cs_tainted, - m->vmp_cs_nx, - m->vmp_wpmapped, - m->vmp_dirty, - shadow_depth); - } - - if (file_object != object) { - vm_object_unlock(file_object); - } - if (pathname_len != 0) { - kfree(pathname, __PATH_MAX * 2); - pathname = NULL; - filename = NULL; - } } else { - /* proceed with the invalid page */ - kr = KERN_SUCCESS; - if (!m->vmp_cs_validated && - !object->code_signed) { - /* - * This page has not been (fully) validated but - * does not belong to a code-signed object - * so it should not be forcefully considered - * as tainted. - * We're just concerned about it here because - * we've been asked to "execute" it but that - * does not mean that it should cause other - * accesses to fail. - * This happens when a debugger sets a - * breakpoint and we then execute code in - * that page. Marking the page as "tainted" - * would cause any inspection tool ("leaks", - * "vmmap", "CrashReporter", ...) to get killed - * due to code-signing violation on that page, - * even though they're just reading it and not - * executing from it. - */ - } else { - /* - * Page might have been tainted before or not; - * now it definitively is. If the page wasn't - * tainted, we must disconnect it from all - * pmaps later, to force existing mappings - * through that code path for re-consideration - * of the validity of that page. - */ - must_disconnect = !m->vmp_cs_tainted; - m->vmp_cs_tainted = TRUE; + /* + * Page might have been tainted before or not; + * now it definitively is. If the page wasn't + * tainted, we must disconnect it from all + * pmaps later, to force existing mappings + * through that code path for re-consideration + * of the validity of that page. + */ + if (!VMP_CS_TAINTED(m, fault_page_size, fault_phys_offset)) { + *must_disconnect = TRUE; + VMP_CS_SET_TAINTED(m, fault_page_size, fault_phys_offset, TRUE); } - cs_enter_tainted_accepted++; } - if (kr != KERN_SUCCESS) { - if (cs_debug) { - printf("CODESIGNING: vm_fault_enter(0x%llx): " - "*** INVALID PAGE ***\n", - (long long)vaddr); - } + cs_enter_tainted_accepted++; + } + if (kr != KERN_SUCCESS) { + if (cs_debug) { + printf("CODESIGNING: vm_fault_enter(0x%llx): " + "*** INVALID PAGE ***\n", + (long long)vaddr); + } #if !SECURE_KERNEL - if (cs_enforcement_panic) { - panic("CODESIGNING: panicking on invalid page\n"); - } -#endif + if (cs_enforcement_panic) { + panic("CODESIGNING: panicking on invalid page\n"); } - } else { - /* proceed with the valid page */ - kr = KERN_SUCCESS; +#endif } + return kr; +} +/* + * Check that the code signature is valid for the given page being inserted into + * the pmap. + * + * @param must_disconnect This value will be set to true if the caller must disconnect + * this page. + * @return If this function does not return KERN_SUCCESS, the caller must abort the page fault. + */ +static kern_return_t +vm_fault_validate_cs( + bool cs_bypass, + vm_object_t object, + vm_page_t m, + pmap_t pmap, + vm_map_offset_t vaddr, + vm_prot_t prot, + vm_prot_t caller_prot, + vm_map_size_t fault_page_size, + vm_map_offset_t fault_phys_offset, + vm_object_fault_info_t fault_info, + bool *must_disconnect) +{ + bool map_is_switched, map_is_switch_protected, cs_violation; + kern_return_t kr; + /* Validate code signature if necessary. 
*/ + map_is_switched = ((pmap != vm_map_pmap(current_task()->map)) && + (pmap == vm_map_pmap(current_thread()->map))); + map_is_switch_protected = current_thread()->map->switch_protect; + kr = vm_fault_cs_check_violation(cs_bypass, object, m, pmap, + prot, caller_prot, fault_page_size, fault_phys_offset, fault_info, + map_is_switched, map_is_switch_protected, &cs_violation); + if (kr != KERN_SUCCESS) { + return kr; + } + if (cs_violation) { + kr = vm_fault_cs_handle_violation(object, m, pmap, prot, vaddr, + fault_page_size, fault_phys_offset, + map_is_switched, map_is_switch_protected, must_disconnect); + } + return kr; +} + +/* + * Enqueue the page on the appropriate paging queue. + */ +static void +vm_fault_enqueue_page( + vm_object_t object, + vm_page_t m, + bool wired, + bool change_wiring, + vm_tag_t wire_tag, + bool no_cache, + int *type_of_fault, + kern_return_t kr) +{ + assert((m->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) || object != compressor_object); boolean_t page_queues_locked = FALSE; + boolean_t previously_pmapped = m->vmp_pmapped; #define __VM_PAGE_LOCKSPIN_QUEUES_IF_NEEDED() \ MACRO_BEGIN \ if (! page_queues_locked) { \ @@ -2969,13 +3061,6 @@ MACRO_BEGIN \ } \ MACRO_END - /* - * Hold queues lock to manipulate - * the page queues. Change wiring - * case is obvious. - */ - assert((m->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) || object != compressor_object); - #if CONFIG_BACKGROUND_QUEUE vm_page_update_background_state(m); #endif @@ -3040,7 +3125,7 @@ MACRO_END */ lid = cpu_number(); - lq = &vm_page_local_q[lid].vpl_un.vpl; + lq = zpercpu_get_cpu(vm_page_local_q, lid); VPL_LOCK(&lq->vpl_lock); @@ -3126,103 +3211,434 @@ MACRO_END } /* we're done with the page queues lock, if we ever took it */ __VM_PAGE_UNLOCK_QUEUES_IF_NEEDED(); +} + +/* + * Sets the pmmpped, xpmapped, and wpmapped bits on the vm_page_t and updates accounting. + * @return true if the page needs to be sync'ed via pmap_sync-page_data_physo + * before being inserted into the pmap. + */ +static bool +vm_fault_enter_set_mapped( + vm_object_t object, + vm_page_t m, + vm_prot_t prot, + vm_prot_t fault_type) +{ + bool page_needs_sync = false; + /* + * NOTE: we may only hold the vm_object lock SHARED + * at this point, so we need the phys_page lock to + * properly serialize updating the pmapped and + * xpmapped bits + */ + if ((prot & VM_PROT_EXECUTE) && !m->vmp_xpmapped) { + ppnum_t phys_page = VM_PAGE_GET_PHYS_PAGE(m); + + pmap_lock_phys_page(phys_page); + m->vmp_pmapped = TRUE; + + if (!m->vmp_xpmapped) { + m->vmp_xpmapped = TRUE; + + pmap_unlock_phys_page(phys_page); + + if (!object->internal) { + OSAddAtomic(1, &vm_page_xpmapped_external_count); + } + +#if defined(__arm__) || defined(__arm64__) + page_needs_sync = true; +#else + if (object->internal && + object->pager != NULL) { + /* + * This page could have been + * uncompressed by the + * compressor pager and its + * contents might be only in + * the data cache. + * Since it's being mapped for + * "execute" for the fist time, + * make sure the icache is in + * sync. 
+ */ + assert(VM_CONFIG_COMPRESSOR_IS_PRESENT); + page_needs_sync = true; + } +#endif + } else { + pmap_unlock_phys_page(phys_page); + } + } else { + if (m->vmp_pmapped == FALSE) { + ppnum_t phys_page = VM_PAGE_GET_PHYS_PAGE(m); + + pmap_lock_phys_page(phys_page); + m->vmp_pmapped = TRUE; + pmap_unlock_phys_page(phys_page); + } + } + + if (fault_type & VM_PROT_WRITE) { + if (m->vmp_wpmapped == FALSE) { + vm_object_lock_assert_exclusive(object); + if (!object->internal && object->pager) { + task_update_logical_writes(current_task(), PAGE_SIZE, TASK_WRITE_DEFERRED, vnode_pager_lookup_vnode(object->pager)); + } + m->vmp_wpmapped = TRUE; + } + } + return page_needs_sync; +} +/* + * Try to enter the given page into the pmap. + * Will retry without execute permission iff PMAP_CS is enabled and we encounter + * a codesigning failure on a non-execute fault. + */ +static kern_return_t +vm_fault_attempt_pmap_enter( + pmap_t pmap, + vm_map_offset_t vaddr, + vm_map_size_t fault_page_size, + vm_map_offset_t fault_phys_offset, + vm_page_t m, + vm_prot_t *prot, + vm_prot_t caller_prot, + vm_prot_t fault_type, + bool wired, + int pmap_options) +{ +#if !PMAP_CS +#pragma unused(caller_prot) +#endif /* !PMAP_CS */ + kern_return_t kr; + if (fault_page_size != PAGE_SIZE) { + DEBUG4K_FAULT("pmap %p va 0x%llx pa 0x%llx (0x%llx+0x%llx) prot 0x%x fault_type 0x%x\n", pmap, (uint64_t)vaddr, (uint64_t)((((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(m)) << PAGE_SHIFT) + fault_phys_offset), (uint64_t)(((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(m)) << PAGE_SHIFT), (uint64_t)fault_phys_offset, *prot, fault_type); + assertf((!(fault_phys_offset & FOURK_PAGE_MASK) && + fault_phys_offset < PAGE_SIZE), + "0x%llx\n", (uint64_t)fault_phys_offset); + } else { + assertf(fault_phys_offset == 0, + "0x%llx\n", (uint64_t)fault_phys_offset); + } - /* If we have a KERN_SUCCESS from the previous checks, we either have - * a good page, or a tainted page that has been accepted by the process. - * In both cases the page will be entered into the pmap. - * If the page is writeable, we need to disconnect it from other pmaps - * now so those processes can take note. + PMAP_ENTER_OPTIONS(pmap, vaddr, + fault_phys_offset, + m, *prot, fault_type, 0, + wired, + pmap_options, + kr); +#if PMAP_CS + /* + * Retry without execute permission if we encountered a codesigning + * failure on a non-execute fault. This allows applications which + * don't actually need to execute code to still map it for read access. */ - if (kr == KERN_SUCCESS) { + if ((kr == KERN_CODESIGN_ERROR) && pmap_cs_enforced(pmap) && + (*prot & VM_PROT_EXECUTE) && !(caller_prot & VM_PROT_EXECUTE)) { + *prot &= ~VM_PROT_EXECUTE; + PMAP_ENTER_OPTIONS(pmap, vaddr, + fault_phys_offset, + m, *prot, fault_type, 0, + wired, + pmap_options, + kr); + } +#endif + return kr; +} + +/* + * Enter the given page into the pmap. + * The map must be locked shared. + * The vm object must NOT be locked. + * + * @param need_retry if not null, avoid making a (potentially) blocking call into + * the pmap layer. When such a call would be necessary, return true in this boolean instead. 
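vm_fault_enter_set_mapped() above records, under the phys-page lock, that a page has been entered into a pmap, entered executable, or entered writable; the first executable mapping may require a cache sync because the page contents could still be only in the data cache. The sketch below compresses that decision (locking omitted, and it always requests the sync on a first executable mapping, whereas the real code only does so on arm or for pager-backed internal objects on x86).

#include <stdbool.h>

#define VPROT_WRITE   0x2
#define VPROT_EXECUTE 0x4

struct page_map_state {
    bool pmapped;    /* entered into some pmap at least once */
    bool xpmapped;   /* entered executable at least once     */
    bool wpmapped;   /* entered writable at least once       */
};

static bool
fault_enter_set_mapped(struct page_map_state *p, int prot, int fault_type)
{
    bool needs_sync = false;

    p->pmapped = true;
    if ((prot & VPROT_EXECUTE) && !p->xpmapped) {
        p->xpmapped = true;
        needs_sync = true;        /* first executable mapping: sync caches first */
    }
    if (fault_type & VPROT_WRITE) {
        p->wpmapped = true;       /* remember the page was ever mapped writable */
    }
    return needs_sync;
}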
+ */ +static kern_return_t +vm_fault_pmap_enter( + pmap_t pmap, + vm_map_offset_t vaddr, + vm_map_size_t fault_page_size, + vm_map_offset_t fault_phys_offset, + vm_page_t m, + vm_prot_t *prot, + vm_prot_t caller_prot, + vm_prot_t fault_type, + bool wired, + int pmap_options, + boolean_t *need_retry) +{ + kern_return_t kr; + if (need_retry != NULL) { /* - * NOTE: we may only hold the vm_object lock SHARED - * at this point, so we need the phys_page lock to - * properly serialize updating the pmapped and - * xpmapped bits + * Although we don't hold a lock on this object, we hold a lock + * on the top object in the chain. To prevent a deadlock, we + * can't allow the pmap layer to block. */ - if ((prot & VM_PROT_EXECUTE) && !m->vmp_xpmapped) { - ppnum_t phys_page = VM_PAGE_GET_PHYS_PAGE(m); + pmap_options |= PMAP_OPTIONS_NOWAIT; + } + kr = vm_fault_attempt_pmap_enter(pmap, vaddr, + fault_page_size, fault_phys_offset, + m, prot, caller_prot, fault_type, wired, pmap_options); + if (kr == KERN_RESOURCE_SHORTAGE) { + if (need_retry) { + /* + * There's nothing we can do here since we hold the + * lock on the top object in the chain. The caller + * will need to deal with this by dropping that lock and retrying. + */ + *need_retry = TRUE; + vm_pmap_enter_retried++; + } + } + return kr; +} - pmap_lock_phys_page(phys_page); +/* + * Enter the given page into the pmap. + * The vm map must be locked shared. + * The vm object must be locked exclusive, unless this is a soft fault. + * For a soft fault, the object must be locked shared or exclusive. + * + * @param need_retry if not null, avoid making a (potentially) blocking call into + * the pmap layer. When such a call would be necessary, return true in this boolean instead. + */ +static kern_return_t +vm_fault_pmap_enter_with_object_lock( + vm_object_t object, + pmap_t pmap, + vm_map_offset_t vaddr, + vm_map_size_t fault_page_size, + vm_map_offset_t fault_phys_offset, + vm_page_t m, + vm_prot_t *prot, + vm_prot_t caller_prot, + vm_prot_t fault_type, + bool wired, + int pmap_options, + boolean_t *need_retry) +{ + kern_return_t kr; + /* + * Prevent a deadlock by not + * holding the object lock if we need to wait for a page in + * pmap_enter() - + */ + kr = vm_fault_attempt_pmap_enter(pmap, vaddr, + fault_page_size, fault_phys_offset, + m, prot, caller_prot, fault_type, wired, pmap_options | PMAP_OPTIONS_NOWAIT); +#if __x86_64__ + if (kr == KERN_INVALID_ARGUMENT && + pmap == PMAP_NULL && + wired) { + /* + * Wiring a page in a pmap-less VM map: + * VMware's "vmmon" kernel extension does this + * to grab pages. + * Let it proceed even though the PMAP_ENTER() failed. + */ + kr = KERN_SUCCESS; + } +#endif /* __x86_64__ */ + + if (kr == KERN_RESOURCE_SHORTAGE) { + if (need_retry) { /* - * go ahead and take the opportunity - * to set 'pmapped' here so that we don't - * need to grab this lock a 2nd time - * just below + * this will be non-null in the case where we hold the lock + * on the top-object in this chain... we can't just drop + * the lock on the object we're inserting the page into + * and recall the PMAP_ENTER since we can still cause + * a deadlock if one of the critical paths tries to + * acquire the lock on the top-object and we're blocked + * in PMAP_ENTER waiting for memory... our only recourse + * is to deal with it at a higher level where we can + * drop both locks. */ - m->vmp_pmapped = TRUE; + *need_retry = TRUE; + vm_pmap_enter_retried++; + goto done; + } + /* + * The nonblocking version of pmap_enter did not succeed. 
+ * and we don't need to drop other locks and retry + * at the level above us, so + * use the blocking version instead. Requires marking + * the page busy and unlocking the object + */ + boolean_t was_busy = m->vmp_busy; - if (!m->vmp_xpmapped) { - m->vmp_xpmapped = TRUE; + vm_object_lock_assert_exclusive(object); - pmap_unlock_phys_page(phys_page); + m->vmp_busy = TRUE; + vm_object_unlock(object); - if (!object->internal) { - OSAddAtomic(1, &vm_page_xpmapped_external_count); - } + PMAP_ENTER_OPTIONS(pmap, vaddr, + fault_phys_offset, + m, *prot, fault_type, + 0, wired, + pmap_options, kr); -#if defined(__arm__) || defined(__arm64__) - pmap_sync_page_data_phys(phys_page); -#else - if (object->internal && - object->pager != NULL) { - /* - * This page could have been - * uncompressed by the - * compressor pager and its - * contents might be only in - * the data cache. - * Since it's being mapped for - * "execute" for the fist time, - * make sure the icache is in - * sync. - */ - assert(VM_CONFIG_COMPRESSOR_IS_PRESENT); - pmap_sync_page_data_phys(phys_page); - } + assert(VM_PAGE_OBJECT(m) == object); + + /* Take the object lock again. */ + vm_object_lock(object); + + /* If the page was busy, someone else will wake it up. + * Otherwise, we have to do it now. */ + assert(m->vmp_busy); + if (!was_busy) { + PAGE_WAKEUP_DONE(m); + } + vm_pmap_enter_blocked++; + } + +done: + return kr; +} + +/* + * Prepare to enter a page into the pmap by checking CS, protection bits, + * and setting mapped bits on the page_t. + * Does not modify the page's paging queue. + * + * page queue lock must NOT be held + * m->vmp_object must be locked + * + * NOTE: m->vmp_object could be locked "shared" only if we are called + * from vm_fault() as part of a soft fault. + */ +static kern_return_t +vm_fault_enter_prepare( + vm_page_t m, + pmap_t pmap, + vm_map_offset_t vaddr, + vm_prot_t *prot, + vm_prot_t caller_prot, + vm_map_size_t fault_page_size, + vm_map_offset_t fault_phys_offset, + boolean_t change_wiring, + vm_prot_t fault_type, + vm_object_fault_info_t fault_info, + int *type_of_fault, + bool *page_needs_data_sync) +{ + kern_return_t kr; + bool is_tainted = false; + vm_object_t object; + boolean_t cs_bypass = fault_info->cs_bypass; + + object = VM_PAGE_OBJECT(m); + + vm_object_lock_assert_held(object); + +#if KASAN + if (pmap == kernel_pmap) { + kasan_notify_address(vaddr, PAGE_SIZE); + } #endif - } else { - pmap_unlock_phys_page(phys_page); - } +#if PMAP_CS + if (pmap_cs_exempt(pmap)) { + cs_bypass = TRUE; + } +#endif + + LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_NOTOWNED); + + if (*type_of_fault == DBG_ZERO_FILL_FAULT) { + vm_object_lock_assert_exclusive(object); + } else if ((fault_type & VM_PROT_WRITE) == 0 && + !change_wiring && + (!m->vmp_wpmapped +#if VM_OBJECT_ACCESS_TRACKING + || object->access_tracking +#endif /* VM_OBJECT_ACCESS_TRACKING */ + )) { + /* + * This is not a "write" fault, so we + * might not have taken the object lock + * exclusively and we might not be able + * to update the "wpmapped" bit in + * vm_fault_enter(). + * Let's just grant read access to + * the page for now and we'll + * soft-fault again if we need write + * access later... + */ + + /* This had better not be a JIT page. 
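If the pmap owns the protection policy for this mapping (pmap_has_prot_policy() returns true), the handler may not quietly drop VM_PROT_WRITE here; the only mappings expected to hit that case are cs_bypass ones such as JIT regions, which is what the assert below checks.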
*/ + if (!pmap_has_prot_policy(pmap, fault_info->pmap_options & PMAP_OPTIONS_TRANSLATED_ALLOW_EXECUTE, *prot)) { + *prot &= ~VM_PROT_WRITE; } else { - if (m->vmp_pmapped == FALSE) { - ppnum_t phys_page = VM_PAGE_GET_PHYS_PAGE(m); + assert(cs_bypass); + } + } + if (m->vmp_pmapped == FALSE) { + if (m->vmp_clustered) { + if (*type_of_fault == DBG_CACHE_HIT_FAULT) { + /* + * found it in the cache, but this + * is the first fault-in of the page (m->vmp_pmapped == FALSE) + * so it must have come in as part of + * a cluster... account 1 pagein against it + */ + if (object->internal) { + *type_of_fault = DBG_PAGEIND_FAULT; + } else { + *type_of_fault = DBG_PAGEINV_FAULT; + } - pmap_lock_phys_page(phys_page); - m->vmp_pmapped = TRUE; - pmap_unlock_phys_page(phys_page); + VM_PAGE_COUNT_AS_PAGEIN(m); } + VM_PAGE_CONSUME_CLUSTERED(m); } + } + + if (*type_of_fault != DBG_COW_FAULT) { + DTRACE_VM2(as_fault, int, 1, (uint64_t *), NULL); + + if (pmap == kernel_pmap) { + DTRACE_VM2(kernel_asflt, int, 1, (uint64_t *), NULL); + } + } + + kr = vm_fault_validate_cs(cs_bypass, object, m, pmap, vaddr, + *prot, caller_prot, fault_page_size, fault_phys_offset, + fault_info, &is_tainted); + if (kr == KERN_SUCCESS) { + /* + * We either have a good page, or a tainted page that has been accepted by the process. + * In both cases the page will be entered into the pmap. + */ + *page_needs_data_sync = vm_fault_enter_set_mapped(object, m, *prot, fault_type); + if ((fault_type & VM_PROT_WRITE) && is_tainted) { + /* + * This page is tainted but we're inserting it anyways. + * Since it's writeable, we need to disconnect it from other pmaps + * now so those processes can take note. + */ - if (fault_type & VM_PROT_WRITE) { - if (m->vmp_wpmapped == FALSE) { - vm_object_lock_assert_exclusive(object); - if (!object->internal && object->pager) { - task_update_logical_writes(current_task(), PAGE_SIZE, TASK_WRITE_DEFERRED, vnode_pager_lookup_vnode(object->pager)); - } - m->vmp_wpmapped = TRUE; - } - if (must_disconnect) { - /* - * We can only get here - * because of the CSE logic - */ - assert(cs_enforcement_enabled); - pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m)); - /* - * If we are faulting for a write, we can clear - * the execute bit - that will ensure the page is - * checked again before being executable, which - * protects against a map switch. - * This only happens the first time the page - * gets tainted, so we won't get stuck here - * to make an already writeable page executable. - */ - if (!cs_bypass) { - assert(!pmap_has_prot_policy(prot)); - prot &= ~VM_PROT_EXECUTE; - } + /* + * We can only get here + * because of the CSE logic + */ + assert(pmap_get_vm_map_cs_enforced(pmap)); + pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m)); + /* + * If we are faulting for a write, we can clear + * the execute bit - that will ensure the page is + * checked again before being executable, which + * protects against a map switch. + * This only happens the first time the page + * gets tainted, so we won't get stuck here + * to make an already writeable page executable. 
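(pmap_disconnect() tears down every existing mapping of the physical page, not just this pmap's, so any other task that still had it mapped takes a fresh fault; combined with clearing VM_PROT_EXECUTE below, an attempt to execute the now-writable contents has to go back through code-signing validation first.)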
+ */ + if (!cs_bypass) { + assert(!pmap_has_prot_policy(pmap, fault_info->pmap_options & PMAP_OPTIONS_TRANSLATED_ALLOW_EXECUTE, *prot)); + *prot &= ~VM_PROT_EXECUTE; } } assert(VM_PAGE_OBJECT(m) == object); @@ -3239,95 +3655,66 @@ MACRO_END } } #endif /* VM_OBJECT_ACCESS_TRACKING */ + } + return kr; +} -#if PMAP_CS -pmap_enter_retry: -#endif - /* Prevent a deadlock by not - * holding the object lock if we need to wait for a page in - * pmap_enter() - */ - PMAP_ENTER_OPTIONS(pmap, vaddr, m, prot, fault_type, 0, - wired, - pmap_options | PMAP_OPTIONS_NOWAIT, - pe_result); -#if PMAP_CS - /* - * Retry without execute permission if we encountered a codesigning - * failure on a non-execute fault. This allows applications which - * don't actually need to execute code to still map it for read access. - */ - if ((pe_result == KERN_CODESIGN_ERROR) && pmap_cs_enforced(pmap) && - (prot & VM_PROT_EXECUTE) && !(caller_prot & VM_PROT_EXECUTE)) { - prot &= ~VM_PROT_EXECUTE; - goto pmap_enter_retry; - } -#endif -#if __x86_64__ - if (pe_result == KERN_INVALID_ARGUMENT && - pmap == PMAP_NULL && - wired) { - /* - * Wiring a page in a pmap-less VM map: - * VMware's "vmmon" kernel extension does this - * to grab pages. - * Let it proceed even though the PMAP_ENTER() failed. - */ - pe_result = KERN_SUCCESS; - } -#endif /* __x86_64__ */ - - if (pe_result == KERN_RESOURCE_SHORTAGE) { - if (need_retry) { - /* - * this will be non-null in the case where we hold the lock - * on the top-object in this chain... we can't just drop - * the lock on the object we're inserting the page into - * and recall the PMAP_ENTER since we can still cause - * a deadlock if one of the critical paths tries to - * acquire the lock on the top-object and we're blocked - * in PMAP_ENTER waiting for memory... our only recourse - * is to deal with it at a higher level where we can - * drop both locks. - */ - *need_retry = TRUE; - vm_pmap_enter_retried++; - goto after_the_pmap_enter; - } - /* The nonblocking version of pmap_enter did not succeed. - * and we don't need to drop other locks and retry - * at the level above us, so - * use the blocking version instead. Requires marking - * the page busy and unlocking the object */ - boolean_t was_busy = m->vmp_busy; - - vm_object_lock_assert_exclusive(object); +/* + * page queue lock must NOT be held + * m->vmp_object must be locked + * + * NOTE: m->vmp_object could be locked "shared" only if we are called + * from vm_fault() as part of a soft fault. If so, we must be + * careful not to modify the VM object in any way that is not + * legal under a shared lock... + */ +kern_return_t +vm_fault_enter( + vm_page_t m, + pmap_t pmap, + vm_map_offset_t vaddr, + vm_map_size_t fault_page_size, + vm_map_offset_t fault_phys_offset, + vm_prot_t prot, + vm_prot_t caller_prot, + boolean_t wired, + boolean_t change_wiring, + vm_tag_t wire_tag, + vm_object_fault_info_t fault_info, + boolean_t *need_retry, + int *type_of_fault) +{ + kern_return_t kr; + vm_object_t object; + bool page_needs_data_sync; + vm_prot_t fault_type; + int pmap_options = fault_info->pmap_options; - m->vmp_busy = TRUE; - vm_object_unlock(object); + if (VM_PAGE_GET_PHYS_PAGE(m) == vm_page_guard_addr) { + assert(m->vmp_fictitious); + return KERN_SUCCESS; + } - PMAP_ENTER_OPTIONS(pmap, vaddr, m, prot, fault_type, - 0, wired, - pmap_options, pe_result); + fault_type = change_wiring ? 
VM_PROT_NONE : caller_prot; - assert(VM_PAGE_OBJECT(m) == object); + kr = vm_fault_enter_prepare(m, pmap, vaddr, &prot, caller_prot, + fault_page_size, fault_phys_offset, change_wiring, fault_type, + fault_info, type_of_fault, &page_needs_data_sync); + object = VM_PAGE_OBJECT(m); - /* Take the object lock again. */ - vm_object_lock(object); + vm_fault_enqueue_page(object, m, wired, change_wiring, wire_tag, fault_info->no_cache, type_of_fault, kr); - /* If the page was busy, someone else will wake it up. - * Otherwise, we have to do it now. */ - assert(m->vmp_busy); - if (!was_busy) { - PAGE_WAKEUP_DONE(m); - } - vm_pmap_enter_blocked++; + if (kr == KERN_SUCCESS) { + if (page_needs_data_sync) { + pmap_sync_page_data_phys(VM_PAGE_GET_PHYS_PAGE(m)); } - kr = pe_result; + kr = vm_fault_pmap_enter_with_object_lock(object, pmap, vaddr, + fault_page_size, fault_phys_offset, m, + &prot, caller_prot, fault_type, wired, pmap_options, need_retry); } -after_the_pmap_enter: return kr; } @@ -3361,7 +3748,6 @@ vm_pre_fault(vm_map_offset_t vaddr, vm_prot_t prot) * and deallocated when leaving vm_fault. */ -extern int _map_enter_debug; extern uint64_t get_current_unique_pid(void); unsigned long vm_fault_collapse_total = 0; @@ -3378,7 +3764,8 @@ vm_fault_external( pmap_t caller_pmap, vm_map_offset_t caller_pmap_addr) { - return vm_fault_internal(map, vaddr, fault_type, change_wiring, vm_tag_bt(), + return vm_fault_internal(map, vaddr, fault_type, change_wiring, + change_wiring ? vm_tag_bt() : VM_KERN_MEMORY_NONE, interruptible, caller_pmap, caller_pmap_addr, NULL); } @@ -3407,6 +3794,145 @@ current_proc_is_privileged(void) uint64_t vm_copied_on_read = 0; +/* + * Cleanup after a vm_fault_enter. + * At this point, the fault should either have failed (kr != KERN_SUCCESS) + * or the page should be in the pmap and on the correct paging queue. + * + * Precondition: + * map must be locked shared. + * m_object must be locked. + * If top_object != VM_OBJECT_NULL, it must be locked. + * real_map must be locked. 
+ * + * Postcondition: + * map will be unlocked + * m_object will be unlocked + * top_object will be unlocked + * If real_map != map, it will be unlocked + */ +static void +vm_fault_complete( + vm_map_t map, + vm_map_t real_map, + vm_object_t object, + vm_object_t m_object, + vm_page_t m, + vm_map_offset_t offset, + vm_map_offset_t trace_real_vaddr, + vm_object_fault_info_t fault_info, + vm_prot_t caller_prot, +#if CONFIG_DTRACE + vm_map_offset_t real_vaddr, +#else + __unused vm_map_offset_t real_vaddr, +#endif /* CONFIG_DTRACE */ + int type_of_fault, + boolean_t need_retry, + kern_return_t kr, + ppnum_t *physpage_p, + vm_prot_t prot, + vm_object_t top_object, + boolean_t need_collapse, + vm_map_offset_t cur_offset, + vm_prot_t fault_type, + vm_object_t *written_on_object, + memory_object_t *written_on_pager, + vm_object_offset_t *written_on_offset) +{ + int event_code = 0; + vm_map_lock_assert_shared(map); + vm_object_lock_assert_held(m_object); + if (top_object != VM_OBJECT_NULL) { + vm_object_lock_assert_held(top_object); + } + vm_map_lock_assert_held(real_map); + + if (m_object->internal) { + event_code = (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_REAL_FAULT_ADDR_INTERNAL)); + } else if (m_object->object_is_shared_cache) { + event_code = (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_REAL_FAULT_ADDR_SHAREDCACHE)); + } else { + event_code = (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_REAL_FAULT_ADDR_EXTERNAL)); + } + + KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, event_code, trace_real_vaddr, (fault_info->user_tag << 16) | (caller_prot << 8) | type_of_fault, m->vmp_offset, get_current_unique_pid(), 0); + if (need_retry == FALSE) { + KDBG_FILTERED(MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_REAL_FAULT_FAST), get_current_unique_pid(), 0, 0, 0, 0); + } + DTRACE_VM6(real_fault, vm_map_offset_t, real_vaddr, vm_map_offset_t, m->vmp_offset, int, event_code, int, caller_prot, int, type_of_fault, int, fault_info->user_tag); + if (kr == KERN_SUCCESS && + physpage_p != NULL) { + /* for vm_map_wire_and_extract() */ + *physpage_p = VM_PAGE_GET_PHYS_PAGE(m); + if (prot & VM_PROT_WRITE) { + vm_object_lock_assert_exclusive(m_object); + m->vmp_dirty = TRUE; + } + } + + if (top_object != VM_OBJECT_NULL) { + /* + * It's safe to drop the top object + * now that we've done our + * vm_fault_enter(). Any other fault + * in progress for that virtual + * address will either find our page + * and translation or put in a new page + * and translation. + */ + vm_object_unlock(top_object); + top_object = VM_OBJECT_NULL; + } + + if (need_collapse == TRUE) { + vm_object_collapse(object, vm_object_trunc_page(offset), TRUE); + } + + if (need_retry == FALSE && + (type_of_fault == DBG_PAGEIND_FAULT || type_of_fault == DBG_PAGEINV_FAULT || type_of_fault == DBG_CACHE_HIT_FAULT)) { + /* + * evaluate access pattern and update state + * vm_fault_deactivate_behind depends on the + * state being up to date + */ + vm_fault_is_sequential(m_object, cur_offset, fault_info->behavior); + + vm_fault_deactivate_behind(m_object, cur_offset, fault_info->behavior); + } + /* + * That's it, clean up and return. 
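For orientation: once this helper and the ones above are in place, the refactored fast path reduces to a fixed call sequence. A condensed sketch (illustrative only; locking, tracing and error handling elided, argument lists abbreviated):

    // body of the new vm_fault_enter()
    kr = vm_fault_enter_prepare(m, pmap, vaddr, &prot, caller_prot,
        fault_page_size, fault_phys_offset, change_wiring, fault_type,
        fault_info, type_of_fault, &page_needs_data_sync);
    vm_fault_enqueue_page(object, m, wired, change_wiring, wire_tag,
        fault_info->no_cache, type_of_fault, kr);
    if (kr == KERN_SUCCESS) {
        if (page_needs_data_sync) {
            pmap_sync_page_data_phys(VM_PAGE_GET_PHYS_PAGE(m));
        }
        kr = vm_fault_pmap_enter_with_object_lock(object, pmap, vaddr,
            fault_page_size, fault_phys_offset, m, &prot, caller_prot,
            fault_type, wired, pmap_options, need_retry);
    }
    // ... and vm_fault_internal() then finishes with vm_fault_complete()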
+ */ + if (m->vmp_busy) { + vm_object_lock_assert_exclusive(m_object); + PAGE_WAKEUP_DONE(m); + } + + if (need_retry == FALSE && !m_object->internal && (fault_type & VM_PROT_WRITE)) { + vm_object_paging_begin(m_object); + + assert(*written_on_object == VM_OBJECT_NULL); + *written_on_object = m_object; + *written_on_pager = m_object->pager; + *written_on_offset = m_object->paging_offset + m->vmp_offset; + } + vm_object_unlock(object); + + vm_map_unlock_read(map); + if (real_map != map) { + vm_map_unlock(real_map); + } +} + +static inline int +vm_fault_type_for_tracing(boolean_t need_copy_on_read, int type_of_fault) +{ + if (need_copy_on_read && type_of_fault == DBG_COW_FAULT) { + return DBG_COR_FAULT; + } + return type_of_fault; +} + kern_return_t vm_fault_internal( vm_map_t map, @@ -3441,34 +3967,59 @@ vm_fault_internal( wait_interrupt_t interruptible_state; vm_map_t real_map = map; vm_map_t original_map = map; - boolean_t object_locks_dropped = FALSE; + bool object_locks_dropped = FALSE; vm_prot_t fault_type; vm_prot_t original_fault_type; struct vm_object_fault_info fault_info = {}; - boolean_t need_collapse = FALSE; + bool need_collapse = FALSE; boolean_t need_retry = FALSE; boolean_t *need_retry_ptr = NULL; - int object_lock_type = 0; - int cur_object_lock_type; + uint8_t object_lock_type = 0; + uint8_t cur_object_lock_type; vm_object_t top_object = VM_OBJECT_NULL; vm_object_t written_on_object = VM_OBJECT_NULL; memory_object_t written_on_pager = NULL; vm_object_offset_t written_on_offset = 0; int throttle_delay; int compressed_count_delta; - int grab_options; - boolean_t need_copy; - boolean_t need_copy_on_read; + uint8_t grab_options; + bool need_copy; + bool need_copy_on_read; vm_map_offset_t trace_vaddr; vm_map_offset_t trace_real_vaddr; + vm_map_size_t fault_page_size; + vm_map_size_t fault_page_mask; + vm_map_offset_t fault_phys_offset; vm_map_offset_t real_vaddr; - boolean_t resilient_media_retry = FALSE; + bool resilient_media_retry = FALSE; vm_object_t resilient_media_object = VM_OBJECT_NULL; vm_object_offset_t resilient_media_offset = (vm_object_offset_t)-1; + bool page_needs_data_sync = false; + /* + * Was the VM object contended when vm_map_lookup_locked locked it? + * If so, the zero fill path will drop the lock + * NB: Ideally we would always drop the lock rather than rely on + * this heuristic, but vm_object_unlock currently takes > 30 cycles. 
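The flag is produced by the new out-parameter of vm_map_lookup_locked() below and is only honored on the zero-fill path, where the page can be marked busy and the object lock dropped before zeroing (see that hunk further down).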
+ */ + bool object_is_contended = false; real_vaddr = vaddr; trace_real_vaddr = vaddr; - vaddr = vm_map_trunc_page(vaddr, PAGE_MASK); + + if (VM_MAP_PAGE_SIZE(original_map) < PAGE_SIZE) { + fault_phys_offset = (vm_map_offset_t)-1; + fault_page_size = VM_MAP_PAGE_SIZE(original_map); + fault_page_mask = VM_MAP_PAGE_MASK(original_map); + if (fault_page_size < PAGE_SIZE) { + DEBUG4K_FAULT("map %p vaddr 0x%llx caller_prot 0x%x\n", map, (uint64_t)trace_real_vaddr, caller_prot); + vaddr = vm_map_trunc_page(vaddr, fault_page_mask); + } + } else { + fault_phys_offset = 0; + fault_page_size = PAGE_SIZE; + fault_page_mask = PAGE_MASK; + vaddr = vm_map_trunc_page(vaddr, PAGE_MASK); + } if (map == kernel_map) { trace_vaddr = VM_KERNEL_ADDRHIDE(vaddr); @@ -3498,7 +4049,7 @@ vm_fault_internal( } thread_t cthread = current_thread(); - boolean_t rtfault = (cthread->sched_mode == TH_MODE_REALTIME); + bool rtfault = (cthread->sched_mode == TH_MODE_REALTIME); uint64_t fstart = 0; if (rtfault) { @@ -3518,7 +4069,7 @@ vm_fault_internal( need_copy = TRUE; } - if (need_copy) { + if (need_copy || change_wiring) { object_lock_type = OBJECT_LOCK_EXCLUSIVE; } else { object_lock_type = OBJECT_LOCK_SHARED; @@ -3568,12 +4119,15 @@ RetryFault: object_lock_type, &version, &object, &offset, &prot, &wired, &fault_info, - &real_map); + &real_map, + &object_is_contended); if (kr != KERN_SUCCESS) { vm_map_unlock_read(map); goto done; } + + pmap = real_map->pmap; fault_info.interruptible = interruptible; fault_info.stealth = FALSE; @@ -3698,6 +4252,8 @@ RetryFault: } #endif + fault_phys_offset = (vm_map_offset_t)offset - vm_map_trunc_page((vm_map_offset_t)offset, PAGE_MASK); + /* * If this page is to be inserted in a copy delay object * for writing, and if the object has a copy, then the @@ -3732,7 +4288,7 @@ RetryFault: break; } - m = vm_page_lookup(cur_object, cur_offset); + m = vm_page_lookup(cur_object, vm_object_trunc_page(cur_offset)); m_object = NULL; if (m != VM_PAGE_NULL) { @@ -3896,7 +4452,8 @@ reclaimed_from_pageout: } assert(m_object == VM_PAGE_OBJECT(m)); - if (VM_FAULT_NEED_CS_VALIDATION(map->pmap, m, m_object) || + if (vm_fault_cs_need_validation(map->pmap, m, m_object, + PAGE_SIZE, 0) || (physpage_p != NULL && (prot & VM_PROT_WRITE))) { upgrade_lock_and_retry: /* @@ -3956,7 +4513,7 @@ upgrade_lock_and_retry: vm_protect_privileged_from_untrusted && !((prot & VM_PROT_EXECUTE) && cur_object->code_signed && - cs_process_enforcement(NULL)) && + pmap_get_vm_map_cs_enforced(caller_pmap ? 
caller_pmap : pmap)) && current_proc_is_privileged()) { /* * We're faulting on a page in "object" and @@ -4000,7 +4557,7 @@ upgrade_lock_and_retry: } if (!(fault_type & VM_PROT_WRITE) && !need_copy) { - if (!pmap_has_prot_policy(prot)) { + if (!pmap_has_prot_policy(pmap, fault_info.pmap_options & PMAP_OPTIONS_TRANSLATED_ALLOW_EXECUTE, prot)) { prot &= ~VM_PROT_WRITE; } else { /* @@ -4059,10 +4616,22 @@ FastPmapEnter: need_retry_ptr = NULL; } + if (fault_page_size < PAGE_SIZE) { + DEBUG4K_FAULT("map %p original %p pmap %p va 0x%llx caller pmap %p va 0x%llx pa 0x%llx (0x%llx+0x%llx) prot 0x%x caller_prot 0x%x\n", map, original_map, pmap, (uint64_t)vaddr, caller_pmap, (uint64_t)caller_pmap_addr, (uint64_t)((((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(m)) << PAGE_SHIFT) + fault_phys_offset), (uint64_t)(((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(m)) << PAGE_SHIFT), (uint64_t)fault_phys_offset, prot, caller_prot); + assertf((!(fault_phys_offset & FOURK_PAGE_MASK) && + fault_phys_offset < PAGE_SIZE), + "0x%llx\n", (uint64_t)fault_phys_offset); + } else { + assertf(fault_phys_offset == 0, + "0x%llx\n", (uint64_t)fault_phys_offset); + } + if (caller_pmap) { kr = vm_fault_enter(m, caller_pmap, caller_pmap_addr, + fault_page_size, + fault_phys_offset, prot, caller_prot, wired, @@ -4075,6 +4644,8 @@ FastPmapEnter: kr = vm_fault_enter(m, pmap, vaddr, + fault_page_size, + fault_phys_offset, prot, caller_prot, wired, @@ -4084,84 +4655,31 @@ FastPmapEnter: need_retry_ptr, &type_of_fault); } - { - int event_code = 0; - - if (m_object->internal) { - event_code = (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_REAL_FAULT_ADDR_INTERNAL)); - } else if (m_object->object_is_shared_cache) { - event_code = (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_REAL_FAULT_ADDR_SHAREDCACHE)); - } else { - event_code = (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_REAL_FAULT_ADDR_EXTERNAL)); - } - - KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, event_code, trace_real_vaddr, (fault_info.user_tag << 16) | (caller_prot << 8) | type_of_fault, m->vmp_offset, get_current_unique_pid(), 0); - if (need_retry == FALSE) { - KDBG_FILTERED(MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_REAL_FAULT_FAST), get_current_unique_pid(), 0, 0, 0, 0); - } - DTRACE_VM6(real_fault, vm_map_offset_t, real_vaddr, vm_map_offset_t, m->vmp_offset, int, event_code, int, caller_prot, int, type_of_fault, int, fault_info.user_tag); - } - if (kr == KERN_SUCCESS && - physpage_p != NULL) { - /* for vm_map_wire_and_extract() */ - *physpage_p = VM_PAGE_GET_PHYS_PAGE(m); - if (prot & VM_PROT_WRITE) { - vm_object_lock_assert_exclusive(m_object); - m->vmp_dirty = TRUE; - } - } - - if (top_object != VM_OBJECT_NULL) { - /* - * It's safe to drop the top object - * now that we've done our - * vm_fault_enter(). Any other fault - * in progress for that virtual - * address will either find our page - * and translation or put in a new page - * and translation. - */ - vm_object_unlock(top_object); - top_object = VM_OBJECT_NULL; - } - - if (need_collapse == TRUE) { - vm_object_collapse(object, offset, TRUE); - } - - if (need_retry == FALSE && - (type_of_fault == DBG_PAGEIND_FAULT || type_of_fault == DBG_PAGEINV_FAULT || type_of_fault == DBG_CACHE_HIT_FAULT)) { - /* - * evaluate access pattern and update state - * vm_fault_deactivate_behind depends on the - * state being up to date - */ - vm_fault_is_sequential(m_object, cur_offset, fault_info.behavior); - - vm_fault_deactivate_behind(m_object, cur_offset, fault_info.behavior); - } - /* - * That's it, clean up and return. 
- */ - if (m->vmp_busy) { - PAGE_WAKEUP_DONE(m); - } - - if (need_retry == FALSE && !m_object->internal && (fault_type & VM_PROT_WRITE)) { - vm_object_paging_begin(m_object); - - assert(written_on_object == VM_OBJECT_NULL); - written_on_object = m_object; - written_on_pager = m_object->pager; - written_on_offset = m_object->paging_offset + m->vmp_offset; - } - vm_object_unlock(object); - - vm_map_unlock_read(map); - if (real_map != map) { - vm_map_unlock(real_map); - } + vm_fault_complete( + map, + real_map, + object, + m_object, + m, + offset, + trace_real_vaddr, + &fault_info, + caller_prot, + real_vaddr, + vm_fault_type_for_tracing(need_copy_on_read, type_of_fault), + need_retry, + kr, + physpage_p, + prot, + top_object, + need_collapse, + cur_offset, + fault_type, + &written_on_object, + &written_on_pager, + &written_on_offset); + top_object = VM_OBJECT_NULL; if (need_retry == TRUE) { /* * vm_fault_enter couldn't complete the PMAP_ENTER... @@ -4208,7 +4726,8 @@ FastPmapEnter: assert(m_object == VM_PAGE_OBJECT(m)); if ((cur_object_lock_type == OBJECT_LOCK_SHARED) && - VM_FAULT_NEED_CS_VALIDATION(NULL, m, m_object)) { + vm_fault_cs_need_validation(NULL, m, m_object, + PAGE_SIZE, 0)) { goto upgrade_lock_and_retry; } @@ -4242,7 +4761,10 @@ FastPmapEnter: * the page copy. */ vm_page_copy(cur_m, m); - vm_page_insert(m, object, offset); + vm_page_insert(m, object, vm_object_trunc_page(offset)); + if (VM_MAP_PAGE_MASK(map) != PAGE_MASK) { + DEBUG4K_FAULT("map %p vaddr 0x%llx page %p [%p 0x%llx] copied to %p [%p 0x%llx]\n", map, (uint64_t)vaddr, cur_m, VM_PAGE_OBJECT(cur_m), cur_m->vmp_offset, m, VM_PAGE_OBJECT(m), m->vmp_offset); + } m_object = object; SET_PAGE_DIRTY(m, FALSE); @@ -4251,6 +4773,40 @@ FastPmapEnter: */ if (object->ref_count > 1 && cur_m->vmp_pmapped) { pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(cur_m)); + } else if (VM_MAP_PAGE_SIZE(map) < PAGE_SIZE) { + /* + * We've copied the full 16K page but we're + * about to call vm_fault_enter() only for + * the 4K chunk we're faulting on. The other + * three 4K chunks in that page could still + * be pmapped in this pmap. + * Since the VM object layer thinks that the + * entire page has been dealt with and the + * original page might no longer be needed, + * it might collapse/bypass the original VM + * object and free its pages, which would be + * bad (and would trigger pmap_verify_free() + * assertions) if the other 4K chunks are still + * pmapped. + */ + /* + * XXX FBDP TODO4K: to be revisisted + * Technically, we need to pmap_disconnect() + * only the target pmap's mappings for the 4K + * chunks of this 16K VM page. If other pmaps + * have PTEs on these chunks, that means that + * the associated VM map must have a reference + * on the VM object, so no need to worry about + * those. + * pmap_protect() for each 4K chunk would be + * better but we'd have to check which chunks + * are actually mapped before and after this + * one. + * A full-blown pmap_disconnect() is easier + * for now but not efficient. + */ + DEBUG4K_FAULT("pmap_disconnect() page %p object %p offset 0x%llx phys 0x%x\n", cur_m, VM_PAGE_OBJECT(cur_m), cur_m->vmp_offset, VM_PAGE_GET_PHYS_PAGE(cur_m)); + pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(cur_m)); } if (cur_m->vmp_clustered) { @@ -4303,12 +4859,12 @@ FastPmapEnter: * No page at cur_object, cur_offset... 
m == NULL */ if (cur_object->pager_created) { - int compressor_external_state = VM_EXTERNAL_STATE_UNKNOWN; + vm_external_state_t compressor_external_state = VM_EXTERNAL_STATE_UNKNOWN; if (MUST_ASK_PAGER(cur_object, cur_offset, compressor_external_state) == TRUE) { int my_fault_type; - int c_flags = C_DONT_BLOCK; - boolean_t insert_cur_object = FALSE; + uint8_t c_flags = C_DONT_BLOCK; + bool insert_cur_object = FALSE; /* * May have to talk to a pager... @@ -4414,7 +4970,7 @@ FastPmapEnter: * so no need to take a * "paging_in_progress" reference. */ - boolean_t shared_lock; + bool shared_lock; if ((object == cur_object && object_lock_type == OBJECT_LOCK_EXCLUSIVE) || (object != cur_object && @@ -4426,8 +4982,8 @@ FastPmapEnter: kr = vm_compressor_pager_get( cur_object->pager, - (cur_offset + - cur_object->paging_offset), + (vm_object_trunc_page(cur_offset) + + cur_object->paging_offset), VM_PAGE_GET_PHYS_PAGE(m), &my_fault_type, c_flags, @@ -4442,6 +4998,24 @@ FastPmapEnter: if (kr != KERN_SUCCESS) { vm_page_release(m, FALSE); m = VM_PAGE_NULL; + } + /* + * If vm_compressor_pager_get() returns + * KERN_MEMORY_FAILURE, then the + * compressed data is permanently lost, + * so return this error immediately. + */ + if (kr == KERN_MEMORY_FAILURE) { + if (object != cur_object) { + vm_object_unlock(cur_object); + } + vm_object_unlock(object); + vm_map_unlock_read(map); + if (real_map != map) { + vm_map_unlock(real_map); + } + goto done; + } else if (kr != KERN_SUCCESS) { break; } m->vmp_dirty = TRUE; @@ -4496,10 +5070,10 @@ FastPmapEnter: } if (insert_cur_object) { - vm_page_insert(m, cur_object, cur_offset); + vm_page_insert(m, cur_object, vm_object_trunc_page(cur_offset)); m_object = cur_object; } else { - vm_page_insert(m, object, offset); + vm_page_insert(m, object, vm_object_trunc_page(offset)); m_object = object; } @@ -4587,7 +5161,7 @@ FastPmapEnter: if (!object->internal) { panic("%s:%d should not zero-fill page at offset 0x%llx in external object %p", __FUNCTION__, __LINE__, (uint64_t)offset, object); } - m = vm_page_alloc(object, offset); + m = vm_page_alloc(object, vm_object_trunc_page(offset)); m_object = NULL; if (m == VM_PAGE_NULL) { @@ -4600,23 +5174,161 @@ FastPmapEnter: m_object = object; /* - * Now zero fill page... - * the page is probably going to - * be written soon, so don't bother - * to clear the modified bit + * Zeroing the page and entering into it into the pmap + * represents a significant amount of the zero fill fault handler's work. * - * NOTE: This code holds the map - * lock across the zero fill. + * To improve fault scalability, we'll drop the object lock, if it appears contended, + * now that we've inserted the page into the vm object. + * Before dropping the lock, we need to check protection bits and set the + * mapped bits on the page. Then we can mark the page busy, drop the lock, + * zero it, and do the pmap enter. We'll need to reacquire the lock + * to clear the busy bit and wake up any waiters. 
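Concretely, the code that follows this comment implements that plan; a condensed sketch (illustrative only, error handling and the non-zero-fill details elided):

    if (object_is_contended) {
        m->vmp_busy = TRUE;        // contents not valid yet, keep others out
        vm_object_unlock(object);  // let concurrent faults on this object proceed
    }
    vm_page_zero_fill(m);
    if (object_is_contended) {
        kr = vm_fault_pmap_enter(...);                // object lock not held
        vm_object_lock(object);                       // re-take it to clear vmp_busy
    } else {
        kr = vm_fault_pmap_enter_with_object_lock(...);
    }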
*/ - type_of_fault = vm_fault_zero_page(m, map->no_zero_fill); + vm_fault_cs_clear(m); + m->vmp_pmapped = TRUE; + if (map->no_zero_fill) { + type_of_fault = DBG_NZF_PAGE_FAULT; + } else { + type_of_fault = DBG_ZERO_FILL_FAULT; + } + { + pmap_t destination_pmap; + vm_map_offset_t destination_pmap_vaddr; + vm_prot_t enter_fault_type; + if (caller_pmap) { + destination_pmap = caller_pmap; + destination_pmap_vaddr = caller_pmap_addr; + } else { + destination_pmap = pmap; + destination_pmap_vaddr = vaddr; + } + if (change_wiring) { + enter_fault_type = VM_PROT_NONE; + } else { + enter_fault_type = caller_prot; + } + kr = vm_fault_enter_prepare(m, + destination_pmap, + destination_pmap_vaddr, + &prot, + caller_prot, + fault_page_size, + fault_phys_offset, + change_wiring, + enter_fault_type, + &fault_info, + &type_of_fault, + &page_needs_data_sync); + if (kr != KERN_SUCCESS) { + goto zero_fill_cleanup; + } - goto FastPmapEnter; + if (object_is_contended) { + /* + * At this point the page is in the vm object, but not on a paging queue. + * Since it's accessible to another thread but its contents are invalid + * (it hasn't been zeroed) mark it busy before dropping the object lock. + */ + m->vmp_busy = TRUE; + vm_object_unlock(object); + } + if (type_of_fault == DBG_ZERO_FILL_FAULT) { + /* + * Now zero fill page... + * the page is probably going to + * be written soon, so don't bother + * to clear the modified bit + * + * NOTE: This code holds the map + * lock across the zero fill. + */ + vm_page_zero_fill(m); + VM_STAT_INCR(zero_fill_count); + DTRACE_VM2(zfod, int, 1, (uint64_t *), NULL); + } + if (page_needs_data_sync) { + pmap_sync_page_data_phys(VM_PAGE_GET_PHYS_PAGE(m)); + } + + if (top_object != VM_OBJECT_NULL) { + need_retry_ptr = &need_retry; + } else { + need_retry_ptr = NULL; + } + if (object_is_contended) { + kr = vm_fault_pmap_enter(destination_pmap, destination_pmap_vaddr, + fault_page_size, fault_phys_offset, + m, &prot, caller_prot, enter_fault_type, wired, + fault_info.pmap_options, need_retry_ptr); + vm_object_lock(object); + } else { + kr = vm_fault_pmap_enter_with_object_lock(object, destination_pmap, destination_pmap_vaddr, + fault_page_size, fault_phys_offset, + m, &prot, caller_prot, enter_fault_type, wired, + fault_info.pmap_options, need_retry_ptr); + } + } +zero_fill_cleanup: + if (!VM_DYNAMIC_PAGING_ENABLED() && + (object->purgable == VM_PURGABLE_DENY || + object->purgable == VM_PURGABLE_NONVOLATILE || + object->purgable == VM_PURGABLE_VOLATILE)) { + vm_page_lockspin_queues(); + if (!VM_DYNAMIC_PAGING_ENABLED()) { + vm_fault_enqueue_throttled_locked(m); + } + vm_page_unlock_queues(); + } + vm_fault_enqueue_page(object, m, wired, change_wiring, wire_tag, fault_info.no_cache, &type_of_fault, kr); + + vm_fault_complete( + map, + real_map, + object, + m_object, + m, + offset, + trace_real_vaddr, + &fault_info, + caller_prot, + real_vaddr, + type_of_fault, + need_retry, + kr, + physpage_p, + prot, + top_object, + need_collapse, + cur_offset, + fault_type, + &written_on_object, + &written_on_pager, + &written_on_offset); + top_object = VM_OBJECT_NULL; + if (need_retry == TRUE) { + /* + * vm_fault_enter couldn't complete the PMAP_ENTER... + * at this point we don't hold any locks so it's safe + * to ask the pmap layer to expand the page table to + * accommodate this mapping... 
once expanded, we'll + * re-drive the fault which should result in vm_fault_enter + * being able to successfully enter the mapping this time around + */ + (void)pmap_enter_options( + pmap, vaddr, 0, 0, 0, 0, 0, + PMAP_OPTIONS_NOENTER, NULL); + + need_retry = FALSE; + goto RetryFault; + } + goto done; } /* * On to the next level in the shadow chain */ cur_offset += cur_object->vo_shadow_offset; new_object = cur_object->shadow; + fault_phys_offset = cur_offset - vm_object_trunc_page(cur_offset); /* * take the new_object's lock with the indicated state @@ -4784,9 +5496,7 @@ handle_copy_delay: THREAD_ABORTSAFE)) { goto RetryFault; } - /* - * fall thru - */ + OS_FALLTHROUGH; case VM_FAULT_INTERRUPTED: kr = KERN_ABORTED; goto done; @@ -4889,7 +5599,8 @@ handle_copy_delay: &retry_object, &retry_offset, &retry_prot, &wired, &fault_info, - &real_map); + &real_map, + NULL); pmap = real_map->pmap; if (kr != KERN_SUCCESS) { @@ -4963,7 +5674,7 @@ handle_copy_delay: * Check whether the protection has changed or the object * has been copied while we left the map unlocked. */ - if (pmap_has_prot_policy(retry_prot)) { + if (pmap_has_prot_policy(pmap, fault_info.pmap_options & PMAP_OPTIONS_TRANSLATED_ALLOW_EXECUTE, retry_prot)) { /* If the pmap layer cares, pass the full set. */ prot = retry_prot; } else { @@ -4980,7 +5691,7 @@ handle_copy_delay: * The copy object changed while the top-level object * was unlocked, so take away write permission. */ - assert(!pmap_has_prot_policy(prot)); + assert(!pmap_has_prot_policy(pmap, fault_info.pmap_options & PMAP_OPTIONS_TRANSLATED_ALLOW_EXECUTE, prot)); prot &= ~VM_PROT_WRITE; } } else { @@ -4998,7 +5709,7 @@ handle_copy_delay: vm_protect_privileged_from_untrusted && !((prot & VM_PROT_EXECUTE) && VM_PAGE_OBJECT(m)->code_signed && - cs_process_enforcement(NULL)) && + pmap_get_vm_map_cs_enforced(caller_pmap ? caller_pmap : pmap)) && current_proc_is_privileged()) { /* * We found the page we want in an "untrusted" VM object @@ -5058,10 +5769,21 @@ handle_copy_delay: * the pageout queues. If the pageout daemon comes * across the page, it will remove it from the queues. 
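The block just past the end of this comment checks the sub-page invariant for 4 KB maps running on a 16 KB kernel page: the physical sub-page offset must be 4 KB aligned and fall inside one kernel page. A standalone illustration of that arithmetic (not XNU code; the 16 KB and 4 KB sizes are assumed values of PAGE_SIZE and the map's page size):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
        const uint64_t kern_page_size = 16384;  // assumed kernel PAGE_SIZE
        const uint64_t map_page_size  = 4096;   // assumed 4 KB VM map page size
        uint64_t offset = 0x12345000;           // arbitrary, 4 KB-aligned object offset
        // same computation as fault_phys_offset = offset - trunc_page(offset)
        uint64_t fault_phys_offset = offset - (offset & ~(kern_page_size - 1));
        assert((fault_phys_offset & (map_page_size - 1)) == 0);  // 4 KB aligned
        assert(fault_phys_offset < kern_page_size);              // inside one 16 KB page
        printf("sub-page offset inside the 16 KB page: 0x%llx\n",
            (unsigned long long)fault_phys_offset);
        return 0;
    }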
*/ + if (fault_page_size < PAGE_SIZE) { + DEBUG4K_FAULT("map %p original %p pmap %p va 0x%llx pa 0x%llx(0x%llx+0x%llx) prot 0x%x caller_prot 0x%x\n", map, original_map, pmap, (uint64_t)vaddr, (uint64_t)((((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(m)) << PAGE_SHIFT) + fault_phys_offset), (uint64_t)(((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(m)) << PAGE_SHIFT), (uint64_t)fault_phys_offset, prot, caller_prot); + assertf((!(fault_phys_offset & FOURK_PAGE_MASK) && + fault_phys_offset < PAGE_SIZE), + "0x%llx\n", (uint64_t)fault_phys_offset); + } else { + assertf(fault_phys_offset == 0, + "0x%llx\n", (uint64_t)fault_phys_offset); + } if (caller_pmap) { kr = vm_fault_enter(m, caller_pmap, caller_pmap_addr, + fault_page_size, + fault_phys_offset, prot, caller_prot, wired, @@ -5074,6 +5796,8 @@ handle_copy_delay: kr = vm_fault_enter(m, pmap, vaddr, + fault_page_size, + fault_phys_offset, prot, caller_prot, wired, @@ -5096,7 +5820,7 @@ handle_copy_delay: event_code = (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_REAL_FAULT_ADDR_EXTERNAL)); } - KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, event_code, trace_real_vaddr, (fault_info.user_tag << 16) | (caller_prot << 8) | type_of_fault, m->vmp_offset, get_current_unique_pid(), 0); + KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, event_code, trace_real_vaddr, (fault_info.user_tag << 16) | (caller_prot << 8) | vm_fault_type_for_tracing(need_copy_on_read, type_of_fault), m->vmp_offset, get_current_unique_pid(), 0); KDBG_FILTERED(MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_REAL_FAULT_SLOW), get_current_unique_pid(), 0, 0, 0, 0); DTRACE_VM6(real_fault, vm_map_offset_t, real_vaddr, vm_map_offset_t, m->vmp_offset, int, event_code, int, caller_prot, int, type_of_fault, int, fault_info.user_tag); @@ -5173,7 +5897,7 @@ handle_copy_delay: if (vm_map_lookup_entry(map, laddr, &entry) && (VME_OBJECT(entry) != NULL) && (VME_OBJECT(entry) == object)) { - int superpage; + uint16_t superpage; if (!object->pager_created && object->phys_contiguous && @@ -5324,9 +6048,13 @@ done: ((uint64_t)trace_vaddr >> 32), trace_vaddr, kr, - type_of_fault, + vm_fault_type_for_tracing(need_copy_on_read, type_of_fault), 0); + if (fault_page_size < PAGE_SIZE && kr != KERN_SUCCESS) { + DEBUG4K_FAULT("map %p original %p vaddr 0x%llx -> 0x%x\n", map, original_map, (uint64_t)trace_real_vaddr, kr); + } + return kr; } @@ -5348,6 +6076,7 @@ vm_fault_wire( vm_map_offset_t va; vm_map_offset_t end_addr = entry->vme_end; kern_return_t rc; + vm_map_size_t effective_page_size; assert(entry->in_transition); @@ -5371,7 +6100,10 @@ vm_fault_wire( * in the physical map. */ - for (va = entry->vme_start; va < end_addr; va += PAGE_SIZE) { + effective_page_size = MIN(VM_MAP_PAGE_SIZE(map), PAGE_SIZE); + for (va = entry->vme_start; + va < end_addr; + va += effective_page_size) { rc = vm_fault_wire_fast(map, va, prot, wire_tag, entry, pmap, pmap_addr + (va - entry->vme_start), physpage_p); @@ -5419,6 +6151,7 @@ vm_fault_unwire( vm_object_t object; struct vm_object_fault_info fault_info = {}; unsigned int unwired_pages; + vm_map_size_t effective_page_size; object = (entry->is_sub_map) ? VM_OBJECT_NULL : VME_OBJECT(entry); @@ -5451,7 +6184,10 @@ vm_fault_unwire( * get their mappings from the physical map system. 
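As with vm_fault_wire() earlier in this hunk, the loop below now steps by the map's effective page size rather than a fixed PAGE_SIZE, so a 4 KB submap on a 16 KB kernel visits every 4 KB chunk instead of every 16 KB page; condensed:

    effective_page_size = MIN(VM_MAP_PAGE_SIZE(map), PAGE_SIZE);
    for (va = entry->vme_start; va < end_addr; va += effective_page_size) {
        // unwire (or, in vm_fault_wire(), wire) one effective page at va
    }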
*/ - for (va = entry->vme_start; va < end_addr; va += PAGE_SIZE) { + effective_page_size = MIN(VM_MAP_PAGE_SIZE(map), PAGE_SIZE); + for (va = entry->vme_start; + va < end_addr; + va += effective_page_size) { if (object == VM_OBJECT_NULL) { if (pmap) { pmap_change_wiring(pmap, @@ -5565,7 +6301,18 @@ vm_fault_unwire( pmap_addr + (end_addr - entry->vme_start), TRUE); if (kernel_object == object) { - vm_tag_update_size(fault_info.user_tag, -ptoa_64(unwired_pages)); + /* + * Would like to make user_tag in vm_object_fault_info + * vm_tag_t (unsigned short) but user_tag derives its value from + * VME_ALIAS(entry) at a few places and VME_ALIAS, in turn, casts + * to an _unsigned int_ which is used by non-fault_info paths throughout the + * code at many places. + * + * So, for now, an explicit truncation to unsigned short (vm_tag_t). + */ + assertf((fault_info.user_tag & VME_ALIAS_MASK) == fault_info.user_tag, + "VM Tag truncated from 0x%x to 0x%x\n", fault_info.user_tag, (fault_info.user_tag & VME_ALIAS_MASK)); + vm_tag_update_size((vm_tag_t) fault_info.user_tag, -ptoa_64(unwired_pages)); } } @@ -5607,6 +6354,8 @@ vm_fault_wire_fast( thread_t thread = current_thread(); int type_of_fault; kern_return_t kr; + vm_map_size_t fault_page_size; + vm_map_offset_t fault_phys_offset; struct vm_object_fault_info fault_info = {}; VM_STAT_INCR(faults); @@ -5692,7 +6441,7 @@ vm_fault_wire_fast( * Look for page in top-level object. If it's not there or * there's something going on, give up. */ - m = vm_page_lookup(object, offset); + m = vm_page_lookup(object, vm_object_trunc_page(offset)); if ((m == VM_PAGE_NULL) || (m->vmp_busy) || (m->vmp_unusual && (m->vmp_error || m->vmp_restart || m->vmp_absent))) { GIVE_UP; @@ -5738,6 +6487,9 @@ vm_fault_wire_fast( fault_info.pmap_options |= PMAP_OPTIONS_ALT_ACCT; } + fault_page_size = MIN(VM_MAP_PAGE_SIZE(map), PAGE_SIZE); + fault_phys_offset = offset - vm_object_trunc_page(offset); + /* * Put this page into the physical map. 
*/ @@ -5745,6 +6497,8 @@ vm_fault_wire_fast( kr = vm_fault_enter(m, pmap, pmap_addr, + fault_page_size, + fault_phys_offset, prot, prot, TRUE, /* wired */ @@ -5950,14 +6704,14 @@ RetryDestinationFault:; if (vm_page_wait(interruptible)) { goto RetryDestinationFault; } - /* fall thru */ + OS_FALLTHROUGH; case VM_FAULT_INTERRUPTED: RETURN(MACH_SEND_INTERRUPTED); case VM_FAULT_SUCCESS_NO_VM_PAGE: /* success but no VM page: fail the copy */ vm_object_paging_end(dst_object); vm_object_unlock(dst_object); - /*FALLTHROUGH*/ + OS_FALLTHROUGH; case VM_FAULT_MEMORY_ERROR: if (error) { return error; @@ -6044,7 +6798,7 @@ RetrySourceFault:; if (vm_page_wait(interruptible)) { goto RetrySourceFault; } - /* fall thru */ + OS_FALLTHROUGH; case VM_FAULT_INTERRUPTED: vm_fault_copy_dst_cleanup(dst_page); RETURN(MACH_SEND_INTERRUPTED); @@ -6052,7 +6806,7 @@ RetrySourceFault:; /* success but no VM page: fail */ vm_object_paging_end(src_object); vm_object_unlock(src_object); - /*FALLTHROUGH*/ + OS_FALLTHROUGH; case VM_FAULT_MEMORY_ERROR: vm_fault_copy_dst_cleanup(dst_page); if (error) { @@ -6279,6 +7033,15 @@ kdp_lightweight_fault(vm_map_t map, vm_offset_t cur_target_addr) int compressor_flags = (C_DONT_BLOCK | C_KEEP | C_KDP); int my_fault_type = VM_PROT_READ; kern_return_t kr; + int effective_page_mask, effective_page_size; + + if (VM_MAP_PAGE_SHIFT(map) < PAGE_SHIFT) { + effective_page_mask = VM_MAP_PAGE_MASK(map); + effective_page_size = VM_MAP_PAGE_SIZE(map); + } else { + effective_page_mask = PAGE_MASK; + effective_page_size = PAGE_SIZE; + } if (not_in_kdp) { panic("kdp_lightweight_fault called from outside of debugger context"); @@ -6286,8 +7049,8 @@ kdp_lightweight_fault(vm_map_t map, vm_offset_t cur_target_addr) assert(map != VM_MAP_NULL); - assert((cur_target_addr & PAGE_MASK) == 0); - if ((cur_target_addr & PAGE_MASK) != 0) { + assert((cur_target_addr & effective_page_mask) == 0); + if ((cur_target_addr & effective_page_mask) != 0) { return 0; } @@ -6320,7 +7083,7 @@ kdp_lightweight_fault(vm_map_t map, vm_offset_t cur_target_addr) return 0; } - m = kdp_vm_page_lookup(object, object_offset); + m = kdp_vm_page_lookup(object, vm_object_trunc_page(object_offset)); if (m != VM_PAGE_NULL) { if ((object->wimg_bits & VM_WIMG_MASK) != VM_WIMG_DEFAULT) { @@ -6354,7 +7117,8 @@ kdp_lightweight_fault(vm_map_t map, vm_offset_t cur_target_addr) if (object->pager_created && MUST_ASK_PAGER(object, object_offset, compressor_external_state)) { if (compressor_external_state == VM_EXTERNAL_STATE_EXISTS) { - kr = vm_compressor_pager_get(object->pager, (object_offset + object->paging_offset), + kr = vm_compressor_pager_get(object->pager, + vm_object_trunc_page(object_offset + object->paging_offset), kdp_compressor_decompressed_page_ppnum, &my_fault_type, compressor_flags, &compressed_count_delta); if (kr == KERN_SUCCESS) { @@ -6386,14 +7150,17 @@ kdp_lightweight_fault(vm_map_t map, vm_offset_t cur_target_addr) */ static boolean_t vm_page_validate_cs_fast( - vm_page_t page) + vm_page_t page, + vm_map_size_t fault_page_size, + vm_map_offset_t fault_phys_offset) { vm_object_t object; object = VM_PAGE_OBJECT(page); vm_object_lock_assert_held(object); - if (page->vmp_wpmapped && !page->vmp_cs_tainted) { + if (page->vmp_wpmapped && + !VMP_CS_TAINTED(page, fault_page_size, fault_phys_offset)) { /* * This page was mapped for "write" access sometime in the * past and could still be modifiable in the future. @@ -6402,8 +7169,8 @@ vm_page_validate_cs_fast( * need to re-validate. 
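More generally, throughout this function the old single vmp_cs_validated and vmp_cs_tainted booleans are read and written through the VMP_CS_* accessors, which take the faulting sub-page (fault_page_size, fault_phys_offset). The accessor definitions are not part of this hunk; as an assumption based on how they are used here, they behave like a small per-4 KB-chunk bitmap, roughly:

    // hypothetical model only, not the actual XNU macros
    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    typedef uint8_t cs_bits_t;           // one bit per 4 KB chunk of a 16 KB page
    #define CS_ALL_TRUE  ((cs_bits_t)0x0F)
    #define CS_ALL_FALSE ((cs_bits_t)0x00)

    static bool
    cs_bit_for_fault(cs_bits_t bits, size_t fault_page_size, size_t fault_phys_offset)
    {
        if (fault_page_size == 16384) {                   // whole-page caller
            return bits == CS_ALL_TRUE;                   // every chunk must be set
        }
        return (bits >> (fault_phys_offset / 4096)) & 1;  // just this 4 KB chunk
    }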
] */ vm_object_lock_assert_exclusive(object); - page->vmp_cs_validated = TRUE; - page->vmp_cs_tainted = TRUE; + VMP_CS_SET_VALIDATED(page, fault_page_size, fault_phys_offset, TRUE); + VMP_CS_SET_TAINTED(page, fault_page_size, fault_phys_offset, TRUE); if (cs_debug) { printf("CODESIGNING: %s: " "page %p obj %p off 0x%llx " @@ -6414,7 +7181,8 @@ vm_page_validate_cs_fast( vm_cs_validated_dirtied++; } - if (page->vmp_cs_validated || page->vmp_cs_tainted) { + if (VMP_CS_VALIDATED(page, fault_page_size, fault_phys_offset) || + VMP_CS_TAINTED(page, fault_page_size, fault_phys_offset)) { return TRUE; } vm_object_lock_assert_exclusive(object); @@ -6427,8 +7195,8 @@ vm_page_validate_cs_fast( page->vmp_offset + object->paging_offset, CS_BITMAP_CHECK); if (kr == KERN_SUCCESS) { - page->vmp_cs_validated = TRUE; - page->vmp_cs_tainted = FALSE; + page->vmp_cs_validated = VMP_CS_ALL_TRUE; + page->vmp_cs_tainted = VMP_CS_ALL_FALSE; vm_cs_bitmap_validated++; return TRUE; } @@ -6456,8 +7224,7 @@ vm_page_validate_cs_mapped_slow( memory_object_offset_t mo_offset; memory_object_t pager; struct vnode *vnode; - boolean_t validated; - unsigned tainted; + int validated, tainted, nx; assert(page->vmp_busy); object = VM_PAGE_OBJECT(page); @@ -6481,26 +7248,24 @@ vm_page_validate_cs_mapped_slow( mo_offset = page->vmp_offset + object->paging_offset; /* verify the SHA1 hash for this page */ + validated = 0; tainted = 0; - validated = cs_validate_range(vnode, + nx = 0; + cs_validate_page(vnode, pager, mo_offset, (const void *)((const char *)kaddr), - PAGE_SIZE_64, - &tainted); + &validated, + &tainted, + &nx); - if (tainted & CS_VALIDATE_TAINTED) { - page->vmp_cs_tainted = TRUE; - } - if (tainted & CS_VALIDATE_NX) { - page->vmp_cs_nx = TRUE; - } - if (validated) { - page->vmp_cs_validated = TRUE; - } + page->vmp_cs_validated |= validated; + page->vmp_cs_tainted |= tainted; + page->vmp_cs_nx |= nx; #if CHECK_CS_VALIDATION_BITMAP - if (page->vmp_cs_validated && !page->vmp_cs_tainted) { + if (page->vmp_cs_validated == VMP_CS_ALL_TRUE && + page->vmp_cs_tainted == VMP_CS_ALL_FALSE) { vnode_pager_cs_check_validation_bitmap(object->pager, mo_offset, CS_BITMAP_SET); @@ -6511,16 +7276,20 @@ vm_page_validate_cs_mapped_slow( void vm_page_validate_cs_mapped( vm_page_t page, + vm_map_size_t fault_page_size, + vm_map_offset_t fault_phys_offset, const void *kaddr) { - if (!vm_page_validate_cs_fast(page)) { + if (!vm_page_validate_cs_fast(page, fault_page_size, fault_phys_offset)) { vm_page_validate_cs_mapped_slow(page, kaddr); } } void vm_page_validate_cs( - vm_page_t page) + vm_page_t page, + vm_map_size_t fault_page_size, + vm_map_offset_t fault_phys_offset) { vm_object_t object; vm_object_offset_t offset; @@ -6534,7 +7303,7 @@ vm_page_validate_cs( object = VM_PAGE_OBJECT(page); vm_object_lock_assert_held(object); - if (vm_page_validate_cs_fast(page)) { + if (vm_page_validate_cs_fast(page, fault_page_size, fault_phys_offset)) { return; } vm_object_lock_assert_exclusive(object); @@ -6680,6 +7449,7 @@ vmrtfaultinfo_bufsz(void) #include +__attribute__((noinline)) static void vm_record_rtfault(thread_t cthread, uint64_t fstart, vm_map_offset_t fault_vaddr, int type_of_fault) { @@ -6698,7 +7468,7 @@ vm_record_rtfault(thread_t cthread, uint64_t fstart, vm_map_offset_t fault_vaddr * further user stack traversals, thus avoiding copyin()s and further * faults. 
*/ - unsigned int bfrs = backtrace_thread_user(cthread, &bpc, 1U, &btr, &u64, NULL); + unsigned int bfrs = backtrace_thread_user(cthread, &bpc, 1U, &btr, &u64, NULL, false); if ((btr == 0) && (bfrs > 0)) { cfpc = bpc; @@ -6727,11 +7497,11 @@ vm_record_rtfault(thread_t cthread, uint64_t fstart, vm_map_offset_t fault_vaddr } int -vmrtf_extract(uint64_t cupid, __unused boolean_t isroot, int vrecordsz, void *vrecords, int *vmrtfrv) +vmrtf_extract(uint64_t cupid, __unused boolean_t isroot, unsigned long vrecordsz, void *vrecords, unsigned long *vmrtfrv) { vm_rtfault_record_t *cvmrd = vrecords; size_t residue = vrecordsz; - int numextracted = 0; + size_t numextracted = 0; boolean_t early_exit = FALSE; vm_rtfrecord_lock(); diff --git a/osfmk/vm/vm_fault.h b/osfmk/vm/vm_fault.h index 185764384..15a65e76a 100644 --- a/osfmk/vm/vm_fault.h +++ b/osfmk/vm/vm_fault.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2020 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -181,6 +181,8 @@ extern kern_return_t vm_fault_enter( vm_page_t m, pmap_t pmap, vm_map_offset_t vaddr, + vm_map_size_t fault_page_size, + vm_map_offset_t fault_phys_offset, vm_prot_t prot, vm_prot_t fault_type, boolean_t wired, @@ -194,8 +196,6 @@ extern vm_offset_t kdp_lightweight_fault( vm_map_t map, vm_offset_t cur_target_addr); -extern void vm_rtfault_record_init(void); - #endif /* MACH_KERNEL_PRIVATE */ #if XNU_KERNEL_PRIVATE diff --git a/osfmk/vm/vm_fourk_pager.c b/osfmk/vm/vm_fourk_pager.c index 4a9e7a43e..ba214a77f 100644 --- a/osfmk/vm/vm_fourk_pager.c +++ b/osfmk/vm/vm_fourk_pager.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019 Apple Inc. All rights reserved. + * Copyright (c) 2014-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -162,8 +162,9 @@ typedef struct fourk_pager { */ int fourk_pager_count = 0; /* number of pagers */ int fourk_pager_count_mapped = 0; /* number of unmapped pagers */ -queue_head_t fourk_pager_queue; -decl_lck_mtx_data(, fourk_pager_lock); +queue_head_t fourk_pager_queue = QUEUE_HEAD_INITIALIZER(fourk_pager_queue); +LCK_GRP_DECLARE(fourk_pager_lck_grp, "4K-pager"); +LCK_MTX_DECLARE(fourk_pager_lock, &fourk_pager_lck_grp); /* * Maximum number of unmapped pagers we're willing to keep around. 
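The statically initialized queue and mutex declared just above replace the run-time setup that fourk_pager_bootstrap() used to perform (that routine is deleted in the hunks below), so the pager's lock and queue are valid from the first reference without any bootstrap call. The before/after pattern, condensed from this file's changes:

    // before: explicit bootstrap at startup
    lck_grp_init(&fourk_pager_lck_grp, "4K-pager", &fourk_pager_lck_grp_attr);
    lck_mtx_init(&fourk_pager_lock, &fourk_pager_lck_grp, &fourk_pager_lck_attr);
    queue_init(&fourk_pager_queue);

    // after: static declarations, no bootstrap routine needed
    queue_head_t fourk_pager_queue = QUEUE_HEAD_INITIALIZER(fourk_pager_queue);
    LCK_GRP_DECLARE(fourk_pager_lck_grp, "4K-pager");
    LCK_MTX_DECLARE(fourk_pager_lock, &fourk_pager_lck_grp);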
@@ -178,12 +179,6 @@ int fourk_pager_count_unmapped_max = 0; int fourk_pager_num_trim_max = 0; int fourk_pager_num_trim_total = 0; - -lck_grp_t fourk_pager_lck_grp; -lck_grp_attr_t fourk_pager_lck_grp_attr; -lck_attr_t fourk_pager_lck_attr; - - /* internal prototypes */ fourk_pager_t fourk_pager_lookup(memory_object_t mem_obj); void fourk_pager_dequeue(fourk_pager_t pager); @@ -210,16 +205,6 @@ int fourk_pagerdebug = 0; #endif -void -fourk_pager_bootstrap(void) -{ - lck_grp_attr_setdefault(&fourk_pager_lck_grp_attr); - lck_grp_init(&fourk_pager_lck_grp, "4K-pager", &fourk_pager_lck_grp_attr); - lck_attr_setdefault(&fourk_pager_lck_attr); - lck_mtx_init(&fourk_pager_lock, &fourk_pager_lck_grp, &fourk_pager_lck_attr); - queue_init(&fourk_pager_queue); -} - /* * fourk_pager_init() * @@ -1034,7 +1019,7 @@ retry_src_fault: if (vm_page_wait(interruptible)) { goto retry_src_fault; } - /* fall thru */ + OS_FALLTHROUGH; case VM_FAULT_INTERRUPTED: retval = MACH_SEND_INTERRUPTED; goto src_fault_done; @@ -1042,7 +1027,7 @@ retry_src_fault: /* success but no VM page: fail */ vm_object_paging_end(src_object); vm_object_unlock(src_object); - /*FALLTHROUGH*/ + OS_FALLTHROUGH; case VM_FAULT_MEMORY_ERROR: /* the page is not there! */ if (error_code) { @@ -1189,11 +1174,11 @@ src_fault_done: /* a tainted subpage taints entire 16K page */ UPL_SET_CS_TAINTED(upl_pl, cur_offset / PAGE_SIZE, - TRUE); + VMP_CS_ALL_TRUE); /* also mark as "validated" for consisteny */ UPL_SET_CS_VALIDATED(upl_pl, cur_offset / PAGE_SIZE, - TRUE); + VMP_CS_ALL_TRUE); } else if (num_subpg_validated == num_subpg_signed) { /* * All the code-signed 4K subpages of this @@ -1202,12 +1187,12 @@ src_fault_done: */ UPL_SET_CS_VALIDATED(upl_pl, cur_offset / PAGE_SIZE, - TRUE); + VMP_CS_ALL_TRUE); } if (num_subpg_nx > 0) { UPL_SET_CS_NX(upl_pl, cur_offset / PAGE_SIZE, - TRUE); + VMP_CS_ALL_TRUE); } } } @@ -1257,7 +1242,10 @@ done: } } else { boolean_t empty; - upl_commit_range(upl, 0, upl->size, + assertf(page_aligned(upl->u_offset) && page_aligned(upl->u_size), + "upl %p offset 0x%llx size 0x%x", + upl, upl->u_offset, upl->u_size); + upl_commit_range(upl, 0, upl->u_size, UPL_COMMIT_CS_VALIDATED | UPL_COMMIT_WRITTEN_BY_KERNEL, upl_pl, pl_count, &empty); } diff --git a/osfmk/vm/vm_init.c b/osfmk/vm/vm_init.c index e20fd75a0..8bd9c2378 100644 --- a/osfmk/vm/vm_init.c +++ b/osfmk/vm/vm_init.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2011 Apple Inc. All rights reserved. + * Copyright (c) 2000-2020 Apple Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -65,8 +65,8 @@ #include #include -#include -#include +#include +#include #include #include #include @@ -81,27 +81,15 @@ #include -#define ZONE_MAP_MIN CONFIG_ZONE_MAP_MIN - -/* Maximum zone size is 1.5G */ -#define ZONE_MAP_MAX (1024 * 1024 * 1536) - const vm_offset_t vm_min_kernel_address = VM_MIN_KERNEL_AND_KEXT_ADDRESS; const vm_offset_t vm_max_kernel_address = VM_MAX_KERNEL_ADDRESS; -boolean_t vm_kernel_ready = FALSE; -boolean_t kmem_ready = FALSE; -boolean_t kmem_alloc_ready = FALSE; -boolean_t zlog_ready = FALSE; -boolean_t iokit_iomd_setownership_enabled = TRUE; +TUNABLE(bool, iokit_iomd_setownership_enabled, + "iokit_iomd_setownership_enabled", true); vm_offset_t kmapoff_kaddr; unsigned int kmapoff_pgcnt; -#if CONFIG_EMBEDDED -extern int log_executable_mem_entry; -#endif /* CONFIG_EMBEDDED */ - static inline void vm_mem_bootstrap_log(const char *message) { @@ -113,13 +101,11 @@ vm_mem_bootstrap_log(const char *message) * vm_mem_bootstrap initializes the virtual memory system. * This is done only by the first cpu up. */ - +__startup_func void vm_mem_bootstrap(void) { vm_offset_t start, end; - vm_size_t zsizearg; - mach_vm_size_t zsize; /* * Initializes resident memory structures. @@ -139,128 +125,51 @@ vm_mem_bootstrap(void) vm_mem_bootstrap_log("vm_object_bootstrap"); vm_object_bootstrap(); - vm_kernel_ready = TRUE; + kernel_startup_initialize_upto(STARTUP_SUB_VM_KERNEL); vm_mem_bootstrap_log("vm_map_init"); vm_map_init(); vm_mem_bootstrap_log("kmem_init"); kmem_init(start, end); - kmem_ready = TRUE; + + kernel_startup_initialize_upto(STARTUP_SUB_KMEM); + /* * Eat a random amount of kernel_map to fuzz subsequent heap, zone and * stack addresses. (With a 4K page and 9 bits of randomness, this - * eats at most 2M of VA from the map.) + * eats about 2M of VA from the map) + * + * Note that we always need to slide by at least one page because the VM + * pointer packing schemes using KERNEL_PMAP_HEAP_RANGE_START as a base + * do not admit this address to be part of any zone submap. */ - if (!PE_parse_boot_argn("kmapoff", &kmapoff_pgcnt, - sizeof(kmapoff_pgcnt))) { - kmapoff_pgcnt = early_random() & 0x1ff; /* 9 bits */ - } - if (kmapoff_pgcnt > 0 && - vm_allocate_kernel(kernel_map, &kmapoff_kaddr, + kmapoff_pgcnt = (early_random() & 0x1ff) + 1; /* 9 bits */ + if (vm_allocate_kernel(kernel_map, &kmapoff_kaddr, kmapoff_pgcnt * PAGE_SIZE_64, VM_FLAGS_ANYWHERE, VM_KERN_MEMORY_OSFMK) != KERN_SUCCESS) { panic("cannot vm_allocate %u kernel_map pages", kmapoff_pgcnt); } -#if CONFIG_EMBEDDED - PE_parse_boot_argn("log_executable_mem_entry", - &log_executable_mem_entry, - sizeof(log_executable_mem_entry)); -#endif /* CONFIG_EMBEDDED */ - vm_mem_bootstrap_log("pmap_init"); pmap_init(); - kmem_alloc_ready = TRUE; - - if (PE_parse_boot_argn("zsize", &zsizearg, sizeof(zsizearg))) { - zsize = zsizearg * (1024ULL * 1024); - } else { - zsize = sane_size >> 2; /* Set target zone size as 1/4 of physical memory */ -#if defined(__LP64__) - zsize += zsize >> 1; -#endif /* __LP64__ */ - -#if !CONFIG_EMBEDDED - /* - * The max_zonemap_size was based on physical memory and might make the - * end of the zone go beyond what vm_page_[un]pack_ptr() can handle. - * To fix that we'll limit the size of the zone map to be what a 256Gig - * machine would have, but we'll retain the boot-args-specified size if - * it was provided. 
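(Back in the kmapoff hunk above: the slide is now (early_random() & 0x1ff) + 1 pages, i.e. between 1 and 512 pages, so with 4 KB pages the fuzz consumes between 4 KB and 2 MB of kernel VA and is guaranteed to be non-zero, which is what the pointer-packing note in that hunk relies on.)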
- */ - vm_size_t orig_zsize = zsize; - - if (zsize > 256 * (1024ULL * 1024 * 1024) / 4) { - zsize = 256 * (1024ULL * 1024 * 1024) / 4; - printf("NOTE: zonemap size reduced from 0x%lx to 0x%lx\n", - (uintptr_t)orig_zsize, (uintptr_t)zsize); - } -#endif - } - - if (zsize < ZONE_MAP_MIN) { - zsize = ZONE_MAP_MIN; /* Clamp to min */ - } - if (zsize > sane_size >> 1) { - zsize = sane_size >> 1; /* Clamp to half of RAM max */ - } -#if !__LP64__ - if (zsize > ZONE_MAP_MAX) { - zsize = ZONE_MAP_MAX; /* Clamp to 1.5GB max for K32 */ - } -#endif /* !__LP64__ */ - - vm_mem_bootstrap_log("kext_alloc_init"); - kext_alloc_init(); - - vm_mem_bootstrap_log("zone_init"); - assert((vm_size_t) zsize == zsize); - zone_init((vm_size_t) zsize); /* Allocate address space for zones */ - - /* The vm_page_zone must be created prior to kalloc_init; that - * routine can trigger zalloc()s (for e.g. mutex statistic structure - * initialization). The vm_page_zone must exist to saisfy fictitious - * page allocations (which are used for guard pages by the guard - * mode zone allocator). - */ - vm_mem_bootstrap_log("vm_page_module_init"); - vm_page_module_init(); - - vm_mem_bootstrap_log("kalloc_init"); - kalloc_init(); + kernel_startup_initialize_upto(STARTUP_SUB_KMEM_ALLOC); vm_mem_bootstrap_log("vm_fault_init"); vm_fault_init(); - vm_mem_bootstrap_log("memory_manager_default_init"); - memory_manager_default_init(); - - vm_mem_bootstrap_log("memory_object_control_bootstrap"); - memory_object_control_bootstrap(); + vm_mem_bootstrap_log("kext_alloc_init"); + kext_alloc_init(); - vm_mem_bootstrap_log("device_pager_bootstrap"); - device_pager_bootstrap(); + kernel_startup_initialize_upto(STARTUP_SUB_ZALLOC); vm_paging_map_init(); - vm_mem_bootstrap_log("vm_mem_bootstrap done"); + vm_page_delayed_work_init_ctx(); -#ifdef CONFIG_ZCACHE - zcache_bootstrap(); -#endif - vm_rtfault_record_init(); - - PE_parse_boot_argn("iokit_iomd_setownership_enabled", &iokit_iomd_setownership_enabled, sizeof(iokit_iomd_setownership_enabled)); - if (!iokit_iomd_setownership_enabled) { - kprintf("IOKit IOMD setownership DISABLED\n"); - } else { + if (iokit_iomd_setownership_enabled) { kprintf("IOKit IOMD setownership ENABLED\n"); + } else { + kprintf("IOKit IOMD setownership DISABLED\n"); } } - -void -vm_mem_init(void) -{ - vm_object_init(); -} diff --git a/osfmk/vm/vm_init.h b/osfmk/vm/vm_init.h index 86b1c0128..7f862df8a 100644 --- a/osfmk/vm/vm_init.h +++ b/osfmk/vm/vm_init.h @@ -33,7 +33,5 @@ #define VM_INIT_H extern void vm_mem_bootstrap(void); -extern void vm_mem_init(void); -extern void vm_map_steal_memory(void); #endif /* VM_INIT_H */ diff --git a/osfmk/vm/vm_kern.c b/osfmk/vm/vm_kern.c index 62dfb35f5..8abf0275c 100644 --- a/osfmk/vm/vm_kern.c +++ b/osfmk/vm/vm_kern.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2007 Apple Inc. All rights reserved. + * Copyright (c) 2000-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -77,6 +77,7 @@ #include #include #include +#include #include @@ -94,8 +95,6 @@ SECURITY_READ_ONLY_LATE(vm_map_t) kernel_map; vm_map_t kernel_pageable_map; -extern boolean_t vm_kernel_ready; - /* * Forward declarations for internal functions. 
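These vm_mem_bootstrap hunks retire the ad-hoc vm_kernel_ready / kmem_ready / kmem_alloc_ready flags and the hand-written init calls in favor of kernel_startup_initialize_upto() against ordered STARTUP_SUB_* phases. A rough sketch of the idea, with made-up phase names and no callback registry, just to show how readiness checks turn into comparisons:

    /* sketch only; phase names and the missing callback machinery are illustrative */
    #include <assert.h>
    #include <stdio.h>

    enum phase_sketch {
        PHASE_EARLY,
        PHASE_VM_KERNEL,
        PHASE_KMEM,
        PHASE_KMEM_ALLOC,
        PHASE_ZALLOC,
    };

    static enum phase_sketch startup_phase = PHASE_EARLY;

    static void
    initialize_upto(enum phase_sketch target)
    {
        assert(target >= startup_phase);   /* the phase only moves forward       */
        startup_phase = target;            /* real code runs registered callbacks */
    }

    int
    main(void)
    {
        initialize_upto(PHASE_VM_KERNEL);
        initialize_upto(PHASE_KMEM);
        /* mirrors the later "if (startup_phase < STARTUP_SUB_KMEM) panic(...)" check */
        printf("kmem ready: %d\n", startup_phase >= PHASE_KMEM);
        return 0;
    }

Consumers such as kernel_memory_allocate() then compare against a single ordered phase instead of testing a dedicated boolean per subsystem.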
*/ @@ -278,7 +277,7 @@ kernel_memory_allocate( task_t task = current_task(); #endif /* DEVELOPMENT || DEBUG */ - if (!vm_kernel_ready) { + if (startup_phase < STARTUP_SUB_KMEM) { panic("kernel_memory_allocate: VM is not ready"); } @@ -429,6 +428,10 @@ kernel_memory_allocate( vmk_flags.vmkf_atomic_entry = TRUE; } + if (flags & KMA_KHEAP) { + vm_alloc_flags |= VM_MAP_FIND_LAST_FREE; + } + kr = vm_map_find_space(map, &map_addr, fill_size, map_mask, vm_alloc_flags, vmk_flags, tag, &entry); @@ -514,7 +517,9 @@ kernel_memory_allocate( mem->vmp_pmapped = TRUE; mem->vmp_wpmapped = TRUE; - PMAP_ENTER_OPTIONS(kernel_pmap, map_addr + pg_offset, mem, + PMAP_ENTER_OPTIONS(kernel_pmap, map_addr + pg_offset, + 0, /* fault_phys_offset */ + mem, kma_prot, VM_PROT_NONE, ((flags & KMA_KSTACK) ? VM_MEM_STACK : 0), TRUE, PMAP_OPTIONS_NOWAIT, pe_result); @@ -773,7 +778,9 @@ kernel_memory_populate( mem->vmp_pmapped = TRUE; mem->vmp_wpmapped = TRUE; - PMAP_ENTER_OPTIONS(kernel_pmap, addr + pg_offset, mem, + PMAP_ENTER_OPTIONS(kernel_pmap, addr + pg_offset, + 0, /* fault_phys_offset */ + mem, VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE, ((flags & KMA_KSTACK) ? VM_MEM_STACK : 0), TRUE, PMAP_OPTIONS_NOWAIT, pe_result); @@ -796,9 +803,12 @@ kernel_memory_populate( pmap_set_noencrypt(VM_PAGE_GET_PHYS_PAGE(mem)); } } + vm_object_unlock(object); + vm_page_lockspin_queues(); vm_page_wire_count += page_count; vm_page_unlock_queues(); + vm_tag_update_size(tag, ptoa_64(page_count)); #if DEBUG || DEVELOPMENT VM_DEBUG_CONSTANT_EVENT(vm_kern_request, VM_KERN_REQUEST, DBG_FUNC_END, page_grab_count, 0, 0, 0); @@ -807,12 +817,6 @@ kernel_memory_populate( } #endif - if (kernel_object == object) { - vm_tag_update_size(tag, size); - } - - vm_object_unlock(object); - #if KASAN if (map == compressor_map) { kasan_notify_address_nopoison(addr, size); @@ -840,15 +844,17 @@ out: void kernel_memory_depopulate( - vm_map_t map, - vm_offset_t addr, - vm_size_t size, - int flags) + vm_map_t map, + vm_offset_t addr, + vm_size_t size, + int flags, + vm_tag_t tag) { - vm_object_t object; - vm_object_offset_t offset, pg_offset; - vm_page_t mem; - vm_page_t local_freeq = NULL; + vm_object_t object; + vm_object_offset_t offset, pg_offset; + vm_page_t mem; + vm_page_t local_freeq = NULL; + unsigned int pages_unwired; assert((flags & (KMA_COMPRESSOR | KMA_KOBJECT)) != (KMA_COMPRESSOR | KMA_KOBJECT)); @@ -877,7 +883,7 @@ kernel_memory_depopulate( } pmap_protect(kernel_map->pmap, offset, offset + size, VM_PROT_NONE); - for (pg_offset = 0; + for (pg_offset = 0, pages_unwired = 0; pg_offset < size; pg_offset += PAGE_SIZE_64) { mem = vm_page_lookup(object, offset + pg_offset); @@ -886,6 +892,7 @@ kernel_memory_depopulate( if (mem->vmp_q_state != VM_PAGE_USED_BY_COMPRESSOR) { pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(mem)); + pages_unwired++; } mem->vmp_busy = TRUE; @@ -896,7 +903,7 @@ kernel_memory_depopulate( assert(mem->vmp_pageq.next == 0 && mem->vmp_pageq.prev == 0); assert((mem->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) || - (mem->vmp_q_state == VM_PAGE_NOT_ON_Q)); + (mem->vmp_q_state == VM_PAGE_IS_WIRED)); mem->vmp_q_state = VM_PAGE_NOT_ON_Q; mem->vmp_snext = local_freeq; @@ -904,8 +911,15 @@ kernel_memory_depopulate( } vm_object_unlock(object); + if (local_freeq) { vm_page_free_list(local_freeq, TRUE); + if (pages_unwired != 0) { + vm_page_lockspin_queues(); + vm_page_wire_count -= pages_unwired; + vm_page_unlock_queues(); + vm_tag_update_size(tag, -ptoa_64(pages_unwired)); + } } } @@ -945,7 +959,9 @@ kmem_alloc_flags( int flags) { kern_return_t kr = 
kernel_memory_allocate(map, addrp, size, 0, flags, tag); - TRACE_MACHLEAKS(KMEM_ALLOC_CODE, KMEM_ALLOC_CODE_2, size, *addrp); + if (kr == KERN_SUCCESS) { + TRACE_MACHLEAKS(KMEM_ALLOC_CODE, KMEM_ALLOC_CODE_2, size, *addrp); + } return kr; } @@ -1378,7 +1394,12 @@ kmem_set_user_wire_limits(void) size_t wire_limit_percents_length = sizeof(wire_limit_percents) / sizeof(vm_map_size_t); vm_map_size_t limit; - available_mem_log = bit_floor(max_mem); + uint64_t config_memsize = max_mem; +#if defined(XNU_TARGET_OS_OSX) + config_memsize = max_mem_actual; +#endif /* defined(XNU_TARGET_OS_OSX) */ + + available_mem_log = bit_floor(config_memsize); if (available_mem_log < VM_USER_WIREABLE_MIN_CONFIG) { available_mem_log = 0; @@ -1390,15 +1411,17 @@ kmem_set_user_wire_limits(void) } max_wire_percent = wire_limit_percents[available_mem_log]; - limit = max_mem * max_wire_percent / 100; + limit = config_memsize * max_wire_percent / 100; /* Cap the number of non lockable bytes at VM_NOT_USER_WIREABLE_MAX */ - if (max_mem - limit > VM_NOT_USER_WIREABLE_MAX) { - limit = max_mem - VM_NOT_USER_WIREABLE_MAX; + if (config_memsize - limit > VM_NOT_USER_WIREABLE_MAX) { + limit = config_memsize - VM_NOT_USER_WIREABLE_MAX; } vm_global_user_wire_limit = limit; /* the default per task limit is the same as the global limit */ vm_per_task_user_wire_limit = limit; + vm_add_wire_count_over_global_limit = 0; + vm_add_wire_count_over_user_limit = 0; } @@ -1408,6 +1431,7 @@ kmem_set_user_wire_limits(void) * Initialize the kernel's virtual memory map, taking * into account all memory allocated up to this time. */ +__startup_func void kmem_init( vm_offset_t start, @@ -1541,9 +1565,7 @@ copyinmap( * Routine: copyoutmap * Purpose: * Like copyout, except that toaddr is an address - * in the specified VM map. This implementation - * is incomplete; it handles the current user map - * and the kernel map/submaps. + * in the specified VM map. 
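kmem_set_user_wire_limits() above now sizes the limits from max_mem_actual on macOS and clamps the un-wireable remainder to VM_NOT_USER_WIREABLE_MAX. A worked example of that clamp with hypothetical numbers (the real percentage table and cap differ):

    /* sketch only; 16 GB, 70% and a 4 GB cap are invented values */
    #include <stdio.h>
    #include <stdint.h>

    int
    main(void)
    {
        uint64_t config_memsize   = 16ULL << 30;
        uint64_t wire_percent     = 70;
        uint64_t not_wireable_max = 4ULL << 30;

        uint64_t limit = config_memsize * wire_percent / 100;
        if (config_memsize - limit > not_wireable_max) {
            limit = config_memsize - not_wireable_max;  /* cap the un-wireable bytes */
        }
        printf("user wire limit: %llu MB\n", (unsigned long long)(limit >> 20));
        return 0;
    }

With these inputs the percentage alone would leave 4.8 GB un-wireable, so the cap raises the limit to 12 GB.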
*/ kern_return_t copyoutmap( @@ -1552,21 +1574,26 @@ copyoutmap( vm_map_address_t toaddr, vm_size_t length) { + kern_return_t kr = KERN_SUCCESS; + vm_map_t oldmap; + if (vm_map_pmap(map) == pmap_kernel()) { /* assume a correct copy */ memcpy(CAST_DOWN(void *, toaddr), fromdata, length); - return KERN_SUCCESS; - } - - if (current_map() != map) { - return KERN_NOT_SUPPORTED; - } - - if (copyout(fromdata, toaddr, length) != 0) { - return KERN_INVALID_ADDRESS; + } else if (current_map() == map) { + if (copyout(fromdata, toaddr, length) != 0) { + kr = KERN_INVALID_ADDRESS; + } + } else { + vm_map_reference(map); + oldmap = vm_map_switch(map); + if (copyout(fromdata, toaddr, length) != 0) { + kr = KERN_INVALID_ADDRESS; + } + vm_map_switch(oldmap); + vm_map_deallocate(map); } - - return KERN_SUCCESS; + return kr; } /* @@ -1661,3 +1688,46 @@ vm_kernel_unslide_or_perm_external( { vm_kernel_addrperm_external(addr, up_addr); } + +void +vm_packing_pointer_invalid(vm_offset_t ptr, vm_packing_params_t params) +{ + if (ptr & ((1ul << params.vmpp_shift) - 1)) { + panic("pointer %p can't be packed: low %d bits aren't 0", + (void *)ptr, params.vmpp_shift); + } else if (ptr <= params.vmpp_base) { + panic("pointer %p can't be packed: below base %p", + (void *)ptr, (void *)params.vmpp_base); + } else { + panic("pointer %p can't be packed: maximum encodable pointer is %p", + (void *)ptr, (void *)vm_packing_max_packable(params)); + } +} + +void +vm_packing_verify_range( + const char *subsystem, + vm_offset_t min_address, + vm_offset_t max_address, + vm_packing_params_t params) +{ + if (min_address > max_address) { + panic("%s: %s range invalid min:%p > max:%p", + __func__, subsystem, (void *)min_address, (void *)max_address); + } + + if (!params.vmpp_base_relative) { + return; + } + + if (min_address <= params.vmpp_base) { + panic("%s: %s range invalid min:%p <= base:%p", + __func__, subsystem, (void *)min_address, (void *)params.vmpp_base); + } + + if (max_address > vm_packing_max_packable(params)) { + panic("%s: %s range invalid max:%p >= max packable:%p", + __func__, subsystem, (void *)max_address, + (void *)vm_packing_max_packable(params)); + } +} diff --git a/osfmk/vm/vm_kern.h b/osfmk/vm/vm_kern.h index 7a0466951..2cafcebe2 100644 --- a/osfmk/vm/vm_kern.h +++ b/osfmk/vm/vm_kern.h @@ -103,12 +103,13 @@ extern kern_return_t kernel_memory_allocate( #define KMA_ATOMIC 0x800 #define KMA_ZERO 0x1000 #define KMA_PAGEABLE 0x2000 +#define KMA_KHEAP 0x4000 /* Pages belonging to zones backing one of kalloc_heap. 
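The new vm_packing_pointer_invalid() and vm_packing_verify_range() panics spell out the three ways a pointer can fail to pack: misaligned low bits, at or below the base, or beyond the maximum encodable address. A sketch of those checks under the assumption that a base-relative scheme stores (ptr - base) >> shift in a fixed number of bits; the field names and the max-packable formula are illustrative, not the actual vm_packing_params_t layout:

    /* sketch only; not the real packing parameters */
    #include <stdio.h>
    #include <stdint.h>

    struct packing_params_sketch {
        uintptr_t base;     /* stand-in for vmpp_base  */
        unsigned  bits;     /* stand-in for vmpp_bits  */
        unsigned  shift;    /* stand-in for vmpp_shift */
    };

    static uintptr_t
    max_packable(struct packing_params_sketch p)
    {
        /* assumed: packed value is (ptr - base) >> shift, held in p.bits bits */
        return p.base + (((uintptr_t)1 << p.bits) << p.shift);
    }

    static int
    can_pack(uintptr_t ptr, struct packing_params_sketch p)
    {
        if (ptr & (((uintptr_t)1 << p.shift) - 1)) {
            return 0;   /* low shift bits aren't 0            */
        }
        if (ptr <= p.base) {
            return 0;   /* at or below the base               */
        }
        if (ptr > max_packable(p)) {
            return 0;   /* beyond the maximum encodable value */
        }
        return 1;
    }

    int
    main(void)
    {
        struct packing_params_sketch p = {
            .base = 0xffffff8000000000ULL, .bits = 31, .shift = 6,
        };
        printf("%d\n", can_pack(0xffffff8000004000ULL, p));
        return 0;
    }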
*/ extern kern_return_t kmem_alloc( - vm_map_t map, - vm_offset_t *addrp, - vm_size_t size, - vm_tag_t tag) __XNU_INTERNAL(kmem_alloc); + vm_map_t map, + vm_offset_t *addrp, + vm_size_t size, + vm_tag_t tag) __XNU_INTERNAL(kmem_alloc); extern kern_return_t kmem_alloc_contig( vm_map_t map, @@ -159,7 +160,7 @@ extern kern_return_t kmem_suballoc( boolean_t pageable, int flags, vm_map_kernel_flags_t vmk_flags, - vm_tag_t tag, + vm_tag_t tag, vm_map_t *new_map); extern kern_return_t kmem_alloc_kobject( @@ -179,7 +180,8 @@ extern void kernel_memory_depopulate( vm_map_t map, vm_offset_t addr, vm_size_t size, - int flags); + int flags, + vm_tag_t tag); extern kern_return_t memory_object_iopl_request( ipc_port_t port, @@ -192,8 +194,10 @@ extern kern_return_t memory_object_iopl_request( vm_tag_t tag); struct mach_memory_info; -extern kern_return_t vm_page_diagnose(struct mach_memory_info * info, - unsigned int num_info, uint64_t zones_collectable_bytes); +extern kern_return_t vm_page_diagnose( + struct mach_memory_info *info, + unsigned int num_info, + uint64_t zones_collectable_bytes); extern uint32_t vm_page_diagnose_estimate(void); @@ -205,6 +209,10 @@ extern kern_return_t vm_kern_allocation_info(uintptr_t addr, vm_size_t * size #endif /* DEBUG || DEVELOPMENT */ +#if HIBERNATION +extern void hibernate_rebuild_vm_structs(void); +#endif /* HIBERNATION */ + extern vm_tag_t vm_tag_bt(void); extern vm_tag_t vm_tag_alloc(vm_allocation_site_t * site); @@ -214,8 +222,9 @@ extern void vm_tag_alloc_locked(vm_allocation_site_t * site, vm_allo extern void vm_tag_update_size(vm_tag_t tag, int64_t size); #if VM_MAX_TAG_ZONES + extern void vm_allocation_zones_init(void); -extern void vm_tag_will_update_zone(vm_tag_t tag, uint32_t zidx); +extern void vm_tag_will_update_zone(vm_tag_t tag, uint32_t zidx); extern void vm_tag_update_zone_size(vm_tag_t tag, uint32_t zidx, int64_t delta, int64_t dwaste); extern vm_allocation_zone_total_t ** vm_allocation_zone_totals; @@ -263,7 +272,7 @@ struct kern_allocation_name; typedef struct kern_allocation_name * kern_allocation_name_t; #endif /* !XNU_KERNEL_PRIVATE */ -extern kern_allocation_name_t kern_allocation_name_allocate(const char * name, uint32_t suballocs); +extern kern_allocation_name_t kern_allocation_name_allocate(const char * name, uint16_t suballocs); extern void kern_allocation_name_release(kern_allocation_name_t allocation); extern const char * kern_allocation_get_name(kern_allocation_name_t allocation); #ifdef XNU_KERNEL_PRIVATE @@ -312,22 +321,22 @@ extern kern_return_t kmem_alloc_pageable_external( extern kern_return_t mach_vm_allocate_kernel( vm_map_t map, mach_vm_offset_t *addr, - mach_vm_size_t size, + mach_vm_size_t size, int flags, - vm_tag_t tag); + vm_tag_t tag); extern kern_return_t vm_allocate_kernel( - vm_map_t map, - vm_offset_t *addr, - vm_size_t size, - int flags, - vm_tag_t tag); + vm_map_t map, + vm_offset_t *addr, + vm_size_t size, + int flags, + vm_tag_t tag); extern kern_return_t mach_vm_map_kernel( vm_map_t target_map, mach_vm_offset_t *address, - mach_vm_size_t initial_size, + mach_vm_size_t initial_size, mach_vm_offset_t mask, int flags, vm_map_kernel_flags_t vmk_flags, @@ -358,10 +367,10 @@ extern kern_return_t vm_map_kernel( extern kern_return_t mach_vm_remap_kernel( vm_map_t target_map, mach_vm_offset_t *address, - mach_vm_size_t size, + mach_vm_size_t size, mach_vm_offset_t mask, int flags, - vm_tag_t tag, + vm_tag_t tag, vm_map_t src_map, mach_vm_offset_t memory_address, boolean_t copy, @@ -375,7 +384,7 @@ extern kern_return_t 
vm_remap_kernel( vm_size_t size, vm_offset_t mask, int flags, - vm_tag_t tag, + vm_tag_t tag, vm_map_t src_map, vm_offset_t memory_address, boolean_t copy, @@ -402,7 +411,7 @@ extern kern_return_t mach_vm_wire_kernel( host_priv_t host_priv, vm_map_t map, mach_vm_offset_t start, - mach_vm_size_t size, + mach_vm_size_t size, vm_prot_t access, vm_tag_t tag); @@ -427,6 +436,7 @@ extern kern_return_t vm_map_wire_and_extract_kernel( extern vm_map_t kernel_map; extern vm_map_t kernel_pageable_map; extern vm_map_t ipc_kernel_map; +extern vm_map_t g_kext_map; #endif /* KERNEL_PRIVATE */ diff --git a/osfmk/vm/vm_map.c b/osfmk/vm/vm_map.c index 9aa1eb9b5..4ba91fb04 100644 --- a/osfmk/vm/vm_map.c +++ b/osfmk/vm/vm_map.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2019 Apple Inc. All rights reserved. + * Copyright (c) 2000-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -86,7 +86,7 @@ #include #include #include -#include +#include #include #include @@ -115,14 +115,38 @@ #include #include +#include + #include #if DEVELOPMENT || DEBUG extern int proc_selfcsflags(void); -#if CONFIG_EMBEDDED -extern int panic_on_unsigned_execute; -#endif /* CONFIG_EMBEDDED */ +int panic_on_unsigned_execute = 0; #endif /* DEVELOPMENT || DEBUG */ +#if MACH_ASSERT +int debug4k_filter = 0; +char debug4k_proc_name[1024] = ""; +int debug4k_proc_filter = (int)-1 & ~(1 << __DEBUG4K_FAULT); +int debug4k_panic_on_misaligned_sharing = 0; +const char *debug4k_category_name[] = { + "error", /* 0 */ + "life", /* 1 */ + "load", /* 2 */ + "fault", /* 3 */ + "copy", /* 4 */ + "share", /* 5 */ + "adjust", /* 6 */ + "pmap", /* 7 */ + "mementry", /* 8 */ + "iokit", /* 9 */ + "upl", /* 10 */ + "exc", /* 11 */ + "vfs" /* 12 */ +}; +#endif /* MACH_ASSERT */ +int debug4k_no_cow_copyin = 0; + + #if __arm64__ extern const int fourk_binary_compatibility_unsafe; extern const int fourk_binary_compatibility_allow_wx; @@ -274,13 +298,12 @@ static kern_return_t vm_map_remap_extract( vm_map_t map, vm_map_offset_t addr, vm_map_size_t size, + vm_prot_t required_protection, boolean_t copy, struct vm_map_header *map_header, vm_prot_t *cur_protection, vm_prot_t *max_protection, vm_inherit_t inheritance, - boolean_t pageable, - boolean_t same_map, vm_map_kernel_flags_t vmk_flags); static kern_return_t vm_map_remap_range_allocate( @@ -299,7 +322,7 @@ static void vm_map_region_look_for_page( vm_object_t object, vm_object_offset_t offset, int max_refcnt, - int depth, + unsigned short depth, vm_region_extended_info_t extended, mach_msg_type_number_t count); @@ -335,8 +358,28 @@ static kern_return_t vm_map_pageout( vm_map_offset_t end); #endif /* MACH_ASSERT */ -static void vm_map_corpse_footprint_destroy( +kern_return_t vm_map_corpse_footprint_collect( + vm_map_t old_map, + vm_map_entry_t old_entry, + vm_map_t new_map); +void vm_map_corpse_footprint_collect_done( + vm_map_t new_map); +void vm_map_corpse_footprint_destroy( vm_map_t map); +kern_return_t vm_map_corpse_footprint_query_page_info( + vm_map_t map, + vm_map_offset_t va, + int *disposition_p); +void vm_map_footprint_query_page_info( + vm_map_t map, + vm_map_entry_t map_entry, + vm_map_offset_t curr_s_offset, + int *disposition_p); + +static const struct vm_map_entry vm_map_entry_template = { + .behavior = VM_BEHAVIOR_DEFAULT, + .inheritance = VM_INHERIT_DEFAULT, +}; pid_t find_largest_process_vm_map_entries(void); @@ -350,63 +393,76 @@ pid_t find_largest_process_vm_map_entries(void); * vm_map_copyout. 
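vm_map.c above gains a const vm_map_entry_template whose designated initializers carry the non-zero defaults (VM_BEHAVIOR_DEFAULT, VM_INHERIT_DEFAULT); _vm_map_entry_create() later copies it wholesale instead of poking fields one at a time. A small sketch of that template-initialization pattern with stand-in types and values:

    /* sketch only; field names and values are stand-ins */
    #include <stdio.h>

    struct entry_sketch {
        int behavior;
        int inheritance;
        int wired_count;
    };

    static const struct entry_sketch entry_template = {
        .behavior    = 3,   /* stand-in for VM_BEHAVIOR_DEFAULT */
        .inheritance = 1,   /* stand-in for VM_INHERIT_DEFAULT  */
        /* every other field starts out zeroed */
    };

    int
    main(void)
    {
        struct entry_sketch e = entry_template;  /* like "*entry = vm_map_entry_template" */
        printf("%d %d %d\n", e.behavior, e.inheritance, e.wired_count);
        return 0;
    }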
*/ -#if CONFIG_EMBEDDED - -/* - * The "used_for_jit" flag was copied from OLD to NEW in vm_map_entry_copy(). - * But for security reasons on embedded platforms, we don't want the - * new mapping to be "used for jit", so we always reset the flag here. - * Same for "pmap_cs_associated". - */ -#define VM_MAP_ENTRY_COPY_CODE_SIGNING(NEW, OLD) \ -MACRO_BEGIN \ - (NEW)->used_for_jit = FALSE; \ - (NEW)->pmap_cs_associated = FALSE; \ -MACRO_END - -#else /* CONFIG_EMBEDDED */ +static inline void +vm_map_entry_copy_pmap_cs_assoc( + vm_map_t map __unused, + vm_map_entry_t new __unused, + vm_map_entry_t old __unused) +{ +#if PMAP_CS + /* when pmap_cs is enabled, we want to reset on copy */ + new->pmap_cs_associated = FALSE; +#else /* PMAP_CS */ + /* when pmap_cs is not enabled, assert as a sanity check */ + assert(new->pmap_cs_associated == FALSE); +#endif /* PMAP_CS */ +} /* * The "used_for_jit" flag was copied from OLD to NEW in vm_map_entry_copy(). - * On macOS, the new mapping can be "used for jit". + * But for security reasons on some platforms, we don't want the + * new mapping to be "used for jit", so we reset the flag here. */ -#define VM_MAP_ENTRY_COPY_CODE_SIGNING(NEW, OLD) \ -MACRO_BEGIN \ - assert((NEW)->used_for_jit == (OLD)->used_for_jit); \ - assert((NEW)->pmap_cs_associated == FALSE); \ -MACRO_END +static inline void +vm_map_entry_copy_code_signing( + vm_map_t map, + vm_map_entry_t new, + vm_map_entry_t old __unused) +{ + if (VM_MAP_POLICY_ALLOW_JIT_COPY(map)) { + assert(new->used_for_jit == old->used_for_jit); + } else { + new->used_for_jit = FALSE; + } +} -#endif /* CONFIG_EMBEDDED */ - -#define vm_map_entry_copy(NEW, OLD) \ -MACRO_BEGIN \ -boolean_t _vmec_reserved = (NEW)->from_reserved_zone; \ - *(NEW) = *(OLD); \ - (NEW)->is_shared = FALSE; \ - (NEW)->needs_wakeup = FALSE; \ - (NEW)->in_transition = FALSE; \ - (NEW)->wired_count = 0; \ - (NEW)->user_wired_count = 0; \ - (NEW)->permanent = FALSE; \ - VM_MAP_ENTRY_COPY_CODE_SIGNING((NEW),(OLD)); \ - (NEW)->from_reserved_zone = _vmec_reserved; \ - if ((NEW)->iokit_acct) { \ - assertf(!(NEW)->use_pmap, "old %p new %p\n", (OLD), (NEW)); \ - (NEW)->iokit_acct = FALSE; \ - (NEW)->use_pmap = TRUE; \ - } \ - (NEW)->vme_resilient_codesign = FALSE; \ - (NEW)->vme_resilient_media = FALSE; \ - (NEW)->vme_atomic = FALSE; \ - (NEW)->vme_no_copy_on_read = FALSE; \ -MACRO_END +static inline void +vm_map_entry_copy( + vm_map_t map, + vm_map_entry_t new, + vm_map_entry_t old) +{ + boolean_t _vmec_reserved = new->from_reserved_zone; + *new = *old; + new->is_shared = FALSE; + new->needs_wakeup = FALSE; + new->in_transition = FALSE; + new->wired_count = 0; + new->user_wired_count = 0; + new->permanent = FALSE; + vm_map_entry_copy_code_signing(map, new, old); + vm_map_entry_copy_pmap_cs_assoc(map, new, old); + new->from_reserved_zone = _vmec_reserved; + if (new->iokit_acct) { + assertf(!new->use_pmap, "old %p new %p\n", old, new); + new->iokit_acct = FALSE; + new->use_pmap = TRUE; + } + new->vme_resilient_codesign = FALSE; + new->vme_resilient_media = FALSE; + new->vme_atomic = FALSE; + new->vme_no_copy_on_read = FALSE; +} -#define vm_map_entry_copy_full(NEW, OLD) \ -MACRO_BEGIN \ -boolean_t _vmecf_reserved = (NEW)->from_reserved_zone; \ -(*(NEW) = *(OLD)); \ -(NEW)->from_reserved_zone = _vmecf_reserved; \ -MACRO_END +static inline void +vm_map_entry_copy_full( + vm_map_entry_t new, + vm_map_entry_t old) +{ + boolean_t _vmecf_reserved = new->from_reserved_zone; + *new = *old; + new->from_reserved_zone = _vmecf_reserved; +} /* * Normal 
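The hunk above converts the vm_map_entry_copy() MACRO_BEGIN/MACRO_END macros into static inline functions and routes the used_for_jit decision through a per-map policy query (VM_MAP_POLICY_ALLOW_JIT_COPY) rather than a CONFIG_EMBEDDED ifdef. A simplified sketch of the same shape, with stand-in types and a plain boolean in place of the policy macro:

    /* sketch only; types and the policy flag are stand-ins */
    #include <stdbool.h>
    #include <stdio.h>

    struct entry_sk {
        bool used_for_jit;
        bool is_shared;
        int  wired_count;
    };

    static inline void
    entry_copy_sk(struct entry_sk *dst, const struct entry_sk *src, bool allow_jit_copy)
    {
        *dst = *src;
        dst->is_shared = false;
        dst->wired_count = 0;
        if (!allow_jit_copy) {
            dst->used_for_jit = false;  /* reset where the map policy forbids copying JIT */
        }
    }

    int
    main(void)
    {
        struct entry_sk src = { .used_for_jit = true }, dst;
        entry_copy_sk(&dst, &src, false);
        printf("jit copied: %d\n", dst.used_for_jit);
        return 0;
    }

The inline form also gets argument type checking, which the old token-pasting macros could not provide.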
lock_read_to_write() returns FALSE/0 on failure. @@ -445,6 +501,54 @@ vm_map_try_lock_read(vm_map_t map) return FALSE; } +/* + * Routines to get the page size the caller should + * use while inspecting the target address space. + * Use the "_safely" variant if the caller is dealing with a user-provided + * array whose size depends on the page size, to avoid any overflow or + * underflow of a user-allocated buffer. + */ +int +vm_self_region_page_shift_safely( + vm_map_t target_map) +{ + int effective_page_shift = 0; + + if (PAGE_SIZE == (4096)) { + /* x86_64 and 4k watches: always use 4k */ + return PAGE_SHIFT; + } + /* did caller provide an explicit page size for this thread to use? */ + effective_page_shift = thread_self_region_page_shift(); + if (effective_page_shift) { + /* use the explicitly-provided page size */ + return effective_page_shift; + } + /* no explicit page size: use the caller's page size... */ + effective_page_shift = VM_MAP_PAGE_SHIFT(current_map()); + if (effective_page_shift == VM_MAP_PAGE_SHIFT(target_map)) { + /* page size match: safe to use */ + return effective_page_shift; + } + /* page size mismatch */ + return -1; +} +int +vm_self_region_page_shift( + vm_map_t target_map) +{ + int effective_page_shift; + + effective_page_shift = vm_self_region_page_shift_safely(target_map); + if (effective_page_shift == -1) { + /* no safe value but OK to guess for caller */ + effective_page_shift = MIN(VM_MAP_PAGE_SHIFT(current_map()), + VM_MAP_PAGE_SHIFT(target_map)); + } + return effective_page_shift; +} + + /* * Decide if we want to allow processes to execute from their data or stack areas. * override_nx() returns true if we do. Data/stack execution can be enabled independently @@ -552,12 +656,46 @@ override_nx(vm_map_t map, uint32_t user_tag) /* map unused on arm */ * vm_object_copy_strategically() in vm_object.c. */ -static zone_t vm_map_zone; /* zone for vm_map structures */ -zone_t vm_map_entry_zone; /* zone for vm_map_entry structures */ -static zone_t vm_map_entry_reserved_zone; /* zone with reserve for non-blocking allocations */ -static SECURITY_READ_ONLY_LATE(zone_t) vm_map_copy_zone; /* zone for vm_map_copy structures */ -zone_t vm_map_holes_zone; /* zone for vm map holes (vm_map_links) structures */ +static SECURITY_READ_ONLY_LATE(zone_t) vm_map_zone; /* zone for vm_map structures */ +static SECURITY_READ_ONLY_LATE(zone_t) vm_map_entry_reserved_zone; /* zone with reserve for non-blocking allocations */ +static SECURITY_READ_ONLY_LATE(zone_t) vm_map_copy_zone; /* zone for vm_map_copy structures */ + +SECURITY_READ_ONLY_LATE(zone_t) vm_map_entry_zone; /* zone for vm_map_entry structures */ +SECURITY_READ_ONLY_LATE(zone_t) vm_map_holes_zone; /* zone for vm map holes (vm_map_links) structures */ + +#define VM_MAP_ZONE_NAME "maps" +#define VM_MAP_ZFLAGS ( \ + ZC_NOENCRYPT | \ + ZC_NOGC | \ + ZC_NOGZALLOC | \ + ZC_ALLOW_FOREIGN) + +#define VME_RESERVED_ZONE_NAME "Reserved VM map entries" +#define VM_MAP_RESERVED_ZFLAGS ( \ + ZC_NOENCRYPT | \ + ZC_ALLOW_FOREIGN | \ + ZC_NOCALLOUT | \ + ZC_NOGZALLOC | \ + ZC_KASAN_NOQUARANTINE | \ + ZC_NOGC) + +#define VM_MAP_HOLES_ZONE_NAME "VM map holes" +#define VM_MAP_HOLES_ZFLAGS ( \ + ZC_NOENCRYPT | \ + ZC_NOGC | \ + ZC_NOGZALLOC | \ + ZC_ALLOW_FOREIGN) +/* + * Asserts that a vm_map_copy object is coming from the + * vm_map_copy_zone to ensure that it isn't a fake constructed + * anywhere else. 
+ */ +static inline void +vm_map_copy_require(struct vm_map_copy *copy) +{ + zone_id_require(ZONE_ID_VM_MAP_COPY, sizeof(struct vm_map_copy), copy); +} /* * Placeholder object for submap operations. This object is dropped @@ -567,18 +705,18 @@ zone_t vm_map_holes_zone; /* zone for vm map h vm_object_t vm_submap_object; -static void *map_data; -static vm_size_t map_data_size; -static void *kentry_data; -static vm_size_t kentry_data_size; -static void *map_holes_data; -static vm_size_t map_holes_data_size; +static __startup_data vm_offset_t map_data; +static __startup_data vm_size_t map_data_size; +static __startup_data vm_offset_t kentry_data; +static __startup_data vm_size_t kentry_data_size; +static __startup_data vm_offset_t map_holes_data; +static __startup_data vm_size_t map_holes_data_size; -#if CONFIG_EMBEDDED -#define NO_COALESCE_LIMIT 0 -#else +#if XNU_TARGET_OS_OSX #define NO_COALESCE_LIMIT ((1024 * 128) - 1) -#endif +#else /* XNU_TARGET_OS_OSX */ +#define NO_COALESCE_LIMIT 0 +#endif /* XNU_TARGET_OS_OSX */ /* Skip acquiring locks if we're in the midst of a kernel core dump */ unsigned int not_in_kdp = 1; @@ -640,7 +778,8 @@ vm_map_apple_protected( vm_map_offset_t start, vm_map_offset_t end, vm_object_offset_t crypto_backing_offset, - struct pager_crypt_info *crypt_info) + struct pager_crypt_info *crypt_info, + uint32_t cryptid) { boolean_t map_locked; kern_return_t kr; @@ -693,13 +832,20 @@ vm_map_apple_protected( map_addr, &map_entry) || map_entry->is_sub_map || - VME_OBJECT(map_entry) == VM_OBJECT_NULL || - !(map_entry->protection & VM_PROT_EXECUTE)) { + VME_OBJECT(map_entry) == VM_OBJECT_NULL) { /* that memory is not properly mapped */ kr = KERN_INVALID_ARGUMENT; goto done; } + /* ensure mapped memory is mapped as executable except + * except for model decryption flow */ + if ((cryptid != CRYPTID_MODEL_ENCRYPTION) && + !(map_entry->protection & VM_PROT_EXECUTE)) { + kr = KERN_INVALID_ARGUMENT; + goto done; + } + /* get the protected object to be decrypted */ protected_object = VME_OBJECT(map_entry); if (protected_object == VM_OBJECT_NULL) { @@ -854,18 +1000,15 @@ done: #endif /* CONFIG_CODE_DECRYPTION */ -lck_grp_t vm_map_lck_grp; -lck_grp_attr_t vm_map_lck_grp_attr; -lck_attr_t vm_map_lck_attr; -lck_attr_t vm_map_lck_rw_attr; +LCK_GRP_DECLARE(vm_map_lck_grp, "vm_map"); +LCK_ATTR_DECLARE(vm_map_lck_attr, 0, 0); +LCK_ATTR_DECLARE(vm_map_lck_rw_attr, 0, LCK_ATTR_DEBUG); -#if CONFIG_EMBEDDED -int malloc_no_cow = 1; -#define VM_PROTECT_WX_FAIL 0 -#else /* CONFIG_EMBEDDED */ +#if XNU_TARGET_OS_OSX int malloc_no_cow = 0; -#define VM_PROTECT_WX_FAIL 1 -#endif /* CONFIG_EMBEDDED */ +#else /* XNU_TARGET_OS_OSX */ +int malloc_no_cow = 1; +#endif /* XNU_TARGET_OS_OSX */ uint64_t vm_memory_malloc_no_cow_mask = 0ULL; #if DEBUG int vm_check_map_sanity = 0; @@ -892,96 +1035,52 @@ int vm_check_map_sanity = 0; * empty since the very act of allocating memory implies the creation * of a new entry. 
*/ +__startup_func void -vm_map_init( - void) +vm_map_init(void) { - vm_size_t entry_zone_alloc_size; const char *mez_name = "VM map entries"; - vm_map_zone = zinit((vm_map_size_t) sizeof(struct _vm_map), 40 * 1024, - PAGE_SIZE, "maps"); - zone_change(vm_map_zone, Z_NOENCRYPT, TRUE); -#if defined(__LP64__) - entry_zone_alloc_size = PAGE_SIZE * 5; -#else - entry_zone_alloc_size = PAGE_SIZE * 6; -#endif - vm_map_entry_zone = zinit((vm_map_size_t) sizeof(struct vm_map_entry), - 1024 * 1024, entry_zone_alloc_size, - mez_name); - zone_change(vm_map_entry_zone, Z_NOENCRYPT, TRUE); - zone_change(vm_map_entry_zone, Z_NOCALLOUT, TRUE); - zone_change(vm_map_entry_zone, Z_GZALLOC_EXEMPT, TRUE); - - vm_map_entry_reserved_zone = zinit((vm_map_size_t) sizeof(struct vm_map_entry), - kentry_data_size * 64, kentry_data_size, - "Reserved VM map entries"); - zone_change(vm_map_entry_reserved_zone, Z_NOENCRYPT, TRUE); - /* Don't quarantine because we always need elements available */ - zone_change(vm_map_entry_reserved_zone, Z_KASAN_QUARANTINE, FALSE); - - vm_map_copy_zone = zinit((vm_map_size_t) sizeof(struct vm_map_copy), - 16 * 1024, PAGE_SIZE, "VM map copies"); - zone_change(vm_map_copy_zone, Z_NOENCRYPT, TRUE); - - vm_map_holes_zone = zinit((vm_map_size_t) sizeof(struct vm_map_links), - 16 * 1024, PAGE_SIZE, "VM map holes"); - zone_change(vm_map_holes_zone, Z_NOENCRYPT, TRUE); + +#if MACH_ASSERT + PE_parse_boot_argn("debug4k_filter", &debug4k_filter, + sizeof(debug4k_filter)); +#endif /* MACH_ASSERT */ + + vm_map_zone = zone_create(VM_MAP_ZONE_NAME, sizeof(struct _vm_map), + VM_MAP_ZFLAGS); + + vm_map_entry_zone = zone_create(mez_name, sizeof(struct vm_map_entry), + ZC_NOENCRYPT | ZC_NOGZALLOC | ZC_NOCALLOUT); /* - * Cram the map and kentry zones with initial data. - * Set reserved_zone non-collectible to aid zone_gc(). + * Don't quarantine because we always need elements available + * Disallow GC on this zone... to aid the GC. */ - zone_change(vm_map_zone, Z_COLLECT, FALSE); - zone_change(vm_map_zone, Z_FOREIGN, TRUE); - zone_change(vm_map_zone, Z_GZALLOC_EXEMPT, TRUE); - - zone_change(vm_map_entry_reserved_zone, Z_COLLECT, FALSE); - zone_change(vm_map_entry_reserved_zone, Z_EXPAND, FALSE); - zone_change(vm_map_entry_reserved_zone, Z_FOREIGN, TRUE); - zone_change(vm_map_entry_reserved_zone, Z_NOCALLOUT, TRUE); - zone_change(vm_map_entry_reserved_zone, Z_CALLERACCT, FALSE); /* don't charge caller */ - zone_change(vm_map_copy_zone, Z_CALLERACCT, FALSE); /* don't charge caller */ - zone_change(vm_map_entry_reserved_zone, Z_GZALLOC_EXEMPT, TRUE); - - zone_change(vm_map_holes_zone, Z_COLLECT, TRUE); - zone_change(vm_map_holes_zone, Z_EXPAND, TRUE); - zone_change(vm_map_holes_zone, Z_FOREIGN, TRUE); - zone_change(vm_map_holes_zone, Z_NOCALLOUT, TRUE); - zone_change(vm_map_holes_zone, Z_CALLERACCT, TRUE); - zone_change(vm_map_holes_zone, Z_GZALLOC_EXEMPT, TRUE); + vm_map_entry_reserved_zone = zone_create_ext(VME_RESERVED_ZONE_NAME, + sizeof(struct vm_map_entry), VM_MAP_RESERVED_ZFLAGS, + ZONE_ID_ANY, ^(zone_t z) { + zone_set_noexpand(z, 64 * kentry_data_size); + }); + + vm_map_copy_zone = zone_create_ext("VM map copies", sizeof(struct vm_map_copy), + ZC_NOENCRYPT | ZC_CACHING, ZONE_ID_VM_MAP_COPY, NULL); + + vm_map_holes_zone = zone_create(VM_MAP_HOLES_ZONE_NAME, + sizeof(struct vm_map_links), VM_MAP_HOLES_ZFLAGS); /* * Add the stolen memory to zones, adjust zone size and stolen counts. - * zcram only up to the maximum number of pages for each zone chunk. 
*/ - zcram(vm_map_zone, (vm_offset_t)map_data, map_data_size); - - const vm_size_t stride = ZONE_CHUNK_MAXPAGES * PAGE_SIZE; - for (vm_offset_t off = 0; off < kentry_data_size; off += stride) { - zcram(vm_map_entry_reserved_zone, - (vm_offset_t)kentry_data + off, - MIN(kentry_data_size - off, stride)); - } - for (vm_offset_t off = 0; off < map_holes_data_size; off += stride) { - zcram(vm_map_holes_zone, - (vm_offset_t)map_holes_data + off, - MIN(map_holes_data_size - off, stride)); - } + zcram(vm_map_zone, map_data, map_data_size); + zcram(vm_map_entry_reserved_zone, kentry_data, kentry_data_size); + zcram(vm_map_holes_zone, map_holes_data, map_holes_data_size); /* * Since these are covered by zones, remove them from stolen page accounting. */ VM_PAGE_MOVE_STOLEN(atop_64(map_data_size) + atop_64(kentry_data_size) + atop_64(map_holes_data_size)); - lck_grp_attr_setdefault(&vm_map_lck_grp_attr); - lck_grp_init(&vm_map_lck_grp, "vm_map", &vm_map_lck_grp_attr); - lck_attr_setdefault(&vm_map_lck_attr); - - lck_attr_setdefault(&vm_map_lck_rw_attr); - lck_attr_cleardebug(&vm_map_lck_rw_attr); - #if VM_MAP_DEBUG_APPLE_PROTECT PE_parse_boot_argn("vm_map_debug_apple_protect", &vm_map_debug_apple_protect, @@ -1028,16 +1127,22 @@ vm_map_init( kprintf("VM sanity checking disabled. Set bootarg vm_check_map_sanity=1 to enable\n"); } #endif /* DEBUG */ + +#if DEVELOPMENT || DEBUG + PE_parse_boot_argn("panic_on_unsigned_execute", + &panic_on_unsigned_execute, + sizeof(panic_on_unsigned_execute)); +#endif /* DEVELOPMENT || DEBUG */ } -void -vm_map_steal_memory( - void) +__startup_func +static void +vm_map_steal_memory(void) { - uint32_t kentry_initial_pages; + uint16_t kentry_initial_pages; - map_data_size = round_page(10 * sizeof(struct _vm_map)); - map_data = pmap_steal_memory(map_data_size); + map_data_size = zone_get_foreign_alloc_size(VM_MAP_ZONE_NAME, + sizeof(struct _vm_map), VM_MAP_ZFLAGS, 1); /* * kentry_initial_pages corresponds to the number of kernel map entries @@ -1060,12 +1165,29 @@ vm_map_steal_memory( } #endif - kentry_data_size = kentry_initial_pages * PAGE_SIZE; - kentry_data = pmap_steal_memory(kentry_data_size); + kentry_data_size = zone_get_foreign_alloc_size(VME_RESERVED_ZONE_NAME, + sizeof(struct vm_map_entry), VM_MAP_RESERVED_ZFLAGS, + kentry_initial_pages); + + map_holes_data_size = zone_get_foreign_alloc_size(VM_MAP_HOLES_ZONE_NAME, + sizeof(struct vm_map_links), VM_MAP_HOLES_ZFLAGS, + kentry_initial_pages); - map_holes_data_size = kentry_data_size; - map_holes_data = pmap_steal_memory(map_holes_data_size); + /* + * Steal a contiguous range of memory so that a simple range check + * can validate foreign addresses being freed/crammed to these + * zones + */ + vm_size_t total_size; + if (os_add3_overflow(map_data_size, kentry_data_size, + map_holes_data_size, &total_size)) { + panic("vm_map_steal_memory: overflow in amount of memory requested"); + } + map_data = zone_foreign_mem_init(total_size); + kentry_data = map_data + map_data_size; + map_holes_data = kentry_data + kentry_data_size; } +STARTUP(PMAP_STEAL, STARTUP_RANK_FIRST, vm_map_steal_memory); boolean_t vm_map_supports_hole_optimization = FALSE; @@ -1177,7 +1299,7 @@ vm_map_create_options( result->size = 0; result->user_wire_limit = MACH_VM_MAX_ADDRESS; /* default limit is unlimited */ result->user_wire_size = 0; -#if !CONFIG_EMBEDDED +#if XNU_TARGET_OS_OSX result->vmmap_high_start = 0; #endif os_ref_init_count(&result->map_refcnt, &map_refgrp, 1); @@ -1198,10 +1320,13 @@ vm_map_create_options( result->is_nested_map = FALSE; 
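vm_map_steal_memory() above now asks each zone for its foreign backing size, sums the three with os_add3_overflow(), and takes one contiguous block from zone_foreign_mem_init() so that foreign addresses can be validated with a single range check. A user-space sketch of that carve-out, with __builtin_add_overflow standing in for os_add3_overflow and malloc standing in for the stolen memory:

    /* sketch only; sizes are invented and malloc replaces zone_foreign_mem_init() */
    #include <stdio.h>
    #include <stdlib.h>

    int
    main(void)
    {
        size_t map_sz = 1 * 4096, kentry_sz = 6 * 4096, holes_sz = 6 * 4096;
        size_t tmp, total;

        if (__builtin_add_overflow(map_sz, kentry_sz, &tmp) ||
            __builtin_add_overflow(tmp, holes_sz, &total)) {
            fprintf(stderr, "overflow in amount of memory requested\n");
            return 1;
        }

        char *base = malloc(total);
        if (base == NULL) {
            return 1;
        }
        char *map_data    = base;
        char *kentry_data = map_data + map_sz;
        char *holes_data  = kentry_data + kentry_sz;

        /* foreign-address validation collapses to one range check */
        printf("foreign range: [%p, %p)\n", (void *)base, (void *)(base + total));
        (void)holes_data;
        free(base);
        return 0;
    }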
result->map_disallow_new_exec = FALSE; result->terminated = FALSE; + result->cs_enforcement = FALSE; result->highest_entry_end = 0; result->first_free = vm_map_to_entry(result); result->hint = vm_map_to_entry(result); result->jit_entry_exists = FALSE; + result->is_alien = FALSE; + result->reserved_regions = FALSE; /* "has_corpse_footprint" and "holelistenabled" are mutually exclusive */ if (options & VM_MAP_CREATE_CORPSE_FOOTPRINT) { @@ -1233,6 +1358,49 @@ vm_map_create_options( return result; } +vm_map_size_t +vm_map_adjusted_size(vm_map_t map) +{ + struct vm_reserved_region *regions = NULL; + size_t num_regions = 0; + mach_vm_size_t reserved_size = 0, map_size = 0; + + if (map == NULL || (map->size == 0)) { + return 0; + } + + map_size = map->size; + + if (map->reserved_regions == FALSE || !vm_map_is_exotic(map) || map->terminated) { + /* + * No special reserved regions or not an exotic map or the task + * is terminating and these special regions might have already + * been deallocated. + */ + return map_size; + } + + num_regions = ml_get_vm_reserved_regions(vm_map_is_64bit(map), ®ions); + assert((num_regions == 0) || (num_regions > 0 && regions != NULL)); + + while (num_regions) { + reserved_size += regions[--num_regions].vmrr_size; + } + + /* + * There are a few places where the map is being switched out due to + * 'termination' without that bit being set (e.g. exec and corpse purging). + * In those cases, we could have the map's regions being deallocated on + * a core while some accounting process is trying to get the map's size. + * So this assert can't be enabled till all those places are uniform in + * their use of the 'map->terminated' bit. + * + * assert(map_size >= reserved_size); + */ + + return (map_size >= reserved_size) ? (map_size - reserved_size) : map_size; +} + /* * vm_map_entry_create: [ internal use only ] * @@ -1259,7 +1427,7 @@ _vm_map_entry_create( if (map_header->entries_pageable) { entry = (vm_map_entry_t) zalloc(zone); } else { - entry = (vm_map_entry_t) zalloc_canblock(zone, FALSE); + entry = (vm_map_entry_t) zalloc_noblock(zone); if (entry == VM_MAP_ENTRY_NULL) { zone = vm_map_entry_reserved_zone; @@ -1273,6 +1441,7 @@ _vm_map_entry_create( if (entry == VM_MAP_ENTRY_NULL) { panic("vm_map_entry_create"); } + *entry = vm_map_entry_template; entry->from_reserved_zone = (zone == vm_map_entry_reserved_zone); vm_map_store_update((vm_map_t) NULL, entry, VM_MAP_ENTRY_CREATE); @@ -1296,7 +1465,7 @@ _vm_map_entry_create( #define vm_map_entry_dispose(map, entry) \ _vm_map_entry_dispose(&(map)->hdr, (entry)) -#define vm_map_copy_entry_dispose(map, entry) \ +#define vm_map_copy_entry_dispose(copy, entry) \ _vm_map_entry_dispose(&(copy)->cpy_hdr, (entry)) static void @@ -1439,11 +1608,11 @@ vm_map_destroy( /* clean up regular map entries */ (void) vm_map_delete(map, map->min_offset, map->max_offset, flags, VM_MAP_NULL); - /* clean up leftover special mappings (commpage, etc...) */ -#if !defined(__arm__) && !defined(__arm64__) + /* clean up leftover special mappings (commpage, GPU carveout, etc...) */ +#if !defined(__arm__) (void) vm_map_delete(map, 0x0, 0xFFFFFFFFFFFFF000ULL, flags, VM_MAP_NULL); -#endif /* !__arm__ && !__arm64__ */ +#endif /* !__arm__ */ vm_map_disable_hole_optimization(map); vm_map_corpse_footprint_destroy(map); @@ -1733,197 +1902,239 @@ vm_map_lookup_entry( * * If an entry is allocated, the object/offset fields * are initialized to zero. + * + * If VM_MAP_FIND_LAST_FREE flag is set, allocate from end of map. 
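vm_map_adjusted_size() above subtracts the platform's reserved regions from the raw map size, and deliberately tolerates reserved_size exceeding map_size because, as its comment notes, a map can be switched out without the terminated bit being set. A worked example of that guarded subtraction with hypothetical sizes:

    /* sketch only; region sizes are invented */
    #include <stdio.h>
    #include <stddef.h>
    #include <stdint.h>

    int
    main(void)
    {
        uint64_t map_size    = 8ULL << 20;                     /* 8 MB mapped     */
        uint64_t reserved[]  = { 1ULL << 20, 512ULL << 10 };   /* 1 MB + 512 KB   */
        uint64_t reserved_sz = 0;

        for (size_t i = 0; i < sizeof(reserved) / sizeof(reserved[0]); i++) {
            reserved_sz += reserved[i];
        }
        /* fall back to the raw size if accounting races make reserved_sz larger */
        uint64_t adjusted = (map_size >= reserved_sz) ? map_size - reserved_sz : map_size;
        printf("adjusted: %llu KB\n", (unsigned long long)(adjusted >> 10));
        return 0;
    }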
This + * is currently only used for allocating memory for zones backing + * one of the kalloc heaps.(rdar://65832263) */ kern_return_t vm_map_find_space( - vm_map_t map, + vm_map_t map, vm_map_offset_t *address, /* OUT */ vm_map_size_t size, vm_map_offset_t mask, - int flags __unused, + int flags, vm_map_kernel_flags_t vmk_flags, vm_tag_t tag, vm_map_entry_t *o_entry) /* OUT */ { - vm_map_entry_t entry, new_entry; - vm_map_offset_t start; - vm_map_offset_t end; - vm_map_entry_t hole_entry; + vm_map_entry_t entry, new_entry, hole_entry; + vm_map_offset_t start; + vm_map_offset_t end; if (size == 0) { *address = 0; return KERN_INVALID_ARGUMENT; } - if (vmk_flags.vmkf_guard_after) { - /* account for the back guard page in the size */ - size += VM_MAP_PAGE_SIZE(map); - } - new_entry = vm_map_entry_create(map, FALSE); - - /* - * Look for the first possible address; if there's already - * something at this address, we have to start after it. - */ - vm_map_lock(map); - if (map->disable_vmentry_reuse == TRUE) { - VM_MAP_HIGHEST_ENTRY(map, entry, start); - } else { - if (map->holelistenabled) { - hole_entry = CAST_TO_VM_MAP_ENTRY(map->holes_list); - - if (hole_entry == NULL) { - /* - * No more space in the map? - */ - vm_map_entry_dispose(map, new_entry); - vm_map_unlock(map); - return KERN_NO_SPACE; - } - - entry = hole_entry; - start = entry->vme_start; - } else { - assert(first_free_is_valid(map)); - if ((entry = map->first_free) == vm_map_to_entry(map)) { - start = map->min_offset; - } else { - start = entry->vme_end; - } - } - } - - /* - * In any case, the "entry" always precedes - * the proposed new region throughout the loop: - */ - - while (TRUE) { - vm_map_entry_t next; + if (flags & VM_MAP_FIND_LAST_FREE) { + assert(!map->disable_vmentry_reuse); + /* TODO: Make backward lookup generic and support guard pages */ + assert(!vmk_flags.vmkf_guard_after && !vmk_flags.vmkf_guard_before); + assert(VM_MAP_PAGE_ALIGNED(size, VM_MAP_PAGE_MASK(map))); - /* - * Find the end of the proposed new region. - * Be sure we didn't go beyond the end, or - * wrap around the address. - */ + /* Allocate space from end of map */ + vm_map_store_find_last_free(map, &entry); - if (vmk_flags.vmkf_guard_before) { - /* reserve space for the front guard page */ - start += VM_MAP_PAGE_SIZE(map); + if (!entry) { + goto noSpace; } - end = ((start + mask) & ~mask); - if (end < start) { - vm_map_entry_dispose(map, new_entry); - vm_map_unlock(map); - return KERN_NO_SPACE; + if (entry == vm_map_to_entry(map)) { + end = map->max_offset; + } else { + end = entry->vme_start; } - start = end; - assert(VM_MAP_PAGE_ALIGNED(start, VM_MAP_PAGE_MASK(map))); - end += size; - assert(VM_MAP_PAGE_ALIGNED(end, VM_MAP_PAGE_MASK(map))); - if ((end > map->max_offset) || (end < start)) { - vm_map_entry_dispose(map, new_entry); - vm_map_unlock(map); - return KERN_NO_SPACE; - } + while (TRUE) { + vm_map_entry_t prev; - next = entry->vme_next; + start = end - size; - if (map->holelistenabled) { - if (entry->vme_end >= end) { - break; + if ((start < map->min_offset) || end < start) { + goto noSpace; } - } else { - /* - * If there are no more entries, we must win. - * - * OR - * - * If there is another entry, it must be - * after the end of the potential new region. - */ - if (next == vm_map_to_entry(map)) { + prev = entry->vme_prev; + entry = prev; + + if (prev == vm_map_to_entry(map)) { break; } - if (next->vme_start >= end) { + if (prev->vme_end <= start) { break; } + + /* + * Didn't fit -- move to the next entry. 
+ */ + + end = entry->vme_start; + } + } else { + if (vmk_flags.vmkf_guard_after) { + /* account for the back guard page in the size */ + size += VM_MAP_PAGE_SIZE(map); } /* - * Didn't fit -- move to the next entry. + * Look for the first possible address; if there's already + * something at this address, we have to start after it. */ - entry = next; + if (map->disable_vmentry_reuse == TRUE) { + VM_MAP_HIGHEST_ENTRY(map, entry, start); + } else { + if (map->holelistenabled) { + hole_entry = CAST_TO_VM_MAP_ENTRY(map->holes_list); - if (map->holelistenabled) { - if (entry == CAST_TO_VM_MAP_ENTRY(map->holes_list)) { - /* - * Wrapped around - */ - vm_map_entry_dispose(map, new_entry); - vm_map_unlock(map); - return KERN_NO_SPACE; + if (hole_entry == NULL) { + /* + * No more space in the map? + */ + goto noSpace; + } + + entry = hole_entry; + start = entry->vme_start; + } else { + assert(first_free_is_valid(map)); + if ((entry = map->first_free) == vm_map_to_entry(map)) { + start = map->min_offset; + } else { + start = entry->vme_end; + } } - start = entry->vme_start; - } else { - start = entry->vme_end; } - } - if (map->holelistenabled) { - if (vm_map_lookup_entry(map, entry->vme_start, &entry)) { - panic("Found an existing entry (%p) instead of potential hole at address: 0x%llx.\n", entry, (unsigned long long)entry->vme_start); - } - } + /* + * In any case, the "entry" always precedes + * the proposed new region throughout the loop: + */ - /* - * At this point, - * "start" and "end" should define the endpoints of the - * available new range, and - * "entry" should refer to the region before the new - * range, and - * - * the map should be locked. - */ + while (TRUE) { + vm_map_entry_t next; - if (vmk_flags.vmkf_guard_before) { - /* go back for the front guard page */ - start -= VM_MAP_PAGE_SIZE(map); - } - *address = start; + /* + * Find the end of the proposed new region. + * Be sure we didn't go beyond the end, or + * wrap around the address. + */ - assert(start < end); - new_entry->vme_start = start; - new_entry->vme_end = end; - assert(page_aligned(new_entry->vme_start)); - assert(page_aligned(new_entry->vme_end)); - assert(VM_MAP_PAGE_ALIGNED(new_entry->vme_start, - VM_MAP_PAGE_MASK(map))); - assert(VM_MAP_PAGE_ALIGNED(new_entry->vme_end, - VM_MAP_PAGE_MASK(map))); + if (vmk_flags.vmkf_guard_before) { + /* reserve space for the front guard page */ + start += VM_MAP_PAGE_SIZE(map); + } + end = ((start + mask) & ~mask); - new_entry->is_shared = FALSE; - new_entry->is_sub_map = FALSE; - new_entry->use_pmap = TRUE; - VME_OBJECT_SET(new_entry, VM_OBJECT_NULL); - VME_OFFSET_SET(new_entry, (vm_object_offset_t) 0); + if (end < start) { + goto noSpace; + } + start = end; + assert(VM_MAP_PAGE_ALIGNED(start, VM_MAP_PAGE_MASK(map))); + end += size; + assert(VM_MAP_PAGE_ALIGNED(end, VM_MAP_PAGE_MASK(map))); - new_entry->needs_copy = FALSE; + if ((end > map->max_offset) || (end < start)) { + goto noSpace; + } - new_entry->inheritance = VM_INHERIT_DEFAULT; - new_entry->protection = VM_PROT_DEFAULT; - new_entry->max_protection = VM_PROT_ALL; - new_entry->behavior = VM_BEHAVIOR_DEFAULT; - new_entry->wired_count = 0; - new_entry->user_wired_count = 0; + next = entry->vme_next; + + if (map->holelistenabled) { + if (entry->vme_end >= end) { + break; + } + } else { + /* + * If there are no more entries, we must win. + * + * OR + * + * If there is another entry, it must be + * after the end of the potential new region. 
+ */ + + if (next == vm_map_to_entry(map)) { + break; + } + + if (next->vme_start >= end) { + break; + } + } + + /* + * Didn't fit -- move to the next entry. + */ + + entry = next; + + if (map->holelistenabled) { + if (entry == CAST_TO_VM_MAP_ENTRY(map->holes_list)) { + /* + * Wrapped around + */ + goto noSpace; + } + start = entry->vme_start; + } else { + start = entry->vme_end; + } + } + + if (vmk_flags.vmkf_guard_before) { + /* go back for the front guard page */ + start -= VM_MAP_PAGE_SIZE(map); + } + } + + if (map->holelistenabled) { + if (vm_map_lookup_entry(map, entry->vme_start, &entry)) { + panic("Found an existing entry (%p) instead of potential hole at address: 0x%llx.\n", entry, (unsigned long long)entry->vme_start); + } + } + + /* + * At this point, + * "start" and "end" should define the endpoints of the + * available new range, and + * "entry" should refer to the region before the new + * range, and + * + * the map should be locked. + */ + + *address = start; + + assert(start < end); + new_entry->vme_start = start; + new_entry->vme_end = end; + assert(page_aligned(new_entry->vme_start)); + assert(page_aligned(new_entry->vme_end)); + assert(VM_MAP_PAGE_ALIGNED(new_entry->vme_start, + VM_MAP_PAGE_MASK(map))); + assert(VM_MAP_PAGE_ALIGNED(new_entry->vme_end, + VM_MAP_PAGE_MASK(map))); + + new_entry->is_shared = FALSE; + new_entry->is_sub_map = FALSE; + new_entry->use_pmap = TRUE; + VME_OBJECT_SET(new_entry, VM_OBJECT_NULL); + VME_OFFSET_SET(new_entry, (vm_object_offset_t) 0); + + new_entry->needs_copy = FALSE; + + new_entry->inheritance = VM_INHERIT_DEFAULT; + new_entry->protection = VM_PROT_DEFAULT; + new_entry->max_protection = VM_PROT_ALL; + new_entry->behavior = VM_BEHAVIOR_DEFAULT; + new_entry->wired_count = 0; + new_entry->user_wired_count = 0; new_entry->in_transition = FALSE; new_entry->needs_wakeup = FALSE; @@ -1965,6 +2176,12 @@ vm_map_find_space( *o_entry = new_entry; return KERN_SUCCESS; + +noSpace: + + vm_map_entry_dispose(map, new_entry); + vm_map_unlock(map); + return KERN_NO_SPACE; } int vm_map_pmap_enter_print = FALSE; @@ -2001,6 +2218,8 @@ vm_map_pmap_enter( return; } + assert(VM_MAP_PAGE_SHIFT(map) == PAGE_SHIFT); + while (addr < end_addr) { vm_page_t m; @@ -2032,7 +2251,9 @@ vm_map_pmap_enter( } type_of_fault = DBG_CACHE_HIT_FAULT; kr = vm_fault_enter(m, map->pmap, - addr, protection, protection, + addr, + PAGE_SIZE, 0, + protection, protection, VM_PAGE_WIRED(m), FALSE, /* change_wiring */ VM_KERN_MEMORY_NONE, /* tag - not wiring */ @@ -2102,10 +2323,10 @@ vm_map_random_address_for_size( addr_space_size = vm_map_max(map) - vm_map_min(map); - assert(page_aligned(size)); + assert(VM_MAP_PAGE_ALIGNED(size, VM_MAP_PAGE_MASK(map))); while (tries < MAX_TRIES_TO_GET_RANDOM_ADDRESS) { - random_addr = ((vm_map_offset_t)random()) << PAGE_SHIFT; + random_addr = ((vm_map_offset_t)random()) << VM_MAP_PAGE_SHIFT(map); random_addr = vm_map_trunc_page( vm_map_min(map) + (random_addr % addr_space_size), VM_MAP_PAGE_MASK(map)); @@ -2163,7 +2384,6 @@ vm_memory_malloc_no_cow( * * Arguments are as defined in the vm_map call. 
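The VM_MAP_FIND_LAST_FREE path added to vm_map_find_space() above walks the entry list backwards from the tail and returns the highest hole large enough, so kalloc-heap backing pages land near the top of the map. A simplified sketch of that backward first-fit search over a sorted array rather than the map's entry list; the data structures and the example addresses are illustrative:

    /* sketch only; a sorted array of existing allocations replaces the entry list */
    #include <stdio.h>
    #include <stdint.h>

    struct range { uint64_t start, end; };

    static int
    find_last_free(const struct range *r, int n, uint64_t map_min, uint64_t map_max,
        uint64_t size, uint64_t *out)
    {
        uint64_t end = map_max;
        for (int i = n - 1; i >= -1; i--) {
            uint64_t hole_start = (i >= 0) ? r[i].end : map_min;
            if (end >= size && end - size >= hole_start) {
                *out = end - size;          /* highest fitting hole wins        */
                return 1;
            }
            if (i >= 0) {
                end = r[i].start;           /* didn't fit: move down one hole   */
            }
        }
        return 0;
    }

    int
    main(void)
    {
        struct range used[] = { { 0x1000, 0x3000 }, { 0x8000, 0x9000 } };
        uint64_t addr;
        if (find_last_free(used, 2, 0x1000, 0x10000, 0x2000, &addr)) {
            printf("found 0x%llx\n", (unsigned long long)addr);   /* expect 0xe000 */
        }
        return 0;
    }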
*/ -int _map_enter_debug = 0; static unsigned int vm_map_enter_restore_successes = 0; static unsigned int vm_map_enter_restore_failures = 0; kern_return_t @@ -2204,6 +2424,7 @@ vm_map_enter( boolean_t no_copy_on_read = vmk_flags.vmkf_no_copy_on_read; boolean_t entry_for_jit = vmk_flags.vmkf_map_jit; boolean_t iokit_acct = vmk_flags.vmkf_iokit_acct; + boolean_t translated_allow_execute = vmk_flags.vmkf_translated_allow_execute; boolean_t resilient_codesign = ((flags & VM_FLAGS_RESILIENT_CODESIGN) != 0); boolean_t resilient_media = ((flags & VM_FLAGS_RESILIENT_MEDIA) != 0); boolean_t random_address = ((flags & VM_FLAGS_RANDOM_ADDR) != 0); @@ -2240,6 +2461,7 @@ vm_map_enter( case SUPERPAGE_SIZE_ANY: /* handle it like 2 MB and round up to page size */ size = (size + 2 * 1024 * 1024 - 1) & ~(2 * 1024 * 1024 - 1); + OS_FALLTHROUGH; case SUPERPAGE_SIZE_2MB: break; #endif @@ -2256,33 +2478,40 @@ vm_map_enter( if ((cur_protection & VM_PROT_WRITE) && (cur_protection & VM_PROT_EXECUTE) && -#if !CONFIG_EMBEDDED - map != kernel_map && +#if XNU_TARGET_OS_OSX + map->pmap != kernel_pmap && (cs_process_global_enforcement() || (vmk_flags.vmkf_cs_enforcement_override ? vmk_flags.vmkf_cs_enforcement - : cs_process_enforcement(NULL))) && -#endif /* !CONFIG_EMBEDDED */ + : (vm_map_cs_enforcement(map) +#if __arm64__ + || !VM_MAP_IS_EXOTIC(map) +#endif /* __arm64__ */ + ))) && +#endif /* XNU_TARGET_OS_OSX */ +#if PMAP_CS + !pmap_cs_exempt(map->pmap) && +#endif + (VM_MAP_POLICY_WX_FAIL(map) || + VM_MAP_POLICY_WX_STRIP_X(map)) && !entry_for_jit) { + boolean_t vm_protect_wx_fail = VM_MAP_POLICY_WX_FAIL(map); + DTRACE_VM3(cs_wx, uint64_t, 0, uint64_t, 0, vm_prot_t, cur_protection); - printf("CODE SIGNING: %d[%s] %s: curprot cannot be write+execute. " -#if VM_PROTECT_WX_FAIL - "failing\n", -#else /* VM_PROTECT_WX_FAIL */ - "turning off execute\n", -#endif /* VM_PROTECT_WX_FAIL */ + printf("CODE SIGNING: %d[%s] %s: curprot cannot be write+execute. %s\n", proc_selfpid(), (current_task()->bsd_info ? proc_name_address(current_task()->bsd_info) : "?"), - __FUNCTION__); + __FUNCTION__, + (vm_protect_wx_fail ? "failing" : "turning off execute")); cur_protection &= ~VM_PROT_EXECUTE; -#if VM_PROTECT_WX_FAIL - return KERN_PROTECTION_FAILURE; -#endif /* VM_PROTECT_WX_FAIL */ + if (vm_protect_wx_fail) { + return KERN_PROTECTION_FAILURE; + } } /* @@ -2297,8 +2526,8 @@ vm_map_enter( if (resilient_codesign) { assert(!is_submap); - if ((cur_protection & (VM_PROT_WRITE | VM_PROT_EXECUTE)) || - (max_protection & (VM_PROT_WRITE | VM_PROT_EXECUTE))) { + int reject_prot = (needs_copy ? VM_PROT_EXECUTE : (VM_PROT_WRITE | VM_PROT_EXECUTE)); + if ((cur_protection | max_protection) & reject_prot) { return KERN_PROTECTION_FAILURE; } } @@ -2351,26 +2580,26 @@ vm_map_enter( /* * Allow an insertion beyond the map's max offset. 
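The reworked check above folds the old CONFIG_EMBEDDED / VM_PROTECT_WX_FAIL ifdefs into per-map policy queries: a write+execute request either loses VM_PROT_EXECUTE or fails outright, depending on VM_MAP_POLICY_WX_FAIL and VM_MAP_POLICY_WX_STRIP_X. A compact sketch of that decision with stand-in protection bits and policy values:

    /* sketch only; protection bits and policy enum are stand-ins */
    #include <stdio.h>

    #define SK_PROT_WRITE   0x2
    #define SK_PROT_EXECUTE 0x4

    enum wx_policy_sk { WX_ALLOW, WX_STRIP_X, WX_FAIL };

    static int
    apply_wx_policy(int *prot, enum wx_policy_sk policy)
    {
        if ((*prot & SK_PROT_WRITE) && (*prot & SK_PROT_EXECUTE) && policy != WX_ALLOW) {
            *prot &= ~SK_PROT_EXECUTE;      /* "turning off execute"                  */
            if (policy == WX_FAIL) {
                return -1;                  /* "failing": KERN_PROTECTION_FAILURE path */
            }
        }
        return 0;
    }

    int
    main(void)
    {
        int prot = SK_PROT_WRITE | SK_PROT_EXECUTE;
        int rc = apply_wx_policy(&prot, WX_STRIP_X);
        printf("rc=%d prot=0x%x\n", rc, prot);
        return 0;
    }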
*/ -#if !defined(__arm__) && !defined(__arm64__) +#if !defined(__arm__) if (vm_map_is_64bit(map)) { effective_max_offset = 0xFFFFFFFFFFFFF000ULL; } else #endif /* __arm__ */ effective_max_offset = 0x00000000FFFFF000ULL; } else { -#if !defined(CONFIG_EMBEDDED) +#if XNU_TARGET_OS_OSX if (__improbable(vmk_flags.vmkf_32bit_map_va)) { effective_max_offset = MIN(map->max_offset, 0x00000000FFFFF000ULL); } else { effective_max_offset = map->max_offset; } -#else +#else /* XNU_TARGET_OS_OSX */ effective_max_offset = map->max_offset; -#endif +#endif /* XNU_TARGET_OS_OSX */ } if (size == 0 || - (offset & PAGE_MASK_64) != 0) { + (offset & MIN(VM_MAP_PAGE_MASK(map), PAGE_MASK_64)) != 0) { *address = 0; return KERN_INVALID_ARGUMENT; } @@ -2387,10 +2616,15 @@ vm_map_enter( #define RETURN(value) { result = value; goto BailOut; } - assert(page_aligned(*address)); - assert(page_aligned(size)); + assertf(VM_MAP_PAGE_ALIGNED(*address, FOURK_PAGE_MASK), "0x%llx", (uint64_t)*address); + assertf(VM_MAP_PAGE_ALIGNED(size, FOURK_PAGE_MASK), "0x%llx", (uint64_t)size); + if (VM_MAP_PAGE_MASK(map) >= PAGE_MASK) { + assertf(page_aligned(*address), "0x%llx", (uint64_t)*address); + assertf(page_aligned(size), "0x%llx", (uint64_t)size); + } - if (!VM_MAP_PAGE_ALIGNED(size, VM_MAP_PAGE_MASK(map))) { + if (VM_MAP_PAGE_MASK(map) >= PAGE_MASK && + !VM_MAP_PAGE_ALIGNED(size, VM_MAP_PAGE_MASK(map))) { /* * In most cases, the caller rounds the size up to the * map's page size. @@ -2402,6 +2636,7 @@ vm_map_enter( clear_map_aligned = TRUE; } if (!anywhere && + VM_MAP_PAGE_MASK(map) >= PAGE_MASK && !VM_MAP_PAGE_ALIGNED(*address, VM_MAP_PAGE_MASK(map))) { /* * We've been asked to map at a fixed address and that @@ -2454,13 +2689,14 @@ StartAgain:; map_locked = TRUE; if (entry_for_jit) { -#if CONFIG_EMBEDDED - if (map->jit_entry_exists) { + if (map->jit_entry_exists && + !VM_MAP_POLICY_ALLOW_MULTIPLE_JIT(map)) { result = KERN_INVALID_ARGUMENT; goto BailOut; } - random_address = TRUE; -#endif /* CONFIG_EMBEDDED */ + if (VM_MAP_POLICY_ALLOW_JIT_RANDOM_ADDRESS(map)) { + random_address = TRUE; + } } if (random_address) { @@ -2473,13 +2709,13 @@ StartAgain:; } start = *address; } -#if !CONFIG_EMBEDDED +#if XNU_TARGET_OS_OSX else if ((start == 0 || start == vm_map_min(map)) && !map->disable_vmentry_reuse && map->vmmap_high_start != 0) { start = map->vmmap_high_start; } -#endif +#endif /* XNU_TARGET_OS_OSX */ /* @@ -2682,6 +2918,17 @@ StartAgain:; assert(VM_MAP_PAGE_ALIGNED(*address, VM_MAP_PAGE_MASK(map))); } else { + if (VM_MAP_PAGE_SHIFT(map) < PAGE_SHIFT && + !overwrite && + user_alias == VM_MEMORY_REALLOC) { + /* + * Force realloc() to switch to a new allocation, + * to prevent 4k-fragmented virtual ranges. + */ +// DEBUG4K_ERROR("no realloc in place"); + return KERN_NO_SPACE; + } + /* * Verify that: * the address doesn't itself violate @@ -2864,6 +3111,8 @@ StartAgain:; } offset = (vm_object_offset_t)0; } + } else if (VM_MAP_PAGE_SHIFT(map) < PAGE_SHIFT) { + /* no coalescing if address space uses sub-pages */ } else if ((is_submap == FALSE) && (object == VM_OBJECT_NULL) && (entry != vm_map_to_entry(map)) && @@ -2960,29 +3209,32 @@ StartAgain:; tmp_end = tmp2_end; } do { - new_entry = vm_map_entry_insert( - map, entry, tmp_start, tmp_end, - object, offset, needs_copy, - FALSE, FALSE, - cur_protection, max_protection, - VM_BEHAVIOR_DEFAULT, - (entry_for_jit)? 
VM_INHERIT_NONE: inheritance, - 0, - no_cache, - permanent, - no_copy_on_read, - superpage_size, - clear_map_aligned, - is_submap, - entry_for_jit, - alias); + new_entry = vm_map_entry_insert(map, + entry, tmp_start, tmp_end, + object, offset, needs_copy, + FALSE, FALSE, + cur_protection, max_protection, + VM_BEHAVIOR_DEFAULT, + (entry_for_jit && !VM_MAP_POLICY_ALLOW_JIT_INHERIT(map) ? + VM_INHERIT_NONE : inheritance), + 0, + no_cache, + permanent, + no_copy_on_read, + superpage_size, + clear_map_aligned, + is_submap, + entry_for_jit, + alias, + translated_allow_execute); assert((object != kernel_object) || (VM_KERN_MEMORY_NONE != alias)); - if (resilient_codesign && - !((cur_protection | max_protection) & - (VM_PROT_WRITE | VM_PROT_EXECUTE))) { - new_entry->vme_resilient_codesign = TRUE; + if (resilient_codesign) { + int reject_prot = (needs_copy ? VM_PROT_EXECUTE : (VM_PROT_WRITE | VM_PROT_EXECUTE)); + if (!((cur_protection | max_protection) & reject_prot)) { + new_entry->vme_resilient_codesign = TRUE; + } } if (resilient_media && @@ -3055,11 +3307,15 @@ StartAgain:; #endif } if (use_pmap && submap->pmap != NULL) { - kr = pmap_nest(map->pmap, - submap->pmap, - tmp_start, - tmp_start, - tmp_end - tmp_start); + if (VM_MAP_PAGE_SHIFT(map) != VM_MAP_PAGE_SHIFT(submap)) { + DEBUG4K_ERROR("map %p (%d) submap %p (%d): incompatible page sizes\n", map, VM_MAP_PAGE_SHIFT(map), submap, VM_MAP_PAGE_SHIFT(submap)); + kr = KERN_FAILURE; + } else { + kr = pmap_nest(map->pmap, + submap->pmap, + tmp_start, + tmp_end - tmp_start); + } if (kr != KERN_SUCCESS) { printf("vm_map_enter: " "pmap_nest(0x%llx,0x%llx) " @@ -3350,6 +3606,7 @@ vm_map_enter_fourk( boolean_t no_copy_on_read = vmk_flags.vmkf_permanent; boolean_t entry_for_jit = vmk_flags.vmkf_map_jit; // boolean_t iokit_acct = vmk_flags.vmkf_iokit_acct; + boolean_t translated_allow_execute = vmk_flags.vmkf_translated_allow_execute; unsigned int superpage_size = ((flags & VM_FLAGS_SUPERPAGE_MASK) >> VM_FLAGS_SUPERPAGE_SHIFT); vm_map_offset_t effective_min_offset, effective_max_offset; kern_return_t kr; @@ -3363,6 +3620,9 @@ vm_map_enter_fourk( vm_object_t copy_object; vm_object_offset_t copy_offset; + if (VM_MAP_PAGE_MASK(map) < PAGE_MASK) { + panic("%s:%d\n", __FUNCTION__, __LINE__); + } fourk_mem_obj = MEMORY_OBJECT_NULL; fourk_object = VM_OBJECT_NULL; @@ -3372,10 +3632,17 @@ vm_map_enter_fourk( if ((cur_protection & VM_PROT_WRITE) && (cur_protection & VM_PROT_EXECUTE) && -#if !CONFIG_EMBEDDED - map != kernel_map && - cs_process_enforcement(NULL) && -#endif /* !CONFIG_EMBEDDED */ +#if XNU_TARGET_OS_OSX + map->pmap != kernel_pmap && + (vm_map_cs_enforcement(map) +#if __arm64__ + || !VM_MAP_IS_EXOTIC(map) +#endif /* __arm64__ */ + ) && +#endif /* XNU_TARGET_OS_OSX */ +#if PMAP_CS + !pmap_cs_exempt(map->pmap) && +#endif !entry_for_jit) { DTRACE_VM3(cs_wx, uint64_t, 0, @@ -3545,6 +3812,7 @@ vm_map_enter_fourk( /* keep the "4K" object alive */ vm_object_reference_locked(fourk_object); + memory_object_reference(fourk_mem_obj); vm_object_unlock(fourk_object); /* merge permissions */ @@ -3558,7 +3826,6 @@ vm_map_enter_fourk( /* write+execute: need to be "jit" */ entry->used_for_jit = TRUE; } - goto map_in_fourk_pager; } @@ -3603,9 +3870,6 @@ vm_map_enter_fourk( assert(copy_object != VM_OBJECT_NULL); assert(copy_offset == 0); - /* take a reference on the copy object, for this mapping */ - vm_object_reference(copy_object); - /* map the "4K" pager's copy object */ new_entry = vm_map_entry_insert(map, entry, @@ -3620,9 +3884,8 @@ vm_map_enter_fourk( FALSE, 
cur_protection, max_protection, VM_BEHAVIOR_DEFAULT, - ((entry_for_jit) - ? VM_INHERIT_NONE - : inheritance), + (entry_for_jit && !VM_MAP_POLICY_ALLOW_JIT_INHERIT(map) ? + VM_INHERIT_NONE : inheritance), 0, no_cache, permanent, @@ -3631,7 +3894,8 @@ vm_map_enter_fourk( clear_map_aligned, is_submap, FALSE, /* jit */ - alias); + alias, + translated_allow_execute); entry = new_entry; #if VM_MAP_DEBUG_FOURK @@ -3729,12 +3993,6 @@ map_in_fourk_pager: BailOut: assert(map_locked == TRUE); - if (fourk_object != VM_OBJECT_NULL) { - vm_object_deallocate(fourk_object); - fourk_object = VM_OBJECT_NULL; - fourk_mem_obj = MEMORY_OBJECT_NULL; - } - if (result == KERN_SUCCESS) { vm_prot_t pager_prot; memory_object_t pager; @@ -3810,6 +4068,13 @@ BailOut: } } + if (fourk_object != VM_OBJECT_NULL) { + vm_object_deallocate(fourk_object); + fourk_object = VM_OBJECT_NULL; + memory_object_deallocate(fourk_mem_obj); + fourk_mem_obj = MEMORY_OBJECT_NULL; + } + assert(map_locked == TRUE); if (!keep_map_locked) { @@ -3981,6 +4246,11 @@ vm_map_enter_mem_object_helper( boolean_t fourk = vmk_flags.vmkf_fourk; #endif /* __arm64__ */ + if (VM_MAP_PAGE_SHIFT(target_map) < PAGE_SHIFT) { + /* XXX TODO4K prefaulting depends on page size... */ + try_prefault = FALSE; + } + assertf(vmk_flags.__vmkf_unused == 0, "vmk_flags unused=0x%x\n", vmk_flags.__vmkf_unused); mask_cur_protection = cur_protection & VM_PROT_IS_MASK; @@ -4001,6 +4271,10 @@ vm_map_enter_mem_object_helper( } #if __arm64__ + if (fourk && VM_MAP_PAGE_SHIFT(target_map) < PAGE_SHIFT) { + /* no "fourk" if map is using a sub-page page size */ + fourk = FALSE; + } if (fourk) { map_addr = vm_map_trunc_page(*address, FOURK_PAGE_MASK); map_size = vm_map_round_page(initial_size, FOURK_PAGE_MASK); @@ -4023,12 +4297,16 @@ vm_map_enter_mem_object_helper( copy = FALSE; } else if (ip_kotype(port) == IKOT_NAMED_ENTRY) { vm_named_entry_t named_entry; + vm_object_offset_t data_offset; named_entry = (vm_named_entry_t) ip_get_kobject(port); if (flags & (VM_FLAGS_RETURN_DATA_ADDR | VM_FLAGS_RETURN_4K_DATA_ADDR)) { + data_offset = named_entry->data_offset; offset += named_entry->data_offset; + } else { + data_offset = 0; } /* a few checks to make sure user is obeying rules */ @@ -4071,18 +4349,6 @@ vm_map_enter_mem_object_helper( size, VM_MAP_PAGE_MASK(target_map)); } - - if (!(flags & VM_FLAGS_ANYWHERE) && - (offset != 0 || - size != named_entry->size)) { - /* - * XXX for a mapping at a "fixed" address, - * we can't trim after mapping the whole - * memory entry, so reject a request for a - * partial mapping. 
- */ - return KERN_INVALID_ARGUMENT; - } } /* the callers parameter offset is defined to be the */ @@ -4109,9 +4375,7 @@ vm_map_enter_mem_object_helper( } submap = named_entry->backing.map; - vm_map_lock(submap); vm_map_reference(submap); - vm_map_unlock(submap); named_entry_unlock(named_entry); vmk_flags.vmkf_submap = TRUE; @@ -4162,6 +4426,10 @@ vm_map_enter_mem_object_helper( vm_map_copy_t copy_map; vm_map_entry_t copy_entry; vm_map_offset_t copy_addr; + vm_map_copy_t target_copy_map; + vm_map_offset_t overmap_start, overmap_end; + vm_map_offset_t trimmed_start; + vm_map_size_t target_size; if (flags & ~(VM_FLAGS_FIXED | VM_FLAGS_ANYWHERE | @@ -4173,19 +4441,8 @@ vm_map_enter_mem_object_helper( return KERN_INVALID_ARGUMENT; } - if (flags & (VM_FLAGS_RETURN_DATA_ADDR | - VM_FLAGS_RETURN_4K_DATA_ADDR)) { - offset_in_mapping = offset - vm_object_trunc_page(offset); - if (flags & VM_FLAGS_RETURN_4K_DATA_ADDR) { - offset_in_mapping &= ~((signed)(0xFFF)); - } - offset = vm_object_trunc_page(offset); - map_size = vm_object_round_page(offset + offset_in_mapping + initial_size) - offset; - } - copy_map = named_entry->backing.copy; assert(copy_map->type == VM_MAP_COPY_ENTRY_LIST); - zone_require(copy_map, vm_map_copy_zone); if (copy_map->type != VM_MAP_COPY_ENTRY_LIST) { /* unsupported type; should not happen */ printf("vm_map_enter_mem_object: " @@ -4196,11 +4453,53 @@ vm_map_enter_mem_object_helper( return KERN_INVALID_ARGUMENT; } + if (VM_MAP_PAGE_SHIFT(target_map) != copy_map->cpy_hdr.page_shift) { + DEBUG4K_SHARE("copy_map %p offset %llx size 0x%llx pgshift %d -> target_map %p pgshift %d\n", copy_map, offset, (uint64_t)map_size, copy_map->cpy_hdr.page_shift, target_map, VM_MAP_PAGE_SHIFT(target_map)); + } + + if (flags & (VM_FLAGS_RETURN_DATA_ADDR | + VM_FLAGS_RETURN_4K_DATA_ADDR)) { + offset_in_mapping = offset & VM_MAP_PAGE_MASK(target_map); + if (flags & VM_FLAGS_RETURN_4K_DATA_ADDR) { + offset_in_mapping &= ~((signed)(0xFFF)); + } + } + + target_copy_map = VM_MAP_COPY_NULL; + target_size = copy_map->size; + overmap_start = 0; + overmap_end = 0; + trimmed_start = 0; + if (copy_map->cpy_hdr.page_shift != VM_MAP_PAGE_SHIFT(target_map)) { + DEBUG4K_ADJUST("adjusting...\n"); + kr = vm_map_copy_adjust_to_target( + copy_map, + offset /* includes data_offset */, + initial_size, + target_map, + copy, + &target_copy_map, + &overmap_start, + &overmap_end, + &trimmed_start); + if (kr != KERN_SUCCESS) { + named_entry_unlock(named_entry); + return kr; + } + target_size = target_copy_map->size; + if (trimmed_start >= data_offset) { + data_offset = offset & VM_MAP_PAGE_MASK(target_map); + } else { + data_offset -= trimmed_start; + } + } else { + target_copy_map = copy_map; + } + /* reserve a contiguous range */ kr = vm_map_enter(target_map, &map_addr, - /* map whole mem entry, trim later: */ - named_entry->size, + vm_map_round_page(target_size, VM_MAP_PAGE_MASK(target_map)), mask, flags & (VM_FLAGS_ANYWHERE | VM_FLAGS_OVERWRITE | @@ -4215,14 +4514,19 @@ vm_map_enter_mem_object_helper( max_protection, inheritance); if (kr != KERN_SUCCESS) { + DEBUG4K_ERROR("kr 0x%x\n", kr); + if (target_copy_map != copy_map) { + vm_map_copy_discard(target_copy_map); + target_copy_map = VM_MAP_COPY_NULL; + } named_entry_unlock(named_entry); return kr; } copy_addr = map_addr; - for (copy_entry = vm_map_copy_first_entry(copy_map); - copy_entry != vm_map_copy_to_entry(copy_map); + for (copy_entry = vm_map_copy_first_entry(target_copy_map); + copy_entry != vm_map_copy_to_entry(target_copy_map); copy_entry = 
copy_entry->vme_next) { int remap_flags; vm_map_kernel_flags_t vmk_remap_flags; @@ -4252,9 +4556,11 @@ vm_map_enter_mem_object_helper( /* sanity check */ if ((copy_addr + copy_size) > (map_addr + + overmap_start + overmap_end + named_entry->size /* XXX full size */)) { /* over-mapping too much !? */ kr = KERN_INVALID_ARGUMENT; + DEBUG4K_ERROR("kr 0x%x\n", kr); /* abort */ break; } @@ -4297,6 +4603,7 @@ vm_map_enter_mem_object_helper( copy_offset, copy_size, PMAP_NULL, + PAGE_SIZE, 0, prot); } @@ -4335,18 +4642,21 @@ vm_map_enter_mem_object_helper( */ assert(!copy_entry->needs_copy); } -#if !CONFIG_EMBEDDED +#if XNU_TARGET_OS_OSX if (copy_entry->used_for_jit) { vmk_remap_flags.vmkf_map_jit = TRUE; } -#endif /* !CONFIG_EMBEDDED */ +#endif /* XNU_TARGET_OS_OSX */ + + assertf((copy_vm_alias & VME_ALIAS_MASK) == copy_vm_alias, + "VM Tag truncated from 0x%x to 0x%x\n", copy_vm_alias, (copy_vm_alias & VME_ALIAS_MASK)); kr = vm_map_enter(target_map, &copy_addr, copy_size, (vm_map_offset_t) 0, remap_flags, vmk_remap_flags, - copy_vm_alias, + (vm_tag_t) copy_vm_alias, /* see comment at end of vm_fault_unwire re. cast*/ copy_object, copy_offset, ((copy_object == NULL) ? FALSE : copy), cur_protection, max_protection, inheritance); if (kr != KERN_SUCCESS) { + DEBUG4K_SHARE("failed kr 0x%x\n", kr); if (copy_entry->is_sub_map) { vm_map_deallocate(copy_submap); } else { @@ -4374,32 +4685,16 @@ vm_map_enter_mem_object_helper( } else { *address = map_addr; } - - if (offset) { - /* - * Trim in front, from 0 to "offset". - */ - vm_map_remove(target_map, - map_addr, - map_addr + offset, - VM_MAP_REMOVE_NO_FLAGS); - *address += offset; - } - if (offset + map_size < named_entry->size) { - /* - * Trim in back, from - * "offset + map_size" to - * "named_entry->size". 
- */ - vm_map_remove(target_map, - (map_addr + - offset + map_size), - (map_addr + - named_entry->size), - VM_MAP_REMOVE_NO_FLAGS); + if (overmap_start) { + *address += overmap_start; + DEBUG4K_SHARE("map %p map_addr 0x%llx offset_in_mapping 0x%llx overmap_start 0x%llx -> *address 0x%llx\n", target_map, (uint64_t)map_addr, (uint64_t) offset_in_mapping, (uint64_t)overmap_start, (uint64_t)*address); } } named_entry_unlock(named_entry); + if (target_copy_map != copy_map) { + vm_map_copy_discard(target_copy_map); + target_copy_map = VM_MAP_COPY_NULL; + } if (kr != KERN_SUCCESS) { if (!(flags & VM_FLAGS_OVERWRITE)) { @@ -4411,7 +4706,9 @@ vm_map_enter_mem_object_helper( } return kr; - } else { + } + + if (named_entry->is_object) { unsigned int access; vm_prot_t protections; unsigned int wimg_mode; @@ -4423,15 +4720,15 @@ vm_map_enter_mem_object_helper( if (flags & (VM_FLAGS_RETURN_DATA_ADDR | VM_FLAGS_RETURN_4K_DATA_ADDR)) { - offset_in_mapping = offset - vm_object_trunc_page(offset); + offset_in_mapping = offset - VM_MAP_TRUNC_PAGE(offset, VM_MAP_PAGE_MASK(target_map)); if (flags & VM_FLAGS_RETURN_4K_DATA_ADDR) { offset_in_mapping &= ~((signed)(0xFFF)); } - offset = vm_object_trunc_page(offset); - map_size = vm_object_round_page(offset + offset_in_mapping + initial_size) - offset; + offset = VM_MAP_TRUNC_PAGE(offset, VM_MAP_PAGE_MASK(target_map)); + map_size = VM_MAP_ROUND_PAGE((offset + offset_in_mapping + initial_size) - offset, VM_MAP_PAGE_MASK(target_map)); } - object = named_entry->backing.object; + object = vm_named_entry_to_vm_object(named_entry); assert(object != VM_OBJECT_NULL); vm_object_lock(object); named_entry_unlock(named_entry); @@ -4445,6 +4742,8 @@ vm_map_enter_mem_object_helper( } vm_object_unlock(object); + } else { + panic("invalid VM named entry %p", named_entry); } } else if (ip_kotype(port) == IKOT_MEMORY_OBJECT) { /* @@ -4816,6 +5115,10 @@ vm_map_enter_mem_object_control( } #if __arm64__ + if (fourk && VM_MAP_PAGE_MASK(target_map) < PAGE_MASK) { + fourk = FALSE; + } + if (fourk) { map_addr = vm_map_trunc_page(*address, FOURK_PAGE_MASK); @@ -4999,6 +5302,12 @@ vm_map_enter_cpm( boolean_t anywhere = ((VM_FLAGS_ANYWHERE & flags) != 0); vm_tag_t tag; + if (VM_MAP_PAGE_SHIFT(map) != PAGE_SHIFT) { + /* XXX TODO4K do we need to support this? */ + *addr = 0; + return KERN_NOT_SUPPORTED; + } + VM_GET_FLAGS_ALIAS(flags, tag); if (size == 0) { @@ -5133,7 +5442,9 @@ vm_map_enter_cpm( type_of_fault = DBG_ZERO_FILL_FAULT; - vm_fault_enter(m, pmap, va, VM_PROT_ALL, VM_PROT_WRITE, + vm_fault_enter(m, pmap, va, + PAGE_SIZE, 0, + VM_PROT_ALL, VM_PROT_WRITE, VM_PAGE_WIRED(m), FALSE, /* change_wiring */ VM_KERN_MEMORY_NONE, /* tag - not wiring */ @@ -5323,8 +5634,8 @@ vm_map_clip_start( * vm_map_clip_unnest may perform additional adjustments to * the unnest range. 
*/ - start_unnest = startaddr & ~(pmap_nesting_size_min - 1); - end_unnest = start_unnest + pmap_nesting_size_min; + start_unnest = startaddr & ~(pmap_shared_region_size_min(map->pmap) - 1); + end_unnest = start_unnest + pmap_shared_region_size_min(map->pmap); vm_map_clip_unnest(map, entry, start_unnest, end_unnest); } #endif /* NO_NESTED_PMAP */ @@ -5441,8 +5752,8 @@ vm_map_clip_end( */ start_unnest = entry->vme_start; end_unnest = - (endaddr + pmap_nesting_size_min - 1) & - ~(pmap_nesting_size_min - 1); + (endaddr + pmap_shared_region_size_min(map->pmap) - 1) & + ~(pmap_shared_region_size_min(map->pmap) - 1); vm_map_clip_unnest(map, entry, start_unnest, end_unnest); } #endif /* NO_NESTED_PMAP */ @@ -5699,7 +6010,6 @@ vm_map_submap( result = pmap_nest(map->pmap, (VME_SUBMAP(entry))->pmap, (addr64_t)start, - (addr64_t)start, (uint64_t)(end - start)); if (result) { panic("vm_map_submap: pmap_nest failed, rc = %08X\n", result); @@ -5749,10 +6059,14 @@ vm_map_protect( return KERN_INVALID_ADDRESS; } -#if VM_PROTECT_WX_FAIL if ((new_prot & VM_PROT_EXECUTE) && - map != kernel_map && - cs_process_enforcement(NULL)) { + map->pmap != kernel_pmap && + (vm_map_cs_enforcement(map) +#if XNU_TARGET_OS_OSX && __arm64__ + || !VM_MAP_IS_EXOTIC(map) +#endif /* XNU_TARGET_OS_OSX && __arm64__ */ + ) && + VM_MAP_POLICY_WX_FAIL(map)) { DTRACE_VM3(cs_wx, uint64_t, (uint64_t) start, uint64_t, (uint64_t) end, @@ -5765,7 +6079,6 @@ vm_map_protect( __FUNCTION__); return KERN_PROTECTION_FAILURE; } -#endif /* VM_PROTECT_WX_FAIL */ /* * Let vm_map_remap_extract() know that it will need to: @@ -5851,6 +6164,11 @@ vm_map_protect( } new_max = current->max_protection; +#if PMAP_CS + if (set_max && (new_prot & VM_PROT_EXECUTE) && pmap_cs_exempt(map->pmap)) { + new_max |= VM_PROT_EXECUTE; + } +#endif if ((new_prot & new_max) != new_prot) { vm_map_unlock(map); return KERN_PROTECTION_FAILURE; @@ -5858,10 +6176,17 @@ vm_map_protect( if ((new_prot & VM_PROT_WRITE) && (new_prot & VM_PROT_EXECUTE) && -#if !CONFIG_EMBEDDED - map != kernel_map && - cs_process_enforcement(NULL) && -#endif /* !CONFIG_EMBEDDED */ +#if XNU_TARGET_OS_OSX + map->pmap != kernel_pmap && + (vm_map_cs_enforcement(map) +#if __arm64__ + || !VM_MAP_IS_EXOTIC(map) +#endif /* __arm64__ */ + ) && +#endif /* XNU_TARGET_OS_OSX */ +#if PMAP_CS + !pmap_cs_exempt(map->pmap) && +#endif !(current->used_for_jit)) { DTRACE_VM3(cs_wx, uint64_t, (uint64_t) current->vme_start, @@ -5874,10 +6199,10 @@ vm_map_protect( : "?"), __FUNCTION__); new_prot &= ~VM_PROT_EXECUTE; -#if VM_PROTECT_WX_FAIL - vm_map_unlock(map); - return KERN_PROTECTION_FAILURE; -#endif /* VM_PROTECT_WX_FAIL */ + if (VM_MAP_POLICY_WX_FAIL(map)) { + vm_map_unlock(map); + return KERN_PROTECTION_FAILURE; + } } /* @@ -5985,16 +6310,16 @@ vm_map_protect( prot |= VM_PROT_EXECUTE; } -#if CONFIG_EMBEDDED && (DEVELOPMENT || DEBUG) +#if DEVELOPMENT || DEBUG if (!(old_prot & VM_PROT_EXECUTE) && (prot & VM_PROT_EXECUTE) && panic_on_unsigned_execute && (proc_selfcsflags() & CS_KILL)) { panic("vm_map_protect(%p,0x%llx,0x%llx) old=0x%x new=0x%x - code-signing bypass?\n", map, (uint64_t)current->vme_start, (uint64_t)current->vme_end, old_prot, prot); } -#endif /* CONFIG_EMBEDDED && (DEVELOPMENT || DEBUG) */ +#endif /* DEVELOPMENT || DEBUG */ - if (pmap_has_prot_policy(prot)) { + if (pmap_has_prot_policy(map->pmap, current->translated_allow_execute, prot)) { if (current->wired_count) { panic("vm_map_protect(%p,0x%llx,0x%llx) new=0x%x wired=%x\n", map, (uint64_t)current->vme_start, (uint64_t)current->vme_end, prot, 
current->wired_count); @@ -6156,6 +6481,11 @@ add_wire_counts( if (size + map->user_wire_size > MIN(map->user_wire_limit, vm_per_task_user_wire_limit) || size + ptoa_64(total_wire_count) > vm_global_user_wire_limit) { + if (size + ptoa_64(total_wire_count) > vm_global_user_wire_limit) { + os_atomic_inc(&vm_add_wire_count_over_global_limit, relaxed); + } else { + os_atomic_inc(&vm_add_wire_count_over_user_limit, relaxed); + } return KERN_RESOURCE_SHORTAGE; } @@ -6270,6 +6600,21 @@ vm_map_wire_nested( unsigned int last_timestamp; vm_map_size_t size; boolean_t wire_and_extract; + vm_prot_t extra_prots; + + extra_prots = VM_PROT_COPY; + extra_prots |= VM_PROT_COPY_FAIL_IF_EXECUTABLE; +#if XNU_TARGET_OS_OSX + if (map->pmap == kernel_pmap || + !vm_map_cs_enforcement(map)) { + extra_prots &= ~VM_PROT_COPY_FAIL_IF_EXECUTABLE; + } +#endif /* XNU_TARGET_OS_OSX */ +#if PMAP_CS + if (pmap_cs_exempt(map->pmap)) { + extra_prots &= ~VM_PROT_COPY_FAIL_IF_EXECUTABLE; + } +#endif /* PMAP_CS */ access_type = (caller_prot & VM_PROT_ALL); @@ -6294,10 +6639,9 @@ vm_map_wire_nested( last_timestamp = map->timestamp; VM_MAP_RANGE_CHECK(map, start, end); - assert(page_aligned(start)); - assert(page_aligned(end)); assert(VM_MAP_PAGE_ALIGNED(start, VM_MAP_PAGE_MASK(map))); assert(VM_MAP_PAGE_ALIGNED(end, VM_MAP_PAGE_MASK(map))); + if (start == end) { /* We wired what the caller asked for, zero pages */ vm_map_unlock(map); @@ -6472,19 +6816,20 @@ vm_map_wire_nested( local_start = entry->vme_start; lookup_map = map; vm_map_lock_write_to_read(map); - if (vm_map_lookup_locked( - &lookup_map, local_start, - access_type | VM_PROT_COPY, - OBJECT_LOCK_EXCLUSIVE, - &version, &object, - &offset, &prot, &wired, - NULL, - &real_map)) { + rc = vm_map_lookup_locked( + &lookup_map, local_start, + (access_type | extra_prots), + OBJECT_LOCK_EXCLUSIVE, + &version, &object, + &offset, &prot, &wired, + NULL, + &real_map, NULL); + if (rc != KERN_SUCCESS) { vm_map_unlock_read(lookup_map); assert(map_pmap == NULL); vm_map_unwire(map, start, s, user_wire); - return KERN_FAILURE; + return rc; } vm_object_unlock(object); if (real_map != lookup_map) { @@ -6680,11 +7025,19 @@ vm_map_wire_nested( */ if ((entry->protection & VM_PROT_EXECUTE) -#if !CONFIG_EMBEDDED +#if XNU_TARGET_OS_OSX && map->pmap != kernel_pmap && - cs_process_enforcement(NULL) -#endif /* !CONFIG_EMBEDDED */ + (vm_map_cs_enforcement(map) +#if __arm64__ + || !VM_MAP_IS_EXOTIC(map) +#endif /* __arm64__ */ + ) +#endif /* XNU_TARGET_OS_OSX */ +#if PMAP_CS + && + !pmap_cs_exempt(map->pmap) +#endif ) { #if MACH_ASSERT printf("pid %d[%s] wiring executable range from " @@ -7012,8 +7365,6 @@ vm_map_unwire_nested( last_timestamp = map->timestamp; VM_MAP_RANGE_CHECK(map, start, end); - assert(page_aligned(start)); - assert(page_aligned(end)); assert(VM_MAP_PAGE_ALIGNED(start, VM_MAP_PAGE_MASK(map))); assert(VM_MAP_PAGE_ALIGNED(end, VM_MAP_PAGE_MASK(map))); @@ -7360,8 +7711,12 @@ vm_map_entry_delete( s = entry->vme_start; e = entry->vme_end; - assert(page_aligned(s)); - assert(page_aligned(e)); + assert(VM_MAP_PAGE_ALIGNED(s, FOURK_PAGE_MASK)); + assert(VM_MAP_PAGE_ALIGNED(e, FOURK_PAGE_MASK)); + if (VM_MAP_PAGE_MASK(map) >= PAGE_MASK) { + assert(page_aligned(s)); + assert(page_aligned(e)); + } if (entry->map_aligned == TRUE) { assert(VM_MAP_PAGE_ALIGNED(s, VM_MAP_PAGE_MASK(map))); assert(VM_MAP_PAGE_ALIGNED(e, VM_MAP_PAGE_MASK(map))); @@ -7441,6 +7796,7 @@ vm_map_submap_pmap_clean( entry->vme_start), remove_size, PMAP_NULL, + PAGE_SIZE, entry->vme_start, VM_PROT_NONE, 
PMAP_OPTIONS_REMOVE); @@ -7476,6 +7832,7 @@ vm_map_submap_pmap_clean( VME_OFFSET(entry), remove_size, PMAP_NULL, + PAGE_SIZE, entry->vme_start, VM_PROT_NONE, PMAP_OPTIONS_REMOVE); @@ -8101,7 +8458,17 @@ vm_map_delete( * all this. */ } else if (entry->is_sub_map) { + assertf(VM_MAP_PAGE_SHIFT(VME_SUBMAP(entry)) >= VM_MAP_PAGE_SHIFT(map), + "map %p (%d) entry %p submap %p (%d)\n", + map, VM_MAP_PAGE_SHIFT(map), entry, + VME_SUBMAP(entry), + VM_MAP_PAGE_SHIFT(VME_SUBMAP(entry))); if (entry->use_pmap) { + assertf(VM_MAP_PAGE_SHIFT(VME_SUBMAP(entry)) == VM_MAP_PAGE_SHIFT(map), + "map %p (%d) entry %p submap %p (%d)\n", + map, VM_MAP_PAGE_SHIFT(map), entry, + VME_SUBMAP(entry), + VM_MAP_PAGE_SHIFT(VME_SUBMAP(entry))); #ifndef NO_NESTED_PMAP int pmap_flags; @@ -8156,6 +8523,7 @@ vm_map_delete( object, VME_OFFSET(entry), entry->vme_end - entry->vme_start, PMAP_NULL, + PAGE_SIZE, entry->vme_start, VM_PROT_NONE, PMAP_OPTIONS_REMOVE); @@ -8375,14 +8743,14 @@ vm_map_remove( vm_map_lock(map); VM_MAP_RANGE_CHECK(map, start, end); /* - * For the zone_map, the kernel controls the allocation/freeing of memory. - * Any free to the zone_map should be within the bounds of the map and + * For the zone maps, the kernel controls the allocation/freeing of memory. + * Any free to the zone maps should be within the bounds of the map and * should free up memory. If the VM_MAP_RANGE_CHECK() silently converts a - * free to the zone_map into a no-op, there is a problem and we should + * free to the zone maps into a no-op, there is a problem and we should * panic. */ - if ((map == zone_map) && (start == end)) { - panic("Nothing being freed to the zone_map. start = end = %p\n", (void *)start); + if ((start == end) && zone_maps_owned(start, 1)) { + panic("Nothing being freed to a zone map. start = end = %p\n", (void *)start); } result = vm_map_delete(map, start, end, flags, VM_MAP_NULL); vm_map_unlock(map); @@ -8447,7 +8815,6 @@ vm_map_copy_discard( switch (copy->type) { case VM_MAP_COPY_ENTRY_LIST: - zone_require(copy, vm_map_copy_zone); while (vm_map_copy_first_entry(copy) != vm_map_copy_to_entry(copy)) { vm_map_entry_t entry = vm_map_copy_first_entry(copy); @@ -8462,22 +8829,20 @@ vm_map_copy_discard( } break; case VM_MAP_COPY_OBJECT: - zone_require(copy, vm_map_copy_zone); vm_object_deallocate(copy->cpy_object); break; case VM_MAP_COPY_KERNEL_BUFFER: /* * The vm_map_copy_t and possibly the data buffer were - * allocated by a single call to kalloc(), i.e. the + * allocated by a single call to kheap_alloc(), i.e. the * vm_map_copy_t was not allocated out of the zone. */ if (copy->size > msg_ool_size_small || copy->offset) { panic("Invalid vm_map_copy_t sz:%lld, ofst:%lld", (long long)copy->size, (long long)copy->offset); } - kfree(copy, copy->size + cpy_kdata_hdr_sz); - return; + kheap_free(KHEAP_DATA_BUFFERS, copy->cpy_kdata, copy->size); } zfree(vm_map_copy_zone, copy); } @@ -8515,10 +8880,14 @@ vm_map_copy_copy( */ new_copy = (vm_map_copy_t) zalloc(vm_map_copy_zone); - *new_copy = *copy; + memcpy((void *) new_copy, (void *) copy, sizeof(struct vm_map_copy)); +#if __has_feature(ptrauth_calls) + if (copy->type == VM_MAP_COPY_KERNEL_BUFFER) { + new_copy->cpy_kdata = copy->cpy_kdata; + } +#endif if (copy->type == VM_MAP_COPY_ENTRY_LIST) { - zone_require(copy, vm_map_copy_zone); /* * The links in the entry chain must be * changed to point to the new copy object. 
@@ -8760,6 +9129,7 @@ vm_map_copy_overwrite_nested( vm_map_offset_t base_addr; vm_map_size_t copy_size; vm_map_size_t total_size; + int copy_page_shift; /* @@ -8770,6 +9140,12 @@ vm_map_copy_overwrite_nested( return KERN_SUCCESS; } + /* + * Assert that the vm_map_copy is coming from the right + * zone and hasn't been forged + */ + vm_map_copy_require(copy); + /* * Check for special kernel buffer allocated * by new_ipc_kmsg_copyin. @@ -8787,7 +9163,6 @@ vm_map_copy_overwrite_nested( */ assert(copy->type == VM_MAP_COPY_ENTRY_LIST); - zone_require(copy, vm_map_copy_zone); if (copy->size == 0) { if (discard_on_success) { @@ -8796,6 +9171,8 @@ vm_map_copy_overwrite_nested( return KERN_SUCCESS; } + copy_page_shift = copy->cpy_hdr.page_shift; + /* * Verify that the destination is all writeable * initially. We have to trunc the destination @@ -8808,7 +9185,8 @@ vm_map_copy_overwrite_nested( !VM_MAP_PAGE_ALIGNED(copy->offset, VM_MAP_PAGE_MASK(dst_map)) || !VM_MAP_PAGE_ALIGNED(dst_addr, - VM_MAP_PAGE_MASK(dst_map))) { + VM_MAP_PAGE_MASK(dst_map)) || + copy_page_shift != VM_MAP_PAGE_SHIFT(dst_map)) { aligned = FALSE; dst_end = vm_map_round_page(dst_addr + copy->size, VM_MAP_PAGE_MASK(dst_map)); @@ -9169,6 +9547,7 @@ start_overwrite: copy = vm_map_copy_allocate(); copy->type = VM_MAP_COPY_ENTRY_LIST; copy->offset = new_offset; + copy->cpy_hdr.page_shift = copy_page_shift; /* * XXX FBDP @@ -9373,6 +9752,7 @@ vm_map_copy_overwrite( vm_map_entry_t entry; kern_return_t kr; vm_map_offset_t effective_page_mask, effective_page_size; + int copy_page_shift; head_size = 0; tail_size = 0; @@ -9397,9 +9777,19 @@ blunt_copy: TRUE); } - effective_page_mask = MAX(VM_MAP_PAGE_MASK(dst_map), PAGE_MASK); - effective_page_mask = MAX(VM_MAP_COPY_PAGE_MASK(copy), - effective_page_mask); + copy_page_shift = VM_MAP_COPY_PAGE_SHIFT(copy); + if (copy_page_shift < PAGE_SHIFT || + VM_MAP_PAGE_SHIFT(dst_map) < PAGE_SHIFT) { + goto blunt_copy; + } + + if (VM_MAP_PAGE_SHIFT(dst_map) < PAGE_SHIFT) { + effective_page_mask = VM_MAP_PAGE_MASK(dst_map); + } else { + effective_page_mask = MAX(VM_MAP_PAGE_MASK(dst_map), PAGE_MASK); + effective_page_mask = MAX(VM_MAP_COPY_PAGE_MASK(copy), + effective_page_mask); + } effective_page_size = effective_page_mask + 1; if (copy_size < VM_MAP_COPY_OVERWRITE_OPTIMIZATION_THRESHOLD_PAGES * effective_page_size) { @@ -9486,6 +9876,7 @@ blunt_copy: head_copy->cpy_hdr.entries_pageable = copy->cpy_hdr.entries_pageable; vm_map_store_init(&head_copy->cpy_hdr); + head_copy->cpy_hdr.page_shift = copy_page_shift; entry = vm_map_copy_first_entry(copy); if (entry->vme_end < copy->offset + head_size) { @@ -9528,6 +9919,7 @@ blunt_copy: tail_copy->cpy_hdr.entries_pageable = copy->cpy_hdr.entries_pageable; vm_map_store_init(&tail_copy->cpy_hdr); + tail_copy->cpy_hdr.page_shift = copy_page_shift; tail_copy->offset = copy->offset + copy_size - tail_size; tail_copy->size = tail_size; @@ -9587,7 +9979,6 @@ blunt_copy: done: assert(copy->type == VM_MAP_COPY_ENTRY_LIST); - zone_require(copy, vm_map_copy_zone); if (kr == KERN_SUCCESS) { /* * Discard all the copy maps. 
@@ -9681,7 +10072,7 @@ vm_map_copy_overwrite_unaligned( vm_map_lock_write_to_read(dst_map); - src_offset = copy->offset - vm_object_trunc_page(copy->offset); + src_offset = copy->offset - trunc_page_mask_64(copy->offset, VM_MAP_COPY_PAGE_MASK(copy)); amount_left = copy->size; /* * unaligned so we never clipped this entry, we need the offset into @@ -9991,7 +10382,7 @@ vm_map_copy_overwrite_aligned( continue; } -#if !CONFIG_EMBEDDED +#if XNU_TARGET_OS_OSX #define __TRADEOFF1_OBJ_SIZE (64 * 1024 * 1024) /* 64 MB */ #define __TRADEOFF1_COPY_SIZE (128 * 1024) /* 128 KB */ if (VME_OBJECT(copy_entry) != VM_OBJECT_NULL && @@ -10009,7 +10400,7 @@ vm_map_copy_overwrite_aligned( vm_map_copy_overwrite_aligned_src_large++; goto slow_copy; } -#endif /* !CONFIG_EMBEDDED */ +#endif /* XNU_TARGET_OS_OSX */ if ((dst_map->pmap != kernel_pmap) && (VME_ALIAS(entry) >= VM_MEMORY_MALLOC) && @@ -10025,10 +10416,10 @@ vm_map_copy_overwrite_aligned( vm_object_lock_shared(new_object); } while (new_object != VM_OBJECT_NULL && -#if !CONFIG_EMBEDDED +#if XNU_TARGET_OS_OSX !new_object->true_share && new_object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC && -#endif /* !CONFIG_EMBEDDED */ +#endif /* XNU_TARGET_OS_OSX */ new_object->internal) { new_shadow = new_object->shadow; if (new_shadow == VM_OBJECT_NULL) { @@ -10053,7 +10444,7 @@ vm_map_copy_overwrite_aligned( vm_object_unlock(new_object); goto slow_copy; } -#if !CONFIG_EMBEDDED +#if XNU_TARGET_OS_OSX if (new_object->true_share || new_object->copy_strategy != MEMORY_OBJECT_COPY_SYMMETRIC) { /* @@ -10066,7 +10457,7 @@ vm_map_copy_overwrite_aligned( vm_object_unlock(new_object); goto slow_copy; } -#endif /* !CONFIG_EMBEDDED */ +#endif /* XNU_TARGET_OS_OSX */ vm_object_unlock(new_object); } /* @@ -10110,6 +10501,7 @@ vm_map_copy_overwrite_aligned( entry->vme_end - entry->vme_start, PMAP_NULL, + PAGE_SIZE, entry->vme_start, VM_PROT_NONE, PMAP_OPTIONS_REMOVE); @@ -10318,25 +10710,29 @@ vm_map_copyin_kernel_buffer( { kern_return_t kr; vm_map_copy_t copy; - vm_size_t kalloc_size; if (len > msg_ool_size_small) { return KERN_INVALID_ARGUMENT; } - kalloc_size = (vm_size_t)(cpy_kdata_hdr_sz + len); - - copy = (vm_map_copy_t)kalloc(kalloc_size); + copy = zalloc_flags(vm_map_copy_zone, Z_WAITOK | Z_ZERO); if (copy == VM_MAP_COPY_NULL) { return KERN_RESOURCE_SHORTAGE; } + copy->cpy_kdata = kheap_alloc(KHEAP_DATA_BUFFERS, len, Z_WAITOK); + if (copy->cpy_kdata == NULL) { + zfree(vm_map_copy_zone, copy); + return KERN_RESOURCE_SHORTAGE; + } + copy->type = VM_MAP_COPY_KERNEL_BUFFER; copy->size = len; copy->offset = 0; kr = copyinmap(src_map, src_addr, copy->cpy_kdata, (vm_size_t)len); if (kr != KERN_SUCCESS) { - kfree(copy, kalloc_size); + kheap_free(KHEAP_DATA_BUFFERS, copy->cpy_kdata, len); + zfree(vm_map_copy_zone, copy); return kr; } if (src_destroy) { @@ -10470,7 +10866,8 @@ vm_map_copyout_kernel_buffer( } else { /* copy was successful, dicard the copy structure */ if (consume_on_success) { - kfree(copy, copy_size + cpy_kdata_hdr_sz); + kheap_free(KHEAP_DATA_BUFFERS, copy->cpy_kdata, copy_size); + zfree(vm_map_copy_zone, copy); } } @@ -10523,7 +10920,7 @@ vm_map_copy_remap( new_entry = vm_map_entry_create(map, !map->hdr.entries_pageable); /* copy the "copy entry" to the new entry */ - vm_map_entry_copy(new_entry, copy_entry); + vm_map_entry_copy(map, new_entry, copy_entry); /* adjust "start" and "end" */ new_entry->vme_start += adjustment; new_entry->vme_end += adjustment; @@ -10568,7 +10965,6 @@ vm_map_copy_validate_size( vm_map_size_t sz = *size; switch (copy->type) { case 
VM_MAP_COPY_OBJECT: - zone_require(copy, vm_map_copy_zone); case VM_MAP_COPY_KERNEL_BUFFER: if (sz == copy_sz) { return TRUE; @@ -10580,7 +10976,6 @@ vm_map_copy_validate_size( * validating this flavor of vm_map_copy, but we can at least * assert that it's within a range. */ - zone_require(copy, vm_map_copy_zone); if (copy_sz >= sz && copy_sz <= vm_map_round_page(sz, VM_MAP_PAGE_MASK(dst_map))) { *size = copy_sz; @@ -10659,6 +11054,7 @@ vm_map_copyout_internal( vm_map_entry_t last; vm_map_entry_t entry; vm_map_entry_t hole_entry; + vm_map_copy_t original_copy; /* * Check for null copy object. @@ -10669,6 +11065,12 @@ vm_map_copyout_internal( return KERN_SUCCESS; } + /* + * Assert that the vm_map_copy is coming from the right + * zone and hasn't been forged + */ + vm_map_copy_require(copy); + if (copy->size != copy_size) { *dst_addr = 0; return KERN_FAILURE; @@ -10680,7 +11082,6 @@ vm_map_copyout_internal( */ if (copy->type == VM_MAP_COPY_OBJECT) { - zone_require(copy, vm_map_copy_zone); vm_object_t object = copy->cpy_object; kern_return_t kr; vm_object_offset_t offset; @@ -10720,7 +11121,35 @@ vm_map_copyout_internal( consume_on_success); } - zone_require(copy, vm_map_copy_zone); + original_copy = copy; + if (copy->cpy_hdr.page_shift != VM_MAP_PAGE_SHIFT(dst_map)) { + kern_return_t kr; + vm_map_copy_t target_copy; + vm_map_offset_t overmap_start, overmap_end, trimmed_start; + + target_copy = VM_MAP_COPY_NULL; + DEBUG4K_ADJUST("adjusting...\n"); + kr = vm_map_copy_adjust_to_target( + copy, + 0, /* offset */ + copy->size, /* size */ + dst_map, + TRUE, /* copy */ + &target_copy, + &overmap_start, + &overmap_end, + &trimmed_start); + if (kr != KERN_SUCCESS) { + DEBUG4K_COPY("adjust failed 0x%x\n", kr); + return kr; + } + DEBUG4K_COPY("copy %p (%d 0x%llx 0x%llx) dst_map %p (%d) target_copy %p (%d 0x%llx 0x%llx) overmap_start 0x%llx overmap_end 0x%llx trimmed_start 0x%llx\n", copy, copy->cpy_hdr.page_shift, copy->offset, (uint64_t)copy->size, dst_map, VM_MAP_PAGE_SHIFT(dst_map), target_copy, target_copy->cpy_hdr.page_shift, target_copy->offset, (uint64_t)target_copy->size, (uint64_t)overmap_start, (uint64_t)overmap_end, (uint64_t)trimmed_start); + if (target_copy != copy) { + copy = target_copy; + } + copy_size = copy->size; + } + /* * Find space for the data */ @@ -10935,6 +11364,9 @@ StartAgain:; vm_prot_t prot; int type_of_fault; + /* TODO4K would need to use actual page size */ + assert(VM_MAP_PAGE_SHIFT(dst_map) == PAGE_SHIFT); + object = VME_OBJECT(entry); offset = VME_OFFSET(entry); va = entry->vme_start; @@ -10992,6 +11424,7 @@ StartAgain:; vm_fault_enter(m, dst_map->pmap, va, + PAGE_SIZE, 0, prot, prot, VM_PAGE_WIRED(m), @@ -11039,12 +11472,21 @@ after_adjustments: if (consume_on_success) { vm_map_copy_insert(dst_map, last, copy); + if (copy != original_copy) { + vm_map_copy_discard(original_copy); + original_copy = VM_MAP_COPY_NULL; + } } else { vm_map_copy_remap(dst_map, last, copy, adjustment, cur_protection, max_protection, inheritance); + if (copy != original_copy && original_copy != VM_MAP_COPY_NULL) { + vm_map_copy_discard(copy); + copy = original_copy; + } } + vm_map_unlock(dst_map); /* @@ -11231,18 +11673,7 @@ vm_map_copyin_internal( copy = vm_map_copy_allocate(); copy->type = VM_MAP_COPY_ENTRY_LIST; copy->cpy_hdr.entries_pageable = TRUE; -#if 00 - copy->cpy_hdr.page_shift = src_map->hdr.page_shift; -#else - /* - * The copy entries can be broken down for a variety of reasons, - * so we can't guarantee that they will remain map-aligned... 
- * Will need to adjust the first copy_entry's "vme_start" and - * the last copy_entry's "vme_end" to be rounded to PAGE_MASK - * rather than the original map's alignment. - */ - copy->cpy_hdr.page_shift = PAGE_SHIFT; -#endif + copy->cpy_hdr.page_shift = VM_MAP_PAGE_SHIFT(src_map); vm_map_store_init( &(copy->cpy_hdr)); @@ -11425,7 +11856,7 @@ vm_map_copyin_internal( src_offset = VME_OFFSET(src_entry); was_wired = (src_entry->wired_count != 0); - vm_map_entry_copy(new_entry, src_entry); + vm_map_entry_copy(src_map, new_entry, src_entry); if (new_entry->is_sub_map) { /* clr address space specifics */ new_entry->use_pmap = FALSE; @@ -11465,7 +11896,8 @@ vm_map_copyin_internal( RestartCopy: if ((src_object == VM_OBJECT_NULL || - (!was_wired && !map_share && !tmp_entry->is_shared)) && + (!was_wired && !map_share && !tmp_entry->is_shared + && !(debug4k_no_cow_copyin && VM_MAP_PAGE_SHIFT(src_map) < PAGE_SHIFT))) && vm_object_copy_quickly( VME_OBJECT_PTR(new_entry), src_offset, @@ -11495,6 +11927,7 @@ RestartCopy: (src_entry->is_shared ? PMAP_NULL : src_map->pmap), + VM_MAP_PAGE_SIZE(src_map), src_entry->vme_start, prot); @@ -11536,7 +11969,9 @@ RestartCopy: * Perform the copy */ - if (was_wired) { + if (was_wired || + (debug4k_no_cow_copyin && + VM_MAP_PAGE_SHIFT(src_map) < PAGE_SHIFT)) { CopySlowly: vm_object_lock(src_object); result = vm_object_copy_slowly( @@ -11545,7 +11980,8 @@ CopySlowly: src_size, THREAD_UNINT, VME_OBJECT_PTR(new_entry)); - VME_OFFSET_SET(new_entry, 0); + VME_OFFSET_SET(new_entry, + src_offset - vm_object_trunc_page(src_offset)); new_entry->needs_copy = FALSE; } else if (src_object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC && (entry_was_shared || map_share)) { @@ -11585,8 +12021,20 @@ CopySlowly: } if (result == KERN_SUCCESS && - preserve_purgeable && - src_object->purgable != VM_PURGABLE_DENY) { + ((preserve_purgeable && + src_object->purgable != VM_PURGABLE_DENY) || + new_entry->used_for_jit)) { + /* + * Purgeable objects should be COPY_NONE, true share; + * this should be propogated to the copy. + * + * Also force mappings the pmap specially protects to + * be COPY_NONE; trying to COW these mappings would + * change the effective protections, which could have + * side effects if the pmap layer relies on the + * specified protections. + */ + vm_object_t new_object; new_object = VME_OBJECT(new_entry); @@ -11598,23 +12046,29 @@ CopySlowly: assert(new_object->vo_owner == NULL); new_object->copy_strategy = MEMORY_OBJECT_COPY_NONE; - new_object->true_share = TRUE; - /* start as non-volatile with no owner... */ - new_object->purgable = VM_PURGABLE_NONVOLATILE; - vm_purgeable_nonvolatile_enqueue(new_object, NULL); - /* ... and move to src_object's purgeable state */ - if (src_object->purgable != VM_PURGABLE_NONVOLATILE) { - int state; - state = src_object->purgable; - vm_object_purgable_control( - new_object, - VM_PURGABLE_SET_STATE_FROM_KERNEL, - &state); + + if (preserve_purgeable && + src_object->purgable != VM_PURGABLE_DENY) { + new_object->true_share = TRUE; + + /* start as non-volatile with no owner... */ + new_object->purgable = VM_PURGABLE_NONVOLATILE; + vm_purgeable_nonvolatile_enqueue(new_object, NULL); + /* ... 
and move to src_object's purgeable state */ + if (src_object->purgable != VM_PURGABLE_NONVOLATILE) { + int state; + state = src_object->purgable; + vm_object_purgable_control( + new_object, + VM_PURGABLE_SET_STATE_FROM_KERNEL, + &state); + } + /* no pmap accounting for purgeable objects */ + new_entry->use_pmap = FALSE; } + vm_object_unlock(new_object); new_object = VM_OBJECT_NULL; - /* no pmap accounting for purgeable objects */ - new_entry->use_pmap = FALSE; } if (result != KERN_SUCCESS && @@ -11849,7 +12303,8 @@ CopySuccessful: ; vm_map_unlock(src_map); tmp_entry = VM_MAP_ENTRY_NULL; - if (VM_MAP_PAGE_SHIFT(src_map) != PAGE_SHIFT) { + if (VM_MAP_PAGE_SHIFT(src_map) > PAGE_SHIFT && + VM_MAP_PAGE_SHIFT(src_map) != VM_MAP_COPY_PAGE_SHIFT(copy)) { vm_map_offset_t original_start, original_offset, original_end; assert(VM_MAP_COPY_PAGE_MASK(copy) == PAGE_MASK); @@ -11957,10 +12412,10 @@ CopySuccessful: ; assert(VM_MAP_PAGE_ALIGNED( copy_addr + (tmp_entry->vme_end - tmp_entry->vme_start), - VM_MAP_COPY_PAGE_MASK(copy))); + MIN(VM_MAP_COPY_PAGE_MASK(copy), PAGE_MASK))); assert(VM_MAP_PAGE_ALIGNED( copy_addr, - VM_MAP_COPY_PAGE_MASK(copy))); + MIN(VM_MAP_COPY_PAGE_MASK(copy), PAGE_MASK))); /* * The copy_entries will be injected directly into the @@ -12009,11 +12464,14 @@ vm_map_copy_extract( vm_map_t src_map, vm_map_address_t src_addr, vm_map_size_t len, + vm_prot_t required_prot, + boolean_t do_copy, vm_map_copy_t *copy_result, /* OUT */ vm_prot_t *cur_prot, /* OUT */ - vm_prot_t *max_prot) + vm_prot_t *max_prot, /* OUT */ + vm_inherit_t inheritance, + vm_map_kernel_flags_t vmk_flags) { - vm_map_offset_t src_start, src_end; vm_map_copy_t copy; kern_return_t kr; @@ -12029,16 +12487,13 @@ vm_map_copy_extract( /* * Check that the end address doesn't overflow */ - src_end = src_addr + len; - if (src_end < src_addr) { + if (src_addr + len < src_addr) { return KERN_INVALID_ADDRESS; } - /* - * Compute (page aligned) start and end of region - */ - src_start = vm_map_trunc_page(src_addr, PAGE_MASK); - src_end = vm_map_round_page(src_end, PAGE_MASK); + if (VM_MAP_PAGE_SIZE(src_map) < PAGE_SIZE) { + DEBUG4K_SHARE("src_map %p src_addr 0x%llx src_end 0x%llx\n", src_map, (uint64_t)src_addr, (uint64_t)(src_addr + len)); + } /* * Allocate a header element for the list. 
@@ -12049,7 +12504,7 @@ vm_map_copy_extract( copy = vm_map_copy_allocate(); copy->type = VM_MAP_COPY_ENTRY_LIST; - copy->cpy_hdr.entries_pageable = TRUE; + copy->cpy_hdr.entries_pageable = vmk_flags.vmkf_copy_pageable; vm_map_store_init(&copy->cpy_hdr); @@ -12059,18 +12514,19 @@ vm_map_copy_extract( kr = vm_map_remap_extract(src_map, src_addr, len, - FALSE, /* copy */ + required_prot, + do_copy, /* copy */ &copy->cpy_hdr, cur_prot, max_prot, - VM_INHERIT_SHARE, - TRUE, /* pageable */ - FALSE, /* same_map */ - VM_MAP_KERNEL_FLAGS_NONE); + inheritance, + vmk_flags); if (kr != KERN_SUCCESS) { vm_map_copy_discard(copy); return kr; } + assert((*cur_prot & required_prot) == required_prot); + assert((*max_prot & required_prot) == required_prot); *copy_result = copy; return KERN_SUCCESS; @@ -12136,7 +12592,6 @@ vm_map_fork_share( result = pmap_nest(new_map->pmap, (VME_SUBMAP(old_entry))->pmap, (addr64_t)old_entry->vme_start, - (addr64_t)old_entry->vme_start, (uint64_t)(old_entry->vme_end - old_entry->vme_start)); if (result) { panic("vm_map_fork_share: pmap_nest failed!"); @@ -12256,11 +12711,11 @@ vm_map_fork_share( (old_entry->protection & VM_PROT_WRITE)) { vm_prot_t prot; - assert(!pmap_has_prot_policy(old_entry->protection)); + assert(!pmap_has_prot_policy(old_map->pmap, old_entry->translated_allow_execute, old_entry->protection)); prot = old_entry->protection & ~VM_PROT_WRITE; - assert(!pmap_has_prot_policy(prot)); + assert(!pmap_has_prot_policy(old_map->pmap, old_entry->translated_allow_execute, prot)); if (override_nx(old_map, VME_ALIAS(old_entry)) && prot) { prot |= VM_PROT_EXECUTE; } @@ -12274,6 +12729,7 @@ vm_map_fork_share( (old_entry->vme_end - old_entry->vme_start), PMAP_NULL, + PAGE_SIZE, old_entry->vme_start, prot); } else { @@ -12318,7 +12774,7 @@ vm_map_fork_share( new_entry = vm_map_entry_create(new_map, FALSE); /* Never the kernel * map or descendants */ - vm_map_entry_copy(new_entry, old_entry); + vm_map_entry_copy(old_map, new_entry, old_entry); old_entry->is_shared = TRUE; new_entry->is_shared = TRUE; @@ -12411,9 +12867,14 @@ vm_map_fork_copy( } /* - * Insert the copy into the new map + * Assert that the vm_map_copy is coming from the right + * zone and hasn't been forged */ + vm_map_copy_require(copy); + /* + * Insert the copy into the new map + */ vm_map_copy_insert(new_map, last, copy); /* @@ -12492,6 +12953,12 @@ vm_map_fork( #if defined(HAS_APPLE_PAC) pmap_flags |= old_map->pmap->disable_jop ? 
PMAP_CREATE_DISABLE_JOP : 0; #endif +#if PMAP_CREATE_FORCE_4K_PAGES + if (VM_MAP_PAGE_SIZE(old_map) == FOURK_PAGE_SIZE && + PAGE_SIZE != FOURK_PAGE_SIZE) { + pmap_flags |= PMAP_CREATE_FORCE_4K_PAGES; + } +#endif /* PMAP_CREATE_FORCE_4K_PAGES */ new_pmap = pmap_create_options(ledger, (vm_map_size_t) 0, pmap_flags); vm_map_reference_swap(old_map); @@ -12509,6 +12976,8 @@ vm_map_fork( old_map->min_offset, old_map->max_offset, map_create_options); + /* inherit cs_enforcement */ + vm_map_cs_enforcement_set(new_map, old_map->cs_enforcement); vm_map_lock(new_map); vm_commit_pagezero_status(new_map); /* inherit the parent map's page size */ @@ -12527,6 +12996,7 @@ vm_map_fork( */ if (old_entry_inheritance == VM_INHERIT_NONE && (options & VM_MAP_FORK_SHARE_IF_INHERIT_NONE) && + (old_entry->protection & VM_PROT_READ) && !(!old_entry->is_sub_map && VME_OBJECT(old_entry) != NULL && VME_OBJECT(old_entry)->pager != NULL && @@ -12577,7 +13047,12 @@ vm_map_fork( } new_entry = vm_map_entry_create(new_map, FALSE); /* never the kernel map or descendants */ - vm_map_entry_copy(new_entry, old_entry); + vm_map_entry_copy(old_map, new_entry, old_entry); + + if (new_entry->used_for_jit == TRUE && new_map->jit_entry_exists == FALSE) { + new_map->jit_entry_exists = TRUE; + } + if (new_entry->is_sub_map) { /* clear address space specifics */ new_entry->use_pmap = FALSE; @@ -12614,7 +13089,7 @@ vm_map_fork( if (src_needs_copy && !old_entry->needs_copy) { vm_prot_t prot; - assert(!pmap_has_prot_policy(old_entry->protection)); + assert(!pmap_has_prot_policy(old_map->pmap, old_entry->translated_allow_execute, old_entry->protection)); prot = old_entry->protection & ~VM_PROT_WRITE; @@ -12623,7 +13098,7 @@ vm_map_fork( prot |= VM_PROT_EXECUTE; } - assert(!pmap_has_prot_policy(prot)); + assert(!pmap_has_prot_policy(old_map->pmap, old_entry->translated_allow_execute, prot)); vm_object_pmap_protect( VME_OBJECT(old_entry), @@ -12634,6 +13109,7 @@ vm_map_fork( || old_map->mapped_in_other_pmaps) ? PMAP_NULL : old_map->pmap), + VM_MAP_PAGE_SIZE(old_map), old_entry->vme_start, prot); @@ -12673,7 +13149,7 @@ slow_vm_map_fork_copy: #if defined(__arm64__) pmap_insert_sharedpage(new_map->pmap); -#endif +#endif /* __arm64__ */ new_map->size = new_size; @@ -12681,6 +13157,12 @@ slow_vm_map_fork_copy: vm_map_corpse_footprint_collect_done(new_map); } + /* Propagate JIT entitlement for the pmap layer. */ + if (pmap_get_jit_entitled(old_map->pmap)) { + /* Tell the pmap that it supports JIT. */ + pmap_set_jit_entitled(new_map->pmap); + } + vm_map_unlock(new_map); vm_map_unlock(old_map); vm_map_deallocate(old_map); @@ -12702,7 +13184,8 @@ vm_map_exec( boolean_t is64bit, void *fsroot, cpu_type_t cpu, - cpu_subtype_t cpu_subtype) + cpu_subtype_t cpu_subtype, + boolean_t reslide) { SHARED_REGION_TRACE_DEBUG( ("shared_region: task %p: vm_map_exec(%p,%p,%p,0x%x,0x%x): ->\n", @@ -12713,7 +13196,9 @@ vm_map_exec( cpu, cpu_subtype)); (void) vm_commpage_enter(new_map, task, is64bit); - (void) vm_shared_region_enter(new_map, task, is64bit, fsroot, cpu, cpu_subtype); + + (void) vm_shared_region_enter(new_map, task, is64bit, fsroot, cpu, cpu_subtype, reslide); + SHARED_REGION_TRACE_DEBUG( ("shared_region: task %p: vm_map_exec(%p,%p,%p,0x%x,0x%x): <-\n", (void *)VM_KERNEL_ADDRPERM(current_task()), @@ -12722,6 +13207,45 @@ vm_map_exec( (void *)VM_KERNEL_ADDRPERM(fsroot), cpu, cpu_subtype)); + + /* + * Some devices have region(s) of memory that shouldn't get allocated by + * user processes. 
The following code creates dummy vm_map_entry_t's for each + * of the regions that needs to be reserved to prevent any allocations in + * those regions. + */ + kern_return_t kr = KERN_FAILURE; + vm_map_kernel_flags_t vmk_flags = VM_MAP_KERNEL_FLAGS_NONE; + vmk_flags.vmkf_permanent = TRUE; + vmk_flags.vmkf_beyond_max = TRUE; + + struct vm_reserved_region *regions = NULL; + size_t num_regions = ml_get_vm_reserved_regions(is64bit, &regions); + assert((num_regions == 0) || (num_regions > 0 && regions != NULL)); + + for (size_t i = 0; i < num_regions; ++i) { + kr = vm_map_enter( + new_map, + &regions[i].vmrr_addr, + regions[i].vmrr_size, + (vm_map_offset_t)0, + VM_FLAGS_FIXED, + vmk_flags, + VM_KERN_MEMORY_NONE, + VM_OBJECT_NULL, + (vm_object_offset_t)0, + FALSE, + VM_PROT_NONE, + VM_PROT_NONE, + VM_INHERIT_NONE); + + if (kr != KERN_SUCCESS) { + panic("Failed to reserve %s region in user map %p %d", regions[i].vmrr_name, new_map, kr); + } + } + + new_map->reserved_regions = (num_regions ? TRUE : FALSE); + return KERN_SUCCESS; } @@ -12738,6 +13262,9 @@ vm_map_exec( * this map has the only reference to the data in question. * In order to later verify this lookup, a "version" * is returned. + * If contended != NULL, *contended will be set to + * true iff the thread had to spin or block to acquire + * an exclusive lock. * * The map MUST be locked by the caller and WILL be * locked on exit. In order to guarantee the @@ -12761,7 +13288,8 @@ vm_map_lookup_locked( vm_prot_t *out_prot, /* OUT */ boolean_t *wired, /* OUT */ vm_object_fault_info_t fault_info, /* OUT */ - vm_map_t *real_map) + vm_map_t *real_map, /* OUT */ + bool *contended) /* OUT */ { vm_map_entry_t entry; vm_map_t map = *var_map; @@ -12773,7 +13301,9 @@ vm_map_lookup_locked( vm_prot_t prot; boolean_t mask_protections; boolean_t force_copy; + boolean_t no_force_copy_if_executable; vm_prot_t original_fault_type; + vm_map_size_t fault_page_mask; /* * VM_PROT_MASK means that the caller wants us to use "fault_type" @@ -12782,11 +13312,18 @@ vm_map_lookup_locked( */ mask_protections = (fault_type & VM_PROT_IS_MASK) ? TRUE : FALSE; force_copy = (fault_type & VM_PROT_COPY) ? TRUE : FALSE; + no_force_copy_if_executable = (fault_type & VM_PROT_COPY_FAIL_IF_EXECUTABLE) ? 
TRUE : FALSE; fault_type &= VM_PROT_ALL; original_fault_type = fault_type; + if (contended) { + *contended = false; + } *real_map = map; + fault_page_mask = MIN(VM_MAP_PAGE_MASK(map), PAGE_MASK); + vaddr = VM_MAP_TRUNC_PAGE(vaddr, fault_page_mask); + RetryLookup: fault_type = original_fault_type; @@ -12832,11 +13369,19 @@ submap_recurse: vm_map_offset_t local_vaddr; vm_map_offset_t end_delta; vm_map_offset_t start_delta; - vm_map_entry_t submap_entry; + vm_map_entry_t submap_entry, saved_submap_entry; + vm_object_offset_t submap_entry_offset; + vm_object_size_t submap_entry_size; vm_prot_t subentry_protection; vm_prot_t subentry_max_protection; boolean_t subentry_no_copy_on_read; boolean_t mapped_needs_copy = FALSE; + vm_map_version_t version; + + assertf(VM_MAP_PAGE_SHIFT(VME_SUBMAP(entry)) >= VM_MAP_PAGE_SHIFT(map), + "map %p (%d) entry %p submap %p (%d)\n", + map, VM_MAP_PAGE_SHIFT(map), entry, + VME_SUBMAP(entry), VM_MAP_PAGE_SHIFT(VME_SUBMAP(entry))); local_vaddr = vaddr; @@ -12895,6 +13440,9 @@ submap_recurse: /* calculate the offset in the submap for vaddr */ local_vaddr = (local_vaddr - entry->vme_start) + VME_OFFSET(entry); + assertf(VM_MAP_PAGE_ALIGNED(local_vaddr, fault_page_mask), + "local_vaddr 0x%llx entry->vme_start 0x%llx fault_page_mask 0x%llx\n", + (uint64_t)local_vaddr, (uint64_t)entry->vme_start, (uint64_t)fault_page_mask); RetrySubMap: if (!vm_map_lookup_entry(map, local_vaddr, &submap_entry)) { @@ -12945,7 +13493,9 @@ RetrySubMap: vm_object_offset_t copy_offset; vm_map_offset_t local_start; vm_map_offset_t local_end; - boolean_t copied_slowly = FALSE; + boolean_t copied_slowly = FALSE; + vm_object_offset_t copied_slowly_phys_offset = 0; + kern_return_t kr = KERN_SUCCESS; if (vm_map_lock_read_to_write(map)) { vm_map_lock_read(map); @@ -12983,18 +13533,97 @@ RetrySubMap: /* object in the submap, bypassing the */ /* submap. 
*/ - if (submap_entry->wired_count != 0 || - (sub_object->copy_strategy == - MEMORY_OBJECT_COPY_NONE)) { + (sub_object->copy_strategy != + MEMORY_OBJECT_COPY_SYMMETRIC)) { + if ((submap_entry->protection & VM_PROT_EXECUTE) && + no_force_copy_if_executable) { +// printf("FBDP map %p entry %p start 0x%llx end 0x%llx wired %d strat %d\n", map, submap_entry, (uint64_t)local_start, (uint64_t)local_end, submap_entry->wired_count, sub_object->copy_strategy); + if ((cow_sub_map_parent) && (cow_sub_map_parent != map)) { + vm_map_unlock(cow_sub_map_parent); + } + if ((*real_map != map) + && (*real_map != cow_sub_map_parent)) { + vm_map_unlock(*real_map); + } + *real_map = map; + vm_map_lock_write_to_read(map); + kr = KERN_PROTECTION_FAILURE; + DTRACE_VM4(submap_no_copy_executable, + vm_map_t, map, + vm_object_offset_t, submap_entry_offset, + vm_object_size_t, submap_entry_size, + int, kr); + return kr; + } + + vm_object_reference(sub_object); + + assertf(VM_MAP_PAGE_ALIGNED(VME_OFFSET(submap_entry), VM_MAP_PAGE_MASK(map)), + "submap_entry %p offset 0x%llx\n", + submap_entry, VME_OFFSET(submap_entry)); + submap_entry_offset = VME_OFFSET(submap_entry); + submap_entry_size = submap_entry->vme_end - submap_entry->vme_start; + + DTRACE_VM6(submap_copy_slowly, + vm_map_t, cow_sub_map_parent, + vm_map_offset_t, vaddr, + vm_map_t, map, + vm_object_size_t, submap_entry_size, + int, submap_entry->wired_count, + int, sub_object->copy_strategy); + + saved_submap_entry = submap_entry; + version.main_timestamp = map->timestamp; + vm_map_unlock(map); /* Increments timestamp by 1 */ + submap_entry = VM_MAP_ENTRY_NULL; + vm_object_lock(sub_object); - vm_object_copy_slowly(sub_object, - VME_OFFSET(submap_entry), - (submap_entry->vme_end - - submap_entry->vme_start), + kr = vm_object_copy_slowly(sub_object, + submap_entry_offset, + submap_entry_size, FALSE, &copy_object); copied_slowly = TRUE; + /* 4k: account for extra offset in physical page */ + copied_slowly_phys_offset = submap_entry_offset - vm_object_trunc_page(submap_entry_offset); + vm_object_deallocate(sub_object); + + vm_map_lock(map); + + if (kr != KERN_SUCCESS && + kr != KERN_MEMORY_RESTART_COPY) { + if ((cow_sub_map_parent) && (cow_sub_map_parent != map)) { + vm_map_unlock(cow_sub_map_parent); + } + if ((*real_map != map) + && (*real_map != cow_sub_map_parent)) { + vm_map_unlock(*real_map); + } + *real_map = map; + vm_object_deallocate(copy_object); + copy_object = VM_OBJECT_NULL; + vm_map_lock_write_to_read(map); + DTRACE_VM4(submap_copy_slowly, + vm_object_t, sub_object, + vm_object_offset_t, submap_entry_offset, + vm_object_size_t, submap_entry_size, + int, kr); + return kr; + } + + if ((kr == KERN_SUCCESS) && + (version.main_timestamp + 1) == map->timestamp) { + submap_entry = saved_submap_entry; + } else { + saved_submap_entry = NULL; + old_start -= start_delta; + old_end += end_delta; + vm_object_deallocate(copy_object); + copy_object = VM_OBJECT_NULL; + vm_map_lock_write_to_read(map); + goto RetrySubMap; + } } else { /* set up shadow object */ copy_object = sub_object; @@ -13007,9 +13636,9 @@ RetrySubMap: submap_entry->needs_copy = TRUE; prot = submap_entry->protection; - assert(!pmap_has_prot_policy(prot)); + assert(!pmap_has_prot_policy(map->pmap, submap_entry->translated_allow_execute, prot)); prot = prot & ~VM_PROT_WRITE; - assert(!pmap_has_prot_policy(prot)); + assert(!pmap_has_prot_policy(map->pmap, submap_entry->translated_allow_execute, prot)); if (override_nx(old_map, VME_ALIAS(submap_entry)) @@ -13025,6 +13654,7 @@ RetrySubMap: 
(submap_entry->is_shared || map->mapped_in_other_pmaps) ? PMAP_NULL : map->pmap, + VM_MAP_PAGE_SIZE(map), submap_entry->vme_start, prot); } @@ -13058,9 +13688,23 @@ RetrySubMap: if (!vm_map_lookup_entry(map, vaddr, &entry)) { + if ((cow_sub_map_parent) && (cow_sub_map_parent != map)) { + vm_map_unlock(cow_sub_map_parent); + } + if ((*real_map != map) + && (*real_map != cow_sub_map_parent)) { + vm_map_unlock(*real_map); + } + *real_map = map; vm_object_deallocate( copy_object); + copy_object = VM_OBJECT_NULL; vm_map_lock_write_to_read(map); + DTRACE_VM4(submap_lookup_post_unlock, + uint64_t, (uint64_t)entry->vme_start, + uint64_t, (uint64_t)entry->vme_end, + vm_map_offset_t, vaddr, + int, copied_slowly); return KERN_INVALID_ADDRESS; } @@ -13073,8 +13717,8 @@ RetrySubMap: * Clip (and unnest) the smallest nested chunk * possible around the faulting address... */ - local_start = vaddr & ~(pmap_nesting_size_min - 1); - local_end = local_start + pmap_nesting_size_min; + local_start = vaddr & ~(pmap_shared_region_size_min(map->pmap) - 1); + local_end = local_start + pmap_shared_region_size_min(map->pmap); /* * ... but don't go beyond the "old_start" to "old_end" * range, to avoid spanning over another VM region @@ -13127,11 +13771,19 @@ RetrySubMap: if ((entry->protection & VM_PROT_WRITE) && (entry->protection & VM_PROT_EXECUTE) && -#if !CONFIG_EMBEDDED - map != kernel_map && - cs_process_enforcement(NULL) && -#endif /* !CONFIG_EMBEDDED */ - !(entry->used_for_jit)) { +#if XNU_TARGET_OS_OSX + map->pmap != kernel_pmap && + (vm_map_cs_enforcement(map) +#if __arm64__ + || !VM_MAP_IS_EXOTIC(map) +#endif /* __arm64__ */ + ) && +#endif /* XNU_TARGET_OS_OSX */ +#if PMAP_CS + !pmap_cs_exempt(map->pmap) && +#endif + !(entry->used_for_jit) && + VM_MAP_POLICY_WX_STRIP_X(map)) { DTRACE_VM3(cs_wx, uint64_t, (uint64_t)entry->vme_start, uint64_t, (uint64_t)entry->vme_end, @@ -13146,7 +13798,7 @@ RetrySubMap: } if (copied_slowly) { - VME_OFFSET_SET(entry, local_start - old_start); + VME_OFFSET_SET(entry, local_start - old_start + copied_slowly_phys_offset); entry->needs_copy = FALSE; entry->is_shared = FALSE; } else { @@ -13301,7 +13953,7 @@ protection_failure: *offset = (vaddr - entry->vme_start) + VME_OFFSET(entry); *object = VME_OBJECT(entry); *out_prot = prot; - KDBG_FILTERED(MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_MAP_LOOKUP_OBJECT), VM_KERNEL_UNSLIDE_OR_PERM(*object), 0, 0, 0, 0); + KDBG_FILTERED(MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_MAP_LOOKUP_OBJECT), VM_KERNEL_UNSLIDE_OR_PERM(*object), (unsigned long) VME_ALIAS(entry), 0, 0); if (fault_info) { fault_info->interruptible = THREAD_UNINT; /* for now... 
*/ @@ -13321,6 +13973,9 @@ protection_failure: fault_info->stealth = FALSE; fault_info->io_sync = FALSE; if (entry->used_for_jit || +#if PMAP_CS + pmap_cs_exempt(map->pmap) || +#endif entry->vme_resilient_codesign) { fault_info->cs_bypass = TRUE; } else { @@ -13340,13 +13995,20 @@ protection_failure: fault_info->batch_pmap_op = FALSE; fault_info->resilient_media = entry->vme_resilient_media; fault_info->no_copy_on_read = entry->vme_no_copy_on_read; + if (entry->translated_allow_execute) { + fault_info->pmap_options |= PMAP_OPTIONS_TRANSLATED_ALLOW_EXECUTE; + } } /* * Lock the object to prevent it from disappearing */ if (object_lock_type == OBJECT_LOCK_EXCLUSIVE) { - vm_object_lock(*object); + if (contended == NULL) { + vm_object_lock(*object); + } else { + *contended = vm_object_lock_check_contended(*object); + } } else { vm_object_lock_shared(*object); } @@ -13448,12 +14110,15 @@ vm_map_region_recurse_64( boolean_t look_for_pages; vm_region_submap_short_info_64_t short_info; boolean_t do_region_footprint; + int effective_page_size, effective_page_shift; if (map == VM_MAP_NULL) { /* no address space to work on */ return KERN_INVALID_ARGUMENT; } + effective_page_shift = vm_self_region_page_shift(map); + effective_page_size = (1 << effective_page_shift); if (*count < VM_REGION_SUBMAP_SHORT_INFO_COUNT_64) { /* @@ -13691,9 +14356,9 @@ recurse_again: submap_info->inheritance = VM_INHERIT_DEFAULT; submap_info->offset = 0; submap_info->user_tag = -1; - submap_info->pages_resident = (unsigned int) (ledger_resident / PAGE_SIZE); + submap_info->pages_resident = (unsigned int) (ledger_resident / effective_page_size); submap_info->pages_shared_now_private = 0; - submap_info->pages_swapped_out = (unsigned int) (ledger_compressed / PAGE_SIZE); + submap_info->pages_swapped_out = (unsigned int) (ledger_compressed / effective_page_size); submap_info->pages_dirtied = submap_info->pages_resident; submap_info->ref_count = 1; submap_info->shadow_depth = 0; @@ -13701,7 +14366,7 @@ recurse_again: submap_info->share_mode = SM_PRIVATE; submap_info->is_submap = 0; submap_info->behavior = VM_BEHAVIOR_DEFAULT; - submap_info->object_id = INFO_MAKE_FAKE_OBJECT_ID(map, task_ledgers.purgeable_nonvolatile); + submap_info->object_id = VM_OBJECT_ID_FAKE(map, task_ledgers.purgeable_nonvolatile); submap_info->user_wired_count = 0; submap_info->pages_reusable = 0; } else { @@ -13713,7 +14378,7 @@ recurse_again: short_info->behavior = VM_BEHAVIOR_DEFAULT; short_info->user_wired_count = 0; short_info->is_submap = 0; - short_info->object_id = INFO_MAKE_FAKE_OBJECT_ID(map, task_ledgers.purgeable_nonvolatile); + short_info->object_id = VM_OBJECT_ID_FAKE(map, task_ledgers.purgeable_nonvolatile); short_info->external_pager = 0; short_info->shadow_depth = 0; short_info->share_mode = SM_PRIVATE; @@ -13772,11 +14437,6 @@ recurse_again: *size = curr_max_above + curr_max_below; *address = user_address + curr_skip - curr_max_below; -// LP64todo: all the current tools are 32bit, obviously never worked for 64b -// so probably should be a real 32b ID vs. ptr. 
-// Current users just check for equality -#define INFO_MAKE_OBJECT_ID(p) ((uint32_t)(uintptr_t)VM_KERNEL_ADDRPERM(p)) - if (look_for_pages) { submap_info->user_tag = VME_ALIAS(curr_entry); submap_info->offset = VME_OFFSET(curr_entry); @@ -13786,7 +14446,7 @@ recurse_again: submap_info->behavior = curr_entry->behavior; submap_info->user_wired_count = curr_entry->user_wired_count; submap_info->is_submap = curr_entry->is_sub_map; - submap_info->object_id = INFO_MAKE_OBJECT_ID(VME_OBJECT(curr_entry)); + submap_info->object_id = VM_OBJECT_ID(VME_OBJECT(curr_entry)); } else { short_info->user_tag = VME_ALIAS(curr_entry); short_info->offset = VME_OFFSET(curr_entry); @@ -13796,7 +14456,7 @@ recurse_again: short_info->behavior = curr_entry->behavior; short_info->user_wired_count = curr_entry->user_wired_count; short_info->is_submap = curr_entry->is_sub_map; - short_info->object_id = INFO_MAKE_OBJECT_ID(VME_OBJECT(curr_entry)); + short_info->object_id = VM_OBJECT_ID(VME_OBJECT(curr_entry)); } extended.pages_resident = 0; @@ -14002,7 +14662,7 @@ vm_map_region( if (*count < VM_REGION_EXTENDED_INFO_COUNT) { return KERN_INVALID_ARGUMENT; } - /*fallthru*/ + OS_FALLTHROUGH; case VM_REGION_EXTENDED_INFO__legacy: if (*count < VM_REGION_EXTENDED_INFO_COUNT__legacy) { return KERN_INVALID_ARGUMENT; @@ -14011,9 +14671,13 @@ vm_map_region( { vm_region_extended_info_t extended; mach_msg_type_number_t original_count; + int effective_page_size, effective_page_shift; extended = (vm_region_extended_info_t) info; + effective_page_shift = vm_self_region_page_shift(map); + effective_page_size = (1 << effective_page_shift); + vm_map_lock_read(map); start = *address; @@ -14209,8 +14873,10 @@ vm_map_region_walk( int i; int ref_count; struct vm_object *shadow_object; - int shadow_depth; + unsigned short shadow_depth; boolean_t do_region_footprint; + int effective_page_size, effective_page_shift; + vm_map_offset_t effective_page_mask; do_region_footprint = task_self_region_footprint(); @@ -14228,11 +14894,19 @@ vm_map_region_walk( extended->share_mode = SM_LARGE_PAGE; extended->ref_count = 1; extended->external_pager = 0; + + /* TODO4K: Superpage in 4k mode? */ extended->pages_resident = (unsigned int)(range >> PAGE_SHIFT); extended->shadow_depth = 0; return; } + effective_page_shift = vm_self_region_page_shift(map); + effective_page_size = (1 << effective_page_shift); + effective_page_mask = effective_page_size - 1; + + offset = vm_map_trunc_page(offset, effective_page_mask); + obj = VME_OBJECT(entry); vm_object_lock(obj); @@ -14244,7 +14918,7 @@ vm_map_region_walk( if (look_for_pages) { for (last_offset = offset + range; offset < last_offset; - offset += PAGE_SIZE_64, va += PAGE_SIZE) { + offset += effective_page_size, va += effective_page_size) { if (do_region_footprint) { int disp; @@ -14262,101 +14936,29 @@ vm_map_region_walk( /* * Query the pmap. 
*/ - pmap_query_page_info(map->pmap, - va, - &disp); + vm_map_footprint_query_page_info( + map, + entry, + va, + &disp); } - if (disp & PMAP_QUERY_PAGE_PRESENT) { - if (!(disp & PMAP_QUERY_PAGE_ALTACCT)) { - extended->pages_resident++; - } - if (disp & PMAP_QUERY_PAGE_REUSABLE) { - extended->pages_reusable++; - } else if (!(disp & PMAP_QUERY_PAGE_INTERNAL) || - (disp & PMAP_QUERY_PAGE_ALTACCT)) { - /* alternate accounting */ - } else { - extended->pages_dirtied++; - } - } else if (disp & PMAP_QUERY_PAGE_COMPRESSED) { - if (disp & PMAP_QUERY_PAGE_COMPRESSED_ALTACCT) { - /* alternate accounting */ - } else { - extended->pages_swapped_out++; - } + if (disp & VM_PAGE_QUERY_PAGE_PRESENT) { + extended->pages_resident++; } - /* deal with alternate accounting */ - if (obj->purgable == VM_PURGABLE_NONVOLATILE && - /* && not tagged as no-footprint? */ - VM_OBJECT_OWNER(obj) != NULL && - VM_OBJECT_OWNER(obj)->map == map) { - if ((((va - - entry->vme_start - + VME_OFFSET(entry)) - / PAGE_SIZE) < - (obj->resident_page_count + - vm_compressor_pager_get_count(obj->pager)))) { - /* - * Non-volatile purgeable object owned - * by this task: report the first - * "#resident + #compressed" pages as - * "resident" (to show that they - * contribute to the footprint) but not - * "dirty" (to avoid double-counting - * with the fake "non-volatile" region - * we'll report at the end of the - * address space to account for all - * (mapped or not) non-volatile memory - * owned by this task. - */ - extended->pages_resident++; - } - } else if ((obj->purgable == VM_PURGABLE_VOLATILE || - obj->purgable == VM_PURGABLE_EMPTY) && - /* && not tagged as no-footprint? */ - VM_OBJECT_OWNER(obj) != NULL && - VM_OBJECT_OWNER(obj)->map == map) { - if ((((va - - entry->vme_start - + VME_OFFSET(entry)) - / PAGE_SIZE) < - obj->wired_page_count)) { - /* - * Volatile|empty purgeable object owned - * by this task: report the first - * "#wired" pages as "resident" (to - * show that they contribute to the - * footprint) but not "dirty" (to avoid - * double-counting with the fake - * "non-volatile" region we'll report - * at the end of the address space to - * account for all (mapped or not) - * non-volatile memory owned by this - * task. - */ - extended->pages_resident++; - } - } else if (obj->purgable != VM_PURGABLE_DENY) { - /* - * Pages from purgeable objects - * will be reported as dirty - * appropriately in an extra - * fake memory region at the end of - * the address space. - */ - } else if (entry->iokit_acct) { - /* - * IOKit mappings are considered - * as fully dirty for footprint's - * sake. 
- */ + if (disp & VM_PAGE_QUERY_PAGE_REUSABLE) { + extended->pages_reusable++; + } + if (disp & VM_PAGE_QUERY_PAGE_DIRTY) { extended->pages_dirtied++; } + if (disp & PMAP_QUERY_PAGE_COMPRESSED) { + extended->pages_swapped_out++; + } continue; } vm_map_region_look_for_page(map, va, obj, - offset, ref_count, + vm_object_trunc_page(offset), ref_count, 0, extended, count); } @@ -14460,7 +15062,7 @@ vm_map_region_look_for_page( vm_object_t object, vm_object_offset_t offset, int max_refcnt, - int depth, + unsigned short depth, vm_region_extended_info_t extended, mach_msg_type_number_t count) { @@ -14648,8 +15250,7 @@ vm_map_simplify_entry( (this_entry->in_transition == FALSE) && (prev_entry->needs_wakeup == FALSE) && (this_entry->needs_wakeup == FALSE) && - (prev_entry->is_shared == FALSE) && - (this_entry->is_shared == FALSE) && + (prev_entry->is_shared == this_entry->is_shared) && (prev_entry->superpage_size == FALSE) && (this_entry->superpage_size == FALSE) ) { @@ -14815,6 +15416,7 @@ vm_map_machine_attribute( range = sub_size; offset = (start - entry->vme_start) + VME_OFFSET(entry); + offset = vm_object_trunc_page(offset); base_offset = offset; object = VME_OBJECT(entry); base_object = object; @@ -14840,7 +15442,11 @@ vm_map_machine_attribute( vm_object_unlock(last_object); continue; } - range -= PAGE_SIZE; + if (range < PAGE_SIZE) { + range = 0; + } else { + range -= PAGE_SIZE; + } if (base_object != object) { vm_object_unlock(object); @@ -15080,7 +15686,7 @@ vm_map_willneed( region_size = len; addr = start; - effective_page_mask = MAX(vm_map_page_mask(current_map()), PAGE_MASK); + effective_page_mask = MIN(vm_map_page_mask(current_map()), PAGE_MASK); effective_page_size = effective_page_mask + 1; vm_map_unlock_read(map); @@ -15131,7 +15737,7 @@ vm_map_willneed( */ kr = memory_object_data_request( pager, - offset + object->paging_offset, + vm_object_trunc_page(offset) + object->paging_offset, 0, /* ignored */ VM_PROT_READ, (memory_object_fault_info_t)&fault_info); @@ -15271,6 +15877,15 @@ vm_map_reuse_pages( * vm_map_entry_t's, so the read lock is sufficient. */ + if (VM_MAP_PAGE_SHIFT(map) < PAGE_SHIFT) { + /* + * XXX TODO4K + * need to figure out what reusable means for a + * portion of a native page. + */ + return KERN_SUCCESS; + } + vm_map_lock_read(map); assert(map->pmap != kernel_pmap); /* protect alias access */ @@ -15353,6 +15968,15 @@ vm_map_reusable_pages( vm_object_offset_t start_offset, end_offset; vm_map_offset_t pmap_offset; + if (VM_MAP_PAGE_SHIFT(map) < PAGE_SHIFT) { + /* + * XXX TODO4K + * need to figure out what reusable means for a portion + * of a native page. + */ + return KERN_SUCCESS; + } + /* * The MADV_REUSABLE operation doesn't require any changes to the * vm_map_entry_t's, so the read lock is sufficient. 
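/*
 * Illustrative sketch (not from the xnu sources): once
 * vm_map_footprint_query_page_info() returns a per-page disposition, the
 * walker above only has to translate bits into the extended-info counters.
 * The flag values below are hypothetical stand-ins, purely to show the
 * bit-to-counter mapping; the real constants come from the mach vm_page_info
 * headers.
 */
#include <stdint.h>

#define SK_PAGE_PRESENT    0x01u
#define SK_PAGE_REUSABLE   0x02u
#define SK_PAGE_DIRTY      0x04u
#define SK_PAGE_COMPRESSED 0x08u

struct sk_extended_counts {
        unsigned int pages_resident;
        unsigned int pages_reusable;
        unsigned int pages_dirtied;
        unsigned int pages_swapped_out;
};

static void
sk_account_page(struct sk_extended_counts *x, uint32_t disp)
{
        if (disp & SK_PAGE_PRESENT) {
                x->pages_resident++;
        }
        if (disp & SK_PAGE_REUSABLE) {
                x->pages_reusable++;
        }
        if (disp & SK_PAGE_DIRTY) {
                x->pages_dirtied++;
        }
        if (disp & SK_PAGE_COMPRESSED) {
                x->pages_swapped_out++;
        }
}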
@@ -15635,7 +16259,7 @@ vm_map_entry_insert( vm_prot_t max_protection, vm_behavior_t behavior, vm_inherit_t inheritance, - unsigned wired_count, + unsigned short wired_count, boolean_t no_cache, boolean_t permanent, boolean_t no_copy_on_read, @@ -15643,7 +16267,8 @@ vm_map_entry_insert( boolean_t clear_map_aligned, boolean_t is_submap, boolean_t used_for_jit, - int alias) + int alias, + boolean_t translated_allow_execute) { vm_map_entry_t new_entry; @@ -15670,13 +16295,14 @@ vm_map_entry_insert( new_entry->vme_start = start; new_entry->vme_end = end; - assert(page_aligned(new_entry->vme_start)); - assert(page_aligned(new_entry->vme_end)); if (new_entry->map_aligned) { assert(VM_MAP_PAGE_ALIGNED(new_entry->vme_start, VM_MAP_PAGE_MASK(map))); assert(VM_MAP_PAGE_ALIGNED(new_entry->vme_end, VM_MAP_PAGE_MASK(map))); + } else { + assert(page_aligned(new_entry->vme_start)); + assert(page_aligned(new_entry->vme_end)); } assert(new_entry->vme_start < new_entry->vme_end); @@ -15716,16 +16342,19 @@ vm_map_entry_insert( new_entry->superpage_size = FALSE; } if (used_for_jit) { -#if CONFIG_EMBEDDED - if (!(map->jit_entry_exists)) -#endif /* CONFIG_EMBEDDED */ - { + if (!(map->jit_entry_exists) || + VM_MAP_POLICY_ALLOW_MULTIPLE_JIT(map)) { new_entry->used_for_jit = TRUE; map->jit_entry_exists = TRUE; } } else { new_entry->used_for_jit = FALSE; } + if (translated_allow_execute) { + new_entry->translated_allow_execute = TRUE; + } else { + new_entry->translated_allow_execute = FALSE; + } new_entry->pmap_cs_associated = FALSE; new_entry->iokit_acct = FALSE; new_entry->vme_resilient_codesign = FALSE; @@ -15749,24 +16378,25 @@ vm_map_entry_insert( return new_entry; } +int vm_remap_old_path = 0; +int vm_remap_new_path = 0; /* * Routine: vm_map_remap_extract * - * Descritpion: This routine returns a vm_entry list from a map. + * Description: This routine returns a vm_entry list from a map. */ static kern_return_t vm_map_remap_extract( vm_map_t map, vm_map_offset_t addr, vm_map_size_t size, + vm_prot_t required_protection, boolean_t copy, struct vm_map_header *map_header, vm_prot_t *cur_protection, vm_prot_t *max_protection, /* What, no behavior? */ vm_inherit_t inheritance, - boolean_t pageable, - boolean_t same_map, vm_map_kernel_flags_t vmk_flags) { kern_return_t result; @@ -15785,20 +16415,27 @@ vm_map_remap_extract( vm_map_entry_t saved_src_entry; boolean_t src_entry_was_wired; vm_prot_t max_prot_for_prot_copy; + vm_map_offset_t effective_page_mask; + boolean_t pageable, same_map; + + pageable = vmk_flags.vmkf_copy_pageable; + same_map = vmk_flags.vmkf_copy_same_map; + + effective_page_mask = MIN(PAGE_MASK, VM_MAP_PAGE_MASK(map)); assert(map != VM_MAP_NULL); assert(size != 0); - assert(size == vm_map_round_page(size, PAGE_MASK)); + assert(size == vm_map_round_page(size, effective_page_mask)); assert(inheritance == VM_INHERIT_NONE || inheritance == VM_INHERIT_COPY || inheritance == VM_INHERIT_SHARE); + assert(!(required_protection & ~VM_PROT_ALL)); /* * Compute start and end of region. */ - src_start = vm_map_trunc_page(addr, PAGE_MASK); - src_end = vm_map_round_page(src_start + size, PAGE_MASK); - + src_start = vm_map_trunc_page(addr, effective_page_mask); + src_end = vm_map_round_page(src_start + size, effective_page_mask); /* * Initialize map_header. 
@@ -15807,7 +16444,9 @@ vm_map_remap_extract( map_header->links.prev = CAST_TO_VM_MAP_ENTRY(&map_header->links); map_header->nentries = 0; map_header->entries_pageable = pageable; - map_header->page_shift = PAGE_SHIFT; +// map_header->page_shift = MIN(VM_MAP_PAGE_SHIFT(map), PAGE_SHIFT); + map_header->page_shift = VM_MAP_PAGE_SHIFT(map); + map_header->rb_head_store.rbh_root = (void *)(int)SKIP_RB_TREE; vm_map_store_init( map_header ); @@ -15828,6 +16467,15 @@ vm_map_remap_extract( * multiple map entries, need to loop on them. */ vm_map_lock(map); + if (VM_MAP_PAGE_SHIFT(map) < PAGE_SHIFT) { + /* + * This address space uses sub-pages so the range might + * not be re-mappable in an address space with larger + * pages. Re-assemble any broken-up VM map entries to + * improve our chances of making it work. + */ + vm_map_simplify_range(map, src_start, src_end); + } while (mapped_size != size) { vm_map_size_t entry_size; @@ -15853,7 +16501,168 @@ vm_map_remap_extract( entry_size = (vm_map_size_t)(src_entry->vme_end - src_entry->vme_start); - if (src_entry->is_sub_map) { + if (src_entry->is_sub_map && + vmk_flags.vmkf_copy_single_object) { + vm_map_t submap; + vm_map_offset_t submap_start; + vm_map_size_t submap_size; + + /* + * No check for "required_protection" on "src_entry" + * because the protections that matter are the ones + * on the submap's VM map entry, which will be checked + * during the call to vm_map_remap_extract() below. + */ + submap_size = src_entry->vme_end - src_start; + if (submap_size > size) { + submap_size = size; + } + submap_start = VME_OFFSET(src_entry) + src_start - src_entry->vme_start; + submap = VME_SUBMAP(src_entry); + vm_map_reference(submap); + vm_map_unlock(map); + src_entry = NULL; + result = vm_map_remap_extract(submap, + submap_start, + submap_size, + required_protection, + copy, + map_header, + cur_protection, + max_protection, + inheritance, + vmk_flags); + vm_map_deallocate(submap); + return result; + } + + if ((src_entry->protection & required_protection) + != required_protection) { + if (vmk_flags.vmkf_copy_single_object && + mapped_size != 0) { + /* + * Single object extraction. + * We can't extract more with the required + * protection but we've extracted some, so + * stop there and declare success. + * The caller should check the size of + * the copy entry we've extracted. + */ + result = KERN_SUCCESS; + } else { + /* + * VM range extraction. + * Required proctection is not available + * for this part of the range: fail. + */ + result = KERN_PROTECTION_FAILURE; + } + break; + } + + if (src_entry->is_sub_map && + VM_MAP_PAGE_SHIFT(VME_SUBMAP(src_entry)) < PAGE_SHIFT) { + vm_map_t submap; + vm_map_offset_t submap_start; + vm_map_size_t submap_size; + vm_map_copy_t submap_copy; + vm_prot_t submap_curprot, submap_maxprot; + + vm_remap_new_path++; + + /* + * No check for "required_protection" on "src_entry" + * because the protections that matter are the ones + * on the submap's VM map entry, which will be checked + * during the call to vm_map_copy_extract() below. 
+ */ + object = VM_OBJECT_NULL; + submap_copy = VM_MAP_COPY_NULL; + + /* find equivalent range in the submap */ + submap = VME_SUBMAP(src_entry); + submap_start = VME_OFFSET(src_entry) + src_start - src_entry->vme_start; + submap_size = tmp_size; + /* extra ref to keep submap alive */ + vm_map_reference(submap); + + DTRACE_VM6(remap_submap_recurse, + vm_map_t, map, + vm_map_offset_t, addr, + vm_map_size_t, size, + boolean_t, copy, + vm_map_offset_t, submap_start, + vm_map_size_t, submap_size); + + /* + * The map can be safely unlocked since we + * already hold a reference on the submap. + * + * No timestamp since we don't care if the map + * gets modified while we're down in the submap. + * We'll resume the extraction at src_start + tmp_size + * anyway. + */ + vm_map_unlock(map); + src_entry = NULL; /* not valid once map is unlocked */ + + result = vm_map_copy_extract(submap, + submap_start, + submap_size, + required_protection, + copy, + &submap_copy, + &submap_curprot, + &submap_maxprot, + inheritance, + vmk_flags); + + /* release extra ref on submap */ + vm_map_deallocate(submap); + submap = VM_MAP_NULL; + + if (result != KERN_SUCCESS) { + vm_map_lock(map); + break; + } + + /* transfer submap_copy entries to map_header */ + while (vm_map_copy_first_entry(submap_copy) != + vm_map_copy_to_entry(submap_copy)) { + vm_map_entry_t copy_entry; + vm_map_size_t copy_entry_size; + + copy_entry = vm_map_copy_first_entry(submap_copy); + assert(!copy_entry->is_sub_map); + vm_map_copy_entry_unlink(submap_copy, copy_entry); + copy_entry_size = copy_entry->vme_end - copy_entry->vme_start; + copy_entry->vme_start = map_address; + copy_entry->vme_end = map_address + copy_entry_size; + map_address += copy_entry_size; + mapped_size += copy_entry_size; + src_start += copy_entry_size; + assert(src_start <= src_end); + _vm_map_store_entry_link(map_header, + map_header->links.prev, + copy_entry); + } + /* done with submap_copy */ + vm_map_copy_discard(submap_copy); + + *cur_protection &= submap_curprot; + *max_protection &= submap_maxprot; + + /* re-acquire the map lock and continue to next entry */ + vm_map_lock(map); + continue; + } else if (src_entry->is_sub_map) { + vm_remap_old_path++; + DTRACE_VM4(remap_submap, + vm_map_t, map, + vm_map_offset_t, addr, + vm_map_size_t, size, + boolean_t, copy); + vm_map_reference(VME_SUBMAP(src_entry)); object = VM_OBJECT_NULL; } else { @@ -15895,16 +16704,27 @@ vm_map_remap_extract( } if (object == VM_OBJECT_NULL) { + assert(!src_entry->needs_copy); object = vm_object_allocate(entry_size); VME_OFFSET_SET(src_entry, 0); VME_OBJECT_SET(src_entry, object); assert(src_entry->use_pmap); - } else if (object->copy_strategy != - MEMORY_OBJECT_COPY_SYMMETRIC) { + } else if (src_entry->wired_count || + object->copy_strategy != MEMORY_OBJECT_COPY_SYMMETRIC) { /* - * We are already using an asymmetric - * copy, and therefore we already have - * the right object. + * A wired memory region should not have + * any pending copy-on-write and needs to + * keep pointing at the VM object that + * contains the wired pages. + * If we're sharing this memory (copy=false), + * we'll share this VM object. + * If we're copying this memory (copy=true), + * we'll call vm_object_copy_slowly() below + * and use the new VM object for the remapping. + * + * Or, we are already using an asymmetric + * copy, and therefore we already have + * the right object. 
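/*
 * Illustrative sketch (not from the xnu sources): both submap branches above
 * recurse with the range translated into the submap's own address space: the
 * distance from the start of the parent entry is added to the entry's submap
 * offset, and the size is clipped so it does not exceed what the caller asked
 * to extract. A standalone rendition of that translation, with hypothetical
 * parameter names:
 */
#include <stdint.h>

static void
sk_translate_to_submap(uint64_t src_start, uint64_t entry_start,
    uint64_t entry_end, uint64_t entry_submap_offset,
    uint64_t requested_size,
    uint64_t *submap_start, uint64_t *submap_size)
{
        /* how far into the parent entry the extraction starts */
        uint64_t delta = src_start - entry_start;

        *submap_start = entry_submap_offset + delta;

        /* at most the rest of this entry, at most the requested size */
        *submap_size = entry_end - src_start;
        if (*submap_size > requested_size) {
                *submap_size = requested_size;
        }
}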
*/ assert(!src_entry->needs_copy); } else if (src_entry->needs_copy || object->shadowed || @@ -15918,7 +16738,7 @@ vm_map_remap_extract( (src_entry->protection & VM_PROT_WRITE)) { vm_prot_t prot; - assert(!pmap_has_prot_policy(src_entry->protection)); + assert(!pmap_has_prot_policy(map->pmap, src_entry->translated_allow_execute, src_entry->protection)); prot = src_entry->protection & ~VM_PROT_WRITE; @@ -15928,7 +16748,7 @@ vm_map_remap_extract( prot |= VM_PROT_EXECUTE; } - assert(!pmap_has_prot_policy(prot)); + assert(!pmap_has_prot_policy(map->pmap, src_entry->translated_allow_execute, prot)); if (map->mapped_in_other_pmaps) { vm_object_pmap_protect( @@ -15936,8 +16756,21 @@ vm_map_remap_extract( VME_OFFSET(src_entry), entry_size, PMAP_NULL, + PAGE_SIZE, src_entry->vme_start, prot); +#if MACH_ASSERT + } else if (__improbable(map->pmap == PMAP_NULL)) { + extern boolean_t vm_tests_in_progress; + assert(vm_tests_in_progress); + /* + * Some VM tests (in vm_tests.c) + * sometimes want to use a VM + * map without a pmap. + * Otherwise, this should never + * happen. + */ +#endif /* MACH_ASSERT */ } else { pmap_protect(vm_map_pmap(map), src_entry->vme_start, @@ -15953,8 +16786,20 @@ vm_map_remap_extract( vm_object_lock(object); vm_object_reference_locked(object); /* object ref. for new entry */ + assert(!src_entry->needs_copy); if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC) { + /* + * If we want to share this object (copy==0), + * it needs to be COPY_DELAY. + * If we want to copy this object (copy==1), + * we can't just set "needs_copy" on our side + * and expect the other side to do the same + * (symmetrically), so we can't let the object + * stay COPY_SYMMETRIC. + * So we always switch from COPY_SYMMETRIC to + * COPY_DELAY. + */ object->copy_strategy = MEMORY_OBJECT_COPY_DELAY; } @@ -15965,7 +16810,7 @@ vm_map_remap_extract( (src_start - src_entry->vme_start)); new_entry = _vm_map_entry_create(map_header, !map_header->entries_pageable); - vm_map_entry_copy(new_entry, src_entry); + vm_map_entry_copy(map, new_entry, src_entry); if (new_entry->is_sub_map) { /* clr address space specifics */ new_entry->use_pmap = FALSE; @@ -16019,15 +16864,13 @@ RestartCopy: result = KERN_PROTECTION_FAILURE; break; #endif /* __APRR_SUPPORTED__*/ - } else { -#if CONFIG_EMBEDDED + } else if (!VM_MAP_POLICY_ALLOW_JIT_SHARING(map)) { /* * Cannot allow an entry describing a JIT * region to be shared across address spaces. */ result = KERN_INVALID_ARGUMENT; break; -#endif /* CONFIG_EMBEDDED */ } } @@ -16042,6 +16885,7 @@ RestartCopy: new_entry->needs_copy = TRUE; object = VM_OBJECT_NULL; } else if (src_entry->wired_count == 0 && + !(debug4k_no_cow_copyin && VM_MAP_PAGE_SHIFT(map) < PAGE_SHIFT) && vm_object_copy_quickly(VME_OBJECT_PTR(new_entry), VME_OFFSET(new_entry), (new_entry->vme_end - @@ -16058,7 +16902,7 @@ RestartCopy: if (src_needs_copy && !src_entry->needs_copy) { vm_prot_t prot; - assert(!pmap_has_prot_policy(src_entry->protection)); + assert(!pmap_has_prot_policy(map->pmap, src_entry->translated_allow_execute, src_entry->protection)); prot = src_entry->protection & ~VM_PROT_WRITE; @@ -16068,7 +16912,7 @@ RestartCopy: prot |= VM_PROT_EXECUTE; } - assert(!pmap_has_prot_policy(prot)); + assert(!pmap_has_prot_policy(map->pmap, src_entry->translated_allow_execute, prot)); vm_object_pmap_protect(object, offset, @@ -16076,6 +16920,7 @@ RestartCopy: ((src_entry->is_shared || map->mapped_in_other_pmaps) ? 
PMAP_NULL : map->pmap), + VM_MAP_PAGE_SIZE(map), src_entry->vme_start, prot); @@ -16107,7 +16952,9 @@ RestartCopy: /* * Perform the copy. */ - if (src_entry_was_wired > 0) { + if (src_entry_was_wired > 0 || + (debug4k_no_cow_copyin && + VM_MAP_PAGE_SHIFT(map) < PAGE_SHIFT)) { vm_object_lock(object); result = vm_object_copy_slowly( object, @@ -16117,7 +16964,7 @@ RestartCopy: THREAD_UNINT, VME_OBJECT_PTR(new_entry)); - VME_OFFSET_SET(new_entry, 0); + VME_OFFSET_SET(new_entry, offset - vm_object_trunc_page(offset)); new_entry->needs_copy = FALSE; } else { vm_object_offset_t new_offset; @@ -16189,9 +17036,22 @@ RestartCopy: *cur_protection &= src_entry->protection; *max_protection &= src_entry->max_protection; } + map_address += tmp_size; mapped_size += tmp_size; src_start += tmp_size; + + if (vmk_flags.vmkf_copy_single_object) { + if (mapped_size != size) { + DEBUG4K_SHARE("map %p addr 0x%llx size 0x%llx clipped copy at mapped_size 0x%llx\n", map, (uint64_t)addr, (uint64_t)size, (uint64_t)mapped_size); + if (src_entry->vme_next != vm_map_to_entry(map) && + VME_OBJECT(src_entry->vme_next) == VME_OBJECT(src_entry)) { + /* XXX TODO4K */ + DEBUG4K_ERROR("could have extended copy to next entry...\n"); + } + } + break; + } } /* end while */ vm_map_unlock(map); @@ -16212,9 +17072,694 @@ RestartCopy: _vm_map_entry_dispose(map_header, src_entry); } } - return result; + return result; +} + +bool +vm_map_is_exotic( + vm_map_t map) +{ + return VM_MAP_IS_EXOTIC(map); +} + +bool +vm_map_is_alien( + vm_map_t map) +{ + return VM_MAP_IS_ALIEN(map); +} + +#if XNU_TARGET_OS_OSX +void +vm_map_mark_alien( + vm_map_t map) +{ + vm_map_lock(map); + map->is_alien = true; + vm_map_unlock(map); +} +#endif /* XNU_TARGET_OS_OSX */ + +void vm_map_copy_to_physcopy(vm_map_copy_t copy_map, vm_map_t target_map); +void +vm_map_copy_to_physcopy( + vm_map_copy_t copy_map, + vm_map_t target_map) +{ + vm_map_size_t size; + vm_map_entry_t entry; + vm_map_entry_t new_entry; + vm_object_t new_object; + unsigned int pmap_flags; + pmap_t new_pmap; + vm_map_t new_map; + vm_map_address_t src_start, src_end, src_cur; + vm_map_address_t dst_start, dst_end, dst_cur; + kern_return_t kr; + void *kbuf; + + /* + * Perform the equivalent of vm_allocate() and memcpy(). + * Replace the mappings in "copy_map" with the newly allocated mapping. 
+ */ + DEBUG4K_COPY("copy_map %p (%d %d 0x%llx 0x%llx) BEFORE\n", copy_map, copy_map->cpy_hdr.page_shift, copy_map->cpy_hdr.nentries, copy_map->offset, (uint64_t)copy_map->size); + + assert(copy_map->cpy_hdr.page_shift != VM_MAP_PAGE_MASK(target_map)); + + /* allocate new VM object */ + size = VM_MAP_ROUND_PAGE(copy_map->size, PAGE_MASK); + new_object = vm_object_allocate(size); + assert(new_object); + + /* allocate new VM map entry */ + new_entry = vm_map_copy_entry_create(copy_map, FALSE); + assert(new_entry); + + /* finish initializing new VM map entry */ + new_entry->protection = VM_PROT_DEFAULT; + new_entry->max_protection = VM_PROT_DEFAULT; + new_entry->use_pmap = TRUE; + + /* make new VM map entry point to new VM object */ + new_entry->vme_start = 0; + new_entry->vme_end = size; + VME_OBJECT_SET(new_entry, new_object); + VME_OFFSET_SET(new_entry, 0); + + /* create a new pmap to map "copy_map" */ + pmap_flags = 0; + assert(copy_map->cpy_hdr.page_shift == FOURK_PAGE_SHIFT); +#if PMAP_CREATE_FORCE_4K_PAGES + pmap_flags |= PMAP_CREATE_FORCE_4K_PAGES; +#endif /* PMAP_CREATE_FORCE_4K_PAGES */ + pmap_flags |= PMAP_CREATE_64BIT; + new_pmap = pmap_create_options(NULL, (vm_map_size_t)0, pmap_flags); + assert(new_pmap); + + /* create a new pageable VM map to map "copy_map" */ + new_map = vm_map_create(new_pmap, 0, MACH_VM_MAX_ADDRESS, TRUE); + assert(new_map); + vm_map_set_page_shift(new_map, copy_map->cpy_hdr.page_shift); + + /* map "copy_map" in the new VM map */ + src_start = 0; + kr = vm_map_copyout_internal( + new_map, + &src_start, + copy_map, + copy_map->size, + FALSE, /* consume_on_success */ + VM_PROT_DEFAULT, + VM_PROT_DEFAULT, + VM_INHERIT_DEFAULT); + assert(kr == KERN_SUCCESS); + src_end = src_start + copy_map->size; + + /* map "new_object" in the new VM map */ + vm_object_reference(new_object); + dst_start = 0; + kr = vm_map_enter(new_map, + &dst_start, + size, + 0, /* mask */ + VM_FLAGS_ANYWHERE, + VM_MAP_KERNEL_FLAGS_NONE, + VM_KERN_MEMORY_OSFMK, + new_object, + 0, /* offset */ + FALSE, /* needs copy */ + VM_PROT_DEFAULT, + VM_PROT_DEFAULT, + VM_INHERIT_DEFAULT); + assert(kr == KERN_SUCCESS); + dst_end = dst_start + size; + + /* get a kernel buffer */ + kbuf = kheap_alloc(KHEAP_TEMP, PAGE_SIZE, Z_WAITOK); + assert(kbuf); + + /* physically copy "copy_map" mappings to new VM object */ + for (src_cur = src_start, dst_cur = dst_start; + src_cur < src_end; + src_cur += PAGE_SIZE, dst_cur += PAGE_SIZE) { + vm_size_t bytes; + + bytes = PAGE_SIZE; + if (src_cur + PAGE_SIZE > src_end) { + /* partial copy for last page */ + bytes = src_end - src_cur; + assert(bytes > 0 && bytes < PAGE_SIZE); + /* rest of dst page should be zero-filled */ + } + /* get bytes from src mapping */ + kr = copyinmap(new_map, src_cur, kbuf, bytes); + if (kr != KERN_SUCCESS) { + DEBUG4K_COPY("copyinmap(%p, 0x%llx, %p, 0x%llx) kr 0x%x\n", new_map, (uint64_t)src_cur, kbuf, (uint64_t)bytes, kr); + } + /* put bytes in dst mapping */ + assert(dst_cur < dst_end); + assert(dst_cur + bytes <= dst_end); + kr = copyoutmap(new_map, kbuf, dst_cur, bytes); + if (kr != KERN_SUCCESS) { + DEBUG4K_COPY("copyoutmap(%p, %p, 0x%llx, 0x%llx) kr 0x%x\n", new_map, kbuf, (uint64_t)dst_cur, (uint64_t)bytes, kr); + } + } + + /* free kernel buffer */ + kheap_free(KHEAP_TEMP, kbuf, PAGE_SIZE); + kbuf = NULL; + + /* destroy new map */ + vm_map_destroy(new_map, VM_MAP_REMOVE_NO_FLAGS); + new_map = VM_MAP_NULL; + + /* dispose of the old map entries in "copy_map" */ + while (vm_map_copy_first_entry(copy_map) != + vm_map_copy_to_entry(copy_map)) 
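/*
 * Illustrative sketch (not from the xnu sources): vm_map_copy_to_physcopy()
 * above moves the data one page at a time through a temporary kernel buffer
 * (copyinmap then copyoutmap), shortening the final chunk when the range does
 * not end on a page boundary. The same loop shape, reduced to plain memcpy()
 * on a hypothetical 16K page size:
 */
#include <stdint.h>
#include <string.h>

#define SK_PAGE_SIZE 0x4000ULL  /* assumed 16K kernel page for the sketch */

static void
sk_chunked_copy(uint8_t *dst, const uint8_t *src, uint64_t len)
{
        for (uint64_t cur = 0; cur < len; cur += SK_PAGE_SIZE) {
                uint64_t bytes = SK_PAGE_SIZE;

                if (cur + SK_PAGE_SIZE > len) {
                        bytes = len - cur;  /* partial last chunk */
                }
                memcpy(dst + cur, src + cur, bytes);
        }
}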
{ + entry = vm_map_copy_first_entry(copy_map); + vm_map_copy_entry_unlink(copy_map, entry); + if (entry->is_sub_map) { + vm_map_deallocate(VME_SUBMAP(entry)); + } else { + vm_object_deallocate(VME_OBJECT(entry)); + } + vm_map_copy_entry_dispose(copy_map, entry); + } + + /* change "copy_map"'s page_size to match "target_map" */ + copy_map->cpy_hdr.page_shift = VM_MAP_PAGE_SHIFT(target_map); + copy_map->offset = 0; + copy_map->size = size; + + /* insert new map entry in "copy_map" */ + assert(vm_map_copy_last_entry(copy_map) == vm_map_copy_to_entry(copy_map)); + vm_map_copy_entry_link(copy_map, vm_map_copy_last_entry(copy_map), new_entry); + + DEBUG4K_COPY("copy_map %p (%d %d 0x%llx 0x%llx) AFTER\n", copy_map, copy_map->cpy_hdr.page_shift, copy_map->cpy_hdr.nentries, copy_map->offset, (uint64_t)copy_map->size); +} + +void +vm_map_copy_adjust_get_target_copy_map( + vm_map_copy_t copy_map, + vm_map_copy_t *target_copy_map_p); +void +vm_map_copy_adjust_get_target_copy_map( + vm_map_copy_t copy_map, + vm_map_copy_t *target_copy_map_p) +{ + vm_map_copy_t target_copy_map; + vm_map_entry_t entry, target_entry; + + if (*target_copy_map_p != VM_MAP_COPY_NULL) { + /* the caller already has a "target_copy_map": use it */ + return; + } + + /* the caller wants us to create a new copy of "copy_map" */ + target_copy_map = vm_map_copy_allocate(); + target_copy_map->type = copy_map->type; + assert(target_copy_map->type == VM_MAP_COPY_ENTRY_LIST); + target_copy_map->offset = copy_map->offset; + target_copy_map->size = copy_map->size; + target_copy_map->cpy_hdr.page_shift = copy_map->cpy_hdr.page_shift; + vm_map_store_init(&target_copy_map->cpy_hdr); + for (entry = vm_map_copy_first_entry(copy_map); + entry != vm_map_copy_to_entry(copy_map); + entry = entry->vme_next) { + target_entry = vm_map_copy_entry_create(target_copy_map, FALSE); + vm_map_entry_copy_full(target_entry, entry); + if (target_entry->is_sub_map) { + vm_map_reference(VME_SUBMAP(target_entry)); + } else { + vm_object_reference(VME_OBJECT(target_entry)); + } + vm_map_copy_entry_link( + target_copy_map, + vm_map_copy_last_entry(target_copy_map), + target_entry); + } + entry = VM_MAP_ENTRY_NULL; + *target_copy_map_p = target_copy_map; +} + +void +vm_map_copy_trim( + vm_map_copy_t copy_map, + int new_page_shift, + vm_map_offset_t trim_start, + vm_map_offset_t trim_end); +void +vm_map_copy_trim( + vm_map_copy_t copy_map, + int new_page_shift, + vm_map_offset_t trim_start, + vm_map_offset_t trim_end) +{ + int copy_page_shift; + vm_map_entry_t entry, next_entry; + + assert(copy_map->type == VM_MAP_COPY_ENTRY_LIST); + assert(copy_map->cpy_hdr.nentries > 0); + + trim_start += vm_map_copy_first_entry(copy_map)->vme_start; + trim_end += vm_map_copy_first_entry(copy_map)->vme_start; + + /* use the new page_shift to do the clipping */ + copy_page_shift = VM_MAP_COPY_PAGE_SHIFT(copy_map); + copy_map->cpy_hdr.page_shift = new_page_shift; + + for (entry = vm_map_copy_first_entry(copy_map); + entry != vm_map_copy_to_entry(copy_map); + entry = next_entry) { + next_entry = entry->vme_next; + if (entry->vme_end <= trim_start) { + /* entry fully before trim range: skip */ + continue; + } + if (entry->vme_start >= trim_end) { + /* entry fully after trim range: done */ + break; + } + /* clip entry if needed */ + vm_map_copy_clip_start(copy_map, entry, trim_start); + vm_map_copy_clip_end(copy_map, entry, trim_end); + /* dispose of entry */ + copy_map->size -= entry->vme_end - entry->vme_start; + vm_map_copy_entry_unlink(copy_map, entry); + if (entry->is_sub_map) { + 
vm_map_deallocate(VME_SUBMAP(entry)); + } else { + vm_object_deallocate(VME_OBJECT(entry)); + } + vm_map_copy_entry_dispose(copy_map, entry); + entry = VM_MAP_ENTRY_NULL; + } + + /* restore copy_map's original page_shift */ + copy_map->cpy_hdr.page_shift = copy_page_shift; +} + +/* + * Make any necessary adjustments to "copy_map" to allow it to be + * mapped into "target_map". + * If no changes were necessary, "target_copy_map" points to the + * untouched "copy_map". + * If changes are necessary, changes will be made to "target_copy_map". + * If "target_copy_map" was NULL, we create a new "vm_map_copy_t" and + * copy the original "copy_map" to it before applying the changes. + * The caller should discard "target_copy_map" if it's not the same as + * the original "copy_map". + */ +/* TODO4K: also adjust to sub-range in the copy_map -> add start&end? */ +kern_return_t +vm_map_copy_adjust_to_target( + vm_map_copy_t src_copy_map, + vm_map_offset_t offset, + vm_map_size_t size, + vm_map_t target_map, + boolean_t copy, + vm_map_copy_t *target_copy_map_p, + vm_map_offset_t *overmap_start_p, + vm_map_offset_t *overmap_end_p, + vm_map_offset_t *trimmed_start_p) +{ + vm_map_copy_t copy_map, target_copy_map; + vm_map_size_t target_size; + vm_map_size_t src_copy_map_size; + vm_map_size_t overmap_start, overmap_end; + int misalignments; + vm_map_entry_t entry, target_entry; + vm_map_offset_t addr_adjustment; + vm_map_offset_t new_start, new_end; + int copy_page_mask, target_page_mask; + int copy_page_shift, target_page_shift; + vm_map_offset_t trimmed_end; + + /* + * Assert that the vm_map_copy is coming from the right + * zone and hasn't been forged + */ + vm_map_copy_require(src_copy_map); + assert(src_copy_map->type == VM_MAP_COPY_ENTRY_LIST); + + /* + * Start working with "src_copy_map" but we'll switch + * to "target_copy_map" as soon as we start making adjustments. + */ + copy_map = src_copy_map; + src_copy_map_size = src_copy_map->size; + + copy_page_shift = VM_MAP_COPY_PAGE_SHIFT(copy_map); + copy_page_mask = VM_MAP_COPY_PAGE_MASK(copy_map); + target_page_shift = VM_MAP_PAGE_SHIFT(target_map); + target_page_mask = VM_MAP_PAGE_MASK(target_map); + + DEBUG4K_ADJUST("copy_map %p (%d offset 0x%llx size 0x%llx) target_map %p (%d) copy %d offset 0x%llx size 0x%llx target_copy_map %p...\n", copy_map, copy_page_shift, (uint64_t)copy_map->offset, (uint64_t)copy_map->size, target_map, target_page_shift, copy, (uint64_t)offset, (uint64_t)size, *target_copy_map_p); + + target_copy_map = *target_copy_map_p; + if (target_copy_map != VM_MAP_COPY_NULL) { + vm_map_copy_require(target_copy_map); + } + + if (offset + size > copy_map->size) { + DEBUG4K_ERROR("copy_map %p (%d->%d) copy_map->size 0x%llx offset 0x%llx size 0x%llx KERN_INVALID_ARGUMENT\n", copy_map, copy_page_shift, target_page_shift, (uint64_t)copy_map->size, (uint64_t)offset, (uint64_t)size); + return KERN_INVALID_ARGUMENT; + } + + /* trim the end */ + trimmed_end = 0; + new_end = VM_MAP_ROUND_PAGE(offset + size, target_page_mask); + if (new_end < copy_map->size) { + trimmed_end = src_copy_map_size - new_end; + DEBUG4K_ADJUST("copy_map %p (%d->%d) copy %d offset 0x%llx size 0x%llx target_copy_map %p... 
trim end from 0x%llx to 0x%llx\n", copy_map, copy_page_shift, target_page_shift, copy, (uint64_t)offset, (uint64_t)size, target_copy_map, (uint64_t)new_end, (uint64_t)copy_map->size); + /* get "target_copy_map" if needed and adjust it */ + vm_map_copy_adjust_get_target_copy_map(copy_map, + &target_copy_map); + copy_map = target_copy_map; + vm_map_copy_trim(target_copy_map, target_page_shift, + new_end, copy_map->size); + } + + /* trim the start */ + new_start = VM_MAP_TRUNC_PAGE(offset, target_page_mask); + if (new_start != 0) { + DEBUG4K_ADJUST("copy_map %p (%d->%d) copy %d offset 0x%llx size 0x%llx target_copy_map %p... trim start from 0x%llx to 0x%llx\n", copy_map, copy_page_shift, target_page_shift, copy, (uint64_t)offset, (uint64_t)size, target_copy_map, (uint64_t)0, (uint64_t)new_start); + /* get "target_copy_map" if needed and adjust it */ + vm_map_copy_adjust_get_target_copy_map(copy_map, + &target_copy_map); + copy_map = target_copy_map; + vm_map_copy_trim(target_copy_map, target_page_shift, + 0, new_start); + } + *trimmed_start_p = new_start; + + /* target_size starts with what's left after trimming */ + target_size = copy_map->size; + assertf(target_size == src_copy_map_size - *trimmed_start_p - trimmed_end, + "target_size 0x%llx src_copy_map_size 0x%llx trimmed_start 0x%llx trimmed_end 0x%llx\n", + (uint64_t)target_size, (uint64_t)src_copy_map_size, + (uint64_t)*trimmed_start_p, (uint64_t)trimmed_end); + + /* check for misalignments but don't adjust yet */ + misalignments = 0; + overmap_start = 0; + overmap_end = 0; + if (copy_page_shift < target_page_shift) { + /* + * Remapping from 4K to 16K: check the VM object alignments + * throughout the range. + * If the start and end of the range are mis-aligned, we can + * over-map to re-align, and adjust the "overmap" start/end + * and "target_size" of the range accordingly. + * If there is any mis-alignment within the range: + * if "copy": + * we can do immediate-copy instead of copy-on-write, + * else: + * no way to remap and share; fail. + */ + for (entry = vm_map_copy_first_entry(copy_map); + entry != vm_map_copy_to_entry(copy_map); + entry = entry->vme_next) { + vm_object_offset_t object_offset_start, object_offset_end; + + object_offset_start = VME_OFFSET(entry); + object_offset_end = object_offset_start; + object_offset_end += entry->vme_end - entry->vme_start; + if (object_offset_start & target_page_mask) { + if (entry == vm_map_copy_first_entry(copy_map) && !copy) { + overmap_start++; + } else { + misalignments++; + } + } + if (object_offset_end & target_page_mask) { + if (entry->vme_next == vm_map_copy_to_entry(copy_map) && !copy) { + overmap_end++; + } else { + misalignments++; + } + } + } + } + entry = VM_MAP_ENTRY_NULL; + + /* decide how to deal with misalignments */ + assert(overmap_start <= 1); + assert(overmap_end <= 1); + if (!overmap_start && !overmap_end && !misalignments) { + /* copy_map is properly aligned for target_map ... */ + if (*trimmed_start_p) { + /* ... but we trimmed it, so still need to adjust */ + } else { + /* ... 
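/*
 * Illustrative sketch (not from the xnu sources): the trim logic above keeps
 * only the window of the copy map that is aligned to the target map's page
 * size: the requested end is rounded up, the requested start is truncated
 * down, and everything outside that window is clipped off. The boundary math,
 * standalone:
 */
#include <stdint.h>

static void
sk_trim_window(uint64_t offset, uint64_t size, uint64_t copy_size,
    uint64_t target_page_mask,
    uint64_t *keep_start, uint64_t *keep_end, uint64_t *trimmed_end)
{
        *keep_end = (offset + size + target_page_mask) & ~target_page_mask;
        *trimmed_end = (*keep_end < copy_size) ? (copy_size - *keep_end) : 0;
        *keep_start = offset & ~target_page_mask;

        /* e.g. offset 0x1800, size 0x800, 16K target: keep [0x0, 0x4000) */
}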
and we didn't trim anything: we're done */ + if (target_copy_map == VM_MAP_COPY_NULL) { + target_copy_map = copy_map; + } + *target_copy_map_p = target_copy_map; + *overmap_start_p = 0; + *overmap_end_p = 0; + DEBUG4K_ADJUST("copy_map %p (%d offset 0x%llx size 0x%llx) target_map %p (%d) copy %d target_copy_map %p (%d offset 0x%llx size 0x%llx) -> trimmed 0x%llx overmap start 0x%llx end 0x%llx KERN_SUCCESS\n", copy_map, copy_page_shift, (uint64_t)copy_map->offset, (uint64_t)copy_map->size, target_map, target_page_shift, copy, *target_copy_map_p, VM_MAP_COPY_PAGE_SHIFT(*target_copy_map_p), (uint64_t)(*target_copy_map_p)->offset, (uint64_t)(*target_copy_map_p)->size, (uint64_t)*trimmed_start_p, (uint64_t)*overmap_start_p, (uint64_t)*overmap_end_p); + return KERN_SUCCESS; + } + } else if (misalignments && !copy) { + /* can't "share" if misaligned */ + DEBUG4K_ADJUST("unsupported sharing\n"); +#if MACH_ASSERT + if (debug4k_panic_on_misaligned_sharing) { + panic("DEBUG4k %s:%d unsupported sharing\n", __FUNCTION__, __LINE__); + } +#endif /* MACH_ASSERT */ + DEBUG4K_ADJUST("copy_map %p (%d) target_map %p (%d) copy %d target_copy_map %p -> KERN_NOT_SUPPORTED\n", copy_map, copy_page_shift, target_map, target_page_shift, copy, *target_copy_map_p); + return KERN_NOT_SUPPORTED; + } else { + /* can't virtual-copy if misaligned (but can physical-copy) */ + DEBUG4K_ADJUST("mis-aligned copying\n"); + } + + /* get a "target_copy_map" if needed and switch to it */ + vm_map_copy_adjust_get_target_copy_map(copy_map, &target_copy_map); + copy_map = target_copy_map; + + if (misalignments && copy) { + vm_map_size_t target_copy_map_size; + + /* + * Can't do copy-on-write with misaligned mappings. + * Replace the mappings with a physical copy of the original + * mappings' contents. 
+ */ + target_copy_map_size = target_copy_map->size; + vm_map_copy_to_physcopy(target_copy_map, target_map); + *target_copy_map_p = target_copy_map; + *overmap_start_p = 0; + *overmap_end_p = target_copy_map->size - target_copy_map_size; + DEBUG4K_ADJUST("copy_map %p (%d offset 0x%llx size 0x%llx) target_map %p (%d) copy %d target_copy_map %p (%d offset 0x%llx size 0x%llx)-> trimmed 0x%llx overmap start 0x%llx end 0x%llx PHYSCOPY\n", copy_map, copy_page_shift, (uint64_t)copy_map->offset, (uint64_t)copy_map->size, target_map, target_page_shift, copy, *target_copy_map_p, VM_MAP_COPY_PAGE_SHIFT(*target_copy_map_p), (uint64_t)(*target_copy_map_p)->offset, (uint64_t)(*target_copy_map_p)->size, (uint64_t)*trimmed_start_p, (uint64_t)*overmap_start_p, (uint64_t)*overmap_end_p); + return KERN_SUCCESS; + } + + /* apply the adjustments */ + misalignments = 0; + overmap_start = 0; + overmap_end = 0; + /* remove copy_map->offset, so that everything starts at offset 0 */ + addr_adjustment = copy_map->offset; + /* also remove whatever we trimmed from the start */ + addr_adjustment += *trimmed_start_p; + for (target_entry = vm_map_copy_first_entry(target_copy_map); + target_entry != vm_map_copy_to_entry(target_copy_map); + target_entry = target_entry->vme_next) { + vm_object_offset_t object_offset_start, object_offset_end; + + DEBUG4K_ADJUST("copy %p (%d 0x%llx 0x%llx) entry %p [ 0x%llx 0x%llx ] object %p offset 0x%llx BEFORE\n", target_copy_map, VM_MAP_COPY_PAGE_SHIFT(target_copy_map), target_copy_map->offset, (uint64_t)target_copy_map->size, target_entry, (uint64_t)target_entry->vme_start, (uint64_t)target_entry->vme_end, VME_OBJECT(target_entry), VME_OFFSET(target_entry)); + object_offset_start = VME_OFFSET(target_entry); + if (object_offset_start & target_page_mask) { + DEBUG4K_ADJUST("copy %p (%d 0x%llx 0x%llx) entry %p [ 0x%llx 0x%llx ] object %p offset 0x%llx misaligned at start\n", target_copy_map, VM_MAP_COPY_PAGE_SHIFT(target_copy_map), target_copy_map->offset, (uint64_t)target_copy_map->size, target_entry, (uint64_t)target_entry->vme_start, (uint64_t)target_entry->vme_end, VME_OBJECT(target_entry), VME_OFFSET(target_entry)); + if (target_entry == vm_map_copy_first_entry(target_copy_map)) { + /* + * start of 1st entry is mis-aligned: + * re-adjust by over-mapping. 
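/*
 * Illustrative sketch (not from the xnu sources): when a 4K-grained object
 * offset must be expressed in larger target pages, the first and last entries
 * are "over-mapped": the start is pulled back to the previous target page
 * boundary and the end pushed forward to the next one, and the entry and copy
 * sizes grow by the same amounts. The padding computed just below reduces to:
 */
#include <stdint.h>

static void
sk_overmap_padding(uint64_t obj_off_start, uint64_t obj_off_end,
    uint64_t target_page_mask,
    uint64_t *overmap_start, uint64_t *overmap_end)
{
        /* distance back to the previous target-page boundary */
        *overmap_start = obj_off_start - (obj_off_start & ~target_page_mask);

        /* distance forward to the next target-page boundary */
        *overmap_end = ((obj_off_end + target_page_mask) & ~target_page_mask)
            - obj_off_end;

        /*
         * e.g. 16K target (mask 0x3fff), object offsets [0x1000, 0x5000):
         * overmap_start = 0x1000, overmap_end = 0x3000
         */
}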
+ */ + overmap_start = object_offset_start - trunc_page_mask_64(object_offset_start, target_page_mask); + DEBUG4K_ADJUST("entry %p offset 0x%llx copy %d -> overmap_start 0x%llx\n", target_entry, VME_OFFSET(target_entry), copy, (uint64_t)overmap_start); + VME_OFFSET_SET(target_entry, VME_OFFSET(target_entry) - overmap_start); + } else { + misalignments++; + DEBUG4K_ADJUST("entry %p offset 0x%llx copy %d -> misalignments %d\n", target_entry, VME_OFFSET(target_entry), copy, misalignments); + assert(copy); + } + } + + if (target_entry == vm_map_copy_first_entry(target_copy_map)) { + target_size += overmap_start; + } else { + target_entry->vme_start += overmap_start; + } + target_entry->vme_end += overmap_start; + + object_offset_end = VME_OFFSET(target_entry) + target_entry->vme_end - target_entry->vme_start; + if (object_offset_end & target_page_mask) { + DEBUG4K_ADJUST("copy %p (%d 0x%llx 0x%llx) entry %p [ 0x%llx 0x%llx ] object %p offset 0x%llx misaligned at end\n", target_copy_map, VM_MAP_COPY_PAGE_SHIFT(target_copy_map), target_copy_map->offset, (uint64_t)target_copy_map->size, target_entry, (uint64_t)target_entry->vme_start, (uint64_t)target_entry->vme_end, VME_OBJECT(target_entry), VME_OFFSET(target_entry)); + if (target_entry->vme_next == vm_map_copy_to_entry(target_copy_map)) { + /* + * end of last entry is mis-aligned: re-adjust by over-mapping. + */ + overmap_end = round_page_mask_64(object_offset_end, target_page_mask) - object_offset_end; + DEBUG4K_ADJUST("entry %p offset 0x%llx copy %d -> overmap_end 0x%llx\n", target_entry, VME_OFFSET(target_entry), copy, (uint64_t)overmap_end); + target_entry->vme_end += overmap_end; + target_size += overmap_end; + } else { + misalignments++; + DEBUG4K_ADJUST("entry %p offset 0x%llx copy %d -> misalignments %d\n", target_entry, VME_OFFSET(target_entry), copy, misalignments); + assert(copy); + } + } + target_entry->vme_start -= addr_adjustment; + target_entry->vme_end -= addr_adjustment; + DEBUG4K_ADJUST("copy %p (%d 0x%llx 0x%llx) entry %p [ 0x%llx 0x%llx ] object %p offset 0x%llx AFTER\n", target_copy_map, VM_MAP_COPY_PAGE_SHIFT(target_copy_map), target_copy_map->offset, (uint64_t)target_copy_map->size, target_entry, (uint64_t)target_entry->vme_start, (uint64_t)target_entry->vme_end, VME_OBJECT(target_entry), VME_OFFSET(target_entry)); + } + + target_copy_map->size = target_size; + target_copy_map->offset += overmap_start; + target_copy_map->offset -= addr_adjustment; + target_copy_map->cpy_hdr.page_shift = target_page_shift; + +// assert(VM_MAP_PAGE_ALIGNED(target_copy_map->size, target_page_mask)); +// assert(VM_MAP_PAGE_ALIGNED(target_copy_map->offset, FOURK_PAGE_MASK)); + assert(overmap_start < VM_MAP_PAGE_SIZE(target_map)); + assert(overmap_end < VM_MAP_PAGE_SIZE(target_map)); + + *target_copy_map_p = target_copy_map; + *overmap_start_p = overmap_start; + *overmap_end_p = overmap_end; + + DEBUG4K_ADJUST("copy_map %p (%d offset 0x%llx size 0x%llx) target_map %p (%d) copy %d target_copy_map %p (%d offset 0x%llx size 0x%llx) -> trimmed 0x%llx overmap start 0x%llx end 0x%llx KERN_SUCCESS\n", copy_map, copy_page_shift, (uint64_t)copy_map->offset, (uint64_t)copy_map->size, target_map, target_page_shift, copy, *target_copy_map_p, VM_MAP_COPY_PAGE_SHIFT(*target_copy_map_p), (uint64_t)(*target_copy_map_p)->offset, (uint64_t)(*target_copy_map_p)->size, (uint64_t)*trimmed_start_p, (uint64_t)*overmap_start_p, (uint64_t)*overmap_end_p); + return KERN_SUCCESS; +} + +kern_return_t +vm_map_range_physical_size( + vm_map_t map, + vm_map_address_t start, + 
mach_vm_size_t size, + mach_vm_size_t * phys_size) +{ + kern_return_t kr; + vm_map_copy_t copy_map, target_copy_map; + vm_map_offset_t adjusted_start, adjusted_end; + vm_map_size_t adjusted_size; + vm_prot_t cur_prot, max_prot; + vm_map_offset_t overmap_start, overmap_end, trimmed_start; + vm_map_kernel_flags_t vmk_flags; + + adjusted_start = vm_map_trunc_page(start, VM_MAP_PAGE_MASK(map)); + adjusted_end = vm_map_round_page(start + size, VM_MAP_PAGE_MASK(map)); + adjusted_size = adjusted_end - adjusted_start; + *phys_size = adjusted_size; + if (VM_MAP_PAGE_SIZE(map) == PAGE_SIZE) { + return KERN_SUCCESS; + } + if (start == 0) { + adjusted_start = vm_map_trunc_page(start, PAGE_MASK); + adjusted_end = vm_map_round_page(start + size, PAGE_MASK); + adjusted_size = adjusted_end - adjusted_start; + *phys_size = adjusted_size; + return KERN_SUCCESS; + } + if (adjusted_size == 0) { + DEBUG4K_SHARE("map %p start 0x%llx size 0x%llx adjusted 0x%llx -> phys_size 0!\n", map, (uint64_t)start, (uint64_t)size, (uint64_t)adjusted_size); + *phys_size = 0; + return KERN_SUCCESS; + } + + vmk_flags = VM_MAP_KERNEL_FLAGS_NONE; + vmk_flags.vmkf_copy_pageable = TRUE; + vmk_flags.vmkf_copy_same_map = TRUE; + assert(adjusted_size != 0); + kr = vm_map_copy_extract(map, adjusted_start, adjusted_size, + VM_PROT_NONE, /* required_protection: no check here */ + FALSE /* copy */, + ©_map, + &cur_prot, &max_prot, VM_INHERIT_DEFAULT, + vmk_flags); + if (kr != KERN_SUCCESS) { + DEBUG4K_ERROR("map %p start 0x%llx 0x%llx size 0x%llx 0x%llx kr 0x%x\n", map, (uint64_t)start, (uint64_t)adjusted_start, size, (uint64_t)adjusted_size, kr); + //assert(0); + *phys_size = 0; + return kr; + } + assert(copy_map != VM_MAP_COPY_NULL); + target_copy_map = copy_map; + DEBUG4K_ADJUST("adjusting...\n"); + kr = vm_map_copy_adjust_to_target( + copy_map, + start - adjusted_start, /* offset */ + size, /* size */ + kernel_map, + FALSE, /* copy */ + &target_copy_map, + &overmap_start, + &overmap_end, + &trimmed_start); + if (kr == KERN_SUCCESS) { + if (target_copy_map->size != *phys_size) { + DEBUG4K_ADJUST("map %p (%d) start 0x%llx size 0x%llx adjusted_start 0x%llx adjusted_end 0x%llx overmap_start 0x%llx overmap_end 0x%llx trimmed_start 0x%llx phys_size 0x%llx -> 0x%llx\n", map, VM_MAP_PAGE_SHIFT(map), (uint64_t)start, (uint64_t)size, (uint64_t)adjusted_start, (uint64_t)adjusted_end, (uint64_t)overmap_start, (uint64_t)overmap_end, (uint64_t)trimmed_start, (uint64_t)*phys_size, (uint64_t)target_copy_map->size); + } + *phys_size = target_copy_map->size; + } else { + DEBUG4K_ERROR("map %p start 0x%llx 0x%llx size 0x%llx 0x%llx kr 0x%x\n", map, (uint64_t)start, (uint64_t)adjusted_start, size, (uint64_t)adjusted_size, kr); + //assert(0); + *phys_size = 0; + } + vm_map_copy_discard(copy_map); + copy_map = VM_MAP_COPY_NULL; + + return kr; +} + + +kern_return_t +memory_entry_check_for_adjustment( + vm_map_t src_map, + ipc_port_t port, + vm_map_offset_t *overmap_start, + vm_map_offset_t *overmap_end) +{ + kern_return_t kr = KERN_SUCCESS; + vm_map_copy_t copy_map = VM_MAP_COPY_NULL, target_copy_map = VM_MAP_COPY_NULL; + + assert(port); + assertf(ip_kotype(port) == IKOT_NAMED_ENTRY, "Port Type expected: %d...received:%d\n", IKOT_NAMED_ENTRY, ip_kotype(port)); + + vm_named_entry_t named_entry; + + named_entry = (vm_named_entry_t) port->ip_kobject; + named_entry_lock(named_entry); + copy_map = named_entry->backing.copy; + target_copy_map = copy_map; + + if (src_map && VM_MAP_PAGE_SHIFT(src_map) < PAGE_SHIFT) { + vm_map_offset_t trimmed_start; + + 
trimmed_start = 0; + DEBUG4K_ADJUST("adjusting...\n"); + kr = vm_map_copy_adjust_to_target( + copy_map, + 0, /* offset */ + copy_map->size, /* size */ + src_map, + FALSE, /* copy */ + &target_copy_map, + overmap_start, + overmap_end, + &trimmed_start); + assert(trimmed_start == 0); + } + named_entry_unlock(named_entry); + + return kr; } + /* * Routine: vm_remap * @@ -16246,13 +17791,23 @@ vm_map_remap( vm_map_entry_t entry; vm_map_entry_t insp_entry = VM_MAP_ENTRY_NULL; vm_map_entry_t new_entry; - struct vm_map_header map_header; + vm_map_copy_t copy_map; vm_map_offset_t offset_in_mapping; + vm_map_size_t target_size = 0; + vm_map_size_t src_page_mask, target_page_mask; + vm_map_offset_t overmap_start, overmap_end, trimmed_start; + vm_map_offset_t initial_memory_address; + vm_map_size_t initial_size; if (target_map == VM_MAP_NULL) { return KERN_INVALID_ARGUMENT; } + initial_memory_address = memory_address; + initial_size = size; + src_page_mask = VM_MAP_PAGE_MASK(src_map); + target_page_mask = VM_MAP_PAGE_MASK(target_map); + switch (inheritance) { case VM_INHERIT_NONE: case VM_INHERIT_COPY: @@ -16260,11 +17815,19 @@ vm_map_remap( if (size != 0 && src_map != VM_MAP_NULL) { break; } - /*FALL THRU*/ + OS_FALLTHROUGH; default: return KERN_INVALID_ARGUMENT; } + if (src_page_mask != target_page_mask) { + if (copy) { + DEBUG4K_COPY("src_map %p pgsz 0x%x addr 0x%llx size 0x%llx copy %d -> target_map %p pgsz 0x%x\n", src_map, VM_MAP_PAGE_SIZE(src_map), (uint64_t)memory_address, (uint64_t)size, copy, target_map, VM_MAP_PAGE_SIZE(target_map)); + } else { + DEBUG4K_SHARE("src_map %p pgsz 0x%x addr 0x%llx size 0x%llx copy %d -> target_map %p pgsz 0x%x\n", src_map, VM_MAP_PAGE_SIZE(src_map), (uint64_t)memory_address, (uint64_t)size, copy, target_map, VM_MAP_PAGE_SIZE(target_map)); + } + } + /* * If the user is requesting that we return the address of the * first byte of the data (rather than the base of the page), @@ -16286,11 +17849,33 @@ vm_map_remap( * 0x1000 and page 0x2000 in the region we remap. */ if ((flags & VM_FLAGS_RETURN_DATA_ADDR) != 0) { - offset_in_mapping = memory_address - vm_map_trunc_page(memory_address, PAGE_MASK); - size = vm_map_round_page(memory_address + size - vm_map_trunc_page(memory_address, PAGE_MASK), PAGE_MASK); + vm_map_offset_t range_start, range_end; + + range_start = vm_map_trunc_page(memory_address, src_page_mask); + range_end = vm_map_round_page(memory_address + size, src_page_mask); + memory_address = range_start; + size = range_end - range_start; + offset_in_mapping = initial_memory_address - memory_address; } else { - size = vm_map_round_page(size, PAGE_MASK); + /* + * IMPORTANT: + * This legacy code path is broken: for the range mentioned + * above [ memory_address = 0x1ff0,size = 0x20 ], which spans + * two 4k pages, it yields [ memory_address = 0x1000, + * size = 0x1000 ], which covers only the first 4k page. + * BUT some code unfortunately depends on this bug, so we + * can't fix it without breaking something. + * New code should get automatically opted in the new + * behavior with the new VM_FLAGS_RETURN_DATA_ADDR flags. 
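/*
 * Illustrative sketch (not from the xnu sources): working the patch's own
 * VM_FLAGS_RETURN_DATA_ADDR example on a 4K source map. The request
 * [0x1ff0, 0x2010) straddles two 4K pages, so the new path remaps both pages
 * and reports the data offset within the mapping, while the legacy path above
 * truncates the range to a single page.
 */
#include <stdint.h>

#define SK_SRC_PAGE_MASK 0xfffULL  /* 4K source map assumed */

static void
sk_return_data_addr_example(void)
{
        uint64_t memory_address = 0x1ff0;
        uint64_t size = 0x20;

        uint64_t range_start = memory_address & ~SK_SRC_PAGE_MASK;       /* 0x1000 */
        uint64_t range_end = (memory_address + size + SK_SRC_PAGE_MASK)
            & ~SK_SRC_PAGE_MASK;                                         /* 0x3000 */
        uint64_t offset_in_mapping = memory_address - range_start;       /* 0x0ff0 */
        uint64_t remap_size = range_end - range_start;                   /* 0x2000 */

        /* legacy path: trunc(0x1ff0) = 0x1000, round(0x20) = 0x1000, one page only */
        (void)offset_in_mapping;
        (void)remap_size;
}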
+ */ + offset_in_mapping = 0; + memory_address = vm_map_trunc_page(memory_address, src_page_mask); + size = vm_map_round_page(size, src_page_mask); + initial_memory_address = memory_address; + initial_size = size; } + + if (size == 0) { return KERN_INVALID_ARGUMENT; } @@ -16302,35 +17887,82 @@ vm_map_remap( } } - result = vm_map_remap_extract(src_map, memory_address, - size, copy, &map_header, + vmk_flags.vmkf_copy_pageable = target_map->hdr.entries_pageable; + vmk_flags.vmkf_copy_same_map = (src_map == target_map); + + assert(size != 0); + result = vm_map_copy_extract(src_map, + memory_address, + size, + VM_PROT_NONE, /* required_protection: no check here */ + copy, ©_map, cur_protection, max_protection, inheritance, - target_map->hdr.entries_pageable, - src_map == target_map, vmk_flags); - if (result != KERN_SUCCESS) { return result; } + assert(copy_map != VM_MAP_COPY_NULL); + + overmap_start = 0; + overmap_end = 0; + trimmed_start = 0; + target_size = size; + if (src_page_mask != target_page_mask) { + vm_map_copy_t target_copy_map; + + target_copy_map = copy_map; /* can modify "copy_map" itself */ + DEBUG4K_ADJUST("adjusting...\n"); + result = vm_map_copy_adjust_to_target( + copy_map, + offset_in_mapping, /* offset */ + initial_size, + target_map, + copy, + &target_copy_map, + &overmap_start, + &overmap_end, + &trimmed_start); + if (result != KERN_SUCCESS) { + DEBUG4K_COPY("failed to adjust 0x%x\n", result); + vm_map_copy_discard(copy_map); + return result; + } + if (trimmed_start == 0) { + /* nothing trimmed: no adjustment needed */ + } else if (trimmed_start >= offset_in_mapping) { + /* trimmed more than offset_in_mapping: nothing left */ + assert(overmap_start == 0); + assert(overmap_end == 0); + offset_in_mapping = 0; + } else { + /* trimmed some of offset_in_mapping: adjust */ + assert(overmap_start == 0); + assert(overmap_end == 0); + offset_in_mapping -= trimmed_start; + } + offset_in_mapping += overmap_start; + target_size = target_copy_map->size; + } /* * Allocate/check a range of free virtual address * space for the target */ - *address = vm_map_trunc_page(*address, - VM_MAP_PAGE_MASK(target_map)); + *address = vm_map_trunc_page(*address, target_page_mask); vm_map_lock(target_map); - result = vm_map_remap_range_allocate(target_map, address, size, + target_size = vm_map_round_page(target_size, target_page_mask); + result = vm_map_remap_range_allocate(target_map, address, + target_size, mask, flags, vmk_flags, tag, &insp_entry); - for (entry = map_header.links.next; - entry != CAST_TO_VM_MAP_ENTRY(&map_header.links); + for (entry = vm_map_copy_first_entry(copy_map); + entry != vm_map_copy_to_entry(copy_map); entry = new_entry) { new_entry = entry->vme_next; - _vm_map_store_entry_unlink(&map_header, entry); + vm_map_copy_entry_unlink(copy_map, entry); if (result == KERN_SUCCESS) { if (flags & VM_FLAGS_RESILIENT_CODESIGN) { /* no codesigning -> read-only access */ @@ -16347,6 +17979,9 @@ vm_map_remap( VME_OBJECT(entry)->internal)) { entry->vme_resilient_media = TRUE; } + assert(VM_MAP_PAGE_ALIGNED(entry->vme_start, MIN(target_page_mask, PAGE_MASK))); + assert(VM_MAP_PAGE_ALIGNED(entry->vme_end, MIN(target_page_mask, PAGE_MASK))); + assert(VM_MAP_PAGE_ALIGNED(VME_OFFSET(entry), MIN(target_page_mask, PAGE_MASK))); vm_map_store_entry_link(target_map, insp_entry, entry, vmk_flags); insp_entry = entry; @@ -16356,7 +17991,7 @@ vm_map_remap( } else { vm_map_deallocate(VME_SUBMAP(entry)); } - _vm_map_entry_dispose(&map_header, entry); + vm_map_copy_entry_dispose(copy_map, entry); } } @@ 
-16373,7 +18008,7 @@ vm_map_remap( } if (result == KERN_SUCCESS) { - target_map->size += size; + target_map->size += target_size; SAVE_HINT_MAP_WRITE(target_map, insp_entry); #if PMAP_CS @@ -16420,6 +18055,12 @@ vm_map_remap( *address += offset_in_mapping; } + if (src_page_mask != target_page_mask) { + DEBUG4K_SHARE("vm_remap(%p 0x%llx 0x%llx copy=%d-> %p 0x%llx 0x%llx result=0x%x\n", src_map, (uint64_t)memory_address, (uint64_t)size, copy, target_map, (uint64_t)*address, (uint64_t)offset_in_mapping, result); + } + vm_map_copy_discard(copy_map); + copy_map = VM_MAP_COPY_NULL; + return result; } @@ -16994,6 +18635,165 @@ vm_map_purgable_control( return kr; } +void +vm_map_footprint_query_page_info( + vm_map_t map, + vm_map_entry_t map_entry, + vm_map_offset_t curr_s_offset, + int *disposition_p) +{ + int pmap_disp; + vm_object_t object; + int disposition; + int effective_page_size; + + vm_map_lock_assert_held(map); + assert(!map->has_corpse_footprint); + assert(curr_s_offset >= map_entry->vme_start); + assert(curr_s_offset < map_entry->vme_end); + + object = VME_OBJECT(map_entry); + if (object == VM_OBJECT_NULL) { + *disposition_p = 0; + return; + } + + effective_page_size = MIN(PAGE_SIZE, VM_MAP_PAGE_SIZE(map)); + + pmap_disp = 0; + if (object == VM_OBJECT_NULL) { + /* nothing mapped here: no need to ask */ + *disposition_p = 0; + return; + } else if (map_entry->is_sub_map && + !map_entry->use_pmap) { + /* nested pmap: no footprint */ + *disposition_p = 0; + return; + } + + /* + * Query the pmap. + */ + pmap_query_page_info(map->pmap, curr_s_offset, &pmap_disp); + + /* + * Compute this page's disposition. + */ + disposition = 0; + + /* deal with "alternate accounting" first */ + if (!map_entry->is_sub_map && + object->vo_no_footprint) { + /* does not count in footprint */ + assertf(!map_entry->use_pmap, "offset 0x%llx map_entry %p", (uint64_t) curr_s_offset, map_entry); + } else if (!map_entry->is_sub_map && + (object->purgable == VM_PURGABLE_NONVOLATILE || + (object->purgable == VM_PURGABLE_DENY && + object->vo_ledger_tag)) && + VM_OBJECT_OWNER(object) != NULL && + VM_OBJECT_OWNER(object)->map == map) { + assertf(!map_entry->use_pmap, "offset 0x%llx map_entry %p", (uint64_t) curr_s_offset, map_entry); + if ((((curr_s_offset + - map_entry->vme_start + + VME_OFFSET(map_entry)) + / effective_page_size) < + (object->resident_page_count + + vm_compressor_pager_get_count(object->pager)))) { + /* + * Non-volatile purgeable object owned + * by this task: report the first + * "#resident + #compressed" pages as + * "resident" (to show that they + * contribute to the footprint) but not + * "dirty" (to avoid double-counting + * with the fake "non-volatile" region + * we'll report at the end of the + * address space to account for all + * (mapped or not) non-volatile memory + * owned by this task. 
+ */ + disposition |= VM_PAGE_QUERY_PAGE_PRESENT; + } + } else if (!map_entry->is_sub_map && + (object->purgable == VM_PURGABLE_VOLATILE || + object->purgable == VM_PURGABLE_EMPTY) && + VM_OBJECT_OWNER(object) != NULL && + VM_OBJECT_OWNER(object)->map == map) { + assertf(!map_entry->use_pmap, "offset 0x%llx map_entry %p", (uint64_t) curr_s_offset, map_entry); + if ((((curr_s_offset + - map_entry->vme_start + + VME_OFFSET(map_entry)) + / effective_page_size) < + object->wired_page_count)) { + /* + * Volatile|empty purgeable object owned + * by this task: report the first + * "#wired" pages as "resident" (to + * show that they contribute to the + * footprint) but not "dirty" (to avoid + * double-counting with the fake + * "non-volatile" region we'll report + * at the end of the address space to + * account for all (mapped or not) + * non-volatile memory owned by this + * task. + */ + disposition |= VM_PAGE_QUERY_PAGE_PRESENT; + } + } else if (!map_entry->is_sub_map && + map_entry->iokit_acct && + object->internal && + object->purgable == VM_PURGABLE_DENY) { + /* + * Non-purgeable IOKit memory: phys_footprint + * includes the entire virtual mapping. + */ + assertf(!map_entry->use_pmap, "offset 0x%llx map_entry %p", (uint64_t) curr_s_offset, map_entry); + disposition |= VM_PAGE_QUERY_PAGE_PRESENT; + disposition |= VM_PAGE_QUERY_PAGE_DIRTY; + } else if (pmap_disp & (PMAP_QUERY_PAGE_ALTACCT | + PMAP_QUERY_PAGE_COMPRESSED_ALTACCT)) { + /* alternate accounting */ +#if (__arm__ || __arm64__) && (DEVELOPMENT || DEBUG) + if (map->pmap->footprint_was_suspended) { + /* + * The assertion below can fail if dyld + * suspended footprint accounting + * while doing some adjustments to + * this page; the mapping would say + * "use pmap accounting" but the page + * would be marked "alternate + * accounting". 
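/*
 * Illustrative sketch (not from the xnu sources): the purgeable-object cases
 * above report a page as "present" only while its index within the mapping
 * stays below the number of pages the owned object can account for
 * (#resident + #compressed for non-volatile, #wired for volatile or empty),
 * which keeps the footprint total consistent with the fake non-volatile
 * region reported at the end of the address space. The index test, standalone
 * with hypothetical names:
 */
#include <stdbool.h>
#include <stdint.h>

static bool
sk_counts_toward_footprint(uint64_t va, uint64_t vme_start,
    uint64_t vme_offset, uint64_t effective_page_size,
    uint64_t accountable_pages)
{
        uint64_t page_index = (va - vme_start + vme_offset) / effective_page_size;

        return page_index < accountable_pages;
}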
+ */ + } else +#endif /* (__arm__ || __arm64__) && (DEVELOPMENT || DEBUG) */ + { + assertf(!map_entry->use_pmap, "offset 0x%llx map_entry %p", (uint64_t) curr_s_offset, map_entry); + } + disposition = 0; + } else { + if (pmap_disp & PMAP_QUERY_PAGE_PRESENT) { + assertf(map_entry->use_pmap, "offset 0x%llx map_entry %p", (uint64_t) curr_s_offset, map_entry); + disposition |= VM_PAGE_QUERY_PAGE_PRESENT; + disposition |= VM_PAGE_QUERY_PAGE_REF; + if (pmap_disp & PMAP_QUERY_PAGE_INTERNAL) { + disposition |= VM_PAGE_QUERY_PAGE_DIRTY; + } else { + disposition |= VM_PAGE_QUERY_PAGE_EXTERNAL; + } + if (pmap_disp & PMAP_QUERY_PAGE_REUSABLE) { + disposition |= VM_PAGE_QUERY_PAGE_REUSABLE; + } + } else if (pmap_disp & PMAP_QUERY_PAGE_COMPRESSED) { + assertf(map_entry->use_pmap, "offset 0x%llx map_entry %p", (uint64_t) curr_s_offset, map_entry); + disposition |= VM_PAGE_QUERY_PAGE_PAGED_OUT; + } + } + + *disposition_p = disposition; +} + kern_return_t vm_map_page_query_internal( vm_map_t target_map, @@ -17031,8 +18831,9 @@ vm_map_page_info( mach_msg_type_number_t *count) { return vm_map_page_range_info_internal(map, - offset, /* start of range */ - (offset + 1), /* this will get rounded in the call to the page boundary */ + offset, /* start of range */ + (offset + 1), /* this will get rounded in the call to the page boundary */ + (int)-1, /* effective_page_shift: unspecified */ flavor, info, count); @@ -17043,6 +18844,7 @@ vm_map_page_range_info_internal( vm_map_t map, vm_map_offset_t start_offset, vm_map_offset_t end_offset, + int effective_page_shift, vm_page_info_flavor_t flavor, vm_page_info_t info, mach_msg_type_number_t *count) @@ -17059,6 +18861,8 @@ vm_map_page_range_info_internal( vm_map_offset_t start = 0, end = 0, curr_s_offset = 0, curr_e_offset = 0; boolean_t do_region_footprint; ledger_amount_t ledger_resident, ledger_compressed; + int effective_page_size; + vm_map_offset_t effective_page_mask; switch (flavor) { case VM_PAGE_INFO_BASIC: @@ -17077,6 +18881,15 @@ vm_map_page_range_info_internal( return KERN_INVALID_ARGUMENT; } + if (effective_page_shift == -1) { + effective_page_shift = vm_self_region_page_shift_safely(map); + if (effective_page_shift == -1) { + return KERN_INVALID_ARGUMENT; + } + } + effective_page_size = (1 << effective_page_shift); + effective_page_mask = effective_page_size - 1; + do_region_footprint = task_self_region_footprint(); disposition = 0; ref_count = 0; @@ -17084,9 +18897,9 @@ vm_map_page_range_info_internal( info_idx = 0; /* Tracks the next index within the info structure to be filled.*/ retval = KERN_SUCCESS; - offset_in_page = start_offset & PAGE_MASK; - start = vm_map_trunc_page(start_offset, PAGE_MASK); - end = vm_map_round_page(end_offset, PAGE_MASK); + offset_in_page = start_offset & effective_page_mask; + start = vm_map_trunc_page(start_offset, effective_page_mask); + end = vm_map_round_page(end_offset, effective_page_mask); if (end < start) { return KERN_INVALID_ARGUMENT; @@ -17142,14 +18955,14 @@ vm_map_page_range_info_internal( basic_info = (vm_page_info_basic_t) (((uintptr_t) info) + (info_idx * sizeof(struct vm_page_info_basic))); basic_info->disposition = disposition; basic_info->ref_count = 1; - basic_info->object_id = INFO_MAKE_FAKE_OBJECT_ID(map, task_ledgers.purgeable_nonvolatile); + basic_info->object_id = VM_OBJECT_ID_FAKE(map, task_ledgers.purgeable_nonvolatile); basic_info->offset = 0; basic_info->depth = 0; info_idx++; break; } - curr_s_offset += PAGE_SIZE; + curr_s_offset += effective_page_size; continue; } @@ -17198,7 +19011,7 @@ 
vm_map_page_range_info_internal( assert(curr_e_offset >= curr_s_offset); - uint64_t num_pages = (curr_e_offset - curr_s_offset) >> PAGE_SHIFT; + uint64_t num_pages = (curr_e_offset - curr_s_offset) >> effective_page_shift; void *info_ptr = (void*) (((uintptr_t) info) + (info_idx * sizeof(struct vm_page_info_basic))); @@ -17234,9 +19047,13 @@ vm_map_page_range_info_internal( submap_info = (vm_page_info_t) (((uintptr_t) info) + (info_idx * sizeof(struct vm_page_info_basic))); + assertf(VM_MAP_PAGE_SHIFT(sub_map) >= VM_MAP_PAGE_SHIFT(map), + "Submap page size (%d) differs from current map (%d)\n", VM_MAP_PAGE_SIZE(sub_map), VM_MAP_PAGE_SIZE(map)); + retval = vm_map_page_range_info_internal(sub_map, submap_s_offset, submap_e_offset, + effective_page_shift, VM_PAGE_INFO_BASIC, (vm_page_info_t) submap_info, count); @@ -17247,7 +19064,7 @@ vm_map_page_range_info_internal( vm_map_deallocate(sub_map); /* Move the "info" index by the number of pages we inspected.*/ - info_idx += range_len >> PAGE_SHIFT; + info_idx += range_len >> effective_page_shift; /* Move our current offset by the size of the range we inspected.*/ curr_s_offset += range_len; @@ -17256,6 +19073,7 @@ vm_map_page_range_info_internal( } object = VME_OBJECT(map_entry); + if (object == VM_OBJECT_NULL) { /* * We don't have an object here and, hence, @@ -17265,7 +19083,7 @@ vm_map_page_range_info_internal( curr_e_offset = MIN(map_entry->vme_end, end); - uint64_t num_pages = (curr_e_offset - curr_s_offset) >> PAGE_SHIFT; + uint64_t num_pages = (curr_e_offset - curr_s_offset) >> effective_page_shift; void *info_ptr = (void*) (((uintptr_t) info) + (info_idx * sizeof(struct vm_page_info_basic))); @@ -17279,10 +19097,7 @@ vm_map_page_range_info_internal( } if (do_region_footprint) { - int pmap_disp; - disposition = 0; - pmap_disp = 0; if (map->has_corpse_footprint) { /* * Query the page info data we saved @@ -17291,129 +19106,31 @@ vm_map_page_range_info_internal( vm_map_corpse_footprint_query_page_info( map, curr_s_offset, - &pmap_disp); + &disposition); } else { /* - * Query the pmap. - */ - pmap_query_page_info(map->pmap, - curr_s_offset, - &pmap_disp); - } - if (object->purgable == VM_PURGABLE_NONVOLATILE && - /* && not tagged as no-footprint? */ - VM_OBJECT_OWNER(object) != NULL && - VM_OBJECT_OWNER(object)->map == map) { - if ((((curr_s_offset - - map_entry->vme_start - + VME_OFFSET(map_entry)) - / PAGE_SIZE) < - (object->resident_page_count + - vm_compressor_pager_get_count(object->pager)))) { - /* - * Non-volatile purgeable object owned - * by this task: report the first - * "#resident + #compressed" pages as - * "resident" (to show that they - * contribute to the footprint) but not - * "dirty" (to avoid double-counting - * with the fake "non-volatile" region - * we'll report at the end of the - * address space to account for all - * (mapped or not) non-volatile memory - * owned by this task. - */ - disposition |= VM_PAGE_QUERY_PAGE_PRESENT; - } - } else if ((object->purgable == VM_PURGABLE_VOLATILE || - object->purgable == VM_PURGABLE_EMPTY) && - /* && not tagged as no-footprint? 
*/ - VM_OBJECT_OWNER(object) != NULL && - VM_OBJECT_OWNER(object)->map == map) { - if ((((curr_s_offset - - map_entry->vme_start - + VME_OFFSET(map_entry)) - / PAGE_SIZE) < - object->wired_page_count)) { - /* - * Volatile|empty purgeable object owned - * by this task: report the first - * "#wired" pages as "resident" (to - * show that they contribute to the - * footprint) but not "dirty" (to avoid - * double-counting with the fake - * "non-volatile" region we'll report - * at the end of the address space to - * account for all (mapped or not) - * non-volatile memory owned by this - * task. - */ - disposition |= VM_PAGE_QUERY_PAGE_PRESENT; - } - } else if (map_entry->iokit_acct && - object->internal && - object->purgable == VM_PURGABLE_DENY) { - /* - * Non-purgeable IOKit memory: phys_footprint - * includes the entire virtual mapping. + * Query the live pmap for footprint info + * about this page. */ - assertf(!map_entry->use_pmap, "offset 0x%llx map_entry %p", (uint64_t) curr_s_offset, map_entry); - disposition |= VM_PAGE_QUERY_PAGE_PRESENT; - disposition |= VM_PAGE_QUERY_PAGE_DIRTY; - } else if (pmap_disp & (PMAP_QUERY_PAGE_ALTACCT | - PMAP_QUERY_PAGE_COMPRESSED_ALTACCT)) { - /* alternate accounting */ -#if CONFIG_EMBEDDED && (DEVELOPMENT || DEBUG) - if (map->pmap->footprint_was_suspended || - /* - * XXX corpse does not know if original - * pmap had its footprint suspended... - */ - map->has_corpse_footprint) { - /* - * The assertion below can fail if dyld - * suspended footprint accounting - * while doing some adjustments to - * this page; the mapping would say - * "use pmap accounting" but the page - * would be marked "alternate - * accounting". - */ - } else -#endif /* CONFIG_EMBEDDED && (DEVELOPMENT || DEBUG) */ - assertf(!map_entry->use_pmap, "offset 0x%llx map_entry %p", (uint64_t) curr_s_offset, map_entry); - pmap_disp = 0; - } else { - if (pmap_disp & PMAP_QUERY_PAGE_PRESENT) { - assertf(map_entry->use_pmap, "offset 0x%llx map_entry %p", (uint64_t) curr_s_offset, map_entry); - disposition |= VM_PAGE_QUERY_PAGE_PRESENT; - disposition |= VM_PAGE_QUERY_PAGE_REF; - if (pmap_disp & PMAP_QUERY_PAGE_INTERNAL) { - disposition |= VM_PAGE_QUERY_PAGE_DIRTY; - } else { - disposition |= VM_PAGE_QUERY_PAGE_EXTERNAL; - } - if (pmap_disp & PMAP_QUERY_PAGE_REUSABLE) { - disposition |= VM_PAGE_QUERY_PAGE_REUSABLE; - } - } else if (pmap_disp & PMAP_QUERY_PAGE_COMPRESSED) { - assertf(map_entry->use_pmap, "offset 0x%llx map_entry %p", (uint64_t) curr_s_offset, map_entry); - disposition |= VM_PAGE_QUERY_PAGE_PAGED_OUT; - } + vm_map_footprint_query_page_info( + map, + map_entry, + curr_s_offset, + &disposition); } switch (flavor) { case VM_PAGE_INFO_BASIC: basic_info = (vm_page_info_basic_t) (((uintptr_t) info) + (info_idx * sizeof(struct vm_page_info_basic))); basic_info->disposition = disposition; basic_info->ref_count = 1; - basic_info->object_id = INFO_MAKE_FAKE_OBJECT_ID(map, task_ledgers.purgeable_nonvolatile); + basic_info->object_id = VM_OBJECT_ID_FAKE(map, task_ledgers.purgeable_nonvolatile); basic_info->offset = 0; basic_info->depth = 0; info_idx++; break; } - curr_s_offset += PAGE_SIZE; + curr_s_offset += effective_page_size; continue; } @@ -17442,7 +19159,7 @@ vm_map_page_range_info_internal( curr_offset_in_object = offset_in_object; for (;;) { - m = vm_page_lookup(curr_object, curr_offset_in_object); + m = vm_page_lookup(curr_object, vm_object_trunc_page(curr_offset_in_object)); if (m != VM_PAGE_NULL) { disposition |= VM_PAGE_QUERY_PAGE_PRESENT; @@ -17452,7 +19169,7 @@ 
vm_map_page_range_info_internal( curr_object->alive && !curr_object->terminating && curr_object->pager_ready) { - if (VM_COMPRESSOR_PAGER_STATE_GET(curr_object, curr_offset_in_object) + if (VM_COMPRESSOR_PAGER_STATE_GET(curr_object, vm_object_trunc_page(curr_offset_in_object)) == VM_EXTERNAL_STATE_EXISTS) { /* the pager has that page */ disposition |= VM_PAGE_QUERY_PAGE_PAGED_OUT; @@ -17517,6 +19234,12 @@ vm_map_page_range_info_internal( disposition |= VM_PAGE_QUERY_PAGE_SPECULATIVE; } + /* + * XXX TODO4K: + * when this routine deals with 4k + * pages, check the appropriate CS bit + * here. + */ if (m->vmp_cs_validated) { disposition |= VM_PAGE_QUERY_PAGE_CS_VALIDATED; } @@ -17553,8 +19276,8 @@ vm_map_page_range_info_internal( /* * Move to next offset in the range and in our object. */ - curr_s_offset += PAGE_SIZE; - offset_in_object += PAGE_SIZE; + curr_s_offset += effective_page_size; + offset_in_object += effective_page_size; curr_offset_in_object = offset_in_object; if (curr_object != object) { @@ -17626,6 +19349,7 @@ vm_map_msync( vm_map_entry_t entry; vm_map_size_t amount_left; vm_object_offset_t offset; + vm_object_offset_t start_offset, end_offset; boolean_t do_sync_req; boolean_t had_hole = FALSE; vm_map_offset_t pmap_offset; @@ -17635,6 +19359,10 @@ vm_map_msync( return KERN_INVALID_ARGUMENT; } + if (VM_MAP_PAGE_MASK(map) < PAGE_MASK) { + DEBUG4K_SHARE("map %p address 0x%llx size 0x%llx flags 0x%x\n", map, (uint64_t)address, (uint64_t)size, sync_flags); + } + /* * align address and size on page boundaries */ @@ -17765,6 +19493,27 @@ vm_map_msync( int kill_pages = 0; boolean_t reusable_pages = FALSE; + if (VM_MAP_PAGE_MASK(map) < PAGE_MASK) { + /* + * This is a destructive operation and so we + * err on the side of limiting the range of + * the operation. 
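
Illustrative aside, not part of the patch: for maps whose page size is smaller than the kernel's PAGE_SIZE, the destructive msync path shown just below clamps the range inward (round the start up, truncate the end down) so that only fully covered kernel pages are deactivated, while the later non-destructive sync path expands the range outward. A self-contained sketch of the two roundings, assuming a hypothetical 16K kernel page:

#include <stdint.h>

#define KPAGE_SIZE 16384ULL
#define KPAGE_MASK (KPAGE_SIZE - 1)

static inline uint64_t trunc_kpage(uint64_t x) { return x & ~KPAGE_MASK; }
static inline uint64_t round_kpage(uint64_t x) { return (x + KPAGE_MASK) & ~KPAGE_MASK; }

/* destructive (e.g. VM_SYNC_KILLPAGES): clamp inward, may leave nothing */
static void
clamp_inward(uint64_t off, uint64_t len, uint64_t *s, uint64_t *e)
{
	*s = round_kpage(off);
	*e = trunc_kpage(off + len);   /* *e <= *s means "nothing to do" */
}

/* non-destructive flush: expand outward so the whole range is covered */
static void
expand_outward(uint64_t off, uint64_t len, uint64_t *s, uint64_t *e)
{
	*s = trunc_kpage(off);
	*e = round_kpage(off + len);
}
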
+ */ + start_offset = vm_object_round_page(offset); + end_offset = vm_object_trunc_page(offset + flush_size); + + if (end_offset <= start_offset) { + vm_object_unlock(object); + vm_map_unlock(map); + continue; + } + + pmap_offset += start_offset - offset;; + } else { + start_offset = offset; + end_offset = offset + flush_size; + } + if (sync_flags & VM_SYNC_KILLPAGES) { if (((object->ref_count == 1) || ((object->copy_strategy != @@ -17782,8 +19531,8 @@ vm_map_msync( if (kill_pages != -1) { vm_object_deactivate_pages( object, - offset, - (vm_object_size_t) flush_size, + start_offset, + (vm_object_size_t) (end_offset - start_offset), kill_pages, reusable_pages, map->pmap, @@ -17812,9 +19561,17 @@ vm_map_msync( vm_map_unlock(map); + if (VM_MAP_PAGE_MASK(map) < PAGE_MASK) { + start_offset = vm_object_trunc_page(offset); + end_offset = vm_object_round_page(offset + flush_size); + } else { + start_offset = offset; + end_offset = offset + flush_size; + } + do_sync_req = vm_object_sync(object, - offset, - flush_size, + start_offset, + (end_offset - start_offset), sync_flags & VM_SYNC_INVALIDATE, ((sync_flags & VM_SYNC_SYNCHRONOUS) || (sync_flags & VM_SYNC_ASYNCHRONOUS)), @@ -17844,6 +19601,74 @@ vm_map_msync( return KERN_SUCCESS; }/* vm_msync */ +kern_return_t +vm_named_entry_from_vm_object( + vm_named_entry_t named_entry, + vm_object_t object, + vm_object_offset_t offset, + vm_object_size_t size, + vm_prot_t prot) +{ + vm_map_copy_t copy; + vm_map_entry_t copy_entry; + + assert(!named_entry->is_sub_map); + assert(!named_entry->is_copy); + assert(!named_entry->is_object); + assert(!named_entry->internal); + assert(named_entry->backing.copy == VM_MAP_COPY_NULL); + + copy = vm_map_copy_allocate(); + copy->type = VM_MAP_COPY_ENTRY_LIST; + copy->offset = offset; + copy->size = size; + copy->cpy_hdr.page_shift = PAGE_SHIFT; + vm_map_store_init(©->cpy_hdr); + + copy_entry = vm_map_copy_entry_create(copy, FALSE); + copy_entry->protection = prot; + copy_entry->max_protection = prot; + copy_entry->use_pmap = TRUE; + copy_entry->vme_start = VM_MAP_TRUNC_PAGE(offset, PAGE_MASK); + copy_entry->vme_end = VM_MAP_ROUND_PAGE(offset + size, PAGE_MASK); + VME_OBJECT_SET(copy_entry, object); + VME_OFFSET_SET(copy_entry, vm_object_trunc_page(offset)); + vm_map_copy_entry_link(copy, vm_map_copy_last_entry(copy), copy_entry); + + named_entry->backing.copy = copy; + named_entry->is_object = TRUE; + if (object->internal) { + named_entry->internal = TRUE; + } + + DEBUG4K_MEMENTRY("named_entry %p copy %p object %p offset 0x%llx size 0x%llx prot 0x%x\n", named_entry, copy, object, offset, size, prot); + + return KERN_SUCCESS; +} + +vm_object_t +vm_named_entry_to_vm_object( + vm_named_entry_t named_entry) +{ + vm_map_copy_t copy; + vm_map_entry_t copy_entry; + vm_object_t object; + + assert(!named_entry->is_sub_map); + assert(!named_entry->is_copy); + assert(named_entry->is_object); + copy = named_entry->backing.copy; + assert(copy != VM_MAP_COPY_NULL); + assert(copy->cpy_hdr.nentries == 1); + copy_entry = vm_map_copy_first_entry(copy); + assert(!copy_entry->is_sub_map); + object = VME_OBJECT(copy_entry); + + DEBUG4K_MEMENTRY("%p -> %p -> %p [0x%llx 0x%llx 0x%llx 0x%x/0x%x ] -> %p offset 0x%llx size 0x%llx prot 0x%x\n", named_entry, copy, copy_entry, (uint64_t)copy_entry->vme_start, (uint64_t)copy_entry->vme_end, copy_entry->vme_offset, copy_entry->protection, copy_entry->max_protection, object, named_entry->offset, named_entry->size, named_entry->protection); + + return object; +} + /* * Routine: convert_port_entry_to_map * 
Purpose: @@ -17885,6 +19710,13 @@ convert_port_entry_to_map( (named_entry->protection & VM_PROT_WRITE)) { map = named_entry->backing.map; + if (map->pmap != PMAP_NULL) { + if (map->pmap == kernel_pmap) { + panic("userspace has access " + "to a kernel map %p", map); + } + pmap_require(map->pmap); + } } else { mach_destroy_memory_entry(port); return VM_MAP_NULL; @@ -17940,8 +19772,17 @@ try_again: ip_unlock(port); if (!(named_entry->is_sub_map) && !(named_entry->is_copy) && + (named_entry->is_object) && (named_entry->protection & VM_PROT_WRITE)) { - object = named_entry->backing.object; + vm_map_copy_t copy; + vm_map_entry_t copy_entry; + + copy = named_entry->backing.copy; + assert(copy->cpy_hdr.nentries == 1); + copy_entry = vm_map_copy_first_entry(copy); + assert(!copy_entry->is_sub_map); + object = VME_OBJECT(copy_entry); + assert(object != VM_OBJECT_NULL); vm_object_reference(object); } mach_destroy_memory_entry(port); @@ -18027,6 +19868,20 @@ vm_map_deallocate( vm_map_destroy(map, VM_MAP_REMOVE_NO_FLAGS); } +void +vm_map_inspect_deallocate( + vm_map_inspect_t map) +{ + vm_map_deallocate((vm_map_t)map); +} + +void +vm_map_read_deallocate( + vm_map_read_t map) +{ + vm_map_deallocate((vm_map_t)map); +} + void vm_map_disable_NX(vm_map_t map) @@ -18081,7 +19936,7 @@ vm_map_set_64bit(vm_map_t map) void vm_map_set_jumbo(vm_map_t map) { -#if defined (__arm64__) +#if defined (__arm64__) && !defined(CONFIG_ARROW) vm_map_set_max_addr(map, ~0); #else /* arm64 */ (void) map; @@ -18337,6 +20192,28 @@ vm_map_switch_protect(vm_map_t map, vm_map_unlock(map); } +extern int cs_process_enforcement_enable; +boolean_t +vm_map_cs_enforcement( + vm_map_t map) +{ + if (cs_process_enforcement_enable) { + return TRUE; + } + return map->cs_enforcement; +} + +void +vm_map_cs_enforcement_set( + vm_map_t map, + boolean_t val) +{ + vm_map_lock(map); + map->cs_enforcement = val; + pmap_set_vm_map_cs_enforced(map->pmap, val); + vm_map_unlock(map); +} + /* * IOKit has mapped a region into this map; adjust the pmap's ledgers appropriately. * phys_footprint is a composite limit consisting of iokit + physmem, so we need to @@ -18431,7 +20308,8 @@ vm_map_sign(vm_map_t map, /* Page is OK... now "validate" it */ /* This is the place where we'll call out to create a code * directory, later */ - m->vmp_cs_validated = TRUE; + /* XXX TODO4K: deal with 4k subpages individually? */ + m->vmp_cs_validated = VMP_CS_ALL_TRUE; /* The page is now "clean" for codesigning purposes. That means * we don't consider it as modified (wpmapped) anymore. 
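
Illustrative aside, not part of the patch: vm_named_entry_from_vm_object() and vm_named_entry_to_vm_object() above now represent an object-backed named entry as a vm_map_copy containing exactly one entry that points at the object. The unwrap side reduces to the following stand-alone sketch (all structures here are simplified stand-ins, not the kernel definitions):

#include <assert.h>
#include <stddef.h>

struct sketch_object;                  /* stands in for struct vm_object */

struct sketch_copy_entry {
	struct sketch_object *object;  /* what VME_OBJECT() would return */
};

struct sketch_copy {
	int nentries;                  /* must be exactly 1 for named entries */
	struct sketch_copy_entry first;
};

struct sketch_named_entry {
	int is_object;                 /* the new is_object bit */
	struct sketch_copy *copy;      /* backing.copy */
};

static struct sketch_object *
named_entry_object(const struct sketch_named_entry *ne)
{
	assert(ne->is_object && ne->copy != NULL);
	assert(ne->copy->nentries == 1);   /* single-entry copy wraps the object */
	return ne->copy->first.object;
}
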
But @@ -18569,14 +20447,51 @@ vm_map_disconnect_page_mappings( return page_count; } +kern_return_t +vm_map_inject_error(vm_map_t map, vm_map_offset_t vaddr) +{ + vm_object_t object = NULL; + vm_object_offset_t offset; + vm_prot_t prot; + boolean_t wired; + vm_map_version_t version; + vm_map_t real_map; + int result = KERN_FAILURE; + + vaddr = vm_map_trunc_page(vaddr, PAGE_MASK); + vm_map_lock(map); + + result = vm_map_lookup_locked(&map, vaddr, VM_PROT_READ, + OBJECT_LOCK_EXCLUSIVE, &version, &object, &offset, &prot, &wired, + NULL, &real_map, NULL); + if (object == NULL) { + result = KERN_MEMORY_ERROR; + } else if (object->pager) { + result = vm_compressor_pager_inject_error(object->pager, + offset); + } else { + result = KERN_MEMORY_PRESENT; + } + + if (object != NULL) { + vm_object_unlock(object); + } + + if (real_map != map) { + vm_map_unlock(real_map); + } + vm_map_unlock(map); + + return result; +} + #endif #if CONFIG_FREEZE -int c_freezer_swapout_page_count; -int c_freezer_compression_count = 0; +extern struct freezer_context freezer_context_global; AbsoluteTime c_freezer_last_yield_ts = 0; extern unsigned int memorystatus_freeze_private_shared_pages_ratio; @@ -18644,7 +20559,7 @@ vm_map_freeze( goto done; } - c_freezer_compression_count = 0; + freezer_context_global.freezer_ctx_uncompressed_pages = 0; clock_get_uptime(&c_freezer_last_yield_ts); } again: @@ -18773,7 +20688,7 @@ again: evaluation_phase = FALSE; dirty_shared_count = dirty_private_count = 0; - c_freezer_compression_count = 0; + freezer_context_global.freezer_ctx_uncompressed_pages = 0; clock_get_uptime(&c_freezer_last_yield_ts); if (eval_only) { @@ -18793,16 +20708,6 @@ done: if ((eval_only == FALSE) && (kr == KERN_SUCCESS)) { vm_object_compressed_freezer_done(); - - if (VM_CONFIG_FREEZER_SWAP_IS_ACTIVE) { - /* - * reset the counter tracking the # of swapped compressed pages - * because we are now done with this freeze session and task. - */ - - *dirty_count = c_freezer_swapout_page_count; //used to track pageouts - c_freezer_swapout_page_count = 0; - } } return kr; } @@ -19155,7 +21060,7 @@ vm_commit_pagezero_status(vm_map_t lmap) pmap_advise_pagezero_range(lmap->pmap, lmap->min_offset); } -#if !CONFIG_EMBEDDED +#if XNU_TARGET_OS_OSX void vm_map_set_high_start( vm_map_t map, @@ -19163,7 +21068,7 @@ vm_map_set_high_start( { map->vmmap_high_start = high_start; } -#endif +#endif /* XNU_TARGET_OS_OSX */ #if PMAP_CS kern_return_t @@ -19180,6 +21085,7 @@ vm_map_entry_cs_associate( if (map->pmap == NULL || entry->is_sub_map || /* XXX FBDP: recurse on sub-range? 
*/ + pmap_cs_exempt(map->pmap) || VME_OBJECT(entry) == VM_OBJECT_NULL || !(entry->protection & VM_PROT_EXECUTE)) { return KERN_SUCCESS; @@ -19191,7 +21097,8 @@ vm_map_entry_cs_associate( cs_ret = pmap_cs_associate(map->pmap, PMAP_CS_ASSOCIATE_JIT, entry->vme_start, - entry->vme_end - entry->vme_start); + entry->vme_end - entry->vme_start, + 0); goto done; } @@ -19199,7 +21106,8 @@ vm_map_entry_cs_associate( cs_ret = pmap_cs_associate(map->pmap, PMAP_CS_ASSOCIATE_COW, entry->vme_start, - entry->vme_end - entry->vme_start); + entry->vme_end - entry->vme_start, + 0); goto done; } @@ -19363,6 +21271,55 @@ uint64_t vm_map_corpse_footprint_size_max = 0; uint64_t vm_map_corpse_footprint_full = 0; uint64_t vm_map_corpse_footprint_no_buf = 0; +struct vm_map_corpse_footprint_header { + vm_size_t cf_size; /* allocated buffer size */ + uint32_t cf_last_region; /* offset of last region in buffer */ + union { + uint32_t cfu_last_zeroes; /* during creation: + * number of "zero" dispositions at + * end of last region */ + uint32_t cfu_hint_region; /* during lookup: + * offset of last looked up region */ +#define cf_last_zeroes cfu.cfu_last_zeroes +#define cf_hint_region cfu.cfu_hint_region + } cfu; +}; +typedef uint8_t cf_disp_t; +struct vm_map_corpse_footprint_region { + vm_map_offset_t cfr_vaddr; /* region start virtual address */ + uint32_t cfr_num_pages; /* number of pages in this "region" */ + cf_disp_t cfr_disposition[0]; /* disposition of each page */ +} __attribute__((packed)); + +static cf_disp_t +vm_page_disposition_to_cf_disp( + int disposition) +{ + assert(sizeof(cf_disp_t) == 1); + /* relocate bits that don't fit in a "uint8_t" */ + if (disposition & VM_PAGE_QUERY_PAGE_REUSABLE) { + disposition |= VM_PAGE_QUERY_PAGE_FICTITIOUS; + } + /* cast gets rid of extra bits */ + return (cf_disp_t) disposition; +} + +static int +vm_page_cf_disp_to_disposition( + cf_disp_t cf_disp) +{ + int disposition; + + assert(sizeof(cf_disp_t) == 1); + disposition = (int) cf_disp; + /* move relocated bits back in place */ + if (cf_disp & VM_PAGE_QUERY_PAGE_FICTITIOUS) { + disposition |= VM_PAGE_QUERY_PAGE_REUSABLE; + disposition &= ~VM_PAGE_QUERY_PAGE_FICTITIOUS; + } + return disposition; +} + /* * vm_map_corpse_footprint_new_region: * closes the current footprint "region" and creates a new one @@ -19401,7 +21358,7 @@ vm_map_corpse_footprint_new_region( /* compute offset of new region */ new_region_offset = footprint_header->cf_last_region; new_region_offset += sizeof(*footprint_region); - new_region_offset += footprint_region->cfr_num_pages; + new_region_offset += (footprint_region->cfr_num_pages * sizeof(cf_disp_t)); new_region_offset = roundup(new_region_offset, sizeof(int)); /* check if we're going over the edge */ @@ -19438,14 +21395,16 @@ vm_map_corpse_footprint_collect( vm_map_t new_map) { vm_map_offset_t va; - int disp; kern_return_t kr; struct vm_map_corpse_footprint_header *footprint_header; struct vm_map_corpse_footprint_region *footprint_region; struct vm_map_corpse_footprint_region *new_footprint_region; - unsigned char *next_disp_p; + cf_disp_t *next_disp_p; uintptr_t footprint_edge; uint32_t num_pages_tmp; + int effective_page_size; + + effective_page_size = MIN(PAGE_SIZE, VM_MAP_PAGE_SIZE(old_map)); va = old_entry->vme_start; @@ -19474,9 +21433,9 @@ vm_map_corpse_footprint_collect( (sizeof(*footprint_region) + +3)) /* potential alignment for each region */ + - ((old_map->size / PAGE_SIZE) + ((old_map->size / effective_page_size) * - sizeof(char))); /* disposition for each page */ + 
sizeof(cf_disp_t))); /* disposition for each page */ // printf("FBDP corpse map %p guestimate footprint size 0x%llx\n", new_map, (uint64_t) buf_size); buf_size = round_page(buf_size); @@ -19484,11 +21443,11 @@ vm_map_corpse_footprint_collect( // buf_size = PAGE_SIZE; /* limit size to a somewhat sane amount */ -#if CONFIG_EMBEDDED -#define VM_MAP_CORPSE_FOOTPRINT_INFO_MAX_SIZE (256*1024) /* 256KB */ -#else /* CONFIG_EMBEDDED */ +#if XNU_TARGET_OS_OSX #define VM_MAP_CORPSE_FOOTPRINT_INFO_MAX_SIZE (8*1024*1024) /* 8MB */ -#endif /* CONFIG_EMBEDDED */ +#else /* XNU_TARGET_OS_OSX */ +#define VM_MAP_CORPSE_FOOTPRINT_INFO_MAX_SIZE (256*1024) /* 256KB */ +#endif /* XNU_TARGET_OS_OSX */ if (buf_size > VM_MAP_CORPSE_FOOTPRINT_INFO_MAX_SIZE) { buf_size = VM_MAP_CORPSE_FOOTPRINT_INFO_MAX_SIZE; } @@ -19536,10 +21495,10 @@ vm_map_corpse_footprint_collect( if ((footprint_region->cfr_vaddr + (((vm_map_offset_t)footprint_region->cfr_num_pages) * - PAGE_SIZE)) + effective_page_size)) != old_entry->vme_start) { - uint64_t num_pages_delta; - uint32_t region_offset_delta; + uint64_t num_pages_delta, num_pages_delta_size; + uint32_t region_offset_delta_size; /* * Not the next contiguous virtual address: @@ -19547,19 +21506,20 @@ vm_map_corpse_footprint_collect( * the missing pages? */ /* size of gap in actual page dispositions */ - num_pages_delta = (((old_entry->vme_start - - footprint_region->cfr_vaddr) / PAGE_SIZE) - - footprint_region->cfr_num_pages); + num_pages_delta = ((old_entry->vme_start - + footprint_region->cfr_vaddr) / effective_page_size) + - footprint_region->cfr_num_pages; + num_pages_delta_size = num_pages_delta * sizeof(cf_disp_t); /* size of gap as a new footprint region header */ - region_offset_delta = + region_offset_delta_size = (sizeof(*footprint_region) + - roundup((footprint_region->cfr_num_pages - - footprint_header->cf_last_zeroes), + roundup(((footprint_region->cfr_num_pages - + footprint_header->cf_last_zeroes) * sizeof(cf_disp_t)), sizeof(int)) - - (footprint_region->cfr_num_pages - - footprint_header->cf_last_zeroes)); + ((footprint_region->cfr_num_pages - + footprint_header->cf_last_zeroes) * sizeof(cf_disp_t))); // printf("FBDP %s:%d region 0x%x 0x%llx 0x%x vme_start 0x%llx pages_delta 0x%llx region_delta 0x%x\n", __FUNCTION__, __LINE__, footprint_header->cf_last_region, footprint_region->cfr_vaddr, footprint_region->cfr_num_pages, old_entry->vme_start, num_pages_delta, region_offset_delta); - if (region_offset_delta < num_pages_delta || + if (region_offset_delta_size < num_pages_delta_size || os_add3_overflow(footprint_region->cfr_num_pages, (uint32_t) num_pages_delta, 1, @@ -19594,17 +21554,17 @@ vm_map_corpse_footprint_collect( */ // printf("FBDP %s:%d zero gap\n", __FUNCTION__, __LINE__); for (; num_pages_delta > 0; num_pages_delta--) { - next_disp_p = - ((unsigned char *) footprint_region + - sizeof(*footprint_region) + - footprint_region->cfr_num_pages); + next_disp_p = (cf_disp_t *) + ((uintptr_t) footprint_region + + sizeof(*footprint_region)); + next_disp_p += footprint_region->cfr_num_pages; /* check that we're not going over the edge */ if ((uintptr_t)next_disp_p >= footprint_edge) { goto over_the_edge; } /* store "zero" disposition for this gap page */ footprint_region->cfr_num_pages++; - *next_disp_p = (unsigned char) 0; + *next_disp_p = (cf_disp_t) 0; footprint_header->cf_last_zeroes++; } } @@ -19612,40 +21572,24 @@ vm_map_corpse_footprint_collect( for (va = old_entry->vme_start; va < old_entry->vme_end; - va += PAGE_SIZE) { - vm_object_t object; + va += 
effective_page_size) { + int disposition; + cf_disp_t cf_disp; - object = VME_OBJECT(old_entry); - if (!old_entry->is_sub_map && - old_entry->iokit_acct && - object != VM_OBJECT_NULL && - object->internal && - object->purgable == VM_PURGABLE_DENY) { - /* - * Non-purgeable IOKit memory: phys_footprint - * includes the entire virtual mapping. - * Since the forked corpse's VM map entry will not - * have "iokit_acct", pretend that this page's - * disposition is "present & internal", so that it - * shows up in the forked corpse's footprint. - */ - disp = (PMAP_QUERY_PAGE_PRESENT | - PMAP_QUERY_PAGE_INTERNAL); - } else { - disp = 0; - pmap_query_page_info(old_map->pmap, - va, - &disp); - } + vm_map_footprint_query_page_info(old_map, + old_entry, + va, + &disposition); + cf_disp = vm_page_disposition_to_cf_disp(disposition); // if (va < SHARED_REGION_BASE_ARM64) printf("FBDP collect map %p va 0x%llx disp 0x%x\n", new_map, va, disp); - if (disp == 0 && footprint_region->cfr_num_pages == 0) { + if (cf_disp == 0 && footprint_region->cfr_num_pages == 0) { /* * Ignore "zero" dispositions at start of * region: just move start of region. */ - footprint_region->cfr_vaddr += PAGE_SIZE; + footprint_region->cfr_vaddr += effective_page_size; continue; } @@ -19664,18 +21608,18 @@ vm_map_corpse_footprint_collect( footprint_region->cfr_num_pages = 0; } - next_disp_p = ((unsigned char *)footprint_region + - sizeof(*footprint_region) + - footprint_region->cfr_num_pages); + next_disp_p = (cf_disp_t *) ((uintptr_t) footprint_region + + sizeof(*footprint_region)); + next_disp_p += footprint_region->cfr_num_pages; /* check that we're not going over the edge */ if ((uintptr_t)next_disp_p >= footprint_edge) { goto over_the_edge; } /* store this dispostion */ - *next_disp_p = (unsigned char) disp; + *next_disp_p = cf_disp; footprint_region->cfr_num_pages++; - if (disp != 0) { + if (cf_disp != 0) { /* non-zero disp: break the current zero streak */ footprint_header->cf_last_zeroes = 0; /* done */ @@ -19685,8 +21629,8 @@ vm_map_corpse_footprint_collect( /* zero disp: add to the current streak of zeroes */ footprint_header->cf_last_zeroes++; if ((footprint_header->cf_last_zeroes + - roundup((footprint_region->cfr_num_pages - - footprint_header->cf_last_zeroes) & + roundup(((footprint_region->cfr_num_pages - + footprint_header->cf_last_zeroes) * sizeof(cf_disp_t)) & (sizeof(int) - 1), sizeof(int))) < (sizeof(*footprint_header))) { @@ -19712,7 +21656,7 @@ vm_map_corpse_footprint_collect( /* initialize the new region as empty ... */ footprint_region->cfr_num_pages = 0; /* ... and skip this "zero" disp */ - footprint_region->cfr_vaddr = va + PAGE_SIZE; + footprint_region->cfr_vaddr = va + effective_page_size; } return KERN_SUCCESS; @@ -19759,7 +21703,7 @@ vm_map_corpse_footprint_collect_done( actual_size = (vm_size_t)(footprint_header->cf_last_region + sizeof(*footprint_region) + - footprint_region->cfr_num_pages); + (footprint_region->cfr_num_pages * sizeof(cf_disp_t))); // printf("FBDP map %p buf_size 0x%llx actual_size 0x%llx\n", new_map, (uint64_t) buf_size, (uint64_t) actual_size); vm_map_corpse_footprint_size_avg = @@ -19807,13 +21751,13 @@ vm_map_corpse_footprint_collect_done( * retrieves the disposition of the page at virtual address "vaddr" * in the forked corpse's VM map * - * This is the equivalent of pmap_query_page_info() for a forked corpse. + * This is the equivalent of vm_map_footprint_query_page_info() for a forked corpse. 
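
Illustrative aside, not part of the patch: corpse footprints now record one byte (cf_disp_t) per page, so a disposition flag that does not fit in 8 bits is parked in an otherwise unused low bit on the way in and moved back on the way out, which is what vm_page_disposition_to_cf_disp() and vm_page_cf_disp_to_disposition() above do for the "reusable" flag. A round-trip sketch with made-up bit values (the real VM_PAGE_QUERY_* constants are defined in the kernel headers):

#include <stdint.h>

#define Q_FICTITIOUS 0x04u    /* hypothetical: unused by footprints, fits in 8 bits */
#define Q_REUSABLE   0x100u   /* hypothetical: does not fit in 8 bits */

static uint8_t
pack_disp(unsigned disp)
{
	if (disp & Q_REUSABLE) {
		disp |= Q_FICTITIOUS;   /* park "reusable" in the spare low bit */
	}
	return (uint8_t)disp;           /* the cast drops everything above bit 7 */
}

static unsigned
unpack_disp(uint8_t cf)
{
	unsigned disp = cf;
	if (disp & Q_FICTITIOUS) {
		disp = (disp & ~Q_FICTITIOUS) | Q_REUSABLE;   /* move it back */
	}
	return disp;
}

/* round trip: unpack_disp(pack_disp(Q_REUSABLE | 0x1)) == (Q_REUSABLE | 0x1) */
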
*/ kern_return_t vm_map_corpse_footprint_query_page_info( vm_map_t map, vm_map_offset_t va, - int *disp) + int *disposition_p) { struct vm_map_corpse_footprint_header *footprint_header; struct vm_map_corpse_footprint_region *footprint_region; @@ -19821,17 +21765,19 @@ vm_map_corpse_footprint_query_page_info( vm_map_offset_t region_start, region_end; int disp_idx; kern_return_t kr; + int effective_page_size; + cf_disp_t cf_disp; if (!map->has_corpse_footprint) { - *disp = 0; + *disposition_p = 0; kr = KERN_INVALID_ARGUMENT; goto done; } footprint_header = map->vmmap_corpse_footprint; if (footprint_header == NULL) { - *disp = 0; -// if (va < SHARED_REGION_BASE_ARM64) printf("FBDP %d query map %p va 0x%llx disp 0x%x\n", __LINE__, map, va, *disp); + *disposition_p = 0; +// if (va < SHARED_REGION_BASE_ARM64) printf("FBDP %d query map %p va 0x%llx disp 0x%x\n", __LINE__, map, va, *disposition_p); kr = KERN_INVALID_ARGUMENT; goto done; } @@ -19839,6 +21785,8 @@ vm_map_corpse_footprint_query_page_info( /* start looking at the hint ("cf_hint_region") */ footprint_region_offset = footprint_header->cf_hint_region; + effective_page_size = MIN(PAGE_SIZE, VM_MAP_PAGE_SIZE(map)); + lookup_again: if (footprint_region_offset < sizeof(*footprint_header)) { /* hint too low: start from 1st region */ @@ -19853,7 +21801,7 @@ lookup_again: region_start = footprint_region->cfr_vaddr; region_end = (region_start + ((vm_map_offset_t)(footprint_region->cfr_num_pages) * - PAGE_SIZE)); + effective_page_size)); if (va < region_start && footprint_region_offset != sizeof(*footprint_header)) { /* our range starts before the hint region */ @@ -19872,7 +21820,7 @@ lookup_again: /* skip the region's header */ footprint_region_offset += sizeof(*footprint_region); /* skip the region's page dispositions */ - footprint_region_offset += footprint_region->cfr_num_pages; + footprint_region_offset += (footprint_region->cfr_num_pages * sizeof(cf_disp_t)); /* align to next word boundary */ footprint_region_offset = roundup(footprint_region_offset, @@ -19882,12 +21830,12 @@ lookup_again: region_start = footprint_region->cfr_vaddr; region_end = (region_start + ((vm_map_offset_t)(footprint_region->cfr_num_pages) * - PAGE_SIZE)); + effective_page_size)); } if (va < region_start || va >= region_end) { /* page not found */ - *disp = 0; -// if (va < SHARED_REGION_BASE_ARM64) printf("FBDP %d query map %p va 0x%llx disp 0x%x\n", __LINE__, map, va, *disp); + *disposition_p = 0; +// if (va < SHARED_REGION_BASE_ARM64) printf("FBDP %d query map %p va 0x%llx disp 0x%x\n", __LINE__, map, va, *disposition_p); kr = KERN_SUCCESS; goto done; } @@ -19896,24 +21844,23 @@ lookup_again: footprint_header->cf_hint_region = footprint_region_offset; /* get page disposition for "va" in this region */ - disp_idx = (int) ((va - footprint_region->cfr_vaddr) / PAGE_SIZE); - *disp = (int) (footprint_region->cfr_disposition[disp_idx]); - + disp_idx = (int) ((va - footprint_region->cfr_vaddr) / effective_page_size); + cf_disp = footprint_region->cfr_disposition[disp_idx]; + *disposition_p = vm_page_cf_disp_to_disposition(cf_disp); kr = KERN_SUCCESS; done: -// if (va < SHARED_REGION_BASE_ARM64) printf("FBDP %d query map %p va 0x%llx disp 0x%x\n", __LINE__, map, va, *disp); +// if (va < SHARED_REGION_BASE_ARM64) printf("FBDP %d query map %p va 0x%llx disp 0x%x\n", __LINE__, map, va, *disposition_p); /* dtrace -n 'vminfo:::footprint_query_page_info { printf("map 0x%p va 0x%llx disp 0x%x kr 0x%x", arg0, arg1, arg2, arg3); }' */ DTRACE_VM4(footprint_query_page_info, 
vm_map_t, map, vm_map_offset_t, va, - int, *disp, + int, *disposition_p, kern_return_t, kr); return kr; } - -static void +void vm_map_corpse_footprint_destroy( vm_map_t map) { diff --git a/osfmk/vm/vm_map.h b/osfmk/vm/vm_map.h index f56606f8f..e07ad6549 100644 --- a/osfmk/vm/vm_map.h +++ b/osfmk/vm/vm_map.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2019 Apple Inc. All rights reserved. + * Copyright (c) 2000-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -86,6 +86,10 @@ #include +#ifdef XNU_KERNEL_PRIVATE +#include +#endif /* XNU_KERNEL_PRIVATE */ + __BEGIN_DECLS extern void vm_map_reference(vm_map_t map); @@ -98,7 +102,8 @@ extern kern_return_t vm_map_exec( boolean_t is64bit, void *fsroot, cpu_type_t cpu, - cpu_subtype_t cpu_subtype); + cpu_subtype_t cpu_subtype, + boolean_t reslide); __END_DECLS @@ -176,7 +181,6 @@ extern queue_head_t vm_named_entry_list; struct vm_named_entry { decl_lck_mtx_data(, Lock); /* Synchronization */ union { - vm_object_t object; /* object I point to */ vm_map_t map; /* map backing submap */ vm_map_copy_t copy; /* a VM map copy */ } backing; @@ -186,7 +190,8 @@ struct vm_named_entry { vm_prot_t protection; /* access permissions */ int ref_count; /* Number of references */ unsigned int /* Is backing.xxx : */ - /* boolean_t */ internal:1, /* ... an internal object */ + /* boolean_t */ is_object:1, /* ... a VM object (wrapped in a VM map copy) */ + /* boolean_t */ internal:1, /* ... an internal object */ /* boolean_t */ is_sub_map:1, /* ... a submap? */ /* boolean_t */ is_copy:1; /* ... a VM map copy */ #if VM_NAMED_ENTRY_LIST @@ -298,7 +303,8 @@ struct vm_map_entry { /* boolean_t */ vme_resilient_media:1, /* boolean_t */ vme_atomic:1, /* entry cannot be split/coalesced */ /* boolean_t */ vme_no_copy_on_read:1, - __unused:3; + /* boolean_t */ translated_allow_execute:1, /* execute in translated processes */ + __unused:2; unsigned short wired_count; /* can be paged if = 0 */ unsigned short user_wired_count; /* for vm_wire */ @@ -311,6 +317,8 @@ struct vm_map_entry { uintptr_t vme_creation_bt[16]; #endif #if MAP_ENTRY_INSERTION_DEBUG + vm_map_offset_t vme_start_original; + vm_map_offset_t vme_end_original; uintptr_t vme_insertion_bt[16]; #endif }; @@ -324,8 +332,8 @@ struct vm_map_entry { #define VME_OBJECT(entry) \ ((vm_object_t)((uintptr_t)0 + *VME_OBJECT_PTR(entry))) #define VME_OFFSET(entry) \ - ((entry)->vme_offset & ~PAGE_MASK) -#define VME_ALIAS_MASK (PAGE_MASK) + ((entry)->vme_offset & (vm_object_offset_t)~FOURK_PAGE_MASK) +#define VME_ALIAS_MASK (FOURK_PAGE_MASK) #define VME_ALIAS(entry) \ ((unsigned int)((entry)->vme_offset & VME_ALIAS_MASK)) @@ -351,11 +359,11 @@ VME_SUBMAP_SET( static inline void VME_OFFSET_SET( vm_map_entry_t entry, - vm_map_offset_t offset) + vm_object_offset_t offset) { - int alias; + unsigned int alias; alias = VME_ALIAS(entry); - assert((offset & PAGE_MASK) == 0); + assert((offset & FOURK_PAGE_MASK) == 0); entry->vme_offset = offset | alias; } /* @@ -369,9 +377,9 @@ VME_ALIAS_SET( vm_map_entry_t entry, int alias) { - vm_map_offset_t offset; + vm_object_offset_t offset; offset = VME_OFFSET(entry); - entry->vme_offset = offset | (alias & VME_ALIAS_MASK); + entry->vme_offset = offset | ((unsigned int)alias & VME_ALIAS_MASK); } static inline void @@ -455,7 +463,7 @@ struct _vm_map { struct vm_map_header hdr; /* Map entry header */ #define min_offset hdr.links.start /* start of range */ #define max_offset hdr.links.end /* end of range */ - pmap_t pmap; /* Physical map */ + pmap_t 
XNU_PTRAUTH_SIGNED_PTR("_vm_map.pmap") pmap; /* Physical map */ vm_map_size_t size; /* virtual size */ vm_map_size_t user_wire_limit;/* rlimit on user locked memory */ vm_map_size_t user_wire_size; /* current size of user locked memory in this map */ @@ -517,7 +525,10 @@ struct _vm_map { /* boolean_t */ jit_entry_exists:1, /* boolean_t */ has_corpse_footprint:1, /* boolean_t */ terminated:1, - /* reserved */ pad:19; + /* boolean_t */ is_alien:1, /* for platform simulation, i.e. PLATFORM_IOS on OSX */ + /* boolean_t */ cs_enforcement:1, /* code-signing enforcement */ + /* boolean_t */ reserved_regions:1, /* has reserved regions. The map size that userspace sees should ignore these. */ + /* reserved */ pad:16; unsigned int timestamp; /* Version number */ }; @@ -595,9 +606,9 @@ struct vm_map_copy { vm_object_offset_t offset; vm_map_size_t size; union { - struct vm_map_header hdr; /* ENTRY_LIST */ - vm_object_t object; /* OBJECT */ - uint8_t kdata[0]; /* KERNEL_BUFFER */ + struct vm_map_header hdr; /* ENTRY_LIST */ + vm_object_t object; /* OBJECT */ + void *XNU_PTRAUTH_SIGNED_PTR("vm_map_copy.kdata") kdata; /* KERNEL_BUFFER */ } c_u; }; @@ -606,7 +617,6 @@ struct vm_map_copy { #define cpy_object c_u.object #define cpy_kdata c_u.kdata -#define cpy_kdata_hdr_sz (offsetof(struct vm_map_copy, c_u.kdata)) #define VM_MAP_COPY_PAGE_SHIFT(copy) ((copy)->cpy_hdr.page_shift) #define VM_MAP_COPY_PAGE_SIZE(copy) (1 << VM_MAP_COPY_PAGE_SHIFT((copy))) @@ -622,6 +632,18 @@ struct vm_map_copy { #define vm_map_copy_last_entry(copy) \ ((copy)->cpy_hdr.links.prev) +extern kern_return_t +vm_map_copy_adjust_to_target( + vm_map_copy_t copy_map, + vm_map_offset_t offset, + vm_map_size_t size, + vm_map_t target_map, + boolean_t copy, + vm_map_copy_t *target_copy_map_p, + vm_map_offset_t *overmap_start_p, + vm_map_offset_t *overmap_end_p, + vm_map_offset_t *trimmed_start_p); + /* * Macros: vm_map_lock, etc. [internal use only] * Description: @@ -675,6 +697,9 @@ boolean_t vm_map_try_lock(vm_map_t map); __attribute__((always_inline)) boolean_t vm_map_try_lock_read(vm_map_t map); +int vm_self_region_page_shift(vm_map_t target_map); +int vm_self_region_page_shift_safely(vm_map_t target_map); + #if MACH_ASSERT || DEBUG #define vm_map_lock_assert_held(map) \ lck_rw_assert(&(map)->lock, LCK_RW_ASSERT_HELD) @@ -712,6 +737,9 @@ extern kern_return_t vm_map_find_space( vm_tag_t tag, vm_map_entry_t *o_entry); /* OUT */ +/* flags for vm_map_find_space */ +#define VM_MAP_FIND_LAST_FREE 0x01 + extern void vm_map_clip_start( vm_map_t map, vm_map_entry_t entry, @@ -751,7 +779,8 @@ extern kern_return_t vm_map_lookup_locked( vm_prot_t *out_prot, /* OUT */ boolean_t *wired, /* OUT */ vm_object_fault_info_t fault_info, /* OUT */ - vm_map_t *real_map); /* OUT */ + vm_map_t *real_map, /* OUT */ + bool *contended); /* OUT */ /* Verifies that the map has not changed since the given version. 
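
Illustrative aside, not part of the patch: the VME_OFFSET()/VME_ALIAS() changes above keep packing an entry's object offset and its alias tag into the single vme_offset field, but the split is now expressed with the 4K page mask: offsets stay 4K-aligned, so the low 12 bits are free to carry the alias. A stand-alone sketch of that packing (FOURK_MASK is a local stand-in for FOURK_PAGE_MASK):

#include <assert.h>
#include <stdint.h>

#define FOURK_MASK 0xFFFull   /* low 12 bits */

static uint64_t
pack_offset_alias(uint64_t offset, unsigned alias)
{
	assert((offset & FOURK_MASK) == 0);   /* offset must be 4K aligned */
	return offset | ((uint64_t)alias & FOURK_MASK);
}

static uint64_t vme_offset_of(uint64_t packed) { return packed & ~FOURK_MASK; }
static unsigned vme_alias_of(uint64_t packed)  { return (unsigned)(packed & FOURK_MASK); }
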
*/ extern boolean_t vm_map_verify( @@ -772,7 +801,7 @@ extern vm_map_entry_t vm_map_entry_insert( vm_prot_t max_protection, vm_behavior_t behavior, vm_inherit_t inheritance, - unsigned wired_count, + unsigned short wired_count, boolean_t no_cache, boolean_t permanent, boolean_t no_copy_on_read, @@ -780,7 +809,8 @@ extern vm_map_entry_t vm_map_entry_insert( boolean_t clear_map_aligned, boolean_t is_submap, boolean_t used_for_jit, - int alias); + int alias, + boolean_t translated_allow_execute); /* @@ -1150,36 +1180,6 @@ extern void vm_map_region_walk( mach_msg_type_number_t count); -struct vm_map_corpse_footprint_header { - vm_size_t cf_size; /* allocated buffer size */ - uint32_t cf_last_region; /* offset of last region in buffer */ - union { - uint32_t cfu_last_zeroes; /* during creation: - * number of "zero" dispositions at - * end of last region */ - uint32_t cfu_hint_region; /* during lookup: - * offset of last looked up region */ -#define cf_last_zeroes cfu.cfu_last_zeroes -#define cf_hint_region cfu.cfu_hint_region - } cfu; -}; -struct vm_map_corpse_footprint_region { - vm_map_offset_t cfr_vaddr; /* region start virtual address */ - uint32_t cfr_num_pages; /* number of pages in this "region" */ - unsigned char cfr_disposition[0]; /* disposition of each page */ -} __attribute__((packed)); - -extern kern_return_t vm_map_corpse_footprint_collect( - vm_map_t old_map, - vm_map_entry_t old_entry, - vm_map_t new_map); -extern void vm_map_corpse_footprint_collect_done( - vm_map_t new_map); - -extern kern_return_t vm_map_corpse_footprint_query_page_info( - vm_map_t map, - vm_map_offset_t va, - int *disp); extern void vm_map_copy_footprint_ledgers( task_t old_task, @@ -1189,6 +1189,25 @@ extern void vm_map_copy_ledger( task_t new_task, int ledger_entry); +/** + * Represents a single region of virtual address space that should be reserved + * (pre-mapped) in a user address space. + */ +struct vm_reserved_region { + char *vmrr_name; + vm_map_offset_t vmrr_addr; + vm_map_size_t vmrr_size; +}; + +/** + * Return back a machine-dependent array of address space regions that should be + * reserved by the VM. This function is defined in the machine-dependent + * machine_routines.c files. 
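
Illustrative aside, not part of the patch: ml_get_vm_reserved_regions(), declared immediately below, hands back a machine-dependent array of regions to pre-reserve in a new user map. A hypothetical caller might walk that array as follows (the struct layout is simplified to fixed-width stand-ins and the actual reservation step is omitted):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct vm_reserved_region {
	char     *vmrr_name;
	uint64_t  vmrr_addr;   /* stand-in for vm_map_offset_t */
	uint64_t  vmrr_size;   /* stand-in for vm_map_size_t */
};

/* machine-dependent provider, per the declaration below */
extern size_t ml_get_vm_reserved_regions(bool vm_is64bit,
    struct vm_reserved_region **regions);

static void
reserve_regions(bool is64bit)
{
	struct vm_reserved_region *regions = NULL;
	size_t n = ml_get_vm_reserved_regions(is64bit, &regions);

	for (size_t i = 0; i < n; i++) {
		/* each region would be pre-mapped into the new address space */
		(void)regions[i].vmrr_addr;
		(void)regions[i].vmrr_size;
	}
}
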
+ */ +extern size_t ml_get_vm_reserved_regions( + bool vm_is64bit, + struct vm_reserved_region **regions); + #endif /* MACH_KERNEL_PRIVATE */ __BEGIN_DECLS @@ -1209,6 +1228,8 @@ extern vm_map_t vm_map_create_options( #define VM_MAP_CREATE_ALL_OPTIONS (VM_MAP_CREATE_PAGEABLE | \ VM_MAP_CREATE_CORPSE_FOOTPRINT) +extern vm_map_size_t vm_map_adjusted_size(vm_map_t map); + extern void vm_map_disable_hole_optimization(vm_map_t map); /* Get rid of a map */ @@ -1220,6 +1241,14 @@ extern void vm_map_destroy( extern void vm_map_deallocate( vm_map_t map); +/* Lose a reference */ +extern void vm_map_inspect_deallocate( + vm_map_inspect_t map); + +/* Lose a reference */ +extern void vm_map_read_deallocate( + vm_map_read_t map); + extern vm_map_t vm_map_switch( vm_map_t map); @@ -1238,6 +1267,12 @@ extern boolean_t vm_map_check_protection( vm_map_offset_t end, vm_prot_t protection); +extern boolean_t vm_map_cs_enforcement( + vm_map_t map); +extern void vm_map_cs_enforcement_set( + vm_map_t map, + boolean_t val); + /* wire down a region */ #ifdef XNU_KERNEL_PRIVATE @@ -1443,9 +1478,13 @@ extern kern_return_t vm_map_copy_extract( vm_map_t src_map, vm_map_address_t src_addr, vm_map_size_t len, + vm_prot_t required_prot, + boolean_t copy, vm_map_copy_t *copy_result, /* OUT */ vm_prot_t *cur_prot, /* OUT */ - vm_prot_t *max_prot); + vm_prot_t *max_prot, /* OUT */ + vm_inherit_t inheritance, + vm_map_kernel_flags_t vmk_flags); extern void vm_map_disable_NX( @@ -1568,6 +1607,11 @@ mach_vm_range_overflows(mach_vm_offset_t addr, mach_vm_size_t size) } #ifdef XNU_KERNEL_PRIVATE + +#if XNU_TARGET_OS_OSX +extern void vm_map_mark_alien(vm_map_t map); +#endif /* XNU_TARGET_OS_OSX */ + extern kern_return_t vm_map_page_info( vm_map_t map, vm_map_offset_t offset, @@ -1578,6 +1622,7 @@ extern kern_return_t vm_map_page_range_info_internal( vm_map_t map, vm_map_offset_t start_offset, vm_map_offset_t end_offset, + int effective_page_shift, vm_page_info_flavor_t flavor, vm_page_info_t info, mach_msg_type_number_t *count); @@ -1619,6 +1664,118 @@ extern kern_return_t vm_map_page_range_info_internal( #define VM_MAP_PAGE_MASK(map) (VM_MAP_PAGE_SIZE((map)) - 1) #define VM_MAP_PAGE_ALIGNED(x, pgmask) (((x) & (pgmask)) == 0) +static inline bool +VM_MAP_IS_EXOTIC( + vm_map_t map __unused) +{ +#if __arm64__ + if (VM_MAP_PAGE_SHIFT(map) < PAGE_SHIFT || + pmap_is_exotic(map->pmap)) { + return true; + } +#endif /* __arm64__ */ + return false; +} + +static inline bool +VM_MAP_IS_ALIEN( + vm_map_t map __unused) +{ + /* + * An "alien" process/task/map/pmap should mostly behave + * as it currently would on iOS. 
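
Illustrative aside, not part of the patch: the VM_MAP_POLICY_* helpers that follow turn the single exotic/alien property into per-feature decisions. For instance, a hypothetical caller validating a write+execute request would either reject it or strip the execute bit depending on the map's policy, roughly like this (stand-in booleans replace the real map argument):

#include <stdbool.h>

/* stand-ins for VM_MAP_POLICY_WX_FAIL() / VM_MAP_POLICY_WX_STRIP_X() */
static bool policy_wx_fail(bool is_alien)    { return !is_alien; }
static bool policy_wx_strip_x(bool is_alien) { return  is_alien; }

/* hypothetical caller: resolve a write+execute protection request */
static int
resolve_wx_request(bool is_alien, unsigned *prot, unsigned exec_bit)
{
	if (*prot & exec_bit) {
		if (policy_wx_fail(is_alien)) {
			return -1;              /* reject the request outright */
		}
		if (policy_wx_strip_x(is_alien)) {
			*prot &= ~exec_bit;     /* keep it writable, drop execute */
		}
	}
	return 0;
}
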
+ */ +#if XNU_TARGET_OS_OSX + if (map->is_alien) { + return true; + } + return false; +#else /* XNU_TARGET_OS_OSX */ + return true; +#endif /* XNU_TARGET_OS_OSX */ +} + +static inline bool +VM_MAP_POLICY_WX_FAIL( + vm_map_t map __unused) +{ + if (VM_MAP_IS_ALIEN(map)) { + return false; + } + return true; +} + +static inline bool +VM_MAP_POLICY_WX_STRIP_X( + vm_map_t map __unused) +{ + if (VM_MAP_IS_ALIEN(map)) { + return true; + } + return false; +} + +static inline bool +VM_MAP_POLICY_ALLOW_MULTIPLE_JIT( + vm_map_t map __unused) +{ + if (VM_MAP_IS_ALIEN(map)) { + return false; + } + return true; +} + +static inline bool +VM_MAP_POLICY_ALLOW_JIT_RANDOM_ADDRESS( + vm_map_t map) +{ + return VM_MAP_IS_ALIEN(map); +} + +static inline bool +VM_MAP_POLICY_ALLOW_JIT_INHERIT( + vm_map_t map __unused) +{ + if (VM_MAP_IS_ALIEN(map)) { + return false; + } + return true; +} + +static inline bool +VM_MAP_POLICY_ALLOW_JIT_SHARING( + vm_map_t map __unused) +{ + if (VM_MAP_IS_ALIEN(map)) { + return false; + } + return true; +} + +static inline bool +VM_MAP_POLICY_ALLOW_JIT_COPY( + vm_map_t map __unused) +{ + if (VM_MAP_IS_ALIEN(map)) { + return false; + } + return true; +} + +static inline bool +VM_MAP_POLICY_WRITABLE_SHARED_REGION( + vm_map_t map __unused) +{ +#if __x86_64__ + return true; +#else /* __x86_64__ */ + if (VM_MAP_IS_EXOTIC(map)) { + return true; + } + return false; +#endif /* __x86_64__ */ +} + static inline void vm_prot_to_wimg(unsigned int prot, unsigned int *wimg) { @@ -1633,8 +1790,7 @@ vm_prot_to_wimg(unsigned int prot, unsigned int *wimg) case MAP_MEM_WTHRU: *wimg = VM_WIMG_WTHRU; break; case MAP_MEM_WCOMB: *wimg = VM_WIMG_WCOMB; break; case MAP_MEM_RT: *wimg = VM_WIMG_RT; break; - default: - panic("Unrecognized mapping type %u\n", prot); + default: break; } } @@ -1642,6 +1798,8 @@ vm_prot_to_wimg(unsigned int prot, unsigned int *wimg) #ifdef XNU_KERNEL_PRIVATE extern kern_return_t vm_map_set_page_shift(vm_map_t map, int pageshift); +extern bool vm_map_is_exotic(vm_map_t map); +extern bool vm_map_is_alien(vm_map_t map); #endif /* XNU_KERNEL_PRIVATE */ #define vm_map_round_page(x, pgmask) (((vm_map_offset_t)(x) + (pgmask)) & ~((signed)(pgmask))) @@ -1701,6 +1859,9 @@ extern kern_return_t vm_map_partial_reap( extern int vm_map_disconnect_page_mappings( vm_map_t map, boolean_t); + +extern kern_return_t vm_map_inject_error(vm_map_t map, vm_map_offset_t vaddr); + #endif @@ -1717,7 +1878,6 @@ extern kern_return_t vm_map_freeze( int *freezer_error_code, boolean_t eval_only); - #define FREEZER_ERROR_GENERIC (-1) #define FREEZER_ERROR_EXCESS_SHARED_MEMORY (-2) #define FREEZER_ERROR_LOW_PRIVATE_SHARED_RATIO (-3) @@ -1734,7 +1894,7 @@ __END_DECLS * a fake pointer based on the map's ledger and the index of the ledger being * reported. */ -#define INFO_MAKE_FAKE_OBJECT_ID(map, ledger_id) ((uint32_t)(uintptr_t)VM_KERNEL_ADDRPERM((int*)((map)->pmap->ledger)+(ledger_id))) +#define VM_OBJECT_ID_FAKE(map, ledger_id) ((uint32_t)(uintptr_t)VM_KERNEL_ADDRPERM((int*)((map)->pmap->ledger)+(ledger_id))) #endif /* KERNEL_PRIVATE */ diff --git a/osfmk/vm/vm_map_store.c b/osfmk/vm/vm_map_store.c index df03e1ca0..990f1d452 100644 --- a/osfmk/vm/vm_map_store.c +++ b/osfmk/vm/vm_map_store.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2009 Apple Inc. All rights reserved. + * Copyright (c) 2009-2020 Apple Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -59,6 +59,7 @@ vm_map_store_init( struct vm_map_header *hdr ) #endif } +__attribute__((noinline)) boolean_t vm_map_store_lookup_entry( vm_map_t map, @@ -96,6 +97,21 @@ vm_map_store_update( vm_map_t map, vm_map_entry_t entry, int update_type ) } } +/* + * vm_map_store_find_last_free: + * + * Finds and returns in O_ENTRY the entry *after* the last hole (if one exists) in MAP. + * Returns NULL if map is full and no hole can be found. + */ +void +vm_map_store_find_last_free( + vm_map_t map, + vm_map_entry_t *o_entry) /* OUT */ +{ + /* TODO: Provide a RB implementation for this routine. */ + vm_map_store_find_last_free_ll(map, o_entry); +} + /* * vm_map_entry_{un,}link: * @@ -124,6 +140,10 @@ _vm_map_store_entry_link( struct vm_map_header * mapHdr, vm_map_entry_t after_wh } #endif #if MAP_ENTRY_INSERTION_DEBUG + if (entry->vme_start_original == 0 && entry->vme_end_original == 0) { + entry->vme_start_original = entry->vme_start; + entry->vme_end_original = entry->vme_end; + } backtrace(&entry->vme_insertion_bt[0], (sizeof(entry->vme_insertion_bt) / sizeof(uintptr_t)), NULL); #endif @@ -141,6 +161,13 @@ vm_map_store_entry_link( VMEL_map = (map); VMEL_entry = (entry); + if (entry->is_sub_map) { + assertf(VM_MAP_PAGE_SHIFT(VME_SUBMAP(entry)) >= VM_MAP_PAGE_SHIFT(map), + "map %p (%d) entry %p submap %p (%d)\n", + map, VM_MAP_PAGE_SHIFT(map), entry, + VME_SUBMAP(entry), VM_MAP_PAGE_SHIFT(VME_SUBMAP(entry))); + } + _vm_map_store_entry_link(&VMEL_map->hdr, after_where, VMEL_entry); if (VMEL_map->disable_vmentry_reuse == TRUE) { UPDATE_HIGHEST_ENTRY_END( VMEL_map, VMEL_entry); diff --git a/osfmk/vm/vm_map_store.h b/osfmk/vm/vm_map_store.h index d4712a7c3..cd1170db4 100644 --- a/osfmk/vm/vm_map_store.h +++ b/osfmk/vm/vm_map_store.h @@ -131,6 +131,7 @@ RB_HEAD( rb_head, vm_map_store ); void vm_map_store_init( struct vm_map_header* ); boolean_t vm_map_store_lookup_entry( struct _vm_map*, vm_map_offset_t, struct vm_map_entry**); void vm_map_store_update( struct _vm_map*, struct vm_map_entry*, int); +void vm_map_store_find_last_free( struct _vm_map*, struct vm_map_entry**); void _vm_map_store_entry_link( struct vm_map_header *, struct vm_map_entry*, struct vm_map_entry*); void vm_map_store_entry_link( struct _vm_map*, struct vm_map_entry*, struct vm_map_entry*, vm_map_kernel_flags_t); void _vm_map_store_entry_unlink( struct vm_map_header *, struct vm_map_entry*); diff --git a/osfmk/vm/vm_map_store_ll.c b/osfmk/vm/vm_map_store_ll.c index ad6ad2bdd..13dd8ac4c 100644 --- a/osfmk/vm/vm_map_store_ll.c +++ b/osfmk/vm/vm_map_store_ll.c @@ -227,6 +227,39 @@ vm_map_store_lookup_entry_ll( return FALSE; } +/* + * vm_map_store_find_last_free_ll: + * + * Finds and returns in O_ENTRY the entry *after* the last hole (if one exists) in MAP. + * Returns NULL if map is full and no hole can be found. 
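
Illustrative aside, not part of the patch: the new find-last-free store operation documented above walks the entry list backwards from the top of the map until it reaches a gap and returns the entry just after that gap, or NULL when the map is completely full. A stand-alone sketch of the same walk over a plain doubly linked list of [start, end) entries (simplified types, no red-black tree):

#include <stddef.h>
#include <stdint.h>

struct sk_entry {
	struct sk_entry *prev;
	uint64_t start, end;
};

/* head is a sentinel; returns the entry after the last hole, or NULL if full */
static struct sk_entry *
find_last_free(struct sk_entry *head, uint64_t map_min, uint64_t map_max)
{
	struct sk_entry *entry = head;
	uint64_t end = map_max;

	/* skip entries that butt up against the current end of the map */
	while (entry->prev != head && entry->prev->end == end) {
		entry = entry->prev;
		end = entry->start;
	}
	if (entry != head && entry->start == map_min) {
		return NULL;   /* no hole anywhere: map is full */
	}
	return entry;
}
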
+ */ +void +vm_map_store_find_last_free_ll( + vm_map_t map, + vm_map_entry_t *o_entry) /* OUT */ +{ + vm_map_entry_t entry, prev_entry; + vm_offset_t end; + + entry = vm_map_to_entry(map); + end = map->max_offset; + + /* Skip over contiguous entries from end of map until wraps around */ + while ((prev_entry = entry->vme_prev) != vm_map_to_entry(map) + && (prev_entry->vme_end == end || prev_entry->vme_end > map->max_offset)) { + entry = prev_entry; + end = entry->vme_start; + } + + if (entry != vm_map_to_entry(map) && entry->vme_start == map->min_offset) { + /* Map is completely full, no holes exist */ + *o_entry = NULL; + return; + } + + *o_entry = entry; +} + void vm_map_store_entry_link_ll( struct vm_map_header *mapHdr, vm_map_entry_t after_where, vm_map_entry_t entry) { diff --git a/osfmk/vm/vm_map_store_ll.h b/osfmk/vm/vm_map_store_ll.h index e122db663..d1f444292 100644 --- a/osfmk/vm/vm_map_store_ll.h +++ b/osfmk/vm/vm_map_store_ll.h @@ -35,6 +35,7 @@ boolean_t first_free_is_valid_ll( struct _vm_map*); void vm_map_store_init_ll( struct vm_map_header* ); boolean_t vm_map_store_lookup_entry_ll( struct _vm_map*, vm_map_offset_t, struct vm_map_entry**); +void vm_map_store_find_last_free_ll( struct _vm_map*, struct vm_map_entry**); void vm_map_store_entry_link_ll( struct vm_map_header*, struct vm_map_entry*, struct vm_map_entry*); void vm_map_store_entry_unlink_ll( struct vm_map_header*, struct vm_map_entry*); void update_first_free_ll(struct _vm_map*, struct vm_map_entry*); diff --git a/osfmk/vm/vm_object.c b/osfmk/vm/vm_object.c index 5b6250afc..b7a718aea 100644 --- a/osfmk/vm/vm_object.c +++ b/osfmk/vm/vm_object.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2018 Apple Inc. All rights reserved. + * Copyright (c) 2000-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -113,6 +113,12 @@ boolean_t vm_object_collapse_compressor_allowed = TRUE; struct vm_counters vm_counters; +#if DEVELOPMENT || DEBUG +extern struct memory_object_pager_ops shared_region_pager_ops; +extern unsigned int shared_region_pagers_resident_count; +extern unsigned int shared_region_pagers_resident_peak; +#endif /* DEVELOPMENT || DEBUG */ + #if VM_OBJECT_TRACKING boolean_t vm_object_tracking_inited = FALSE; btlog_t *vm_object_tracking_btlog; @@ -231,16 +237,16 @@ static void vm_object_do_bypass( static void vm_object_release_pager( memory_object_t pager); -zone_t vm_object_zone; /* vm backing store zone */ +SECURITY_READ_ONLY_LATE(zone_t) vm_object_zone; /* vm backing store zone */ /* * All wired-down kernel memory belongs to a single virtual * memory object (kernel_object) to avoid wasting data structures. */ -static struct vm_object kernel_object_store __attribute__((aligned(VM_PACKED_POINTER_ALIGNMENT))); +static struct vm_object kernel_object_store VM_PAGE_PACKED_ALIGNED; vm_object_t kernel_object; -static struct vm_object compressor_object_store __attribute__((aligned(VM_PACKED_POINTER_ALIGNMENT))); +static struct vm_object compressor_object_store VM_PAGE_PACKED_ALIGNED; vm_object_t compressor_object = &compressor_object_store; /* @@ -249,7 +255,7 @@ vm_object_t compressor_object = &compressor_object_s * is exported by the vm_map module. The storage is declared * here because it must be initialized here. 
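
Illustrative aside, not part of the patch: the statically allocated kernel, compressor, and submap objects above are now declared with VM_PAGE_PACKED_ALIGNED rather than an open-coded alignment attribute. The alignment matters because pages refer to their objects through packed (base-relative, shifted) pointers; a simplified model of that encoding, with entirely hypothetical base and shift values:

#include <assert.h>
#include <stdint.h>

#define PACK_SHIFT 6ULL                      /* hypothetical 64-byte alignment */
#define PACK_BASE  0xFFFFFFF000000000ULL     /* hypothetical packing base address */

static uint32_t
pack_ptr(uint64_t ptr)
{
	assert(ptr >= PACK_BASE);
	assert((ptr & ((1ULL << PACK_SHIFT) - 1)) == 0);   /* must be aligned */
	return (uint32_t)((ptr - PACK_BASE) >> PACK_SHIFT);
}

static uint64_t
unpack_ptr(uint32_t packed)
{
	return PACK_BASE + ((uint64_t)packed << PACK_SHIFT);
}
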
*/ -static struct vm_object vm_submap_object_store __attribute__((aligned(VM_PACKED_POINTER_ALIGNMENT))); +static struct vm_object vm_submap_object_store VM_PAGE_PACKED_ALIGNED; /* * Virtual memory objects are initialized from @@ -259,7 +265,138 @@ static struct vm_object vm_submap_object_store __attribute__((al * object structure, be sure to add initialization * (see _vm_object_allocate()). */ -static struct vm_object vm_object_template; +static const struct vm_object vm_object_template = { + .memq.prev = 0, + .memq.next = 0, + /* + * The lock will be initialized for each allocated object in + * _vm_object_allocate(), so we don't need to initialize it in + * the vm_object_template. + */ +#if DEVELOPMENT || DEBUG + .Lock_owner = 0, +#endif + .vo_size = 0, + .memq_hint = VM_PAGE_NULL, + .ref_count = 1, +#if TASK_SWAPPER + .res_count = 1, +#endif /* TASK_SWAPPER */ + .resident_page_count = 0, + .wired_page_count = 0, + .reusable_page_count = 0, + .copy = VM_OBJECT_NULL, + .shadow = VM_OBJECT_NULL, + .vo_shadow_offset = (vm_object_offset_t) 0, + .pager = MEMORY_OBJECT_NULL, + .paging_offset = 0, + .pager_control = MEMORY_OBJECT_CONTROL_NULL, + .copy_strategy = MEMORY_OBJECT_COPY_SYMMETRIC, + .paging_in_progress = 0, +#if __LP64__ + .__object1_unused_bits = 0, +#endif /* __LP64__ */ + .activity_in_progress = 0, + + /* Begin bitfields */ + .all_wanted = 0, /* all bits FALSE */ + .pager_created = FALSE, + .pager_initialized = FALSE, + .pager_ready = FALSE, + .pager_trusted = FALSE, + .can_persist = FALSE, + .internal = TRUE, + .private = FALSE, + .pageout = FALSE, + .alive = TRUE, + .purgable = VM_PURGABLE_DENY, + .purgeable_when_ripe = FALSE, + .purgeable_only_by_kernel = FALSE, + .shadowed = FALSE, + .true_share = FALSE, + .terminating = FALSE, + .named = FALSE, + .shadow_severed = FALSE, + .phys_contiguous = FALSE, + .nophyscache = FALSE, + /* End bitfields */ + + .cached_list.prev = NULL, + .cached_list.next = NULL, + + .last_alloc = (vm_object_offset_t) 0, + .sequential = (vm_object_offset_t) 0, + .pages_created = 0, + .pages_used = 0, + .scan_collisions = 0, +#if CONFIG_PHANTOM_CACHE + .phantom_object_id = 0, +#endif + .cow_hint = ~(vm_offset_t)0, + + /* cache bitfields */ + .wimg_bits = VM_WIMG_USE_DEFAULT, + .set_cache_attr = FALSE, + .object_is_shared_cache = FALSE, + .code_signed = FALSE, + .transposed = FALSE, + .mapping_in_progress = FALSE, + .phantom_isssd = FALSE, + .volatile_empty = FALSE, + .volatile_fault = FALSE, + .all_reusable = FALSE, + .blocked_access = FALSE, + .vo_ledger_tag = VM_LEDGER_TAG_NONE, + .vo_no_footprint = FALSE, +#if CONFIG_IOSCHED || UPL_DEBUG + .uplq.prev = NULL, + .uplq.next = NULL, +#endif /* UPL_DEBUG */ +#ifdef VM_PIP_DEBUG + .pip_holders = {0}, +#endif /* VM_PIP_DEBUG */ + + .objq.next = NULL, + .objq.prev = NULL, + .task_objq.next = NULL, + .task_objq.prev = NULL, + + .purgeable_queue_type = PURGEABLE_Q_TYPE_MAX, + .purgeable_queue_group = 0, + + .wire_tag = VM_KERN_MEMORY_NONE, +#if !VM_TAG_ACTIVE_UPDATE + .wired_objq.next = NULL, + .wired_objq.prev = NULL, +#endif /* ! 
VM_TAG_ACTIVE_UPDATE */ + + .io_tracking = FALSE, + +#if CONFIG_SECLUDED_MEMORY + .eligible_for_secluded = FALSE, + .can_grab_secluded = FALSE, +#else /* CONFIG_SECLUDED_MEMORY */ + .__object3_unused_bits = 0, +#endif /* CONFIG_SECLUDED_MEMORY */ + +#if VM_OBJECT_ACCESS_TRACKING + .access_tracking = FALSE, + .access_tracking_reads = 0, + .access_tracking_writes = 0, +#endif /* VM_OBJECT_ACCESS_TRACKING */ + +#if DEBUG + .purgeable_owner_bt = {0}, + .vo_purgeable_volatilizer = NULL, + .purgeable_volatilizer_bt = {0}, +#endif /* DEBUG */ +}; + +LCK_GRP_DECLARE(vm_object_lck_grp, "vm_object"); +LCK_GRP_DECLARE(vm_object_cache_lck_grp, "vm_object_cache"); +LCK_ATTR_DECLARE(vm_object_lck_attr, 0, 0); +LCK_ATTR_DECLARE(kernel_object_lck_attr, 0, LCK_ATTR_DEBUG); +LCK_ATTR_DECLARE(compressor_object_lck_attr, 0, LCK_ATTR_DEBUG); unsigned int vm_page_purged_wired = 0; unsigned int vm_page_purged_busy = 0; @@ -271,8 +408,8 @@ static uint32_t vm_object_cache_pages_moved = 0; static uint32_t vm_object_cache_pages_skipped = 0; static uint32_t vm_object_cache_adds = 0; static uint32_t vm_object_cached_count = 0; -static lck_mtx_t vm_object_cached_lock_data; -static lck_mtx_ext_t vm_object_cached_lock_data_ext; +static LCK_MTX_EARLY_DECLARE_ATTR(vm_object_cached_lock_data, + &vm_object_cache_lck_grp, &vm_object_lck_attr); static uint32_t vm_object_page_grab_failed = 0; static uint32_t vm_object_page_grab_skipped = 0; @@ -292,8 +429,8 @@ static void vm_object_reap(vm_object_t object); static void vm_object_reap_async(vm_object_t object); static void vm_object_reaper_thread(void); -static lck_mtx_t vm_object_reaper_lock_data; -static lck_mtx_ext_t vm_object_reaper_lock_data_ext; +static LCK_MTX_EARLY_DECLARE_ATTR(vm_object_reaper_lock_data, + &vm_object_lck_grp, &vm_object_lck_attr); static queue_head_t vm_object_reaper_queue; /* protected by vm_object_reaper_lock() */ unsigned int vm_object_reap_count = 0; @@ -308,8 +445,10 @@ unsigned int vm_object_reap_count_async = 0; #if CONFIG_IOSCHED /* I/O Re-prioritization request list */ -queue_head_t io_reprioritize_list; -lck_spin_t io_reprioritize_list_lock; +queue_head_t io_reprioritize_list = QUEUE_HEAD_INITIALIZER(io_reprioritize_list); + +LCK_SPIN_DECLARE_ATTR(io_reprioritize_list_lock, + &vm_object_lck_grp, &vm_object_lck_attr); #define IO_REPRIORITIZE_LIST_LOCK() \ lck_spin_lock_grp(&io_reprioritize_list_lock, &vm_object_lck_grp) @@ -317,7 +456,8 @@ lck_spin_t io_reprioritize_list_lock; lck_spin_unlock(&io_reprioritize_list_lock) #define MAX_IO_REPRIORITIZE_REQS 8192 -zone_t io_reprioritize_req_zone; +ZONE_DECLARE(io_reprioritize_req_zone, "io_reprioritize_req", + sizeof(struct io_reprioritize_req), ZC_NOGC); /* I/O Re-prioritization thread */ int io_reprioritize_wakeup = 0; @@ -358,7 +498,7 @@ _vm_object_allocate( queue_init(&object->uplq); #endif vm_object_lock_init(object); - object->vo_size = size; + object->vo_size = vm_object_round_page(size); #if VM_OBJECT_TRACKING_OP_CREATED if (vm_object_tracking_inited) { @@ -392,198 +532,38 @@ vm_object_allocate( return object; } - -lck_grp_t vm_object_lck_grp; -lck_grp_t vm_object_cache_lck_grp; -lck_grp_attr_t vm_object_lck_grp_attr; -lck_attr_t vm_object_lck_attr; -lck_attr_t kernel_object_lck_attr; -lck_attr_t compressor_object_lck_attr; - -extern void vm_named_entry_init(void); - -int workaround_41447923 = 0; +TUNABLE(bool, workaround_41447923, "workaround_41447923", false); /* * vm_object_bootstrap: * * Initialize the VM objects module. 
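
Illustrative aside, not part of the patch: turning vm_object_template into a static const object with designated initializers keeps the template in read-only memory; a freshly allocated object is then set up by a plain structure copy before its per-object fields are filled in, which is the pattern _vm_object_allocate() relies on. A toy version of the same pattern:

#include <stdint.h>

struct toy_object {
	uint64_t size;
	int      ref_count;
	int      alive;
};

static const struct toy_object toy_template = {
	.size      = 0,
	.ref_count = 1,
	.alive     = 1,
};

static void
toy_object_init(struct toy_object *obj, uint64_t size)
{
	*obj = toy_template;   /* bulk-initialize from the read-only template */
	obj->size = size;      /* then fill in the per-object fields */
}
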
*/ -__private_extern__ void +__startup_func +void vm_object_bootstrap(void) { vm_size_t vm_object_size; assert(sizeof(mo_ipc_object_bits_t) == sizeof(ipc_object_bits_t)); - vm_object_size = (sizeof(struct vm_object) + (VM_PACKED_POINTER_ALIGNMENT - 1)) & ~(VM_PACKED_POINTER_ALIGNMENT - 1); - - vm_object_zone = zinit(vm_object_size, - round_page(512 * 1024), - round_page(12 * 1024), - "vm objects"); - zone_change(vm_object_zone, Z_CALLERACCT, FALSE); /* don't charge caller */ - zone_change(vm_object_zone, Z_NOENCRYPT, TRUE); - zone_change(vm_object_zone, Z_ALIGNMENT_REQUIRED, TRUE); + vm_object_size = (sizeof(struct vm_object) + (VM_PAGE_PACKED_PTR_ALIGNMENT - 1)) & + ~(VM_PAGE_PACKED_PTR_ALIGNMENT - 1); - vm_object_init_lck_grp(); + vm_object_zone = zone_create_ext("vm objects", vm_object_size, + ZC_NOENCRYPT | ZC_ALIGNMENT_REQUIRED, + ZONE_ID_ANY, ^(zone_t z){ +#if defined(__LP64__) + zone_set_submap_idx(z, Z_SUBMAP_IDX_VA_RESTRICTED_MAP); +#else + (void)z; +#endif + }); queue_init(&vm_object_cached_list); - lck_mtx_init_ext(&vm_object_cached_lock_data, - &vm_object_cached_lock_data_ext, - &vm_object_cache_lck_grp, - &vm_object_lck_attr); - queue_init(&vm_object_reaper_queue); - lck_mtx_init_ext(&vm_object_reaper_lock_data, - &vm_object_reaper_lock_data_ext, - &vm_object_lck_grp, - &vm_object_lck_attr); - - - /* - * Fill in a template object, for quick initialization - */ - - /* memq; Lock; init after allocation */ - - vm_object_template.memq.prev = 0; - vm_object_template.memq.next = 0; -#if 0 - /* - * We can't call vm_object_lock_init() here because that will - * allocate some memory and VM is not fully initialized yet. - * The lock will be initialized for each allocated object in - * _vm_object_allocate(), so we don't need to initialize it in - * the vm_object_template. 
- */ - vm_object_lock_init(&vm_object_template); -#endif -#if DEVELOPMENT || DEBUG - vm_object_template.Lock_owner = 0; -#endif - vm_object_template.vo_size = 0; - vm_object_template.memq_hint = VM_PAGE_NULL; - vm_object_template.ref_count = 1; -#if TASK_SWAPPER - vm_object_template.res_count = 1; -#endif /* TASK_SWAPPER */ - vm_object_template.resident_page_count = 0; - vm_object_template.wired_page_count = 0; - vm_object_template.reusable_page_count = 0; - vm_object_template.copy = VM_OBJECT_NULL; - vm_object_template.shadow = VM_OBJECT_NULL; - vm_object_template.vo_shadow_offset = (vm_object_offset_t) 0; - vm_object_template.pager = MEMORY_OBJECT_NULL; - vm_object_template.paging_offset = 0; - vm_object_template.pager_control = MEMORY_OBJECT_CONTROL_NULL; - vm_object_template.copy_strategy = MEMORY_OBJECT_COPY_SYMMETRIC; - vm_object_template.paging_in_progress = 0; -#if __LP64__ - vm_object_template.__object1_unused_bits = 0; -#endif /* __LP64__ */ - vm_object_template.activity_in_progress = 0; - - /* Begin bitfields */ - vm_object_template.all_wanted = 0; /* all bits FALSE */ - vm_object_template.pager_created = FALSE; - vm_object_template.pager_initialized = FALSE; - vm_object_template.pager_ready = FALSE; - vm_object_template.pager_trusted = FALSE; - vm_object_template.can_persist = FALSE; - vm_object_template.internal = TRUE; - vm_object_template.private = FALSE; - vm_object_template.pageout = FALSE; - vm_object_template.alive = TRUE; - vm_object_template.purgable = VM_PURGABLE_DENY; - vm_object_template.purgeable_when_ripe = FALSE; - vm_object_template.purgeable_only_by_kernel = FALSE; - vm_object_template.shadowed = FALSE; - vm_object_template.true_share = FALSE; - vm_object_template.terminating = FALSE; - vm_object_template.named = FALSE; - vm_object_template.shadow_severed = FALSE; - vm_object_template.phys_contiguous = FALSE; - vm_object_template.nophyscache = FALSE; - /* End bitfields */ - - vm_object_template.cached_list.prev = NULL; - vm_object_template.cached_list.next = NULL; - - vm_object_template.last_alloc = (vm_object_offset_t) 0; - vm_object_template.sequential = (vm_object_offset_t) 0; - vm_object_template.pages_created = 0; - vm_object_template.pages_used = 0; - vm_object_template.scan_collisions = 0; -#if CONFIG_PHANTOM_CACHE - vm_object_template.phantom_object_id = 0; -#endif - vm_object_template.cow_hint = ~(vm_offset_t)0; - - /* cache bitfields */ - vm_object_template.wimg_bits = VM_WIMG_USE_DEFAULT; - vm_object_template.set_cache_attr = FALSE; - vm_object_template.object_is_shared_cache = FALSE; - vm_object_template.code_signed = FALSE; - vm_object_template.transposed = FALSE; - vm_object_template.mapping_in_progress = FALSE; - vm_object_template.phantom_isssd = FALSE; - vm_object_template.volatile_empty = FALSE; - vm_object_template.volatile_fault = FALSE; - vm_object_template.all_reusable = FALSE; - vm_object_template.blocked_access = FALSE; - vm_object_template.vo_ledger_tag = VM_LEDGER_TAG_NONE; - vm_object_template.vo_no_footprint = FALSE; -#if CONFIG_IOSCHED || UPL_DEBUG - vm_object_template.uplq.prev = NULL; - vm_object_template.uplq.next = NULL; -#endif /* UPL_DEBUG */ -#ifdef VM_PIP_DEBUG - bzero(&vm_object_template.pip_holders, - sizeof(vm_object_template.pip_holders)); -#endif /* VM_PIP_DEBUG */ - - vm_object_template.objq.next = NULL; - vm_object_template.objq.prev = NULL; - vm_object_template.task_objq.next = NULL; - vm_object_template.task_objq.prev = NULL; - - vm_object_template.purgeable_queue_type = PURGEABLE_Q_TYPE_MAX; - 
vm_object_template.purgeable_queue_group = 0; - - vm_object_template.vo_cache_ts = 0; - - vm_object_template.wire_tag = VM_KERN_MEMORY_NONE; -#if !VM_TAG_ACTIVE_UPDATE - vm_object_template.wired_objq.next = NULL; - vm_object_template.wired_objq.prev = NULL; -#endif /* ! VM_TAG_ACTIVE_UPDATE */ - - vm_object_template.io_tracking = FALSE; - -#if CONFIG_SECLUDED_MEMORY - vm_object_template.eligible_for_secluded = FALSE; - vm_object_template.can_grab_secluded = FALSE; -#else /* CONFIG_SECLUDED_MEMORY */ - vm_object_template.__object3_unused_bits = 0; -#endif /* CONFIG_SECLUDED_MEMORY */ - -#if VM_OBJECT_ACCESS_TRACKING - vm_object_template.access_tracking = FALSE; - vm_object_template.access_tracking_reads = 0; - vm_object_template.access_tracking_writes = 0; -#endif /* VM_OBJECT_ACCESS_TRACKING */ - -#if DEBUG - bzero(&vm_object_template.purgeable_owner_bt[0], - sizeof(vm_object_template.purgeable_owner_bt)); - vm_object_template.vo_purgeable_volatilizer = NULL; - bzero(&vm_object_template.purgeable_volatilizer_bt[0], - sizeof(vm_object_template.purgeable_volatilizer_bt)); -#endif /* DEBUG */ - /* * Initialize the "kernel object" */ @@ -620,11 +600,6 @@ vm_object_bootstrap(void) * non-zone memory. */ vm_object_reference(vm_submap_object); - - vm_named_entry_init(); - - PE_parse_boot_argn("workaround_41447923", &workaround_41447923, - sizeof(workaround_41447923)); } #if CONFIG_IOSCHED @@ -634,15 +609,6 @@ vm_io_reprioritize_init(void) kern_return_t result; thread_t thread = THREAD_NULL; - /* Initialze the I/O reprioritization subsystem */ - lck_spin_init(&io_reprioritize_list_lock, &vm_object_lck_grp, &vm_object_lck_attr); - queue_init(&io_reprioritize_list); - - io_reprioritize_req_zone = zinit(sizeof(struct io_reprioritize_req), - MAX_IO_REPRIORITIZE_REQS * sizeof(struct io_reprioritize_req), - 4096, "io_reprioritize_req"); - zone_change(io_reprioritize_req_zone, Z_COLLECT, FALSE); - result = kernel_thread_start_priority(io_reprioritize_thread, NULL, 95 /* MAXPRI_KERNEL */, &thread); if (result == KERN_SUCCESS) { thread_set_thread_name(thread, "VM_io_reprioritize_thread"); @@ -671,31 +637,6 @@ vm_object_reaper_init(void) thread_deallocate(thread); } -__private_extern__ void -vm_object_init(void) -{ - /* - * Finish initializing the kernel object. 
- */ -} - - -__private_extern__ void -vm_object_init_lck_grp(void) -{ - /* - * initialze the vm_object lock world - */ - lck_grp_attr_setdefault(&vm_object_lck_grp_attr); - lck_grp_init(&vm_object_lck_grp, "vm_object", &vm_object_lck_grp_attr); - lck_grp_init(&vm_object_cache_lck_grp, "vm_object_cache", &vm_object_lck_grp_attr); - lck_attr_setdefault(&vm_object_lck_attr); - lck_attr_setdefault(&kernel_object_lck_attr); - lck_attr_cleardebug(&kernel_object_lck_attr); - lck_attr_setdefault(&compressor_object_lck_attr); - lck_attr_cleardebug(&compressor_object_lck_attr); -} - /* * vm_object_deallocate: @@ -1490,6 +1431,14 @@ vm_object_reap( assert(object->vo_owner == NULL); } +#if DEVELOPMENT || DEBUG + if (object->object_is_shared_cache && + object->pager != NULL && + object->pager->mo_pager_ops == &shared_region_pager_ops) { + OSAddAtomic(-object->resident_page_count, &shared_region_pagers_resident_count); + } +#endif /* DEVELOPMENT || DEBUG */ + pager = object->pager; object->pager = MEMORY_OBJECT_NULL; @@ -2014,6 +1963,14 @@ vm_object_destroy( object->named = FALSE; object->alive = FALSE; +#if DEVELOPMENT || DEBUG + if (object->object_is_shared_cache && + object->pager != NULL && + object->pager->mo_pager_ops == &shared_region_pager_ops) { + OSAddAtomic(-object->resident_page_count, &shared_region_pagers_resident_count); + } +#endif /* DEVELOPMENT || DEBUG */ + old_pager = object->pager; object->pager = MEMORY_OBJECT_NULL; if (old_pager != MEMORY_OBJECT_NULL) { @@ -2156,6 +2113,13 @@ int madvise_free_debug = 1; int madvise_free_debug = 0; #endif /* DEBUG */ +__options_decl(deactivate_flags_t, uint32_t, { + DEACTIVATE_KILL = 0x1, + DEACTIVATE_REUSABLE = 0x2, + DEACTIVATE_ALL_REUSABLE = 0x4, + DEACTIVATE_CLEAR_REFMOD = 0x8 +}); + /* * Deactivate the pages in the specified object and range. If kill_page is set, also discard any * page modified state from the pmap. Update the chunk_state as we go along. The caller must specify @@ -2167,9 +2131,7 @@ deactivate_pages_in_object( vm_object_t object, vm_object_offset_t offset, vm_object_size_t size, - boolean_t kill_page, - boolean_t reusable_page, - boolean_t all_reusable, + deactivate_flags_t flags, chunk_state_t *chunk_state, pmap_flush_context *pfc, struct pmap *pmap, @@ -2177,8 +2139,9 @@ deactivate_pages_in_object( { vm_page_t m; int p; - struct vm_page_delayed_work dw_array[DEFAULT_DELAYED_WORK_LIMIT]; - struct vm_page_delayed_work *dwp; + struct vm_page_delayed_work dw_array; + struct vm_page_delayed_work *dwp, *dwp_start; + bool dwp_finish_ctx = TRUE; int dw_count; int dw_limit; unsigned int reusable = 0; @@ -2190,9 +2153,17 @@ deactivate_pages_in_object( * all the pages in the chunk. 
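Above, the three boolean_t parameters of deactivate_pages_in_object() (kill_page, reusable_page, all_reusable) are folded into a single deactivate_flags_t declared with __options_decl, which also leaves room for the new DEACTIVATE_CLEAR_REFMOD bit. The userspace sketch below shows the same refactor with a plain C enum standing in for __options_decl; all names are hypothetical.

#include <stdint.h>
#include <stdio.h>

/* One option bit per former boolean parameter, plus room to grow. */
typedef uint32_t deact_flags_t;
enum {
    DEACT_KILL         = 0x1,
    DEACT_REUSABLE     = 0x2,
    DEACT_ALL_REUSABLE = 0x4,
    DEACT_CLEAR_REFMOD = 0x8,
};

/* Old shape: deactivate(off, len, bool kill, bool reusable, bool all_reusable).
 * New shape: one flags word, so call sites read as a set of options and new
 * behaviors do not change the signature. */
static void
deactivate(uint64_t offset, uint64_t length, deact_flags_t flags)
{
    if (flags & DEACT_KILL) {
        printf("  discard modified state for [0x%llx, 0x%llx)\n",
            (unsigned long long)offset, (unsigned long long)(offset + length));
    }
    if (flags & DEACT_CLEAR_REFMOD) {
        printf("  clear ref/mod bits per page (caller did not pre-clear)\n");
    }
    if (flags & (DEACT_REUSABLE | DEACT_ALL_REUSABLE)) {
        printf("  mark pages reusable\n");
    }
}

int
main(void)
{
    deact_flags_t flags = DEACT_CLEAR_REFMOD | DEACT_KILL;
    deactivate(0x0, 0x8000, flags);

    /* Descending a shadow chain: drop the destructive bits, as the diff does. */
    flags &= ~(deact_flags_t)(DEACT_KILL | DEACT_REUSABLE | DEACT_ALL_REUSABLE);
    deactivate(0x0, 0x8000, flags);
    return 0;
}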
*/ - dwp = &dw_array[0]; + dwp_start = dwp = NULL; dw_count = 0; dw_limit = DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT); + dwp_start = vm_page_delayed_work_get_ctx(); + if (dwp_start == NULL) { + dwp_start = &dw_array; + dw_limit = 1; + dwp_finish_ctx = FALSE; + } + + dwp = dwp_start; for (p = 0; size && CHUNK_NOT_COMPLETE(*chunk_state); p++, size -= PAGE_SIZE_64, offset += PAGE_SIZE_64, pmap_offset += PAGE_SIZE_64) { /* @@ -2220,16 +2191,15 @@ deactivate_pages_in_object( if ((!VM_PAGE_WIRED(m)) && (!m->vmp_private) && (!m->vmp_gobbled) && (!m->vmp_busy) && (!m->vmp_laundry) && (!m->vmp_cleaning) && !(m->vmp_free_when_done)) { - int clear_refmod; + int clear_refmod_mask; int pmap_options; - dwp->dw_mask = 0; pmap_options = 0; - clear_refmod = VM_MEM_REFERENCED; + clear_refmod_mask = VM_MEM_REFERENCED; dwp->dw_mask |= DW_clear_reference; - if ((kill_page) && (object->internal)) { + if ((flags & DEACTIVATE_KILL) && (object->internal)) { if (madvise_free_debug) { /* * zero-fill the page now @@ -2241,7 +2211,7 @@ deactivate_pages_in_object( m->vmp_precious = FALSE; m->vmp_dirty = FALSE; - clear_refmod |= VM_MEM_MODIFIED; + clear_refmod_mask |= VM_MEM_MODIFIED; if (m->vmp_q_state == VM_PAGE_ON_THROTTLED_Q) { /* * This page is now clean and @@ -2255,8 +2225,8 @@ deactivate_pages_in_object( VM_COMPRESSOR_PAGER_STATE_CLR(object, offset); - if (reusable_page && !m->vmp_reusable) { - assert(!all_reusable); + if ((flags & DEACTIVATE_REUSABLE) && !m->vmp_reusable) { + assert(!(flags & DEACTIVATE_ALL_REUSABLE)); assert(!object->all_reusable); m->vmp_reusable = TRUE; object->reusable_page_count++; @@ -2270,13 +2240,20 @@ deactivate_pages_in_object( pmap_options |= PMAP_OPTIONS_SET_REUSABLE; } } - pmap_options |= PMAP_OPTIONS_NOFLUSH; - pmap_clear_refmod_options(VM_PAGE_GET_PHYS_PAGE(m), - clear_refmod, - pmap_options, - (void *)pfc); + if (flags & DEACTIVATE_CLEAR_REFMOD) { + /* + * The caller didn't clear the refmod bits in advance. + * Clear them for this page now. + */ + pmap_options |= PMAP_OPTIONS_NOFLUSH; + pmap_clear_refmod_options(VM_PAGE_GET_PHYS_PAGE(m), + clear_refmod_mask, + pmap_options, + (void *)pfc); + } - if ((m->vmp_q_state != VM_PAGE_ON_THROTTLED_Q) && !(reusable_page || all_reusable)) { + if ((m->vmp_q_state != VM_PAGE_ON_THROTTLED_Q) && + !(flags & (DEACTIVATE_REUSABLE | DEACTIVATE_ALL_REUSABLE))) { dwp->dw_mask |= DW_move_page; } @@ -2292,9 +2269,9 @@ deactivate_pages_in_object( vm_page_stats_reusable.reusable += reusable; reusable = 0; } - vm_page_do_delayed_work(object, VM_KERN_MEMORY_NONE, &dw_array[0], dw_count); + vm_page_do_delayed_work(object, VM_KERN_MEMORY_NONE, dwp_start, dw_count); - dwp = &dw_array[0]; + dwp = dwp_start; dw_count = 0; } } @@ -2313,7 +2290,7 @@ deactivate_pages_in_object( * map so we don't bother paging it back in if it's touched again in the future. */ - if ((kill_page) && (object->internal)) { + if ((flags & DEACTIVATE_KILL) && (object->internal)) { VM_COMPRESSOR_PAGER_STATE_CLR(object, offset); if (pmap != PMAP_NULL) { @@ -2343,7 +2320,14 @@ deactivate_pages_in_object( } if (dw_count) { - vm_page_do_delayed_work(object, VM_KERN_MEMORY_NONE, &dw_array[0], dw_count); + vm_page_do_delayed_work(object, VM_KERN_MEMORY_NONE, dwp_start, dw_count); + dwp = dwp_start; + dw_count = 0; + } + + if (dwp_start && dwp_finish_ctx) { + vm_page_delayed_work_finish_ctx(dwp_start); + dwp_start = dwp = NULL; } } @@ -2355,6 +2339,7 @@ deactivate_pages_in_object( * chain. This routine returns how much of the given "size" it actually processed. 
It's * up to the caler to loop and keep calling this routine until the entire range they want * to process has been done. + * Iff clear_refmod is true, pmap_clear_refmod_options is called for each physical page in this range. */ static vm_object_size_t @@ -2362,9 +2347,7 @@ deactivate_a_chunk( vm_object_t orig_object, vm_object_offset_t offset, vm_object_size_t size, - boolean_t kill_page, - boolean_t reusable_page, - boolean_t all_reusable, + deactivate_flags_t flags, pmap_flush_context *pfc, struct pmap *pmap, vm_map_offset_t pmap_offset) @@ -2400,7 +2383,7 @@ deactivate_a_chunk( while (object && CHUNK_NOT_COMPLETE(chunk_state)) { vm_object_paging_begin(object); - deactivate_pages_in_object(object, offset, length, kill_page, reusable_page, all_reusable, &chunk_state, pfc, pmap, pmap_offset); + deactivate_pages_in_object(object, offset, length, flags, &chunk_state, pfc, pmap, pmap_offset); vm_object_paging_end(object); @@ -2413,9 +2396,8 @@ deactivate_a_chunk( tmp_object = object->shadow; if (tmp_object) { - kill_page = FALSE; - reusable_page = FALSE; - all_reusable = FALSE; + assert(!(flags & DEACTIVATE_KILL) || (flags & DEACTIVATE_CLEAR_REFMOD)); + flags &= ~(DEACTIVATE_KILL | DEACTIVATE_REUSABLE | DEACTIVATE_ALL_REUSABLE); offset += object->vo_shadow_offset; vm_object_lock(tmp_object); } @@ -2455,6 +2437,16 @@ vm_object_deactivate_pages( vm_object_size_t length; boolean_t all_reusable; pmap_flush_context pmap_flush_context_storage; + unsigned int pmap_clear_refmod_mask = VM_MEM_REFERENCED; + unsigned int pmap_clear_refmod_options = 0; + deactivate_flags_t flags = DEACTIVATE_CLEAR_REFMOD; + bool refmod_cleared = false; + if (kill_page) { + flags |= DEACTIVATE_KILL; + } + if (reusable_page) { + flags |= DEACTIVATE_REUSABLE; + } /* * We break the range up into chunks and do one chunk at a time. This is for @@ -2470,7 +2462,15 @@ vm_object_deactivate_pages( * For the sake of accurate "reusable" pmap stats, we need * to tell pmap about each page that is no longer "reusable", * so we can't do the "all_reusable" optimization. + * + * If we do go with the all_reusable optimization, we can't + * return if size is 0 since we could have "all_reusable == TRUE" + * In this case, we save the overhead of doing the pmap_flush_context + * work. */ + if (size == 0) { + return; + } #else if (reusable_page && object->internal && @@ -2479,6 +2479,7 @@ vm_object_deactivate_pages( object->reusable_page_count == 0) { all_reusable = TRUE; reusable_page = FALSE; + flags |= DEACTIVATE_ALL_REUSABLE; } #endif @@ -2488,10 +2489,34 @@ vm_object_deactivate_pages( return; } + pmap_flush_context_init(&pmap_flush_context_storage); + /* + * If we're deactivating multiple pages, try to perform one bulk pmap operation. + * We can't do this if we're killing pages and there's a shadow chain as + * we don't yet know which pages are in the top object (pages in shadow copies aren't + * safe to kill). + * And we can only do this on hardware that supports it. + */ + if (size > PAGE_SIZE && (!kill_page || !object->shadow)) { + if (kill_page && object->internal) { + pmap_clear_refmod_mask |= VM_MEM_MODIFIED; + } + if (reusable_page) { + pmap_clear_refmod_options |= PMAP_OPTIONS_SET_REUSABLE; + } + + refmod_cleared = pmap_clear_refmod_range_options(pmap, pmap_offset, pmap_offset + size, pmap_clear_refmod_mask, pmap_clear_refmod_options); + if (refmod_cleared) { + // We were able to clear all the refmod bits. So deactivate_a_chunk doesn't need to do it. 
+ flags &= ~DEACTIVATE_CLEAR_REFMOD; + } + } + while (size) { - length = deactivate_a_chunk(object, offset, size, kill_page, reusable_page, all_reusable, &pmap_flush_context_storage, pmap, pmap_offset); + length = deactivate_a_chunk(object, offset, size, flags, + &pmap_flush_context_storage, pmap, pmap_offset); size -= length; offset += length; @@ -2643,11 +2668,13 @@ vm_object_pmap_protect( vm_object_offset_t offset, vm_object_size_t size, pmap_t pmap, + vm_map_size_t pmap_page_size, vm_map_offset_t pmap_start, vm_prot_t prot) { - vm_object_pmap_protect_options(object, offset, size, - pmap, pmap_start, prot, 0); + vm_object_pmap_protect_options(object, offset, size, pmap, + pmap_page_size, + pmap_start, prot, 0); } __private_extern__ void @@ -2656,18 +2683,37 @@ vm_object_pmap_protect_options( vm_object_offset_t offset, vm_object_size_t size, pmap_t pmap, + vm_map_size_t pmap_page_size, vm_map_offset_t pmap_start, vm_prot_t prot, int options) { pmap_flush_context pmap_flush_context_storage; boolean_t delayed_pmap_flush = FALSE; + vm_object_offset_t offset_in_object; + vm_object_size_t size_in_object; if (object == VM_OBJECT_NULL) { return; } - size = vm_object_round_page(size); - offset = vm_object_trunc_page(offset); + if (pmap_page_size > PAGE_SIZE) { + /* for 16K map on 4K device... */ + pmap_page_size = PAGE_SIZE; + } + /* + * If we decide to work on the object itself, extend the range to + * cover a full number of native pages. + */ + size_in_object = vm_object_round_page(offset + size) - vm_object_trunc_page(offset); + offset_in_object = vm_object_trunc_page(offset); + /* + * If we decide to work on the pmap, use the exact range specified, + * so no rounding/truncating offset and size. They should already + * be aligned to pmap_page_size. + */ + assertf(!(offset & (pmap_page_size - 1)) && !(size & (pmap_page_size - 1)), + "offset 0x%llx size 0x%llx pmap_page_size 0x%llx", + offset, size, (uint64_t)pmap_page_size); vm_object_lock(object); @@ -2683,8 +2729,8 @@ vm_object_pmap_protect_options( } else { vm_object_offset_t phys_start, phys_end, phys_addr; - phys_start = object->vo_shadow_offset + offset; - phys_end = phys_start + size; + phys_start = object->vo_shadow_offset + offset_in_object; + phys_end = phys_start + size_in_object; assert(phys_start <= phys_end); assert(phys_end <= object->vo_shadow_offset + object->vo_size); vm_object_unlock(object); @@ -2712,13 +2758,20 @@ vm_object_pmap_protect_options( assert(object->internal); while (TRUE) { - if (ptoa_64(object->resident_page_count) > size / 2 && pmap != PMAP_NULL) { + if (ptoa_64(object->resident_page_count) > size_in_object / 2 && pmap != PMAP_NULL) { vm_object_unlock(object); + if (pmap_page_size < PAGE_SIZE) { + DEBUG4K_PMAP("pmap %p start 0x%llx end 0x%llx prot 0x%x: pmap_protect()\n", pmap, (uint64_t)pmap_start, pmap_start + size, prot); + } pmap_protect_options(pmap, pmap_start, pmap_start + size, prot, options & ~PMAP_OPTIONS_NOFLUSH, NULL); return; } + if (pmap_page_size < PAGE_SIZE) { + DEBUG4K_PMAP("pmap %p start 0x%llx end 0x%llx prot 0x%x: offset 0x%llx size 0x%llx object %p offset 0x%llx size 0x%llx\n", pmap, (uint64_t)pmap_start, pmap_start + size, prot, offset, size, object, offset_in_object, size_in_object); + } + pmap_flush_context_init(&pmap_flush_context_storage); delayed_pmap_flush = FALSE; @@ -2727,26 +2780,41 @@ vm_object_pmap_protect_options( * page count then we should interate over pages otherwise * inverse page look-up will be faster */ - if (ptoa_64(object->resident_page_count / 4) < size) { + if 
(ptoa_64(object->resident_page_count / 4) < size_in_object) { vm_page_t p; vm_object_offset_t end; - end = offset + size; + end = offset_in_object + size_in_object; vm_page_queue_iterate(&object->memq, p, vmp_listq) { - if (!p->vmp_fictitious && (offset <= p->vmp_offset) && (p->vmp_offset < end)) { + if (!p->vmp_fictitious && (offset_in_object <= p->vmp_offset) && (p->vmp_offset < end)) { vm_map_offset_t start; + /* + * XXX FBDP 4K: intentionally using "offset" here instead + * of "offset_in_object", since "start" is a pmap address. + */ start = pmap_start + p->vmp_offset - offset; if (pmap != PMAP_NULL) { - pmap_protect_options( - pmap, - start, - start + PAGE_SIZE_64, - prot, - options | PMAP_OPTIONS_NOFLUSH, - &pmap_flush_context_storage); + vm_map_offset_t curr; + for (curr = start; + curr < start + PAGE_SIZE_64; + curr += pmap_page_size) { + if (curr < pmap_start) { + continue; + } + if (curr >= pmap_start + size) { + break; + } + pmap_protect_options( + pmap, + curr, + curr + pmap_page_size, + prot, + options | PMAP_OPTIONS_NOFLUSH, + &pmap_flush_context_storage); + } } else { pmap_page_protect_options( VM_PAGE_GET_PHYS_PAGE(p), @@ -2762,25 +2830,40 @@ vm_object_pmap_protect_options( vm_object_offset_t end; vm_object_offset_t target_off; - end = offset + size; + end = offset_in_object + size_in_object; - for (target_off = offset; + for (target_off = offset_in_object; target_off < end; target_off += PAGE_SIZE) { p = vm_page_lookup(object, target_off); if (p != VM_PAGE_NULL) { vm_object_offset_t start; + /* + * XXX FBDP 4K: intentionally using "offset" here instead + * of "offset_in_object", since "start" is a pmap address. + */ start = pmap_start + (p->vmp_offset - offset); if (pmap != PMAP_NULL) { - pmap_protect_options( - pmap, - start, - start + PAGE_SIZE_64, - prot, - options | PMAP_OPTIONS_NOFLUSH, - &pmap_flush_context_storage); + vm_map_offset_t curr; + for (curr = start; + curr < start + PAGE_SIZE; + curr += pmap_page_size) { + if (curr < pmap_start) { + continue; + } + if (curr >= pmap_start + size) { + break; + } + pmap_protect_options( + pmap, + curr, + curr + pmap_page_size, + prot, + options | PMAP_OPTIONS_NOFLUSH, + &pmap_flush_context_storage); + } } else { pmap_page_protect_options( VM_PAGE_GET_PHYS_PAGE(p), @@ -2805,6 +2888,7 @@ vm_object_pmap_protect_options( next_object = object->shadow; if (next_object != VM_OBJECT_NULL) { + offset_in_object += object->vo_shadow_offset; offset += object->vo_shadow_offset; vm_object_lock(next_object); vm_object_unlock(object); @@ -2894,6 +2978,8 @@ vm_object_copy_slowly( * this routine, since we have the only reference. 
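The pmap_page_size parameter added above lets vm_object_pmap_protect_options() cope with a pmap whose page size is smaller than the kernel's native page size (the DEBUG4K case): each native page is walked in pmap_page_size steps and clipped to the caller's [pmap_start, pmap_start + size) window. A stand-alone sketch of just that clipping loop, with made-up sizes and names:

#include <stdint.h>
#include <stdio.h>

#define NATIVE_PAGE_SIZE 0x4000ULL   /* 16K "object" page, for illustration */
#define PMAP_PAGE_SIZE   0x1000ULL   /* 4K mapping granule */

/* Stand-in for pmap_protect_options(): just report the range it would touch. */
static void
protect_range(uint64_t start, uint64_t end)
{
    printf("protect [0x%llx, 0x%llx)\n",
        (unsigned long long)start, (unsigned long long)end);
}

/*
 * Apply protection for one native page that maps at 'start' in the pmap,
 * but only where it overlaps the caller's window [pmap_start, pmap_start+size).
 */
static void
protect_native_page(uint64_t start, uint64_t pmap_start, uint64_t size)
{
    for (uint64_t curr = start; curr < start + NATIVE_PAGE_SIZE; curr += PMAP_PAGE_SIZE) {
        if (curr < pmap_start) {
            continue;               /* before the requested window */
        }
        if (curr >= pmap_start + size) {
            break;                  /* past the requested window */
        }
        protect_range(curr, curr + PMAP_PAGE_SIZE);
    }
}

int
main(void)
{
    /* Window starts 8K into the native page and covers 8K. */
    protect_native_page(/*start*/ 0x10000, /*pmap_start*/ 0x12000, /*size*/ 0x2000);
    return 0;
}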
*/ + size = vm_object_round_page(src_offset + size) - vm_object_trunc_page(src_offset); + src_offset = vm_object_trunc_page(src_offset); new_object = vm_object_allocate(size); new_offset = 0; @@ -3074,7 +3160,7 @@ vm_object_copy_slowly( if (vm_page_wait(interruptible)) { break; } - /* fall thru */ + OS_FALLTHROUGH; case VM_FAULT_INTERRUPTED: vm_object_lock(new_object); @@ -3090,7 +3176,7 @@ vm_object_copy_slowly( /* success but no VM page: fail */ vm_object_paging_end(src_object); vm_object_unlock(src_object); - /*FALLTHROUGH*/ + OS_FALLTHROUGH; case VM_FAULT_MEMORY_ERROR: /* * A policy choice: @@ -3299,6 +3385,9 @@ Retry: goto Retry; } if (copy->vo_size < src_offset + size) { + assertf(page_aligned(src_offset + size), + "object %p size 0x%llx", + copy, (uint64_t)(src_offset + size)); copy->vo_size = src_offset + size; } @@ -3523,6 +3612,9 @@ Retry: pmap_flush(&pmap_flush_context_storage); } + assertf(page_aligned(copy_size), + "object %p size 0x%llx", + old_copy, (uint64_t)copy_size); old_copy->vo_size = copy_size; } if (src_object_shared == TRUE) { @@ -3561,6 +3653,9 @@ Retry: src_object_shared = FALSE; goto Retry; } + assertf(page_aligned(copy_size), + "object %p size 0x%llx", + new_copy, (uint64_t)copy_size); new_copy->vo_size = copy_size; /* @@ -3742,13 +3837,13 @@ vm_object_copy_strategically( break; } vm_object_lock(src_object); - /* fall thru when delayed copy not allowed */ + OS_FALLTHROUGH; /* fall thru when delayed copy not allowed */ case MEMORY_OBJECT_COPY_NONE: result = vm_object_copy_slowly(src_object, src_offset, size, interruptible, dst_object); if (result == KERN_SUCCESS) { - *dst_offset = 0; + *dst_offset = src_offset - vm_object_trunc_page(src_offset); *dst_needs_copy = FALSE; } break; @@ -3857,6 +3952,18 @@ vm_object_shadow( vm_object_unlock(source); } + /* + * *offset is the map entry's offset into the VM object and + * is aligned to the map's page size. + * VM objects need to be aligned to the system's page size. + * Record the necessary adjustment and re-align the offset so + * that result->vo_shadow_offset is properly page-aligned. + */ + vm_object_offset_t offset_adjustment; + offset_adjustment = *offset - vm_object_trunc_page(*offset); + length = vm_object_round_page(length + offset_adjustment); + *offset = vm_object_trunc_page(*offset); + /* * Allocate a new object with the given length */ @@ -3880,12 +3987,23 @@ vm_object_shadow( */ result->vo_shadow_offset = *offset; + assertf(page_aligned(result->vo_shadow_offset), + "result %p shadow offset 0x%llx", + result, result->vo_shadow_offset); /* * Return the new things */ *offset = 0; + if (offset_adjustment) { + /* + * Make the map entry point to the equivalent offset + * in the new object. 
+ */ + DEBUG4K_COPY("adjusting offset @ %p from 0x%llx to 0x%llx for object %p length: 0x%llx\n", offset, *offset, *offset + offset_adjustment, result, length); + *offset += offset_adjustment; + } *object = result; return TRUE; } @@ -4391,6 +4509,12 @@ vm_object_do_collapse( assert(!backing_object->phys_contiguous); object->shadow = backing_object->shadow; if (object->shadow) { + assertf(page_aligned(object->vo_shadow_offset), + "object %p shadow_offset 0x%llx", + object, object->vo_shadow_offset); + assertf(page_aligned(backing_object->vo_shadow_offset), + "backing_object %p shadow_offset 0x%llx", + backing_object, backing_object->vo_shadow_offset); object->vo_shadow_offset += backing_object->vo_shadow_offset; /* "backing_object" gave its shadow to "object" */ backing_object->shadow = VM_OBJECT_NULL; @@ -4482,6 +4606,12 @@ vm_object_do_bypass( assert(!backing_object->phys_contiguous); object->shadow = backing_object->shadow; if (object->shadow) { + assertf(page_aligned(object->vo_shadow_offset), + "object %p shadow_offset 0x%llx", + object, object->vo_shadow_offset); + assertf(page_aligned(backing_object->vo_shadow_offset), + "backing_object %p shadow_offset 0x%llx", + backing_object, backing_object->vo_shadow_offset); object->vo_shadow_offset += backing_object->vo_shadow_offset; } else { /* no shadow, therefore no shadow offset... */ @@ -4604,6 +4734,8 @@ vm_object_collapse( vm_object_collapse_calls++; + assertf(page_aligned(hint_offset), "hint_offset 0x%llx", hint_offset); + if (!vm_object_collapse_allowed && !(can_bypass && vm_object_bypass_allowed)) { return; @@ -5174,6 +5306,9 @@ vm_object_coalesce( */ newsize = prev_offset + prev_size + next_size; if (newsize > prev_object->vo_size) { + assertf(page_aligned(newsize), + "object %p size 0x%llx", + prev_object, (uint64_t)newsize); prev_object->vo_size = newsize; } @@ -5268,6 +5403,9 @@ vm_object_populate_with_private( /* shadows on contiguous memory are not allowed */ /* we therefore can use the offset field */ object->vo_shadow_offset = (vm_object_offset_t)phys_page << PAGE_SHIFT; + assertf(page_aligned(size), + "object %p size 0x%llx", + object, (uint64_t)size); object->vo_size = size; } vm_object_unlock(object); @@ -5499,7 +5637,13 @@ vm_object_lock_request( return KERN_INVALID_ARGUMENT; } - size = round_page_64(size); + /* + * XXX TODO4K + * extend range for conservative operations (copy-on-write, sync, ...) + * truncate range for destructive operations (purge, ...) + */ + size = vm_object_round_page(offset + size) - vm_object_trunc_page(offset); + offset = vm_object_trunc_page(offset); /* * Lock the object, and acquire a paging reference to @@ -6744,6 +6888,7 @@ vm_object_cluster_size(vm_object_t object, vm_object_offset_t *start, switch (behavior) { default: behavior = VM_BEHAVIOR_DEFAULT; + OS_FALLTHROUGH; case VM_BEHAVIOR_DEFAULT: if (object->internal && fault_info->user_tag == VM_MEMORY_STACK) { @@ -7305,6 +7450,28 @@ vm_object_lock_try(vm_object_t object) return _vm_object_lock_try(object); } +/* + * Lock the object exclusive. + * + * Returns true iff the thread had to spin or block before + * acquiring the lock. 
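The vm_object_shadow() change above handles map entries whose offset is aligned only to the map's page size while the shadow object's offset must stay aligned to the system page size: the offset is truncated, the length is rounded up to cover the slack, and the slack is handed back to the caller's offset. The arithmetic as a self-contained sketch (the page size and names are illustrative only):

#include <stdint.h>
#include <stdio.h>

#define SYSTEM_PAGE_SIZE 0x4000ULL   /* 16K system pages, for illustration */

static uint64_t trunc_pg(uint64_t x) { return x & ~(SYSTEM_PAGE_SIZE - 1); }
static uint64_t round_pg(uint64_t x) { return trunc_pg(x + SYSTEM_PAGE_SIZE - 1); }

/*
 * 'offset' comes in aligned to the map's page size (say 4K), but the shadow
 * object's offset must be aligned to the system page size.  Keep the shadow
 * offset page-aligned and return the sub-page slack via the caller's offset.
 */
static void
shadow_align(uint64_t *offset, uint64_t *length, uint64_t *shadow_offset)
{
    uint64_t adjustment = *offset - trunc_pg(*offset); /* sub-page slack */

    *length        = round_pg(*length + adjustment);   /* grow to cover slack */
    *shadow_offset = trunc_pg(*offset);                /* page-aligned */
    *offset        = adjustment;    /* new object starts at 0; re-apply slack */
}

int
main(void)
{
    uint64_t offset = 0x11000;      /* 4K-aligned, not 16K-aligned */
    uint64_t length = 0x8000;
    uint64_t shadow_offset;

    shadow_align(&offset, &length, &shadow_offset);
    printf("shadow_offset=0x%llx length=0x%llx entry offset=0x%llx\n",
        (unsigned long long)shadow_offset,
        (unsigned long long)length,
        (unsigned long long)offset);
    return 0;
}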
+ */ +bool +vm_object_lock_check_contended(vm_object_t object) +{ + bool contended; + if (object == vm_pageout_scan_wants_object) { + scan_object_collision++; + mutex_pause(2); + } + DTRACE_VM(vm_object_lock_w); + contended = lck_rw_lock_exclusive_check_contended(&object->Lock); +#if DEVELOPMENT || DEBUG + object->Lock_owner = current_thread(); +#endif + return contended; +} + void vm_object_lock_shared(vm_object_t object) { @@ -7410,6 +7577,8 @@ vm_object_change_wimg_mode(vm_object_t object, unsigned int wimg_mode) #if CONFIG_FREEZE +extern struct freezer_context freezer_context_global; + /* * This routine does the "relocation" of previously * compressed pages belonging to this object that are @@ -7418,9 +7587,6 @@ vm_object_change_wimg_mode(vm_object_t object, unsigned int wimg_mode) * compressed pages belonging to this object. */ -extern void *freezer_chead; -extern char *freezer_compressor_scratch_buf; -extern int c_freezer_compression_count; extern AbsoluteTime c_freezer_last_yield_ts; #define MAX_FREE_BATCH 32 @@ -7452,7 +7618,7 @@ c_freezer_should_yield() void vm_object_compressed_freezer_done() { - vm_compressor_finished_filling(&freezer_chead); + vm_compressor_finished_filling( &(freezer_context_global.freezer_ctx_chead)); } @@ -7487,6 +7653,21 @@ vm_object_compressed_freezer_pageout( } } + /* + * We could be freezing a shared internal object that might + * be part of some other thread's current VM operations. + * We skip it if there's a paging-in-progress or activity-in-progress + * because we could be here a long time with the map lock held. + * + * Note: We are holding the map locked while we wait. + * This is fine in the freezer path because the task + * is suspended and so this latency is acceptable. + */ + if (object->paging_in_progress || object->activity_in_progress) { + vm_object_unlock(object); + return paged_out_count; + } + if (VM_CONFIG_FREEZER_SWAP_IS_ACTIVE) { vm_object_offset_t curr_offset = 0; @@ -7502,7 +7683,7 @@ vm_object_compressed_freezer_pageout( break; } - retval = vm_compressor_pager_relocate(object->pager, curr_offset, &freezer_chead); + retval = vm_compressor_pager_relocate(object->pager, curr_offset, &(freezer_context_global.freezer_ctx_chead)); if (retval != KERN_SUCCESS) { break; @@ -7602,7 +7783,9 @@ vm_object_compressed_freezer_pageout( vm_object_unlock(object); - if (vm_pageout_compress_page(&freezer_chead, freezer_compressor_scratch_buf, p) == KERN_SUCCESS) { + if (vm_pageout_compress_page(&(freezer_context_global.freezer_ctx_chead), + (freezer_context_global.freezer_ctx_compressor_scratch_buf), + p) == KERN_SUCCESS) { /* * page has already been un-tabled from the object via 'vm_page_remove' */ @@ -7619,7 +7802,7 @@ vm_object_compressed_freezer_pageout( local_freeq = NULL; local_freed = 0; } - c_freezer_compression_count++; + freezer_context_global.freezer_ctx_uncompressed_pages++; } KERNEL_DEBUG(0xe0430004 | DBG_FUNC_END, object, local_freed, 0, 0, 0); @@ -7860,11 +8043,15 @@ vm_decmp_upl_reprioritize(upl_t upl, int prio) io_upl = upl->decmp_io_upl; assert((io_upl->flags & UPL_DECMP_REAL_IO) != 0); - io_upl_size = io_upl->size; + assertf(page_aligned(io_upl->u_offset) && page_aligned(io_upl->u_size), + "upl %p offset 0x%llx size 0x%x\n", + io_upl, io_upl->u_offset, io_upl->u_size); + io_upl_size = io_upl->u_size; upl_unlock(upl); /* Now perform the allocation */ - io_upl_reprio_info = (uint64_t *)kalloc(sizeof(uint64_t) * (io_upl_size / PAGE_SIZE)); + io_upl_reprio_info = (uint64_t *)kheap_alloc(KHEAP_TEMP, + sizeof(uint64_t) * atop(io_upl_size), 
Z_WAITOK); if (io_upl_reprio_info == NULL) { return; } @@ -7879,7 +8066,8 @@ vm_decmp_upl_reprioritize(upl_t upl, int prio) upl_unlock(upl); goto out; } - memcpy(io_upl_reprio_info, io_upl->upl_reprio_info, sizeof(uint64_t) * (io_upl_size / PAGE_SIZE)); + memcpy(io_upl_reprio_info, io_upl->upl_reprio_info, + sizeof(uint64_t) * atop(io_upl_size)); /* Get the VM object for this UPL */ if (io_upl->flags & UPL_SHADOWED) { @@ -7901,8 +8089,8 @@ vm_decmp_upl_reprioritize(upl_t upl, int prio) offset = 0; while (offset < io_upl_size) { - blkno = io_upl_reprio_info[(offset / PAGE_SIZE)] & UPL_REPRIO_INFO_MASK; - len = (io_upl_reprio_info[(offset / PAGE_SIZE)] >> UPL_REPRIO_INFO_SHIFT) & UPL_REPRIO_INFO_MASK; + blkno = io_upl_reprio_info[atop(offset)] & UPL_REPRIO_INFO_MASK; + len = (io_upl_reprio_info[atop(offset)] >> UPL_REPRIO_INFO_SHIFT) & UPL_REPRIO_INFO_MASK; /* * This implementation may cause some spurious expedites due to the @@ -7934,8 +8122,8 @@ vm_decmp_upl_reprioritize(upl_t upl, int prio) IO_REPRIO_THREAD_WAKEUP(); out: - kfree(io_upl_reprio_info, sizeof(uint64_t) * (io_upl_size / PAGE_SIZE)); - return; + kheap_free(KHEAP_TEMP, io_upl_reprio_info, + sizeof(uint64_t) * atop(io_upl_size)); } void @@ -7957,7 +8145,10 @@ vm_page_handle_prio_inversion(vm_object_t o, vm_page_t m) continue; } pl = UPL_GET_INTERNAL_PAGE_LIST(upl); - num_pages = (upl->size / PAGE_SIZE); + assertf(page_aligned(upl->u_offset) && page_aligned(upl->u_size), + "upl %p offset 0x%llx size 0x%x\n", + upl, upl->u_offset, upl->u_size); + num_pages = (upl->u_size / PAGE_SIZE); /* * For each page in the UPL page list, see if it matches the contended @@ -8060,6 +8251,7 @@ vm_object_access_tracking( 0, object->vo_size, PMAP_NULL, + PAGE_SIZE, 0, VM_PROT_NONE, 0); @@ -8260,14 +8452,15 @@ vm_object_ownership_change( old_no_footprint = object->vo_no_footprint; old_owner = VM_OBJECT_OWNER(object); - DTRACE_VM7(object_ownership_change, + DTRACE_VM8(object_ownership_change, vm_object_t, object, task_t, old_owner, int, old_ledger_tag, int, old_no_footprint, task_t, new_owner, int, new_ledger_tag, - int, new_no_footprint); + int, new_no_footprint, + int, VM_OBJECT_ID(object)); assert(object->internal); resident_count = object->resident_page_count - object->wired_page_count; diff --git a/osfmk/vm/vm_object.h b/osfmk/vm/vm_object.h index c4fde23b1..6399f95d0 100644 --- a/osfmk/vm/vm_object.h +++ b/osfmk/vm/vm_object.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2007 Apple Inc. All rights reserved. + * Copyright (c) 2000-2020 Apple Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -118,8 +118,8 @@ struct vm_object_fault_info { uint32_t user_tag; vm_size_t cluster_size; vm_behavior_t behavior; - vm_map_offset_t lo_offset; - vm_map_offset_t hi_offset; + vm_object_offset_t lo_offset; + vm_object_offset_t hi_offset; unsigned int /* boolean_t */ no_cache:1, /* boolean_t */ stealth:1, @@ -536,7 +536,6 @@ extern lck_attr_t vm_map_lck_attr; #define OBJECT_LOCK_EXCLUSIVE 1 extern lck_grp_t vm_object_lck_grp; -extern lck_grp_attr_t vm_object_lck_grp_attr; extern lck_attr_t vm_object_lck_attr; extern lck_attr_t kernel_object_lck_attr; extern lck_attr_t compressor_object_lck_attr; @@ -544,6 +543,7 @@ extern lck_attr_t compressor_object_lck_attr; extern vm_object_t vm_pageout_scan_wants_object; extern void vm_object_lock(vm_object_t); +extern bool vm_object_lock_check_contended(vm_object_t); extern boolean_t vm_object_lock_try(vm_object_t); extern boolean_t _vm_object_lock_try(vm_object_t); extern boolean_t vm_object_lock_avoid(vm_object_t); @@ -597,10 +597,6 @@ extern boolean_t vm_object_lock_upgrade(vm_object_t); __private_extern__ void vm_object_bootstrap(void); -__private_extern__ void vm_object_init(void); - -__private_extern__ void vm_object_init_lck_grp(void); - __private_extern__ void vm_object_reaper_init(void); __private_extern__ vm_object_t vm_object_allocate(vm_object_size_t size); @@ -678,6 +674,7 @@ __private_extern__ void vm_object_pmap_protect( vm_object_offset_t offset, vm_object_size_t size, pmap_t pmap, + vm_map_size_t pmap_page_size, vm_map_offset_t pmap_start, vm_prot_t prot); @@ -686,6 +683,7 @@ __private_extern__ void vm_object_pmap_protect_options( vm_object_offset_t offset, vm_object_size_t size, pmap_t pmap, + vm_map_size_t pmap_page_size, vm_map_offset_t pmap_start, vm_prot_t prot, int options); @@ -702,6 +700,7 @@ __private_extern__ void vm_object_deactivate_pages( boolean_t kill_page, boolean_t reusable_page, struct pmap *pmap, +/* XXX TODO4K: need pmap_page_size here too? */ vm_map_offset_t pmap_offset); __private_extern__ void vm_object_reuse_pages( @@ -944,7 +943,7 @@ vm_object_assert_wait( assert(event >= 0 && event <= VM_OBJECT_EVENT_MAX); object->all_wanted |= 1 << event; - wr = assert_wait((event_t)((vm_offset_t)object + event), + wr = assert_wait((event_t)((vm_offset_t)object + (vm_offset_t)event), interruptible); return wr; } @@ -1000,7 +999,7 @@ vm_object_sleep( object->all_wanted |= 1 << event; wr = thread_sleep_vm_object(object, - (event_t)((vm_offset_t)object + event), + (event_t)((vm_offset_t)object + (vm_offset_t)event), interruptible); return wr; } @@ -1014,7 +1013,7 @@ vm_object_wakeup( assert(event >= 0 && event <= VM_OBJECT_EVENT_MAX); if (object->all_wanted & (1 << event)) { - thread_wakeup((event_t)((vm_offset_t)object + event)); + thread_wakeup((event_t)((vm_offset_t)object + (vm_offset_t)event)); } object->all_wanted &= ~(1 << event); } @@ -1204,4 +1203,9 @@ extern kern_return_t vm_object_ownership_change( int new_ledger_flags, boolean_t task_objq_locked); +// LP64todo: all the current tools are 32bit, obviously never worked for 64b +// so probably should be a real 32b ID vs. ptr. +// Current users just check for equality +#define VM_OBJECT_ID(o) ((uint32_t)(uintptr_t)VM_KERNEL_ADDRPERM((o))) + #endif /* _VM_VM_OBJECT_H_ */ diff --git a/osfmk/vm/vm_page.h b/osfmk/vm/vm_page.h index 7ae865190..164a36145 100644 --- a/osfmk/vm/vm_page.h +++ b/osfmk/vm/vm_page.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved. 
+ * Copyright (c) 2000-2020 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -68,9 +68,11 @@ #include #include +#include #include #include #include +#include /* for VMP_CS_BITS... */ #if defined(__LP64__) @@ -96,11 +98,11 @@ typedef struct vm_page_packed_queue_entry *vm_page_queue_entry_t; typedef vm_page_packed_t vm_page_object_t; -#else +#else // __LP64__ /* - * we can't do the packing trick on 32 bit architectures, so - * just turn the macros into noops. + * we can't do the packing trick on 32 bit architectures + * so just turn the macros into noops. */ typedef struct vm_page *vm_page_packed_t; @@ -110,7 +112,7 @@ typedef struct vm_page *vm_page_packed_t; #define vm_page_queue_entry_t queue_entry_t #define vm_page_object_t vm_object_t -#endif +#endif // __LP64__ #include @@ -222,7 +224,8 @@ struct vm_page { /* be reused ahead of other pages (P) */ vmp_private:1, /* Page should not be returned to the free list (P) */ vmp_reference:1, /* page has been used (P) */ - vmp_unused_page_bits:5; + vmp_lopage:1, + vmp_unused_page_bits:4; /* * MUST keep the 2 32 bit words used as bit fields @@ -264,25 +267,140 @@ struct vm_page { vmp_restart:1, /* Page was pushed higher in shadow chain by copy_call-related pagers */ /* start again at top of chain */ vmp_unusual:1, /* Page is absent, error, restart or page locked */ - vmp_cs_validated:1, /* code-signing: page was checked */ - vmp_cs_tainted:1, /* code-signing: page is tainted */ - vmp_cs_nx:1, /* code-signing: page is nx */ + vmp_cs_validated:VMP_CS_BITS, /* code-signing: page was checked */ + vmp_cs_tainted:VMP_CS_BITS, /* code-signing: page is tainted */ + vmp_cs_nx:VMP_CS_BITS, /* code-signing: page is nx */ vmp_reusable:1, - vmp_lopage:1, - vmp_written_by_kernel:1, /* page was written by kernel (i.e. decompressed) */ - vmp_unused_object_bits:8; + vmp_written_by_kernel:1; /* page was written by kernel (i.e. 
decompressed) */ #if !defined(__arm__) && !defined(__arm64__) ppnum_t vmp_phys_page; /* Physical page number of the page */ #endif }; - typedef struct vm_page *vm_page_t; extern vm_page_t vm_pages; extern vm_page_t vm_page_array_beginning_addr; extern vm_page_t vm_page_array_ending_addr; +static inline int +VMP_CS_FOR_OFFSET( + vm_map_offset_t fault_phys_offset) +{ + assertf(fault_phys_offset < PAGE_SIZE && + !(fault_phys_offset & FOURK_PAGE_MASK), + "offset 0x%llx\n", (uint64_t)fault_phys_offset); + return 1 << (fault_phys_offset >> FOURK_PAGE_SHIFT); +} +static inline bool +VMP_CS_VALIDATED( + vm_page_t p, + vm_map_size_t fault_page_size, + vm_map_offset_t fault_phys_offset) +{ + assertf(fault_page_size <= PAGE_SIZE, + "fault_page_size 0x%llx fault_phys_offset 0x%llx\n", + (uint64_t)fault_page_size, (uint64_t)fault_phys_offset); + if (fault_page_size == PAGE_SIZE) { + return p->vmp_cs_validated == VMP_CS_ALL_TRUE; + } + return p->vmp_cs_validated & VMP_CS_FOR_OFFSET(fault_phys_offset); +} +static inline bool +VMP_CS_TAINTED( + vm_page_t p, + vm_map_size_t fault_page_size, + vm_map_offset_t fault_phys_offset) +{ + assertf(fault_page_size <= PAGE_SIZE, + "fault_page_size 0x%llx fault_phys_offset 0x%llx\n", + (uint64_t)fault_page_size, (uint64_t)fault_phys_offset); + if (fault_page_size == PAGE_SIZE) { + return p->vmp_cs_tainted != VMP_CS_ALL_FALSE; + } + return p->vmp_cs_tainted & VMP_CS_FOR_OFFSET(fault_phys_offset); +} +static inline bool +VMP_CS_NX( + vm_page_t p, + vm_map_size_t fault_page_size, + vm_map_offset_t fault_phys_offset) +{ + assertf(fault_page_size <= PAGE_SIZE, + "fault_page_size 0x%llx fault_phys_offset 0x%llx\n", + (uint64_t)fault_page_size, (uint64_t)fault_phys_offset); + if (fault_page_size == PAGE_SIZE) { + return p->vmp_cs_nx != VMP_CS_ALL_FALSE; + } + return p->vmp_cs_nx & VMP_CS_FOR_OFFSET(fault_phys_offset); +} +static inline void +VMP_CS_SET_VALIDATED( + vm_page_t p, + vm_map_size_t fault_page_size, + vm_map_offset_t fault_phys_offset, + boolean_t value) +{ + assertf(fault_page_size <= PAGE_SIZE, + "fault_page_size 0x%llx fault_phys_offset 0x%llx\n", + (uint64_t)fault_page_size, (uint64_t)fault_phys_offset); + if (value) { + if (fault_page_size == PAGE_SIZE) { + p->vmp_cs_validated = VMP_CS_ALL_TRUE; + } + p->vmp_cs_validated |= VMP_CS_FOR_OFFSET(fault_phys_offset); + } else { + if (fault_page_size == PAGE_SIZE) { + p->vmp_cs_validated = VMP_CS_ALL_FALSE; + } + p->vmp_cs_validated &= ~VMP_CS_FOR_OFFSET(fault_phys_offset); + } +} +static inline void +VMP_CS_SET_TAINTED( + vm_page_t p, + vm_map_size_t fault_page_size, + vm_map_offset_t fault_phys_offset, + boolean_t value) +{ + assertf(fault_page_size <= PAGE_SIZE, + "fault_page_size 0x%llx fault_phys_offset 0x%llx\n", + (uint64_t)fault_page_size, (uint64_t)fault_phys_offset); + if (value) { + if (fault_page_size == PAGE_SIZE) { + p->vmp_cs_tainted = VMP_CS_ALL_TRUE; + } + p->vmp_cs_tainted |= VMP_CS_FOR_OFFSET(fault_phys_offset); + } else { + if (fault_page_size == PAGE_SIZE) { + p->vmp_cs_tainted = VMP_CS_ALL_FALSE; + } + p->vmp_cs_tainted &= ~VMP_CS_FOR_OFFSET(fault_phys_offset); + } +} +static inline void +VMP_CS_SET_NX( + vm_page_t p, + vm_map_size_t fault_page_size, + vm_map_offset_t fault_phys_offset, + boolean_t value) +{ + assertf(fault_page_size <= PAGE_SIZE, + "fault_page_size 0x%llx fault_phys_offset 0x%llx\n", + (uint64_t)fault_page_size, (uint64_t)fault_phys_offset); + if (value) { + if (fault_page_size == PAGE_SIZE) { + p->vmp_cs_nx = VMP_CS_ALL_TRUE; + } + p->vmp_cs_nx |= 
VMP_CS_FOR_OFFSET(fault_phys_offset); + } else { + if (fault_page_size == PAGE_SIZE) { + p->vmp_cs_nx = VMP_CS_ALL_FALSE; + } + p->vmp_cs_nx &= ~VMP_CS_FOR_OFFSET(fault_phys_offset); + } +} + #if defined(__arm__) || defined(__arm64__) @@ -337,36 +455,49 @@ typedef struct vm_page_with_ppnum *vm_page_with_ppnum_t; -#if defined(__LP64__) - +#if defined(__LP64__) +/* + * Parameters for pointer packing + * + * + * VM Pages pointers might point to: + * + * 1. VM_PAGE_PACKED_ALIGNED aligned kernel globals, + * + * 2. VM_PAGE_PACKED_ALIGNED aligned heap allocated vm pages + * + * 3. entries in the vm_pages array (whose entries aren't VM_PAGE_PACKED_ALIGNED + * aligned). + * + * + * The current scheme uses 31 bits of storage and 6 bits of shift using the + * VM_PACK_POINTER() scheme for (1-2), and packs (3) as an index within the + * vm_pages array, setting the top bit (VM_PAGE_PACKED_FROM_ARRAY). + * + * This scheme gives us a reach of 128G from VM_MIN_KERNEL_AND_KEXT_ADDRESS. + */ #define VM_VPLQ_ALIGNMENT 128 -#define VM_PACKED_POINTER_ALIGNMENT 64 /* must be a power of 2 */ -#define VM_PACKED_POINTER_SHIFT 6 +#define VM_PAGE_PACKED_PTR_ALIGNMENT 64 /* must be a power of 2 */ +#define VM_PAGE_PACKED_ALIGNED __attribute__((aligned(VM_PAGE_PACKED_PTR_ALIGNMENT))) +#define VM_PAGE_PACKED_PTR_BITS 31 +#define VM_PAGE_PACKED_PTR_SHIFT 6 +#define VM_PAGE_PACKED_PTR_BASE ((uintptr_t)VM_MIN_KERNEL_AND_KEXT_ADDRESS) -#define VM_PACKED_FROM_VM_PAGES_ARRAY 0x80000000 +#define VM_PAGE_PACKED_FROM_ARRAY 0x80000000 static inline vm_page_packed_t vm_page_pack_ptr(uintptr_t p) { - vm_page_packed_t packed_ptr; - - if (!p) { - return (vm_page_packed_t)0; - } - - if (p >= (uintptr_t)(vm_page_array_beginning_addr) && p < (uintptr_t)(vm_page_array_ending_addr)) { - packed_ptr = ((vm_page_packed_t)(((vm_page_t)p - vm_page_array_beginning_addr))); - assert(!(packed_ptr & VM_PACKED_FROM_VM_PAGES_ARRAY)); - packed_ptr |= VM_PACKED_FROM_VM_PAGES_ARRAY; - return packed_ptr; + if (p >= (uintptr_t)vm_page_array_beginning_addr && + p < (uintptr_t)vm_page_array_ending_addr) { + ptrdiff_t diff = (vm_page_t)p - vm_page_array_beginning_addr; + assert((vm_page_t)p == &vm_pages[diff]); + return (vm_page_packed_t)(diff | VM_PAGE_PACKED_FROM_ARRAY); } - assert((p & (VM_PACKED_POINTER_ALIGNMENT - 1)) == 0); - - packed_ptr = ((vm_page_packed_t)(((uintptr_t)(p - (uintptr_t) VM_MIN_KERNEL_AND_KEXT_ADDRESS)) >> VM_PACKED_POINTER_SHIFT)); - assert(packed_ptr != 0); - assert(!(packed_ptr & VM_PACKED_FROM_VM_PAGES_ARRAY)); - return packed_ptr; + VM_ASSERT_POINTER_PACKABLE(p, VM_PAGE_PACKED_PTR); + vm_offset_t packed = VM_PACK_POINTER(p, VM_PAGE_PACKED_PTR); + return CAST_DOWN_EXPLICIT(vm_page_packed_t, packed); } @@ -375,15 +506,13 @@ vm_page_unpack_ptr(uintptr_t p) { extern unsigned int vm_pages_count; - if (!p) { - return (uintptr_t)0; + if (p >= VM_PAGE_PACKED_FROM_ARRAY) { + p &= ~VM_PAGE_PACKED_FROM_ARRAY; + assert(p < (uintptr_t)vm_pages_count); + return (uintptr_t)&vm_pages[p]; } - if (p & VM_PACKED_FROM_VM_PAGES_ARRAY) { - assert((uint32_t)(p & ~VM_PACKED_FROM_VM_PAGES_ARRAY) < vm_pages_count); - return (uintptr_t)(&vm_pages[(uint32_t)(p & ~VM_PACKED_FROM_VM_PAGES_ARRAY)]); - } - return (p << VM_PACKED_POINTER_SHIFT) + (uintptr_t) VM_MIN_KERNEL_AND_KEXT_ADDRESS; + return VM_UNPACK_POINTER(p, VM_PAGE_PACKED_PTR); } @@ -447,10 +576,9 @@ vm_page_remque( * void vm_page_queue_init(q) * vm_page_queue_t q; \* MODIFIED *\ */ -#define vm_page_queue_init(q) \ -MACRO_BEGIN \ - assert((((uintptr_t)q) & (VM_PACKED_POINTER_ALIGNMENT-1)) == 0); \ 
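The renamed VM_PAGE_PACKED_* constants above spell out the packing scheme: a vm_page pointer is stored in 31 bits either as (pointer - base) >> 6 for 64-byte-aligned allocations, or as an index into the vm_pages array with the 0x80000000 bit set. The sketch below reproduces only that arithmetic in userspace; the base address and array are stand-ins, not the kernel's values.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PACKED_PTR_SHIFT   6           /* allocations are 64-byte aligned */
#define PACKED_FROM_ARRAY  0x80000000u /* top bit marks "index into array" */

/* Hypothetical stand-ins for the kernel base address and the vm_pages array. */
static const uintptr_t packed_base = 0x100000000ULL;
static uint64_t        fake_pages[1024];

static uint32_t
pack_ptr(uintptr_t p)
{
    uintptr_t array_begin = (uintptr_t)&fake_pages[0];
    uintptr_t array_end   = (uintptr_t)&fake_pages[1024];

    if (p >= array_begin && p < array_end) {
        /* Array element: store its index and tag it with the top bit. */
        uint32_t idx = (uint32_t)((p - array_begin) / sizeof(fake_pages[0]));
        return idx | PACKED_FROM_ARRAY;
    }
    /* Aligned pointer above the base: store the shifted offset. */
    assert(p >= packed_base && (p & ((1u << PACKED_PTR_SHIFT) - 1)) == 0);
    return (uint32_t)((p - packed_base) >> PACKED_PTR_SHIFT);
}

static uintptr_t
unpack_ptr(uint32_t packed)
{
    if (packed & PACKED_FROM_ARRAY) {
        return (uintptr_t)&fake_pages[packed & ~PACKED_FROM_ARRAY];
    }
    return ((uintptr_t)packed << PACKED_PTR_SHIFT) + packed_base;
}

int
main(void)
{
    uintptr_t heap_like = packed_base + 0x1234000;   /* 64-byte aligned */
    uintptr_t in_array  = (uintptr_t)&fake_pages[7];

    printf("heap  roundtrip ok: %d\n", unpack_ptr(pack_ptr(heap_like)) == heap_like);
    printf("array roundtrip ok: %d\n", unpack_ptr(pack_ptr(in_array)) == in_array);
    return 0;
}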
- assert((VM_PAGE_UNPACK_PTR(VM_PAGE_PACK_PTR((uintptr_t)q))) == (uintptr_t)q); \ +#define vm_page_queue_init(q) \ +MACRO_BEGIN \ + VM_ASSERT_POINTER_PACKABLE((vm_offset_t)(q), VM_PAGE_PACKED_PTR); \ (q)->next = VM_PAGE_PACK_PTR(q); \ (q)->prev = VM_PAGE_PACK_PTR(q); \ MACRO_END @@ -535,8 +663,8 @@ vm_page_queue_enter_clump( vm_page_queue_t head, vm_page_t elt) { - vm_page_queue_entry_t first; /* first page in the clump */ - vm_page_queue_entry_t last; /* last page in the clump */ + vm_page_queue_entry_t first = NULL; /* first page in the clump */ + vm_page_queue_entry_t last = NULL; /* last page in the clump */ vm_page_queue_entry_t prev = NULL; vm_page_queue_entry_t next; uint_t n_free = 1; @@ -866,18 +994,21 @@ MACRO_END !vm_page_queue_end((head), (vm_page_queue_entry_t)(elt)); \ (elt) = (vm_page_t)vm_page_queue_next(&(elt)->field)) \ -#else +#else // LP64 #define VM_VPLQ_ALIGNMENT 128 -#define VM_PACKED_POINTER_ALIGNMENT 4 -#define VM_PACKED_POINTER_SHIFT 0 +#define VM_PAGE_PACKED_PTR_ALIGNMENT sizeof(vm_offset_t) +#define VM_PAGE_PACKED_ALIGNED +#define VM_PAGE_PACKED_PTR_BITS 32 +#define VM_PAGE_PACKED_PTR_SHIFT 0 +#define VM_PAGE_PACKED_PTR_BASE 0 -#define VM_PACKED_FROM_VM_PAGES_ARRAY 0 +#define VM_PAGE_PACKED_FROM_ARRAY 0 #define VM_PAGE_PACK_PTR(p) (p) #define VM_PAGE_UNPACK_PTR(p) ((uintptr_t)(p)) -#define VM_PAGE_OBJECT(p) (vm_object_t)(p->vmp_object) +#define VM_PAGE_OBJECT(p) ((vm_object_t)((p)->vmp_object)) #define VM_PAGE_PACK_OBJECT(o) ((vm_page_object_t)(VM_PAGE_PACK_PTR(o))) @@ -904,7 +1035,7 @@ MACRO_END #define vm_page_queue_prev queue_prev #define vm_page_queue_iterate(h, e, f) queue_iterate(h, e, vm_page_t, f) -#endif +#endif // __LP64__ @@ -949,7 +1080,7 @@ struct vm_speculative_age_q { */ vm_page_queue_head_t age_q; mach_timespec_t age_ts; -} __attribute__((aligned(VM_PACKED_POINTER_ALIGNMENT))); +} VM_PAGE_PACKED_ALIGNED; @@ -1042,6 +1173,10 @@ extern vm_map_size_t vm_per_task_user_wire_limit; extern vm_map_size_t vm_global_user_wire_limit; +extern +uint64_t vm_add_wire_count_over_global_limit; +extern +uint64_t vm_add_wire_count_over_user_limit; /* * Each pageable resident page falls into one of three lists: @@ -1080,16 +1215,8 @@ struct vpl { #endif }; -struct vplq { - union { - char cache_line_pad[VM_VPLQ_ALIGNMENT]; - struct vpl vpl; - } vpl_un; -}; -extern -unsigned int vm_page_local_q_count; extern -struct vplq *vm_page_local_q; +struct vpl * /* __zpercpu */ vm_page_local_q; extern unsigned int vm_page_local_q_soft_limit; extern @@ -1156,6 +1283,8 @@ extern unsigned int vm_page_active_count; /* How many pages are active? */ extern unsigned int vm_page_inactive_count; /* How many pages are inactive? */ +extern +unsigned int vm_page_kernelcache_count; /* How many pages are used for the kernelcache? */ #if CONFIG_SECLUDED_MEMORY extern unsigned int vm_page_secluded_count; /* How many pages are secluded? 
*/ @@ -1288,9 +1417,7 @@ extern void vm_page_bootstrap( vm_offset_t *startp, vm_offset_t *endp); -extern void vm_page_module_init(void); - -extern void vm_page_init_local_q(void); +extern void vm_page_init_local_q(unsigned int num_cpus); extern void vm_page_create( ppnum_t start, @@ -1453,9 +1580,14 @@ extern void vm_set_page_size(void); extern void vm_page_gobble( vm_page_t page); -extern void vm_page_validate_cs(vm_page_t page); +extern void vm_page_validate_cs( + vm_page_t page, + vm_map_size_t fault_page_size, + vm_map_offset_t fault_phys_offset); extern void vm_page_validate_cs_mapped( vm_page_t page, + vm_map_size_t fault_page_size, + vm_map_offset_t fault_phys_offset, const void *kaddr); extern void vm_page_validate_cs_mapped_slow( vm_page_t page, @@ -1673,11 +1805,18 @@ struct vm_page_delayed_work { int dw_mask; }; +#define DEFAULT_DELAYED_WORK_LIMIT 32 + +struct vm_page_delayed_work_ctx { + struct vm_page_delayed_work dwp[DEFAULT_DELAYED_WORK_LIMIT]; + thread_t delayed_owner; +}; + void vm_page_do_delayed_work(vm_object_t object, vm_tag_t tag, struct vm_page_delayed_work *dwp, int dw_count); extern unsigned int vm_max_delayed_work_limit; -#define DEFAULT_DELAYED_WORK_LIMIT 32 +extern void vm_page_delayed_work_init_ctx(void); #define DELAYED_WORK_LIMIT(max) ((vm_max_delayed_work_limit >= max ? max : vm_max_delayed_work_limit)) @@ -1716,6 +1855,7 @@ extern void vm_page_check_pageable_safe(vm_page_t page); #if CONFIG_SECLUDED_MEMORY extern uint64_t secluded_shutoff_trigger; +extern uint64_t secluded_shutoff_headroom; extern void start_secluded_suppression(task_t); extern void stop_secluded_suppression(task_t); #endif /* CONFIG_SECLUDED_MEMORY */ diff --git a/osfmk/vm/vm_pageout.c b/osfmk/vm/vm_pageout.c index 33344f15e..ac74045a0 100644 --- a/osfmk/vm/vm_pageout.c +++ b/osfmk/vm/vm_pageout.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2019 Apple Inc. All rights reserved. + * Copyright (c) 2000-2020 Apple Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -64,6 +64,7 @@ */ #include +#include #include #include @@ -88,6 +89,7 @@ #include #include #include +#include #include #include @@ -308,8 +310,6 @@ static void vm_pageout_adjust_eq_iothrottle(struct vm_pageout_queue *, boolean_t extern void vm_pageout_continue(void); extern void vm_pageout_scan(void); -void vm_tests(void); /* forward */ - boolean_t vm_pageout_running = FALSE; uint32_t vm_page_upl_tainted = 0; @@ -327,8 +327,8 @@ struct vm_pageout_vminfo vm_pageout_vminfo; struct vm_pageout_state vm_pageout_state; struct vm_config vm_config; -struct vm_pageout_queue vm_pageout_queue_internal __attribute__((aligned(VM_PACKED_POINTER_ALIGNMENT))); -struct vm_pageout_queue vm_pageout_queue_external __attribute__((aligned(VM_PACKED_POINTER_ALIGNMENT))); +struct vm_pageout_queue vm_pageout_queue_internal VM_PAGE_PACKED_ALIGNED; +struct vm_pageout_queue vm_pageout_queue_external VM_PAGE_PACKED_ALIGNED; int vm_upl_wait_for_pages = 0; vm_object_t vm_pageout_scan_wants_object = VM_OBJECT_NULL; @@ -337,7 +337,7 @@ boolean_t(*volatile consider_buffer_cache_collect)(int) = NULL; int vm_debug_events = 0; -lck_grp_t vm_pageout_lck_grp; +LCK_GRP_DECLARE(vm_pageout_lck_grp, "vm_pageout"); #if CONFIG_MEMORYSTATUS extern boolean_t memorystatus_kill_on_VM_page_shortage(boolean_t async); @@ -350,7 +350,7 @@ uint32_t vm_pageout_memorystatus_fb_factor_dr = 2; #if __AMP__ int vm_compressor_ebound = 1; int vm_pgo_pbound = 0; -extern void thread_bind_cluster_type(char); +extern void thread_bind_cluster_type(thread_t, char, bool); #endif /* __AMP__ */ @@ -1545,7 +1545,8 @@ extern struct memory_object_pager_ops shared_region_pager_ops; void update_vm_info(void) { - uint64_t tmp; + unsigned long tmp; + uint64_t tmp64; vm_pageout_stats[vm_pageout_stat_now].vm_page_active_count = vm_page_active_count; vm_pageout_stats[vm_pageout_stat_now].vm_page_speculative_count = vm_page_speculative_count; @@ -1566,17 +1567,17 @@ update_vm_info(void) vm_pageout_stats[vm_pageout_stat_now].considered = (unsigned int)(tmp - last.vm_pageout_considered_page); last.vm_pageout_considered_page = tmp; - tmp = vm_pageout_vminfo.vm_pageout_compressions; - vm_pageout_stats[vm_pageout_stat_now].pages_compressed = (unsigned int)(tmp - last.vm_pageout_compressions); - last.vm_pageout_compressions = tmp; + tmp64 = vm_pageout_vminfo.vm_pageout_compressions; + vm_pageout_stats[vm_pageout_stat_now].pages_compressed = (unsigned int)(tmp64 - last.vm_pageout_compressions); + last.vm_pageout_compressions = tmp64; tmp = vm_pageout_vminfo.vm_compressor_failed; vm_pageout_stats[vm_pageout_stat_now].failed_compressions = (unsigned int)(tmp - last.vm_compressor_failed); last.vm_compressor_failed = tmp; - tmp = vm_pageout_vminfo.vm_compressor_pages_grabbed; - vm_pageout_stats[vm_pageout_stat_now].pages_grabbed_by_compressor = (unsigned int)(tmp - last.vm_compressor_pages_grabbed); - last.vm_compressor_pages_grabbed = tmp; + tmp64 = vm_pageout_vminfo.vm_compressor_pages_grabbed; + vm_pageout_stats[vm_pageout_stat_now].pages_grabbed_by_compressor = (unsigned int)(tmp64 - last.vm_compressor_pages_grabbed); + last.vm_compressor_pages_grabbed = tmp64; tmp = vm_pageout_vminfo.vm_phantom_cache_found_ghost; vm_pageout_stats[vm_pageout_stat_now].phantom_ghosts_found = (unsigned int)(tmp - last.vm_phantom_cache_found_ghost); @@ -1586,9 +1587,9 @@ update_vm_info(void) vm_pageout_stats[vm_pageout_stat_now].phantom_ghosts_added = (unsigned int)(tmp - last.vm_phantom_cache_added_ghost); last.vm_phantom_cache_added_ghost = tmp; 
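update_vm_info() above builds per-sample statistics by subtracting a saved snapshot from each running counter; the diff splits the single temporary into an unsigned long and a uint64_t so every counter is read through a temporary of its own width. A tiny sketch of that snapshot/delta pattern (the counter names are invented):

#include <stdint.h>
#include <stdio.h>

/* Running totals maintained elsewhere; they come in two widths. */
static unsigned long total_considered;
static uint64_t      total_compressions;

/* Snapshots from the previous sample, kept at the counters' own widths. */
static unsigned long last_considered;
static uint64_t      last_compressions;

static void
sample(void)
{
    /* Read each counter through a temporary of its own type, subtract the
     * snapshot taken at the same width, then advance the snapshot. */
    unsigned long tmp   = total_considered;
    uint64_t      tmp64 = total_compressions;

    unsigned int considered_delta   = (unsigned int)(tmp - last_considered);
    unsigned int compressions_delta = (unsigned int)(tmp64 - last_compressions);

    last_considered   = tmp;
    last_compressions = tmp64;

    printf("considered=%u compressed=%u this sample\n",
        considered_delta, compressions_delta);
}

int
main(void)
{
    total_considered = 100; total_compressions = 40; sample();
    total_considered = 250; total_compressions = 90; sample();
    return 0;
}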
- tmp = get_pages_grabbed_count(); - vm_pageout_stats[vm_pageout_stat_now].pages_grabbed = (unsigned int)(tmp - last_vm_page_pages_grabbed); - last_vm_page_pages_grabbed = tmp; + tmp64 = get_pages_grabbed_count(); + vm_pageout_stats[vm_pageout_stat_now].pages_grabbed = (unsigned int)(tmp64 - last_vm_page_pages_grabbed); + last_vm_page_pages_grabbed = tmp64; tmp = vm_pageout_vminfo.vm_page_pages_freed; vm_pageout_stats[vm_pageout_stat_now].pages_freed = (unsigned int)(tmp - last.vm_page_pages_freed); @@ -2330,7 +2331,7 @@ vps_flow_control(struct flow_control *flow_control, int *anons_grabbed, vm_objec */ static int vps_choose_victim_page(vm_page_t *victim_page, int *anons_grabbed, boolean_t *grab_anonymous, boolean_t force_anonymous, - boolean_t *is_page_from_bg_q, unsigned int reactivated_this_call) + boolean_t *is_page_from_bg_q, unsigned int *reactivated_this_call) { vm_page_t m = NULL; vm_object_t m_object = VM_OBJECT_NULL; @@ -2342,184 +2343,183 @@ vps_choose_victim_page(vm_page_t *victim_page, int *anons_grabbed, boolean_t *gr sq = &vm_page_queue_speculative[VM_PAGE_SPECULATIVE_AGED_Q]; iq = &vm_pageout_queue_internal; - while (1) { - *is_page_from_bg_q = FALSE; + *is_page_from_bg_q = FALSE; - m = NULL; - m_object = VM_OBJECT_NULL; + m = NULL; + m_object = VM_OBJECT_NULL; - if (VM_DYNAMIC_PAGING_ENABLED()) { - assert(vm_page_throttled_count == 0); - assert(vm_page_queue_empty(&vm_page_queue_throttled)); - } + if (VM_DYNAMIC_PAGING_ENABLED()) { + assert(vm_page_throttled_count == 0); + assert(vm_page_queue_empty(&vm_page_queue_throttled)); + } - /* - * Try for a clean-queue inactive page. - * These are pages that vm_pageout_scan tried to steal earlier, but - * were dirty and had to be cleaned. Pick them up now that they are clean. - */ - if (!vm_page_queue_empty(&vm_page_queue_cleaned)) { - m = (vm_page_t) vm_page_queue_first(&vm_page_queue_cleaned); + /* + * Try for a clean-queue inactive page. + * These are pages that vm_pageout_scan tried to steal earlier, but + * were dirty and had to be cleaned. Pick them up now that they are clean. + */ + if (!vm_page_queue_empty(&vm_page_queue_cleaned)) { + m = (vm_page_t) vm_page_queue_first(&vm_page_queue_cleaned); - assert(m->vmp_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q); + assert(m->vmp_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q); - break; - } + goto found_page; + } - /* - * The next most eligible pages are ones we paged in speculatively, - * but which have not yet been touched and have been aged out. - */ - if (!vm_page_queue_empty(&sq->age_q)) { - m = (vm_page_t) vm_page_queue_first(&sq->age_q); + /* + * The next most eligible pages are ones we paged in speculatively, + * but which have not yet been touched and have been aged out. 
+ */ + if (!vm_page_queue_empty(&sq->age_q)) { + m = (vm_page_t) vm_page_queue_first(&sq->age_q); - assert(m->vmp_q_state == VM_PAGE_ON_SPECULATIVE_Q); + assert(m->vmp_q_state == VM_PAGE_ON_SPECULATIVE_Q); - if (!m->vmp_dirty || force_anonymous == FALSE) { - break; - } else { - m = NULL; - } + if (!m->vmp_dirty || force_anonymous == FALSE) { + goto found_page; + } else { + m = NULL; } + } #if CONFIG_BACKGROUND_QUEUE - if (vm_page_background_mode != VM_PAGE_BG_DISABLED && (vm_page_background_count > vm_page_background_target)) { - vm_object_t bg_m_object = NULL; + if (vm_page_background_mode != VM_PAGE_BG_DISABLED && (vm_page_background_count > vm_page_background_target)) { + vm_object_t bg_m_object = NULL; - m = (vm_page_t) vm_page_queue_first(&vm_page_queue_background); + m = (vm_page_t) vm_page_queue_first(&vm_page_queue_background); - bg_m_object = VM_PAGE_OBJECT(m); + bg_m_object = VM_PAGE_OBJECT(m); - if (!VM_PAGE_PAGEABLE(m)) { - /* - * This page is on the background queue - * but not on a pageable queue. This is - * likely a transient state and whoever - * took it out of its pageable queue - * will likely put it back on a pageable - * queue soon but we can't deal with it - * at this point, so let's ignore this - * page. - */ - } else if (force_anonymous == FALSE || bg_m_object->internal) { - if (bg_m_object->internal && - (VM_PAGE_Q_THROTTLED(iq) || - vm_compressor_out_of_space() == TRUE || - vm_page_free_count < (vm_page_free_reserved / 4))) { - vm_pageout_skipped_bq_internal++; - } else { - *is_page_from_bg_q = TRUE; + if (!VM_PAGE_PAGEABLE(m)) { + /* + * This page is on the background queue + * but not on a pageable queue. This is + * likely a transient state and whoever + * took it out of its pageable queue + * will likely put it back on a pageable + * queue soon but we can't deal with it + * at this point, so let's ignore this + * page. 
+ */ + } else if (force_anonymous == FALSE || bg_m_object->internal) { + if (bg_m_object->internal && + (VM_PAGE_Q_THROTTLED(iq) || + vm_compressor_out_of_space() == TRUE || + vm_page_free_count < (vm_page_free_reserved / 4))) { + vm_pageout_skipped_bq_internal++; + } else { + *is_page_from_bg_q = TRUE; - if (bg_m_object->internal) { - vm_pageout_vminfo.vm_pageout_considered_bq_internal++; - } else { - vm_pageout_vminfo.vm_pageout_considered_bq_external++; - } - break; + if (bg_m_object->internal) { + vm_pageout_vminfo.vm_pageout_considered_bq_internal++; + } else { + vm_pageout_vminfo.vm_pageout_considered_bq_external++; } + goto found_page; } } + } #endif /* CONFIG_BACKGROUND_QUEUE */ - inactive_external_count = vm_page_inactive_count - vm_page_anonymous_count; + inactive_external_count = vm_page_inactive_count - vm_page_anonymous_count; - if ((vm_page_pageable_external_count < vm_pageout_state.vm_page_filecache_min || force_anonymous == TRUE) || - (inactive_external_count < VM_PAGE_INACTIVE_TARGET(vm_page_pageable_external_count))) { - *grab_anonymous = TRUE; - *anons_grabbed = 0; + if ((vm_page_pageable_external_count < vm_pageout_state.vm_page_filecache_min || force_anonymous == TRUE) || + (inactive_external_count < VM_PAGE_INACTIVE_TARGET(vm_page_pageable_external_count))) { + *grab_anonymous = TRUE; + *anons_grabbed = 0; - vm_pageout_vminfo.vm_pageout_skipped_external++; - goto want_anonymous; - } - *grab_anonymous = (vm_page_anonymous_count > vm_page_anonymous_min); + vm_pageout_vminfo.vm_pageout_skipped_external++; + goto want_anonymous; + } + *grab_anonymous = (vm_page_anonymous_count > vm_page_anonymous_min); #if CONFIG_JETSAM - /* If the file-backed pool has accumulated - * significantly more pages than the jetsam - * threshold, prefer to reclaim those - * inline to minimise compute overhead of reclaiming - * anonymous pages. - * This calculation does not account for the CPU local - * external page queues, as those are expected to be - * much smaller relative to the global pools. - */ + /* If the file-backed pool has accumulated + * significantly more pages than the jetsam + * threshold, prefer to reclaim those + * inline to minimise compute overhead of reclaiming + * anonymous pages. + * This calculation does not account for the CPU local + * external page queues, as those are expected to be + * much smaller relative to the global pools. 
+ */ - struct vm_pageout_queue *eq = &vm_pageout_queue_external; + struct vm_pageout_queue *eq = &vm_pageout_queue_external; - if (*grab_anonymous == TRUE && !VM_PAGE_Q_THROTTLED(eq)) { - if (vm_page_pageable_external_count > - vm_pageout_state.vm_page_filecache_min) { - if ((vm_page_pageable_external_count * - vm_pageout_memorystatus_fb_factor_dr) > - (memorystatus_available_pages_critical * - vm_pageout_memorystatus_fb_factor_nr)) { - *grab_anonymous = FALSE; + if (*grab_anonymous == TRUE && !VM_PAGE_Q_THROTTLED(eq)) { + if (vm_page_pageable_external_count > + vm_pageout_state.vm_page_filecache_min) { + if ((vm_page_pageable_external_count * + vm_pageout_memorystatus_fb_factor_dr) > + (memorystatus_available_pages_critical * + vm_pageout_memorystatus_fb_factor_nr)) { + *grab_anonymous = FALSE; - VM_PAGEOUT_DEBUG(vm_grab_anon_overrides, 1); - } - } - if (*grab_anonymous) { - VM_PAGEOUT_DEBUG(vm_grab_anon_nops, 1); + VM_PAGEOUT_DEBUG(vm_grab_anon_overrides, 1); } } + if (*grab_anonymous) { + VM_PAGEOUT_DEBUG(vm_grab_anon_nops, 1); + } + } #endif /* CONFIG_JETSAM */ want_anonymous: - if (*grab_anonymous == FALSE || *anons_grabbed >= ANONS_GRABBED_LIMIT || vm_page_queue_empty(&vm_page_queue_anonymous)) { - if (!vm_page_queue_empty(&vm_page_queue_inactive)) { - m = (vm_page_t) vm_page_queue_first(&vm_page_queue_inactive); + if (*grab_anonymous == FALSE || *anons_grabbed >= ANONS_GRABBED_LIMIT || vm_page_queue_empty(&vm_page_queue_anonymous)) { + if (!vm_page_queue_empty(&vm_page_queue_inactive)) { + m = (vm_page_t) vm_page_queue_first(&vm_page_queue_inactive); - assert(m->vmp_q_state == VM_PAGE_ON_INACTIVE_EXTERNAL_Q); - *anons_grabbed = 0; + assert(m->vmp_q_state == VM_PAGE_ON_INACTIVE_EXTERNAL_Q); + *anons_grabbed = 0; - if (vm_page_pageable_external_count < vm_pageout_state.vm_page_filecache_min) { - if (!vm_page_queue_empty(&vm_page_queue_anonymous)) { - if ((++reactivated_this_call % 100)) { - vm_pageout_vminfo.vm_pageout_filecache_min_reactivated++; + if (vm_page_pageable_external_count < vm_pageout_state.vm_page_filecache_min) { + if (!vm_page_queue_empty(&vm_page_queue_anonymous)) { + if ((++(*reactivated_this_call) % 100)) { + vm_pageout_vminfo.vm_pageout_filecache_min_reactivated++; - vm_page_activate(m); - VM_STAT_INCR(reactivations); + vm_page_activate(m); + VM_STAT_INCR(reactivations); #if CONFIG_BACKGROUND_QUEUE #if DEVELOPMENT || DEBUG - if (*is_page_from_bg_q == TRUE) { - if (m_object->internal) { - vm_pageout_rejected_bq_internal++; - } else { - vm_pageout_rejected_bq_external++; - } + if (*is_page_from_bg_q == TRUE) { + if (m_object->internal) { + vm_pageout_rejected_bq_internal++; + } else { + vm_pageout_rejected_bq_external++; } + } #endif /* DEVELOPMENT || DEBUG */ #endif /* CONFIG_BACKGROUND_QUEUE */ - vm_pageout_state.vm_pageout_inactive_used++; - - m = NULL; - retval = VM_PAGEOUT_SCAN_NEXT_ITERATION; + vm_pageout_state.vm_pageout_inactive_used++; - break; - } + m = NULL; + retval = VM_PAGEOUT_SCAN_NEXT_ITERATION; - /* - * steal 1% of the file backed pages even if - * we are under the limit that has been set - * for a healthy filecache - */ + goto found_page; } + + /* + * steal 1 of the file backed pages even if + * we are under the limit that has been set + * for a healthy filecache + */ } - break; } + goto found_page; } - if (!vm_page_queue_empty(&vm_page_queue_anonymous)) { - m = (vm_page_t) vm_page_queue_first(&vm_page_queue_anonymous); - - assert(m->vmp_q_state == VM_PAGE_ON_INACTIVE_INTERNAL_Q); - *anons_grabbed += 1; + } + if 
(!vm_page_queue_empty(&vm_page_queue_anonymous)) { + m = (vm_page_t) vm_page_queue_first(&vm_page_queue_anonymous); - break; - } + assert(m->vmp_q_state == VM_PAGE_ON_INACTIVE_INTERNAL_Q); + *anons_grabbed += 1; - m = NULL; + goto found_page; } + m = NULL; + +found_page: *victim_page = m; return retval; @@ -2774,7 +2774,7 @@ vm_page_balance_inactive(int max_to_move) LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED); - if (hibernation_vmqueues_inspection == TRUE) { + if (hibernation_vmqueues_inspection || hibernate_cleaning_in_progress) { /* * It is likely that the hibernation code path is * dealing with these very queues as we are about @@ -3101,12 +3101,10 @@ return_from_scan: */ m = NULL; - retval = vps_choose_victim_page(&m, &anons_grabbed, &grab_anonymous, force_anonymous, &page_from_bg_q, reactivated_this_call); + retval = vps_choose_victim_page(&m, &anons_grabbed, &grab_anonymous, force_anonymous, &page_from_bg_q, &reactivated_this_call); if (m == NULL) { if (retval == VM_PAGEOUT_SCAN_NEXT_ITERATION) { - reactivated_this_call++; - inactive_burst_count = 0; if (page_prev_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) { @@ -4278,9 +4276,8 @@ vm_pageout_adjust_eq_iothrottle(struct vm_pageout_queue *eq, boolean_t req_lowpr proc_set_thread_policy_with_tid(kernel_task, eq->pgo_tid, TASK_POLICY_EXTERNAL, TASK_POLICY_IO, policy); - eq->pgo_lowpriority = req_lowpriority; - vm_page_lock_queues(); + eq->pgo_lowpriority = req_lowpriority; } } @@ -4330,10 +4327,17 @@ vm_pageout_iothread_internal(struct cq *cq) thread_vm_bind_group_add(); } +#if CONFIG_THREAD_GROUPS + thread_group_vm_add(); +#endif /* CONFIG_THREAD_GROUPS */ #if __AMP__ if (vm_compressor_ebound) { - thread_bind_cluster_type('E'); + /* + * Use the soft bound option for vm_compressor to allow it to run on + * P-cores if E-cluster is unavailable. + */ + thread_bind_cluster_type(self, 'E', true); } #endif /* __AMP__ */ @@ -4349,7 +4353,7 @@ vm_pageout_iothread_internal(struct cq *cq) kern_return_t vm_set_buffer_cleanup_callout(boolean_t (*func)(int)) { - if (OSCompareAndSwapPtr(NULL, func, (void * volatile *) &consider_buffer_cache_collect)) { + if (OSCompareAndSwapPtr(NULL, ptrauth_nop_cast(void *, func), (void * volatile *) &consider_buffer_cache_collect)) { return KERN_SUCCESS; } else { return KERN_FAILURE; /* Already set */ @@ -4570,12 +4574,10 @@ compute_pageout_gc_throttle(__unused void *arg) * * 2. The jetsam path might need to allocate zone memory itself. We could try * using the non-blocking variant of zalloc for this path, but we can still - * end up trying to do a kernel_memory_allocate when the zone_map is almost + * end up trying to do a kernel_memory_allocate when the zone maps are almost * full. */ -extern boolean_t is_zone_map_nearing_exhaustion(void); - void vm_pageout_garbage_collect(int collect) { @@ -4643,7 +4645,7 @@ extern vm_map_offset_t vm_page_fake_buckets_start, vm_page_fake_buckets_end; void -vm_set_restrictions() +vm_set_restrictions(unsigned int num_cpus) { int vm_restricted_to_single_processor = 0; @@ -4651,15 +4653,9 @@ vm_set_restrictions() kprintf("Overriding vm_restricted_to_single_processor to %d\n", vm_restricted_to_single_processor); vm_pageout_state.vm_restricted_to_single_processor = (vm_restricted_to_single_processor ? 
TRUE : FALSE); } else { - host_basic_info_data_t hinfo; - mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT; + assert(num_cpus > 0); -#define BSD_HOST 1 - host_info((host_t)BSD_HOST, HOST_BASIC_INFO, (host_info_t)&hinfo, &count); - - assert(hinfo.max_cpus > 0); - - if (hinfo.max_cpus <= 3) { + if (num_cpus <= 3) { /* * on systems with a limited number of CPUS, bind the * 4 major threads that can free memory and that tend to use @@ -4741,11 +4737,18 @@ vm_pageout(void) } +#if CONFIG_THREAD_GROUPS + thread_group_vm_add(); +#endif /* CONFIG_THREAD_GROUPS */ #if __AMP__ PE_parse_boot_argn("vmpgo_pcluster", &vm_pgo_pbound, sizeof(vm_pgo_pbound)); if (vm_pgo_pbound) { - thread_bind_cluster_type('P'); + /* + * Use the soft bound option for vm pageout to allow it to run on + * E-cores if P-cluster is unavailable. + */ + thread_bind_cluster_type(self, 'P', true); } #endif /* __AMP__ */ @@ -4898,6 +4901,7 @@ vm_pageout(void) switch (vm_compressor_mode) { case VM_PAGER_DEFAULT: printf("mapping deprecated VM_PAGER_DEFAULT to VM_PAGER_COMPRESSOR_WITH_SWAP\n"); + OS_FALLTHROUGH; case VM_PAGER_COMPRESSOR_WITH_SWAP: vm_config.compressor_is_present = TRUE; @@ -4914,6 +4918,7 @@ vm_pageout(void) case VM_PAGER_FREEZER_DEFAULT: printf("mapping deprecated VM_PAGER_FREEZER_DEFAULT to VM_PAGER_FREEZER_COMPRESSOR_NO_SWAP\n"); + OS_FALLTHROUGH; case VM_PAGER_FREEZER_COMPRESSOR_NO_SWAP: vm_config.compressor_is_present = TRUE; @@ -4962,8 +4967,6 @@ vm_pageout(void) vm_object_tracking_init(); #endif /* VM_OBJECT_TRACKING */ - vm_tests(); - vm_pageout_continue(); /* @@ -4996,8 +4999,8 @@ kern_return_t vm_pageout_internal_start(void) { kern_return_t result; - int i; host_basic_info_data_t hinfo; + vm_offset_t buf, bufsize; assert(VM_CONFIG_COMPRESSOR_IS_PRESENT); @@ -5007,8 +5010,6 @@ vm_pageout_internal_start(void) assert(hinfo.max_cpus > 0); - lck_grp_init(&vm_pageout_lck_grp, "vm_pageout", LCK_GRP_ATTR_NULL); - #if CONFIG_EMBEDDED vm_pageout_state.vm_compressor_thread_count = 1; #else @@ -5036,18 +5037,30 @@ vm_pageout_internal_start(void) vm_pageout_state.vm_compressor_thread_count = MAX_COMPRESSOR_THREAD_COUNT; } - vm_pageout_queue_internal.pgo_maxlaundry = (vm_pageout_state.vm_compressor_thread_count * 4) * VM_PAGE_LAUNDRY_MAX; + vm_pageout_queue_internal.pgo_maxlaundry = + (vm_pageout_state.vm_compressor_thread_count * 4) * VM_PAGE_LAUNDRY_MAX; - PE_parse_boot_argn("vmpgoi_maxlaundry", &vm_pageout_queue_internal.pgo_maxlaundry, sizeof(vm_pageout_queue_internal.pgo_maxlaundry)); + PE_parse_boot_argn("vmpgoi_maxlaundry", + &vm_pageout_queue_internal.pgo_maxlaundry, + sizeof(vm_pageout_queue_internal.pgo_maxlaundry)); - for (i = 0; i < vm_pageout_state.vm_compressor_thread_count; i++) { + bufsize = COMPRESSOR_SCRATCH_BUF_SIZE; + if (kernel_memory_allocate(kernel_map, &buf, + bufsize * vm_pageout_state.vm_compressor_thread_count, + 0, KMA_KOBJECT | KMA_PERMANENT, VM_KERN_MEMORY_COMPRESSOR)) { + panic("vm_pageout_internal_start: Unable to allocate %zd bytes", + (size_t)(bufsize * vm_pageout_state.vm_compressor_thread_count)); + } + + for (int i = 0; i < vm_pageout_state.vm_compressor_thread_count; i++) { ciq[i].id = i; ciq[i].q = &vm_pageout_queue_internal; ciq[i].current_chead = NULL; - ciq[i].scratch_buf = kalloc(COMPRESSOR_SCRATCH_BUF_SIZE); + ciq[i].scratch_buf = (char *)(buf + i * bufsize); - result = kernel_thread_start_priority((thread_continue_t)vm_pageout_iothread_internal, (void *)&ciq[i], - BASEPRI_VM, &vm_pageout_state.vm_pageout_internal_iothread); + result = 
kernel_thread_start_priority((thread_continue_t)vm_pageout_iothread_internal, + (void *)&ciq[i], BASEPRI_VM, + &vm_pageout_state.vm_pageout_internal_iothread); if (result == KERN_SUCCESS) { thread_deallocate(vm_pageout_state.vm_pageout_internal_iothread); @@ -5111,6 +5124,8 @@ upl_create(int type, int flags, upl_size_t size) int upl_flags = 0; vm_size_t upl_size = sizeof(struct upl); + assert(page_aligned(size)); + size = round_page_32(size); if (type & UPL_CREATE_LITE) { @@ -5132,7 +5147,8 @@ upl_create(int type, int flags, upl_size_t size) upl->flags = upl_flags | flags; upl->kaddr = (vm_offset_t)0; - upl->size = 0; + upl->u_offset = 0; + upl->u_size = 0; upl->map_object = NULL; upl->ref_count = 1; upl->ext_ref_count = 0; @@ -5188,6 +5204,8 @@ upl_destroy(upl_t upl) int page_field_size; /* bit field in word size buf */ int size; +// DEBUG4K_UPL("upl %p (u_offset 0x%llx u_size 0x%llx) object %p\n", upl, (uint64_t)upl->u_offset, (uint64_t)upl->u_size, upl->map_object); + if (upl->ext_ref_count) { panic("upl(%p) ext_ref_count", upl); } @@ -5205,7 +5223,8 @@ upl_destroy(upl_t upl) #endif /* CONFIG_IOSCHED */ #if CONFIG_IOSCHED || UPL_DEBUG - if ((upl->flags & UPL_TRACKED_BY_OBJECT) && !(upl->flags & UPL_VECTOR)) { + if (((upl->flags & UPL_TRACKED_BY_OBJECT) || upl_debug_enabled) && + !(upl->flags & UPL_VECTOR)) { vm_object_t object; if (upl->flags & UPL_SHADOWED) { @@ -5232,7 +5251,7 @@ upl_destroy(upl_t upl) if (upl->flags & UPL_DEVICE_MEMORY) { size = PAGE_SIZE; } else { - size = upl->size; + size = upl_adjusted_size(upl, PAGE_MASK); } page_field_size = 0; @@ -5316,6 +5335,58 @@ must_throttle_writes() return FALSE; } +#define MIN_DELAYED_WORK_CTX_ALLOCATED (16) +#define MAX_DELAYED_WORK_CTX_ALLOCATED (512) + +int vm_page_delayed_work_ctx_needed = 0; +zone_t dw_ctx_zone = ZONE_NULL; + +void +vm_page_delayed_work_init_ctx(void) +{ + int nelems = 0, elem_size = 0; + + elem_size = sizeof(struct vm_page_delayed_work_ctx); + + dw_ctx_zone = zone_create_ext("delayed-work-ctx", elem_size, + ZC_NOGC, ZONE_ID_ANY, ^(zone_t z) { + zone_set_exhaustible(z, MAX_DELAYED_WORK_CTX_ALLOCATED * elem_size); + }); + + nelems = zfill(dw_ctx_zone, MIN_DELAYED_WORK_CTX_ALLOCATED); + if (nelems < MIN_DELAYED_WORK_CTX_ALLOCATED) { + printf("vm_page_delayed_work_init_ctx: Failed to preallocate minimum delayed work contexts (%d vs %d).\n", nelems, MIN_DELAYED_WORK_CTX_ALLOCATED); +#if DEVELOPMENT || DEBUG + panic("Failed to preallocate minimum delayed work contexts (%d vs %d).\n", nelems, MIN_DELAYED_WORK_CTX_ALLOCATED); +#endif /* DEVELOPMENT || DEBUG */ + } +} + +struct vm_page_delayed_work* +vm_page_delayed_work_get_ctx(void) +{ + struct vm_page_delayed_work_ctx * dw_ctx = NULL; + + dw_ctx = (struct vm_page_delayed_work_ctx*) zalloc_noblock(dw_ctx_zone); + + if (dw_ctx) { + dw_ctx->delayed_owner = current_thread(); + } else { + vm_page_delayed_work_ctx_needed++; + } + return dw_ctx ? 
dw_ctx->dwp : NULL; +} + +void +vm_page_delayed_work_finish_ctx(struct vm_page_delayed_work* dwp) +{ + struct vm_page_delayed_work_ctx *ldw_ctx; + + ldw_ctx = (struct vm_page_delayed_work_ctx *)dwp; + ldw_ctx->delayed_owner = NULL; + + zfree(dw_ctx_zone, ldw_ctx); +} /* * Routine: vm_object_upl_request @@ -5384,8 +5455,9 @@ vm_object_upl_request( int refmod_state = 0; wpl_array_t lite_list = NULL; vm_object_t last_copy_object; - struct vm_page_delayed_work dw_array[DEFAULT_DELAYED_WORK_LIMIT]; - struct vm_page_delayed_work *dwp; + struct vm_page_delayed_work dw_array; + struct vm_page_delayed_work *dwp, *dwp_start; + bool dwp_finish_ctx = TRUE; int dw_count; int dw_limit; int io_tracking_flag = 0; @@ -5398,6 +5470,8 @@ vm_object_upl_request( task_t task = current_task(); #endif /* DEVELOPMENT || DEBUG */ + dwp_start = dwp = NULL; + if (cntrl_flags & ~UPL_VALID_FLAGS) { /* * For forward compatibility's sake, @@ -5412,8 +5486,23 @@ vm_object_upl_request( panic("vm_object_upl_request: contiguous object specified\n"); } + assertf(page_aligned(offset) && page_aligned(size), + "offset 0x%llx size 0x%x", + offset, size); + VM_DEBUG_CONSTANT_EVENT(vm_object_upl_request, VM_UPL_REQUEST, DBG_FUNC_START, size, cntrl_flags, 0, 0); + dw_count = 0; + dw_limit = DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT); + dwp_start = vm_page_delayed_work_get_ctx(); + if (dwp_start == NULL) { + dwp_start = &dw_array; + dw_limit = 1; + dwp_finish_ctx = FALSE; + } + + dwp = dwp_start; + if (size > MAX_UPL_SIZE_BYTES) { size = MAX_UPL_SIZE_BYTES; } @@ -5485,6 +5574,9 @@ vm_object_upl_request( upl->map_object->copy_strategy = MEMORY_OBJECT_COPY_NONE; upl->map_object->vo_shadow_offset = offset; upl->map_object->wimg_bits = object->wimg_bits; + assertf(page_aligned(upl->map_object->vo_shadow_offset), + "object %p shadow_offset 0x%llx", + upl->map_object, upl->map_object->vo_shadow_offset); VM_PAGE_GRAB_FICTITIOUS(alias_page); @@ -5507,8 +5599,8 @@ vm_object_upl_request( /* * we can lock in the paging_offset once paging_in_progress is set */ - upl->size = size; - upl->offset = offset + object->paging_offset; + upl->u_size = size; + upl->u_offset = offset + object->paging_offset; #if CONFIG_IOSCHED || UPL_DEBUG if (object->io_tracking || upl_debug_enabled) { @@ -5548,10 +5640,6 @@ vm_object_upl_request( dst_offset = offset; size_in_pages = size / PAGE_SIZE; - dwp = &dw_array[0]; - dw_count = 0; - dw_limit = DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT); - if (vm_page_free_count > (vm_page_free_target + size_in_pages) || object->resident_page_count < ((MAX_UPL_SIZE_BYTES * 2) >> PAGE_SHIFT)) { object->scan_collisions = 0; @@ -6008,8 +6096,8 @@ check_busy: * all pmaps so processes have to fault it back in and * deal with the tainted bit. 
*/ - if (object->code_signed && dst_page->vmp_cs_tainted == FALSE) { - dst_page->vmp_cs_tainted = TRUE; + if (object->code_signed && dst_page->vmp_cs_tainted != VMP_CS_ALL_TRUE) { + dst_page->vmp_cs_tainted = VMP_CS_ALL_TRUE; vm_page_upl_tainted++; if (dst_page->vmp_pmapped) { refmod_state = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(dst_page)); @@ -6119,9 +6207,9 @@ try_next_page: VM_PAGE_ADD_DELAYED_WORK(dwp, dst_page, dw_count); if (dw_count >= dw_limit) { - vm_page_do_delayed_work(object, tag, &dw_array[0], dw_count); + vm_page_do_delayed_work(object, tag, dwp_start, dw_count); - dwp = &dw_array[0]; + dwp = dwp_start; dw_count = 0; } } @@ -6130,7 +6218,9 @@ try_next_page: xfer_size -= PAGE_SIZE; } if (dw_count) { - vm_page_do_delayed_work(object, tag, &dw_array[0], dw_count); + vm_page_do_delayed_work(object, tag, dwp_start, dw_count); + dwp = dwp_start; + dw_count = 0; } if (alias_page != NULL) { @@ -6159,6 +6249,11 @@ try_next_page: } #endif /* DEVELOPMENT || DEBUG */ + if (dwp_start && dwp_finish_ctx) { + vm_page_delayed_work_finish_ctx(dwp_start); + dwp_start = dwp = NULL; + } + return KERN_SUCCESS; } @@ -6226,11 +6321,9 @@ vm_object_super_upl_request( return vm_object_upl_request(object, offset, size, upl, user_page_list, page_list_count, cntrl_flags, tag); } -#if CONFIG_EMBEDDED int cs_executable_create_upl = 0; extern int proc_selfpid(void); extern char *proc_name_address(void *p); -#endif /* CONFIG_EMBEDDED */ kern_return_t vm_map_create_upl( @@ -6251,8 +6344,18 @@ vm_map_create_upl( vm_map_offset_t local_offset; vm_map_offset_t local_start; kern_return_t ret; + vm_map_address_t original_offset; + vm_map_size_t original_size, adjusted_size; + vm_map_offset_t local_entry_start; + vm_object_offset_t local_entry_offset; + vm_object_offset_t offset_in_mapped_page; + boolean_t release_map = FALSE; + +start_with_map: - assert(page_aligned(offset)); + original_offset = offset; + original_size = *upl_size; + adjusted_size = original_size; caller_flags = *flags; @@ -6261,13 +6364,15 @@ vm_map_create_upl( * For forward compatibility's sake, * reject any unknown flag. 
*/ - return KERN_INVALID_VALUE; + ret = KERN_INVALID_VALUE; + goto done; } force_data_sync = (caller_flags & UPL_FORCE_DATA_SYNC); sync_cow_data = !(caller_flags & UPL_COPYOUT_FROM); if (upl == NULL) { - return KERN_INVALID_ARGUMENT; + ret = KERN_INVALID_ARGUMENT; + goto done; } REDISCOVER_ENTRY: @@ -6275,12 +6380,22 @@ REDISCOVER_ENTRY: if (!vm_map_lookup_entry(map, offset, &entry)) { vm_map_unlock_read(map); - return KERN_FAILURE; + ret = KERN_FAILURE; + goto done; } - if ((entry->vme_end - offset) < *upl_size) { - *upl_size = (upl_size_t) (entry->vme_end - offset); - assert(*upl_size == entry->vme_end - offset); + local_entry_start = entry->vme_start; + local_entry_offset = VME_OFFSET(entry); + + if (VM_MAP_PAGE_SHIFT(map) < PAGE_SHIFT) { + DEBUG4K_UPL("map %p (%d) offset 0x%llx size 0x%x flags 0x%llx\n", map, VM_MAP_PAGE_SHIFT(map), (uint64_t)offset, *upl_size, *flags); + } + + if (entry->vme_end - original_offset < adjusted_size) { + adjusted_size = entry->vme_end - original_offset; + assert(adjusted_size > 0); + *upl_size = (upl_size_t) adjusted_size; + assert(*upl_size == adjusted_size); } if (caller_flags & UPL_QUERY_OBJECT_TYPE) { @@ -6297,7 +6412,22 @@ REDISCOVER_ENTRY: } } vm_map_unlock_read(map); - return KERN_SUCCESS; + ret = KERN_SUCCESS; + goto done; + } + + offset_in_mapped_page = 0; + if (VM_MAP_PAGE_SIZE(map) < PAGE_SIZE) { + offset = vm_map_trunc_page(original_offset, VM_MAP_PAGE_MASK(map)); + *upl_size = (upl_size_t) + (vm_map_round_page(original_offset + adjusted_size, + VM_MAP_PAGE_MASK(map)) + - offset); + + offset_in_mapped_page = original_offset - offset; + assert(offset_in_mapped_page < VM_MAP_PAGE_SIZE(map)); + + DEBUG4K_UPL("map %p (%d) offset 0x%llx size 0x%llx flags 0x%llx -> offset 0x%llx adjusted_size 0x%llx *upl_size 0x%x offset_in_mapped_page 0x%llx\n", map, VM_MAP_PAGE_SHIFT(map), (uint64_t)original_offset, (uint64_t)original_size, *flags, (uint64_t)offset, (uint64_t)adjusted_size, *upl_size, offset_in_mapped_page); } if (VME_OBJECT(entry) == VM_OBJECT_NULL || @@ -6317,8 +6447,7 @@ REDISCOVER_ENTRY: VME_OBJECT_SET(entry, vm_object_allocate((vm_size_t) - (entry->vme_end - - entry->vme_start))); + vm_object_round_page((entry->vme_end - entry->vme_start)))); VME_OFFSET_SET(entry, 0); assert(entry->use_pmap); @@ -6329,7 +6458,8 @@ REDISCOVER_ENTRY: !entry->is_sub_map && !(entry->protection & VM_PROT_WRITE)) { vm_map_unlock_read(map); - return KERN_PROTECTION_FAILURE; + ret = KERN_PROTECTION_FAILURE; + goto done; } #if CONFIG_EMBEDDED @@ -6354,6 +6484,7 @@ REDISCOVER_ENTRY: */ vm_map_unlock_read(map); + entry = VM_MAP_ENTRY_NULL; /* allocate kernel buffer */ ksize = round_page(*upl_size); kaddr = 0; @@ -6363,7 +6494,6 @@ REDISCOVER_ENTRY: tag); if (ret == KERN_SUCCESS) { /* copyin the user data */ - assert(page_aligned(offset)); ret = copyinmap(map, offset, (void *)kaddr, *upl_size); } if (ret == KERN_SUCCESS) { @@ -6374,8 +6504,17 @@ REDISCOVER_ENTRY: ksize - *upl_size); } /* create the UPL from the kernel buffer */ - ret = vm_map_create_upl(kernel_map, kaddr, upl_size, - upl, page_list, count, flags, tag); + vm_object_offset_t offset_in_object; + vm_object_offset_t offset_in_object_page; + + offset_in_object = offset - local_entry_start + local_entry_offset; + offset_in_object_page = offset_in_object - vm_object_trunc_page(offset_in_object); + assert(offset_in_object_page < PAGE_SIZE); + assert(offset_in_object_page + offset_in_mapped_page < PAGE_SIZE); + *upl_size -= offset_in_object_page + offset_in_mapped_page; + ret = vm_map_create_upl(kernel_map, + 
(vm_map_address_t)(kaddr + offset_in_object_page + offset_in_mapped_page), + upl_size, upl, page_list, count, flags, tag); } if (kaddr != 0) { /* free the kernel buffer */ @@ -6390,7 +6529,7 @@ REDISCOVER_ENTRY: upl_size_t, *upl_size, kern_return_t, ret); #endif /* DEVELOPMENT || DEBUG */ - return ret; + goto done; } #endif /* CONFIG_EMBEDDED */ @@ -6457,6 +6596,7 @@ REDISCOVER_ENTRY: map->mapped_in_other_pmaps) ? PMAP_NULL : map->pmap), + VM_MAP_PAGE_SIZE(map), entry->vme_start, prot); @@ -6497,6 +6637,24 @@ REDISCOVER_ENTRY: vm_map_t real_map; vm_prot_t fault_type; + if (entry->vme_start < VM_MAP_TRUNC_PAGE(offset, VM_MAP_PAGE_MASK(map)) || + entry->vme_end > VM_MAP_ROUND_PAGE(offset + *upl_size, VM_MAP_PAGE_MASK(map))) { + /* + * Clip the requested range first to minimize the + * amount of potential copying... + */ + if (vm_map_lock_read_to_write(map)) { + goto REDISCOVER_ENTRY; + } + vm_map_lock_assert_exclusive(map); + assert(VME_OBJECT(entry) == local_object); + vm_map_clip_start(map, entry, + VM_MAP_TRUNC_PAGE(offset, VM_MAP_PAGE_MASK(map))); + vm_map_clip_end(map, entry, + VM_MAP_ROUND_PAGE(offset + *upl_size, VM_MAP_PAGE_MASK(map))); + vm_map_lock_write_to_read(map); + } + local_map = map; if (caller_flags & UPL_COPYOUT_FROM) { @@ -6513,16 +6671,17 @@ REDISCOVER_ENTRY: &version, &object, &new_offset, &prot, &wired, NULL, - &real_map) != KERN_SUCCESS) { + &real_map, NULL) != KERN_SUCCESS) { if (fault_type == VM_PROT_WRITE) { vm_counters.create_upl_lookup_failure_write++; } else { vm_counters.create_upl_lookup_failure_copy++; } vm_map_unlock_read(local_map); - return KERN_FAILURE; + ret = KERN_FAILURE; + goto done; } - if (real_map != map) { + if (real_map != local_map) { vm_map_unlock(real_map); } vm_map_unlock_read(local_map); @@ -6537,17 +6696,22 @@ REDISCOVER_ENTRY: submap = VME_SUBMAP(entry); local_start = entry->vme_start; - local_offset = VME_OFFSET(entry); + local_offset = (vm_map_offset_t)VME_OFFSET(entry); vm_map_reference(submap); vm_map_unlock_read(map); - ret = vm_map_create_upl(submap, - local_offset + (offset - local_start), - upl_size, upl, page_list, count, flags, tag); - vm_map_deallocate(submap); + DEBUG4K_UPL("map %p offset 0x%llx (0x%llx) size 0x%x (adjusted 0x%llx original 0x%llx) offset_in_mapped_page 0x%llx submap %p\n", map, (uint64_t)offset, (uint64_t)original_offset, *upl_size, (uint64_t)adjusted_size, (uint64_t)original_size, offset_in_mapped_page, submap); + offset += offset_in_mapped_page; + *upl_size -= offset_in_mapped_page; - return ret; + if (release_map) { + vm_map_deallocate(map); + } + map = submap; + release_map = TRUE; + offset = local_offset + (offset - local_start); + goto start_with_map; } if (sync_cow_data && @@ -6555,7 +6719,7 @@ REDISCOVER_ENTRY: VME_OBJECT(entry)->copy)) { local_object = VME_OBJECT(entry); local_start = entry->vme_start; - local_offset = VME_OFFSET(entry); + local_offset = (vm_map_offset_t)VME_OFFSET(entry); vm_object_reference(local_object); vm_map_unlock_read(map); @@ -6578,7 +6742,7 @@ REDISCOVER_ENTRY: if (force_data_sync) { local_object = VME_OBJECT(entry); local_start = entry->vme_start; - local_offset = VME_OFFSET(entry); + local_offset = (vm_map_offset_t)VME_OFFSET(entry); vm_object_reference(local_object); vm_map_unlock_read(map); @@ -6608,10 +6772,9 @@ REDISCOVER_ENTRY: } local_object = VME_OBJECT(entry); - local_offset = VME_OFFSET(entry); + local_offset = (vm_map_offset_t)VME_OFFSET(entry); local_start = entry->vme_start; -#if CONFIG_EMBEDDED /* * Wiring will copy the pages to the shadow object. 
* The shadow object will not be code-signed so @@ -6635,7 +6798,6 @@ REDISCOVER_ENTRY: uint64_t, (uint64_t)entry->vme_end); cs_executable_create_upl++; } -#endif /* CONFIG_EMBEDDED */ vm_object_lock(local_object); @@ -6678,6 +6840,10 @@ REDISCOVER_ENTRY: vm_map_unlock_read(map); + offset += offset_in_mapped_page; + assert(*upl_size > offset_in_mapped_page); + *upl_size -= offset_in_mapped_page; + ret = vm_object_iopl_request(local_object, ((vm_object_offset_t) ((offset - local_start) + local_offset)), @@ -6689,6 +6855,11 @@ REDISCOVER_ENTRY: tag); vm_object_deallocate(local_object); +done: + if (release_map) { + vm_map_deallocate(map); + } + return ret; } @@ -6720,6 +6891,9 @@ vm_map_enter_upl( return KERN_INVALID_ARGUMENT; } + DEBUG4K_UPL("map %p upl %p flags 0x%x object %p offset 0x%llx size 0x%x \n", map, upl, upl->flags, upl->map_object, upl->u_offset, upl->u_size); + assert(map == kernel_map); + if ((isVectorUPL = vector_upl_is_valid(upl))) { int mapped = 0, valid_upls = 0; vector_upl = upl; @@ -6745,7 +6919,13 @@ vm_map_enter_upl( } } - kr = kmem_suballoc(map, &vector_upl_dst_addr, vector_upl->size, FALSE, + if (VM_MAP_PAGE_MASK(map) < PAGE_MASK) { + panic("TODO4K: vector UPL not implemented"); + } + + kr = kmem_suballoc(map, &vector_upl_dst_addr, + vector_upl->u_size, + FALSE, VM_FLAGS_ANYWHERE, VM_MAP_KERNEL_FLAGS_NONE, VM_KERN_MEMORY_NONE, &vector_upl_submap); if (kr != KERN_SUCCESS) { @@ -6781,6 +6961,9 @@ process_upl_to_enter: return KERN_FAILURE; } } + + size = upl_adjusted_size(upl, VM_MAP_PAGE_MASK(map)); + if ((!(upl->flags & UPL_SHADOWED)) && ((upl->flags & UPL_HAS_BUSY) || !((upl->flags & (UPL_DEVICE_MEMORY | UPL_IO_WIRE)) || (upl->map_object->phys_contiguous)))) { @@ -6793,12 +6976,12 @@ process_upl_to_enter: if (upl->flags & UPL_INTERNAL) { lite_list = (wpl_array_t) ((((uintptr_t)upl) + sizeof(struct upl)) - + ((upl->size / PAGE_SIZE) * sizeof(upl_page_info_t))); + + ((size / PAGE_SIZE) * sizeof(upl_page_info_t))); } else { lite_list = (wpl_array_t)(((uintptr_t)upl) + sizeof(struct upl)); } object = upl->map_object; - upl->map_object = vm_object_allocate(upl->size); + upl->map_object = vm_object_allocate(vm_object_round_page(size)); vm_object_lock(upl->map_object); @@ -6806,11 +6989,18 @@ process_upl_to_enter: upl->map_object->pageout = TRUE; upl->map_object->can_persist = FALSE; upl->map_object->copy_strategy = MEMORY_OBJECT_COPY_NONE; - upl->map_object->vo_shadow_offset = upl->offset - object->paging_offset; + upl->map_object->vo_shadow_offset = upl_adjusted_offset(upl, PAGE_MASK) - object->paging_offset; + assertf(page_aligned(upl->map_object->vo_shadow_offset), + "object %p shadow_offset 0x%llx", + upl->map_object, + (uint64_t)upl->map_object->vo_shadow_offset); upl->map_object->wimg_bits = object->wimg_bits; + assertf(page_aligned(upl->map_object->vo_shadow_offset), + "object %p shadow_offset 0x%llx", + upl->map_object, upl->map_object->vo_shadow_offset); offset = upl->map_object->vo_shadow_offset; new_offset = 0; - size = upl->size; + size = upl_adjusted_size(upl, VM_MAP_PAGE_MASK(map)); upl->flags |= UPL_SHADOWED; @@ -6865,10 +7055,10 @@ process_upl_to_enter: if (upl->flags & UPL_SHADOWED) { offset = 0; } else { - offset = upl->offset - upl->map_object->paging_offset; + offset = upl_adjusted_offset(upl, VM_MAP_PAGE_MASK(map)) + upl->map_object->paging_offset; } - size = upl->size; + size = upl_adjusted_size(upl, VM_MAP_PAGE_MASK(map)); vm_object_reference(upl->map_object); @@ -6933,6 +7123,17 @@ process_upl_to_enter: goto process_upl_to_enter; } + if 
(!isVectorUPL) { + vm_map_offset_t addr_adjustment; + + addr_adjustment = (vm_map_offset_t)(upl->u_offset - upl_adjusted_offset(upl, VM_MAP_PAGE_MASK(map))); + if (addr_adjustment) { + assert(VM_MAP_PAGE_MASK(map) != PAGE_MASK); + DEBUG4K_UPL("dst_addr 0x%llx (+ 0x%llx) -> 0x%llx\n", (uint64_t)*dst_addr, (uint64_t)addr_adjustment, (uint64_t)(*dst_addr + addr_adjustment)); + *dst_addr += addr_adjustment; + } + } + upl_unlock(upl); return KERN_SUCCESS; @@ -6997,7 +7198,9 @@ process_upl_to_remove: vm_offset_t v_upl_submap_dst_addr; vector_upl_get_submap(vector_upl, &v_upl_submap, &v_upl_submap_dst_addr); - vm_map_remove(map, v_upl_submap_dst_addr, v_upl_submap_dst_addr + vector_upl->size, VM_MAP_REMOVE_NO_FLAGS); + vm_map_remove(map, v_upl_submap_dst_addr, + v_upl_submap_dst_addr + vector_upl->u_size, + VM_MAP_REMOVE_NO_FLAGS); vm_map_deallocate(v_upl_submap); upl_unlock(vector_upl); return KERN_SUCCESS; @@ -7011,7 +7214,7 @@ process_upl_to_remove: if (upl->flags & UPL_PAGE_LIST_MAPPED) { addr = upl->kaddr; - size = upl->size; + size = upl_adjusted_size(upl, VM_MAP_PAGE_MASK(map)); assert(upl->ref_count > 1); upl->ref_count--; /* removing mapping ref */ @@ -7055,7 +7258,7 @@ upl_commit_range( mach_msg_type_number_t count, boolean_t *empty) { - upl_size_t xfer_size, subupl_size = size; + upl_size_t xfer_size, subupl_size; vm_object_t shadow_object; vm_object_t object; vm_object_t m_object; @@ -7066,8 +7269,9 @@ upl_commit_range( int occupied; int clear_refmod = 0; int pgpgout_count = 0; - struct vm_page_delayed_work dw_array[DEFAULT_DELAYED_WORK_LIMIT]; - struct vm_page_delayed_work *dwp; + struct vm_page_delayed_work dw_array; + struct vm_page_delayed_work *dwp, *dwp_start; + bool dwp_finish_ctx = TRUE; int dw_count; int dw_limit; int isVectorUPL = 0; @@ -7081,13 +7285,31 @@ upl_commit_range( int unwired_count = 0; int local_queue_count = 0; vm_page_t first_local, last_local; + vm_object_offset_t obj_start, obj_end, obj_offset; + kern_return_t kr = KERN_SUCCESS; + +// DEBUG4K_UPL("upl %p (u_offset 0x%llx u_size 0x%llx) object %p offset 0x%llx size 0x%llx flags 0x%x\n", upl, (uint64_t)upl->u_offset, (uint64_t)upl->u_size, upl->map_object, (uint64_t)offset, (uint64_t)size, flags); + + dwp_start = dwp = NULL; + subupl_size = size; *empty = FALSE; if (upl == UPL_NULL) { return KERN_INVALID_ARGUMENT; } + dw_count = 0; + dw_limit = DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT); + dwp_start = vm_page_delayed_work_get_ctx(); + if (dwp_start == NULL) { + dwp_start = &dw_array; + dw_limit = 1; + dwp_finish_ctx = FALSE; + } + + dwp = dwp_start; + if (count == 0) { page_list = NULL; } @@ -7106,12 +7328,14 @@ process_upl_to_commit: offset = subupl_offset; if (size == 0) { upl_unlock(vector_upl); - return KERN_SUCCESS; + kr = KERN_SUCCESS; + goto done; } upl = vector_upl_subupl_byoffset(vector_upl, &offset, &size); if (upl == NULL) { upl_unlock(vector_upl); - return KERN_FAILURE; + kr = KERN_FAILURE; + goto done; } page_list = UPL_GET_INTERNAL_PAGE_LIST_SIMPLE(upl); subupl_size -= size; @@ -7130,7 +7354,7 @@ process_upl_to_commit: #endif if (upl->flags & UPL_DEVICE_MEMORY) { xfer_size = 0; - } else if ((offset + size) <= upl->size) { + } else if ((offset + size) <= upl_adjusted_size(upl, PAGE_MASK)) { xfer_size = size; } else { if (!isVectorUPL) { @@ -7138,7 +7362,9 @@ process_upl_to_commit: } else { upl_unlock(vector_upl); } - return KERN_FAILURE; + DEBUG4K_ERROR("upl %p (u_offset 0x%llx u_size 0x%x) offset 0x%x size 0x%x\n", upl, upl->u_offset, upl->u_size, offset, size); + kr = KERN_FAILURE; + goto 
done; } if (upl->flags & UPL_SET_DIRTY) { flags |= UPL_COMMIT_SET_DIRTY; @@ -7149,7 +7375,7 @@ process_upl_to_commit: if (upl->flags & UPL_INTERNAL) { lite_list = (wpl_array_t) ((((uintptr_t)upl) + sizeof(struct upl)) - + ((upl->size / PAGE_SIZE) * sizeof(upl_page_info_t))); + + ((upl_adjusted_size(upl, PAGE_MASK) / PAGE_SIZE) * sizeof(upl_page_info_t))); } else { lite_list = (wpl_array_t) (((uintptr_t)upl) + sizeof(struct upl)); } @@ -7165,9 +7391,6 @@ process_upl_to_commit: entry = offset / PAGE_SIZE; target_offset = (vm_object_offset_t)offset; - assert(!(target_offset & PAGE_MASK)); - assert(!(xfer_size & PAGE_MASK)); - if (upl->flags & UPL_KERNEL_OBJECT) { vm_object_lock_shared(shadow_object); } else { @@ -7202,10 +7425,6 @@ process_upl_to_commit: should_be_throttled = TRUE; } - dwp = &dw_array[0]; - dw_count = 0; - dw_limit = DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT); - if ((upl->flags & UPL_IO_WIRE) && !(flags & UPL_COMMIT_FREE_ABSENT) && !isVectorUPL && @@ -7229,7 +7448,13 @@ process_upl_to_commit: first_local = VM_PAGE_NULL; last_local = VM_PAGE_NULL; - while (xfer_size) { + obj_start = target_offset + upl->u_offset - shadow_object->paging_offset; + obj_end = obj_start + xfer_size; + obj_start = vm_object_trunc_page(obj_start); + obj_end = vm_object_round_page(obj_end); + for (obj_offset = obj_start; + obj_offset < obj_end; + obj_offset += PAGE_SIZE) { vm_page_t t, m; dwp->dw_mask = 0; @@ -7252,7 +7477,7 @@ process_upl_to_commit: lite_list[pg_num >> 5] &= ~(1U << (pg_num & 31)); if (!(upl->flags & UPL_KERNEL_OBJECT) && m == VM_PAGE_NULL) { - m = vm_page_lookup(shadow_object, target_offset + (upl->offset - shadow_object->paging_offset)); + m = vm_page_lookup(shadow_object, obj_offset); } } else { m = NULL; @@ -7288,9 +7513,9 @@ process_upl_to_commit: * Set the code signing bits according to * what the UPL says they should be. */ - m->vmp_cs_validated = page_list[entry].cs_validated; - m->vmp_cs_tainted = page_list[entry].cs_tainted; - m->vmp_cs_nx = page_list[entry].cs_nx; + m->vmp_cs_validated |= page_list[entry].cs_validated; + m->vmp_cs_tainted |= page_list[entry].cs_tainted; + m->vmp_cs_nx |= page_list[entry].cs_nx; } if (flags & UPL_COMMIT_WRITTEN_BY_KERNEL) { m->vmp_written_by_kernel = TRUE; @@ -7307,7 +7532,8 @@ process_upl_to_commit: m->vmp_dirty = FALSE; if (!(flags & UPL_COMMIT_CS_VALIDATED) && - m->vmp_cs_validated && !m->vmp_cs_tainted) { + m->vmp_cs_validated && + m->vmp_cs_tainted != VMP_CS_ALL_TRUE) { /* * CODE SIGNING: * This page is no longer dirty @@ -7315,7 +7541,7 @@ process_upl_to_commit: * so it will need to be * re-validated. */ - m->vmp_cs_validated = FALSE; + m->vmp_cs_validated = VMP_CS_ALL_FALSE; VM_PAGEOUT_DEBUG(vm_cs_validated_resets, 1); @@ -7440,7 +7666,8 @@ process_upl_to_commit: } if (!(flags & UPL_COMMIT_CS_VALIDATED) && - m->vmp_cs_validated && !m->vmp_cs_tainted) { + m->vmp_cs_validated && + m->vmp_cs_tainted != VMP_CS_ALL_TRUE) { /* * CODE SIGNING: * This page is no longer dirty @@ -7448,7 +7675,7 @@ process_upl_to_commit: * so it will need to be * re-validated. 
*/ - m->vmp_cs_validated = FALSE; + m->vmp_cs_validated = VMP_CS_ALL_FALSE; VM_PAGEOUT_DEBUG(vm_cs_validated_resets, 1); @@ -7608,9 +7835,9 @@ commit_next_page: VM_PAGE_ADD_DELAYED_WORK(dwp, m, dw_count); if (dw_count >= dw_limit) { - vm_page_do_delayed_work(shadow_object, VM_KERN_MEMORY_NONE, &dw_array[0], dw_count); + vm_page_do_delayed_work(shadow_object, VM_KERN_MEMORY_NONE, dwp_start, dw_count); - dwp = &dw_array[0]; + dwp = dwp_start; dw_count = 0; } } else { @@ -7625,7 +7852,9 @@ commit_next_page: } } if (dw_count) { - vm_page_do_delayed_work(shadow_object, VM_KERN_MEMORY_NONE, &dw_array[0], dw_count); + vm_page_do_delayed_work(shadow_object, VM_KERN_MEMORY_NONE, dwp_start, dw_count); + dwp = dwp_start; + dw_count = 0; } if (fast_path_possible) { @@ -7713,7 +7942,7 @@ commit_next_page: occupied = 0; if (!fast_path_full_commit) { - pg_num = upl->size / PAGE_SIZE; + pg_num = upl_adjusted_size(upl, PAGE_MASK) / PAGE_SIZE; pg_num = (pg_num + 31) >> 5; for (i = 0; i < pg_num; i++) { @@ -7786,7 +8015,14 @@ commit_next_page: DTRACE_VM2(pgpgout, int, pgpgout_count, (uint64_t *), NULL); } - return KERN_SUCCESS; + kr = KERN_SUCCESS; +done: + if (dwp_start && dwp_finish_ctx) { + vm_page_delayed_work_finish_ctx(dwp_start); + dwp_start = dwp = NULL; + } + + return kr; } kern_return_t @@ -7798,7 +8034,7 @@ upl_abort_range( boolean_t *empty) { upl_page_info_t *user_page_list = NULL; - upl_size_t xfer_size, subupl_size = size; + upl_size_t xfer_size, subupl_size; vm_object_t shadow_object; vm_object_t object; vm_object_offset_t target_offset; @@ -7806,13 +8042,21 @@ upl_abort_range( int entry; wpl_array_t lite_list; int occupied; - struct vm_page_delayed_work dw_array[DEFAULT_DELAYED_WORK_LIMIT]; - struct vm_page_delayed_work *dwp; + struct vm_page_delayed_work dw_array; + struct vm_page_delayed_work *dwp, *dwp_start; + bool dwp_finish_ctx = TRUE; int dw_count; int dw_limit; int isVectorUPL = 0; upl_t vector_upl = NULL; + vm_object_offset_t obj_start, obj_end, obj_offset; + kern_return_t kr = KERN_SUCCESS; + +// DEBUG4K_UPL("upl %p (u_offset 0x%llx u_size 0x%llx) object %p offset 0x%llx size 0x%llx error 0x%x\n", upl, (uint64_t)upl->u_offset, (uint64_t)upl->u_size, upl->map_object, (uint64_t)offset, (uint64_t)size, error); + + dwp_start = dwp = NULL; + subupl_size = size; *empty = FALSE; if (upl == UPL_NULL) { @@ -7823,6 +8067,17 @@ upl_abort_range( return upl_commit_range(upl, offset, size, UPL_COMMIT_FREE_ABSENT, NULL, 0, empty); } + dw_count = 0; + dw_limit = DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT); + dwp_start = vm_page_delayed_work_get_ctx(); + if (dwp_start == NULL) { + dwp_start = &dw_array; + dw_limit = 1; + dwp_finish_ctx = FALSE; + } + + dwp = dwp_start; + if ((isVectorUPL = vector_upl_is_valid(upl))) { vector_upl = upl; upl_lock(vector_upl); @@ -7836,12 +8091,14 @@ process_upl_to_abort: offset = subupl_offset; if (size == 0) { upl_unlock(vector_upl); - return KERN_SUCCESS; + kr = KERN_SUCCESS; + goto done; } upl = vector_upl_subupl_byoffset(vector_upl, &offset, &size); if (upl == NULL) { upl_unlock(vector_upl); - return KERN_FAILURE; + kr = KERN_FAILURE; + goto done; } subupl_size -= size; subupl_offset += size; @@ -7862,7 +8119,7 @@ process_upl_to_abort: #endif if (upl->flags & UPL_DEVICE_MEMORY) { xfer_size = 0; - } else if ((offset + size) <= upl->size) { + } else if ((offset + size) <= upl_adjusted_size(upl, PAGE_MASK)) { xfer_size = size; } else { if (!isVectorUPL) { @@ -7870,13 +8127,14 @@ process_upl_to_abort: } else { upl_unlock(vector_upl); } - - return KERN_FAILURE; + 
DEBUG4K_ERROR("upl %p (u_offset 0x%llx u_size 0x%x) offset 0x%x size 0x%x\n", upl, upl->u_offset, upl->u_size, offset, size); + kr = KERN_FAILURE; + goto done; } if (upl->flags & UPL_INTERNAL) { lite_list = (wpl_array_t) ((((uintptr_t)upl) + sizeof(struct upl)) - + ((upl->size / PAGE_SIZE) * sizeof(upl_page_info_t))); + + ((upl_adjusted_size(upl, PAGE_MASK) / PAGE_SIZE) * sizeof(upl_page_info_t))); user_page_list = (upl_page_info_t *) (((uintptr_t)upl) + sizeof(struct upl)); } else { @@ -7895,9 +8153,6 @@ process_upl_to_abort: entry = offset / PAGE_SIZE; target_offset = (vm_object_offset_t)offset; - assert(!(target_offset & PAGE_MASK)); - assert(!(xfer_size & PAGE_MASK)); - if (upl->flags & UPL_KERNEL_OBJECT) { vm_object_lock_shared(shadow_object); } else { @@ -7910,15 +8165,17 @@ process_upl_to_abort: vm_object_wakeup(object, VM_OBJECT_EVENT_UNBLOCKED); } - dwp = &dw_array[0]; - dw_count = 0; - dw_limit = DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT); - if ((error & UPL_ABORT_DUMP_PAGES) && (upl->flags & UPL_KERNEL_OBJECT)) { panic("upl_abort_range: kernel_object being DUMPED"); } - while (xfer_size) { + obj_start = target_offset + upl->u_offset - shadow_object->paging_offset; + obj_end = obj_start + xfer_size; + obj_start = vm_object_trunc_page(obj_start); + obj_end = vm_object_round_page(obj_end); + for (obj_offset = obj_start; + obj_offset < obj_end; + obj_offset += PAGE_SIZE) { vm_page_t t, m; unsigned int pg_num; boolean_t needed; @@ -7940,8 +8197,7 @@ process_upl_to_abort: lite_list[pg_num >> 5] &= ~(1U << (pg_num & 31)); if (!(upl->flags & UPL_KERNEL_OBJECT)) { - m = vm_page_lookup(shadow_object, target_offset + - (upl->offset - shadow_object->paging_offset)); + m = vm_page_lookup(shadow_object, obj_offset); } } } @@ -8092,9 +8348,9 @@ abort_next_page: VM_PAGE_ADD_DELAYED_WORK(dwp, m, dw_count); if (dw_count >= dw_limit) { - vm_page_do_delayed_work(shadow_object, VM_KERN_MEMORY_NONE, &dw_array[0], dw_count); + vm_page_do_delayed_work(shadow_object, VM_KERN_MEMORY_NONE, dwp_start, dw_count); - dwp = &dw_array[0]; + dwp = dwp_start; dw_count = 0; } } else { @@ -8109,7 +8365,9 @@ abort_next_page: } } if (dw_count) { - vm_page_do_delayed_work(shadow_object, VM_KERN_MEMORY_NONE, &dw_array[0], dw_count); + vm_page_do_delayed_work(shadow_object, VM_KERN_MEMORY_NONE, dwp_start, dw_count); + dwp = dwp_start; + dw_count = 0; } occupied = 1; @@ -8120,7 +8378,7 @@ abort_next_page: int pg_num; int i; - pg_num = upl->size / PAGE_SIZE; + pg_num = upl_adjusted_size(upl, PAGE_MASK) / PAGE_SIZE; pg_num = (pg_num + 31) >> 5; occupied = 0; @@ -8189,7 +8447,15 @@ abort_next_page: goto process_upl_to_abort; } - return KERN_SUCCESS; + kr = KERN_SUCCESS; + +done: + if (dwp_start && dwp_finish_ctx) { + vm_page_delayed_work_finish_ctx(dwp_start); + dwp_start = dwp = NULL; + } + + return kr; } @@ -8204,7 +8470,7 @@ upl_abort( return KERN_INVALID_ARGUMENT; } - return upl_abort_range(upl, 0, upl->size, error, &empty); + return upl_abort_range(upl, 0, upl->u_size, error, &empty); } @@ -8221,7 +8487,8 @@ upl_commit( return KERN_INVALID_ARGUMENT; } - return upl_commit_range(upl, 0, upl->size, 0, page_list, count, &empty); + return upl_commit_range(upl, 0, upl->u_size, 0, + page_list, count, &empty); } @@ -8258,7 +8525,7 @@ iopl_valid_data( object, object->purgable); } - size = upl->size; + size = upl_adjusted_size(upl, PAGE_MASK); vm_object_lock(object); VM_OBJECT_WIRED_PAGE_UPDATE_START(object); @@ -8266,7 +8533,7 @@ iopl_valid_data( if (object->vo_size == size && object->resident_page_count == (size / 
PAGE_SIZE)) { nxt_page = (vm_page_t)vm_page_queue_first(&object->memq); } else { - offset = 0 + upl->offset - object->paging_offset; + offset = (vm_offset_t)(upl_adjusted_offset(upl, PAGE_MASK) - object->paging_offset); } while (size) { @@ -8621,8 +8888,9 @@ vm_object_iopl_request( kern_return_t ret; vm_prot_t prot; struct vm_object_fault_info fault_info = {}; - struct vm_page_delayed_work dw_array[DEFAULT_DELAYED_WORK_LIMIT]; - struct vm_page_delayed_work *dwp; + struct vm_page_delayed_work dw_array; + struct vm_page_delayed_work *dwp, *dwp_start; + bool dwp_finish_ctx = TRUE; int dw_count; int dw_limit; int dw_index; @@ -8640,6 +8908,19 @@ vm_object_iopl_request( task_t task = current_task(); #endif /* DEVELOPMENT || DEBUG */ + dwp_start = dwp = NULL; + + vm_object_offset_t original_offset = offset; + upl_size_t original_size = size; + +// DEBUG4K_UPL("object %p offset 0x%llx size 0x%llx cntrl_flags 0x%llx\n", object, (uint64_t)offset, (uint64_t)size, cntrl_flags); + + size = (upl_size_t)(vm_object_round_page(offset + size) - vm_object_trunc_page(offset)); + offset = vm_object_trunc_page(offset); + if (size != original_size || offset != original_offset) { + DEBUG4K_IOKIT("flags 0x%llx object %p offset 0x%llx size 0x%x -> offset 0x%llx size 0x%x\n", cntrl_flags, object, original_offset, original_size, offset, size); + } + if (cntrl_flags & ~UPL_VALID_FLAGS) { /* * For forward compatibility's sake, @@ -8680,6 +8961,7 @@ vm_object_iopl_request( panic("vm_object_iopl_request: external object with non-zero paging offset\n"); } + VM_DEBUG_CONSTANT_EVENT(vm_object_iopl_request, VM_IOPL_REQUEST, DBG_FUNC_START, size, cntrl_flags, prot, 0); #if CONFIG_IOSCHED || UPL_DEBUG @@ -8701,6 +8983,17 @@ vm_object_iopl_request( psize = PAGE_SIZE; } else { psize = size; + + dw_count = 0; + dw_limit = DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT); + dwp_start = vm_page_delayed_work_get_ctx(); + if (dwp_start == NULL) { + dwp_start = &dw_array; + dw_limit = 1; + dwp_finish_ctx = FALSE; + } + + dwp = dwp_start; } if (cntrl_flags & UPL_SET_INTERNAL) { @@ -8735,7 +9028,8 @@ vm_object_iopl_request( } upl->map_object = object; - upl->size = size; + upl->u_offset = original_offset; + upl->u_size = original_size; size_in_pages = size / PAGE_SIZE; @@ -8754,7 +9048,7 @@ vm_object_iopl_request( /* * paging in progress also protects the paging_offset */ - upl->offset = offset + object->paging_offset; + upl->u_offset = original_offset + object->paging_offset; if (cntrl_flags & UPL_BLOCK_ACCESS) { /* @@ -8765,7 +9059,7 @@ vm_object_iopl_request( } #if CONFIG_IOSCHED || UPL_DEBUG - if (upl->flags & UPL_TRACKED_BY_OBJECT) { + if ((upl->flags & UPL_TRACKED_BY_OBJECT) || upl_debug_enabled) { vm_object_activity_begin(object); queue_enter(&object->uplq, upl, upl_t, uplq); } @@ -8895,7 +9189,6 @@ vm_object_iopl_request( xfer_size = size; dst_offset = offset; - dw_count = 0; if (fast_path_full_req) { if (vm_object_iopl_wire_full(object, upl, user_page_list, lite_list, cntrl_flags, tag) == TRUE) { @@ -8926,9 +9219,6 @@ vm_object_iopl_request( fault_info.interruptible = interruptible; fault_info.batch_pmap_op = TRUE; - dwp = &dw_array[0]; - dw_limit = DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT); - while (xfer_size) { vm_fault_return_t result; @@ -9061,10 +9351,11 @@ vm_object_iopl_request( VM_DEBUG_EVENT(vm_iopl_page_wait, VM_IOPL_PAGE_WAIT, DBG_FUNC_END, vm_upl_wait_for_pages, 0, 0, -1); - /* fall thru */ + OS_FALLTHROUGH; case VM_FAULT_INTERRUPTED: error_code = MACH_SEND_INTERRUPTED; + OS_FALLTHROUGH; case 
VM_FAULT_MEMORY_ERROR: memory_error: ret = (error_code ? error_code: KERN_MEMORY_ERROR); @@ -9207,8 +9498,8 @@ memory_error: * all pmaps so processes have to fault it back in and * deal with the tainted bit. */ - if (object->code_signed && dst_page->vmp_cs_tainted == FALSE) { - dst_page->vmp_cs_tainted = TRUE; + if (object->code_signed && dst_page->vmp_cs_tainted != VMP_CS_ALL_TRUE) { + dst_page->vmp_cs_tainted = VMP_CS_ALL_TRUE; vm_page_iopl_tainted++; if (dst_page->vmp_pmapped) { int refmod = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(dst_page)); @@ -9271,9 +9562,9 @@ skip_page: VM_PAGE_ADD_DELAYED_WORK(dwp, dst_page, dw_count); if (dw_count >= dw_limit) { - vm_page_do_delayed_work(object, tag, &dw_array[0], dw_count); + vm_page_do_delayed_work(object, tag, dwp_start, dw_count); - dwp = &dw_array[0]; + dwp = dwp_start; dw_count = 0; } } @@ -9281,7 +9572,9 @@ skip_page: assert(entry == size_in_pages); if (dw_count) { - vm_page_do_delayed_work(object, tag, &dw_array[0], dw_count); + vm_page_do_delayed_work(object, tag, dwp_start, dw_count); + dwp = dwp_start; + dw_count = 0; } finish: if (user_page_list && set_cache_attr_needed == TRUE) { @@ -9305,7 +9598,9 @@ finish: * can't be accessed without causing a page fault. */ vm_object_pmap_protect(object, offset, (vm_object_size_t)size, - PMAP_NULL, 0, VM_PROT_NONE); + PMAP_NULL, + PAGE_SIZE, + 0, VM_PROT_NONE); assert(!object->blocked_access); object->blocked_access = TRUE; } @@ -9316,6 +9611,12 @@ finish: ledger_credit(task->ledger, task_ledgers.pages_grabbed_iopl, page_grab_count); } #endif /* DEVELOPMENT || DEBUG */ + + if (dwp_start && dwp_finish_ctx) { + vm_page_delayed_work_finish_ctx(dwp_start); + dwp_start = dwp = NULL; + } + return KERN_SUCCESS; return_err: @@ -9343,7 +9644,7 @@ return_err: need_unwire = TRUE; if (dw_count) { - if (dw_array[dw_index].dw_m == dst_page) { + if ((dwp_start)[dw_index].dw_m == dst_page) { /* * still in the deferred work list * which means we haven't yet called @@ -9390,6 +9691,11 @@ return_err: ledger_credit(task->ledger, task_ledgers.pages_grabbed_iopl, page_grab_count); } #endif /* DEVELOPMENT || DEBUG */ + + if (dwp_start && dwp_finish_ctx) { + vm_page_delayed_work_finish_ctx(dwp_start); + dwp_start = dwp = NULL; + } return ret; } @@ -9424,8 +9730,8 @@ upl_transpose( object1 = upl1->map_object; object2 = upl2->map_object; - if (upl1->offset != 0 || upl2->offset != 0 || - upl1->size != upl2->size) { + if (upl1->u_offset != 0 || upl2->u_offset != 0 || + upl1->u_size != upl2->u_size) { /* * We deal only with full objects, not subsets. * That's because we exchange the entire backing store info @@ -9440,7 +9746,7 @@ upl_transpose( * Tranpose the VM objects' backing store. 
*/ retval = vm_object_transpose(object1, object2, - (vm_object_size_t) upl1->size); + upl_adjusted_size(upl1, PAGE_MASK)); if (retval == KERN_SUCCESS) { /* @@ -9452,10 +9758,10 @@ upl_transpose( vm_object_lock(object1); vm_object_lock(object2); } - if (upl1->flags & UPL_TRACKED_BY_OBJECT) { + if ((upl1->flags & UPL_TRACKED_BY_OBJECT) || upl_debug_enabled) { queue_remove(&object1->uplq, upl1, upl_t, uplq); } - if (upl2->flags & UPL_TRACKED_BY_OBJECT) { + if ((upl2->flags & UPL_TRACKED_BY_OBJECT) || upl_debug_enabled) { queue_remove(&object2->uplq, upl2, upl_t, uplq); } #endif @@ -9463,10 +9769,10 @@ upl_transpose( upl2->map_object = object1; #if CONFIG_IOSCHED || UPL_DEBUG - if (upl1->flags & UPL_TRACKED_BY_OBJECT) { + if ((upl1->flags & UPL_TRACKED_BY_OBJECT) || upl_debug_enabled) { queue_enter(&object2->uplq, upl1, upl_t, uplq); } - if (upl2->flags & UPL_TRACKED_BY_OBJECT) { + if ((upl2->flags & UPL_TRACKED_BY_OBJECT) || upl_debug_enabled) { queue_enter(&object1->uplq, upl2, upl_t, uplq); } if ((upl1->flags & UPL_TRACKED_BY_OBJECT) || (upl2->flags & UPL_TRACKED_BY_OBJECT)) { @@ -9502,7 +9808,7 @@ upl_range_needed( return; } - size_in_pages = upl->size / PAGE_SIZE; + size_in_pages = upl_adjusted_size(upl, PAGE_MASK) / PAGE_SIZE; user_page_list = (upl_page_info_t *) (((uintptr_t)upl) + sizeof(struct upl)); @@ -9521,7 +9827,7 @@ upl_range_needed( * virtaul address space each time we need to work with * a physical page. */ -decl_simple_lock_data(, vm_paging_lock); +SIMPLE_LOCK_DECLARE(vm_paging_lock, 0); #define VM_PAGING_NUM_PAGES 64 vm_map_offset_t vm_paging_base_address = 0; boolean_t vm_paging_page_inuse[VM_PAGING_NUM_PAGES] = { FALSE, }; @@ -9535,6 +9841,7 @@ unsigned long vm_paging_pages_mapped = 0; unsigned long vm_paging_objects_mapped_slow = 0; unsigned long vm_paging_pages_mapped_slow = 0; +__startup_func void vm_paging_map_init(void) { @@ -9899,7 +10206,7 @@ vector_upl_create(vm_offset_t upl_offset) upl = upl_create(0, UPL_VECTOR, 0); upl->vector_upl = vector_upl; - upl->offset = upl_offset; + upl->u_offset = upl_offset; vector_upl->size = 0; vector_upl->offset = upl_offset; vector_upl->invalid_upls = 0; @@ -9967,7 +10274,7 @@ vector_upl_set_subupl(upl_t upl, upl_t subupl, uint32_t io_size) subupl->vector_upl = (void*)vector_upl; vector_upl->upl_elems[vector_upl->num_upls++] = subupl; vector_upl->size += io_size; - upl->size += io_size; + upl->u_size += io_size; } else { uint32_t i = 0, invalid_upls = 0; for (i = 0; i < vector_upl->num_upls; i++) { @@ -10014,7 +10321,7 @@ vector_upl_set_pagelist(upl_t upl) vector_upl->pagelist = (upl_page_info_array_t)kalloc(sizeof(struct upl_page_info) * (vector_upl->size / PAGE_SIZE)); for (i = 0; i < vector_upl->num_upls; i++) { - cur_upl_pagelist_size = sizeof(struct upl_page_info) * vector_upl->upl_elems[i]->size / PAGE_SIZE; + cur_upl_pagelist_size = sizeof(struct upl_page_info) * upl_adjusted_size(vector_upl->upl_elems[i], PAGE_MASK) / PAGE_SIZE; bcopy(UPL_GET_INTERNAL_PAGE_LIST_SIMPLE(vector_upl->upl_elems[i]), (char*)vector_upl->pagelist + pagelist_size, cur_upl_pagelist_size); pagelist_size += cur_upl_pagelist_size; if (vector_upl->upl_elems[i]->highest_page > upl->highest_page) { @@ -10500,7 +10807,35 @@ upl_size_t upl_get_size( upl_t upl) { - return upl->size; + return upl_adjusted_size(upl, PAGE_MASK); +} + +upl_size_t +upl_adjusted_size( + upl_t upl, + vm_map_offset_t pgmask) +{ + vm_object_offset_t start_offset, end_offset; + + start_offset = trunc_page_mask_64(upl->u_offset, pgmask); + end_offset = round_page_mask_64(upl->u_offset + 
upl->u_size, pgmask); + + return (upl_size_t)(end_offset - start_offset); +} + +vm_object_offset_t +upl_adjusted_offset( + upl_t upl, + vm_map_offset_t pgmask) +{ + return trunc_page_mask_64(upl->u_offset, pgmask); +} + +vm_object_offset_t +upl_get_data_offset( + upl_t upl) +{ + return upl->u_offset - upl_adjusted_offset(upl, PAGE_MASK); } upl_t @@ -10619,440 +10954,3 @@ VM_PRESSURE_CRITICAL_TO_WARNING(void) } } #endif /* VM_PRESSURE_EVENTS */ - - - -#define VM_TEST_COLLAPSE_COMPRESSOR 0 -#define VM_TEST_WIRE_AND_EXTRACT 0 -#define VM_TEST_PAGE_WIRE_OVERFLOW_PANIC 0 -#if __arm64__ -#define VM_TEST_KERNEL_OBJECT_FAULT 0 -#endif /* __arm64__ */ -#define VM_TEST_DEVICE_PAGER_TRANSPOSE (DEVELOPMENT || DEBUG) - -#if VM_TEST_COLLAPSE_COMPRESSOR -extern boolean_t vm_object_collapse_compressor_allowed; -#include -static void -vm_test_collapse_compressor(void) -{ - vm_object_size_t backing_size, top_size; - vm_object_t backing_object, top_object; - vm_map_offset_t backing_offset, top_offset; - unsigned char *backing_address, *top_address; - kern_return_t kr; - - printf("VM_TEST_COLLAPSE_COMPRESSOR:\n"); - - /* create backing object */ - backing_size = 15 * PAGE_SIZE; - backing_object = vm_object_allocate(backing_size); - assert(backing_object != VM_OBJECT_NULL); - printf("VM_TEST_COLLAPSE_COMPRESSOR: created backing object %p\n", - backing_object); - /* map backing object */ - backing_offset = 0; - kr = vm_map_enter(kernel_map, &backing_offset, backing_size, 0, - VM_FLAGS_ANYWHERE, VM_MAP_KERNEL_FLAGS_NONE, - backing_object, 0, FALSE, - VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT); - assert(kr == KERN_SUCCESS); - backing_address = (unsigned char *) backing_offset; - printf("VM_TEST_COLLAPSE_COMPRESSOR: " - "mapped backing object %p at 0x%llx\n", - backing_object, (uint64_t) backing_offset); - /* populate with pages to be compressed in backing object */ - backing_address[0x1 * PAGE_SIZE] = 0xB1; - backing_address[0x4 * PAGE_SIZE] = 0xB4; - backing_address[0x7 * PAGE_SIZE] = 0xB7; - backing_address[0xa * PAGE_SIZE] = 0xBA; - backing_address[0xd * PAGE_SIZE] = 0xBD; - printf("VM_TEST_COLLAPSE_COMPRESSOR: " - "populated pages to be compressed in " - "backing_object %p\n", backing_object); - /* compress backing object */ - vm_object_pageout(backing_object); - printf("VM_TEST_COLLAPSE_COMPRESSOR: compressing backing_object %p\n", - backing_object); - /* wait for all the pages to be gone */ - while (*(volatile int *)&backing_object->resident_page_count != 0) { - IODelay(10); - } - printf("VM_TEST_COLLAPSE_COMPRESSOR: backing_object %p compressed\n", - backing_object); - /* populate with pages to be resident in backing object */ - backing_address[0x0 * PAGE_SIZE] = 0xB0; - backing_address[0x3 * PAGE_SIZE] = 0xB3; - backing_address[0x6 * PAGE_SIZE] = 0xB6; - backing_address[0x9 * PAGE_SIZE] = 0xB9; - backing_address[0xc * PAGE_SIZE] = 0xBC; - printf("VM_TEST_COLLAPSE_COMPRESSOR: " - "populated pages to be resident in " - "backing_object %p\n", backing_object); - /* leave the other pages absent */ - /* mess with the paging_offset of the backing_object */ - assert(backing_object->paging_offset == 0); - backing_object->paging_offset = 0x3000; - - /* create top object */ - top_size = 9 * PAGE_SIZE; - top_object = vm_object_allocate(top_size); - assert(top_object != VM_OBJECT_NULL); - printf("VM_TEST_COLLAPSE_COMPRESSOR: created top object %p\n", - top_object); - /* map top object */ - top_offset = 0; - kr = vm_map_enter(kernel_map, &top_offset, top_size, 0, - VM_FLAGS_ANYWHERE, 
VM_MAP_KERNEL_FLAGS_NONE, - top_object, 0, FALSE, - VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT); - assert(kr == KERN_SUCCESS); - top_address = (unsigned char *) top_offset; - printf("VM_TEST_COLLAPSE_COMPRESSOR: " - "mapped top object %p at 0x%llx\n", - top_object, (uint64_t) top_offset); - /* populate with pages to be compressed in top object */ - top_address[0x3 * PAGE_SIZE] = 0xA3; - top_address[0x4 * PAGE_SIZE] = 0xA4; - top_address[0x5 * PAGE_SIZE] = 0xA5; - printf("VM_TEST_COLLAPSE_COMPRESSOR: " - "populated pages to be compressed in " - "top_object %p\n", top_object); - /* compress top object */ - vm_object_pageout(top_object); - printf("VM_TEST_COLLAPSE_COMPRESSOR: compressing top_object %p\n", - top_object); - /* wait for all the pages to be gone */ - while (top_object->resident_page_count != 0) { - IODelay(10); - } - printf("VM_TEST_COLLAPSE_COMPRESSOR: top_object %p compressed\n", - top_object); - /* populate with pages to be resident in top object */ - top_address[0x0 * PAGE_SIZE] = 0xA0; - top_address[0x1 * PAGE_SIZE] = 0xA1; - top_address[0x2 * PAGE_SIZE] = 0xA2; - printf("VM_TEST_COLLAPSE_COMPRESSOR: " - "populated pages to be resident in " - "top_object %p\n", top_object); - /* leave the other pages absent */ - - /* link the 2 objects */ - vm_object_reference(backing_object); - top_object->shadow = backing_object; - top_object->vo_shadow_offset = 0x3000; - printf("VM_TEST_COLLAPSE_COMPRESSOR: linked %p and %p\n", - top_object, backing_object); - - /* unmap backing object */ - vm_map_remove(kernel_map, - backing_offset, - backing_offset + backing_size, - VM_MAP_REMOVE_NO_FLAGS); - printf("VM_TEST_COLLAPSE_COMPRESSOR: " - "unmapped backing_object %p [0x%llx:0x%llx]\n", - backing_object, - (uint64_t) backing_offset, - (uint64_t) (backing_offset + backing_size)); - - /* collapse */ - printf("VM_TEST_COLLAPSE_COMPRESSOR: collapsing %p\n", top_object); - vm_object_lock(top_object); - vm_object_collapse(top_object, 0, FALSE); - vm_object_unlock(top_object); - printf("VM_TEST_COLLAPSE_COMPRESSOR: collapsed %p\n", top_object); - - /* did it work? 
*/ - if (top_object->shadow != VM_OBJECT_NULL) { - printf("VM_TEST_COLLAPSE_COMPRESSOR: not collapsed\n"); - printf("VM_TEST_COLLAPSE_COMPRESSOR: FAIL\n"); - if (vm_object_collapse_compressor_allowed) { - panic("VM_TEST_COLLAPSE_COMPRESSOR: FAIL\n"); - } - } else { - /* check the contents of the mapping */ - unsigned char expect[9] = - { 0xA0, 0xA1, 0xA2, /* resident in top */ - 0xA3, 0xA4, 0xA5, /* compressed in top */ - 0xB9, /* resident in backing + shadow_offset */ - 0xBD, /* compressed in backing + shadow_offset + paging_offset */ - 0x00 }; /* absent in both */ - unsigned char actual[9]; - unsigned int i, errors; - - errors = 0; - for (i = 0; i < sizeof(actual); i++) { - actual[i] = (unsigned char) top_address[i * PAGE_SIZE]; - if (actual[i] != expect[i]) { - errors++; - } - } - printf("VM_TEST_COLLAPSE_COMPRESSOR: " - "actual [%x %x %x %x %x %x %x %x %x] " - "expect [%x %x %x %x %x %x %x %x %x] " - "%d errors\n", - actual[0], actual[1], actual[2], actual[3], - actual[4], actual[5], actual[6], actual[7], - actual[8], - expect[0], expect[1], expect[2], expect[3], - expect[4], expect[5], expect[6], expect[7], - expect[8], - errors); - if (errors) { - panic("VM_TEST_COLLAPSE_COMPRESSOR: FAIL\n"); - } else { - printf("VM_TEST_COLLAPSE_COMPRESSOR: PASS\n"); - } - } -} -#else /* VM_TEST_COLLAPSE_COMPRESSOR */ -#define vm_test_collapse_compressor() -#endif /* VM_TEST_COLLAPSE_COMPRESSOR */ - -#if VM_TEST_WIRE_AND_EXTRACT -extern ledger_template_t task_ledger_template; -#include -extern ppnum_t vm_map_get_phys_page(vm_map_t map, - vm_offset_t offset); -static void -vm_test_wire_and_extract(void) -{ - ledger_t ledger; - vm_map_t user_map, wire_map; - mach_vm_address_t user_addr, wire_addr; - mach_vm_size_t user_size, wire_size; - mach_vm_offset_t cur_offset; - vm_prot_t cur_prot, max_prot; - ppnum_t user_ppnum, wire_ppnum; - kern_return_t kr; - - ledger = ledger_instantiate(task_ledger_template, - LEDGER_CREATE_ACTIVE_ENTRIES); - user_map = vm_map_create(pmap_create_options(ledger, 0, PMAP_CREATE_64BIT), - 0x100000000ULL, - 0x200000000ULL, - TRUE); - wire_map = vm_map_create(NULL, - 0x100000000ULL, - 0x200000000ULL, - TRUE); - user_addr = 0; - user_size = 0x10000; - kr = mach_vm_allocate(user_map, - &user_addr, - user_size, - VM_FLAGS_ANYWHERE); - assert(kr == KERN_SUCCESS); - wire_addr = 0; - wire_size = user_size; - kr = mach_vm_remap(wire_map, - &wire_addr, - wire_size, - 0, - VM_FLAGS_ANYWHERE, - user_map, - user_addr, - FALSE, - &cur_prot, - &max_prot, - VM_INHERIT_NONE); - assert(kr == KERN_SUCCESS); - for (cur_offset = 0; - cur_offset < wire_size; - cur_offset += PAGE_SIZE) { - kr = vm_map_wire_and_extract(wire_map, - wire_addr + cur_offset, - VM_PROT_DEFAULT | VM_PROT_MEMORY_TAG_MAKE(VM_KERN_MEMORY_OSFMK), - TRUE, - &wire_ppnum); - assert(kr == KERN_SUCCESS); - user_ppnum = vm_map_get_phys_page(user_map, - user_addr + cur_offset); - printf("VM_TEST_WIRE_AND_EXTRACT: kr=0x%x " - "user[%p:0x%llx:0x%x] wire[%p:0x%llx:0x%x]\n", - kr, - user_map, user_addr + cur_offset, user_ppnum, - wire_map, wire_addr + cur_offset, wire_ppnum); - if (kr != KERN_SUCCESS || - wire_ppnum == 0 || - wire_ppnum != user_ppnum) { - panic("VM_TEST_WIRE_AND_EXTRACT: FAIL\n"); - } - } - cur_offset -= PAGE_SIZE; - kr = vm_map_wire_and_extract(wire_map, - wire_addr + cur_offset, - VM_PROT_DEFAULT, - TRUE, - &wire_ppnum); - assert(kr == KERN_SUCCESS); - printf("VM_TEST_WIRE_AND_EXTRACT: re-wire kr=0x%x " - "user[%p:0x%llx:0x%x] wire[%p:0x%llx:0x%x]\n", - kr, - user_map, user_addr + cur_offset, user_ppnum, - wire_map, 
wire_addr + cur_offset, wire_ppnum); - if (kr != KERN_SUCCESS || - wire_ppnum == 0 || - wire_ppnum != user_ppnum) { - panic("VM_TEST_WIRE_AND_EXTRACT: FAIL\n"); - } - - printf("VM_TEST_WIRE_AND_EXTRACT: PASS\n"); -} -#else /* VM_TEST_WIRE_AND_EXTRACT */ -#define vm_test_wire_and_extract() -#endif /* VM_TEST_WIRE_AND_EXTRACT */ - -#if VM_TEST_PAGE_WIRE_OVERFLOW_PANIC -static void -vm_test_page_wire_overflow_panic(void) -{ - vm_object_t object; - vm_page_t page; - - printf("VM_TEST_PAGE_WIRE_OVERFLOW_PANIC: starting...\n"); - - object = vm_object_allocate(PAGE_SIZE); - vm_object_lock(object); - page = vm_page_alloc(object, 0x0); - vm_page_lock_queues(); - do { - vm_page_wire(page, 1, FALSE); - } while (page->wire_count != 0); - vm_page_unlock_queues(); - vm_object_unlock(object); - panic("FBDP(%p,%p): wire_count overflow not detected\n", - object, page); -} -#else /* VM_TEST_PAGE_WIRE_OVERFLOW_PANIC */ -#define vm_test_page_wire_overflow_panic() -#endif /* VM_TEST_PAGE_WIRE_OVERFLOW_PANIC */ - -#if __arm64__ && VM_TEST_KERNEL_OBJECT_FAULT -extern int copyinframe(vm_address_t fp, char *frame, boolean_t is64bit); -static void -vm_test_kernel_object_fault(void) -{ - kern_return_t kr; - vm_offset_t stack; - uintptr_t frameb[2]; - int ret; - - kr = kernel_memory_allocate(kernel_map, &stack, - kernel_stack_size + (2 * PAGE_SIZE), - 0, - (KMA_KSTACK | KMA_KOBJECT | - KMA_GUARD_FIRST | KMA_GUARD_LAST), - VM_KERN_MEMORY_STACK); - if (kr != KERN_SUCCESS) { - panic("VM_TEST_KERNEL_OBJECT_FAULT: kernel_memory_allocate kr 0x%x\n", kr); - } - ret = copyinframe((uintptr_t)stack, (char *)frameb, TRUE); - if (ret != 0) { - printf("VM_TEST_KERNEL_OBJECT_FAULT: PASS\n"); - } else { - printf("VM_TEST_KERNEL_OBJECT_FAULT: FAIL\n"); - } - vm_map_remove(kernel_map, - stack, - stack + kernel_stack_size + (2 * PAGE_SIZE), - VM_MAP_REMOVE_KUNWIRE); - stack = 0; -} -#else /* __arm64__ && VM_TEST_KERNEL_OBJECT_FAULT */ -#define vm_test_kernel_object_fault() -#endif /* __arm64__ && VM_TEST_KERNEL_OBJECT_FAULT */ - -#if VM_TEST_DEVICE_PAGER_TRANSPOSE -static void -vm_test_device_pager_transpose(void) -{ - memory_object_t device_pager; - vm_object_t anon_object, device_object; - vm_size_t size; - vm_map_offset_t device_mapping; - kern_return_t kr; - - size = 3 * PAGE_SIZE; - anon_object = vm_object_allocate(size); - assert(anon_object != VM_OBJECT_NULL); - device_pager = device_pager_setup(NULL, 0, size, 0); - assert(device_pager != NULL); - device_object = memory_object_to_vm_object(device_pager); - assert(device_object != VM_OBJECT_NULL); -#if 0 - /* - * Can't actually map this, since another thread might do a - * vm_map_enter() that gets coalesced into this object, which - * would cause the test to fail. 
- */ - vm_map_offset_t anon_mapping = 0; - kr = vm_map_enter(kernel_map, &anon_mapping, size, 0, - VM_FLAGS_ANYWHERE, VM_MAP_KERNEL_FLAGS_NONE, VM_KERN_MEMORY_NONE, - anon_object, 0, FALSE, VM_PROT_DEFAULT, VM_PROT_ALL, - VM_INHERIT_DEFAULT); - assert(kr == KERN_SUCCESS); -#endif - device_mapping = 0; - kr = vm_map_enter_mem_object(kernel_map, &device_mapping, size, 0, - VM_FLAGS_ANYWHERE, - VM_MAP_KERNEL_FLAGS_NONE, - VM_KERN_MEMORY_NONE, - (void *)device_pager, 0, FALSE, - VM_PROT_DEFAULT, VM_PROT_ALL, - VM_INHERIT_DEFAULT); - assert(kr == KERN_SUCCESS); - memory_object_deallocate(device_pager); - - vm_object_lock(anon_object); - vm_object_activity_begin(anon_object); - anon_object->blocked_access = TRUE; - vm_object_unlock(anon_object); - vm_object_lock(device_object); - vm_object_activity_begin(device_object); - device_object->blocked_access = TRUE; - vm_object_unlock(device_object); - - assert(anon_object->ref_count == 1); - assert(!anon_object->named); - assert(device_object->ref_count == 2); - assert(device_object->named); - - kr = vm_object_transpose(device_object, anon_object, size); - assert(kr == KERN_SUCCESS); - - vm_object_lock(anon_object); - vm_object_activity_end(anon_object); - anon_object->blocked_access = FALSE; - vm_object_unlock(anon_object); - vm_object_lock(device_object); - vm_object_activity_end(device_object); - device_object->blocked_access = FALSE; - vm_object_unlock(device_object); - - assert(anon_object->ref_count == 2); - assert(anon_object->named); -#if 0 - kr = vm_deallocate(kernel_map, anon_mapping, size); - assert(kr == KERN_SUCCESS); -#endif - assert(device_object->ref_count == 1); - assert(!device_object->named); - kr = vm_deallocate(kernel_map, device_mapping, size); - assert(kr == KERN_SUCCESS); - - printf("VM_TEST_DEVICE_PAGER_TRANSPOSE: PASS\n"); -} -#else /* VM_TEST_DEVICE_PAGER_TRANSPOSE */ -#define vm_test_device_pager_transpose() -#endif /* VM_TEST_DEVICE_PAGER_TRANSPOSE */ - -void -vm_tests(void) -{ - vm_test_collapse_compressor(); - vm_test_wire_and_extract(); - vm_test_page_wire_overflow_panic(); - vm_test_kernel_object_fault(); - vm_test_device_pager_transpose(); -} diff --git a/osfmk/vm/vm_pageout.h b/osfmk/vm/vm_pageout.h index d4e947c16..980095f6f 100644 --- a/osfmk/vm/vm_pageout.h +++ b/osfmk/vm/vm_pageout.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2019 Apple Inc. All rights reserved. + * Copyright (c) 2000-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -94,7 +94,28 @@ extern unsigned int vm_pageout_cleaned_fault_reactivated; #if CONFIG_FREEZE extern boolean_t memorystatus_freeze_enabled; -#endif + +struct freezer_context { + /* + * All these counters & variables track the task + * being frozen. + * Currently we only freeze one task at a time. Should that + * change, we'll need to add support for multiple freezer contexts. + */ + + task_t freezer_ctx_task; /* Task being frozen. */ + + void *freezer_ctx_chead; /* The chead used to track c_segs allocated */ + /* to freeze the task.*/ + + uint64_t freezer_ctx_swapped_bytes; /* Tracks # of compressed bytes.*/ + + int freezer_ctx_uncompressed_pages; /* Tracks # of uncompressed pages frozen. */ + + char *freezer_ctx_compressor_scratch_buf; /* Scratch buffer for the compressor algorithm. 
*/ +}; + +#endif /* CONFIG_FREEZE */ #define VM_DYNAMIC_PAGING_ENABLED() (VM_CONFIG_COMPRESSOR_IS_ACTIVE) @@ -170,6 +191,8 @@ extern int vm_debug_events; #define VM_PRESSURE_LEVEL_CHANGE 0x141 +#define VM_PHYS_WRITE_ACCT 0x142 + #define VM_DEBUG_EVENT(name, event, control, arg1, arg2, arg3, arg4) \ MACRO_BEGIN \ if (__improbable(vm_debug_events)) { \ @@ -206,6 +229,15 @@ extern void upl_set_associated_upl(upl_t upl, upl_t associated_upl); #ifdef XNU_KERNEL_PRIVATE +extern upl_size_t upl_adjusted_size( + upl_t upl, + vm_map_offset_t page_mask); +extern vm_object_offset_t upl_adjusted_offset( + upl_t upl, + vm_map_offset_t page_mask); +extern vm_object_offset_t upl_get_data_offset( + upl_t upl); + extern kern_return_t vm_map_create_upl( vm_map_t map, vm_map_address_t offset, @@ -261,18 +293,19 @@ extern unsigned int vm_page_anonymous_count; * manipulate this structure */ struct vm_pageout_queue { - vm_page_queue_head_t pgo_pending; /* laundry pages to be processed by pager's iothread */ - unsigned int pgo_laundry; /* current count of laundry pages on queue or in flight */ + vm_page_queue_head_t pgo_pending; /* laundry pages to be processed by pager's iothread */ + uint64_t pgo_tid; /* thread ID of I/O thread that services this queue */ + unsigned int pgo_laundry; /* current count of laundry pages on queue or in flight */ unsigned int pgo_maxlaundry; - uint64_t pgo_tid; /* thread ID of I/O thread that services this queue */ - uint8_t pgo_lowpriority; /* iothread is set to use low priority I/O */ - unsigned int pgo_idle:1, /* iothread is blocked waiting for work to do */ - pgo_busy:1, /* iothread is currently processing request from pgo_pending */ - pgo_throttled:1, /* vm_pageout_scan thread needs a wakeup when pgo_laundry drops */ + uint32_t + pgo_idle:1, /* iothread is blocked waiting for work to do */ + pgo_busy:1, /* iothread is currently processing request from pgo_pending */ + pgo_throttled:1, /* vm_pageout_scan thread needs a wakeup when pgo_laundry drops */ + pgo_lowpriority:1, /* iothread is set to use low priority I/O */ pgo_draining:1, pgo_inited:1, - :0; + pgo_unused_bits:26; }; #define VM_PAGE_Q_THROTTLED(q) \ @@ -357,8 +390,13 @@ struct upl { int ref_count; int ext_ref_count; int flags; - vm_object_offset_t offset; - upl_size_t size; /* size in bytes of the address space */ + /* + * XXX CAUTION: to accomodate devices with "mixed page sizes", + * u_offset and u_size are now byte-aligned and no longer + * page-aligned, on all devices. 
+ */ + vm_object_offset_t u_offset; + upl_size_t u_size; /* size in bytes of the address space */ vm_offset_t kaddr; /* secondary mapping in kernel */ vm_object_t map_object; ppnum_t highest_page; @@ -469,6 +507,12 @@ extern kern_return_t vm_map_remove_upl( /* wired page list structure */ typedef uint32_t *wpl_array_t; +extern struct vm_page_delayed_work* +vm_page_delayed_work_get_ctx(void); + +extern void +vm_page_delayed_work_finish_ctx(struct vm_page_delayed_work* dwp); + extern void vm_page_free_reserve(int pages); extern void vm_pageout_throttle_down(vm_page_t page); @@ -558,7 +602,7 @@ extern int hibernate_flush_memory(void); extern void hibernate_reset_stats(void); extern void hibernate_create_paddr_map(void); -extern void vm_set_restrictions(void); +extern void vm_set_restrictions(unsigned int num_cpus); extern int vm_compressor_mode; extern kern_return_t vm_pageout_compress_page(void **, char *, vm_page_t); diff --git a/osfmk/vm/vm_protos.h b/osfmk/vm/vm_protos.h index 66dbe7ce7..871b3c714 100644 --- a/osfmk/vm/vm_protos.h +++ b/osfmk/vm/vm_protos.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2004-2007 Apple Inc. All rights reserved. + * Copyright (c) 2004-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -73,7 +73,7 @@ extern mach_port_name_t ipc_port_copyout_send( ipc_space_t space); extern task_t port_name_to_task( mach_port_name_t name); -extern task_t port_name_to_task_inspect( +extern task_t port_name_to_task_name( mach_port_name_t name); extern void ipc_port_release_send( ipc_port_t port); @@ -90,8 +90,8 @@ extern int max_task_footprint_mb; /* Per-task limit on physical memory con extern vm_map_t kalloc_map; extern vm_size_t msg_ool_size_small; -extern vm_map_t zone_map; +extern kern_return_t vm_tests(void); extern void consider_machine_adjust(void); extern vm_map_offset_t get_map_min(vm_map_t); extern vm_map_offset_t get_map_max(vm_map_t); @@ -124,7 +124,7 @@ vnode_pager_get_object_vnode( uint32_t * vid); #if CONFIG_COREDUMP -extern boolean_t coredumpok(vm_map_t map, vm_offset_t va); +extern boolean_t coredumpok(vm_map_t map, mach_vm_offset_t va); #endif /* @@ -170,8 +170,8 @@ extern kern_return_t vm_map_apple_protected( vm_map_offset_t start, vm_map_offset_t end, vm_object_offset_t crypto_backing_offset, - struct pager_crypt_info *crypt_info); -extern void apple_protect_pager_bootstrap(void); + struct pager_crypt_info *crypt_info, + uint32_t cryptid); extern memory_object_t apple_protect_pager_setup( vm_object_t backing_object, vm_object_offset_t backing_offset, @@ -188,14 +188,30 @@ extern kern_return_t vm_map_shared_region( vm_map_offset_t end, vm_object_offset_t backing_offset, struct vm_shared_region_slide_info *slide_info); -extern void shared_region_pager_bootstrap(void); + extern memory_object_t shared_region_pager_setup( vm_object_t backing_object, vm_object_offset_t backing_offset, - struct vm_shared_region_slide_info *slide_info); + struct vm_shared_region_slide_info *slide_info, + uint64_t jop_key); +#if __has_feature(ptrauth_calls) +extern memory_object_t shared_region_pager_match( + vm_object_t backing_object, + vm_object_offset_t backing_offset, + struct vm_shared_region_slide_info *slide_info, + uint64_t jop_key); +extern void shared_region_key_alloc( + char *shared_region_id, + bool inherit, + uint64_t inherited_key); +extern void shared_region_key_dealloc( + char *shared_region_id); +extern uint64_t generate_jop_key(void); +extern void shared_region_pager_match_task_key(memory_object_t memobj, task_t task); +#endif /* 
__has_feature(ptrauth_calls) */ +extern bool vm_shared_region_is_reslide(struct task *task); struct vnode; -extern void swapfile_pager_bootstrap(void); extern memory_object_t swapfile_pager_setup(struct vnode *vp); extern memory_object_control_t swapfile_pager_control(memory_object_t mem_obj); @@ -205,18 +221,18 @@ extern memory_object_control_t swapfile_pager_control(memory_object_t mem_obj); #define SIXTEENK_PAGE_SHIFT 14 #endif /* __arm64__ || (__ARM_ARCH_7K__ >= 2) */ -#if __arm64__ #define FOURK_PAGE_SIZE 0x1000 #define FOURK_PAGE_MASK 0xFFF #define FOURK_PAGE_SHIFT 12 +#if __arm64__ + extern unsigned int page_shift_user32; #define VM_MAP_DEBUG_FOURK MACH_ASSERT #if VM_MAP_DEBUG_FOURK extern int vm_map_debug_fourk; #endif /* VM_MAP_DEBUG_FOURK */ -extern void fourk_pager_bootstrap(void); extern memory_object_t fourk_pager_create(void); extern vm_object_t fourk_pager_to_vm_object(memory_object_t mem_obj); extern kern_return_t fourk_pager_populate( @@ -299,7 +315,6 @@ void vnode_pager_issue_reprioritize_io( #endif /* CHECK_CS_VALIDATION_BITMAP */ -extern void vnode_pager_bootstrap(void); extern kern_return_t vnode_pager_data_unlock( memory_object_t mem_obj, @@ -448,7 +463,7 @@ extern memory_object_t device_pager_setup( uintptr_t, vm_size_t, int); -extern void device_pager_bootstrap(void); + extern boolean_t is_device_pager_ops(const struct memory_object_pager_ops *pager_ops); extern kern_return_t pager_map_to_phys_contiguous( @@ -493,6 +508,14 @@ extern boolean_t cs_validate_range(struct vnode *vp, const void *data, vm_size_t size, unsigned *result); +extern void cs_validate_page( + struct vnode *vp, + memory_object_t pager, + memory_object_offset_t offset, + const void *data, + int *validated_p, + int *tainted_p, + int *nx_p); #if PMAP_CS extern kern_return_t cs_associate_blob_with_mapping( void *pmap, @@ -523,6 +546,23 @@ extern kern_return_t mach_memory_entry_get_page_counts( unsigned int *resident_page_count, unsigned int *dirty_page_count); +extern kern_return_t mach_memory_entry_phys_page_offset( + ipc_port_t entry_port, + vm_object_offset_t *offset_p); + +extern kern_return_t mach_memory_entry_map_size( + ipc_port_t entry_port, + vm_map_t map, + memory_object_offset_t offset, + memory_object_offset_t size, + mach_vm_size_t *map_size); + +extern kern_return_t vm_map_range_physical_size( + vm_map_t map, + vm_map_address_t start, + mach_vm_size_t size, + mach_vm_size_t * phys_size); + extern kern_return_t mach_memory_entry_page_op( ipc_port_t entry_port, vm_object_offset_t offset, @@ -542,6 +582,14 @@ extern void mach_destroy_memory_entry(ipc_port_t port); extern kern_return_t mach_memory_entry_allocate( struct vm_named_entry **user_entry_p, ipc_port_t *user_handle_p); +extern vm_object_t vm_named_entry_to_vm_object( + vm_named_entry_t named_entry); +extern kern_return_t vm_named_entry_from_vm_object( + vm_named_entry_t named_entry, + vm_object_t object, + vm_object_offset_t offset, + vm_object_size_t size, + vm_prot_t prot); extern void vm_paging_map_init(void); @@ -649,6 +697,13 @@ extern kern_return_t mach_make_memory_entry_internal( ipc_port_t *object_handle, ipc_port_t parent_handle); +extern kern_return_t +memory_entry_check_for_adjustment( + vm_map_t src_map, + ipc_port_t port, + vm_map_offset_t *overmap_start, + vm_map_offset_t *overmap_end); + #define roundup(x, y) ((((x) % (y)) == 0) ? 
\ (x) : ((x) + ((y) - ((x) % (y))))) @@ -677,6 +732,104 @@ extern kern_return_t mach_make_memory_entry_internal( #endif /* __arm64__ */ +#if MACH_ASSERT +struct proc; +extern struct proc *current_proc(void); +extern int proc_pid(struct proc *); +extern char *proc_best_name(struct proc *); +struct thread; +extern uint64_t thread_tid(struct thread *); +extern int debug4k_filter; +extern int debug4k_proc_filter; +extern char debug4k_proc_name[]; +extern const char *debug4k_category_name[]; + +#define __DEBUG4K(category, fmt, ...) \ + MACRO_BEGIN \ + int __category = (category); \ + struct thread *__t = NULL; \ + struct proc *__p = NULL; \ + const char *__pname = "?"; \ + boolean_t __do_log = FALSE; \ + \ + if ((1 << __category) & debug4k_filter) { \ + __do_log = TRUE; \ + } else if (((1 << __category) & debug4k_proc_filter) && \ + debug4k_proc_name[0] != '\0') { \ + __p = current_proc(); \ + if (__p != NULL) { \ + __pname = proc_best_name(__p); \ + } \ + if (!strcmp(debug4k_proc_name, __pname)) { \ + __do_log = TRUE; \ + } \ + } \ + if (__do_log) { \ + if (__p == NULL) { \ + __p = current_proc(); \ + if (__p != NULL) { \ + __pname = proc_best_name(__p); \ + } \ + } \ + __t = current_thread(); \ + printf("DEBUG4K(%s) %d[%s] %p(0x%llx) %s:%d: " fmt, \ + debug4k_category_name[__category], \ + __p ? proc_pid(__p) : 0, \ + __pname, \ + __t, \ + thread_tid(__t), \ + __FUNCTION__, \ + __LINE__, \ + ##__VA_ARGS__); \ + } \ + MACRO_END + +#define __DEBUG4K_ERROR 0 +#define __DEBUG4K_LIFE 1 +#define __DEBUG4K_LOAD 2 +#define __DEBUG4K_FAULT 3 +#define __DEBUG4K_COPY 4 +#define __DEBUG4K_SHARE 5 +#define __DEBUG4K_ADJUST 6 +#define __DEBUG4K_PMAP 7 +#define __DEBUG4K_MEMENTRY 8 +#define __DEBUG4K_IOKIT 9 +#define __DEBUG4K_UPL 10 +#define __DEBUG4K_EXC 11 +#define __DEBUG4K_VFS 12 + +#define DEBUG4K_ERROR(...) __DEBUG4K(__DEBUG4K_ERROR, ##__VA_ARGS__) +#define DEBUG4K_LIFE(...) __DEBUG4K(__DEBUG4K_LIFE, ##__VA_ARGS__) +#define DEBUG4K_LOAD(...) __DEBUG4K(__DEBUG4K_LOAD, ##__VA_ARGS__) +#define DEBUG4K_FAULT(...) __DEBUG4K(__DEBUG4K_FAULT, ##__VA_ARGS__) +#define DEBUG4K_COPY(...) __DEBUG4K(__DEBUG4K_COPY, ##__VA_ARGS__) +#define DEBUG4K_SHARE(...) __DEBUG4K(__DEBUG4K_SHARE, ##__VA_ARGS__) +#define DEBUG4K_ADJUST(...) __DEBUG4K(__DEBUG4K_ADJUST, ##__VA_ARGS__) +#define DEBUG4K_PMAP(...) __DEBUG4K(__DEBUG4K_PMAP, ##__VA_ARGS__) +#define DEBUG4K_MEMENTRY(...) __DEBUG4K(__DEBUG4K_MEMENTRY, ##__VA_ARGS__) +#define DEBUG4K_IOKIT(...) __DEBUG4K(__DEBUG4K_IOKIT, ##__VA_ARGS__) +#define DEBUG4K_UPL(...) __DEBUG4K(__DEBUG4K_UPL, ##__VA_ARGS__) +#define DEBUG4K_EXC(...) __DEBUG4K(__DEBUG4K_EXC, ##__VA_ARGS__) +#define DEBUG4K_VFS(...) __DEBUG4K(__DEBUG4K_VFS, ##__VA_ARGS__) + +#else /* MACH_ASSERT */ + +#define DEBUG4K_ERROR(...) +#define DEBUG4K_LIFE(...) +#define DEBUG4K_LOAD(...) +#define DEBUG4K_FAULT(...) +#define DEBUG4K_COPY(...) +#define DEBUG4K_SHARE(...) +#define DEBUG4K_ADJUST(...) +#define DEBUG4K_PMAP(...) +#define DEBUG4K_MEMENTRY(...) +#define DEBUG4K_IOKIT(...) +#define DEBUG4K_UPL(...) +#define DEBUG4K_EXC(...) +#define DEBUG4K_VFS(...) + +#endif /* MACH_ASSERT */ + #endif /* _VM_VM_PROTOS_H_ */ #endif /* XNU_KERNEL_PRIVATE */ diff --git a/osfmk/vm/vm_purgeable.c b/osfmk/vm/vm_purgeable.c index 17350f63b..b44ec0cf8 100644 --- a/osfmk/vm/vm_purgeable.c +++ b/osfmk/vm/vm_purgeable.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019 Apple Inc. All rights reserved. + * Copyright (c) 2019-2020 Apple Inc. All rights reserved. 
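The DEBUG4K_* macros added to vm_protos.h above gate each message on a per-category bit in debug4k_filter (or on a process-name filter). A self-contained sketch of the same bitmask check, with only a subset of the categories and without the process-name path; names mirror the macro but this is not the kernel code:

#include <stdio.h>

enum { DBG4K_ERROR = 0, DBG4K_FAULT = 3, DBG4K_COPY = 4 };   /* subset of the categories above */

static unsigned debug4k_filter;   /* in the kernel this mask is settable via boot-arg/sysctl */

#define DEBUG4K(category, fmt, ...)                                  \
    do {                                                             \
        if ((1u << (category)) & debug4k_filter) {                   \
            printf("DEBUG4K(%d) " fmt, (category), ##__VA_ARGS__);   \
        }                                                            \
    } while (0)

int main(void)
{
    debug4k_filter = (1u << DBG4K_FAULT) | (1u << DBG4K_COPY);   /* enable two categories */

    DEBUG4K(DBG4K_FAULT, "fault at 0x%llx\n", 0x16fe00000ULL);   /* printed */
    DEBUG4K(DBG4K_ERROR, "this one is filtered out\n");          /* silent  */
    return 0;
}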
* * @APPLE_LICENSE_HEADER_START@ * @@ -1679,7 +1679,7 @@ vm_object_owner_compressed_update( case VM_PURGABLE_DENY: /* not purgeable: must be ledger-tagged */ assert(object->vo_ledger_tag != VM_LEDGER_TAG_NONE); - /* fallthru */ + OS_FALLTHROUGH; case VM_PURGABLE_NONVOLATILE: if (delta > 0) { ledger_credit(owner->ledger, diff --git a/osfmk/vm/vm_resident.c b/osfmk/vm/vm_resident.c index bf3b6d3a8..45430fce9 100644 --- a/osfmk/vm/vm_resident.c +++ b/osfmk/vm/vm_resident.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2019 Apple Inc. All rights reserved. + * Copyright (c) 2000-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -71,12 +71,13 @@ #include #include #include +#include #include #include #include #include #include -#include +#include #include #include #include @@ -85,7 +86,6 @@ #include #include /* kernel_memory_allocate() */ #include -#include #include #include #include @@ -103,13 +103,19 @@ #include #endif +#if HIBERNATION #include +#include +#endif /* HIBERNATION */ #include #if defined(HAS_APPLE_PAC) #include #endif +#if defined(__arm64__) +#include +#endif /* defined(__arm64__) */ #if MACH_ASSERT @@ -132,9 +138,16 @@ char vm_page_active_or_inactive_states[VM_PAGE_Q_STATE_ARRAY_SIZE]; #if CONFIG_SECLUDED_MEMORY struct vm_page_secluded_data vm_page_secluded; -void secluded_suppression_init(void); #endif /* CONFIG_SECLUDED_MEMORY */ +#if DEVELOPMENT || DEBUG +extern struct memory_object_pager_ops shared_region_pager_ops; +unsigned int shared_region_pagers_resident_count = 0; +unsigned int shared_region_pagers_resident_peak = 0; +#endif /* DEVELOPMENT || DEBUG */ + +int PERCPU_DATA(start_color); +vm_page_t PERCPU_DATA(free_pages); boolean_t hibernate_cleaning_in_progress = FALSE; boolean_t vm_page_free_verify = TRUE; @@ -155,16 +168,14 @@ struct vm_speculative_age_q vm_page_queue_speculative[VM_PAGE_MAX_SPECULATIVE_AG boolean_t hibernation_vmqueues_inspection = FALSE; /* Tracks if the hibernation code is looking at the VM queues. * Updated and checked behind the vm_page_queues_lock. 
*/ -__private_extern__ void vm_page_init_lck_grp(void); - static void vm_page_free_prepare(vm_page_t page); static vm_page_t vm_page_grab_fictitious_common(ppnum_t phys_addr); static void vm_tag_init(void); -uint64_t vm_min_kernel_and_kext_address = VM_MIN_KERNEL_AND_KEXT_ADDRESS; -uint32_t vm_packed_from_vm_pages_array_mask = VM_PACKED_FROM_VM_PAGES_ARRAY; -uint32_t vm_packed_pointer_shift = VM_PACKED_POINTER_SHIFT; +/* for debugging purposes */ +SECURITY_READ_ONLY_EARLY(vm_packing_params_t) vm_page_packing_params = + VM_PACKING_PARAMS(VM_PAGE_PACKED_PTR); /* * Associated with page of user-allocatable memory is a @@ -216,8 +227,6 @@ unsigned int vm_page_bucket_lock_count = 0; /* How big is array of l boolean_t vm_tag_active_update = VM_TAG_ACTIVE_UPDATE; lck_spin_t *vm_page_bucket_locks; -lck_spin_t vm_objects_wired_lock; -lck_spin_t vm_allocation_sites_lock; vm_allocation_site_t vm_allocation_sites_static[VM_KERN_MEMORY_FIRST_DYNAMIC + 1]; vm_allocation_site_t * vm_allocation_sites[VM_MAX_TAG_VALUE]; @@ -294,9 +303,9 @@ vm_size_t page_mask = PAGE_MASK; int page_shift = PAGE_SHIFT; #endif -vm_page_t vm_pages = VM_PAGE_NULL; -vm_page_t vm_page_array_beginning_addr; -vm_page_t vm_page_array_ending_addr; +SECURITY_READ_ONLY_LATE(vm_page_t) vm_pages = VM_PAGE_NULL; +SECURITY_READ_ONLY_LATE(vm_page_t) vm_page_array_beginning_addr; +vm_page_t vm_page_array_ending_addr; unsigned int vm_pages_count = 0; @@ -313,7 +322,7 @@ unsigned int vm_free_magazine_refill_limit = 0; struct vm_page_queue_free_head { vm_page_queue_head_t qhead; -} __attribute__((aligned(VM_PACKED_POINTER_ALIGNMENT))); +} VM_PAGE_PACKED_ALIGNED; struct vm_page_queue_free_head vm_page_queue_free[MAX_COLORS]; @@ -334,16 +343,23 @@ unsigned int vm_page_free_count; * These page structures are allocated the way * most other kernel structures are. */ -zone_t vm_page_array_zone; -zone_t vm_page_zone; +SECURITY_READ_ONLY_LATE(zone_t) vm_page_zone; vm_locks_array_t vm_page_locks; -decl_lck_mtx_data(, vm_page_alloc_lock); -lck_mtx_ext_t vm_page_alloc_lock_ext; -unsigned int vm_page_local_q_count = 0; +LCK_ATTR_DECLARE(vm_page_lck_attr, 0, 0); +LCK_GRP_DECLARE(vm_page_lck_grp_free, "vm_page_free"); +LCK_GRP_DECLARE(vm_page_lck_grp_queue, "vm_page_queue"); +LCK_GRP_DECLARE(vm_page_lck_grp_local, "vm_page_queue_local"); +LCK_GRP_DECLARE(vm_page_lck_grp_purge, "vm_page_purge"); +LCK_GRP_DECLARE(vm_page_lck_grp_alloc, "vm_page_alloc"); +LCK_GRP_DECLARE(vm_page_lck_grp_bucket, "vm_page_bucket"); +LCK_MTX_EARLY_DECLARE_ATTR(vm_page_alloc_lock, &vm_page_lck_grp_alloc, &vm_page_lck_attr); +LCK_SPIN_DECLARE_ATTR(vm_objects_wired_lock, &vm_page_lck_grp_bucket, &vm_page_lck_attr); +LCK_SPIN_DECLARE_ATTR(vm_allocation_sites_lock, &vm_page_lck_grp_bucket, &vm_page_lck_attr); + unsigned int vm_page_local_q_soft_limit = 250; unsigned int vm_page_local_q_hard_limit = 500; -struct vplq *vm_page_local_q = NULL; +struct vpl *__zpercpu vm_page_local_q; /* N.B. Guard and fictitious pages must not * be assigned a zero phys_page value. 
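The vm_page_packing_params declaration above replaces the old hand-rolled globals (vm_packed_from_vm_pages_array_mask, vm_packed_pointer_shift). The underlying idea is that a 64-bit, suitably aligned vm_page pointer can be stored in 32 bits by subtracting a base and shifting out the alignment bits. A conceptual sketch under assumed base/shift values; the real parameters come from VM_PACKING_PARAMS(VM_PAGE_PACKED_PTR):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PACK_BASE  0xffffff8000000000ULL   /* assumed packing base address */
#define PACK_SHIFT 6                       /* assumed 64-byte object alignment */

static uint32_t
pack_ptr(uintptr_t p)
{
    if (p == 0) {
        return 0;
    }
    assert(p >= PACK_BASE && ((p - PACK_BASE) >> PACK_SHIFT) <= UINT32_MAX);
    assert(((p - PACK_BASE) & ((1ULL << PACK_SHIFT) - 1)) == 0);
    return (uint32_t)((p - PACK_BASE) >> PACK_SHIFT);
}

static uintptr_t
unpack_ptr(uint32_t packed)
{
    return packed ? (uintptr_t)(PACK_BASE + ((uint64_t)packed << PACK_SHIFT)) : 0;
}

int main(void)
{
    uintptr_t page = (uintptr_t)(PACK_BASE + 0x1240);   /* 64-byte aligned example address */
    assert(unpack_ptr(pack_ptr(page)) == page);
    printf("0x%lx packs to 0x%x\n", (unsigned long)page, pack_ptr(page));
    return 0;
}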
@@ -376,20 +392,20 @@ const ppnum_t vm_page_guard_addr = (ppnum_t) -2; * pageout daemon often assignes a higher * importance to anonymous pages (less likely to pick) */ -vm_page_queue_head_t vm_page_queue_active __attribute__((aligned(VM_PACKED_POINTER_ALIGNMENT))); -vm_page_queue_head_t vm_page_queue_inactive __attribute__((aligned(VM_PACKED_POINTER_ALIGNMENT))); +vm_page_queue_head_t vm_page_queue_active VM_PAGE_PACKED_ALIGNED; +vm_page_queue_head_t vm_page_queue_inactive VM_PAGE_PACKED_ALIGNED; #if CONFIG_SECLUDED_MEMORY -vm_page_queue_head_t vm_page_queue_secluded __attribute__((aligned(VM_PACKED_POINTER_ALIGNMENT))); +vm_page_queue_head_t vm_page_queue_secluded VM_PAGE_PACKED_ALIGNED; #endif /* CONFIG_SECLUDED_MEMORY */ -vm_page_queue_head_t vm_page_queue_anonymous __attribute__((aligned(VM_PACKED_POINTER_ALIGNMENT))); /* inactive memory queue for anonymous pages */ -vm_page_queue_head_t vm_page_queue_throttled __attribute__((aligned(VM_PACKED_POINTER_ALIGNMENT))); +vm_page_queue_head_t vm_page_queue_anonymous VM_PAGE_PACKED_ALIGNED; /* inactive memory queue for anonymous pages */ +vm_page_queue_head_t vm_page_queue_throttled VM_PAGE_PACKED_ALIGNED; queue_head_t vm_objects_wired; void vm_update_darkwake_mode(boolean_t); #if CONFIG_BACKGROUND_QUEUE -vm_page_queue_head_t vm_page_queue_background __attribute__((aligned(VM_PACKED_POINTER_ALIGNMENT))); +vm_page_queue_head_t vm_page_queue_background VM_PAGE_PACKED_ALIGNED; uint32_t vm_page_background_target; uint32_t vm_page_background_target_snapshot; uint32_t vm_page_background_count; @@ -404,6 +420,7 @@ uint32_t vm_page_background_exclude_external; unsigned int vm_page_active_count; unsigned int vm_page_inactive_count; +unsigned int vm_page_kernelcache_count; #if CONFIG_SECLUDED_MEMORY unsigned int vm_page_secluded_count; unsigned int vm_page_secluded_count_free; @@ -442,7 +459,7 @@ unsigned int vm_page_speculative_created = 0; unsigned int vm_page_speculative_used = 0; #endif -vm_page_queue_head_t vm_page_queue_cleaned __attribute__((aligned(VM_PACKED_POINTER_ALIGNMENT))); +vm_page_queue_head_t vm_page_queue_cleaned VM_PAGE_PACKED_ALIGNED; unsigned int vm_page_cleaned_count = 0; @@ -831,74 +848,38 @@ vm_free_delayed_pages_contig( } } - -lck_grp_t vm_page_lck_grp_free; -lck_grp_t vm_page_lck_grp_queue; -lck_grp_t vm_page_lck_grp_local; -lck_grp_t vm_page_lck_grp_purge; -lck_grp_t vm_page_lck_grp_alloc; -lck_grp_t vm_page_lck_grp_bucket; -lck_grp_attr_t vm_page_lck_grp_attr; -lck_attr_t vm_page_lck_attr; - - -__private_extern__ void -vm_page_init_lck_grp(void) -{ - /* - * initialze the vm_page lock world - */ - lck_grp_attr_setdefault(&vm_page_lck_grp_attr); - lck_grp_init(&vm_page_lck_grp_free, "vm_page_free", &vm_page_lck_grp_attr); - lck_grp_init(&vm_page_lck_grp_queue, "vm_page_queue", &vm_page_lck_grp_attr); - lck_grp_init(&vm_page_lck_grp_local, "vm_page_queue_local", &vm_page_lck_grp_attr); - lck_grp_init(&vm_page_lck_grp_purge, "vm_page_purge", &vm_page_lck_grp_attr); - lck_grp_init(&vm_page_lck_grp_alloc, "vm_page_alloc", &vm_page_lck_grp_attr); - lck_grp_init(&vm_page_lck_grp_bucket, "vm_page_bucket", &vm_page_lck_grp_attr); - lck_attr_setdefault(&vm_page_lck_attr); - lck_mtx_init_ext(&vm_page_alloc_lock, &vm_page_alloc_lock_ext, &vm_page_lck_grp_alloc, &vm_page_lck_attr); - - vm_compressor_init_locks(); -} - #define ROUNDUP_NEXTP2(X) (1U << (32 - __builtin_clz((X) - 1))) void -vm_page_init_local_q() +vm_page_init_local_q(unsigned int num_cpus) { - unsigned int num_cpus; - unsigned int i; - struct vplq *t_local_q; - - num_cpus = 
ml_get_max_cpus(); + struct vpl *t_local_q; /* * no point in this for a uni-processor system */ if (num_cpus >= 2) { -#if KASAN - /* KASAN breaks the expectation of a size-aligned object by adding a - * redzone, so explicitly align. */ - t_local_q = (struct vplq *)kalloc(num_cpus * sizeof(struct vplq) + VM_PACKED_POINTER_ALIGNMENT); - t_local_q = (void *)(((uintptr_t)t_local_q + (VM_PACKED_POINTER_ALIGNMENT - 1)) & ~(VM_PACKED_POINTER_ALIGNMENT - 1)); -#else - /* round the size up to the nearest power of two */ - t_local_q = (struct vplq *)kalloc(ROUNDUP_NEXTP2(num_cpus * sizeof(struct vplq))); -#endif + ml_cpu_info_t cpu_info; + + /* + * Force the allocation alignment to a cacheline, + * because the `vpl` struct has a lock and will be taken + * cross CPU so we want to isolate the rest of the per-CPU + * data to avoid false sharing due to this lock being taken. + */ + + ml_cpu_get_info(&cpu_info); - for (i = 0; i < num_cpus; i++) { - struct vpl *lq; + t_local_q = zalloc_percpu_permanent(sizeof(struct vpl), + cpu_info.cache_line_size - 1); - lq = &t_local_q[i].vpl_un.vpl; + zpercpu_foreach(lq, t_local_q) { VPL_LOCK_INIT(lq, &vm_page_lck_grp_local, &vm_page_lck_attr); vm_page_queue_init(&lq->vpl_queue); - lq->vpl_count = 0; - lq->vpl_internal_count = 0; - lq->vpl_external_count = 0; } - vm_page_local_q_count = num_cpus; - vm_page_local_q = (struct vplq *)t_local_q; + /* make the initialization visible to all cores */ + os_atomic_store(&vm_page_local_q, t_local_q, release); } } @@ -926,7 +907,7 @@ vm_init_before_launchd() * Each page cell is initialized and placed on the free list. * Returns the range of available kernel virtual memory. */ - +__startup_func void vm_page_bootstrap( vm_offset_t *startp, @@ -940,7 +921,6 @@ vm_page_bootstrap( /* * Initialize the page queues. */ - vm_page_init_lck_grp(); lck_mtx_init_ext(&vm_page_queue_free_lock, &vm_page_queue_free_lock_ext, &vm_page_lck_grp_free, &vm_page_lck_attr); lck_mtx_init_ext(&vm_page_queue_lock, &vm_page_queue_lock_ext, &vm_page_lck_grp_queue, &vm_page_lck_attr); @@ -1060,10 +1040,10 @@ vm_page_bootstrap( vm_page_active_or_inactive_states[VM_PAGE_ON_SECLUDED_Q] = 1; #endif /* CONFIG_SECLUDED_MEMORY */ - for (i = 0; i < VM_KERN_MEMORY_FIRST_DYNAMIC; i++) { - vm_allocation_sites_static[i].refcount = 2; - vm_allocation_sites_static[i].tag = i; - vm_allocation_sites[i] = &vm_allocation_sites_static[i]; + for (vm_tag_t t = 0; t < VM_KERN_MEMORY_FIRST_DYNAMIC; t++) { + vm_allocation_sites_static[t].refcount = 2; + vm_allocation_sites_static[t].tag = t; + vm_allocation_sites[t] = &vm_allocation_sites_static[t]; } vm_allocation_sites_static[VM_KERN_MEMORY_FIRST_DYNAMIC].refcount = 2; vm_allocation_sites_static[VM_KERN_MEMORY_FIRST_DYNAMIC].tag = VM_KERN_MEMORY_ANY; @@ -1072,11 +1052,7 @@ vm_page_bootstrap( /* * Steal memory for the map and zone subsystems. 
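The cache-line alignment forced on the per-CPU vpl allocation above is about false sharing: if two CPUs' vpl locks land in the same cache line, every acquisition bounces that line between cores. A minimal layout sketch assuming a 64-byte line; the kernel obtains the real line size from ml_cpu_get_info():

#include <stdio.h>

#define CACHE_LINE_SIZE 64   /* assumed; queried at runtime in the kernel */

/* Bad: adjacent CPUs' queue locks can share one cache line. */
struct vpl_packed {
    unsigned long vpl_lock;
    unsigned long vpl_count;
};

/* Good: each per-CPU slot is padded out to its own line. */
struct vpl_padded {
    unsigned long vpl_lock;
    unsigned long vpl_count;
} __attribute__((aligned(CACHE_LINE_SIZE)));

int main(void)
{
    printf("packed slot %zu bytes, padded slot %zu bytes\n",
        sizeof(struct vpl_packed), sizeof(struct vpl_padded));
    return 0;
}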
*/ -#if CONFIG_GZALLOC - gzalloc_configure(); -#endif - kernel_debug_string_early("vm_map_steal_memory"); - vm_map_steal_memory(); + kernel_startup_initialize_upto(STARTUP_SUB_PMAP_STEAL); /* * Allocate (and initialize) the virtual-to-physical @@ -1173,8 +1149,6 @@ vm_page_bootstrap( lck_spin_init(&vm_page_bucket_locks[i], &vm_page_lck_grp_bucket, &vm_page_lck_attr); } - lck_spin_init(&vm_objects_wired_lock, &vm_page_lck_grp_bucket, &vm_page_lck_attr); - lck_spin_init(&vm_allocation_sites_lock, &vm_page_lck_grp_bucket, &vm_page_lck_attr); vm_tag_init(); #if VM_PAGE_BUCKETS_CHECK @@ -1219,7 +1193,6 @@ vm_page_bootstrap( vm_page_free_count, vm_page_wire_count, vm_delayed_count); kernel_debug_string_early("vm_page_bootstrap complete"); - simple_lock_init(&vm_paging_lock, 0); } #ifndef MACHINE_PAGES @@ -1368,6 +1341,7 @@ int secluded_for_filecache = 2; /* filecache can use seclude memory */ int secluded_for_fbdp = 0; #endif uint64_t secluded_shutoff_trigger = 0; +uint64_t secluded_shutoff_headroom = 150 * 1024 * 1024; /* original value from N56 */ #endif /* CONFIG_SECLUDED_MEMORY */ @@ -1472,26 +1446,24 @@ pmap_startup( #endif /* - * On small devices, allow a large app to effectively suppress - * secluded memory until it exits. + * Allow a really large app to effectively use secluded memory until it exits. */ - if (max_mem <= 1 * 1024 * 1024 * 1024 && vm_page_secluded_target != 0) { + if (vm_page_secluded_target != 0) { /* - * Get an amount from boot-args, else use 500MB. - * 500MB was chosen from a Peace daemon tentpole test which used munch - * to induce jetsam thrashing of false idle daemons. + * Get an amount from boot-args, else use 1/2 of max_mem. + * 1/2 max_mem was chosen from a Peace daemon tentpole test which + * used munch to induce jetsam thrashing of false idle daemons on N56. */ int secluded_shutoff_mb; if (PE_parse_boot_argn("secluded_shutoff_mb", &secluded_shutoff_mb, sizeof(secluded_shutoff_mb))) { secluded_shutoff_trigger = (uint64_t)secluded_shutoff_mb * 1024 * 1024; } else { - secluded_shutoff_trigger = 500 * 1024 * 1024; + secluded_shutoff_trigger = max_mem / 2; } - if (secluded_shutoff_trigger != 0) { - secluded_suppression_init(); - } + /* ensure the headroom value is sensible and avoid underflows */ + assert(secluded_shutoff_trigger == 0 || secluded_shutoff_trigger > secluded_shutoff_headroom); } #endif /* CONFIG_SECLUDED_MEMORY */ @@ -1526,10 +1498,15 @@ pmap_startup( /* * Initialize and release the page frames. 
*/ - kernel_debug_string_early("Initialize and free the page frames"); + kernel_debug_string_early("page_frame_init"); vm_page_array_beginning_addr = &vm_pages[0]; vm_page_array_ending_addr = &vm_pages[npages]; /* used by ptr packing/unpacking code */ +#if VM_PAGE_PACKED_FROM_ARRAY + if (npages >= VM_PAGE_PACKED_FROM_ARRAY) { + panic("pmap_startup(): too many pages to support vm_page packing"); + } +#endif vm_delayed_count = 0; @@ -1587,8 +1564,7 @@ pmap_startup( printf("pmap_startup() init/release time: %lld microsec\n", (now_ns - start_ns) / NSEC_PER_USEC); printf("pmap_startup() delayed init/release of %d pages\n", vm_delayed_count); -#if defined(__LP64__) - +#if defined(__LP64__) if ((vm_page_t)(VM_PAGE_UNPACK_PTR(VM_PAGE_PACK_PTR(&vm_pages[0]))) != &vm_pages[0]) { panic("VM_PAGE_PACK_PTR failed on &vm_pages[0] - %p", (void *)&vm_pages[0]); } @@ -1618,29 +1594,25 @@ pmap_startup( static void vm_page_module_init_delayed(void) { - uint64_t vm_page_zone_pages, vm_page_array_zone_data_size; - - vm_page_array_zone = zinit((vm_size_t) sizeof(struct vm_page), - 0, PAGE_SIZE, "vm pages array"); + (void)zone_create_ext("vm pages array", sizeof(struct vm_page), + ZC_NOGZALLOC, ZONE_ID_ANY, ^(zone_t z) { + uint64_t vm_page_zone_pages, vm_page_array_zone_data_size; - zone_change(vm_page_array_zone, Z_CALLERACCT, FALSE); - zone_change(vm_page_array_zone, Z_EXPAND, FALSE); - zone_change(vm_page_array_zone, Z_EXHAUST, TRUE); - zone_change(vm_page_array_zone, Z_FOREIGN, TRUE); - zone_change(vm_page_array_zone, Z_GZALLOC_EXEMPT, TRUE); + zone_set_exhaustible(z, 0); + /* + * Reflect size and usage information for vm_pages[]. + */ - /* - * Reflect size and usage information for vm_pages[]. - */ - vm_page_array_zone->count = vm_pages_count; - vm_page_array_zone->countfree = (int)(vm_page_array_ending_addr - &vm_pages[vm_pages_count]); - vm_page_array_zone->sum_count = vm_pages_count; - vm_page_array_zone_data_size = (uintptr_t)((void *)vm_page_array_ending_addr - (void *)vm_pages); - vm_page_array_zone->cur_size = vm_page_array_zone_data_size; - vm_page_zone_pages = ((round_page(vm_page_array_zone_data_size)) / PAGE_SIZE); - OSAddAtomic64(vm_page_zone_pages, &(vm_page_array_zone->page_count)); - /* since zone accounts for these, take them out of stolen */ - VM_PAGE_MOVE_STOLEN(vm_page_zone_pages); + z->countavail = (uint32_t)(vm_page_array_ending_addr - vm_pages); + z->countfree = z->countavail - vm_pages_count; + zpercpu_get_cpu(z->z_stats, 0)->zs_mem_allocated = + vm_pages_count * sizeof(struct vm_page); + vm_page_array_zone_data_size = (uintptr_t)((void *)vm_page_array_ending_addr - (void *)vm_pages); + vm_page_zone_pages = atop(round_page((vm_offset_t)vm_page_array_zone_data_size)); + z->page_count += vm_page_zone_pages; + /* since zone accounts for these, take them out of stolen */ + VM_PAGE_MOVE_STOLEN(vm_page_zone_pages); + }); } /* @@ -1648,7 +1620,9 @@ vm_page_module_init_delayed(void) * that are scavanged from other boot time usages by ml_static_mfree(). As such, * this needs to happen in early VM bootstrap. */ -void + +__startup_func +static void vm_page_module_init(void) { vm_size_t vm_page_with_ppnum_size; @@ -1658,18 +1632,19 @@ vm_page_module_init(void) * must have appropriate size. Not strictly what sizeof() reports. 
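The size computation that follows this comment is the standard power-of-two align-up idiom; a tiny illustration with concrete numbers, where 64 stands in for VM_PAGE_PACKED_PTR_ALIGNMENT:

#include <assert.h>

#define ALIGN_UP(x, a)   (((x) + ((a) - 1)) & ~((a) - 1))   /* 'a' must be a power of two */

int main(void)
{
    assert(ALIGN_UP(72, 64) == 128);   /* a 72-byte struct rounds up to two 64-byte units */
    assert(ALIGN_UP(64, 64) == 64);    /* already-aligned sizes are unchanged */
    return 0;
}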
*/ vm_page_with_ppnum_size = - (sizeof(struct vm_page_with_ppnum) + (VM_PACKED_POINTER_ALIGNMENT - 1)) & - ~(VM_PACKED_POINTER_ALIGNMENT - 1); - - vm_page_zone = zinit(vm_page_with_ppnum_size, 0, PAGE_SIZE, "vm pages"); - - zone_change(vm_page_zone, Z_CALLERACCT, FALSE); - zone_change(vm_page_zone, Z_EXPAND, FALSE); - zone_change(vm_page_zone, Z_EXHAUST, TRUE); - zone_change(vm_page_zone, Z_FOREIGN, TRUE); - zone_change(vm_page_zone, Z_GZALLOC_EXEMPT, TRUE); - zone_change(vm_page_zone, Z_ALIGNMENT_REQUIRED, TRUE); + (sizeof(struct vm_page_with_ppnum) + (VM_PAGE_PACKED_PTR_ALIGNMENT - 1)) & + ~(VM_PAGE_PACKED_PTR_ALIGNMENT - 1); + + vm_page_zone = zone_create_ext("vm pages", vm_page_with_ppnum_size, + ZC_ALLOW_FOREIGN | ZC_NOGZALLOC | ZC_ALIGNMENT_REQUIRED | + ZC_NOCALLOUT, ZONE_ID_ANY, ^(zone_t z) { +#if defined(__LP64__) + zone_set_submap_idx(z, Z_SUBMAP_IDX_VA_RESTRICTED_MAP); +#endif + zone_set_exhaustible(z, 0); + }); } +STARTUP(ZALLOC, STARTUP_RANK_SECOND, vm_page_module_init); /* * Routine: vm_page_create @@ -1775,7 +1750,7 @@ vm_page_insert_internal( VM_PAGE_CHECK(mem); #endif - assert(page_aligned(offset)); + assertf(page_aligned(offset), "0x%llx\n", offset); assert(!VM_PAGE_WIRED(mem) || mem->vmp_private || mem->vmp_fictitious || (tag != VM_KERN_MEMORY_NONE)); @@ -1792,7 +1767,7 @@ vm_page_insert_internal( } if (insert_in_hash == TRUE) { -#if DEBUG || VM_PAGE_CHECK_BUCKETS +#if DEBUG || VM_PAGE_BUCKETS_CHECK if (mem->vmp_tabled || mem->vmp_object) { panic("vm_page_insert: page %p for (obj=%p,off=0x%llx) " "already in (obj=%p,off=0x%llx)", @@ -1870,6 +1845,20 @@ vm_page_insert_internal( } assert(object->resident_page_count >= object->wired_page_count); +#if DEVELOPMENT || DEBUG + if (object->object_is_shared_cache && + object->pager != NULL && + object->pager->mo_pager_ops == &shared_region_pager_ops) { + int new, old; + assert(!object->internal); + new = OSAddAtomic(+1, &shared_region_pagers_resident_count); + do { + old = shared_region_pagers_resident_peak; + } while (old < new && + !OSCompareAndSwap(old, new, &shared_region_pagers_resident_peak)); + } +#endif /* DEVELOPMENT || DEBUG */ + if (batch_accounting == FALSE) { if (object->internal) { OSAddAtomic(1, &vm_page_internal_count); @@ -2007,7 +1996,7 @@ vm_page_replace( VM_PAGE_CHECK(mem); #endif vm_object_lock_assert_exclusive(object); -#if DEBUG || VM_PAGE_CHECK_BUCKETS +#if DEBUG || VM_PAGE_BUCKETS_CHECK if (mem->vmp_tabled || mem->vmp_object) { panic("vm_page_replace: page %p for (obj=%p,off=0x%llx) " "already in (obj=%p,off=0x%llx)", @@ -2168,6 +2157,15 @@ vm_page_remove( assert(m_object->resident_page_count > 0); m_object->resident_page_count--; +#if DEVELOPMENT || DEBUG + if (m_object->object_is_shared_cache && + m_object->pager != NULL && + m_object->pager->mo_pager_ops == &shared_region_pager_ops) { + assert(!m_object->internal); + OSAddAtomic(-1, &shared_region_pagers_resident_count); + } +#endif /* DEVELOPMENT || DEBUG */ + if (m_object->internal) { #if DEBUG assert(vm_page_internal_count); @@ -2342,6 +2340,7 @@ vm_page_lookup( OSAddAtomic64(1, &vm_page_lookup_stats.vpl_total); #endif vm_object_lock_assert_held(object); + assertf(page_aligned(offset), "offset 0x%llx\n", offset); if (object->resident_page_count == 0) { #if DEBUG_VM_PAGE_LOOKUP @@ -2633,7 +2632,7 @@ vm_page_grab_fictitious_common( { vm_page_t m; - if ((m = (vm_page_t)zget(vm_page_zone))) { + if ((m = (vm_page_t)zalloc_noblock(vm_page_zone))) { vm_page_init(m, phys_addr, FALSE); m->vmp_fictitious = TRUE; @@ -2698,7 +2697,7 @@ 
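The shared_region_pagers_resident_peak update in vm_page_insert_internal() above is a lock-free running maximum: retry the compare-and-swap only while the observed peak is still below the new count. A standalone sketch of the same idiom using C11 atomics, with OSCompareAndSwap playing the role of the CAS below:

#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned int resident_count;
static _Atomic unsigned int resident_peak;

static void
note_resident_page(void)
{
    unsigned int new = atomic_fetch_add(&resident_count, 1) + 1;
    unsigned int old = atomic_load(&resident_peak);

    /* Raise the peak only while it is still below us; losing the race is fine. */
    while (old < new &&
        !atomic_compare_exchange_weak(&resident_peak, &old, new)) {
        /* 'old' was reloaded by the failed CAS; the loop re-tests it. */
    }
}

int main(void)
{
    for (int i = 0; i < 1000; i++) {
        note_resident_page();
    }
    printf("count %u peak %u\n",
        atomic_load(&resident_count), atomic_load(&resident_peak));
    return 0;
}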
vm_page_release_fictitious( * with the zones code, for several reasons: * 1. we need to carve some page structures out of physical * memory before zones work, so they _cannot_ come from - * the zone_map. + * the zone restricted submap. * 2. the zone needs to be collectable in order to prevent * growth without bound. These structures are used by * the device pager (by the hundreds and thousands), as @@ -2719,19 +2718,20 @@ vm_page_more_fictitious(void) c_vm_page_more_fictitious++; /* - * Allocate a single page from the zone_map. Do not wait if no physical - * pages are immediately available, and do not zero the space. We need - * our own blocking lock here to prevent having multiple, - * simultaneous requests from piling up on the zone_map lock. Exactly - * one (of our) threads should be potentially waiting on the map lock. - * If winner is not vm-privileged, then the page allocation will fail, - * and it will temporarily block here in the vm_page_wait(). + * Allocate a single page from the zone restricted submap. Do not wait + * if no physical pages are immediately available, and do not zero the + * space. We need our own blocking lock here to prevent having multiple, + * simultaneous requests from piling up on the zone restricted submap + * lock. + * Exactly one (of our) threads should be potentially waiting on the map + * lock. If winner is not vm-privileged, then the page allocation will + * fail, and it will temporarily block here in the vm_page_wait(). */ lck_mtx_lock(&vm_page_alloc_lock); /* * If another thread allocated space, just bail out now. */ - if (zone_free_count(vm_page_zone) > 5) { + if (os_atomic_load(&vm_page_zone->countfree, relaxed) > 5) { /* * The number "5" is a small number that is larger than the * number of fictitious pages that any single caller will @@ -2748,9 +2748,10 @@ vm_page_more_fictitious(void) return; } - retval = kernel_memory_allocate(zone_map, - &addr, PAGE_SIZE, 0, - KMA_KOBJECT | KMA_NOPAGEWAIT, VM_KERN_MEMORY_ZONE); + retval = kernel_memory_allocate(zone_submap(vm_page_zone), + &addr, PAGE_SIZE, 0, KMA_ZERO | KMA_KOBJECT | KMA_NOPAGEWAIT, + VM_KERN_MEMORY_ZONE); + if (retval != KERN_SUCCESS) { /* * No page was available. 
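The vm_page_more_fictitious() path above uses a common replenish pattern: serialize on a private lock, then re-check the free count so that only one of several racing threads actually allocates. A generic, purely illustrative sketch of that pattern; the stub allocate_backing_page() merely stands in for kernel_memory_allocate():

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t replenish_lock = PTHREAD_MUTEX_INITIALIZER;
static int free_count;   /* stands in for the zone's free-element count */

/* Stub standing in for the real page allocation; returns 0 on success. */
static int
allocate_backing_page(void)
{
    return 0;
}

static void
more_elements(void)
{
    pthread_mutex_lock(&replenish_lock);

    /* Another thread may have replenished while we waited for the lock. */
    if (free_count > 5) {
        pthread_mutex_unlock(&replenish_lock);
        return;
    }
    if (allocate_backing_page() == 0) {
        free_count += 64;   /* one page's worth of new elements */
    }
    pthread_mutex_unlock(&replenish_lock);
}

int main(void)
{
    more_elements();
    printf("free_count now %d\n", free_count);
    return 0;
}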
Drop the @@ -2991,7 +2992,7 @@ boolean_t vm_himemory_mode = TRUE; unsigned int vm_lopages_allocated_q = 0; unsigned int vm_lopages_allocated_cpm_success = 0; unsigned int vm_lopages_allocated_cpm_failed = 0; -vm_page_queue_head_t vm_lopage_queue_free __attribute__((aligned(VM_PACKED_POINTER_ALIGNMENT))); +vm_page_queue_head_t vm_lopage_queue_free VM_PAGE_PACKED_ALIGNED; vm_page_t vm_page_grablo(void) @@ -3053,7 +3054,7 @@ vm_page_grablo(void) VM_PAGE_ZERO_PAGEQ_ENTRY(mem); disable_preemption(); - PROCESSOR_DATA(current_processor(), page_grab_count) += 1; + *PERCPU_GET(vm_page_grab_count) += 1; VM_DEBUG_EVENT(vm_page_grab, VM_PAGE_GRAB, DBG_FUNC_NONE, 0, 1, 0, 0); enable_preemption(); @@ -3107,7 +3108,7 @@ vm_page_grab_options( disable_preemption(); - if ((mem = PROCESSOR_DATA(current_processor(), free_pages))) { + if ((mem = *PERCPU_GET(free_pages))) { return_page_from_cpu_list: assert(mem->vmp_q_state == VM_PAGE_ON_FREE_LOCAL_Q); @@ -3118,8 +3119,10 @@ return_page_from_cpu_list: #endif /* HIBERNATION */ vm_page_grab_diags(); - PROCESSOR_DATA(current_processor(), page_grab_count) += 1; - PROCESSOR_DATA(current_processor(), free_pages) = mem->vmp_snext; + + vm_offset_t pcpu_base = current_percpu_base(); + *PERCPU_GET_WITH_BASE(pcpu_base, vm_page_grab_count) += 1; + *PERCPU_GET_WITH_BASE(pcpu_base, free_pages) = mem->vmp_snext; VM_DEBUG_EVENT(vm_page_grab, VM_PAGE_GRAB, DBG_FUNC_NONE, grab_options, 0, 0, 0); enable_preemption(); @@ -3200,7 +3203,7 @@ return_page_from_cpu_list: disable_preemption(); vm_page_grab_diags(); - PROCESSOR_DATA(current_processor(), page_grab_count) += 1; + *PERCPU_GET(vm_page_grab_count) += 1; VM_DEBUG_EVENT(vm_page_grab, VM_PAGE_GRAB, DBG_FUNC_NONE, grab_options, 0, 0, 0); enable_preemption(); @@ -3231,7 +3234,7 @@ return_page_from_cpu_list: disable_preemption(); - if ((mem = PROCESSOR_DATA(current_processor(), free_pages))) { + if ((mem = *PERCPU_GET(free_pages))) { lck_mtx_unlock(&vm_page_queue_free_lock); /* @@ -3249,7 +3252,7 @@ return_page_from_cpu_list: pages_to_steal = (vm_page_free_count - vm_page_free_reserved); } } - color = PROCESSOR_DATA(current_processor(), start_color); + color = *PERCPU_GET(start_color); head = tail = NULL; vm_page_free_count -= pages_to_steal; @@ -3322,14 +3325,15 @@ return_page_from_cpu_list: panic("%s:%d should not modify cpu->free_pages while hibernating", __FUNCTION__, __LINE__); } #endif /* HIBERNATION */ - PROCESSOR_DATA(current_processor(), free_pages) = head->vmp_snext; - PROCESSOR_DATA(current_processor(), start_color) = color; + vm_offset_t pcpu_base = current_percpu_base(); + *PERCPU_GET_WITH_BASE(pcpu_base, free_pages) = head->vmp_snext; + *PERCPU_GET_WITH_BASE(pcpu_base, start_color) = color; /* * satisfy this request */ vm_page_grab_diags(); - PROCESSOR_DATA(current_processor(), page_grab_count) += 1; + *PERCPU_GET_WITH_BASE(pcpu_base, vm_page_grab_count) += 1; VM_DEBUG_EVENT(vm_page_grab, VM_PAGE_GRAB, DBG_FUNC_NONE, grab_options, 0, 0, 0); mem = head; assert(mem->vmp_q_state == VM_PAGE_ON_FREE_LOCAL_Q); @@ -5267,7 +5271,7 @@ vm_page_reactivate_local(uint32_t lid, boolean_t force, boolean_t nolocks) return; } - lq = &vm_page_local_q[lid].vpl_un.vpl; + lq = zpercpu_get_cpu(vm_page_local_q, lid); if (nolocks == FALSE) { if (lq->vpl_count < vm_page_local_q_hard_limit && force == FALSE) { @@ -5476,7 +5480,7 @@ vm_page_copy( * source page now. 
*/ vm_page_copy_cs_validations++; - vm_page_validate_cs(src_m); + vm_page_validate_cs(src_m, PAGE_SIZE, 0); #if DEVELOPMENT || DEBUG DTRACE_VM4(codesigned_copy, vm_object_t, src_m_object, @@ -5491,6 +5495,7 @@ vm_page_copy( * the cs_validated bit. */ dest_m->vmp_cs_tainted = src_m->vmp_cs_tainted; + dest_m->vmp_cs_nx = src_m->vmp_cs_nx; if (dest_m->vmp_cs_tainted) { vm_page_copy_cs_tainted++; } @@ -5539,10 +5544,10 @@ _vm_page_print( (p->vmp_overwriting ? "" : "!"), (p->vmp_restart ? "" : "!"), (p->vmp_unusual ? "" : "!")); - printf(" %scs_validated, %scs_tainted, %scs_nx, %sno_cache\n", - (p->vmp_cs_validated ? "" : "!"), - (p->vmp_cs_tainted ? "" : "!"), - (p->vmp_cs_nx ? "" : "!"), + printf(" cs_validated=%d, cs_tainted=%d, cs_nx=%d, %sno_cache\n", + p->vmp_cs_validated, + p->vmp_cs_tainted, + p->vmp_cs_nx, (p->vmp_no_cache ? "" : "!")); printf("phys_page=0x%x\n", VM_PAGE_GET_PHYS_PAGE(p)); @@ -6291,7 +6296,7 @@ did_consider: m2->vmp_pmapped = TRUE; m2->vmp_wpmapped = TRUE; - PMAP_ENTER(kernel_pmap, m2->vmp_offset, m2, + PMAP_ENTER(kernel_pmap, (vm_map_offset_t)m2->vmp_offset, m2, VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE, 0, TRUE, kr); assert(kr == KERN_SUCCESS); @@ -6773,7 +6778,6 @@ void hibernate_clear_in_progress(void); void hibernate_free_range(int, int); void hibernate_hash_insert_page(vm_page_t); uint32_t hibernate_mark_as_unneeded(addr64_t, addr64_t, hibernate_page_list_t *, hibernate_page_list_t *); -void hibernate_rebuild_vm_structs(void); uint32_t hibernate_teardown_vm_structs(hibernate_page_list_t *, hibernate_page_list_t *); ppnum_t hibernate_lookup_paddr(unsigned int); @@ -7085,8 +7089,8 @@ hibernate_flush_dirty_pages(int pass) uint32_t i; if (vm_page_local_q) { - for (i = 0; i < vm_page_local_q_count; i++) { - vm_page_reactivate_local(i, TRUE, FALSE); + zpercpu_foreach_cpu(lid) { + vm_page_reactivate_local(lid, TRUE, FALSE); } } @@ -7456,10 +7460,7 @@ hibernate_vm_lock_queues(void) lck_mtx_lock(&vm_purgeable_queue_lock); if (vm_page_local_q) { - uint32_t i; - for (i = 0; i < vm_page_local_q_count; i++) { - struct vpl *lq; - lq = &vm_page_local_q[i].vpl_un.vpl; + zpercpu_foreach(lq, vm_page_local_q) { VPL_LOCK(&lq->vpl_lock); } } @@ -7469,10 +7470,7 @@ void hibernate_vm_unlock_queues(void) { if (vm_page_local_q) { - uint32_t i; - for (i = 0; i < vm_page_local_q_count; i++) { - struct vpl *lq; - lq = &vm_page_local_q[i].vpl_un.vpl; + zpercpu_foreach(lq, vm_page_local_q) { VPL_UNLOCK(&lq->vpl_lock); } } @@ -7532,9 +7530,7 @@ hibernate_page_list_setall(hibernate_page_list_t * page_list, assert(hibernate_vm_locks_are_safe()); vm_page_lock_queues(); if (vm_page_local_q) { - for (i = 0; i < vm_page_local_q_count; i++) { - struct vpl *lq; - lq = &vm_page_local_q[i].vpl_un.vpl; + zpercpu_foreach(lq, vm_page_local_q) { VPL_LOCK(&lq->vpl_lock); } } @@ -7556,8 +7552,8 @@ hibernate_page_list_setall(hibernate_page_list_t * page_list, } if (vm_page_local_q) { - for (i = 0; i < vm_page_local_q_count; i++) { - vm_page_reactivate_local(i, TRUE, !preflight); + zpercpu_foreach_cpu(lid) { + vm_page_reactivate_local(lid, TRUE, !preflight); } } @@ -7583,19 +7579,17 @@ hibernate_page_list_setall(hibernate_page_list_t * page_list, } if (!preflight) { - for (i = 0; i < real_ncpus; i++) { - if (cpu_data_ptr[i] && cpu_data_ptr[i]->cpu_processor) { - for (m = PROCESSOR_DATA(cpu_data_ptr[i]->cpu_processor, free_pages); m; m = m->vmp_snext) { - assert(m->vmp_q_state == VM_PAGE_ON_FREE_LOCAL_Q); + percpu_foreach(free_pages_head, free_pages) { + for (m = *free_pages_head; m; m = m->vmp_snext) { + 
assert(m->vmp_q_state == VM_PAGE_ON_FREE_LOCAL_Q); - pages--; - count_wire--; - hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m)); - hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m)); + pages--; + count_wire--; + hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m)); + hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m)); - hibernate_stats.cd_local_free++; - hibernate_stats.cd_total_free++; - } + hibernate_stats.cd_local_free++; + hibernate_stats.cd_total_free++; } } } @@ -7877,9 +7871,7 @@ hibernate_page_list_setall(hibernate_page_list_t * page_list, #if MACH_ASSERT || DEBUG if (!preflight) { if (vm_page_local_q) { - for (i = 0; i < vm_page_local_q_count; i++) { - struct vpl *lq; - lq = &vm_page_local_q[i].vpl_un.vpl; + zpercpu_foreach(lq, vm_page_local_q) { VPL_UNLOCK(&lq->vpl_lock); } } @@ -7913,9 +7905,7 @@ hibernate_page_list_discard(hibernate_page_list_t * page_list) #if MACH_ASSERT || DEBUG vm_page_lock_queues(); if (vm_page_local_q) { - for (i = 0; i < vm_page_local_q_count; i++) { - struct vpl *lq; - lq = &vm_page_local_q[i].vpl_un.vpl; + zpercpu_foreach(lq, vm_page_local_q) { VPL_LOCK(&lq->vpl_lock); } } @@ -8004,9 +7994,7 @@ hibernate_page_list_discard(hibernate_page_list_t * page_list) #if MACH_ASSERT || DEBUG if (vm_page_local_q) { - for (i = 0; i < vm_page_local_q_count; i++) { - struct vpl *lq; - lq = &vm_page_local_q[i].vpl_un.vpl; + zpercpu_foreach(lq, vm_page_local_q) { VPL_UNLOCK(&lq->vpl_lock); } } @@ -8042,7 +8030,7 @@ struct ppnum_mapping *ppnm_last_found = NULL; void -hibernate_create_paddr_map() +hibernate_create_paddr_map(void) { unsigned int i; ppnum_t next_ppnum_in_run = 0; @@ -8055,7 +8043,7 @@ hibernate_create_paddr_map() } if (ppnm == NULL || VM_PAGE_GET_PHYS_PAGE(&vm_pages[i]) != next_ppnum_in_run) { - ppnm = kalloc(sizeof(struct ppnum_mapping)); + ppnm = zalloc_permanent_type(struct ppnum_mapping); ppnm->ppnm_next = ppnm_head; ppnm_head = ppnm; @@ -8173,9 +8161,6 @@ hibernate_free_range(int sindx, int eindx) } } - -extern void hibernate_rebuild_pmap_structs(void); - void hibernate_rebuild_vm_structs(void) { @@ -8193,7 +8178,7 @@ hibernate_rebuild_vm_structs(void) clock_get_uptime(&startTime); - hibernate_rebuild_pmap_structs(); + pal_hib_rebuild_pmap_structs(); bzero(&vm_page_buckets[0], vm_page_bucket_count * sizeof(vm_page_bucket_t)); eindx = vm_pages_count; @@ -8269,9 +8254,6 @@ hibernate_rebuild_vm_structs(void) KDBG(IOKDBG_CODE(DBG_HIBERNATE, 13) | DBG_FUNC_END); } - -extern void hibernate_teardown_pmap_structs(addr64_t *, addr64_t *); - uint32_t hibernate_teardown_vm_structs(hibernate_page_list_t *page_list, hibernate_page_list_t *page_list_wired) { @@ -8368,7 +8350,7 @@ hibernate_teardown_vm_structs(hibernate_page_list_t *page_list, hibernate_page_l (addr64_t)&vm_pages[vm_pages_count - 1], page_list, page_list_wired); mark_as_unneeded_pages += unneeded_vm_pages_pages; - hibernate_teardown_pmap_structs(&start_of_unneeded, &end_of_unneeded); + pal_hib_teardown_pmap_structs(&start_of_unneeded, &end_of_unneeded); if (start_of_unneeded) { unneeded_pmap_pages = hibernate_mark_as_unneeded(start_of_unneeded, end_of_unneeded, page_list, page_list_wired); @@ -8596,7 +8578,7 @@ vm_page_queues_remove(vm_page_t mem, boolean_t __unused remove_from_backgroundq) { struct vpl *lq; - lq = &vm_page_local_q[mem->vmp_local_id].vpl_un.vpl; + lq = zpercpu_get_cpu(vm_page_local_q, mem->vmp_local_id); VPL_LOCK(&lq->vpl_lock); vm_page_queue_remove(&lq->vpl_queue, mem, vmp_pageq); mem->vmp_local_id = 0; @@ -8945,7 
+8927,7 @@ vm_tag_alloc_locked(vm_allocation_site_t * site, vm_allocation_site_t ** release while (TRUE) { avail = free_tag_bits[idx]; if (avail) { - tag = __builtin_clzll(avail); + tag = (vm_tag_t)__builtin_clzll(avail); avail &= ~(1ULL << (63 - tag)); free_tag_bits[idx] = avail; tag += (idx << 6); @@ -8972,7 +8954,7 @@ vm_tag_alloc_locked(vm_allocation_site_t * site, vm_allocation_site_t ** release } assert(idx == prev->tag); - tag = idx; + tag = (vm_tag_t)idx; prev->tag = VM_KERN_MEMORY_NONE; *releasesiteP = prev; break; @@ -9227,7 +9209,7 @@ kern_allocation_update_subtotal(kern_allocation_name_t allocation, uint32_t subt lck_spin_lock(&vm_allocation_sites_lock); for (; subidx < allocation->subtotalscount; subidx++) { if (VM_KERN_MEMORY_NONE == allocation->subtotals[subidx].tag) { - allocation->subtotals[subidx].tag = subtag; + allocation->subtotals[subidx].tag = (vm_tag_t)subtag; break; } if (subtag == allocation->subtotals[subidx].tag) { @@ -9259,19 +9241,20 @@ kern_allocation_get_name(kern_allocation_name_t allocation) } kern_allocation_name_t -kern_allocation_name_allocate(const char * name, uint32_t subtotalscount) +kern_allocation_name_allocate(const char * name, uint16_t subtotalscount) { - uint32_t namelen; + uint16_t namelen; - namelen = (uint32_t) strnlen(name, MACH_MEMORY_INFO_NAME_MAX_LEN - 1); + namelen = (uint16_t)strnlen(name, MACH_MEMORY_INFO_NAME_MAX_LEN - 1); kern_allocation_name_t allocation; - allocation = kalloc(KA_SIZE(namelen, subtotalscount)); + allocation = kheap_alloc(KHEAP_DATA_BUFFERS, + KA_SIZE(namelen, subtotalscount), Z_WAITOK); bzero(allocation, KA_SIZE(namelen, subtotalscount)); allocation->refcount = 1; allocation->subtotalscount = subtotalscount; - allocation->flags = (namelen << VM_TAG_NAME_LEN_SHIFT); + allocation->flags = (uint16_t)(namelen << VM_TAG_NAME_LEN_SHIFT); strlcpy(KA_NAME(allocation), name, namelen + 1); return allocation; @@ -9282,7 +9265,8 @@ kern_allocation_name_release(kern_allocation_name_t allocation) { assert(allocation->refcount > 0); if (1 == OSAddAtomic16(-1, &allocation->refcount)) { - kfree(allocation, KA_SIZE(KA_NAME_LEN(allocation), allocation->subtotalscount)); + kheap_free(KHEAP_DATA_BUFFERS, allocation, + KA_SIZE(KA_NAME_LEN(allocation), allocation->subtotalscount)); } } @@ -9341,7 +9325,8 @@ vm_page_iterate_objects(mach_memory_info_t * info, unsigned int num_info, #endif /* ! 
VM_TAG_ACTIVE_UPDATE */ static uint64_t -process_account(mach_memory_info_t * info, unsigned int num_info, uint64_t zones_collectable_bytes, boolean_t iterated) +process_account(mach_memory_info_t * info, unsigned int num_info, + uint64_t zones_collectable_bytes, boolean_t iterated) { size_t namelen; unsigned int idx, count, nextinfo; @@ -9414,13 +9399,13 @@ process_account(mach_memory_info_t * info, unsigned int num_info, uint64_t zones if (!zone[zidx].peak) { continue; } - info[nextinfo] = info[idx]; - info[nextinfo].zone = zone_index_from_tag_index(zidx, &elem_size); - info[nextinfo].flags &= ~VM_KERN_SITE_WIRED; - info[nextinfo].flags |= VM_KERN_SITE_ZONE; - info[nextinfo].size = zone[zidx].total; - info[nextinfo].peak = zone[zidx].peak; - info[nextinfo].mapped = 0; + info[nextinfo] = info[idx]; + info[nextinfo].zone = (uint16_t)zone_index_from_tag_index(zidx, &elem_size); + info[nextinfo].flags &= ~VM_KERN_SITE_WIRED; + info[nextinfo].flags |= VM_KERN_SITE_ZONE; + info[nextinfo].size = zone[zidx].total; + info[nextinfo].peak = zone[zidx].peak; + info[nextinfo].mapped = 0; if (zone[zidx].wastediv) { info[nextinfo].collectable_bytes = ((zone[zidx].waste * zone[zidx].total / elem_size) / zone[zidx].wastediv); } @@ -9467,11 +9452,11 @@ uint32_t vm_page_diagnose_estimate(void) { vm_allocation_site_t * site; - uint32_t count; + uint32_t count = zone_view_count; uint32_t idx; lck_spin_lock(&vm_allocation_sites_lock); - for (count = idx = 0; idx < VM_MAX_TAG_VALUE; idx++) { + for (idx = 0; idx < VM_MAX_TAG_VALUE; idx++) { site = vm_allocation_sites[idx]; if (!site) { continue; @@ -9501,6 +9486,46 @@ vm_page_diagnose_estimate(void) return count; } +static void +vm_page_diagnose_zone_stats(mach_memory_info_t *info, zone_stats_t zstats, + bool percpu) +{ + zpercpu_foreach(zs, zstats) { + info->size += zs->zs_mem_allocated - zs->zs_mem_freed; + } + if (percpu) { + info->size *= zpercpu_count(); + } + info->flags |= VM_KERN_SITE_NAMED | VM_KERN_SITE_ZONE_VIEW; +} + +static void +vm_page_diagnose_zone(mach_memory_info_t *info, zone_t z) +{ + vm_page_diagnose_zone_stats(info, z->z_stats, z->percpu); + snprintf(info->name, sizeof(info->name), + "%s%s[raw]", zone_heap_name(z), z->z_name); +} + +static int +vm_page_diagnose_heap(mach_memory_info_t *info, kalloc_heap_t kheap) +{ + struct kheap_zones *zones = kheap->kh_zones; + int i = 0; + + for (; i < zones->max_k_zone; i++) { + vm_page_diagnose_zone(info + i, zones->k_zone[i]); + } + + for (kalloc_heap_t kh = zones->views; kh; kh = kh->kh_next, i++) { + vm_page_diagnose_zone_stats(info + i, kh->kh_stats, false); + snprintf(info[i].name, sizeof(info[i].name), + "%skalloc[%s]", kheap->kh_name, kh->kh_name); + } + + return i; +} + kern_return_t vm_page_diagnose(mach_memory_info_t * info, unsigned int num_info, uint64_t zones_collectable_bytes) { @@ -9509,6 +9534,7 @@ vm_page_diagnose(mach_memory_info_t * info, unsigned int num_info, uint64_t zone uint64_t wired_reserved_size; boolean_t iterate; mach_memory_info_t * counts; + uint32_t i; bzero(info, num_info * sizeof(mach_memory_info_t)); @@ -9546,6 +9572,7 @@ vm_page_diagnose(mach_memory_info_t * info, unsigned int num_info, uint64_t zone SET_COUNT(VM_KERN_COUNT_LOPAGE, ptoa_64(vm_lopage_free_count), VM_KERN_SITE_WIRED); SET_COUNT(VM_KERN_COUNT_WIRED_BOOT, ptoa_64(vm_page_wire_count_on_boot), 0); SET_COUNT(VM_KERN_COUNT_BOOT_STOLEN, booter_size, VM_KERN_SITE_WIRED); + SET_COUNT(VM_KERN_COUNT_WIRED_STATIC_KERNELCACHE, ptoa_64(vm_page_kernelcache_count), 0); #define SET_MAP(xcount, xsize, xfree, xlargest) 
\ counts[xcount].site = (xcount); \ @@ -9560,12 +9587,50 @@ vm_page_diagnose(mach_memory_info_t * info, unsigned int num_info, uint64_t zone vm_map_sizes(kernel_map, &map_size, &map_free, &map_largest); SET_MAP(VM_KERN_COUNT_MAP_KERNEL, map_size, map_free, map_largest); - vm_map_sizes(zone_map, &map_size, &map_free, &map_largest); + zone_map_sizes(&map_size, &map_free, &map_largest); SET_MAP(VM_KERN_COUNT_MAP_ZONE, map_size, map_free, map_largest); vm_map_sizes(kalloc_map, &map_size, &map_free, &map_largest); SET_MAP(VM_KERN_COUNT_MAP_KALLOC, map_size, map_free, map_largest); + assert(num_info >= zone_view_count); + num_info -= zone_view_count; + counts = &info[num_info]; + i = 0; + + i += vm_page_diagnose_heap(counts + i, KHEAP_DEFAULT); + if (KHEAP_DATA_BUFFERS->kh_heap_id == KHEAP_ID_DATA_BUFFERS) { + i += vm_page_diagnose_heap(counts + i, KHEAP_DATA_BUFFERS); + } + if (KHEAP_KEXT->kh_heap_id == KHEAP_ID_KEXT) { + i += vm_page_diagnose_heap(counts + i, KHEAP_KEXT); + } + assert(i <= zone_view_count); + + zone_index_foreach(zidx) { + zone_t z = &zone_array[zidx]; + zone_view_t zv = z->z_views; + + if (zv == NULL) { + continue; + } + + if (z->kalloc_heap == KHEAP_ID_NONE) { + vm_page_diagnose_zone(counts + i, z); + i++; + assert(i <= zone_view_count); + } + + for (; zv; zv = zv->zv_next) { + vm_page_diagnose_zone_stats(counts + i, zv->zv_stats, + z->percpu); + snprintf(counts[i].name, sizeof(counts[i].name), "%s%s[%s]", + zone_heap_name(z), z->z_name, zv->zv_name); + i++; + assert(i <= zone_view_count); + } + } + iterate = !VM_TAG_ACTIVE_UPDATE; if (iterate) { enum { kMaxKernelDepth = 1 }; @@ -9664,7 +9729,7 @@ vm_kern_allocation_info(uintptr_t addr, vm_size_t * size, vm_tag_t * tag, vm_siz if (entry->vme_start != addr) { break; } - *tag = VME_ALIAS(entry); + *tag = (vm_tag_t)VME_ALIAS(entry); *size = (entry->vme_end - addr); ret = KERN_SUCCESS; break; @@ -9710,22 +9775,8 @@ vm_tag_get_kext(vm_tag_t tag, char * name, vm_size_t namelen) unsigned int vm_page_secluded_suppress_cnt = 0; unsigned int vm_page_secluded_save_target; - -lck_grp_attr_t secluded_suppress_slock_grp_attr; -lck_grp_t secluded_suppress_slock_grp; -lck_attr_t secluded_suppress_slock_attr; -lck_spin_t secluded_suppress_slock; - -void -secluded_suppression_init(void) -{ - lck_grp_attr_setdefault(&secluded_suppress_slock_grp_attr); - lck_grp_init(&secluded_suppress_slock_grp, - "secluded_suppress_slock", &secluded_suppress_slock_grp_attr); - lck_attr_setdefault(&secluded_suppress_slock_attr); - lck_spin_init(&secluded_suppress_slock, - &secluded_suppress_slock_grp, &secluded_suppress_slock_attr); -} +LCK_GRP_DECLARE(secluded_suppress_slock_grp, "secluded_suppress_slock"); +LCK_SPIN_DECLARE(secluded_suppress_slock, &secluded_suppress_slock_grp); void start_secluded_suppression(task_t task) diff --git a/osfmk/vm/vm_shared_region.c b/osfmk/vm/vm_shared_region.c index 341ce4754..64c6aab05 100644 --- a/osfmk/vm/vm_shared_region.c +++ b/osfmk/vm/vm_shared_region.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2007 Apple Inc. All rights reserved. + * Copyright (c) 2007-2020 Apple Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ * @@ -32,39 +32,41 @@ * -------------- * * A shared region is a submap that contains the most common system shared - * libraries for a given environment. - * An environment is defined by (cpu-type, 64-bitness, root directory). + * libraries for a given environment which is defined by: + * - cpu-type + * - 64-bitness + * - root directory + * - Team ID - when we have pointer authentication. 
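To make the environment tuple above concrete, here is a minimal sketch of how a caller obtains the region for its environment, using the five-argument vm_shared_region_lookup() introduced later in this patch; the helper use_region() and the local variable names are illustrative only, not part of the change:

    vm_shared_region_t sr;

    /* one shared region per (root_dir, cpu, cpu_subtype, 64-bitness, reslide) environment;
     * on pointer-auth targets cpu_subtype is ignored (arm64/arm64e share a region) and
     * reslide participates in the match, per the #if blocks in vm_shared_region_lookup() */
    sr = vm_shared_region_lookup(fsroot, cputype, cpu_subtype, is_64bit, reslide);

    /* a matching, non-stale region is reused with an extra reference taken; otherwise a
     * fresh empty region is created and queued, so the caller always gets a region back */
    use_region(sr);
    vm_shared_region_deallocate(sr);    /* drop the reference when done */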
* * The point of a shared region is to reduce the setup overhead when exec'ing - * a new process. - * A shared region uses a shared VM submap that gets mapped automatically - * at exec() time (see vm_map_exec()). The first process of a given + * a new process. A shared region uses a shared VM submap that gets mapped + * automatically at exec() time, see vm_map_exec(). The first process of a given * environment sets up the shared region and all further processes in that * environment can re-use that shared region without having to re-create * the same mappings in their VM map. All they need is contained in the shared * region. - * It can also shared a pmap (mostly for read-only parts but also for the + * + * The region can also share a pmap (mostly for read-only parts but also for the * initial version of some writable parts), which gets "nested" into the * process's pmap. This reduces the number of soft faults: once one process * brings in a page in the shared region, all the other processes can access * it without having to enter it in their own pmap. * - * * When a process is being exec'ed, vm_map_exec() calls vm_shared_region_enter() * to map the appropriate shared region in the process's address space. * We look up the appropriate shared region for the process's environment. * If we can't find one, we create a new (empty) one and add it to the list. * Otherwise, we just take an extra reference on the shared region we found. * - * The "dyld" runtime (mapped into the process's address space at exec() time) - * will then use the shared_region_check_np() and shared_region_map_np() - * system call to validate and/or populate the shared region with the + * The "dyld" runtime, mapped into the process's address space at exec() time, + * will then use the shared_region_check_np() and shared_region_map_and_slide_np() + * system calls to validate and/or populate the shared region with the * appropriate dyld_shared_cache file. * * The shared region is inherited on fork() and the child simply takes an * extra reference on its parent's shared region. * - * When the task terminates, we release a reference on its shared region. + * When the task terminates, we release the reference on its shared region. * When the last reference is released, we destroy the shared region. * * After a chroot(), the calling process keeps using its original shared region, @@ -72,6 +74,7 @@ * will use a different shared region, because they need to use the shared * cache that's relative to the new root directory. */ + /* * COMM PAGE * @@ -84,11 +87,10 @@ * The comm pages are created and populated at boot time. * * The appropriate comm page is mapped into a process's address space - * at exec() time, in vm_map_exec(). - * It is then inherited on fork(). + * at exec() time, in vm_map_exec(). It is then inherited on fork(). * * The comm page is shared between the kernel and all applications of - * a given platform. Only the kernel can modify it. + * a given platform. Only the kernel can modify it. * * Applications just branch to fixed addresses in the comm page and find * the right version of the code for the platform. 
There is also some @@ -111,9 +113,11 @@ #include #include +#include #if defined (__arm__) || defined(__arm64__) #include +#include #endif /* @@ -122,9 +126,9 @@ */ #define PROCESS_SHARED_CACHE_LAYOUT 0x00 -#if defined(HAS_APPLE_PAC) +#if __has_feature(ptrauth_calls) #include -#endif /* HAS_APPLE_PAC */ +#endif /* __has_feature(ptrauth_calls) */ /* "dyld" uses this to figure out what the kernel supports */ int shared_region_version = 3; @@ -135,8 +139,9 @@ int shared_region_trace_level = SHARED_REGION_TRACE_ERROR_LVL; /* should local (non-chroot) shared regions persist when no task uses them ? */ int shared_region_persistence = 0; /* no by default */ -/* delay before reclaiming an unused shared region */ -int shared_region_destroy_delay = 120; /* in seconds */ + +/* delay in seconds before reclaiming an unused shared region */ +TUNABLE_WRITEABLE(int, shared_region_destroy_delay, "vm_shared_region_destroy_delay", 120); struct vm_shared_region *init_task_shared_region = NULL; @@ -150,8 +155,8 @@ boolean_t shared_region_completed_slide = FALSE; #endif /* this lock protects all the shared region data structures */ -lck_grp_t *vm_shared_region_lck_grp; -lck_mtx_t vm_shared_region_lock; +static LCK_GRP_DECLARE(vm_shared_region_lck_grp, "vm shared region"); +static LCK_MTX_DECLARE(vm_shared_region_lock, &vm_shared_region_lck_grp); #define vm_shared_region_lock() lck_mtx_lock(&vm_shared_region_lock) #define vm_shared_region_unlock() lck_mtx_unlock(&vm_shared_region_lock) @@ -162,26 +167,40 @@ lck_mtx_t vm_shared_region_lock; (interruptible)) /* the list of currently available shared regions (one per environment) */ -queue_head_t vm_shared_region_queue; +queue_head_t vm_shared_region_queue = QUEUE_HEAD_INITIALIZER(vm_shared_region_queue); +int vm_shared_region_count = 0; +int vm_shared_region_peak = 0; + +/* + * the number of times an event has forced the recalculation of the reslide + * shared region slide. + */ +#if __has_feature(ptrauth_calls) +int vm_shared_region_reslide_count = 0; +#endif /* __has_feature(ptrauth_calls) */ static void vm_shared_region_reference_locked(vm_shared_region_t shared_region); static vm_shared_region_t vm_shared_region_create( void *root_dir, cpu_type_t cputype, cpu_subtype_t cpu_subtype, - boolean_t is_64bit); + boolean_t is_64bit, + boolean_t reslide); static void vm_shared_region_destroy(vm_shared_region_t shared_region); +static kern_return_t vm_shared_region_slide_sanity_check(vm_shared_region_slide_info_entry_t entry, mach_vm_size_t size); static void vm_shared_region_timeout(thread_call_param_t param0, thread_call_param_t param1); -kern_return_t vm_shared_region_slide_mapping( +static kern_return_t vm_shared_region_slide_mapping( vm_shared_region_t sr, - mach_vm_size_t slide_info_size, - mach_vm_offset_t start, - mach_vm_size_t size, - mach_vm_offset_t slid_mapping, - uint32_t slide, - memory_object_control_t); /* forward */ + user_addr_t slide_info_addr, + mach_vm_size_t slide_info_size, + mach_vm_offset_t start, + mach_vm_size_t size, + mach_vm_offset_t slid_mapping, + uint32_t slide, + memory_object_control_t, + vm_prot_t prot); /* forward */ static int __commpage_setup = 0; #if !CONFIG_EMBEDDED @@ -189,27 +208,7 @@ static int __system_power_source = 1; /* init to extrnal power source */ static void post_sys_powersource_internal(int i, int internal); #endif - -/* - * Initialize the module... 
- */ -void -vm_shared_region_init(void) -{ - SHARED_REGION_TRACE_DEBUG( - ("shared_region: -> init\n")); - - vm_shared_region_lck_grp = lck_grp_alloc_init("vm shared region", - LCK_GRP_ATTR_NULL); - lck_mtx_init(&vm_shared_region_lock, - vm_shared_region_lck_grp, - LCK_ATTR_NULL); - - queue_init(&vm_shared_region_queue); - - SHARED_REGION_TRACE_DEBUG( - ("shared_region: <- init\n")); -} +extern u_int32_t random(void); /* * Retrieve a task's shared region and grab an extra reference to @@ -338,40 +337,6 @@ vm_shared_region_vm_map( (void *)VM_KERNEL_ADDRPERM(sr_map))); return sr_map; } -uint32_t -vm_shared_region_get_slide( - vm_shared_region_t shared_region) -{ - SHARED_REGION_TRACE_DEBUG( - ("shared_region: -> vm_shared_region_get_slide(%p)\n", - (void *)VM_KERNEL_ADDRPERM(shared_region))); - assert(shared_region->sr_ref_count > 1); - SHARED_REGION_TRACE_DEBUG( - ("shared_region: vm_shared_region_get_slide(%p) <- %u\n", - (void *)VM_KERNEL_ADDRPERM(shared_region), - shared_region->sr_slide_info.slide)); - - /* 0 if we haven't slid */ - assert(shared_region->sr_slide_info.slide_object != NULL || - shared_region->sr_slide_info.slide == 0); - - return shared_region->sr_slide_info.slide; -} - -vm_shared_region_slide_info_t -vm_shared_region_get_slide_info( - vm_shared_region_t shared_region) -{ - SHARED_REGION_TRACE_DEBUG( - ("shared_region: -> vm_shared_region_get_slide_info(%p)\n", - (void *)VM_KERNEL_ADDRPERM(shared_region))); - assert(shared_region->sr_ref_count > 1); - SHARED_REGION_TRACE_DEBUG( - ("shared_region: vm_shared_region_get_slide_info(%p) <- %p\n", - (void *)VM_KERNEL_ADDRPERM(shared_region), - (void *)VM_KERNEL_ADDRPERM(&shared_region->sr_slide_info))); - return &shared_region->sr_slide_info; -} /* * Set the shared region the process should use. @@ -429,16 +394,16 @@ vm_shared_region_lookup( void *root_dir, cpu_type_t cputype, cpu_subtype_t cpu_subtype, - boolean_t is_64bit) + boolean_t is_64bit, + boolean_t reslide) { vm_shared_region_t shared_region; vm_shared_region_t new_shared_region; SHARED_REGION_TRACE_DEBUG( - ("shared_region: -> lookup(root=%p,cpu=<%d,%d>,64bit=%d)\n", - + ("shared_region: -> lookup(root=%p,cpu=<%d,%d>,64bit=%d,reslide=%d)\n", (void *)VM_KERNEL_ADDRPERM(root_dir), - cputype, cpu_subtype, is_64bit)); + cputype, cpu_subtype, is_64bit, reslide)); shared_region = NULL; new_shared_region = NULL; @@ -451,9 +416,15 @@ vm_shared_region_lookup( sr_q) { assert(shared_region->sr_ref_count > 0); if (shared_region->sr_cpu_type == cputype && +#if !__has_feature(ptrauth_calls) /* arm64e/arm64 use same region */ shared_region->sr_cpu_subtype == cpu_subtype && +#endif /* !__has_feature(ptrauth_calls) */ shared_region->sr_root_dir == root_dir && - shared_region->sr_64bit == is_64bit) { + shared_region->sr_64bit == is_64bit && +#if __has_feature(ptrauth_calls) + shared_region->sr_reslide == reslide && +#endif /* __has_feature(ptrauth_calls) */ + !shared_region->sr_stale) { /* found a match ! 
*/ vm_shared_region_reference_locked(shared_region); goto done; @@ -465,7 +436,8 @@ vm_shared_region_lookup( new_shared_region = vm_shared_region_create(root_dir, cputype, cpu_subtype, - is_64bit); + is_64bit, + reslide); /* do the lookup again, in case we lost a race */ vm_shared_region_lock(); continue; @@ -477,6 +449,10 @@ vm_shared_region_lookup( shared_region, vm_shared_region_t, sr_q); + vm_shared_region_count++; + if (vm_shared_region_count > vm_shared_region_peak) { + vm_shared_region_peak = vm_shared_region_count; + } break; } @@ -486,7 +462,7 @@ done: if (new_shared_region) { /* * We lost a race with someone else to create a new shared - * region for that environment. Get rid of our unused one. + * region for that environment. Get rid of our unused one. */ assert(new_shared_region->sr_ref_count == 1); new_shared_region->sr_ref_count--; @@ -495,9 +471,9 @@ done: } SHARED_REGION_TRACE_DEBUG( - ("shared_region: lookup(root=%p,cpu=<%d,%d>,64bit=%d) <- %p\n", + ("shared_region: lookup(root=%p,cpu=<%d,%d>,64bit=%d,reslide=%d) <- %p\n", (void *)VM_KERNEL_ADDRPERM(root_dir), - cputype, cpu_subtype, is_64bit, + cputype, cpu_subtype, is_64bit, reslide, (void *)VM_KERNEL_ADDRPERM(shared_region))); assert(shared_region->sr_ref_count > 0); @@ -519,6 +495,7 @@ vm_shared_region_reference_locked( (void *)VM_KERNEL_ADDRPERM(shared_region))); assert(shared_region->sr_ref_count > 0); shared_region->sr_ref_count++; + assert(shared_region->sr_ref_count != 0); if (shared_region->sr_timer_call != NULL) { boolean_t cancelled; @@ -541,6 +518,26 @@ vm_shared_region_reference_locked( shared_region->sr_ref_count)); } +/* + * Take a reference on a shared region. + */ +void +vm_shared_region_reference(vm_shared_region_t shared_region) +{ + SHARED_REGION_TRACE_DEBUG( + ("shared_region: -> reference(%p)\n", + (void *)VM_KERNEL_ADDRPERM(shared_region))); + + vm_shared_region_lock(); + vm_shared_region_reference_locked(shared_region); + vm_shared_region_unlock(); + + SHARED_REGION_TRACE_DEBUG( + ("shared_region: reference(%p) <- %d\n", + (void *)VM_KERNEL_ADDRPERM(shared_region), + shared_region->sr_ref_count)); +} + /* * Release a reference on the shared region. * Destroy it if there are no references left. @@ -591,9 +588,13 @@ vm_shared_region_deallocate( if (shared_region->sr_ref_count == 0) { uint64_t deadline; - assert(!shared_region->sr_slid); - - if (shared_region->sr_timer_call == NULL) { + /* + * Even though a shared region is unused, delay a while before + * tearing it down, in case a new app launch can use it. + */ + if (shared_region->sr_timer_call == NULL && + shared_region_destroy_delay != 0 && + !shared_region->sr_stale) { /* hold one reference for the timer */ assert(!shared_region->sr_mapping_in_progress); shared_region->sr_ref_count++; @@ -605,7 +606,7 @@ vm_shared_region_deallocate( /* schedule the timer */ clock_interval_to_deadline(shared_region_destroy_delay, - 1000 * 1000 * 1000, + NSEC_PER_SEC, &deadline); thread_call_enter_delayed(shared_region->sr_timer_call, deadline); @@ -618,11 +619,6 @@ vm_shared_region_deallocate( } else { /* timer expired: let go of this shared region */ - /* - * We can't properly handle teardown of a slid object today. - */ - assert(!shared_region->sr_slid); - /* * Remove it from the queue first, so no one can find * it... @@ -631,6 +627,7 @@ vm_shared_region_deallocate( shared_region, vm_shared_region_t, sr_q); + vm_shared_region_count--; vm_shared_region_unlock(); /* ... 
and destroy it */ @@ -658,6 +655,7 @@ vm_shared_region_timeout( vm_shared_region_deallocate(shared_region); } + /* * Create a new (empty) shared region for a new environment. */ @@ -666,21 +664,24 @@ vm_shared_region_create( void *root_dir, cpu_type_t cputype, cpu_subtype_t cpu_subtype, - boolean_t is_64bit) + boolean_t is_64bit, +#if !__has_feature(ptrauth_calls) + __unused +#endif /* __has_feature(ptrauth_calls) */ + boolean_t reslide) { kern_return_t kr; vm_named_entry_t mem_entry; ipc_port_t mem_entry_port; vm_shared_region_t shared_region; - vm_shared_region_slide_info_t si; vm_map_t sub_map; mach_vm_offset_t base_address, pmap_nesting_start; mach_vm_size_t size, pmap_nesting_size; SHARED_REGION_TRACE_INFO( - ("shared_region: -> create(root=%p,cpu=<%d,%d>,64bit=%d)\n", + ("shared_region: -> create(root=%p,cpu=<%d,%d>,64bit=%d,reslide=%d)\n", (void *)VM_KERNEL_ADDRPERM(root_dir), - cputype, cpu_subtype, is_64bit)); + cputype, cpu_subtype, is_64bit, reslide)); base_address = 0; size = 0; @@ -762,8 +763,7 @@ vm_shared_region_create( } /* create a memory entry structure and a Mach port handle */ - kr = mach_memory_entry_allocate(&mem_entry, - &mem_entry_port); + kr = mach_memory_entry_allocate(&mem_entry, &mem_entry_port); if (kr != KERN_SUCCESS) { kfree(shared_region, sizeof(*shared_region)); shared_region = NULL; @@ -776,11 +776,14 @@ vm_shared_region_create( #if defined(__arm__) || defined(__arm64__) { struct pmap *pmap_nested; + int pmap_flags = 0; + pmap_flags |= is_64bit ? PMAP_CREATE_64BIT : 0; + - pmap_nested = pmap_create_options(NULL, 0, is_64bit ? PMAP_CREATE_64BIT : 0); + pmap_nested = pmap_create_options(NULL, 0, pmap_flags); if (pmap_nested != PMAP_NULL) { pmap_set_nested(pmap_nested); - sub_map = vm_map_create(pmap_nested, 0, size, TRUE); + sub_map = vm_map_create(pmap_nested, 0, (vm_map_offset_t)size, TRUE); #if defined(__arm64__) if (is_64bit || page_shift_user32 == SIXTEENK_PAGE_SHIFT) { @@ -788,6 +791,7 @@ vm_shared_region_create( vm_map_set_page_shift(sub_map, SIXTEENK_PAGE_SHIFT); } + #elif (__ARM_ARCH_7K__ >= 2) /* enforce 16KB alignment for watch targets with new ABI */ vm_map_set_page_shift(sub_map, SIXTEENK_PAGE_SHIFT); @@ -798,20 +802,21 @@ vm_shared_region_create( } #else /* create a VM sub map and its pmap */ - sub_map = vm_map_create(pmap_create_options(NULL, 0, is_64bit), - 0, size, - TRUE); + sub_map = vm_map_create(pmap_create_options(NULL, 0, is_64bit), 0, size, TRUE); #endif if (sub_map == VM_MAP_NULL) { ipc_port_release_send(mem_entry_port); kfree(shared_region, sizeof(*shared_region)); shared_region = NULL; - SHARED_REGION_TRACE_ERROR( - ("shared_region: create: " - "couldn't allocate map\n")); + SHARED_REGION_TRACE_ERROR(("shared_region: create: couldn't allocate map\n")); goto done; } + /* shared regions should always enforce code-signing */ + vm_map_cs_enforcement_set(sub_map, true); + assert(vm_map_cs_enforcement(sub_map)); + assert(pmap_get_vm_map_cs_enforced(vm_map_pmap(sub_map))); + assert(!sub_map->disable_vmentry_reuse); sub_map->is_nested_map = TRUE; @@ -831,45 +836,44 @@ vm_shared_region_create( shared_region->sr_pmap_nesting_size = pmap_nesting_size; shared_region->sr_cpu_type = cputype; shared_region->sr_cpu_subtype = cpu_subtype; - shared_region->sr_64bit = is_64bit; + shared_region->sr_64bit = (uint8_t)is_64bit; shared_region->sr_root_dir = root_dir; queue_init(&shared_region->sr_q); shared_region->sr_mapping_in_progress = FALSE; shared_region->sr_slide_in_progress = FALSE; shared_region->sr_persists = FALSE; - shared_region->sr_slid = 
FALSE; + shared_region->sr_stale = FALSE; shared_region->sr_timer_call = NULL; shared_region->sr_first_mapping = (mach_vm_offset_t) -1; /* grab a reference for the caller */ shared_region->sr_ref_count = 1; - /* And set up slide info */ - si = &shared_region->sr_slide_info; - si->start = 0; - si->end = 0; - si->slide = 0; -#if defined(HAS_APPLE_PAC) - si->si_ptrauth = FALSE; /* no pointer authentication by default */ -#endif /* HAS_APPLE_PAC */ - si->slide_object = NULL; - si->slide_info_size = 0; - si->slide_info_entry = NULL; + shared_region->sr_slide = 0; /* not slid yet */ /* Initialize UUID and other metadata */ memset(&shared_region->sr_uuid, '\0', sizeof(shared_region->sr_uuid)); shared_region->sr_uuid_copied = FALSE; shared_region->sr_images_count = 0; shared_region->sr_images = NULL; +#if __has_feature(ptrauth_calls) + shared_region->sr_reslide = reslide; + shared_region->sr_num_auth_section = 0; + for (uint_t i = 0; i < NUM_SR_AUTH_SECTIONS; ++i) { + shared_region->sr_auth_section[i] = NULL; + } + shared_region->sr_num_auth_section = 0; +#endif /* __has_feature(ptrauth_calls) */ + done: if (shared_region) { SHARED_REGION_TRACE_INFO( - ("shared_region: create(root=%p,cpu=<%d,%d>,64bit=%d," + ("shared_region: create(root=%p,cpu=<%d,%d>,64bit=%d,reslide=%d" "base=0x%llx,size=0x%llx) <- " "%p mem=(%p,%p) map=%p pmap=%p\n", (void *)VM_KERNEL_ADDRPERM(root_dir), - cputype, cpu_subtype, is_64bit, + cputype, cpu_subtype, is_64bit, reslide, (long long)base_address, (long long)size, (void *)VM_KERNEL_ADDRPERM(shared_region), @@ -910,7 +914,6 @@ vm_shared_region_destroy( assert(shared_region->sr_ref_count == 0); assert(!shared_region->sr_persists); - assert(!shared_region->sr_slid); mem_entry = (vm_named_entry_t) ip_get_kobject(shared_region->sr_mem_entry); assert(mem_entry->is_sub_map); @@ -928,9 +931,8 @@ vm_shared_region_destroy( */ if (map->pmap) { pmap_remove(map->pmap, - shared_region->sr_base_address, - (shared_region->sr_base_address + - shared_region->sr_size)); + (vm_map_offset_t)shared_region->sr_base_address, + (vm_map_offset_t)(shared_region->sr_base_address + shared_region->sr_size)); } /* @@ -948,19 +950,21 @@ vm_shared_region_destroy( thread_call_free(shared_region->sr_timer_call); } -#if 0 +#if __has_feature(ptrauth_calls) /* - * If slid, free those resources. We'll want this eventually, - * but can't handle it properly today. + * Free the cached copies of slide_info for the AUTH regions. */ - si = &shared_region->sr_slide_info; - if (si->slide_info_entry) { - kmem_free(kernel_map, - (vm_offset_t) si->slide_info_entry, - (vm_size_t) si->slide_info_size); - vm_object_deallocate(si->slide_object); + for (uint_t i = 0; i < shared_region->sr_num_auth_section; ++i) { + vm_shared_region_slide_info_t si = shared_region->sr_auth_section[i]; + if (si != NULL) { + vm_object_deallocate(si->si_slide_object); + kheap_free(KHEAP_DATA_BUFFERS, si->si_slide_info_entry, si->si_slide_info_size); + kfree(si, sizeof *si); + shared_region->sr_auth_section[i] = NULL; + } } -#endif + shared_region->sr_num_auth_section = 0; +#endif /* __has_feature(ptrauth_calls) */ /* release the shared region structure... */ kfree(shared_region, sizeof(*shared_region)); @@ -1015,6 +1019,7 @@ vm_shared_region_start_address( *start_address = sr_base_address + sr_first_mapping; } + vm_shared_region_unlock(); SHARED_REGION_TRACE_DEBUG( @@ -1025,16 +1030,215 @@ vm_shared_region_start_address( return kr; } +/* + * Look up a pre-existing mapping in shared region, for replacement. 
+ * Takes an extra object reference if found. + */ +static kern_return_t +find_mapping_to_slide(vm_map_t map, vm_map_address_t addr, vm_map_entry_t entry) +{ + vm_map_entry_t found; + + /* find the shared region's map entry to slide */ + vm_map_lock_read(map); + if (!vm_map_lookup_entry(map, addr, &found)) { + /* no mapping there */ + vm_map_unlock(map); + return KERN_INVALID_ARGUMENT; + } + + *entry = *found; + /* extra ref to keep object alive while map is unlocked */ + vm_object_reference(VME_OBJECT(found)); + vm_map_unlock_read(map); + return KERN_SUCCESS; +} + +#if __has_feature(ptrauth_calls) + +/* + * Determine if this task is actually using pointer signing. + */ +static boolean_t +task_sign_pointers(task_t task) +{ + if (task->map && + task->map->pmap && + !task->map->pmap->disable_jop) { + return TRUE; + } + return FALSE; +} + +/* + * If the shared region contains mappings that are authenticated, then + * remap them into the task private map. + * + * Failures are possible in this routine when jetsam kills a process + * just as dyld is trying to set it up. The vm_map and task shared region + * info get torn down w/o waiting for this thread to finish up. + */ +__attribute__((noinline)) +kern_return_t +vm_shared_region_auth_remap(vm_shared_region_t sr) +{ + memory_object_t sr_pager = MEMORY_OBJECT_NULL; + task_t task = current_task(); + vm_shared_region_slide_info_t si; + uint_t i; + vm_object_t object; + vm_map_t sr_map; + struct vm_map_entry tmp_entry_store = {0}; + vm_map_entry_t tmp_entry = NULL; + int vm_flags; + vm_map_kernel_flags_t vmk_flags; + vm_map_offset_t map_addr; + kern_return_t kr = KERN_SUCCESS; + boolean_t use_ptr_auth = task_sign_pointers(task); + + /* + * Don't do this more than once and avoid any race conditions in finishing it. + */ + vm_shared_region_lock(); + while (sr->sr_mapping_in_progress) { + /* wait for our turn... */ + vm_shared_region_sleep(&sr->sr_mapping_in_progress, THREAD_UNINT); + } + assert(!sr->sr_mapping_in_progress); + assert(sr->sr_ref_count > 1); + + /* Just return if already done. */ + if (task->shared_region_auth_remapped) { + vm_shared_region_unlock(); + return KERN_SUCCESS; + } + + /* let others know to wait while we're working in this shared region */ + sr->sr_mapping_in_progress = TRUE; + vm_shared_region_unlock(); + + /* + * Remap any sections with pointer authentications into the private map. + */ + for (i = 0; i < sr->sr_num_auth_section; ++i) { + si = sr->sr_auth_section[i]; + assert(si != NULL); + assert(si->si_ptrauth); + + /* + * We have mapping that needs to be private. + * Look for an existing slid mapping's pager with matching + * object, offset, slide info and shared_region_id to reuse. + */ + object = si->si_slide_object; + sr_pager = shared_region_pager_match(object, si->si_start, si, + use_ptr_auth ? task->jop_pid : 0); + if (sr_pager == MEMORY_OBJECT_NULL) { + kr = KERN_FAILURE; + goto done; + } + + /* + * verify matching jop_pid for this task and this pager + */ + if (use_ptr_auth) { + shared_region_pager_match_task_key(sr_pager, task); + } + + sr_map = vm_shared_region_vm_map(sr); + tmp_entry = NULL; + + kr = find_mapping_to_slide(sr_map, si->si_slid_address - sr->sr_base_address, &tmp_entry_store); + if (kr != KERN_SUCCESS) { + goto done; + } + tmp_entry = &tmp_entry_store; + + /* + * Check that the object exactly covers the region to slide. 
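Stepping back from the per-section loop for a moment: this routine is driven from the tail of vm_shared_region_map_file() later in this patch, and the per-task flag checked above makes repeat calls cheap. A condensed sketch of those two pieces, assembled from the surrounding hunks (error handling elided):

    /* caller, at the end of vm_shared_region_map_file(): */
    #if __has_feature(ptrauth_calls)
    if (kr == KERN_SUCCESS) {
        /* authenticated mappings were just added; remap them privately for this task */
        kr = vm_shared_region_auth_remap(shared_region);
    }
    #endif

    /* inside vm_shared_region_auth_remap(), the per-task flag short-circuits later calls: */
    if (task->shared_region_auth_remapped) {
        vm_shared_region_unlock();
        return KERN_SUCCESS;        /* this task already has its private AUTH mappings */
    }
    /* ... remap each sr->sr_auth_section[i] into task->map ... */
    task->shared_region_auth_remapped = TRUE;   /* set once the remap has completed */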
+ */ + if (VME_OFFSET(tmp_entry) != si->si_start || + tmp_entry->vme_end - tmp_entry->vme_start != si->si_end - si->si_start) { + kr = KERN_FAILURE; + goto done; + } + + /* + * map the pager over the portion of the mapping that needs sliding + */ + vm_flags = VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE; + vmk_flags = VM_MAP_KERNEL_FLAGS_NONE; + vmk_flags.vmkf_overwrite_immutable = TRUE; + map_addr = si->si_slid_address; + kr = vm_map_enter_mem_object(task->map, + &map_addr, + si->si_end - si->si_start, + (mach_vm_offset_t) 0, + vm_flags, + vmk_flags, + VM_KERN_MEMORY_NONE, + (ipc_port_t)(uintptr_t) sr_pager, + 0, + TRUE, + tmp_entry->protection, + tmp_entry->max_protection, + tmp_entry->inheritance); + memory_object_deallocate(sr_pager); + sr_pager = MEMORY_OBJECT_NULL; + if (kr != KERN_SUCCESS) { + goto done; + } + assertf(map_addr == si->si_slid_address, + "map_addr=0x%llx si_slid_address=0x%llx tmp_entry=%p\n", + (uint64_t)map_addr, + (uint64_t)si->si_slid_address, + tmp_entry); + + /* Drop the ref count grabbed by find_mapping_to_slide */ + vm_object_deallocate(VME_OBJECT(tmp_entry)); + tmp_entry = NULL; + } + +done: + if (tmp_entry) { + /* Drop the ref count grabbed by find_mapping_to_slide */ + vm_object_deallocate(VME_OBJECT(tmp_entry)); + tmp_entry = NULL; + } + + /* + * Drop any extra reference to the pager in case we're quitting due to an error above. + */ + if (sr_pager != MEMORY_OBJECT_NULL) { + memory_object_deallocate(sr_pager); + } + + /* + * Mark the region as having it's auth sections remapped. + */ + vm_shared_region_lock(); + task->shared_region_auth_remapped = TRUE; + sr->sr_mapping_in_progress = FALSE; + thread_wakeup((event_t)&sr->sr_mapping_in_progress); + vm_shared_region_unlock(); + return kr; +} +#endif /* __has_feature(ptrauth_calls) */ + void vm_shared_region_undo_mappings( - vm_map_t sr_map, - mach_vm_offset_t sr_base_address, - struct shared_file_mapping_np *mappings, - unsigned int mappings_count) + vm_map_t sr_map, + mach_vm_offset_t sr_base_address, + struct _sr_file_mappings *srf_mappings, + struct _sr_file_mappings *srf_mappings_current, + unsigned int srf_current_mappings_count) { - unsigned int j = 0; - vm_shared_region_t shared_region = NULL; - boolean_t reset_shared_region_state = FALSE; + unsigned int j = 0; + vm_shared_region_t shared_region = NULL; + boolean_t reset_shared_region_state = FALSE; + struct _sr_file_mappings *srfmp; + unsigned int mappings_count; + struct shared_file_mapping_slide_np *mappings; shared_region = vm_shared_region_get(current_task()); if (shared_region == NULL) { @@ -1042,7 +1246,6 @@ vm_shared_region_undo_mappings( return; } - if (sr_map == NULL) { ipc_port_t sr_handle; vm_named_entry_t sr_mem_entry; @@ -1073,35 +1276,45 @@ vm_shared_region_undo_mappings( /* * Undo the mappings we've established so far. */ - for (j = 0; j < mappings_count; j++) { - kern_return_t kr2; + for (srfmp = &srf_mappings[0]; + srfmp <= srf_mappings_current; + srfmp++) { + mappings = srfmp->mappings; + mappings_count = srfmp->mappings_count; + if (srfmp == srf_mappings_current) { + mappings_count = srf_current_mappings_count; + } - if (mappings[j].sfm_size == 0) { - /* - * We didn't establish this - * mapping, so nothing to undo. - */ - continue; + for (j = 0; j < mappings_count; j++) { + kern_return_t kr2; + + if (mappings[j].sms_size == 0) { + /* + * We didn't establish this + * mapping, so nothing to undo. 
+ */ + continue; + } + SHARED_REGION_TRACE_INFO( + ("shared_region: mapping[%d]: " + "address:0x%016llx " + "size:0x%016llx " + "offset:0x%016llx " + "maxprot:0x%x prot:0x%x: " + "undoing...\n", + j, + (long long)mappings[j].sms_address, + (long long)mappings[j].sms_size, + (long long)mappings[j].sms_file_offset, + mappings[j].sms_max_prot, + mappings[j].sms_init_prot)); + kr2 = mach_vm_deallocate( + sr_map, + (mappings[j].sms_address - + sr_base_address), + mappings[j].sms_size); + assert(kr2 == KERN_SUCCESS); } - SHARED_REGION_TRACE_INFO( - ("shared_region: mapping[%d]: " - "address:0x%016llx " - "size:0x%016llx " - "offset:0x%016llx " - "maxprot:0x%x prot:0x%x: " - "undoing...\n", - j, - (long long)mappings[j].sfm_address, - (long long)mappings[j].sfm_size, - (long long)mappings[j].sfm_file_offset, - mappings[j].sfm_max_prot, - mappings[j].sfm_init_prot)); - kr2 = mach_vm_deallocate( - sr_map, - (mappings[j].sfm_address - - sr_base_address), - mappings[j].sfm_size); - assert(kr2 == KERN_SUCCESS); } if (reset_shared_region_state) { @@ -1119,60 +1332,52 @@ vm_shared_region_undo_mappings( } /* - * Establish some mappings of a file in the shared region. - * This is used by "dyld" via the shared_region_map_np() system call - * to populate the shared region with the appropriate shared cache. - * - * One could also call it several times to incrementally load several - * libraries, as long as they do not overlap. - * It will return KERN_SUCCESS if the mappings were successfully established - * or if they were already established identically by another process. + * For now we only expect to see at most 2 regions to relocate/authenticate + * per file. One that's VM_PROT_SLIDE and one VM_PROT_SLIDE | VM_PROT_NOAUTH. */ -kern_return_t -vm_shared_region_map_file( +#define VMSR_NUM_SLIDES 2 + +/* + * First part of vm_shared_region_map_file(). Split out to + * avoid kernel stack overflow. 
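A minimal sketch of how the split pieces fit together; only the function names are taken from this patch, the argument lists are abbreviated and are not the real signatures:

    /* vm_shared_region_map_file() now drives three noinline helpers: */
    kr = vm_shared_region_map_file_setup(shared_region, /* ... */);
            /* enters the raw file-backed and zero-fill mappings and records
             * which of them carry VM_PROT_SLIDE (at most VMSR_NUM_SLIDES) */

    for (i = 0; i < mappings_to_slide_cnt; i++) {
        kr = vm_shared_region_slide(shared_region->sr_slide, /* ... */);
            /* overwrites each slid range with the shared region pager */
    }

    vm_shared_region_map_file_final(shared_region, /* ... */);
            /* copies the dyld cache header/UUID and trims the pmaps */

    /* on pointer-auth targets, vm_shared_region_auth_remap() then gives the task
     * private copies of the authenticated sections */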
+ */ +__attribute__((noinline)) +static kern_return_t +vm_shared_region_map_file_setup( vm_shared_region_t shared_region, - unsigned int mappings_count, - struct shared_file_mapping_np *mappings, - memory_object_control_t file_control, - memory_object_size_t file_size, void *root_dir, - uint32_t slide, - user_addr_t slide_start, - user_addr_t slide_size) + int sr_file_mappings_count, + struct _sr_file_mappings *sr_file_mappings, + unsigned int *mappings_to_slide_cnt, + struct shared_file_mapping_slide_np **mappings_to_slide, + mach_vm_offset_t *slid_mappings, + memory_object_control_t *slid_file_controls, + mach_vm_offset_t *first_mapping, + mach_vm_offset_t *file_first_mappings, + mach_vm_offset_t *sfm_min_address, + mach_vm_offset_t *sfm_max_address, + vm_map_t *sr_map_ptr, + vm_map_offset_t *lowest_unnestable_addr_ptr) { - kern_return_t kr; + kern_return_t kr = KERN_SUCCESS; + memory_object_control_t file_control; vm_object_t file_object; ipc_port_t sr_handle; vm_named_entry_t sr_mem_entry; vm_map_t sr_map; mach_vm_offset_t sr_base_address; - unsigned int i; + unsigned int i = 0; mach_port_t map_port; vm_map_offset_t target_address; vm_object_t object; vm_object_size_t obj_size; - struct shared_file_mapping_np *mapping_to_slide = NULL; - mach_vm_offset_t first_mapping = (mach_vm_offset_t) -1; - mach_vm_offset_t slid_mapping = (mach_vm_offset_t) -1; vm_map_offset_t lowest_unnestable_addr = 0; vm_map_kernel_flags_t vmk_flags; - mach_vm_offset_t sfm_min_address = ~0; - mach_vm_offset_t sfm_max_address = 0; mach_vm_offset_t sfm_end; - struct _dyld_cache_header sr_cache_header; - -#if __arm64__ - if ((shared_region->sr_64bit || - page_shift_user32 == SIXTEENK_PAGE_SHIFT) && - ((slide & SIXTEENK_PAGE_MASK) != 0)) { - printf("FOURK_COMPAT: %s: rejecting mis-aligned slide 0x%x\n", - __FUNCTION__, slide); - kr = KERN_INVALID_ARGUMENT; - goto done; - } -#endif /* __arm64__ */ - - kr = KERN_SUCCESS; + uint32_t mappings_count; + struct shared_file_mapping_slide_np *mappings; + struct _sr_file_mappings *srfmp; + unsigned int current_file_index = 0; vm_shared_region_lock(); assert(shared_region->sr_ref_count > 1); @@ -1185,8 +1390,11 @@ vm_shared_region_map_file( * doesn't quite belong into it. 
*/ vm_shared_region_unlock(); - kr = KERN_PROTECTION_FAILURE; - goto done; + + SHARED_REGION_TRACE_DEBUG( + ("shared_region: map(%p) <- 0x%x \n", + (void *)VM_KERNEL_ADDRPERM(shared_region), kr)); + return KERN_PROTECTION_FAILURE; } /* @@ -1213,206 +1421,256 @@ vm_shared_region_map_file( sr_base_address = shared_region->sr_base_address; SHARED_REGION_TRACE_DEBUG( - ("shared_region: -> map(%p,%d,%p,%p,0x%llx)\n", - (void *)VM_KERNEL_ADDRPERM(shared_region), mappings_count, - (void *)VM_KERNEL_ADDRPERM(mappings), - (void *)VM_KERNEL_ADDRPERM(file_control), file_size)); + ("shared_region: -> map(%p)\n", + (void *)VM_KERNEL_ADDRPERM(shared_region))); - /* get the VM object associated with the file to be mapped */ - file_object = memory_object_control_to_vm_object(file_control); + mappings_count = 0; + mappings = NULL; + srfmp = NULL; - assert(file_object); + /* process all the files to be mapped */ + for (srfmp = &sr_file_mappings[0]; + srfmp < &sr_file_mappings[sr_file_mappings_count]; + srfmp++) { + mappings_count = srfmp->mappings_count; + mappings = srfmp->mappings; + file_control = srfmp->file_control; - /* establish the mappings */ - for (i = 0; i < mappings_count; i++) { - SHARED_REGION_TRACE_INFO( - ("shared_region: mapping[%d]: " - "address:0x%016llx size:0x%016llx offset:0x%016llx " - "maxprot:0x%x prot:0x%x\n", - i, - (long long)mappings[i].sfm_address, - (long long)mappings[i].sfm_size, - (long long)mappings[i].sfm_file_offset, - mappings[i].sfm_max_prot, - mappings[i].sfm_init_prot)); - - if (mappings[i].sfm_address < sfm_min_address) { - sfm_min_address = mappings[i].sfm_address; + if (mappings_count == 0) { + /* no mappings here... */ + continue; } - if (os_add_overflow(mappings[i].sfm_address, - mappings[i].sfm_size, - &sfm_end) || - (vm_map_round_page(sfm_end, VM_MAP_PAGE_MASK(sr_map)) < - mappings[i].sfm_address)) { - /* overflow */ + /* + * The code below can only correctly "slide" (perform relocations) for one + * value of the slide amount. So if a file has a non-zero slide, it has to + * match any previous value. A zero slide value is ok for things that are + * just directly mapped. + */ + if (shared_region->sr_slide == 0 && srfmp->slide != 0) { + shared_region->sr_slide = srfmp->slide; + } else if (shared_region->sr_slide != 0 && + srfmp->slide != 0 && + shared_region->sr_slide != srfmp->slide) { + SHARED_REGION_TRACE_ERROR( + ("shared_region: more than 1 non-zero slide value amount " + "slide 1:0x%x slide 2:0x%x\n ", + shared_region->sr_slide, srfmp->slide)); kr = KERN_INVALID_ARGUMENT; break; } - if (sfm_end > sfm_max_address) { - sfm_max_address = sfm_end; - } - if (mappings[i].sfm_init_prot & VM_PROT_ZF) { - /* zero-filled memory */ - map_port = MACH_PORT_NULL; - } else { - /* file-backed memory */ - __IGNORE_WCASTALIGN(map_port = (ipc_port_t) file_object->pager); +#if __arm64__ + if ((shared_region->sr_64bit || + page_shift_user32 == SIXTEENK_PAGE_SHIFT) && + ((srfmp->slide & SIXTEENK_PAGE_MASK) != 0)) { + printf("FOURK_COMPAT: %s: rejecting mis-aligned slide 0x%x\n", + __FUNCTION__, srfmp->slide); + kr = KERN_INVALID_ARGUMENT; + break; } +#endif /* __arm64__ */ - if (mappings[i].sfm_init_prot & VM_PROT_SLIDE) { - /* - * This is the mapping that needs to be slid. 
- */ - if (mapping_to_slide != NULL) { - SHARED_REGION_TRACE_INFO( - ("shared_region: mapping[%d]: " - "address:0x%016llx size:0x%016llx " - "offset:0x%016llx " - "maxprot:0x%x prot:0x%x " - "will not be slid as only one such mapping is allowed...\n", - i, - (long long)mappings[i].sfm_address, - (long long)mappings[i].sfm_size, - (long long)mappings[i].sfm_file_offset, - mappings[i].sfm_max_prot, - mappings[i].sfm_init_prot)); - } else { - mapping_to_slide = &mappings[i]; + /* get the VM object associated with the file to be mapped */ + file_object = memory_object_control_to_vm_object(file_control); + assert(file_object); + + /* establish the mappings for that file */ + for (i = 0; i < mappings_count; i++) { + SHARED_REGION_TRACE_INFO( + ("shared_region: mapping[%d]: " + "address:0x%016llx size:0x%016llx offset:0x%016llx " + "maxprot:0x%x prot:0x%x\n", + i, + (long long)mappings[i].sms_address, + (long long)mappings[i].sms_size, + (long long)mappings[i].sms_file_offset, + mappings[i].sms_max_prot, + mappings[i].sms_init_prot)); + + if (mappings[i].sms_address < *sfm_min_address) { + *sfm_min_address = mappings[i].sms_address; } - } - /* mapping's address is relative to the shared region base */ - target_address = - mappings[i].sfm_address - sr_base_address; + if (os_add_overflow(mappings[i].sms_address, + mappings[i].sms_size, + &sfm_end) || + (vm_map_round_page(sfm_end, VM_MAP_PAGE_MASK(sr_map)) < + mappings[i].sms_address)) { + /* overflow */ + kr = KERN_INVALID_ARGUMENT; + break; + } + if (sfm_end > *sfm_max_address) { + *sfm_max_address = sfm_end; + } - vmk_flags = VM_MAP_KERNEL_FLAGS_NONE; - vmk_flags.vmkf_already = TRUE; - /* no copy-on-read for mapped binaries */ - vmk_flags.vmkf_no_copy_on_read = 1; + if (mappings[i].sms_init_prot & VM_PROT_ZF) { + /* zero-filled memory */ + map_port = MACH_PORT_NULL; + } else { + /* file-backed memory */ + __IGNORE_WCASTALIGN(map_port = (ipc_port_t) file_object->pager); + } - /* establish that mapping, OK if it's "already" there */ - if (map_port == MACH_PORT_NULL) { /* - * We want to map some anonymous memory in a - * shared region. - * We have to create the VM object now, so that it - * can be mapped "copy-on-write". + * Remember which mappings need sliding. */ - obj_size = vm_map_round_page(mappings[i].sfm_size, - VM_MAP_PAGE_MASK(sr_map)); - object = vm_object_allocate(obj_size); - if (object == VM_OBJECT_NULL) { - kr = KERN_RESOURCE_SHORTAGE; + if (mappings[i].sms_max_prot & VM_PROT_SLIDE) { + if (*mappings_to_slide_cnt == VMSR_NUM_SLIDES) { + SHARED_REGION_TRACE_INFO( + ("shared_region: mapping[%d]: " + "address:0x%016llx size:0x%016llx " + "offset:0x%016llx " + "maxprot:0x%x prot:0x%x " + "too many mappings to slide...\n", + i, + (long long)mappings[i].sms_address, + (long long)mappings[i].sms_size, + (long long)mappings[i].sms_file_offset, + mappings[i].sms_max_prot, + mappings[i].sms_init_prot)); + } else { + mappings_to_slide[*mappings_to_slide_cnt] = &mappings[i]; + *mappings_to_slide_cnt += 1; + } + } + + /* mapping's address is relative to the shared region base */ + target_address = (vm_map_offset_t)(mappings[i].sms_address - sr_base_address); + + vmk_flags = VM_MAP_KERNEL_FLAGS_NONE; + vmk_flags.vmkf_already = TRUE; + /* no copy-on-read for mapped binaries */ + vmk_flags.vmkf_no_copy_on_read = 1; + + + /* establish that mapping, OK if it's "already" there */ + if (map_port == MACH_PORT_NULL) { + /* + * We want to map some anonymous memory in a shared region. 
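To make the protection-flag handling here concrete, a hypothetical descriptor as dyld might hand one in; the struct tag, field names and protection bits are the ones this code consumes, while the values and the two slide_info_* locals are invented for illustration:

    struct shared_file_mapping_slide_np m = {
        .sms_address     = sr_base_address + 0x4000,      /* placement in the shared region */
        .sms_size        = 0x8000,
        .sms_file_offset = 0x4000,                         /* offset into the cache file     */
        .sms_max_prot    = VM_PROT_READ | VM_PROT_SLIDE,   /* marks it as needing relocation */
        .sms_init_prot   = VM_PROT_READ,                   /* VM_PROT_ZF here would request a
                                                            * zero-filled anonymous mapping  */
        .sms_slide_start = slide_info_uaddr,               /* dyld-supplied slide info...    */
        .sms_slide_size  = slide_info_len,                 /* ...and its length              */
    };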
+ * We have to create the VM object now, so that it can be mapped "copy-on-write". + */ + obj_size = vm_map_round_page(mappings[i].sms_size, VM_MAP_PAGE_MASK(sr_map)); + object = vm_object_allocate(obj_size); + if (object == VM_OBJECT_NULL) { + kr = KERN_RESOURCE_SHORTAGE; + } else { + kr = vm_map_enter( + sr_map, + &target_address, + vm_map_round_page(mappings[i].sms_size, + VM_MAP_PAGE_MASK(sr_map)), + 0, + VM_FLAGS_FIXED, + vmk_flags, + VM_KERN_MEMORY_NONE, + object, + 0, + TRUE, + mappings[i].sms_init_prot & VM_PROT_ALL, + mappings[i].sms_max_prot & VM_PROT_ALL, + VM_INHERIT_DEFAULT); + } } else { - kr = vm_map_enter( + object = VM_OBJECT_NULL; /* no anonymous memory here */ + kr = vm_map_enter_mem_object( sr_map, &target_address, - vm_map_round_page(mappings[i].sfm_size, + vm_map_round_page(mappings[i].sms_size, VM_MAP_PAGE_MASK(sr_map)), 0, VM_FLAGS_FIXED, vmk_flags, VM_KERN_MEMORY_NONE, - object, - 0, + map_port, + mappings[i].sms_file_offset, TRUE, - mappings[i].sfm_init_prot & VM_PROT_ALL, - mappings[i].sfm_max_prot & VM_PROT_ALL, + mappings[i].sms_init_prot & VM_PROT_ALL, + mappings[i].sms_max_prot & VM_PROT_ALL, VM_INHERIT_DEFAULT); } - } else { - object = VM_OBJECT_NULL; /* no anonymous memory here */ - kr = vm_map_enter_mem_object( - sr_map, - &target_address, - vm_map_round_page(mappings[i].sfm_size, - VM_MAP_PAGE_MASK(sr_map)), - 0, - VM_FLAGS_FIXED, - vmk_flags, - VM_KERN_MEMORY_NONE, - map_port, - mappings[i].sfm_file_offset, - TRUE, - mappings[i].sfm_init_prot & VM_PROT_ALL, - mappings[i].sfm_max_prot & VM_PROT_ALL, - VM_INHERIT_DEFAULT); - } - - if (kr == KERN_SUCCESS) { - /* - * Record the first (chronologically) successful - * mapping in this shared region. - * We're protected by "sr_mapping_in_progress" here, - * so no need to lock "shared_region". - */ - if (first_mapping == (mach_vm_offset_t) -1) { - first_mapping = target_address; - } - -#if defined(HAS_APPLE_PAC) - /* - * Set "sr_slid_mapping" - * it is used to get the userland address for address authentication. - */ -#endif - if ((slid_mapping == (mach_vm_offset_t) -1) && - (mapping_to_slide == &mappings[i])) { - slid_mapping = target_address; - } - /* - * Record the lowest writable address in this - * sub map, to log any unexpected unnesting below - * that address (see log_unnest_badness()). - */ - if ((mappings[i].sfm_init_prot & VM_PROT_WRITE) && - sr_map->is_nested_map && - (lowest_unnestable_addr == 0 || - (target_address < lowest_unnestable_addr))) { - lowest_unnestable_addr = target_address; - } - } else { - if (map_port == MACH_PORT_NULL) { - /* - * Get rid of the VM object we just created - * but failed to map. - */ - vm_object_deallocate(object); - object = VM_OBJECT_NULL; - } - if (kr == KERN_MEMORY_PRESENT) { + if (kr == KERN_SUCCESS) { /* - * This exact mapping was already there: - * that's fine. + * Record the first (chronologically) successful + * mapping in this shared region. + * We're protected by "sr_mapping_in_progress" here, + * so no need to lock "shared_region". 
*/ - SHARED_REGION_TRACE_INFO( - ("shared_region: mapping[%d]: " - "address:0x%016llx size:0x%016llx " - "offset:0x%016llx " - "maxprot:0x%x prot:0x%x " - "already mapped...\n", - i, - (long long)mappings[i].sfm_address, - (long long)mappings[i].sfm_size, - (long long)mappings[i].sfm_file_offset, - mappings[i].sfm_max_prot, - mappings[i].sfm_init_prot)); + assert(current_file_index < VMSR_NUM_SLIDES); + if (file_first_mappings[current_file_index] == (mach_vm_offset_t) -1) { + file_first_mappings[current_file_index] = target_address; + } + + if (*mappings_to_slide_cnt > 0 && + mappings_to_slide[*mappings_to_slide_cnt - 1] == &mappings[i]) { + slid_mappings[*mappings_to_slide_cnt - 1] = target_address; + slid_file_controls[*mappings_to_slide_cnt - 1] = file_control; + } + /* - * We didn't establish this mapping ourselves; - * let's reset its size, so that we do not - * attempt to undo it if an error occurs later. + * Record the lowest writable address in this + * sub map, to log any unexpected unnesting below + * that address (see log_unnest_badness()). */ - mappings[i].sfm_size = 0; - kr = KERN_SUCCESS; + if ((mappings[i].sms_init_prot & VM_PROT_WRITE) && + sr_map->is_nested_map && + (lowest_unnestable_addr == 0 || + (target_address < lowest_unnestable_addr))) { + lowest_unnestable_addr = target_address; + } } else { - break; + if (map_port == MACH_PORT_NULL) { + /* + * Get rid of the VM object we just created + * but failed to map. + */ + vm_object_deallocate(object); + object = VM_OBJECT_NULL; + } + if (kr == KERN_MEMORY_PRESENT) { + /* + * This exact mapping was already there: + * that's fine. + */ + SHARED_REGION_TRACE_INFO( + ("shared_region: mapping[%d]: " + "address:0x%016llx size:0x%016llx " + "offset:0x%016llx " + "maxprot:0x%x prot:0x%x " + "already mapped...\n", + i, + (long long)mappings[i].sms_address, + (long long)mappings[i].sms_size, + (long long)mappings[i].sms_file_offset, + mappings[i].sms_max_prot, + mappings[i].sms_init_prot)); + /* + * We didn't establish this mapping ourselves; + * let's reset its size, so that we do not + * attempt to undo it if an error occurs later. + */ + mappings[i].sms_size = 0; + kr = KERN_SUCCESS; + } else { + break; + } } } + + if (kr != KERN_SUCCESS) { + break; + } + + ++current_file_index; + } + + if (file_first_mappings[0] != (mach_vm_offset_t)-1) { + *first_mapping = file_first_mappings[0]; } + if (kr != KERN_SUCCESS) { /* the last mapping we tried (mappings[i]) failed ! */ assert(i < mappings_count); @@ -1422,52 +1680,116 @@ vm_shared_region_map_file( "offset:0x%016llx " "maxprot:0x%x prot:0x%x failed 0x%x\n", i, - (long long)mappings[i].sfm_address, - (long long)mappings[i].sfm_size, - (long long)mappings[i].sfm_file_offset, - mappings[i].sfm_max_prot, - mappings[i].sfm_init_prot, + (long long)mappings[i].sms_address, + (long long)mappings[i].sms_size, + (long long)mappings[i].sms_file_offset, + mappings[i].sms_max_prot, + mappings[i].sms_init_prot, kr)); + + /* + * Respect the design of vm_shared_region_undo_mappings + * as we are holding the sr_mapping_in_progress == true here. + * So don't allow sr_map == NULL otherwise vm_shared_region_undo_mappings + * will be blocked at waiting sr_mapping_in_progress to be false. 
+ */ + assert(sr_map != NULL); /* undo all the previous mappings */ - vm_shared_region_undo_mappings(sr_map, sr_base_address, mappings, i); - } - - if (kr == KERN_SUCCESS && - slide_size != 0 && - mapping_to_slide != NULL) { - kr = vm_shared_region_slide(slide, - mapping_to_slide->sfm_file_offset, - mapping_to_slide->sfm_size, - slide_start, - slide_size, - slid_mapping, - file_control); + vm_shared_region_undo_mappings(sr_map, sr_base_address, sr_file_mappings, srfmp, i); + return kr; + } + + *lowest_unnestable_addr_ptr = lowest_unnestable_addr; + *sr_map_ptr = sr_map; + return KERN_SUCCESS; +} + +/* forwared declaration */ +__attribute__((noinline)) +static void +vm_shared_region_map_file_final( + vm_shared_region_t shared_region, + vm_map_t sr_map, + mach_vm_offset_t sfm_min_address, + mach_vm_offset_t sfm_max_address, + mach_vm_offset_t *file_first_mappings); + +/* + * Establish some mappings of a file in the shared region. + * This is used by "dyld" via the shared_region_map_np() system call + * to populate the shared region with the appropriate shared cache. + * + * One could also call it several times to incrementally load several + * libraries, as long as they do not overlap. + * It will return KERN_SUCCESS if the mappings were successfully established + * or if they were already established identically by another process. + */ +__attribute__((noinline)) +kern_return_t +vm_shared_region_map_file( + vm_shared_region_t shared_region, + void *root_dir, + int sr_file_mappings_count, + struct _sr_file_mappings *sr_file_mappings) +{ + kern_return_t kr = KERN_SUCCESS; + unsigned int i; + unsigned int mappings_to_slide_cnt = 0; + struct shared_file_mapping_slide_np *mappings_to_slide[VMSR_NUM_SLIDES] = {}; + mach_vm_offset_t slid_mappings[VMSR_NUM_SLIDES]; + memory_object_control_t slid_file_controls[VMSR_NUM_SLIDES]; + mach_vm_offset_t first_mapping = (mach_vm_offset_t)-1; + mach_vm_offset_t sfm_min_address = (mach_vm_offset_t)-1; + mach_vm_offset_t sfm_max_address = 0; + vm_map_t sr_map = NULL; + vm_map_offset_t lowest_unnestable_addr = 0; + mach_vm_offset_t file_first_mappings[VMSR_NUM_SLIDES] = {(mach_vm_offset_t) -1, (mach_vm_offset_t) -1}; + + kr = vm_shared_region_map_file_setup(shared_region, root_dir, sr_file_mappings_count, sr_file_mappings, + &mappings_to_slide_cnt, &mappings_to_slide[0], slid_mappings, slid_file_controls, + &first_mapping, &file_first_mappings[0], + &sfm_min_address, &sfm_max_address, &sr_map, &lowest_unnestable_addr); + if (kr != KERN_SUCCESS) { + vm_shared_region_lock(); + goto done; + } + + /* + * The call above installed direct mappings to the shared cache file. + * Now we go back and overwrite the mappings that need relocation + * with a special shared region pager. 
+ */ + for (i = 0; i < mappings_to_slide_cnt; ++i) { + kr = vm_shared_region_slide(shared_region->sr_slide, + mappings_to_slide[i]->sms_file_offset, + mappings_to_slide[i]->sms_size, + mappings_to_slide[i]->sms_slide_start, + mappings_to_slide[i]->sms_slide_size, + slid_mappings[i], + slid_file_controls[i], + mappings_to_slide[i]->sms_max_prot); if (kr != KERN_SUCCESS) { SHARED_REGION_TRACE_ERROR( ("shared_region: region_slide(" "slide:0x%x start:0x%016llx " "size:0x%016llx) failed 0x%x\n", - slide, - (long long)slide_start, - (long long)slide_size, + shared_region->sr_slide, + (long long)mappings_to_slide[i]->sms_slide_start, + (long long)mappings_to_slide[i]->sms_slide_size, kr)); - vm_shared_region_undo_mappings(sr_map, - sr_base_address, - mappings, - mappings_count); + vm_shared_region_lock(); + goto done; } } - if (kr == KERN_SUCCESS) { - /* adjust the map's "lowest_unnestable_start" */ - lowest_unnestable_addr &= ~(pmap_nesting_size_min - 1); - if (lowest_unnestable_addr != - sr_map->lowest_unnestable_start) { - vm_map_lock(sr_map); - sr_map->lowest_unnestable_start = - lowest_unnestable_addr; - vm_map_unlock(sr_map); - } + assert(kr == KERN_SUCCESS); + + /* adjust the map's "lowest_unnestable_start" */ + lowest_unnestable_addr &= ~(pmap_shared_region_size_min(sr_map->pmap) - 1); + if (lowest_unnestable_addr != sr_map->lowest_unnestable_start) { + vm_map_lock(sr_map); + sr_map->lowest_unnestable_start = lowest_unnestable_addr; + vm_map_unlock(sr_map); } vm_shared_region_lock(); @@ -1475,19 +1797,65 @@ vm_shared_region_map_file( assert(shared_region->sr_mapping_in_progress); /* set "sr_first_mapping"; dyld uses it to validate the shared cache */ - if (kr == KERN_SUCCESS && - shared_region->sr_first_mapping == (mach_vm_offset_t) -1) { + if (shared_region->sr_first_mapping == (mach_vm_offset_t) -1) { shared_region->sr_first_mapping = first_mapping; } + vm_shared_region_map_file_final(shared_region, sr_map, sfm_min_address, sfm_max_address, + &file_first_mappings[0]); + +done: + /* + * We're done working on that shared region. + * Wake up any waiting threads. + */ + shared_region->sr_mapping_in_progress = FALSE; + thread_wakeup((event_t) &shared_region->sr_mapping_in_progress); + vm_shared_region_unlock(); + +#if __has_feature(ptrauth_calls) + if (kr == KERN_SUCCESS) { + /* + * Since authenticated mappings were just added to the shared region, + * go back and remap them into private mappings for this task. + */ + kr = vm_shared_region_auth_remap(shared_region); + } +#endif /* __has_feature(ptrauth_calls) */ + + SHARED_REGION_TRACE_DEBUG( + ("shared_region: map(%p) <- 0x%x \n", + (void *)VM_KERNEL_ADDRPERM(shared_region), kr)); + return kr; +} + +/* + * Final part of vm_shared_region_map_file(). + * Kept in separate function to avoid blowing out the stack. + */ +__attribute__((noinline)) +static void +vm_shared_region_map_file_final( + vm_shared_region_t shared_region, + vm_map_t sr_map, + mach_vm_offset_t sfm_min_address, + mach_vm_offset_t sfm_max_address, + __unused mach_vm_offset_t *file_first_mappings) +{ + struct _dyld_cache_header sr_cache_header; + int error; + size_t image_array_length; + struct _dyld_cache_image_text_info *sr_image_layout; + + /* * copy in the shared region UUID to the shared region structure. * we do this indirectly by first copying in the shared cache header * and then copying the UUID from there because we'll need to look * at other content from the shared cache header. 
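In outline, that copy happens in two steps further down this hunk: the fixed-size header via copyin(), then, for locally built caches, a variable-length image-text array staged in a KHEAP_DATA_BUFFERS buffer. A condensed sketch of the code below, with error handling elided; len and layout_uaddr stand in for the computed image_array_length and user address:

    error = copyin((user_addr_t)(shared_region->sr_base_address +
        shared_region->sr_first_mapping),
        (char *)&sr_cache_header, sizeof(sr_cache_header));
    /* the UUID recorded in sr_uuid is lifted out of that header */

    if (sr_cache_header.locallyBuiltCache) {
        len = (size_t)(sr_cache_header.imagesTextCount *
            sizeof(struct _dyld_cache_image_text_info));
        sr_image_layout = kheap_alloc(KHEAP_DATA_BUFFERS, len, Z_WAITOK);
        error = copyin(layout_uaddr, (char *)sr_image_layout, len);
        /* distilled into shared_region->sr_images[], then the staging buffer goes
         * back to the same heap it came from, with the same size: */
        kheap_free(KHEAP_DATA_BUFFERS, sr_image_layout, len);
    }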
*/ - if (kr == KERN_SUCCESS && !shared_region->sr_uuid_copied) { - int error = copyin((shared_region->sr_base_address + shared_region->sr_first_mapping), + if (!shared_region->sr_uuid_copied) { + error = copyin((user_addr_t)(shared_region->sr_base_address + shared_region->sr_first_mapping), (char *)&sr_cache_header, sizeof(sr_cache_header)); if (error == 0) { @@ -1515,12 +1883,12 @@ vm_shared_region_map_file( /* Copy in the shared cache layout if we're running with a locally built shared cache */ if (sr_cache_header.locallyBuiltCache) { KDBG((MACHDBG_CODE(DBG_MACH_SHAREDREGION, PROCESS_SHARED_CACHE_LAYOUT)) | DBG_FUNC_START); - size_t image_array_length = (sr_cache_header.imagesTextCount * sizeof(struct _dyld_cache_image_text_info)); - struct _dyld_cache_image_text_info *sr_image_layout = kalloc(image_array_length); - int error = copyin((shared_region->sr_base_address + shared_region->sr_first_mapping + + image_array_length = (size_t)(sr_cache_header.imagesTextCount * sizeof(struct _dyld_cache_image_text_info)); + sr_image_layout = kheap_alloc(KHEAP_DATA_BUFFERS, image_array_length, Z_WAITOK); + error = copyin((user_addr_t)(shared_region->sr_base_address + shared_region->sr_first_mapping + sr_cache_header.imagesTextOffset), (char *)sr_image_layout, image_array_length); if (error == 0) { - shared_region->sr_images = kalloc(sr_cache_header.imagesTextCount * sizeof(struct dyld_uuid_info_64)); + shared_region->sr_images = kalloc((vm_size_t)(sr_cache_header.imagesTextCount * sizeof(struct dyld_uuid_info_64))); for (size_t index = 0; index < sr_cache_header.imagesTextCount; index++) { memcpy((char *)&shared_region->sr_images[index].imageUUID, (char *)&sr_image_layout[index].uuid, sizeof(shared_region->sr_images[index].imageUUID)); @@ -1541,33 +1909,20 @@ vm_shared_region_map_file( #endif /* DEVELOPMENT || DEBUG */ } KDBG((MACHDBG_CODE(DBG_MACH_SHAREDREGION, PROCESS_SHARED_CACHE_LAYOUT)) | DBG_FUNC_END, shared_region->sr_images_count); - kfree(sr_image_layout, image_array_length); + kheap_free(KHEAP_DATA_BUFFERS, sr_image_layout, image_array_length); sr_image_layout = NULL; } init_task_shared_region = shared_region; } - if (kr == KERN_SUCCESS) { - /* - * If we succeeded, we know the bounds of the shared region. - * Trim our pmaps to only cover this range (if applicable to - * this platform). - */ - pmap_trim(current_map()->pmap, sr_map->pmap, sfm_min_address, sfm_min_address, sfm_max_address - sfm_min_address); + /* + * If we succeeded, we know the bounds of the shared region. + * Trim our pmaps to only cover this range (if applicable to + * this platform). + */ + if (VM_MAP_PAGE_SHIFT(current_map()) == VM_MAP_PAGE_SHIFT(sr_map)) { + pmap_trim(current_map()->pmap, sr_map->pmap, sfm_min_address, sfm_max_address - sfm_min_address); } - - /* we're done working on that shared region */ - shared_region->sr_mapping_in_progress = FALSE; - thread_wakeup((event_t) &shared_region->sr_mapping_in_progress); - vm_shared_region_unlock(); - -done: - SHARED_REGION_TRACE_DEBUG( - ("shared_region: map(%p,%d,%p,%p,0x%llx) <- 0x%x \n", - (void *)VM_KERNEL_ADDRPERM(shared_region), mappings_count, - (void *)VM_KERNEL_ADDRPERM(mappings), - (void *)VM_KERNEL_ADDRPERM(file_control), file_size, kr)); - return kr; } /* @@ -1597,7 +1952,9 @@ vm_shared_region_trim_and_get(task_t task) sr_map = sr_mem_entry->backing.map; /* Trim the pmap if possible. 
*/ - pmap_trim(task->map->pmap, sr_map->pmap, 0, 0, 0); + if (VM_MAP_PAGE_SHIFT(task->map) == VM_MAP_PAGE_SHIFT(sr_map)) { + pmap_trim(task->map->pmap, sr_map->pmap, 0, 0); + } return shared_region; } @@ -1615,7 +1972,8 @@ vm_shared_region_enter( boolean_t is_64bit, void *fsroot, cpu_type_t cpu, - cpu_subtype_t cpu_subtype) + cpu_subtype_t cpu_subtype, + boolean_t reslide) { kern_return_t kr; vm_shared_region_t shared_region; @@ -1635,41 +1993,42 @@ vm_shared_region_enter( cpu, cpu_subtype, is_64bit)); /* lookup (create if needed) the shared region for this environment */ - shared_region = vm_shared_region_lookup(fsroot, cpu, cpu_subtype, is_64bit); + shared_region = vm_shared_region_lookup(fsroot, cpu, cpu_subtype, is_64bit, reslide); if (shared_region == NULL) { /* this should not happen ! */ SHARED_REGION_TRACE_ERROR( ("shared_region: -> " - "enter(map=%p,task=%p,root=%p,cpu=<%d,%d>,64bit=%d): " + "enter(map=%p,task=%p,root=%p,cpu=<%d,%d>,64bit=%d,reslide=%d): " "lookup failed !\n", (void *)VM_KERNEL_ADDRPERM(map), (void *)VM_KERNEL_ADDRPERM(task), (void *)VM_KERNEL_ADDRPERM(fsroot), - cpu, cpu_subtype, is_64bit)); + cpu, cpu_subtype, is_64bit, reslide)); //panic("shared_region_enter: lookup failed\n"); return KERN_FAILURE; } kr = KERN_SUCCESS; /* no need to lock since this data is never modified */ - sr_address = shared_region->sr_base_address; - sr_size = shared_region->sr_size; + sr_address = (vm_map_offset_t)shared_region->sr_base_address; + sr_size = (vm_map_size_t)shared_region->sr_size; sr_handle = shared_region->sr_mem_entry; - sr_pmap_nesting_start = shared_region->sr_pmap_nesting_start; - sr_pmap_nesting_size = shared_region->sr_pmap_nesting_size; + sr_pmap_nesting_start = (vm_map_offset_t)shared_region->sr_pmap_nesting_start; + sr_pmap_nesting_size = (vm_map_size_t)shared_region->sr_pmap_nesting_size; cur_prot = VM_PROT_READ; -#if __x86_64__ - /* - * XXX BINARY COMPATIBILITY - * java6 apparently needs to modify some code in the - * dyld shared cache and needs to be allowed to add - * write access... - */ - max_prot = VM_PROT_ALL; -#else /* __x86_64__ */ - max_prot = VM_PROT_READ; -#endif /* __x86_64__ */ + if (VM_MAP_POLICY_WRITABLE_SHARED_REGION(map)) { + /* + * XXX BINARY COMPATIBILITY + * java6 apparently needs to modify some code in the + * dyld shared cache and needs to be allowed to add + * write access... + */ + max_prot = VM_PROT_ALL; + } else { + max_prot = VM_PROT_READ; + } + /* * Start mapping the shared region's VM sub map into the task's VM map. 
*/ @@ -1732,8 +2091,8 @@ vm_shared_region_enter( target_address = sr_address + sr_offset; mapping_size = sr_pmap_nesting_size; - if (mapping_size > pmap_nesting_size_max) { - mapping_size = (vm_map_offset_t) pmap_nesting_size_max; + if (mapping_size > pmap_nesting_size_max(map->pmap)) { + mapping_size = (vm_map_offset_t) pmap_nesting_size_max(map->pmap); } vmk_flags = VM_MAP_KERNEL_FLAGS_NONE; vmk_flags.vmkf_nested_pmap = TRUE; @@ -1834,7 +2193,8 @@ done: (void *)VM_KERNEL_ADDRPERM(map), (void *)VM_KERNEL_ADDRPERM(task), (void *)VM_KERNEL_ADDRPERM(fsroot), - cpu, cpu_subtype, is_64bit, kr)); + cpu, cpu_subtype, is_64bit, + kr)); return kr; } @@ -1852,11 +2212,8 @@ vm_shared_region_sliding_valid(uint32_t slide) return kr; } - if ((sr->sr_slid == TRUE) && slide) { - if (slide != vm_shared_region_get_slide_info(sr)->slide) { - printf("Only one shared region can be slid\n"); - kr = KERN_FAILURE; - } else { + if (sr->sr_slide != 0 && slide != 0) { + if (slide == sr->sr_slide) { /* * Request for sliding when we've * already done it with exactly the @@ -1866,61 +2223,86 @@ vm_shared_region_sliding_valid(uint32_t slide) * so we return this value. */ kr = KERN_INVALID_ARGUMENT; + } else { + printf("Mismatched shared region slide\n"); + kr = KERN_FAILURE; } } vm_shared_region_deallocate(sr); return kr; } +/* + * Actually create (really overwrite) the mapping to part of the shared cache which + * undergoes relocation. This routine reads in the relocation info from dyld and + * verifies it. It then creates a (or finds a matching) shared region pager which + * handles the actual modification of the page contents and installs the mapping + * using that pager. + */ kern_return_t vm_shared_region_slide_mapping( vm_shared_region_t sr, + user_addr_t slide_info_addr, mach_vm_size_t slide_info_size, mach_vm_offset_t start, mach_vm_size_t size, mach_vm_offset_t slid_mapping, uint32_t slide, - memory_object_control_t sr_file_control) + memory_object_control_t sr_file_control, + vm_prot_t prot) { kern_return_t kr; - vm_object_t object; - vm_shared_region_slide_info_t si; - vm_offset_t slide_info_entry; - vm_map_entry_t slid_entry, tmp_entry; + vm_object_t object = VM_OBJECT_NULL; + vm_shared_region_slide_info_t si = NULL; + vm_map_entry_t tmp_entry = VM_MAP_ENTRY_NULL; struct vm_map_entry tmp_entry_store; - memory_object_t sr_pager; + memory_object_t sr_pager = MEMORY_OBJECT_NULL; vm_map_t sr_map; int vm_flags; vm_map_kernel_flags_t vmk_flags; vm_map_offset_t map_addr; - - tmp_entry = VM_MAP_ENTRY_NULL; - sr_pager = MEMORY_OBJECT_NULL; - object = VM_OBJECT_NULL; - slide_info_entry = 0; + void *slide_info_entry = NULL; + int error; assert(sr->sr_slide_in_progress); - assert(!sr->sr_slid); - - si = vm_shared_region_get_slide_info(sr); - assert(si->slide_object == VM_OBJECT_NULL); - assert(si->slide_info_entry == NULL); if (sr_file_control == MEMORY_OBJECT_CONTROL_NULL) { return KERN_INVALID_ARGUMENT; } + + /* + * Copy in and verify the relocation information. 
+ */ + if (slide_info_size < MIN_SLIDE_INFO_SIZE) { + printf("Slide_info_size too small: %lx\n", (uintptr_t)slide_info_size); + return KERN_FAILURE; + } if (slide_info_size > SANE_SLIDE_INFO_SIZE) { printf("Slide_info_size too large: %lx\n", (uintptr_t)slide_info_size); return KERN_FAILURE; } - kr = kmem_alloc(kernel_map, - (vm_offset_t *) &slide_info_entry, - (vm_size_t) slide_info_size, VM_KERN_MEMORY_OSFMK); - if (kr != KERN_SUCCESS) { - return kr; + slide_info_entry = kheap_alloc(KHEAP_DATA_BUFFERS, (vm_size_t)slide_info_size, Z_WAITOK); + if (slide_info_entry == NULL) { + return KERN_RESOURCE_SHORTAGE; + } + error = copyin(slide_info_addr, slide_info_entry, (size_t)slide_info_size); + if (error) { + printf("copyin of slide_info failed\n"); + kr = KERN_INVALID_ADDRESS; + goto done; + } + + if ((kr = vm_shared_region_slide_sanity_check(slide_info_entry, slide_info_size)) != KERN_SUCCESS) { + printf("Sanity Check failed for slide_info\n"); + goto done; } + /* + * Allocate and fill in a vm_shared_region_slide_info. + * This will either be used by a new pager, or used to find + * a pre-existing matching pager. + */ object = memory_object_control_to_vm_object(sr_file_control); if (object == VM_OBJECT_NULL || object->internal) { object = VM_OBJECT_NULL; @@ -1928,57 +2310,76 @@ vm_shared_region_slide_mapping( goto done; } + si = kalloc(sizeof(*si)); + if (si == NULL) { + kr = KERN_RESOURCE_SHORTAGE; + goto done; + } vm_object_lock(object); + vm_object_reference_locked(object); /* for si->slide_object */ object->object_is_shared_cache = TRUE; vm_object_unlock(object); - si->slide_info_entry = (vm_shared_region_slide_info_entry_t)slide_info_entry; - si->slide_info_size = slide_info_size; + si->si_slide_info_entry = slide_info_entry; + si->si_slide_info_size = slide_info_size; assert(slid_mapping != (mach_vm_offset_t) -1); - si->slid_address = slid_mapping + sr->sr_base_address; - si->slide_object = object; - si->start = start; - si->end = si->start + size; - si->slide = slide; -#if defined(HAS_APPLE_PAC) + si->si_slid_address = slid_mapping + sr->sr_base_address; + si->si_slide_object = object; + si->si_start = start; + si->si_end = si->si_start + size; + si->si_slide = slide; +#if __has_feature(ptrauth_calls) + /* + * If there is authenticated pointer data in this slid mapping, + * then just add the information needed to create new pagers for + * different shared_region_id's later. + */ if (sr->sr_cpu_type == CPU_TYPE_ARM64 && - sr->sr_cpu_subtype == CPU_SUBTYPE_ARM64E) { - /* arm64e has pointer authentication */ + sr->sr_cpu_subtype == CPU_SUBTYPE_ARM64E && + !(prot & VM_PROT_NOAUTH)) { + if (sr->sr_num_auth_section == NUM_SR_AUTH_SECTIONS) { + printf("Too many auth/private sections for shared region!!\n"); + kr = KERN_INVALID_ARGUMENT; + goto done; + } si->si_ptrauth = TRUE; + sr->sr_auth_section[sr->sr_num_auth_section++] = si; + /* + * Remember the shared region, since that's where we'll + * stash this info for all auth pagers to share. Each pager + * will need to take a reference to it. 
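As a rough sketch of how the stashed sections are consumed later (editor's pseudo-code; the real work is done by vm_shared_region_auth_remap(), which is declared in this patch but not shown here, so no exact calls are given):

/*
 *   for (i = 0; i < sr->sr_num_auth_section; i++) {
 *           si = sr->sr_auth_section[i];
 *           // build or reuse a shared-region pager signed with the task's JOP key,
 *           // then map it over [si->si_slid_address,
 *           //                   si->si_slid_address + (si->si_end - si->si_start))
 *   }
 */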
+ */ + si->si_shared_region = sr; + kr = KERN_SUCCESS; + goto done; } -#endif /* HAS_APPLE_PAC */ + si->si_shared_region = NULL; + si->si_ptrauth = FALSE; +#else /* __has_feature(ptrauth_calls) */ + (void)prot; /* silence unused warning */ +#endif /* __has_feature(ptrauth_calls) */ - /* find the shared region's map entry to slide */ + /* + * find the pre-existing shared region's map entry to slide + */ sr_map = vm_shared_region_vm_map(sr); - vm_map_lock_read(sr_map); - if (!vm_map_lookup_entry(sr_map, - slid_mapping, - &slid_entry)) { - /* no mapping there */ - vm_map_unlock(sr_map); - kr = KERN_INVALID_ARGUMENT; + kr = find_mapping_to_slide(sr_map, (vm_map_address_t)slid_mapping, &tmp_entry_store); + if (kr != KERN_SUCCESS) { goto done; } + tmp_entry = &tmp_entry_store; + /* - * We might want to clip the entry to cover only the portion that - * needs sliding (offsets si->start to si->end in the shared cache - * file at the bottom of the shadow chain). - * In practice, it seems to cover the entire DATA segment... + * The object must exactly cover the region to slide. */ - tmp_entry_store = *slid_entry; - tmp_entry = &tmp_entry_store; - slid_entry = VM_MAP_ENTRY_NULL; - /* extra ref to keep object alive while map is unlocked */ - vm_object_reference(VME_OBJECT(tmp_entry)); - vm_map_unlock_read(sr_map); + assert(VME_OFFSET(tmp_entry) == start); + assert(tmp_entry->vme_end - tmp_entry->vme_start == size); /* create a "shared_region" sliding pager */ - sr_pager = shared_region_pager_setup(VME_OBJECT(tmp_entry), - VME_OFFSET(tmp_entry), - si); - if (sr_pager == NULL) { + sr_pager = shared_region_pager_setup(VME_OBJECT(tmp_entry), VME_OFFSET(tmp_entry), si, 0); + if (sr_pager == MEMORY_OBJECT_NULL) { kr = KERN_RESOURCE_SHORTAGE; goto done; } @@ -1990,8 +2391,7 @@ vm_shared_region_slide_mapping( map_addr = tmp_entry->vme_start; kr = vm_map_enter_mem_object(sr_map, &map_addr, - (tmp_entry->vme_end - - tmp_entry->vme_start), + (tmp_entry->vme_end - tmp_entry->vme_start), (mach_vm_offset_t) 0, vm_flags, vmk_flags, @@ -2013,17 +2413,15 @@ vm_shared_region_slide_mapping( kr = KERN_SUCCESS; done: - if (sr_pager) { + if (sr_pager != NULL) { /* - * Release the sr_pager reference obtained by - * shared_region_pager_setup(). - * The mapping (if it succeeded) is now holding a reference on - * the memory object. + * Release the sr_pager reference obtained by shared_region_pager_setup(). + * The mapping, if it succeeded, is now holding a reference on the memory object. 
*/ memory_object_deallocate(sr_pager); sr_pager = MEMORY_OBJECT_NULL; } - if (tmp_entry) { + if (tmp_entry != NULL) { /* release extra ref on tmp_entry's VM object */ vm_object_deallocate(VME_OBJECT(tmp_entry)); tmp_entry = VM_MAP_ENTRY_NULL; @@ -2031,46 +2429,31 @@ done: if (kr != KERN_SUCCESS) { /* cleanup */ - if (slide_info_entry) { - kmem_free(kernel_map, slide_info_entry, slide_info_size); - slide_info_entry = 0; + if (si != NULL) { + if (si->si_slide_object) { + vm_object_deallocate(si->si_slide_object); + si->si_slide_object = VM_OBJECT_NULL; + } + kfree(si, sizeof(*si)); + si = NULL; } - if (si->slide_object) { - vm_object_deallocate(si->slide_object); - si->slide_object = VM_OBJECT_NULL; + if (slide_info_entry != NULL) { + kheap_free(KHEAP_DATA_BUFFERS, slide_info_entry, (vm_size_t)slide_info_size); + slide_info_entry = NULL; } } return kr; } -void* -vm_shared_region_get_slide_info_entry(vm_shared_region_t sr) -{ - return (void*)sr->sr_slide_info.slide_info_entry; -} - static kern_return_t -vm_shared_region_slide_sanity_check_v1(vm_shared_region_slide_info_entry_v1_t s_info) +vm_shared_region_slide_sanity_check_v2( + vm_shared_region_slide_info_entry_v2_t s_info, + mach_vm_size_t slide_info_size) { - uint32_t pageIndex = 0; - uint16_t entryIndex = 0; - uint16_t *toc = NULL; - - toc = (uint16_t*)((uintptr_t)s_info + s_info->toc_offset); - for (; pageIndex < s_info->toc_count; pageIndex++) { - entryIndex = (uint16_t)(toc[pageIndex]); - - if (entryIndex >= s_info->entry_count) { - printf("No sliding bitmap entry for pageIndex: %d at entryIndex: %d amongst %d entries\n", pageIndex, entryIndex, s_info->entry_count); - return KERN_FAILURE; - } + if (slide_info_size < sizeof(struct vm_shared_region_slide_info_entry_v2)) { + printf("%s bad slide_info_size: %lx\n", __func__, (uintptr_t)slide_info_size); + return KERN_FAILURE; } - return KERN_SUCCESS; -} - -static kern_return_t -vm_shared_region_slide_sanity_check_v2(vm_shared_region_slide_info_entry_v2_t s_info, mach_vm_size_t slide_info_size) -{ if (s_info->page_size != PAGE_SIZE_FOR_SR_SLIDE) { return KERN_FAILURE; } @@ -2103,8 +2486,14 @@ vm_shared_region_slide_sanity_check_v2(vm_shared_region_slide_info_entry_v2_t s_ } static kern_return_t -vm_shared_region_slide_sanity_check_v3(vm_shared_region_slide_info_entry_v3_t s_info, mach_vm_size_t slide_info_size) +vm_shared_region_slide_sanity_check_v3( + vm_shared_region_slide_info_entry_v3_t s_info, + mach_vm_size_t slide_info_size) { + if (slide_info_size < sizeof(struct vm_shared_region_slide_info_entry_v3)) { + printf("%s bad slide_info_size: %lx\n", __func__, (uintptr_t)slide_info_size); + return KERN_FAILURE; + } if (s_info->page_size != PAGE_SIZE_FOR_SR_SLIDE) { printf("vm_shared_region_slide_sanity_check_v3: s_info->page_size != PAGE_SIZE_FOR_SR_SL 0x%llx != 0x%llx\n", (uint64_t)s_info->page_size, (uint64_t)PAGE_SIZE_FOR_SR_SLIDE); return KERN_FAILURE; @@ -2128,8 +2517,14 @@ vm_shared_region_slide_sanity_check_v3(vm_shared_region_slide_info_entry_v3_t s_ } static kern_return_t -vm_shared_region_slide_sanity_check_v4(vm_shared_region_slide_info_entry_v4_t s_info, mach_vm_size_t slide_info_size) +vm_shared_region_slide_sanity_check_v4( + vm_shared_region_slide_info_entry_v4_t s_info, + mach_vm_size_t slide_info_size) { + if (slide_info_size < sizeof(struct vm_shared_region_slide_info_entry_v4)) { + printf("%s bad slide_info_size: %lx\n", __func__, (uintptr_t)slide_info_size); + return KERN_FAILURE; + } if (s_info->page_size != PAGE_SIZE_FOR_SR_SLIDE) { return KERN_FAILURE; } @@ 
-2162,111 +2557,27 @@ vm_shared_region_slide_sanity_check_v4(vm_shared_region_slide_info_entry_v4_t s_ } -kern_return_t -vm_shared_region_slide_sanity_check(vm_shared_region_t sr) -{ - vm_shared_region_slide_info_t si; - vm_shared_region_slide_info_entry_t s_info; - kern_return_t kr; - - si = vm_shared_region_get_slide_info(sr); - s_info = si->slide_info_entry; - - kr = mach_vm_protect(kernel_map, - (mach_vm_offset_t)(vm_offset_t)s_info, - (mach_vm_size_t) si->slide_info_size, - TRUE, VM_PROT_READ); - if (kr != KERN_SUCCESS) { - panic("vm_shared_region_slide_sanity_check: vm_protect() error 0x%x\n", kr); - } - - if (s_info->version == 1) { - kr = vm_shared_region_slide_sanity_check_v1(&s_info->v1); - } else if (s_info->version == 2) { - kr = vm_shared_region_slide_sanity_check_v2(&s_info->v2, si->slide_info_size); - } else if (s_info->version == 3) { - kr = vm_shared_region_slide_sanity_check_v3(&s_info->v3, si->slide_info_size); - } else if (s_info->version == 4) { - kr = vm_shared_region_slide_sanity_check_v4(&s_info->v4, si->slide_info_size); - } else { - goto fail; - } - if (kr != KERN_SUCCESS) { - goto fail; - } - - return KERN_SUCCESS; -fail: - if (si->slide_info_entry != NULL) { - kmem_free(kernel_map, - (vm_offset_t) si->slide_info_entry, - (vm_size_t) si->slide_info_size); - - vm_object_deallocate(si->slide_object); - si->slide_object = NULL; - si->start = 0; - si->end = 0; - si->slide = 0; - si->slide_info_entry = NULL; - si->slide_info_size = 0; - } - return KERN_FAILURE; -} - static kern_return_t -vm_shared_region_slide_page_v1(vm_shared_region_slide_info_t si, vm_offset_t vaddr, uint32_t pageIndex) +vm_shared_region_slide_sanity_check( + vm_shared_region_slide_info_entry_t s_info, + mach_vm_size_t s_info_size) { - uint16_t *toc = NULL; - slide_info_entry_toc_t bitmap = NULL; - uint32_t i = 0, j = 0; - uint8_t b = 0; - uint32_t slide = si->slide; - int is_64 = task_has_64Bit_addr(current_task()); - - vm_shared_region_slide_info_entry_v1_t s_info = &si->slide_info_entry->v1; - toc = (uint16_t*)((uintptr_t)s_info + s_info->toc_offset); - - if (pageIndex >= s_info->toc_count) { - printf("No slide entry for this page in toc. PageIndex: %d Toc Count: %d\n", pageIndex, s_info->toc_count); - } else { - uint16_t entryIndex = (uint16_t)(toc[pageIndex]); - slide_info_entry_toc_t slide_info_entries = (slide_info_entry_toc_t)((uintptr_t)s_info + s_info->entry_offset); + kern_return_t kr; - if (entryIndex >= s_info->entry_count) { - printf("No sliding bitmap entry for entryIndex: %d amongst %d entries\n", entryIndex, s_info->entry_count); - } else { - bitmap = &slide_info_entries[entryIndex]; - - for (i = 0; i < NUM_SLIDING_BITMAPS_PER_PAGE; ++i) { - b = bitmap->entry[i]; - if (b != 0) { - for (j = 0; j < 8; ++j) { - if (b & (1 << j)) { - uint32_t *ptr_to_slide; - uint32_t old_value; - - ptr_to_slide = (uint32_t*)((uintptr_t)(vaddr) + (sizeof(uint32_t) * (i * 8 + j))); - old_value = *ptr_to_slide; - *ptr_to_slide += slide; - if (is_64 && *ptr_to_slide < old_value) { - /* - * We just slid the low 32 bits of a 64-bit pointer - * and it looks like there should have been a carry-over - * to the upper 32 bits. - * The sliding failed... 
- */ - printf("vm_shared_region_slide() carry over: i=%d j=%d b=0x%x slide=0x%x old=0x%x new=0x%x\n", - i, j, b, slide, old_value, *ptr_to_slide); - return KERN_FAILURE; - } - } - } - } - } - } + switch (s_info->version) { + case 2: + kr = vm_shared_region_slide_sanity_check_v2(&s_info->v2, s_info_size); + break; + case 3: + kr = vm_shared_region_slide_sanity_check_v3(&s_info->v3, s_info_size); + break; + case 4: + kr = vm_shared_region_slide_sanity_check_v4(&s_info->v4, s_info_size); + break; + default: + kr = KERN_FAILURE; } - - return KERN_SUCCESS; + return kr; } static kern_return_t @@ -2391,8 +2702,8 @@ rebase_chain( static kern_return_t vm_shared_region_slide_page_v2(vm_shared_region_slide_info_t si, vm_offset_t vaddr, uint32_t pageIndex) { - vm_shared_region_slide_info_entry_v2_t s_info = &si->slide_info_entry->v2; - const uint32_t slide_amount = si->slide; + vm_shared_region_slide_info_entry_v2_t s_info = &si->si_slide_info_entry->v2; + const uint32_t slide_amount = si->si_slide; /* The high bits of the delta_mask field are nonzero precisely when the shared * cache is 64-bit. */ @@ -2429,7 +2740,7 @@ vm_shared_region_slide_page_v2(vm_shared_region_slide_info_t si, vm_offset_t vad return KERN_FAILURE; } info = page_extras[chain_index]; - page_start_offset = (info & DYLD_CACHE_SLIDE_PAGE_VALUE) << DYLD_CACHE_SLIDE_PAGE_OFFSET_SHIFT; + page_start_offset = (uint16_t)((info & DYLD_CACHE_SLIDE_PAGE_VALUE) << DYLD_CACHE_SLIDE_PAGE_OFFSET_SHIFT); kr = rebase_chain(is_64, pageIndex, page_content, page_start_offset, slide_amount, s_info); if (kr != KERN_SUCCESS) { @@ -2439,7 +2750,7 @@ vm_shared_region_slide_page_v2(vm_shared_region_slide_info_t si, vm_offset_t vad chain_index++; } while (!(info & DYLD_CACHE_SLIDE_PAGE_ATTR_END)); } else { - const uint32_t page_start_offset = page_entry << DYLD_CACHE_SLIDE_PAGE_OFFSET_SHIFT; + const uint16_t page_start_offset = (uint16_t)(page_entry << DYLD_CACHE_SLIDE_PAGE_OFFSET_SHIFT); kern_return_t kr; kr = rebase_chain(is_64, pageIndex, page_content, page_start_offset, slide_amount, s_info); @@ -2453,10 +2764,18 @@ vm_shared_region_slide_page_v2(vm_shared_region_slide_info_t si, vm_offset_t vad static kern_return_t -vm_shared_region_slide_page_v3(vm_shared_region_slide_info_t si, vm_offset_t vaddr, __unused mach_vm_offset_t uservaddr, uint32_t pageIndex) +vm_shared_region_slide_page_v3( + vm_shared_region_slide_info_t si, + vm_offset_t vaddr, + __unused mach_vm_offset_t uservaddr, + uint32_t pageIndex, +#if !__has_feature(ptrauth_calls) + __unused +#endif /* !__has_feature(ptrauth_calls) */ + uint64_t jop_key) { - vm_shared_region_slide_info_entry_v3_t s_info = &si->slide_info_entry->v3; - const uint32_t slide_amount = si->slide; + vm_shared_region_slide_info_entry_v3_t s_info = &si->si_slide_info_entry->v3; + const uint32_t slide_amount = si->si_slide; uint8_t *page_content = (uint8_t *)vaddr; uint16_t page_entry; @@ -2503,11 +2822,11 @@ vm_shared_region_slide_page_v3(vm_shared_region_slide_info_t si, vm_offset_t vad return KERN_FAILURE; } -#if defined(HAS_APPLE_PAC) +#if __has_feature(ptrauth_calls) uint16_t diversity_data = (uint16_t)(value >> 32); bool hasAddressDiversity = (value & (1ULL << 48)) != 0; ptrauth_key key = (ptrauth_key)((value >> 49) & 0x3); -#endif /* HAS_APPLE_PAC */ +#endif /* __has_feature(ptrauth_calls) */ bool isAuthenticated = (value & (1ULL << 63)) != 0; if (isAuthenticated) { @@ -2517,7 +2836,7 @@ vm_shared_region_slide_page_v3(vm_shared_region_slide_info_t si, vm_offset_t vad const uint64_t value_add = s_info->value_add; 
value += value_add; -#if defined(HAS_APPLE_PAC) +#if __has_feature(ptrauth_calls) uint64_t discriminator = diversity_data; if (hasAddressDiversity) { // First calculate a new discriminator using the address of where we are trying to store the value @@ -2525,15 +2844,14 @@ vm_shared_region_slide_page_v3(vm_shared_region_slide_info_t si, vm_offset_t vad discriminator = __builtin_ptrauth_blend_discriminator((void*)(((uintptr_t)uservaddr) + pageOffset), discriminator); } - if (si->si_ptrauth && - !(BootArgs->bootFlags & kBootFlagsDisableUserJOP)) { + if (jop_key != 0 && si->si_ptrauth && !arm_user_jop_disabled()) { /* * these pointers are used in user mode. disable the kernel key diversification * so we can sign them for use in user mode. */ - value = (uintptr_t)pmap_sign_user_ptr((void *)value, key, discriminator); + value = (uintptr_t)pmap_sign_user_ptr((void *)value, key, discriminator, jop_key); } -#endif /* HAS_APPLE_PAC */ +#endif /* __has_feature(ptrauth_calls) */ } else { // The new value for a rebase is the low 51-bits of the threaded value plus the slide. // Regular pointer which needs to fit in 51-bits of value. @@ -2601,8 +2919,8 @@ rebase_chainv4( static kern_return_t vm_shared_region_slide_page_v4(vm_shared_region_slide_info_t si, vm_offset_t vaddr, uint32_t pageIndex) { - vm_shared_region_slide_info_entry_v4_t s_info = &si->slide_info_entry->v4; - const uint32_t slide_amount = si->slide; + vm_shared_region_slide_info_entry_v4_t s_info = &si->si_slide_info_entry->v4; + const uint32_t slide_amount = si->si_slide; const uint16_t *page_starts = (uint16_t *)((uintptr_t)s_info + s_info->page_starts_offset); const uint16_t *page_extras = (uint16_t *)((uintptr_t)s_info + s_info->page_extras_offset); @@ -2635,7 +2953,7 @@ vm_shared_region_slide_page_v4(vm_shared_region_slide_info_t si, vm_offset_t vad return KERN_FAILURE; } info = page_extras[chain_index]; - page_start_offset = (info & DYLD_CACHE_SLIDE4_PAGE_INDEX) << DYLD_CACHE_SLIDE_PAGE_OFFSET_SHIFT; + page_start_offset = (uint16_t)((info & DYLD_CACHE_SLIDE4_PAGE_INDEX) << DYLD_CACHE_SLIDE_PAGE_OFFSET_SHIFT); kr = rebase_chainv4(page_content, page_start_offset, slide_amount, s_info); if (kr != KERN_SUCCESS) { @@ -2645,7 +2963,7 @@ vm_shared_region_slide_page_v4(vm_shared_region_slide_info_t si, vm_offset_t vad chain_index++; } while (!(info & DYLD_CACHE_SLIDE4_PAGE_EXTRA_END)); } else { - const uint32_t page_start_offset = page_entry << DYLD_CACHE_SLIDE_PAGE_OFFSET_SHIFT; + const uint16_t page_start_offset = (uint16_t)(page_entry << DYLD_CACHE_SLIDE_PAGE_OFFSET_SHIFT); kern_return_t kr; kr = rebase_chainv4(page_content, page_start_offset, slide_amount, s_info); @@ -2660,17 +2978,21 @@ vm_shared_region_slide_page_v4(vm_shared_region_slide_info_t si, vm_offset_t vad kern_return_t -vm_shared_region_slide_page(vm_shared_region_slide_info_t si, vm_offset_t vaddr, mach_vm_offset_t uservaddr, uint32_t pageIndex) +vm_shared_region_slide_page( + vm_shared_region_slide_info_t si, + vm_offset_t vaddr, + mach_vm_offset_t uservaddr, + uint32_t pageIndex, + uint64_t jop_key) { - if (si->slide_info_entry->version == 1) { - return vm_shared_region_slide_page_v1(si, vaddr, pageIndex); - } else if (si->slide_info_entry->version == 2) { + switch (si->si_slide_info_entry->version) { + case 2: return vm_shared_region_slide_page_v2(si, vaddr, pageIndex); - } else if (si->slide_info_entry->version == 3) { - return vm_shared_region_slide_page_v3(si, vaddr, uservaddr, pageIndex); - } else if (si->slide_info_entry->version == 4) { + case 3: + return 
vm_shared_region_slide_page_v3(si, vaddr, uservaddr, pageIndex, jop_key); + case 4: return vm_shared_region_slide_page_v4(si, vaddr, pageIndex); - } else { + default: return KERN_FAILURE; } } @@ -2693,8 +3015,8 @@ vm_named_entry_t commpage_text64_entry = NULL; vm_map_t commpage_text32_map = VM_MAP_NULL; vm_map_t commpage_text64_map = VM_MAP_NULL; -user32_addr_t commpage_text32_location = (user32_addr_t) _COMM_PAGE32_TEXT_START; -user64_addr_t commpage_text64_location = (user64_addr_t) _COMM_PAGE64_TEXT_START; +user32_addr_t commpage_text32_location = 0; +user64_addr_t commpage_text64_location = 0; #if defined(__i386__) || defined(__x86_64__) /* @@ -2739,7 +3061,6 @@ _vm_commpage_init( /* * Initialize the comm text pages at boot time */ -extern u_int32_t random(void); void vm_commpage_text_init(void) { @@ -2759,12 +3080,10 @@ vm_commpage_text_init(void) commpage_text64_entry = (vm_named_entry_t) ip_get_kobject(commpage_text64_handle); commpage_text64_map = commpage_text64_entry->backing.map; commpage_text64_location = (user64_addr_t) (_COMM_PAGE64_TEXT_START + offset); +#endif commpage_text_populate(); -#elif defined(__arm64__) || defined(__arm__) -#else -#error Unknown architecture. -#endif /* __i386__ || __x86_64__ */ + /* populate the routines in here */ SHARED_REGION_TRACE_DEBUG( ("commpage text: init() <-\n")); @@ -2868,8 +3187,8 @@ vm_commpage_enter( } vm_tag_t tag = VM_KERN_MEMORY_NONE; - if ((commpage_address & (pmap_nesting_size_min - 1)) == 0 && - (commpage_size & (pmap_nesting_size_min - 1)) == 0) { + if ((commpage_address & (pmap_commpage_size_min(map->pmap) - 1)) == 0 && + (commpage_size & (pmap_commpage_size_min(map->pmap) - 1)) == 0) { /* the commpage is properly aligned or sized for pmap-nesting */ tag = VM_MEMORY_SHARED_PMAP; vmk_flags.vmkf_nested_pmap = TRUE; @@ -2964,17 +3283,18 @@ vm_commpage_enter( } int -vm_shared_region_slide(uint32_t slide, - mach_vm_offset_t entry_start_address, - mach_vm_size_t entry_size, - mach_vm_offset_t slide_start, - mach_vm_size_t slide_size, - mach_vm_offset_t slid_mapping, - memory_object_control_t sr_file_control) +vm_shared_region_slide( + uint32_t slide, + mach_vm_offset_t entry_start_address, + mach_vm_size_t entry_size, + mach_vm_offset_t slide_start, + mach_vm_size_t slide_size, + mach_vm_offset_t slid_mapping, + memory_object_control_t sr_file_control, + vm_prot_t prot) { - void *slide_info_entry = NULL; - int error; vm_shared_region_t sr; + kern_return_t error; SHARED_REGION_TRACE_DEBUG( ("vm_shared_region_slide: -> slide %#x, entry_start %#llx, entry_size %#llx, slide_start %#llx, slide_size %#llx\n", @@ -2996,84 +3316,34 @@ vm_shared_region_slide(uint32_t slide, while (sr->sr_slide_in_progress) { vm_shared_region_sleep(&sr->sr_slide_in_progress, THREAD_UNINT); } - if (sr->sr_slid -#ifndef CONFIG_EMBEDDED - || shared_region_completed_slide -#endif - ) { - vm_shared_region_unlock(); - - vm_shared_region_deallocate(sr); - printf("%s: shared region already slid?\n", __FUNCTION__); - SHARED_REGION_TRACE_DEBUG( - ("vm_shared_region_slide: <- %d (already slid)\n", - KERN_FAILURE)); - return KERN_FAILURE; - } sr->sr_slide_in_progress = TRUE; vm_shared_region_unlock(); error = vm_shared_region_slide_mapping(sr, + (user_addr_t)slide_start, slide_size, entry_start_address, entry_size, slid_mapping, slide, - sr_file_control); + sr_file_control, + prot); if (error) { printf("slide_info initialization failed with kr=%d\n", error); - goto done; - } - - slide_info_entry = vm_shared_region_get_slide_info_entry(sr); - if (slide_info_entry == NULL) 
{ - error = KERN_FAILURE; - } else { - error = copyin((user_addr_t)slide_start, - slide_info_entry, - (vm_size_t)slide_size); - if (error) { - error = KERN_INVALID_ADDRESS; - } - } - if (error) { - goto done; } - if (vm_shared_region_slide_sanity_check(sr) != KERN_SUCCESS) { - error = KERN_INVALID_ARGUMENT; - printf("Sanity Check failed for slide_info\n"); - } else { -#if DEBUG - printf("Succesfully init slide_info with start_address: %p region_size: %ld slide_header_size: %ld\n", - (void*)(uintptr_t)entry_start_address, - (unsigned long)entry_size, - (unsigned long)slide_size); -#endif - } -done: vm_shared_region_lock(); assert(sr->sr_slide_in_progress); - assert(sr->sr_slid == FALSE); sr->sr_slide_in_progress = FALSE; thread_wakeup(&sr->sr_slide_in_progress); - if (error == KERN_SUCCESS) { - sr->sr_slid = TRUE; - - /* - * We don't know how to tear down a slid shared region today, because - * we would have to invalidate all the pages that have been slid - * atomically with respect to anyone mapping the shared region afresh. - * Therefore, take a dangling reference to prevent teardown. - */ - sr->sr_ref_count++; #ifndef CONFIG_EMBEDDED + if (error == KERN_SUCCESS) { shared_region_completed_slide = TRUE; -#endif } +#endif vm_shared_region_unlock(); vm_shared_region_deallocate(sr); @@ -3085,6 +3355,111 @@ done: return error; } +/* + * Used during Authenticated Root Volume macOS boot. + * Launchd re-execs itself and wants the new launchd to use + * the shared cache from the new root volume. This call + * makes all the existing shared caches stale to allow + * that to happen. + */ +void +vm_shared_region_pivot(void) +{ + vm_shared_region_t shared_region = NULL; + + vm_shared_region_lock(); + + queue_iterate(&vm_shared_region_queue, shared_region, vm_shared_region_t, sr_q) { + assert(shared_region->sr_ref_count > 0); + shared_region->sr_stale = TRUE; + if (shared_region->sr_timer_call) { + /* + * We have a shared region ready to be destroyed + * and just waiting for a delayed timer to fire. + * Marking it stale cements its ineligibility to + * be used ever again. So let's shorten the timer + * aggressively down to 10 milliseconds and get rid of it. + * This is a single quantum and we don't need to go + * shorter than this duration. We want it to be short + * enough, however, because we could have an unmount + * of the volume hosting this shared region just behind + * us. + */ + uint64_t deadline; + assert(shared_region->sr_ref_count == 1); + + /* + * Free the old timer call. Returns with a reference held. + * If the old timer has fired and is waiting for the vm_shared_region_lock + * lock, we will just return with an additional ref_count i.e. 2. + * The old timer will then fire and just drop the ref count down to 1 + * with no other modifications. + */ + vm_shared_region_reference_locked(shared_region); + + /* set up the timer. Keep the reference from above for this timer.*/ + shared_region->sr_timer_call = thread_call_allocate( + (thread_call_func_t) vm_shared_region_timeout, + (thread_call_param_t) shared_region); + + /* schedule the timer */ + clock_interval_to_deadline(10, /* 10 milliseconds */ + NSEC_PER_MSEC, + &deadline); + thread_call_enter_delayed(shared_region->sr_timer_call, + deadline); + + SHARED_REGION_TRACE_DEBUG( + ("shared_region: pivot(%p): armed timer\n", + (void *)VM_KERNEL_ADDRPERM(shared_region))); + } + } + + vm_shared_region_unlock(); +} + +/* + * Routine to mark any non-standard slide shared cache region as stale. 
+ * This causes the next "reslide" spawn to create a new shared region. + */ +void +vm_shared_region_reslide_stale(void) +{ +#if __has_feature(ptrauth_calls) + vm_shared_region_t shared_region = NULL; + + vm_shared_region_lock(); + + queue_iterate(&vm_shared_region_queue, shared_region, vm_shared_region_t, sr_q) { + assert(shared_region->sr_ref_count > 0); + if (!shared_region->sr_stale && shared_region->sr_reslide) { + shared_region->sr_stale = TRUE; + vm_shared_region_reslide_count++; + } + } + + vm_shared_region_unlock(); +#endif /* __has_feature(ptrauth_calls) */ +} + +/* + * report if the task is using a reslide shared cache region. + */ +bool +vm_shared_region_is_reslide(__unused struct task *task) +{ + bool is_reslide = FALSE; +#if !XNU_TARGET_OS_OSX && __has_feature(ptrauth_calls) + vm_shared_region_t sr = vm_shared_region_get(task); + + if (sr != NULL) { + is_reslide = sr->sr_reslide; + vm_shared_region_deallocate(sr); + } +#endif /* !XNU_TARGET_OS_OSX && __has_feature(ptrauth_calls) */ + return is_reslide; +} + /* * This is called from powermanagement code to let kernel know the current source of power. * 0 if it is external source (connected to power ) @@ -3110,13 +3485,5 @@ post_sys_powersource_internal(int i, int internal) if (internal == 0) { __system_power_source = i; } - - if (__commpage_setup != 0) { - if (__system_power_source != 0) { - commpage_set_spin_count(0); - } else { - commpage_set_spin_count(MP_SPIN_TRIES); - } - } } #endif diff --git a/osfmk/vm/vm_shared_region.h b/osfmk/vm/vm_shared_region.h index 95fe9fa54..ffb9bb0cb 100644 --- a/osfmk/vm/vm_shared_region.h +++ b/osfmk/vm/vm_shared_region.h @@ -100,23 +100,6 @@ typedef struct vm_shared_region *vm_shared_region_t; /* Documentation for the slide info format can be found in the dyld project in * the file 'launch-cache/dyld_cache_format.h'. 
*/ -typedef struct vm_shared_region_slide_info_entry_v1 *vm_shared_region_slide_info_entry_v1_t; -struct vm_shared_region_slide_info_entry_v1 { - uint32_t version; - uint32_t toc_offset; // offset from start of header to table-of-contents - uint32_t toc_count; // number of entries in toc (same as number of pages in r/w mapping) - uint32_t entry_offset; - uint32_t entry_count; - // uint16_t toc[toc_count]; - // entrybitmap entries[entries_count]; -}; - -#define NBBY 8 -#define NUM_SLIDING_BITMAPS_PER_PAGE (0x1000/sizeof(int)/NBBY) /*128*/ -typedef struct slide_info_entry_toc *slide_info_entry_toc_t; -struct slide_info_entry_toc { - uint8_t entry[NUM_SLIDING_BITMAPS_PER_PAGE]; -}; typedef struct vm_shared_region_slide_info_entry_v2 *vm_shared_region_slide_info_entry_v2_t; struct vm_shared_region_slide_info_entry_v2 { @@ -174,39 +157,53 @@ struct vm_shared_region_slide_info_entry_v4 { typedef union vm_shared_region_slide_info_entry *vm_shared_region_slide_info_entry_t; union vm_shared_region_slide_info_entry { - uint32_t version; - struct vm_shared_region_slide_info_entry_v1 v1; - struct vm_shared_region_slide_info_entry_v2 v2; - struct vm_shared_region_slide_info_entry_v3 v3; + uint32_t version; + struct vm_shared_region_slide_info_entry_v2 v2; + struct vm_shared_region_slide_info_entry_v3 v3; struct vm_shared_region_slide_info_entry_v4 v4; }; -typedef struct vm_shared_region_slide_info *vm_shared_region_slide_info_t; -struct vm_shared_region_slide_info { - mach_vm_address_t slid_address; - mach_vm_offset_t start; - mach_vm_offset_t end; - uint32_t slide; -#if defined(HAS_APPLE_PAC) - boolean_t si_ptrauth; -#endif /* HAS_APPLE_PAC */ - vm_object_t slide_object; - mach_vm_size_t slide_info_size; - vm_shared_region_slide_info_entry_t slide_info_entry; -}; +#define MIN_SLIDE_INFO_SIZE \ + MIN(sizeof(struct vm_shared_region_slide_info_entry_v2), \ + MIN(sizeof(struct vm_shared_region_slide_info_entry_v3), \ + sizeof(struct vm_shared_region_slide_info_entry_v4))) + +/* + * This is the information used by the shared cache pager for sub-sections + * which must be modified for relocations and/or pointer authentications + * before it can be used. The shared_region_pager gets source pages from + * the shared cache file and modifies them -- see shared_region_pager_data_request(). + * + * A single pager may be used from multiple shared regions provided: + * - same si_slide_object, si_start, si_end, si_slide, si_ptrauth and si_jop_key + * - The size and contents of si_slide_info_entry are the same. + */ +typedef struct vm_shared_region_slide_info { + uint32_t si_slide; /* the distance that the file data is relocated */ + bool si_slid; +#if __has_feature(ptrauth_calls) + bool si_ptrauth; + uint64_t si_jop_key; + struct vm_shared_region *si_shared_region; /* so we can ref/dealloc for authenticated slide info */ +#endif /* __has_feature(ptrauth_calls) */ + mach_vm_address_t si_slid_address; + mach_vm_offset_t si_start; /* start offset in si_slide_object */ + mach_vm_offset_t si_end; + vm_object_t si_slide_object; /* The source object for the pages to be modified */ + mach_vm_size_t si_slide_info_size; /* size of dyld provided relocation information */ + vm_shared_region_slide_info_entry_t si_slide_info_entry; /* dyld provided relocation information */ +} *vm_shared_region_slide_info_t; -/* address space shared region descriptor */ +/* + * Data structure that represents a unique shared cache region. 
+ */ struct vm_shared_region { uint32_t sr_ref_count; + uint32_t sr_slide; queue_chain_t sr_q; void *sr_root_dir; cpu_type_t sr_cpu_type; cpu_subtype_t sr_cpu_subtype; - boolean_t sr_64bit; - boolean_t sr_mapping_in_progress; - boolean_t sr_slide_in_progress; - boolean_t sr_persists; - boolean_t sr_slid; ipc_port_t sr_mem_entry; mach_vm_offset_t sr_first_mapping; mach_vm_offset_t sr_base_address; @@ -214,18 +211,33 @@ struct vm_shared_region { mach_vm_offset_t sr_pmap_nesting_start; mach_vm_size_t sr_pmap_nesting_size; thread_call_t sr_timer_call; - struct vm_shared_region_slide_info sr_slide_info; uuid_t sr_uuid; - boolean_t sr_uuid_copied; + + bool sr_mapping_in_progress; + bool sr_slide_in_progress; + bool sr_64bit; + bool sr_persists; + bool sr_uuid_copied; + bool sr_stale; /* This region should never be used again. */ + +#if __has_feature(ptrauth_calls) + bool sr_reslide; /* Special shared region for suspected attacked processes */ +#define NUM_SR_AUTH_SECTIONS 2 + vm_shared_region_slide_info_t sr_auth_section[NUM_SR_AUTH_SECTIONS]; + uint_t sr_num_auth_section; +#endif /* __has_feature(ptrauth_calls) */ + uint32_t sr_images_count; struct dyld_uuid_info_64 *sr_images; }; -extern kern_return_t vm_shared_region_slide_page(vm_shared_region_slide_info_t si, - vm_offset_t vaddr, - mach_vm_offset_t uservaddr, - uint32_t pageIndex); -extern vm_shared_region_slide_info_t vm_shared_region_get_slide_info(vm_shared_region_t sr); +extern kern_return_t vm_shared_region_slide_page( + vm_shared_region_slide_info_t si, + vm_offset_t vaddr, + mach_vm_offset_t uservaddr, + uint32_t pageIndex, + uint64_t jop_key); +extern uint64_t shared_region_find_key(char *shared_region_id); #else /* !MACH_KERNEL_PRIVATE */ struct vm_shared_region; @@ -235,6 +247,17 @@ struct slide_info_entry_toc; #endif /* MACH_KERNEL_PRIVATE */ +struct _sr_file_mappings { + int fd; + uint32_t mappings_count; + struct shared_file_mapping_slide_np *mappings; + uint32_t slide; + struct fileproc *fp; + struct vnode *vp; + memory_object_size_t file_size; + memory_object_control_t file_control; +}; + extern void vm_shared_region_init(void); extern kern_return_t vm_shared_region_enter( struct _vm_map *map, @@ -242,7 +265,8 @@ extern kern_return_t vm_shared_region_enter( boolean_t is_64bit, void *fsroot, cpu_type_t cpu, - cpu_subtype_t cpu_subtype); + cpu_subtype_t cpu_subtype, + boolean_t reslide); extern kern_return_t vm_shared_region_remove( struct _vm_map *map, struct task *task); @@ -260,8 +284,6 @@ extern ipc_port_t vm_shared_region_mem_entry( struct vm_shared_region *shared_region); extern vm_map_t vm_shared_region_vm_map( struct vm_shared_region *shared_region); -extern uint32_t vm_shared_region_get_slide( - vm_shared_region_t shared_region); extern void vm_shared_region_set( struct task *task, struct vm_shared_region *new_shared_region); @@ -269,28 +291,24 @@ extern vm_shared_region_t vm_shared_region_lookup( void *root_dir, cpu_type_t cpu, cpu_subtype_t cpu_subtype, - boolean_t is_64bit); + boolean_t is_64bit, + boolean_t reslide); extern kern_return_t vm_shared_region_start_address( struct vm_shared_region *shared_region, mach_vm_offset_t *start_address); extern void vm_shared_region_undo_mappings( vm_map_t sr_map, mach_vm_offset_t sr_base_address, - struct shared_file_mapping_np *mappings, + struct _sr_file_mappings *srf_mappings, + struct _sr_file_mappings *srf_mappings_count, unsigned int mappings_count); +__attribute__((noinline)) extern kern_return_t vm_shared_region_map_file( struct vm_shared_region *shared_region, - 
unsigned int mappings_count, - struct shared_file_mapping_np *mappings, - memory_object_control_t file_control, - memory_object_size_t file_size, void *root_dir, - uint32_t slide, - user_addr_t slide_start, - user_addr_t slide_size); + int sr_mappings_count, + struct _sr_file_mappings *sr_mappings); extern kern_return_t vm_shared_region_sliding_valid(uint32_t slide); -extern kern_return_t vm_shared_region_slide_sanity_check(vm_shared_region_t sr); -extern void* vm_shared_region_get_slide_info_entry(vm_shared_region_t sr); extern void vm_commpage_init(void); extern void vm_commpage_text_init(void); extern kern_return_t vm_commpage_enter( @@ -306,7 +324,15 @@ int vm_shared_region_slide(uint32_t, mach_vm_offset_t, mach_vm_size_t, mach_vm_offset_t, - memory_object_control_t); + memory_object_control_t, + vm_prot_t); +extern void vm_shared_region_pivot(void); +extern void vm_shared_region_reslide_stale(void); +#if __has_feature(ptrauth_calls) +__attribute__((noinline)) +extern kern_return_t vm_shared_region_auth_remap(vm_shared_region_t sr); +#endif /* __has_feature(ptrauth_calls) */ +extern void vm_shared_region_reference(vm_shared_region_t sr); #endif /* KERNEL_PRIVATE */ diff --git a/osfmk/vm/vm_shared_region_pager.c b/osfmk/vm/vm_shared_region_pager.c index 35cd0e817..4c0025051 100644 --- a/osfmk/vm/vm_shared_region_pager.c +++ b/osfmk/vm/vm_shared_region_pager.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019 Apple Inc. All rights reserved. + * Copyright (c) 2018-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -54,10 +54,13 @@ #include #include #include -#include #include #include +#if __has_feature(ptrauth_calls) +#include +extern boolean_t diversify_user_jop; +#endif /* __has_feature(ptrauth_calls) */ /* * SHARED REGION MEMORY PAGER @@ -133,22 +136,168 @@ const struct memory_object_pager_ops shared_region_pager_ops = { .memory_object_pager_name = "shared_region" }; +#if __has_feature(ptrauth_calls) +/* + * Track mappings between shared_region_id and the key used to sign + * authenticated pointers. + */ +typedef struct shared_region_jop_key_map { + queue_chain_t srk_queue; + char *srk_shared_region_id; + uint64_t srk_jop_key; + os_refcnt_t srk_ref_count; /* count of tasks active with this shared_region_id */ +} *shared_region_jop_key_map_t; + +os_refgrp_decl(static, srk_refgrp, "shared region key ref cnts", NULL); + +/* + * The list is protected by the "shared_region_key_map" lock. + */ +int shared_region_key_count = 0; /* number of active shared_region_id keys */ +queue_head_t shared_region_jop_key_queue = QUEUE_HEAD_INITIALIZER(shared_region_jop_key_queue); +LCK_GRP_DECLARE(shared_region_jop_key_lck_grp, "shared_region_jop_key"); +LCK_MTX_DECLARE(shared_region_jop_key_lock, &shared_region_jop_key_lck_grp); + +/* + * Find the pointer signing key for the give shared_region_id. + */ +uint64_t +shared_region_find_key(char *shared_region_id) +{ + shared_region_jop_key_map_t region; + uint64_t key; + + lck_mtx_lock(&shared_region_jop_key_lock); + queue_iterate(&shared_region_jop_key_queue, region, shared_region_jop_key_map_t, srk_queue) { + if (strcmp(region->srk_shared_region_id, shared_region_id) == 0) { + goto found; + } + } + panic("shared_region_find_key() no key for region '%s'", shared_region_id); + +found: + key = region->srk_jop_key; + lck_mtx_unlock(&shared_region_jop_key_lock); + return key; +} + +/* + * Return a authentication key to use for the given shared_region_id. + * If inherit is TRUE, then the key must match inherited_key. 
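A minimal usage sketch of how the key-map routines fit together over the life of a shared_region_id (editor's illustration; the wrapper function below is hypothetical, only the shared_region_find_key / shared_region_key_alloc / shared_region_key_dealloc calls come from this patch):

static void
example_shared_region_id_lifecycle(char *shared_region_id)
{
        /* first user of this id: takes a reference and creates (or reuses) its signing key */
        shared_region_key_alloc(shared_region_id, FALSE, 0);

        /* later, a shared-region pager looks the key up to sign user-mode pointers (kept as srp_jop_key) */
        uint64_t jop_key = shared_region_find_key(shared_region_id);
        (void)jop_key;

        /* last user of this id: drops the reference; the entry is freed when the count hits zero */
        shared_region_key_dealloc(shared_region_id);
}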
+ * Creates an additional reference when successful. + */ +void +shared_region_key_alloc(char *shared_region_id, bool inherit, uint64_t inherited_key) +{ + shared_region_jop_key_map_t region; + shared_region_jop_key_map_t new = NULL; + + assert(shared_region_id != NULL); +again: + lck_mtx_lock(&shared_region_jop_key_lock); + queue_iterate(&shared_region_jop_key_queue, region, shared_region_jop_key_map_t, srk_queue) { + if (strcmp(region->srk_shared_region_id, shared_region_id) == 0) { + os_ref_retain_locked(®ion->srk_ref_count); + goto done; + } + } + + /* + * ID was not found, if first time, allocate a new one and redo the lookup. + */ + if (new == NULL) { + lck_mtx_unlock(&shared_region_jop_key_lock); + new = kalloc(sizeof *new); + uint_t len = strlen(shared_region_id) + 1; + new->srk_shared_region_id = kheap_alloc(KHEAP_DATA_BUFFERS, len, Z_WAITOK); + strlcpy(new->srk_shared_region_id, shared_region_id, len); + os_ref_init(&new->srk_ref_count, &srk_refgrp); + + if (diversify_user_jop && inherit) { + new->srk_jop_key = inherited_key; + } else if (diversify_user_jop && strlen(shared_region_id) > 0) { + new->srk_jop_key = generate_jop_key(); + } else { + new->srk_jop_key = ml_default_jop_pid(); + } + + goto again; + } + + /* + * Use the newly allocated entry + */ + ++shared_region_key_count; + queue_enter_first(&shared_region_jop_key_queue, new, shared_region_jop_key_map_t, srk_queue); + region = new; + new = NULL; + +done: + if (inherit && inherited_key != region->srk_jop_key) { + panic("shared_region_key_alloc() inherited key mismatch"); + } + lck_mtx_unlock(&shared_region_jop_key_lock); + + /* + * free any unused new entry + */ + if (new != NULL) { + kheap_free(KHEAP_DATA_BUFFERS, new->srk_shared_region_id, strlen(new->srk_shared_region_id) + 1); + kfree(new, sizeof *new); + } +} + +/* + * Mark the end of using a shared_region_id's key + */ +extern void +shared_region_key_dealloc(char *shared_region_id) +{ + shared_region_jop_key_map_t region; + + assert(shared_region_id != NULL); + lck_mtx_lock(&shared_region_jop_key_lock); + queue_iterate(&shared_region_jop_key_queue, region, shared_region_jop_key_map_t, srk_queue) { + if (strcmp(region->srk_shared_region_id, shared_region_id) == 0) { + goto done; + } + } + panic("shared_region_key_dealloc() Shared region ID '%s' not found", shared_region_id); + +done: + if (os_ref_release_locked(®ion->srk_ref_count) == 0) { + queue_remove(&shared_region_jop_key_queue, region, shared_region_jop_key_map_t, srk_queue); + --shared_region_key_count; + } else { + region = NULL; + } + lck_mtx_unlock(&shared_region_jop_key_lock); + + if (region != NULL) { + kheap_free(KHEAP_DATA_BUFFERS, region->srk_shared_region_id, strlen(region->srk_shared_region_id) + 1); + kfree(region, sizeof *region); + } +} +#endif /* __has_feature(ptrauth_calls) */ + /* * The "shared_region_pager" describes a memory object backed by * the "shared_region" EMM. */ typedef struct shared_region_pager { - /* mandatory generic header */ - struct memory_object sc_pgr_hdr; + struct memory_object srp_header; /* mandatory generic header */ /* pager-specific data */ - queue_chain_t pager_queue; /* next & prev pagers */ - unsigned int ref_count; /* reference count */ - boolean_t is_ready; /* is this pager ready ? */ - boolean_t is_mapped; /* is this mem_obj mapped ? 
*/ - vm_object_t backing_object; /* VM obj for shared cache */ - vm_object_offset_t backing_offset; - struct vm_shared_region_slide_info *scp_slide_info; + queue_chain_t srp_queue; /* next & prev pagers */ + uint32_t srp_ref_count; /* active uses */ + bool srp_is_mapped; /* has active mappings */ + bool srp_is_ready; /* is this pager ready? */ + vm_object_t srp_backing_object; /* VM object for shared cache */ + vm_object_offset_t srp_backing_offset; + vm_shared_region_slide_info_t srp_slide_info; +#if __has_feature(ptrauth_calls) + uint64_t srp_jop_key; /* zero if used for arm64 */ +#endif /* __has_feature(ptrauth_calls) */ } *shared_region_pager_t; #define SHARED_REGION_PAGER_NULL ((shared_region_pager_t) NULL) @@ -158,8 +307,9 @@ typedef struct shared_region_pager { */ int shared_region_pager_count = 0; /* number of pagers */ int shared_region_pager_count_mapped = 0; /* number of unmapped pagers */ -queue_head_t shared_region_pager_queue; -decl_lck_mtx_data(, shared_region_pager_lock); +queue_head_t shared_region_pager_queue = QUEUE_HEAD_INITIALIZER(shared_region_pager_queue); +LCK_GRP_DECLARE(shared_region_pager_lck_grp, "shared_region_pager"); +LCK_MTX_DECLARE(shared_region_pager_lock, &shared_region_pager_lck_grp); /* * Maximum number of unmapped pagers we're willing to keep around. @@ -174,21 +324,12 @@ int shared_region_pager_count_unmapped_max = 0; int shared_region_pager_num_trim_max = 0; int shared_region_pager_num_trim_total = 0; - -lck_grp_t shared_region_pager_lck_grp; -lck_grp_attr_t shared_region_pager_lck_grp_attr; -lck_attr_t shared_region_pager_lck_attr; - uint64_t shared_region_pager_copied = 0; uint64_t shared_region_pager_slid = 0; uint64_t shared_region_pager_slid_error = 0; uint64_t shared_region_pager_reclaimed = 0; /* internal prototypes */ -shared_region_pager_t shared_region_pager_create( - vm_object_t backing_object, - vm_object_offset_t backing_offset, - struct vm_shared_region_slide_info *slide_info); shared_region_pager_t shared_region_pager_lookup(memory_object_t mem_obj); void shared_region_pager_dequeue(shared_region_pager_t pager); void shared_region_pager_deallocate_internal(shared_region_pager_t pager, @@ -213,17 +354,6 @@ int shared_region_pagerdebug = 0; #define PAGER_DEBUG(LEVEL, A) #endif - -void -shared_region_pager_bootstrap(void) -{ - lck_grp_attr_setdefault(&shared_region_pager_lck_grp_attr); - lck_grp_init(&shared_region_pager_lck_grp, "shared_region", &shared_region_pager_lck_grp_attr); - lck_attr_setdefault(&shared_region_pager_lck_attr); - lck_mtx_init(&shared_region_pager_lock, &shared_region_pager_lck_grp, &shared_region_pager_lck_attr); - queue_init(&shared_region_pager_queue); -} - /* * shared_region_pager_init() * @@ -254,7 +384,7 @@ shared_region_pager_init( memory_object_control_reference(control); - pager->sc_pgr_hdr.mo_control = control; + pager->srp_header.mo_control = control; attributes.copy_strategy = MEMORY_OBJECT_COPY_DELAY; /* attributes.cluster_size = (1 << (CLUSTER_SHIFT + PAGE_SHIFT));*/ @@ -383,15 +513,16 @@ shared_region_pager_data_request( interruptible = fault_info.interruptible; pager = shared_region_pager_lookup(mem_obj); - assert(pager->is_ready); - assert(pager->ref_count > 1); /* pager is alive and mapped */ + assert(pager->srp_is_ready); + assert(pager->srp_ref_count > 1); /* pager is alive */ + assert(pager->srp_is_mapped); /* pager is mapped */ PAGER_DEBUG(PAGER_PAGEIN, ("shared_region_pager_data_request: %p, %llx, %x, %x, pager %p\n", mem_obj, offset, length, protection_required, pager)); /* * Gather in a UPL 
all the VM pages requested by VM. */ - mo_control = pager->sc_pgr_hdr.mo_control; + mo_control = pager->srp_header.mo_control; upl_size = length; upl_flags = @@ -416,14 +547,14 @@ shared_region_pager_data_request( * backing VM object (itself backed by the shared cache file via * the vnode pager). */ - src_top_object = pager->backing_object; + src_top_object = pager->srp_backing_object; assert(src_top_object != VM_OBJECT_NULL); vm_object_reference(src_top_object); /* keep the source object alive */ - slide_start_address = pager->scp_slide_info->slid_address; + slide_start_address = pager->srp_slide_info->si_slid_address; - fault_info.lo_offset += pager->backing_offset; - fault_info.hi_offset += pager->backing_offset; + fault_info.lo_offset += pager->srp_backing_offset; + fault_info.hi_offset += pager->srp_backing_offset; /* * Fill in the contents of the pages requested by VM. @@ -452,7 +583,7 @@ retry_src_fault: prot = VM_PROT_READ; src_page = VM_PAGE_NULL; kr = vm_fault_page(src_top_object, - pager->backing_offset + offset + cur_offset, + pager->srp_backing_offset + offset + cur_offset, VM_PROT_READ, FALSE, FALSE, /* src_page not looked up */ @@ -473,7 +604,7 @@ retry_src_fault: if (vm_page_wait(interruptible)) { goto retry_src_fault; } - /* fall thru */ + OS_FALLTHROUGH; case VM_FAULT_INTERRUPTED: retval = MACH_SEND_INTERRUPTED; goto done; @@ -481,7 +612,7 @@ retry_src_fault: /* success but no VM page: fail */ vm_object_paging_end(src_top_object); vm_object_unlock(src_top_object); - /*FALLTHROUGH*/ + OS_FALLTHROUGH; case VM_FAULT_MEMORY_ERROR: /* the page is not there ! */ if (error_code) { @@ -526,7 +657,7 @@ retry_src_fault: */ if (src_page_object->code_signed) { vm_page_validate_cs_mapped( - src_page, + src_page, PAGE_SIZE, 0, (const void *) src_vaddr); } /* @@ -570,24 +701,25 @@ retry_src_fault: PAGE_SIZE_FOR_SR_SLIDE); offset_in_backing_object = (chunk_offset + - pager->backing_offset); - if ((offset_in_backing_object < pager->scp_slide_info->start) || - (offset_in_backing_object >= pager->scp_slide_info->end)) { + pager->srp_backing_offset); + if ((offset_in_backing_object < pager->srp_slide_info->si_start) || + (offset_in_backing_object >= pager->srp_slide_info->si_end)) { /* chunk is outside of sliding range: done */ shared_region_pager_copied++; continue; } - offset_in_sliding_range = - (offset_in_backing_object - - pager->scp_slide_info->start); - kr = vm_shared_region_slide_page( - pager->scp_slide_info, - dst_vaddr + offset_in_page, - (mach_vm_offset_t) (offset_in_sliding_range + - slide_start_address), - (uint32_t) (offset_in_sliding_range / - PAGE_SIZE_FOR_SR_SLIDE)); + offset_in_sliding_range = offset_in_backing_object - pager->srp_slide_info->si_start; + kr = vm_shared_region_slide_page(pager->srp_slide_info, + dst_vaddr + offset_in_page, + (mach_vm_offset_t) (offset_in_sliding_range + slide_start_address), + (uint32_t) (offset_in_sliding_range / PAGE_SIZE_FOR_SR_SLIDE), +#if __has_feature(ptrauth_calls) + pager->srp_slide_info->si_ptrauth ? 
pager->srp_jop_key : 0 +#else /* __has_feature(ptrauth_calls) */ + 0 +#endif /* __has_feature(ptrauth_calls) */ + ); if (shared_region_pager_data_request_debug) { printf("shared_region_data_request" "(%p,0x%llx+0x%llx+0x%04llx): 0x%llx " @@ -605,13 +737,13 @@ retry_src_fault: (uint64_t) cur_offset, (uint64_t) offset_in_page, chunk_offset, - pager->scp_slide_info->start, - pager->scp_slide_info->end, - (pager->backing_offset + + pager->srp_slide_info->si_start, + pager->srp_slide_info->si_end, + (pager->srp_backing_offset + offset + cur_offset + offset_in_page), - pager->backing_offset, + pager->srp_backing_offset, offset, (uint64_t) cur_offset, (uint64_t) offset_in_page, @@ -670,7 +802,10 @@ done: upl_abort(upl, 0); } else { boolean_t empty; - upl_commit_range(upl, 0, upl->size, + assertf(page_aligned(upl->u_offset) && page_aligned(upl->u_size), + "upl %p offset 0x%llx size 0x%x\n", + upl, upl->u_offset, upl->u_size); + upl_commit_range(upl, 0, upl->u_size, UPL_COMMIT_CS_VALIDATED | UPL_COMMIT_WRITTEN_BY_KERNEL, upl_pl, pl_count, &empty); } @@ -701,8 +836,8 @@ shared_region_pager_reference( pager = shared_region_pager_lookup(mem_obj); lck_mtx_lock(&shared_region_pager_lock); - assert(pager->ref_count > 0); - pager->ref_count++; + assert(pager->srp_ref_count > 0); + pager->srp_ref_count++; lck_mtx_unlock(&shared_region_pager_lock); } @@ -718,14 +853,14 @@ void shared_region_pager_dequeue( shared_region_pager_t pager) { - assert(!pager->is_mapped); + assert(!pager->srp_is_mapped); queue_remove(&shared_region_pager_queue, pager, shared_region_pager_t, - pager_queue); - pager->pager_queue.next = NULL; - pager->pager_queue.prev = NULL; + srp_queue); + pager->srp_queue.next = NULL; + pager->srp_queue.prev = NULL; shared_region_pager_count--; } @@ -747,23 +882,23 @@ void shared_region_pager_terminate_internal( shared_region_pager_t pager) { - assert(pager->is_ready); - assert(!pager->is_mapped); + assert(pager->srp_is_ready); + assert(!pager->srp_is_mapped); + assert(pager->srp_ref_count == 1); - if (pager->backing_object != VM_OBJECT_NULL) { - vm_object_deallocate(pager->backing_object); - pager->backing_object = VM_OBJECT_NULL; + if (pager->srp_backing_object != VM_OBJECT_NULL) { + vm_object_deallocate(pager->srp_backing_object); + pager->srp_backing_object = VM_OBJECT_NULL; } /* trigger the destruction of the memory object */ - memory_object_destroy(pager->sc_pgr_hdr.mo_control, 0); + memory_object_destroy(pager->srp_header.mo_control, 0); } /* * shared_region_pager_deallocate_internal() * - * Release a reference on this pager and free it when the last - * reference goes away. - * Can be called with shared_region_pager_lock held or not but always returns + * Release a reference on this pager and free it when the last reference goes away. + * Can be called with shared_region_pager_lock held or not, but always returns * with it unlocked. 
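The comment above documents a locking contract that recurs throughout these pagers: the routine may be entered with shared_region_pager_lock held or not, signalled by a needs_lock flag, and it always returns with the lock dropped. A minimal userspace sketch of that contract, using pthreads in place of lck_mtx and purely illustrative names:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t pager_lock = PTHREAD_MUTEX_INITIALIZER;
static int pager_ref_count = 2;

/*
 * Drop one reference.  May be called with pager_lock held
 * (needs_lock == false) or not held (needs_lock == true);
 * either way the lock is released before returning.
 */
static void
pager_deallocate_internal(bool needs_lock)
{
    if (needs_lock) {
        pthread_mutex_lock(&pager_lock);
    }
    pager_ref_count--;
    printf("ref_count now %d\n", pager_ref_count);
    /* further teardown would happen here once the count hits 0 */
    pthread_mutex_unlock(&pager_lock);   /* always returns unlocked */
}

int
main(void)
{
    pager_deallocate_internal(true);      /* caller did not hold the lock */
    pthread_mutex_lock(&pager_lock);
    pager_deallocate_internal(false);     /* caller already held the lock */
    return 0;
}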
*/ void @@ -778,19 +913,15 @@ shared_region_pager_deallocate_internal( lck_mtx_lock(&shared_region_pager_lock); } - count_unmapped = (shared_region_pager_count - - shared_region_pager_count_mapped); - if (count_unmapped > shared_region_pager_cache_limit) { - /* we have too many unmapped pagers: trim some */ - needs_trimming = TRUE; - } else { - needs_trimming = FALSE; - } + /* if we have too many unmapped pagers, trim some */ + count_unmapped = shared_region_pager_count - shared_region_pager_count_mapped; + needs_trimming = (count_unmapped > shared_region_pager_cache_limit); /* drop a reference on this pager */ - pager->ref_count--; + assert(pager->srp_ref_count > 0); + pager->srp_ref_count--; - if (pager->ref_count == 1) { + if (pager->srp_ref_count == 1) { /* * Only the "named" reference is left, which means that * no one is really holding on to this pager anymore. @@ -800,16 +931,40 @@ shared_region_pager_deallocate_internal( /* the pager is all ours: no need for the lock now */ lck_mtx_unlock(&shared_region_pager_lock); shared_region_pager_terminate_internal(pager); - } else if (pager->ref_count == 0) { + } else if (pager->srp_ref_count == 0) { /* * Dropped the existence reference; the memory object has * been terminated. Do some final cleanup and release the * pager structure. */ lck_mtx_unlock(&shared_region_pager_lock); - if (pager->sc_pgr_hdr.mo_control != MEMORY_OBJECT_CONTROL_NULL) { - memory_object_control_deallocate(pager->sc_pgr_hdr.mo_control); - pager->sc_pgr_hdr.mo_control = MEMORY_OBJECT_CONTROL_NULL; + + vm_shared_region_slide_info_t si = pager->srp_slide_info; +#if __has_feature(ptrauth_calls) + /* + * The slide_info for auth sections lives in the shared region. + * Just deallocate() on the shared region and clear the field. + */ + if (si != NULL) { + if (si->si_shared_region != NULL) { + assert(si->si_ptrauth); + vm_shared_region_deallocate(si->si_shared_region); + pager->srp_slide_info = NULL; + si = NULL; + } + } +#endif /* __has_feature(ptrauth_calls) */ + if (si != NULL) { + vm_object_deallocate(si->si_slide_object); + /* free the slide_info_entry */ + kheap_free(KHEAP_DATA_BUFFERS, si->si_slide_info_entry, si->si_slide_info_size); + kfree(si, sizeof *si); + pager->srp_slide_info = NULL; + } + + if (pager->srp_header.mo_control != MEMORY_OBJECT_CONTROL_NULL) { + memory_object_control_deallocate(pager->srp_header.mo_control); + pager->srp_header.mo_control = MEMORY_OBJECT_CONTROL_NULL; } kfree(pager, sizeof(*pager)); pager = SHARED_REGION_PAGER_NULL; @@ -861,9 +1016,9 @@ shared_region_pager_terminate( */ kern_return_t shared_region_pager_synchronize( - __unused memory_object_t mem_obj, + __unused memory_object_t mem_obj, __unused memory_object_offset_t offset, - __unused memory_object_size_t length, + __unused memory_object_size_t length, __unused vm_sync_t sync_flags) { panic("shared_region_pager_synchronize: memory_object_synchronize no longer supported\n"); @@ -875,8 +1030,8 @@ shared_region_pager_synchronize( * * This allows VM to let us, the EMM, know that this memory object * is currently mapped one or more times. This is called by VM each time - * the memory object gets mapped and we take one extra reference on the - * memory object to account for all its mappings. + * the memory object gets mapped, but we only take one extra reference the + * first time it is called. 
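shared_region_pager_map() and shared_region_pager_last_unmap(), which follow, keep a single extra reference that stands in for all of a pager's mappings: it is taken on the first map and released when the last mapping goes away. A simplified model of that accounting, with hypothetical types and counters rather than the kernel's:

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

struct pager {
    int  ref_count;   /* existence ref + caller ref + one "mapped" ref */
    bool is_mapped;
};

static int pagers_mapped; /* counterpart of shared_region_pager_count_mapped */

/* called on every mapping; only the first one takes the extra reference */
static void pager_map(struct pager *p)
{
    if (!p->is_mapped) {
        p->is_mapped = true;
        p->ref_count++;
        pagers_mapped++;
    }
}

/* called when the last mapping disappears; drops the extra reference */
static void pager_last_unmap(struct pager *p)
{
    if (p->is_mapped) {
        p->is_mapped = false;
        p->ref_count--;
        pagers_mapped--;
    }
}

int main(void)
{
    struct pager p = { .ref_count = 2, .is_mapped = false };
    pager_map(&p);          /* first mapping: 2 -> 3 */
    pager_map(&p);          /* later mappings: no change */
    assert(p.ref_count == 3);
    pager_last_unmap(&p);   /* all mappings gone: 3 -> 2 */
    assert(p.ref_count == 2 && !p.is_mapped);
    printf("ok, %d pagers mapped\n", pagers_mapped);
    return 0;
}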
*/ kern_return_t shared_region_pager_map( @@ -890,16 +1045,11 @@ shared_region_pager_map( pager = shared_region_pager_lookup(mem_obj); lck_mtx_lock(&shared_region_pager_lock); - assert(pager->is_ready); - assert(pager->ref_count > 0); /* pager is alive */ - if (pager->is_mapped == FALSE) { - /* - * First mapping of this pager: take an extra reference - * that will remain until all the mappings of this pager - * are removed. - */ - pager->is_mapped = TRUE; - pager->ref_count++; + assert(pager->srp_is_ready); + assert(pager->srp_ref_count > 0); /* pager is alive */ + if (!pager->srp_is_mapped) { + pager->srp_is_mapped = TRUE; + pager->srp_ref_count++; shared_region_pager_count_mapped++; } lck_mtx_unlock(&shared_region_pager_lock); @@ -925,18 +1075,17 @@ shared_region_pager_last_unmap( pager = shared_region_pager_lookup(mem_obj); lck_mtx_lock(&shared_region_pager_lock); - if (pager->is_mapped) { + if (pager->srp_is_mapped) { /* * All the mappings are gone, so let go of the one extra * reference that represents all the mappings of this pager. */ shared_region_pager_count_mapped--; - count_unmapped = (shared_region_pager_count - - shared_region_pager_count_mapped); + count_unmapped = (shared_region_pager_count - shared_region_pager_count_mapped); if (count_unmapped > shared_region_pager_count_unmapped_max) { shared_region_pager_count_unmapped_max = count_unmapped; } - pager->is_mapped = FALSE; + pager->srp_is_mapped = FALSE; shared_region_pager_deallocate_internal(pager, TRUE); /* caution: deallocate_internal() released the lock ! */ } else { @@ -958,15 +1107,23 @@ shared_region_pager_lookup( assert(mem_obj->mo_pager_ops == &shared_region_pager_ops); pager = (shared_region_pager_t)(uintptr_t) mem_obj; - assert(pager->ref_count > 0); + assert(pager->srp_ref_count > 0); return pager; } -shared_region_pager_t +/* + * Create and return a pager for the given object with the + * given slide information. + */ +static shared_region_pager_t shared_region_pager_create( vm_object_t backing_object, vm_object_offset_t backing_offset, - struct vm_shared_region_slide_info *slide_info) + struct vm_shared_region_slide_info *slide_info, +#if !__has_feature(ptrauth_calls) + __unused +#endif /* !__has_feature(ptrauth_calls) */ + uint64_t jop_key) { shared_region_pager_t pager; memory_object_control_t control; @@ -985,17 +1142,28 @@ shared_region_pager_create( * we reserve the first word in the object for a fake ip_kotype * setting - that will tell vm_map to use it as a memory object. 
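The "fake ip_kotype" trick described above works because the memory-object header is the first member of the pager structure, so a pointer to the pager can be handed out as a memory_object_t and converted back after checking the pager-ops identity. A small standalone illustration of that embedding pattern; all names here are stand-ins, not the kernel definitions:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* simplified stand-ins for struct memory_object / its pager ops */
struct mo_header {
    uint32_t    mo_ikot;        /* fake ip_kotype word consumers check */
    const void *mo_pager_ops;   /* identifies which pager owns this object */
};

struct my_pager {
    struct mo_header hdr;       /* must be the first member */
    int              ref_count;
};

static const int my_pager_ops;  /* its address serves purely as an identity tag */

/* the reverse cast: verify the ops pointer before trusting the layout */
static struct my_pager *
my_pager_lookup(struct mo_header *mem_obj)
{
    assert(mem_obj->mo_pager_ops == &my_pager_ops);
    return (struct my_pager *)mem_obj;
}

int main(void)
{
    struct my_pager p = { { 0x1234, &my_pager_ops }, 1 };
    /* because hdr is first, &p and &p.hdr are the same address */
    struct mo_header *mo = (struct mo_header *)&p;
    assert(my_pager_lookup(mo) == &p);
    printf("header cast ok\n");
    return 0;
}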
*/ - pager->sc_pgr_hdr.mo_ikot = IKOT_MEMORY_OBJECT; - pager->sc_pgr_hdr.mo_pager_ops = &shared_region_pager_ops; - pager->sc_pgr_hdr.mo_control = MEMORY_OBJECT_CONTROL_NULL; - - pager->is_ready = FALSE;/* not ready until it has a "name" */ - pager->ref_count = 1; /* existence reference (for the cache) */ - pager->ref_count++; /* for the caller */ - pager->is_mapped = FALSE; - pager->backing_object = backing_object; - pager->backing_offset = backing_offset; - pager->scp_slide_info = slide_info; + pager->srp_header.mo_ikot = IKOT_MEMORY_OBJECT; + pager->srp_header.mo_pager_ops = &shared_region_pager_ops; + pager->srp_header.mo_control = MEMORY_OBJECT_CONTROL_NULL; + + pager->srp_is_ready = FALSE;/* not ready until it has a "name" */ + pager->srp_ref_count = 1; /* existence reference (for the cache) */ + pager->srp_ref_count++; /* for the caller */ + pager->srp_is_mapped = FALSE; + pager->srp_backing_object = backing_object; + pager->srp_backing_offset = backing_offset; + pager->srp_slide_info = slide_info; +#if __has_feature(ptrauth_calls) + pager->srp_jop_key = jop_key; + /* + * If we're getting slide_info from the shared_region, + * take a reference, so it can't disappear from under us. + */ + if (slide_info->si_shared_region) { + assert(slide_info->si_ptrauth); + vm_shared_region_reference(slide_info->si_shared_region); + } +#endif /* __has_feature(ptrauth_calls) */ vm_object_reference(backing_object); @@ -1004,7 +1172,7 @@ shared_region_pager_create( queue_enter_first(&shared_region_pager_queue, pager, shared_region_pager_t, - pager_queue); + srp_queue); shared_region_pager_count++; if (shared_region_pager_count > shared_region_pager_count_max) { shared_region_pager_count_max = shared_region_pager_count; @@ -1020,7 +1188,7 @@ shared_region_pager_create( lck_mtx_lock(&shared_region_pager_lock); /* the new pager is now ready to be used */ - pager->is_ready = TRUE; + pager->srp_is_ready = TRUE; object = memory_object_to_vm_object((memory_object_t) pager); assert(object); /* @@ -1032,7 +1200,7 @@ shared_region_pager_create( lck_mtx_unlock(&shared_region_pager_lock); /* wakeup anyone waiting for this pager to be ready */ - thread_wakeup(&pager->is_ready); + thread_wakeup(&pager->srp_is_ready); return pager; } @@ -1047,25 +1215,24 @@ memory_object_t shared_region_pager_setup( vm_object_t backing_object, vm_object_offset_t backing_offset, - struct vm_shared_region_slide_info *slide_info) + struct vm_shared_region_slide_info *slide_info, + uint64_t jop_key) { shared_region_pager_t pager; /* create new pager */ - pager = shared_region_pager_create( - backing_object, - backing_offset, - slide_info); + pager = shared_region_pager_create(backing_object, + backing_offset, slide_info, jop_key); if (pager == SHARED_REGION_PAGER_NULL) { /* could not create a new pager */ return MEMORY_OBJECT_NULL; } lck_mtx_lock(&shared_region_pager_lock); - while (!pager->is_ready) { + while (!pager->srp_is_ready) { lck_mtx_sleep(&shared_region_pager_lock, LCK_SLEEP_DEFAULT, - &pager->is_ready, + &pager->srp_is_ready, THREAD_UNINT); } lck_mtx_unlock(&shared_region_pager_lock); @@ -1073,6 +1240,83 @@ shared_region_pager_setup( return (memory_object_t) pager; } +#if __has_feature(ptrauth_calls) +/* + * shared_region_pager_match() + * + * Provide the caller with a memory object backed by the provided + * "backing_object" VM object. 
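shared_region_pager_match(), shown next, is essentially a find-or-create cache lookup: walk the pager queue, compare the backing object, offset, jop key and slide information (the slide info entries with memcmp), and either hand back an existing pager with an extra reference or fall back to creating one. An illustrative sketch of that shape, with made-up field names and no locking:

#include <stdio.h>
#include <string.h>

#define MAX_PAGERS 8

/* illustrative stand-ins for the fields the real match routine compares */
struct pager {
    int            in_use;
    const void    *backing_object;
    unsigned long  backing_offset;
    unsigned long  jop_key;
    unsigned char  slide_entry[16];  /* compared with memcmp, like si_slide_info_entry */
    int            ref_count;
};

static struct pager pagers[MAX_PAGERS];

static struct pager *
pager_match(const void *obj, unsigned long off, unsigned long key,
    const unsigned char entry[16])
{
    /* first pass: reuse an existing pager if every key field matches */
    for (int i = 0; i < MAX_PAGERS; i++) {
        struct pager *p = &pagers[i];
        if (p->in_use && p->backing_object == obj &&
            p->backing_offset == off && p->jop_key == key &&
            memcmp(p->slide_entry, entry, sizeof(p->slide_entry)) == 0) {
            p->ref_count++;          /* caller expects a reference */
            return p;
        }
    }
    /* no match: create a new one (slot allocation kept trivial) */
    for (int i = 0; i < MAX_PAGERS; i++) {
        struct pager *p = &pagers[i];
        if (!p->in_use) {
            p->in_use = 1;
            p->backing_object = obj;
            p->backing_offset = off;
            p->jop_key = key;
            memcpy(p->slide_entry, entry, sizeof(p->slide_entry));
            p->ref_count = 2;        /* existence reference + caller's */
            return p;
        }
    }
    return NULL;                     /* cache full */
}

int main(void)
{
    static int backing_object;       /* any stable address works as an identity */
    unsigned char entry[16] = { 0 };

    struct pager *a = pager_match(&backing_object, 0x4000, 7, entry);
    struct pager *b = pager_match(&backing_object, 0x4000, 7, entry);
    printf("reused: %s, ref_count %d\n", (a == b) ? "yes" : "no", b->ref_count);
    return 0;
}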
+ */ +memory_object_t +shared_region_pager_match( + vm_object_t backing_object, + vm_object_offset_t backing_offset, + vm_shared_region_slide_info_t slide_info, + uint64_t jop_key) +{ + shared_region_pager_t pager; + vm_shared_region_slide_info_t si; + + lck_mtx_lock(&shared_region_pager_lock); + queue_iterate(&shared_region_pager_queue, pager, shared_region_pager_t, srp_queue) { + if (pager->srp_backing_object != backing_object->copy) { + continue; + } + if (pager->srp_backing_offset != backing_offset) { + continue; + } + si = pager->srp_slide_info; + + /* If there's no AUTH section then it can't match (slide_info is always !NULL) */ + if (!si->si_ptrauth) { + continue; + } + if (pager->srp_jop_key != jop_key) { + continue; + } + if (si->si_slide != slide_info->si_slide) { + continue; + } + if (si->si_start != slide_info->si_start) { + continue; + } + if (si->si_end != slide_info->si_end) { + continue; + } + if (si->si_slide_object != slide_info->si_slide_object) { + continue; + } + if (si->si_slide_info_size != slide_info->si_slide_info_size) { + continue; + } + if (memcmp(si->si_slide_info_entry, slide_info->si_slide_info_entry, si->si_slide_info_size) != 0) { + continue; + } + ++pager->srp_ref_count; /* the caller expects a reference on this */ + lck_mtx_unlock(&shared_region_pager_lock); + return (memory_object_t)pager; + } + + /* + * We didn't find a pre-existing pager, so create one. + * + * Note slight race condition here since we drop the lock. This could lead to more than one + * thread calling setup with the same arguments here. That shouldn't break anything, just + * waste a little memory. + */ + lck_mtx_unlock(&shared_region_pager_lock); + return shared_region_pager_setup(backing_object->copy, backing_offset, slide_info, jop_key); +} + +void +shared_region_pager_match_task_key(memory_object_t memobj, __unused task_t task) +{ + __unused shared_region_pager_t pager = (shared_region_pager_t)memobj; + + assert(pager->srp_jop_key == task->jop_pid); +} +#endif /* __has_feature(ptrauth_calls) */ + void shared_region_pager_trim(void) { @@ -1090,18 +1334,15 @@ shared_region_pager_trim(void) queue_init(&trim_queue); num_trim = 0; - for (pager = (shared_region_pager_t) - queue_last(&shared_region_pager_queue); - !queue_end(&shared_region_pager_queue, - (queue_entry_t) pager); + for (pager = (shared_region_pager_t)queue_last(&shared_region_pager_queue); + !queue_end(&shared_region_pager_queue, (queue_entry_t) pager); pager = prev_pager) { /* get prev elt before we dequeue */ - prev_pager = (shared_region_pager_t) - queue_prev(&pager->pager_queue); + prev_pager = (shared_region_pager_t)queue_prev(&pager->srp_queue); - if (pager->ref_count == 2 && - pager->is_ready && - !pager->is_mapped) { + if (pager->srp_ref_count == 2 && + pager->srp_is_ready && + !pager->srp_is_mapped) { /* this pager can be trimmed */ num_trim++; /* remove this pager from the main list ... */ @@ -1110,12 +1351,11 @@ shared_region_pager_trim(void) queue_enter_first(&trim_queue, pager, shared_region_pager_t, - pager_queue); + srp_queue); - count_unmapped = (shared_region_pager_count - - shared_region_pager_count_mapped); + /* do we have enough pagers to trim? 
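shared_region_pager_trim(), whose remainder follows, walks the pager queue from the oldest entry toward the newest and retires idle pagers (ref_count == 2, ready, unmapped) until the number of unmapped pagers is back under the cache limit. A toy model of that policy, with invented counts and a flat array in place of the queue:

#include <stdio.h>

#define NPAGERS      6
#define CACHE_LIMIT  2   /* stand-in for shared_region_pager_cache_limit */

/* minimal pager record: only the fields the trim decision looks at */
struct pager {
    int ref_count;   /* 2 == only the cache and "named" references remain */
    int is_ready;
    int is_mapped;
};

int main(void)
{
    /* index NPAGERS-1 is the oldest entry, mirroring the queue tail */
    struct pager queue[NPAGERS] = {
        { 3, 1, 1 }, { 2, 1, 0 }, { 2, 1, 0 },
        { 3, 1, 1 }, { 2, 1, 0 }, { 2, 1, 0 },
    };
    int count = NPAGERS;
    int count_mapped = 2;
    int num_trim = 0;

    /* walk from the tail toward the head while still over the limit */
    for (int i = NPAGERS - 1;
        i >= 0 && (count - count_mapped) > CACHE_LIMIT;
        i--) {
        struct pager *p = &queue[i];
        if (p->ref_count == 2 && p->is_ready && !p->is_mapped) {
            num_trim++;   /* would be moved to the trim queue and torn down */
            count--;
        }
    }
    printf("trimmed %d pagers; %d unmapped pagers remain\n",
        num_trim, count - count_mapped);
    return 0;
}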
*/ + count_unmapped = (shared_region_pager_count - shared_region_pager_count_mapped); if (count_unmapped <= shared_region_pager_cache_limit) { - /* we have enough pagers to trim */ break; } } @@ -1132,16 +1372,16 @@ shared_region_pager_trim(void) queue_remove_first(&trim_queue, pager, shared_region_pager_t, - pager_queue); - pager->pager_queue.next = NULL; - pager->pager_queue.prev = NULL; - assert(pager->ref_count == 2); + srp_queue); + pager->srp_queue.next = NULL; + pager->srp_queue.prev = NULL; + assert(pager->srp_ref_count == 2); /* * We can't call deallocate_internal() because the pager * has already been dequeued, but we still need to remove * a reference. */ - pager->ref_count--; + pager->srp_ref_count--; shared_region_pager_terminate_internal(pager); } } diff --git a/osfmk/vm/vm_swapfile_pager.c b/osfmk/vm/vm_swapfile_pager.c index 8ebc2d3e1..39526d9b6 100644 --- a/osfmk/vm/vm_swapfile_pager.c +++ b/osfmk/vm/vm_swapfile_pager.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019 Apple Inc. All rights reserved. + * Copyright (c) 2008-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -152,20 +152,15 @@ typedef struct swapfile_pager { * The list is protected by the "swapfile_pager_lock" lock. */ int swapfile_pager_count = 0; /* number of pagers */ -queue_head_t swapfile_pager_queue; -decl_lck_mtx_data(, swapfile_pager_lock); +queue_head_t swapfile_pager_queue = QUEUE_HEAD_INITIALIZER(swapfile_pager_queue); +LCK_GRP_DECLARE(swapfile_pager_lck_grp, "swapfile pager"); +LCK_MTX_DECLARE(swapfile_pager_lock, &swapfile_pager_lck_grp); /* * Statistics & counters. */ int swapfile_pager_count_max = 0; - -lck_grp_t swapfile_pager_lck_grp; -lck_grp_attr_t swapfile_pager_lck_grp_attr; -lck_attr_t swapfile_pager_lck_attr; - - /* internal prototypes */ swapfile_pager_t swapfile_pager_create(struct vnode *vp); swapfile_pager_t swapfile_pager_lookup(memory_object_t mem_obj); @@ -192,16 +187,6 @@ int swapfile_pagerdebug = 0; #endif -void -swapfile_pager_bootstrap(void) -{ - lck_grp_attr_setdefault(&swapfile_pager_lck_grp_attr); - lck_grp_init(&swapfile_pager_lck_grp, "swapfile pager", &swapfile_pager_lck_grp_attr); - lck_attr_setdefault(&swapfile_pager_lck_attr); - lck_mtx_init(&swapfile_pager_lock, &swapfile_pager_lck_grp, &swapfile_pager_lck_attr); - queue_init(&swapfile_pager_queue); -} - /* * swapfile_pager_init() * @@ -454,7 +439,10 @@ done: upl_abort(upl, 0); } else { boolean_t empty; - upl_commit_range(upl, 0, upl->size, + assertf(page_aligned(upl->u_offset) && page_aligned(upl->u_size), + "upl %p offset 0x%llx size 0x%x", + upl, upl->u_offset, upl->u_size); + upl_commit_range(upl, 0, upl->u_size, UPL_COMMIT_CS_VALIDATED, upl_pl, pl_count, &empty); } diff --git a/osfmk/vm/vm_tests.c b/osfmk/vm/vm_tests.c new file mode 100644 index 000000000..cda7e0ba1 --- /dev/null +++ b/osfmk/vm/vm_tests.c @@ -0,0 +1,962 @@ +/* + * Copyright (c) 2020 Apple Inc. All rights reserved. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. 
The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. + * + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ + */ + +#include + +#include +#include +#include + +#include +#include +#include +#include +#include + +extern kern_return_t +vm_map_copy_adjust_to_target( + vm_map_copy_t copy_map, + vm_map_offset_t offset, + vm_map_size_t size, + vm_map_t target_map, + boolean_t copy, + vm_map_copy_t *target_copy_map_p, + vm_map_offset_t *overmap_start_p, + vm_map_offset_t *overmap_end_p, + vm_map_offset_t *trimmed_start_p); + +#define VM_TEST_COLLAPSE_COMPRESSOR 0 +#define VM_TEST_WIRE_AND_EXTRACT 0 +#define VM_TEST_PAGE_WIRE_OVERFLOW_PANIC 0 +#if __arm64__ +#define VM_TEST_KERNEL_OBJECT_FAULT 0 +#endif /* __arm64__ */ +#define VM_TEST_DEVICE_PAGER_TRANSPOSE (DEVELOPMENT || DEBUG) + +#if VM_TEST_COLLAPSE_COMPRESSOR +extern boolean_t vm_object_collapse_compressor_allowed; +#include +static void +vm_test_collapse_compressor(void) +{ + vm_object_size_t backing_size, top_size; + vm_object_t backing_object, top_object; + vm_map_offset_t backing_offset, top_offset; + unsigned char *backing_address, *top_address; + kern_return_t kr; + + printf("VM_TEST_COLLAPSE_COMPRESSOR:\n"); + + /* create backing object */ + backing_size = 15 * PAGE_SIZE; + backing_object = vm_object_allocate(backing_size); + assert(backing_object != VM_OBJECT_NULL); + printf("VM_TEST_COLLAPSE_COMPRESSOR: created backing object %p\n", + backing_object); + /* map backing object */ + backing_offset = 0; + kr = vm_map_enter(kernel_map, &backing_offset, backing_size, 0, + VM_FLAGS_ANYWHERE, VM_MAP_KERNEL_FLAGS_NONE, + backing_object, 0, FALSE, + VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT); + assert(kr == KERN_SUCCESS); + backing_address = (unsigned char *) backing_offset; + printf("VM_TEST_COLLAPSE_COMPRESSOR: " + "mapped backing object %p at 0x%llx\n", + backing_object, (uint64_t) backing_offset); + /* populate with pages to be compressed in backing object */ + backing_address[0x1 * PAGE_SIZE] = 0xB1; + backing_address[0x4 * PAGE_SIZE] = 0xB4; + backing_address[0x7 * PAGE_SIZE] = 0xB7; + backing_address[0xa * PAGE_SIZE] = 0xBA; + backing_address[0xd * PAGE_SIZE] = 0xBD; + printf("VM_TEST_COLLAPSE_COMPRESSOR: " + "populated pages to be compressed in " + "backing_object %p\n", backing_object); + /* compress backing object */ + vm_object_pageout(backing_object); + printf("VM_TEST_COLLAPSE_COMPRESSOR: compressing backing_object %p\n", + backing_object); + /* wait for all the pages to be gone */ + while (*(volatile int *)&backing_object->resident_page_count != 0) { + IODelay(10); + } + printf("VM_TEST_COLLAPSE_COMPRESSOR: backing_object %p compressed\n", + backing_object); + /* 
populate with pages to be resident in backing object */ + backing_address[0x0 * PAGE_SIZE] = 0xB0; + backing_address[0x3 * PAGE_SIZE] = 0xB3; + backing_address[0x6 * PAGE_SIZE] = 0xB6; + backing_address[0x9 * PAGE_SIZE] = 0xB9; + backing_address[0xc * PAGE_SIZE] = 0xBC; + printf("VM_TEST_COLLAPSE_COMPRESSOR: " + "populated pages to be resident in " + "backing_object %p\n", backing_object); + /* leave the other pages absent */ + /* mess with the paging_offset of the backing_object */ + assert(backing_object->paging_offset == 0); + backing_object->paging_offset = 3 * PAGE_SIZE; + + /* create top object */ + top_size = 9 * PAGE_SIZE; + top_object = vm_object_allocate(top_size); + assert(top_object != VM_OBJECT_NULL); + printf("VM_TEST_COLLAPSE_COMPRESSOR: created top object %p\n", + top_object); + /* map top object */ + top_offset = 0; + kr = vm_map_enter(kernel_map, &top_offset, top_size, 0, + VM_FLAGS_ANYWHERE, VM_MAP_KERNEL_FLAGS_NONE, + top_object, 0, FALSE, + VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT); + assert(kr == KERN_SUCCESS); + top_address = (unsigned char *) top_offset; + printf("VM_TEST_COLLAPSE_COMPRESSOR: " + "mapped top object %p at 0x%llx\n", + top_object, (uint64_t) top_offset); + /* populate with pages to be compressed in top object */ + top_address[0x3 * PAGE_SIZE] = 0xA3; + top_address[0x4 * PAGE_SIZE] = 0xA4; + top_address[0x5 * PAGE_SIZE] = 0xA5; + printf("VM_TEST_COLLAPSE_COMPRESSOR: " + "populated pages to be compressed in " + "top_object %p\n", top_object); + /* compress top object */ + vm_object_pageout(top_object); + printf("VM_TEST_COLLAPSE_COMPRESSOR: compressing top_object %p\n", + top_object); + /* wait for all the pages to be gone */ + while (top_object->resident_page_count != 0) { + IODelay(10); + } + printf("VM_TEST_COLLAPSE_COMPRESSOR: top_object %p compressed\n", + top_object); + /* populate with pages to be resident in top object */ + top_address[0x0 * PAGE_SIZE] = 0xA0; + top_address[0x1 * PAGE_SIZE] = 0xA1; + top_address[0x2 * PAGE_SIZE] = 0xA2; + printf("VM_TEST_COLLAPSE_COMPRESSOR: " + "populated pages to be resident in " + "top_object %p\n", top_object); + /* leave the other pages absent */ + + /* link the 2 objects */ + vm_object_reference(backing_object); + top_object->shadow = backing_object; + top_object->vo_shadow_offset = 3 * PAGE_SIZE; + printf("VM_TEST_COLLAPSE_COMPRESSOR: linked %p and %p\n", + top_object, backing_object); + + /* unmap backing object */ + vm_map_remove(kernel_map, + backing_offset, + backing_offset + backing_size, + VM_MAP_REMOVE_NO_FLAGS); + printf("VM_TEST_COLLAPSE_COMPRESSOR: " + "unmapped backing_object %p [0x%llx:0x%llx]\n", + backing_object, + (uint64_t) backing_offset, + (uint64_t) (backing_offset + backing_size)); + + /* collapse */ + printf("VM_TEST_COLLAPSE_COMPRESSOR: collapsing %p\n", top_object); + vm_object_lock(top_object); + vm_object_collapse(top_object, 0, FALSE); + vm_object_unlock(top_object); + printf("VM_TEST_COLLAPSE_COMPRESSOR: collapsed %p\n", top_object); + + /* did it work? 
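The verification that follows compares every page of the collapsed object against an expect[] table. The rule it encodes is the usual shadow-chain lookup: a page present in the top object wins, otherwise the read falls through to the backing object at the page offset plus vo_shadow_offset. A toy model of that fall-through, where one byte stands in for a whole page and the paging_offset twist the test also exercises is ignored:

#include <stdio.h>

#define ABSENT        0      /* "no page at this offset" */
#define SHADOW_OFFSET 3      /* in pages; mirrors top->vo_shadow_offset */

/* one byte per page stands in for the page's contents */
static unsigned char top_pages[9]      = { 1, 2, 3, 0, 0, 0, 0, 0, 0 };
static unsigned char backing_pages[15] = { 0, 0, 0, 9, 8, 7, 6, 5,
                                           4, 3, 2, 1, 0, 0, 0 };

/* resolve a read of page "i" in the top object */
static unsigned char
read_page(int i)
{
    if (top_pages[i] != ABSENT) {
        return top_pages[i];          /* satisfied by the top object */
    }
    /* fall through to the shadow (backing) object, shifted by the offset */
    return backing_pages[i + SHADOW_OFFSET];
}

int main(void)
{
    for (int i = 0; i < 9; i++) {
        printf("top page %d reads %u\n", i, read_page(i));
    }
    return 0;
}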
*/ + if (top_object->shadow != VM_OBJECT_NULL) { + printf("VM_TEST_COLLAPSE_COMPRESSOR: not collapsed\n"); + printf("VM_TEST_COLLAPSE_COMPRESSOR: FAIL\n"); + if (vm_object_collapse_compressor_allowed) { + panic("VM_TEST_COLLAPSE_COMPRESSOR: FAIL\n"); + } + } else { + /* check the contents of the mapping */ + unsigned char expect[9] = + { 0xA0, 0xA1, 0xA2, /* resident in top */ + 0xA3, 0xA4, 0xA5, /* compressed in top */ + 0xB9, /* resident in backing + shadow_offset */ + 0xBD, /* compressed in backing + shadow_offset + paging_offset */ + 0x00 }; /* absent in both */ + unsigned char actual[9]; + unsigned int i, errors; + + errors = 0; + for (i = 0; i < sizeof(actual); i++) { + actual[i] = (unsigned char) top_address[i * PAGE_SIZE]; + if (actual[i] != expect[i]) { + errors++; + } + } + printf("VM_TEST_COLLAPSE_COMPRESSOR: " + "actual [%x %x %x %x %x %x %x %x %x] " + "expect [%x %x %x %x %x %x %x %x %x] " + "%d errors\n", + actual[0], actual[1], actual[2], actual[3], + actual[4], actual[5], actual[6], actual[7], + actual[8], + expect[0], expect[1], expect[2], expect[3], + expect[4], expect[5], expect[6], expect[7], + expect[8], + errors); + if (errors) { + panic("VM_TEST_COLLAPSE_COMPRESSOR: FAIL\n"); + } else { + printf("VM_TEST_COLLAPSE_COMPRESSOR: PASS\n"); + } + } +} +#else /* VM_TEST_COLLAPSE_COMPRESSOR */ +#define vm_test_collapse_compressor() +#endif /* VM_TEST_COLLAPSE_COMPRESSOR */ + +#if VM_TEST_WIRE_AND_EXTRACT +extern ledger_template_t task_ledger_template; +#include +extern ppnum_t vm_map_get_phys_page(vm_map_t map, + vm_offset_t offset); +static void +vm_test_wire_and_extract(void) +{ + ledger_t ledger; + vm_map_t user_map, wire_map; + mach_vm_address_t user_addr, wire_addr; + mach_vm_size_t user_size, wire_size; + mach_vm_offset_t cur_offset; + vm_prot_t cur_prot, max_prot; + ppnum_t user_ppnum, wire_ppnum; + kern_return_t kr; + + ledger = ledger_instantiate(task_ledger_template, + LEDGER_CREATE_ACTIVE_ENTRIES); + user_map = vm_map_create(pmap_create_options(ledger, 0, PMAP_CREATE_64BIT), + 0x100000000ULL, + 0x200000000ULL, + TRUE); + wire_map = vm_map_create(NULL, + 0x100000000ULL, + 0x200000000ULL, + TRUE); + user_addr = 0; + user_size = 0x10000; + kr = mach_vm_allocate(user_map, + &user_addr, + user_size, + VM_FLAGS_ANYWHERE); + assert(kr == KERN_SUCCESS); + wire_addr = 0; + wire_size = user_size; + kr = mach_vm_remap(wire_map, + &wire_addr, + wire_size, + 0, + VM_FLAGS_ANYWHERE, + user_map, + user_addr, + FALSE, + &cur_prot, + &max_prot, + VM_INHERIT_NONE); + assert(kr == KERN_SUCCESS); + for (cur_offset = 0; + cur_offset < wire_size; + cur_offset += PAGE_SIZE) { + kr = vm_map_wire_and_extract(wire_map, + wire_addr + cur_offset, + VM_PROT_DEFAULT | VM_PROT_MEMORY_TAG_MAKE(VM_KERN_MEMORY_OSFMK), + TRUE, + &wire_ppnum); + assert(kr == KERN_SUCCESS); + user_ppnum = vm_map_get_phys_page(user_map, + user_addr + cur_offset); + printf("VM_TEST_WIRE_AND_EXTRACT: kr=0x%x " + "user[%p:0x%llx:0x%x] wire[%p:0x%llx:0x%x]\n", + kr, + user_map, user_addr + cur_offset, user_ppnum, + wire_map, wire_addr + cur_offset, wire_ppnum); + if (kr != KERN_SUCCESS || + wire_ppnum == 0 || + wire_ppnum != user_ppnum) { + panic("VM_TEST_WIRE_AND_EXTRACT: FAIL\n"); + } + } + cur_offset -= PAGE_SIZE; + kr = vm_map_wire_and_extract(wire_map, + wire_addr + cur_offset, + VM_PROT_DEFAULT, + TRUE, + &wire_ppnum); + assert(kr == KERN_SUCCESS); + printf("VM_TEST_WIRE_AND_EXTRACT: re-wire kr=0x%x " + "user[%p:0x%llx:0x%x] wire[%p:0x%llx:0x%x]\n", + kr, + user_map, user_addr + cur_offset, user_ppnum, + wire_map, 
wire_addr + cur_offset, wire_ppnum); + if (kr != KERN_SUCCESS || + wire_ppnum == 0 || + wire_ppnum != user_ppnum) { + panic("VM_TEST_WIRE_AND_EXTRACT: FAIL\n"); + } + + printf("VM_TEST_WIRE_AND_EXTRACT: PASS\n"); +} +#else /* VM_TEST_WIRE_AND_EXTRACT */ +#define vm_test_wire_and_extract() +#endif /* VM_TEST_WIRE_AND_EXTRACT */ + +#if VM_TEST_PAGE_WIRE_OVERFLOW_PANIC +static void +vm_test_page_wire_overflow_panic(void) +{ + vm_object_t object; + vm_page_t page; + + printf("VM_TEST_PAGE_WIRE_OVERFLOW_PANIC: starting...\n"); + + object = vm_object_allocate(PAGE_SIZE); + vm_object_lock(object); + page = vm_page_alloc(object, 0x0); + vm_page_lock_queues(); + do { + vm_page_wire(page, 1, FALSE); + } while (page->wire_count != 0); + vm_page_unlock_queues(); + vm_object_unlock(object); + panic("FBDP(%p,%p): wire_count overflow not detected\n", + object, page); +} +#else /* VM_TEST_PAGE_WIRE_OVERFLOW_PANIC */ +#define vm_test_page_wire_overflow_panic() +#endif /* VM_TEST_PAGE_WIRE_OVERFLOW_PANIC */ + +#if __arm64__ && VM_TEST_KERNEL_OBJECT_FAULT +extern int copyinframe(vm_address_t fp, char *frame, boolean_t is64bit); +static void +vm_test_kernel_object_fault(void) +{ + kern_return_t kr; + vm_offset_t stack; + uintptr_t frameb[2]; + int ret; + + kr = kernel_memory_allocate(kernel_map, &stack, + kernel_stack_size + (2 * PAGE_SIZE), + 0, + (KMA_KSTACK | KMA_KOBJECT | + KMA_GUARD_FIRST | KMA_GUARD_LAST), + VM_KERN_MEMORY_STACK); + if (kr != KERN_SUCCESS) { + panic("VM_TEST_KERNEL_OBJECT_FAULT: kernel_memory_allocate kr 0x%x\n", kr); + } + ret = copyinframe((uintptr_t)stack, (char *)frameb, TRUE); + if (ret != 0) { + printf("VM_TEST_KERNEL_OBJECT_FAULT: PASS\n"); + } else { + printf("VM_TEST_KERNEL_OBJECT_FAULT: FAIL\n"); + } + vm_map_remove(kernel_map, + stack, + stack + kernel_stack_size + (2 * PAGE_SIZE), + VM_MAP_REMOVE_KUNWIRE); + stack = 0; +} +#else /* __arm64__ && VM_TEST_KERNEL_OBJECT_FAULT */ +#define vm_test_kernel_object_fault() +#endif /* __arm64__ && VM_TEST_KERNEL_OBJECT_FAULT */ + +#if VM_TEST_DEVICE_PAGER_TRANSPOSE +static void +vm_test_device_pager_transpose(void) +{ + memory_object_t device_pager; + vm_object_t anon_object, device_object; + vm_size_t size; + vm_map_offset_t device_mapping; + kern_return_t kr; + + size = 3 * PAGE_SIZE; + anon_object = vm_object_allocate(size); + assert(anon_object != VM_OBJECT_NULL); + device_pager = device_pager_setup(NULL, 0, size, 0); + assert(device_pager != NULL); + device_object = memory_object_to_vm_object(device_pager); + assert(device_object != VM_OBJECT_NULL); +#if 0 + /* + * Can't actually map this, since another thread might do a + * vm_map_enter() that gets coalesced into this object, which + * would cause the test to fail. 
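vm_test_page_wire_overflow_panic() above wires the same page repeatedly and expects the kernel to detect the moment wire_count would wrap rather than letting it silently overflow to zero. The idea can be modeled in a few lines of plain C; the 16-bit counter width here is an assumption for illustration only:

#include <stdint.h>
#include <stdio.h>

/* wire_count is a narrow field in struct vm_page; modeled here as 16 bits */
struct page {
    uint16_t wire_count;
};

/* take one wire reference; report instead of wrapping silently to 0 */
static int
page_wire(struct page *p)
{
    if (p->wire_count == UINT16_MAX) {
        return -1;              /* the kernel is expected to panic here */
    }
    p->wire_count++;
    return 0;
}

int main(void)
{
    struct page pg = { 0 };

    /* keep wiring until the counter would wrap, as the test does */
    for (unsigned i = 0; i < 70000; i++) {
        if (page_wire(&pg) != 0) {
            printf("overflow caught at wire_count %u\n", pg.wire_count);
            return 0;
        }
    }
    printf("wire_count wrapped without being detected\n");
    return 1;
}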
+ */ + vm_map_offset_t anon_mapping = 0; + kr = vm_map_enter(kernel_map, &anon_mapping, size, 0, + VM_FLAGS_ANYWHERE, VM_MAP_KERNEL_FLAGS_NONE, VM_KERN_MEMORY_NONE, + anon_object, 0, FALSE, VM_PROT_DEFAULT, VM_PROT_ALL, + VM_INHERIT_DEFAULT); + assert(kr == KERN_SUCCESS); +#endif + device_mapping = 0; + kr = vm_map_enter_mem_object(kernel_map, &device_mapping, size, 0, + VM_FLAGS_ANYWHERE, + VM_MAP_KERNEL_FLAGS_NONE, + VM_KERN_MEMORY_NONE, + (void *)device_pager, 0, FALSE, + VM_PROT_DEFAULT, VM_PROT_ALL, + VM_INHERIT_DEFAULT); + assert(kr == KERN_SUCCESS); + memory_object_deallocate(device_pager); + + vm_object_lock(anon_object); + vm_object_activity_begin(anon_object); + anon_object->blocked_access = TRUE; + vm_object_unlock(anon_object); + vm_object_lock(device_object); + vm_object_activity_begin(device_object); + device_object->blocked_access = TRUE; + vm_object_unlock(device_object); + + assert(anon_object->ref_count == 1); + assert(!anon_object->named); + assert(device_object->ref_count == 2); + assert(device_object->named); + + kr = vm_object_transpose(device_object, anon_object, size); + assert(kr == KERN_SUCCESS); + + vm_object_lock(anon_object); + vm_object_activity_end(anon_object); + anon_object->blocked_access = FALSE; + vm_object_unlock(anon_object); + vm_object_lock(device_object); + vm_object_activity_end(device_object); + device_object->blocked_access = FALSE; + vm_object_unlock(device_object); + + assert(anon_object->ref_count == 2); + assert(anon_object->named); +#if 0 + kr = vm_deallocate(kernel_map, anon_mapping, size); + assert(kr == KERN_SUCCESS); +#endif + assert(device_object->ref_count == 1); + assert(!device_object->named); + kr = vm_deallocate(kernel_map, device_mapping, size); + assert(kr == KERN_SUCCESS); + + printf("VM_TEST_DEVICE_PAGER_TRANSPOSE: PASS\n"); +} +#else /* VM_TEST_DEVICE_PAGER_TRANSPOSE */ +#define vm_test_device_pager_transpose() +#endif /* VM_TEST_DEVICE_PAGER_TRANSPOSE */ + +#if PMAP_CREATE_FORCE_4K_PAGES && MACH_ASSERT +extern kern_return_t vm_allocate_external(vm_map_t map, + vm_offset_t *addr, + vm_size_t size, + int flags); +extern kern_return_t vm_remap_external(vm_map_t target_map, + vm_offset_t *address, + vm_size_t size, + vm_offset_t mask, + int flags, + vm_map_t src_map, + vm_offset_t memory_address, + boolean_t copy, + vm_prot_t *cur_protection, + vm_prot_t *max_protection, + vm_inherit_t inheritance); +extern int debug4k_panic_on_misaligned_sharing; + +void vm_test_4k(void); +void +vm_test_4k(void) +{ + pmap_t test_pmap; + vm_map_t test_map; + kern_return_t kr; + vm_address_t expected_addr; + vm_address_t alloc1_addr, alloc2_addr, alloc3_addr, alloc4_addr; + vm_address_t alloc5_addr, dealloc_addr, remap_src_addr, remap_dst_addr; + vm_size_t alloc1_size, alloc2_size, alloc3_size, alloc4_size; + vm_size_t alloc5_size, remap_src_size; + vm_address_t fault_addr; + vm_prot_t cur_prot, max_prot; + int saved_debug4k_panic_on_misaligned_sharing; + + printf("\n\n\nVM_TEST_4K:%d creating 4K map...\n", __LINE__); + test_pmap = pmap_create_options(NULL, 0, PMAP_CREATE_64BIT | PMAP_CREATE_FORCE_4K_PAGES); + assert(test_pmap != NULL); + test_map = vm_map_create(test_pmap, + MACH_VM_MIN_ADDRESS, + MACH_VM_MAX_ADDRESS, + TRUE); + assert(test_map != VM_MAP_NULL); + vm_map_set_page_shift(test_map, FOURK_PAGE_SHIFT); + printf("VM_TEST_4K:%d map %p pmap %p page_size 0x%x\n", __LINE__, test_map, test_pmap, VM_MAP_PAGE_SIZE(test_map)); + + alloc1_addr = 0; + alloc1_size = 1 * FOURK_PAGE_SIZE; + expected_addr = 0x1000; + printf("VM_TEST_4K:%d 
vm_allocate(%p, 0x%lx, 0x%lx)...\n", __LINE__, test_map, alloc1_addr, alloc1_size); + kr = vm_allocate_external(test_map, + &alloc1_addr, + alloc1_size, + VM_FLAGS_ANYWHERE); + assertf(kr == KERN_SUCCESS, "kr = 0x%x", kr); + assertf(alloc1_addr == expected_addr, "alloc1_addr = 0x%lx expected 0x%lx", alloc1_addr, expected_addr); + printf("VM_TEST_4K:%d -> 0x%lx\n", __LINE__, alloc1_addr); + expected_addr += alloc1_size; + + printf("VM_TEST_4K:%d vm_deallocate(%p, 0x%lx, 0x%lx)...\n", __LINE__, test_map, alloc1_addr, alloc1_size); + kr = vm_deallocate(test_map, alloc1_addr, alloc1_size); + assertf(kr == KERN_SUCCESS, "kr = 0x%x", kr); + printf("VM_TEST_4K:%d -> 0x%lx\n", __LINE__, alloc1_addr); + + alloc1_addr = 0; + alloc1_size = 1 * FOURK_PAGE_SIZE; + expected_addr = 0x1000; + printf("VM_TEST_4K:%d vm_allocate(%p, 0x%lx, 0x%lx)...\n", __LINE__, test_map, alloc1_addr, alloc1_size); + kr = vm_allocate_external(test_map, + &alloc1_addr, + alloc1_size, + VM_FLAGS_ANYWHERE); + assertf(kr == KERN_SUCCESS, "kr = 0x%x", kr); + assertf(alloc1_addr == expected_addr, "alloc1_addr = 0x%lx expected 0x%lx", alloc1_addr, expected_addr); + printf("VM_TEST_4K:%d -> 0x%lx\n", __LINE__, alloc1_addr); + expected_addr += alloc1_size; + + alloc2_addr = 0; + alloc2_size = 3 * FOURK_PAGE_SIZE; + printf("VM_TEST_4K:%d vm_allocate(%p, 0x%lx, 0x%lx)...\n", __LINE__, test_map, alloc2_addr, alloc2_size); + kr = vm_allocate_external(test_map, + &alloc2_addr, + alloc2_size, + VM_FLAGS_ANYWHERE); + assertf(kr == KERN_SUCCESS, "kr = 0x%x", kr); + assertf(alloc2_addr == expected_addr, "alloc2_addr = 0x%lx expected 0x%lx", alloc2_addr, expected_addr); + printf("VM_TEST_4K:%d -> 0x%lx\n", __LINE__, alloc2_addr); + expected_addr += alloc2_size; + + alloc3_addr = 0; + alloc3_size = 18 * FOURK_PAGE_SIZE; + printf("VM_TEST_4K:%d vm_allocate(%p, 0x%lx, 0x%lx)...\n", __LINE__, test_map, alloc3_addr, alloc3_size); + kr = vm_allocate_external(test_map, + &alloc3_addr, + alloc3_size, + VM_FLAGS_ANYWHERE); + assertf(kr == KERN_SUCCESS, "kr = 0x%x", kr); + assertf(alloc3_addr == expected_addr, "alloc3_addr = 0x%lx expected 0x%lx\n", alloc3_addr, expected_addr); + printf("VM_TEST_4K:%d -> 0x%lx\n", __LINE__, alloc3_addr); + expected_addr += alloc3_size; + + alloc4_addr = 0; + alloc4_size = 1 * FOURK_PAGE_SIZE; + printf("VM_TEST_4K:%d vm_allocate(%p, 0x%lx, 0x%lx)...\n", __LINE__, test_map, alloc4_addr, alloc4_size); + kr = vm_allocate_external(test_map, + &alloc4_addr, + alloc4_size, + VM_FLAGS_ANYWHERE); + assertf(kr == KERN_SUCCESS, "kr = 0x%x", kr); + assertf(alloc4_addr == expected_addr, "alloc4_addr = 0x%lx expected 0x%lx", alloc4_addr, expected_addr); + printf("VM_TEST_4K:%d -> 0x%lx\n", __LINE__, alloc3_addr); + expected_addr += alloc4_size; + + printf("VM_TEST_4K:%d vm_protect(%p, 0x%lx, 0x%lx, READ)...\n", __LINE__, test_map, alloc2_addr, (1UL * FOURK_PAGE_SIZE)); + kr = vm_protect(test_map, + alloc2_addr, + (1UL * FOURK_PAGE_SIZE), + FALSE, + VM_PROT_READ); + assertf(kr == KERN_SUCCESS, "kr = 0x%x", kr); + + for (fault_addr = alloc1_addr; + fault_addr < alloc4_addr + alloc4_size + (2 * FOURK_PAGE_SIZE); + fault_addr += FOURK_PAGE_SIZE) { + printf("VM_TEST_4K:%d write fault at 0x%lx...\n", __LINE__, fault_addr); + kr = vm_fault(test_map, + fault_addr, + VM_PROT_WRITE, + FALSE, + VM_KERN_MEMORY_NONE, + THREAD_UNINT, + NULL, + 0); + printf("VM_TEST_4K:%d -> 0x%x\n", __LINE__, kr); + if (fault_addr == alloc2_addr) { + assertf(kr == KERN_PROTECTION_FAILURE, "fault_addr = 0x%lx kr = 0x%x expected 0x%x", fault_addr, kr, 
KERN_PROTECTION_FAILURE); + printf("VM_TEST_4K:%d read fault at 0x%lx...\n", __LINE__, fault_addr); + kr = vm_fault(test_map, + fault_addr, + VM_PROT_READ, + FALSE, + VM_KERN_MEMORY_NONE, + THREAD_UNINT, + NULL, + 0); + assertf(kr == KERN_SUCCESS, "fault_addr = 0x%lx kr = 0x%x expected 0x%x", fault_addr, kr, KERN_SUCCESS); + printf("VM_TEST_4K:%d -> 0x%x\n", __LINE__, kr); + } else if (fault_addr >= alloc4_addr + alloc4_size) { + assertf(kr == KERN_INVALID_ADDRESS, "fault_addr = 0x%lx kr = 0x%x expected 0x%x", fault_addr, kr, KERN_INVALID_ADDRESS); + } else { + assertf(kr == KERN_SUCCESS, "fault_addr = 0x%lx kr = 0x%x expected 0x%x", fault_addr, kr, KERN_SUCCESS); + } + } + + alloc5_addr = 0; + alloc5_size = 7 * FOURK_PAGE_SIZE; + printf("VM_TEST_4K:%d vm_allocate(%p, 0x%lx, 0x%lx)...\n", __LINE__, test_map, alloc5_addr, alloc5_size); + kr = vm_allocate_external(test_map, + &alloc5_addr, + alloc5_size, + VM_FLAGS_ANYWHERE); + assertf(kr == KERN_SUCCESS, "kr = 0x%x", kr); + assertf(alloc5_addr == expected_addr, "alloc5_addr = 0x%lx expected 0x%lx", alloc5_addr, expected_addr); + printf("VM_TEST_4K:%d -> 0x%lx\n", __LINE__, alloc5_addr); + expected_addr += alloc5_size; + + dealloc_addr = vm_map_round_page(alloc5_addr, PAGE_SHIFT); + dealloc_addr += FOURK_PAGE_SIZE; + printf("VM_TEST_4K:%d vm_deallocate(%p, 0x%lx, 0x%x)...\n", __LINE__, test_map, dealloc_addr, FOURK_PAGE_SIZE); + kr = vm_deallocate(test_map, dealloc_addr, FOURK_PAGE_SIZE); + assertf(kr == KERN_SUCCESS, "kr = 0x%x", kr); + printf("VM_TEST_4K:%d -> 0x%x\n", __LINE__, kr); + + remap_src_addr = vm_map_round_page(alloc3_addr, PAGE_SHIFT); + remap_src_addr += FOURK_PAGE_SIZE; + remap_src_size = 2 * FOURK_PAGE_SIZE; + remap_dst_addr = 0; + printf("VM_TEST_4K:%d vm_remap(%p, 0x%lx, 0x%lx, 0x%lx, copy=0)...\n", __LINE__, test_map, remap_dst_addr, remap_src_size, remap_src_addr); + kr = vm_remap_external(test_map, + &remap_dst_addr, + remap_src_size, + 0, /* mask */ + VM_FLAGS_ANYWHERE, + test_map, + remap_src_addr, + FALSE, /* copy */ + &cur_prot, + &max_prot, + VM_INHERIT_DEFAULT); + assertf(kr == KERN_SUCCESS, "kr = 0x%x", kr); + assertf(remap_dst_addr == expected_addr, "remap_dst_addr = 0x%lx expected 0x%lx", remap_dst_addr, expected_addr); + printf("VM_TEST_4K:%d -> 0x%lx\n", __LINE__, remap_dst_addr); + expected_addr += remap_src_size; + + for (fault_addr = remap_dst_addr; + fault_addr < remap_dst_addr + remap_src_size; + fault_addr += 4096) { + printf("VM_TEST_4K:%d write fault at 0x%lx...\n", __LINE__, fault_addr); + kr = vm_fault(test_map, + fault_addr, + VM_PROT_WRITE, + FALSE, + VM_KERN_MEMORY_NONE, + THREAD_UNINT, + NULL, + 0); + assertf(kr == KERN_SUCCESS, "kr = 0x%x", kr); + printf("VM_TEST_4K:%d -> 0x%x\n", __LINE__, kr); + } + + printf("VM_TEST_4K:\n"); + remap_dst_addr = 0; + remap_src_addr = alloc3_addr + 0xc000; + remap_src_size = 0x5000; + printf("VM_TEST_4K: vm_remap(%p, 0x%lx, 0x%lx, %p, copy=0) from 4K to 16K\n", test_map, remap_src_addr, remap_src_size, kernel_map); + kr = vm_remap_external(kernel_map, + &remap_dst_addr, + remap_src_size, + 0, /* mask */ + VM_FLAGS_ANYWHERE | VM_FLAGS_RETURN_DATA_ADDR, + test_map, + remap_src_addr, + FALSE, /* copy */ + &cur_prot, + &max_prot, + VM_INHERIT_DEFAULT); + assertf(kr == KERN_SUCCESS, "kr = 0x%x", kr); + printf("VM_TEST_4K: -> remapped (shared) in map %p at addr 0x%lx\n", kernel_map, remap_dst_addr); + + printf("VM_TEST_4K:\n"); + remap_dst_addr = 0; + remap_src_addr = alloc3_addr + 0xc000; + remap_src_size = 0x5000; + printf("VM_TEST_4K: vm_remap(%p, 0x%lx, 0x%lx, 
%p, copy=1) from 4K to 16K\n", test_map, remap_src_addr, remap_src_size, kernel_map); + kr = vm_remap_external(kernel_map, + &remap_dst_addr, + remap_src_size, + 0, /* mask */ + VM_FLAGS_ANYWHERE | VM_FLAGS_RETURN_DATA_ADDR, + test_map, + remap_src_addr, + TRUE, /* copy */ + &cur_prot, + &max_prot, + VM_INHERIT_DEFAULT); + assertf(kr == KERN_SUCCESS, "kr = 0x%x", kr); + printf("VM_TEST_4K: -> remapped (COW) in map %p at addr 0x%lx\n", kernel_map, remap_dst_addr); + + printf("VM_TEST_4K:\n"); + saved_debug4k_panic_on_misaligned_sharing = debug4k_panic_on_misaligned_sharing; + debug4k_panic_on_misaligned_sharing = 0; + remap_dst_addr = 0; + remap_src_addr = alloc1_addr; + remap_src_size = alloc1_size + alloc2_size; + printf("VM_TEST_4K: vm_remap(%p, 0x%lx, 0x%lx, %p, copy=0) from 4K to 16K\n", test_map, remap_src_addr, remap_src_size, kernel_map); + kr = vm_remap_external(kernel_map, + &remap_dst_addr, + remap_src_size, + 0, /* mask */ + VM_FLAGS_ANYWHERE | VM_FLAGS_RETURN_DATA_ADDR, + test_map, + remap_src_addr, + FALSE, /* copy */ + &cur_prot, + &max_prot, + VM_INHERIT_DEFAULT); + assertf(kr != KERN_SUCCESS, "kr = 0x%x", kr); + printf("VM_TEST_4K: -> remap (SHARED) in map %p at addr 0x%lx kr=0x%x\n", kernel_map, remap_dst_addr, kr); + debug4k_panic_on_misaligned_sharing = saved_debug4k_panic_on_misaligned_sharing; + + printf("VM_TEST_4K:\n"); + remap_dst_addr = 0; + remap_src_addr = alloc1_addr; + remap_src_size = alloc1_size + alloc2_size; + printf("VM_TEST_4K: vm_remap(%p, 0x%lx, 0x%lx, %p, copy=1) from 4K to 16K\n", test_map, remap_src_addr, remap_src_size, kernel_map); + kr = vm_remap_external(kernel_map, + &remap_dst_addr, + remap_src_size, + 0, /* mask */ + VM_FLAGS_ANYWHERE | VM_FLAGS_RETURN_DATA_ADDR, + test_map, + remap_src_addr, + TRUE, /* copy */ + &cur_prot, + &max_prot, + VM_INHERIT_DEFAULT); +#if 000 + assertf(kr != KERN_SUCCESS, "kr = 0x%x", kr); + printf("VM_TEST_4K: -> remap (COPY) in map %p at addr 0x%lx kr=0x%x\n", kernel_map, remap_dst_addr, kr); +#else /* 000 */ + assertf(kr == KERN_SUCCESS, "kr = 0x%x", kr); + printf("VM_TEST_4K: -> remap (COPY) in map %p at addr 0x%lx kr=0x%x\n", kernel_map, remap_dst_addr, kr); +#endif /* 000 */ + + +#if 00 + printf("VM_TEST_4K:%d vm_map_remove(%p, 0x%llx, 0x%llx)...\n", __LINE__, test_map, test_map->min_offset, test_map->max_offset); + kr = vm_map_remove(test_map, + test_map->min_offset, + test_map->max_offset, + VM_MAP_REMOVE_GAPS_OK); + assertf(kr == KERN_SUCCESS, "kr = 0x%x", kr); +#endif + + printf("VM_TEST_4K: PASS\n\n\n\n"); +} +#endif /* PMAP_CREATE_FORCE_4K_PAGES && MACH_ASSERT */ + +#if MACH_ASSERT +static void +vm_test_map_copy_adjust_to_target_one( + vm_map_copy_t copy_map, + vm_map_t target_map) +{ + kern_return_t kr; + vm_map_copy_t target_copy; + vm_map_offset_t overmap_start, overmap_end, trimmed_start; + + target_copy = VM_MAP_COPY_NULL; + /* size is 2 (4k) pages but range covers 3 pages */ + kr = vm_map_copy_adjust_to_target(copy_map, + 0x0 + 0xfff, + 0x1002, + target_map, + FALSE, + &target_copy, + &overmap_start, + &overmap_end, + &trimmed_start); + assert(kr == KERN_SUCCESS); + assert(overmap_start == 0); + assert(overmap_end == 0); + assert(trimmed_start == 0); + assertf(target_copy->size == 0x3000, + "target_copy %p size 0x%llx\n", + target_copy, (uint64_t)target_copy->size); + vm_map_copy_discard(target_copy); + + /* 1. adjust_to_target() for bad offset -> error */ + /* 2. adjust_to_target() for bad size -> error */ + /* 3. adjust_to_target() for the whole thing -> unchanged */ + /* 4. 
adjust_to_target() to trim start by less than 1 page */ + /* 5. adjust_to_target() to trim end by less than 1 page */ + /* 6. adjust_to_target() to trim start and end by less than 1 page */ + /* 7. adjust_to_target() to trim start by more than 1 page */ + /* 8. adjust_to_target() to trim end by more than 1 page */ + /* 9. adjust_to_target() to trim start and end by more than 1 page */ + /* 10. adjust_to_target() to trim start by more than 1 entry */ + /* 11. adjust_to_target() to trim start by more than 1 entry */ + /* 12. adjust_to_target() to trim start and end by more than 1 entry */ + /* 13. adjust_to_target() to trim start and end down to 1 entry */ +} + +static void +vm_test_map_copy_adjust_to_target(void) +{ + kern_return_t kr; + vm_map_t map4k, map16k; + vm_object_t obj1, obj2, obj3, obj4; + vm_map_offset_t addr4k, addr16k; + vm_map_size_t size4k, size16k; + vm_map_copy_t copy4k, copy16k; + vm_prot_t curprot, maxprot; + + /* create a 4k map */ + map4k = vm_map_create(PMAP_NULL, 0, (uint32_t)-1, TRUE); + vm_map_set_page_shift(map4k, 12); + + /* create a 16k map */ + map16k = vm_map_create(PMAP_NULL, 0, (uint32_t)-1, TRUE); + vm_map_set_page_shift(map16k, 14); + + /* create 4 VM objects */ + obj1 = vm_object_allocate(0x100000); + obj2 = vm_object_allocate(0x100000); + obj3 = vm_object_allocate(0x100000); + obj4 = vm_object_allocate(0x100000); + + /* map objects in 4k map */ + vm_object_reference(obj1); + addr4k = 0x1000; + size4k = 0x3000; + kr = vm_map_enter(map4k, &addr4k, size4k, 0, VM_FLAGS_ANYWHERE, + VM_MAP_KERNEL_FLAGS_NONE, 0, obj1, 0, + FALSE, VM_PROT_DEFAULT, VM_PROT_DEFAULT, + VM_INHERIT_DEFAULT); + assert(kr == KERN_SUCCESS); + assert(addr4k == 0x1000); + + /* map objects in 16k map */ + vm_object_reference(obj1); + addr16k = 0x4000; + size16k = 0x8000; + kr = vm_map_enter(map16k, &addr16k, size16k, 0, VM_FLAGS_ANYWHERE, + VM_MAP_KERNEL_FLAGS_NONE, 0, obj1, 0, + FALSE, VM_PROT_DEFAULT, VM_PROT_DEFAULT, + VM_INHERIT_DEFAULT); + assert(kr == KERN_SUCCESS); + assert(addr16k == 0x4000); + + /* test for */ + ipc_port_t mem_entry; + memory_object_size_t mem_entry_size; + mach_vm_size_t map_size; + mem_entry_size = 0x1002; + mem_entry = IPC_PORT_NULL; + kr = mach_make_memory_entry_64(map16k, &mem_entry_size, addr16k + 0x2fff, + MAP_MEM_VM_SHARE | MAP_MEM_USE_DATA_ADDR | VM_PROT_READ, + &mem_entry, IPC_PORT_NULL); + assertf(kr == KERN_SUCCESS, "kr 0x%x\n", kr); + assertf(mem_entry_size == 0x5001, "mem_entry_size 0x%llx\n", (uint64_t) mem_entry_size); + map_size = 0; + kr = mach_memory_entry_map_size(mem_entry, map4k, 0, 0x1002, &map_size); + assertf(kr == KERN_SUCCESS, "kr 0x%x\n", kr); + assertf(map_size == 0x3000, "mem_entry %p map_size 0x%llx\n", mem_entry, (uint64_t)map_size); + mach_memory_entry_port_release(mem_entry); + + /* create 4k copy map */ + kr = vm_map_copy_extract(map4k, addr4k, 0x3000, + VM_PROT_READ, FALSE, + ©4k, &curprot, &maxprot, + VM_INHERIT_DEFAULT, VM_MAP_KERNEL_FLAGS_NONE); + assert(kr == KERN_SUCCESS); + assert(copy4k->size == 0x3000); + + /* create 16k copy map */ + kr = vm_map_copy_extract(map16k, addr16k, 0x4000, + VM_PROT_READ, FALSE, + ©16k, &curprot, &maxprot, + VM_INHERIT_DEFAULT, VM_MAP_KERNEL_FLAGS_NONE); + assert(kr == KERN_SUCCESS); + assert(copy16k->size == 0x4000); + + /* test each combination */ +// vm_test_map_copy_adjust_to_target_one(copy4k, map4k); +// vm_test_map_copy_adjust_to_target_one(copy16k, map16k); +// vm_test_map_copy_adjust_to_target_one(copy4k, map16k); + vm_test_map_copy_adjust_to_target_one(copy16k, map4k); + + /* assert 
1 ref on 4k map */ + assert(os_ref_get_count(&map4k->map_refcnt) == 1); + /* release 4k map */ + vm_map_deallocate(map4k); + /* assert 1 ref on 16k map */ + assert(os_ref_get_count(&map16k->map_refcnt) == 1); + /* release 16k map */ + vm_map_deallocate(map16k); + /* deallocate copy maps */ + vm_map_copy_discard(copy4k); + vm_map_copy_discard(copy16k); + /* assert 1 ref on all VM objects */ + assert(obj1->ref_count == 1); + assert(obj2->ref_count == 1); + assert(obj3->ref_count == 1); + assert(obj4->ref_count == 1); + /* release all VM objects */ + vm_object_deallocate(obj1); + vm_object_deallocate(obj2); + vm_object_deallocate(obj3); + vm_object_deallocate(obj4); +} +#endif /* MACH_ASSERT */ + +boolean_t vm_tests_in_progress = FALSE; + +kern_return_t +vm_tests(void) +{ + vm_tests_in_progress = TRUE; + + vm_test_collapse_compressor(); + vm_test_wire_and_extract(); + vm_test_page_wire_overflow_panic(); + vm_test_kernel_object_fault(); + vm_test_device_pager_transpose(); +#if MACH_ASSERT + vm_test_map_copy_adjust_to_target(); +#endif /* MACH_ASSERT */ +#if PMAP_CREATE_FORCE_4K_PAGES && MACH_ASSERT + vm_test_4k(); +#endif /* PMAP_CREATE_FORCE_4K_PAGES && MACH_ASSERT */ + + vm_tests_in_progress = FALSE; + + return KERN_SUCCESS; +} diff --git a/osfmk/vm/vm_user.c b/osfmk/vm/vm_user.c index 2dc0be56b..b73513d8a 100644 --- a/osfmk/vm/vm_user.c +++ b/osfmk/vm/vm_user.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2007 Apple Inc. All rights reserved. + * Copyright (c) 2000-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -1681,7 +1681,7 @@ kern_return_t mach_vm_region( vm_map_t map, mach_vm_offset_t *address, /* IN/OUT */ - mach_vm_size_t *size, /* OUT */ + mach_vm_size_t *size, /* OUT */ vm_region_flavor_t flavor, /* IN */ vm_region_info_t info, /* OUT */ mach_msg_type_number_t *count, /* IN/OUT */ @@ -1955,7 +1955,7 @@ mach_vm_purgable_control( } return vm_map_purgable_control(map, - vm_map_trunc_page(address, PAGE_MASK), + vm_map_trunc_page(address, VM_MAP_PAGE_MASK(map)), control, state); } @@ -1977,7 +1977,7 @@ vm_purgable_control( } return vm_map_purgable_control(map, - vm_map_trunc_page(address, PAGE_MASK), + vm_map_trunc_page(address, VM_MAP_PAGE_MASK(map)), control, state); } @@ -2086,14 +2086,22 @@ mach_vm_page_range_query( void *local_disp = NULL;; vm_map_size_t info_size = 0, local_disp_size = 0; mach_vm_offset_t start = 0, end = 0; + int effective_page_shift, effective_page_size, effective_page_mask; if (map == VM_MAP_NULL || dispositions_count == NULL) { return KERN_INVALID_ARGUMENT; } + effective_page_shift = vm_self_region_page_shift_safely(map); + if (effective_page_shift == -1) { + return KERN_INVALID_ARGUMENT; + } + effective_page_size = (1 << effective_page_shift); + effective_page_mask = effective_page_size - 1; + disp_buf_req_size = (*dispositions_count * sizeof(int)); - start = mach_vm_trunc_page(address); - end = mach_vm_round_page(address + size); + start = vm_map_trunc_page(address, effective_page_mask); + end = vm_map_round_page(address + size, effective_page_mask); if (end < start) { return KERN_INVALID_ARGUMENT; @@ -2116,22 +2124,17 @@ mach_vm_page_range_query( */ curr_sz = MIN(end - start, MAX_PAGE_RANGE_QUERY); - num_pages = (int) (curr_sz >> PAGE_SHIFT); + num_pages = (int) (curr_sz >> effective_page_shift); info_size = num_pages * sizeof(vm_page_info_basic_data_t); - info = kalloc(info_size); - - if (info == NULL) { - return KERN_RESOURCE_SHORTAGE; - } + info = kheap_alloc(KHEAP_TEMP, info_size, Z_WAITOK); local_disp_size = 
num_pages * sizeof(int); - local_disp = kalloc(local_disp_size); + local_disp = kheap_alloc(KHEAP_TEMP, local_disp_size, Z_WAITOK); - if (local_disp == NULL) { - kfree(info, info_size); - info = NULL; - return KERN_RESOURCE_SHORTAGE; + if (info == NULL || local_disp == NULL) { + kr = KERN_RESOURCE_SHORTAGE; + goto out; } while (size) { @@ -2139,7 +2142,8 @@ mach_vm_page_range_query( kr = vm_map_page_range_info_internal( map, start, - mach_vm_round_page(start + curr_sz), + vm_map_round_page(start + curr_sz, effective_page_mask), + effective_page_shift, VM_PAGE_INFO_BASIC, (vm_page_info_t) info, &count); @@ -2175,19 +2179,20 @@ mach_vm_page_range_query( size -= curr_sz; - curr_sz = MIN(mach_vm_round_page(size), MAX_PAGE_RANGE_QUERY); - num_pages = (int)(curr_sz >> PAGE_SHIFT); + curr_sz = MIN(vm_map_round_page(size, effective_page_mask), MAX_PAGE_RANGE_QUERY); + num_pages = (int)(curr_sz >> effective_page_shift); } } *dispositions_count = disp_buf_total_size / sizeof(int); - kfree(local_disp, local_disp_size); - local_disp = NULL; - - kfree(info, info_size); - info = NULL; - +out: + if (local_disp) { + kheap_free(KHEAP_TEMP, local_disp, local_disp_size); + } + if (info) { + kheap_free(KHEAP_TEMP, info, info_size); + } return kr; } @@ -2279,13 +2284,6 @@ vm_map_get_upl( return kr; } -#if CONFIG_EMBEDDED -extern int proc_selfpid(void); -extern char *proc_name_address(void *p); -int cs_executable_mem_entry = 0; -int log_executable_mem_entry = 0; -#endif /* CONFIG_EMBEDDED */ - /* * mach_make_memory_entry_64 * @@ -2335,31 +2333,13 @@ mach_make_memory_entry_internal( ipc_port_t *object_handle, ipc_port_t parent_handle) { - vm_map_version_t version; vm_named_entry_t parent_entry; vm_named_entry_t user_entry; ipc_port_t user_handle; kern_return_t kr; - vm_map_t real_map; - - /* needed for call to vm_map_lookup_locked */ - boolean_t wired; - boolean_t iskernel; - vm_object_offset_t obj_off; - vm_prot_t prot; - struct vm_object_fault_info fault_info = {}; vm_object_t object; - vm_object_t shadow_object; - - /* needed for direct map entry manipulation */ - vm_map_entry_t map_entry; - vm_map_entry_t next_entry; - vm_map_t local_map; - vm_map_t original_map = target_map; - vm_map_size_t total_size, map_size; + vm_map_size_t map_size; vm_map_offset_t map_start, map_end; - vm_map_offset_t local_offset; - vm_object_size_t mappable_size; /* * Stash the offset in the page for use by vm_map_enter_mem_object() @@ -2371,18 +2351,18 @@ mach_make_memory_entry_internal( vm_prot_t protections; vm_prot_t original_protections, mask_protections; unsigned int wimg_mode; - - boolean_t force_shadow = FALSE; boolean_t use_data_addr; boolean_t use_4K_compat; -#if VM_NAMED_ENTRY_LIST - int alias = -1; -#endif /* VM_NAMED_ENTRY_LIST */ + + DEBUG4K_MEMENTRY("map %p offset 0x%llx size 0x%llx prot 0x%x\n", target_map, offset, *size, permission); + + user_entry = NULL; if ((permission & MAP_MEM_FLAGS_MASK) & ~MAP_MEM_FLAGS_ALL) { /* * Unknown flag: reject for forward compatibility. 
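The mach_vm_page_range_query() changes above stop assuming the kernel's PAGE_MASK and instead derive an effective page shift, size and mask from the target map, so a 4K task and a 16K task round the same request differently. A small standalone example of that rounding arithmetic:

#include <stdint.h>
#include <stdio.h>

/* truncate / round an address to the page size implied by a page mask */
static uint64_t trunc_page_mask(uint64_t a, uint64_t mask) { return a & ~mask; }
static uint64_t round_page_mask(uint64_t a, uint64_t mask) { return (a + mask) & ~mask; }

int main(void)
{
    uint64_t address = 0x5123, size = 0x2001;
    int shifts[] = { 12, 14 };          /* 4K map vs. 16K map */

    for (int i = 0; i < 2; i++) {
        int      shift     = shifts[i];
        uint64_t page_size = 1ULL << shift;
        uint64_t page_mask = page_size - 1;
        uint64_t start = trunc_page_mask(address, page_mask);
        uint64_t end   = round_page_mask(address + size, page_mask);
        printf("shift %2d: start 0x%llx end 0x%llx (%llu pages)\n",
            shift,
            (unsigned long long)start,
            (unsigned long long)end,
            (unsigned long long)((end - start) >> shift));
    }
    return 0;
}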
*/ + DEBUG4K_MEMENTRY("map %p offset 0x%llx size 0x%llx prot 0x%x -> entry %p kr 0x%x\n", target_map, offset, *size, permission, user_entry, KERN_INVALID_VALUE); return KERN_INVALID_VALUE; } @@ -2394,6 +2374,7 @@ mach_make_memory_entry_internal( } if (parent_entry && parent_entry->is_copy) { + DEBUG4K_MEMENTRY("map %p offset 0x%llx size 0x%llx prot 0x%x -> entry %p kr 0x%x\n", target_map, offset, *size, permission, user_entry, KERN_INVALID_ARGUMENT); return KERN_INVALID_ARGUMENT; } @@ -2407,20 +2388,25 @@ mach_make_memory_entry_internal( user_handle = IP_NULL; user_entry = NULL; - map_start = vm_map_trunc_page(offset, PAGE_MASK); + map_start = vm_map_trunc_page(offset, VM_MAP_PAGE_MASK(target_map)); if (permission & MAP_MEM_ONLY) { boolean_t parent_is_object; - map_end = vm_map_round_page(offset + *size, PAGE_MASK); + map_end = vm_map_round_page(offset + *size, VM_MAP_PAGE_MASK(target_map)); map_size = map_end - map_start; if (use_data_addr || use_4K_compat || parent_entry == NULL) { + DEBUG4K_MEMENTRY("map %p offset 0x%llx size 0x%llx prot 0x%x -> entry %p kr 0x%x\n", target_map, offset, *size, permission, user_entry, KERN_INVALID_ARGUMENT); return KERN_INVALID_ARGUMENT; } - parent_is_object = !parent_entry->is_sub_map; - object = parent_entry->backing.object; + parent_is_object = parent_entry->is_object; + if (!parent_is_object) { + DEBUG4K_MEMENTRY("map %p offset 0x%llx size 0x%llx prot 0x%x -> entry %p kr 0x%x\n", target_map, offset, *size, permission, user_entry, KERN_INVALID_ARGUMENT); + return KERN_INVALID_ARGUMENT; + } + object = vm_named_entry_to_vm_object(parent_entry); if (parent_is_object && object != VM_OBJECT_NULL) { wimg_mode = object->wimg_bits; } else { @@ -2428,6 +2414,7 @@ mach_make_memory_entry_internal( } if ((access != GET_MAP_MEM(parent_entry->protection)) && !(parent_entry->protection & VM_PROT_WRITE)) { + DEBUG4K_MEMENTRY("map %p offset 0x%llx size 0x%llx prot 0x%x -> entry %p kr 0x%x\n", target_map, offset, *size, permission, user_entry, KERN_INVALID_RIGHT); return KERN_INVALID_RIGHT; } vm_prot_to_wimg(access, &wimg_mode); @@ -2446,20 +2433,29 @@ mach_make_memory_entry_internal( if (object_handle) { *object_handle = IP_NULL; } + DEBUG4K_MEMENTRY("map %p offset 0x%llx size 0x%llx prot 0x%x -> entry %p kr 0x%x\n", target_map, offset, *size, permission, user_entry, KERN_SUCCESS); return KERN_SUCCESS; } else if (permission & MAP_MEM_NAMED_CREATE) { int ledger_flags = 0; task_t owner; - map_end = vm_map_round_page(offset + *size, PAGE_MASK); + map_end = vm_map_round_page(offset + *size, VM_MAP_PAGE_MASK(target_map)); map_size = map_end - map_start; if (use_data_addr || use_4K_compat) { + DEBUG4K_MEMENTRY("map %p offset 0x%llx size 0x%llx prot 0x%x -> entry %p kr 0x%x\n", target_map, offset, *size, permission, user_entry, KERN_INVALID_ARGUMENT); return KERN_INVALID_ARGUMENT; } + if (map_size == 0) { + *size = 0; + *object_handle = IPC_PORT_NULL; + return KERN_SUCCESS; + } + kr = mach_memory_entry_allocate(&user_entry, &user_handle); if (kr != KERN_SUCCESS) { + DEBUG4K_MEMENTRY("map %p offset 0x%llx size 0x%llx prot 0x%x -> entry %p kr 0x%x\n", target_map, offset, *size, permission, user_entry, KERN_FAILURE); return KERN_FAILURE; } @@ -2585,7 +2581,16 @@ mach_make_memory_entry_internal( /* the object has no pages, so no WIMG bits to update here */ - user_entry->backing.object = object; + kr = vm_named_entry_from_vm_object( + user_entry, + object, + 0, + map_size, + (protections & VM_PROT_ALL)); + if (kr != KERN_SUCCESS) { + vm_object_deallocate(object); + goto 
make_mem_done; + } user_entry->internal = TRUE; user_entry->is_sub_map = FALSE; user_entry->offset = 0; @@ -2600,6 +2605,7 @@ mach_make_memory_entry_internal( *size = CAST_DOWN(vm_size_t, (user_entry->size - user_entry->data_offset)); *object_handle = user_handle; + DEBUG4K_MEMENTRY("map %p offset 0x%llx size 0x%llx prot 0x%x -> entry %p kr 0x%x\n", target_map, offset, *size, permission, user_entry, KERN_SUCCESS); return KERN_SUCCESS; } @@ -2607,11 +2613,17 @@ mach_make_memory_entry_internal( vm_map_copy_t copy; if (target_map == VM_MAP_NULL) { + DEBUG4K_MEMENTRY("map %p offset 0x%llx size 0x%llx prot 0x%x -> entry %p kr 0x%x\n", target_map, offset, *size, permission, user_entry, KERN_INVALID_TASK); return KERN_INVALID_TASK; } - map_end = vm_map_round_page(offset + *size, PAGE_MASK); + map_end = vm_map_round_page(offset + *size, VM_MAP_PAGE_MASK(target_map)); map_size = map_end - map_start; + if (map_size == 0) { + DEBUG4K_MEMENTRY("map %p offset 0x%llx size 0x%llx prot 0x%x -> entry %p kr 0x%x\n", target_map, offset, *size, permission, user_entry, KERN_INVALID_ARGUMENT); + return KERN_INVALID_ARGUMENT; + } + if (use_data_addr || use_4K_compat) { offset_in_page = offset - map_start; if (use_4K_compat) { @@ -2627,12 +2639,15 @@ mach_make_memory_entry_internal( VM_MAP_COPYIN_ENTRY_LIST, &copy); if (kr != KERN_SUCCESS) { + DEBUG4K_MEMENTRY("map %p offset 0x%llx size 0x%llx prot 0x%x -> entry %p kr 0x%x\n", target_map, offset, *size, permission, user_entry, kr); return kr; } + assert(copy != VM_MAP_COPY_NULL); kr = mach_memory_entry_allocate(&user_entry, &user_handle); if (kr != KERN_SUCCESS) { vm_map_copy_discard(copy); + DEBUG4K_MEMENTRY("map %p offset 0x%llx size 0x%llx prot 0x%x -> entry %p kr 0x%x\n", target_map, offset, *size, permission, user_entry, KERN_FAILURE); return KERN_FAILURE; } @@ -2648,19 +2663,47 @@ mach_make_memory_entry_internal( *size = CAST_DOWN(vm_size_t, (user_entry->size - user_entry->data_offset)); *object_handle = user_handle; + DEBUG4K_MEMENTRY("map %p offset 0x%llx size 0x%llx prot 0x%x -> entry %p kr 0x%x\n", target_map, offset, *size, permission, user_entry, KERN_SUCCESS); return KERN_SUCCESS; } - if (permission & MAP_MEM_VM_SHARE) { + if ((permission & MAP_MEM_VM_SHARE) + || parent_entry == NULL + || (permission & MAP_MEM_NAMED_REUSE)) { vm_map_copy_t copy; vm_prot_t cur_prot, max_prot; + vm_map_kernel_flags_t vmk_flags; + vm_map_entry_t parent_copy_entry; + vm_prot_t required_protection; if (target_map == VM_MAP_NULL) { + DEBUG4K_MEMENTRY("map %p offset 0x%llx size 0x%llx prot 0x%x -> entry %p kr 0x%x\n", target_map, offset, *size, permission, user_entry, KERN_INVALID_TASK); return KERN_INVALID_TASK; } - map_end = vm_map_round_page(offset + *size, PAGE_MASK); + map_end = vm_map_round_page(offset + *size, VM_MAP_PAGE_MASK(target_map)); + vmk_flags = VM_MAP_KERNEL_FLAGS_NONE; + parent_copy_entry = VM_MAP_ENTRY_NULL; + if (!(permission & MAP_MEM_VM_SHARE)) { + /* stop extracting if VM object changes */ + vmk_flags.vmkf_copy_single_object = TRUE; + if ((permission & MAP_MEM_NAMED_REUSE) && + parent_entry != NULL && + parent_entry->is_object) { + vm_map_copy_t parent_copy; + parent_copy = parent_entry->backing.copy; + assert(parent_copy->cpy_hdr.nentries == 1); + parent_copy_entry = vm_map_copy_first_entry(parent_copy); + assert(!parent_copy_entry->is_sub_map); + } + } + map_size = map_end - map_start; + if (map_size == 0) { + DEBUG4K_MEMENTRY("map %p offset 0x%llx size 0x%llx prot 0x%x -> entry %p kr 0x%x\n", target_map, offset, *size, permission, user_entry,
KERN_INVALID_ARGUMENT); + return KERN_INVALID_ARGUMENT; + } + if (use_data_addr || use_4K_compat) { offset_in_page = offset - map_start; if (use_4K_compat) { @@ -2670,16 +2713,42 @@ mach_make_memory_entry_internal( offset_in_page = 0; } + if (mask_protections) { + /* + * caller is asking for whichever protections are + * available: no required protections. + */ + required_protection = VM_PROT_NONE; + } else { + /* + * Caller wants a memory entry with "protections". + * Make sure we extract only memory that matches that. + */ + required_protection = protections; + } cur_prot = VM_PROT_ALL; + vmk_flags.vmkf_copy_pageable = TRUE; + vmk_flags.vmkf_copy_same_map = FALSE; + assert(map_size != 0); kr = vm_map_copy_extract(target_map, map_start, map_size, + required_protection, + FALSE, /* copy */ &copy, &cur_prot, - &max_prot); + &max_prot, + VM_INHERIT_SHARE, + vmk_flags); if (kr != KERN_SUCCESS) { + DEBUG4K_MEMENTRY("map %p offset 0x%llx size 0x%llx prot 0x%x -> entry %p kr 0x%x\n", target_map, offset, *size, permission, user_entry, kr); + if (VM_MAP_PAGE_SHIFT(target_map) < PAGE_SHIFT) { +// panic("DEBUG4K %s:%d kr 0x%x\n", __FUNCTION__, __LINE__, kr); + } return kr; } + assert(copy != VM_MAP_COPY_NULL); + assert((cur_prot & required_protection) == required_protection); if (mask_protections) { /* @@ -2689,6 +2758,10 @@ mach_make_memory_entry_internal( protections &= cur_prot; if (protections == VM_PROT_NONE) { /* no access at all: fail */ + DEBUG4K_MEMENTRY("map %p offset 0x%llx size 0x%llx prot 0x%x -> entry %p kr 0x%x\n", target_map, offset, *size, permission, user_entry, KERN_PROTECTION_FAILURE); + if (VM_MAP_PAGE_SHIFT(target_map) < PAGE_SHIFT) { +// panic("DEBUG4K %s:%d kr 0x%x\n", __FUNCTION__, __LINE__, kr); + } vm_map_copy_discard(copy); return KERN_PROTECTION_FAILURE; } @@ -2698,462 +2771,194 @@ mach_make_memory_entry_internal( * out of "cur_prot".
*/ if ((cur_prot & protections) != protections) { + if (VM_MAP_PAGE_SHIFT(target_map) < PAGE_SHIFT) { +// panic("DEBUG4K %s:%d kr 0x%x\n", __FUNCTION__, __LINE__, KERN_PROTECTION_FAILURE); + } vm_map_copy_discard(copy); + DEBUG4K_MEMENTRY("map %p offset 0x%llx size 0x%llx prot 0x%x -> entry %p kr 0x%x\n", target_map, offset, *size, permission, user_entry, KERN_PROTECTION_FAILURE); return KERN_PROTECTION_FAILURE; } } + if (!(permission & MAP_MEM_VM_SHARE)) { + vm_map_entry_t copy_entry; + + /* limit size to what's actually covered by "copy" */ + assert(copy->cpy_hdr.nentries == 1); + copy_entry = vm_map_copy_first_entry(copy); + map_size = copy_entry->vme_end - copy_entry->vme_start; + + if ((permission & MAP_MEM_NAMED_REUSE) && + parent_copy_entry != VM_MAP_ENTRY_NULL && + VME_OBJECT(copy_entry) == VME_OBJECT(parent_copy_entry) && + VME_OFFSET(copy_entry) == VME_OFFSET(parent_copy_entry) && + parent_entry->offset == 0 && + parent_entry->size == map_size && + (parent_entry->data_offset == offset_in_page)) { + /* we have a match: re-use "parent_entry" */ + + /* release our new "copy" */ + vm_map_copy_discard(copy); + /* get extra send right on handle */ + ipc_port_copy_send(parent_handle); + + *size = CAST_DOWN(vm_size_t, + (parent_entry->size - + parent_entry->data_offset)); + *object_handle = parent_handle; + DEBUG4K_MEMENTRY("map %p offset 0x%llx size 0x%llx prot 0x%x -> entry %p kr 0x%x\n", target_map, offset, *size, permission, user_entry, KERN_SUCCESS); + return KERN_SUCCESS; + } + + /* no match: we need to create a new entry */ + object = VME_OBJECT(copy_entry); + vm_object_lock(object); + wimg_mode = object->wimg_bits; + if (!(object->nophyscache)) { + vm_prot_to_wimg(access, &wimg_mode); + } + if (object->wimg_bits != wimg_mode) { + vm_object_change_wimg_mode(object, wimg_mode); + } + vm_object_unlock(object); + } + kr = mach_memory_entry_allocate(&user_entry, &user_handle); if (kr != KERN_SUCCESS) { + if (VM_MAP_PAGE_SHIFT(target_map) < PAGE_SHIFT) { +// panic("DEBUG4K %s:%d kr 0x%x\n", __FUNCTION__, __LINE__, kr); + } vm_map_copy_discard(copy); + DEBUG4K_MEMENTRY("map %p offset 0x%llx size 0x%llx prot 0x%x -> entry %p kr 0x%x\n", target_map, offset, *size, permission, user_entry, KERN_FAILURE); return KERN_FAILURE; } user_entry->backing.copy = copy; - user_entry->internal = FALSE; user_entry->is_sub_map = FALSE; - user_entry->is_copy = TRUE; - user_entry->offset = 0; + user_entry->is_object = FALSE; + user_entry->internal = FALSE; user_entry->protection = protections; user_entry->size = map_size; user_entry->data_offset = offset_in_page; + if (permission & MAP_MEM_VM_SHARE) { + user_entry->is_copy = TRUE; + user_entry->offset = 0; + } else { + user_entry->is_object = TRUE; + user_entry->internal = object->internal; + user_entry->offset = VME_OFFSET(vm_map_copy_first_entry(copy)); + SET_MAP_MEM(GET_MAP_MEM(permission), user_entry->protection); + } + *size = CAST_DOWN(vm_size_t, (user_entry->size - user_entry->data_offset)); *object_handle = user_handle; + DEBUG4K_MEMENTRY("map %p offset 0x%llx size 0x%llx prot 0x%x -> entry %p kr 0x%x\n", target_map, offset, *size, permission, user_entry, KERN_SUCCESS); return KERN_SUCCESS; } - if (parent_entry == NULL || - (permission & MAP_MEM_NAMED_REUSE)) { - map_end = vm_map_round_page(offset + *size, PAGE_MASK); - map_size = map_end - map_start; - if (use_data_addr || use_4K_compat) { - offset_in_page = offset - map_start; - if (use_4K_compat) { - offset_in_page &= ~((signed)(0xFFF)); - } - } else { - offset_in_page = 0; - } - - /* Create a 
named object based on address range within the task map */ - /* Go find the object at given address */ - - if (target_map == VM_MAP_NULL) { - return KERN_INVALID_TASK; - } - -redo_lookup: - protections = original_protections; - vm_map_lock_read(target_map); - - /* get the object associated with the target address */ - /* note we check the permission of the range against */ - /* that requested by the caller */ - - kr = vm_map_lookup_locked(&target_map, map_start, - protections | mask_protections, - OBJECT_LOCK_EXCLUSIVE, &version, - &object, &obj_off, &prot, &wired, - &fault_info, - &real_map); - if (kr != KERN_SUCCESS) { - vm_map_unlock_read(target_map); - goto make_mem_done; - } - if (mask_protections) { - /* - * The caller asked us to use the "protections" as - * a mask, so restrict "protections" to what this - * mapping actually allows. - */ - protections &= prot; - } -#if CONFIG_EMBEDDED - /* - * Wiring would copy the pages to a shadow object. - * The shadow object would not be code-signed so - * attempting to execute code from these copied pages - * would trigger a code-signing violation. - */ - if (prot & VM_PROT_EXECUTE) { - if (log_executable_mem_entry) { - void *bsd_info; - bsd_info = current_task()->bsd_info; - printf("pid %d[%s] making memory entry out of " - "executable range from 0x%llx to 0x%llx:" - "might cause code-signing issues " - "later\n", - proc_selfpid(), - (bsd_info != NULL - ? proc_name_address(bsd_info) - : "?"), - (uint64_t) map_start, - (uint64_t) map_end); - } - DTRACE_VM2(cs_executable_mem_entry, - uint64_t, (uint64_t)map_start, - uint64_t, (uint64_t)map_end); - cs_executable_mem_entry++; - -#if 11 - /* - * We don't know how the memory entry will be used. - * It might never get wired and might not cause any - * trouble, so let's not reject this request... - */ -#else /* 11 */ - kr = KERN_PROTECTION_FAILURE; - vm_object_unlock(object); - vm_map_unlock_read(target_map); - if (real_map != target_map) { - vm_map_unlock_read(real_map); - } - goto make_mem_done; -#endif /* 11 */ - } -#endif /* CONFIG_EMBEDDED */ - - if (((prot & protections) != protections) - || (object == kernel_object)) { - kr = KERN_INVALID_RIGHT; - vm_object_unlock(object); - vm_map_unlock_read(target_map); - if (real_map != target_map) { - vm_map_unlock_read(real_map); - } - if (object == kernel_object) { - printf("Warning: Attempt to create a named" - " entry from the kernel_object\n"); - } - goto make_mem_done; - } + /* The new object will be based on an existing named object */ + if (parent_entry == NULL) { + kr = KERN_INVALID_ARGUMENT; + goto make_mem_done; + } - /* We have an object, now check to see if this object */ - /* is suitable. If not, create a shadow and share that */ + if (parent_entry->is_copy) { + panic("parent_entry %p is_copy not supported\n", parent_entry); + kr = KERN_INVALID_ARGUMENT; + goto make_mem_done; + } + if (use_data_addr || use_4K_compat) { /* - * We have to unlock the VM object to avoid deadlocking with - * a VM map lock (the lock ordering is map, the object), if we - * need to modify the VM map to create a shadow object. Since - * we might release the VM map lock below anyway, we have - * to release the VM map lock now. - * XXX FBDP There must be a way to avoid this double lookup... - * - * Take an extra reference on the VM object to make sure it's - * not going to disappear. + * submaps and pagers should only be accessible from within + * the kernel, which shouldn't use the data address flag, so can fail here. 
*/ - vm_object_reference_locked(object); /* extra ref to hold obj */ - vm_object_unlock(object); - - local_map = original_map; - local_offset = map_start; - if (target_map != local_map) { - vm_map_unlock_read(target_map); - if (real_map != target_map) { - vm_map_unlock_read(real_map); - } - vm_map_lock_read(local_map); - target_map = local_map; - real_map = local_map; - } - while (TRUE) { - if (!vm_map_lookup_entry(local_map, - local_offset, &map_entry)) { - kr = KERN_INVALID_ARGUMENT; - vm_map_unlock_read(target_map); - if (real_map != target_map) { - vm_map_unlock_read(real_map); - } - vm_object_deallocate(object); /* release extra ref */ - object = VM_OBJECT_NULL; - goto make_mem_done; - } - iskernel = (local_map->pmap == kernel_pmap); - if (!(map_entry->is_sub_map)) { - if (VME_OBJECT(map_entry) != object) { - kr = KERN_INVALID_ARGUMENT; - vm_map_unlock_read(target_map); - if (real_map != target_map) { - vm_map_unlock_read(real_map); - } - vm_object_deallocate(object); /* release extra ref */ - object = VM_OBJECT_NULL; - goto make_mem_done; - } - break; - } else { - vm_map_t tmap; - tmap = local_map; - local_map = VME_SUBMAP(map_entry); - - vm_map_lock_read(local_map); - vm_map_unlock_read(tmap); - target_map = local_map; - real_map = local_map; - local_offset = local_offset - map_entry->vme_start; - local_offset += VME_OFFSET(map_entry); - } + if (parent_entry->is_sub_map) { + panic("Shouldn't be using data address with a parent entry that is a submap."); } - -#if VM_NAMED_ENTRY_LIST - alias = VME_ALIAS(map_entry); -#endif /* VM_NAMED_ENTRY_LIST */ - /* - * We found the VM map entry, lock the VM object again. + * Account for offset to data in parent entry and + * compute our own offset to data. */ - vm_object_lock(object); - if (map_entry->wired_count) { - /* JMM - The check below should be reworked instead. */ - object->true_share = TRUE; - } - if (mask_protections) { - /* - * The caller asked us to use the "protections" as - * a mask, so restrict "protections" to what this - * mapping actually allows. - */ - protections &= map_entry->max_protection; - } - if (((map_entry->max_protection) & protections) != protections) { - kr = KERN_INVALID_RIGHT; - vm_object_unlock(object); - vm_map_unlock_read(target_map); - if (real_map != target_map) { - vm_map_unlock_read(real_map); - } - vm_object_deallocate(object); - object = VM_OBJECT_NULL; + if ((offset + *size + parent_entry->data_offset) > parent_entry->size) { + kr = KERN_INVALID_ARGUMENT; goto make_mem_done; } - mappable_size = fault_info.hi_offset - obj_off; - total_size = map_entry->vme_end - map_entry->vme_start; - if (map_size > mappable_size) { - /* try to extend mappable size if the entries */ - /* following are from the same object and are */ - /* compatible */ - next_entry = map_entry->vme_next; - /* lets see if the next map entry is still */ - /* pointing at this object and is contiguous */ - while (map_size > mappable_size) { - if ((VME_OBJECT(next_entry) == object) && - (next_entry->vme_start == - next_entry->vme_prev->vme_end) && - (VME_OFFSET(next_entry) == - (VME_OFFSET(next_entry->vme_prev) + - (next_entry->vme_prev->vme_end - - next_entry->vme_prev->vme_start)))) { - if (mask_protections) { - /* - * The caller asked us to use - * the "protections" as a mask, - * so restrict "protections" to - * what this mapping actually - * allows. 
- */ - protections &= next_entry->max_protection; - } - if ((next_entry->wired_count) && - (map_entry->wired_count == 0)) { - break; - } - if (((next_entry->max_protection) - & protections) != protections) { - break; - } - if (next_entry->needs_copy != - map_entry->needs_copy) { - break; - } - mappable_size += next_entry->vme_end - - next_entry->vme_start; - total_size += next_entry->vme_end - - next_entry->vme_start; - next_entry = next_entry->vme_next; - } else { - break; - } - } + map_start = vm_map_trunc_page(offset + parent_entry->data_offset, PAGE_MASK); + offset_in_page = (offset + parent_entry->data_offset) - map_start; + if (use_4K_compat) { + offset_in_page &= ~((signed)(0xFFF)); } + map_end = vm_map_round_page(offset + parent_entry->data_offset + *size, PAGE_MASK); + map_size = map_end - map_start; + } else { + map_end = vm_map_round_page(offset + *size, PAGE_MASK); + map_size = map_end - map_start; + offset_in_page = 0; - /* vm_map_entry_should_cow_for_true_share() checks for malloc tags, - * never true in kernel */ - if (!iskernel && vm_map_entry_should_cow_for_true_share(map_entry) && - object->vo_size > map_size && - map_size != 0) { - /* - * Set up the targeted range for copy-on-write to - * limit the impact of "true_share"/"copy_delay" to - * that range instead of the entire VM object... - */ - - vm_object_unlock(object); - if (vm_map_lock_read_to_write(target_map)) { - vm_object_deallocate(object); - target_map = original_map; - goto redo_lookup; - } - - vm_map_clip_start(target_map, - map_entry, - vm_map_trunc_page(map_start, - VM_MAP_PAGE_MASK(target_map))); - vm_map_clip_end(target_map, - map_entry, - (vm_map_round_page(map_end, - VM_MAP_PAGE_MASK(target_map)))); - force_shadow = TRUE; - - if ((map_entry->vme_end - offset) < map_size) { - map_size = map_entry->vme_end - map_start; - } - total_size = map_entry->vme_end - map_entry->vme_start; - - vm_map_lock_write_to_read(target_map); - vm_object_lock(object); + if ((offset + map_size) > parent_entry->size) { + kr = KERN_INVALID_ARGUMENT; + goto make_mem_done; } + } - if (object->internal) { - /* vm_map_lookup_locked will create a shadow if */ - /* needs_copy is set but does not check for the */ - /* other two conditions shown. It is important to */ - /* set up an object which will not be pulled from */ - /* under us. */ - - if (force_shadow || - ((map_entry->needs_copy || - object->shadowed || - (object->vo_size > total_size && - (VME_OFFSET(map_entry) != 0 || - object->vo_size > - vm_map_round_page(total_size, - VM_MAP_PAGE_MASK(target_map))))) - && !object->true_share - && object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC)) { - /* - * We have to unlock the VM object before - * trying to upgrade the VM map lock, to - * honor lock ordering (map then object). - * Otherwise, we would deadlock if another - * thread holds a read lock on the VM map and - * is trying to acquire the VM object's lock. - * We still hold an extra reference on the - * VM object, guaranteeing that it won't - * disappear. - */ - vm_object_unlock(object); - - if (vm_map_lock_read_to_write(target_map)) { - /* - * We couldn't upgrade our VM map lock - * from "read" to "write" and we lost - * our "read" lock. - * Start all over again... - */ - vm_object_deallocate(object); /* extra ref */ - target_map = original_map; - goto redo_lookup; - } -#if 00 - vm_object_lock(object); -#endif - - /* - * JMM - We need to avoid coming here when the object - * is wired by anybody, not just the current map. 
Why - * couldn't we use the standard vm_object_copy_quickly() - * approach here? - */ - - /* create a shadow object */ - VME_OBJECT_SHADOW(map_entry, total_size); - shadow_object = VME_OBJECT(map_entry); -#if 00 - vm_object_unlock(object); -#endif - - prot = map_entry->protection & ~VM_PROT_WRITE; - - if (override_nx(target_map, - VME_ALIAS(map_entry)) - && prot) { - prot |= VM_PROT_EXECUTE; - } - - vm_object_pmap_protect( - object, VME_OFFSET(map_entry), - total_size, - ((map_entry->is_shared - || target_map->mapped_in_other_pmaps) - ? PMAP_NULL : - target_map->pmap), - map_entry->vme_start, - prot); - total_size -= (map_entry->vme_end - - map_entry->vme_start); - next_entry = map_entry->vme_next; - map_entry->needs_copy = FALSE; - - vm_object_lock(shadow_object); - while (total_size) { - assert((next_entry->wired_count == 0) || - (map_entry->wired_count)); - - if (VME_OBJECT(next_entry) == object) { - vm_object_reference_locked(shadow_object); - VME_OBJECT_SET(next_entry, - shadow_object); - vm_object_deallocate(object); - VME_OFFSET_SET( - next_entry, - (VME_OFFSET(next_entry->vme_prev) + - (next_entry->vme_prev->vme_end - - next_entry->vme_prev->vme_start))); - next_entry->use_pmap = TRUE; - next_entry->needs_copy = FALSE; - } else { - panic("mach_make_memory_entry_64:" - " map entries out of sync\n"); - } - total_size -= - next_entry->vme_end - - next_entry->vme_start; - next_entry = next_entry->vme_next; - } - - /* - * Transfer our extra reference to the - * shadow object. - */ - vm_object_reference_locked(shadow_object); - vm_object_deallocate(object); /* extra ref */ - object = shadow_object; + if (mask_protections) { + /* + * The caller asked us to use the "protections" as + * a mask, so restrict "protections" to what this + * mapping actually allows. + */ + protections &= parent_entry->protection; + } + if ((protections & parent_entry->protection) != protections) { + kr = KERN_PROTECTION_FAILURE; + goto make_mem_done; + } - obj_off = ((local_offset - map_entry->vme_start) - + VME_OFFSET(map_entry)); + if (mach_memory_entry_allocate(&user_entry, &user_handle) + != KERN_SUCCESS) { + kr = KERN_FAILURE; + goto make_mem_done; + } - vm_map_lock_write_to_read(target_map); - } - } + user_entry->size = map_size; + user_entry->offset = parent_entry->offset + map_start; + user_entry->data_offset = offset_in_page; + user_entry->is_sub_map = parent_entry->is_sub_map; + user_entry->is_copy = parent_entry->is_copy; + user_entry->internal = parent_entry->internal; + user_entry->protection = protections; - /* note: in the future we can (if necessary) allow for */ - /* memory object lists, this will better support */ - /* fragmentation, but is it necessary? The user should */ - /* be encouraged to create address space oriented */ - /* shared objects from CLEAN memory regions which have */ - /* a known and defined history. i.e. no inheritence */ - /* share, make this call before making the region the */ - /* target of ipc's, etc. The code above, protecting */ - /* against delayed copy, etc. is mostly defensive. 
*/ + if (access != MAP_MEM_NOOP) { + SET_MAP_MEM(access, user_entry->protection); + } - wimg_mode = object->wimg_bits; - if (!(object->nophyscache)) { - vm_prot_to_wimg(access, &wimg_mode); + if (parent_entry->is_sub_map) { + vm_map_t map = parent_entry->backing.map; + user_entry->backing.map = map; + lck_mtx_lock(&map->s_lock); + os_ref_retain_locked(&map->map_refcnt); + lck_mtx_unlock(&map->s_lock); + } else { + object = vm_named_entry_to_vm_object(parent_entry); + assert(object != VM_OBJECT_NULL); + assert(object->copy_strategy != MEMORY_OBJECT_COPY_SYMMETRIC); + kr = vm_named_entry_from_vm_object( + user_entry, + object, + user_entry->offset, + user_entry->size, + (user_entry->protection & VM_PROT_ALL)); + if (kr != KERN_SUCCESS) { + goto make_mem_done; } - + assert(user_entry->is_object); + /* we now point to this object, hold on */ + vm_object_lock(object); + vm_object_reference_locked(object); #if VM_OBJECT_TRACKING_OP_TRUESHARE if (!object->true_share && vm_object_tracking_inited) { @@ -3170,218 +2975,17 @@ redo_lookup: } #endif /* VM_OBJECT_TRACKING_OP_TRUESHARE */ - vm_object_lock_assert_exclusive(object); object->true_share = TRUE; if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC) { object->copy_strategy = MEMORY_OBJECT_COPY_DELAY; } - - /* - * The memory entry now points to this VM object and we - * need to hold a reference on the VM object. Use the extra - * reference we took earlier to keep the object alive when we - * had to unlock it. - */ - - vm_map_unlock_read(target_map); - if (real_map != target_map) { - vm_map_unlock_read(real_map); - } - - if (object->wimg_bits != wimg_mode) { - vm_object_change_wimg_mode(object, wimg_mode); - } - - /* the size of mapped entry that overlaps with our region */ - /* which is targeted for share. */ - /* (entry_end - entry_start) - */ - /* offset of our beg addr within entry */ - /* it corresponds to this: */ - - if (map_size > mappable_size) { - map_size = mappable_size; - } - - if (permission & MAP_MEM_NAMED_REUSE) { - /* - * Compare what we got with the "parent_entry". - * If they match, re-use the "parent_entry" instead - * of creating a new one. - */ - if (parent_entry != NULL && - parent_entry->backing.object == object && - parent_entry->internal == object->internal && - parent_entry->is_sub_map == FALSE && - parent_entry->offset == obj_off && - parent_entry->protection == protections && - parent_entry->size == map_size && - ((!(use_data_addr || use_4K_compat) && - (parent_entry->data_offset == 0)) || - ((use_data_addr || use_4K_compat) && - (parent_entry->data_offset == offset_in_page)))) { - /* - * We have a match: re-use "parent_entry". - */ - /* release our extra reference on object */ - vm_object_unlock(object); - vm_object_deallocate(object); - /* parent_entry->ref_count++; XXX ? */ - /* Get an extra send-right on handle */ - ipc_port_copy_send(parent_handle); - - *size = CAST_DOWN(vm_size_t, - (parent_entry->size - - parent_entry->data_offset)); - *object_handle = parent_handle; - return KERN_SUCCESS; - } else { - /* - * No match: we need to create a new entry. - * fall through... 
- */ - } - } - vm_object_unlock(object); - if (mach_memory_entry_allocate(&user_entry, &user_handle) - != KERN_SUCCESS) { - /* release our unused reference on the object */ - vm_object_deallocate(object); - return KERN_FAILURE; - } - - user_entry->backing.object = object; - user_entry->internal = object->internal; - user_entry->is_sub_map = FALSE; - user_entry->offset = obj_off; - user_entry->data_offset = offset_in_page; - user_entry->protection = protections; - SET_MAP_MEM(GET_MAP_MEM(permission), user_entry->protection); - user_entry->size = map_size; -#if VM_NAMED_ENTRY_LIST - user_entry->named_entry_alias = alias; -#endif /* VM_NAMED_ENTRY_LIST */ - - /* user_object pager and internal fields are not used */ - /* when the object field is filled in. */ - - *size = CAST_DOWN(vm_size_t, (user_entry->size - - user_entry->data_offset)); - *object_handle = user_handle; - return KERN_SUCCESS; - } else { - /* The new object will be base on an existing named object */ - if (parent_entry == NULL) { - kr = KERN_INVALID_ARGUMENT; - goto make_mem_done; - } - - if (use_data_addr || use_4K_compat) { - /* - * submaps and pagers should only be accessible from within - * the kernel, which shouldn't use the data address flag, so can fail here. - */ - if (parent_entry->is_sub_map) { - panic("Shouldn't be using data address with a parent entry that is a submap."); - } - /* - * Account for offset to data in parent entry and - * compute our own offset to data. - */ - if ((offset + *size + parent_entry->data_offset) > parent_entry->size) { - kr = KERN_INVALID_ARGUMENT; - goto make_mem_done; - } - - map_start = vm_map_trunc_page(offset + parent_entry->data_offset, PAGE_MASK); - offset_in_page = (offset + parent_entry->data_offset) - map_start; - if (use_4K_compat) { - offset_in_page &= ~((signed)(0xFFF)); - } - map_end = vm_map_round_page(offset + parent_entry->data_offset + *size, PAGE_MASK); - map_size = map_end - map_start; - } else { - map_end = vm_map_round_page(offset + *size, PAGE_MASK); - map_size = map_end - map_start; - offset_in_page = 0; - - if ((offset + map_size) > parent_entry->size) { - kr = KERN_INVALID_ARGUMENT; - goto make_mem_done; - } - } - - if (mask_protections) { - /* - * The caller asked us to use the "protections" as - * a mask, so restrict "protections" to what this - * mapping actually allows. 
- */ - protections &= parent_entry->protection; - } - if ((protections & parent_entry->protection) != protections) { - kr = KERN_PROTECTION_FAILURE; - goto make_mem_done; - } - - if (mach_memory_entry_allocate(&user_entry, &user_handle) - != KERN_SUCCESS) { - kr = KERN_FAILURE; - goto make_mem_done; - } - - user_entry->size = map_size; - user_entry->offset = parent_entry->offset + map_start; - user_entry->data_offset = offset_in_page; - user_entry->is_sub_map = parent_entry->is_sub_map; - user_entry->is_copy = parent_entry->is_copy; - user_entry->internal = parent_entry->internal; - user_entry->protection = protections; - - if (access != MAP_MEM_NOOP) { - SET_MAP_MEM(access, user_entry->protection); - } - - if (parent_entry->is_sub_map) { - vm_map_t map = parent_entry->backing.map; - user_entry->backing.map = map; - lck_mtx_lock(&map->s_lock); - os_ref_retain_locked(&map->map_refcnt); - lck_mtx_unlock(&map->s_lock); - } else { - object = parent_entry->backing.object; - assert(object != VM_OBJECT_NULL); - user_entry->backing.object = object; - /* we now point to this object, hold on */ - vm_object_lock(object); - vm_object_reference_locked(object); -#if VM_OBJECT_TRACKING_OP_TRUESHARE - if (!object->true_share && - vm_object_tracking_inited) { - void *bt[VM_OBJECT_TRACKING_BTDEPTH]; - int num = 0; - - num = OSBacktrace(bt, - VM_OBJECT_TRACKING_BTDEPTH); - btlog_add_entry(vm_object_tracking_btlog, - object, - VM_OBJECT_TRACKING_OP_TRUESHARE, - bt, - num); - } -#endif /* VM_OBJECT_TRACKING_OP_TRUESHARE */ - - object->true_share = TRUE; - if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC) { - object->copy_strategy = MEMORY_OBJECT_COPY_DELAY; - } - vm_object_unlock(object); - } - *size = CAST_DOWN(vm_size_t, (user_entry->size - - user_entry->data_offset)); - *object_handle = user_handle; - return KERN_SUCCESS; } + *size = CAST_DOWN(vm_size_t, (user_entry->size - + user_entry->data_offset)); + *object_handle = user_handle; + DEBUG4K_MEMENTRY("map %p offset 0x%llx size 0x%llx prot 0x%x -> entry %p kr 0x%x\n", target_map, offset, *size, permission, user_entry, KERN_SUCCESS); + return KERN_SUCCESS; make_mem_done: if (user_handle != IP_NULL) { @@ -3392,6 +2996,7 @@ make_mem_done: */ mach_memory_entry_port_release(user_handle); } + DEBUG4K_MEMENTRY("map %p offset 0x%llx size 0x%llx prot 0x%x -> entry %p kr 0x%x\n", target_map, offset, *size, permission, user_entry, kr); return kr; } @@ -3475,26 +3080,12 @@ vm_map_exec_lockdown( } #if VM_NAMED_ENTRY_LIST -queue_head_t vm_named_entry_list; +queue_head_t vm_named_entry_list = QUEUE_HEAD_INITIALIZER(vm_named_entry_list); int vm_named_entry_count = 0; -lck_mtx_t vm_named_entry_list_lock_data; -lck_mtx_ext_t vm_named_entry_list_lock_data_ext; +LCK_MTX_EARLY_DECLARE_ATTR(vm_named_entry_list_lock_data, + &vm_object_lck_grp, &vm_object_lck_attr); #endif /* VM_NAMED_ENTRY_LIST */ -void vm_named_entry_init(void); -void -vm_named_entry_init(void) -{ -#if VM_NAMED_ENTRY_LIST - queue_init(&vm_named_entry_list); - vm_named_entry_count = 0; - lck_mtx_init_ext(&vm_named_entry_list_lock_data, - &vm_named_entry_list_lock_data_ext, - &vm_object_lck_grp, - &vm_object_lck_attr); -#endif /* VM_NAMED_ENTRY_LIST */ -} - __private_extern__ kern_return_t mach_memory_entry_allocate( vm_named_entry_t *user_entry_p, @@ -3511,7 +3102,8 @@ mach_memory_entry_allocate( named_entry_lock_init(user_entry); - user_entry->backing.object = NULL; + user_entry->backing.copy = NULL; + user_entry->is_object = FALSE; user_entry->is_sub_map = FALSE; user_entry->is_copy = FALSE; 
user_entry->internal = FALSE; @@ -3565,6 +3157,7 @@ mach_memory_object_memory_entry_64( vm_named_entry_t user_entry; ipc_port_t user_handle; vm_object_t object; + kern_return_t kr; if (host == HOST_NULL) { return KERN_INVALID_HOST; @@ -3599,7 +3192,11 @@ mach_memory_object_memory_entry_64( user_entry->is_sub_map = FALSE; assert(user_entry->ref_count == 1); - user_entry->backing.object = object; + kr = vm_named_entry_from_vm_object(user_entry, object, 0, size, + (user_entry->protection & VM_PROT_ALL)); + if (kr != KERN_SUCCESS) { + return kr; + } user_entry->internal = object->internal; assert(object->internal == internal); @@ -3672,7 +3269,8 @@ memory_entry_purgeable_control_internal( return KERN_INVALID_ARGUMENT; } - object = mem_entry->backing.object; + assert(mem_entry->is_object); + object = vm_named_entry_to_vm_object(mem_entry); if (object == VM_OBJECT_NULL) { named_entry_unlock(mem_entry); return KERN_INVALID_ARGUMENT; @@ -3735,7 +3333,8 @@ memory_entry_access_tracking_internal( return KERN_INVALID_ARGUMENT; } - object = mem_entry->backing.object; + assert(mem_entry->is_object); + object = vm_named_entry_to_vm_object(mem_entry); if (object == VM_OBJECT_NULL) { named_entry_unlock(mem_entry); return KERN_INVALID_ARGUMENT; @@ -3814,7 +3413,8 @@ mach_memory_entry_ownership( return KERN_INVALID_ARGUMENT; } - object = mem_entry->backing.object; + assert(mem_entry->is_object); + object = vm_named_entry_to_vm_object(mem_entry); if (object == VM_OBJECT_NULL) { named_entry_unlock(mem_entry); return KERN_INVALID_ARGUMENT; @@ -3868,7 +3468,8 @@ mach_memory_entry_get_page_counts( return KERN_INVALID_ARGUMENT; } - object = mem_entry->backing.object; + assert(mem_entry->is_object); + object = vm_named_entry_to_vm_object(mem_entry); if (object == VM_OBJECT_NULL) { named_entry_unlock(mem_entry); return KERN_INVALID_ARGUMENT; @@ -3878,6 +3479,8 @@ mach_memory_entry_get_page_counts( offset = mem_entry->offset; size = mem_entry->size; + size = vm_object_round_page(offset + size) - vm_object_trunc_page(offset); + offset = vm_object_trunc_page(offset); named_entry_unlock(mem_entry); @@ -3888,6 +3491,136 @@ mach_memory_entry_get_page_counts( return kr; } +kern_return_t +mach_memory_entry_phys_page_offset( + ipc_port_t entry_port, + vm_object_offset_t *offset_p) +{ + vm_named_entry_t mem_entry; + vm_object_t object; + vm_object_offset_t offset; + vm_object_offset_t data_offset; + + if (!IP_VALID(entry_port) || + ip_kotype(entry_port) != IKOT_NAMED_ENTRY) { + return KERN_INVALID_ARGUMENT; + } + + mem_entry = (vm_named_entry_t) entry_port->ip_kobject; + + named_entry_lock(mem_entry); + + if (mem_entry->is_sub_map || + mem_entry->is_copy) { + named_entry_unlock(mem_entry); + return KERN_INVALID_ARGUMENT; + } + + assert(mem_entry->is_object); + object = vm_named_entry_to_vm_object(mem_entry); + if (object == VM_OBJECT_NULL) { + named_entry_unlock(mem_entry); + return KERN_INVALID_ARGUMENT; + } + + offset = mem_entry->offset; + data_offset = mem_entry->data_offset; + + named_entry_unlock(mem_entry); + + *offset_p = offset - vm_object_trunc_page(offset) + data_offset; + return KERN_SUCCESS; +} + +kern_return_t +mach_memory_entry_map_size( + ipc_port_t entry_port, + vm_map_t map, + memory_object_offset_t offset, + memory_object_offset_t size, + mach_vm_size_t *map_size) +{ + vm_named_entry_t mem_entry; + vm_object_t object; + vm_object_offset_t object_offset_start, object_offset_end; + vm_map_copy_t copy_map, target_copy_map; + vm_map_offset_t overmap_start, overmap_end, trimmed_start; + kern_return_t kr; + + if 
(!IP_VALID(entry_port) || + ip_kotype(entry_port) != IKOT_NAMED_ENTRY) { + return KERN_INVALID_ARGUMENT; + } + + mem_entry = (vm_named_entry_t) entry_port->ip_kobject; + named_entry_lock(mem_entry); + + if (mem_entry->is_sub_map) { + named_entry_unlock(mem_entry); + return KERN_INVALID_ARGUMENT; + } + + if (mem_entry->is_object) { + object = vm_named_entry_to_vm_object(mem_entry); + if (object == VM_OBJECT_NULL) { + named_entry_unlock(mem_entry); + return KERN_INVALID_ARGUMENT; + } + + object_offset_start = mem_entry->offset; + object_offset_start += mem_entry->data_offset; + object_offset_start += offset; + object_offset_end = object_offset_start + size; + object_offset_start = vm_map_trunc_page(object_offset_start, + VM_MAP_PAGE_MASK(map)); + object_offset_end = vm_map_round_page(object_offset_end, + VM_MAP_PAGE_MASK(map)); + + named_entry_unlock(mem_entry); + + *map_size = object_offset_end - object_offset_start; + return KERN_SUCCESS; + } + + if (!mem_entry->is_copy) { + panic("unsupported type of mem_entry %p\n", mem_entry); + } + + assert(mem_entry->is_copy); + if (VM_MAP_COPY_PAGE_MASK(mem_entry->backing.copy) == VM_MAP_PAGE_MASK(map)) { + *map_size = vm_map_round_page(mem_entry->offset + mem_entry->data_offset + offset + size, VM_MAP_PAGE_MASK(map)) - vm_map_trunc_page(mem_entry->offset + mem_entry->data_offset + offset, VM_MAP_PAGE_MASK(map)); + DEBUG4K_SHARE("map %p (%d) mem_entry %p offset 0x%llx + 0x%llx + 0x%llx size 0x%llx -> map_size 0x%llx\n", map, VM_MAP_PAGE_MASK(map), mem_entry, mem_entry->offset, mem_entry->data_offset, offset, size, *map_size); + named_entry_unlock(mem_entry); + return KERN_SUCCESS; + } + + DEBUG4K_SHARE("mem_entry %p copy %p (%d) map %p (%d) offset 0x%llx size 0x%llx\n", mem_entry, mem_entry->backing.copy, VM_MAP_COPY_PAGE_SHIFT(mem_entry->backing.copy), map, VM_MAP_PAGE_SHIFT(map), offset, size); + copy_map = mem_entry->backing.copy; + target_copy_map = VM_MAP_COPY_NULL; + DEBUG4K_ADJUST("adjusting...\n"); + kr = vm_map_copy_adjust_to_target(copy_map, + mem_entry->data_offset + offset, + size, + map, + FALSE, + &target_copy_map, + &overmap_start, + &overmap_end, + &trimmed_start); + if (kr == KERN_SUCCESS) { + if (target_copy_map->size != copy_map->size) { + DEBUG4K_ADJUST("copy %p (%d) map %p (%d) offset 0x%llx size 0x%llx overmap_start 0x%llx overmap_end 0x%llx trimmed_start 0x%llx map_size 0x%llx -> 0x%llx\n", copy_map, VM_MAP_COPY_PAGE_SHIFT(copy_map), map, VM_MAP_PAGE_SHIFT(map), (uint64_t)offset, (uint64_t)size, (uint64_t)overmap_start, (uint64_t)overmap_end, (uint64_t)trimmed_start, (uint64_t)copy_map->size, (uint64_t)target_copy_map->size); + } + *map_size = target_copy_map->size; + if (target_copy_map != copy_map) { + vm_map_copy_discard(target_copy_map); + } + target_copy_map = VM_MAP_COPY_NULL; + } + named_entry_unlock(mem_entry); + return kr; +} + /* * mach_memory_entry_port_release: * @@ -3933,9 +3666,11 @@ mach_destroy_memory_entry( vm_map_deallocate(named_entry->backing.map); } else if (named_entry->is_copy) { vm_map_copy_discard(named_entry->backing.copy); + } else if (named_entry->is_object) { + assert(named_entry->backing.copy->cpy_hdr.nentries == 1); + vm_map_copy_discard(named_entry->backing.copy); } else { - /* release the VM object we've been pointing to */ - vm_object_deallocate(named_entry->backing.object); + assert(named_entry->backing.copy == VM_MAP_COPY_NULL); } named_entry_unlock(named_entry); @@ -3986,7 +3721,8 @@ mach_memory_entry_page_op( return KERN_INVALID_ARGUMENT; } - object = mem_entry->backing.object; + 
assert(mem_entry->is_object); + object = vm_named_entry_to_vm_object(mem_entry); if (object == VM_OBJECT_NULL) { named_entry_unlock(mem_entry); return KERN_INVALID_ARGUMENT; @@ -4039,7 +3775,8 @@ mach_memory_entry_range_op( return KERN_INVALID_ARGUMENT; } - object = mem_entry->backing.object; + assert(mem_entry->is_object); + object = vm_named_entry_to_vm_object(mem_entry); if (object == VM_OBJECT_NULL) { named_entry_unlock(mem_entry); return KERN_INVALID_ARGUMENT; @@ -4394,7 +4131,8 @@ kernel_object_iopl_request( /* object cannot be mapped until it is ready */ /* we can therefore avoid the ready check */ /* in this case. */ - object = named_entry->backing.object; + assert(named_entry->is_object); + object = vm_named_entry_to_vm_object(named_entry); vm_object_reference(object); named_entry_unlock(named_entry); diff --git a/osfmk/voucher/Makefile b/osfmk/voucher/Makefile index 46c5051e7..de7454138 100644 --- a/osfmk/voucher/Makefile +++ b/osfmk/voucher/Makefile @@ -52,7 +52,7 @@ ${MIGINCLUDES} : ${MIG_TYPES} ${MIG_UUHDRS} : \ %.h : %.defs - $(call makelog,$(ColorM)MIG$(Color0) $(ColorF)$@$(Color0)) + @$(LOG_MIG) "$@" $(_v)$(MIG) $(MIGFLAGS) \ -server /dev/null \ -user /dev/null \ @@ -61,7 +61,7 @@ ${MIG_UUHDRS} : \ ${MIG_USHDRS} : \ %_server.h : %.defs - $(call makelog,$(ColorM)MIG$(Color0) $(ColorF)$@$(Color0)) + @$(LOG_MIG) "$@" $(_v)$(MIG) $(MIGFLAGS) \ -server /dev/null \ -user /dev/null \ @@ -97,7 +97,7 @@ ${COMP_FILES} : ${MIG_TYPES} ${MIG_KUSRC} : \ %_user.c : %.defs - $(call makelog,$(ColorM)MIG$(Color0) $(ColorF)$@$(Color0)) + @$(LOG_MIG) "$@" $(_v)${MIG} ${MIGFLAGS} ${MIGKUFLAGS} \ -user $*_user.c \ -header $*.h \ @@ -107,7 +107,7 @@ ${MIG_KUSRC} : \ ${MIG_KSSRC}: \ %_server.c : %.defs - $(call makelog,$(ColorM)MIG$(Color0) $(ColorF)$@$(Color0)) + @$(LOG_MIG) "$@" $(_v)${MIG} ${MIGFLAGS} ${MIGKSFLAGS} \ -user /dev/null \ -header /dev/null \ diff --git a/osfmk/voucher/ipc_pthread_priority.c b/osfmk/voucher/ipc_pthread_priority.c index 22118c325..6b2c6a6d3 100644 --- a/osfmk/voucher/ipc_pthread_priority.c +++ b/osfmk/voucher/ipc_pthread_priority.c @@ -37,7 +37,6 @@ #include #include #include -#include #include #include #include diff --git a/osfmk/x86_64/copyio.c b/osfmk/x86_64/copyio.c index 5482ff39b..9569ed36c 100644 --- a/osfmk/x86_64/copyio.c +++ b/osfmk/x86_64/copyio.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2009-2016 Apple Inc. All rights reserved. + * Copyright (c) 2009-2020 Apple Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -44,6 +44,7 @@ #include #include +#include #undef copyin #undef copyout @@ -85,9 +86,6 @@ extern int _copyin_atomic64(const char *src, uint64_t *dst); extern int _copyout_atomic32(const uint32_t *u32, char *src); extern int _copyout_atomic64(const uint64_t *u64, char *src); -/* On by default, optionally disabled by boot-arg */ -extern boolean_t copyio_zalloc_check; - /* * Types of copies: */ @@ -201,9 +199,15 @@ copyio(int copy_type, user_addr_t user_addr, char *kernel_addr, if (__improbable((vm_offset_t)kernel_addr < VM_MIN_KERNEL_AND_KEXT_ADDRESS)) { panic("Invalid copy parameter, copy type: %d, kernel address: %p", copy_type, kernel_addr); } - if (__probable(copyio_zalloc_check)) { - kernel_buf_size = zone_element_size(kernel_addr, NULL); - if (__improbable(kernel_buf_size && kernel_buf_size < nbytes)) { + if (__probable(!zalloc_disable_copyio_check)) { + zone_t src_zone = NULL; + kernel_buf_size = zone_element_size(kernel_addr, &src_zone); + /* + * Size of elements in the permanent zone is not saved as a part of the + * zone's info + */ + if (__improbable(src_zone && !src_zone->permanent && + kernel_buf_size < nbytes)) { panic("copyio: kernel buffer %p has size %lu < nbytes %lu", kernel_addr, kernel_buf_size, nbytes); } } diff --git a/osfmk/x86_64/dwarf_unwind.h b/osfmk/x86_64/dwarf_unwind.h new file mode 100644 index 000000000..2abd407a0 --- /dev/null +++ b/osfmk/x86_64/dwarf_unwind.h @@ -0,0 +1,115 @@ +/* + * Copyright (c) 2019 Apple Inc. All rights reserved. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. + * + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ + */ + +#ifndef _X86_64_DWARF_UNWIND_H_ +#define _X86_64_DWARF_UNWIND_H_ + +/* + * This file contains the architecture specific DWARF definitions needed for unwind + * information added to trap handlers. 
+ */ + +/* DWARF Register numbers for x86 */ + +#define DWARF_RAX 0 +#define DWARF_RDX 1 +#define DWARF_RCX 2 +#define DWARF_RBX 3 +#define DWARF_RSI 4 +#define DWARF_RDI 5 +#define DWARF_RBP 6 +#define DWARF_RSP 7 +#define DWARF_R8 8 +#define DWARF_R9 9 +#define DWARF_R10 10 +#define DWARF_R11 11 +#define DWARF_R12 12 +#define DWARF_R13 13 +#define DWARF_R14 14 +#define DWARF_R15 15 +#define DWARF_RIP 16 + +/* Dwarf opcodes */ + +#define DW_OP_breg15 0x7f +#define DW_CFA_expression 0x10 + +/* Convenient DWARF expression macros */ + +#define DW_FORM_LEN_TWO_BYTE_SLEB 3 +#define DW_FORM_LEN_ONE_BYTE_SLEB 2 + +/* Additional constants for register offsets in the saved state that need to be expressed as SLEB128 */ + +#define R64_RAX_SLEB128 0x88, 0x01 +#define R64_RCX_SLEB128 0x80, 0x01 +#define R64_RBX_SLEB128 0xf8, 0x00 +#define R64_RBP_SLEB128 0xf0, 0x00 +#define R64_RSP_SLEB128 0xd0, 0x01 +#define R64_R11_SLEB128 0xe8, 0x00 +#define R64_R12_SLEB128 0xe0, 0x00 +#define R64_R13_SLEB128 0xd8, 0x00 +#define R64_R14_SLEB128 0xd0, 0x00 +#define R64_R15_SLEB128 0xc8, 0x00 +#define R64_RIP_SLEB128 0xb8, 0x01 + +/* The actual unwind directives added to trap handlers to let the debugger know where the register state is stored */ + +/* Unwind Prologue added to each function to indicate the start of the unwind information. */ + +#define UNWIND_PROLOGUE \ +.cfi_sections .eh_frame ;\ +.cfi_startproc; ;\ +.cfi_signal_frame ;\ + + +/* Unwind Epilogue added to each function to indicate the end of the unwind information */ + +#define UNWIND_EPILOGUE .cfi_endproc + + +#define UNWIND_DIRECTIVES \ +.cfi_escape DW_CFA_expression, DWARF_RAX, DW_FORM_LEN_TWO_BYTE_SLEB, DW_OP_breg15, R64_RAX_SLEB128 ;\ +.cfi_escape DW_CFA_expression, DWARF_RDX, DW_FORM_LEN_ONE_BYTE_SLEB, DW_OP_breg15, R64_RDX ;\ +.cfi_escape DW_CFA_expression, DWARF_RCX, DW_FORM_LEN_TWO_BYTE_SLEB, DW_OP_breg15, R64_RCX_SLEB128 ;\ +.cfi_escape DW_CFA_expression, DWARF_RBX, DW_FORM_LEN_TWO_BYTE_SLEB, DW_OP_breg15, R64_RBX_SLEB128 ;\ +.cfi_escape DW_CFA_expression, DWARF_RSI, DW_FORM_LEN_ONE_BYTE_SLEB, DW_OP_breg15, R64_RSI ;\ +.cfi_escape DW_CFA_expression, DWARF_RDI, DW_FORM_LEN_ONE_BYTE_SLEB, DW_OP_breg15, R64_RDI ;\ +.cfi_escape DW_CFA_expression, DWARF_RBP, DW_FORM_LEN_TWO_BYTE_SLEB, DW_OP_breg15, R64_RBP_SLEB128 ;\ +.cfi_escape DW_CFA_expression, DWARF_RSP, DW_FORM_LEN_TWO_BYTE_SLEB, DW_OP_breg15, R64_RSP_SLEB128 ;\ +.cfi_escape DW_CFA_expression, DWARF_R8, DW_FORM_LEN_ONE_BYTE_SLEB, DW_OP_breg15, R64_R8 ;\ +.cfi_escape DW_CFA_expression, DWARF_R9, DW_FORM_LEN_ONE_BYTE_SLEB, DW_OP_breg15, R64_R9 ;\ +.cfi_escape DW_CFA_expression, DWARF_R10, DW_FORM_LEN_ONE_BYTE_SLEB, DW_OP_breg15, R64_R10 ;\ +.cfi_escape DW_CFA_expression, DWARF_R11, DW_FORM_LEN_TWO_BYTE_SLEB, DW_OP_breg15, R64_R11_SLEB128 ;\ +.cfi_escape DW_CFA_expression, DWARF_R12, DW_FORM_LEN_TWO_BYTE_SLEB, DW_OP_breg15, R64_R12_SLEB128 ;\ +.cfi_escape DW_CFA_expression, DWARF_R13, DW_FORM_LEN_TWO_BYTE_SLEB, DW_OP_breg15, R64_R13_SLEB128 ;\ +.cfi_escape DW_CFA_expression, DWARF_R14, DW_FORM_LEN_TWO_BYTE_SLEB, DW_OP_breg15, R64_R14_SLEB128 ;\ +.cfi_escape DW_CFA_expression, DWARF_R15, DW_FORM_LEN_TWO_BYTE_SLEB, DW_OP_breg15, R64_R15_SLEB128 ;\ +.cfi_escape DW_CFA_expression, DWARF_RIP, DW_FORM_LEN_TWO_BYTE_SLEB, DW_OP_breg15, R64_RIP_SLEB128 ;\ + +#endif /* _X86_64_DWARF_UNWIND_H_ */ diff --git a/osfmk/x86_64/idt64.s b/osfmk/x86_64/idt64.s index d54c1c095..2c2fc4926 100644 --- a/osfmk/x86_64/idt64.s +++ b/osfmk/x86_64/idt64.s @@ -1,5 +1,5 @@ /* - * Copyright (c) 2010-2019 Apple Inc. 
All rights reserved. + * Copyright (c) 2010-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * * @@ -28,6 +28,7 @@ #include #include #include +#include "dwarf_unwind.h" #include #include #include @@ -79,6 +80,7 @@ #define HNDL_DOUBLE_FAULT 7 #define HNDL_MACHINE_CHECK 8 + /* Begin double-mapped descriptor section */ .section __HIB, __desc @@ -159,8 +161,70 @@ EXT(idt64_hndl_table1): Entry(idt64_page_fault) pushq $(HNDL_ALLTRAPS) - push $(T_PAGE_FAULT) +#if !(DEVELOPMENT || DEBUG) + pushq $(T_PAGE_FAULT) jmp L_dispatch +#else + pushq $(T_PAGE_FAULT) + + pushq %rax + pushq %rbx + pushq %rcx + testb $3, 8+8+8+ISF64_CS(%rsp) /* Coming from userspace? */ + jz L_pfkern /* No? (relatively uncommon), goto L_pfkern */ + + /* + * We faulted from the user; if the fault address is at the user's %rip, + * abort trying to save the cacheline since that adds another page fault's + * overhead when we recover, below. + */ + movq 8+8+8+ISF64_RIP(%rsp), %rbx + movq %cr2, %rcx + cmpq %rbx, %rcx + + /* note that the next 3 instructions do not affect RFLAGS */ + swapgs + leaq EXT(idt64_hndl_table0)(%rip), %rax + mov 16(%rax), %rax /* Offset of per-CPU shadow */ + + jne L_dispatch_from_user_with_rbx_rcx_pushes + jmp abort_rip_cacheline_read + +L_pfkern: + /* + * Kernel page fault + * If the fault occurred while reading from the user's code cache line, abort the cache line read; + * otherwise, treat this as a regular kernel fault + */ + movq 8+8+8+ISF64_RIP(%rsp), %rbx + leaq rip_cacheline_read(%rip), %rcx + cmpq %rcx, %rbx + jb regular_kernel_page_fault + leaq rip_cacheline_read_end(%rip), %rcx + cmpq %rcx, %rbx + jbe L_pf_on_clread /* Did we hit a #PF within the cacheline read? */ + +regular_kernel_page_fault: + /* No, regular kernel #PF */ + popq %rcx + popq %rbx + jmp L_dispatch_from_kernel_no_push_rax + +L_pf_on_clread: + /* + * We faulted while trying to read user instruction memory at the parent fault's %rip; abort that action by + * changing the return address on the stack, restoring cr2 to its previous value, peeling off the pushes we + * added on entry to the page fault handler, then performing an iretq + */ + popq %rcx + movq %rcx, %cr2 + popq %rbx + leaq abort_rip_cacheline_read(%rip), %rax + movq %rax, 8+ISF64_RIP(%rsp) + popq %rax + addq $24, %rsp /* pop the 2 pushes + the error code */ + iretq /* Resume previous trap/fault processing */ +#endif /* !(DEVELOPMENT || DEBUG) */ /* * #DB handler, which runs on IST1, will treat as spurious any #DB received while executing in the @@ -531,6 +595,85 @@ L_sysenter_continue: orl $(EFL_IF), ISF64_RFLAGS(%rsp) jmp L_u64bit_entry_check +#if DEVELOPMENT || DEBUG +do_cacheline_stash: + /* + * Copy the cache line that includes the user's EIP/RIP into the shadow cpu structure + * for later extraction/sanity-checking in user_trap(). + */ + + pushq %rbx + pushq %rcx +L_dispatch_from_user_with_rbx_rcx_pushes: + movq 8+8+8+ISF64_RIP(%rsp), %rbx + andq $-64, %rbx /* Round address to cacheline boundary */ + pushf + /* + * disable SMAP, if it's enabled (note that CLAC is present in BDW and later only, so we're + * using generic instructions here without checking whether the CPU supports SMAP first) + */ + orq $(1 << 18), (%rsp) + popf + /* + * Note that we only check for a faulting read on the first read, since if the first read + * succeeds, the rest of the cache line should also be readable since we are running with + * interrupts disabled here and a TLB invalidation cannot sneak in and pull the rug out.
+ */ + movq %cr2, %rcx /* stash the original %cr2 in case the first cacheline read triggers a #PF */ /* This value of %cr2 is restored in the page fault handler if it detects */ /* that the fault occurred on the next instruction, so the original #PF can */ /* continue to be handled without issue. */ +rip_cacheline_read: + mov (%rbx), %rcx + /* Note that CPU_RTIMES in the shadow cpu struct was just a convenient place to stash the cacheline */ + mov %rcx, %gs:CPU_RTIMES(%rax) + movq %cr2, %rcx + mov 8(%rbx), %rcx + mov %rcx, %gs:8+CPU_RTIMES(%rax) + movq %cr2, %rcx + mov 16(%rbx), %rcx + mov %rcx, %gs:16+CPU_RTIMES(%rax) + movq %cr2, %rcx + mov 24(%rbx), %rcx + mov %rcx, %gs:24+CPU_RTIMES(%rax) + movq %cr2, %rcx + mov 32(%rbx), %rcx + mov %rcx, %gs:32+CPU_RTIMES(%rax) + movq %cr2, %rcx + mov 40(%rbx), %rcx + mov %rcx, %gs:40+CPU_RTIMES(%rax) + movq %cr2, %rcx + mov 48(%rbx), %rcx + mov %rcx, %gs:48+CPU_RTIMES(%rax) + movq %cr2, %rcx +rip_cacheline_read_end: + mov 56(%rbx), %rcx + mov %rcx, %gs:56+CPU_RTIMES(%rax) + + pushf + andq $~(1 << 18), (%rsp) /* reenable SMAP */ + popf + + jmp cacheline_read_cleanup_stack + +abort_rip_cacheline_read: + pushf + andq $~(1 << 18), (%rsp) /* reenable SMAP */ + popf +abort_rip_cacheline_read_no_smap_reenable: + movl $0xdeadc0de, %ecx /* Write a sentinel so higher-level code knows this was aborted */ + shlq $32, %rcx + movl $0xbeefcafe, %ebx + orq %rbx, %rcx + movq %rcx, %gs:CPU_RTIMES(%rax) + movq %rcx, %gs:8+CPU_RTIMES(%rax) + +cacheline_read_cleanup_stack: + popq %rcx + popq %rbx + jmp L_dispatch_kgsb +#endif /* if DEVELOPMENT || DEBUG */ + /* * Common dispatch point. * Determine what mode has been interrupted and save state accordingly. @@ -546,9 +689,20 @@ L_dispatch: testb $3, 8+ISF64_CS(%rsp) jz 1f L_dispatch_from_user_no_push_rax: - swapgs + swapgs leaq EXT(idt64_hndl_table0)(%rip), %rax - mov 16(%rax), %rax + mov 16(%rax), %rax /* Offset of per-CPU shadow */ + +#if DEVELOPMENT || DEBUG + /* Stash the cacheline for #UD, #PF, and #GP */ + cmpl $(T_INVALID_OPCODE), 8+ISF64_TRAPNO(%rsp) + je do_cacheline_stash + cmpl $(T_PAGE_FAULT), 8+ISF64_TRAPNO(%rsp) + je do_cacheline_stash + cmpl $(T_GENERAL_PROTECTION), 8+ISF64_TRAPNO(%rsp) + je do_cacheline_stash +#endif + L_dispatch_kgsb: mov %gs:CPU_SHADOWTASK_CR3(%rax), %rax mov %rax, %cr3 @@ -1379,13 +1533,22 @@ Entry(hndl_alltraps) Entry(return_from_trap) movq %gs:CPU_ACTIVE_THREAD,%r15 /* Get current thread */ movl $-1, TH_IOTIER_OVERRIDE(%r15) /* Reset IO tier override to -1 before returning to userspace */ + cmpl $0, TH_RWLOCK_COUNT(%r15) /* Check if current thread has pending RW locks held */ jz 1f - xorq %rbp, %rbp /* clear framepointer */ - mov %r15, %rdi /* Set RDI to current thread */ + xorq %rbp, %rbp /* clear framepointer */ + mov %r15, %rdi /* Set RDI to current thread */ CCALL(lck_rw_clear_promotions_x86) /* Clear promotions if needed */ 1: - movq TH_PCB_ISS(%r15), %r15 /* PCB stack */ + + cmpl $0, TH_TMP_ALLOC_CNT(%r15) /* Check if current thread has KHEAP_TEMP leaks */ + jz 1f + xorq %rbp, %rbp /* clear framepointer */ + mov %r15, %rdi /* Set RDI to current thread */ + CCALL(kheap_temp_leak_panic) +1: + + movq TH_PCB_ISS(%r15), %r15 /* PCB stack */ movl %gs:CPU_PENDING_AST,%eax testl %eax,%eax je EXT(return_to_user) /* branch if no AST */ @@ -1428,7 +1591,13 @@ L_return_from_trap_with_ast: * */ trap_from_kernel: + +UNWIND_PROLOGUE + movq %r15, %rdi /* saved state addr */ + +UNWIND_DIRECTIVES + pushq R64_RIP(%r15) /* Simulate a CALL from fault point */ pushq %rbp /* Extend framepointer
chain */ movq %rsp, %rbp @@ -1459,7 +1628,8 @@ trap_from_kernel: mov %rsp, %r15 /* AST changes stack, saved state */ jmp ret_to_kernel - +UNWIND_EPILOGUE + /* * All interrupts on all tasks enter here with: * r15 x86_saved_state_t @@ -1471,6 +1641,9 @@ trap_from_kernel: * direction flag cleared */ Entry(hndl_allintrs) + +UNWIND_PROLOGUE + /* * test whether already on interrupt stack */ @@ -1490,6 +1663,8 @@ Entry(hndl_allintrs) pushq %rcx /* save pointer to old stack */ pushq %gs:CPU_INT_STATE /* save previous intr state */ movq %r15,%gs:CPU_INT_STATE /* set intr state */ + +UNWIND_DIRECTIVES TIME_INT_ENTRY /* do timing */ @@ -1502,6 +1677,8 @@ Entry(hndl_allintrs) incl %gs:CPU_INTERRUPT_LEVEL CCALL1(interrupt, %r15) /* call generic interrupt routine */ + +UNWIND_EPILOGUE .globl EXT(return_to_iret) LEXT(return_to_iret) /* (label for kdb_kintr and hardclock) */ @@ -1529,21 +1706,20 @@ LEXT(return_to_iret) /* (label for kdb_kintr and hardclock) */ mov %rax,%cr0 /* set cr0 */ 2: /* Load interrupted code segment into %eax */ - movl R32_CS(%r15),%eax /* assume 32-bit state */ - cmpl $(SS_64),SS_FLAVOR(%r15)/* 64-bit? */ + movl R64_CS(%r15),%eax /* assume 64-bit state */ + cmpl $(SS_32),SS_FLAVOR(%r15)/* 32-bit? */ #if DEBUG_IDT64 - jne 4f - movl R64_CS(%r15),%eax /* 64-bit user mode */ + jne 5f + movl R32_CS(%r15),%eax /* 32-bit user mode */ jmp 3f -4: - cmpl $(SS_32),SS_FLAVOR(%r15) +5: + cmpl $(SS_64),SS_FLAVOR(%r15) je 3f POSTCODE2(0x6431) CCALL1(panic_idt64, %r15) hlt #else - jne 3f - movl R64_CS(%r15),%eax /* 64-bit user mode */ + je 4f #endif 3: testb $3,%al /* user mode, */ @@ -1569,6 +1745,9 @@ LEXT(return_to_iret) /* (label for kdb_kintr and hardclock) */ mov %rsp, %r15 /* AST changes stack, saved state */ jmp ret_to_kernel +4: + movl R32_CS(%r15),%eax /* 32-bit user mode */ + jmp 3b /* diff --git a/osfmk/x86_64/kpc_x86.c b/osfmk/x86_64/kpc_x86.c index 08fd380f2..5dc851801 100644 --- a/osfmk/x86_64/kpc_x86.c +++ b/osfmk/x86_64/kpc_x86.c @@ -29,7 +29,6 @@ #include #include #include -#include #include #include #include @@ -49,6 +48,9 @@ #define IA32_FIXED_CTR_ENABLE_ALL_CTRS_ALL_RINGS (0x333) #define IA32_FIXED_CTR_ENABLE_ALL_PMI (0x888) +#define IA32_PERFEVT_USER_EN (0x10000) +#define IA32_PERFEVT_OS_EN (0x20000) + #define IA32_PERFEVTSEL_PMI (1ull << 20) #define IA32_PERFEVTSEL_EN (1ull << 22) @@ -456,7 +458,7 @@ kpc_get_all_cpus_counters(uint32_t classes, int *curcpu, uint64_t *buf) enabled = ml_set_interrupts_enabled(FALSE); if (curcpu) { - *curcpu = current_processor()->cpu_id; + *curcpu = cpu_number(); } mp_cpus_call(CPUMASK_ALL, ASYNC, kpc_get_curcpu_counters_mp_call, &hdl); @@ -598,7 +600,53 @@ kpc_set_config_arch(struct kpc_config_remote *mp_config) return 0; } -/* PMI stuff */ +static uintptr_t +get_interrupted_pc(bool *kernel_out) +{ + x86_saved_state_t *state = current_cpu_datap()->cpu_int_state; + if (!state) { + return 0; + } + + bool state_64 = is_saved_state64(state); + uint64_t cs; + if (state_64) { + cs = saved_state64(state)->isf.cs; + } else { + cs = saved_state32(state)->cs; + } + bool kernel = (cs & SEL_PL) != SEL_PL_U; + *kernel_out = kernel; + + uintptr_t pc = 0; + if (state_64) { + pc = saved_state64(state)->isf.rip; + } else { + pc = saved_state32(state)->eip; + } + if (kernel) { + pc = VM_KERNEL_UNSLIDE(pc); + } + return pc; +} + +static void +kpc_sample_kperf_x86(uint32_t ctr, uint64_t count, uint64_t config) +{ + uint32_t actionid = FIXED_ACTIONID(ctr); + bool kernel = false; + uintptr_t pc = get_interrupted_pc(&kernel); + kperf_kpc_flags_t flags = kernel ? 
KPC_KERNEL_PC : 0; + if ((config) & IA32_PERFEVT_USER_EN) { + flags |= KPC_USER_COUNTING; + } + if ((config) & IA32_PERFEVT_OS_EN) { + flags |= KPC_KERNEL_COUNTING; + } + kpc_sample_kperf(actionid, ctr, + config & 0xffff /* just the number and umask */, count, pc, flags); +} + void kpc_pmi_handler(void) { @@ -621,7 +669,7 @@ kpc_pmi_handler(void) BUF_INFO(PERF_KPC_FCOUNTER, ctr, FIXED_SHADOW(ctr), extra, FIXED_ACTIONID(ctr)); if (FIXED_ACTIONID(ctr)) { - kpc_sample_kperf(FIXED_ACTIONID(ctr)); + kpc_sample_kperf_x86(ctr, FIXED_SHADOW(ctr) + extra, 0); } } } @@ -631,8 +679,8 @@ kpc_pmi_handler(void) if ((1ULL << ctr) & status) { extra = kpc_reload_configurable(ctr); - CONFIGURABLE_SHADOW(ctr) - += kpc_configurable_max() - CONFIGURABLE_RELOAD(ctr) + extra; + CONFIGURABLE_SHADOW(ctr) += kpc_configurable_max() - + CONFIGURABLE_RELOAD(ctr) + extra; /* kperf can grab the PMCs when it samples so we need to make sure the overflow * bits are in the correct state before the call to kperf_sample */ @@ -641,7 +689,9 @@ kpc_pmi_handler(void) BUF_INFO(PERF_KPC_COUNTER, ctr, CONFIGURABLE_SHADOW(ctr), extra, CONFIGURABLE_ACTIONID(ctr)); if (CONFIGURABLE_ACTIONID(ctr)) { - kpc_sample_kperf(CONFIGURABLE_ACTIONID(ctr)); + uint64_t config = IA32_PERFEVTSELx(ctr); + kpc_sample_kperf_x86(ctr + kpc_configurable_count(), + CONFIGURABLE_SHADOW(ctr) + extra, config); } } } diff --git a/osfmk/x86_64/loose_ends.c b/osfmk/x86_64/loose_ends.c index 807ecfc52..e9cd1f6d6 100644 --- a/osfmk/x86_64/loose_ends.c +++ b/osfmk/x86_64/loose_ends.c @@ -63,9 +63,11 @@ #include #include #include +#include #include #include #include +#include #include #include #include @@ -371,7 +373,7 @@ ml_phys_read_data(uint64_t paddr, int size) (void)ml_set_interrupts_enabled(istate); if (phyreadpanic && (machine_timeout_suspended() == FALSE)) { - panic_io_port_read(); + panic_notify(); panic("Read from physical addr 0x%llx took %llu ns, " "result: 0x%llx (start: %llu, end: %llu), ceiling: %llu", paddr, (eabs - sabs), result, sabs, eabs, @@ -521,7 +523,7 @@ ml_phys_write_data(uint64_t paddr, unsigned long long data, int size) (void)ml_set_interrupts_enabled(istate); if (phywritepanic && (machine_timeout_suspended() == FALSE)) { - panic_io_port_read(); + panic_notify(); panic("Write to physical addr 0x%llx took %llu ns, " "data: 0x%llx (start: %llu, end: %llu), ceiling: %llu", paddr, (eabs - sabs), data, sabs, eabs, @@ -654,7 +656,7 @@ ml_port_io_read(uint16_t ioport, int size) (void)ml_set_interrupts_enabled(istate); if (phyreadpanic && (machine_timeout_suspended() == FALSE)) { - panic_io_port_read(); + panic_notify(); panic("Read from IO port 0x%x took %llu ns, " "result: 0x%x (start: %llu, end: %llu), ceiling: %llu", ioport, (eabs - sabs), result, sabs, eabs, @@ -725,7 +727,7 @@ ml_port_io_write(uint16_t ioport, uint32_t val, int size) (void)ml_set_interrupts_enabled(istate); if (phywritepanic && (machine_timeout_suspended() == FALSE)) { - panic_io_port_read(); + panic_notify(); panic("Write to IO port 0x%x took %llu ns, val: 0x%x" " (start: %llu, end: %llu), ceiling: %llu", ioport, (eabs - sabs), val, sabs, eabs, @@ -841,13 +843,22 @@ bcmp( return 0; } - do{ + do { if (*a++ != *b++) { break; } } while (--len); - return (int)len; + /* + * Check for the overflow case but continue to handle the non-overflow + * case the same way just in case someone is using the return value + * as more than zero/non-zero + */ + if (__improbable(!(len & 0x00000000FFFFFFFFULL) && (len & 0xFFFFFFFF00000000ULL))) { + return 0xFFFFFFFF; + } else { + return (int)len; 
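The widened return-value check added to bcmp() above guards against an integer-truncation hazard rather than changing the comparison logic: the remaining length is a 64-bit size_t, but the function still returns an int. Below is a minimal, standalone C sketch of the failure mode that check is written for; it assumes an LP64 target (64-bit size_t) and uses made-up names, so it is an illustration, not kernel code.

    #include <stdint.h>
    #include <stddef.h>
    #include <stdio.h>

    int
    main(void)
    {
            /* A mismatch found with exactly 4 GiB still uncompared: the low
             * 32 bits of the remainder are zero, so a plain (int) cast would
             * report 0, i.e. "buffers equal". */
            size_t remaining = 0x100000000ULL;

            int naive = (int)remaining;              /* truncates to 0 on LP64 */

            int guarded;
            if (!(remaining & 0x00000000FFFFFFFFULL) &&
                (remaining & 0xFFFFFFFF00000000ULL)) {
                    guarded = (int)0xFFFFFFFF;       /* force a non-zero result */
            } else {
                    guarded = (int)remaining;
            }

            printf("naive=%d guarded=%d\n", naive, guarded);
            return 0;
    }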
+ } } #undef memcmp @@ -866,6 +877,45 @@ memcmp(const void *s1, const void *s2, size_t n) return 0; } +unsigned long +memcmp_zero_ptr_aligned(const void *addr, size_t size) +{ + const uint64_t *p = (const uint64_t *)addr; + uint64_t a = p[0]; + + static_assert(sizeof(unsigned long) == sizeof(uint64_t)); + + if (size < 4 * sizeof(uint64_t)) { + if (size > 1 * sizeof(uint64_t)) { + a |= p[1]; + if (size > 2 * sizeof(uint64_t)) { + a |= p[2]; + } + } + } else { + size_t count = size / sizeof(uint64_t); + uint64_t b = p[1]; + uint64_t c = p[2]; + uint64_t d = p[3]; + + /* + * note: for sizes not a multiple of 32 bytes, this will load + * the bytes [size % 32 .. 32) twice which is ok + */ + while (count > 4) { + count -= 4; + a |= p[count + 0]; + b |= p[count + 1]; + c |= p[count + 2]; + d |= p[count + 3]; + } + + a |= b | c | d; + } + + return a; +} + #undef memmove void * memmove(void *dst, const void *src, size_t ulen) @@ -1038,3 +1088,77 @@ host_vmxoff(void) return; } #endif + +static lck_grp_t xcpm_lck_grp; +static lck_grp_attr_t xcpm_lck_grp_attr; +static lck_attr_t xcpm_lck_attr; +static lck_spin_t xcpm_lock; + +void xcpm_bootstrap(void); +void xcpm_mbox_lock(void); +void xcpm_mbox_unlock(void); +uint32_t xcpm_bios_mbox_cmd_read(uint32_t cmd); +uint32_t xcpm_bios_mbox_cmd_unsafe_read(uint32_t cmd); +void xcpm_bios_mbox_cmd_write(uint32_t cmd, uint32_t data); +boolean_t xcpm_is_hwp_enabled(void); + +void +xcpm_bootstrap(void) +{ + lck_grp_attr_setdefault(&xcpm_lck_grp_attr); + lck_grp_init(&xcpm_lck_grp, "xcpm", &xcpm_lck_grp_attr); + lck_attr_setdefault(&xcpm_lck_attr); + lck_spin_init(&xcpm_lock, &xcpm_lck_grp, &xcpm_lck_attr); +} + +void +xcpm_mbox_lock(void) +{ + lck_spin_lock(&xcpm_lock); +} + +void +xcpm_mbox_unlock(void) +{ + lck_spin_unlock(&xcpm_lock); +} + +static uint32_t __xcpm_state[64] = {}; + +uint32_t +xcpm_bios_mbox_cmd_read(uint32_t cmd) +{ + uint32_t reg; + boolean_t istate = ml_set_interrupts_enabled(FALSE); + xcpm_mbox_lock(); + reg = xcpm_bios_mbox_cmd_unsafe_read(cmd); + xcpm_mbox_unlock(); + ml_set_interrupts_enabled(istate); + return reg; +} + +uint32_t +xcpm_bios_mbox_cmd_unsafe_read(uint32_t cmd) +{ + return __xcpm_state[cmd % (sizeof(__xcpm_state) / sizeof(__xcpm_state[0]))]; +} + +void +xcpm_bios_mbox_cmd_write(uint32_t cmd, uint32_t data) +{ + uint32_t idx = cmd % (sizeof(__xcpm_state) / sizeof(__xcpm_state[0])); + idx &= ~0x1; + + boolean_t istate = ml_set_interrupts_enabled(FALSE); + xcpm_mbox_lock(); + __xcpm_state[idx] = data; + xcpm_mbox_unlock(); + ml_set_interrupts_enabled(istate); +} + +boolean_t +xcpm_is_hwp_enabled(void) +{ + return FALSE; +} + diff --git a/osfmk/x86_64/machine_remote_time.c b/osfmk/x86_64/machine_remote_time.c index 6172f3181..c9ebc7355 100644 --- a/osfmk/x86_64/machine_remote_time.c +++ b/osfmk/x86_64/machine_remote_time.c @@ -30,12 +30,11 @@ #include #include #include +#include void mach_bridge_send_timestamp(uint64_t timestamp); extern _Atomic uint32_t bt_init_flag; -extern lck_spin_t *bt_maintenance_lock; -extern void mach_bridge_timer_init(void); extern uint32_t bt_enable_flag; /* @@ -56,22 +55,21 @@ mach_bridge_register_regwrite_timestamp_callback(mach_bridge_regwrite_timestamp_ static uint64_t delay_amount = 0; if (!os_atomic_load(&bt_init_flag, relaxed)) { - mach_bridge_timer_init(); nanoseconds_to_absolutetime(DELAY_INTERVAL_NS, &delay_amount); os_atomic_store(&bt_init_flag, 1, release); } - lck_spin_lock(bt_maintenance_lock); + lck_spin_lock(&bt_maintenance_lock); bridge_regwrite_timestamp_callback = func; bt_enable_flag 
= (func != NULL) ? 1 : 0; bt_delay_timestamp = mach_absolute_time() + delay_amount; - lck_spin_unlock(bt_maintenance_lock); + lck_spin_unlock(&bt_maintenance_lock); } void mach_bridge_send_timestamp(uint64_t timestamp) { - LCK_SPIN_ASSERT(bt_maintenance_lock, LCK_ASSERT_OWNED); + LCK_SPIN_ASSERT(&bt_maintenance_lock, LCK_ASSERT_OWNED); if (bt_delay_timestamp > 0) { uint64_t now = mach_absolute_time(); diff --git a/osfmk/x86_64/pmap.c b/osfmk/x86_64/pmap.c index 87298757b..7291e516a 100644 --- a/osfmk/x86_64/pmap.c +++ b/osfmk/x86_64/pmap.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2019 Apple Inc. All rights reserved. + * Copyright (c) 2000-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -102,7 +102,6 @@ #include #include -#include #include #include @@ -192,13 +191,12 @@ uint32_t npvhashmask = 0, npvhashbuckets = 0; pv_hashed_entry_t pv_hashed_free_list = PV_HASHED_ENTRY_NULL; pv_hashed_entry_t pv_hashed_kern_free_list = PV_HASHED_ENTRY_NULL; -decl_simple_lock_data(, pv_hashed_free_list_lock); -decl_simple_lock_data(, pv_hashed_kern_free_list_lock); -decl_simple_lock_data(, pv_hash_table_lock); +SIMPLE_LOCK_DECLARE(pv_hashed_free_list_lock, 0); +SIMPLE_LOCK_DECLARE(pv_hashed_kern_free_list_lock, 0); +SIMPLE_LOCK_DECLARE(pv_hash_table_lock, 0); +SIMPLE_LOCK_DECLARE(phys_backup_lock, 0); -decl_simple_lock_data(, phys_backup_lock); - -zone_t pv_hashed_list_zone; /* zone of pv_hashed_entry structures */ +SECURITY_READ_ONLY_LATE(zone_t) pv_hashed_list_zone; /* zone of pv_hashed_entry structures */ /* * First and last physical addresses that we maintain any information @@ -207,9 +205,9 @@ zone_t pv_hashed_list_zone; /* zone of pv_hashed_entry structures */ */ boolean_t pmap_initialized = FALSE;/* Has pmap_init completed? */ -static struct vm_object kptobj_object_store __attribute__((aligned(VM_PACKED_POINTER_ALIGNMENT))); -static struct vm_object kpml4obj_object_store __attribute__((aligned(VM_PACKED_POINTER_ALIGNMENT))); -static struct vm_object kpdptobj_object_store __attribute__((aligned(VM_PACKED_POINTER_ALIGNMENT))); +static struct vm_object kptobj_object_store VM_PAGE_PACKED_ALIGNED; +static struct vm_object kpml4obj_object_store VM_PAGE_PACKED_ALIGNED; +static struct vm_object kpdptobj_object_store VM_PAGE_PACKED_ALIGNED; /* * Array of physical page attribites for managed pages. @@ -230,17 +228,14 @@ pmap_memory_region_t pmap_memory_regions[PMAP_MEMORY_REGIONS_SIZE]; struct pmap kernel_pmap_store; SECURITY_READ_ONLY_LATE(pmap_t) kernel_pmap = NULL; - -struct zone *pmap_zone; /* zone of pmap structures */ - -struct zone *pmap_anchor_zone; -struct zone *pmap_uanchor_zone; +SECURITY_READ_ONLY_LATE(zone_t) pmap_zone; /* zone of pmap structures */ +SECURITY_READ_ONLY_LATE(zone_t) pmap_anchor_zone; +SECURITY_READ_ONLY_LATE(zone_t) pmap_uanchor_zone; int pmap_debug = 0; /* flag for debugging prints */ unsigned int inuse_ptepages_count = 0; long long alloc_ptepages_count __attribute__((aligned(8))) = 0; /* aligned for atomic access */ unsigned int bootstrap_wired_pages = 0; -int pt_fake_zone_index = -1; extern long NMIPI_acks; @@ -380,9 +375,8 @@ pmap_scale_shift(void) return scale; } -lck_grp_t pmap_lck_grp; -lck_grp_attr_t pmap_lck_grp_attr; -lck_attr_t pmap_lck_rw_attr; +LCK_GRP_DECLARE(pmap_lck_grp, "pmap"); +LCK_ATTR_DECLARE(pmap_lck_rw_attr, 0, LCK_ATTR_DEBUG); /* * Bootstrap the system enough to run with virtual memory. 
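The pmap locking declarations above move from runtime construction (decl_simple_lock_data plus simple_lock_init, and the lck_grp_attr/lck_grp/lck_attr trio) to statically initialized objects. A short sketch of the same pattern for a hypothetical subsystem follows; the example_* names are invented, and the fragment assumes it lives in a kernel source file that already pulls in the locking headers, as pmap.c does.

    /* Statically initialized lock group, attribute, and simple lock; no
     * lck_grp_attr_setdefault()/lck_grp_init()/lck_attr_setdefault() or
     * simple_lock_init() calls are needed at boot. */
    LCK_GRP_DECLARE(example_lck_grp, "example");
    LCK_ATTR_DECLARE(example_lck_attr, 0, 0);
    SIMPLE_LOCK_DECLARE(example_simple_lock, 0);

    static lck_spin_t example_spin;

    static void
    example_locks_init(void)
    {
            /* Locks that still take a runtime init call can reference the
             * static group and attribute directly. */
            lck_spin_init(&example_spin, &example_lck_grp, &example_lck_attr);
    }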
@@ -395,10 +389,6 @@ pmap_bootstrap( __unused vm_offset_t load_start, __unused boolean_t IA32e) { -#if NCOPY_WINDOWS > 0 - vm_offset_t va; - int i; -#endif assert(IA32e); vm_last_addr = VM_MAX_KERNEL_ADDRESS; /* Set the highest address @@ -434,38 +424,6 @@ pmap_bootstrap( virtual_avail = (vm_offset_t)(VM_MIN_KERNEL_ADDRESS) + (vm_offset_t)first_avail; virtual_end = (vm_offset_t)(VM_MAX_KERNEL_ADDRESS); -#if NCOPY_WINDOWS > 0 - /* - * Reserve some special page table entries/VA space for temporary - * mapping of pages. - */ -#define SYSMAP(c, p, v, n) \ - v = (c)va; va += ((n)*INTEL_PGBYTES); - - va = virtual_avail; - - for (i = 0; i < PMAP_NWINDOWS; i++) { -#if 1 - kprintf("trying to do SYSMAP idx %d %p\n", i, - current_cpu_datap()); - kprintf("cpu_pmap %p\n", current_cpu_datap()->cpu_pmap); - kprintf("mapwindow %p\n", current_cpu_datap()->cpu_pmap->mapwindow); - kprintf("two stuff %p %p\n", - (void *)(current_cpu_datap()->cpu_pmap->mapwindow[i].prv_CMAP), - (void *)(current_cpu_datap()->cpu_pmap->mapwindow[i].prv_CADDR)); -#endif - SYSMAP(caddr_t, - (current_cpu_datap()->cpu_pmap->mapwindow[i].prv_CMAP), - (current_cpu_datap()->cpu_pmap->mapwindow[i].prv_CADDR), - 1); - current_cpu_datap()->cpu_pmap->mapwindow[i].prv_CMAP = - &(current_cpu_datap()->cpu_pmap->mapwindow[i].prv_CMAP_store); - *current_cpu_datap()->cpu_pmap->mapwindow[i].prv_CMAP = 0; - } - - - virtual_avail = va; -#endif if (!PE_parse_boot_argn("npvhash", &npvhashmask, sizeof(npvhashmask))) { npvhashmask = ((NPVHASHBUCKETS) << pmap_scale_shift()) - 1; } @@ -477,20 +435,9 @@ pmap_bootstrap( "using default %d\n", npvhashmask, NPVHASHMASK); } - lck_grp_attr_setdefault(&pmap_lck_grp_attr); - lck_grp_init(&pmap_lck_grp, "pmap", &pmap_lck_grp_attr); - - lck_attr_setdefault(&pmap_lck_rw_attr); - lck_attr_cleardebug(&pmap_lck_rw_attr); - lck_rw_init(&kernel_pmap->pmap_rwl, &pmap_lck_grp, &pmap_lck_rw_attr); kernel_pmap->pmap_rwl.lck_rw_can_sleep = FALSE; - simple_lock_init(&pv_hashed_free_list_lock, 0); - simple_lock_init(&pv_hashed_kern_free_list_lock, 0); - simple_lock_init(&pv_hash_table_lock, 0); - simple_lock_init(&phys_backup_lock, 0); - pmap_cpu_init(); if (pmap_pcid_ncpus) { @@ -579,17 +526,14 @@ pmap_virtual_space( #if HIBERNATION #include +#include int32_t pmap_npages; int32_t pmap_teardown_last_valid_compact_indx = -1; - -void hibernate_rebuild_pmap_structs(void); -void hibernate_teardown_pmap_structs(addr64_t *, addr64_t *); void pmap_pack_index(uint32_t); int32_t pmap_unpack_index(pv_rooted_entry_t); - int32_t pmap_unpack_index(pv_rooted_entry_t pv_h) { @@ -622,7 +566,7 @@ pmap_pack_index(uint32_t indx) void -hibernate_teardown_pmap_structs(addr64_t *unneeded_start, addr64_t *unneeded_end) +pal_hib_teardown_pmap_structs(addr64_t *unneeded_start, addr64_t *unneeded_end) { int32_t i; int32_t compact_target_indx; @@ -655,12 +599,12 @@ hibernate_teardown_pmap_structs(addr64_t *unneeded_start, addr64_t *unneeded_end *unneeded_start = (addr64_t)&pv_head_table[pmap_teardown_last_valid_compact_indx + 1]; *unneeded_end = (addr64_t)&pv_head_table[pmap_npages - 1]; - HIBLOG("hibernate_teardown_pmap_structs done: last_valid_compact_indx %d\n", pmap_teardown_last_valid_compact_indx); + HIBLOG("pal_hib_teardown_pmap_structs done: last_valid_compact_indx %d\n", pmap_teardown_last_valid_compact_indx); } void -hibernate_rebuild_pmap_structs(void) +pal_hib_rebuild_pmap_structs(void) { int32_t cindx, eindx, rindx = 0; pv_rooted_entry_t pv_h; @@ -675,7 +619,7 @@ hibernate_rebuild_pmap_structs(void) if (rindx != cindx) { /* - * this 
pv_rooted_entry_t was moved by hibernate_teardown_pmap_structs, + * this pv_rooted_entry_t was moved by pal_hib_teardown_pmap_structs, * so move it back to its real location */ pv_head_table[rindx] = pv_head_table[cindx]; @@ -694,7 +638,7 @@ hibernate_rebuild_pmap_structs(void) bzero((char *)&pv_head_table[0], rindx * sizeof(struct pv_rooted_entry)); } - HIBLOG("hibernate_rebuild_pmap_structs done: last_valid_compact_indx %d\n", pmap_teardown_last_valid_compact_indx); + HIBLOG("pal_hib_rebuild_pmap_structs done: last_valid_compact_indx %d\n", pmap_teardown_last_valid_compact_indx); } #endif @@ -843,37 +787,28 @@ pmap_init(void) * Create the zone of physical maps, * and of the physical-to-virtual entries. */ - s = (vm_size_t) sizeof(struct pmap); - pmap_zone = zinit(s, 400 * s, 4096, "pmap"); /* XXX */ - zone_change(pmap_zone, Z_NOENCRYPT, TRUE); - - pmap_anchor_zone = zinit(PAGE_SIZE, task_max, PAGE_SIZE, "pagetable anchors"); - zone_change(pmap_anchor_zone, Z_NOENCRYPT, TRUE); + pmap_zone = zone_create_ext("pmap", sizeof(struct pmap), + ZC_NOENCRYPT | ZC_ZFREE_CLEARMEM, ZONE_ID_PMAP, NULL); /* The anchor is required to be page aligned. Zone debugging adds * padding which may violate that requirement. Tell the zone * subsystem that alignment is required. */ + pmap_anchor_zone = zone_create("pagetable anchors", PAGE_SIZE, + ZC_NOENCRYPT | ZC_ALIGNMENT_REQUIRED); - zone_change(pmap_anchor_zone, Z_ALIGNMENT_REQUIRED, TRUE); /* TODO: possible general optimisation...pre-allocate via zones commonly created * level3/2 pagetables */ - pmap_uanchor_zone = zinit(PAGE_SIZE, task_max, PAGE_SIZE, "pagetable user anchors"); - zone_change(pmap_uanchor_zone, Z_NOENCRYPT, TRUE); - /* The anchor is required to be page aligned. Zone debugging adds * padding which may violate that requirement. Tell the zone * subsystem that alignment is required. */ + pmap_uanchor_zone = zone_create("pagetable user anchors", PAGE_SIZE, + ZC_NOENCRYPT | ZC_ALIGNMENT_REQUIRED); - zone_change(pmap_uanchor_zone, Z_ALIGNMENT_REQUIRED, TRUE); - - s = (vm_size_t) sizeof(struct pv_hashed_entry); - pv_hashed_list_zone = zinit(s, 10000 * s /* Expandable zone */, - 4096 * 3 /* LCM x86_64*/, "pv_list"); - zone_change(pv_hashed_list_zone, Z_NOENCRYPT, TRUE); - zone_change(pv_hashed_list_zone, Z_GZALLOC_EXEMPT, TRUE); + pv_hashed_list_zone = zone_create("pv_list", sizeof(struct pv_hashed_entry), + ZC_NOENCRYPT | ZC_ALIGNMENT_REQUIRED); /* * Create pv entries for kernel pages that might get pmap_remove()ed. @@ -901,7 +836,6 @@ pmap_init(void) #endif /* CONFIG_VMX */ } -static void pmap_mark_range(pmap_t npmap, uint64_t sv, uint64_t nxrosz, boolean_t NX, boolean_t ro) { @@ -909,6 +843,7 @@ pmap_mark_range(pmap_t npmap, uint64_t sv, uint64_t nxrosz, boolean_t NX, boolea pd_entry_t *pdep; pt_entry_t *ptep = NULL; + /* XXX what if nxrosz is 0? we end up marking the page whose address is passed in via sv -- is that kosher? */ assert(!is_ept_pmap(npmap)); assert(((sv & 0xFFFULL) | (nxrosz & 0xFFFULL)) == 0); @@ -917,11 +852,27 @@ pmap_mark_range(pmap_t npmap, uint64_t sv, uint64_t nxrosz, boolean_t NX, boolea uint64_t pdev = (cv & ~((uint64_t)PDEMASK)); if (*pdep & INTEL_PTE_PS) { +#ifdef REMAP_DEBUG + if ((NX ^ !!(*pdep & INTEL_PTE_NX)) || (ro ^ !!!(*pdep & INTEL_PTE_WRITE))) { + kprintf("WARNING: Remapping PDE for %p from %s%s%s to %s%s%s\n", (void *)cv, + (*pdep & INTEL_PTE_VALID) ? "R" : "", + (*pdep & INTEL_PTE_WRITE) ? "W" : "", + (*pdep & INTEL_PTE_NX) ? "" : "X", + "R", + ro ? "" : "W", + NX ? 
"" : "X"); + } +#endif + if (NX) { *pdep |= INTEL_PTE_NX; + } else { + *pdep &= ~INTEL_PTE_NX; } if (ro) { *pdep &= ~INTEL_PTE_WRITE; + } else { + *pdep |= INTEL_PTE_WRITE; } cv += NBPD; cv &= ~((uint64_t) PDEMASK); @@ -930,11 +881,26 @@ pmap_mark_range(pmap_t npmap, uint64_t sv, uint64_t nxrosz, boolean_t NX, boolea } for (ptep = pmap_pte(npmap, cv); ptep != NULL && (cv < (pdev + NBPD)) && (cv < ev);) { +#ifdef REMAP_DEBUG + if ((NX ^ !!(*ptep & INTEL_PTE_NX)) || (ro ^ !!!(*ptep & INTEL_PTE_WRITE))) { + kprintf("WARNING: Remapping PTE for %p from %s%s%s to %s%s%s\n", (void *)cv, + (*ptep & INTEL_PTE_VALID) ? "R" : "", + (*ptep & INTEL_PTE_WRITE) ? "W" : "", + (*ptep & INTEL_PTE_NX) ? "" : "X", + "R", + ro ? "" : "W", + NX ? "" : "X"); + } +#endif if (NX) { *ptep |= INTEL_PTE_NX; + } else { + *ptep &= ~INTEL_PTE_NX; } if (ro) { *ptep &= ~INTEL_PTE_WRITE; + } else { + *ptep |= INTEL_PTE_WRITE; } cv += NBPT; ptep = pmap_pte(npmap, cv); @@ -1201,16 +1167,24 @@ pmap_lowmem_finalize(void) kernel_segment_command_t * seg; kernel_section_t * sec; + kc_format_t kc_format; + + PE_get_primary_kc_format(&kc_format); for (seg = firstseg(); seg != NULL; seg = nextsegfromheader(&_mh_execute_header, seg)) { if (!strcmp(seg->segname, "__TEXT") || !strcmp(seg->segname, "__DATA")) { continue; } - //XXX - if (!strcmp(seg->segname, "__KLD")) { - continue; + + /* XXX: FIXME_IN_dyld: This is a workaround (see below) */ + if (kc_format != KCFormatFileset) { + //XXX + if (!strcmp(seg->segname, "__KLD")) { + continue; + } } + if (!strcmp(seg->segname, "__HIB")) { for (sec = firstsect(seg); sec != NULL; sec = nextsect(seg, sec)) { if (sec->addr & PAGE_MASK) { @@ -1223,7 +1197,41 @@ pmap_lowmem_finalize(void) } } } else { - pmap_mark_range(kernel_pmap, seg->vmaddr, round_page_64(seg->vmsize), TRUE, FALSE); + if (kc_format == KCFormatFileset) { +#if 0 + /* + * This block of code is commented out because it may or may not have induced an earlier panic + * in ledger init. + */ + + + boolean_t NXbit = !(seg->initprot & VM_PROT_EXECUTE), + robit = (seg->initprot & (VM_PROT_READ | VM_PROT_WRITE)) == VM_PROT_READ; + + /* + * XXX: FIXME_IN_dyld: This is a workaround for primary KC containing incorrect inaccurate + * initprot for segments containing code. + */ + if (!strcmp(seg->segname, "__KLD") || !strcmp(seg->segname, "__VECTORS")) { + NXbit = FALSE; + robit = FALSE; + } + + pmap_mark_range(kernel_pmap, seg->vmaddr & ~(uint64_t)PAGE_MASK, + round_page_64(seg->vmsize), NXbit, robit); +#endif + + /* + * XXX: We are marking *every* segment with rwx permissions as a workaround + * XXX: until the primary KC's kernel segments are page-aligned. + */ + kprintf("Marking (%p, %p) as rwx\n", (void *)(seg->vmaddr & ~(uint64_t)PAGE_MASK), + (void *)((seg->vmaddr & ~(uint64_t)PAGE_MASK) + round_page_64(seg->vmsize))); + pmap_mark_range(kernel_pmap, seg->vmaddr & ~(uint64_t)PAGE_MASK, + round_page_64(seg->vmsize), FALSE, FALSE); + } else { + pmap_mark_range(kernel_pmap, seg->vmaddr, round_page_64(seg->vmsize), TRUE, FALSE); + } } } @@ -1500,6 +1508,7 @@ pmap_create_options( p->pm_task_map = ((flags & PMAP_CREATE_64BIT) ? 
TASK_MAP_64BIT : TASK_MAP_32BIT); p->pagezero_accessible = FALSE; + p->pm_vm_map_cs_enforced = FALSE; if (pmap_pcid_ncpus) { pmap_pcid_initialize(p); @@ -1662,6 +1671,7 @@ pmap_destroy(pmap_t p) pmap_check_ledgers(p); ledger_dereference(p->ledger); + lck_rw_destroy(&p->pmap_rwl, &pmap_lck_grp); zfree(pmap_zone, p); PMAP_TRACE(PMAP_CODE(PMAP__DESTROY) | DBG_FUNC_END); @@ -2455,7 +2465,7 @@ pmap_list_resident_pages( #if CONFIG_COREDUMP /* temporary workaround */ boolean_t -coredumpok(__unused vm_map_t map, __unused vm_offset_t va) +coredumpok(__unused vm_map_t map, __unused mach_vm_offset_t va) { #if 0 pt_entry_t *ptep; @@ -2502,6 +2512,13 @@ pmap_switch(pmap_t tpmap) PMAP_TRACE_CONSTANT(PMAP_CODE(PMAP__SWITCH) | DBG_FUNC_END); } +void +pmap_require(pmap_t pmap) +{ + if (pmap != kernel_pmap) { + zone_id_require(ZONE_ID_PMAP, sizeof(struct pmap), pmap); + } +} /* * disable no-execute capability on @@ -2515,40 +2532,6 @@ pmap_disable_NX(__unused pmap_t pmap) #endif } -void -pt_fake_zone_init(int zone_index) -{ - pt_fake_zone_index = zone_index; -} - -void -pt_fake_zone_info( - int *count, - vm_size_t *cur_size, - vm_size_t *max_size, - vm_size_t *elem_size, - vm_size_t *alloc_size, - uint64_t *sum_size, - int *collectable, - int *exhaustable, - int *caller_acct) -{ - *count = inuse_ptepages_count; - *cur_size = PAGE_SIZE * inuse_ptepages_count; - *max_size = PAGE_SIZE * (inuse_ptepages_count + - vm_page_inactive_count + - vm_page_active_count + - vm_page_free_count); - *elem_size = PAGE_SIZE; - *alloc_size = PAGE_SIZE; - *sum_size = alloc_ptepages_count * PAGE_SIZE; - - *collectable = 1; - *exhaustable = 0; - *caller_acct = 1; -} - - void pmap_flush_context_init(pmap_flush_context *pfc) { @@ -3299,6 +3282,12 @@ pmap_in_ppl(void) return false; } +void +pmap_lockdown_image4_slab(__unused vm_offset_t slab, __unused vm_size_t slab_len, __unused uint64_t flags) +{ + // Unsupported on this architecture. 
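pmap_require() above gives callers a cheap validity check for pmap pointers they did not allocate themselves: anything other than the statically allocated kernel_pmap must come out of the pmap zone, and zone_id_require() panics if it does not. A hedged sketch of how a caller might use it is below; example_enter() and its arguments are hypothetical, and the fragment assumes the usual pmap headers are in scope.

    /* Hypothetical caller: validate an externally supplied pmap before
     * dereferencing it or taking its lock. */
    static void
    example_enter(pmap_t pmap, vm_map_offset_t va)
    {
            pmap_require(pmap);     /* panics if pmap is neither kernel_pmap
                                     * nor an element of ZONE_ID_PMAP */

            /* ... safe to use pmap's fields for the mapping at va here ... */
            (void)va;
    }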
+} + void * pmap_claim_reserved_ppl_page(void) { diff --git a/pexpert/arm/pe_consistent_debug.c b/pexpert/arm/pe_consistent_debug.c index 1fe7142ac..d43130ffe 100644 --- a/pexpert/arm/pe_consistent_debug.c +++ b/pexpert/arm/pe_consistent_debug.c @@ -74,12 +74,12 @@ int PE_consistent_debug_inherit(void) { DTEntry entryP; - uintptr_t *prop_data; + uintptr_t const *prop_data; uintptr_t root_pointer = 0; uint32_t size; - if ((DTLookupEntry(NULL, "/chosen", &entryP) == kSuccess)) { - if (DTGetProperty(entryP, "consistent-debug-root", (void **)&prop_data, &size) == kSuccess) { + if ((SecureDTLookupEntry(NULL, "/chosen", &entryP) == kSuccess)) { + if (SecureDTGetProperty(entryP, "consistent-debug-root", (void const **)&prop_data, &size) == kSuccess) { root_pointer = prop_data[0]; } } diff --git a/pexpert/arm/pe_identify_machine.c b/pexpert/arm/pe_identify_machine.c index b35a029e9..1ddd6fc65 100644 --- a/pexpert/arm/pe_identify_machine.c +++ b/pexpert/arm/pe_identify_machine.c @@ -47,7 +47,7 @@ pe_identify_machine(boot_args * bootArgs) OpaqueDTEntryIterator iter; DTEntry cpus, cpu; uint32_t mclk = 0, hclk = 0, pclk = 0, tclk = 0, use_dt = 0; - unsigned long *value; + unsigned long const *value; unsigned int size; int err; @@ -107,27 +107,27 @@ pe_identify_machine(boot_args * bootArgs) gPEClockFrequencyInfo.bus_clock_rate_hz = 100000000; gPEClockFrequencyInfo.cpu_clock_rate_hz = 400000000; - err = DTLookupEntry(NULL, "/cpus", &cpus); + err = SecureDTLookupEntry(NULL, "/cpus", &cpus); assert(err == kSuccess); - err = DTInitEntryIterator(cpus, &iter); + err = SecureDTInitEntryIterator(cpus, &iter); assert(err == kSuccess); - while (kSuccess == DTIterateEntries(&iter, &cpu)) { - if ((kSuccess != DTGetProperty(cpu, "state", (void **)&value, &size)) || - (strncmp((char*)value, "running", size) != 0)) { + while (kSuccess == SecureDTIterateEntries(&iter, &cpu)) { + if ((kSuccess != SecureDTGetProperty(cpu, "state", (void const **)&value, &size)) || + (strncmp((char const *)value, "running", size) != 0)) { continue; } /* Find the time base frequency first. */ - if (DTGetProperty(cpu, "timebase-frequency", (void **)&value, &size) == kSuccess) { + if (SecureDTGetProperty(cpu, "timebase-frequency", (void const **)&value, &size) == kSuccess) { /* * timebase_frequency_hz is only 32 bits, and * the device tree should never provide 64 * bits so this if should never be taken. */ if (size == 8) { - gPEClockFrequencyInfo.timebase_frequency_hz = *(unsigned long long *)value; + gPEClockFrequencyInfo.timebase_frequency_hz = *(unsigned long long const *)value; } else { gPEClockFrequencyInfo.timebase_frequency_hz = *value; } @@ -135,9 +135,9 @@ pe_identify_machine(boot_args * bootArgs) gPEClockFrequencyInfo.dec_clock_rate_hz = gPEClockFrequencyInfo.timebase_frequency_hz; /* Find the bus frequency next. */ - if (DTGetProperty(cpu, "bus-frequency", (void **)&value, &size) == kSuccess) { + if (SecureDTGetProperty(cpu, "bus-frequency", (void const **)&value, &size) == kSuccess) { if (size == 8) { - gPEClockFrequencyInfo.bus_frequency_hz = *(unsigned long long *)value; + gPEClockFrequencyInfo.bus_frequency_hz = *(unsigned long long const *)value; } else { gPEClockFrequencyInfo.bus_frequency_hz = *value; } @@ -152,9 +152,9 @@ pe_identify_machine(boot_args * bootArgs) } /* Find the memory frequency next. 
*/ - if (DTGetProperty(cpu, "memory-frequency", (void **)&value, &size) == kSuccess) { + if (SecureDTGetProperty(cpu, "memory-frequency", (void const **)&value, &size) == kSuccess) { if (size == 8) { - gPEClockFrequencyInfo.mem_frequency_hz = *(unsigned long long *)value; + gPEClockFrequencyInfo.mem_frequency_hz = *(unsigned long long const *)value; } else { gPEClockFrequencyInfo.mem_frequency_hz = *value; } @@ -163,9 +163,9 @@ pe_identify_machine(boot_args * bootArgs) gPEClockFrequencyInfo.mem_frequency_max_hz = gPEClockFrequencyInfo.mem_frequency_hz; /* Find the peripheral frequency next. */ - if (DTGetProperty(cpu, "peripheral-frequency", (void **)&value, &size) == kSuccess) { + if (SecureDTGetProperty(cpu, "peripheral-frequency", (void const **)&value, &size) == kSuccess) { if (size == 8) { - gPEClockFrequencyInfo.prf_frequency_hz = *(unsigned long long *)value; + gPEClockFrequencyInfo.prf_frequency_hz = *(unsigned long long const *)value; } else { gPEClockFrequencyInfo.prf_frequency_hz = *value; } @@ -174,17 +174,17 @@ pe_identify_machine(boot_args * bootArgs) gPEClockFrequencyInfo.prf_frequency_max_hz = gPEClockFrequencyInfo.prf_frequency_hz; /* Find the fixed frequency next. */ - if (DTGetProperty(cpu, "fixed-frequency", (void **)&value, &size) == kSuccess) { + if (SecureDTGetProperty(cpu, "fixed-frequency", (void const **)&value, &size) == kSuccess) { if (size == 8) { - gPEClockFrequencyInfo.fix_frequency_hz = *(unsigned long long *)value; + gPEClockFrequencyInfo.fix_frequency_hz = *(unsigned long long const *)value; } else { gPEClockFrequencyInfo.fix_frequency_hz = *value; } } /* Find the cpu frequency last. */ - if (DTGetProperty(cpu, "clock-frequency", (void **)&value, &size) == kSuccess) { + if (SecureDTGetProperty(cpu, "clock-frequency", (void const **)&value, &size) == kSuccess) { if (size == 8) { - gPEClockFrequencyInfo.cpu_frequency_hz = *(unsigned long long *)value; + gPEClockFrequencyInfo.cpu_frequency_hz = *(unsigned long long const *)value; } else { gPEClockFrequencyInfo.cpu_frequency_hz = *value; } @@ -235,17 +235,17 @@ vm_offset_t pe_arm_get_soc_base_phys(void) { DTEntry entryP; - uintptr_t *ranges_prop; + uintptr_t const *ranges_prop; uint32_t prop_size; - char *tmpStr; + char const *tmpStr; - if (DTFindEntry("name", "arm-io", &entryP) == kSuccess) { + if (SecureDTFindEntry("name", "arm-io", &entryP) == kSuccess) { if (gPESoCDeviceType == 0) { - DTGetProperty(entryP, "device_type", (void **)&tmpStr, &prop_size); + SecureDTGetProperty(entryP, "device_type", (void const **)&tmpStr, &prop_size); strlcpy(gPESoCDeviceTypeBuffer, tmpStr, SOC_DEVICE_TYPE_BUFFER_SIZE); gPESoCDeviceType = gPESoCDeviceTypeBuffer; - DTGetProperty(entryP, "ranges", (void **)&ranges_prop, &prop_size); + SecureDTGetProperty(entryP, "ranges", (void const **)&ranges_prop, &prop_size); gPESoCBasePhys = *(ranges_prop + 1); } return gPESoCBasePhys; @@ -253,43 +253,8 @@ pe_arm_get_soc_base_phys(void) return 0; } -uint32_t -pe_arm_get_soc_revision(void) -{ - DTEntry entryP; - uint32_t *value; - uint32_t size; - - if ((DTFindEntry("name", "arm-io", &entryP) == kSuccess) - && (DTGetProperty(entryP, "chip-revision", (void **)&value, &size) == kSuccess)) { - if (size == 8) { - return (uint32_t)*(unsigned long long *)value; - } else { - return *value; - } - } - return 0; -} - - extern void fleh_fiq_generic(void); - -#if defined(ARM_BOARD_CLASS_T7000) -static struct tbd_ops t7000_funcs = {NULL, NULL, NULL}; -#endif /* defined(ARM_BOARD_CLASS_T7000) */ - -#if defined(ARM_BOARD_CLASS_S7002) -extern void 
fleh_fiq_s7002(void); -extern uint32_t s7002_get_decrementer(void); -extern void s7002_set_decrementer(uint32_t); -static struct tbd_ops s7002_funcs = {&fleh_fiq_s7002, &s7002_get_decrementer, &s7002_set_decrementer}; -#endif /* defined(ARM_BOARD_CLASS_S7002) */ - -#if defined(ARM_BOARD_CLASS_S8000) -static struct tbd_ops s8000_funcs = {NULL, NULL, NULL}; -#endif /* defined(ARM_BOARD_CLASS_T7000) */ - #if defined(ARM_BOARD_CLASS_T8002) extern void fleh_fiq_t8002(void); extern uint32_t t8002_get_decrementer(void); @@ -297,45 +262,6 @@ extern void t8002_set_decrementer(uint32_t); static struct tbd_ops t8002_funcs = {&fleh_fiq_t8002, &t8002_get_decrementer, &t8002_set_decrementer}; #endif /* defined(ARM_BOARD_CLASS_T8002) */ -#if defined(ARM_BOARD_CLASS_T8010) -static struct tbd_ops t8010_funcs = {NULL, NULL, NULL}; -#endif /* defined(ARM_BOARD_CLASS_T8010) */ - -#if defined(ARM_BOARD_CLASS_T8011) -static struct tbd_ops t8011_funcs = {NULL, NULL, NULL}; -#endif /* defined(ARM_BOARD_CLASS_T8011) */ - -#if defined(ARM_BOARD_CLASS_T8015) -static struct tbd_ops t8015_funcs = {NULL, NULL, NULL}; -#endif /* defined(ARM_BOARD_CLASS_T8015) */ - -#if defined(ARM_BOARD_CLASS_T8020) -static struct tbd_ops t8020_funcs = {NULL, NULL, NULL}; -#endif /* defined(ARM_BOARD_CLASS_T8020) */ - -#if defined(ARM_BOARD_CLASS_T8006) -static struct tbd_ops t8006_funcs = {NULL, NULL, NULL}; -#endif /* defined(ARM_BOARD_CLASS_T8006) */ - -#if defined(ARM_BOARD_CLASS_T8027) -static struct tbd_ops t8027_funcs = {NULL, NULL, NULL}; -#endif /* defined(ARM_BOARD_CLASS_T8027) */ - -#if defined(ARM_BOARD_CLASS_T8028) -static struct tbd_ops t8028_funcs = {NULL, NULL, NULL}; -#endif /* defined(ARM_BOARD_CLASS_T8028) */ - -#if defined(ARM_BOARD_CLASS_T8030) -static struct tbd_ops t8030_funcs = {NULL, NULL, NULL}; -#endif /* defined(ARM_BOARD_CLASS_T8030) */ - - - - -#if defined(ARM_BOARD_CLASS_BCM2837) -static struct tbd_ops bcm2837_funcs = {NULL, NULL, NULL}; -#endif /* defined(ARM_BOARD_CLASS_BCM2837) */ - vm_offset_t gPicBase; vm_offset_t gTimerBase; vm_offset_t gSocPhys; @@ -404,7 +330,7 @@ static int running_debug_command_on_cpu_number = -1; static void pe_init_debug_command(DTEntry entryP, command_buffer_element_t **command_buffer, const char* entry_name) { - uintptr_t *reg_prop; + uintptr_t const *reg_prop; uint32_t prop_size, reg_window_size = 0, command_starting_index; uintptr_t debug_reg_window = 0; @@ -412,7 +338,7 @@ pe_init_debug_command(DTEntry entryP, command_buffer_element_t **command_buffer, return; } - if (DTGetProperty(entryP, entry_name, (void **)®_prop, &prop_size) != kSuccess) { + if (SecureDTGetProperty(entryP, entry_name, (void const **)®_prop, &prop_size) != kSuccess) { panic("pe_init_debug_command: failed to read property %s\n", entry_name); } @@ -442,7 +368,7 @@ pe_init_debug_command(DTEntry entryP, command_buffer_element_t **command_buffer, panic("pe_init_debug_command: Command Offset is %lx, exceeds allocated size of %x\n", REGISTER_OFFSET(*reg_prop), reg_window_size ); } debug_command_buffer[next_command_buffer_entry].address = debug_reg_window + REGISTER_OFFSET(*reg_prop); - debug_command_buffer[next_command_buffer_entry].destination_cpu_selector = CPU_SELECTOR(*reg_prop); + debug_command_buffer[next_command_buffer_entry].destination_cpu_selector = (uint16_t)CPU_SELECTOR(*reg_prop); #if defined(__arm64__) debug_command_buffer[next_command_buffer_entry].delay_us = DELAY_US(*reg_prop); debug_command_buffer[next_command_buffer_entry].is_32bit = ((*reg_prop & REGISTER_32BIT_MASK) != 0); @@ -563,7 
+489,7 @@ void pe_arm_init_debug(void *args) { DTEntry entryP; - uintptr_t *reg_prop; + uintptr_t const *reg_prop; uint32_t prop_size; if (gSocPhys == 0) { @@ -571,9 +497,9 @@ pe_arm_init_debug(void *args) return; } - if (DTFindEntry("device_type", "cpu-debug-interface", &entryP) == kSuccess) { + if (SecureDTFindEntry("device_type", "cpu-debug-interface", &entryP) == kSuccess) { if (args != NULL) { - if (DTGetProperty(entryP, "reg", (void **)®_prop, &prop_size) == kSuccess) { + if (SecureDTGetProperty(entryP, "reg", (void const **)®_prop, &prop_size) == kSuccess) { ml_init_arm_debug_interface(args, ml_io_map(gSocPhys + *reg_prop, *(reg_prop + 1))); } #if DEVELOPMENT || DEBUG @@ -617,7 +543,7 @@ static uint32_t pe_arm_map_interrupt_controller(void) { DTEntry entryP; - uintptr_t *reg_prop; + uintptr_t const *reg_prop; uint32_t prop_size; vm_offset_t soc_phys = 0; @@ -629,9 +555,9 @@ pe_arm_map_interrupt_controller(void) return 0; } - if (DTFindEntry("interrupt-controller", "master", &entryP) == kSuccess) { + if (SecureDTFindEntry("interrupt-controller", "master", &entryP) == kSuccess) { kprintf("pe_arm_map_interrupt_controller: found interrupt-controller\n"); - DTGetProperty(entryP, "reg", (void **)®_prop, &prop_size); + SecureDTGetProperty(entryP, "reg", (void const **)®_prop, &prop_size); gPicBase = ml_io_map(soc_phys + *reg_prop, *(reg_prop + 1)); kprintf("pe_arm_map_interrupt_controller: gPicBase: 0x%lx\n", (unsigned long)gPicBase); } @@ -640,9 +566,9 @@ pe_arm_map_interrupt_controller(void) return 0; } - if (DTFindEntry("device_type", "timer", &entryP) == kSuccess) { + if (SecureDTFindEntry("device_type", "timer", &entryP) == kSuccess) { kprintf("pe_arm_map_interrupt_controller: found timer\n"); - DTGetProperty(entryP, "reg", (void **)®_prop, &prop_size); + SecureDTGetProperty(entryP, "reg", (void const **)®_prop, &prop_size); gTimerBase = ml_io_map(soc_phys + *reg_prop, *(reg_prop + 1)); kprintf("pe_arm_map_interrupt_controller: gTimerBase: 0x%lx\n", (unsigned long)gTimerBase); } @@ -678,6 +604,7 @@ pe_arm_init_timer(void *args) vm_offset_t eoi_addr = 0; uint32_t eoi_value = 0; struct tbd_ops generic_funcs = {&fleh_fiq_generic, NULL, NULL}; + struct tbd_ops empty_funcs __unused = {NULL, NULL, NULL}; tbd_ops_t tbd_funcs = &generic_funcs; /* The SoC headers expect to use pic_base, timer_base, etc... 
*/ @@ -685,35 +612,6 @@ pe_arm_init_timer(void *args) timer_base = gTimerBase; soc_phys = gSocPhys; -#if defined(ARM_BOARD_CLASS_T7000) - if (!strcmp(gPESoCDeviceType, "t7000-io") || - !strcmp(gPESoCDeviceType, "t7001-io")) { - tbd_funcs = &t7000_funcs; - } else -#endif -#if defined(ARM_BOARD_CLASS_S7002) - if (!strcmp(gPESoCDeviceType, "s7002-io")) { -#ifdef ARM_BOARD_WFE_TIMEOUT_NS - // Enable the WFE Timer - rPMGR_EVENT_TMR_PERIOD = ((uint64_t)(ARM_BOARD_WFE_TIMEOUT_NS) *gPEClockFrequencyInfo.timebase_frequency_hz) / NSEC_PER_SEC; - rPMGR_EVENT_TMR = rPMGR_EVENT_TMR_PERIOD; - rPMGR_EVENT_TMR_CTL = PMGR_EVENT_TMR_CTL_EN; -#endif /* ARM_BOARD_WFE_TIMEOUT_NS */ - - rPMGR_INTERVAL_TMR = 0x7FFFFFFF; - rPMGR_INTERVAL_TMR_CTL = PMGR_INTERVAL_TMR_CTL_EN | PMGR_INTERVAL_TMR_CTL_CLR_INT; - - eoi_addr = timer_base; - eoi_value = PMGR_INTERVAL_TMR_CTL_EN | PMGR_INTERVAL_TMR_CTL_CLR_INT; - tbd_funcs = &s7002_funcs; - } else -#endif -#if defined(ARM_BOARD_CLASS_S8000) - if (!strcmp(gPESoCDeviceType, "s8000-io") || - !strcmp(gPESoCDeviceType, "s8001-io")) { - tbd_funcs = &s8000_funcs; - } else -#endif #if defined(ARM_BOARD_CLASS_T8002) if (!strcmp(gPESoCDeviceType, "t8002-io") || !strcmp(gPESoCDeviceType, "t8004-io")) { @@ -733,52 +631,11 @@ pe_arm_init_timer(void *args) tbd_funcs = &t8002_funcs; } else #endif -#if defined(ARM_BOARD_CLASS_T8010) - if (!strcmp(gPESoCDeviceType, "t8010-io")) { - tbd_funcs = &t8010_funcs; - } else -#endif -#if defined(ARM_BOARD_CLASS_T8011) - if (!strcmp(gPESoCDeviceType, "t8011-io")) { - tbd_funcs = &t8011_funcs; - } else -#endif -#if defined(ARM_BOARD_CLASS_T8015) - if (!strcmp(gPESoCDeviceType, "t8015-io")) { - tbd_funcs = &t8015_funcs; - } else -#endif -#if defined(ARM_BOARD_CLASS_T8020) - if (!strcmp(gPESoCDeviceType, "t8020-io")) { - tbd_funcs = &t8020_funcs; - } else -#endif -#if defined(ARM_BOARD_CLASS_T8006) - if (!strcmp(gPESoCDeviceType, "t8006-io")) { - tbd_funcs = &t8006_funcs; - } else -#endif -#if defined(ARM_BOARD_CLASS_T8027) - if (!strcmp(gPESoCDeviceType, "t8027-io")) { - tbd_funcs = &t8027_funcs; - } else -#endif -#if defined(ARM_BOARD_CLASS_T8028) - if (!strcmp(gPESoCDeviceType, "t8028-io")) { - tbd_funcs = &t8028_funcs; - } else -#endif -#if defined(ARM_BOARD_CLASS_T8030) - if (!strcmp(gPESoCDeviceType, "t8030-io")) { - tbd_funcs = &t8030_funcs; - } else -#endif -#if defined(ARM_BOARD_CLASS_BCM2837) - if (!strcmp(gPESoCDeviceType, "bcm2837-io")) { - tbd_funcs = &bcm2837_funcs; - } else -#endif +#if defined(__arm64__) + tbd_funcs = &empty_funcs; +#else return 0; +#endif if (args != NULL) { ml_init_timebase(args, tbd_funcs, eoi_addr, eoi_value); diff --git a/pexpert/arm/pe_init.c b/pexpert/arm/pe_init.c index fda4a4dbb..8ff54da6c 100644 --- a/pexpert/arm/pe_init.c +++ b/pexpert/arm/pe_init.c @@ -49,13 +49,13 @@ uint8_t gPlatformECID[8]; uint32_t gPlatformMemoryID; static boolean_t vc_progress_initialized = FALSE; uint64_t last_hwaccess_thread = 0; -char gTargetTypeBuffer[8]; +char gTargetTypeBuffer[16]; char gModelTypeBuffer[32]; /* Clock Frequency Info */ clock_frequency_info_t gPEClockFrequencyInfo; -vm_offset_t gPanicBase; +vm_offset_t gPanicBase = 0; unsigned int gPanicSize; struct embedded_panic_header *panic_info = NULL; @@ -90,8 +90,8 @@ check_for_panic_log(void) #else DTEntry entry, chosen; unsigned int size; - uintptr_t *reg_prop; - uint32_t *panic_region_length; + uintptr_t const *reg_prop; + uint32_t const *panic_region_length; /* * DT properties for the panic region are populated by UpdateDeviceTree() in iBoot: @@ -109,19 +109,19 @@ 
check_for_panic_log(void) * reg[1] is the size of iBoot's kMemoryRegion_Panic (not used) * embedded-panic-log-size is the maximum amount of data to store in the buffer */ - if (kSuccess != DTLookupEntry(0, "pram", &entry)) { + if (kSuccess != SecureDTLookupEntry(0, "pram", &entry)) { return; } - if (kSuccess != DTGetProperty(entry, "reg", (void **)®_prop, &size)) { + if (kSuccess != SecureDTGetProperty(entry, "reg", (void const **)®_prop, &size)) { return; } - if (kSuccess != DTLookupEntry(0, "/chosen", &chosen)) { + if (kSuccess != SecureDTLookupEntry(0, "/chosen", &chosen)) { return; } - if (kSuccess != DTGetProperty(chosen, "embedded-panic-log-size", (void **) &panic_region_length, &size)) { + if (kSuccess != SecureDTGetProperty(chosen, "embedded-panic-log-size", (void const **) &panic_region_length, &size)) { return; } @@ -228,7 +228,7 @@ PE_init_iokit(void) DTEntry entry; unsigned int size, scale; unsigned long display_size; - void **map; + void const * const *map; unsigned int show_progress; int *delta, image_size, flip; uint32_t start_time_value = 0; @@ -236,20 +236,19 @@ PE_init_iokit(void) uint32_t load_kernel_start_value = 0; uint32_t populate_registry_time_value = 0; - PE_init_kprintf(TRUE); PE_init_printf(TRUE); printf("iBoot version: %s\n", firmware_version); - if (kSuccess == DTLookupEntry(0, "/chosen/memory-map", &entry)) { - boot_progress_element *bootPict; + if (kSuccess == SecureDTLookupEntry(0, "/chosen/memory-map", &entry)) { + boot_progress_element const *bootPict; - if (kSuccess == DTGetProperty(entry, "BootCLUT", (void **) &map, &size)) { + if (kSuccess == SecureDTGetProperty(entry, "BootCLUT", (void const **) &map, &size)) { bcopy(map[0], appleClut8, sizeof(appleClut8)); } - if (kSuccess == DTGetProperty(entry, "Pict-FailedBoot", (void **) &map, &size)) { - bootPict = (boot_progress_element *) map[0]; + if (kSuccess == SecureDTGetProperty(entry, "Pict-FailedBoot", (void const **) &map, &size)) { + bootPict = (boot_progress_element const *) map[0]; default_noroot.width = bootPict->width; default_noroot.height = bootPict->height; default_noroot.dx = 0; @@ -263,12 +262,25 @@ PE_init_iokit(void) scale = PE_state.video.v_scale; flip = 1; - if (PE_parse_boot_argn("-progress", &show_progress, sizeof(show_progress)) && show_progress) { +#if defined(XNU_TARGET_OS_OSX) + int notused; + show_progress = TRUE; + if (PE_parse_boot_argn("-restore", ¬used, sizeof(notused))) { + show_progress = FALSE; + } + if (PE_parse_boot_argn("-noprogress", ¬used, sizeof(notused))) { + show_progress = FALSE; + } +#else + show_progress = FALSE; + PE_parse_boot_argn("-progress", &show_progress, sizeof(show_progress)); +#endif /* XNU_TARGET_OS_OSX */ + if (show_progress) { /* Rotation: 0:normal, 1:right 90, 2:left 180, 3:left 90 */ switch (PE_state.video.v_rotate) { case 2: flip = -1; - /* fall through */ + OS_FALLTHROUGH; case 0: display_size = PE_state.video.v_height; image_size = default_progress.height; @@ -276,7 +288,7 @@ PE_init_iokit(void) break; case 1: flip = -1; - /* fall through */ + OS_FALLTHROUGH; case 3: default: display_size = PE_state.video.v_width; @@ -303,28 +315,28 @@ PE_init_iokit(void) if (kdebug_enable && kdebug_debugid_enabled(IOKDBG_CODE(DBG_BOOTER, 0))) { /* Trace iBoot-provided timing information. 
*/ - if (kSuccess == DTLookupEntry(0, "/chosen/iBoot", &entry)) { - uint32_t * value_ptr; + if (kSuccess == SecureDTLookupEntry(0, "/chosen/iBoot", &entry)) { + uint32_t const * value_ptr; - if (kSuccess == DTGetProperty(entry, "start-time", (void **)&value_ptr, &size)) { + if (kSuccess == SecureDTGetProperty(entry, "start-time", (void const **)&value_ptr, &size)) { if (size == sizeof(start_time_value)) { start_time_value = *value_ptr; } } - if (kSuccess == DTGetProperty(entry, "debug-wait-start", (void **)&value_ptr, &size)) { + if (kSuccess == SecureDTGetProperty(entry, "debug-wait-start", (void const **)&value_ptr, &size)) { if (size == sizeof(debug_wait_start_value)) { debug_wait_start_value = *value_ptr; } } - if (kSuccess == DTGetProperty(entry, "load-kernel-start", (void **)&value_ptr, &size)) { + if (kSuccess == SecureDTGetProperty(entry, "load-kernel-start", (void const **)&value_ptr, &size)) { if (size == sizeof(load_kernel_start_value)) { load_kernel_start_value = *value_ptr; } } - if (kSuccess == DTGetProperty(entry, "populate-registry-time", (void **)&value_ptr, &size)) { + if (kSuccess == SecureDTGetProperty(entry, "populate-registry-time", (void const **)&value_ptr, &size)) { if (size == sizeof(populate_registry_time_value)) { populate_registry_time_value = *value_ptr; } @@ -334,7 +346,26 @@ PE_init_iokit(void) KDBG_RELEASE(IOKDBG_CODE(DBG_BOOTER, 0), start_time_value, debug_wait_start_value, load_kernel_start_value, populate_registry_time_value); } - StartIOKit(PE_state.deviceTreeHead, PE_state.bootArgs, (void *) 0, (void *) 0); + InitIOKit(PE_state.deviceTreeHead); + ConfigureIOKit(); +} + +void +PE_lockdown_iokit(void) +{ + /* + * On arm/arm64 platforms, and especially those that employ KTRR/CTRR, + * machine_lockdown() is treated as a hard security checkpoint, such that + * code which executes prior to lockdown must be minimized and limited only to + * trusted parts of the kernel and specially-entitled kexts. We therefore + * cannot start the general-purpose IOKit matching process until after lockdown, + * as it may involve execution of untrusted/non-entitled kext code. + * Furthermore, such kext code may process attacker controlled data (e.g. + * network packets), which dramatically increases the potential attack surface + * against a kernel which has not yet enabled the full set of available + * hardware protections. + */ + StartIOKitMatching(); } void @@ -342,7 +373,7 @@ PE_slide_devicetree(vm_offset_t slide) { assert(PE_state.initialized); PE_state.deviceTreeHead += slide; - DTInit(PE_state.deviceTreeHead); + SecureDTInit(PE_state.deviceTreeHead, PE_state.deviceTreeSize); } void @@ -350,13 +381,14 @@ PE_init_platform(boolean_t vm_initialized, void *args) { DTEntry entry; unsigned int size; - void **prop; + void * const *prop; boot_args *boot_args_ptr = (boot_args *) args; if (PE_state.initialized == FALSE) { PE_state.initialized = TRUE; PE_state.bootArgs = boot_args_ptr; PE_state.deviceTreeHead = boot_args_ptr->deviceTreeP; + PE_state.deviceTreeSize = boot_args_ptr->deviceTreeLength; PE_state.video.v_baseAddr = boot_args_ptr->Video.v_baseAddr; PE_state.video.v_rowBytes = boot_args_ptr->Video.v_rowBytes; PE_state.video.v_width = boot_args_ptr->Video.v_width; @@ -373,7 +405,7 @@ PE_init_platform(boolean_t vm_initialized, void *args) * so the console can be found and the right I/O space * can be used.. 
*/ - DTInit(PE_state.deviceTreeHead); + SecureDTInit(PE_state.deviceTreeHead, PE_state.deviceTreeSize); pe_identify_machine(boot_args_ptr); } else { pe_arm_init_interrupts(args); @@ -381,9 +413,9 @@ PE_init_platform(boolean_t vm_initialized, void *args) } if (!vm_initialized) { - if (kSuccess == (DTFindEntry("name", "device-tree", &entry))) { - if (kSuccess == DTGetProperty(entry, "target-type", - (void **)&prop, &size)) { + if (kSuccess == (SecureDTFindEntry("name", "device-tree", &entry))) { + if (kSuccess == SecureDTGetProperty(entry, "target-type", + (void const **)&prop, &size)) { if (size > sizeof(gTargetTypeBuffer)) { size = sizeof(gTargetTypeBuffer); } @@ -391,9 +423,9 @@ PE_init_platform(boolean_t vm_initialized, void *args) gTargetTypeBuffer[size - 1] = '\0'; } } - if (kSuccess == (DTFindEntry("name", "device-tree", &entry))) { - if (kSuccess == DTGetProperty(entry, "model", - (void **)&prop, &size)) { + if (kSuccess == (SecureDTFindEntry("name", "device-tree", &entry))) { + if (kSuccess == SecureDTGetProperty(entry, "model", + (void const **)&prop, &size)) { if (size > sizeof(gModelTypeBuffer)) { size = sizeof(gModelTypeBuffer); } @@ -401,9 +433,9 @@ PE_init_platform(boolean_t vm_initialized, void *args) gModelTypeBuffer[size - 1] = '\0'; } } - if (kSuccess == DTLookupEntry(NULL, "/chosen", &entry)) { - if (kSuccess == DTGetProperty(entry, "debug-enabled", - (void **) &prop, &size)) { + if (kSuccess == SecureDTLookupEntry(NULL, "/chosen", &entry)) { + if (kSuccess == SecureDTGetProperty(entry, "debug-enabled", + (void const **) &prop, &size)) { /* * We purposefully modify a constified variable as * it will get locked down by a trusted monitor or @@ -419,23 +451,23 @@ PE_init_platform(boolean_t vm_initialized, void *args) bcopy(prop, modify_debug_enabled, size); #pragma clang diagnostic pop } - if (kSuccess == DTGetProperty(entry, "firmware-version", - (void **) &prop, &size)) { + if (kSuccess == SecureDTGetProperty(entry, "firmware-version", + (void const **) &prop, &size)) { if (size > sizeof(firmware_version)) { size = sizeof(firmware_version); } bcopy(prop, firmware_version, size); firmware_version[size - 1] = '\0'; } - if (kSuccess == DTGetProperty(entry, "unique-chip-id", - (void **) &prop, &size)) { + if (kSuccess == SecureDTGetProperty(entry, "unique-chip-id", + (void const **) &prop, &size)) { if (size > sizeof(gPlatformECID)) { size = sizeof(gPlatformECID); } bcopy(prop, gPlatformECID, size); } - if (kSuccess == DTGetProperty(entry, "dram-vendor-id", - (void **) &prop, &size)) { + if (kSuccess == SecureDTGetProperty(entry, "dram-vendor-id", + (void const **) &prop, &size)) { if (size > sizeof(gPlatformMemoryID)) { size = sizeof(gPlatformMemoryID); } @@ -512,7 +544,7 @@ PE_call_timebase_callback(void) int PE_stub_poll_input(__unused unsigned int options, char *c) { - *c = uart_getc(); + *c = (char)uart_getc(); return 0; /* 0 for success, 1 for unsupported */ } @@ -529,7 +561,7 @@ PE_i_can_has_debugger(uint32_t *debug_flags) { if (debug_flags) { #if DEVELOPMENT || DEBUG - assert(debug_boot_arg_inited); + assert(startup_phase >= STARTUP_SUB_TUNABLES); #endif if (debug_enabled) { *debug_flags = debug_boot_arg; @@ -579,11 +611,11 @@ PE_save_buffer_to_vram(unsigned char *buf, unsigned int *size) uint32_t PE_get_offset_into_panic_region(char *location) { - assert(panic_info != NULL); - assert(location > (char *) panic_info); - assert((unsigned int)(location - (char *) panic_info) < panic_text_len); + assert(gPanicBase != 0); + assert(location >= (char *) gPanicBase); + 
assert((unsigned int)(location - gPanicBase) < gPanicSize); - return (uint32_t) (location - gPanicBase); + return (uint32_t)(uintptr_t)(location - gPanicBase); } void @@ -599,7 +631,7 @@ PE_init_panicheader() * The panic log begins immediately after the panic header -- debugger synchronization and other functions * may log into this region before we've become the exclusive panicking CPU and initialize the header here. */ - panic_info->eph_panic_log_offset = PE_get_offset_into_panic_region(debug_buf_base); + panic_info->eph_panic_log_offset = debug_buf_base ? PE_get_offset_into_panic_region(debug_buf_base) : 0; panic_info->eph_magic = EMBEDDED_PANIC_MAGIC; panic_info->eph_version = EMBEDDED_PANIC_HEADER_CURRENT_VERSION; diff --git a/pexpert/arm/pe_kprintf.c b/pexpert/arm/pe_kprintf.c index fdac3ab1a..8475cd619 100644 --- a/pexpert/arm/pe_kprintf.c +++ b/pexpert/arm/pe_kprintf.c @@ -14,39 +14,46 @@ #include /* Globals */ -void (*PE_kputc)(char c) = 0; +typedef void (*PE_kputc_t)(char); +SECURITY_READ_ONLY_LATE(PE_kputc_t) PE_kputc; -SECURITY_READ_ONLY_LATE(unsigned int) disable_serial_output = TRUE; +// disable_serial_output disables kprintf() *and* unbuffered panic output. +// disable_kprintf_output only disables kprintf(). +SECURITY_READ_ONLY_LATE(unsigned int) disable_serial_output = TRUE; +static SECURITY_READ_ONLY_LATE(unsigned int) disable_kprintf_output = TRUE; -decl_simple_lock_data(static, kprintf_lock); +static SIMPLE_LOCK_DECLARE(kprintf_lock, 0); static void serial_putc_crlf(char c); -void -PE_init_kprintf(boolean_t vm_initialized) +__startup_func +static void +PE_init_kprintf(void) { - unsigned int boot_arg; - if (PE_state.initialized == FALSE) { panic("Platform Expert not initialized"); } - if (!vm_initialized) { - simple_lock_init(&kprintf_lock, 0); + if (debug_boot_arg & DB_KPRT) { + disable_serial_output = FALSE; + } - if (PE_parse_boot_argn("debug", &boot_arg, sizeof(boot_arg))) { - if (boot_arg & DB_KPRT) { - disable_serial_output = FALSE; - } - } +#if DEBUG + disable_kprintf_output = FALSE; +#elif DEVELOPMENT + bool enable_kprintf_spam = false; + if (PE_parse_boot_argn("-enable_kprintf_spam", &enable_kprintf_spam, sizeof(enable_kprintf_spam))) { + disable_kprintf_output = FALSE; + } +#endif - if (serial_init()) { - PE_kputc = serial_putc_crlf; - } else { - PE_kputc = cnputc; - } + if (serial_init()) { + PE_kputc = serial_putc_crlf; + } else { + PE_kputc = cnputc_unbuffered; } } +STARTUP(KPRINTF, STARTUP_RANK_FIRST, PE_init_kprintf); #ifdef MP_DEBUG static void @@ -79,7 +86,9 @@ kprintf(const char *fmt, ...) boolean_t state; void *caller = __builtin_return_address(0); - if (!disable_serial_output) { + if (!disable_serial_output && !disable_kprintf_output) { + va_start(listp, fmt); + va_copy(listp2, listp); /* * Spin to get kprintf lock but re-enable interrupts while failing. * This allows interrupts to be handled while waiting but @@ -96,10 +105,7 @@ kprintf(const char *fmt, ...) cpu_last_locked = cpu_number(); } - va_start(listp, fmt); - va_copy(listp2, listp); _doprnt_log(fmt, &listp, PE_kputc, 16); - va_end(listp); simple_unlock(&kprintf_lock); @@ -117,19 +123,14 @@ kprintf(const char *fmt, ...) 
} #endif ml_set_interrupts_enabled(state); + va_end(listp); - // If interrupts are enabled - if (ml_get_interrupts_enabled()) { - os_log_with_args(OS_LOG_DEFAULT, OS_LOG_TYPE_DEFAULT, fmt, listp2, caller); - } + os_log_with_args(OS_LOG_DEFAULT, OS_LOG_TYPE_DEFAULT, fmt, listp2, caller); va_end(listp2); } else { - // If interrupts are enabled - if (ml_get_interrupts_enabled()) { - va_start(listp, fmt); - os_log_with_args(OS_LOG_DEFAULT, OS_LOG_TYPE_DEFAULT, fmt, listp, caller); - va_end(listp); - } + va_start(listp, fmt); + os_log_with_args(OS_LOG_DEFAULT, OS_LOG_TYPE_DEFAULT, fmt, listp, caller); + va_end(listp); } } diff --git a/pexpert/arm/pe_serial.c b/pexpert/arm/pe_serial.c index 0d8ffefe2..b555aa1e2 100644 --- a/pexpert/arm/pe_serial.c +++ b/pexpert/arm/pe_serial.c @@ -28,6 +28,9 @@ #include #include #endif +#if HIBERNATION +#include +#endif /* HIBERNATION */ struct pe_serial_functions { void (*uart_init) (void); @@ -383,17 +386,17 @@ static void shmcon_init(void) { DTEntry entry; - uintptr_t *reg_prop; + uintptr_t const *reg_prop; volatile struct shm_buffer_info *end; size_t i, header_size; unsigned int size; vm_offset_t pa_panic_base, panic_size, va_buffer_base, va_buffer_end; - if (kSuccess != DTLookupEntry(0, "pram", &entry)) { + if (kSuccess != SecureDTLookupEntry(0, "pram", &entry)) { return; } - if (kSuccess != DTGetProperty(entry, "reg", (void **)®_prop, &size)) { + if (kSuccess != SecureDTGetProperty(entry, "reg", (void const **)®_prop, &size)) { return; } @@ -511,111 +514,13 @@ pe_shmcon_set_child(uint64_t paddr, uint32_t entry) /*****************************************************************************/ -#ifdef DOCKFIFO_UART - - -// Allow a 30ms stall of wall clock time before DockFIFO starts dropping characters -#define DOCKFIFO_WR_MAX_STALL_US (30*1000) - -static uint64_t prev_dockfifo_drained_time; // Last time we've seen the DockFIFO drained by an external agent -static uint64_t prev_dockfifo_spaces; // Previous w_stat level of the DockFIFO. -static uint32_t dockfifo_capacity; -static uint64_t dockfifo_stall_grace; - -static vm_offset_t dockfifo_uart_base = 0; - -//======================= -// Local funtions -//======================= - -static int -dockfifo_drain_on_stall() -{ - // Called when DockFIFO runs out of spaces. - // Check if the DockFIFO reader has stalled. If so, empty the DockFIFO ourselves. - // Return number of bytes drained. - - if (mach_absolute_time() - prev_dockfifo_drained_time >= dockfifo_stall_grace) { - // It's been more than DOCKFIFO_WR_MAX_STALL_US and nobody read from the FIFO - // Drop a character. - (void)rDOCKFIFO_R_DATA(DOCKFIFO_UART_READ, 1); - os_atomic_inc(&prev_dockfifo_spaces, relaxed); - return 1; - } - return 0; -} - - -static int -dockfifo_uart_tr0(void) -{ - uint32_t spaces = rDOCKFIFO_W_STAT(DOCKFIFO_UART_WRITE) & 0xffff; - if (spaces >= dockfifo_capacity || spaces > prev_dockfifo_spaces) { - // More spaces showed up. That can only mean someone read the FIFO. - // Note that if the DockFIFO is empty we cannot tell if someone is listening, - // we can only give them the benefit of the doubt. - - prev_dockfifo_drained_time = mach_absolute_time(); - } - prev_dockfifo_spaces = spaces; - - return spaces || dockfifo_drain_on_stall(); -} - -static void -dockfifo_uart_td0(int c) -{ - rDOCKFIFO_W_DATA(DOCKFIFO_UART_WRITE, 1) = (unsigned)(c & 0xff); - os_atomic_dec(&prev_dockfifo_spaces, relaxed); // After writing a byte we have one fewer space than previously expected. 
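The kprintf() rework above hoists va_start()/va_copy() ahead of the spinlock so that one argument list can feed two consumers, _doprnt_log() for the serial path and os_log_with_args() afterwards, without re-walking a va_list that has already been consumed. The standalone C sketch below shows just that varargs pattern; the function names are made up and stdio stands in for the two kernel sinks.

    #include <stdarg.h>
    #include <stdio.h>

    /* Consume one variadic argument list with two different sinks. A va_list
     * may only be traversed once, so the copy is taken before the first
     * consumer runs. */
    static void
    log_twice(const char *fmt, ...)
    {
            va_list ap, ap2;

            va_start(ap, fmt);
            va_copy(ap2, ap);                /* copy while ap is untouched */

            vfprintf(stdout, fmt, ap);       /* first consumer */
            va_end(ap);

            vfprintf(stderr, fmt, ap2);      /* second consumer, same args */
            va_end(ap2);
    }

    int
    main(void)
    {
            log_twice("value=%d name=%s\n", 42, "example");
            return 0;
    }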
-} - -static int -dockfifo_uart_rr0(void) -{ - return rDOCKFIFO_R_DATA(DOCKFIFO_UART_READ, 0) & 0x7f; -} - -static int -dockfifo_uart_rd0(void) -{ - return (int)((rDOCKFIFO_R_DATA(DOCKFIFO_UART_READ, 1) >> 8) & 0xff); -} - -static void -dockfifo_uart_init(void) -{ - nanoseconds_to_absolutetime(DOCKFIFO_WR_MAX_STALL_US * 1000, &dockfifo_stall_grace); - - // Disable autodraining of the FIFO. We now purely manage it in software. - rDOCKFIFO_DRAIN(DOCKFIFO_UART_WRITE) = 0; - - // Empty the DockFIFO by draining it until OCCUPANCY is 0, then measure its capacity - while (rDOCKFIFO_R_DATA(DOCKFIFO_UART_WRITE, 3) & 0x7F) { - ; - } - dockfifo_capacity = rDOCKFIFO_W_STAT(DOCKFIFO_UART_WRITE) & 0xffff; -} - -SECURITY_READ_ONLY_LATE(static struct pe_serial_functions) dockfifo_uart_serial_functions = -{ - .uart_init = dockfifo_uart_init, - .uart_set_baud_rate = NULL, - .tr0 = dockfifo_uart_tr0, - .td0 = dockfifo_uart_td0, - .rr0 = dockfifo_uart_rr0, - .rd0 = dockfifo_uart_rd0 -}; - -#endif /* DOCKFIFO_UART */ - -/*****************************************************************************/ - #ifdef DOCKCHANNEL_UART #define DOCKCHANNEL_WR_MAX_STALL_US (30*1000) static vm_offset_t dock_agent_base; static uint32_t max_dockchannel_drain_period; static bool use_sw_drain; +static uint32_t dock_wstat_mask; static uint64_t prev_dockchannel_drained_time; // Last time we've seen the DockChannel drained by an external agent static uint64_t prev_dockchannel_spaces; // Previous w_stat level of the DockChannel. static uint64_t dockchannel_stall_grace; @@ -635,7 +540,7 @@ dockchannel_drain_on_stall() if ((mach_absolute_time() - prev_dockchannel_drained_time) >= dockchannel_stall_grace) { // It's been more than DOCKCHANEL_WR_MAX_STALL_US and nobody read from the FIFO // Drop a character. - (void)rDOCKCHANNELS_DEV_RDATA1(DOCKCHANNEL_UART_CHANNEL); + (void)rDOCKCHANNELS_DOCK_RDATA1(DOCKCHANNEL_UART_CHANNEL); os_atomic_inc(&prev_dockchannel_spaces, relaxed); return 1; } @@ -646,7 +551,7 @@ static int dockchannel_uart_tr0(void) { if (use_sw_drain) { - uint32_t spaces = rDOCKCHANNELS_DEV_WSTAT(DOCKCHANNEL_UART_CHANNEL) & 0x1ff; + uint32_t spaces = rDOCKCHANNELS_DEV_WSTAT(DOCKCHANNEL_UART_CHANNEL) & dock_wstat_mask; if (spaces > prev_dockchannel_spaces) { // More spaces showed up. That can only mean someone read the FIFO. 
// Note that if the DockFIFO is empty we cannot tell if someone is listening, @@ -658,7 +563,7 @@ dockchannel_uart_tr0(void) return spaces || dockchannel_drain_on_stall(); } else { // Returns spaces in dockchannel fifo - return rDOCKCHANNELS_DEV_WSTAT(DOCKCHANNEL_UART_CHANNEL) & 0x1ff; + return rDOCKCHANNELS_DEV_WSTAT(DOCKCHANNEL_UART_CHANNEL) & dock_wstat_mask; } } @@ -765,7 +670,7 @@ pi3_uart_init(void) BCM2837_PUT32(BCM2837_AUX_MU_IIR_REG_V, 0xC6); BCM2837_PUT32(BCM2837_AUX_MU_BAUD_REG_V, 270); - i = BCM2837_FSEL_REG(14); + i = (uint32_t)BCM2837_FSEL_REG(14); // Configure GPIOs 14 & 15 for alternate function 5 i &= ~(BCM2837_FSEL_MASK(14)); i |= (BCM2837_FSEL_ALT5 << BCM2837_FSEL_OFFS(14)); @@ -826,9 +731,9 @@ serial_init(void) DTEntry entryP = NULL; uint32_t prop_size; vm_offset_t soc_base; - uintptr_t *reg_prop; - uint32_t *prop_value __unused = NULL; - char *serial_compat __unused = 0; + uintptr_t const *reg_prop; + uint32_t const *prop_value __unused = NULL; + char const *serial_compat __unused = 0; uint32_t dccmode; struct pe_serial_functions *fns = gPESF; @@ -860,12 +765,12 @@ serial_init(void) } #ifdef PI3_UART - if (DTFindEntry("name", "gpio", &entryP) == kSuccess) { - DTGetProperty(entryP, "reg", (void **)®_prop, &prop_size); + if (SecureDTFindEntry("name", "gpio", &entryP) == kSuccess) { + SecureDTGetProperty(entryP, "reg", (void const **)®_prop, &prop_size); pi3_gpio_base_vaddr = ml_io_map(soc_base + *reg_prop, *(reg_prop + 1)); } - if (DTFindEntry("name", "aux", &entryP) == kSuccess) { - DTGetProperty(entryP, "reg", (void **)®_prop, &prop_size); + if (SecureDTFindEntry("name", "aux", &entryP) == kSuccess) { + SecureDTGetProperty(entryP, "reg", (void const **)®_prop, &prop_size); pi3_aux_base_vaddr = ml_io_map(soc_base + *reg_prop, *(reg_prop + 1)); } if ((pi3_gpio_base_vaddr != 0) && (pi3_aux_base_vaddr != 0)) { @@ -873,22 +778,10 @@ serial_init(void) } #endif /* PI3_UART */ -#ifdef DOCKFIFO_UART - uint32_t no_dockfifo_uart = 0; - PE_parse_boot_argn("no-dockfifo-uart", &no_dockfifo_uart, sizeof(no_dockfifo_uart)); - if (no_dockfifo_uart == 0) { - if (DTFindEntry("name", "dockfifo-uart", &entryP) == kSuccess) { - DTGetProperty(entryP, "reg", (void **)®_prop, &prop_size); - dockfifo_uart_base = ml_io_map(soc_base + *reg_prop, *(reg_prop + 1)); - register_serial_functions(&dockfifo_uart_serial_functions); - } - } -#endif /* DOCKFIFO_UART */ - #ifdef DOCKCHANNEL_UART uint32_t no_dockchannel_uart = 0; - if (DTFindEntry("name", "dockchannel-uart", &entryP) == kSuccess) { - DTGetProperty(entryP, "reg", (void **)®_prop, &prop_size); + if (SecureDTFindEntry("name", "dockchannel-uart", &entryP) == kSuccess) { + SecureDTGetProperty(entryP, "reg", (void const **)®_prop, &prop_size); // Should be two reg entries if (prop_size / sizeof(uintptr_t) != 4) { panic("Malformed dockchannel-uart property"); @@ -899,10 +792,14 @@ serial_init(void) // Keep the old name for boot-arg if (no_dockchannel_uart == 0) { register_serial_functions(&dockchannel_uart_serial_functions); - DTGetProperty(entryP, "max-aop-clk", (void **)&prop_value, &prop_size); + SecureDTGetProperty(entryP, "max-aop-clk", (void const **)&prop_value, &prop_size); max_dockchannel_drain_period = (uint32_t)((prop_value)? (*prop_value * 0.03) : DOCKCHANNEL_DRAIN_PERIOD); - DTGetProperty(entryP, "enable-sw-drain", (void **)&prop_value, &prop_size); + prop_value = NULL; + SecureDTGetProperty(entryP, "enable-sw-drain", (void const **)&prop_value, &prop_size); use_sw_drain = (prop_value)? 
*prop_value : 0; + prop_value = NULL; + SecureDTGetProperty(entryP, "dock-wstat-mask", (void const **)&prop_value, &prop_size); + dock_wstat_mask = (prop_value)? *prop_value : 0x1ff; } else { dockchannel_uart_clear_intr(); } @@ -917,52 +814,55 @@ serial_init(void) * If we don't find it there, look for "uart0" and "uart1". */ - if (DTFindEntry("boot-console", NULL, &entryP) == kSuccess) { - DTGetProperty(entryP, "reg", (void **)®_prop, &prop_size); + if (SecureDTFindEntry("boot-console", NULL, &entryP) == kSuccess) { + SecureDTGetProperty(entryP, "reg", (void const **)®_prop, &prop_size); uart_base = ml_io_map(soc_base + *reg_prop, *(reg_prop + 1)); if (serial_compat == 0) { - DTGetProperty(entryP, "compatible", (void **)&serial_compat, &prop_size); + SecureDTGetProperty(entryP, "compatible", (void const **)&serial_compat, &prop_size); } - } else if (DTFindEntry("name", "uart0", &entryP) == kSuccess) { - DTGetProperty(entryP, "reg", (void **)®_prop, &prop_size); + } else if (SecureDTFindEntry("name", "uart0", &entryP) == kSuccess) { + SecureDTGetProperty(entryP, "reg", (void const **)®_prop, &prop_size); uart_base = ml_io_map(soc_base + *reg_prop, *(reg_prop + 1)); if (serial_compat == 0) { - DTGetProperty(entryP, "compatible", (void **)&serial_compat, &prop_size); + SecureDTGetProperty(entryP, "compatible", (void const **)&serial_compat, &prop_size); } - } else if (DTFindEntry("name", "uart1", &entryP) == kSuccess) { - DTGetProperty(entryP, "reg", (void **)®_prop, &prop_size); + } else if (SecureDTFindEntry("name", "uart1", &entryP) == kSuccess) { + SecureDTGetProperty(entryP, "reg", (void const **)®_prop, &prop_size); uart_base = ml_io_map(soc_base + *reg_prop, *(reg_prop + 1)); if (serial_compat == 0) { - DTGetProperty(entryP, "compatible", (void **)&serial_compat, &prop_size); + SecureDTGetProperty(entryP, "compatible", (void const **)&serial_compat, &prop_size); } } #ifdef S3CUART if (NULL != entryP) { - DTGetProperty(entryP, "pclk", (void **)&prop_value, &prop_size); + SecureDTGetProperty(entryP, "pclk", (void const **)&prop_value, &prop_size); if (prop_value) { dt_pclk = *prop_value; } prop_value = NULL; - DTGetProperty(entryP, "sampling", (void **)&prop_value, &prop_size); + SecureDTGetProperty(entryP, "sampling", (void const **)&prop_value, &prop_size); if (prop_value) { dt_sampling = *prop_value; } prop_value = NULL; - DTGetProperty(entryP, "ubrdiv", (void **)&prop_value, &prop_size); + SecureDTGetProperty(entryP, "ubrdiv", (void const **)&prop_value, &prop_size); if (prop_value) { dt_ubrdiv = *prop_value; } } - if (!strcmp(serial_compat, "uart,16550")) { - register_serial_functions(&ln2410_serial_functions); - } else if (!strcmp(serial_compat, "uart-16550")) { - register_serial_functions(&ln2410_serial_functions); - } else if (!strcmp(serial_compat, "uart,s5i3000")) { - register_serial_functions(&ln2410_serial_functions); - } else if (!strcmp(serial_compat, "uart-1,samsung")) { - register_serial_functions(&ln2410_serial_functions); + + if (serial_compat) { + if (!strcmp(serial_compat, "uart,16550")) { + register_serial_functions(&ln2410_serial_functions); + } else if (!strcmp(serial_compat, "uart-16550")) { + register_serial_functions(&ln2410_serial_functions); + } else if (!strcmp(serial_compat, "uart,s5i3000")) { + register_serial_functions(&ln2410_serial_functions); + } else if (!strcmp(serial_compat, "uart-1,samsung")) { + register_serial_functions(&ln2410_serial_functions); + } } #endif /* S3CUART */ @@ -976,6 +876,17 @@ serial_init(void) fns = fns->next; } +#if HIBERNATION 
+ /* hibernation needs to know the UART register addresses since it can't directly use this serial driver */ + if (dockchannel_uart_base) { + gHibernateGlobals.dockChannelRegBase = ml_vtophys(dockchannel_uart_base); + gHibernateGlobals.dockChannelWstatMask = dock_wstat_mask; + } + if (uart_base) { + gHibernateGlobals.hibUartRegBase = ml_vtophys(uart_base); + } +#endif /* HIBERNATION */ + uart_initted = 1; return 1; @@ -987,6 +898,9 @@ uart_putc(char c) struct pe_serial_functions *fns = gPESF; while (fns != NULL) { while (!fns->tr0()) { +#if __arm64__ /* on arm64, we have a WFE timeout, so no need to hot-poll here */ + __builtin_arm_wfe() +#endif ; /* Wait until THR is empty. */ } fns->td0(c); diff --git a/pexpert/conf/Makefile b/pexpert/conf/Makefile index 05c4b79cf..51eddb889 100644 --- a/pexpert/conf/Makefile +++ b/pexpert/conf/Makefile @@ -23,7 +23,7 @@ endif $(TARGET)/$(CURRENT_KERNEL_CONFIG)/Makefile: $(SRCROOT)/SETUP/config/doconf $(OBJROOT)/SETUP/config $(DOCONFDEPS) $(_v)$(MKDIR) $(TARGET)/$(CURRENT_KERNEL_CONFIG) - $(_v)$(SRCROOT)/SETUP/config/doconf -c -cpu $(DOCONF_ARCH_CONFIG_LC) -soc $(CURRENT_MACHINE_CONFIG_LC) -d $(TARGET)/$(CURRENT_KERNEL_CONFIG) -s $(SOURCE) -m $(MASTERCONFDIR) $(CURRENT_KERNEL_CONFIG) + $(_v)$(SRCROOT)/SETUP/config/doconf -c -cpu $(DOCONF_ARCH_CONFIG_LC) -soc $(CURRENT_MACHINE_CONFIG_LC) -platform $(PLATFORM) -d $(TARGET)/$(CURRENT_KERNEL_CONFIG) -s $(SOURCE) -m $(MASTERCONFDIR) $(CURRENT_KERNEL_CONFIG) do_all: $(TARGET)/$(CURRENT_KERNEL_CONFIG)/Makefile $(_v)${MAKE} \ diff --git a/pexpert/conf/Makefile.template b/pexpert/conf/Makefile.template index b5357650d..92b00f7ac 100644 --- a/pexpert/conf/Makefile.template +++ b/pexpert/conf/Makefile.template @@ -19,9 +19,6 @@ include $(MakeInc_def) CFLAGS+= -include meta_features.h -DPEXPERT_KERNEL_PRIVATE SFLAGS+= -include meta_features.h -# Objects that don't want -Wcast-align warning (8474835) -pe_identify_machine.o_CWARNFLAGS_ADD = -Wno-cast-align - # # Directories for mig generated files # @@ -51,6 +48,25 @@ COMP_SUBDIRS = %MACHDEP +# +# Diagnostic opt-outs. We need to make this list empty. +# +# DO NOT ADD MORE HERE. 
+# +# -Wno-cast-align +pe_identify_machine.o_CFLAGS_ADD += -Wno-cast-align +# -Wno-implicit-int-conversion +pe_serial.o_CFLAGS_ADD += -Wno-implicit-int-conversion +# -Wno-shorten-64-to-32 +pe_identify_machine.o_CFLAGS_ADD += -Wno-shorten-64-to-32 +pe_init.o_CFLAGS_ADD += -Wno-shorten-64-to-32 +# -Wno-sign-conversion +bootargs.o_CFLAGS_ADD += -Wno-sign-conversion +device_tree.o_CFLAGS_ADD += -Wno-sign-conversion +pe_identify_machine.o_CFLAGS_ADD += -Wno-sign-conversion +pe_init.o_CFLAGS_ADD += -Wno-sign-conversion +pe_serial.o_CFLAGS_ADD += -Wno-sign-conversion + # Rebuild if per-file overrides change ${OBJS}: $(firstword $(MAKEFILE_LIST)) @@ -66,7 +82,7 @@ $(SOBJS): .SFLAGS $(_v)$(REPLACECONTENTS) $@ $(S_KCC) $(SFLAGS) $(INCFLAGS) $(COMPONENT).filelist: $(OBJS) - $(call makelog,$(ColorL)LDFILELIST$(Color0) $(ColorLF)$(COMPONENT)$(Color0)) + @$(LOG_LDFILELIST) "$(COMPONENT)" $(_v)for obj in ${OBJS}; do \ $(ECHO) $(TARGET)/$(CURRENT_KERNEL_CONFIG)/$${obj}; \ done > $(COMPONENT).filelist diff --git a/pexpert/conf/files b/pexpert/conf/files index 1ec12c871..b3af11f0a 100644 --- a/pexpert/conf/files +++ b/pexpert/conf/files @@ -8,6 +8,7 @@ OPTIONS/config_dtrace optional config_dtrace pexpert/gen/device_tree.c standard pexpert/gen/bootargs.c standard pexpert/gen/pe_gen.c standard +pexpert/gen/kcformat.c standard # diff --git a/pexpert/gen/bootargs.c b/pexpert/gen/bootargs.c index fddac8d3e..79b3d9619 100644 --- a/pexpert/gen/bootargs.c +++ b/pexpert/gen/bootargs.c @@ -32,7 +32,7 @@ typedef boolean_t (*argsep_func_t) (char c); static boolean_t isargsep( char c); static boolean_t israngesep( char c); -#ifndef CONFIG_EMBEDDED +#if defined(__x86_64__) static int argstrcpy(char *from, char *to); #endif static int argstrcpy2(char *from, char *to, unsigned maxlen); @@ -70,7 +70,7 @@ PE_parse_boot_argn_internal( return FALSE; } -#ifdef CONFIG_EMBEDDED +#if !defined(__x86_64__) if (max_len == -1) { return FALSE; } @@ -149,7 +149,7 @@ PE_parse_boot_argn_internal( } else if (max_len == 0) { arg_found = TRUE; } -#if !CONFIG_EMBEDDED +#if defined(__x86_64__) else if (max_len == -1) { /* unreachable on embedded */ argstrcpy(++cp, (char *)arg_ptr); arg_found = TRUE; @@ -212,7 +212,7 @@ israngesep(char c) } } -#if !CONFIG_EMBEDDED +#if defined(__x86_64__) static int argstrcpy( char *from, @@ -253,23 +253,23 @@ argnumcpy(long long val, void *to, unsigned maxlen) /* No write-back, caller just wants to know if arg was found */ break; case 1: - *(int8_t *)to = val; + *(int8_t *)to = (int8_t)val; break; case 2: - *(int16_t *)to = val; + *(int16_t *)to = (int16_t)val; break; case 3: /* Unlikely in practice */ - ((struct i24 *)to)->i24 = val; + ((struct i24 *)to)->i24 = (int32_t)val; break; case 4: - *(int32_t *)to = val; + *(int32_t *)to = (int32_t)val; break; case 8: - *(int64_t *)to = val; + *(int64_t *)to = (int64_t)val; break; default: - *(int32_t *)to = val; + *(int32_t *)to = (int32_t)val; maxlen = 4; break; } @@ -395,17 +395,17 @@ PE_get_default( unsigned int max_property) { DTEntry dte; - void **property_data; + void const *property_data; unsigned int property_size; /* * Look for the property using the PE DT support. */ - if (kSuccess == DTLookupEntry(NULL, "/defaults", &dte)) { + if (kSuccess == SecureDTLookupEntry(NULL, "/defaults", &dte)) { /* * We have a /defaults node, look for the named property. 
*/ - if (kSuccess != DTGetProperty(dte, property_name, (void **)&property_data, &property_size)) { + if (kSuccess != SecureDTGetProperty(dte, property_name, &property_data, &property_size)) { return FALSE; } diff --git a/pexpert/gen/device_tree.c b/pexpert/gen/device_tree.c index ef5744688..bd6a75e42 100644 --- a/pexpert/gen/device_tree.c +++ b/pexpert/gen/device_tree.c @@ -38,41 +38,111 @@ #include #include #include +#include #include +#if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR) +extern addr64_t kvtophys(vm_offset_t va); +#endif /* defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR) */ + #include -static int DTInitialized; -static RealDTEntry DTRootNode; +SECURITY_READ_ONLY_LATE(static int) DTInitialized; +SECURITY_READ_ONLY_LATE(RealDTEntry) DTRootNode; +SECURITY_READ_ONLY_LATE(static vm_size_t) DTSize; +SECURITY_READ_ONLY_LATE(static vm_offset_t) DTEnd; /* + * * Support Routines + * + */ + +static inline void +assert_in_dt_region(vm_offset_t const start, vm_offset_t const end, void const *p) +{ + if ((vm_offset_t)p < start || (vm_offset_t)p > end) { + panic("Device tree pointer outside of device tree region: pointer %p, DTEnd %lx\n", p, (unsigned long)DTEnd); + } +} +#define ASSERT_IN_DT(p) assert_in_dt_region((vm_offset_t)DTRootNode, (vm_offset_t)DTEnd, (p)) + +static inline void +assert_prop_in_dt_region(vm_offset_t const start, vm_offset_t const end, DeviceTreeNodeProperty const *prop) +{ + vm_offset_t prop_end; + + assert_in_dt_region(start, end, prop); + if (os_add3_overflow((vm_offset_t)prop, sizeof(DeviceTreeNodeProperty), prop->length, &prop_end)) { + panic("Device tree property overflow: prop %p, length 0x%x\n", prop, prop->length); + } + assert_in_dt_region(start, end, (void*)prop_end); +} +#define ASSERT_PROP_IN_DT(prop) assert_prop_in_dt_region((vm_offset_t)DTRootNode, (vm_offset_t)DTEnd, (prop)) + +#define ASSERT_HEADER_IN_DT_REGION(start, end, p, size) assert_in_dt_region((start), (end), (uint8_t const *)(p) + (size)) +#define ASSERT_HEADER_IN_DT(p, size) ASSERT_IN_DT((uint8_t const *)(p) + (size)) + +/* + * Since there is no way to know the size of a device tree node + * without fully walking it, we employ the following principle to make + * sure that the accessed device tree is fully within its memory + * region: + * + * Internally, we check anything we want to access just before we want + * to access it (not after creating a pointer). + * + * Then, before returning a DTEntry to the caller, we check whether + * the start address (only!) of the entry is still within the device + * tree region. + * + * Before returning a property value the caller, we check whether the + * property is fully within the region. + * + * "DTEntry"s are opaque to the caller, so only checking their + * starting address is enough to satisfy existence within the device + * tree region, while for property values we need to make sure that + * they are fully within the region. 
*/ -static inline DeviceTreeNodeProperty* -next_prop(DeviceTreeNodeProperty* prop) + +static inline DeviceTreeNodeProperty const * +next_prop_region(vm_offset_t const start, vm_offset_t end, DeviceTreeNodeProperty const *prop) { uintptr_t next_addr; + + ASSERT_HEADER_IN_DT_REGION(start, end, prop, sizeof(DeviceTreeNode)); + if (os_add3_overflow((uintptr_t)prop, prop->length, sizeof(DeviceTreeNodeProperty) + 3, &next_addr)) { panic("Device tree property overflow: prop %p, length 0x%x\n", prop, prop->length); } + next_addr &= ~(3ULL); + return (DeviceTreeNodeProperty*)next_addr; } +#define next_prop(prop) next_prop_region((vm_offset_t)DTRootNode, (vm_offset_t)DTEnd, (prop)) static RealDTEntry skipProperties(RealDTEntry entry) { - DeviceTreeNodeProperty *prop; + DeviceTreeNodeProperty const *prop; unsigned int k; - if (entry == NULL || entry->nProperties == 0) { + if (entry == NULL) { + return NULL; + } + + ASSERT_HEADER_IN_DT(entry, sizeof(DeviceTreeNode)); + + if (entry->nProperties == 0) { return NULL; } else { - prop = (DeviceTreeNodeProperty *) (entry + 1); + prop = (DeviceTreeNodeProperty const *) (entry + 1); for (k = 0; k < entry->nProperties; k++) { prop = next_prop(prop); } } + ASSERT_IN_DT(prop); return (RealDTEntry) prop; } @@ -82,6 +152,8 @@ skipTree(RealDTEntry root) RealDTEntry entry; unsigned int k; + ASSERT_HEADER_IN_DT(root, sizeof(DeviceTreeNode)); + entry = skipProperties(root); if (entry == NULL) { return NULL; @@ -130,16 +202,18 @@ FindChild(RealDTEntry cur, char *buf) { RealDTEntry child; unsigned long index; - char * str; + char const * str; unsigned int dummy; + ASSERT_HEADER_IN_DT(cur, sizeof(DeviceTreeNode)); + if (cur->nChildren == 0) { return NULL; } index = 1; child = GetFirstChild(cur); while (1) { - if (DTGetProperty(child, "name", (void **)&str, &dummy) != kSuccess) { + if (SecureDTGetProperty(child, "name", (void const **)&str, &dummy) != kSuccess) { break; } if (strcmp(str, buf) == 0) { @@ -154,58 +228,90 @@ FindChild(RealDTEntry cur, char *buf) return NULL; } - /* * External Routines */ void -DTInit(void *base) +SecureDTInit(void const *base, size_t size) { - DTRootNode = (RealDTEntry) base; + if ((uintptr_t)base + size < (uintptr_t)base) { + panic("DeviceTree overflow: %p, size %#zx", base, size); + } + DTRootNode = base; + DTSize = size; + DTEnd = (vm_offset_t)DTRootNode + DTSize; DTInitialized = (DTRootNode != 0); } +bool +SecureDTIsLockedDown(void) +{ +#if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR) + /* + * We cannot check if the DT is in the CTRR region early on, + * because knowledge of the CTRR region is set up later. But the + * DT is used in all kinds of early bootstrapping before that. + * + * Luckily, we know that the device tree must be in front of the + * kernel if set up in EXTRADATA (which means it's covered by + * CTRR), and after it otherwise. 
+ */ + addr64_t exec_header_phys = kvtophys((vm_offset_t)&_mh_execute_header); + + if (kvtophys((vm_offset_t)DTRootNode) < exec_header_phys) { + assert(kvtophys(DTEnd) < exec_header_phys); + return true; + } + +#endif + return false; +} + int -DTEntryIsEqual(const DTEntry ref1, const DTEntry ref2) +SecureDTEntryIsEqual(const DTEntry ref1, const DTEntry ref2) { /* equality of pointers */ return ref1 == ref2; } -static char *startingP; // needed for find_entry +static char const *startingP; // needed for find_entry int find_entry(const char *propName, const char *propValue, DTEntry *entryH); int -DTFindEntry(const char *propName, const char *propValue, DTEntry *entryH) +SecureDTFindEntry(const char *propName, const char *propValue, DTEntry *entryH) { if (!DTInitialized) { return kError; } - startingP = (char *)DTRootNode; + startingP = (char const *)DTRootNode; return find_entry(propName, propValue, entryH); } int find_entry(const char *propName, const char *propValue, DTEntry *entryH) { - DeviceTreeNode *nodeP = (DeviceTreeNode *) (void *) startingP; + DeviceTreeNode const *nodeP = (DeviceTreeNode const *) (void const *) startingP; unsigned int k; + ASSERT_HEADER_IN_DT(nodeP, sizeof(DeviceTreeNode)); + if (nodeP->nProperties == 0) { return kError; // End of the list of nodes } - startingP = (char *) (nodeP + 1); + startingP = (char const *) (nodeP + 1); // Search current entry for (k = 0; k < nodeP->nProperties; ++k) { - DeviceTreeNodeProperty *propP = (DeviceTreeNodeProperty *) (void *) startingP; + DeviceTreeNodeProperty const *propP = (DeviceTreeNodeProperty const *) (void const *) startingP; + ASSERT_PROP_IN_DT(propP); startingP += sizeof(*propP) + ((propP->length + 3) & -4); if (strcmp(propP->name, propName) == 0) { - if (propValue == NULL || strcmp((char *)(propP + 1), propValue) == 0) { + if (propValue == NULL || strcmp((char const *)(propP + 1), propValue) == 0) { *entryH = (DTEntry)nodeP; + ASSERT_HEADER_IN_DT(*entryH, sizeof(DeviceTreeNode)); return kSuccess; } } @@ -221,7 +327,7 @@ find_entry(const char *propName, const char *propValue, DTEntry *entryH) } int -DTLookupEntry(const DTEntry searchPoint, const char *pathName, DTEntry *foundEntry) +SecureDTLookupEntry(const DTEntry searchPoint, const char *pathName, DTEntry *foundEntry) { DTEntryNameBuf buf; RealDTEntry cur; @@ -235,6 +341,7 @@ DTLookupEntry(const DTEntry searchPoint, const char *pathName, DTEntry *foundEnt } else { cur = searchPoint; } + ASSERT_IN_DT(cur); cp = pathName; if (*cp == kDTPathNameSeparator) { cp++; @@ -262,7 +369,7 @@ DTLookupEntry(const DTEntry searchPoint, const char *pathName, DTEntry *foundEnt } int -DTInitEntryIterator(const DTEntry startEntry, DTEntryIterator iter) +SecureDTInitEntryIterator(const DTEntry startEntry, DTEntryIterator iter) { if (!DTInitialized) { return kError; @@ -283,7 +390,7 @@ DTInitEntryIterator(const DTEntry startEntry, DTEntryIterator iter) } int -DTEnterEntry(DTEntryIterator iter, DTEntry childEntry) +SecureDTEnterEntry(DTEntryIterator iter, DTEntry childEntry) { DTSavedScopePtr newScope; @@ -305,7 +412,7 @@ DTEnterEntry(DTEntryIterator iter, DTEntry childEntry) } int -DTExitEntry(DTEntryIterator iter, DTEntry *currentPosition) +SecureDTExitEntry(DTEntryIterator iter, DTEntry *currentPosition) { DTSavedScopePtr newScope; @@ -325,7 +432,7 @@ DTExitEntry(DTEntryIterator iter, DTEntry *currentPosition) } int -DTIterateEntries(DTEntryIterator iter, DTEntry *nextEntry) +SecureDTIterateEntries(DTEntryIterator iter, DTEntry *nextEntry) { if (iter->currentIndex >= 
iter->currentScope->nChildren) { *nextEntry = NULL; @@ -337,13 +444,14 @@ DTIterateEntries(DTEntryIterator iter, DTEntry *nextEntry) } else { iter->currentEntry = GetNextChild(iter->currentEntry); } + ASSERT_IN_DT(iter->currentEntry); *nextEntry = iter->currentEntry; return kSuccess; } } int -DTRestartEntryIteration(DTEntryIterator iter) +SecureDTRestartEntryIteration(DTEntryIterator iter) { #if 0 // This commented out code allows a second argument (outer) @@ -364,31 +472,55 @@ DTRestartEntryIteration(DTEntryIterator iter) return kSuccess; } -int -DTGetProperty(const DTEntry entry, const char *propertyName, void **propertyValue, unsigned int *propertySize) +static int +SecureDTGetPropertyInternal(const DTEntry entry, const char *propertyName, void const **propertyValue, unsigned int *propertySize, vm_offset_t const region_start, vm_size_t region_size) { - DeviceTreeNodeProperty *prop; + DeviceTreeNodeProperty const *prop; unsigned int k; - if (entry == NULL || entry->nProperties == 0) { + if (entry == NULL) { + return kError; + } + + ASSERT_HEADER_IN_DT_REGION(region_start, region_start + region_size, entry, sizeof(DeviceTreeNode)); + + if (entry->nProperties == 0) { return kError; } else { - prop = (DeviceTreeNodeProperty *) (entry + 1); + prop = (DeviceTreeNodeProperty const *) (entry + 1); for (k = 0; k < entry->nProperties; k++) { + assert_prop_in_dt_region(region_start, region_start + region_size, prop); if (strcmp(prop->name, propertyName) == 0) { - *propertyValue = (void *) (((uintptr_t)prop) + *propertyValue = (void const *) (((uintptr_t)prop) + sizeof(DeviceTreeNodeProperty)); *propertySize = prop->length; return kSuccess; } - prop = next_prop(prop); + prop = next_prop_region(region_start, region_start + region_size, prop); } } return kError; } int -DTInitPropertyIterator(const DTEntry entry, DTPropertyIterator iter) +SecureDTGetProperty(const DTEntry entry, const char *propertyName, void const **propertyValue, unsigned int *propertySize) +{ + return SecureDTGetPropertyInternal(entry, propertyName, propertyValue, propertySize, + (vm_offset_t)DTRootNode, (vm_size_t)((uintptr_t)DTEnd - (uintptr_t)DTRootNode)); +} + +#if defined(__i386__) || defined(__x86_64__) +int +SecureDTGetPropertyRegion(const DTEntry entry, const char *propertyName, void const **propertyValue, unsigned int *propertySize, vm_offset_t const region_start, vm_size_t region_size) +{ + return SecureDTGetPropertyInternal(entry, propertyName, propertyValue, propertySize, + region_start, region_size); +} +#endif + + +int +SecureDTInitPropertyIterator(const DTEntry entry, DTPropertyIterator iter) { iter->entry = entry; iter->currentProperty = NULL; @@ -397,7 +529,7 @@ DTInitPropertyIterator(const DTEntry entry, DTPropertyIterator iter) } int -DTIterateProperties(DTPropertyIterator iter, char **foundProperty) +SecureDTIterateProperties(DTPropertyIterator iter, char const **foundProperty) { if (iter->currentIndex >= iter->entry->nProperties) { *foundProperty = NULL; @@ -405,17 +537,18 @@ DTIterateProperties(DTPropertyIterator iter, char **foundProperty) } else { iter->currentIndex++; if (iter->currentIndex == 1) { - iter->currentProperty = (DeviceTreeNodeProperty *) (iter->entry + 1); + iter->currentProperty = (DeviceTreeNodeProperty const *) (iter->entry + 1); } else { iter->currentProperty = next_prop(iter->currentProperty); } + ASSERT_PROP_IN_DT(iter->currentProperty); *foundProperty = iter->currentProperty->name; return kSuccess; } } int -DTRestartPropertyIteration(DTPropertyIterator iter) 
+SecureDTRestartPropertyIteration(DTPropertyIterator iter) { iter->currentProperty = NULL; iter->currentIndex = 0; diff --git a/pexpert/gen/kcformat.c b/pexpert/gen/kcformat.c new file mode 100644 index 000000000..422f75df7 --- /dev/null +++ b/pexpert/gen/kcformat.c @@ -0,0 +1,228 @@ +/* + * Copyright (c) 2000-2020 Apple Inc. All rights reserved. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. + * + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ + */ +/* + * @OSF_FREE_COPYRIGHT@ + */ + +#include +#include +#include + +vm_offset_t kc_highest_nonlinkedit_vmaddr = 0; +int vnode_put(void *vp); + +// FIXME: should come from mach-o/fixup_chains.h +// Index in basePointers array used by chained rebase in dyld_kernel_fixups.h +typedef enum kc_index { + primary_kc_index = 0, + pageable_kc_index = 1, + auxiliary_kc_index = 3, +} kc_index_t; + +#if defined(__x86_64__) || defined(__i386__) +/* FIXME: This should be locked down during early boot */ +void *collection_base_pointers[KCNumKinds] = {}; +kernel_mach_header_t * collection_mach_headers[KCNumKinds] = {}; +uintptr_t collection_slide[KCNumKinds] = {}; +void * collection_vp[KCNumKinds] = {}; +#else + +SECURITY_READ_ONLY_LATE(void *) collection_base_pointers[KCNumKinds]; +SECURITY_READ_ONLY_LATE(kernel_mach_header_t *) collection_mach_headers[KCNumKinds]; +SECURITY_READ_ONLY_LATE(uintptr_t) collection_slide[KCNumKinds]; +SECURITY_READ_ONLY_LATE(void *) collection_vp[KCNumKinds]; +#endif //(__x86_64__) || defined(__i386__) + +static inline kc_index_t +kc_kind2index(kc_kind_t type) +{ + switch (type) { + case KCKindPrimary: + return primary_kc_index; + case KCKindPageable: + return pageable_kc_index; + case KCKindAuxiliary: + return auxiliary_kc_index; + default: + panic("Invalid KC Kind"); + break; + } + __builtin_unreachable(); +} + +void +PE_set_kc_header(kc_kind_t type, kernel_mach_header_t *header, uintptr_t slide) +{ + kc_index_t i = kc_kind2index(type); + assert(!collection_base_pointers[i]); + assert(!collection_mach_headers[i]); + collection_mach_headers[i] = header; + collection_slide[i] = slide; + + struct load_command *lc; + struct segment_command_64 *seg; + uint64_t lowest_vmaddr = ~0ULL; + + lc = (struct load_command *)((uintptr_t)header + sizeof(*header)); + for (uint32_t j = 0; j < header->ncmds; j++, + lc = (struct load_command *)((uintptr_t)lc + lc->cmdsize)) { + if 
(lc->cmd != LC_SEGMENT_64) { + continue; + } + seg = (struct segment_command_64 *)(uintptr_t)lc; + if (seg->vmaddr < lowest_vmaddr) { + lowest_vmaddr = seg->vmaddr; + } + } + + collection_base_pointers[i] = (void *)(uintptr_t)lowest_vmaddr + slide; + assert((uint64_t)(uintptr_t)collection_base_pointers[i] != ~0ULL); +} + +void +PE_reset_kc_header(kc_kind_t type) +{ + if (type == KCKindPrimary) { + return; + } + + kc_index_t i = kc_kind2index(type); + collection_mach_headers[i] = 0; + collection_base_pointers[i] = 0; + collection_slide[i] = 0; +} + +void +PE_set_kc_header_and_base(kc_kind_t type, kernel_mach_header_t * header, void *base, uintptr_t slide) +{ + kc_index_t i = kc_kind2index(type); + assert(!collection_base_pointers[i]); + assert(!collection_mach_headers[i]); + collection_mach_headers[i] = header; + collection_slide[i] = slide; + collection_base_pointers[i] = base; +} + +void * +PE_get_kc_header(kc_kind_t type) +{ + return collection_mach_headers[kc_kind2index(type)]; +} + +void +PE_set_kc_vp(kc_kind_t type, void *vp) +{ + kc_index_t i = kc_kind2index(type); + assert(collection_vp[i] == NULL); + + collection_vp[i] = vp; +} + +void * +PE_get_kc_vp(kc_kind_t type) +{ + kc_index_t i = kc_kind2index(type); + return collection_vp[i]; +} + +void +PE_reset_all_kc_vp(void) +{ + for (int i = 0; i < KCNumKinds; i++) { + if (collection_vp[i] != NULL) { + vnode_put(collection_vp[i]); + collection_vp[i] = NULL; + } + } +} + +const void * const * +PE_get_kc_base_pointers(void) +{ + return (const void * const*)collection_base_pointers; +} + +/* + * Prelinked kexts in an MH_FILESET start with address 0, + * the slide for such kexts is calculated from the base + * address of the first kext mapped in that KC. Return the + * slide based on the type of the KC. + * + * Prelinked kexts booted from a non MH_FILESET KC are + * marked as KCKindUnknown, for such cases, return + * the kernel slide. 
+ */ +uintptr_t +PE_get_kc_slide(kc_kind_t type) +{ + if (type == KCKindUnknown) { + return vm_kernel_slide; + } + return collection_slide[kc_kind2index(type)]; +} + +bool +PE_get_primary_kc_format(kc_format_t *type) +{ + if (type != NULL) { + kernel_mach_header_t *mh = PE_get_kc_header(KCKindPrimary); + if (mh && mh->filetype == MH_FILESET) { + *type = KCFormatFileset; + } else { +#if defined(__arm__) || defined(__arm64__) + /* From osfmk/arm/arm_init.c */ + extern bool static_kernelcache; + if (static_kernelcache) { + *type = KCFormatStatic; + } else { + *type = KCFormatKCGEN; + } +#else + *type = KCFormatDynamic; +#endif + } + } + return true; +} + +void * +PE_get_kc_baseaddress(kc_kind_t type) +{ + kc_index_t i = kc_kind2index(type); + switch (type) { +#if defined(__arm__) || defined(__arm64__) + case KCKindPrimary: { + extern vm_offset_t segLOWESTTEXT; + return (void*)segLOWESTTEXT; + } +#endif + default: + return collection_base_pointers[i]; + } + return NULL; +} diff --git a/pexpert/gen/pe_gen.c b/pexpert/gen/pe_gen.c index b96bf4974..872da7895 100644 --- a/pexpert/gen/pe_gen.c +++ b/pexpert/gen/pe_gen.c @@ -34,13 +34,9 @@ #include #include -#if CONFIG_EMBEDDED #include -#endif - -static int DEBUGFlag; -#if CONFIG_EMBEDDED +#if defined(__arm__) || defined(__arm64__) SECURITY_READ_ONLY_LATE(static uint32_t) gPEKernelConfigurationBitmask; #else static uint32_t gPEKernelConfigurationBitmask; @@ -55,10 +51,6 @@ pe_init_debug(void) { boolean_t boot_arg_value; - if (!PE_parse_boot_argn("debug", &DEBUGFlag, sizeof(DEBUGFlag))) { - DEBUGFlag = 0; - } - gPEKernelConfigurationBitmask = 0; if (!PE_parse_boot_argn("assertions", &boot_arg_value, sizeof(boot_arg_value))) { @@ -97,10 +89,10 @@ pe_init_debug(void) debug_cpu_performance_degradation_factor = factor; } else { DTEntry root; - if (DTLookupEntry(NULL, "/", &root) == kSuccess) { - void *prop = NULL; + if (SecureDTLookupEntry(NULL, "/", &root) == kSuccess) { + void const *prop = NULL; uint32_t size = 0; - if (DTGetProperty(root, "target-is-fpga", &prop, &size) == kSuccess) { + if (SecureDTGetProperty(root, "target-is-fpga", &prop, &size) == kSuccess) { debug_cpu_performance_degradation_factor = 10; } } @@ -110,7 +102,7 @@ pe_init_debug(void) void PE_enter_debugger(const char *cause) { - if (DEBUGFlag & DB_NMI) { + if (debug_boot_arg & DB_NMI) { Debugger(cause); } } @@ -125,7 +117,13 @@ PE_i_can_has_kernel_configuration(void) extern void vcattach(void); /* Globals */ -void (*PE_putc)(char c); +typedef void (*PE_putc_t)(char); + +#if XNU_TARGET_OS_OSX +PE_putc_t PE_putc; +#else +SECURITY_READ_ONLY_LATE(PE_putc_t) PE_putc; +#endif void PE_init_printf(boolean_t vm_initialized) @@ -144,9 +142,11 @@ PE_get_random_seed(unsigned char *dst_random_seed, uint32_t request_size) uint32_t size = 0; void *dt_random_seed; - if ((DTLookupEntry(NULL, "/chosen", &entryP) == kSuccess) - && (DTGetProperty(entryP, "random-seed", - (void **)&dt_random_seed, &size) == kSuccess)) { + if ((SecureDTLookupEntry(NULL, "/chosen", &entryP) == kSuccess) + && (SecureDTGetProperty(entryP, "random-seed", + /* casting away the const is permissible here, since + * this function runs before lockdown. 
*/ + (const void **)(uintptr_t)&dt_random_seed, &size) == kSuccess)) { unsigned char *src_random_seed; unsigned int i; unsigned int null_count = 0; diff --git a/pexpert/i386/pe_init.c b/pexpert/i386/pe_init.c index 4892e95c7..74c6c6276 100644 --- a/pexpert/i386/pe_init.c +++ b/pexpert/i386/pe_init.c @@ -98,7 +98,7 @@ PE_initialize_console( PE_Video * info, int op ) if (info) { PE_state.video = *info; } - /* fall thru */ + OS_FALLTHROUGH; default: initialize_screen(info, op); @@ -118,13 +118,12 @@ PE_init_iokit(void) DTEntry entry; unsigned int size; - uint32_t *map; + uint32_t const *map; boot_progress_element *bootPict; norootIcon_lzss = NULL; norootClut_lzss = NULL; - PE_init_kprintf(TRUE); PE_init_printf(TRUE); kprintf("Kernel boot args: '%s'\n", PE_boot_args()); @@ -133,15 +132,15 @@ PE_init_iokit(void) * Fetch the CLUT and the noroot image. */ - if (kSuccess == DTLookupEntry(NULL, "/chosen/memory-map", &entry)) { - if (kSuccess == DTGetProperty(entry, "BootCLUT", (void **) &map, &size)) { + if (kSuccess == SecureDTLookupEntry(NULL, "/chosen/memory-map", &entry)) { + if (kSuccess == SecureDTGetProperty(entry, "BootCLUT", (void const **) &map, &size)) { if (sizeof(appleClut8) <= map[1]) { bcopy((void *)ml_static_ptovirt(map[0]), appleClut8, sizeof(appleClut8)); bootClutInitialized = TRUE; } } - if (kSuccess == DTGetProperty(entry, "Pict-FailedBoot", (void **) &map, &size)) { + if (kSuccess == SecureDTGetProperty(entry, "Pict-FailedBoot", (void const **) &map, &size)) { bootPict = (boot_progress_element *) ml_static_ptovirt(map[0]); default_noroot.width = bootPict->width; default_noroot.height = bootPict->height; @@ -151,11 +150,11 @@ PE_init_iokit(void) noroot_rle_Initialized = TRUE; } - if (kSuccess == DTGetProperty(entry, "FailedCLUT", (void **) &map, &size)) { + if (kSuccess == SecureDTGetProperty(entry, "FailedCLUT", (void const **) &map, &size)) { norootClut_lzss = (uint8_t*) ml_static_ptovirt(map[0]); } - if (kSuccess == DTGetProperty(entry, "FailedImage", (void **) &map, &size)) { + if (kSuccess == SecureDTGetProperty(entry, "FailedImage", (void const **) &map, &size)) { norootIcon_lzss = (boot_icon_element *) ml_static_ptovirt(map[0]); if (norootClut_lzss == NULL) { printf("ERROR: No FailedCLUT provided for noroot icon!\n"); @@ -184,7 +183,20 @@ PE_init_iokit(void) default_progress_data3x, (unsigned char *) appleClut8); - StartIOKit( PE_state.deviceTreeHead, PE_state.bootArgs, gPEEFIRuntimeServices, NULL); + /* + * x86 only minimally enforces lockdown in hardware. Additionally, some pre-lockdown functionality + * such as commpage initialization requires IOKit enumeration of CPUs, which is heavily entangled + * with the ACPI stack. Therefore, we start the IOKit matching process immediately on x86. + */ + InitIOKit(PE_state.deviceTreeHead); + StartIOKitMatching(); +} + +void +PE_lockdown_iokit(void) +{ + /* Ensure that at least the CPUs have been enumerated before moving forward. 
*/ + ml_wait_max_cpus(); } void @@ -198,6 +210,7 @@ PE_init_platform(boolean_t vm_initialized, void * _args) // New EFI-style PE_state.bootArgs = _args; PE_state.deviceTreeHead = (void *) ml_static_ptovirt(args->deviceTreeP); + PE_state.deviceTreeSize = args->deviceTreeLength; if (args->Video.v_baseAddr) { PE_state.video.v_baseAddr = args->Video.v_baseAddr;// remains physical address PE_state.video.v_rowBytes = args->Video.v_rowBytes; @@ -243,7 +256,7 @@ PE_init_platform(boolean_t vm_initialized, void * _args) if (!vm_initialized) { if (PE_state.deviceTreeHead) { - DTInit(PE_state.deviceTreeHead); + SecureDTInit(PE_state.deviceTreeHead, PE_state.deviceTreeSize); } pe_identify_machine(args); @@ -366,7 +379,7 @@ PE_i_can_has_debugger(uint32_t *debug_flags) { #if DEVELOPMENT || DEBUG if (debug_flags) { - assert(debug_boot_arg_inited); + assert(startup_phase >= STARTUP_SUB_TUNABLES); } #endif diff --git a/pexpert/i386/pe_kprintf.c b/pexpert/i386/pe_kprintf.c index ce2ff230e..64775063f 100644 --- a/pexpert/i386/pe_kprintf.c +++ b/pexpert/i386/pe_kprintf.c @@ -45,10 +45,15 @@ #include extern uint64_t LockTimeOut; -extern processor_t current_processor(void); /* Globals */ -void (*PE_kputc)(char c); +typedef void (*PE_kputc_t)(char); + +#if XNU_TARGET_OS_OSX +PE_kputc_t PE_kputc; +#else +SECURITY_READ_ONLY_LATE(PE_kputc_t) PE_kputc; +#endif #if DEVELOPMENT || DEBUG /* DEBUG kernel starts with true serial, but @@ -59,40 +64,34 @@ SECURITY_READ_ONLY_LATE(unsigned int) disable_serial_output = FALSE; SECURITY_READ_ONLY_LATE(unsigned int) disable_serial_output = TRUE; #endif -decl_simple_lock_data(static, kprintf_lock); +static SIMPLE_LOCK_DECLARE(kprintf_lock, 0); -void -PE_init_kprintf(boolean_t vm_initialized) +__startup_func +static void +PE_init_kprintf(void) { - unsigned int boot_arg; - if (PE_state.initialized == FALSE) { panic("Platform Expert not initialized"); } - if (!vm_initialized) { - unsigned int new_disable_serial_output = TRUE; + unsigned int new_disable_serial_output = TRUE; - simple_lock_init(&kprintf_lock, 0); - - if (PE_parse_boot_argn("debug", &boot_arg, sizeof(boot_arg))) { - if (boot_arg & DB_KPRT) { - new_disable_serial_output = FALSE; - } - } - - /* If we are newly enabling serial, make sure we only - * call pal_serial_init() if our previous state was - * not enabled */ - if (!new_disable_serial_output && (!disable_serial_output || pal_serial_init())) { - PE_kputc = pal_serial_putc; - } else { - PE_kputc = cnputc; - } + if (debug_boot_arg & DB_KPRT) { + new_disable_serial_output = FALSE; + } - disable_serial_output = new_disable_serial_output; + /* If we are newly enabling serial, make sure we only + * call pal_serial_init() if our previous state was + * not enabled */ + if (!new_disable_serial_output && (!disable_serial_output || pal_serial_init())) { + PE_kputc = pal_serial_putc; + } else { + PE_kputc = cnputc_unbuffered; } + + disable_serial_output = new_disable_serial_output; } +STARTUP(KPRINTF, STARTUP_RANK_FIRST, PE_init_kprintf); #if CONFIG_NO_KPRINTF_STRINGS /* Prevent CPP from breaking the definition below */ @@ -153,11 +152,14 @@ kprintf(const char *fmt, ...) return; } + va_start(listp, fmt); + va_copy(listp2, listp); + state = ml_set_interrupts_enabled(FALSE); pal_preemption_assert(); - in_panic_context = processor_in_panic_context(current_processor()); + in_panic_context = debug_is_current_cpu_in_panic_state(); // If current CPU is in panic context, be a little more impatient. 
kprintf_lock_grabbed = simple_lock_try_lock_mp_signal_safe_loop_duration(&kprintf_lock, @@ -169,10 +171,7 @@ kprintf(const char *fmt, ...) cpu_last_locked = cpu_number(); } - va_start(listp, fmt); - va_copy(listp2, listp); _doprnt(fmt, &listp, PE_kputc, 16); - va_end(listp); if (kprintf_lock_grabbed) { simple_unlock(&kprintf_lock); @@ -180,6 +179,8 @@ kprintf(const char *fmt, ...) ml_set_interrupts_enabled(state); + va_end(listp); + // If interrupts are enabled if (ml_get_interrupts_enabled()) { os_log_with_args(OS_LOG_DEFAULT, OS_LOG_TYPE_DEFAULT, fmt, listp2, caller); @@ -194,8 +195,6 @@ kprintf(const char *fmt, ...) } } - - extern void kprintf_break_lock(void); void kprintf_break_lock(void) diff --git a/pexpert/pexpert/GearImage.h b/pexpert/pexpert/GearImage.h index 40a3a896a..1e11e4320 100644 --- a/pexpert/pexpert/GearImage.h +++ b/pexpert/pexpert/GearImage.h @@ -8,8 +8,6 @@ const unsigned char gGearPict3x[9 * kGearFrames * kGearWidth * kGearHeight] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, - 0xfe, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, @@ -20,554 +18,3748 @@ const unsigned char gGearPict3x[9 * kGearFrames * kGearWidth * kGearHeight] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfd, 0xfd, - 0xfd, 0xfd, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfd, 0xfb, - 0xfb, 0xfd, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xfd, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
  [... remainder of this GearImage.h hunk (@@ -20,554 +18,3748 @@) omitted: it only removes and re-adds long runs of raw gGearPict3x pixel bytes (0xff-dominated RGB values) for the boot-gear image frames; no other code is touched ...]
0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe2, 0x4a, 0x12, + 0x12, 0x4a, 0xe2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0x17, 0x00, 0x00, + 0x00, 0x00, 0x17, 0xef, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x6e, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x6e, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3a, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x39, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x2d, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x2e, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x2e, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x2d, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x2e, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x2d, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xdb, 0xb6, 0xe2, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x2e, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x2d, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xf9, 0xf2, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe9, 0x54, 0x1a, 0x1a, 0x1b, 0x64, + 0xf2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x2e, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x2e, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, + 0xe2, 0xce, 0xce, 0xce, 0xdc, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x54, 0x1a, 0x1a, 0x1b, 0x1a, 0x1a, + 0x45, 0xf2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x2d, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x2d, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xd9, + 0xcf, 0xcf, 0xce, 0xce, 0xcf, 0xdd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xda, 0x1a, 0x1b, 0x1b, 0x1a, 0x1a, 0x1b, + 0x1a, 0x45, 0xf2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x2d, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x2e, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xda, 0xcf, + 0xcf, 0xce, 0xcf, 0xcf, 0xce, 0xcf, 0xf9, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xb7, 0x1b, 0x1a, 0x1b, 0x1b, 0x1a, 0x1b, + 0x1b, 0x1a, 0x44, 0xf2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x2e, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x2e, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xda, 0xce, 0xcf, + 0xcf, 0xce, 0xcf, 0xcf, 0xce, 0xcf, 0xf2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe2, 0x1a, 0x1a, 0x1a, 0x1a, 0x1a, 0x1b, + 0x1a, 0x1b, 0x1a, 0x45, 0xf2, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0x2d, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x2d, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xda, 0xcf, 0xcf, 0xce, + 0xce, 0xce, 0xcf, 0xce, 0xce, 0xce, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x65, 0x1a, 0x1a, 0x1a, 0x1a, 0x1a, + 0x1a, 0x1b, 0x1b, 0x1a, 0x45, 0xf2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x2e, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x2d, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xda, 0xcf, 0xce, 0xcf, 0xce, + 0xce, 0xcf, 0xce, 0xce, 0xce, 0xe2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf2, 0x44, 0x1a, 0x1b, 0x1a, 0x1a, + 0x1b, 0x1a, 0x1b, 0x1a, 0x1b, 0x44, 0xf2, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3a, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x39, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xda, 0xcf, 0xce, 0xcf, 0xcf, 0xce, + 0xcf, 0xcf, 0xcf, 0xce, 0xd9, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf2, 0x45, 0x1a, 0x1b, 0x1a, + 0x1a, 0x1a, 0x1b, 0x1b, 0x1a, 0x1a, 0x45, 0xf2, 0xff, 0xff, 0xff, 0xff, 0x6e, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x6e, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xda, 0xce, 0xce, 0xcf, 0xce, 0xce, 0xce, + 0xce, 0xcf, 0xcf, 0xd9, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf2, 0x45, 0x1b, 0x1a, + 0x1a, 0x1b, 0x1b, 0x1b, 0x1b, 0x1b, 0x1a, 0x51, 0xff, 0xff, 0xff, 0xff, 0xef, 0x17, 0x00, 0x00, + 0x00, 0x00, 0x17, 0xf0, 0xff, 0xff, 0xff, 0xff, 0xdc, 0xcf, 0xcf, 0xcf, 0xcf, 0xce, 0xce, 0xce, + 0xce, 0xcf, 0xda, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf2, 0x45, 0x1b, + 0x1a, 0x1a, 0x1a, 0x1a, 0x1a, 0x1a, 0x1a, 0x1b, 0xae, 0xff, 0xff, 0xff, 0xff, 0xe3, 0x49, 0x12, + 0x12, 0x49, 0xe2, 0xff, 0xff, 0xff, 0xff, 0xef, 0xcf, 0xcf, 0xcf, 0xcf, 0xce, 0xcf, 0xcf, 0xcf, + 0xcf, 0xda, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf2, 0x45, + 0x1b, 0x1b, 0x1a, 0x1b, 0x1a, 0x1b, 0x1a, 0x1b, 0x74, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe4, 0xce, 0xce, 0xce, 0xcf, 0xcf, 0xcf, 0xcf, 0xce, + 0xd9, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf2, + 0x45, 0x1b, 0x1a, 0x1a, 0x1b, 0x1b, 0x1a, 0x1b, 0x7e, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe7, 0xcf, 0xce, 0xce, 0xce, 0xcf, 0xcf, 0xce, 0xda, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xfe, 0xfc, 0xff, 0xfd, 0xda, 0xad, 0x94, 0x8b, 0x89, 0x87, 0x84, 0x90, 0xb6, 0xe8, - 0xff, 0xff, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, - 0xf9, 0xea, 0xdf, 0xdb, 0xdb, 0xda, 0xda, 0xdd, 0xe6, 0xf4, 0xfe, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xf2, 0xda, 0xb9, 0xa0, 0x98, 0xa0, 0xc2, 0xee, 0xff, - 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xf2, 0x44, 0x1a, 0x1b, 0x1a, 0x1a, 0x1a, 0x1b, 0xcc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf5, 0xcf, 0xce, 0xce, 0xcf, 0xcf, 0xce, 0xda, 0xfc, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xf2, 0x51, 0x1a, 0x1a, 0x1a, 0x1c, 0x8b, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe9, 0xcf, 0xcf, 0xce, 0xcf, 0xdd, 0xfc, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xae, 0x74, 0x7f, 0xcc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf5, 0xe7, 0xe4, 0xf0, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xf4, 0x9d, 0x75, 0x6c, 0x6c, 0x6c, 0x6b, 0x6c, 0x6b, 0x6c, 0x6c, 0x6b, 0x6b, + 0x75, 0x9d, 0xf4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfb, 0xdf, 0xd1, + 0xcd, 0xce, 0xcd, 0xce, 0xcd, 0xcd, 0xce, 0xce, 0xce, 0xce, 0xd1, 0xdf, 0xfb, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xeb, 0x57, 0x43, 0x44, 0x44, 0x43, 0x44, 0x43, 0x44, 0x44, 0x44, 0x44, 0x43, 0x44, + 0x44, 0x44, 0x57, 0xec, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf8, 0xc6, 0xbf, 0xbf, + 0xbf, 0xbf, 0xbf, 0xbe, 0xbf, 0xbf, 0xbe, 0xbe, 0xbe, 0xbf, 0xbf, 0xbf, 0xc6, 0xf8, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0x81, 0x44, 0x43, 0x43, 0x43, 0x44, 0x44, 0x44, 0x44, 0x43, 0x43, 0x44, 0x44, 0x44, + 0x44, 0x44, 0x43, 0x80, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd6, 0xbe, 0xbf, 0xbf, + 0xbe, 0xbf, 0xbe, 0xbe, 0xbf, 0xbe, 0xbf, 0xbe, 0xbf, 0xbf, 0xbe, 0xbf, 0xbf, 0xd6, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0x52, 0x43, 0x44, 0x44, 0x43, 0x44, 0x44, 0x43, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, + 0x44, 0x44, 0x44, 0x53, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc4, 0xbe, 0xbf, 0xbf, + 0xbf, 0xbf, 0xbe, 0xbf, 0xbf, 0xbf, 0xbe, 0xbf, 0xbf, 0xbf, 0xbf, 0xbe, 0xbf, 0xc4, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0x52, 0x44, 0x44, 0x44, 0x44, 0x44, 0x43, 0x44, 0x43, 0x44, 0x44, 0x44, 0x44, 0x44, + 0x44, 0x44, 0x44, 0x53, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc5, 0xbf, 0xbf, 0xbf, + 0xbe, 0xbf, 0xbf, 0xbf, 0xbf, 0xbf, 0xbe, 0xbe, 0xbf, 0xbe, 0xbf, 0xbe, 0xbf, 0xc5, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0x81, 0x43, 0x43, 0x44, 0x44, 0x44, 0x44, 0x43, 0x44, 0x44, 0x43, 0x44, 0x43, 0x44, + 0x44, 0x44, 0x44, 0x81, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd6, 0xbf, 0xbf, 0xbf, + 0xbf, 0xbf, 0xbe, 0xbe, 0xbf, 0xbf, 0xbf, 0xbf, 0xbe, 0xbe, 0xbf, 0xbf, 0xbe, 0xd6, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xeb, 0x58, 0x43, 0x44, 0x44, 0x43, 0x43, 0x43, 0x44, 0x44, 0x44, 0x44, 0x44, 0x43, + 0x43, 0x44, 0x58, 0xec, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf8, 0xc6, 0xbe, 0xbf, + 0xbf, 0xbf, 0xbf, 0xbe, 0xbf, 0xbf, 0xbf, 0xbe, 0xbe, 0xbf, 0xbf, 0xbf, 0xc6, 0xf9, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xf5, 0x9d, 0x74, 0x6c, 0x6b, 0x6b, 0x6b, 0x6b, 0x6b, 0x6b, 0x6c, 0x6c, 0x6c, + 0x75, 0x9d, 0xf5, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfb, 0xdf, 0xd1, + 0xce, 0xce, 0xce, 0xcd, 0xcd, 0xcd, 0xcd, 0xce, 0xce, 0xcd, 0xd2, 0xde, 0xfb, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xd2, 0xb1, 0xb7, 0xe3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xd6, 0xd2, 0xe5, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xf8, 0x9c, 0x76, 0x76, 0x76, 0x78, 0xbe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xda, 0xaf, 0xaf, 0xaf, 0xaf, 0xc6, 0xfb, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xf9, 0x94, 0x76, 0x76, 0x76, 0x76, 0x76, 0x77, 0xe2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0, 0xaf, 0xaf, 0xb0, 0xaf, 0xaf, 0xaf, 0xc1, 0xfb, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf8, + 0x94, 0x76, 0x76, 0x76, 0x76, 0x76, 0x76, 0x76, 0xb8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd6, 0xaf, 0xaf, 0xb0, 0xaf, 0xb0, 0xaf, 0xaf, 0xc1, + 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf8, 0x93, + 0x76, 0x76, 0x76, 0x76, 0x76, 0x76, 0x76, 0x76, 0xb1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd3, 0xaf, 0xaf, 0xaf, 0xaf, 0xb0, 
0xaf, 0xaf, 0xaf, + 0xc2, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf9, 0x94, 0x76, + 0x76, 0x76, 0x76, 0x76, 0x76, 0x76, 0x76, 0x76, 0xd3, 0xff, 0xff, 0xff, 0xff, 0xf5, 0xb9, 0x9b, + 0x9b, 0xb8, 0xf4, 0xff, 0xff, 0xff, 0xff, 0xe5, 0xb0, 0xaf, 0xaf, 0xaf, 0xaf, 0xaf, 0xaf, 0xaf, + 0xaf, 0xc1, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf8, 0x94, 0x76, 0x76, + 0x76, 0x76, 0x76, 0x76, 0x76, 0x76, 0x76, 0x9b, 0xff, 0xff, 0xff, 0xff, 0xf9, 0x9f, 0x93, 0x92, + 0x92, 0x92, 0x9f, 0xf9, 0xff, 0xff, 0xff, 0xff, 0xc6, 0xaf, 0xaf, 0xaf, 0xb0, 0xaf, 0xaf, 0xaf, + 0xb0, 0xaf, 0xc1, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf9, 0x94, 0x76, 0x76, 0x76, + 0x76, 0x76, 0x76, 0x76, 0x76, 0x76, 0x94, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xc8, 0x92, 0x92, 0x92, + 0x93, 0x92, 0x92, 0xc9, 0xff, 0xff, 0xff, 0xff, 0xfb, 0xc1, 0xaf, 0xaf, 0xb0, 0xaf, 0xaf, 0xb0, + 0xaf, 0xaf, 0xaf, 0xc1, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf9, 0x93, 0x76, 0x76, 0x76, 0x76, + 0x76, 0x76, 0x76, 0x76, 0x76, 0x93, 0xf9, 0xff, 0xff, 0xff, 0xff, 0xff, 0xb2, 0x92, 0x92, 0x92, + 0x92, 0x93, 0x93, 0xb2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfb, 0xc1, 0xaf, 0xb0, 0xaf, 0xb0, 0xaf, + 0xaf, 0xaf, 0xb0, 0xaf, 0xc1, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xa8, 0x76, 0x76, 0x76, 0x76, 0x76, + 0x76, 0x76, 0x76, 0x76, 0x94, 0xf9, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xaa, 0x93, 0x92, 0x92, + 0x93, 0x92, 0x92, 0xaa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfb, 0xc1, 0xaf, 0xaf, 0xb0, 0xaf, + 0xaf, 0xaf, 0xaf, 0xaf, 0xaf, 0xcd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0x76, 0x76, 0x76, 0x76, 0x76, 0x76, + 0x76, 0x76, 0x76, 0x93, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xaa, 0x92, 0x93, 0x92, + 0x93, 0x93, 0x92, 0xab, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfb, 0xc1, 0xaf, 0xaf, 0xaf, + 0xaf, 
0xaf, 0xaf, 0xb0, 0xaf, 0xb0, 0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd7, 0x76, 0x76, 0x76, 0x76, 0x76, 0x76, + 0x76, 0x76, 0x94, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xaa, 0x92, 0x92, 0x92, + 0x92, 0x93, 0x92, 0xab, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfb, 0xc1, 0xaf, 0xb0, + 0xaf, 0xaf, 0xaf, 0xaf, 0xaf, 0xaf, 0xe9, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xeb, 0x76, 0x76, 0x76, 0x76, 0x76, 0x76, + 0x76, 0x94, 0xf9, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xab, 0x92, 0x93, 0x92, + 0x92, 0x92, 0x93, 0xaa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfb, 0xc1, 0xaf, + 0xaf, 0xaf, 0xaf, 0xb0, 0xaf, 0xaf, 0xf3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x9d, 0x76, 0x76, 0x76, 0x76, 0x76, + 0x93, 0xf9, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xab, 0x92, 0x92, 0x93, + 0x92, 0x93, 0x93, 0xab, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfb, 0xc1, + 0xaf, 0xaf, 0xaf, 0xaf, 0xaf, 0xc8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf3, 0x9e, 0x76, 0x76, 0x76, 0xa8, + 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xab, 0x92, 0x93, 0x92, + 0x93, 0x93, 0x92, 0xab, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfb, + 0xce, 0xb0, 0xaf, 0xaf, 0xc7, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xec, 0xd7, 0xef, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xaa, 0x92, 0x93, 0x92, + 0x92, 0x93, 0x92, 0xaa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xf6, 0xe9, 0xf4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xaa, 0x92, 0x92, 0x93, + 0x93, 0x93, 0x92, 0xaa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xab, 0x92, 0x92, 0x92, + 0x93, 0x93, 0x93, 0xab, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xaa, 0x93, 0x92, 0x92, + 0x92, 0x93, 0x93, 0xab, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xb1, 0x92, 0x92, 0x93, + 0x93, 0x93, 0x93, 0xb1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc8, 0x93, 0x92, 0x92, + 0x93, 0x92, 0x92, 0xc8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf8, 0x9f, 0x93, 0x92, + 0x93, 0x92, 0x9f, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf5, 0xb9, 0x9c, + 0x9b, 0xb9, 0xf5, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 
+	/* embedded image pixel data: a long run of comma-separated byte constants
+	 * (predominantly 0xff, with clusters of values such as 0x0b, 0x28, 0x7b,
+	 * 0x9a, 0xb5, 0xc5 forming the image's foreground); the original patch
+	 * lines were collapsed during extraction and the individual values are
+	 * not reproduced here */
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf1, 0x26, 0x12, 0x11, + 0x12, 0x11, 0x26, 0xf1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7a, 0x12, 0x11, 0x11, + 0x12, 0x12, 0x12, 0x7a, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x49, 0x12, 0x11, 0x12, + 0x11, 0x12, 0x12, 0x49, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3d, 0x12, 0x12, 0x12, + 0x12, 0x12, 0x12, 0x3d, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3d, 0x11, 0x12, 0x12, + 0x12, 0x11, 0x12, 0x3d, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3d, 0x12, 0x12, 0x12, + 0x12, 0x11, 0x11, 0x3c, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe0, 0xc2, 0xe7, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3d, 0x12, 0x11, 0x11, + 0x12, 0x12, 0x12, 0x3c, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xe6, 0xc1, 0xe0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xed, 0x69, 0x34, 0x34, 0x34, 0x7a, + 0xf5, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3d, 0x12, 0x12, 0x12, + 0x12, 0x12, 0x12, 0x3d, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf4, + 0x79, 0x33, 0x34, 0x34, 0x6a, 0xed, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x6a, 0x34, 0x34, 0x34, 0x34, 0x34, + 0x5d, 0xf5, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3c, 0x12, 0x12, 0x12, + 0x12, 0x12, 0x12, 0x3d, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf4, 0x5c, + 0x34, 0x34, 0x34, 0x34, 0x34, 0x6a, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe0, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, + 0x34, 0x5c, 0xf5, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3c, 0x12, 0x12, 0x12, + 0x12, 0x12, 0x11, 0x3d, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf4, 0x5d, 0x34, + 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0xe0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc2, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, + 0x34, 0x34, 0x5c, 0xf4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3d, 0x12, 0x12, 0x12, + 0x12, 0x11, 0x12, 0x3d, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf4, 0x5c, 0x34, 0x34, + 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0xc1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe6, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, + 0x34, 0x34, 0x34, 0x5d, 0xf4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3d, 0x11, 0x12, 0x12, + 0x11, 0x12, 0x12, 0x3c, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf4, 0x5c, 0x34, 0x34, 0x34, + 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0xe7, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x79, 0x34, 0x33, 0x33, 0x34, 0x34, + 0x34, 0x34, 0x34, 0x34, 0x5d, 0xf5, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3d, 0x12, 0x11, 0x11, + 0x12, 0x12, 0x12, 0x3c, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf4, 0x5d, 0x34, 0x34, 0x34, 0x34, + 0x34, 0x34, 0x34, 0x34, 0x34, 0x7a, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf5, 0x5c, 0x34, 0x34, 0x34, 0x34, + 0x34, 0x33, 0x34, 0x34, 0x34, 0x5c, 0xf5, 0xff, 0xff, 0xff, 0xff, 0xff, 0x49, 0x12, 0x12, 0x11, + 0x12, 0x12, 0x11, 0x49, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf5, 0x5c, 0x34, 0x34, 0x34, 0x34, 0x34, + 0x34, 0x34, 0x34, 0x34, 0x5c, 0xf4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf4, 0x5c, 0x34, 0x34, 0x34, + 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x5d, 0xf4, 0xff, 0xff, 0xff, 0xff, 0x7a, 0x12, 0x12, 0x12, + 0x12, 0x12, 0x11, 0x7a, 0xff, 0xff, 0xff, 0xff, 0xf4, 0x5d, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, + 0x34, 0x34, 0x34, 0x5c, 0xf5, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf4, 0x5c, 0x34, 0x34, + 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x67, 0xff, 0xff, 0xff, 0xff, 0xf1, 0x27, 0x12, 0x12, + 0x11, 0x11, 0x26, 0xf1, 0xff, 0xff, 0xff, 0xff, 0x68, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x33, + 0x34, 0x34, 0x5d, 0xf4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf5, 0x5d, 0x34, + 0x34, 0x34, 0x34, 0x34, 0x34, 0x33, 0x34, 0x34, 0xba, 0xff, 0xff, 0xff, 0xff, 0xe5, 0x58, 0x21, + 0x21, 0x57, 0xe5, 0xff, 0xff, 0xff, 0xff, 0xba, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, + 0x34, 0x5c, 0xf4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf5, 0x5d, + 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x88, 0x34, 0x34, 0x33, 0x33, 0x34, 0x34, 0x34, 0x34, + 0x5c, 0xf4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf4, + 0x5c, 0x34, 0x33, 0x34, 0x34, 0x34, 0x34, 0x34, 0x90, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x90, 0x34, 0x33, 0x34, 0x34, 0x34, 0x34, 0x34, 0x5d, + 0xf5, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xf4, 0x5c, 0x34, 0x34, 0x34, 0x34, 0x34, 0x35, 0xd4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd4, 0x36, 0x34, 0x34, 0x34, 0x34, 0x34, 0x5d, 0xf5, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xf5, 0x68, 0x34, 0x34, 0x34, 0x36, 0x9b, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x9c, 0x35, 0x34, 0x33, 0x34, 0x67, 0xf4, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xba, 0x87, 0x90, 0xd4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd3, 0x90, 0x87, 0xba, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xf6, 0xb0, 0x8e, 0x86, 0x85, 0x85, 0x85, 0x85, 0x85, 0x85, 0x85, 0x85, 0x85, + 0x8e, 0xaf, 0xf5, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfb, 0xe4, 0xd8, + 0xd5, 0xd5, 0xd5, 0xd5, 0xd5, 0xd4, 0xd5, 0xd5, 0xd5, 0xd5, 0xd9, 0xe4, 0xfb, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xef, 0x75, 0x63, 0x64, 0x63, 0x63, 0x63, 0x63, 0x64, 0x64, 0x64, 0x63, 0x63, 0x64, + 0x64, 0x63, 0x75, 0xef, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf9, 0xce, 0xc8, 0xc8, + 0xc9, 0xc9, 0xc8, 0xc8, 0xc9, 0xc9, 0xc8, 0xc8, 0xc8, 0xc8, 0xc9, 0xc8, 0xcf, 0xfa, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0x98, 0x63, 0x63, 0x63, 0x63, 0x64, 0x64, 0x63, 0x63, 0x63, 0x63, 0x63, 0x64, 0x63, + 0x64, 0x63, 0x63, 0x98, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xdc, 0xc8, 0xc8, 0xc8, + 0xc8, 0xc8, 0xc8, 0xc8, 0xc9, 0xc8, 0xc8, 0xc9, 0xc9, 0xc8, 0xc8, 0xc8, 0xc9, 0xdc, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0x71, 0x63, 0x64, 0x64, 0x63, 0x64, 0x63, 0x63, 0x64, 0x64, 0x64, 0x63, 0x64, 0x63, + 0x64, 0x63, 0x64, 0x71, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xcd, 0xc8, 0xc8, 0xc8, + 0xc9, 0xc9, 0xc8, 0xc8, 0xc9, 0xc9, 0xc8, 0xc9, 0xc9, 0xc8, 0xc9, 0xc8, 0xc9, 0xcd, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0x70, 0x64, 0x64, 0x64, 0x64, 0x64, 0x63, 0x64, 0x63, 0x64, 0x63, 0x64, 0x63, 0x63, + 0x63, 0x63, 0x64, 0x70, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xce, 0xc8, 0xc8, 0xc9, + 0xc8, 0xc9, 0xc8, 0xc9, 0xc9, 0xc8, 0xc8, 0xc8, 0xc8, 0xc8, 0xc9, 0xc8, 0xc8, 0xce, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0x98, 0x63, 0x63, 0x64, 0x63, 0x63, 0x64, 0x63, 0x64, 0x63, 0x63, 0x63, 0x63, 0x63, + 0x63, 0x63, 0x63, 0x98, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xdc, 0xc8, 0xc9, 0xc9, + 0xc9, 0xc8, 0xc8, 0xc8, 0xc9, 0xc9, 0xc9, 0xc8, 0xc8, 0xc8, 0xc9, 0xc9, 0xc8, 0xdc, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xef, 0x75, 0x63, 0x63, 0x64, 0x63, 0x63, 0x63, 0x64, 0x64, 0x64, 0x63, 0x63, 0x63, + 0x63, 0x64, 0x75, 0xf0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf9, 0xce, 0xc8, 0xc8, + 0xc8, 0xc9, 0xc8, 0xc8, 0xc9, 0xc9, 0xc8, 0xc8, 0xc8, 0xc9, 0xc9, 0xc8, 0xcf, 0xfa, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xf6, 0xb0, 0x8d, 0x85, 0x85, 0x85, 0x85, 0x85, 0x85, 0x85, 0x85, 0x85, 0x86, + 0x8e, 0xaf, 0xf5, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfb, 0xe4, 0xd8, + 0xd5, 0xd5, 0xd5, 0xd4, 0xd5, 0xd5, 0xd5, 0xd5, 0xd5, 0xd5, 0xd9, 0xe3, 0xfb, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xd9, 0xbc, 0xc2, 0xe7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf1, 0xdb, 0xd8, 0xe9, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xf8, 0xab, 0x89, 0x89, 0x8a, 0x8b, 0xc8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xdf, 0xb9, 0xb9, 0xb9, 0xb9, 0xcc, 0xfb, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xf8, 0xa3, 0x89, 0x8a, 0x8a, 0x89, 0x89, 0x8a, 0xe7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf1, 0xb9, 0xb9, 0xb9, 0xb8, 0xb9, 0xb9, 0xc8, 0xfb, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf8, + 0xa4, 0x89, 0x8a, 0x8a, 0x89, 0x89, 0x8a, 0x8a, 0xc2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xdb, 0xb9, 0xb9, 0xb9, 0xb9, 0xb9, 0xb9, 0xb8, 0xc8, + 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf8, 0xa4, + 0x8a, 0x89, 0x89, 0x8a, 0x8a, 0x89, 0x89, 0x89, 0xbc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd7, 0xb8, 0xb9, 0xb8, 0xb8, 0xb9, 0xb9, 0xb9, 0xb9, + 0xc9, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf9, 0xa3, 0x8a, + 0x89, 0x89, 0x8a, 0x8a, 0x89, 0x8a, 0x8a, 0x8a, 0xda, 0xff, 0xff, 0xff, 0xff, 0xf6, 0xc6, 0xae, + 0xae, 0xc6, 0xf5, 0xff, 0xff, 0xff, 0xff, 0xe9, 0xb9, 0xb9, 0xb9, 0xb9, 0xb9, 0xb8, 0xb8, 0xb8, + 0xb8, 0xc9, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf8, 0xa4, 0x89, 0x8a, + 0x89, 0x89, 0x89, 0x8a, 0x8a, 0x8a, 0x89, 0xaa, 0xff, 0xff, 0xff, 0xff, 0xfa, 0xb2, 0xa7, 0xa7, + 0xa7, 0xa7, 0xb1, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xcd, 0xb9, 0xb9, 0xb9, 0xb9, 0xb9, 0xb9, 0xb9, + 0xb9, 0xb9, 0xc8, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf8, 0xa4, 0x89, 0x8a, 0x8a, + 0x89, 0x8a, 0x89, 0x89, 0x89, 0x8a, 0xa4, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xd3, 0xa7, 0xa7, 0xa7, + 0xa7, 0xa7, 0xa7, 0xd4, 0xff, 0xff, 0xff, 0xff, 0xfb, 0xc8, 0xb9, 0xb9, 0xb9, 0xb9, 0xb9, 0xb9, + 0xb9, 0xb9, 0xb8, 0xc8, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf9, 0xa3, 0x8a, 0x89, 0x89, 0x8a, + 0x8a, 0x89, 0x8a, 0x89, 0x8a, 0xa3, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc0, 0xa7, 0xa7, 0xa7, + 0xa7, 0xa7, 0xa7, 0xc0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfb, 0xc8, 0xb9, 0xb9, 0xb9, 0xb9, 0xb9, + 0xb9, 0xb8, 0xb9, 0xb8, 0xc8, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xb5, 0x89, 0x89, 0x8a, 0x89, 0x89, + 0x89, 0x89, 0x89, 0x89, 0xa4, 0xf9, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbb, 0xa7, 0xa7, 0xa7, + 0xa7, 0xa7, 0xa7, 0xbb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfb, 0xc8, 0xb9, 0xb9, 0xb9, 0xb9, + 0xb8, 0xb9, 0xb8, 0xb9, 0xb9, 0xd4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf2, 0x89, 0x89, 0x89, 0x89, 0x8a, 0x8a, + 0x89, 0x8a, 0x89, 0xa3, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbb, 0xa7, 0xa7, 0xa7, + 0xa7, 0xa7, 0xa7, 0xbb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfb, 0xc8, 0xb8, 0xb9, 0xb9, + 0xb8, 0xb9, 0xb9, 0xb9, 0xb8, 0xb9, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xde, 0x8a, 0x8a, 0x89, 0x89, 0x8a, 0x89, + 0x8a, 0x89, 0xa3, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbb, 0xa7, 0xa7, 0xa7, + 0xa7, 0xa7, 0xa7, 0xbb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfb, 0xc8, 0xb9, 0xb9, + 0xb9, 0xb9, 0xb9, 0xb9, 0xb9, 0xb9, 0xeb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xee, 0x8a, 0x8a, 0x8a, 0x8a, 0x89, 0x8a, + 0x8a, 0xa3, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbc, 0xa7, 0xa7, 0xa7, + 0xa7, 0xa7, 0xa7, 0xbb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfb, 0xc9, 0xb8, + 0xb9, 0xb9, 0xb9, 0xb9, 0xb9, 0xb9, 0xf5, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xac, 0x8a, 0x89, 0x8a, 0x8a, 0x89, + 0xa4, 0xf9, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbb, 0xa7, 0xa7, 0xa7, + 0xa7, 0xa7, 0xa7, 0xbb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfb, 0xc8, + 0xb9, 0xb9, 0xb9, 0xb8, 0xb9, 0xce, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf5, 0xab, 0x8a, 0x89, 0x89, 0xb5, + 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbb, 0xa6, 0xa7, 0xa7, + 0xa7, 0xa7, 0xa7, 0xbc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfb, + 0xd3, 0xb9, 0xb9, 0xb9, 0xcd, 0xf9, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xee, 0xde, 0xf2, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbb, 0xa7, 0xa7, 0xa7, + 0xa7, 0xa7, 0xa7, 0xbb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xf8, 0xec, 0xf5, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbb, 0xa7, 0xa7, 0xa7, + 0xa7, 0xa7, 0xa7, 0xbb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbb, 0xa7, 0xa7, 0xa6, + 0xa7, 0xa7, 0xa7, 0xbb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbb, 0xa7, 0xa7, 0xa7, + 0xa7, 0xa7, 0xa7, 0xbb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc0, 0xa7, 0xa7, 0xa7, + 0xa7, 0xa7, 0xa7, 0xc0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd4, 0xa7, 0xa7, 0xa7, + 0xa7, 0xa7, 0xa7, 0xd3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfa, 0xb2, 0xa7, 0xa7, + 0xa7, 0xa7, 0xb2, 0xf9, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf6, 0xc6, 0xae, + 0xae, 0xc6, 0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
[Elided: a long run of raw hex byte values (0x00–0xff), the embedded grayscale bitmap asset this hunk adds; the individual pixel values carry no further information for the reader.]
0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xf5, 0x78, 0x55, 0x55, 0x55, 0x55, 0x55, 0x55, 0xdb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc6, 0x0c, 0x0a, 0x0b, 0x0b, 0x0b, 0x0b, 0x34, 0xf1, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xf6, 0x82, 0x54, 0x55, 0x55, 0x56, 0xad, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f, 0x0c, 0x0a, 0x0a, 0x0a, 0x41, 0xf1, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xc6, 0x9d, 0xa4, 0xdb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc6, 0x71, 0x66, 0xa5, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 
0xff, 0xff, 0xf8, 0xbe, 0xa3, 0x9b, 0x9c, 0x9c, 0x9b, 0x9b, 0x9b, 0x9b, 0x9b, 0x9c, 0x9b, + 0xa2, 0xbe, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf7, 0xbd, 0xa0, + 0x99, 0x99, 0x99, 0x99, 0x9a, 0x99, 0x99, 0x99, 0x99, 0x99, 0xa0, 0xbd, 0xf7, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xf2, 0x8d, 0x7f, 0x7e, 0x7f, 0x7e, 0x7e, 0x7e, 0x7f, 0x7f, 0x7e, 0x7f, 0x7e, 0x7f, + 0x7e, 0x7f, 0x8c, 0xf2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf2, 0x8b, 0x7b, 0x7b, + 0x7c, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7c, 0x7b, 0x7b, 0x8b, 0xf2, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xab, 0x7f, 0x7e, 0x7f, 0x7e, 0x7e, 0x7f, 0x7f, 0x7f, 0x7e, 0x7e, 0x7f, 0x7e, 0x7f, + 0x7f, 0x7e, 0x7e, 0xab, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xa8, 0x7b, 0x7c, 0x7b, + 0x7b, 0x7c, 0x7c, 0x7b, 0x7b, 0x7c, 0x7b, 0x7c, 0x7c, 0x7b, 0x7b, 0x7c, 0x7b, 0xa8, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0x89, 0x7e, 0x7e, 0x7e, 0x7f, 0x7f, 0x7f, 0x7e, 0x7f, 0x7f, 0x7f, 0x7f, 0x7e, 0x7e, + 0x7f, 0x7f, 0x7f, 0x89, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x86, 0x7b, 0x7b, 0x7b, + 0x7c, 0x7b, 0x7b, 0x7c, 0x7b, 0x7c, 0x7b, 0x7c, 0x7b, 0x7b, 0x7c, 0x7b, 0x7b, 0x86, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0x89, 0x7e, 0x7f, 0x7e, 0x7e, 0x7f, 0x7f, 0x7e, 0x7f, 0x7e, 0x7f, 0x7e, 0x7f, 0x7e, + 0x7f, 0x7e, 0x7f, 0x8a, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x86, 0x7b, 0x7b, 0x7c, + 0x7b, 0x7b, 0x7c, 0x7b, 0x7b, 0x7b, 0x7c, 0x7c, 0x7c, 0x7b, 0x7b, 0x7b, 0x7b, 0x86, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xab, 0x7e, 0x7f, 0x7f, 0x7e, 0x7e, 0x7e, 0x7e, 0x7f, 0x7e, 0x7f, 0x7f, 0x7e, 0x7f, + 0x7f, 0x7f, 0x7f, 0xaa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xa8, 0x7c, 0x7c, 0x7b, + 0x7b, 0x7b, 0x7c, 0x7b, 0x7c, 0x7c, 0x7b, 0x7b, 0x7b, 0x7b, 0x7c, 0x7c, 0x7b, 0xa8, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xf2, 0x8d, 0x7f, 
0x7e, 0x7f, 0x7f, 0x7f, 0x7f, 0x7e, 0x7f, 0x7e, 0x7f, 0x7f, 0x7f, + 0x7f, 0x7f, 0x8d, 0xf2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf2, 0x8b, 0x7b, 0x7b, + 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7c, 0x7c, 0x7b, 0x7c, 0x7b, 0x7b, 0x7b, 0x8a, 0xf2, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xf7, 0xbe, 0xa2, 0x9b, 0x9c, 0x9b, 0x9b, 0x9b, 0x9b, 0x9b, 0x9c, 0x9c, 0x9b, + 0xa3, 0xbe, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf8, 0xbd, 0xa0, + 0x99, 0x99, 0x9a, 0x99, 0x99, 0x99, 0x9a, 0x99, 0x9a, 0x99, 0xa0, 0xbc, 0xf7, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xdf, 0xc6, 0xcb, 0xea, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf3, 0xe1, 0xdf, 0xed, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xfa, 0xb6, 0x9b, 0x9a, 0x9a, 0x9b, 0xd0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe4, 0xc5, 0xc5, 0xc5, 0xc4, 0xd4, 0xfc, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xfa, 0xb1, 0x9a, 0x9a, 0x9a, 0x9a, 0x9a, 0x9a, 0xea, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf4, 0xc4, 0xc4, 0xc5, 0xc5, 0xc5, 0xc5, 0xd3, 0xfc, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfa, + 0xb1, 0x9a, 0x9a, 0x9b, 0x9a, 0x9b, 0x9a, 0x9a, 0xca, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe1, 0xc5, 0xc4, 0xc5, 0xc4, 0xc5, 0xc5, 0xc5, 0xd3, + 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf9, 0xb0, + 0x9a, 0x9a, 0x9b, 0x9a, 0x9a, 0x9a, 0x9a, 0x9a, 0xc6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xdf, 0xc5, 0xc5, 0xc5, 0xc4, 0xc5, 0xc5, 0xc5, 0xc5, + 0xd3, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfa, 0xb1, 0x9a, + 0x9a, 0x9b, 0x9b, 0x9a, 0x9a, 0x9a, 0x9b, 0x9a, 0xdf, 0xff, 0xff, 0xff, 0xff, 0xf8, 0xcf, 0xbb, + 0xbb, 0xcf, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xec, 0xc5, 0xc5, 0xc5, 0xc5, 0xc5, 0xc5, 0xc5, 0xc5, + 0xc5, 0xd2, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf9, 0xb0, 0x9a, 0x9a, + 0x9a, 0x9a, 0x9b, 0x9a, 0x9a, 0x9b, 0x9a, 0xb6, 0xff, 0xff, 0xff, 0xff, 0xfb, 0xbd, 0xb5, 0xb5, + 0xb5, 0xb5, 0xbe, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xd5, 0xc4, 0xc5, 0xc4, 0xc5, 0xc5, 0xc5, 0xc4, + 0xc5, 0xc5, 0xd2, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfa, 0xb0, 
0x9b, 0x9b, 0x9b, + 0x9a, 0x9b, 0x9a, 0x9a, 0x9a, 0x9b, 0xb1, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xd9, 0xb6, 0xb5, 0xb5, + 0xb5, 0xb5, 0xb5, 0xda, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xd3, 0xc5, 0xc5, 0xc5, 0xc4, 0xc5, 0xc5, + 0xc5, 0xc5, 0xc5, 0xd2, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfa, 0xb0, 0x9b, 0x9a, 0x9a, 0x9b, + 0x9a, 0x9b, 0x9b, 0x9a, 0x9b, 0xb1, 0xf9, 0xff, 0xff, 0xff, 0xff, 0xff, 0xcb, 0xb6, 0xb5, 0xb5, + 0xb6, 0xb6, 0xb5, 0xcb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xd2, 0xc4, 0xc5, 0xc5, 0xc5, 0xc4, + 0xc5, 0xc4, 0xc5, 0xc5, 0xd2, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc0, 0x9a, 0x9a, 0x9b, 0x9b, 0x9a, + 0x9a, 0x9a, 0x9a, 0x9a, 0xb0, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc6, 0xb5, 0xb5, 0xb5, + 0xb5, 0xb5, 0xb5, 0xc6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xd3, 0xc4, 0xc5, 0xc5, 0xc5, + 0xc5, 0xc5, 0xc5, 0xc4, 0xc5, 0xdb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf3, 0x9b, 0x9b, 0x9a, 0x9b, 0x9a, 0x9a, + 0x9a, 0x9a, 0x9a, 0xb0, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc6, 0xb5, 0xb6, 0xb5, + 0xb6, 0xb5, 0xb5, 0xc6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xd2, 0xc4, 0xc5, 0xc4, + 0xc4, 0xc5, 0xc5, 0xc5, 0xc5, 0xc5, 0xf9, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe2, 0x9a, 0x9a, 0x9a, 0x9a, 0x9b, 0x9a, + 0x9a, 0x9a, 0xb1, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc6, 0xb5, 0xb5, 0xb5, + 0xb5, 0xb5, 0xb5, 0xc6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xd2, 0xc5, 0xc5, + 0xc5, 0xc4, 0xc5, 0xc5, 0xc5, 0xc4, 0xee, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0, 0x9a, 0x9a, 0x9b, 0x9a, 0x9b, 0x9a, + 0x9a, 0xb1, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc6, 0xb5, 0xb5, 0xb5, + 0xb5, 0xb5, 0xb5, 0xc7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xd2, 0xc5, + 0xc5, 0xc4, 0xc5, 0xc5, 0xc5, 0xc5, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xb8, 0x9b, 0x9a, 0x9a, 0x9a, 0x9a, + 0xb0, 
0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc6, 0xb5, 0xb5, 0xb6, + 0xb5, 0xb5, 0xb5, 0xc6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xd3, + 0xc5, 0xc4, 0xc5, 0xc5, 0xc5, 0xd7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf6, 0xb8, 0x9b, 0x9a, 0x9a, 0xc0, + 0xf9, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc6, 0xb5, 0xb5, 0xb5, + 0xb5, 0xb5, 0xb5, 0xc6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, + 0xdc, 0xc5, 0xc5, 0xc4, 0xd6, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0, 0xe3, 0xf3, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc6, 0xb5, 0xb5, 0xb5, + 0xb5, 0xb5, 0xb5, 0xc6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xf9, 0xee, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc6, 0xb5, 0xb5, 0xb6, + 0xb6, 0xb5, 0xb5, 0xc6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc6, 0xb5, 0xb5, 0xb5, + 0xb5, 0xb5, 0xb5, 0xc6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc6, 0xb6, 0xb5, 0xb5, + 0xb5, 0xb5, 0xb5, 0xc6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xcb, 0xb6, 0xb5, 0xb5, + 0xb5, 0xb5, 0xb6, 0xcb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xda, 0xb5, 0xb5, 0xb5, + 0xb5, 0xb5, 0xb5, 0xda, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfb, 0xbd, 0xb5, 0xb5, + 0xb5, 0xb5, 0xbd, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf7, 0xcf, 0xbb, + 0xbb, 0xcf, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xea, 0x75, 0x43, + 0x44, 0x75, 0xea, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf3, 0x48, 0x34, 0x34, + 0x34, 0x34, 0x48, 0xf3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x93, 0x34, 0x33, 0x34, + 0x34, 0x34, 0x34, 0x92, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x69, 0x34, 0x34, 0x34, + 0x34, 0x34, 0x34, 0x68, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x5d, 0x34, 0x34, 0x34, + 0x34, 0x34, 0x34, 0x5e, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x5e, 0x34, 0x34, 0x34, + 0x34, 0x34, 0x34, 0x5d, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x5d, 0x34, 0x34, 0x34, + 0x34, 0x34, 0x34, 0x5e, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe9, 0xd3, 0xed, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x5d, 0x34, 0x34, 0x34, + 0x34, 0x34, 0x34, 0x5d, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xe0, 0xb3, 0xda, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf2, 0x8f, 0x63, 0x64, 0x64, 0x9c, + 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x5d, 0x34, 0x34, 0x34, + 0x34, 0x34, 0x34, 0x5e, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf2, + 0x5e, 0x12, 0x11, 0x11, 0x4b, 
0xe7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x8f, 0x63, 0x63, 0x63, 0x64, 0x63, + 0x85, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x5d, 0x34, 0x34, 0x34, + 0x34, 0x34, 0x34, 0x5e, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf2, 0x3b, + 0x11, 0x12, 0x12, 0x12, 0x12, 0x4b, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe8, 0x63, 0x63, 0x64, 0x64, 0x63, 0x63, + 0x63, 0x85, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x5d, 0x34, 0x34, 0x34, + 0x34, 0x34, 0x34, 0x5d, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf2, 0x3b, 0x12, + 0x12, 0x12, 0x12, 0x12, 0x12, 0x11, 0xda, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd2, 0x63, 0x63, 0x64, 0x64, 0x64, 0x64, + 0x64, 0x64, 0x85, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x5d, 0x34, 0x34, 0x34, + 0x34, 0x33, 0x34, 0x5e, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf2, 0x3c, 0x11, 0x12, + 0x12, 0x11, 0x12, 0x12, 0x12, 0x12, 0xb3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xed, 0x63, 0x63, 0x64, 0x63, 0x63, 0x63, + 0x64, 0x64, 0x64, 0x85, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x5e, 0x34, 0x34, 0x34, + 0x34, 0x34, 0x34, 0x5d, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf2, 0x3c, 0x12, 0x12, 0x11, + 0x11, 0x12, 0x12, 0x11, 0x12, 0x12, 0xe1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x9b, 0x63, 0x64, 0x63, 0x64, 0x64, + 0x63, 0x64, 0x64, 0x63, 0x86, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x5d, 0x34, 0x34, 0x33, + 0x34, 0x34, 0x34, 0x5d, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf2, 0x3c, 0x12, 0x11, 0x12, 0x11, + 0x12, 0x11, 0x11, 0x12, 0x12, 0x5e, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf7, 0x85, 0x64, 0x63, 0x63, 0x64, + 0x64, 0x63, 0x64, 0x64, 0x64, 0x85, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0x68, 0x34, 0x34, 0x34, + 0x34, 0x34, 0x34, 0x68, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf2, 0x3c, 0x12, 0x12, 0x12, 0x11, 0x12, + 0x12, 0x11, 0x12, 0x11, 0x3c, 0xf2, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf7, 0x85, 0x63, 0x64, 0x63, + 0x63, 0x63, 0x63, 0x64, 0x63, 0x63, 0x85, 0xf7, 0xff, 0xff, 0xff, 0xff, 0x92, 0x34, 0x34, 0x34, + 0x34, 0x34, 0x34, 0x92, 0xff, 0xff, 0xff, 0xff, 0xf2, 0x3c, 0x12, 0x12, 0x12, 0x12, 0x11, 0x11, + 0x12, 0x11, 0x12, 0x3c, 0xf2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf7, 0x85, 0x64, 0x63, + 0x64, 0x64, 0x64, 0x64, 0x63, 0x63, 0x63, 0x8e, 0xff, 0xff, 0xff, 0xff, 0xf3, 0x48, 0x34, 0x34, + 0x34, 0x34, 0x48, 0xf4, 0xff, 0xff, 0xff, 0xff, 0x48, 0x12, 0x12, 0x12, 0x12, 0x12, 0x11, 0x12, + 0x11, 0x12, 0x3b, 0xf2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf7, 0x86, 0x64, + 0x63, 0x63, 0x63, 0x64, 0x63, 0x63, 0x63, 0x64, 0xcc, 0xff, 0xff, 0xff, 0xff, 0xea, 0x75, 0x43, + 0x44, 0x75, 0xea, 0xff, 0xff, 0xff, 0xff, 0xa9, 0x12, 0x12, 0x11, 0x12, 0x11, 0x12, 0x12, 0x12, + 0x12, 0x3b, 0xf2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf7, 0x85, + 0x63, 0x64, 0x63, 0x64, 0x63, 0x64, 0x63, 0x64, 0xa6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x6c, 0x12, 0x12, 0x12, 0x11, 0x12, 0x12, 0x12, 0x12, + 0x3c, 0xf2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf7, + 0x85, 0x64, 0x63, 0x64, 0x64, 0x64, 0x64, 0x63, 0xad, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x78, 0x12, 0x11, 0x11, 0x11, 0x12, 0x12, 0x11, 0x3c, + 0xf2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xf7, 0x85, 0x63, 0x64, 0x63, 0x63, 0x63, 0x64, 0xdf, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc8, 0x13, 0x12, 0x12, 0x12, 0x12, 0x12, 0x3b, 0xf2, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xf7, 0x8e, 0x64, 0x63, 0x64, 0x65, 0xb5, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x84, 0x13, 0x12, 0x12, 0x12, 0x48, 0xf2, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xcc, 0xa6, 0xad, 0xdf, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc9, 0x78, 0x6c, 0xaa, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xf9, 0xc4, 0xab, 0xa4, 0xa4, 0xa4, 0xa4, 0xa4, 0xa4, 0xa4, 0xa3, 0xa3, 0xa4, + 0xaa, 0xc3, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf3, 0x93, 0x68, + 0x5d, 0x5e, 0x5d, 0x5e, 0x5d, 0x5d, 0x5d, 0x5e, 0x5e, 0x5d, 0x68, 0x93, 0xf3, 0xff, 0xff, 0xff, + 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xf3, 0x96, 0x8a, 0x89, 0x89, 0x8a, 0x89, 0x8a, 0x89, 0x89, 0x89, 0x89, 0x8a, 0x8a, + 0x89, 0x8a, 0x97, 0xf3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xea, 0x48, 0x34, 0x34, + 0x34, 0x34, 0x34, 0x33, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x49, 0xea, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xb3, 0x8a, 0x8a, 0x89, 0x89, 0x8a, 0x89, 0x8a, 0x89, 0x8a, 0x8a, 0x8a, 0x8a, 0x8a, + 0x89, 0x8a, 0x89, 0xb3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x75, 0x34, 0x34, 0x34, + 0x33, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x75, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0x94, 0x8a, 0x8a, 0x8a, 0x8a, 0x8a, 0x89, 0x8a, 0x8a, 0x89, 0x89, 0x8a, 0x8a, 0x89, + 0x8a, 0x89, 0x89, 0x93, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x44, 0x34, 0x34, 0x34, + 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x44, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0x94, 0x8a, 0x8a, 0x89, 0x89, 0x89, 0x89, 0x8a, 0x8a, 0x89, 0x89, 0x89, 0x8a, 0x89, + 0x8a, 0x89, 0x8a, 0x94, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x43, 0x34, 0x34, 0x34, + 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x44, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xb2, 0x89, 0x8a, 0x89, 0x8a, 0x89, 0x8a, 0x8a, 0x89, 0x89, 0x8a, 0x89, 0x89, 0x8a, + 0x89, 0x8a, 0x8a, 0xb3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x75, 0x34, 0x34, 0x34, + 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x75, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xf3, 0x97, 0x8a, 0x89, 0x8a, 0x89, 0x8a, 0x89, 0x89, 0x89, 0x8a, 0x8a, 0x8a, 0x8a, + 0x89, 0x8a, 0x97, 0xf3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xea, 0x48, 0x34, 0x34, + 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x33, 0x34, 0x34, 0x34, 0x34, 0x48, 0xea, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xf8, 0xc4, 0xaa, 0xa3, 0xa4, 0xa4, 0xa3, 0xa4, 0xa3, 0xa4, 0xa4, 0xa4, 0xa3, + 0xaa, 0xc4, 0xf9, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf3, 0x92, 0x68, + 0x5e, 0x5d, 0x5d, 0x5d, 0x5d, 0x5d, 0x5d, 0x5d, 0x5e, 0x5d, 0x68, 0x92, 0xf4, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xe3, 0xcd, 0xd2, 0xed, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf4, 0xe3, 0xe0, 0xee, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xf9, 0xc0, 0xa7, 0xa7, 0xa7, 0xa7, 0xd6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe7, 0xc9, 0xc9, 0xc9, 0xc8, 0xd7, 0xfc, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xfa, 0xbb, 0xa7, 0xa7, 0xa7, 0xa7, 0xa7, 0xa7, 0xec, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf5, 0xc8, 0xc8, 0xc8, 0xc8, 0xc9, 0xc8, 0xd5, 0xfc, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf9, + 0xbb, 0xa7, 0xa7, 0xa7, 0xa7, 0xa7, 0xa7, 0xa7, 0xd2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe3, 0xc9, 0xc8, 0xc9, 0xc9, 0xc8, 0xc9, 0xc8, 0xd5, + 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfa, 0xbb, + 0xa7, 0xa7, 0xa7, 0xa7, 0xa7, 0xa6, 0xa7, 0xa7, 0xcd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe0, 0xc8, 0xc9, 0xc8, 0xc8, 0xc8, 0xc9, 0xc8, 0xc9, + 0xd5, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfa, 0xbb, 0xa7, + 0xa7, 0xa7, 0xa7, 0xa7, 0xa7, 0xa7, 0xa7, 0xa7, 0xe3, 0xff, 0xff, 0xff, 0xff, 0xf9, 0xd1, 0xbf, + 0xbf, 0xd1, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xee, 0xc8, 0xc8, 0xc8, 0xc9, 0xc9, 0xc9, 0xc8, 0xc8, + 0xc9, 0xd5, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfa, 0xbc, 0xa7, 0xa7, + 0xa7, 0xa7, 0xa7, 0xa7, 0xa7, 0xa7, 0xa7, 0xc0, 0xff, 0xff, 0xff, 0xff, 0xfb, 0xc1, 0xb9, 0xb9, + 0xb9, 0xb9, 0xc1, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xd7, 0xc8, 0xc9, 0xc9, 0xc8, 0xc9, 0xc8, 0xc8, + 0xc9, 0xc9, 0xd5, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfa, 0xbb, 0xa7, 0xa7, 0xa7, + 0xa7, 0xa7, 0xa7, 0xa7, 0xa7, 0xa7, 0xbb, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xdc, 0xb9, 0xb9, 0xb8, + 0xb9, 0xb8, 0xb9, 0xdd, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xd5, 0xc8, 0xc8, 0xc9, 0xc9, 0xc9, 0xc9, + 0xc9, 0xc9, 0xc8, 0xd5, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfa, 0xbb, 0xa7, 0xa7, 0xa7, 0xa7, + 0xa7, 0xa7, 0xa7, 0xa7, 0xa7, 0xbb, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xce, 0xb9, 0xb9, 0xb9, + 0xb9, 0xb9, 0xb9, 0xce, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xd4, 0xc9, 0xc9, 0xc8, 0xc8, 0xc9, + 0xc8, 0xc8, 0xc9, 0xc9, 0xd5, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc8, 0xa7, 0xa7, 0xa7, 0xa7, 0xa7, + 0xa7, 0xa6, 0xa7, 0xa7, 0xbc, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc9, 0xb9, 0xb8, 0xb8, + 0xb9, 0xb9, 0xb8, 0xc9, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xd4, 0xc8, 0xc9, 0xc9, 0xc8, + 0xc8, 0xc9, 0xc9, 0xc9, 0xc8, 0xde, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf4, 0xa7, 0xa7, 0xa6, 0xa7, 0xa7, 0xa7, + 0xa7, 0xa7, 0xa7, 0xbb, 0xf9, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc9, 0xb8, 0xb9, 0xb9, + 0xb9, 0xb9, 0xb9, 0xc9, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xd5, 0xc8, 0xc9, 0xc8, + 0xc8, 0xc8, 0xc9, 0xc8, 0xc9, 0xc8, 0xf9, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe6, 0xa7, 0xa7, 0xa7, 0xa7, 0xa7, 0xa7, + 0xa7, 0xa7, 0xbb, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc9, 0xb8, 0xb8, 0xb9, + 0xb8, 0xb9, 0xb8, 0xc9, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xd5, 0xc8, 0xc9, + 0xc9, 0xc9, 0xc9, 0xc9, 0xc9, 0xc9, 0xf0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf2, 0xa7, 0xa7, 0xa7, 0xa7, 0xa7, 0xa7, + 0xa7, 0xbb, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc9, 0xb9, 0xb9, 0xb8, + 0xb8, 0xb8, 0xb9, 0xc8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xd5, 0xc9, + 0xc9, 0xc8, 0xc8, 0xc9, 0xc8, 0xc9, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc1, 0xa7, 0xa7, 0xa7, 0xa7, 0xa7, + 0xbb, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc9, 0xb8, 0xb9, 0xb9, + 0xb8, 0xb9, 0xb9, 0xc9, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xd5, + 0xc9, 0xc8, 0xc9, 0xc8, 0xc9, 0xd9, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf7, 0xc1, 0xa7, 0xa7, 0xa7, 0xc8, + 0xf9, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc9, 0xb8, 0xb9, 0xb8, + 0xb9, 0xb9, 0xb9, 0xc9, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, + 0xdd, 0xc9, 0xc9, 0xc9, 0xd8, 0xf9, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf2, 0xe6, 0xf5, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc9, 0xb8, 0xb9, 0xb8, + 0xb9, 0xb9, 0xb8, 0xc8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xf9, 0xef, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc9, 0xb8, 0xb9, 0xb9, + 0xb9, 0xb9, 0xb9, 0xc8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc9, 0xb9, 0xb9, 0xb8, + 0xb9, 0xb9, 0xb9, 0xc9, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc8, 0xb9, 0xb8, 0xb9, + 0xb8, 0xb9, 0xb9, 0xc8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xce, 0xb9, 0xb8, 0xb9, + 0xb9, 0xb9, 0xb9, 0xcd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xdd, 0xb9, 0xb9, 0xb8, + 0xb9, 0xb9, 0xb8, 0xdc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfb, 0xc1, 0xb9, 0xb9, + 0xb9, 0xb9, 0xc1, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf8, 0xd1, 0xbf, + 0xbe, 0xd1, 0xf9, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, +}; +#endif /* !PEXPERT_NO_3X_IMAGES */ + +const unsigned char gGearPict2x[4 * kGearFrames * kGearWidth * kGearHeight] = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7c, 0x11, + 0x12, 0x7d, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xb4, 0x00, 0x00, + 0x00, 0x00, 0xb4, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x70, 0x00, 0x00, + 0x00, 0x00, 0x70, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x69, 0x00, 0x00, + 0x00, 0x00, 0x69, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x68, 0x00, 0x00, + 0x00, 0x00, 0x68, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xf5, 0x71, 0x38, 0x59, 0xdd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x68, 0x00, 0x00, + 0x00, 0x00, 0x68, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf8, 0xde, 0xd6, 0xe4, 0xfc, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0x72, 0x1a, 0x1a, 0x1a, 0x2b, 0xd7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x68, 0x00, 0x00, + 0x00, 0x00, 0x69, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf7, 0xd4, 0xcf, 0xcf, 0xce, 0xe4, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0x38, 0x1a, 0x1a, 0x1a, 0x1b, 0x2b, 0xd7, 0xff, 0xff, 0xff, 0xff, 0xff, 0x69, 0x00, 0x00, + 0x00, 0x00, 0x69, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf7, 0xd3, 0xce, 0xcf, 0xcf, 0xcf, 0xd6, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0x59, 0x1a, 0x1b, 0x1b, 0x1a, 0x1a, 0x2b, 0xd7, 0xff, 0xff, 0xff, 0xff, 0x69, 0x00, 0x00, + 0x00, 0x00, 0x69, 0xff, 0xff, 0xff, 0xff, 0xf7, 0xd3, 0xcf, 0xcf, 0xce, 0xcf, 0xcf, 0xde, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xde, 0x2a, 0x1b, 0x1a, 0x1b, 0x1b, 0x1a, 0x2b, 0xd8, 0xff, 0xff, 0xff, 0x6f, 0x00, 0x00, + 0x00, 0x00, 0x6f, 0xff, 0xff, 0xff, 0xf7, 0xd4, 0xcf, 0xce, 0xce, 0xcf, 0xcf, 0xd3, 0xf9, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xd7, 0x2b, 0x1a, 0x1a, 0x1a, 0x1b, 0x1a, 0x2a, 0xd8, 0xff, 0xff, 0xb4, 0x00, 0x00, + 0x00, 0x00, 0xb4, 0xff, 0xff, 0xf7, 0xd4, 0xcf, 0xce, 
0xcf, 0xce, 0xcf, 0xd4, 0xf7, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xd8, 0x2a, 0x1b, 0x1a, 0x1b, 0x1b, 0x1a, 0x42, 0xff, 0xff, 0xff, 0x7d, 0x11, + 0x12, 0x7d, 0xff, 0xff, 0xff, 0xd8, 0xcf, 0xcf, 0xcf, 0xce, 0xcf, 0xd3, 0xf7, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xd7, 0x2b, 0x1a, 0x1a, 0x1a, 0x1b, 0x1a, 0xf7, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xfc, 0xcf, 0xcf, 0xce, 0xce, 0xce, 0xd3, 0xf7, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xd7, 0x2b, 0x1b, 0x1a, 0x1a, 0x35, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xd5, 0xcf, 0xcf, 0xcf, 0xd4, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd8, 0x42, 0x1a, 0x35, 0xc8, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xf5, 0xd5, 0xcf, 0xd9, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xcc, 0x9e, 0x98, + 0x98, 0x98, 0x98, 0x98, 0x98, 0x9d, 0xcc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xdf, 0xde, 0xde, 0xdd, 0xdd, 0xde, + 0xde, 0xdf, 0xef, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xa7, 0x44, 0x44, 0x43, + 0x44, 0x44, 0x44, 0x43, 0x44, 0x44, 0x44, 0xa7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe2, 0xbf, 0xbf, 0xbf, 0xbe, 0xbf, 0xbe, 0xbf, + 0xbf, 0xbf, 0xbf, 0xe2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x53, 0x44, 0x44, 0x44, + 0x43, 0x44, 0x44, 0x43, 0x43, 0x44, 0x44, 0x53, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc5, 0xbf, 0xbf, 0xbf, 0xbe, 
0xbf, 0xbf, 0xbf, + 0xbf, 0xbf, 0xbf, 0xc5, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x53, 0x44, 0x44, 0x43, + 0x44, 0x44, 0x44, 0x43, 0x44, 0x43, 0x44, 0x52, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc4, 0xbf, 0xbf, 0xbe, 0xbf, 0xbf, 0xbf, 0xbe, + 0xbf, 0xbf, 0xbf, 0xc4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xa7, 0x43, 0x44, 0x44, + 0x44, 0x43, 0x44, 0x43, 0x43, 0x44, 0x44, 0xa7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe3, 0xbe, 0xbf, 0xbe, 0xbe, 0xbf, 0xbf, 0xbf, + 0xbe, 0xbf, 0xbf, 0xe3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xcc, 0x9e, 0x98, + 0x98, 0x98, 0x98, 0x98, 0x98, 0x9d, 0xcd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0, 0xdf, 0xdd, 0xdd, 0xdd, 0xde, 0xde, + 0xde, 0xdf, 0xf0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xea, 0x92, 0x76, 0x89, 0xe1, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xee, 0xbb, 0xb0, 0xc0, 0xf4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xe9, 0x83, 0x76, 0x76, 0x76, 0x8a, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xbc, 0xb0, 0xaf, 0xaf, 0xb7, 0xf2, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xe9, 0x83, 0x76, 0x76, 0x76, 0x76, 0x76, 0xfa, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xfc, 0xaf, 0xaf, 0xaf, 0xb0, 0xaf, 0xb6, 0xf2, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xe9, 0x83, 0x76, 0x76, 0x76, 0x76, 0x76, 0x93, 0xff, 0xff, 0xff, 0xcd, 0x9c, + 0x9c, 0xcd, 0xff, 0xff, 0xff, 0xc0, 0xaf, 0xaf, 0xaf, 0xaf, 0xaf, 0xb7, 0xf2, 0xff, 0xff, 0xff, + 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xe9, 0x83, 0x76, 0x76, 0x76, 0x76, 0x76, 0x83, 0xeb, 0xff, 0xff, 0xe4, 0x92, 0x92, + 0x92, 0x93, 0xe4, 0xff, 0xff, 0xf4, 0xb6, 0xaf, 0xaf, 0xaf, 0xaf, 0xb0, 0xb6, 0xf2, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xed, 0x83, 0x76, 0x76, 0x76, 0x76, 0x76, 0x83, 0xe9, 0xff, 0xff, 0xff, 0xc8, 0x92, 0x93, + 0x92, 0x93, 0xc9, 0xff, 0xff, 0xff, 0xf2, 0xb6, 0xb0, 0xaf, 0xaf, 0xaf, 0xaf, 0xb6, 0xf5, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xa0, 0x76, 0x76, 0x76, 0x76, 0x76, 0x83, 0xe9, 0xff, 0xff, 0xff, 0xff, 0xc6, 0x93, 0x92, + 0x92, 0x92, 0xc6, 0xff, 0xff, 0xff, 0xff, 0xf2, 0xb7, 0xaf, 0xaf, 0xb0, 0xaf, 0xaf, 0xc9, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0x8b, 0x76, 0x76, 0x76, 0x76, 0x83, 0xe9, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc6, 0x92, 0x93, + 0x92, 0x92, 0xc6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf2, 0xb6, 0xaf, 0xb0, 0xb0, 0xaf, 0xbd, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xb0, 0x76, 0x76, 0x76, 0x83, 0xe9, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc6, 0x92, 0x93, + 0x92, 0x93, 0xc6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf2, 0xb6, 0xaf, 0xaf, 0xaf, 0xd2, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xfa, 0xb0, 0x8c, 0xa0, 0xed, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc6, 0x93, 0x92, + 0x93, 0x92, 0xc6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf5, 0xc8, 0xbc, 0xd3, 0xfb, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc6, 0x93, 0x92, + 0x92, 0x92, 0xc6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc6, 0x92, 0x92, + 0x93, 0x92, 0xc6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc9, 0x92, 0x92, + 0x93, 0x93, 0xc9, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 
 [raw image data omitted: interleaved removed (-) and added (+) rows of hexadecimal grayscale
  byte values for the embedded boot-gear animation bitmaps]
@@ -578,572 +3770,235 @@ const unsigned char gGearPict3x[9 * kGearFrames * kGearWidth * kGearHeight] = {
 [raw image data omitted: further removed (-) and added (+) rows of hexadecimal byte values
  updating the gGearPict3x frame data]
0xd1, 0xd1, 0xd1, 0xd1, 0xd1, 0xd1, 0xd1, - 0xd1, 0xd1, 0xd1, 0xd1, 0xd1, 0xd0, 0xd2, 0xd3, 0xcf, 0xdc, 0xf9, 0xff, 0xfe, 0xfe, 0xff, 0xff, - 0xfe, 0xff, 0xfe, 0xfd, 0xff, 0xf6, 0xbd, 0x90, 0x89, 0x8e, 0x91, 0x91, 0x91, 0x91, 0x91, 0x91, - 0x91, 0x91, 0x91, 0x91, 0x91, 0x91, 0x90, 0x91, 0x90, 0x88, 0x89, 0xa9, 0xe1, 0xff, 0xff, 0xfc, - 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xfe, 0xff, 0xff, 0xf1, 0xd9, 0xce, 0xce, 0xd2, 0xd2, 0xd2, 0xd2, 0xd2, 0xd2, 0xd2, 0xd2, 0xd2, - 0xd2, 0xd2, 0xd2, 0xd2, 0xd2, 0xd2, 0xd0, 0xce, 0xd1, 0xe4, 0xfb, 0xff, 0xfe, 0xfe, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xfe, 0xff, 0xfc, 0xe0, 0xb1, 0x91, 0x8f, 0x91, 0x8d, 0x8d, 0x8e, 0x8e, 0x8e, - 0x8e, 0x8e, 0x8e, 0x8e, 0x8e, 0x8e, 0x8d, 0x8f, 0x91, 0x8e, 0xa1, 0xd1, 0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xfa, 0xea, 0xd7, 0xd0, 0xd1, 0xd0, 0xcf, 0xd0, 0xd1, 0xd1, 0xd1, 0xd1, 0xd1, - 0xd1, 0xd1, 0xd1, 0xd1, 0xd1, 0xd2, 0xd0, 0xd2, 0xdf, 0xf3, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xfe, 0xe3, 0xbd, 0xa7, 0x9e, 0x9b, 0x9b, 0x9b, 0x9b, 0x9b, - 0x9c, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9c, 0x9c, 0xa3, 0xb4, 0xd6, 0xf7, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xfb, 0xed, 0xdf, 0xd8, 0xd4, 0xd3, 0xd4, 0xd6, 0xd6, 0xd6, 0xd6, 0xd6, - 0xd6, 0xd6, 0xd6, 0xd6, 0xd6, 0xd7, 0xda, 0xe3, 0xf3, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfb, 0xed, 0xdd, 0xd5, 0xd4, 0xd4, 0xd4, 0xd4, 0xd4, - 0xd5, 0xd5, 0xd5, 0xd5, 0xd5, 0xd5, 0xd6, 0xd7, 0xda, 0xe5, 0xf5, 0xfe, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xfe, 0xff, 0xff, 0xfb, 0xf4, 0xef, 0xed, 0xec, 0xec, 0xed, 0xee, 0xee, 0xee, 0xee, - 0xee, 0xee, 0xee, 0xee, 0xee, 0xee, 0xf1, 0xf7, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfb, 0xfb, 0xfb, 0xfb, 0xfa, 0xfa, 0xfa, - 0xfa, 0xfb, 0xfd, 0xfc, 0xf8, 0xf7, 0xfc, 0xff, 0xfb, 0xee, 0xe1, 0xdd, 0xe5, 0xf3, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xfe, 0xf9, 0xf1, 0xee, 0xf0, 0xf7, 0xfd, 0xff, 0xfe, 0xfc, 0xfc, 0xfd, 0xfe, 0xfe, 0xfd, - 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xf4, 0xe0, 0xc4, 0xaf, 0xa8, 0xb0, 0xcd, 0xf1, 0xff, - 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xf8, 0xe5, 0xd6, 0xd3, 0xd8, 0xe2, 0xef, 0xf9, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xfe, 0xfc, 0xff, 0xfe, 0xe0, 0xb9, 0xa5, 0x9d, 0x9a, 0x99, 0x97, 0xa3, 0xc4, 0xed, - 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, - 0xf5, 0xde, 0xcd, 0xc8, 0xca, 0xcc, 0xcd, 0xd1, 0xdc, 0xef, 0xfe, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xfe, 0xff, 0xff, 0xfe, 0xfa, - 0xfb, 0xff, 0xff, 0xf6, 0xe3, 0xca, 0xae, 0x97, 0x8f, 0x92, 0x98, 0x9a, 0x99, 0x93, 0x9f, 0xd7, - 0xff, 0xff, 0xfb, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xff, 0xff, - 0xe9, 0xca, 0xc4, 0xc8, 0xc9, 0xc9, 0xc8, 0xc7, 0xcb, 0xd6, 0xe5, 0xf1, 0xfb, 0xff, 0xff, 0xfd, - 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, - 0xfe, 0xfc, 0xf2, 0xd7, 0xb1, 0x96, 0x93, 0x9a, 0x9a, 0x9a, 0x98, 0x97, 0xa2, 0x99, 0x92, 0xcb, - 0xff, 0xff, 0xf9, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xff, 0xff, - 0xe3, 0xc2, 0xc6, 0xcb, 0xc6, 0xc8, 0xcb, 0xcc, 0xcc, 0xc8, 0xcb, 0xd9, 0xec, 0xf8, 0xfe, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfd, 0xff, 0xff, - 0xf7, 0xd9, 0xb4, 0xa0, 0x9a, 0x95, 0x95, 0x9a, 0x9b, 0x9b, 0x99, 0x98, 0xa1, 0x99, 0x96, 0xce, - 0xff, 0xff, 0xfa, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xff, 0xff, - 0xe3, 0xc2, 0xc6, 0xcb, 0xc6, 0xc7, 0xc9, 0xc9, 0xca, 0xc9, 0xc9, 0xcc, 0xcf, 0xda, 0xec, 0xfb, - 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xf9, 0xe1, - 0xc7, 0xac, 0x94, 0x8f, 0x98, 0x9d, 0x9b, 0x98, 0x97, 0x98, 0x9c, 0xa1, 0x9b, 0x95, 0xaa, 0xdf, - 0xff, 0xff, 0xf9, 0xfc, 0xfc, 0xfb, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfd, 0xfc, 0xfc, 0xfc, 0xff, 0xff, - 0xec, 0xcd, 0xc2, 0xc6, 0xcb, 0xc9, 0xc6, 0xc6, 0xc7, 0xca, 0xcc, 0xca, 0xc6, 0xc9, 0xd5, 0xe3, - 0xef, 0xfc, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xf0, 0xd2, 0xab, - 0x95, 0x95, 0x99, 0x9b, 0x9a, 0x98, 0x99, 0x9b, 0x9e, 0x9d, 0x9c, 0x9a, 0x98, 0xa9, 0xd1, 0xf4, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xf9, 0xe5, 0xcc, 0xc3, 0xc6, 0xc7, 0xc8, 0xc9, 0xc8, 0xc7, 0xc7, 0xc9, 0xca, 0xca, 0xc9, 0xca, - 0xd4, 0xe8, 0xf8, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xfb, 0xdd, 0xb6, 0x9e, 0x97, - 0x97, 0x97, 0x99, 0x9a, 0x9a, 0x9a, 0x9a, 0x9b, 0x9e, 0x9f, 0x99, 0x97, 0xad, 0xd7, 0xf7, 0xff, - 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xfa, 0xe7, 0xcf, 0xc2, 0xc4, 0xc8, 0xc9, 0xc8, 0xc7, 0xc7, 0xc8, 0xc9, 0xc9, 0xc8, 0xca, - 0xcb, 0xce, 0xda, 0xee, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xe9, 0xb5, 0x93, 0x92, 0x99, - 0x9d, 0x9b, 0x98, 0x98, 0x9a, 0x9f, 0xa2, 0x9b, 0x93, 0x99, 0xac, 0xc4, 0xe0, 0xf6, 0xff, 0xff, - 0xfe, 0xf7, 0xe5, 0xd7, 0xd6, 0xdf, 0xed, 0xfd, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfc, - 0xfc, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xf2, 0xe8, 0xe2, 0xe3, 0xee, 0xfb, 0xff, - 0xff, 0xff, 0xfa, 0xee, 0xdd, 0xce, 0xc3, 0xc2, 0xc7, 0xcb, 0xc9, 0xc7, 0xc7, 0xc7, 0xc9, 0xcb, - 0xca, 0xc7, 0xc8, 0xda, 0xf3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf5, 0xcc, 0x9d, 0x92, 0x9d, 0x9a, - 0x96, 0x9a, 0x9d, 0x9e, 0x9c, 0x9a, 0x9b, 0x9c, 0xa2, 0xb4, 0xd8, 0xfa, 0xff, 0xfd, 0xfc, 0xff, - 0xf8, 0xd8, 0xb4, 0xa4, 0xa3, 0xab, 0xc9, 0xf2, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xf4, 0xd7, 0xc2, 0xbd, 0xbf, 0xcc, 0xe6, 0xfb, - 0xff, 0xfd, 0xfe, 0xff, 0xfc, 0xe8, 0xd3, 0xc8, 0xc6, 0xc6, 0xc7, 0xc9, 0xc9, 0xc9, 0xc8, 0xc6, - 0xc8, 0xcb, 0xc7, 0xcd, 0xe5, 0xf9, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xe9, 0xba, 0x9b, 0x97, 0x9b, 0x99, - 0x98, 0x9b, 0x9d, 0x9e, 0x9d, 0x99, 0x9b, 0xb2, 0xd7, 0xf1, 0xfb, 0xfe, 0xff, 0xff, 0xfe, 0xf9, - 0xdb, 0xb1, 0x9e, 0xa3, 0xa3, 0xa0, 0xae, 0xd2, 0xf4, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xf5, 0xdb, 0xc2, 0xb9, 0xbc, 0xbe, 0xbc, 0xca, 0xe7, - 0xfb, 0xfe, 0xff, 0xff, 0xff, 0xfc, 0xf6, 0xe7, 0xd1, 0xc5, 0xc6, 0xc8, 0xca, 0xc9, 0xc7, 0xc6, - 0xc7, 0xc8, 0xc7, 0xcb, 0xdb, 0xf3, 0xff, 0xff, 0xfe, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xe7, 0xba, 0x9e, 0x9a, 0x9e, 0x9e, - 0x9f, 0xa1, 0x9b, 0x94, 0x9b, 0xb1, 0xc9, 0xe2, 0xfb, 0xff, 0xfe, 0xfc, 0xfb, 0xff, 0xff, 0xe4, - 0xb6, 0xa0, 0xa2, 0xa9, 0xaa, 0xa6, 0xa6, 0xb9, 0xe5, 0xff, 0xff, 0xfd, 0xff, 0xf9, 0xeb, 0xe0, - 0xe0, 0xec, 0xfa, 0xff, 0xfd, 0xff, 0xff, 0xea, 0xc8, 0xbb, 0xbd, 0xc1, 0xc2, 0xbe, 0xbe, 0xcd, - 0xec, 0xff, 0xff, 0xfc, 0xfd, 0xfe, 0xff, 0xfd, 0xee, 0xe0, 0xd2, 0xc5, 0xc2, 0xc7, 0xca, 0xca, - 0xca, 0xc9, 0xc8, 0xcb, 0xda, 0xf2, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf5, 0xce, 0xa1, 0x93, 0x9b, 0x9b, - 0x97, 0x9a, 0x9f, 0xa5, 0xba, 0xdd, 0xf8, 0xff, 0xff, 0xfd, 0xfe, 0xff, 0xfc, 0xff, 0xfc, 0xca, - 0x9f, 0xa2, 0xa9, 0xa2, 0xa3, 0xa9, 0xa8, 0xb2, 0xe0, 0xff, 0xff, 0xfe, 0xfa, 0xde, 0xc2, 0xb6, - 0xb7, 0xc6, 0xe2, 0xfa, 0xfe, 0xff, 0xff, 0xe6, 0xc2, 0xbb, 0xbe, 0xbb, 0xbc, 0xc3, 0xbf, 0xbc, - 0xdc, 0xfe, 0xff, 0xfd, 0xff, 0xff, 0xfd, 0xfe, 0xff, 0xfb, 0xec, 0xd7, 0xcc, 0xc8, 0xc6, 0xc5, - 0xc8, 0xc8, 0xc4, 0xcc, 0xe4, 0xf9, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfd, 0xff, 0xed, 0xba, 0x98, 0x96, 0x98, - 0x94, 0x9c, 0xb7, 0xdd, 0xf5, 0xfd, 0xfe, 0xff, 0xfe, 0xfe, 0xfe, 0xfe, 0xff, 0xfb, 0xdd, 0xb4, - 0xa1, 0xa5, 0xa7, 0xa3, 0xa4, 0xa7, 0xa9, 0xb7, 0xe3, 0xff, 0xff, 0xf8, 0xe1, 0xbb, 0xaa, 0xb2, - 0xb2, 0xaf, 0xc2, 0xe5, 0xfa, 0xff, 0xff, 0xe9, 0xc6, 0xbb, 0xbc, 0xbc, 0xbc, 0xc0, 0xbf, 0xbd, - 0xcd, 0xe9, 0xfd, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xf9, 0xec, 0xd6, 0xc5, 0xc2, - 0xc6, 0xc6, 0xc6, 0xd9, 0xf6, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xfc, 0xe6, 0xcd, 0xbe, 0xb8, - 0xbe, 0xcf, 0xe6, 0xf8, 0xff, 0xff, 0xfc, 0xfb, 0xfe, 0xff, 0xfd, 0xfc, 0xff, 0xf2, 0xbd, 0xa2, - 0xa6, 0xa5, 0xa3, 0xa7, 0xa8, 0xa4, 0xab, 0xc9, 0xed, 0xff, 0xff, 0xed, 0xc8, 0xad, 0xab, 0xb5, - 0xb5, 0xae, 0xb5, 0xce, 0xef, 0xff, 0xff, 0xf1, 0xd4, 0xbd, 0xb8, 0xbd, 0xbe, 0xbb, 0xbe, 0xc1, - 0xbf, 0xd2, 0xf6, 0xff, 0xfc, 0xfe, 0xff, 0xfe, 0xfd, 0xfe, 0xff, 0xff, 0xfb, 0xf0, 0xe3, 0xd9, - 0xd6, 0xda, 0xe3, 0xf1, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xfe, 0xf3, 0xe9, - 0xf2, 0xff, 0xff, 0xfc, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xdb, 0xab, 0x9c, - 0xa6, 0xa7, 0xa2, 0xa8, 0xaa, 0xa2, 0xb4, 0xe3, 0xfb, 0xff, 0xff, 0xe2, 0xb8, 0xae, 0xb1, 0xb0, - 0xb0, 0xb4, 0xb7, 0xc2, 0xe7, 0xff, 0xff, 0xfb, 0xe7, 0xc3, 0xb6, 0xbe, 0xbe, 0xbb, 0xbe, 0xc0, - 0xba, 0xc5, 0xe6, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xfd, 0xff, 0xff, 0xf7, - 0xf1, 0xf7, 0xfe, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xfe, - 0xfe, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xe3, 0xb6, 0xa4, 0xa3, - 0xa4, 0xa6, 0xa5, 0xa7, 0xa9, 0xaa, 0xc7, 0xf6, 0xff, 0xff, 0xff, 0xe1, 0xb3, 0xac, 0xb2, 0xb0, - 0xb0, 0xb5, 0xb5, 0xbc, 0xe3, 0xff, 0xff, 0xff, 0xf7, 0xd2, 0xbc, 0xbd, 0xbd, 0xbb, 0xbe, 0xbf, - 0xbf, 0xc0, 0xce, 0xed, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xfe, - 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xfc, 0xfe, 0xff, - 0xfd, 0xfb, 0xfc, 0xfe, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xfc, 0xff, 0xfd, 0xce, 0xa1, 0xa2, 0xa9, - 0xa4, 0xa5, 0xa9, 0xa4, 0xa3, 0xbd, 0xe6, 0xfe, 0xfd, 0xff, 0xff, 0xe1, 0xb5, 0xad, 0xb1, 0xb0, - 0xb1, 0xb5, 0xb3, 0xbb, 0xe3, 0xff, 0xff, 0xfd, 0xfe, 0xeb, 0xca, 0xb7, 0xb9, 0xbe, 0xbc, 0xbd, - 0xc2, 0xbe, 0xbf, 0xde, 0xfd, 0xff, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfd, 0xfe, - 0xff, 0xfe, 0xfd, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xfc, 0xe1, 0xb8, 0xa1, 0xa3, 0xa7, - 0xa4, 0xa4, 0xab, 0xa6, 0xa8, 0xd3, 0xff, 0xff, 0xf9, 0xff, 0xff, 0xdf, 0xb5, 0xaf, 0xb3, 0xb0, - 0xb1, 0xb5, 0xb3, 0xba, 0xe1, 0xff, 0xff, 0xf9, 0xff, 0xff, 0xdb, 0xb8, 0xb9, 0xc0, 0xbc, 0xbc, - 0xbf, 0xbe, 0xbe, 0xcd, 0xe9, 0xfd, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xfe, 0xfc, 0xff, 0xf4, 0xbf, 0xa2, 0xa6, 0xa6, 0xa4, - 0xa6, 0xa6, 0xa7, 0xab, 0xbf, 0xe7, 0xff, 0xff, 0xf9, 0xff, 0xff, 0xe0, 0xb5, 0xad, 0xb2, 0xb0, - 0xb0, 0xb4, 0xb3, 0xba, 0xe1, 0xff, 0xff, 0xfa, 0xff, 0xff, 0xea, 0xca, 0xbd, 0xbc, 0xbc, 0xbd, - 0xbd, 0xc0, 0xc0, 0xbe, 0xd3, 0xf7, 0xff, 0xfd, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, - 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xe1, 0xad, 0x9c, 0xa7, 0xa6, 0xa3, - 0xa9, 0xa9, 0xa1, 0xb0, 0xdc, 0xfa, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xe0, 0xb5, 0xae, 0xb2, 0xb0, - 0xb0, 0xb4, 0xb3, 0xba, 0xe1, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xfa, 0xe3, 0xc0, 0xb5, 0xbd, 0xbe, - 0xbb, 0xbf, 0xc0, 0xbb, 0xc7, 0xe9, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xe9, 0xc0, 0xa8, 0xa3, 0xa6, 0xa6, 0xa3, - 0xa6, 0xa9, 0xa6, 0xc0, 0xf2, 0xff, 0xfe, 0xff, 0xfd, 0xff, 0xff, 0xe0, 0xb5, 0xae, 0xb3, 0xb0, - 0xb0, 0xb4, 0xb3, 0xba, 0xe1, 0xff, 0xff, 0xfd, 0xff, 0xfe, 0xff, 0xf4, 0xcc, 0xb8, 0xbd, 0xbe, - 0xbc, 0xbe, 0xbf, 0xbf, 0xc2, 0xd2, 0xef, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xfb, 0xff, 0xff, 0xd4, 0xa4, 0xa3, 0xa9, 0xa4, 0xa5, 0xa7, - 0xa4, 0xa5, 0xb8, 0xdd, 0xfb, 0xff, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xe0, 0xb5, 0xae, 0xb3, 0xb0, - 0xb0, 0xb4, 0xb3, 0xba, 0xe1, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xff, 0xfc, 0xe4, 0xc7, 0xb9, 0xbb, - 0xbe, 0xbc, 0xbc, 0xc0, 0xbd, 0xc0, 0xe2, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xff, 0xf7, 0xc7, 0x9d, 0x9f, 0xa9, 0xa3, 0xa2, 0xa9, - 0xa6, 0xa5, 0xcc, 0xf9, 0xff, 0xfc, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xe0, 0xb5, 0xae, 0xb3, 0xb0, - 0xb0, 0xb4, 0xb3, 0xba, 0xe1, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xfc, 0xff, 0xfa, 0xd7, 0xb8, 0xba, - 0xbf, 0xbb, 0xbb, 0xc0, 0xbc, 0xbb, 0xd8, 0xfa, 0xff, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xeb, 0xbf, 0xa1, 0xa4, 0xa8, 0xa5, 0xa6, 0xa8, - 0xa7, 0xb7, 0xe3, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xfb, 0xff, 0xff, 0xe0, 0xb5, 0xae, 0xb3, 0xb0, - 0xb0, 0xb4, 0xb3, 0xba, 0xe1, 0xff, 0xff, 0xfb, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xe8, 0xc5, 0xba, - 0xbd, 0xbe, 0xbc, 0xc0, 0xbe, 0xbc, 0xd2, 0xf2, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xed, 0xc1, 0xa1, 0xa4, 0xa7, 0xa8, 0xab, 0xa3, - 0xad, 0xd8, 0xf9, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfb, 0xff, 0xff, 0xe0, 0xb5, 0xae, 0xb3, 0xb0, - 0xb0, 0xb4, 0xb3, 0xba, 0xe1, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfa, 0xe0, 0xbe, - 0xb7, 0xbf, 0xbe, 0xbf, 0xbd, 0xbb, 0xd4, 0xf5, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xff, 0xfc, 0xcf, 0xa2, 0x9e, 0xa4, 0xa3, 0xa2, 0xa5, - 0xc3, 0xf2, 0xff, 0xfc, 0xfe, 0xff, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xe0, 0xb5, 0xae, 0xb3, 0xb0, - 0xb0, 0xb4, 0xb3, 0xba, 0xe1, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xf5, 0xd1, - 0xb8, 0xb7, 0xba, 0xbc, 0xb7, 0xbc, 0xde, 0xfd, 0xff, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xe8, 0xc1, 0xae, 0xab, 0xa8, 0xab, 0xc3, - 0xe7, 0xfd, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xe0, 0xb5, 0xae, 0xb3, 0xb0, - 0xb0, 0xb4, 0xb3, 0xba, 0xe1, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xec, - 0xcf, 0xbe, 0xbe, 0xc0, 0xc2, 0xd1, 0xef, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xf0, 0xd9, 0xc7, 0xc7, 0xd7, 0xef, - 0xfd, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xe0, 0xb5, 0xae, 0xb3, 0xb0, - 0xb0, 0xb4, 0xb3, 0xba, 0xe1, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xfe, 0xff, 0xfe, - 0xf1, 0xdf, 0xd5, 0xd6, 0xe3, 0xf4, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xf8, 0xed, 0xed, 0xf8, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xe0, 0xb7, 0xb1, 0xb3, 0xaf, - 0xaf, 0xb4, 0xb4, 0xbc, 0xe3, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xf9, 0xf1, 0xf2, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xfe, - 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xe6, 0xbf, 0xb0, 0xb1, 0xb2, - 0xb2, 0xb2, 0xb4, 0xc2, 0xe7, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, - 0xfe, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xf3, 0xd3, 0xb4, 0xac, 0xb5, - 0xb5, 0xad, 0xb4, 0xd1, 0xf2, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfd, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xeb, 0xc7, 0xb0, 0xad, - 0xad, 0xb1, 0xc8, 0xeb, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xfc, 0xe7, 0xcf, 0xc2, - 0xc2, 0xcf, 0xe8, 0xfc, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xf9, 0xf5, - 0xf5, 0xf9, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfd, 0xfd, - 0xfd, 0xfd, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, - 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, @@ -1154,14 +4009,10 @@ const unsigned char gGearPict3x[9 * kGearFrames * kGearWidth * kGearHeight] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, - 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xfe, - 0xfe, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, @@ -1172,554 +4023,209 @@ const unsigned char gGearPict3x[9 * kGearFrames * kGearWidth * kGearHeight] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfd, 0xfd, - 0xfd, 0xfd, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfd, 0xfb, - 0xfb, 0xfd, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x87, 0x21, + 0x21, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xba, 0x11, 0x12, + 0x12, 0x12, 0xba, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xfd, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7c, 0x12, 0x11, + 0x12, 0x11, 0x7b, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x75, 0x12, 0x12, + 0x12, 0x11, 0x75, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfb, 0xf5, 0xf0, - 0xf0, 0xf5, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x75, 0x11, 0x11, + 0x11, 0x12, 0x75, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xfa, 0xd9, 0xb1, 0x99, - 0x99, 0xb1, 0xd9, 0xfa, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xf7, 0x85, 0x51, 0x6f, 0xe3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x74, 0x12, 0x12, + 0x11, 0x12, 0x75, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe2, 0x6f, 0x51, 0x85, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfd, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xe2, 0xa5, 0x7b, 0x74, - 0x74, 0x7b, 0xa4, 0xde, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xfc, 0xfc, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0x85, 0x34, 0x34, 0x34, 0x44, 0xde, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x75, 0x12, 0x11, + 0x12, 0x12, 0x75, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xde, 0x44, 0x34, 0x34, 0x34, 0x85, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xee, 0xb8, 0x82, 0x73, 0x82, - 0x81, 0x74, 0x7f, 0xaf, 0xe8, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, - 0xfe, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0x50, 0x34, 0x34, 0x34, 0x34, 0x44, 0xdd, 0xff, 0xff, 0xff, 0xff, 0xff, 0x75, 0x12, 0x12, + 0x11, 0x12, 0x75, 0xff, 0xff, 0xff, 0xff, 0xff, 0xdd, 0x44, 0x34, 0x34, 0x34, 0x34, 0x51, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xfe, - 0xfe, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xfb, 0xff, 0xff, 0xd7, 0x97, 0x7d, 0x7b, 0x7d, - 0x7d, 0x7b, 0x7a, 0x92, 0xd5, 0xff, 0xff, 0xfb, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xfe, - 0xfe, 0xfe, 0xff, 0xff, 0xfe, 0xfc, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0x6e, 0x34, 0x34, 0x34, 0x34, 0x34, 0x45, 0xdd, 0xff, 0xff, 0xff, 0xff, 0x75, 0x12, 0x11, + 0x12, 0x12, 0x75, 0xff, 0xff, 0xff, 0xff, 0xde, 0x45, 0x34, 0x34, 0x34, 0x34, 0x34, 0x6f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xf8, 0xe9, 0xe8, 0xf6, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xfa, 0xff, 0xff, 0xce, 0x8b, 0x7e, 0x7f, 0x77, - 0x77, 0x7f, 0x7b, 0x87, 0xcc, 0xff, 0xff, 0xfa, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xfe, - 0xff, 0xf6, 0xe5, 0xe5, 0xf5, 0xfe, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xe2, 0x44, 0x34, 0x34, 0x34, 0x34, 0x34, 0x45, 0xde, 0xff, 0xff, 0xff, 0x7b, 0x12, 0x11, + 0x11, 0x12, 0x7b, 0xff, 0xff, 0xff, 0xdd, 0x45, 0x34, 0x34, 0x34, 0x34, 0x34, 0x44, 0xe3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xed, 0xce, 0xb4, 0xb2, 0xc8, 0xe9, - 0xfe, 0xff, 0xfe, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xfa, 0xff, 0xff, 0xcd, 0x88, 0x7a, 0x7f, 0x7b, - 0x7b, 0x7e, 0x78, 0x85, 0xcc, 0xff, 0xff, 0xfa, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xfe, 0xff, 0xfe, - 0xe7, 0xc2, 0xa9, 0xa9, 0xc6, 0xeb, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xde, 0x45, 0x34, 0x34, 0x33, 0x34, 0x34, 0x44, 0xde, 0xff, 0xff, 0xba, 0x12, 0x12, + 0x11, 0x12, 0xba, 0xff, 0xff, 0xde, 0x45, 0x34, 0x34, 0x34, 0x33, 0x34, 0x45, 0xde, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xe1, 0xab, 0x90, 0x8c, 0x87, 0x89, 0xab, - 0xdf, 0xfc, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xfa, 0xff, 0xff, 0xcd, 0x88, 0x7a, 0x7e, 0x7a, - 0x7a, 0x7e, 0x78, 0x85, 0xcc, 0xff, 0xff, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xfc, 0xdc, - 0xa2, 0x7c, 0x79, 0x7d, 0x81, 0xa1, 0xdd, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xde, 0x44, 0x34, 0x34, 0x34, 0x34, 0x34, 0x59, 0xff, 0xff, 0xff, 0x87, 0x21, + 0x21, 0x88, 0xff, 0xff, 0xff, 0x59, 0x34, 0x34, 0x34, 0x34, 0x34, 0x45, 0xdd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfb, 0xff, 0xfd, 0xc0, 0x81, 0x7a, 0x84, 0x80, 0x7a, 0x7f, - 0xad, 0xee, 0xff, 0xfc, 0xfe, 0xff, 0xff, 0xff, 0xfa, 0xff, 0xff, 0xcd, 0x88, 0x7c, 0x80, 0x79, - 0x79, 0x7f, 0x7a, 0x85, 0xcb, 0xff, 0xff, 0xf9, 0xff, 0xff, 0xff, 0xfd, 0xfc, 0xff, 0xec, 0xa6, - 0x72, 0x6b, 0x70, 0x75, 0x6a, 0x71, 0xb9, 0xfd, 0xff, 0xfb, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xde, 0x45, 0x34, 0x34, 0x34, 0x34, 0x34, 0xf7, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xf7, 0x34, 0x34, 0x34, 0x34, 0x34, 0x45, 0xdd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xea, 0xac, 0x7f, 0x84, 0x88, 0x86, 0x88, 0x7a, - 0x89, 0xc8, 0xf7, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xf9, 0xff, 0xff, 0xcd, 0x88, 0x7b, 0x7f, 0x79, - 0x79, 0x7e, 0x79, 0x85, 0xcb, 0xff, 0xff, 0xf9, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xf6, 0xc2, 0x7d, - 0x6d, 0x7b, 0x78, 0x7a, 0x75, 0x6f, 0xa3, 0xe8, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xde, 0x44, 0x34, 0x34, 0x34, 0x4c, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0x4d, 0x34, 0x34, 0x33, 0x44, 0xde, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xe3, 0xa9, 0x81, 0x83, 0x88, 0x84, 0x85, 0x85, - 0x81, 0x98, 0xd7, 0xff, 0xff, 0xfa, 0xff, 0xff, 0xf9, 0xff, 0xff, 0xcd, 0x88, 0x7b, 0x7f, 0x79, - 0x79, 0x7e, 0x79, 0x85, 0xcb, 0xff, 0xff, 0xf9, 0xff, 0xff, 0xfa, 0xff, 0xff, 0xd3, 0x8e, 0x74, - 0x77, 0x77, 0x75, 0x7a, 0x75, 0x70, 0x9e, 0xe1, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xdd, 0x59, 0x34, 0x4d, 0xd0, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xd0, 0x4d, 0x34, 0x59, 0xdd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xff, 0xf3, 0xb3, 0x7c, 0x7f, 0x89, 0x81, 0x81, 0x89, - 0x81, 0x7e, 0xb7, 0xf7, 0xff, 0xfa, 0xff, 0xff, 0xf9, 0xff, 0xff, 0xce, 0x89, 0x7b, 0x80, 0x7a, - 0x7a, 0x7e, 0x79, 0x85, 0xcb, 0xff, 0xff, 0xf9, 0xff, 0xff, 0xfa, 0xff, 0xf8, 0xb2, 0x72, 0x74, - 0x7c, 0x71, 0x72, 0x7b, 0x6e, 0x6a, 0xa7, 0xf0, 0xff, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xfa, 0xff, 0xff, 0xc5, 0x84, 0x82, 0x8a, 0x83, 0x85, 0x87, - 0x81, 0x7f, 0x99, 0xce, 0xf9, 0xff, 0xff, 0xff, 0xfb, 0xff, 0xff, 0xce, 0x89, 0x7c, 0x81, 0x7b, - 0x7a, 0x7e, 0x79, 0x85, 0xcb, 0xff, 0xff, 0xfb, 0xff, 0xff, 0xff, 0xf9, 0xca, 0x90, 0x72, 0x73, - 0x79, 0x75, 0x74, 0x7c, 0x71, 0x72, 0xbb, 0xff, 0xff, 0xf9, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xdf, 0xa6, 0x89, 0x84, 0x85, 0x86, 0x82, - 0x85, 0x86, 0x7e, 0xa4, 0xf0, 0xff, 0xfc, 0xff, 0xfc, 0xff, 0xff, 0xcd, 0x88, 0x7c, 0x81, 0x7b, - 0x7a, 0x7e, 0x79, 0x85, 0xcb, 0xff, 0xff, 0xfc, 0xff, 0xfc, 0xff, 0xed, 0x9a, 0x71, 0x78, 0x77, - 0x73, 0x77, 0x76, 0x75, 0x78, 0x98, 0xd9, 0xff, 0xff, 0xfb, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, - 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfd, 0xfb, 0xd3, 0x92, 0x7d, 0x89, 0x87, 0x81, - 0x88, 0x88, 0x79, 0x8f, 0xd2, 0xfb, 0xfe, 0xff, 0xfb, 0xff, 0xff, 0xcd, 0x88, 0x7b, 0x80, 0x7a, - 0x7a, 0x7e, 0x79, 0x85, 0xcb, 0xff, 0xff, 0xfb, 0xff, 0xfe, 0xfb, 0xce, 0x83, 0x6a, 0x7a, 0x7a, - 0x72, 0x79, 0x79, 0x6a, 0x81, 0xcb, 0xfb, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xfd, 0xfc, 0xff, 0xf1, 0xab, 0x84, 0x88, 0x87, 0x83, - 0x86, 0x86, 0x83, 0x85, 0xa3, 0xdd, 0xff, 0xff, 0xf7, 0xff, 0xff, 0xcd, 0x89, 0x7c, 0x7f, 0x79, - 0x79, 0x7e, 0x79, 0x85, 0xcb, 0xff, 0xff, 0xf6, 0xff, 0xff, 0xda, 0x9b, 0x7a, 0x75, 0x76, 0x77, - 0x74, 0x78, 0x78, 0x72, 0x9c, 0xed, 0xff, 0xfb, 0xfd, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xfe, 0xff, 0xff, 0xfe, 0xff, 
0xfb, 0xd6, 0xa1, 0x82, 0x83, 0x88, - 0x83, 0x83, 0x89, 0x7f, 0x80, 0xbf, 0xff, 0xff, 0xf5, 0xff, 0xff, 0xcd, 0x89, 0x7d, 0x80, 0x79, - 0x79, 0x7f, 0x7a, 0x85, 0xcb, 0xff, 0xff, 0xf4, 0xff, 0xff, 0xb9, 0x74, 0x72, 0x7c, 0x74, 0x74, - 0x79, 0x74, 0x71, 0x92, 0xcf, 0xfa, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xfa, 0xfd, 0xff, - 0xfd, 0xfb, 0xfc, 0xfe, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xfb, 0xff, 0xfb, 0xbe, 0x84, 0x83, 0x8b, - 0x83, 0x83, 0x89, 0x81, 0x7b, 0xa0, 0xdd, 0xfe, 0xfc, 0xff, 0xff, 0xce, 0x89, 0x7c, 0x7f, 0x7a, - 0x7a, 0x7d, 0x77, 0x84, 0xcc, 0xff, 0xff, 0xfb, 0xfe, 0xda, 0x97, 0x6d, 0x71, 0x7a, 0x74, 0x73, - 0x7d, 0x72, 0x71, 0xb3, 0xfa, 0xff, 0xfa, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, - 0xff, 0xfe, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, - 0xfe, 0xff, 0xff, 0xfe, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xdc, 0xa1, 0x87, 0x85, - 0x85, 0x85, 0x84, 0x86, 0x85, 0x82, 0xad, 0xf1, 0xff, 0xff, 0xff, 0xcf, 0x89, 0x7d, 0x80, 0x79, - 0x78, 0x7e, 0x7a, 0x86, 0xce, 0xff, 0xff, 0xff, 0xf0, 0xa7, 0x78, 0x78, 0x77, 0x74, 0x76, 0x76, - 0x75, 0x75, 0x90, 0xd5, 0xff, 0xff, 0xfb, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xfe, 0xf2, 0xe7, - 0xf1, 0xff, 0xff, 0xfb, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfa, 0xcf, 0x90, 0x7b, - 0x88, 0x88, 0x82, 0x87, 0x86, 0x78, 0x91, 0xd6, 0xf9, 0xff, 0xff, 0xd2, 0x8f, 0x80, 0x80, 0x7a, - 0x79, 0x7d, 0x7d, 0x8f, 0xd2, 0xff, 0xff, 0xf9, 0xd2, 0x87, 0x6c, 0x7a, 0x79, 0x72, 0x7a, 0x7a, - 0x69, 0x7e, 0xc6, 0xf9, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xfc, - 0xf9, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xfb, 0xe0, 0xc7, 0xb9, 0xb1, - 0xb6, 0xcb, 0xe5, 0xf8, 0xff, 0xff, 0xfc, 0xfb, 0xfe, 0xff, 0xfd, 0xfb, 0xff, 0xed, 0xa7, 0x84, - 0x8a, 0x86, 0x81, 0x86, 0x86, 0x7d, 0x86, 0xb2, 0xe5, 0xff, 0xff, 0xe0, 0xa5, 0x7d, 0x77, 0x84, - 0x81, 0x73, 0x7a, 0xa5, 0xe0, 0xff, 0xff, 0xe3, 0xab, 0x7b, 0x6f, 0x78, 0x79, 0x72, 0x76, 0x79, - 0x72, 0x99, 0xea, 0xff, 0xfb, 0xfd, 0xff, 0xfe, 0xfe, 0xfe, 0xff, 0xff, 0xfe, 0xf9, 0xf2, 0xec, - 0xeb, 0xed, 0xf1, 0xf7, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfd, 0xff, 0xe9, 0xab, 0x86, 0x89, 0x8a, - 0x83, 0x8d, 0xb0, 0xda, 0xf4, 0xfd, 0xff, 0xff, 0xfe, 0xff, 0xfe, 0xfe, 0xff, 0xfa, 0xd2, 0x9d, - 0x82, 0x84, 0x87, 0x83, 0x84, 0x85, 0x82, 0x96, 0xd6, 0xff, 0xff, 0xf4, 0xcf, 0x92, 0x74, 0x7d, - 0x7c, 0x72, 0x8f, 0xcc, 0xf3, 0xff, 0xff, 0xd2, 0x8d, 0x77, 0x77, 0x74, 0x74, 0x78, 0x75, 0x71, - 0x8e, 0xca, 0xf9, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xf6, 0xec, 0xe3, 0xe0, - 0xe1, 0xe1, 0xdf, 0xe9, 0xfa, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf4, 0xc9, 0x96, 0x84, 0x8c, 0x8c, - 0x88, 0x8b, 0x91, 0x98, 0xb1, 0xda, 0xf8, 0xff, 0xfe, 0xfc, 0xfe, 0xff, 0xfb, 0xff, 
0xfb, 0xbb, - 0x80, 0x84, 0x8b, 0x82, 0x82, 0x87, 0x81, 0x8c, 0xd0, 0xff, 0xff, 0xfe, 0xf5, 0xc8, 0x97, 0x80, - 0x80, 0x96, 0xc6, 0xf5, 0xfe, 0xff, 0xff, 0xce, 0x85, 0x76, 0x79, 0x72, 0x71, 0x7c, 0x73, 0x6f, - 0xaf, 0xf9, 0xff, 0xfb, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xfe, 0xf7, 0xec, 0xe6, 0xe5, 0xe3, 0xe1, - 0xe1, 0xe1, 0xe0, 0xe4, 0xf2, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xe6, 0xb5, 0x95, 0x8f, 0x92, 0x92, - 0x92, 0x92, 0x8c, 0x84, 0x8c, 0xa7, 0xc4, 0xe0, 0xfb, 0xff, 0xfe, 0xfb, 0xf9, 0xff, 0xff, 0xda, - 0x9e, 0x82, 0x84, 0x8a, 0x8b, 0x86, 0x82, 0x98, 0xd7, 0xff, 0xff, 0xfc, 0xff, 0xf3, 0xd9, 0xc5, - 0xc5, 0xd9, 0xf2, 0xff, 0xfc, 0xff, 0xff, 0xd4, 0x8f, 0x75, 0x77, 0x7d, 0x7c, 0x73, 0x70, 0x8e, - 0xd5, 0xff, 0xff, 0xfa, 0xff, 0xff, 0xff, 0xfe, 0xf8, 0xf1, 0xea, 0xe3, 0xe0, 0xe2, 0xe4, 0xe4, - 0xe3, 0xe3, 0xe3, 0xe4, 0xec, 0xf9, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xe7, 0xb5, 0x91, 0x8b, 0x90, 0x8e, - 0x8b, 0x8c, 0x8e, 0x91, 0x8f, 0x89, 0x8b, 0xa8, 0xd3, 0xf1, 0xfb, 0xfe, 0xff, 0xff, 0xfe, 0xf8, - 0xd0, 0x98, 0x7e, 0x84, 0x83, 0x7d, 0x8e, 0xbb, 0xec, 0xff, 0xff, 0xfd, 0xfe, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xfe, 0xfd, 0xff, 0xff, 0xea, 0xb4, 0x82, 0x6d, 0x73, 0x73, 0x6c, 0x88, 0xc8, - 0xf6, 0xfe, 0xff, 0xff, 0xff, 0xfe, 0xfc, 0xf5, 0xea, 0xe2, 0xe2, 0xe3, 0xe3, 0xe2, 0xe1, 0xe1, - 0xe2, 0xe2, 0xe1, 0xe3, 0xec, 0xf9, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf4, 0xc7, 0x93, 0x88, 0x93, 0x8f, - 0x8b, 0x8e, 0x8f, 0x91, 0x8f, 0x8b, 0x8a, 0x8d, 0x95, 0xac, 0xd5, 0xf9, 0xff, 0xfd, 0xfc, 0xff, - 0xf5, 0xcb, 0x9d, 0x85, 0x81, 0x8b, 0xb2, 0xe9, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfd, 0xff, 0xe8, 0xa9, 0x7b, 0x6f, 0x72, 0x8d, 0xc3, 0xf3, - 0xff, 0xfd, 0xfe, 0xff, 0xfc, 0xf5, 0xeb, 0xe5, 0xe3, 0xe2, 0xe2, 0xe3, 0xe3, 0xe2, 0xe2, 0xe2, - 0xe2, 0xe3, 0xe0, 0xe3, 0xf0, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xe5, 0xad, 0x88, 0x88, 0x90, - 0x94, 0x91, 0x8d, 0x8c, 0x8c, 0x90, 0x93, 0x8c, 0x82, 0x8b, 0xa4, 0xbf, 0xdf, 0xf7, 0xff, 0xff, - 0xfe, 0xf2, 0xda, 0xc6, 0xc3, 0xce, 0xe4, 0xfb, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xfe, 0xfc, 0xf8, - 0xf8, 0xfc, 0xfe, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xfb, 0xe0, 0xc7, 0xba, 0xbc, 0xd4, 0xf1, 0xfe, - 0xff, 0xff, 0xfe, 0xf8, 0xf0, 0xe9, 0xe2, 0xe0, 0xe2, 0xe4, 0xe4, 0xe1, 0xe1, 0xe1, 0xe3, 0xe3, - 0xe3, 0xe1, 0xe0, 0xe9, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xfb, 0xda, 0xaf, 0x95, 0x8e, - 0x8d, 0x8d, 0x90, 0x91, 0x90, 0x8d, 0x8d, 0x8d, 0x8f, 0x90, 0x89, 0x87, 0xa3, 0xd3, 0xf6, 0xff, - 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, - 0xff, 0xfd, 0xf6, 0xeb, 0xe4, 0xe3, 0xe4, 0xe4, 0xe2, 0xe1, 0xe1, 0xe2, 0xe3, 0xe3, 0xe1, 0xe1, - 0xe2, 0xe3, 0xe9, 0xf5, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfb, 0xef, 0xce, 0xa3, - 0x8c, 0x8d, 0x91, 0x92, 0x8f, 0x8d, 0x8d, 0x8e, 0x90, 0x8e, 0x8d, 0x8a, 0x88, 0x9e, 0xcc, 0xf3, - 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xfd, 0xf4, 0xe7, 0xe2, 0xe2, 0xe3, 0xe3, 0xe3, 0xe2, 0xe1, 0xe1, 0xe2, 0xe3, 0xe3, 0xe2, 0xe1, - 0xe6, 0xf1, 0xfa, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xf8, 0xdb, - 0xbf, 0xa5, 0x8c, 0x85, 0x8f, 0x94, 0x92, 0x8d, 0x8b, 0x8b, 0x8f, 0x93, 0x8b, 0x85, 0x9f, 0xdc, - 0xff, 0xff, 0xf8, 0xfa, 0xf9, 0xfa, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, - 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xf9, 0xf8, 0xf8, 0xfa, 0xff, 0xff, - 0xf7, 0xea, 0xe2, 0xe1, 0xe2, 0xe3, 0xe2, 0xe1, 0xe2, 0xe3, 0xe4, 0xe2, 0xdf, 0xe0, 0xe7, 0xee, - 0xf5, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfd, 0xff, 0xff, - 0xf6, 0xd5, 0xad, 0x97, 0x92, 0x8d, 0x8c, 0x90, 0x91, 0x91, 0x8c, 0x8b, 0x95, 0x8b, 0x87, 0xc8, - 0xff, 0xff, 0xf9, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, - 0xf3, 0xe4, 0xe3, 0xe4, 0xe1, 0xe2, 0xe3, 0xe3, 0xe2, 0xe1, 0xe1, 0xe3, 0xe3, 0xe8, 0xf3, 0xfc, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xfe, 0xff, - 0xfe, 0xfb, 0xf0, 0xd4, 0xac, 0x8f, 0x8a, 0x91, 0x92, 0x91, 0x8d, 0x8c, 0x96, 0x8a, 0x82, 0xc4, - 0xff, 0xff, 0xf9, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xff, 0xff, - 0xf1, 0xe1, 0xe2, 0xe4, 0xe1, 0xe2, 0xe2, 0xe3, 0xe3, 0xdf, 0xdf, 0xe8, 0xf4, 0xfb, 0xfe, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xfe, 0xff, 0xff, 0xfe, 0xfa, - 0xfb, 0xff, 0xff, 0xf5, 0xe0, 0xc5, 0xa6, 0x90, 0x87, 0x89, 0x8e, 0x90, 0x8f, 0x86, 0x92, 0xd1, - 0xff, 0xff, 0xfa, 0xff, 0xff, 0xfd, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xfe, 0xfe, 0xfd, 0xff, 0xff, - 0xf3, 0xe3, 0xe0, 0xe3, 0xe3, 0xe2, 0xe1, 0xe0, 0xe1, 0xe6, 0xed, 0xf6, 0xfd, 0xff, 0xff, 0xfe, - 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xfe, 0xfc, 0xff, 0xfd, 0xdd, 0xb4, 0x9e, 0x95, 0x91, 0x90, 0x8e, 0x9a, 0xbd, 0xea, - 0xff, 0xff, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xfa, 0xee, 0xe4, 0xe2, 0xe2, 0xe2, 0xe3, 0xe5, 0xea, 0xf5, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xf3, 0xdd, 0xc1, 0xaa, 0xa1, 0xa8, 0xc8, 0xf0, 0xff, - 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xfb, 0xf0, 0xe7, 0xe5, 0xe7, 0xee, 0xf6, 0xfb, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfc, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfa, - 0xfb, 0xfc, 0xfd, 0xfc, 0xf8, 0xf7, 0xfc, 0xff, 0xfb, 0xed, 0xde, 0xd8, 0xe0, 0xf0, 0xfd, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xfe, 0xfa, 0xf6, 0xf4, 0xf5, 0xfa, 0xfe, 0xff, 0xfe, 0xfd, 0xfd, 0xfe, 0xfe, 0xfe, 0xfe, - 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd6, 0xaf, 0xab, + 0xac, 0xab, 0xac, 0xac, 0xac, 0xaf, 0xd6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf1, 0xe4, 0xe2, 0xe3, 0xe3, 0xe3, 0xe2, + 0xe3, 0xe4, 0xf1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xb7, 0x63, 0x64, 0x63, + 0x63, 0x64, 0x63, 0x63, 0x64, 0x64, 0x64, 0xb7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe6, 0xc9, 0xc8, 0xc9, 0xc8, 0xc9, 0xc8, 0xc8, + 0xc9, 0xc9, 0xc9, 0xe6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x71, 0x63, 0x63, 0x63, + 0x63, 0x64, 0x64, 0x63, 0x63, 0x64, 0x64, 0x71, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xce, 0xc8, 0xc8, 0xc9, 0xc8, 0xc9, 0xc9, 0xc9, + 0xc8, 0xc8, 0xc9, 0xce, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x71, 0x63, 0x63, 0x63, + 0x63, 0x64, 0x63, 0x63, 0x63, 0x63, 0x64, 0x70, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xcd, 0xc8, 0xc9, 0xc8, 0xc8, 0xc9, 0xc9, 0xc8, + 0xc9, 0xc9, 0xc8, 0xcd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xb8, 0x63, 0x63, 0x63, + 0x63, 0x63, 0x64, 0x63, 0x63, 0x64, 0x64, 0xb7, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe7, 0xc8, 0xc9, 0xc8, 0xc8, 0xc9, 0xc8, 0xc9, + 0xc8, 0xc8, 0xc8, 0xe7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd6, 0xb0, 0xac, + 0xac, 0xac, 0xab, 0xac, 0xac, 0xaf, 0xd7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf1, 0xe4, 0xe3, 0xe3, 0xe2, 0xe2, 0xe3, + 0xe2, 0xe4, 0xf1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfa, 0xef, 0xe2, 0xdb, 0xda, 0xda, 0xda, 0xda, 0xda, - 0xda, 0xda, 0xda, 0xda, 0xd9, 0xd9, 0xda, 0xdb, 0xdf, 0xe9, 0xf5, 0xfd, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xf8, 0xf4, 0xf3, 0xf3, 0xf3, 0xf3, 0xf3, 0xf3, 0xf3, 0xf3, - 0xf3, 0xf3, 0xf3, 0xf3, 0xf3, 0xf3, 0xf6, 0xfa, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xe5, 0xc2, 0xaf, 0xa8, 0xa3, 0xa3, 0xa3, 0xa3, 0xa3, - 0xa3, 0xa3, 0xa3, 0xa3, 0xa3, 0xa2, 0xa1, 0xa3, 0xaa, 0xb9, 0xd7, 0xf8, 0xff, 0xfe, 0xfe, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xfc, 0xf3, 0xe9, 0xe4, 0xe1, 0xe1, 0xe1, 0xe1, 0xe1, 0xe1, 0xe1, 0xe1, - 0xe1, 0xe1, 0xe1, 0xe1, 0xe1, 0xe2, 0xe5, 0xeb, 0xf7, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xe4, 0xb8, 0x9a, 0x98, 0x9a, 0x96, 0x96, 0x97, 0x97, 0x97, - 0x97, 0x97, 0x97, 0x97, 0x97, 0x97, 0x96, 0x98, 0x9a, 0x96, 0xa8, 0xd5, 0xf7, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xed, 0xa3, 0x8a, 0x9a, 0xe5, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xf1, 0xc2, 0xb9, 0xc7, 0xf5, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xfb, 0xf0, 0xe3, 0xde, 0xdf, 0xde, 0xdd, 0xdc, 0xdc, 0xdc, 0xdc, 0xdc, 0xdc, - 0xdc, 0xdc, 0xdc, 0xdc, 0xdc, 0xdc, 0xdc, 0xdd, 0xe7, 0xf5, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xfe, 0xff, 0xfe, 0xfd, 0xff, 0xf7, 0xc3, 0x99, 0x92, 0x97, 0x9a, 0x9a, 0x9b, 0x9b, 0x9a, 0x9a, - 0x9a, 0x9b, 0x9b, 0x9a, 0x9a, 0x9b, 0x9a, 0x9a, 0x98, 0x92, 0x92, 0xad, 0xe2, 0xff, 0xff, 0xfc, - 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xed, 0x95, 0x8a, 0x89, 0x8a, 0x9a, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xc3, 0xb9, 0xb9, 0xb9, 0xbf, 0xf5, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xfe, 0xff, 0xff, 0xf5, 0xe4, 0xdb, 0xdb, 0xde, 0xde, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdc, 0xdc, - 0xdc, 0xdd, 0xdd, 0xdd, 0xdd, 0xdc, 0xda, 0xd9, 0xdc, 0xeb, 0xfc, 0xff, 0xfe, 0xff, 0xff, 0xff, - 0xfe, 0xff, 0xfd, 0xfb, 0xff, 0xf2, 0xac, 0x92, 0xa0, 0x9c, 0x96, 0x99, 0x98, 0x98, 0x99, 0x99, - 0x99, 0x98, 0x98, 0x99, 0x99, 0x98, 0x98, 0x96, 0x96, 0x9e, 0x99, 0x9a, 0xd2, 0xff, 0xff, 0xfa, - 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xfd, 0xff, 0xff, 0xf0, 0xde, 0xdc, 0xde, 0xdb, 0xdb, 0xdc, 0xdc, 0xdc, 0xdc, 0xdc, 0xdc, 0xdc, - 0xdc, 0xdc, 0xdc, 0xdc, 0xdc, 0xdb, 0xdc, 0xdd, 0xda, 0xe4, 0xfb, 0xff, 0xfe, 0xfe, 0xff, 0xff, - 0xfe, 0xff, 0xfd, 0xfb, 0xff, 0xf2, 0xae, 0x94, 0xa1, 0x9d, 0x97, 0x9a, 0x99, 0x9a, 0x9b, 0x9b, - 0x9a, 0x99, 0x99, 0x9a, 0x9b, 0x99, 0x99, 0x98, 0x99, 0xa0, 0x9a, 0x9c, 0xd3, 0xff, 0xff, 0xfa, - 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xec, 0x95, 0x8a, 0x8a, 0x89, 0x89, 0x89, 0xfb, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xfc, 0xb8, 0xb9, 0xb9, 0xb9, 0xb9, 0xbe, 0xf5, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xfd, 0xff, 0xff, 0xef, 0xdc, 0xdb, 0xdd, 0xdb, 0xdb, 0xdb, 0xdb, 0xdb, 0xdb, 0xdb, 0xdb, 0xdb, - 0xdb, 0xdb, 0xdb, 0xdb, 0xdb, 0xda, 0xdc, 0xdd, 0xd9, 0xe3, 0xfa, 0xff, 0xfe, 0xfe, 0xff, 0xff, - 0xfe, 0xff, 0xfe, 0xfd, 0xff, 0xf7, 0xc3, 0x9c, 0x97, 0x9b, 0x9d, 0x9d, 0x9d, 0x9e, 0x9e, 0x9e, - 0x9e, 0x9d, 0x9d, 0x9e, 0x9e, 0x9e, 0x9c, 0x9e, 0x9d, 0x96, 0x95, 0xb0, 0xe3, 0xff, 0xff, 0xfc, - 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xfe, 0xff, 0xff, 0xf4, 0xe1, 0xd8, 0xd9, 0xda, 0xdb, 0xdb, 0xdc, 0xdc, 0xdc, 0xdc, 0xda, 0xd9, - 0xda, 0xdc, 0xdc, 0xdc, 0xdc, 0xdc, 0xdb, 0xd9, 0xdb, 0xe9, 0xfc, 0xff, 0xfe, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xfe, 0xff, 0xfc, 0xe3, 0xb9, 0x9e, 0x9b, 0x9d, 0x9b, 0x9b, 0x9b, 0x9b, 0x9b, - 0x9b, 0x9b, 0x9b, 0x9b, 0x9b, 0x9b, 0x9a, 0x9c, 0x9e, 0x9a, 0xaa, 0xd5, 0xf7, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xed, 0x95, 0x8a, 0x89, 0x8a, 0x89, 0x8a, 0xa2, 0xff, 0xff, 0xff, 0xd7, 0xae, + 0xae, 0xd7, 0xff, 0xff, 0xff, 0xc7, 0xb9, 0xb9, 0xb9, 0xb9, 0xb8, 0xbf, 0xf5, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xfb, 0xee, 0xe1, 0xda, 0xd9, 0xd9, 0xda, 0xdb, 0xdb, 0xdb, 0xdb, 0xd9, 0xd8, - 0xd9, 0xdb, 0xdb, 0xdb, 0xdb, 0xdc, 0xdb, 0xdb, 0xe5, 0xf5, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xfe, 0xe5, 0xc3, 0xb0, 0xa9, 0xa6, 0xa6, 0xa6, 0xa6, 0xa6, - 0xa6, 0xa6, 0xa6, 0xa6, 0xa6, 0xa6, 0xa6, 0xa8, 0xae, 0xbc, 0xd8, 0xf7, 0xff, 0xfe, 0xff, 0xff, + 0xff, 0xff, 0xed, 0x95, 0x8a, 0x89, 0x8a, 0x89, 0x89, 0x94, 0xed, 0xff, 0xff, 0xe9, 0xa7, 0xa7, + 0xa7, 0xa7, 0xe9, 0xff, 0xff, 0xf5, 0xbf, 0xb9, 0xb9, 0xb9, 0xb9, 0xb9, 0xbf, 0xf4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xfc, 0xf1, 0xe5, 0xdf, 0xde, 0xde, 0xde, 0xde, 0xde, 0xde, 0xde, 0xde, - 0xde, 0xde, 0xde, 0xde, 0xde, 0xde, 0xe0, 0xe8, 0xf5, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfb, 0xee, 0xe1, 0xda, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, - 0xd9, 0xd9, 0xd8, 0xd8, 0xd8, 0xd8, 0xd9, 0xdb, 0xde, 0xe8, 0xf5, 0xfe, 0xff, 0xfe, 0xff, 0xff, + 0xff, 0xf0, 0x94, 0x89, 0x8a, 0x8a, 0x89, 0x8a, 0x95, 0xed, 0xff, 0xff, 0xff, 0xd4, 0xa7, 0xa7, + 0xa7, 0xa7, 0xd4, 0xff, 0xff, 0xff, 0xf5, 0xbe, 0xb9, 0xb9, 0xb9, 0xb8, 0xb9, 0xbf, 0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xfe, 0xff, 0xff, 0xfb, 0xf6, 0xf2, 0xf2, 0xf2, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, - 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf3, 0xf8, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, + 0xff, 0xae, 0x8a, 0x89, 0x89, 0x8a, 0x89, 0x95, 0xed, 0xff, 0xff, 0xff, 0xff, 0xd1, 0xa7, 0xa7, + 0xa7, 0xa7, 0xd1, 0xff, 0xff, 0xff, 0xff, 0xf5, 0xbf, 0xb8, 0xb8, 0xb9, 0xb9, 0xb9, 0xd0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0x9c, 0x89, 0x8a, 0x89, 0x8a, 0x95, 0xed, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd1, 0xa7, 0xa7, + 0xa7, 0xa7, 0xd1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf4, 0xbf, 0xb9, 0xb9, 0xb9, 0xb9, 0xc4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, + 0xff, 0xbc, 0x89, 0x8a, 0x8a, 0x94, 0xed, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd1, 0xa7, 0xa7, + 0xa7, 0xa7, 0xd1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf4, 0xbf, 0xb8, 0xb9, 0xb8, 0xd8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xfa, 0xbc, 0x9c, 0xae, 0xef, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd1, 0xa7, 0xa7, + 0xa7, 0xa7, 0xd1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf5, 0xce, 0xc3, 0xd7, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfc, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, - 0xfb, 0xfc, 0xfd, 0xfb, 0xf8, 0xf8, 0xfc, 0xff, 0xfb, 0xf0, 0xe3, 0xe0, 0xe7, 0xf3, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd1, 0xa7, 0xa7, + 0xa7, 0xa7, 0xd1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, - 0xff, 0xfe, 0xf9, 0xf3, 0xf2, 0xf4, 0xf9, 0xfe, 0xff, 0xfd, 0xfc, 0xfd, 0xfe, 0xfe, 0xfe, 0xfe, - 0xfd, 0xfd, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xf6, 0xe3, 0xca, 0xb7, 0xb0, 0xb7, 0xd3, 0xf3, 0xff, - 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd1, 0xa7, 0xa7, + 0xa7, 0xa7, 0xd1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xf8, 0xea, 0xde, 0xdc, 0xe0, 0xe8, 0xf2, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd3, 0xa7, 0xa7, + 0xa7, 0xa7, 0xd3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xfd, 0xfc, 0xff, 0xfe, 0xe4, 0xc0, 0xad, 0xa7, 0xa6, 0xa4, 0xa3, 0xad, 0xca, 0xee, - 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, - 0xf6, 0xe4, 0xd8, 0xd4, 0xd4, 0xd5, 0xd6, 0xd9, 0xe2, 0xf2, 0xfe, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xfe, 0xff, 0xff, 0xfe, 0xfb, - 0xfc, 0xff, 0xff, 0xf7, 0xe6, 0xd0, 0xb7, 0xa3, 0x9b, 0x9e, 0xa4, 0xa6, 0xa5, 0x9e, 0xa9, 0xdb, - 0xff, 0xff, 0xfb, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xff, 0xff, - 0xec, 0xd2, 0xcf, 0xd3, 0xd4, 0xd3, 0xd1, 0xd0, 0xd4, 0xdd, 0xe9, 0xf4, 0xfc, 0xff, 0xff, 0xfe, - 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, - 0xfe, 0xfc, 0xf4, 0xdd, 0xba, 0xa2, 0x9f, 0xa6, 0xa6, 0xa5, 0xa3, 0xa2, 0xab, 0xa4, 0xa0, 0xd2, - 0xff, 0xff, 0xfa, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xff, 0xff, - 0xe7, 0xcc, 0xcf, 0xd5, 0xd1, 0xd3, 0xd5, 0xd5, 0xd5, 0xd1, 0xd3, 0xe0, 0xef, 0xf9, 0xfd, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfd, 0xff, 0xff, - 0xf8, 0xdc, 0xba, 0xaa, 0xa6, 0xa2, 0xa1, 0xa5, 0xa7, 0xa7, 0xa4, 0xa3, 0xab, 0xa4, 0xa1, 0xd4, - 0xff, 0xff, 0xfa, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xff, 0xff, - 0xe8, 0xce, 0xd0, 0xd4, 0xd0, 0xd2, 0xd4, 0xd4, 0xd4, 0xd2, 0xd2, 0xd5, 0xd8, 0xe0, 0xf0, 0xfc, - 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xfa, 0xe4, - 0xcd, 0xb6, 0x9f, 0x9b, 0xa5, 0xa9, 0xa7, 0xa3, 0xa2, 0xa3, 0xa7, 0xaa, 0xa6, 0xa1, 0xb4, 0xe3, - 0xff, 0xff, 0xfa, 0xfc, 0xfb, 0xfb, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfd, 0xfc, 0xfd, 0xfd, 0xff, 0xff, - 0xf0, 0xd8, 0xce, 0xd0, 0xd3, 0xd2, 0xd0, 0xd0, 0xd3, 0xd6, 0xd6, 0xd4, 0xd1, 0xd4, 0xdd, 0xe7, - 0xf2, 0xfd, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xf2, 0xd6, 0xb4, - 0xa1, 0xa2, 0xa6, 0xa6, 0xa5, 0xa4, 0xa4, 0xa6, 0xa8, 0xa7, 0xa6, 0xa7, 0xa5, 0xb3, 0xd6, 0xf6, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xfa, 0xea, 0xd7, 0xcf, 0xd1, 0xd1, 0xd1, 0xd2, 0xd3, 0xd3, 0xd3, 0xd4, 0xd5, 0xd5, 0xd3, 0xd2, - 0xdc, 0xed, 0xfa, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xfb, 0xe0, 0xbe, 0xaa, 0xa3, - 0xa2, 0xa2, 0xa5, 0xa7, 0xa6, 0xa4, 0xa4, 0xa6, 0xa9, 0xaa, 0xa5, 0xa4, 0xb8, 0xdb, 0xf7, 0xff, - 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xfb, 0xec, 0xd9, 0xcf, 0xd0, 0xd2, 0xd2, 0xd1, 0xd1, 0xd1, 0xd4, 0xd5, 0xd4, 0xd3, 0xd3, - 0xd3, 0xd6, 0xe0, 0xf0, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xeb, 0xbd, 0x9f, 0x9f, 0xa6, - 0xa8, 0xa6, 0xa3, 0xa3, 0xa3, 0xa7, 0xaa, 0xa6, 0xa0, 0xa6, 0xb8, 0xcb, 0xe3, 0xf7, 0xff, 0xff, - 0xfe, 0xf8, 0xe9, 0xdc, 0xdb, 0xe2, 0xf0, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfd, 0xfc, - 0xfc, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xf5, 0xec, 0xe7, 0xe7, 0xf1, 0xfb, 0xff, - 0xff, 0xff, 0xfb, 0xf1, 0xe4, 0xd9, 0xd0, 0xcd, 0xd0, 0xd2, 0xd2, 0xd1, 0xd1, 0xd1, 0xd4, 0xd6, - 0xd4, 0xd1, 0xd1, 0xe0, 0xf5, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf6, 0xd2, 0xa9, 0x9f, 0xa8, 0xa5, - 0xa2, 0xa5, 0xa6, 0xa9, 0xa7, 0xa4, 0xa5, 0xa9, 0xae, 0xbd, 0xdd, 0xfa, 0xff, 0xfd, 0xfd, 0xff, - 0xf9, 0xde, 0xbe, 0xaf, 0xad, 0xb5, 0xd0, 0xf3, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xf7, 0xdf, 0xcc, 0xc8, 0xcb, 0xd6, 0xe9, 0xfb, - 0xff, 0xfd, 0xfe, 0xff, 0xfc, 0xec, 0xdb, 0xd4, 0xd1, 0xd0, 0xd0, 0xd1, 0xd1, 0xd1, 0xd2, 0xd1, - 0xd3, 0xd5, 0xd2, 0xd6, 0xe9, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xeb, 0xc3, 0xa7, 0xa2, 0xa6, 0xa5, - 0xa3, 0xa3, 0xa6, 0xaa, 0xa9, 0xa5, 0xa6, 0xbb, 0xdd, 0xf3, 0xfa, 0xfe, 0xff, 0xff, 0xfe, 0xf9, - 0xdf, 0xba, 0xa9, 0xad, 0xae, 0xac, 0xb8, 0xd6, 0xf4, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xf6, 0xe1, 0xce, 0xc5, 0xc7, 0xca, 0xc8, 0xd3, 0xeb, - 0xfc, 0xfe, 0xff, 0xff, 0xff, 0xfd, 0xf8, 0xec, 0xdb, 0xd0, 0xcf, 0xd2, 0xd2, 0xd1, 0xd0, 0xd1, - 0xd3, 0xd4, 0xd2, 0xd5, 0xe2, 0xf6, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 
0xff, 0xff, 0xea, 0xc2, 0xa9, 0xa5, 0xa8, 0xa8, - 0xa8, 0xa9, 0xa6, 0xa1, 0xa7, 0xba, 0xce, 0xe5, 0xfb, 0xff, 0xfe, 0xfc, 0xfc, 0xff, 0xff, 0xe8, - 0xbf, 0xaa, 0xac, 0xb4, 0xb5, 0xb1, 0xb1, 0xc1, 0xe8, 0xff, 0xff, 0xfd, 0xff, 0xfa, 0xee, 0xe4, - 0xe4, 0xef, 0xfb, 0xff, 0xfd, 0xff, 0xff, 0xed, 0xd0, 0xc7, 0xc8, 0xcb, 0xcb, 0xc9, 0xc9, 0xd6, - 0xf0, 0xff, 0xff, 0xfd, 0xfd, 0xff, 0xff, 0xfd, 0xf1, 0xe5, 0xda, 0xd0, 0xce, 0xd1, 0xd3, 0xd3, - 0xd3, 0xd3, 0xd1, 0xd3, 0xe1, 0xf5, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf6, 0xd3, 0xab, 0x9f, 0xa6, 0xa5, - 0xa1, 0xa4, 0xaa, 0xb0, 0xc2, 0xe0, 0xf8, 0xff, 0xfe, 0xfd, 0xff, 0xff, 0xfc, 0xff, 0xfc, 0xd2, - 0xab, 0xac, 0xb3, 0xaf, 0xaf, 0xb3, 0xb3, 0xbc, 0xe4, 0xff, 0xff, 0xfe, 0xfa, 0xe3, 0xcb, 0xc0, - 0xc1, 0xce, 0xe6, 0xfa, 0xfe, 0xff, 0xff, 0xea, 0xcb, 0xc6, 0xc9, 0xc6, 0xc6, 0xcc, 0xcb, 0xca, - 0xe1, 0xfd, 0xff, 0xfe, 0xff, 0xfe, 0xfe, 0xfe, 0xff, 0xfc, 0xf0, 0xdf, 0xd5, 0xd3, 0xd1, 0xcf, - 0xd1, 0xd1, 0xce, 0xd4, 0xe9, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xf0, 0xc2, 0xa2, 0xa1, 0xa4, - 0xa1, 0xa6, 0xbe, 0xe0, 0xf6, 0xfd, 0xfe, 0xff, 0xfe, 0xfe, 0xfe, 0xfe, 0xff, 0xfc, 0xe1, 0xbd, - 0xab, 0xae, 0xb2, 0xaf, 0xb0, 0xb4, 0xb6, 0xc1, 0xe7, 0xff, 0xff, 0xfa, 0xe7, 0xc6, 0xb7, 0xbd, - 0xbd, 0xba, 0xca, 0xe9, 0xfb, 0xff, 0xff, 0xec, 0xce, 0xc5, 0xc7, 0xc8, 0xc7, 0xc9, 0xca, 0xca, - 0xd5, 0xeb, 0xfd, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xfb, 0xef, 0xdd, 0xd1, 0xce, - 0xcf, 0xce, 0xcf, 0xe0, 0xf8, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xfc, 0xe9, 0xd2, 0xc4, 0xc1, - 0xc5, 0xd3, 0xe7, 0xf8, 0xff, 0xff, 0xfd, 0xfc, 0xfe, 0xff, 0xfe, 0xfc, 0xff, 0xf4, 0xc6, 0xad, - 0xb0, 0xb1, 0xb0, 0xb2, 0xb2, 0xaf, 0xb8, 0xd1, 0xef, 0xff, 0xff, 0xef, 0xd0, 0xba, 0xb7, 0xc0, - 0xc0, 0xb9, 0xc0, 0xd7, 0xf2, 0xff, 0xff, 0xf3, 0xda, 0xc7, 0xc4, 0xc9, 0xc9, 0xc6, 0xc9, 0xcb, - 0xca, 0xdb, 0xf9, 0xff, 0xfd, 0xfe, 0xff, 0xfe, 0xfd, 0xfe, 0xff, 0xff, 0xfb, 0xf3, 0xe9, 0xe1, - 0xde, 0xe0, 0xe8, 0xf3, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xfe, 0xf4, 0xea, - 0xf4, 0xff, 0xff, 0xfc, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xe0, 0xb5, 0xa7, - 0xb0, 0xb2, 0xaf, 0xb2, 0xb3, 0xad, 0xbe, 0xe7, 0xfb, 0xff, 0xff, 0xe6, 0xc2, 0xbb, 0xbd, 0xbb, - 0xbc, 0xc0, 0xc1, 0xcb, 0xea, 0xff, 0xff, 0xfc, 0xeb, 0xcc, 0xc1, 0xc9, 0xc9, 0xc6, 0xc9, 0xca, - 0xc6, 0xd0, 0xeb, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xf9, - 0xf4, 0xf9, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, - 0xfe, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xe7, 0xbf, 0xae, 0xae, - 0xb0, 0xb1, 0xb0, 0xb2, 0xb4, 0xb5, 0xcf, 0xf7, 0xff, 0xff, 0xff, 0xe4, 0xbe, 0xba, 0xbe, 0xbb, - 0xbe, 0xc2, 0xbf, 0xc4, 0xe7, 0xff, 0xff, 0xff, 0xf9, 0xd8, 0xc5, 0xc7, 0xc8, 0xc7, 0xc8, 0xc9, - 0xc9, 0xca, 0xd6, 0xf0, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xfe, - 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xfc, 0xfc, 0xfe, 0xff, - 0xfe, 0xfc, 0xfd, 0xfe, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xfd, 0xff, 0xfd, 0xd4, 0xad, 0xac, 0xb4, - 0xb1, 0xb0, 0xb3, 0xb1, 0xb1, 0xc6, 0xe9, 0xfe, 0xfe, 0xff, 0xff, 0xe5, 0xbf, 0xb9, 0xbc, 0xbc, - 0xbd, 0xc0, 0xbd, 0xc4, 0xe7, 0xff, 0xff, 0xfe, 0xfe, 0xed, 0xd1, 0xc2, 0xc5, 0xc9, 0xc7, 0xc8, - 0xcb, 0xc8, 0xca, 0xe3, 0xfd, 0xff, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfd, 0xfe, - 0xff, 0xfe, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xfc, 0xe5, 0xc0, 0xab, 0xae, 0xb3, - 0xb0, 0xb0, 0xb6, 0xb2, 0xb3, 0xd8, 0xff, 0xff, 0xfa, 0xff, 0xff, 0xe4, 0xc0, 0xba, 0xbd, 0xbb, - 0xbc, 0xc0, 0xbe, 0xc5, 0xe6, 0xff, 0xff, 0xfa, 0xff, 0xff, 0xe1, 0xc4, 0xc5, 0xca, 0xc7, 0xc7, - 0xc9, 0xc9, 0xc9, 0xd6, 0xed, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfd, 0xff, 0xf5, 0xc8, 0xae, 0xaf, 0xb1, 0xb0, - 0xb1, 0xb2, 0xb3, 0xb5, 0xc6, 0xe9, 0xff, 0xff, 0xfa, 0xff, 0xff, 0xe4, 0xc0, 0xba, 0xbd, 0xbc, - 0xbe, 0xc1, 0xbe, 0xc5, 0xe6, 0xff, 0xff, 0xfb, 0xff, 0xff, 0xee, 0xd3, 0xc7, 0xc7, 0xc8, 0xc8, - 0xc7, 0xc9, 0xca, 0xc9, 0xda, 0xf8, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xe4, 0xb7, 0xa8, 0xb1, 0xb2, 0xae, - 0xb3, 0xb4, 0xae, 0xba, 0xe1, 0xfb, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xe4, 0xc0, 0xba, 0xbd, 0xbc, - 0xbd, 0xc0, 0xbe, 0xc5, 0xe6, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xfc, 0xe7, 0xc9, 0xc1, 0xc9, 0xc9, - 0xc6, 0xc9, 0xc9, 0xc5, 0xd0, 0xed, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe9, 0xa7, 0xa7, + 0xa7, 0xa7, 0xe9, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xec, 0xc8, 0xb1, 0xae, 0xb2, 0xb2, 0xaf, - 0xb2, 0xb5, 0xb2, 0xc8, 0xf5, 0xff, 0xfe, 0xff, 0xfe, 0xff, 0xff, 0xe5, 0xc0, 0xba, 0xbd, 0xbb, - 0xbc, 0xc0, 0xbe, 0xc5, 0xe6, 0xff, 0xff, 0xfe, 0xff, 0xfe, 0xff, 0xf6, 0xd3, 0xc3, 0xc9, 0xc9, - 0xc7, 0xc8, 0xc8, 0xc9, 0xcc, 0xda, 0xf1, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd7, 0xae, + 0xae, 0xd8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xd9, 0xaf, 0xad, 0xb4, 0xb0, 0xb0, 0xb2, - 0xb0, 0xb2, 0xc2, 0xe1, 0xfc, 0xff, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xe5, 0xc0, 0xba, 0xbe, 0xbc, - 0xbc, 
0xbf, 0xbe, 0xc5, 0xe6, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xff, 0xfc, 0xe8, 0xd0, 0xc6, 0xc7, - 0xc9, 0xc7, 0xc7, 0xcb, 0xc8, 0xc9, 0xe6, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xff, 0xf9, 0xcd, 0xa8, 0xac, 0xb4, 0xae, 0xae, 0xb4, - 0xb0, 0xb1, 0xd4, 0xfa, 0xff, 0xfc, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xe5, 0xc0, 0xba, 0xbd, 0xbb, - 0xbc, 0xc0, 0xbe, 0xc5, 0xe6, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xfd, 0xff, 0xfb, 0xde, 0xc4, 0xc6, - 0xca, 0xc6, 0xc7, 0xca, 0xc5, 0xc5, 0xde, 0xfa, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xee, 0xc6, 0xab, 0xaf, 0xb3, 0xb0, 0xb1, 0xb2, - 0xb1, 0xc1, 0xe7, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xe5, 0xc0, 0xba, 0xbd, 0xbb, - 0xbc, 0xc0, 0xbe, 0xc5, 0xe6, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xeb, 0xce, 0xc5, - 0xc9, 0xc9, 0xc8, 0xca, 0xc8, 0xc6, 0xd9, 0xf4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0, 0xc9, 0xac, 0xb0, 0xb3, 0xb2, 0xb4, 0xae, - 0xb8, 0xdd, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xe5, 0xc0, 0xba, 0xbe, 0xbc, - 0xbc, 0xbf, 0xbe, 0xc5, 0xe6, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfb, 0xe6, 0xca, - 0xc4, 0xca, 0xc9, 0xca, 0xc7, 0xc5, 0xdb, 0xf7, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xff, 0xfc, 0xd5, 0xae, 0xab, 0xb1, 0xae, 0xab, 0xaf, - 0xcc, 0xf4, 0xff, 0xfd, 0xfe, 0xff, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xe5, 0xc0, 0xba, 0xbe, 0xbc, - 0xbc, 0xbf, 0xbe, 0xc5, 0xe6, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xff, 0xfe, 0xfd, 0xff, 0xf7, 0xd8, - 0xc4, 0xc3, 0xc5, 0xc7, 0xc3, 0xc6, 0xe3, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xeb, 0xc8, 0xb7, 0xb5, 0xb3, 0xb5, 0xca, - 0xeb, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xe5, 0xc0, 0xba, 0xbd, 0xbb, - 0xbc, 0xc0, 0xbe, 0xc5, 0xe6, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xef, - 0xd7, 0xc9, 0xc9, 0xcb, 0xcc, 0xd8, 0xf1, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xf2, 0xde, 0xce, 0xcd, 0xdc, 0xf1, - 0xfe, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xe5, 0xc0, 0xba, 0xbd, 0xbb, - 0xbc, 0xc0, 0xbe, 0xc5, 0xe6, 0xff, 0xff, 0xfc, 0xff, 
0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xf4, 0xe5, 0xdd, 0xdd, 0xe8, 0xf6, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xf9, 0xef, 0xef, 0xfa, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xe5, 0xc2, 0xbc, 0xbe, 0xba, - 0xbb, 0xc0, 0xbf, 0xc6, 0xe7, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xfb, 0xf4, 0xf4, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xfe, - 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xea, 0xc8, 0xbb, 0xbc, 0xbe, - 0xbe, 0xbe, 0xbf, 0xcb, 0xeb, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, - 0xfe, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xf5, 0xd8, 0xbd, 0xb8, 0xc1, - 0xc1, 0xba, 0xc1, 0xd9, 0xf5, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xee, 0xcf, 0xbb, 0xb9, - 0xb9, 0xbc, 0xd1, 0xef, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xfc, 0xeb, 0xd6, 0xca, - 0xca, 0xd7, 0xec, 0xfd, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xf9, 0xf6, - 0xf6, 0xf9, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfd, 0xfd, - 0xfd, 0xfd, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, - 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, @@ -1730,14 +4236,10 @@ const unsigned char gGearPict3x[9 * kGearFrames * kGearWidth * kGearHeight] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, - 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xfe, - 0xfe, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, @@ -1748,554 +4250,209 @@ const unsigned char gGearPict3x[9 * kGearFrames * 
kGearWidth * kGearHeight] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfd, 0xfd, - 0xfd, 0xfd, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfd, 0xfb, - 0xfb, 0xfd, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xfd, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfb, 0xf5, 0xf0, - 0xf0, 0xf5, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xfa, 0xd9, 0xb1, 0x99, - 0x99, 0xb1, 0xd9, 0xfa, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfd, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xe2, 0xa5, 0x7b, 0x74, - 0x74, 0x7b, 0xa4, 0xde, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xfc, 0xfc, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xee, 0xb8, 0x82, 0x73, 0x82, - 0x81, 0x74, 0x7f, 0xaf, 0xe8, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, - 0xfe, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xfe, - 0xfe, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xfb, 0xff, 0xff, 0xd7, 0x97, 0x7d, 0x7b, 0x7d, - 0x7d, 0x7b, 0x7a, 0x92, 0xd5, 0xff, 0xff, 0xfb, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xfe, - 0xfe, 0xfe, 0xff, 0xff, 0xfe, 0xfc, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xf8, 0xe9, 0xe8, 0xf6, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xfa, 0xff, 0xff, 0xce, 0x8b, 0x7e, 0x7f, 0x77, - 0x77, 0x7f, 0x7b, 0x87, 0xcc, 0xff, 0xff, 0xfa, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xfe, - 0xff, 0xf6, 0xe5, 0xe5, 0xf5, 0xfe, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xed, 0xce, 0xb4, 0xb2, 0xc8, 0xe9, - 0xfe, 0xff, 0xfe, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xfa, 0xff, 0xff, 0xcd, 0x88, 0x7a, 0x7f, 0x7b, - 0x7b, 0x7e, 0x78, 0x85, 0xcc, 0xff, 0xff, 0xfa, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xfe, 0xff, 0xfe, - 0xe7, 0xc2, 0xa9, 0xa9, 0xc6, 0xeb, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xe1, 0xab, 0x90, 0x8c, 0x87, 0x89, 0xab, - 0xdf, 0xfc, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xfa, 0xff, 0xff, 0xcd, 0x88, 0x7a, 0x7e, 0x7a, - 0x7a, 0x7e, 0x78, 0x85, 0xcc, 0xff, 0xff, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xfc, 0xdc, - 0xa2, 0x7c, 0x79, 0x7d, 0x81, 0xa1, 0xdd, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfb, 0xff, 0xfd, 0xc0, 0x81, 0x7a, 0x84, 0x80, 0x7a, 0x7f, - 0xad, 0xee, 0xff, 0xfc, 0xfe, 0xff, 0xff, 0xff, 0xfa, 0xff, 0xff, 0xcd, 0x88, 0x7c, 0x80, 0x79, - 0x79, 0x7f, 0x7a, 0x85, 0xcb, 0xff, 0xff, 0xf9, 0xff, 0xff, 0xff, 0xfd, 0xfc, 0xff, 0xec, 0xa6, - 0x72, 0x6b, 0x70, 0x75, 0x6a, 0x71, 0xb9, 0xfd, 0xff, 0xfb, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xea, 0xac, 0x7f, 0x84, 0x88, 0x86, 0x88, 0x7a, - 0x89, 0xc8, 0xf7, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xf9, 0xff, 0xff, 0xcd, 0x88, 0x7b, 0x7f, 0x79, - 0x79, 0x7e, 0x79, 0x85, 0xcb, 0xff, 0xff, 0xf9, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xf6, 0xc2, 0x7d, - 0x6d, 0x7b, 0x78, 0x7a, 0x75, 0x6f, 0xa3, 0xe8, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xe3, 0xa9, 0x81, 0x83, 0x88, 0x84, 0x85, 0x85, - 0x81, 0x98, 0xd7, 0xff, 0xff, 0xfa, 0xff, 0xff, 0xf9, 0xff, 0xff, 0xcd, 0x88, 0x7b, 0x7f, 0x79, - 0x79, 0x7e, 0x79, 0x85, 0xcb, 0xff, 0xff, 0xf9, 0xff, 0xff, 0xfa, 0xff, 0xff, 0xd3, 0x8e, 0x74, - 0x77, 0x77, 0x75, 0x7a, 0x75, 0x70, 0x9e, 0xe1, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x8e, 0x2a, + 0x2a, 0x8e, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xff, 0xf3, 0xb3, 0x7c, 0x7f, 0x89, 0x81, 0x81, 0x89, - 0x81, 0x7e, 0xb7, 0xf7, 0xff, 0xfa, 0xff, 0xff, 0xf9, 0xff, 0xff, 0xce, 0x89, 0x7b, 0x80, 0x7a, - 0x7a, 0x7e, 0x79, 0x85, 0xcb, 0xff, 0xff, 0xf9, 0xff, 0xff, 0xfa, 0xff, 0xf8, 0xb2, 0x72, 0x74, - 0x7c, 0x71, 0x72, 0x7b, 0x6e, 0x6a, 0xa7, 0xf0, 0xff, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbd, 0x1b, 0x1a, + 0x1b, 0x1b, 0xbd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xfa, 0xff, 0xff, 0xc5, 0x84, 0x82, 0x8a, 0x83, 0x85, 0x87, - 0x81, 0x7f, 0x99, 0xce, 0xf9, 0xff, 0xff, 0xff, 0xfb, 0xff, 0xff, 0xce, 0x89, 0x7c, 0x81, 0x7b, - 0x7a, 0x7e, 0x79, 0x85, 0xcb, 0xff, 0xff, 0xfb, 0xff, 0xff, 0xff, 0xf9, 0xca, 0x90, 0x72, 0x73, - 0x79, 0x75, 0x74, 0x7c, 0x71, 0x72, 0xbb, 0xff, 0xff, 0xf9, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x82, 0x1a, 0x1a, + 0x1a, 0x1a, 0x83, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xdf, 0xa6, 0x89, 0x84, 0x85, 0x86, 0x82, - 0x85, 0x86, 0x7e, 0xa4, 0xf0, 0xff, 0xfc, 0xff, 0xfc, 0xff, 0xff, 0xcd, 0x88, 0x7c, 0x81, 0x7b, - 0x7a, 0x7e, 0x79, 0x85, 0xcb, 0xff, 0xff, 0xfc, 0xff, 0xfc, 0xff, 0xed, 0x9a, 0x71, 0x78, 0x77, - 0x73, 0x77, 0x76, 0x75, 0x78, 0x98, 0xd9, 0xff, 0xff, 0xfb, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, - 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfd, 0xfb, 0xd3, 0x92, 0x7d, 0x89, 0x87, 0x81, - 0x88, 0x88, 0x79, 0x8f, 0xd2, 0xfb, 0xfe, 0xff, 0xfb, 0xff, 0xff, 0xcd, 0x88, 0x7b, 0x80, 0x7a, - 0x7a, 0x7e, 0x79, 0x85, 0xcb, 0xff, 0xff, 0xfb, 0xff, 0xfe, 0xfb, 0xce, 0x83, 0x6a, 0x7a, 0x7a, - 0x72, 0x79, 0x79, 0x6a, 0x81, 0xcb, 0xfb, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xfd, 0xfc, 0xff, 0xf1, 0xab, 0x84, 0x88, 0x87, 0x83, - 0x86, 0x86, 0x83, 0x85, 0xa3, 0xdd, 0xff, 0xff, 0xf7, 0xff, 0xff, 0xcd, 0x89, 0x7c, 0x7f, 0x79, - 0x79, 0x7e, 0x79, 0x85, 0xcb, 0xff, 0xff, 0xf6, 0xff, 0xff, 0xda, 0x9b, 0x7a, 0x75, 0x76, 0x77, - 0x74, 0x78, 0x78, 0x72, 0x9c, 0xed, 0xff, 0xfb, 0xfd, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xfe, 0xff, 0xff, 0xfe, 0xff, 0xfb, 0xd6, 0xa1, 0x82, 0x83, 0x88, - 0x83, 0x83, 0x89, 0x7f, 0x80, 0xbf, 0xff, 0xff, 0xf5, 0xff, 0xff, 0xcd, 0x89, 0x7d, 0x80, 0x79, - 0x79, 0x7f, 0x7a, 0x85, 0xcb, 0xff, 0xff, 0xf4, 0xff, 0xff, 0xb9, 0x74, 0x72, 0x7c, 0x74, 0x74, - 0x79, 0x74, 0x71, 0x92, 0xcf, 0xfa, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xfa, 0xfd, 0xff, - 0xfd, 0xfb, 0xfc, 0xfe, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xfb, 0xff, 0xfb, 0xbe, 0x84, 0x83, 0x8b, - 0x83, 0x83, 0x89, 0x81, 0x7b, 0xa0, 0xdd, 0xfe, 0xfc, 0xff, 0xff, 0xce, 0x89, 0x7c, 0x7f, 0x7a, - 0x7a, 0x7d, 0x77, 0x84, 0xcc, 0xff, 0xff, 0xfb, 0xfe, 0xda, 0x97, 0x6d, 0x71, 0x7a, 0x74, 0x73, - 0x7d, 0x72, 0x71, 0xb3, 0xfa, 0xff, 0xfa, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, - 0xff, 0xfe, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, - 0xfe, 0xff, 0xff, 0xfe, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xdc, 0xa1, 0x87, 0x85, - 0x85, 0x85, 0x84, 0x86, 0x85, 0x82, 0xad, 0xf1, 0xff, 0xff, 0xff, 0xcf, 0x89, 0x7d, 0x80, 0x79, - 0x78, 0x7e, 0x7a, 0x86, 0xce, 0xff, 0xff, 0xff, 0xf0, 0xa7, 0x78, 0x78, 0x77, 0x74, 0x76, 0x76, - 0x75, 0x75, 0x90, 0xd5, 0xff, 0xff, 0xfb, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xfe, 0xf2, 0xe7, - 0xf1, 0xff, 0xff, 0xfb, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfa, 0xcf, 0x90, 0x7b, - 0x88, 0x88, 0x82, 0x87, 0x86, 0x78, 0x91, 0xd6, 0xf9, 0xff, 0xff, 0xd2, 0x8f, 0x80, 0x80, 0x7a, - 0x79, 0x7d, 0x7d, 0x8f, 0xd2, 0xff, 0xff, 0xf9, 0xd2, 0x87, 0x6c, 0x7a, 0x79, 0x72, 0x7a, 0x7a, - 0x69, 0x7e, 0xc6, 0xf9, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xfc, - 0xf9, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xfe, 0xfe, 0xff, 0xfb, 0xe0, 0xc7, 0xb9, 0xb1, - 0xb6, 0xcb, 0xe5, 0xf8, 0xff, 0xff, 0xfc, 0xfb, 0xfe, 0xff, 0xfd, 0xfb, 0xff, 0xed, 0xa7, 0x84, - 0x8a, 0x86, 0x81, 0x86, 0x86, 0x7d, 0x86, 0xb2, 0xe5, 0xff, 0xff, 0xe0, 0xa5, 0x7d, 0x77, 0x84, - 0x81, 0x73, 0x7a, 0xa5, 0xe0, 0xff, 0xff, 0xe3, 0xab, 0x7b, 0x6f, 0x78, 0x79, 0x72, 0x76, 0x79, - 0x72, 0x99, 0xea, 0xff, 0xfb, 0xfd, 0xff, 0xfe, 0xfe, 0xfe, 0xff, 0xff, 0xfe, 0xf9, 0xf2, 0xec, - 0xeb, 0xed, 0xf1, 0xf7, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfd, 0xff, 0xe9, 0xab, 0x86, 0x89, 0x8a, - 0x83, 0x8d, 0xb0, 0xda, 0xf4, 0xfd, 0xff, 0xff, 0xfe, 0xff, 0xfe, 0xfe, 0xff, 0xfa, 0xd2, 0x9d, - 0x82, 0x84, 0x87, 0x83, 0x84, 0x85, 0x82, 0x96, 0xd6, 0xff, 0xff, 0xf4, 0xcf, 0x92, 0x74, 0x7d, - 0x7c, 0x72, 0x8f, 0xcc, 0xf3, 0xff, 0xff, 0xd2, 0x8d, 0x77, 0x77, 0x74, 0x74, 0x78, 0x75, 0x71, - 0x8e, 0xca, 0xf9, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xf6, 0xec, 0xe3, 0xe0, - 0xe1, 0xe1, 0xdf, 0xe9, 0xfa, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf4, 0xc9, 0x96, 0x84, 0x8c, 0x8c, - 0x88, 0x8b, 0x91, 0x98, 0xb1, 0xda, 0xf8, 0xff, 0xfe, 0xfc, 0xfe, 0xff, 0xfb, 0xff, 0xfb, 0xbb, - 0x80, 0x84, 0x8b, 0x82, 0x82, 0x87, 0x81, 0x8c, 0xd0, 0xff, 0xff, 0xfe, 0xf5, 0xc8, 0x97, 0x80, - 0x80, 0x96, 0xc6, 0xf5, 0xfe, 0xff, 0xff, 0xce, 0x85, 0x76, 0x79, 0x72, 0x71, 0x7c, 0x73, 0x6f, - 0xaf, 0xf9, 0xff, 0xfb, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xfe, 0xf7, 0xec, 0xe6, 0xe5, 0xe3, 0xe1, - 0xe1, 0xe1, 0xe0, 0xe4, 0xf2, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xe6, 0xb5, 0x95, 0x8f, 0x92, 0x92, - 0x92, 0x92, 0x8c, 0x84, 0x8c, 0xa7, 0xc4, 0xe0, 0xfb, 0xff, 0xfe, 0xfb, 0xf9, 0xff, 0xff, 0xda, - 0x9e, 0x82, 0x84, 0x8a, 0x8b, 0x86, 0x82, 0x98, 0xd7, 0xff, 0xff, 0xfc, 0xff, 0xf3, 0xd9, 0xc5, - 0xc5, 0xd9, 0xf2, 0xff, 0xfc, 0xff, 0xff, 0xd4, 0x8f, 0x75, 0x77, 0x7d, 0x7c, 0x73, 0x70, 0x8e, - 0xd5, 0xff, 0xff, 0xfa, 0xff, 0xff, 0xff, 0xfe, 0xf8, 0xf1, 0xea, 0xe3, 0xe0, 0xe2, 0xe4, 0xe4, - 0xe3, 0xe3, 0xe3, 0xe4, 0xec, 0xf9, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xe7, 0xb5, 0x91, 0x8b, 0x90, 0x8e, - 0x8b, 0x8c, 0x8e, 0x91, 0x8f, 0x89, 0x8b, 0xa8, 0xd3, 0xf1, 0xfb, 0xfe, 0xff, 0xff, 0xfe, 0xf8, - 0xd0, 0x98, 0x7e, 0x84, 0x83, 0x7d, 0x8e, 0xbb, 0xec, 0xff, 0xff, 0xfd, 0xfe, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xfe, 0xfd, 0xff, 0xff, 0xea, 0xb4, 0x82, 0x6d, 0x73, 0x73, 0x6c, 0x88, 0xc8, - 0xf6, 0xfe, 0xff, 0xff, 0xff, 0xfe, 0xfc, 0xf5, 0xea, 0xe2, 0xe2, 0xe3, 0xe3, 0xe2, 0xe1, 0xe1, - 0xe2, 0xe2, 0xe1, 0xe3, 0xec, 0xf9, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf4, 0xc7, 0x93, 0x88, 0x93, 0x8f, - 0x8b, 0x8e, 0x8f, 0x91, 0x8f, 0x8b, 0x8a, 0x8d, 0x95, 0xac, 0xd5, 0xf9, 0xff, 0xfd, 0xfc, 0xff, - 0xf5, 0xcb, 0x9d, 0x85, 0x81, 0x8b, 0xb2, 0xe9, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfd, 0xff, 0xe8, 0xa9, 0x7b, 0x6f, 0x72, 0x8d, 0xc3, 0xf3, - 0xff, 0xfd, 0xfe, 0xff, 0xfc, 0xf5, 0xeb, 0xe5, 0xe3, 0xe2, 0xe2, 0xe3, 0xe3, 0xe2, 0xe2, 0xe2, - 0xe2, 0xe3, 0xe0, 0xe3, 0xf0, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xe5, 
 [raw pixel-data hunks omitted: this span contains only 0x.. byte values being removed and added in the embedded gear-image arrays; no code or prose changes occur here. The hunk headers visible inside the span are:
 @@ -2306,14 +4463,10 @@ const unsigned char gGearPict3x[9 * kGearFrames * kGearWidth * kGearHeight] = {
 @@ -2324,1130 +4477,517 @@ const unsigned char gGearPict3x[9 * kGearFrames * kGearWidth * kGearHeight] = {]
0xd4, 0x8c, 0x6f, 0x72, 0x78, 0x77, 0x70, 0x6a, 0x6f, 0x8b, 0xaf, 0xce, 0xee, 0xff, 0xff, 0xfa, - 0xf9, 0xfd, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xfd, 0xfc, 0xff, 0xff, 0xe8, 0xc3, 0xaa, 0xa1, 0x9e, 0x9b, 0x9c, 0xa3, 0xb9, 0xe5, - 0xff, 0xff, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, - 0xed, 0xb9, 0x86, 0x72, 0x76, 0x76, 0x77, 0x7f, 0x96, 0xca, 0xf7, 0xff, 0xfd, 0xfd, 0xfe, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xf6, 0xe5, 0xcc, 0xb5, 0xa8, 0xad, 0xc9, 0xec, 0xfe, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, - 0xff, 0xee, 0xbe, 0x96, 0x89, 0x8d, 0xa4, 0xc9, 0xeb, 0xfc, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xfc, 0xfc, 0xfc, 0xfb, 0xfb, 0xfb, - 0xfb, 0xfc, 0xfd, 0xfc, 0xf9, 0xf8, 0xfb, 0xff, 0xfe, 0xf1, 0xe1, 0xda, 0xdf, 0xee, 0xfd, 0xff, - 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xfd, 0xee, 0xd9, 0xcc, 0xd0, 0xe3, 0xf6, 0xfe, 0xfe, 0xfb, 0xfa, 0xfc, 0xff, 0xff, 0xfe, - 0xfd, 0xfd, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xfe, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfc, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xf6, 0xe9, 0xe0, 0xdf, 0xdf, 0xdf, 0xdf, 0xdf, - 0xdf, 0xdf, 0xde, 0xdd, 0xdd, 0xdd, 0xdd, 0xdf, 0xe3, 0xe9, 0xf4, 0xfe, 0xff, 0xfd, 0xfe, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xfe, 0xfb, 
0xfa, 0xfb, 0xfa, 0xf9, 0xf8, 0xf7, 0xf6, 0xf4, 0xf4, 0xf4, 0xf4, 0xf4, - 0xf4, 0xf5, 0xf4, 0xf4, 0xf5, 0xf5, 0xf7, 0xfa, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xee, 0xd2, 0xbe, 0xb3, 0xaf, 0xaf, 0xaf, 0xaf, 0xaf, - 0xaf, 0xaf, 0xad, 0xac, 0xac, 0xac, 0xaa, 0xac, 0xb4, 0xbe, 0xd7, 0xf6, 0xff, 0xfe, 0xfe, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xfa, 0xf2, 0xea, 0xe7, 0xe7, 0xe7, 0xe6, 0xe6, 0xe6, 0xe6, 0xe6, - 0xe6, 0xe5, 0xe6, 0xe6, 0xe5, 0xe6, 0xe9, 0xee, 0xf7, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf1, 0xc9, 0xa8, 0xa5, 0xa7, 0xa3, 0xa4, 0xa4, 0xa3, 0xa3, - 0xa3, 0xa3, 0xa3, 0xa3, 0xa3, 0xa3, 0xa1, 0xa3, 0xa5, 0xa4, 0xaf, 0xd0, 0xf2, 0xff, 0xff, 0xfe, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xf8, 0xea, 0xe3, 0xe4, 0xe3, 0xe2, 0xe2, 0xe3, 0xe3, 0xe3, 0xe3, 0xe3, - 0xe3, 0xe2, 0xe3, 0xe3, 0xe2, 0xe2, 0xe2, 0xe3, 0xe9, 0xf4, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xfa, 0xd7, 0xad, 0x9c, 0xa2, 0xa8, 0xa7, 0xa7, 0xa7, 0xa6, 0xa6, - 0xa6, 0xa6, 0xa6, 0xa7, 0xa7, 0xa7, 0xa7, 0xa7, 0xa5, 0xa2, 0x9f, 0xae, 0xde, 0xff, 0xff, 0xfc, - 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xfa, 0xec, 0xe1, 0xde, 0xe2, 0xe3, 0xe2, 0xe2, 0xe3, 0xe3, 0xe3, 0xe3, 0xe3, - 0xe3, 0xe3, 0xe3, 0xe3, 0xe3, 0xe3, 0xe2, 0xe1, 0xe0, 0xeb, 0xfc, 0xff, 0xfe, 0xff, 0xff, 0xff, - 0xfe, 0xff, 0xfe, 0xfc, 0xff, 0xf5, 0xbf, 0xa5, 0xa9, 0xa6, 0xa3, 0xa4, 0xa4, 0xa4, 0xa4, 0xa5, - 0xa5, 0xa4, 0xa4, 0xa4, 0xa4, 0xa4, 0xa5, 0xa3, 0xa3, 0xab, 0xa3, 0x9f, 0xd2, 0xff, 0xff, 0xfa, - 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xfe, 0xff, 0xff, 0xf5, 0xe6, 0xe3, 0xe3, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, - 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe1, 0xe3, 0xe4, 0xdf, 0xe6, 0xfa, 0xff, 0xfe, 0xfe, 0xff, 0xff, - 0xfe, 0xff, 0xfe, 0xfc, 0xff, 0xf5, 0xbf, 0xa5, 0xaa, 0xa8, 0xa4, 0xa5, 0xa5, 0xa5, 0xa5, 0xa5, - 0xa5, 0xa5, 0xa5, 0xa5, 0xa5, 0xa5, 0xa6, 0xa4, 0xa4, 0xac, 0xa3, 0x9f, 0xd2, 0xff, 0xff, 0xfa, - 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xfe, 0xff, 0xff, 0xf5, 0xe6, 0xe3, 0xe3, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, - 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe1, 0xe3, 0xe4, 0xdf, 0xe5, 0xfa, 0xff, 0xfe, 0xfe, 0xff, 0xff, - 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xfa, 0xd3, 0xac, 0x9e, 0xa3, 0xa8, 0xa8, 0xa8, 0xaa, 0xa9, 0xa7, - 0xa7, 0xa8, 0xa9, 0xa9, 0xa9, 0xa9, 0xa9, 0xa9, 0xa7, 0xa6, 0xa1, 0xad, 0xdd, 0xff, 0xff, 0xfc, - 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xf9, 0xeb, 0xe0, 0xdf, 0xe3, 0xe3, 
0xe3, 0xe3, 0xe3, 0xe3, 0xe3, 0xe3, 0xe3, - 0xe3, 0xe3, 0xe3, 0xe3, 0xe3, 0xe2, 0xe2, 0xe2, 0xe0, 0xea, 0xfc, 0xff, 0xfe, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xee, 0xc7, 0xa8, 0xa6, 0xa8, 0xa5, 0xa6, 0xa8, 0xa7, 0xa6, - 0xa7, 0xa8, 0xa8, 0xa8, 0xa8, 0xa8, 0xa7, 0xa8, 0xa9, 0xa8, 0xb1, 0xd0, 0xf1, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xfe, 0xf5, 0xe7, 0xe1, 0xe2, 0xe1, 0xe1, 0xe1, 0xe1, 0xe1, 0xe1, 0xe1, 0xe1, - 0xe1, 0xe2, 0xe2, 0xe1, 0xe2, 0xe2, 0xe2, 0xe2, 0xe8, 0xf4, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xec, 0xd0, 0xbd, 0xb3, 0xb0, 0xb0, 0xb0, 0xb0, 0xb2, - 0xb2, 0xb2, 0xb2, 0xb2, 0xb2, 0xb2, 0xb2, 0xb2, 0xb6, 0xbe, 0xd6, 0xf4, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xfe, 0xf6, 0xec, 0xe6, 0xe3, 0xe3, 0xe3, 0xe3, 0xe3, 0xe3, 0xe3, 0xe3, - 0xe3, 0xe4, 0xe3, 0xe3, 0xe4, 0xe4, 0xe5, 0xeb, 0xf5, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xf5, 0xe7, 0xde, 0xdd, 0xdd, 0xdd, 0xdd, 0xde, - 0xde, 0xde, 0xde, 0xde, 0xde, 0xde, 0xde, 0xdf, 0xe1, 0xe7, 0xf2, 0xfe, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xf8, 0xf4, 0xf3, 0xf3, 0xf3, 0xf2, 0xf2, 0xf3, 0xf3, 0xf3, - 0xf3, 0xf3, 0xf3, 0xf3, 0xf3, 0xf3, 0xf4, 0xf8, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xfc, 0xfc, 0xfb, 0xfb, 0xfb, 0xfb, - 0xfb, 0xfc, 0xfd, 0xfd, 0xfa, 0xf9, 0xfc, 0xff, 0xfe, 0xf6, 0xeb, 0xe5, 0xe9, 0xf4, 0xfe, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xfc, 0xf8, 0xf5, 0xf6, 0xfa, 0xfe, 0xff, 0xfe, 0xfd, 0xfd, 0xfe, 0xfe, 0xfe, 0xfe, - 0xfe, 0xfe, 
0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf8, 0xea, 0xd9, 0xc6, 0xba, 0xbf, 0xd8, 0xf3, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xfb, 0xf1, 0xe7, 0xe4, 0xe6, 0xec, 0xf5, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xfe, 0xfc, 0xff, 0xff, 0xef, 0xcf, 0xba, 0xb3, 0xb0, 0xad, 0xae, 0xb7, 0xca, 0xec, - 0xff, 0xff, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xfb, 0xee, 0xe1, 0xdc, 0xdc, 0xdd, 0xde, 0xe2, 0xe9, 0xf4, 0xfd, 0xff, 0xfe, 0xfe, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xfc, - 0xfc, 0xff, 0xff, 0xfc, 0xef, 0xdb, 0xc5, 0xb3, 0xa7, 0xa8, 0xae, 0xb1, 0xb1, 0xab, 0xaf, 0xda, - 0xff, 0xff, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, - 0xf4, 0xe1, 0xd9, 0xda, 0xdc, 0xdb, 0xda, 0xda, 0xdd, 0xe4, 0xed, 0xf4, 0xfb, 0xff, 0xff, 0xfe, - 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, - 0xff, 0xfe, 0xf9, 0xe8, 0xca, 0xb0, 0xa9, 0xae, 0xb0, 0xb2, 0xb1, 0xaf, 0xb4, 0xaf, 0xad, 0xd2, - 0xfc, 0xff, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xff, 0xff, - 0xef, 0xdc, 0xdb, 0xdd, 0xdc, 0xdc, 0xdc, 0xdc, 0xdc, 0xdc, 0xdd, 0xe3, 0xf0, 0xfa, 0xfe, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xfe, 0xff, - 0xfc, 0xe9, 0xcc, 0xb8, 0xb1, 0xad, 0xad, 0xb0, 0xb2, 0xb3, 0xb0, 0xaf, 0xb5, 0xb0, 0xae, 0xd4, - 0xfe, 0xff, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xff, 0xff, - 0xf0, 0xdc, 0xd9, 0xdc, 0xdb, 0xdb, 0xdb, 0xdc, 0xdc, 0xdb, 0xdb, 0xdd, 0xdf, 0xe5, 0xf1, 0xfb, - 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xfc, 0xeb, - 0xd7, 0xc3, 0xb0, 0xa8, 0xad, 0xb3, 0xb3, 0xb0, 0xaf, 0xaf, 0xb1, 0xb4, 0xb5, 0xaf, 0xb6, 0xde, - 0xff, 0xff, 0xfb, 0xfd, 0xfc, 0xfc, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfd, 0xfd, 0xfd, 0xfd, 0xff, 0xff, - 0xf7, 0xe4, 0xd7, 0xd7, 0xdc, 0xdb, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xdd, 0xda, 0xdb, 0xe2, 0xea, - 0xf3, 0xfc, 0xff, 0xff, 0xfe, 0xfe, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xf7, 0xe2, 0xc3, - 0xad, 0xab, 0xb1, 0xb3, 0xb0, 0xb0, 0xb1, 0xb1, 0xb2, 0xb2, 0xb3, 0xb2, 0xb3, 0xbb, 0xd0, 0xef, - 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xfe, 0xf2, 0xe0, 0xd7, 0xd8, 0xd9, 0xda, 0xdc, 0xdb, 0xdb, 0xdb, 0xdc, 0xdd, 0xdd, 0xdb, 0xdb, - 0xe1, 0xed, 0xf8, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xee, 0xce, 0xb4, 0xae, - 0xad, 0xad, 0xb1, 0xb3, 0xb2, 0xb0, 0xb0, 0xb1, 0xb2, 0xb5, 0xb3, 0xb1, 0xbb, 0xd7, 0xf3, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, - 0xff, 0xfd, 0xf2, 0xe2, 0xd7, 0xd7, 0xdb, 0xdc, 0xdb, 0xdb, 0xdb, 0xdb, 0xdc, 0xdd, 0xdb, 0xda, - 0xdb, 0xde, 0xe4, 0xf0, 0xfd, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xf3, 0xcd, 0xaf, 0xa9, 0xae, - 0xb4, 0xb3, 0xb0, 0xae, 0xaf, 0xb2, 0xb5, 0xb4, 0xaf, 0xb0, 0xbd, 0xce, 0xe1, 0xf2, 0xfe, 0xff, - 0xff, 0xfd, 0xf1, 0xe2, 0xe0, 0xe6, 0xf1, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfd, - 0xfd, 0xfd, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xf9, 0xf1, 0xec, 0xec, 0xf2, 0xfa, 0xff, - 0xff, 0xff, 0xfd, 0xf5, 0xe9, 0xe1, 0xda, 0xd7, 0xd9, 0xdb, 0xdb, 0xda, 0xda, 0xda, 0xdc, 0xdd, - 0xdd, 0xdb, 0xd9, 0xe3, 0xf5, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xe1, 0xb5, 0xa9, 0xb4, 0xb3, - 0xae, 0xb1, 0xb2, 0xb2, 0xb2, 0xb1, 0xb1, 0xb4, 0xb9, 0xc4, 0xdb, 0xf5, 0xff, 0xfe, 0xfd, 0xff, - 0xfc, 0xea, 0xd0, 0xbe, 0xb9, 0xc0, 0xd6, 0xf4, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xfa, 0xe8, 0xd9, 0xd3, 0xd3, 0xda, 0xeb, 0xfa, - 0xff, 0xfe, 0xfe, 0xff, 0xfd, 0xf2, 0xe5, 0xde, 0xda, 0xd8, 0xd9, 0xdb, 0xdc, 0xdb, 0xdb, 0xdb, - 0xdb, 0xdc, 0xdb, 0xde, 0xea, 0xf9, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xf4, 0xd5, 0xb5, 0xac, 0xb2, 0xb1, - 0xae, 0xb0, 0xb2, 0xb3, 0xb4, 0xb3, 0xb2, 0xbf, 0xda, 0xf1, 0xfb, 0xfe, 0xff, 0xfe, 0xfe, 0xfe, - 0xeb, 0xc9, 0xb9, 0xbb, 0xba, 0xba, 0xc4, 0xd8, 0xf3, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xfc, 0xed, 0xd7, 0xce, 0xd1, 0xd2, 0xd2, 0xd9, 0xe8, - 0xf9, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfa, 0xf2, 0xe5, 0xda, 0xd7, 0xdb, 0xdd, 0xdc, 0xdb, 0xdb, - 0xdb, 0xdb, 0xdb, 0xdd, 0xe4, 0xf5, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xf3, 0xd3, 0xb6, 0xaf, 0xb4, 0xb3, - 0xb3, 0xb5, 0xb3, 0xad, 0xb0, 0xc1, 0xd3, 0xe5, 0xf8, 0xff, 0xff, 0xfe, 0xfc, 0xff, 0xff, 0xef, - 0xcf, 0xb9, 0xb7, 0xbd, 0xbf, 0xc0, 0xbd, 0xc4, 0xe6, 0xff, 0xff, 0xfc, 0xff, 0xfe, 0xf4, 0xea, - 0xea, 0xf0, 0xf9, 0xff, 0xff, 0xff, 0xff, 0xf5, 0xe0, 0xd1, 0xce, 0xd3, 0xd6, 0xd4, 0xd1, 0xd8, - 0xee, 0xff, 0xff, 0xfd, 0xfe, 0xff, 0xff, 0xfe, 0xf6, 0xed, 0xe3, 0xda, 0xd7, 0xd9, 0xdc, 0xdc, - 0xdc, 0xdc, 0xdc, 0xdc, 0xe3, 0xf4, 0xff, 0xff, 0xfe, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfb, 0xe1, 0xb9, 0xaa, 0xb1, 0xb2, - 0xae, 0xb0, 0xb3, 0xb8, 0xc6, 0xdf, 0xf7, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xdd, - 0xba, 0xba, 0xbe, 0xba, 0xbb, 0xc1, 0xbd, 0xbf, 0xe3, 0xff, 0xff, 0xfd, 0xfd, 0xef, 0xd9, 0xcd, - 0xcd, 0xd5, 0xe7, 0xfa, 0xff, 0xff, 0xff, 0xf1, 0xda, 0xd2, 0xd2, 0xd1, 0xd2, 0xd5, 0xd3, 0xd2, - 0xe3, 0xf8, 0xff, 0xfe, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xfe, 0xf5, 0xe6, 0xdd, 0xda, 0xd8, 0xd8, - 0xdb, 0xdb, 0xd8, 0xdc, 0xe9, 0xf8, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xf5, 0xd0, 0xb1, 0xab, 0xaf, - 0xad, 0xb0, 0xc3, 0xe0, 0xf5, 0xfd, 0xfe, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xfe, 0xed, 0xcd, - 0xb8, 0xba, 0xbe, 0xbb, 0xbd, 0xc2, 0xbe, 0xc2, 0xe5, 0xff, 0xff, 0xfd, 0xf1, 0xd3, 0xc4, 0xc8, - 0xc8, 0xc7, 0xd2, 0xe7, 0xf8, 0xff, 0xff, 0xf2, 0xdd, 0xd3, 0xd1, 0xd1, 0xd1, 0xd3, 0xd4, 0xd3, - 0xda, 0xeb, 0xfc, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xf4, 0xe6, 0xda, 0xd7, - 0xda, 0xda, 0xd8, 0xe2, 0xf6, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xef, 0xdc, 0xcd, 0xc7, - 0xcb, 0xd7, 0xe7, 0xf8, 0xff, 0xff, 0xfd, 0xfc, 0xfe, 0xff, 0xfe, 0xfd, 0xff, 0xf8, 0xd3, 0xbd, - 0xbc, 0xbb, 0xbb, 0xbe, 0xbf, 0xbf, 0xc1, 0xcf, 0xed, 0xff, 0xff, 0xf8, 0xe2, 0xc6, 0xc1, 0xcb, - 0xca, 0xc7, 0xca, 0xd5, 0xef, 0xff, 0xff, 0xf8, 0xe8, 0xd2, 0xcd, 0xd2, 0xd2, 0xcf, 0xd2, 0xd4, - 0xd3, 0xde, 0xf7, 0xff, 0xfd, 0xfe, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xfe, 0xf8, 0xef, 0xe8, - 0xe5, 0xe6, 0xeb, 0xf4, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf8, 0xec, - 0xf3, 0xff, 0xff, 0xfd, 0xfd, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xea, 0xc2, 0xb4, - 0xbd, 0xbf, 0xbb, 0xbe, 0xc0, 0xbb, 0xc5, 0xe4, 0xf9, 0xff, 0xff, 0xf1, 0xd7, 0xc7, 0xc6, 0xc8, - 0xc8, 0xca, 0xca, 0xce, 0xeb, 0xff, 0xff, 0xff, 0xf4, 0xd7, 0xcc, 0xd3, 0xd2, 0xcf, 0xd2, 0xd4, - 0xd2, 0xd7, 0xea, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xfb, - 0xf6, 0xf8, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, - 0xfe, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xf2, 0xd4, 0xbd, 0xb9, - 0xbd, 0xbd, 0xbd, 0xc0, 0xc0, 0xbc, 0xcf, 0xf5, 0xff, 0xff, 0xff, 0xf0, 0xd3, 0xc7, 0xc6, 0xc7, - 0xc7, 0xcb, 0xc9, 0xca, 0xe8, 0xff, 0xff, 0xff, 0xfc, 0xe4, 0xd3, 0xd1, 0xd1, 0xd1, 0xd1, 0xd2, - 0xd5, 0xd3, 0xd8, 0xef, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xfc, 0xfe, 0xff, - 0xfe, 0xfc, 0xfc, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xe2, 0xbf, 0xba, 0xbf, - 0xbc, 0xbc, 0xbf, 0xbf, 0xbe, 0xc7, 0xe2, 0xfc, 0xff, 0xff, 0xff, 0xf0, 0xd2, 0xc5, 0xc5, 0xc7, - 0xc8, 0xc9, 0xc7, 0xca, 0xe8, 0xff, 0xff, 0xfd, 0xff, 0xf6, 0xdf, 0xcf, 0xd0, 0xd3, 0xd0, 0xd1, - 0xd5, 0xd3, 0xd1, 0xe4, 0xfb, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xfe, - 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xf4, 0xd1, 0xb6, 0xb8, 0xbf, - 0xbc, 0xbc, 0xc0, 0xbd, 0xbd, 0xd7, 0xf8, 0xff, 0xfd, 0xff, 0xff, 0xef, 0xd3, 0xc6, 0xc6, 0xc7, - 0xc7, 0xca, 0xc8, 0xca, 0xe7, 0xff, 0xff, 0xfb, 0xff, 0xff, 0xea, 0xd2, 0xd0, 0xd3, 0xd0, 0xd0, - 0xd2, 0xd3, 0xd3, 0xda, 0xec, 0xfd, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xfa, 0xd9, 0xc0, 0xba, 0xbb, 0xbc, - 0xbd, 0xbe, 0xc0, 0xbf, 0xc8, 0xe8, 0xff, 0xff, 0xfb, 0xff, 0xff, 0xee, 0xd3, 0xc6, 0xc6, 0xc8, - 0xc8, 0xca, 0xc8, 0xca, 0xe7, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xf5, 0xe1, 0xd3, 0xd0, 0xd1, 0xd1, - 0xd1, 0xd4, 0xd5, 0xd1, 0xdc, 0xf8, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfd, 0xff, 0xf0, 0xc7, 0xb6, 0xbe, 0xbe, 0xbb, - 0xbe, 0xc0, 0xbb, 0xc2, 0xdd, 0xf7, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xef, 0xd3, 0xc6, 0xc6, 0xc8, - 0xc8, 0xca, 0xc8, 0xca, 0xe8, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xff, 0xf1, 0xd6, 0xcd, 0xd2, 0xd2, - 0xd0, 0xd3, 0xd4, 0xd0, 0xd7, 0xed, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xf5, 0xd9, 0xbf, 0xb8, 0xbd, 0xbe, 0xbb, - 0xbe, 0xc0, 0xbb, 0xcb, 0xf1, 0xff, 0xfe, 0xff, 0xfe, 0xff, 0xff, 0xef, 0xd3, 0xc6, 0xc6, 0xc8, - 0xc8, 0xca, 0xc8, 0xca, 0xe8, 0xff, 0xff, 0xfd, 0xff, 0xfe, 0xff, 0xfa, 0xe0, 0xd1, 0xd2, 0xd1, - 0xd0, 0xd1, 0xd3, 0xd4, 0xd5, 0xdd, 0xf2, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xe5, 0xc1, 0xba, 0xbe, 0xbc, 0xbc, 0xbe, - 0xbf, 0xbe, 0xc6, 0xe0, 0xfb, 0xff, 0xfe, 0xff, 0xfe, 0xff, 0xff, 0xee, 0xd3, 0xc8, 0xc8, 0xc8, - 0xc7, 0xca, 0xc8, 0xca, 0xe8, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xff, 0xfe, 0xf2, 0xdd, 0xcf, 0xd0, - 0xd2, 0xd0, 0xd0, 0xd4, 0xd2, 0xd3, 0xe8, 0xfe, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x96, 0x37, + 0x38, 0x95, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xff, 0xfe, 0xda, 0xb6, 0xb7, 0xbf, 0xbc, 0xbb, 0xc0, - 0xbd, 0xbb, 0xd4, 0xf6, 0xff, 0xfe, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xee, 0xd3, 0xc8, 0xc8, 0xc8, - 0xc7, 0xca, 0xc7, 0xc9, 0xe7, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xe8, 0xd1, 0xd1, - 0xd3, 0xcf, 0xcf, 0xd3, 
0xd1, 0xd0, 0xe0, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc4, 0x28, 0x28, + 0x27, 0x28, 0xc3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xf8, 0xd4, 0xb7, 0xba, 0xc0, 0xbc, 0xbd, 0xc1, - 0xbe, 0xc2, 0xe3, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xee, 0xd3, 0xc8, 0xc8, 0xc8, - 0xc7, 0xcb, 0xc7, 0xc8, 0xe6, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xf4, 0xde, 0xd1, - 0xd0, 0xd2, 0xd1, 0xd2, 0xd3, 0xd4, 0xde, 0xf0, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x8b, 0x27, 0x28, + 0x28, 0x28, 0x8c, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xff, 0xfc, 0xd7, 0xb8, 0xbc, 0xc0, 0xbd, 0xc0, 0xbe, - 0xc2, 0xd8, 0xf3, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xee, 0xd3, 0xc8, 0xc8, 0xc8, - 0xc7, 0xcb, 0xc7, 0xc8, 0xe6, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xef, 0xd3, - 0xcd, 0xd3, 0xd2, 0xd1, 0xd2, 0xd3, 0xdf, 0xf2, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x86, 0x28, 0x27, + 0x28, 0x28, 0x86, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xe0, 0xbb, 0xb7, 0xbd, 0xbd, 0xbc, 0xba, - 0xcc, 0xef, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xee, 0xd3, 0xc8, 0xc8, 0xc8, - 0xc7, 0xcb, 0xc7, 0xc8, 0xe6, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xfa, 0xe3, - 0xd1, 0xcd, 0xcf, 0xd1, 0xce, 0xcf, 0xe5, 0xfb, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x86, 0x28, 0x28, + 0x28, 0x28, 0x86, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xf0, 0xd3, 0xc3, 0xc0, 0xc1, 0xc2, 0xcb, - 0xe3, 0xfb, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xee, 0xd3, 0xc8, 0xc8, 0xc8, - 0xc7, 0xcb, 0xc7, 0xc8, 0xe6, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf7, - 0xe3, 0xd2, 0xd2, 0xd4, 0xd3, 0xdb, 0xf1, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xf8, 0x9b, 0x6e, 0x89, 0xe7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x86, 0x28, 0x27, + 0x27, 0x28, 0x86, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xda, 0x49, 0x27, 0x65, 0xf5, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xf6, 0xe4, 0xd5, 0xd3, 0xde, 0xef, - 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xee, 0xd3, 0xc8, 0xc8, 0xc8, - 0xc7, 0xca, 0xc6, 0xc8, 0xe7, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xf7, 0xeb, 0xe3, 0xe1, 0xe8, 0xf5, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0x9b, 0x54, 0x55, 0x54, 0x64, 0xe4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x86, 0x28, 0x28, + 0x27, 0x28, 0x86, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd2, 0x1a, 0x0b, 0x0b, 0x0b, 0x64, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfa, 0xf1, 0xf0, 0xf8, 0xfe, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xef, 0xd4, 0xc9, 0xc8, 0xc7, - 0xc6, 0xca, 0xc8, 0xca, 0xe8, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xfd, 0xf6, 0xf4, 0xf9, 0xfe, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0x6f, 0x54, 0x54, 0x54, 0x55, 0x63, 0xe4, 0xff, 0xff, 0xff, 0xff, 0xff, 0x87, 0x28, 0x27, + 0x28, 0x27, 0x86, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd2, 0x1a, 0x0b, 0x0a, 0x0a, 0x0a, 0x27, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xfe, - 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xf4, 0xda, 0xc8, 0xc6, 0xca, - 0xc9, 0xc9, 0xc9, 0xcf, 0xeb, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0x88, 0x55, 0x55, 0x55, 0x55, 0x54, 0x63, 0xe4, 0xff, 0xff, 0xff, 0xff, 0x86, 0x28, 0x27, + 0x28, 0x27, 0x87, 0xff, 0xff, 0xff, 0xff, 0xd3, 0x1a, 0x0a, 0x0b, 0x0a, 0x0b, 0x0b, 0x49, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xfe, - 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xe7, 0xca, 0xc2, 0xcb, - 0xcb, 0xc6, 0xc9, 0xd9, 0xf2, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xe8, 0x63, 0x55, 0x55, 0x54, 0x55, 0x54, 0x63, 0xe4, 0xff, 0xff, 0xff, 0x8c, 0x28, 0x27, + 0x28, 0x28, 0x8c, 0xff, 0xff, 0xff, 0xd3, 0x1a, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x1a, 0xda, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xe3, 0x63, 0x54, 0x54, 0x54, 0x55, 0x55, 0x63, 0xe4, 0xff, 0xff, 0xc4, 0x27, 0x28, + 0x28, 0x27, 0xc4, 0xff, 0xff, 0xd3, 0x1a, 0x0a, 0x0b, 0x0b, 0x0a, 0x0b, 0x1b, 0xd2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xf5, 0xdc, 0xc8, 0xc4, - 0xc4, 0xc5, 
0xd3, 0xec, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xfe, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xe4, 0x63, 0x54, 0x54, 0x55, 0x54, 0x54, 0x76, 0xff, 0xff, 0xff, 0x96, 0x38, + 0x37, 0x96, 0xff, 0xff, 0xff, 0x31, 0x0b, 0x0a, 0x0b, 0x0a, 0x0b, 0x1b, 0xd3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xf4, 0xe0, 0xd2, - 0xd2, 0xd9, 0xe9, 0xfc, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xe4, 0x63, 0x54, 0x54, 0x54, 0x54, 0x54, 0xf8, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xf6, 0x0a, 0x0b, 0x0b, 0x0b, 0x0b, 0x1b, 0xd2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xe4, 0x63, 0x55, 0x55, 0x55, 0x6c, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0x23, 0x0b, 0x0a, 0x0b, 0x1b, 0xd2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfb, 0xf7, - 0xf7, 0xfa, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe4, 0x76, 0x55, 0x6c, 0xd8, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xc3, 0x23, 0x0a, 0x31, 0xd4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xdd, 0xbe, 0xbb, + 0xbb, 0xbb, 0xbb, 0xbb, 0xbb, 0xbf, 0xdd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xdd, 0xbd, 0xb9, 0xb9, 0xb9, 0xb8, 0xb9, + 0xb9, 0xbd, 0xdd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc5, 0x7f, 0x7f, 0x7e, + 0x7e, 0x7f, 0x7e, 0x7e, 0x7f, 0x7f, 0x7e, 0xc5, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc2, 0x7b, 0x7b, 0x7b, 0x7c, 0x7b, 0x7b, 0x7b, + 0x7c, 0x7b, 0x7b, 0xc3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x89, 0x7f, 0x7f, 0x7e, + 0x7e, 0x7e, 0x7e, 0x7f, 0x7e, 0x7f, 0x7e, 0x89, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x86, 0x7b, 0x7b, 0x7c, 0x7b, 0x7b, 0x7b, 0x7c, + 0x7c, 0x7b, 0x7b, 0x86, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x8a, 0x7e, 0x7f, 0x7f, + 0x7f, 0x7f, 0x7e, 0x7e, 0x7e, 0x7e, 0x7f, 0x89, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x86, 0x7b, 0x7c, 0x7b, 0x7b, 0x7c, 0x7b, 0x7b, + 0x7b, 0x7b, 0x7b, 0x86, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc5, 0x7f, 0x7f, 0x7f, + 0x7f, 0x7e, 0x7f, 0x7e, 0x7f, 0x7f, 0x7f, 0xc5, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc3, 0x7b, 0x7b, 0x7b, 0x7c, 0x7c, 0x7c, 0x7b, + 0x7c, 0x7b, 0x7c, 0xc2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xdd, 0xbf, 0xbb, + 0xbb, 0xbb, 0xbb, 0xbb, 0xbb, 0xbe, 0xde, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xde, 0xbc, 0xb8, 0xb9, 0xb8, 0xb9, 0xb9, + 0xb8, 0xbc, 0xde, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfd, - 0xfd, 0xfd, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, - 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0, 0xaf, 0x9a, 0xa8, 0xe9, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xf2, 0xcd, 0xc5, 0xd1, 0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xa3, 0x9a, 0x9a, 0x9b, 0xa8, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 
0xff, 0xff, 0xff, 0xce, 0xc5, 0xc5, 0xc5, 0xc9, 0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xef, 0xa3, 0x9a, 0x9b, 0x9a, 0x9b, 0x9a, 0xfb, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xfd, 0xc5, 0xc5, 0xc5, 0xc5, 0xc4, 0xca, 0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xf0, 0xa3, 0x9a, 0x9b, 0x9a, 0x9a, 0x9a, 0xaf, 0xff, 0xff, 0xff, 0xdd, 0xbb, + 0xbb, 0xdd, 0xff, 0xff, 0xff, 0xd1, 0xc5, 0xc5, 0xc5, 0xc5, 0xc5, 0xc9, 0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xef, 0xa4, 0x9b, 0x9b, 0x9a, 0x9a, 0x9a, 0xa4, 0xef, 0xff, 0xff, 0xed, 0xb5, 0xb5, + 0xb5, 0xb6, 0xed, 0xff, 0xff, 0xf6, 0xca, 0xc5, 0xc5, 0xc4, 0xc5, 0xc5, 0xc9, 0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xf2, 0xa4, 0x9a, 0x9b, 0x9a, 0x9a, 0x9a, 0xa4, 0xf0, 0xff, 0xff, 0xff, 0xdb, 0xb5, 0xb6, + 0xb5, 0xb5, 0xdb, 0xff, 0xff, 0xff, 0xf5, 0xca, 0xc5, 0xc5, 0xc5, 0xc5, 0xc5, 0xca, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xba, 0x9a, 0x9a, 0x9b, 0x9a, 0x9b, 0xa3, 0xef, 0xff, 0xff, 0xff, 0xff, 0xd9, 0xb5, 0xb5, + 0xb5, 0xb5, 0xd9, 0xff, 0xff, 0xff, 0xff, 0xf6, 0xca, 0xc5, 0xc5, 0xc5, 0xc5, 0xc5, 0xd7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xaa, 0x9a, 0x9b, 0x9a, 0x9a, 0xa3, 0xf0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd8, 0xb5, 0xb5, + 0xb5, 0xb5, 0xd9, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf6, 0xca, 0xc5, 0xc5, 0xc5, 0xc5, 0xcf, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, - 0xfe, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xc6, 0x9a, 0x9a, 0x9a, 0xa4, 0xef, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd9, 0xb5, 0xb5, + 0xb5, 0xb5, 0xd9, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf6, 0xc9, 0xc5, 0xc4, 0xc4, 0xdd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xfb, 0xc6, 0xaa, 0xba, 0xf2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd9, 0xb5, 0xb5, + 0xb5, 0xb5, 0xd9, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf7, 0xd7, 0xcf, 0xdd, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd9, 0xb6, 0xb5, + 0xb5, 0xb5, 0xd9, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd8, 0xb5, 0xb5, + 0xb5, 0xb5, 0xd9, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xdb, 0xb5, 0xb5, + 0xb6, 0xb5, 0xdb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfd, - 0xfd, 0xfd, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xed, 0xb5, 0xb5, + 0xb5, 0xb5, 0xed, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xdd, 0xbb, + 0xbb, 0xde, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xfb, - 0xfb, 0xfd, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xfd, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xf8, 0xf2, - 0xf1, 0xf6, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, - 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xea, 0xc4, 0xa3, - 0x9e, 0xb2, 0xd7, 0xfa, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfd, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfd, 0xff, 0xed, 0xb8, 0x8d, 0x80, - 0x7d, 0x83, 0xa6, 0xd9, 0xf9, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xfd, 0xfd, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xfe, - 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf9, 0xcc, 0x8c, 0x79, 0x8b, - 0x8b, 0x7f, 0x84, 0xa9, 0xe2, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xfe, - 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xe5, 0xaf, 0x88, 0x81, 0x87, - 0x87, 0x86, 0x7f, 0x8b, 0xcf, 0xff, 0xff, 0xfa, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xfe, 0xfe, 0xff, 0xff, 0xfe, 0xfc, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfb, 0xed, 0xe8, 0xf3, 0xfe, - 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xdd, 0xa5, 0x8c, 0x87, 0x81, - 0x81, 0x89, 0x81, 0x82, 0xc7, 0xff, 0xff, 0xf9, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xfe, 0xfe, - 0xff, 0xfb, 0xe6, 0xe5, 0xf5, 0xfe, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf4, 0xd7, 0xbb, 0xb5, 0xc6, 0xe7, - 0xfd, 0xff, 0xfe, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xde, 0xa5, 0x88, 0x84, 0x85, - 0x85, 0x89, 0x7d, 0x7e, 0xc6, 0xff, 0xff, 0xf9, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xfe, 0xff, 0xff, - 0xee, 0xcb, 0xae, 0xac, 0xc6, 0xe4, 0xf9, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xec, 0xc0, 0x9d, 0x92, 0x92, 0x94, 0xa9, - 0xd5, 0xfa, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xde, 0xa4, 0x86, 0x84, 0x85, - 0x84, 0x88, 0x7d, 0x7e, 0xc6, 0xff, 0xff, 0xf9, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xfe, 0xff, 0xed, - 0xb3, 0x83, 0x7f, 0x87, 0x83, 0x94, 0xd3, 0xff, 0xff, 0xfb, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xfb, 0xff, 0xff, 0xd0, 0x93, 0x85, 0x8b, 0x8c, 0x88, 0x86, - 0xaa, 0xe7, 0xff, 0xfc, 0xfe, 0xff, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xdd, 0xa4, 0x88, 0x85, 0x84, - 0x83, 0x8a, 0x80, 0x7f, 0xc5, 0xff, 0xff, 0xf9, 0xff, 0xff, 0xff, 0xfe, 0xfd, 0xff, 0xf4, 0xb8, - 0x81, 0x71, 0x76, 0x7c, 0x6e, 0x6e, 0xae, 0xf3, 0xff, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfb, 0xff, 0xfb, 0xbc, 0x85, 0x8c, 0x95, 0x8f, 0x91, 0x8a, - 0x92, 0xbe, 0xee, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xfb, 0xff, 0xff, 0xdd, 0xa4, 0x88, 0x85, 0x84, - 0x83, 0x8a, 0x80, 0x7f, 0xc5, 0xff, 0xff, 0xf8, 0xff, 0xff, 0xff, 0xfe, 0xfd, 0xff, 0xd6, 0x88, - 0x72, 0x83, 0x7e, 0x7c, 0x7c, 0x79, 0x9b, 0xd5, 0xfb, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xff, 0xf4, 0xb8, 0x87, 0x8c, 0x94, 0x8d, 0x8d, 0x91, - 0x8b, 0x96, 0xd2, 0xff, 0xff, 0xfa, 0xff, 0xff, 0xfb, 0xff, 0xff, 0xdd, 0xa4, 0x88, 0x85, 0x84, - 0x84, 0x89, 0x7e, 0x7e, 0xc6, 0xff, 0xff, 0xf8, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xe4, 0xaa, 0x80, - 0x7a, 0x7f, 0x7b, 0x7c, 0x7c, 0x7a, 0x97, 0xd0, 0xfa, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfb, 0xff, 0xfd, 0xc2, 0x89, 0x89, 0x93, 0x8c, 0x8b, 0x93, - 0x8b, 0x87, 0xb6, 0xf0, 0xff, 0xfd, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xdd, 0xa4, 0x88, 0x85, 0x84, - 0x84, 0x89, 0x7e, 0x7e, 0xc5, 0xff, 0xff, 0xf8, 0xff, 0xff, 0xf9, 0xff, 0xff, 0xc4, 0x83, 0x7e, - 0x82, 0x77, 0x78, 0x7e, 0x74, 0x70, 0xa1, 0xe4, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xfa, 0xff, 0xff, 0xd3, 0x98, 0x8e, 0x92, 0x8e, 0x8f, 0x90, - 0x8d, 0x8e, 0x9b, 0xc5, 0xf7, 0xff, 0xfe, 0xff, 0xfd, 0xff, 0xff, 0xdd, 0xa4, 0x88, 0x85, 0x84, - 0x84, 0x89, 0x7e, 0x7e, 0xc6, 0xff, 0xff, 0xf9, 0xff, 0xff, 0xfe, 0xfe, 0xe6, 0xa5, 0x75, 0x78, - 0x81, 0x79, 0x78, 0x82, 0x77, 0x74, 0xb5, 0xfb, 0xff, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xed, 0xbf, 0x97, 0x8b, 0x90, 0x90, 0x8b, - 0x8f, 0x91, 0x85, 0xa1, 0xe7, 0xff, 0xfd, 0xfe, 0xfe, 0xff, 0xff, 0xdd, 0xa4, 0x88, 0x85, 0x84, - 0x84, 0x89, 0x7e, 0x7e, 0xc5, 0xff, 0xff, 0xfb, 0xff, 0xfd, 0xff, 0xf4, 0xb3, 0x85, 0x7d, 0x7b, - 0x7a, 0x7b, 0x7a, 0x7d, 0x7c, 0x90, 0xd1, 0xff, 0xff, 0xfb, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xfe, 0xfc, 0xff, 0xe3, 0xa1, 0x86, 0x92, 0x91, 0x8c, - 0x91, 0x90, 0x84, 0x92, 0xc6, 0xf2, 0xff, 0xff, 0xfb, 0xff, 0xff, 0xdd, 0xa4, 0x88, 0x85, 0x84, - 0x84, 0x89, 0x7e, 0x7e, 0xc5, 0xff, 0xff, 0xfa, 0xff, 0xfd, 0xff, 0xdd, 0x92, 0x76, 0x82, 0x7e, - 0x76, 0x7d, 0x7e, 0x71, 0x81, 0xbe, 0xf2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, - 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xfe, 0xfc, 0xff, 0xf5, 0xbc, 0x94, 0x8e, 0x8f, 0x8f, - 0x8f, 0x8e, 0x8f, 0x8e, 0x9e, 0xd6, 0xff, 0xff, 0xf8, 0xff, 0xff, 0xdd, 0xa4, 0x88, 0x85, 0x84, - 0x84, 0x89, 0x7e, 0x7e, 0xc4, 0xff, 0xff, 0xf7, 0xff, 0xff, 0xe6, 0xb0, 0x84, 0x78, 0x7d, 0x7c, - 0x78, 0x7d, 0x7e, 0x72, 0x95, 0xe6, 0xff, 0xfc, 0xfd, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xe8, 0xb1, 0x89, 0x8c, 0x93, - 0x8d, 0x8c, 0x92, 0x89, 0x87, 0xbc, 0xf8, 0xff, 0xf8, 0xff, 0xff, 0xdd, 0xa4, 0x88, 0x85, 0x84, - 0x84, 0x89, 0x7e, 0x7e, 0xc5, 0xff, 0xff, 0xf4, 0xff, 0xff, 0xc6, 0x83, 0x7b, 0x81, 0x79, 0x7a, - 0x7d, 0x7a, 0x79, 0x8d, 0xc3, 0xf7, 0xff, 0xfd, 0xfe, 0xff, 0xfe, 0xfe, 0xfe, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xfb, 0xfd, 0xfe, - 0xfc, 0xfa, 0xfc, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xfb, 0xff, 0xff, 0xcc, 0x93, 0x8d, 0x93, - 0x8d, 0x8e, 0x90, 0x8b, 0x88, 0x9f, 0xd1, 0xfa, 0xff, 0xff, 0xff, 0xdf, 0xa4, 0x87, 0x84, 0x84, - 0x84, 0x89, 0x7d, 0x7d, 0xc5, 0xff, 0xff, 0xf8, 0xff, 0xea, 0xa6, 0x76, 0x7a, 0x82, 0x78, 0x79, - 0x81, 0x77, 0x75, 0xaf, 0xf1, 0xff, 0xfc, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xfd, 0xf9, 0xfa, - 0xfe, 0xfe, 0xfb, 0xfb, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xe7, 0xb5, 0x92, 0x8b, - 0x90, 0x90, 0x8d, 0x91, 0x91, 0x88, 0xaa, 0xee, 0xff, 0xff, 0xff, 0xdf, 0xa4, 0x8a, 0x86, 0x84, - 0x82, 0x89, 0x7f, 0x7d, 0xc6, 0xff, 0xff, 0xff, 0xf6, 0xba, 0x88, 0x7e, 0x7d, 0x7c, 0x7a, 0x7a, - 0x7e, 0x79, 0x88, 0xcd, 0xff, 0xff, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xfe, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xfe, 0xfe, 0xff, 0xfe, 0xf6, 0xee, - 0xf5, 0xff, 0xff, 0xfc, 0xfc, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfd, 0xff, 0xde, 0x9e, 0x86, - 0x92, 0x92, 0x8c, 0x91, 0x91, 0x85, 0x96, 0xcd, 0xf4, 0xff, 0xff, 0xe2, 0xac, 0x8d, 0x85, 0x83, - 0x83, 0x88, 0x81, 0x85, 0xcb, 0xff, 0xff, 0xfe, 0xe1, 0x95, 0x76, 0x82, 0x7e, 0x78, 0x7e, 0x7d, - 0x75, 0x82, 0xb5, 0xeb, 0xff, 0xff, 0xfd, 0xfe, 0xff, 0xff, 0xfe, 0xfb, 0xfc, 0xff, 0xff, 0xf9, - 0xed, 0xeb, 0xf7, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xfd, 0xea, 0xd1, 0xbf, 0xba, - 0xc1, 0xd0, 0xe4, 0xf6, 0xff, 0xff, 0xfc, 0xfb, 0xfe, 0xff, 0xfd, 0xfc, 0xff, 0xf4, 0xb9, 0x94, - 0x91, 0x8e, 0x8c, 0x8f, 0x8f, 0x8c, 0x8f, 0xa9, 0xdf, 0xff, 0xff, 0xee, 0xc0, 0x89, 0x7a, 0x8a, - 0x8b, 0x82, 0x80, 0x98, 0xd8, 0xff, 0xff, 0xef, 0xc1, 0x85, 0x74, 0x81, 0x7f, 0x76, 0x7d, 0x7f, - 0x72, 0x90, 0xdf, 0xff, 0xfa, 0xfd, 0xff, 0xfe, 0xfb, 0xfb, 0xfe, 0xff, 0xfc, 0xeb, 0xcd, 0xb4, - 0xa8, 0xa7, 0xb3, 0xd5, 0xf9, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfd, 0xff, 0xf3, 0xc0, 0x99, 0x93, 0x94, - 0x90, 0x95, 0xb1, 0xd8, 0xf2, 0xfc, 0xfe, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xfd, 0xe2, 0xae, - 0x8c, 0x8e, 0x92, 0x8c, 0x8b, 0x91, 0x8b, 0x93, 0xd2, 0xff, 0xff, 0xfb, 0xe0, 0x9f, 0x7e, 0x86, - 0x85, 0x7d, 0x91, 0xc1, 0xec, 0xff, 0xff, 0xde, 0xa4, 0x83, 0x7c, 0x7c, 0x7a, 0x7a, 0x7c, 0x7a, - 0x83, 0xb4, 0xf4, 0xff, 0xfc, 0xfd, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xf7, 0xde, 0xae, 0x7f, 0x6b, - 0x70, 0x70, 0x6a, 0x91, 0xdb, 0xff, 0xfd, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfc, 0xda, 0xa5, 0x91, 0x99, 0x98, - 0x92, 0x95, 0x9a, 0xa0, 0xb4, 0xd9, 0xf6, 0xff, 0xff, 0xfc, 0xfe, 0xff, 0xfb, 0xff, 0xff, 0xc8, - 0x91, 0x90, 0x95, 0x8b, 0x8b, 0x92, 0x89, 0x8b, 0xcc, 0xff, 0xff, 0xfc, 0xfa, 0xd8, 0xa9, 0x8d, - 0x86, 0x94, 0xbf, 0xee, 0xfd, 0xff, 0xff, 0xda, 0x9b, 0x81, 0x7d, 0x78, 0x77, 0x7f, 0x78, 0x71, - 0xa4, 0xe8, 0xff, 0xfe, 0xff, 0xfd, 0xfd, 0xfc, 0xfe, 0xfe, 0xe3, 0xaf, 0x8a, 0x7d, 0x74, 0x6f, - 0x72, 0x73, 0x6e, 0x7b, 0xac, 0xe7, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xf1, 0xc8, 0xa2, 0x97, 0x9e, 0x9e, - 0x9d, 0x9e, 0x99, 0x8f, 0x95, 0xad, 0xc6, 0xdf, 0xf9, 0xff, 0xfe, 0xfd, 0xfb, 0xff, 0xff, 0xe6, - 0xb2, 0x8e, 0x8b, 0x94, 0x94, 0x93, 0x8a, 0x91, 0xd0, 0xff, 0xff, 0xfa, 0xff, 0xfa, 0xe2, 0xcb, - 0xc7, 0xd5, 0xed, 0xfd, 0xfe, 0xff, 0xff, 0xe2, 0xa6, 0x7e, 0x79, 0x81, 0x81, 0x7d, 0x75, 0x82, - 0xc8, 0xff, 0xff, 0xf7, 0xfb, 0xfe, 0xff, 0xfb, 0xe2, 0xc2, 0x9f, 0x7c, 0x6c, 0x71, 0x7a, 0x7d, - 0x7b, 0x79, 0x7a, 0x7b, 0x93, 0xd5, 0xff, 0xff, 0xfb, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xf1, 0xc9, 0xa1, 0x95, 0x9c, 0x9c, - 0x98, 0x97, 0x99, 0x9b, 0x99, 0x95, 0x94, 0xa9, 0xd0, 0xee, 0xfa, 0xfe, 0xff, 0xfe, 0xfd, 0xfe, - 0xdf, 0xa6, 0x8a, 0x8e, 0x8c, 0x89, 0x94, 0xb4, 0xe5, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xf4, 0xc6, 0x8c, 0x73, 0x79, 0x79, 0x73, 0x86, 0xb6, - 0xeb, 0xff, 0xff, 0xfe, 0xff, 0xfb, 0xf1, 0xd6, 0xa6, 0x7b, 0x6e, 0x76, 0x7a, 0x77, 0x74, 0x73, - 0x74, 0x76, 0x74, 0x76, 0x93, 0xd7, 0xff, 0xff, 0xfb, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfd, 0xd9, 0xa2, 0x93, 
0xa1, 0x9d, - 0x98, 0x9b, 0x9a, 0x9a, 0x99, 0x97, 0x95, 0x97, 0x9f, 0xb0, 0xd3, 0xf7, 0xff, 0xfd, 0xfc, 0xff, - 0xfa, 0xda, 0xae, 0x91, 0x8a, 0x91, 0xb4, 0xe8, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xfe, 0xff, 0xfe, 0xfd, 0xff, 0xee, 0xb7, 0x88, 0x79, 0x78, 0x88, 0xb8, 0xee, - 0xff, 0xfc, 0xfd, 0xff, 0xfa, 0xd4, 0xa4, 0x85, 0x7a, 0x73, 0x71, 0x75, 0x79, 0x78, 0x75, 0x72, - 0x76, 0x79, 0x71, 0x78, 0xa7, 0xe4, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfd, 0xff, 0xef, 0xbb, 0x98, 0x95, 0x9c, - 0xa1, 0x9f, 0x9a, 0x98, 0x98, 0x9b, 0x9e, 0x99, 0x90, 0x94, 0xa8, 0xc0, 0xdb, 0xf2, 0xfe, 0xff, - 0xff, 0xfa, 0xe4, 0xcc, 0xc7, 0xcf, 0xe3, 0xfb, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xf9, - 0xf9, 0xfb, 0xfe, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xfc, 0xe8, 0xcf, 0xbf, 0xbf, 0xcf, 0xe9, 0xfc, - 0xff, 0xff, 0xfb, 0xe0, 0xb8, 0x97, 0x79, 0x6a, 0x71, 0x7c, 0x7b, 0x75, 0x72, 0x73, 0x78, 0x7b, - 0x7a, 0x71, 0x6c, 0x91, 0xd4, 0xfb, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xfc, 0xe4, 0xbc, 0xa0, 0x9b, - 0x9b, 0x9a, 0x9c, 0x9e, 0x9d, 0x9a, 0x98, 0x98, 0x9a, 0x9c, 0x96, 0x91, 0xa3, 0xce, 0xf3, 0xff, - 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfd, - 0xff, 0xf8, 0xd6, 0x9c, 0x74, 0x70, 0x79, 0x7a, 0x76, 0x74, 0x74, 0x76, 0x78, 0x77, 0x75, 0x72, - 0x72, 0x7a, 0x93, 0xc6, 0xf8, 0xff, 0xfd, 0xfe, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xf3, 0xd6, 0xaf, - 0x99, 0x99, 0x9d, 0x9e, 0x9d, 0x9a, 0x99, 0x99, 0x9c, 0x9b, 0x98, 0x95, 0x94, 0xa3, 0xc6, 0xee, - 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, - 0xf9, 0xd2, 0x92, 0x72, 0x74, 0x75, 0x76, 0x79, 0x77, 0x74, 0x73, 0x75, 0x78, 0x78, 0x73, 0x6e, - 0x80, 0xae, 0xe0, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xfb, 0xe0, - 0xc7, 0xb1, 0x9b, 0x93, 0x9a, 0x9f, 0x9f, 0x9b, 0x97, 0x96, 0x9a, 0x9d, 0x9a, 0x91, 0x9d, 0xd5, - 0xff, 0xff, 0xf9, 0xfa, 0xfa, 0xfa, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, - 0xfe, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xfa, 0xf9, 0xf8, 0xf8, 0xff, 0xff, - 0xe2, 0xa1, 0x71, 0x6e, 0x7d, 0x79, 0x72, 0x71, 0x73, 0x78, 0x7c, 0x78, 0x6b, 0x6d, 0x87, 0xa7, - 0xc8, 0xe9, 0xfc, 0xff, 0xfd, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, - 0xf8, 0xdd, 0xba, 0xa5, 0x9f, 0x9a, 0x99, 0x9d, 0x9c, 0x9c, 0x99, 0x97, 0x9f, 0x95, 0x8f, 0xc5, - 0xff, 0xff, 0xfb, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xf9, 0xff, 0xff, - 0xc8, 0x81, 0x77, 0x7b, 0x74, 0x74, 0x77, 0x79, 0x78, 0x73, 0x71, 0x76, 0x7b, 0x90, 0xbd, 0xec, - 0xff, 0xff, 0xfc, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, - 0xfe, 0xfc, 
0xf5, 0xdf, 0xba, 0x9c, 0x96, 0x9d, 0x9e, 0x9e, 0x9b, 0x99, 0x9f, 0x97, 0x91, 0xc2, - 0xfb, 0xff, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xf9, 0xff, 0xff, - 0xc5, 0x7b, 0x75, 0x7c, 0x73, 0x75, 0x78, 0x7a, 0x79, 0x6f, 0x6e, 0x8a, 0xbd, 0xe6, 0xf7, 0xfd, - 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xfe, 0xfb, - 0xfc, 0xff, 0xff, 0xf9, 0xe6, 0xcd, 0xb4, 0xa1, 0x95, 0x95, 0x9b, 0x9d, 0x9e, 0x94, 0x98, 0xcd, - 0xff, 0xff, 0xfb, 0xff, 0xff, 0xfd, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfd, 0xff, 0xff, 0xfa, 0xff, 0xff, - 0xd4, 0x8c, 0x6f, 0x72, 0x78, 0x77, 0x70, 0x6a, 0x6f, 0x8b, 0xaf, 0xce, 0xee, 0xff, 0xff, 0xfa, - 0xf9, 0xfd, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xfd, 0xfc, 0xff, 0xff, 0xe8, 0xc3, 0xaa, 0xa1, 0x9e, 0x9b, 0x9c, 0xa3, 0xb9, 0xe5, - 0xff, 0xff, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, - 0xed, 0xb9, 0x86, 0x72, 0x76, 0x76, 0x77, 0x7f, 0x96, 0xca, 0xf7, 0xff, 0xfd, 0xfd, 0xfe, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xf6, 0xe5, 0xcc, 0xb5, 0xa8, 0xad, 0xc9, 0xec, 0xfe, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, - 0xff, 0xee, 0xbe, 0x96, 0x89, 0x8d, 0xa4, 0xc9, 0xeb, 0xfc, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xfc, 0xfc, 0xfc, 0xfb, 0xfb, 0xfb, - 0xfb, 0xfc, 0xfd, 0xfc, 0xf9, 0xf8, 0xfb, 0xff, 0xfe, 0xf1, 0xe1, 0xda, 0xdf, 0xee, 0xfd, 0xff, - 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xfd, 0xee, 0xd9, 0xcc, 0xd0, 0xe3, 0xf6, 0xfe, 0xfe, 0xfb, 0xfa, 0xfc, 0xff, 0xff, 0xfe, - 0xfd, 0xfd, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xfe, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfc, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xf6, 0xe9, 0xe0, 0xdf, 0xdf, 0xdf, 0xdf, 0xdf, - 0xdf, 0xdf, 0xde, 0xdd, 0xdd, 0xdd, 0xdd, 0xdf, 0xe3, 0xe9, 0xf4, 0xfe, 0xff, 0xfd, 0xfe, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xfe, 0xfb, 0xfa, 0xfb, 0xfa, 0xf9, 0xf8, 0xf7, 0xf6, 0xf4, 0xf4, 0xf4, 0xf4, 0xf4, - 0xf4, 0xf5, 0xf4, 0xf4, 0xf5, 0xf5, 0xf7, 0xfa, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xee, 0xd2, 0xbe, 0xb3, 0xaf, 0xaf, 0xaf, 0xaf, 0xaf, - 0xaf, 0xaf, 0xad, 0xac, 0xac, 0xac, 0xaa, 0xac, 0xb4, 0xbe, 0xd7, 0xf6, 0xff, 0xfe, 0xfe, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xfa, 0xf2, 0xea, 0xe7, 0xe7, 0xe7, 0xe6, 0xe6, 0xe6, 0xe6, 0xe6, - 0xe6, 0xe5, 0xe6, 0xe6, 0xe5, 0xe6, 0xe9, 0xee, 0xf7, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf1, 0xc9, 0xa8, 0xa5, 0xa7, 0xa3, 0xa4, 0xa4, 0xa3, 0xa3, - 0xa3, 0xa3, 0xa3, 0xa3, 0xa3, 0xa3, 0xa1, 0xa3, 0xa5, 0xa4, 0xaf, 0xd0, 0xf2, 0xff, 0xff, 0xfe, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xf8, 0xea, 0xe3, 0xe4, 0xe3, 0xe2, 0xe2, 0xe3, 0xe3, 0xe3, 0xe3, 0xe3, - 0xe3, 0xe2, 0xe3, 0xe3, 0xe2, 0xe2, 0xe2, 0xe3, 0xe9, 0xf4, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xfa, 0xd7, 0xad, 0x9c, 0xa2, 0xa8, 0xa7, 0xa7, 0xa7, 0xa6, 0xa6, - 0xa6, 0xa6, 0xa6, 0xa7, 0xa7, 0xa7, 0xa7, 0xa7, 0xa5, 0xa2, 0x9f, 0xae, 0xde, 0xff, 0xff, 0xfc, - 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xfa, 0xec, 0xe1, 0xde, 0xe2, 0xe3, 0xe2, 0xe2, 0xe3, 0xe3, 0xe3, 0xe3, 0xe3, - 0xe3, 0xe3, 0xe3, 0xe3, 0xe3, 0xe3, 0xe2, 0xe1, 0xe0, 0xeb, 0xfc, 0xff, 0xfe, 0xff, 0xff, 0xff, - 0xfe, 0xff, 0xfe, 0xfc, 0xff, 0xf5, 0xbf, 0xa5, 0xa9, 0xa6, 0xa3, 0xa4, 0xa4, 0xa4, 0xa4, 0xa5, - 0xa5, 0xa4, 0xa4, 0xa4, 0xa4, 0xa4, 0xa5, 0xa3, 0xa3, 0xab, 0xa3, 0x9f, 0xd2, 0xff, 0xff, 0xfa, - 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xfe, 0xff, 0xff, 0xf5, 0xe6, 0xe3, 0xe3, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, - 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe1, 0xe3, 0xe4, 0xdf, 0xe6, 0xfa, 0xff, 0xfe, 0xfe, 0xff, 0xff, - 0xfe, 0xff, 0xfe, 0xfc, 0xff, 0xf5, 0xbf, 0xa5, 0xaa, 0xa8, 0xa4, 0xa5, 0xa5, 0xa5, 0xa5, 0xa5, - 0xa5, 0xa5, 0xa5, 0xa5, 0xa5, 0xa5, 0xa6, 0xa4, 0xa4, 0xac, 0xa3, 
0x9f, 0xd2, 0xff, 0xff, 0xfa, - 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xfe, 0xff, 0xff, 0xf5, 0xe6, 0xe3, 0xe3, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, - 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe1, 0xe3, 0xe4, 0xdf, 0xe5, 0xfa, 0xff, 0xfe, 0xfe, 0xff, 0xff, - 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xfa, 0xd3, 0xac, 0x9e, 0xa3, 0xa8, 0xa8, 0xa8, 0xaa, 0xa9, 0xa7, - 0xa7, 0xa8, 0xa9, 0xa9, 0xa9, 0xa9, 0xa9, 0xa9, 0xa7, 0xa6, 0xa1, 0xad, 0xdd, 0xff, 0xff, 0xfc, - 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xf9, 0xeb, 0xe0, 0xdf, 0xe3, 0xe3, 0xe3, 0xe3, 0xe3, 0xe3, 0xe3, 0xe3, 0xe3, - 0xe3, 0xe3, 0xe3, 0xe3, 0xe3, 0xe2, 0xe2, 0xe2, 0xe0, 0xea, 0xfc, 0xff, 0xfe, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xee, 0xc7, 0xa8, 0xa6, 0xa8, 0xa5, 0xa6, 0xa8, 0xa7, 0xa6, - 0xa7, 0xa8, 0xa8, 0xa8, 0xa8, 0xa8, 0xa7, 0xa8, 0xa9, 0xa8, 0xb1, 0xd0, 0xf1, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xfe, 0xf5, 0xe7, 0xe1, 0xe2, 0xe1, 0xe1, 0xe1, 0xe1, 0xe1, 0xe1, 0xe1, 0xe1, - 0xe1, 0xe2, 0xe2, 0xe1, 0xe2, 0xe2, 0xe2, 0xe2, 0xe8, 0xf4, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xec, 0xd0, 0xbd, 0xb3, 0xb0, 0xb0, 0xb0, 0xb0, 0xb2, - 0xb2, 0xb2, 0xb2, 0xb2, 0xb2, 0xb2, 0xb2, 0xb2, 0xb6, 0xbe, 0xd6, 0xf4, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xfe, 0xf6, 0xec, 0xe6, 0xe3, 0xe3, 0xe3, 0xe3, 0xe3, 0xe3, 0xe3, 0xe3, - 0xe3, 0xe4, 0xe3, 0xe3, 0xe4, 0xe4, 0xe5, 0xeb, 0xf5, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xf5, 0xe7, 0xde, 0xdd, 0xdd, 0xdd, 0xdd, 0xde, - 0xde, 0xde, 0xde, 0xde, 0xde, 0xde, 0xde, 0xdf, 0xe1, 0xe7, 0xf2, 0xfe, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xf8, 0xf4, 0xf3, 0xf3, 0xf3, 0xf2, 0xf2, 0xf3, 0xf3, 0xf3, - 0xf3, 0xf3, 0xf3, 0xf3, 0xf3, 0xf3, 0xf4, 0xf8, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xfe, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xfc, 0xfc, 0xfb, 0xfb, 0xfb, 0xfb, - 0xfb, 0xfc, 0xfd, 0xfd, 0xfa, 0xf9, 0xfc, 0xff, 0xfe, 0xf6, 0xeb, 0xe5, 0xe9, 0xf4, 0xfe, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xfc, 0xf8, 0xf5, 0xf6, 0xfa, 0xfe, 0xff, 0xfe, 0xfd, 0xfd, 0xfe, 0xfe, 0xfe, 0xfe, - 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf8, 0xea, 0xd9, 0xc6, 0xba, 0xbf, 0xd8, 0xf3, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xfb, 0xf1, 0xe7, 0xe4, 0xe6, 0xec, 0xf5, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xfe, 0xfc, 0xff, 0xff, 0xef, 0xcf, 0xba, 0xb3, 0xb0, 0xad, 0xae, 0xb7, 0xca, 0xec, - 0xff, 0xff, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xfb, 0xee, 0xe1, 0xdc, 0xdc, 0xdd, 0xde, 0xe2, 0xe9, 0xf4, 0xfd, 0xff, 0xfe, 0xfe, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xfc, - 0xfc, 0xff, 0xff, 0xfc, 0xef, 0xdb, 0xc5, 0xb3, 0xa7, 0xa8, 0xae, 0xb1, 0xb1, 0xab, 0xaf, 0xda, - 0xff, 0xff, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, - 0xf4, 0xe1, 0xd9, 0xda, 0xdc, 0xdb, 0xda, 0xda, 0xdd, 0xe4, 0xed, 0xf4, 0xfb, 0xff, 0xff, 0xfe, - 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, - 0xff, 0xfe, 0xf9, 0xe8, 0xca, 0xb0, 0xa9, 0xae, 0xb0, 0xb2, 0xb1, 0xaf, 0xb4, 0xaf, 0xad, 0xd2, - 0xfc, 0xff, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xff, 0xff, - 0xef, 0xdc, 0xdb, 0xdd, 0xdc, 0xdc, 0xdc, 0xdc, 0xdc, 0xdc, 0xdd, 0xe3, 0xf0, 0xfa, 0xfe, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xfe, 0xff, - 0xfc, 0xe9, 0xcc, 0xb8, 0xb1, 0xad, 0xad, 0xb0, 0xb2, 0xb3, 0xb0, 0xaf, 0xb5, 0xb0, 0xae, 0xd4, - 0xfe, 0xff, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xff, 0xff, - 0xf0, 0xdc, 0xd9, 0xdc, 0xdb, 0xdb, 0xdb, 0xdc, 0xdc, 0xdb, 0xdb, 0xdd, 0xdf, 0xe5, 0xf1, 0xfb, - 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xfc, 0xeb, - 0xd7, 0xc3, 0xb0, 0xa8, 0xad, 0xb3, 0xb3, 0xb0, 0xaf, 0xaf, 0xb1, 0xb4, 0xb5, 0xaf, 0xb6, 0xde, - 0xff, 0xff, 0xfb, 0xfd, 0xfc, 0xfc, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfd, 0xfd, 0xfd, 0xfd, 0xff, 0xff, - 0xf7, 0xe4, 0xd7, 0xd7, 0xdc, 0xdb, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xdd, 0xda, 0xdb, 0xe2, 0xea, - 0xf3, 0xfc, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xf7, 0xe2, 0xc3, - 0xad, 0xab, 0xb1, 0xb3, 0xb0, 0xb0, 0xb1, 0xb1, 0xb2, 0xb2, 0xb3, 0xb2, 0xb3, 0xbb, 0xd0, 0xef, - 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xfe, 0xf2, 0xe0, 0xd7, 0xd8, 0xd9, 0xda, 0xdc, 0xdb, 0xdb, 0xdb, 0xdc, 0xdd, 0xdd, 0xdb, 0xdb, - 0xe1, 0xed, 0xf8, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xee, 0xce, 0xb4, 0xae, - 0xad, 0xad, 0xb1, 0xb3, 0xb2, 0xb0, 0xb0, 0xb1, 0xb2, 0xb5, 0xb3, 0xb1, 0xbb, 0xd7, 0xf3, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, - 0xff, 0xfd, 0xf2, 0xe2, 0xd7, 0xd7, 0xdb, 0xdc, 0xdb, 0xdb, 0xdb, 0xdb, 0xdc, 0xdd, 0xdb, 0xda, - 0xdb, 0xde, 0xe4, 0xf0, 0xfd, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xf3, 0xcd, 0xaf, 0xa9, 0xae, - 0xb4, 0xb3, 0xb0, 0xae, 0xaf, 0xb2, 0xb5, 0xb4, 0xaf, 0xb0, 0xbd, 0xce, 0xe1, 0xf2, 0xfe, 0xff, - 0xff, 0xfd, 0xf1, 0xe2, 0xe0, 0xe6, 0xf1, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfd, - 0xfd, 0xfd, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xf9, 0xf1, 0xec, 0xec, 0xf2, 0xfa, 0xff, - 0xff, 0xff, 0xfd, 0xf5, 0xe9, 0xe1, 0xda, 0xd7, 0xd9, 0xdb, 0xdb, 0xda, 0xda, 0xda, 0xdc, 0xdd, - 0xdd, 0xdb, 0xd9, 0xe3, 0xf5, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xe1, 0xb5, 0xa9, 0xb4, 0xb3, - 0xae, 0xb1, 0xb2, 0xb2, 0xb2, 0xb1, 0xb1, 0xb4, 0xb9, 0xc4, 0xdb, 0xf5, 0xff, 0xfe, 0xfd, 0xff, - 0xfc, 0xea, 0xd0, 0xbe, 0xb9, 0xc0, 0xd6, 0xf4, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xfa, 0xe8, 0xd9, 0xd3, 0xd3, 0xda, 0xeb, 0xfa, - 0xff, 0xfe, 0xfe, 0xff, 0xfd, 0xf2, 0xe5, 0xde, 0xda, 0xd8, 0xd9, 0xdb, 0xdc, 0xdb, 0xdb, 0xdb, - 0xdb, 0xdc, 0xdb, 0xde, 0xea, 0xf9, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xf4, 0xd5, 0xb5, 0xac, 0xb2, 0xb1, - 0xae, 0xb0, 0xb2, 0xb3, 0xb4, 0xb3, 0xb2, 0xbf, 0xda, 0xf1, 0xfb, 0xfe, 0xff, 0xfe, 0xfe, 0xfe, - 0xeb, 0xc9, 0xb9, 0xbb, 0xba, 0xba, 0xc4, 0xd8, 0xf3, 0xff, 0xff, 0xfe, 
0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xfc, 0xed, 0xd7, 0xce, 0xd1, 0xd2, 0xd2, 0xd9, 0xe8, - 0xf9, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfa, 0xf2, 0xe5, 0xda, 0xd7, 0xdb, 0xdd, 0xdc, 0xdb, 0xdb, - 0xdb, 0xdb, 0xdb, 0xdd, 0xe4, 0xf5, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xf3, 0xd3, 0xb6, 0xaf, 0xb4, 0xb3, - 0xb3, 0xb5, 0xb3, 0xad, 0xb0, 0xc1, 0xd3, 0xe5, 0xf8, 0xff, 0xff, 0xfe, 0xfc, 0xff, 0xff, 0xef, - 0xcf, 0xb9, 0xb7, 0xbd, 0xbf, 0xc0, 0xbd, 0xc4, 0xe6, 0xff, 0xff, 0xfc, 0xff, 0xfe, 0xf4, 0xea, - 0xea, 0xf0, 0xf9, 0xff, 0xff, 0xff, 0xff, 0xf5, 0xe0, 0xd1, 0xce, 0xd3, 0xd6, 0xd4, 0xd1, 0xd8, - 0xee, 0xff, 0xff, 0xfd, 0xfe, 0xff, 0xff, 0xfe, 0xf6, 0xed, 0xe3, 0xda, 0xd7, 0xd9, 0xdc, 0xdc, - 0xdc, 0xdc, 0xdc, 0xdc, 0xe3, 0xf4, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfb, 0xe1, 0xb9, 0xaa, 0xb1, 0xb2, - 0xae, 0xb0, 0xb3, 0xb8, 0xc6, 0xdf, 0xf7, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xdd, - 0xba, 0xba, 0xbe, 0xba, 0xbb, 0xc1, 0xbd, 0xbf, 0xe3, 0xff, 0xff, 0xfd, 0xfd, 0xef, 0xd9, 0xcd, - 0xcd, 0xd5, 0xe7, 0xfa, 0xff, 0xff, 0xff, 0xf1, 0xda, 0xd2, 0xd2, 0xd1, 0xd2, 0xd5, 0xd3, 0xd2, - 0xe3, 0xf8, 0xff, 0xfe, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xfe, 0xf5, 0xe6, 0xdd, 0xda, 0xd8, 0xd8, - 0xdb, 0xdb, 0xd8, 0xdc, 0xe9, 0xf8, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xf5, 0xd0, 0xb1, 0xab, 0xaf, - 0xad, 0xb0, 0xc3, 0xe0, 0xf5, 0xfd, 0xfe, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xfe, 0xed, 0xcd, - 0xb8, 0xba, 0xbe, 0xbb, 0xbd, 0xc2, 0xbe, 0xc2, 0xe5, 0xff, 0xff, 0xfd, 0xf1, 0xd3, 0xc4, 0xc8, - 0xc8, 0xc7, 0xd2, 0xe7, 0xf8, 0xff, 0xff, 0xf2, 0xdd, 0xd3, 0xd1, 0xd1, 0xd1, 0xd3, 0xd4, 0xd3, - 0xda, 0xeb, 0xfc, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xf4, 0xe6, 0xda, 0xd7, - 0xda, 0xda, 0xd8, 0xe2, 0xf6, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xef, 0xdc, 0xcd, 0xc7, - 0xcb, 0xd7, 0xe7, 0xf8, 0xff, 0xff, 0xfd, 0xfc, 0xfe, 0xff, 0xfe, 0xfd, 0xff, 0xf8, 0xd3, 0xbd, - 0xbc, 0xbb, 0xbb, 0xbe, 0xbf, 0xbf, 0xc1, 0xcf, 0xed, 0xff, 0xff, 0xf8, 0xe2, 0xc6, 0xc1, 0xcb, - 0xca, 0xc7, 0xca, 0xd5, 0xef, 0xff, 0xff, 0xf8, 0xe8, 0xd2, 0xcd, 0xd2, 0xd2, 0xcf, 0xd2, 0xd4, - 0xd3, 0xde, 0xf7, 0xff, 0xfd, 0xfe, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xfe, 0xf8, 0xef, 0xe8, - 0xe5, 0xe6, 0xeb, 0xf4, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf8, 0xec, - 0xf3, 0xff, 0xff, 0xfd, 0xfd, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xea, 0xc2, 0xb4, - 0xbd, 0xbf, 0xbb, 0xbe, 0xc0, 0xbb, 0xc5, 0xe4, 0xf9, 0xff, 0xff, 0xf1, 0xd7, 0xc7, 0xc6, 0xc8, - 0xc8, 0xca, 0xca, 0xce, 0xeb, 0xff, 0xff, 0xff, 0xf4, 0xd7, 0xcc, 0xd3, 0xd2, 0xcf, 0xd2, 0xd4, - 0xd2, 0xd7, 0xea, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xfb, - 0xf6, 0xf8, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, - 0xfe, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xf2, 0xd4, 0xbd, 0xb9, - 0xbd, 0xbd, 0xbd, 0xc0, 0xc0, 0xbc, 0xcf, 0xf5, 0xff, 0xff, 0xff, 0xf0, 0xd3, 0xc7, 0xc6, 0xc7, - 
0xc7, 0xcb, 0xc9, 0xca, 0xe8, 0xff, 0xff, 0xff, 0xfc, 0xe4, 0xd3, 0xd1, 0xd1, 0xd1, 0xd1, 0xd2, - 0xd5, 0xd3, 0xd8, 0xef, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xfc, 0xfe, 0xff, - 0xfe, 0xfc, 0xfc, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xe2, 0xbf, 0xba, 0xbf, - 0xbc, 0xbc, 0xbf, 0xbf, 0xbe, 0xc7, 0xe2, 0xfc, 0xff, 0xff, 0xff, 0xf0, 0xd2, 0xc5, 0xc5, 0xc7, - 0xc8, 0xc9, 0xc7, 0xca, 0xe8, 0xff, 0xff, 0xfd, 0xff, 0xf6, 0xdf, 0xcf, 0xd0, 0xd3, 0xd0, 0xd1, - 0xd5, 0xd3, 0xd1, 0xe4, 0xfb, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xfe, - 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xf4, 0xd1, 0xb6, 0xb8, 0xbf, - 0xbc, 0xbc, 0xc0, 0xbd, 0xbd, 0xd7, 0xf8, 0xff, 0xfd, 0xff, 0xff, 0xef, 0xd3, 0xc6, 0xc6, 0xc7, - 0xc7, 0xca, 0xc8, 0xca, 0xe7, 0xff, 0xff, 0xfb, 0xff, 0xff, 0xea, 0xd2, 0xd0, 0xd3, 0xd0, 0xd0, - 0xd2, 0xd3, 0xd3, 0xda, 0xec, 0xfd, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xfa, 0xd9, 0xc0, 0xba, 0xbb, 0xbc, - 0xbd, 0xbe, 0xc0, 0xbf, 0xc8, 0xe8, 0xff, 0xff, 0xfb, 0xff, 0xff, 0xee, 0xd3, 0xc6, 0xc6, 0xc8, - 0xc8, 0xca, 0xc8, 0xca, 0xe7, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xf5, 0xe1, 0xd3, 0xd0, 0xd1, 0xd1, - 0xd1, 0xd4, 0xd5, 0xd1, 0xdc, 0xf8, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfd, 0xff, 0xf0, 0xc7, 0xb6, 0xbe, 0xbe, 0xbb, - 0xbe, 0xc0, 0xbb, 0xc2, 0xdd, 0xf7, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xef, 0xd3, 0xc6, 0xc6, 0xc8, - 0xc8, 0xca, 0xc8, 0xca, 0xe8, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xff, 0xf1, 0xd6, 0xcd, 0xd2, 0xd2, - 0xd0, 0xd3, 0xd4, 0xd0, 0xd7, 0xed, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xf5, 0xd9, 0xbf, 0xb8, 0xbd, 0xbe, 0xbb, - 0xbe, 0xc0, 0xbb, 0xcb, 0xf1, 0xff, 0xfe, 0xff, 0xfe, 0xff, 0xff, 0xef, 0xd3, 0xc6, 0xc6, 0xc8, - 0xc8, 0xca, 0xc8, 0xca, 0xe8, 0xff, 0xff, 0xfd, 0xff, 0xfe, 0xff, 0xfa, 0xe0, 0xd1, 0xd2, 0xd1, - 0xd0, 0xd1, 0xd3, 0xd4, 0xd5, 0xdd, 0xf2, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xe5, 0xc1, 0xba, 0xbe, 0xbc, 0xbc, 0xbe, - 0xbf, 0xbe, 0xc6, 0xe0, 0xfb, 0xff, 0xfe, 0xff, 0xfe, 0xff, 0xff, 0xee, 0xd3, 0xc8, 0xc8, 0xc8, - 0xc7, 0xca, 0xc8, 0xca, 0xe8, 
0xff, 0xff, 0xfd, 0xff, 0xff, 0xff, 0xfe, 0xf2, 0xdd, 0xcf, 0xd0, - 0xd2, 0xd0, 0xd0, 0xd4, 0xd2, 0xd3, 0xe8, 0xfe, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xff, 0xfe, 0xda, 0xb6, 0xb7, 0xbf, 0xbc, 0xbb, 0xc0, - 0xbd, 0xbb, 0xd4, 0xf6, 0xff, 0xfe, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xee, 0xd3, 0xc8, 0xc8, 0xc8, - 0xc7, 0xca, 0xc7, 0xc9, 0xe7, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xe8, 0xd1, 0xd1, - 0xd3, 0xcf, 0xcf, 0xd3, 0xd1, 0xd0, 0xe0, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xf8, 0xd4, 0xb7, 0xba, 0xc0, 0xbc, 0xbd, 0xc1, - 0xbe, 0xc2, 0xe3, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xee, 0xd3, 0xc8, 0xc8, 0xc8, - 0xc7, 0xcb, 0xc7, 0xc8, 0xe6, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xf4, 0xde, 0xd1, - 0xd0, 0xd2, 0xd1, 0xd2, 0xd3, 0xd4, 0xde, 0xf0, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xff, 0xfc, 0xd7, 0xb8, 0xbc, 0xc0, 0xbd, 0xc0, 0xbe, - 0xc2, 0xd8, 0xf3, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xee, 0xd3, 0xc8, 0xc8, 0xc8, - 0xc7, 0xcb, 0xc7, 0xc8, 0xe6, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xef, 0xd3, - 0xcd, 0xd3, 0xd2, 0xd1, 0xd2, 0xd3, 0xdf, 0xf2, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xe0, 0xbb, 0xb7, 0xbd, 0xbd, 0xbc, 0xba, - 0xcc, 0xef, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xee, 0xd3, 0xc8, 0xc8, 0xc8, - 0xc7, 0xcb, 0xc7, 0xc8, 0xe6, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xfa, 0xe3, - 0xd1, 0xcd, 0xcf, 0xd1, 0xce, 0xcf, 0xe5, 0xfb, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xf0, 0xd3, 0xc3, 0xc0, 0xc1, 0xc2, 0xcb, - 0xe3, 0xfb, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xee, 0xd3, 0xc8, 0xc8, 0xc8, - 0xc7, 0xcb, 0xc7, 0xc8, 0xe6, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf7, - 0xe3, 0xd2, 0xd2, 0xd4, 0xd3, 0xdb, 0xf1, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xf6, 0xe4, 0xd5, 0xd3, 0xde, 0xef, - 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xee, 0xd3, 0xc8, 0xc8, 0xc8, - 0xc7, 0xca, 0xc6, 0xc8, 0xe7, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, - 0xf7, 0xeb, 0xe3, 0xe1, 0xe8, 0xf5, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfa, 0xf1, 0xf0, 0xf8, 0xfe, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xef, 0xd4, 0xc9, 0xc8, 0xc7, - 0xc6, 0xca, 0xc8, 0xca, 0xe8, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xfd, 0xf6, 0xf4, 0xf9, 0xfe, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xfe, - 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xf4, 0xda, 0xc8, 0xc6, 0xca, - 0xc9, 0xc9, 0xc9, 0xcf, 0xeb, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xfe, - 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xe7, 0xca, 0xc2, 0xcb, - 0xcb, 0xc6, 0xc9, 0xd9, 0xf2, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xf5, 0xdc, 0xc8, 0xc4, - 0xc4, 0xc5, 0xd3, 0xec, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xfe, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xf4, 0xe0, 0xd2, - 0xd2, 0xd9, 0xe9, 0xfc, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfb, 0xf7, - 0xf7, 0xfa, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfd, - 0xfd, 0xfd, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x9e, 0x43, + 0x44, 0x9e, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc8, 0x34, 0x34, + 0x34, 0x34, 0xc7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x93, 0x34, 0x34, + 0x34, 0x34, 0x93, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x8e, 0x34, 0x34, + 0x34, 0x34, 0x8e, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x8e, 0x34, 0x34, + 0x34, 0x34, 0x8d, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xf9, 0xa5, 0x7b, 0x93, 0xea, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x8d, 0x34, 0x34, + 0x34, 0x34, 0x8e, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xdb, 0x51, 0x2f, 0x6b, 0xf4, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xa5, 0x63, 0x64, 0x63, 0x72, 0xe6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x8e, 0x34, 0x34, + 0x34, 0x34, 0x8e, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd5, 0x22, 0x12, 0x12, 0x12, 0x6a, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0x7c, 0x63, 0x64, 0x64, 0x64, 0x72, 0xe6, 0xff, 0xff, 0xff, 0xff, 0xff, 0x8e, 0x34, 0x34, + 0x34, 0x34, 0x8e, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd4, 0x22, 0x11, 0x12, 0x12, 0x12, 0x2f, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0x92, 0x63, 0x63, 0x64, 0x63, 0x63, 0x72, 0xe6, 0xff, 0xff, 0xff, 0xff, 0x8e, 0x34, 0x34, + 0x34, 0x34, 0x8e, 0xff, 0xff, 0xff, 0xff, 0xd4, 0x22, 0x12, 0x12, 0x11, 0x12, 0x11, 0x51, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xeb, 0x71, 0x64, 0x64, 0x63, 0x64, 0x63, 0x72, 0xe7, 0xff, 0xff, 0xff, 0x94, 0x34, 0x33, + 0x34, 0x34, 0x94, 0xff, 0xff, 0xff, 0xd5, 0x22, 0x12, 0x11, 0x11, 0x12, 0x12, 0x22, 0xdb, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xe7, 0x71, 0x63, 0x63, 0x63, 0x63, 0x63, 0x72, 0xe7, 0xff, 0xff, 0xc7, 0x34, 0x34, + 0x34, 0x34, 0xc7, 0xff, 0xff, 0xd6, 0x23, 0x12, 0x11, 0x12, 0x11, 0x12, 0x23, 0xd4, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xe7, 0x72, 0x64, 0x63, 0x64, 0x64, 0x63, 0x83, 0xff, 0xff, 0xff, 0x9e, 0x43, + 0x44, 0x9e, 0xff, 0xff, 0xff, 0x39, 0x12, 0x12, 0x12, 0x11, 0x12, 0x22, 0xd5, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xe7, 0x72, 0x64, 0x63, 0x63, 0x63, 0x64, 0xfa, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xf6, 0x12, 0x12, 0x12, 0x11, 0x11, 0x22, 0xd5, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xe6, 0x72, 0x63, 0x64, 0x63, 0x78, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0x2c, 0x12, 0x12, 0x11, 0x23, 0xd5, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe6, 0x83, 0x64, 0x78, 0xdc, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xc5, 0x2c, 0x11, 0x39, 0xd6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe1, 0xc3, 0xc1, + 0xc2, 0xc1, 0xc1, 0xc1, 0xc1, 0xc3, 0xe1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0x94, 0x8e, 0x8e, 0x8e, 0x8e, 0x8e, + 0x8e, 0x93, 0xc7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc9, 0x8a, 0x8a, 0x89, + 0x89, 0x89, 0x89, 0x8a, 0x89, 0x89, 0x8a, 0xc9, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x9d, 0x34, 0x34, 0x34, 0x34, 0x34, 0x33, 0x34, + 0x34, 0x34, 0x34, 0x9e, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x94, 0x89, 0x8a, 0x8a, + 0x89, 0x8a, 0x89, 0x8a, 0x89, 0x8a, 0x89, 0x94, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x44, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, + 0x34, 0x34, 0x34, 0x44, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x94, 0x8a, 0x89, 0x8a, + 0x89, 0x8a, 0x89, 0x89, 0x89, 0x89, 0x8a, 0x93, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x43, 0x34, 0x34, 0x33, 0x34, 0x34, 0x34, 0x34, + 0x34, 0x34, 0x34, 0x44, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xca, 0x8a, 0x89, 0x89, + 0x8a, 0x89, 0x8a, 0x89, 0x89, 0x89, 0x8a, 0xc9, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x9e, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, + 0x34, 0x34, 0x34, 0x9e, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe2, 0xc4, 0xc1, + 0xc2, 0xc1, 0xc1, 0xc2, 0xc1, 0xc3, 0xe1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc8, 0x94, 0x8e, 0x8e, 0x8e, 0x8e, 0x8e, + 0x8e, 0x93, 0xc8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf2, 0xba, 0xa7, 0xb4, 0xec, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xf3, 0xd0, 0xc9, 0xd3, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0, 0xaf, 0xa7, 0xa6, 0xa7, 0xb4, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xd0, 0xc9, 0xc9, 0xc8, 0xce, 0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xf0, 0xaf, 0xa7, 0xa7, 0xa7, 0xa7, 0xa7, 0xfd, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xfd, 0xc8, 0xc8, 0xc9, 0xc8, 0xc8, 0xce, 0xf6, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xf0, 0xaf, 0xa7, 0xa7, 0xa7, 0xa7, 0xa7, 0xba, 0xff, 0xff, 0xff, 0xe0, 0xbf, + 0xbf, 0xe0, 0xff, 0xff, 0xff, 0xd4, 0xc9, 0xc9, 0xc8, 0xc9, 0xc9, 0xce, 0xf6, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xf0, 0xaf, 0xa7, 0xa7, 0xa7, 0xa7, 0xa7, 0xaf, 0xf2, 0xff, 0xff, 0xed, 0xb9, 0xb8, + 0xb9, 0xb9, 0xed, 0xff, 0xff, 0xf7, 0xcd, 0xc9, 0xc8, 0xc8, 0xc8, 0xc8, 0xcd, 0xf6, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xf3, 0xb0, 0xa7, 0xa7, 0xa7, 0xa7, 0xa7, 0xb0, 0xf1, 0xff, 0xff, 0xff, 0xdc, 0xb8, 0xb9, + 0xb8, 0xb9, 0xdd, 0xff, 0xff, 0xff, 0xf6, 0xcd, 0xc9, 0xc8, 0xc9, 0xc8, 0xc8, 0xcd, 0xf7, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xc2, 0xa7, 0xa7, 0xa7, 0xa7, 0xa7, 0xb0, 0xf0, 0xff, 0xff, 0xff, 0xff, 0xda, 0xb9, 0xb9, + 0xb8, 0xb9, 0xda, 0xff, 0xff, 0xff, 0xff, 0xf6, 0xcd, 0xc9, 0xc8, 0xc9, 0xc9, 0xc8, 0xda, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xb5, 0xa7, 0xa7, 0xa7, 0xa7, 0xb0, 0xf1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xda, 0xb8, 0xb9, + 0xb9, 0xb8, 0xda, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf6, 0xce, 0xc8, 0xc8, 0xc9, 0xc8, 0xd2, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
+ [ ... added rows of 0xNN grayscale pixel bytes (gGearPict3x frame data) elided ... ]
  [ ... surrounding unchanged rows and removed rows of 0xNN pixel bytes elided ... ]
@@ -3462,587 +5002,391 @@ const unsigned char gGearPict3x[9 * kGearFrames * kGearWidth * kGearHeight] = {
  [ ... unchanged rows of 0xNN pixel bytes elided ... ]
-};
-#endif /* !PEXPERT_NO_3X_IMAGES */
-
-const unsigned char gGearPict2x[4 * kGearFrames * kGearWidth * kGearHeight] = {
- [ ... removed rows of 0xNN pixel bytes (previous gGearPict2x frame data) elided ... ]
0xff, 0xff, 0xff, 0xef, 0xe3, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0, 0xb0, 0xae, 0xb1, 0xb1, 0xb2, 0xb5, 0xea, 0xff, 0xff, 0xcb, 0xbb, 0xbc, 0xbd, 0xc0, 0xd3, 0xff, 0xff, 0xee, 0xc5, 0xc7, 0xc8, 0xc8, 0xc9, 0xcc, 0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xf0, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc1, 0xae, 0xb1, 0xb1, 0xb1, 0xb4, 0xcb, 0xff, 0xff, 0xff, 0xc5, 0xbb, 0xbc, 0xbf, 0xbf, 0xcb, 0xff, 0xff, 0xff, 0xd5, 0xc5, 0xc8, 0xc8, 0xc9, 0xc9, 0xd7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe3, 0xae, 0xb0, 0xb1, 0xb1, 0xb4, 0xb7, 0xf4, 0xff, 0xff, 0xff, 0xc7, 0xbb, 0xbc, 0xbd, 0xbf, 0xcb, 0xff, 0xff, 0xff, 0xf7, 0xc7, 0xc7, 0xc8, 0xc8, 0xc9, 0xcb, 0xec, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfb, 0xb8, 0xae, 0xb1, 0xb1, 0xb2, 0xb4, 0xd5, 0xff, 0xff, 0xff, 0xff, 0xc7, 0xbb, 0xbc, 0xbf, 0xbf, 0xcb, 0xff, 0xff, 0xff, 0xff, 0xdf, 0xc5, 0xc8, 0xc8, 0xc8, 0xc9, 0xd0, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd8, 0xae, 0xb1, 0xb1, 0xb1, 0xb4, 0xbb, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xc7, 0xbb, 0xbc, 0xbd, 0xbf, 0xcb, 0xff, 0xff, 0xff, 0xff, 0xfb, 0xc9, 0xc7, 0xc8, 0xc8, 0xc8, 0xcb, 0xe4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfa, 0xb2, 0xb0, 0xb1, 0xb1, 0xb1, 0xb4, 0xe0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0xbb, 0xbd, 0xbd, 0xbf, 0xcb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe7, 0xc7, 0xc8, 0xc8, 0xc8, 0xc9, 0xcc, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe7, 0xae, 0xb0, 0xb1, 0xb1, 0xb2, 0xc3, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0xbb, 0xbc, 0xbd, 0xbf, 0xcb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd0, 0xc7, 0xc8, 0xc8, 0xc8, 0xc9, 0xef, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xeb, 0xb0, 0xb1, 0xb1, 0xb1, 0xb4, 0xee, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0xbb, 0xbd, 0xbd, 0xbf, 0xcb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf2, 0xc7, 0xc8, 0xc8, 0xc8, 0xc8, 0xf3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xc8, 0xb1, 0xb1, 0xb2, 0xdb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0xbb, 0xbc, 0xbd, 0xbf, 0xcb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe3, 0xc7, 0xc8, 0xc8, 0xd8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xe2, 
0xd5, 0xeb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0xbb, 0xbc, 0xbd, 0xbf, 0xcb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0, 0xe2, 0xeb, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xcc, 0xbc, 0xbd, 0xbd, 0xbf, 0xd0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe6, 0xbb, 0xbd, 0xbd, 0xbf, 0xe6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xde, 0xc4, 0xc4, 0xdf, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf, 0x8b, 0x8b, 0xbf, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd1, 0x7d, 0x7b, 0x7b, 0x7b, 0xc9, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xa0, 0x7d, 0x7b, 0x7b, 0x7b, 0x9c, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xd3, 0xbc, 0xde, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x95, 0x7d, 0x7b, 0x7b, 0x7b, 0x92, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xdb, 0xb4, 0xcc, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xa9, 0x85, 0x85, 0x83, 0xc5, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x95, 0x7d, 0x7b, 0x7b, 0x7b, 0x92, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf, 0x76, 0x76, 0x76, 0x9e, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe2, 0x85, 0x85, 0x85, 0x83, 0x82, 0xe2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x95, 0x7d, 0x7b, 0x7b, 0x7b, 0x92, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xdf, 0x76, 0x76, 0x76, 0x76, 0x76, 0xdf, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd9, 0x85, 0x85, 0x85, 0x85, 0x83, 0x9a, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0x95, 0x7d, 0x7b, 0x7b, 0x7b, 0x92, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0x91, 0x76, 0x76, 0x76, 
0x76, 0x75, 0xd4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf7, 0x8a, 0x85, 0x85, 0x85, 0x83, 0x82, 0xcb, 0xff, 0xff, 0xff, 0xff, 0xff, 0x96, 0x7e, 0x7d, 0x7b, 0x7b, 0x92, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0x76, 0x76, 0x76, 0x76, 0x75, 0x79, 0xf4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc0, 0x86, 0x85, 0x85, 0x85, 0x83, 0x8d, 0xfa, 0xff, 0xff, 0xff, 0xff, 0x95, 0x7e, 0x7d, 0x7b, 0x7b, 0x92, 0xff, 0xff, 0xff, 0xff, 0xf8, 0x81, 0x76, 0x76, 0x76, 0x76, 0x75, 0xb5, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfa, 0x94, 0x86, 0x85, 0x85, 0x85, 0x82, 0xbc, 0xff, 0xff, 0xff, 0xff, 0x96, 0x7d, 0x7b, 0x7b, 0x7b, 0x92, 0xff, 0xff, 0xff, 0xff, 0xb7, 0x76, 0x76, 0x76, 0x76, 0x76, 0x83, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd5, 0x86, 0x85, 0x85, 0x85, 0x83, 0x85, 0xee, 0xff, 0xff, 0xff, 0x96, 0x7e, 0x7b, 0x7b, 0x7b, 0x92, 0xff, 0xff, 0xff, 0xec, 0x79, 0x76, 0x76, 0x76, 0x76, 0x75, 0xcd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xa4, 0x86, 0x85, 0x85, 0x85, 0x82, 0xa6, 0xff, 0xff, 0xff, 0x96, 0x7e, 0x7b, 0x7b, 0x7b, 0x92, 0xff, 0xff, 0xff, 0xa0, 0x76, 0x76, 0x76, 0x76, 0x75, 0x94, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xeb, 0xdb, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe8, 0x87, 0x86, 0x85, 0x85, 0x83, 0x82, 0xdb, 0xff, 0xff, 0xa0, 0x7e, 0x7d, 0x7b, 0x7b, 0xa1, 0xff, 0xff, 0xd8, 0x76, 0x76, 0x76, 0x76, 0x76, 0x75, 0xe4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xf6, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfa, 0xa8, 0x8e, 0x8d, 0x91, 0xc7, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xb9, 0x86, 0x85, 0x85, 0x85, 0x82, 0xa8, 0xff, 0xff, 0xd3, 0x7e, 0x7d, 0x7b, 0x7b, 0xd1, 0xff, 0xff, 0xa0, 0x76, 0x76, 0x76, 0x76, 0x75, 0xad, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xf2, 0xe4, 0xe2, 0xe2, 0xe8, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xcf, 0x8e, 0x8e, 0x8e, 0x8e, 0x8d, 0x9a, 0xd8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf7, 0x8f, 0x86, 0x85, 0x85, 0x83, 0x9a, 0xff, 0xff, 0xff, 0xc9, 0x91, 0x91, 0xc8, 0xff, 0xff, 0xff, 0x94, 0x76, 0x76, 0x75, 0x76, 0x7e, 0xf4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf6, 0xe6, 0xe3, 0xe3, 0xe2, 0xe2, 0xe2, 0xf3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc0, 0x8f, 0x8f, 0x8e, 0x8e, 0x8e, 0x8e, 0x8d, 0xa9, 0xea, 0xff, 0xff, 0xff, 0xff, 0xd3, 0x87, 0x85, 0x85, 0x85, 0xbf, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xb8, 0x76, 0x76, 0x75, 0x75, 0xcb, 0xff, 0xff, 0xff, 0xff, 0xfb, 0xea, 0xe3, 0xe3, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xee, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xdc, 0x8f, 0x8f, 0x8f, 0x8f, 0x8e, 0x8e, 0x8d, 0x8d, 0x8d, 0xbc, 0xf7, 0xff, 0xff, 0xff, 0xcc, 0x95, 0x8d, 0xb2, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xa9, 0x7d, 0x83, 0xc4, 0xff, 0xff, 0xff, 0xfc, 0xef, 0xe3, 0xe3, 0xe3, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc3, 0x8f, 0x8f, 0x8f, 0x8f, 0x8f, 0x8e, 0x8e, 0x8d, 0x8d, 0x94, 0xd3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf6, 0xe7, 0xe3, 0xe3, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xef, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe6, 0xa5, 0x91, 0x8f, 0x8f, 0x8f, 0x8e, 0x8e, 0x8e, 0x8d, 0x8d, 0xcc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf4, 0xe3, 0xe2, 0xe3, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe7, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd4, 0x99, 0x8f, 0x8f, 0x8f, 0x8f, 0x8e, 0x8e, 0x8d, 0x95, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe7, 0xe3, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe3, 0xf3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf8, 0xc3, 0x94, 0x8f, 0x8f, 0x8f, 0x8f, 0x8e, 0x8f, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe3, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe0, 0xee, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xee, 0xb0, 0x91, 0x8f, 0x8f, 0x8f, 0xbd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xee, 0xe2, 0xe2, 0xe2, 0xe2, 0xe8, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe3, 0xbb, 0xad, 0xcb, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0, 0xe8, 0xeb, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xd5, 0xb7, 
0xad, 0xac, 0xac, 0xac, 0xac, 0xac, 0xac, 0xac, 0xac, 0xb7, 0xd9, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf3, 0xe8, 0xe4, 0xe4, 0xe4, 0xe4, 0xe4, 0xe4, 0xe4, 0xe4, 0xe4, 0xe7, 0xf2, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xd0, 0x98, 0x98, 0x98, 0x98, 0x98, 0x98, 0x98, 0x98, 0x98, 0x98, 0x98, 0x98, 0x98, 0xd5, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0, 0xde, 0xde, 0xde, 0xdc, 0xdc, 0xdc, 0xdc, 0xdc, 0xdc, 0xdc, 0xdc, 0xdc, 0xdc, 0xef, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xa2, 0x9a, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x98, 0x98, 0x99, 0xa6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe2, 0xdc, 0xdc, 0xdc, 0xdc, 0xdc, 0xdc, 0xdc, 0xdc, 0xdc, 0xdc, 0xdc, 0xdb, 0xdb, 0xe0, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xa5, 0x9c, 0x9c, 0x9a, 0x9a, 0x9c, 0x9c, 0x9a, 0x9a, 0x9c, 0x9a, 0x9a, 0x9c, 0x9a, 0xaa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe0, 0xdb, 0xdb, 0xdb, 0xdb, 0xdb, 0xdb, 0xdb, 0xdb, 0xdb, 0xdb, 0xdb, 0xdb, 0xdb, 0xdf, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xd0, 0x9c, 0x9c, 0x9c, 0x9c, 0x9c, 0x9c, 0x9c, 0x9c, 0x9c, 0x9c, 0x9c, 0x9c, 0x9c, 0xd7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xdb, 0xd9, 0xd9, 0xdb, 0xdb, 0xdb, 0xd9, 0xd9, 0xdb, 0xdb, 0xdb, 0xdb, 0xdb, 0xee, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xd7, 0xb8, 0xb0, 0xb0, 0xb0, 0xb0, 0xb0, 0xb0, 0xb0, 0xb0, 0xb1, 0xbb, 0xdb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf2, 0xe4, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe3, 0xef, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe7, 0xc4, 0xb9, 0xd4, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xea, 0xe0, 0xe6, 0xf4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0, 0xbc, 0xa2, 0xa4, 0xa4, 0xa4, 
0xc9, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe3, 0xd4, 0xd4, 0xd4, 0xd4, 0xe0, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfa, 0xcc, 0xa5, 0xa4, 0xa4, 0xa4, 0xa5, 0xa6, 0xa8, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd1, 0xd1, 0xd3, 0xd4, 0xd4, 0xd4, 0xd4, 0xe8, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xdb, 0xa9, 0xa4, 0xa4, 0xa4, 0xa5, 0xa5, 0xa6, 0xa6, 0xad, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd4, 0xd1, 0xd1, 0xd3, 0xd3, 0xd4, 0xd4, 0xd4, 0xd8, 0xef, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xea, 0xb5, 0xa4, 0xa4, 0xa4, 0xa5, 0xa5, 0xa6, 0xa6, 0xa8, 0xa8, 0xd8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xeb, 0xd1, 0xd1, 0xd1, 0xd1, 0xd3, 0xd4, 0xd4, 0xd4, 0xd4, 0xdc, 0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xcc, 0xa4, 0xa4, 0xa4, 0xa5, 0xa5, 0xa5, 0xa6, 0xa8, 0xa8, 0xae, 0xdc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xec, 0xd4, 0xd1, 0xd1, 0xd1, 0xd1, 0xd3, 0xd3, 0xd4, 0xd4, 0xd4, 0xe7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe2, 0xa4, 0xa5, 0xa5, 0xa5, 0xa6, 0xa6, 0xa6, 0xa8, 0xa8, 0xcb, 0xf8, 0xff, 0xff, 0xff, 0xde, 0xb8, 0xb5, 0xcf, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xde, 0xcd, 0xd1, 0xea, 0xff, 0xff, 0xff, 0xfb, 0xe2, 0xd1, 0xd1, 0xd1, 0xd1, 0xd1, 0xd3, 0xd3, 0xd4, 0xd4, 0xf0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xcb, 0xa5, 0xa5, 0xa5, 0xa5, 0xa6, 0xa8, 0xa8, 0xbd, 0xef, 0xff, 0xff, 0xff, 0xff, 0xe0, 0xae, 0xae, 0xb0, 0xb1, 0xd8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe2, 0xc8, 0xc8, 0xcb, 0xcb, 0xec, 0xff, 0xff, 0xff, 0xff, 0xf6, 0xdc, 0xd1, 0xd1, 0xd1, 0xd1, 0xd3, 0xd3, 0xd3, 0xe6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd8, 0xa5, 0xa6, 0xa6, 0xa6, 0xa8, 0xb2, 0xe0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfa, 0xb4, 0xae, 0xb0, 0xb1, 0xb2, 0xc4, 0xff, 0xff, 0xff, 0xe3, 0xc8, 0xc9, 0xe6, 0xff, 0xff, 0xff, 0xd1, 0xc7, 0xc8, 0xc8, 0xcb, 0xcf, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xd7, 0xd1, 0xd1, 0xd1, 0xd1, 0xd1, 0xec, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xc1, 0xa6, 0xa8, 0xaa, 0xd1, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd0, 0xae, 0xb0, 0xb1, 0xb1, 0xb5, 0xcc, 0xff, 0xff, 0xe8, 0xbb, 0xbc, 0xbd, 0xbf, 0xeb, 0xff, 0xff, 0xd7, 0xc5, 0xc8, 0xc8, 0xc9, 0xcb, 0xe0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xe7, 0xd3, 0xd1, 0xd1, 0xdf, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xe3, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0, 0xb0, 0xae, 0xb1, 0xb1, 0xb2, 0xb5, 0xea, 0xff, 0xff, 0xcb, 0xbb, 0xbc, 0xbd, 
0xc0, 0xd3, 0xff, 0xff, 0xee, 0xc5, 0xc7, 0xc8, 0xc8, 0xc9, 0xcc, 0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xf0, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc1, 0xae, 0xb1, 0xb1, 0xb1, 0xb4, 0xcb, 0xff, 0xff, 0xff, 0xc5, 0xbb, 0xbc, 0xbf, 0xbf, 0xcb, 0xff, 0xff, 0xff, 0xd5, 0xc5, 0xc8, 0xc8, 0xc9, 0xc9, 0xd7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe3, 0xae, 0xb0, 0xb1, 0xb1, 0xb4, 0xb7, 0xf4, 0xff, 0xff, 0xff, 0xc7, 0xbb, 0xbc, 0xbd, 0xbf, 0xcb, 0xff, 0xff, 0xff, 0xf7, 0xc7, 0xc7, 0xc8, 0xc8, 0xc9, 0xcb, 0xec, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfb, 0xb8, 0xae, 0xb1, 0xb1, 0xb2, 0xb4, 0xd5, 0xff, 0xff, 0xff, 0xff, 0xc7, 0xbb, 0xbc, 0xbf, 0xbf, 0xcb, 0xff, 0xff, 0xff, 0xff, 0xdf, 0xc5, 0xc8, 0xc8, 0xc8, 0xc9, 0xd0, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd8, 0xae, 0xb1, 0xb1, 0xb1, 0xb4, 0xbb, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xc7, 0xbb, 0xbc, 0xbd, 0xbf, 0xcb, 0xff, 0xff, 0xff, 0xff, 0xfb, 0xc9, 0xc7, 0xc8, 0xc8, 0xc8, 0xcb, 0xe4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfa, 0xb2, 0xb0, 0xb1, 0xb1, 0xb1, 0xb4, 0xe0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0xbb, 0xbd, 0xbd, 0xbf, 0xcb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe7, 0xc7, 0xc8, 0xc8, 0xc8, 0xc9, 0xcc, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe7, 0xae, 0xb0, 0xb1, 0xb1, 0xb2, 0xc3, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0xbb, 0xbc, 0xbd, 0xbf, 0xcb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd0, 0xc7, 0xc8, 0xc8, 0xc8, 0xc9, 0xef, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xeb, 0xb0, 0xb1, 0xb1, 0xb1, 0xb4, 0xee, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0xbb, 0xbd, 0xbd, 0xbf, 0xcb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf2, 0xc7, 0xc8, 0xc8, 0xc8, 0xc8, 0xf3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xc8, 0xb1, 0xb1, 0xb2, 0xdb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0xbb, 0xbc, 0xbd, 0xbf, 0xcb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe3, 0xc7, 0xc8, 0xc8, 0xd8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xe2, 0xd5, 0xeb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0xbb, 0xbc, 0xbd, 0xbf, 0xcb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0, 0xe2, 0xeb, 
0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xcc, 0xbc, 0xbd, 0xbd, 0xbf, 0xd0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe6, 0xbb, 0xbd, 0xbd, 0xbf, 0xe6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xde, 0xc4, 0xc4, 0xdf, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd5, 0x99, 0x91, 0xbf, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe3, 0x86, 0x85, 0x85, 0x83, 0xc1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xb8, 0x86, 0x85, 0x85, 0x83, 0x94, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xdc, 0xc0, 0xdb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xb1, 0x86, 0x85, 0x85, 0x83, 0x8a, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe4, 0xb7, 0xcc, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf, 0x8f, 0x8e, 0x8e, 0xbb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xb1, 0x85, 0x85, 0x85, 0x83, 0x8a, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd4, 0x7e, 0x7d, 0x7b, 0x91, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf3, 0x8f, 0x8f, 0x8f, 0x8e, 0x8d, 0xd7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xb1, 0x86, 0x85, 0x85, 0x85, 0x8a, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0x81, 0x7d, 0x7b, 0x7b, 0x7b, 0xcd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xea, 0x8f, 0x8f, 0x8f, 0x8e, 0x8e, 0x99, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xb1, 0x86, 0x85, 0x85, 0x83, 0x8a, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xac, 0x7e, 0x7d, 0x7b, 0x7b, 0x7b, 0xc5, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0x9d, 0x8f, 0x8f, 0x8f, 0x8e, 0x8e, 0xc3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xb1, 0x86, 0x85, 0x85, 0x83, 0x8a, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe2, 0x7e, 0x7d, 0x7b, 0x7b, 0x7b, 0x7b, 0xea, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd7, 0x91, 0x8f, 0x8f, 0x8e, 0x8e, 0x8f, 0xef, 0xff, 0xff, 0xff, 0xff, 0xb1, 0x86, 0x85, 0x85, 0x83, 0x89, 0xff, 0xff, 0xff, 0xff, 0xfe, 0x98, 0x7e, 0x7b, 0x7b, 0x7b, 0x7b, 0xa9, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xa5, 0x8f, 0x8f, 0x8f, 0x8e, 0x8d, 0xb2, 0xff, 0xff, 0xff, 0xff, 0xb1, 0x86, 0x85, 0x85, 0x83, 0x89, 0xff, 0xff, 0xff, 0xff, 0xcb, 0x7e, 0x7d, 0x7b, 0x7b, 0x7b, 0x7f, 0xf0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe6, 0x91, 0x8f, 0x8f, 0x8e, 0x8d, 0x8d, 0xe4, 0xff, 0xff, 0xff, 0xb1, 0x86, 0x85, 0x85, 0x83, 0x8a, 0xff, 0xff, 0xff, 0xf6, 0x87, 0x7e, 0x7b, 0x7b, 0x7b, 0x7b, 0xc1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xb8, 0x8f, 0x8f, 0x8f, 0x8e, 0x8d, 0xa2, 0xfe, 0xff, 0xff, 0xb1, 0x87, 0x85, 0x85, 0x83, 0x89, 0xff, 0xff, 0xff, 0xb4, 0x7e, 0x7d, 0x7b, 0x7b, 0x7b, 0x8d, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xe3, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf3, 0x96, 0x8f, 0x8f, 0x8f, 0x8e, 0x8d, 0xd0, 0xff, 0xff, 0xbd, 0x87, 0x85, 0x85, 0x83, 0x95, 0xff, 0xff, 0xe8, 0x81, 0x7e, 0x7b, 0x7b, 0x7b, 0x7b, 0xd1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe2, 0xdf, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbd, 0x99, 0x98, 0x9a, 0xc5, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xcc, 0x91, 0x8f, 0x8f, 0x8e, 0x8d, 0xa2, 0xff, 0xff, 0xe6, 0x89, 0x85, 0x85, 0x83, 0xc4, 0xff, 0xff, 0xb7, 0x7e, 0x7d, 0x7b, 0x7b, 0x7b, 0x99, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xcb, 0x83, 0x76, 0x76, 0x8e, 0xf3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe0, 0x9a, 0x9a, 0x99, 0x99, 0x98, 0xa0, 0xd7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xa1, 0x91, 0x8f, 0x8f, 0x8d, 0x96, 0xff, 0xff, 0xff, 0xd9, 0x9e, 0x94, 0xbf, 0xfc, 0xff, 0xff, 0xaa, 0x7e, 0x7b, 0x7b, 0x7b, 0x7b, 0xe6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xdf, 0x91, 0x76, 0x76, 0x76, 0x76, 0x76, 0xb2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd3, 0x9c, 0x9a, 0x9c, 0x99, 0x99, 0x98, 0x98, 0xaa, 0xe6, 0xff, 0xff, 0xff, 0xff, 0xe2, 0x92, 0x8f, 0x8f, 0x8e, 0xb5, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xcc, 0x7d, 0x7b, 0x7b, 0x7a, 0xb7, 0xff, 0xff, 0xff, 0xff, 0xef, 0xa6, 0x76, 0x76, 0x76, 0x76, 0x76, 0x76, 0x76, 0xa0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xeb, 0x9c, 0x9c, 0x9c, 0x9c, 0x9a, 0x99, 0x98, 0x98, 0x98, 0xbc, 0xf3, 0xff, 0xff, 
0xff, 0xdc, 0xa1, 0x94, 0xb4, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xb7, 0x87, 0x86, 0xb8, 0xff, 0xff, 0xff, 0xf8, 0xb8, 0x7b, 0x76, 0x76, 0x76, 0x76, 0x76, 0x76, 0x76, 0x75, 0xc1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd0, 0x9c, 0x9c, 0x9c, 0x9c, 0x9c, 0x9a, 0x99, 0x98, 0x98, 0x9a, 0xcd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd7, 0x85, 0x76, 0x76, 0x76, 0x76, 0x76, 0x76, 0x76, 0x75, 0x75, 0xa8, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xec, 0xb1, 0x9c, 0x9c, 0x9c, 0x9c, 0x9a, 0x9a, 0x99, 0x98, 0x98, 0xc5, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd5, 0x79, 0x76, 0x76, 0x76, 0x76, 0x76, 0x76, 0x75, 0x75, 0x85, 0xcd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xdc, 0xa8, 0x9c, 0x9c, 0x9c, 0x9a, 0x9a, 0x9a, 0x98, 0x99, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x94, 0x76, 0x76, 0x76, 0x76, 0x76, 0x76, 0x75, 0x7a, 0xbc, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xcf, 0xa0, 0x9c, 0x9c, 0x9c, 0x9c, 0x9a, 0x99, 0xf2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x8b, 0x76, 0x76, 0x76, 0x76, 0x75, 0x75, 0xa5, 0xef, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf3, 0xc0, 0x9d, 0x9c, 0x9c, 0x9c, 0xb8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbc, 0x76, 0x75, 0x75, 0x73, 0x91, 0xe2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xeb, 0xc5, 0xb1, 0xcb, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xc1, 0x98, 0xa0, 0xd0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xe3, 0xc3, 0xb7, 0xb7, 0xb7, 0xb7, 0xb7, 0xb5, 0xb4, 0xb4, 0xb4, 0xbf, 0xd8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfa, 0xef, 0xea, 0xea, 0xe8, 0xe8, 0xe8, 0xe8, 0xe8, 0xe8, 0xe8, 0xeb, 0xf3, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xe2, 0xa5, 0xa5, 0xa5, 0xa5, 0xa4, 0xa4, 0xa4, 0xa4, 0xa4, 0xa4, 0xa4, 0xa4, 0xa4, 0xd0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf8, 0xe3, 0xe3, 0xe3, 0xe2, 0xe3, 0xe3, 0xe3, 0xe3, 0xe2, 0xe3, 0xe2, 0xe2, 0xe2, 0xee, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xb8, 0xa5, 0xa5, 0xa5, 0xa5, 0xa5, 0xa5, 0xa5, 0xa5, 0xa5, 0xa5, 0xa5, 0xa5, 0xa5, 0xaa, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xea, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe3, 0xfe, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xb8, 0xa6, 0xa6, 0xa6, 0xa6, 0xa6, 0xa6, 0xa6, 0xa6, 0xa6, 0xa6, 0xa6, 0xa6, 0xa6, 0xa9, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xea, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xfe, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xdf, 0xa6, 0xa6, 0xa6, 0xa8, 0xa8, 0xa6, 0xa8, 0xa8, 0xa8, 0xa8, 0xa8, 0xa8, 0xa8, 0xd0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf6, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xee, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xe2, 0xc3, 0xb9, 0xb9, 0xb9, 0xbb, 0xbb, 0xbb, 0xbb, 0xbb, 0xbb, 0xc0, 0xd8, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf6, 0xea, 0xe6, 0xe6, 0xe6, 0xe6, 0xe6, 0xe6, 0xe7, 0xe6, 0xe7, 0xe8, 0xf0, 0xfe, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xd3, 0xc1, 0xd8, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf2, 0xe7, 0xea, 0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf7, 0xcd, 0xae, 0xae, 0xae, 0xb0, 0xc9, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xee, 0xdc, 0xdc, 0xdc, 
0xde, 0xe6, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xdb, 0xb2, 0xae, 0xae, 0xb1, 0xb1, 0xb1, 0xb2, 0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe0, 0xdb, 0xdc, 0xdc, 0xdc, 0xdc, 0xde, 0xea, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe6, 0xbb, 0xae, 0xb0, 0xb0, 0xb1, 0xb1, 0xb1, 0xb2, 0xb5, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe2, 0xd9, 0xdb, 0xdb, 0xdb, 0xdc, 0xdc, 0xdc, 0xdf, 0xf0, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf2, 0xc4, 0xae, 0xb0, 0xb0, 0xb1, 0xb1, 0xb1, 0xb2, 0xb4, 0xb5, 0xd0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf3, 0xd9, 0xd9, 0xd9, 0xdb, 0xdb, 0xdc, 0xdc, 0xdc, 0xdc, 0xe2, 0xf4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xde, 0xb0, 0xae, 0xb0, 0xb1, 0xb1, 0xb1, 0xb2, 0xb2, 0xb4, 0xb7, 0xd7, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf3, 0xdc, 0xd9, 0xdb, 0xdb, 0xdb, 0xdb, 0xdc, 0xdc, 0xdc, 0xdc, 0xe8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0, 0xb0, 0xb0, 0xb1, 0xb1, 0xb1, 0xb1, 0xb2, 0xb4, 0xb5, 0xcc, 0xf4, 0xff, 0xff, 0xff, 0xeb, 0xc7, 0xc0, 0xd5, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe8, 0xd8, 0xd8, 0xeb, 0xff, 0xff, 0xff, 0xfc, 0xea, 0xdb, 0xd9, 0xd9, 0xdb, 0xdb, 0xdb, 0xdc, 0xdc, 0xdc, 0xf0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xdc, 0xb1, 0xb1, 0xb1, 0xb1, 0xb2, 0xb2, 0xb5, 0xc1, 0xea, 0xff, 0xff, 0xff, 0xff, 0xec, 0xbc, 0xbb, 0xbc, 0xbf, 0xd8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xee, 0xd1, 0xd1, 0xd4, 0xd4, 0xe8, 0xff, 0xff, 0xff, 0xff, 0xfa, 0xe6, 0xd9, 0xdb, 0xdb, 0xdb, 0xdb, 0xdb, 0xdc, 0xe7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe6, 0xb1, 0xb1, 0xb2, 0xb2, 0xb2, 0xb9, 0xdf, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xc4, 0xbb, 0xbb, 0xbd, 0xbf, 0xc5, 0xff, 0xff, 0xff, 0xef, 0xd4, 0xd3, 0xe7, 0xff, 0xff, 0xff, 0xe0, 0xd1, 0xd1, 0xd4, 0xd4, 0xd5, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf4, 0xdf, 0xd9, 0xd9, 0xdb, 0xdb, 0xdb, 0xeb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xcf, 0xb1, 0xb2, 0xb4, 0xd3, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xdf, 0xbb, 0xbb, 0xbd, 0xbf, 0xc0, 0xcb, 0xff, 0xff, 0xf4, 0xc7, 0xc8, 0xc8, 0xcb, 0xe7, 0xff, 0xff, 0xe4, 0xd1, 0xd1, 0xd1, 0xd3, 0xd4, 0xe2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xdc, 0xdb, 0xdb, 0xe2, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf4, 0xe4, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf7, 0xbd, 0xbb, 0xbd, 0xbd, 0xbf, 0xc0, 0xe6, 0xff, 0xff, 0xdf, 0xc5, 0xc8, 0xc8, 0xcb, 0xd4, 0xff, 0xff, 0xf7, 0xd0, 0xd1, 0xd1, 0xd1, 0xd4, 0xd4, 0xf3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xf3, 0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd5, 0xbb, 0xbc, 0xbd, 0xbf, 0xbf, 0xcb, 0xfe, 0xff, 0xff, 0xd9, 0xc5, 0xc7, 0xc8, 0xc9, 0xcf, 0xff, 0xff, 0xff, 0xe3, 0xd1, 0xd1, 0xd1, 0xd3, 0xd4, 0xd9, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf2, 0xbb, 0xbb, 0xbd, 0xbd, 0xbf, 0xc0, 0xec, 0xff, 0xff, 0xff, 0xd9, 0xc5, 0xc7, 0xc8, 0xc9, 0xcf, 0xff, 0xff, 0xff, 0xfb, 0xd4, 0xd1, 0xd1, 0xd1, 0xd3, 0xd4, 0xeb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xcb, 0xbb, 0xbc, 0xbd, 0xbf, 0xbf, 0xd3, 0xff, 0xff, 0xff, 0xff, 0xd9, 0xc5, 0xc8, 0xc8, 0xc9, 0xcf, 0xff, 0xff, 0xff, 0xff, 0xeb, 0xd1, 0xd1, 0xd1, 0xd3, 0xd4, 0xd5, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe7, 0xbb, 0xbc, 0xbd, 0xbd, 0xbf, 0xc1, 0xf6, 0xff, 0xff, 0xff, 0xff, 0xd9, 0xc5, 0xc8, 0xc8, 0xc9, 0xcf, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xd7, 0xd1, 0xd1, 0xd1, 0xd3, 0xd4, 0xe6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xc3, 0xbb, 0xbd, 0xbd, 0xbf, 0xbf, 0xde, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd9, 0xc7, 0xc8, 0xc8, 0xc9, 0xcf, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf2, 0xd1, 0xd1, 0xd1, 0xd1, 0xd3, 0xd4, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf2, 0xbb, 0xbc, 0xbd, 0xbd, 0xbf, 0xc4, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd9, 0xc7, 0xc8, 0xc8, 0xc9, 0xcd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xdf, 0xd1, 0xd1, 0xd1, 0xd3, 0xd4, 0xec, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf7, 0xbc, 0xbd, 0xbd, 0xbf, 0xbf, 0xe6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd9, 0xc7, 0xc8, 0xc8, 0xc9, 0xcd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf8, 0xd1, 0xd1, 0xd1, 0xd1, 0xd4, 0xef, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd3, 0xbd, 0xbf, 0xbf, 0xd5, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd9, 0xc7, 0xc8, 0xc8, 0xc9, 0xcd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xee, 0xd1, 0xd1, 0xd1, 0xdb, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xe7, 0xd9, 0xea, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd9, 0xc7, 0xc8, 0xc8, 0xc8, 0xcd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf4, 0xe7, 0xea, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xdf, 0xc7, 0xc8, 0xc8, 0xc9, 0xd3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf2, 0xc7, 0xc8, 0xc8, 0xc8, 0xe3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xea, 0xcd, 0xcc, 0xde, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd5, 0x99, 0x91, 0xbf, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe3, 0x86, 0x85, 0x85, 0x83, 0xc1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xb8, 0x86, 0x85, 0x85, 0x83, 0x94, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xdc, 0xc0, 0xdb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xb1, 0x86, 0x85, 0x85, 0x83, 0x8a, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe4, 0xb7, 0xcc, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf, 0x8f, 0x8e, 0x8e, 0xbb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xb1, 0x85, 0x85, 0x85, 0x83, 0x8a, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd4, 0x7e, 0x7d, 0x7b, 0x91, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf3, 0x8f, 0x8f, 0x8f, 0x8e, 0x8d, 0xd7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xb1, 0x86, 0x85, 0x85, 0x85, 0x8a, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0x81, 0x7d, 0x7b, 0x7b, 0x7b, 0xcd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xea, 0x8f, 0x8f, 0x8f, 0x8e, 0x8e, 0x99, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xb1, 0x86, 0x85, 0x85, 0x83, 0x8a, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xac, 0x7e, 0x7d, 0x7b, 0x7b, 0x7b, 0xc5, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0x9d, 0x8f, 0x8f, 0x8f, 0x8e, 0x8e, 0xc3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xb1, 0x86, 0x85, 0x85, 0x83, 0x8a, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xe2, 0x7e, 0x7d, 0x7b, 0x7b, 0x7b, 0x7b, 0xea, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd7, 0x91, 0x8f, 0x8f, 0x8e, 0x8e, 0x8f, 0xef, 0xff, 0xff, 0xff, 0xff, 0xb1, 0x86, 0x85, 0x85, 0x83, 0x89, 0xff, 0xff, 0xff, 0xff, 0xfe, 0x98, 0x7e, 0x7b, 0x7b, 0x7b, 0x7b, 0xa9, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xa5, 0x8f, 0x8f, 0x8f, 0x8e, 0x8d, 0xb2, 0xff, 0xff, 0xff, 0xff, 0xb1, 0x86, 0x85, 0x85, 0x83, 0x89, 0xff, 0xff, 0xff, 0xff, 0xcb, 0x7e, 0x7d, 0x7b, 0x7b, 0x7b, 0x7f, 0xf0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe6, 0x91, 0x8f, 0x8f, 0x8e, 0x8d, 0x8d, 0xe4, 0xff, 0xff, 0xff, 0xb1, 0x86, 0x85, 0x85, 0x83, 0x8a, 0xff, 0xff, 0xff, 0xf6, 0x87, 0x7e, 0x7b, 0x7b, 0x7b, 0x7b, 0xc1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xb8, 0x8f, 0x8f, 0x8f, 0x8e, 0x8d, 0xa2, 0xfe, 0xff, 0xff, 0xb1, 0x87, 0x85, 0x85, 0x83, 0x89, 0xff, 0xff, 0xff, 0xb4, 0x7e, 0x7d, 0x7b, 0x7b, 0x7b, 0x8d, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xe3, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf3, 0x96, 0x8f, 0x8f, 0x8f, 0x8e, 0x8d, 0xd0, 0xff, 0xff, 0xbd, 0x87, 0x85, 0x85, 0x83, 0x95, 0xff, 0xff, 0xe8, 0x81, 0x7e, 0x7b, 0x7b, 0x7b, 0x7b, 0xd1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe2, 0xdf, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbd, 0x99, 0x98, 0x9a, 0xc5, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xcc, 0x91, 0x8f, 0x8f, 0x8e, 0x8d, 0xa2, 0xff, 0xff, 0xe6, 0x89, 0x85, 0x85, 0x83, 0xc4, 0xff, 0xff, 0xb7, 0x7e, 0x7d, 0x7b, 0x7b, 0x7b, 0x99, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xcb, 0x83, 0x76, 0x76, 0x8e, 0xf3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe0, 0x9a, 0x9a, 0x99, 0x99, 0x98, 0xa0, 0xd7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xa1, 0x91, 0x8f, 0x8f, 0x8d, 0x96, 0xff, 0xff, 0xff, 0xd9, 0x9e, 0x94, 0xbf, 0xfc, 0xff, 0xff, 0xaa, 0x7e, 0x7b, 0x7b, 0x7b, 0x7b, 0xe6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xdf, 0x91, 0x76, 0x76, 0x76, 0x76, 0x76, 0xb2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd3, 0x9c, 0x9a, 0x9c, 0x99, 0x99, 0x98, 0x98, 0xaa, 0xe6, 0xff, 0xff, 0xff, 0xff, 0xe2, 0x92, 0x8f, 0x8f, 0x8e, 0xb5, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xcc, 0x7d, 0x7b, 0x7b, 0x7a, 0xb7, 0xff, 0xff, 0xff, 0xff, 0xef, 0xa6, 0x76, 0x76, 0x76, 0x76, 0x76, 0x76, 0x76, 0xa0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xeb, 0x9c, 0x9c, 0x9c, 0x9c, 0x9a, 0x99, 0x98, 0x98, 0x98, 0xbc, 0xf3, 0xff, 0xff, 0xff, 0xdc, 0xa1, 0x94, 0xb4, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xb7, 0x87, 0x86, 0xb8, 0xff, 0xff, 0xff, 0xf8, 0xb8, 0x7b, 
0x76, 0x76, 0x76, 0x76, 0x76, 0x76, 0x76, 0x75, 0xc1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd0, 0x9c, 0x9c, 0x9c, 0x9c, 0x9c, 0x9a, 0x99, 0x98, 0x98, 0x9a, 0xcd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd7, 0x85, 0x76, 0x76, 0x76, 0x76, 0x76, 0x76, 0x76, 0x75, 0x75, 0xa8, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xec, 0xb1, 0x9c, 0x9c, 0x9c, 0x9c, 0x9a, 0x9a, 0x99, 0x98, 0x98, 0xc5, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd5, 0x79, 0x76, 0x76, 0x76, 0x76, 0x76, 0x76, 0x75, 0x75, 0x85, 0xcd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xdc, 0xa8, 0x9c, 0x9c, 0x9c, 0x9a, 0x9a, 0x9a, 0x98, 0x99, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x94, 0x76, 0x76, 0x76, 0x76, 0x76, 0x76, 0x75, 0x7a, 0xbc, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xcf, 0xa0, 0x9c, 0x9c, 0x9c, 0x9c, 0x9a, 0x99, 0xf2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x8b, 0x76, 0x76, 0x76, 0x76, 0x75, 0x75, 0xa5, 0xef, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf3, 0xc0, 0x9d, 0x9c, 0x9c, 0x9c, 0xb8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbc, 0x76, 0x75, 0x75, 0x73, 0x91, 0xe2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xeb, 0xc5, 0xb1, 0xcb, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xc1, 0x98, 0xa0, 0xd0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xe3, 0xc3, 0xb7, 0xb7, 0xb7, 0xb7, 0xb7, 0xb5, 0xb4, 0xb4, 0xb4, 0xbf, 0xd8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfa, 0xef, 0xea, 0xea, 0xe8, 0xe8, 0xe8, 0xe8, 0xe8, 0xe8, 0xe8, 0xeb, 0xf3, 0xff, 0xff, 
0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xe2, 0xa5, 0xa5, 0xa5, 0xa5, 0xa4, 0xa4, 0xa4, 0xa4, 0xa4, 0xa4, 0xa4, 0xa4, 0xa4, 0xd0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf8, 0xe3, 0xe3, 0xe3, 0xe2, 0xe3, 0xe3, 0xe3, 0xe3, 0xe2, 0xe3, 0xe2, 0xe2, 0xe2, 0xee, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xb8, 0xa5, 0xa5, 0xa5, 0xa5, 0xa5, 0xa5, 0xa5, 0xa5, 0xa5, 0xa5, 0xa5, 0xa5, 0xa5, 0xaa, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xea, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe3, 0xfe, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xb8, 0xa6, 0xa6, 0xa6, 0xa6, 0xa6, 0xa6, 0xa6, 0xa6, 0xa6, 0xa6, 0xa6, 0xa6, 0xa6, 0xa9, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xea, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xfe, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xdf, 0xa6, 0xa6, 0xa6, 0xa8, 0xa8, 0xa6, 0xa8, 0xa8, 0xa8, 0xa8, 0xa8, 0xa8, 0xa8, 0xd0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf6, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xee, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xe2, 0xc3, 0xb9, 0xb9, 0xb9, 0xbb, 0xbb, 0xbb, 0xbb, 0xbb, 0xbb, 0xc0, 0xd8, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf6, 0xea, 0xe6, 0xe6, 0xe6, 0xe6, 0xe6, 0xe6, 0xe7, 0xe6, 0xe7, 0xe8, 0xf0, 0xfe, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xd3, 0xc1, 0xd8, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf2, 0xe7, 0xea, 0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf7, 0xcd, 0xae, 0xae, 0xae, 0xb0, 0xc9, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xee, 0xdc, 0xdc, 0xdc, 0xde, 0xe6, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xfe, 0xdb, 0xb2, 0xae, 0xae, 0xb1, 0xb1, 0xb1, 0xb2, 0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe0, 0xdb, 0xdc, 0xdc, 0xdc, 0xdc, 0xde, 0xea, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe6, 0xbb, 0xae, 0xb0, 0xb0, 0xb1, 0xb1, 0xb1, 0xb2, 0xb5, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe2, 0xd9, 0xdb, 0xdb, 0xdb, 0xdc, 0xdc, 0xdc, 0xdf, 0xf0, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf2, 0xc4, 0xae, 0xb0, 0xb0, 0xb1, 0xb1, 0xb1, 0xb2, 0xb4, 0xb5, 0xd0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf3, 0xd9, 0xd9, 0xd9, 0xdb, 0xdb, 0xdc, 0xdc, 0xdc, 0xdc, 0xe2, 0xf4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xde, 0xb0, 0xae, 0xb0, 0xb1, 0xb1, 0xb1, 0xb2, 0xb2, 0xb4, 0xb7, 0xd7, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf3, 0xdc, 0xd9, 0xdb, 0xdb, 0xdb, 0xdb, 0xdc, 0xdc, 0xdc, 0xdc, 0xe8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0, 0xb0, 0xb0, 0xb1, 0xb1, 0xb1, 0xb1, 0xb2, 0xb4, 0xb5, 0xcc, 0xf4, 0xff, 0xff, 0xff, 0xeb, 0xc7, 0xc0, 0xd5, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe8, 0xd8, 0xd8, 0xeb, 0xff, 0xff, 0xff, 0xfc, 0xea, 0xdb, 0xd9, 0xd9, 0xdb, 0xdb, 0xdb, 0xdc, 0xdc, 0xdc, 0xf0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xdc, 0xb1, 0xb1, 0xb1, 0xb1, 0xb2, 0xb2, 0xb5, 0xc1, 0xea, 0xff, 0xff, 0xff, 0xff, 0xec, 0xbc, 0xbb, 0xbc, 0xbf, 0xd8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xee, 0xd1, 0xd1, 0xd4, 0xd4, 0xe8, 0xff, 0xff, 0xff, 0xff, 0xfa, 0xe6, 0xd9, 0xdb, 0xdb, 0xdb, 0xdb, 0xdb, 0xdc, 0xe7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe6, 0xb1, 0xb1, 0xb2, 0xb2, 0xb2, 0xb9, 0xdf, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xc4, 0xbb, 0xbb, 0xbd, 0xbf, 0xc5, 0xff, 0xff, 0xff, 0xef, 0xd4, 0xd3, 0xe7, 0xff, 0xff, 0xff, 0xe0, 0xd1, 0xd1, 0xd4, 0xd4, 0xd5, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf4, 0xdf, 0xd9, 0xd9, 0xdb, 0xdb, 0xdb, 0xeb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xcf, 0xb1, 0xb2, 0xb4, 0xd3, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xdf, 0xbb, 0xbb, 0xbd, 0xbf, 0xc0, 0xcb, 0xff, 0xff, 0xf4, 0xc7, 0xc8, 0xc8, 0xcb, 0xe7, 0xff, 0xff, 0xe4, 0xd1, 0xd1, 0xd1, 0xd3, 0xd4, 0xe2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xdc, 0xdb, 0xdb, 0xe2, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf4, 0xe4, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf7, 0xbd, 0xbb, 0xbd, 0xbd, 0xbf, 0xc0, 0xe6, 0xff, 0xff, 0xdf, 0xc5, 0xc8, 0xc8, 0xcb, 0xd4, 0xff, 0xff, 0xf7, 0xd0, 0xd1, 0xd1, 0xd1, 0xd4, 0xd4, 0xf3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xf3, 0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd5, 0xbb, 0xbc, 0xbd, 
0xbf, 0xbf, 0xcb, 0xfe, 0xff, 0xff, 0xd9, 0xc5, 0xc7, 0xc8, 0xc9, 0xcf, 0xff, 0xff, 0xff, 0xe3, 0xd1, 0xd1, 0xd1, 0xd3, 0xd4, 0xd9, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf2, 0xbb, 0xbb, 0xbd, 0xbd, 0xbf, 0xc0, 0xec, 0xff, 0xff, 0xff, 0xd9, 0xc5, 0xc7, 0xc8, 0xc9, 0xcf, 0xff, 0xff, 0xff, 0xfb, 0xd4, 0xd1, 0xd1, 0xd1, 0xd3, 0xd4, 0xeb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xcb, 0xbb, 0xbc, 0xbd, 0xbf, 0xbf, 0xd3, 0xff, 0xff, 0xff, 0xff, 0xd9, 0xc5, 0xc8, 0xc8, 0xc9, 0xcf, 0xff, 0xff, 0xff, 0xff, 0xeb, 0xd1, 0xd1, 0xd1, 0xd3, 0xd4, 0xd5, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe7, 0xbb, 0xbc, 0xbd, 0xbd, 0xbf, 0xc1, 0xf6, 0xff, 0xff, 0xff, 0xff, 0xd9, 0xc5, 0xc8, 0xc8, 0xc9, 0xcf, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xd7, 0xd1, 0xd1, 0xd1, 0xd3, 0xd4, 0xe6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xc3, 0xbb, 0xbd, 0xbd, 0xbf, 0xbf, 0xde, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd9, 0xc7, 0xc8, 0xc8, 0xc9, 0xcf, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf2, 0xd1, 0xd1, 0xd1, 0xd1, 0xd3, 0xd4, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf2, 0xbb, 0xbc, 0xbd, 0xbd, 0xbf, 0xc4, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd9, 0xc7, 0xc8, 0xc8, 0xc9, 0xcd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xdf, 0xd1, 0xd1, 0xd1, 0xd3, 0xd4, 0xec, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf7, 0xbc, 0xbd, 0xbd, 0xbf, 0xbf, 0xe6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd9, 0xc7, 0xc8, 0xc8, 0xc9, 0xcd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf8, 0xd1, 0xd1, 0xd1, 0xd1, 0xd4, 0xef, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd3, 0xbd, 0xbf, 0xbf, 0xd5, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd9, 0xc7, 0xc8, 0xc8, 0xc9, 0xcd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xee, 0xd1, 0xd1, 0xd1, 0xdb, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xe7, 0xd9, 0xea, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd9, 0xc7, 0xc8, 0xc8, 0xc8, 0xcd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf4, 0xe7, 0xea, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xdf, 0xc7, 0xc8, 0xc8, 0xc9, 0xd3, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf2, 0xc7, 0xc8, 0xc8, 0xc8, 0xe3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xea, 0xcd, 0xcc, 0xde, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, }; const unsigned char gGearPict[kGearFrames * kGearWidth * kGearHeight] = { - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0, 0x89, 0x89, 0xf0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe4, 0xf6, 0xff, 0xff, 0xff, 0xcb, 0x76, 0x76, 0xc9, 0xff, 0xff, 0xff, 0xfc, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc1, 0x7b, 0x8a, 0xf6, 0xff, 0xff, 0xc7, 0x76, 0x76, 0xc5, 0xff, 0xff, 0xfe, 0xe6, 0xe2, 0xf2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xb4, 0x7b, 0x7b, 0xb5, 0xff, 0xff, 0xc7, 0x76, 0x76, 0xc5, 0xff, 0xff, 0xf0, 0xe2, 0xe2, 0xee, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xee, 0x81, 0x7b, 0x7e, 0xec, 0xff, 0xc7, 0x76, 0x76, 0xc5, 0xff, 0xfc, 0xe4, 0xe2, 0xe2, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbb, 0x7b, 0x7b, 0xa1, 0xff, 0xc7, 0x76, 0x76, 0xc4, 0xff, 0xf2, 0xe2, 0xe2, 0xef, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xfe, 0xc3, 0xbb, 0xee, 0xff, 0xff, 0xfa, 0x8b, 0x7b, 0x7b, 0xe2, 0xdb, 0x76, 0x76, 0xd9, 0xf3, 0xe4, 0xe2, 0xe4, 0xfe, 0xff, 0xff, 0xfa, 0xec, 0xee, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xe4, 0x85, 0x85, 0x85, 0xbf, 0xfa, 0xff, 0xd1, 0x7e, 0x7b, 0xd7, 0xff, 0xdc, 0xdb, 0xff, 0xf4, 0xe2, 0xe2, 0xf4, 0xff, 0xfe, 0xee, 0xde, 0xdc, 0xdb, 0xf7, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xf8, 0x94, 0x85, 0x85, 0x83, 0x8e, 0xd3, 0xff, 0xd5, 0xd0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf4, 0xf4, 0xff, 0xf3, 0xe0, 0xdc, 0xdb, 0xdb, 0xde, 0xfc, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xf8, 0xbd, 0x89, 0x85, 0x85, 0x83, 0xd7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf4, 0xdc, 0xdc, 0xdb, 0xdb, 0xea, 0xfc, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xec, 0xa9, 0x87, 0x85, 0xd0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf2, 0xdb, 0xd9, 0xe4, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe4, 0xd9, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf3, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xf4, 0xd1, 0xd1, 0xd1, 0xd1, 0xd1, 0xde, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf3, 0xef, 0xee, 0xee, 0xee, 0xee, 0xfb, 0xff, 0xff, - 0xff, 0xff, 0xa0, 0x8e, 0x8e, 0x8e, 0x8e, 0x8e, 0x8e, 0xdb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf2, 0xd4, 0xd4, 0xd4, 0xd3, 0xd3, 0xd3, 0xd9, 0xff, 0xff, - 0xff, 0xff, 0xa1, 0x8f, 0x8f, 0x8f, 0x8f, 0x8f, 0x8f, 0xdc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0, 0xd1, 0xd1, 0xd1, 0xd1, 0xd1, 0xd1, 0xd8, 0xff, 0xff, - 0xff, 0xff, 0xf4, 0xd3, 0xd3, 0xd3, 0xd3, 0xd4, 0xe0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf2, 0xec, 0xec, 0xec, 0xec, 0xec, 0xfb, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe8, 0xdf, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0, 0xb7, 0x98, 0x99, 0xd8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xea, 0xc8, 0xc9, 0xdb, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xf8, 0xc5, 0x9a, 0x99, 0x9c, 0x9c, 0xe0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xec, 0xc5, 0xc8, 0xc8, 0xcb, 0xe2, 0xfc, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xf8, 0xa4, 0x98, 0x9a, 0x9c, 0xa5, 0xde, 0xff, 0xe2, 0xdf, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe7, 0xeb, 0xff, 0xeb, 0xcb, 0xc8, 0xc8, 0xc8, 0xcf, 0xfc, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xe8, 0x9a, 0x9c, 0x9d, 0xcc, 0xfb, 0xff, 0xdf, 0xa4, 0xa6, 0xe4, 0xff, 0xeb, 0xec, 0xff, 0xea, 0xbc, 0xbf, 0xea, 0xff, 0xfc, 0xe0, 0xc8, 0xc8, 0xc8, 0xf3, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xcf, 0xc8, 0xf2, 0xff, 0xff, 0xfb, 0xad, 0xa5, 0xa8, 
0xec, 0xe8, 0xb0, 0xb2, 0xee, 0xef, 0xbb, 0xbd, 0xc5, 0xfc, 0xff, 0xff, 0xf7, 0xdf, 0xe4, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xcd, 0xa5, 0xa6, 0xc3, 0xff, 0xde, 0xb0, 0xb2, 0xe0, 0xff, 0xcf, 0xbb, 0xbd, 0xde, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf2, 0xa5, 0xa5, 0xa9, 0xf3, 0xff, 0xde, 0xb0, 0xb2, 0xe0, 0xff, 0xf6, 0xbc, 0xbd, 0xc0, 0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc9, 0xa5, 0xa6, 0xd0, 0xff, 0xff, 0xde, 0xb0, 0xb1, 0xe0, 0xff, 0xff, 0xd8, 0xbc, 0xbd, 0xd9, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd3, 0xa6, 0xb1, 0xf8, 0xff, 0xff, 0xde, 0xb0, 0xb1, 0xe0, 0xff, 0xff, 0xfa, 0xc3, 0xbd, 0xe0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xee, 0xf8, 0xff, 0xff, 0xff, 0xe0, 0xb0, 0xb1, 0xe3, 0xff, 0xff, 0xff, 0xfa, 0xf0, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf7, 0xbb, 0xbc, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0, 0x89, 0x89, 0xf0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe4, 0xf6, 0xff, 0xff, 0xff, 0xcb, 0x76, 0x75, 0xc9, 0xff, 0xff, 0xff, 0xfc, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc0, 0x7b, 0x8a, 0xf6, 0xff, 0xff, 0xc7, 0x76, 0x76, 0xc5, 0xff, 0xff, 0xfe, 0xe6, 0xe2, 0xf0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xb4, 0x7b, 0x7b, 0xb5, 0xff, 0xff, 0xc7, 0x76, 0x76, 0xc4, 0xff, 0xff, 0xf0, 0xe2, 0xe2, 0xee, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xee, 0x7f, 0x7b, 0x7e, 0xec, 0xff, 0xc7, 0x76, 0x75, 0xc3, 0xff, 0xfc, 0xe3, 0xe2, 0xe2, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbb, 0x7d, 0x7b, 0xa1, 0xff, 0xc7, 0x76, 0x75, 0xc3, 0xff, 0xf0, 0xe2, 0xe2, 0xef, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xfe, 0xc3, 0xbb, 0xee, 0xff, 0xff, 0xfa, 0x8b, 0x7b, 0x7b, 0xe2, 0xdb, 0x76, 0x73, 0xdb, 0xf4, 0xe7, 0xe2, 0xe4, 0xfe, 0xff, 0xff, 0xfa, 0xec, 0xee, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xe4, 0x85, 0x85, 0x85, 0xbd, 0xfa, 0xff, 0xd1, 0x7e, 0x7b, 0xd5, 0xff, 0xdc, 0xdb, 0xff, 0xf6, 0xe3, 0xe2, 0xf4, 0xff, 
0xfe, 0xee, 0xde, 0xdc, 0xdb, 0xf7, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xf8, 0x94, 0x85, 0x85, 0x83, 0x8e, 0xd3, 0xff, 0xd5, 0xd0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf4, 0xf4, 0xff, 0xf3, 0xe0, 0xdc, 0xdb, 0xdb, 0xde, 0xfc, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xf8, 0xbd, 0x89, 0x85, 0x85, 0x83, 0xd7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf4, 0xdc, 0xdc, 0xdb, 0xdb, 0xea, 0xfc, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xec, 0xa9, 0x86, 0x85, 0xd0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf2, 0xdb, 0xd9, 0xe4, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe4, 0xd9, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf3, 0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xf4, 0xd1, 0xd1, 0xd1, 0xd1, 0xd1, 0xde, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf3, 0xef, 0xee, 0xee, 0xee, 0xee, 0xfb, 0xff, 0xff, - 0xff, 0xff, 0xa0, 0x8e, 0x8e, 0x8e, 0x8e, 0x8e, 0x8e, 0xdb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf2, 0xd4, 0xd4, 0xd4, 0xd3, 0xd3, 0xd3, 0xd9, 0xff, 0xff, - 0xff, 0xff, 0xa1, 0x8f, 0x8f, 0x8f, 0x8f, 0x8f, 0x8f, 0xdc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0, 0xd1, 0xd1, 0xd1, 0xd1, 0xd1, 0xd1, 0xd8, 0xff, 0xff, - 0xff, 0xff, 0xf4, 0xd3, 0xd3, 0xd3, 0xd3, 0xd4, 0xe0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf2, 0xec, 0xec, 0xec, 0xec, 0xec, 0xfb, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe8, 0xdf, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0, 0xb8, 0x98, 0x99, 0xd8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xea, 0xc8, 0xcb, 0xdb, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xf8, 0xc5, 0x9a, 0x99, 0x9c, 0x9c, 0xe0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xec, 0xc7, 0xc8, 0xc8, 0xcc, 0xe3, 0xfc, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xf8, 0xa4, 0x99, 0x9a, 0x9c, 0xa5, 0xde, 0xff, 0xe3, 0xdf, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe7, 0xeb, 0xff, 0xeb, 0xcb, 0xc8, 0xc8, 0xc8, 0xcf, 0xfc, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xe8, 0x9a, 0x9c, 0x9d, 0xcc, 0xfb, 0xff, 0xdf, 0xa4, 0xa6, 0xe4, 0xff, 0xec, 0xec, 0xff, 0xea, 0xbd, 0xbf, 0xea, 0xff, 0xfc, 0xe2, 0xc8, 0xc8, 0xc8, 0xf3, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xcf, 0xc8, 0xf2, 0xff, 0xff, 0xfb, 0xad, 0xa5, 0xa8, 0xec, 0xe8, 0xb0, 0xb4, 0xee, 0xef, 0xbb, 0xbd, 0xc7, 0xfc, 0xff, 0xff, 0xf7, 0xe0, 0xe4, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xcd, 0xa5, 0xa6, 0xc3, 0xff, 0xde, 0xb1, 0xb4, 0xe0, 0xff, 0xcf, 0xbd, 0xbf, 0xde, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf2, 0xa5, 0xa6, 0xaa, 0xf3, 0xff, 0xde, 0xb0, 0xb2, 0xe0, 0xff, 0xf6, 0xbd, 0xbd, 0xc0, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc9, 0xa5, 0xa6, 0xd0, 0xff, 0xff, 0xde, 0xb1, 0xb2, 0xe0, 0xff, 0xff, 0xd8, 0xbc, 0xbf, 0xd9, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd4, 0xa6, 0xb1, 0xf8, 0xff, 0xff, 0xde, 0xb1, 0xb2, 0xe0, 0xff, 0xff, 0xfa, 0xc3, 0xbd, 0xe0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xee, 0xf8, 0xff, 0xff, 0xff, 0xe0, 0xb1, 0xb2, 0xe3, 0xff, 0xff, 0xff, 0xfa, 0xf0, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf7, 0xbb, 0xbc, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf2, 0x8f, 0x8e, 0xf2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe7, 0xf6, 0xff, 0xff, 0xff, 0xcd, 0x7d, 0x7b, 0xcc, 0xff, 0xff, 0xff, 0xf6, 0xe4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc5, 0x85, 0x92, 0xf6, 0xff, 0xff, 0xcb, 0x7d, 0x7b, 0xc8, 0xff, 0xff, 0xf6, 0x86, 0x76, 0xbd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xb8, 0x85, 0x83, 0xb9, 0xff, 0xff, 0xcb, 0x7d, 0x7b, 0xc8, 0xff, 0xff, 0xb2, 0x76, 0x76, 0xad, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0x89, 0x85, 0x86, 0xee, 0xff, 0xcb, 0x7d, 0x7b, 0xc8, 0xff, 0xec, 0x79, 0x76, 0x79, 0xec, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc0, 0x85, 0x85, 0xa6, 0xff, 0xcb, 0x7d, 0x7b, 0xc8, 0xff, 0x9e, 0x76, 0x76, 0xb5, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xfe, 0xc7, 0xc0, 0xef, 0xff, 0xff, 0xfa, 0x94, 0x85, 0x82, 0xe3, 0xdc, 0x7e, 0x7b, 0xdc, 0xe0, 0x76, 0x76, 0x83, 0xf8, 0xff, 0xff, 0xfb, 0xf0, 0xf2, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xe6, 0x8f, 0x8e, 0x8f, 0xc4, 0xfa, 0xff, 0xd5, 0x86, 0x85, 0xd8, 0xff, 0xde, 0xde, 0xff, 0xd5, 0x76, 0x75, 0xcd, 0xff, 0xfc, 0xf6, 0xe3, 0xe2, 0xe2, 0xf8, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xf8, 0x9c, 0x8f, 0x8f, 0x8e, 0x98, 0xd7, 0xff, 0xd8, 0xd3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xcd, 0xd0, 0xff, 0xec, 0xe4, 0xe4, 0xe2, 0xe2, 0xe4, 0xfe, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xf8, 0xc3, 0x92, 0x8f, 0x8e, 0x8d, 0xdb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xeb, 0xeb, 0xe3, 0xe2, 0xe3, 0xef, 0xfe, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xee, 0xb0, 0x8f, 0x8e, 0xd4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf4, 0xe2, 0xe2, 0xe8, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe7, 0xdc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf6, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xf6, 0xd5, 0xd5, 0xd5, 0xd4, 0xd4, 0xe2, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf6, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xfc, 0xff, 0xff, - 0xff, 0xff, 0xa9, 0x98, 0x98, 0x98, 0x98, 0x98, 0x98, 0xde, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf4, 0xdc, 0xdc, 0xdc, 0xdc, 0xdc, 0xdc, 0xe2, 0xff, 0xff, - 0xff, 0xff, 0xaa, 0x9c, 0x9c, 0x9a, 0x9c, 0x9c, 0x9c, 0xdf, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf4, 0xdb, 0xdb, 0xdb, 0xdb, 0xdb, 0xdb, 0xe0, 0xff, 0xff, - 0xff, 0xff, 0xf6, 0xd7, 0xd7, 0xd7, 0xd7, 0xd8, 0xe3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf4, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xfb, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xeb, 0xe2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf3, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf2, 0xbf, 0xa2, 0xa5, 0xde, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xee, 0xd4, 0xd4, 0xe3, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xfa, 0xcc, 0xa5, 0xa5, 0xa5, 0xa6, 0xe3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0, 0xd1, 0xd1, 0xd3, 0xd4, 0xe8, 0xfc, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xfa, 0xad, 0xa4, 0xa5, 0xa6, 0xb1, 0xe2, 0xff, 0xe6, 0xe2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xeb, 0xef, 0xff, 0xee, 0xd5, 0xd1, 0xd1, 0xd4, 0xd8, 0xfc, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xeb, 0xa5, 0xa5, 0xa8, 0xd1, 0xfb, 0xff, 0xe3, 0xae, 0xb1, 0xe8, 0xff, 0xee, 0xef, 0xff, 0xee, 0xc8, 0xc9, 0xee, 0xff, 0xfc, 0xe7, 0xd1, 0xd1, 0xd3, 0xf6, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xd4, 0xcf, 0xf3, 0xff, 0xff, 0xfb, 0xb7, 0xb1, 0xb2, 0xef, 0xec, 0xbb, 0xbf, 0xf0, 0xf2, 0xc5, 0xc8, 0xd0, 0xfc, 0xff, 0xff, 0xf8, 0xe6, 0xe8, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd4, 0xb0, 0xb1, 0xcb, 0xff, 0xe2, 0xbb, 0xbf, 0xe6, 0xff, 0xd7, 0xc8, 0xc8, 0xe4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf3, 0xb1, 0xb1, 0xb4, 0xf4, 0xff, 0xe3, 0xbb, 0xbf, 0xe6, 0xff, 0xf7, 0xc7, 0xc8, 0xcb, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd0, 0xb0, 0xb1, 0xd5, 0xff, 0xff, 0xe3, 0xbc, 0xbf, 0xe6, 0xff, 0xff, 0xdf, 0xc8, 0xc9, 0xe0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd8, 0xb1, 0xbc, 0xfa, 0xff, 0xff, 0xe3, 0xbc, 0xbf, 0xe6, 0xff, 0xff, 0xfb, 0xcd, 0xc8, 0xe6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf8, 0xff, 0xff, 0xff, 0xe4, 0xbc, 0xbf, 0xe7, 0xff, 0xff, 0xff, 0xfa, 0xf3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf8, 0xc5, 0xc5, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf2, 0x8f, 0x8e, 0xf2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe7, 0xf6, 0xff, 0xff, 0xff, 0xcd, 0x7d, 0x7b, 0xcc, 0xff, 0xff, 0xff, 0xf6, 0xe4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc5, 0x85, 0x92, 0xf6, 0xff, 0xff, 0xcb, 0x7d, 0x7b, 0xc8, 0xff, 0xff, 0xf6, 0x86, 0x76, 0xbd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xb8, 0x85, 0x83, 0xb9, 0xff, 0xff, 0xcb, 0x7d, 0x7b, 0xc8, 0xff, 0xff, 0xb2, 0x76, 0x76, 0xad, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0x89, 0x85, 0x86, 0xee, 0xff, 0xcb, 0x7d, 0x7b, 0xc8, 0xff, 0xec, 0x79, 0x76, 0x79, 0xec, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc0, 0x85, 0x85, 0xa6, 0xff, 0xcb, 0x7d, 0x7b, 0xc8, 0xff, 0x9e, 0x76, 0x76, 0xb5, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xfe, 0xc7, 0xc0, 0xef, 0xff, 0xff, 0xfa, 0x94, 0x85, 0x82, 0xe3, 0xdc, 0x7e, 0x7b, 0xdc, 0xe0, 0x76, 0x76, 0x83, 0xf8, 0xff, 0xff, 0xfb, 0xf0, 0xf2, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xe6, 0x8f, 0x8e, 0x8f, 0xc4, 0xfa, 0xff, 0xd5, 0x86, 0x85, 0xd8, 0xff, 0xde, 0xde, 0xff, 0xd5, 0x76, 0x75, 0xcd, 0xff, 0xfc, 0xf6, 0xe3, 0xe2, 0xe2, 0xf8, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xf8, 0x9c, 0x8f, 0x8f, 0x8e, 0x98, 0xd7, 0xff, 0xd8, 0xd3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xcd, 0xd0, 0xff, 0xec, 0xe4, 0xe4, 0xe2, 0xe2, 0xe4, 0xfe, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xf8, 0xc3, 0x92, 0x8f, 0x8e, 0x8d, 0xdb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xeb, 0xeb, 0xe3, 0xe2, 0xe3, 0xef, 0xfe, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xee, 0xb0, 0x8f, 0x8e, 0xd4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf4, 0xe2, 0xe2, 0xe8, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe7, 0xdc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf6, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xf6, 0xd5, 0xd5, 0xd5, 0xd4, 0xd4, 0xe2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf6, 0xf2, 0xf2, 0xf2, 0xf2, 0xf2, 0xfc, 0xff, 0xff, - 0xff, 0xff, 0xa9, 0x98, 0x98, 0x98, 0x98, 0x98, 0x98, 0xde, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf4, 0xdc, 0xdc, 0xdc, 0xdc, 0xdc, 0xdc, 0xe2, 0xff, 0xff, - 0xff, 0xff, 0xaa, 0x9c, 0x9c, 0x9a, 0x9c, 0x9c, 0x9c, 0xdf, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf4, 0xdb, 0xdb, 0xdb, 0xdb, 0xdb, 0xdb, 0xe0, 0xff, 0xff, - 0xff, 0xff, 0xf6, 0xd7, 0xd7, 0xd7, 0xd7, 0xd8, 0xe3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf4, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xfb, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xeb, 0xe2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf3, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf2, 0xbf, 0xa2, 0xa5, 0xde, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xee, 0xd4, 0xd4, 0xe3, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xfa, 0xcc, 0xa5, 0xa5, 0xa5, 0xa6, 0xe3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0, 0xd1, 0xd1, 0xd3, 0xd4, 0xe8, 0xfc, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xfa, 0xad, 0xa4, 0xa5, 0xa6, 0xb1, 0xe2, 0xff, 0xe6, 0xe2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xeb, 0xef, 0xff, 0xee, 0xd5, 0xd1, 0xd1, 0xd4, 0xd8, 0xfc, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xeb, 0xa5, 0xa5, 0xa8, 0xd1, 0xfb, 0xff, 0xe3, 0xae, 0xb1, 0xe8, 0xff, 0xee, 0xef, 0xff, 0xee, 0xc8, 0xc9, 0xee, 0xff, 0xfc, 0xe7, 0xd1, 0xd1, 0xd3, 0xf6, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xd4, 0xcf, 0xf3, 0xff, 0xff, 0xfb, 0xb7, 0xb1, 0xb2, 0xef, 0xec, 0xbb, 0xbf, 0xf0, 0xf2, 0xc5, 0xc8, 0xd0, 0xfc, 0xff, 0xff, 0xf8, 0xe6, 0xe8, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd4, 0xb0, 0xb1, 0xcb, 0xff, 0xe2, 0xbb, 0xbf, 0xe6, 0xff, 0xd7, 0xc8, 0xc8, 0xe4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf3, 0xb1, 0xb1, 0xb4, 0xf4, 0xff, 0xe3, 0xbb, 0xbf, 0xe6, 0xff, 0xf7, 0xc7, 0xc8, 0xcb, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd0, 0xb0, 0xb1, 0xd5, 0xff, 0xff, 0xe3, 0xbc, 0xbf, 0xe6, 0xff, 0xff, 0xdf, 0xc8, 0xc9, 0xe0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd8, 0xb1, 0xbc, 0xfa, 0xff, 0xff, 0xe3, 0xbc, 0xbf, 0xe6, 0xff, 0xff, 0xfb, 0xcd, 0xc8, 0xe6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf8, 0xff, 0xff, 0xff, 0xe4, 0xbc, 0xbf, 0xe7, 0xff, 0xff, 0xff, 0xfa, 0xf3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf8, 0xc5, 0xc5, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf7, 0x9c, 0x92, 0xee, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xea, 0xf4, 0xff, 0xff, 0xff, 0xd8, 0x85, 0x85, 0xc8, 0xff, 0xff, 0xff, 0xf7, 0xe6, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd0, 0x8f, 0x99, 0xf3, 0xff, 0xff, 0xd5, 0x85, 0x85, 0xc5, 0xff, 0xff, 0xfa, 0x92, 0x7b, 0xb7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc4, 
0x8f, 0x8e, 0xb7, 0xff, 0xff, 0xd5, 0x85, 0x85, 0xc4, 0xff, 0xff, 0xc1, 0x7d, 0x7b, 0xaa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf3, 0x96, 0x8e, 0x8f, 0xea, 0xff, 0xd5, 0x86, 0x85, 0xc4, 0xff, 0xf2, 0x83, 0x7b, 0x7e, 0xe6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xcc, 0x8f, 0x8e, 0xa8, 0xff, 0xd5, 0x86, 0x85, 0xc4, 0xff, 0xac, 0x7d, 0x7b, 0xb1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xd1, 0xc4, 0xee, 0xff, 0xff, 0xfc, 0xa1, 0x8e, 0x8d, 0xde, 0xe7, 0x86, 0x85, 0xd4, 0xeb, 0x7e, 0x7b, 0x83, 0xf6, 0xff, 0xff, 0xef, 0xb5, 0xb8, 0xfc, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xf0, 0x9c, 0x99, 0x99, 0xc5, 0xf8, 0xff, 0xdf, 0x91, 0x8f, 0xd4, 0xff, 0xe3, 0xdc, 0xff, 0xe2, 0x7d, 0x7b, 0xc8, 0xff, 0xfb, 0xbd, 0x7d, 0x76, 0x76, 0xd8, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xfc, 0xaa, 0x9c, 0x9a, 0x98, 0x9e, 0xd7, 0xff, 0xde, 0xd0, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd3, 0xcd, 0xff, 0xd5, 0x86, 0x76, 0x76, 0x76, 0x82, 0xf2, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xfb, 0xcb, 0x9d, 0x9c, 0x9a, 0x98, 0xd8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xdc, 0x76, 0x76, 0x76, 0x75, 0xa9, 0xf4, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf2, 0xbc, 0x9c, 0x9c, 0xd1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd1, 0x76, 0x75, 0x94, 0xe4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xea, 0xde, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd3, 0xd9, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xfa, 0xdb, 0xd9, 0xd9, 0xd9, 0xd9, 0xe2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf3, 0xef, 0xf8, 0xf6, 0xf4, 0xf4, 0xfc, 0xff, 0xff, - 0xff, 0xff, 0xb8, 0xa5, 0xa5, 0xa5, 0xa4, 0xa4, 0xa4, 0xde, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfa, 0xe3, 0xe3, 0xe2, 0xe2, 0xe2, 0xe2, 0xe6, 0xff, 0xff, - 0xff, 0xff, 0xb9, 0xa6, 0xa6, 0xa6, 0xa6, 0xa6, 0xa8, 0xdf, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfa, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe4, 0xff, 0xff, - 0xff, 0xff, 0xf8, 0xdc, 0xdc, 0xdc, 0xdc, 0xdc, 0xe4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf7, 0xf2, 0xf3, 0xf3, 0xf3, 0xf3, 0xfb, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf2, 0xe6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf6, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf6, 0xcb, 0xae, 0xb1, 0xdc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf3, 0xdc, 0xdc, 0xe7, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xfc, 0xd8, 0xb1, 0xb0, 0xb1, 0xb2, 0xe3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf6, 0xd9, 0xdb, 0xdc, 0xdc, 0xeb, 0xfc, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xfc, 0xbc, 0xb1, 0xb1, 0xb1, 0xb9, 0xe3, 0xff, 0xeb, 0xe3, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0, 0xf0, 0xff, 0xf3, 0xde, 0xdb, 0xdb, 0xdc, 0xe0, 0xfc, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xf2, 0xb1, 0xb1, 0xb4, 0xd5, 0xfb, 0xff, 0xeb, 0xbb, 0xbd, 0xe7, 0xff, 0xf3, 0xf2, 0xff, 0xf4, 0xd1, 0xd4, 0xee, 0xff, 0xfe, 0xee, 0xdb, 0xdb, 0xdb, 0xf4, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xdc, 0xd4, 0xf3, 0xff, 0xff, 0xfe, 0xc4, 0xbc, 0xbf, 0xee, 0xf4, 0xc5, 0xc8, 0xef, 0xf7, 
0xd1, 0xd1, 0xd7, 0xfc, 0xff, 0xff, 0xfb, 0xeb, 0xec, 0xfe, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xdf, 0xbb, 0xbd, 0xcf, 0xff, 0xea, 0xc5, 0xc8, 0xe7, 0xff, 0xe0, 0xd1, 0xd1, 0xe6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf7, 0xbd, 0xbd, 0xc0, 0xf3, 0xff, 0xeb, 0xc7, 0xc8, 0xe7, 0xff, 0xfa, 0xd3, 0xd1, 0xd4, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xdb, 0xbd, 0xbd, 0xd7, 0xff, 0xff, 0xeb, 0xc7, 0xc8, 0xe7, 0xff, 0xff, 0xe8, 0xd1, 0xd3, 0xe3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe3, 0xbd, 0xc5, 0xf8, 0xff, 0xff, 0xeb, 0xc7, 0xc8, 0xe7, 0xff, 0xff, 0xfc, 0xd8, 0xd1, 0xe8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf2, 0xfa, 0xff, 0xff, 0xff, 0xec, 0xc7, 0xc8, 0xe7, 0xff, 0xff, 0xff, 0xfc, 0xf4, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfb, 0xd0, 0xcf, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf7, 0x9c, 0x92, 0xee, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xea, 0xf4, 0xff, 0xff, 0xff, 0xd8, 0x85, 0x85, 0xc8, 0xff, 0xff, 0xff, 0xf7, 0xe6, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd0, 0x8f, 0x99, 0xf3, 0xff, 0xff, 0xd5, 0x85, 0x85, 0xc5, 0xff, 0xff, 0xfa, 0x92, 0x7b, 0xb7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc4, 0x8f, 0x8e, 0xb7, 0xff, 0xff, 0xd5, 0x85, 0x85, 0xc4, 0xff, 0xff, 0xc1, 0x7d, 0x7b, 0xaa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf3, 0x96, 0x8e, 0x8f, 0xea, 0xff, 0xd5, 0x86, 0x85, 0xc4, 0xff, 0xf2, 0x83, 0x7b, 0x7e, 0xe6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xcc, 0x8f, 0x8e, 0xa8, 0xff, 0xd5, 0x86, 0x85, 0xc4, 0xff, 0xac, 0x7d, 0x7b, 0xb1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xd1, 0xc4, 0xee, 0xff, 0xff, 0xfc, 0xa1, 0x8e, 0x8d, 0xde, 0xe7, 0x86, 0x85, 0xd4, 0xeb, 0x7e, 0x7b, 0x83, 0xf6, 0xff, 0xff, 0xef, 0xb5, 0xb8, 0xfc, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xf0, 0x9c, 0x99, 0x99, 0xc5, 0xf8, 0xff, 0xdf, 0x91, 0x8f, 0xd4, 0xff, 0xe3, 0xdc, 0xff, 0xe2, 0x7d, 0x7b, 0xc8, 0xff, 0xfb, 0xbd, 0x7d, 0x76, 0x76, 0xd8, 
0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xfc, 0xaa, 0x9c, 0x9a, 0x98, 0x9e, 0xd7, 0xff, 0xde, 0xd0, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd3, 0xcd, 0xff, 0xd5, 0x86, 0x76, 0x76, 0x76, 0x82, 0xf2, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xfb, 0xcb, 0x9d, 0x9c, 0x9a, 0x98, 0xd8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xdc, 0x76, 0x76, 0x76, 0x75, 0xa9, 0xf4, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf2, 0xbc, 0x9c, 0x9c, 0xd1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd1, 0x76, 0x75, 0x94, 0xe4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xea, 0xde, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd3, 0xd9, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xfa, 0xdb, 0xd9, 0xd9, 0xd9, 0xd9, 0xe2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf3, 0xef, 0xf8, 0xf6, 0xf4, 0xf4, 0xfc, 0xff, 0xff, - 0xff, 0xff, 0xb8, 0xa5, 0xa5, 0xa5, 0xa4, 0xa4, 0xa4, 0xde, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfa, 0xe3, 0xe3, 0xe2, 0xe2, 0xe2, 0xe2, 0xe6, 0xff, 0xff, - 0xff, 0xff, 0xb9, 0xa6, 0xa6, 0xa6, 0xa6, 0xa6, 0xa8, 0xdf, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfa, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe4, 0xff, 0xff, - 0xff, 0xff, 0xf8, 0xdc, 0xdc, 0xdc, 0xdc, 0xdc, 0xe4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf7, 0xf2, 0xf3, 0xf3, 0xf3, 0xf3, 0xfb, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf2, 0xe6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf6, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf6, 0xcb, 0xae, 0xb1, 0xdc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf3, 0xdc, 0xdc, 0xe7, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xfc, 0xd8, 0xb1, 0xb0, 0xb1, 0xb2, 0xe3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf6, 0xd9, 0xdb, 0xdc, 0xdc, 0xeb, 0xfc, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xfc, 0xbc, 0xb1, 0xb1, 0xb1, 0xb9, 0xe3, 0xff, 0xeb, 0xe3, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0, 0xf0, 0xff, 0xf3, 0xde, 0xdb, 0xdb, 0xdc, 0xe0, 0xfc, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xf2, 0xb1, 0xb1, 0xb4, 0xd5, 0xfb, 0xff, 0xeb, 0xbb, 0xbd, 0xe7, 0xff, 0xf3, 0xf2, 0xff, 0xf4, 0xd1, 0xd4, 0xee, 0xff, 0xfe, 0xee, 0xdb, 0xdb, 0xdb, 0xf4, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xdc, 0xd4, 0xf3, 0xff, 0xff, 0xfe, 0xc4, 0xbc, 0xbf, 0xee, 0xf4, 0xc5, 0xc8, 0xef, 0xf7, 0xd1, 0xd1, 0xd7, 0xfc, 0xff, 0xff, 0xfb, 0xeb, 0xec, 0xfe, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xdf, 0xbb, 0xbd, 0xcf, 0xff, 0xea, 0xc5, 0xc8, 0xe7, 0xff, 0xe0, 0xd1, 0xd1, 0xe6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf7, 0xbd, 0xbd, 0xc0, 0xf3, 0xff, 0xeb, 0xc7, 0xc8, 0xe7, 0xff, 0xfa, 0xd3, 0xd1, 0xd4, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xdb, 0xbd, 0xbd, 0xd7, 0xff, 0xff, 0xeb, 0xc7, 0xc8, 0xe7, 0xff, 0xff, 0xe8, 0xd1, 0xd3, 0xe3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe3, 0xbd, 0xc5, 0xf8, 0xff, 0xff, 0xeb, 0xc7, 0xc8, 0xe7, 0xff, 0xff, 0xfc, 0xd8, 0xd1, 0xe8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xf2, 0xfa, 0xff, 0xff, 0xff, 0xec, 0xc7, 0xc8, 0xe7, 0xff, 0xff, 0xff, 0xfc, 0xf4, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfb, 0xd0, 0xcf, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf7, 0x28, + 0x27, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xb4, 0x00, + 0x00, 0xb4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xaf, 0xd3, 0xff, 0xff, 0xff, 0xaf, 0x00, + 0x00, 0xaf, 0xff, 0xff, 0xff, 0xf7, 0xf0, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xaf, 0x1b, 0x1d, 0xaf, 0xff, 0xff, 0xaf, 0x00, + 0x00, 0xaf, 0xff, 0xff, 0xf0, 0xcf, 0xcf, 0xef, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd3, 0x1d, 0x1a, 0x1e, 0xaf, 0xff, 0xb4, 0x00, + 0x00, 0xb4, 0xff, 0xef, 0xce, 0xcf, 0xcf, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xaf, 0x1e, 0x1a, 0x1e, 0xcb, 0xf7, 0x27, + 0x28, 0xf7, 0xf4, 0xce, 0xce, 0xcf, 0xf0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xaf, 0x1d, 0x1a, 0x90, 0xff, 0xff, + 0xff, 0xff, 0xea, 0xcf, 0xcf, 0xef, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xcb, 0x90, 0xf3, 0xff, 0xff, + 0xff, 0xff, 0xfc, 0xea, 0xf5, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfa, 0xcc, 0xc9, 0xca, 0xcc, 0xfa, 
0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xfc, 0xf0, 0xee, 0xee, 0xf0, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x66, 0x44, 0x43, 0x44, 0x43, 0x67, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xcc, 0xbf, 0xbe, 0xbf, 0xbf, 0xcc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x67, 0x44, 0x44, 0x43, 0x43, 0x66, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xcd, 0xbf, 0xbf, 0xbf, 0xbf, 0xcc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfa, 0xcc, 0xca, 0xca, 0xcc, 0xf9, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xfc, 0xef, 0xee, 0xee, 0xf0, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe3, 0xc3, 0xf9, 0xff, 0xff, + 0xff, 0xff, 0xfb, 0xdc, 0xee, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd3, 0x78, 0x76, 0xc2, 0xff, 0xff, + 0xff, 0xff, 0xdd, 0xaf, 0xb1, 0xe7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd4, 0x78, 0x76, 0x78, 0xe2, 0xfb, 0xa8, + 0xa8, 0xfb, 0xee, 0xb0, 0xaf, 0xb0, 0xe7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe8, 0x78, 0x76, 0x78, 0xd4, 0xff, 0xe4, 0x93, + 0x93, 0xe4, 0xff, 0xe7, 0xb0, 0xaf, 0xb0, 0xf2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd3, 0x76, 0x78, 0xd3, 0xff, 0xff, 0xe1, 0x92, + 0x92, 0xe1, 0xff, 0xff, 0xe6, 0xb0, 0xaf, 0xe6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xd4, 0xe8, 0xff, 0xff, 0xff, 0xe1, 0x93, + 0x92, 0xe1, 0xff, 0xff, 0xff, 0xf2, 0xe6, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe4, 0x93, + 0x93, 0xe4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfb, 0xa8, + 0xa8, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf7, 0x30, + 0x30, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xb8, 0x0a, + 0x0b, 0xb8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xb5, 0xd7, 0xff, 0xff, 0xff, 0xb2, 0x0b, + 0x0a, 0xb3, 0xff, 0xff, 0xff, 0xe9, 0xd5, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xb6, 0x28, 0x2b, 0xb5, 0xff, 0xff, 0xb2, 0x0b, + 0x0b, 0xb2, 0xff, 0xff, 0xd4, 0x7d, 0x7b, 0xd4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd7, 0x2b, 0x28, 0x2b, 0xb5, 0xff, 0xb7, 0x0b, + 0x0a, 0xb7, 0xff, 0xd4, 0x7d, 0x7b, 0x7d, 0xe9, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xb5, 0x2b, 0x28, 0x2b, 0xce, 0xf7, 0x30, + 0x30, 0xf7, 0xe4, 0x7d, 0x7b, 0x7e, 0xd5, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xb5, 0x2a, 0x27, 0x99, 0xff, 0xff, + 0xff, 0xff, 0xc4, 0x7c, 0x7d, 0xd4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xcf, 0x9a, 0xf4, 0xff, 0xff, + 0xff, 0xff, 0xf9, 0xc5, 0xe4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfa, 0xd2, 0xcf, 0xce, 0xd2, 0xfa, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xfd, 0xf1, 0xef, 0xf0, 0xf0, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x75, 0x55, 0x55, 0x55, 0x54, 0x75, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xd1, 0xc4, 0xc5, 0xc5, 0xc5, 0xd1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x75, 0x55, 0x55, 0x55, 0x54, 0x75, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xd1, 0xc5, 0xc5, 0xc5, 0xc5, 0xd1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfa, 0xd2, 0xce, 0xcf, 0xd2, 0xfa, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xfd, 0xf0, 0xef, 0xf0, 0xf1, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe4, 0xc6, 0xf9, 0xff, 0xff, + 0xff, 0xff, 0xfb, 0xdf, 
0xef, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd6, 0x80, 0x7e, 0xc6, 0xff, 0xff, + 0xff, 0xff, 0xdf, 0xb6, 0xb7, 0xe8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd6, 0x81, 0x7f, 0x81, 0xe4, 0xfd, 0xaf, + 0xb0, 0xfc, 0xef, 0xb7, 0xb5, 0xb7, 0xe8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe9, 0x81, 0x7e, 0x80, 0xd6, 0xff, 0xe5, 0x9b, + 0x9a, 0xe5, 0xff, 0xe8, 0xb6, 0xb5, 0xb6, 0xf2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd6, 0x7f, 0x81, 0xd6, 0xff, 0xff, 0xe4, 0x9a, + 0x9b, 0xe4, 0xff, 0xff, 0xe8, 0xb7, 0xb5, 0xe8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xd6, 0xe9, 0xff, 0xff, 0xff, 0xe4, 0x9a, + 0x9a, 0xe4, 0xff, 0xff, 0xff, 0xf2, 0xe8, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe5, 0x9a, + 0x9a, 0xe5, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xaf, + 0xaf, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf8, 0x38, + 0x37, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xba, 0x12, + 0x11, 0xba, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xba, 0xd9, 0xff, 0xff, 0xff, 0xb7, 0x11, + 0x12, 0xb6, 0xff, 0xff, 0xff, 0xda, 0xba, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xba, 0x34, 0x37, 0xba, 0xff, 0xff, 0xb7, 0x12, + 0x12, 0xb7, 0xff, 0xff, 0xba, 0x37, 0x34, 0xb9, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xda, 0x37, 0x34, 0x37, 0xba, 0xff, 0xba, 0x12, + 0x12, 0xba, 0xff, 0xba, 0x37, 0x34, 0x37, 0xda, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xba, 0x37, 0x33, 0x37, 0xd2, 0xf7, 0x37, + 0x37, 0xf7, 0xd2, 0x37, 0x34, 0x37, 0xba, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xba, 0x37, 0x34, 0xa0, 0xff, 0xff, + 0xff, 0xff, 0xa0, 0x34, 0x38, 0xb9, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd3, 0xa0, 0xf5, 0xff, 0xff, + 0xff, 0xff, 0xf4, 0xa0, 0xd3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfa, 0xd6, 0xd3, 0xd3, 0xd6, 0xf9, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xfc, 0xf1, 0xf1, 0xf0, 0xf1, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x82, 0x64, 0x63, 0x64, 0x63, 0x82, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xd4, 0xc8, 0xc8, 0xc9, 0xc8, 0xd4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x82, 0x64, 0x63, 0x63, 0x63, 0x82, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xd4, 0xc8, 0xc9, 0xc9, 0xc9, 0xd3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf9, 0xd6, 0xd4, 0xd4, 0xd6, 0xf9, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xfc, 0xf1, 0xf1, 0xf1, 0xf0, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe7, 0xcb, 0xfa, 0xff, 0xff, + 0xff, 0xff, 0xfb, 0xe0, 0xf1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd9, 0x8b, 0x89, 0xcb, 0xff, 0xff, + 0xff, 0xff, 0xe1, 0xb9, 0xba, 0xe9, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xda, 0x8c, 0x8a, 0x8c, 0xe6, 0xfd, 0xb9, + 0xb9, 0xfc, 0xf1, 0xba, 0xb9, 0xba, 0xe9, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xec, 0x8b, 0x8a, 0x8c, 0xda, 0xff, 0xe9, 0xa7, + 0xa7, 0xe9, 0xff, 0xe9, 0xba, 0xb8, 0xba, 0xf3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xda, 0x8a, 0x8c, 0xda, 0xff, 0xff, 0xe7, 0xa7, + 0xa7, 0xe7, 0xff, 0xff, 0xe9, 0xba, 0xb9, 0xe9, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xda, 0xec, 0xff, 0xff, 0xff, 0xe8, 0xa7, + 0xa7, 0xe8, 0xff, 0xff, 0xff, 0xf3, 0xe9, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe9, 0xa7, + 0xa7, 0xe9, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xb8, + 0xb8, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf7, 0x41, + 0x40, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbd, 0x1a, + 0x1a, 0xbd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xc1, 0xdd, 0xff, 0xff, 0xff, 0xba, 0x1a, + 0x1a, 0xba, 0xff, 0xff, 0xff, 0xce, 0xa2, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc1, 0x43, 0x47, 0xc1, 0xff, 0xff, 0xba, 0x1a, + 0x1a, 0xba, 0xff, 0xff, 0xa3, 0x07, 0x00, 0xa2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xde, 0x46, 0x44, 0x47, 0xc1, 0xff, 0xbe, 0x1a, + 0x1b, 0xbd, 0xff, 0xa2, 0x06, 0x00, 0x07, 0xcd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc1, 0x46, 0x44, 0x46, 0xd6, 0xf8, 0x40, + 0x40, 0xf8, 0xc3, 0x07, 0x00, 0x07, 0xa2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc1, 0x46, 0x44, 0xa8, 0xff, 0xff, + 0xff, 0xff, 0x80, 0x00, 0x07, 0xa2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd7, 0xa8, 0xf6, 0xff, 0xff, + 0xff, 0xff, 0xf2, 0x7f, 0xc2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfb, 0xdb, 0xd9, 0xda, 0xdb, 0xfb, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xfd, 0xf3, 0xf2, 0xf2, 0xf3, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x91, 0x76, 0x76, 0x76, 0x76, 0x92, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xd9, 0xcf, 0xcf, 0xce, 0xcf, 0xd8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x92, 0x76, 0x76, 0x76, 0x76, 0x92, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xd8, 0xce, 0xcf, 0xcf, 0xcf, 0xd9, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfb, 0xdb, 0xda, 0xda, 0xdb, 0xfb, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xfd, 0xf3, 0xf2, 0xf2, 0xf3, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe9, 0xcf, 0xfa, 0xff, 0xff, + 0xff, 0xff, 0xfc, 0xe2, 0xf2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xdc, 0x94, 0x93, 0xd0, 0xff, 0xff, + 0xff, 0xff, 0xe3, 0xbf, 0xc0, 0xeb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xdc, 0x95, 0x92, 0x95, 0xe9, 0xfc, 0xc0, + 0xc0, 0xfd, 0xf2, 0xc0, 0xbf, 0xc0, 0xec, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xed, 0x95, 0x92, 0x95, 0xdc, 0xff, 0xea, 0xaf, + 0xaf, 0xea, 0xff, 0xec, 0xc0, 0xbf, 0xc0, 0xf4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xdc, 0x92, 0x94, 0xdd, 0xff, 0xff, 0xe9, 0xaf, + 0xaf, 0xe9, 0xff, 0xff, 0xec, 0xc0, 0xbf, 0xec, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xdd, 0xed, 0xff, 0xff, 0xff, 0xe9, 0xaf, + 0xaf, 0xe9, 0xff, 0xff, 0xff, 0xf4, 0xec, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xea, 0xaf, + 0xaf, 0xeb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xc0, + 0xc0, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf8, 0x4d, + 0x4d, 0xf9, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc3, 0x28, + 0x28, 0xc3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xc7, 0xe1, 0xff, 0xff, 0xff, 0xc0, 0x28, + 0x28, 0xc0, 0xff, 0xff, 0xff, 0xce, 0xa6, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc8, 0x54, 0x57, 0xc7, 0xff, 0xff, 0xc0, 0x28, + 0x28, 0xc0, 0xff, 0xff, 0xa5, 0x0d, 0x0a, 0xa5, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe2, 0x57, 0x54, 0x58, 0xc8, 0xff, 0xc3, 0x28, + 0x28, 0xc3, 0xff, 0xa6, 0x0d, 0x0a, 0x0d, 0xcf, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc8, 0x58, 0x55, 0x58, 0xdb, 0xf9, 0x4d, + 0x4d, 0xf9, 0xc5, 0x0e, 0x0a, 0x0e, 0xa5, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0x57, 0x54, 0xb1, 0xff, 0xff, + 0xff, 0xff, 0x85, 0x0b, 0x0d, 0xa5, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xdb, 0xb2, 0xf7, 0xff, 0xff, + 0xff, 0xff, 0xf2, 0x85, 0xc5, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfb, 0xde, 0xdc, 0xdc, 0xde, 0xfb, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xfb, 0xdd, 0xdb, 0xdb, 0xde, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x98, 0x7e, 0x7e, 0x7f, 0x7e, 0x98, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0x95, 0x7b, 0x7b, 0x7b, 0x7b, 0x96, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x99, 0x7f, 0x7e, 0x7f, 0x7f, 0x98, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0x96, 0x7b, 0x7c, 0x7b, 0x7c, 0x96, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfb, 0xdd, 0xdd, 0xdd, 0xde, 0xfb, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xfb, 0xde, 0xdb, 0xdb, 0xde, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xeb, 0xd3, 0xfa, 0xff, 0xff, + 0xff, 0xff, 0xfc, 0xe5, 0xf3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xde, 0x9b, 0x9a, 0xd3, 0xff, 0xff, + 0xff, 0xff, 0xe6, 0xc5, 0xc6, 0xed, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xdf, 0x9b, 0x9a, 0x9b, 0xea, 0xfc, 0xc3, + 0xc4, 0xfc, 0xf3, 0xc6, 0xc5, 0xc6, 0xed, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xee, 0x9c, 0x9a, 0x9b, 0xdf, 0xff, 0xed, 0xb5, + 0xb5, 0xed, 0xff, 0xed, 0xc6, 0xc5, 0xc6, 0xf5, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xdf, 0x9b, 0x9c, 0xde, 0xff, 0xff, 0xeb, 0xb5, + 0xb5, 0xec, 0xff, 0xff, 0xed, 0xc6, 0xc5, 0xec, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xdf, 0xef, 0xff, 0xff, 0xff, 0xec, 0xb6, + 0xb5, 0xec, 0xff, 0xff, 0xff, 0xf5, 0xed, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xed, 0xb5, + 0xb6, 0xed, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xc4, + 0xc3, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf9, 0x59, + 0x59, 0xf9, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc8, 0x34, + 0x34, 0xc8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xcc, 0xe3, 0xff, 0xff, 0xff, 0xc4, 0x34, + 0x34, 0xc3, 0xff, 0xff, 0xff, 0xd1, 0xaa, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xcc, 0x64, 0x66, 0xcc, 0xff, 0xff, 0xc4, 0x34, + 0x34, 0xc4, 0xff, 0xff, 0xaa, 0x15, 0x12, 0xaa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe4, 0x66, 0x63, 0x66, 0xcc, 0xff, 0xc7, 0x34, + 0x34, 0xc7, 0xff, 0xaa, 0x14, 0x12, 0x15, 0xd1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xcc, 0x67, 0x63, 0x67, 0xdd, 0xf9, 0x59, + 0x59, 0xf9, 0xc7, 0x15, 0x12, 0x15, 0xab, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xcc, 0x66, 0x63, 0xb8, 0xff, 0xff, + 0xff, 0xff, 0x8b, 0x12, 0x15, 0xaa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xdd, 0xb9, 0xf7, 0xff, 0xff, + 0xff, 0xff, 0xf2, 0x8b, 0xc7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfb, 0xe2, 0xdf, 0xdf, 0xe1, 0xfb, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xf9, 0xc8, 0xc4, 0xc4, 0xc7, 0xf9, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xa1, 0x89, 0x8a, 0x8a, 0x89, 0xa2, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0x58, 0x34, 0x34, 0x34, 0x34, 0x58, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xa2, 0x8a, 0x8a, 0x8a, 0x89, 0xa1, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0x58, 0x34, 0x34, 0x34, 0x34, 0x59, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfb, 0xe1, 0xdf, 0xdf, 0xe2, 0xfb, 0xff, 0xff, 0xff, 0xff, + 
0xff, 0xff, 0xff, 0xff, 0xf8, 0xc7, 0xc3, 0xc3, 0xc8, 0xf9, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xed, 0xd9, 0xfb, 0xff, 0xff, + 0xff, 0xff, 0xfc, 0xe7, 0xf3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe3, 0xa8, 0xa7, 0xd8, 0xff, 0xff, + 0xff, 0xff, 0xe8, 0xc8, 0xca, 0xee, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe4, 0xa9, 0xa7, 0xa9, 0xec, 0xfc, 0xc7, + 0xc7, 0xfc, 0xf3, 0xc9, 0xc8, 0xca, 0xee, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf1, 0xa8, 0xa7, 0xa8, 0xe4, 0xff, 0xec, 0xb9, + 0xb9, 0xed, 0xff, 0xee, 0xca, 0xc8, 0xca, 0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe4, 0xa7, 0xa8, 0xe4, 0xff, 0xff, 0xeb, 0xb9, + 0xb9, 0xec, 0xff, 0xff, 0xee, 0xca, 0xc8, 0xee, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xe4, 0xf1, 0xff, 0xff, 0xff, 0xec, 0xb9, + 0xb9, 0xeb, 0xff, 0xff, 0xff, 0xf6, 0xee, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xed, 0xb9, + 0xb9, 0xed, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xc7, + 0xc7, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, }; diff --git a/pexpert/pexpert/arm/Makefile b/pexpert/pexpert/arm/Makefile index 86386e462..82bffe580 100644 --- a/pexpert/pexpert/arm/Makefile +++ b/pexpert/pexpert/arm/Makefile @@ -6,6 +6,41 @@ export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir include $(MakeInc_cmd) include $(MakeInc_def) +ifeq ($(PLATFORM),MacOSX) + +PRIVATE_DATAFILES = boot.h consistent_debug.h protos.h + +PRIVATE_KERNELFILES = \ + AIC.h \ + board_config.h \ + boot.h \ + consistent_debug.h \ + dockchannel.h \ + PL192_VIC.h \ + protos.h \ + S3cUART.h \ + T8002.h + +# Headers installed into System.framework/PrivateHeaders (userspace internal SDK only). 
+INSTALL_MD_LCL_LIST = ${PRIVATE_DATAFILES} + +# The userspace headers can be located in System.framework/PrivateHeaders/pexpert/arm +INSTALL_MD_DIR = pexpert/arm + +# Ensure these files don't get auto-included into the public Kernel.framework/Headers. +INSTALL_KF_MD_LIST = ${EMPTY} + +# Headers installed into Kernel.framework/PrivateHeaders (internal SDK only). +INSTALL_KF_MD_LCL_LIST = ${PRIVATE_KERNELFILES} + +# Headers used to compile xnu +EXPORT_MD_LIST = ${PRIVATE_KERNELFILES} + +# These headers will be available with #include +EXPORT_MD_DIR = pexpert/arm + +else # $(PLATFORM),MacOSX + DATAFILES = \ AIC.h \ board_config.h \ @@ -15,7 +50,6 @@ DATAFILES = \ PL192_VIC.h \ protos.h \ S3cUART.h \ - S7002.h \ T8002.h INSTALL_MD_LIST = ${DATAFILES} @@ -26,5 +60,7 @@ EXPORT_MD_LIST = ${DATAFILES} EXPORT_MD_DIR = pexpert/arm +endif # $(PLATFORM),MacOSX + include $(MakeInc_rule) include $(MakeInc_dir) diff --git a/pexpert/pexpert/arm/S7002.h b/pexpert/pexpert/arm/S7002.h deleted file mode 100644 index 6c6d2e07c..000000000 --- a/pexpert/pexpert/arm/S7002.h +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Copyright (c) 2013 Apple Inc. All rights reserved. - */ - -#ifndef _PEXPERT_ARM_S7002_H -#define _PEXPERT_ARM_S7002_H - -#ifndef ASSEMBLER - -#include - -#define rPMGR_EVENT_TMR (*(volatile unsigned *) (timer_base + 0x00100)) -#define rPMGR_EVENT_TMR_PERIOD (*(volatile unsigned *) (timer_base + 0x00104)) -#define rPMGR_EVENT_TMR_CTL (*(volatile unsigned *) (timer_base + 0x00108)) -#define rPMGR_INTERVAL_TMR (*(volatile unsigned *) (timer_base + 0x00200)) -#define rPMGR_INTERVAL_TMR_CTL (*(volatile unsigned *) (timer_base + 0x00204)) - -#define PMGR_EVENT_TMR_CTL_EN (1 << 0) -#define PMGR_INTERVAL_TMR_CTL_EN (1 << 0) -#define PMGR_INTERVAL_TMR_CTL_CLR_INT (1 << 8) - -#define DOCKFIFO_UART (1) -#define DOCKFIFO_UART_WRITE (0) -#define DOCKFIFO_UART_READ (1) -#define DOCKFIFO_W_SPACING (0x1000) -#define DOCKFIFO_SPACING (0x3000) - -#define rDOCKFIFO_R_DATA(_f, _n) (*(volatile uint32_t *)(dockfifo_uart_base + ((_f) * DOCKFIFO_SPACING) + ((_n) * 4))) -#define rDOCKFIFO_R_STAT(_f) (*(volatile uint32_t *)(dockfifo_uart_base + ((_f) * DOCKFIFO_SPACING) + 0x14)) -#define rDOCKFIFO_W_DATA(_f, _n) (*(volatile uint32_t *)(dockfifo_uart_base + ((_f) * DOCKFIFO_SPACING) + DOCKFIFO_W_SPACING + ((_n) * 4))) -#define rDOCKFIFO_W_STAT(_f) (*(volatile uint32_t *)(dockfifo_uart_base + ((_f) * DOCKFIFO_SPACING) + DOCKFIFO_W_SPACING + 0x14)) -#define rDOCKFIFO_CNFG(_f) (*(volatile uint32_t *)(dockfifo_uart_base + ((_f) * DOCKFIFO_SPACING) + 0x2000)) -#define rDOCKFIFO_DRAIN(_f) (*(volatile uint32_t *)(dockfifo_uart_base + ((_f) * DOCKFIFO_SPACING) + 0x2004)) -#define rDOCKFIFO_INTMASK(_f) (*(volatile uint32_t *)(dockfifo_uart_base + ((_f) * DOCKFIFO_SPACING) + 0x2008)) - -#endif - -#define PMGR_INTERVAL_TMR_OFFSET (0x200) -#define PMGR_INTERVAL_TMR_CTL_OFFSET (0x204) - -#endif /* ! 
_PEXPERT_ARM_S7002_H */ diff --git a/pexpert/pexpert/arm/board_config.h b/pexpert/pexpert/arm/board_config.h index 0cad92467..7ba990c06 100644 --- a/pexpert/pexpert/arm/board_config.h +++ b/pexpert/pexpert/arm/board_config.h @@ -5,22 +5,13 @@ #ifndef _PEXPERT_ARM_BOARD_CONFIG_H #define _PEXPERT_ARM_BOARD_CONFIG_H -#ifdef ARM_BOARD_CONFIG_S7002 -#define ARMA7 -#define __XNU_UP__ -#include - -#define ARM_BOARD_WFE_TIMEOUT_NS 1000 -#define __ARM_L2CACHE_SIZE_LOG__ 18 -#define ARM_BOARD_CLASS_S7002 -#define PEXPERT_NO_3X_IMAGES 1 -#endif /* ARM_BOARD_CONFIG_S7002 */ - #ifdef ARM_BOARD_CONFIG_T8002 #define ARMA7 #include +#define MAX_CPUS 2 +#define MAX_CPU_CLUSTERS 1 #define ARM_BOARD_WFE_TIMEOUT_NS 1000 -#define __ARM_L2CACHE_SIZE_LOG__ 19 +#define MAX_L2_CLINE 6 #define ARM_BOARD_CLASS_T8002 #define PEXPERT_NO_3X_IMAGES 1 #endif /* ARM_BOARD_CONFIG_T8002 */ @@ -28,8 +19,10 @@ #ifdef ARM_BOARD_CONFIG_T8004 #define ARMA7 #include +#define MAX_CPUS 2 +#define MAX_CPU_CLUSTERS 1 #define ARM_BOARD_WFE_TIMEOUT_NS 1000 -#define __ARM_L2CACHE_SIZE_LOG__ 20 +#define MAX_L2_CLINE 6 #define ARM_BOARD_CLASS_T8002 #define PEXPERT_NO_3X_IMAGES 1 #endif /* ARM_BOARD_CONFIG_T8004 */ diff --git a/pexpert/pexpert/arm/consistent_debug.h b/pexpert/pexpert/arm/consistent_debug.h index 62549b868..68d22da15 100644 --- a/pexpert/pexpert/arm/consistent_debug.h +++ b/pexpert/pexpert/arm/consistent_debug.h @@ -30,6 +30,9 @@ #define PE_CONSISTENT_DEBUG_H #include +#include + +__BEGIN_DECLS #define DEBUG_RECORD_ID_LONG(a, b, c, d, e, f, g, h) \ ( ((uint64_t)( (((h) << 24) & 0xFF000000) | \ @@ -139,4 +142,6 @@ boolean_t PE_consistent_debug_lookup_entry(uint64_t record_id, uint64_t *phys_ad */ int PE_consistent_debug_enabled(void); +__END_DECLS + #endif // PE_CONSISTENT_DEBUG_H diff --git a/pexpert/pexpert/arm/protos.h b/pexpert/pexpert/arm/protos.h index 2944b793e..5abd4ab9e 100644 --- a/pexpert/pexpert/arm/protos.h +++ b/pexpert/pexpert/arm/protos.h @@ -9,13 +9,13 @@ #endif extern vm_offset_t pe_arm_get_soc_base_phys(void); -extern uint32_t pe_arm_get_soc_revision(void); extern uint32_t pe_arm_init_interrupts(void *args); extern void pe_arm_init_debug(void *args); #ifdef PEXPERT_KERNEL_PRIVATE extern void cnputc(char); +extern void cnputc_unbuffered(char); #endif int serial_init(void); int serial_getc(void); diff --git a/pexpert/pexpert/arm64/AMCC.h b/pexpert/pexpert/arm64/AMCC.h deleted file mode 100644 index 4dccef945..000000000 --- a/pexpert/pexpert/arm64/AMCC.h +++ /dev/null @@ -1,27 +0,0 @@ -/* - * Copyright (c) 2016 Apple Inc. All rights reserved. - */ - -#ifndef _PEXPERT_ARM_AMCC_H -#define _PEXPERT_ARM_AMCC_H - -#include - -/* - * AMCC registers for KTRR/RoRegion related lockdown in early kernel bootstrap. - * amcc_base must be retrieved from device tree before using. 
- */ - -#if defined(KERNEL_INTEGRITY_KTRR) -#define AMCC_PGSHIFT 14 -#define AMCC_PGSIZE (1 << AMCC_PGSHIFT) -#define AMCC_PGMASK (AMCC_PGSIZE - 1) - -#define rMCCGEN (*(volatile uint32_t *) (amcc_base + 0x780)) -#define rRORGNBASEADDR (*(volatile uint32_t *) (amcc_base + 0x7e4)) -#define rRORGNENDADDR (*(volatile uint32_t *) (amcc_base + 0x7e8)) -#define rRORGNLOCK (*(volatile uint32_t *) (amcc_base + 0x7ec)) -#endif - - -#endif /* _PEXPERT_ARM_AMCC_H */ diff --git a/pexpert/pexpert/arm64/BCM2837.h b/pexpert/pexpert/arm64/BCM2837.h index cc3a2147f..d9f072324 100644 --- a/pexpert/pexpert/arm64/BCM2837.h +++ b/pexpert/pexpert/arm64/BCM2837.h @@ -5,13 +5,20 @@ #ifndef _PEXPERT_ARM_BCM2837_H #define _PEXPERT_ARM_BCM2837_H -#ifdef BCM2837 -#include "arm64_common.h" -#endif - #define NO_MONITOR 1 #define NO_ECORE 1 +#define BCM2837 +#define BCM2837_BRINGUP +#define ARM_ARCH_TIMER + +#define __ARM_ARCH__ 8 +#define __ARM_VMSA__ 8 +#define __ARM_VFP__ 4 +#define __ARM_COHERENT_CACHE__ 1 +#define __ARM_DEBUG__ 7 +#define __ARM64_PMAP_SUBPAGE_L1__ 1 + #ifndef ASSEMBLER #define PI3_UART diff --git a/pexpert/pexpert/arm64/H7.h b/pexpert/pexpert/arm64/H7.h new file mode 100644 index 000000000..e1eafd91c --- /dev/null +++ b/pexpert/pexpert/arm64/H7.h @@ -0,0 +1,47 @@ +/* + * Copyright (c) 2019 Apple Inc. All rights reserved. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. + * + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ + */ + +#ifndef _PEXPERT_ARM64_H7_H +#define _PEXPERT_ARM64_H7_H + +#define APPLETYPHOON +#define MONITOR 1 /* Use EL3 monitor */ +#define NO_ECORE 1 +#define HAS_32BIT_DBGWRAP 1 +#define HAS_CPMU_BIU_EVENTS 1 /* Has BIU events in CPMU */ +#define HAS_CPMU_L2C_EVENTS 1 /* Has L2 cache events in CPMU */ + +#define CORE_NCTRS 8 +#define CPMU_AIC_PMI 1 +#define WITH_CLASSIC_S2R 1 + +#define __ARM_KERNEL_PROTECT__ 1 + +#include + +#endif /* !_PEXPERT_ARM64_H7_H */ diff --git a/pexpert/pexpert/arm64/H8.h b/pexpert/pexpert/arm64/H8.h new file mode 100644 index 000000000..6a612a13a --- /dev/null +++ b/pexpert/pexpert/arm64/H8.h @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2019 Apple Inc. All rights reserved. 
+ * + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. + * + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ + */ + +#ifndef _PEXPERT_ARM64_H8_H +#define _PEXPERT_ARM64_H8_H + +#define APPLETWISTER +#define MONITOR 1 /* Use EL3 monitor */ +#define NO_ECORE 1 +#define HAS_32BIT_DBGWRAP 1 +#define HAS_CPMU_L2C_EVENTS 1 /* Has L2 cache events in CPMU */ + +#define CORE_NCTRS 8 +#define CPMU_AIC_PMI 1 + +#define __ARM_16K_PG__ 1 +#define __ARM_KERNEL_PROTECT__ 1 + +#include + +#endif /* !_PEXPERT_ARM64_H8_H */ diff --git a/pexpert/pexpert/arm64/H9.h b/pexpert/pexpert/arm64/H9.h new file mode 100644 index 000000000..53143c7b6 --- /dev/null +++ b/pexpert/pexpert/arm64/H9.h @@ -0,0 +1,52 @@ +/* + * Copyright (c) 2019 Apple Inc. All rights reserved. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. + * + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. 
+ * + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ + */ + +#ifndef _PEXPERT_ARM64_H9_H +#define _PEXPERT_ARM64_H9_H + +#define APPLEHURRICANE +#define NO_MONITOR 1 /* No EL3 for this CPU -- ever */ +#define HAS_MIGSTS 1 /* Has MIGSTS register, and supports migration between p-core and e-core */ +#define HAS_KTRR 1 /* Has KTRR registers */ +#define HAS_CPMU_L2C_EVENTS 1 /* Has L2 cache events in CPMU */ + +#define CORE_NCTRS 10 +#define CPMU_AIC_PMI 1 + +#define __ARM_16K_PG__ 1 +#define __ARM_KERNEL_PROTECT__ 1 +#define __ARM_GLOBAL_SLEEP_BIT__ 1 +#define __ARM_PAN_AVAILABLE__ 1 + + +// Hurricane and Zephyr require workaround for radar 20619637 +#define SINGLE_STEP_RETIRE_ERRATA 1 + +#include + +#endif /* !_PEXPERT_ARM64_H9_H */ diff --git a/pexpert/pexpert/arm64/Makefile b/pexpert/pexpert/arm64/Makefile index 059b64ee8..6807e0b64 100644 --- a/pexpert/pexpert/arm64/Makefile +++ b/pexpert/pexpert/arm64/Makefile @@ -6,19 +6,54 @@ export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir include $(MakeInc_cmd) include $(MakeInc_def) +ifeq ($(PLATFORM),MacOSX) + +PRIVATE_DATAFILES = boot.h + +PRIVATE_KERNELFILES = \ + AIC.h \ + apple_arm64_common.h \ + apple_arm64_regs.h \ + board_config.h \ + boot.h \ + S3c2410x.h \ + H7.h \ + H8.h \ + H9.h \ + BCM2837.h \ + spr_locks.h + + +# Headers installed into System.framework/PrivateHeaders (userspace internal SDK only). +INSTALL_MD_LCL_LIST = ${PRIVATE_DATAFILES} + +# The userspace headers can be located in System.framework/PrivateHeaders/pexpert/arm64 +INSTALL_MD_DIR = pexpert/arm64 + +# Ensure these files don't get auto-included into the public Kernel.framework/Headers. +INSTALL_KF_MD_LIST = ${EMPTY} + +# Headers installed into Kernel.framework/PrivateHeaders (internal SDK only). +INSTALL_KF_MD_LCL_LIST = ${PRIVATE_KERNELFILES} + +# Headers used to compile xnu +EXPORT_MD_LIST = ${PRIVATE_KERNELFILES} + +# These headers will be available with #include +EXPORT_MD_DIR = pexpert/arm64 + +else # $(PLATFORM),MacOSX + DATAFILES = \ AIC.h \ - AMCC.h \ - arm64_common.h \ + apple_arm64_common.h \ + apple_arm64_regs.h \ board_config.h \ boot.h \ S3c2410x.h \ - T7000.h \ - S8000.h \ - T8010.h \ - typhoon.h \ - twister.h \ - hurricane.h \ + H7.h \ + H8.h \ + H9.h \ BCM2837.h \ spr_locks.h @@ -31,5 +66,7 @@ EXPORT_MD_LIST = ${DATAFILES} EXPORT_MD_DIR = pexpert/arm64 +endif # $(PLATFORM),MacOSX + include $(MakeInc_rule) include $(MakeInc_dir) diff --git a/pexpert/pexpert/arm64/S8000.h b/pexpert/pexpert/arm64/S8000.h deleted file mode 100644 index 284d239cd..000000000 --- a/pexpert/pexpert/arm64/S8000.h +++ /dev/null @@ -1,17 +0,0 @@ -/* - * Copyright (c) 2014 Apple Inc. All rights reserved. - */ - -#ifndef _PEXPERT_ARM_S8000_H -#define _PEXPERT_ARM_S8000_H - -#include -#include - -#ifndef ASSEMBLER - -#include - -#endif - -#endif /* ! _PEXPERT_ARM_S8000_H */ diff --git a/pexpert/pexpert/arm64/T7000.h b/pexpert/pexpert/arm64/T7000.h deleted file mode 100644 index d6fffc0d9..000000000 --- a/pexpert/pexpert/arm64/T7000.h +++ /dev/null @@ -1,19 +0,0 @@ -/* - * Copyright (c) 2012 Apple Inc. All rights reserved. - */ - -#ifndef _PEXPERT_ARM_T7000_H -#define _PEXPERT_ARM_T7000_H - -#include -#include - -#define WITH_CLASSIC_S2R 1 - -#ifndef ASSEMBLER - -#include - -#endif - -#endif /* ! _PEXPERT_ARM_T7000_H */ diff --git a/pexpert/pexpert/arm64/T8010.h b/pexpert/pexpert/arm64/T8010.h deleted file mode 100644 index 826414b54..000000000 --- a/pexpert/pexpert/arm64/T8010.h +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Copyright (c) 2014-2015 Apple Inc. All rights reserved. 
- */ - -#ifndef _PEXPERT_ARM_T8010_H -#define _PEXPERT_ARM_T8010_H - -#include -#include - -#ifndef ASSEMBLER - -#include -#include -#include - -// AOP_CLOCK frequency * 30 ms -#define DOCKCHANNEL_DRAIN_PERIOD (192000000 * 0.03) - -#endif - -#endif /* ! _PEXPERT_ARM_T8010_H */ diff --git a/pexpert/pexpert/arm64/apple_arm64_common.h b/pexpert/pexpert/arm64/apple_arm64_common.h new file mode 100644 index 000000000..34005a9ed --- /dev/null +++ b/pexpert/pexpert/arm64/apple_arm64_common.h @@ -0,0 +1,85 @@ +/* + * Copyright (c) 2019 Apple Inc. All rights reserved. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. + * + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ + */ + +#ifndef _PEXPERT_ARM64_APPLE_ARM64_COMMON_H +#define _PEXPERT_ARM64_APPLE_ARM64_COMMON_H + +#define __ARM_ARCH__ 8 +#define __ARM_VMSA__ 8 +#define __ARM_VFP__ 4 +#define __ARM_COHERENT_CACHE__ 1 +#define __ARM_COHERENT_IO__ 1 +#define __ARM_IC_NOALIAS_ICACHE__ 1 +#define __ARM_DEBUG__ 7 +#define __ARM_ENABLE_SWAP__ 1 +#define __ARM_V8_CRYPTO_EXTENSIONS__ 1 + +#ifndef ARM_LARGE_MEMORY +#define __ARM64_PMAP_SUBPAGE_L1__ 1 +#endif + +#define APPLE_ARM64_ARCH_FAMILY 1 +#define ARM_ARCH_TIMER +#define ARM_BOARD_WFE_TIMEOUT_NS 1000 + +#if defined(HAS_CTRR) +#define KERNEL_INTEGRITY_CTRR 1 +#elif defined(HAS_KTRR) +#define KERNEL_INTEGRITY_KTRR 1 +#elif defined(MONITOR) +#define KERNEL_INTEGRITY_WT 1 +#endif + + +#include +#include + +#ifndef ASSEMBLER +#include + +#if !defined(APPLETYPHOON) && !defined(APPLETWISTER) +#include + +// AOP_CLOCK frequency * 30 ms +#define DOCKCHANNEL_DRAIN_PERIOD (192000000 * 0.03) +#endif + +#endif /* ASSEMBLER */ + +/* + * See arm64/proc_reg.h for how these values are constructed from the MIDR. + * The chip-revision property from EDT also uses these constants. 
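+ *
+ * For illustration only (the helper name here is hypothetical, not part of
+ * this header): revision-gated erratum workarounds typically compare the
+ * MIDR-derived value against these constants, e.g.
+ *
+ *     if (get_chip_revision() < CPU_VERSION_B0) {
+ *         // apply a workaround needed only on A0/A1 silicon
+ *     }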
+ */ +#define CPU_VERSION_A0 0x00 +#define CPU_VERSION_A1 0x01 +#define CPU_VERSION_B0 0x10 +#define CPU_VERSION_B1 0x11 +#define CPU_VERSION_C0 0x20 +#define CPU_VERSION_UNKNOWN 0xff + +#endif /* !_PEXPERT_ARM64_APPLE_ARM64_COMMON_H */ diff --git a/pexpert/pexpert/arm64/arm64_common.h b/pexpert/pexpert/arm64/apple_arm64_regs.h similarity index 65% rename from pexpert/pexpert/arm64/arm64_common.h rename to pexpert/pexpert/arm64/apple_arm64_regs.h index fe0b98768..857278634 100644 --- a/pexpert/pexpert/arm64/arm64_common.h +++ b/pexpert/pexpert/arm64/apple_arm64_regs.h @@ -5,15 +5,26 @@ #ifndef _PEXPERT_ARM64_COMMON_H #define _PEXPERT_ARM64_COMMON_H +#ifdef ASSEMBLER +#define __MSR_STR(x) x +#else +#define __MSR_STR1(x) #x +#define __MSR_STR(x) __MSR_STR1(x) +#endif + #ifdef APPLE_ARM64_ARCH_FAMILY #define ARM64_REG_HID0 S3_0_c15_c0_0 #define ARM64_REG_HID0_LoopBuffDisb (1<<20) +#define ARM64_REG_HID0_AMXCacheFusionDisb (1ULL<<21) #define ARM64_REG_HID0_ICPrefLimitOneBrn (1<<25) +#define ARM64_REG_HID0_FetchWidthDisb (1ULL<<28) #define ARM64_REG_HID0_PMULLFuseDisable (1ULL<<33) #define ARM64_REG_HID0_CacheFusionDisable (1ULL<<36) +#define ARM64_REG_HID0_SamePgPwrOpt (1ULL<<45) #define ARM64_REG_HID0_ICPrefDepth_bshift 60 #define ARM64_REG_HID0_ICPrefDepth_bmsk (7ULL < +/* + * Per-SoC configuration. General order is: + * + * CPU type + * CPU configuration + * CPU feature disables / workarounds + * CPU topology + * Other platform configuration (e.g. DARTs, PPL) + * + * This should answer the question: "what's unique about this SoC?" + * + * arm64/H*.h should answer the question: "what's unique about this CPU core?" + * + * For __ARM_AMP__ systems that have different cache line sizes on different + * clusters, MAX_L2_CLINE must reflect the largest L2 cache line size + * across all clusters. + */ + #ifdef ARM64_BOARD_CONFIG_T7000 -#define APPLE_ARM64_ARCH_FAMILY 1 -#define APPLETYPHOON -#define ARM_ARCH_TIMER -#include -#define __ARM_L2CACHE_SIZE_LOG__ 20 -#define ARM_BOARD_WFE_TIMEOUT_NS 1000 -#define ARM_BOARD_CLASS_T7000 -#define KERNEL_INTEGRITY_WT 1 -#define CORE_NCTRS 8 -#define CPMU_AIC_PMI 1 +#include + +#define MAX_L2_CLINE 6 +#define MAX_CPUS 3 +#define MAX_CPU_CLUSTERS 1 #endif /* ARM64_BOARD_CONFIG_T7000 */ #ifdef ARM64_BOARD_CONFIG_T7001 -#define APPLE_ARM64_ARCH_FAMILY 1 -#define APPLETYPHOON -#define ARM_ARCH_TIMER -#include -#define __ARM_L2CACHE_SIZE_LOG__ 21 -#define ARM_BOARD_WFE_TIMEOUT_NS 1000 -#define ARM_BOARD_CLASS_T7000 -#define KERNEL_INTEGRITY_WT 1 -#define CPU_COUNT 3 -#define CORE_NCTRS 8 -#define CPMU_AIC_PMI 1 +#include + +#define MAX_L2_CLINE 6 +#define MAX_CPUS 3 +#define MAX_CPU_CLUSTERS 1 #endif /* ARM64_BOARD_CONFIG_T7001 */ #ifdef ARM64_BOARD_CONFIG_S8000 +#include +#define MAX_CPUS 2 +#define MAX_CPU_CLUSTERS 1 /* * The L2 size for twister is in fact 3MB, not 4MB; we round up due * to the code being architected for power of 2 cache sizes, and rely * on the expected behavior that out of bounds operations will be * ignored. 
*/ -#define APPLE_ARM64_ARCH_FAMILY 1 -#define APPLETWISTER -#define ARM_ARCH_TIMER -#include -#define __ARM_L2CACHE_SIZE_LOG__ 22 -#define ARM_BOARD_WFE_TIMEOUT_NS 1000 -#define ARM_BOARD_CLASS_S8000 -#define KERNEL_INTEGRITY_WT 1 -#define CORE_NCTRS 8 -#define CPMU_AIC_PMI 1 +#define MAX_L2_CLINE 6 #endif /* ARM64_BOARD_CONFIG_S8000 */ #ifdef ARM64_BOARD_CONFIG_S8001 +#include +#define MAX_CPUS 2 +#define MAX_CPU_CLUSTERS 1 /* * The L2 size for twister is in fact 3MB, not 4MB; we round up due * to the code being architected for power of 2 cache sizes, and rely * on the expect behavior that out of bounds operations will be * ignored. */ -#define APPLE_ARM64_ARCH_FAMILY 1 -#define APPLETWISTER -#define ARM_ARCH_TIMER -#include -#define __ARM_L2CACHE_SIZE_LOG__ 22 -#define ARM_BOARD_WFE_TIMEOUT_NS 1000 -#define ARM_BOARD_CLASS_S8000 -#define KERNEL_INTEGRITY_WT 1 -#define CORE_NCTRS 8 -#define CPMU_AIC_PMI 1 +#define MAX_L2_CLINE 6 #endif /* ARM64_BOARD_CONFIG_S8001 */ #ifdef ARM64_BOARD_CONFIG_T8010 +#include +#define MAX_CPUS 3 +#define MAX_CPU_CLUSTERS 1 /* * The L2 size for hurricane/zephyr is in fact 3MB, not 4MB; we round up due * to the code being architected for power of 2 cache sizes, and rely * on the expect behavior that out of bounds operations will be * ignored. */ -#define APPLE_ARM64_ARCH_FAMILY 1 -#define APPLEHURRICANE -#define ARM_ARCH_TIMER -#define KERNEL_INTEGRITY_KTRR -#include -#define __ARM_L2CACHE_SIZE_LOG__ 22 -#define ARM_BOARD_WFE_TIMEOUT_NS 1000 -#define ARM_BOARD_CLASS_T8010 -#define CORE_NCTRS 10 -#define CPMU_AIC_PMI 1 +#define MAX_L2_CLINE 7 + #if DEVELOPMENT || DEBUG -#define PMAP_CS 1 -#define PMAP_CS_ENABLE 0 +#define PMAP_CS 1 +#define PMAP_CS_ENABLE 0 #endif #endif /* ARM64_BOARD_CONFIG_T8010 */ #ifdef ARM64_BOARD_CONFIG_T8011 -#define APPLE_ARM64_ARCH_FAMILY 1 -#define APPLEHURRICANE -#define ARM_ARCH_TIMER -#define KERNEL_INTEGRITY_KTRR -#include -#define __ARM_L2CACHE_SIZE_LOG__ 23 -#define ARM_BOARD_WFE_TIMEOUT_NS 1000 -#define ARM_BOARD_CLASS_T8011 -#define CPU_COUNT 3 -#define CORE_NCTRS 10 -#define CPMU_AIC_PMI 1 +#include + +#define MAX_L2_CLINE 7 +#define MAX_CPUS 3 +#define MAX_CPU_CLUSTERS 1 + #if DEVELOPMENT || DEBUG -#define PMAP_CS 1 -#define PMAP_CS_ENABLE 0 +#define PMAP_CS 1 +#define PMAP_CS_ENABLE 0 #endif #endif /* ARM64_BOARD_CONFIG_T8011 */ #ifdef ARM64_BOARD_CONFIG_T8015 -/* - * The LLC size for monsoon is 8MB, but the L2E exposed to mistral is - * only 1MB. We use the larger cache size here. The expectation is - * that this may cause flushes from mistral to be less efficient - * (cycles will be wasted on unnecessary way/set operations), but it - * will be technically correct... the best kind of correct. - * - * And is an explicit flush from L2E to LLC something we'll ever want - * to do? 
- */ -#define APPLE_ARM64_ARCH_FAMILY 1 -#define APPLEMONSOON -#define ARM_ARCH_TIMER -#define KERNEL_INTEGRITY_KTRR -#include -#define __ARM_L2CACHE_SIZE_LOG__ 23 -#define ARM_BOARD_WFE_TIMEOUT_NS 1000 -#define ARM_BOARD_CLASS_T8015 -#define CPU_COUNT 6 -#define BROKEN_FRIGGING_SLEEP 1 /* Spurious wake: See rdar://problem/29762505 */ -#define HAS_UNCORE_CTRS 1 -#define UNCORE_VERSION 1 -#define UNCORE_PER_CLUSTER 0 -#define UNCORE_NCTRS 8 -#define CORE_NCTRS 10 +#include + +#define MAX_L2_CLINE 7 +#define MAX_CPUS 6 +#define MAX_CPU_CLUSTERS 2 + +#define BROKEN_FRIGGING_SLEEP 1 /* Spurious wake: See rdar://problem/29762505 */ + #if DEVELOPMENT || DEBUG -#define PMAP_CS 1 -#define PMAP_CS_ENABLE 0 +#define PMAP_CS 1 +#define PMAP_CS_ENABLE 0 #endif #endif /* ARM64_BOARD_CONFIG_T8015 */ #ifdef ARM64_BOARD_CONFIG_T8020 -/* - * The LLC size for Vortex is 8MB, but the LLC on Tempest is only 2MB. - * We use the larger cache size here. The expectation is - * that this may cause flushes from Tempest to be less efficient - * (cycles will be wasted on unnecessary way/set operations), but it - * will be technically correct... the best kind of correct. - */ -#define APPLE_ARM64_ARCH_FAMILY 1 -#define APPLEVORTEX -#define ARM_ARCH_TIMER -#define KERNEL_INTEGRITY_CTRR -#include -#define __ARM_L2CACHE_SIZE_LOG__ 23 -#define ARM_BOARD_WFE_TIMEOUT_NS 1000 -#define ARM_BOARD_CLASS_T8020 -#define CPU_COUNT 6 -#define CPU_CLUSTER_OFFSETS {0, 4} -#define HAS_UNCORE_CTRS 1 -#define UNCORE_VERSION 2 -#define UNCORE_PER_CLUSTER 1 -#define UNCORE_NCTRS 16 -#define CORE_NCTRS 10 -#define PMAP_PV_LOAD_FACTOR 5 -#define PMAP_CS 1 -#define PMAP_CS_ENABLE 1 +#include + +#define MAX_L2_CLINE 7 +#define MAX_CPUS 8 +#define MAX_CPU_CLUSTERS 2 + +#define XNU_MONITOR 1 /* Secure pmap runtime */ +#define XNU_MONITOR_T8020_DART 1 /* T8020 DART plugin for secure pmap runtime */ +#define T8020_DART_ALLOW_BYPASS (1 << 1) /* DART allows translation bypass in certain cases */ +#define XNU_MONITOR_NVME_PPL 1 /* NVMe PPL plugin for secure pmap runtime */ +#define XNU_MONITOR_ANS2_SART 1 /* ANS2 SART plugin for secure pmap runtime */ +#define PMAP_CS 1 +#define PMAP_CS_ENABLE 1 #endif /* ARM64_BOARD_CONFIG_T8020 */ #ifdef ARM64_BOARD_CONFIG_T8006 @@ -179,105 +135,109 @@ * The T8006 consists of 2 Tempest cores (i.e. T8020 eCores) and for most * of our purposes here may be considered a functional subset of T8020. 
*/ -#define APPLE_ARM64_ARCH_FAMILY 1 -#define APPLEVORTEX -#define ARM_ARCH_TIMER -#define KERNEL_INTEGRITY_CTRR -#include -#define __ARM_L2CACHE_SIZE_LOG__ 21 -#define ARM_BOARD_WFE_TIMEOUT_NS 1000 -#define ARM_BOARD_CLASS_T8006 -#define PEXPERT_NO_3X_IMAGES 1 -#define CORE_NCTRS 10 -#define PMAP_PV_LOAD_FACTOR 5 -#define PMAP_CS 1 -#define PMAP_CS_ENABLE 1 +#include + +#undef HAS_UNCORE_CTRS +#ifdef XNU_TARGET_OS_WATCH // This check might be redundant +#undef __APRR_SHADOW_SUPPORTED__ +#endif + +#define MAX_L2_CLINE 7 +#define MAX_CPUS 2 +#define MAX_CPU_CLUSTERS 1 + +#define XNU_MONITOR 1 /* Secure pmap runtime */ +#define XNU_MONITOR_T8020_DART 1 /* T8020 DART plugin for secure pmap runtime */ +#define T8020_DART_ALLOW_BYPASS (1 << 1) /* DART allows translation bypass in certain cases */ +#define XNU_MONITOR_NVME_PPL 1 /* NVMe PPL plugin for secure pmap runtime */ +#define XNU_MONITOR_ANS2_SART 1 /* ANS2 SART plugin for secure pmap runtime */ +#define PMAP_CS 1 +#define PMAP_CS_ENABLE 1 +#define PREFER_ARM64_32_BINARIES +#define PEXPERT_NO_3X_IMAGES 1 #endif /* ARM64_BOARD_CONFIG_T8006 */ #ifdef ARM64_BOARD_CONFIG_T8027 -#define APPLE_ARM64_ARCH_FAMILY 1 -#define APPLEVORTEX -#define ARM_ARCH_TIMER -#define KERNEL_INTEGRITY_CTRR -#include -#define __ARM_L2CACHE_SIZE_LOG__ 23 -#define ARM_BOARD_WFE_TIMEOUT_NS 1000 -#define ARM_BOARD_CLASS_T8027 -#define CPU_COUNT 8 -#define CPU_CLUSTER_OFFSETS {0, 4} -#define HAS_UNCORE_CTRS 1 -#define UNCORE_VERSION 2 -#define UNCORE_PER_CLUSTER 1 -#define UNCORE_NCTRS 16 -#define CORE_NCTRS 10 -#define PMAP_PV_LOAD_FACTOR 5 -#define PMAP_CS 1 -#define PMAP_CS_ENABLE 1 +#include + +#define MAX_L2_CLINE 7 +#define MAX_CPUS 8 +#define MAX_CPU_CLUSTERS 2 + +#define XNU_MONITOR 1 /* Secure pmap runtime */ +#define XNU_MONITOR_T8020_DART 1 /* T8020 DART plugin for secure pmap runtime */ +#define T8020_DART_ALLOW_BYPASS (1 << 1) /* DART allows translation bypass in certain cases */ +#define XNU_MONITOR_NVME_PPL 1 /* NVMe PPL plugin for secure pmap runtime */ +#define XNU_MONITOR_ANS2_SART 1 /* ANS2 SART plugin for secure pmap runtime */ +#define PMAP_CS 1 +#define PMAP_CS_ENABLE 1 #endif /* ARM64_BOARD_CONFIG_T8027 */ #ifdef ARM64_BOARD_CONFIG_T8028 -#define APPLE_ARM64_ARCH_FAMILY 1 -#define APPLEVORTEX -#define ARM_ARCH_TIMER -#define KERNEL_INTEGRITY_CTRR -#include -#define __ARM_L2CACHE_SIZE_LOG__ 23 -#define ARM_BOARD_WFE_TIMEOUT_NS 1000 -#define ARM_BOARD_CLASS_T8028 -#define CPU_COUNT 8 -#define CPU_CLUSTER_OFFSETS {0, 4} -#define HAS_UNCORE_CTRS 1 -#define UNCORE_VERSION 2 -#define UNCORE_PER_CLUSTER 1 -#define UNCORE_NCTRS 16 -#define CORE_NCTRS 10 -#define PMAP_PV_LOAD_FACTOR 5 -#define PMAP_CS 1 -#define PMAP_CS_ENABLE 1 +#include + +#define MAX_L2_CLINE 7 +#define MAX_CPUS 8 +#define MAX_CPU_CLUSTERS 2 + +#define XNU_MONITOR 1 /* Secure pmap runtime */ +#define XNU_MONITOR_T8020_DART 1 /* T8020 DART plugin for secure pmap runtime */ +#define T8020_DART_ALLOW_BYPASS (1 << 1) /* DART allows translation bypass in certain cases */ +#define XNU_MONITOR_NVME_PPL 1 /* NVMe PPL plugin for secure pmap runtime */ +#define XNU_MONITOR_ANS2_SART 1 /* ANS2 SART plugin for secure pmap runtime */ +#define PMAP_CS 1 +#define PMAP_CS_ENABLE 1 #endif /* ARM64_BOARD_CONFIG_T8028 */ #ifdef ARM64_BOARD_CONFIG_T8030 -/* - * The LLC size for Lightning is 8MB, but the LLC on Thunder is only 4MB. - * We use the larger cache size here. 
The expectation is - * that this may cause flushes from Tempest to be less efficient - * (cycles will be wasted on unnecessary way/set operations), but it - * will be technically correct... the best kind of correct. - */ -#define APPLE_ARM64_ARCH_FAMILY 1 -#define APPLELIGHTNING -#define ARM_ARCH_TIMER -#define KERNEL_INTEGRITY_CTRR -#include -#define __ARM_L2CACHE_SIZE_LOG__ 23 -#define ARM_BOARD_WFE_TIMEOUT_NS 1000 -#define ARM_BOARD_CLASS_T8030 -#define CPU_COUNT 6 -#define CPU_CLUSTER_OFFSETS {0, 4} -#define CPU_PIO_RO_CTL_OFFSETS {0x210055000, 0x210155000, 0x210255000, 0x210355000, 0x211055000, 0x211155000} -#define CLUSTER_PIO_RO_CTL_OFFSETS {0x210e49000, 0x211e49000} -#define HAS_UNCORE_CTRS 1 -#define UNCORE_VERSION 2 -#define UNCORE_PER_CLUSTER 1 -#define UNCORE_NCTRS 16 -#define CORE_NCTRS 10 -#define PMAP_PV_LOAD_FACTOR 7 -#define PMAP_CS 1 -#define PMAP_CS_ENABLE 1 +#include + +#define MAX_L2_CLINE 7 +#define MAX_CPUS 6 +#define MAX_CPU_CLUSTERS 2 + +#define XNU_MONITOR 1 /* Secure pmap runtime */ +#define XNU_MONITOR_T8020_DART 1 /* T8020 DART plugin for secure pmap runtime */ +#define T8020_DART_ALLOW_BYPASS (1 << 1) /* DART allows translation bypass in certain cases */ +#define XNU_MONITOR_NVME_PPL 1 /* NVMe PPL plugin for secure pmap runtime */ +#define XNU_MONITOR_ANS2_SART 1 /* ANS2 SART plugin for secure pmap runtime */ +#define XNU_MONITOR_UAT_PPL 1 /* UAT PPL plugin for secure pmap runtime */ +#define PMAP_CS 1 +#define PMAP_CS_ENABLE 1 #endif /* ARM64_BOARD_CONFIG_T8030 */ + + #ifdef ARM64_BOARD_CONFIG_BCM2837 -#define BCM2837 -#define BCM2837_BRINGUP -#define ARM_ARCH_TIMER #include -#define __ARM_L2CACHE_SIZE_LOG__ 19 -#define ARM_BOARD_CLASS_BCM2837 -#define CPU_COUNT 4 -#define CORE_NCTRS 8 /* Placeholder; KPC is not enabled for this target */ + +#define MAX_L2_CLINE 6 +#define MAX_CPUS 4 +#define MAX_CPU_CLUSTERS 1 + +#define CORE_NCTRS 8 /* Placeholder; KPC is not enabled for this target */ #endif /* ARM64_BOARD_CONFIG_BCM2837 */ +#ifndef HAS_UNCORE_CTRS +#undef UNCORE_VERSION +#undef UNCORE_PER_CLUSTER +#undef UNCORE_NCTRS +#endif + +#if MAX_CPU_CLUSTERS == 1 +#undef __ARM_AMP__ +#endif + +#ifndef MAX_CPU_CLUSTER_PHY_ID +#define MAX_CPU_CLUSTER_PHY_ID (MAX_CPU_CLUSTERS - 1) +#endif + +#ifdef PREFER_ARM64_32_BINARIES +#define PREFERRED_USER_CPU_TYPE CPU_TYPE_ARM64_32 +#define PREFERRED_USER_CPU_SUBTYPE CPU_SUBTYPE_ARM64_32_V8 +#endif + #endif /* ! _PEXPERT_ARM_BOARD_CONFIG_H */ diff --git a/pexpert/pexpert/arm64/hurricane.h b/pexpert/pexpert/arm64/hurricane.h deleted file mode 100644 index bf1b181d2..000000000 --- a/pexpert/pexpert/arm64/hurricane.h +++ /dev/null @@ -1,27 +0,0 @@ -/* - * Copyright (c) 2014-2018 Apple Inc. All rights reserved. - */ - -#ifndef _PEXPERT_ARM_HURRICANE_H -#define _PEXPERT_ARM_HURRICANE_H - -#define NO_MONITOR 1 /* No EL3 for this CPU -- ever */ -#define HAS_MIGSTS 1 /* Has MIGSTS register, and supports migration between p-core and e-core */ -#define HAS_KTRR 1 /* Has KTRR registers */ -#define HAS_CPMU_L2C_EVENTS 1 /* Has L2 cache events in CPMU */ - -#ifdef APPLEHURRICANE -#include "arm64_common.h" -#endif - -/* - * A0 is variant 0, B0 is variant 1. See arm64/proc_reg.h - * for how these values are constructed from the MIDR. - */ -#define HURRICANE_CPU_VERSION_A0 0x00 -#define HURRICANE_CPU_VERSION_B0 0x10 - -// Hurricane and Zephyr require workaround for radar 20619637 -#define SINGLE_STEP_RETIRE_ERRATA 1 - -#endif /* ! 
_PEXPERT_ARM_HURRICANE_H */ diff --git a/pexpert/pexpert/arm64/spr_locks.h b/pexpert/pexpert/arm64/spr_locks.h index 5d42a95b0..341874d99 100644 --- a/pexpert/pexpert/arm64/spr_locks.h +++ b/pexpert/pexpert/arm64/spr_locks.h @@ -28,6 +28,7 @@ #ifndef _PEXPERT_ARM64_SPR_LOCKS_H #define _PEXPERT_ARM64_SPR_LOCKS_H +#define MSR_RO_CTL_HID1 (1ULL << 1) #define MSR_RO_CTL_HID4 (1ULL << 4) #define MSR_RO_CTL_CYC_OVRD (1ULL << 27) #define MSR_RO_CTL_ACC_OVRD (1ULL << 47) diff --git a/pexpert/pexpert/arm64/twister.h b/pexpert/pexpert/arm64/twister.h deleted file mode 100644 index 4fc2b8480..000000000 --- a/pexpert/pexpert/arm64/twister.h +++ /dev/null @@ -1,17 +0,0 @@ -/* - * Copyright (c) 2014-2018 Apple Inc. All rights reserved. - */ - -#ifndef _PEXPERT_ARM_TWISTER_H -#define _PEXPERT_ARM_TWISTER_H - -#define MONITOR 1 /* Use EL3 monitor */ -#define NO_ECORE 1 -#define HAS_32BIT_DBGWRAP 1 -#define HAS_CPMU_L2C_EVENTS 1 /* Has L2 cache events in CPMU */ - -#ifdef APPLETWISTER -#include "arm64_common.h" -#endif - -#endif /* ! _PEXPERT_ARM_TWISTER_H */ diff --git a/pexpert/pexpert/arm64/typhoon.h b/pexpert/pexpert/arm64/typhoon.h deleted file mode 100644 index dba7d4362..000000000 --- a/pexpert/pexpert/arm64/typhoon.h +++ /dev/null @@ -1,18 +0,0 @@ -/* - * Copyright (c) 2012-2018 Apple Inc. All rights reserved. - */ - -#ifndef _PEXPERT_ARM_TYPHOON_H -#define _PEXPERT_ARM_TYPHOON_H - -#define MONITOR 1 /* Use EL3 monitor */ -#define NO_ECORE 1 -#define HAS_32BIT_DBGWRAP 1 -#define HAS_CPMU_BIU_EVENTS 1 /* Has BIU events in CPMU */ -#define HAS_CPMU_L2C_EVENTS 1 /* Has L2 cache events in CPMU */ - -#ifdef APPLETYPHOON -#include "arm64_common.h" -#endif - -#endif /* ! _PEXPERT_ARM_TYPHOON_H */ diff --git a/pexpert/pexpert/device_tree.h b/pexpert/pexpert/device_tree.h index 427f3a12c..d3a18e529 100644 --- a/pexpert/pexpert/device_tree.h +++ b/pexpert/pexpert/device_tree.h @@ -28,6 +28,11 @@ #ifndef _PEXPERT_DEVICE_TREE_H_ #define _PEXPERT_DEVICE_TREE_H_ +#include + +#include +#include + #include #ifdef __APPLE_API_PRIVATE @@ -82,7 +87,7 @@ typedef struct OpaqueDTEntry { // DeviceTreeNode children[]; // array size == nChildren } DeviceTreeNode; -typedef DeviceTreeNode *RealDTEntry; +typedef const DeviceTreeNode *RealDTEntry; typedef struct DTSavedScope { struct DTSavedScope * nextScope; @@ -103,12 +108,12 @@ typedef struct OpaqueDTEntryIterator { /* Property Iterator*/ typedef struct OpaqueDTPropertyIterator { RealDTEntry entry; - DeviceTreeNodeProperty *currentProperty; + DeviceTreeNodeProperty const *currentProperty; unsigned long currentIndex; } OpaqueDTPropertyIterator, *DTPropertyIterator; /* Entry*/ -typedef struct OpaqueDTEntry* DTEntry; +typedef const struct OpaqueDTEntry* DTEntry; /* Entry Iterator*/ typedef struct OpaqueDTEntryIterator* DTEntryIterator; @@ -134,7 +139,12 @@ enum { /* Used to initalize the device tree functions. */ /* base is the base address of the flatened device tree */ -void DTInit(void *base); +extern void SecureDTInit(void const *base, size_t size); + +/* Whether the device tree is locked down after machine lockdown. */ +/* Returns false if there is no meaningful distinction, in */ +/* contrast to SecureDTFindEntry. */ +extern bool SecureDTIsLockedDown(void); /* * ------------------------------------------------------------------------------- @@ -142,7 +152,7 @@ void DTInit(void *base); * ------------------------------------------------------------------------------- */ /* Compare two Entry's for equality. 
*/ -extern int DTEntryIsEqual(const DTEntry ref1, const DTEntry ref2); +extern int SecureDTEntryIsEqual(const DTEntry ref1, const DTEntry ref2); /* * ------------------------------------------------------------------------------- @@ -150,22 +160,24 @@ extern int DTEntryIsEqual(const DTEntry ref1, const DTEntry ref2); * ------------------------------------------------------------------------------- */ /* - * DTFindEntry: + * Find Entry * Find the device tree entry that contains propName=propValue. * It currently searches the entire * tree. This function should eventually go in DeviceTree.c. * Returns: kSuccess = entry was found. Entry is in entryH. * kError = entry was not found */ -extern int DTFindEntry(const char *propName, const char *propValue, DTEntry *entryH); +extern int SecureDTFindEntry(const char *propName, const char *propValue, DTEntry *entryH); /* * Lookup Entry * Locates an entry given a specified subroot (searchPoint) and path name. If the * searchPoint pointer is NULL, the path name is assumed to be an absolute path * name rooted to the root of the device tree. + * Returns: kSuccess = entry was found. Entry is in foundEntry. + * kError = entry was not found */ -extern int DTLookupEntry(const DTEntry searchPoint, const char *pathName, DTEntry *foundEntry); +extern int SecureDTLookupEntry(const DTEntry searchPoint, const char *pathName, DTEntry *foundEntry); /* * ------------------------------------------------------------------------------- @@ -186,7 +198,7 @@ extern int DTLookupEntry(const DTEntry searchPoint, const char *pathName, DTEntr * currentScope are set to the root entry. The currentPosition for the iterator is * set to "nil". */ -extern int DTInitEntryIterator(const DTEntry startEntry, DTEntryIterator iter); +extern int SecureDTInitEntryIterator(const DTEntry startEntry, DTEntryIterator iter); /* * Enter Child Entry @@ -195,7 +207,7 @@ extern int DTInitEntryIterator(const DTEntry startEntry, DTEntryIterator iter); * "childEntry" is nil, the currentScope is set to the entry specified by the * currentPosition of the iterator. */ -extern int DTEnterEntry(DTEntryIterator iterator, DTEntry childEntry); +extern int SecureDTEnterEntry(DTEntryIterator iterator, DTEntry childEntry); /* * Exit to Parent Entry @@ -204,7 +216,7 @@ extern int DTEnterEntry(DTEntryIterator iterator, DTEntry childEntry); * previous currentScope), so the next iteration call will continue where it left off. * This position is returned in parameter "currentPosition". */ -extern int DTExitEntry(DTEntryIterator iterator, DTEntry *currentPosition); +extern int SecureDTExitEntry(DTEntryIterator iterator, DTEntry *currentPosition); /* * Iterate Entries @@ -213,7 +225,7 @@ extern int DTExitEntry(DTEntryIterator iterator, DTEntry *currentPosition); * int == kIterationDone, all entries have been exhausted, and the * value of nextEntry will be Nil. */ -extern int DTIterateEntries(DTEntryIterator iterator, DTEntry *nextEntry); +extern int SecureDTIterateEntries(DTEntryIterator iterator, DTEntry *nextEntry); /* * Restart Entry Iteration @@ -222,7 +234,7 @@ extern int DTIterateEntries(DTEntryIterator iterator, DTEntry *nextEntry); * outermostScope and currentScope of the iterator are unchanged. The currentPosition * for the iterator is set to "nil". 
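 *
 * For illustration, a typical loop that such a restart applies to, iterating
 * the children of an entry (sketch only; 'parent' and 'iter' are hypothetical
 * local variables, error handling omitted):
 *
 *     OpaqueDTEntryIterator iter;
 *     DTEntry child;
 *     if (SecureDTInitEntryIterator(parent, &iter) == kSuccess) {
 *         while (SecureDTIterateEntries(&iter, &child) == kSuccess) {
 *             // inspect 'child' here
 *         }
 *     }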
*/ -extern int DTRestartEntryIteration(DTEntryIterator iterator); +extern int SecureDTRestartEntryIteration(DTEntryIterator iterator); /* * ------------------------------------------------------------------------------- @@ -234,7 +246,16 @@ extern int DTRestartEntryIteration(DTEntryIterator iterator); * * Get Property */ -extern int DTGetProperty(const DTEntry entry, const char *propertyName, void **propertyValue, unsigned int *propertySize); +extern int SecureDTGetProperty(const DTEntry entry, const char *propertyName, + void const **propertyValue, unsigned int *propertySize); + +#if defined(__i386__) || defined(__x86_64__) +// x86 processes device tree fragments outside the normal DT region in +// hibernation. This would not work on ARM. +extern int SecureDTGetPropertyRegion(const DTEntry entry, const char *propertyName, + void const **propertyValue, unsigned int *propertySize, + vm_offset_t const region_start, vm_size_t region_size); +#endif /* * ------------------------------------------------------------------------------- @@ -245,7 +266,7 @@ extern int DTGetProperty(const DTEntry entry, const char *propertyName, void **p * Initialize Property Iterator * Fill out the property iterator structure. The target entry is defined by entry. */ -extern int DTInitPropertyIterator(const DTEntry entry, DTPropertyIterator iter); +extern int SecureDTInitPropertyIterator(const DTEntry entry, DTPropertyIterator iter); /* * Iterate Properites @@ -253,8 +274,8 @@ extern int DTInitPropertyIterator(const DTEntry entry, DTPropertyIterator iter); * When int == kIterationDone, all properties have been exhausted. */ -extern int DTIterateProperties(DTPropertyIterator iterator, - char **foundProperty); +extern int SecureDTIterateProperties(DTPropertyIterator iterator, + char const **foundProperty); /* * Restart Property Iteration @@ -262,7 +283,7 @@ extern int DTIterateProperties(DTPropertyIterator iterator, * reset to the beginning of the list of properties for an entry. 
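 *
 * For illustration, fetching a single property with the lookup and accessor
 * declared above (sketch only; the path and property names are examples):
 *
 *     DTEntry entry;
 *     void const *prop;
 *     unsigned int prop_size;
 *     if (SecureDTLookupEntry(NULL, "/chosen", &entry) == kSuccess &&
 *         SecureDTGetProperty(entry, "boot-args", &prop, &prop_size) == kSuccess) {
 *         // 'prop' points at 'prop_size' bytes of read-only property data
 *     }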
*/ -extern int DTRestartPropertyIteration(DTPropertyIterator iterator); +extern int SecureDTRestartPropertyIteration(DTPropertyIterator iterator); #ifdef __cplusplus } diff --git a/pexpert/pexpert/i386/boot.h b/pexpert/pexpert/i386/boot.h index 0476e8d33..de2f95e8d 100644 --- a/pexpert/pexpert/i386/boot.h +++ b/pexpert/pexpert/i386/boot.h @@ -123,6 +123,8 @@ typedef struct boot_icon_element boot_icon_element; * "Revision" can be incremented for compatible changes */ #define kBootArgsRevision 0 +#define kBootArgsRevision0 kBootArgsRevision +#define kBootArgsRevision1 1 /* added KC_hdrs_addr */ #define kBootArgsVersion 2 /* Snapshot constants of previous revisions that are supported */ @@ -143,6 +145,7 @@ typedef struct boot_icon_element boot_icon_element; #define kBootArgsFlagBlackBg (1 << 6) #define kBootArgsFlagLoginUI (1 << 7) #define kBootArgsFlagInstallUI (1 << 8) +#define kBootArgsFlagRecoveryBoot (1 << 10) typedef struct boot_args { uint16_t Revision; /* Revision of boot_args structure */ @@ -196,7 +199,17 @@ typedef struct boot_args { uint32_t apfsDataStart;/* Physical address of apfs volume key structure */ uint32_t apfsDataSize; - uint32_t __reserved4[710]; + /* Version 2, Revision 1 */ + uint64_t KC_hdrs_vaddr; + + uint64_t arvRootHashStart; /* Physical address of root hash file */ + uint64_t arvRootHashSize; + + uint64_t arvManifestStart; /* Physical address of manifest file */ + uint64_t arvManifestSize; + + /* Reserved */ + uint32_t __reserved4[700]; } boot_args; extern char assert_boot_args_size_is_4096[sizeof(boot_args) == 4096 ? 1 : -1]; diff --git a/pexpert/pexpert/i386/efi.h b/pexpert/pexpert/i386/efi.h index 3ab0f1f38..4ca27dd0e 100644 --- a/pexpert/pexpert/i386/efi.h +++ b/pexpert/pexpert/i386/efi.h @@ -550,4 +550,8 @@ typedef struct EFI_SYSTEM_TABLE_64 { EFI_PTR64 ConfigurationTable; } __attribute__((aligned(8))) EFI_SYSTEM_TABLE_64; +#if defined(XNU_KERNEL_PRIVATE) +extern uint64_t efi_get_rsdp_physaddr(void); +#endif + #endif /* _PEXPERT_I386_EFI_H */ diff --git a/pexpert/pexpert/i386/protos.h b/pexpert/pexpert/i386/protos.h index fb4c5f3dc..e4d591462 100644 --- a/pexpert/pexpert/i386/protos.h +++ b/pexpert/pexpert/i386/protos.h @@ -86,6 +86,7 @@ int serial_getc(void); * from osfmk/kern/misc_protos.h */ void cnputc(char); +void cnputc_unbuffered(char); int cngetc(void); #endif /* _PEXPERT_I386_PROTOS_H */ diff --git a/pexpert/pexpert/machine/boot.h b/pexpert/pexpert/machine/boot.h index 4d5e34950..565ec5b22 100644 --- a/pexpert/pexpert/machine/boot.h +++ b/pexpert/pexpert/machine/boot.h @@ -31,9 +31,15 @@ #if defined (__i386__) || defined(__x86_64__) #include "pexpert/i386/boot.h" #elif defined (__arm64__) +#ifdef PRIVATE +/* pexpert/arm64/boot.h isn't installed into the public SDK. */ #include "pexpert/arm64/boot.h" +#endif /* PRIVATE */ #elif defined (__arm__) +#ifdef PRIVATE +/* pexpert/arm/boot.h isn't installed into the public SDK. */ #include "pexpert/arm/boot.h" +#endif /* PRIVATE */ #else #error architecture not supported #endif diff --git a/pexpert/pexpert/machine/protos.h b/pexpert/pexpert/machine/protos.h index d0aaa6887..184c2d0fd 100644 --- a/pexpert/pexpert/machine/protos.h +++ b/pexpert/pexpert/machine/protos.h @@ -31,7 +31,10 @@ #if defined (__i386__) || defined(__x86_64__) #include "pexpert/i386/protos.h" #elif defined (__arm__) || defined (__arm64__) +#ifdef PRIVATE +/* pexpert/arm/protos.h isn't installed into the public SDK. 
*/ #include "pexpert/arm/protos.h" +#endif /* PRIVATE */ #else #error architecture not supported #endif diff --git a/pexpert/pexpert/pexpert.h b/pexpert/pexpert/pexpert.h index c721ce842..e6fd6d56b 100644 --- a/pexpert/pexpert/pexpert.h +++ b/pexpert/pexpert/pexpert.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2009 Apple Inc. All rights reserved. + * Copyright (c) 2000-2019 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -35,6 +35,10 @@ #include #endif +#if XNU_KERNEL_PRIVATE +#include +#endif + __BEGIN_DECLS #include #include @@ -52,7 +56,7 @@ typedef void *cpu_id_t; #endif #if XNU_KERNEL_PRIVATE -#if CONFIG_EMBEDDED +#if defined(__arm__) || defined(__arm64__) extern struct embedded_panic_header *panic_info; extern vm_offset_t gPanicBase; extern unsigned int gPanicSize; @@ -65,9 +69,9 @@ void PE_save_buffer_to_vram( unsigned char *, unsigned int *); -#else /* CONFIG_EMBEDDED */ +#else /* defined(__arm__) || defined(__arm64__) */ extern struct macos_panic_header *panic_info; -#endif /* CONFIG_EMBEDDED */ +#endif /* defined(__arm__) || defined(__arm64__) */ #endif /* XNU_KERNEL_PRIVATE */ extern void lpss_uart_enable(boolean_t on_off); @@ -111,6 +115,10 @@ void PE_init_panicheader( void PE_update_panicheader_nestedpanic( void); +/* Invokes AppleARMIO::handlePlatformError() if present */ +bool PE_handle_platform_error( + vm_offset_t far); + #if KERNEL_PRIVATE /* @@ -128,9 +136,6 @@ extern uint32_t PE_i_can_has_kernel_configuration(void); #endif /* KERNEL_PRIVATE */ -void PE_init_kprintf( - boolean_t vm_initialized); - extern int32_t gPESerialBaud; extern uint8_t gPlatformECID[8]; @@ -146,9 +151,29 @@ void PE_init_printf( extern void (*PE_putc)(char c); +/* + * Perform pre-lockdown IOKit initialization. + * This is guaranteed to execute prior to machine_lockdown(). + * The precise operations performed by this function depend upon + * the security model employed by the platform, but in general this + * function should be expected to at least perform basic C++ runtime + * and I/O registry initialization. + */ void PE_init_iokit( void); +/* + * Perform post-lockdown IOKit initialization. + * This is guaranteed to execute after machine_lockdown(). + * The precise operations performed by this function depend upon + * the security model employed by the platform. For example, if + * the platform treats machine_lockdown() as a strict security + * checkpoint, general-purpose IOKit matching may not begin until + * this function is called. 
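+ *
+ * A minimal sketch of the intended ordering (simplified; the surrounding
+ * boot code is not shown verbatim here):
+ *
+ *     PE_init_iokit();      // pre-lockdown: C++ runtime, registry setup
+ *     machine_lockdown();   // lock down the machine
+ *     PE_lockdown_iokit();  // post-lockdown: general-purpose matching may begin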
+ */ +void PE_lockdown_iokit( + void); + struct clock_frequency_info_t { unsigned long bus_clock_rate_hz; unsigned long cpu_clock_rate_hz; @@ -224,18 +249,11 @@ enum { kPEReadTOD, kPEWriteTOD }; -extern int (*PE_read_write_time_of_day)( - unsigned int options, - long * secs); enum { kPEWaitForInput = 0x00000001, kPERawInput = 0x00000002 }; -extern int (*PE_write_IIC)( - unsigned char addr, - unsigned char reg, - unsigned char data); /* Private Stuff - eventually put in pexpertprivate.h */ enum { @@ -305,6 +323,7 @@ typedef struct PE_state { PE_Video video; void *deviceTreeHead; void *bootArgs; + vm_size_t deviceTreeSize; } PE_state_t; extern PE_state_t PE_state; @@ -353,6 +372,9 @@ extern kern_return_t PE_cpu_start( extern void PE_cpu_halt( cpu_id_t target); +extern bool PE_cpu_down( + cpu_id_t target); + extern void PE_cpu_signal( cpu_id_t source, cpu_id_t target); @@ -380,6 +402,8 @@ extern void PE_panic_hook(const char *str); extern void PE_init_cpu(void); +extern void PE_handle_ext_interrupt(void); + #if defined(__arm__) || defined(__arm64__) typedef void (*perfmon_interrupt_handler_func)(cpu_id_t source); extern kern_return_t PE_cpu_perfmon_interrupt_install_handler(perfmon_interrupt_handler_func handler); @@ -393,6 +417,50 @@ extern void(*const PE_arm_debug_panic_hook)(const char *str); #endif #endif + +typedef enum kc_kind { + KCKindNone = -1, + KCKindUnknown = 0, + KCKindPrimary = 1, + KCKindPageable = 2, + KCKindAuxiliary = 3, + KCNumKinds = 4, +} kc_kind_t; + +typedef enum kc_format { + KCFormatUnknown = 0, + KCFormatStatic = 1, + KCFormatDynamic = 2, + KCFormatFileset = 3, + KCFormatKCGEN = 4, +} kc_format_t; + +#if XNU_KERNEL_PRIVATE +/* set the mach-o header for a given KC type */ +extern void PE_set_kc_header(kc_kind_t type, kernel_mach_header_t *header, uintptr_t slide); +void PE_reset_kc_header(kc_kind_t type); +/* set both lowest VA (base) and mach-o header for a given KC type */ +extern void PE_set_kc_header_and_base(kc_kind_t type, kernel_mach_header_t *header, void *base, uintptr_t slide); +/* The highest non-LINKEDIT virtual address */ +extern vm_offset_t kc_highest_nonlinkedit_vmaddr; +#endif +/* returns a pointer to the mach-o header for a give KC type, returns NULL if nothing's been set */ +extern void *PE_get_kc_header(kc_kind_t type); +/* returns a pointer to the lowest VA of of the KC of the given type */ +extern void *PE_get_kc_baseaddress(kc_kind_t type); +/* returns an array of length KCNumKinds of the lowest VAs of each KC type - members could be NULL */ +extern const void * const*PE_get_kc_base_pointers(void); +/* returns the slide for the kext collection */ +extern uintptr_t PE_get_kc_slide(kc_kind_t type); +/* quickly accesss the format of the primary kc */ +extern bool PE_get_primary_kc_format(kc_format_t *type); +/* set vnode ptr for kc fileset */ +extern void PE_set_kc_vp(kc_kind_t type, void *vp); +/* quickly set vnode ptr for kc fileset */ +void * PE_get_kc_vp(kc_kind_t type); +/* drop reference to kc fileset vnodes */ +void PE_reset_all_kc_vp(void); + #if KERNEL_PRIVATE #if defined(__arm64__) extern uint8_t PE_smc_stashed_x86_power_state; diff --git a/pexpert/pexpert/protos.h b/pexpert/pexpert/protos.h index 15bea419a..a385c2e84 100644 --- a/pexpert/pexpert/protos.h +++ b/pexpert/pexpert/protos.h @@ -42,7 +42,7 @@ //------------------------------------------------------------------------ // from ppc/misc_protos.h -extern void printf(const char *fmt, ...); +extern void printf(const char *fmt, ...) 
__printflike(1, 2); extern void interrupt_enable(void); extern void interrupt_disable(void); @@ -85,7 +85,9 @@ void Debugger(const char *message); //------------------------------------------------------------------------ // from iokit/IOStartIOKit.cpp -extern void StartIOKit( void * p1, void * p2, void * p3, void * p4); +extern void InitIOKit(void *dtTop); +extern void ConfigureIOKit(void); +extern void StartIOKitMatching(void); // from iokit/Families/IOFramebuffer.cpp extern unsigned char appleClut8[256 * 3]; diff --git a/san/Makefile b/san/Makefile index 816390994..c22e3a9d1 100644 --- a/san/Makefile +++ b/san/Makefile @@ -33,7 +33,7 @@ COMP_SUBDIRS = conf .DELETE_ON_ERROR: $(OBJROOT)/san/kasan-blacklist-%: $(SOURCE)/kasan-blacklist $(SOURCE)/ubsan-blacklist $(SOURCE)/kasan-blacklist-% - $(call makelog,$(ColorH)GENERATING$(Color0) $(ColorLF)$(notdir $@)$(Color0)) + @$(LOG_GENERATE) "$(notdir $@)" $(_v)sed -e 's,^src:\./,src:'"$(SRCROOT)/," $^ > $@ $(_v)$(SOURCE)/tools/validate_blacklist.sh "$@" @@ -49,33 +49,42 @@ SYMROOT_KEXT_PATH = $(addprefix $(SYMROOT),/System.kext/PlugIns/Kasan.kext) ifneq ($(INSTALL_KASAN_ONLY),1) DSTROOT_KEXT = $(DSTROOT_KEXT_PATH)/Kasan SYMROOT_KEXT = $(SYMROOT_KEXT_PATH)/Kasan +SYMBOL_SET_BUILD = $(OBJPATH)/Kasan.symbolset endif ifeq ($(KASAN),1) DSTROOT_KEXT += $(DSTROOT_KEXT_PATH)/Kasan_kasan SYMROOT_KEXT += $(SYMROOT_KEXT_PATH)/Kasan_kasan +SYMBOL_SET_BUILD += $(OBJPATH)/Kasan_kasan.symbolset endif # Our external dependency on allsymbols is fine because this runs in a later phase (config_install vs. config_all) $(OBJPATH)/%.symbolset: $(SOURCE)/%.exports - $(call makelog,$(ColorH)SYMBOLSET$(Color0) $(ColorF)$*$(Color0) "($(ColorLF)$(CURRENT_ARCH_CONFIG_LC)$(Color0))") + @$(LOG_SYMBOLSET) "$*$(Color0) ($(ColorLF)$(CURRENT_ARCH_CONFIG_LC)$(Color0))" $(_v)$(KEXT_CREATE_SYMBOL_SET) \ $(ARCH_FLAGS_$(CURRENT_ARCH_CONFIG)) \ -import $(OBJPATH)/allsymbols \ -export $< \ -output $@ $(_vstdout) -$(DSTROOT_KEXT): $(DSTROOT_KEXT_PATH)/% : $(OBJPATH)/%.symbolset +$(DSTROOT_KEXT): $(DSTROOT_KEXT_PATH)/% : $(SYMROOT_KEXT_PATH)/% ALWAYS $(_v)$(MKDIR) $(dir $@) - $(call makelog,$(ColorF)INSTALL$(Color0) $(ColorF)$(notdir $@)$(Color0) "($(ColorLF)$(CURRENT_ARCH_CONFIG_LC)$(Color0))") + @$(LOG_INSTALL) "$(@F)$(Color0) ($(ColorLF)$(CURRENT_ARCH_CONFIG_LC)$(Color0))" $(_v)$(INSTALL) $(EXEC_INSTALL_FLAGS) $< $@ -$(SYMROOT_KEXT): $(SYMROOT_KEXT_PATH)/% : $(DSTROOT_KEXT_PATH)/% +$(SYMROOT_KEXT): $(SYMBOL_SET_BUILD) ALWAYS $(_v)$(MKDIR) $(dir $@) - $(call makelog,$(ColorF)INSTALL$(Color0) $(ColorF)$(notdir $@)$(Color0) "($(ColorLF)$(CURRENT_ARCH_CONFIG_LC)$(Color0))") - $(_v)$(INSTALL) $(EXEC_INSTALL_FLAGS) $< $@ + @$(LOG_INSTALL) "$(@F)$(Color0) ($(ColorLF)$(CURRENT_ARCH_CONFIG_LC)$(Color0))" + $(_v)if [ $(OBJROOT)/.symbolset.timestamp -nt $@ ]; then \ + $(INSTALL) $(EXEC_INSTALL_FLAGS) $(OBJPATH)/$(@F).symbolset $@; \ + cmdstatus=$$?; \ + else \ + $(LIPO) -create $@ $(OBJPATH)/$(@F).symbolset -output $@ 2>/dev/null || true; \ + cmdstatus=$$?; \ + fi; \ + exit $$cmdstatus -do_config_install:: $(DSTROOT_KEXT) $(SYMROOT_KEXT) +do_config_install:: $(SYMROOT_KEXT) $(DSTROOT_KEXT) # Install helper scripts @@ -86,7 +95,7 @@ endif $(KASAN_HELPER_SCRIPTS): $(DSTROOT)/$(DEVELOPER_EXTRAS_DIR)/% : $(SOURCE)/tools/% $(_v)$(MKDIR) $(dir $@) - $(call makelog,$(ColorH)INSTALL$(Color0) $(ColorF)$(@F)$(Color0)) + @$(LOG_INSTALL) "$(@F)" $(_v)$(INSTALL) $(EXEC_INSTALL_FLAGS) $< $@ do_config_install:: $(KASAN_HELPER_SCRIPTS) diff --git a/san/conf/Makefile b/san/conf/Makefile index 
05c4b79cf..51eddb889 100644 --- a/san/conf/Makefile +++ b/san/conf/Makefile @@ -23,7 +23,7 @@ endif $(TARGET)/$(CURRENT_KERNEL_CONFIG)/Makefile: $(SRCROOT)/SETUP/config/doconf $(OBJROOT)/SETUP/config $(DOCONFDEPS) $(_v)$(MKDIR) $(TARGET)/$(CURRENT_KERNEL_CONFIG) - $(_v)$(SRCROOT)/SETUP/config/doconf -c -cpu $(DOCONF_ARCH_CONFIG_LC) -soc $(CURRENT_MACHINE_CONFIG_LC) -d $(TARGET)/$(CURRENT_KERNEL_CONFIG) -s $(SOURCE) -m $(MASTERCONFDIR) $(CURRENT_KERNEL_CONFIG) + $(_v)$(SRCROOT)/SETUP/config/doconf -c -cpu $(DOCONF_ARCH_CONFIG_LC) -soc $(CURRENT_MACHINE_CONFIG_LC) -platform $(PLATFORM) -d $(TARGET)/$(CURRENT_KERNEL_CONFIG) -s $(SOURCE) -m $(MASTERCONFDIR) $(CURRENT_KERNEL_CONFIG) do_all: $(TARGET)/$(CURRENT_KERNEL_CONFIG)/Makefile $(_v)${MAKE} \ diff --git a/san/conf/Makefile.template b/san/conf/Makefile.template index 42c6ee1ed..6c765fa40 100644 --- a/san/conf/Makefile.template +++ b/san/conf/Makefile.template @@ -63,13 +63,13 @@ $(SOBJS): .SFLAGS $(_v)$(REPLACECONTENTS) $@ $(KASAN) $(COMPONENT).filelist: $(OBJS) .KASANFLAGS - $(call makelog,$(ColorL)LDFILELIST$(Color0) $(ColorLF)$(COMPONENT)$(Color0)) + @$(LOG_LDFILELIST) "$(COMPONENT)" $(_v)for obj in ${OBJS}; do \ $(ECHO) $(TARGET)/$(CURRENT_KERNEL_CONFIG)/$${obj}; \ done > $(COMPONENT).filelist $(TARGET)/$(CURRENT_KERNEL_CONFIG)/kasan_blacklist_dynamic.h: $(SRCROOT)/$(COMPONENT)/kasan-blacklist-dynamic - $(call makelog,$(ColorH)GENERATING$(Color0) $(ColorLF)$(notdir $@)$(Color0)) + @$(LOG_GENERATE) "$(notdir $@)" @$(SRCROOT)/$(COMPONENT)/tools/generate_dynamic_blacklist.py "$<" > "$@" $(SRCROOT)/$(COMPONENT)/kasan_dynamic_blacklist.c: $(TARGET)/$(CURRENT_KERNEL_CONFIG)/kasan_blacklist_dynamic.h diff --git a/san/kasan-arm64.c b/san/kasan-arm64.c index 7fa3a8e56..20a815c61 100644 --- a/san/kasan-arm64.c +++ b/san/kasan-arm64.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016 Apple Inc. All rights reserved. + * Copyright (c) 2016-2020 Apple Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -44,6 +44,7 @@ #include #include +#include #include #include @@ -60,7 +61,12 @@ vm_offset_t physmap_vtop; vm_offset_t shadow_pbase; vm_offset_t shadow_ptop; +#if HIBERNATION +// if we're building a kernel with hibernation support, hibernate_write_image depends on this symbol +vm_offset_t shadow_pnext; +#else static vm_offset_t shadow_pnext; +#endif static vm_offset_t zero_page_phys; static vm_offset_t bootstrap_pgtable_phys; @@ -70,14 +76,20 @@ extern vm_offset_t excepstack, excepstack_top; void kasan_bootstrap(boot_args *, vm_offset_t pgtable); -#define KASAN_SHIFT_ARM64 0xe000000000000000ULL /* Defined in makedefs/MakeInc.def */ +#define KASAN_OFFSET_ARM64 0xe000000000000000ULL /* Defined in makedefs/MakeInc.def */ + +#if defined(ARM_LARGE_MEMORY) +#define KASAN_SHADOW_MIN (VM_MAX_KERNEL_ADDRESS+1) +#define KASAN_SHADOW_MAX 0xffffffffffffffffULL +#else #define KASAN_SHADOW_MIN 0xfffffffc00000000ULL #define KASAN_SHADOW_MAX 0xffffffff80000000ULL +#endif -_Static_assert(KASAN_SHIFT == KASAN_SHIFT_ARM64, "KASan inconsistent shadow shift"); +_Static_assert(KASAN_OFFSET == KASAN_OFFSET_ARM64, "KASan inconsistent shadow offset"); _Static_assert(VM_MAX_KERNEL_ADDRESS < KASAN_SHADOW_MIN, "KASan shadow overlaps with kernel VM"); -_Static_assert((VM_MIN_KERNEL_ADDRESS >> 3) + KASAN_SHIFT_ARM64 >= KASAN_SHADOW_MIN, "KASan shadow does not cover kernel VM"); -_Static_assert((VM_MAX_KERNEL_ADDRESS >> 3) + KASAN_SHIFT_ARM64 < KASAN_SHADOW_MAX, "KASan shadow does not cover kernel VM"); +_Static_assert((VM_MIN_KERNEL_ADDRESS >> KASAN_SCALE) + KASAN_OFFSET_ARM64 >= KASAN_SHADOW_MIN, "KASan shadow does not cover kernel VM"); +_Static_assert((VM_MAX_KERNEL_ADDRESS >> KASAN_SCALE) + KASAN_OFFSET_ARM64 < KASAN_SHADOW_MAX, "KASan shadow does not cover kernel VM"); static uintptr_t alloc_page(void) @@ -263,7 +275,7 @@ void kasan_arch_init(void) { /* Map the physical aperture */ - kasan_map_shadow(kernel_vtop, physmap_vtop - kernel_vtop, true); + kasan_map_shadow(physmap_vbase, physmap_vtop - physmap_vbase, true); #if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR) /* Pre-allocate all the L3 page table pages to avoid triggering KTRR */ @@ -296,7 +308,7 @@ kasan_bootstrap(boot_args *args, vm_offset_t pgtable) shadow_pbase = vm_map_round_page(pbase + args->memSize, ARM_PGMASK); shadow_ptop = shadow_pbase + tosteal; shadow_pnext = shadow_pbase; - shadow_pages_total = (long)((shadow_ptop - shadow_pbase) / ARM_PGBYTES); + shadow_pages_total = (uint32_t)((shadow_ptop - shadow_pbase) / ARM_PGBYTES); /* Set aside a page of zeros we can use for dummy shadow mappings */ zero_page_phys = alloc_page(); @@ -316,6 +328,10 @@ kasan_bootstrap(boot_args *args, vm_offset_t pgtable) kasan_map_shadow_early(intstack_virt, intstack_size, false); kasan_map_shadow_early(excepstack_virt, excepstack_size, false); + + if ((vm_offset_t)args->deviceTreeP - p2v < (vm_offset_t)&_mh_execute_header) { + kasan_map_shadow_early((vm_offset_t)args->deviceTreeP, args->deviceTreeLength, false); + } } bool diff --git a/san/kasan-blacklist b/san/kasan-blacklist index 38df385ad..00c42c778 100644 --- a/san/kasan-blacklist +++ b/san/kasan-blacklist @@ -17,8 +17,17 @@ src:./osfmk/kern/debug.c # Exclude linker sets type:struct linker_set_entry type:linker_set_entry +type:struct startup_entry +type:startup_entry -# Exclude KASAN itself +# Exclude per-cpu data structures the assembly accesses +type:vm_statistics64 +type:struct vm_statistics64 +type:vm_statistics64_data_t +type:struct processor 
+type:processor + +# Exclude KASan itself src:./san/kasan.c src:./san/kasan-fakestack.c src:./san/kasan-x86_64.c diff --git a/san/kasan-blacklist-arm64 b/san/kasan-blacklist-arm64 index 6f1fe4f8b..fae435c3c 100644 --- a/san/kasan-blacklist-arm64 +++ b/san/kasan-blacklist-arm64 @@ -4,6 +4,7 @@ # Exclude KASan runtime src:./san/kasan-arm64.c +src:./osfmk/arm/machine_routines_common.c # These use a local variable to work out which stack we're on, but can end up with # a fakestack allocation. diff --git a/san/kasan-blacklist-x86_64 b/san/kasan-blacklist-x86_64 index 69a8dc15f..fbdbbf414 100644 --- a/san/kasan-blacklist-x86_64 +++ b/san/kasan-blacklist-x86_64 @@ -5,8 +5,8 @@ # Early boot AUTOGEN src:./bsd/kern/kdebug.c src:./bsd/kern/kern_csr.c -src:./osfmk/corecrypto/cc/src/cc_clear.c -src:./osfmk/corecrypto/ccdbrg/src/ccdrbg_nisthmac.c +src:./osfmk/corecrypto/cc_clear.c +src:./osfmk/corecrypto/ccdrbg_nisthmac.c src:./osfmk/device/subrs.c src:./osfmk/i386/Diagnostics.c src:./osfmk/i386/acpi.c @@ -31,6 +31,8 @@ src:./pexpert/i386/pe_bootargs.c src:./pexpert/i386/pe_identify_machine.c src:./pexpert/i386/pe_init.c src:./pexpert/i386/pe_serial.c +# added by hand +src:./osfmk/mach/dyld_kernel_fixups.h # Nothing below is needed before kasan init, so most of it should go away. src:./osfmk/corecrypto/*.c @@ -64,9 +66,15 @@ src:./osfmk/i386/mp_desc.c src:./osfmk/i386/pmap_common.c src:./osfmk/i386/pmap_x86_common.c src:./osfmk/i386/pmCPU.c -src:./osfmk/i386/startup64.c src:./osfmk/i386/lapic_native.c src:./osfmk/vm/vm_compressor.c fun:doublemap_init fun:getsegbynamefromheader fun:getsectbynamefromheader + +src:./osfmk/i386/*.h +src:./EXTERNAL_HEADERS/corecrypto/*.h +src:./osfmk/corecrypto/*.h +src:./osfmk/kern/queue.h +src:*/libkern/libkern/*.h + diff --git a/san/kasan-fakestack.c b/san/kasan-fakestack.c index add9941a9..9f45135d6 100644 --- a/san/kasan-fakestack.c +++ b/san/kasan-fakestack.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016 Apple Inc. All rights reserved. + * Copyright (c) 2016-2020 Apple Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -189,7 +189,7 @@ kasan_fakestack_alloc(int sz_class, size_t realsz) return 0; } - ret = (uptr)zget(zone); + ret = (uptr)zalloc_noblock(zone); if (ret) { size_t leftrz = 32 + FAKESTACK_HEADER_SZ; @@ -272,7 +272,6 @@ kasan_init_fakestack(void) { /* allocate the fakestack zones */ for (int i = 0; i < FAKESTACK_NUM_SZCLASS; i++) { - zone_t z; unsigned long sz = (fakestack_min << i) + FAKESTACK_HEADER_SZ; size_t maxsz = 256UL * 1024; @@ -282,15 +281,12 @@ kasan_init_fakestack(void) } snprintf(fakestack_names[i], 16, "fakestack.%d", i); - z = zinit(sz, maxsz, sz, fakestack_names[i]); - assert(z); - zone_change(z, Z_NOCALLOUT, TRUE); - zone_change(z, Z_EXHAUST, TRUE); - zone_change(z, Z_EXPAND, FALSE); - zone_change(z, Z_COLLECT, FALSE); - zone_change(z, Z_KASAN_QUARANTINE, FALSE); - zfill(z, maxsz / sz); - fakestack_zones[i] = z; + fakestack_zones[i] = zone_create_ext(fakestack_names[i], sz, + ZC_NOCALLOUT | ZC_NOGC | ZC_KASAN_NOREDZONE | ZC_KASAN_NOQUARANTINE, + ZONE_ID_ANY, ^(zone_t z) { + zone_set_exhaustible(z, maxsz); + }); + zfill(fakestack_zones[i], (int)maxsz / sz); } /* globally enable */ diff --git a/san/kasan-test.c b/san/kasan-test.c index e64c69d84..66c16c391 100644 --- a/san/kasan-test.c +++ b/san/kasan-test.c @@ -189,7 +189,7 @@ test_stack_overflow(struct kasan_test *t) { TEST_START(t); - int i; + uint8_t i; volatile uint8_t a[STACK_ARRAY_SZ]; for (i = 0; i < STACK_ARRAY_SZ; i++) { diff --git a/san/kasan-x86_64.c b/san/kasan-x86_64.c index 9f266870a..a1aa32c48 100644 --- a/san/kasan-x86_64.c +++ b/san/kasan-x86_64.c @@ -177,11 +177,11 @@ kasan_map_shadow_superpage_zero(vm_offset_t address, vm_size_t size) void kasan_map_shadow(vm_offset_t address, vm_size_t size, bool is_zero) { - size = (size + 0x7UL) & ~0x7UL; + size = kasan_granule_round(size); vm_offset_t shadow_base = vm_map_trunc_page(SHADOW_FOR_ADDRESS(address), PAGE_MASK); vm_offset_t shadow_top = vm_map_round_page(SHADOW_FOR_ADDRESS(address + size), PAGE_MASK); - assert((size & 0x7) == 0); + assert(kasan_granule_partial(size) == 0); for (; shadow_base < shadow_top; shadow_base += I386_PGBYTES) { split_addr_t addr = split_address(shadow_base); @@ -290,7 +290,7 @@ kasan_reserve_memory(void *_args) total_pages += mptr_tmp->NumberOfPages; } - to_steal = (total_pages * STOLEN_MEM_PERCENT) / 100 + (STOLEN_MEM_BYTES / I386_PGBYTES); + to_steal = (unsigned long)(total_pages * STOLEN_MEM_PERCENT) / 100 + (STOLEN_MEM_BYTES / I386_PGBYTES); /* Search for a range large enough to steal from */ for (i = 0, mptr_tmp = mptr; i < mcount; i++, mptr_tmp = (EfiMemoryRange *)(((vm_offset_t)mptr_tmp) + msize)) { @@ -305,7 +305,7 @@ kasan_reserve_memory(void *_args) shadow_pbase = mptr_tmp->PhysicalStart + (mptr_tmp->NumberOfPages << I386_PGSHIFT); shadow_ptop = shadow_pbase + (to_steal << I386_PGSHIFT); shadow_pnext = shadow_pbase; - shadow_pages_total = to_steal; + shadow_pages_total = (unsigned int)to_steal; shadow_stolen_idx = i; /* Set aside a page of zeros we can use for dummy shadow mappings */ diff --git a/san/kasan.c b/san/kasan.c index d66ac2f64..39f046220 100644 --- a/san/kasan.c +++ b/san/kasan.c @@ -37,8 +37,6 @@ #include #include #include -#include -#include #include #include #include @@ -56,7 +54,7 @@ #include #include -const uintptr_t __asan_shadow_memory_dynamic_address = KASAN_SHIFT; +const uintptr_t __asan_shadow_memory_dynamic_address = KASAN_OFFSET; static unsigned kexts_loaded; unsigned shadow_pages_total; @@ -146,7 +144,7 @@ kasan_poison_active(uint8_t flags) return 
kasan_check_enabled(TYPE_POISON_HEAP); default: return true; - }; + } } /* @@ -156,24 +154,22 @@ void NOINLINE kasan_poison(vm_offset_t base, vm_size_t size, vm_size_t leftrz, vm_size_t rightrz, uint8_t flags) { uint8_t *shadow = SHADOW_FOR_ADDRESS(base); - uint8_t partial = size & 0x07; + uint8_t partial = (uint8_t)kasan_granule_partial(size); vm_size_t total = leftrz + size + rightrz; vm_size_t i = 0; - /* base must be 8-byte aligned */ - /* any left redzone must be a multiple of 8 */ - /* total region must cover 8-byte multiple */ - assert((base & 0x07) == 0); - assert((leftrz & 0x07) == 0); - assert((total & 0x07) == 0); + /* ensure base, leftrz and total allocation size are granule-aligned */ + assert(kasan_granule_partial(base) == 0); + assert(kasan_granule_partial(leftrz) == 0); + assert(kasan_granule_partial(total) == 0); if (!kasan_enabled || !kasan_poison_active(flags)) { return; } - leftrz /= 8; - size /= 8; - total /= 8; + leftrz >>= KASAN_SCALE; + size >>= KASAN_SCALE; + total >>= KASAN_SCALE; uint8_t l_flags = flags; uint8_t r_flags = flags; @@ -207,10 +203,8 @@ kasan_poison(vm_offset_t base, vm_size_t size, vm_size_t leftrz, vm_size_t right void kasan_poison_range(vm_offset_t base, vm_size_t size, uint8_t flags) { - /* base must be 8-byte aligned */ - /* total region must cover 8-byte multiple */ - assert((base & 0x07) == 0); - assert((size & 0x07) == 0); + assert(kasan_granule_partial(base) == 0); + assert(kasan_granule_partial(size) == 0); kasan_poison(base, 0, 0, size, flags); } @@ -221,16 +215,14 @@ kasan_unpoison(void *base, vm_size_t size) } void NOINLINE -kasan_unpoison_stack(vm_offset_t base, vm_size_t size) +kasan_unpoison_stack(uintptr_t base, size_t size) { - assert(base); - assert(size); + assert(base > 0); + assert(size > 0); - /* align base and size to 8 bytes */ - vm_offset_t align = base & 0x7; - base -= align; - size += align; - size = (size + 7) & ~0x7; + size_t partial = kasan_granule_partial(base); + base = kasan_granule_trunc(base); + size = kasan_granule_round(size + partial); kasan_unpoison((void *)base, size); } @@ -247,12 +239,9 @@ kasan_rz_clobber(vm_offset_t base, vm_size_t size, vm_size_t leftrz, vm_size_t r const uint8_t c0ffee[] = { 0xc0, 0xff, 0xee, 0xc0 }; uint8_t *buf = (uint8_t *)base; - /* base must be 8-byte aligned */ - /* any left redzone must be a multiple of 8 */ - /* total region must cover 8-byte multiple */ - assert((base & 0x07) == 0); - assert((leftrz & 0x07) == 0); - assert(((size + leftrz + rightrz) & 0x07) == 0); + assert(kasan_granule_partial(base) == 0); + assert(kasan_granule_partial(leftrz) == 0); + assert(kasan_granule_partial(size + leftrz + rightrz) == 0); for (i = 0; i < leftrz; i++) { buf[i] = deadbeef[i % 4]; @@ -305,19 +294,20 @@ kasan_check_range(const void *x, size_t sz, access_t access) * Return true if [base, base+sz) is unpoisoned or has given shadow value. 
*/ bool -kasan_check_shadow(vm_address_t base, vm_size_t sz, uint8_t shadow) +kasan_check_shadow(vm_address_t addr, vm_size_t sz, uint8_t shadow) { - sz -= 8 - (base % 8); - base += 8 - (base % 8); + /* round 'base' up to skip any partial, which won't match 'shadow' */ + uintptr_t base = kasan_granule_round(addr); + sz -= base - addr; - vm_address_t end = base + sz; + uintptr_t end = base + sz; while (base < end) { uint8_t *sh = SHADOW_FOR_ADDRESS(base); if (*sh && *sh != shadow) { return false; } - base += 8; + base += KASAN_GRANULE; } return true; } @@ -325,7 +315,7 @@ kasan_check_shadow(vm_address_t base, vm_size_t sz, uint8_t shadow) static void kasan_report_leak(vm_address_t base, vm_size_t sz, vm_offset_t offset, vm_size_t leak_sz) { - if (leak_fatal_threshold > leak_threshold && leak_sz >= leak_fatal_threshold){ + if (leak_fatal_threshold > leak_threshold && leak_sz >= leak_fatal_threshold) { kasan_violation(base + offset, leak_sz, TYPE_LEAK, REASON_UNINITIALIZED); } @@ -344,11 +334,23 @@ kasan_report_leak(vm_address_t base, vm_size_t sz, vm_offset_t offset, vm_size_t } DTRACE_KASAN5(leak_detected, - vm_address_t, base, - vm_size_t, sz, - vm_offset_t, offset, - vm_size_t, leak_sz, - char *, string_rep); + vm_address_t, base, + vm_size_t, sz, + vm_offset_t, offset, + vm_size_t, leak_sz, + char *, string_rep); +} + +/* + * Initialize buffer by writing unique pattern that can be looked for + * in copyout path to detect uninitialized memory leaks. + */ +void +kasan_leak_init(vm_address_t addr, vm_size_t sz) +{ + if (enabled_checks & TYPE_LEAK) { + __nosan_memset((void *)addr, KASAN_UNINITIALIZED_HEAP, sz); + } } /* @@ -432,7 +434,7 @@ static const char *shadow_strings[] = { static size_t kasan_shadow_crashlog(uptr p, char *buf, size_t len) { - int i,j; + int i, j; size_t n = 0; int before = CRASH_CONTEXT_BEFORE; int after = CRASH_CONTEXT_AFTER; @@ -445,8 +447,8 @@ kasan_shadow_crashlog(uptr p, char *buf, size_t len) shadow &= ~((uptr)0xf); shadow -= 16 * before; - n += scnprintf(buf+n, len-n, - " Shadow 0 1 2 3 4 5 6 7 8 9 a b c d e f\n"); + n += scnprintf(buf + n, len - n, + " Shadow 0 1 2 3 4 5 6 7 8 9 a b c d e f\n"); for (i = 0; i < 1 + before + after; i++, shadow += 16) { if ((vm_map_round_page(shadow, HW_PAGE_MASK) != shadow_page) && !kasan_is_shadow_mapped(shadow)) { @@ -454,7 +456,7 @@ kasan_shadow_crashlog(uptr p, char *buf, size_t len) continue; } - n += scnprintf(buf+n, len-n, " %16lx:", shadow); + n += scnprintf(buf + n, len - n, " %16lx:", shadow); char *left = " "; char *right; @@ -470,13 +472,13 @@ kasan_shadow_crashlog(uptr p, char *buf, size_t len) right = ""; } - n += scnprintf(buf+n, len-n, "%s%02x%s", left, (unsigned)*x, right); + n += scnprintf(buf + n, len - n, "%s%02x%s", left, (unsigned)*x, right); left = ""; } - n += scnprintf(buf+n, len-n, "\n"); + n += scnprintf(buf + n, len - n, "\n"); } - n += scnprintf(buf+n, len-n, "\n"); + n += scnprintf(buf + n, len - n, "\n"); return n; } @@ -496,14 +498,14 @@ kasan_report_internal(uptr p, uptr width, access_t access, violation_t reason, b buf[0] = '\0'; if (reason == REASON_MOD_OOB || reason == REASON_BAD_METADATA) { - n += scnprintf(buf+n, len-n, "KASan: free of corrupted/invalid object %#lx\n", p); + n += scnprintf(buf + n, len - n, "KASan: free of corrupted/invalid object %#lx\n", p); } else if (reason == REASON_MOD_AFTER_FREE) { - n += scnprintf(buf+n, len-n, "KASan: UaF of quarantined object %#lx\n", p); + n += scnprintf(buf + n, len - n, "KASan: UaF of quarantined object %#lx\n", p); } else { - n += 
scnprintf(buf+n, len-n, "KASan: invalid %lu-byte %s %#lx [%s]\n", - width, access_str(access), p, shadow_str); + n += scnprintf(buf + n, len - n, "KASan: invalid %lu-byte %s %#lx [%s]\n", + width, access_str(access), p, shadow_str); } - n += kasan_shadow_crashlog(p, buf+n, len-n); + n += kasan_shadow_crashlog(p, buf + n, len - n); if (dopanic) { panic("%s", buf); @@ -540,11 +542,11 @@ kasan_log_report(uptr p, uptr width, access_t access, violation_t reason) NULL); /* ignore current frame */ buf[0] = '\0'; - l += scnprintf(buf+l, len-l, "Backtrace: "); + l += scnprintf(buf + l, len - l, "Backtrace: "); for (uint32_t i = 0; i < nframes; i++) { - l += scnprintf(buf+l, len-l, "%lx,", VM_KERNEL_UNSLIDE(bt[i])); + l += scnprintf(buf + l, len - l, "%lx,", VM_KERNEL_UNSLIDE(bt[i])); } - l += scnprintf(buf+l, len-l, "\n"); + l += scnprintf(buf + l, len - l, "\n"); printf("%s", buf); } @@ -561,8 +563,16 @@ REPORT_DECLARE(4) REPORT_DECLARE(8) REPORT_DECLARE(16) -void OS_NORETURN __asan_report_load_n(uptr p, unsigned long sz) { kasan_crash_report(p, sz, TYPE_LOAD, 0); } -void OS_NORETURN __asan_report_store_n(uptr p, unsigned long sz) { kasan_crash_report(p, sz, TYPE_STORE, 0); } +void OS_NORETURN +__asan_report_load_n(uptr p, unsigned long sz) +{ + kasan_crash_report(p, sz, TYPE_LOAD, 0); +} +void OS_NORETURN +__asan_report_store_n(uptr p, unsigned long sz) +{ + kasan_crash_report(p, sz, TYPE_STORE, 0); +} /* unpoison the current stack */ void NOINLINE @@ -605,18 +615,18 @@ kasan_range_poisoned(vm_offset_t base, vm_size_t size, vm_offset_t *first_invali return false; } - size += base & 0x07; - base &= ~(vm_offset_t)0x07; + size += kasan_granule_partial(base); + base = kasan_granule_trunc(base); shadow = SHADOW_FOR_ADDRESS(base); - vm_size_t limit = (size + 7) / 8; + size_t limit = (size + KASAN_GRANULE - 1) / KASAN_GRANULE; /* XXX: to make debugging easier, catch unmapped shadow here */ - for (i = 0; i < limit; i++, size -= 8) { + for (i = 0; i < limit; i++, size -= KASAN_GRANULE) { assert(size > 0); uint8_t s = shadow[i]; - if (s == 0 || (size < 8 && s >= size && s <= 7)) { + if (s == 0 || (size < KASAN_GRANULE && s >= size && s < KASAN_GRANULE)) { /* valid */ } else { goto fail; @@ -625,10 +635,10 @@ kasan_range_poisoned(vm_offset_t base, vm_size_t size, vm_offset_t *first_invali return false; - fail: +fail: if (first_invalid) { /* XXX: calculate the exact first byte that failed */ - *first_invalid = base + i*8; + *first_invalid = base + i * 8; } return true; } @@ -753,9 +763,9 @@ kasan_debug_touch_mappings(vm_offset_t base, vm_size_t sz) vm_offset_t addr = base + i; uint8_t *x = SHADOW_FOR_ADDRESS(addr); tmp1 = *x; - asm volatile("" ::: "memory"); + asm volatile ("" ::: "memory"); tmp2 = *x; - asm volatile("" ::: "memory"); + asm volatile ("" ::: "memory"); assert(tmp1 == tmp2); } #else @@ -928,11 +938,17 @@ kasan_alloc_resize(vm_size_t size) panic("allocation size overflow (%lu)", size); } + if (size >= 128) { + /* Add a little extra right redzone to larger objects. Gives us extra + * overflow protection, and more space for the backtrace. 
*/ + size += 16; + } + /* add left and right redzones */ size += KASAN_GUARD_PAD; - /* ensure the final allocation is an 8-byte multiple */ - size += 8 - (size % 8); + /* ensure the final allocation is a multiple of the granule */ + size = kasan_granule_round(size); return size; } @@ -949,8 +965,8 @@ kasan_alloc_bt(uint32_t *ptr, vm_size_t sz, vm_size_t skip) vm_size_t frames = sz; if (frames > 0) { - frames = min(frames + skip, BACKTRACE_MAXFRAMES); - frames = backtrace(bt, frames, NULL); + frames = min((uint32_t)(frames + skip), BACKTRACE_MAXFRAMES); + frames = backtrace(bt, (uint32_t)frames, NULL); while (frames > sz && skip > 0) { bt++; @@ -1008,7 +1024,7 @@ kasan_alloc_retrieve_bt(vm_address_t addr, uintptr_t frames[static BACKTRACE_MAX struct kasan_alloc_header *header = header_for_user_addr(alloc_base); if (magic_for_addr(alloc_base, LIVE_XOR) == header->magic) { struct kasan_alloc_footer *footer = footer_for_user_addr(alloc_base, &fsize); - if ((fsize/sizeof(footer->backtrace[0])) >= header->frames) { + if ((fsize / sizeof(footer->backtrace[0])) >= header->frames) { num_frames = header->frames; for (size_t i = 0; i < num_frames; i++) { frames[i] = footer->backtrace[i] + vm_kernel_slid_base; @@ -1034,8 +1050,8 @@ kasan_alloc(vm_offset_t addr, vm_size_t size, vm_size_t req, vm_size_t leftrz) return 0; } assert(size > 0); - assert((addr % 8) == 0); - assert((size % 8) == 0); + assert(kasan_granule_partial(addr) == 0); + assert(kasan_granule_partial(size) == 0); vm_size_t rightrz = size - req - leftrz; @@ -1044,21 +1060,17 @@ kasan_alloc(vm_offset_t addr, vm_size_t size, vm_size_t req, vm_size_t leftrz) addr += leftrz; - if (enabled_checks & TYPE_LEAK) { - __nosan_memset((void *)addr, KASAN_UNINITIALIZED_HEAP, req); - } - /* stash the allocation sizes in the left redzone */ struct kasan_alloc_header *h = header_for_user_addr(addr); h->magic = magic_for_addr(addr, LIVE_XOR); - h->left_rz = leftrz; - h->alloc_size = size; - h->user_size = req; + h->left_rz = (uint32_t)leftrz; + h->alloc_size = (uint32_t)size; + h->user_size = (uint32_t)req; /* ... and a backtrace in the right redzone */ vm_size_t fsize; struct kasan_alloc_footer *f = footer_for_user_addr(addr, &fsize); - h->frames = kasan_alloc_bt(f->backtrace, fsize, 2); + h->frames = (uint32_t)kasan_alloc_bt(f->backtrace, fsize, 2); /* checksum the whole object, minus the user part */ h->crc = kasan_alloc_crc(addr); @@ -1077,6 +1089,7 @@ kasan_dealloc(vm_offset_t addr, vm_size_t *size) assert(size && addr); struct kasan_alloc_header *h = header_for_user_addr(addr); *size = h->alloc_size; + h->magic = 0; /* clear the magic so the debugger doesn't find a bogus object */ return addr - h->left_rz; } @@ -1102,8 +1115,8 @@ kasan_check_free(vm_offset_t addr, vm_size_t size, unsigned heap_type) /* map heap type to an internal access type */ access_t type = heap_type == KASAN_HEAP_KALLOC ? TYPE_KFREE : - heap_type == KASAN_HEAP_ZALLOC ? TYPE_ZFREE : - heap_type == KASAN_HEAP_FAKESTACK ? TYPE_FSFREE : 0; + heap_type == KASAN_HEAP_ZALLOC ? TYPE_ZFREE : + heap_type == KASAN_HEAP_FAKESTACK ? 
TYPE_FSFREE : 0; /* check the magic and crc match */ if (h->magic != magic_for_addr(addr, LIVE_XOR)) { @@ -1122,7 +1135,7 @@ kasan_check_free(vm_offset_t addr, vm_size_t size, unsigned heap_type) /* Check that the redzones are valid */ if (!kasan_check_shadow(addr - h->left_rz, h->left_rz, ASAN_HEAP_LEFT_RZ) || - !kasan_check_shadow(addr + h->user_size, rightrz_sz, ASAN_HEAP_RIGHT_RZ)) { + !kasan_check_shadow(addr + h->user_size, rightrz_sz, ASAN_HEAP_RIGHT_RZ)) { kasan_violation(addr, size, type, REASON_BAD_METADATA); } @@ -1163,8 +1176,8 @@ struct quarantine { }; struct quarantine quarantines[] = { - { STAILQ_HEAD_INITIALIZER((quarantines[KASAN_HEAP_ZALLOC].freelist)), 0, QUARANTINE_ENTRIES, 0, QUARANTINE_MAXSIZE }, - { STAILQ_HEAD_INITIALIZER((quarantines[KASAN_HEAP_KALLOC].freelist)), 0, QUARANTINE_ENTRIES, 0, QUARANTINE_MAXSIZE }, + { STAILQ_HEAD_INITIALIZER((quarantines[KASAN_HEAP_ZALLOC].freelist)), 0, QUARANTINE_ENTRIES, 0, QUARANTINE_MAXSIZE }, + { STAILQ_HEAD_INITIALIZER((quarantines[KASAN_HEAP_KALLOC].freelist)), 0, QUARANTINE_ENTRIES, 0, QUARANTINE_MAXSIZE }, { STAILQ_HEAD_INITIALIZER((quarantines[KASAN_HEAP_FAKESTACK].freelist)), 0, QUARANTINE_ENTRIES, 0, QUARANTINE_MAXSIZE } }; @@ -1179,8 +1192,8 @@ fle_crc(struct freelist_entry *fle) */ void NOINLINE kasan_free_internal(void **addrp, vm_size_t *sizep, int type, - zone_t *zone, vm_size_t user_size, int locked, - bool doquarantine) + zone_t *zone, vm_size_t user_size, int locked, + bool doquarantine) { vm_size_t size = *sizep; vm_offset_t addr = *(vm_offset_t *)addrp; @@ -1257,7 +1270,7 @@ kasan_free_internal(void **addrp, vm_size_t *sizep, int type, if (type != KASAN_HEAP_KALLOC) { assert((vm_offset_t)zone >= VM_MIN_KERNEL_AND_KEXT_ADDRESS && - (vm_offset_t)zone <= VM_MAX_KERNEL_ADDRESS); + (vm_offset_t)zone <= VM_MAX_KERNEL_ADDRESS); *zone = tofree->zone; } @@ -1274,18 +1287,17 @@ kasan_free_internal(void **addrp, vm_size_t *sizep, int type, /* clobber the quarantine header */ __nosan_bzero((void *)addr, sizeof(struct freelist_entry)); - } else { /* quarantine is not full - don't really free anything */ addr = 0; } - free_current_locked: +free_current_locked: if (!locked) { kasan_unlock(flg); } - free_current: +free_current: *addrp = (void *)addr; if (addr) { kasan_unpoison((void *)addr, size); @@ -1295,7 +1307,7 @@ kasan_free_internal(void **addrp, vm_size_t *sizep, int type, void NOINLINE kasan_free(void **addrp, vm_size_t *sizep, int type, zone_t *zone, - vm_size_t user_size, bool quarantine) + vm_size_t user_size, bool quarantine) { kasan_free_internal(addrp, sizep, type, zone, user_size, 0, quarantine); @@ -1347,19 +1359,19 @@ kasan_unpoison_cxx_array_cookie(void *ptr) #define ACCESS_CHECK_DECLARE(type, sz, access) \ void __asan_##type##sz(uptr addr) { \ - kasan_check_range((const void *)addr, sz, access); \ + kasan_check_range((const void *)addr, sz, access); \ } \ void OS_NORETURN UNSUPPORTED_API(__asan_exp_##type##sz, uptr a, int32_t b); -ACCESS_CHECK_DECLARE(load, 1, TYPE_LOAD); -ACCESS_CHECK_DECLARE(load, 2, TYPE_LOAD); -ACCESS_CHECK_DECLARE(load, 4, TYPE_LOAD); -ACCESS_CHECK_DECLARE(load, 8, TYPE_LOAD); -ACCESS_CHECK_DECLARE(load, 16, TYPE_LOAD); -ACCESS_CHECK_DECLARE(store, 1, TYPE_STORE); -ACCESS_CHECK_DECLARE(store, 2, TYPE_STORE); -ACCESS_CHECK_DECLARE(store, 4, TYPE_STORE); -ACCESS_CHECK_DECLARE(store, 8, TYPE_STORE); +ACCESS_CHECK_DECLARE(load, 1, TYPE_LOAD); +ACCESS_CHECK_DECLARE(load, 2, TYPE_LOAD); +ACCESS_CHECK_DECLARE(load, 4, TYPE_LOAD); +ACCESS_CHECK_DECLARE(load, 8, TYPE_LOAD); 
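The kasan.c hunks above replace open-coded "& 0x07" / "/ 8" arithmetic with the KASAN_SCALE / KASAN_GRANULE / kasan_granule_*() helpers whose definitions appear in the san/kasan_internal.h hunk further below. A minimal standalone sketch of that arithmetic follows, assuming the default 8-byte granule; it is illustrative only and not part of the patch, and SCALE, GRANULE and the granule_*() names are local stand-ins rather than the kernel symbols.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-ins for KASAN_SCALE / KASAN_GRANULE / kasan_granule_*() in the patch. */
#define SCALE    3UL                  /* one shadow byte tracks 1 << SCALE bytes */
#define GRANULE  (1UL << SCALE)
#define GMASK    (GRANULE - 1UL)

static uintptr_t granule_trunc(uintptr_t x)   { return x & ~GMASK; }
static uintptr_t granule_round(uintptr_t x)   { return (x + GMASK) & ~GMASK; }
static size_t    granule_partial(uintptr_t x) { return x & GMASK; }

int main(void)
{
    uintptr_t size = 13;    /* a 13-byte object: one full granule plus 5 bytes */

    assert(granule_trunc(size) == 8);     /* start of the last full granule   */
    assert(granule_round(size) == 16);    /* shadow must cover two granules   */
    assert(granule_partial(size) == 5);   /* valid bytes in the final granule */

    /*
     * Shadow encoding as used by kasan_poison()/kasan_check_shadow():
     * 0 means the whole granule is addressable, 1..GRANULE-1 means only that
     * many leading bytes are addressable.
     */
    uint8_t shadow[2] = { 0, (uint8_t)granule_partial(size) };

    printf("granules=%lu shadow={%d,%d}\n",
        (unsigned long)(granule_round(size) / GRANULE), shadow[0], shadow[1]);
    return 0;
}

Expressing the scale as a macro rather than hard-coding 8 is presumably what lets the same assertions and poisoning logic work unchanged if a larger granule is ever configured.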
+ACCESS_CHECK_DECLARE(load, 16, TYPE_LOAD); +ACCESS_CHECK_DECLARE(store, 1, TYPE_STORE); +ACCESS_CHECK_DECLARE(store, 2, TYPE_STORE); +ACCESS_CHECK_DECLARE(store, 4, TYPE_STORE); +ACCESS_CHECK_DECLARE(store, 8, TYPE_STORE); ACCESS_CHECK_DECLARE(store, 16, TYPE_STORE); void @@ -1382,7 +1394,7 @@ kasan_set_shadow(uptr addr, size_t sz, uint8_t val) #define SET_SHADOW_DECLARE(val) \ void __asan_set_shadow_##val(uptr addr, size_t sz) { \ - kasan_set_shadow(addr, sz, 0x##val); \ + kasan_set_shadow(addr, sz, 0x##val); \ } SET_SHADOW_DECLARE(00) @@ -1515,19 +1527,19 @@ SYSCTL_UINT(_kern_kasan, OID_AUTO, leak_fatal_threshold, CTLFLAG_RW, &leak_fatal SYSCTL_UINT(_kern_kasan, OID_AUTO, memused, CTLFLAG_RD, &shadow_pages_used, 0, ""); SYSCTL_UINT(_kern_kasan, OID_AUTO, memtotal, CTLFLAG_RD, &shadow_pages_total, 0, ""); SYSCTL_UINT(_kern_kasan, OID_AUTO, kexts, CTLFLAG_RD, &kexts_loaded, 0, ""); -SYSCTL_COMPAT_UINT(_kern_kasan, OID_AUTO, debug, CTLFLAG_RD, NULL, KASAN_DEBUG, ""); -SYSCTL_COMPAT_UINT(_kern_kasan, OID_AUTO, zalloc, CTLFLAG_RD, NULL, KASAN_ZALLOC, ""); -SYSCTL_COMPAT_UINT(_kern_kasan, OID_AUTO, kalloc, CTLFLAG_RD, NULL, KASAN_KALLOC, ""); +SYSCTL_COMPAT_UINT(_kern_kasan, OID_AUTO, debug, CTLFLAG_RD, NULL, KASAN_DEBUG, ""); +SYSCTL_COMPAT_UINT(_kern_kasan, OID_AUTO, zalloc, CTLFLAG_RD, NULL, KASAN_ZALLOC, ""); +SYSCTL_COMPAT_UINT(_kern_kasan, OID_AUTO, kalloc, CTLFLAG_RD, NULL, KASAN_KALLOC, ""); SYSCTL_COMPAT_UINT(_kern_kasan, OID_AUTO, dynamicbl, CTLFLAG_RD, NULL, KASAN_DYNAMIC_BLACKLIST, ""); SYSCTL_PROC(_kern_kasan, OID_AUTO, fakestack, - CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, - 0, 0, sysctl_fakestack_enable, "I", ""); + CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, + 0, 0, sysctl_fakestack_enable, "I", ""); SYSCTL_PROC(_kern_kasan, OID_AUTO, test, - CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, - 0, 0, sysctl_kasan_test, "I", ""); + CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, + 0, 0, sysctl_kasan_test, "I", ""); SYSCTL_PROC(_kern_kasan, OID_AUTO, fail, - CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, - 0, 1, sysctl_kasan_test, "I", ""); + CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, + 0, 1, sysctl_kasan_test, "I", ""); diff --git a/san/kasan.h b/san/kasan.h index fcfc44462..d7a43793b 100644 --- a/san/kasan.h +++ b/san/kasan.h @@ -110,7 +110,8 @@ void kasan_fakestack_gc(thread_t thread); /* free and poison all unused fakest void kasan_fakestack_suspend(void); void kasan_fakestack_resume(void); -/* check for uninitialized memory */ +/* Initialization and check for uninitialized memory */ +void kasan_leak_init(vm_address_t addr, vm_size_t sz); void kasan_check_uninitialized(vm_address_t base, vm_size_t sz); struct kasan_test; @@ -130,10 +131,14 @@ void kasan_notify_address_zero(vm_offset_t, vm_size_t); #elif __x86_64__ extern void kasan_map_low_fixed_regions(void); extern unsigned shadow_stolen_idx; -extern vm_offset_t shadow_pnext, shadow_ptop; #endif #endif +#if HIBERNATION +// if we're building a kernel with hibernation support, hibernate_write_image depends on these symbols +extern vm_offset_t shadow_pnext, shadow_ptop; +#endif /* HIBERNATION */ + /* * Allocator hooks */ diff --git a/san/kasan_dynamic_blacklist.c b/san/kasan_dynamic_blacklist.c index cb661abd0..14f68ee64 100644 --- a/san/kasan_dynamic_blacklist.c +++ b/san/kasan_dynamic_blacklist.c @@ -201,7 +201,7 @@ kasan_dybl_load_kext(uintptr_t addr, const char *kextname) kernel_segment_command_t *seg = (void *)cmd; bool is_exec = seg->initprot & VM_PROT_EXECUTE; -#if CONFIG_EMBEDDED +#if defined(__arm__) || defined(__arm64__) if (is_exec 
&& strcmp("__TEXT_EXEC", seg->segname) != 0) { is_exec = false; } @@ -241,8 +241,7 @@ kasan_dybl_unload_kext(uintptr_t addr) if (cmd->cmd == LC_SEGMENT_KERNEL) { kernel_segment_command_t *seg = (void *)cmd; bool is_exec = seg->initprot & VM_PROT_EXECUTE; - -#if CONFIG_EMBEDDED +#if defined(__arm__) || defined(__arm64__) if (is_exec && strcmp("__TEXT_EXEC", seg->segname) != 0) { is_exec = false; } @@ -475,7 +474,7 @@ add_blacklist_entry(const char *kext, const char *func, access_t type) if (kext) { size_t sz = __nosan_strlen(kext) + 1; if (sz > 1) { - char *s = kalloc(sz); + char *s = zalloc_permanent(sz, ZALIGN_NONE); __nosan_strlcpy(s, kext, sz); ble->kext_name = s; } @@ -484,7 +483,7 @@ add_blacklist_entry(const char *kext, const char *func, access_t type) if (func) { size_t sz = __nosan_strlen(func) + 1; if (sz > 1) { - char *s = kalloc(sz); + char *s = zalloc_permanent(sz, ZALIGN_NONE); __nosan_strlcpy(s, func, sz); ble->func_name = s; } diff --git a/san/kasan_internal.h b/san/kasan_internal.h index f0565a004..0ecb75ef0 100644 --- a/san/kasan_internal.h +++ b/san/kasan_internal.h @@ -80,12 +80,37 @@ typedef uintptr_t uptr; # error KASAN undefined #endif -#ifndef KASAN_SHIFT -# error KASAN_SHIFT undefined +#ifndef KASAN_OFFSET +# error KASAN_OFFSET undefined #endif -#define ADDRESS_FOR_SHADOW(x) (((x) - KASAN_SHIFT) << 3) -#define SHADOW_FOR_ADDRESS(x) (uint8_t *)(((x) >> 3) + KASAN_SHIFT) +#ifndef KASAN_SCALE +# error KASAN_SCALE undefined +#endif + +#define KASAN_GRANULE (1UL << KASAN_SCALE) +#define KASAN_GRANULE_MASK (KASAN_GRANULE - 1UL) + +static inline uintptr_t +kasan_granule_trunc(uintptr_t x) +{ + return x & ~KASAN_GRANULE_MASK; +} + +static inline uintptr_t +kasan_granule_round(uintptr_t x) +{ + return (x + KASAN_GRANULE_MASK) & ~KASAN_GRANULE_MASK; +} + +static inline size_t +kasan_granule_partial(uintptr_t x) +{ + return x & KASAN_GRANULE_MASK; +} + +#define ADDRESS_FOR_SHADOW(x) (((x) - KASAN_OFFSET) << KASAN_SCALE) +#define SHADOW_FOR_ADDRESS(x) (uint8_t *)(((x) >> KASAN_SCALE) + KASAN_OFFSET) #if KASAN_DEBUG # define NOINLINE OS_NOINLINE diff --git a/san/ksancov.c b/san/ksancov.c index a8d7c8188..762d69da8 100644 --- a/san/ksancov.c +++ b/san/ksancov.c @@ -28,7 +28,6 @@ #include #include -#include #include #include @@ -48,14 +47,13 @@ #include #include #include -#include #include /* dev_t */ #include /* must come after sys/stat.h */ #include /* must come after sys/stat.h */ #include -#include +#include #include #include @@ -70,17 +68,7 @@ typedef struct uthread * uthread_t; #define USE_PC_TABLE 0 #define KSANCOV_MAX_DEV 64 - -extern boolean_t ml_at_interrupt_context(void); -extern boolean_t ml_get_interrupts_enabled(void); - -static int ksancov_detach(dev_t dev); - -static int dev_major; -static size_t nedges = 0; -static uint32_t __unused npcs = 0; - -static _Atomic unsigned active_devs; +#define KSANCOV_MAX_PCS (1024U * 64) /* default to 256k buffer => 64k pcs */ enum { KS_MODE_NONE, @@ -102,18 +90,58 @@ struct ksancov_dev { thread_t thread; dev_t dev; + lck_mtx_t lock; }; +typedef struct ksancov_dev * ksancov_dev_t; -/* array of devices indexed by devnode minor */ -static struct ksancov_dev *ksancov_devs[KSANCOV_MAX_DEV]; +extern boolean_t ml_at_interrupt_context(void); +extern boolean_t ml_get_interrupts_enabled(void); + +static void ksancov_detach(ksancov_dev_t); + +static int dev_major; +static size_t nedges = 0; +static uint32_t __unused npcs = 0; + +static _Atomic unsigned active_devs; + +static LCK_GRP_DECLARE(ksancov_lck_grp, "ksancov_lck_grp"); +static 
lck_rw_t *ksancov_devs_lck; +/* array of devices indexed by devnode minor */ +static ksancov_dev_t ksancov_devs[KSANCOV_MAX_DEV]; static struct ksancov_edgemap *ksancov_edgemap; -static inline struct ksancov_dev * -get_dev(dev_t dev) + +static ksancov_dev_t +create_dev(dev_t dev) { - int mn = minor(dev); - return ksancov_devs[mn]; + ksancov_dev_t d = kalloc_tag(sizeof(struct ksancov_dev), VM_KERN_MEMORY_DIAG); + if (!d) { + return NULL; + } + + d->mode = KS_MODE_NONE; + d->trace = NULL; + d->sz = 0; + d->maxpcs = KSANCOV_MAX_PCS; + d->dev = dev; + d->thread = THREAD_NULL; + lck_mtx_init(&d->lock, &ksancov_lck_grp, LCK_ATTR_NULL); + + return d; +} + +static void +free_dev(ksancov_dev_t d) +{ + if (d->mode == KS_MODE_TRACE && d->trace) { + kmem_free(kernel_map, (uintptr_t)d->trace, d->sz); + } else if (d->mode == KS_MODE_COUNTERS && d->counters) { + kmem_free(kernel_map, (uintptr_t)d->counters, d->sz); + } + lck_mtx_destroy(&d->lock, &ksancov_lck_grp); + kfree(d, sizeof(struct ksancov_dev)); } void @@ -134,7 +162,7 @@ trace_pc_guard(uint32_t *guardp, void *caller) if (__improbable(gd && !(gd & GUARD_SEEN) && ksancov_edgemap)) { size_t idx = gd & GUARD_IDX_MASK; if (idx < ksancov_edgemap->nedges) { - ksancov_edgemap->addrs[idx] = (uint32_t)(VM_KERNEL_UNSLIDE(caller) - VM_MIN_KERNEL_ADDRESS - 1); + ksancov_edgemap->addrs[idx] = (uint32_t)(VM_KERNEL_UNSLIDE(caller) - KSANCOV_PC_OFFSET - 1); *guardp |= GUARD_SEEN; } } @@ -149,14 +177,14 @@ trace_pc_guard(uint32_t *guardp, void *caller) return; } - uint32_t pc = (uint32_t)(VM_KERNEL_UNSLIDE(caller) - VM_MIN_KERNEL_ADDRESS - 1); + uint32_t pc = (uint32_t)(VM_KERNEL_UNSLIDE(caller) - KSANCOV_PC_OFFSET - 1); thread_t th = current_thread(); if (__improbable(th == THREAD_NULL)) { return; } - struct ksancov_dev *dev = *(struct ksancov_dev **)__sanitizer_get_thread_data(th); + ksancov_dev_t dev = *(ksancov_dev_t *)__sanitizer_get_thread_data(th); if (__probable(dev == NULL)) { return; } @@ -211,7 +239,7 @@ __sanitizer_cov_trace_pc_guard_init(uint32_t *start, uint32_t *stop) for (; start != stop; start++) { if (*start == 0) { if (nedges < KSANCOV_MAX_EDGES) { - *start = ++nedges; + *start = (uint32_t)++nedges; } } } @@ -295,13 +323,8 @@ ksancov_do_map(uintptr_t base, size_t sz, vm_prot_t prot) * map the sancov buffer into the current process */ static int -ksancov_map(dev_t dev, void **bufp, size_t *sizep) +ksancov_map(ksancov_dev_t d, uintptr_t *bufp, size_t *sizep) { - struct ksancov_dev *d = get_dev(dev); - if (!d) { - return EINVAL; - } - uintptr_t addr; size_t size = d->sz; @@ -324,8 +347,9 @@ ksancov_map(dev_t dev, void **bufp, size_t *sizep) return ENOMEM; } - *bufp = buf; + *bufp = (uintptr_t)buf; *sizep = size; + return 0; } @@ -333,13 +357,8 @@ ksancov_map(dev_t dev, void **bufp, size_t *sizep) * map the edge -> pc mapping as read-only */ static int -ksancov_map_edgemap(dev_t dev, void **bufp, size_t *sizep) +ksancov_map_edgemap(uintptr_t *bufp, size_t *sizep) { - struct ksancov_dev *d = get_dev(dev); - if (!d) { - return EINVAL; - } - uintptr_t addr = (uintptr_t)ksancov_edgemap; size_t size = sizeof(struct ksancov_edgemap) + ksancov_edgemap->nedges * sizeof(uint32_t); @@ -348,12 +367,11 @@ ksancov_map_edgemap(dev_t dev, void **bufp, size_t *sizep) return ENOMEM; } - *bufp = buf; + *bufp = (uintptr_t)buf; *sizep = size; return 0; } - /* * Device node management */ @@ -362,35 +380,34 @@ static int ksancov_open(dev_t dev, int flags, int devtype, proc_t p) { #pragma unused(flags,devtype,p) - if (minor(dev) >= KSANCOV_MAX_DEV) { + const int 
minor_num = minor(dev); + + if (minor_num >= KSANCOV_MAX_DEV) { + return EBUSY; + } + + lck_rw_lock_exclusive(ksancov_devs_lck); + + if (ksancov_devs[minor_num]) { + lck_rw_unlock_exclusive(ksancov_devs_lck); return EBUSY; } - /* allocate a device entry */ - struct ksancov_dev *d = kalloc_tag(sizeof(struct ksancov_dev), VM_KERN_MEMORY_DIAG); + ksancov_dev_t d = create_dev(dev); if (!d) { + lck_rw_unlock_exclusive(ksancov_devs_lck); return ENOMEM; } + ksancov_devs[minor_num] = d; - d->mode = KS_MODE_NONE; - d->trace = NULL; - d->maxpcs = 1024U * 64; /* default to 256k buffer => 64k pcs */ - d->dev = dev; - d->thread = THREAD_NULL; - - ksancov_devs[minor(dev)] = d; + lck_rw_unlock_exclusive(ksancov_devs_lck); return 0; } static int -ksancov_trace_alloc(dev_t dev, size_t maxpcs) +ksancov_trace_alloc(ksancov_dev_t d, size_t maxpcs) { - struct ksancov_dev *d = get_dev(dev); - if (!d) { - return EINVAL; - } - if (d->mode != KS_MODE_NONE) { return EBUSY; /* trace/counters already created */ } @@ -410,10 +427,10 @@ ksancov_trace_alloc(dev_t dev, size_t maxpcs) struct ksancov_trace *trace = (struct ksancov_trace *)buf; trace->magic = KSANCOV_TRACE_MAGIC; - trace->offset = VM_MIN_KERNEL_ADDRESS; - trace->head = 0; - trace->enabled = 0; - trace->maxpcs = maxpcs; + trace->offset = KSANCOV_PC_OFFSET; + os_atomic_init(&trace->head, 0); + os_atomic_init(&trace->enabled, 0); + trace->maxpcs = (uint32_t)maxpcs; d->trace = trace; d->sz = sz; @@ -424,13 +441,8 @@ ksancov_trace_alloc(dev_t dev, size_t maxpcs) } static int -ksancov_counters_alloc(dev_t dev) +ksancov_counters_alloc(ksancov_dev_t d) { - struct ksancov_dev *d = get_dev(dev); - if (!d) { - return EINVAL; - } - if (d->mode != KS_MODE_NONE) { return EBUSY; /* trace/counters already created */ } @@ -448,7 +460,7 @@ ksancov_counters_alloc(dev_t dev) struct ksancov_counters *counters = (struct ksancov_counters *)buf; counters->magic = KSANCOV_COUNTERS_MAGIC; counters->nedges = ksancov_edgemap->nedges; - counters->enabled = 0; + os_atomic_init(&counters->enabled, 0); d->counters = counters; d->sz = sz; @@ -461,18 +473,10 @@ ksancov_counters_alloc(dev_t dev) * attach a thread to a ksancov dev instance */ static int -ksancov_attach(dev_t dev, thread_t th) +ksancov_attach(ksancov_dev_t d, thread_t th) { - struct ksancov_dev *d = get_dev(dev); - if (!d) { - return EINVAL; - } - - if (d->thread != THREAD_NULL) { - int ret = ksancov_detach(dev); - if (ret) { - return ret; - } + if (d->mode == KS_MODE_NONE) { + return EINVAL; /* not configured */ } if (th != current_thread()) { @@ -480,11 +484,15 @@ ksancov_attach(dev_t dev, thread_t th) return EINVAL; } - struct ksancov_dev **devp = (void *)__sanitizer_get_thread_data(th); + ksancov_dev_t *devp = (void *)__sanitizer_get_thread_data(th); if (*devp) { return EBUSY; /* one dev per thread */ } + if (d->thread != THREAD_NULL) { + ksancov_detach(d); + } + d->thread = th; thread_reference(d->thread); @@ -503,21 +511,16 @@ thread_wait( /* * disconnect thread from ksancov dev */ -static int -ksancov_detach(dev_t dev) +static void +ksancov_detach(ksancov_dev_t d) { - struct ksancov_dev *d = get_dev(dev); - if (!d) { - return EINVAL; - } - if (d->thread == THREAD_NULL) { /* no thread attached */ - return 0; + return; } /* disconnect dev from thread */ - struct ksancov_dev **devp = (void *)__sanitizer_get_thread_data(d->thread); + ksancov_dev_t *devp = (void *)__sanitizer_get_thread_data(d->thread); if (*devp != NULL) { assert(*devp == d); os_atomic_store(devp, NULL, relaxed); @@ -531,53 +534,39 @@ ksancov_detach(dev_t 
dev) /* drop our thread reference */ thread_deallocate(d->thread); d->thread = THREAD_NULL; - - return 0; } static int ksancov_close(dev_t dev, int flags, int devtype, proc_t p) { #pragma unused(flags,devtype,p) - struct ksancov_dev *d = get_dev(dev); - if (!d) { - return EINVAL; - } - - if (d->mode == KS_MODE_TRACE) { - struct ksancov_trace *trace = d->trace; - if (trace) { - /* trace allocated - delete it */ - - os_atomic_sub(&active_devs, 1, relaxed); - os_atomic_store(&trace->enabled, 0, relaxed); /* stop tracing */ + const int minor_num = minor(dev); - ksancov_detach(dev); + lck_rw_lock_exclusive(ksancov_devs_lck); + ksancov_dev_t d = ksancov_devs[minor_num]; + ksancov_devs[minor_num] = NULL; /* dev no longer discoverable */ + lck_rw_unlock_exclusive(ksancov_devs_lck); - /* free trace */ - kmem_free(kernel_map, (uintptr_t)d->trace, d->sz); - d->trace = NULL; - d->sz = 0; - } - } else if (d->mode == KS_MODE_COUNTERS) { - struct ksancov_counters *counters = d->counters; - if (counters) { - os_atomic_sub(&active_devs, 1, relaxed); - os_atomic_store(&counters->enabled, 0, relaxed); /* stop tracing */ - - ksancov_detach(dev); + /* + * No need to lock d here as there is and will be no one having its + * reference except for this thread and the one which is going to + * be detached below. + */ - /* free counters */ - kmem_free(kernel_map, (uintptr_t)d->counters, d->sz); - d->counters = NULL; - d->sz = 0; - } + if (!d) { + return ENXIO; } - ksancov_devs[minor(dev)] = NULL; /* dev no longer discoverable */ + if (d->mode == KS_MODE_TRACE && d->trace) { + os_atomic_sub(&active_devs, 1, relaxed); + os_atomic_store(&d->trace->enabled, 0, relaxed); /* stop tracing */ + } else if (d->mode == KS_MODE_COUNTERS && d->counters) { + os_atomic_sub(&active_devs, 1, relaxed); + os_atomic_store(&d->counters->enabled, 0, relaxed); /* stop tracing */ + } - /* free the ksancov device instance */ - kfree(d, sizeof(struct ksancov_dev)); + ksancov_detach(d); + free_dev(d); return 0; } @@ -628,74 +617,56 @@ static int ksancov_ioctl(dev_t dev, unsigned long cmd, caddr_t _data, int fflag, proc_t p) { #pragma unused(fflag,p) - int ret = 0; + struct ksancov_buf_desc *mcmd; void *data = (void *)_data; - struct ksancov_dev *d = get_dev(dev); + lck_rw_lock_shared(ksancov_devs_lck); + ksancov_dev_t d = ksancov_devs[minor(dev)]; if (!d) { - return EINVAL; /* dev not open */ + lck_rw_unlock_shared(ksancov_devs_lck); + return EINVAL; /* dev not open */ } - if (cmd == KSANCOV_IOC_TRACE) { - size_t maxpcs = *(size_t *)data; - ret = ksancov_trace_alloc(dev, maxpcs); - if (ret) { - return ret; - } - } else if (cmd == KSANCOV_IOC_COUNTERS) { - ret = ksancov_counters_alloc(dev); - if (ret) { - return ret; - } - } else if (cmd == KSANCOV_IOC_MAP) { - struct ksancov_buf_desc *mcmd = (struct ksancov_buf_desc *)data; - - if (d->mode == KS_MODE_NONE) { - return EINVAL; /* mode not configured */ - } - - /* map buffer into the userspace VA space */ - void *buf; - size_t size; - ret = ksancov_map(dev, &buf, &size); - if (ret) { - return ret; - } - - mcmd->ptr = (uintptr_t)buf; - mcmd->sz = size; - } else if (cmd == KSANCOV_IOC_MAP_EDGEMAP) { - struct ksancov_buf_desc *mcmd = (struct ksancov_buf_desc *)data; - - /* map buffer into the userspace VA space */ - void *buf; - size_t size; - ret = ksancov_map_edgemap(dev, &buf, &size); - if (ret) { - return ret; - } + int ret = 0; - mcmd->ptr = (uintptr_t)buf; - mcmd->sz = size; - } else if (cmd == KSANCOV_IOC_START) { - if (d->mode == KS_MODE_NONE) { - return EINVAL; /* not configured */ - } - - 
ret = ksancov_attach(dev, current_thread()); - if (ret) { - return ret; - } - } else if (cmd == KSANCOV_IOC_NEDGES) { - size_t *nptr = (size_t *)data; - *nptr = nedges; - } else if (cmd == KSANCOV_IOC_TESTPANIC) { - uint64_t guess = *(uint64_t *)data; - ksancov_testpanic(guess); - } else { - /* unknown ioctl */ - return ENODEV; - } + switch (cmd) { + case KSANCOV_IOC_TRACE: + lck_mtx_lock(&d->lock); + ret = ksancov_trace_alloc(d, *(size_t *)data); + lck_mtx_unlock(&d->lock); + break; + case KSANCOV_IOC_COUNTERS: + lck_mtx_lock(&d->lock); + ret = ksancov_counters_alloc(d); + lck_mtx_unlock(&d->lock); + break; + case KSANCOV_IOC_MAP: + mcmd = (struct ksancov_buf_desc *)data; + lck_mtx_lock(&d->lock); + ret = ksancov_map(d, &mcmd->ptr, &mcmd->sz); + lck_mtx_unlock(&d->lock); + break; + case KSANCOV_IOC_MAP_EDGEMAP: + mcmd = (struct ksancov_buf_desc *)data; + ret = ksancov_map_edgemap(&mcmd->ptr, &mcmd->sz); + break; + case KSANCOV_IOC_START: + lck_mtx_lock(&d->lock); + ret = ksancov_attach(d, current_thread()); + lck_mtx_unlock(&d->lock); + break; + case KSANCOV_IOC_NEDGES: + *(size_t *)data = nedges; + break; + case KSANCOV_IOC_TESTPANIC: + ksancov_testpanic(*(uint64_t *)data); + break; + default: + ret = EINVAL; + break; + } + + lck_rw_unlock_shared(ksancov_devs_lck); return ret; } @@ -705,7 +676,7 @@ ksancov_dev_clone(dev_t dev, int action) { #pragma unused(dev) if (action == DEVFS_CLONE_ALLOC) { - for (size_t i = 0; i < KSANCOV_MAX_DEV; i++) { + for (int i = 0; i < KSANCOV_MAX_DEV; i++) { if (ksancov_devs[i] == NULL) { return i; } @@ -717,7 +688,7 @@ ksancov_dev_clone(dev_t dev, int action) return -1; } -static struct cdevsw +static const struct cdevsw ksancov_cdev = { .d_open = ksancov_open, .d_close = ksancov_close, @@ -762,8 +733,10 @@ ksancov_init_dev(void) ksancov_edgemap = (void *)buf; ksancov_edgemap->magic = KSANCOV_EDGEMAP_MAGIC; - ksancov_edgemap->nedges = nedges; - ksancov_edgemap->offset = VM_MIN_KERNEL_ADDRESS; + ksancov_edgemap->nedges = (uint32_t)nedges; + ksancov_edgemap->offset = KSANCOV_PC_OFFSET; + + ksancov_devs_lck = lck_rw_alloc_init(&ksancov_lck_grp, LCK_ATTR_NULL); return 0; } diff --git a/san/ksancov.h b/san/ksancov.h index 80936b49f..d2a836b20 100644 --- a/san/ksancov.h +++ b/san/ksancov.h @@ -115,6 +115,19 @@ struct ksancov_edgemap { }; #if XNU_KERNEL_PRIVATE +/* + * On arm64 the VM_MIN_KERNEL_ADDRESS is too far from %pc to fit into a 32-bit value. As a result + * ksancov reports invalid %pcs. To make at least kernel %pc values correct, a different base has + * to be used for arm. 
+ */ +#if defined(__x86_64__) || defined(__i386__) +#define KSANCOV_PC_OFFSET VM_MIN_KERNEL_ADDRESS +#elif defined(__arm__) || defined(__arm64__) +#define KSANCOV_PC_OFFSET VM_KERNEL_LINK_ADDRESS +#else +#error "Unsupported platform" +#endif + int ksancov_init_dev(void); void **__sanitizer_get_thread_data(thread_t); diff --git a/san/ubsan.c b/san/ubsan.c index 0259027d1..d8e42d708 100644 --- a/san/ubsan.c +++ b/san/ubsan.c @@ -34,10 +34,11 @@ static const bool ubsan_print = false; static const uint32_t line_acquired = 0x80000000UL; static const char *get_type_check_kind(uint8_t kind); + static size_t format_loc(struct san_src_loc *loc, char *dst, size_t sz) { - return scnprintf(dst, sz, " loc: %s:%d:%d\n", + return scnprintf(dst, sz, ", file:\"%s\", line:%d, column:%d },\n", loc->filename, loc->line & ~line_acquired, loc->col @@ -74,7 +75,7 @@ format_overflow(struct ubsan_violation *v, char *buf, size_t sz) { struct san_type_desc *ty = v->overflow->ty; return scnprintf(buf, sz, - "%s overflow, op = %s, ty = %s, width = %d, lhs = 0x%llx, rhs = 0x%llx\n", + "problem:\"%s overflow\", op:\"%s\", ty:\"%s\", width:%d, lhs:0x%llx, rhs:0x%llx, ", ty->issigned ? "signed" : "unsigned", overflow_str[v->ubsan_type], ty->name, @@ -91,9 +92,9 @@ format_shift(struct ubsan_violation *v, char *buf, size_t sz) struct san_type_desc *l = v->shift->lhs_t; struct san_type_desc *r = v->shift->rhs_t; - n += scnprintf(buf + n, sz - n, "bad shift\n"); - n += scnprintf(buf + n, sz - n, " lhs: 0x%llx, ty = %s, signed = %d, width = %d\n", v->lhs, l->name, l->issigned, 1 << l->width); - n += scnprintf(buf + n, sz - n, " rhs: 0x%llx, ty = %s, signed = %d, width = %d\n", v->rhs, r->name, r->issigned, 1 << r->width); + n += scnprintf(buf + n, sz - n, "problem:\"bad shift\", "); + n += scnprintf(buf + n, sz - n, "lhs:0x%llx, lty:\"%s\", lsigned:%d, lwidth:%d, ", v->lhs, l->name, l->issigned, 1 << l->width); + n += scnprintf(buf + n, sz - n, "rhs:0x%llx, rty:\"%s\", rsigned:%d, rwidth:%d, ", v->rhs, r->name, r->issigned, 1 << r->width); return n; } @@ -122,15 +123,15 @@ format_type_mismatch(struct ubsan_violation *v, char *buf, size_t sz) const char * kind = get_type_check_kind(v->align->kind); if (NULL == ptr) { //null pointer use - n += scnprintf(buf + n, sz - n, "%s NULL pointer of type %s\n", kind, v->align->ty->name); + n += scnprintf(buf + n, sz - n, "problem:\"%s NULL pointer\", ty:\"%s\", ", kind, v->align->ty->name); } else if (alignment && ((uintptr_t)ptr & (alignment - 1))) { //misaligned pointer use - n += scnprintf(buf + n, sz - n, "%s mis-aligned address %p for type %s ", kind, (void*)v->lhs, v->align->ty->name); - n += scnprintf(buf + n, sz - n, "which requires %d byte alignment\n", 1 << v->align->align); + n += scnprintf(buf + n, sz - n, "problem:\"%s mis-aligned\", address:%p, ty:\"%s\", ", kind, (void*)v->lhs, v->align->ty->name); + n += scnprintf(buf + n, sz - n, "required_alignment:%d, ", 1 << v->align->align); } else { //insufficient object size - n += scnprintf(buf + n, sz - n, "%s address %p with insufficient space for an object of type %s\n", - kind, ptr, v->align->ty->name); + n += scnprintf(buf + n, sz - n, "problem:\"%s insufficient object size\", ty:\"%s\", address:%p, ", + kind, v->align->ty->name, ptr); } return n; @@ -144,25 +145,32 @@ format_oob(struct ubsan_violation *v, char *buf, size_t sz) struct san_type_desc *ity = v->oob->index_ty; uintptr_t idx = v->lhs; - n += scnprintf(buf + n, sz - n, "OOB array access\n"); - n += scnprintf(buf + n, sz - n, " idx %ld\n", idx); - n += scnprintf(buf 
+ n, sz - n, " aty: ty = %s, signed = %d, width = %d\n", aty->name, aty->issigned, 1 << aty->width); - n += scnprintf(buf + n, sz - n, " ity: ty = %s, signed = %d, width = %d\n", ity->name, ity->issigned, 1 << ity->width); + n += scnprintf(buf + n, sz - n, "problem:\"OOB array access\", "); + n += scnprintf(buf + n, sz - n, "idx:%ld, ", idx); + n += scnprintf(buf + n, sz - n, "aty:\"%s\", asigned:%d, awidth:%d, ", aty->name, aty->issigned, 1 << aty->width); + n += scnprintf(buf + n, sz - n, "ity:\"%s\", isigned:%d, iwidth:%d, ", ity->name, ity->issigned, 1 << ity->width); return n; } +static size_t +format_load_invalid_value(struct ubsan_violation *v, char *buf, size_t sz) +{ + return scnprintf(buf, sz, "problem:\"invalid value load\", type:\"%s\", value:0x%llx", + v->invalid->type->name, v->lhs); +} + size_t ubsan_format(struct ubsan_violation *v, char *buf, size_t sz) { - size_t n = 0; + size_t n = scnprintf(buf, sz, "{ "); switch (v->ubsan_type) { case UBSAN_OVERFLOW_add ... UBSAN_OVERFLOW_negate: n += format_overflow(v, buf + n, sz - n); break; case UBSAN_UNREACHABLE: - n += scnprintf(buf + n, sz - n, "unreachable\n"); + n += scnprintf(buf + n, sz - n, "problem:\"unreachable\", "); break; case UBSAN_SHIFT: n += format_shift(v, buf + n, sz - n); @@ -171,13 +179,16 @@ ubsan_format(struct ubsan_violation *v, char *buf, size_t sz) n += format_type_mismatch(v, buf + n, sz - n); break; case UBSAN_POINTER_OVERFLOW: - n += scnprintf(buf + n, sz - n, "pointer overflow, before = 0x%llx, after = 0x%llx\n", v->lhs, v->rhs); + n += scnprintf(buf + n, sz - n, "problem:\"pointer overflow\", before:0x%llx, after:0x%llx, ", v->lhs, v->rhs); break; case UBSAN_OOB: n += format_oob(v, buf + n, sz - n); break; + case UBSAN_LOAD_INVALID_VALUE: + n += format_load_invalid_value(v, buf + n, sz - n); + break; case UBSAN_GENERIC: - n += scnprintf(buf + n, sz - n, "%s\n", v->func); + n += scnprintf(buf + n, sz - n, "problem:\"generic\", function:\"%s\", ", v->func); break; default: panic("unknown violation"); @@ -188,14 +199,11 @@ ubsan_format(struct ubsan_violation *v, char *buf, size_t sz) return n; } +enum UBFatality { Fatal, FleshWound }; + static void -ubsan_handle(struct ubsan_violation *v, bool fatal) +ubsan_handle(struct ubsan_violation *v, enum UBFatality fatality) { - const size_t sz = 256; - static char buf[sz]; - size_t n = 0; - buf[0] = '\0'; - if (!ubsan_loc_acquire(v->loc)) { /* violation site already reported */ return; @@ -203,48 +211,44 @@ ubsan_handle(struct ubsan_violation *v, bool fatal) ubsan_log_append(v); - if (ubsan_print || fatal) { - n += ubsan_format(v, buf + n, sz - n); - } - - if (ubsan_print) { + if (ubsan_print || (fatality == Fatal)) { + const size_t sz = 256; + static char buf[sz]; + buf[0] = '\0'; + ubsan_format(v, buf, sz); printf("UBSan: %s", buf); } - - if (fatal) { - panic("UBSan: %s", buf); - } } void __ubsan_handle_builtin_unreachable(struct ubsan_unreachable_desc *desc) { struct ubsan_violation v = { UBSAN_UNREACHABLE, 0, 0, .unreachable = desc, &desc->loc }; - ubsan_handle(&v, true); + ubsan_handle(&v, Fatal); } void __ubsan_handle_shift_out_of_bounds(struct ubsan_shift_desc *desc, uint64_t lhs, uint64_t rhs) { struct ubsan_violation v = { UBSAN_SHIFT, lhs, rhs, .shift = desc, &desc->loc }; - ubsan_handle(&v, false); + ubsan_handle(&v, FleshWound); } void __ubsan_handle_shift_out_of_bounds_abort(struct ubsan_shift_desc *desc, uint64_t lhs, uint64_t rhs) { struct ubsan_violation v = { UBSAN_SHIFT, lhs, rhs, .shift = desc, &desc->loc }; - ubsan_handle(&v, true); + 
ubsan_handle(&v, Fatal); } #define DEFINE_OVERFLOW(op) \ void __ubsan_handle_##op##_overflow(struct ubsan_overflow_desc *desc, uint64_t lhs, uint64_t rhs) { \ struct ubsan_violation v = { UBSAN_OVERFLOW_##op, lhs, rhs, .overflow = desc, &desc->loc }; \ - ubsan_handle(&v, false); \ + ubsan_handle(&v, FleshWound); \ } \ void __ubsan_handle_##op##_overflow_abort(struct ubsan_overflow_desc *desc, uint64_t lhs, uint64_t rhs) { \ struct ubsan_violation v = { UBSAN_OVERFLOW_##op, lhs, rhs, .overflow = desc, &desc->loc }; \ - ubsan_handle(&v, true); \ + ubsan_handle(&v, Fatal); \ } DEFINE_OVERFLOW(add) @@ -257,58 +261,71 @@ void __ubsan_handle_type_mismatch_v1(struct ubsan_align_desc *desc, uint64_t val) { struct ubsan_violation v = { UBSAN_TYPE_MISMATCH, val, 0, .align = desc, &desc->loc }; - ubsan_handle(&v, false); + ubsan_handle(&v, FleshWound); } void __ubsan_handle_type_mismatch_v1_abort(struct ubsan_align_desc *desc, uint64_t val) { struct ubsan_violation v = { UBSAN_TYPE_MISMATCH, val, 0, .align = desc, &desc->loc }; - ubsan_handle(&v, true); + ubsan_handle(&v, Fatal); } void __ubsan_handle_pointer_overflow(struct ubsan_ptroverflow_desc *desc, uint64_t before, uint64_t after) { struct ubsan_violation v = { UBSAN_POINTER_OVERFLOW, before, after, .ptroverflow = desc, &desc->loc }; - ubsan_handle(&v, false); + ubsan_handle(&v, FleshWound); } void __ubsan_handle_pointer_overflow_abort(struct ubsan_ptroverflow_desc *desc, uint64_t before, uint64_t after) { struct ubsan_violation v = { UBSAN_POINTER_OVERFLOW, before, after, .ptroverflow = desc, &desc->loc }; - ubsan_handle(&v, true); + ubsan_handle(&v, Fatal); } void __ubsan_handle_out_of_bounds(struct ubsan_oob_desc *desc, uint64_t idx) { struct ubsan_violation v = { UBSAN_OOB, idx, 0, .oob = desc, &desc->loc }; - ubsan_handle(&v, false); + ubsan_handle(&v, FleshWound); } void __ubsan_handle_out_of_bounds_abort(struct ubsan_oob_desc *desc, uint64_t idx) { struct ubsan_violation v = { UBSAN_OOB, idx, 0, .oob = desc, &desc->loc }; - ubsan_handle(&v, true); + ubsan_handle(&v, Fatal); +} + +void +__ubsan_handle_load_invalid_value(struct ubsan_load_invalid_desc *desc, uint64_t invalid_value) +{ + struct ubsan_violation v = { UBSAN_LOAD_INVALID_VALUE, invalid_value, 0, .invalid = desc, &desc->loc }; + ubsan_handle(&v, Fatal); +} + +void +__ubsan_handle_load_invalid_value_abort(struct ubsan_load_invalid_desc *desc, uint64_t invalid_value) +{ + struct ubsan_violation v = { UBSAN_LOAD_INVALID_VALUE, invalid_value, 0, .invalid = desc, &desc->loc }; + ubsan_handle(&v, Fatal); } #define DEFINE_GENERIC(check) \ void __ubsan_handle_##check (struct san_src_loc* loc) \ { \ struct ubsan_violation v = { UBSAN_GENERIC, 0, 0, .func = __func__, loc }; \ - ubsan_handle(&v, false); \ + ubsan_handle(&v, FleshWound); \ } \ void __ubsan_handle_##check##_abort(struct san_src_loc* loc) \ { \ struct ubsan_violation v = { UBSAN_GENERIC, 0, 0, .func = __func__, loc }; \ - ubsan_handle(&v, true); \ + ubsan_handle(&v, Fatal); \ } DEFINE_GENERIC(invalid_builtin) -DEFINE_GENERIC(load_invalid_value) DEFINE_GENERIC(nonnull_arg) DEFINE_GENERIC(vla_bound_not_positive) DEFINE_GENERIC(float_cast_overflow) diff --git a/san/ubsan.h b/san/ubsan.h index 9dff870a2..e24045ab2 100644 --- a/san/ubsan.h +++ b/san/ubsan.h @@ -84,6 +84,11 @@ struct ubsan_oob_desc { struct san_type_desc *index_ty; }; +struct ubsan_load_invalid_desc { + struct san_src_loc loc; + struct san_type_desc *type; +}; + enum { UBSAN_OVERFLOW_add = 1, UBSAN_OVERFLOW_sub, @@ -97,6 +102,7 @@ enum { UBSAN_OOB, UBSAN_GENERIC, 
UBSAN_TYPE_MISMATCH, + UBSAN_LOAD_INVALID_VALUE, UBSAN_VIOLATION_MAX, }; @@ -111,6 +117,7 @@ struct ubsan_violation { struct ubsan_align_desc *align; struct ubsan_ptroverflow_desc *ptroverflow; struct ubsan_oob_desc *oob; + struct ubsan_load_invalid_desc *invalid; const char *func; }; struct san_src_loc *loc; @@ -142,6 +149,8 @@ void __ubsan_handle_sub_overflow(struct ubsan_overflow_desc *, uint64_t lhs, uin void __ubsan_handle_sub_overflow_abort(struct ubsan_overflow_desc *, uint64_t lhs, uint64_t rhs); void __ubsan_handle_type_mismatch_v1(struct ubsan_align_desc *, uint64_t val); void __ubsan_handle_type_mismatch_v1_abort(struct ubsan_align_desc *, uint64_t val); +void __ubsan_handle_load_invalid_value(struct ubsan_load_invalid_desc *, uint64_t); +void __ubsan_handle_load_invalid_value_abort(struct ubsan_load_invalid_desc *, uint64_t); /* currently unimplemented */ void __ubsan_handle_float_cast_overflow(struct san_src_loc *); @@ -152,8 +161,6 @@ void __ubsan_handle_implicit_conversion(struct san_src_loc *); void __ubsan_handle_implicit_conversion_abort(struct san_src_loc *); void __ubsan_handle_invalid_builtin(struct san_src_loc *); void __ubsan_handle_invalid_builtin_abort(struct san_src_loc *); -void __ubsan_handle_load_invalid_value(struct san_src_loc *); -void __ubsan_handle_load_invalid_value_abort(struct san_src_loc *); void __ubsan_handle_missing_return(struct san_src_loc *); void __ubsan_handle_missing_return_abort(struct san_src_loc *); void __ubsan_handle_nonnull_arg(struct san_src_loc *); diff --git a/san/ubsan_log.c b/san/ubsan_log.c index d0a3fcc69..0c77d4ce9 100644 --- a/san/ubsan_log.c +++ b/san/ubsan_log.c @@ -26,8 +26,10 @@ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -#include +#include +#include #include +#include // hw_wait_while_equals #include #include #include "ubsan.h" @@ -43,6 +45,11 @@ static const size_t ubsan_log_size = 2048; struct ubsan_violation ubsan_log[ubsan_log_size]; +/* + * Implement a fixed-size buffer FIFO, similar to the Chase-Lev DeQueue. + * + * See https://fzn.fr/readings/ppopp13.pdf for explanations on barriers. 
+ */ _Atomic size_t ubsan_log_head = 0; /* first valid entry */ _Atomic size_t ubsan_log_tail = 0; /* next free slot (reader) */ _Atomic size_t ubsan_log_next = 0; /* next free slot (writer) */ @@ -56,29 +63,41 @@ next_entry(size_t x) } void -ubsan_log_append(struct ubsan_violation *e) +ubsan_log_append(struct ubsan_violation *violation) { if (!ubsan_logging) { return; } /* reserve a slot */ - size_t i = atomic_load(&ubsan_log_next); - size_t n; - do { + size_t i, e, n; + + disable_preemption(); + + os_atomic_rmw_loop(&ubsan_log_next, i, n, relaxed, { n = next_entry(i); - if (n == ubsan_log_tail) { - return; /* full */ + if (n == os_atomic_load(&ubsan_log_tail, acquire)) { + enable_preemption(); + return; /* full */ } - } while (!atomic_compare_exchange_weak(&ubsan_log_next, &i, n)); + }); - ubsan_log[i] = *e; + ubsan_log[i] = *violation; + os_atomic_thread_fence(release); /* make the entry available */ - size_t prev; - do { - prev = i; - } while (!atomic_compare_exchange_weak(&ubsan_log_head, &prev, n)); +again: + os_atomic_rmw_loop(&ubsan_log_head, e, n, relaxed, { + if (e != i) { + // we need to wait for another enqueuer + os_atomic_rmw_loop_give_up({ + hw_wait_while_equals((void **)&ubsan_log_head, (void *)e); + goto again; + }); + } + }); + + enable_preemption(); } static int @@ -86,30 +105,32 @@ sysctl_ubsan_log_dump SYSCTL_HANDLER_ARGS { #pragma unused(oidp, arg1, arg2) const size_t sz = ubsan_log_size * 256; - size_t start = atomic_load(&ubsan_log_tail); - size_t end = atomic_load(&ubsan_log_head); + size_t head, tail; + + head = os_atomic_load(&ubsan_log_head, relaxed); + os_atomic_thread_fence(seq_cst); + tail = os_atomic_load(&ubsan_log_tail, relaxed); char *buf; size_t n = 0; int err; - if (start == end) { + if (tail == head) { return 0; /* log is empty */ } - buf = kalloc(sz); + buf = kheap_alloc(KHEAP_TEMP, sz, Z_WAITOK | Z_ZERO); if (!buf) { return 0; } - bzero(buf, sz); - for (size_t i = start; i != end; i = next_entry(i)) { + for (size_t i = tail; i != head; i = next_entry(i)) { n += ubsan_format(&ubsan_log[i], buf + n, sz - n); } err = SYSCTL_OUT(req, buf, n); - kfree(buf, sz); + kheap_free(KHEAP_TEMP, buf, sz); return err; } @@ -118,12 +139,18 @@ sysctl_ubsan_log_entries SYSCTL_HANDLER_ARGS { #pragma unused(oidp, arg1, arg2) int ch, err, val; + size_t head, tail; + + size_t nentries; + + head = os_atomic_load(&ubsan_log_head, relaxed); + os_atomic_thread_fence(seq_cst); + tail = os_atomic_load(&ubsan_log_tail, relaxed); - int nentries; - if (ubsan_log_head >= ubsan_log_tail) { - nentries = ubsan_log_head - ubsan_log_tail; + if (head >= tail) { + nentries = head - tail; } else { - nentries = ubsan_log_size - (ubsan_log_tail - ubsan_log_head + 1); + nentries = ubsan_log_size - (tail - head + 1); } err = sysctl_io_number(req, nentries, sizeof(nentries), &val, &ch); @@ -131,7 +158,7 @@ sysctl_ubsan_log_entries SYSCTL_HANDLER_ARGS if (val != 0) { err = EINVAL; } else { - ubsan_log_tail = ubsan_log_head; + os_atomic_store(&ubsan_log_tail, head, relaxed); } } diff --git a/security/Makefile b/security/Makefile index 77eb0bf7c..584876f07 100644 --- a/security/Makefile +++ b/security/Makefile @@ -28,7 +28,6 @@ PRIVATE_DATAFILES = ${DATAFILES} PRIVATE_KERNELFILES = \ _label.h \ mac.h \ - mac_alloc.h \ mac_data.h \ mac_framework.h \ mac_internal.h \ diff --git a/security/_label.h b/security/_label.h index 2070bf3bd..b18187229 100644 --- a/security/_label.h +++ b/security/_label.h @@ -68,14 +68,14 @@ * XXXMAC: This shouldn't be exported to userland, but is because of ucred.h * and 
various other messes. */ -#if CONFIG_EMBEDDED +#if defined(XNU_TARGET_OS_OSX) +#define MAC_MAX_SLOTS 7 +#else #if CONFIG_VNGUARD #define MAC_MAX_SLOTS 4 #else #define MAC_MAX_SLOTS 3 #endif -#else -#define MAC_MAX_SLOTS 7 #endif #define MAC_FLAG_INITIALIZED 0x0000001 /* Is initialized for use. */ @@ -83,7 +83,7 @@ struct label { int l_flags; union { - void *l_ptr; + void * XNU_PTRAUTH_SIGNED_PTR("label.l_ptr") l_ptr; long l_long; } l_perpolicy[MAC_MAX_SLOTS]; }; diff --git a/security/conf/Makefile b/security/conf/Makefile index 05c4b79cf..51eddb889 100644 --- a/security/conf/Makefile +++ b/security/conf/Makefile @@ -23,7 +23,7 @@ endif $(TARGET)/$(CURRENT_KERNEL_CONFIG)/Makefile: $(SRCROOT)/SETUP/config/doconf $(OBJROOT)/SETUP/config $(DOCONFDEPS) $(_v)$(MKDIR) $(TARGET)/$(CURRENT_KERNEL_CONFIG) - $(_v)$(SRCROOT)/SETUP/config/doconf -c -cpu $(DOCONF_ARCH_CONFIG_LC) -soc $(CURRENT_MACHINE_CONFIG_LC) -d $(TARGET)/$(CURRENT_KERNEL_CONFIG) -s $(SOURCE) -m $(MASTERCONFDIR) $(CURRENT_KERNEL_CONFIG) + $(_v)$(SRCROOT)/SETUP/config/doconf -c -cpu $(DOCONF_ARCH_CONFIG_LC) -soc $(CURRENT_MACHINE_CONFIG_LC) -platform $(PLATFORM) -d $(TARGET)/$(CURRENT_KERNEL_CONFIG) -s $(SOURCE) -m $(MASTERCONFDIR) $(CURRENT_KERNEL_CONFIG) do_all: $(TARGET)/$(CURRENT_KERNEL_CONFIG)/Makefile $(_v)${MAKE} \ diff --git a/security/conf/Makefile.template b/security/conf/Makefile.template index 2d75b556c..e75fe162e 100644 --- a/security/conf/Makefile.template +++ b/security/conf/Makefile.template @@ -19,13 +19,6 @@ include $(MakeInc_def) CFLAGS+= -include meta_features.h -DBSD_KERNEL_PRIVATE SFLAGS+= -include meta_features.h -# Objects that don't want -Wcast-align warning (8474835) -OBJS_NO_CAST_ALIGN = \ - mac_alloc.o \ - mac_base.o - -$(foreach file,$(OBJS_NO_CAST_ALIGN),$(eval $(call add_perfile_cflags,$(file),-Wno-cast-align))) - # # INCFLAGS to include security prototypes # @@ -60,6 +53,23 @@ COMP_SUBDIRS = %MACHDEP +# +# Diagnostic opt-outs. We need to make this list empty. +# +# DO NOT ADD MORE HERE. 
+# +# -Wno-cast-align +mac_base.o_CFLAGS_ADD += -Wno-cast-align +# -Wno-shorten-64-to-32 +mac_audit.o_CFLAGS_ADD += -Wno-shorten-64-to-32 +mac_base.o_CFLAGS_ADD += -Wno-shorten-64-to-32 +mac_process.o_CFLAGS_ADD += -Wno-shorten-64-to-32 +# -Wno-sign-conversion +mac_base.o_CFLAGS_ADD += -Wno-sign-conversion +mac_mach.o_CFLAGS_ADD += -Wno-sign-conversion +mac_vfs.o_CFLAGS_ADD += -Wno-sign-conversion +mac_vfs_subr.o_CFLAGS_ADD += -Wno-sign-conversion + # Rebuild if per-file overrides change ${OBJS}: $(firstword $(MAKEFILE_LIST)) @@ -75,7 +85,7 @@ $(SOBJS): .SFLAGS $(_v)$(REPLACECONTENTS) $@ $(S_KCC) $(SFLAGS) $(INCFLAGS) $(COMPONENT).filelist: $(OBJS) - $(call makelog,$(ColorL)LDFILELIST$(Color0) $(ColorLF)$(COMPONENT)$(Color0)) + @$(LOG_LDFILELIST) "$(COMPONENT)" $(_v)for obj in ${OBJS}; do \ $(ECHO) $(TARGET)/$(CURRENT_KERNEL_CONFIG)/$${obj}; \ done > $(COMPONENT).filelist diff --git a/security/conf/files b/security/conf/files index bcd319fde..2ea2051dc 100644 --- a/security/conf/files +++ b/security/conf/files @@ -4,13 +4,10 @@ OPTIONS/audit optional audit OPTIONS/config_macf optional config_macf OPTIONS/config_macf_socket_subset optional config_macf_socket_subset -OPTIONS/config_macf_socket optional config_macf_socket -OPTIONS/config_macf_net optional config_macf_net OPTIONS/config_fse optional config_fse # security -security/mac_alloc.c optional config_macf security/mac_audit.c optional config_macf security/mac_base.c standard security/mac_data.c optional config_macf @@ -26,11 +23,9 @@ security/mac_sysv_msg.c optional config_macf security/mac_posix_sem.c optional config_macf security/mac_posix_shm.c optional config_macf security/mac_socket.c optional config_macf -security/mac_net.c optional config_macf_net security/mac_pipe.c optional config_macf security/mac_iokit.c optional config_macf security/mac_file.c optional config_macf -security/mac_inet.c optional config_macf_net security/mac_priv.c optional config_macf security/mac_pty.c optional config_macf security/mac_kext.c optional config_macf diff --git a/security/mac.h b/security/mac.h index c76796aa7..9043a2bc5 100644 --- a/security/mac.h +++ b/security/mac.h @@ -126,13 +126,6 @@ struct user64_mac { }; #endif /* KERNEL */ -/* - * Device types for mac_iokit_check_device() - */ -#define MAC_DEVICE_USB "USB" -#define MAC_DEVICE_FIREWIRE "FireWire" -#define MAC_DEVICE_TYPE_KEY "DeviceType" - /* * Flags for mac_proc_check_suspend_resume() */ diff --git a/security/mac_alloc.c b/security/mac_alloc.c deleted file mode 100644 index 2a113b10c..000000000 --- a/security/mac_alloc.c +++ /dev/null @@ -1,164 +0,0 @@ -/* - * Copyright (c) 2007 Apple Inc. All rights reserved. - * - * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * - * This file contains Original Code and/or Modifications of Original Code - * as defined in and that are subject to the Apple Public Source License - * Version 2.0 (the 'License'). You may not use this file except in - * compliance with the License. The rights granted to you under the License - * may not be used to create, or enable the creation or redistribution of, - * unlawful or unlicensed copies of an Apple operating system, or to - * circumvent, violate, or enable the circumvention or violation of, any - * terms of an Apple operating system software license agreement. - * - * Please obtain a copy of the License at - * http://www.opensource.apple.com/apsl/ and read it before using this file. 
- * - * The Original Code and all software distributed under the License are - * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER - * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, - * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. - * Please see the License for the specific language governing rights and - * limitations under the License. - * - * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ - */ -/* - * - */ - -#include -#include - -#include -#include -#include -#include - -#include - -#include "mac_alloc.h" - -/* - * XXXMAC: We should probably make sure only registered policies can - * call these, otherwise we're effectively changing Apple's plan wrt - * exported allocators. - */ - -/* - * Kernel allocator - */ -void * -mac_kalloc(vm_size_t size, int how) -{ - if (how == M_WAITOK) { - return kalloc(size); - } else { - return kalloc_noblock(size); - } -} - -/* - * for temporary binary compatibility - */ -void * mac_kalloc_noblock(vm_size_t size); -void * -mac_kalloc_noblock(vm_size_t size) -{ - return kalloc_noblock(size); -} - -void -mac_kfree(void * data, vm_size_t size) -{ - kfree(data, size); -} - -/* - * MBuf tag allocator. - */ - -void * -mac_mbuf_alloc(int len, int wait) -{ -#if CONFIG_MACF_SOCKET_SUBSET - struct m_tag *t; - - t = m_tag_alloc(KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_MAC_POLICY_LABEL, - len, wait); - if (t == NULL) { - return NULL; - } - - return (void *)(t + 1); -#else -#pragma unused(len, wait) - return NULL; -#endif -} - -void -mac_mbuf_free(void *data) -{ -#if CONFIG_MACF_SOCKET_SUBSET - struct m_tag *t; - - t = (struct m_tag *)((char *)data - sizeof(struct m_tag)); - m_tag_free(t); -#else -#pragma unused(data) -#endif -} - -/* - * VM functions - */ - -extern vm_map_t kalloc_map; - -int -mac_wire(void *start, void *end) -{ - return vm_map_wire_kernel(kalloc_map, CAST_USER_ADDR_T(start), - CAST_USER_ADDR_T(end), VM_PROT_READ | VM_PROT_WRITE, VM_KERN_MEMORY_SECURITY, FALSE); -} - -int -mac_unwire(void *start, void *end) -{ - return vm_map_unwire(kalloc_map, CAST_USER_ADDR_T(start), - CAST_USER_ADDR_T(end), FALSE); -} - -/* - * Zone allocator - */ -zone_t -mac_zinit(vm_size_t size, vm_size_t maxmem, vm_size_t alloc, const char *name) -{ - return zinit(size, maxmem, alloc, name); -} - -void -mac_zone_change(zone_t zone, unsigned int item, boolean_t value) -{ - zone_change(zone, item, value); -} - -void * -mac_zalloc(zone_t zone, int how) -{ - if (how == M_WAITOK) { - return zalloc(zone); - } else { - return zalloc_noblock(zone); - } -} - -void -mac_zfree(zone_t zone, void *elem) -{ - zfree(zone, elem); -} diff --git a/security/mac_alloc.h b/security/mac_alloc.h deleted file mode 100644 index 63da2e699..000000000 --- a/security/mac_alloc.h +++ /dev/null @@ -1,77 +0,0 @@ -/* - * Copyright (c) 2007 Apple Inc. All rights reserved. - * - * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * - * This file contains Original Code and/or Modifications of Original Code - * as defined in and that are subject to the Apple Public Source License - * Version 2.0 (the 'License'). You may not use this file except in - * compliance with the License. 
The rights granted to you under the License - * may not be used to create, or enable the creation or redistribution of, - * unlawful or unlicensed copies of an Apple operating system, or to - * circumvent, violate, or enable the circumvention or violation of, any - * terms of an Apple operating system software license agreement. - * - * Please obtain a copy of the License at - * http://www.opensource.apple.com/apsl/ and read it before using this file. - * - * The Original Code and all software distributed under the License are - * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER - * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, - * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. - * Please see the License for the specific language governing rights and - * limitations under the License. - * - * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ - */ -/* - * Memory allocation wrappers. - */ - -#ifndef _SECURITY_MAC_ALLOC_H_ -#define _SECURITY_MAC_ALLOC_H_ - -#include -#include -#include - -/* JMM - should use OSMlloc.h interfaces */ - -#ifdef __APPLE_API_EVOLVING -/* - * Kernel Memory allocator - */ -void * mac_kalloc(vm_size_t size, int how); -void mac_kfree(void *data, vm_size_t size); - -/* - * Mbuf allocator for mbuf labels. - */ -void * mac_mbuf_alloc(int len, int wait); -void mac_mbuf_free(void *data); - -/* - * - */ -int mac_wire(void *start, void *end); -int mac_unwire(void *start, void *end); - -/* - * Zone allocator - */ -zone_t mac_zinit(vm_size_t size, vm_size_t maxmem, - vm_size_t alloc, const char *name); -void mac_zone_change(zone_t zone, unsigned int item, boolean_t value); -void * mac_zalloc(zone_t zone, int how); -void mac_zfree(zone_t zone, void *elem); - -/* Item definitions */ -#define Z_EXHAUST 1 /* Make zone exhaustible */ -#define Z_COLLECT 2 /* Make zone collectable */ -#define Z_EXPAND 3 /* Make zone expandable */ -#define Z_FOREIGN 4 /* Allow collectable zone to contain foreign elements */ -#define Z_CALLERACCT 5 /* Account alloc/free against the caller */ - -#endif /* __APPLE_API_EVOLVING */ -#endif /* _SECURITY_MAC_ALLOC_H_ */ diff --git a/security/mac_audit.c b/security/mac_audit.c index 44b591fd4..12a009461 100644 --- a/security/mac_audit.c +++ b/security/mac_audit.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2006-2007 Apple Inc. All rights reserved. + * Copyright (c) 2006-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -71,13 +71,12 @@ #include #include #include -#include #include #if CONFIG_AUDIT -/* The zone allocator is initialized in mac_base.c. */ -zone_t mac_audit_data_zone; +ZONE_DECLARE(mac_audit_data_zone, "mac_audit_data_zone", + MAC_AUDIT_DATA_LIMIT, ZC_NONE); int mac_system_check_audit(struct ucred *cred, void *record, int length) diff --git a/security/mac_base.c b/security/mac_base.c index b4dad979c..dd3da27a1 100644 --- a/security/mac_base.c +++ b/security/mac_base.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2007-2016 Apple Inc. All rights reserved. + * Copyright (c) 2007-2020 Apple Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -80,6 +80,7 @@ #include #include #include +#include #include #include #include @@ -92,10 +93,9 @@ #include #include -#include +#include #include -#include -#include +#include #if CONFIG_MACF #include @@ -105,9 +105,7 @@ #include #endif -#if CONFIG_EMBEDDED #include -#endif /* * define MB_DEBUG to display run-time debugging information @@ -152,24 +150,6 @@ SYSCTL_UINT(_security_mac, OID_AUTO, max_slots, CTLFLAG_RD | CTLFLAG_LOCKED, */ int mac_late = 0; -/* - * Flag to indicate whether or not we should allocate label storage for - * new mbufs. Since most dynamic policies we currently work with don't - * rely on mbuf labeling, try to avoid paying the cost of mtag allocation - * unless specifically notified of interest. One result of this is - * that if a dynamically loaded policy requests mbuf labels, it must - * be able to deal with a NULL label being returned on any mbufs that - * were already in flight when the policy was loaded. Since the policy - * already has to deal with uninitialized labels, this probably won't - * be a problem. Note: currently no locking. Will this be a problem? - */ -#if CONFIG_MACF_NET -unsigned int mac_label_mbufs = 1; -SYSCTL_UINT(_security_mac, OID_AUTO, label_mbufs, SECURITY_MAC_CTLFLAGS, - &mac_label_mbufs, 0, "Label all MBUFs"); -#endif - - /* * Flag to indicate whether or not we should allocate label storage for * new vnodes. Since most dynamic policies we currently work with don't @@ -244,15 +224,6 @@ unsigned int mac_vnode_enforce = 1; SYSCTL_UINT(_security_mac, OID_AUTO, vnode_enforce, SECURITY_MAC_CTLFLAGS, &mac_vnode_enforce, 0, "Enforce MAC policy on vnode operations"); -#if CONFIG_AUDIT -/* - * mac_audit_data_zone is the zone used for data pushed into the audit - * record by policies. Using a zone simplifies memory management of this - * data, and allows tracking of the amount of data in flight. - */ -extern zone_t mac_audit_data_zone; -#endif - /* * mac_policy_list holds the list of policy modules. Modules with a * handle lower than staticmax are considered "static" and cannot be @@ -272,14 +243,13 @@ extern zone_t mac_audit_data_zone; static lck_mtx_t *mac_policy_mtx; /* - * Policy list array allocation chunk size. Trying to set this so that we - * allocate a page at a time. + * Policy list array allocation chunk size. Each entry holds a pointer. 
*/ -#define MAC_POLICY_LIST_CHUNKSIZE 512 +#define MAC_POLICY_LIST_CHUNKSIZE 8 static int mac_policy_busy; -#if CONFIG_EMBEDDED +#if !XNU_TARGET_OS_OSX SECURITY_READ_ONLY_LATE(mac_policy_list_t) mac_policy_list; SECURITY_READ_ONLY_LATE(static struct mac_policy_list_element) mac_policy_static_entries[MAC_POLICY_LIST_CHUNKSIZE]; #else @@ -373,14 +343,14 @@ mac_policy_init(void) mac_policy_list.freehint = 0; mac_policy_list.chunks = 1; -#if CONFIG_EMBEDDED +#if !XNU_TARGET_OS_OSX mac_policy_list.entries = mac_policy_static_entries; #else - mac_policy_list.entries = kalloc(sizeof(struct mac_policy_list_element) * MAC_POLICY_LIST_CHUNKSIZE); + mac_policy_list.entries = kalloc_flags( + sizeof(struct mac_policy_list_element) * MAC_POLICY_LIST_CHUNKSIZE, + Z_WAITOK | Z_ZERO); #endif - bzero(mac_policy_list.entries, sizeof(struct mac_policy_list_element) * MAC_POLICY_LIST_CHUNKSIZE); - LIST_INIT(&mac_label_element_list); LIST_INIT(&mac_static_label_element_list); @@ -392,8 +362,6 @@ mac_policy_init(void) lck_attr_free(mac_lck_attr); lck_grp_attr_free(mac_lck_grp_attr); lck_grp_free(mac_lck_grp); - - mac_labelzone_init(); } /* Function pointer set up for loading security extensions. @@ -431,12 +399,6 @@ mac_policy_initbsd(void) struct mac_policy_conf *mpc; u_int i; -#if CONFIG_AUDIT - mac_audit_data_zone = zinit(MAC_AUDIT_DATA_LIMIT, - AQ_HIWATER * MAC_AUDIT_DATA_LIMIT, - 8192, "mac_audit_data_zone"); -#endif - printf("MAC Framework successfully initialized\n"); /* Call bsd init functions of already loaded policies */ @@ -669,7 +631,7 @@ int mac_policy_register(struct mac_policy_conf *mpc, mac_policy_handle_t *handlep, void *xd) { -#if !CONFIG_EMBEDDED +#if XNU_TARGET_OS_OSX struct mac_policy_list_element *tmac_policy_list_element; #endif int error, slot, static_entry = 0; @@ -707,7 +669,7 @@ mac_policy_register(struct mac_policy_conf *mpc, mac_policy_handle_t *handlep, } if (mac_policy_list.numloaded >= mac_policy_list.max) { -#if !CONFIG_EMBEDDED +#if XNU_TARGET_OS_OSX /* allocate new policy list array, zero new chunk */ tmac_policy_list_element = kalloc((sizeof(struct mac_policy_list_element) * @@ -734,7 +696,7 @@ mac_policy_register(struct mac_policy_conf *mpc, mac_policy_handle_t *handlep, #else printf("out of space in mac_policy_list.\n"); return ENOMEM; -#endif /* CONFIG_EMBEDDED */ +#endif /* XNU_TARGET_OS_OSX */ } /* Check for policy with same name already loaded */ @@ -997,6 +959,7 @@ mac_label_externalize(size_t mpo_externalize_off, struct label *label, const char *name; int (*mpo_externalize)(struct label *, char *, struct sbuf *); int all_labels = 0, ignorenotfound = 0, error = 0, busy = FALSE; + int sb_pos; unsigned int count = 0; if (element[0] == '?') { @@ -1032,6 +995,7 @@ element_loop: if (mpo_externalize == NULL) { continue; } + sb_pos = sbuf_len(sb); error = sbuf_printf(sb, "%s/", name); if (error) { goto done; @@ -1048,8 +1012,7 @@ element_loop: * multiple label elements for some * (but not all) object types. */ - sbuf_setpos(sb, sbuf_len(sb) - - (strlen(name) + 1)); + sbuf_setpos(sb, sb_pos); error = 0; continue; } @@ -1110,8 +1073,14 @@ mac_externalize(size_t mpo_externalize_off, struct label *label, */ scratch = scratch_base; - /* get an sbuf */ - if (sbuf_new(&sb, outbuf, outbuflen, SBUF_FIXEDLEN) == NULL) { + /* + * initialize an sbuf mapping over the output buffer (or newly-allocated internal buffer, if + * outbuf is NULL), up to sbuf's limit of INT_MAX. 
+ */ + if (outbuflen > INT_MAX) { + outbuflen = INT_MAX; + } + if (sbuf_new(&sb, outbuf, (int)outbuflen, SBUF_FIXEDLEN) == NULL) { /* could not allocate interior buffer */ error = ENOMEM; goto out; @@ -1400,9 +1369,6 @@ __mac_get_fd(proc_t p, struct __mac_get_fd_args *uap, int *ret __unused) int error; size_t ulen; kauth_cred_t my_cred; -#if CONFIG_MACF_SOCKET - struct socket *so; -#endif /* MAC_SOCKET */ struct label *intlabel; AUDIT_ARG(fd, uap->fd); @@ -1445,7 +1411,7 @@ __mac_get_fd(proc_t p, struct __mac_get_fd_args *uap, int *ret __unused) } my_cred = kauth_cred_proc_ref(p); - error = mac_file_check_get(my_cred, fp->f_fglob, elements, mac.m_buflen); + error = mac_file_check_get(my_cred, fp->fp_glob, elements, mac.m_buflen); kauth_cred_unref(&my_cred); if (error) { fp_drop(p, uap->fd, fp, 0); @@ -1454,14 +1420,14 @@ __mac_get_fd(proc_t p, struct __mac_get_fd_args *uap, int *ret __unused) return error; } - switch (FILEGLOB_DTYPE(fp->f_fglob)) { + switch (FILEGLOB_DTYPE(fp->fp_glob)) { case DTYPE_VNODE: intlabel = mac_vnode_label_alloc(); if (intlabel == NULL) { error = ENOMEM; break; } - vp = (struct vnode *)fp->f_fglob->fg_data; + vp = (struct vnode *)fp->fp_glob->fg_data; error = vnode_getwithref(vp); if (error == 0) { mac_vnode_label_copy(vp->v_label, intlabel); @@ -1473,16 +1439,6 @@ __mac_get_fd(proc_t p, struct __mac_get_fd_args *uap, int *ret __unused) mac_vnode_label_free(intlabel); break; case DTYPE_SOCKET: -#if CONFIG_MACF_SOCKET - so = (struct socket *) fp->f_fglob->fg_data; - intlabel = mac_socket_label_alloc(MAC_WAITOK); - sock_lock(so, 1); - mac_socket_label_copy(so->so_label, intlabel); - sock_unlock(so, 1); - error = mac_socket_label_externalize(intlabel, elements, buffer, mac.m_buflen); - mac_socket_label_free(intlabel); - break; -#endif case DTYPE_PSXSHM: case DTYPE_PSXSEM: case DTYPE_PIPE: @@ -1605,9 +1561,6 @@ __mac_set_fd(proc_t p, struct __mac_set_fd_args *uap, int *ret __unused) size_t ulen; char *buffer; struct label *intlabel; -#if CONFIG_MACF_SOCKET - struct socket *so; -#endif struct vnode *vp; AUDIT_ARG(fd, uap->fd); @@ -1647,14 +1600,14 @@ __mac_set_fd(proc_t p, struct __mac_set_fd_args *uap, int *ret __unused) } - error = mac_file_check_set(vfs_context_ucred(ctx), fp->f_fglob, buffer, mac.m_buflen); + error = mac_file_check_set(vfs_context_ucred(ctx), fp->fp_glob, buffer, mac.m_buflen); if (error) { fp_drop(p, uap->fd, fp, 0); FREE(buffer, M_MACTEMP); return error; } - switch (FILEGLOB_DTYPE(fp->f_fglob)) { + switch (FILEGLOB_DTYPE(fp->fp_glob)) { case DTYPE_VNODE: if (mac_label_vnodes == 0) { error = ENOSYS; @@ -1670,7 +1623,7 @@ __mac_set_fd(proc_t p, struct __mac_set_fd_args *uap, int *ret __unused) } - vp = (struct vnode *)fp->f_fglob->fg_data; + vp = (struct vnode *)fp->fp_glob->fg_data; error = vnode_getwithref(vp); if (error == 0) { @@ -1681,18 +1634,6 @@ __mac_set_fd(proc_t p, struct __mac_set_fd_args *uap, int *ret __unused) break; case DTYPE_SOCKET: -#if CONFIG_MACF_SOCKET - intlabel = mac_socket_label_alloc(MAC_WAITOK); - error = mac_socket_label_internalize(intlabel, buffer); - if (error == 0) { - so = (struct socket *) fp->f_fglob->fg_data; - SOCK_LOCK(so); - error = mac_socket_label_update(vfs_context_ucred(ctx), so, intlabel); - SOCK_UNLOCK(so); - } - mac_socket_label_free(intlabel); - break; -#endif case DTYPE_PSXSHM: case DTYPE_PSXSEM: case DTYPE_PIPE: @@ -1944,10 +1885,13 @@ __mac_get_mount(proc_t p __unused, struct __mac_get_mount_args *uap, return error; } mp = nd.ni_vp->v_mount; + mount_ref(mp, 0); vnode_put(nd.ni_vp); nameidone(&nd); 
- return mac_mount_label_get(mp, uap->mac_p); + error = mac_mount_label_get(mp, uap->mac_p); + mount_drop(mp, 0); + return error; } /* diff --git a/security/mac_file.c b/security/mac_file.c index afba6a7b3..960a16d22 100644 --- a/security/mac_file.c +++ b/security/mac_file.c @@ -116,7 +116,7 @@ mac_file_check_fcntl(struct ucred *cred, struct fileglob *fg, int cmd, } int -mac_file_check_ioctl(struct ucred *cred, struct fileglob *fg, u_int cmd) +mac_file_check_ioctl(struct ucred *cred, struct fileglob *fg, u_long cmd) { int error; @@ -162,7 +162,7 @@ mac_file_check_change_offset(struct ucred *cred, struct fileglob *fg) int mac_file_check_get(struct ucred *cred, struct fileglob *fg, char *elements, - int len) + size_t len) { int error; @@ -172,7 +172,7 @@ mac_file_check_get(struct ucred *cred, struct fileglob *fg, char *elements, int mac_file_check_set(struct ucred *cred, struct fileglob *fg, char *buf, - int buflen) + size_t buflen) { int error; @@ -238,6 +238,12 @@ mac_file_check_mmap_downgrade(struct ucred *cred, struct fileglob *fg, *prot = result; } +void +mac_file_notify_close(struct ucred *cred, struct fileglob *fg) +{ + MAC_PERFORM(file_notify_close, cred, fg, fg->fg_label, ((fg->fg_flag & FWASWRITTEN) ? 1 : 0)); +} + /* * fileglob XATTR helpers. diff --git a/security/mac_framework.h b/security/mac_framework.h index 865dfaa7a..5a30437a0 100644 --- a/security/mac_framework.h +++ b/security/mac_framework.h @@ -83,7 +83,6 @@ struct attrlist; struct auditinfo; -struct bpf_d; struct componentname; struct cs_blob; struct devnode; @@ -92,17 +91,11 @@ struct flock; struct fdescnode; struct fileglob; struct fileproc; -struct ifnet; struct ifreq; struct image_params; -struct inpcb; struct ipc_port; -struct ipq; struct knote; -struct m_tag; struct mac; -struct mac_module_data; -struct mbuf; struct msg; struct msqid_kernel; struct mount; @@ -152,10 +145,6 @@ int mac_audit_check_postselect(kauth_cred_t cred, unsigned short syscode, void *args, int error, int retval, int mac_forced); int mac_audit_check_preselect(kauth_cred_t cred, unsigned short syscode, void *args); -int mac_bpfdesc_check_receive(struct bpf_d *bpf_d, struct ifnet *ifp); -void mac_bpfdesc_label_destroy(struct bpf_d *bpf_d); -void mac_bpfdesc_label_init(struct bpf_d *bpf_d); -void mac_bpfdesc_label_associate(kauth_cred_t cred, struct bpf_d *bpf_d); int mac_cred_check_label_update(kauth_cred_t cred, struct label *newlabel); int mac_cred_check_label_update_execve(vfs_context_t ctx, @@ -195,11 +184,11 @@ int mac_file_check_dup(kauth_cred_t cred, struct fileglob *fg, int newfd); int mac_file_check_fcntl(kauth_cred_t cred, struct fileglob *fg, int cmd, user_long_t arg); int mac_file_check_get(kauth_cred_t cred, struct fileglob *fg, - char *elements, int len); + char *elements, size_t len); int mac_file_check_get_offset(kauth_cred_t cred, struct fileglob *fg); int mac_file_check_inherit(kauth_cred_t cred, struct fileglob *fg); int mac_file_check_ioctl(kauth_cred_t cred, struct fileglob *fg, - unsigned int cmd); + unsigned long cmd); int mac_file_check_lock(kauth_cred_t cred, struct fileglob *fg, int op, struct flock *fl); int mac_file_check_library_validation(struct proc *proc, @@ -211,56 +200,20 @@ void mac_file_check_mmap_downgrade(kauth_cred_t cred, struct fileglob *fg, int *prot); int mac_file_check_receive(kauth_cred_t cred, struct fileglob *fg); int mac_file_check_set(kauth_cred_t cred, struct fileglob *fg, - char *bufp, int buflen); + char *bufp, size_t buflen); +void mac_file_notify_close(struct ucred *cred, struct fileglob *fg); 
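Note: the __mac_get_mount() hunk above closes a lifetime gap: mount_ref() is taken while the vnode iocount returned by namei() still pins the mount, and mount_drop() is issued only after mac_mount_label_get() has used it. A minimal sketch of that reference pattern follows; example_use_mount() and example_mount_label_fetch() are hypothetical stand-ins, not part of the patch.

#include <sys/mount.h>
#include <sys/vnode.h>

static int
example_use_mount(mount_t mp)
{
#pragma unused(mp)
	return 0;   /* placeholder for work such as mac_mount_label_get() */
}

/* Caller passes a vnode it holds an iocount on. */
static int
example_mount_label_fetch(vnode_t vp)
{
	int error;
	mount_t mp = vnode_mount(vp);

	/*
	 * Take an independent reference before dropping the vnode that
	 * currently keeps the mount from being torn down underneath us.
	 */
	mount_ref(mp, 0);
	vnode_put(vp);

	error = example_use_mount(mp);

	mount_drop(mp, 0);
	return error;
}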
void mac_file_label_associate(kauth_cred_t cred, struct fileglob *fg); void mac_file_label_destroy(struct fileglob *fg); void mac_file_label_init(struct fileglob *fg); -int mac_ifnet_check_transmit(struct ifnet *ifp, struct mbuf *mbuf, - int family, int type); -void mac_ifnet_label_associate(struct ifnet *ifp); -void mac_ifnet_label_destroy(struct ifnet *ifp); -int mac_ifnet_label_get(kauth_cred_t cred, struct ifreq *ifr, - struct ifnet *ifp); -void mac_ifnet_label_init(struct ifnet *ifp); -void mac_ifnet_label_recycle(struct ifnet *ifp); -int mac_ifnet_label_set(kauth_cred_t cred, struct ifreq *ifr, - struct ifnet *ifp); -int mac_inpcb_check_deliver(struct inpcb *inp, struct mbuf *mbuf, - int family, int type); -void mac_inpcb_label_associate(struct socket *so, struct inpcb *inp); -void mac_inpcb_label_destroy(struct inpcb *inp); -int mac_inpcb_label_init(struct inpcb *inp, int flag); -void mac_inpcb_label_recycle(struct inpcb *inp); -void mac_inpcb_label_update(struct socket *so); -int mac_iokit_check_device(char *devtype, struct mac_module_data *mdata); int mac_iokit_check_open(kauth_cred_t cred, io_object_t user_client, unsigned int user_client_type); int mac_iokit_check_set_properties(kauth_cred_t cred, io_object_t registry_entry, io_object_t properties); int mac_iokit_check_filter_properties(kauth_cred_t cred, io_object_t registry_entry); int mac_iokit_check_get_property(kauth_cred_t cred, io_object_t registry_entry, const char *name); +#ifdef KERNEL_PRIVATE int mac_iokit_check_hid_control(kauth_cred_t cred); -void mac_ipq_label_associate(struct mbuf *fragment, struct ipq *ipq); -int mac_ipq_label_compare(struct mbuf *fragment, struct ipq *ipq); -void mac_ipq_label_destroy(struct ipq *ipq); -int mac_ipq_label_init(struct ipq *ipq, int flag); -void mac_ipq_label_update(struct mbuf *fragment, struct ipq *ipq); -void mac_mbuf_label_associate_bpfdesc(struct bpf_d *bpf_d, struct mbuf *m); -void mac_mbuf_label_associate_ifnet(struct ifnet *ifp, struct mbuf *m); -void mac_mbuf_label_associate_inpcb(struct inpcb *inp, struct mbuf *m); -void mac_mbuf_label_associate_ipq(struct ipq *ipq, struct mbuf *mbuf); -void mac_mbuf_label_associate_linklayer(struct ifnet *ifp, struct mbuf *m); -void mac_mbuf_label_associate_multicast_encap(struct mbuf *oldmbuf, - struct ifnet *ifp, struct mbuf *newmbuf); -void mac_mbuf_label_associate_netlayer(struct mbuf *oldmbuf, - struct mbuf *newmbuf); -void mac_mbuf_label_associate_socket(struct socket *so, struct mbuf *m); -void mac_mbuf_label_copy(struct mbuf *m_from, struct mbuf *m_to); -void mac_mbuf_label_destroy(struct mbuf *m); -int mac_mbuf_label_init(struct mbuf *m, int flag); -void mac_mbuf_tag_copy(struct m_tag *m, struct m_tag *mtag); -void mac_mbuf_tag_destroy(struct m_tag *mtag); -int mac_mbuf_tag_init(struct m_tag *, int how); +#endif int mac_mount_check_fsctl(vfs_context_t ctx, struct mount *mp, - unsigned int cmd); + unsigned long cmd); int mac_mount_check_getattr(vfs_context_t ctx, struct mount *mp, struct vfs_attr *vfa); int mac_mount_check_label_update(vfs_context_t ctx, struct mount *mp); @@ -271,9 +224,11 @@ int mac_mount_check_snapshot_create(vfs_context_t ctx, struct mount *mp, const char *name); int mac_mount_check_snapshot_delete(vfs_context_t ctx, struct mount *mp, const char *name); +#ifdef KERNEL_PRIVATE int mac_mount_check_snapshot_mount(vfs_context_t ctx, struct vnode *rvp, struct vnode *vp, struct componentname *cnp, const char *name, const char *vfc_name); +#endif int mac_mount_check_snapshot_revert(vfs_context_t ctx, struct mount 
*mp, const char *name); int mac_mount_check_remount(vfs_context_t ctx, struct mount *mp); @@ -288,11 +243,8 @@ int mac_mount_label_externalize(struct label *label, char *elements, int mac_mount_label_get(struct mount *mp, user_addr_t mac_p); void mac_mount_label_init(struct mount *); int mac_mount_label_internalize(struct label *, char *string); -void mac_netinet_fragment(struct mbuf *datagram, struct mbuf *fragment); -void mac_netinet_icmp_reply(struct mbuf *m); -void mac_netinet_tcp_reply(struct mbuf *m); int mac_pipe_check_ioctl(kauth_cred_t cred, struct pipe *cpipe, - unsigned int cmd); + unsigned long cmd); int mac_pipe_check_kqfilter(kauth_cred_t cred, struct knote *kn, struct pipe *cpipe); int mac_pipe_check_read(kauth_cred_t cred, struct pipe *cpipe); @@ -302,12 +254,9 @@ int mac_pipe_check_stat(kauth_cred_t cred, struct pipe *cpipe); int mac_pipe_check_write(kauth_cred_t cred, struct pipe *cpipe); struct label *mac_pipe_label_alloc(void); void mac_pipe_label_associate(kauth_cred_t cred, struct pipe *cpipe); -void mac_pipe_label_copy(struct label *src, struct label *dest); void mac_pipe_label_destroy(struct pipe *cpipe); void mac_pipe_label_free(struct label *label); void mac_pipe_label_init(struct pipe *cpipe); -int mac_pipe_label_update(kauth_cred_t cred, struct pipe *cpipe, - struct label *label); void mac_policy_initbsd(void); int mac_posixsem_check_create(kauth_cred_t cred, const char *name); int mac_posixsem_check_open(kauth_cred_t cred, struct pseminfo *psem); @@ -341,16 +290,16 @@ void mac_posixshm_label_destroy(struct pshminfo *pshm); void mac_posixshm_label_init(struct pshminfo *pshm); int mac_priv_check(kauth_cred_t cred, int priv); int mac_priv_grant(kauth_cred_t cred, int priv); -int mac_proc_check_debug(proc_t proc1, proc_t proc2); +int mac_proc_check_debug(proc_ident_t tracing_ident, kauth_cred_t tracing_cred, proc_ident_t traced_ident); int mac_proc_check_dump_core(proc_t proc); int mac_proc_check_proc_info(proc_t curp, proc_t target, int callnum, int flavor); int mac_proc_check_get_cs_info(proc_t curp, proc_t target, unsigned int op); int mac_proc_check_set_cs_info(proc_t curp, proc_t target, unsigned int op); int mac_proc_check_fork(proc_t proc); int mac_proc_check_suspend_resume(proc_t proc, int sr); -int mac_proc_check_get_task_name(kauth_cred_t cred, struct proc *p); -int mac_proc_check_get_task(kauth_cred_t cred, struct proc *p); -int mac_proc_check_expose_task(kauth_cred_t cred, struct proc *p); +int mac_proc_check_get_task_name(kauth_cred_t cred, proc_ident_t pident); +int mac_proc_check_get_task(kauth_cred_t cred, proc_ident_t pident); +int mac_proc_check_expose_task(kauth_cred_t cred, proc_ident_t pident); int mac_proc_check_inherit_ipc_ports(struct proc *p, struct vnode *cur_vp, off_t cur_offset, struct vnode *img_vp, off_t img_offset, struct vnode *scriptvp); int mac_proc_check_getaudit(proc_t proc); int mac_proc_check_getauid(proc_t proc); @@ -362,6 +311,7 @@ int mac_proc_check_map_anon(proc_t proc, user_addr_t u_addr, int mac_proc_check_mprotect(proc_t proc, user_addr_t addr, user_size_t size, int prot); int mac_proc_check_run_cs_invalid(proc_t proc); +void mac_proc_notify_cs_invalidated(proc_t proc); int mac_proc_check_sched(proc_t proc, proc_t proc2); int mac_proc_check_setaudit(proc_t proc, struct auditinfo_addr *ai); int mac_proc_check_setauid(proc_t proc, uid_t auid); @@ -372,8 +322,6 @@ int mac_proc_check_signal(proc_t proc1, proc_t proc2, int mac_proc_check_syscall_unix(proc_t proc, int scnum); int mac_proc_check_wait(proc_t proc1, proc_t 
proc2); void mac_proc_notify_exit(proc_t proc); -int mac_setsockopt_label(kauth_cred_t cred, struct socket *so, - struct mac *extmac); int mac_socket_check_accept(kauth_cred_t cred, struct socket *so); int mac_socket_check_accepted(kauth_cred_t cred, struct socket *so); int mac_socket_check_bind(kauth_cred_t cred, struct socket *so, @@ -382,17 +330,12 @@ int mac_socket_check_connect(kauth_cred_t cred, struct socket *so, struct sockaddr *addr); int mac_socket_check_create(kauth_cred_t cred, int domain, int type, int protocol); -int mac_socket_check_deliver(struct socket *so, struct mbuf *m); int mac_socket_check_ioctl(kauth_cred_t cred, struct socket *so, - unsigned int cmd); -int mac_socket_check_kqfilter(kauth_cred_t cred, struct knote *kn, - struct socket *so); + unsigned long cmd); int mac_socket_check_listen(kauth_cred_t cred, struct socket *so); int mac_socket_check_receive(kauth_cred_t cred, struct socket *so); int mac_socket_check_received(kauth_cred_t cred, struct socket *so, struct sockaddr *saddr); -int mac_socket_check_select(kauth_cred_t cred, struct socket *so, - int which); int mac_socket_check_send(kauth_cred_t cred, struct socket *so, struct sockaddr *addr); int mac_socket_check_getsockopt(kauth_cred_t cred, struct socket *so, @@ -408,7 +351,6 @@ void mac_socket_label_destroy(struct socket *); int mac_socket_label_get(kauth_cred_t cred, struct socket *so, struct mac *extmac); int mac_socket_label_init(struct socket *, int waitok); -void mac_socketpeer_label_associate_mbuf(struct mbuf *m, struct socket *so); void mac_socketpeer_label_associate_socket(struct socket *peersocket, struct socket *socket_to_modify); int mac_socketpeer_label_get(kauth_cred_t cred, struct socket *so, @@ -425,7 +367,7 @@ int mac_system_check_settime(kauth_cred_t cred); int mac_system_check_swapoff(kauth_cred_t cred, struct vnode *vp); int mac_system_check_swapon(kauth_cred_t cred, struct vnode *vp); int mac_system_check_sysctlbyname(kauth_cred_t cred, const char *namestring, int *name, - u_int namelen, user_addr_t oldctl, size_t oldlen, + size_t namelen, user_addr_t oldctl, size_t oldlen, user_addr_t newctl, size_t newlen); int mac_system_check_kas_info(kauth_cred_t cred, int selector); void mac_sysvmsg_label_associate(kauth_cred_t cred, @@ -495,7 +437,7 @@ int mac_vnode_check_getattrlist(vfs_context_t ctx, struct vnode *vp, int mac_vnode_check_getextattr(vfs_context_t ctx, struct vnode *vp, const char *name, struct uio *uio); int mac_vnode_check_ioctl(vfs_context_t ctx, struct vnode *vp, - unsigned int cmd); + unsigned long cmd); int mac_vnode_check_kqfilter(vfs_context_t ctx, kauth_cred_t file_cred, struct knote *kn, struct vnode *vp); int mac_vnode_check_label_update(vfs_context_t ctx, struct vnode *vp, @@ -538,11 +480,16 @@ int mac_vnode_check_setutimes(vfs_context_t ctx, struct vnode *vp, int mac_vnode_check_signature(struct vnode *vp, struct cs_blob *cs_blob, struct image_params *imgp, unsigned int *cs_flags, unsigned int *signer_type, - int flags); + int flags, unsigned int platform); +int mac_vnode_check_supplemental_signature(struct vnode *vp, + struct cs_blob *cs_blob, struct vnode *linked_vp, + struct cs_blob *linked_cs_blob, unsigned int *signer_type); int mac_vnode_check_stat(vfs_context_t ctx, kauth_cred_t file_cred, struct vnode *vp); +#ifdef KERNEL_PRIVATE int mac_vnode_check_trigger_resolve(vfs_context_t ctx, struct vnode *dvp, struct componentname *cnp); +#endif int mac_vnode_check_truncate(vfs_context_t ctx, kauth_cred_t file_cred, struct vnode *vp); int 
mac_vnode_check_uipc_bind(vfs_context_t ctx, struct vnode *dvp, @@ -568,7 +515,9 @@ int mac_vnode_label_externalize_audit(struct vnode *vp, struct mac *mac); void mac_vnode_label_free(struct label *label); void mac_vnode_label_init(struct vnode *vp); int mac_vnode_label_init_needed(struct vnode *vp); +#ifdef KERNEL_PRIVATE struct label *mac_vnode_label_allocate(vnode_t vp); +#endif void mac_vnode_label_recycle(struct vnode *vp); void mac_vnode_label_update(vfs_context_t ctx, struct vnode *vp, struct label *newlabel); @@ -601,15 +550,11 @@ int mac_kext_check_unload(kauth_cred_t cred, const char *identifier); int mac_kext_check_query(kauth_cred_t cred); int mac_skywalk_flow_check_connect(proc_t p, void *flow, const struct sockaddr *addr, int type, int protocol); int mac_skywalk_flow_check_listen(proc_t p, void *flow, const struct sockaddr *addr, int type, int protocol); +void mac_vnode_notify_reclaim(vnode_t vp); void psem_label_associate(struct fileproc *fp, struct vnode *vp, struct vfs_context *ctx); void pshm_label_associate(struct fileproc *fp, struct vnode *vp, struct vfs_context *ctx); -#if CONFIG_MACF_NET -struct label *mac_bpfdesc_label_get(struct bpf_d *d); -void mac_bpfdesc_label_set(struct bpf_d *d, struct label *label); -#endif - #endif /* CONFIG_MACF */ #endif /* !_SECURITY_MAC_FRAMEWORK_H_ */ diff --git a/security/mac_inet.c b/security/mac_inet.c deleted file mode 100644 index ccbc1995e..000000000 --- a/security/mac_inet.c +++ /dev/null @@ -1,303 +0,0 @@ -/* - * Copyright (c) 2007-2011 Apple Inc. All rights reserved. - * - * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * - * This file contains Original Code and/or Modifications of Original Code - * as defined in and that are subject to the Apple Public Source License - * Version 2.0 (the 'License'). You may not use this file except in - * compliance with the License. The rights granted to you under the License - * may not be used to create, or enable the creation or redistribution of, - * unlawful or unlicensed copies of an Apple operating system, or to - * circumvent, violate, or enable the circumvention or violation of, any - * terms of an Apple operating system software license agreement. - * - * Please obtain a copy of the License at - * http://www.opensource.apple.com/apsl/ and read it before using this file. - * - * The Original Code and all software distributed under the License are - * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER - * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, - * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. - * Please see the License for the specific language governing rights and - * limitations under the License. - * - * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ - */ -/*- - * Copyright (c) 1999-2002 Robert N. M. Watson - * Copyright (c) 2001 Ilmar S. Habibulin - * Copyright (c) 2001-2004 Networks Associates Technology, Inc. - * Copyright (c) 2006-2007 SPARTA, Inc. - * All rights reserved. - * - * This software was developed by Robert Watson and Ilmar Habibulin for the - * TrustedBSD Project. - * - * This software was developed for the FreeBSD Project in part by Network - * Associates Laboratories, the Security Research Division of Network - * Associates, Inc. under DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), - * as part of the DARPA CHATS research program. - * - * This software was enhanced by SPARTA ISSO under SPAWAR contract - * N66001-04-C-6019 ("SEFOS"). 
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - */ - -#include -#include -#include -#include -#include - -#include - -#include -#include -#include - -#include - -static struct label * -mac_inpcb_label_alloc(int flag) -{ - struct label *label; - int error; - - label = mac_labelzone_alloc(flag); - if (label == NULL) { - return NULL; - } - MAC_CHECK(inpcb_label_init, label, flag); - if (error) { - MAC_PERFORM(inpcb_label_destroy, label); - mac_labelzone_free(label); - return NULL; - } - return label; -} - -int -mac_inpcb_label_init(struct inpcb *inp, int flag) -{ - inp->inp_label = mac_inpcb_label_alloc(flag); - if (inp->inp_label == NULL) { - return ENOMEM; - } - return 0; -} - -static struct label * -mac_ipq_label_alloc(int flag) -{ - struct label *label; - int error; - - label = mac_labelzone_alloc(flag); - if (label == NULL) { - return NULL; - } - - MAC_CHECK(ipq_label_init, label, flag); - if (error) { - MAC_PERFORM(ipq_label_destroy, label); - mac_labelzone_free(label); - return NULL; - } - return label; -} - -int -mac_ipq_label_init(struct ipq *ipq, int flag) -{ - ipq->ipq_label = mac_ipq_label_alloc(flag); - if (ipq->ipq_label == NULL) { - return ENOMEM; - } - return 0; -} - -static void -mac_inpcb_label_free(struct label *label) -{ - MAC_PERFORM(inpcb_label_destroy, label); - mac_labelzone_free(label); -} - -void -mac_inpcb_label_destroy(struct inpcb *inp) -{ - mac_inpcb_label_free(inp->inp_label); - inp->inp_label = NULL; -} - -void -mac_inpcb_label_recycle(struct inpcb *inp) -{ - MAC_PERFORM(inpcb_label_recycle, inp->inp_label); -} - -static void -mac_ipq_label_free(struct label *label) -{ - MAC_PERFORM(ipq_label_destroy, label); - mac_labelzone_free(label); -} - -void -mac_ipq_label_destroy(struct ipq *ipq) -{ - mac_ipq_label_free(ipq->ipq_label); - ipq->ipq_label = NULL; -} - -void -mac_inpcb_label_associate(struct socket *so, struct inpcb *inp) -{ - MAC_PERFORM(inpcb_label_associate, so, so->so_label, inp, - inp->inp_label); -} - -void -mac_mbuf_label_associate_ipq(struct ipq *ipq, struct mbuf *m) -{ - struct label *label; - - label = mac_mbuf_to_label(m); - - MAC_PERFORM(mbuf_label_associate_ipq, ipq, ipq->ipq_label, m, label); -} - -void -mac_netinet_fragment(struct mbuf *datagram, struct mbuf *fragment) -{ - struct label *datagramlabel, 
*fragmentlabel; - - datagramlabel = mac_mbuf_to_label(datagram); - fragmentlabel = mac_mbuf_to_label(fragment); - - MAC_PERFORM(netinet_fragment, datagram, datagramlabel, fragment, - fragmentlabel); -} - -void -mac_ipq_label_associate(struct mbuf *fragment, struct ipq *ipq) -{ - struct label *label; - - label = mac_mbuf_to_label(fragment); - - MAC_PERFORM(ipq_label_associate, fragment, label, ipq, ipq->ipq_label); -} - -void -mac_mbuf_label_associate_inpcb(struct inpcb *inp, struct mbuf *m) -{ - struct label *mlabel; - - /* INP_LOCK_ASSERT(inp); */ - mlabel = mac_mbuf_to_label(m); - - MAC_PERFORM(mbuf_label_associate_inpcb, inp, inp->inp_label, m, mlabel); -} - -int -mac_ipq_label_compare(struct mbuf *fragment, struct ipq *ipq) -{ - struct label *label; - int result; - - label = mac_mbuf_to_label(fragment); - - result = 1; - MAC_BOOLEAN(ipq_label_compare, &&, fragment, label, ipq, ipq->ipq_label); - - return result; -} - -void -mac_netinet_icmp_reply(struct mbuf *m) -{ - struct label *label; - - label = mac_mbuf_to_label(m); - - MAC_PERFORM(netinet_icmp_reply, m, label); -} - -void -mac_netinet_tcp_reply(struct mbuf *m) -{ - struct label *label; - - label = mac_mbuf_to_label(m); - - MAC_PERFORM(netinet_tcp_reply, m, label); -} - -void -mac_ipq_label_update(struct mbuf *fragment, struct ipq *ipq) -{ - struct label *label; - - label = mac_mbuf_to_label(fragment); - - MAC_PERFORM(ipq_label_update, fragment, label, ipq, ipq->ipq_label); -} - -int -mac_inpcb_check_deliver(struct inpcb *inp, struct mbuf *m, int family, int type) -{ - struct label *label; - int error; - - if ((m->m_flags & M_PKTHDR) == 0) { - panic("%s: no mbuf packet header!", __func__); - } - - label = mac_mbuf_to_label(m); - - MAC_CHECK(inpcb_check_deliver, inp, inp->inp_label, m, label, - family, type); - - return error; -} - -/* - * Propagate a change in the socket label to the inpcb label. - */ -void -mac_inpcb_label_update(struct socket *so) -{ - struct inpcb *inp; - - /* XXX: assert socket lock. 
*/ - inp = sotoinpcb(so); /* XXX: inp locking */ - - if (inp != NULL) { - /* INP_LOCK_ASSERT(inp); */ - MAC_PERFORM(inpcb_label_update, so, so->so_label, inp, - inp->inp_label); - } -} diff --git a/security/mac_internal.h b/security/mac_internal.h index 503b2ea4b..37afee7dc 100644 --- a/security/mac_internal.h +++ b/security/mac_internal.h @@ -164,10 +164,6 @@ extern unsigned int mac_sysvshm_enforce; extern unsigned int mac_vm_enforce; extern unsigned int mac_vnode_enforce; -#if CONFIG_MACF_NET -extern unsigned int mac_label_mbufs; -#endif - extern unsigned int mac_label_vnodes; extern unsigned int mac_vnode_label_count; @@ -221,32 +217,10 @@ int mac_check_structmac_consistent(struct mac *mac); #endif int mac_cred_label_externalize(struct label *, char *e, char *out, size_t olen, int flags); -#if CONFIG_MACF_SOCKET -int mac_socket_label_externalize(struct label *, char *e, char *out, size_t olen); -#endif /* CONFIG_MACF_SOCKET */ int mac_vnode_label_externalize(struct label *, char *e, char *out, size_t olen, int flags); -int mac_pipe_label_externalize(struct label *label, char *elements, - char *outbuf, size_t outbuflen); int mac_cred_label_internalize(struct label *label, char *string); -#if CONFIG_MACF_SOCKET -int mac_socket_label_internalize(struct label *label, char *string); -#endif /* CONFIG_MACF_SOCKET */ int mac_vnode_label_internalize(struct label *label, char *string); -int mac_pipe_label_internalize(struct label *label, char *string); - -#if CONFIG_MACF_SOCKET -/* internal socket label manipulation functions */ -struct label *mac_socket_label_alloc(int flags); -void mac_socket_label_free(struct label *l); -int mac_socket_label_update(struct ucred *cred, struct socket *so, struct label *l); -#endif /* MAC_SOCKET */ - -#if CONFIG_MACF_NET -struct label *mac_mbuf_to_label(struct mbuf *m); -#else -#define mac_mbuf_to_label(m) (NULL) -#endif /* * MAC_CHECK performs the designated check by walking the policy diff --git a/security/mac_iokit.c b/security/mac_iokit.c index f23467676..fe1f43ac0 100644 --- a/security/mac_iokit.c +++ b/security/mac_iokit.c @@ -60,20 +60,10 @@ #include #include #include -#include #include #include -int -mac_iokit_check_device(char *devtype, struct mac_module_data *mdata) -{ - int error; - - MAC_CHECK(iokit_check_device, devtype, mdata); - return error; -} - int mac_iokit_check_open(kauth_cred_t cred, io_object_t user_client, unsigned int user_client_type) { diff --git a/security/mac_label.c b/security/mac_label.c index ca6777d0d..aeb2d871b 100644 --- a/security/mac_label.c +++ b/security/mac_label.c @@ -37,35 +37,19 @@ #include #include -static zone_t zone_label; - -void -mac_labelzone_init(void) -{ - zone_label = zinit(sizeof(struct label), - 8192 * sizeof(struct label), - sizeof(struct label), "MAC Labels"); - zone_change(zone_label, Z_EXPAND, TRUE); - zone_change(zone_label, Z_EXHAUST, FALSE); - zone_change(zone_label, Z_CALLERACCT, FALSE); -} +static ZONE_DECLARE(zone_label, "MAC Labels", sizeof(struct label), ZC_ZFREE_CLEARMEM); struct label * mac_labelzone_alloc(int flags) { + int zflags = Z_ZERO | (flags & MAC_NOWAIT); struct label *l; - if (flags & MAC_NOWAIT) { - l = (struct label *) zalloc_noblock(zone_label); - } else { - l = (struct label *) zalloc(zone_label); + static_assert(MAC_NOWAIT == Z_NOWAIT); + l = zalloc_flags(zone_label, zflags); + if (l) { + l->l_flags = MAC_FLAG_INITIALIZED; } - if (l == NULL) { - return NULL; - } - - bzero(l, sizeof(struct label)); - l->l_flags = MAC_FLAG_INITIALIZED; return l; } diff --git a/security/mac_mach.c 
b/security/mac_mach.c index 0030e5931..4739336fb 100644 --- a/security/mac_mach.c +++ b/security/mac_mach.c @@ -83,10 +83,11 @@ mac_task_check_expose_task(struct task *task) if (p == NULL) { return ESRCH; } + struct proc_ident pident = proc_ident(p); struct ucred *cred = kauth_cred_get(); - MAC_CHECK(proc_check_expose_task, cred, p); proc_rele(p); + MAC_CHECK(proc_check_expose_task, cred, &pident); return error; } diff --git a/security/mac_mach_internal.h b/security/mac_mach_internal.h index df3bae67b..3e716ebd6 100644 --- a/security/mac_mach_internal.h +++ b/security/mac_mach_internal.h @@ -62,6 +62,7 @@ #define MAC_DOEXCF_TRACED 0x01 /* Only do mach exeception if being ptrace()'ed */ struct exception_action; +struct proc; struct uthread; struct task; @@ -82,6 +83,24 @@ int mac_task_check_set_host_exception_port(struct task *task, int mac_task_check_set_host_exception_ports(struct task *task, unsigned int exception_mask); +/* See rdar://problem/58989880 */ +#ifndef bitstr_test +# define bitstr_test(name, bit) ((name)[((bit) >> 3)] & (1 << ((bit) & 0x7))) +#endif /* ! bitstr_test */ + +typedef int (*mac_task_mach_filter_cbfunc_t)(struct proc *bsdinfo, int num); +typedef int (*mac_task_kobj_filter_cbfunc_t)(struct proc *bsdinfo, int msgid, int index); +extern mac_task_mach_filter_cbfunc_t mac_task_mach_trap_evaluate; +extern mac_task_kobj_filter_cbfunc_t mac_task_kobj_msg_evaluate; +extern int mach_trap_count; +extern int mach_kobj_count; + +void mac_task_set_mach_filter_mask(struct task *task, uint8_t *maskptr); +void mac_task_set_kobj_filter_mask(struct task *task, uint8_t *maskptr); +int mac_task_register_filter_callbacks( + const mac_task_mach_filter_cbfunc_t mach_cbfunc, + const mac_task_kobj_filter_cbfunc_t kobj_cbfunc); + /* threads */ void act_set_astmacf(struct thread *); void mac_thread_userret(struct thread *); @@ -100,6 +119,7 @@ int mac_exc_update_task_crash_label(struct task *task, struct label *newlabel); int mac_exc_action_check_exception_send(struct task *victim_task, struct exception_action *action); void mac_proc_notify_exec_complete(struct proc *proc); +int mac_proc_check_remote_thread_create(struct task *task, int flavor, thread_state_t new_state, mach_msg_type_number_t new_state_count); struct label *mac_exc_create_label_for_proc(struct proc *proc); struct label *mac_exc_create_label_for_current_proc(void); diff --git a/security/mac_net.c b/security/mac_net.c deleted file mode 100644 index 1169fb516..000000000 --- a/security/mac_net.c +++ /dev/null @@ -1,534 +0,0 @@ -/* - * Copyright (c) 2007 Apple Inc. All rights reserved. - * - * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * - * This file contains Original Code and/or Modifications of Original Code - * as defined in and that are subject to the Apple Public Source License - * Version 2.0 (the 'License'). You may not use this file except in - * compliance with the License. The rights granted to you under the License - * may not be used to create, or enable the creation or redistribution of, - * unlawful or unlicensed copies of an Apple operating system, or to - * circumvent, violate, or enable the circumvention or violation of, any - * terms of an Apple operating system software license agreement. - * - * Please obtain a copy of the License at - * http://www.opensource.apple.com/apsl/ and read it before using this file. 
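Note: the mac_mach.c hunk above shows the ordering the new proc_ident_t-based checks (mac_proc_check_get_task(), mac_proc_check_expose_task(), and friends in mac_framework.h) are built around: snapshot a stable identity while the proc reference is still held, release the reference, and only then run the policy check against the snapshot. A short sketch of that pattern follows; example_policy_check() and example_check_by_pid() are hypothetical, not part of the patch.

#include <sys/proc.h>
#include <sys/kauth.h>
#include <sys/errno.h>

/* Hypothetical consumer that takes the identity snapshot instead of a proc_t. */
static int
example_policy_check(kauth_cred_t cred, struct proc_ident *ident)
{
#pragma unused(cred, ident)
	return 0;
}

static int
example_check_by_pid(pid_t pid)
{
	proc_t p = proc_find(pid);

	if (p == PROC_NULL) {
		return ESRCH;
	}

	struct proc_ident pident = proc_ident(p); /* value snapshot; holds no reference */
	kauth_cred_t cred = kauth_cred_get();     /* caller's credential */

	proc_rele(p);                             /* drop the proc ref before the check */

	return example_policy_check(cred, &pident);
}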
- * - * The Original Code and all software distributed under the License are - * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER - * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, - * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. - * Please see the License for the specific language governing rights and - * limitations under the License. - * - * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ - */ -/*- - * Copyright (c) 1999-2002 Robert N. M. Watson - * Copyright (c) 2001 Ilmar S. Habibulin - * Copyright (c) 2001-2004 Networks Associates Technology, Inc. - * Copyright (c) 2006 SPARTA, Inc. - * All rights reserved. - * - * This software was developed by Robert Watson and Ilmar Habibulin for the - * TrustedBSD Project. - * - * This software was developed for the FreeBSD Project in part by Network - * Associates Laboratories, the Security Research Division of Network - * Associates, Inc. under DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), - * as part of the DARPA CHATS research program. - * - * This software was enhanced by SPARTA ISSO under SPAWAR contract - * N66001-04-C-6019 ("SEFOS"). - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - */ - -#include -#include -#include - -#include -#include - -#include -#include - -#include - -struct label * -mac_mbuf_to_label(struct mbuf *mbuf) -{ - struct m_tag *tag; - struct label *label; - - if (mbuf == NULL) { - return NULL; - } - - if ((mbuf->m_flags & M_PKTHDR) == 0) { - printf("%s() got non-header MBUF!\n", __func__); - return NULL; - } - - tag = m_tag_locate(mbuf, KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_MACLABEL, - NULL); - if (tag == NULL) { - printf("%s() m_tag_locate() returned NULL! 
(m->flags %04x)\n", - __func__, mbuf->m_flags); - return NULL; - } - label = (struct label *)(tag + 1); - return label; -} - -static struct label * -mac_bpfdesc_label_alloc(void) -{ - struct label *label; - - label = mac_labelzone_alloc(M_WAITOK); - if (label == NULL) { - return NULL; - } - MAC_PERFORM(bpfdesc_label_init, label); - return label; -} - -void -mac_bpfdesc_label_init(struct bpf_d *bpf_d) -{ - struct label *label; - - label = mac_bpfdesc_label_alloc(); - mac_bpfdesc_label_set(bpf_d, label); -} - -static struct label * -mac_ifnet_label_alloc(void) -{ - struct label *label; - - label = mac_labelzone_alloc(M_WAITOK); - if (label == NULL) { - return NULL; - } - MAC_PERFORM(ifnet_label_init, label); - return label; -} - -void -mac_ifnet_label_init(struct ifnet *ifp) -{ - ifp->if_label = mac_ifnet_label_alloc(); -} - -/* - * On failure, caller should cleanup with m_tag_free(). - */ -int -mac_mbuf_tag_init(struct m_tag *tag, int flag) -{ - struct label *label; - int error; - - label = (struct label *) (tag + 1); - mac_label_init(label); - - MAC_CHECK(mbuf_label_init, label, flag); - if (error) { - printf("%s(): mpo_mbuf_label_init() failed!\n", __func__); - } - - return error; -} - -static void -mac_bpfdesc_label_free(struct label *label) -{ - MAC_PERFORM(bpfdesc_label_destroy, label); - mac_labelzone_free(label); -} - -void -mac_bpfdesc_label_destroy(struct bpf_d *bpf_d) -{ - struct label *label; - - label = mac_bpfdesc_label_get(bpf_d); - mac_bpfdesc_label_free(label); - mac_bpfdesc_label_set(bpf_d, NULL); -} - -static void -mac_ifnet_label_free(struct label *label) -{ - MAC_PERFORM(ifnet_label_destroy, label); - mac_labelzone_free(label); -} - -void -mac_ifnet_label_destroy(struct ifnet *ifp) -{ - mac_ifnet_label_free(ifp->if_label); - ifp->if_label = NULL; -} - -void -mac_ifnet_label_recycle(struct ifnet *ifp) -{ - MAC_PERFORM(ifnet_label_recycle, ifp->if_label); -} - -void -mac_mbuf_tag_destroy(struct m_tag *tag) -{ - struct label *label; - - label = (struct label *)(tag + 1); - MAC_PERFORM(mbuf_label_destroy, label); - mac_label_destroy(label); - - return; -} - -void -mac_mbuf_tag_copy(struct m_tag *src, struct m_tag *dest) -{ - struct label *src_label, *dest_label; - - src_label = (struct label *)(src + 1); - dest_label = (struct label *)(dest + 1); - - if (src_label == NULL || dest_label == NULL) { - return; - } - - /* - * mac_mbuf_tag_init() is called on the target tag - * in m_tag_copy(), so we don't need to call it here. 
- */ - MAC_PERFORM(mbuf_label_copy, src_label, dest_label); - - return; -} - -void -mac_mbuf_label_copy(struct mbuf *m_from, struct mbuf *m_to) -{ - struct label *src_label, *dest_label; - - src_label = mac_mbuf_to_label(m_from); - dest_label = mac_mbuf_to_label(m_to); - - MAC_PERFORM(mbuf_label_copy, src_label, dest_label); -} - -static void -mac_ifnet_label_copy(struct label *src, struct label *dest) -{ - MAC_PERFORM(ifnet_label_copy, src, dest); -} - -static int -mac_ifnet_label_externalize(struct label *label, char *elements, - char *outbuf, size_t outbuflen) -{ - return MAC_EXTERNALIZE(ifnet, label, elements, outbuf, outbuflen); -} - -static int -mac_ifnet_label_internalize(struct label *label, char *string) -{ - return MAC_INTERNALIZE(ifnet, label, string); -} - -void -mac_ifnet_label_associate(struct ifnet *ifp) -{ - MAC_PERFORM(ifnet_label_associate, ifp, ifp->if_label); -} - -void -mac_bpfdesc_label_associate(struct ucred *cred, struct bpf_d *bpf_d) -{ - struct label *label; - - label = mac_bpfdesc_label_get(bpf_d); - MAC_PERFORM(bpfdesc_label_associate, cred, bpf_d, label); -} - -int -mac_bpfdesc_check_receive(struct bpf_d *bpf_d, struct ifnet *ifp) -{ - struct label *label; - int error; - - label = mac_bpfdesc_label_get(bpf_d); - ifnet_lock_shared(ifp); - MAC_CHECK(bpfdesc_check_receive, bpf_d, label, ifp, - ifp->if_label); - ifnet_lock_done(ifp); - - return error; -} - -int -mac_mbuf_label_init(struct mbuf *m, int flag) -{ - struct m_tag *tag; - int error; - - if (mac_label_mbufs == 0) { - return 0; - } - - tag = m_tag_create(KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_MACLABEL, - sizeof(struct label), flag, m); - if (tag == NULL) { - printf("%s(): m_tag_alloc() failed!\n", __func__); - return ENOBUFS; - } - error = mac_mbuf_tag_init(tag, flag); - if (error) { - printf("%s(): mac_mbuf_tag_init() failed!\n", __func__); - m_tag_free(tag); - return error; - } - m_tag_prepend(m, tag); - return 0; -} - -void -mac_mbuf_label_associate_bpfdesc(struct bpf_d *bpf_d, struct mbuf *mbuf) -{ - struct label *m_label, *b_label; - - /* bpf_d must be locked */ - - m_label = mac_mbuf_to_label(mbuf); - b_label = mac_bpfdesc_label_get(bpf_d); - - MAC_PERFORM(mbuf_label_associate_bpfdesc, bpf_d, b_label, mbuf, - m_label); -} - -void -mac_mbuf_label_associate_ifnet(struct ifnet *ifp, struct mbuf *mbuf) -{ - struct label *m_label; - - /* ifp must be locked */ - - m_label = mac_mbuf_to_label(mbuf); - - MAC_PERFORM(mbuf_label_associate_ifnet, ifp, ifp->if_label, mbuf, - m_label); -} - -void -mac_mbuf_label_associate_linklayer(struct ifnet *ifp, struct mbuf *mbuf) -{ - struct label *m_label; - - /* ifp must be locked */ - - m_label = mac_mbuf_to_label(mbuf); - - MAC_PERFORM(mbuf_label_associate_linklayer, ifp, ifp->if_label, mbuf, - m_label); -} - -void -mac_mbuf_label_associate_multicast_encap(struct mbuf *oldmbuf, - struct ifnet *ifp, struct mbuf *newmbuf) -{ - struct label *oldmbuflabel, *newmbuflabel; - - oldmbuflabel = mac_mbuf_to_label(oldmbuf); - newmbuflabel = mac_mbuf_to_label(newmbuf); - - /* ifp must be locked */ - - MAC_PERFORM(mbuf_label_associate_multicast_encap, oldmbuf, oldmbuflabel, - ifp, ifp->if_label, newmbuf, newmbuflabel); -} - -void -mac_mbuf_label_associate_netlayer(struct mbuf *oldmbuf, struct mbuf *newmbuf) -{ - struct label *oldmbuflabel, *newmbuflabel; - - oldmbuflabel = mac_mbuf_to_label(oldmbuf); - newmbuflabel = mac_mbuf_to_label(newmbuf); - - MAC_PERFORM(mbuf_label_associate_netlayer, oldmbuf, oldmbuflabel, - newmbuf, newmbuflabel); -} - -void 
-mac_mbuf_label_associate_socket(struct socket *socket, struct mbuf *mbuf) -{ - struct label *label; - struct xsocket xso; - - /* socket must be locked */ - - label = mac_mbuf_to_label(mbuf); - - sotoxsocket(socket, &xso); - MAC_PERFORM(mbuf_label_associate_socket, &xso, socket->so_label, - mbuf, label); -} - -int -mac_ifnet_check_transmit(struct ifnet *ifp, struct mbuf *mbuf, int family, - int type) -{ - struct label *label; - int error; - - label = mac_mbuf_to_label(mbuf); - - ifnet_lock_shared(ifp); - MAC_CHECK(ifnet_check_transmit, ifp, ifp->if_label, mbuf, label, - family, type); - ifnet_lock_done(ifp); - - return error; -} - -int -mac_ifnet_label_get(__unused struct ucred *cred, struct ifreq *ifr, - struct ifnet *ifp) -{ - char *elements, *buffer; - struct label *intlabel; - struct mac mac; - int error; - size_t len; - - error = copyin(CAST_USER_ADDR_T(ifr->ifr_ifru.ifru_data), - &mac, sizeof(mac)); - if (error) { - return error; - } - - error = mac_check_structmac_consistent(&mac); - if (error) { - return error; - } - - MALLOC(elements, char *, mac.m_buflen, M_MACTEMP, M_WAITOK); - error = copyinstr(CAST_USER_ADDR_T(mac.m_string), elements, - mac.m_buflen, &len); - if (error) { - FREE(elements, M_MACTEMP); - return error; - } - AUDIT_ARG(mac_string, elements); - - MALLOC(buffer, char *, mac.m_buflen, M_MACTEMP, M_WAITOK | M_ZERO); - intlabel = mac_ifnet_label_alloc(); - ifnet_lock_shared(ifp); - mac_ifnet_label_copy(ifp->if_label, intlabel); - ifnet_lock_done(ifp); - error = mac_ifnet_label_externalize(intlabel, elements, - buffer, mac.m_buflen); - mac_ifnet_label_free(intlabel); - FREE(elements, M_MACTEMP); - - if (error == 0) { - error = copyout(buffer, CAST_USER_ADDR_T(mac.m_string), - strlen(buffer) + 1); - } - FREE(buffer, M_MACTEMP); - - return error; -} - -int -mac_ifnet_label_set(struct ucred *cred, struct ifreq *ifr, - struct ifnet *ifp) -{ - struct label *intlabel; - struct mac mac; - char *buffer; - int error; - size_t len; - - error = copyin(CAST_USER_ADDR_T(ifr->ifr_ifru.ifru_data), - &mac, sizeof(mac)); - if (error) { - return error; - } - - error = mac_check_structmac_consistent(&mac); - if (error) { - return error; - } - - MALLOC(buffer, char *, mac.m_buflen, M_MACTEMP, M_WAITOK); - error = copyinstr(CAST_USER_ADDR_T(mac.m_string), buffer, - mac.m_buflen, &len); - if (error) { - FREE(buffer, M_MACTEMP); - return error; - } - AUDIT_ARG(mac_string, buffer); - - intlabel = mac_ifnet_label_alloc(); - error = mac_ifnet_label_internalize(intlabel, buffer); - FREE(buffer, M_MACTEMP); - if (error) { - mac_ifnet_label_free(intlabel); - return error; - } - - /* - * XXX: Note that this is a redundant privilege check, since - * policies impose this check themselves if required by the - * policy. Eventually, this should go away. 
- */ - error = suser(cred, NULL); - if (error) { - mac_ifnet_label_free(intlabel); - return error; - } - - ifnet_lock_exclusive(ifp); - MAC_CHECK(ifnet_check_label_update, cred, ifp, ifp->if_label, - intlabel); - if (error) { - ifnet_lock_done(ifp); - mac_ifnet_label_free(intlabel); - return error; - } - - MAC_PERFORM(ifnet_label_update, cred, ifp, ifp->if_label, intlabel); - ifnet_lock_done(ifp); - mac_ifnet_label_free(intlabel); - - return 0; -} diff --git a/security/mac_pipe.c b/security/mac_pipe.c index 78d733696..a50c3b5ba 100644 --- a/security/mac_pipe.c +++ b/security/mac_pipe.c @@ -105,33 +105,6 @@ mac_pipe_label_destroy(struct pipe *cpipe) cpipe->pipe_label = NULL; } -void -mac_pipe_label_copy(struct label *src, struct label *dest) -{ - MAC_PERFORM(pipe_label_copy, src, dest); -} - -int -mac_pipe_label_externalize(struct label *label, char *elements, - char *outbuf, size_t outbuflen) -{ - int error; - - error = MAC_EXTERNALIZE(pipe, label, elements, outbuf, outbuflen); - - return error; -} - -int -mac_pipe_label_internalize(struct label *label, char *string) -{ - int error; - - error = MAC_INTERNALIZE(pipe, label, string); - - return error; -} - void mac_pipe_label_associate(kauth_cred_t cred, struct pipe *cpipe) { @@ -154,7 +127,7 @@ mac_pipe_check_kqfilter(kauth_cred_t cred, struct knote *kn, return error; } int -mac_pipe_check_ioctl(kauth_cred_t cred, struct pipe *cpipe, u_int cmd) +mac_pipe_check_ioctl(kauth_cred_t cred, struct pipe *cpipe, u_long cmd) { int error; @@ -187,24 +160,6 @@ mac_pipe_check_read(kauth_cred_t cred, struct pipe *cpipe) return error; } -static int -mac_pipe_check_label_update(kauth_cred_t cred, struct pipe *cpipe, - struct label *newlabel) -{ - int error; - -#if SECURITY_MAC_CHECK_ENFORCE - /* 21167099 - only check if we allow write */ - if (!mac_pipe_enforce) { - return 0; - } -#endif - - MAC_CHECK(pipe_check_label_update, cred, cpipe, cpipe->pipe_label, newlabel); - - return error; -} - int mac_pipe_check_select(kauth_cred_t cred, struct pipe *cpipe, int which) { @@ -255,19 +210,3 @@ mac_pipe_check_write(kauth_cred_t cred, struct pipe *cpipe) return error; } - -int -mac_pipe_label_update(kauth_cred_t cred, struct pipe *cpipe, - struct label *label) -{ - int error; - - error = mac_pipe_check_label_update(cred, cpipe, label); - if (error) { - return error; - } - - MAC_PERFORM(pipe_label_update, cred, cpipe, cpipe->pipe_label, label); - - return 0; -} diff --git a/security/mac_policy.h b/security/mac_policy.h index 3f9ddbd52..08adb58fb 100644 --- a/security/mac_policy.h +++ b/security/mac_policy.h @@ -97,7 +97,6 @@ struct ifnet; struct inpcb; struct ipq; struct label; -struct mac_module_data; struct mac_policy_conf; struct mbuf; struct mount; @@ -120,6 +119,12 @@ struct vnode; /** @struct dummy */ +/* + * proc_ident_t support, see: rdar://problem/58928152 + * Should be removed once all dependent parties adopt + * proc_ident_t. + */ +#define MAC_PROC_IDENT_SUPPORT #ifndef _KAUTH_CRED_T #define _KAUTH_CRED_T @@ -240,61 +245,6 @@ typedef int mpo_audit_check_preselect_t( unsigned short syscode, void *args ); -/** - * @brief Initialize BPF descriptor label - * @param label New label to initialize - * - * Initialize the label for a newly instantiated BPF descriptor. - * Sleeping is permitted. - */ -typedef void mpo_bpfdesc_label_init_t( - struct label *label - ); -/** - * @brief Destroy BPF descriptor label - * @param label The label to be destroyed - * - * Destroy a BPF descriptor label. 
Since the BPF descriptor - * is going out of scope, policy modules should free any internal - * storage associated with the label so that it may be destroyed. - */ -typedef void mpo_bpfdesc_label_destroy_t( - struct label *label - ); -/** - * @brief Associate a BPF descriptor with a label - * @param cred User credential creating the BPF descriptor - * @param bpf_d The BPF descriptor - * @param bpflabel The new label - * - * Set the label on a newly created BPF descriptor from the passed - * subject credential. This call will be made when a BPF device node - * is opened by a process with the passed subject credential. - */ -typedef void mpo_bpfdesc_label_associate_t( - kauth_cred_t cred, - struct bpf_d *bpf_d, - struct label *bpflabel - ); -/** - * @brief Check whether BPF can read from a network interface - * @param bpf_d Subject; the BPF descriptor - * @param bpflabel Policy label for bpf_d - * @param ifp Object; the network interface - * @param ifnetlabel Policy label for ifp - * - * Determine whether the MAC framework should permit datagrams from - * the passed network interface to be delivered to the buffers of - * the passed BPF descriptor. Return (0) for success, or an errno - * value for failure. Suggested failure: EACCES for label mismatches, - * EPERM for lack of privilege. - */ -typedef int mpo_bpfdesc_check_receive_t( - struct bpf_d *bpf_d, - struct label *bpflabel, - struct ifnet *ifp, - struct label *ifnetlabel - ); /** * @brief Indicate desire to change the process label at exec time * @param old Existing subject credential @@ -868,7 +818,7 @@ typedef int mpo_file_check_get_t( kauth_cred_t cred, struct fileglob *fg, char *elements, - int len + size_t len ); /** * @brief Access control for getting the offset of a file descriptor @@ -926,7 +876,7 @@ typedef int mpo_file_check_ioctl_t( kauth_cred_t cred, struct fileglob *fg, struct label *label, - unsigned int cmd + unsigned long cmd ); /** * @brief Access control check for file locking @@ -1054,7 +1004,23 @@ typedef int mpo_file_check_set_t( kauth_cred_t cred, struct fileglob *fg, char *elements, - int len + size_t len + ); +/** + * @brief Inform MAC policies that file is being closed + * @param cred Subject credential + * @param fg Fileglob structure + * @param label Policy label for fg + * @param modified Boolean; 1 if file was modified, 0 otherwise + * + * Called when an open file is being closed, as a result of a call to + * close(2), the process exiting, or exec(2) w/O_CLOEXEC set. + */ +typedef void mpo_file_notify_close_t( + kauth_cred_t cred, + struct fileglob *fg, + struct label *label, + int modified ); /** * @brief Create file label @@ -1085,276 +1051,6 @@ typedef void mpo_file_label_destroy_t( typedef void mpo_file_label_init_t( struct label *label ); -/** - * @brief Access control check for relabeling network interfaces - * @param cred Subject credential - * @param ifp network interface being relabeled - * @param ifnetlabel Current label of the network interfaces - * @param newlabel New label to apply to the network interfaces - * @see mpo_ifnet_label_update_t - * - * Determine whether the subject identified by the credential can - * relabel the network interface represented by ifp to the supplied - * new label (newlabel). - * - * @return Return 0 if access is granted, otherwise an appropriate value for - * errno should be returned. 
- */ -typedef int mpo_ifnet_check_label_update_t( - kauth_cred_t cred, - struct ifnet *ifp, - struct label *ifnetlabel, - struct label *newlabel - ); -/** - * @brief Access control check for relabeling network interfaces - * @param ifp Network interface mbuf will be transmitted through - * @param ifnetlabel Label of the network interfaces - * @param m The mbuf to be transmitted - * @param mbuflabel Label of the mbuf to be transmitted - * @param family Address Family, AF_* - * @param type Type of socket, SOCK_{STREAM,DGRAM,RAW} - * - * Determine whether the mbuf with label mbuflabel may be transmitted - * through the network interface represented by ifp that has the - * label ifnetlabel. - * - * @return Return 0 if access is granted, otherwise an appropriate value for - * errno should be returned. - */ -typedef int mpo_ifnet_check_transmit_t( - struct ifnet *ifp, - struct label *ifnetlabel, - struct mbuf *m, - struct label *mbuflabel, - int family, - int type - ); -/** - * @brief Create a network interface label - * @param ifp Network interface labeled - * @param ifnetlabel Label for the network interface - * - * Set the label of a newly created network interface, most likely - * using the information in the supplied network interface struct. - */ -typedef void mpo_ifnet_label_associate_t( - struct ifnet *ifp, - struct label *ifnetlabel - ); -/** - * @brief Copy an ifnet label - * @param src Source ifnet label - * @param dest Destination ifnet label - * - * Copy the label information from src to dest. - */ -typedef void mpo_ifnet_label_copy_t( - struct label *src, - struct label *dest - ); -/** - * @brief Destroy ifnet label - * @param label The label to be destroyed - * - * Destroy the label on an ifnet label. In this entry point, a - * policy module should free any internal storage associated with - * label so that it may be destroyed. - */ -typedef void mpo_ifnet_label_destroy_t( - struct label *label - ); -/** - * @brief Externalize an ifnet label - * @param label Label to be externalized - * @param element_name Name of the label namespace for which labels should be - * externalized - * @param sb String buffer to be filled with a text representation of the label - * - * Produce an external representation of the label on an interface. - * An externalized label consists of a text representation of the - * label contents that can be used with user applications. - * Policy-agnostic user space tools will display this externalized - * version. - * - * @return 0 on success, return non-zero if an error occurs while - * externalizing the label data. - * - */ -typedef int mpo_ifnet_label_externalize_t( - struct label *label, - char *element_name, - struct sbuf *sb - ); -/** - * @brief Initialize ifnet label - * @param label New label to initialize - */ -typedef void mpo_ifnet_label_init_t( - struct label *label - ); -/** - * @brief Internalize an interface label - * @param label Label to be internalized - * @param element_name Name of the label namespace for which the label should - * be internalized - * @param element_data Text data to be internalized - * - * Produce an interface label from an external representation. An - * externalized label consists of a text representation of the label - * contents that can be used with user applications. Policy-agnostic - * user space tools will forward text version to the kernel for - * processing by individual policy modules. - * - * The policy's internalize entry points will be called only if the - * policy has registered interest in the label namespace. 
- * - * @return 0 on success, Otherwise, return non-zero if an error occurs - * while internalizing the label data. - * - */ -typedef int mpo_ifnet_label_internalize_t( - struct label *label, - char *element_name, - char *element_data - ); -/** - * @brief Recycle up a network interface label - * @param label The label to be recycled - * - * Recycle a network interface label. Darwin caches the struct ifnet - * of detached ifnets in a "free pool". Before ifnets are returned - * to the "free pool", policies can cleanup or overwrite any information - * present in the label. - */ -typedef void mpo_ifnet_label_recycle_t( - struct label *label - ); -/** - * @brief Update a network interface label - * @param cred Subject credential - * @param ifp The network interface to be relabeled - * @param ifnetlabel The current label of the network interface - * @param newlabel A new label to apply to the network interface - * @see mpo_ifnet_check_label_update_t - * - * Update the label on a network interface, using the supplied new label. - */ -typedef void mpo_ifnet_label_update_t( - kauth_cred_t cred, - struct ifnet *ifp, - struct label *ifnetlabel, - struct label *newlabel - ); -/** - * @brief Access control check for delivering a packet to a socket - * @param inp inpcb the socket is associated with - * @param inplabel Label of the inpcb - * @param m The mbuf being received - * @param mbuflabel Label of the mbuf being received - * @param family Address family, AF_* - * @param type Type of socket, SOCK_{STREAM,DGRAM,RAW} - * - * Determine whether the mbuf with label mbuflabel may be received - * by the socket associated with inpcb that has the label inplabel. - * - * @return Return 0 if access is granted, otherwise an appropriate value for - * errno should be returned. - */ -typedef int mpo_inpcb_check_deliver_t( - struct inpcb *inp, - struct label *inplabel, - struct mbuf *m, - struct label *mbuflabel, - int family, - int type - ); -/** - * @brief Create an inpcb label - * @param so Socket containing the inpcb to be labeled - * @param solabel Label of the socket - * @param inp inpcb to be labeled - * @param inplabel Label for the inpcb - * - * Set the label of a newly created inpcb, most likely - * using the information in the socket and/or socket label. - */ -typedef void mpo_inpcb_label_associate_t( - struct socket *so, - struct label *solabel, - struct inpcb *inp, - struct label *inplabel - ); -/** - * @brief Destroy inpcb label - * @param label The label to be destroyed - * - * Destroy the label on an inpcb label. In this entry point, a - * policy module should free any internal storage associated with - * label so that it may be destroyed. - */ -typedef void mpo_inpcb_label_destroy_t( - struct label *label - ); -/** - * @brief Initialize inpcb label - * @param label New label to initialize - * @param flag M_WAITOK or M_NOWAIT - */ -typedef int mpo_inpcb_label_init_t( - struct label *label, - int flag - ); -/** - * @brief Recycle up an inpcb label - * @param label The label to be recycled - * - * Recycle an inpcb label. Darwin allocates the inpcb as part of - * the socket structure in some cases. For this case we must recycle - * rather than destroy the inpcb as it will be reused later. 
- */ -typedef void mpo_inpcb_label_recycle_t( - struct label *label - ); -/** - * @brief Update an inpcb label from a socket label - * @param so Socket containing the inpcb to be relabeled - * @param solabel New label of the socket - * @param inp inpcb to be labeled - * @param inplabel Label for the inpcb - * - * Set the label of a newly created inpcb due to a change in the - * underlying socket label. - */ -typedef void mpo_inpcb_label_update_t( - struct socket *so, - struct label *solabel, - struct inpcb *inp, - struct label *inplabel - ); -/** - * @brief Device hardware access control - * @param devtype Type of device connected - * - * This is the MAC Framework device access control, which is called by the I/O - * Kit when a new device is connected to the system to determine whether that - * device should be trusted. A list of properties associated with the device - * is passed as an XML-formatted string. The routine should examine these - * properties to determine the trustworthiness of the device. A return value - * of EPERM forces the device to be claimed by a special device driver that - * will prevent its operation. - * - * @warning This is an experimental interface and may change in the future. - * - * @return Return EPERM to indicate that the device is untrusted and should - * not be allowed to operate. Return zero to indicate that the device is - * trusted and should be allowed to operate normally. - * - */ -typedef int mpo_iokit_check_device_t( - char *devtype, - struct mac_module_data *mdata - ); /** * @brief Access control check for opening an I/O Kit device * @param cred Subject credential @@ -1448,276 +1144,6 @@ typedef int mpo_iokit_check_get_property_t( typedef int mpo_iokit_check_hid_control_t( kauth_cred_t cred ); -/** - * @brief Create an IP reassembly queue label - * @param fragment First received IP fragment - * @param fragmentlabel Policy label for fragment - * @param ipq IP reassembly queue to be labeled - * @param ipqlabel Policy label to be filled in for ipq - * - * Set the label on a newly created IP reassembly queue from - * the mbuf header of the first received fragment. - */ -typedef void mpo_ipq_label_associate_t( - struct mbuf *fragment, - struct label *fragmentlabel, - struct ipq *ipq, - struct label *ipqlabel - ); -/** - * @brief Compare an mbuf header label to an ipq label - * @param fragment IP datagram fragment - * @param fragmentlabel Policy label for fragment - * @param ipq IP fragment reassembly queue - * @param ipqlabel Policy label for ipq - * - * Compare the label of the mbuf header containing an IP datagram - * (fragment) fragment with the label of the passed IP fragment - * reassembly queue (ipq). Return (1) for a successful match, or (0) - * for no match. This call is made when the IP stack attempts to - * find an existing fragment reassembly queue for a newly received - * fragment; if this fails, a new fragment reassembly queue may be - * instantiated for the fragment. Policies may use this entry point - * to prevent the reassembly of otherwise matching IP fragments if - * policy does not permit them to be reassembled based on the label - * or other information. - */ -typedef int mpo_ipq_label_compare_t( - struct mbuf *fragment, - struct label *fragmentlabel, - struct ipq *ipq, - struct label *ipqlabel - ); -/** - * @brief Destroy IP reassembly queue label - * @param label The label to be destroyed - * - * Destroy the label on an IP fragment queue. 
In this entry point, a - * policy module should free any internal storage associated with - * label so that it may be destroyed. - */ -typedef void mpo_ipq_label_destroy_t( - struct label *label - ); -/** - * @brief Initialize IP reassembly queue label - * @param label New label to initialize - * @param flag M_WAITOK or M_NOWAIT - * - * Initialize the label on a newly instantiated IP fragment reassembly - * queue. The flag field may be one of M_WAITOK and M_NOWAIT, and - * should be employed to avoid performing a sleeping malloc(9) during - * this initialization call. IP fragment reassembly queue allocation - * frequently occurs in performance sensitive environments, and the - * implementation should be careful to avoid sleeping or long-lived - * operations. This entry point is permitted to fail resulting in - * the failure to allocate the IP fragment reassembly queue. - */ -typedef int mpo_ipq_label_init_t( - struct label *label, - int flag - ); -/** - * @brief Update the label on an IP fragment reassembly queue - * @param fragment IP fragment - * @param fragmentlabel Policy label for fragment - * @param ipq IP fragment reassembly queue - * @param ipqlabel Policy label to be updated for ipq - * - * Update the label on an IP fragment reassembly queue (ipq) based - * on the acceptance of the passed IP fragment mbuf header (fragment). - */ -typedef void mpo_ipq_label_update_t( - struct mbuf *fragment, - struct label *fragmentlabel, - struct ipq *ipq, - struct label *ipqlabel - ); -/** - * @brief Assign a label to a new mbuf - * @param bpf_d BPF descriptor - * @param b_label Policy label for bpf_d - * @param m Object; mbuf - * @param m_label Policy label to fill in for m - * - * Set the label on the mbuf header of a newly created datagram - * generated using the passed BPF descriptor. This call is made when - * a write is performed to the BPF device associated with the passed - * BPF descriptor. - */ -typedef void mpo_mbuf_label_associate_bpfdesc_t( - struct bpf_d *bpf_d, - struct label *b_label, - struct mbuf *m, - struct label *m_label - ); -/** - * @brief Assign a label to a new mbuf - * @param ifp Interface descriptor - * @param i_label Existing label of ifp - * @param m Object; mbuf - * @param m_label Policy label to fill in for m - * - * Label an mbuf based on the interface from which it was received. - */ -typedef void mpo_mbuf_label_associate_ifnet_t( - struct ifnet *ifp, - struct label *i_label, - struct mbuf *m, - struct label *m_label - ); -/** - * @brief Assign a label to a new mbuf - * @param inp inpcb structure - * @param i_label Existing label of inp - * @param m Object; mbuf - * @param m_label Policy label to fill in for m - * - * Label an mbuf based on the inpcb from which it was derived. - */ -typedef void mpo_mbuf_label_associate_inpcb_t( - struct inpcb *inp, - struct label *i_label, - struct mbuf *m, - struct label *m_label - ); -/** - * @brief Set the label on a newly reassembled IP datagram - * @param ipq IP fragment reassembly queue - * @param ipqlabel Policy label for ipq - * @param mbuf IP datagram to be labeled - * @param mbuflabel Policy label to be filled in for mbuf - * - * Set the label on a newly reassembled IP datagram (mbuf) from the IP - * fragment reassembly queue (ipq) from which it was generated. 
- */ -typedef void mpo_mbuf_label_associate_ipq_t( - struct ipq *ipq, - struct label *ipqlabel, - struct mbuf *mbuf, - struct label *mbuflabel - ); -/** - * @brief Assign a label to a new mbuf - * @param ifp Subject; network interface - * @param i_label Existing label of ifp - * @param m Object; mbuf - * @param m_label Policy label to fill in for m - * - * Set the label on the mbuf header of a newly created datagram - * generated for the purposes of a link layer response for the passed - * interface. This call may be made in a number of situations, including - * for ARP or ND6 responses in the IPv4 and IPv6 stacks. - */ -typedef void mpo_mbuf_label_associate_linklayer_t( - struct ifnet *ifp, - struct label *i_label, - struct mbuf *m, - struct label *m_label - ); -/** - * @brief Assign a label to a new mbuf - * @param oldmbuf mbuf headerder for existing datagram for existing datagram - * @param oldmbuflabel Policy label for oldmbuf - * @param ifp Network interface - * @param ifplabel Policy label for ifp - * @param newmbuf mbuf header to be labeled for new datagram - * @param newmbuflabel Policy label for newmbuf - * - * Set the label on the mbuf header of a newly created datagram - * generated from the existing passed datagram when it is processed - * by the passed multicast encapsulation interface. This call is made - * when an mbuf is to be delivered using the virtual interface. - */ -typedef void mpo_mbuf_label_associate_multicast_encap_t( - struct mbuf *oldmbuf, - struct label *oldmbuflabel, - struct ifnet *ifp, - struct label *ifplabel, - struct mbuf *newmbuf, - struct label *newmbuflabel - ); -/** - * @brief Assign a label to a new mbuf - * @param oldmbuf Received datagram - * @param oldmbuflabel Policy label for oldmbuf - * @param newmbuf Newly created datagram - * @param newmbuflabel Policy label for newmbuf - * - * Set the label on the mbuf header of a newly created datagram generated - * by the IP stack in response to an existing received datagram (oldmbuf). - * This call may be made in a number of situations, including when responding - * to ICMP request datagrams. - */ -typedef void mpo_mbuf_label_associate_netlayer_t( - struct mbuf *oldmbuf, - struct label *oldmbuflabel, - struct mbuf *newmbuf, - struct label *newmbuflabel - ); -/** - * @brief Assign a label to a new mbuf - * @param so Socket to label - * @param so_label Policy label for socket - * @param m Object; mbuf - * @param m_label Policy label to fill in for m - * - * An mbuf structure is used to store network traffic in transit. - * When an application sends data to a socket or a pipe, it is wrapped - * in an mbuf first. This function sets the label on a newly created mbuf header - * based on the socket sending the data. The contents of the label should be - * suitable for performing an access check on the receiving side of the - * communication. - * - * Only labeled MBUFs will be presented to the policy via this entrypoint. - */ -typedef void mpo_mbuf_label_associate_socket_t( - socket_t so, - struct label *so_label, - struct mbuf *m, - struct label *m_label - ); -/** - * @brief Copy a mbuf label - * @param src Source label - * @param dest Destination label - * - * Copy the mbuf label information in src into dest. - * - * Only called when both source and destination mbufs have labels. - */ -typedef void mpo_mbuf_label_copy_t( - struct label *src, - struct label *dest - ); -/** - * @brief Destroy mbuf label - * @param label The label to be destroyed - * - * Destroy a mbuf label. 
Since the - * object is going out of scope, policy modules should free any - * internal storage associated with the label so that it may be - * destroyed. - */ -typedef void mpo_mbuf_label_destroy_t( - struct label *label - ); -/** - * @brief Initialize mbuf label - * @param label New label to initialize - * @param flag Malloc flags - * - * Initialize the label for a newly instantiated mbuf. - * - * @warning Since it is possible for the flags to be set to - * M_NOWAIT, the malloc operation may fail. - * - * @return On success, 0, otherwise, an appropriate errno return value. - */ -typedef int mpo_mbuf_label_init_t( - struct label *label, - int flag - ); /** * @brief Access control check for fsctl * @param cred Subject credential @@ -1740,7 +1166,7 @@ typedef int mpo_mount_check_fsctl_t( kauth_cred_t cred, struct mount *mp, struct label *label, - unsigned int cmd + unsigned long cmd ); /** * @brief Access control check for the retrieval of file system attributes @@ -2064,47 +1490,6 @@ typedef int mpo_mount_label_internalize_t( char *element_name, char *element_data ); -/** - * @brief Set the label on an IPv4 datagram fragment - * @param datagram Datagram being fragmented - * @param datagramlabel Policy label for datagram - * @param fragment New fragment - * @param fragmentlabel Policy label for fragment - * - * Called when an IPv4 datagram is fragmented into several smaller datagrams. - * Policies implementing mbuf labels will typically copy the label from the - * source datagram to the new fragment. - */ -typedef void mpo_netinet_fragment_t( - struct mbuf *datagram, - struct label *datagramlabel, - struct mbuf *fragment, - struct label *fragmentlabel - ); -/** - * @brief Set the label on an ICMP reply - * @param m mbuf containing the ICMP reply - * @param mlabel Policy label for m - * - * A policy may wish to update the label of an mbuf that refers to - * an ICMP packet being sent in response to an IP packet. This may - * be called in response to a bad packet or an ICMP request. - */ -typedef void mpo_netinet_icmp_reply_t( - struct mbuf *m, - struct label *mlabel - ); -/** - * @brief Set the label on a TCP reply - * @param m mbuf containing the TCP reply - * @param mlabel Policy label for m - * - * Called for outgoing TCP packets not associated with an actual socket. - */ -typedef void mpo_netinet_tcp_reply_t( - struct mbuf *m, - struct label *mlabel - ); /** * @brief Access control check for pipe ioctl * @param cred Subject credential @@ -2127,7 +1512,7 @@ typedef int mpo_pipe_check_ioctl_t( kauth_cred_t cred, struct pipe *cpipe, struct label *pipelabel, - unsigned int cmd + unsigned long cmd ); /** * @brief Access control check for pipe kqfilter @@ -2148,27 +1533,6 @@ typedef int mpo_pipe_check_kqfilter_t( struct pipe *cpipe, struct label *pipelabel ); -/** - * @brief Access control check for pipe relabel - * @param cred Subject credential - * @param cpipe Object to be accessed - * @param pipelabel The current label on the pipe - * @param newlabel The new label to be used - * - * Determine whether the subject identified by the credential can - * perform a relabel operation on the passed pipe. The cred object holds - * the credentials of the subject performing the operation. - * - * @return Return 0 if access is granted, otherwise an appropriate value for - * errno should be returned. 
- * - */ -typedef int mpo_pipe_check_label_update_t( - kauth_cred_t cred, - struct pipe *cpipe, - struct label *pipelabel, - struct label *newlabel - ); /** * @brief Access control check for pipe read * @param cred Subject credential @@ -2254,28 +1618,14 @@ typedef int mpo_pipe_check_write_t( * @param pipelabel Label for the pipe object * * Create a label for the pipe object being created by the supplied - * user credential. This call is made when the pipe is being created - * XXXPIPE(for one or both sides of the pipe?). - * + * user credential. This call is made when a pipe pair is being created. + * The label is shared by both ends of the pipe. */ typedef void mpo_pipe_label_associate_t( kauth_cred_t cred, struct pipe *cpipe, struct label *pipelabel ); -/** - * @brief Copy a pipe label - * @param src Source pipe label - * @param dest Destination pipe label - * - * Copy the pipe label associated with src to dest. - * XXXPIPE Describe when this is used: most likely during pipe creation to - * copy from rpipe to wpipe. - */ -typedef void mpo_pipe_label_copy_t( - struct label *src, - struct label *dest - ); /** * @brief Destroy pipe label * @param label The label to be destroyed @@ -2284,33 +1634,8 @@ typedef void mpo_pipe_label_copy_t( * policy modules should free any internal storage associated with the * label so that it may be destroyed. */ -typedef void mpo_pipe_label_destroy_t( - struct label *label - ); -/** - * @brief Externalize a pipe label - * @param label Label to be externalized - * @param element_name Name of the label namespace for which labels should be - * externalized - * @param sb String buffer to be filled with a text representation of the label - * - * Produce an external representation of the label on a pipe. - * An externalized label consists of a text representation - * of the label contents that can be used with user applications. - * Policy-agnostic user space tools will display this externalized - * version. - * - * The policy's externalize entry points will be called only if the - * policy has registered interest in the label namespace. - * - * @return 0 on success, return non-zero if an error occurs while - * externalizing the label data. - * - */ -typedef int mpo_pipe_label_externalize_t( - struct label *label, - char *element_name, - struct sbuf *sb +typedef void mpo_pipe_label_destroy_t( + struct label *label ); /** * @brief Initialize pipe label @@ -2322,51 +1647,6 @@ typedef int mpo_pipe_label_externalize_t( typedef void mpo_pipe_label_init_t( struct label *label ); -/** - * @brief Internalize a pipe label - * @param label Label to be internalized - * @param element_name Name of the label namespace for which the label should - * be internalized - * @param element_data Text data to be internalized - * - * Produce a pipe label from an external representation. An - * externalized label consists of a text representation of the label - * contents that can be used with user applications. Policy-agnostic - * user space tools will forward text version to the kernel for - * processing by individual policy modules. - * - * The policy's internalize entry points will be called only if the - * policy has registered interest in the label namespace. - * - * @return 0 on success, Otherwise, return non-zero if an error occurs - * while internalizing the label data. 
- * - */ -typedef int mpo_pipe_label_internalize_t( - struct label *label, - char *element_name, - char *element_data - ); -/** - * @brief Update a pipe label - * @param cred Subject credential - * @param cpipe Object to be labeled - * @param oldlabel Existing pipe label - * @param newlabel New label to replace existing label - * @see mpo_pipe_check_label_update_t - * - * The subject identified by the credential has previously requested - * and was authorized to relabel the pipe; this entry point allows - * policies to perform the actual relabel operation. Policies should - * update oldlabel using the label stored in the newlabel parameter. - * - */ -typedef void mpo_pipe_label_update_t( - kauth_cred_t cred, - struct pipe *cpipe, - struct label *oldlabel, - struct label *newlabel - ); /** * @brief Policy unload event * @param mpc MAC policy configuration @@ -2791,9 +2071,33 @@ typedef int mpo_proc_check_dump_core_t( struct proc *proc ); /** - * @brief Access control check for debugging process + * @brief Access control over remote thread creation * @param cred Subject credential * @param proc Object process + * @param flavor Flavor of thread state passed in new_state, or -1 + * @param new_state Thread state to be set on the created thread, or NULL + * @param new_state_count Size of thread state, in natural_t units, or 0 + * + * Determine whether the subject can create a thread in the object process + * by calling the thread_create or thread_create_running MIG routines on + * another process' task port. For thread_create_running, the flavor, + * new_state and new_state_count arguments are passed here before they are + * converted and checked by machine-dependent code. + * + * @return Return 0 if access is granted, otherwise an appropriate value for + * errno should be returned. + */ +typedef int mpo_proc_check_remote_thread_create_t( + kauth_cred_t cred, + struct proc *proc, + int flavor, + thread_state_t new_state, + mach_msg_type_number_t new_state_count + ); +/** + * @brief Access control check for debugging process + * @param cred Subject credential + * @param pident Object unique process identifier * * Determine whether the subject identified by the credential can debug * the passed process. This call may be made in a number of situations, @@ -2806,7 +2110,7 @@ typedef int mpo_proc_check_dump_core_t( */ typedef int mpo_proc_check_debug_t( kauth_cred_t cred, - struct proc *proc + struct proc_ident *pident ); /** * @brief Access control over fork @@ -2849,13 +2153,18 @@ typedef int mpo_proc_check_set_host_exception_port_t( unsigned int exception ); /** - * @brief Access control over pid_suspend and pid_resume + * @brief Access control over pid_suspend, pid_resume and family * @param cred Subject credential - * @param proc Subject process trying to run pid_suspend or pid_resume - * @param sr Call is suspend (0) or resume (1) + * @param proc Object process + * @param sr Type of call; one of MAC_PROC_CHECK_SUSPEND, + * MAC_PROC_CHECK_RESUME, MAC_PROC_CHECK_HIBERNATE, + * MAC_PROC_CHECK_SHUTDOWN_SOCKETS or MAC_PROC_CHECK_PIDBIND. * - * Determine whether the subject identified is allowed to suspend or resume - * other processes. + * Determine whether the subject identified is allowed to call pid_suspend, + * pid_resume, pid_hibernate, pid_shutdown_sockets, + * process_policy(PROC_POLICY_APP_LIFECYCLE, PROC_POLICY_APPLIFE_DEVSTATUS) or + * process_policy(PROC_POLICY_APP_LIFECYCLE, PROC_POLICY_APPLIFE_PIDBIND) on + * the object process. 
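/*
 * A minimal sketch (illustrative only, not from this diff) of a policy module
 * implementing the mpo_proc_check_remote_thread_create_t hook added above;
 * the example_ name and the pid-1 rule are hypothetical.
 */
#include <sys/proc.h>
#include <sys/errno.h>

static int
example_proc_check_remote_thread_create(kauth_cred_t cred, struct proc *proc,
    int flavor, thread_state_t new_state, mach_msg_type_number_t new_state_count)
{
#pragma unused(cred, flavor, new_state, new_state_count)
	/* Disallow creating threads in launchd from any other task. */
	if (proc_pid(proc) == 1) {
		return EPERM;
	}
	return 0;
}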
* * @return Return 0 if access is granted, otherwise an appropriate value for * errno should be returned. @@ -3316,51 +2625,6 @@ typedef int mpo_socket_check_create_t( int type, int protocol ); -/** - * @brief Access control check for delivering data to a user's receieve queue - * @param so The socket data is being delivered to - * @param so_label The label of so - * @param m The mbuf whose data will be deposited into the receive queue - * @param m_label The label of the sender of the data. - * - * A socket has a queue for receiving incoming data. When a packet arrives - * on the wire, it eventually gets deposited into this queue, which the - * owner of the socket drains when they read from the socket's file descriptor. - * - * This function determines whether the socket can receive data from - * the sender specified by m_label. - * - * @warning There is an outstanding design issue surrounding the placement - * of this function. The check must be placed either before or after the - * TCP sequence and ACK counters are updated. Placing the check before - * the counters are updated causes the incoming packet to be resent by - * the remote if the check rejects it. Placing the check after the counters - * are updated results in a completely silent drop. As far as each TCP stack - * is concerned the packet was received, however, the data will not be in the - * socket's receive queue. Another consideration is that the current design - * requires using the "failed label" occasionally. In that case, on rejection, - * we want the remote TCP to resend the data. Because of this, we chose to - * place this check before the counters are updated, so rejected packets will be - * resent by the remote host. - * - * If a policy keeps rejecting the same packet, eventually the connection will - * be dropped. Policies have several options if this design causes problems. - * For example, one options is to sanitize the mbuf such that it is acceptable, - * then accept it. That may require negotiation between policies as the - * Framework will not know to re-check the packet. - * - * The policy must handle NULL MBUF labels. This will likely be the case - * for non-local TCP sockets for example. - * - * @return Return 0 if access if granted, otherwise an appropriate - * value for errno should be returned. - */ -typedef int mpo_socket_check_deliver_t( - socket_t so, - struct label *so_label, - struct mbuf *m, - struct label *m_label - ); /** * @brief Access control check for socket ioctl. * @param cred Subject credential @@ -3382,47 +2646,9 @@ typedef int mpo_socket_check_deliver_t( typedef int mpo_socket_check_ioctl_t( kauth_cred_t cred, socket_t so, - unsigned int cmd, - struct label *socklabel - ); -/** - * @brief Access control check for socket kqfilter - * @param cred Subject credential - * @param kn Object knote - * @param so Object socket - * @param socklabel Policy label for socket - * - * Determine whether the subject identified by the credential can - * receive the knote on the passed socket. - * - * @return Return 0 if access if granted, otherwise an appropriate - * value for errno should be returned. 
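/*
 * A minimal sketch (illustrative only, not from this diff) of a policy acting
 * on the expanded suspend/resume check documented above, assuming the existing
 * typedef's (cred, proc, sr) parameters; the example_ name and the rule are
 * hypothetical. The sr values are the MAC_PROC_CHECK_* constants listed in
 * that description.
 */
static int
example_proc_check_suspend_resume(kauth_cred_t cred, struct proc *proc, int sr)
{
#pragma unused(cred, proc)
	switch (sr) {
	case MAC_PROC_CHECK_SUSPEND:
	case MAC_PROC_CHECK_RESUME:
		return 0;       /* permit plain suspend/resume */
	default:
		return EPERM;   /* hypothetical: deny hibernate, socket shutdown, pidbind */
	}
}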
- */ -typedef int mpo_socket_check_kqfilter_t( - kauth_cred_t cred, - struct knote *kn, - socket_t so, + unsigned long cmd, struct label *socklabel ); -/** - * @brief Access control check for socket relabel - * @param cred Subject credential - * @param so Object socket - * @param so_label The current label of so - * @param newlabel The label to be assigned to so - * - * Determine whether the subject identified by the credential can - * change the label on the socket. - * - * @return Return 0 if access if granted, otherwise an appropriate - * value for errno should be returned. - */ -typedef int mpo_socket_check_label_update_t( - kauth_cred_t cred, - socket_t so, - struct label *so_label, - struct label *newlabel - ); /** * @brief Access control check for socket listen * @param cred Subject credential @@ -3478,26 +2704,6 @@ typedef int mpo_socket_check_received_t( struct sockaddr *saddr ); - -/** - * @brief Access control check for socket select - * @param cred Subject credential - * @param so Object socket - * @param socklabel Policy label for socket - * @param which The operation selected on: FREAD or FWRITE - * - * Determine whether the subject identified by the credential can use the - * socket in a call to select(). - * - * @return Return 0 if access if granted, otherwise an appropriate - * value for errno should be returned. - */ -typedef int mpo_socket_check_select_t( - kauth_cred_t cred, - socket_t so, - struct label *socklabel, - int which - ); /** * @brief Access control check for socket send * @param cred Subject credential @@ -3572,246 +2778,6 @@ typedef int mpo_socket_check_getsockopt_t( struct label *socklabel, struct sockopt *sopt ); -/** - * @brief Label a socket - * @param oldsock Listening socket - * @param oldlabel Policy label associated with oldsock - * @param newsock New socket - * @param newlabel Policy label associated with newsock - * - * A new socket is created when a connection is accept(2)ed. This - * function labels the new socket based on the existing listen(2)ing - * socket. - */ -typedef void mpo_socket_label_associate_accept_t( - socket_t oldsock, - struct label *oldlabel, - socket_t newsock, - struct label *newlabel - ); -/** - * @brief Assign a label to a new socket - * @param cred Credential of the owning process - * @param so The socket being labeled - * @param solabel The label - * @warning cred can be NULL - * - * Set the label on a newly created socket from the passed subject - * credential. This call is made when a socket is created. The - * credentials may be null if the socket is being created by the - * kernel. - */ -typedef void mpo_socket_label_associate_t( - kauth_cred_t cred, - socket_t so, - struct label *solabel - ); -/** - * @brief Copy a socket label - * @param src Source label - * @param dest Destination label - * - * Copy the socket label information in src into dest. - */ -typedef void mpo_socket_label_copy_t( - struct label *src, - struct label *dest - ); -/** - * @brief Destroy socket label - * @param label The label to be destroyed - * - * Destroy a socket label. Since the object is going out of - * scope, policy modules should free any internal storage associated - * with the label so that it may be destroyed. 
- */ -typedef void mpo_socket_label_destroy_t( - struct label *label - ); -/** - * @brief Externalize a socket label - * @param label Label to be externalized - * @param element_name Name of the label namespace for which labels should be - * externalized - * @param sb String buffer to be filled with a text representation of label - * - * Produce an externalized socket label based on the label structure passed. - * An externalized label consists of a text representation of the label - * contents that can be used with userland applications and read by the - * user. If element_name does not match a namespace managed by the policy, - * simply return 0. Only return nonzero if an error occurs while externalizing - * the label data. - * - * @return In the event of an error, an appropriate value for errno - * should be returned, otherwise return 0 upon success. - */ -typedef int mpo_socket_label_externalize_t( - struct label *label, - char *element_name, - struct sbuf *sb - ); -/** - * @brief Initialize socket label - * @param label New label to initialize - * @param waitok Malloc flags - * - * Initialize the label of a newly instantiated socket. The waitok - * field may be one of M_WAITOK and M_NOWAIT, and should be employed to - * avoid performing a sleeping malloc(9) during this initialization - * call. It it not always safe to sleep during this entry point. - * - * @warning Since it is possible for the waitok flags to be set to - * M_NOWAIT, the malloc operation may fail. - * - * @return In the event of an error, an appropriate value for errno - * should be returned, otherwise return 0 upon success. - */ -typedef int mpo_socket_label_init_t( - struct label *label, - int waitok - ); -/** - * @brief Internalize a socket label - * @param label Label to be filled in - * @param element_name Name of the label namespace for which the label should - * be internalized - * @param element_data Text data to be internalized - * - * Produce an internal socket label structure based on externalized label - * data in text format. - * - * The policy's internalize entry points will be called only if the - * policy has registered interest in the label namespace. - * - * @return In the event of an error, an appropriate value for errno - * should be returned, otherwise return 0 upon success. - */ -typedef int mpo_socket_label_internalize_t( - struct label *label, - char *element_name, - char *element_data - ); -/** - * @brief Relabel socket - * @param cred Subject credential - * @param so Object; socket - * @param so_label Current label of the socket - * @param newlabel The label to be assigned to so - * - * The subject identified by the credential has previously requested - * and was authorized to relabel the socket; this entry point allows - * policies to perform the actual label update operation. - * - * @warning XXX This entry point will likely change in future versions. - */ -typedef void mpo_socket_label_update_t( - kauth_cred_t cred, - socket_t so, - struct label *so_label, - struct label *newlabel - ); -/** - * @brief Set the peer label on a socket from mbuf - * @param m Mbuf chain received on socket so - * @param m_label Label for m - * @param so Current label for the socket - * @param so_label Policy label to be filled out for the socket - * - * Set the peer label of a socket based on the label of the sender of the - * mbuf. - * - * This is called for every TCP/IP packet received. 
The first call for a given - * socket operates on a newly initialized label, and subsequent calls operate - * on existing label data. - * - * @warning Because this can affect performance significantly, it has - * different sematics than other 'set' operations. Typically, 'set' operations - * operate on newly initialzed labels and policies do not need to worry about - * clobbering existing values. In this case, it is too inefficient to - * initialize and destroy a label every time data is received for the socket. - * Instead, it is up to the policies to determine how to replace the label data. - * Most policies should be able to replace the data inline. - */ -typedef void mpo_socketpeer_label_associate_mbuf_t( - struct mbuf *m, - struct label *m_label, - socket_t so, - struct label *so_label - ); -/** - * @brief Set the peer label on a socket from socket - * @param source Local socket - * @param sourcelabel Policy label for source - * @param target Peer socket - * @param targetlabel Policy label to fill in for target - * - * Set the peer label on a stream UNIX domain socket from the passed - * remote socket endpoint. This call will be made when the socket pair - * is connected, and will be made for both endpoints. - * - * Note that this call is only made on connection; it is currently not updated - * during communication. - */ -typedef void mpo_socketpeer_label_associate_socket_t( - socket_t source, - struct label *sourcelabel, - socket_t target, - struct label *targetlabel - ); -/** - * @brief Destroy socket peer label - * @param label The peer label to be destroyed - * - * Destroy a socket peer label. Since the object is going out of - * scope, policy modules should free any internal storage associated - * with the label so that it may be destroyed. - */ -typedef void mpo_socketpeer_label_destroy_t( - struct label *label - ); -/** - * @brief Externalize a socket peer label - * @param label Label to be externalized - * @param element_name Name of the label namespace for which labels should be - * externalized - * @param sb String buffer to be filled with a text representation of label - * - * Produce an externalized socket peer label based on the label structure - * passed. An externalized label consists of a text representation of the - * label contents that can be used with userland applications and read by the - * user. If element_name does not match a namespace managed by the policy, - * simply return 0. Only return nonzero if an error occurs while externalizing - * the label data. - * - * @return In the event of an error, an appropriate value for errno - * should be returned, otherwise return 0 upon success. - */ -typedef int mpo_socketpeer_label_externalize_t( - struct label *label, - char *element_name, - struct sbuf *sb - ); -/** - * @brief Initialize socket peer label - * @param label New label to initialize - * @param waitok Malloc flags - * - * Initialize the peer label of a newly instantiated socket. The - * waitok field may be one of M_WAITOK and M_NOWAIT, and should be - * employed to avoid performing a sleeping malloc(9) during this - * initialization call. It it not always safe to sleep during this - * entry point. - * - * @warning Since it is possible for the waitok flags to be set to - * M_NOWAIT, the malloc operation may fail. - * - * @return In the event of an error, an appropriate value for errno - * should be returned, otherwise return 0 upon success. 
- */ -typedef int mpo_socketpeer_label_init_t( - struct label *label, - int waitok - ); /** * @brief Access control check for enabling accounting * @param cred Subject credential @@ -4015,7 +2981,7 @@ typedef int mpo_system_check_sysctlbyname_t( kauth_cred_t cred, const char *namestring, int *name, - u_int namelen, + size_t namelen, user_addr_t old, /* NULLOK */ size_t oldlen, user_addr_t newvalue, /* NULLOK */ @@ -4496,7 +3462,7 @@ typedef void mpo_sysvshm_label_recycle_t( /** * @brief Access control check for getting a process's task name * @param cred Subject credential - * @param p Object process + * @param pident Object unique process identifier * * Determine whether the subject identified by the credential can get * the passed process's task name port. @@ -4508,12 +3474,12 @@ typedef void mpo_sysvshm_label_recycle_t( */ typedef int mpo_proc_check_get_task_name_t( kauth_cred_t cred, - struct proc *p + struct proc_ident *pident ); /** * @brief Access control check for getting a process's task port * @param cred Subject credential - * @param p Object process + * @param pident Object unique process identifier * * Determine whether the subject identified by the credential can get * the passed process's task control port. @@ -4525,13 +3491,13 @@ typedef int mpo_proc_check_get_task_name_t( */ typedef int mpo_proc_check_get_task_t( kauth_cred_t cred, - struct proc *p + struct proc_ident *pident ); /** * @brief Access control check for exposing a process's task port * @param cred Subject credential - * @param p Object process + * @param pident Object unique process identifier * * Determine whether the subject identified by the credential can expose * the passed process's task control port. @@ -4544,7 +3510,7 @@ typedef int mpo_proc_check_get_task_t( */ typedef int mpo_proc_check_expose_task_t( kauth_cred_t cred, - struct proc *p + struct proc_ident *pident ); /** @@ -4582,6 +3548,26 @@ typedef int mpo_proc_check_run_cs_invalid_t( struct proc *p ); +/** + * @brief Notification a process was invalidated + * @param p Object process + * + * Notifies that the CS_VALID bit was removed from a process' csflags. This + * either indicates that a validly code-signed process has encountered an + * invalidly code-signed page for the first time, or that it was explicitly + * marked invalid via a csops(CS_OPS_MARKINVALID) syscall. + * + * @warning This hook can be called from the page fault handler; it should not + * perform any operations that may result in paging, and stack space is extremely + * limited. Furthermore, the hook is called with proc lock held, and if called + * from the fault handler, with vm object lock held. Consumers reacting to this + * hook being called are expected to defer processing to a userret, possibly + * after suspending the task. 
+ */ +typedef void mpo_proc_notify_cs_invalidated_t( + struct proc *p + ); + /** * @brief Notification a process is finished with exec and will jump to userspace * @param p Object process @@ -4910,7 +3896,7 @@ typedef int mpo_vnode_check_ioctl_t( kauth_cred_t cred, struct vnode *vp, struct label *label, - unsigned int cmd + unsigned long cmd ); /** * @brief Access control check for vnode kqfilter @@ -5441,6 +4427,7 @@ typedef int mpo_vnode_check_setutimes_t( * @param cs_flags update code signing flags if needed * @param signer_type output parameter for the code signature's signer type * @param flags operational flag to mpo_vnode_check_signature + * @param platform platform of the signature being checked * @param fatal_failure_desc description of fatal failure * @param fatal_failure_desc_len failure description len, failure is fatal if non-0 * @@ -5455,8 +4442,31 @@ typedef int mpo_vnode_check_signature_t( unsigned int *cs_flags, unsigned int *signer_type, int flags, + unsigned int platform, char **fatal_failure_desc, size_t *fatal_failure_desc_len ); + +/** + * @brief Access control check for supplemental signature attachement + * @param vp the vnode to which the signature will be attached + * @param label label associated with the vnode + * @param cs_blob the code signature to check + * @param linked_vp vnode to which this new vp is related + * @param linked_cs_blob the code signature of the linked vnode + * @param signer_type output parameter for the signer type of the code signature being checked. + * + * @return Return 0 if access is granted, otherwise an appropriate value for + * errno should be returned. + */ +typedef int mpo_vnode_check_supplemental_signature_t( + struct vnode *vp, + struct label *label, + struct cs_blob *cs_blob, + struct vnode *linked_vp, + struct cs_blob *linked_cs_blob, + unsigned int *signer_type + ); + /** * @brief Access control check for stat * @param active_cred Subject credential @@ -6311,6 +5321,16 @@ typedef int mpo_kext_check_query_t( kauth_cred_t cred ); +/** + * @brief Inform MAC policies that a vnode is being reclaimed + * @param vp Object vnode + * + * Any external accounting tracking this vnode must consider it to be no longer valid. + */ +typedef void mpo_vnode_notify_reclaim_t( + struct vnode *vp + ); + /* * Placeholder for future events that may need mac hooks. */ @@ -6322,15 +5342,15 @@ typedef void mpo_reserved_hook_t(void); * Please note that this should be kept in sync with the check assumptions * policy in bsd/kern/policy_check.c (policy_ops struct). 
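/*
 * A minimal sketch (illustrative only, not from this diff) of a policy
 * implementing the supplemental-signature check added above; the example_ name
 * and the pairing rule are hypothetical.
 */
static int
example_vnode_check_supplemental_signature(struct vnode *vp,
    struct label *label, struct cs_blob *cs_blob, struct vnode *linked_vp,
    struct cs_blob *linked_cs_blob, unsigned int *signer_type)
{
#pragma unused(vp, label, linked_vp)
	/* A supplement only makes sense when both signatures are present. */
	if (cs_blob == NULL || linked_cs_blob == NULL) {
		return EPERM;
	}
	/* Leave *signer_type untouched here; a classifying policy would set it. */
	return 0;
}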
*/ -#define MAC_POLICY_OPS_VERSION 62 /* inc when new reserved slots are taken */ +#define MAC_POLICY_OPS_VERSION 69 /* inc when new reserved slots are taken */ struct mac_policy_ops { mpo_audit_check_postselect_t *mpo_audit_check_postselect; mpo_audit_check_preselect_t *mpo_audit_check_preselect; - mpo_bpfdesc_label_associate_t *mpo_bpfdesc_label_associate; - mpo_bpfdesc_label_destroy_t *mpo_bpfdesc_label_destroy; - mpo_bpfdesc_label_init_t *mpo_bpfdesc_label_init; - mpo_bpfdesc_check_receive_t *mpo_bpfdesc_check_receive; + mpo_reserved_hook_t *mpo_reserved01; + mpo_reserved_hook_t *mpo_reserved02; + mpo_reserved_hook_t *mpo_reserved03; + mpo_reserved_hook_t *mpo_reserved04; mpo_cred_check_label_update_execve_t *mpo_cred_check_label_update_execve; mpo_cred_check_label_update_t *mpo_cred_check_label_update; @@ -6370,32 +5390,29 @@ struct mac_policy_ops { mpo_file_label_init_t *mpo_file_label_init; mpo_file_label_destroy_t *mpo_file_label_destroy; mpo_file_label_associate_t *mpo_file_label_associate; + mpo_file_notify_close_t *mpo_file_notify_close; - mpo_ifnet_check_label_update_t *mpo_ifnet_check_label_update; - mpo_ifnet_check_transmit_t *mpo_ifnet_check_transmit; - mpo_ifnet_label_associate_t *mpo_ifnet_label_associate; - mpo_ifnet_label_copy_t *mpo_ifnet_label_copy; - mpo_ifnet_label_destroy_t *mpo_ifnet_label_destroy; - mpo_ifnet_label_externalize_t *mpo_ifnet_label_externalize; - mpo_ifnet_label_init_t *mpo_ifnet_label_init; - mpo_ifnet_label_internalize_t *mpo_ifnet_label_internalize; - mpo_ifnet_label_update_t *mpo_ifnet_label_update; - mpo_ifnet_label_recycle_t *mpo_ifnet_label_recycle; - - mpo_inpcb_check_deliver_t *mpo_inpcb_check_deliver; - mpo_inpcb_label_associate_t *mpo_inpcb_label_associate; - mpo_inpcb_label_destroy_t *mpo_inpcb_label_destroy; - mpo_inpcb_label_init_t *mpo_inpcb_label_init; - mpo_inpcb_label_recycle_t *mpo_inpcb_label_recycle; - mpo_inpcb_label_update_t *mpo_inpcb_label_update; - - mpo_iokit_check_device_t *mpo_iokit_check_device; - - mpo_ipq_label_associate_t *mpo_ipq_label_associate; - mpo_ipq_label_compare_t *mpo_ipq_label_compare; - mpo_ipq_label_destroy_t *mpo_ipq_label_destroy; - mpo_ipq_label_init_t *mpo_ipq_label_init; - mpo_ipq_label_update_t *mpo_ipq_label_update; + mpo_reserved_hook_t *mpo_reserved06; + mpo_reserved_hook_t *mpo_reserved07; + mpo_reserved_hook_t *mpo_reserved08; + mpo_reserved_hook_t *mpo_reserved09; + mpo_reserved_hook_t *mpo_reserved10; + mpo_reserved_hook_t *mpo_reserved11; + mpo_reserved_hook_t *mpo_reserved12; + mpo_reserved_hook_t *mpo_reserved13; + mpo_reserved_hook_t *mpo_reserved14; + mpo_reserved_hook_t *mpo_reserved15; + mpo_reserved_hook_t *mpo_reserved16; + mpo_reserved_hook_t *mpo_reserved17; + mpo_reserved_hook_t *mpo_reserved18; + mpo_reserved_hook_t *mpo_reserved19; + mpo_reserved_hook_t *mpo_reserved20; + mpo_reserved_hook_t *mpo_reserved21; + mpo_reserved_hook_t *mpo_reserved22; + mpo_reserved_hook_t *mpo_reserved23; + mpo_reserved_hook_t *mpo_reserved24; + mpo_reserved_hook_t *mpo_reserved25; + mpo_reserved_hook_t *mpo_reserved26; mpo_file_check_library_validation_t *mpo_file_check_library_validation; mpo_vnode_notify_setacl_t *mpo_vnode_notify_setacl; @@ -6407,17 +5424,17 @@ struct mac_policy_ops { mpo_vnode_notify_setutimes_t *mpo_vnode_notify_setutimes; mpo_vnode_notify_truncate_t *mpo_vnode_notify_truncate; - mpo_mbuf_label_associate_bpfdesc_t *mpo_mbuf_label_associate_bpfdesc; - mpo_mbuf_label_associate_ifnet_t *mpo_mbuf_label_associate_ifnet; - mpo_mbuf_label_associate_inpcb_t 
*mpo_mbuf_label_associate_inpcb; - mpo_mbuf_label_associate_ipq_t *mpo_mbuf_label_associate_ipq; - mpo_mbuf_label_associate_linklayer_t *mpo_mbuf_label_associate_linklayer; - mpo_mbuf_label_associate_multicast_encap_t *mpo_mbuf_label_associate_multicast_encap; - mpo_mbuf_label_associate_netlayer_t *mpo_mbuf_label_associate_netlayer; - mpo_mbuf_label_associate_socket_t *mpo_mbuf_label_associate_socket; - mpo_mbuf_label_copy_t *mpo_mbuf_label_copy; - mpo_mbuf_label_destroy_t *mpo_mbuf_label_destroy; - mpo_mbuf_label_init_t *mpo_mbuf_label_init; + mpo_reserved_hook_t *mpo_reserved27; + mpo_reserved_hook_t *mpo_reserved28; + mpo_reserved_hook_t *mpo_reserved29; + mpo_reserved_hook_t *mpo_reserved30; + mpo_reserved_hook_t *mpo_reserved31; + mpo_reserved_hook_t *mpo_reserved32; + mpo_reserved_hook_t *mpo_reserved33; + mpo_reserved_hook_t *mpo_reserved34; + mpo_reserved_hook_t *mpo_reserved35; + mpo_reserved_hook_t *mpo_reserved36; + mpo_reserved_hook_t *mpo_reserved37; mpo_mount_check_fsctl_t *mpo_mount_check_fsctl; mpo_mount_check_getattr_t *mpo_mount_check_getattr; @@ -6433,24 +5450,24 @@ struct mac_policy_ops { mpo_mount_label_init_t *mpo_mount_label_init; mpo_mount_label_internalize_t *mpo_mount_label_internalize; - mpo_netinet_fragment_t *mpo_netinet_fragment; - mpo_netinet_icmp_reply_t *mpo_netinet_icmp_reply; - mpo_netinet_tcp_reply_t *mpo_netinet_tcp_reply; + mpo_reserved_hook_t *mpo_reserved38; + mpo_reserved_hook_t *mpo_reserved39; + mpo_reserved_hook_t *mpo_reserved40; mpo_pipe_check_ioctl_t *mpo_pipe_check_ioctl; mpo_pipe_check_kqfilter_t *mpo_pipe_check_kqfilter; - mpo_pipe_check_label_update_t *mpo_pipe_check_label_update; + mpo_reserved_hook_t *mpo_reserved41; mpo_pipe_check_read_t *mpo_pipe_check_read; mpo_pipe_check_select_t *mpo_pipe_check_select; mpo_pipe_check_stat_t *mpo_pipe_check_stat; mpo_pipe_check_write_t *mpo_pipe_check_write; mpo_pipe_label_associate_t *mpo_pipe_label_associate; - mpo_pipe_label_copy_t *mpo_pipe_label_copy; + mpo_reserved_hook_t *mpo_reserved42; mpo_pipe_label_destroy_t *mpo_pipe_label_destroy; - mpo_pipe_label_externalize_t *mpo_pipe_label_externalize; + mpo_reserved_hook_t *mpo_reserved43; mpo_pipe_label_init_t *mpo_pipe_label_init; - mpo_pipe_label_internalize_t *mpo_pipe_label_internalize; - mpo_pipe_label_update_t *mpo_pipe_label_update; + mpo_reserved_hook_t *mpo_reserved44; + mpo_reserved_hook_t *mpo_reserved45; mpo_policy_destroy_t *mpo_policy_destroy; mpo_policy_init_t *mpo_policy_init; @@ -6462,7 +5479,7 @@ struct mac_policy_ops { mpo_vnode_check_rename_t *mpo_vnode_check_rename; mpo_kext_check_query_t *mpo_kext_check_query; mpo_proc_notify_exec_complete_t *mpo_proc_notify_exec_complete; - mpo_reserved_hook_t *mpo_reserved4; + mpo_proc_notify_cs_invalidated_t *mpo_proc_notify_cs_invalidated; mpo_proc_check_syscall_unix_t *mpo_proc_check_syscall_unix; mpo_proc_check_expose_task_t *mpo_proc_check_expose_task; mpo_proc_check_set_host_special_port_t *mpo_proc_check_set_host_special_port; @@ -6477,7 +5494,7 @@ struct mac_policy_ops { mpo_vnode_check_trigger_resolve_t *mpo_vnode_check_trigger_resolve; mpo_mount_check_mount_late_t *mpo_mount_check_mount_late; mpo_mount_check_snapshot_mount_t *mpo_mount_check_snapshot_mount; - mpo_reserved_hook_t *mpo_reserved2; + mpo_vnode_notify_reclaim_t *mpo_vnode_notify_reclaim; mpo_skywalk_flow_check_connect_t *mpo_skywalk_flow_check_connect; mpo_skywalk_flow_check_listen_t *mpo_skywalk_flow_check_listen; @@ -6514,38 +5531,38 @@ struct mac_policy_ops { mpo_proc_check_signal_t *mpo_proc_check_signal; 
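/*
 * A minimal sketch (illustrative only, not from this diff) of how a policy
 * built against the version-69 layout above would occupy the newly assigned
 * slots; the example_ names are hypothetical and unlisted members stay NULL.
 */
static void
example_proc_notify_cs_invalidated(struct proc *p)
{
#pragma unused(p)
	/* May be called from the page-fault path with the proc lock held:
	 * record a flag only and defer real work to a later userret. */
}

static void
example_vnode_notify_reclaim(struct vnode *vp)
{
#pragma unused(vp)
	/* Drop any cached state keyed on this vnode; it is no longer valid. */
}

static struct mac_policy_ops example_policy_ops = {
	.mpo_proc_notify_cs_invalidated = example_proc_notify_cs_invalidated,
	.mpo_vnode_notify_reclaim       = example_vnode_notify_reclaim,
};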
mpo_proc_check_wait_t *mpo_proc_check_wait; mpo_proc_check_dump_core_t *mpo_proc_check_dump_core; - mpo_reserved_hook_t *mpo_reserved5; + mpo_proc_check_remote_thread_create_t *mpo_proc_check_remote_thread_create; mpo_socket_check_accept_t *mpo_socket_check_accept; mpo_socket_check_accepted_t *mpo_socket_check_accepted; mpo_socket_check_bind_t *mpo_socket_check_bind; mpo_socket_check_connect_t *mpo_socket_check_connect; mpo_socket_check_create_t *mpo_socket_check_create; - mpo_socket_check_deliver_t *mpo_socket_check_deliver; - mpo_socket_check_kqfilter_t *mpo_socket_check_kqfilter; - mpo_socket_check_label_update_t *mpo_socket_check_label_update; + mpo_reserved_hook_t *mpo_reserved46; + mpo_reserved_hook_t *mpo_reserved47; + mpo_reserved_hook_t *mpo_reserved48; mpo_socket_check_listen_t *mpo_socket_check_listen; mpo_socket_check_receive_t *mpo_socket_check_receive; mpo_socket_check_received_t *mpo_socket_check_received; - mpo_socket_check_select_t *mpo_socket_check_select; + mpo_reserved_hook_t *mpo_reserved49; mpo_socket_check_send_t *mpo_socket_check_send; mpo_socket_check_stat_t *mpo_socket_check_stat; mpo_socket_check_setsockopt_t *mpo_socket_check_setsockopt; mpo_socket_check_getsockopt_t *mpo_socket_check_getsockopt; - mpo_socket_label_associate_accept_t *mpo_socket_label_associate_accept; - mpo_socket_label_associate_t *mpo_socket_label_associate; - mpo_socket_label_copy_t *mpo_socket_label_copy; - mpo_socket_label_destroy_t *mpo_socket_label_destroy; - mpo_socket_label_externalize_t *mpo_socket_label_externalize; - mpo_socket_label_init_t *mpo_socket_label_init; - mpo_socket_label_internalize_t *mpo_socket_label_internalize; - mpo_socket_label_update_t *mpo_socket_label_update; - mpo_socketpeer_label_associate_mbuf_t *mpo_socketpeer_label_associate_mbuf; - mpo_socketpeer_label_associate_socket_t *mpo_socketpeer_label_associate_socket; - mpo_socketpeer_label_destroy_t *mpo_socketpeer_label_destroy; - mpo_socketpeer_label_externalize_t *mpo_socketpeer_label_externalize; - mpo_socketpeer_label_init_t *mpo_socketpeer_label_init; + mpo_reserved_hook_t *mpo_reserved50; + mpo_reserved_hook_t *mpo_reserved51; + mpo_reserved_hook_t *mpo_reserved52; + mpo_reserved_hook_t *mpo_reserved53; + mpo_reserved_hook_t *mpo_reserved54; + mpo_reserved_hook_t *mpo_reserved55; + mpo_reserved_hook_t *mpo_reserved56; + mpo_reserved_hook_t *mpo_reserved57; + mpo_reserved_hook_t *mpo_reserved58; + mpo_reserved_hook_t *mpo_reserved59; + mpo_reserved_hook_t *mpo_reserved60; + mpo_reserved_hook_t *mpo_reserved61; + mpo_reserved_hook_t *mpo_reserved62; mpo_system_check_acct_t *mpo_system_check_acct; mpo_system_check_audit_t *mpo_system_check_audit; @@ -6664,7 +5681,7 @@ struct mac_policy_ops { mpo_iokit_check_set_properties_t *mpo_iokit_check_set_properties; - mpo_reserved_hook_t *mpo_reserved3; + mpo_vnode_check_supplemental_signature_t *mpo_vnode_check_supplemental_signature; mpo_vnode_check_searchfs_t *mpo_vnode_check_searchfs; @@ -6795,7 +5812,9 @@ int mac_vnop_removexattr(struct vnode *, const char *); * * Caller must hold an iocount on the vnode represented by the fileglob. */ +#ifdef KERNEL_PRIVATE int mac_file_setxattr(struct fileglob *fg, const char *name, char *buf, size_t len); +#endif /** * @brief Get an extended attribute from a vnode-based fileglob. @@ -6809,8 +5828,10 @@ int mac_file_setxattr(struct fileglob *fg, const char *name, char *buf, size * * Caller must hold an iocount on the vnode represented by the fileglob. 
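/*
 * A minimal sketch (illustrative only, not from this diff) of a KERNEL_PRIVATE
 * consumer using the fileglob xattr helper declared above; the xattr name and
 * the example_ function are hypothetical, and the caller is assumed to already
 * hold the required iocount on the backing vnode.
 */
#ifdef KERNEL_PRIVATE
static int
example_tag_file(struct fileglob *fg)
{
	char tag[] = "1";
	return mac_file_setxattr(fg, "com.example.mac-tag", tag, sizeof(tag));
}
#endif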
*/ +#ifdef KERNEL_PRIVATE int mac_file_getxattr(struct fileglob *fg, const char *name, char *buf, size_t len, size_t *attrlen); +#endif /** * @brief Remove an extended attribute from a vnode-based fileglob. @@ -6821,8 +5842,9 @@ int mac_file_getxattr(struct fileglob *fg, const char *name, char *buf, size * * Caller must hold an iocount on the vnode represented by the fileglob. */ +#ifdef KERNEL_PRIVATE int mac_file_removexattr(struct fileglob *fg, const char *name); - +#endif /* * Arbitrary limit on how much data will be logged by the audit @@ -6954,10 +5976,12 @@ int mac_file_removexattr(struct fileglob *fg, const char *name); * Typically, policies wrap this in their own accessor macro that casts an * intptr_t to a policy-specific data type. */ +#ifdef KERNEL_PRIVATE intptr_t mac_label_get(struct label *l, int slot); void mac_label_set(struct label *l, int slot, intptr_t v); intptr_t mac_vnode_label_get(struct vnode *vp, int slot, intptr_t sentinel); void mac_vnode_label_set(struct vnode *vp, int slot, intptr_t v); +#endif #define mac_get_mpc(h) (mac_policy_list.entries[h].mpc) diff --git a/security/mac_process.c b/security/mac_process.c index 31d539af2..3bcb1cba5 100644 --- a/security/mac_process.c +++ b/security/mac_process.c @@ -305,10 +305,11 @@ mac_cred_check_visible(kauth_cred_t u1, kauth_cred_t u2) } int -mac_proc_check_debug(proc_t curp, struct proc *proc) +mac_proc_check_debug(proc_ident_t tracing_ident, kauth_cred_t tracing_cred, proc_ident_t traced_ident) { - kauth_cred_t cred; int error; + bool enforce; + proc_t tracingp; #if SECURITY_MAC_CHECK_ENFORCE /* 21167099 - only check if we allow write */ @@ -316,13 +317,20 @@ mac_proc_check_debug(proc_t curp, struct proc *proc) return 0; } #endif - if (!mac_proc_check_enforce(curp)) { - return 0; + /* + * Once all mac hooks adopt proc_ident_t, finding proc_t and releasing + * it below should go to mac_proc_check_enforce(). 
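/*
 * A minimal sketch (illustrative only, not from this diff) of the policy side
 * of the proc_ident_t adoption: the debug hook now receives a struct
 * proc_ident * and resolves (and releases) the proc itself when it needs one,
 * mirroring the proc_find_ident()/proc_rele() pattern used by the framework
 * code in this file. The example_ name and the pid-1 rule are hypothetical.
 */
int
example_proc_check_debug(kauth_cred_t cred, struct proc_ident *pident)
{
#pragma unused(cred)
	proc_t p = proc_find_ident(pident);
	if (p == PROC_NULL) {
		return ESRCH;   /* target already exited */
	}
	int deny = (proc_pid(p) == 1);  /* hypothetical: never debug launchd */
	proc_rele(p);
	return deny ? EPERM : 0;
}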
+ */ + if ((tracingp = proc_find_ident(tracing_ident)) == PROC_NULL) { + return ESRCH; } + enforce = mac_proc_check_enforce(tracingp); + proc_rele(tracingp); - cred = kauth_cred_proc_ref(curp); - MAC_CHECK(proc_check_debug, cred, proc); - kauth_cred_unref(&cred); + if (!enforce) { + return 0; + } + MAC_CHECK(proc_check_debug, tracing_cred, traced_ident); return error; } @@ -347,6 +355,37 @@ mac_proc_check_dump_core(struct proc *proc) return error; } +int +mac_proc_check_remote_thread_create(struct task *task, int flavor, thread_state_t new_state, mach_msg_type_number_t new_state_count) +{ + proc_t curp = current_proc(); + proc_t proc; + kauth_cred_t cred; + int error; + +#if SECURITY_MAC_CHECK_ENFORCE + /* 21167099 - only check if we allow write */ + if (!mac_proc_enforce) { + return 0; + } +#endif + if (!mac_proc_check_enforce(curp)) { + return 0; + } + + proc = proc_find(task_pid(task)); + if (proc == PROC_NULL) { + return ESRCH; + } + + cred = kauth_cred_proc_ref(curp); + MAC_CHECK(proc_check_remote_thread_create, cred, proc, flavor, new_state, new_state_count); + kauth_cred_unref(&cred); + proc_rele(proc); + + return error; +} + int mac_proc_check_fork(proc_t curp) { @@ -371,31 +410,31 @@ mac_proc_check_fork(proc_t curp) } int -mac_proc_check_get_task_name(struct ucred *cred, struct proc *p) +mac_proc_check_get_task_name(struct ucred *cred, proc_ident_t pident) { int error; - MAC_CHECK(proc_check_get_task_name, cred, p); + MAC_CHECK(proc_check_get_task_name, cred, pident); return error; } int -mac_proc_check_get_task(struct ucred *cred, struct proc *p) +mac_proc_check_get_task(struct ucred *cred, proc_ident_t pident) { int error; - MAC_CHECK(proc_check_get_task, cred, p); + MAC_CHECK(proc_check_get_task, cred, pident); return error; } int -mac_proc_check_expose_task(struct ucred *cred, struct proc *p) +mac_proc_check_expose_task(struct ucred *cred, proc_ident_t pident) { int error; - MAC_CHECK(proc_check_expose_task, cred, p); + MAC_CHECK(proc_check_expose_task, cred, pident); return error; } @@ -480,6 +519,12 @@ mac_proc_check_run_cs_invalid(proc_t proc) return error; } +void +mac_proc_notify_cs_invalidated(proc_t proc) +{ + MAC_PERFORM(proc_notify_cs_invalidated, proc); +} + int mac_proc_check_sched(proc_t curp, struct proc *proc) { @@ -576,7 +621,7 @@ mac_proc_notify_exit(struct proc *proc) } int -mac_proc_check_suspend_resume(proc_t curp, int sr) +mac_proc_check_suspend_resume(proc_t proc, int sr) { kauth_cred_t cred; int error; @@ -587,12 +632,12 @@ mac_proc_check_suspend_resume(proc_t curp, int sr) return 0; } #endif - if (!mac_proc_check_enforce(curp)) { + if (!mac_proc_check_enforce(current_proc())) { return 0; } - cred = kauth_cred_proc_ref(curp); - MAC_CHECK(proc_check_suspend_resume, cred, curp, sr); + cred = kauth_cred_proc_ref(current_proc()); + MAC_CHECK(proc_check_suspend_resume, cred, proc, sr); kauth_cred_unref(&cred); return error; diff --git a/security/mac_socket.c b/security/mac_socket.c index 925e8f23c..45fce6951 100644 --- a/security/mac_socket.c +++ b/security/mac_socket.c @@ -84,405 +84,6 @@ #include -#if CONFIG_MACF_SOCKET -struct label * -mac_socket_label_alloc(int flag) -{ - struct label *label; - int error; - - label = mac_labelzone_alloc(flag); - if (label == NULL) { - return NULL; - } - - MAC_CHECK(socket_label_init, label, flag); - if (error) { - MAC_PERFORM(socket_label_destroy, label); - mac_labelzone_free(label); - return NULL; - } - - return label; -} - -static struct label * -mac_socket_peer_label_alloc(int flag) -{ - struct label *label; - int 
error; - - label = mac_labelzone_alloc(flag); - if (label == NULL) { - return NULL; - } - - MAC_CHECK(socketpeer_label_init, label, flag); - if (error) { - MAC_PERFORM(socketpeer_label_destroy, label); - mac_labelzone_free(label); - return NULL; - } - - return label; -} - -int -mac_socket_label_init(struct socket *so, int flag) -{ - so->so_label = mac_socket_label_alloc(flag); - if (so->so_label == NULL) { - return ENOMEM; - } - so->so_peerlabel = mac_socket_peer_label_alloc(flag); - if (so->so_peerlabel == NULL) { - mac_socket_label_free(so->so_label); - so->so_label = NULL; - return ENOMEM; - } - return 0; -} - -void -mac_socket_label_free(struct label *label) -{ - MAC_PERFORM(socket_label_destroy, label); - mac_labelzone_free(label); -} - -static void -mac_socket_peer_label_free(struct label *label) -{ - MAC_PERFORM(socketpeer_label_destroy, label); - mac_labelzone_free(label); -} - -void -mac_socket_label_destroy(struct socket *so) -{ - if (so->so_label != NULL) { - mac_socket_label_free(so->so_label); - so->so_label = NULL; - } - if (so->so_peerlabel != NULL) { - mac_socket_peer_label_free(so->so_peerlabel); - so->so_peerlabel = NULL; - } -} - -void -mac_socket_label_copy(struct label *src, struct label *dest) -{ - MAC_PERFORM(socket_label_copy, src, dest); -} - -int -mac_socket_label_externalize(struct label *label, char *elements, - char *outbuf, size_t outbuflen) -{ - int error; - - error = MAC_EXTERNALIZE(socket, label, elements, outbuf, outbuflen); - - return error; -} - -static int -mac_socketpeer_label_externalize(struct label *label, char *elements, - char *outbuf, size_t outbuflen) -{ - int error; - - error = MAC_EXTERNALIZE(socketpeer, label, elements, outbuf, outbuflen); - - return error; -} - -int -mac_socket_label_internalize(struct label *label, char *string) -{ - int error; - - error = MAC_INTERNALIZE(socket, label, string); - - return error; -} - -void -mac_socket_label_associate(struct ucred *cred, struct socket *so) -{ -#if SECURITY_MAC_CHECK_ENFORCE - /* 21167099 - only check if we allow write */ - if (!mac_socket_enforce) { - return; - } -#endif - - MAC_PERFORM(socket_label_associate, cred, - (socket_t)so, so->so_label); -} - -void -mac_socket_label_associate_accept(struct socket *oldsocket, - struct socket *newsocket) -{ -#if SECURITY_MAC_CHECK_ENFORCE - /* 21167099 - only check if we allow write */ - if (!mac_socket_enforce) { - return; - } -#endif - - MAC_PERFORM(socket_label_associate_accept, - (socket_t)oldsocket, oldsocket->so_label, - (socket_t)newsocket, newsocket->so_label); -} - -#if CONFIG_MACF_SOCKET && CONFIG_MACF_NET -void -mac_socketpeer_label_associate_mbuf(struct mbuf *mbuf, struct socket *so) -{ - struct label *label; - -#if SECURITY_MAC_CHECK_ENFORCE - /* 21167099 - only check if we allow write */ - if (!mac_socket_enforce && !mac_net_enforce) { - return; - } -#endif - - label = mac_mbuf_to_label(mbuf); - - /* Policy must deal with NULL label (unlabeled mbufs) */ - MAC_PERFORM(socketpeer_label_associate_mbuf, mbuf, label, - (socket_t)so, so->so_peerlabel); -} -#else -void -mac_socketpeer_label_associate_mbuf(__unused struct mbuf *mbuf, - __unused struct socket *so) -{ - return; -} -#endif - -void -mac_socketpeer_label_associate_socket(struct socket *oldsocket, - struct socket *newsocket) -{ -#if SECURITY_MAC_CHECK_ENFORCE - /* 21167099 - only check if we allow write */ - if (!mac_socket_enforce) { - return; - } -#endif - - MAC_PERFORM(socketpeer_label_associate_socket, - (socket_t)oldsocket, oldsocket->so_label, - (socket_t)newsocket, 
newsocket->so_peerlabel); -} - -int -mac_socket_check_kqfilter(kauth_cred_t cred, struct knote *kn, - struct socket *so) -{ - int error; - -#if SECURITY_MAC_CHECK_ENFORCE - /* 21167099 - only check if we allow write */ - if (!mac_socket_enforce) { - return 0; - } -#endif - - MAC_CHECK(socket_check_kqfilter, cred, kn, - (socket_t)so, so->so_label); - return error; -} - -static int -int -mac_socket_check_select(kauth_cred_t cred, struct socket *so, int which) -{ - int error; - -#if SECURITY_MAC_CHECK_ENFORCE - /* 21167099 - only check if we allow write */ - if (!mac_socket_enforce) { - return 0; - } -#endif - - MAC_CHECK(socket_check_select, cred, - (socket_t)so, so->so_label, which); - return error; -} - -mac_socket_check_label_update(kauth_cred_t cred, struct socket *so, - struct label *newlabel) -{ - int error; - -#if SECURITY_MAC_CHECK_ENFORCE - /* 21167099 - only check if we allow write */ - if (!mac_socket_enforce) { - return 0; - } -#endif - - MAC_CHECK(socket_check_label_update, cred, - (socket_t)so, so->so_label, - newlabel); - return error; -} - -int -mac_socket_label_update(kauth_cred_t cred, struct socket *so, struct label *label) -{ - int error; -#if 0 -#if SECURITY_MAC_CHECK_ENFORCE - /* 21167099 - only check if we allow write */ - if (!mac_socket_enforce) { - return 0; - } -#endif -#endif - error = mac_socket_check_label_update(cred, so, label); - if (error) { - return error; - } - - MAC_PERFORM(socket_label_update, cred, - (socket_t)so, so->so_label, label); - -#if CONFIG_MACF_NET - /* - * If the protocol has expressed interest in socket layer changes, - * such as if it needs to propagate changes to a cached pcb - * label from the socket, notify it of the label change while - * holding the socket lock. - * XXXMAC - are there cases when we should not do this? 
- */ - mac_inpcb_label_update(so); -#endif - return 0; -} - -int -mac_setsockopt_label(kauth_cred_t cred, struct socket *so, struct mac *mac) -{ - struct label *intlabel; - char *buffer; - int error; - size_t len; - - error = mac_check_structmac_consistent(mac); - if (error) { - return error; - } - - MALLOC(buffer, char *, mac->m_buflen, M_MACTEMP, M_WAITOK); - error = copyinstr(CAST_USER_ADDR_T(mac->m_string), buffer, - mac->m_buflen, &len); - if (error) { - FREE(buffer, M_MACTEMP); - return error; - } - - intlabel = mac_socket_label_alloc(MAC_WAITOK); - error = mac_socket_label_internalize(intlabel, buffer); - FREE(buffer, M_MACTEMP); - if (error) { - goto out; - } - - error = mac_socket_label_update(cred, so, intlabel); -out: - mac_socket_label_free(intlabel); - return error; -} - -int -mac_socket_label_get(__unused kauth_cred_t cred, struct socket *so, - struct mac *mac) -{ - char *buffer, *elements; - struct label *intlabel; - int error; - size_t len; - - error = mac_check_structmac_consistent(mac); - if (error) { - return error; - } - - MALLOC(elements, char *, mac->m_buflen, M_MACTEMP, M_WAITOK); - error = copyinstr(CAST_USER_ADDR_T(mac->m_string), elements, - mac->m_buflen, &len); - if (error) { - FREE(elements, M_MACTEMP); - return error; - } - - MALLOC(buffer, char *, mac->m_buflen, M_MACTEMP, M_WAITOK | M_ZERO); - intlabel = mac_socket_label_alloc(MAC_WAITOK); - mac_socket_label_copy(so->so_label, intlabel); - error = mac_socket_label_externalize(intlabel, elements, buffer, - mac->m_buflen); - mac_socket_label_free(intlabel); - if (error == 0) { - error = copyout(buffer, CAST_USER_ADDR_T(mac->m_string), - strlen(buffer) + 1); - } - - FREE(buffer, M_MACTEMP); - FREE(elements, M_MACTEMP); - - return error; -} - -int -mac_socketpeer_label_get(__unused kauth_cred_t cred, struct socket *so, - struct mac *mac) -{ - char *elements, *buffer; - struct label *intlabel; - int error; - size_t len; - - error = mac_check_structmac_consistent(mac); - if (error) { - return error; - } - - MALLOC(elements, char *, mac->m_buflen, M_MACTEMP, M_WAITOK); - error = copyinstr(CAST_USER_ADDR_T(mac->m_string), elements, - mac->m_buflen, &len); - if (error) { - FREE(elements, M_MACTEMP); - return error; - } - - MALLOC(buffer, char *, mac->m_buflen, M_MACTEMP, M_WAITOK | M_ZERO); - intlabel = mac_socket_label_alloc(MAC_WAITOK); - mac_socket_label_copy(so->so_peerlabel, intlabel); - error = mac_socketpeer_label_externalize(intlabel, elements, buffer, - mac->m_buflen); - mac_socket_label_free(intlabel); - if (error == 0) { - error = copyout(buffer, CAST_USER_ADDR_T(mac->m_string), - strlen(buffer) + 1); - } - - FREE(buffer, M_MACTEMP); - FREE(elements, M_MACTEMP); - - return error; -} - -#endif /* MAC_SOCKET */ - int mac_socket_check_accept(kauth_cred_t cred, struct socket *so) { @@ -578,38 +179,8 @@ mac_socket_check_create(kauth_cred_t cred, int domain, int type, int protocol) return error; } -#if CONFIG_MACF_SOCKET && CONFIG_MACF_NET -int -mac_socket_check_deliver(struct socket *so, struct mbuf *mbuf) -{ - struct label *label; - int error; - -#if SECURITY_MAC_CHECK_ENFORCE - /* 21167099 - only check if we allow write */ - if (!mac_socket_enforce) { - return 0; - } -#endif - - label = mac_mbuf_to_label(mbuf); - - /* Policy must deal with NULL label (unlabeled mbufs) */ - MAC_CHECK(socket_check_deliver, - (socket_t)so, so->so_label, mbuf, label); - return error; -} -#else -int -mac_socket_check_deliver(__unused struct socket *so, __unused struct mbuf *mbuf) -{ - return 0; -} -#endif - int 
-mac_socket_check_ioctl(kauth_cred_t cred, struct socket *so, - unsigned int cmd) +mac_socket_check_ioctl(kauth_cred_t cred, struct socket *so, u_long cmd) { int error; diff --git a/security/mac_system.c b/security/mac_system.c index f34eee62e..9f2ccf759 100644 --- a/security/mac_system.c +++ b/security/mac_system.c @@ -207,7 +207,7 @@ mac_system_check_swapoff(kauth_cred_t cred, struct vnode *vp) int mac_system_check_sysctlbyname(kauth_cred_t cred, const char *namestring, int *name, - u_int namelen, user_addr_t oldctl, size_t oldlen, + size_t namelen, user_addr_t oldctl, size_t oldlen, user_addr_t newctl, size_t newlen) { int error; diff --git a/security/mac_vfs.c b/security/mac_vfs.c index 67452ded3..ba95c90cb 100644 --- a/security/mac_vfs.c +++ b/security/mac_vfs.c @@ -100,7 +100,7 @@ * KDBG_EVENTID(DBG_FSYSTEM, DBG_VFS, dcode) global event id, see bsd/sys/kdebug.h. * Note that dcode is multiplied by 4 and ORed as part of the construction. See bsd/kern/trace_codes * for list of system-wide {global event id, name} pairs. Currently DBG_VFS event ids are in range - * [0x3130000, 0x3130170]. + * [0x3130000, 0x3130174]. */ //#define VFS_TRACE_POLICY_OPS @@ -1319,7 +1319,7 @@ int mac_vnode_check_signature(struct vnode *vp, struct cs_blob *cs_blob, struct image_params *imgp, unsigned int *cs_flags, unsigned int *signer_type, - int flags) + int flags, unsigned int platform) { int error; char *fatal_failure_desc = NULL; @@ -1339,7 +1339,7 @@ mac_vnode_check_signature(struct vnode *vp, struct cs_blob *cs_blob, VFS_KERNEL_DEBUG_START1(43, vp); MAC_CHECK(vnode_check_signature, vp, vp->v_label, cpu_type, cs_blob, - cs_flags, signer_type, flags, &fatal_failure_desc, &fatal_failure_desc_len); + cs_flags, signer_type, flags, platform, &fatal_failure_desc, &fatal_failure_desc_len); VFS_KERNEL_DEBUG_END1(43, vp); if (fatal_failure_desc_len) { @@ -1348,15 +1348,11 @@ mac_vnode_check_signature(struct vnode *vp, struct cs_blob *cs_blob, char const *path = NULL; - vn_path = (char *)kalloc(MAXPATHLEN); - if (vn_path != NULL) { - if (vn_getpath(vp, vn_path, (int*)&vn_pathlen) == 0) { - path = vn_path; - } else { - path = "(get vnode path failed)"; - } + vn_path = zalloc(ZV_NAMEI); + if (vn_getpath(vp, vn_path, (int*)&vn_pathlen) == 0) { + path = vn_path; } else { - path = "(path alloc failed)"; + path = "(get vnode path failed)"; } if (error == 0) { @@ -1396,12 +1392,12 @@ mac_vnode_check_signature(struct vnode *vp, struct cs_blob *cs_blob, int kcdata_error = 0; if ((reason_error = os_reason_alloc_buffer_noblock(reason, kcdata_estimate_required_buffer_size - (1, fatal_failure_desc_len))) == 0 && + (1, (uint32_t)fatal_failure_desc_len))) == 0 && (kcdata_error = kcdata_get_memory_addr(&reason->osr_kcd_descriptor, - EXIT_REASON_USER_DESC, fatal_failure_desc_len, + EXIT_REASON_USER_DESC, (uint32_t)fatal_failure_desc_len, &data_addr)) == KERN_SUCCESS) { kern_return_t mc_error = kcdata_memcpy(&reason->osr_kcd_descriptor, (mach_vm_address_t)data_addr, - fatal_failure_desc, fatal_failure_desc_len); + fatal_failure_desc, (uint32_t)fatal_failure_desc_len); if (mc_error != KERN_SUCCESS) { printf("mac_vnode_check_signature: %s: failed to copy reason string " @@ -1418,12 +1414,34 @@ mac_vnode_check_signature(struct vnode *vp, struct cs_blob *cs_blob, out: if (vn_path) { - kfree(vn_path, MAXPATHLEN); + zfree(ZV_NAMEI, vn_path); } if (fatal_failure_desc_len > 0 && fatal_failure_desc != NULL) { - kfree(fatal_failure_desc, fatal_failure_desc_len); + /* AMFI uses kalloc() which for kexts is redirected to KHEAP_KEXT */ + 
kheap_free(KHEAP_KEXT, fatal_failure_desc, fatal_failure_desc_len); + } + + return error; +} + +int +mac_vnode_check_supplemental_signature(struct vnode *vp, + struct cs_blob *cs_blob, struct vnode *linked_vp, + struct cs_blob *linked_cs_blob, unsigned int *signer_type) +{ + int error; + +#if SECURITY_MAC_CHECK_ENFORCE + /* 21167099 - only check if we allow write */ + if (!mac_proc_enforce || !mac_vnode_enforce) { + return 0; } +#endif + VFS_KERNEL_DEBUG_START1(93, vp); + MAC_CHECK(vnode_check_supplemental_signature, vp, vp->v_label, cs_blob, linked_vp, linked_cs_blob, + signer_type); + VFS_KERNEL_DEBUG_END1(93, vp); return error; } @@ -1477,7 +1495,7 @@ mac_vnode_check_getextattr(vfs_context_t ctx, struct vnode *vp, } int -mac_vnode_check_ioctl(vfs_context_t ctx, struct vnode *vp, u_int cmd) +mac_vnode_check_ioctl(vfs_context_t ctx, struct vnode *vp, u_long cmd) { kauth_cred_t cred; int error; @@ -2522,7 +2540,7 @@ mac_mount_check_label_update(vfs_context_t ctx, struct mount *mount) } int -mac_mount_check_fsctl(vfs_context_t ctx, struct mount *mp, u_int cmd) +mac_mount_check_fsctl(vfs_context_t ctx, struct mount *mp, u_long cmd) { kauth_cred_t cred; int error; @@ -2667,14 +2685,14 @@ mac_vnode_label_associate_fdesc(struct mount *mp, struct fdescnode *fnp, return error; } - if (fp->f_fglob == NULL) { + if (fp->fp_glob == NULL) { error = EBADF; goto out; } - switch (FILEGLOB_DTYPE(fp->f_fglob)) { + switch (FILEGLOB_DTYPE(fp->fp_glob)) { case DTYPE_VNODE: - fvp = (struct vnode *)fp->f_fglob->fg_data; + fvp = (struct vnode *)fp->fp_glob->fg_data; if ((error = vnode_getwithref(fvp))) { goto out; } @@ -2688,7 +2706,7 @@ mac_vnode_label_associate_fdesc(struct mount *mp, struct fdescnode *fnp, break; #if CONFIG_MACF_SOCKET_SUBSET case DTYPE_SOCKET: - so = (struct socket *)fp->f_fglob->fg_data; + so = (struct socket *)fp->fp_glob->fg_data; socket_lock(so, 1); MAC_PERFORM(vnode_label_associate_socket, vfs_context_ucred(ctx), (socket_t)so, so->so_label, @@ -2703,7 +2721,7 @@ mac_vnode_label_associate_fdesc(struct mount *mp, struct fdescnode *fnp, psem_label_associate(fp, vp, ctx); break; case DTYPE_PIPE: - cpipe = (struct pipe *)fp->f_fglob->fg_data; + cpipe = (struct pipe *)fp->fp_glob->fg_data; /* kern/sys_pipe.c:pipe_select() suggests this test. 
*/ if (cpipe == (struct pipe *)-1) { error = EINVAL; @@ -2720,7 +2738,7 @@ mac_vnode_label_associate_fdesc(struct mount *mp, struct fdescnode *fnp, case DTYPE_NETPOLICY: default: MAC_PERFORM(vnode_label_associate_file, vfs_context_ucred(ctx), - mp, mp->mnt_mntlabel, fp->f_fglob, fp->f_fglob->fg_label, + mp, mp->mnt_mntlabel, fp->fp_glob, fp->fp_glob->fg_label, vp, vp->v_label); break; } @@ -2756,3 +2774,11 @@ mac_vnode_label_set(struct vnode *vp, int slot, intptr_t v) } mac_label_set(l, slot, v); } + +void +mac_vnode_notify_reclaim(struct vnode *vp) +{ + VFS_KERNEL_DEBUG_START1(94, vp); + MAC_PERFORM(vnode_notify_reclaim, vp); + VFS_KERNEL_DEBUG_END1(94, vp); +} diff --git a/tests/Makefile b/tests/Makefile index 5fef05882..71a3e46a2 100644 --- a/tests/Makefile +++ b/tests/Makefile @@ -6,6 +6,7 @@ ifdef BASEDSTROOT override DSTROOT = $(BASEDSTROOT) endif +INVALID_ARCHS = i386 ENABLE_LTE_TESTS=YES OTHER_LTE_INCLUDE_FILES += \ @@ -26,11 +27,16 @@ OTHER_CFLAGS = -Weverything -Wno-gnu-union-cast -Wno-missing-field-initializers OTHER_CFLAGS += -Wno-missing-noreturn -Wno-vla -Wno-reserved-id-macro -Wno-documentation-unknown-command OTHER_CFLAGS += -Wno-padded -Wno-used-but-marked-unused -Wno-covered-switch-default -Wno-nullability-extension OTHER_CFLAGS += -Wno-gnu-empty-initializer -Wno-unused-macros -Wno-undef -Wno-fixed-enum-extension +OTHER_CFLAGS += -Wno-gnu-auto-type -Wno-switch-enum -Wno-variadic-macros OTHER_CFLAGS += --std=gnu11 -isystem $(SDKROOT)/System/Library/Frameworks/System.framework/PrivateHeaders OTHER_CFLAGS += -UT_NAMESPACE_PREFIX -DT_NAMESPACE_PREFIX=xnu OTHER_CFLAGS += -F $(SDKROOT)/System/Library/PrivateFrameworks +OTHER_CFLAGS += -Wl,-sectcreate,__INFO_FILTER,__disable,/dev/null + + CODESIGN:=$(shell xcrun -sdk "$(TARGETSDK)" -find codesign) +CODESIGN_HARDENED_RUNTIME:=$(CODESIGN) -o runtime CODESIGN_ALLOCATE:=$(shell xcrun -sdk "$(TARGETSDK)" -find codesign_allocate) # to have custom compiler flags to @@ -41,9 +47,7 @@ atm_diagnostic_flag: OTHER_CFLAGS += drop_priv.c atm_diagnostic_flag_entitled: CODE_SIGN_ENTITLEMENTS = atm_diagnostic_flag.entitlements atm_diagnostic_flag_entitled: OTHER_CFLAGS += drop_priv.c -testposixshm: INVALID_ARCHS = i386 - -avx: INVALID_ARCHS = i386 +avx: INVALID_ARCHS = $(filter arm%,$(ARCH_CONFIGS)) avx: OTHER_CFLAGS += -mavx512f -mavx512bw -mavx512vl avx: OTHER_CFLAGS += -I$(SDKROOT)/System/Library/Frameworks/System.framework/PrivateHeaders avx: CONFIG_FLAGS := $(filter-out -O%,$(CONFIG_FLAGS)) @@ -56,7 +60,22 @@ ifneq (osx,$(TARGET_NAME)) EXCLUDED_SOURCES += avx.c endif +CUSTOM_TARGETS = sr_entitlement_helper + +sr_entitlement_helper: sr_entitlement_helper.c + mkdir -p $(SYMROOT) + $(CC) -I $(OBJROOT) $(CFLAGS) $(OTHER_LDFLAGS) $(LDFLAGS) sr_entitlement_helper.c -o $(SYMROOT)/$@ + echo $(CODESIGN) --force --sign - --timestamp=none --entitlements sr_entitlement.entitlements $(SYMROOT)/$@; \ + env CODESIGN_ALLOCATE=$(CODESIGN_ALLOCATE) $(CODESIGN) --force --sign - --timestamp=none --entitlements sr_entitlement.entitlements $(SYMROOT)/$@; + +install-sr_entitlement_helper: sr_entitlement_helper + mkdir -p $(INSTALLDIR) + cp $(SYMROOT)/sr_entitlement_helper $(INSTALLDIR) + +sr_entitlement: OTHER_LDFLAGS += -ldarwintest_utils + backtracing: OTHER_LDFLAGS += -framework CoreSymbolication +backtracing: CODE_SIGN_ENTITLEMENTS = kernel_symbolication_entitlements.plist data_protection: OTHER_LDFLAGS += -ldarwintest_utils -framework IOKit @@ -64,9 +83,17 @@ immovable_send: excserver immovable_send: OTHER_CFLAGS += $(OBJROOT)/excserver.c -I $(OBJROOT) 
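For context on the tests/Makefile hunks in this section: every target name here (backtracing, immovable_send, kdebug, and so on) is a darwintest binary, and the per-target OTHER_CFLAGS / OTHER_LDFLAGS / CODE_SIGN_ENTITLEMENTS assignments simply attach extra build or signing settings to that one test. A minimal sketch of such a test is shown below; the test name and the kern.maxproc sysctl are illustrative only and are not part of this patch.

#include <darwintest.h>
#include <sys/sysctl.h>

T_GLOBAL_META(T_META_RUN_CONCURRENTLY(true));

/* Hypothetical example test; only tests needing frameworks, helpers, or
 * entitlements grow per-target rules in tests/Makefile like the ones above. */
T_DECL(example_sysctl_roundtrip, "read a kernel sysctl from a darwintest test")
{
	int maxproc = 0;
	size_t len = sizeof(maxproc);
	int ret = sysctlbyname("kern.maxproc", &maxproc, &len, NULL, 0);
	T_ASSERT_POSIX_SUCCESS(ret, "sysctlbyname(\"kern.maxproc\")");
	T_EXPECT_GE(maxproc, 1, "kern.maxproc should be at least 1");
}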
immovable_send: OTHER_LDFLAGS += -ldarwintest_utils -lpthread -framework IOKit -CUSTOM_TARGETS += immovable_send_client +CUSTOM_TARGETS += immovable_send_client vm_spawn_tool inspect_port_nocodesign immovable_send: immovable_send_client +vm_spawn_tool: INVALID_ARCHS = i386 +vm_spawn_tool: vm_spawn_tool.c + $(CC) $(DT_CFLAGS) -I $(OBJROOT) $(CFLAGS) $(DT_LDFLAGS) $(OTHER_LDFLAGS) $(LDFLAGS) vm_spawn_tool.c -o $(SYMROOT)/vm_spawn_tool + +install-vm_spawn_tool: vm_spawn_tool + mkdir -p $(INSTALLDIR)/tools + cp $(SYMROOT)/vm_spawn_tool $(INSTALLDIR)/tools/ + immovable_send_client: immovable_send_client.c $(CC) $(DT_CFLAGS) -I $(OBJROOT) $(CFLAGS) $(DT_LDFLAGS) $(OTHER_LDFLAGS) $(LDFLAGS) immovable_send_client.c -o $(SYMROOT)/immovable_send_client @@ -74,18 +101,29 @@ install-immovable_send_client: immovable_send_client mkdir -p $(INSTALLDIR) cp $(SYMROOT)/immovable_send_client $(INSTALLDIR)/ +inspect_port_nocodesign: inspect_port.c + $(CC) $(DT_CFLAGS) -I $(OBJROOT) -DT_NOCODESIGN=1 $(CFLAGS) $(DT_LDFLAGS) $(OTHER_LDFLAGS) $(LDFLAGS) $^ -o $(SYMROOT)/inspect_port_nocodesign + +install-inspect_port_nocodesign: inspect_port_nocodesign + mkdir -p $(INSTALLDIR) + env CODESIGN_ALLOCATE=$(CODESIGN_ALLOCATE) $(CODESIGN_ALLOCATE) -r -i $(SYMROOT)/inspect_port_nocodesign -o $(SYMROOT)/inspect_port_nocodesign + +kas_info: OTHER_LDFLAGS += -framework CoreSymbolication +kas_info: CODE_SIGN_ENTITLEMENTS = kernel_symbolication_entitlements.plist + kdebug: INVALID_ARCHS = i386 kdebug: OTHER_LDFLAGS = -framework ktrace -ldarwintest_utils -framework kperf EXCLUDED_SOURCES += drop_priv.c kperf_helpers.c xnu_quick_test_helpers.c memorystatus_assertion_helpers.c bpflib.c in_cksum.c ifneq ($(PLATFORM),iPhoneOS) -EXCLUDED_SOURCES += jumbo_va_spaces_28530648.c perf_compressor.c memorystatus_freeze_test.c +EXCLUDED_SOURCES += jumbo_va_spaces_28530648.c perf_compressor.c memorystatus_freeze_test.c vm/entitlement_increased_memory_limit.c endif perf_compressor: OTHER_LDFLAGS += -ldarwintest_utils perf_compressor: CODE_SIGN_ENTITLEMENTS=./private_entitlement.plist + memorystatus_freeze_test: CODE_SIGN_ENTITLEMENTS=./task_for_pid_entitlement.plist memorystatus_freeze_test: OTHER_LDFLAGS += -ldarwintest_utils memorystatus_freeze_test: OTHER_CFLAGS += -ldarwintest_utils memorystatus_assertion_helpers.c @@ -93,36 +131,53 @@ memorystatus_freeze_test: OTHER_CFLAGS += -ldarwintest_utils memorystatus_assert memorystatus_is_assertion: OTHER_LDFLAGS += -ldarwintest_utils memorystatus_is_assertion: OTHER_CFLAGS += memorystatus_assertion_helpers.c -shared_cache_tests: INVALID_ARCHS = i386 shared_cache_tests: OTHER_LDFLAGS += -ldarwintest_utils stackshot_tests: OTHER_CFLAGS += -Wno-objc-messaging-id -stackshot_tests: OTHER_LDFLAGS += -lkdd -ldarwintest_utils -framework Foundation -stackshot_tests: INVALID_ARCHS = i386 +stackshot_tests: OTHER_LDFLAGS += -lkdd -lz -ldarwintest_utils -framework Foundation stackshot_accuracy: OTHER_CFLAGS += -ldarwintest_utils -Wno-objc-messaging-id stackshot_accuracy: OTHER_LDFLAGS += -lkdd -ldarwintest_utils -framework Foundation stackshot_accuracy: INVALID_ARCHS = i386 -telemetry: OTHER_LDFLAGS = -framework ktrace -framework CoreFoundation -telemetry: INVALID_ARCHS = i386 +ifeq ($(PLATFORM),MacOSX) +CUSTOM_TARGETS += stackshot_translated_child + +stackshot_translated_child: INVALID_ARCHS = arm64 arm64e +stackshot_translated_child: stackshot_translated_child.c + $(CC) $(DT_CFLAGS) -I $(OBJROOT) $(CFLAGS) $(DT_LDFLAGS) $(OTHER_LDFLAGS) $(LDFLAGS) stackshot_translated_child.c -o 
$(SYMROOT)/stackshot_translated_child + +install-stackshot_translated_child: stackshot_translated_child + mkdir -p $(INSTALLDIR) + cp $(SYMROOT)/stackshot_translated_child $(INSTALLDIR)/ +else +EXCLUDED_SOURCES += stackshot_translated_child.c +endif + +telemetry: OTHER_LDFLAGS = -framework ktrace -framework kperf -framework CoreFoundation -memorystatus_zone_test: INVALID_ARCHS = i386 memorystatus_zone_test: OTHER_CFLAGS += -isystem $(SDKROOT)/System/Library/Frameworks/System.framework/PrivateHeaders memorystatus_zone_test: OTHER_LDFLAGS += -framework ktrace memorystatus_zone_test: OTHER_LDFLAGS += -ldarwintest_utils +ifeq ($(PLATFORM),BridgeOS) +EXCLUDED_SOURCES += dirtiness_tracking.c +endif + +dirtiness_tracking: OTHER_CFLAGS += -Wno-cast-align + kpc: OTHER_LDFLAGS += -framework kperf -framework ktrace kpc: INVALID_ARCHS = i386 -kperf: INVALID_ARCHS = i386 kperf: OTHER_CFLAGS += kperf_helpers.c kperf: OTHER_LDFLAGS += -framework kperf -framework kperfdata -framework ktrace -ldarwintest_utils -kperf_backtracing: INVALID_ARCHS = i386 +memcmp_zero: OTHER_CFLAGS += ../osfmk/arm64/memcmp_zero.s + kperf_backtracing: OTHER_CFLAGS += kperf_helpers.c kperf_backtracing: OTHER_LDFLAGS += -framework kperf -framework kperfdata -framework ktrace kperf_backtracing: OTHER_LDFLAGS += -framework CoreSymbolication +kperf_backtracing: CODE_SIGN_ENTITLEMENTS = kernel_symbolication_entitlements.plist kevent_qos: OTHER_CFLAGS += -Wno-unused-macros kevent_qos: OTHER_CFLAGS += -I $(OBJROOT)/ @@ -130,11 +185,9 @@ kevent_qos: OTHER_CFLAGS += -I $(OBJROOT)/ mach_get_times: OTHER_LDFLAGS += -ldarwintest_utils monotonic_core: OTHER_LDFLAGS += -framework ktrace -monotonic_core: INVALID_ARCHS = i386 perf_exit: perf_exit_proc perf_exit: OTHER_LDFLAGS = -framework ktrace -ldarwintest_utils -perf_exit: INVALID_ARCHS = i386 perf_exit: CODE_SIGN_ENTITLEMENTS=./private_entitlement.plist CUSTOM_TARGETS += prioritize_process_launch_helper @@ -157,22 +210,48 @@ mach_exception_reply: OTHER_CFLAGS += -Wno-cast-align os_thread_self_restrict: os_thread_self_restrict.c os_thread_self_restrict-entitlements.plist os_thread_self_restrict: CODE_SIGN_ENTITLEMENTS=os_thread_self_restrict-entitlements.plist -osptr: OTHER_CXXFLAGS += -I$(SRCROOT)/../libkern -std=c++98 -osptr: OTHER_CXXFLAGS += osptr_helper.cpp +subsystem_root_path: subsystem_root_path.c subsystem_root_path-entitlements.plist +subsystem_root_path: CODE_SIGN_ENTITLEMENTS=subsystem_root_path-entitlements.plist -osptr_dumb: OTHER_CXXFLAGS += -I$(SRCROOT)/../libkern -std=c++17 +EXCLUDED_SOURCES += $(wildcard bounded_ptr_src/*.cpp) +bounded_ptr: OTHER_CXXFLAGS += -Werror=implicit-int-conversion -Werror=shorten-64-to-32 -I$(SRCROOT)/../libkern -std=c++17 +bounded_ptr: $(wildcard bounded_ptr_src/*.cpp) bounded_ptr.cpp -osptr_11: OTHER_CXXFLAGS += -I$(SRCROOT)/../libkern -std=c++11 -osptr_11: OTHER_CXXFLAGS += osptr_helper.cpp -osptr_11: osptr.cpp - $(CXX) $(DT_CXXFLAGS) $(OTHER_CXXFLAGS) $(CXXFLAGS) $(DT_LDFLAGS) $(OTHER_LDFLAGS) $(LDFLAGS) $< -o $(SYMROOT)/$@ +EXCLUDED_SOURCES += bounded_ptr_03.cpp +bounded_ptr_03: OTHER_CXXFLAGS += -Werror=implicit-int-conversion -Werror=shorten-64-to-32 -I$(SRCROOT)/../libkern -std=c++03 +bounded_ptr_03: bounded_ptr_03.cpp + +EXCLUDED_SOURCES += $(wildcard bounded_array_src/*.cpp) +bounded_array: OTHER_CXXFLAGS += -Werror=implicit-int-conversion -Werror=shorten-64-to-32 -I$(SRCROOT)/../libkern -std=c++17 +bounded_array: $(wildcard bounded_array_src/*.cpp) bounded_array.cpp -osptr_17: OTHER_CXXFLAGS += -I$(SRCROOT)/../libkern -std=c++17 
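Returning briefly to the MAC changes at the top of this section: the mac_policy_ops table gains mpo_proc_check_remote_thread_create and mpo_vnode_check_supplemental_signature slots, and mac_vfs.c adds a mac_vnode_notify_reclaim() notification. A third-party policy would opt in by filling the corresponding slots in its ops table. The sketch below is illustrative only: the "examplepol" name is hypothetical, the mpo_vnode_notify_reclaim slot name is assumed from the MAC_PERFORM(vnode_notify_reclaim, ...) call site, and the hook signatures are inferred from the MAC_CHECK()/MAC_PERFORM() calls in mac_process.c and mac_vfs.c above rather than copied from mac_policy.h.

#include <security/mac_policy.h>

/* Hypothetical policy hooks; signatures inferred from the call sites in this patch. */
static int
examplepol_proc_check_remote_thread_create(kauth_cred_t cred, struct proc *p,
    int flavor, thread_state_t new_state, mach_msg_type_number_t new_state_count)
{
	(void)cred; (void)p; (void)flavor; (void)new_state; (void)new_state_count;
	/* Return 0 to allow the remote thread creation, or an errno value to deny it. */
	return 0;
}

static void
examplepol_vnode_notify_reclaim(struct vnode *vp)
{
	(void)vp;
	/* Drop any per-vnode state the policy cached; the vnode is being reclaimed. */
}

static struct mac_policy_ops examplepol_ops = {
	.mpo_proc_check_remote_thread_create = examplepol_proc_check_remote_thread_create,
	.mpo_vnode_notify_reclaim            = examplepol_vnode_notify_reclaim,
};

Registration itself would still go through mac_policy_register() with a mac_policy_conf describing the policy; that interface is unchanged by this patch.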
-osptr_17: OTHER_CXXFLAGS += osptr_helper.cpp -osptr_17: osptr.cpp +EXCLUDED_SOURCES += $(wildcard bounded_array_ref_src/*.cpp) +bounded_array_ref: OTHER_CXXFLAGS += -Werror=implicit-int-conversion -Werror=shorten-64-to-32 -I$(SRCROOT)/../libkern -std=c++17 +bounded_array_ref: $(wildcard bounded_array_ref_src/*.cpp) bounded_array_ref.cpp + +EXCLUDED_SOURCES += $(wildcard intrusive_shared_ptr_src/*.cpp) +intrusive_shared_ptr: OTHER_CXXFLAGS += -Werror=implicit-int-conversion -Werror=shorten-64-to-32 -I$(SRCROOT)/../libkern -std=c++17 +intrusive_shared_ptr: $(wildcard intrusive_shared_ptr_src/*.cpp) intrusive_shared_ptr.cpp + +EXCLUDED_SOURCES += $(wildcard safe_allocation_src/*.cpp) +safe_allocation: OTHER_CXXFLAGS += -Werror=implicit-int-conversion -Werror=shorten-64-to-32 -I$(SRCROOT)/../libkern -std=c++17 +safe_allocation: $(wildcard safe_allocation_src/*.cpp) safe_allocation.cpp + +EXCLUDED_SOURCES += osptr_compat.cpp +osptr_98: OTHER_CXXFLAGS += -I$(SRCROOT)/../libkern -std=c++98 -DOSPTR_STD="98" +osptr_98: osptr_compat.cpp + $(CXX) $(DT_CXXFLAGS) $(OTHER_CXXFLAGS) $(CXXFLAGS) $(DT_LDFLAGS) $(OTHER_LDFLAGS) $(LDFLAGS) $< -o $(SYMROOT)/$@ +osptr_11: OTHER_CXXFLAGS += -I$(SRCROOT)/../libkern -std=c++11 -DOSPTR_STD="11" +osptr_11: osptr_compat.cpp + $(CXX) $(DT_CXXFLAGS) $(OTHER_CXXFLAGS) $(CXXFLAGS) $(DT_LDFLAGS) $(OTHER_LDFLAGS) $(LDFLAGS) $< -o $(SYMROOT)/$@ +osptr_14: OTHER_CXXFLAGS += -I$(SRCROOT)/../libkern -std=c++14 -DOSPTR_STD="14" +osptr_14: osptr_compat.cpp + $(CXX) $(DT_CXXFLAGS) $(OTHER_CXXFLAGS) $(CXXFLAGS) $(DT_LDFLAGS) $(OTHER_LDFLAGS) $(LDFLAGS) $< -o $(SYMROOT)/$@ +osptr_17: OTHER_CXXFLAGS += -I$(SRCROOT)/../libkern -std=c++17 -DOSPTR_STD="17" +osptr_17: osptr_compat.cpp $(CXX) $(DT_CXXFLAGS) $(OTHER_CXXFLAGS) $(CXXFLAGS) $(DT_LDFLAGS) $(OTHER_LDFLAGS) $(LDFLAGS) $< -o $(SYMROOT)/$@ -EXCLUDED_SOURCES += osptr_helper.cpp +priority_queue: OTHER_CXXFLAGS += -std=c++17 os_refcnt: OTHER_CFLAGS += -I$(SRCROOT)/../libkern/ -Wno-gcc-compat -Wno-undef -O3 -flto @@ -191,10 +270,8 @@ install-perf_exit_proc: perf_exit_proc mkdir -p $(INSTALLDIR) cp $(SYMROOT)/perf_exit_proc $(INSTALLDIR)/ -stackshot_idle_25570396: INVALID_ARCHS = i386 stackshot_idle_25570396: OTHER_LDFLAGS += -lkdd -framework Foundation -stackshot_block_owner_14362384: INVALID_ARCHS = i386 stackshot_block_owner_14362384: OTHER_LDFLAGS += -framework Foundation -lpthread -lkdd ifeq ($(PLATFORM),MacOSX) stackshot_block_owner_14362384: OTHER_LDFLAGS += -lpcre @@ -222,16 +299,74 @@ install-vm_set_max_addr_helper: vm_set_max_addr_helper mkdir -p $(INSTALLDIR) cp $(SYMROOT)/vm_set_max_addr_helper $(INSTALLDIR)/ +CUSTOM_TARGETS += subsystem_root_path_helper_entitled +CUSTOM_TARGETS += subsystem_root_path_helper + +subsystem_root_path_helper_entitled: subsystem_root_path_helper.c subsystem_root_path-entitlements.plist + $(CC) $(OTHER_CFLAGS) $(CFLAGS) $(OTHER_LDFLAGS) $(LDFLAGS) subsystem_root_path_helper.c -o $(SYMROOT)/subsystem_root_path_helper_entitled; \ + echo $(CODESIGN) --force --sign - --timestamp=none $(SYMROOT)/$@; \ + env CODESIGN_ALLOCATE=$(CODESIGN_ALLOCATE) $(CODESIGN) --force --sign - --timestamp=none --entitlements subsystem_root_path-entitlements.plist $(SYMROOT)/$@; + +install-subsystem_root_path_helper_entitled: subsystem_root_path_helper_entitled + mkdir -p $(INSTALLDIR) + cp $(SYMROOT)/subsystem_root_path_helper_entitled $(INSTALLDIR)/ + +subsystem_root_path_helper: subsystem_root_path_helper.c + $(CC) $(OTHER_CFLAGS) $(CFLAGS) $(OTHER_LDFLAGS) $(LDFLAGS) subsystem_root_path_helper.c -o 
$(SYMROOT)/subsystem_root_path_helper; \ + echo $(CODESIGN) --force --sign - --timestamp=none $(SYMROOT)/$@; \ + env CODESIGN_ALLOCATE=$(CODESIGN_ALLOCATE) $(CODESIGN) --force --sign - --timestamp=none $(SYMROOT)/$@; + +install-subsystem_root_path_helper: subsystem_root_path_helper + mkdir -p $(INSTALLDIR) + cp $(SYMROOT)/subsystem_root_path_helper $(INSTALLDIR)/ + +CUSTOM_TARGETS += vm_test_code_signing_helper + +vm_test_code_signing_helper: vm_test_code_signing_helper.c + $(CC) $(OTHER_CFLAGS) $(CFLAGS) $(OTHER_LDFLAGS) $(LDFLAGS) vm_test_code_signing_helper.c -o $(SYMROOT)/vm_test_code_signing_helper; \ + echo $(CODESIGN) --force --sign - --timestamp=none $(SYMROOT)/$@; \ + env CODESIGN_ALLOCATE=$(CODESIGN_ALLOCATE) $(CODESIGN) --force --sign - --timestamp=none $(SYMROOT)/$@; + +install-vm_test_code_signing_helper: vm_test_code_signing_helper + mkdir -p $(INSTALLDIR) + cp $(SYMROOT)/vm_test_code_signing_helper $(INSTALLDIR)/ + +vm_test_code_signing: OTHER_LDFLAGS += -ldarwintest_utils + +INCLUDED_TEST_SOURCE_DIRS += vm + +# Revert to legacy vm_test suite until gets solved +EXCLUDED_SOURCES += vm/vm_allocation.c + ifeq ($(PLATFORM),iPhoneOS) -OTHER_TEST_TARGETS += jumbo_va_spaces_28530648_unentitled vm_phys_footprint_legacy +OTHER_TEST_TARGETS += jumbo_va_spaces_28530648_unentitled jumbo_va_spaces_52551256 vm_phys_footprint_legacy vm/entitlement_increased_memory_limit vm/entitlement_increased_memory_limit_unentitled jumbo_va_spaces_28530648: CODE_SIGN_ENTITLEMENTS = jumbo_va_spaces_28530648.entitlements -jumbo_va_spaces_28530648: OTHER_CFLAGS += -DENTITLED=1 +jumbo_va_spaces_28530648: OTHER_CFLAGS += -DENTITLED=1 -DTESTNAME=jumbo_va_spaces_28530648 jumbo_va_spaces_28530648: OTHER_LDFLAGS += -ldarwintest_utils +jumbo_va_spaces_52551256: CODE_SIGN_ENTITLEMENTS = jumbo_va_spaces_52551256.entitlements +jumbo_va_spaces_52551256: OTHER_CFLAGS += -DENTITLED=1 -DTESTNAME=jumbo_va_spaces_52551256 +jumbo_va_spaces_52551256: OTHER_LDFLAGS += -ldarwintest_utils +jumbo_va_spaces_52551256: jumbo_va_spaces_28530648.c + $(CC) $(DT_CFLAGS) $(OTHER_CFLAGS) $(CFLAGS) $(DT_LDFLAGS) $(OTHER_LDFLAGS) $(LDFLAGS) $< -o $(SYMROOT)/$@ + env CODESIGN_ALLOCATE=$(CODESIGN_ALLOCATE) $(CODESIGN) --force --sign - --timestamp=none --entitlements $(CODE_SIGN_ENTITLEMENTS) $(SYMROOT)/$@; + jumbo_va_spaces_28530648_unentitled: OTHER_LDFLAGS += -ldarwintest_utils +jumbo_va_spaces_28530648_unentitled: OTHER_CFLAGS += -DTESTNAME=jumbo_va_spaces_28530648_unentitled jumbo_va_spaces_28530648_unentitled: jumbo_va_spaces_28530648.c $(CC) $(DT_CFLAGS) $(OTHER_CFLAGS) $(CFLAGS) $(DT_LDFLAGS) $(OTHER_LDFLAGS) $(LDFLAGS) $< -o $(SYMROOT)/$@ +vm/entitlement_increased_memory_limit: CODE_SIGN_ENTITLEMENTS = vm/entitlement_increased_memory_limit.entitlements +vm/entitlement_increased_memory_limit: OTHER_CFLAGS += -DENTITLED=1 +vm/entitlement_increased_memory_limit: OTHER_LDFLAGS += -ldarwintest_utils memorystatus_assertion_helpers.c +vm/entitlement_increased_memory_limit: vm/entitlement_increased_memory_limit.c + $(CC) $(DT_CFLAGS) $(OTHER_CFLAGS) $(CFLAGS) $(DT_LDFLAGS) $(OTHER_LDFLAGS) $(LDFLAGS) $< -o $(SYMROOT)/$@ + env CODESIGN_ALLOCATE=$(CODESIGN_ALLOCATE) $(CODESIGN) --force --sign - --timestamp=none --entitlements $(CODE_SIGN_ENTITLEMENTS) $(SYMROOT)/$@; + +vm/entitlement_increased_memory_limit_unentitled: OTHER_LDFLAGS += -ldarwintest_utils memorystatus_assertion_helpers.c +vm/entitlement_increased_memory_limit_unentitled: vm/entitlement_increased_memory_limit.c + $(CC) $(DT_CFLAGS) $(OTHER_CFLAGS) $(CFLAGS) $(DT_LDFLAGS) 
$(OTHER_LDFLAGS) $(LDFLAGS) $< -o $(SYMROOT)/$@ + vm_phys_footprint_legacy: OTHER_LDFLAGS += -framework CoreFoundation -framework IOSurface vm_phys_footprint_legacy: OTHER_CFLAGS += -DLEGACY_FOOTPRINT_ENTITLED=1 vm_phys_footprint_legacy: CODE_SIGN_ENTITLEMENTS=./legacy_footprint.entitlement @@ -242,6 +377,8 @@ endif task_info_28439149: CODE_SIGN_ENTITLEMENTS = ./task_for_pid_entitlement.plist +inspect_port: CODE_SIGN_ENTITLEMENTS = ./task_for_pid_entitlement.plist + proc_info: CODE_SIGN_ENTITLEMENTS = ./task_for_pid_entitlement.plist proc_info: OTHER_LDFLAGS += -ldarwintest_utils @@ -268,49 +405,30 @@ settimeofday_29193041_entitled: CODE_SIGN_ENTITLEMENTS = settimeofday_29193041.e settimeofday_29193041_entitled: OTHER_CFLAGS += drop_priv.c thread_group_set_32261625: OTHER_LDFLAGS = -framework ktrace -thread_group_set_32261625: INVALID_ARCHS = i386 task_info: CODE_SIGN_ENTITLEMENTS = task_for_pid_entitlement.plist -task_vm_info_decompressions: INVALID_ARCHS = x86_64 i386 +ifneq ($(PLATFORM),iPhoneOS) + EXCLUDED_SOURCES += task_vm_info_decompressions.c +endif socket_bind_35243417: CODE_SIGN_ENTITLEMENTS = network_entitlements.plist socket_bind_35685803: CODE_SIGN_ENTITLEMENTS = network_entitlements.plist -sioc-if-addr-bounds: sioc-if-addr-bounds.c - net_tuntests: CODE_SIGN_ENTITLEMENTS = network_entitlements.plist net_bridge: OTHER_CFLAGS += bpflib.c in_cksum.c net_bridge: OTHER_LDFLAGS += -ldarwintest_utils -ifneq (osx,$(TARGET_NAME)) -EXCLUDED_SOURCES += no32exec_35914211.c no32exec_35914211_helper.c -else # target = osx -CUSTOM_TARGETS += no32exec_35914211_helper no32exec_35914211_helper_binprefs +CUSTOM_TARGETS += posix_spawn_archpref_helper -no32exec_35914211_helper: INVALID_ARCHS = x86_64 i386 -no32exec_35914211_helper: - $(CC) $(LDFLAGS) $(CFLAGS) -arch i386 no32exec_35914211_helper.c -o $(SYMROOT)/$@; +posix_spawn_archpref_helper: + $(CC) $(LDFLAGS) $(CFLAGS) posix_spawn_archpref_helper.c -o $(SYMROOT)/$@; env CODESIGN_ALLOCATE=$(CODESIGN_ALLOCATE) $(CODESIGN) --force --sign - --timestamp=none $(SYMROOT)/$@; -install-no32exec_35914211_helper: +install-posix_spawn_archpref_helper: mkdir -p $(INSTALLDIR) - cp $(SYMROOT)/no32exec_35914211_helper $(INSTALLDIR)/ - -no32exec_35914211_helper_binprefs: INVALID_ARCHS = x86_64 i386 -no32exec_35914211_helper_binprefs: - $(CC) $(OTHER_CFLAGS) $(CFLAGS) $(OTHER_LDFLAGS) $(LDFLAGS) -arch i386 -arch x86_64 no32exec_35914211_helper.c -o $(SYMROOT)/$@; - env CODESIGN_ALLOCATE=$(CODESIGN_ALLOCATE) $(CODESIGN) --force --sign - --timestamp=none $(SYMROOT)/$@; - -install-no32exec_35914211_helper_binprefs: - mkdir -p $(INSTALLDIR) - cp $(SYMROOT)/no32exec_35914211_helper_binprefs $(INSTALLDIR)/ - -no32exec_35914211: INVALID_ARCHS = i386 -no32exec_35914211: no32exec_35914211_helper -no32exec_35914211: no32exec_35914211_helper_binprefs -endif # (osx,$(TARGET_NAME))) + cp $(SYMROOT)/posix_spawn_archpref_helper $(INSTALLDIR)/ MIG:=SDKROOT=$(SDKROOT) $(shell xcrun -sdk "$(TARGETSDK)" -find mig) @@ -324,15 +442,32 @@ excserver: excserver.defs install-excserver: ; +EXCLUDED_SOURCES += exc_helpers.c + + +x18: OTHER_CFLAGS += -Wno-language-extension-token + exc_resource_threads: excserver exc_resource_threads: OTHER_CFLAGS += $(OBJROOT)/excserver.c -I $(OBJROOT) -fp_exception: excserver +fp_exception: excserver exc_helpers.c fp_exception: OTHER_CFLAGS += $(OBJROOT)/excserver.c -I $(OBJROOT) -ifneq (osx,$(TARGET_NAME)) -EXCLUDED_SOURCES += ldt_code32.s ldt.c +ptrauth_failure: excserver exc_helpers.c +ptrauth_failure: OTHER_CFLAGS += $(OBJROOT)/excserver.c -I 
$(OBJROOT) -Wno-language-extension-token +ptrauth_failure: CODESIGN = $(CODESIGN_HARDENED_RUNTIME) + +# This test currently relies on pid_hibernate(), which is only available on embedded platforms. +ifeq ($(PLATFORM),MacOSX) +EXCLUDED_SOURCES += decompression_failure.c else +decompression_failure: excserver exc_helpers.c +decompression_failure: OTHER_CFLAGS += $(OBJROOT)/excserver.c -I $(OBJROOT) +endif + +ifeq ($(findstring x86_64,$(ARCH_CONFIGS)),) +EXCLUDED_SOURCES += ldt_code32.s ldt.c +else # target = osx $(OBJROOT)/ldt_mach_exc_server.c: $(MIG) $(CFLAGS) \ -user /dev/null \ @@ -340,9 +475,9 @@ $(OBJROOT)/ldt_mach_exc_server.c: -header $(OBJROOT)/ldt_mach_exc.h \ mach_exc.defs -ldt: INVALID_ARCHS = i386 +ldt: INVALID_ARCHS = $(ARCH_CONFIGS) ldt: $(OBJROOT)/ldt_mach_exc_server.c -ldt: OTHER_CFLAGS += -I $(OBJROOT) $(SRCROOT)/ldt_code32.s -Wl,-pagezero_size,0x1000 -Wno-missing-variable-declarations +ldt: OTHER_CFLAGS += -arch x86_64 -I $(OBJROOT) $(SRCROOT)/ldt_code32.s -Wl,-pagezero_size,0x1000 -Wno-missing-variable-declarations ldt: CODE_SIGN_ENTITLEMENTS=ldt_entitlement.plist endif @@ -354,17 +489,122 @@ endif vm_phys_footprint: OTHER_LDFLAGS += -framework CoreFoundation -framework IOSurface +vm_kern_count_wired_kernelcache: OTHER_CFLAGS += -I$(SDKROOT)/System/Library/Frameworks/Kernel.framework/PrivateHeaders/mach + debug_control_port_for_pid: CODE_SIGN_ENTITLEMENTS = ./debug_control_port_for_pid_entitlement.plist prng: OTHER_LDFLAGS += -ldarwintest_utils -OTHER_TEST_TARGETS += io_catalog_send_data +preoslog: OTHER_LDFLAGS += -ldarwintest_utils + +task_policy: CODE_SIGN_ENTITLEMENTS = ./task_policy_entitlement.plist + +OTHER_TEST_TARGETS += task_policy_unentitled +task_policy_unentitled: CODE_SIGN_ENTITLEMENTS = task_for_pid_entitlement.plist +task_policy_unentitled: OTHER_CFLAGS += -DUNENTITLED +task_policy_unentitled: task_policy.c + $(CC) $(DT_CFLAGS) $(OTHER_CFLAGS) $(CFLAGS) $(DT_LDFLAGS) $(OTHER_LDFLAGS) $(LDFLAGS) $< -o $(SYMROOT)/$@ + $(CODESIGN) --force --sign - --timestamp=none --entitlements $(CODE_SIGN_ENTITLEMENTS) $(SYMROOT)/$@ + + +EXCLUDED_SOURCES += get_shared_cache_address.c +ifeq ($(PLATFORM),iPhoneOS) +CUSTOM_TARGETS += get_shared_cache_address +get_shared_cache_address: INVALID_ARCHS = arm64 +get_shared_cache_address: get_shared_cache_address.c + $(CC) $(OTHER_CFLAGS) $(CFLAGS) $(LDFLAGS) $< -o $(SYMROOT)/get_shared_cache_address + +install-get_shared_cache_address: get_shared_cache_address + $(CODESIGN) --force --sign - --timestamp=none --identifier=com.apple.get_shared_cache_address $(SYMROOT)/get_shared_cache_address + mkdir -p $(INSTALLDIR) + cp $(SYMROOT)/get_shared_cache_address $(INSTALLDIR)/ +endif + +ifneq ($(PLATFORM),MacOSX) +EXCLUDED_SOURCES += hvtest_x86.m hvtest_x86_guest.c hvtest_x86_asm.s +else +EXCLUDED_SOURCES += hvtest_x86_guest.c hvtest_x86_asm.s +hvtest_x86: CODE_SIGN_ENTITLEMENTS = hv_public.entitlements +hvtest_x86: INVALID_ARCHS += arm64e arm64 i386 +hvtest_x86: OTHER_CFLAGS += hvtest_x86_guest.c hvtest_x86_asm.s -framework Hypervisor -framework Foundation +endif + +OTHER_TEST_TARGETS += io_catalog_send_data vm_memory_share_tests + +vm_memory_share_tests: INVALID_ARCHS = i386 +vm_memory_share_tests: CODE_SIGN_ENTITLEMENTS = ./task_for_pid_entitlement.plist +vm_memory_share_tests: vm_memory_tests_src/main.c vm_memory_tests_src/common.c vm_memory_tests_src/vm_tests.c + $(CC) $(DT_CFLAGS) $(OTHER_CFLAGS) $(CFLAGS) $(DT_LDFLAGS) $(OTHER_LDFLAGS) $(LDFLAGS) $^ -o $(SYMROOT)/$@ + echo $(CODESIGN) --force --sign - --timestamp=none --entitlements 
$(SRCROOT)/$(CODE_SIGN_ENTITLEMENTS) $(SYMROOT)/$@; + env CODESIGN_ALLOCATE=$(CODESIGN_ALLOCATE) $(CODESIGN) --force --sign - --timestamp=none --entitlements "$(SRCROOT)/$(CODE_SIGN_ENTITLEMENTS)" $(SYMROOT)/$@; + +# build the mach server as individual helper which does not use libdarwintest +CUSTOM_TARGETS += vm_memory_share_tests_server +vm_memory_share_tests_server: CODE_SIGN_ENTITLEMENTS = ./task_for_pid_entitlement.plist +vm_memory_share_tests_server: vm_memory_tests_src/server.c vm_memory_tests_src/common.c vm_memory_tests_src/vm_tests.c + $(CC) $(DT_CFLAGS) $(OTHER_CFLAGS) $(CFLAGS) $(DT_LDFLAGS) $(OTHER_LDFLAGS) $(LDFLAGS) $^ -o $(SYMROOT)/vm_memory_share_tests_server + echo $(CODESIGN) --force --sign - --timestamp=none --entitlements $(SRCROOT)/$(CODE_SIGN_ENTITLEMENTS) $(SYMROOT)/$@; + env CODESIGN_ALLOCATE=$(CODESIGN_ALLOCATE) $(CODESIGN) --force --sign - --timestamp=none --entitlements "$(SRCROOT)/$(CODE_SIGN_ENTITLEMENTS)" $(SYMROOT)/$@; + +install-vm_memory_share_tests_server: vm_memory_share_tests_server + mkdir -p $(INSTALLDIR) + cp $(SYMROOT)/vm_memory_share_tests_server $(INSTALLDIR)/ io_catalog_send_data: INVALID_ARCHS = i386 io_catalog_send_data: OTHER_CFLAGS += -DTEST_UNENTITLED -framework IOKit -framework CoreFoundation -framework Foundation + io_catalog_send_data: iokit/io_catalog_send_data.m $(CC) $(DT_CFLAGS) $(OTHER_CFLAGS) $(CFLAGS) $(DT_LDFLAGS) $(OTHER_LDFLAGS) $(LDFLAGS) $< -o $(SYMROOT)/$@ +ifeq ($(PLATFORM),MacOSX) +EXCLUDED_SOURCES += vm/kern_max_task_pmem.c +endif + +EXCLUDED_SOURCES += vm/perf_helpers.c + +fault_throughput: vm/fault_throughput.c + mkdir -p $(SYMROOT)/vm + $(CC) $(DT_CFLAGS) $(OTHER_CFLAGS) $(CFLAGS) $(DT_LDFLAGS) $(OTHER_LDFLAGS) $(LDFLAGS) $< -o $(SYMROOT)/vm/$@ +fault_throughput: OTHER_CFLAGS += vm/perf_helpers.c + +install-fault_throughput: fault_throughput + mkdir -p $(INSTALLDIR)/vm + cp $(SYMROOT)/vm/fault_throughput $(INSTALLDIR)/vm/ + +BATS_PLISTS += $(SRCROOT)/vm/fault_throughput.plist + +fault_throughput_benchrun: + mkdir -p $(SYMROOT)/vm + cp $(SRCROOT)/vm/fault_throughput.lua $(SYMROOT)/vm/fault_throughput.lua + chmod +x $(SYMROOT)/vm/fault_throughput.lua + +install-fault_throughput_benchrun: fault_throughput_benchrun + mkdir -p $(INSTALLDIR)/vm + cp $(SYMROOT)/vm/fault_throughput.lua $(INSTALLDIR)/vm + chmod +x $(INSTALLDIR)/vm/fault_throughput.lua + +CUSTOM_TARGETS += fault_throughput fault_throughput_benchrun +EXCLUDED_SOURCES += vm/fault_throughput.plist vm/fault_throughput.c + +perf_madvise: vm/perf_madvise.c + mkdir -p $(SYMROOT)/vm + $(CC) $(DT_CFLAGS) $(OTHER_CFLAGS) $(CFLAGS) $(DT_LDFLAGS) $(OTHER_LDFLAGS) $(LDFLAGS) $< -o $(SYMROOT)/vm/$@ +perf_madvise: OTHER_CFLAGS += vm/perf_helpers.c +install-perf_madvise: perf_madvise + mkdir -p $(INSTALLDIR)/vm + cp $(SYMROOT)/vm/perf_madvise $(INSTALLDIR)/vm/ +perf_madvise_benchrun: + mkdir -p $(SYMROOT)/vm + cp $(SRCROOT)/vm/perf_madvise.lua $(SYMROOT)/vm/perf_madvise.lua + chmod +x $(SYMROOT)/vm/perf_madvise.lua +install-perf_madvise_benchrun: perf_madvise_benchrun + mkdir -p $(INSTALLDIR)/vm + cp $(SYMROOT)/vm/perf_madvise.lua $(INSTALLDIR)/vm + chmod +x $(INSTALLDIR)/vm/perf_madvise.lua + +CUSTOM_TARGETS += perf_madvise perf_madvise_benchrun +EXCLUDED_SOURCES += vm/perf_madvise.c + task_create_suid_cred: CODE_SIGN_ENTITLEMENTS = ./task_create_suid_cred_entitlement.plist OTHER_TEST_TARGETS += task_create_suid_cred_unentitled @@ -372,4 +612,22 @@ task_create_suid_cred_unentitled: OTHER_CFLAGS += -DUNENTITLED task_create_suid_cred_unentitled: task_create_suid_cred.c $(CC) 
$(DT_CFLAGS) $(OTHER_CFLAGS) $(CFLAGS) $(DT_LDFLAGS) $(OTHER_LDFLAGS) $(LDFLAGS) $< -o $(SYMROOT)/$@ +ifeq ($(PLATFORM),MacOSX) +test_dext_launch_56101852: OTHER_LDFLAGS += -framework CoreFoundation -framework IOKit +test_dext_launch_56101852: CODE_SIGN_ENTITLEMENTS += test_dext_launch_56101852.entitlements +else +EXCLUDED_SOURCES += test_dext_launch_56101852.c +endif + +ioconnectasyncmethod_57641955: OTHER_LDFLAGS += -framework IOKit + +ifeq ($(PLATFORM),BridgeOS) +EXCLUDED_SOURCES += ipsec.m +else +ipsec: OTHER_LDFLAGS += -framework Foundation -framework CoreFoundation -framework NetworkExtension +ipsec: CODE_SIGN_ENTITLEMENTS = ipsec.entitlements +endif + +test_sysctl_kern_procargs_25397314: OTHER_LDFLAGS += -framework Foundation -ldarwintest_utils + include $(DEVELOPER_DIR)/AppleInternal/Makefiles/darwintest/Makefile.targets diff --git a/tests/backtracing.c b/tests/backtracing.c index f0af5447d..a56dc9dce 100644 --- a/tests/backtracing.c +++ b/tests/backtracing.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2016, 2019 Apple Computer, Inc. All rights reserved. */ +// Copyright (c) 2016-2020 Apple Computer, Inc. All rights reserved. #include #include @@ -12,47 +12,65 @@ T_GLOBAL_META(T_META_RUN_CONCURRENTLY(true)); #define USER_FRAMES (12) - -#define NON_RECURSE_FRAMES (4) +#define MAX_SYSCALL_SETUP_FRAMES (2) +#define NON_RECURSE_FRAMES (2) static const char *user_bt[USER_FRAMES] = { - NULL, "backtrace_thread", "recurse_a", "recurse_b", "recurse_a", "recurse_b", "recurse_a", "recurse_b", "recurse_a", "recurse_b", - "expect_stack", NULL + "recurse_a", "recurse_b", "expect_callstack", +}; + +struct callstack_exp { + bool in_syscall_setup; + unsigned int syscall_frames; + const char **callstack; + size_t callstack_len; + unsigned int nchecked; }; static void -expect_frame(const char **bt, unsigned int bt_len, CSSymbolRef symbol, - unsigned long addr, unsigned int bt_idx, unsigned int max_frames) +expect_frame(struct callstack_exp *cs, CSSymbolRef symbol, + unsigned long addr, unsigned int bt_idx) { - const char *name; - unsigned int frame_idx = max_frames - bt_idx - 1; - if (CSIsNull(symbol)) { - T_FAIL("invalid symbol for address %#lx at frame %d", addr, - frame_idx); - return; - } - - if (bt[frame_idx] == NULL) { - T_LOG("frame %2u: skipping system frame %s", frame_idx, - CSSymbolGetName(symbol)); + if (!cs->in_syscall_setup) { + T_FAIL("invalid symbol for address %#lx at frame %d", addr, + bt_idx); + } return; } - if (frame_idx >= bt_len) { - T_FAIL("unexpected frame '%s' (%#lx) at index %u", - CSSymbolGetName(symbol), addr, frame_idx); - return; + const char *name = CSSymbolGetName(symbol); + if (name) { + if (cs->in_syscall_setup) { + if (strcmp(name, cs->callstack[cs->callstack_len - 1]) == 0) { + cs->in_syscall_setup = false; + cs->syscall_frames = bt_idx; + T_LOG("found start of controlled stack at frame %u, expected " + "index %zu", cs->syscall_frames, cs->callstack_len - 1); + } else { + T_LOG("found syscall setup symbol %s at frame %u", name, + bt_idx); + } + } + if (!cs->in_syscall_setup) { + if (cs->nchecked >= cs->callstack_len) { + T_LOG("frame %2u: skipping system frame %s", bt_idx, name); + } else { + size_t frame_idx = cs->callstack_len - cs->nchecked - 1; + T_EXPECT_EQ_STR(name, cs->callstack[frame_idx], + "frame %2zu: saw '%s', expected '%s'", + frame_idx, name, cs->callstack[frame_idx]); + } + cs->nchecked++; + } + } else { + if (!cs->in_syscall_setup) { + T_ASSERT_NOTNULL(name, NULL, "symbol should not be NULL"); + } } - - name = CSSymbolGetName(symbol); - T_QUIET; 
T_ASSERT_NOTNULL(name, NULL); - T_EXPECT_EQ_STR(name, bt[frame_idx], - "frame %2u: saw '%s', expected '%s'", - frame_idx, name, bt[frame_idx]); } static bool @@ -77,14 +95,21 @@ is_kernel_64_bit(void) return k64; } +// Use an extra, non-inlineable function so that any frames after expect_stack +// can be safely ignored. This insulates the test from changes in how syscalls +// are called by Libc and the kernel. +static int __attribute__((noinline, not_tail_called)) +backtrace_current_thread_wrapper(uint64_t *bt, size_t *bt_filled) +{ + int ret = sysctlbyname("kern.backtrace.user", bt, bt_filled, NULL, 0); + getpid(); // Really prevent tail calls. + return ret; +} + static void __attribute__((noinline, not_tail_called)) -expect_stack(void) +expect_callstack(void) { - uint64_t bt[USER_FRAMES] = { 0 }; - unsigned int bt_len = USER_FRAMES; - int err; - size_t bt_filled; - bool k64; + uint64_t bt[USER_FRAMES + MAX_SYSCALL_SETUP_FRAMES] = { 0 }; static CSSymbolicatorRef user_symb; static dispatch_once_t expect_stack_once; @@ -94,38 +119,49 @@ expect_stack(void) T_QUIET; T_ASSERT_TRUE(CSSymbolicatorIsTaskValid(user_symb), NULL); }); - k64 = is_kernel_64_bit(); - bt_filled = USER_FRAMES; - err = sysctlbyname("kern.backtrace.user", bt, &bt_filled, NULL, 0); - if (err == ENOENT) { + size_t bt_filled = USER_FRAMES + MAX_SYSCALL_SETUP_FRAMES; + int ret = backtrace_current_thread_wrapper(bt, &bt_filled); + if (ret == -1 && errno == ENOENT) { T_SKIP("release kernel: kern.backtrace.user sysctl returned ENOENT"); } - T_ASSERT_POSIX_SUCCESS(err, "sysctlbyname(\"kern.backtrace.user\")"); - - bt_len = (unsigned int)bt_filled; - T_EXPECT_EQ(bt_len, (unsigned int)USER_FRAMES, - "%u frames should be present in backtrace", (unsigned int)USER_FRAMES); - + T_ASSERT_POSIX_SUCCESS(ret, "sysctlbyname(\"kern.backtrace.user\")"); + T_LOG("kernel returned %zu frame backtrace", bt_filled); + + unsigned int bt_len = (unsigned int)bt_filled; + T_EXPECT_GE(bt_len, (unsigned int)USER_FRAMES, + "at least %u frames should be present in backtrace", USER_FRAMES); + T_EXPECT_LE(bt_len, (unsigned int)USER_FRAMES + MAX_SYSCALL_SETUP_FRAMES, + "at most %u frames should be present in backtrace", + USER_FRAMES + MAX_SYSCALL_SETUP_FRAMES); + + struct callstack_exp callstack = { + .in_syscall_setup = true, + .syscall_frames = 0, + .callstack = user_bt, + .callstack_len = USER_FRAMES, + .nchecked = 0, + }; for (unsigned int i = 0; i < bt_len; i++) { uintptr_t addr; #if !defined(__LP64__) - /* - * Backtrace frames come out as kernel words; convert them back to user - * uintptr_t for 32-bit processes. - */ - if (k64) { + // Backtrace frames come out as kernel words; convert them back to user + // uintptr_t for 32-bit processes. + if (is_kernel_64_bit()) { addr = (uintptr_t)(bt[i]); } else { addr = (uintptr_t)(((uint32_t *)bt)[i]); } -#else /* defined(__LP32__) */ +#else // defined(__LP32__) addr = (uintptr_t)bt[i]; -#endif /* defined(__LP32__) */ +#endif // defined(__LP32__) CSSymbolRef symbol = CSSymbolicatorGetSymbolWithAddressAtTime( user_symb, addr, kCSNow); - expect_frame(user_bt, USER_FRAMES, symbol, addr, i, bt_len); + expect_frame(&callstack, symbol, addr, i); } + + T_EXPECT_GE(callstack.nchecked, USER_FRAMES, + "checked enough frames for correct symbols"); } static int __attribute__((noinline, not_tail_called)) @@ -137,8 +173,8 @@ static int __attribute__((noinline, not_tail_called)) recurse_a(unsigned int frames) { if (frames == 1) { - expect_stack(); - getpid(); + expect_callstack(); + getpid(); // Really prevent tail calls. 
return 0; } @@ -149,8 +185,8 @@ static int __attribute__((noinline, not_tail_called)) recurse_b(unsigned int frames) { if (frames == 1) { - expect_stack(); - getpid(); + expect_callstack(); + getpid(); // Really prevent tail calls. return 0; } @@ -163,11 +199,9 @@ backtrace_thread(void *arg) #pragma unused(arg) unsigned int calls; - /* - * backtrace_thread, recurse_a, recurse_b, ..., __sysctlbyname - * - * Always make one less call for this frame (backtrace_thread). - */ + // backtrace_thread, recurse_a, recurse_b, ..., __sysctlbyname + // + // Always make one less call for this frame (backtrace_thread). calls = USER_FRAMES - NON_RECURSE_FRAMES; T_LOG("backtrace thread calling into %d frames (already at %d frames)", @@ -181,6 +215,8 @@ T_DECL(backtrace_user, "test that the kernel can backtrace user stacks", { pthread_t thread; + // Run the test from a different thread to insulate it from libdarwintest + // setup. T_QUIET; T_ASSERT_POSIX_ZERO(pthread_create(&thread, NULL, backtrace_thread, NULL), "create additional thread to backtrace"); @@ -198,26 +234,18 @@ T_DECL(backtrace_user_bounds, void *guard_page = NULL; void *bt_start = NULL; - /* - * The backtrace addresses come back as kernel words. - */ + // The backtrace addresses come back as kernel words. size_t kword_size = is_kernel_64_bit() ? 8 : 4; - /* - * Get an idea of how many frames to expect. - */ - error = sysctlbyname("kern.backtrace.user", bt_init, &bt_filled, NULL, - 0); - if (error == ENOENT) { + // Get an idea of how many frames to expect. + int ret = sysctlbyname("kern.backtrace.user", bt_init, &bt_filled, NULL, 0); + if (ret == -1 && errno == ENOENT) { T_SKIP("release kernel: kern.backtrace.user missing"); } T_ASSERT_POSIX_SUCCESS(error, "sysctlbyname(\"kern.backtrace.user\")"); - /* - * Allocate two pages -- a first one that's valid and a second that - * will be non-writeable to catch a copyout that's too large. - */ - + // Allocate two pages -- a first one that's valid and a second that + // will be non-writeable to catch a copyout that's too large. bt_page = mmap(NULL, vm_page_size * 2, PROT_READ | PROT_WRITE, MAP_ANON | MAP_PRIVATE, -1, 0); T_WITH_ERRNO; @@ -227,23 +255,16 @@ T_DECL(backtrace_user_bounds, error = mprotect(guard_page, vm_page_size, PROT_READ); T_ASSERT_POSIX_SUCCESS(error, "mprotect(..., PROT_READ) guard page"); - /* - * Ensure the pages are set up as expected. - */ - + // Ensure the pages are set up as expected. kr = vm_write(mach_task_self(), (vm_address_t)bt_page, (vm_offset_t)&(int){ 12345 }, sizeof(int)); T_ASSERT_MACH_SUCCESS(kr, "should succeed in writing to backtrace page"); - kr = vm_write(mach_task_self(), (vm_address_t)guard_page, (vm_offset_t)&(int){ 12345 }, sizeof(int)); T_ASSERT_NE(kr, KERN_SUCCESS, "should fail to write to guard page"); - /* - * Ask the kernel to write the backtrace just before the guard page. - */ - + // Ask the kernel to write the backtrace just before the guard page. bt_start = (char *)guard_page - (kword_size * bt_filled); bt_filled_after = bt_filled; @@ -255,10 +276,7 @@ T_DECL(backtrace_user_bounds, "both calls to backtrace should have filled in the same number of " "frames"); - /* - * Expect the kernel to fault when writing too far. - */ - + // Expect the kernel to fault when writing too far. 
bt_start = (char *)bt_start + 1; bt_filled_after = bt_filled; error = sysctlbyname("kern.backtrace.user", bt_start, &bt_filled_after, diff --git a/tests/bounded_array.cpp b/tests/bounded_array.cpp new file mode 100644 index 000000000..e4f6ce9ba --- /dev/null +++ b/tests/bounded_array.cpp @@ -0,0 +1,11 @@ +// +// Test runner for all bounded_array tests. +// + +#include + +T_GLOBAL_META( + T_META_NAMESPACE("bounded_array"), + T_META_CHECK_LEAKS(false), + T_META_RUN_CONCURRENTLY(true) + ); diff --git a/tests/bounded_array_ref.cpp b/tests/bounded_array_ref.cpp new file mode 100644 index 000000000..5c6938d86 --- /dev/null +++ b/tests/bounded_array_ref.cpp @@ -0,0 +1,11 @@ +// +// Test runner for all bounded_array_ref tests. +// + +#include + +T_GLOBAL_META( + T_META_NAMESPACE("bounded_array_ref"), + T_META_CHECK_LEAKS(false), + T_META_RUN_CONCURRENTLY(true) + ); diff --git a/tests/bounded_array_ref_src/begin_end.cpp b/tests/bounded_array_ref_src/begin_end.cpp new file mode 100644 index 000000000..067bc6417 --- /dev/null +++ b/tests/bounded_array_ref_src/begin_end.cpp @@ -0,0 +1,47 @@ +// +// Tests for +// iterator begin() const; +// iterator end() const; +// + +#include +#include "test_policy.h" +#include +#include + +struct T { int i; }; + +template +static void +tests() +{ + using AR = test_bounded_array_ref; + + // Check begin()/end() for a non-null array ref + { + T array[5] = {T{0}, T{1}, T{2}, T{3}, T{4}}; + AR const view(array); + test_bounded_ptr begin = view.begin(); + test_bounded_ptr end = view.end(); + CHECK(begin.discard_bounds() == &array[0]); + CHECK(end.unsafe_discard_bounds() == &array[5]); + } + + // Check begin()/end() for a null array ref + { + AR const view; + test_bounded_ptr begin = view.begin(); + test_bounded_ptr end = view.end(); + CHECK(begin.unsafe_discard_bounds() == nullptr); + CHECK(end.unsafe_discard_bounds() == nullptr); + } + + // Check associated types + { + static_assert(std::is_same_v >); + } +} + +T_DECL(begin_end, "bounded_array_ref.begin_end") { + tests(); +} diff --git a/tests/bounded_array_ref_src/compare.equal.nullptr.cpp b/tests/bounded_array_ref_src/compare.equal.nullptr.cpp new file mode 100644 index 000000000..700256e69 --- /dev/null +++ b/tests/bounded_array_ref_src/compare.equal.nullptr.cpp @@ -0,0 +1,46 @@ +// +// Tests for +// template +// bool operator==(bounded_array_ref const& x, std::nullptr_t); +// +// template +// bool operator!=(bounded_array_ref const& x, std::nullptr_t); +// +// template +// bool operator==(std::nullptr_t, bounded_array_ref const& x); +// +// template +// bool operator!=(std::nullptr_t, bounded_array_ref const& x); +// + +#include +#include "test_policy.h" +#include +#include + +struct T { int i; }; + +template +static void +tests() +{ + { + T array[5] = {T{0}, T{1}, T{2}, T{3}, T{4}}; + test_bounded_array_ref view(array); + CHECK(!(view == nullptr)); + CHECK(!(nullptr == view)); + CHECK(view != nullptr); + CHECK(nullptr != view); + } + { + test_bounded_array_ref view; + CHECK(view == nullptr); + CHECK(nullptr == view); + CHECK(!(view != nullptr)); + CHECK(!(nullptr != view)); + } +} + +T_DECL(compare_equal_nullptr, "bounded_array_ref.compare.equal.nullptr") { + tests(); +} diff --git a/tests/bounded_array_ref_src/ctor.C_array.cpp b/tests/bounded_array_ref_src/ctor.C_array.cpp new file mode 100644 index 000000000..77e1e66cf --- /dev/null +++ b/tests/bounded_array_ref_src/ctor.C_array.cpp @@ -0,0 +1,62 @@ +// +// Tests for +// template +// bounded_array_ref(T (&array)[N]); +// + +#include +#include "test_policy.h" 
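// --- Illustrative aside, not part of the patch -----------------------------
// Usage sketch for the nullptr comparisons and the C-array constructor
// exercised above: a default-constructed bounded_array_ref is null and
// compares equal to nullptr, while a view over a C array does not, and its
// size is deduced from the array's extent. The header path and the trapping
// policy template parameter are assumptions of this sketch; the member
// functions shown are the ones the tests use.
#include <cassert>
#include <libkern/c++/bounded_array_ref.h>

struct assert_trap_policy {
	static void trap(char const *) { assert(false); }
};

static void
nullptr_comparison_example()
{
	libkern::bounded_array_ref<int, assert_trap_policy> empty;
	assert(empty == nullptr && nullptr == empty);

	int storage[3] = {1, 2, 3};
	libkern::bounded_array_ref<int, assert_trap_policy> view(storage);
	assert(view != nullptr);
	assert(view.size() == 3 && view[2] == 3);
}
// ----------------------------------------------------------------------------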
+#include +#include + +struct T { int i; }; +inline bool +operator==(T const& a, T const& b) +{ + return a.i == b.i; +}; + +template +static void +tests() +{ + { + T array[5] = {T{0}, T{1}, T{2}, T{3}, T{4}}; + test_bounded_array_ref view(array); + CHECK(view.data() == &array[0]); + CHECK(view.size() == 5); + CHECK(view[0] == T{0}); + CHECK(view[1] == T{1}); + CHECK(view[2] == T{2}); + CHECK(view[3] == T{3}); + CHECK(view[4] == T{4}); + } + + { + T array[1] = {T{11}}; + test_bounded_array_ref view(array); + CHECK(view.data() == &array[0]); + CHECK(view.size() == 1); + CHECK(view[0] == T{11}); + } + + // Also test implicit construction + { + T array[1] = {T{11}}; + test_bounded_array_ref view = array; + CHECK(view.data() == &array[0]); + CHECK(view.size() == 1); + } + { + T array[1] = {T{11}}; + auto check = [&array](test_bounded_array_ref view) { + CHECK(view.data() == &array[0]); + CHECK(view.size() == 1); + }; + check(array); + } +} + +T_DECL(ctor_C_array, "bounded_array_ref.ctor.C_array") { + tests(); +} diff --git a/tests/bounded_array_ref_src/ctor.begin_end.cpp b/tests/bounded_array_ref_src/ctor.begin_end.cpp new file mode 100644 index 000000000..547dd6cb0 --- /dev/null +++ b/tests/bounded_array_ref_src/ctor.begin_end.cpp @@ -0,0 +1,80 @@ +// +// Tests for +// explicit bounded_array_ref(T* first, T* last); +// + +#include +#include "test_policy.h" +#include +#include + +struct T { int i; }; +inline bool +operator==(T const& a, T const& b) +{ + return a.i == b.i; +}; + +template +static void +tests() +{ + T array[5] = {T{0}, T{1}, T{2}, T{3}, T{4}}; + + // T{0} T{1} T{2} T{3} T{4} + // ^ ^ + // | | + // first last + { + T* first = &array[0]; + T* last = &array[5]; + test_bounded_array_ref view(first, last); + CHECK(view.data() == &array[0]); + CHECK(view.size() == 5); + CHECK(view[0] == T{0}); + CHECK(view[1] == T{1}); + CHECK(view[2] == T{2}); + CHECK(view[3] == T{3}); + CHECK(view[4] == T{4}); + } + + // T{0} T{1} T{2} T{3} T{4} + // ^ ^ + // | | + // first last + { + T* first = &array[0]; + T* last = &array[1]; + test_bounded_array_ref view(first, last); + CHECK(view.data() == &array[0]); + CHECK(view.size() == 1); + CHECK(view[0] == T{0}); + } + + // T{0} T{1} T{2} T{3} T{4} + // ^ + // | + // first,last + { + T* first = &array[0]; + T* last = &array[0]; + test_bounded_array_ref view(first, last); + CHECK(view.size() == 0); + } + + // T{0} T{1} T{2} T{3} T{4} + // ^ + // | + // first,last + { + T* first = &array[5]; + T* last = &array[5]; + test_bounded_array_ref view(first, last); + CHECK(view.size() == 0); + } +} + +T_DECL(ctor_begin_end, "bounded_array_ref.ctor.begin_end") { + tests(); + tests(); +} diff --git a/tests/bounded_array_ref_src/ctor.bounded_array.cpp b/tests/bounded_array_ref_src/ctor.bounded_array.cpp new file mode 100644 index 000000000..37eebec49 --- /dev/null +++ b/tests/bounded_array_ref_src/ctor.bounded_array.cpp @@ -0,0 +1,69 @@ +// +// Tests for +// template +// bounded_array_ref(bounded_array& data); +// + +#include +#include "test_policy.h" +#include +#include + +struct T { int i; }; +inline bool +operator==(T const& a, T const& b) +{ + return a.i == b.i; +}; + +template +static void +tests() +{ + { + test_bounded_array array = {T{0}, T{1}, T{2}, T{3}, T{4}}; + test_bounded_array_ref view(array); + CHECK(view.data() == array.data()); + CHECK(view.size() == 5); + CHECK(view[0] == T{0}); + CHECK(view[1] == T{1}); + CHECK(view[2] == T{2}); + CHECK(view[3] == T{3}); + CHECK(view[4] == T{4}); + } + + { + test_bounded_array array = {T{11}}; + 
test_bounded_array_ref view(array); + CHECK(view.data() == array.data()); + CHECK(view.size() == 1); + CHECK(view[0] == T{11}); + } + + { + test_bounded_array array = {}; + test_bounded_array_ref view(array); + CHECK(view.data() == array.data()); + CHECK(view.size() == 0); + } + + // Also test implicit construction + { + test_bounded_array array = {T{11}}; + test_bounded_array_ref view = array; + CHECK(view.data() == array.data()); + CHECK(view.size() == 1); + } + { + test_bounded_array array = {T{11}}; + auto check = [&array](test_bounded_array_ref view) { + CHECK(view.data() == array.data()); + CHECK(view.size() == 1); + }; + check(array); + } +} + +T_DECL(ctor_bounded_array, "bounded_array_ref.ctor.bounded_array") { + tests(); +} diff --git a/tests/bounded_array_ref_src/ctor.bounded_ptr.cpp b/tests/bounded_array_ref_src/ctor.bounded_ptr.cpp new file mode 100644 index 000000000..c7a75b31a --- /dev/null +++ b/tests/bounded_array_ref_src/ctor.bounded_ptr.cpp @@ -0,0 +1,104 @@ +// +// Tests for +// explicit bounded_array_ref(bounded_ptr data, size_t n); +// + +#include +#include "test_policy.h" +#include +#include +#include + +struct T { int i; }; +inline bool +operator==(T const& a, T const& b) +{ + return a.i == b.i; +}; + +template +static void +tests() +{ + T array[5] = {T{0}, T{1}, T{2}, T{3}, T{4}}; + T* const begin = &array[0]; + T* const end = &array[5]; + + // T{0} T{1} T{2} T{3} T{4} + // ^ ^ + // | | + // begin,ptr end + // + // ^------------------- view -----------------------^ + { + test_bounded_ptr ptr(&array[0], begin, end); + test_bounded_array_ref view(ptr, 5); + CHECK(view.data() == &array[0]); + CHECK(view.size() == 5); + CHECK(view[0] == T{0}); + CHECK(view[1] == T{1}); + CHECK(view[2] == T{2}); + CHECK(view[3] == T{3}); + CHECK(view[4] == T{4}); + } + // T{0} T{1} T{2} T{3} T{4} + // ^ ^ + // | | + // begin,ptr end + // + // ^----- view -----^ + { + test_bounded_ptr ptr(&array[0], begin, end); + test_bounded_array_ref view(ptr, 3); + CHECK(view.data() == &array[0]); + CHECK(view.size() == 3); + CHECK(view[0] == T{0}); + CHECK(view[1] == T{1}); + CHECK(view[2] == T{2}); + } + // T{0} T{1} T{2} T{3} T{4} + // ^ ^ ^ + // | | | + // begin ptr end + // + // ^------- view --------^ + { + test_bounded_ptr ptr(&array[3], begin, end); + test_bounded_array_ref view(ptr, 2); + CHECK(view.data() == &array[3]); + CHECK(view.size() == 2); + CHECK(view[0] == T{3}); + CHECK(view[1] == T{4}); + } + // Check with a valid `bounded_ptr` and a size of 0. + { + test_bounded_ptr ptr(&array[0], begin, end); + test_bounded_array_ref view(ptr, 0); + CHECK(view.size() == 0); + } + // Check with a null `bounded_ptr` and a size of 0. + { + test_bounded_ptr ptr = nullptr; + test_bounded_array_ref view(ptr, 0); + CHECK(view.size() == 0); + } + // Check with a non-null but invalid `bounded_ptr` and a size of 0. + { + test_bounded_ptr ptr(end, begin, end); + test_bounded_array_ref view(ptr, 0); + CHECK(view.size() == 0); + } + // Make sure there's no ambiguity between constructors. 
+ { + test_bounded_ptr ptr(begin, begin, end); + std::ptrdiff_t size = sizeof(array) / sizeof(*array); + test_bounded_array_ref view(ptr, size); + CHECK(view.data() == &array[0]); + CHECK(view.size() == 5); + } +} + +T_DECL(ctor_bounded_ptr, "bounded_array_ref.ctor.bounded_ptr") { + tests(); + tests(); +} diff --git a/tests/bounded_array_ref_src/ctor.default.cpp b/tests/bounded_array_ref_src/ctor.default.cpp new file mode 100644 index 000000000..3d60a87ac --- /dev/null +++ b/tests/bounded_array_ref_src/ctor.default.cpp @@ -0,0 +1,37 @@ +// +// Tests for +// bounded_array_ref(); +// + +#include +#include "test_policy.h" +#include +#include + +struct T { int i; }; + +template +static void +tests() +{ + { + test_bounded_array_ref view; + CHECK(view.data() == nullptr); + CHECK(view.size() == 0); + } + { + test_bounded_array_ref view{}; + CHECK(view.data() == nullptr); + CHECK(view.size() == 0); + } + { + test_bounded_array_ref view = test_bounded_array_ref(); + CHECK(view.data() == nullptr); + CHECK(view.size() == 0); + } +} + +T_DECL(ctor_default, "bounded_array_ref.ctor.default") { + tests(); + tests(); +} diff --git a/tests/bounded_array_ref_src/ctor.raw_ptr.cpp b/tests/bounded_array_ref_src/ctor.raw_ptr.cpp new file mode 100644 index 000000000..509539a3e --- /dev/null +++ b/tests/bounded_array_ref_src/ctor.raw_ptr.cpp @@ -0,0 +1,121 @@ +// +// Tests for +// explicit bounded_array_ref(T* data, size_t n); +// + +#include +#include "test_policy.h" +#include +#include +#include + +struct T { int i; }; +inline bool +operator==(T const& a, T const& b) +{ + return a.i == b.i; +}; + +template +static void +tests() +{ + T array[5] = {T{0}, T{1}, T{2}, T{3}, T{4}}; + + // T{0} T{1} T{2} T{3} T{4} + // ^ + // | + // ptr + // + // ^------------------- view -----------------------^ + { + T* ptr = &array[0]; + test_bounded_array_ref view(ptr, 5); + CHECK(view.data() == &array[0]); + CHECK(view.size() == 5); + CHECK(view[0] == T{0}); + CHECK(view[1] == T{1}); + CHECK(view[2] == T{2}); + CHECK(view[3] == T{3}); + CHECK(view[4] == T{4}); + } + // T{0} T{1} T{2} T{3} T{4} + // ^ + // | + // ptr + // + // ^----- view -----^ + { + T* ptr = &array[0]; + test_bounded_array_ref view(ptr, 3); + CHECK(view.data() == &array[0]); + CHECK(view.size() == 3); + CHECK(view[0] == T{0}); + CHECK(view[1] == T{1}); + CHECK(view[2] == T{2}); + } + // T{0} T{1} T{2} T{3} T{4} + // ^ + // | + // ptr + // + // ^------- view --------^ + { + T* ptr = &array[3]; + test_bounded_array_ref view(ptr, 2); + CHECK(view.data() == &array[3]); + CHECK(view.size() == 2); + CHECK(view[0] == T{3}); + CHECK(view[1] == T{4}); + } + // Check with a valid pointer and a size of 0. + { + T* ptr = &array[0]; + test_bounded_array_ref view(ptr, static_cast(0)); + CHECK(view.size() == 0); + } + // Check with a null pointer and a size of 0. + { + T* ptr = nullptr; + test_bounded_array_ref view(ptr, static_cast(0)); + CHECK(view.size() == 0); + } + // Check with a non-null but invalid pointer and a size of 0. + { + T* ptr = &array[5]; + test_bounded_array_ref view(ptr, static_cast(0)); + CHECK(view.size() == 0); + } + // Make sure there's no ambiguity between constructors. 
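// --- Illustrative aside, not part of the patch -----------------------------
// The constructors exercised above give three ways of forming a view over
// part of a buffer: from a raw pointer plus a count, from a [first, last)
// pair, and from a bounded_ptr plus a count that must fit inside its bounds.
// Header paths and the policy template parameter are assumptions of this
// sketch.
#include <cassert>
#include <libkern/c++/bounded_array_ref.h>
#include <libkern/c++/bounded_ptr.h>

struct assert_trap_policy {
	static void trap(char const *) { assert(false); }
};

static void
view_construction_example()
{
	int buf[8] = {0, 1, 2, 3, 4, 5, 6, 7};

	// Raw pointer + element count.
	libkern::bounded_array_ref<int, assert_trap_policy> a(&buf[2], 4);
	assert(a.size() == 4 && a[0] == 2);

	// Half-open [first, last) range.
	libkern::bounded_array_ref<int, assert_trap_policy> b(&buf[2], &buf[6]);
	assert(b.size() == 4 && b[3] == 5);

	// bounded_ptr carrying its own bounds, plus a count within them.
	libkern::bounded_ptr<int, assert_trap_policy> p(&buf[2], &buf[0], &buf[8]);
	libkern::bounded_array_ref<int, assert_trap_policy> c(p, 4);
	assert(c.data() == &buf[2] && c.size() == 4);
}
// ----------------------------------------------------------------------------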
+ { + T* ptr = &array[0]; + std::ptrdiff_t size = 5; + test_bounded_array_ref view(ptr, size); + CHECK(view.data() == &array[0]); + CHECK(view.size() == 5); + } + + // Make sure we can create nested bounded_array_refs + { + int array1[] = {1, 2, 3, 4, 5}; + int array2[] = {6, 7, 8}; + int array3[] = {9, 10, 11, 12, 13, 14}; + test_bounded_array_ref views[] = { + test_bounded_array_ref(array1, 5), + test_bounded_array_ref(array2, 3), + test_bounded_array_ref(array3, 6) + }; + + test_bounded_array_ref > two_dim(views, 3); + CHECK(two_dim.size() == 3); + CHECK(two_dim.data() == &views[0]); + CHECK(&two_dim[0] == &views[0]); + CHECK(&two_dim[1] == &views[1]); + CHECK(&two_dim[2] == &views[2]); + } +} + +T_DECL(ctor_raw_ptr, "bounded_array_ref.ctor.raw_ptr") { + tests(); + tests(); +} diff --git a/tests/bounded_array_ref_src/data.cpp b/tests/bounded_array_ref_src/data.cpp new file mode 100644 index 000000000..ceeab8d4b --- /dev/null +++ b/tests/bounded_array_ref_src/data.cpp @@ -0,0 +1,46 @@ +// +// Tests for +// T* data() const; +// + +#include +#include "test_policy.h" +#include +#include +#include + +struct T { int i; }; + +template +static void +tests() +{ + T array[5] = {T{0}, T{1}, T{2}, T{3}, T{4}}; + + { + test_bounded_array_ref const view(&array[0], static_cast(0)); + T* data = view.data(); + CHECK(data == &array[0]); + } + { + test_bounded_array_ref const view(&array[0], 1); + T* data = view.data(); + CHECK(data == &array[0]); + } + + { + test_bounded_array_ref const view(&array[1], 2); + T* data = view.data(); + CHECK(data == &array[1]); + } + { + test_bounded_array_ref const view(&array[2], 2); + T* data = view.data(); + CHECK(data == &array[2]); + } +} + +T_DECL(data, "bounded_array_ref.data") { + tests(); + tests(); +} diff --git a/tests/bounded_array_ref_src/for_loop.cpp b/tests/bounded_array_ref_src/for_loop.cpp new file mode 100644 index 000000000..ac9128e50 --- /dev/null +++ b/tests/bounded_array_ref_src/for_loop.cpp @@ -0,0 +1,30 @@ +// +// Make sure `bounded_array_ref` works nicely with the range-based for-loop. 
+// + +#include +#include +#include "test_policy.h" + +struct T { + int i; +}; + +template +static void +tests() +{ + T array[5] = {T{0}, T{1}, T{2}, T{3}, T{4}}; + test_bounded_array_ref view(array); + for (T& element : view) { + element = T{3}; + } + + for (T const& element : view) { + CHECK(element.i == 3); + } +} + +T_DECL(for_loop, "bounded_array_ref.for_loop") { + tests(); +} diff --git a/tests/bounded_array_ref_src/operator.bool.cpp b/tests/bounded_array_ref_src/operator.bool.cpp new file mode 100644 index 000000000..bd52b9f58 --- /dev/null +++ b/tests/bounded_array_ref_src/operator.bool.cpp @@ -0,0 +1,38 @@ +// +// Tests for +// explicit operator bool() const; +// + +#include +#include "test_policy.h" +#include +#include + +struct T { int i; }; + +template +static void +tests() +{ + { + test_bounded_array_ref const view; + if (view) { + CHECK(false); + } + CHECK(!view); + } + { + T array[5] = {}; + test_bounded_array_ref const view(array); + if (view) { + } else { + CHECK(false); + } + CHECK(!!view); + } +} + +T_DECL(operator_bool, "bounded_array_ref.operator.bool") { + tests(); + tests(); +} diff --git a/tests/bounded_array_ref_src/operator.subscript.cpp b/tests/bounded_array_ref_src/operator.subscript.cpp new file mode 100644 index 000000000..ae7ed088f --- /dev/null +++ b/tests/bounded_array_ref_src/operator.subscript.cpp @@ -0,0 +1,35 @@ +// +// Tests for +// T& operator[](ptrdiff_t n) const; +// + +#include +#include "test_policy.h" +#include +#include + +struct T { int i; }; +inline bool +operator==(T const& a, T const& b) +{ + return a.i == b.i; +}; + +template +static void +tests() +{ + { + T array[5] = {T{0}, T{1}, T{2}, T{3}, T{4}}; + test_bounded_array_ref view(array); + CHECK(view[0] == T{0}); + CHECK(view[1] == T{1}); + CHECK(view[2] == T{2}); + CHECK(view[3] == T{3}); + CHECK(view[4] == T{4}); + } +} + +T_DECL(operator_subscript, "bounded_array_ref.operator.subscript") { + tests(); +} diff --git a/tests/bounded_array_ref_src/size.cpp b/tests/bounded_array_ref_src/size.cpp new file mode 100644 index 000000000..5c44738c9 --- /dev/null +++ b/tests/bounded_array_ref_src/size.cpp @@ -0,0 +1,45 @@ +// +// Tests for +// size_t size() const; +// + +#include +#include "test_policy.h" +#include +#include +#include + +struct T { int i; }; + +template +static void +tests() +{ + T array[5] = {T{0}, T{1}, T{2}, T{3}, T{4}}; + + { + test_bounded_array_ref const view(&array[0], static_cast(0)); + std::size_t size = view.size(); + CHECK(size == 0); + } + { + test_bounded_array_ref const view(&array[0], 1); + std::size_t size = view.size(); + CHECK(size == 1); + } + { + test_bounded_array_ref const view(&array[0], 2); + std::size_t size = view.size(); + CHECK(size == 2); + } + { + test_bounded_array_ref const view(&array[0], 5); + std::size_t size = view.size(); + CHECK(size == 5); + } +} + +T_DECL(size, "bounded_array_ref.size") { + tests(); + tests(); +} diff --git a/tests/bounded_array_ref_src/slice.cpp b/tests/bounded_array_ref_src/slice.cpp new file mode 100644 index 000000000..8b2aaf1f6 --- /dev/null +++ b/tests/bounded_array_ref_src/slice.cpp @@ -0,0 +1,227 @@ +// +// Tests for +// bounded_array_ref slice(size_t n, size_t m) const; +// + +#include +#include "test_policy.h" +#include +#include +#include +#include +#include + +struct T { int i; }; + +template +using tracking_bounded_array_ref = libkern::bounded_array_ref; + +template +static void +tests() +{ + T array[5] = {T{0}, T{1}, T{2}, T{3}, T{4}}; + + // Slices starting at 0 + { + test_bounded_array_ref view(array); + 
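// --- Illustrative aside, not part of the patch -----------------------------
// slice(n, m) returns a sub-view of m elements starting at index n and traps
// through the policy when [n, n + m) does not fit inside the view, so chained
// slices stay bounds-checked. Header path and policy parameter are
// assumptions of this sketch.
#include <cassert>
#include <libkern/c++/bounded_array_ref.h>

struct assert_trap_policy {
	static void trap(char const *) { assert(false); }
};

static void
slice_example()
{
	int buf[5] = {0, 1, 2, 3, 4};
	libkern::bounded_array_ref<int, assert_trap_policy> view(buf);

	// A two-element window starting at index 3, equivalent to the documented
	// range [view.begin() + 3, view.begin() + 3 + 2).
	auto tail = view.slice(3, 2);
	assert(tail.size() == 2 && &tail[0] == &buf[3]);

	// Chaining re-slices the sub-view, not the original array.
	auto middle = view.slice(1, 4).slice(1, 2);
	assert(middle.size() == 2 && &middle[0] == &buf[2]);

	// view.slice(4, 2) would call assert_trap_policy::trap(): the requested
	// window runs one element past the end of the view.
}
// ----------------------------------------------------------------------------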
test_bounded_array_ref slice = view.slice(0, 0); + CHECK(slice.size() == 0); + } + { + test_bounded_array_ref view(array); + test_bounded_array_ref slice = view.slice(0, 1); + CHECK(slice.size() == 1); + CHECK(&slice[0] == &array[0]); + } + { + test_bounded_array_ref view(array); + test_bounded_array_ref slice = view.slice(0, 2); + CHECK(slice.size() == 2); + CHECK(&slice[0] == &array[0]); + CHECK(&slice[1] == &array[1]); + } + { + test_bounded_array_ref view(array); + test_bounded_array_ref slice = view.slice(0, 5); + CHECK(slice.size() == 5); + CHECK(&slice[0] == &array[0]); + CHECK(&slice[1] == &array[1]); + CHECK(&slice[2] == &array[2]); + CHECK(&slice[3] == &array[3]); + CHECK(&slice[4] == &array[4]); + } + { + tracking_bounded_array_ref view(array); + tracking_policy::reset(); + tracking_bounded_array_ref slice = view.slice(0, 6); + CHECK(tracking_policy::did_trap); + CHECK(tracking_policy::message == "bounded_array_ref: invalid slice provided, the indices are of bounds for the bounded_array_ref"); + } + + // Slices starting at 1 (near the beginning) + { + test_bounded_array_ref view(array); + test_bounded_array_ref slice = view.slice(1, 0); + CHECK(slice.size() == 0); + } + { + test_bounded_array_ref view(array); + test_bounded_array_ref slice = view.slice(1, 3); + CHECK(slice.size() == 3); + CHECK(&slice[0] == &array[1]); + CHECK(&slice[1] == &array[2]); + CHECK(&slice[2] == &array[3]); + } + { + test_bounded_array_ref view(array); + test_bounded_array_ref slice = view.slice(1, 4); + CHECK(slice.size() == 4); + CHECK(&slice[0] == &array[1]); + CHECK(&slice[1] == &array[2]); + CHECK(&slice[2] == &array[3]); + CHECK(&slice[3] == &array[4]); + } + { + tracking_bounded_array_ref view(array); + tracking_policy::reset(); + tracking_bounded_array_ref slice = view.slice(1, 5); + CHECK(tracking_policy::did_trap); + } + { + tracking_bounded_array_ref view(array); + tracking_policy::reset(); + tracking_bounded_array_ref slice = view.slice(1, 10); + CHECK(tracking_policy::did_trap); + } + + // Slices starting at 3 (in the middle) + { + test_bounded_array_ref view(array); + test_bounded_array_ref slice = view.slice(3, 0); + CHECK(slice.size() == 0); + } + { + test_bounded_array_ref view(array); + test_bounded_array_ref slice = view.slice(3, 2); + CHECK(slice.size() == 2); + CHECK(&slice[0] == &array[3]); + CHECK(&slice[1] == &array[4]); + } + { + tracking_bounded_array_ref view(array); + tracking_policy::reset(); + tracking_bounded_array_ref slice = view.slice(3, 3); + CHECK(tracking_policy::did_trap); + } + { + tracking_bounded_array_ref view(array); + tracking_policy::reset(); + tracking_bounded_array_ref slice = view.slice(3, 100); + CHECK(tracking_policy::did_trap); + } + + // Slices starting at 4 (near the end) + { + test_bounded_array_ref view(array); + test_bounded_array_ref slice = view.slice(4, 0); + CHECK(slice.size() == 0); + } + { + test_bounded_array_ref view(array); + test_bounded_array_ref slice = view.slice(4, 1); + CHECK(slice.size() == 1); + CHECK(&slice[0] == &array[4]); + } + { + tracking_bounded_array_ref view(array); + tracking_policy::reset(); + tracking_bounded_array_ref slice = view.slice(4, 2); + CHECK(tracking_policy::did_trap); + } + + // Slices starting at the end + { + test_bounded_array_ref view(array); + test_bounded_array_ref slice = view.slice(5, 0); + CHECK(slice.size() == 0); + } + { + tracking_bounded_array_ref view(array); + tracking_policy::reset(); + tracking_bounded_array_ref slice = view.slice(5, 1); + CHECK(tracking_policy::did_trap); + } + { + 
tracking_bounded_array_ref view(array); + tracking_policy::reset(); + tracking_bounded_array_ref slice = view.slice(5, 10); + CHECK(tracking_policy::did_trap); + } + + // Slices starting after the end + { + tracking_bounded_array_ref view(array); + tracking_policy::reset(); + tracking_bounded_array_ref slice = view.slice(6, 0); + CHECK(tracking_policy::did_trap); + } + { + tracking_bounded_array_ref view(array); + tracking_policy::reset(); + tracking_bounded_array_ref slice = view.slice(6, 1); + CHECK(tracking_policy::did_trap); + } + { + tracking_bounded_array_ref view(array); + tracking_policy::reset(); + tracking_bounded_array_ref slice = view.slice(8, 10); + CHECK(tracking_policy::did_trap); + } + + // Slices overflowing a uint32_t + { + std::uint32_t n = std::numeric_limits::max() / 2 + 1; + std::uint32_t m = std::numeric_limits::max() / 2 + 1; + tracking_bounded_array_ref view(array); + tracking_policy::reset(); + tracking_bounded_array_ref slice = view.slice(n, m); + CHECK(tracking_policy::did_trap); + CHECK(tracking_policy::message == "bounded_array_ref: n + m is larger than the size of any bounded_array_ref"); + } + + // Check the documented range equivalent + { + test_bounded_array_ref view(array); + test_bounded_array_ref slice = view.slice(3, 2); + CHECK(slice.begin() == view.begin() + 3); + CHECK(slice.end() == view.begin() + 3 + 2); + } + + // Chaining calls to slice() + { + test_bounded_array_ref view(array); + test_bounded_array_ref slice = view.slice(1, 4).slice(2, 2); + CHECK(slice.size() == 2); + CHECK(&slice[0] == &array[3]); + CHECK(&slice[1] == &array[4]); + } + + // Slicing an empty view + { + test_bounded_array_ref view; + test_bounded_array_ref slice = view.slice(0, 0); + CHECK(slice.size() == 0); + } + { + tracking_bounded_array_ref view; + tracking_policy::reset(); + tracking_bounded_array_ref slice = view.slice(0, 1); + CHECK(tracking_policy::did_trap); + } +} + +T_DECL(slice, "bounded_array_ref.slice") { + tests(); + tests(); +} diff --git a/tests/bounded_array_ref_src/test_policy.h b/tests/bounded_array_ref_src/test_policy.h new file mode 100644 index 000000000..3897d6db2 --- /dev/null +++ b/tests/bounded_array_ref_src/test_policy.h @@ -0,0 +1,52 @@ +#ifndef TESTS_BOUNDED_ARRAY_REF_SRC_TEST_POLICY_H +#define TESTS_BOUNDED_ARRAY_REF_SRC_TEST_POLICY_H + +#include +#include +#include +#include +#include +#include +#include + +namespace { +struct test_policy { + static void + trap(char const*) + { + assert(false); + } +}; + +struct tracking_policy { + static bool did_trap; + static std::string message; + static void + trap(char const* m) + { + did_trap = true; + message.assign(m); + } + static void + reset() + { + did_trap = false; + message = ""; + } +}; +bool tracking_policy::did_trap = false; +std::string tracking_policy::message = ""; +} + +template +using test_bounded_array_ref = libkern::bounded_array_ref; + +template +using test_bounded_array = libkern::bounded_array; + +template +using test_bounded_ptr = libkern::bounded_ptr; + +#define CHECK(...) 
T_ASSERT_TRUE((__VA_ARGS__), # __VA_ARGS__) + +#endif // !TESTS_BOUNDED_ARRAY_REF_SRC_TEST_POLICY_H diff --git a/tests/bounded_array_src/begin_end.cpp b/tests/bounded_array_src/begin_end.cpp new file mode 100644 index 000000000..253d84c80 --- /dev/null +++ b/tests/bounded_array_src/begin_end.cpp @@ -0,0 +1,63 @@ +// +// Tests for +// iterator begin(); +// const_iterator begin() const; +// +// iterator end(); +// const_iterator end() const; +// + +#include +#include "test_policy.h" +#include +#include + +struct T { int i; }; + +template +static void +tests() +{ + // Check begin()/end() for a non-empty array + { + test_bounded_array array = {T{0}, T{1}, T{2}, T{3}, T{4}}; + test_bounded_ptr begin = array.begin(); + test_bounded_ptr end = array.end(); + CHECK(begin.discard_bounds() == array.data()); + CHECK(end.unsafe_discard_bounds() == array.data() + 5); + } + { + test_bounded_array const array = {T{0}, T{1}, T{2}, T{3}, T{4}}; + test_bounded_ptr begin = array.begin(); + test_bounded_ptr end = array.end(); + CHECK(begin.discard_bounds() == array.data()); + CHECK(end.unsafe_discard_bounds() == array.data() + 5); + } + + // Check begin()/end() for an empty array + { + test_bounded_array array = {}; + test_bounded_ptr begin = array.begin(); + test_bounded_ptr end = array.end(); + CHECK(begin.unsafe_discard_bounds() == array.data()); + CHECK(end.unsafe_discard_bounds() == array.data()); + } + { + test_bounded_array const array = {}; + test_bounded_ptr begin = array.begin(); + test_bounded_ptr end = array.end(); + CHECK(begin.unsafe_discard_bounds() == array.data()); + CHECK(end.unsafe_discard_bounds() == array.data()); + } + + // Check associated types + { + using A = test_bounded_array; + static_assert(std::is_same_v >); + static_assert(std::is_same_v >); + } +} + +T_DECL(begin_end, "bounded_array.begin_end") { + tests(); +} diff --git a/tests/bounded_array_src/ctor.aggregate_init.cpp b/tests/bounded_array_src/ctor.aggregate_init.cpp new file mode 100644 index 000000000..2dccc0053 --- /dev/null +++ b/tests/bounded_array_src/ctor.aggregate_init.cpp @@ -0,0 +1,59 @@ +// +// Tests for +// aggregate-initialization of `bounded_array` +// + +#include +#include +#include +#include "test_policy.h" + +struct T { + T() : i(4) + { + } + T(int k) : i(k) + { + } + int i; + friend bool + operator==(T const& a, T const& b) + { + return a.i == b.i; + } +}; + +template +static void +tests() +{ + { + test_bounded_array array = {T(1), T(2), T(3), T(4), T(5)}; + CHECK(array.size() == 5); + CHECK(array[0] == T(1)); + CHECK(array[1] == T(2)); + CHECK(array[2] == T(3)); + CHECK(array[3] == T(4)); + CHECK(array[4] == T(5)); + } + + { + test_bounded_array array{T(1), T(2), T(3), T(4), T(5)}; + CHECK(array.size() == 5); + CHECK(array[0] == T(1)); + CHECK(array[1] == T(2)); + CHECK(array[2] == T(3)); + CHECK(array[3] == T(4)); + CHECK(array[4] == T(5)); + } + + // Check with a 0-sized array + { + test_bounded_array array = {}; + CHECK(array.size() == 0); + } +} + +T_DECL(ctor_aggregate_init, "bounded_array.ctor.aggregate_init") { + tests(); +} diff --git a/tests/bounded_array_src/ctor.default.cpp b/tests/bounded_array_src/ctor.default.cpp new file mode 100644 index 000000000..ddd0457c7 --- /dev/null +++ b/tests/bounded_array_src/ctor.default.cpp @@ -0,0 +1,69 @@ +// +// Tests for +// bounded_array(); +// + +#include +#include +#include +#include "test_policy.h" + +struct T { + T() : i(4) + { + } + int i; + friend bool + operator==(T const& a, T const& b) + { + return a.i == b.i; + } +}; + +template +static void +tests() 
+{ + { + test_bounded_array array; + CHECK(array.size() == 10); + T* end = array.data() + array.size(); + for (auto it = array.data(); it != end; ++it) { + CHECK(*it == T()); + } + } + { + test_bounded_array array{}; + CHECK(array.size() == 10); + T* end = array.data() + array.size(); + for (auto it = array.data(); it != end; ++it) { + CHECK(*it == T()); + } + } + { + test_bounded_array array = {}; + CHECK(array.size() == 10); + T* end = array.data() + array.size(); + for (auto it = array.data(); it != end; ++it) { + CHECK(*it == T()); + } + } + { + test_bounded_array array = test_bounded_array(); + CHECK(array.size() == 10); + T* end = array.data() + array.size(); + for (auto it = array.data(); it != end; ++it) { + CHECK(*it == T()); + } + } + + // Check with a 0-sized array + { + test_bounded_array array; + CHECK(array.size() == 0); + } +} + +T_DECL(ctor_default, "bounded_array.ctor.default") { + tests(); +} diff --git a/tests/bounded_array_src/data.cpp b/tests/bounded_array_src/data.cpp new file mode 100644 index 000000000..7a87d522e --- /dev/null +++ b/tests/bounded_array_src/data.cpp @@ -0,0 +1,58 @@ +// +// Tests for +// T* data(); +// T const* data() const; +// + +#include +#include "test_policy.h" +#include +#include + +struct T { int i; }; +inline bool +operator==(T const& a, T const& b) +{ + return a.i == b.i; +} + +template +static void +tests() +{ + { + test_bounded_array array = {T{0}, T{1}, T{2}, T{3}, T{4}}; + T* data = array.data(); + CHECK(data != nullptr); + CHECK(data[0] == T{0}); + CHECK(data[1] == T{1}); + CHECK(data[2] == T{2}); + CHECK(data[3] == T{3}); + CHECK(data[4] == T{4}); + } + { + test_bounded_array const array = {T{0}, T{1}, T{2}, T{3}, T{4}}; + T const* data = array.data(); + CHECK(data != nullptr); + CHECK(data[0] == T{0}); + CHECK(data[1] == T{1}); + CHECK(data[2] == T{2}); + CHECK(data[3] == T{3}); + CHECK(data[4] == T{4}); + } + + { + test_bounded_array array = {}; + T* data = array.data(); + CHECK(data != nullptr); + } + { + test_bounded_array const array = {}; + T const* data = array.data(); + CHECK(data != nullptr); + } +} + +T_DECL(data, "bounded_array.data") { + tests(); +} diff --git a/tests/bounded_array_src/for_loop.cpp b/tests/bounded_array_src/for_loop.cpp new file mode 100644 index 000000000..c2b22122b --- /dev/null +++ b/tests/bounded_array_src/for_loop.cpp @@ -0,0 +1,29 @@ +// +// Make sure `bounded_array` works nicely with the range-based for-loop. 
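// --- Illustrative aside, not part of the patch -----------------------------
// bounded_array is a fixed-size inline array whose element count is part of
// the type: size() always reports N, and begin()/end() hand back bounded_ptr
// iterators, so the range-based for-loop below is bounds-checked through the
// policy. The header path and the <T, N, Policy> parameter order are
// assumptions of this sketch.
#include <cassert>
#include <libkern/c++/bounded_array.h>

struct assert_trap_policy {
	static void trap(char const *) { assert(false); }
};

static void
bounded_array_example()
{
	// Aggregate initialization; here the initializer count matches N.
	libkern::bounded_array<int, 4, assert_trap_policy> values = {10, 20, 30, 40};
	assert(values.size() == 4);

	int sum = 0;
	for (int v : values) {
		sum += v;
	}
	assert(sum == 100);

	// Default construction default-constructs every element (indeterminate
	// for int, T() for class types), and size() is still N.
	libkern::bounded_array<int, 4, assert_trap_policy> defaults;
	assert(defaults.size() == 4);
}
// ----------------------------------------------------------------------------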
+// + +#include +#include +#include "test_policy.h" + +struct T { + int i; +}; + +template +static void +tests() +{ + test_bounded_array array = {T{0}, T{1}, T{2}, T{3}, T{4}}; + for (T& element : array) { + element = T{3}; + } + + for (T const& element : array) { + CHECK(element.i == 3); + } +} + +T_DECL(for_loop, "bounded_array.for_loop") { + tests(); +} diff --git a/tests/bounded_array_src/operator.subscript.cpp b/tests/bounded_array_src/operator.subscript.cpp new file mode 100644 index 000000000..cb61048e8 --- /dev/null +++ b/tests/bounded_array_src/operator.subscript.cpp @@ -0,0 +1,56 @@ +// +// Tests for +// T& operator[](ptrdiff_t n); +// T const& operator[](ptrdiff_t n) const; +// + +#include +#include "test_policy.h" +#include +#include + +struct T { int i; }; +inline bool +operator==(T const& a, T const& b) +{ + return a.i == b.i; +} + +template +static void +tests() +{ + { + test_bounded_array array = {T{0}, T{1}, T{2}, T{3}, T{4}}; + T& a0 = array[0]; + CHECK(&a0 == array.data()); + CHECK(a0 == T{0}); + T& a1 = array[1]; + CHECK(a1 == T{1}); + T& a2 = array[2]; + CHECK(a2 == T{2}); + T& a3 = array[3]; + CHECK(a3 == T{3}); + T& a4 = array[4]; + CHECK(a4 == T{4}); + } + + { + test_bounded_array const array = {T{0}, T{1}, T{2}, T{3}, T{4}}; + T const& a0 = array[0]; + CHECK(&a0 == array.data()); + CHECK(a0 == T{0}); + T const& a1 = array[1]; + CHECK(a1 == T{1}); + T const& a2 = array[2]; + CHECK(a2 == T{2}); + T const& a3 = array[3]; + CHECK(a3 == T{3}); + T const& a4 = array[4]; + CHECK(a4 == T{4}); + } +} + +T_DECL(operator_subscript, "bounded_array.operator.subscript") { + tests(); +} diff --git a/tests/bounded_array_src/size.cpp b/tests/bounded_array_src/size.cpp new file mode 100644 index 000000000..7238361e5 --- /dev/null +++ b/tests/bounded_array_src/size.cpp @@ -0,0 +1,31 @@ +// +// Tests for +// size_t size() const; +// + +#include +#include "test_policy.h" +#include +#include + +struct T { int i; }; + +template +static void +tests() +{ + { + test_bounded_array const array = {T{0}, T{1}, T{2}, T{3}, T{4}}; + size_t size = array.size(); + CHECK(size == 5); + } + { + test_bounded_array const array = {}; + size_t size = array.size(); + CHECK(size == 0); + } +} + +T_DECL(size, "bounded_array.size") { + tests(); +} diff --git a/tests/bounded_array_src/test_policy.h b/tests/bounded_array_src/test_policy.h new file mode 100644 index 000000000..3c80b3b8c --- /dev/null +++ b/tests/bounded_array_src/test_policy.h @@ -0,0 +1,26 @@ +#ifndef TESTS_BOUNDED_ARRAY_SRC_TEST_POLICY_H +#define TESTS_BOUNDED_ARRAY_SRC_TEST_POLICY_H + +#include +#include +#include +#include +#include + +struct test_policy { + static void + trap(char const*) + { + assert(false); + } +}; + +template +using test_bounded_array = libkern::bounded_array; + +template +using test_bounded_ptr = libkern::bounded_ptr; + +#define CHECK(...) T_ASSERT_TRUE((__VA_ARGS__), # __VA_ARGS__) + +#endif // !TESTS_BOUNDED_ARRAY_SRC_TEST_POLICY_H diff --git a/tests/bounded_ptr.cpp b/tests/bounded_ptr.cpp new file mode 100644 index 000000000..dbb31f39d --- /dev/null +++ b/tests/bounded_ptr.cpp @@ -0,0 +1,11 @@ +// +// Test runner for all bounded_ptr tests. 
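// --- Illustrative aside, not part of the patch -----------------------------
// The bounded containers report violations through a policy type's static
// trap(char const *) hook, which is what the test_policy.h headers above rely
// on: a production policy can panic, while a test policy can simply record
// that a trap happened, as tracking_policy does. Minimal sketch; the header
// path and policy parameter are assumptions.
#include <cassert>
#include <libkern/c++/bounded_array_ref.h>

struct recording_policy {
	static inline bool did_trap = false; // C++17 inline variable
	static void trap(char const *) { did_trap = true; }
};

static void
trap_observation_example()
{
	int buf[3] = {1, 2, 3};
	libkern::bounded_array_ref<int, recording_policy> view(buf);

	recording_policy::did_trap = false;
	(void)view.slice(0, 4); // asks for more elements than the view holds
	assert(recording_policy::did_trap);
}
// ----------------------------------------------------------------------------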
+// + +#include + +T_GLOBAL_META( + T_META_NAMESPACE("bounded_ptr"), + T_META_CHECK_LEAKS(false), + T_META_RUN_CONCURRENTLY(true) + ); diff --git a/tests/bounded_ptr_03.cpp b/tests/bounded_ptr_03.cpp new file mode 100644 index 000000000..ff2d2e1b1 --- /dev/null +++ b/tests/bounded_ptr_03.cpp @@ -0,0 +1,10 @@ +// +// Make sure that the forward declaration header can be included in C++03. +// + +#include +#include + +T_DECL(fwd_decl_cxx03, "bounded_ptr.fwd_decl.cxx03") { + T_PASS("bounded_ptr.fwd_decl.cxx03 compiled successfully"); +} diff --git a/tests/bounded_ptr_src/arith.add.cpp b/tests/bounded_ptr_src/arith.add.cpp new file mode 100644 index 000000000..59cba1b15 --- /dev/null +++ b/tests/bounded_ptr_src/arith.add.cpp @@ -0,0 +1,127 @@ +// +// Tests for +// friend bounded_ptr operator+(bounded_ptr p, std::ptrdiff_t n); +// friend bounded_ptr operator+(std::ptrdiff_t n, bounded_ptr p); +// +// The heavy lifting is done in operator+=, so we only check basic functioning. +// + +#include +#include +#include +#include +#include "test_utils.h" + +#define _assert(...) T_ASSERT_TRUE((__VA_ARGS__), # __VA_ARGS__) + +struct T { + int i; +}; + +template +static void +tests() +{ + std::array array = {T{0}, T{1}, T{2}, T{3}, T{4}}; + + // Add positive offsets + // T{0} T{1} T{2} T{3} T{4} + // ^ ^ + // | | + // begin, ptr end + { + test_bounded_ptr const ptr(array.begin(), array.begin(), array.end()); + + { + test_bounded_ptr res = ptr + 0; + _assert(&*res == &array[0]); + } + { + test_bounded_ptr res = ptr + 1; + _assert(&*res == &array[1]); + } + { + test_bounded_ptr res = ptr + 2; + _assert(&*res == &array[2]); + } + { + test_bounded_ptr res = ptr + 3; + _assert(&*res == &array[3]); + } + { + test_bounded_ptr res = ptr + 4; + _assert(&*res == &array[4]); + } + { + test_bounded_ptr res = ptr + 5; + _assert(res == array.end()); + } + } + + // Add negative offsets + // T{0} T{1} T{2} T{3} T{4} + // ^ ^ + // | | + // begin end,ptr + { + test_bounded_ptr const ptr(array.end(), array.begin(), array.end()); + + { + test_bounded_ptr res = ptr + 0; + _assert(res == array.end()); + } + { + test_bounded_ptr res = ptr + -1; + _assert(&*res == &array[4]); + } + { + test_bounded_ptr res = ptr + -2; + _assert(&*res == &array[3]); + } + { + test_bounded_ptr res = ptr + -3; + _assert(&*res == &array[2]); + } + { + test_bounded_ptr res = ptr + -4; + _assert(&*res == &array[1]); + } + { + test_bounded_ptr res = ptr + -5; + _assert(&*res == &array[0]); + } + } + + // Make sure the original pointer isn't modified + { + test_bounded_ptr const ptr(array.begin() + 1, array.begin(), array.end()); + (void)(ptr + 3); + _assert(&*ptr == &array[1]); + } + + // Make sure the operator is commutative + { + { + test_bounded_ptr const ptr(array.begin(), array.begin(), array.end()); + test_bounded_ptr res = 0 + ptr; + _assert(&*res == &array[0]); + } + { + test_bounded_ptr const ptr(array.begin(), array.begin(), array.end()); + test_bounded_ptr res = 3 + ptr; + _assert(&*res == &array[3]); + } + { + test_bounded_ptr const ptr(array.begin() + 3, array.begin(), array.end()); + test_bounded_ptr res = -2 + ptr; + _assert(&*res == &array[1]); + } + } +} + +T_DECL(arith_add, "bounded_ptr.arith.add") { + tests(); + tests(); + tests(); + tests(); +} diff --git a/tests/bounded_ptr_src/arith.add_assign.cpp b/tests/bounded_ptr_src/arith.add_assign.cpp new file mode 100644 index 000000000..0f29c10e3 --- /dev/null +++ b/tests/bounded_ptr_src/arith.add_assign.cpp @@ -0,0 +1,200 @@ +// +// Tests for +// bounded_ptr& operator+=(std::ptrdiff_t n); 
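// --- Illustrative aside, not part of the patch -----------------------------
// bounded_ptr arithmetic behaves like raw pointer arithmetic within the
// bounds captured at construction: operator+ is commutative, negative offsets
// walk backwards, the original pointer is never modified, and the
// one-past-the-end position is representable, as the tests above check.
// Header path and policy parameter are assumptions of this sketch.
#include <cassert>
#include <libkern/c++/bounded_ptr.h>

struct assert_trap_policy {
	static void trap(char const *) { assert(false); }
};

static void
bounded_ptr_arithmetic_example()
{
	int buf[5] = {0, 1, 2, 3, 4};
	libkern::bounded_ptr<int, assert_trap_policy> p(&buf[0], &buf[0], &buf[5]);

	assert(*(p + 3) == 3);
	assert(*(3 + p) == 3);        // commutative form
	assert(*((p + 4) + -2) == 2); // negative offsets walk backwards
	assert(*p == 0);              // p itself is unchanged by the above

	auto end = p + 5;             // one-past-the-end is representable
	assert(*(end - 1) == 4);
}
// ----------------------------------------------------------------------------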
+// + +#include +#include +#include +#include +#include +#include +#include +#include "test_utils.h" + +#define _assert(...) T_ASSERT_TRUE((__VA_ARGS__), # __VA_ARGS__) + +struct T { int i; }; + +namespace { +struct tracking_policy { + static bool did_trap; + static void + trap(char const*) + { + did_trap = true; + } +}; +bool tracking_policy::did_trap = false; +} + +template +static void +tests() +{ + std::array array = {T{0}, T{1}, T{2}, T{3}, T{4}}; + + // Add-assign positive offsets + // T{0} T{1} T{2} T{3} T{4} + // ^ ^ + // | | + // begin,ptr end + { + test_bounded_ptr ptr(array.begin(), array.begin(), array.end()); + auto& ref = ptr += 0; + _assert(&ref == &ptr); + _assert(&*ptr == &array[0]); + } + { + test_bounded_ptr ptr(array.begin(), array.begin(), array.end()); + auto& ref = ptr += 1; + _assert(&ref == &ptr); + _assert(&*ptr == &array[1]); + } + { + test_bounded_ptr ptr(array.begin(), array.begin(), array.end()); + auto& ref = ptr += 2; + _assert(&ref == &ptr); + _assert(&*ptr == &array[2]); + } + { + test_bounded_ptr ptr(array.begin(), array.begin(), array.end()); + auto& ref = ptr += 3; + _assert(&ref == &ptr); + _assert(&*ptr == &array[3]); + } + { + test_bounded_ptr ptr(array.begin(), array.begin(), array.end()); + auto& ref = ptr += 4; + _assert(&ref == &ptr); + _assert(&*ptr == &array[4]); + } + { + test_bounded_ptr ptr(array.begin(), array.begin(), array.end()); + auto& ref = ptr += 5; + _assert(&ref == &ptr); + _assert(ptr == array.end()); + } + + // Add-assign negative offsets + // T{0} T{1} T{2} T{3} T{4} + // ^ ^ + // | | + // begin end,ptr + { + test_bounded_ptr ptr(array.end(), array.begin(), array.end()); + auto& ref = ptr += 0; + _assert(&ref == &ptr); + _assert(ptr == array.end()); + } + { + test_bounded_ptr ptr(array.end(), array.begin(), array.end()); + auto& ref = ptr += -1; + _assert(&ref == &ptr); + _assert(&*ptr == &array[4]); + } + { + test_bounded_ptr ptr(array.end(), array.begin(), array.end()); + auto& ref = ptr += -2; + _assert(&ref == &ptr); + _assert(&*ptr == &array[3]); + } + { + test_bounded_ptr ptr(array.end(), array.begin(), array.end()); + auto& ref = ptr += -3; + _assert(&ref == &ptr); + _assert(&*ptr == &array[2]); + } + { + test_bounded_ptr ptr(array.end(), array.begin(), array.end()); + auto& ref = ptr += -4; + _assert(&ref == &ptr); + _assert(&*ptr == &array[1]); + } + { + test_bounded_ptr ptr(array.end(), array.begin(), array.end()); + auto& ref = ptr += -5; + _assert(&ref == &ptr); + _assert(&*ptr == &array[0]); + } + + // Make sure we trap on arithmetic overflow in the number of bytes calculation + { + std::ptrdiff_t sizeof_T = sizeof(T); // avoid promotion to unsigned in calculations + + // largest (most positive) n for the number of bytes `n * sizeof(T)` not to overflow ptrdiff_t + std::ptrdiff_t max_n = std::numeric_limits::max() / sizeof_T; + + // smallest (most negative) n for the number of bytes `n * sizeof(T)` not to overflow ptrdiff_t + std::ptrdiff_t min_n = std::numeric_limits::min() / sizeof_T; + + // Overflow with a positive offset + { + libkern::bounded_ptr ptr(array.begin(), array.begin(), array.end()); + tracking_policy::did_trap = false; + ptr += max_n + 1; + _assert(tracking_policy::did_trap); + } + + // Overflow with a negative offset + { + libkern::bounded_ptr ptr(array.begin(), array.begin(), array.end()); + tracking_policy::did_trap = false; + ptr += min_n - 1; + _assert(tracking_policy::did_trap); + } + } + + // Make sure we trap on arithmetic overflow in the offset calculation + // + // To avoid running into the 
overflow of `n * sizeof(T)` when ptrdiff_t + // is the same size as int32_t, we test the offset overflow check by + // successive addition of smaller offsets. + // + // We basically push the offset right to its limit, and then push it + // past its limit to watch it overflow. + { + std::int64_t sizeof_T = sizeof(T); // avoid promotion to unsigned in calculations + + // largest (most positive) n for the number of bytes `n * sizeof(T)` not to overflow the int32_t offset + std::int64_t max_n = std::numeric_limits::max() / sizeof_T; + + // smallest (most negative) n for the number of bytes `n * sizeof(T)` not to overflow the int32_t offset + std::int64_t min_n = std::numeric_limits::min() / sizeof_T; + + // Add positive offsets + { + libkern::bounded_ptr ptr(array.begin(), array.begin(), array.end()); + tracking_policy::did_trap = false; + ptr += static_cast(max_n / 2); + _assert(!tracking_policy::did_trap); + ptr += static_cast(max_n / 2); + _assert(!tracking_policy::did_trap); + ptr += (max_n % 2); + _assert(!tracking_policy::did_trap); // offset is now right at its positive limit + ptr += 1; + _assert(tracking_policy::did_trap); + } + + // Add negative offsets + { + libkern::bounded_ptr ptr(array.begin(), array.begin(), array.end()); + tracking_policy::did_trap = false; + ptr += static_cast(min_n / 2); + _assert(!tracking_policy::did_trap); + ptr += static_cast(min_n / 2); + _assert(!tracking_policy::did_trap); + ptr += (min_n % 2); + _assert(!tracking_policy::did_trap); // offset is now right at its negative limit + ptr += -1; + _assert(tracking_policy::did_trap); + } + } +} + +T_DECL(arith_add_assign, "bounded_ptr.arith.add_assign") { + tests(); + tests(); + tests(); + tests(); +} diff --git a/tests/bounded_ptr_src/arith.difference.cpp b/tests/bounded_ptr_src/arith.difference.cpp new file mode 100644 index 000000000..580decfcf --- /dev/null +++ b/tests/bounded_ptr_src/arith.difference.cpp @@ -0,0 +1,122 @@ +// +// Tests for +// friend std::ptrdiff_t operator-(bounded_ptr const& a, bounded_ptr const& b); +// friend std::ptrdiff_t operator-(bounded_ptr const& a, T* b); +// friend std::ptrdiff_t operator-(T* a, bounded_ptr const& b); +// + +#include +#include +#include +#include +#include "test_utils.h" + +#define _assert(...) 
T_ASSERT_TRUE((__VA_ARGS__), # __VA_ARGS__) + +struct T { + int i; +}; + +template +static void +tests() +{ + std::array array = {Stored{0}, Stored{1}, Stored{2}, Stored{3}, Stored{4}}; + + // a >= b + { + test_bounded_ptr const a(array.begin(), array.begin(), array.end()); + test_bounded_ptr const b(array.begin(), array.begin(), array.end()); + std::ptrdiff_t diff = a - b; + _assert(diff == 0); + } + { + test_bounded_ptr const a(array.begin() + 1, array.begin(), array.end()); + test_bounded_ptr const b(array.begin(), array.begin(), array.end()); + std::ptrdiff_t diff = a - b; + _assert(diff == 1); + } + { + test_bounded_ptr const a(array.begin() + 2, array.begin(), array.end()); + test_bounded_ptr const b(array.begin(), array.begin(), array.end()); + std::ptrdiff_t diff = a - b; + _assert(diff == 2); + } + { + test_bounded_ptr const a(array.begin() + 3, array.begin(), array.end()); + test_bounded_ptr const b(array.begin(), array.begin(), array.end()); + std::ptrdiff_t diff = a - b; + _assert(diff == 3); + } + { + test_bounded_ptr const a(array.begin() + 4, array.begin(), array.end()); + test_bounded_ptr const b(array.begin(), array.begin(), array.end()); + std::ptrdiff_t diff = a - b; + _assert(diff == 4); + } + { + test_bounded_ptr const a(array.end(), array.begin(), array.end()); + test_bounded_ptr const b(array.begin(), array.begin(), array.end()); + std::ptrdiff_t diff = a - b; + _assert(diff == 5); + } + + // a < b + { + test_bounded_ptr const a(array.begin(), array.begin(), array.end()); + test_bounded_ptr const b(array.begin() + 1, array.begin(), array.end()); + std::ptrdiff_t diff = a - b; + _assert(diff == -1); + } + { + test_bounded_ptr const a(array.begin(), array.begin(), array.end()); + test_bounded_ptr const b(array.begin() + 2, array.begin(), array.end()); + std::ptrdiff_t diff = a - b; + _assert(diff == -2); + } + { + test_bounded_ptr const a(array.begin(), array.begin(), array.end()); + test_bounded_ptr const b(array.begin() + 3, array.begin(), array.end()); + std::ptrdiff_t diff = a - b; + _assert(diff == -3); + } + { + test_bounded_ptr const a(array.begin(), array.begin(), array.end()); + test_bounded_ptr const b(array.begin() + 4, array.begin(), array.end()); + std::ptrdiff_t diff = a - b; + _assert(diff == -4); + } + { + test_bounded_ptr const a(array.begin(), array.begin(), array.end()); + test_bounded_ptr const b(array.begin() + 5, array.begin(), array.end()); + std::ptrdiff_t diff = a - b; + _assert(diff == -5); + } + + // Subtract pointers with different bounds + { + test_bounded_ptr const a(array.begin() + 2, array.begin() + 1, array.end() - 1); + test_bounded_ptr const b(array.begin() + 4, array.begin() + 3, array.end()); + _assert(a - b == -2); + _assert(b - a == 2); + } + + // Subtract with raw pointers + { + test_bounded_ptr const a(array.begin() + 2, array.begin() + 1, array.end() - 1); + Right* b = array.begin() + 4; + _assert(a - b == -2); + } + { + Left* a = array.begin() + 4; + test_bounded_ptr const b(array.begin() + 2, array.begin() + 1, array.end() - 1); + _assert(a - b == 2); + } +} + +T_DECL(arith_difference, "bounded_ptr.arith.difference") { + tests(); + tests(); + tests(); + tests(); +} diff --git a/tests/bounded_ptr_src/arith.inc_dec.cpp b/tests/bounded_ptr_src/arith.inc_dec.cpp new file mode 100644 index 000000000..cb32fd6bd --- /dev/null +++ b/tests/bounded_ptr_src/arith.inc_dec.cpp @@ -0,0 +1,147 @@ +// +// Tests for +// bounded_ptr& operator++(); +// bounded_ptr operator++(int); +// bounded_ptr& operator--(); +// bounded_ptr operator--(int); 
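// --- Illustrative aside, not part of the patch -----------------------------
// Worked example of the overflow limits used by the += / -= tests above: an
// offset of n elements moves the pointer by n * sizeof(T) bytes, so the
// largest safe n is PTRDIFF_MAX / sizeof(T) and the smallest is
// PTRDIFF_MIN / sizeof(T); one step past either limit makes the byte count
// overflow ptrdiff_t, which the tests expect to trap. Plain arithmetic only,
// no libkern headers needed.
#include <cstddef>
#include <limits>

struct Elem { int i; }; // mirrors the 4-byte T used by the tests

static void
offset_limit_example()
{
	constexpr std::ptrdiff_t elem_size = sizeof(Elem); // signed, avoids unsigned promotion
	constexpr std::ptrdiff_t max_n =
	    std::numeric_limits<std::ptrdiff_t>::max() / elem_size;

	// Exactly at the limit the byte count still fits in ptrdiff_t ...
	static_assert(max_n * elem_size <= std::numeric_limits<std::ptrdiff_t>::max());

	// ... and the leftover headroom is smaller than one element, so
	// (max_n + 1) * elem_size would exceed PTRDIFF_MAX. That is why
	// `ptr += max_n + 1` in the tests above must call the trap hook.
	static_assert(std::numeric_limits<std::ptrdiff_t>::max() - max_n * elem_size
	    < elem_size);
}
// ----------------------------------------------------------------------------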
+// + +#include +#include +#include +#include +#include "test_utils.h" + +#define _assert(...) T_ASSERT_TRUE((__VA_ARGS__), # __VA_ARGS__) + +struct T { + int i; +}; + +template +static void +tests() +{ + std::array array = {T{0}, T{1}, T{2}, T{3}, T{4}}; + + { + // Test pre-increment and pre-decrement + test_bounded_ptr ptr(array.begin(), array.begin(), array.end()); + _assert(&*ptr == &array[0]); + + { + auto& ref = ++ptr; + _assert(&ref == &ptr); + _assert(&*ptr == &array[1]); + } + + { + auto& ref = ++ptr; + _assert(&ref == &ptr); + _assert(&*ptr == &array[2]); + } + { + auto& ref = ++ptr; + _assert(&ref == &ptr); + _assert(&*ptr == &array[3]); + } + { + auto& ref = ++ptr; + _assert(&ref == &ptr); + _assert(&*ptr == &array[4]); + } + { + auto& ref = ++ptr; + _assert(&ref == &ptr); + // ptr is now one-past-last + } + { + auto& ref = --ptr; + _assert(&ref == &ptr); + _assert(&*ptr == &array[4]); + } + { + auto& ref = --ptr; + _assert(&ref == &ptr); + _assert(&*ptr == &array[3]); + } + { + auto& ref = --ptr; + _assert(&ref == &ptr); + _assert(&*ptr == &array[2]); + } + { + auto& ref = --ptr; + _assert(&ref == &ptr); + _assert(&*ptr == &array[1]); + } + { + auto& ref = --ptr; + _assert(&ref == &ptr); + _assert(&*ptr == &array[0]); + } + } + { + // Test post-increment and post-decrement + test_bounded_ptr ptr(array.begin(), array.begin(), array.end()); + _assert(&*ptr == &array[0]); + + { + auto prev = ptr++; + _assert(&*prev == &array[0]); + _assert(&*ptr == &array[1]); + } + { + auto prev = ptr++; + _assert(&*prev == &array[1]); + _assert(&*ptr == &array[2]); + } + { + auto prev = ptr++; + _assert(&*prev == &array[2]); + _assert(&*ptr == &array[3]); + } + { + auto prev = ptr++; + _assert(&*prev == &array[3]); + _assert(&*ptr == &array[4]); + } + { + auto prev = ptr++; + _assert(&*prev == &array[4]); + _assert(ptr == array.end()); + } + { + auto prev = ptr--; + _assert(prev == array.end()); + _assert(&*ptr == &array[4]); + } + { + auto prev = ptr--; + _assert(&*prev == &array[4]); + _assert(&*ptr == &array[3]); + } + { + auto prev = ptr--; + _assert(&*prev == &array[3]); + _assert(&*ptr == &array[2]); + } + { + auto prev = ptr--; + _assert(&*prev == &array[2]); + _assert(&*ptr == &array[1]); + } + { + auto prev = ptr--; + _assert(&*prev == &array[1]); + _assert(&*ptr == &array[0]); + } + } +} + +T_DECL(arith_inc_dec, "bounded_ptr.arith.inc_dec") { + tests(); + tests(); + tests(); + tests(); +} diff --git a/tests/bounded_ptr_src/arith.subtract.cpp b/tests/bounded_ptr_src/arith.subtract.cpp new file mode 100644 index 000000000..c15403a34 --- /dev/null +++ b/tests/bounded_ptr_src/arith.subtract.cpp @@ -0,0 +1,106 @@ +// +// Tests for +// friend bounded_ptr operator-(bounded_ptr p, std::ptrdiff_t n); +// + +#include +#include "test_utils.h" +#include +#include +#include +#include + +#define _assert(...) 
T_ASSERT_TRUE((__VA_ARGS__), # __VA_ARGS__) + +struct T { + int i; +}; + +template +static void +tests() +{ + std::array array = {T{0}, T{1}, T{2}, T{3}, T{4}}; + + // Subtract positive offsets + // T{0} T{1} T{2} T{3} T{4} + // ^ ^ + // | | + // begin end,ptr + { + test_bounded_ptr const ptr(array.end(), array.begin(), array.end()); + + { + test_bounded_ptr res = ptr - static_cast(0); + _assert(ptr == array.end()); + } + { + test_bounded_ptr res = ptr - 1; + _assert(&*res == &array[4]); + } + { + test_bounded_ptr res = ptr - 2; + _assert(&*res == &array[3]); + } + { + test_bounded_ptr res = ptr - 3; + _assert(&*res == &array[2]); + } + { + test_bounded_ptr res = ptr - 4; + _assert(&*res == &array[1]); + } + { + test_bounded_ptr res = ptr - 5; + _assert(&*res == &array[0]); + } + } + + // Subtract negative offsets + // T{0} T{1} T{2} T{3} T{4} + // ^ ^ + // | | + // begin,ptr end + { + test_bounded_ptr const ptr(array.begin(), array.begin(), array.end()); + + { + test_bounded_ptr res = ptr - static_cast(0); + _assert(&*res == &array[0]); + } + { + test_bounded_ptr res = ptr - -1; + _assert(&*res == &array[1]); + } + { + test_bounded_ptr res = ptr - -2; + _assert(&*res == &array[2]); + } + { + test_bounded_ptr res = ptr - -3; + _assert(&*res == &array[3]); + } + { + test_bounded_ptr res = ptr - -4; + _assert(&*res == &array[4]); + } + { + test_bounded_ptr res = ptr - -5; + _assert(res == array.end()); + } + } + + // Make sure the original pointer isn't modified + { + test_bounded_ptr const ptr(array.begin() + 4, array.begin(), array.end()); + (void)(ptr - 2); + _assert(&*ptr == &array[4]); + } +} + +T_DECL(arith_subtract, "bounded_ptr.arith.subtract") { + tests(); + tests(); + tests(); + tests(); +} diff --git a/tests/bounded_ptr_src/arith.subtract_assign.cpp b/tests/bounded_ptr_src/arith.subtract_assign.cpp new file mode 100644 index 000000000..f076618d3 --- /dev/null +++ b/tests/bounded_ptr_src/arith.subtract_assign.cpp @@ -0,0 +1,197 @@ +// +// Tests for +// bounded_ptr& operator-=(std::ptrdiff_t n); +// + +#include +#include +#include +#include +#include "test_utils.h" + +#define _assert(...) 
T_ASSERT_TRUE((__VA_ARGS__), # __VA_ARGS__) + +struct T { int i; }; + +namespace { +struct tracking_policy { + static bool did_trap; + static void + trap(char const* msg) + { + did_trap = true; + } +}; +bool tracking_policy::did_trap = false; +} + +template +static void +tests() +{ + std::array array = {T{0}, T{1}, T{2}, T{3}, T{4}}; + + // Subtract-assign positive offsets + // T{0} T{1} T{2} T{3} T{4} + // ^ ^ + // | | + // begin end,ptr + { + test_bounded_ptr ptr(array.end(), array.begin(), array.end()); + auto& ref = ptr -= 0; + _assert(&ref == &ptr); + _assert(ptr == array.end()); + } + { + test_bounded_ptr ptr(array.end(), array.begin(), array.end()); + auto& ref = ptr -= 1; + _assert(&ref == &ptr); + _assert(&*ptr == &array[4]); + } + { + test_bounded_ptr ptr(array.end(), array.begin(), array.end()); + auto& ref = ptr -= 2; + _assert(&ref == &ptr); + _assert(&*ptr == &array[3]); + } + { + test_bounded_ptr ptr(array.end(), array.begin(), array.end()); + auto& ref = ptr -= 3; + _assert(&ref == &ptr); + _assert(&*ptr == &array[2]); + } + { + test_bounded_ptr ptr(array.end(), array.begin(), array.end()); + auto& ref = ptr -= 4; + _assert(&ref == &ptr); + _assert(&*ptr == &array[1]); + } + { + test_bounded_ptr ptr(array.end(), array.begin(), array.end()); + auto& ref = ptr -= 5; + _assert(&ref == &ptr); + _assert(&*ptr == &array[0]); + } + + // Subtract-assign negative offsets + // T{0} T{1} T{2} T{3} T{4} + // ^ ^ + // | | + // begin,ptr end + { + test_bounded_ptr ptr(array.begin(), array.begin(), array.end()); + auto& ref = ptr -= 0; + _assert(&ref == &ptr); + _assert(&*ptr == &array[0]); + } + { + test_bounded_ptr ptr(array.begin(), array.begin(), array.end()); + auto& ref = ptr -= -1; + _assert(&ref == &ptr); + _assert(&*ptr == &array[1]); + } + { + test_bounded_ptr ptr(array.begin(), array.begin(), array.end()); + auto& ref = ptr -= -2; + _assert(&ref == &ptr); + _assert(&*ptr == &array[2]); + } + { + test_bounded_ptr ptr(array.begin(), array.begin(), array.end()); + auto& ref = ptr -= -3; + _assert(&ref == &ptr); + _assert(&*ptr == &array[3]); + } + { + test_bounded_ptr ptr(array.begin(), array.begin(), array.end()); + auto& ref = ptr -= -4; + _assert(&ref == &ptr); + _assert(&*ptr == &array[4]); + } + { + test_bounded_ptr ptr(array.begin(), array.begin(), array.end()); + auto& ref = ptr -= -5; + _assert(&ref == &ptr); + _assert(ptr == array.end()); + } + + // Make sure we trap on arithmetic overflow in the number of bytes calculation + { + std::ptrdiff_t sizeof_T = sizeof(T); // avoid promotion to unsigned in calculations + + // largest (most positive) n for the number of bytes `n * sizeof(T)` not to overflow ptrdiff_t + std::ptrdiff_t max_n = std::numeric_limits::max() / sizeof_T; + + // smallest (most negative) n for the number of bytes `n * sizeof(T)` not to overflow ptrdiff_t + std::ptrdiff_t min_n = std::numeric_limits::min() / sizeof_T; + + // Overflow with a positive offset + { + libkern::bounded_ptr ptr(array.begin(), array.begin(), array.end()); + tracking_policy::did_trap = false; + ptr -= max_n + 1; + _assert(tracking_policy::did_trap); + } + + // Overflow with a negative offset + { + libkern::bounded_ptr ptr(array.begin(), array.begin(), array.end()); + tracking_policy::did_trap = false; + ptr -= min_n - 1; + _assert(tracking_policy::did_trap); + } + } + + // Make sure we trap on arithmetic overflow in the offset calculation + // + // To avoid running into the overflow of `n * sizeof(T)` when ptrdiff_t + // is the same size as int32_t, we test the offset overflow check 
by + // successive subtraction of smaller offsets. + // + // We basically push the offset right to its limit, and then push it + // past its limit to watch it overflow. + { + std::int64_t sizeof_T = sizeof(T); // avoid promotion to unsigned in calculations + + // largest (most positive) n for the number of bytes `n * sizeof(T)` not to overflow the int32_t offset + std::int64_t max_n = std::numeric_limits::max() / sizeof_T; + + // smallest (most negative) n for the number of bytes `n * sizeof(T)` not to overflow the int32_t offset + std::int64_t min_n = std::numeric_limits::min() / sizeof_T; + + // Subtract positive offsets + { + libkern::bounded_ptr ptr(array.begin(), array.begin(), array.end()); + tracking_policy::did_trap = false; + ptr -= static_cast(-min_n / 2); + _assert(!tracking_policy::did_trap); + ptr -= static_cast(-min_n / 2); + _assert(!tracking_policy::did_trap); + ptr -= (-min_n % 2); + _assert(!tracking_policy::did_trap); // offset is now right at its negative limit + ptr -= 1; + _assert(tracking_policy::did_trap); + } + + // Subtract negative offsets + { + libkern::bounded_ptr ptr(array.begin(), array.begin(), array.end()); + tracking_policy::did_trap = false; + ptr -= static_cast(-max_n / 2); + _assert(!tracking_policy::did_trap); + ptr -= static_cast(-max_n / 2); + _assert(!tracking_policy::did_trap); + ptr -= (-max_n % 2); + _assert(!tracking_policy::did_trap); // offset is now right at its positive limit + ptr -= -1; + _assert(tracking_policy::did_trap); + } + } +} + +T_DECL(arith_subtract_assign, "bounded_ptr.arith.subtract_assign") { + tests(); + tests(); + tests(); + tests(); +} diff --git a/tests/bounded_ptr_src/assign.convert.cpp b/tests/bounded_ptr_src/assign.convert.cpp new file mode 100644 index 000000000..379745db9 --- /dev/null +++ b/tests/bounded_ptr_src/assign.convert.cpp @@ -0,0 +1,140 @@ +// +// Tests for +// template +// bounded_ptr& operator=(bounded_ptr const& other); +// + +#include +#include +#include +#include +#include "test_utils.h" + +#define _assert(...) 
T_ASSERT_TRUE((__VA_ARGS__), # __VA_ARGS__) + +struct Base { int i; }; +struct Derived : Base { }; + +struct Base1 { int i; }; +struct Base2 { long l; }; +struct DerivedMultiple : Base1, Base2 { + DerivedMultiple(int i) : Base1{i}, Base2{i + 10} + { + } +}; + +struct Unrelated { }; + +struct dummy_policy1 { + static constexpr void + trap(char const*) + { + } +}; +struct dummy_policy2 { + static constexpr void + trap(char const*) + { + } +}; + +template +static void +tests() +{ + std::array array = {Stored{0}, Stored{1}, Stored{2}, Stored{3}, Stored{4}}; + Stored* const ptr1 = array.begin() + 2; + Stored* const ptr2 = array.begin() + 3; + + { + test_bounded_ptr const from(ptr1, array.begin(), array.end()); + test_bounded_ptr to; + test_bounded_ptr& ref = (to = from); + _assert(to.discard_bounds() == static_cast(ptr1)); + _assert(&ref == &to); // make sure we return *this + } + + // Test assigning to a non-null pointer + { + test_bounded_ptr const from(ptr1, array.begin(), array.end()); + test_bounded_ptr to(ptr2, array.begin(), array.end()); + _assert(to.discard_bounds() == static_cast(ptr2)); + + test_bounded_ptr& ref = (to = from); + _assert(to.discard_bounds() == static_cast(ptr1)); + _assert(&ref == &to); // make sure we return *this + } + + // Test assigning from a null pointer + { + test_bounded_ptr const from = nullptr; + test_bounded_ptr to; + test_bounded_ptr& ref = (to = from); + _assert(to.unsafe_discard_bounds() == nullptr); + _assert(&ref == &to); // make sure we return *this + } + + // Test with different policies + { + libkern::bounded_ptr from(ptr1, array.begin(), array.end()); + libkern::bounded_ptr to; + libkern::bounded_ptr& ref = (to = from); + _assert(to.discard_bounds() == static_cast(ptr1)); + _assert(&ref == &to); // make sure we return *this + } + + // T{0} T{1} T{2} T{3} T{4} + // ^ ^ ^ + // | | | + // from begin end + { + test_bounded_ptr const from(array.begin(), array.begin() + 1, array.end()); + test_bounded_ptr to; + to = from; + _assert(to.unsafe_discard_bounds() == static_cast(array.begin())); + } +} + +T_DECL(assign_convert, "bounded_ptr.assign.convert") { + tests(); + tests(); + tests(); + tests(); + + tests(); + tests(); + tests(); + tests(); + + tests(); + tests(); + tests(); + tests(); + + tests(); + tests(); + tests(); + tests(); + + tests(); + tests(); + tests(); + tests(); + + // Make sure downcasts are disabled + static_assert(!std::is_assignable_v, /*from*/ test_bounded_ptr >); + static_assert(!std::is_assignable_v, /*from*/ test_bounded_ptr >); + static_assert(!std::is_assignable_v, /*from*/ test_bounded_ptr >); + static_assert(!std::is_assignable_v, /*from*/ test_bounded_ptr >); + + // Make sure const-casting away doesn't work + static_assert(!std::is_assignable_v, /*from*/ test_bounded_ptr >); + + // Make sure casting to unrelated types doesn't work implicitly + static_assert(!std::is_assignable_v, /*from*/ test_bounded_ptr >); + static_assert(!std::is_assignable_v, /*from*/ test_bounded_ptr >); + static_assert(!std::is_assignable_v, /*from*/ test_bounded_ptr >); + + // Make sure we can't assign from raw pointers + static_assert(!std::is_assignable_v, /*from*/ Derived*>); +} diff --git a/tests/bounded_ptr_src/assign.nullptr.cpp b/tests/bounded_ptr_src/assign.nullptr.cpp new file mode 100644 index 000000000..a9f062650 --- /dev/null +++ b/tests/bounded_ptr_src/assign.nullptr.cpp @@ -0,0 +1,55 @@ +// +// Tests for +// bounded_ptr& operator=(std::nullptr_t); +// + +#include +#include +#include +#include +#include "test_utils.h" + +#define 
_assert(...) T_ASSERT_TRUE((__VA_ARGS__), # __VA_ARGS__) + +struct T { }; + +template +static void +tests() +{ + T obj{}; + + // Assign from nullptr + { + test_bounded_ptr p(&obj, &obj, &obj + 1); + _assert(p != nullptr); + test_bounded_ptr& ref = (p = nullptr); + _assert(&ref == &p); + _assert(p == nullptr); + } + + // Assign from NULL + { + test_bounded_ptr p(&obj, &obj, &obj + 1); + _assert(p != nullptr); + test_bounded_ptr& ref = (p = NULL); + _assert(&ref == &p); + _assert(p == nullptr); + } + + // Assign from 0 + { + test_bounded_ptr p(&obj, &obj, &obj + 1); + _assert(p != nullptr); + test_bounded_ptr& ref = (p = 0); + _assert(&ref == &p); + _assert(p == nullptr); + } +} + +T_DECL(assign_nullptr, "bounded_ptr.assign.nullptr") { + tests(); + tests(); + tests(); + tests(); +} diff --git a/tests/bounded_ptr_src/compare.equal.cpp b/tests/bounded_ptr_src/compare.equal.cpp new file mode 100644 index 000000000..38878ea64 --- /dev/null +++ b/tests/bounded_ptr_src/compare.equal.cpp @@ -0,0 +1,152 @@ +// +// Tests for +// template +// bool operator==(bounded_ptr const& a, bounded_ptr const& b); +// +// template +// bool operator!=(bounded_ptr const& a, bounded_ptr const& b); +// + +#include +#include +#include +#include +#include "test_utils.h" + +#define _assert(...) T_ASSERT_TRUE((__VA_ARGS__), # __VA_ARGS__) + +struct dummy_policy1 { + static constexpr void + trap(char const*) + { + } +}; +struct dummy_policy2 { + static constexpr void + trap(char const*) + { + } +}; + +template +static void +check_eq(T t, U u) +{ + _assert(t == u); + _assert(u == t); + _assert(!(t != u)); + _assert(!(u != t)); +} + +template +static void +check_ne(T t, U u) +{ + _assert(!(t == u)); + _assert(!(u == t)); + _assert(t != u); + _assert(u != t); +} + +template +static void +tests() +{ + std::array array = {T{0}, T{1}, T{2}, T{3}, T{4}}; + + // Pointers with the same bounds + { + test_bounded_ptr const a(array.begin(), array.begin(), array.end()); + test_bounded_ptr const b(array.begin(), array.begin(), array.end()); + check_eq(a, b); + } + { + test_bounded_ptr const a(array.begin(), array.begin(), array.end()); + test_bounded_ptr const b(array.begin() + 1, array.begin(), array.end()); + check_ne(a, b); + } + { + test_bounded_ptr const a(array.begin(), array.begin(), array.end()); + test_bounded_ptr const b(array.begin() + 2, array.begin(), array.end()); + check_ne(a, b); + } + { + test_bounded_ptr const a(array.begin(), array.begin(), array.end()); + test_bounded_ptr const b(array.end(), array.begin(), array.end()); + check_ne(a, b); + } + { + test_bounded_ptr const a(array.end(), array.begin(), array.end()); + test_bounded_ptr const b(array.end(), array.begin(), array.end()); + check_eq(a, b); + } + + // Compare null pointers + { + test_bounded_ptr const a; + test_bounded_ptr const b(array.begin(), array.begin(), array.end()); + check_ne(a, b); + } + { + test_bounded_ptr const a; + test_bounded_ptr const b; + check_eq(a, b); + } + + // Pointers with different bounds + { + // Overlapping bounds, equal + test_bounded_ptr const a(array.begin(), array.begin() + 2, array.end()); + test_bounded_ptr const b(array.begin(), array.begin(), array.end()); + check_eq(a, b); + } + { + // Overlapping bounds, not equal + test_bounded_ptr const a(array.begin(), array.begin() + 2, array.end()); + test_bounded_ptr const b(array.begin() + 2, array.begin(), array.end()); + check_ne(a, b); + } + { + // Non-overlapping bounds, equal + test_bounded_ptr const a(array.begin(), array.begin(), array.begin() + 1); + test_bounded_ptr const 
b(array.begin(), array.begin() + 2, array.end()); + check_eq(a, b); + } + { + // Non-overlapping bounds, not equal + test_bounded_ptr const a(array.begin(), array.begin(), array.begin() + 1); + test_bounded_ptr const b(array.begin() + 3, array.begin() + 2, array.end()); + check_ne(a, b); + } + + // Test with different policies + { + libkern::bounded_ptr const a(array.begin(), array.begin(), array.end()); + libkern::bounded_ptr const b(array.begin(), array.begin(), array.end()); + check_eq(a, b); + } +} + +struct Base { int i; }; +struct Derived : Base { }; + +template +static void +tests_convert() +{ + std::array array = {Derived{0}, Derived{1}, Derived{2}, Derived{3}, Derived{4}}; + test_bounded_ptr const a(array.begin(), array.begin(), array.end() - 1); + test_bounded_ptr const b(array.begin(), array.begin(), array.end() - 1); + check_eq(a, b); +} + +T_DECL(compare_equal, "bounded_ptr.compare.equal") { + tests(); + tests(); + tests(); + tests(); + tests_convert(); + tests_convert(); + tests_convert(); + tests_convert(); +} diff --git a/tests/bounded_ptr_src/compare.equal.nullptr.cpp b/tests/bounded_ptr_src/compare.equal.nullptr.cpp new file mode 100644 index 000000000..2a314768e --- /dev/null +++ b/tests/bounded_ptr_src/compare.equal.nullptr.cpp @@ -0,0 +1,68 @@ +// +// Tests for +// template +// bool operator==(std::nullptr_t, bounded_ptr const& p); +// +// template +// bool operator!=(std::nullptr_t, bounded_ptr const& p); +// +// template +// bool operator==(bounded_ptr const& p, std::nullptr_t); +// +// template +// bool operator!=(bounded_ptr const& p, std::nullptr_t); +// + +#include +#include +#include +#include "test_utils.h" + +#define _assert(...) T_ASSERT_TRUE((__VA_ARGS__), # __VA_ARGS__) + +struct T { }; + +struct non_default_policy { + static constexpr void + trap(char const*) + { + } +}; + +template +static void +tests() +{ + T t; + + { + test_bounded_ptr const ptr(&t, &t, &t + 1); + _assert(!(ptr == nullptr)); + _assert(!(nullptr == ptr)); + _assert(ptr != nullptr); + _assert(nullptr != ptr); + } + { + test_bounded_ptr const ptr = nullptr; + _assert(ptr == nullptr); + _assert(nullptr == ptr); + _assert(!(ptr != nullptr)); + _assert(!(nullptr != ptr)); + } + + // Test with a custom policy + { + libkern::bounded_ptr const ptr = nullptr; + _assert(ptr == nullptr); + _assert(nullptr == ptr); + _assert(!(ptr != nullptr)); + _assert(!(nullptr != ptr)); + } +} + +T_DECL(compare_equal_nullptr, "bounded_ptr.compare.equal.nullptr") { + tests(); + tests(); + tests(); + tests(); +} diff --git a/tests/bounded_ptr_src/compare.equal.raw.cpp b/tests/bounded_ptr_src/compare.equal.raw.cpp new file mode 100644 index 000000000..e0bb27a16 --- /dev/null +++ b/tests/bounded_ptr_src/compare.equal.raw.cpp @@ -0,0 +1,190 @@ +// +// Tests for +// template +// bool operator==(bounded_ptr const& a, U* b); +// +// template +// bool operator==(U* a, bounded_ptr const& b); +// +// template +// bool operator!=(bounded_ptr const& a, U* b); +// +// template +// bool operator!=(U* a, bounded_ptr const& b); +// + +#include +#include +#include +#include +#include "test_utils.h" + +#define _assert(...) 
T_ASSERT_TRUE((__VA_ARGS__), # __VA_ARGS__) + +template +static void +check_eq(T t, U u) +{ + _assert(t == u); + _assert(u == t); + _assert(!(t != u)); + _assert(!(u != t)); +} + +template +static void +check_ne(T t, U u) +{ + _assert(!(t == u)); + _assert(!(u == t)); + _assert(t != u); + _assert(u != t); +} + +template +static void +tests() +{ + std::array array = {T{0}, T{1}, T{2}, T{3}, T{4}}; + + // Compare pointers within the bounds + { + // T{0} T{1} T{2} T{3} T{4} + // ^ ^ + // | | + // begin,a,b end + test_bounded_ptr const a(array.begin(), array.begin(), array.end()); + TQual* b = array.begin(); + check_eq(a, b); + } + { + // T{0} T{1} T{2} T{3} T{4} + // ^ ^ ^ + // | | | + // begin a,b end + test_bounded_ptr const a(array.begin() + 1, array.begin(), array.end()); + TQual* b = array.begin() + 1; + check_eq(a, b); + } + { + // T{0} T{1} T{2} T{3} T{4} + // ^ ^ ^ + // | | | + // begin,a b end + test_bounded_ptr const a(array.begin(), array.begin(), array.end()); + TQual* b = array.begin() + 2; + check_ne(a, b); + } + { + // T{0} T{1} T{2} T{3} T{4} + // ^ ^ + // | | + // begin end,a,b + test_bounded_ptr const a(array.end(), array.begin(), array.end()); + TQual* b = array.end(); + check_eq(a, b); + } + { + // T{0} T{1} T{2} T{3} T{4} + // ^ ^ ^ ^ + // | | | | + // begin a end b + test_bounded_ptr const a(array.begin() + 2, array.begin(), array.begin() + 3); + TQual* b = array.begin() + 4; + check_ne(a, b); + } + + // Check when the bounded_ptr is outside of its bounds + { + // T{0} T{1} T{2} T{3} T{4} + // ^ ^ ^ + // | | | + // a,b begin end + test_bounded_ptr const a(array.begin(), array.begin() + 2, array.end()); + TQual* b = array.begin(); + check_eq(a, b); + } + { + // T{0} T{1} T{2} T{3} T{4} + // ^ ^ ^ + // | | | + // begin end a,b + test_bounded_ptr const a(array.end() - 1, array.begin(), array.end() - 2); + TQual* b = array.end() - 1; + check_eq(a, b); + } + { + // T{0} T{1} T{2} T{3} T{4} + // ^ ^ ^ + // | | | + // begin end a,b + test_bounded_ptr const a(array.end(), array.begin(), array.end() - 1); + TQual* b = array.end(); + check_eq(a, b); + } + { + // T{0} T{1} T{2} T{3} T{4} + // ^ ^ ^ ^ + // | | | | + // begin end a b + test_bounded_ptr const a(array.end() - 1, array.begin(), array.end() - 2); + TQual* b = array.end(); + check_ne(a, b); + } + + // Test comparing against a null pointer + { + test_bounded_ptr a = nullptr; + TQual* b = nullptr; + check_eq(a, b); + } + { + test_bounded_ptr a(array.end() - 1, array.begin(), array.end() - 2); + TQual* b = nullptr; + check_ne(a, b); + } + { + test_bounded_ptr a = nullptr; + TQual* b = array.begin(); + check_ne(a, b); + } +} + +struct Base { int i; }; +struct Derived : Base { }; + +template +static void +tests_convert() +{ + std::array array = {Derived{0}, Derived{1}, Derived{2}, Derived{3}, Derived{4}}; + + { + test_bounded_ptr const a(array.begin(), array.begin(), array.end() - 1); + Related* b = array.begin(); + check_eq(a, b); + } + { + test_bounded_ptr const a(array.begin(), array.begin(), array.end() - 1); + Derived* b = array.begin(); + check_eq(a, b); + } + + // Test comparisons against cv-void* + { + test_bounded_ptr const a(array.begin(), array.begin(), array.end() - 1); + void* b = array.begin(); + check_eq(a, b); + } +} + +T_DECL(compare_equal_raw, "bounded_ptr.compare.equal.raw") { + tests(); + tests(); + tests(); + tests(); + tests_convert(); + tests_convert(); + tests_convert(); + tests_convert(); +} diff --git a/tests/bounded_ptr_src/compare.order.cpp b/tests/bounded_ptr_src/compare.order.cpp new file mode 
100644 index 000000000..0c5c779ba --- /dev/null +++ b/tests/bounded_ptr_src/compare.order.cpp @@ -0,0 +1,168 @@ +// +// Tests for +// template +// bool operator<(bounded_ptr const& a, bounded_ptr const& b); +// +// template +// bool operator<=(bounded_ptr const& a, bounded_ptr const& b); +// +// template +// bool operator>(bounded_ptr const& a, bounded_ptr const& b); +// +// template +// bool operator>=(bounded_ptr const& a, bounded_ptr const& b); +// + +#include +#include +#include +#include +#include "test_utils.h" + +#define _assert(...) T_ASSERT_TRUE((__VA_ARGS__), # __VA_ARGS__) + +struct dummy_policy1 { + static constexpr void + trap(char const*) + { + } +}; +struct dummy_policy2 { + static constexpr void + trap(char const*) + { + } +}; + +template +static void +check_lt(T t, U u) +{ + _assert(t < u); + _assert(t <= u); + _assert(!(t >= u)); + _assert(!(t > u)); + + _assert(!(u < t)); + _assert(!(u <= t)); + _assert(u > t); + _assert(u >= t); +} + +template +static void +check_eq(T t, U u) +{ + _assert(!(t < u)); + _assert(t <= u); + _assert(t >= u); + _assert(!(t > u)); + + _assert(!(u < t)); + _assert(u <= t); + _assert(!(u > t)); + _assert(u >= t); +} + +template +static void +tests() +{ + std::array array = {T{0}, T{1}, T{2}, T{3}, T{4}}; + + // Pointers with the same bounds + { + test_bounded_ptr const a(array.begin(), array.begin(), array.end()); + test_bounded_ptr const b(array.begin(), array.begin(), array.end()); + check_eq(a, b); + } + { + test_bounded_ptr const a(array.begin(), array.begin(), array.end()); + test_bounded_ptr const b(array.begin() + 1, array.begin(), array.end()); + check_lt(a, b); + } + { + test_bounded_ptr const a(array.begin(), array.begin(), array.end()); + test_bounded_ptr const b(array.begin() + 2, array.begin(), array.end()); + check_lt(a, b); + } + { + test_bounded_ptr const a(array.begin(), array.begin(), array.end()); + test_bounded_ptr const b(array.end(), array.begin(), array.end()); + check_lt(a, b); + } + { + test_bounded_ptr const a(array.end(), array.begin(), array.end()); + test_bounded_ptr const b(array.end(), array.begin(), array.end()); + check_eq(a, b); + } + + // Compare null pointers + { + test_bounded_ptr const a; + test_bounded_ptr const b(array.begin(), array.begin(), array.end()); + check_lt(a, b); + } + { + test_bounded_ptr const a; + test_bounded_ptr const b; + check_eq(a, b); + } + + // Pointers with different bounds + { + // Overlapping bounds, equal + test_bounded_ptr const a(array.begin(), array.begin() + 2, array.end()); + test_bounded_ptr const b(array.begin(), array.begin(), array.end()); + check_eq(a, b); + } + { + // Overlapping bounds, not equal + test_bounded_ptr const a(array.begin(), array.begin() + 2, array.end()); + test_bounded_ptr const b(array.begin() + 2, array.begin(), array.end()); + check_lt(a, b); + } + { + // Non-overlapping bounds, equal + test_bounded_ptr const a(array.begin(), array.begin(), array.begin() + 1); + test_bounded_ptr const b(array.begin(), array.begin() + 2, array.end()); + check_eq(a, b); + } + { + // Non-overlapping bounds, not equal + test_bounded_ptr const a(array.begin(), array.begin(), array.begin() + 1); + test_bounded_ptr const b(array.begin() + 3, array.begin() + 2, array.end()); + check_lt(a, b); + } + + // Test with different policies + { + libkern::bounded_ptr const a(array.begin(), array.begin(), array.end()); + libkern::bounded_ptr const b(array.begin(), array.begin(), array.end()); + check_eq(a, b); + } +} + +struct Base { int i; }; +struct Derived : Base { }; + +template 
+static void +tests_convert() +{ + std::array array = {Derived{0}, Derived{1}, Derived{2}, Derived{3}, Derived{4}}; + test_bounded_ptr const a(array.begin(), array.begin(), array.end() - 1); + test_bounded_ptr const b(array.begin(), array.begin(), array.end() - 1); + check_eq(a, b); +} + +T_DECL(compare_order, "bounded_ptr.compare.order") { + tests(); + tests(); + tests(); + tests(); + tests_convert(); + tests_convert(); + tests_convert(); + tests_convert(); +} diff --git a/tests/bounded_ptr_src/compare.order.raw.cpp b/tests/bounded_ptr_src/compare.order.raw.cpp new file mode 100644 index 000000000..d2ba12277 --- /dev/null +++ b/tests/bounded_ptr_src/compare.order.raw.cpp @@ -0,0 +1,212 @@ +// +// Tests for +// template +// bool operator<(T* a, bounded_ptr const& b); +// +// template +// bool operator<(bounded_ptr const& a, U* b); +// +// template +// bool operator<=(T* a, bounded_ptr const& b); +// +// template +// bool operator<=(bounded_ptr const& a, U* b); +// +// template +// bool operator>(T* a, bounded_ptr const& b); +// +// template +// bool operator>(bounded_ptr const& a, U* b); +// +// template +// bool operator>=(T* a, bounded_ptr const& b); +// +// template +// bool operator>=(bounded_ptr const& a, U* b); +// + +#include +#include +#include +#include +#include "test_utils.h" + +#define _assert(...) T_ASSERT_TRUE((__VA_ARGS__), # __VA_ARGS__) + +template +static void +check_lt(T t, U u) +{ + _assert(t < u); + _assert(t <= u); + _assert(!(t >= u)); + _assert(!(t > u)); + + _assert(!(u < t)); + _assert(!(u <= t)); + _assert(u > t); + _assert(u >= t); +} + +template +static void +check_eq(T t, U u) +{ + _assert(!(t < u)); + _assert(t <= u); + _assert(t >= u); + _assert(!(t > u)); + + _assert(!(u < t)); + _assert(u <= t); + _assert(!(u > t)); + _assert(u >= t); +} + +template +static void +tests() +{ + std::array array = {T{0}, T{1}, T{2}, T{3}, T{4}}; + + // Compare pointers within the bounds + { + // T{0} T{1} T{2} T{3} T{4} + // ^ ^ + // | | + // begin,a,b end + test_bounded_ptr const a(array.begin(), array.begin(), array.end()); + TQual* b = array.begin(); + check_eq(a, b); + } + { + // T{0} T{1} T{2} T{3} T{4} + // ^ ^ ^ + // | | | + // begin a,b end + test_bounded_ptr const a(array.begin() + 1, array.begin(), array.end()); + TQual* b = array.begin() + 1; + check_eq(a, b); + } + { + // T{0} T{1} T{2} T{3} T{4} + // ^ ^ ^ + // | | | + // begin,a b end + test_bounded_ptr const a(array.begin(), array.begin(), array.end()); + TQual* b = array.begin() + 2; + check_lt(a, b); + } + { + // T{0} T{1} T{2} T{3} T{4} + // ^ ^ + // | | + // begin end,a,b + test_bounded_ptr const a(array.end(), array.begin(), array.end()); + TQual* b = array.end(); + check_eq(a, b); + } + { + // T{0} T{1} T{2} T{3} T{4} + // ^ ^ ^ ^ + // | | | | + // begin a end b + test_bounded_ptr const a(array.begin() + 2, array.begin(), array.begin() + 3); + TQual* b = array.begin() + 4; + check_lt(a, b); + } + + // Check when the bounded_ptr is outside of its bounds + { + // T{0} T{1} T{2} T{3} T{4} + // ^ ^ ^ + // | | | + // a,b begin end + test_bounded_ptr const a(array.begin(), array.begin() + 2, array.end()); + TQual* b = array.begin(); + check_eq(a, b); + } + { + // T{0} T{1} T{2} T{3} T{4} + // ^ ^ ^ + // | | | + // begin end a,b + test_bounded_ptr const a(array.end() - 1, array.begin(), array.end() - 2); + TQual* b = array.end() - 1; + check_eq(a, b); + } + { + // T{0} T{1} T{2} T{3} T{4} + // ^ ^ ^ + // | | | + // begin end a,b + test_bounded_ptr const a(array.end(), array.begin(), array.end() - 1); + TQual* b 
= array.end(); + check_eq(a, b); + } + { + // T{0} T{1} T{2} T{3} T{4} + // ^ ^ ^ ^ + // | | | | + // begin end a b + test_bounded_ptr const a(array.end() - 1, array.begin(), array.end() - 2); + TQual* b = array.end(); + check_lt(a, b); + } + + // Test comparing against a null pointer + { + test_bounded_ptr a = nullptr; + TQual* b = nullptr; + check_eq(a, b); + } + { + test_bounded_ptr a(array.end() - 1, array.begin(), array.end() - 2); + TQual* b = nullptr; + check_lt(b, a); + } + { + test_bounded_ptr a = nullptr; + TQual* b = array.begin(); + check_lt(a, b); + } +} + +struct Base { int i; }; +struct Derived : Base { }; + +template +static void +tests_convert() +{ + std::array array = {Derived{0}, Derived{1}, Derived{2}, Derived{3}, Derived{4}}; + + { + test_bounded_ptr const a(array.begin() + 1, array.begin(), array.end() - 1); + Related* b = array.begin(); + check_lt(b, a); + } + { + test_bounded_ptr const a(array.begin(), array.begin(), array.end() - 1); + Derived* b = array.begin() + 1; + check_lt(a, b); + } + + // Test comparisons against cv-void* + { + test_bounded_ptr const a(array.begin(), array.begin(), array.end() - 1); + void* b = array.begin() + 1; + check_lt(a, b); + } +} + +T_DECL(compare_order_raw, "bounded_ptr.compare.order.raw") { + tests(); + tests(); + tests(); + tests(); + tests_convert(); + tests_convert(); + tests_convert(); + tests_convert(); +} diff --git a/tests/bounded_ptr_src/ctor.begin_end.cpp b/tests/bounded_ptr_src/ctor.begin_end.cpp new file mode 100644 index 000000000..82f401d08 --- /dev/null +++ b/tests/bounded_ptr_src/ctor.begin_end.cpp @@ -0,0 +1,99 @@ +// +// Tests for +// explicit bounded_ptr(T* pointer, T const* begin, T const* end); +// + +#include +#include +#include +#include +#include "test_utils.h" + +#define _assert(...) T_ASSERT_TRUE((__VA_ARGS__), # __VA_ARGS__) + +struct T { + int i; + friend constexpr bool + operator==(T const volatile& a, T const& b) + { + return a.i == b.i; + } +}; + +template +static void +tests() +{ + std::array array = {T{0}, T{1}, T{2}, T{3}, T{4}}; + { + test_bounded_ptr p(array.begin() + 0, array.begin(), array.end()); + _assert(*p == T{0}); + } + { + test_bounded_ptr p(array.begin() + 1, array.begin(), array.end()); + _assert(*p == T{1}); + } + { + test_bounded_ptr p(array.begin() + 2, array.begin(), array.end()); + _assert(*p == T{2}); + } + { + test_bounded_ptr p(array.begin() + 3, array.begin(), array.end()); + _assert(*p == T{3}); + } + { + test_bounded_ptr p(array.begin() + 4, array.begin(), array.end()); + _assert(*p == T{4}); + } + + // It must be valid to construct out-of-bounds pointers, but we obviously + // can't dereference them. + { + // T{0} T{1} T{2} T{3} T{4} + // ^ ^ ^ + // | | | + // ptr begin end + test_bounded_ptr p(array.begin() + 1, array.begin() + 3, array.end()); + _assert(p.unsafe_discard_bounds() == array.begin() + 1); + } + { + // T{0} T{1} T{2} T{3} T{4} + // ^ ^ ^ + // | | | + // begin end ptr + test_bounded_ptr p(array.begin() + 4, array.begin(), array.begin() + 3); + _assert(p.unsafe_discard_bounds() == array.begin() + 4); + } + { + // T{0} T{1} T{2} T{3} T{4} + // ^ ^ + // | | + // begin end,ptr + test_bounded_ptr p(array.end(), array.begin(), array.end()); + _assert(p.unsafe_discard_bounds() == array.end()); + } + + // Test creating a bounded_ptr from a null pointer. 
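// A minimal sketch, assuming the element type parameter is spelled QualT as
// in the sibling tests: the same null pointer can also be obtained through
// the nullptr_t constructor exercised in ctor.nullptr.cpp, and it compares
// equal to nullptr per compare.equal.nullptr.cpp:
//
//     test_bounded_ptr<QualT> q = nullptr;
//     _assert(q == nullptr);
//     _assert(q.unsafe_discard_bounds() == nullptr);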
+ { + test_bounded_ptr p(nullptr, nullptr, nullptr); + _assert(p.unsafe_discard_bounds() == nullptr); + } +} + +struct Base { }; +struct Derived : Base { }; + +T_DECL(ctor_begin_end, "bounded_ptr.ctor.begin_end") { + tests(); + tests(); + tests(); + tests(); + + // Make sure we can construct a `bounded_ptr` from `Derived*` pointers + { + std::array array = {}; + test_bounded_ptr p(static_cast(array.begin()), + static_cast(array.begin()), + static_cast(array.end())); + } +} diff --git a/tests/bounded_ptr_src/ctor.convert.cpp b/tests/bounded_ptr_src/ctor.convert.cpp new file mode 100644 index 000000000..8bd0761de --- /dev/null +++ b/tests/bounded_ptr_src/ctor.convert.cpp @@ -0,0 +1,142 @@ +// +// Tests for +// template +// bounded_ptr(bounded_ptr const& other); +// + +#include +#include +#include +#include +#include +#include "test_utils.h" + +#define _assert(...) T_ASSERT_TRUE((__VA_ARGS__), # __VA_ARGS__) + +struct Base { int i; }; +struct Derived : Base { }; + +struct Base1 { int i; }; +struct Base2 { long l; }; +struct DerivedMultiple : Base1, Base2 { + DerivedMultiple(int i) : Base1{i}, Base2{i + 10} + { + } +}; + +struct Unrelated { }; + +struct dummy_policy1 { + static constexpr void + trap(char const*) + { + } +}; +struct dummy_policy2 { + static constexpr void + trap(char const*) + { + } +}; + +template +static void +tests() +{ + std::array array = {Stored{0}, Stored{1}, Stored{2}, Stored{3}, Stored{4}}; + Stored* const ptr = array.begin() + 2; + + { + test_bounded_ptr const from(ptr, array.begin(), array.end()); + test_bounded_ptr to = from; // conversion (implicit) + _assert(to.discard_bounds() == static_cast(ptr)); + } + { + test_bounded_ptr const from(ptr, array.begin(), array.end()); + test_bounded_ptr to(from); // conversion (explicit) + _assert(to.discard_bounds() == static_cast(ptr)); + } + { + test_bounded_ptr const from(ptr, array.begin(), array.end()); + test_bounded_ptr to{from}; // conversion (explicit) + _assert(to.discard_bounds() == static_cast(ptr)); + } + { + test_bounded_ptr const from(ptr, array.begin(), array.end()); + test_bounded_ptr to = static_cast >(from); // conversion (explicit) + _assert(to.discard_bounds() == static_cast(ptr)); + } + + // Test converting from a null pointer + { + test_bounded_ptr from = nullptr; + test_bounded_ptr to = from; // conversion (implicit) + _assert(to.unsafe_discard_bounds() == nullptr); + } + + // Test with different policies + { + libkern::bounded_ptr from(ptr, array.begin(), array.end()); + libkern::bounded_ptr to = from; // conversion (implicit) + _assert(to.discard_bounds() == static_cast(ptr)); + } + + // T{0} T{1} T{2} T{3} T{4} + // ^ ^ ^ + // | | | + // from begin end + { + test_bounded_ptr const from(array.begin(), array.begin() + 1, array.end()); + test_bounded_ptr to(from); + _assert(to.unsafe_discard_bounds() == static_cast(array.begin())); + } +} + +T_DECL(ctor_convert, "bounded_ptr.ctor.convert") { + tests(); + tests(); + tests(); + tests(); + + tests(); + tests(); + tests(); + tests(); + + tests(); + tests(); + tests(); + tests(); + + tests(); + tests(); + tests(); + tests(); + + tests(); + tests(); + tests(); + tests(); + + // Make sure downcasts are disabled + static_assert(!std::is_convertible_v, /*to*/ test_bounded_ptr >); + static_assert(!std::is_convertible_v, /*to*/ test_bounded_ptr >); + static_assert(!std::is_convertible_v, /*to*/ test_bounded_ptr >); + static_assert(!std::is_convertible_v, /*to*/ test_bounded_ptr >); + + // Make sure const-casting away doesn't work + 
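// (with the template arguments spelled out, the rejected conversion is
// presumably the following sketch; an implicit conversion may add
// const/volatile qualifiers but must never remove them, mirroring the
// raw-pointer rules)
//
//     test_bounded_ptr<Derived const> from = /* ... */;
//     test_bounded_ptr<Derived> to = from;   // must not compile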
static_assert(!std::is_convertible_v, /*to*/ test_bounded_ptr >); + + // Make sure casting to unrelated types doesn't work implicitly + static_assert(!std::is_convertible_v, /*to*/ test_bounded_ptr >); + static_assert(!std::is_convertible_v, /*to*/ test_bounded_ptr >); + static_assert(!std::is_convertible_v, /*to*/ test_bounded_ptr >); + + // Make sure even explicit conversion to unrelated types doesn't work + static_assert(!std::is_constructible_v, /*from*/ test_bounded_ptr >); + static_assert(!std::is_constructible_v, /*from*/ test_bounded_ptr >); + static_assert(!std::is_constructible_v, /*from*/ test_bounded_ptr >); + + // Make sure construction from a raw pointer doesn't work + static_assert(!std::is_constructible_v, /*from*/ Derived*>); +} diff --git a/tests/bounded_ptr_src/ctor.default.cpp b/tests/bounded_ptr_src/ctor.default.cpp new file mode 100644 index 000000000..392f3b5c4 --- /dev/null +++ b/tests/bounded_ptr_src/ctor.default.cpp @@ -0,0 +1,34 @@ +// +// Tests for +// explicit bounded_ptr(); +// + +#include +#include +#include +#include "test_utils.h" + +#define _assert(...) T_ASSERT_TRUE((__VA_ARGS__), # __VA_ARGS__) + +struct T { }; + +template +static void +tests() +{ + { + test_bounded_ptr p; + _assert(p == nullptr); + } + { + test_bounded_ptr p{}; + _assert(p == nullptr); + } +} + +T_DECL(ctor_default, "bounded_ptr.ctor.default") { + tests(); + tests(); + tests(); + tests(); +} diff --git a/tests/bounded_ptr_src/ctor.nullptr.cpp b/tests/bounded_ptr_src/ctor.nullptr.cpp new file mode 100644 index 000000000..ed8f76598 --- /dev/null +++ b/tests/bounded_ptr_src/ctor.nullptr.cpp @@ -0,0 +1,97 @@ +// +// Tests for +// bounded_ptr(std::nullptr_t); +// + +#include +#include +#include +#include "test_utils.h" + +#define _assert(...) T_ASSERT_TRUE((__VA_ARGS__), # __VA_ARGS__) + +struct T { }; + +template +static void +tests() +{ + // Test with nullptr + { + test_bounded_ptr p = nullptr; + _assert(p == nullptr); + } + { + test_bounded_ptr p{nullptr}; + _assert(p == nullptr); + } + { + test_bounded_ptr p(nullptr); + _assert(p == nullptr); + } + { + test_bounded_ptr p = static_cast >(nullptr); + _assert(p == nullptr); + } + { + auto f = [](test_bounded_ptr p) { + _assert(p == nullptr); + }; + f(nullptr); + } + + // Test with NULL + { + test_bounded_ptr p = NULL; + _assert(p == nullptr); + } + { + test_bounded_ptr p{NULL}; + _assert(p == nullptr); + } + { + test_bounded_ptr p(NULL); + _assert(p == nullptr); + } + { + test_bounded_ptr p = static_cast >(NULL); + _assert(p == nullptr); + } + { + auto f = [](test_bounded_ptr p) { + _assert(p == nullptr); + }; + f(NULL); + } + + // Test with 0 + { + test_bounded_ptr p = 0; + _assert(p == nullptr); + } + { + test_bounded_ptr p{0}; + _assert(p == nullptr); + } + { + test_bounded_ptr p(0); + _assert(p == nullptr); + } + { + test_bounded_ptr p = static_cast >(0); + _assert(p == nullptr); + } + { + auto f = [](test_bounded_ptr p) { + _assert(p == nullptr); + }; + f(0); + } +} + +T_DECL(ctor_nullptr, "bounded_ptr.ctor.nullptr") { + tests(); + tests(); + tests(); + tests(); +} diff --git a/tests/bounded_ptr_src/deref.cpp b/tests/bounded_ptr_src/deref.cpp new file mode 100644 index 000000000..d8c8a64a1 --- /dev/null +++ b/tests/bounded_ptr_src/deref.cpp @@ -0,0 +1,171 @@ +// +// Tests for +// T& operator*() const; +// T* operator->() const; +// + +#include +#include +#include +#include +#include "test_utils.h" + +#define _assert(...) 
T_ASSERT_TRUE((__VA_ARGS__), # __VA_ARGS__) + +struct T { + int i; + friend constexpr bool + operator==(T const volatile& a, T const& b) + { + return a.i == b.i; + } +}; + +namespace { +struct tracking_policy { + static bool did_trap; + static void + trap(char const*) + { + did_trap = true; + } +}; +bool tracking_policy::did_trap = false; +} + +template +static void +tests() +{ + std::array array = {T{0}, T{1}, T{2}, T{3}, T{4}}; + + { + // T{0} T{1} T{2} T{3} T{4} + // ^ ^ + // | | + // begin, ptr end + test_bounded_ptr ptr(array.begin() + 0, array.begin(), array.end()); + QualT& ref = *ptr; + _assert(ref == T{0}); + _assert(&ref == &array[0]); + + _assert(ptr->i == 0); + _assert(&ptr->i == &array[0].i); + } + { + // T{0} T{1} T{2} T{3} T{4} + // ^ ^ ^ + // | | | + // begin ptr end + test_bounded_ptr ptr(array.begin() + 1, array.begin(), array.end()); + QualT& ref = *ptr; + _assert(ref == T{1}); + _assert(&ref == &array[1]); + + _assert(ptr->i == 1); + _assert(&ptr->i == &array[1].i); + } + { + // T{0} T{1} T{2} T{3} T{4} + // ^ ^ ^ + // | | | + // begin ptr end + test_bounded_ptr ptr(array.begin() + 2, array.begin(), array.end()); + QualT& ref = *ptr; + _assert(ref == T{2}); + _assert(&ref == &array[2]); + + _assert(ptr->i == 2); + _assert(&ptr->i == &array[2].i); + } + { + // T{0} T{1} T{2} T{3} T{4} + // ^ ^ ^ + // | | | + // begin ptr end + test_bounded_ptr ptr(array.begin() + 4, array.begin(), array.end()); + QualT& ref = *ptr; + _assert(ref == T{4}); + _assert(&ref == &array[4]); + + _assert(ptr->i == 4); + _assert(&ptr->i == &array[4].i); + } + + // Make sure we don't trap when dereferencing an in-bounds pointer + { + // T{0} T{1} T{2} T{3} T{4} + // ^ ^ ^ + // | | | + // begin ptr end + libkern::bounded_ptr ptr(array.begin() + 1, array.begin(), array.end()); + + tracking_policy::did_trap = false; + (void)*ptr; + (void)ptr->i; + _assert(!tracking_policy::did_trap); + } + + // Make sure we trap when dereferencing an out-of-bounds pointer + { + // T{0} T{1} T{2} T{3} T{4} + // ^ ^ ^ + // | | | + // begin end ptr + libkern::bounded_ptr ptr(array.end() - 1, array.begin(), array.end() - 2); + + tracking_policy::did_trap = false; + (void)*ptr; + _assert(tracking_policy::did_trap); + + tracking_policy::did_trap = false; + (void)ptr->i; + _assert(tracking_policy::did_trap); + } + { + // T{0} T{1} T{2} T{3} T{4} + // ^ ^ ^ + // | | | + // ptr begin end + libkern::bounded_ptr ptr(array.begin(), array.begin() + 1, array.end()); + + tracking_policy::did_trap = false; + (void)*ptr; + _assert(tracking_policy::did_trap); + + tracking_policy::did_trap = false; + (void)ptr->i; + _assert(tracking_policy::did_trap); + } + { + // T{0} T{1} T{2} T{3} T{4} + // ^ ^ ^ + // | (just a bit off) | | + // begin ptr end + T* t3 = const_cast(array.begin() + 3); + char* just_off = reinterpret_cast(t3) + 1; // 1 byte off + libkern::bounded_ptr ptr(reinterpret_cast(just_off), array.begin(), array.end() - 1); + + tracking_policy::did_trap = false; + (void)*ptr; + _assert(tracking_policy::did_trap); + + tracking_policy::did_trap = false; + (void)ptr->i; + _assert(tracking_policy::did_trap); + } +} + +T_DECL(deref, "bounded_ptr.deref") { + tests(); + tests(); + tests(); + tests(); + + // Make sure that we don't hard-error in the definition of operator* + // when instantiating a `bounded_ptr` + test_bounded_ptr p1; + test_bounded_ptr p2; + test_bounded_ptr p3; + test_bounded_ptr p4; +} diff --git a/tests/bounded_ptr_src/discard_bounds.cpp b/tests/bounded_ptr_src/discard_bounds.cpp new file mode 100644 index 
000000000..30a42fbaa --- /dev/null +++ b/tests/bounded_ptr_src/discard_bounds.cpp @@ -0,0 +1,124 @@ +// +// Tests for +// T* discard_bounds() const; +// + +#include +#include +#include +#include +#include "test_utils.h" + +#define _assert(...) T_ASSERT_TRUE((__VA_ARGS__), # __VA_ARGS__) + +struct T { int i; }; + +namespace { +struct tracking_policy { + static bool did_trap; + static void + trap(char const*) + { + did_trap = true; + } +}; +bool tracking_policy::did_trap = false; +} + +template +static void +tests() +{ + std::array array = {T{0}, T{1}, T{2}, T{3}, T{4}}; + + { + // T{0} T{1} T{2} T{3} T{4} + // ^ ^ + // | | + // begin, ptr end + test_bounded_ptr const ptr(array.begin() + 0, array.begin(), array.end()); + QualT* raw = ptr.discard_bounds(); + _assert(raw == &array[0]); + } + { + // T{0} T{1} T{2} T{3} T{4} + // ^ ^ ^ + // | | | + // begin ptr end + test_bounded_ptr const ptr(array.begin() + 1, array.begin(), array.end()); + QualT* raw = ptr.discard_bounds(); + _assert(raw == &array[1]); + } + { + // T{0} T{1} T{2} T{3} T{4} + // ^ ^ ^ + // | | | + // begin ptr end + test_bounded_ptr const ptr(array.begin() + 2, array.begin(), array.end()); + QualT* raw = ptr.discard_bounds(); + _assert(raw == &array[2]); + } + { + // T{0} T{1} T{2} T{3} T{4} + // ^ ^ ^ + // | | | + // begin ptr end + test_bounded_ptr const ptr(array.begin() + 4, array.begin(), array.end()); + QualT* raw = ptr.discard_bounds(); + _assert(raw == &array[4]); + } + // Make sure we don't trap when discarding the bounds of an in-bounds pointer + { + // T{0} T{1} T{2} T{3} T{4} + // ^ ^ ^ + // | | | + // begin ptr end + libkern::bounded_ptr ptr(array.begin() + 1, array.begin(), array.end()); + tracking_policy::did_trap = false; + (void)*ptr; + (void)ptr->i; + _assert(!tracking_policy::did_trap); + } + + // Make sure we trap when discarding the bounds of an out-of-bounds pointer + { + // T{0} T{1} T{2} T{3} T{4} + // ^ ^ ^ + // | | | + // begin end ptr + libkern::bounded_ptr ptr(array.end() - 1, array.begin(), array.end() - 2); + tracking_policy::did_trap = false; + (void)ptr.discard_bounds(); + _assert(tracking_policy::did_trap); + } + { + // T{0} T{1} T{2} T{3} T{4} + // ^ ^ ^ + // | | | + // ptr begin end + libkern::bounded_ptr ptr(array.begin(), array.begin() + 1, array.end()); + tracking_policy::did_trap = false; + (void)ptr.discard_bounds(); + _assert(tracking_policy::did_trap); + } + { + // T{0} T{1} T{2} T{3} T{4} + // ^ ^ ^ + // | (just a bit off) | | + // begin ptr end + T* t3 = const_cast(array.begin() + 3); + char* just_off = reinterpret_cast(t3) + 1; // 1 byte off + libkern::bounded_ptr ptr(reinterpret_cast(just_off), array.begin(), array.end() - 1); + + tracking_policy::did_trap = false; + (void)ptr.discard_bounds(); + _assert(tracking_policy::did_trap); + } +} + +T_DECL(discard_bounds, "bounded_ptr.discard_bounds") { + tests(); + tests(); + tests(); + tests(); +} diff --git a/tests/bounded_ptr_src/example.malloc.cpp b/tests/bounded_ptr_src/example.malloc.cpp new file mode 100644 index 000000000..c8a53afeb --- /dev/null +++ b/tests/bounded_ptr_src/example.malloc.cpp @@ -0,0 +1,75 @@ +// +// Example of providing a malloc() wrapper that returns a `bounded_ptr`. +// +// This test serves as some kind of integration test, ensuring that we're +// able to convert existing code using raw pointers to using `bounded_ptr`s +// without too much hassle. This code was lifted from existing code in XNU, +// and the variable names were changed to make it more generic. 
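// As a rough sketch of the intended call-site migration (the `widget` type
// and element count `n` are hypothetical; bounded_malloc/bounded_free are the
// wrappers defined below, and reinterpret_pointer_cast is the same cast this
// test itself uses):
//
//     // before: widget *w = (widget *)malloc(n * sizeof(widget));
//     test_bounded_ptr<widget> w =
//         libkern::reinterpret_pointer_cast<widget>(bounded_malloc(n * sizeof(widget)));
//     use(w.discard_bounds());   // APIs still taking raw pointers
//     bounded_free(w);           // instead of free(w)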
+// + +#include +#include +#include +#include +#include +#include "test_utils.h" + +test_bounded_ptr +bounded_malloc(std::size_t size) +{ + void* p = std::malloc(size); + void* end = static_cast(p) + size; + test_bounded_ptr with_bounds(p, p, end); + return with_bounds; +} + +void +bounded_free(test_bounded_ptr ptr) +{ + std::free(ptr.discard_bounds()); +} + +struct SomeType { + std::uint32_t idx; +}; + +// Pretend that those functions are already part of the code base being +// transitioned over to `bounded_ptr`s, and we can't change their signature. +// The purpose of having those functions is to make sure that we're able to +// integrate into existing code bases with decent ease. +void +use(SomeType*) +{ +} +void +require(bool condition) +{ + if (!condition) { + std::exit(EXIT_FAILURE); + } +} + +T_DECL(example_malloc, "bounded_ptr.example.malloc") { + test_bounded_ptr array = nullptr; + std::uint32_t count = 100; + std::uint32_t alloc_size = count * sizeof(SomeType); + + // (1) must use a bounded version of malloc + // (2) must use a reinterpret_pointer_cast to go from void* to SomeType* + array = libkern::reinterpret_pointer_cast(bounded_malloc(alloc_size)); + + require(array != nullptr); // use != nullptr instead of relying on implicit conversion to bool + use(array.discard_bounds()); // must manually discard bounds here + + for (std::uint32_t i = 0; i < count; i++) { + std::uint32_t& idx = array[i].idx; + idx = i; + use(&array[idx]); + } + + if (array) { + bounded_free(array); // must use a bounded version of free + } + + T_PASS("bounded_ptr.example.malloc test done"); +} diff --git a/tests/bounded_ptr_src/operator_bool.cpp b/tests/bounded_ptr_src/operator_bool.cpp new file mode 100644 index 000000000..595da16b9 --- /dev/null +++ b/tests/bounded_ptr_src/operator_bool.cpp @@ -0,0 +1,42 @@ +// +// Tests for +// explicit operator bool() const; +// + +#include +#include +#include +#include "test_utils.h" + +#define _assert(...) T_ASSERT_TRUE((__VA_ARGS__), # __VA_ARGS__) + +struct T { }; + +template +static void +tests() +{ + { + test_bounded_ptr p = nullptr; + if (p) { + _assert(false); + } + _assert(!p); + } + { + T t; + test_bounded_ptr p(&t, &t, &t + 1); + if (p) { + } else { + _assert(false); + } + _assert(!!p); + } +} + +T_DECL(operator_bool, "bounded_ptr.operator.bool") { + tests(); + tests(); + tests(); + tests(); +} diff --git a/tests/bounded_ptr_src/reinterpret_cast.cpp b/tests/bounded_ptr_src/reinterpret_cast.cpp new file mode 100644 index 000000000..0486ddc10 --- /dev/null +++ b/tests/bounded_ptr_src/reinterpret_cast.cpp @@ -0,0 +1,86 @@ +// +// Tests for +// template +// bounded_ptr reinterpret_pointer_cast(bounded_ptr const& p) noexcept +// + +#include +#include +#include +#include +#include "test_utils.h" + +#define _assert(...) 
T_ASSERT_TRUE((__VA_ARGS__), # __VA_ARGS__) + +struct Base { int i; }; +struct Derived : Base { }; + +struct Base1 { int i; }; +struct Base2 { long l; }; +struct DerivedMultiple : Base1, Base2 { + DerivedMultiple(int i) : Base1{i}, Base2{i + 10} + { + } +}; + +struct non_default_policy { + static constexpr void + trap(char const*) + { + } +}; + +template +static void +tests() +{ + std::array array = {Stored{0}, Stored{1}, Stored{2}, Stored{3}, Stored{4}}; + + { + test_bounded_ptr from(array.begin() + 2, array.begin(), array.end()); + test_bounded_ptr to = libkern::reinterpret_pointer_cast(from); + _assert(to.discard_bounds() == reinterpret_cast(from.discard_bounds())); + } + + { + test_bounded_ptr from(array.begin() + 2, array.begin(), array.end()); + test_bounded_ptr to = libkern::reinterpret_pointer_cast(from); + _assert(to.discard_bounds() == reinterpret_cast(from.discard_bounds())); + } + + // Test `reinterpret_pointer_cast`ing a null pointer + { + test_bounded_ptr from(nullptr, nullptr, nullptr); + test_bounded_ptr to = libkern::reinterpret_pointer_cast(from); + _assert(to.unsafe_discard_bounds() == nullptr); + } + + // Test with a non-default policy + { + libkern::bounded_ptr from(array.begin(), array.begin(), array.end()); + libkern::bounded_ptr to = libkern::reinterpret_pointer_cast(from); + _assert(to.discard_bounds() == reinterpret_cast(from.discard_bounds())); + } +} + +T_DECL(reinterpret_cast_, "bounded_ptr.reinterpret_cast") { + tests(); + tests(); + tests(); + tests(); + + tests(); + tests(); + tests(); + tests(); + + tests(); + tests(); + tests(); + tests(); + + tests(); + tests(); + tests(); + tests(); +} diff --git a/tests/bounded_ptr_src/subscript.cpp b/tests/bounded_ptr_src/subscript.cpp new file mode 100644 index 000000000..a69029d33 --- /dev/null +++ b/tests/bounded_ptr_src/subscript.cpp @@ -0,0 +1,252 @@ +// +// Tests for +// T& operator[](std::ptrdiff_t n) const; +// + +#include +#include +#include +#include +#include +#include "test_utils.h" + +#define _assert(...) 
T_ASSERT_TRUE((__VA_ARGS__), # __VA_ARGS__) + +struct T { + int i; + friend constexpr bool + operator==(T const& a, T const& b) + { + return a.i == b.i; + } +}; + +namespace { +struct tracking_policy { + static bool did_trap; + static void + trap(char const*) + { + did_trap = true; + } +}; +bool tracking_policy::did_trap = false; +} + +template +static void +tests() +{ + std::array array = {T{0}, T{1}, T{2}, T{3}, T{4}}; + + { + // T{0} T{1} T{2} T{3} T{4} + // ^ ^ + // | | + // begin, ptr end + test_bounded_ptr ptr(array.begin() + 0, array.begin(), array.end()); + QualT& ref0 = ptr[0]; + _assert(&ref0 == &array[0]); + + QualT& ref1 = ptr[1]; + _assert(&ref1 == &array[1]); + + QualT& ref2 = ptr[2]; + _assert(&ref2 == &array[2]); + + QualT& ref3 = ptr[3]; + _assert(&ref3 == &array[3]); + + QualT& ref4 = ptr[4]; + _assert(&ref4 == &array[4]); + } + { + // T{0} T{1} T{2} T{3} T{4} + // ^ ^ ^ + // | | | + // begin ptr end + test_bounded_ptr ptr(array.begin() + 1, array.begin(), array.end()); + QualT& ref0 = ptr[-1]; + _assert(&ref0 == &array[0]); + + QualT& ref1 = ptr[0]; + _assert(&ref1 == &array[1]); + + QualT& ref2 = ptr[1]; + _assert(&ref2 == &array[2]); + + QualT& ref3 = ptr[2]; + _assert(&ref3 == &array[3]); + + QualT& ref4 = ptr[3]; + _assert(&ref4 == &array[4]); + } + { + // T{0} T{1} T{2} T{3} T{4} + // ^ ^ ^ + // | | | + // begin ptr end + test_bounded_ptr ptr(array.begin() + 2, array.begin(), array.end()); + QualT& ref0 = ptr[-2]; + _assert(&ref0 == &array[0]); + + QualT& ref1 = ptr[-1]; + _assert(&ref1 == &array[1]); + + QualT& ref2 = ptr[0]; + _assert(&ref2 == &array[2]); + + QualT& ref3 = ptr[1]; + _assert(&ref3 == &array[3]); + + QualT& ref4 = ptr[2]; + _assert(&ref4 == &array[4]); + } + { + // T{0} T{1} T{2} T{3} T{4} + // ^ ^ ^ + // | | | + // begin ptr end + test_bounded_ptr ptr(array.begin() + 4, array.begin(), array.end()); + QualT& ref0 = ptr[-4]; + _assert(&ref0 == &array[0]); + + QualT& ref1 = ptr[-3]; + _assert(&ref1 == &array[1]); + + QualT& ref2 = ptr[-2]; + _assert(&ref2 == &array[2]); + + QualT& ref3 = ptr[-1]; + _assert(&ref3 == &array[3]); + + QualT& ref4 = ptr[0]; + _assert(&ref4 == &array[4]); + } + { + // T{0} T{1} T{2} T{3} T{4} + // ^ ^ + // | | + // begin end,ptr + test_bounded_ptr ptr(array.end(), array.begin(), array.end()); + QualT& ref0 = ptr[-5]; + _assert(&ref0 == &array[0]); + + QualT& ref1 = ptr[-4]; + _assert(&ref1 == &array[1]); + + QualT& ref2 = ptr[-3]; + _assert(&ref2 == &array[2]); + + QualT& ref3 = ptr[-2]; + _assert(&ref3 == &array[3]); + + QualT& ref4 = ptr[-1]; + _assert(&ref4 == &array[4]); + } + + // Make sure we trap when we subscript a pointer at an out-of-bounds offset + { + // T{0} T{1} T{2} T{3} T{4} + // ^ ^ ^ + // | | | + // begin end ptr + libkern::bounded_ptr ptr(array.end() - 1, array.begin(), array.end() - 2); + + tracking_policy::did_trap = false; + (void)ptr[-4]; + _assert(!tracking_policy::did_trap); + + tracking_policy::did_trap = false; + (void)ptr[-3]; + _assert(!tracking_policy::did_trap); + + tracking_policy::did_trap = false; + (void)ptr[-2]; + _assert(!tracking_policy::did_trap); + + tracking_policy::did_trap = false; + (void)ptr[-1]; // trap + _assert(tracking_policy::did_trap); + + tracking_policy::did_trap = false; + (void)ptr[0]; // trap + _assert(tracking_policy::did_trap); + } + { + // T{0} T{1} T{2} T{3} T{4} + // ^ ^ ^ + // | | | + // begin ptr end + libkern::bounded_ptr ptr(array.begin() + 1, array.begin(), array.end()); + + tracking_policy::did_trap = false; + (void)ptr[-1]; + 
_assert(!tracking_policy::did_trap); + + tracking_policy::did_trap = false; + (void)ptr[0]; + _assert(!tracking_policy::did_trap); + + tracking_policy::did_trap = false; + (void)ptr[1]; + _assert(!tracking_policy::did_trap); + + tracking_policy::did_trap = false; + (void)ptr[2]; + _assert(!tracking_policy::did_trap); + + tracking_policy::did_trap = false; + (void)ptr[3]; + _assert(!tracking_policy::did_trap); + + tracking_policy::did_trap = false; + (void)ptr[4]; // trap + _assert(tracking_policy::did_trap); + } + { + // T{0} T{1} T{2} T{3} T{4} + // ^ ^ ^ + // | | | + // ptr begin end + libkern::bounded_ptr ptr(array.begin(), array.begin() + 1, array.end() - 1); + + tracking_policy::did_trap = false; + (void)ptr[0]; // trap + _assert(tracking_policy::did_trap); + + tracking_policy::did_trap = false; + (void)ptr[1]; + _assert(!tracking_policy::did_trap); + + tracking_policy::did_trap = false; + (void)ptr[2]; + _assert(!tracking_policy::did_trap); + + tracking_policy::did_trap = false; + (void)ptr[3]; + _assert(!tracking_policy::did_trap); + + tracking_policy::did_trap = false; + (void)ptr[4]; // trap + _assert(tracking_policy::did_trap); + + tracking_policy::did_trap = false; + (void)ptr[5]; // trap + _assert(tracking_policy::did_trap); + } +} + +T_DECL(subscript, "bounded_ptr.subscript") { + tests(); + tests(); + tests(); + tests(); + + // Make sure that we don't hard-error in the definition of operator[] + // when instantiating a `bounded_ptr` + test_bounded_ptr p1; + test_bounded_ptr p2; + test_bounded_ptr p3; + test_bounded_ptr p4; +} diff --git a/tests/bounded_ptr_src/test_utils.h b/tests/bounded_ptr_src/test_utils.h new file mode 100644 index 000000000..3e1601fe6 --- /dev/null +++ b/tests/bounded_ptr_src/test_utils.h @@ -0,0 +1,20 @@ +#ifndef TESTS_BOUNDED_PTR_TEST_UTILS_H +#define TESTS_BOUNDED_PTR_TEST_UTILS_H + +#include +#include + +namespace { +struct test_policy { + static void + trap(char const*) + { + assert(false); + } +}; + +template +using test_bounded_ptr = libkern::bounded_ptr; +} // end anonymous namespace + +#endif // !TESTS_BOUNDED_PTR_TEST_UTILS_H diff --git a/tests/bounded_ptr_src/unsafe_discard_bounds.cpp b/tests/bounded_ptr_src/unsafe_discard_bounds.cpp new file mode 100644 index 000000000..f6a0ae6f1 --- /dev/null +++ b/tests/bounded_ptr_src/unsafe_discard_bounds.cpp @@ -0,0 +1,144 @@ +// +// Tests for +// T* unsafe_discard_bounds() const; +// + +#include +#include +#include +#include +#include +#include +#include +#include "test_utils.h" + +#define _assert(...) 
T_ASSERT_TRUE((__VA_ARGS__), # __VA_ARGS__) + +namespace { +struct tracking_policy { + static bool did_trap; + static void + trap(char const*) + { + did_trap = true; + } +}; +bool tracking_policy::did_trap = false; +} + +struct T { int i; }; + +template +static void +tests() +{ + std::array array = {T{0}, T{1}, T{2}, T{3}, T{4}}; + + { + // T{0} T{1} T{2} T{3} T{4} + // ^ ^ + // | | + // begin, ptr end + test_bounded_ptr const ptr(array.begin() + 0, array.begin(), array.end()); + QualT* raw = ptr.unsafe_discard_bounds(); + _assert(raw == &array[0]); + } + { + // T{0} T{1} T{2} T{3} T{4} + // ^ ^ ^ + // | | | + // begin ptr end + test_bounded_ptr const ptr(array.begin() + 1, array.begin(), array.end()); + QualT* raw = ptr.unsafe_discard_bounds(); + _assert(raw == &array[1]); + } + { + // T{0} T{1} T{2} T{3} T{4} + // ^ ^ ^ + // | | | + // begin ptr end + test_bounded_ptr const ptr(array.begin() + 2, array.begin(), array.end()); + QualT* raw = ptr.unsafe_discard_bounds(); + _assert(raw == &array[2]); + } + { + // T{0} T{1} T{2} T{3} T{4} + // ^ ^ ^ + // | | | + // begin ptr end + test_bounded_ptr const ptr(array.begin() + 4, array.begin(), array.end()); + QualT* raw = ptr.unsafe_discard_bounds(); + _assert(raw == &array[4]); + } + { + // T{0} T{1} T{2} T{3} T{4} + // ^ ^ + // | | + // begin end,ptr + test_bounded_ptr const ptr(array.end(), array.begin(), array.end()); + QualT* raw = ptr.unsafe_discard_bounds(); + _assert(raw == array.end()); + } + { + // T{0} T{1} T{2} T{3} T{4} + // ^ ^ ^ + // | | | + // begin end ptr + test_bounded_ptr ptr(array.end() - 1, array.begin(), array.end() - 2); + QualT* raw = ptr.unsafe_discard_bounds(); + _assert(raw == &array[4]); + } + { + // T{0} T{1} T{2} T{3} T{4} + // ^ ^ ^ + // | | | + // begin ptr end + test_bounded_ptr ptr(array.begin() + 1, array.begin(), array.end()); + QualT* raw = ptr.unsafe_discard_bounds(); + _assert(raw == &array[1]); + } + { + // T{0} T{1} T{2} T{3} T{4} + // ^ ^ ^ + // | | | + // ptr begin end + test_bounded_ptr ptr(array.begin(), array.begin() + 1, array.end()); + QualT* raw = ptr.unsafe_discard_bounds(); + _assert(raw == &array[0]); + } + + // Test discarding the bounds of a null pointer + { + test_bounded_ptr const ptr(nullptr, nullptr, nullptr); + QualT* raw = ptr.unsafe_discard_bounds(); + _assert(raw == nullptr); + } + + // Test discarding the bounds on a pointer outside of representable memory. + // Even `unsafe_discard_bounds()` will trap in such conditions. + // + // To do this, we setup an imaginary object with a very high address, and + // we add a large-ish offset to it, such that adding the base to the offset + // would fall outside of the representable memory. 
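// In spirit, the overflow this test expects the implementation to detect is
// the following sketch (assuming, as the comment below does, that the pointer
// is represented as a `base_` address plus a byte `offset_`):
//
//     std::uintptr_t base = reinterpret_cast<std::uintptr_t>(base_);
//     std::uintptr_t sum;
//     if (__builtin_add_overflow(base, static_cast<std::uintptr_t>(offset_), &sum)) {
//         tracking_policy::trap("...");   // what this test expects to observe
//     }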
+ { + tracking_policy::did_trap = false; + + QualT* end_of_memory = reinterpret_cast(std::numeric_limits::max()); + QualT* base = end_of_memory - 500; // yeah, technically UB + std::ptrdiff_t offset = 501; + + libkern::bounded_ptr ptr(base, base, base + 1); + ptr += offset; // now, `base_ + offset_` points outside of representable memory + + _assert(!tracking_policy::did_trap); + (void)ptr.unsafe_discard_bounds(); + _assert(tracking_policy::did_trap); + } +} + +T_DECL(unsafe_discard_bounds, "bounded_ptr.unsafe_discard_bounds") { + tests(); + tests(); + tests(); + tests(); +} diff --git a/tests/contextswitch.c b/tests/contextswitch.c index b2ec16624..cb093b1f8 100644 --- a/tests/contextswitch.c +++ b/tests/contextswitch.c @@ -196,10 +196,10 @@ T_GLOBAL_META(T_META_NAMESPACE("xnu.scheduler")); /* Disable the test on MacOS for now */ T_DECL(perf_csw, "context switch performance", T_META_TAG_PERF, T_META_CHECK_LEAKS(false), T_META_ASROOT(true)) { -#if !CONFIG_EMBEDDED - T_SKIP("Not supported on MacOS"); +#if !defined (__arm__) && !defined(__arm64__) + T_SKIP("Not supported on Intel platforms"); return; -#endif /* CONFIG_EMBEDDED */ +#endif /* !defined (__arm__) && !defined(__arm64__) */ check_device_temperature(); T_ATEND(csw_perf_test_cleanup); diff --git a/tests/correct_kernel_booted.c b/tests/correct_kernel_booted.c new file mode 100644 index 000000000..e2927dcd5 --- /dev/null +++ b/tests/correct_kernel_booted.c @@ -0,0 +1,167 @@ +// Copyright (c) 2020 Apple, Inc. All rights reserved. + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static bool +get_macho_uuid(const char *cwd, const char *path, uuid_t uuid) +{ + bool found = false; + void *mapped = MAP_FAILED; + size_t mapped_len = 0; + + T_SETUPBEGIN; + + // Skip irregular files (directories, devices, etc.). + struct stat stbuf = {}; + int ret = stat(path, &stbuf); + if (ret < 0 && errno == ENOENT) { + goto out; + } + T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "should stat %s%s", cwd, path); + if ((stbuf.st_mode & S_IFREG) == 0) { + goto out; + } + if (stbuf.st_size < (off_t)sizeof(struct mach_header)) { + goto out; + } + + int fd = open(path, O_RDONLY); + if (fd < 0 && (errno == EPERM || errno == EACCES || errno == ENOENT)) { + goto out; + } + T_QUIET; + T_ASSERT_POSIX_SUCCESS(fd, "should open file at %s%s", cwd, path); + + mapped = mmap(NULL, (size_t)stbuf.st_size, PROT_READ, MAP_PRIVATE, + fd, 0); + T_QUIET; T_WITH_ERRNO; + T_ASSERT_NE(mapped, MAP_FAILED, "should map Mach-O binary at %s%s", + cwd, path); + (void)close(fd); + + // Mach-O parsing boilerplate. + uint32_t magic = *(uint32_t *)mapped; + bool should_swap = false; + bool b32 = false; + // XXX This does not handle fat binaries. + switch (magic) { + case MH_CIGAM: + should_swap = true; + OS_FALLTHROUGH; + case MH_MAGIC: + b32 = true; + break; + case MH_CIGAM_64: + should_swap = true; + break; + case MH_MAGIC_64: + break; + default: + goto out; + } + const struct load_command *lcmd = NULL; + unsigned int ncmds = 0; + if (b32) { + const struct mach_header *hdr = mapped; + ncmds = hdr->ncmds; + lcmd = (const void *)((const char *)mapped + sizeof(*hdr)); + } else { + const struct mach_header_64 *hdr = mapped; + ncmds = hdr->ncmds; + lcmd = (const void *)((const char *)mapped + sizeof(*hdr)); + } + ncmds = should_swap ? OSSwapInt32(ncmds) : ncmds; + + // Scan through load commands to find LC_UUID. + for (unsigned int i = 0; i < ncmds; i++) { + if ((should_swap ? 
OSSwapInt32(lcmd->cmd) : lcmd->cmd) == LC_UUID) { + const struct uuid_command *uuid_cmd = (const void *)lcmd; + uuid_copy(uuid, uuid_cmd->uuid); + found = true; + break; + } + + uint32_t cmdsize = should_swap ? OSSwapInt32(lcmd->cmdsize) : + lcmd->cmdsize; + lcmd = (const void *)((const char *)lcmd + cmdsize); + } + + if (!found) { + T_LOG("could not find LC_UUID in Mach-O at %s%s", cwd, path); + } + +out: + T_SETUPEND; + + if (mapped != MAP_FAILED) { + munmap(mapped, mapped_len); + } + return found; +} + +T_DECL(correct_kernel_booted, + "Make sure the kernel on disk matches the running kernel, by UUID.", + T_META_RUN_CONCURRENTLY(true)) +{ + T_SETUPBEGIN; + + uuid_t kern_uuid; + uuid_string_t kern_uuid_str; + size_t kern_uuid_size = sizeof(kern_uuid_str); + int ret = sysctlbyname("kern.uuid", &kern_uuid_str, &kern_uuid_size, NULL, + 0); + T_QUIET; T_ASSERT_POSIX_ZERO(ret, "should get running kernel UUID"); + T_LOG("%s: running kernel", kern_uuid_str); + + ret = uuid_parse(kern_uuid_str, kern_uuid); + T_QUIET; T_ASSERT_EQ(ret, 0, "should parse kernel UUID into bytes"); + +#if TARGET_OS_OSX + const char *kernels_path = "/System/Library/Kernels/"; +#else // TARGET_OS_OSX + const char *kernels_path = "/"; +#endif // !TARGET_OS_OSX + T_LOG("searching for kernels at %s", kernels_path); + + ret = chdir(kernels_path); + T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "should change directory to %s", + kernels_path); + + DIR *kernels_dir = opendir(kernels_path); + T_QUIET; T_ASSERT_NOTNULL(kernels_dir, "should open directory at %s", + kernels_path); + + T_SETUPEND; + + bool found = false; + struct dirent *entry = NULL; + while ((entry = readdir(kernels_dir)) != NULL) { + uuid_t bin_uuid; + bool ok = get_macho_uuid(kernels_path, entry->d_name, bin_uuid); + if (ok) { + uuid_string_t bin_uuid_str; + uuid_unparse(bin_uuid, bin_uuid_str); + T_LOG("%s: from %s%s", bin_uuid_str, kernels_path, entry->d_name); + if (uuid_compare(bin_uuid, kern_uuid) == 0) { + found = true; + T_PASS("UUID from %s%s matches kernel UUID", kernels_path, + entry->d_name); + } + } + } + if (!found) { + T_FAIL("failed to find kernel binary with UUID of the running kernel, " + "wrong kernel is booted"); + } +} diff --git a/tests/decompression_failure.c b/tests/decompression_failure.c new file mode 100644 index 000000000..1e753f7f1 --- /dev/null +++ b/tests/decompression_failure.c @@ -0,0 +1,172 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "excserver.h" +#include "exc_helpers.h" + +extern int pid_hibernate(int pid); + +static vm_address_t page_size; + +T_GLOBAL_META( + T_META_REQUIRES_SYSCTL_EQ("hw.optional.wkdm_popcount", 1) + ); + +static void +page_out(void) +{ + T_ASSERT_POSIX_SUCCESS(pid_hibernate(-2), NULL); + T_ASSERT_POSIX_SUCCESS(pid_hibernate(-2), NULL); +} + +static void +dirty_page(const vm_address_t address) +{ + assert((address & (page_size - 1)) == 0UL); + uint32_t *const page_as_u32 = (uint32_t *)address; + for (uint32_t i = 0; i < page_size / sizeof(uint32_t); i += 2) { + page_as_u32[i + 0] = i % 4; + page_as_u32[i + 1] = 0xcdcdcdcd; + } +} + +static bool +try_to_corrupt_page(vm_address_t page_va) +{ + int val; + size_t size = sizeof(val); + int result = sysctlbyname("vm.compressor_inject_error", &val, &size, + &page_va, sizeof(page_va)); + return result == 0; +} + +static vm_address_t +create_corrupted_region(const vm_address_t buffer_length) +{ + void *const bufferp = malloc(buffer_length); + T_ASSERT_NOTNULL(bufferp, "allocated test buffer"); + const vm_address_t 
buffer = (vm_address_t)bufferp; + + T_LOG("buffer address: %lx\n", (unsigned long)buffer); + + for (size_t buffer_offset = 0; buffer_offset < buffer_length; + buffer_offset += page_size) { + dirty_page(buffer + buffer_offset); + } + + page_out(); + + uint32_t corrupt = 0; + for (size_t buffer_offset = 0; buffer_offset < buffer_length; + buffer_offset += page_size) { + if (try_to_corrupt_page(buffer + buffer_offset)) { + corrupt++; + } + } + + T_LOG("corrupted %u/%lu pages. accessing...\n", corrupt, + (unsigned long)(buffer_length / page_size)); + if (corrupt == 0) { + T_SKIP("no pages corrupted"); + } + + return buffer; +} + +static bool +try_write(volatile uint32_t *word __unused) +{ +#ifdef __arm64__ + uint64_t val = 1; + __asm__ volatile ( + "str %w0, %1\n" + "mov %0, 0\n" + : "+r"(val) : "m"(*word)); + // The exception handler skips over the instruction that zeroes val when a + // decompression failure is detected. + return val == 0; +#else + return false; +#endif +} + +static void * +run_test(vm_address_t buffer_start, vm_address_t buffer_length) +{ + bool fault = false; + for (size_t buffer_offset = 0; buffer_offset < buffer_length; + buffer_offset += page_size) { + // Access pages until the fault is detected. + if (!try_write((volatile uint32_t *)(buffer_start + + buffer_offset))) { + T_LOG("test_thread breaking"); + fault = true; + break; + } + } + + if (!fault) { + T_SKIP("no faults"); + } + T_LOG("test thread completing"); + return NULL; +} + +static size_t +kern_memory_failure_handler( + exception_type_t exception, + mach_exception_data_t code) +{ + T_EXPECT_EQ(exception, EXC_BAD_ACCESS, + "Verified bad address exception"); + T_EXPECT_EQ((int)code[0], KERN_MEMORY_FAILURE, "caught KERN_MEMORY_FAILURE"); + T_PASS("received KERN_MEMORY_FAILURE from test thread"); + // Skip the next instruction as well so that the faulting code can detect + // the exception. + return 8; +} + +static void +run_test_expect_fault() +{ + mach_port_t exc_port = create_exception_port(EXC_MASK_BAD_ACCESS); + vm_address_t buffer_length = 10 * 1024ULL * 1024ULL; + vm_address_t buffer_start = create_corrupted_region(buffer_length); + + run_exception_handler(exc_port, kern_memory_failure_handler); + run_test(buffer_start, buffer_length); + free((void *)buffer_start); +} + + + +T_DECL(decompression_failure, + "Confirm that exception is raised on decompression failure", + // Disable software checks in development builds, as these would result in + // panics. + T_META_BOOTARGS_SET("vm_compressor_validation=0")) +{ + if (pid_hibernate(-2) != 0) { + T_SKIP("compressor not active"); + } + + int value; + size_t size = sizeof(value); + if (sysctlbyname("vm.compressor_inject_error", &value, &size, NULL, 0) + != 0) { + T_SKIP("vm.compressor_inject_error not present"); + } + + T_ASSERT_POSIX_SUCCESS(sysctlbyname("vm.pagesize", &value, &size, NULL, 0), + NULL); + T_ASSERT_EQ_ULONG(size, sizeof(value), NULL); + page_size = (vm_address_t)value; + + run_test_expect_fault(); +} diff --git a/tests/dirtiness_tracking.c b/tests/dirtiness_tracking.c new file mode 100644 index 000000000..76427652c --- /dev/null +++ b/tests/dirtiness_tracking.c @@ -0,0 +1,173 @@ +/* + * Copyright (c) 2019 Apple Computer, Inc. All rights reserved. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. 
The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. + * + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "../bsd/sys/kern_memorystatus.h" + +static int +read_event(int so) +{ + ssize_t status; + char buf[256]; + struct kern_event_msg *ev_msg = (struct kern_event_msg *)&buf[0]; + + status = recv(so, &buf, sizeof(buf), 0); + if (status == -1) { + T_LOG("recv() failed: %s", strerror(errno)); + return -1; + } + + if (ev_msg->total_size > status) { + T_LOG("missed SYSPROTO_EVENT event, buffer not big enough"); + return -1; + } + + if (ev_msg->vendor_code == KEV_VENDOR_APPLE && ev_msg->kev_class == KEV_SYSTEM_CLASS && ev_msg->kev_subclass == KEV_DIRTYSTATUS_SUBCLASS) { + if (ev_msg->event_code == kDirtyStatusChangeNote) { + dirty_status_change_event_t *ev_data = (dirty_status_change_event_t *)&ev_msg->event_data; + switch (ev_data->dsc_event_type) { + case kDirtyStatusChangedClean: + case kDirtyStatusChangedDirty: + break; + default: + T_LOG("Unknown event type %d", ev_data->dsc_event_type); + return -1; + } + T_LOG("Process: %s, status: %s, pages: %llu, timestamp: %llu, priority: %d", + ev_data->dsc_process_name, ev_data->dsc_event_type == kDirtyStatusChangedDirty ? 
"dirty" : "clean", ev_data->dsc_pages, ev_data->dsc_time, ev_data->dsc_priority); + return 1; + } else { + T_LOG("Ignoring message with code: %d", ev_msg->event_code); + } + } else { + T_LOG(("Unexpected event with vendor code: %d"), ev_msg->vendor_code); + return -1; + } + return 0; +} + + +T_DECL(dirtiness_tracking, + "Check if we are able to receive dirtiness-tracking events from the kernel") +{ + int so, status; + struct kev_request kev_req; + int enable_sysctl = 1; + + // First try enabling the dirtystatus_tracking sysctl if available + if (sysctlbyname("kern.dirtystatus_tracking_enabled", NULL, NULL, &enable_sysctl, sizeof(enable_sysctl)) != 0) { + T_SKIP("The kern.dirtystatus_tracking_enabled sysctl is not available, skipping..."); + } + /* Open an event socket */ + so = socket(PF_SYSTEM, SOCK_RAW, SYSPROTO_EVENT); + if (so != -1) { + /* establish filter to return all events */ + kev_req.vendor_code = KEV_VENDOR_APPLE; + kev_req.kev_class = KEV_SYSTEM_CLASS;/* Not used if vendor_code is 0 */ + kev_req.kev_subclass = KEV_DIRTYSTATUS_SUBCLASS; /* Not used if either kev_class OR vendor_code are 0 */ + status = ioctl(so, SIOCSKEVFILT, &kev_req); + if (status) { + so = -1; + T_FAIL("could not establish event filter, ioctl() failed: %s", strerror(errno)); + T_END; + } + } else { + T_FAIL("could not open event socket, socket() failed: %s", strerror(errno)); + T_END; + } + + if (so != -1) { + int yes = 1; + + status = ioctl(so, FIONBIO, &yes); + if (status) { + (void) close(so); + so = -1; + T_FAIL( "could not set non-blocking io, ioctl() failed: %s", strerror(errno)); + T_END; + } + } + + if (so == -1) { + T_FAIL("memory monitor disabled"); + T_END; + } + + + dispatch_queue_t queue = dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0); + + dispatch_source_t read_source = dispatch_source_create(DISPATCH_SOURCE_TYPE_READ, + (uintptr_t)so, 0, queue); + + dispatch_source_t timer = dispatch_source_create(DISPATCH_SOURCE_TYPE_TIMER, 0, 0, queue); + dispatch_source_set_timer(timer, dispatch_time(DISPATCH_TIME_NOW, 15 * NSEC_PER_SEC), 5 * NSEC_PER_SEC, 1 * NSEC_PER_SEC); + + dispatch_source_set_event_handler(read_source, ^{ + int rc = read_event(so); + if (rc != 0) { + dispatch_source_cancel(read_source); + if (rc == 1) { + T_PASS("Dirtiness-tracking Kevent successfully received"); + } else { + T_FAIL("Could not read from the system socket, aborting data collection"); + } + } + }); + + dispatch_source_set_cancel_handler(read_source, ^{ + close(so); + dispatch_cancel(timer); + T_END; + }); + + dispatch_activate(read_source); + + dispatch_source_set_event_handler(timer, ^{ + dispatch_cancel(read_source); + T_FAIL("Timeout expired, no events received from the kernel"); + T_END; + }); + dispatch_activate(timer); + + dispatch_main(); +} diff --git a/tests/driverkit/Makefile b/tests/driverkit/Makefile new file mode 100644 index 000000000..f58ffbd84 --- /dev/null +++ b/tests/driverkit/Makefile @@ -0,0 +1,100 @@ +PROJECT := xnu/darwintests + +ifdef BASEDSTROOT +override DSTROOT = $(BASEDSTROOT) +endif +INVALID_ARCHS = i386 +ENABLE_LTE_TESTS=YES + +OTHER_LTE_INCLUDE_FILES += \ + /System/Library/PrivateFrameworks/LoggingSupport.framework, \ + /System/Library/PrivateFrameworks/MobileKeyBag.framework, \ + /System/Library/Frameworks/IOSurface.framework, \ + /usr/local/lib/libdarwintest_utils.dylib, \ + /usr/lib/libapple_crypto.dylib, + +DEVELOPER_DIR ?= $(shell xcode-select -p) + +# the xnu build system will only ever call us with the default target +.DEFAULT_GOAL := install + +SDKROOT ?= 
driverkit.internal + +include $(DEVELOPER_DIR)/AppleInternal/Makefiles/darwintest/Makefile.common + +DRIVERKIT_DIR := $(TARGETSDK)/System/DriverKit +DRIVERKIT_TARGET := x86_64-apple-driverkit$(shell xcrun --sdk driverkit.internal --show-sdk-version) + +IIG := $(shell xcrun --sdk "$(SDKROOT)" -f iig) + +# Enumerate all directories in this folder, excluding the "build" directory +DEXT_SRCS = $(filter-out build,$(shell find . -type d -depth 1 | sed -e "s:./::g")) + +# hack: reuse the default CXXFLAGS and LDFLAGS but remove -mmacosx-version-min and -arch. Also adds a few other required flags +# These are used for both iig and clang +DEXT_SHARED_CXXFLAGS := $(filter-out -mmacosx-version-min=%, $(shell echo $(CXXFLAGS) $(OTHER_CXXFLAGS) | sed -e "s/-arch [a-zA-Z0-9_]*//g")) -isystem$(DRIVERKIT_DIR)/usr/include -iframework$(DRIVERKIT_DIR)/System/Library/Frameworks -std=gnu++14 + +# These are used just for clang +DEXT_CXXFLAGS := $(DEXT_SHARED_CXXFLAGS) -target $(DRIVERKIT_TARGET) + +# These are used just for iig +IIGFLAGS := -- $(DEXT_SHARED_CXXFLAGS) -D__IIG=1 -x c++ + +# Used just for clang. LDFLAGS are not needed for iig +DEXT_LDFLAGS := $(filter-out -mmacosx-version-min=%, $(shell echo $(LDFLAGS) $(OTHER_LDFLAGS) | sed -e "s/-arch [a-zA-Z0-9_]*//g")) -target $(DRIVERKIT_TARGET) -L$(DRIVERKIT_DIR)/usr/lib -F$(DRIVERKIT_DIR)/System/Library/Frameworks -framework DriverKit + + +# This generates rules to create dexts from each directory specified in DEXT_SRCS +define GENERATE_DEXT_RULE +## Given the following directory structure: +## test_driver_123/ +## Info.plist +## test_driver_123.entitlements +## [cpp and iig files] +## This produces a dext called com.apple.test_driver_123.dext: +## com.apple.test_driver_123.dext/ +## com.apple.test_driver_123 [dext executable] +## Info.plist +## _CodeSignature/ + +CUSTOM_TARGETS += com.apple.$1.dext + +com.apple.$1.dext : $(patsubst $1/%.cpp,$(OBJROOT)/$1/%.o,$(wildcard $1/*.cpp)) $(patsubst $1/%.iig,$(OBJROOT)/$1/DerivedSources/%.iig.o,$(wildcard $1/*.iig)) + # Create bundle directory + mkdir -p $(SYMROOT)/$$@ + # Link object files + $(CXX) $(DEXT_LDFLAGS) $$^ -o $(SYMROOT)/$$@/com.apple.$1 + # Copy Info.plist and sign + cp $1/Info.plist $(SYMROOT)/$$@ + codesign -vvv --force --sign - --entitlements $1/$1.entitlements --timestamp=none $(SYMROOT)/$$@ + +install-com.apple.$1.dext: com.apple.$1.dext + mkdir -p $(INSTALLDIR) + cp -R $(SYMROOT)/com.apple.$1.dext $(INSTALLDIR) + +$(OBJROOT)/$1/DerivedSources/%.iig.o: $(OBJROOT)/$1/DerivedSources/%.iig.cpp + mkdir -p $(OBJROOT)/$1/DerivedSources + # Compile *.iig.cpp to object file + $(CXX) $(DEXT_CXXFLAGS) -I$1/ -I$(OBJROOT)/$1/DerivedSources -c $$^ -o $$@ + +$(OBJROOT)/$1/DerivedSources/%.iig.cpp: $1/%.iig + mkdir -p $(OBJROOT)/$1/DerivedSources + # Generate *.iig.cpp and *.h header files from *.iig + $(IIG) --def $$^ --impl $$@ --header $$(patsubst %.iig.cpp,%.h,$$@) $(IIGFLAGS) + +# Tell make not to delete the intermediate *.iig.cpp file since it is useful for debugging +.PRECIOUS :: $(OBJROOT)/$1/DerivedSources/%.iig.cpp + +$(OBJROOT)/$1/%.o: $1/%.cpp $(patsubst $1/%.iig,$(OBJROOT)/$1/DerivedSources/%.iig.o,$(wildcard $1/*.iig)) + # Compile c++ file. 
The additional dependency is for headers emitted by iig + $(CXX) $(DEXT_CXXFLAGS) -I$1/ -I$(OBJROOT)/$1/DerivedSources -c $$< -o $$@ +endef + + +ifeq ($(PLATFORM),MacOSX) +$(foreach DEXTSRCDIR,$(DEXT_SRCS),$(eval $(call GENERATE_DEXT_RULE,$(DEXTSRCDIR)))) +else +EXCLUDED_SOURCES += $(DEXT_SRCS) +endif + +include $(DEVELOPER_DIR)/AppleInternal/Makefiles/darwintest/Makefile.targets diff --git a/tests/driverkit/test_intentionally_crashing_driver_56101852/Info.plist b/tests/driverkit/test_intentionally_crashing_driver_56101852/Info.plist new file mode 100644 index 000000000..d4a53465d --- /dev/null +++ b/tests/driverkit/test_intentionally_crashing_driver_56101852/Info.plist @@ -0,0 +1,70 @@ + + + + + BuildMachineOSBuild + 19A582a + CFBundleDevelopmentRegion + en + CFBundleExecutable + com.apple.test_intentionally_crashing_driver_56101852 + CFBundleIdentifier + com.apple.test_intentionally_crashing_driver_56101852 + CFBundleInfoDictionaryVersion + 6.0 + CFBundleName + com.apple.test_intentionally_crashing_driver_56101852 + CFBundlePackageType + DEXT + CFBundleShortVersionString + 1.0 + CFBundleSupportedPlatforms + + MacOSX + + CFBundleVersion + 1 + DTCompiler + com.apple.compilers.llvm.clang.1_0 + DTPlatformBuild + 12A5026a + DTPlatformName + macosx + DTPlatformVersion + 10.16 + DTSDKBuild + + DTSDKName + driverkit.macosx20.0 + DTXcode + 1200 + DTXcodeBuild + 12A5026a + IOKitPersonalities + + test_intentionally_crashing_driver_56101852 + + CFBundleIdentifier + com.apple.test_intentionally_crashing_driver_56101852 + CFBundleIdentifierKernel + com.apple.kpi.iokit + IOClass + IOUserService + IOMatchCategory + com.apple.test_intentionally_crashing_driver_56101852 + IOProviderClass + IOUserResources + IOResourceMatch + IOKit + IOUserClass + test_intentionally_crashing_driver_56101852 + IOUserServerName + com.apple.test_intentionally_crashing_driver_56101852 + + + OSBundleUsageDescription + + OSMinimumDriverKitVersion + 20.0 + + diff --git a/tests/driverkit/test_intentionally_crashing_driver_56101852/test_intentionally_crashing_driver_56101852.cpp b/tests/driverkit/test_intentionally_crashing_driver_56101852/test_intentionally_crashing_driver_56101852.cpp new file mode 100644 index 000000000..96e21dc20 --- /dev/null +++ b/tests/driverkit/test_intentionally_crashing_driver_56101852/test_intentionally_crashing_driver_56101852.cpp @@ -0,0 +1,30 @@ +// +// test_intentionally_crashing_driver_56101852.cpp +// test_intentionally_crashing_driver_56101852 +// +// Copyright © 2019 Apple Inc. All rights reserved. 
+// + +#include + +#include +#include + +#include "test_intentionally_crashing_driver_56101852.h" + +kern_return_t +IMPL(test_intentionally_crashing_driver_56101852, Start) +{ + kern_return_t ret; + ret = Start(provider, SUPERDISPATCH); + os_log(OS_LOG_DEFAULT, "Hello World"); + return ret; +} + +/* Intentionally crash */ +__attribute__((constructor)) void +crash() +{ + /* cause SIGILL */ + __builtin_trap(); +} diff --git a/tests/driverkit/test_intentionally_crashing_driver_56101852/test_intentionally_crashing_driver_56101852.entitlements b/tests/driverkit/test_intentionally_crashing_driver_56101852/test_intentionally_crashing_driver_56101852.entitlements new file mode 100644 index 000000000..a34733c79 --- /dev/null +++ b/tests/driverkit/test_intentionally_crashing_driver_56101852/test_intentionally_crashing_driver_56101852.entitlements @@ -0,0 +1,10 @@ + + + + + com.apple.developer.driverkit + + com.apple.security.app-sandbox + + + diff --git a/tests/driverkit/test_intentionally_crashing_driver_56101852/test_intentionally_crashing_driver_56101852.iig b/tests/driverkit/test_intentionally_crashing_driver_56101852/test_intentionally_crashing_driver_56101852.iig new file mode 100644 index 000000000..1ebf4fbe8 --- /dev/null +++ b/tests/driverkit/test_intentionally_crashing_driver_56101852/test_intentionally_crashing_driver_56101852.iig @@ -0,0 +1,21 @@ +// +// test_intentionally_crashing_driver_56101852.iig +// test_intentionally_crashing_driver_56101852 +// +// Copyright © 2019 Apple Inc. All rights reserved. +// + +#ifndef test_intentionally_crashing_driver_56101852_h +#define test_intentionally_crashing_driver_56101852_h + +#include +#include + +class test_intentionally_crashing_driver_56101852: public IOService +{ +public: + virtual kern_return_t + Start(IOService * provider) override; +}; + +#endif /* test_intentionally_crashing_driver_56101852_h */ diff --git a/tests/exc_helpers.c b/tests/exc_helpers.c new file mode 100644 index 000000000..6084fef4b --- /dev/null +++ b/tests/exc_helpers.c @@ -0,0 +1,256 @@ +/* + * Copyright (c) 2019 Apple Computer, Inc. All rights reserved. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. + * + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. 
+ * + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ + */ + +#include "exc_helpers.h" + +#include +#include + +#if __arm64__ +#define EXCEPTION_THREAD_STATE ARM_THREAD_STATE64 +#define EXCEPTION_THREAD_STATE_COUNT ARM_THREAD_STATE64_COUNT +#elif __arm__ +#define EXCEPTION_THREAD_STATE ARM_THREAD_STATE +#define EXCEPTION_THREAD_STATE_COUNT ARM_THREAD_STATE_COUNT +#elif __x86_64__ +#define EXCEPTION_THREAD_STATE x86_THREAD_STATE +#define EXCEPTION_THREAD_STATE_COUNT x86_THREAD_STATE_COUNT +#else +#error Unsupported architecture +#endif + +/** + * mach_exc_server() is a MIG-generated function that verifies the message + * that was received is indeed a mach exception and then calls + * catch_mach_exception_raise_state() to handle the exception. + */ +extern boolean_t mach_exc_server(mach_msg_header_t *, mach_msg_header_t *); + +extern kern_return_t +catch_mach_exception_raise( + mach_port_t exception_port, + mach_port_t thread, + mach_port_t task, + exception_type_t type, + exception_data_t codes, + mach_msg_type_number_t code_count); + +extern kern_return_t +catch_mach_exception_raise_state( + mach_port_t exception_port, + exception_type_t type, + exception_data_t codes, + mach_msg_type_number_t code_count, + int *flavor, + thread_state_t in_state, + mach_msg_type_number_t in_state_count, + thread_state_t out_state, + mach_msg_type_number_t *out_state_count); + +extern kern_return_t +catch_mach_exception_raise_state_identity( + mach_port_t exception_port, + mach_port_t thread, + mach_port_t task, + exception_type_t type, + exception_data_t codes, + mach_msg_type_number_t code_count, + int *flavor, + thread_state_t in_state, + mach_msg_type_number_t in_state_count, + thread_state_t out_state, + mach_msg_type_number_t *out_state_count); + +static exc_handler_callback_t exc_handler_callback; + +/** + * This has to be defined for linking purposes, but it's unused. + */ +kern_return_t +catch_mach_exception_raise( + mach_port_t exception_port, + mach_port_t thread, + mach_port_t task, + exception_type_t type, + exception_data_t codes, + mach_msg_type_number_t code_count) +{ +#pragma unused(exception_port, thread, task, type, codes, code_count) + T_FAIL("Triggered catch_mach_exception_raise() which shouldn't happen..."); + __builtin_unreachable(); +} + +/** + * Called by mach_exc_server() to handle the exception. This will call the + * test's exception-handler callback and will then modify + * the thread state to move to the next instruction. + */ +kern_return_t +catch_mach_exception_raise_state( + mach_port_t exception_port __unused, + exception_type_t type, + exception_data_t codes, + mach_msg_type_number_t code_count, + int *flavor, + thread_state_t in_state, + mach_msg_type_number_t in_state_count, + thread_state_t out_state, + mach_msg_type_number_t *out_state_count) +{ + T_LOG("Caught a mach exception!\n"); + + /* There should only be two code values. */ + T_ASSERT_EQ(code_count, 2, "Two code values were provided with the mach exception"); + + /** + * The code values should be 64-bit since MACH_EXCEPTION_CODES was specified + * when setting the exception port. + */ + mach_exception_data_t codes_64 = (mach_exception_data_t)(void *)codes; + T_LOG("Mach exception codes[0]: %#llx, codes[1]: %#llx\n", codes_64[0], codes_64[1]); + + /* Verify that we're receiving the expected thread state flavor. 
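+	 * (With the EXCEPTION_STATE behavior the kernel hands the faulting thread's
+	 * register state to the handler and accepts a modified copy back in the
+	 * reply, which is what lets this function advance the PC below without
+	 * suspending or otherwise manipulating the victim thread directly.)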
*/ + T_ASSERT_EQ(*flavor, EXCEPTION_THREAD_STATE, "The thread state flavor is EXCEPTION_THREAD_STATE"); + T_ASSERT_EQ(in_state_count, EXCEPTION_THREAD_STATE_COUNT, "The thread state count is EXCEPTION_THREAD_STATE_COUNT"); + + size_t advance_pc = exc_handler_callback(type, codes_64); + + /** + * Increment the PC by the requested amount so the thread doesn't cause + * another exception when it resumes. + */ + *out_state_count = in_state_count; /* size of state object in 32-bit words */ + memcpy((void*)out_state, (void*)in_state, in_state_count * 4); + +#if __arm64__ + arm_thread_state64_t *state = (arm_thread_state64_t*)(void *)out_state; + + void *pc = (void*)(arm_thread_state64_get_pc(*state) + advance_pc); + /* Have to sign the new PC value when pointer authentication is enabled. */ + pc = ptrauth_sign_unauthenticated(pc, ptrauth_key_function_pointer, 0); + arm_thread_state64_set_pc_fptr(*state, pc); +#else + T_FAIL("catch_mach_exception_raise_state() not fully implemented on this architecture"); + __builtin_unreachable(); +#endif + + /* Return KERN_SUCCESS to tell the kernel to keep running the victim thread. */ + return KERN_SUCCESS; +} + +/** + * This has to be defined for linking purposes, but it's unused. + */ +kern_return_t +catch_mach_exception_raise_state_identity( + mach_port_t exception_port, + mach_port_t thread, + mach_port_t task, + exception_type_t type, + exception_data_t codes, + mach_msg_type_number_t code_count, + int *flavor, + thread_state_t in_state, + mach_msg_type_number_t in_state_count, + thread_state_t out_state, + mach_msg_type_number_t *out_state_count) +{ +#pragma unused(exception_port, thread, task, type, codes, code_count, flavor, in_state, in_state_count, out_state, out_state_count) + T_FAIL("Triggered catch_mach_exception_raise_state_identity() which shouldn't happen..."); + __builtin_unreachable(); +} + +mach_port_t +create_exception_port(exception_mask_t exception_mask) +{ + mach_port_t exc_port = MACH_PORT_NULL; + mach_port_t task = mach_task_self(); + mach_port_t thread = mach_thread_self(); + kern_return_t kr = KERN_SUCCESS; + + /* Create the mach port the exception messages will be sent to. */ + kr = mach_port_allocate(task, MACH_PORT_RIGHT_RECEIVE, &exc_port); + T_ASSERT_MACH_SUCCESS(kr, "Allocated mach exception port"); + + /** + * Insert a send right into the exception port that the kernel will use to + * send the exception thread the exception messages. + */ + kr = mach_port_insert_right(task, exc_port, exc_port, MACH_MSG_TYPE_MAKE_SEND); + T_ASSERT_MACH_SUCCESS(kr, "Inserted a SEND right into the exception port"); + + /* Tell the kernel what port to send exceptions to. */ + kr = thread_set_exception_ports( + thread, + exception_mask, + exc_port, + (exception_behavior_t)(EXCEPTION_STATE | MACH_EXCEPTION_CODES), + EXCEPTION_THREAD_STATE); + T_ASSERT_MACH_SUCCESS(kr, "Set the exception port to my custom handler"); + + return exc_port; +} + +/** + * Thread to handle the mach exception. + * + * @param arg The exception port to wait for a message on. + */ +static void * +exc_server_thread(void *arg) +{ + mach_port_t exc_port = (mach_port_t)arg; + + /** + * mach_msg_server_once is a helper function provided by libsyscall that + * handles creating mach messages, blocks waiting for a message on the + * exception port, calls mach_exc_server() to handle the exception, and + * sends a reply based on the return value of mach_exc_server(). 
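+	 * Note that mach_msg_server_once() returns after servicing a single request,
+	 * so each call to run_exception_handler() handles exactly one exception
+	 * message before this detached server thread exits.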
+ */ +#define MACH_MSG_REPLY_SIZE 4096 + kern_return_t kr = mach_msg_server_once(mach_exc_server, MACH_MSG_REPLY_SIZE, exc_port, 0); + T_ASSERT_MACH_SUCCESS(kr, "Received mach exception message"); + + pthread_exit((void*)0); + __builtin_unreachable(); +} + +void +run_exception_handler(mach_port_t exc_port, exc_handler_callback_t callback) +{ + exc_handler_callback = callback; + + pthread_t exc_thread; + + /* Spawn the exception server's thread. */ + int err = pthread_create(&exc_thread, (pthread_attr_t*)0, exc_server_thread, (void*)(uintptr_t)exc_port); + T_ASSERT_POSIX_ZERO(err, "Spawned exception server thread"); + + /* No need to wait for the exception server to be joined when it exits. */ + pthread_detach(exc_thread); +} diff --git a/tests/exc_helpers.h b/tests/exc_helpers.h new file mode 100644 index 000000000..2ac27c8ad --- /dev/null +++ b/tests/exc_helpers.h @@ -0,0 +1,55 @@ +/* + * Copyright (c) 2019 Apple Computer, Inc. All rights reserved. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. + * + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ + */ + +#ifndef EXC_HELPERS_H +#define EXC_HELPERS_H + +#include +#include +#include + +/** + * Callback invoked by run_exception_handler() when a Mach exception is + * received. 
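+ *
+ * A minimal handler might look like the following (illustrative sketch only,
+ * not code that ships in this patch); it accepts the fault and asks for the
+ * PC to be advanced past one 4-byte arm64 instruction:
+ *
+ *   static size_t
+ *   my_handler(exception_type_t type, mach_exception_data_t codes)
+ *   {
+ *       T_ASSERT_EQ(type, EXC_BAD_ACCESS, "expected a bad-access exception");
+ *       T_LOG("codes: %#llx %#llx", codes[0], codes[1]);
+ *       return 4;
+ *   }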
+ * + * @param type exception type received from the kernel + * @param codes exception codes received from the kernel + * + * @return how much the exception handler should advance the program + * counter, in bytes (in order to move past the code causing the + * exception) + */ +typedef size_t (*exc_handler_callback_t)(exception_type_t type, mach_exception_data_t codes); + +mach_port_t +create_exception_port(exception_mask_t exception_mask); + +void +run_exception_handler(mach_port_t exc_port, exc_handler_callback_t callback); + +#endif /* EXC_HELPERS_H */ diff --git a/tests/fd.c b/tests/fd.c new file mode 100644 index 000000000..7f58a64e8 --- /dev/null +++ b/tests/fd.c @@ -0,0 +1,94 @@ +#include +#include +#include +#include + +T_GLOBAL_META( + T_META_RUN_CONCURRENTLY(true), + T_META_LTEPHASE(LTE_POSTINIT) + ); + +static void * +fd_select_close_helper(void *ctx) +{ + int fd = *(int *)ctx; + + // wait for the thread to enter select + usleep(500000); + close(fd); + + return NULL; +} + +T_DECL(fd_select_close, "Test for 54795873: make sure close breaks out of select") +{ + fd_set read_fd; + int pair[2], rc; + pthread_t th; + + rc = socketpair(PF_LOCAL, SOCK_STREAM, 0, pair); + T_ASSERT_POSIX_SUCCESS(rc, "socketpair"); + + pthread_create(&th, NULL, fd_select_close_helper, pair); + + FD_ZERO(&read_fd); + FD_SET(pair[0], &read_fd); + + rc = select(pair[0] + 1, &read_fd, NULL, NULL, NULL); + T_EXPECT_POSIX_FAILURE(rc, EBADF, "select broke out with EBADF"); +} + +static void * +fd_stress_dup2_close_fun(void *ctx) +{ + int thno = (int)(long)ctx; + int count = 10000, rc; + + for (int i = 1; i <= count; i++) { + rc = dup2(STDIN_FILENO, 42); + T_QUIET; T_EXPECT_POSIX_SUCCESS(rc, "dup2(%d, 42)", STDIN_FILENO); + if (thno == 3) { + rc = close(42); + if (rc == -1) { + T_QUIET; T_EXPECT_POSIX_FAILURE(rc, EBADF, "close(42)"); + } + } + if (i % 1000 == 0) { + T_LOG("thread %d: %d/%d dups\n", thno, i, count); + } + } + + return NULL; +} + +T_DECL(fd_stress_dup2_close, "Stress test races between dup2 and close") +{ + pthread_t th[4]; + int rc; + + for (int i = 0; i < 4; i++) { + rc = pthread_create(&th[i], NULL, + fd_stress_dup2_close_fun, (void *)(long)i); + T_ASSERT_POSIX_ZERO(rc, "pthread_create"); + } + + for (int i = 0; i < 4; i++) { + pthread_join(th[i], NULL); + } +} + +T_DECL(fd_dup2_erase_clofork_58446996, + "Make sure dup2() doesn't inherit flags from an old fd") +{ + int fd1, fd2; + + fd1 = open("/dev/null", O_RDONLY | O_CLOEXEC); + T_ASSERT_POSIX_SUCCESS(fd1, "open(/dev/null)"); + + fd2 = open("/dev/null", O_RDONLY | O_CLOEXEC); + T_ASSERT_POSIX_SUCCESS(fd2, "open(/dev/null)"); + + T_ASSERT_POSIX_SUCCESS(dup2(fd1, fd2), "dup2(fd1, fd2)"); + T_EXPECT_EQ(fcntl(fd2, F_GETFD, 0), 0, + "neither FD_CLOEXEC nor FD_CLOFORK should be set"); +} diff --git a/tests/fd_aio_fsync_uaf.c b/tests/fd_aio_fsync_uaf.c new file mode 100644 index 000000000..3fb4b6a4d --- /dev/null +++ b/tests/fd_aio_fsync_uaf.c @@ -0,0 +1,71 @@ +/* + * Proof of Concept / Test Case + * XNU: aio_work_thread use-after-free for AIO_FSYNC entries + */ +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +T_GLOBAL_META( + T_META_NAMESPACE("xnu.vfs"), + T_META_RUN_CONCURRENTLY(true)); + +#define NREQUESTS 8 + +static void +attempt(int fd) +{ + struct aiocb ap[NREQUESTS]; + size_t n; + unsigned char c; + + for (n = 0; n < NREQUESTS; ++n) { + ap[n].aio_fildes = fd; + ap[n].aio_nbytes = 1; + ap[n].aio_buf = &c; + ap[n].aio_sigevent.sigev_notify = SIGEV_NONE; + } + + /* + * fire them 
off and exit. + */ + for (n = 0; n < NREQUESTS; ++n) { + aio_fsync((n & 1) ? O_SYNC : O_DSYNC, &ap[n]); + } + + exit(0); +} + +T_DECL(lio_listio_race_63669270, "test for the lightspeed/unc0ver UaF") +{ + pid_t child; + int fd; + char path[128]; + uint64_t end = clock_gettime_nsec_np(CLOCK_UPTIME_RAW) + 10 * NSEC_PER_SEC; + + /* we need a valid fd: */ + strcpy(path, "/tmp/aio_fsync_uaf.XXXXXX"); + T_EXPECT_POSIX_SUCCESS(fd = mkstemp(path), "mkstemp"); + T_EXPECT_POSIX_SUCCESS(unlink(path), "unlink"); + + T_LOG("starting..."); + do { + switch ((child = fork())) { + case -1: T_FAIL("fork"); + case 0: attempt(fd); + } + + T_QUIET; T_EXPECT_POSIX_SUCCESS(waitpid(child, NULL, 0), "waitpid"); + } while (clock_gettime_nsec_np(CLOCK_UPTIME_RAW) < end); + + T_PASS("the system didn't panic"); +} diff --git a/tests/fduiomove.c b/tests/fduiomove.c new file mode 100644 index 000000000..12bc50868 --- /dev/null +++ b/tests/fduiomove.c @@ -0,0 +1,26 @@ +#include +#include +#include +#include +#include +#include +#include + +T_GLOBAL_META( + T_META_RUN_CONCURRENTLY(true), + T_META_LTEPHASE(LTE_POSTINIT) + ); + +T_DECL(fd_invalid_pread, "Test for 66711697: make sure we get EFAULT") +{ + int fd; + ssize_t rc; + + fd = open(*_NSGetProgname(), O_RDONLY); + T_ASSERT_POSIX_SUCCESS(fd, "open(self)"); + + rc = pread(fd, (void *)~0, 64 << 10, 0); + T_ASSERT_POSIX_FAILURE(rc, EFAULT, "pread should fail with EFAULT"); + + close(fd); +} diff --git a/tests/filter_policy.c b/tests/filter_policy.c new file mode 100644 index 000000000..f61d1a2ff --- /dev/null +++ b/tests/filter_policy.c @@ -0,0 +1,178 @@ +#include +#include +#include +#include +#include +#include +#include + +#ifdef T_NAMESPACE +#undef T_NAMESPACE +#endif + +#include +#include +#include + +#define MACH_RCV_OPTIONS (MACH_RCV_MSG | MACH_RCV_LARGE | MACH_RCV_LARGE_IDENTITY | \ + MACH_RCV_TRAILER_ELEMENTS(MACH_RCV_TRAILER_AV) | \ + MACH_RCV_TRAILER_TYPE(MACH_MSG_TRAILER_FORMAT_0)) + +T_GLOBAL_META(T_META_RUN_CONCURRENTLY(true), T_META_NAMESPACE("xnu.ipc")); + +typedef struct { + mach_msg_header_t header; + mach_msg_mac_trailer_t trailer; // subtract this when sending +} ipc_simple_message; + +static ipc_simple_message icm_request = {}; + +struct args { + const char *progname; + int verbose; + int num_msgs; + char *server_port_name; + mach_port_t server_port; + int request_msg_size; + void *request_msg; +}; + +void parse_args(struct args *args); +void* create_buffer(int *buffer_size); +void client(struct args *args); +void server_setup(struct args* args); +void *server(void *thread_args); + +void +parse_args(struct args *args) +{ + args->verbose = 0; + args->server_port_name = "TEST_FILTER_POLICY"; + args->server_port = MACH_PORT_NULL; + args->num_msgs = 1; + args->request_msg_size = sizeof(ipc_simple_message); + args->request_msg = &icm_request; +} + +/* Create a mach IPC listener which will respond to the client's message */ +void +server_setup(struct args* args) +{ + kern_return_t ret; + mach_port_t bsport; + + ret = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, + &args->server_port); + T_ASSERT_MACH_SUCCESS(ret, "server: mach_port_allocate()"); + + ret = mach_port_insert_right(mach_task_self(), args->server_port, args->server_port, + MACH_MSG_TYPE_MAKE_SEND); + T_ASSERT_MACH_SUCCESS(ret, "server: mach_port_insert_right()"); + + ret = task_get_bootstrap_port(mach_task_self(), &bsport); + T_ASSERT_MACH_SUCCESS(ret, "server: task_get_bootstrap_port()"); + + ret = bootstrap_register(bsport, args->server_port_name, args->server_port); + 
T_ASSERT_MACH_SUCCESS(ret, "server: bootstrap_register()"); + + T_LOG("server: waiting for IPC messages from client on port '%s'.\n", + args->server_port_name); +} + +/* Server process loop + * + * Listens for message. + * + */ +void * +server(void *thread_args) +{ + mach_msg_header_t *request; + mach_msg_option_t rcvoption = MACH_RCV_OPTIONS; + kern_return_t ret; + mach_msg_trailer_t *tlr = NULL; + mach_msg_mac_trailer_t *mac_tlr; + mach_msg_filter_id filter_policy_id = 0; + pid_t pid = getpid(); + struct args *args = (struct args*)thread_args; + + request = (mach_msg_header_t *)args->request_msg; + + T_LOG("server(%d): Awaiting message", pid); + ret = mach_msg(request, + rcvoption, + 0, + sizeof(ipc_simple_message), + args->server_port, + MACH_MSG_TIMEOUT_NONE, + MACH_PORT_NULL); + + T_ASSERT_MACH_SUCCESS(ret, "server: mach_msg receive"); + T_ASSERT_EQ(request->msgh_id, 500, "server: msg id = %d", request->msgh_id); + + tlr = (mach_msg_trailer_t *)((unsigned char *)request + + round_msg(request->msgh_size)); + // The trailer should always be of format zero. + if (tlr->msgh_trailer_type == MACH_MSG_TRAILER_FORMAT_0) { + if (tlr->msgh_trailer_size >= sizeof(mach_msg_mac_trailer_t)) { + mac_tlr = (mach_msg_mac_trailer_t *)tlr; + filter_policy_id = mac_tlr->msgh_ad; + } + } + + T_LOG("server: received the filter policy id = %d", filter_policy_id); + T_ASSERT_EQ(filter_policy_id, MACH_MSG_FILTER_POLICY_ALLOW, "server: filter policy allow sentinel"); + mach_msg_destroy(request); + + return NULL; +} + +T_HELPER_DECL(client_not_filtered, "Send a message to the server which shouldn't be filtered") +{ + T_LOG("client(%d): Prepare to send a message", getpid()); + struct args args = {}; + mach_port_t bsport; + + parse_args(&args); + args.request_msg_size -= sizeof(mach_msg_mac_trailer_t); + + //Find the bootstrap port + kern_return_t ret = task_get_bootstrap_port(mach_task_self(), &bsport); + T_ASSERT_MACH_SUCCESS(ret, "client: task_get_bootstrap_port()"); + + //Look up the service port + ret = bootstrap_look_up(bsport, (char *)args.server_port_name, + &args.server_port); + T_ASSERT_MACH_SUCCESS(ret, "client: bootstrap_look_up()"); + + //Construct the message + mach_msg_header_t *request = (mach_msg_header_t *)args.request_msg; + request->msgh_bits = MACH_MSGH_BITS_SET(MACH_MSG_TYPE_COPY_SEND, 0, 0, 0); + request->msgh_size = (mach_msg_size_t)args.request_msg_size; + request->msgh_remote_port = args.server_port; + request->msgh_local_port = MACH_PORT_NULL; + request->msgh_id = 500; + + T_LOG("client: Sending request"); + ret = mach_msg_send(request); + T_ASSERT_MACH_SUCCESS(ret, "client: mach_msg_send()"); +} + +T_DECL(filter_policy_id, "Send a message and check the filter policy id received in the trailer") +{ + struct args args = {}; + dt_helper_t helpers[1]; + pthread_t server_thread; + + T_SETUPBEGIN; + parse_args(&args); + server_setup(&args); + T_SETUPEND; + + helpers[0] = dt_fork_helper("client_not_filtered"); + int ret = pthread_create(&server_thread, NULL, server, &args); + T_ASSERT_POSIX_SUCCESS(ret, "pthread_create server_thread"); + pthread_detach(server_thread); + + dt_run_helpers(helpers, 1, 30); +} diff --git a/tests/flow_div_doubleconnect_55917185.c b/tests/flow_div_doubleconnect_55917185.c new file mode 100644 index 000000000..c26dc62b4 --- /dev/null +++ b/tests/flow_div_doubleconnect_55917185.c @@ -0,0 +1,63 @@ +#include + +#include +#include +#include +#include +#include + +#include + +/* we should win the race in this window: */ +#define NTRIES 200000 + +static void * 
+connect_race(void *data) +{ + int *ps = data; + struct sockaddr_ctl sc = { + .sc_id = 1 /* com.apple.flow-divert */ + }; + int n; + + for (n = 0; n < NTRIES; ++n) { + connect(*ps, (const struct sockaddr *)&sc, sizeof(sc)); + } + + return NULL; +} + +T_DECL(flow_div_doubleconnect_55917185, "Bad error path in double-connect for flow_divert_kctl_connect") +{ + int s = -1; + int tmp_s; + struct sockaddr_ctl sc = { + .sc_id = 1 /* com.apple.flow-divert */ + }; + pthread_t t; + int n; + + T_SETUPBEGIN; + T_ASSERT_POSIX_ZERO(pthread_create(&t, NULL, connect_race, &s), NULL); + T_SETUPEND; + + for (n = 0; n < NTRIES; ++n) { + T_ASSERT_POSIX_SUCCESS(tmp_s = socket(AF_SYSTEM, SOCK_DGRAM, SYSPROTO_CONTROL), NULL); + + /* + * this bind will fail, but that's ok because it initialises + * kctl: + */ + bind(tmp_s, (const struct sockaddr *)&sc, sizeof(sc)); + + /* this is what we're racing the other thread for: */ + s = tmp_s; + connect(s, (const struct sockaddr *)&sc, sizeof(sc)); + + T_ASSERT_POSIX_SUCCESS(close(s), NULL); + s = -1; + } + + T_ASSERT_POSIX_ZERO(pthread_join(t, NULL), NULL); + T_PASS("flow_divert_kctl_connect race didn't trigger panic"); +} diff --git a/tests/fp_exception.c b/tests/fp_exception.c index 5010d9f8a..12f09d0e8 100644 --- a/tests/fp_exception.c +++ b/tests/fp_exception.c @@ -28,6 +28,7 @@ /** * On devices that support it, this test ensures that a mach exception is * generated when an ARMv8 floating point exception is triggered. + * Also verifies that the main thread's FPCR value matches its expected default. */ #include #include @@ -36,203 +37,81 @@ #include #include #include -#include -#include +#include +#include -#if __has_feature(ptrauth_calls) -#include -#endif +#include "exc_helpers.h" T_GLOBAL_META(T_META_RUN_CONCURRENTLY(true)); /* The bit to set in FPCR to enable the divide-by-zero floating point exception. */ #define FPCR_DIV_EXC 0x200 +#define FPCR_INIT (0x0) /* Whether we caught the EXC_ARITHMETIC mach exception or not. */ static volatile bool mach_exc_caught = false; -/** - * mach_exc_server() is a MIG-generated function that verifies the message - * that was received is indeed a mach exception and then calls - * catch_mach_exception_raise_state() to handle the exception. - */ -extern boolean_t mach_exc_server(mach_msg_header_t *, mach_msg_header_t *); - -/** - * This has to be defined for linking purposes, but it's unused in this test. - */ -kern_return_t -catch_mach_exception_raise( - mach_port_t exception_port, - mach_port_t thread, - mach_port_t task, - exception_type_t type, - exception_data_t codes, - mach_msg_type_number_t code_count) -{ -#pragma unused(exception_port, thread, task, type, codes, code_count) - T_FAIL("Triggered catch_mach_exception_raise() which shouldn't happen..."); - __builtin_unreachable(); -} - -/** - * Called by mach_exc_server() to handle the exception. This will verify the - * exception is a floating point divide-by-zero exception and will then modify - * the thread state to move to the next instruction. 
- */ -kern_return_t -catch_mach_exception_raise_state( - mach_port_t exception_port, +#ifdef __arm64__ +static size_t +exc_arithmetic_handler( exception_type_t type, - exception_data_t codes, - mach_msg_type_number_t code_count, - int *flavor, - thread_state_t in_state, - mach_msg_type_number_t in_state_count, - thread_state_t out_state, - mach_msg_type_number_t *out_state_count) + mach_exception_data_t codes_64) { -#pragma unused(exception_port, type, codes, code_count, flavor, in_state, in_state_count, out_state, out_state_count) -#ifdef __arm64__ - T_LOG("Caught a mach exception!\n"); - /* Floating point divide by zero should cause an EXC_ARITHMETIC exception. */ T_ASSERT_EQ(type, EXC_ARITHMETIC, "Caught an EXC_ARITHMETIC exception"); - /* There should only be two code vales. */ - T_ASSERT_EQ(code_count, 2, "Two code values were provided with the mach exception"); - - /** - * The code values should be 64-bit since MACH_EXCEPTION_CODES was specified - * when setting the exception port. - */ - uint64_t *codes_64 = (uint64_t*)codes; - T_LOG("Mach exception codes[0]: %#llx, codes[1]: %#llx\n", codes_64[0], codes_64[1]); - - /* Verify that we're receiving 64-bit ARM thread state values. */ - T_ASSERT_EQ(*flavor, ARM_THREAD_STATE64, "The thread state flavor is ARM_THREAD_STATE64"); - T_ASSERT_EQ(in_state_count, ARM_THREAD_STATE64_COUNT, "The thread state count is ARM_THREAD_STATE64_COUNT"); - /* Verify the exception is a floating point divide-by-zero exception. */ - T_ASSERT_EQ(codes_64[0], EXC_ARM_FP_DZ, "The subcode is EXC_ARM_FP_DZ (floating point divide-by-zero)"); - - /** - * Increment the PC to the next instruction so the thread doesn't cause - * another exception when it resumes. - */ - *out_state_count = in_state_count; /* size of state object in 32-bit words */ - memcpy((void*)out_state, (void*)in_state, in_state_count * 4); - arm_thread_state64_t *state = (arm_thread_state64_t*)out_state; - - void *pc = (void*)(arm_thread_state64_get_pc(*state) + 4); -#if __has_feature(ptrauth_calls) - /* Have to sign the new PC value when pointer authentication is enabled. */ - pc = ptrauth_sign_unauthenticated(pc, ptrauth_key_function_pointer, 0); -#endif - arm_thread_state64_set_pc_fptr(*state, pc); + T_ASSERT_EQ(codes_64[0], (mach_exception_data_type_t)EXC_ARM_FP_DZ, "The subcode is EXC_ARM_FP_DZ (floating point divide-by-zero)"); mach_exc_caught = true; -#endif /* __arm64__ */ - - /* Return KERN_SUCCESS to tell the kernel to keep running the victim thread. */ - return KERN_SUCCESS; + return 4; } +#endif -/** - * This has to be defined for linking purposes, but it's unused in this test. - */ -kern_return_t -catch_mach_exception_raise_state_identity( - mach_port_t exception_port, - mach_port_t thread, - mach_port_t task, - exception_type_t type, - exception_data_t codes, - mach_msg_type_number_t code_count, - int *flavor, - thread_state_t in_state, - mach_msg_type_number_t in_state_count, - thread_state_t out_state, - mach_msg_type_number_t *out_state_count) -{ -#pragma unused(exception_port, thread, task, type, codes, code_count, flavor, in_state, in_state_count, out_state, out_state_count) - T_FAIL("Triggered catch_mach_exception_raise_state_identity() which shouldn't happen..."); - __builtin_unreachable(); -} - -/** - * Thread to handle the mach exception generated by the floating point exception. - * - * @param arg The exception port to wait for a message on. 
- */ -void * -exc_server_thread(void *arg) -{ - mach_port_t exc_port = *(mach_port_t*)arg; - - /** - * mach_msg_server_once is a helper function provided by libsyscall that - * handles creating mach messages, blocks waiting for a message on the - * exception port, calls mach_exc_server() to handle the exception, and - * sends a reply based on the return value of mach_exc_server(). - */ -#define MACH_MSG_REPLY_SIZE 4096 - kern_return_t kr = mach_msg_server_once(mach_exc_server, MACH_MSG_REPLY_SIZE, exc_port, 0); - T_ASSERT_MACH_SUCCESS(kr, "Received mach exception message"); - - pthread_exit((void*)0); - __builtin_unreachable(); -} +#define KERNEL_BOOTARGS_MAX_SIZE 1024 +static char kernel_bootargs[KERNEL_BOOTARGS_MAX_SIZE]; T_DECL(armv8_fp_exception, - "Test that ARMv8 floating point exceptions generate mach exceptions.") + "Test that ARMv8 floating point exceptions generate Mach exceptions, verify default FPCR value.") { #ifndef __arm64__ T_SKIP("Running on non-arm64 target, skipping..."); #else - pthread_t exc_thread; mach_port_t exc_port = MACH_PORT_NULL; - mach_port_t task = mach_task_self(); - mach_port_t thread = mach_thread_self(); - kern_return_t kr = KERN_SUCCESS; + size_t kernel_bootargs_len; + + uint64_t fpcr = __builtin_arm_rsr64("FPCR"); + + if (fpcr != FPCR_INIT) { + T_FAIL("The floating point control register has a non-default value" "%" PRIx64, fpcr); + } /* Attempt to enable Divide-by-Zero floating point exceptions in hardware. */ - uint64_t fpcr = __builtin_arm_rsr64("FPCR") | FPCR_DIV_EXC; - __builtin_arm_wsr64("FPCR", fpcr); + uint64_t fpcr_divexc = fpcr | FPCR_DIV_EXC; + __builtin_arm_wsr64("FPCR", fpcr_divexc); #define DSB_ISH 0xb __builtin_arm_dsb(DSB_ISH); /* Devices that don't support floating point exceptions have FPCR as RAZ/WI. */ - if (__builtin_arm_rsr64("FPCR") != fpcr) { + if (__builtin_arm_rsr64("FPCR") != fpcr_divexc) { T_SKIP("Running on a device that doesn't support floating point exceptions, skipping..."); } - /* Create the mach port the exception messages will be sent to. */ - kr = mach_port_allocate(task, MACH_PORT_RIGHT_RECEIVE, &exc_port); - T_ASSERT_MACH_SUCCESS(kr, "Allocated mach exception port"); + /* Check if floating-point exceptions are enabled */ + kernel_bootargs_len = sizeof(kernel_bootargs); + kern_return_t kr = sysctlbyname("kern.bootargs", kernel_bootargs, &kernel_bootargs_len, NULL, 0); + if (kr != 0) { + T_SKIP("Could not get kernel bootargs, skipping..."); + } - /** - * Insert a send right into the exception port that the kernel will use to - * send the exception thread the exception messages. - */ - kr = mach_port_insert_right(task, exc_port, exc_port, MACH_MSG_TYPE_MAKE_SEND); - T_ASSERT_MACH_SUCCESS(kr, "Inserted a SEND right into the exception port"); - - /* Tell the kernel what port to send EXC_ARITHMETIC exceptions to. */ - kr = thread_set_exception_ports( - thread, - EXC_MASK_ARITHMETIC, - exc_port, - EXCEPTION_STATE | MACH_EXCEPTION_CODES, - ARM_THREAD_STATE64); - T_ASSERT_MACH_SUCCESS(kr, "Set the exception port to my custom handler"); + if (NULL == strstr(kernel_bootargs, "-fp_exceptions")) { + T_SKIP("Floating-point exceptions are disabled, skipping..."); + } + /* Create the mach port the exception messages will be sent to. */ + exc_port = create_exception_port(EXC_MASK_ARITHMETIC); /* Spawn the exception server's thread. 
*/ - int err = pthread_create(&exc_thread, (pthread_attr_t*)0, exc_server_thread, (void*)&exc_port); - T_ASSERT_POSIX_ZERO(err, "Spawned exception server thread"); - - /* No need to wait for the exception server to be joined when it exits. */ - pthread_detach(exc_thread); + run_exception_handler(exc_port, exc_arithmetic_handler); /** * This should cause a floating point divide-by-zero exception to get triggered. diff --git a/tests/ftruncate.c b/tests/ftruncate.c new file mode 100644 index 000000000..425cf2c2d --- /dev/null +++ b/tests/ftruncate.c @@ -0,0 +1,77 @@ +#include +#include +#include +#include +#include +#include +#include + +T_GLOBAL_META(T_META_NAMESPACE("xnu.vfs")); + +#define FSIZE_CUR (10*1024) +#define TMP_FILE_PATH "/tmp/ftruncate_test" + +static int sigcount = 0; + +static void +xfsz_signal_handler(__unused int signo) +{ + sigcount++; +} + +static void +fsize_test(bool use_fd) +{ + struct rlimit rlim; + int fd, ret; + + T_SETUPBEGIN; + + signal(SIGXFSZ, xfsz_signal_handler); + + rlim.rlim_cur = FSIZE_CUR; + rlim.rlim_max = RLIM_INFINITY; + ret = setrlimit(RLIMIT_FSIZE, &rlim); + T_ASSERT_POSIX_SUCCESS(ret, "set soft RLIMIT_FSIZE to %d", FSIZE_CUR); + + fd = open(TMP_FILE_PATH, O_RDWR | O_CREAT, 0777); + T_ASSERT_POSIX_SUCCESS(ret, "create temp file: %s", TMP_FILE_PATH); + + T_SETUPEND; + + if (use_fd) { + ret = ftruncate(fd, FSIZE_CUR); + T_EXPECT_POSIX_SUCCESS(ret, "ftruncate() with length RLIMIT_FSIZE"); + } else { + ret = truncate(TMP_FILE_PATH, FSIZE_CUR); + T_EXPECT_POSIX_SUCCESS(ret, "truncate() with length RLIMIT_FSIZE"); + } + T_EXPECT_EQ(sigcount, 0, "no signal received"); + + if (use_fd) { + ret = ftruncate(fd, FSIZE_CUR + 1); + T_EXPECT_POSIX_FAILURE(ret, EFBIG, "ftruncate() with length RLIMIT_FSIZE + 1"); + } else { + ret = truncate(TMP_FILE_PATH, FSIZE_CUR + 1); + T_EXPECT_POSIX_FAILURE(ret, EFBIG, "truncate() with length RLIMIT_FSIZE + 1"); + } + T_EXPECT_EQ(sigcount, 1, "SIGXFSZ signal received"); + + ret = close(fd); + T_ASSERT_POSIX_SUCCESS(ret, "close temp file"); + + ret = unlink(TMP_FILE_PATH); + T_ASSERT_POSIX_SUCCESS(ret, "unlink temp file"); +} + +T_DECL(ftruncate_fsize, + "ftruncate() should fail with EFBIG and send SIGXFSZ signal when length > RLIMIT_FSIZE") +{ + fsize_test(true); +} + +T_DECL(truncate_fsize, + "truncate() should fail with EFBIG and send SIGXFSZ signal when length > RLIMIT_FSIZE") +{ + fsize_test(false); +} diff --git a/tests/get_shared_cache_address.c b/tests/get_shared_cache_address.c new file mode 100644 index 000000000..d5037188c --- /dev/null +++ b/tests/get_shared_cache_address.c @@ -0,0 +1,40 @@ +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * Test helper to retrieve the address of the shared cache. 
The helper + * also verifies that the process is correctly marked to have the shared + * cache reslid when interrogated through proc_pid_rusage() + */ +int +main(int argc, char **argv) +{ + size_t shared_cache_len = 0; + struct rusage_info_v5 ru = {}; + + if (argc != 2) { + fprintf(stderr, "Invalid helper invocation"); + exit(1); + } + + if (proc_pid_rusage(getpid(), RUSAGE_INFO_V5, (rusage_info_t *)&ru) != 0) { + perror("proc_pid_rusage() helper"); + exit(1); + } + + if (strcmp(argv[1], "check_rusage_flag") == 0) { + if (!(ru.ri_flags & RU_PROC_RUNS_RESLIDE)) { + fprintf(stderr, "Helper rusage flag check failed\n"); + exit(1); + } + } + + printf("%p\n", _dyld_get_shared_cache_range(&shared_cache_len)); + exit(0); +} diff --git a/tests/hv_private.entitlements b/tests/hv_private.entitlements new file mode 100644 index 000000000..e6cea6583 --- /dev/null +++ b/tests/hv_private.entitlements @@ -0,0 +1,8 @@ + + + + + com.apple.private.hypervisor + + + diff --git a/tests/hv_public.entitlements b/tests/hv_public.entitlements new file mode 100644 index 000000000..c2ef1a38b --- /dev/null +++ b/tests/hv_public.entitlements @@ -0,0 +1,8 @@ + + + + + com.apple.security.hypervisor + + + diff --git a/tests/hvbench.c b/tests/hvbench.c new file mode 100644 index 000000000..fc7ccded7 --- /dev/null +++ b/tests/hvbench.c @@ -0,0 +1,366 @@ +#include "hvtest_arm64.h" +#include "hvtest_guest.h" + +#include +#include +#include +#include +#include +#include + +T_GLOBAL_META( + T_META_NAMESPACE("xnu.arm.hv"), + T_META_REQUIRES_SYSCTL_EQ("kern.hv_support", 1), + // Temporary workaround for not providing an x86_64 slice + T_META_REQUIRES_SYSCTL_EQ("hw.optional.arm64", 1) + ); + +#define SET_PC(vcpu, symbol) \ +{ \ + vcpu_entry_function entry = ptrauth_strip(&symbol, 0); \ + uint64_t entry_addr = (uintptr_t)entry; \ + (void)hv_vcpu_set_reg(vcpu, HV_REG_PC, entry_addr); \ +} + +// Note that expect_*(), set_reg(), and get_reg() cannot be used in benchmarks, +// as the T_ASSERT() checks they perform are severely detrimental to results. +// +// The helpers below should be used in their place. + +static void +quick_bump_pc(hv_vcpu_t vcpu, const bool forward) +{ + uint64_t pc; + (void)hv_vcpu_get_reg(vcpu, HV_REG_PC, &pc); + pc = forward ? pc + 4 : pc - 4; + (void)hv_vcpu_set_reg(vcpu, HV_REG_PC, pc); +} + +static void +vtimer_benchmark(hv_vcpu_t vcpu, hv_vcpu_exit_t *exit) +{ + dt_stat_thread_cycles_t stat = dt_stat_thread_cycles_create( + "VTimer interruption"); + SET_PC(vcpu, spin_vcpu_entry); + set_sys_reg(vcpu, HV_SYS_REG_CNTV_CVAL_EL0, 0); + set_sys_reg(vcpu, HV_SYS_REG_CNTV_CTL_EL0, 1); + // Dry-run twice to ensure that the timer is re-armed. + run_to_next_vm_fault(vcpu, exit); + T_ASSERT_EQ_UINT(exit->reason, HV_EXIT_REASON_VTIMER_ACTIVATED, + "check for timer"); + hv_vcpu_set_vtimer_mask(vcpu, false); + run_to_next_vm_fault(vcpu, exit); + T_ASSERT_EQ_UINT(exit->reason, HV_EXIT_REASON_VTIMER_ACTIVATED, + "check for timer"); + hv_vcpu_set_vtimer_mask(vcpu, false); + T_STAT_MEASURE_LOOP(stat) { + hv_vcpu_run(vcpu); + hv_vcpu_set_vtimer_mask(vcpu, false); + } + dt_stat_finalize(stat); + // Disable the timer before running other benchmarks, otherwise they will be + // interrupted. 
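+	// (CNTV_CTL_EL0 bit 0 is the ENABLE bit and bit 1 is IMASK; writing 0 clears
+	// ENABLE so no further HV_EXIT_REASON_VTIMER_ACTIVATED exits can occur.)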
+ set_sys_reg(vcpu, HV_SYS_REG_CNTV_CTL_EL0, 0); +} + +static void +trap_benchmark(dt_stat_thread_cycles_t trap_stat, hv_vcpu_t vcpu, + hv_vcpu_exit_t *exit, const uint64_t batch, const bool increment_pc) +{ + while (!dt_stat_stable(trap_stat)) { + set_reg(vcpu, HV_REG_X0, batch); + dt_stat_token start = dt_stat_thread_cycles_begin(trap_stat); + for (uint32_t i = 0; i < batch; i++) { + hv_vcpu_run(vcpu); + if (increment_pc) { + quick_bump_pc(vcpu, true); + } + } + dt_stat_thread_cycles_end_batch(trap_stat, (int)batch, start); + expect_hvc(vcpu, exit, 2); + } + dt_stat_finalize(trap_stat); +} + +static void +mrs_bench_kernel(hv_vcpu_t vcpu, hv_vcpu_exit_t *exit, const char *name) +{ + const uint64_t batch = 1000; + SET_PC(vcpu, mrs_actlr_bench_loop); + set_control(vcpu, _HV_CONTROL_FIELD_HCR, + get_control(vcpu, _HV_CONTROL_FIELD_HCR) & ~HCR_TACR); + dt_stat_thread_cycles_t stat = dt_stat_thread_cycles_create(name); + while (!dt_stat_stable(stat)) { + set_reg(vcpu, HV_REG_X0, batch); + dt_stat_token start = dt_stat_thread_cycles_begin(stat); + hv_vcpu_run(vcpu); + dt_stat_thread_cycles_end_batch(stat, (int)batch, start); + T_QUIET; T_ASSERT_EQ_UINT(exit->reason, HV_EXIT_REASON_EXCEPTION, + "check for exception"); + T_QUIET; T_ASSERT_EQ(exit->exception.syndrome >> 26, 0x16, + "check for HVC64"); + } + dt_stat_finalize(stat); +} + +static void * +trap_bench_monitor(void *arg __unused, hv_vcpu_t vcpu, hv_vcpu_exit_t *exit) +{ + // In all benchmark testcases using quick_run_vcpu(), dry run all guest code + // to fault in pages so that run_to_next_vm_fault() isn't needed while + // recording measurements. + + vtimer_benchmark(vcpu, exit); + + // dry-run hvc_bench_loop + SET_PC(vcpu, hvc_bench_loop); + set_reg(vcpu, HV_REG_X0, 1); + expect_hvc(vcpu, exit, 1); + expect_hvc(vcpu, exit, 2); + + SET_PC(vcpu, hvc_bench_loop); + trap_benchmark(dt_stat_thread_cycles_create("HVC handled by VMM"), + vcpu, exit, 1000, false); + + // dry-run data_abort_bench_loop + SET_PC(vcpu, data_abort_bench_loop); + set_reg(vcpu, HV_REG_X0, 1); + expect_trapped_store(vcpu, exit, get_reserved_start()); + expect_hvc(vcpu, exit, 2); + + SET_PC(vcpu, data_abort_bench_loop); + trap_benchmark(dt_stat_thread_cycles_create("data abort handled by VMM"), + vcpu, exit, 1000, true); + + // dry-run mrs_actlr_bench_loop + SET_PC(vcpu, mrs_actlr_bench_loop); + set_reg(vcpu, HV_REG_X0, 1); + set_control(vcpu, _HV_CONTROL_FIELD_HCR, + get_control(vcpu, _HV_CONTROL_FIELD_HCR) & ~HCR_TACR); + // Confirm no visible trap from MRS + expect_hvc(vcpu, exit, 2); + + mrs_bench_kernel(vcpu, exit, "MRS trap handled by kernel"); + + SET_PC(vcpu, mrs_actlr_bench_loop); + set_reg(vcpu, HV_REG_X0, 1); + set_control(vcpu, _HV_CONTROL_FIELD_HCR, + get_control(vcpu, _HV_CONTROL_FIELD_HCR) | HCR_TACR); + // Confirm MRS trap from test loop + expect_exception(vcpu, exit, 0x18); + quick_bump_pc(vcpu, true); + expect_hvc(vcpu, exit, 2); + SET_PC(vcpu, mrs_actlr_bench_loop); + trap_benchmark(dt_stat_thread_cycles_create("MRS trap handled by VMM"), + vcpu, exit, 1000, true); + + SET_PC(vcpu, activate_debug); + expect_hvc(vcpu, exit, 0); + + SET_PC(vcpu, hvc_bench_loop); + trap_benchmark(dt_stat_thread_cycles_create( + "debug-enabled HVC handled by VMM"), vcpu, exit, 1000, false); + + mrs_bench_kernel(vcpu, exit, "debug-enabled MRS trap handled by kernel"); + + return NULL; +} + +T_DECL(trap_benchmark, "trap-processing benchmark") +{ + vm_setup(); + pthread_t vcpu_thread = create_vcpu_thread(hvc_bench_loop, 0, + trap_bench_monitor, NULL); + 
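+	// (create_vcpu_thread() runs the trap_bench_monitor callback on the spawned
+	// vcpu thread, so all guest entries and measurements happen there; the main
+	// thread only waits for it to finish before tearing the VM down.)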
T_ASSERT_POSIX_SUCCESS(pthread_join(vcpu_thread, NULL), "join vcpu"); + vm_cleanup(); +} + +static semaphore_t sem1; +static semaphore_t sem2; +static _Atomic uint32_t stage; + +static void +switch_and_return(bool leader) +{ + // wait_semaphore, signal_semaphore + (void)semaphore_wait_signal(leader ? sem2 : sem1, leader ? sem1 : sem2); +} + +static void * +vcpu_switch_leader(void *arg __unused, hv_vcpu_t vcpu, hv_vcpu_exit_t *exit) +{ + dt_stat_thread_cycles_t baseline = dt_stat_thread_cycles_create( + "baseline VCPU run, no switch"); + dt_stat_thread_cycles_t thread = dt_stat_thread_cycles_create( + "VCPU-thread switch"); + dt_stat_thread_cycles_t basic = dt_stat_thread_cycles_create( + "basic VCPU-VCPU switch"); + dt_stat_thread_cycles_t baseline_debug = dt_stat_thread_cycles_create( + "baseline debug-enabled VCPU run, no switch"); + dt_stat_thread_cycles_t basic_debug = dt_stat_thread_cycles_create( + "basic VCPU <-> debug-enabled VCPU switch"); + dt_stat_thread_cycles_t debug_debug = dt_stat_thread_cycles_create( + "debug-enabled VCPU <-> debug-enabled VCPU switch"); + + bind_to_cpu(0); + + // Activate minimal VCPU state + SET_PC(vcpu, hvc_loop); + expect_hvc(vcpu, exit, 0); + T_STAT_MEASURE_LOOP(baseline) { + hv_vcpu_run(vcpu); + } + dt_stat_finalize(baseline); + + T_STAT_MEASURE_LOOP(thread) { + hv_vcpu_run(vcpu); + switch_and_return(true); + } + dt_stat_finalize(thread); + atomic_store_explicit(&stage, 1, memory_order_relaxed); + + T_STAT_MEASURE_LOOP(basic) { + hv_vcpu_run(vcpu); + switch_and_return(true); + } + dt_stat_finalize(basic); + atomic_store_explicit(&stage, 2, memory_order_relaxed); + + T_STAT_MEASURE_LOOP(basic_debug) { + hv_vcpu_run(vcpu); + switch_and_return(true); + } + dt_stat_finalize(basic_debug); + atomic_store_explicit(&stage, 3, memory_order_relaxed); + + SET_PC(vcpu, activate_debug); + expect_hvc(vcpu, exit, 0); + SET_PC(vcpu, hvc_loop); + T_STAT_MEASURE_LOOP(baseline_debug) { + hv_vcpu_run(vcpu); + } + dt_stat_finalize(baseline_debug); + + T_STAT_MEASURE_LOOP(debug_debug) { + hv_vcpu_run(vcpu); + switch_and_return(true); + } + dt_stat_finalize(debug_debug); + atomic_store_explicit(&stage, 4, memory_order_relaxed); + + T_ASSERT_MACH_SUCCESS(semaphore_signal(sem1), "final signal to follower"); + + return NULL; +} + +static void * +vcpu_switch_follower(void *arg __unused, hv_vcpu_t vcpu, hv_vcpu_exit_t *exit) +{ + bind_to_cpu(0); + + // Don't signal until we've been signaled once. + T_ASSERT_MACH_SUCCESS(semaphore_wait(sem1), + "wait for first signal from leader"); + + // For a baseline, don't enter the VCPU at all. This should result in a + // negligible VCPU switch cost. + while (atomic_load_explicit(&stage, memory_order_relaxed) == 0) { + switch_and_return(false); + } + + // Enter the VCPU once to activate a minimal amount of state. 
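switch_and_return() relies on semaphore_wait_signal(), which signals one Mach semaphore and blocks on the other in a single trap; with both threads bound to CPU 0 this forces strict alternation, so every measured hv_vcpu_run() in the switch benchmarks is preceded by a full VCPU state switch. A small standalone illustration of the handoff primitive, outside the context of the test:

    #include <mach/mach.h>
    #include <pthread.h>
    #include <stdio.h>

    static semaphore_t turn_a, turn_b;

    static void *
    thread_b(void *arg __unused)
    {
        for (int i = 0; i < 3; i++) {
            semaphore_wait(turn_b);       /* wait for our turn */
            printf("B %d\n", i);
            semaphore_signal(turn_a);     /* give the turn back */
        }
        return NULL;
    }

    int
    main(void)
    {
        pthread_t t;
        semaphore_create(mach_task_self(), &turn_a, SYNC_POLICY_FIFO, 0);
        semaphore_create(mach_task_self(), &turn_b, SYNC_POLICY_FIFO, 0);
        pthread_create(&t, NULL, thread_b, NULL);
        for (int i = 0; i < 3; i++) {
            printf("A %d\n", i);
            /* signal B and wait for our next turn in one trap */
            semaphore_wait_signal(turn_a, turn_b);
        }
        pthread_join(t, NULL);
        return 0;
    }

The output alternates A 0, B 0, A 1, B 1, A 2, B 2 — exactly the strict turn-taking the leader/follower pair depends on.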
+ SET_PC(vcpu, hvc_loop); + expect_hvc(vcpu, exit, 0); + + while (atomic_load_explicit(&stage, memory_order_relaxed) == 1) { + hv_vcpu_run(vcpu); + switch_and_return(false); + } + + // Use debug state + SET_PC(vcpu, activate_debug); + expect_hvc(vcpu, exit, 0); + SET_PC(vcpu, hvc_loop); + + while (atomic_load_explicit(&stage, memory_order_relaxed) == 2) { + hv_vcpu_run(vcpu); + switch_and_return(false); + } + + while (atomic_load_explicit(&stage, memory_order_relaxed) == 3) { + hv_vcpu_run(vcpu); + switch_and_return(false); + } + + return NULL; +} + +T_DECL(vcpu_switch_benchmark, "vcpu state-switching benchmarks", + T_META_BOOTARGS_SET("enable_skstb=1")) +{ + bind_to_cpu(0); + + T_ASSERT_MACH_SUCCESS(semaphore_create(mach_task_self(), &sem1, + SYNC_POLICY_FIFO, 0), "semaphore_create 1"); + T_ASSERT_MACH_SUCCESS(semaphore_create(mach_task_self(), &sem2, + SYNC_POLICY_FIFO, 0), "semaphore_create 2"); + + vm_setup(); + pthread_t vcpu1_thread = create_vcpu_thread(hvc_loop, 0, + vcpu_switch_leader, NULL); + pthread_t vcpu2_thread = create_vcpu_thread(hvc_loop, 0, + vcpu_switch_follower, NULL); + + T_ASSERT_POSIX_SUCCESS(pthread_join(vcpu1_thread, NULL), "join vcpu1"); + T_ASSERT_POSIX_SUCCESS(pthread_join(vcpu2_thread, NULL), "join vcpu2"); + + vm_cleanup(); +} + +struct thread_params { + uint32_t id; + uint32_t iter; + pthread_t thread; +}; + +static void * +run_cancel_monitor(void *arg, hv_vcpu_t vcpu, hv_vcpu_exit_t *exit __unused) +{ + struct thread_params *param = (struct thread_params *)arg; + dt_stat_time_t s = dt_stat_time_create("hv_vcpus_exit time vcpu%u", + param->id); + while (!dt_stat_stable(s)) { + dt_stat_token start = dt_stat_time_begin(s); + for (uint32_t i = 0; i < param->iter; i++) { + hv_vcpus_exit(&vcpu, 1); + } + dt_stat_time_end_batch(s, (int)param->iter, start); + } + dt_stat_finalize(s); + return NULL; +} + +static void +run_cancel_call(uint32_t vcpu_count, uint32_t iter) +{ + struct thread_params *threads = calloc(vcpu_count, sizeof(*threads)); + vm_setup(); + for (uint32_t i = 0; i < vcpu_count; i++) { + threads[i].id = i; + threads[i].iter = iter; + threads[i].thread = create_vcpu_thread(hvc_loop, 0, run_cancel_monitor, + &threads[i]); + } + for (uint32_t i = 0; i < vcpu_count; i++) { + T_ASSERT_POSIX_SUCCESS(pthread_join(threads[i].thread, NULL), + "join vcpu%u", i); + } + free(threads); + vm_cleanup(); +} + +T_DECL(api_benchmarks, "API call parallel performance") +{ + run_cancel_call(1, 1000); + run_cancel_call(4, 1000); +} diff --git a/tests/hvtest_x86.m b/tests/hvtest_x86.m new file mode 100644 index 000000000..0aebb6bc1 --- /dev/null +++ b/tests/hvtest_x86.m @@ -0,0 +1,1248 @@ +#include +#include +#include + +#include +#include +#include + +#include + +#include "hvtest_x86_guest.h" + +#include +#include +#include + +T_GLOBAL_META( + T_META_NAMESPACE("xnu.intel.hv"), + T_META_RUN_CONCURRENTLY(true), + T_META_REQUIRES_SYSCTL_NE("hw.optional.arm64", 1) // Don't run translated. 
+ ); + +static bool +hv_support() +{ + int hv_support; + size_t hv_support_size = sizeof(hv_support); + + int err = sysctlbyname("kern.hv_support", &hv_support, &hv_support_size, NULL, 0); + if (err) { + return false; + } else { + return hv_support != 0; + } +} + +static uint64_t get_reg(hv_vcpuid_t vcpu, hv_x86_reg_t reg) +{ + uint64_t val; + T_QUIET; T_EXPECT_EQ(hv_vcpu_read_register(vcpu, reg, &val), HV_SUCCESS, + "get register"); + return val; +} + +static void set_reg(hv_vcpuid_t vcpu, hv_x86_reg_t reg, uint64_t value) +{ + T_QUIET; T_EXPECT_EQ(hv_vcpu_write_register(vcpu, reg, value), HV_SUCCESS, + "set register"); +} + +static uint64_t get_vmcs(hv_vcpuid_t vcpu, uint32_t field) +{ + uint64_t val; + T_QUIET; T_EXPECT_EQ(hv_vmx_vcpu_read_vmcs(vcpu, field, &val), HV_SUCCESS, + "get vmcs"); + return val; +} + +static void set_vmcs(hv_vcpuid_t vcpu, uint32_t field, uint64_t value) +{ + T_QUIET; T_EXPECT_EQ(hv_vmx_vcpu_write_vmcs(vcpu, field, value), HV_SUCCESS, + "set vmcs"); +} + +static uint64_t get_cap(uint32_t field) +{ + uint64_t val; + T_QUIET; T_ASSERT_EQ(hv_vmx_read_capability(field, &val), HV_SUCCESS, + "get capability"); + return val; +} + + + +static NSMutableDictionary *page_cache; +static NSMutableSet *allocated_phys_pages; +static pthread_mutex_t page_cache_lock = PTHREAD_MUTEX_INITIALIZER; + +static uint64_t next_phys = 0x4000000; + +/* + * Map a page into guest's physical address space, return gpa of the + * page. If *host_uva is NULL, a new host user page is allocated. + */ +static hv_gpaddr_t +map_guest_phys(void **host_uva) +{ + T_QUIET; T_ASSERT_POSIX_SUCCESS(pthread_mutex_lock(&page_cache_lock), + "acquire page lock"); + + hv_gpaddr_t gpa = next_phys; + next_phys += vm_page_size; + + if (*host_uva == NULL) { + *host_uva = valloc(vm_page_size); + memset(*host_uva, 0, vm_page_size); + [allocated_phys_pages addObject:@((uintptr_t)*host_uva)]; + } + + T_QUIET; T_ASSERT_EQ(hv_vm_map(*host_uva, gpa, vm_page_size, HV_MEMORY_READ), HV_SUCCESS, "enter hv mapping"); + + [page_cache setObject:@((uintptr_t)*host_uva) forKey:@(gpa)]; + + + T_QUIET; T_ASSERT_POSIX_SUCCESS(pthread_mutex_unlock(&page_cache_lock), + "release page lock"); + + return gpa; +} + +static uint64_t *pml4; +static hv_gpaddr_t pml4_gpa; + +/* Stolen from kern/bits.h, which cannot be included outside the kernel. */ +#define BIT(b) (1ULL << (b)) + +#define mask(width) (width >= 64 ? (unsigned long long)-1 : (BIT(width) - 1)) +#define extract(x, shift, width) ((((uint64_t)(x)) >> (shift)) & mask(width)) +#define bits(x, hi, lo) extract((x), (lo), (hi) - (lo) + 1) + + +/* + * Enter a page in a level of long mode's PML4 paging structures. + * Helper for fault_in_page. 
+ */ +static void * +enter_level(uint64_t *table, void *host_va, void *va, int hi, int lo) { + uint64_t * const te = &table[bits(va, hi, lo)]; + + const uint64_t present = 1; + const uint64_t rw = 2; + + const uint64_t addr_mask = mask(47-12) << 12; + + if (!(*te & present)) { + hv_gpaddr_t gpa = map_guest_phys(&host_va); + *te = (gpa & addr_mask) | rw | present; + } else { + NSNumber *num = [page_cache objectForKey:@(*te & addr_mask)]; + T_QUIET; T_ASSERT_NOTNULL(num, "existing page is backed"); + void *backing = (void*)[num unsignedLongValue]; + if (host_va != 0) { + T_QUIET; T_ASSERT_EQ(va, backing, "backing page matches"); + } else { + host_va = backing; + } + } + + return host_va; +} + +/* + * Enters a page both into the guest paging structures and the EPT + * (long mode PML4 only, real mode and protected mode support running + * without paging, and that's what they use instead.) + */ +static void * +map_page(void *host_va, void *va) { + uint64_t *pdpt = enter_level(pml4, NULL, va, 47, 39); + uint64_t *pd = enter_level(pdpt, NULL, va, 38, 30); + uint64_t *pt = enter_level(pd, NULL, va, 29, 21); + return enter_level(pt, host_va, va, 20, 12); +} + +static void +fault_in_page(void *va) { + map_page(va, va); +} + +static void free_page_cache(void) +{ + T_QUIET; T_ASSERT_POSIX_SUCCESS(pthread_mutex_lock(&page_cache_lock), + "acquire page lock"); + + for (NSNumber *uvaNumber in allocated_phys_pages) { + uintptr_t va = [uvaNumber unsignedLongValue]; + free((void *)va); + } + [page_cache release]; + [allocated_phys_pages release]; + + T_QUIET; T_ASSERT_POSIX_SUCCESS(pthread_mutex_unlock(&page_cache_lock), + "release page lock"); +} + +static uint64_t +run_to_next_vm_fault(hv_vcpuid_t vcpu, bool on_demand_paging) +{ + bool retry; + uint64_t exit_reason, qual, gpa, gla, info, vector_info, error_code; + do { + retry = false; + do { + T_QUIET; T_ASSERT_EQ(hv_vcpu_run_until(vcpu, ~(uint64_t)0), HV_SUCCESS, "run VCPU"); + exit_reason = get_vmcs(vcpu, VMCS_RO_EXIT_REASON); + + } while (exit_reason == VMX_REASON_IRQ); + + qual = get_vmcs(vcpu, VMCS_RO_EXIT_QUALIFIC); + gpa = get_vmcs(vcpu, VMCS_GUEST_PHYSICAL_ADDRESS); + gla = get_vmcs(vcpu, VMCS_RO_GUEST_LIN_ADDR); + info = get_vmcs(vcpu, VMCS_RO_VMEXIT_IRQ_INFO); + vector_info = get_vmcs(vcpu, VMCS_RO_IDT_VECTOR_INFO); + error_code = get_vmcs(vcpu, VMCS_RO_VMEXIT_IRQ_ERROR); + + if (on_demand_paging) { + if (exit_reason == VMX_REASON_EXC_NMI && + (info & 0x800003ff) == 0x8000030e && + (error_code & 0x1) == 0) { + // guest paging fault + fault_in_page((void*)qual); + retry = true; + } + else if (exit_reason == VMX_REASON_EPT_VIOLATION) { + if ((qual & 0x86) == 0x82) { + // EPT write fault + T_QUIET; T_ASSERT_EQ(hv_vm_protect(gpa & ~(hv_gpaddr_t)PAGE_MASK, vm_page_size, + HV_MEMORY_READ | HV_MEMORY_WRITE), + HV_SUCCESS, "make page writable"); + retry = true; + } + else if ((qual & 0x86) == 0x84) { + // EPT exec fault + T_QUIET; T_ASSERT_EQ(hv_vm_protect(gpa & ~(hv_gpaddr_t)PAGE_MASK, vm_page_size, + HV_MEMORY_READ | HV_MEMORY_EXEC), + HV_SUCCESS, "make page executable"); + retry = true; + } + } + } + } while (retry); + + // printf("reason: %lld, qualification: %llx\n", exit_reason, qual); + // printf("gpa: %llx, gla: %llx\n", gpa, gla); + // printf("RIP: %llx\n", get_reg(vcpu, HV_X86_RIP)); + // printf("CR3: %llx\n", get_reg(vcpu, HV_X86_CR3)); + // printf("info: %llx\n", info); + // printf("vector_info: %llx\n", vector_info); + // printf("error_code: %llx\n", error_code); + + return exit_reason; +} + +static uint64_t +expect_vmcall(hv_vcpuid_t vcpu, 
bool on_demand_paging) +{ + uint64_t reason = run_to_next_vm_fault(vcpu, on_demand_paging); + T_ASSERT_EQ(reason, (uint64_t)VMX_REASON_VMCALL, "expect vmcall exit"); + + // advance RIP to after VMCALL + set_vmcs(vcpu, VMCS_GUEST_RIP, get_reg(vcpu, HV_X86_RIP)+get_vmcs(vcpu, VMCS_RO_VMEXIT_INSTR_LEN)); + + return get_reg(vcpu, HV_X86_RAX); +} + +static uint64_t +expect_vmcall_with_value(hv_vcpuid_t vcpu, uint64_t rax, bool on_demand_paging) +{ + uint64_t reason = run_to_next_vm_fault(vcpu, on_demand_paging); + T_QUIET; T_ASSERT_EQ(reason, (uint64_t)VMX_REASON_VMCALL, "check for vmcall exit"); + T_ASSERT_EQ(get_reg(vcpu, HV_X86_RAX), rax, "vmcall exit with expected RAX value %llx", rax); + + // advance RIP to after VMCALL + set_vmcs(vcpu, VMCS_GUEST_RIP, get_reg(vcpu, HV_X86_RIP)+get_vmcs(vcpu, VMCS_RO_VMEXIT_INSTR_LEN)); + + return reason; +} + +typedef void (*vcpu_entry_function)(uint64_t); +typedef void *(*vcpu_monitor_function)(void *, hv_vcpuid_t); + +struct test_vcpu { + hv_vcpuid_t vcpu; + vcpu_entry_function guest_func; + uint64_t guest_param; + vcpu_monitor_function monitor_func; + void *monitor_param; +}; + +static uint64_t +canonicalize(uint64_t ctrl, uint64_t mask) +{ + return (ctrl | (mask & 0xffffffff)) & (mask >> 32); +} + +static void +setup_real_mode(hv_vcpuid_t vcpu) +{ + uint64_t pin_cap, proc_cap, proc2_cap, entry_cap, exit_cap; + + pin_cap = get_cap(HV_VMX_CAP_PINBASED); + proc_cap = get_cap(HV_VMX_CAP_PROCBASED); + proc2_cap = get_cap(HV_VMX_CAP_PROCBASED2); + entry_cap = get_cap(HV_VMX_CAP_ENTRY); + exit_cap = get_cap(HV_VMX_CAP_EXIT); + + set_vmcs(vcpu, VMCS_CTRL_PIN_BASED, canonicalize(0, pin_cap)); + set_vmcs(vcpu, VMCS_CTRL_CPU_BASED, + canonicalize(CPU_BASED_HLT | CPU_BASED_CR8_LOAD | CPU_BASED_CR8_STORE, proc_cap)); + set_vmcs(vcpu, VMCS_CTRL_CPU_BASED2, canonicalize(0, proc2_cap)); + set_vmcs(vcpu, VMCS_CTRL_VMENTRY_CONTROLS, canonicalize(0, entry_cap)); + set_vmcs(vcpu, VMCS_CTRL_VMEXIT_CONTROLS, canonicalize(0, exit_cap)); + + set_vmcs(vcpu, VMCS_GUEST_CR0, 0x20); + set_vmcs(vcpu, VMCS_CTRL_CR0_MASK, ~0u); + set_vmcs(vcpu, VMCS_CTRL_CR0_SHADOW, 0x20); + set_vmcs(vcpu, VMCS_GUEST_CR4, 0x2000); + set_vmcs(vcpu, VMCS_CTRL_CR4_MASK, ~0u); + set_vmcs(vcpu, VMCS_CTRL_CR4_SHADOW, 0x0000); + set_vmcs(vcpu, VMCS_GUEST_TR_AR, 0x83); + set_vmcs(vcpu, VMCS_GUEST_LDTR_AR, 0x10000); + set_vmcs(vcpu, VMCS_GUEST_SS, 0); + set_vmcs(vcpu, VMCS_GUEST_SS_BASE, 0); + set_vmcs(vcpu, VMCS_GUEST_SS_LIMIT, 0xffff); + set_vmcs(vcpu, VMCS_GUEST_SS_AR, 0x93); + set_vmcs(vcpu, VMCS_GUEST_CS, 0); + set_vmcs(vcpu, VMCS_GUEST_CS_BASE, 0); + set_vmcs(vcpu, VMCS_GUEST_CS_LIMIT, 0xffff); + set_vmcs(vcpu, VMCS_GUEST_CS_AR, 0x9b); + set_vmcs(vcpu, VMCS_GUEST_DS, 0); + set_vmcs(vcpu, VMCS_GUEST_DS_BASE, 0); + set_vmcs(vcpu, VMCS_GUEST_DS_LIMIT, 0xffff); + set_vmcs(vcpu, VMCS_GUEST_DS_AR, 0x93); + set_vmcs(vcpu, VMCS_GUEST_ES, 0); + set_vmcs(vcpu, VMCS_GUEST_ES_BASE, 0); + set_vmcs(vcpu, VMCS_GUEST_ES_LIMIT, 0xffff); + set_vmcs(vcpu, VMCS_GUEST_ES_AR, 0x93); + set_vmcs(vcpu, VMCS_GUEST_FS, 0); + set_vmcs(vcpu, VMCS_GUEST_FS_BASE, 0); + set_vmcs(vcpu, VMCS_GUEST_FS_LIMIT, 0xffff); + set_vmcs(vcpu, VMCS_GUEST_FS_AR, 0x93); + set_vmcs(vcpu, VMCS_GUEST_GS, 0); + set_vmcs(vcpu, VMCS_GUEST_GS_BASE, 0); + set_vmcs(vcpu, VMCS_GUEST_GS_LIMIT, 0xffff); + set_vmcs(vcpu, VMCS_GUEST_GS_AR, 0x93); + + set_vmcs(vcpu, VMCS_GUEST_GDTR_BASE, 0); + set_vmcs(vcpu, VMCS_GUEST_GDTR_LIMIT, 0); + set_vmcs(vcpu, VMCS_GUEST_IDTR_BASE, 0); + set_vmcs(vcpu, VMCS_GUEST_IDTR_LIMIT, 0); + + set_vmcs(vcpu, VMCS_GUEST_RFLAGS, 
0x2); + + set_vmcs(vcpu, VMCS_CTRL_EXC_BITMAP, 0xffffffff); +} + +static void +setup_protected_mode(hv_vcpuid_t vcpu) +{ + uint64_t pin_cap, proc_cap, proc2_cap, entry_cap, exit_cap; + + pin_cap = get_cap(HV_VMX_CAP_PINBASED); + proc_cap = get_cap(HV_VMX_CAP_PROCBASED); + proc2_cap = get_cap(HV_VMX_CAP_PROCBASED2); + entry_cap = get_cap(HV_VMX_CAP_ENTRY); + exit_cap = get_cap(HV_VMX_CAP_EXIT); + + set_vmcs(vcpu, VMCS_CTRL_PIN_BASED, canonicalize(0, pin_cap)); + set_vmcs(vcpu, VMCS_CTRL_CPU_BASED, + canonicalize(CPU_BASED_HLT | CPU_BASED_CR8_LOAD | CPU_BASED_CR8_STORE, proc_cap)); + set_vmcs(vcpu, VMCS_CTRL_CPU_BASED2, canonicalize(0, proc2_cap)); + set_vmcs(vcpu, VMCS_CTRL_VMENTRY_CONTROLS, canonicalize(0, entry_cap)); + set_vmcs(vcpu, VMCS_CTRL_VMEXIT_CONTROLS, canonicalize(0, exit_cap)); + + set_vmcs(vcpu, VMCS_GUEST_CR0, 0x21); + set_vmcs(vcpu, VMCS_CTRL_CR0_MASK, ~0u); + set_vmcs(vcpu, VMCS_CTRL_CR0_SHADOW, 0x21); + set_vmcs(vcpu, VMCS_GUEST_CR3, 0); + set_vmcs(vcpu, VMCS_GUEST_CR4, 0x2000); + set_vmcs(vcpu, VMCS_CTRL_CR4_MASK, ~0u); + set_vmcs(vcpu, VMCS_CTRL_CR4_SHADOW, 0x0000); + + set_vmcs(vcpu, VMCS_GUEST_TR, 0); + set_vmcs(vcpu, VMCS_GUEST_TR_AR, 0x8b); + + set_vmcs(vcpu, VMCS_GUEST_LDTR, 0x0); + set_vmcs(vcpu, VMCS_GUEST_LDTR_AR, 0x10000); + + set_vmcs(vcpu, VMCS_GUEST_SS, 0x8); + set_vmcs(vcpu, VMCS_GUEST_SS_BASE, 0); + set_vmcs(vcpu, VMCS_GUEST_SS_LIMIT, 0xffffffff); + set_vmcs(vcpu, VMCS_GUEST_SS_AR, 0xc093); + + set_vmcs(vcpu, VMCS_GUEST_CS, 0x10); + set_vmcs(vcpu, VMCS_GUEST_CS_BASE, 0); + set_vmcs(vcpu, VMCS_GUEST_CS_LIMIT, 0xffffffff); + set_vmcs(vcpu, VMCS_GUEST_CS_AR, 0xc09b); + + set_vmcs(vcpu, VMCS_GUEST_DS, 0x8); + set_vmcs(vcpu, VMCS_GUEST_DS_BASE, 0); + set_vmcs(vcpu, VMCS_GUEST_DS_LIMIT, 0xffffffff); + set_vmcs(vcpu, VMCS_GUEST_DS_AR, 0xc093); + + set_vmcs(vcpu, VMCS_GUEST_ES, 0x8); + set_vmcs(vcpu, VMCS_GUEST_ES_BASE, 0); + set_vmcs(vcpu, VMCS_GUEST_ES_LIMIT, 0xffffffff); + set_vmcs(vcpu, VMCS_GUEST_ES_AR, 0xc093); + + set_vmcs(vcpu, VMCS_GUEST_FS, 0x8); + set_vmcs(vcpu, VMCS_GUEST_FS_BASE, 0); + set_vmcs(vcpu, VMCS_GUEST_FS_LIMIT, 0xffffffff); + set_vmcs(vcpu, VMCS_GUEST_FS_AR, 0xc093); + + set_vmcs(vcpu, VMCS_GUEST_GS, 0x8); + set_vmcs(vcpu, VMCS_GUEST_GS_BASE, 0); + set_vmcs(vcpu, VMCS_GUEST_GS_LIMIT, 0xffffffff); + set_vmcs(vcpu, VMCS_GUEST_GS_AR, 0xc093); + + set_vmcs(vcpu, VMCS_GUEST_GDTR_BASE, 0); + set_vmcs(vcpu, VMCS_GUEST_GDTR_LIMIT, 0); + + set_vmcs(vcpu, VMCS_GUEST_IDTR_BASE, 0); + set_vmcs(vcpu, VMCS_GUEST_IDTR_LIMIT, 0); + + set_vmcs(vcpu, VMCS_GUEST_RFLAGS, 0x2); + + set_vmcs(vcpu, VMCS_CTRL_EXC_BITMAP, 0xffffffff); +} + +static void +setup_long_mode(hv_vcpuid_t vcpu) +{ + uint64_t pin_cap, proc_cap, proc2_cap, entry_cap, exit_cap; + + pin_cap = get_cap(HV_VMX_CAP_PINBASED); + proc_cap = get_cap(HV_VMX_CAP_PROCBASED); + proc2_cap = get_cap(HV_VMX_CAP_PROCBASED2); + entry_cap = get_cap(HV_VMX_CAP_ENTRY); + exit_cap = get_cap(HV_VMX_CAP_EXIT); + + set_vmcs(vcpu, VMCS_CTRL_PIN_BASED, canonicalize(0, pin_cap)); + set_vmcs(vcpu, VMCS_CTRL_CPU_BASED, + canonicalize(CPU_BASED_HLT | CPU_BASED_CR8_LOAD | CPU_BASED_CR8_STORE, proc_cap)); + set_vmcs(vcpu, VMCS_CTRL_CPU_BASED2, canonicalize(0, proc2_cap)); + set_vmcs(vcpu, VMCS_CTRL_VMENTRY_CONTROLS, canonicalize(VMENTRY_GUEST_IA32E, entry_cap)); + set_vmcs(vcpu, VMCS_CTRL_VMEXIT_CONTROLS, canonicalize(0, exit_cap)); + + set_vmcs(vcpu, VMCS_GUEST_CR0, 0x80000021L); + set_vmcs(vcpu, VMCS_CTRL_CR0_MASK, ~0u); + set_vmcs(vcpu, VMCS_CTRL_CR0_SHADOW, 0x80000021L); + set_vmcs(vcpu, VMCS_GUEST_CR4, 0x2020); + 
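The constants fed into setup_long_mode() decode to architectural bits: CR0 0x80000021 is PG | NE | PE, CR4 0x2020 is VMXE | PAE, and the EFER value 0x500 written a few lines below is LMA | LME, i.e. the guest starts out already in 64-bit long mode with PAE paging. canonicalize() simply folds a requested control word into the VMX capability value for that field: the low 32 bits of the capability are the must-be-one settings (OR'd in) and the high 32 bits the allowed-one settings (AND'd against). A restatement of those register values with named bits; the macro names below are illustrative, not from this patch:

    /* x86 architectural bit positions */
    #define CR0_PE   (1u << 0)    /* protected mode enable */
    #define CR0_NE   (1u << 5)    /* numeric error */
    #define CR0_PG   (1u << 31)   /* paging */
    #define CR4_PAE  (1u << 5)    /* physical address extension */
    #define CR4_VMXE (1u << 13)   /* VMX enabled */
    #define EFER_LME (1u << 8)    /* long mode enable */
    #define EFER_LMA (1u << 10)   /* long mode active */

    /* 0x80000021 == CR0_PG | CR0_NE | CR0_PE
     * 0x2020     == CR4_VMXE | CR4_PAE
     * 0x500      == EFER_LMA | EFER_LME */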
set_vmcs(vcpu, VMCS_CTRL_CR4_MASK, ~0u); + set_vmcs(vcpu, VMCS_CTRL_CR4_SHADOW, 0x2020); + + set_vmcs(vcpu, VMCS_GUEST_IA32_EFER, 0x500); + + T_QUIET; T_ASSERT_EQ(hv_vcpu_enable_native_msr(vcpu, MSR_IA32_KERNEL_GS_BASE, true), HV_SUCCESS, "enable native GS_BASE"); + + set_vmcs(vcpu, VMCS_GUEST_TR, 0); + set_vmcs(vcpu, VMCS_GUEST_TR_AR, 0x8b); + + set_vmcs(vcpu, VMCS_GUEST_LDTR, 0x0); + set_vmcs(vcpu, VMCS_GUEST_LDTR_AR, 0x10000); + + set_vmcs(vcpu, VMCS_GUEST_SS, 0x8); + set_vmcs(vcpu, VMCS_GUEST_SS_BASE, 0); + set_vmcs(vcpu, VMCS_GUEST_SS_LIMIT, 0xffffffff); + set_vmcs(vcpu, VMCS_GUEST_SS_AR, 0xa093); + + set_vmcs(vcpu, VMCS_GUEST_CS, 0x10); + set_vmcs(vcpu, VMCS_GUEST_CS_BASE, 0); + set_vmcs(vcpu, VMCS_GUEST_CS_LIMIT, 0xffffffff); + set_vmcs(vcpu, VMCS_GUEST_CS_AR, 0xa09b); + + set_vmcs(vcpu, VMCS_GUEST_DS, 0x8); + set_vmcs(vcpu, VMCS_GUEST_DS_BASE, 0); + set_vmcs(vcpu, VMCS_GUEST_DS_LIMIT, 0xffffffff); + set_vmcs(vcpu, VMCS_GUEST_DS_AR, 0xa093); + + set_vmcs(vcpu, VMCS_GUEST_ES, 0x8); + set_vmcs(vcpu, VMCS_GUEST_ES_BASE, 0); + set_vmcs(vcpu, VMCS_GUEST_ES_LIMIT, 0xffffffff); + set_vmcs(vcpu, VMCS_GUEST_ES_AR, 0xa093); + + set_vmcs(vcpu, VMCS_GUEST_FS, 0x8); + set_vmcs(vcpu, VMCS_GUEST_FS_BASE, 0); + set_vmcs(vcpu, VMCS_GUEST_FS_LIMIT, 0xffffffff); + set_vmcs(vcpu, VMCS_GUEST_FS_AR, 0xa093); + + set_vmcs(vcpu, VMCS_GUEST_GS, 0x8); + set_vmcs(vcpu, VMCS_GUEST_GS_BASE, 0); + set_vmcs(vcpu, VMCS_GUEST_GS_LIMIT, 0xffffffff); + set_vmcs(vcpu, VMCS_GUEST_GS_AR, 0xa093); + + set_vmcs(vcpu, VMCS_GUEST_RFLAGS, 0x2); + + set_vmcs(vcpu, VMCS_CTRL_EXC_BITMAP, 0xffffffff); + + set_vmcs(vcpu, VMCS_GUEST_CR3, pml4_gpa); + + set_vmcs(vcpu, VMCS_GUEST_GDTR_BASE, 0); + set_vmcs(vcpu, VMCS_GUEST_GDTR_LIMIT, 0); + + set_vmcs(vcpu, VMCS_GUEST_IDTR_BASE, 0); + set_vmcs(vcpu, VMCS_GUEST_IDTR_LIMIT, 0); +} + +static void * +wrap_monitor(void *param) +{ + struct test_vcpu *test = (struct test_vcpu *)param; + + T_QUIET; T_ASSERT_EQ(hv_vcpu_create(&test->vcpu, HV_VCPU_DEFAULT), HV_SUCCESS, + "created vcpu"); + + const size_t stack_size = 0x4000; + void *stack_bottom = valloc(stack_size); + T_QUIET; T_ASSERT_NOTNULL(stack_bottom, "allocate VCPU stack"); + vcpu_entry_function entry = test->guest_func; + + set_vmcs(test->vcpu, VMCS_GUEST_RIP, (uintptr_t)entry); + set_vmcs(test->vcpu, VMCS_GUEST_RSP, (uintptr_t)stack_bottom + stack_size); + set_reg(test->vcpu, HV_X86_RDI, test->guest_param); + + void *result = test->monitor_func(test->monitor_param, test->vcpu); + + T_QUIET; T_ASSERT_EQ(hv_vcpu_destroy(test->vcpu), HV_SUCCESS, "Destroyed vcpu"); + free(stack_bottom); + free(test); + return result; +} + +static pthread_t +create_vcpu_thread( + vcpu_entry_function guest_function, uint64_t guest_param, + vcpu_monitor_function monitor_func, void *monitor_param) +{ + + pthread_t thread; + struct test_vcpu *test = malloc(sizeof(*test)); + T_QUIET; T_ASSERT_NOTNULL(test, "malloc test params"); + test->guest_func = guest_function; + test->guest_param = guest_param; + test->monitor_func = monitor_func; + test->monitor_param = monitor_param; + T_ASSERT_POSIX_SUCCESS(pthread_create(&thread, NULL, wrap_monitor, test), + "create vcpu pthread"); + // ownership of test struct moves to the thread + test = NULL; + + return thread; +} + +static void +vm_setup() +{ + T_SETUPBEGIN; + + if (hv_support() < 1) { + T_SKIP("Running on non-HV target, skipping..."); + return; + } + + page_cache = [[NSMutableDictionary alloc] init]; + allocated_phys_pages = [[NSMutableSet alloc] init]; + + T_ASSERT_EQ(hv_vm_create(HV_VM_DEFAULT), HV_SUCCESS, 
"Created vm"); + + + // Set up root paging structures for long mode, + // where paging is mandatory. + + pml4_gpa = map_guest_phys((void**)&pml4); + memset(pml4, 0, vm_page_size); + + T_SETUPEND; +} + +static void +vm_cleanup() +{ + T_ASSERT_EQ(hv_vm_destroy(), HV_SUCCESS, "Destroyed vm"); + free_page_cache(); +} + +static pthread_cond_t ready_cond = PTHREAD_COND_INITIALIZER; +static pthread_mutex_t vcpus_ready_lock = PTHREAD_MUTEX_INITIALIZER; +static uint32_t vcpus_initializing; +static pthread_mutex_t vcpus_hang_lock = PTHREAD_MUTEX_INITIALIZER; + +static void * +multikill_vcpu_thread_function(void __unused *arg) +{ + hv_vcpuid_t *vcpu = (hv_vcpuid_t*)arg; + + T_QUIET; T_ASSERT_EQ(hv_vcpu_create(vcpu, HV_VCPU_DEFAULT), HV_SUCCESS, + "created vcpu"); + + T_QUIET; T_ASSERT_POSIX_SUCCESS(pthread_mutex_lock(&vcpus_ready_lock), + "acquire vcpus_ready_lock"); + T_QUIET; T_ASSERT_NE(vcpus_initializing, 0, "check for vcpus_ready underflow"); + vcpus_initializing--; + if (vcpus_initializing == 0) { + T_QUIET; T_ASSERT_POSIX_SUCCESS(pthread_cond_signal(&ready_cond), + "signaling all VCPUs ready"); + } + T_QUIET; T_ASSERT_POSIX_SUCCESS(pthread_mutex_unlock(&vcpus_ready_lock), + "release vcpus_ready_lock"); + + // To cause the VCPU pointer to be cleared from the wrong thread, we need + // to get threads onto the thread deallocate queue. One way to accomplish + // this is to die while waiting for a lock. + T_ASSERT_POSIX_SUCCESS(pthread_mutex_lock(&vcpus_hang_lock), + "acquire vcpus_hang_lock"); + + // Do not allow the thread to terminate. Exactly one thread will acquire + // the above lock successfully. + while (true) { + pause(); + } + + return NULL; +} + +T_DECL(regression_55524541, + "kill task with multiple VCPU threads waiting for lock") +{ + if (!hv_support()) { + T_SKIP("no HV support"); + } + + int pipedesc[2]; + T_ASSERT_POSIX_SUCCESS(pipe(pipedesc), "create pipe"); + + pid_t child = fork(); + if (child == 0) { + const uint32_t vcpu_count = 8; + pthread_t vcpu_threads[8]; + T_ASSERT_EQ(hv_vm_create(HV_VM_DEFAULT), HV_SUCCESS, "created vm"); + vcpus_initializing = vcpu_count; + for (uint32_t i = 0; i < vcpu_count; i++) { + hv_vcpuid_t vcpu; + + T_ASSERT_POSIX_SUCCESS(pthread_create(&vcpu_threads[i], NULL, + multikill_vcpu_thread_function, (void *)&vcpu), + "create vcpu_threads[%u]", i); + } + + T_ASSERT_POSIX_SUCCESS(pthread_mutex_lock(&vcpus_ready_lock), + "acquire vcpus_ready_lock"); + while (vcpus_initializing != 0) { + T_ASSERT_POSIX_SUCCESS(pthread_cond_wait(&ready_cond, + &vcpus_ready_lock), "wait for all threads ready"); + } + T_ASSERT_POSIX_SUCCESS(pthread_mutex_unlock(&vcpus_ready_lock), + "release vcpus_ready_lock"); + + // Indicate readiness to die, meditiate peacefully. + uint8_t byte = 0; + T_ASSERT_EQ_LONG(write(pipedesc[1], &byte, 1), 1L, "notifying on pipe"); + while (true) { + pause(); + } + } else { + T_ASSERT_GT(child, 0, "successful fork"); + // Wait for child to prepare. + uint8_t byte; + T_ASSERT_EQ_LONG(read(pipedesc[0], &byte, 1), 1L, "waiting on pipe"); + T_ASSERT_POSIX_SUCCESS(kill(child, SIGTERM), "kill child"); + // Hope for no panic... 
+ T_ASSERT_POSIX_SUCCESS(wait(NULL), "reap child"); + } + T_ASSERT_POSIX_SUCCESS(close(pipedesc[0]), "close pipedesc[0]"); + T_ASSERT_POSIX_SUCCESS(close(pipedesc[1]), "close pipedesc[1]"); +} + +static void * +simple_long_mode_monitor(void *arg __unused, hv_vcpuid_t vcpu) +{ + setup_long_mode(vcpu); + + expect_vmcall_with_value(vcpu, 0x33456, true); + + return NULL; +} + +T_DECL(simple_long_mode_guest, "simple long mode guest") +{ + vm_setup(); + + pthread_t vcpu_thread = create_vcpu_thread(simple_long_mode_vcpu_entry, 0x10000, simple_long_mode_monitor, 0); + T_ASSERT_POSIX_SUCCESS(pthread_join(vcpu_thread, NULL), "join vcpu"); + + vm_cleanup(); +} + +static void * +smp_test_monitor(void *arg __unused, hv_vcpuid_t vcpu) +{ + setup_long_mode(vcpu); + + uint64_t value = expect_vmcall(vcpu, true); + return (void *)(uintptr_t)value; +} + +T_DECL(smp_sanity, "Multiple VCPUs in the same VM") +{ + vm_setup(); + + // Use this region as shared memory between the VCPUs. + void *shared = NULL; + map_guest_phys((void**)&shared); + + atomic_uint *count_word = (atomic_uint *)shared; + atomic_init(count_word, 0); + + pthread_t vcpu1_thread = create_vcpu_thread(smp_vcpu_entry, + (uintptr_t)count_word, smp_test_monitor, count_word); + pthread_t vcpu2_thread = create_vcpu_thread(smp_vcpu_entry, + (uintptr_t)count_word, smp_test_monitor, count_word); + + void *r1, *r2; + T_ASSERT_POSIX_SUCCESS(pthread_join(vcpu1_thread, &r1), "join vcpu1"); + T_ASSERT_POSIX_SUCCESS(pthread_join(vcpu2_thread, &r2), "join vcpu2"); + uint64_t v1 = (uint64_t)r1; + uint64_t v2 = (uint64_t)r2; + if (v1 == 0) { + T_ASSERT_EQ_ULLONG(v2, 1ULL, "check count"); + } else if (v1 == 1) { + T_ASSERT_EQ_ULLONG(v2, 0ULL, "check count"); + } else { + T_FAIL("unexpected count: %llu", v1); + } + + vm_cleanup(); +} + + +extern void *hvtest_begin; +extern void *hvtest_end; + +static void * +simple_protected_mode_test_monitor(void *arg __unused, hv_vcpuid_t vcpu) +{ + setup_protected_mode(vcpu); + + size_t guest_pages_size = round_page((uintptr_t)&hvtest_end - (uintptr_t)&hvtest_begin); + + const size_t mem_size = 1 * 1024 * 1024; + uint8_t *guest_pages_shadow = valloc(mem_size); + + bzero(guest_pages_shadow, mem_size); + memcpy(guest_pages_shadow+0x1000, &hvtest_begin, guest_pages_size); + + T_ASSERT_EQ(hv_vm_map(guest_pages_shadow, 0x40000000, mem_size, HV_MEMORY_READ | HV_MEMORY_EXEC), + HV_SUCCESS, "map guest memory"); + + expect_vmcall_with_value(vcpu, 0x23456, false); + + free(guest_pages_shadow); + + return NULL; +} + +T_DECL(simple_protected_mode_guest, "simple protected mode guest") +{ + vm_setup(); + + pthread_t vcpu_thread = create_vcpu_thread((vcpu_entry_function) + (((uintptr_t)simple_protected_mode_vcpu_entry & PAGE_MASK) + + 0x40000000 + 0x1000), + 0, simple_protected_mode_test_monitor, 0); + T_ASSERT_POSIX_SUCCESS(pthread_join(vcpu_thread, NULL), "join vcpu"); + + vm_cleanup(); +} + +static void * +simple_real_mode_monitor(void *arg __unused, hv_vcpuid_t vcpu) +{ + setup_real_mode(vcpu); + + size_t guest_pages_size = round_page((uintptr_t)&hvtest_end - (uintptr_t)&hvtest_begin); + + const size_t mem_size = 1 * 1024 * 1024; + uint8_t *guest_pages_shadow = valloc(mem_size); + + bzero(guest_pages_shadow, mem_size); + memcpy(guest_pages_shadow+0x1000, &hvtest_begin, guest_pages_size); + + T_ASSERT_EQ(hv_vm_map(guest_pages_shadow, 0x0, mem_size, HV_MEMORY_READ | HV_MEMORY_EXEC), HV_SUCCESS, + "map guest memory"); + + expect_vmcall_with_value(vcpu, 0x23456, false); + + free(guest_pages_shadow); + + return NULL; +} + 
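The entry addresses handed to create_vcpu_thread() in the protected- and real-mode tests are guest addresses, not host pointers: each monitor copies everything between _hvtest_begin and _hvtest_end to offset 0x1000 of a shadow buffer and maps that buffer at a fixed guest-physical base, so a host symbol ends up at base + 0x1000 + its offset from _hvtest_begin. Since _hvtest_begin is page aligned (.balign 0x1000 in the assembly), taking the symbol's page offset with & PAGE_MASK, as the T_DECLs do, yields the same value for symbols sitting in the first page of the region. A small sketch of the general computation — guest_entry_for() is a hypothetical helper, not part of the patch:

    /* Translate a host symbol inside [_hvtest_begin, _hvtest_end) into the
     * guest address it will have once the region is copied to base + 0x1000. */
    static uint64_t
    guest_entry_for(const void *host_symbol, uint64_t guest_base)
    {
        uintptr_t off = (uintptr_t)host_symbol - (uintptr_t)&hvtest_begin;
        return guest_base + 0x1000 + off;
    }

    /* usage: guest base 0x0 for the real-mode test, 0x40000000 for the
     * protected-mode test, matching the hv_vm_map() calls above */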
+T_DECL(simple_real_mode_guest, "simple real mode guest") +{ + vm_setup(); + + pthread_t vcpu_thread = create_vcpu_thread((vcpu_entry_function) + (((uintptr_t)simple_real_mode_vcpu_entry & PAGE_MASK) + + 0x1000), + 0, simple_real_mode_monitor, 0); + T_ASSERT_POSIX_SUCCESS(pthread_join(vcpu_thread, NULL), "join vcpu"); + + vm_cleanup(); +} + +static void * +radar61961809_monitor(void *gpaddr, hv_vcpuid_t vcpu) +{ + uint32_t const gdt_template[] = { + 0, 0, /* Empty */ + 0x0000ffff, 0x00cf9200, /* 0x08 CPL0 4GB writable data, 32bit */ + 0x0000ffff, 0x00cf9a00, /* 0x10 CPL0 4GB readable code, 32bit */ + 0x0000ffff, 0x00af9200, /* 0x18 CPL0 4GB writable data, 64bit */ + 0x0000ffff, 0x00af9a00, /* 0x20 CPL0 4GB readable code, 64bit */ + }; + + // We start the test in protected mode. + setup_protected_mode(vcpu); + + // SAVE_EFER makes untrapped CR0.PG work. + uint64_t exit_cap = get_cap(HV_VMX_CAP_EXIT); + set_vmcs(vcpu, VMCS_CTRL_VMEXIT_CONTROLS, canonicalize(VMEXIT_SAVE_EFER, exit_cap)); + + // Start with CR0.PG disabled. + set_vmcs(vcpu, VMCS_GUEST_CR0, 0x00000021); + set_vmcs(vcpu, VMCS_CTRL_CR0_SHADOW, 0x00000021); + /* + * Don't trap on modifying CR0.PG to reproduce the problem. + * Otherwise, we'd have to handle the switch ourselves, and would + * just do it right. + */ + set_vmcs(vcpu, VMCS_CTRL_CR0_MASK, ~0x80000000UL); + + // PAE must be enabled for a switch into long mode to work. + set_vmcs(vcpu, VMCS_GUEST_CR4, 0x2020); + set_vmcs(vcpu, VMCS_CTRL_CR4_MASK, ~0u); + set_vmcs(vcpu, VMCS_CTRL_CR4_SHADOW, 0x2020); + + // Will use the harness managed page tables in long mode. + set_vmcs(vcpu, VMCS_GUEST_CR3, pml4_gpa); + + // Hypervisor fw wants this (for good, but unrelated reason). + T_QUIET; T_ASSERT_EQ(hv_vcpu_enable_native_msr(vcpu, MSR_IA32_KERNEL_GS_BASE, true), HV_SUCCESS, "enable native GS_BASE"); + + // Far pointer array for our far jumps. + uint32_t *far_ptr = NULL; + hv_gpaddr_t far_ptr_gpaddr = map_guest_phys((void**)&far_ptr); + map_page(far_ptr, (void*)far_ptr_gpaddr); + + far_ptr[0] = (uint32_t)(((uintptr_t)&radar61961809_prepare - (uintptr_t)&hvtest_begin) + (uintptr_t)gpaddr); + far_ptr[1] = 0x0010; // 32bit CS + far_ptr[2] = (uint32_t)(((uintptr_t)&radar61961809_loop64 - (uintptr_t)&hvtest_begin) + (uintptr_t)gpaddr); + far_ptr[3] = 0x0020; // 64bit CS + + set_reg(vcpu, HV_X86_RDI, far_ptr_gpaddr); + + // Setup GDT. + uint32_t *gdt = valloc(vm_page_size); + hv_gpaddr_t gdt_gpaddr = 0x70000000; + map_page(gdt, (void*)gdt_gpaddr); + bzero(gdt, vm_page_size); + memcpy(gdt, gdt_template, sizeof(gdt_template)); + + set_vmcs(vcpu, VMCS_GUEST_GDTR_BASE, gdt_gpaddr); + set_vmcs(vcpu, VMCS_GUEST_GDTR_LIMIT, sizeof(gdt_template)+1); + + // Map test code (because we start in protected mode without + // paging, we cannot use the harness's fault management yet.) + size_t guest_pages_size = round_page((uintptr_t)&hvtest_end - (uintptr_t)&hvtest_begin); + + const size_t mem_size = 1 * 1024 * 1024; + uint8_t *guest_pages_shadow = valloc(mem_size); + + bzero(guest_pages_shadow, mem_size); + memcpy(guest_pages_shadow, &hvtest_begin, guest_pages_size); + + T_ASSERT_EQ(hv_vm_map(guest_pages_shadow, (hv_gpaddr_t)gpaddr, mem_size, HV_MEMORY_READ | HV_MEMORY_EXEC), + HV_SUCCESS, "map guest memory"); + + // Create entries in PML4. 
+ uint8_t *host_va = guest_pages_shadow; + uint8_t *va = (uint8_t*)gpaddr; + for (unsigned long i = 0; i < guest_pages_size / vm_page_size; i++, va += vm_page_size, host_va += vm_page_size) { + map_page(host_va, va); + } + + uint64_t reason = run_to_next_vm_fault(vcpu, false); + T_ASSERT_EQ(reason, (uint64_t)VMX_REASON_RDMSR, "check for rdmsr"); + T_ASSERT_EQ(get_reg(vcpu, HV_X86_RCX), 0xc0000080LL, "expected EFER rdmsr"); + + set_reg(vcpu, HV_X86_RDX, 0); + set_reg(vcpu, HV_X86_RAX, 0); + set_vmcs(vcpu, VMCS_GUEST_RIP, get_reg(vcpu, HV_X86_RIP)+get_vmcs(vcpu, VMCS_RO_VMEXIT_INSTR_LEN)); + + reason = run_to_next_vm_fault(vcpu, false); + T_ASSERT_EQ(reason, (uint64_t)VMX_REASON_WRMSR, "check for wrmsr"); + T_ASSERT_EQ(get_reg(vcpu, HV_X86_RCX), 0xc0000080LL, "expected EFER wrmsr"); + T_ASSERT_EQ(get_reg(vcpu, HV_X86_RDX), 0x0LL, "expected EFER wrmsr higher bits 0"); + T_ASSERT_EQ(get_reg(vcpu, HV_X86_RAX), 0x100LL, "expected EFER wrmsr lower bits LME"); + + set_vmcs(vcpu, VMCS_GUEST_IA32_EFER, 0x100); + set_vmcs(vcpu, VMCS_GUEST_RIP, get_reg(vcpu, HV_X86_RIP)+get_vmcs(vcpu, VMCS_RO_VMEXIT_INSTR_LEN)); + + // See assembly part of the test for checkpoints. + expect_vmcall_with_value(vcpu, 0x100, false /* PG disabled => + * no PFs expected */); + expect_vmcall_with_value(vcpu, 0x1111, true /* PG now enabled */); + expect_vmcall_with_value(vcpu, 0x2222, true); + + free(guest_pages_shadow); + free(gdt); + + return NULL; +} + +T_DECL(radar61961809_guest, + "rdar://61961809 (Unexpected guest faults with hv_vcpu_run_until, dropping out of long mode)") +{ + vm_setup(); + + hv_gpaddr_t gpaddr = 0x80000000; + pthread_t vcpu_thread = create_vcpu_thread((vcpu_entry_function) + (((uintptr_t)radar61961809_entry & PAGE_MASK) + + gpaddr), + 0, radar61961809_monitor, (void*)gpaddr); + T_ASSERT_POSIX_SUCCESS(pthread_join(vcpu_thread, NULL), "join vcpu"); + + vm_cleanup(); +} + +static void * +superpage_2mb_backed_guest_monitor(void *arg __unused, hv_vcpuid_t vcpu) +{ + setup_protected_mode(vcpu); + + size_t guest_pages_size = round_page((uintptr_t)&hvtest_end - (uintptr_t)&hvtest_begin); + + const size_t mem_size = 2 * 1024 * 1024; + + uint8_t *guest_pages_shadow = mmap(NULL, mem_size, + PROT_READ | PROT_WRITE, MAP_ANON | MAP_PRIVATE, + VM_FLAGS_SUPERPAGE_SIZE_2MB, 0); + + if (guest_pages_shadow == MAP_FAILED) { + /* Getting a 2MB superpage is hard in practice, because memory gets fragmented + * easily. + * T_META_REQUIRES_REBOOT in the T_DECL helps a lot in actually getting a page, + * but in the case that it still fails, we don't want the test to fail through + * no fault of the hypervisor. + */ + T_SKIP("Unable to attain a 2MB superpage. 
Skipping."); + } + + bzero(guest_pages_shadow, mem_size); + memcpy(guest_pages_shadow+0x1000, &hvtest_begin, guest_pages_size); + + T_ASSERT_EQ(hv_vm_map(guest_pages_shadow, 0x40000000, mem_size, HV_MEMORY_READ | HV_MEMORY_EXEC), + HV_SUCCESS, "map guest memory"); + + expect_vmcall_with_value(vcpu, 0x23456, false); + + munmap(guest_pages_shadow, mem_size); + + return NULL; +} + +T_DECL(superpage_2mb_backed_guest, "guest backed by a 2MB superpage", + T_META_REQUIRES_REBOOT(true)) // Helps actually getting a superpage +{ + vm_setup(); + + pthread_t vcpu_thread = create_vcpu_thread((vcpu_entry_function) + (((uintptr_t)simple_protected_mode_vcpu_entry & PAGE_MASK) + + 0x40000000 + 0x1000), + 0, superpage_2mb_backed_guest_monitor, 0); + T_ASSERT_POSIX_SUCCESS(pthread_join(vcpu_thread, NULL), "join vcpu"); + + vm_cleanup(); +} + +static void * +save_restore_regs_monitor(void *arg __unused, hv_vcpuid_t vcpu) +{ + + setup_long_mode(vcpu); + + uint64_t rsp = get_reg(vcpu, HV_X86_RSP); + + set_reg(vcpu, HV_X86_RAX, 0x0101010101010101); + set_reg(vcpu, HV_X86_RBX, 0x0202020202020202); + set_reg(vcpu, HV_X86_RCX, 0x0303030303030303); + set_reg(vcpu, HV_X86_RDX, 0x0404040404040404); + set_reg(vcpu, HV_X86_RSI, 0x0505050505050505); + set_reg(vcpu, HV_X86_RDI, 0x0606060606060606); + + set_reg(vcpu, HV_X86_RBP, 0x0707070707070707); + + set_reg(vcpu, HV_X86_R8, 0x0808080808080808); + set_reg(vcpu, HV_X86_R9, 0x0909090909090909); + set_reg(vcpu, HV_X86_R10, 0x0a0a0a0a0a0a0a0a); + set_reg(vcpu, HV_X86_R11, 0x0b0b0b0b0b0b0b0b); + set_reg(vcpu, HV_X86_R12, 0x0c0c0c0c0c0c0c0c); + set_reg(vcpu, HV_X86_R13, 0x0d0d0d0d0d0d0d0d); + set_reg(vcpu, HV_X86_R14, 0x0e0e0e0e0e0e0e0e); + set_reg(vcpu, HV_X86_R15, 0x0f0f0f0f0f0f0f0f); + + // invalid selectors: ok as long as we don't try to use them + set_reg(vcpu, HV_X86_DS, 0x1010); + set_reg(vcpu, HV_X86_ES, 0x2020); + set_reg(vcpu, HV_X86_FS, 0x3030); + set_reg(vcpu, HV_X86_GS, 0x4040); + + expect_vmcall_with_value(vcpu, (uint64_t)~0x0101010101010101LL, true); + + T_ASSERT_EQ(get_reg(vcpu, HV_X86_RSP), rsp-8, "check if push happened"); + + T_ASSERT_EQ(get_reg(vcpu, HV_X86_RAX), (uint64_t)~0x0101010101010101LL, "check if RAX negated"); + T_ASSERT_EQ(get_reg(vcpu, HV_X86_RBX), (uint64_t)~0x0202020202020202LL, "check if RBX negated"); + T_ASSERT_EQ(get_reg(vcpu, HV_X86_RCX), (uint64_t)~0x0303030303030303LL, "check if RCX negated"); + T_ASSERT_EQ(get_reg(vcpu, HV_X86_RDX), (uint64_t)~0x0404040404040404LL, "check if RDX negated"); + T_ASSERT_EQ(get_reg(vcpu, HV_X86_RSI), (uint64_t)~0x0505050505050505LL, "check if RSI negated"); + T_ASSERT_EQ(get_reg(vcpu, HV_X86_RDI), (uint64_t)~0x0606060606060606LL, "check if RDI negated"); + + T_ASSERT_EQ(get_reg(vcpu, HV_X86_RBP), (uint64_t)~0x0707070707070707LL, "check if RBP negated"); + + T_ASSERT_EQ(get_reg(vcpu, HV_X86_R8), (uint64_t)~0x0808080808080808LL, "check if R8 negated"); + T_ASSERT_EQ(get_reg(vcpu, HV_X86_R9), (uint64_t)~0x0909090909090909LL, "check if R9 negated"); + T_ASSERT_EQ(get_reg(vcpu, HV_X86_R10), (uint64_t)~0x0a0a0a0a0a0a0a0aLL, "check if R10 negated"); + T_ASSERT_EQ(get_reg(vcpu, HV_X86_R11), (uint64_t)~0x0b0b0b0b0b0b0b0bLL, "check if R11 negated"); + T_ASSERT_EQ(get_reg(vcpu, HV_X86_R12), (uint64_t)~0x0c0c0c0c0c0c0c0cLL, "check if R12 negated"); + T_ASSERT_EQ(get_reg(vcpu, HV_X86_R13), (uint64_t)~0x0d0d0d0d0d0d0d0dLL, "check if R13 negated"); + T_ASSERT_EQ(get_reg(vcpu, HV_X86_R14), (uint64_t)~0x0e0e0e0e0e0e0e0eLL, "check if R14 negated"); + T_ASSERT_EQ(get_reg(vcpu, HV_X86_R15), (uint64_t)~0x0f0f0f0f0f0f0f0fLL, 
"check if R15 negated"); + + T_ASSERT_EQ(get_reg(vcpu, HV_X86_RAX), (uint64_t)~0x0101010101010101LL, "check if RAX negated"); + T_ASSERT_EQ(get_reg(vcpu, HV_X86_RBX), (uint64_t)~0x0202020202020202LL, "check if RBX negated"); + T_ASSERT_EQ(get_reg(vcpu, HV_X86_RCX), (uint64_t)~0x0303030303030303LL, "check if RCX negated"); + T_ASSERT_EQ(get_reg(vcpu, HV_X86_RDX), (uint64_t)~0x0404040404040404LL, "check if RDX negated"); + + // Cannot set selector to arbitrary value from the VM, but we have the RPL field to play with + T_ASSERT_EQ(get_reg(vcpu, HV_X86_DS), 1ULL, "check if DS == 1"); + T_ASSERT_EQ(get_reg(vcpu, HV_X86_ES), 2ULL, "check if ES == 2"); + T_ASSERT_EQ(get_reg(vcpu, HV_X86_FS), 3ULL, "check if FS == 3"); + T_ASSERT_EQ(get_reg(vcpu, HV_X86_GS), 1ULL, "check if GS == 1"); + + expect_vmcall_with_value(vcpu, (uint64_t)~0x0101010101010101LL, true); + + T_ASSERT_EQ(get_reg(vcpu, HV_X86_RSP), rsp-16, "check if push happened again"); + + return NULL; +} + +T_DECL(save_restore_regs, "check if general purpose and segment registers are properly saved and restored") +{ + vm_setup(); + + pthread_t vcpu_thread = create_vcpu_thread(save_restore_regs_entry, 0x10000, save_restore_regs_monitor, 0); + T_ASSERT_POSIX_SUCCESS(pthread_join(vcpu_thread, NULL), "join vcpu"); + + vm_cleanup(); +} + +static void * +save_restore_debug_regs_monitor(void *arg __unused, hv_vcpuid_t vcpu) +{ + + setup_long_mode(vcpu); + + set_reg(vcpu, HV_X86_RAX, 0x0101010101010101); + + set_reg(vcpu, HV_X86_DR0, 0x1111111111111111); + set_reg(vcpu, HV_X86_DR1, 0x2222222222222222); + set_reg(vcpu, HV_X86_DR2, 0x3333333333333333); + set_reg(vcpu, HV_X86_DR3, 0x4444444444444444); + + // debug status and control regs (some bits are reserved, one other bit would generate an exception) + const uint64_t dr6_force_clear = 0xffffffff00001000ULL; + const uint64_t dr6_force_set = 0xffff0ff0ULL; + const uint64_t dr7_force_clear = 0xffffffff0000f000ULL; + const uint64_t dr7_force_set = 0x0400ULL; + + set_reg(vcpu, HV_X86_DR6, (0x5555555555555555ULL | dr6_force_set) & ~(dr6_force_clear)); + set_reg(vcpu, HV_X86_DR7, (0x5555555555555555ULL | dr7_force_set) & ~(dr7_force_clear)); + + expect_vmcall_with_value(vcpu, 0x0101010101010101LL, true); + + T_ASSERT_EQ(get_reg(vcpu, HV_X86_DR0), (uint64_t)~0x1111111111111111LL, "check if DR0 negated"); + T_ASSERT_EQ(get_reg(vcpu, HV_X86_DR1), (uint64_t)~0x2222222222222222LL, "check if DR1 negated"); + T_ASSERT_EQ(get_reg(vcpu, HV_X86_DR2), (uint64_t)~0x3333333333333333LL, "check if DR2 negated"); + T_ASSERT_EQ(get_reg(vcpu, HV_X86_DR3), (uint64_t)~0x4444444444444444LL, "check if DR3 negated"); + + T_ASSERT_EQ(get_reg(vcpu, HV_X86_DR6), (0xaaaaaaaaaaaaaaaaULL | dr6_force_set) & ~(dr6_force_clear), "check if DR6 negated"); + T_ASSERT_EQ(get_reg(vcpu, HV_X86_DR7), (0xaaaaaaaaaaaaaaaaULL | dr7_force_set) & ~(dr7_force_clear), "check if DR7 negated"); + + expect_vmcall_with_value(vcpu, 0x0101010101010101LL, true); + + return NULL; +} + +T_DECL(save_restore_debug_regs, "check if debug registers are properly saved and restored", + T_META_EXPECTFAIL("rdar://57433961 (SEED: Web: Writes to debug registers (DR0 etc.) 
are not saved)")) +{ + vm_setup(); + + pthread_t vcpu_thread = create_vcpu_thread(save_restore_debug_regs_entry, 0x10000, save_restore_debug_regs_monitor, 0); + T_ASSERT_POSIX_SUCCESS(pthread_join(vcpu_thread, NULL), "join vcpu"); + + vm_cleanup(); +} + +#define T_NATIVE_MSR(msr) + +static void * +native_msr_monitor(void *arg __unused, hv_vcpuid_t vcpu) +{ + const uint32_t msrs[] = { + MSR_IA32_STAR, + MSR_IA32_LSTAR, + MSR_IA32_CSTAR, + MSR_IA32_FMASK, + MSR_IA32_KERNEL_GS_BASE, + MSR_IA32_TSC, + MSR_IA32_TSC_AUX, + + MSR_IA32_SYSENTER_CS, + MSR_IA32_SYSENTER_ESP, + MSR_IA32_SYSENTER_EIP, + MSR_IA32_FS_BASE, + MSR_IA32_GS_BASE, + }; + const int msr_count = sizeof(msrs)/sizeof(uint32_t); + + setup_long_mode(vcpu); + + for (int i = 0; i < msr_count; i++) { + T_ASSERT_EQ(hv_vcpu_enable_native_msr(vcpu, msrs[i], true), HV_SUCCESS, "enable native MSR %x", msrs[i]); + } + + expect_vmcall_with_value(vcpu, 0x23456, true); + + return NULL; +} + +T_DECL(native_msr_clobber, "enable and clobber native MSRs in the guest") +{ + vm_setup(); + + pthread_t vcpu_thread = create_vcpu_thread(native_msr_vcpu_entry, 0x10000, native_msr_monitor, 0); + T_ASSERT_POSIX_SUCCESS(pthread_join(vcpu_thread, NULL), "join vcpu"); + + vm_cleanup(); +} + +static void * +radar60691363_monitor(void *arg __unused, hv_vcpuid_t vcpu) +{ + setup_long_mode(vcpu); + + uint64_t proc2_cap = get_cap(HV_VMX_CAP_PROCBASED2); + set_vmcs(vcpu, VMCS_CTRL_CPU_BASED2, canonicalize(CPU_BASED2_VMCS_SHADOW, proc2_cap)); + + T_ASSERT_EQ(hv_vmx_vcpu_set_shadow_access(vcpu, VMCS_GUEST_ES, + HV_SHADOW_VMCS_READ | HV_SHADOW_VMCS_WRITE), HV_SUCCESS, + "enable VMCS_GUEST_ES shadow access"); + T_ASSERT_EQ(hv_vmx_vcpu_write_shadow_vmcs(vcpu, VMCS_GUEST_ES, 0x1234), HV_SUCCESS, + "set VMCS_GUEST_ES in shadow"); + + T_ASSERT_EQ(hv_vmx_vcpu_set_shadow_access(vcpu, VMCS_RO_EXIT_QUALIFIC, + HV_SHADOW_VMCS_READ | HV_SHADOW_VMCS_WRITE), HV_SUCCESS, + "enable VMCS_RO_EXIT_QUALIFIC shadow access"); + T_ASSERT_EQ(hv_vmx_vcpu_write_shadow_vmcs(vcpu, VMCS_RO_EXIT_QUALIFIC, 0x111), HV_SUCCESS, + "set VMCS_RO_EXIT_QUALIFIC in shadow"); + + T_ASSERT_EQ(hv_vmx_vcpu_set_shadow_access(vcpu, VMCS_RO_IO_RCX, + HV_SHADOW_VMCS_READ | HV_SHADOW_VMCS_WRITE), HV_SUCCESS, + "enable VMCS_RO_IO_RCX shadow access"); + T_ASSERT_EQ(hv_vmx_vcpu_write_shadow_vmcs(vcpu, VMCS_RO_IO_RCX, 0x2323), HV_SUCCESS, + "set VMCS_RO_IO_RCX in shadow"); + + expect_vmcall_with_value(vcpu, 0x1234, true); + expect_vmcall_with_value(vcpu, 0x111, true); + expect_vmcall_with_value(vcpu, 0x2323, true); + + expect_vmcall_with_value(vcpu, 0x4567, true); + + uint64_t value; + T_ASSERT_EQ(hv_vmx_vcpu_read_shadow_vmcs(vcpu, VMCS_GUEST_ES, &value), HV_SUCCESS, + "read updated VMCS_GUEST_ES in shadow"); + T_ASSERT_EQ(value, 0x9191LL, "VMCS_GUEST_ES value is updated"); + T_ASSERT_EQ(hv_vmx_vcpu_read_shadow_vmcs(vcpu, VMCS_RO_EXIT_QUALIFIC, &value), HV_SUCCESS, + "read updated VMCS_RO_EXIT_QUALIFIC in shadow"); + T_ASSERT_EQ(value, 0x9898LL, "VMCS_RO_EXIT_QUALIFIC value is updated"); + T_ASSERT_EQ(hv_vmx_vcpu_read_shadow_vmcs(vcpu, VMCS_RO_IO_RCX, &value), HV_SUCCESS, + "read updated VMCS_RO_IO_RCX in shadow"); + T_ASSERT_EQ(value, 0x7979LL, "VMCS_RO_IO_RCX value is updated"); + + // This must not work. 
+ T_ASSERT_EQ(hv_vmx_vcpu_set_shadow_access(vcpu, VMCS_CTRL_EPTP, + HV_SHADOW_VMCS_READ | HV_SHADOW_VMCS_WRITE), HV_SUCCESS, + "enable VMCS_CTRL_EPTP shadow access"); + T_ASSERT_EQ(hv_vmx_vcpu_read_vmcs(vcpu, VMCS_CTRL_EPTP, &value), HV_BAD_ARGUMENT, + "accessing EPTP in ordinary VMCS fails"); + + return NULL; +} + +T_DECL(radar60691363, "rdar://60691363 (SEED: Web: Allow shadowing of read only VMCS fields)") +{ + vm_setup(); + + uint64_t proc2_cap = get_cap(HV_VMX_CAP_PROCBASED2); + + if (!(proc2_cap & ((uint64_t)CPU_BASED2_VMCS_SHADOW << 32))) { + T_SKIP("Device does not support shadow VMCS, skipping."); + } + + pthread_t vcpu_thread = create_vcpu_thread(radar60691363_entry, 0x10000, radar60691363_monitor, 0); + T_ASSERT_POSIX_SUCCESS(pthread_join(vcpu_thread, NULL), "join vcpu"); + + vm_cleanup(); +} + +T_DECL(radar63641279, "rdar://63641279 (Evaluate \"no SMT\" scheduling option/sidechannel security mitigation for Hypervisor.framework VMs)") +{ + const uint64_t ALL_MITIGATIONS = + HV_VM_MITIGATION_A_ENABLE | + HV_VM_MITIGATION_B_ENABLE | + HV_VM_MITIGATION_C_ENABLE | + HV_VM_MITIGATION_D_ENABLE | + HV_VM_MITIGATION_E_ENABLE; // NO_SMT + + T_SETUPBEGIN; + + if (hv_support() < 1) { + T_SKIP("Running on non-HV target, skipping..."); + return; + } + + T_ASSERT_EQ(hv_vm_create( HV_VM_SPECIFY_MITIGATIONS | ALL_MITIGATIONS), + HV_SUCCESS, "Created vm"); + + T_SETUPEND; + + pthread_t vcpu_thread = create_vcpu_thread( + (vcpu_entry_function) (((uintptr_t)simple_real_mode_vcpu_entry & PAGE_MASK) + 0x1000), + 0, simple_real_mode_monitor, 0); + T_ASSERT_POSIX_SUCCESS(pthread_join(vcpu_thread, NULL), "join vcpu"); + + vm_cleanup(); +} diff --git a/tests/hvtest_x86_asm.s b/tests/hvtest_x86_asm.s new file mode 100644 index 000000000..c2783f5c5 --- /dev/null +++ b/tests/hvtest_x86_asm.s @@ -0,0 +1,486 @@ +#include + + .text + + .balign 0x1000 + + .global _hvtest_begin +_hvtest_begin: + + /* + * Everything between _hvtest_begin and _hvtest_end will be copied for + * tests that don't use the page faulting of the test harness. + * You can put constants here. + */ + +.code64 + + .balign 16 + + .global _save_restore_regs_entry +_save_restore_regs_entry: + + pushq %rax + pushq %rcx + + xor %rcx, %rcx + + pushq %rbx + + + /* + * For all registers to test, each of these blocks: + * 1. increments rcx (to keep track in case of test failure), + * 2. checks the register's value against a (constant) template + * 3. flips all bits for the VMM to later verify that the changes value is available. + * + * For a second pass, bits are all flipped back to their original state after + * the vmcall. 
+ */ + + + // segment registers (pass 1) + + incq %rcx + movq $0x1010, %rax + movq %ds, %rbx + cmpq %rbx, %rax + jne .foul + movq $1, %rbx + movq %rbx, %ds + + incq %rcx + movq $0x2020, %rax + movq %es, %rbx + cmpq %rbx, %rax + jne .foul + movq $2, %rbx + movq %rbx, %es + + incq %rcx + movq $0x3030, %rax + movq %fs, %rbx + cmpq %rbx, %rax + jne .foul + movq $3, %rbx + movq %rbx, %fs + + incq %rcx + movq $0x4040, %rax + movq %gs, %rbx + cmpq %rbx, %rax + jne .foul + movq $1, %rbx + movq %rbx, %gs + + popq %rbx + + jmp .pass + +.pass2: + pushq %rax + pushq %rcx + + xor %rcx, %rcx + + pushq %rbx + + // segment registers (pass 2) + + incq %rcx + movq $0x1, %rax + movq %ds, %rbx + cmpq %rbx, %rax + jne .foul + movq $1, %rbx + movq %rbx, %ds + + incq %rcx + movq $0x2, %rax + movq %es, %rbx + cmpq %rbx, %rax + jne .foul + movq $2, %rbx + movq %rbx, %es + + incq %rcx + movq $0x3, %rax + movq %fs, %rbx + cmpq %rbx, %rax + jne .foul + movq $3, %rbx + movq %rbx, %fs + + incq %rcx + movq $0x1, %rax + movq %gs, %rbx + cmpq %rbx, %rax + jne .foul + movq $1, %rbx + movq %rbx, %gs + + popq %rbx + +.pass: + // general purpose registers + + incq %rcx + movq $0x0101010101010101, %rax + cmpq 8(%rsp), %rax // %rax on stack + jne .foul + notq 8(%rsp) + + incq %rcx + movq $0x0202020202020202, %rax + cmpq %rbx, %rax + jne .foul + notq %rbx + + incq %rcx + movq $0x0303030303030303, %rax + cmpq (%rsp), %rax // %rcx on stack + jne .foul + notq (%rsp) + + incq %rcx + movq $0x0404040404040404, %rax + cmpq %rdx, %rax + jne .foul + notq %rdx + + incq %rcx + movq $0x0505050505050505, %rax + cmpq %rsi, %rax + jne .foul + notq %rsi + + incq %rcx + movq $0x0606060606060606, %rax + cmpq %rdi, %rax + jne .foul + notq %rdi + + incq %rcx + movq $0x0707070707070707, %rax + cmpq %rbp, %rax + jne .foul + notq %rbp + + incq %rcx + movq $0x0808080808080808, %rax + cmpq %r8, %rax + jne .foul + notq %r8 + + incq %rcx + movq $0x0909090909090909, %rax + cmpq %r9, %rax + jne .foul + notq %r9 + + incq %rcx + movq $0x0a0a0a0a0a0a0a0a, %rax + cmpq %r10, %rax + jne .foul + notq %r10 + + incq %rcx + movq $0x0b0b0b0b0b0b0b0b, %rax + cmpq %r11, %rax + jne .foul + notq %r11 + + incq %rcx + movq $0x0c0c0c0c0c0c0c0c, %rax + cmpq %r12, %rax + jne .foul + notq %r12 + + incq %rcx + movq $0x0d0d0d0d0d0d0d0d, %rax + cmpq %r13, %rax + jne .foul + notq %r13 + + incq %rcx + movq $0x0e0e0e0e0e0e0e0e, %rax + cmpq %r14, %rax + jne .foul + notq %r14 + + incq %rcx + movq $0x0f0f0f0f0f0f0f0f, %rax + cmpq %r15, %rax + jne .foul + notq %r15 + + popq %rcx + movq (%rsp), %rax + vmcall + + notq %rax + notq %rbx + notq %rcx + notq %rdx + notq %rsi + notq %rdi + notq %rbp + notq %r8 + notq %r9 + notq %r10 + notq %r11 + notq %r12 + notq %r13 + notq %r14 + notq %r15 + + jmp .pass2 + +.foul: + movq %rcx, %rax + vmcall + + .global _save_restore_debug_regs_entry +_save_restore_debug_regs_entry: + + pushq %rax + xor %rcx, %rcx + + /* + * For all registers to test, each of these blocks: + * 1. increments rcx (to keep track in case of test failure), + * 2. checks the register's value against a (constant) template + * 3. flips all bits for the VMM to later verify that the changes value is available. + * + * For a second pass, bits are all flipped back to their original state after + * the vmcall. 
+ */ + + incq %rcx + movq $0x1111111111111111, %rbx + movq %dr0, %rax + cmpq %rbx, %rax + jne .foul + notq %rbx + movq %rbx, %dr0 + + incq %rcx + movq $0x2222222222222222, %rbx + movq %dr1, %rax + cmpq %rbx, %rax + jne .foul + notq %rbx + movq %rbx, %dr1 + + incq %rcx + movq $0x3333333333333333, %rbx + movq %dr2, %rax + cmpq %rbx, %rax + jne .foul + notq %rbx + movq %rbx, %dr2 + + incq %rcx + movq $0x4444444444444444, %rbx + movq %dr3, %rax + cmpq %rbx, %rax + jne .foul + notq %rbx + movq %rbx, %dr3 + + /* + * flip only defined bits for debug status and control registers + * (and also don't flip General Detect Enable, as the next access + * to any debug register would generate an exception) + */ + + incq %rcx + movq $0x5555555555555555, %rbx + mov $0xffff0ff0, %rax + orq %rax, %rbx + movq $0xffffefff, %rax + andq %rax, %rbx + movq %dr6, %rax + cmpq %rbx, %rax + jne .foul + notq %rbx + mov $0xffff0ff0, %rax + orq %rax, %rbx + movq $0xffffefff, %rax + andq %rax, %rbx + movq %rbx, %dr6 + + incq %rcx + movq $0x5555555555555555, %rbx + orq $0x400, %rbx + movq $0xffff0fff, %rax + andq %rax, %rbx + movq %dr7, %rax + cmpq %rbx, %rax + jne .foul + notq %rbx + orq $0x400, %rbx + movq $0xffff0fff, %rax + andq %rax, %rbx + movq %rbx, %dr7 + + popq %rax + vmcall + + movq %dr0, %rbx + notq %rbx + movq %rbx, %dr0 + + movq %dr1, %rbx + notq %rbx + movq %rbx, %dr1 + + movq %dr2, %rbx + notq %rbx + movq %rbx, %dr2 + + movq %dr3, %rbx + notq %rbx + movq %rbx, %dr3 + + movq %dr6, %rbx + notq %rbx + mov $0xffff0ff0, %rax + orq %rax, %rbx + movq $0xffffefff, %rax + andq %rax, %rbx + movq %rbx, %dr6 + + movq %dr7, %rbx + notq %rbx + orq $0x400, %rbx + movq $0xffff0fff, %rax + andq %rax, %rbx + movq %rbx, %dr7 + + jmp _save_restore_debug_regs_entry // 2nd pass + +.code32 + + .global _simple_protected_mode_vcpu_entry +_simple_protected_mode_vcpu_entry: + + movl $0x23456, %eax + vmcall + +.code16 + + .global _simple_real_mode_vcpu_entry +_simple_real_mode_vcpu_entry: + + movl $0x23456, %eax + vmcall + +.code32 + + .global _radar61961809_entry +_radar61961809_entry: + + mov $0x99999999, %ebx // sentinel address, see _radar61961809_loop64 + + mov $0xc0000080,%ecx // IA32_EFER + rdmsr + or $0x100,%eax // .LME + wrmsr + + vmcall + + mov %cr0,%ecx + or $0x80000000,%ecx // CR0.PG + mov %ecx,%cr0 + + // first (%edi) 6 bytes are _radar61961809_prepare far ptr + ljmp *(%edi) + +.code32 + + .global _radar61961809_prepare +_radar61961809_prepare: + + /* + * We switched into long mode, now immediately out, and the test + * will switch back in. + * + * This is done to suppress (legitimate) EPT and Page Fault exits. + * Until CR0.PG is enabled (which is what effectively activates + * long mode), the page tables are never looked at. Right after + * setting PG, that changes immediately, effecting transparently + * handled EPT violations. Additionally, the far jump that + * would be necessary to switch into a 64bit code segment would + * also cause EPT violations and PFs when fetching the segment + * descriptor from the GDT. + * + * By first jumping into a 32bit code segment after enabling PG + * once, we "warm up" both EPT and (harness managed) page tables, + * so the next exit after the far jump will most likely be an + * IRQ exit, most faithfully reproducing the problem. + */ + + mov %cr0,%ecx + and $~0x80000000,%ecx + mov %ecx,%cr0 + + mov $0x1111, %eax + vmcall + + // This is where the actual test really starts. 
+ mov %cr0,%ecx + or $0x80000000,%ecx + mov %ecx,%cr0 // enable PG => long mode + + xor %ecx, %ecx + + add $8,%edi + ljmp *(%edi) // _radar61961809_loop64 + +.code64 + + .global _radar61961809_loop64 +_radar61961809_loop64: +1: + // as 16bit code, this instruction will be: + // add %al,(%bx,%si) + // and cause an obvious EPT violation (%bx is 0x9999) + mov $0x1,%ebp + + // loop long enough for a good chance to an IRQ exit + dec %ecx + jnz 1b + + // if we reach here, we stayed in long mode. + mov $0x2222, %eax + vmcall + + .global _radar60691363_entry +_radar60691363_entry: + movq $0x800, %rsi // VMCS_GUEST_ES + vmreadq %rsi, %rax + vmcall + movq $0x6400, %rsi // VMCS_RO_EXIT_QUALIFIC + vmreadq %rsi, %rax + vmcall + movq $0x6402, %rsi // VMCS_RO_IO_RCX + vmreadq %rsi, %rax + vmcall + + movq $0x800, %rsi // VMCS_GUEST_ES + movq $0x9191, %rax + vmwriteq %rax, %rsi + movq $0x6400, %rsi // VMCS_RO_EXIT_QUALIFIC + movq $0x9898, %rax + vmwriteq %rax, %rsi + movq $0x6402, %rsi // VMCS_RO_IO_RCX + movq $0x7979, %rax + vmwriteq %rax, %rsi + + movq $0x4567, %rax + + vmcall + + .global _hvtest_end +_hvtest_end: diff --git a/tests/hvtest_x86_guest.c b/tests/hvtest_x86_guest.c new file mode 100644 index 000000000..9ad7f8dd4 --- /dev/null +++ b/tests/hvtest_x86_guest.c @@ -0,0 +1,77 @@ +// Do not include system headers in this file. Code in this file needs to be +// self-contained, as it runs in a VM. +#include "hvtest_x86_guest.h" +#include +#include + +#define VMCALL(x) __asm__("vmcall" : : "a" ((x)) :) + +void +simple_long_mode_vcpu_entry(uint64_t arg) +{ + VMCALL(arg + 0x23456); + + while (true) { + } +} + +void +smp_vcpu_entry(uint64_t arg) +{ + // Performing this atomic operation on the same memory on all VCPUs confirms + // that they are running in the same IPA space, and that the space is + // shareable. 
+ atomic_uint *count = (atomic_uint *)arg; + + VMCALL(atomic_fetch_add_explicit(count, 1, + memory_order_relaxed)); + + while (true) { + } +} + +__unused static inline uint64_t +rdmsr(uint64_t msr) +{ + uint32_t idx = (uint32_t)msr; + uint32_t outhi, outlo; + + __asm__("rdmsr" : "=d"(outhi), "=a"(outlo) : "c"(idx)); + + return ((uint64_t)outhi << 32) | outlo; +} + +static inline void +wrmsr(uint64_t msr, uint64_t value) +{ + uint32_t idx = (uint32_t)msr; + uint32_t inhi = (uint32_t)((value & 0xffffffff00000000UL) >> 32); + uint32_t inlo = (uint32_t)(value & 0xffffffffUL); + + __asm__("wrmsr" : : "d"(inhi),"a"(inlo),"c"(idx)); +} + +void +native_msr_vcpu_entry(uint64_t arg __unused) +{ + wrmsr(MSR_IA32_STAR, 0x123456789abcdef0); + wrmsr(MSR_IA32_LSTAR, 0x123456789abc); + wrmsr(MSR_IA32_CSTAR, 0x123456789abc); + + wrmsr(MSR_IA32_FMASK, 0x123456789abcdef0); + + wrmsr(MSR_IA32_TSC_AUX, 0x123); + + wrmsr(MSR_IA32_SYSENTER_CS, 0xffff); + wrmsr(MSR_IA32_SYSENTER_ESP, 0x123456789abc); + wrmsr(MSR_IA32_SYSENTER_EIP, 0x123456789abc); + + wrmsr(MSR_IA32_FS_BASE, 0x123456789abc); + wrmsr(MSR_IA32_GS_BASE, 0x123456789abc); + wrmsr(MSR_IA32_KERNEL_GS_BASE, 0x123456789abc); + + VMCALL(0x23456); + + while (true) { + } +} diff --git a/tests/hvtest_x86_guest.h b/tests/hvtest_x86_guest.h new file mode 100644 index 000000000..5cb41f34c --- /dev/null +++ b/tests/hvtest_x86_guest.h @@ -0,0 +1,31 @@ +#pragma once + +#include +#include + +extern void save_restore_regs_entry(uint64_t arg) OS_NORETURN; +extern void save_restore_debug_regs_entry(uint64_t arg) OS_NORETURN; +extern void simple_real_mode_vcpu_entry(uint64_t arg) OS_NORETURN; +extern void simple_protected_mode_vcpu_entry(uint64_t arg) OS_NORETURN; +extern void simple_long_mode_vcpu_entry(uint64_t arg) OS_NORETURN; +extern void smp_vcpu_entry(uint64_t) OS_NORETURN; +extern void radar61961809_entry(uint64_t) OS_NORETURN; +extern void radar61961809_prepare(uint64_t) OS_NORETURN; +extern void radar61961809_loop64(uint64_t) OS_NORETURN; +extern void radar60691363_entry(uint64_t) OS_NORETURN; + +#define MSR_IA32_STAR 0xc0000081 +#define MSR_IA32_LSTAR 0xc0000082 +#define MSR_IA32_CSTAR 0xc0000083 +#define MSR_IA32_FMASK 0xc0000084 +#define MSR_IA32_KERNEL_GS_BASE 0xc0000102 +#define MSR_IA32_TSC 0x00000010 +#define MSR_IA32_TSC_AUX 0xc0000103 + +#define MSR_IA32_SYSENTER_CS 0x00000174 +#define MSR_IA32_SYSENTER_ESP 0x00000175 +#define MSR_IA32_SYSENTER_EIP 0x00000176 +#define MSR_IA32_FS_BASE 0xc0000100 +#define MSR_IA32_GS_BASE 0xc0000101 + +extern void native_msr_vcpu_entry(uint64_t) OS_NORETURN; diff --git a/tests/inspect_port.c b/tests/inspect_port.c new file mode 100644 index 000000000..b128b5b41 --- /dev/null +++ b/tests/inspect_port.c @@ -0,0 +1,581 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +int task_for_pid(mach_port_name_t target_tport, int pid, mach_port_name_t *t); +int task_read_for_pid(mach_port_name_t target_tport, int pid, mach_port_name_t *t); +int task_inspect_for_pid(mach_port_name_t target_tport, int pid, mach_port_name_t *t); +int task_name_for_pid(mach_port_name_t target_tport, int pid, mach_port_name_t *t); +static int test_conversion_eval(pid_t current, pid_t victim, int translation); + +static int g_tfpFail = 0; +static int g_trfpFail = 0; +static int g_tifpFail = 0; +static int g_tnfpFail = 0; + +static pthread_mutex_t g_lock; + +#define NAME 0 +#define INSPECT 1 +#define READ 2 +#define FULL 3 +#define POLY 4 + +/* + * 3. 
child still spawn as platform binary + */ + +/* Mimic the behavior of task_conversion_eval in kernel. + */ +static int +test_conversion_eval(pid_t current, pid_t victim, int translation) +{ + uint32_t my_csflags = 0; + uint32_t victim_csflags = 0; + csops(victim, CS_OPS_STATUS, &victim_csflags, sizeof(victim_csflags)); + csops(current, CS_OPS_STATUS, &my_csflags, sizeof(my_csflags)); + + switch (translation) { + case FULL: + case READ: + if (victim == 0) { + return false; + } + if (!(my_csflags & CS_PLATFORM_BINARY) && (victim_csflags & CS_PLATFORM_BINARY)) { + return false; + } + break; + default: + break; + } + + return true; +} + +static void +check_result(kern_return_t kr, int port_type, int translation, int low, char *test_str, pid_t victim) +{ + char error[100]; + + if (translation == POLY) { + if (port_type == FULL) { + translation = INSPECT; + } else { + translation = port_type; + } + } + + if (port_type < low) { + goto fail; + } else if (port_type < translation) { + goto fail; + } else if (!test_conversion_eval(getpid(), victim, translation)) { + goto fail; + } else { + goto success; + } + +fail: + snprintf(error, sizeof(error), "%s should fail with %d on %d.\n", test_str, port_type, victim); + T_QUIET; T_EXPECT_NE(kr, 0, "check_result: %s", error); + return; +success: + snprintf(error, sizeof(error), "%s should succeed with %d on %d.\n", test_str, port_type, victim); + T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "check_result: %s", error); + return; +} + +static void +test_thread_port(mach_port_name_t thread, int type, pid_t victim) +{ + kern_return_t kr; + mach_port_t name = MACH_PORT_NULL; + thread_info_data_t th_info; + mach_msg_type_number_t th_info_cnt = THREAD_INFO_MAX; + + kr = thread_info(thread, THREAD_BASIC_INFO, (thread_info_t)th_info, &th_info_cnt); + check_result(kr, type, INSPECT, INSPECT, "thread_info", victim); + + kr = thread_get_special_port(thread, THREAD_KERNEL_PORT, &name); + check_result(kr, type, POLY, FULL, "thread_get_special_port: THREAD_KERNEL_PORT", victim); + kr = mach_port_deallocate(mach_task_self(), name); + T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "mach_port_deallocate"); + + kr = thread_get_special_port(thread, THREAD_READ_PORT, &name); + check_result(kr, type, POLY, READ, "thread_get_special_port: THREAD_READ_PORT", victim); + kr = mach_port_deallocate(mach_task_self(), name); + T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "mach_port_deallocate"); + + kr = thread_get_special_port(thread, THREAD_INSPECT_PORT, &name); + check_result(kr, type, POLY, INSPECT, "thread_get_special_port: THREAD_INSPECT_PORT", victim); + kr = mach_port_deallocate(mach_task_self(), name); + T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "mach_port_deallocate"); +} + +static void +test_task_port(mach_port_name_t port, int type) +{ + kern_return_t kr; + volatile int data = 0x4141; + volatile int new_value = 0x4242; + pid_t victim; + if (port == MACH_PORT_NULL) { + return; + } + kr = pid_for_task(port, &victim); + if (victim == -1) { + T_LOG("pid_for_task: port = 0x%x, type = %u is not valid anymore", port, type); + return; + } + T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "pid_for_task, port = 0x%x, type = %u, pid = %u", port, type, victim); + + /************* TASK_INFO ************/ + struct task_basic_info info = {}; + mach_msg_type_number_t cnt = sizeof(info); + kr = task_info(port, TASK_BASIC_INFO, (task_info_t)&info, &cnt); + check_result(kr, type, NAME, NAME, "task_info", victim); + + /************ MACH_VM_* ************/ + + if (victim == getpid()) { + kr = mach_vm_write(port, + (mach_vm_address_t)&data, + 
(vm_offset_t)&new_value, + (mach_msg_type_number_t)sizeof(int)); + check_result(kr, type, FULL, FULL, "mach_vm_write", victim); + + vm_offset_t read_value = 0; + mach_msg_type_number_t read_cnt = 0; + kr = mach_vm_read(port, + (mach_vm_address_t)&data, + (mach_msg_type_number_t)sizeof(int), + &read_value, + &read_cnt); + check_result(kr, type, READ, READ, "mach_vm_read", victim); + } + + /************ TASK_GET_SPECIAL_PORT ************/ + + mach_port_t name = MACH_PORT_NULL; + kr = task_get_special_port(port, TASK_KERNEL_PORT, &name); + check_result(kr, type, POLY, FULL, "task_get_special_port: TASK_KERNEL_PORT", victim); + + name = MACH_PORT_NULL; + kr = task_get_special_port(port, TASK_READ_PORT, &name); + check_result(kr, type, POLY, READ, "task_get_special_port: TASK_READ_PORT", victim); + if (kr == KERN_SUCCESS) { + kr = mach_port_deallocate(mach_task_self(), name); + T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "mach_port_deallocate"); + } + + name = MACH_PORT_NULL; + kr = task_get_special_port(port, TASK_INSPECT_PORT, &name); + check_result(kr, type, POLY, INSPECT, "task_get_special_port: TASK_INSPECT_PORT", victim); + if (kr == KERN_SUCCESS) { + kr = mach_port_deallocate(mach_task_self(), name); + T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "mach_port_deallocate"); + } + + name = MACH_PORT_NULL; + kr = task_get_special_port(port, TASK_NAME_PORT, &name); + check_result(kr, type, POLY, INSPECT, "task_get_special_port: TASK_NAME_PORT", victim); + if (kr == KERN_SUCCESS) { + kr = mach_port_deallocate(mach_task_self(), name); + T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "mach_port_deallocate"); + } + + name = MACH_PORT_NULL; + kr = task_get_special_port(port, TASK_HOST_PORT, &name); + check_result(kr, type, POLY, FULL, "task_get_special_port: TASK_HOST_PORT", victim); + if (kr == KERN_SUCCESS) { + if (victim == getpid()) { + mach_port_t host = mach_host_self(); + T_QUIET; T_EXPECT_EQ(host, name, "mach_host_self == task_get_special_port(.. 
TASK_HOST_PORT)"); + } + } + + name = MACH_PORT_NULL; + kr = task_get_special_port(port, TASK_BOOTSTRAP_PORT, &name); + check_result(kr, type, POLY, FULL, "task_get_special_port: TASK_BOOTSTRAP_PORT", victim); + + /************ TEST IPC_SPACE_READ AND IPC_SPACE_INSPECT ************/ + if (victim == getpid()) { + mach_port_status_t status; + mach_msg_type_number_t statusCnt = MACH_PORT_LIMITS_INFO_COUNT; + kr = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &name); + T_QUIET; T_EXPECT_MACH_SUCCESS(kr, 0, "mach_port_allocate should succeed"); + + kr = mach_port_get_attributes(port, name, MACH_PORT_LIMITS_INFO, (mach_port_info_t)&status, &statusCnt); + check_result(kr, type, POLY, READ, "mach_port_get_attributes", victim); + + mach_port_context_t context; + kr = mach_port_get_context(port, name, &context); + check_result(kr, type, POLY, READ, "mach_port_get_context", victim); + + kr = mach_port_destruct(mach_task_self(), name, 0, 0); + T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "mach_port_destruct"); + } + + ipc_info_space_basic_t sinfo; + kr = mach_port_space_basic_info(port, &sinfo); + check_result(kr, type, INSPECT, INSPECT, "mach_port_space_basic_info", victim); + + /************ MACH_PORT_ALLOCATE ************/ + + mach_port_t new_port = MACH_PORT_NULL; + kr = mach_port_allocate(port, MACH_PORT_RIGHT_RECEIVE, &new_port); + check_result(kr, type, FULL, FULL, "mach_port_allocate", victim); + if (kr == KERN_SUCCESS) { + kr = mach_port_destruct(port, new_port, 0, 0); + T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "mach_port_destruct"); + } + + /************ INSPECT INTERFACES ************/ + int counts[2]; + mach_msg_type_number_t size = TASK_INSPECT_BASIC_COUNTS_COUNT; + kr = task_inspect(port, TASK_INSPECT_BASIC_COUNTS, counts, &size); + check_result(kr, type, INSPECT, INSPECT, "task_inspect", victim); + + /************ TASK_SET_SPECIAL_PORT ************/ + + if (type == FULL) { + new_port = MACH_PORT_NULL; + kr = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &new_port); + T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "mach_port_allocate"); + kr = mach_port_insert_right(mach_task_self(), new_port, new_port, MACH_MSG_TYPE_MAKE_SEND); + T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "mach_port_insert_right"); + + mach_port_t backup; + kr = task_get_special_port(port, TASK_BOOTSTRAP_PORT, &backup); + check_result(kr, type, POLY, FULL, "task_get_special_port", victim); + kr = task_set_special_port(port, TASK_BOOTSTRAP_PORT, new_port); + check_result(kr, type, FULL, FULL, "task_set_special_port", victim); + kr = task_set_special_port(port, TASK_BOOTSTRAP_PORT, backup); + check_result(kr, type, FULL, FULL, "task_set_special_port", victim); + + kr = mach_port_deallocate(mach_task_self(), new_port); + T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "mach_port_deallocate"); + mach_port_mod_refs(mach_task_self(), new_port, MACH_PORT_RIGHT_RECEIVE, -1); + T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "mach_port_mod_refs"); + } + /************ TASK_THREADS ************/ + thread_array_t th_list; + mach_msg_type_number_t th_cnt = 0; + + kr = task_threads(port, &th_list, &th_cnt); + check_result(kr, type, POLY, INSPECT, "task_threads", victim); + + /* Skip thread ports tests if task_threads() fails */ + if (kr != KERN_SUCCESS) { + return; + } + + /************ THREAD_GET_SPECIAL_PORT ************/ + mach_port_t special = MACH_PORT_NULL; + + switch (type) { + case FULL: + kr = thread_get_special_port(th_list[0], THREAD_KERNEL_PORT, &special); + break; + case READ: + kr = thread_get_special_port(th_list[0], THREAD_READ_PORT, &special); + 
break; + case INSPECT: + kr = thread_get_special_port(th_list[0], THREAD_INSPECT_PORT, &special); + break; + default: + break; + } + + T_QUIET; T_EXPECT_EQ(special, th_list[0], "thread_get_special_port should match task_threads"); + + kr = mach_port_deallocate(mach_task_self(), special); + T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "mach_port_deallocate"); + + for (unsigned int i = 0; i < th_cnt; i++) { + test_thread_port(th_list[i], type, victim); /* polymorphic */ + kr = mach_port_deallocate(mach_task_self(), th_list[i]); + T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "mach_port_deallocate"); + } +} + +static void +test_get_child_port(int with_sleep) +{ + pid_t child_pid; + kern_return_t kr; + mach_port_name_t tr, ti, tp, tn; + + child_pid = fork(); + + if (child_pid < 0) { + T_FAIL("fork failed in test_get_child_port."); + } + + if (child_pid == 0) { + while (1) { + sleep(10); + } + } + + kr = task_for_pid(mach_task_self(), child_pid, &tp); + if (with_sleep) { + T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "task_for_pid for child %u", child_pid); + } else if (kr != 0) { + g_tfpFail++; + } + + kr = task_read_for_pid(mach_task_self(), child_pid, &tr); + if (with_sleep) { + T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "task_read_for_pid for child %u", child_pid); + } else if (kr != 0) { + g_trfpFail++; + } + + kr = task_inspect_for_pid(mach_task_self(), child_pid, &ti); + if (with_sleep) { + T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "task_inspect_for_pid for child %u", child_pid); + } else if (kr != 0) { + g_tifpFail++; + } + + kr = task_name_for_pid(mach_task_self(), child_pid, &tn); + if (with_sleep) { + T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "task_name_for_pid for child %u", child_pid); + } else if (kr != 0) { + g_tnfpFail++; + } + + kr = mach_port_deallocate(mach_task_self(), tp); + T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "mach_port_deallocate"); + kr = mach_port_deallocate(mach_task_self(), tr); + T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "mach_port_deallocate"); + kr = mach_port_deallocate(mach_task_self(), ti); + T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "mach_port_deallocate"); + kr = mach_port_deallocate(mach_task_self(), tn); + T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "mach_port_deallocate"); + + kill(child_pid, SIGKILL); + int status; + wait(&status); +} + +static void +test_child_exec() +{ + pid_t child_pid; + kern_return_t kr; + mach_port_name_t tr2, ti2, tp2, tn2; + + child_pid = fork(); + + if (child_pid < 0) { + T_FAIL("fork failed in test_child_exec."); + } + + if (child_pid == 0) { + execve("/bin/bash", NULL, NULL); + } + + sleep(10); + + kr = task_name_for_pid(mach_task_self(), child_pid, &tn2); + test_task_port(tn2, NAME); + + kr = task_for_pid(mach_task_self(), child_pid, &tp2); + test_task_port(tp2, FULL); + + kr = task_read_for_pid(mach_task_self(), child_pid, &tr2); + test_task_port(tr2, READ); + + kr = task_inspect_for_pid(mach_task_self(), child_pid, &ti2); + test_task_port(ti2, INSPECT); + + kr = mach_port_deallocate(mach_task_self(), tp2); + T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "mach_port_deallocate"); + kr = mach_port_deallocate(mach_task_self(), tr2); + T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "mach_port_deallocate"); + kr = mach_port_deallocate(mach_task_self(), ti2); + T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "mach_port_deallocate"); + kr = mach_port_deallocate(mach_task_self(), tn2); + T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "mach_port_deallocate"); + + kill(child_pid, SIGKILL); + int status; + wait(&status); +} + +static void * +thread_run() +{ + pthread_mutex_lock(&g_lock); + pthread_mutex_unlock(&g_lock); + + pthread_exit(NULL); + + return NULL; 
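    /* not reached: pthread_exit() has already terminated the thread; the
     * return statement only satisfies the function signature. Each worker
     * created from thread_run() parks on g_lock until the main test unlocks
     * it, which keeps task_threads() reporting a stable 0x101 ports
     * (main thread + 0x100 workers) throughout the leak-check loop below. */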
+} + +#ifdef T_NOCODESIGN +#define TEST_NAME inspect_read_port_nocodesign +#else +#define TEST_NAME inspect_read_port +#endif + +T_DECL(TEST_NAME, "inspect and read port test", T_META_ASROOT(true)) +{ + kern_return_t kr; + pid_t pid = 0; + mach_port_t port = MACH_PORT_NULL; + + kr = pid_for_task(mach_task_self(), &pid); + T_EXPECT_MACH_SUCCESS(kr, "pid_for_task: My Pid = %d", pid); + +#ifdef T_NOCODESIGN + T_LOG("Running as non-platform binary...\n"); +#else + T_LOG("Running as platform binary...\n"); +#endif + + kr = task_for_pid(mach_task_self(), pid, &port); + T_EXPECT_EQ(kr, 0, "task_for_pid(mach_task_self..): %u", port); + T_EXPECT_EQ(port, mach_task_self(), "task_for_pid == mach_task_self"); + test_task_port(port, FULL); + + port = MACH_PORT_NULL; + kr = task_read_for_pid(mach_task_self(), pid, &port); + T_EXPECT_EQ(kr, 0, "task_read_for_pid(mach_task_self..): read port = %u", port); + test_task_port(port, READ); + kr = mach_port_deallocate(mach_task_self(), port); + T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "mach_port_deallocate"); + + port = MACH_PORT_NULL; + kr = task_inspect_for_pid(mach_task_self(), pid, &port); + T_EXPECT_EQ(kr, 0, "task_inspect_for_pid(mach_task_self..): inspect port = %u", port); + test_task_port(port, INSPECT); + kr = mach_port_deallocate(mach_task_self(), port); + T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "mach_port_deallocate"); + + port = MACH_PORT_NULL; + kr = task_name_for_pid(mach_task_self(), pid, &port); + T_EXPECT_EQ(kr, 0, "task_name_for_pid(mach_task_self..): name port = %u", port); + test_task_port(port, NAME); + kr = mach_port_deallocate(mach_task_self(), port); + T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "mach_port_deallocate"); + + port = MACH_PORT_NULL; + kr = task_read_for_pid(mach_task_self(), 0, &port); + T_EXPECT_NE(kr, 0, "task_read_for_pid for kernel should fail"); + + /* task_read_for_pid loop, check for leaks */ + for (int i = 0; i < 0x1000; i++) { + kr = task_read_for_pid(mach_task_self(), pid, &port); + test_task_port(port, READ); + kr = mach_port_deallocate(mach_task_self(), port); + T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "mach_port_deallocate"); + } + + /* task_inspect_for_pid loop, check for leaks */ + for (int i = 0; i < 0x1000; i++) { + kr = task_inspect_for_pid(mach_task_self(), pid, &port); + test_task_port(port, INSPECT); + kr = mach_port_deallocate(mach_task_self(), port); + T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "mach_port_deallocate"); + } + + /* fork-exec a child process */ + test_child_exec(); + + /* fork, get full/read/inspect/name port for the child then kill it */ + for (int i = 0; i < 10; i++) { + test_get_child_port(TRUE); + } + + T_LOG("tfp fail: %d, trfp fail: %d, tifp fail: %d, tnfp fail: %d, TOTAL: 10\n", + g_tfpFail, g_trfpFail, g_tifpFail, g_tnfpFail); + + + /* task thread loop, check for leaks */ + thread_array_t th_list; + mach_msg_type_number_t th_cnt; + pthread_t thread; + + pthread_mutex_init(&g_lock, NULL); + pthread_mutex_lock(&g_lock); + + for (unsigned i = 0; i < 0x100; i++) { + pthread_create(&thread, NULL, thread_run, NULL); + } + + for (unsigned i = 0; i < 0x1000; i++) { + kr = task_threads(mach_task_self(), &th_list, &th_cnt); + T_QUIET; T_ASSERT_EQ(th_cnt, 0x101, "257 threads"); + + for (unsigned j = 0; j < th_cnt; j++) { + kr = mach_port_deallocate(mach_task_self(), th_list[j]); + T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "mach_port_deallocate"); + } + } + pthread_mutex_unlock(&g_lock); + + /* processor_set_tasks_with_flavor */ + + processor_set_name_array_t psets; + processor_set_t pset; + task_array_t tasks; + 
mach_msg_type_number_t pcnt, tcnt; + mach_port_t host = mach_host_self(); + + kr = host_processor_sets(host, &psets, &pcnt); + kr = host_processor_set_priv(host, psets[0], &pset); + + kr = processor_set_tasks_with_flavor(pset, TASK_FLAVOR_CONTROL, &tasks, &tcnt); + T_EXPECT_EQ(kr, 0, "processor_set_tasks_with_flavor: TASK_FLAVOR_CONTROL should succeed"); + for (unsigned int i = 0; i < tcnt; i++) { + test_task_port(tasks[i], FULL); + kr = mach_port_deallocate(mach_task_self(), tasks[i]); + T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "mach_port_deallocate"); + } + + kr = processor_set_tasks_with_flavor(pset, TASK_FLAVOR_READ, &tasks, &tcnt); + T_EXPECT_EQ(kr, 0, "processor_set_tasks_with_flavor: TASK_FLAVOR_READ should succeed"); + for (unsigned int i = 0; i < tcnt; i++) { + test_task_port(tasks[i], READ); + kr = mach_port_deallocate(mach_task_self(), tasks[i]); + T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "mach_port_deallocate"); + } + + kr = processor_set_tasks_with_flavor(pset, TASK_FLAVOR_INSPECT, &tasks, &tcnt); + T_EXPECT_EQ(kr, 0, "processor_set_tasks_with_flavor: TASK_FLAVOR_INSPECT should succeed"); + for (unsigned int i = 0; i < tcnt; i++) { + test_task_port(tasks[i], INSPECT); + kr = mach_port_deallocate(mach_task_self(), tasks[i]); + T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "mach_port_deallocate"); + } + + kr = processor_set_tasks_with_flavor(pset, TASK_FLAVOR_NAME, &tasks, &tcnt); + T_EXPECT_EQ(kr, 0, "processor_set_tasks_with_flavor: TASK_FLAVOR_NAME should succeed"); + for (unsigned int i = 0; i < tcnt; i++) { + test_task_port(tasks[i], NAME); + kr = mach_port_deallocate(mach_task_self(), tasks[i]); + T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "mach_port_deallocate"); + } + + // Cleanup + for (unsigned int i = 0; i < pcnt; i++) { + kr = mach_port_deallocate(mach_task_self(), psets[i]); + T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "mach_port_deallocate"); + } + + kr = mach_port_deallocate(mach_task_self(), pset); + T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "mach_port_deallocate"); +} diff --git a/tests/intrusive_shared_ptr.cpp b/tests/intrusive_shared_ptr.cpp new file mode 100644 index 000000000..d860ea717 --- /dev/null +++ b/tests/intrusive_shared_ptr.cpp @@ -0,0 +1,11 @@ +// +// Test runner for all intrusive_shared_ptr tests. +// + +#include + +T_GLOBAL_META( + T_META_NAMESPACE("intrusive_shared_ptr"), + T_META_CHECK_LEAKS(false), + T_META_RUN_CONCURRENTLY(true) + ); diff --git a/tests/intrusive_shared_ptr_src/abi.callee.raw.cpp b/tests/intrusive_shared_ptr_src/abi.callee.raw.cpp new file mode 100644 index 000000000..b33ce072e --- /dev/null +++ b/tests/intrusive_shared_ptr_src/abi.callee.raw.cpp @@ -0,0 +1,15 @@ +// +// Declare a function as returning a shared pointer (in the header), but +// implement it by returning a raw pointer. This represents a TU that would +// not have been translated to shared pointers yet. +// +// In this TU, SharedPtr is just T* since USE_SHARED_PTR is not defined. +// + +#include "abi_helper.h" + +SharedPtr +return_raw_as_shared(T* ptr) +{ + return ptr; +} diff --git a/tests/intrusive_shared_ptr_src/abi.callee.smart.cpp b/tests/intrusive_shared_ptr_src/abi.callee.smart.cpp new file mode 100644 index 000000000..e519bfe0d --- /dev/null +++ b/tests/intrusive_shared_ptr_src/abi.callee.smart.cpp @@ -0,0 +1,18 @@ +// +// Declare a function as returning a raw pointer (in the header), but +// implement it by returning a shared pointer. This represents a TU that +// would have been translated to shared pointers. 
+// +// In this TU, SharedPtr is intrusive_shared_ptr, since USE_SHARED_PTR +// is defined. +// + +#define USE_SHARED_PTR + +#include "abi_helper.h" + +SharedPtr +return_shared_as_raw(T* ptr) +{ + return SharedPtr(ptr, libkern::no_retain); +} diff --git a/tests/intrusive_shared_ptr_src/abi.caller.raw.cpp b/tests/intrusive_shared_ptr_src/abi.caller.raw.cpp new file mode 100644 index 000000000..b13f8bb89 --- /dev/null +++ b/tests/intrusive_shared_ptr_src/abi.caller.raw.cpp @@ -0,0 +1,24 @@ +// +// This tests that we can call functions implemented using shared pointers +// from an API vending itself as returning raw pointers, because both are +// ABI compatible. +// +// In this TU, SharedPtr is just T*, since USE_SHARED_PTR is not defined. +// + +#include +#include "abi_helper.h" + +// Receive a raw pointer from a function that actually returns a smart pointer +T_DECL(abi_caller_raw, "intrusive_shared_ptr.abi.caller.raw") { + T obj{10}; + T* expected = &obj; + T* result = return_shared_as_raw(expected); + CHECK(result == expected); + + // Sometimes the test above passes even though it should fail, if the + // right address happens to be on the stack in the right location. This + // can happen if abi.caller.smart is run just before this test. This + // second test makes sure it fails when it should. + CHECK(result->i == 10); +} diff --git a/tests/intrusive_shared_ptr_src/abi.caller.smart.cpp b/tests/intrusive_shared_ptr_src/abi.caller.smart.cpp new file mode 100644 index 000000000..a37fe144a --- /dev/null +++ b/tests/intrusive_shared_ptr_src/abi.caller.smart.cpp @@ -0,0 +1,31 @@ +// +// This tests that we can call functions implemented using raw pointers from +// an API vending itself as returning shared pointers, because both are ABI +// compatible. +// +// In this TU, SharedPtr is intrusive_shared_ptr, since USE_SHARED_PTR +// is defined. +// + +#define USE_SHARED_PTR + +#include +#include +#include "abi_helper.h" + +static_assert(sizeof(SharedPtr) == sizeof(T*)); +static_assert(alignof(SharedPtr) == alignof(T*)); + +// Receive a shared pointer from a function that actually returns a raw pointer +T_DECL(abi_caller_smart, "intrusive_shared_ptr.abi.caller.smart") { + T obj{3}; + T* expected = &obj; + SharedPtr result = return_raw_as_shared(expected); + CHECK(result.get() == expected); + + // Sometimes the test above passes even though it should fail, if the + // right address happens to be on the stack in the right location. This + // can happen if abi.caller.raw is run just before this test. This second + // test makes sure it fails when it should. + CHECK(result->i == 3); +} diff --git a/tests/intrusive_shared_ptr_src/abi.size_alignment.cpp b/tests/intrusive_shared_ptr_src/abi.size_alignment.cpp new file mode 100644 index 000000000..def3bab59 --- /dev/null +++ b/tests/intrusive_shared_ptr_src/abi.size_alignment.cpp @@ -0,0 +1,142 @@ +// +// This tests that the alignment and size of a class are the same whether +// they have a `T*` or a shared pointer data member. 
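// The namespaces below repeat the check for progressively trickier layouts
// (a leading int, a char plus padding, bitfields, __attribute__((packed))
// structs and members); in every case sizeof, alignof and offsetof of the
// pointer member must match, so swapping a raw T* member for a shared
// pointer cannot change the layout of the containing struct.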
+// + +#include +#include "test_policy.h" +#include +#include + + +namespace ns1 { +struct FooShared { + test_shared_ptr ptr; +}; + +struct FooRaw { + int* ptr; +}; + +static_assert(sizeof(FooShared) == sizeof(FooRaw)); +static_assert(alignof(FooShared) == alignof(FooRaw)); +static_assert(offsetof(FooShared, ptr) == offsetof(FooRaw, ptr)); +} + +namespace ns2 { +struct FooShared { + int i; + test_shared_ptr ptr; +}; + +struct FooRaw { + int i; + int* ptr; +}; + +static_assert(sizeof(FooShared) == sizeof(FooRaw)); +static_assert(alignof(FooShared) == alignof(FooRaw)); +static_assert(offsetof(FooShared, ptr) == offsetof(FooRaw, ptr)); +} + +namespace ns3 { +struct FooShared { + char c; + test_shared_ptr ptr; + int i; +}; + +struct FooRaw { + char c; + int* ptr; + int i; +}; + +static_assert(sizeof(FooShared) == sizeof(FooRaw)); +static_assert(alignof(FooShared) == alignof(FooRaw)); +static_assert(offsetof(FooShared, ptr) == offsetof(FooRaw, ptr)); +} + +namespace ns4 { +struct FooShared { + char c; + unsigned int b : 5; + test_shared_ptr ptr; + int i; +}; + +struct FooRaw { + char c; + unsigned int b : 5; + int* ptr; + int i; +}; + +static_assert(sizeof(FooShared) == sizeof(FooRaw)); +static_assert(alignof(FooShared) == alignof(FooRaw)); +static_assert(offsetof(FooShared, ptr) == offsetof(FooRaw, ptr)); +} + +namespace ns5 { +struct __attribute__((packed)) FooShared { + char c; + unsigned int b : 5; + test_shared_ptr ptr; + int i; +}; + +struct __attribute__((packed)) FooRaw { + char c; + unsigned int b : 5; + int* ptr; + int i; +}; + +static_assert(sizeof(FooShared) == sizeof(FooRaw)); +static_assert(alignof(FooShared) == alignof(FooRaw)); +static_assert(offsetof(FooShared, ptr) == offsetof(FooRaw, ptr)); +} + +namespace ns6 { +struct FooShared { + char c; + unsigned int b : 5; + test_shared_ptr ptr; + int i __attribute__((packed)); +}; + +struct FooRaw { + char c; + unsigned int b : 5; + int* ptr; + int i __attribute__((packed)); +}; + +static_assert(sizeof(FooShared) == sizeof(FooRaw)); +static_assert(alignof(FooShared) == alignof(FooRaw)); +static_assert(offsetof(FooShared, ptr) == offsetof(FooRaw, ptr)); +} + +namespace ns7 { +struct FooShared { + char c; + unsigned int b : 5; + test_shared_ptr ptr __attribute__((packed)); + int i; +}; + +struct FooRaw { + char c; + unsigned int b : 5; + int* ptr __attribute__((packed)); + int i; +}; + +static_assert(sizeof(FooShared) == sizeof(FooRaw)); +static_assert(alignof(FooShared) == alignof(FooRaw)); +static_assert(offsetof(FooShared, ptr) == offsetof(FooRaw, ptr)); +} + +T_DECL(abi_size_alignment, "intrusive_shared_ptr.abi.size_alignment") { + T_PASS("intrusive_shared_ptr.abi.size_alignment compile-time tests passed"); +} diff --git a/tests/intrusive_shared_ptr_src/abi_helper.h b/tests/intrusive_shared_ptr_src/abi_helper.h new file mode 100644 index 000000000..bce4c6859 --- /dev/null +++ b/tests/intrusive_shared_ptr_src/abi_helper.h @@ -0,0 +1,21 @@ +#ifndef TESTS_INTRUSIVE_SHARED_PTR_ABI_HELPER_H +#define TESTS_INTRUSIVE_SHARED_PTR_ABI_HELPER_H + +#include +#include +#include "test_policy.h" + +struct T { int i; }; + +#if defined USE_SHARED_PTR +template +using SharedPtr = libkern::intrusive_shared_ptr; +#else +template +using SharedPtr = T *; +#endif + +extern SharedPtr return_shared_as_raw(T*); +extern SharedPtr return_raw_as_shared(T*); + +#endif // !TESTS_INTRUSIVE_SHARED_PTR_ABI_HELPER_H diff --git a/tests/intrusive_shared_ptr_src/assign.copy.cpp b/tests/intrusive_shared_ptr_src/assign.copy.cpp new file mode 100644 index 
000000000..7e54bbf4f --- /dev/null +++ b/tests/intrusive_shared_ptr_src/assign.copy.cpp @@ -0,0 +1,121 @@ +// +// Tests for +// template +// intrusive_shared_ptr& operator=(intrusive_shared_ptr const& other); +// +// intrusive_shared_ptr& operator=(intrusive_shared_ptr const& other); +// + +#include +#include +#include +#include "test_policy.h" + +struct Base { int i; }; +struct Derived : Base { }; + +struct Base1 { int i; }; +struct Base2 { long l; }; +struct DerivedMultiple : Base1, Base2 { + DerivedMultiple(int i) : Base1{i}, Base2{i + 10} + { + } +}; + +struct Unrelated { }; + +template +static void +tests() +{ + Stored obj1{1}; + Stored obj2{2}; + + // Copy-assign non-null to non-null + { + tracked_shared_ptr const from(&obj1, libkern::retain); + tracked_shared_ptr to(&obj2, libkern::retain); + tracking_policy::reset(); + tracked_shared_ptr& ref = (to = from); + CHECK(tracking_policy::releases == 1); + CHECK(tracking_policy::retains == 1); + CHECK(&ref == &to); + CHECK(from.get() == &obj1); + CHECK(to.get() == &obj1); + } + + // Copy-assign non-null to null + { + tracked_shared_ptr const from(&obj1, libkern::retain); + tracked_shared_ptr to = nullptr; + tracking_policy::reset(); + tracked_shared_ptr& ref = (to = from); + CHECK(tracking_policy::releases == 0); + CHECK(tracking_policy::retains == 1); + CHECK(&ref == &to); + CHECK(from.get() == &obj1); + CHECK(to.get() == &obj1); + } + + // Copy-assign null to non-null + { + tracked_shared_ptr const from = nullptr; + tracked_shared_ptr to(&obj2, libkern::retain); + tracking_policy::reset(); + tracked_shared_ptr& ref = (to = from); + CHECK(tracking_policy::releases == 1); + CHECK(tracking_policy::retains == 0); + CHECK(&ref == &to); + CHECK(from.get() == nullptr); + CHECK(to.get() == nullptr); + } + + // Copy-assign null to null + { + tracked_shared_ptr const from = nullptr; + tracked_shared_ptr to = nullptr; + tracking_policy::reset(); + tracked_shared_ptr& ref = (to = from); + CHECK(tracking_policy::releases == 0); + CHECK(tracking_policy::retains == 0); + CHECK(&ref == &to); + CHECK(from.get() == nullptr); + CHECK(to.get() == nullptr); + } +} + +T_DECL(assign_copy, "intrusive_shared_ptr.assign.copy") { + tests(); + tests(); + tests(); + + tests(); + tests(); + tests(); + + tests(); + tests(); + + tests(); + tests(); + + // Make sure basic trait querying works + static_assert(std::is_copy_assignable_v >); + + // Make sure downcasts are disabled + static_assert(!std::is_assignable_v, /*from*/ test_shared_ptr const&>); + static_assert(!std::is_assignable_v, /*from*/ test_shared_ptr const&>); + static_assert(!std::is_assignable_v, /*from*/ test_shared_ptr const&>); + static_assert(!std::is_assignable_v, /*from*/ test_shared_ptr const&>); + + // Make sure const-casting away doesn't work + static_assert(!std::is_assignable_v, /*from*/ test_shared_ptr const&>); + + // Make sure casting to unrelated types doesn't work + static_assert(!std::is_assignable_v, /*from*/ test_shared_ptr const&>); + static_assert(!std::is_assignable_v, /*from*/ test_shared_ptr const&>); + static_assert(!std::is_assignable_v, /*from*/ test_shared_ptr const&>); + + // Make sure constructing with different policies doesn't work + static_assert(!std::is_assignable_v >, /*from*/ libkern::intrusive_shared_ptr > const &>); +} diff --git a/tests/intrusive_shared_ptr_src/assign.move.cpp b/tests/intrusive_shared_ptr_src/assign.move.cpp new file mode 100644 index 000000000..f2ec4b4ee --- /dev/null +++ b/tests/intrusive_shared_ptr_src/assign.move.cpp @@ -0,0 +1,122 @@ +// +// 
Tests for +// template +// intrusive_shared_ptr& operator=(intrusive_shared_ptr&& other); +// +// intrusive_shared_ptr& operator=(intrusive_shared_ptr&& other); +// + +#include +#include +#include +#include +#include "test_policy.h" + +struct Base { int i; }; +struct Derived : Base { }; + +struct Base1 { int i; }; +struct Base2 { long l; }; +struct DerivedMultiple : Base1, Base2 { + DerivedMultiple(int i) : Base1{i}, Base2{i + 10} + { + } +}; + +struct Unrelated { }; + +template +static void +tests() +{ + Stored obj1{1}; + Stored obj2{2}; + + // Move-assign non-null to non-null + { + tracked_shared_ptr from(&obj1, libkern::retain); + tracked_shared_ptr to(&obj2, libkern::retain); + tracking_policy::reset(); + tracked_shared_ptr& ref = (to = std::move(from)); + CHECK(tracking_policy::releases == 1); + CHECK(tracking_policy::retains == 0); + CHECK(&ref == &to); + CHECK(from.get() == nullptr); + CHECK(to.get() == &obj1); + } + + // Move-assign non-null to null + { + tracked_shared_ptr from(&obj1, libkern::retain); + tracked_shared_ptr to = nullptr; + tracking_policy::reset(); + tracked_shared_ptr& ref = (to = std::move(from)); + CHECK(tracking_policy::releases == 0); + CHECK(tracking_policy::retains == 0); + CHECK(&ref == &to); + CHECK(from.get() == nullptr); + CHECK(to.get() == &obj1); + } + + // Move-assign null to non-null + { + tracked_shared_ptr from = nullptr; + tracked_shared_ptr to(&obj2, libkern::retain); + tracking_policy::reset(); + tracked_shared_ptr& ref = (to = std::move(from)); + CHECK(tracking_policy::releases == 1); + CHECK(tracking_policy::retains == 0); + CHECK(&ref == &to); + CHECK(from.get() == nullptr); + CHECK(to.get() == nullptr); + } + + // Move-assign null to null + { + tracked_shared_ptr from = nullptr; + tracked_shared_ptr to = nullptr; + tracking_policy::reset(); + tracked_shared_ptr& ref = (to = std::move(from)); + CHECK(tracking_policy::releases == 0); + CHECK(tracking_policy::retains == 0); + CHECK(&ref == &to); + CHECK(from.get() == nullptr); + CHECK(to.get() == nullptr); + } +} + +T_DECL(assign_move, "intrusive_shared_ptr.assign.move") { + tests(); + tests(); + tests(); + + tests(); + tests(); + tests(); + + tests(); + tests(); + + tests(); + tests(); + + // Make sure basic trait querying works + static_assert(std::is_move_assignable_v >); + + // Make sure downcasts are disabled + static_assert(!std::is_assignable_v, /*from*/ test_shared_ptr&&>); + static_assert(!std::is_assignable_v, /*from*/ test_shared_ptr&&>); + static_assert(!std::is_assignable_v, /*from*/ test_shared_ptr&&>); + static_assert(!std::is_assignable_v, /*from*/ test_shared_ptr&&>); + + // Make sure const-casting away doesn't work + static_assert(!std::is_assignable_v, /*from*/ test_shared_ptr&&>); + + // Make sure casting to unrelated types doesn't work + static_assert(!std::is_assignable_v, /*from*/ test_shared_ptr&&>); + static_assert(!std::is_assignable_v, /*from*/ test_shared_ptr&&>); + static_assert(!std::is_assignable_v, /*from*/ test_shared_ptr&&>); + + // Make sure constructing with different policies doesn't work + static_assert(!std::is_assignable_v >, /*from*/ libkern::intrusive_shared_ptr >&&>); +} diff --git a/tests/intrusive_shared_ptr_src/assign.nullptr.cpp b/tests/intrusive_shared_ptr_src/assign.nullptr.cpp new file mode 100644 index 000000000..55068d601 --- /dev/null +++ b/tests/intrusive_shared_ptr_src/assign.nullptr.cpp @@ -0,0 +1,46 @@ +// +// Tests for +// intrusive_shared_ptr& operator=(std::nullptr_t); +// + +#include +#include +#include "test_policy.h" + +struct T { + 
int i; +}; + +template +static void +tests() +{ + T obj{3}; + + // Assign nullptr to non-null + { + tracked_shared_ptr ptr(&obj, libkern::retain); + tracking_policy::reset(); + tracked_shared_ptr& ref = (ptr = nullptr); + CHECK(tracking_policy::releases == 1); + CHECK(tracking_policy::retains == 0); + CHECK(&ref == &ptr); + CHECK(ptr.get() == nullptr); + } + + // Assign nullptr to null + { + tracked_shared_ptr ptr = nullptr; + tracking_policy::reset(); + tracked_shared_ptr& ref = (ptr = nullptr); + CHECK(tracking_policy::releases == 0); + CHECK(tracking_policy::retains == 0); + CHECK(&ref == &ptr); + CHECK(ptr.get() == nullptr); + } +} + +T_DECL(assign_nullptr, "intrusive_shared_ptr.assign.nullptr") { + tests(); + tests(); +} diff --git a/tests/intrusive_shared_ptr_src/cast.const.cpp b/tests/intrusive_shared_ptr_src/cast.const.cpp new file mode 100644 index 000000000..461ef13c9 --- /dev/null +++ b/tests/intrusive_shared_ptr_src/cast.const.cpp @@ -0,0 +1,67 @@ +// +// Tests for +// template +// intrusive_shared_ptr const_pointer_cast(intrusive_shared_ptr const& ptr) noexcept; +// +// template +// intrusive_shared_ptr const_pointer_cast(intrusive_shared_ptr&& ptr) noexcept +// + +#include +#include +#include +#include "test_policy.h" + +struct T { int i; }; + +template +static void +tests() +{ + Stored obj{3}; + + { + tracked_shared_ptr const from(&obj, libkern::no_retain); + tracking_policy::reset(); + tracked_shared_ptr to = libkern::const_pointer_cast(from); + CHECK(tracking_policy::retains == 1); + CHECK(tracking_policy::releases == 0); + CHECK(to.get() == const_cast(&obj)); + CHECK(from.get() == &obj); + } + { + tracked_shared_ptr from(&obj, libkern::no_retain); + tracking_policy::reset(); + tracked_shared_ptr to = libkern::const_pointer_cast(std::move(from)); + CHECK(tracking_policy::retains == 0); + CHECK(tracking_policy::releases == 0); + CHECK(to.get() == const_cast(&obj)); + CHECK(from.get() == nullptr); + } + + // Test `const_pointer_cast`ing a null pointer + { + tracked_shared_ptr const from = nullptr; + tracking_policy::reset(); + tracked_shared_ptr to = libkern::const_pointer_cast(from); + CHECK(tracking_policy::retains == 0); + CHECK(tracking_policy::releases == 0); + CHECK(to.get() == nullptr); + CHECK(from.get() == nullptr); + } + { + tracked_shared_ptr from = nullptr; + tracking_policy::reset(); + tracked_shared_ptr to = libkern::const_pointer_cast(std::move(from)); + CHECK(tracking_policy::retains == 0); + CHECK(tracking_policy::releases == 0); + CHECK(to.get() == nullptr); + CHECK(from.get() == nullptr); + } +} + +T_DECL(cast_const, "intrusive_shared_ptr.cast.const") { + tests(); + tests(); + tests(); +} diff --git a/tests/intrusive_shared_ptr_src/cast.reinterpret.cpp b/tests/intrusive_shared_ptr_src/cast.reinterpret.cpp new file mode 100644 index 000000000..74777836a --- /dev/null +++ b/tests/intrusive_shared_ptr_src/cast.reinterpret.cpp @@ -0,0 +1,76 @@ +// +// Tests for +// template +// intrusive_shared_ptr reinterpret_pointer_cast(intrusive_shared_ptr const& ptr) noexcept; +// +// template +// intrusive_shared_ptr reinterpret_pointer_cast(intrusive_shared_ptr&& ptr) noexcept +// + +#include +#include +#include +#include "test_policy.h" + +struct Base { int i; }; +struct Derived : Base { }; + +// Layout compatible with Derived +struct Unrelated { int i; }; + +template +static void +tests() +{ + Stored obj{3}; + + { + tracked_shared_ptr const from(&obj, libkern::no_retain); + tracking_policy::reset(); + tracked_shared_ptr to = libkern::reinterpret_pointer_cast(from); + 
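        // the copy overload must retain on behalf of the new pointer
        // (exactly one retain, no release) and leave `from` untouched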
CHECK(tracking_policy::retains == 1); + CHECK(tracking_policy::releases == 0); + CHECK(to.get() == reinterpret_cast(&obj)); + CHECK(from.get() == &obj); + } + { + tracked_shared_ptr from(&obj, libkern::no_retain); + tracking_policy::reset(); + tracked_shared_ptr to = libkern::reinterpret_pointer_cast(std::move(from)); + CHECK(tracking_policy::retains == 0); + CHECK(tracking_policy::releases == 0); + CHECK(to.get() == reinterpret_cast(&obj)); + CHECK(from.get() == nullptr); + } + + // Test `reinterpret_pointer_cast`ing a null pointer + { + tracked_shared_ptr const from = nullptr; + tracking_policy::reset(); + tracked_shared_ptr to = libkern::reinterpret_pointer_cast(from); + CHECK(tracking_policy::retains == 0); + CHECK(tracking_policy::releases == 0); + CHECK(to.get() == nullptr); + CHECK(from.get() == nullptr); + } + { + tracked_shared_ptr from = nullptr; + tracking_policy::reset(); + tracked_shared_ptr to = libkern::reinterpret_pointer_cast(std::move(from)); + CHECK(tracking_policy::retains == 0); + CHECK(tracking_policy::releases == 0); + CHECK(to.get() == nullptr); + CHECK(from.get() == nullptr); + } +} + +T_DECL(cast_reinterpret, "intrusive_shared_ptr.cast.reinterpret") { + tests(); + tests(); + + tests(); + tests(); + + tests(); + tests(); +} diff --git a/tests/intrusive_shared_ptr_src/cast.static.cpp b/tests/intrusive_shared_ptr_src/cast.static.cpp new file mode 100644 index 000000000..f6e92fd1d --- /dev/null +++ b/tests/intrusive_shared_ptr_src/cast.static.cpp @@ -0,0 +1,78 @@ +// +// Tests for +// template +// intrusive_shared_ptr static_pointer_cast(intrusive_shared_ptr const& ptr) noexcept; +// +// template +// intrusive_shared_ptr static_pointer_cast(intrusive_shared_ptr&& ptr) noexcept +// + +#include +#include +#include +#include "test_policy.h" + +struct Base { int i; }; +struct Derived : Base { }; + +struct Base1 { int i; }; +struct Base2 { long l; }; +struct DerivedMultiple : Base1, Base2 { + DerivedMultiple(int i) : Base1{i}, Base2{i + 10} + { + } +}; + +template +static void +tests() +{ + Stored obj{3}; + + { + tracked_shared_ptr const from(&obj, libkern::no_retain); + tracking_policy::reset(); + tracked_shared_ptr to = libkern::static_pointer_cast(from); + CHECK(tracking_policy::retains == 1); + CHECK(tracking_policy::releases == 0); + CHECK(to.get() == static_cast(&obj)); + CHECK(from.get() == &obj); + } + { + tracked_shared_ptr from(&obj, libkern::no_retain); + tracking_policy::reset(); + tracked_shared_ptr to = libkern::static_pointer_cast(std::move(from)); + CHECK(tracking_policy::retains == 0); + CHECK(tracking_policy::releases == 0); + CHECK(to.get() == static_cast(&obj)); + CHECK(from.get() == nullptr); + } + + // Test `static_pointer_cast`ing a null pointer + { + tracked_shared_ptr const from = nullptr; + tracking_policy::reset(); + tracked_shared_ptr to = libkern::static_pointer_cast(from); + CHECK(tracking_policy::retains == 0); + CHECK(tracking_policy::releases == 0); + CHECK(to.get() == nullptr); + CHECK(from.get() == nullptr); + } + { + tracked_shared_ptr from = nullptr; + tracking_policy::reset(); + tracked_shared_ptr to = libkern::static_pointer_cast(std::move(from)); + CHECK(tracking_policy::retains == 0); + CHECK(tracking_policy::releases == 0); + CHECK(to.get() == nullptr); + CHECK(from.get() == nullptr); + } +} + +T_DECL(cast_static, "intrusive_shared_ptr.cast.static") { + tests(); + tests(); + + tests(); + tests(); +} diff --git a/tests/intrusive_shared_ptr_src/compare.equal.cpp b/tests/intrusive_shared_ptr_src/compare.equal.cpp new file mode 100644 
index 000000000..fbf6e7b44 --- /dev/null +++ b/tests/intrusive_shared_ptr_src/compare.equal.cpp @@ -0,0 +1,86 @@ +// +// Tests for +// template +// bool operator==(intrusive_shared_ptr const& x, intrusive_shared_ptr const& y); +// +// template +// bool operator!=(intrusive_shared_ptr const& x, intrusive_shared_ptr const& y); +// + +#include +#include +#include "test_policy.h" + +struct Base { int i; }; +struct Derived : Base { }; + +struct T { int i; }; + +template +static void +check_eq(T t, U u) +{ + CHECK(t == u); + CHECK(u == t); + CHECK(!(t != u)); + CHECK(!(u != t)); +} + +template +static void +check_ne(T t, U u) +{ + CHECK(!(t == u)); + CHECK(!(u == t)); + CHECK(t != u); + CHECK(u != t); +} + +template +static void +tests() +{ + T obj1{1}; + T obj2{2}; + + { + test_shared_ptr const a(&obj1, libkern::no_retain); + test_shared_ptr const b(&obj2, libkern::no_retain); + check_ne(a, b); + } + + { + test_shared_ptr const a(&obj1, libkern::no_retain); + test_shared_ptr const b(&obj1, libkern::no_retain); + check_eq(a, b); + } + + { + test_shared_ptr const a = nullptr; + test_shared_ptr const b(&obj2, libkern::no_retain); + check_ne(a, b); + } + + { + test_shared_ptr const a = nullptr; + test_shared_ptr const b = nullptr; + check_eq(a, b); + } +} + +template +static void +tests_convert() +{ + T obj{1}; + test_shared_ptr const a(&obj, libkern::no_retain); + test_shared_ptr const b(&obj, libkern::no_retain); + check_eq(a, b); +} + +T_DECL(compare_equal, "intrusive_shared_ptr.compare.equal") { + tests(); + tests(); + tests_convert(); + tests_convert(); +} diff --git a/tests/intrusive_shared_ptr_src/compare.equal.nullptr.cpp b/tests/intrusive_shared_ptr_src/compare.equal.nullptr.cpp new file mode 100644 index 000000000..94031e353 --- /dev/null +++ b/tests/intrusive_shared_ptr_src/compare.equal.nullptr.cpp @@ -0,0 +1,62 @@ +// +// Tests for +// template +// bool operator==(intrusive_shared_ptr const& x, std::nullptr_t); +// +// template +// bool operator!=(intrusive_shared_ptr const& x, std::nullptr_t); +// +// template +// bool operator==(std::nullptr_t, intrusive_shared_ptr const& x); +// +// template +// bool operator!=(std::nullptr_t, intrusive_shared_ptr const& x); +// + +#include +#include +#include "test_policy.h" + +struct T { int i; }; + +template +static void +check_eq(T t, U u) +{ + CHECK(t == u); + CHECK(u == t); + CHECK(!(t != u)); + CHECK(!(u != t)); +} + +template +static void +check_ne(T t, U u) +{ + CHECK(!(t == u)); + CHECK(!(u == t)); + CHECK(t != u); + CHECK(u != t); +} + +template +static void +tests() +{ + T obj{3}; + + { + test_shared_ptr const a(&obj, libkern::no_retain); + check_ne(a, nullptr); + } + + { + test_shared_ptr const a = nullptr; + check_eq(a, nullptr); + } +} + +T_DECL(compare_equal_nullptr, "intrusive_shared_ptr.compare.equal.nullptr") { + tests(); + tests(); +} diff --git a/tests/intrusive_shared_ptr_src/compare.equal.raw.cpp b/tests/intrusive_shared_ptr_src/compare.equal.raw.cpp new file mode 100644 index 000000000..8adb74bb8 --- /dev/null +++ b/tests/intrusive_shared_ptr_src/compare.equal.raw.cpp @@ -0,0 +1,107 @@ +// +// Tests for +// template +// bool operator==(intrusive_shared_ptr const& x, U* y); +// +// template +// bool operator!=(intrusive_shared_ptr const& x, U* y); +// +// template +// bool operator==(T* x, intrusive_shared_ptr const& y); +// +// template +// bool operator!=(T* x, intrusive_shared_ptr const& y); +// + +#include +#include +#include "test_policy.h" + +struct Base { int i; }; +struct Derived : Base { }; + +struct T { int i; }; + 
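// check_eq/check_ne below exercise both argument orders (ptr == raw and
// raw == ptr) together with the negated forms, so a shared pointer compares
// against a plain T* exactly as two raw pointers would: equality is address
// identity, and a null shared pointer compares equal only to a null raw
// pointer.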
+template +static void +check_eq(T t, U u) +{ + CHECK(t == u); + CHECK(u == t); + CHECK(!(t != u)); + CHECK(!(u != t)); +} + +template +static void +check_ne(T t, U u) +{ + CHECK(!(t == u)); + CHECK(!(u == t)); + CHECK(t != u); + CHECK(u != t); +} + +template +static void +tests() +{ + T obj1{1}; + T obj2{2}; + + { + test_shared_ptr const a(&obj1, libkern::no_retain); + TQual* b = &obj2; + check_ne(a, b); + } + + { + test_shared_ptr const a(&obj1, libkern::no_retain); + TQual* b = &obj1; + check_eq(a, b); + } + + { + test_shared_ptr const a = nullptr; + TQual* b = &obj2; + check_ne(a, b); + } + + { + test_shared_ptr const a(&obj1, libkern::no_retain); + TQual* b = nullptr; + check_ne(a, b); + } + + { + test_shared_ptr const a = nullptr; + TQual* b = nullptr; + check_eq(a, b); + } +} + +template +static void +tests_convert() +{ + T obj{1}; + + { + test_shared_ptr const a(&obj, libkern::no_retain); + RelatedT* b = &obj; + check_eq(a, b); + } + + { + test_shared_ptr const a(&obj, libkern::no_retain); + T* b = &obj; + check_eq(a, b); + } +} + +T_DECL(compare_equal_raw, "intrusive_shared_ptr.compare.equal.raw") { + tests(); + tests(); + tests_convert(); + tests_convert(); +} diff --git a/tests/intrusive_shared_ptr_src/ctor.copy.cpp b/tests/intrusive_shared_ptr_src/ctor.copy.cpp new file mode 100644 index 000000000..ea2d66cf2 --- /dev/null +++ b/tests/intrusive_shared_ptr_src/ctor.copy.cpp @@ -0,0 +1,101 @@ +// +// Tests for +// template +// intrusive_shared_ptr(intrusive_shared_ptr const& other); +// +// intrusive_shared_ptr(intrusive_shared_ptr const& other); +// + +#include +#include +#include +#include +#include "test_policy.h" + +struct Base { int i; }; +struct Derived : Base { }; + +struct Base1 { int i; }; +struct Base2 { long l; }; +struct DerivedMultiple : Base1, Base2 { + DerivedMultiple(int i) : Base1{i}, Base2{i + 10} + { + } +}; + +struct Unrelated { }; + +template +static void +tests() +{ + Stored obj{3}; + + // Test with non-null pointers + { + test_policy::retain_count = 0; + libkern::intrusive_shared_ptr const from(&obj, libkern::retain); + libkern::intrusive_shared_ptr to(from); // explicit + CHECK(test_policy::retain_count == 2); + CHECK(to.get() == &obj); + } + { + test_policy::retain_count = 0; + libkern::intrusive_shared_ptr const from(&obj, libkern::retain); + libkern::intrusive_shared_ptr to{from}; // explicit + CHECK(test_policy::retain_count == 2); + CHECK(to.get() == &obj); + } + { + test_policy::retain_count = 0; + libkern::intrusive_shared_ptr const from(&obj, libkern::retain); + libkern::intrusive_shared_ptr to = from; // implicit + CHECK(test_policy::retain_count == 2); + CHECK(to.get() == &obj); + } + + // Test with a null pointer + { + test_policy::retain_count = 0; + libkern::intrusive_shared_ptr const from = nullptr; + libkern::intrusive_shared_ptr to = from; + CHECK(test_policy::retain_count == 0); + CHECK(to.get() == nullptr); + } +} + +T_DECL(ctor_copy, "intrusive_shared_ptr.ctor.copy") { + tests(); + tests(); + tests(); + + tests(); + tests(); + tests(); + + tests(); + tests(); + + tests(); + tests(); + + // Make sure basic trait querying works + static_assert(std::is_copy_constructible_v >); + + // Make sure downcasts are disabled + static_assert(!std::is_constructible_v, /*from*/ test_shared_ptr const&>); + static_assert(!std::is_constructible_v, /*from*/ test_shared_ptr const&>); + static_assert(!std::is_constructible_v, /*from*/ test_shared_ptr const&>); + static_assert(!std::is_constructible_v, /*from*/ test_shared_ptr const&>); + + // Make sure 
const-casting away doesn't work + static_assert(!std::is_constructible_v, /*from*/ test_shared_ptr const&>); + + // Make sure casting to unrelated types doesn't work + static_assert(!std::is_constructible_v, /*from*/ test_shared_ptr const&>); + static_assert(!std::is_constructible_v, /*from*/ test_shared_ptr const&>); + static_assert(!std::is_constructible_v, /*from*/ test_shared_ptr const&>); + + // Make sure constructing with different policies doesn't work + static_assert(!std::is_constructible_v >, /*from*/ libkern::intrusive_shared_ptr > const &>); +} diff --git a/tests/intrusive_shared_ptr_src/ctor.default.cpp b/tests/intrusive_shared_ptr_src/ctor.default.cpp new file mode 100644 index 000000000..613cf9c0b --- /dev/null +++ b/tests/intrusive_shared_ptr_src/ctor.default.cpp @@ -0,0 +1,37 @@ +// +// Tests for +// intrusive_shared_ptr(); +// + +#include +#include +#include +#include "test_policy.h" + +struct T { int i; }; + +template +static void +tests() +{ + { + libkern::intrusive_shared_ptr ptr; + CHECK(ptr.get() == nullptr); + } + { + libkern::intrusive_shared_ptr ptr{}; + CHECK(ptr.get() == nullptr); + } + { + libkern::intrusive_shared_ptr ptr = libkern::intrusive_shared_ptr(); + CHECK(ptr.get() == nullptr); + } + { + libkern::intrusive_shared_ptr ptr = {}; + CHECK(ptr.get() == nullptr); + } +} + +T_DECL(ctor_default, "intrusive_shared_ptr.ctor.default") { + tests(); +} diff --git a/tests/intrusive_shared_ptr_src/ctor.move.cpp b/tests/intrusive_shared_ptr_src/ctor.move.cpp new file mode 100644 index 000000000..a97c673ab --- /dev/null +++ b/tests/intrusive_shared_ptr_src/ctor.move.cpp @@ -0,0 +1,115 @@ +// +// Tests for +// template +// intrusive_shared_ptr(intrusive_shared_ptr&& other); +// +// intrusive_shared_ptr(intrusive_shared_ptr&& other); +// + +#include +#include +#include +#include +#include +#include "test_policy.h" + +struct Base { int i; }; +struct Derived : Base { }; + +struct Base1 { int i; }; +struct Base2 { long l; }; +struct DerivedMultiple : Base1, Base2 { + DerivedMultiple(int i) : Base1{i}, Base2{i + 10} + { + } +}; + +struct Unrelated { }; + +template +static void +tests() +{ + Stored obj{3}; + + // Test with non-null pointers + { + test_policy::retain_count = 0; + libkern::intrusive_shared_ptr from(&obj, libkern::retain); + CHECK(test_policy::retain_count == 1); + CHECK(from.get() == &obj); + + libkern::intrusive_shared_ptr to(std::move(from)); // explicit + CHECK(test_policy::retain_count == 1); + CHECK(to.get() == &obj); + CHECK(from.get() == nullptr); + } + { + test_policy::retain_count = 0; + libkern::intrusive_shared_ptr from(&obj, libkern::retain); + CHECK(test_policy::retain_count == 1); + CHECK(from.get() == &obj); + + libkern::intrusive_shared_ptr to{std::move(from)}; // explicit + CHECK(test_policy::retain_count == 1); + CHECK(to.get() == &obj); + CHECK(from.get() == nullptr); + } + { + test_policy::retain_count = 0; + libkern::intrusive_shared_ptr from(&obj, libkern::retain); + CHECK(test_policy::retain_count == 1); + CHECK(from.get() == &obj); + + libkern::intrusive_shared_ptr to = std::move(from); // implicit + CHECK(test_policy::retain_count == 1); + CHECK(to.get() == &obj); + CHECK(from.get() == nullptr); + } + + // Test with a null pointer + { + test_policy::retain_count = 3; + libkern::intrusive_shared_ptr from = nullptr; + libkern::intrusive_shared_ptr to = std::move(from); + CHECK(test_policy::retain_count == 3); + CHECK(to.get() == nullptr); + CHECK(from.get() == nullptr); + } +} + +T_DECL(ctor_move, "intrusive_shared_ptr.ctor.move") { + 
tests(); + tests(); + tests(); + + tests(); + tests(); + tests(); + + tests(); + tests(); + + tests(); + tests(); + + // Make sure basic trait querying works + static_assert(std::is_move_constructible_v >); + + // Make sure downcasts are disabled + static_assert(!std::is_constructible_v, /*from*/ test_shared_ptr&&>); + static_assert(!std::is_constructible_v, /*from*/ test_shared_ptr&&>); + static_assert(!std::is_constructible_v, /*from*/ test_shared_ptr&&>); + static_assert(!std::is_constructible_v, /*from*/ test_shared_ptr&&>); + + // Make sure const-casting away doesn't work + static_assert(!std::is_constructible_v, /*from*/ test_shared_ptr&&>); + + // Make sure casting to unrelated types doesn't work + static_assert(!std::is_constructible_v, /*from*/ test_shared_ptr&&>); + static_assert(!std::is_constructible_v, /*from*/ test_shared_ptr&&>); + static_assert(!std::is_constructible_v, /*from*/ test_shared_ptr&&>); + + // Make sure constructing with different policies doesn't work + static_assert(!std::is_constructible_v >, /*from*/ libkern::intrusive_shared_ptr >&&>); +} diff --git a/tests/intrusive_shared_ptr_src/ctor.nullptr.cpp b/tests/intrusive_shared_ptr_src/ctor.nullptr.cpp new file mode 100644 index 000000000..acbcdcb01 --- /dev/null +++ b/tests/intrusive_shared_ptr_src/ctor.nullptr.cpp @@ -0,0 +1,33 @@ +// +// Tests for +// intrusive_shared_ptr(nullptr_t); +// + +#include +#include +#include +#include "test_policy.h" + +struct T { int i; }; + +template +static void +tests() +{ + { + libkern::intrusive_shared_ptr ptr = nullptr; + CHECK(ptr.get() == nullptr); + } + { + libkern::intrusive_shared_ptr ptr{nullptr}; + CHECK(ptr.get() == nullptr); + } + { + libkern::intrusive_shared_ptr ptr(nullptr); + CHECK(ptr.get() == nullptr); + } +} + +T_DECL(ctor_nullptr, "intrusive_shared_ptr.ctor.nullptr") { + tests(); +} diff --git a/tests/intrusive_shared_ptr_src/ctor.ptr.no_retain.cpp b/tests/intrusive_shared_ptr_src/ctor.ptr.no_retain.cpp new file mode 100644 index 000000000..df0894f2b --- /dev/null +++ b/tests/intrusive_shared_ptr_src/ctor.ptr.no_retain.cpp @@ -0,0 +1,36 @@ +// +// Tests for +// explicit intrusive_shared_ptr(pointer p, no_retain_t); +// + +#include +#include +#include +#include "test_policy.h" + +struct T { int i; }; + +template +static void +tests() +{ + T obj{0}; + + { + test_policy::retain_count = 0; + libkern::intrusive_shared_ptr ptr(&obj, libkern::no_retain); + CHECK(ptr.get() == &obj); + CHECK(test_policy::retain_count == 0); + } + { + test_policy::retain_count = 0; + libkern::intrusive_shared_ptr ptr{&obj, libkern::no_retain}; + CHECK(ptr.get() == &obj); + CHECK(test_policy::retain_count == 0); + } +} + +T_DECL(ctor_ptr_no_retain, "intrusive_shared_ptr.ctor.ptr.no_retain") { + tests(); + tests(); +} diff --git a/tests/intrusive_shared_ptr_src/ctor.ptr.retain.cpp b/tests/intrusive_shared_ptr_src/ctor.ptr.retain.cpp new file mode 100644 index 000000000..b66637835 --- /dev/null +++ b/tests/intrusive_shared_ptr_src/ctor.ptr.retain.cpp @@ -0,0 +1,36 @@ +// +// Tests for +// explicit intrusive_shared_ptr(pointer p, retain_t); +// + +#include +#include +#include +#include "test_policy.h" + +struct T { int i; }; + +template +static void +tests() +{ + T obj{0}; + + { + test_policy::retain_count = 0; + libkern::intrusive_shared_ptr ptr(&obj, libkern::retain); + CHECK(ptr.get() == &obj); + CHECK(test_policy::retain_count == 1); + } + { + test_policy::retain_count = 0; + libkern::intrusive_shared_ptr ptr{&obj, libkern::retain}; + CHECK(ptr.get() == &obj); + 
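        // unlike the no_retain constructor, the retain constructor must
        // bump the refcount exactly once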
CHECK(test_policy::retain_count == 1); + } +} + +T_DECL(ctor_ptr_retain, "intrusive_shared_ptr.ctor.ptr.retain") { + tests(); + tests(); +} diff --git a/tests/intrusive_shared_ptr_src/deref.cpp b/tests/intrusive_shared_ptr_src/deref.cpp new file mode 100644 index 000000000..19e092318 --- /dev/null +++ b/tests/intrusive_shared_ptr_src/deref.cpp @@ -0,0 +1,38 @@ +// +// Tests for +// T& operator*() const noexcept; +// T* operator->() const noexcept; +// + +#include +#include +#include "test_policy.h" + +struct T { + int i; +}; + +template +static void +tests() +{ + T obj{3}; + tracked_shared_ptr ptr(&obj, libkern::no_retain); + + { + T& ref = *ptr; + CHECK(&ref == &obj); + CHECK(ref.i == 3); + } + + { + int const& ref = ptr->i; + CHECK(&ref == &obj.i); + CHECK(ref == 3); + } +} + +T_DECL(deref, "intrusive_shared_ptr.deref") { + tests(); + tests(); +} diff --git a/tests/intrusive_shared_ptr_src/detach.cpp b/tests/intrusive_shared_ptr_src/detach.cpp new file mode 100644 index 000000000..6e3433558 --- /dev/null +++ b/tests/intrusive_shared_ptr_src/detach.cpp @@ -0,0 +1,32 @@ +// +// Tests for +// pointer detach() noexcept; +// + +#include +#include +#include "test_policy.h" + +struct T { + int i; +}; + +template +static void +tests() +{ + T obj{3}; + + tracking_policy::reset(); + tracked_shared_ptr ptr(&obj, libkern::retain); + T* raw = ptr.detach(); + CHECK(raw == &obj); + CHECK(ptr.get() == nullptr); // ptr was set to null + CHECK(tracking_policy::retains == 1); + CHECK(tracking_policy::releases == 0); +} + +T_DECL(detach, "intrusive_shared_ptr.detach") { + tests(); + tests(); +} diff --git a/tests/intrusive_shared_ptr_src/dtor.cpp b/tests/intrusive_shared_ptr_src/dtor.cpp new file mode 100644 index 000000000..dd5bc0c73 --- /dev/null +++ b/tests/intrusive_shared_ptr_src/dtor.cpp @@ -0,0 +1,38 @@ +// +// Tests for +// ~intrusive_shared_ptr(); +// + +#include +#include +#include +#include "test_policy.h" + +struct T { int i; }; + +T_DECL(dtor, "intrusive_shared_ptr.dtor") { + // Destroy a non-null shared pointer + { + T obj{0}; + test_policy::retain_count = 3; + + { + libkern::intrusive_shared_ptr ptr(&obj, libkern::no_retain); + CHECK(test_policy::retain_count == 3); + } + + CHECK(test_policy::retain_count == 2); + } + + // Destroy a null shared pointer + { + test_policy::retain_count = 3; + + { + libkern::intrusive_shared_ptr ptr = nullptr; + CHECK(test_policy::retain_count == 3); + } + + CHECK(test_policy::retain_count == 3); // not decremented + } +} diff --git a/tests/intrusive_shared_ptr_src/get.cpp b/tests/intrusive_shared_ptr_src/get.cpp new file mode 100644 index 000000000..8b37b8e7a --- /dev/null +++ b/tests/intrusive_shared_ptr_src/get.cpp @@ -0,0 +1,50 @@ +// +// Tests for +// pointer get() const noexcept; +// + +#include +#include "test_policy.h" +#include +#include + +struct T { + int i; +}; + +template +static constexpr auto +can_call_get_on_temporary(int)->decltype(std::declval >().get(), bool ()) +{ + return true; +} + +template +static constexpr auto +can_call_get_on_temporary(...)->bool +{ + return false; +} + +template +static void +tests() +{ + { + T obj{3}; + tracking_policy::reset(); + tracked_shared_ptr const ptr(&obj, libkern::retain); + T* raw = ptr.get(); + CHECK(raw == &obj); + CHECK(ptr.get() == raw); // ptr didn't change + CHECK(tracking_policy::retains == 1); + CHECK(tracking_policy::releases == 0); + } + + static_assert(!can_call_get_on_temporary(int{}), ""); +} + +T_DECL(get, "intrusive_shared_ptr.get") { + tests(); + tests(); +} diff --git 
a/tests/intrusive_shared_ptr_src/operator.bool.cpp b/tests/intrusive_shared_ptr_src/operator.bool.cpp new file mode 100644 index 000000000..76107b746 --- /dev/null +++ b/tests/intrusive_shared_ptr_src/operator.bool.cpp @@ -0,0 +1,45 @@ +// +// Tests for +// explicit constexpr operator bool() const noexcept; +// + +#include +#include +#include +#include "test_policy.h" + +struct T { + int i; +}; + +template +static void +tests() +{ + T obj{3}; + + { + test_shared_ptr const ptr(&obj, libkern::no_retain); + CHECK(static_cast(ptr)); + if (ptr) { + } else { + CHECK(false); + } + } + + { + test_shared_ptr const ptr = nullptr; + CHECK(!static_cast(ptr)); + if (!ptr) { + } else { + CHECK(false); + } + } + + static_assert(!std::is_convertible_v, bool>); +} + +T_DECL(operator_bool, "intrusive_shared_ptr.operator.bool") { + tests(); + tests(); +} diff --git a/tests/intrusive_shared_ptr_src/reset.cpp b/tests/intrusive_shared_ptr_src/reset.cpp new file mode 100644 index 000000000..7918781cc --- /dev/null +++ b/tests/intrusive_shared_ptr_src/reset.cpp @@ -0,0 +1,57 @@ +// +// Tests for +// void reset() noexcept; +// + +#include +#include +#include "test_policy.h" + +struct T { + int i; +}; + +template +static void +tests() +{ + T obj{3}; + + // reset() on a non-null shared pointer + { + tracked_shared_ptr ptr(&obj, libkern::retain); + tracking_policy::reset(); + ptr.reset(); + CHECK(tracking_policy::releases == 1); + CHECK(tracking_policy::retains == 0); + CHECK(ptr.get() == nullptr); + } + + // reset() on a null shared pointer + { + tracked_shared_ptr ptr = nullptr; + tracking_policy::reset(); + ptr.reset(); + CHECK(tracking_policy::releases == 0); + CHECK(tracking_policy::retains == 0); + CHECK(ptr.get() == nullptr); + } + + // reset() as a self-reference + { + tracked_shared_ptr ptr(&obj, libkern::retain); + tracked_shared_ptr ptr2(&obj, libkern::retain); + CHECK(!ptr.reset()); + + CHECK(&ptr.reset() == &ptr); + + // check short-circuiting + bool ok = (ptr.reset() && !ptr2.reset()); + CHECK(ptr2.get() != nullptr); + } +} + +T_DECL(reset, "intrusive_shared_ptr.reset") { + tests(); + tests(); +} diff --git a/tests/intrusive_shared_ptr_src/reset.no_retain.cpp b/tests/intrusive_shared_ptr_src/reset.no_retain.cpp new file mode 100644 index 000000000..db1af4ae9 --- /dev/null +++ b/tests/intrusive_shared_ptr_src/reset.no_retain.cpp @@ -0,0 +1,76 @@ +// +// Tests for +// void reset(pointer p, no_retain_t) noexcept; +// + +#include +#include +#include "test_policy.h" + +struct T { + int i; +}; + +template +static void +tests() +{ + T obj1{1}; + T obj2{2}; + + // reset() non-null shared pointer to non-null raw pointer + { + tracked_shared_ptr ptr(&obj1, libkern::retain); + tracking_policy::reset(); + ptr.reset(&obj2, libkern::no_retain); + CHECK(tracking_policy::releases == 1); + CHECK(tracking_policy::retains == 0); + CHECK(ptr.get() == &obj2); + } + + // reset() null shared pointer to non-null raw pointer + { + tracked_shared_ptr ptr = nullptr; + tracking_policy::reset(); + ptr.reset(&obj2, libkern::no_retain); + CHECK(tracking_policy::releases == 0); + CHECK(tracking_policy::retains == 0); + CHECK(ptr.get() == &obj2); + } + + // reset() non-null shared pointer to null raw pointer + { + tracked_shared_ptr ptr(&obj1, libkern::retain); + tracking_policy::reset(); + ptr.reset(nullptr, libkern::no_retain); + CHECK(tracking_policy::releases == 1); + CHECK(tracking_policy::retains == 0); + CHECK(ptr.get() == nullptr); + } + + // reset() null shared pointer to null raw pointer + { + tracked_shared_ptr ptr = 
nullptr; + tracking_policy::reset(); + ptr.reset(nullptr, libkern::no_retain); + CHECK(tracking_policy::releases == 0); + CHECK(tracking_policy::retains == 0); + CHECK(ptr.get() == nullptr); + } + + // reset() as a self-reference + { + tracked_shared_ptr ptr; + tracked_shared_ptr ptr2; + CHECK(ptr.reset(&obj2, libkern::no_retain)); + + // check short-circuiting + bool ok = (ptr.reset() && ptr2.reset(&obj1, libkern::no_retain)); + CHECK(ptr2.get() == nullptr); + } +} + +T_DECL(reset_no_retain, "intrusive_shared_ptr.reset.no_retain") { + tests(); + tests(); +} diff --git a/tests/intrusive_shared_ptr_src/reset.retain.cpp b/tests/intrusive_shared_ptr_src/reset.retain.cpp new file mode 100644 index 000000000..f71ca6c0b --- /dev/null +++ b/tests/intrusive_shared_ptr_src/reset.retain.cpp @@ -0,0 +1,89 @@ +// +// Tests for +// void reset(pointer p, retain_t) noexcept; +// + +#include +#include +#include "test_policy.h" + +struct T { + int i; +}; + +template +static void +tests() +{ + T obj1{1}; + T obj2{2}; + + // reset() non-null shared pointer to non-null raw pointer + { + tracked_shared_ptr ptr(&obj1, libkern::retain); + tracking_policy::reset(); + ptr.reset(&obj2, libkern::retain); + CHECK(tracking_policy::releases == 1); + CHECK(tracking_policy::retains == 1); + CHECK(ptr.get() == &obj2); + } + + // reset() non-null shared pointer to null raw pointer + { + tracked_shared_ptr ptr(&obj1, libkern::retain); + tracking_policy::reset(); + ptr.reset(nullptr, libkern::retain); + CHECK(tracking_policy::releases == 1); + CHECK(tracking_policy::retains == 0); + CHECK(ptr.get() == nullptr); + } + + // reset() null shared pointer to non-null raw pointer + { + tracked_shared_ptr ptr = nullptr; + tracking_policy::reset(); + ptr.reset(&obj2, libkern::retain); + CHECK(tracking_policy::releases == 0); + CHECK(tracking_policy::retains == 1); + CHECK(ptr.get() == &obj2); + } + + // reset() null shared pointer to null raw pointer + { + tracked_shared_ptr ptr = nullptr; + tracking_policy::reset(); + ptr.reset(nullptr, libkern::retain); + CHECK(tracking_policy::releases == 0); + CHECK(tracking_policy::retains == 0); + CHECK(ptr.get() == nullptr); + } + + // self-reset() should not cause the refcount to hit 0, ever + { + tracking_policy::reset(); + tracked_shared_ptr ptr(&obj1, libkern::retain); + CHECK(tracking_policy::retains == 1); + ptr.reset(ptr.get(), libkern::retain); + CHECK(tracking_policy::retains == 2); + CHECK(tracking_policy::releases == 1); + CHECK(tracking_policy::refcount == 1); + CHECK(!tracking_policy::hit_zero); + CHECK(ptr.get() == &obj1); + } + + // reset() as a self-reference + { + tracked_shared_ptr ptr; + tracked_shared_ptr ptr2; + CHECK(ptr.reset(&obj2, libkern::retain)); + + // check short-circuiting + bool ok = (ptr.reset() && ptr2.reset(&obj1, libkern::retain)); + CHECK(ptr2.get() == nullptr); + } +} + +T_DECL(reset_retain, "intrusive_shared_ptr.reset.retain") { + tests(); + tests(); +} diff --git a/tests/intrusive_shared_ptr_src/swap.cpp b/tests/intrusive_shared_ptr_src/swap.cpp new file mode 100644 index 000000000..746e14681 --- /dev/null +++ b/tests/intrusive_shared_ptr_src/swap.cpp @@ -0,0 +1,96 @@ +// +// Tests for +// void swap(intrusive_shared_ptr& a, intrusive_shared_ptr& b); +// + +#include +#include +#include "test_policy.h" + +struct T { int i; }; + +template +static void +tests() +{ + T obj1{1}; + T obj2{2}; + + // Swap non-null with non-null + { + tracked_shared_ptr a(&obj1, libkern::retain); + tracked_shared_ptr b(&obj2, libkern::retain); + T* a_raw = a.get(); + T* b_raw = 
b.get(); + tracking_policy::reset(); + + swap(a, b); // ADL call + + CHECK(tracking_policy::retains == 0); + CHECK(tracking_policy::releases == 0); + CHECK(a.get() == b_raw); + CHECK(b.get() == a_raw); + } + + // Swap non-null with null + { + tracked_shared_ptr a(&obj1, libkern::retain); + tracked_shared_ptr b = nullptr; + T* a_raw = a.get(); + tracking_policy::reset(); + + swap(a, b); // ADL call + + CHECK(tracking_policy::retains == 0); + CHECK(tracking_policy::releases == 0); + CHECK(a.get() == nullptr); + CHECK(b.get() == a_raw); + } + + // Swap null with non-null + { + tracked_shared_ptr a = nullptr; + tracked_shared_ptr b(&obj2, libkern::retain); + T* b_raw = b.get(); + tracking_policy::reset(); + + swap(a, b); // ADL call + + CHECK(tracking_policy::retains == 0); + CHECK(tracking_policy::releases == 0); + CHECK(a.get() == b_raw); + CHECK(b.get() == nullptr); + } + + // Swap null with null + { + tracked_shared_ptr a = nullptr; + tracked_shared_ptr b = nullptr; + tracking_policy::reset(); + + swap(a, b); // ADL call + + CHECK(tracking_policy::retains == 0); + CHECK(tracking_policy::releases == 0); + CHECK(a.get() == nullptr); + CHECK(b.get() == nullptr); + } + + // Swap with self + { + tracked_shared_ptr a(&obj1, libkern::retain); + T* a_raw = a.get(); + tracking_policy::reset(); + + swap(a, a); // ADL call + + CHECK(tracking_policy::retains == 0); + CHECK(tracking_policy::releases == 0); + CHECK(a.get() == a_raw); + } +} + +T_DECL(swap, "intrusive_shared_ptr.swap") { + tests(); + tests(); +} diff --git a/tests/intrusive_shared_ptr_src/test_policy.h b/tests/intrusive_shared_ptr_src/test_policy.h new file mode 100644 index 000000000..e1ff2b0f1 --- /dev/null +++ b/tests/intrusive_shared_ptr_src/test_policy.h @@ -0,0 +1,80 @@ +#ifndef TESTS_INTRUSIVE_SHARED_PTR_TEST_POLICY_H +#define TESTS_INTRUSIVE_SHARED_PTR_TEST_POLICY_H + +#include +#include + +struct test_policy { + static inline int retain_count = 0; + + template + static void + retain(T&) + { + ++retain_count; + } + template + static void + release(T&) + { + --retain_count; + } +}; + +struct tracking_policy { + static inline int retains = 0; + static inline int releases = 0; + static inline int refcount = 0; + static inline bool hit_zero = false; + + static void + reset() + { + retains = 0; + releases = 0; + refcount = 0; + hit_zero = false; + } + + template + static void + retain(T&) + { + ++retains; + ++refcount; + } + template + static void + release(T&) + { + ++releases; + --refcount; + if (refcount == 0) { + hit_zero = true; + } + } +}; + +template +struct dummy_policy { + template + static void + retain(T&) + { + } + template + static void + release(T&) + { + } +}; + +template +using tracked_shared_ptr = libkern::intrusive_shared_ptr; + +template +using test_shared_ptr = libkern::intrusive_shared_ptr; + +#define CHECK(...) 
T_ASSERT_TRUE((__VA_ARGS__), # __VA_ARGS__) + +#endif // !TESTS_INTRUSIVE_SHARED_PTR_TEST_POLICY_H diff --git a/tests/invalid_setaudit_57414044.c b/tests/invalid_setaudit_57414044.c new file mode 100644 index 000000000..dcb619ff9 --- /dev/null +++ b/tests/invalid_setaudit_57414044.c @@ -0,0 +1,46 @@ +#pragma clang diagnostic ignored "-Wdeprecated-declarations" + +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +T_GLOBAL_META(T_META_RUN_CONCURRENTLY(true)); + +T_DECL(invalid_setaudit_57414044, + "Verify that auditing a setaudit_addr syscall which has an invalid " + "at_type field does not panic", + T_META_CHECK_LEAKS(false)) +{ + T_SETUPBEGIN; + + int cond, ret = auditon(A_GETCOND, &cond, sizeof(cond)); + if (ret == -1 && errno == ENOSYS) { + T_SKIP("no kernel support for auditing; can't test"); + } + T_ASSERT_POSIX_SUCCESS(ret, "auditon A_GETCOND"); + if (cond != AUC_AUDITING) { + T_SKIP("auditing is not enabled; can't test"); + } + + /* set up auditing to audit `setaudit_addr` */ + auditpinfo_addr_t pinfo_addr = {.ap_pid = getpid()}; + T_ASSERT_POSIX_SUCCESS(auditon(A_GETPINFO_ADDR, &pinfo_addr, sizeof(pinfo_addr)), NULL); + auditpinfo_t pinfo = {.ap_pid = getpid(), .ap_mask = pinfo_addr.ap_mask}; + pinfo.ap_mask.am_failure |= 0x800; /* man 5 audit_class */ + T_ASSERT_POSIX_SUCCESS(auditon(A_SETPMASK, &pinfo, sizeof(pinfo)), NULL); + + T_SETUPEND; + + struct auditinfo_addr a; + memset(&a, 0, sizeof(a)); + a.ai_termid.at_type = 999; + T_ASSERT_POSIX_FAILURE(setaudit_addr(&a, sizeof(a)), EINVAL, + "setaudit_addr should fail due to invalid at_type"); +} diff --git a/tests/ioconnectasyncmethod_57641955.c b/tests/ioconnectasyncmethod_57641955.c new file mode 100644 index 000000000..a95509a3c --- /dev/null +++ b/tests/ioconnectasyncmethod_57641955.c @@ -0,0 +1,29 @@ +#include +#include +#include +#include + + +T_GLOBAL_META(T_META_RUN_CONCURRENTLY(true)); + +T_DECL(ioconnectasyncmethod_referenceCnt, + "Test IOConnectCallAsyncMethod with referenceCnt < 1", + T_META_ASROOT(true)) +{ + io_service_t service; + io_connect_t conn; + mach_port_t wakePort; + uint64_t reference = 0; + service = IOServiceGetMatchingService(kIOMasterPortDefault, IOServiceMatching(kAppleKeyStoreServiceName)); + if (service == IO_OBJECT_NULL) { + T_SKIP("Service " kAppleKeyStoreServiceName " could not be opened. 
skipping test"); + } + T_ASSERT_NE(service, MACH_PORT_NULL, "got " kAppleKeyStoreServiceName " service"); + T_ASSERT_MACH_SUCCESS(IOServiceOpen(service, mach_task_self(), 0, &conn), "opened connection to service"); + T_ASSERT_MACH_SUCCESS(mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &wakePort), "allocated wake port"); + T_ASSERT_MACH_ERROR(IOConnectCallAsyncMethod(conn, 0 /* selector */, wakePort, &reference, 0 /* referenceCnt */, + NULL /* input */, 0 /* inputCnt */, NULL /* inputStruct */, 0 /* inputStructCnt */, + NULL /* output */, 0 /* outputCnt */, NULL /* outputStruct */, 0 /* outputStructCntP */), kIOReturnBadArgument, "IOConnectCallAsyncMethod should fail with kIOReturnBadArgument"); + IOServiceClose(conn); + mach_port_mod_refs(mach_task_self(), wakePort, MACH_PORT_RIGHT_RECEIVE, -1); +} diff --git a/tests/ipsec.entitlements b/tests/ipsec.entitlements new file mode 100644 index 000000000..30cb90abe --- /dev/null +++ b/tests/ipsec.entitlements @@ -0,0 +1,10 @@ + + + + + com.apple.private.nehelper.privileged + + com.apple.private.neagent + + + diff --git a/tests/ipsec.m b/tests/ipsec.m new file mode 100644 index 000000000..0b3242307 --- /dev/null +++ b/tests/ipsec.m @@ -0,0 +1,616 @@ +#import +#import +#import +#import +#import +#import +#import +#import +#import +#import + +T_GLOBAL_META( + T_META_NAMESPACE("xnu.ipsec"), + T_META_ASROOT(true), + T_META_CHECK_LEAKS(false)); + +typedef enum { + TEST_INVALID = 0, + TEST_IPSEC_IPv4_ENCAPSULATE_PANIC = 1, + TEST_IPSEC_IPv6_ENCAPSULATE_PANIC = 2, +} test_identifier; + +#define TEST_SRC_ADDRESS_IPv4 "10.0.0.2" +#define TEST_DST_ADDRESS_IPv4 "10.0.0.3" +#define TEST_IPSEC_IPv4_INTERFACE_ADDRESS "192.168.10.10" +#define TEST_IPSEC_IPv6_INTERFACE_ADDRESS "fdd3:0f89:9afd:9b9c::1234" +#define TEST_DELEGATE_IPSEC_INTERFACE_ADDRESS "192.168.20.10" +#define TEST_IPSEC_IPv4_INTERFACE_MASK "255.255.255.255" +#define TEST_IPSEC_IPv6_INTERFACE_MASK "ffff:ffff:ffff:ffff::" + +static test_identifier test_id = TEST_INVALID; +static dispatch_source_t pfkey_source = NULL; +static NEVirtualInterfaceRef ipsecInterface = NULL; +static NEVirtualInterfaceRef delegateIPsecInterface = NULL; +static int bpf_fd = -1; + +static void bpf_write(int fd); +static void pfkey_cleanup(void); +static void pfkey_process_message_test_encapsulate_panic(uint8_t **mhp, int pfkey_socket); + +static void(*const process_pfkey_message_tests[])(uint8_t * *mhp, int pfkey_socket) = +{ + NULL, + pfkey_process_message_test_encapsulate_panic, // TEST_IPSEC_IPv4_ENCAPSULATE_PANIC + pfkey_process_message_test_encapsulate_panic, // TEST_IPSEC_IPv6_ENCAPSULATE_PANIC +}; + +static void +send_pkey_add_sa(int pfkey_socket, uint32_t spi, const char *src, const char *dst, int family) +{ + uint8_t payload[MCLBYTES] __attribute__ ((aligned(32))); + bzero(payload, sizeof(payload)); + uint16_t tlen = 0; + + struct sadb_msg *msg_payload = (struct sadb_msg *)payload; + msg_payload->sadb_msg_version = PF_KEY_V2; + msg_payload->sadb_msg_type = SADB_ADD; + msg_payload->sadb_msg_errno = 0; + msg_payload->sadb_msg_satype = SADB_SATYPE_ESP; + msg_payload->sadb_msg_len = PFKEY_UNIT64(tlen); + msg_payload->sadb_msg_reserved = 0; + msg_payload->sadb_msg_seq = 0; + msg_payload->sadb_msg_pid = (u_int32_t)getpid(); + tlen += sizeof(*msg_payload); + + struct sadb_sa_2 *sa2_payload = (struct sadb_sa_2 *)(void *)(payload + tlen); + sa2_payload->sa.sadb_sa_len = PFKEY_UNIT64(sizeof(*sa2_payload)); + sa2_payload->sa.sadb_sa_exttype = SADB_EXT_SA; + sa2_payload->sa.sadb_sa_spi = htonl(spi); + 
sa2_payload->sa.sadb_sa_replay = 4; + sa2_payload->sa.sadb_sa_state = SADB_SASTATE_LARVAL; + sa2_payload->sa.sadb_sa_auth = SADB_X_AALG_SHA2_256; + sa2_payload->sa.sadb_sa_encrypt = SADB_X_EALG_AESCBC; + sa2_payload->sa.sadb_sa_flags |= (SADB_X_EXT_NATT | SADB_X_EXT_NATT_KEEPALIVE); + sa2_payload->sadb_sa_natt_src_port = htons(4500); + sa2_payload->sadb_sa_natt_port = 4500; + sa2_payload->sadb_sa_natt_interval = 20; + sa2_payload->sadb_sa_natt_offload_interval = 0; + tlen += sizeof(*sa2_payload); + + struct sadb_x_sa2 *sa2_x_payload = (struct sadb_x_sa2 *)(void *)(payload + tlen); + sa2_x_payload->sadb_x_sa2_len = PFKEY_UNIT64(sizeof(*sa2_x_payload)); + sa2_x_payload->sadb_x_sa2_exttype = SADB_X_EXT_SA2; + sa2_x_payload->sadb_x_sa2_mode = IPSEC_MODE_TUNNEL; + sa2_x_payload->sadb_x_sa2_reqid = 0; + tlen += sizeof(*sa2_x_payload); + + uint8_t prefixlen = (family == AF_INET) ? (sizeof(struct in_addr) << 3) : (sizeof(struct in6_addr) << 3); + + struct sadb_address *src_address_payload = (struct sadb_address *)(void *)(payload + tlen); + src_address_payload->sadb_address_exttype = SADB_EXT_ADDRESS_SRC & 0xffff; + src_address_payload->sadb_address_proto = IPSEC_ULPROTO_ANY & 0xff; + src_address_payload->sadb_address_prefixlen = prefixlen; + src_address_payload->sadb_address_reserved = 0; + tlen += sizeof(*src_address_payload); + + if (family == AF_INET) { + struct sockaddr_in *src4 = (struct sockaddr_in *)(void *)(payload + tlen); + T_QUIET; T_ASSERT_EQ_INT(inet_pton(AF_INET, src, &src4->sin_addr), 1, "src address fail"); + src4->sin_family = AF_INET; + src4->sin_len = sizeof(*src4); + uint16_t len = sizeof(*src_address_payload) + PFKEY_ALIGN8(src4->sin_len); + src_address_payload->sadb_address_len = PFKEY_UNIT64(len); + tlen += PFKEY_ALIGN8(src4->sin_len); + } else { + struct sockaddr_in6 *src6 = (struct sockaddr_in6 *)(void *)(payload + tlen); + T_QUIET; T_ASSERT_EQ_INT(inet_pton(AF_INET6, src, &src6->sin6_addr), 1, "src address fail"); + src6->sin6_family = AF_INET6; + src6->sin6_len = sizeof(*src6); + uint16_t len = sizeof(*src_address_payload) + PFKEY_ALIGN8(src6->sin6_len); + src_address_payload->sadb_address_len = PFKEY_UNIT64(len); + tlen += PFKEY_ALIGN8(src6->sin6_len); + } + + struct sadb_address *dst_address_payload = (struct sadb_address *)(void *)(payload + tlen); + dst_address_payload->sadb_address_exttype = SADB_EXT_ADDRESS_DST & 0xffff; + dst_address_payload->sadb_address_proto = IPSEC_ULPROTO_ANY & 0xff; + dst_address_payload->sadb_address_prefixlen = prefixlen; + dst_address_payload->sadb_address_reserved = 0; + tlen += sizeof(*dst_address_payload); + + if (family == AF_INET) { + struct sockaddr_in *dst4 = (struct sockaddr_in *)(void *)(payload + tlen); + T_QUIET; T_ASSERT_EQ_INT(inet_pton(AF_INET, dst, &dst4->sin_addr), 1, "dst address fail"); + dst4->sin_family = AF_INET; + dst4->sin_len = sizeof(*dst4); + uint16_t len = sizeof(*dst_address_payload) + PFKEY_ALIGN8(dst4->sin_len); + dst_address_payload->sadb_address_len = PFKEY_UNIT64(len); + tlen += PFKEY_ALIGN8(dst4->sin_len); + } else { + struct sockaddr_in6 *dst6 = (struct sockaddr_in6 *)(void *)(payload + tlen); + T_QUIET; T_ASSERT_EQ_INT(inet_pton(AF_INET6, dst, &dst6->sin6_addr), 1, "dst address fail"); + dst6->sin6_family = AF_INET6; + dst6->sin6_len = sizeof(*dst6); + uint16_t len = sizeof(*dst_address_payload) + PFKEY_ALIGN8(dst6->sin6_len); + dst_address_payload->sadb_address_len = PFKEY_UNIT64(len); + tlen += PFKEY_ALIGN8(dst6->sin6_len); + } + + CFStringRef ipsecIfName = NEVirtualInterfaceCopyName(ipsecInterface); 
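// The interface names copied here are placed into the SADB_X_EXT_IPSECIF extension further down, tying the SA to the ipsec interface and its delegate (outgoing) interface.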
+ T_QUIET; T_ASSERT_NOTNULL(ipsecIfName, "failed to get ipsec interface name"); + char ifname[IFNAMSIZ]; + CFStringGetCString(ipsecIfName, ifname, IFNAMSIZ, kCFStringEncodingUTF8); + + CFStringRef delegateIPsecIfName = NEVirtualInterfaceCopyName(delegateIPsecInterface); + T_QUIET; T_ASSERT_NOTNULL(delegateIPsecIfName, "failed to get delegate ipsec interface name"); + char delegateIfname[IFNAMSIZ]; + CFStringGetCString(delegateIPsecIfName, delegateIfname, IFNAMSIZ, kCFStringEncodingUTF8); + + struct sadb_x_ipsecif *ipsec_if_payload = (struct sadb_x_ipsecif *)(void *)(payload + tlen); + ipsec_if_payload->sadb_x_ipsecif_len = PFKEY_UNIT64(sizeof(*ipsec_if_payload)); + ipsec_if_payload->sadb_x_ipsecif_exttype = SADB_X_EXT_IPSECIF; + strncpy(ipsec_if_payload->sadb_x_ipsecif_ipsec_if, ifname, strlen(ifname)); + strncpy(ipsec_if_payload->sadb_x_ipsecif_outgoing_if, delegateIfname, strlen(delegateIfname)); + tlen += sizeof(*ipsec_if_payload); + + struct sadb_key *encrypt_key_payload = (struct sadb_key *)(void *)(payload + tlen); + uint16_t len = sizeof(*encrypt_key_payload) + PFKEY_ALIGN8(32); + encrypt_key_payload->sadb_key_len = PFKEY_UNIT64(len); + encrypt_key_payload->sadb_key_exttype = SADB_EXT_KEY_ENCRYPT; + encrypt_key_payload->sadb_key_bits = (uint16_t)(32 << 3); + encrypt_key_payload->sadb_key_reserved = 0; + tlen += sizeof(*encrypt_key_payload); + arc4random_buf(payload + tlen, 32); + tlen += PFKEY_ALIGN8(32); + + struct sadb_key *auth_key_payload = (struct sadb_key *)(void *)(payload + tlen); + len = sizeof(*auth_key_payload) + PFKEY_ALIGN8(32); + auth_key_payload->sadb_key_len = PFKEY_UNIT64(len); + auth_key_payload->sadb_key_exttype = SADB_EXT_KEY_AUTH; + auth_key_payload->sadb_key_bits = (uint16_t)(32 << 3); + auth_key_payload->sadb_key_reserved = 0; + tlen += sizeof(*auth_key_payload); + arc4random_buf(payload + tlen, 32); + tlen += PFKEY_ALIGN8(32); + + struct sadb_lifetime *hard_lifetime_payload = (struct sadb_lifetime *)(void *)(payload + tlen); + hard_lifetime_payload->sadb_lifetime_len = PFKEY_UNIT64(sizeof(*hard_lifetime_payload)); + hard_lifetime_payload->sadb_lifetime_exttype = SADB_EXT_LIFETIME_HARD; + tlen += sizeof(*hard_lifetime_payload); + + struct sadb_lifetime *soft_lifetime_payload = (struct sadb_lifetime *)(void *)(payload + tlen); + soft_lifetime_payload->sadb_lifetime_len = PFKEY_UNIT64(sizeof(*soft_lifetime_payload)); + soft_lifetime_payload->sadb_lifetime_exttype = SADB_EXT_LIFETIME_SOFT; + tlen += sizeof(*soft_lifetime_payload); + + // Update the total length + msg_payload->sadb_msg_len = PFKEY_UNIT64(tlen); + T_QUIET; T_ASSERT_POSIX_SUCCESS(send(pfkey_socket, payload, (size_t)PFKEY_UNUNIT64(msg_payload->sadb_msg_len), 0), "pfkey send update sa"); +} + +static void +send_pfkey_flush_sa(int pfkey_socket) +{ + uint8_t payload[MCLBYTES] __attribute__ ((aligned(32))); + bzero(payload, sizeof(payload)); + uint16_t tlen = 0; + + struct sadb_msg *msg_payload = (struct sadb_msg *)payload; + msg_payload->sadb_msg_version = PF_KEY_V2; + msg_payload->sadb_msg_type = SADB_FLUSH; + msg_payload->sadb_msg_errno = 0; + msg_payload->sadb_msg_satype = SADB_SATYPE_UNSPEC; + msg_payload->sadb_msg_len = PFKEY_UNIT64(tlen); + msg_payload->sadb_msg_reserved = 0; + msg_payload->sadb_msg_seq = 0; + msg_payload->sadb_msg_pid = (u_int32_t)getpid(); + tlen += sizeof(*msg_payload); + + // Update the total length + msg_payload->sadb_msg_len = PFKEY_UNIT64(tlen); + T_QUIET; T_ASSERT_POSIX_SUCCESS(send(pfkey_socket, payload, (size_t)PFKEY_UNUNIT64(msg_payload->sadb_msg_len), 0), "pfkey flush 
sa"); +} + +static void +pfkey_cleanup(void) +{ + if (pfkey_source != NULL) { + int pfkey_socket = (int)dispatch_source_get_handle(pfkey_source); + if (pfkey_socket > 0) { + send_pfkey_flush_sa(pfkey_socket); + } + dispatch_source_cancel(pfkey_source); + pfkey_source = NULL; + } +} + +static void +pfkey_align(struct sadb_msg *msg, uint8_t **mhp) +{ + struct sadb_ext *ext; + int i; + uint8_t *p; + uint8_t *ep; /* XXX should be passed from upper layer */ + + /* validity check */ + T_QUIET; T_ASSERT_NOTNULL(msg, "pfkey align msg"); + T_QUIET; T_ASSERT_NOTNULL(mhp, "pfkey align mhp"); + + /* initialize */ + for (i = 0; i < SADB_EXT_MAX + 1; i++) { + mhp[i] = NULL; + } + + mhp[0] = (void *)msg; + + /* initialize */ + p = (void *) msg; + ep = p + PFKEY_UNUNIT64(msg->sadb_msg_len); + + /* skip base header */ + p += sizeof(struct sadb_msg); + + while (p < ep) { + ext = (void *)p; + T_QUIET; T_ASSERT_GE_PTR((void *)ep, (void *)(p + sizeof(*ext)), "pfkey extension header beyond end of buffer"); + T_QUIET; T_ASSERT_GE_ULONG((unsigned long)PFKEY_EXTLEN(ext), sizeof(*ext), "pfkey extension shorter than extension header"); + T_QUIET; T_ASSERT_GE_PTR((void *)ep, (void *)(p + PFKEY_EXTLEN(ext)), "pfkey extension length beyond end of buffer"); + + T_QUIET; T_EXPECT_NULL(mhp[ext->sadb_ext_type], "duplicate extension type %u payload", ext->sadb_ext_type); + + /* set pointer */ + switch (ext->sadb_ext_type) { + case SADB_EXT_SA: + case SADB_EXT_LIFETIME_CURRENT: + case SADB_EXT_LIFETIME_HARD: + case SADB_EXT_LIFETIME_SOFT: + case SADB_EXT_ADDRESS_SRC: + case SADB_EXT_ADDRESS_DST: + case SADB_EXT_ADDRESS_PROXY: + case SADB_EXT_KEY_AUTH: + /* XXX should to be check weak keys. */ + case SADB_EXT_KEY_ENCRYPT: + /* XXX should to be check weak keys. */ + case SADB_EXT_IDENTITY_SRC: + case SADB_EXT_IDENTITY_DST: + case SADB_EXT_SENSITIVITY: + case SADB_EXT_PROPOSAL: + case SADB_EXT_SUPPORTED_AUTH: + case SADB_EXT_SUPPORTED_ENCRYPT: + case SADB_EXT_SPIRANGE: + case SADB_X_EXT_POLICY: + case SADB_X_EXT_SA2: + case SADB_EXT_SESSION_ID: + case SADB_EXT_SASTAT: +#ifdef SADB_X_EXT_NAT_T_TYPE + case SADB_X_EXT_NAT_T_TYPE: + case SADB_X_EXT_NAT_T_SPORT: + case SADB_X_EXT_NAT_T_DPORT: + case SADB_X_EXT_NAT_T_OA: +#endif +#ifdef SADB_X_EXT_TAG + case SADB_X_EXT_TAG: +#endif +#ifdef SADB_X_EXT_PACKET + case SADB_X_EXT_PACKET: +#endif + case SADB_X_EXT_IPSECIF: + case SADB_X_EXT_ADDR_RANGE_SRC_START: + case SADB_X_EXT_ADDR_RANGE_SRC_END: + case SADB_X_EXT_ADDR_RANGE_DST_START: + case SADB_X_EXT_ADDR_RANGE_DST_END: +#ifdef SADB_MIGRATE + case SADB_EXT_MIGRATE_ADDRESS_SRC: + case SADB_EXT_MIGRATE_ADDRESS_DST: + case SADB_X_EXT_MIGRATE_IPSECIF: +#endif + mhp[ext->sadb_ext_type] = (void *)ext; + break; + default: + T_FAIL("bad extension type %u", ext->sadb_ext_type); + T_END; + } + + p += PFKEY_EXTLEN(ext); + } + + T_QUIET; T_EXPECT_EQ_PTR((void *)ep, (void *)p, "invalid pfkey message length"); + return; +} + +static void +pfkey_process_message_test_encapsulate_panic(uint8_t **mhp, __unused int pfkey_socket) +{ + struct sadb_msg *message = (struct sadb_msg *)(void *)mhp[0]; + static uint32_t spi = 0; + static uint8_t added_sa_counter = 0; + + if (message->sadb_msg_pid != (uint32_t)getpid()) { + return; + } + + if (message->sadb_msg_errno != 0) { + T_FAIL("SADB add SA received error %d", message->sadb_msg_errno); + T_END; + } + + switch (message->sadb_msg_type) { + case SADB_ADD: + { + struct sadb_sa *sa_message = (struct sadb_sa *)(void *)mhp[SADB_EXT_SA]; + T_QUIET; T_ASSERT_NOTNULL(sa_message, "add sa message is NULL"); + spi = 
ntohl(sa_message->sadb_sa_spi); + T_LOG("added sa 0x%x", spi); + added_sa_counter++; + if (added_sa_counter == 2) { + bpf_write(bpf_fd); + } + break; + } + case SADB_FLUSH: + case SADB_X_SPDFLUSH: + break; + default: + T_FAIL("bad SADB message type %u", message->sadb_msg_type); + T_END; + } + return; +} + +static void +recv_pfkey_message(int pfkey_socket) +{ + uint8_t buffer[8192] __attribute__((aligned(4))); + struct iovec iovecs[1] = { + { buffer, sizeof(buffer) }, + }; + struct msghdr msg = { + NULL, + 0, + iovecs, + sizeof(iovecs) / sizeof(iovecs[0]), + NULL, + 0, + 0, + }; + + do { + ssize_t result = -1; + memset(buffer, 0, sizeof(buffer)); + T_QUIET; T_ASSERT_POSIX_SUCCESS(result = recvmsg(pfkey_socket, &msg, 0), NULL); + + if (result > 0) { + T_QUIET; T_ASSERT_GE_ULONG((size_t)result, sizeof(struct sadb_msg), "Invalid PFKey message size: %zu", result); + struct sadb_msg *hdr = (struct sadb_msg *)buffer; + uint8_t *mhp[SADB_EXT_MAX + 1]; + pfkey_align(hdr, mhp); + (*process_pfkey_message_tests[test_id])(mhp, pfkey_socket); + } else if (result == 0) { + T_LOG("PFKey socket received EOF"); + break; + } + } while (1); +} + +static int +pfkey_setup_socket(void) +{ + int pfkey_socket = -1; + int bufsiz = 0; + const unsigned long newbufk = 1536; + unsigned long oldmax; + size_t oldmaxsize = sizeof(oldmax); + unsigned long newmax = newbufk * (1024 + 128); + + T_QUIET; T_ASSERT_POSIX_SUCCESS(pfkey_socket = socket(PF_KEY, SOCK_RAW, PF_KEY_V2), NULL); + + if (sysctlbyname("kern.ipc.maxsockbuf", &oldmax, &oldmaxsize, &newmax, sizeof(newmax)) != 0) { + bufsiz = 233016; /* Max allowed by default */ + } else { + bufsiz = newbufk * 1024; + } + + T_QUIET; T_ASSERT_POSIX_SUCCESS(setsockopt(pfkey_socket, SOL_SOCKET, SO_SNDBUF, &bufsiz, sizeof(bufsiz)), "pfkey set snd socket buf failed %d", bufsiz); + T_QUIET; T_ASSERT_POSIX_SUCCESS(setsockopt(pfkey_socket, SOL_SOCKET, SO_RCVBUF, &bufsiz, sizeof(bufsiz)), "pfkey set recv socket buf failed %d", bufsiz); + + pfkey_source = dispatch_source_create(DISPATCH_SOURCE_TYPE_READ, (uintptr_t)pfkey_socket, 0, dispatch_get_main_queue()); + T_QUIET; T_ASSERT_NOTNULL(pfkey_source, "dispatch_source_create(DISPATCH_SOURCE_TYPE_READ, ...)"); + dispatch_source_set_event_handler(pfkey_source, ^{ + recv_pfkey_message(pfkey_socket); + }); + dispatch_source_set_cancel_handler(pfkey_source, ^{ + close(pfkey_socket); + }); + dispatch_resume(pfkey_source); + return pfkey_socket; +} + +static int +bpf_new(void) +{ + char bpfdev[256]; + int i; + int fd = -1; + + for (i = 0; true; i++) { + snprintf(bpfdev, sizeof(bpfdev), "/dev/bpf%d", i); + fd = open(bpfdev, O_RDWR, 0); + if (fd >= 0) { + break; + } + if (errno != EBUSY) { + break; + } + } + return fd; +} + +static int +bpf_setif(int fd, const char *en_name) +{ + struct ifreq ifr; + + strlcpy(ifr.ifr_name, en_name, sizeof(ifr.ifr_name)); + return ioctl(fd, BIOCSETIF, &ifr); +} + +static int +bpf_sethdr_complete(int fd) +{ + u_int8_t hdr_complete = 1; + return ioctl(fd, BIOCSHDRCMPLT, &hdr_complete); +} + +static void +bpf_write(int fd) +{ + if (test_id == TEST_IPSEC_IPv4_ENCAPSULATE_PANIC) { + char buffer[500]; + struct ip *ipheader = (void *)buffer; + ipheader->ip_v = IPVERSION; + ipheader->ip_hl = (sizeof(struct ip) - 4) >> 2; + ipheader->ip_ttl = MAXTTL; + ipheader->ip_p = IPPROTO_UDP; + T_QUIET; T_ASSERT_POSIX_SUCCESS(write(fd, buffer, 500), "bpf write call failed"); + T_PASS("wrote bad ip header successfully"); + T_END; + } else if (test_id == TEST_IPSEC_IPv6_ENCAPSULATE_PANIC) { + struct ip6_hdr ip6 = {0}; + ip6.ip6_vfc |= 
IPV6_VERSION; + T_QUIET; T_ASSERT_POSIX_SUCCESS(write(fd, &ip6, sizeof(ip6) - 4), "bpf write call failed"); + T_PASS("wrote bad ipv6 header successfully"); + T_END; + } +} + +static void +bpf_socket_setup(void) +{ + int status = -1; + + bpf_fd = bpf_new(); + T_QUIET; T_ASSERT_NE(bpf_fd, -1, "failed to create bpf file descriptor"); + + CFStringRef ipsecIfName = NEVirtualInterfaceCopyName(ipsecInterface); + T_QUIET; T_ASSERT_NOTNULL(ipsecIfName, "failed to get ipsec interface name"); + + char ifname[IFNAMSIZ]; + CFStringGetCString(ipsecIfName, ifname, IFNAMSIZ, kCFStringEncodingUTF8); + + status = bpf_setif(bpf_fd, ifname); + T_QUIET; T_ASSERT_NE(status, -1, "failed to set bpf interface"); + + status = bpf_sethdr_complete(bpf_fd); + T_QUIET; T_ASSERT_NE(status, -1, "failed to set bpf header complete"); +} + +static NEVirtualInterfaceRef +ipsec_interface_setup(CFStringRef interfaceAddress, CFStringRef interfaceMask) +{ + Boolean status = FALSE; + + NEVirtualInterfaceRef interface = NEVirtualInterfaceCreate(NULL, kNEVirtualInterfaceValTypeIPSec, dispatch_get_main_queue(), NULL); + T_QUIET; T_ASSERT_NOTNULL(interface, "ipsec interface creation failed"); + status = NEVirtualInterfaceSetMTU(interface, 1400); + if (status == FALSE) { + T_FAIL("Failed to set MTU on ipsec interface"); + T_END; + } + + status = NEVirtualInterfaceAddAddress(interface, interfaceAddress, interfaceMask); + if (status == FALSE) { + T_FAIL("Failed to set address on ipsec interface"); + T_END; + } + + CFStringRef ipsecIfName = NEVirtualInterfaceCopyName(interface); + T_QUIET; T_ASSERT_NOTNULL(ipsecIfName, "failed to get ipsec interface name"); + + char ifname[IFNAMSIZ]; + CFStringGetCString(ipsecIfName, ifname, IFNAMSIZ, kCFStringEncodingUTF8); + + T_LOG("%s interface setup", ifname); + return interface; +} + +static void +ipsec_interface_set_delegate(NEVirtualInterfaceRef interface, CFStringRef delegateInterfaceName) +{ + Boolean status = NEVirtualInterfaceSetDelegateInterface(interface, delegateInterfaceName); + if (status == FALSE) { + T_FAIL("Failed to set delegate on ipsec interface"); + T_END; + } + + return; +} + +static void +ipsec_cleanup(void) +{ + pfkey_cleanup(); + + if (ipsecInterface != NULL) { + NEVirtualInterfaceInvalidate(ipsecInterface); + ipsecInterface = NULL; + } + + if (delegateIPsecInterface != NULL) { + NEVirtualInterfaceInvalidate(delegateIPsecInterface); + delegateIPsecInterface = NULL; + } + + if (bpf_fd != -1) { + close(bpf_fd); + bpf_fd = -1; + } +} + +T_DECL(ipsec_ipv4_encapsulate_panic_63139357, "ipsec: outer ip header length less than 20") +{ + test_id = TEST_IPSEC_IPv4_ENCAPSULATE_PANIC; + + T_ATEND(ipsec_cleanup); + + ipsecInterface = ipsec_interface_setup(CFSTR(TEST_IPSEC_IPv4_INTERFACE_ADDRESS), CFSTR(TEST_IPSEC_IPv4_INTERFACE_MASK)); + delegateIPsecInterface = ipsec_interface_setup(CFSTR(TEST_DELEGATE_IPSEC_INTERFACE_ADDRESS), CFSTR(TEST_IPSEC_IPv4_INTERFACE_MASK)); + + CFStringRef delegateIPsecIfName = NEVirtualInterfaceCopyName(delegateIPsecInterface); + T_QUIET; T_ASSERT_NOTNULL(delegateIPsecIfName, "failed to get ipsec interface name"); + ipsec_interface_set_delegate(ipsecInterface, delegateIPsecIfName); + + bpf_socket_setup(); + + int pfkey_socket = pfkey_setup_socket(); + send_pfkey_flush_sa(pfkey_socket); + + send_pkey_add_sa(pfkey_socket, 0x12345678, TEST_SRC_ADDRESS_IPv4, TEST_DST_ADDRESS_IPv4, AF_INET); + send_pkey_add_sa(pfkey_socket, 0x23456789, TEST_SRC_ADDRESS_IPv4, TEST_DST_ADDRESS_IPv4, AF_INET); + + dispatch_main(); +} + +T_DECL(ipsec_ipv6_encapsulate_panic_63139357, 
"ipsec: payload less than IPv6 header") +{ + test_id = TEST_IPSEC_IPv6_ENCAPSULATE_PANIC; + + T_ATEND(ipsec_cleanup); + + ipsecInterface = ipsec_interface_setup(CFSTR(TEST_IPSEC_IPv6_INTERFACE_ADDRESS), CFSTR(TEST_IPSEC_IPv6_INTERFACE_MASK)); + delegateIPsecInterface = ipsec_interface_setup(CFSTR(TEST_DELEGATE_IPSEC_INTERFACE_ADDRESS), CFSTR(TEST_IPSEC_IPv4_INTERFACE_MASK)); + + CFStringRef delegateIPsecIfName = NEVirtualInterfaceCopyName(delegateIPsecInterface); + T_QUIET; T_ASSERT_NOTNULL(delegateIPsecIfName, "failed to get ipsec interface name"); + ipsec_interface_set_delegate(ipsecInterface, delegateIPsecIfName); + + bpf_socket_setup(); + + int pfkey_socket = pfkey_setup_socket(); + send_pfkey_flush_sa(pfkey_socket); + + send_pkey_add_sa(pfkey_socket, 0x12345678, TEST_SRC_ADDRESS_IPv4, TEST_DST_ADDRESS_IPv4, AF_INET); + send_pkey_add_sa(pfkey_socket, 0x23456789, TEST_SRC_ADDRESS_IPv4, TEST_DST_ADDRESS_IPv4, AF_INET); + + dispatch_main(); +} diff --git a/tests/jitbox-entitlements.plist b/tests/jitbox-entitlements.plist new file mode 100644 index 000000000..2703dcd8b --- /dev/null +++ b/tests/jitbox-entitlements.plist @@ -0,0 +1,9 @@ + + + + + dynamic-codesigning + + + + diff --git a/tests/jumbo_va_spaces_28530648.c b/tests/jumbo_va_spaces_28530648.c index 33f9faa24..2fac86108 100644 --- a/tests/jumbo_va_spaces_28530648.c +++ b/tests/jumbo_va_spaces_28530648.c @@ -15,15 +15,13 @@ T_GLOBAL_META(T_META_RUN_CONCURRENTLY(true)); * This test expects the entitlement to be the enabling factor for a process to * allocate at least this many GB of VA space. i.e. with the entitlement, n GB * must be allocatable; whereas without it, it must be less. + * This value was determined experimentally to fit on applicable devices and to + * be clearly distinguishable from the default VA limit. */ -#define ALLOC_TEST_GB 54 +#define ALLOC_TEST_GB 53 -#if defined(ENTITLED) -T_DECL(jumbo_va_spaces_28530648, -#else -T_DECL(jumbo_va_spaces_28530648_unentitled, -#endif - "Verify that the \"dynamic-codesigning\" entitlement is required to utilize an extra-large " +T_DECL(TESTNAME, + "Verify that a required entitlement is present in order to be granted an extra-large " "VA space on arm64", T_META_NAMESPACE("xnu.vm"), T_META_CHECK_LEAKS(false)) diff --git a/tests/jumbo_va_spaces_52551256.entitlements b/tests/jumbo_va_spaces_52551256.entitlements new file mode 100644 index 000000000..c2fa86359 --- /dev/null +++ b/tests/jumbo_va_spaces_52551256.entitlements @@ -0,0 +1,9 @@ + + + + + + com.apple.developer.kernel.extended-virtual-addressing + + + diff --git a/tests/kas_info.c b/tests/kas_info.c new file mode 100644 index 000000000..82ce529e3 --- /dev/null +++ b/tests/kas_info.c @@ -0,0 +1,255 @@ +/* Copyright (c) 2020 Apple Computer, Inc. All rights reserved. 
*/ + +#include +#include +#include +#include + +#include + +#include + +#include +#include +#include +#include + +#include + +#include + +T_GLOBAL_META( + T_META_NAMESPACE("xnu.kas_info"), + T_META_CHECK_LEAKS(false), + T_META_ASROOT(true)); + +static bool +slide_enabled(void) +{ + int slide_enabled, err; + size_t size = sizeof(slide_enabled); + err = sysctlbyname("kern.slide", &slide_enabled, &size, NULL, 0); + T_ASSERT_POSIX_SUCCESS(err, "sysctl(\"kern.slide\");"); + return slide_enabled != 0; +} + +static uint64_t +kernel_slide(void) +{ + uint64_t slide; + size_t size = sizeof(slide); + int err = kas_info(KAS_INFO_KERNEL_TEXT_SLIDE_SELECTOR, &slide, &size); + if (err && errno == ENOTSUP) { + T_SKIP("Running on kernel without kas_info"); + } + + T_ASSERT_POSIX_SUCCESS(errno, "kas_info KAS_INFO_KERNEL_TEXT_SLIDE_SELECTOR"); + T_ASSERT_EQ(size, sizeof(slide), "returned size is valid"); + + return slide; +} + +T_DECL(kernel_text_slide, + "ensures that kas_info can return the kernel text slide") +{ + if (!slide_enabled()) { + T_SKIP("KASLR is not enabled"); + __builtin_unreachable(); + } + + uint64_t slide = kernel_slide(); + + T_ASSERT_GT_ULLONG(slide, 0ULL, "kernel slide is non-zero"); +} + +T_DECL(kernel_text_slide_invalid, + "ensures that kas_info handles invalid input to KERNEL_TEXT_SLIDE_SELECTOR") +{ + uint64_t slide; + size_t size = 0; + int err; + + err = kas_info(KAS_INFO_KERNEL_TEXT_SLIDE_SELECTOR, &slide, NULL); + if (errno == ENOTSUP) { + T_SKIP("Running on kernel without kas_info"); + } + T_ASSERT_POSIX_FAILURE(err, EFAULT, "kas_info with NULL size"); + + size = sizeof(uint64_t); + err = kas_info(KAS_INFO_KERNEL_TEXT_SLIDE_SELECTOR, NULL, &size); + T_ASSERT_POSIX_FAILURE(err, EFAULT, "kas_info with NULL slide"); + + size = sizeof(uint32_t); + err = kas_info(KAS_INFO_KERNEL_TEXT_SLIDE_SELECTOR, &slide, &size); + T_ASSERT_POSIX_FAILURE(err, EINVAL, "kas_info with invalid size"); +} + +static char const* +kernel_path(void) +{ + static CSSymbolicatorRef symbolicator; + static char const* path; + static dispatch_once_t once; + dispatch_once(&once, ^{ + uint32_t flags = kCSSymbolicatorDefaultCreateFlags; + symbolicator = CSSymbolicatorCreateWithMachKernelFlagsAndNotification(flags, NULL); + T_QUIET; T_ASSERT_TRUE(!CSIsNull(symbolicator), "CSSymbolicatorCreateWithMachKernelFlagsAndNotification"); + path = CSSymbolOwnerGetPath(CSSymbolicatorGetAOutSymbolOwner(symbolicator)); + if (!path) { + path = CSSymbolOwnerGetPath(CSSymbolicatorGetSymbolOwner(symbolicator)); + } + T_QUIET; T_ASSERT_NOTNULL(path, "CSSymbolOwnerGetPath/CSSymbolicatorGetSymbolOwner"); + }); + return path; +} + +static void +disk_kernel_segments(uint64_t **segs_out, size_t *nsegs_out) +{ + char const* path = kernel_path(); + int fd = open(path, O_RDONLY); + int err; + struct stat sb; + size_t nsegs = 0; + uint64_t *segs = NULL; + void *data; + + T_LOG("Kernel file is %s", path); + T_QUIET; T_ASSERT_POSIX_SUCCESS(fd, "open kernel file"); + + err = fstat(fd, &sb); + T_ASSERT_POSIX_SUCCESS(err, "fstat kernel file"); + + data = mmap(NULL, (size_t)sb.st_size, PROT_READ, MAP_SHARED, fd, 0); + T_ASSERT_NE(data, MAP_FAILED, "mmap kernel file"); + + /* + * TODO: If we bring back FAT kernel binaries + * this will need to be fixed to handle them properly + */ + uint32_t magic = *(uint32_t*)data; + struct load_command *cmd = NULL; + + switch (magic) { + case MH_MAGIC: OS_FALLTHROUGH; + case MH_CIGAM: { + struct mach_header *mh = (struct mach_header *)data; + cmd = (struct load_command *)(&(mh[1])); + nsegs = mh->ncmds; + } + 
break; + case MH_MAGIC_64: OS_FALLTHROUGH; + case MH_CIGAM_64: { + struct mach_header_64 *mh = (struct mach_header_64 *)data; + cmd = (struct load_command *)(&(mh[1])); + nsegs = mh->ncmds; + } + break; + default: + T_FAIL("kernel file is not a Mach-O file, magic is %x", magic); + } + + /* Adjust for the LC_UUID && LC_BUILD_VERSION commands in front of + * load commands for dSYMs + */ + while (cmd->cmd != LC_SEGMENT && cmd->cmd != LC_SEGMENT_64) { + cmd = (struct load_command *) ((uintptr_t) cmd + cmd->cmdsize); + nsegs--; + } + + segs = calloc(nsegs, sizeof(*segs)); + T_ASSERT_NOTNULL(segs, "calloc disk segment array"); + + for (uint8_t i = 0; i < nsegs; i++) { + if (cmd->cmd == LC_SEGMENT) { + struct segment_command *sg = (struct segment_command *) cmd; + if (sg->vmsize > 0) { + segs[i] = sg->vmaddr; + } + } else if (cmd->cmd == LC_SEGMENT_64) { + struct segment_command_64 *sg = (struct segment_command_64 *) cmd; + if (sg->vmsize > 0) { + segs[i] = sg->vmaddr; + } + } + cmd = (struct load_command *) ((uintptr_t) cmd + cmd->cmdsize); + } + + *segs_out = segs; + *nsegs_out = nsegs; + + err = munmap(data, (size_t)sb.st_size); + + err = close(fd); + T_ASSERT_POSIX_SUCCESS(err, "close kernel fd"); +} + +static bool +is_fileset_kc(void) +{ + char uuid[1024]; + int err; + size_t size = sizeof(uuid); + err = sysctlbyname("kern.filesetuuid", uuid, &size, NULL, 0); + return err == 0; +} + +#define KAS_INFO_KERNEL_SEGMENT_LOCATION_SELECTOR 1 + +T_DECL(kernel_segment_location, + "ensures that KAS_INFO_KERNEL_SEGMENT_LOCATION returns correct segment locations") +{ + int err; + + if (!slide_enabled()) { + T_SKIP("KASLR is not enabled"); + __builtin_unreachable(); + } + + uint64_t *disk_segs; + size_t disk_nsegs; + disk_kernel_segments(&disk_segs, &disk_nsegs); + + size_t size = 0; + + err = kas_info(KAS_INFO_KERNEL_SEGMENT_VMADDR_SELECTOR, NULL, &size); + if (errno == ENOTSUP) { + T_SKIP("KAS_INFO_KERNEL_SEGMENT_VMADDR_SELECTOR not supported"); + } + T_ASSERT_POSIX_SUCCESS(err, "kas_info KAS_INFO_KERNEL_SEGMENT_VMADDR_SELECTOR for size"); + + uint64_t mem_nsegs = size / sizeof(uint64_t); + uint64_t *mem_segs = calloc(mem_nsegs, sizeof(*disk_segs)); + + err = kas_info(KAS_INFO_KERNEL_SEGMENT_VMADDR_SELECTOR, mem_segs, &size); + if (errno == ENOTSUP) { + T_SKIP("KAS_INFO_KERNEL_SEGMENT_VMADDR_SELECTOR not supported"); + } + + T_ASSERT_POSIX_SUCCESS(err, "kas_info KAS_INFO_KERNEL_SEGMENT_VMADDR_SELECTOR for data"); + + T_LOG("Kernel has %zu segments on disk, %zu in memory:", disk_nsegs, mem_nsegs); + for (size_t i = 0; i < disk_nsegs; i++) { + T_LOG("%zu %llx %llx", i, disk_segs[i], mem_segs[i]); + } + + /* + * If the kernel is not a fileset, verify that all + * the segments in memory are the segment on disk + * + the kaslr slide + */ + if (!is_fileset_kc()) { + T_LOG("Kernelcache is not a fileset kernelcache"); + + uint64_t slide = kernel_slide(); + for (size_t i = 0; i < disk_nsegs; i++) { + if (disk_segs[i] == 0 || mem_segs[i] == 0) { + continue; + } + T_ASSERT_EQ(disk_segs[i] + slide, mem_segs[i], "segment %zu is slid", i); + } + } + + free(disk_segs); + free(mem_segs); +} diff --git a/tests/kdebug.c b/tests/kdebug.c index d8a400c02..6aacdccc0 100644 --- a/tests/kdebug.c +++ b/tests/kdebug.c @@ -1458,3 +1458,335 @@ T_DECL(iop_events_disable, dispatch_main(); } + +T_DECL(lookup_long_paths, + "lookup long path names") +{ + start_controlling_ktrace(); + + int ret = chdir("/tmp"); + T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "chdir to /tmp"); + const char *dir = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/"; + int i = 
0; + do { + ret = mkdir(dir, S_IRUSR | S_IWUSR | S_IXUSR); + if (ret >= 0 || errno != EEXIST) { + T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "mkdir of %d nested directory", + i); + } + ret = chdir(dir); + T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "chdir to %d nested directory", i); + } while (i++ < 40); + + ktrace_session_t s = ktrace_session_create(); + ktrace_set_collection_interval(s, 250); + ktrace_filter_pid(s, getpid()); + T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(s, "created session"); + ktrace_events_single(s, VFS_LOOKUP, ^(struct trace_point *tp __unused){}); + ktrace_set_vnode_paths_enabled(s, KTRACE_FEATURE_ENABLED); + + dispatch_queue_t q = dispatch_queue_create("com.apple.kdebug-test", 0); + + ktrace_set_completion_handler(s, ^{ + dispatch_release(q); + T_END; + }); + + int error = ktrace_start(s, q); + T_ASSERT_POSIX_ZERO(error, "started tracing"); + + int fd = open("bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb", O_RDWR | O_CREAT); + T_ASSERT_POSIX_SUCCESS(fd, "opened file at %d directories deep", i); + + sleep(5); + + T_LOG("ending tracing"); + ktrace_end(s, 0); + dispatch_main(); +} + +#pragma mark - boot tracing + +static const char *expected_subsystems[] = { + "tunables", "locks_early", "kprintf", "pmap_steal", "vm_kernel", + "kmem", "kmem_alloc", "zalloc", + /* "percpu", only has a startup phase on Intel */ + "locks", "codesigning", "oslog", "early_boot", +}; +#define EXPECTED_SUBSYSTEMS_LEN \ + (sizeof(expected_subsystems) / sizeof(expected_subsystems[0])) + +T_DECL(early_boot_tracing, "ensure early boot strings are present", + T_META_BOOTARGS_SET("trace=100000")) +{ + T_ATEND(reset_ktrace); + + T_SETUPBEGIN; + ktrace_session_t s = ktrace_session_create(); + ktrace_set_collection_interval(s, 250); + int error = ktrace_set_use_existing(s); + T_ASSERT_POSIX_ZERO(error, "use existing trace buffer"); + +#if defined(__x86_64__) +#define FIRST_EVENT_STRING "i386_init" +#else /* defined(__x86_64__) */ +#define FIRST_EVENT_STRING "kernel_startup_bootstrap" +#endif /* !defined(__x86_64__) */ + + __block bool seen_event = false; + __block unsigned int cur_subsystem = 0; + ktrace_events_single(s, TRACE_INFO_STRING, ^(struct trace_point *tp) { + char early_str[33] = ""; + size_t argsize = ktrace_is_kernel_64_bit(s) ? 8 : 4; + memcpy(early_str, &tp->arg1, argsize); + memcpy(early_str + argsize, &tp->arg2, argsize); + memcpy(early_str + argsize * 2, &tp->arg3, argsize); + memcpy(early_str + argsize * 3, &tp->arg4, argsize); + + if (!seen_event) { + T_LOG("found first string event with args: " + "0x%" PRIx64 ", 0x%" PRIx64 ", 0x%" PRIx64 ", 0x%" PRIx64, + tp->arg1, tp->arg2, tp->arg3, tp->arg4); + char expect_str[33] = FIRST_EVENT_STRING; + if (!ktrace_is_kernel_64_bit(s)) { + // Only the first 16 bytes of the string will be traced. 
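// (On a 32-bit kernel each of the four trace arguments carries 4 bytes, hence the 16-byte limit.)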
+ expect_str[16] = '\0'; + } + + T_EXPECT_EQ_STR(early_str, expect_str, + "first event in boot trace should be the bootstrap message"); + } + seen_event = true; + + if (strcmp(early_str, expected_subsystems[cur_subsystem]) == 0) { + T_LOG("found log for subsystem %s", + expected_subsystems[cur_subsystem]); + cur_subsystem++; + } + + if (cur_subsystem == EXPECTED_SUBSYSTEMS_LEN) { + T_LOG("ending after seeing all expected logs"); + ktrace_end(s, 1); + } + }); + + ktrace_set_completion_handler(s, ^{ + T_EXPECT_TRUE(seen_event, "should see an early boot string event"); + T_EXPECT_TRUE(cur_subsystem == EXPECTED_SUBSYSTEMS_LEN, + "should see logs from all subsystems"); + if (cur_subsystem != EXPECTED_SUBSYSTEMS_LEN) { + T_LOG("missing log for %s", expected_subsystems[cur_subsystem]); + } + T_END; + }); + + error = ktrace_start(s, dispatch_get_main_queue()); + T_ASSERT_POSIX_ZERO(error, "started tracing"); + + T_SETUPEND; + + dispatch_main(); +} + +T_DECL(typefilter_boot_arg, "ensure typefilter is set up correctly at boot", + T_META_BOOTARGS_SET("trace=100000 trace_typefilter=S0x0c00,C0xfe")) +{ + T_ATEND(reset_ktrace); + + T_SETUPBEGIN; + ktrace_config_t config = ktrace_config_create_current(); + T_QUIET; T_WITH_ERRNO; + T_ASSERT_NOTNULL(config, "create config from current system"); + T_SETUPEND; + + T_LOG("ktrace configuration:"); + ktrace_config_print_description(config, stdout); + + uint8_t *typefilt = ktrace_config_kdebug_get_typefilter(config); + T_ASSERT_NOTNULL(typefilt, "typefilter is active"); + T_EXPECT_TRUE(typefilt[0x0c00 / 8], + "specified subclass is set in typefilter"); + T_MAYFAIL; // rdar://63625062 (UTD converts commas in boot-args to spaces) + T_EXPECT_TRUE(typefilt[0xfeed / 8], + "specified class is set in typefilter"); + + ktrace_config_destroy(config); +} + +#pragma mark - events present + +static int recvd_sigchild = 0; +static void +sighandler(int sig) +{ + if (sig != SIGCHLD) { + T_ASSERT_FAIL("unexpected signal: %d", sig); + } + recvd_sigchild = 1; +} + +T_DECL(instrs_and_cycles_on_proc_exit, + "instructions and cycles should be traced on thread exit", + T_META_REQUIRES_SYSCTL_EQ("kern.monotonic.supported", 1)) +{ + T_SETUPBEGIN; + start_controlling_ktrace(); + int error; + struct rusage_info_v4 *rusage = calloc(1, sizeof(*rusage)); + char *args[] = { "ls", "-l", NULL, }; + int status; + dispatch_queue_t q = dispatch_queue_create("com.apple.kdebug-test", + DISPATCH_QUEUE_SERIAL); + T_QUIET; T_ASSERT_POSIX_SUCCESS(signal(SIGCHLD, sighandler), + "register signal handler"); + + ktrace_session_t s = ktrace_session_create(); + T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(s, "ktrace_session_create"); + + __block pid_t pid; + __block bool seen_event = false; + __block uint64_t proc_instrs = 0; + __block uint64_t proc_cycles = 0; + __block uint64_t proc_sys_time = 0; + __block uint64_t proc_usr_time = 0; + error = ktrace_events_single(s, DBG_MT_INSTRS_CYCLES_PROC_EXIT, + ^(ktrace_event_t tp){ + if (tp->pid == pid) { + seen_event = true; + proc_instrs = tp->arg1; + proc_cycles = tp->arg2; + proc_sys_time = tp->arg3; + proc_usr_time = tp->arg4; + ktrace_end(s, 1); + } + }); + T_QUIET; T_WITH_ERRNO; T_ASSERT_POSIX_ZERO(error, "trace single event"); + ktrace_set_completion_handler(s, ^{ + // TODO Check for equality once rdar://61948669 is fixed. 
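// Until then, the counts in the exit trace event are only required to be at least the rusage values collected after the child exits.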
+ T_ASSERT_GE(proc_instrs, rusage->ri_instructions, + "trace event instrs are >= to rusage instrs"); + T_ASSERT_GE(proc_cycles, rusage->ri_cycles, + "trace event cycles are >= to rusage cycles"); + T_ASSERT_GE(proc_sys_time, rusage->ri_system_time, + "trace event sys time is >= rusage sys time"); + T_ASSERT_GE(proc_usr_time, rusage->ri_user_time, + "trace event usr time >= rusage usr time"); + T_EXPECT_TRUE(seen_event, "should see the proc exit trace event"); + + free(rusage); + ktrace_session_destroy(s); + dispatch_release(q); + T_END; + }); + error = ktrace_start(s, q); + T_ASSERT_POSIX_ZERO(error, "start tracing"); + T_SETUPEND; + + extern char **environ; + status = posix_spawnp(&pid, args[0], NULL, NULL, args, environ); + T_QUIET; T_ASSERT_POSIX_SUCCESS(status, "spawn process"); + if (status == 0) { + while (!recvd_sigchild) { + pause(); + } + error = proc_pid_rusage(pid, RUSAGE_INFO_V4, (rusage_info_t)rusage); + T_QUIET; T_ASSERT_POSIX_ZERO(error, "rusage"); + error = waitpid(pid, &status, 0); + T_QUIET; T_ASSERT_POSIX_SUCCESS(error, "waitpid"); + } + dispatch_main(); +} + +#define NO_OF_THREADS 2 + +struct thread_counters_info { + uint64_t counts[2]; //cycles and/or instrs + uint64_t cpu_time; + uint64_t thread_id; +}; +typedef struct thread_counters_info *tc_info_t; + +static void* +get_thread_counters(void* ptr) +{ + extern int thread_selfcounts(int type, void *buf, size_t nbytes); + extern uint64_t __thread_selfusage(void); + extern uint64_t __thread_selfid(void); + tc_info_t tc_info = (tc_info_t) ptr; + tc_info->thread_id = __thread_selfid(); + // Just to increase the instr, cycle count + T_LOG("printing %llu\n", tc_info->thread_id); + tc_info->cpu_time = __thread_selfusage(); + thread_selfcounts(1, tc_info->counts, sizeof(tc_info->counts)); + return NULL; +} + +T_DECL(instrs_and_cycles_on_thread_exit, + "instructions and cycles should be traced on thread exit", + T_META_REQUIRES_SYSCTL_EQ("kern.monotonic.supported", 1)) +{ + T_SETUPBEGIN; + start_controlling_ktrace(); + + int error; + pthread_t *threads = calloc((unsigned int)(NO_OF_THREADS), + sizeof(pthread_t)); + T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(threads, "calloc(%d threads)", + NO_OF_THREADS); + tc_info_t tc_infos = calloc((unsigned int) (NO_OF_THREADS), + sizeof(struct thread_counters_info)); + T_WITH_ERRNO; T_QUIET; T_ASSERT_NOTNULL(tc_infos, + "calloc(%d thread counters)", NO_OF_THREADS); + + ktrace_session_t s = ktrace_session_create(); + T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(s, "ktrace_session_create"); + ktrace_filter_pid(s, getpid()); + + __block int nevents = 0; + error = ktrace_events_single(s, DBG_MT_INSTRS_CYCLES_THR_EXIT, + ^(ktrace_event_t tp) { + for (int i = 0; i < NO_OF_THREADS; i++) { + if (tp->threadid == tc_infos[i].thread_id) { + nevents++; + uint64_t cpu_time = tp->arg3 + tp->arg4; + /* + * as we are getting counts before thread exit, + * the counts at thread exit should be greater than + * thread_selfcounts + */ + T_ASSERT_GE(tp->arg1, tc_infos[i].counts[0], + "trace event instrs are >= to thread's instrs"); + T_ASSERT_GE(tp->arg2, tc_infos[i].counts[1], + "trace event cycles are >= to thread's cycles"); + T_ASSERT_GE(cpu_time, tc_infos[i].cpu_time, + "trace event cpu time is >= thread's cpu time"); + } + if (nevents == NO_OF_THREADS) { + ktrace_end(s, 1); + } + } + }); + T_QUIET; T_ASSERT_POSIX_ZERO(error, "trace single event"); + ktrace_set_completion_handler(s, ^{ + T_EXPECT_EQ(NO_OF_THREADS, nevents, "seen %d thread exit trace events", + NO_OF_THREADS); + free(tc_infos); + 
ktrace_session_destroy(s); + T_END; + }); + error = ktrace_start(s, dispatch_get_main_queue()); + T_ASSERT_POSIX_ZERO(error, "start tracing"); + + for (int i = 0; i < NO_OF_THREADS; i++) { + error = pthread_create(&threads[i], NULL, get_thread_counters, + (void *)&tc_infos[i]); + T_QUIET; T_ASSERT_POSIX_ZERO(error, "pthread_create thread %d", i); + } + T_SETUPEND; + for (int i = 0; i < NO_OF_THREADS; i++) { + error = pthread_join(threads[i], NULL); + T_QUIET; T_EXPECT_POSIX_ZERO(error, "pthread_join thread %d", i); + } + + dispatch_main(); +} diff --git a/tests/kernel_symbolication_entitlements.plist b/tests/kernel_symbolication_entitlements.plist new file mode 100644 index 000000000..13566d784 --- /dev/null +++ b/tests/kernel_symbolication_entitlements.plist @@ -0,0 +1,10 @@ + + + + + com.apple.private.kernel.get-kext-info + + com.apple.private.security.get-kernel-info + + + diff --git a/tests/kernel_uuid_match.c b/tests/kernel_uuid_match.c deleted file mode 100644 index 29099bae6..000000000 --- a/tests/kernel_uuid_match.c +++ /dev/null @@ -1,194 +0,0 @@ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -T_GLOBAL_META(T_META_RUN_CONCURRENTLY(true)); - -#define MAX_LEN 1024 - -#if TARGET_OS_MAC && !(TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR) - //running on macOS - #define KERNEL_SEARCH_DIR "/System/Library/Kernels/*" -#else - //running on a different OS (e.g. iOS, watchOS, etc.) - #define KERNEL_SEARCH_DIR "/*" -#endif - -#define SWAP32(v) v = OSSwapInt32(v) - - -/* opens and maps the file at [path] in memory, - * sets the length in [len] and returns a pointer - * to the beginning of the memory region or NULL - * if unable to open and map the file - */ -static void *open_file(char *path, size_t *len) { - int fd; - if ((fd = open(path, O_RDONLY)) < 0) { - return NULL; - } - *len = (size_t)lseek(fd, (off_t)0, SEEK_END); - void *p = mmap(NULL, *len, PROT_READ, MAP_PRIVATE, fd, 0); - close(fd); - if (p == MAP_FAILED) { - return NULL; - } - return p; -} - -#pragma clang diagnostic push -#pragma clang diagnostic ignored "-Wsign-conversion" -static void __swap_mach_header(struct mach_header *header) { - SWAP32(header->magic); - SWAP32(header->cputype); - SWAP32(header->cpusubtype); - SWAP32(header->filetype); - SWAP32(header->ncmds); - SWAP32(header->sizeofcmds); - SWAP32(header->flags); -} - -static void __swap_mach_header_64(struct mach_header_64 *header) { - SWAP32(header->magic); - SWAP32(header->cputype); - SWAP32(header->cpusubtype); - SWAP32(header->filetype); - SWAP32(header->ncmds); - SWAP32(header->sizeofcmds); - SWAP32(header->flags); -} -#pragma clang diagnostic pop - -/* parses the uuid from the file at [path] and sets the uuid in [uuid] - * returns true if successfully parses the file, returns false otherwise - * (e.g. 
the file is not a Mach-O binary) - */ -static bool parse_binary_uuid(char *path, uuid_t uuid) { - size_t len = 0; - bool should_swap = false; - unsigned int ncmds = 0; - struct load_command *lc = NULL; - bool ret = false; - - struct mach_header *h = open_file(path, &len); - if (!h) { - return false; - } - if (h->magic == MH_MAGIC || h->magic == MH_CIGAM) { - //32-bit header - struct mach_header *header = h; - if (header->magic == MH_CIGAM) { - __swap_mach_header(header); - should_swap = true; - } - ncmds = header->ncmds; - //the first load command is after the header - lc = (struct load_command *)(header + 1); - } else if (h->magic == MH_MAGIC_64 || h->magic == MH_CIGAM_64) { - //64-bit header - struct mach_header_64 *header = (struct mach_header_64 *)h; - if (header->magic == MH_CIGAM_64) { - __swap_mach_header_64(header); - should_swap = true; - } - ncmds = header->ncmds; - lc = (struct load_command *)(header + 1); - } else { - //this is not a Mach-O binary, or it is a FAT binary - munmap(h, len); - return false; - } - for (unsigned int i = 0; i < ncmds; i++) { - uint32_t cmd = lc->cmd; - uint32_t cmdsize = lc->cmdsize; - if (should_swap) { - SWAP32(cmd); - SWAP32(cmdsize); - } - if (cmd == LC_UUID) { - struct uuid_command *uuid_cmd = - (struct uuid_command *)lc; - uuid_copy(uuid, uuid_cmd->uuid); - uuid_string_t tuuid_str; - uuid_unparse(uuid, tuuid_str); - T_LOG("Trying test UUID %s", tuuid_str); - ret = true; - break; - } - lc = (struct load_command *)((uintptr_t)lc + cmdsize); - } - munmap(h, len); - return ret; -} - -/* uses the sysctl command line tool to get the uuid - * of the currently running kernel - */ -static void get_system_kernel_uuid(uuid_t kuuid) { - char kuuid_line[MAX_LEN]; - memset(kuuid_line, 0, sizeof(kuuid_line)); - size_t len = sizeof(kuuid_line); - int ret = sysctlbyname("kern.uuid", kuuid_line, &len, NULL, 0); - T_ASSERT_POSIX_SUCCESS(ret, "sysctl kern.uuid"); - - T_ASSERT_TRUE(uuid_parse(kuuid_line, kuuid) == 0, - "Parse running kernel uuid"); -} - -/* compares [kuuid] to the uuid in each of the kernel binaries on OS's - * other than macOS (there can be multiple kernel binaries if the mastering - * process doesn't remove all of the irrelevant binaries) - */ -static void find_and_compare_test_uuids(char *search_path, uuid_t kuuid) { - glob_t g; - int ret = glob(search_path, 0, NULL, &g); - T_WITH_ERRNO; T_ASSERT_EQ(ret, 0, "glob %s", search_path); - - bool pass = false; - for (int i = 0; i < g.gl_matchc; i++) { - char *path = g.gl_pathv[i]; - - //check that [path] is the path for a file (not a directory, device, etc.) 
- struct stat s; - int ret = stat(path, &s); - T_ASSERT_POSIX_SUCCESS(ret, "stat %s", path); - if ((s.st_mode & S_IFREG) == 0) { - continue; - } - - T_LOG("Reading file at path: %s", path); - uuid_t tuuid; - if (parse_binary_uuid(path, tuuid) && - uuid_compare(kuuid, tuuid) == 0) { - pass = true; - break; - } - } - globfree(&g); - T_EXPECT_TRUE(pass, "The sources match"); -} - -T_DECL(uuid_match, "Compare the running kernel UUID to kernel binaries.") -{ - uuid_t kuuid; - uuid_clear(kuuid); - get_system_kernel_uuid(kuuid); - uuid_string_t kuuid_str; - uuid_unparse(kuuid, kuuid_str); - T_LOG("Got running kernel UUID %s", kuuid_str); - find_and_compare_test_uuids(KERNEL_SEARCH_DIR, kuuid); -} diff --git a/tests/kevent_qos.c b/tests/kevent_qos.c index 403917306..ecf2ec6e6 100644 --- a/tests/kevent_qos.c +++ b/tests/kevent_qos.c @@ -403,7 +403,7 @@ populate_kevent(struct kevent_qos_s *kev, unsigned long long port) kev->filter = EVFILT_MACHPORT; kev->flags = EV_ADD | EV_ENABLE | EV_UDATA_SPECIFIC | EV_DISPATCH | EV_VANISHED; kev->fflags = (MACH_RCV_MSG | MACH_RCV_VOUCHER | MACH_RCV_LARGE | MACH_RCV_LARGE_IDENTITY | - MACH_RCV_TRAILER_ELEMENTS(MACH_RCV_TRAILER_CTX) | + MACH_RCV_TRAILER_ELEMENTS(MACH_RCV_TRAILER_AV) | MACH_RCV_TRAILER_TYPE(MACH_MSG_TRAILER_FORMAT_0)); kev->data = 1; } @@ -1652,7 +1652,7 @@ expect_kevent_id_recv(mach_port_t port, qos_class_t qos[], const char *qos_name[ .filter = EVFILT_MACHPORT, .flags = EV_ADD | EV_UDATA_SPECIFIC | EV_DISPATCH | EV_VANISHED, .fflags = (MACH_RCV_MSG | MACH_RCV_VOUCHER | MACH_RCV_LARGE | MACH_RCV_LARGE_IDENTITY | - MACH_RCV_TRAILER_ELEMENTS(MACH_RCV_TRAILER_CTX) | + MACH_RCV_TRAILER_ELEMENTS(MACH_RCV_TRAILER_AV) | MACH_RCV_TRAILER_TYPE(MACH_MSG_TRAILER_FORMAT_0)), .data = 1, .qos = (int32_t)_pthread_qos_class_encode(qos[ENV_QOS_QUEUE_OVERRIDE], 0, 0) diff --git a/tests/kpc.c b/tests/kpc.c index 7e3236344..b9e45a30c 100644 --- a/tests/kpc.c +++ b/tests/kpc.c @@ -405,6 +405,14 @@ T_DECL(kpc_pmi_configurable, tp->timestamp, &cur_ns); T_QUIET; T_ASSERT_POSIX_ZERO(cret, "convert timestamp"); + uint64_t desc = tp->arg1; + uint64_t config = desc & UINT32_MAX; + T_QUIET; T_EXPECT_EQ(config & UINT8_MAX, + (uint64_t)CYCLES_EVENT & UINT8_MAX, + "PMI argument matches configuration"); + __unused uint64_t counter = (desc >> 32) & UINT16_MAX; + __unused uint64_t flags = desc >> 48; + uint64_t count = tp->arg2; if (first_ns == 0) { first_ns = cur_ns; @@ -413,8 +421,6 @@ T_DECL(kpc_pmi_configurable, if (cpu->prev_count != 0) { uint64_t delta = count - cpu->prev_count; - T_QUIET; T_EXPECT_GT(delta, PMI_PERIOD, - "counter delta should be greater than PMI period"); uint64_t skid = delta - PMI_PERIOD; if (skid > cpu->max_skid) { cpu->max_skid = skid; @@ -422,6 +428,8 @@ T_DECL(kpc_pmi_configurable, } cpu->prev_count = count; + __unused uint64_t pc = tp->arg3; + double slice = (double)(cur_ns - first_ns) / PMI_TEST_DURATION_NS * NTIMESLICES; if (slice < NTIMESLICES) { @@ -559,9 +567,6 @@ T_DECL(kpc_pmi_configurable, dispatch_main(); } -#if defined(__arm64__) -// This policy only applies to arm64 devices. - static int g_prev_disablewl = 0; static void @@ -574,8 +579,10 @@ whitelist_atend(void) } } -T_DECL(whitelist, "ensure kpc's whitelist is filled out") +T_DECL(kpc_whitelist, "ensure kpc's whitelist is filled out") { +// This policy only applies to arm64 devices. +#if defined(__arm64__) // Start enforcing the whitelist. 
int set = 0; size_t getsz = sizeof(g_prev_disablewl); @@ -614,6 +621,7 @@ T_DECL(whitelist, "ensure kpc's whitelist is filled out") (void)kpc_set_config(KPC_CLASS_CONFIGURABLE_MASK, config); free(config); +#else /* defined(__arm64__) */ + T_SKIP("kpc whitelist is only enforced on arm64") +#endif /* !defined(__arm64__) */ } - -#endif // defined(__arm64__) diff --git a/tests/kperf.c b/tests/kperf.c index c74b9671a..45f1df837 100644 --- a/tests/kperf.c +++ b/tests/kperf.c @@ -1,6 +1,4 @@ -#ifdef T_NAMESPACE -#undef T_NAMESPACE -#endif /* defined(T_NAMESPACE) */ +// Copyright (c) 2017-2020 Apple Computer, Inc. All rights reserved. #include #include @@ -8,7 +6,8 @@ #include #include #include -#include +#include +#include #include #include #include @@ -20,7 +19,7 @@ #include "ktrace_helpers.h" T_GLOBAL_META( - T_META_NAMESPACE("xnu.kperf"), + T_META_NAMESPACE("xnu.ktrace"), T_META_CHECK_LEAKS(false), T_META_ASROOT(true)); @@ -54,10 +53,10 @@ spinning_thread(void *semp) #define PERF_INSTR_DATA KDBG_EVENTID(DBG_PERF, 1, 17) #define PERF_EVENT KDBG_EVENTID(DBG_PERF, 0, 0) -#define SCHED_HANDOFF KDBG_EVENTID(DBG_MACH, DBG_MACH_SCHED, \ - MACH_STACK_HANDOFF) -#define SCHED_SWITCH KDBG_EVENTID(DBG_MACH, DBG_MACH_SCHED, MACH_SCHED) -#define SCHED_IDLE KDBG_EVENTID(DBG_MACH, DBG_MACH_SCHED, MACH_IDLE) +#define SCHED_DISPATCH KDBG_EVENTID(DBG_MACH, DBG_MACH_SCHED, MACH_DISPATCH) +#define SCHED_SWITCH KDBG_EVENTID(DBG_MACH, DBG_MACH_SCHED, MACH_SCHED) +#define SCHED_HANDOFF KDBG_EVENTID(DBG_MACH, DBG_MACH_SCHED, MACH_STACK_HANDOFF) +#define SCHED_IDLE KDBG_EVENTID(DBG_MACH, DBG_MACH_SCHED, MACH_IDLE) #define MP_CPUS_CALL UINT32_C(0x1900004) @@ -66,17 +65,76 @@ spinning_thread(void *semp) #define TIMER_PERIOD_NS (1 * NSEC_PER_MSEC) -/* - * Ensure that kperf is correctly IPIing CPUs that are actively scheduling by - * bringing up threads and ensuring that threads on-core are sampled by each - * timer fire. - */ +static void +start_tracing_with_timeout(ktrace_session_t s, unsigned int timeout_secs) +{ + // Only set the timeout after we've seen an event that was traced by us. + // This helps set a reasonable timeout after we're guaranteed to get a + // few events. 
+ dispatch_queue_t q = dispatch_get_global_queue(QOS_CLASS_USER_INITIATED, 0); + + ktrace_events_single(s, DISPATCH_AFTER_EVENT, + ^(__unused struct trace_point *tp) + { + T_LOG("arming timer to stop tracing after %d seconds", timeout_secs); + dispatch_after(dispatch_time(DISPATCH_TIME_NOW, + timeout_secs * NSEC_PER_SEC), q, ^{ + T_LOG("ending tracing due to timeout"); + ktrace_end(s, 0); + }); + }); + ktrace_set_collection_interval(s, 100); + + T_ASSERT_POSIX_ZERO(ktrace_start(s, q), "start ktrace"); + + kdebug_trace(DISPATCH_AFTER_EVENT, 0, 0, 0, 0); + T_LOG("trace point emitted"); +} + +static void +configure_kperf_timer_samplers(uint64_t period_ns, uint32_t samplers) +{ + T_SETUPBEGIN; + + (void)kperf_action_count_set(1); + T_QUIET; + T_ASSERT_POSIX_SUCCESS(kperf_action_samplers_set(1, samplers), + NULL); + (void)kperf_timer_count_set(1); + T_QUIET; + T_ASSERT_POSIX_SUCCESS(kperf_timer_period_set(0, + kperf_ns_to_ticks(period_ns)), NULL); + T_QUIET; + T_ASSERT_POSIX_SUCCESS(kperf_timer_action_set(0, 1), NULL); + + T_ASSERT_POSIX_SUCCESS(kperf_sample_set(1), "start kperf sampling"); + + T_SETUPEND; +} -T_DECL(ipi_active_cpus, - "make sure that kperf IPIs all active CPUs") +static double +timestamp_secs(ktrace_session_t s, uint64_t timestamp) +{ + uint64_t ns = 0; + T_QUIET; + T_ASSERT_POSIX_ZERO(ktrace_convert_timestamp_to_nanoseconds(s, timestamp, + &ns), NULL); + return (double)ns / NSEC_PER_SEC; +} + +#pragma mark - timers + +// Ensure that kperf is correctly sampling CPUs that are actively scheduling by +// bringing up threads and ensuring that threads on-core are sampled by each +// timer fire. + +T_DECL(kperf_sample_active_cpus, + "make sure that kperf samples all active CPUs") { start_controlling_ktrace(); + T_SETUPBEGIN; + int ncpus = dt_ncpu(); T_QUIET; T_ASSERT_LT(ncpus, MAX_CPUS, @@ -90,50 +148,35 @@ T_DECL(ipi_active_cpus, static pthread_t threads[MAX_THREADS]; - /* - * TODO options to write this to a file and reinterpret a file... - */ - - /* - * Create threads to bring up all of the CPUs. - */ - - dispatch_semaphore_t thread_spinning = dispatch_semaphore_create(0); - - for (int i = 0; i < nthreads; i++) { - T_QUIET; - T_ASSERT_POSIX_ZERO( - pthread_create(&threads[i], NULL, &spinning_thread, - &thread_spinning), NULL); - dispatch_semaphore_wait(thread_spinning, DISPATCH_TIME_FOREVER); - } - - T_LOG("spun up %d thread%s", nthreads, nthreads == 1 ? "" : "s"); - ktrace_session_t s = ktrace_session_create(); - T_WITH_ERRNO; T_ASSERT_NOTNULL(s, "ktrace_session_create"); - - dispatch_queue_t q = dispatch_get_global_queue(QOS_CLASS_USER_INITIATED, 0); - - /* - * Only set the timeout after we've seen an event that was traced by us. - * This helps set a reasonable timeout after we're guaranteed to get a - * few events. - */ - - ktrace_events_single(s, DISPATCH_AFTER_EVENT, - ^(__unused struct trace_point *tp) - { - dispatch_after(dispatch_time(DISPATCH_TIME_NOW, - TIMEOUT_SECS * NSEC_PER_SEC), q, ^{ - ktrace_end(s, 0); - }); - }); + T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(s, "ktrace_session_create"); + ktrace_set_collection_interval(s, 100); __block uint64_t nfires = 0; __block uint64_t nsamples = 0; static uint64_t idle_tids[MAX_CPUS] = { 0 }; - __block int nidles = 0; + __block double sum_saturation = 0; + __block uint64_t last_nsamples = 0; + + // As a test debugging aid, take an additonal argument that specifies the + // number of fires to stop tracing after. This also turns on additional + // logging of scheduler trace events. 
+ int stopafter = 0; + if (argc > 0) { + stopafter = atoi(argv[0]); + if (stopafter < 0) { + T_ASSERT_FAIL("argument must be positive"); + } + } + + static uint64_t first_timestamp = 0; + static uint64_t last_timestamp = 0; + ktrace_events_any(s, ^(struct trace_point *tp) { + if (first_timestamp == 0) { + first_timestamp = tp->timestamp; + } + last_timestamp = tp->timestamp; + }); ktrace_set_completion_handler(s, ^{ T_LOG("stopping threads"); @@ -145,157 +188,513 @@ T_DECL(ipi_active_cpus, T_ASSERT_POSIX_ZERO(pthread_join(threads[i], NULL), NULL); } - for (int i = 0; i < nidles; i++) { - T_LOG("CPU %d idle thread: %#" PRIx64, i, idle_tids[i]); - } + double saturation = sum_saturation / nfires * 100; - T_LOG("saw %" PRIu64 " timer fires, %" PRIu64 " samples, " - "%g samples/fire", nfires, nsamples, - (double)nsamples / (double)nfires); + T_LOG("over %.1f seconds, saw %" PRIu64 " timer fires, %" PRIu64 + " samples, %g samples/fire, %.2f%% saturation", + timestamp_secs(s, last_timestamp - first_timestamp), nfires, + nsamples, (double)nsamples / (double)nfires, saturation); + T_ASSERT_GT(saturation, 95.0, + "saw reasonable percentage of full samples"); T_END; }); - /* - * Track which threads are running on each CPU. - */ - + // Track which threads are running on each CPU. static uint64_t tids_on_cpu[MAX_CPUS] = { 0 }; - - void (^switch_cb)(struct trace_point *) = ^(struct trace_point *tp) { + void (^switch_cb)(struct trace_point *, const char *name) = + ^(struct trace_point *tp, const char *name) { uint64_t new_thread = tp->arg2; - // uint64_t old_thread = tp->threadid; - for (int i = 0; i < nidles; i++) { - if (idle_tids[i] == new_thread) { - return; - } + if (idle_tids[tp->cpuid] != new_thread) { + tids_on_cpu[tp->cpuid] = new_thread; } - tids_on_cpu[tp->cpuid] = new_thread; + if (stopafter) { + T_LOG("%.7g: %s on %d: %llx", timestamp_secs(s, tp->timestamp), + name, tp->cpuid, tp->arg2); + } }; - ktrace_events_single(s, SCHED_SWITCH, switch_cb); - ktrace_events_single(s, SCHED_HANDOFF, switch_cb); - - /* - * Determine the thread IDs of the idle threads on each CPU. - */ + ktrace_events_single(s, SCHED_SWITCH, ^(struct trace_point *tp) { + switch_cb(tp, "switch"); + }); + ktrace_events_single(s, SCHED_HANDOFF, ^(struct trace_point *tp) { + switch_cb(tp, "hndoff"); + }); + // Determine the thread IDs of the idle threads on each CPU. ktrace_events_single(s, SCHED_IDLE, ^(struct trace_point *tp) { - uint64_t idle_thread = tp->threadid; - + if (tp->debugid & DBG_FUNC_END) { + return; + } tids_on_cpu[tp->cpuid] = 0; - - for (int i = 0; i < nidles; i++) { - if (idle_tids[i] == idle_thread) { - return; - } + idle_tids[tp->cpuid] = tp->threadid; + if (stopafter) { + T_LOG("%.7g: idle on %d: %llx", timestamp_secs(s, tp->timestamp), + tp->cpuid, tp->threadid); } - - idle_tids[nidles++] = idle_thread; }); - /* - * On each timer fire, go through all the cores and mark any threads - * that should be sampled. - */ + // On each timer fire, go through all the cores and mark any threads + // that should be sampled. 
__block int last_fire_cpu = -1; - __block uint64_t sample_missing = 0; + static bool sample_missing[MAX_CPUS] = { false }; static uint64_t tids_snap[MAX_CPUS] = { 0 }; __block int nexpected = 0; -#if defined(__x86_64__) - __block int xcall_from_cpu = -1; -#endif /* defined(__x86_64__) */ - __block uint64_t xcall_mask = 0; + __block int nextra = 0; + __block int nidles = 0; ktrace_events_single(s, PERF_TMR_FIRE, ^(struct trace_point *tp) { + T_QUIET; T_ASSERT_EQ((tp->debugid & DBG_FUNC_START), 0, + "no timer fire start events are allowed"); int last_expected = nexpected; nfires++; nexpected = 0; for (int i = 0; i < ncpus; i++) { - uint64_t i_bit = UINT64_C(1) << i; - if (sample_missing & i_bit) { - T_LOG("missed sample on CPU %d for thread %#llx from timer on CPU %d (xcall mask = %llx, expected %d samples)", - tp->cpuid, tids_snap[i], last_fire_cpu, - xcall_mask, last_expected); - sample_missing &= ~i_bit; + if (sample_missing[i]) { + T_LOG("missed sample on CPU %d for thread %#llx from " + "timer on CPU %d (expected %d samples)", + tp->cpuid, tids_snap[i], last_fire_cpu, last_expected); + sample_missing[i] = false; } - if (tids_on_cpu[i] != 0) { - tids_snap[i] = tids_on_cpu[i]; - sample_missing |= i_bit; - nexpected++; + if (tids_on_cpu[i] != 0) { + tids_snap[i] = tids_on_cpu[i]; + sample_missing[i] = true; + nexpected++; } } + if (stopafter) { + T_LOG("%.7g: FIRE on %d: %d extra, %d idles", + timestamp_secs(s, tp->timestamp), tp->cpuid, nextra, nidles); + } + + if (nfires == 1) { + return; + } + + if (last_expected == 0) { + sum_saturation += 1; + } else { + sum_saturation += (double)(nsamples - last_nsamples) / + last_expected; + } + last_nsamples = nsamples; + nextra = 0; + nidles = 0; T_QUIET; T_ASSERT_LT((int)tp->cpuid, ncpus, "timer fire should not occur on an IOP"); last_fire_cpu = (int)tp->cpuid; -#if defined(__x86_64__) - xcall_from_cpu = (int)tp->cpuid; -#endif /* defined(__x86_64__) */ - }); - -#if defined(__x86_64__) - /* - * Watch for the cross-call on Intel, make sure they match what kperf - * should be doing. - */ - ktrace_events_single(s, MP_CPUS_CALL, ^(struct trace_point *tp) { - if (xcall_from_cpu != (int)tp->cpuid) { - return; + if (stopafter && (uint64_t)stopafter == nfires) { + ktrace_end(s, 1); } - - xcall_mask = tp->arg1; - xcall_from_cpu = -1; }); -#endif /* defined(__x86_64__) */ - /* - * On the timer handler for each CPU, unset the missing sample bitmap. - */ + // On the timer handler for each CPU, unset the missing sample bitmap. ktrace_events_single(s, PERF_TMR_HNDLR, ^(struct trace_point *tp) { nsamples++; if ((int)tp->cpuid > ncpus) { - /* skip IOPs; they're not scheduling our threads */ + // Skip IOPs; they're not scheduling any relevant threads. return; } - sample_missing &= ~(UINT64_C(1) << tp->cpuid); + if (!sample_missing[tp->cpuid] && idle_tids[tp->cpuid] != 0) { + T_LOG("sampled additional thread %llx on CPU %d", tp->threadid, + tp->cpuid); + nextra++; + } + if (tp->threadid == idle_tids[tp->cpuid]) { + T_LOG("sampled idle thread on CPU %d", tp->cpuid); + nidles++; + } + sample_missing[tp->cpuid] = false; }); - /* - * Configure kperf and ktrace. 
- */ + configure_kperf_timer_samplers(TIMER_PERIOD_NS, KPERF_SAMPLER_KSTACK); - (void)kperf_action_count_set(1); - T_QUIET; - T_ASSERT_POSIX_SUCCESS(kperf_action_samplers_set(1, KPERF_SAMPLER_KSTACK), - NULL); - (void)kperf_timer_count_set(1); - T_QUIET; - T_ASSERT_POSIX_SUCCESS(kperf_timer_period_set(0, - kperf_ns_to_ticks(TIMER_PERIOD_NS)), NULL); - T_QUIET; - T_ASSERT_POSIX_SUCCESS(kperf_timer_action_set(0, 1), NULL); + T_SETUPEND; - T_ASSERT_POSIX_SUCCESS(kperf_sample_set(1), "start kperf sampling"); + start_tracing_with_timeout(s, TIMEOUT_SECS); - T_ASSERT_POSIX_ZERO(ktrace_start(s, - dispatch_get_global_queue(QOS_CLASS_USER_INITIATED, 0)), - "start ktrace"); + // Create threads to bring up all of the CPUs. - kdebug_trace(DISPATCH_AFTER_EVENT, 0, 0, 0, 0); + dispatch_semaphore_t thread_spinning = dispatch_semaphore_create(0); + + for (int i = 0; i < nthreads; i++) { + T_QUIET; + T_ASSERT_POSIX_ZERO( + pthread_create(&threads[i], NULL, &spinning_thread, + &thread_spinning), NULL); + dispatch_semaphore_wait(thread_spinning, DISPATCH_TIME_FOREVER); + } + + T_LOG("spun up %d thread%s", nthreads, nthreads == 1 ? "" : "s"); dispatch_main(); } -#pragma mark kdebug triggers +#define FIRES_THRESHOLD (5000) + +T_DECL(kperf_timer_fires_enough_times, + "ensure the correct number of timers fire in a period of time") +{ + start_controlling_ktrace(); + + dispatch_semaphore_t thread_spinning = dispatch_semaphore_create(0); + + ktrace_session_t s = ktrace_session_create(); + T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(s, "ktrace_session_create"); + ktrace_set_collection_interval(s, 100); + + __block uint64_t nfires = 0; + __block uint64_t first_fire_ns = 0; + __block uint64_t last_fire_ns = 0; + + int ncpus = dt_ncpu(); + + ktrace_events_single(s, PERF_TMR_FIRE, ^(struct trace_point *tp) { + nfires++; + if (first_fire_ns == 0) { + ktrace_convert_timestamp_to_nanoseconds(s, tp->timestamp, + &first_fire_ns); + } + ktrace_convert_timestamp_to_nanoseconds(s, tp->timestamp, + &last_fire_ns); + + T_QUIET; T_ASSERT_LT((int)tp->cpuid, ncpus, + "timer fire should not occur on an IOP"); + if (nfires >= FIRES_THRESHOLD) { + ktrace_end(s, 1); + } + }); + + configure_kperf_timer_samplers(TIMER_PERIOD_NS, KPERF_SAMPLER_KSTACK); + + pthread_t thread; + T_QUIET; + T_ASSERT_POSIX_ZERO(pthread_create(&thread, NULL, &spinning_thread, + &thread_spinning), NULL); + dispatch_semaphore_wait(thread_spinning, DISPATCH_TIME_FOREVER); + + ktrace_set_completion_handler(s, ^{ + running_threads = false; + + double duration_secs = (double)(last_fire_ns - first_fire_ns) / + NSEC_PER_SEC; + T_LOG("stopping thread after %.2f seconds", duration_secs); + + T_QUIET; T_ASSERT_POSIX_ZERO(pthread_join(thread, NULL), NULL); + + T_LOG("saw %" PRIu64 " timer fires (%g fires/second)", nfires, + (double)nfires / (double)duration_secs); + double expected_nfires = duration_secs * NSEC_PER_SEC / TIMER_PERIOD_NS; + T_LOG("expecting %g timer fires", expected_nfires); + double nfires_seen_pct = expected_nfires / nfires * 100; + T_ASSERT_GT(nfires_seen_pct, 95.0, + "saw reasonable number of missed timer fires"); + T_ASSERT_LT(nfires_seen_pct, 105.0, + "saw reasonable number of extra timer fires"); + + T_END; + }); + + start_tracing_with_timeout(s, TIMEOUT_SECS); + + dispatch_main(); +} + +// kperf_timer_not_oversampling ensures that the profiling timer fires are +// spaced apart by the programmed timer period. Otherwise, tools that rely on +// sample count as a proxy for CPU usage will over-estimate. 
+ +#define FIRE_PERIOD_THRESHOLD_NS \ + (TIMER_PERIOD_NS - (uint64_t)(TIMER_PERIOD_NS * 0.05)) + +struct cirq { + unsigned int nslots; + unsigned int tail_slot; + unsigned int slot_size; +}; + +#define CIRQ_INIT(TYPE, NSLOTS) \ + (struct cirq){ \ + .nslots = NSLOTS, .tail_slot = 0, .slot_size = sizeof(TYPE), \ + } + +static inline void * +cirq_get(struct cirq *cq, unsigned int i) +{ + return (char *)cq + sizeof(*cq) + (cq->slot_size * i); +} + +static void * +cirq_top(void *vcq) +{ + struct cirq *cq = vcq; + unsigned int tail_slot = cq->tail_slot; + unsigned int top_slot = (tail_slot > 0 ? tail_slot : cq->nslots) - 1; + return cirq_get(cq, top_slot); +} + +static void * +cirq_push(void *vcq) +{ + struct cirq *cq = vcq; + unsigned int tail_slot = cq->tail_slot; + unsigned int next_slot = tail_slot == cq->nslots - 1 ? 0 : tail_slot + 1; + cq->tail_slot = next_slot; + return cirq_get(cq, tail_slot); +} + +static void +cirq_for(void *vcq, void (^iter)(void *elt)) +{ + struct cirq *cq = vcq; + for (unsigned int i = cq->tail_slot; i < cq->nslots; i++) { + iter(cirq_get(cq, i)); + } + for (unsigned int i = 0; i < cq->tail_slot; i++) { + iter(cirq_get(cq, i)); + } +} + +#define HISTORY_LEN 5 + +struct instval { + uint64_t iv_instant_ns; + uint64_t iv_val; +}; + +struct cirq_instval { + struct cirq cq; + struct instval elts[HISTORY_LEN]; +}; + +struct cirq_u64 { + struct cirq cq; + uint64_t elts[HISTORY_LEN]; +}; + +struct cpu_oversample { + struct cirq_instval timer_latencies; + struct cirq_instval fire_latencies; +}; + +static void +cpu_oversample_log(struct cpu_oversample *cpu, unsigned int cpuid) +{ + T_LOG("CPU %d timer latencies:", cpuid); + __block int i = -HISTORY_LEN; + cirq_for(&cpu->timer_latencies, ^(void *viv) { + struct instval *iv = viv; + T_LOG("\t%llu timer latency %d: %llu", iv->iv_instant_ns, i, + iv->iv_val); + i++; + }); + + T_LOG("CPU %d fire latencies:", cpuid); + i = -HISTORY_LEN; + cirq_for(&cpu->fire_latencies, ^(void *viv) { + struct instval *iv = viv; + T_LOG("\t%llu fire latency %d: %llu", iv->iv_instant_ns, i, iv->iv_val); + i++; + }); +} + +T_DECL(kperf_timer_not_oversampling, + "ensure that time between fires is long enough") +{ + start_controlling_ktrace(); + + ktrace_session_t s = ktrace_session_create(); + T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(s, "ktrace_session_create"); + // Try not to perturb the system with more work. + ktrace_set_collection_interval(s, 1000); + __block uint64_t nfires = 0; + __block uint64_t first_fire_ns = 0; + __block uint64_t last_fire_ns = 0; + __block unsigned int last_fire_cpuid = 0; + + int ncpus = dt_ncpu(); + T_QUIET; T_ASSERT_GT(ncpus, 0, "should see positive number of CPUs"); + + struct cpu_oversample *per_cpu = calloc((unsigned int)ncpus, + sizeof(per_cpu[0])); + T_QUIET; T_WITH_ERRNO; + T_ASSERT_NOTNULL(per_cpu, "allocated timer latency tracking"); + for (int i = 0; i < ncpus; i++) { + per_cpu[i].timer_latencies.cq = CIRQ_INIT(struct instval, HISTORY_LEN); + per_cpu[i].fire_latencies.cq = CIRQ_INIT(struct instval, HISTORY_LEN); + } + + __block bool in_stackshot = false; + __block uint64_t last_stackshot_ns = 0; + + // Stackshots are the primary source of interrupt latency on the system. + ktrace_events_single(s, KDBG_EVENTID(DBG_BSD, DBG_BSD_EXCP_SC, + SYS_stack_snapshot_with_config), ^(struct trace_point *tp) { + bool start = tp->debugid & DBG_FUNC_START; + uint64_t cur_ns = relns_from_abs(s, tp->timestamp); + T_LOG("%llu: %s stackshot syscall from process %s", + cur_ns, start ? 
"start" : "finish", tp->command); + in_stackshot = start; + if (!start) { + last_stackshot_ns = cur_ns; + } + }); + + struct cirq_u64 *fire_times = calloc(1, sizeof(*fire_times)); + T_ASSERT_NOTNULL(fire_times, "allocated fire time tracking"); + fire_times->cq = CIRQ_INIT(uint64_t, HISTORY_LEN); + + // Track the decrementer's latency values to find any unexpectedly long + // interrupt latencies that could affect the firing cadence. + ktrace_events_single(s, MACHDBG_CODE(DBG_MACH_EXCP_DECI, 0), + ^(struct trace_point *tp) { + uint64_t cur_ns = relns_from_abs(s, tp->timestamp); + uint64_t latency_ns = ns_from_abs(s, 0 - tp->arg1); + struct instval *latency = cirq_push(&per_cpu[tp->cpuid].timer_latencies); + latency->iv_instant_ns = cur_ns; + latency->iv_val = latency_ns; + }); + + ktrace_events_single(s, PERF_TMR_FIRE, ^(struct trace_point *tp) { + T_QUIET; T_ASSERT_LT((int)tp->cpuid, ncpus, + "timer fire should not occur on an IOP"); + + nfires++; + uint64_t cur_ns = relns_from_abs(s, tp->timestamp); + uint64_t *fire_ns = cirq_push(fire_times); + *fire_ns = cur_ns; + + struct cpu_oversample *cur_cpu = &per_cpu[tp->cpuid]; + struct instval *last_timer_latency = cirq_top( + &cur_cpu->timer_latencies); + uint64_t timer_latency_ns = last_timer_latency->iv_val; + + if (first_fire_ns == 0) { + first_fire_ns = cur_ns; + } else { + struct cpu_oversample *last_cpu = &per_cpu[last_fire_cpuid]; + struct instval *last_latency = cirq_top(&last_cpu->fire_latencies); + uint64_t last_fire_latency_ns = last_latency->iv_val; + + if (timer_latency_ns > TIMER_PERIOD_NS / 4) { + T_LOG("%llu: long timer latency at fire: %llu", cur_ns, + timer_latency_ns); + } + + // Long interrupt latencies will cause the timer to miss its fire + // time and report a fire past when it should have, making the next + // period too short. Keep track of the latency as a leeway + // adjustment for the subsequent fire. 
+ uint64_t fire_period_ns = cur_ns - last_fire_ns; + uint64_t fire_period_adj_ns = fire_period_ns + + last_fire_latency_ns + timer_latency_ns; + bool too_short = fire_period_adj_ns < FIRE_PERIOD_THRESHOLD_NS; + if (too_short) { + T_LOG("%llu: period of timer fire %llu is %llu + %llu + %llu = " + "%llu < %llu", + cur_ns, nfires, fire_period_ns, last_fire_latency_ns, + timer_latency_ns, fire_period_adj_ns, + FIRE_PERIOD_THRESHOLD_NS); + + T_LOG("short profile timer fired on CPU %d", tp->cpuid); + cpu_oversample_log(cur_cpu, tp->cpuid); + + if (cur_cpu == last_cpu) { + T_LOG("fired back-to-back on CPU %d", tp->cpuid); + } else { + T_LOG("previous profile timer fired on CPU %d", + last_fire_cpuid); + cpu_oversample_log(last_cpu, last_fire_cpuid); + } + + T_LOG("profile timer fires:"); + cirq_for(fire_times, ^(void *vu64) { + T_LOG("\tfire: %llu", *(uint64_t *)vu64); + }); + if (nfires < (unsigned int)ncpus) { + T_LOG("ignoring timer fire %llu as context may be missing", + nfires); + } else { + if (in_stackshot) { + T_LOG("skipping assertion because stackshot is " + "happening"); + } else if (last_stackshot_ns != 0 && + cur_ns > last_stackshot_ns && + cur_ns - last_stackshot_ns < TIMER_PERIOD_NS) { + T_LOG("skipping assertion because stackshot happened " + "%" PRIu64 "ns ago", + cur_ns - last_stackshot_ns); + } else { + T_ASSERT_FAIL("profiling period is shorter than " + "expected with no stackshot interference"); + } + } + } + + struct instval *latency = cirq_push(&cur_cpu->fire_latencies); + latency->iv_instant_ns = cur_ns; + latency->iv_val = timer_latency_ns; + + // Snapshot this timer fire's interrupt latency, so the next one + // can make an adjustment to the period. + last_fire_latency_ns = timer_latency_ns; + } + last_fire_ns = cur_ns; + last_fire_cpuid = tp->cpuid; + + if (nfires >= FIRES_THRESHOLD) { + ktrace_end(s, 1); + } + }); + + configure_kperf_timer_samplers(TIMER_PERIOD_NS, KPERF_SAMPLER_TINFO); + + ktrace_set_completion_handler(s, ^{ + double duration_secs = (double)(last_fire_ns - first_fire_ns) / + NSEC_PER_SEC; + T_LOG("stopping trace after %.2f seconds", duration_secs); + + T_PASS("saw %" PRIu64 " timer fires (%g fires/second) without " + "oversampling", nfires, (double)nfires / (double)duration_secs); + + T_END; + }); + + start_tracing_with_timeout(s, 5); + + // Get all CPUs out of idle. 
+ uint64_t *counts = kpc_counterbuf_alloc(); + (void)kpc_get_cpu_counters(true,KPC_CLASS_CONFIGURABLE_MASK, NULL, counts); + free(counts); + + dispatch_main(); +} + +T_DECL(kperf_timer_stress, "repeatedly enable and disable timers") +{ + start_controlling_ktrace(); + + const int niters = 500; + for (int i = 0; i < niters; i++) { + configure_kperf_stacks_timer(-1, 1, true); + T_QUIET; + T_ASSERT_POSIX_SUCCESS(kperf_sample_set(1), "start kperf sampling"); + usleep(2000); + kperf_reset(); + } + T_LOG("configured kperf with a timer %d times", niters); +} + +#pragma mark - kdebug triggers #define KDEBUG_TRIGGER_TIMEOUT_NS (10 * NSEC_PER_SEC) @@ -317,7 +716,8 @@ expect_kdebug_trigger(const char *filter_desc, const uint32_t *debugids, kperf_kdebug_filter_t filter; s = ktrace_session_create(); - T_QUIET; T_ASSERT_NOTNULL(s, NULL); + T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(s, "ktrace_session_create"); + ktrace_set_collection_interval(s, 100); ktrace_events_single(s, PERF_STK_KHDR, ^(struct trace_point *tp) { missing_kernel_stacks--; @@ -352,8 +752,6 @@ expect_kdebug_trigger(const char *filter_desc, const uint32_t *debugids, T_END; }); - /* configure kperf */ - kperf_reset(); (void)kperf_action_count_set(1); @@ -373,7 +771,7 @@ expect_kdebug_trigger(const char *filter_desc, const uint32_t *debugids, T_ASSERT_POSIX_ZERO(ktrace_start(s, dispatch_get_main_queue()), NULL); - /* trace the triggering debugids */ + // Trace the triggering events. for (unsigned int i = 0; i < n_debugids; i++) { T_ASSERT_POSIX_SUCCESS(kdebug_trace(debugids[i], 0, 0, 0, 0), NULL); @@ -395,7 +793,7 @@ expect_kdebug_trigger(const char *filter_desc, const uint32_t *debugids, #define TRIGGER_DEBUGID \ (KDBG_EVENTID(TRIGGER_CLASS, TRIGGER_SUBCLASS, TRIGGER_CODE)) -T_DECL(kdebug_trigger_classes, +T_DECL(kperf_kdebug_trigger_classes, "test that kdebug trigger samples on classes") { start_controlling_ktrace(); @@ -412,7 +810,7 @@ T_DECL(kdebug_trigger_classes, dispatch_main(); } -T_DECL(kdebug_trigger_subclasses, +T_DECL(kperf_kdebug_trigger_subclasses, "test that kdebug trigger samples on subclasses") { start_controlling_ktrace(); @@ -429,7 +827,7 @@ T_DECL(kdebug_trigger_subclasses, dispatch_main(); } -T_DECL(kdebug_trigger_debugids, +T_DECL(kperf_kdebug_trigger_debugids, "test that kdebug trigger samples on debugids") { start_controlling_ktrace(); @@ -443,10 +841,8 @@ T_DECL(kdebug_trigger_debugids, dispatch_main(); } -/* - * TODO Set a single function specifier filter, expect not to trigger of all - * events from that class. - */ +// TODO Set a single function specifier filter, expect not to trigger of all +// events from that class. static void reset_kperf(void) @@ -454,7 +850,7 @@ reset_kperf(void) (void)kperf_reset(); } -T_DECL(kdbg_callstacks, +T_DECL(kperf_kdbg_callstacks, "test that the kdbg_callstacks samples on syscalls") { start_controlling_ktrace(); @@ -463,11 +859,10 @@ T_DECL(kdbg_callstacks, __block bool saw_user_stack = false; s = ktrace_session_create(); - T_ASSERT_NOTNULL(s, NULL); + T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(s, "ktrace_session_create"); + ktrace_set_collection_interval(s, 100); - /* - * Make sure BSD events are traced in order to trigger samples on syscalls. - */ + // Make sure BSD events are traced in order to trigger samples on syscalls. 
ktrace_events_class(s, DBG_BSD, ^void (__unused struct trace_point *tp) {}); ktrace_events_single(s, PERF_STK_UHDR, ^(__unused struct trace_point *tp) { @@ -499,17 +894,21 @@ T_DECL(kdbg_callstacks, dispatch_main(); } -#pragma mark PET +#pragma mark - PET #define STACKS_WAIT_DURATION_NS (3 * NSEC_PER_SEC) static void -expect_stacks_traced(void (^cb)(void)) +expect_stacks_traced(void (^setup)(ktrace_session_t s), void (^complete)(void)) { ktrace_session_t s; s = ktrace_session_create(); - T_QUIET; T_ASSERT_NOTNULL(s, "ktrace_session_create"); + T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(s, "ktrace_session_create"); + ktrace_set_collection_interval(s, 100); + if (setup) { + setup(s); + } __block unsigned int user_stacks = 0; __block unsigned int kernel_stacks = 0; @@ -525,7 +924,7 @@ expect_stacks_traced(void (^cb)(void)) ktrace_session_destroy(s); T_EXPECT_GT(user_stacks, 0U, NULL); T_EXPECT_GT(kernel_stacks, 0U, NULL); - cb(); + complete(); }); T_QUIET; T_ASSERT_POSIX_SUCCESS(kperf_sample_set(1), NULL); @@ -540,21 +939,21 @@ expect_stacks_traced(void (^cb)(void)) }); } -T_DECL(pet, "test that PET mode samples kernel and user stacks") +T_DECL(kperf_pet, "test that PET mode samples kernel and user stacks") { start_controlling_ktrace(); - configure_kperf_stacks_timer(-1, 10); + configure_kperf_stacks_timer(-1, 10, false); T_ASSERT_POSIX_SUCCESS(kperf_timer_pet_set(0), NULL); - expect_stacks_traced(^(void) { + expect_stacks_traced(NULL, ^(void) { T_END; }); dispatch_main(); } -T_DECL(lightweight_pet, +T_DECL(kperf_lightweight_pet, "test that lightweight PET mode samples kernel and user stacks", T_META_ASROOT(true)) { @@ -562,46 +961,52 @@ T_DECL(lightweight_pet, int set = 1; - configure_kperf_stacks_timer(-1, 10); + configure_kperf_stacks_timer(-1, 10, false); T_ASSERT_POSIX_SUCCESS(sysctlbyname("kperf.lightweight_pet", NULL, NULL, &set, sizeof(set)), NULL); T_ASSERT_POSIX_SUCCESS(kperf_timer_pet_set(0), NULL); - expect_stacks_traced(^(void) { + __block uint64_t nfires = 0; + + expect_stacks_traced(^(ktrace_session_t s) { + ktrace_events_single(s, PERF_TMR_FIRE, ^(struct trace_point *tp) { + nfires++; + T_QUIET; + T_ASSERT_EQ(tp->arg1, (uint64_t)0, + "timer fire should have timer ID of 0"); + T_QUIET; + T_ASSERT_EQ(tp->arg2, (uint64_t)1, + "timer fire should have PET bit set"); + }); + }, ^(void) { + T_ASSERT_GT(nfires, (uint64_t)0, "timer fired at least once"); T_END; }); dispatch_main(); } -T_DECL(pet_stress, "repeatedly enable and disable PET mode") +T_DECL(kperf_pet_stress, "repeatedly enable and disable PET mode") { start_controlling_ktrace(); - int niters = 1000; - while (niters--) { - configure_kperf_stacks_timer(-1, 10); + const int niters = 500; + for (int i = 0; i < niters; i++) { + configure_kperf_stacks_timer(-1, 1, true); T_QUIET; T_ASSERT_POSIX_SUCCESS(kperf_timer_pet_set(0), NULL); - usleep(20); + T_QUIET; + T_ASSERT_POSIX_SUCCESS(kperf_sample_set(1), "start kperf sampling"); + usleep(2000); kperf_reset(); } - ; -} -T_DECL(timer_stress, "repeatedly enable and disable timers") -{ - start_controlling_ktrace(); - - int niters = 1000; - while (niters--) { - configure_kperf_stacks_timer(-1, 1); - usleep(20); - kperf_reset(); - } - ; + T_PASS("configured kperf PET %d times", niters); } -T_DECL(pmc_config_only, "shouldn't show PMC config events unless requested") +#pragma mark - PMCs + +T_DECL(kperf_pmc_config_only, + "shouldn't show PMC config events unless requested") { start_controlling_ktrace(); @@ -609,11 +1014,9 @@ T_DECL(pmc_config_only, "shouldn't show PMC config events unless 
requested") __block bool saw_kpc_reg = false; ktrace_session_t s = ktrace_session_create(); - T_ASSERT_NOTNULL(s, "ktrace_session_create"); + T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(s, "ktrace_session_create"); + ktrace_set_collection_interval(s, 100); - /* - * Make sure BSD events are traced in order to trigger samples on syscalls. - */ ktrace_events_single(s, PERF_KPC_CONFIG, ^(__unused struct trace_point *tp) { saw_kpc_config = true; @@ -692,13 +1095,16 @@ skip_if_monotonic_unsupported(void) #define INSTRS_CYCLES_UPPER 500 #define INSTRS_CYCLES_LOWER 50 -T_DECL(instrs_cycles, "ensure instructions and cycles are sampled") +T_DECL(kperf_sample_instrs_cycles, + "ensure instructions and cycles are sampled") { skip_if_monotonic_unsupported(); start_controlling_ktrace(); ktrace_session_t sess = ktrace_session_create(); + T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(sess, "ktrace_session_create"); + ktrace_set_collection_interval(sess, 100); __block uint64_t ninstrs_cycles = 0; __block uint64_t nzeroes = 0; @@ -749,4 +1155,3 @@ T_DECL(instrs_cycles, "ensure instructions and cycles are sampled") dispatch_main(); } - diff --git a/tests/kperf_backtracing.c b/tests/kperf_backtracing.c index 5c5e3dcfc..2c133a1a9 100644 --- a/tests/kperf_backtracing.c +++ b/tests/kperf_backtracing.c @@ -1,6 +1,4 @@ -#ifdef T_NAMESPACE -#undef T_NAMESPACE -#endif +/* Copyright (c) 2018-2019 Apple Inc. All rights reserved. */ #include #include @@ -28,7 +26,7 @@ T_GLOBAL_META( static void expect_frame(const char **bt, unsigned int bt_len, CSSymbolRef symbol, - unsigned long addr, unsigned int bt_idx, unsigned int max_frames) + uint64_t addr, unsigned int bt_idx, unsigned int max_frames) { const char *name; unsigned int frame_idx = max_frames - bt_idx - 1; @@ -39,14 +37,11 @@ expect_frame(const char **bt, unsigned int bt_len, CSSymbolRef symbol, return; } - if (CSIsNull(symbol)) { - T_FAIL("invalid symbol for address %#lx at frame %d", addr, - frame_idx); - return; - } + T_LOG("checking frame %d: %llx", bt_idx, addr); + T_ASSERT_FALSE(CSIsNull(symbol), "invalid symbol for return address"); if (frame_idx >= bt_len) { - T_FAIL("unexpected frame '%s' (%#lx) at index %u", + T_FAIL("unexpected frame '%s' (%#" PRIx64 ") at index %u", CSSymbolGetName(symbol), addr, frame_idx); return; } @@ -80,6 +75,8 @@ expect_backtrace(ktrace_session_t s, uint64_t tid, unsigned int *stacks_seen, __block unsigned int hdr_frames = 0; __block unsigned int allow_larger = allow_larger_by; + T_SETUPBEGIN; + if (kern) { static CSSymbolicatorRef kern_symb; static dispatch_once_t kern_symb_once; @@ -112,7 +109,8 @@ expect_backtrace(ktrace_session_t s, uint64_t tid, unsigned int *stacks_seen, return; } - T_LOG("found stack from thread %#" PRIx64, tp->threadid); + T_LOG("found %s stack from thread %#" PRIx64, kern ? 
"kernel" : "user", + tp->threadid); stacks++; if (!(tp->arg1 & 1)) { T_FAIL("invalid %s stack on thread %#" PRIx64, @@ -151,7 +149,7 @@ expect_backtrace(ktrace_session_t s, uint64_t tid, unsigned int *stacks_seen, } for (; i < 4 && frames < hdr_frames; i++, frames++) { - unsigned long addr = (&tp->arg1)[i]; + uint64_t addr = (&tp->arg1)[i]; CSSymbolRef symbol = CSSymbolicatorGetSymbolWithAddressAtTime( symb, addr, kCSNow); @@ -166,6 +164,8 @@ expect_backtrace(ktrace_session_t s, uint64_t tid, unsigned int *stacks_seen, } } }); + + T_SETUPEND; } #define TRIGGERING_DEBUGID (0xfeff0f00) @@ -361,7 +361,7 @@ start_backtrace_thread(void) #define TEST_TIMEOUT_NS (5 * NSEC_PER_SEC) #endif /* !TARGET_OS_WATCH */ -T_DECL(kdebug_trigger, +T_DECL(kperf_stacks_kdebug_trig, "test that backtraces from kdebug trigger are correct", T_META_ASROOT(true)) { @@ -425,7 +425,7 @@ T_DECL(kdebug_trigger, dispatch_main(); } -T_DECL(user_timer, +T_DECL(kperf_ustack_timer, "test that user backtraces on a timer are correct", T_META_ASROOT(true)) { @@ -443,7 +443,7 @@ T_DECL(user_timer, ktrace_filter_pid(s, getpid()); - configure_kperf_stacks_timer(getpid(), 10); + configure_kperf_stacks_timer(getpid(), 10, false); tid = create_backtrace_thread(backtrace_thread, wait_for_spinning); /* potentially calling dispatch function and system call */ @@ -496,7 +496,7 @@ spin_thread(void *arg) return NULL; } -T_DECL(truncated_user_stacks, "ensure stacks are marked as truncated") +T_DECL(kperf_ustack_trunc, "ensure stacks are marked as truncated") { start_controlling_ktrace(); @@ -508,7 +508,7 @@ T_DECL(truncated_user_stacks, "ensure stacks are marked as truncated") T_QUIET; T_ASSERT_POSIX_ZERO(ktrace_filter_pid(s, getpid()), NULL); - configure_kperf_stacks_timer(getpid(), 10); + configure_kperf_stacks_timer(getpid(), 10, false); __block bool saw_stack = false; ktrace_set_completion_handler(s, ^{ @@ -547,7 +547,7 @@ T_DECL(truncated_user_stacks, "ensure stacks are marked as truncated") dispatch_main(); } -T_DECL(max_user_stacks, "ensure stacks up to 256 frames can be captured") +T_DECL(kperf_ustack_maxlen, "ensure stacks up to 256 frames can be captured") { start_controlling_ktrace(); @@ -559,7 +559,7 @@ T_DECL(max_user_stacks, "ensure stacks up to 256 frames can be captured") T_QUIET; T_ASSERT_POSIX_ZERO(ktrace_filter_pid(s, getpid()), NULL); - configure_kperf_stacks_timer(getpid(), 10); + configure_kperf_stacks_timer(getpid(), 10, false); __block bool saw_stack = false; __block bool saw_stack_data = false; diff --git a/tests/kperf_helpers.c b/tests/kperf_helpers.c index c6b4d6d5a..91be99fd3 100644 --- a/tests/kperf_helpers.c +++ b/tests/kperf_helpers.c @@ -5,21 +5,33 @@ #include void -configure_kperf_stacks_timer(pid_t pid, unsigned int period_ms) +configure_kperf_stacks_timer(pid_t pid, unsigned int period_ms, bool quiet) { kperf_reset(); (void)kperf_action_count_set(1); (void)kperf_timer_count_set(1); + if (quiet) { + T_QUIET; + } T_ASSERT_POSIX_SUCCESS(kperf_action_samplers_set(1, KPERF_SAMPLER_USTACK | KPERF_SAMPLER_KSTACK), NULL); if (pid != -1) { + if (quiet) { + T_QUIET; + } T_ASSERT_POSIX_SUCCESS(kperf_action_filter_set_by_pid(1, pid), NULL); } + if (quiet) { + T_QUIET; + } T_ASSERT_POSIX_SUCCESS(kperf_timer_action_set(0, 1), NULL); + if (quiet) { + T_QUIET; + } T_ASSERT_POSIX_SUCCESS(kperf_timer_period_set(0, kperf_ns_to_ticks(period_ms * NSEC_PER_MSEC)), NULL); } diff --git a/tests/kperf_helpers.h b/tests/kperf_helpers.h index b31cc4dad..238d37afb 100644 --- a/tests/kperf_helpers.h +++ b/tests/kperf_helpers.h @@ -2,8 
+2,10 @@ #define KPERF_HELPERS_H #include +#include -void configure_kperf_stacks_timer(pid_t pid, unsigned int period_ms); +void configure_kperf_stacks_timer(pid_t pid, unsigned int period_ms, + bool quiet); #define PERF_SAMPLE KDBG_EVENTID(DBG_PERF, 0, 0) #define PERF_KPC_PMI KDBG_EVENTID(DBG_PERF, 6, 0) diff --git a/tests/ktrace_helpers.h b/tests/ktrace_helpers.h index 05191cbae..76466714e 100644 --- a/tests/ktrace_helpers.h +++ b/tests/ktrace_helpers.h @@ -2,9 +2,10 @@ #define KTRACE_HELPERS_H #include +#include #include #include -#include +#include static inline void reset_ktrace(void) @@ -56,4 +57,19 @@ out: T_SETUPEND; } +static inline uint64_t +ns_from_abs(ktrace_session_t s, uint64_t abstime) +{ + uint64_t ns = 0; + int error = ktrace_convert_timestamp_to_nanoseconds(s, abstime, &ns); + T_QUIET; T_ASSERT_POSIX_ZERO(error, "convert abstime to nanoseconds"); + return ns; +} + +static inline uint64_t +relns_from_abs(ktrace_session_t s, uint64_t abstime) +{ + return ns_from_abs(s, abstime - ktrace_get_earliest_timestamp(s)); +} + #endif /* !defined(KTRACE_HELPERS_H) */ diff --git a/tests/launchd_plists/com.apple.xnu.test.turnstile_multihop.plist b/tests/launchd_plists/com.apple.xnu.test.turnstile_multihop.plist index e4d42415f..4fe11d94a 100644 --- a/tests/launchd_plists/com.apple.xnu.test.turnstile_multihop.plist +++ b/tests/launchd_plists/com.apple.xnu.test.turnstile_multihop.plist @@ -2,12 +2,13 @@ - Label - com.apple.xnu.test.turnstile_multihop MachServices com.apple.xnu.test.turnstile_multihop - + + ResetAtClose + + ThrottleInterval 1 @@ -20,5 +21,7 @@ MallocNanoZone 1 + LaunchOnlyOnce + diff --git a/tests/ldt.c b/tests/ldt.c index e6261ddae..c97519e63 100644 --- a/tests/ldt.c +++ b/tests/ldt.c @@ -1073,6 +1073,15 @@ T_DECL(ldt64_with_bsd_sighandling, pthread_t cmthread; thread_arg_t cmarg; + int translated = 0; + size_t translated_size = sizeof(int); + + sysctlbyname("sysctl.proc_translated", &translated, &translated_size, NULL, 0); + + if (translated) { + T_SKIP("Skipping this test because it is translated"); + } + setup_signal_handling(); #ifndef STANDALONE @@ -1108,6 +1117,15 @@ T_DECL(ldt64_with_mach_exception_handling, pthread_t cmthread; thread_arg_t cmarg; + int translated = 0; + size_t translated_size = sizeof(int); + + sysctlbyname("sysctl.proc_translated", &translated, &translated_size, NULL, 0); + + if (translated) { + T_SKIP("Skipping this test because it is translated"); + } + #ifndef STANDALONE T_SETUPBEGIN; #endif diff --git a/tests/mach_eventlink.c b/tests/mach_eventlink.c new file mode 100644 index 000000000..924dfacf5 --- /dev/null +++ b/tests/mach_eventlink.c @@ -0,0 +1,812 @@ +/* + * mach eventlink: Tests mach eventlink kernel synchronization primitive. 
+ */ + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +T_GLOBAL_META(T_META_NAMESPACE("xnu.mach_eventlink"), + T_META_RUN_CONCURRENTLY(true)); + +static kern_return_t +test_eventlink_create(mach_port_t *port_pair) +{ + kern_return_t kr; + + kr = mach_eventlink_create(mach_task_self(), MELC_OPTION_NO_COPYIN, port_pair); + T_ASSERT_MACH_SUCCESS(kr, "mach_eventlink_create"); + + return kr; +} + +static pthread_t +thread_create_for_test(void * (*function)(void *), void *arg) +{ + pthread_t pthread; + pthread_attr_t attr; + + pthread_attr_init(&attr); + pthread_create(&pthread, &attr, function, arg); + + T_LOG("pthread created\n"); + return pthread; +} + +static void * +while1loop(void *arg) +{ + arg = NULL; + while (1) { + ; + } + return NULL; +} + +static void * +test_eventlink_wait_with_timeout(void *arg) +{ + kern_return_t kr; + mach_port_t eventlink_port = (mach_port_t) (uintptr_t)arg; + mach_port_t self = mach_thread_self(); + uint64_t ticks = mach_absolute_time(); + uint64_t count = 1; + + /* Associate thread with eventlink port */ + kr = mach_eventlink_associate(eventlink_port, self, 0, 0, 0, 0, MELA_OPTION_NONE); + T_ASSERT_MACH_SUCCESS(kr, "mach_eventlink_associate"); + + /* Wait on the eventlink with timeout */ + kr = mach_eventlink_wait_until(eventlink_port, &count, MELSW_OPTION_NONE, + KERN_CLOCK_MACH_ABSOLUTE_TIME, ticks + 5000); + + T_EXPECT_MACH_ERROR(kr, KERN_OPERATION_TIMED_OUT, "mach_eventlink_wait_until returned expected error"); + T_EXPECT_EQ(count, (uint64_t)0, "mach_eventlink_wait_until returned correct count value"); + + return NULL; +} + +static void * +test_eventlink_wait_no_wait(void *arg) +{ + kern_return_t kr; + mach_port_t eventlink_port = (mach_port_t) (uintptr_t)arg; + mach_port_t self = mach_thread_self(); + uint64_t count = 1; + + /* Associate thread with eventlink port */ + kr = mach_eventlink_associate(eventlink_port, self, 0, 0, 0, 0, MELA_OPTION_NONE); + T_ASSERT_MACH_SUCCESS(kr, "mach_eventlink_associate"); + + /* Wait on the eventlink */ + kr = mach_eventlink_wait_until(eventlink_port, &count, MELSW_OPTION_NO_WAIT, + KERN_CLOCK_MACH_ABSOLUTE_TIME, 0); + + T_EXPECT_MACH_ERROR(kr, KERN_OPERATION_TIMED_OUT, "mach_eventlink_wait_until returned expected error"); + T_EXPECT_EQ(count, (uint64_t)0, "mach_eventlink_wait_until returned correct count value"); + + return NULL; +} + +static void * +test_eventlink_wait_destroy(void *arg) +{ + kern_return_t kr; + mach_port_t eventlink_port = (mach_port_t) (uintptr_t)arg; + mach_port_t self = mach_thread_self(); + uint64_t count = 1; + + /* Associate thread with eventlink port */ + kr = mach_eventlink_associate(eventlink_port, self, 0, 0, 0, 0, MELA_OPTION_NONE); + T_ASSERT_MACH_SUCCESS(kr, "mach_eventlink_associate"); + + /* Wait on the eventlink */ + kr = mach_eventlink_wait_until(eventlink_port, &count, MELSW_OPTION_NONE, + KERN_CLOCK_MACH_ABSOLUTE_TIME, 0); + + T_EXPECT_MACH_ERROR(kr, KERN_TERMINATED, "mach_eventlink_wait_until returned expected error"); + + return NULL; +} + +static void * +test_eventlink_wait_for_signal(void *arg) +{ + kern_return_t kr; + mach_port_t eventlink_port = (mach_port_t) (uintptr_t)arg; + mach_port_t self = mach_thread_self(); + uint64_t count = 0; + + /* Associate thread with eventlink port */ + kr = mach_eventlink_associate(eventlink_port, self, 0, 0, 0, 0, MELA_OPTION_NONE); + T_ASSERT_MACH_SUCCESS(kr, 
"mach_eventlink_associate"); + + /* Wait on the eventlink */ + kr = mach_eventlink_wait_until(eventlink_port, &count, MELSW_OPTION_NONE, + KERN_CLOCK_MACH_ABSOLUTE_TIME, 0); + + T_ASSERT_MACH_SUCCESS(kr, "mach_eventlink_wait_until"); + T_EXPECT_EQ(count, (uint64_t)1, "mach_eventlink_wait_until returned correct count value"); + + return NULL; +} + +static void * +test_eventlink_wait_then_signal(void *arg) +{ + kern_return_t kr; + mach_port_t eventlink_port = (mach_port_t) (uintptr_t)arg; + mach_port_t self = mach_thread_self(); + uint64_t count = 0; + + /* Associate thread with eventlink port */ + kr = mach_eventlink_associate(eventlink_port, self, 0, 0, 0, 0, MELA_OPTION_NONE); + T_ASSERT_MACH_SUCCESS(kr, "mach_eventlink_associate"); + + /* Wait on the eventlink */ + kr = mach_eventlink_wait_until(eventlink_port, &count, MELSW_OPTION_NONE, + KERN_CLOCK_MACH_ABSOLUTE_TIME, 0); + + T_ASSERT_MACH_SUCCESS(kr, "mach_eventlink_wait_until"); + T_EXPECT_EQ(count, (uint64_t)1, "mach_eventlink_wait_until returned correct count value"); + + /* Signal the eventlink to wakeup other side */ + kr = mach_eventlink_signal(eventlink_port, 0); + T_ASSERT_MACH_SUCCESS(kr, "mach_eventlink_signal"); + + return NULL; +} + +static void * +test_eventlink_wait_then_wait_signal_with_no_wait(void *arg) +{ + kern_return_t kr; + mach_port_t eventlink_port = (mach_port_t) (uintptr_t)arg; + mach_port_t self = mach_thread_self(); + uint64_t count = 0; + + /* Associate thread with eventlink port */ + kr = mach_eventlink_associate(eventlink_port, self, 0, 0, 0, 0, MELA_OPTION_NONE); + T_ASSERT_MACH_SUCCESS(kr, "mach_eventlink_associate"); + + /* Wait on the eventlink */ + kr = mach_eventlink_wait_until(eventlink_port, &count, MELSW_OPTION_NONE, + KERN_CLOCK_MACH_ABSOLUTE_TIME, 0); + + T_ASSERT_MACH_SUCCESS(kr, "mach_eventlink_wait_until"); + T_EXPECT_EQ(count, (uint64_t)1, "mach_eventlink_wait_until returned correct count value"); + + /* Signal wait the eventlink */ + kr = mach_eventlink_signal_wait_until(eventlink_port, &count, 0, MELSW_OPTION_NO_WAIT, + KERN_CLOCK_MACH_ABSOLUTE_TIME, 0); + + T_EXPECT_MACH_ERROR(kr, KERN_OPERATION_TIMED_OUT, "mach_eventlink_wait_until returned expected error"); + T_EXPECT_EQ(count, (uint64_t)1, "mach_eventlink_wait_until returned correct count value"); + + return NULL; +} + +static void * +test_eventlink_wait_then_wait_signal_with_prepost(void *arg) +{ + kern_return_t kr; + mach_port_t eventlink_port = (mach_port_t) (uintptr_t)arg; + mach_port_t self = mach_thread_self(); + uint64_t count = 0; + + /* Associate thread with eventlink port */ + kr = mach_eventlink_associate(eventlink_port, self, 0, 0, 0, 0, MELA_OPTION_NONE); + T_ASSERT_MACH_SUCCESS(kr, "mach_eventlink_associate"); + + /* Wait on the eventlink */ + kr = mach_eventlink_wait_until(eventlink_port, &count, MELSW_OPTION_NONE, + KERN_CLOCK_MACH_ABSOLUTE_TIME, 0); + + T_ASSERT_MACH_SUCCESS(kr, "mach_eventlink_wait_until"); + T_EXPECT_EQ(count, (uint64_t)1, "mach_eventlink_wait_until returned correct count value"); + + /* Signal wait the eventlink with stale counter value */ + count = 0; + kr = mach_eventlink_signal_wait_until(eventlink_port, &count, 0, MELSW_OPTION_NONE, + KERN_CLOCK_MACH_ABSOLUTE_TIME, 0); + + T_ASSERT_MACH_SUCCESS(kr, "mach_eventlink_signal_wait_until"); + T_EXPECT_EQ(count, (uint64_t)1, "mach_eventlink_wait_until returned correct count value"); + + return NULL; +} + +static void * +test_eventlink_wait_then_signal_loop(void *arg) +{ + kern_return_t kr; + mach_port_t eventlink_port = (mach_port_t) (uintptr_t)arg; + 
mach_port_t self = mach_thread_self(); + uint64_t count = 0; + int i; + + /* Associate thread with eventlink port */ + kr = mach_eventlink_associate(eventlink_port, self, 0, 0, 0, 0, MELA_OPTION_NONE); + T_ASSERT_MACH_SUCCESS(kr, "mach_eventlink_associate"); + + /* Wait on the eventlink */ + kr = mach_eventlink_wait_until(eventlink_port, &count, MELSW_OPTION_NONE, + KERN_CLOCK_MACH_ABSOLUTE_TIME, 0); + + T_ASSERT_MACH_SUCCESS(kr, "mach_eventlink_wait_until"); + T_EXPECT_EQ(count, (uint64_t)1, "mach_eventlink_wait_until returned correct count value"); + + for (i = 1; i < 100; i++) { + /* Signal wait the eventlink */ + kr = mach_eventlink_signal_wait_until(eventlink_port, &count, 0, MELSW_OPTION_NONE, + KERN_CLOCK_MACH_ABSOLUTE_TIME, 0); + + T_ASSERT_MACH_SUCCESS(kr, "mach_eventlink_signal_wait_until"); + T_EXPECT_EQ(count, (uint64_t)(i + 1), "mach_eventlink_wait_until returned correct count value"); + } + + /* Signal the eventlink to wakeup other side */ + kr = mach_eventlink_signal(eventlink_port, 0); + T_ASSERT_MACH_SUCCESS(kr, "mach_eventlink_signal"); + + return NULL; +} + +/* + * Test 1: Create ipc eventlink kernel object. + * + * Calls eventlink creates which returns a pair of eventlink port objects. + */ +T_DECL(test_eventlink_create, "eventlink create test", T_META_ASROOT(YES)) +{ + kern_return_t kr; + mach_port_t port_pair[2]; + + kr = test_eventlink_create(port_pair); + if (kr != KERN_SUCCESS) { + return; + } + + mach_port_deallocate(mach_task_self(), port_pair[0]); + mach_port_deallocate(mach_task_self(), port_pair[1]); +} + +/* + * Test 2: Create ipc eventlink kernel object and call eventlink destroy + * + * Calls eventlink creates which returns a pair of eventlink port objects. + * Calls eventlink destroy on eventlink port pair. + */ +T_DECL(test_eventlink_destroy, "eventlink destroy test", T_META_ASROOT(YES)) +{ + kern_return_t kr; + mach_port_t port_pair[2]; + + kr = test_eventlink_create(port_pair); + if (kr != KERN_SUCCESS) { + return; + } + + kr = mach_eventlink_destroy(port_pair[0]); + T_ASSERT_MACH_SUCCESS(kr, "mach_eventlink_destroy"); + kr = mach_eventlink_destroy(port_pair[1]); + T_ASSERT_MACH_SUCCESS(kr, "mach_eventlink_destroy"); +} + +/* + * Test 3: Associate threads to eventlink object. + * + * Create eventlink object pair and associate threads to each side and then + * disassociate threads and check for error conditions. 
+ */ +T_DECL(test_eventlink_associate, "eventlink associate test", T_META_ASROOT(YES)) +{ + kern_return_t kr; + mach_port_t port_pair[2]; + mach_port_t self = mach_thread_self(); + mach_port_t other_thread = MACH_PORT_NULL; + pthread_t pthread; + + /* eventlink associate to NULL eventlink object */ + kr = mach_eventlink_associate(MACH_PORT_NULL, self, 0, 0, 0, 0, MELA_OPTION_NONE); + T_EXPECT_MACH_ERROR(kr, MACH_SEND_INVALID_DEST, "mach_eventlink_associate with null eventlink returned expected error"); + + /* eventlink disassociate to NULL eventlink object */ + kr = mach_eventlink_disassociate(MACH_PORT_NULL, MELD_OPTION_NONE); + T_EXPECT_MACH_ERROR(kr, MACH_SEND_INVALID_DEST, "mach_eventlink_disassociate with null eventlink returned expected error"); + + /* Create an eventlink and associate threads to it */ + kr = test_eventlink_create(port_pair); + if (kr != KERN_SUCCESS) { + return; + } + + pthread = thread_create_for_test(while1loop, NULL); + other_thread = pthread_mach_thread_np(pthread); + + for (int i = 0; i < 3; i++) { + /* Associate thread to eventlink objects */ + kr = mach_eventlink_associate(port_pair[0], self, 0, 0, 0, 0, MELA_OPTION_NONE); + T_ASSERT_MACH_SUCCESS(kr, "mach_eventlink_associate for object 1"); + + kr = mach_eventlink_associate(port_pair[1], other_thread, 0, 0, 0, 0, MELA_OPTION_NONE); + T_ASSERT_MACH_SUCCESS(kr, "mach_eventlink_associate for object 2"); + + /* Try to associate again with different threads, expect failure */ + kr = mach_eventlink_associate(port_pair[0], other_thread, 0, 0, 0, 0, MELA_OPTION_NONE); + T_EXPECT_MACH_ERROR(kr, KERN_NAME_EXISTS, "mach_eventlink_associate for associated " + "objects returned expected error"); + + kr = mach_eventlink_associate(port_pair[1], self, 0, 0, 0, 0, MELA_OPTION_NONE); + T_EXPECT_MACH_ERROR(kr, KERN_NAME_EXISTS, "mach_eventlink_associate for associated " + "objects returned expected error"); + + /* Try to disassociate the threads */ + kr = mach_eventlink_disassociate(port_pair[0], MELD_OPTION_NONE); + T_ASSERT_MACH_SUCCESS(kr, "mach_eventlink_disassociate for object 1"); + + kr = mach_eventlink_disassociate(port_pair[1], MELD_OPTION_NONE); + T_ASSERT_MACH_SUCCESS(kr, "mach_eventlink_disassociate for object 2"); + + /* Try to disassociate the threads again, expect failure */ + kr = mach_eventlink_disassociate(port_pair[0], MELD_OPTION_NONE); + T_EXPECT_MACH_ERROR(kr, KERN_INVALID_ARGUMENT, "mach_eventlink_disassociate for " + "disassociated objects returned expected error"); + + kr = mach_eventlink_disassociate(port_pair[1], MELD_OPTION_NONE); + T_EXPECT_MACH_ERROR(kr, KERN_INVALID_ARGUMENT, "mach_eventlink_disassociate for " + "disassociated objects returned expected error"); + } + + kr = mach_eventlink_destroy(port_pair[0]); + T_ASSERT_MACH_SUCCESS(kr, "mach_eventlink_destroy"); + + /* Try to disassociate on the other end of the destroyed eventlink pair */ + kr = mach_eventlink_disassociate(port_pair[1], MELD_OPTION_NONE); + T_EXPECT_MACH_ERROR(kr, KERN_TERMINATED, "mach_eventlink_disassociate for " + "terminated object returned expected error"); + + kr = mach_eventlink_destroy(port_pair[1]); + T_ASSERT_MACH_SUCCESS(kr, "mach_eventlink_destroy"); +} + +/* + * Test 4: Test eventlink wait with timeout. + * + * Create an eventlink object, associate threads and test eventlink wait with timeout. 
+ */ +T_DECL(test_eventlink_wait_timeout, "eventlink wait timeout test", T_META_ASROOT(YES)) +{ + kern_return_t kr; + mach_port_t port_pair[2]; + pthread_t pthread; + + /* Create an eventlink and associate threads to it */ + kr = test_eventlink_create(port_pair); + if (kr != KERN_SUCCESS) { + return; + } + + pthread = thread_create_for_test(test_eventlink_wait_with_timeout, (void *)(uintptr_t)port_pair[0]); + sleep(10); + + /* Destroy the eventlink object; the wake status of the thread determines whether the test passed or failed */ + mach_port_deallocate(mach_task_self(), port_pair[0]); + mach_port_deallocate(mach_task_self(), port_pair[1]); + + pthread_join(pthread, NULL); +} + +/* + * Test 5: Test eventlink wait with no wait. + * + * Create an eventlink object, associate threads and test eventlink wait with no wait flag. + */ +T_DECL(test_eventlink_wait_no_wait, "eventlink wait no wait test", T_META_ASROOT(YES)) +{ + kern_return_t kr; + mach_port_t port_pair[2]; + pthread_t pthread; + + /* Create an eventlink and associate threads to it */ + kr = test_eventlink_create(port_pair); + if (kr != KERN_SUCCESS) { + return; + } + + pthread = thread_create_for_test(test_eventlink_wait_no_wait, (void *)(uintptr_t)port_pair[0]); + pthread_join(pthread, NULL); + + mach_port_deallocate(mach_task_self(), port_pair[0]); + mach_port_deallocate(mach_task_self(), port_pair[1]); +} + +/* + * Test 6: Test eventlink wait and destroy. + * + * Create an eventlink object, associate threads and destroy the port. + */ +T_DECL(test_eventlink_wait_and_destroy, "eventlink wait and destroy", T_META_ASROOT(YES)) +{ + kern_return_t kr; + mach_port_t port_pair[2]; + pthread_t pthread; + + /* Create an eventlink and associate threads to it */ + kr = test_eventlink_create(port_pair); + if (kr != KERN_SUCCESS) { + return; + } + + pthread = thread_create_for_test(test_eventlink_wait_destroy, (void *)(uintptr_t)port_pair[0]); + + sleep(5); + + /* Increase the send right count for the port before destroy to make sure a no-senders notification does not fire on destroy */ + kr = mach_port_mod_refs(mach_task_self(), port_pair[0], MACH_PORT_RIGHT_SEND, 2); + T_ASSERT_MACH_SUCCESS(kr, "mach_port_mod_refs"); + + /* Destroy the port for the thread to wake up */ + kr = mach_eventlink_destroy(port_pair[0]); + T_ASSERT_MACH_SUCCESS(kr, "mach_eventlink_destroy"); + + pthread_join(pthread, NULL); + mach_port_deallocate(mach_task_self(), port_pair[1]); +} + + +/* + * Test 7: Test eventlink wait and destroy remote side. + * + * Create an eventlink object, associate threads, wait and destroy the remote eventlink port. 
+ */ +T_DECL(test_eventlink_wait_and_destroy_remote, "eventlink wait and remote destroy", T_META_ASROOT(YES)) +{ + kern_return_t kr; + mach_port_t port_pair[2]; + pthread_t pthread; + + /* Create an eventlink and associate threads to it */ + kr = test_eventlink_create(port_pair); + if (kr != KERN_SUCCESS) { + return; + } + + pthread = thread_create_for_test(test_eventlink_wait_destroy, (void *)(uintptr_t)port_pair[0]); + + sleep(5); + + /* Increase the send right count for port before destroy to make sure no sender does not fire on destroy */ + kr = mach_port_mod_refs(mach_task_self(), port_pair[1], MACH_PORT_RIGHT_SEND, 2); + T_ASSERT_MACH_SUCCESS(kr, "mach_port_mod_refs"); + + /* Destroy the port for thread to wakeup */ + kr = mach_eventlink_destroy(port_pair[1]); + T_ASSERT_MACH_SUCCESS(kr, "mach_eventlink_destroy"); + + pthread_join(pthread, NULL); + mach_port_deallocate(mach_task_self(), port_pair[0]); +} + +/* + * Test 8: Test eventlink wait and deallocate port. + * + * Create an eventlink object, associate threads, wait and deallocate the eventlink port. + */ +T_DECL(test_eventlink_wait_and_deallocate, "eventlink wait and deallocate", T_META_ASROOT(YES)) +{ + kern_return_t kr; + mach_port_t port_pair[2]; + pthread_t pthread; + + /* Create an eventlink and associate threads to it */ + kr = test_eventlink_create(port_pair); + if (kr != KERN_SUCCESS) { + return; + } + + pthread = thread_create_for_test(test_eventlink_wait_destroy, (void *)(uintptr_t)port_pair[0]); + + sleep(5); + + /* Destroy the port for thread to wakeup */ + mach_port_deallocate(mach_task_self(), port_pair[0]); + + pthread_join(pthread, NULL); + mach_port_deallocate(mach_task_self(), port_pair[1]); +} + +/* + * Test 9: Test eventlink wait and disassociate. + * + * Create an eventlink object, associate threads, wait and disassociate thread from the eventlink port. + */ +T_DECL(test_eventlink_wait_and_disassociate, "eventlink wait and disassociate", T_META_ASROOT(YES)) +{ + kern_return_t kr; + mach_port_t port_pair[2]; + pthread_t pthread; + + /* Create an eventlink and associate threads to it */ + kr = test_eventlink_create(port_pair); + if (kr != KERN_SUCCESS) { + return; + } + + pthread = thread_create_for_test(test_eventlink_wait_destroy, (void *)(uintptr_t)port_pair[0]); + + sleep(5); + + /* Disassociate thread from eventlink for thread to wakeup */ + kr = mach_eventlink_disassociate(port_pair[0], MELD_OPTION_NONE); + T_ASSERT_MACH_SUCCESS(kr, "mach_eventlink_disassociate"); + + pthread_join(pthread, NULL); + mach_port_deallocate(mach_task_self(), port_pair[1]); + mach_port_deallocate(mach_task_self(), port_pair[0]); +} + +/* + * Test 10: Test eventlink wait and signal. + * + * Create an eventlink object, associate threads and test wait signal. 
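/*
 * Editor's note (not part of the patch): Tests 10 through 15 reference waiter helpers
 * (test_eventlink_wait_for_signal, test_eventlink_wait_then_signal, ...) that are
 * defined earlier in this file and not shown in this hunk. The sketch below shows
 * roughly what the waiter half of Test 10 does, using only the call shapes visible
 * here: it associates with side 0 of the pair and blocks until the main thread, which
 * associates with side 1, calls mach_eventlink_signal(). The real helpers likely use
 * the wait-only mach_eventlink_wait_until() call (whose argument list is not shown in
 * this hunk) rather than signal_wait_until, and they assert specific count values; the
 * names below are made up for illustration.
 */
static void *
example_wait_for_signal(void *arg)
{
	mach_port_t el = (mach_port_t)(uintptr_t)arg;   /* side 0 of the eventlink pair */
	uint64_t count = 0;
	kern_return_t kr;

	kr = mach_eventlink_associate(el, mach_thread_self(), 0, 0, 0, 0, MELA_OPTION_NONE);
	T_ASSERT_MACH_SUCCESS(kr, "waiter: mach_eventlink_associate");

	/*
	 * Deadline 0 means "wait until signaled" in the T_DECLs below. Because
	 * signal_wait_until presumably also posts a signal toward side 1, a wait-only
	 * variant would be the better fit in real code; in Test 10 the main thread never
	 * waits, so the extra signal would simply never be consumed.
	 */
	kr = mach_eventlink_signal_wait_until(el, &count, 0, MELSW_OPTION_NONE,
	    KERN_CLOCK_MACH_ABSOLUTE_TIME, 0);
	T_ASSERT_MACH_SUCCESS(kr, "waiter: woke up after the remote signal");
	T_LOG("waiter observed count %llu", count);
	return NULL;
}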
+ */ +T_DECL(test_eventlink_wait_and_signal, "eventlink wait and signal", T_META_ASROOT(YES)) +{ + kern_return_t kr; + mach_port_t port_pair[2]; + pthread_t pthread; + mach_port_t self = mach_thread_self(); + + /* Create an eventlink and associate threads to it */ + kr = test_eventlink_create(port_pair); + if (kr != KERN_SUCCESS) { + return; + } + + pthread = thread_create_for_test(test_eventlink_wait_for_signal, (void *)(uintptr_t)port_pair[0]); + + sleep(5); + + /* Associate thread and signal the eventlink */ + kr = mach_eventlink_associate(port_pair[1], self, 0, 0, 0, 0, MELA_OPTION_NONE); + T_ASSERT_MACH_SUCCESS(kr, "mach_eventlink_associate for object 2"); + + kr = mach_eventlink_signal(port_pair[1], 0); + T_ASSERT_MACH_SUCCESS(kr, "mach_eventlink_signal for object 2"); + + pthread_join(pthread, NULL); + + mach_port_deallocate(mach_task_self(), port_pair[0]); + mach_port_deallocate(mach_task_self(), port_pair[1]); +} + +/* + * Test 11: Test eventlink wait_signal. + * + * Create an eventlink object, associate threads and test wait_signal. + */ +T_DECL(test_eventlink_wait_signal, "eventlink wait_signal", T_META_ASROOT(YES)) +{ + kern_return_t kr; + mach_port_t port_pair[2]; + pthread_t pthread; + mach_port_t self = mach_thread_self(); + uint64_t count = 0; + + /* Create an eventlink and associate threads to it */ + kr = test_eventlink_create(port_pair); + if (kr != KERN_SUCCESS) { + return; + } + + pthread = thread_create_for_test(test_eventlink_wait_then_signal, (void *)(uintptr_t)port_pair[0]); + + sleep(5); + + /* Associate thread and wait_signal the eventlink */ + kr = mach_eventlink_associate(port_pair[1], self, 0, 0, 0, 0, MELA_OPTION_NONE); + T_ASSERT_MACH_SUCCESS(kr, "mach_eventlink_associate for object 2"); + + /* Wait on the eventlink with timeout */ + kr = mach_eventlink_signal_wait_until(port_pair[1], &count, 0, MELSW_OPTION_NONE, + KERN_CLOCK_MACH_ABSOLUTE_TIME, 0); + + T_ASSERT_MACH_SUCCESS(kr, "mach_eventlink_signal_wait_until"); + T_EXPECT_EQ(count, (uint64_t)1, "mach_eventlink_signal_wait_until returned correct count value"); + + pthread_join(pthread, NULL); + + mach_port_deallocate(mach_task_self(), port_pair[0]); + mach_port_deallocate(mach_task_self(), port_pair[1]); +} + +/* + * Test 12: Test eventlink wait_signal with no wait. + * + * Create an eventlink object, associate threads and test wait_signal with no wait. 
+ */ +T_DECL(test_eventlink_wait_signal_no_wait, "eventlink wait_signal with no wait", T_META_ASROOT(YES)) +{ + kern_return_t kr; + mach_port_t port_pair[2]; + pthread_t pthread; + mach_port_t self = mach_thread_self(); + uint64_t count = 0; + + /* Create an eventlink and associate threads to it */ + kr = test_eventlink_create(port_pair); + if (kr != KERN_SUCCESS) { + return; + } + + pthread = thread_create_for_test(test_eventlink_wait_then_wait_signal_with_no_wait, (void *)(uintptr_t)port_pair[0]); + + sleep(5); + + /* Associate thread and wait_signal the eventlink */ + kr = mach_eventlink_associate(port_pair[1], self, 0, 0, 0, 0, MELA_OPTION_NONE); + T_ASSERT_MACH_SUCCESS(kr, "mach_eventlink_associate for object 2"); + + /* Wait on the eventlink with timeout */ + kr = mach_eventlink_signal_wait_until(port_pair[1], &count, 0, MELSW_OPTION_NONE, + KERN_CLOCK_MACH_ABSOLUTE_TIME, 0); + + T_ASSERT_MACH_SUCCESS(kr, "mach_eventlink_signal_wait_until"); + T_EXPECT_EQ(count, (uint64_t)1, "mach_eventlink_signal_wait_until returned correct count value"); + + pthread_join(pthread, NULL); + + mach_port_deallocate(mach_task_self(), port_pair[0]); + mach_port_deallocate(mach_task_self(), port_pair[1]); +} + +/* + * Test 13: Test eventlink wait_signal with prepost. + * + * Create an eventlink object, associate threads and test wait_signal with prepost. + */ +T_DECL(test_eventlink_wait_signal_prepost, "eventlink wait_signal with prepost", T_META_ASROOT(YES)) +{ + kern_return_t kr; + mach_port_t port_pair[2]; + pthread_t pthread; + mach_port_t self = mach_thread_self(); + uint64_t count = 0; + + /* Create an eventlink and associate threads to it */ + kr = test_eventlink_create(port_pair); + if (kr != KERN_SUCCESS) { + return; + } + + pthread = thread_create_for_test(test_eventlink_wait_then_wait_signal_with_prepost, (void *)(uintptr_t)port_pair[0]); + + sleep(5); + + /* Associate thread and wait_signal the eventlink */ + kr = mach_eventlink_associate(port_pair[1], self, 0, 0, 0, 0, MELA_OPTION_NONE); + T_ASSERT_MACH_SUCCESS(kr, "mach_eventlink_associate for object 2"); + + /* Wait on the eventlink with timeout */ + kr = mach_eventlink_signal_wait_until(port_pair[1], &count, 0, MELSW_OPTION_NONE, + KERN_CLOCK_MACH_ABSOLUTE_TIME, 0); + + T_ASSERT_MACH_SUCCESS(kr, "mach_eventlink_signal_wait_until"); + T_EXPECT_EQ(count, (uint64_t)1, "mach_eventlink_signal_wait_until returned correct count value"); + + pthread_join(pthread, NULL); + + mach_port_deallocate(mach_task_self(), port_pair[0]); + mach_port_deallocate(mach_task_self(), port_pair[1]); +} + +/* + * Test 14: Test eventlink wait_signal with associate on wait option. + * + * Create an eventlink object, set associate on wait on one side and test wait_signal. 
+ */ +T_DECL(test_eventlink_wait_signal_associate_on_wait, "eventlink wait_signal associate on wait", T_META_ASROOT(YES)) +{ + kern_return_t kr; + mach_port_t port_pair[2]; + pthread_t pthread; + uint64_t count = 0; + + /* Create an eventlink and associate threads to it */ + kr = test_eventlink_create(port_pair); + if (kr != KERN_SUCCESS) { + return; + } + + pthread = thread_create_for_test(test_eventlink_wait_then_signal, (void *)(uintptr_t)port_pair[0]); + + sleep(5); + + /* Set associate on wait and wait_signal the eventlink */ + kr = mach_eventlink_associate(port_pair[1], MACH_PORT_NULL, 0, 0, 0, 0, MELA_OPTION_ASSOCIATE_ON_WAIT); + T_ASSERT_MACH_SUCCESS(kr, "mach_eventlink_associate with associate on wait for object 2"); + + /* Wait on the eventlink with timeout */ + kr = mach_eventlink_signal_wait_until(port_pair[1], &count, 0, MELSW_OPTION_NONE, + KERN_CLOCK_MACH_ABSOLUTE_TIME, 0); + + T_ASSERT_MACH_SUCCESS(kr, "mach_eventlink_signal_wait_until"); + T_EXPECT_EQ(count, (uint64_t)1, "mach_eventlink_signal_wait_until returned correct count value"); + + /* Remove associate on wait option */ + kr = mach_eventlink_disassociate(port_pair[1], MELD_OPTION_NONE); + T_ASSERT_MACH_SUCCESS(kr, "mach_eventlink_disassociate"); + + /* Wait on the eventlink with timeout */ + kr = mach_eventlink_signal_wait_until(port_pair[1], &count, 0, MELSW_OPTION_NONE, + KERN_CLOCK_MACH_ABSOLUTE_TIME, 0); + + T_EXPECT_MACH_ERROR(kr, KERN_INVALID_ARGUMENT, "mach_eventlink_wait_until returned expected error"); + + pthread_join(pthread, NULL); + + mach_port_deallocate(mach_task_self(), port_pair[0]); + mach_port_deallocate(mach_task_self(), port_pair[1]); +} + +/* + * Test 15: Test eventlink wait_signal_loop. + * + * Create an eventlink object, associate threads and test wait_signal in a loop. 
+ */ +T_DECL(test_eventlink_wait_signal_loop, "eventlink wait_signal in loop", T_META_ASROOT(YES)) +{ + kern_return_t kr; + mach_port_t port_pair[2]; + pthread_t pthread; + mach_port_t self = mach_thread_self(); + uint64_t count = 0; + int i; + + /* Create an eventlink and associate threads to it */ + kr = test_eventlink_create(port_pair); + if (kr != KERN_SUCCESS) { + return; + } + + pthread = thread_create_for_test(test_eventlink_wait_then_signal_loop, (void *)(uintptr_t)port_pair[0]); + + /* Associate thread and wait_signal the eventlink */ + kr = mach_eventlink_associate(port_pair[1], self, 0, 0, 0, 0, MELA_OPTION_NONE); + T_ASSERT_MACH_SUCCESS(kr, "mach_eventlink_associate for object 2"); + + for (i = 0; i < 100; i++) { + /* Wait on the eventlink with timeout */ + kr = mach_eventlink_signal_wait_until(port_pair[1], &count, 0, MELSW_OPTION_NONE, + KERN_CLOCK_MACH_ABSOLUTE_TIME, 0); + + T_ASSERT_MACH_SUCCESS(kr, "main thread: mach_eventlink_signal_wait_until"); + T_EXPECT_EQ(count, (uint64_t)(i + 1), "main thread: mach_eventlink_signal_wait_until returned correct count value"); + } + + pthread_join(pthread, NULL); + + mach_port_deallocate(mach_task_self(), port_pair[0]); + mach_port_deallocate(mach_task_self(), port_pair[1]); +} diff --git a/tests/memcmp_zero.c b/tests/memcmp_zero.c new file mode 100644 index 000000000..4e5131c66 --- /dev/null +++ b/tests/memcmp_zero.c @@ -0,0 +1,91 @@ +#include +#include +#include +#include +#include +#include + +static inline unsigned char * +get_guarded_page(void) +{ + unsigned char *p = mmap(NULL, 3 * PAGE_SIZE, PROT_NONE, MAP_SHARED | MAP_ANON, 0, 0); + p += PAGE_SIZE; + mprotect(p, PAGE_SIZE, PROT_READ | PROT_WRITE); + return p; +} + +static inline void +free_guarded_page(unsigned char *p) +{ + munmap(p - PAGE_SIZE, 3 * PAGE_SIZE); +} + +/* memcmp_zero_ptr_aligned() checks string s of n bytes contains all zeros. + * Address and size of the string s must be pointer-aligned. + * Return 0 if true, 1 otherwise. Also return 0 if n is 0. + */ +extern int +memcmp_zero_ptr_aligned(const void *s, size_t n); + +T_DECL(memcmp_zero, "memcmp_zero") +{ + // the assembly version is for the kernel and doesn't support arm64_32 +#if defined(__arm64__) && __LP64__ + unsigned char *buffer = get_guarded_page(); + unsigned char *right = buffer + PAGE_SIZE - 512; + const int ptr_size = sizeof(buffer); + + for (size_t i = 0; i < 256; i += ptr_size) { + for (size_t j = i; j < 256; ++j) { + for (size_t k = 0; k < 256; ++k) { + if (k < i) { + buffer[k] = (unsigned char)rand(); + } else if (k < j) { + buffer[k] = '\0'; + } else if (k == j) { + do { + buffer[k] = (unsigned char)rand(); + } while (!buffer[k]); + } else { + buffer[k] = '\0'; + } + } + for (size_t m = 0; m < 128; m += ptr_size) { + int result = memcmp_zero_ptr_aligned(&buffer[i], m); + int ref = j - i < m ? 1 : 0; + T_QUIET; T_ASSERT_EQ(result, ref, "expected %d, saw %d\n" + "memcmp_zero_ptr_aligned(buf[%zd], %zd)\n", + ref, result, i, m); + } + + + for (size_t k = 0; k < 256; ++k) { + if (k < i) { + right[k] = (unsigned char)rand(); + } else if (k < j) { + right[k] = '\0'; + } else if (k == j) { + do { + right[k] = (unsigned char)rand(); + } while (!right[k]); + } else { + right[k] = '\0'; + } + } + for (size_t m = 0; m < 256; m += ptr_size) { + int result = memcmp_zero_ptr_aligned(&right[i], m); + int ref = j - i < m ? 
1 : 0; + T_QUIET; T_ASSERT_EQ(result, ref, "expected %d, saw %d\n" + "memcmp_zero_ptr_aligned(buf[%zd], %zd)\n", + ref, result, i, m); + } + } + } + + T_PASS("success"); + + free_guarded_page(buffer); +#else + T_SKIP("no optimized version to test"); +#endif +} diff --git a/tests/memorystatus_freeze_test.c b/tests/memorystatus_freeze_test.c index 1ed21b3d9..58aa69659 100644 --- a/tests/memorystatus_freeze_test.c +++ b/tests/memorystatus_freeze_test.c @@ -2,6 +2,7 @@ #include #include #include +#include #include #include #include @@ -64,7 +65,7 @@ get_vmpage_size() static pid_t child_pid = -1; static int freeze_count = 0; -void move_to_idle_band(void); +void move_to_idle_band(pid_t); void run_freezer_test(int); void freeze_helper_process(void); /* Gets and optionally sets the freeze pages max threshold */ @@ -245,7 +246,7 @@ get_rprvt(pid_t pid) } void -move_to_idle_band(void) +move_to_idle_band(pid_t pid) { memorystatus_priority_properties_t props; /* @@ -259,7 +260,7 @@ move_to_idle_band(void) * This requires us to run as root (in the absence of entitlement). * Hence the T_META_ASROOT(true) in the T_HELPER_DECL. */ - if (memorystatus_control(MEMORYSTATUS_CMD_SET_PRIORITY_PROPERTIES, getpid(), 0, &props, sizeof(props))) { + if (memorystatus_control(MEMORYSTATUS_CMD_SET_PRIORITY_PROPERTIES, pid, 0, &props, sizeof(props))) { exit(MEMSTAT_PRIORITY_CHANGE_FAILED); } } @@ -312,24 +313,31 @@ freeze_helper_process(void) T_QUIET; T_ASSERT_POSIX_SUCCESS(kill(child_pid, SIGUSR1), "failed to send SIGUSR1 to child process"); } +static void +skip_if_freezer_is_disabled() +{ + int freeze_enabled; + size_t length = sizeof(freeze_enabled); + + T_QUIET; T_ASSERT_POSIX_SUCCESS(sysctlbyname("vm.freeze_enabled", &freeze_enabled, &length, NULL, 0), + "failed to query vm.freeze_enabled"); + if (!freeze_enabled) { + /* If freezer is disabled, skip the test. This can happen due to disk space shortage. */ + T_SKIP("Freeze has been disabled. Skipping test."); + } +} + void run_freezer_test(int num_pages) { - int ret, freeze_enabled; + int ret; char sz_str[50]; char **launch_tool_args; char testpath[PATH_MAX]; uint32_t testpath_buf_size; dispatch_source_t ds_freeze, ds_proc; - size_t length; - length = sizeof(freeze_enabled); - T_QUIET; T_ASSERT_POSIX_SUCCESS(sysctlbyname("vm.freeze_enabled", &freeze_enabled, &length, NULL, 0), - "failed to query vm.freeze_enabled"); - if (!freeze_enabled) { - /* If freezer is disabled, skip the test. This can happen due to disk space shortage. */ - T_SKIP("Freeze has been disabled. Skipping test."); - } + skip_if_freezer_is_disabled(); signal(SIGUSR1, SIG_IGN); ds_freeze = dispatch_source_create(DISPATCH_SOURCE_TYPE_SIGNAL, SIGUSR1, 0, dispatch_get_main_queue()); @@ -467,7 +475,7 @@ allocate_pages(int num_pages) } }); dispatch_activate(ds_signal); - move_to_idle_band(); + move_to_idle_band(getpid()); dispatch_main(); } @@ -590,9 +598,6 @@ freeze_process(pid_t pid) static void memorystatus_assertion_test_demote_frozen() { -#if !CONFIG_EMBEDDED - T_SKIP("Freezing processes is only supported on embedded"); -#endif /* * Test that if we assert a priority on a process, freeze it, and then demote all frozen processes, it does not get demoted below the asserted priority. * Then remove thee assertion, and ensure it gets demoted properly. 
@@ -697,3 +702,304 @@ T_DECL(budget_replenishment, "budget replenishes properly") { T_QUIET; T_ASSERT_EQ(new_budget, expected_new_budget_pages, "Calculate new budget behaves correctly."); } + + +static bool +is_proc_in_frozen_list(pid_t pid, char* name, size_t name_len) +{ + int bytes_written; + bool found = false; + global_frozen_procs_t *frozen_procs = malloc(sizeof(global_frozen_procs_t)); + T_QUIET; T_ASSERT_NOTNULL(frozen_procs, "malloc"); + + bytes_written = memorystatus_control(MEMORYSTATUS_CMD_FREEZER_CONTROL, 0, FREEZER_CONTROL_GET_PROCS, frozen_procs, sizeof(global_frozen_procs_t)); + T_QUIET; T_ASSERT_LE((size_t) bytes_written, sizeof(global_frozen_procs_t), "Didn't overflow buffer"); + T_QUIET; T_ASSERT_GT(bytes_written, 0, "Wrote someting"); + + for (size_t i = 0; i < frozen_procs->gfp_num_frozen; i++) { + if (frozen_procs->gfp_procs[i].fp_pid == pid) { + found = true; + strlcpy(name, frozen_procs->gfp_procs[i].fp_name, name_len); + } + } + return found; +} + +static void +drop_jetsam_snapshot_ownership(void) +{ + int ret; + ret = memorystatus_control(MEMORYSTATUS_CMD_SET_JETSAM_SNAPSHOT_OWNERSHIP, 0, MEMORYSTATUS_FLAGS_SNAPSHOT_DROP_OWNERSHIP, NULL, 0); + T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, 0, "Drop ownership of jetsam snapshot"); +} + +static void +take_jetsam_snapshot_ownership(void) +{ + int ret; + ret = memorystatus_control(MEMORYSTATUS_CMD_SET_JETSAM_SNAPSHOT_OWNERSHIP, 0, MEMORYSTATUS_FLAGS_SNAPSHOT_TAKE_OWNERSHIP, NULL, 0); + T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "Take ownership of jetsam snapshot"); + T_ATEND(drop_jetsam_snapshot_ownership); +} + +/* + * Retrieve a jetsam snapshot. + * + * return: + * pointer to snapshot. + * + * Caller is responsible for freeing snapshot. + */ +static +memorystatus_jetsam_snapshot_t * +get_jetsam_snapshot(uint32_t flags, bool empty_allowed) +{ + memorystatus_jetsam_snapshot_t * snapshot = NULL; + int ret; + uint32_t size; + + ret = memorystatus_control(MEMORYSTATUS_CMD_GET_JETSAM_SNAPSHOT, 0, flags, NULL, 0); + T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, 0, "Get jetsam snapshot size"); + size = (uint32_t) ret; + if (size == 0 && empty_allowed) { + return snapshot; + } + + snapshot = (memorystatus_jetsam_snapshot_t*)malloc(size); + T_QUIET; T_ASSERT_NOTNULL(snapshot, "Allocate snapshot of size %d", size); + + ret = memorystatus_control(MEMORYSTATUS_CMD_GET_JETSAM_SNAPSHOT, 0, flags, snapshot, size); + T_QUIET; T_ASSERT_GT(size, 0, "Get jetsam snapshot"); + + if (((size - sizeof(memorystatus_jetsam_snapshot_t)) / sizeof(memorystatus_jetsam_snapshot_entry_t)) != snapshot->entry_count) { + T_FAIL("Malformed snapshot: %d! Expected %ld + %zd x %ld = %ld\n", size, + sizeof(memorystatus_jetsam_snapshot_t), snapshot->entry_count, sizeof(memorystatus_jetsam_snapshot_entry_t), + sizeof(memorystatus_jetsam_snapshot_t) + (snapshot->entry_count * sizeof(memorystatus_jetsam_snapshot_entry_t))); + if (snapshot) { + free(snapshot); + } + } + + return snapshot; +} + +/* + * Look for the given pid in the snapshot. + * + * return: + * pointer to pid's entry or NULL if pid is not found. + * + * Caller has ownership of snapshot before and after call. 
+ */ +static +memorystatus_jetsam_snapshot_entry_t * +get_jetsam_snapshot_entry(memorystatus_jetsam_snapshot_t *snapshot, pid_t pid) +{ + T_QUIET; T_ASSERT_NOTNULL(snapshot, "Got snapshot"); + for (size_t i = 0; i < snapshot->entry_count; i++) { + memorystatus_jetsam_snapshot_entry_t *curr = &(snapshot->entries[i]); + if (curr->pid == pid) { + return curr; + } + } + + return NULL; +} + +/* + * Launches the child & runs the given block after the child signals. + * If exit_with_child is true, the test will exit when the child exits. + */ +static void +test_after_frozen_background_launches(bool exit_with_child, dispatch_block_t test_block) +{ + dispatch_source_t ds_signal, ds_exit; + + /* Run the test block after the child launches & signals it's ready. */ + signal(SIGUSR1, SIG_IGN); + ds_signal = dispatch_source_create(DISPATCH_SOURCE_TYPE_SIGNAL, SIGUSR1, 0, dispatch_get_main_queue()); + T_QUIET; T_ASSERT_NOTNULL(ds_signal, "dispatch_source_create"); + dispatch_source_set_event_handler(ds_signal, test_block); + /* Launch the child process. */ + child_pid = launch_frozen_background_process(); + /* Listen for exit. */ + if (exit_with_child) { + ds_exit = dispatch_source_create(DISPATCH_SOURCE_TYPE_PROC, (uintptr_t)child_pid, DISPATCH_PROC_EXIT, dispatch_get_main_queue()); + dispatch_source_set_event_handler(ds_exit, ^{ + int status = 0, code = 0; + pid_t rc = waitpid(child_pid, &status, 0); + T_QUIET; T_ASSERT_EQ(rc, child_pid, "waitpid"); + code = WEXITSTATUS(status); + T_QUIET; T_ASSERT_EQ(code, 0, "Child exited cleanly"); + T_END; + }); + + dispatch_activate(ds_exit); + } + dispatch_activate(ds_signal); + dispatch_main(); +} + +T_DECL(get_frozen_procs, "List processes in the freezer") { + skip_if_freezer_is_disabled(); + + test_after_frozen_background_launches(true, ^{ + proc_name_t name; + /* Place the child in the idle band so that it gets elevated like a typical app. */ + move_to_idle_band(child_pid); + /* Freeze the process, and check that it's in the list of frozen processes. */ + freeze_process(child_pid); + /* Check */ + T_QUIET; T_ASSERT_TRUE(is_proc_in_frozen_list(child_pid, name, sizeof(name)), "Found proc in frozen list"); + T_QUIET; T_EXPECT_EQ_STR(name, "memorystatus_freeze_test", "Proc has correct name"); + /* Kill the child */ + T_QUIET; T_ASSERT_POSIX_SUCCESS(kill(child_pid, SIGKILL), "Killed child process"); + T_END; + }); +} + +T_DECL(frozen_to_swap_accounting, "jetsam snapshot has frozen_to_swap accounting") { + static const size_t kSnapshotSleepDelay = 5; + static const size_t kFreezeToDiskMaxDelay = 60; + + skip_if_freezer_is_disabled(); + + test_after_frozen_background_launches(true, ^{ + memorystatus_jetsam_snapshot_t *snapshot = NULL; + memorystatus_jetsam_snapshot_entry_t *child_entry = NULL; + /* Place the child in the idle band so that it gets elevated like a typical app. */ + move_to_idle_band(child_pid); + freeze_process(child_pid); + /* + * Wait until the child's pages get paged out to disk. + * If we don't see any pages get sent to disk before kFreezeToDiskMaxDelay seconds, + * something is either wrong with the compactor or the accounting. 
+ */ + for (size_t i = 0; i < kFreezeToDiskMaxDelay / kSnapshotSleepDelay; i++) { + snapshot = get_jetsam_snapshot(MEMORYSTATUS_FLAGS_SNAPSHOT_ON_DEMAND, false); + child_entry = get_jetsam_snapshot_entry(snapshot, child_pid); + T_QUIET; T_ASSERT_NOTNULL(child_entry, "Found child in snapshot"); + if (child_entry->jse_frozen_to_swap_pages > 0) { + break; + } + free(snapshot); + sleep(kSnapshotSleepDelay); + } + T_QUIET; T_ASSERT_GT(child_entry->jse_frozen_to_swap_pages, 0ULL, "child has some pages in swap"); + free(snapshot); + /* Kill the child */ + T_QUIET; T_ASSERT_POSIX_SUCCESS(kill(child_pid, SIGKILL), "Killed child process"); + T_END; + }); +} + +T_DECL(freezer_snapshot, "App kills are recorded in the freezer snapshot") { + /* Take ownership of the snapshot to ensure we don't race with another process trying to consume them. */ + take_jetsam_snapshot_ownership(); + + test_after_frozen_background_launches(false, ^{ + int ret; + memorystatus_jetsam_snapshot_t *snapshot = NULL; + memorystatus_jetsam_snapshot_entry_t *child_entry = NULL; + + ret = memorystatus_control(MEMORYSTATUS_CMD_TEST_JETSAM, child_pid, 0, 0, 0); + T_ASSERT_POSIX_SUCCESS(ret, "jetsam'd the child"); + + snapshot = get_jetsam_snapshot(MEMORYSTATUS_FLAGS_SNAPSHOT_FREEZER, false); + T_ASSERT_NOTNULL(snapshot, "Got freezer snapshot"); + child_entry = get_jetsam_snapshot_entry(snapshot, child_pid); + T_QUIET; T_ASSERT_NOTNULL(child_entry, "Child is in freezer snapshot"); + T_QUIET; T_ASSERT_EQ(child_entry->killed, (unsigned long long) JETSAM_REASON_GENERIC, "Child entry was killed"); + + free(snapshot); + T_END; + }); +} + +T_DECL(freezer_snapshot_consume, "Freezer snapshot is consumed on read") { + /* Take ownership of the snapshot to ensure we don't race with another process trying to consume them. */ + take_jetsam_snapshot_ownership(); + + test_after_frozen_background_launches(false, ^{ + int ret; + memorystatus_jetsam_snapshot_t *snapshot = NULL; + memorystatus_jetsam_snapshot_entry_t *child_entry = NULL; + + ret = memorystatus_control(MEMORYSTATUS_CMD_TEST_JETSAM, child_pid, 0, 0, 0); + T_ASSERT_POSIX_SUCCESS(ret, "jetsam'd the child"); + + snapshot = get_jetsam_snapshot(MEMORYSTATUS_FLAGS_SNAPSHOT_FREEZER, false); + T_ASSERT_NOTNULL(snapshot, "Got first freezer snapshot"); + child_entry = get_jetsam_snapshot_entry(snapshot, child_pid); + T_QUIET; T_ASSERT_NOTNULL(child_entry, "Child is in first freezer snapshot"); + + snapshot = get_jetsam_snapshot(MEMORYSTATUS_FLAGS_SNAPSHOT_FREEZER, true); + if (snapshot != NULL) { + child_entry = get_jetsam_snapshot_entry(snapshot, child_pid); + T_QUIET; T_ASSERT_NULL(child_entry, "Child is not in second freezer snapshot"); + } + + free(snapshot); + T_END; + }); +} + +T_DECL(freezer_snapshot_frozen_state, "Frozen state is recorded in freezer snapshot") { + skip_if_freezer_is_disabled(); + /* Take ownership of the snapshot to ensure we don't race with another process trying to consume them. 
*/ + take_jetsam_snapshot_ownership(); + + test_after_frozen_background_launches(false, ^{ + int ret; + memorystatus_jetsam_snapshot_t *snapshot = NULL; + memorystatus_jetsam_snapshot_entry_t *child_entry = NULL; + + move_to_idle_band(child_pid); + freeze_process(child_pid); + + ret = memorystatus_control(MEMORYSTATUS_CMD_TEST_JETSAM, child_pid, 0, 0, 0); + T_ASSERT_POSIX_SUCCESS(ret, "jetsam'd the child"); + + snapshot = get_jetsam_snapshot(MEMORYSTATUS_FLAGS_SNAPSHOT_FREEZER, false); + T_ASSERT_NOTNULL(snapshot, "Got freezer snapshot"); + child_entry = get_jetsam_snapshot_entry(snapshot, child_pid); + T_QUIET; T_ASSERT_NOTNULL(child_entry, "Child is in freezer snapshot"); + T_QUIET; T_ASSERT_TRUE(child_entry->state & kMemorystatusFrozen, "Child entry's frozen bit is set"); + + free(snapshot); + T_END; + }); +} + +T_DECL(freezer_snapshot_thaw_state, "Thaw count is recorded in freezer snapshot") { + skip_if_freezer_is_disabled(); + /* Take ownership of the snapshot to ensure we don't race with another process trying to consume them. */ + take_jetsam_snapshot_ownership(); + + test_after_frozen_background_launches(false, ^{ + int ret; + memorystatus_jetsam_snapshot_t *snapshot = NULL; + memorystatus_jetsam_snapshot_entry_t *child_entry = NULL; + + move_to_idle_band(child_pid); + ret = pid_suspend(child_pid); + T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "child suspended"); + freeze_process(child_pid); + ret = pid_resume(child_pid); + T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "child resumed after freeze"); + + ret = memorystatus_control(MEMORYSTATUS_CMD_TEST_JETSAM, child_pid, 0, 0, 0); + T_ASSERT_POSIX_SUCCESS(ret, "jetsam'd the child"); + + snapshot = get_jetsam_snapshot(MEMORYSTATUS_FLAGS_SNAPSHOT_FREEZER, false); + T_ASSERT_NOTNULL(snapshot, "Got freezer snapshot"); + child_entry = get_jetsam_snapshot_entry(snapshot, child_pid); + T_QUIET; T_ASSERT_NOTNULL(child_entry, "Child is in freezer snapshot"); + T_QUIET; T_ASSERT_TRUE(child_entry->state & kMemorystatusFrozen, "Child entry's frozen bit is still set after thaw"); + T_QUIET; T_ASSERT_TRUE(child_entry->state & kMemorystatusWasThawed, "Child entry was thawed"); + T_QUIET; T_ASSERT_EQ(child_entry->jse_thaw_count, 1ULL, "Child entry's thaw count was incremented"); + + free(snapshot); + T_END; + }); +} diff --git a/tests/memorystatus_is_assertion.c b/tests/memorystatus_is_assertion.c index 6475513e4..b14feabdd 100644 --- a/tests/memorystatus_is_assertion.c +++ b/tests/memorystatus_is_assertion.c @@ -123,7 +123,7 @@ memorystatus_assertion_test_repetitive(char *test, boolean_t turn_on_dirty_track /* these values will remain fixed during testing */ int active_limit_mb = 15; /* arbitrary */ - int inactive_limit_mb = 7; /* arbitrary */ + int inactive_limit_mb = 10; /* arbitrary */ /* these values may vary during test */ int requestedpriority = 0; @@ -225,7 +225,7 @@ memorystatus_assertion_test_allow_idle_exit() /* these values will remain fixed during testing */ int active_limit_mb = 15; /* arbitrary */ - int inactive_limit_mb = 7; /* arbitrary */ + int inactive_limit_mb = 10; /* arbitrary */ /* these values may vary during test */ int requestedpriority = JETSAM_PRIORITY_UI_SUPPORT; @@ -350,7 +350,7 @@ memorystatus_assertion_test_do_not_allow_idle_exit() /* these values will remain fixed during testing */ int active_limit_mb = 15; /* arbitrary */ - int inactive_limit_mb = 7; /* arbitrary */ + int inactive_limit_mb = 10; /* arbitrary */ int requestedpriority = JETSAM_PRIORITY_AUDIO_AND_ACCESSORY; T_SETUPBEGIN; diff --git a/tests/memorystatus_vm_map_fork.c 
b/tests/memorystatus_vm_map_fork.c index 1c450f8a7..ff73724cd 100644 --- a/tests/memorystatus_vm_map_fork.c +++ b/tests/memorystatus_vm_map_fork.c @@ -366,7 +366,7 @@ memorystatus_vm_map_fork_parent(int test_variant) */ wait_for_free_mem(active_limit_mb); -#if defined(__x86_64__) +#if TARGET_OS_OSX /* * vm_map_fork() is always allowed on desktop. */ @@ -458,12 +458,12 @@ memorystatus_vm_map_fork_parent(int test_variant) * We test "not allowed first", then "allowed". If it were the other way around, the corpse from the "allowed" * test would likely cause memory pressure and jetsam would likely kill the "not allowed" test. */ -T_DECL(memorystatus_vm_map_fork_test_not_allowed, "test that corpse generation was not allowed") +T_DECL(memorystatus_vm_map_fork_test_not_allowed, "test that corpse generation was not allowed", T_META_ASROOT(true)) { memorystatus_vm_map_fork_parent(TEST_NOT_ALLOWED); } -T_DECL(memorystatus_vm_map_fork_test_allowed, "test corpse generation allowed") +T_DECL(memorystatus_vm_map_fork_test_allowed, "test corpse generation allowed", T_META_ASROOT(true)) { memorystatus_vm_map_fork_parent(TEST_ALLOWED); } diff --git a/tests/memorystatus_zone_test.c b/tests/memorystatus_zone_test.c index b660e5c6a..04021ac35 100644 --- a/tests/memorystatus_zone_test.c +++ b/tests/memorystatus_zone_test.c @@ -218,7 +218,9 @@ allocate_from_generic_zone(void) /* return some of the resource to avoid O-O-M problems */ for (uint64_t j = 0; j < NUM_GIVE_BACK_PORTS && j < i; ++j) { - mach_port_deallocate(mach_task_self(), give_back[j]); + int ret; + ret = mach_port_mod_refs(mach_task_self(), give_back[j], MACH_PORT_RIGHT_RECEIVE, -1); + T_ASSERT_MACH_SUCCESS(ret, "mach_port_mod_refs(RECV_RIGHT, -1)"); } printf("[%d] Number of allocations: %lld\n", getpid(), i); @@ -636,7 +638,7 @@ run_test(void) initial_zone_occupancy = query_zone_map_size(); - /* On large memory systems, set the zone_map jetsam limit lower so we can hit it without timing out. */ + /* On large memory systems, set the zone maps jetsam limit lower so we can hit it without timing out. */ if (mem > (uint64_t)LARGE_MEM_GB * 1024 * 1024 * 1024) { new_limit = LARGE_MEM_JETSAM_LIMIT; } diff --git a/tests/mpsc.c b/tests/mpsc.c index 08ce2567e..fbc0f1c67 100644 --- a/tests/mpsc.c +++ b/tests/mpsc.c @@ -12,7 +12,7 @@ T_GLOBAL_META(T_META_NAMESPACE("xnu.mpsc"), T_META_RUN_CONCURRENTLY(true)); -T_DECL(pingpong, "mpsc_pingpong") +T_DECL(pingpong, "mpsc_pingpong", T_META_ASROOT(true)) { uint64_t count = 100 * 1000, nsecs = 0; size_t nlen = sizeof(nsecs); diff --git a/tests/net_bridge.c b/tests/net_bridge.c index 54ad5b67c..58f40f172 100644 --- a/tests/net_bridge.c +++ b/tests/net_bridge.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019 Apple Inc. All rights reserved. + * Copyright (c) 2019-2020 Apple Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -2255,19 +2255,28 @@ make_dhcp_payload(dhcp_min_payload_t payload, ether_addr_t *eaddr) } static void -mac_nat_test_dhcp(switch_port_list_t port_list) +mac_nat_test_dhcp(switch_port_list_t port_list, bool link_layer_unicast) { u_int i; struct in_addr ip_dst = { INADDR_BROADCAST }; struct in_addr ip_src = { INADDR_ANY }; switch_port_t port; + ether_addr_t * ether_dst; + if (link_layer_unicast) { + /* use link-layer address of MAC-NAT interface */ + ether_dst = &port_list->list[0].member_mac; + } else { + /* use link-layer broadcast address */ + ether_dst = ðer_broadcast; + } for (i = 0, port = port_list->list; i < port_list->count; i++, port++) { ether_addr_t eaddr; dhcp_min_payload payload; u_int payload_len; - if (port->mac_nat) { + if (!link_layer_unicast && port->mac_nat) { + /* only send through non-MAC-NAT ports */ continue; } set_ethernet_address(&eaddr, port->unit, 0); @@ -2281,7 +2290,7 @@ mac_nat_test_dhcp(switch_port_list_t port_list) &eaddr, (union ifbrip *)&ip_src, BOOTP_CLIENT_PORT, - ðer_broadcast, + ether_dst, (union ifbrip *)&ip_dst, BOOTP_SERVER_PORT, &payload, @@ -2292,8 +2301,13 @@ mac_nat_test_dhcp(switch_port_list_t port_list) port); check_received_count(port_list, port, 1); + if (link_layer_unicast) { + /* send a single unicast to MAC-NAT interface */ + break; + } } - T_PASS("%s", __func__); + T_PASS("%s %s", __func__, + link_layer_unicast ? "unicast" : "broadcast"); } @@ -3230,6 +3244,8 @@ bridge_cleanup(const char * bridge, u_int n_ports, bool fail_on_error) * - verify DHCP broadcast bit conversion * - verify IPv6 translation * - verify ND6 translation (Neighbor, Router) + * - verify IPv4 subnet-local broadcast to MAC-NAT interface link-layer + * address arrives on all member links */ static void @@ -3280,12 +3296,15 @@ bridge_test_mac_nat_ipv4(u_int n_ports, u_int num_addrs) mac_nat_test_ip(port_list, AF_INET); /* verify the DHCP broadcast bit gets set appropriately */ - mac_nat_test_dhcp(port_list); + mac_nat_test_dhcp(port_list, false); /* verify that ARP packet gets translated when necessary */ mac_nat_test_arp_out(port_list); mac_nat_test_arp_in(port_list); + /* verify IP broadcast to MAC-NAT interface link layer address */ + mac_nat_test_dhcp(port_list, true); + if (S_debug) { T_LOG("Sleeping for 5 seconds"); sleep(5); diff --git a/tests/net_tun_pr_35136664.c b/tests/net_tun_pr_35136664.c index 89b8fc995..967e98e96 100644 --- a/tests/net_tun_pr_35136664.c +++ b/tests/net_tun_pr_35136664.c @@ -10,7 +10,8 @@ #include T_GLOBAL_META(T_META_NAMESPACE("xnu.net"), - T_META_RUN_CONCURRENTLY(true)); + T_META_RUN_CONCURRENTLY(true), + T_META_ASROOT(true)); T_DECL(PR_35136664_utun, "This bind a utun and close it without connecting") diff --git a/tests/netagent_kctl_header_infodisc_56190773.c b/tests/netagent_kctl_header_infodisc_56190773.c new file mode 100644 index 000000000..bde14d871 --- /dev/null +++ b/tests/netagent_kctl_header_infodisc_56190773.c @@ -0,0 +1,51 @@ +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include + +struct netagent_message_header { + uint8_t message_type; + uint8_t message_flags; + uint8_t padding[2]; + uint32_t message_id; + uint32_t message_error; + uint32_t message_payload_length; +}; + +T_DECL(netagent_kctl_header_infodisc_56190773, "Zero out padding in netagent_message_header.") +{ + int s; + struct sockaddr_ctl sc; + struct ctl_info ci; + struct netagent_message_header m; + + T_SETUPBEGIN; + T_ASSERT_POSIX_SUCCESS(s = 
socket(AF_SYSTEM, SOCK_DGRAM, SYSPROTO_CONTROL), NULL); + + bzero(&ci, sizeof(ci)); + strcpy(ci.ctl_name, "com.apple.net.netagent"); + + T_ASSERT_POSIX_SUCCESS(ioctl(s, CTLIOCGINFO, &ci), NULL); + + bzero(&sc, sizeof(sc)); + sc.sc_id = ci.ctl_id; + T_ASSERT_POSIX_SUCCESS(connect(s, (const struct sockaddr *)&sc, sizeof(sc)), NULL); + + T_SETUPEND; + + bzero(&m, sizeof(m)); + T_ASSERT_POSIX_SUCCESS(send(s, &m, sizeof(m), 0), NULL); + + T_ASSERT_POSIX_SUCCESS(recv(s, &m, sizeof(m), 0), NULL); + T_ASSERT_EQ(m.padding[0], 0, NULL); + T_ASSERT_EQ(m.padding[1], 0, NULL); +} diff --git a/tests/netagent_race_infodisc_56244905.c b/tests/netagent_race_infodisc_56244905.c index cc451d8fd..f609bcca3 100644 --- a/tests/netagent_race_infodisc_56244905.c +++ b/tests/netagent_race_infodisc_56244905.c @@ -4,6 +4,7 @@ #include #include +#include #include #include #include @@ -57,7 +58,7 @@ struct netagent { char netagent_desc[128]; uint32_t netagent_flags; uint32_t netagent_data_size; - uint8_t netagent_data[0]; + /*uint8_t netagent_data[0];*/ }; static void * @@ -114,6 +115,16 @@ unregister_racer(void *data) #define NITERS 200000 +static size_t +data_available(int sock) +{ + int n = 0; + socklen_t nlen = sizeof(n); + + getsockopt(sock, SOL_SOCKET, SO_NREAD, &n, &nlen); + return (size_t)n; +} + T_DECL(netagent_race_infodisc_56244905, "Netagent race between register and post event.") { int s; @@ -132,6 +143,8 @@ T_DECL(netagent_race_infodisc_56244905, "Netagent race between register and post struct kev_netagent_data nd; } ev; int n; + int retry; + unsigned long leaked; T_SETUPBEGIN; /* set up the event socket so we can receive notifications: */ @@ -160,11 +173,21 @@ T_DECL(netagent_race_infodisc_56244905, "Netagent race between register and post /* keep going until we're done: */ for (n = 0; n < NITERS; ++n) { bzero(&ev, sizeof(ev)); - T_ASSERT_POSIX_SUCCESS(recv(evsock, &ev, sizeof(ev), 0), NULL); + for (retry = 0; retry < 20; ++retry) { + if (data_available(evsock) >= sizeof(ev) && + sizeof(ev) == recv(evsock, &ev, sizeof(ev), 0)) { + goto check1; + } + } + + continue; + +check1: if (ev.nd.netagent_uuid[0] != 0) { finished = 1; - T_ASSERT_FAIL("netagent register event leaked data: 0x%08lx", *(unsigned long *)ev.nd.netagent_uuid); + memcpy(&leaked, ev.nd.netagent_uuid, sizeof(leaked)); + T_ASSERT_FAIL("netagent register event leaked data: 0x%08lx", leaked); } } @@ -183,11 +206,21 @@ T_DECL(netagent_race_infodisc_56244905, "Netagent race between register and post /* keep going until we're done: */ for (n = 0; n < NITERS; ++n) { bzero(&ev, sizeof(ev)); - T_ASSERT_POSIX_SUCCESS(recv(evsock, &ev, sizeof(ev), 0), NULL); + for (retry = 0; retry < 20; ++retry) { + if (data_available(evsock) >= sizeof(ev) && + sizeof(ev) == recv(evsock, &ev, sizeof(ev), 0)) { + goto check2; + } + } + + continue; + +check2: if (ev.nd.netagent_uuid[0] != 0) { finished = 1; - T_ASSERT_FAIL("netagent register event leaked data: 0x%08lx", *(unsigned long *)ev.nd.netagent_uuid); + memcpy(&leaked, ev.nd.netagent_uuid, sizeof(leaked)); + T_ASSERT_FAIL("netagent register event leaked data: 0x%08lx", leaked); } } diff --git a/tests/no32exec_35914211.c b/tests/no32exec_35914211.c deleted file mode 100644 index 3ce06731d..000000000 --- a/tests/no32exec_35914211.c +++ /dev/null @@ -1,104 +0,0 @@ -#include -#include -#include -#include -#include -#include -#include -#include - -T_DECL(no32exec_bootarg_with_spawn, "make sure we can't posix_spawn 32-bit") -{ - int spawn_ret, pid; - char path[1024]; - uint32_t size = sizeof(path); - - T_QUIET; 
T_ASSERT_EQ(_NSGetExecutablePath(path, &size), 0, NULL); - T_QUIET; T_ASSERT_LT(strlcat(path, "_helper", size), (unsigned long)size, NULL); - - spawn_ret = posix_spawn(&pid, path, NULL, NULL, NULL, NULL); - if (spawn_ret == 0) { - int wait_ret = 0; - waitpid(pid, &wait_ret, 0); - T_ASSERT_FALSE(WIFEXITED(wait_ret), "i386 helper should not run"); - } - T_ASSERT_EQ(spawn_ret, EBADARCH, NULL); -} - -T_DECL(no32_exec_bootarg_with_exec, "make sure we can't fork and exec 32-bit") -{ - int pid; - char path[1024]; - uint32_t size = sizeof(path); - - T_QUIET; T_ASSERT_EQ(_NSGetExecutablePath(path, &size), 0, NULL); - T_QUIET; T_ASSERT_LT(strlcat(path, "_helper", size), (unsigned long)size, NULL); - - pid = fork(); - T_QUIET; T_ASSERT_POSIX_SUCCESS(pid, "fork"); - - if (pid == 0) { /* child */ - execve(path, NULL, NULL); /* this should fail, resulting in the call to exit below */ - exit(errno); - } else { /* parent */ - int wait_ret = 0; - waitpid(pid, &wait_ret, 0); - T_QUIET; T_ASSERT_TRUE(WIFEXITED(wait_ret), "child should have called exit()"); - T_ASSERT_EQ(WEXITSTATUS(wait_ret), EBADARCH, "execve should set errno = EBADARCH"); - } -} - -T_DECL(no32exec_bootarg_with_spawn_binprefs, "make sure we honor no32exec, using posix_spawn with binprefs on a fat i386/x86_64 Mach-O") -{ - int pid, ret; - posix_spawnattr_t spawnattr; - cpu_type_t cpuprefs[] = { CPU_TYPE_X86, CPU_TYPE_X86_64 }; - - char path[1024]; - uint32_t size = sizeof(path); - T_QUIET; T_ASSERT_EQ(_NSGetExecutablePath(path, &size), 0, NULL); - T_QUIET; T_ASSERT_LT(strlcat(path, "_helper_binprefs", size), (unsigned long)size, NULL); - - ret = posix_spawnattr_init(&spawnattr); - T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "posix_spawnattr_init"); - - ret = posix_spawnattr_setbinpref_np(&spawnattr, sizeof(cpuprefs) / sizeof(cpuprefs[0]), cpuprefs, NULL); - T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "posix_spawnattr_setbinpref_np"); - - ret = posix_spawn(&pid, path, NULL, &spawnattr, NULL, NULL); - T_ASSERT_EQ(ret, 0, "posix_spawn should succeed despite 32-bit binpref appearing first"); - - int wait_ret = 0; - ret = waitpid(pid, &wait_ret, 0); - T_QUIET; T_ASSERT_EQ(ret, pid, "child pid"); - - T_QUIET; T_ASSERT_EQ(WIFEXITED(wait_ret), 1, "child process should have called exit()"); - T_ASSERT_EQ(WEXITSTATUS(wait_ret), 8, "child process should be running in 64-bit mode"); - - ret = posix_spawnattr_destroy(&spawnattr); - T_QUIET; T_ASSERT_EQ(ret, 0, "posix_spawnattr_destroy"); -} - -T_DECL(no32exec_bootarg_with_32only_spawn_binprefs, "make sure we honor no32exec, using posix_spawn with 32-bit only binprefs on a fat i386/x86_64 Mach-O") -{ - int pid, ret, spawn_ret; - posix_spawnattr_t spawnattr; - cpu_type_t cpuprefs[] = { CPU_TYPE_X86 }; - - char path[1024]; - uint32_t size = sizeof(path); - T_QUIET; T_ASSERT_EQ(_NSGetExecutablePath(path, &size), 0, NULL); - T_QUIET; T_ASSERT_LT(strlcat(path, "_helper_binprefs", size), (unsigned long)size, NULL); - - ret = posix_spawnattr_init(&spawnattr); - T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "posix_spawnattr_init"); - - ret = posix_spawnattr_setbinpref_np(&spawnattr, sizeof(cpuprefs) / sizeof(cpuprefs[0]), cpuprefs, NULL); - T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "posix_spawnattr_setbinpref_np"); - - spawn_ret = posix_spawn(&pid, path, NULL, &spawnattr, NULL, NULL); - T_ASSERT_EQ(spawn_ret, EBADARCH, "posix_spawn should return EBADARCH since only 32-bit binpref is requested"); - - ret = posix_spawnattr_destroy(&spawnattr); - T_QUIET; T_ASSERT_EQ(ret, 0, "posix_spawnattr_destroy"); -} diff --git 
a/tests/no32exec_35914211_helper.c b/tests/no32exec_35914211_helper.c deleted file mode 100644 index 04069dcdc..000000000 --- a/tests/no32exec_35914211_helper.c +++ /dev/null @@ -1,17 +0,0 @@ -/* This is a file that compiles as a 32-bit helper to test - * forking of 32-bit programs, now that 32-bit has been - * deprecated on macOS despite still requiring its support in - * the watchOS simulator. - */ - -#include -#include - -int -main(int argc __unused, char **argv) -{ - (void)argc; - size_t retval = sizeof(void *); - printf("%s(%d): sizeof(void *) = %lu\n", argv[0], getpid(), retval); - return (int)retval; -} diff --git a/tests/os_atomic.cpp b/tests/os_atomic.cpp new file mode 100644 index 000000000..976c0650a --- /dev/null +++ b/tests/os_atomic.cpp @@ -0,0 +1,33 @@ +#include +#include + +T_GLOBAL_META( + T_META_RUN_CONCURRENTLY(true), + T_META_CHECK_LEAKS(false) + ); + +T_DECL(os_atomic, "Just to make sure things build at all in c++ mode") +{ + os_atomic(int) i = 0; + int old_i = 0; + volatile int v_i = 0; + int a, b; + + T_ASSERT_EQ(os_atomic_inc_orig(&i, relaxed), 0, "atomic inc"); + T_ASSERT_EQ(os_atomic_cmpxchg(&i, 1, 0, relaxed), true, "os_atomic_cmpxchg"); + os_atomic_rmw_loop(&i, a, b, relaxed, { + b = a; + }); + + T_ASSERT_EQ(os_atomic_inc_orig(&old_i, relaxed), 0, "atomic inc"); + T_ASSERT_EQ(os_atomic_cmpxchg(&old_i, 1, 0, relaxed), true, "os_atomic_cmpxchg"); + os_atomic_rmw_loop(&old_i, a, b, relaxed, { + b = a; + }); + + T_ASSERT_EQ(os_atomic_inc_orig(&v_i, relaxed), 0, "atomic inc"); + T_ASSERT_EQ(os_atomic_cmpxchg(&v_i, 1, 0, relaxed), true, "os_atomic_cmpxchg"); + os_atomic_rmw_loop(&v_i, a, b, relaxed, { + b = a; + }); +} diff --git a/tests/os_proc.c b/tests/os_proc.c index 9f2f0cea7..68ddbb560 100644 --- a/tests/os_proc.c +++ b/tests/os_proc.c @@ -3,6 +3,8 @@ #include #include #include +#include +#include T_GLOBAL_META(T_META_RUN_CONCURRENTLY(true)); @@ -44,8 +46,57 @@ T_DECL(test_os_proc_available_memory, "Basic available memory") vm_info.limit_bytes_remaining, remainingBytes); } #else + +/* + * os_proc_available_memory is only available on embedded. + * But the underlying syscall works on macOS to support catalyst + * extensions. So we test the syscall directly here. + */ +extern uint64_t __memorystatus_available_memory(void); + +static int +set_memlimit(pid_t pid, int32_t limit_mb) +{ + memorystatus_memlimit_properties_t mmprops; + + memset(&mmprops, 0, sizeof(memorystatus_memlimit_properties_t)); + + mmprops.memlimit_active = limit_mb; + mmprops.memlimit_inactive = limit_mb; + + /* implies we want to set fatal limits */ + mmprops.memlimit_active_attr |= MEMORYSTATUS_MEMLIMIT_ATTR_FATAL; + mmprops.memlimit_inactive_attr |= MEMORYSTATUS_MEMLIMIT_ATTR_FATAL; + return memorystatus_control(MEMORYSTATUS_CMD_SET_MEMLIMIT_PROPERTIES, pid, 0, &mmprops, sizeof(mmprops)); +} T_DECL(test_os_proc_available_memory, "Basic available memory") { - T_SKIP("Not available on macOS"); + uint64_t available_memory; + int ret; + pid_t pid = getpid(); + static const size_t kLimitMb = 1024; + + /* + * Should return 0 unless an proccess is both memory managed and has a + * hard memory limit. 
+ */ + ret = memorystatus_control(MEMORYSTATUS_CMD_SET_PROCESS_IS_MANAGED, pid, 0, NULL, 0); + T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "memorystatus_control"); + + available_memory = __memorystatus_available_memory(); + T_ASSERT_EQ(available_memory, 0ULL, "__memorystatus_available_memory == 0"); + + ret = memorystatus_control(MEMORYSTATUS_CMD_SET_PROCESS_IS_MANAGED, pid, 1, NULL, 0); + T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "memorystatus_control"); + available_memory = __memorystatus_available_memory(); + T_ASSERT_EQ(available_memory, 0ULL, "__memorystatus_available_memory == 0"); + + /* + * Should not return 0 for managed procs with a hard memory limit. + */ + ret = set_memlimit(pid, kLimitMb); + T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "memorystatus_control"); + available_memory = __memorystatus_available_memory(); + T_ASSERT_NE(available_memory, 0ULL, "__memorystatus_available_memory != 0"); } #endif diff --git a/tests/os_refcnt.c b/tests/os_refcnt.c index 36263be20..fecfebe8e 100644 --- a/tests/os_refcnt.c +++ b/tests/os_refcnt.c @@ -11,6 +11,9 @@ #define OS_REFCNT_DEBUG 1 #define STRESS_TESTS 0 +#pragma clang diagnostic ignored "-Watomic-implicit-seq-cst" +#pragma clang diagnostic ignored "-Wc++98-compat" + void handle_panic(const char *func, char *str, ...); #define panic(...) handle_panic(__func__, __VA_ARGS__) @@ -50,15 +53,13 @@ T_DECL(os_refcnt, "Basic atomic refcount") T_ASSERT_EQ_UINT(x, 0, "returned released"); os_ref_init(&rc, NULL); - x = os_ref_retain_try(&rc); - T_ASSERT_GT_INT(x, 0, "try retained"); + T_ASSERT_TRUE(os_ref_retain_try(&rc), "try retained"); (void)os_ref_release(&rc); (void)os_ref_release(&rc); T_QUIET; T_ASSERT_EQ_UINT(os_ref_get_count(&rc), 0, "release"); - x = os_ref_retain_try(&rc); - T_ASSERT_EQ_INT(x, 0, "try failed"); + T_ASSERT_FALSE(os_ref_retain_try(&rc), "try failed"); } T_DECL(refcnt_raw, "Raw refcount") @@ -83,15 +84,13 @@ T_DECL(refcnt_raw, "Raw refcount") T_ASSERT_EQ_UINT(x, 0, "returned released"); os_ref_init_raw(&rc, NULL); - x = os_ref_retain_try_raw(&rc, NULL); - T_ASSERT_GT_INT(x, 0, "try retained"); + T_ASSERT_TRUE(os_ref_retain_try_raw(&rc, NULL), "try retained"); (void)os_ref_release_raw(&rc, NULL); (void)os_ref_release_raw(&rc, NULL); T_QUIET; T_ASSERT_EQ_UINT(os_ref_get_count_raw(&rc), 0, "release"); - x = os_ref_retain_try_raw(&rc, NULL); - T_ASSERT_EQ_INT(x, 0, "try failed"); + T_ASSERT_FALSE(os_ref_retain_try_raw(&rc, NULL), "try failed"); } T_DECL(refcnt_locked, "Locked refcount") @@ -132,66 +131,47 @@ T_DECL(refcnt_raw_locked, "Locked raw refcount") T_ASSERT_EQ_UINT(x, 0, "returned released"); } -T_DECL(refcnt_mask_locked, "Locked bitwise refcount") -{ - const os_ref_count_t b = 12; - os_ref_atomic_t rc; - os_ref_count_t reserved = 0xaaa; - os_ref_init_count_mask(&rc, NULL, 1, reserved, b); - - os_ref_retain_locked_mask(&rc, NULL, b); - os_ref_retain_locked_mask(&rc, NULL, b); - T_ASSERT_EQ_UINT(os_ref_get_count_mask(&rc, b), 3, "retain increased count"); - - os_ref_count_t x = os_ref_release_locked_mask(&rc, NULL, b); - T_ASSERT_EQ_UINT(os_ref_get_count_mask(&rc, b), 2, "release decreased count"); - T_ASSERT_EQ_UINT(x, 2, "release returned correct count"); - T_ASSERT_EQ_UINT(rc & ((1U << b) - 1), reserved, "Reserved bits not modified"); - - (void)os_ref_release_locked_mask(&rc, NULL, b); - x = os_ref_release_locked_mask(&rc, NULL, b); - T_ASSERT_EQ_UINT(os_ref_get_count_mask(&rc, b), 0, "released"); - T_ASSERT_EQ_UINT(x, 0, "returned released"); - T_ASSERT_EQ_UINT(rc & ((1U << b) - 1), reserved, "Reserved bits not modified"); -} - 
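/*
 * Editor's note (not part of the patch): a minimal illustration, in plain integer
 * arithmetic rather than the os_ref API, of the packing that the bitwise refcount
 * tests around this hunk check: the low `bits` bits of the word hold caller-owned
 * reserved flag bits, the remaining high bits hold the reference count, and retains
 * and releases must leave the reserved bits untouched. All names here are made up
 * for the example.
 */
#include <stdint.h>
#include <assert.h>

typedef uint32_t example_ref_atomic_t;

static inline uint32_t
example_count(example_ref_atomic_t rc, uint32_t bits)
{
	return rc >> bits;               /* count lives above the reserved bits */
}

static inline uint32_t
example_flags(example_ref_atomic_t rc, uint32_t bits)
{
	return rc & ((1U << bits) - 1);  /* reserved flag bits live below the count */
}

static void
example_bitwise_packing(void)
{
	const uint32_t bits = 3;                 /* three reserved flag bits */
	const uint32_t reserved = 0x5;           /* arbitrary flag pattern, fits in `bits` bits */
	example_ref_atomic_t rc = (1U << bits) | reserved;  /* count = 1, flags = reserved */

	rc += (1U << bits);                      /* a retain adds 1 << bits, flags untouched */
	assert(example_count(rc, bits) == 2);
	assert(example_flags(rc, bits) == reserved);

	rc -= (1U << bits);                      /* a release subtracts the same amount */
	assert(example_count(rc, bits) == 1);
	assert(example_flags(rc, bits) == reserved);
}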
static void do_bitwise_test(const os_ref_count_t bits) { os_ref_atomic_t rc; os_ref_count_t reserved = 0xaaaaaaaaU & ((1U << bits) - 1); - os_ref_init_count_mask(&rc, NULL, 1, reserved, bits); + + T_LOG("do_bitwise_test(nbits:%d, reserved:%#x)", bits, reserved); + + os_ref_init_count_mask(&rc, bits, NULL, 1, reserved); T_ASSERT_EQ_UINT(os_ref_get_count_mask(&rc, bits), 1, "[%u bits] refcount initialized", bits); - os_ref_retain_mask(&rc, NULL, bits); - os_ref_retain_mask(&rc, NULL, bits); + os_ref_retain_mask(&rc, bits, NULL); + os_ref_retain_mask(&rc, bits, NULL); T_ASSERT_EQ_UINT(os_ref_get_count_mask(&rc, bits), 3, "retain increased count"); - os_ref_count_t x = os_ref_release_mask(&rc, NULL, bits); + os_ref_count_t x = os_ref_release_mask(&rc, bits, NULL); T_ASSERT_EQ_UINT(x, 2, "release returned correct count"); - os_ref_release_live_mask(&rc, NULL, bits); + os_ref_release_live_mask(&rc, bits, NULL); T_ASSERT_EQ_UINT(os_ref_get_count_mask(&rc, bits), 1, "release_live decreased count"); - x = os_ref_release_mask(&rc, NULL, bits); + x = os_ref_release_mask(&rc, bits, NULL); T_ASSERT_EQ_UINT(os_ref_get_count_mask(&rc, bits), 0, "released"); T_ASSERT_EQ_UINT(x, 0, "returned released"); T_ASSERT_EQ_UINT(rc & ((1U << bits) - 1), reserved, "Reserved bits not modified"); - os_ref_init_count_mask(&rc, NULL, 1, reserved, bits); - x = os_ref_retain_try_mask(&rc, NULL, bits); - T_ASSERT_GT_INT(x, 0, "try retained"); + os_ref_init_count_mask(&rc, bits, NULL, 1, reserved); + T_ASSERT_TRUE(os_ref_retain_try_mask(&rc, bits, 0, NULL), "try retained"); + if (reserved) { + T_ASSERT_FALSE(os_ref_retain_try_mask(&rc, bits, reserved, NULL), "try reject"); + } - (void)os_ref_release_mask(&rc, NULL, bits); - (void)os_ref_release_mask(&rc, NULL, bits); + (void)os_ref_release_mask(&rc, bits, NULL); + (void)os_ref_release_mask(&rc, bits, NULL); T_QUIET; T_ASSERT_EQ_UINT(os_ref_get_count_mask(&rc, bits), 0, "release"); - x = os_ref_retain_try_mask(&rc, NULL, bits); - T_ASSERT_EQ_INT(x, 0, "try failed"); + T_ASSERT_FALSE(os_ref_retain_try_mask(&rc, bits, 0, NULL), "try fail"); - T_ASSERT_EQ_UINT(rc & ((1U << bits) - 1), reserved, "Reserved bits not modified"); + T_ASSERT_EQ_UINT(os_ref_get_bits_mask(&rc, bits), reserved, "Reserved bits not modified"); } T_DECL(refcnt_bitwise, "Bitwise refcount") @@ -206,7 +186,7 @@ T_DECL(refcnt_bitwise, "Bitwise refcount") const os_ref_count_t nbits = 3; const os_ref_count_t count = 5; const os_ref_count_t bits = 7; - os_ref_init_count_mask(&rc, NULL, count, bits, nbits); + os_ref_init_count_mask(&rc, nbits, NULL, count, bits); os_ref_count_t mask = (1U << nbits) - 1; T_ASSERT_EQ_UINT(rc & mask, bits, "bits correctly initialized"); diff --git a/tests/osptr.cpp b/tests/osptr.cpp deleted file mode 100644 index 054b8693c..000000000 --- a/tests/osptr.cpp +++ /dev/null @@ -1,772 +0,0 @@ -#pragma clang diagnostic push -#pragma clang diagnostic ignored "-Wc++11-extensions" - -#include -#include -#include -#include -#include - -#if 0 -# define OSPTR_LOG T_LOG -#elif 0 -# define OSPTR_LOG printf -#else -# define OSPTR_LOG(x...) 
do { } while(0) -#endif - -T_GLOBAL_META( - T_META_NAMESPACE("osptr"), - T_META_CHECK_LEAKS(false), - T_META_RUN_CONCURRENTLY(true) - ); - -static int num_instances = 0; -static int num_retains = 0; -static int num_releases = 0; - -class OSMetaClassBase -{ - static int id_counter; - static OSMetaClassBase *freelist; - -public: - int inst_id; - mutable int refcount; - mutable OSMetaClassBase *next; - static void *type_id; - - OSMetaClassBase() : refcount(1), next(nullptr) - { - inst_id = id_counter++; - num_instances++; - OSPTR_LOG("[%p, %d] constructed\n", this, inst_id); - } - - virtual ~OSMetaClassBase() - { - OSPTR_LOG("[%p, %d] destroyed\n", this, inst_id); - } - - virtual void - retain() const - { - T_QUIET; T_EXPECT_GT_INT(refcount, 0, "Instance resurrected"); - refcount++; - num_retains++; - OSPTR_LOG("[%p, %d] retain, refcount=%d\n", this, inst_id, refcount); - } - - virtual void - release() const - { - T_QUIET; T_EXPECT_GT_INT(refcount, 0, "Double free"); - refcount--; - num_releases++; - OSPTR_LOG("[%p, %d] release, refcount=%d\n", this, inst_id, refcount); - - /* - * Don't delete the object, but keep it around so that we - * can detect double frees - */ - if (refcount == 0) { - num_instances--; - this->next = freelist; - freelist = const_cast(this); - } - } - - virtual void - taggedRetain(void *tag) const - { - OSPTR_LOG("tag[%p] ", tag); - retain(); - } - - virtual void - taggedRelease(void *tag) const - { - OSPTR_LOG("tag[%p] ", tag); - release(); - } -}; - -int OSMetaClassBase::id_counter; -OSMetaClassBase *OSMetaClassBase::freelist; - -void *OSMetaClassBase::type_id; - -#define OSTypeID(T) T::type_id -#define OSTypeAlloc(T) new T -#define OSDynamicCast(T, p) dynamic_cast(p) - -#define LIBKERN_SMART_POINTERS -#include - -class Base : public OSMetaClassBase { -public: - Base() : OSMetaClassBase() - { - } -}; - -class Derived : public Base { -public: - Derived() : Base() - { - } -}; - -class Other : public OSMetaClassBase { -public: - Other() : OSMetaClassBase() - { - } -}; - -typedef OSPtr BasePtr; -typedef OSPtr DerivedPtr; -typedef OSPtr OtherPtr; - -static void -default_constructor() -{ - BasePtr a; - T_ASSERT_NULL(a.get(), "Default NULL construction"); - T_ASSERT_EQ_INT(num_instances, 0, "No instances created"); -} - -static void -null_constructor() -{ - BasePtr a(nullptr); - T_ASSERT_NULL(a.get(), "Default NULL construction"); - T_ASSERT_EQ_INT(num_instances, 0, "No instances created"); -} - -static void -raw_constructor() -{ - Base *a = new Base(); - T_ASSERT_EQ_INT(num_instances, 1, "Created instance"); - - { - BasePtr p(a); - - T_ASSERT_EQ_INT(num_instances, 1, "No new instance"); - T_ASSERT_EQ_PTR(p.get(), a, "osptr bound to correct object"); - T_ASSERT_EQ_INT(a->refcount, 2, "Object refcount incremented"); - } - - T_ASSERT_EQ_INT(a->refcount, 1, "Object refcount decremented"); - a->release(); - T_ASSERT_EQ_INT(num_instances, 0, "All instances released"); -} - -static void -alloc() -{ - BasePtr a = BasePtr::alloc(); - - T_ASSERT_NOTNULL(a.get(), "osptr seated"); - T_ASSERT_EQ_INT(num_instances, 1, "Instance created"); - T_ASSERT_EQ_INT(a->refcount, 1, "Reference created"); -} - -static void -destroy() -{ - { - BasePtr a = BasePtr::alloc(); - T_ASSERT_EQ_INT(num_instances, 1, "Instance created"); - } - - T_ASSERT_EQ_INT(num_instances, 0, "All instances released"); -} - -static void -copy() -{ - BasePtr a = BasePtr::alloc(); - BasePtr b; - int a_id = a->inst_id; - - BasePtr a_copy(a); - - T_ASSERT_EQ_INT(a_copy->inst_id, a_id, NULL); - T_ASSERT_EQ_INT(a->refcount, 2, 
NULL); - T_ASSERT_EQ_INT(a_copy->refcount, 2, NULL); - T_ASSERT_EQ_INT(num_instances, 1, NULL); - T_EXPECT_EQ_INT(num_retains, 1, NULL); - - BasePtr b_copy(b); - T_ASSERT_NULL(b_copy.get(), "Copy null osptr"); - - T_ASSERT_EQ_INT(num_instances, 1, NULL); - T_EXPECT_EQ_INT(num_retains, 1, NULL); - - BasePtr a_copy2 = a; - T_ASSERT_EQ_PTR(a_copy2.get(), a.get(), NULL); - - T_ASSERT_EQ_INT(num_instances, 1, NULL); - T_EXPECT_EQ_INT(num_retains, 2, NULL); - T_EXPECT_EQ_INT(num_releases, 0, NULL); -} - -static void -copy_subclass() -{ - auto a = DerivedPtr::alloc(); - BasePtr b(a); - - T_ASSERT_EQ_PTR(a.get(), b.get(), NULL); - T_ASSERT_EQ_INT(b->refcount, 2, NULL); - T_ASSERT_EQ_INT(num_instances, 1, NULL); - - a = nullptr; - T_ASSERT_NOTNULL(b.get(), NULL); - T_ASSERT_EQ_INT(b->refcount, 1, NULL); - T_ASSERT_EQ_INT(num_instances, 1, NULL); -} - -static void -assign() -{ - int a_id, b_id; - - BasePtr p; - BasePtr a = BasePtr::alloc(); - BasePtr b = BasePtr::alloc(); - - a_id = a->inst_id; - b_id = b->inst_id; - - p = a; - - T_ASSERT_EQ_PTR(p.get(), a.get(), "Assigned osptr references same object"); - T_ASSERT_EQ_INT(p->inst_id, a_id, NULL); - T_ASSERT_EQ_INT(a->refcount, 2, "Assigned osptr bumps refcount"); - T_QUIET; T_ASSERT_TRUE(b->refcount == 1, NULL); - - p = b; - - T_ASSERT_EQ_PTR(p.get(), b.get(), "Assigned osptr references same object"); - T_ASSERT_EQ_INT(p->inst_id, b_id, NULL); - T_ASSERT_EQ_INT(a->refcount, 1, "Previous assignee drops reference"); - T_ASSERT_EQ_INT(b->refcount, 2, "New assignee bumps reference"); - - T_ASSERT_EQ_INT(a->inst_id, a_id, NULL); - T_ASSERT_EQ_INT(b->inst_id, b_id, NULL); - - a = nullptr; - - T_ASSERT_EQ_INT(num_instances, 1, "Assignment to null releases object"); - - b = nullptr; - p = nullptr; - - T_ASSERT_EQ_INT(num_instances, 0, "All instances released"); -} - -static void -assign_raw() -{ - Base *a1 = new Base(); - Base *a2 = new Base(); - - { - BasePtr p; - - p = a1; - T_ASSERT_EQ_PTR(p.get(), a1, NULL); - T_ASSERT_EQ_INT(a1->refcount, 2, NULL); - T_ASSERT_EQ_INT(a2->refcount, 1, NULL); - - p = a2; - T_ASSERT_EQ_PTR(p.get(), a2, NULL); - T_ASSERT_EQ_INT(a1->refcount, 1, NULL); - T_ASSERT_EQ_INT(a2->refcount, 2, NULL); - } - - T_ASSERT_EQ_INT(a1->refcount, 1, NULL); - T_ASSERT_EQ_INT(a2->refcount, 1, NULL); - - a1->release(); - a2->release(); - - T_ASSERT_EQ_INT(num_instances, 0, "All instances released"); -} - -static void -assign_null() -{ - BasePtr a = BasePtr::alloc(); - T_ASSERT_EQ_INT(num_instances, 1, NULL); - - a = nullptr; - - T_ASSERT_NULL(a.get(), NULL); - T_ASSERT_EQ_INT(num_instances, 0, "No instances created"); - - a = BasePtr::alloc(); - BasePtr b(a.get()); - - T_ASSERT_EQ_INT(a->refcount, 2, NULL); - T_ASSERT_EQ_INT(num_instances, 1, NULL); - - b = nullptr; - - T_ASSERT_EQ_INT(a->refcount, 1, NULL); - T_ASSERT_EQ_INT(num_instances, 1, NULL); - - a = nullptr; - - T_ASSERT_EQ_INT(num_instances, 0, "All instances released"); -} - -static void -assign_subclass() -{ - int a_id, b_id; - - OSPtr base; - BasePtr a = BasePtr::alloc(); - BasePtr b = BasePtr::alloc(); - - a_id = a->inst_id; - b_id = b->inst_id; - - base = a; - - T_ASSERT_TRUE(base.get() == static_cast(a.get()), NULL); - T_ASSERT_TRUE(base->inst_id == a_id, NULL); - T_ASSERT_TRUE(a->refcount == 2, NULL); - T_ASSERT_TRUE(b->refcount == 1, NULL); - - base = b; - - T_ASSERT_TRUE(base.get() == static_cast(b.get()), NULL); - T_ASSERT_TRUE(base->inst_id == b_id, NULL); - T_ASSERT_TRUE(a->refcount == 1, NULL); - T_ASSERT_TRUE(b->refcount == 2, NULL); - - T_ASSERT_TRUE(a->inst_id == 
a_id, NULL); - T_ASSERT_TRUE(b->inst_id == b_id, NULL); - - a = nullptr; - - T_ASSERT_TRUE(num_instances == 1, NULL); - - b = nullptr; - base = nullptr; - - T_ASSERT_EQ_INT(num_instances, 0, "All instances released"); -} - -static void -assign_compatible() -{ - OSPtr a = OSPtr::alloc(); - OSPtr b = a; - T_ASSERT_EQ_PTR(a.get(), b.get(), NULL); - - OSPtr c = OSPtr::alloc(); - OSPtr d = c; - T_ASSERT_EQ_PTR(c.get(), d.get(), NULL); -} - -static void -move() -{ - OSPtr a = OSPtr::alloc(); - int a_id = a->inst_id; - - OSPtr b(os::move(a)); - - T_ASSERT_TRUE(a.get() == NULL, NULL); - T_ASSERT_TRUE(b->inst_id == a_id, NULL); - T_ASSERT_TRUE(b->refcount == 1, NULL); - T_ASSERT_TRUE(num_instances == 1, NULL); - T_EXPECT_EQ_INT(num_retains, 0, NULL); -} - -static void -move_assign() -{ - OSPtr a = OSPtr::alloc(); - OSPtr b = OSPtr::alloc(); - int a_id = a->inst_id; - int b_id = b->inst_id; - - OSPtr d; - - d = os::move(a); - - T_ASSERT_TRUE(a.get() == NULL, NULL); - T_ASSERT_TRUE(d->inst_id == a_id, NULL); - T_ASSERT_TRUE(d->refcount == 1, NULL); - T_ASSERT_TRUE(num_instances == 2, NULL); - - d = os::move(b); - T_ASSERT_TRUE(a.get() == NULL, NULL); - T_ASSERT_TRUE(b.get() == NULL, NULL); - T_ASSERT_TRUE(d->inst_id == b_id, NULL); - T_ASSERT_TRUE(d->refcount == 1, NULL); - T_ASSERT_TRUE(num_instances == 1, NULL); - T_EXPECT_EQ_INT(num_retains, 0, NULL); -} - -static void -move_assign_null() -{ - BasePtr a = BasePtr::alloc(); - BasePtr b = a; - - T_EXPECT_EQ_INT(num_retains, 1, NULL); - - a = os::move(nullptr); - - T_ASSERT_TRUE(a.get() == NULL, NULL); - T_ASSERT_TRUE(b->refcount == 1, NULL); - - b = os::move(nullptr); - - T_ASSERT_EQ_INT(num_instances, 0, "All instances released"); - T_EXPECT_EQ_INT(num_retains, 1, NULL); -} - -static void -move_assign_raw() -{ - BasePtr a = BasePtr::alloc(); - Base *b = new Base; - Base *tmp = b; - - T_ASSERT_EQ_INT(num_instances, 2, NULL); - - a = os::move(tmp); - - T_ASSERT_EQ_INT(num_instances, 1, NULL); - T_ASSERT_NULL(tmp, NULL); - T_ASSERT_EQ_PTR(a.get(), b, NULL); - T_ASSERT_EQ_INT(a->refcount, 2, NULL); - b->release(); - T_ASSERT_EQ_INT(a->refcount, 1, NULL); -} - -static void -move_assign_subclass() -{ - auto a = DerivedPtr::alloc(); - BasePtr b; - - b = os::move(a); - - T_ASSERT_NULL(a.get(), NULL); - T_ASSERT_NOTNULL(b.get(), NULL); - T_ASSERT_EQ_INT(b->refcount, 1, NULL); - T_ASSERT_EQ_INT(num_instances, 1, NULL); -} - -static void -move_assign_self() -{ - OSPtr a = OSPtr::alloc(); - int a_id = a->inst_id; - - a = os::move(a); - - T_ASSERT_NOTNULL(a.get(), "osptr seated"); - T_ASSERT_TRUE(a->inst_id == a_id, NULL); - T_ASSERT_TRUE(a->refcount == 1, NULL); - T_ASSERT_TRUE(num_instances == 1, NULL); - T_EXPECT_EQ_INT(num_retains, 0, NULL); -} - -static void -test_const_cast() -{ - OSPtr a = OSPtr::alloc(); - - OSPtr b; - - b = a.const_pointer_cast(); - - T_ASSERT_TRUE(a.get() == b.get(), NULL); - T_ASSERT_TRUE(a->refcount == 2, NULL); - T_ASSERT_TRUE(b->refcount == 2, NULL); - - T_ASSERT_TRUE(num_instances == 1, NULL); - T_EXPECT_EQ_INT(num_retains, 1, NULL); -} - -static void -const_cast_move() -{ - OSPtr a = OSPtr::alloc(); - int a_id = a->inst_id; - - OSPtr b; - - b = os::move(a).const_pointer_cast(); - - T_ASSERT_TRUE(a.get() == NULL, NULL); - T_ASSERT_TRUE(b->inst_id == a_id, NULL); - T_ASSERT_TRUE(b->refcount == 1, NULL); - - T_ASSERT_TRUE(num_instances == 1, NULL); - T_EXPECT_EQ_INT(num_retains, 0, NULL); -} - -static void -const_cast_move_self() -{ - BasePtr a = BasePtr::alloc(); - int a_id = a->inst_id; - - a = os::move(a).const_pointer_cast(); - 
- T_ASSERT_NOTNULL(a.get(), "osptr seated"); - T_ASSERT_TRUE(a->inst_id == a_id, NULL); - T_ASSERT_TRUE(a->refcount == 1, NULL); - T_ASSERT_TRUE(num_instances == 1, NULL); - T_ASSERT_TRUE(num_retains == 0, NULL); -} - -static void -test_static_cast() -{ - DerivedPtr a = DerivedPtr::alloc(); - - BasePtr b; - - b = a.static_pointer_cast(); - - T_ASSERT_TRUE(a.get() == b.get(), NULL); - T_ASSERT_TRUE(a->refcount == 2, NULL); - T_ASSERT_TRUE(b->refcount == 2, NULL); - - T_ASSERT_TRUE(num_instances == 1, NULL); - T_EXPECT_TRUE(num_retains == 1, NULL); -} - -static void -static_cast_move() -{ - DerivedPtr a = DerivedPtr::alloc(); - int a_id = a->inst_id; - - BasePtr b; - - b = os::move(a).static_pointer_cast(); - - T_ASSERT_NULL(a.get(), NULL); - T_ASSERT_EQ_INT(b->inst_id, a_id, NULL); - T_ASSERT_EQ_INT(b->refcount, 1, NULL); - - T_ASSERT_EQ_INT(num_instances, 1, NULL); - T_EXPECT_EQ_INT(num_retains, 0, NULL); -} - -static void -static_cast_move_self() -{ - BasePtr a = BasePtr::alloc(); - int a_id = a->inst_id; - - a = os::move(a).static_pointer_cast(); - - T_ASSERT_NOTNULL(a.get(), "osptr seated"); - T_ASSERT_TRUE(a->inst_id == a_id, NULL); - T_ASSERT_TRUE(a->refcount == 1, NULL); - T_ASSERT_TRUE(num_instances == 1, NULL); - T_ASSERT_TRUE(num_retains == 0, NULL); -} - -static void -tagged_ptr() -{ - OSTaggedPtr a; - auto b = OSTaggedPtr::alloc(); - - T_ASSERT_NULL(a.get(), NULL); - T_ASSERT_NOTNULL(b.get(), NULL); - - T_ASSERT_TRUE(typeid(a.get()) == typeid(Base *), NULL); - T_ASSERT_TRUE(typeid(b.get()) == typeid(Derived *), NULL); -} - -static void -attach() -{ - Base *a = new Base(); - BasePtr b; - b.attach(os::move(a)); - - T_ASSERT_NULL(a, NULL); - T_ASSERT_NOTNULL(b.get(), NULL); - T_ASSERT_EQ_INT(b->refcount, 1, NULL); - T_ASSERT_EQ_INT(num_instances, 1, NULL); - T_ASSERT_EQ_INT(num_retains, 0, NULL); - - b.attach(new Base); - T_ASSERT_NOTNULL(b.get(), NULL); - T_ASSERT_EQ_INT(b->refcount, 1, NULL); - T_ASSERT_EQ_INT(num_instances, 1, NULL); - T_ASSERT_EQ_INT(num_retains, 0, NULL); - T_ASSERT_EQ_INT(num_releases, 1, NULL); -} - -static void -detach() -{ - BasePtr a = BasePtr::alloc(); - Base *p = a.detach(); - - T_ASSERT_NULL(a.get(), NULL); - T_ASSERT_NOTNULL(p, NULL); - T_ASSERT_EQ_INT(p->refcount, 1, NULL); - T_ASSERT_EQ_INT(num_instances, 1, NULL); - T_ASSERT_EQ_INT(num_retains, 0, NULL); - T_ASSERT_EQ_INT(num_releases, 0, NULL); - - BasePtr b(os::move(p), os::no_retain); // re-seat so that 'p' gets freed -} - -static void -foreign() -{ - auto a = OSPtr::alloc(); - auto b = OSTaggedPtr::alloc(); - - void *a_ptr = a.get(); - void *b_ptr = b.get(); - - a.swap(b); - - T_ASSERT_EQ_PTR(b.get(), a_ptr, NULL); - T_ASSERT_EQ_PTR(a.get(), b_ptr, NULL); - T_ASSERT_EQ_INT(a->refcount, 1, NULL); - T_ASSERT_EQ_INT(b->refcount, 1, NULL); - T_ASSERT_EQ_INT(num_instances, 2, NULL); - T_ASSERT_GE_INT(num_retains, 2, NULL); -} - -static void -test_dynamic_cast() -{ - auto a = DerivedPtr::alloc(); - T_ASSERT_NOTNULL(a.get(), NULL); - BasePtr b = a; - - auto c = b.dynamic_pointer_cast(); - T_ASSERT_NOTNULL(c.get(), NULL); - - T_ASSERT_EQ_INT(c->refcount, 3, NULL); - T_ASSERT_EQ_INT(num_instances, 1, NULL); - - auto d = OtherPtr::alloc(); - auto e = d.dynamic_pointer_cast(); - auto f = OSDynamicCastPtr(OtherPtr::alloc()); - - T_ASSERT_NULL(e.get(), NULL); - T_ASSERT_NULL(f.get(), NULL); - - T_ASSERT_EQ_INT(num_instances, 2, NULL); - T_ASSERT_EQ_INT(d->refcount, 1, NULL); - - auto g = OSDynamicCastPtr(DerivedPtr::alloc()); - T_ASSERT_EQ_INT(num_instances, 3, NULL); - T_ASSERT_EQ_INT(g->refcount, 1, 
NULL); -} - -#define OSPTR_TEST_DECL(name) \ - T_DECL(name, #name) { \ - num_instances = 0; \ - num_retains = 0; \ - num_releases = 0; \ - name(); \ - T_QUIET; T_ASSERT_EQ_INT(num_instances, 0, "Instance leak"); \ - } - -OSPTR_TEST_DECL(default_constructor) -OSPTR_TEST_DECL(null_constructor) -OSPTR_TEST_DECL(raw_constructor) -OSPTR_TEST_DECL(alloc) -OSPTR_TEST_DECL(destroy) -OSPTR_TEST_DECL(copy) -OSPTR_TEST_DECL(copy_subclass) -OSPTR_TEST_DECL(assign) -OSPTR_TEST_DECL(assign_raw) -OSPTR_TEST_DECL(assign_null) -OSPTR_TEST_DECL(assign_subclass) -OSPTR_TEST_DECL(assign_compatible) -OSPTR_TEST_DECL(move) -OSPTR_TEST_DECL(move_assign) -OSPTR_TEST_DECL(move_assign_null) -OSPTR_TEST_DECL(move_assign_raw) -OSPTR_TEST_DECL(move_assign_subclass) -OSPTR_TEST_DECL(move_assign_self) -OSPTR_TEST_DECL(test_const_cast) -OSPTR_TEST_DECL(const_cast_move) -OSPTR_TEST_DECL(const_cast_move_self) -OSPTR_TEST_DECL(test_static_cast) -OSPTR_TEST_DECL(static_cast_move) -OSPTR_TEST_DECL(static_cast_move_self) -OSPTR_TEST_DECL(tagged_ptr) -OSPTR_TEST_DECL(attach) -OSPTR_TEST_DECL(detach) -OSPTR_TEST_DECL(foreign) -OSPTR_TEST_DECL(test_dynamic_cast) - - -/* - * Test that the "trivial_abi" attribute works as expected - */ - -struct Complex { - uintptr_t val; - Complex() : val(71) - { - } - ~Complex() - { - } -}; - -struct Trivial { - uintptr_t val; - Trivial() : val(42) - { - } - ~Trivial() - { - } -} __attribute__((trivial_abi)); - -/* defined in osptr_helper.cpp */ -__BEGIN_DECLS -extern uintptr_t pass_trivial(Trivial); -extern uintptr_t pass_complex(Complex); -__END_DECLS -Trivial return_trivial(uintptr_t); -Complex return_complex(uintptr_t); - -T_DECL(trivial_abi, "Test trivial_abi classes are passed by value") -{ - Trivial a; - uintptr_t x = pass_trivial(a); - T_EXPECT_EQ_ULONG(a.val, x, "Trivial class argument passed by-value"); - - Complex b; - uintptr_t y = pass_complex(b); - T_EXPECT_NE_ULONG(b.val, y, "Non-trivial class argument passed by-reference"); - - Trivial c = return_trivial(55); - T_EXPECT_EQ_ULONG(c.val, 55UL, "Trivial class returned by-value"); - - Complex d = return_complex(99); - T_EXPECT_NE_ULONG(d.val, 99UL, "Non-trivial class returned by-reference"); -} - -#pragma clang diagnostic pop diff --git a/tests/osptr_compat.cpp b/tests/osptr_compat.cpp new file mode 100644 index 000000000..845dfb78a --- /dev/null +++ b/tests/osptr_compat.cpp @@ -0,0 +1,18 @@ +// +// Make sure we can #include OSPtr.h under various version of the C++ Standard. +// + +#include +#include + +T_GLOBAL_META( + T_META_NAMESPACE("osptr"), + T_META_CHECK_LEAKS(false), + T_META_RUN_CONCURRENTLY(true) + ); + +#define CONCAT_PRIM(x, y) x ## y +#define CONCAT(x, y) CONCAT_PRIM(x, y) +T_DECL(CONCAT(osptr_compat_, OSPTR_STD), "osptr.compat") { + T_PASS("OSPtr compatibility test passed"); +} diff --git a/tests/osptr_dumb.cpp b/tests/osptr_dumb.cpp deleted file mode 100644 index 8cb7e4f29..000000000 --- a/tests/osptr_dumb.cpp +++ /dev/null @@ -1,80 +0,0 @@ -#include -#include -#include -#include -#include - -#if 0 -# define OSPTR_LOG T_LOG -#elif 0 -# define OSPTR_LOG printf -#else -# define OSPTR_LOG(x...) 
do { } while(0) -#endif - -T_GLOBAL_META( - T_META_NAMESPACE("osptr"), - T_META_CHECK_LEAKS(false), - T_META_RUN_CONCURRENTLY(true) - ); - -class OSMetaClassBase -{ -public: - virtual void - retain() const - { - } - virtual void - release() const - { - } - virtual void - taggedRetain(void *tag) const - { - } - virtual void - taggedRelease(void *tag) const - { - } - - static void *type_id; -}; - -void *OSMetaClassBase::type_id; - -#define OSTypeAlloc(T) new T -#define OSTypeID(T) T::type_id - -#include - -class Base : public OSMetaClassBase { -public: - Base() : OSMetaClassBase() - { - } -}; - -class Derived : public Base { -public: - Derived() : Base() - { - } -}; - -typedef OSPtr BasePtr; -typedef OSPtr DerivedPtr; - -T_DECL(dumb_osptr, "Dumb OSPtrs work") -{ - BasePtr x = nullptr; - T_ASSERT_EQ_PTR(x, nullptr, NULL); - T_ASSERT_TRUE(typeid(BasePtr) == typeid(Base *), NULL); - T_ASSERT_TRUE(typeid(DerivedPtr) == typeid(Derived *), NULL); - - OSTaggedPtr y = nullptr; - OSTaggedPtr z = nullptr; - T_ASSERT_EQ_PTR(y, nullptr, NULL); - T_ASSERT_TRUE(typeid(y) == typeid(Base *), NULL); - T_ASSERT_TRUE(typeid(z) == typeid(Derived *), NULL); -} diff --git a/tests/osptr_helper.cpp b/tests/osptr_helper.cpp deleted file mode 100644 index 28eef3dce..000000000 --- a/tests/osptr_helper.cpp +++ /dev/null @@ -1,24 +0,0 @@ -#include - -extern "C" { -uintptr_t -pass_trivial(uintptr_t x) -{ - return x; -} -uintptr_t -pass_complex(uintptr_t x) -{ - return x; -} -uintptr_t -_Z14return_trivialm(uintptr_t x) -{ - return x; -} -uintptr_t -_Z14return_complexm(uintptr_t x) -{ - return x; -} -} diff --git a/tests/perf_vmfault.c b/tests/perf_vmfault.c index d0f64ab0a..db0613f96 100644 --- a/tests/perf_vmfault.c +++ b/tests/perf_vmfault.c @@ -161,9 +161,8 @@ map_mem_regions_multiple(int fault_type, size_t memsize) } region_len *= pgsize; - int flags = VM_MAKE_TAG((i % 2)? VM_TAG1 : VM_TAG2) | MAP_ANON | MAP_PRIVATE; - - memblock = (char *)mmap(NULL, region_len, PROT_READ | PROT_WRITE, flags, -1, 0); + int fd = VM_MAKE_TAG((i % 2)? VM_TAG1 : VM_TAG2); + memblock = (char *)mmap(NULL, region_len, PROT_READ | PROT_WRITE, MAP_ANON | MAP_PRIVATE, fd, 0); T_QUIET; T_ASSERT_NE((void *)memblock, MAP_FAILED, "mmap"); memregion_config_per_thread[i].region_addr = memblock; memregion_config_per_thread[i].shared_region_addr = 0; diff --git a/tests/pfz.c b/tests/pfz.c new file mode 100644 index 000000000..fc3c14e54 --- /dev/null +++ b/tests/pfz.c @@ -0,0 +1,287 @@ +/* + * Copyright (c) 2020 Apple Inc. All rights reserved. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. + * + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. 
+ * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ + */ + + +#include + +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +T_GLOBAL_META(T_META_RUN_CONCURRENTLY(true)); + +#if TARGET_OS_OSX && defined(_COMM_PAGE_TEXT_ATOMIC_ENQUEUE) + +/* Keys and discriminators */ +#define COMMPAGE_PFZ_BASE_AUTH_KEY ptrauth_key_process_independent_code +#define COMMPAGE_PFZ_FN_AUTH_KEY ptrauth_key_function_pointer +#define COMMPAGE_PFZ_BASE_DISCRIMINATOR ptrauth_string_discriminator("pfz") + +/* Auth and sign macros */ +#define SIGN_COMMPAGE_PFZ_BASE_PTR(ptr) \ + ptrauth_sign_unauthenticated(ptr, COMMPAGE_PFZ_BASE_AUTH_KEY, COMMPAGE_PFZ_BASE_DISCRIMINATOR) +#define AUTH_COMMPAGE_PFZ_BASE_PTR(ptr) \ + ptrauth_auth_data(ptr, COMMPAGE_PFZ_BASE_AUTH_KEY, COMMPAGE_PFZ_BASE_DISCRIMINATOR) +#define SIGN_COMMPAGE_PFZ_FUNCTION_PTR(ptr) \ + ptrauth_sign_unauthenticated(ptr, COMMPAGE_PFZ_FN_AUTH_KEY, 0) + +static void *commpage_pfz_base = NULL; + +static void * +get_pfz_base(void) +{ + void *pfz_base = NULL; + size_t s = sizeof(void *); + + int ret = sysctlbyname("kern.pfz", &pfz_base, &s, NULL, 0); + T_ASSERT_POSIX_SUCCESS(ret, "sysctlbyname(kern.pfz)"); + + commpage_pfz_base = SIGN_COMMPAGE_PFZ_BASE_PTR(pfz_base); + T_LOG("pfz base = 0x%llx\n", commpage_pfz_base); +} + +static void +undefined_function(void) +{ + // We can use the same commpage_pfz_base as parent since the PFZ is slide + // once per boot and is same across all processes + void (*undefined)(void); + uintptr_t addr = (uintptr_t) (void *) AUTH_COMMPAGE_PFZ_BASE_PTR(commpage_pfz_base); + addr += _COMM_PAGE_TEXT_ATOMIC_DEQUEUE; + addr += 4; // Jump ahead + undefined = SIGN_COMMPAGE_PFZ_FUNCTION_PTR((void *)addr); + + return undefined(); +} + +typedef struct { + void *next; + char *str; +} QueueNode; + +T_DECL(test_arm_pfz, "Validate that ARM PFZ is mapped in", + T_META_CHECK_LEAKS(false), T_META_IGNORECRASHES(".*undefined_function*"), + T_META_ENABLED(false) /* rdar://62615792 */) +{ + static dispatch_once_t pred; + dispatch_once(&pred, ^{ + commpage_pfz_base = get_pfz_base(); + }); + + OSFifoQueueHead head = OS_ATOMIC_FIFO_QUEUE_INIT; + char *str1 = "String 1", *str2 = "String 2"; + QueueNode node1 = { 0, str1 }; + QueueNode node2 = { 0, str2 }; + + OSAtomicFifoEnqueue(&head, &node1, 0); + OSAtomicFifoEnqueue(&head, &node2, 0); + QueueNode *node_ptr = OSAtomicFifoDequeue(&head, 0); + T_ASSERT_EQ(strcmp(node_ptr->str, str1), 0, "Dequeued first node correctly"); + + node_ptr = OSAtomicFifoDequeue(&head, 0); + T_ASSERT_EQ(strcmp(node_ptr->str, str2), 0, "Dequeued second node correctly"); + + node_ptr = OSAtomicFifoDequeue(&head, 0); + T_ASSERT_EQ(node_ptr, NULL, "Dequeuing from empty list correctly"); + + int child_pid = 0; + if ((child_pid = fork()) == 0) { // Child should call undefined function + return undefined_function(); + } else { + int status = 0; + wait(&status); + + T_ASSERT_EQ(!WIFEXITED(status), true, "Did not exit cleanly"); + T_ASSERT_EQ(WIFSIGNALED(status), true, "Exited due to signal"); + T_LOG("Signal number = %d\n", 
WTERMSIG(status)); + } +} + +T_DECL(test_rdar_65270017, "Testing for rdar 65270017", + T_META_CHECK_LEAKS(false), T_META_ENABLED(false) /* rdar://65270017 */) +{ + static dispatch_once_t pred; + dispatch_once(&pred, ^{ + commpage_pfz_base = get_pfz_base(); + }); + + struct OSAtomicFifoHeadWrapper { + // Embedded OSFifoQueueHead structure inside the structure + void *first; + void *last; + int opaque; + + int data; + } wrapped_head = { + .first = NULL, + .last = NULL, + .opaque = 0, + .data = 0xfeed + }; + + char *str1 = "String 1", *str2 = "String 2"; + QueueNode node1 = { 0, str1 }; + QueueNode node2 = { 0, str2 }; + + OSAtomicFifoEnqueue(&wrapped_head, &node1, 0); + T_ASSERT_EQ(wrapped_head.data, 0xfeed, "data is valid"); + + OSAtomicFifoEnqueue(&wrapped_head, &node2, 0); + T_ASSERT_EQ(wrapped_head.data, 0xfeed, "data is valid"); + + QueueNode *node_ptr = OSAtomicFifoDequeue(&wrapped_head, 0); + T_ASSERT_EQ(strcmp(node_ptr->str, str1), 0, "Dequeued first node correctly"); + T_ASSERT_EQ(wrapped_head.data, 0xfeed, "data is valid"); + + node_ptr = OSAtomicFifoDequeue(&wrapped_head, 0); + T_ASSERT_EQ(strcmp(node_ptr->str, str2), 0, "Dequeued second node correctly"); + T_ASSERT_EQ(wrapped_head.data, 0xfeed, "data is valid"); + + node_ptr = OSAtomicFifoDequeue(&wrapped_head, 0); + T_ASSERT_EQ(node_ptr, NULL, "Dequeuing from empty list correctly"); + T_ASSERT_EQ(wrapped_head.data, 0xfeed, "data is valid"); +} + +#define WIDE 50ll +#define SMALL 2000ll + +void +preheat(dispatch_queue_t dq) +{ + dispatch_apply(WIDE, dq, ^(size_t i) { + sleep(1); + }); +} + +typedef struct elem { + long data1; + struct elem *link; + int data2; +} elem_t; + +static size_t offset = offsetof(elem_t, link); +static elem_t elements[WIDE][SMALL]; + +T_DECL(test_65270017_contended, "multithreaded testing for radar 65270017") +{ + dispatch_queue_t global_q = dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_HIGH, 0); + dispatch_queue_t queue = dispatch_queue_create("com.apple.libctests.threaded", 0); + uint64_t __block t = 0; + + struct OSAtomicFifoHeadWrapper { + // Embedded OSFifoQueueHead structure inside the structure + void *first; + void *last; + int opaque; + + int data; + }; + + struct OSAtomicFifoHeadWrapper wrapped_q_head1 = { + .first = NULL, + .last = NULL, + .opaque = 0, + .data = 0xfeed + }; + OSFifoQueueHead *q1 = (OSFifoQueueHead *) &wrapped_q_head1; + + struct OSAtomicFifoHeadWrapper wrapped_q_head2 = { + .first = NULL, + .last = NULL, + .opaque = 0, + .data = 0xdead + }; + OSFifoQueueHead *q2 = (OSFifoQueueHead *) &wrapped_q_head2; + + t = 0; + T_LOG("Preheating thread pool"); + + preheat(global_q); + + T_LOG("Starting contended pfz test"); + + dispatch_apply(WIDE, global_q, ^(size_t i) { + dispatch_apply(SMALL, global_q, ^(size_t idx) { + OSAtomicFifoEnqueue(q1, &(elements[i][idx]), offset); // contended enqueue on q1 + }); + + uint32_t count = 0; + elem_t *p = NULL; + do { + p = OSAtomicFifoDequeue(q1, offset); + T_QUIET; T_ASSERT_EQ(wrapped_q_head1.data, 0xfeed, "q1 data is valid"); + if (p) { + OSAtomicFifoEnqueue(q2, p, offset); + T_QUIET; T_ASSERT_EQ(wrapped_q_head2.data, 0xdead, "q2 data is valid"); + count++; + } + } while (p != NULL); + + dispatch_sync(queue, ^{ + t += count; + }); + }); + T_ASSERT_EQ(t, ((uint64_t)WIDE * (uint64_t)SMALL), "OSAtomicFifoEnqueue"); + + t = 0; + dispatch_apply(WIDE, global_q, ^(size_t i) { + uint32_t count = 0; + elem_t *p = NULL; + do { + p = OSAtomicFifoDequeue(q2, offset); + T_QUIET; T_ASSERT_EQ(wrapped_q_head2.data, 0xdead, "q2 data is valid"); + if (p) { + 
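+				/* count every element drained from q2; the total is checked against WIDE * SMALL below */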
count++; + } + } while (p != NULL); + dispatch_sync(queue, ^{ + t += count; + }); + }); + + T_ASSERT_EQ(t, ((uint64_t)WIDE * (uint64_t)SMALL), "OSAtomicFifoDequeue"); + + dispatch_release(queue); +} + +#else + +T_DECL(test_arm_pfz, "Validate that ARM PFZ is mapped in", + T_META_CHECK_LEAKS(false)) +{ + T_SKIP("No PFZ, _COMM_PAGE_TEXT_ATOMIC_ENQUEUE doesn't exist"); +} + +#endif diff --git a/tests/pid_for_task_test.c b/tests/pid_for_task_test.c new file mode 100644 index 000000000..8f8437eb9 --- /dev/null +++ b/tests/pid_for_task_test.c @@ -0,0 +1,17 @@ +#include +#include +#include + +T_DECL(pid_for_task_test, "Test pid_for_task with task name port") +{ + kern_return_t kr; + mach_port_t tname; + pid_t pid; + + kr = task_name_for_pid(mach_task_self(), getpid(), &tname); + T_EXPECT_EQ(kr, 0, "task_name_for_pid should succeed on current pid"); + pid_for_task(tname, &pid); + T_EXPECT_EQ(pid, getpid(), "pid_for_task should return the same value as getpid()"); + + mach_port_deallocate(mach_task_self(), tname); +} diff --git a/tests/pipe_read_infloop_55437634.c b/tests/pipe_read_infloop_55437634.c new file mode 100644 index 000000000..4715c85f6 --- /dev/null +++ b/tests/pipe_read_infloop_55437634.c @@ -0,0 +1,53 @@ +#include +#include + +#include +#include +#include + +#include + +static void +too_long(int ignored) +{ + T_ASSERT_FAIL("child readv is blocked"); +} + +T_DECL(pipe_read_infloop_55437634, "Infinite loop in pipe_read") +{ + int p[2]; + char c = 0; + struct iovec iov = { + .iov_base = &c, + .iov_len = 0x100000000UL + }; + pid_t child; + int status = 0; + + T_SETUPBEGIN; + /* create a pipe with some data in it: */ + T_ASSERT_POSIX_SUCCESS(pipe(p), NULL); + T_ASSERT_POSIX_SUCCESS(write(p[1], "A", 1), NULL); + T_SETUPEND; + + T_ASSERT_POSIX_SUCCESS(child = fork(), NULL); + + if (!child) { + readv(p[0], &iov, 1); + exit(0); + } + + /* + * if the waitpid takes too long, the child is probably stuck in the + * infinite loop, so fail via too_long. 
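+	 * (The child's readv above passes a 0x100000000-byte iovec, which is what
+	 * makes it hang if the bug is present.)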
+ */ + T_ASSERT_NE(signal(SIGALRM, too_long), SIG_ERR, NULL); + T_ASSERT_POSIX_SUCCESS(alarm(10), NULL); + + /* this will hang if the bug is there: */ + T_ASSERT_POSIX_SUCCESS(waitpid(child, &status, 0), NULL); + + /* expecting a clean, zero exit: */ + T_ASSERT_TRUE(WIFEXITED(status), NULL); + T_ASSERT_EQ(WEXITSTATUS(status), 0, NULL); +} diff --git a/tests/posix_spawn_archpref.c b/tests/posix_spawn_archpref.c new file mode 100644 index 000000000..5cbfc1e26 --- /dev/null +++ b/tests/posix_spawn_archpref.c @@ -0,0 +1,63 @@ +#include +#include +#include +#include +#include + +static void +run_test(const char *name, cpu_type_t type, cpu_subtype_t subtype) +{ + int ret, pid; + posix_spawnattr_t spawnattr; + char path[1024]; + uint32_t size = sizeof(path); + cpu_type_t cpuprefs[] = { type }; + cpu_type_t subcpuprefs[] = { subtype }; + + T_QUIET; T_ASSERT_EQ(_NSGetExecutablePath(path, &size), 0, NULL); + T_QUIET; T_ASSERT_LT(strlcat(path, "_helper", size), (unsigned long)size, NULL); + + ret = posix_spawnattr_init(&spawnattr); + T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "%s: posix_spawnattr_init", name); + + ret = posix_spawnattr_setarchpref_np(&spawnattr, sizeof(cpuprefs) / sizeof(cpuprefs[0]), cpuprefs, subcpuprefs, NULL); + T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "%s: posix_spawnattr_setarchpref_np", name); + + ret = posix_spawn(&pid, path, NULL, &spawnattr, NULL, NULL); + T_ASSERT_EQ(ret, 0, "%s: posix_spawn should succeed", name); + + int wait_ret = 0; + ret = waitpid(pid, &wait_ret, 0); + T_QUIET; T_ASSERT_EQ(ret, pid, "%s: child pid", name); + + T_QUIET; T_ASSERT_EQ(WIFEXITED(wait_ret), 1, "%s: child process should have called exit()", name); + + if (subtype != CPU_SUBTYPE_ANY) { + T_ASSERT_EQ(WEXITSTATUS(wait_ret), subtype, "%s: child process should be running with %d subtype", name, subtype); + } + + ret = posix_spawnattr_destroy(&spawnattr); + T_QUIET; T_ASSERT_EQ(ret, 0, "%s: posix_spawnattr_destroy", name); +} + +T_DECL(posix_spawn_archpref, "verify posix_spawn_setarchpref_np can select slices") +{ +#if defined(__x86_64__) + run_test("x86_64", CPU_TYPE_X86_64, CPU_SUBTYPE_X86_64_ALL); +#endif /* defined(__x86_64__) */ +#if defined(__arm64__) && defined(__LP64__) + run_test("arm64", CPU_TYPE_ARM64, CPU_SUBTYPE_ARM64_ALL); +#endif /* defined(__arm64__) && defined(__LP64__) */ + +#if defined(__x86_64__) + run_test("any (x86_64)", CPU_TYPE_X86_64, CPU_SUBTYPE_ANY); +#elif defined(__arm64__) && defined(__LP64__) + run_test("any (arm64)", CPU_TYPE_ARM64, CPU_SUBTYPE_ANY); +#elif defined(__arm64__) + run_test("any (arm64_32)", CPU_TYPE_ARM64_32, CPU_SUBTYPE_ANY); +#elif defined(__arm__) + run_test("any (arm)", CPU_TYPE_ARM, CPU_SUBTYPE_ANY); +#else +#error unknown architecture +#endif +} diff --git a/tests/posix_spawn_archpref_helper.c b/tests/posix_spawn_archpref_helper.c new file mode 100644 index 000000000..2bca287ca --- /dev/null +++ b/tests/posix_spawn_archpref_helper.c @@ -0,0 +1,20 @@ +#include +/* + * Returns the subcpu type for the architecture for which the + * binary was compiled. 
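+ * The posix_spawn_archpref test reads this value back via WEXITSTATUS() and
+ * compares it against the cpu_subtype_t it requested for the child.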
+ */ +int +main(void) +{ +#if defined(__x86_64__) + return CPU_SUBTYPE_X86_64_ALL; +#elif defined(__arm64__) && defined(__LP64__) + return CPU_SUBTYPE_ARM64_ALL; +#elif defined(__arm64__) + return CPU_SUBTYPE_ARM64_32_ALL; +#elif defined(__arm__) + return CPU_SUBTYPE_ARM_V7K; +#else +#error unknown architecture +#endif +} diff --git a/tests/preoslog.c b/tests/preoslog.c new file mode 100644 index 000000000..e8615016c --- /dev/null +++ b/tests/preoslog.c @@ -0,0 +1,125 @@ +#include +#include +#include +#include +#include + +/* + * Any change to this structure must be reflected in iBoot / MacEFI / PanicDump / XNU Tests and vice versa. + */ +typedef struct __attribute__((packed)) { + uint32_t magic; /* g_valid_magic if valid */ + uint32_t size; /* Size of the preoslog buffer including the header */ + uint32_t offset; /* Write pointer. Indicates where in the buffer new log entry would go */ + uint8_t source; /* Indicates who filled in the buffer (e.g. iboot vs MacEFI) */ + uint8_t wrapped; /* If equal to 1, the preoslog ring buffer wrapped at least once */ + char data[]; /* log buffer */ +} preoslog_header_t; + +static const char* g_sysctl_kern_version = "kern.version"; +static const char* g_sysctl_kern_preoslog = "kern.preoslog"; +static const uint32_t g_valid_magic = 'LSOP'; + +/* + * Defines substrings to look up in preoslog buffer. + * To pass the test, one of the entries should match a substring in preoslog buffer. + */ +static const char* g_preoslog_buffer_string[] = {"serial output"}; + +static boolean_t +check_for_substrings(const char* string, size_t len) +{ + int i; + boolean_t res = FALSE; + + for (i = 0; i < (sizeof(g_preoslog_buffer_string) / sizeof(char*)); i++) { + res = res || strnstr(string, g_preoslog_buffer_string[i], len) == NULL ? FALSE : TRUE; + } + + return res; +} + +static boolean_t +is_development_kernel(void) +{ + int ret; + int dev = 0; + size_t dev_size = sizeof(dev); + + ret = sysctlbyname("kern.development", &dev, &dev_size, NULL, 0); + if (ret != 0) { + return FALSE; + } + + return dev != 0; +} + +/* + * Valid cases: + * 1. Development & Debug iBoot/macEFI provides a preoslog buffer. + * 2. Release iBoot/macEFI doesn't provide a presoslog buffer. + * 3. Development & Debug xnu provids kern.preoslog sysctl. + * 4. Release xnu doesn't provide kern.preoslog sysctl. 
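+ *
+ * Accordingly, the buffer contents below are only validated when a
+ * development/debug xnu exposes kern.preoslog and the boot loader handed
+ * over a non-empty buffer.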
+ */ + +T_DECL(test_preoslog, "Validate kern.preoslog sysctl has expected log content from the boot loader") +{ + int ret = 0; + size_t size = 0; + void *buffer = NULL; + preoslog_header_t *header = NULL; + char tmp = 0; + const char *lower_buffer = NULL; + size_t lower_buffer_size = 0; + const char *upper_buffer = NULL; + size_t upper_buffer_size = 0; + boolean_t found = FALSE; + + // kern.preoslog is writable + ret = sysctlbyname(g_sysctl_kern_preoslog, buffer, &size, &tmp, sizeof(tmp)); + T_ASSERT_POSIX_SUCCESS(ret, "kern.preoslog write check"); + + ret = sysctlbyname(g_sysctl_kern_preoslog, NULL, &size, NULL, 0); + if (!is_development_kernel()) { + // kern.preoslog mustn't exist on release builds of xnu + T_ASSERT_NE(ret, 0, "get size kern.preoslog ret != 0 on release builds"); + T_ASSERT_POSIX_ERROR(ret, ENOENT, " get size kern.preoslog errno==ENOENT on release builds"); + return; + } + + /* + * Everything below is applicable only to development & debug xnu + */ + + T_ASSERT_POSIX_SUCCESS(ret, "get size for kern.preoslog"); + if (size == 0) { + // No preoslog buffer available, valid case if iboot is release + return; + } + + buffer = calloc(size, sizeof(char)); + T_ASSERT_NOTNULL(buffer, "allocate buffer for preoslog"); + + ret = sysctlbyname(g_sysctl_kern_preoslog, buffer, &size, NULL, 0); + T_ASSERT_POSIX_SUCCESS(ret, "get preoslog buffer"); + + header = (preoslog_header_t *)buffer; + T_ASSERT_EQ(header->magic, g_valid_magic, "check preoslog header magic - expected %#x, given %#x", g_valid_magic, header->magic); + T_ASSERT_EQ(header->size, size, "check preoslog sizes - expected %zu, given %zu", size, header->size); + T_ASSERT_LT(header->offset, header->size - sizeof(*header), "check write offset"); + + lower_buffer = header->data; + lower_buffer_size = header->offset + 1; + upper_buffer = lower_buffer + lower_buffer_size; + upper_buffer_size = header->size - lower_buffer_size - sizeof(*header); + if (header->wrapped) { + found = check_for_substrings(upper_buffer, upper_buffer_size); + } + + found = found || check_for_substrings(lower_buffer, lower_buffer_size); + T_ASSERT_TRUE(found, "Verify buffer content"); + + free(buffer); + buffer = NULL; + header = NULL; +} diff --git a/tests/priority_queue.cpp b/tests/priority_queue.cpp new file mode 100644 index 000000000..fac35220e --- /dev/null +++ b/tests/priority_queue.cpp @@ -0,0 +1,285 @@ +#include +#include +#include +#include +#include +#include + +#define DEVELOPMENT 0 +#define DEBUG 0 +#define XNU_KERNEL_PRIVATE 1 + +#define OS_REFCNT_DEBUG 1 +#define STRESS_TESTS 0 + +#define __container_of(ptr, type, field) __extension__({ \ + const __typeof__(((type *)nullptr)->field) *__ptr = (ptr); \ + (type *)((uintptr_t)__ptr - offsetof(type, field)); \ + }) + +#pragma clang diagnostic ignored "-Watomic-implicit-seq-cst" +#pragma clang diagnostic ignored "-Wc++98-compat" + +#include "../osfmk/kern/macro_help.h" +#include "../osfmk/kern/priority_queue.h" +#include "../libkern/c++/priority_queue.cpp" + +T_GLOBAL_META(T_META_RUN_CONCURRENTLY(true)); + +static int +compare_numbers_descending(const void * a, const void * b) +{ + const uint16_t x = *(const uint16_t *)a; + const uint16_t y = *(const uint16_t *)b; + if (x > y) { + return -1; + } else if (x < y) { + return 1; + } else { + return 0; + } +} + +#define PRIORITY_QUEUE_NODES 8 + +typedef union test_node { + struct { + struct priority_queue_entry e; + uint32_t node_key; + }; + struct priority_queue_entry_sched ke; + struct priority_queue_entry_stable se; +} *test_node_t; + +static void 
+dump_pqueue_entry(priority_queue_entry_sched_t e, int depth) +{ + priority_queue_entry_sched_t t; + + printf("%*s [%02d] %p\n", depth * 4, "", e->key, (void *)e); + t = pqueue_sched_max_t::unpack_child(e); + if (t) { + dump_pqueue_entry(t, depth + 1); + } + while (e->next) { + e = e->next; + dump_pqueue_entry(e, depth); + } +} + +__unused +static void +dump_pqueue(struct priority_queue_sched_max *pq) +{ + dump_pqueue_entry(pq->pq_root, 0); + printf("\n"); +} + +T_DECL(priority_queue_sched_max, "Basic sched priority queue testing") +{ + /* Configuration for the test */ + static uint16_t priority_list[] = { 20, 3, 7, 6, 50, 2, 8, 12}; + + struct priority_queue_sched_max pq; + uint16_t increase_pri = 100; + uint16_t decrease_pri = 90; + uint16_t key = 0; + boolean_t update_result = false; + test_node_t node = NULL; + + priority_queue_init(&pq); + + /* Add all priorities to the first priority queue */ + for (int i = 0; i < PRIORITY_QUEUE_NODES; i++) { + node = new test_node; + T_QUIET; T_ASSERT_NOTNULL(node, NULL); + + priority_queue_entry_init(&node->ke); + priority_queue_entry_set_sched_pri(&pq, &node->ke, priority_list[i], 0); + priority_queue_insert(&pq, &node->ke); + } + + /* Test the priority increase operation by updating the last node added (7) */ + priority_queue_entry_set_sched_pri(&pq, &node->ke, increase_pri, 0); + update_result = priority_queue_entry_increased(&pq, &node->ke); + T_ASSERT_TRUE(update_result, "increase key updated root"); + key = priority_queue_max_sched_pri(&pq); + T_ASSERT_EQ(key, increase_pri, "verify priority_queue_entry_increased() operation"); + + /* Test the priority decrease operation by updating the last node added */ + priority_queue_entry_set_sched_pri(&pq, &node->ke, decrease_pri, 0); + update_result = priority_queue_entry_decreased(&pq, &node->ke); + T_ASSERT_TRUE(update_result, "decrease key updated root"); + key = priority_queue_max_sched_pri(&pq); + T_ASSERT_EQ(key, decrease_pri, "verify priority_queue_entry_decreased() operation"); + + /* Update our local priority list as well */ + priority_list[PRIORITY_QUEUE_NODES - 1] = decrease_pri; + + /* Sort the local list in descending order */ + qsort(priority_list, PRIORITY_QUEUE_NODES, sizeof(priority_list[0]), compare_numbers_descending); + + priority_queue_entry_sched_t k = NULL; + + node = pqe_element_fast(k, test_node, ke); + + /* Test the maximum operation by comparing max node with local list */ + for (int i = 0; i < PRIORITY_QUEUE_NODES; i++) { + key = priority_queue_max_sched_pri(&pq); + T_ASSERT_EQ(key, priority_list[i], "[%d] priority queue max node removal", i); + node = priority_queue_remove_max(&pq, test_node, ke); + delete node; + } + + T_ASSERT_TRUE(priority_queue_empty(&pq), "queue is empty"); + priority_queue_destroy(&pq, union test_node, ke, ^(test_node_t n) { + T_FAIL("Called with %p", n); + }); +} + +T_DECL(priority_queue_max, "Basic generic priority queue testing") +{ + /* Configuration for the test */ + static uint16_t priority_list[] = { 20, 3, 7, 6, 50, 2, 8, 12}; + + struct priority_queue_max pq; + uint16_t increase_pri = 100; + uint16_t decrease_pri = 90; + test_node_t result; + boolean_t update_result = false; + test_node_t node = NULL; + + priority_queue_compare_fn_t cmp_fn = + priority_heap_make_comparator(a, b, union test_node, e, { + if (a->node_key != b->node_key) { + return priority_heap_compare_ints(a->node_key, b->node_key); + } + return 0; + }); + + priority_queue_init(&pq, cmp_fn); + + /* Add all priorities to the first priority queue */ + for (int i = 0; i < 
PRIORITY_QUEUE_NODES; i++) { + node = new test_node; + T_QUIET; T_ASSERT_NOTNULL(node, NULL); + + priority_queue_entry_init(&node->e); + node->node_key = priority_list[i]; + priority_queue_insert(&pq, &node->e); + } + + /* Test the priority increase operation by updating the last node added (8) */ + node->node_key = increase_pri; + update_result = priority_queue_entry_increased(&pq, &node->e); + T_ASSERT_TRUE(update_result, "increase key updated root"); + result = priority_queue_max(&pq, union test_node, e); + T_ASSERT_EQ(result->node_key, increase_pri, "verify priority_queue_entry_increased() operation"); + + + /* Test the priority decrease operation by updating the last node added */ + node->node_key = decrease_pri; + update_result = priority_queue_entry_decreased(&pq, &node->e); + T_ASSERT_TRUE(update_result, "decrease key updated root"); + result = priority_queue_max(&pq, union test_node, e); + T_ASSERT_EQ(result->node_key, decrease_pri, "verify priority_queue_entry_decreased() operation"); + + /* Update our local priority list as well */ + priority_list[PRIORITY_QUEUE_NODES - 1] = decrease_pri; + + /* Sort the local list in descending order */ + qsort(priority_list, PRIORITY_QUEUE_NODES, sizeof(priority_list[0]), compare_numbers_descending); + + /* Test the maximum operation by comparing max node with local list */ + for (int i = 0; i < PRIORITY_QUEUE_NODES; i++) { + result = priority_queue_remove_max(&pq, union test_node, e); + T_ASSERT_EQ(result->node_key, priority_list[i], + "[%d] priority queue max node removal", i); + delete result; + } + + T_ASSERT_TRUE(priority_queue_empty(&pq), "queue is empty"); + priority_queue_destroy(&pq, union test_node, e, ^(test_node_t n) { + T_FAIL("Called with %p", n); + }); +} + +T_DECL(priority_queue_sched_stable_max, "Basic stable sched priority queue testing") +{ + /* Configuration for the test */ + static struct config { + uint16_t pri; + priority_queue_entry_sched_modifier_t modifier; + uint64_t stamp; + } config[] = { + { 20, PRIORITY_QUEUE_ENTRY_NONE, 8 }, + { 3, PRIORITY_QUEUE_ENTRY_NONE, 7 }, + { 3, PRIORITY_QUEUE_ENTRY_PREEMPTED, 6 }, + { 6, PRIORITY_QUEUE_ENTRY_NONE, 5 }, + { 50, PRIORITY_QUEUE_ENTRY_PREEMPTED, 4 }, + { 50, PRIORITY_QUEUE_ENTRY_PREEMPTED, 3 }, + { 50, PRIORITY_QUEUE_ENTRY_NONE, 2 }, + { 50, PRIORITY_QUEUE_ENTRY_NONE, 1 }, + }; + + struct priority_queue_sched_stable_max pq; + test_node_t node = NULL; + + priority_queue_init(&pq); + + /* Add all priorities to the first priority queue */ + for (int i = 0; i < PRIORITY_QUEUE_NODES; i++) { + node = new test_node; + T_QUIET; T_ASSERT_NOTNULL(node, NULL); + + priority_queue_entry_init(node); + node->se.stamp = config[i].stamp; + priority_queue_entry_set_sched_pri(&pq, &node->se, + config[i].pri, config[i].modifier); + priority_queue_insert(&pq, &node->se); + } + + /* Sort the local list in descending order */ + qsort_b(config, PRIORITY_QUEUE_NODES, sizeof(struct config), ^(const void *a, const void *b){ + const struct config &c1 = *(const struct config *)a; + const struct config &c2 = *(const struct config *)b; + if (c1.pri != c2.pri) { + return c1.pri < c2.pri ? 1 : -1; + } + if (c1.modifier != c2.modifier) { + return c1.modifier < c2.modifier ? 1 : -1; + } + if (c1.stamp != c2.stamp) { + if (c1.modifier) { + /* younger is better */ + return c1.stamp < c1.stamp ? 1 : -1; + } else { + /* older is better */ + return c1.stamp > c2.stamp ? 
1 : -1; + } + } + return 0; + }); + + /* Test the maximum operation by comparing max node with local list */ + for (int i = 0; i < PRIORITY_QUEUE_NODES; i++) { + node = priority_queue_max(&pq, union test_node, se); + T_LOG("[%d]: { pri: %2d, modifier: %d, stamp: %lld }\n", + i, config[i].pri, config[i].modifier, config[i].stamp); + auto pri = priority_queue_entry_sched_pri(&pq, &node->se); + T_ASSERT_EQ(pri, config[i].pri, + "[%d] priority queue max node removal", i); + auto modifier = priority_queue_entry_sched_modifier(&pq, &node->se); + T_ASSERT_EQ(modifier, config[i].modifier, + "[%d] priority queue max node removal", i); + T_ASSERT_EQ(node->se.stamp, config[i].stamp, + "[%d] priority queue max node removal", i); + priority_queue_remove_max(&pq, union test_node, se); + delete node; + } + + T_ASSERT_TRUE(priority_queue_empty(&pq), "queue is empty"); + priority_queue_destroy(&pq, union test_node, se, ^(test_node_t n) { + T_FAIL("Called with %p", n); + }); +} diff --git a/tests/proc_info.c b/tests/proc_info.c index 5ce0747eb..6014a2e2f 100644 --- a/tests/proc_info.c +++ b/tests/proc_info.c @@ -1851,7 +1851,7 @@ print_uptrs(int argc, char * const * argv) } } -T_DECL(proc_list_uptrs, "the kernel should return any up-pointers it knows about", T_META_ALL_VALID_ARCHS(YES)) +T_DECL(proc_list_uptrs, "the kernel should return any up-pointers it knows about") { if (argc > 0) { print_uptrs(argc, argv); diff --git a/tests/proc_info_udata.c b/tests/proc_info_udata.c index e482a848d..9cfe9bc7c 100644 --- a/tests/proc_info_udata.c +++ b/tests/proc_info_udata.c @@ -3,6 +3,7 @@ #include "../libsyscall/wrappers/libproc/libproc.h" #include #include +#include T_GLOBAL_META(T_META_RUN_CONCURRENTLY(true)); @@ -14,11 +15,11 @@ T_DECL(proc_udata_info, "Get and set a proc udata token"){ udata = token; ret = proc_udata_info(getpid(), PROC_UDATA_INFO_SET, &udata, sizeof(udata)); -#if CONFIG_EMBEDDED +#if !TARGET_OS_OSX T_WITH_ERRNO; - T_ASSERT_EQ_INT(ret, -1, "proc_udata_info PROC_UDATA_INFO_SET returns error on non-macOS"); - T_SKIP("Remaining tests are only supported on macOS"); -#endif /* CONFIG_EMBEDDED */ + T_ASSERT_EQ_INT(ret, -1, "proc_udata_info PROC_UDATA_INFO_SET returns error on non-supported platforms"); + T_SKIP("Remaining tests are only supported on platforms with CONFIG_PROC_UDATA_STORAGE configured"); +#endif T_WITH_ERRNO; T_ASSERT_EQ_INT(ret, 0, "proc_udata_info PROC_UDATA_INFO_SET"); diff --git a/tests/proc_pidpath_audittoken.c b/tests/proc_pidpath_audittoken.c new file mode 100644 index 000000000..8df206309 --- /dev/null +++ b/tests/proc_pidpath_audittoken.c @@ -0,0 +1,105 @@ +#include +#include +#include +#include +#include +#include +#include + +#undef USE_AUDIT_TOKEN_FOR_PID + +#ifdef USE_AUDIT_TOKEN_FOR_PID +static bool +audit_token_for_pid(pid_t pid, audit_token_t *token) +{ + kern_return_t err; + task_t task; + mach_msg_type_number_t info_size = TASK_AUDIT_TOKEN_COUNT; + + err = task_for_pid(mach_task_self(), pid, &task); + if (err != KERN_SUCCESS) { + printf("task_for_pid returned %d\n", err); + return false; + } + + err = task_info(task, TASK_AUDIT_TOKEN, (integer_t *)token, &info_size); + if (err != KERN_SUCCESS) { + printf("task_info returned %d\n", err); + return false; + } + + return true; +} + +#else + +static int +idversion_for_pid(pid_t pid) +{ + struct proc_uniqidentifierinfo uniqidinfo = {0}; + + int ret = proc_pidinfo(pid, PROC_PIDUNIQIDENTIFIERINFO, 0, &uniqidinfo, sizeof(uniqidinfo)); + if (ret <= 0) { + perror("proc_pidinfo(PROC_PIDUNIQIDENTIFIERINFO)"); + 
T_ASSERT_FAIL("proc_pidinfo(%d, PROC_PIDUNIQIDENTIFIERINFO) failed unexpectedly with errno %d", pid, errno); + } + +#ifdef NOTDEF + printf("%s>pid = %d, p_uniqueid = %lld\n", __FUNCTION__, pid, uniqidinfo.p_uniqueid); + printf("%s>pid = %d, p_idversion = %d\n", __FUNCTION__, pid, uniqidinfo.p_idversion); +#endif + + return uniqidinfo.p_idversion; +} +#endif + +static void +show_pidpaths(void) +{ + char buffer[PROC_PIDPATHINFO_MAXSIZE] = {}; + int count = 0; + + for (pid_t pid = 1; ((pid < 1000) && (count <= 25)); pid++) { + int ret = proc_pidpath(pid, buffer, sizeof(buffer)); + if (ret <= 0) { + if (errno == ESRCH) { + continue; + } + T_ASSERT_FAIL("proc_pidpath(%d) failed unexpectedly with errno %d", pid, errno); + } + count++; + + memset(buffer, 0, sizeof(buffer)); + + audit_token_t token = { 0 }; +#ifdef USE_AUDIT_TOKEN_FOR_PID + if (!audit_token_for_pid(pid, &token)) { + T_ASSERT_FAIL("audit_token_for_pid(%d) failed", pid); + continue; + } +#else + token.val[5] = (unsigned int)pid; + token.val[7] = (unsigned int)idversion_for_pid(pid); +#endif + ret = proc_pidpath_audittoken(&token, buffer, sizeof(buffer)); + if (ret <= 0) { + if (errno == ESRCH) { + continue; + } + T_ASSERT_FAIL("proc_pidpath_audittoken(%d) failed unexpectedly with errno %d", pid, errno); + } + T_PASS("%5d %s\n", pid, buffer); + + token.val[7]--; /* Change to idversion so the next call fails */ + ret = proc_pidpath_audittoken(&token, buffer, sizeof(buffer)); + T_ASSERT_LE(ret, 0, "proc_pidpath_audittoken() failed as expected due to incorrect idversion"); + T_ASSERT_EQ(errno, ESRCH, "errno is ESRCH as expected"); + } +} + +T_DECL(proc_pidpath_audittoken, "Test proc_pidpath_audittoken()", T_META_ASROOT(false)) +{ + show_pidpaths(); + T_PASS("Successfully tested prod_pidpath_audittoken()"); + T_END; +} diff --git a/tests/proc_rlimit.c b/tests/proc_rlimit.c new file mode 100644 index 000000000..74c49f0b4 --- /dev/null +++ b/tests/proc_rlimit.c @@ -0,0 +1,271 @@ +/* + * cd $XNU/tests + * xcrun -sdk macosx.internal/iphoneos.internal make proc_rlimit LDFLAGS="-ldarwintest" + */ +#include +#include +#include +#include +#include +#include + +/* Defined in but not visible to user space */ +#define RLIMIT_NLIMITS 9 + +/* Defined in and visible to user space */ +static const char *RESOURCE_STRING[] = { + "RLIMIT_CPU", /* #define RLIMIT_CPU 0 */ + "RLIMIT_FSIZE", /* #define RLIMIT_FSIZE 1 */ + "RLIMIT_DATA", /* #define RLIMIT_DATA 2 */ + "RLIMIT_STACK", /* #define RLIMIT_STACK 3 */ + "RLIMIT_CORE", /* #define RLIMIT_CORE 4 */ + "RLIMIT_AS/RSS", /* #define RLIMIT_AS 5 */ + /* #define RLIMIT_RSS RLIMIT_AS */ + "RLIMIT_MEMLOCK", /* #define RLIMIT_MEMLOCK 6 */ + "RLIMIT_NPROC", /* #define RLIMIT_NPROC 7 */ + "RLIMIT_NOFILE" /* #define RLIMIT_NOFILE 8 */ +}; + +/* Change limit values by this arbitrary amount */ +#define LIMIT_DIFF 64 + +/* Limit type */ +#define SOFT_LIMIT 0 +#define HARD_LIMIT 1 + +/* Action on changing limit values */ +#define LOWER 0 +#define RAISE 1 + +static struct rlimit orig_rlimit[RLIMIT_NLIMITS]; + +/* Maximum number of open files allowed by normal user */ +static rlim_t maxfilesperproc; +static size_t maxfilesperproc_size = sizeof(maxfilesperproc); + +/* Maximum number of open files allowed by super user */ +static rlim_t maxfiles; +static size_t maxfiles_size = sizeof(maxfiles); + +/* Maximum number of simultaneous processes allowed by normal user */ +static rlim_t maxprocperuid; +static size_t maxprocperuid_size = sizeof(maxprocperuid); + +/* Maximum number of simultaneous processes allowed by super user */ 
+static rlim_t maxproc; +static size_t maxproc_size = sizeof(maxproc); + +static bool superuser = FALSE; + +static int +get_initial_rlimits(void) +{ + int err = -1; + int i; + + for (i = 0; i < RLIMIT_NLIMITS; i++) { + err = getrlimit(i, &orig_rlimit[i]); + T_QUIET; T_EXPECT_EQ(0, err, "getrlimit(%15s, soft: 0x%16llx, hard 0x%16llx) %s", RESOURCE_STRING[i], orig_rlimit[i].rlim_cur, orig_rlimit[i].rlim_max, err == 0 ? "" : strerror(errno)); + } + return err; +} + +static void +print_rlimits(bool initial_limits) +{ + int err; + int i; + + for (i = 0; i < RLIMIT_NLIMITS; i++) { + struct rlimit lim; + + if (initial_limits) { + lim = orig_rlimit[i]; + } else { + err = getrlimit(i, &lim); + T_QUIET; T_EXPECT_EQ(0, err, "getrlimit(%15s, soft: 0x%16llx, hard 0x%16llx) %s", RESOURCE_STRING[i], lim.rlim_cur, lim.rlim_max, err == 0 ? "" : strerror(errno)); + } + T_LOG("%35s soft: 0x%16llx hard 0x%16llx", RESOURCE_STRING[i], lim.rlim_cur, lim.rlim_max); + } +} + +/* + * Change "limit_type" of all of the process's "rlimit" by amount + * + * limit_type: SOFT_LIMIT/HARD_LIMIT + * amount: rlim_t + * action: RAISE/LOWER + */ +static void +change_rlimits(int limit_type, rlim_t amount, int action) +{ + int err = -1; + int i; + + for (i = 0; i < RLIMIT_NLIMITS; i++) { + struct rlimit newlim; // for setrlimit + struct rlimit verifylim; // for getrlimit + bool expect_failure = FALSE; + int expect_errno = 0; + + /* Get the current limit values */ + err = getrlimit(i, &newlim); + T_EXPECT_EQ(0, err, "getrlimit(%15s, soft: 0x%16llx, hard 0x%16llx) %s", RESOURCE_STRING[i], newlim.rlim_cur, newlim.rlim_max, err == 0 ? "" : strerror(errno)); + + /* Changing soft limit */ + if (limit_type == SOFT_LIMIT) { + if (action == RAISE) { + /* Raising soft limits to exceed hard limits is not allowed and we expect to see failure on setrlimit call later */ + if (newlim.rlim_cur + amount > newlim.rlim_max) { + expect_failure = TRUE; + expect_errno = EINVAL; + } + newlim.rlim_cur += amount; + } else if (action == LOWER) { + if (newlim.rlim_cur == 0) { + /* Soft limit might be 0 already, if so skip lowering it */ + } else { + newlim.rlim_cur -= amount; + } + } else { + T_FAIL("Unknown action on soft limit: %d", action); + } + } + /* Changing hard limit */ + else if (limit_type == HARD_LIMIT) { + if (action == RAISE) { + newlim.rlim_max += amount; + + /* Raising hard limits is not allowed for normal user and we expect to see failure on setrlimit call later */ + expect_failure = TRUE; + expect_errno = EPERM; + } else if (action == LOWER) { + if (newlim.rlim_max == 0) { + /* Hard limit might be 0 already, if so skip lowering it (e.g., RLIMIT_CORE on iOS) */ + } else { + newlim.rlim_max -= amount; + } + /* Soft limit might need to be changed as well since soft cannot be greater than hard */ + if (newlim.rlim_cur > newlim.rlim_max) { + newlim.rlim_cur = newlim.rlim_max; + } + } else { + T_FAIL("Unknown action on hard limit: %d", action); + } + } + /* Changing unknown limit type */ + else { + T_FAIL("Unknown limit type: %d", limit_type); + } + + /* Request the kernel to change limit values */ + err = setrlimit(i, &newlim); + + if (expect_failure) { + /* We expect the setrlimit call to fail */ + T_EXPECT_EQ(-1, err, "setrlimit(%15s, soft: 0x%16llx, hard 0x%16llx) failed as expected: %s", RESOURCE_STRING[i], newlim.rlim_cur, newlim.rlim_max, strerror(errno)); + T_EXPECT_EQ(expect_errno, errno, "Expect errno %d, errno returned %d", expect_errno, errno); + continue; + } else { + T_EXPECT_EQ(0, err, "setrlimit(%15s, soft: 0x%16llx, hard 
0x%16llx) %s", RESOURCE_STRING[i], newlim.rlim_cur, newlim.rlim_max, err == 0 ? "" : strerror(errno)); + } + + /* Verify the kernel correctly changed the limit values */ + err = getrlimit(i, &verifylim); + T_EXPECT_EQ(0, err, "getrlimit(%15s, soft: 0x%16llx, hard 0x%16llx) %s", RESOURCE_STRING[i], verifylim.rlim_cur, verifylim.rlim_max, err == 0 ? "" : strerror(errno)); + + /* The kernel forces the hard limit of RLIMIT_NOFILE to be at most maxfileperproc for normal user when changing the hard limit with setrlimit */ + if (i == RLIMIT_NOFILE && limit_type == HARD_LIMIT && newlim.rlim_max > maxfilesperproc) { + if (newlim.rlim_cur != verifylim.rlim_cur || + maxfilesperproc != verifylim.rlim_max) { + T_FAIL("Mismatch limit values %s despite a successful setrlimit call (setrlimit'd soft 0x%16llx hard 0x%16llx but getrlimit'd soft 0x%16llx hard 0x%16llx)", + RESOURCE_STRING[i], newlim.rlim_cur, newlim.rlim_max, verifylim.rlim_cur, verifylim.rlim_max); + } + } + /* The kernel forces the hard limit of RLIMIT_NPROC to be at most maxproc for normal user when changing either soft/hard limit with setrlimit */ + else if (i == RLIMIT_NPROC && newlim.rlim_max > maxprocperuid) { + if (newlim.rlim_cur != verifylim.rlim_cur || + maxprocperuid != verifylim.rlim_max) { + T_FAIL("Mismatch limit values %s despite a successful setrlimit call (setrlimit'd soft 0x%16llx hard 0x%16llx but getrlimit'd soft 0x%16llx hard 0x%16llx)", + RESOURCE_STRING[i], newlim.rlim_cur, newlim.rlim_max, verifylim.rlim_cur, verifylim.rlim_max); + } + } else { + if (newlim.rlim_cur != verifylim.rlim_cur || + newlim.rlim_max != verifylim.rlim_max) { + T_FAIL("Mismatch limit values %s despite a successful setrlimit call (setrlimit'd soft 0x%16llx hard 0x%16llx but getrlimit'd soft 0x%16llx hard 0x%16llx)", + RESOURCE_STRING[i], newlim.rlim_cur, newlim.rlim_max, verifylim.rlim_cur, verifylim.rlim_max); + } + } + } +} + +T_DECL(proc_rlimit, + "Test basic functionalities of the getrlimit and setrlimit") +{ + int err; + struct rlimit lim; + + T_SETUPBEGIN; + + if (geteuid() == 0) { + superuser = TRUE; + T_SKIP("This test should not be run as super user."); + } + + /* Use sysctl to query the real limits of RLIMIT_NOFILE/RLIMIT_NPROC for normal user on Apple's systems */ + err = sysctlbyname("kern.maxfilesperproc", &maxfilesperproc, &maxfilesperproc_size, NULL, 0); + T_EXPECT_EQ_INT(0, err, "maxfilesperproc: %llu", maxfilesperproc); + + err = sysctlbyname("kern.maxprocperuid", &maxprocperuid, &maxprocperuid_size, NULL, 0); + T_EXPECT_EQ_INT(0, err, "maxprocperuid: %llu", maxprocperuid); + + /* Use sysctl to query the real limits of RLIMIT_NOFILE/RLIMIT_NPROC for super user on Apple's systems (placeholder for adding super user tests) */ + err = sysctlbyname("kern.maxfiles", &maxfiles, &maxfiles_size, NULL, 0); + T_EXPECT_EQ_INT(0, err, "maxfiles: %llu", maxfiles); + + err = sysctlbyname("kern.maxproc", &maxproc, &maxproc_size, NULL, 0); + T_EXPECT_EQ_INT(0, err, "maxproc: %llu", maxproc); + + /* Issue getrlimit syscall to retrieve the initial resource limit values before calling setrlimit */ + err = get_initial_rlimits(); + T_EXPECT_EQ(0, err, "Obtained initial resource values."); + + /* Print out resource limit values to stdout for less-painful triage in case needed */ + T_LOG("Resource limits before the test:"); + print_rlimits(TRUE); + + T_SETUPEND; + + /* Lower soft limits by arbitrary amount */ + T_LOG("---------Lowering soft limits by 0x%x---------:\n", LIMIT_DIFF); + change_rlimits(SOFT_LIMIT, LIMIT_DIFF, LOWER); + + /* Raise soft 
limits back to the orginal values */ + T_LOG("---------Raising soft limits by 0x%x---------:\n", LIMIT_DIFF); + change_rlimits(SOFT_LIMIT, LIMIT_DIFF, RAISE); + + /* Lower hard limits */ + T_LOG("---------Lowering hard limits by 0x%x---------:", LIMIT_DIFF); + change_rlimits(HARD_LIMIT, LIMIT_DIFF, LOWER); + + /* Raise soft limits to exceed hard limits (setrlimit should fail, but the darwintest should pass) */ + T_LOG("---------Attempting to raised soft limits by 0x%x to exceed hard limits---------:", LIMIT_DIFF); + change_rlimits(SOFT_LIMIT, LIMIT_DIFF, RAISE); + + /* Raise hard limits (setrlimit should fail, but the darwintest should pass) */ + T_LOG("---------Attempting to raise hard limits by 0x%x---------:", LIMIT_DIFF); + change_rlimits(HARD_LIMIT, LIMIT_DIFF, RAISE); + + /* Get and set a non-existing resource limit */ + T_LOG("---------Accessing a non-existing resource---------:"); + err = getrlimit(RLIMIT_NLIMITS + 1, &lim); + T_EXPECT_EQ(-1, err, "Expect getrlimit to fail when accessing a non-existing resource: %s\n", strerror(errno)); + T_EXPECT_EQ(EINVAL, errno, "Expect errno %d, errno returned %d", EINVAL, errno); + + err = setrlimit(RLIMIT_NLIMITS + 1, &lim); + T_EXPECT_EQ(-1, err, "Expect setrlimit to fail when accessing a non-existing resource: %s\n", strerror(errno)); + T_EXPECT_EQ(EINVAL, errno, "Expect errno %d, errno returned %d", EINVAL, errno); + + T_LOG("Resource limits after the test:"); + print_rlimits(FALSE); +} diff --git a/tests/ptrauth-entitlements.plist b/tests/ptrauth-entitlements.plist new file mode 100644 index 000000000..03e0e3bda --- /dev/null +++ b/tests/ptrauth-entitlements.plist @@ -0,0 +1,8 @@ + + + + + com.apple.private.security.storage.AppBundles + + + diff --git a/tests/ptrauth_data_tests.c b/tests/ptrauth_data_tests.c new file mode 100644 index 000000000..bead1aa78 --- /dev/null +++ b/tests/ptrauth_data_tests.c @@ -0,0 +1,15 @@ +#include +#include + +T_GLOBAL_META(T_META_NAMESPACE("xnu.arm")); + +T_DECL(ptrauth_data_tests, "invoke the PAC unit tests", T_META_ASROOT(true)) +{ +#if __has_feature(ptrauth_calls) + int ret, dummy = 1; + ret = sysctlbyname("kern.run_ptrauth_data_tests", NULL, NULL, &dummy, sizeof(dummy)); + T_ASSERT_POSIX_SUCCESS(ret, "run ptrauth data tests"); +#else + T_SKIP("Running on non-ptrauth system. Skipping..."); +#endif //__has_feature(ptrauth_calls) +} diff --git a/tests/ptrauth_failure.c b/tests/ptrauth_failure.c new file mode 100644 index 000000000..035f73bcb --- /dev/null +++ b/tests/ptrauth_failure.c @@ -0,0 +1,47 @@ +/* + * Copyright (c) 2019 Apple Computer, Inc. All rights reserved. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. + * + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. 
+ * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ + */ +#include +#include +#include +#include +#include +#include +#include "exc_helpers.h" + +T_GLOBAL_META( + T_META_NAMESPACE("xnu.arm"), + T_META_RUN_CONCURRENTLY(true) + ); + + +T_DECL(thread_set_state_corrupted_pc, + "Test that ptrauth failures in thread_set_state() poison the respective register.") +{ + T_SKIP("Running on non-arm64e target, skipping..."); +} + diff --git a/tests/pwrite_avoid_sigxfsz_28581610.c b/tests/pwrite_avoid_sigxfsz_28581610.c index 63fd25d74..c03b43978 100644 --- a/tests/pwrite_avoid_sigxfsz_28581610.c +++ b/tests/pwrite_avoid_sigxfsz_28581610.c @@ -64,10 +64,9 @@ T_DECL(pwrite, "Tests avoiding SIGXFSZ with pwrite and odd offsets", T_SETUPEND; /* we want to get the EFBIG errno but without a SIGXFSZ signal */ - T_EXPECTFAIL; if (!sigsetjmp(xfsz_jmpbuf, 1)) { signal(SIGXFSZ, xfsz_signal); - ret = pwrite(fd, buffer, sizeof buffer, LONG_MAX); + ret = pwrite(fd, buffer, sizeof buffer, QUAD_MAX); T_ASSERT_TRUE(((ret == -1) && (errno == EFBIG)), "large offset %d", 13); } else { @@ -78,11 +77,6 @@ T_DECL(pwrite, "Tests avoiding SIGXFSZ with pwrite and odd offsets", /* Negative offsets are invalid, no SIGXFSZ signals required */ for (x = 0; offs[x] != 0; x++) { - /* only -1 gives the correct result */ - if (-1 != offs[x]) { - T_EXPECTFAIL; - } - if (!sigsetjmp(xfsz_jmpbuf, 1)) { signal(SIGXFSZ, xfsz_signal); ret = pwrite(fd, buffer, sizeof buffer, offs[x]); diff --git a/tests/rename_excl.c b/tests/rename_excl.c new file mode 100644 index 000000000..2d3d32556 --- /dev/null +++ b/tests/rename_excl.c @@ -0,0 +1,121 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +T_GLOBAL_META( + T_META_NAMESPACE("xnu.vfs"), + T_META_CHECK_LEAKS(false) + ); + +#define TEST_DIR "rename_dir" +#define TEST_FILE1 TEST_DIR "/file1" +#define TEST_FILE1_UC TEST_DIR "/FILE1" +#define TEST_FILE2 TEST_DIR "/file2" +#define TEST_FILE3_HL TEST_DIR "/file3" + +static void +cleanup(void) +{ + (void) remove(TEST_FILE1); + (void) remove(TEST_FILE1_UC); + (void) remove(TEST_FILE2); + (void) remove(TEST_FILE3_HL); + (void) rmdir(TEST_DIR); +} + +/* + * This unit-test validates the behavior of renamex_np() with RENAME_EXCL flag. + * On either a case-insensitve/case-sensitive volume: + * 1. rename from source to existing target should succeed when the change is + * only case-variant (for e.g rename_dir/file1 -> rename_dir/FILE1) + * 2. rename from source to existing target should fail with EEXIST + * 3. rename from source to existing target which is a hardlink of the source + * should fail with EEXIST + * + * On case-insensitive volume: + * 1. rename from source to itself should succeed + * (rename_dir/file1 -> rename_dir/file1) + * + * On case-sensitive volume: + * 1. 
rename from source to itself should fail with EEXIST + * (rename_dir/file1 -> rename_dir/file1) + */ + +T_DECL(rename_excl_with_case_variant, + "test renamex_np() with RENAME_EXCL flag for files with case variants") +{ + const char *tmpdir = dt_tmpdir(); + long case_sensitive_vol; + int err, saved_errno; + int fd; + + T_SETUPBEGIN; + + atexit(cleanup); + + T_ASSERT_POSIX_ZERO(chdir(tmpdir), + "Setup: changing to tmpdir: %s", tmpdir); + + T_ASSERT_POSIX_SUCCESS(mkdir(TEST_DIR, 0777), + "Setup: creating test dir: %s", TEST_DIR); + + T_WITH_ERRNO; + fd = open(TEST_FILE1, O_CREAT | O_RDWR, 0666); + T_ASSERT_TRUE(fd != -1, "Creating test file1: %s", TEST_FILE1); + + T_ASSERT_POSIX_SUCCESS(close(fd), "Closing test file1: %s", + TEST_FILE1); + + T_WITH_ERRNO; + fd = open(TEST_FILE2, O_CREAT | O_RDWR, 0666); + T_ASSERT_TRUE(fd != -1, "Creating test file2: %s", TEST_FILE2); + + T_ASSERT_POSIX_SUCCESS(close(fd), "Closing test file2: %s", + TEST_FILE2); + + T_ASSERT_POSIX_SUCCESS(link(TEST_FILE1, TEST_FILE3_HL), + "Creating hardlink for %s from source: %s", + TEST_FILE3_HL, TEST_FILE1); + + case_sensitive_vol = pathconf(TEST_DIR, _PC_CASE_SENSITIVE); + T_ASSERT_TRUE(case_sensitive_vol != -1, + "Checking if target volume is case-sensitive, is_case_sensitive: %ld", + case_sensitive_vol); + + T_SETUPEND; + + err = renamex_np(TEST_FILE1, TEST_FILE2, RENAME_EXCL); + saved_errno = errno; + T_ASSERT_TRUE((err == -1 && saved_errno == EEXIST), + "Renaming with RENAME_EXCL from source: %s to target: %s", + TEST_FILE1, TEST_FILE2); + + err = renamex_np(TEST_FILE1, TEST_FILE3_HL, RENAME_EXCL); + saved_errno = errno; + T_ASSERT_TRUE((err == -1 && saved_errno == EEXIST), + "Renaming with RENAME_EXCL from source: %s to hardlink target: %s", + TEST_FILE1, TEST_FILE3_HL); + + if (case_sensitive_vol) { + err = renamex_np(TEST_FILE1, TEST_FILE1, RENAME_EXCL); + saved_errno = errno; + T_ASSERT_TRUE((err == -1 && saved_errno == EEXIST), + "Renaming with RENAME_EXCL from source: %s to target: %s", + TEST_FILE1, TEST_FILE1); + } else { + T_ASSERT_POSIX_SUCCESS(renamex_np(TEST_FILE1, TEST_FILE1, RENAME_EXCL), + "Renaming with RENAME_EXCL from source: %s to target: %s", + TEST_FILE1, TEST_FILE1); + } + + T_ASSERT_POSIX_SUCCESS(renamex_np(TEST_FILE1, TEST_FILE1_UC, RENAME_EXCL), + "Renaming with RENAME_EXCL from source: %s to target: %s", + TEST_FILE1, TEST_FILE1_UC); +} diff --git a/tests/safe_allocation.cpp b/tests/safe_allocation.cpp new file mode 100644 index 000000000..c0a07bf78 --- /dev/null +++ b/tests/safe_allocation.cpp @@ -0,0 +1,11 @@ +// +// Test runner for all `safe_allocation` tests. 
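+// The individual T_DECLs live under tests/safe_allocation_src/; they share the
+// tracking_allocator, tracking_trapping_policy and CHECK() helpers (CHECK is a
+// thin wrapper around T_ASSERT_TRUE) defined in safe_allocation_src/test_utils.h.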
+// + +#include + +T_GLOBAL_META( + T_META_NAMESPACE("safe_allocation"), + T_META_CHECK_LEAKS(false), + T_META_RUN_CONCURRENTLY(true) + ); diff --git a/tests/safe_allocation_src/assign.copy.cpp b/tests/safe_allocation_src/assign.copy.cpp new file mode 100644 index 000000000..e9660d9f0 --- /dev/null +++ b/tests/safe_allocation_src/assign.copy.cpp @@ -0,0 +1,16 @@ +// +// Tests for +// safe_allocation& operator=(safe_allocation const&) = delete; +// + +#include +#include +#include +#include "test_utils.h" + +struct T { }; + +T_DECL(assign_copy, "safe_allocation.assign.copy") { + static_assert(!std::is_copy_assignable_v >); + T_PASS("safe_allocation.assign.copy passed"); +} diff --git a/tests/safe_allocation_src/assign.move.cpp b/tests/safe_allocation_src/assign.move.cpp new file mode 100644 index 000000000..c86b38b7e --- /dev/null +++ b/tests/safe_allocation_src/assign.move.cpp @@ -0,0 +1,143 @@ +// +// Tests for +// safe_allocation& operator=(safe_allocation&& other); +// + +#include +#include +#include "test_utils.h" +#include + +struct T { + int i; +}; + +template +static void +tests() +{ + // Move-assign non-null to non-null + { + { + tracked_safe_allocation from(10, libkern::allocate_memory); + T* memory = from.data(); + { + tracked_safe_allocation to(20, libkern::allocate_memory); + tracking_allocator::reset(); + + tracked_safe_allocation& ref = (to = std::move(from)); + CHECK(&ref == &to); + CHECK(to.data() == memory); + CHECK(to.size() == 10); + CHECK(from.data() == nullptr); + CHECK(from.size() == 0); + + CHECK(!tracking_allocator::did_allocate); + CHECK(tracking_allocator::deallocated_size == 20 * sizeof(T)); + tracking_allocator::reset(); + } + CHECK(tracking_allocator::deallocated_size == 10 * sizeof(T)); + tracking_allocator::reset(); + } + CHECK(!tracking_allocator::did_deallocate); + } + + // Move-assign null to non-null + { + { + tracked_safe_allocation from = nullptr; + { + tracked_safe_allocation to(20, libkern::allocate_memory); + tracking_allocator::reset(); + + tracked_safe_allocation& ref = (to = std::move(from)); + CHECK(&ref == &to); + CHECK(to.data() == nullptr); + CHECK(to.size() == 0); + CHECK(from.data() == nullptr); + CHECK(from.size() == 0); + + CHECK(!tracking_allocator::did_allocate); + CHECK(tracking_allocator::deallocated_size == 20 * sizeof(T)); + tracking_allocator::reset(); + } + CHECK(!tracking_allocator::did_deallocate); + tracking_allocator::reset(); + } + CHECK(!tracking_allocator::did_deallocate); + } + + // Move-assign non-null to null + { + { + tracked_safe_allocation from(10, libkern::allocate_memory); + T* memory = from.data(); + { + tracked_safe_allocation to = nullptr; + tracking_allocator::reset(); + + tracked_safe_allocation& ref = (to = std::move(from)); + CHECK(&ref == &to); + CHECK(to.data() == memory); + CHECK(to.size() == 10); + CHECK(from.data() == nullptr); + CHECK(from.size() == 0); + + CHECK(!tracking_allocator::did_allocate); + CHECK(!tracking_allocator::did_deallocate); + tracking_allocator::reset(); + } + CHECK(tracking_allocator::deallocated_size == 10 * sizeof(T)); + tracking_allocator::reset(); + } + CHECK(!tracking_allocator::did_deallocate); + } + + // Move-assign null to null + { + { + tracked_safe_allocation from = nullptr; + { + tracked_safe_allocation to = nullptr; + tracking_allocator::reset(); + + tracked_safe_allocation& ref = (to = std::move(from)); + CHECK(&ref == &to); + CHECK(to.data() == nullptr); + CHECK(to.size() == 0); + CHECK(from.data() == nullptr); + CHECK(from.size() == 0); + + 
CHECK(!tracking_allocator::did_allocate); + CHECK(!tracking_allocator::did_deallocate); + tracking_allocator::reset(); + } + CHECK(!tracking_allocator::did_deallocate); + tracking_allocator::reset(); + } + CHECK(!tracking_allocator::did_deallocate); + } + + // Move-assign to self + { + { + tracked_safe_allocation obj(10, libkern::allocate_memory); + T* memory = obj.data(); + + tracking_allocator::reset(); + tracked_safe_allocation& ref = (obj = std::move(obj)); + CHECK(&ref == &obj); + CHECK(obj.data() == memory); + CHECK(obj.size() == 10); + CHECK(!tracking_allocator::did_allocate); + CHECK(!tracking_allocator::did_deallocate); + tracking_allocator::reset(); + } + CHECK(tracking_allocator::deallocated_size == 10 * sizeof(T)); + } +} + +T_DECL(assign_move, "safe_allocation.assign.move") { + tests(); + tests(); +} diff --git a/tests/safe_allocation_src/assign.nullptr.cpp b/tests/safe_allocation_src/assign.nullptr.cpp new file mode 100644 index 000000000..07a13a149 --- /dev/null +++ b/tests/safe_allocation_src/assign.nullptr.cpp @@ -0,0 +1,46 @@ +// +// Tests for +// safe_allocation& operator=(std::nullptr_t); +// + +#include +#include +#include "test_utils.h" + +struct T { + int i; +}; + +template +static void +tests() +{ + // Assign to a non-null allocation + { + tracked_safe_allocation array(10, libkern::allocate_memory); + tracking_allocator::reset(); + + tracked_safe_allocation& ref = (array = nullptr); + CHECK(&ref == &array); + CHECK(array.data() == nullptr); + CHECK(array.size() == 0); + CHECK(tracking_allocator::did_deallocate); + } + + // Assign to a null allocation + { + tracked_safe_allocation array = nullptr; + tracking_allocator::reset(); + + tracked_safe_allocation& ref = (array = nullptr); + CHECK(&ref == &array); + CHECK(array.data() == nullptr); + CHECK(array.size() == 0); + CHECK(!tracking_allocator::did_deallocate); + } +} + +T_DECL(assign_nullptr, "safe_allocation.assign.nullptr") { + tests(); + tests(); +} diff --git a/tests/safe_allocation_src/begin_end.cpp b/tests/safe_allocation_src/begin_end.cpp new file mode 100644 index 000000000..43bb8c153 --- /dev/null +++ b/tests/safe_allocation_src/begin_end.cpp @@ -0,0 +1,71 @@ +// +// Tests for +// iterator begin(); +// const_iterator begin() const; +// +// iterator end(); +// const_iterator end() const; +// + +#include +#include +#include "test_utils.h" +#include + +struct T { + int i; +}; + +template +static void +tests() +{ + using A = test_safe_allocation; + + // Check begin()/end() for a non-null allocation + { + A array(10, libkern::allocate_memory); + T* data = array.data(); + test_bounded_ptr begin = array.begin(); + test_bounded_ptr end = array.end(); + CHECK(begin.discard_bounds() == data); + CHECK(end.unsafe_discard_bounds() == data + 10); + } + { + A const array(10, libkern::allocate_memory); + T const* data = array.data(); + test_bounded_ptr begin = array.begin(); + test_bounded_ptr end = array.end(); + CHECK(begin.discard_bounds() == data); + CHECK(end.unsafe_discard_bounds() == data + 10); + } + + // Check begin()/end() for a null allocation + { + A array = nullptr; + test_bounded_ptr begin = array.begin(); + test_bounded_ptr end = array.end(); + CHECK(begin.unsafe_discard_bounds() == nullptr); + CHECK(end.unsafe_discard_bounds() == nullptr); + CHECK(begin == end); + } + { + A const array = nullptr; + test_bounded_ptr begin = array.begin(); + test_bounded_ptr end = array.end(); + CHECK(begin.unsafe_discard_bounds() == nullptr); + CHECK(end.unsafe_discard_bounds() == nullptr); + CHECK(begin == end); + } + + // 
Check associated types + { + static_assert(std::is_same_v >); + static_assert(std::is_same_v >); + } +} + +T_DECL(begin_end, "safe_allocation.begin_end") { + tests(); + tests(); +} diff --git a/tests/safe_allocation_src/compare.equal.nullptr.cpp b/tests/safe_allocation_src/compare.equal.nullptr.cpp new file mode 100644 index 000000000..4a77169d5 --- /dev/null +++ b/tests/safe_allocation_src/compare.equal.nullptr.cpp @@ -0,0 +1,45 @@ +// +// Tests for +// template +// bool operator==(std::nullptr_t, safe_allocation const& x); +// +// template +// bool operator!=(std::nullptr_t, safe_allocation const& x); +// +// template +// bool operator==(safe_allocation const& x, std::nullptr_t); +// +// template +// bool operator!=(safe_allocation const& x, std::nullptr_t); +// + +#include +#include +#include "test_utils.h" + +struct T { }; + +template +static void +tests() +{ + { + test_safe_allocation const array(10, libkern::allocate_memory); + CHECK(!(array == nullptr)); + CHECK(!(nullptr == array)); + CHECK(array != nullptr); + CHECK(nullptr != array); + } + { + test_safe_allocation const array = nullptr; + CHECK(array == nullptr); + CHECK(nullptr == array); + CHECK(!(array != nullptr)); + CHECK(!(nullptr != array)); + } +} + +T_DECL(compare_equal_nullptr, "safe_allocation.compare.equal.nullptr") { + tests(); + tests(); +} diff --git a/tests/safe_allocation_src/ctor.adopt.cpp b/tests/safe_allocation_src/ctor.adopt.cpp new file mode 100644 index 000000000..0a220b200 --- /dev/null +++ b/tests/safe_allocation_src/ctor.adopt.cpp @@ -0,0 +1,36 @@ +// +// Tests for +// explicit safe_allocation(T* data, size_t n, adopt_memory_t); +// + +#include +#include +#include "test_utils.h" + +struct T { + int i; +}; + +template +static void +tests() +{ + { + T* memory = reinterpret_cast(tracking_allocator::allocate(10 * sizeof(T))); + tracking_allocator::reset(); + { + tracked_safe_allocation array(memory, 10, libkern::adopt_memory); + CHECK(!tracking_allocator::did_allocate); + CHECK(array.data() == memory); + CHECK(array.size() == 10); + CHECK(array.begin() == array.data()); + CHECK(array.end() == array.data() + 10); + } + CHECK(tracking_allocator::deallocated_size == 10 * sizeof(T)); + } +} + +T_DECL(ctor_adopt, "safe_allocation.ctor.adopt") { + tests(); + tests(); +} diff --git a/tests/safe_allocation_src/ctor.allocate.cpp b/tests/safe_allocation_src/ctor.allocate.cpp new file mode 100644 index 000000000..a90f7abae --- /dev/null +++ b/tests/safe_allocation_src/ctor.allocate.cpp @@ -0,0 +1,67 @@ +// +// Tests for +// explicit safe_allocation(size_t n, allocate_memory_t); +// + +#include +#include +#include "test_utils.h" +#include +#include + +struct T { + int i; +}; + +struct TrackInit { + bool initialized; + TrackInit() : initialized(true) + { + } +}; + +template +static void +tests() +{ + { + tracking_allocator::reset(); + { + tracked_safe_allocation array(10, libkern::allocate_memory); + CHECK(tracking_allocator::allocated_size == 10 * sizeof(T)); + CHECK(array.data() != nullptr); + CHECK(array.size() == 10); + CHECK(array.begin() == array.data()); + CHECK(array.end() == array.data() + 10); + } + CHECK(tracking_allocator::deallocated_size == 10 * sizeof(T)); + } + + // Check with a huge number of elements that will overflow size_t + { + std::size_t max_n = std::numeric_limits::max() / sizeof(T); + tracking_allocator::reset(); + + { + tracked_safe_allocation array(max_n + 1, libkern::allocate_memory); + CHECK(array.data() == nullptr); + CHECK(array.size() == 0); + CHECK(array.begin() == array.end()); + 
CHECK(!tracking_allocator::did_allocate); + } + CHECK(!tracking_allocator::did_deallocate); + } +} + +T_DECL(ctor_allocate, "safe_allocation.ctor.allocate") { + tests(); + tests(); + + // Make sure we value-initialize elements + { + tracked_safe_allocation array(10, libkern::allocate_memory); + for (int i = 0; i != 10; ++i) { + CHECK(array[i].initialized == true); + } + } +} diff --git a/tests/safe_allocation_src/ctor.copy.cpp b/tests/safe_allocation_src/ctor.copy.cpp new file mode 100644 index 000000000..1e2936c30 --- /dev/null +++ b/tests/safe_allocation_src/ctor.copy.cpp @@ -0,0 +1,18 @@ +// +// Tests for +// safe_allocation(safe_allocation const&) = delete; +// + +#include +#include +#include +#include "test_utils.h" + +struct T { + int i; +}; + +T_DECL(ctor_copy, "safe_allocation.ctor.copy") { + static_assert(!std::is_copy_constructible_v >); + T_PASS("safe_allocation.ctor.copy passed"); +} diff --git a/tests/safe_allocation_src/ctor.default.cpp b/tests/safe_allocation_src/ctor.default.cpp new file mode 100644 index 000000000..54e5e0f67 --- /dev/null +++ b/tests/safe_allocation_src/ctor.default.cpp @@ -0,0 +1,41 @@ +// +// Tests for +// explicit safe_allocation(); +// + +#include +#include +#include "test_utils.h" + +struct T { + int i; +}; + +template +static void +tests() +{ + { + test_safe_allocation array; + CHECK(array.data() == nullptr); + CHECK(array.size() == 0); + CHECK(array.begin() == array.end()); + } + { + test_safe_allocation array{}; + CHECK(array.data() == nullptr); + CHECK(array.size() == 0); + CHECK(array.begin() == array.end()); + } + { + test_safe_allocation array = test_safe_allocation(); + CHECK(array.data() == nullptr); + CHECK(array.size() == 0); + CHECK(array.begin() == array.end()); + } +} + +T_DECL(ctor_default, "safe_allocation.ctor.default") { + tests(); + tests(); +} diff --git a/tests/safe_allocation_src/ctor.move.cpp b/tests/safe_allocation_src/ctor.move.cpp new file mode 100644 index 000000000..7f135f990 --- /dev/null +++ b/tests/safe_allocation_src/ctor.move.cpp @@ -0,0 +1,105 @@ +// +// Tests for +// safe_allocation(safe_allocation&& other); +// + +#include +#include +#include "test_utils.h" +#include + +struct T { + int i; +}; + +template +static void +tests() +{ + // Move-construct from a non-null allocation (with different syntaxes) + { + { + tracked_safe_allocation from(10, libkern::allocate_memory); + tracking_allocator::reset(); + + T* memory = from.data(); + + { + tracked_safe_allocation to(std::move(from)); + CHECK(!tracking_allocator::did_allocate); + CHECK(to.data() == memory); + CHECK(to.size() == 10); + CHECK(from.data() == nullptr); + CHECK(from.size() == 0); + } + CHECK(tracking_allocator::did_deallocate); + tracking_allocator::reset(); + } + CHECK(!tracking_allocator::did_deallocate); + } + { + { + tracked_safe_allocation from(10, libkern::allocate_memory); + tracking_allocator::reset(); + + T* memory = from.data(); + + { + tracked_safe_allocation to{std::move(from)}; + CHECK(!tracking_allocator::did_allocate); + CHECK(to.data() == memory); + CHECK(to.size() == 10); + CHECK(from.data() == nullptr); + CHECK(from.size() == 0); + } + CHECK(tracking_allocator::did_deallocate); + tracking_allocator::reset(); + } + CHECK(!tracking_allocator::did_deallocate); + } + { + { + tracked_safe_allocation from(10, libkern::allocate_memory); + tracking_allocator::reset(); + + T* memory = from.data(); + + { + tracked_safe_allocation to = std::move(from); + CHECK(!tracking_allocator::did_allocate); + CHECK(to.data() == memory); + CHECK(to.size() == 10); 
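+			// The moved-from allocation must be left null/empty so that
+			// destroying `from` later does not free the memory now owned by `to`.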
+ CHECK(from.data() == nullptr); + CHECK(from.size() == 0); + } + CHECK(tracking_allocator::did_deallocate); + tracking_allocator::reset(); + } + CHECK(!tracking_allocator::did_deallocate); + } + + // Move-construct from a null allocation + { + { + tracked_safe_allocation from = nullptr; + tracking_allocator::reset(); + + { + tracked_safe_allocation to(std::move(from)); + CHECK(!tracking_allocator::did_allocate); + CHECK(to.data() == nullptr); + CHECK(to.size() == 0); + CHECK(from.data() == nullptr); + CHECK(from.size() == 0); + } + CHECK(!tracking_allocator::did_deallocate); + tracking_allocator::reset(); + } + CHECK(!tracking_allocator::did_deallocate); + } +} + +T_DECL(ctor_move, "safe_allocation.ctor.move") { + tests(); + tests(); +} diff --git a/tests/safe_allocation_src/ctor.nullptr.cpp b/tests/safe_allocation_src/ctor.nullptr.cpp new file mode 100644 index 000000000..863325f06 --- /dev/null +++ b/tests/safe_allocation_src/ctor.nullptr.cpp @@ -0,0 +1,49 @@ +// +// Tests for +// safe_allocation(std::nullptr_t); +// + +#include +#include +#include "test_utils.h" + +struct T { + int i; +}; + +template +static void +tests() +{ + { + test_safe_allocation array(nullptr); + CHECK(array.data() == nullptr); + CHECK(array.size() == 0); + CHECK(array.begin() == array.end()); + } + { + test_safe_allocation array{nullptr}; + CHECK(array.data() == nullptr); + CHECK(array.size() == 0); + CHECK(array.begin() == array.end()); + } + { + test_safe_allocation array = nullptr; + CHECK(array.data() == nullptr); + CHECK(array.size() == 0); + CHECK(array.begin() == array.end()); + } + { + auto f = [](test_safe_allocation array) { + CHECK(array.data() == nullptr); + CHECK(array.size() == 0); + CHECK(array.begin() == array.end()); + }; + f(nullptr); + } +} + +T_DECL(ctor_nullptr, "safe_allocation.ctor.nullptr") { + tests(); + tests(); +} diff --git a/tests/safe_allocation_src/data.cpp b/tests/safe_allocation_src/data.cpp new file mode 100644 index 000000000..9fb2548af --- /dev/null +++ b/tests/safe_allocation_src/data.cpp @@ -0,0 +1,40 @@ +// +// Tests for +// T* data(); +// T const* data() const; +// + +#include +#include +#include "test_utils.h" + +struct T { + int i; +}; + +template +static void +tests() +{ + { + test_safe_allocation array(10, libkern::allocate_memory); + CHECK(array.data() != nullptr); + } + { + T* memory = reinterpret_cast(malloc_allocator::allocate(10 * sizeof(T))); + test_safe_allocation array(memory, 10, libkern::adopt_memory); + T* data = array.data(); + CHECK(data == memory); + } + { + T* memory = reinterpret_cast(malloc_allocator::allocate(10 * sizeof(T))); + test_safe_allocation const array(memory, 10, libkern::adopt_memory); + T const* data = array.data(); + CHECK(data == memory); + } +} + +T_DECL(data, "safe_allocation.data") { + tests(); + tests(); +} diff --git a/tests/safe_allocation_src/dtor.cpp b/tests/safe_allocation_src/dtor.cpp new file mode 100644 index 000000000..8bd575c17 --- /dev/null +++ b/tests/safe_allocation_src/dtor.cpp @@ -0,0 +1,50 @@ +// +// Tests for +// ~safe_allocation(); +// + +#include +#include +#include "test_utils.h" + +struct TriviallyDestructible { + int i; +}; + +struct NonTriviallyDestructible { + int i; + ~NonTriviallyDestructible() + { + } +}; + +template +static void +tests() +{ + // Destroy a non-null allocation + { + { + tracked_safe_allocation array(10, libkern::allocate_memory); + tracking_allocator::reset(); + } + CHECK(tracking_allocator::deallocated_size == 10 * sizeof(T)); + } + + // Destroy a null allocation + { + { + 
tracked_safe_allocation array = nullptr; + tracking_allocator::reset(); + } + CHECK(!tracking_allocator::did_deallocate); + } +} + +T_DECL(dtor, "safe_allocation.dtor") { + tests(); + tests(); + + tests(); + tests(); +} diff --git a/tests/safe_allocation_src/operator.bool.cpp b/tests/safe_allocation_src/operator.bool.cpp new file mode 100644 index 000000000..af468d400 --- /dev/null +++ b/tests/safe_allocation_src/operator.bool.cpp @@ -0,0 +1,42 @@ +// +// Tests for +// explicit operator bool() const; +// + +#include +#include +#include "test_utils.h" +#include + +struct T { + int i; +}; + +template +static void +tests() +{ + { + test_safe_allocation const array(10, libkern::allocate_memory); + CHECK(static_cast(array)); + if (array) { + } else { + CHECK(FALSE); + } + } + { + test_safe_allocation const array = nullptr; + CHECK(!static_cast(array)); + if (!array) { + } else { + CHECK(FALSE); + } + } + + static_assert(!std::is_convertible_v, bool>); +} + +T_DECL(operator_bool, "safe_allocation.operator.bool") { + tests(); + tests(); +} diff --git a/tests/safe_allocation_src/operator.subscript.cpp b/tests/safe_allocation_src/operator.subscript.cpp new file mode 100644 index 000000000..1eb36fb54 --- /dev/null +++ b/tests/safe_allocation_src/operator.subscript.cpp @@ -0,0 +1,84 @@ +// +// Tests for +// T& operator[](std::ptrdiff_t n); +// T const& operator[](std::ptrdiff_t n) const; +// + +#include +#include +#include "test_utils.h" +#include +#include + +struct T { + long i; +}; + +template +static void +tests() +{ + // Test the non-const version + { + RawT* memory = reinterpret_cast(malloc_allocator::allocate(10 * sizeof(RawT))); + for (RawT* ptr = memory; ptr != memory + 10; ++ptr) { + *ptr = RawT{ptr - memory}; // number from 0 to 9 + } + + test_safe_allocation array(memory, 10, libkern::adopt_memory); + for (std::ptrdiff_t n = 0; n != 10; ++n) { + QualT& element = array[n]; + CHECK(&element == memory + n); + } + } + + // Test the const version + { + RawT* memory = reinterpret_cast(malloc_allocator::allocate(10 * sizeof(RawT))); + for (RawT* ptr = memory; ptr != memory + 10; ++ptr) { + *ptr = RawT{ptr - memory}; // number from 0 to 9 + } + + test_safe_allocation const array(memory, 10, libkern::adopt_memory); + for (std::ptrdiff_t n = 0; n != 10; ++n) { + QualT const& element = array[n]; + CHECK(&element == memory + n); + } + } + + // Test with OOB offsets (should trap) + { + using Alloc = libkern::safe_allocation; + RawT* memory = reinterpret_cast(malloc_allocator::allocate(10 * sizeof(RawT))); + Alloc const array(memory, 10, libkern::adopt_memory); + + // Negative offsets + { + tracking_trapping_policy::reset(); + (void)array[-1]; + CHECK(tracking_trapping_policy::did_trap); + } + { + tracking_trapping_policy::reset(); + (void)array[-10]; + CHECK(tracking_trapping_policy::did_trap); + } + + // Too large offsets + { + tracking_trapping_policy::reset(); + (void)array[10]; + CHECK(tracking_trapping_policy::did_trap); + } + { + tracking_trapping_policy::reset(); + (void)array[11]; + CHECK(tracking_trapping_policy::did_trap); + } + } +} + +T_DECL(operator_subscript, "safe_allocation.operator.subscript") { + tests(); + tests(); +} diff --git a/tests/safe_allocation_src/size.cpp b/tests/safe_allocation_src/size.cpp new file mode 100644 index 000000000..24d731d47 --- /dev/null +++ b/tests/safe_allocation_src/size.cpp @@ -0,0 +1,48 @@ +// +// Tests for +// size_t size() const; +// + +#include +#include +#include "test_utils.h" +#include +#include +#include + +struct T { + int i; +}; + +template 
+static void +tests() +{ + { + test_safe_allocation const array(10, libkern::allocate_memory); + CHECK(array.size() == 10); + } + { + T* memory = reinterpret_cast(malloc_allocator::allocate(10 * sizeof(T))); + test_safe_allocation const array(memory, 10, libkern::adopt_memory); + CHECK(array.size() == 10); + } + { + test_safe_allocation const array(nullptr, 0, libkern::adopt_memory); + CHECK(array.size() == 0); + } + { + test_safe_allocation const array; + CHECK(array.size() == 0); + } + + { + using Size = decltype(std::declval const&>().size()); + static_assert(std::is_same_v); + } +} + +T_DECL(size, "safe_allocation.size") { + tests(); + tests(); +} diff --git a/tests/safe_allocation_src/swap.cpp b/tests/safe_allocation_src/swap.cpp new file mode 100644 index 000000000..d3da0a714 --- /dev/null +++ b/tests/safe_allocation_src/swap.cpp @@ -0,0 +1,104 @@ +// +// Tests for +// void swap(safe_allocation& a, safe_allocation& b); +// + +#include +#include +#include "test_utils.h" + +struct T { + int i; +}; + +template +static void +tests() +{ + // Swap non-null with non-null + { + tracked_safe_allocation a(10, libkern::allocate_memory); + tracked_safe_allocation b(20, libkern::allocate_memory); + T* a_mem = a.data(); + T* b_mem = b.data(); + tracking_allocator::reset(); + + swap(a, b); // ADL call + + CHECK(!tracking_allocator::did_allocate); + CHECK(!tracking_allocator::did_deallocate); + CHECK(a.data() == b_mem); + CHECK(b.data() == a_mem); + CHECK(a.size() == 20); + CHECK(b.size() == 10); + } + + // Swap non-null with null + { + tracked_safe_allocation a(10, libkern::allocate_memory); + tracked_safe_allocation b = nullptr; + T* a_mem = a.data(); + tracking_allocator::reset(); + + swap(a, b); // ADL call + + CHECK(!tracking_allocator::did_allocate); + CHECK(!tracking_allocator::did_deallocate); + CHECK(a.data() == nullptr); + CHECK(b.data() == a_mem); + CHECK(a.size() == 0); + CHECK(b.size() == 10); + } + + // Swap null with non-null + { + tracked_safe_allocation a = nullptr; + tracked_safe_allocation b(20, libkern::allocate_memory); + T* b_mem = b.data(); + tracking_allocator::reset(); + + swap(a, b); // ADL call + + CHECK(!tracking_allocator::did_allocate); + CHECK(!tracking_allocator::did_deallocate); + CHECK(a.data() == b_mem); + CHECK(b.data() == nullptr); + CHECK(a.size() == 20); + CHECK(b.size() == 0); + } + + // Swap null with null + { + tracked_safe_allocation a = nullptr; + tracked_safe_allocation b = nullptr; + tracking_allocator::reset(); + + swap(a, b); // ADL call + + CHECK(!tracking_allocator::did_allocate); + CHECK(!tracking_allocator::did_deallocate); + CHECK(a.data() == nullptr); + CHECK(b.data() == nullptr); + CHECK(a.size() == 0); + CHECK(b.size() == 0); + } + + // Swap with self + { + tracked_safe_allocation a(10, libkern::allocate_memory); + T* a_mem = a.data(); + tracking_allocator::reset(); + + swap(a, a); // ADL call + + CHECK(!tracking_allocator::did_allocate); + CHECK(!tracking_allocator::did_deallocate); + CHECK(a.data() == a_mem); + CHECK(a.size() == 10); + } +} + +T_DECL(swap, "safe_allocation.swap") { + tests(); + tests(); +} diff --git a/tests/safe_allocation_src/test_utils.h b/tests/safe_allocation_src/test_utils.h new file mode 100644 index 000000000..1573b465e --- /dev/null +++ b/tests/safe_allocation_src/test_utils.h @@ -0,0 +1,97 @@ +#ifndef TESTS_SAFE_ALLOCATION_TEST_UTILS_H +#define TESTS_SAFE_ALLOCATION_TEST_UTILS_H + +#include +#include +#include +#include +#include +#include + +namespace { +struct assert_trapping_policy { + static void + trap(char 
const*) + { + assert(false); + } +}; + +struct malloc_allocator { + static void* + allocate(size_t n) + { + return std::malloc(n); + } + + static void + deallocate(void* p, size_t n) + { + std::free(p); + } +}; + +struct tracking_allocator { + static void + reset() + { + allocated_size = 0; + deallocated_size = 0; + did_allocate = false; + did_deallocate = false; + } + static std::size_t allocated_size; + static std::size_t deallocated_size; + static bool did_allocate; + static bool did_deallocate; + + static void* + allocate(std::size_t n) + { + did_allocate = true; + allocated_size = n; + return std::malloc(n); + } + + static void + deallocate(void* p, std::size_t n) + { + did_deallocate = true; + deallocated_size = n; + std::free(p); + } +}; + +std::size_t tracking_allocator::allocated_size = 0; +std::size_t tracking_allocator::deallocated_size = 0; +bool tracking_allocator::did_allocate = false; +bool tracking_allocator::did_deallocate = false; + +struct tracking_trapping_policy { + static void + reset() + { + did_trap = false; + } + static bool did_trap; + static void + trap(char const*) + { + did_trap = true; + } +}; +bool tracking_trapping_policy::did_trap = false; + +template +using test_safe_allocation = libkern::safe_allocation; + +template +using tracked_safe_allocation = libkern::safe_allocation; + +template +using test_bounded_ptr = libkern::bounded_ptr; +} // end anonymous namespace + +#define CHECK(...) T_ASSERT_TRUE((__VA_ARGS__), # __VA_ARGS__) + +#endif // !TESTS_SAFE_ALLOCATION_TEST_UTILS_H diff --git a/tests/safe_allocation_src/usage.for_loop.cpp b/tests/safe_allocation_src/usage.for_loop.cpp new file mode 100644 index 000000000..d0daad9f7 --- /dev/null +++ b/tests/safe_allocation_src/usage.for_loop.cpp @@ -0,0 +1,29 @@ +// +// Make sure `safe_allocation` works nicely with the range-based for-loop. +// + +#include +#include +#include "test_utils.h" + +struct T { + int i; +}; + +template +static void +tests() +{ + test_safe_allocation array(10, libkern::allocate_memory); + for (T& element : array) { + element = T{3}; + } + + for (T const& element : array) { + CHECK(element.i == 3); + } +} + +T_DECL(usage_for_loop, "safe_allocation.usage.for_loop") { + tests(); +} diff --git a/tests/safe_allocation_src/usage.two_dimensions.cpp b/tests/safe_allocation_src/usage.two_dimensions.cpp new file mode 100644 index 000000000..2a154067c --- /dev/null +++ b/tests/safe_allocation_src/usage.two_dimensions.cpp @@ -0,0 +1,39 @@ +// +// Make sure `safe_allocation` can be used to create a two-dimensional array. +// +// Note that we don't really recommend using that representation for two +// dimensional arrays because other representations are better, but it +// should at least work. 
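+// (A more common layout is a single allocation of rows * cols elements indexed
+// as array[i * cols + j], which needs only one allocation and no per-row
+// indirection; the nested form is exercised here purely to confirm it works.)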
+// + +#include +#include +#include "test_utils.h" + +struct T { + int i; +}; + +template +static void +tests() +{ + test_safe_allocation > array(10, libkern::allocate_memory); + + for (int i = 0; i < 10; i++) { + array[i] = test_safe_allocation(10, libkern::allocate_memory); + for (int j = 0; j < 10; ++j) { + array[i][j] = i + j; + } + } + + for (int i = 0; i < 10; i++) { + for (int j = 0; j < 10; ++j) { + CHECK(array[i][j] == i + j); + } + } +} + +T_DECL(usage_two_dimensions, "safe_allocation.usage.two_dimensions") { + tests(); +} diff --git a/tests/sbuf_tests.c b/tests/sbuf_tests.c new file mode 100644 index 000000000..b1f9d1a20 --- /dev/null +++ b/tests/sbuf_tests.c @@ -0,0 +1,11 @@ +#include +#include + +T_DECL(sbuf_tests, "invoke the sbuf unit tests") +{ + char buf[5] = { 'A', 'B', 'C', 'D', 0 }; + int ret; + + ret = sysctlbyname("kern.sbuf_test", NULL, NULL, buf, sizeof(buf) - 1); + T_ASSERT_POSIX_SUCCESS(ret, "kernel sbuf tests failed"); +} diff --git a/tests/scanf.c b/tests/scanf.c new file mode 100644 index 000000000..05bf89409 --- /dev/null +++ b/tests/scanf.c @@ -0,0 +1,195 @@ +/* + * Copyright (c) 2020 Apple Computer, Inc. All rights reserved. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. + * + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ + */ + +#include +#include + +// This can either test libkern's sscanf, or stdio.h's. +#define TEST_LIBKERN + +#if defined(TEST_LIBKERN) +static int libkern_isspace(char c); +int libkern_sscanf(const char *ibuf, const char *fmt, ...); +int libkern_vsscanf(const char *inp, char const *fmt0, va_list ap); +# define isspace(C) libkern_isspace(C) +# define sscanf(...) libkern_sscanf(__VA_ARGS__) +# define vsscanf(...) 
libkern_vsscanf(__VA_ARGS__) +# include "../libkern/stdio/scanf.c" +#else +# include +#endif + +T_DECL(scanf_empty, "empty") +{ + T_ASSERT_EQ_INT(sscanf("", ""), 0, "empty input and format"); + T_ASSERT_EQ_INT(sscanf("", "match me"), EOF, "empty input"); + T_ASSERT_EQ_INT(sscanf("lonely", ""), 0, "empty format"); +} + +T_DECL(scanf_percent, "percent") +{ + T_ASSERT_EQ_INT(sscanf("%", "%%"), 0, "two percent"); +} + +T_DECL(scanf_character, "character") +{ + char c; + for (char i = ' '; i <= '~'; ++i) { + char buf[] = { i, '\0' }; + T_ASSERT_EQ_INT(sscanf(buf, "%c", &c), 1, "character matched"); + T_ASSERT_EQ_INT(c, i, "character value"); + } +} + +T_DECL(scanf_characters, "characters") +{ + char c[] = { 'a', 'b', 'c', 'd', 'e' }; + T_ASSERT_EQ_INT(sscanf("01234", "%4c", c), 1, "characters matched"); + T_ASSERT_EQ_INT(c[0], '0', "characters value"); + T_ASSERT_EQ_INT(c[1], '1', "characters value"); + T_ASSERT_EQ_INT(c[2], '2', "characters value"); + T_ASSERT_EQ_INT(c[3], '3', "characters value"); + T_ASSERT_EQ_INT(c[4], 'e', "characters value wasn't overwritten"); +} + +T_DECL(scanf_string, "string") +{ + char s[] = { 'a', 'b', 'c', 'd', 'e' }; + T_ASSERT_EQ_INT(sscanf("012", "%s", s), 1, "string matched"); + T_ASSERT_EQ_STR(s, "012", "string value"); + T_ASSERT_EQ_INT(s[4], 'e', "string value wasn't overwritten"); + T_ASSERT_EQ_INT(sscanf("ABCDE", "%3s", s), 1, "string matched"); + T_ASSERT_EQ_STR(s, "ABC", "string value"); + T_ASSERT_EQ_INT(s[4], 'e', "string value wasn't overwritten"); +} + +T_DECL(scanf_decimal, "decimal") +{ + int num; + for (char i = 0; i <= 9; ++i) { + char buf[] = { i + '0', '\0' }; + T_ASSERT_EQ_INT(sscanf(buf, "%d", &num), 1, "decimal matched"); + T_ASSERT_EQ_INT(num, i, "decimal value"); + } + for (char i = 10; i <= 99; ++i) { + char buf[] = { i / 10 + '0', i % 10 + '0', '\0' }; + T_ASSERT_EQ_INT(sscanf(buf, "%d", &num), 1, "decimal matched"); + T_ASSERT_EQ_INT(num, i, "decimal value"); + } + for (char i = 0; i <= 9; ++i) { + char buf[] = { '-', i + '0', '\0' }; + T_ASSERT_EQ_INT(sscanf(buf, "%d", &num), 1, "negative decimal matched"); + T_ASSERT_EQ_INT(num, -i, "negative decimal value"); + } + T_ASSERT_EQ_INT(sscanf("-2147483648", "%d", &num), 1, "INT32_MIN matched"); + T_ASSERT_EQ_INT(num, INT32_MIN, "INT32_MIN value"); + T_ASSERT_EQ_INT(sscanf("2147483647", "%d", &num), 1, "INT32_MAX matched"); + T_ASSERT_EQ_INT(num, INT32_MAX, "INT32_MAX value"); +} + +T_DECL(scanf_integer, "integer") +{ + int num; + T_ASSERT_EQ_INT(sscanf("0", "%i", &num), 1, "octal integer matched"); + T_ASSERT_EQ_INT(num, 0, "octal integer value"); + for (char i = 0; i <= 7; ++i) { + char buf[] = { '0', i + '0', '\0' }; + T_ASSERT_EQ_INT(sscanf(buf, "%i", &num), 1, "octal integer matched"); + T_ASSERT_EQ_INT(num, i, "octal integer value"); + } + for (char i = 0; i <= 9; ++i) { + char buf[] = { '0', 'x', i + '0', '\0' }; + T_ASSERT_EQ_INT(sscanf(buf, "%i", &num), 1, "hex integer matched"); + T_ASSERT_EQ_INT(num, i, "hex integer value"); + } + for (char i = 10; i <= 15; ++i) { + char buf[] = { '0', 'x', i - 10 + 'a', '\0' }; + T_ASSERT_EQ_INT(sscanf(buf, "%i", &num), 1, "hex integer matched"); + T_ASSERT_EQ_INT(num, i, "hex integer value"); + } +} + +T_DECL(scanf_unsigned, "unsigned") +{ + unsigned num; + T_ASSERT_EQ_INT(sscanf("4294967295", "%u", &num), 1, "UINT32_MAX matched"); + T_ASSERT_EQ_UINT(num, UINT32_MAX, "UINT32_MAX value"); +} + +T_DECL(scanf_octal, "octal") +{ + int num; + T_ASSERT_EQ_INT(sscanf("0", "%o", &num), 1, "octal matched"); + T_ASSERT_EQ_INT(num, 0, "octal value"); + 
for (char i = 0; i <= 7; ++i) { + char buf[] = { '0', i + '0', '\0' }; + T_ASSERT_EQ_INT(sscanf(buf, "%o", &num), 1, "octal matched"); + T_ASSERT_EQ_INT(num, i, "octal value"); + } +} + +T_DECL(scanf_hex, "hex") +{ + int num; + for (char i = 0; i <= 9; ++i) { + char buf[] = { '0', 'x', i + '0', '\0' }; + T_ASSERT_EQ_INT(sscanf(buf, "%x", &num), 1, "hex matched"); + T_ASSERT_EQ_INT(num, i, "hex value"); + } + for (char i = 10; i <= 15; ++i) { + char buf[] = { '0', 'x', i - 10 + 'a', '\0' }; + T_ASSERT_EQ_INT(sscanf(buf, "%x", &num), 1, "hex matched"); + T_ASSERT_EQ_INT(num, i, "hex value"); + } +} + +T_DECL(scanf_read, "read") +{ + int val, num; + T_ASSERT_EQ_INT(sscanf("", "%n", &num), 0, "read matched"); + T_ASSERT_EQ_INT(num, 0, "read count"); + T_ASSERT_EQ_INT(sscanf("a", "a%n", &num), 0, "read matched"); + T_ASSERT_EQ_INT(num, 1, "read count"); + T_ASSERT_EQ_INT(sscanf("ab", "a%nb", &num), 0, "read matched"); + T_ASSERT_EQ_INT(num, 1, "read count"); + T_ASSERT_EQ_INT(sscanf("1234567", "%i%n", &val, &num), 1, "read matched"); + T_ASSERT_EQ_INT(val, 1234567, "read value"); + T_ASSERT_EQ_INT(num, 7, "read count"); +} + +T_DECL(scanf_pointer, "pointer") +{ + void *ptr; + if (sizeof(void*) == 4) { + T_ASSERT_EQ_INT(sscanf("0xdeadbeef", "%p", &ptr), 1, "pointer matched"); + T_ASSERT_EQ_PTR(ptr, (void*)0xdeadbeef, "pointer value"); + } else { + T_ASSERT_EQ_INT(sscanf("0xdeadbeefc0defefe", "%p", &ptr), 1, "pointer matched"); + T_ASSERT_EQ_PTR(ptr, (void*)0xdeadbeefc0defefe, "pointer value"); + } +} diff --git a/tests/sched_cluster_bound_threads.c b/tests/sched_cluster_bound_threads.c new file mode 100644 index 000000000..b06b306dd --- /dev/null +++ b/tests/sched_cluster_bound_threads.c @@ -0,0 +1,140 @@ +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +static mach_timebase_info_data_t timebase_info; + +static uint64_t +nanos_to_abs(uint64_t nanos) +{ + return nanos * timebase_info.denom / timebase_info.numer; +} +static uint64_t +abs_to_nanos(uint64_t abs) +{ + return abs * timebase_info.numer / timebase_info.denom; +} + + +/* Spin until a specified number of seconds elapses */ +static void +spin_for_duration(uint32_t seconds) +{ + uint64_t duration = nanos_to_abs((uint64_t)seconds * NSEC_PER_SEC); + uint64_t current_time = mach_absolute_time(); + uint64_t timeout = duration + current_time; + + uint64_t spin_count = 0; + + while (mach_absolute_time() < timeout) { + spin_count++; + } +} + +static void * +spin_thread(__unused void *arg) +{ + spin_for_duration(8); + return NULL; +} + +void +bind_to_cluster(char type) +{ + char old_type; + size_t type_size = sizeof(type); + T_ASSERT_POSIX_SUCCESS(sysctlbyname("kern.sched_thread_bind_cluster_type", + &old_type, &type_size, &type, sizeof(type)), + "bind current thread to cluster %c", type); +} + +static void * +spin_bound_thread(void *arg) +{ + char type = (char)arg; + bind_to_cluster(type); + spin_for_duration(10); + return NULL; +} + +static unsigned int +get_ncpu(void) +{ + int ncpu; + size_t sysctl_size = sizeof(ncpu); + int ret = sysctlbyname("hw.ncpu", &ncpu, &sysctl_size, NULL, 0); + assert(ret == 0); + return (unsigned int) ncpu; +} + +#define SPINNER_THREAD_LOAD_FACTOR (4) + +T_DECL(test_cluster_bound_thread_timeshare, "Make sure the low priority bound threads get CPU in the presence of non-bound CPU spinners", + T_META_BOOTARGS_SET("enable_skstb=1"), T_META_ASROOT(true)) +{ +#if TARGET_CPU_ARM64 && TARGET_OS_OSX + pthread_setname_np("main thread"); + + kern_return_t kr; + + kr = 
mach_timebase_info(&timebase_info); + T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "mach_timebase_info"); + + int rv; + pthread_attr_t attr; + + rv = pthread_attr_init(&attr); + T_QUIET; T_ASSERT_POSIX_SUCCESS(rv, "pthread_attr_init"); + + rv = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED); + T_QUIET; T_ASSERT_POSIX_SUCCESS(rv, "pthread_attr_setdetachstate"); + + rv = pthread_attr_set_qos_class_np(&attr, QOS_CLASS_USER_INITIATED, 0); + T_QUIET; T_ASSERT_POSIX_SUCCESS(rv, "pthread_attr_set_qos_class_np"); + + unsigned int ncpu = get_ncpu(); + pthread_t unbound_thread; + pthread_t bound_thread; + + T_LOG("creating %u non-bound threads\n", ncpu * SPINNER_THREAD_LOAD_FACTOR); + + for (int i = 0; i < ncpu * SPINNER_THREAD_LOAD_FACTOR; i++) { + rv = pthread_create(&unbound_thread, &attr, spin_thread, NULL); + T_QUIET; T_ASSERT_POSIX_SUCCESS(rv, "pthread_create (non-bound)"); + } + + struct sched_param param = { .sched_priority = (int)20 }; + T_ASSERT_POSIX_ZERO(pthread_attr_setschedparam(&attr, ¶m), "pthread_attr_setschedparam"); + + rv = pthread_create(&bound_thread, &attr, spin_bound_thread, (void *)(uintptr_t)'P'); + T_QUIET; T_ASSERT_POSIX_SUCCESS(rv, "pthread_create (P-bound)"); + + rv = pthread_attr_destroy(&attr); + T_QUIET; T_ASSERT_POSIX_SUCCESS(rv, "pthread_attr_destroy"); + + sleep(8); + + mach_msg_type_number_t count = THREAD_BASIC_INFO_COUNT; + mach_port_t thread_port = pthread_mach_thread_np(bound_thread); + thread_basic_info_data_t bound_thread_info; + + kr = thread_info(thread_port, THREAD_BASIC_INFO, (thread_info_t)&bound_thread_info, &count); + if (kr != KERN_SUCCESS) { + err("%#x == thread_info(bound_thread, THREAD_BASIC_INFO)", kr); + } + + uint64_t bound_usr_usec = bound_thread_info.user_time.seconds * USEC_PER_SEC + bound_thread_info.user_time.microseconds; + + T_ASSERT_GT(bound_usr_usec, 75000, "Check that bound thread got atleast 75ms CPU time"); + T_PASS("Low priority bound threads got some CPU time in the presence of high priority unbound spinners"); +#else /* TARGET_CPU_ARM64 && TARGET_OS_OSX */ + T_SKIP("Test not supported on this platform!"); +#endif /* TARGET_CPU_ARM64 && TARGET_OS_OSX */ +} diff --git a/tests/select_stress.c b/tests/select_stress.c new file mode 100644 index 000000000..7cf0d1db0 --- /dev/null +++ b/tests/select_stress.c @@ -0,0 +1,416 @@ +#include + +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +/* Select parameters */ +#define TIMEOUT_CHANCE 17 /* one in this many times, timeout */ +#define TIMEOUT_POLLCHANCE 11 /* one in this many is a poll */ +#define TIMEOUT_SCALE 5 /* microseconds multiplier */ + +static semaphore_t g_thread_sem; +static semaphore_t g_sync_sem; + +struct endpoint { + int fd[4]; + pthread_t pth; +}; + +typedef void * (*thread_func)(struct endpoint *ep); +typedef void (*setup_func)(struct endpoint *ep); + +struct thread_sync_arg { + struct endpoint ep; + setup_func setup; + thread_func work; +}; + +static mach_timebase_info_data_t g_timebase; + +static int g_sleep_iterations = 150000; +static int g_sleep_usecs = 30; +static int g_stress_nthreads = 100; +static uint64_t g_stress_duration = 60; + +static inline uint64_t +ns_to_abs(uint64_t ns) +{ + return ns * g_timebase.denom / g_timebase.numer; +} + +static inline uint64_t +abs_to_ns(uint64_t abs) +{ + return abs * g_timebase.numer / g_timebase.denom; +} + + + +/* + * Synchronize the startup / initialization of a set of threads + */ +static void * +thread_sync(void *ctx) +{ + struct thread_sync_arg *a = (struct thread_sync_arg 
*)ctx; + T_QUIET; + T_ASSERT_TRUE(((a != NULL) && (a->work != NULL)), "thread setup error"); + + if (a->setup) { + (a->setup)(&a->ep); + } + + semaphore_wait_signal(g_thread_sem, g_sync_sem); + return (a->work)(&a->ep); +} + +struct select_stress_args { + struct endpoint *ep; + int nthreads; +}; + +static void +setup_stress_event(struct endpoint *ep) +{ + T_QUIET; + T_WITH_ERRNO; + T_ASSERT_POSIX_SUCCESS(pipe(&ep->fd[0]), "pipe()"); + + T_LOG("th[0x%lx]: fd:{%d,%d}, ep@%p", + (uintptr_t)pthread_self(), ep->fd[0], ep->fd[1], (void *)ep); +} + +/* + * Cause file descriptors to be reused/replaced. We expect that it will at + * least take the lowest fd as part of the descriptor list. This may be + * optimistic, but it shows replacing an fd out from under a select() if it + * happens. + * + * We potentially delay the open for a random amount of time so that another + * thread can come in and wake up the fd_set with a bad (closed) fd in the set. + */ +static void +recycle_fds(struct endpoint *ep) +{ + /* close endpoint descriptors in random order */ + if (random() % 1) { + close(ep->fd[0]); + close(ep->fd[1]); + } else { + close(ep->fd[1]); + close(ep->fd[0]); + } + + /* randomize a delay */ + if ((random() % ep->fd[0]) == 0) { + usleep(((random() % ep->fd[1]) + 1) * ep->fd[1]); + } + + /* reopen the FDs, hopefully in the middle of select() */ + T_QUIET; + T_WITH_ERRNO; + T_ASSERT_POSIX_SUCCESS(pipe(&ep->fd[0]), "pipe"); +} + + +/* + * Send a byte of data down the thread end of a pipe to wake up the select + * on the other end of it. Select will wake up normally because of this, + * and read the byte out. Hopefully, another thread has closed/reopened its FDs. + */ +static void +write_data(struct endpoint *ep) +{ + T_QUIET; + T_WITH_ERRNO; + T_ASSERT_POSIX_SUCCESS(write(ep->fd[1], "X", 1), "th[0x%lx] write_data(fd=%d)", + (uintptr_t)pthread_self(), ep->fd[1]); +} + +static void * +do_stress_events(struct endpoint *ep) +{ + unsigned write_freq = (unsigned)(((uintptr_t)pthread_self() & 0xff0000) >> 16); + + /* some default */ + if (write_freq == 0) { + write_freq = 31; + } + + T_LOG("th[0x%lx] write_freq:%d", (uintptr_t)pthread_self(), write_freq); + + for (;;) { + /* randomized delay between events */ + usleep(((random() % ep->fd[1]) + 1) * ep->fd[1]); + + if ((random() % write_freq) == 0) { + write_data(ep); + } else { + recycle_fds(ep); + } + } +} + +struct selarg { + struct thread_sync_arg *th; + fd_set def_readfds; + int max_fd; + int nthreads; + int ret; + + pthread_t pth; +}; + +/* + * Put the actual call to select in its own thread so we can catch errors that + * occur only the first time a thread calls select. 
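+ * do_select() copies def_readfds, occasionally arms a randomized timeout so
+ * that some calls poll rather than block, and reports EBADF back through
+ * sarg->ret instead of asserting, because another thread may have closed and
+ * reopened its pipe descriptors while the select was being set up.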
+ */ +static void * +do_select(void *arg) +{ + struct selarg *sarg = (struct selarg *)arg; + struct timeval timeout; + struct timeval *tp = NULL; + fd_set readfds; + int nfd; + + sarg->ret = 0; + + FD_COPY(&sarg->def_readfds, &readfds); + + /* Add a timeout probablistically */ + if ((random() % TIMEOUT_CHANCE) == 0) { + timeout.tv_sec = random() % 1; + timeout.tv_usec = ((random() % TIMEOUT_POLLCHANCE) * TIMEOUT_SCALE); + tp = &timeout; + } + + /* Do the select */ + nfd = select(sarg->max_fd + 1, &readfds, 0, 0, tp); + if (nfd < 0) { + /* EBADF: fd_set has changed */ + if (errno == EBADF) { + sarg->ret = EBADF; + return NULL; + } + + /* Other errors are fatal */ + T_QUIET; + T_WITH_ERRNO; + T_ASSERT_POSIX_SUCCESS(nfd, "select:stress"); + } + + /* Fast: handle timeouts */ + if (nfd == 0) { + return NULL; + } + + /* Slower: discard read input thrown at us from threads */ + for (int i = 0; i < sarg->nthreads; i++) { + struct endpoint *ep = &sarg->th[i].ep; + + if (FD_ISSET(ep->fd[0], &readfds)) { + char c; + (void)read(ep->fd[0], &c, 1); + } + } + + return NULL; +} + + +static void +test_select_stress(int nthreads, uint64_t duration_seconds) +{ + uint64_t deadline; + uint64_t seconds_remain, last_print_time; + + struct selarg sarg; + + int started_threads = 0; + struct thread_sync_arg *th; + + if (nthreads < 2) { + T_LOG("forcing a minimum of 2 threads"); + nthreads = 2; + } + + /* + * Allocate memory for endpoint data + */ + th = calloc(nthreads, sizeof(*th)); + T_QUIET; + T_ASSERT_NOTNULL(th, "select_stress: No memory for thread endpoints"); + + T_LOG("Select stress test: %d threads, for %lld seconds", nthreads, duration_seconds); + + /* + * Startup all the threads + */ + T_LOG("\tcreating threads..."); + for (int i = 0; i < nthreads; i++) { + struct endpoint *e = &th[i].ep; + th[i].setup = setup_stress_event; + th[i].work = do_stress_events; + T_QUIET; + T_WITH_ERRNO; + T_ASSERT_POSIX_ZERO(pthread_create(&e->pth, 0, thread_sync, &th[i]), + "pthread_create:do_stress_events"); + } + + /* + * Wait for all the threads to start up + */ + while (started_threads < nthreads) { + if (semaphore_wait(g_sync_sem) == KERN_SUCCESS) { + ++started_threads; + } + } + + /* + * Kick everyone off + */ + semaphore_signal_all(g_thread_sem); + + /* + * Calculate a stop time + */ + deadline = mach_absolute_time() + ns_to_abs(duration_seconds * NSEC_PER_SEC); + seconds_remain = duration_seconds; + last_print_time = seconds_remain + 1; + + /* + * Perform the select and read any data that comes from the + * constituent thread FDs. 
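+ * Whenever do_select() reports EBADF, control jumps back to handle_ebadf so
+ * that def_readfds and max_fd are rebuilt from the threads' current pipe
+ * descriptors before selecting again.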
+ */ + + T_LOG("\ttest running!"); +handle_ebadf: + /* (re) set up the select fd set */ + sarg.max_fd = 0; + FD_ZERO(&sarg.def_readfds); + for (int i = 0; i < nthreads; i++) { + struct endpoint *ep = &th[i].ep; + + FD_SET(ep->fd[0], &sarg.def_readfds); + if (ep->fd[0] > sarg.max_fd) { + sarg.max_fd = ep->fd[0]; + } + } + + sarg.th = th; + sarg.nthreads = nthreads; + + while (mach_absolute_time() < deadline) { + void *thret = NULL; + + seconds_remain = abs_to_ns(deadline - mach_absolute_time()) / NSEC_PER_SEC; + if (last_print_time > seconds_remain) { + T_LOG(" %6lld...", seconds_remain); + last_print_time = seconds_remain; + } + + sarg.ret = 0; + T_QUIET; + T_WITH_ERRNO; + T_ASSERT_POSIX_ZERO(pthread_create(&sarg.pth, 0, do_select, &sarg), + "pthread_create:do_select"); + + T_QUIET; + T_WITH_ERRNO; + T_ASSERT_POSIX_ZERO(pthread_cancel(sarg.pth), "pthread_cancel"); + T_QUIET; + T_WITH_ERRNO; + T_ASSERT_POSIX_ZERO(pthread_join(sarg.pth, &thret), "pthread_join"); + + if (sarg.ret == EBADF) { + goto handle_ebadf; + } + T_QUIET; + T_ASSERT_GE(sarg.ret, 0, "threaded do_select returned an \ + error: %d!", sarg.ret); + } + + T_PASS("select stress test passed"); +} + + +/* + * TEST: use select as sleep() + */ +static void +test_select_sleep(uint32_t niterations, unsigned long usecs) +{ + int ret; + struct timeval tv; + tv.tv_sec = 0; + tv.tv_usec = usecs; + + if (!niterations) { + T_FAIL("select sleep test skipped"); + return; + } + + T_LOG("Testing select as sleep (n=%d, us=%ld)...", niterations, usecs); + + while (niterations--) { + ret = select(0, NULL, NULL, NULL, &tv); + if (ret < 0 && errno != EINTR) { + T_QUIET; + T_WITH_ERRNO; + T_ASSERT_POSIX_SUCCESS(ret, "select:sleep"); + } + } + + T_PASS("select sleep test passed"); +} + +#define get_env_arg(NM, sval, val) \ + do { \ + sval = getenv(#NM); \ + if (sval) { \ + long v = atol(sval); \ + if (v <= 0) \ + v =1 ; \ + val = (typeof(val))v; \ + } \ + } while (0) + +T_DECL(select_sleep, "select sleep test for rdar://problem/20804876 Gala: select with no FDs leaks waitq table objects (causes asserts/panics)") +{ + char *env_sval = NULL; + + get_env_arg(SELSLEEP_ITERATIONS, env_sval, g_sleep_iterations); + get_env_arg(SELSLEEP_INTERVAL, env_sval, g_sleep_usecs); + + test_select_sleep((uint32_t)g_sleep_iterations, (unsigned long)g_sleep_usecs); +} + +T_DECL(select_stress, "select stress test for rdar://problem/20804876 Gala: select with no FDs leaks waitq table objects (causes asserts/panics)") +{ + char *env_sval = NULL; + + T_QUIET; + T_ASSERT_MACH_SUCCESS(mach_timebase_info(&g_timebase), + "Can't get mach_timebase_info!"); + + get_env_arg(SELSTRESS_THREADS, env_sval, g_stress_nthreads); + get_env_arg(SELSTRESS_DURATION, env_sval, g_stress_duration); + + T_QUIET; + T_ASSERT_MACH_SUCCESS(semaphore_create(mach_task_self(), &g_sync_sem, SYNC_POLICY_FIFO, 0), + "semaphore_create(g_sync_sem)"); + T_QUIET; + T_ASSERT_MACH_SUCCESS(semaphore_create(mach_task_self(), &g_thread_sem, SYNC_POLICY_FIFO, 0), + "semaphore_create(g_thread_sem)"); + + test_select_stress(g_stress_nthreads, g_stress_duration); +} diff --git a/tests/settimeofday_29193041.c b/tests/settimeofday_29193041.c index 5acfb74ee..2d9bb5a95 100644 --- a/tests/settimeofday_29193041.c +++ b/tests/settimeofday_29193041.c @@ -9,12 +9,6 @@ #include #include -#if CONFIG_EMBEDDED -#include -#include -#include -#endif - /* * This test expects the entitlement or root privileges for a process to * set the time using settimeofday syscall. 
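For reference, the get_env_arg() macro used by the select_sleep and select_stress tests above expands, for one parameter, to roughly the long-hand below. This is only a sketch: the helper name read_selstress_threads is invented for illustration, while SELSTRESS_THREADS and g_stress_nthreads are the names actually used in tests/select_stress.c.

#include <stdlib.h>

static int g_stress_nthreads = 100;

/* Roughly what get_env_arg(SELSTRESS_THREADS, env_sval, g_stress_nthreads) does. */
static void
read_selstress_threads(void)
{
	char *sval = getenv("SELSTRESS_THREADS");
	if (sval) {
		long v = atol(sval);
		if (v <= 0) {
			v = 1;	/* atol() of junk gives 0; clamp non-positive values to 1 */
		}
		g_stress_nthreads = (int)v;
	}
}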
diff --git a/tests/settimeofday_29193041_entitled.c b/tests/settimeofday_29193041_entitled.c index 38bc47ad0..af6a3561d 100644 --- a/tests/settimeofday_29193041_entitled.c +++ b/tests/settimeofday_29193041_entitled.c @@ -9,12 +9,6 @@ #include #include -#if CONFIG_EMBEDDED -#include -#include -#include -#endif - /* * This test expects the entitlement or root privileges for a process to * set the time using settimeofday syscall. diff --git a/tests/shared_cache_reslide_test.c b/tests/shared_cache_reslide_test.c new file mode 100644 index 000000000..0c6a498fd --- /dev/null +++ b/tests/shared_cache_reslide_test.c @@ -0,0 +1,49 @@ +#define PRIVATE +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#undef PRIVATE + +#include +#include +#include + +#define SHARED_CACHE_HELPER "get_shared_cache_address" +#define DO_RUSAGE_CHECK "check_rusage_flag" +#define DO_DUMMY "dummy" +#define ADDRESS_OUTPUT_SIZE 12L + +#ifndef _POSIX_SPAWN_RESLIDE +#define _POSIX_SPAWN_RESLIDE 0x0800 +#endif + +#ifndef OS_REASON_FLAG_SHAREDREGION_FAULT +#define OS_REASON_FLAG_SHAREDREGION_FAULT 0x400 +#endif + +T_GLOBAL_META(T_META_RUN_CONCURRENTLY(true)); + + +T_DECL(reslide_sharedcache, "crash induced reslide of the shared cache", + T_META_CHECK_LEAKS(false), T_META_IGNORECRASHES(".*shared_cache_reslide_test.*"), + T_META_ASROOT(true)) +{ + T_SKIP("shared cache reslide is currently only supported on arm64e iPhones"); +} diff --git a/tests/shared_cache_tests.c b/tests/shared_cache_tests.c index 572309d03..cb0f257dd 100644 --- a/tests/shared_cache_tests.c +++ b/tests/shared_cache_tests.c @@ -12,6 +12,11 @@ T_DECL(present, "tests that the device is running with a shared cache", T_META_A { size_t shared_cache_len = 0; const void *cache_header = _dyld_get_shared_cache_range(&shared_cache_len); + +#if TARGET_OS_OSX + T_SKIP("shared cache testing support incomplete (57267667)"); +#endif /* TARGET_OS_OSX */ + if ((cache_header == NULL) || (shared_cache_len == 0)) { #if TARGET_OS_OSX char *tmp_dir = (char *) dt_tmpdir(); diff --git a/tests/sigchld_return.c b/tests/sigchld_return.c index 01080d3b3..76ad56015 100644 --- a/tests/sigchld_return.c +++ b/tests/sigchld_return.c @@ -3,15 +3,17 @@ #include #include #include +#include +#include #include T_GLOBAL_META(T_META_RUN_CONCURRENTLY(true)); static int exitcode = 0x6789BEEF; -int should_exit = 0; +static int should_exit = 0; -void +static void handler(int sig, siginfo_t *sip, __unused void *uconp) { /* Should handle the SIGCHLD signal */ @@ -49,3 +51,52 @@ T_DECL(sigchldreturn, "checks that a child process exited with an exitcode retur sleep(1); } } + +T_DECL(sigabrt_test, "check that child process' exitcode contains signum = SIGABRT", T_META_CHECK_LEAKS(false)) +{ + int ret; + siginfo_t siginfo; + pid_t pid = fork(); + int expected_signal = SIGABRT; + if (pid == 0) { + /* child exits with SIGABRT */ + T_LOG("In child process. 
Now signalling SIGABRT"); + (void)signal(SIGABRT, SIG_DFL); + raise(SIGABRT); + T_LOG("Child should not print"); + } else { + ret = waitid(P_PID, (id_t) pid, &siginfo, WEXITED); + T_ASSERT_POSIX_SUCCESS(0, "waitid"); + if (siginfo.si_signo != SIGCHLD) { + T_FAIL("Signal was not SIGCHLD."); + } + T_LOG("si_status = 0x%x , expected = 0x%x \n", siginfo.si_status, expected_signal); + if (siginfo.si_status != expected_signal) { + T_FAIL("Unexpected exitcode"); + } + } +} + +T_DECL(sigkill_test, "check that child process' exitcode contains signum = SIGKILL", T_META_CHECK_LEAKS(false)) +{ + int ret; + siginfo_t siginfo; + pid_t pid = fork(); + int expected_signal = SIGKILL; + if (pid == 0) { + /* child exits with SIGKILL */ + T_LOG("In child process. Now signalling SIGKILL"); + raise(SIGKILL); + T_LOG("Child should not print"); + } else { + ret = waitid(P_PID, (id_t) pid, &siginfo, WEXITED); + T_ASSERT_POSIX_SUCCESS(0, "waitid"); + if (siginfo.si_signo != SIGCHLD) { + T_FAIL("Signal was not SIGCHLD."); + } + T_LOG("si_status = 0x%x , expected = 0x%x \n", siginfo.si_status, expected_signal); + if (siginfo.si_status != expected_signal) { + T_FAIL("Unexpected exitcode"); + } + } +} diff --git a/tests/signal_stack.c b/tests/signal_stack.c new file mode 100644 index 000000000..453549274 --- /dev/null +++ b/tests/signal_stack.c @@ -0,0 +1,56 @@ +#include +#include +#include +#include +#include + +#include + +static uint64_t stack_base, stack_end; + +static void +signal_handler(int __unused signum, struct __siginfo * __unused info, void * __unused uap) +{ + T_LOG("In signal handler\n"); + uint64_t signal_stack = (uint64_t)__builtin_frame_address(0); + T_ASSERT_LE(stack_base, signal_stack, NULL); + T_ASSERT_LE(signal_stack, stack_end, NULL); + T_END; +} + +T_DECL(signalstack, "Check that the signal stack is set up correctly", T_META_ASROOT(YES)) +{ + void* stack_allocation = malloc(SIGSTKSZ); + + stack_base = (uint64_t)stack_allocation; + stack_end = stack_base + SIGSTKSZ; + + T_LOG("stack base = 0x%llx\n", stack_base); + T_LOG("stack end = 0x%llx\n", stack_end); + + stack_t alt_stack; + alt_stack.ss_sp = stack_allocation; + alt_stack.ss_size = SIGSTKSZ; + alt_stack.ss_flags = 0; + + if (sigaltstack(&alt_stack, NULL) < 0) { + T_FAIL("error: sigaltstack failed\n"); + } + + sigset_t signal_mask; + sigemptyset(&signal_mask); + + struct sigaction sig_action; + sig_action.sa_sigaction = signal_handler; + sig_action.sa_mask = signal_mask; + sig_action.sa_flags = SA_ONSTACK; + + if (sigaction(SIGUSR1, &sig_action, NULL) != 0) { + T_FAIL("error: sigaction failed\n"); + } + + T_LOG("Sending a SIGUSR1\n"); + kill(getpid(), SIGUSR1); + + return; +} diff --git a/tests/sioc-if-addr-bounds.c b/tests/sioc-if-addr-bounds.c index b7b124a31..94cab7f41 100644 --- a/tests/sioc-if-addr-bounds.c +++ b/tests/sioc-if-addr-bounds.c @@ -49,6 +49,8 @@ #include +#include + #include "ioc_str.h" T_GLOBAL_META(T_META_NAMESPACE("xnu.net")); @@ -182,6 +184,259 @@ HexDump(void *data, size_t len) } } +static size_t +snprint_dottedhex(char *str, size_t strsize, const void *data, const size_t datasize) +{ + size_t is = 0, ip = 0; + const unsigned char *ptr = (const unsigned char *)data; + + for (is = 0, ip = 0; is + 3 < strsize - 1 && ip < datasize; ip++) { + unsigned char msnbl = ptr[ip] >> 4; + unsigned char lsnbl = ptr[ip] & 0x0f; + + if (ip > 0) { + str[is++] = '.'; + } + str[is++] = (char)(msnbl + (msnbl < 10 ? '0' : 'a' - 10)); + str[is++] = (char)(lsnbl + (lsnbl < 10 ? 
'0' : 'a' - 10)); + } + str[is] = 0; + return is; +} + +static void +print_sockaddr_dl(const char *pre, const struct sockaddr *sa, const char *post) +{ + char nbuffer[256]; + char abuffer[256]; + char sbuffer[256]; + struct sockaddr_dl sdl = {}; + + if (sa == NULL) { + return; + } + memcpy(&sdl, sa, MIN(sizeof(sdl), sa->sa_len)); + strlcpy(nbuffer, sdl.sdl_data, sdl.sdl_nlen); + snprint_dottedhex(abuffer, sizeof(abuffer), sdl.sdl_data + sdl.sdl_nlen, sdl.sdl_alen); + snprint_dottedhex(sbuffer, sizeof(sbuffer), sdl.sdl_data + sdl.sdl_nlen + sdl.sdl_alen, sdl.sdl_slen); + + T_LOG("%ssdl_len %u sdl_family %u sdl_index %u sdl_type %u sdl_nlen %u (%s) sdl_alen %u (%s) sdl_slen %u (%s)%s", + pre != NULL ? pre : "", + sdl.sdl_len, sdl.sdl_family, sdl.sdl_index, sdl.sdl_type, + sdl.sdl_nlen, nbuffer, sdl.sdl_alen, abuffer, sdl.sdl_slen, sbuffer, + post != NULL ? post : ""); +} + +static void +print_sockaddr_in(const char *pre, const struct sockaddr *sa, const char *post) +{ + char abuffer[256]; + char zbuffer[256]; + struct sockaddr_in sin = {}; + + if (sa == NULL) { + return; + } + + memcpy(&sin, sa, MIN(sizeof(sin), sa->sa_len)); + inet_ntop(AF_INET, &sin.sin_addr, abuffer, sizeof(abuffer)); + snprint_dottedhex(zbuffer, sizeof(zbuffer), sin.sin_zero, sizeof(sin.sin_zero)); + + T_LOG("%ssin_len %u sin_family %u sin_port %u sin_addr %s sin_zero %s%s", + pre != NULL ? pre : "", + sin.sin_len, sin.sin_family, htons(sin.sin_port), abuffer, zbuffer, + post != NULL ? post : ""); +} + +static void +print_sockaddr_in6(const char *pre, const struct sockaddr *sa, const char *post) +{ + char abuffer[256]; + struct sockaddr_in6 sin6 = {}; + + if (sa == NULL) { + return; + } + + memcpy(&sin6, sa, MIN(sizeof(sin6), sa->sa_len)); + inet_ntop(AF_INET6, &sin6.sin6_addr, abuffer, sizeof(abuffer)); + + T_LOG("%ssin6_len %u sin6_family %u sin6_port %u sin6_flowinfo %u sin6_addr %s sin6_scope_id %u%s", + pre != NULL ? pre : "", + sin6.sin6_len, sin6.sin6_family, htons(sin6.sin6_port), sin6.sin6_flowinfo, abuffer, sin6.sin6_scope_id, + post != NULL ? post : ""); +} + +static void +print_sockaddr(const char *pre, const struct sockaddr *sa, const char *post) +{ + char buffer[256]; + + if (sa == NULL) { + return; + } + + snprint_dottedhex(buffer, sizeof(buffer), sa->sa_data, sa->sa_len - 2); + + T_LOG("%ssa_len %u sa_family %u sa_data %s%s", + pre != NULL ? pre : "", + sa->sa_len, sa->sa_family, buffer, + post != NULL ? post : ""); +} + + +#define ROUNDUP(a, size) (((a) & ((size) - 1)) ? (1 + ((a)|(size - 1))) : (a)) + +#define NEXT_SA(p) (struct sockaddr *) \ + ((caddr_t)p + (p->sa_len ? ROUNDUP(p->sa_len, sizeof(u_int32_t)) : \ + sizeof(u_long))) + +static size_t +get_rti_info(int addrs, struct sockaddr *sa, struct sockaddr **rti_info) +{ + int i; + size_t len = 0; + + for (i = 0; i < RTAX_MAX; i++) { + if (addrs & (1 << i)) { + rti_info[i] = sa; + if (sa->sa_len < sizeof(struct sockaddr)) { + len += sizeof(struct sockaddr); + } else { + len += sa->sa_len; + } + sa = NEXT_SA(sa); + } else { + rti_info[i] = NULL; + } + } + return len; +} + +static void +print_address(const char *pre, const struct sockaddr *sa, const char *post, u_char asFamily) +{ + if (sa == NULL) { + T_LOG("%s(NULL)%s", + pre != NULL ? pre : "", + post != NULL ? post : ""); + return; + } + if (sa->sa_len == 0) { + T_LOG("%ssa_len 0%s", + pre != NULL ? pre : "", + post != NULL ? post : ""); + return; + } + if (sa->sa_len == 1) { + T_LOG("%ssa_len 1%s", + pre != NULL ? pre : "", + post != NULL ? 
post : ""); + return; + } + + // If not forced + if (asFamily == AF_UNSPEC) { + asFamily = sa->sa_family; + } + switch (asFamily) { + case AF_INET: { + print_sockaddr_in(pre, sa, post); + break; + } + case AF_INET6: { + print_sockaddr_in6(pre, sa, post); + break; + } + case AF_LINK: { + print_sockaddr_dl(pre, sa, post); + break; + } + default: + print_sockaddr(pre, sa, post); + break; + } +} + +static void +print_rti_info(struct sockaddr *rti_info[]) +{ + struct sockaddr *sa; + u_char asFamily = 0; + + if ((sa = rti_info[RTAX_IFA])) { + asFamily = sa->sa_family; + print_address(" RTAX_IFA ", sa, "\n", 0); + } + if ((sa = rti_info[RTAX_DST])) { + asFamily = sa->sa_family; + print_address(" RTAX_DST ", sa, "\n", 0); + } + if ((sa = rti_info[RTAX_BRD])) { + print_address(" RTAX_BRD ", sa, "\n", asFamily); + } + + if ((sa = rti_info[RTAX_NETMASK])) { + print_address(" RTAX_NETMASK ", sa, "\n", asFamily); + } + + if ((sa = rti_info[RTAX_GATEWAY])) { + print_address(" RTAX_GATEWAY ", sa, "\n", 0); + } + + if ((sa = rti_info[RTAX_GENMASK])) { + print_address(" RTAX_GENMASK ", sa, "\n", asFamily); + } + + if ((sa = rti_info[RTAX_AUTHOR])) { + print_address(" RTAX_AUTHOR ", sa, "\n", asFamily); + } + + if ((sa = rti_info[RTAX_IFP])) { + print_address(" RTAX_IFP ", sa, "\n", 0); + } +} + +static void +print_rt_iflist2(const char *label) +{ + size_t len; + int mib[6] = { CTL_NET, PF_ROUTE, 0, 0, NET_RT_IFLIST2, 0 }; + unsigned char *buf = NULL; + unsigned char *lim, *next; + struct if_msghdr *ifmsg; + + T_LOG("interface address list for %s", label); + + T_QUIET; T_EXPECT_POSIX_SUCCESS(sysctl(mib, 6, NULL, &len, NULL, 0), "sysctl NET_RT_IFLIST2"); + + T_QUIET; T_ASSERT_NOTNULL(buf = calloc(1, len), "rt_if_list_buf calloc(1, %zd)", len); + + T_QUIET; T_EXPECT_POSIX_SUCCESS(sysctl(mib, 6, buf, &len, NULL, 0), "sysctl NET_RT_IFLIST2"); + + lim = buf + len; + for (next = buf; next < lim; next += ifmsg->ifm_msglen) { + ifmsg = (struct if_msghdr *)(void *)next; + char ifname[IF_NAMESIZE + 1]; + + if (ifmsg->ifm_type == RTM_IFINFO2) { + struct if_msghdr2 *ifm = (struct if_msghdr2 *)ifmsg; + struct sockaddr *sa = (struct sockaddr *)(ifm + 1); + + (void)if_indextoname(ifm->ifm_index, ifname); + T_LOG("interface: %s", ifname); + print_address(" PRIMARY ", sa, "", 0); + } else if (ifmsg->ifm_type == RTM_NEWADDR) { + struct sockaddr *rti_info[RTAX_MAX]; + struct ifa_msghdr *ifam = (struct ifa_msghdr *)ifmsg; + + (void) get_rti_info(ifam->ifam_addrs, (struct sockaddr *)(ifam + 1), rti_info); + + print_rti_info(rti_info); + } + } + free(buf); +} static int check_rt_if_list_for_pattern(const char *label, unsigned char pattern, size_t pattern_size) @@ -220,6 +475,7 @@ check_rt_if_list_for_pattern(const char *label, unsigned char pattern, size_t pa HexDump(rt_if_list_buf, len); } } + free(rt_if_list_buf); free(pattern_buf); @@ -373,6 +629,7 @@ test_sioc_ifr_bounds(struct ioc_ifreq *ioc_ifreq, int s, const char *ifname) T_EXPECT_EQ(check_rt_if_list_for_pattern("test_sioc_ifr_bounds", pattern, PATTERN_SIZE), 0, "pattern should not be found"); + fflush(stdout); fflush(stderr); } @@ -402,6 +659,7 @@ T_DECL(sioc_ifr_bounds, "test bound checks on struct ifreq addresses passed to i for (ioc_ifreq = ioc_list; ioc_ifreq->error != -1; ioc_ifreq++) { test_sioc_ifr_bounds(ioc_ifreq, s, ifname); } + print_rt_iflist2(__func__); (void)ifnet_destroy(s, ifname, true); close(s); @@ -615,6 +873,8 @@ T_DECL(sioc_ifra_addr_bounds, "test bound checks on socket address passed to int T_EXPECT_EQ(check_rt_if_list_for_pattern("after ioctl 
SIOCAIFADDR", pattern, PATTERN_SIZE), 0, "pattern should not be found"); } + print_rt_iflist2(__func__); + (void)ifnet_destroy(s, ifname, true); close(s); @@ -656,6 +916,8 @@ T_DECL(sioc_ifr_dstaddr_leak, "test bound checks on socket address passed to int T_EXPECT_POSIX_SUCCESS(ioctl(s, SIOCSIFDSTADDR, &ifr), "ioctl(SIOCSIFDSTADDR)"); + print_rt_iflist2(__func__); + close(s); T_EXPECT_EQ(check_rt_if_list_for_pattern("AFTER", pattern, PATTERN_SIZE), 0, "pattern should not be found"); diff --git a/tests/sr_entitlement.c b/tests/sr_entitlement.c new file mode 100644 index 000000000..b0c496d15 --- /dev/null +++ b/tests/sr_entitlement.c @@ -0,0 +1,103 @@ +#include +#include +#include + +#include +#include + +T_GLOBAL_META(T_META_RUN_CONCURRENTLY(false)); + +static int after_regions = 0; + +/* + * No system(3c) on watchOS, so provide our own. + */ +static int +my_system(const char *command) +{ + pid_t pid; + int status = 0; + const char *argv[] = { + "/bin/sh", + "-c", + command, + NULL + }; + + if (dt_launch_tool(&pid, (char **)(void *)argv, FALSE, NULL, NULL)) { + return -1; + } + sleep(2); /* let the child start running */ + + size_t size = sizeof(after_regions); + int ret = sysctlbyname("vm.shared_region_pager_count", &after_regions, &size, NULL, 0); + T_QUIET; T_EXPECT_POSIX_SUCCESS(ret, "get shared_region_pager_count after"); + + if (!dt_waitpid(pid, &status, NULL, 30)) { + if (status != 0) { + return status; + } + return -1; + } + return status; +} + +/* + * If shared regions by entitlement was not originally active, turn it back off. + */ +static int orig_setting = 0; +static void +cleanup(void) +{ + int ret; + int off = 0; + size_t size_off = sizeof(off); + + if (orig_setting == 0) { + ret = sysctlbyname("vm.vm_shared_region_by_entitlement", NULL, NULL, &off, size_off); + T_QUIET; T_EXPECT_POSIX_SUCCESS(ret, "turning sysctl back off"); + } +} + +/* + * This test: + * - looks at the number of shared region pagers, + * - launches a helper app that has entitlement for unique signing + * - gets the number of shared region pagers again. + * It expects to see additional shared region pager(s) to exist. + * + */ +T_DECL(sr_entitlement, "shared region by entitlement test") +{ + int ret; + size_t size; + int before_regions = 0; + int on = 1; + size_t size_on = sizeof(on); + + T_SKIP("No pointer authentication support"); + + /* + * Check if the sysctl vm_shared_region_by_entitlement exists and if so make + * sure it is set. 
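+ * A single sysctlbyname() call below does both jobs: the previous value is read
+ * into orig_setting (so cleanup() knows whether to turn the sysctl back off) and
+ * the new value 1 is installed; a non-zero return means the sysctl does not
+ * exist, in which case the test is skipped.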
+ */ + size = sizeof(orig_setting); + ret = sysctlbyname("vm.vm_shared_region_by_entitlement", &orig_setting, &size, &on, size_on); + if (ret != 0) { + T_SKIP("No pointer authentication support"); + } + + T_ATEND(cleanup); + + size = sizeof(before_regions); + ret = sysctlbyname("vm.shared_region_pager_count", &before_regions, &size, NULL, 0); + T_QUIET; T_EXPECT_POSIX_SUCCESS(ret, "get shared_region_pager_count before"); + T_QUIET; T_EXPECT_GE_INT(before_regions, 1, "invalid before number of regions"); + + ret = my_system("./sr_entitlement_helper"); + if (ret != 0) { + T_ASSERT_FAIL("Couldn't run helper first time ret = %d", ret); + } + + T_EXPECT_GT_INT(after_regions, before_regions, "expected additional SR pagers after running helper"); +} diff --git a/tests/sr_entitlement.entitlements b/tests/sr_entitlement.entitlements new file mode 100644 index 000000000..f1ca55097 --- /dev/null +++ b/tests/sr_entitlement.entitlements @@ -0,0 +1,8 @@ + + + + + com.apple.pac.shared_region_id + SharedRegionTest + + diff --git a/tests/sr_entitlement_helper.c b/tests/sr_entitlement_helper.c new file mode 100644 index 000000000..198179274 --- /dev/null +++ b/tests/sr_entitlement_helper.c @@ -0,0 +1,15 @@ +#include +#include +#include + +/* + * This is a test helper that just has to run for a while. + */ +int +main(int argc, char **argv) +{ + printf("Hello, world.\n"); + sleep(15); + printf("That's all folks.\n"); + exit(0); +} diff --git a/tests/stackshot_accuracy.m b/tests/stackshot_accuracy.m index 9ff129091..02458072d 100644 --- a/tests/stackshot_accuracy.m +++ b/tests/stackshot_accuracy.m @@ -82,11 +82,11 @@ kill_children(void) } static void * -take_stackshot(pid_t target_pid, uint32_t extra_flags, uint64_t since_timestamp) +take_stackshot(pid_t target_pid, uint64_t extra_flags, uint64_t since_timestamp) { void *stackshot_config; int err, retries = 5; - uint32_t stackshot_flags = STACKSHOT_KCDATA_FORMAT | + uint64_t stackshot_flags = STACKSHOT_KCDATA_FORMAT | STACKSHOT_THREAD_WAITINFO | STACKSHOT_GET_DQ; diff --git a/tests/stackshot_spawn_exit_stress.c b/tests/stackshot_spawn_exit_stress.c index 342ea2bc6..07fd21e66 100644 --- a/tests/stackshot_spawn_exit_stress.c +++ b/tests/stackshot_spawn_exit_stress.c @@ -42,7 +42,7 @@ T_HELPER_DECL(spawn_children_helper, "spawn_children helper") static void take_stackshot(void) { - uint32_t stackshot_flags = (STACKSHOT_SAVE_LOADINFO | STACKSHOT_GET_GLOBAL_MEM_STATS | + uint64_t stackshot_flags = (STACKSHOT_SAVE_LOADINFO | STACKSHOT_GET_GLOBAL_MEM_STATS | STACKSHOT_SAVE_IMP_DONATION_PIDS | STACKSHOT_KCDATA_FORMAT); void *config = stackshot_config_create(); diff --git a/tests/stackshot_tests.m b/tests/stackshot_tests.m index 1777335c3..ce904d2d8 100644 --- a/tests/stackshot_tests.m +++ b/tests/stackshot_tests.m @@ -1,5 +1,6 @@ #include #include +#include #include #include #include @@ -10,6 +11,10 @@ #include #include #include +#include +#include +#include +#import T_GLOBAL_META( T_META_NAMESPACE("xnu.stackshot"), @@ -19,23 +24,35 @@ T_GLOBAL_META( static const char *current_process_name(void); static void verify_stackshot_sharedcache_layout(struct dyld_uuid_info_64 *uuids, uint32_t uuid_count); -static void parse_stackshot(uint64_t stackshot_parsing_flags, void *ssbuf, size_t sslen, int child_pid); +static void parse_stackshot(uint64_t stackshot_parsing_flags, void *ssbuf, size_t sslen, NSDictionary *extra); static void parse_thread_group_stackshot(void **sbuf, size_t sslen); static uint64_t stackshot_timestamp(void *ssbuf, size_t sslen); static void 
initialize_thread(void); +static uint64_t global_flags = 0; + #define DEFAULT_STACKSHOT_BUFFER_SIZE (1024 * 1024) #define MAX_STACKSHOT_BUFFER_SIZE (6 * 1024 * 1024) +#define SRP_SERVICE_NAME "com.apple.xnu.test.stackshot.special_reply_port" + /* bit flags for parse_stackshot */ #define PARSE_STACKSHOT_DELTA 0x01 #define PARSE_STACKSHOT_ZOMBIE 0x02 #define PARSE_STACKSHOT_SHAREDCACHE_LAYOUT 0x04 #define PARSE_STACKSHOT_DISPATCH_QUEUE_LABEL 0x08 #define PARSE_STACKSHOT_TURNSTILEINFO 0x10 +#define PARSE_STACKSHOT_POSTEXEC 0x20 #define PARSE_STACKSHOT_WAITINFO_CSEG 0x40 +#define PARSE_STACKSHOT_WAITINFO_SRP 0x80 +#define PARSE_STACKSHOT_TRANSLATED 0x100 -static uint64_t cseg_expected_threadid = 0; +/* keys for 'extra' dictionary for parse_stackshot */ +static const NSString* zombie_child_pid_key = @"zombie_child_pid"; // -> @(pid), required for PARSE_STACKSHOT_ZOMBIE +static const NSString* postexec_child_unique_pid_key = @"postexec_child_unique_pid"; // -> @(unique_pid), required for PARSE_STACKSHOT_POSTEXEC +static const NSString* cseg_expected_threadid_key = @"cseg_expected_threadid"; // -> @(tid), required for PARSE_STACKSHOT_WAITINFO_CSEG +static const NSString* srp_expected_pid_key = @"srp_expected_pid"; // -> @(pid), required for PARSE_STACKSHOT_WAITINFO_SRP +static const NSString* translated_child_pid_key = @"translated_child_pid"; // -> @(pid), required for PARSE_STACKSHOT_TRANSLATED #define TEST_STACKSHOT_QUEUE_LABEL "houston.we.had.a.problem" #define TEST_STACKSHOT_QUEUE_LABEL_LENGTH sizeof(TEST_STACKSHOT_QUEUE_LABEL) @@ -52,7 +69,7 @@ T_DECL(microstackshots, "test the microstackshot syscall") #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wdeprecated-declarations" int len = syscall(SYS_microstackshot, buf, size, - STACKSHOT_GET_MICROSTACKSHOT); + (uint32_t) STACKSHOT_GET_MICROSTACKSHOT); #pragma clang diagnostic pop if (len == ENOSYS) { T_SKIP("microstackshot syscall failed, likely not compiled with CONFIG_TELEMETRY"); @@ -79,7 +96,7 @@ T_DECL(microstackshots, "test the microstackshot syscall") struct scenario { const char *name; - uint32_t flags; + uint64_t flags; bool quiet; bool should_fail; bool maybe_unsupported; @@ -98,17 +115,18 @@ quiet(struct scenario *scenario) } static void -take_stackshot(struct scenario *scenario, void (^cb)(void *buf, size_t size)) +take_stackshot(struct scenario *scenario, bool compress_ok, void (^cb)(void *buf, size_t size)) { +start: initialize_thread(); void *config = stackshot_config_create(); quiet(scenario); T_ASSERT_NOTNULL(config, "created stackshot config"); - int ret = stackshot_config_set_flags(config, scenario->flags); + int ret = stackshot_config_set_flags(config, scenario->flags | global_flags); quiet(scenario); - T_ASSERT_POSIX_ZERO(ret, "set flags %#x on stackshot config", scenario->flags); + T_ASSERT_POSIX_ZERO(ret, "set flags %#llx on stackshot config", scenario->flags); if (scenario->size_hint > 0) { ret = stackshot_config_set_size_hint(config, scenario->size_hint); @@ -190,11 +208,60 @@ retry: ; fclose(f); } cb(buf, size); + if (compress_ok) { + if (global_flags == 0) { + T_LOG("Restarting test with compression"); + global_flags |= STACKSHOT_DO_COMPRESS; + goto start; + } else { + global_flags = 0; + } + } ret = stackshot_config_dealloc(config); T_QUIET; T_EXPECT_POSIX_ZERO(ret, "deallocated stackshot config"); } +T_DECL(simple_compressed, "take a simple compressed stackshot") +{ + struct scenario scenario = { + .name = "kcdata_compressed", + .flags = (STACKSHOT_DO_COMPRESS | STACKSHOT_SAVE_LOADINFO | 
STACKSHOT_THREAD_WAITINFO | STACKSHOT_GET_GLOBAL_MEM_STATS | + STACKSHOT_SAVE_IMP_DONATION_PIDS | STACKSHOT_KCDATA_FORMAT), + }; + + T_LOG("taking compressed kcdata stackshot"); + take_stackshot(&scenario, true, ^(void *ssbuf, size_t sslen) { + parse_stackshot(0, ssbuf, sslen, nil); + }); +} + +T_DECL(panic_compressed, "take a compressed stackshot with the same flags as a panic stackshot") +{ + uint64_t stackshot_flags = (STACKSHOT_SAVE_KEXT_LOADINFO | + STACKSHOT_SAVE_LOADINFO | + STACKSHOT_KCDATA_FORMAT | + STACKSHOT_ENABLE_BT_FAULTING | + STACKSHOT_ENABLE_UUID_FAULTING | + STACKSHOT_DO_COMPRESS | + STACKSHOT_NO_IO_STATS | + STACKSHOT_THREAD_WAITINFO | +#if TARGET_OS_MAC + STACKSHOT_COLLECT_SHAREDCACHE_LAYOUT | +#endif + STACKSHOT_DISABLE_LATENCY_INFO); + + struct scenario scenario = { + .name = "kcdata_panic_compressed", + .flags = stackshot_flags, + }; + + T_LOG("taking compressed kcdata stackshot with panic flags"); + take_stackshot(&scenario, true, ^(void *ssbuf, size_t sslen) { + parse_stackshot(0, ssbuf, sslen, nil); + }); +} + T_DECL(kcdata, "test that kcdata stackshots can be taken and parsed") { struct scenario scenario = { @@ -204,8 +271,8 @@ T_DECL(kcdata, "test that kcdata stackshots can be taken and parsed") }; T_LOG("taking kcdata stackshot"); - take_stackshot(&scenario, ^(void *ssbuf, size_t sslen) { - parse_stackshot(0, ssbuf, sslen, -1); + take_stackshot(&scenario, true, ^(void *ssbuf, size_t sslen) { + parse_stackshot(0, ssbuf, sslen, nil); }); } @@ -219,8 +286,8 @@ T_DECL(kcdata_faulting, "test that kcdata stackshots while faulting can be taken }; T_LOG("taking faulting stackshot"); - take_stackshot(&scenario, ^(void *ssbuf, size_t sslen) { - parse_stackshot(0, ssbuf, sslen, -1); + take_stackshot(&scenario, true, ^(void *ssbuf, size_t sslen) { + parse_stackshot(0, ssbuf, sslen, nil); }); } @@ -232,7 +299,7 @@ T_DECL(bad_flags, "test a poorly-formed stackshot syscall") }; T_LOG("attempting to take stackshot with kernel-only flag"); - take_stackshot(&scenario, ^(__unused void *ssbuf, __unused size_t sslen) { + take_stackshot(&scenario, true, ^(__unused void *ssbuf, __unused size_t sslen) { T_ASSERT_FAIL("stackshot data callback called"); }); } @@ -246,12 +313,12 @@ T_DECL(delta, "test delta stackshots") }; T_LOG("taking full stackshot"); - take_stackshot(&scenario, ^(void *ssbuf, size_t sslen) { + take_stackshot(&scenario, false, ^(void *ssbuf, size_t sslen) { uint64_t stackshot_time = stackshot_timestamp(ssbuf, sslen); T_LOG("taking delta stackshot since time %" PRIu64, stackshot_time); - parse_stackshot(0, ssbuf, sslen, -1); + parse_stackshot(0, ssbuf, sslen, nil); struct scenario delta_scenario = { .flags = (STACKSHOT_SAVE_LOADINFO | STACKSHOT_GET_GLOBAL_MEM_STATS @@ -260,8 +327,8 @@ T_DECL(delta, "test delta stackshots") .since_timestamp = stackshot_time }; - take_stackshot(&delta_scenario, ^(void *dssbuf, size_t dsslen) { - parse_stackshot(PARSE_STACKSHOT_DELTA, dssbuf, dsslen, -1); + take_stackshot(&delta_scenario, false, ^(void *dssbuf, size_t dsslen) { + parse_stackshot(PARSE_STACKSHOT_DELTA, dssbuf, dsslen, nil); }); }); } @@ -286,8 +353,8 @@ T_DECL(shared_cache_layout, "test stackshot inclusion of shared cache layout") } T_LOG("taking stackshot with STACKSHOT_COLLECT_SHAREDCACHE_LAYOUT set"); - take_stackshot(&scenario, ^(void *ssbuf, size_t sslen) { - parse_stackshot(PARSE_STACKSHOT_SHAREDCACHE_LAYOUT, ssbuf, sslen, -1); + take_stackshot(&scenario, true, ^(void *ssbuf, size_t sslen) { + parse_stackshot(PARSE_STACKSHOT_SHAREDCACHE_LAYOUT, ssbuf, sslen, nil); 
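
/*
 * Aside on the compressed variants exercised above: a caller opts in simply by
 * adding STACKSHOT_DO_COMPRESS to the configured flags.  A minimal sketch of that
 * request path follows; it assumes the stackshot_config_* and
 * stackshot_capture_with_config() wrappers from libsyscall (<sys/stackshot.h>),
 * which this harness builds on but which are not shown in this patch.
 */
#include <sys/stackshot.h>
#include <kern/debug.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/* Sketch: capture a zlib-compressed kcdata stackshot of the whole system. */
static void *
capture_compressed_stackshot(size_t *out_len)
{
	void *config = stackshot_config_create();
	if (config == NULL) {
		return NULL;
	}
	/* STACKSHOT_DO_COMPRESS makes the kernel emit a KCDATA_BUFFER_BEGIN_COMPRESSED stream. */
	uint64_t flags = STACKSHOT_KCDATA_FORMAT | STACKSHOT_THREAD_WAITINFO | STACKSHOT_DO_COMPRESS;
	if (stackshot_config_set_flags(config, flags) != 0 ||
	    stackshot_capture_with_config(config) != 0) {
		stackshot_config_dealloc(config);
		return NULL;
	}
	size_t len = stackshot_config_get_stackshot_size(config);
	void *copy = malloc(len);
	if (copy != NULL) {
		memcpy(copy, stackshot_config_get_stackshot_buffer(config), len);
		*out_len = len;
	}
	stackshot_config_dealloc(config);
	return copy;
}
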
}); } @@ -316,7 +383,7 @@ T_DECL(stress, "test that taking stackshots for 60 seconds doesn't crash the sys start_time = clock_gettime_nsec_np(CLOCK_MONOTONIC); while (clock_gettime_nsec_np(CLOCK_MONOTONIC) - start_time < max_diff_time) { - take_stackshot(&scenario, ^(void *ssbuf, size_t sslen) { + take_stackshot(&scenario, false, ^(void *ssbuf, size_t sslen) { printf("."); fflush(stdout); }); @@ -361,8 +428,8 @@ T_DECL(dispatch_queue_label, "test that kcdata stackshots contain libdispatch qu dispatch_semaphore_wait(child_ready_sem, DISPATCH_TIME_FOREVER); T_LOG("taking kcdata stackshot with libdispatch queue labels"); - take_stackshot(&scenario, ^(void *ssbuf, size_t sslen) { - parse_stackshot(PARSE_STACKSHOT_DISPATCH_QUEUE_LABEL, ssbuf, sslen, -1); + take_stackshot(&scenario, true, ^(void *ssbuf, size_t sslen) { + parse_stackshot(PARSE_STACKSHOT_DISPATCH_QUEUE_LABEL, ssbuf, sslen, nil); }); dispatch_semaphore_signal(parent_done_sem); @@ -468,14 +535,114 @@ T_DECL(zombie, "tests a stackshot of a zombie task with a thread stuck in the ke | STACKSHOT_SAVE_IMP_DONATION_PIDS | STACKSHOT_KCDATA_FORMAT), }; - take_stackshot(&scenario, ^( void *ssbuf, size_t sslen) { + take_stackshot(&scenario, false, ^( void *ssbuf, size_t sslen) { /* First unwedge the child so we can reap it */ int val = 1, status; T_ASSERT_POSIX_SUCCESS(sysctlbyname("kern.unwedge_thread", NULL, NULL, &val, sizeof(val)), "unwedge child"); T_QUIET; T_ASSERT_POSIX_SUCCESS(waitpid(pid, &status, 0), "waitpid on zombie child"); - parse_stackshot(PARSE_STACKSHOT_ZOMBIE, ssbuf, sslen, pid); + parse_stackshot(PARSE_STACKSHOT_ZOMBIE, ssbuf, sslen, @{zombie_child_pid_key: @(pid)}); + }); +} + +T_HELPER_DECL(exec_child_preexec, "child process pre-exec") +{ + dispatch_queue_t signal_processing_q = dispatch_queue_create("signal processing queue", NULL); + T_QUIET; T_ASSERT_NOTNULL(signal_processing_q, "signal processing queue"); + + signal(SIGUSR1, SIG_IGN); + dispatch_source_t parent_sig_src = dispatch_source_create(DISPATCH_SOURCE_TYPE_SIGNAL, SIGUSR1, 0, signal_processing_q); + T_QUIET; T_ASSERT_NOTNULL(parent_sig_src, "dispatch_source_create (child_sig_src)"); + dispatch_source_set_event_handler(parent_sig_src, ^{ + + // Parent took a timestamp then signaled us: exec into the next process + + char path[PATH_MAX]; + uint32_t path_size = sizeof(path); + T_QUIET; T_ASSERT_POSIX_ZERO(_NSGetExecutablePath(path, &path_size), "_NSGetExecutablePath"); + char *args[] = { path, "-n", "exec_child_postexec", NULL }; + + T_QUIET; T_ASSERT_POSIX_ZERO(execve(args[0], args, NULL), "execing into exec_child_postexec"); + }); + dispatch_activate(parent_sig_src); + + T_ASSERT_POSIX_SUCCESS(kill(getppid(), SIGUSR1), "signaled parent to take timestamp"); + + sleep(100); + // Should never get here + T_FAIL("Received signal to exec from parent"); +} + +T_HELPER_DECL(exec_child_postexec, "child process post-exec to sample") +{ + T_ASSERT_POSIX_SUCCESS(kill(getppid(), SIGUSR1), "signaled parent to take stackshot"); + sleep(100); + // Should never get here + T_FAIL("Killed by parent"); +} + +T_DECL(exec, "test getting full task snapshots for a task that execs") +{ + char path[PATH_MAX]; + uint32_t path_size = sizeof(path); + T_QUIET; T_ASSERT_POSIX_ZERO(_NSGetExecutablePath(path, &path_size), "_NSGetExecutablePath"); + char *args[] = { path, "-n", "exec_child_preexec", NULL }; + + dispatch_source_t child_sig_src; + dispatch_semaphore_t child_ready_sem = dispatch_semaphore_create(0); + T_QUIET; T_ASSERT_NOTNULL(child_ready_sem, "exec child semaphore"); + + 
dispatch_queue_t signal_processing_q = dispatch_queue_create("signal processing queue", NULL); + T_QUIET; T_ASSERT_NOTNULL(signal_processing_q, "signal processing queue"); + + pid_t pid; + + T_LOG("spawning a child"); + + signal(SIGUSR1, SIG_IGN); + child_sig_src = dispatch_source_create(DISPATCH_SOURCE_TYPE_SIGNAL, SIGUSR1, 0, signal_processing_q); + T_QUIET; T_ASSERT_NOTNULL(child_sig_src, "dispatch_source_create (child_sig_src)"); + + dispatch_source_set_event_handler(child_sig_src, ^{ dispatch_semaphore_signal(child_ready_sem); }); + dispatch_activate(child_sig_src); + + int sp_ret = posix_spawn(&pid, args[0], NULL, NULL, args, NULL); + T_QUIET; T_ASSERT_POSIX_ZERO(sp_ret, "spawned process '%s' with PID %d", args[0], pid); + + dispatch_semaphore_wait(child_ready_sem, DISPATCH_TIME_FOREVER); + + uint64_t start_time = mach_absolute_time(); + + struct proc_uniqidentifierinfo proc_info_data = { }; + int retval = proc_pidinfo(getpid(), PROC_PIDUNIQIDENTIFIERINFO, 0, &proc_info_data, sizeof(proc_info_data)); + T_QUIET; T_EXPECT_POSIX_SUCCESS(retval, "proc_pidinfo PROC_PIDUNIQIDENTIFIERINFO"); + T_QUIET; T_ASSERT_EQ_INT(retval, (int) sizeof(proc_info_data), "proc_pidinfo PROC_PIDUNIQIDENTIFIERINFO returned data"); + uint64_t unique_pid = proc_info_data.p_uniqueid; + + T_LOG("received signal from pre-exec child, unique_pid is %llu, timestamp is %llu", unique_pid, start_time); + + T_ASSERT_POSIX_SUCCESS(kill(pid, SIGUSR1), "signaled pre-exec child to exec"); + + dispatch_semaphore_wait(child_ready_sem, DISPATCH_TIME_FOREVER); + + T_LOG("received signal from post-exec child, capturing stackshot"); + + struct scenario scenario = { + .name = "exec", + .flags = (STACKSHOT_SAVE_LOADINFO | STACKSHOT_GET_GLOBAL_MEM_STATS + | STACKSHOT_SAVE_IMP_DONATION_PIDS | STACKSHOT_KCDATA_FORMAT + | STACKSHOT_COLLECT_DELTA_SNAPSHOT), + .since_timestamp = start_time + }; + + take_stackshot(&scenario, false, ^( void *ssbuf, size_t sslen) { + // Kill the child + int status; + T_ASSERT_POSIX_SUCCESS(kill(pid, SIGKILL), "kill post-exec child %d", pid); + T_ASSERT_POSIX_SUCCESS(waitpid(pid, &status, 0), "waitpid on post-exec child"); + + parse_stackshot(PARSE_STACKSHOT_POSTEXEC | PARSE_STACKSHOT_DELTA, ssbuf, sslen, @{postexec_child_unique_pid_key: @(unique_pid)}); }); } @@ -514,8 +681,8 @@ T_DECL(turnstile_singlehop, "turnstile single hop test") dispatch_queue_t dq1, dq2; dispatch_semaphore_t sema_x; dispatch_queue_attr_t dq1_attr, dq2_attr; - qos_class_t main_qos = 0; - int main_relpri = 0, main_relpri2 = 0, main_afterpri = 0; + __block qos_class_t main_qos = 0; + __block int main_relpri = 0, main_relpri2 = 0, main_afterpri = 0; struct scenario scenario = { .name = "turnstile_singlehop", .flags = (STACKSHOT_THREAD_WAITINFO | STACKSHOT_KCDATA_FORMAT), @@ -559,7 +726,7 @@ T_DECL(turnstile_singlehop, "turnstile single hop test") T_LOG("Async2 completed"); while (1) { - main_afterpri = get_user_promotion_basepri(); + main_afterpri = (int) get_user_promotion_basepri(); if (main_relpri != main_afterpri) { T_LOG("Success with promotion pri is %d", main_afterpri); break; @@ -568,8 +735,8 @@ T_DECL(turnstile_singlehop, "turnstile single hop test") usleep(100); } - take_stackshot(&scenario, ^( void *ssbuf, size_t sslen) { - parse_stackshot(PARSE_STACKSHOT_TURNSTILEINFO, ssbuf, sslen, -1); + take_stackshot(&scenario, true, ^( void *ssbuf, size_t sslen) { + parse_stackshot(PARSE_STACKSHOT_TURNSTILEINFO, ssbuf, sslen, nil); }); } @@ -647,8 +814,8 @@ T_DECL(instrs_cycles, "test a getting instructions and cycles in stackshot") }; 
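
/*
 * The exec and translated-child tests above coordinate parent and child with the
 * same rendezvous: SIGUSR1 is ignored as a classic handler and instead delivered
 * through a dispatch signal source that signals a semaphore.  A distilled sketch
 * of that handshake (the helper and queue names here are illustrative only):
 */
#include <dispatch/dispatch.h>
#include <signal.h>

/* Block the caller until this process receives SIGUSR1. */
static void
wait_for_sigusr1(void)
{
	dispatch_semaphore_t sem = dispatch_semaphore_create(0);
	dispatch_queue_t q = dispatch_queue_create("sigusr1.wait", NULL);

	/* Ignore the default disposition so the signal is only observed by the source. */
	signal(SIGUSR1, SIG_IGN);

	dispatch_source_t src = dispatch_source_create(DISPATCH_SOURCE_TYPE_SIGNAL, SIGUSR1, 0, q);
	dispatch_source_set_event_handler(src, ^{
		dispatch_semaphore_signal(sem);
	});
	dispatch_activate(src);

	/* The child side simply does kill(getppid(), SIGUSR1) when it is ready. */
	dispatch_semaphore_wait(sem, DISPATCH_TIME_FOREVER);
	dispatch_source_cancel(src);
}
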
T_LOG("attempting to take stackshot with instructions and cycles"); - take_stackshot(&scenario, ^(void *ssbuf, size_t sslen) { - parse_stackshot(0, ssbuf, sslen, -1); + take_stackshot(&scenario, false, ^(void *ssbuf, size_t sslen) { + parse_stackshot(0, ssbuf, sslen, nil); expect_instrs_cycles_in_stackshot(ssbuf, sslen); }); } @@ -665,12 +832,12 @@ T_DECL(delta_instrs_cycles, }; T_LOG("taking full stackshot"); - take_stackshot(&scenario, ^(void *ssbuf, size_t sslen) { + take_stackshot(&scenario, false, ^(void *ssbuf, size_t sslen) { uint64_t stackshot_time = stackshot_timestamp(ssbuf, sslen); T_LOG("taking delta stackshot since time %" PRIu64, stackshot_time); - parse_stackshot(0, ssbuf, sslen, -1); + parse_stackshot(0, ssbuf, sslen, nil); expect_instrs_cycles_in_stackshot(ssbuf, sslen); struct scenario delta_scenario = { @@ -681,8 +848,8 @@ T_DECL(delta_instrs_cycles, .since_timestamp = stackshot_time, }; - take_stackshot(&delta_scenario, ^(void *dssbuf, size_t dsslen) { - parse_stackshot(PARSE_STACKSHOT_DELTA, dssbuf, dsslen, -1); + take_stackshot(&delta_scenario, false, ^(void *dssbuf, size_t dsslen) { + parse_stackshot(PARSE_STACKSHOT_DELTA, dssbuf, dsslen, nil); expect_instrs_cycles_in_stackshot(dssbuf, dsslen); }); }); @@ -711,7 +878,7 @@ T_DECL(thread_groups, "test getting thread groups in stackshot") }; T_LOG("attempting to take stackshot with thread group flag"); - take_stackshot(&scenario, ^(void *ssbuf, size_t sslen) { + take_stackshot(&scenario, false, ^(void *ssbuf, size_t sslen) { parse_thread_group_stackshot(ssbuf, sslen); }); } @@ -802,7 +969,7 @@ T_DECL(dump_page_tables, "test stackshot page table dumping support") }; T_LOG("attempting to take stackshot with ASID and page table flags"); - take_stackshot(&scenario, ^(void *ssbuf, size_t sslen) { + take_stackshot(&scenario, false, ^(void *ssbuf, size_t sslen) { parse_page_table_asid_stackshot(ssbuf, sslen); }); } @@ -843,6 +1010,69 @@ static void stackshot_verify_current_proc_uuid_info(void **ssbuf, size_t sslen, T_FAIL("failed to find matching UUID in stackshot data"); } +T_DECL(translated, "tests translated bit is set correctly") +{ +#if !(TARGET_OS_OSX && TARGET_CPU_ARM64) + T_SKIP("Not arm mac") +#endif + // Get path of stackshot_translated_child helper binary + char path[PATH_MAX]; + uint32_t path_size = sizeof(path); + T_QUIET; T_ASSERT_POSIX_ZERO(_NSGetExecutablePath(path, &path_size), "_NSGetExecutablePath"); + char* binary_name = strrchr(path, '/'); + if (binary_name) binary_name++; + T_QUIET; T_ASSERT_NOTNULL(binary_name, "Find basename in path '%s'", path); + strlcpy(binary_name, "stackshot_translated_child", path_size - (binary_name - path)); + char *args[] = { path, NULL }; + + dispatch_source_t child_sig_src; + dispatch_semaphore_t child_ready_sem = dispatch_semaphore_create(0); + T_QUIET; T_ASSERT_NOTNULL(child_ready_sem, "exec child semaphore"); + + dispatch_queue_t signal_processing_q = dispatch_queue_create("signal processing queue", NULL); + T_QUIET; T_ASSERT_NOTNULL(signal_processing_q, "signal processing queue"); + + signal(SIGUSR1, SIG_IGN); + child_sig_src = dispatch_source_create(DISPATCH_SOURCE_TYPE_SIGNAL, SIGUSR1, 0, signal_processing_q); + T_QUIET; T_ASSERT_NOTNULL(child_sig_src, "dispatch_source_create (child_sig_src)"); + + dispatch_source_set_event_handler(child_sig_src, ^{ dispatch_semaphore_signal(child_ready_sem); }); + dispatch_activate(child_sig_src); + + // Spawn child + pid_t pid; + T_LOG("spawning translated child"); + T_QUIET; T_ASSERT_POSIX_ZERO(posix_spawn(&pid, args[0], NULL, NULL, 
args, NULL), "spawned process '%s' with PID %d", args[0], pid); + + // Wait for the the child to spawn up + dispatch_semaphore_wait(child_ready_sem, DISPATCH_TIME_FOREVER); + + // Make sure the child is running and is translated + int mib[] = { CTL_KERN, KERN_PROC, KERN_PROC_PID, pid }; + struct kinfo_proc process_info; + size_t bufsize = sizeof(process_info); + T_QUIET; T_ASSERT_POSIX_SUCCESS(sysctl(mib, (unsigned)(sizeof(mib)/sizeof(int)), &process_info, &bufsize, NULL, 0), "get translated child process info"); + T_QUIET; T_ASSERT_GT(bufsize, 0, "process info is not empty"); + T_QUIET; T_ASSERT_TRUE((process_info.kp_proc.p_flag & P_TRANSLATED), "KERN_PROC_PID reports child is translated"); + + T_LOG("capturing stackshot"); + + struct scenario scenario = { + .name = "translated", + .flags = (STACKSHOT_SAVE_LOADINFO | STACKSHOT_GET_GLOBAL_MEM_STATS + | STACKSHOT_SAVE_IMP_DONATION_PIDS | STACKSHOT_KCDATA_FORMAT), + }; + + take_stackshot(&scenario, true, ^( void *ssbuf, size_t sslen) { + // Kill the child + int status; + T_QUIET; T_ASSERT_POSIX_SUCCESS(kill(pid, SIGTERM), "kill translated child"); + T_QUIET; T_ASSERT_POSIX_SUCCESS(waitpid(pid, &status, 0), "waitpid on translated child"); + + parse_stackshot(PARSE_STACKSHOT_TRANSLATED, ssbuf, sslen, @{translated_child_pid_key: @(pid)}); + }); +} + T_DECL(proc_uuid_info, "tests that the main binary UUID for a proc is always populated") { struct proc_uniqidentifierinfo proc_info_data = { }; @@ -890,7 +1120,7 @@ T_DECL(proc_uuid_info, "tests that the main binary UUID for a proc is always pop }; T_LOG("attempting to take stackshot for current PID"); - take_stackshot(&scenario, ^(void *ssbuf, size_t sslen) { + take_stackshot(&scenario, false, ^(void *ssbuf, size_t sslen) { stackshot_verify_current_proc_uuid_info(ssbuf, sslen, expected_mach_header_offset, &proc_info_data); }); } @@ -903,12 +1133,13 @@ T_DECL(cseg_waitinfo, "test that threads stuck in the compressor report correct .quiet = false, .flags = (STACKSHOT_THREAD_WAITINFO | STACKSHOT_KCDATA_FORMAT), }; + __block uint64_t thread_id = 0; dispatch_queue_t dq = dispatch_queue_create("com.apple.stackshot.cseg_waitinfo", NULL); dispatch_semaphore_t child_ok = dispatch_semaphore_create(0); dispatch_async(dq, ^{ - pthread_threadid_np(NULL, &cseg_expected_threadid); + pthread_threadid_np(NULL, &thread_id); dispatch_semaphore_signal(child_ok); T_ASSERT_POSIX_SUCCESS(sysctlbyname("kern.cseg_wedge_thread", NULL, NULL, &val, sizeof(val)), "wedge child thread"); }); @@ -917,10 +1148,244 @@ T_DECL(cseg_waitinfo, "test that threads stuck in the compressor report correct sleep(1); T_LOG("taking stackshot"); - take_stackshot(&scenario, ^(void *ssbuf, size_t sslen) { + take_stackshot(&scenario, false, ^(void *ssbuf, size_t sslen) { T_ASSERT_POSIX_SUCCESS(sysctlbyname("kern.cseg_unwedge_thread", NULL, NULL, &val, sizeof(val)), "unwedge child thread"); - parse_stackshot(PARSE_STACKSHOT_WAITINFO_CSEG, ssbuf, sslen, -1); + parse_stackshot(PARSE_STACKSHOT_WAITINFO_CSEG, ssbuf, sslen, @{cseg_expected_threadid_key: @(thread_id)}); + }); +} + +static void +srp_send( + mach_port_t send_port, + mach_port_t reply_port, + mach_port_t msg_port) +{ + kern_return_t ret = 0; + + struct test_msg { + mach_msg_header_t header; + mach_msg_body_t body; + mach_msg_port_descriptor_t port_descriptor; + }; + struct test_msg send_msg = { + .header = { + .msgh_remote_port = send_port, + .msgh_local_port = reply_port, + .msgh_bits = MACH_MSGH_BITS_SET(MACH_MSG_TYPE_COPY_SEND, + reply_port ? 
MACH_MSG_TYPE_MAKE_SEND_ONCE : 0, + MACH_MSG_TYPE_MOVE_SEND, + MACH_MSGH_BITS_COMPLEX), + .msgh_id = 0x100, + .msgh_size = sizeof(send_msg), + }, + .body = { + .msgh_descriptor_count = 1, + }, + .port_descriptor = { + .name = msg_port, + .disposition = MACH_MSG_TYPE_MOVE_RECEIVE, + .type = MACH_MSG_PORT_DESCRIPTOR, + }, + }; + + if (msg_port == MACH_PORT_NULL) { + send_msg.body.msgh_descriptor_count = 0; + } + + ret = mach_msg(&(send_msg.header), + MACH_SEND_MSG | + MACH_SEND_TIMEOUT | + MACH_SEND_OVERRIDE | + (reply_port ? MACH_SEND_SYNC_OVERRIDE : 0), + send_msg.header.msgh_size, + 0, + MACH_PORT_NULL, + 10000, + 0); + + T_ASSERT_MACH_SUCCESS(ret, "client mach_msg"); +} + +T_HELPER_DECL(srp_client, + "Client used for the special_reply_port test") +{ + pid_t ppid = getppid(); + dispatch_semaphore_t can_continue = dispatch_semaphore_create(0); + dispatch_queue_t dq = dispatch_queue_create("client_signalqueue", NULL); + dispatch_source_t sig_src; + + mach_msg_return_t mr; + mach_port_t service_port; + mach_port_t conn_port; + mach_port_t special_reply_port; + mach_port_options_t opts = { + .flags = MPO_INSERT_SEND_RIGHT, + }; + + signal(SIGUSR1, SIG_IGN); + sig_src = dispatch_source_create(DISPATCH_SOURCE_TYPE_SIGNAL, SIGUSR1, 0, dq); + + dispatch_source_set_event_handler(sig_src, ^{ + dispatch_semaphore_signal(can_continue); + }); + dispatch_activate(sig_src); + + /* lookup the mach service port for the parent */ + kern_return_t kr = bootstrap_look_up(bootstrap_port, + SRP_SERVICE_NAME, &service_port); + T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "client bootstrap_look_up"); + + /* create the send-once right (special reply port) and message to send to the server */ + kr = mach_port_construct(mach_task_self(), &opts, 0ull, &conn_port); + T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "mach_port_construct"); + + special_reply_port = thread_get_special_reply_port(); + T_QUIET; T_ASSERT_TRUE(MACH_PORT_VALID(special_reply_port), "get_thread_special_reply_port"); + + /* send the message with the special reply port */ + srp_send(service_port, special_reply_port, conn_port); + + /* signal the parent to continue */ + kill(ppid, SIGUSR1); + + struct { + mach_msg_header_t header; + mach_msg_body_t body; + mach_msg_port_descriptor_t port_descriptor; + } rcv_msg = { + .header = + { + .msgh_remote_port = MACH_PORT_NULL, + .msgh_local_port = special_reply_port, + .msgh_size = sizeof(rcv_msg), + }, + }; + + /* wait on the reply from the parent (that we will never receive) */ + mr = mach_msg(&(rcv_msg.header), + (MACH_RCV_MSG | MACH_RCV_SYNC_WAIT), + 0, + rcv_msg.header.msgh_size, + special_reply_port, + MACH_MSG_TIMEOUT_NONE, + service_port); + + /* not expected to execute as parent will SIGKILL client... */ + T_LOG("client process exiting after sending message to parent (server)"); +} + +/* + * Tests the stackshot wait info plumbing for synchronous IPC that doesn't use kevent on the server. + * + * (part 1): tests the scenario where a client sends a request that includes a special reply port + * to a server that doesn't receive the message and doesn't copy the send-once right + * into its address space as a result. for this case the special reply port is enqueued + * in a port and we check which task has that receive right and use that info. (rdar://60440338) + * (part 2): tests the scenario where a client sends a request that includes a special reply port + * to a server that receives the message and copies in the send-once right, but doesn't + * reply to the client. 
for this case the special reply port is copied out and the kernel + * stashes the info about which task copied out the send once right. (rdar://60440592) + */ +T_DECL(special_reply_port, "test that tasks using special reply ports have correct waitinfo") +{ + dispatch_semaphore_t can_continue = dispatch_semaphore_create(0); + dispatch_queue_t dq = dispatch_queue_create("signalqueue", NULL); + dispatch_source_t sig_src; + char path[PATH_MAX]; + uint32_t path_size = sizeof(path); + T_ASSERT_POSIX_ZERO(_NSGetExecutablePath(path, &path_size), "_NSGetExecutablePath"); + char *client_args[] = { path, "-n", "srp_client", NULL }; + pid_t client_pid; + int sp_ret; + kern_return_t kr; + struct scenario scenario = { + .name = "srp", + .quiet = false, + .flags = (STACKSHOT_THREAD_WAITINFO | STACKSHOT_KCDATA_FORMAT), + }; + mach_port_t port; + + /* setup the signal handler in the parent (server) */ + T_LOG("setup sig handlers"); + signal(SIGUSR1, SIG_IGN); + sig_src = dispatch_source_create(DISPATCH_SOURCE_TYPE_SIGNAL, SIGUSR1, 0, dq); + + dispatch_source_set_event_handler(sig_src, ^{ + dispatch_semaphore_signal(can_continue); }); + dispatch_activate(sig_src); + + /* register with the mach service name so the client can lookup and send a message to the parent (server) */ + T_LOG("Server about to check in"); + kr = bootstrap_check_in(bootstrap_port, SRP_SERVICE_NAME, &port); + T_ASSERT_MACH_SUCCESS(kr, "server bootstrap_check_in"); + + T_LOG("Launching client"); + sp_ret = posix_spawn(&client_pid, client_args[0], NULL, NULL, client_args, NULL); + T_QUIET; T_ASSERT_POSIX_ZERO(sp_ret, "spawned process '%s' with PID %d", client_args[0], client_pid); + T_LOG("Spawned client as PID %d", client_pid); + + dispatch_semaphore_wait(can_continue, DISPATCH_TIME_FOREVER); + T_LOG("Ready to take stackshot, but waiting 1s for the coast to clear"); + + sleep(1); + + /* + * take the stackshot without calling receive to verify that the stackshot wait + * info shows our (the server) PID for the scenario where the server has yet to + * receive the message. + */ + T_LOG("Taking stackshot for part 1 coverage"); + take_stackshot(&scenario, false, ^(void *ssbuf, size_t sslen) { + parse_stackshot(PARSE_STACKSHOT_WAITINFO_SRP, ssbuf, sslen, + @{srp_expected_pid_key: @(getpid())}); + }); + + /* + * receive the message from the client (which should copy the send once right into + * our address space). + */ + struct { + mach_msg_header_t header; + mach_msg_body_t body; + mach_msg_port_descriptor_t port_descriptor; + } rcv_msg = { + .header = + { + .msgh_remote_port = MACH_PORT_NULL, + .msgh_local_port = port, + .msgh_size = sizeof(rcv_msg), + }, + }; + + T_LOG("server: starting sync receive\n"); + + mach_msg_return_t mr; + mr = mach_msg(&(rcv_msg.header), + (MACH_RCV_MSG | MACH_RCV_TIMEOUT), + 0, + 4096, + port, + 10000, + MACH_PORT_NULL); + T_QUIET; T_ASSERT_MACH_SUCCESS(mr, "mach_msg() recieve of message from client"); + + /* + * take the stackshot to verify that the stackshot wait info shows our (the server) PID + * for the scenario where the server has received the message and copied in the send-once right. 
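+ * (In both parts, parse_stackshot() accepts the stackshot once it finds a
+ * thread_turnstileinfo entry whose turnstile_context is this pid with
+ * STACKSHOT_TURNSTILE_STATUS_BLOCKED_ON_TASK set, paired with a thread_waitinfo
+ * entry whose wait_type is kThreadWaitPortReceive.)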
+ */ + T_LOG("Taking stackshot for part 2 coverage"); + take_stackshot(&scenario, false, ^(void *ssbuf, size_t sslen) { + parse_stackshot(PARSE_STACKSHOT_WAITINFO_SRP, ssbuf, sslen, + @{srp_expected_pid_key: @(getpid())}); + }); + + /* cleanup - kill the client */ + T_LOG("killing client"); + kill(client_pid, SIGKILL); + + T_LOG("waiting for the client to exit"); + waitpid(client_pid, NULL, 0); } #pragma mark performance tests @@ -948,7 +1413,7 @@ stackshot_perf(unsigned int options) while (!dt_stat_stable(duration) || !dt_stat_stable(size)) { __block uint64_t last_time = 0; __block uint32_t size_hint = 0; - take_stackshot(&scenario, ^(void *ssbuf, size_t sslen) { + take_stackshot(&scenario, false, ^(void *ssbuf, size_t sslen) { dt_stat_add(size, (double)sslen); last_time = stackshot_timestamp(ssbuf, sslen); size_hint = (uint32_t)sslen; @@ -966,6 +1431,97 @@ stackshot_perf(unsigned int options) dt_stat_finalize(size); } +static void +stackshot_flag_perf_noclobber(uint64_t flag, char *flagname) +{ + struct scenario scenario = { + .quiet = true, + .flags = (flag | STACKSHOT_KCDATA_FORMAT), + }; + + dt_stat_t duration = dt_stat_create("nanoseconds per thread", "%s_duration", flagname); + dt_stat_t size = dt_stat_create("bytes per thread", "%s_size", flagname); + T_LOG("Testing \"%s\" = 0x%x", flagname, flag); + + while (!dt_stat_stable(duration) || !dt_stat_stable(size)) { + take_stackshot(&scenario, false, ^(void *ssbuf, size_t sslen) { + kcdata_iter_t iter = kcdata_iter(ssbuf, sslen); + unsigned long no_threads = 0; + mach_timebase_info_data_t timebase = {0, 0}; + uint64_t stackshot_duration = 0; + int found = 0; + T_QUIET; T_ASSERT_EQ(kcdata_iter_type(iter), KCDATA_BUFFER_BEGIN_STACKSHOT, "stackshot buffer"); + + KCDATA_ITER_FOREACH(iter) { + switch(kcdata_iter_type(iter)) { + case STACKSHOT_KCTYPE_THREAD_SNAPSHOT: { + found |= 1; + no_threads ++; + break; + } + case STACKSHOT_KCTYPE_STACKSHOT_DURATION: { + struct stackshot_duration *ssd = kcdata_iter_payload(iter); + stackshot_duration = ssd->stackshot_duration; + found |= 2; + break; + } + case KCDATA_TYPE_TIMEBASE: { + found |= 4; + mach_timebase_info_data_t *tb = kcdata_iter_payload(iter); + memcpy(&timebase, tb, sizeof(timebase)); + break; + } + } + } + + T_QUIET; T_ASSERT_EQ(found, 0x7, "found everything needed"); + + uint64_t ns = (stackshot_duration * timebase.numer) / timebase.denom; + uint64_t per_thread_ns = ns / no_threads; + uint64_t per_thread_size = sslen / no_threads; + + dt_stat_add(duration, per_thread_ns); + dt_stat_add(size, per_thread_size); + }); + } + + dt_stat_finalize(duration); + dt_stat_finalize(size); +} + +static void +stackshot_flag_perf(uint64_t flag, char *flagname) +{ + /* + * STACKSHOT_NO_IO_STATS disables data collection, so set it for + * more accurate perfdata collection. 
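+ * The "baseline" and "io_stats" cases in flag_perf below call
+ * stackshot_flag_perf_noclobber() directly, so they are the only runs measured
+ * without this bit forced on.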
+ */ + flag |= STACKSHOT_NO_IO_STATS; + + stackshot_flag_perf_noclobber(flag, flagname); +} + + +T_DECL(flag_perf, "test stackshot performance with different flags set", T_META_TAG_PERF) +{ + stackshot_flag_perf_noclobber(STACKSHOT_NO_IO_STATS, "baseline"); + stackshot_flag_perf_noclobber(0, "io_stats"); + + stackshot_flag_perf(STACKSHOT_THREAD_WAITINFO, "thread_waitinfo"); + stackshot_flag_perf(STACKSHOT_GET_DQ, "get_dq"); + stackshot_flag_perf(STACKSHOT_SAVE_LOADINFO, "save_loadinfo"); + stackshot_flag_perf(STACKSHOT_GET_GLOBAL_MEM_STATS, "get_global_mem_stats"); + stackshot_flag_perf(STACKSHOT_SAVE_KEXT_LOADINFO, "save_kext_loadinfo"); + stackshot_flag_perf(STACKSHOT_SAVE_IMP_DONATION_PIDS, "save_imp_donation_pids"); + stackshot_flag_perf(STACKSHOT_ENABLE_BT_FAULTING, "enable_bt_faulting"); + stackshot_flag_perf(STACKSHOT_COLLECT_SHAREDCACHE_LAYOUT, "collect_sharedcache_layout"); + stackshot_flag_perf(STACKSHOT_ENABLE_UUID_FAULTING, "enable_uuid_faulting"); + stackshot_flag_perf(STACKSHOT_THREAD_GROUP, "thread_group"); + stackshot_flag_perf(STACKSHOT_SAVE_JETSAM_COALITIONS, "save_jetsam_coalitions"); + stackshot_flag_perf(STACKSHOT_INSTRS_CYCLES, "instrs_cycles"); + stackshot_flag_perf(STACKSHOT_ASID, "asid"); +} + T_DECL(perf_no_size_hint, "test stackshot performance with no size hint", T_META_TAG_PERF) { @@ -1114,18 +1670,45 @@ verify_stackshot_sharedcache_layout(struct dyld_uuid_info_64 *uuids, uint32_t uu } static void -parse_stackshot(uint64_t stackshot_parsing_flags, void *ssbuf, size_t sslen, int child_pid) +check_shared_cache_uuid(uuid_t imageUUID) +{ + static uuid_t shared_cache_uuid; + static dispatch_once_t read_shared_cache_uuid; + + dispatch_once(&read_shared_cache_uuid, ^{ + T_QUIET; + T_ASSERT_TRUE(_dyld_get_shared_cache_uuid(shared_cache_uuid), "retrieve current shared cache UUID"); + }); + T_QUIET; T_ASSERT_EQ(uuid_compare(shared_cache_uuid, imageUUID), 0, + "dyld returned UUID doesn't match kernel returned UUID for system shared cache"); +} + +/* + * extra dictionary contains data relevant for the given flags: + * PARSE_STACKSHOT_ZOMBIE: zombie_child_pid_key -> @(pid) + * PARSE_STACKSHOT_POSTEXEC: postexec_child_unique_pid_key -> @(unique_pid) + */ +static void +parse_stackshot(uint64_t stackshot_parsing_flags, void *ssbuf, size_t sslen, NSDictionary *extra) { bool delta = (stackshot_parsing_flags & PARSE_STACKSHOT_DELTA); bool expect_zombie_child = (stackshot_parsing_flags & PARSE_STACKSHOT_ZOMBIE); + bool expect_postexec_child = (stackshot_parsing_flags & PARSE_STACKSHOT_POSTEXEC); bool expect_cseg_waitinfo = (stackshot_parsing_flags & PARSE_STACKSHOT_WAITINFO_CSEG); + bool expect_translated_child = (stackshot_parsing_flags & PARSE_STACKSHOT_TRANSLATED); bool expect_shared_cache_layout = false; bool expect_shared_cache_uuid = !delta; bool expect_dispatch_queue_label = (stackshot_parsing_flags & PARSE_STACKSHOT_DISPATCH_QUEUE_LABEL); bool expect_turnstile_lock = (stackshot_parsing_flags & PARSE_STACKSHOT_TURNSTILEINFO); - bool found_zombie_child = false, found_shared_cache_layout = false, found_shared_cache_uuid = false; + bool expect_srp_waitinfo = (stackshot_parsing_flags & PARSE_STACKSHOT_WAITINFO_SRP); + bool found_zombie_child = false, found_postexec_child = false, found_shared_cache_layout = false, found_shared_cache_uuid = false; + bool found_translated_child = false; bool found_dispatch_queue_label = false, found_turnstile_lock = false; - bool found_cseg_waitinfo = false; + bool found_cseg_waitinfo = false, found_srp_waitinfo = false; + pid_t zombie_child_pid 
= -1, srp_expected_pid = 0; + pid_t translated_child_pid = -1; + uint64_t postexec_child_unique_pid = 0, cseg_expected_threadid = 0; + char *inflatedBufferBase = NULL; if (expect_shared_cache_uuid) { uuid_t shared_cache_uuid; @@ -1150,19 +1733,113 @@ parse_stackshot(uint64_t stackshot_parsing_flags, void *ssbuf, size_t sslen, int } if (expect_zombie_child) { - T_QUIET; T_ASSERT_GT(child_pid, 0, "child pid greater than zero"); + NSNumber* pid_num = extra[zombie_child_pid_key]; + T_QUIET; T_ASSERT_NOTNULL(pid_num, "zombie child pid provided"); + zombie_child_pid = [pid_num intValue]; + T_QUIET; T_ASSERT_GT(zombie_child_pid, 0, "zombie child pid greater than zero"); + } + + if (expect_postexec_child) { + NSNumber* unique_pid_num = extra[postexec_child_unique_pid_key]; + T_QUIET; T_ASSERT_NOTNULL(unique_pid_num, "postexec child unique pid provided"); + postexec_child_unique_pid = [unique_pid_num unsignedLongLongValue]; + T_QUIET; T_ASSERT_GT(postexec_child_unique_pid, 0ull, "postexec child unique pid greater than zero"); } + if (expect_cseg_waitinfo) { + NSNumber* tid_num = extra[cseg_expected_threadid_key]; + T_QUIET; T_ASSERT_NOTNULL(tid_num, "cseg's expected thread id provided"); + cseg_expected_threadid = [tid_num intValue]; + T_QUIET; T_ASSERT_GT(cseg_expected_threadid, 0, "cseg_expected_threadid greater than zero"); + } + + if (expect_srp_waitinfo) { + NSNumber* pid_num = extra[srp_expected_pid_key]; + T_QUIET; T_ASSERT_NOTNULL(pid_num, "expected SRP pid provided"); + srp_expected_pid = [pid_num intValue]; + T_QUIET; T_ASSERT_GT(srp_expected_pid , 0, "srp_expected_pid greater than zero"); + } + + if (expect_translated_child) { + NSNumber* pid_num = extra[translated_child_pid_key]; + T_QUIET; T_ASSERT_NOTNULL(pid_num, "translated child pid provided"); + translated_child_pid = [pid_num intValue]; + T_QUIET; T_ASSERT_GT(translated_child_pid, 0, "translated child pid greater than zero"); + } + kcdata_iter_t iter = kcdata_iter(ssbuf, sslen); if (delta) { T_ASSERT_EQ(kcdata_iter_type(iter), KCDATA_BUFFER_BEGIN_DELTA_STACKSHOT, "buffer provided is a delta stackshot"); + + iter = kcdata_iter_next(iter); } else { - T_ASSERT_EQ(kcdata_iter_type(iter), KCDATA_BUFFER_BEGIN_STACKSHOT, - "buffer provided is a stackshot"); + if (kcdata_iter_type(iter) != KCDATA_BUFFER_BEGIN_COMPRESSED) { + T_ASSERT_EQ(kcdata_iter_type(iter), KCDATA_BUFFER_BEGIN_STACKSHOT, + "buffer provided is a stackshot"); + + iter = kcdata_iter_next(iter); + } else { + /* we are dealing with a compressed buffer */ + iter = kcdata_iter_next(iter); + uint64_t compression_type = 0, totalout = 0, totalin = 0; + + uint64_t *data; + char *desc; + for (int i = 0; i < 3; i ++) { + kcdata_iter_get_data_with_desc(iter, &desc, &data, NULL); + if (strcmp(desc, "kcd_c_type") == 0) { + compression_type = *data; + } else if (strcmp(desc, "kcd_c_totalout") == 0){ + totalout = *data; + } else if (strcmp(desc, "kcd_c_totalin") == 0){ + totalin = *data; + } + + iter = kcdata_iter_next(iter); + } + + T_ASSERT_EQ(compression_type, 1, "zlib compression is used"); + T_ASSERT_GT(totalout, 0, "successfully gathered how long the compressed buffer is"); + T_ASSERT_GT(totalin, 0, "successfully gathered how long the uncompressed buffer will be at least"); + + /* progress to the next kcdata item */ + T_ASSERT_EQ(kcdata_iter_type(iter), KCDATA_BUFFER_BEGIN_STACKSHOT, "compressed stackshot found"); + + void *bufferBase = kcdata_iter_payload(iter); + + /* + * zlib is used, allocate a buffer based on the metadata, plus + * extra scratch space (+12.5%) in case 
totalin was inconsistent + */ + size_t inflatedBufferSize = totalin + (totalin >> 3); + inflatedBufferBase = malloc(inflatedBufferSize); + T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(inflatedBufferBase, "allocated temporary output buffer"); + + z_stream zs; + memset(&zs, 0, sizeof(zs)); + T_QUIET; T_ASSERT_EQ(inflateInit(&zs), Z_OK, "inflateInit OK"); + zs.next_in = bufferBase; + zs.avail_in = totalout; + zs.next_out = inflatedBufferBase; + zs.avail_out = inflatedBufferSize; + T_ASSERT_EQ(inflate(&zs, Z_FINISH), Z_STREAM_END, "inflated buffer"); + inflateEnd(&zs); + + T_ASSERT_EQ(zs.total_out, totalin, "expected number of bytes inflated"); + + /* copy the data after the compressed area */ + T_QUIET; T_ASSERT_LE(sslen - totalout - (bufferBase - ssbuf), + inflatedBufferSize - zs.total_out, + "footer fits in the buffer"); + memcpy(inflatedBufferBase + zs.total_out, + bufferBase + totalout, + sslen - totalout - (bufferBase - ssbuf)); + + iter = kcdata_iter(inflatedBufferBase, inflatedBufferSize); + } } - iter = kcdata_iter_next(iter); KCDATA_ITER_FOREACH(iter) { NSError *error = nil; @@ -1201,6 +1878,11 @@ parse_stackshot(uint64_t stackshot_parsing_flags, void *ssbuf, size_t sslen, int T_QUIET; T_ASSERT_NOTNULL(container, "parsed container from stackshot"); T_QUIET; T_ASSERT_NULL(error, "error unset after parsing container"); + NSDictionary* task_snapshot = container[@"task_snapshots"][@"task_snapshot"]; + NSDictionary* task_delta_snapshot = container[@"task_snapshots"][@"task_delta_snapshot"]; + + T_QUIET; T_ASSERT_TRUE(!!task_snapshot != !!task_delta_snapshot, "Either task_snapshot xor task_delta_snapshot provided"); + if (expect_dispatch_queue_label && !found_dispatch_queue_label) { for (id thread_key in container[@"task_snapshots"][@"thread_snapshots"]) { NSMutableDictionary *thread = container[@"task_snapshots"][@"thread_snapshots"][thread_key]; @@ -1212,6 +1894,75 @@ parse_stackshot(uint64_t stackshot_parsing_flags, void *ssbuf, size_t sslen, int } } } + + if (expect_postexec_child && !found_postexec_child) { + if (task_snapshot) { + uint64_t unique_pid = [task_snapshot[@"ts_unique_pid"] unsignedLongLongValue]; + if (unique_pid == postexec_child_unique_pid) { + found_postexec_child = true; + + T_PASS("post-exec child %llu has a task snapshot", postexec_child_unique_pid); + + break; + } + } + + if (task_delta_snapshot) { + uint64_t unique_pid = [task_delta_snapshot[@"tds_unique_pid"] unsignedLongLongValue]; + if (unique_pid == postexec_child_unique_pid) { + found_postexec_child = true; + + T_FAIL("post-exec child %llu shouldn't have a delta task snapshot", postexec_child_unique_pid); + + break; + } + } + } + + if (!task_snapshot) { + break; + } + + int pid = [task_snapshot[@"ts_pid"] intValue]; + + if (pid && expect_shared_cache_uuid && !found_shared_cache_uuid) { + id ptr = container[@"task_snapshots"][@"shared_cache_dyld_load_info"]; + if (ptr) { + id uuid = ptr[@"imageUUID"]; + + uint8_t uuid_p[16]; + for (int i = 0; i < 16; i ++) + uuid_p[i] = (uint8_t) ([[uuid objectAtIndex:i] intValue]); + + check_shared_cache_uuid(uuid_p); + + /* + * check_shared_cache_uuid() will assert on failure, so if + * we get here, then we have found the shared cache UUID + * and it's correct + */ + found_shared_cache_uuid = true; + } + } + + + if (expect_zombie_child && (pid == zombie_child_pid)) { + found_zombie_child = true; + + uint64_t task_flags = [task_snapshot[@"ts_ss_flags"] unsignedLongLongValue]; + T_ASSERT_TRUE((task_flags & kTerminatedSnapshot) == kTerminatedSnapshot, "child zombie marked as 
terminated"); + + continue; + } + + if (expect_translated_child && (pid == translated_child_pid)) { + found_translated_child = true; + + uint64_t task_flags = [task_snapshot[@"ts_ss_flags"] unsignedLongLongValue]; + T_ASSERT_EQ((task_flags & kTaskIsTranslated), kTaskIsTranslated, "child marked as translated"); + + continue; + } if (expect_cseg_waitinfo) { NSArray *winfos = container[@"task_snapshots"][@"thread_waitinfo"]; @@ -1224,31 +1975,63 @@ parse_stackshot(uint64_t stackshot_parsing_flags, void *ssbuf, size_t sslen, int } } - int pid = [container[@"task_snapshots"][@"task_snapshot"][@"ts_pid"] intValue]; - if (expect_zombie_child && (pid == child_pid)) { - found_zombie_child = true; + if (expect_srp_waitinfo) { + NSArray *tinfos = container[@"task_snapshots"][@"thread_turnstileinfo"]; + NSArray *winfos = container[@"task_snapshots"][@"thread_waitinfo"]; - uint64_t task_flags = [container[@"task_snapshots"][@"task_snapshot"][@"ts_ss_flags"] unsignedLongLongValue]; - T_ASSERT_TRUE((task_flags & kTerminatedSnapshot) == kTerminatedSnapshot, "child zombie marked as terminated"); + for (id i in tinfos) { + if (!found_srp_waitinfo) { + if ([i[@"turnstile_context"] intValue] == srp_expected_pid && + ([i[@"turnstile_flags"] intValue] & STACKSHOT_TURNSTILE_STATUS_BLOCKED_ON_TASK)) { + + /* we found something that is blocking the correct pid */ + for (id j in winfos) { + if ([j[@"waiter"] intValue] == [i[@"waiter"] intValue] && + [j[@"wait_type"] intValue] == kThreadWaitPortReceive) { + found_srp_waitinfo = true; + break; + } + } + + if (found_srp_waitinfo) { + break; + } + } + } + } + } - continue; - } else if (pid != getpid()) { + if (pid != getpid()) { break; } - + T_EXPECT_EQ_STR(current_process_name(), - [container[@"task_snapshots"][@"task_snapshot"][@"ts_p_comm"] UTF8String], + [task_snapshot[@"ts_p_comm"] UTF8String], "current process name matches in stackshot"); - uint64_t task_flags = [container[@"task_snapshots"][@"task_snapshot"][@"ts_ss_flags"] unsignedLongLongValue]; - T_ASSERT_FALSE((task_flags & kTerminatedSnapshot) == kTerminatedSnapshot, "current process not marked as terminated"); + uint64_t task_flags = [task_snapshot[@"ts_ss_flags"] unsignedLongLongValue]; + T_ASSERT_NE((task_flags & kTerminatedSnapshot), kTerminatedSnapshot, "current process not marked as terminated"); + T_ASSERT_NE((task_flags & kTaskIsTranslated), kTaskIsTranslated, "current process not marked as translated"); T_QUIET; - T_EXPECT_LE(pid, [container[@"task_snapshots"][@"task_snapshot"][@"ts_unique_pid"] intValue], + T_EXPECT_LE(pid, [task_snapshot[@"ts_unique_pid"] intValue], "unique pid is greater than pid"); + NSDictionary* task_cpu_architecture = container[@"task_snapshots"][@"task_cpu_architecture"]; + T_QUIET; T_ASSERT_NOTNULL(task_cpu_architecture[@"cputype"], "have cputype"); + T_QUIET; T_ASSERT_NOTNULL(task_cpu_architecture[@"cpusubtype"], "have cputype"); + int cputype = [task_cpu_architecture[@"cputype"] intValue]; + int cpusubtype = [task_cpu_architecture[@"cpusubtype"] intValue]; + + struct proc_archinfo archinfo; + int retval = proc_pidinfo(pid, PROC_PIDARCHINFO, 0, &archinfo, sizeof(archinfo)); + T_QUIET; T_WITH_ERRNO; T_ASSERT_GT(retval, 0, "proc_pidinfo(PROC_PIDARCHINFO) returned a value > 0"); + T_QUIET; T_ASSERT_EQ(retval, (int)sizeof(struct proc_archinfo), "proc_pidinfo call for PROC_PIDARCHINFO returned expected size"); + T_QUIET; T_EXPECT_EQ(cputype, archinfo.p_cputype, "cpu type is correct"); + T_QUIET; T_EXPECT_EQ(cpusubtype, archinfo.p_cpusubtype, "cpu subtype is correct"); + bool 
found_main_thread = false; - uint64_t main_thread_id = -1; + uint64_t main_thread_id = -1ULL; for (id thread_key in container[@"task_snapshots"][@"thread_snapshots"]) { NSMutableDictionary *thread = container[@"task_snapshots"][@"thread_snapshots"][thread_key]; NSDictionary *thread_snap = thread[@"thread_snapshot"]; @@ -1263,7 +2046,7 @@ parse_stackshot(uint64_t stackshot_parsing_flags, void *ssbuf, size_t sslen, int NSString *pth_name = thread[@"pth_name"]; if (pth_name != nil && [pth_name isEqualToString:@TEST_THREAD_NAME]) { found_main_thread = true; - main_thread_id = [thread_snap[@"ths_thread_id"] intValue]; + main_thread_id = [thread_snap[@"ths_thread_id"] unsignedLongLongValue]; T_QUIET; T_EXPECT_GT([thread_snap[@"ths_total_syscalls"] intValue], 0, "total syscalls of current thread is valid"); @@ -1281,7 +2064,7 @@ parse_stackshot(uint64_t stackshot_parsing_flags, void *ssbuf, size_t sslen, int NSArray *tsinfos = container[@"task_snapshots"][@"thread_turnstileinfo"]; for (id i in tsinfos) { - if ([i[@"turnstile_context"] intValue] == main_thread_id) { + if ([i[@"turnstile_context"] unsignedLongLongValue] == main_thread_id) { found_turnstile_lock = true; break; } @@ -1290,11 +2073,15 @@ parse_stackshot(uint64_t stackshot_parsing_flags, void *ssbuf, size_t sslen, int break; } case STACKSHOT_KCTYPE_SHAREDCACHE_LOADINFO: { - struct dyld_uuid_info_64_v2 *shared_cache_info = kcdata_iter_payload(iter); - uuid_t shared_cache_uuid; - T_QUIET; T_ASSERT_TRUE(_dyld_get_shared_cache_uuid(shared_cache_uuid), "retrieve current shared cache UUID"); - T_QUIET; T_ASSERT_EQ(memcmp(shared_cache_info->imageUUID, shared_cache_uuid, sizeof(shared_cache_uuid)), 0, - "dyld returned UUID doesn't match kernel returned UUID for system shared cache"); + struct dyld_uuid_info_64_v2 *payload = kcdata_iter_payload(iter); + T_ASSERT_EQ(kcdata_iter_size(iter), sizeof(*payload), "valid dyld_uuid_info_64_v2 struct"); + + check_shared_cache_uuid(payload->imageUUID); + + /* + * check_shared_cache_uuid() asserts on failure, so we must have + * found the shared cache UUID to be correct. 
+ */ found_shared_cache_uuid = true; break; } @@ -1305,6 +2092,14 @@ parse_stackshot(uint64_t stackshot_parsing_flags, void *ssbuf, size_t sslen, int T_QUIET; T_ASSERT_TRUE(found_zombie_child, "found zombie child in kcdata"); } + if (expect_postexec_child) { + T_QUIET; T_ASSERT_TRUE(found_postexec_child, "found post-exec child in kcdata"); + } + + if (expect_translated_child) { + T_QUIET; T_ASSERT_TRUE(found_translated_child, "found translated child in kcdata"); + } + if (expect_shared_cache_layout) { T_QUIET; T_ASSERT_TRUE(found_shared_cache_layout, "shared cache layout found in kcdata"); } @@ -1325,7 +2120,13 @@ parse_stackshot(uint64_t stackshot_parsing_flags, void *ssbuf, size_t sslen, int T_QUIET; T_ASSERT_TRUE(found_cseg_waitinfo, "found c_seg waitinfo"); } + if (expect_srp_waitinfo) { + T_QUIET; T_ASSERT_TRUE(found_srp_waitinfo, "found special reply port waitinfo"); + } + T_ASSERT_FALSE(KCDATA_ITER_FOREACH_FAILED(iter), "successfully iterated kcdata"); + + free(inflatedBufferBase); } static const char * diff --git a/tests/stackshot_translated_child.c b/tests/stackshot_translated_child.c new file mode 100644 index 000000000..377bd70b6 --- /dev/null +++ b/tests/stackshot_translated_child.c @@ -0,0 +1,16 @@ +#include +#include +#include + +int +main() +{ + // Always signal parent to unblock them + kill(getppid(), SIGUSR1); + +#if !defined(__x86_64__) + os_crash("translated child not running as x86_64"); +#endif + sleep(100); + return 0; +} diff --git a/tests/subsystem_root_path-entitlements.plist b/tests/subsystem_root_path-entitlements.plist new file mode 100644 index 000000000..098c9bb36 --- /dev/null +++ b/tests/subsystem_root_path-entitlements.plist @@ -0,0 +1,9 @@ + + + + + com.apple.private.spawn-subsystem-root + + + + diff --git a/tests/subsystem_root_path.c b/tests/subsystem_root_path.c new file mode 100644 index 000000000..f87849001 --- /dev/null +++ b/tests/subsystem_root_path.c @@ -0,0 +1,48 @@ +#include +#include "subsystem_root_path.h" + +#include +#include + +#define UNENTITLED_EXECUTABLE_PATH "./subsystem_root_path_helper" +#define ENTITLED_EXECUTABLE_PATH "./subsystem_root_path_helper_entitled" + +T_GLOBAL_META(T_META_RUN_CONCURRENTLY(true)); + +T_DECL(subsystem_root_path, + "Test the support for setting subsystem_root_path", + T_META_CHECK_LEAKS(false)) +{ + char * args[] = { ENTITLED_EXECUTABLE_PATH, HELPER_BEHAVIOR_NOT_SET, "/main_root/", NULL}; + int pid = 0; + posix_spawnattr_t attr = NULL; + + T_ASSERT_EQ_INT(_spawn_and_wait(args, NULL), 0, "posix_spawn without attributes"); + T_ASSERT_POSIX_SUCCESS(posix_spawnattr_init(&attr), "posix_spawnattr_init"); + T_ASSERT_EQ_INT(_spawn_and_wait(args, &attr), 0, "posix_spawn with attributes"); + T_ASSERT_POSIX_SUCCESS(posix_spawnattr_set_subsystem_root_path_np(&attr, args[2]), "Set subsystem root path"); + + args[1] = HELPER_BEHAVIOR_SET; + + T_ASSERT_EQ_INT(_spawn_and_wait(args, &attr), 0, "posix_spawn with subsystem root path"); + T_ASSERT_POSIX_SUCCESS(posix_spawnattr_set_subsystem_root_path_np(&attr, NULL), "Clear subsystem root path attribute"); + + args[1] = HELPER_BEHAVIOR_NOT_SET; + + T_ASSERT_EQ_INT(_spawn_and_wait(args, &attr), 0, "Spawn without subsystem root path"); + + T_ASSERT_POSIX_SUCCESS(posix_spawnattr_set_subsystem_root_path_np(&attr, args[2]), "Set subsystem root path (again)"); + + args[1] = HELPER_BEHAVIOR_FORK_EXEC; + + T_ASSERT_EQ_INT(_spawn_and_wait(args, &attr), 0, "Subsystem root path inheritance across fork/exec"); + + args[1] = HELPER_BEHAVIOR_SPAWN; + + T_ASSERT_EQ_INT(_spawn_and_wait(args, 
&attr), 0, "Subsystem root path override through posix_spawn"); + + args[0] = UNENTITLED_EXECUTABLE_PATH; + + T_ASSERT_NE_INT(_spawn_and_wait(args, &attr), 0, "Entitlement check"); + T_ASSERT_POSIX_SUCCESS(posix_spawnattr_destroy(&attr), "posix_spawnattr_destroy"); +} diff --git a/tests/subsystem_root_path.h b/tests/subsystem_root_path.h new file mode 100644 index 000000000..1d58db7b7 --- /dev/null +++ b/tests/subsystem_root_path.h @@ -0,0 +1,30 @@ +#include +#include +#include + +#define SUBSYSTEM_ROOT_PATH_KEY "subsystem_root_path" + +#define HELPER_BEHAVIOR_NOT_SET "not_set" +#define HELPER_BEHAVIOR_SET "set" +#define HELPER_BEHAVIOR_FORK_EXEC "fork_exec" +#define HELPER_BEHAVIOR_SPAWN "spawn" + +static int +_spawn_and_wait(char ** args, posix_spawnattr_t *attr) +{ + int pid; + int status; + + if (posix_spawn(&pid, args[0], NULL, attr, args, NULL)) { + return -1; + } + if (waitpid(pid, &status, 0) < 0) { + return -1; + } + + if (WIFEXITED(status) && (WEXITSTATUS(status) == 0)) { + return 0; + } + + return -1; +} diff --git a/tests/subsystem_root_path_helper.c b/tests/subsystem_root_path_helper.c new file mode 100644 index 000000000..4c6452390 --- /dev/null +++ b/tests/subsystem_root_path_helper.c @@ -0,0 +1,60 @@ +#include +#include +#include <_simple.h> +#include "subsystem_root_path.h" + +int +main(int argc, char **argv, char **env, const char **apple) +{ + int retval = 0; + + if (argc != 3) { + return 1; + } + + char * behavior = argv[1]; + char * expected_subsystem_root_path = argv[2]; + + if (!strcmp(behavior, HELPER_BEHAVIOR_SET)) { + const char * subsystem_root_path = _simple_getenv(apple, SUBSYSTEM_ROOT_PATH_KEY); + if (strcmp(subsystem_root_path, expected_subsystem_root_path)) { + retval = 1; + } + } else if (!strcmp(behavior, HELPER_BEHAVIOR_NOT_SET)) { + const char * subsystem_root_path = _simple_getenv(apple, SUBSYSTEM_ROOT_PATH_KEY); + if (subsystem_root_path != NULL) { + retval = 1; + } + } else if (!strcmp(behavior, HELPER_BEHAVIOR_FORK_EXEC)) { + int pid = fork(); + + if (pid > 0) { + /* Parent */ + int status; + if (waitpid(pid, &status, 0) < 0) { + retval = 1; + } + + if (!(WIFEXITED(status) && (WEXITSTATUS(status) == 0))) { + retval = 1; + } + } else if (pid == 0) { + /* Child */ + char *new_argv[] = {argv[0], HELPER_BEHAVIOR_SET, argv[2], NULL}; + execv(new_argv[0], new_argv); + retval = 1; + } else if (pid < 0) { + /* Failed */ + retval = 1; + } + } else if (!strcmp(behavior, HELPER_BEHAVIOR_SPAWN)) { + char * new_argv[] = {argv[0], HELPER_BEHAVIOR_SET, "/helper_root/", NULL}; + posix_spawnattr_t attr; + posix_spawnattr_init(&attr); + posix_spawnattr_set_subsystem_root_path_np(&attr, new_argv[2]); + retval = _spawn_and_wait(new_argv, &attr); + posix_spawnattr_destroy(&attr); + } + + return retval; +} diff --git a/tests/sysctl_hw.c b/tests/sysctl_hw.c new file mode 100644 index 000000000..83a372fb3 --- /dev/null +++ b/tests/sysctl_hw.c @@ -0,0 +1,20 @@ +#include +#include + +T_DECL(sysctl_hw_target_product, "ensure the hw.target and hw.product sysctls exist") +{ + char buffer[64] = ""; + size_t buffer_size = sizeof(buffer); + + int ret = sysctlbyname("hw.target", buffer, + &buffer_size, NULL, 0); + T_ASSERT_POSIX_SUCCESS(ret, "hw.target sysctl"); + T_LOG("hw.target = %s", buffer); + + buffer_size = sizeof(buffer); + + ret = sysctlbyname("hw.product", buffer, + &buffer_size, NULL, 0); + T_ASSERT_POSIX_SUCCESS(ret, "hw.product sysctl"); + T_LOG("hw.product = %s", buffer); +} diff --git a/tests/sysctl_system_version.c b/tests/sysctl_system_version.c new file mode 100644 
index 000000000..82a886838 --- /dev/null +++ b/tests/sysctl_system_version.c @@ -0,0 +1,21 @@ +#include +#include + +T_DECL(sysctl_osreleasetype_nowrite, + "ensure the osreleasetype sysctl is not writeable by normal processes") +{ + char nice_try[32] = "FactoryToAvoidSandbox!"; + int ret = sysctlbyname("kern.osreleasetype", NULL, NULL, nice_try, + sizeof(nice_try)); + T_ASSERT_POSIX_FAILURE(ret, EPERM, "try to set kern.osreleasetype sysctl"); +} + +T_DECL(sysctl_osreleasetype_exists, "ensure the osreleasetype sysctl exists") +{ + char release_type[64] = ""; + size_t release_type_size = sizeof(release_type); + int ret = sysctlbyname("kern.osreleasetype", release_type, + &release_type_size, NULL, 0); + T_ASSERT_POSIX_SUCCESS(ret, "kern.osreleasetype sysctl"); + T_LOG("kern.osreleasetype = %s", release_type); +} diff --git a/tests/system_version_compat.c b/tests/system_version_compat.c new file mode 100644 index 000000000..4b2f416e1 --- /dev/null +++ b/tests/system_version_compat.c @@ -0,0 +1,294 @@ +#include +#include +#include +#include +#include +#include +#include +#include + +#define SYSTEM_VERSION_COMPAT_PLIST_PATH "/System/Library/CoreServices/SystemVersionCompat.plist" +#define IOS_SYSTEM_VERSION_PLIST_PATH "/System/Library/CoreServices/iOSSystemVersion.plist" + +#define SYSTEM_VERSION_PLIST_FILENAME "SystemVersion.plist" +#define SYSTEM_VERSION_PLIST_PATH ("/System/Library/CoreServices/" SYSTEM_VERSION_PLIST_FILENAME) + +#define PRODUCT_VERSION_KEY "ProductVersion" +#define IOS_SUPPORT_VERSION_KEY "iOSSupportVersion" + +#define PRODUCT_VERSION_SYSCTL "kern.osproductversion" +#define PRODUCT_VERSION_COMPAT_SYSCTL "kern.osproductversioncompat" + +T_GLOBAL_META(T_META_CHECK_LEAKS(false)); + +#if TARGET_OS_OSX +static void +check_system_version_compat_plist_exists(void) +{ + struct stat buf; + + int ret = stat(SYSTEM_VERSION_COMPAT_PLIST_PATH, &buf); + int error = errno; + if (ret != 0) { + if (error == ENOENT) { + T_SKIP("no SystemVersionCompat.plist on this system in %s, skipping test...", + SYSTEM_VERSION_COMPAT_PLIST_PATH); + } else { + T_ASSERT_FAIL("failed to find SystemVersionCompat.plist at " SYSTEM_VERSION_COMPAT_PLIST_PATH " with error: %s", + strerror(error)); + } + } +} + +static void +check_ios_version_plist_exists(void) +{ + struct stat buf; + + int ret = stat(IOS_SYSTEM_VERSION_PLIST_PATH, &buf); + int error = errno; + if (ret != 0) { + if (error == ENOENT) { + T_SKIP("no iOSSystemVersion.plist on this system in %s, skipping test...", + IOS_SYSTEM_VERSION_PLIST_PATH); + } else { + T_ASSERT_FAIL("failed to find iOSSystemVersion.plist at " IOS_SYSTEM_VERSION_PLIST_PATH " with error: %s", + strerror(error)); + } + } +} + +static void +read_plist_version_info(char **version_plist_vers, char **compat_version_plist_vers, bool expect_shim) +{ + char opened_path[MAXPATHLEN] = { '\0' }; + + int version_plist_fd = open(SYSTEM_VERSION_PLIST_PATH, O_RDONLY); + T_QUIET; T_WITH_ERRNO; T_ASSERT_GT(version_plist_fd, 0, "opened %s", SYSTEM_VERSION_PLIST_PATH); + + // Resolve the full path of the file we've opened, verify it was either shimmed or not (as expected) + int ret = fcntl(version_plist_fd, F_GETPATH, opened_path); + T_QUIET; T_WITH_ERRNO; T_EXPECT_NE(ret, -1, "F_GETPATH on opened SystemVersion.plist"); + if (ret != -1) { + size_t opened_path_strlen = strlen(opened_path); + if (expect_shim) { + T_QUIET; T_EXPECT_GE(opened_path_strlen, strlen(SYSTEM_VERSION_COMPAT_PLIST_PATH), "opened path string length"); + T_EXPECT_EQ_STR(SYSTEM_VERSION_COMPAT_PLIST_PATH, (const char 
*)&opened_path[(opened_path_strlen - strlen(SYSTEM_VERSION_COMPAT_PLIST_PATH))], + "opened file path shimmed (Mac OS)"); + } else { + T_QUIET; T_EXPECT_GE(opened_path_strlen, strlen(SYSTEM_VERSION_PLIST_PATH), "opened path string length"); + T_EXPECT_EQ_STR(SYSTEM_VERSION_PLIST_PATH, (const char *)&opened_path[(opened_path_strlen - strlen(SYSTEM_VERSION_PLIST_PATH))], + "opened file path not shimmed"); + } + } + + // Read and parse the plists + dispatch_semaphore_t sema = dispatch_semaphore_create(0); + dispatch_queue_t queue = dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0); + xpc_create_from_plist_descriptor(version_plist_fd, queue, ^(xpc_object_t object) { + if (object == NULL) { + T_ASSERT_FAIL("Failed to parse dictionary from %s", SYSTEM_VERSION_PLIST_PATH); + } + if (xpc_get_type(object) != XPC_TYPE_DICTIONARY) { + T_ASSERT_FAIL("%s does not contain dictionary plist", SYSTEM_VERSION_PLIST_PATH); + } + + const char *plist_version = xpc_dictionary_get_string(object, PRODUCT_VERSION_KEY); + if (plist_version) { + T_LOG("Found %s for %s from %s", plist_version, PRODUCT_VERSION_KEY, SYSTEM_VERSION_PLIST_PATH); + *version_plist_vers = strdup(plist_version); + } + dispatch_semaphore_signal(sema); + }); + dispatch_semaphore_wait(sema, DISPATCH_TIME_FOREVER); + + close(version_plist_fd); + version_plist_fd = -1; + + int compat_version_plist_fd = open(SYSTEM_VERSION_COMPAT_PLIST_PATH, O_RDONLY); + T_QUIET; T_WITH_ERRNO; T_ASSERT_GT(compat_version_plist_fd, 0, "opened %s", SYSTEM_VERSION_COMPAT_PLIST_PATH); + + xpc_create_from_plist_descriptor(compat_version_plist_fd, queue, ^(xpc_object_t object) { + if (object == NULL) { + T_ASSERT_FAIL("Failed to parse dictionary from %s", SYSTEM_VERSION_COMPAT_PLIST_PATH); + } + if (xpc_get_type(object) != XPC_TYPE_DICTIONARY) { + T_ASSERT_FAIL("%s does not contain dictionary plist", SYSTEM_VERSION_COMPAT_PLIST_PATH); + } + + const char *plist_version = xpc_dictionary_get_string(object, PRODUCT_VERSION_KEY); + if (plist_version) { + T_LOG("Found %s for %s from %s", plist_version, PRODUCT_VERSION_KEY, SYSTEM_VERSION_COMPAT_PLIST_PATH); + *compat_version_plist_vers = strdup(plist_version); + } + dispatch_semaphore_signal(sema); + }); + dispatch_semaphore_wait(sema, DISPATCH_TIME_FOREVER); + + close(compat_version_plist_fd); + compat_version_plist_fd = -1; + + return; +} + +static void +read_sysctl_version_info(char **vers, char **compat_vers) +{ + char version[16] = { '\0' }, compat_version[16] = { '\0' }; + size_t version_len = sizeof(version), compat_version_len = sizeof(compat_version); + + T_QUIET; T_ASSERT_POSIX_ZERO(sysctlbyname(PRODUCT_VERSION_SYSCTL, version, &version_len, NULL, 0), "read %s", PRODUCT_VERSION_SYSCTL); + T_LOG("Foundd %s from %s", version, PRODUCT_VERSION_SYSCTL); + + T_QUIET; T_ASSERT_POSIX_ZERO(sysctlbyname(PRODUCT_VERSION_COMPAT_SYSCTL, compat_version, &compat_version_len, NULL, 0), + "read %s", PRODUCT_VERSION_COMPAT_SYSCTL); + T_LOG("Found %s from %s", compat_version, PRODUCT_VERSION_COMPAT_SYSCTL); + + *vers = strdup(version); + *compat_vers = strdup(compat_version); + + return; +} +#endif // TARGET_OS_OSX + +T_DECL(test_system_version_compat_disabled, + "Tests reading system product information without system version compat enabled") +{ +#if TARGET_OS_OSX + check_system_version_compat_plist_exists(); + char *plist_vers = NULL, *plist_compat_vers = NULL; + char *sysctl_vers = NULL, *sysctl_compat_vers = NULL; + + // Read plist version data + read_plist_version_info(&plist_vers, &plist_compat_vers, false); + + // 
Read sysctl version data + read_sysctl_version_info(&sysctl_vers, &sysctl_compat_vers); + + // Verify the normal data matches + T_EXPECT_EQ_STR(plist_vers, sysctl_vers, "%s %s matches %s value", SYSTEM_VERSION_PLIST_PATH, + PRODUCT_VERSION_KEY, PRODUCT_VERSION_SYSCTL); + + // Verify that the compatibility data matches + T_EXPECT_EQ_STR(plist_compat_vers, sysctl_compat_vers, "%s %s matches %s value", SYSTEM_VERSION_COMPAT_PLIST_PATH, + PRODUCT_VERSION_KEY, PRODUCT_VERSION_COMPAT_SYSCTL); + + + free(plist_vers); + free(plist_compat_vers); + free(sysctl_vers); + free(sysctl_compat_vers); + + T_PASS("verified version information without system version compat"); +#else // TARGET_OS_OSX + T_SKIP("system version compat only supported on macOS"); +#endif // TARGET_OS_OSX +} + +T_DECL(test_system_version_compat_enabled, + "Tests reading system product information with system version compat enabled", + T_META_ENVVAR("SYSTEM_VERSION_COMPAT=1")) +{ +#if TARGET_OS_OSX + check_system_version_compat_plist_exists(); + char *plist_vers = NULL, *plist_compat_vers = NULL; + char *sysctl_vers = NULL, *sysctl_compat_vers = NULL; + + // Read plist version data + read_plist_version_info(&plist_vers, &plist_compat_vers, true); + + // Read sysctl version data + read_sysctl_version_info(&sysctl_vers, &sysctl_compat_vers); + + // The version information should match from all sources with the shim enabled + + // Verify the normal data matches + T_EXPECT_EQ_STR(plist_vers, sysctl_vers, "%s %s matches %s value", SYSTEM_VERSION_PLIST_PATH, + PRODUCT_VERSION_KEY, PRODUCT_VERSION_SYSCTL); + + // Verify that the compatibility data matches + T_EXPECT_EQ_STR(plist_compat_vers, sysctl_compat_vers, "%s %s matches %s value", SYSTEM_VERSION_COMPAT_PLIST_PATH, + PRODUCT_VERSION_KEY, PRODUCT_VERSION_COMPAT_SYSCTL); + + // Verify the normal data matches the compatibility data + T_EXPECT_EQ_STR(plist_vers, plist_compat_vers, "%s matches in both %s and %s", PRODUCT_VERSION_KEY, + SYSTEM_VERSION_PLIST_PATH, SYSTEM_VERSION_COMPAT_PLIST_PATH); + + free(plist_vers); + free(plist_compat_vers); + free(sysctl_vers); + free(sysctl_compat_vers); + + T_PASS("verified version information with Mac OS X shim enabled"); +#else // TARGET_OS_OSX + T_SKIP("system version compat only supported on macOS"); +#endif // TARGET_OS_OSX +} + +T_DECL(test_system_version_compat_enabled_ios, + "Tests reading system product information with the iOS system version compat shim enabled", + T_META_ENVVAR("SYSTEM_VERSION_COMPAT=2")) +{ +#if TARGET_OS_OSX + char opened_path[MAXPATHLEN] = { '\0' }; + + check_ios_version_plist_exists(); + + // Read out the ProductVersion from SystemVersion.plist and ensure that it contains the same value as the + // iOSSupportVersion key + + __block char *read_plist_vers = NULL, *read_ios_support_version = NULL; + + int version_plist_fd = open(SYSTEM_VERSION_PLIST_PATH, O_RDONLY); + T_QUIET; T_WITH_ERRNO; T_ASSERT_GT(version_plist_fd, 0, "opened %s", SYSTEM_VERSION_PLIST_PATH); + + // Resolve the full path of the file we've opened, verify it was shimmed as expected + int ret = fcntl(version_plist_fd, F_GETPATH, opened_path); + T_QUIET; T_WITH_ERRNO; T_EXPECT_NE(ret, -1, "F_GETPATH on opened SystemVersion.plist"); + if (ret != -1) { + size_t opened_path_strlen = strlen(opened_path); + T_QUIET; T_EXPECT_GE(opened_path_strlen, strlen(IOS_SYSTEM_VERSION_PLIST_PATH), "opened path string length"); + T_EXPECT_EQ_STR(IOS_SYSTEM_VERSION_PLIST_PATH, (const char *)&opened_path[(opened_path_strlen - strlen(IOS_SYSTEM_VERSION_PLIST_PATH))], + 
"opened file path shimmed (iOS)"); + } + + // Read and parse the attributes from the SystemVersion plist + dispatch_semaphore_t sema = dispatch_semaphore_create(0); + dispatch_queue_t queue = dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0); + xpc_create_from_plist_descriptor(version_plist_fd, queue, ^(xpc_object_t object) { + if (object == NULL) { + T_ASSERT_FAIL("Failed to parse dictionary from %s", SYSTEM_VERSION_PLIST_PATH); + } + if (xpc_get_type(object) != XPC_TYPE_DICTIONARY) { + T_ASSERT_FAIL("%s does not contain dictionary plist", SYSTEM_VERSION_PLIST_PATH); + } + + const char *plist_version = xpc_dictionary_get_string(object, PRODUCT_VERSION_KEY); + if (plist_version) { + T_LOG("Found %s for %s from %s", plist_version, PRODUCT_VERSION_KEY, SYSTEM_VERSION_PLIST_PATH); + read_plist_vers = strdup(plist_version); + } + + const char *ios_support_version = xpc_dictionary_get_string(object, IOS_SUPPORT_VERSION_KEY); + if (ios_support_version) { + T_LOG("Found %s for %s from %s", ios_support_version, IOS_SUPPORT_VERSION_KEY, SYSTEM_VERSION_PLIST_PATH); + read_ios_support_version = strdup(ios_support_version); + } + + dispatch_semaphore_signal(sema); + }); + dispatch_semaphore_wait(sema, DISPATCH_TIME_FOREVER); + + close(version_plist_fd); + version_plist_fd = -1; + + // Verify the data matches + T_EXPECT_EQ_STR(read_plist_vers, read_ios_support_version, "%s %s matches %s value", SYSTEM_VERSION_PLIST_PATH, + PRODUCT_VERSION_KEY, IOS_SUPPORT_VERSION_KEY); + + T_PASS("verified version information with iOS shim enabled"); + +#else // TARGET_OS_OSX + T_SKIP("iOS system version shim only supported on macOS"); +#endif // TARGET_OS_OSX +} diff --git a/tests/task_filter_msg.c b/tests/task_filter_msg.c new file mode 100644 index 000000000..5dc22fd03 --- /dev/null +++ b/tests/task_filter_msg.c @@ -0,0 +1,76 @@ +#include +#include +#include +#include +#include +#include +#include + +#ifdef T_NAMESPACE +#undef T_NAMESPACE +#endif + +#include +#include + +T_GLOBAL_META(T_META_RUN_CONCURRENTLY(true), T_META_NAMESPACE("xnu.ipc")); + +T_DECL(test_task_filter_msg_flag, "Set the filter msg flag on the task and check if the forked child inherits it", + T_META_ASROOT(true), T_META_CHECK_LEAKS(false)) +{ + int ret, dev; + size_t sysctl_size; + + T_SETUPBEGIN; + + dev = 0; + sysctl_size = sizeof(dev); + ret = sysctlbyname("kern.development", &dev, &sysctl_size, NULL, 0); + T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "sysctl kern.development failed"); + if (dev == 0) { + T_SKIP("Skipping test on release kernel"); + } + + T_SETUPEND; + + int cur_filter_flag = 0; + int new_filter_flag = 1; + ret = sysctlbyname("kern.task_set_filter_msg_flag", &cur_filter_flag, &sysctl_size, &new_filter_flag, sizeof(new_filter_flag)); + T_ASSERT_POSIX_SUCCESS(ret, "sysctlbyname"); + T_ASSERT_EQ(cur_filter_flag, 0, "Task should not have filtering on"); + + cur_filter_flag = 0; + ret = sysctlbyname("kern.task_set_filter_msg_flag", &cur_filter_flag, &sysctl_size, NULL, 0); + T_ASSERT_POSIX_SUCCESS(ret, "sysctlbyname"); + T_ASSERT_EQ(cur_filter_flag, 1, "Task should have filtering on"); + + pid_t pid = fork(); + if (pid == 0) { + cur_filter_flag = 0; + ret = sysctlbyname("kern.task_set_filter_msg_flag", &cur_filter_flag, &sysctl_size, NULL, 0); + if (ret == 0) { + if (cur_filter_flag == 1) { + exit(0); + } + } + exit(1); + } + + int status; + ret = waitpid(pid, &status, 0); + T_ASSERT_POSIX_SUCCESS(ret, "waitpid"); + + if (WIFEXITED(status)) { + const int exit_code = WEXITSTATUS(status); + T_ASSERT_EQ(exit_code, 0, "Child 
inherited the filter msg flag"); + } + + /* Turn off task msg filtering */ + cur_filter_flag = 1; + new_filter_flag = 0; + ret = sysctlbyname("kern.task_set_filter_msg_flag", &cur_filter_flag, &sysctl_size, &new_filter_flag, sizeof(new_filter_flag)); + T_ASSERT_POSIX_SUCCESS(ret, "sysctlbyname"); + T_ASSERT_EQ(cur_filter_flag, 1, "Task should have filtering on"); + + T_END; +} diff --git a/tests/task_policy.c b/tests/task_policy.c new file mode 100644 index 000000000..2388a9667 --- /dev/null +++ b/tests/task_policy.c @@ -0,0 +1,600 @@ +#include +#include +#include +#include + +#include +#include +#include +#include + +extern char **environ; + +int task_inspect_for_pid(mach_port_name_t target_tport, int pid, mach_port_name_t *t); +int task_for_pid(mach_port_name_t target_tport, int pid, mach_port_name_t *t); +int task_name_for_pid(mach_port_name_t target_tport, int pid, mach_port_name_t *t); + +T_GLOBAL_META(T_META_RUN_CONCURRENTLY(true)); + +#if defined(UNENTITLED) + +T_DECL(task_policy_set_task_name, "task_policy_set with task name (not entitled)") +{ + struct task_qos_policy qosinfo = { + .task_latency_qos_tier = LATENCY_QOS_TIER_0, + .task_throughput_qos_tier = THROUGHPUT_QOS_TIER_0, + }; + task_name_t task_name = TASK_NAME_NULL; + + T_SETUPBEGIN; + T_ASSERT_MACH_SUCCESS(task_name_for_pid(mach_task_self(), getpid(), + &task_name), NULL); + T_SETUPEND; + + T_ASSERT_MACH_ERROR(task_policy_set(task_name, + TASK_BASE_QOS_POLICY, + (task_policy_t)&qosinfo, + TASK_QOS_POLICY_COUNT), + KERN_INVALID_ARGUMENT, NULL); +} + +T_DECL(task_policy_set_task, "task_policy_set with task (not entitled)") +{ + struct task_qos_policy qosinfo = { + .task_latency_qos_tier = LATENCY_QOS_TIER_0, + .task_throughput_qos_tier = THROUGHPUT_QOS_TIER_0, + }; + + T_ASSERT_MACH_SUCCESS(task_policy_set(mach_task_self(), + TASK_BASE_QOS_POLICY, + (task_policy_t)&qosinfo, + TASK_QOS_POLICY_COUNT), + NULL); +} + +T_DECL(task_policy_set_inspect, "task_policy_set with task inspect (not entitled)") +{ + struct task_qos_policy qosinfo = { + .task_latency_qos_tier = LATENCY_QOS_TIER_0, + .task_throughput_qos_tier = THROUGHPUT_QOS_TIER_0, + }; + task_inspect_t task_inspect = TASK_INSPECT_NULL; + + T_SETUPBEGIN; + T_ASSERT_MACH_SUCCESS(task_inspect_for_pid(mach_task_self(), getpid(), + &task_inspect), NULL); + T_SETUPEND; + + + T_ASSERT_MACH_ERROR(task_policy_set(task_inspect, + TASK_BASE_QOS_POLICY, + (task_policy_t)&qosinfo, + TASK_QOS_POLICY_COUNT), + KERN_INVALID_ARGUMENT, NULL); +} + +T_DECL(task_policy_set_foreign_task, "task_policy_set for foreign task (not entitled)", T_META_ASROOT(true)) +{ + struct task_qos_policy qosinfo = { + .task_latency_qos_tier = LATENCY_QOS_TIER_0, + .task_throughput_qos_tier = THROUGHPUT_QOS_TIER_0, + }; + task_t task = TASK_NULL; + kern_return_t ret = KERN_FAILURE; + char *args[] = { "sleep", "10", NULL }; + pid_t pid = 0; + + T_SETUPBEGIN; + + ret = posix_spawnp(&pid, args[0], NULL, NULL, args, environ); + T_QUIET; T_ASSERT_POSIX_ZERO(ret, "spawning sleep 10"); + + T_ASSERT_MACH_SUCCESS(task_for_pid(mach_task_self(), pid, + &task), NULL); + T_SETUPEND; + + T_ASSERT_MACH_SUCCESS(task_policy_set(task, + TASK_BASE_QOS_POLICY, + (task_policy_t)&qosinfo, + TASK_QOS_POLICY_COUNT), + NULL); + + ret = kill(pid, SIGTERM); + T_QUIET; T_ASSERT_POSIX_ZERO(ret, "killing sleep"); +} + +T_DECL(task_policy_set_foreign_task_name, "task_policy_set for foreign task name (not entitled)", T_META_ASROOT(true)) +{ + struct task_qos_policy qosinfo = { + .task_latency_qos_tier = LATENCY_QOS_TIER_0, + 
.task_throughput_qos_tier = THROUGHPUT_QOS_TIER_0, + }; + task_name_t task_name = TASK_NAME_NULL; + kern_return_t ret = KERN_FAILURE; + char *args[] = { "sleep", "10", NULL }; + pid_t pid = 0; + + T_SETUPBEGIN; + + ret = posix_spawnp(&pid, args[0], NULL, NULL, args, environ); + T_QUIET; T_ASSERT_POSIX_ZERO(ret, "spawning sleep 10"); + + T_ASSERT_MACH_SUCCESS(task_name_for_pid(mach_task_self(), pid, + &task_name), NULL); + T_SETUPEND; + + T_ASSERT_MACH_ERROR(task_policy_set(task_name, + TASK_BASE_QOS_POLICY, + (task_policy_t)&qosinfo, + TASK_QOS_POLICY_COUNT), + KERN_INVALID_ARGUMENT, NULL); + + ret = kill(pid, SIGTERM); + T_QUIET; T_ASSERT_POSIX_ZERO(ret, "killing sleep"); +} + +T_DECL(task_policy_set_foreign_task_inspect, "task_policy_set for foreign task inspect (not entitled)", T_META_ASROOT(true)) +{ + struct task_qos_policy qosinfo = { + .task_latency_qos_tier = LATENCY_QOS_TIER_0, + .task_throughput_qos_tier = THROUGHPUT_QOS_TIER_0, + }; + task_inspect_t task_inspect = TASK_INSPECT_NULL; + kern_return_t ret = KERN_FAILURE; + char *args[] = { "sleep", "10", NULL }; + pid_t pid = 0; + + T_SETUPBEGIN; + + ret = posix_spawnp(&pid, args[0], NULL, NULL, args, environ); + T_QUIET; T_ASSERT_POSIX_ZERO(ret, "spawning sleep 10"); + + T_ASSERT_MACH_SUCCESS(task_inspect_for_pid(mach_task_self(), pid, + &task_inspect), NULL); + T_SETUPEND; + + T_ASSERT_MACH_ERROR(task_policy_set(task_inspect, + TASK_BASE_QOS_POLICY, + (task_policy_t)&qosinfo, + TASK_QOS_POLICY_COUNT), + KERN_INVALID_ARGUMENT, NULL); + + ret = kill(pid, SIGTERM); + T_QUIET; T_ASSERT_POSIX_ZERO(ret, "killing sleep"); +} + +T_DECL(task_policy_get_name, "task_policy_get with task name (not entitled)") +{ + task_name_t task_name = TASK_NAME_NULL; + struct task_category_policy role[TASK_CATEGORY_POLICY_COUNT]; + mach_msg_type_number_t count = TASK_CATEGORY_POLICY_COUNT; + boolean_t get_default = FALSE; + + T_SETUPBEGIN; + T_ASSERT_MACH_SUCCESS(task_name_for_pid(mach_task_self(), getpid(), + &task_name), NULL); + T_SETUPEND; + + T_ASSERT_MACH_ERROR(task_policy_get(task_name, + TASK_CATEGORY_POLICY, + (task_policy_t)role, + &count, + &get_default), + KERN_INVALID_ARGUMENT, NULL); +} + +T_DECL(task_policy_get_task, "task_policy_get with task (not entitled)") +{ + struct task_category_policy role[TASK_CATEGORY_POLICY_COUNT]; + mach_msg_type_number_t count = TASK_CATEGORY_POLICY_COUNT; + boolean_t get_default = FALSE; + + T_ASSERT_MACH_SUCCESS(task_policy_get(mach_task_self(), + TASK_CATEGORY_POLICY, + (task_policy_t)role, + &count, + &get_default), + NULL); +} + +T_DECL(task_policy_get_inspect, "task_policy_get with task inspect (not entitled)") +{ + task_inspect_t task_inspect = TASK_INSPECT_NULL; + struct task_category_policy role[TASK_CATEGORY_POLICY_COUNT]; + mach_msg_type_number_t count = TASK_CATEGORY_POLICY_COUNT; + boolean_t get_default = FALSE; + + T_SETUPBEGIN; + T_ASSERT_MACH_SUCCESS(task_inspect_for_pid(mach_task_self(), getpid(), + &task_inspect), NULL); + T_SETUPEND; + + T_ASSERT_MACH_SUCCESS(task_policy_get(task_inspect, + TASK_CATEGORY_POLICY, + (task_policy_t)role, + &count, + &get_default), + NULL); +} + +T_DECL(task_policy_get_foreign_task_inspect, "task_policy_get for foreign task inspect (not entitled)", T_META_ASROOT(true)) +{ + task_inspect_t task_inspect = TASK_INSPECT_NULL; + struct task_category_policy role[TASK_CATEGORY_POLICY_COUNT]; + mach_msg_type_number_t count = TASK_CATEGORY_POLICY_COUNT; + boolean_t get_default = FALSE; + kern_return_t ret = KERN_FAILURE; + char *args[] = { "sleep", "10", NULL }; + pid_t 
pid = 0; + + T_SETUPBEGIN; + + ret = posix_spawnp(&pid, args[0], NULL, NULL, args, environ); + T_QUIET; T_ASSERT_POSIX_ZERO(ret, "spawning sleep 10"); + + T_ASSERT_MACH_SUCCESS(task_inspect_for_pid(mach_task_self(), pid, + &task_inspect), NULL); + T_SETUPEND; + + T_ASSERT_MACH_SUCCESS(task_policy_get(task_inspect, + TASK_CATEGORY_POLICY, + (task_policy_t)role, + &count, + &get_default), + NULL); + + ret = kill(pid, SIGTERM); + T_QUIET; T_ASSERT_POSIX_ZERO(ret, "killing sleep"); +} + +T_DECL(task_policy_get_foreign_task, "task_policy_get for foreign task (not entitled)", T_META_ASROOT(true)) +{ + task_t task = TASK_NULL; + struct task_category_policy role[TASK_CATEGORY_POLICY_COUNT]; + mach_msg_type_number_t count = TASK_CATEGORY_POLICY_COUNT; + boolean_t get_default = FALSE; + kern_return_t ret = KERN_FAILURE; + char *args[] = { "sleep", "10", NULL }; + pid_t pid = 0; + + T_SETUPBEGIN; + + ret = posix_spawnp(&pid, args[0], NULL, NULL, args, environ); + T_QUIET; T_ASSERT_POSIX_ZERO(ret, "spawning sleep 10"); + + T_ASSERT_MACH_SUCCESS(task_for_pid(mach_task_self(), pid, + &task), NULL); + T_SETUPEND; + + T_ASSERT_MACH_SUCCESS(task_policy_get(task, + TASK_CATEGORY_POLICY, + (task_policy_t)role, + &count, + &get_default), + NULL); + + ret = kill(pid, SIGTERM); + T_QUIET; T_ASSERT_POSIX_ZERO(ret, "killing sleep"); +} + +T_DECL(task_policy_get_foreign_task_name, "task_policy_get for foreign task name (not entitled)") +{ + task_name_t task_name = TASK_NAME_NULL; + struct task_category_policy role[TASK_CATEGORY_POLICY_COUNT]; + mach_msg_type_number_t count = TASK_CATEGORY_POLICY_COUNT; + boolean_t get_default = FALSE; + kern_return_t ret = KERN_FAILURE; + char *args[] = { "sleep", "10", NULL }; + pid_t pid = 0; + + T_SETUPBEGIN; + + ret = posix_spawnp(&pid, args[0], NULL, NULL, args, environ); + T_QUIET; T_ASSERT_POSIX_ZERO(ret, "spawning sleep 10"); + + T_ASSERT_MACH_SUCCESS(task_name_for_pid(mach_task_self(), pid, + &task_name), NULL); + T_SETUPEND; + + T_ASSERT_MACH_ERROR(task_policy_get(task_name, + TASK_CATEGORY_POLICY, + (task_policy_t)role, + &count, + &get_default), + KERN_INVALID_ARGUMENT, NULL); + + ret = kill(pid, SIGTERM); + T_QUIET; T_ASSERT_POSIX_ZERO(ret, "killing sleep"); +} + +#else /* ENTITLED */ + +T_DECL(task_policy_set_task_name_entitled, "task_policy_set with task name (entitled)", T_META_ASROOT(true), T_META_ASROOT(true)) +{ + struct task_qos_policy qosinfo = { + .task_latency_qos_tier = LATENCY_QOS_TIER_0, + .task_throughput_qos_tier = THROUGHPUT_QOS_TIER_0, + }; + task_name_t task_name = TASK_NAME_NULL; + + T_SETUPBEGIN; + T_ASSERT_MACH_SUCCESS(task_name_for_pid(mach_task_self(), getpid(), + &task_name), NULL); + T_SETUPEND; + + T_ASSERT_MACH_SUCCESS(task_policy_set(task_name, + TASK_BASE_QOS_POLICY, + (task_policy_t)&qosinfo, + TASK_QOS_POLICY_COUNT), + NULL); +} + +T_DECL(task_policy_set_task_entitled, "task_policy_set with task (entitled)") +{ + struct task_qos_policy qosinfo = { + .task_latency_qos_tier = LATENCY_QOS_TIER_0, + .task_throughput_qos_tier = THROUGHPUT_QOS_TIER_0, + }; + + T_ASSERT_MACH_SUCCESS(task_policy_set(mach_task_self(), + TASK_BASE_QOS_POLICY, + (task_policy_t)&qosinfo, + TASK_QOS_POLICY_COUNT), + NULL); +} + +T_DECL(task_policy_set_inspect_entitled, "task_policy_set with task inspect (entitled)") +{ + struct task_qos_policy qosinfo = { + .task_latency_qos_tier = LATENCY_QOS_TIER_0, + .task_throughput_qos_tier = THROUGHPUT_QOS_TIER_0, + }; + task_inspect_t task_inspect = TASK_INSPECT_NULL; + + T_SETUPBEGIN; + 
T_ASSERT_MACH_SUCCESS(task_inspect_for_pid(mach_task_self(), getpid(), + &task_inspect), NULL); + T_SETUPEND; + + T_ASSERT_MACH_SUCCESS(task_policy_set(task_inspect, + TASK_BASE_QOS_POLICY, + (task_policy_t)&qosinfo, + TASK_QOS_POLICY_COUNT), + NULL); +} + +T_DECL(task_policy_set_foreign_task_entitled, "task_policy_set for foreign task (entitled)", T_META_ASROOT(true)) +{ + struct task_qos_policy qosinfo = { + .task_latency_qos_tier = LATENCY_QOS_TIER_0, + .task_throughput_qos_tier = THROUGHPUT_QOS_TIER_0, + }; + task_t task = TASK_NULL; + kern_return_t ret = KERN_FAILURE; + char *args[] = { "sleep", "10", NULL }; + pid_t pid = 0; + + T_SETUPBEGIN; + + ret = posix_spawnp(&pid, args[0], NULL, NULL, args, environ); + T_QUIET; T_ASSERT_POSIX_ZERO(ret, "spawning sleep 10"); + + T_ASSERT_MACH_SUCCESS(task_for_pid(mach_task_self(), pid, + &task), NULL); + T_SETUPEND; + + T_ASSERT_MACH_SUCCESS(task_policy_set(task, + TASK_BASE_QOS_POLICY, + (task_policy_t)&qosinfo, + TASK_QOS_POLICY_COUNT), + NULL); + + ret = kill(pid, SIGTERM); + T_QUIET; T_ASSERT_POSIX_ZERO(ret, "killing sleep"); +} + +T_DECL(task_policy_set_foreign_task_name_entitled, "task_policy_set for foreign task name (entitled)", T_META_ASROOT(true), T_META_ASROOT(true)) +{ + struct task_qos_policy qosinfo = { + .task_latency_qos_tier = LATENCY_QOS_TIER_0, + .task_throughput_qos_tier = THROUGHPUT_QOS_TIER_0, + }; + task_name_t task_name = TASK_NAME_NULL; + kern_return_t ret = KERN_FAILURE; + char *args[] = { "sleep", "10", NULL }; + pid_t pid = 0; + + T_SETUPBEGIN; + + ret = posix_spawnp(&pid, args[0], NULL, NULL, args, environ); + T_QUIET; T_ASSERT_POSIX_ZERO(ret, "spawning sleep 10"); + + T_ASSERT_MACH_SUCCESS(task_name_for_pid(mach_task_self(), pid, + &task_name), NULL); + T_SETUPEND; + + T_ASSERT_MACH_SUCCESS(task_policy_set(task_name, + TASK_BASE_QOS_POLICY, + (task_policy_t)&qosinfo, + TASK_QOS_POLICY_COUNT), + NULL); + + ret = kill(pid, SIGTERM); + T_QUIET; T_ASSERT_POSIX_ZERO(ret, "killing sleep"); +} + +T_DECL(task_policy_set_foreign_task_inspect_entitled, "task_policy_set for foreign task inspect (entitled)", T_META_ASROOT(true)) +{ + struct task_qos_policy qosinfo = { + .task_latency_qos_tier = LATENCY_QOS_TIER_0, + .task_throughput_qos_tier = THROUGHPUT_QOS_TIER_0, + }; + task_inspect_t task_inspect = TASK_INSPECT_NULL; + kern_return_t ret = KERN_FAILURE; + char *args[] = { "sleep", "10", NULL }; + pid_t pid = 0; + + T_SETUPBEGIN; + + ret = posix_spawnp(&pid, args[0], NULL, NULL, args, environ); + T_QUIET; T_ASSERT_POSIX_ZERO(ret, "spawning sleep 10"); + + T_ASSERT_MACH_SUCCESS(task_inspect_for_pid(mach_task_self(), pid, + &task_inspect), NULL); + T_SETUPEND; + + T_ASSERT_MACH_SUCCESS(task_policy_set(task_inspect, + TASK_BASE_QOS_POLICY, + (task_policy_t)&qosinfo, + TASK_QOS_POLICY_COUNT), + NULL); + + ret = kill(pid, SIGTERM); + T_QUIET; T_ASSERT_POSIX_ZERO(ret, "killing sleep"); +} + +T_DECL(task_policy_get_name_entitled, "task_policy_get with task name (entitled)", T_META_ASROOT(true)) +{ + task_name_t task_name = TASK_NAME_NULL; + struct task_category_policy role[TASK_CATEGORY_POLICY_COUNT]; + mach_msg_type_number_t count = TASK_CATEGORY_POLICY_COUNT; + boolean_t get_default = FALSE; + + T_SETUPBEGIN; + T_ASSERT_MACH_SUCCESS(task_name_for_pid(mach_task_self(), getpid(), + &task_name), NULL); + T_SETUPEND; + + T_ASSERT_MACH_SUCCESS(task_policy_get(task_name, + TASK_CATEGORY_POLICY, + (task_policy_t)role, + &count, + &get_default), + NULL); +} + +T_DECL(task_policy_get_task_entitled, "task_policy_get with task (entitled)") 
+{ + struct task_category_policy role[TASK_CATEGORY_POLICY_COUNT]; + mach_msg_type_number_t count = TASK_CATEGORY_POLICY_COUNT; + boolean_t get_default = FALSE; + + T_ASSERT_MACH_SUCCESS(task_policy_get(mach_task_self(), + TASK_CATEGORY_POLICY, + (task_policy_t)role, + &count, + &get_default), + NULL); +} + +T_DECL(task_policy_get_inspect_entitled, "task_policy_get with task inspect (entitled)") +{ + task_inspect_t task_inspect = TASK_INSPECT_NULL; + struct task_category_policy role[TASK_CATEGORY_POLICY_COUNT]; + mach_msg_type_number_t count = TASK_CATEGORY_POLICY_COUNT; + boolean_t get_default = FALSE; + + T_SETUPBEGIN; + T_ASSERT_MACH_SUCCESS(task_inspect_for_pid(mach_task_self(), getpid(), + &task_inspect), NULL); + T_SETUPEND; + + T_ASSERT_MACH_SUCCESS(task_policy_get(task_inspect, + TASK_CATEGORY_POLICY, + (task_policy_t)role, + &count, + &get_default), + NULL); +} + +T_DECL(task_policy_get_foreign_task_inspect_entitled, "task_policy_get for foreign task inspect (entitled)", T_META_ASROOT(true)) +{ + task_inspect_t task_inspect = TASK_INSPECT_NULL; + struct task_category_policy role[TASK_CATEGORY_POLICY_COUNT]; + mach_msg_type_number_t count = TASK_CATEGORY_POLICY_COUNT; + boolean_t get_default = FALSE; + kern_return_t ret = KERN_FAILURE; + char *args[] = { "sleep", "10", NULL }; + pid_t pid = 0; + + T_SETUPBEGIN; + + ret = posix_spawnp(&pid, args[0], NULL, NULL, args, environ); + T_QUIET; T_ASSERT_POSIX_ZERO(ret, "spawning sleep 10"); + + T_ASSERT_MACH_SUCCESS(task_inspect_for_pid(mach_task_self(), pid, + &task_inspect), NULL); + T_SETUPEND; + + T_ASSERT_MACH_SUCCESS(task_policy_get(task_inspect, + TASK_CATEGORY_POLICY, + (task_policy_t)role, + &count, + &get_default), + NULL); + + ret = kill(pid, SIGTERM); + T_QUIET; T_ASSERT_POSIX_ZERO(ret, "killing sleep"); +} + +T_DECL(task_policy_get_foreign_task_entitled, "task_policy_get for foreign task (entitled)", T_META_ASROOT(true)) +{ + task_t task = TASK_NULL; + struct task_category_policy role[TASK_CATEGORY_POLICY_COUNT]; + mach_msg_type_number_t count = TASK_CATEGORY_POLICY_COUNT; + boolean_t get_default = FALSE; + kern_return_t ret = KERN_FAILURE; + char *args[] = { "sleep", "10", NULL }; + pid_t pid = 0; + + T_SETUPBEGIN; + + ret = posix_spawnp(&pid, args[0], NULL, NULL, args, environ); + T_QUIET; T_ASSERT_POSIX_ZERO(ret, "spawning sleep 10"); + + T_ASSERT_MACH_SUCCESS(task_for_pid(mach_task_self(), pid, + &task), NULL); + T_SETUPEND; + + T_ASSERT_MACH_SUCCESS(task_policy_get(task, + TASK_CATEGORY_POLICY, + (task_policy_t)role, + &count, + &get_default), + NULL); + + ret = kill(pid, SIGTERM); + T_QUIET; T_ASSERT_POSIX_ZERO(ret, "killing sleep"); +} + +T_DECL(task_policy_get_foreign_task_name_entitled, "task_policy_get for foreign task name (entitled)", T_META_ASROOT(true)) +{ + task_name_t task_name = TASK_NAME_NULL; + struct task_category_policy role[TASK_CATEGORY_POLICY_COUNT]; + mach_msg_type_number_t count = TASK_CATEGORY_POLICY_COUNT; + boolean_t get_default = FALSE; + kern_return_t ret = KERN_FAILURE; + char *args[] = { "sleep", "10", NULL }; + pid_t pid = 0; + + T_SETUPBEGIN; + + ret = posix_spawnp(&pid, args[0], NULL, NULL, args, environ); + T_QUIET; T_ASSERT_POSIX_ZERO(ret, "spawning sleep 10"); + + T_ASSERT_MACH_SUCCESS(task_name_for_pid(mach_task_self(), pid, + &task_name), NULL); + T_SETUPEND; + + T_ASSERT_MACH_SUCCESS(task_policy_get(task_name, + TASK_CATEGORY_POLICY, + (task_policy_t)role, + &count, + &get_default), + NULL); + + ret = kill(pid, SIGTERM); + T_QUIET; T_ASSERT_POSIX_ZERO(ret, "killing sleep"); +} + 
+#endif /* UNENTITLED */ diff --git a/tests/task_policy_entitlement.plist b/tests/task_policy_entitlement.plist new file mode 100644 index 000000000..f0976d490 --- /dev/null +++ b/tests/task_policy_entitlement.plist @@ -0,0 +1,13 @@ + + + + + com.apple.private.task_policy + + + com.apple.system-task-ports + + task_for_pid-allow + + + diff --git a/tests/telemetry.c b/tests/telemetry.c index abf66285b..68d99ad94 100644 --- a/tests/telemetry.c +++ b/tests/telemetry.c @@ -4,12 +4,15 @@ #include #include #include +#include #include #include #include #include #include +#include "ktrace_helpers.h" + enum telemetry_pmi { TELEMETRY_PMI_NONE, TELEMETRY_PMI_INSTRS, @@ -132,6 +135,7 @@ thread_spin(__unused void *arg) T_DECL(microstackshot_pmi, "attempt to configure microstackshots on PMI") { skip_if_pmi_unsupported(); + start_controlling_ktrace(); T_SETUPBEGIN; ktrace_session_t s = ktrace_session_create(); diff --git a/tests/test_dext_launch_56101852.c b/tests/test_dext_launch_56101852.c new file mode 100644 index 000000000..99ad78213 --- /dev/null +++ b/tests/test_dext_launch_56101852.c @@ -0,0 +1,101 @@ +#include +#include +#include +#include +#include +#include +#include + +T_GLOBAL_META(T_META_NAMESPACE("xnu.iokit"), + T_META_RUN_CONCURRENTLY(true)); + +#define DEXT_NAME "com.apple.test_intentionally_crashing_driver_56101852.dext" +#define DEXT_PATH "/Library/DriverExtensions/" DEXT_NAME +#define SYSCTL_NAME "kern.driverkit_checkin_timed_out" +#define MAX_TIMEOUT_SECONDS 120 + +static int +copyfileCallback(int what __unused, int stage, copyfile_state_t state __unused, const char *src __unused, const char *dst, void *ctx __unused) +{ + if (stage == COPYFILE_FINISH) { + T_QUIET; T_ASSERT_POSIX_SUCCESS(chown(dst, 0, 0), "chown %s to root / wheel", dst); + } + return COPYFILE_CONTINUE; +} + +static void +cleanup(void) +{ + removefile_state_t state = removefile_state_alloc(); + removefile(DEXT_PATH, state, REMOVEFILE_RECURSIVE); + removefile_state_free(state); +} + +T_DECL(test_dext_launch_56101852, + "Test launching a crashing dext", + T_META_ASROOT(true), T_META_IGNORECRASHES("*test_intentionally_crashing_driver_56101852*")) +{ + T_SKIP("skipping test_dext_launch_56101852 due to 62657199"); + + CFStringRef path = NULL; + CFURLRef url = NULL; + uint64_t startTime = mach_absolute_time(); + uint64_t endTime = 0; + size_t endTimeSize = sizeof(uint64_t); + uint64_t elapsedTimeAbs = 0; + uint64_t elapsedTimeNs = 0; + mach_timebase_info_data_t timebaseInfo; + copyfile_state_t copyfileState; + + copyfileState = copyfile_state_alloc(); + copyfile_state_set(copyfileState, COPYFILE_STATE_STATUS_CB, (void *)©fileCallback); + T_ASSERT_POSIX_SUCCESS(copyfile(DEXT_NAME, DEXT_PATH, copyfileState, COPYFILE_RECURSIVE | COPYFILE_ALL), "copied dext " DEXT_NAME " to " DEXT_PATH); + T_ATEND(cleanup); + + /* set up timebaseInfo */ + T_ASSERT_MACH_SUCCESS(mach_timebase_info(&timebaseInfo), "set up mach_timebase_info"); + + /* Set the initial value of kern.driverkit_checkin_timed_out to startTime */ + T_ASSERT_POSIX_SUCCESS(sysctlbyname(SYSCTL_NAME, NULL, NULL, &startTime, sizeof(startTime)), "set sysctl " SYSCTL_NAME " to %llu", startTime); + + + /* Convert DEXT_PATH to a CFURL */ + path = CFSTR(DEXT_PATH); + url = CFURLCreateWithFileSystemPath(kCFAllocatorDefault, path, kCFURLPOSIXPathStyle, true); + T_ASSERT_NOTNULL(url, "created CFURL from CFString"); + + /* Ask kextd to load the dext */ + T_ASSERT_EQ(KextManagerLoadKextWithURL(url, NULL), kOSReturnSuccess, "Loaded dext %s with kextd", DEXT_PATH); + T_LOG("Will sleep for 
up to %d seconds", MAX_TIMEOUT_SECONDS); + + /* Wait for up to 120 seconds. Each loop iteration sleeps for 1 second and checks + * the value of the sysctl to check if it has changed. If the value changed, then + * the dext loaded earlier has crashed. If 120 seconds elapses and the value does + * not change, then the dext did not crash. + */ + for (int i = 0; i < MAX_TIMEOUT_SECONDS; i++) { + sleep(1); + T_ASSERT_POSIX_SUCCESS(sysctlbyname(SYSCTL_NAME, &endTime, &endTimeSize, NULL, 0), "using " SYSCTL_NAME " to check if dext has crashed"); + if (endTime != startTime) { + T_LOG("Detected dext crash"); + break; + } + T_LOG(" Slept for %d seconds", i + 1); + } + + T_LOG("startTime = %llu, endTime = %llu", startTime, endTime); + + T_ASSERT_GT(endTime, startTime, "dext has crashed"); + + /* Check how much time has elapsed and see if it is less than 120 seconds. If it + * is 120 seconds or greater, then the dext did not check in to the kernel but we + * were not able to stop waiting for the dext to check in after it crashed. + */ + elapsedTimeAbs = endTime - startTime; + elapsedTimeNs = elapsedTimeAbs * timebaseInfo.numer / timebaseInfo.denom; + T_LOG("elapsedTimeAbs = %llu, elapsedTimeNs = %llu", elapsedTimeAbs, elapsedTimeNs); + T_ASSERT_LT(elapsedTimeNs / NSEC_PER_SEC, (uint64_t)MAX_TIMEOUT_SECONDS, "elapsed time is less than %d seconds", MAX_TIMEOUT_SECONDS); + + copyfile_state_free(copyfileState); + CFRelease(url); +} diff --git a/tests/test_dext_launch_56101852.entitlements b/tests/test_dext_launch_56101852.entitlements new file mode 100644 index 000000000..842b583b2 --- /dev/null +++ b/tests/test_dext_launch_56101852.entitlements @@ -0,0 +1,8 @@ + + + + + com.apple.private.security.storage.SystemExtensionManagement + + + diff --git a/tests/test_sysctl_kern_procargs_25397314.m b/tests/test_sysctl_kern_procargs_25397314.m new file mode 100644 index 000000000..47bf856f7 --- /dev/null +++ b/tests/test_sysctl_kern_procargs_25397314.m @@ -0,0 +1,343 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +T_GLOBAL_META(T_META_RUN_CONCURRENTLY(true), + T_META_ASROOT(true)); + +struct procargs { + int argc; + size_t preflightSize; + NSString *executablePath; + NSArray *components; + NSString *legacyExecutablePath; + void *rawBuffer; + size_t rawBufferSize; +}; + +static void printHexDump(void* buffer, size_t size); + +typedef struct procargs *procargs_t; + +#define TEST_ENVIRONMENT_VARIABLE "TESTENVVARIABLE" +#define TEST_ENVIRONMENT_VARIABLE_VALUE "TESTENVVARIABLE_VALUE" + + +static size_t argmax; + +static procargs_t getProcArgs(int type, pid_t pid, size_t allocSize) +{ + int sysctlArgs[3] = {CTL_KERN, type, pid}; + int argc; + NSMutableArray *components = [NSMutableArray array]; + procargs_t args = (procargs_t) malloc(sizeof(struct procargs)); + size_t currentLen = 0; + bool legacyPathPresent = false; + NSString *current = nil; + NSString *legacyExecutablePath = nil; + NSString *executablePath = nil; + size_t bufferSize; + size_t preflightSize = 0; + const char *name = type == KERN_PROCARGS ? "KERN_PROCARGS" : "KERN_PROCARGS2"; + const char *cursor; + void *buffer; + + T_LOG("Get proc args for pid %d, allocSize %lu with %s", pid, allocSize, name); + + + T_ASSERT_TRUE(type == KERN_PROCARGS || type == KERN_PROCARGS2, "type is valid"); + + /* Determine how much memory to allocate. If allocSize is 0 we will use the size + * we get from the sysctl for our buffer. 
*/ + T_ASSERT_POSIX_SUCCESS(sysctl(sysctlArgs, 3, NULL, &preflightSize, NULL, 0), "sysctl %s", name); + T_LOG("procargs data should be %lu bytes", preflightSize); + + if (allocSize == 0) { + allocSize = preflightSize; + } + + buffer = malloc(allocSize); + T_QUIET; T_ASSERT_NOTNULL(buffer, "malloc buffer of size %lu", allocSize); + bufferSize = allocSize; + + T_ASSERT_POSIX_SUCCESS(sysctl(sysctlArgs, 3, buffer, &bufferSize, NULL, 0), "sysctl %s", name); + T_ASSERT_LE(bufferSize, allocSize, "returned buffer size should be less than allocated size"); + T_LOG("sysctl wrote %lu bytes", bufferSize); + if (allocSize >= bufferSize) { + /* Allocated buffer is larger than what kernel wrote, so it should match preflightSize */ + T_ASSERT_EQ(bufferSize, preflightSize, "buffer size should be the same as preflight size"); + } + + printHexDump(buffer, bufferSize); + + if (type == KERN_PROCARGS2) { + argc = *(int *)buffer; + cursor = (const char *)buffer + sizeof(int); + } else { + /* Without KERN_PROCARGS2, we can't tell where argv ends and environ begins. + * Set argc to -1 to indicate this */ + argc = -1; + cursor = buffer; + } + + while ((uintptr_t)cursor < (uintptr_t)buffer + bufferSize) { + /* Ensure alignment and check if the uint16_t at cursor is the magic value */ + if (!((uintptr_t)cursor & (sizeof(uint16_t) - 1)) && + (uintptr_t)buffer + bufferSize - (uintptr_t)cursor > sizeof(uint16_t)) { + /* Silence -Wcast-align by casting to const void * */ + uint16_t value = *(const uint16_t *)(const void *)cursor; + if (value == 0xBFFF) { + /* Magic value that specifies the end of the argument/environ section */ + cursor += sizeof(uint16_t) + sizeof(uint32_t); + legacyPathPresent = true; + break; + } + } + currentLen = strnlen(cursor, bufferSize - ((uintptr_t)cursor - (uintptr_t)buffer)); + current = [[NSString alloc] initWithBytes:cursor length:currentLen encoding:NSUTF8StringEncoding]; + T_QUIET; T_ASSERT_NOTNULL(current, "allocated string"); + cursor += currentLen + 1; + + if (executablePath == nil) { + executablePath = current; + [executablePath retain]; + while (*cursor == 0) { + cursor++; + } + } else { + [components addObject:current]; + } + [current release]; + } + if (legacyPathPresent) { + T_ASSERT_EQ(type, KERN_PROCARGS, "Legacy executable path should only be present for KERN_PROCARGS"); + currentLen = strnlen(cursor, bufferSize - ((uintptr_t)cursor - (uintptr_t)buffer)); + current = [[NSString alloc] initWithBytes:cursor length:currentLen encoding:NSUTF8StringEncoding]; + T_QUIET; T_ASSERT_NOTNULL(current, "allocated string"); + legacyExecutablePath = current; + } + args->argc = argc; + args->executablePath = executablePath; + args->components = components; + args->legacyExecutablePath = legacyExecutablePath; + args->preflightSize = preflightSize; + args->rawBuffer = buffer; + args->rawBufferSize = bufferSize; + return args; +} + +static void printProcArgs(procargs_t procargs) { + if (procargs->argc == -1) { + T_LOG("No argument count"); + } else { + T_LOG("Argc is %d", procargs->argc); + } + T_LOG("Executable path: %s (length %lu)", [procargs->executablePath UTF8String], [procargs->executablePath length]); + for (size_t i = 0; i < [procargs->components count]; i++) { + NSString *component = [procargs->components objectAtIndex:i]; + const char *str = [component UTF8String]; + size_t len = [component length]; + if (procargs->argc != -1) { + T_LOG("%s %zu: %s (length %lu)", i >= (size_t)procargs->argc ? 
"Env var" : "Argument", i, str, len); + } else { + T_LOG("Component %zu: %s (length %lu)", i, str, len); + } + } + if (procargs->legacyExecutablePath) { + T_LOG("Contains legacy executable path: %s (length %lu)", [procargs->legacyExecutablePath UTF8String], [procargs->legacyExecutablePath length]); + } + printHexDump(procargs->rawBuffer, procargs->rawBufferSize); +} + +static void printHexDump(void* buffer, size_t size) { + #define ROW_LENGTH 24 + T_LOG("Buffer %p, size %zu", buffer, size); + for (size_t row = 0; row < size; row += ROW_LENGTH) { + NSMutableString *line = [[NSMutableString alloc] initWithCapacity:0]; + NSMutableString *text = [[NSMutableString alloc] initWithCapacity:0]; + [line appendFormat:@" %04zx ", row]; + for (size_t col = row; col < row + ROW_LENGTH; col++) { + if (col < size) { + char c = ((char *)buffer)[col]; + [line appendFormat:@"%02x ", c]; + if (isprint(c)) { + [text appendFormat:@"%c", c]; + } else { + [text appendString:@"."]; + } + } else { + [line appendString:@" "]; + } + } + [line appendFormat:@" %@", text]; + T_LOG("%s", [line UTF8String]); + [text release]; + [line release]; + } +} + +static void deallocProcArgs(procargs_t procargs) +{ + [procargs->components release]; + [procargs->executablePath release]; + [procargs->legacyExecutablePath release]; + free(procargs->rawBuffer); + free(procargs); +} + +T_HELPER_DECL(child_helper, "Child process helper") +{ + while (true) { + wait(NULL); + } +} + +static pid_t +launch_child_process(NSArray *args, bool cs_restrict) +{ + pid_t pid; + char path[PATH_MAX]; + uint32_t path_size = sizeof(path); + uint32_t csopsStatus = 0; + const char** dt_args; + size_t dt_args_count; + + T_ASSERT_POSIX_SUCCESS(_NSGetExecutablePath(path, &path_size), "get executable path"); + + /* We need to add 4 arguments to the beginning and NULL at the end */ + dt_args_count = [args count] + 5; + dt_args = malloc(sizeof(char *) * dt_args_count); + dt_args[0] = path; + dt_args[1] = "-n"; + dt_args[2] = "child_helper"; + dt_args[3] = "--"; + for (size_t i = 0; i < [args count]; i++) { + NSString *arg = [args objectAtIndex:i]; + dt_args[i + 4] = [arg UTF8String]; + } + dt_args[[args count] + 4] = NULL; + + T_LOG("Launching %s", path); + T_LOG("Arguments: "); + for (size_t i = 0; i < dt_args_count; i++) { + T_LOG(" %s", dt_args[i] ? 
dt_args[i] : "(null)"); + } + T_ASSERT_POSIX_SUCCESS(dt_launch_tool(&pid, (char **)dt_args, false, NULL, NULL), "launched helper"); + free(dt_args); + + if (cs_restrict) { + csopsStatus |= CS_RESTRICT; + T_ASSERT_POSIX_SUCCESS(csops(pid, CS_OPS_SET_STATUS, &csopsStatus, sizeof(csopsStatus)), "set CS_RESTRICT"); + } + return pid; +} + +T_DECL(test_sysctl_kern_procargs_25397314, "Test kern.procargs and kern.procargs2 sysctls") +{ + procargs_t procargs; + size_t argsize = sizeof(argmax); + NSString *testArgument1 = @"test argument 1"; + bool containsTestArgument1 = false; + NSString *testArgument2 = @"test argument 2"; + bool containsTestArgument2 = false; + NSString *testEnvironmentVariable = @TEST_ENVIRONMENT_VARIABLE; + bool containsTestEnvironmentVariable = false; + bool containsPathEnvironmentVariable = false; + int development = 0; + size_t development_size = sizeof(development); + uint32_t csopsStatus = 0; + + + T_ASSERT_POSIX_SUCCESS(sysctlbyname("kern.development", &development, &development_size, NULL, 0), "sysctl kern.development"); + + T_ASSERT_POSIX_SUCCESS(sysctlbyname("kern.argmax", &argmax, &argsize, NULL, 0), "sysctl kern.argmax"); + procargs = getProcArgs(KERN_PROCARGS2, getpid(), argmax); + T_ASSERT_NOTNULL(procargs->executablePath, "executable path should be non-null"); + T_ASSERT_GT([procargs->executablePath length], 0, "executable path should not be empty"); + printProcArgs(procargs); + deallocProcArgs(procargs); + + procargs = getProcArgs(KERN_PROCARGS2, getpid(), 0); + T_ASSERT_NOTNULL(procargs->executablePath, "executable path should be non-null"); + T_ASSERT_GT([procargs->executablePath length], 0, "executable path should not be empty"); + printProcArgs(procargs); + deallocProcArgs(procargs); + + setenv(TEST_ENVIRONMENT_VARIABLE, TEST_ENVIRONMENT_VARIABLE_VALUE, true); + + pid_t child = launch_child_process(@[testArgument1, testArgument2], false); + procargs = getProcArgs(KERN_PROCARGS2, child, argmax); + T_ASSERT_NOTNULL(procargs->executablePath, "executable path should be non-null"); + T_ASSERT_GT([procargs->executablePath length], 0, "executable path should not be empty"); + printProcArgs(procargs); + + for (NSString *component in procargs->components) { + if ([component isEqualToString:testArgument1]) { + containsTestArgument1 = true; + } + if ([component isEqualToString:testArgument2]) { + containsTestArgument2 = true; + } + if ([component containsString:testEnvironmentVariable]) { + containsTestEnvironmentVariable = true; + } + } + deallocProcArgs(procargs); + kill(child, SIGKILL); + T_ASSERT_TRUE(containsTestArgument1, "Found test argument 1"); + T_ASSERT_TRUE(containsTestArgument2, "Found test argument 2"); + T_ASSERT_TRUE(containsTestEnvironmentVariable, "Found test environment variable"); + + if (development) { + T_LOG("Skipping test on DEVELOPMENT || DEBUG kernel"); + } else { + containsTestArgument1 = false; + containsTestArgument2 = false; + containsTestEnvironmentVariable = false; + + child = launch_child_process(@[testArgument1, testArgument2], true); + procargs = getProcArgs(KERN_PROCARGS2, child, argmax); + T_ASSERT_NOTNULL(procargs->executablePath, "executable path should be non-null"); + T_ASSERT_GT([procargs->executablePath length], 0, "executable path should not be empty"); + printProcArgs(procargs); + for (NSString *component in procargs->components) { + if ([component isEqualToString:testArgument1]) { + containsTestArgument1 = true; + } + if ([component isEqualToString:testArgument2]) { + containsTestArgument2 = true; + } + if ([component 
containsString:testEnvironmentVariable]) { + containsTestEnvironmentVariable = true; + } + } + deallocProcArgs(procargs); + kill(child, SIGKILL); + T_ASSERT_TRUE(containsTestArgument1, "Found test argument 1"); + T_ASSERT_TRUE(containsTestArgument2, "Found test argument 2"); + T_ASSERT_FALSE(containsTestEnvironmentVariable, "No test environment variable"); + + + csopsStatus |= CS_RESTRICT; + T_ASSERT_POSIX_SUCCESS(csops(getpid(), CS_OPS_SET_STATUS, &csopsStatus, sizeof(csopsStatus)), "set CS_RESTRICT on self"); + procargs = getProcArgs(KERN_PROCARGS2, getpid(), argmax); + T_ASSERT_NOTNULL(procargs->executablePath, "executable path should be non-null"); + T_ASSERT_GT([procargs->executablePath length], 0, "executable path should not be empty"); + printProcArgs(procargs); + for (NSString *component in procargs->components) { + if ([component containsString:@"PATH"]) { + containsPathEnvironmentVariable = true; + } + } + deallocProcArgs(procargs); + T_ASSERT_TRUE(containsPathEnvironmentVariable, "Found $PATH environment variable"); + } +} diff --git a/tests/thread_group_set_32261625.c b/tests/thread_group_set_32261625.c index 507219204..edd733fa6 100644 --- a/tests/thread_group_set_32261625.c +++ b/tests/thread_group_set_32261625.c @@ -18,7 +18,8 @@ newthread(void *arg) #define TEST_TIMEOUT (15 * NSEC_PER_SEC) -T_DECL(thread_group_set, "Checks that new threads get a THREAD_GROUP_SET tracepoint with a non-zero tid") { +T_DECL(thread_group_set, "Checks that new threads get a THREAD_GROUP_SET tracepoint with a non-zero tid", + T_META_ASROOT(true)) { pthread_t thread; __block int seen_new_thread = 0, __block seen_thread_group_set = 0; diff --git a/tests/thread_set_state_arm64_cpsr.c b/tests/thread_set_state_arm64_cpsr.c new file mode 100644 index 000000000..291f71b7f --- /dev/null +++ b/tests/thread_set_state_arm64_cpsr.c @@ -0,0 +1,96 @@ +/* + * Copyright (c) 2019 Apple Computer, Inc. All rights reserved. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. + * + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. 
+ * + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ + */ + +#include +#include +#include +#include + +T_GLOBAL_META( + T_META_NAMESPACE("xnu.arm"), + T_META_RUN_CONCURRENTLY(true) + ); + +#define PSR64_USER_MASK (0xFU << 28) + +#if __arm64__ +__attribute__((noreturn)) +static void +phase2() +{ + kern_return_t err; + arm_thread_state64_t ts; + mach_msg_type_number_t count = ARM_THREAD_STATE64_COUNT; + uint32_t nzcv = (uint32_t) __builtin_arm_rsr64("NZCV"); + + T_QUIET; T_ASSERT_EQ(nzcv & PSR64_USER_MASK, PSR64_USER_MASK, "All condition flags are set"); + + err = thread_get_state(mach_thread_self(), ARM_THREAD_STATE64, (thread_state_t)&ts, &count); + T_QUIET; T_ASSERT_EQ(err, KERN_SUCCESS, "Got own thread state after corrupting CPSR"); + + T_QUIET; T_ASSERT_EQ(ts.__cpsr & ~PSR64_USER_MASK, 0, "No privileged fields in CPSR are set"); + + exit(0); +} +#endif + +T_DECL(thread_set_state_arm64_cpsr, + "Test that user mode cannot control privileged fields in CPSR/PSTATE.") +{ +#if !__arm64__ + T_SKIP("Running on non-arm64 target, skipping..."); +#else + kern_return_t err; + mach_msg_type_number_t count; + arm_thread_state64_t ts; + + count = ARM_THREAD_STATE64_COUNT; + err = thread_get_state(mach_thread_self(), ARM_THREAD_STATE64, (thread_state_t)&ts, &count); + T_QUIET; T_ASSERT_EQ(err, KERN_SUCCESS, "Got own thread state"); + + /* + * jump to the second phase while attempting to set all the bits + * in CPSR. If we survive the jump and read back CPSR without any + * bits besides condition flags set, the test passes. If kernel + * does not mask out the privileged CPSR bits correctly, we can + * expect an illegal instruction set panic due to SPSR.IL being + * set upon ERET to user mode. + */ + + void *new_pc = (void *)&phase2; + arm_thread_state64_set_pc_fptr(ts, new_pc); + ts.__cpsr = ~0U; + + err = thread_set_state(mach_thread_self(), ARM_THREAD_STATE64, (thread_state_t)&ts, ARM_THREAD_STATE64_COUNT); + + /* NOT REACHED */ + + T_ASSERT_FAIL("Thread did not reach expected state. 
err = %d", err); + +#endif +} diff --git a/tests/turnstile_multihop.c b/tests/turnstile_multihop.c index 65fd2db07..e2f367367 100644 --- a/tests/turnstile_multihop.c +++ b/tests/turnstile_multihop.c @@ -42,6 +42,10 @@ struct test_msg { static boolean_t spin_for_ever = false; +static boolean_t test_noimportance = false; + +#define EXPECTED_MESSAGE_ID 0x100 + static void thread_create_at_qos(qos_class_t qos, void * (*function)(void *)); static uint64_t @@ -226,6 +230,23 @@ get_user_promotion_basepri(void) return thread_policy.thps_user_promotion_basepri; } +static uint32_t +get_thread_base_priority(void) +{ + kern_return_t kr; + mach_port_t thread_port = pthread_mach_thread_np(pthread_self()); + + policy_timeshare_info_data_t timeshare_info; + mach_msg_type_number_t count = POLICY_TIMESHARE_INFO_COUNT; + + kr = thread_info(thread_port, THREAD_SCHED_TIMESHARE_INFO, + (thread_info_t)&timeshare_info, &count); + T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "thread_info"); + + return (uint32_t)timeshare_info.base_priority; +} + + #define LISTENER_WLID 0x100 #define CONN_WLID 0x200 @@ -285,6 +306,8 @@ workloop_cb_test_intransit(uint64_t *workloop_id, void **eventslist, int *events T_QUIET; T_ASSERT_EQ(*events, 1, "should have one event"); + T_EXPECT_REQUESTED_QOS_EQ(QOS_CLASS_MAINTENANCE, "message handler should have MT requested QoS"); + hdr = (mach_msg_header_t *)kev->ext[0]; T_ASSERT_NOTNULL(hdr, "has a message"); T_ASSERT_EQ(hdr->msgh_size, (uint32_t)sizeof(struct test_msg), "of the right size"); @@ -315,6 +338,9 @@ workloop_cb_test_intransit(uint64_t *workloop_id, void **eventslist, int *events T_EXPECT_EQ(get_user_promotion_basepri(), 60u, "dispatch_source event handler should be overridden at 60"); + T_EXPECT_EQ(get_thread_base_priority(), 60u, + "dispatch_source event handler should have base pri at 60"); + if (*workloop_id == LISTENER_WLID) { register_port(CONN_WLID, tmsg->port_descriptor.name); @@ -326,6 +352,14 @@ workloop_cb_test_intransit(uint64_t *workloop_id, void **eventslist, int *events /* this will unblock the waiter */ mach_msg_destroy(hdr); *events = 0; + + /* now that the message is destroyed, the priority should be gone */ + T_EXPECT_EFFECTIVE_QOS_EQ(QOS_CLASS_MAINTENANCE, + "dispatch_source event handler QoS should be QOS_CLASS_MAINTENANCE after destroying message"); + T_EXPECT_LE(get_user_promotion_basepri(), 0u, + "dispatch_source event handler should not be overridden after destroying message"); + T_EXPECT_LE(get_thread_base_priority(), 4u, + "dispatch_source event handler should have base pri at 4 or less after destroying message"); } } @@ -405,7 +439,7 @@ send( reply_port ? MACH_MSG_TYPE_MAKE_SEND_ONCE : 0, MACH_MSG_TYPE_MOVE_SEND, MACH_MSGH_BITS_COMPLEX), - .msgh_id = 0x100, + .msgh_id = EXPECTED_MESSAGE_ID, .msgh_size = sizeof(send_msg), }, .body = { @@ -430,6 +464,7 @@ send( MACH_SEND_MSG | MACH_SEND_TIMEOUT | MACH_SEND_OVERRIDE | + (test_noimportance ? MACH_SEND_NOIMPORTANCE : 0) | ((reply_port ?
MACH_SEND_SYNC_OVERRIDE : 0) | options), send_msg.header.msgh_size, 0, @@ -440,13 +475,11 @@ send( T_QUIET; T_ASSERT_MACH_SUCCESS(ret, "client mach_msg"); } -static void +static mach_msg_id_t receive( mach_port_t rcv_port, mach_port_t notify_port) { - kern_return_t ret = 0; - struct { mach_msg_header_t header; mach_msg_body_t body; @@ -462,7 +495,8 @@ receive( T_LOG("Client: Starting sync receive\n"); - ret = mach_msg(&(rcv_msg.header), + kern_return_t kr; + kr = mach_msg(&(rcv_msg.header), MACH_RCV_MSG | MACH_RCV_SYNC_WAIT, 0, @@ -470,6 +504,10 @@ receive( rcv_port, 0, notify_port); + + T_ASSERT_MACH_SUCCESS(kr, "mach_msg rcv"); + + return rcv_msg.header.msgh_id; } static lock_t lock_DEF; @@ -766,7 +804,9 @@ thread_at_maintenance(void *arg __unused) thread_create_at_qos(QOS_CLASS_DEFAULT, thread_at_default); /* Block on Sync IPC */ - receive(special_reply_port, service_port); + mach_msg_id_t message_id = receive(special_reply_port, service_port); + + T_ASSERT_EQ(message_id, MACH_NOTIFY_SEND_ONCE, "got the expected send-once notification"); T_LOG("received reply"); @@ -781,6 +821,15 @@ T_HELPER_DECL(three_ulock_sync_ipc_hop, sigsuspend(0); } +T_HELPER_DECL(three_ulock_sync_ipc_hop_noimportance, + "Create chain of 4 threads with 3 ulocks and 1 no-importance sync IPC at different qos") +{ + test_noimportance = true; + thread_create_at_qos(QOS_CLASS_MAINTENANCE, thread_at_maintenance); + sigsuspend(0); +} + + static void thread_create_at_qos(qos_class_t qos, void * (*function)(void *)) { @@ -842,6 +891,8 @@ T_HELPER_DECL(server_kevent_id, */ TEST_MULTIHOP("server_kevent_id", "three_ulock_sync_ipc_hop", three_ulock_sync_ipc_hop) +TEST_MULTIHOP("server_kevent_id", "three_ulock_sync_ipc_hop_noimportance", three_ulock_sync_ipc_hop_noimportance) + /* * Test 2: Test multihop priority boosting with ulocks, dispatch sync and sync IPC. 
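A note on the no-importance variant added above: the only behavioral change is one extra mach_msg() send option, MACH_SEND_NOIMPORTANCE, which keeps the sender from donating importance (and hence a priority boost) to the receiving task. A minimal sketch of how such an option mask can be composed, using only standard Mach constants; the helper name and timeout value are illustrative, not part of the patch:

#include <stdbool.h>
#include <mach/mach.h>

/* Hypothetical helper: send a prepared message, optionally without importance donation. */
static kern_return_t
send_with_options(mach_msg_header_t *hdr, bool noimportance)
{
	mach_msg_option_t opts = MACH_SEND_MSG | MACH_SEND_TIMEOUT | MACH_SEND_OVERRIDE;
	if (noimportance) {
		/* Suppress importance/QoS donation; the receiver should not get boosted. */
		opts |= MACH_SEND_NOIMPORTANCE;
	}
	return mach_msg(hdr, opts, hdr->msgh_size, 0, MACH_PORT_NULL,
	    10000 /* send timeout, ms */, MACH_PORT_NULL);
}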
* diff --git a/tests/ulock.c b/tests/ulock.c new file mode 100644 index 000000000..09070d624 --- /dev/null +++ b/tests/ulock.c @@ -0,0 +1,92 @@ +#include + +#include + +#include +#include +#include + +#include + +#ifndef __TSD_MACH_THREAD_SELF +#define __TSD_MACH_THREAD_SELF 3 +#endif + +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wbad-function-cast" +__inline static mach_port_name_t +_os_get_self(void) +{ + mach_port_name_t self = (mach_port_name_t)_os_tsd_get_direct(__TSD_MACH_THREAD_SELF); + return self; +} +#pragma clang diagnostic pop + +T_GLOBAL_META(T_META_RUN_CONCURRENTLY(true)); + +#pragma mark ulock_non_owner_wake + +static _Atomic uint32_t test_ulock; + +static void * +test_waiter(void *arg __unused) +{ + for (;;) { + uint32_t test_ulock_owner = atomic_load_explicit(&test_ulock, + memory_order_relaxed); + int rc = __ulock_wait(UL_UNFAIR_LOCK | ULF_NO_ERRNO, &test_ulock, + test_ulock_owner, 0); + if (rc == -EINTR || rc == -EFAULT) { + continue; + } + T_ASSERT_GE(rc, 0, "__ulock_wait"); + break; + } + + T_PASS("Waiter woke"); + T_END; + + return NULL; +} + +static void * +test_waker(void *arg __unused) +{ + for (;;) { + int rc = __ulock_wake(UL_UNFAIR_LOCK | ULF_NO_ERRNO | ULF_WAKE_ALLOW_NON_OWNER, + &test_ulock, 0); + if (rc == -EINTR) { + continue; + } + T_ASSERT_EQ(rc, 0, "__ulock_wake"); + break; + } + return NULL; +} + +T_DECL(ulock_non_owner_wake, "ulock_wake respects non-owner wakes", + T_META_CHECK_LEAKS(false)) +{ + pthread_t waiter, waker; + + atomic_store_explicit(&test_ulock, _os_get_self() & ~0x3u, memory_order_relaxed); + + T_ASSERT_POSIX_ZERO(pthread_create(&waiter, NULL, test_waiter, NULL), "create waiter"); + + // wait for the waiter to reach the kernel + for (;;) { + int kernel_ulocks = __ulock_wake(UL_DEBUG_HASH_DUMP_PID, NULL, 0); + T_QUIET; T_ASSERT_NE(kernel_ulocks, -1, "UL_DEBUG_HASH_DUMP_PID"); + + if (kernel_ulocks == 1) { + T_LOG("waiter is now waiting"); + break; + } + usleep(100); + } + + T_ASSERT_POSIX_ZERO(pthread_create(&waker, NULL, test_waker, NULL), "create waker"); + + // won't ever actually join + pthread_join(waiter, NULL); +} diff --git a/tests/vm/entitlement_increased_memory_limit.c b/tests/vm/entitlement_increased_memory_limit.c new file mode 100644 index 000000000..8d25bfcf8 --- /dev/null +++ b/tests/vm/entitlement_increased_memory_limit.c @@ -0,0 +1,148 @@ +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include +#include +#include + +#include "memorystatus_assertion_helpers.h" + +#define MAX_TASK_MEM_ENTITLED "kern.entitled_max_task_pmem" +#define MAX_TASK_MEM "kern.max_task_pmem" +#define MAX_TASK_MEM_ENTITLED_VALUE (3 * (1 << 10)) + +#if ENTITLED +#define TESTNAME entitlement_increased_memory_limit_entitled +#else /* ENTITLED */ +#define TESTNAME entitlement_increased_memory_limit_unentitled +#endif /* ENTITLED */ + +T_GLOBAL_META(T_META_NAMESPACE("xnu.vm")); + +static int32_t old_entitled_max_task_pmem = 0; + +static void +reset_old_entitled_max_task_mem() +{ + int ret; + size_t size_old_entitled_max_task_pmem = sizeof(old_entitled_max_task_pmem); + // Use sysctl to change entitled limit + ret = sysctlbyname(MAX_TASK_MEM_ENTITLED, NULL, 0, &old_entitled_max_task_pmem, size_old_entitled_max_task_pmem); +} + +T_HELPER_DECL(child, "Child") { + // Doesn't do anything. Will start suspended + // so that its parent can check its memlimits + // and then kill it. 
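The comment above describes the protocol this test relies on: the child is spawned suspended with an explicit jetsam memory limit, the parent inspects that limit while the child cannot run, and only then is the child resumed so it can exit. Roughly, and with illustrative names (posix_spawnattr_setjetsam_ext() is the private spawn SPI the test uses below; it is shown only in a comment because it is not in the public headers):

#include <signal.h>
#include <spawn.h>
#include <sys/types.h>

extern char **environ;

/* Sketch: spawn a child suspended, inspect it, then let it run. */
static pid_t
spawn_suspended(char *const argv[])
{
	posix_spawnattr_t attr;
	pid_t pid = -1;

	posix_spawnattr_init(&attr);
	posix_spawnattr_setflags(&attr, POSIX_SPAWN_START_SUSPENDED);
	/* posix_spawnattr_setjetsam_ext(&attr, 0, priority, active_limit, inactive_limit); */
	posix_spawn(&pid, argv[0], NULL, &attr, argv, environ);
	posix_spawnattr_destroy(&attr);

	/* ... query the suspended child's memlimit here ... */
	kill(pid, SIGCONT);   /* resume; the child exits on its own */
	return pid;
}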
+ T_PASS("Child exiting"); +} + +static pid_t +spawn_child_with_memlimit(int32_t memlimit) +{ + posix_spawnattr_t attr; + int ret; + char **args; + char testpath[PATH_MAX]; + uint32_t testpath_buf_size; + pid_t pid; + + ret = posix_spawnattr_init(&attr); + T_QUIET; T_ASSERT_POSIX_ZERO(ret, "posix_spawnattr_init"); + + testpath_buf_size = sizeof(testpath); + ret = _NSGetExecutablePath(testpath, &testpath_buf_size); + T_ASSERT_POSIX_ZERO(ret, "_NSGetExecutablePath"); + T_LOG("Executable path: %s", testpath); + args = (char *[]){ + testpath, + "-n", + "child", + NULL + }; + + ret = posix_spawnattr_setflags(&attr, POSIX_SPAWN_START_SUSPENDED); + T_QUIET; T_ASSERT_POSIX_ZERO(ret, "posix_spawnattr_setflags() failed"); + ret = posix_spawnattr_setjetsam_ext(&attr, + 0, JETSAM_PRIORITY_FOREGROUND, memlimit, memlimit); + T_QUIET; T_ASSERT_POSIX_ZERO(ret, "posix_spawnattr_setjetsam_ext"); + ret = posix_spawn(&pid, testpath, NULL, &attr, args, *_NSGetEnviron()); + T_QUIET; T_ASSERT_POSIX_ZERO(ret, "posix_spawn() failed"); + + return pid; +} + + +T_DECL(TESTNAME, + "Verify that entitled processes can allocate up to the entitled memory limit", + T_META_CHECK_LEAKS(false)) +{ + int32_t entitled_max_task_pmem = MAX_TASK_MEM_ENTITLED_VALUE, max_task_pmem = 0, expected_limit; + size_t size_entitled_max_task_pmem = sizeof(entitled_max_task_pmem); + size_t size_old_entitled_max_task_pmem = sizeof(old_entitled_max_task_pmem); + size_t size_max_task_pmem = sizeof(max_task_pmem); + int status; + pid_t pid, rc; + bool signaled; + memorystatus_memlimit_properties2_t mmprops; + + int ret = 0; + + // Get the unentitled limit + ret = sysctlbyname(MAX_TASK_MEM, &max_task_pmem, &size_max_task_pmem, NULL, 0); + T_ASSERT_POSIX_SUCCESS(ret, "call sysctlbyname to get max task physical memory."); + if (max_task_pmem >= MAX_TASK_MEM_ENTITLED_VALUE) { + T_SKIP("max_task_pmem (%lld) is larger than entitled value (%lld). Skipping test on this device.", max_task_pmem, MAX_TASK_MEM_ENTITLED_VALUE); + } + + // Use sysctl to change entitled limit + ret = sysctlbyname(MAX_TASK_MEM_ENTITLED, &old_entitled_max_task_pmem, &size_old_entitled_max_task_pmem, &entitled_max_task_pmem, size_entitled_max_task_pmem); + T_ASSERT_POSIX_SUCCESS(ret, "call sysctlbyname to set entitled hardware mem size."); + + T_ATEND(reset_old_entitled_max_task_mem); + + /* + * Spawn child with the normal task limit (just as launchd does for an app) + * The child will start suspended, so we can check its memlimit. + */ + + pid = spawn_child_with_memlimit(max_task_pmem); + T_ASSERT_POSIX_SUCCESS(pid, "spawn child with task limit"); + + // Check its memlimt + ret = memorystatus_control(MEMORYSTATUS_CMD_GET_MEMLIMIT_PROPERTIES, pid, 0, &mmprops, sizeof(mmprops)); + T_ASSERT_POSIX_SUCCESS(ret, "memorystatus_control"); +#if ENTITLED + expected_limit = MAX_TASK_MEM_ENTITLED_VALUE; +#else /* ENTITLED */ + expected_limit = max_task_pmem; +#endif /* ENTITLED */ + T_ASSERT_EQ(mmprops.v1.memlimit_active, expected_limit, "active limit"); + T_ASSERT_EQ(mmprops.v1.memlimit_inactive, expected_limit, "inactive limit"); + + // Resume the child. It should exit immediately. + ret = kill(pid, SIGCONT); + T_ASSERT_POSIX_SUCCESS(ret, "kill child"); + + // Check child's exit code. 
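Before the exit-status check below, one earlier detail of this test is worth spelling out: a single sysctlbyname() call can return the previous value and install a new one at the same time, which is how the old kern.entitled_max_task_pmem value is captured for the T_ATEND restore handler. A small sketch of that idiom (error handling omitted; the sysctl name is the one used by the test):

#include <stdint.h>
#include <sys/sysctl.h>

/* Sketch: swap in a new sysctl value while saving the old one for later restoration. */
static int32_t
swap_entitled_limit(int32_t new_limit)
{
	int32_t old_limit = 0;
	size_t old_size = sizeof(old_limit);

	/* oldp/oldlenp receive the previous value; newp/newlen install the replacement. */
	sysctlbyname("kern.entitled_max_task_pmem", &old_limit, &old_size,
	    &new_limit, sizeof(new_limit));
	return old_limit;
}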
+ while (true) { + rc = waitpid(pid, &status, 0); + if (rc == -1 && errno == EINTR) { + continue; + } + T_ASSERT_EQ(rc, pid, "waitpid"); + signaled = WIFSIGNALED(status); + T_ASSERT_FALSE(signaled, "Child exited cleanly"); + ret = WEXITSTATUS(status); + T_ASSERT_EQ(ret, 0, "child exited with code 0."); + break; + } +} diff --git a/tests/vm/entitlement_increased_memory_limit.entitlements b/tests/vm/entitlement_increased_memory_limit.entitlements new file mode 100644 index 000000000..99f471672 --- /dev/null +++ b/tests/vm/entitlement_increased_memory_limit.entitlements @@ -0,0 +1,8 @@ + + + + + com.apple.developer.kernel.increased-memory-limit + + + diff --git a/tests/vm/fault_throughput.c b/tests/vm/fault_throughput.c new file mode 100644 index 000000000..3cf9ef1b2 --- /dev/null +++ b/tests/vm/fault_throughput.c @@ -0,0 +1,684 @@ +/* + * Benchmark VM fault throughput. + * This test faults memory for a configurable amount of time across a + * configurable number of threads. Currently it only measures zero fill faults. + * Currently it supports two variants: + * 1. Each thread gets its own vm objects to fault in + * 2. Threads share vm objects + * + * We'll add more fault types as we identify problematic user-facing workloads + * in macro benchmarks. + * + * Throughput is reported as pages / second using both wall time and cpu time. + * CPU time is a more reliable metric for regression testing, but wall time can + * highlight blocking in the VM. + * + * Running this benchmark directly is not recommended. + * Use fault_throughput.lua which provides a nicer interface and outputs + * perfdata. + */ +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +/* + * TODO: Make this benchmark runnable on linux so we can do a perf comparison. + * We're mostly using POSIX APIs, but we'll need to replace + * the sysctls with the /proc equivalents, and replace clock_gettime_nsec_np + * with the linux equivalent. + */ +#include + +#include + +#include +#include + +#include "vm/perf_helpers.h" + +#if (TARGET_OS_OSX || TARGET_OS_SIMULATOR) +/* + * On non-embedded platforms we coalesce vm objects up to 128 MB, so + * we make the objects 128 MB on that platform to ensure they're not + * merged with anything else. + */ +const static size_t kVmObjectSize = 128 * (1UL << 20); +#else +/* + * Embedded platforms don't coalesce vm objects. This number + * needs to be big enough that faulting it in dwarfs the cost of dequeuing + * it from the work queue, but can't be too large or else we won't be able + * to allocate one per thread in the separate-objects benchmark. + */ +const static size_t kVmObjectSize = 4 * (1UL << 20); +#endif /* (TARGET_OS_OSX || TARGET_OS_SIMULATOR) */ +static const clockid_t kWallTimeClock = CLOCK_MONOTONIC_RAW; +static const clockid_t kThreadCPUTimeClock = CLOCK_THREAD_CPUTIME_ID; +/* These globals are set dynamically during test setup based on sysctls. */ +static uint64_t kCacheLineSize = 0; +/* The VM page size */ +static size_t kPageSize = 0; + + +typedef struct fault_buffer { + unsigned char* fb_start; /* The start of this buffer. */ + size_t fb_size; /* The size of this buffer in bytes. */ +} fault_buffer_t; + +typedef enum test_variant { + VARIANT_SEPARATE_VM_OBJECTS, + VARIANT_SHARE_VM_OBJECTS +} test_variant_t; + +typedef struct test_globals { + /* This lock protects: tg_cv, tg_running_count, tg_done, tg_current_iteration, and tg_iterations_completed. 
*/ + pthread_mutex_t tg_lock; + pthread_cond_t tg_cv; + /* The number of currently running threads */ + unsigned int tg_running_count; + /* Set during cleanup to indicate that the benchmark is over. */ + bool tg_done; + size_t tg_current_iteration; + size_t tg_iterations_completed; + unsigned int tg_num_threads; + test_variant_t tg_variant; + /* + * An array of memory objects to fault in. + * This is basically a workqueue of + * contiguous chunks of memory that the worker threads + * will fault in. + */ + fault_buffer_t *tg_fault_buffer_arr; + size_t tg_fault_buffer_arr_length; + /* + * To avoid false sharing, we pad the test globals with an extra cache line and place the atomic + * next_fault_buffer_index size_t after the cache line. + */ + __unused char padding[]; + /* + * This field is directly after the padding buffer. + * It is used to synchronize access to tg_fault_buffer_arr. + */ + //_Atomic size_t tg_next_fault_buffer_index; +} test_globals_t; + +static const char* kSeparateObjectsArgument = "separate-objects"; +static const char* kShareObjectsArgument = "share-objects"; + +/* Arguments parsed from the command line */ +typedef struct test_args { + uint32_t n_threads; + uint64_t duration_seconds; + test_variant_t variant; + bool verbose; +} test_args_t; + +/* Get a (wall-time) timestamp in nanoseconds */ +static uint64_t get_timestamp_ns(void); +/* Get the number of cpus on this device. */ +static unsigned int get_ncpu(void); +/* + * Fault in the pages in the given buffer. + */ +static void fault_pages(fault_buffer_t *buffer, size_t stride); +/* Get a unique fault buffer from the global work queue. */ +static fault_buffer_t *get_fault_buffer(test_globals_t* globals); +/* + * Grabs buffers from the global test structure and faults them in, using this + * test variant's stride, until there are no more buffers to grab. + * Returns the number of microseconds spent on-cpu. + */ +static uint64_t grab_and_fault_pages(test_globals_t* globals); + +static bool worker_thread_iteration_setup(size_t current_iteration, test_globals_t *globals); +static void worker_thread_iteration_complete(test_globals_t *globals); + +static void parse_arguments(int argc, char **argv, test_args_t *args); +/* + * Sets up the test globals and spawns the background threads to do the faults. + * Returns an array of size `num_threads` + * Containing the thread ids of the forked threads. + */ +static pthread_t* setup_test(test_globals_t *globals, const test_args_t *args, size_t memory_size, bool verbose); +static test_globals_t *allocate_test_globals(void); +/* Initializes variables in the globals array. */ +static void init_globals(test_globals_t *globals, const test_args_t *args); +static inline _Atomic size_t *next_fault_buffer_index_ptr(test_globals_t *globals); +/* + * Called on the main thread. + * Waits for the background threads to be ready, sets up the memory objects, + * and then starts a faulting iteration. + * Returns the start (wall) time. + */ +static uint64_t start_iteration(test_globals_t* globals, test_variant_t variant, bool verbose); +/* + * Called on the main thread. + * Waits for the background threads to complete the iteration and cleans up. + * Returns the total amount of time spent faulting pages in nanoseconds by all threads thus far. + */ +static uint64_t finish_iteration(test_globals_t *globals, uint64_t start_time); +/* + * Called on the main thread. + * Maps buffers and places them in the work queue. 
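The struct above ends with a flexible padding member, and the comment explains why: the hot work-queue index is an _Atomic size_t placed one cache line past the end of the struct so that fetch-add traffic does not false-share with the mutex-protected fields. A sketch of that layout under the same assumption (the struct and function names here are illustrative; the real code reads hw.cachelinesize at startup):

#include <stdatomic.h>
#include <stdlib.h>
#include <string.h>

struct globals_sketch {
	/* ... mutex, condition variable, counters ... */
	int placeholder;
};

/* The atomic counter lives one cache line past the end of the struct. */
static _Atomic size_t *
counter_ptr(struct globals_sketch *g, size_t cacheline)
{
	return (_Atomic size_t *)((char *)(g + 1) + cacheline);
}

/* Allocate room for the struct, the padding cache line, and the counter itself. */
static struct globals_sketch *
alloc_globals(size_t cacheline)
{
	size_t sz = sizeof(struct globals_sketch) + cacheline + sizeof(_Atomic size_t);
	struct globals_sketch *g = malloc(sz);
	memset(g, 0, sz);
	return g;
}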
+ */ +static void setup_memory(test_globals_t* globals, test_variant_t variant); +/* + * Dump test results as a csv to stdout. + * Use fault_throughput.lua to convert to perfdata. + */ +static void output_results(const test_globals_t *globals, double walltime_elapsed_seconds, double cputime_elapsed_seconds); +static void cleanup_test(test_globals_t *globals); +/* + * Join the background threads and return the total microseconds + * of cpu time spent faulting across all of the threads. + * Takes ownership of the threads array and frees it. + */ +static uint64_t join_background_threads(test_globals_t *globals, pthread_t *threads); +static void unmap_fault_buffers(test_globals_t *globals); +/* + * Get the stride between each vm object in the fault buffer array. + */ +static size_t fault_buffer_stride(const test_globals_t *globals); + +int +main(int argc, char **argv) +{ + /* How much memory should the test consume (per-core on the system)? */ +#if (TARGET_OS_OSX || TARGET_OS_SIMULATOR) + static const size_t memory_per_core = kVmObjectSize; +#else + static const size_t memory_per_core = 25 * (1UL << 20); +#endif /* (TARGET_OS_OSX || TARGET_OS_SIMULATOR) */ + const size_t kMemSize = memory_per_core * get_ncpu(); + test_globals_t *globals = allocate_test_globals(); + /* Total wall-time spent faulting in pages. */ + uint64_t wall_time_elapsed_ns = 0; + /* Total cpu-time spent faulting in pages */ + uint64_t cpu_time_faulting_us = 0; + uint64_t start_time_ns; + test_args_t args; + parse_arguments(argc, argv, &args); + pthread_t* threads = setup_test(globals, &args, kMemSize, args.verbose); + + /* Keep doing more iterations until we've hit our (wall) time budget */ + while (wall_time_elapsed_ns < args.duration_seconds * kNumNanosecondsInSecond) { + benchmark_log(args.verbose, "----Starting Iteration %lu-----\n", globals->tg_current_iteration + 1); + start_time_ns = start_iteration(globals, args.variant, args.verbose); + wall_time_elapsed_ns += finish_iteration(globals, start_time_ns); + benchmark_log(args.verbose, "----Completed Iteration %lu----\n", globals->tg_current_iteration); + } + + benchmark_log(args.verbose, "Hit time budget\nJoining worker threads\n"); + cpu_time_faulting_us = join_background_threads(globals, threads); + benchmark_log(args.verbose, "----End Test Output----\n"); + output_results(globals, (double) wall_time_elapsed_ns / kNumNanosecondsInSecond, + (double)cpu_time_faulting_us / kNumMicrosecondsInSecond); + cleanup_test(globals); + + return 0; +} + + +/* The main loop for the worker threads. */ +static void* +faulting_thread(void* arg) +{ + test_globals_t* globals = arg; + uint64_t on_cpu_time_faulting = 0; + size_t current_iteration = 1; + while (true) { + bool should_continue = worker_thread_iteration_setup(current_iteration, globals); + if (!should_continue) { + break; + } + on_cpu_time_faulting += grab_and_fault_pages(globals); + worker_thread_iteration_complete(globals); + current_iteration++; + } + return (void*)on_cpu_time_faulting; +} + +/* + * Called on the worker threads before each iteration to synchronize this + * iteration start with the other threads. + * Returns true if the iteration should continue, and false if the test is over. 
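One idiom from faulting_thread() above: each worker returns its on-CPU total by casting the integer to the thread's exit value, and join_background_threads() later recovers it from pthread_join(). A tiny self-contained sketch of that pattern (it assumes the counter fits in a pointer, which holds on LP64 targets):

#include <pthread.h>
#include <stdint.h>

/* Sketch: pass a 64-bit counter back through the thread exit value. */
static void *
worker(void *arg)
{
	(void)arg;
	uint64_t work_done = 42;               /* accumulate real work here */
	return (void *)(uintptr_t)work_done;   /* fits in a pointer on LP64 */
}

static uint64_t
join_and_collect(pthread_t t)
{
	void *value = NULL;
	pthread_join(t, &value);
	return (uint64_t)(uintptr_t)value;
}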
+ */ +static bool +worker_thread_iteration_setup(size_t current_iteration, test_globals_t *globals) +{ + bool should_continue = false; + int ret = 0; + // Gate on the other threads being ready to start + ret = pthread_mutex_lock(&globals->tg_lock); + assert(ret == 0); + globals->tg_running_count++; + if (globals->tg_running_count == globals->tg_num_threads) { + // All the worker threads are running. + // Wake up the main thread so that it can ungate the test. + ret = pthread_cond_broadcast(&globals->tg_cv); + assert(ret == 0); + } + /* + * The main thread will start this iteration by incrementing + * tg_current_iteration. Block until that happens. + * See start_iteration for the wakeup code. + */ + while (!globals->tg_done && globals->tg_current_iteration != current_iteration) { + ret = pthread_cond_wait(&globals->tg_cv, &globals->tg_lock); + assert(ret == 0); + } + should_continue = !globals->tg_done; + ret = pthread_mutex_unlock(&globals->tg_lock); + assert(ret == 0); + return should_continue; +} + +/* + * Called on the worker threads before each iteration finishes to synchronize + * with the other threads. + */ +static void +worker_thread_iteration_complete(test_globals_t *globals) +{ + int ret; + // Mark ourselves as done and wait for the other threads to finish + ret = pthread_mutex_lock(&globals->tg_lock); + assert(ret == 0); + globals->tg_running_count--; + if (globals->tg_running_count == 0) { + // We're the last one to finish. Mark this iteration as completed and wake everyone up. + globals->tg_iterations_completed++; + ret = pthread_cond_broadcast(&globals->tg_cv); + assert(ret == 0); + } else { + // Others are running. Wait for them to finish. + while (globals->tg_iterations_completed != globals->tg_current_iteration) { + ret = pthread_cond_wait(&globals->tg_cv, &globals->tg_lock); + assert(ret == 0); + } + } + ret = pthread_mutex_unlock(&globals->tg_lock); + assert(ret == 0); +} + +static void +fault_pages(fault_buffer_t *buffer, size_t stride) +{ + volatile unsigned char val; + for (unsigned char* ptr = buffer->fb_start; ptr < buffer->fb_start + buffer->fb_size; ptr += stride) { + val = *ptr; + } +} + +static fault_buffer_t * +get_fault_buffer(test_globals_t* globals) +{ + size_t index = atomic_fetch_add_explicit(next_fault_buffer_index_ptr(globals), 1UL, memory_order_acq_rel); + if (index < globals->tg_fault_buffer_arr_length) { + return &globals->tg_fault_buffer_arr[index]; + } + return NULL; +} + +static uint64_t +grab_and_fault_pages(test_globals_t* globals) +{ + struct timespec start_time, end_time; + uint64_t nanoseconds_faulting_on_cpu = 0; + int ret; + size_t stride = fault_buffer_stride(globals) * kPageSize; + while (true) { + fault_buffer_t *object = get_fault_buffer(globals); + if (object == NULL) { + break; + } + ret = clock_gettime(kThreadCPUTimeClock, &start_time); + assert(ret == 0); + + fault_pages(object, stride); + + ret = clock_gettime(kThreadCPUTimeClock, &end_time); + assert(ret == 0); + nanoseconds_faulting_on_cpu += (unsigned long) timespec_difference_us(&end_time, &start_time); + } + return nanoseconds_faulting_on_cpu; +} + +static uint64_t +start_iteration(test_globals_t* globals, test_variant_t variant, bool verbose) +{ + int ret; + uint64_t start_time; + ret = pthread_mutex_lock(&globals->tg_lock); + assert(ret == 0); + benchmark_log(verbose, "Waiting for workers to catch up before starting next iteration.\n"); + /* Wait until all the threads are ready to go to the next iteration */ + while (globals->tg_running_count != globals->tg_num_threads) { + 
ret = pthread_cond_wait(&globals->tg_cv, &globals->tg_lock); + } + benchmark_log(verbose, "Workers are all caught up\n"); + setup_memory(globals, variant); + benchmark_log(verbose, "Initialized data structures for iteration. Waking workers.\n"); + /* Grab a timestamp, tick the current iteration, and wake up the worker threads */ + start_time = get_timestamp_ns(); + globals->tg_current_iteration++; + ret = pthread_mutex_unlock(&globals->tg_lock); + assert(ret == 0); + ret = pthread_cond_broadcast(&globals->tg_cv); + assert(ret == 0); + return start_time; +} + +static uint64_t +finish_iteration(test_globals_t* globals, uint64_t start_time) +{ + int ret; + uint64_t end_time; + ret = pthread_mutex_lock(&globals->tg_lock); + assert(ret == 0); + while (globals->tg_iterations_completed != globals->tg_current_iteration) { + ret = pthread_cond_wait(&globals->tg_cv, &globals->tg_lock); + } + end_time = get_timestamp_ns(); + ret = pthread_mutex_unlock(&globals->tg_lock); + unmap_fault_buffers(globals); + assert(ret == 0); + return end_time - start_time; +} + +static void +setup_memory(test_globals_t* globals, test_variant_t variant) +{ + size_t stride = fault_buffer_stride(globals); + for (size_t i = 0; i < globals->tg_fault_buffer_arr_length; i += stride) { + fault_buffer_t *object = &globals->tg_fault_buffer_arr[i]; + object->fb_start = mmap_buffer(kVmObjectSize); + object->fb_size = kVmObjectSize; + if (variant == VARIANT_SHARE_VM_OBJECTS) { + /* + * Insert another buffer into the work queue for each thread. + * Each buffer starts 1 page past where the previous buffer started into the vm object. + * Since each thread strides by the number of threads * the page size they won't fault in the same pages. + */ + for (size_t j = 1; j < globals->tg_num_threads; j++) { + size_t offset = kPageSize * j; + fault_buffer_t *offset_object = &globals->tg_fault_buffer_arr[i + j]; + offset_object->fb_start = object->fb_start + offset; + offset_object->fb_size = object->fb_size - offset; + } + } else if (variant != VARIANT_SEPARATE_VM_OBJECTS) { + fprintf(stderr, "Unknown test variant.\n"); + exit(2); + } + } + atomic_store_explicit(next_fault_buffer_index_ptr(globals), 0, memory_order_release); +} + +static void +unmap_fault_buffers(test_globals_t* globals) +{ + size_t stride = fault_buffer_stride(globals); + for (size_t i = 0; i < globals->tg_fault_buffer_arr_length; i += stride) { + fault_buffer_t *buffer = &globals->tg_fault_buffer_arr[i]; + int res = munmap(buffer->fb_start, buffer->fb_size); + assert(res == 0); + } +} + +static test_globals_t * +allocate_test_globals() +{ + test_globals_t *globals = NULL; + int ret; + if (kCacheLineSize == 0) { + size_t cachelinesize_size = sizeof(kCacheLineSize); + ret = sysctlbyname("hw.cachelinesize", &kCacheLineSize, &cachelinesize_size, NULL, 0); + assert(ret == 0); + assert(kCacheLineSize > 0); + } + if (kPageSize == 0) { + size_t pagesize_size = sizeof(kPageSize); + ret = sysctlbyname("vm.pagesize", &kPageSize, &pagesize_size, NULL, 0); + assert(ret == 0); + assert(kPageSize > 0); + } + size_t test_globals_size = sizeof(test_globals_t) + kCacheLineSize + sizeof(_Atomic size_t); + globals = malloc(test_globals_size); + assert(globals != NULL); + memset(globals, 0, test_globals_size); + return globals; +} + +static void +init_globals(test_globals_t *globals, const test_args_t *args) +{ + pthread_mutexattr_t mutex_attrs; + pthread_condattr_t cond_attrs; + int ret; + memset(globals, 0, sizeof(test_globals_t)); + + ret = pthread_mutexattr_init(&mutex_attrs); + assert(ret == 
0); + ret = pthread_mutex_init(&globals->tg_lock, &mutex_attrs); + assert(ret == 0); + ret = pthread_condattr_init(&cond_attrs); + assert(ret == 0); + ret = pthread_cond_init(&globals->tg_cv, &cond_attrs); + assert(ret == 0); + ret = pthread_mutexattr_destroy(&mutex_attrs); + assert(ret == 0); + ret = pthread_condattr_destroy(&cond_attrs); + assert(ret == 0); + + globals->tg_num_threads = args->n_threads; + globals->tg_variant = args->variant; +} + +static void +init_fault_buffer_arr(test_globals_t *globals, const test_args_t *args, size_t memory_size) +{ + if (args->variant == VARIANT_SEPARATE_VM_OBJECTS) { + // This variant creates separate vm objects up to memory size bytes total + globals->tg_fault_buffer_arr_length = memory_size / kVmObjectSize; + } else if (args->variant == VARIANT_SHARE_VM_OBJECTS) { + // This variant creates separate vm objects up to memory size bytes total + // And places a pointer into each vm object for each thread. + globals->tg_fault_buffer_arr_length = memory_size / kVmObjectSize * globals->tg_num_threads; + } else { + fprintf(stderr, "Unsupported test variant.\n"); + exit(2); + } + // It doesn't make sense to have more threads than elements in the work queue. + // NB: Since we scale memory_size by ncpus, this can only happen if the user + // tries to run the benchmark with many more threads than cores. + assert(globals->tg_fault_buffer_arr_length >= globals->tg_num_threads); + globals->tg_fault_buffer_arr = calloc(sizeof(fault_buffer_t), globals->tg_fault_buffer_arr_length); + assert(globals->tg_fault_buffer_arr); +} + +static pthread_t * +spawn_worker_threads(test_globals_t *globals, unsigned int num_threads) +{ + int ret; + pthread_attr_t pthread_attrs; + globals->tg_num_threads = num_threads; + pthread_t* threads = malloc(sizeof(pthread_t) * num_threads); + assert(threads); + ret = pthread_attr_init(&pthread_attrs); + assert(ret == 0); + // Spawn the background threads + for (unsigned int i = 0; i < num_threads; i++) { + ret = pthread_create(threads + i, &pthread_attrs, faulting_thread, globals); + assert(ret == 0); + } + ret = pthread_attr_destroy(&pthread_attrs); + assert(ret == 0); + return threads; +} + +static pthread_t* +setup_test(test_globals_t *globals, const test_args_t *args, size_t memory_size, bool verbose) +{ + init_globals(globals, args); + init_fault_buffer_arr(globals, args, memory_size); + benchmark_log(verbose, "Initialized global data structures.\n"); + pthread_t *workers = spawn_worker_threads(globals, args->n_threads); + benchmark_log(verbose, "Spawned workers.\n"); + return workers; +} + +static uint64_t +join_background_threads(test_globals_t *globals, pthread_t *threads) +{ + // Set the done flag so that the background threads exit + int ret; + uint64_t total_cputime_spent_faulting = 0; + ret = pthread_mutex_lock(&globals->tg_lock); + assert(ret == 0); + globals->tg_done = true; + ret = pthread_cond_broadcast(&globals->tg_cv); + assert(ret == 0); + ret = pthread_mutex_unlock(&globals->tg_lock); + assert(ret == 0); + + // Join the background threads + for (unsigned int i = 0; i < globals->tg_num_threads; i++) { + uint64_t cputime_spent_faulting = 0; + ret = pthread_join(threads[i], (void **)&cputime_spent_faulting); + assert(ret == 0); + total_cputime_spent_faulting += cputime_spent_faulting; + } + free(threads); + return total_cputime_spent_faulting; +} + +static void +cleanup_test(test_globals_t* globals) +{ + int ret; + ret = pthread_mutex_destroy(&globals->tg_lock); + assert(ret == 0); + ret = 
pthread_cond_destroy(&globals->tg_cv); + assert(ret == 0); + free(globals->tg_fault_buffer_arr); + free(globals); +} + +static void +output_results(const test_globals_t* globals, double walltime_elapsed_seconds, double cputime_elapsed_seconds) +{ + size_t pgsize; + size_t sysctl_size = sizeof(pgsize); + int ret = sysctlbyname("vm.pagesize", &pgsize, &sysctl_size, NULL, 0); + assert(ret == 0); + size_t num_pages = 0; + double walltime_throughput, cputime_throughput; + size_t stride = fault_buffer_stride(globals); + for (size_t i = 0; i < globals->tg_fault_buffer_arr_length; i += stride) { + num_pages += globals->tg_fault_buffer_arr[i].fb_size / pgsize; + } + num_pages *= globals->tg_iterations_completed; + walltime_throughput = num_pages / walltime_elapsed_seconds; + cputime_throughput = num_pages / cputime_elapsed_seconds; + printf("-----Results-----\n"); + printf("Throughput (pages / wall second), Throughput (pages / CPU second)\n"); + printf("%f,%f\n", walltime_throughput, cputime_throughput); +} + +static void +print_help(char** argv) +{ + fprintf(stderr, "%s: [-v] duration num_threads\n", argv[0]); + fprintf(stderr, "\ntest variants:\n"); + fprintf(stderr, " %s Fault in different vm objects in each thread.\n", kSeparateObjectsArgument); + fprintf(stderr, " %s Share vm objects across faulting threads.\n", kShareObjectsArgument); +} + +static uint64_t +get_timestamp_ns() +{ + return clock_gettime_nsec_np(kWallTimeClock); +} + +static unsigned int +get_ncpu(void) +{ + int ncpu; + size_t sysctl_size = sizeof(ncpu); + int ret = sysctlbyname("hw.ncpu", &ncpu, &sysctl_size, NULL, 0); + assert(ret == 0); + return (unsigned int) ncpu; +} + +static void +parse_arguments(int argc, char** argv, test_args_t *args) +{ + int current_argument = 1; + memset(args, 0, sizeof(test_args_t)); + if (argc < 4 || argc > 6) { + print_help(argv); + exit(1); + } + if (argv[current_argument][0] == '-') { + if (strcmp(argv[current_argument], "-v") == 0) { + args->verbose = true; + } else { + fprintf(stderr, "Unknown argument %s\n", argv[current_argument]); + print_help(argv); + exit(1); + } + current_argument++; + } + if (strncasecmp(argv[current_argument], kSeparateObjectsArgument, strlen(kSeparateObjectsArgument)) == 0) { + args->variant = VARIANT_SEPARATE_VM_OBJECTS; + } else if (strncasecmp(argv[current_argument], kShareObjectsArgument, strlen(kShareObjectsArgument)) == 0) { + args->variant = VARIANT_SHARE_VM_OBJECTS; + } else { + print_help(argv); + exit(1); + } + current_argument++; + + long duration = strtol(argv[current_argument++], NULL, 10); + if (duration == 0) { + print_help(argv); + exit(1); + } + long num_cores = strtol(argv[current_argument++], NULL, 10); + if (num_cores == 0) { + print_help(argv); + exit(1); + } + assert(num_cores > 0 && num_cores <= get_ncpu()); + args->n_threads = (unsigned int) num_cores; + args->duration_seconds = (unsigned long) duration; +} + +static inline +_Atomic size_t * +next_fault_buffer_index_ptr(test_globals_t *globals) +{ + return (_Atomic size_t *) (((ptrdiff_t)(globals + 1)) + (int64_t)kCacheLineSize); +} +static size_t +fault_buffer_stride(const test_globals_t *globals) +{ + size_t stride; + if (globals->tg_variant == VARIANT_SEPARATE_VM_OBJECTS) { + stride = 1; + } else if (globals->tg_variant == VARIANT_SHARE_VM_OBJECTS) { + stride = globals->tg_num_threads; + } else { + fprintf(stderr, "Unknown variant\n"); + exit(-1); + } + return stride; +} diff --git a/tests/vm/fault_throughput.lua b/tests/vm/fault_throughput.lua new file mode 100755 index 
000000000..99a928c00 --- /dev/null +++ b/tests/vm/fault_throughput.lua @@ -0,0 +1,103 @@ +#!/usr/local/bin/recon +require 'strict' + +local benchrun = require 'benchrun' +local perfdata = require 'perfdata' +local csv = require 'csv' +local sysctl = require 'sysctl' +local os = require 'os' + +local kDefaultDuration = 30 + +local benchmark = benchrun.new { + name = 'xnu.zero_fill_fault_throughput', + version = 1, + arg = arg, + modify_argparser = function(parser) + parser:option{ + name = '--cpu-workers', + description = 'Number of cpu workers' + } + parser:flag{ + name = '--through-max-workers', + description = 'Run benchmark for [1..n] cpu workers' + } + parser:flag{ + name = '--through-max-workers-fast', + description = 'Run benchmark for [1..2] and each power of four value in [4..n] cpu workers' + } + parser:option{ + name = '--path', + description = 'Path to fault throughput binary' + } + parser:option{ + name = '--duration', + description = 'How long, in seconds, to run each iteration', + default = kDefaultDuration + } + parser:option{ + name = '--variant', + description = 'Which benchmark variant to run (sparate-objects or share-objects)', + default = 'separate-objects' + } + end +} + +assert(benchmark.opt.path, "No path supplied for fault throughput binary") +assert(benchmark.opt.variant == "separate-objects" or + benchmark.opt.variant == "share-objects", "Unsupported benchmark variant") + +local ncpus, err = sysctl('hw.logicalcpu_max') +assert(ncpus > 0, 'invalid number of logical cpus') +local cpu_workers = tonumber(benchmark.opt.cpu_workers) or ncpus + +local unit = perfdata.unit.custom('pages/sec') +local tests = {} + +function QueueTest(num_cores) + table.insert(tests, { + path = benchmark.opt.path, + num_cores = num_cores, + }) +end + +if benchmark.opt.through_max_workers then + for i = 1, cpu_workers do + QueueTest(i) + end +elseif benchmark.opt.through_max_workers_fast then + local i = 1 + while i <= cpu_workers do + QueueTest(i) + -- Always do a run with two threads to see what the first part of + -- the scaling curve looks like + -- (and to measure perf on dual core systems). 
+ if i == 1 and cpu_workers >= 2 then + QueueTest(i + 1) + end + i = i * 4 + end +else + QueueTest(cpu_workers) +end + +for _, test in ipairs(tests) do + local args = {test.path, "-v", benchmark.opt.variant, benchmark.opt.duration, test.num_cores, + echo = true} + for out in benchmark:run(args) do + local result = out:match("-----Results-----\n(.*)") + benchmark:assert(result, "Unable to find result data in output") + local data = csv.openstring(result, {header = true}) + for field in data:lines() do + for k, v in pairs(field) do + benchmark.writer:add_value(k, unit, tonumber(v), { + [perfdata.larger_better] = true, + threads = test.num_cores, + variant = benchmark.opt.variant + }) + end + end + end +end + +benchmark:finish() diff --git a/tests/vm/fault_throughput.plist b/tests/vm/fault_throughput.plist new file mode 100644 index 000000000..721f98cd7 --- /dev/null +++ b/tests/vm/fault_throughput.plist @@ -0,0 +1,47 @@ + + + + + Tests + + + Command + + recon + /AppleInternal/Tests/xnu/darwintests/vm/fault_throughput.lua + --through-max-workers-fast + --variant separate-objects + --path /AppleInternal/Tests/xnu/darwintests/vm/fault_throughput + --tmp + --no-subdir + + Tags + + perf + + TestName + xnu.vm.zero_fill_fault_throughput.separate-vm-objects + + + Command + + recon + /AppleInternal/Tests/xnu/darwintests/vm/fault_throughput.lua + --through-max-workers-fast + --variant share-objects + --path /AppleInternal/Tests/xnu/darwintests/vm/fault_throughput + --tmp + --no-subdir + + Tags + + perf + + TestName + xnu.vm.zero_fill_fault_throughput.share-vm-objects + + + Timeout + 1800 + + diff --git a/tests/vm/kern_max_task_pmem.c b/tests/vm/kern_max_task_pmem.c new file mode 100644 index 000000000..4719c5c44 --- /dev/null +++ b/tests/vm/kern_max_task_pmem.c @@ -0,0 +1,45 @@ +#include +#include +#include + +#define MAX_TASK_PMEM "kern.max_task_pmem" +#define HW_MEMSIZE_STR "hw.memsize" +#define HW_MEMSIZE_THRESHOLD 600 * 1024 * 1024 + +T_GLOBAL_META(T_META_NAMESPACE("xnu.vm")); + +/* + * Embedded Device having physical memory greater than 600MB should have positive + * value for kern.max_task_pmem if present. + * Strategy: + * Fetch hw.memsize for the device. + * If hw.memsize > 600MB, and kern.max_task_pmem is present, assert that + * kern.max_task_pmem is set to value > 0. + */ +T_DECL(kern_max_task_pmem, "Embedded platforms should have a positive value for kern.max_task_pmem when hw.memsize > 600MB") +{ + int kern_max_task_pmem = 0; + size_t pmem_size = sizeof(kern_max_task_pmem); + + uint64_t hw_memsize = 0; + size_t size_hw_memsize = sizeof(hw_memsize); + + int ret = 0; + + ret = sysctlbyname(HW_MEMSIZE_STR, &hw_memsize, &size_hw_memsize, NULL, 0); + T_ASSERT_POSIX_SUCCESS(ret, "call sysctlbyname to get hardware mem size."); + + T_LOG("Checking if %s > %d", HW_MEMSIZE_STR, HW_MEMSIZE_THRESHOLD); + if (hw_memsize <= HW_MEMSIZE_THRESHOLD) { + T_SKIP("Device has hw.memsize = %lld. 
Skipping the check for %s", hw_memsize, MAX_TASK_PMEM); + } + + T_LOG("Device has %s = %lld", HW_MEMSIZE_STR, hw_memsize); + T_LOG("Testing for %s ...", MAX_TASK_PMEM); + + ret = sysctlbyname(MAX_TASK_PMEM, &kern_max_task_pmem, &pmem_size, NULL, 0); + T_ASSERT_POSIX_SUCCESS(ret, "call sysctlbyname to get max task physical memory"); + + T_LOG("%s = %d", MAX_TASK_PMEM, kern_max_task_pmem); + T_ASSERT_GT_INT(kern_max_task_pmem, 0, "%s should be greater than 0", MAX_TASK_PMEM); +} diff --git a/tests/vm/memorystatus_sort_test.c b/tests/vm/memorystatus_sort_test.c new file mode 100644 index 000000000..63b929c8f --- /dev/null +++ b/tests/vm/memorystatus_sort_test.c @@ -0,0 +1,370 @@ +#include +#include +#include +#include + +#include +#include +#include + +/* internal */ +#include +#include +#include + +#define JETSAM_PRIORITY_IDLE 0 +#define JETSAM_PRIORITY_FOREGROUND 10 + +#define kNumProcsInCoalition 4 +typedef struct { + pid_t pids[kNumProcsInCoalition]; // An array of pids in this coalition. Owned by this struct. + pid_t expected_order[kNumProcsInCoalition]; // An array of pids in this coalition in proper sorted order. + uint64_t ids[COALITION_NUM_TYPES]; +} coalition_info_t; + +/* + * Children pids spawned by this test that need to be cleaned up. + * Has to be a global because the T_ATEND API doesn't take any arguments. + */ +#define kMaxChildrenProcs 16 +static pid_t children_pids[kMaxChildrenProcs]; +static size_t num_children = 0; + +/* + * Sets up a new coalition. + */ +static void init_coalition(coalition_info_t*); + +/* + * Places all procs in the coalition in the given band. + */ +static void place_coalition_in_band(const coalition_info_t *, int band); + +/* + * Place the given proc in the given band. + */ +static void place_proc_in_band(pid_t pid, int band); + +/* + * Cleans up any children processes. + */ +static void cleanup_children(void); + +/* + * Check if we're on a kernel where we can test coalitions. + */ +static bool has_unrestrict_coalitions(void); + +/* + * Unrestrict coalition syscalls. + */ +static void unrestrict_coalitions(void); + +/* + * Restrict coalition syscalls + */ +static void restrict_coalitions(void); + +/* + * Allocate the requested number of pages and fault them in. + * Used to achieve a desired footprint. + */ +static void *allocate_pages(int); + +/* + * Get the vm page size. + */ +static int get_vmpage_size(void); + +/* + * Launch a proc with a role in a coalition. + * If coalition_ids is NULL, skip adding the proc to the coalition. + */ +static pid_t +launch_proc_in_coalition(uint64_t *coalition_ids, int role, int num_pages); + +/* + * Background process that will munch some memory, signal its parent, and + * then sit in a loop. + */ +T_HELPER_DECL(coalition_member, "Mock coalition member") { + int num_pages = 0; + if (argc == 1) { + num_pages = atoi(argv[0]); + } + allocate_pages(num_pages); + // Signal to the parent that we've touched all of our pages. + if (kill(getppid(), SIGUSR1) != 0) { + T_LOG("Unable to signal to parent process!"); + exit(1); + } + while (true) { + ; + } +} + +/* + * Test that sorting the fg bucket in coalition order works properly. + * Spawns children in the same coalition in the fg band. Each child + * has a different coalition role. Verifies that the coalition + * is sorted properly by role. 
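As the doc comment above says, the actual ordering check happens in the kernel: the test passes the band to sort plus the pid order it expects to memorystatus_control(), and a non-zero return means the sorted bucket did not match. In sketch form, mirroring the call exactly as it appears in this test (MEMORYSTATUS_CMD_TEST_JETSAM_SORT and the expected-order buffer semantics come from the test itself, not from a public header):

#include <sys/types.h>

/* Sketch: ask the kernel to sort a jetsam band and validate it against expected_pids.
 * memorystatus_control() is declared in the private kern_memorystatus header the test includes. */
static int
verify_band_sort(int band, pid_t *expected_pids, size_t count)
{
	return memorystatus_control(MEMORYSTATUS_CMD_TEST_JETSAM_SORT, band, 0,
	           expected_pids, count * sizeof(pid_t));
}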
+ */ +T_DECL(memorystatus_sort_coalition, "Coalition sort order", + T_META_ASROOT(true)) { + int ret; + sig_t res; + coalition_info_t coalition; + if (!has_unrestrict_coalitions()) { + T_SKIP("Unable to test coalitions on this kernel."); + } + res = signal(SIGUSR1, SIG_IGN); + T_WITH_ERRNO; T_ASSERT_NE(res, SIG_ERR, "SIG_IGN SIGUSR1"); + unrestrict_coalitions(); + + // Set up a new coalition with various members. + init_coalition(&coalition); + T_ATEND(cleanup_children); + T_ATEND(restrict_coalitions); + // Place all procs in the coalition in the foreground band + place_coalition_in_band(&coalition, JETSAM_PRIORITY_FOREGROUND); + // Have the kernel sort the foreground bucket and verify that it's + // sorted correctly. + ret = memorystatus_control(MEMORYSTATUS_CMD_TEST_JETSAM_SORT, JETSAM_PRIORITY_FOREGROUND, 0, + coalition.expected_order, kNumProcsInCoalition * sizeof(pid_t)); + T_QUIET; T_ASSERT_EQ(ret, 0, "Error while sorting or validating sorted order.\n" + "Check os log output for details.\n" + "Look for memorystatus_verify_sort_order."); +} + +/* + * Test that sorting the idle bucket in footprint order works properly. + * + * Spawns some children with very different footprints in the idle band, + * and then ensures that they get sorted properly. + */ +T_DECL(memorystatus_sort_footprint, "Footprint sort order", + T_META_ASROOT(true)) { +#define kNumChildren 3 + static const int kChildrenFootprints[kNumChildren] = {500, 0, 2500}; + /* + * The expected sort order of the children in the order that they were launched. + * Used to construct the expected_order pid array. + * Note that procs should be sorted in descending footprint order. + */ + static const int kExpectedOrder[kNumChildren] = {2, 0, 1}; + static const int kJetsamBand = JETSAM_PRIORITY_IDLE; + __block pid_t pid; + sig_t res; + dispatch_source_t ds_allocated; + T_ATEND(cleanup_children); + + // After we spawn the children, they'll signal that they've touched their pages. 
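The footprint test sequences its children with a signal-backed dispatch source, set up in the lines that follow: SIGUSR1 is first set to SIG_IGN so the default terminate action never fires, and each delivery observed by the source either spawns the next child or runs the final verification. The skeleton of that pattern, with the handler body left as a placeholder:

#include <dispatch/dispatch.h>
#include <signal.h>

/* Sketch: observe SIGUSR1 via GCD rather than a signal handler. */
static void
run_signal_driven_loop(void (^on_signal)(void))
{
	signal(SIGUSR1, SIG_IGN);   /* keep the default action from terminating the process */
	dispatch_source_t src = dispatch_source_create(DISPATCH_SOURCE_TYPE_SIGNAL,
	    SIGUSR1, 0, dispatch_get_main_queue());
	dispatch_source_set_event_handler(src, on_signal);
	dispatch_activate(src);
	dispatch_main();            /* never returns; the handler ends the test when done */
}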
+ res = signal(SIGUSR1, SIG_IGN); + T_WITH_ERRNO; T_ASSERT_NE(res, SIG_ERR, "SIG_IGN SIGUSR1"); + ds_allocated = dispatch_source_create(DISPATCH_SOURCE_TYPE_SIGNAL, SIGUSR1, 0, dispatch_get_main_queue()); + T_QUIET; T_ASSERT_NOTNULL(ds_allocated, "dispatch_source_create (ds_allocated)"); + + dispatch_source_set_event_handler(ds_allocated, ^{ + if (num_children < kNumChildren) { + pid = launch_proc_in_coalition(NULL, 0, kChildrenFootprints[num_children]); + place_proc_in_band(pid, kJetsamBand); + } else { + pid_t expected_order[kNumChildren] = {0}; + int ret; + for (int i = 0; i < kNumChildren; i++) { + expected_order[i] = children_pids[kExpectedOrder[i]]; + } + // Verify the sort order + ret = memorystatus_control(MEMORYSTATUS_CMD_TEST_JETSAM_SORT, kJetsamBand, 0, + expected_order, sizeof(expected_order)); + T_QUIET; T_ASSERT_EQ(ret, 0, "Error while sorting or validating sorted order.\n" + "Check os log output for details.\n" + "Look for memorystatus_verify_sort_order."); + T_END; + } + }); + dispatch_activate(ds_allocated); + + pid = launch_proc_in_coalition(NULL, 0, kChildrenFootprints[num_children]); + place_proc_in_band(pid, kJetsamBand); + + dispatch_main(); + +#undef kNumChildren +} + +static pid_t +launch_proc_in_coalition(uint64_t *coalition_ids, int role, int num_pages) +{ + int ret; + posix_spawnattr_t attr; + pid_t pid; + char testpath[PATH_MAX]; + uint32_t testpath_buf_size = PATH_MAX; + char num_pages_str[32] = {0}; + char *argv[5] = {testpath, "-n", "coalition_member", num_pages_str, NULL}; + extern char **environ; + T_QUIET; T_ASSERT_LT(num_children + 1, (size_t) kMaxChildrenProcs, "Don't create too many children."); + ret = posix_spawnattr_init(&attr); + T_QUIET; T_ASSERT_POSIX_ZERO(ret, "posix_spawnattr_init"); + if (coalition_ids != NULL) { + for (int i = 0; i < COALITION_NUM_TYPES; i++) { + ret = posix_spawnattr_setcoalition_np(&attr, coalition_ids[i], i, role); + T_QUIET; T_ASSERT_POSIX_ZERO(ret, "posix_spawnattr_setcoalition_np"); + } + } + + ret = snprintf(num_pages_str, sizeof(num_pages_str), "%d", num_pages); + T_QUIET; T_ASSERT_LE((size_t) ret, sizeof(num_pages_str), "Don't allocate too many pages."); + ret = _NSGetExecutablePath(testpath, &testpath_buf_size); + T_QUIET; T_ASSERT_EQ(ret, 0, "_NSGetExecutablePath"); + ret = posix_spawn(&pid, argv[0], NULL, &attr, argv, environ); + T_QUIET; T_ASSERT_POSIX_ZERO(ret, "posix_spawn"); + ret = posix_spawnattr_destroy(&attr); + T_QUIET; T_ASSERT_POSIX_ZERO(ret, "posix_spawnattr_destroy"); + children_pids[num_children++] = pid; + return pid; +} + +static void +init_coalition(coalition_info_t *coalition) +{ + int ret; + uint32_t flags = 0; + memset(coalition, 0, sizeof(coalition_info_t)); + for (int i = 0; i < COALITION_NUM_TYPES; i++) { + COALITION_CREATE_FLAGS_SET_TYPE(flags, i); + ret = coalition_create(&coalition->ids[i], flags); + T_QUIET; T_ASSERT_POSIX_ZERO(ret, "coalition_create"); + } + + /* + * Spawn procs for each coalition role, and construct the expected + * sorted order. + */ + for (size_t i = 0; i < kNumProcsInCoalition; i++) { + int role; + if (i == 0) { + role = COALITION_TASKROLE_LEADER; + } else if (i == 1) { + role = COALITION_TASKROLE_EXT; + } else if (i == 2) { + role = COALITION_TASKROLE_UNDEF; + } else { + role = COALITION_TASKROLE_XPC; + } + pid_t pid = launch_proc_in_coalition(coalition->ids, role, 0); + coalition->pids[i] = pid; + /* + * Determine the expected sorted order. 
+ * After a bucket has been coalition sorted, coalition members should + * be in the following kill order: + * undefined coalition members, extensions, xpc services, leader + */ + if (role == COALITION_TASKROLE_LEADER) { + coalition->expected_order[3] = pid; + } else if (role == COALITION_TASKROLE_XPC) { + coalition->expected_order[2] = pid; + } else if (role == COALITION_TASKROLE_EXT) { + coalition->expected_order[1] = pid; + } else { + coalition->expected_order[0] = pid; + } + } +} + +static void +place_proc_in_band(pid_t pid, int band) +{ + memorystatus_priority_properties_t props = {0}; + int ret; + props.priority = band; + props.user_data = 0; + ret = memorystatus_control(MEMORYSTATUS_CMD_SET_PRIORITY_PROPERTIES, pid, 0, &props, sizeof(props)); + T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "move proc to band"); +} + + +static void +place_coalition_in_band(const coalition_info_t *coalition, int band) +{ + for (size_t i = 0; i < kNumProcsInCoalition; i++) { + pid_t curr = coalition->pids[i]; + place_proc_in_band(curr, band); + } +} + +static void +cleanup_children(void) +{ + int ret, status; + for (size_t i = 0; i < num_children; i++) { + pid_t exited_pid = 0; + pid_t curr = children_pids[i]; + ret = kill(curr, SIGKILL); + T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "kill"); + while (exited_pid == 0) { + exited_pid = waitpid(curr, &status, 0); + } + T_QUIET; T_ASSERT_POSIX_SUCCESS(exited_pid, "waitpid"); + T_QUIET; T_ASSERT_TRUE(WIFSIGNALED(status), "proc was signaled."); + T_QUIET; T_ASSERT_EQ(WTERMSIG(status), SIGKILL, "proc was killed"); + } +} + +static bool +has_unrestrict_coalitions() +{ + int ret, val; + size_t val_sz; + + val = 0; + val_sz = sizeof(val); + ret = sysctlbyname("kern.unrestrict_coalitions", &val, &val_sz, NULL, 0); + return ret >= 0; +} + +static void +unrestrict_coalitions() +{ + int ret, val = 1; + size_t val_sz; + val_sz = sizeof(val); + ret = sysctlbyname("kern.unrestrict_coalitions", NULL, 0, &val, val_sz); + T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "kern.unrestrict_coalitions <- 1"); +} + +static void +restrict_coalitions() +{ + int ret, val = 0; + size_t val_sz; + val_sz = sizeof(val); + ret = sysctlbyname("kern.unrestrict_coalitions", NULL, 0, &val, val_sz); + T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "kern.unrestrict_coalitions <- 0"); +} + +static void * +allocate_pages(int num_pages) +{ + int page_size, i; + unsigned char *buf; + + page_size = get_vmpage_size(); + buf = malloc((unsigned long)(num_pages * page_size)); + for (i = 0; i < num_pages; i++) { + ((volatile unsigned char *)buf)[i * page_size] = 1; + } + return buf; +} + +static int +get_vmpage_size() +{ + int vmpage_size; + size_t size = sizeof(vmpage_size); + int ret = sysctlbyname("vm.pagesize", &vmpage_size, &size, NULL, 0); + T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "failed to query vm.pagesize"); + T_QUIET; T_ASSERT_GT(vmpage_size, 0, "vm.pagesize is not > 0"); + return vmpage_size; +} diff --git a/tests/vm/perf_helpers.c b/tests/vm/perf_helpers.c new file mode 100644 index 000000000..b4dea3102 --- /dev/null +++ b/tests/vm/perf_helpers.c @@ -0,0 +1,69 @@ +#include +#include +#include +#include +#include +#include + +#include + +#include "vm/perf_helpers.h" + +#define K_CTIME_BUFFER_LEN 26 +void +benchmark_log(bool verbose, const char *restrict fmt, ...) 
+{ + time_t now; + char time_buffer[K_CTIME_BUFFER_LEN]; + struct tm local_time; + va_list args; + if (verbose) { + strncpy(time_buffer, "UNKNOWN", K_CTIME_BUFFER_LEN); + + now = time(NULL); + if (now != -1) { + struct tm* ret = localtime_r(&now, &local_time); + if (ret == &local_time) { + snprintf(time_buffer, K_CTIME_BUFFER_LEN, + "%.2d/%.2d/%.2d %.2d:%.2d:%.2d", + local_time.tm_mon + 1, local_time.tm_mday, + local_time.tm_year + 1900, + local_time.tm_hour, local_time.tm_min, + local_time.tm_sec); + } + } + + printf("%s: ", time_buffer); + va_start(args, fmt); + vprintf(fmt, args); + fflush(stdout); + } +} + +uint64_t +timespec_difference_us(const struct timespec* a, const struct timespec* b) +{ + assert(a->tv_sec >= b->tv_sec || a->tv_nsec >= b->tv_nsec); + long seconds_elapsed = a->tv_sec - b->tv_sec; + uint64_t nsec_elapsed; + if (b->tv_nsec > a->tv_nsec) { + seconds_elapsed--; + nsec_elapsed = kNumNanosecondsInSecond - (uint64_t) (b->tv_nsec - a->tv_nsec); + } else { + nsec_elapsed = (uint64_t) (a->tv_nsec - b->tv_nsec); + } + return (uint64_t) seconds_elapsed * kNumMicrosecondsInSecond + nsec_elapsed / kNumNanosecondsInMicrosecond; +} + +unsigned char * +mmap_buffer(size_t memsize) +{ + int fd = -1; + unsigned char* addr = (unsigned char *)mmap(NULL, memsize, PROT_READ | PROT_WRITE, MAP_ANON | MAP_PRIVATE, + fd, 0); + if ((void*) addr == MAP_FAILED) { + fprintf(stderr, "Unable to mmap a memory object: %s\n", strerror(errno)); + exit(2); + } + return addr; +} diff --git a/tests/vm/perf_helpers.h b/tests/vm/perf_helpers.h new file mode 100644 index 000000000..53633f542 --- /dev/null +++ b/tests/vm/perf_helpers.h @@ -0,0 +1,34 @@ +#ifndef VM_PERF_HELPERS_H +#define VM_PERF_HELPERS_H + +/* + * Utility functions and constants used by the VM perf tests. + */ +#include +#include +#include + +/* + * mmap an anonymous chunk of memory. + */ +unsigned char *mmap_buffer(size_t size); +/* + * Returns a - b in microseconds. + * NB: a must be >= b + */ +uint64_t timespec_difference_us(const struct timespec* a, const struct timespec* b); +/* + * Print the message to stdout along with the current time. + * Also flushes stdout so that the log can help detect hangs. Don't call + * this function from within the measured portion of the benchmark as it will + * pollute your measurement. + * + * NB: Will only log if verbose == true. + */ +void benchmark_log(bool verbose, const char *restrict fmt, ...) __attribute__((format(printf, 2, 3))); + +static const uint64_t kNumMicrosecondsInSecond = 1000UL * 1000; +static const uint64_t kNumNanosecondsInMicrosecond = 1000UL; +static const uint64_t kNumNanosecondsInSecond = kNumNanosecondsInMicrosecond * kNumMicrosecondsInSecond; + +#endif /* !defined(VM_PERF_HELPERS_H) */ diff --git a/tests/vm/perf_madvise.c b/tests/vm/perf_madvise.c new file mode 100644 index 000000000..b579361b3 --- /dev/null +++ b/tests/vm/perf_madvise.c @@ -0,0 +1,200 @@ +/* + * Madvise benchmark. + * Currently only times various types of madvise frees. 
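+ *
+ * Usage (per parse_arguments() below):
+ *   perf_madvise [-v] MADV_FREE <duration_seconds> <size_mb>
+ * Results are reported as throughput in bytes per CPU second.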
+ */ + +#include +#include +#include +#include +#include + +#include +#include + +#include "vm/perf_helpers.h" + +typedef enum test_variant { + VARIANT_MADVISE_FREE +} test_variant_t; + +/* Arguments parsed from the command line */ +typedef struct test_args { + uint64_t ta_duration_seconds; + uint64_t ta_size; + test_variant_t ta_variant; + bool ta_verbose; +} test_args_t; + +static void print_help(char **argv); +static void parse_arguments(int argc, char** argv, test_args_t *args); +static double madvise_free_test(const test_args_t* args); +/* + * Allocate a buffer of the given size and fault in all of its pages. + */ +static void *allocate_and_init_buffer(uint64_t size); +/* + * Fault in the pages in the given buffer. + */ +static void fault_pages(unsigned char *buffer, size_t size, size_t stride); +/* + * Output the results of the test in pages / CPU second. + */ +static void output_throughput(double throughput); + +/* Test Variants */ +static const char* kMadviseFreeArgument = "MADV_FREE"; +/* The VM page size */ +static size_t kPageSize = 0; +static const clockid_t kThreadCPUTimeClock = CLOCK_THREAD_CPUTIME_ID; + +int +main(int argc, char** argv) +{ + test_args_t args; + parse_arguments(argc, argv, &args); + double throughput = 0.0; + if (args.ta_variant == VARIANT_MADVISE_FREE) { + throughput = madvise_free_test(&args); + } else { + fprintf(stderr, "Unknown test variant\n"); + exit(2); + } + output_throughput(throughput); + return 0; +} + +static double +madvise_free_test(const test_args_t* args) +{ + int ret, ret_end; + assert(args->ta_variant == VARIANT_MADVISE_FREE); + benchmark_log(args->ta_verbose, "Running madvise free test\n"); + size_t time_elapsed_us = 0; + size_t count = 0; + double throughput = 0; + + while (time_elapsed_us < args->ta_duration_seconds * kNumMicrosecondsInSecond) { + benchmark_log(args->ta_verbose, "Starting iteration %zu\n", count + 1); + void* buffer = allocate_and_init_buffer(args->ta_size); + benchmark_log(args->ta_verbose, "Allocated and faulted in test buffer\n"); + struct timespec start_time, end_time; + ret = clock_gettime(kThreadCPUTimeClock, &start_time); + + madvise(buffer, args->ta_size, MADV_FREE); + + ret_end = clock_gettime(kThreadCPUTimeClock, &end_time); + assert(ret == 0); + assert(ret_end == 0); + time_elapsed_us += timespec_difference_us(&end_time, &start_time); + + ret = munmap(buffer, args->ta_size); + assert(ret == 0); + benchmark_log(args->ta_verbose, "Completed iteration %zu\nMeasured %zu time on CPU so far.\n", count + 1, time_elapsed_us); + + count++; + } + assert(kPageSize != 0); + throughput = (count * args->ta_size) / ((double)time_elapsed_us / kNumMicrosecondsInSecond); + return throughput; +} + +static void * +allocate_and_init_buffer(uint64_t size) +{ + unsigned char *buffer = NULL; + int ret; + size_t len; + if (kPageSize == 0) { + size_t pagesize_size = sizeof(kPageSize); + ret = sysctlbyname("vm.pagesize", &kPageSize, &pagesize_size, NULL, 0); + assert(ret == 0); + assert(kPageSize > 0); + } + len = size; + buffer = mmap_buffer(len); + fault_pages(buffer, len, kPageSize); + return buffer; +} + +static void +fault_pages(unsigned char *buffer, size_t size, size_t stride) +{ + volatile unsigned char val; + for (unsigned char* ptr = buffer; ptr < buffer + size; ptr += stride) { + val = *ptr; + } +} + +static void +parse_arguments(int argc, char** argv, test_args_t *args) +{ + int current_positional_argument = 0; + long duration = -1, size_mb = -1; + memset(args, 0, sizeof(test_args_t)); + for (int current_argument = 1; 
current_argument < argc; current_argument++) { + if (argv[current_argument][0] == '-') { + if (strcmp(argv[current_argument], "-v") == 0) { + args->ta_verbose = true; + } else { + fprintf(stderr, "Unknown argument %s\n", argv[current_argument]); + print_help(argv); + exit(1); + } + if (current_argument >= argc) { + print_help(argv); + exit(1); + } + } else { + if (current_positional_argument == 0) { + if (strcasecmp(argv[current_argument], kMadviseFreeArgument) == 0) { + args->ta_variant = VARIANT_MADVISE_FREE; + } else { + print_help(argv); + exit(1); + } + current_positional_argument++; + } else if (current_positional_argument == 1) { + duration = strtol(argv[current_argument], NULL, 10); + if (duration <= 0) { + print_help(argv); + exit(1); + } + current_positional_argument++; + } else if (current_positional_argument == 2) { + size_mb = strtol(argv[current_argument], NULL, 10); + if (size_mb <= 0) { + print_help(argv); + exit(1); + } + current_positional_argument++; + } else { + print_help(argv); + exit(1); + } + } + } + if (current_positional_argument != 3) { + fprintf(stderr, "Expected 3 positional arguments. %d were supplied.\n", current_positional_argument); + print_help(argv); + exit(1); + } + args->ta_duration_seconds = (uint64_t) duration; + args->ta_size = ((uint64_t) size_mb * (1UL << 20)); +} + +static void +print_help(char** argv) +{ + fprintf(stderr, "%s: [-v] duration_seconds size_mb\n", argv[0]); + fprintf(stderr, "\ntest variants:\n"); + fprintf(stderr, " %s Measure MADV_FREE time.\n", kMadviseFreeArgument); +} + +static void +output_throughput(double throughput) +{ + printf("-----Results-----\n"); + printf("Throughput (bytes / CPU second)\n"); + printf("%f\n", throughput); +} diff --git a/tests/vm/perf_madvise.lua b/tests/vm/perf_madvise.lua new file mode 100755 index 000000000..43bb80e50 --- /dev/null +++ b/tests/vm/perf_madvise.lua @@ -0,0 +1,69 @@ +#!/usr/local/bin/recon + +local benchrun = require 'benchrun' +local perfdata = require 'perfdata' +local csv = require 'csv' + +require 'strict' + +local kDefaultDuration = 15 +local kDefaultSizeMb = 16 + +local benchmark = benchrun.new { + name = 'xnu.madvise', + version = 1, + arg = arg, + modify_argparser = function(parser) + parser:argument { + name = 'path', + description = 'Path to perf_madvise binary' + } + parser:option{ + name = '--duration', + description = 'How long, in seconds, to run each iteration', + default = kDefaultDuration + } + parser:option{ + name = '--variant', + description = 'Which benchmark variant to run (MADV_FREE)', + default = 'MADV_FREE', + choices = {"MADV_FREE"} + } + parser:option{ + name = '--verbose', + description = 'Enable verbose logging', + } + parser:option{ + name = '--size', + description = 'Madvise buffer size (MB)', + default = kDefaultSizeMb + } + end +} + +local unit = perfdata.unit.custom('pages/sec') +local tests = { + path = benchmark.opt.path, +} + +local args = {benchmark.opt.path, benchmark.opt.variant, benchmark.opt.duration, benchmark.opt.size} +if benchmark.opt.verbose then + table.insert(args, "-v") +end +args.echo = true +for out in benchmark:run(args) do + local result = out:match("-----Results-----\n(.*)") + benchmark:assert(result, "Unable to find result data in output") + local data = csv.openstring(result, {header = true}) + for field in data:lines() do + for k, v in pairs(field) do + benchmark.writer:add_value(k, unit, tonumber(v), { + [perfdata.larger_better] = true, + variant = benchmark.opt.variant + }) + end + end +end 
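+
+-- The loop above parses everything after "-----Results-----" in perf_madvise's
+-- stdout, which output_throughput() in perf_madvise.c prints as, e.g.:
+--
+--   -----Results-----
+--   Throughput (bytes / CPU second)
+--   123456789.000000
+--
+-- With header = true, the "Throughput (bytes / CPU second)" line is read as the
+-- column name, so that string becomes the metric name passed to add_value() and
+-- matches the primary metric selected below.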
+benchmark.writer:set_primary_metric("Throughput (bytes / CPU second)") + +benchmark:finish() diff --git a/tests/vm/vm_allocation.c b/tests/vm/vm_allocation.c new file mode 100644 index 000000000..c66933b1f --- /dev/null +++ b/tests/vm/vm_allocation.c @@ -0,0 +1,4184 @@ +/* Mach virtual memory unit tests + * + * The main goal of this code is to facilitate the construction, + * running, result logging and clean up of a test suite, taking care + * of all the scaffolding. A test suite is a sequence of very targeted + * unit tests, each running as a separate process to isolate its + * address space. + * A unit test is abstracted as a unit_test_t structure, consisting of + * a test function and a logging identifier. A test suite is a suite_t + * structure, consisting of an unit_test_t array, fixture set up and + * tear down functions. + * Test suites are created dynamically. Each of its unit test runs in + * its own fork()d process, with the fixture set up and tear down + * running before and after each test. The parent process will log a + * pass result if the child exits normally, and a fail result in any + * other case (non-zero exit status, abnormal signal). The suite + * results are then aggregated and logged after the [SUMMARY] keyword, + * and finally the test suite is destroyed. + * The included test suites cover the Mach memory allocators, + * mach_vm_allocate() and mach_vm_map() with various options, and + * mach_vm_deallocate(), mach_vm_read(), mach_vm_write(), + * mach_vm_protect(), mach_vm_copy(). + * + * Author: Renaud Dreyer (rdreyer@apple.com) + * + * Transformed to libdarwintest by Tristan Ye (tristan_ye@apple.com) */ + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +T_GLOBAL_META(T_META_NAMESPACE("xnu.vm")); + +/**************************/ +/**************************/ +/* Unit Testing Framework */ +/**************************/ +/**************************/ + +/*********************/ +/* Private interface */ +/*********************/ + +static const char frameworkname[] = "vm_unitester"; + +/* Type for test, fixture set up and fixture tear down functions. */ +typedef void (*test_fn_t)(); + +/* Unit test structure. */ +typedef struct { + const char * name; + test_fn_t test; +} unit_test_t; + +/* Test suite structure. */ +typedef struct { + const char * name; + int numoftests; + test_fn_t set_up; + unit_test_t * tests; + test_fn_t tear_down; +} suite_t; + +int _quietness = 0; +int _expected_signal = 0; + +struct { + uintmax_t numoftests; + uintmax_t passed_tests; +} results = {0, 0}; + +#define logr(format, ...) \ + do { \ + if (_quietness <= 1) { \ + T_LOG(format, ## __VA_ARGS__); \ + } \ + } while (0) + +#define logv(format, ...) 
\ + do { \ + if (_quietness == 0) { \ + T_LOG(format, ## __VA_ARGS__); \ + } \ + } while (0) + +static suite_t * +create_suite(const char * name, int numoftests, test_fn_t set_up, unit_test_t * tests, test_fn_t tear_down) +{ + suite_t * suite = (suite_t *)malloc(sizeof(suite_t)); + T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(suite, "malloc()"); + + suite->name = name; + suite->numoftests = numoftests; + suite->set_up = set_up; + suite->tests = tests; + suite->tear_down = tear_down; + return suite; +} + +static void +destroy_suite(suite_t * suite) +{ + free(suite); +} + +static void +log_suite_info(suite_t * suite) +{ + logr("[TEST] %s", suite->name); + logr("Number of tests: %d\n", suite->numoftests); +} + +static void +log_suite_results(suite_t * suite, int passed_tests) +{ + results.numoftests += (uintmax_t)suite->numoftests; + results.passed_tests += (uintmax_t)passed_tests; +} + +static void +log_test_info(unit_test_t * unit_test, unsigned test_num) +{ + logr("[BEGIN] #%04d: %s", test_num, unit_test->name); +} + +static void +log_test_result(unit_test_t * unit_test, boolean_t test_passed, unsigned test_num) +{ + logr("[%s] #%04d: %s\n", test_passed ? "PASS" : "FAIL", test_num, unit_test->name); +} + +/* Run a test with fixture set up and teardown, while enforcing the + * time out constraint. */ +static void +run_test(suite_t * suite, unit_test_t * unit_test, unsigned test_num) +{ + log_test_info(unit_test, test_num); + + suite->set_up(); + unit_test->test(); + suite->tear_down(); +} + +/* Check a child return status. */ +static boolean_t +child_terminated_normally(int child_status) +{ + boolean_t normal_exit = FALSE; + + if (WIFEXITED(child_status)) { + int exit_status = WEXITSTATUS(child_status); + if (exit_status) { + T_LOG("Child process unexpectedly exited with code %d.", + exit_status); + } else if (!_expected_signal) { + normal_exit = TRUE; + } + } else if (WIFSIGNALED(child_status)) { + int signal = WTERMSIG(child_status); + if (signal == _expected_signal || + (_expected_signal == -1 && (signal == SIGBUS || signal == SIGSEGV))) { + if (_quietness <= 0) { + T_LOG("Child process died with expected signal " + "%d.", signal); + } + normal_exit = TRUE; + } else { + T_LOG("Child process unexpectedly died with signal %d.", + signal); + } + } else { + T_LOG("Child process unexpectedly did not exit nor die"); + } + + return normal_exit; +} + +/* Run a test in its own process, and report the result. */ +static boolean_t +child_test_passed(suite_t * suite, unit_test_t * unit_test) +{ + int test_status; + static unsigned test_num = 0; + + test_num++; + + pid_t test_pid = fork(); + T_QUIET; T_ASSERT_POSIX_SUCCESS(test_pid, "fork()"); + if (!test_pid) { + run_test(suite, unit_test, test_num); + exit(0); + } + while (waitpid(test_pid, &test_status, 0) != test_pid) { + continue; + } + boolean_t test_result = child_terminated_normally(test_status); + log_test_result(unit_test, test_result, test_num); + return test_result; +} + +/* Run each test in a suite, and report the results. */ +static int +count_passed_suite_tests(suite_t * suite) +{ + int passed_tests = 0; + int i; + + for (i = 0; i < suite->numoftests; i++) { + passed_tests += child_test_passed(suite, &(suite->tests[i])); + } + return passed_tests; +} + +/********************/ +/* Public interface */ +/********************/ + +#define DEFAULT_QUIETNESS 0 /* verbose */ +#define RESULT_ERR_QUIETNESS 1 /* result and error */ +#define ERROR_ONLY_QUIETNESS 2 /* error only */ + +#define run_suite(set_up, tests, tear_down, ...) 
\ + _run_suite((sizeof(tests) / sizeof(tests[0])), (set_up), (tests), (tear_down), __VA_ARGS__) + +typedef unit_test_t UnitTests[]; + +void _run_suite(int numoftests, test_fn_t set_up, UnitTests tests, test_fn_t tear_down, const char * format, ...) +__printflike(5, 6); + +void +_run_suite(int numoftests, test_fn_t set_up, UnitTests tests, test_fn_t tear_down, const char * format, ...) +{ + va_list ap; + char * name; + + va_start(ap, format); + T_QUIET; T_ASSERT_POSIX_SUCCESS(vasprintf(&name, format, ap), "vasprintf()"); + va_end(ap); + suite_t * suite = create_suite(name, numoftests, set_up, tests, tear_down); + log_suite_info(suite); + log_suite_results(suite, count_passed_suite_tests(suite)); + free(name); + destroy_suite(suite); +} + +/* Setters and getters for various test framework global + * variables. Should only be used outside of the test, set up and tear + * down functions. */ + +/* Expected signal for a test, default is 0. */ +void +set_expected_signal(int signal) +{ + _expected_signal = signal; +} + +int +get_expected_signal() +{ + return _expected_signal; +} + +/* Logging verbosity. */ +void +set_quietness(int value) +{ + _quietness = value; +} + +int +get_quietness() +{ + return _quietness; +} + +/* For fixture set up and tear down functions, and units tests. */ +void +do_nothing() +{ +} + +void +log_aggregated_results() +{ + T_LOG("[SUMMARY] Aggregated Test Results\n"); + T_LOG("Total: %ju", results.numoftests); + T_LOG("Passed: %ju", results.passed_tests); + T_LOG("Failed: %ju\n", results.numoftests - results.passed_tests); + + T_QUIET; T_ASSERT_EQ(results.passed_tests, results.numoftests, + "%d passed of total %d tests", + results.passed_tests, results.numoftests); +} + +/*******************************/ +/*******************************/ +/* Virtual memory unit testing */ +/*******************************/ +/*******************************/ + +/* Test exit values: + * 0: pass + * 1: fail, generic unexpected failure + * 2: fail, unexpected Mach return value + * 3: fail, time out */ + +#define DEFAULT_VM_SIZE ((mach_vm_size_t)(1024ULL * 4096ULL)) + +#define POINTER(address) ((char *)(uintptr_t)(address)) +#define MACH_VM_ADDRESS_T(address) (*((mach_vm_address_t *)(uintptr_t)(address))) + +static int vm_address_size = sizeof(mach_vm_address_t); + +static char *progname = ""; + +/*************************/ +/* xnu version functions */ +/*************************/ + +/* Find the xnu version string. */ +char * +xnu_version_string() +{ + size_t length; + int mib[2]; + mib[0] = CTL_KERN; + mib[1] = KERN_VERSION; + + T_QUIET; + T_ASSERT_POSIX_SUCCESS(sysctl(mib, 2, NULL, &length, NULL, 0), "sysctl()"); + char * version = (char *)malloc(length); + T_QUIET; + T_WITH_ERRNO; + T_ASSERT_NOTNULL(version, "malloc()"); + T_QUIET; + T_EXPECT_POSIX_SUCCESS(sysctl(mib, 2, version, &length, NULL, 0), "sysctl()"); + if (T_RESULT == T_RESULT_FAIL) { + free(version); + T_END; + } + char * xnu_string = strstr(version, "xnu-"); + free(version); + T_QUIET; + T_ASSERT_NOTNULL(xnu_string, "%s: error finding xnu version string.", progname); + return xnu_string; +} + +/* Find the xnu major version number. 
*/ +unsigned int +xnu_major_version() +{ + char * endptr; + char * xnu_substring = xnu_version_string() + 4; + + errno = 0; + unsigned int xnu_version = strtoul(xnu_substring, &endptr, 0); + T_QUIET; + T_ASSERT_TRUE((errno != ERANGE && endptr != xnu_substring), + "%s: error finding xnu major version number.", progname); + return xnu_version; +} + +/*************************/ +/* Mach assert functions */ +/*************************/ + +static inline void +assert_mach_return(kern_return_t kr, kern_return_t expected_kr, const char * mach_routine) +{ + T_QUIET; T_ASSERT_EQ(kr, expected_kr, + "%s unexpectedly returned: %s." + "Should have returned: %s.", + mach_routine, mach_error_string(kr), + mach_error_string(expected_kr)); +} + +/*******************************/ +/* Arrays for test suite loops */ +/*******************************/ + +/* Memory allocators */ +typedef kern_return_t (*allocate_fn_t)(vm_map_t, mach_vm_address_t *, mach_vm_size_t, int); + + +/* + * Remember any pre-reserved fixed address, which needs to be released prior to an allocation. + */ +static mach_vm_address_t fixed_vm_address = 0x0; +static mach_vm_size_t fixed_vm_size = 0; + +/* forward decl */ +void assert_deallocate_success(mach_vm_address_t address, mach_vm_size_t size); + +/* + * If trying to allocate at a fixed address, we need to do the delayed deallocate first. + */ +static void +check_fixed_address(mach_vm_address_t *address, mach_vm_size_t size) +{ + if (fixed_vm_address != 0 && + fixed_vm_address <= *address && + *address + size <= fixed_vm_address + fixed_vm_size) { + assert_deallocate_success(fixed_vm_address, fixed_vm_size); + fixed_vm_address = 0; + fixed_vm_size = 0; + } +} + +kern_return_t +wrapper_mach_vm_allocate(vm_map_t map, mach_vm_address_t * address, mach_vm_size_t size, int flags) +{ + check_fixed_address(address, size); + return mach_vm_allocate(map, address, size, flags); +} + +kern_return_t +wrapper_mach_vm_map(vm_map_t map, mach_vm_address_t * address, mach_vm_size_t size, int flags) +{ + check_fixed_address(address, size); + return mach_vm_map(map, address, size, (mach_vm_offset_t)0, flags, MACH_PORT_NULL, (memory_object_offset_t)0, FALSE, + VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT); +} + +/* Should have the same behavior as when mask is zero. 
*/ +kern_return_t +wrapper_mach_vm_map_4kB(vm_map_t map, mach_vm_address_t * address, mach_vm_size_t size, int flags) +{ + check_fixed_address(address, size); + return mach_vm_map(map, address, size, (mach_vm_offset_t)0xFFF, flags, MACH_PORT_NULL, (memory_object_offset_t)0, FALSE, + VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT); +} + +kern_return_t +wrapper_mach_vm_map_2MB(vm_map_t map, mach_vm_address_t * address, mach_vm_size_t size, int flags) +{ + check_fixed_address(address, size); + return mach_vm_map(map, address, size, (mach_vm_offset_t)0x1FFFFF, flags, MACH_PORT_NULL, (memory_object_offset_t)0, FALSE, + VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT); +} + +mach_port_t +memory_entry(mach_vm_size_t * size) +{ + mach_port_t object_handle = MACH_PORT_NULL; + mach_vm_size_t original_size = *size; + + T_QUIET; T_ASSERT_MACH_SUCCESS(mach_make_memory_entry_64(mach_task_self(), size, (memory_object_offset_t)0, + (MAP_MEM_NAMED_CREATE | VM_PROT_ALL), &object_handle, 0), + "mach_make_memory_entry_64()"); + T_QUIET; T_ASSERT_EQ(*size, round_page_kernel(original_size), + "mach_make_memory_entry_64() unexpectedly returned a named " + "entry of size 0x%jx (%ju).\n" + "Should have returned a " + "named entry of size 0x%jx (%ju).", + (uintmax_t)*size, (uintmax_t)*size, (uintmax_t)original_size, (uintmax_t)original_size); + return object_handle; +} + +kern_return_t +wrapper_mach_vm_map_named_entry(vm_map_t map, mach_vm_address_t * address, mach_vm_size_t size, int flags) +{ + mach_port_t object_handle = memory_entry(&size); + check_fixed_address(address, size); + kern_return_t kr = mach_vm_map(map, address, size, (mach_vm_offset_t)0, flags, object_handle, (memory_object_offset_t)0, FALSE, + VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT); + T_QUIET; T_ASSERT_MACH_SUCCESS(mach_port_deallocate(mach_task_self(), object_handle), "mach_port_deallocate()"); + return kr; +} + +static struct { + allocate_fn_t allocate; + const char * description; +} allocators[] = { + {wrapper_mach_vm_allocate, "mach_vm_allocate()"}, + {wrapper_mach_vm_map, "mach_vm_map() (zero mask)"}, + {wrapper_mach_vm_map_4kB, + "mach_vm_map() " + "(4 kB address alignment)"}, + {wrapper_mach_vm_map_2MB, + "mach_vm_map() " + "(2 MB address alignment)"}, + {wrapper_mach_vm_map_named_entry, + "mach_vm_map() (named " + "entry, zero mask)"}, +}; +static int numofallocators = sizeof(allocators) / sizeof(allocators[0]); +static int allocators_idx; +enum { MACH_VM_ALLOCATE, MACH_VM_MAP, MACH_VM_MAP_4kB, MACH_VM_MAP_2MB, MACH_VM_MAP_NAMED_ENTRY }; + +/* VM size */ +static struct { + mach_vm_size_t size; + const char * description; +} vm_sizes[] = { + {DEFAULT_VM_SIZE, "default/input"}, + {0, "zero"}, + {4096ULL, "aligned"}, + {1ULL, "unaligned"}, + {4095ULL, "unaligned"}, + {4097ULL, "unaligned"}, +}; +static int numofsizes = sizeof(vm_sizes) / sizeof(vm_sizes[0]); +static int sizes_idx; +static int buffer_sizes_idx; +enum { DEFAULT_INPUT, ZERO_BYTES, ONE_PAGE, ONE_BYTE, ONE_PAGE_MINUS_ONE_BYTE, ONE_PAGE_AND_ONE_BYTE }; + +/* Unspecified/fixed address */ +static struct { + int flag; + const char * description; +} address_flags[] = { + {VM_FLAGS_ANYWHERE, "unspecified"}, {VM_FLAGS_FIXED, "fixed"}, +}; +static int numofflags = sizeof(address_flags) / sizeof(address_flags[0]); +static int flags_idx; +enum { ANYWHERE, FIXED }; + +/* Address alignment */ +static struct { + boolean_t alignment; + const char * description; +} address_alignments[] = { + {TRUE, " aligned"}, {FALSE, " unaligned"}, +}; +static int numofalignments = 
sizeof(address_alignments) / sizeof(*address_alignments); +static int alignments_idx; +enum { ALIGNED, UNALIGNED }; + +/* Buffer offset */ +static struct { + int offset; + const char * description; +} buffer_offsets[] = { + {0, ""}, {1, ""}, {2, ""}, +}; +static int numofoffsets = sizeof(buffer_offsets) / sizeof(buffer_offsets[0]); +static int offsets_idx; +enum { ZERO, ONE, TWO }; + +/* mach_vm_copy() post actions */ +enum { VMCOPY_MODIFY_SRC, VMCOPY_MODIFY_DST, VMCOPY_MODIFY_SHARED_COPIED }; + +static struct { + int action; + const char * description; +} vmcopy_actions[] = { + {VMCOPY_MODIFY_SRC, "modify vm_copy() source"}, + {VMCOPY_MODIFY_DST, "modify vm_copy() destination"}, + {VMCOPY_MODIFY_SHARED_COPIED, + "modify vm_copy source's shared " + "or copied from/to region"}, +}; +static int numofvmcopyactions = sizeof(vmcopy_actions) / sizeof(vmcopy_actions[0]); +static int vmcopy_action_idx; + +/************************************/ +/* Setters and getters for fixtures */ +/************************************/ + +/* Allocation memory range. */ +static allocate_fn_t _allocator = wrapper_mach_vm_allocate; +static mach_vm_size_t _vm_size = DEFAULT_VM_SIZE; +static int _address_flag = VM_FLAGS_ANYWHERE; +static boolean_t _address_alignment = TRUE; +static mach_vm_address_t _vm_address = 0x0; + +/* Buffer for mach_vm_write(). */ +static mach_vm_size_t _buffer_size = DEFAULT_VM_SIZE; +static mach_vm_address_t _buffer_address = 0x0; +static int _buffer_offset = 0; + +/* Post action for mach_vm_copy(). */ +static int _vmcopy_post_action = VMCOPY_MODIFY_SRC; + +static void +set_allocator(allocate_fn_t allocate) +{ + _allocator = allocate; +} + +static allocate_fn_t +get_allocator() +{ + return _allocator; +} + +static void +set_vm_size(mach_vm_size_t size) +{ + _vm_size = size; +} + +static mach_vm_size_t +get_vm_size() +{ + return _vm_size; +} + +static void +set_address_flag(int flag) +{ + _address_flag = flag; +} + +static int +get_address_flag() +{ + return _address_flag; +} + +static void +set_address_alignment(boolean_t alignment) +{ + _address_alignment = alignment; +} + +static boolean_t +get_address_alignment() +{ + return _address_alignment; +} + +static void +set_vm_address(mach_vm_address_t address) +{ + _vm_address = address; +} + +static mach_vm_address_t +get_vm_address() +{ + return _vm_address; +} + +static void +set_buffer_size(mach_vm_size_t size) +{ + _buffer_size = size; +} + +static mach_vm_size_t +get_buffer_size() +{ + return _buffer_size; +} + +static void +set_buffer_address(mach_vm_address_t address) +{ + _buffer_address = address; +} + +static mach_vm_address_t +get_buffer_address() +{ + return _buffer_address; +} + +static void +set_buffer_offset(int offset) +{ + _buffer_offset = offset; +} + +static int +get_buffer_offset() +{ + return _buffer_offset; +} + +static void +set_vmcopy_post_action(int action) +{ + _vmcopy_post_action = action; +} + +static int +get_vmcopy_post_action() +{ + return _vmcopy_post_action; +} + +/*******************************/ +/* Usage and option processing */ +/*******************************/ +static boolean_t flag_run_allocate_test = FALSE; +static boolean_t flag_run_deallocate_test = FALSE; +static boolean_t flag_run_read_test = FALSE; +static boolean_t flag_run_write_test = FALSE; +static boolean_t flag_run_protect_test = FALSE; +static boolean_t flag_run_copy_test = FALSE; + +#define VM_TEST_ALLOCATE 0x00000001 +#define VM_TEST_DEALLOCATE 0x00000002 +#define VM_TEST_READ 0x00000004 +#define VM_TEST_WRITE 0x00000008 +#define 
VM_TEST_PROTECT 0x00000010 +#define VM_TEST_COPY 0x00000020 + +typedef struct test_option { + uint32_t to_flags; + int to_quietness; + mach_vm_size_t to_vmsize; +} test_option_t; + +typedef struct test_info { + char *ti_name; + boolean_t *ti_flag; +} test_info_t; + +static test_option_t test_options; + +enum {ALLOCATE = 0, DEALLOCATE, READ, WRITE, PROTECT, COPY}; + +static test_info_t test_info[] = { + {"allocate", &flag_run_allocate_test}, + {"deallocate", &flag_run_deallocate_test}, + {"read", &flag_run_read_test}, + {"write", &flag_run_write_test}, + {"protect", &flag_run_protect_test}, + {"copy", &flag_run_copy_test}, + {NULL, NULL} +}; + +static void +die_on_invalid_value(int condition, const char * value_string) +{ + T_QUIET; + T_ASSERT_EQ(condition, 0, "%s: invalid value: %s.", + progname, value_string); +} + +static void +process_options(test_option_t options) +{ + test_info_t *tp; + + setvbuf(stdout, NULL, _IONBF, 0); + + set_vm_size(DEFAULT_VM_SIZE); + set_quietness(DEFAULT_QUIETNESS); + + if (NULL != getenv("LTERDOS")) { + logr("LTERDOS=YES this is LeanTestEnvironment\nIncreasing quietness by 1."); + set_quietness(get_quietness() + 1); + } else { + if (options.to_quietness > 0) { + set_quietness(options.to_quietness); + } + } + + if (options.to_vmsize != 0) { + vm_sizes[0].size = options.to_vmsize; + } + + if (options.to_flags == 0) { + for (tp = test_info; tp->ti_name != NULL; ++tp) { + *tp->ti_flag = TRUE; + } + } else { + if (options.to_flags & VM_TEST_ALLOCATE) { + *(test_info[ALLOCATE].ti_flag) = TRUE; + } + + if (options.to_flags & VM_TEST_DEALLOCATE) { + *(test_info[DEALLOCATE].ti_flag) = TRUE; + } + + if (options.to_flags & VM_TEST_READ) { + *(test_info[READ].ti_flag) = TRUE; + } + + if (options.to_flags & VM_TEST_WRITE) { + *(test_info[WRITE].ti_flag) = TRUE; + } + + if (options.to_flags & VM_TEST_PROTECT) { + *(test_info[PROTECT].ti_flag) = TRUE; + } + + if (options.to_flags & VM_TEST_COPY) { + *(test_info[COPY].ti_flag) = TRUE; + } + } +} + +/*****************/ +/* Various tools */ +/*****************/ + +/* Find the allocator address alignment mask. */ +mach_vm_address_t +get_mask() +{ + mach_vm_address_t mask; + + if (get_allocator() == wrapper_mach_vm_map_2MB) { + mask = (mach_vm_address_t)0x1FFFFF; + } else { + mask = vm_page_size - 1; + } + return mask; +} + +/* Find the size of the smallest aligned region containing a given + * memory range. */ +mach_vm_size_t +aligned_size(mach_vm_address_t address, mach_vm_size_t size) +{ + return round_page_kernel(address - mach_vm_trunc_page(address) + size); +} + +/********************/ +/* Assert functions */ +/********************/ + +/* Address is aligned on allocator boundary. */ +static inline void +assert_aligned_address(mach_vm_address_t address) +{ + T_QUIET; T_ASSERT_EQ((address & get_mask()), 0, + "Address 0x%jx is unexpectedly " + "unaligned.", + (uintmax_t)address); +} + +/* Address is truncated to allocator boundary. 
*/ +static inline void +assert_trunc_address(mach_vm_address_t address, mach_vm_address_t trunc_address) +{ + T_QUIET; T_ASSERT_EQ(trunc_address, (address & ~get_mask()), + "Address " + "0x%jx is unexpectedly not truncated to address 0x%jx.", + (uintmax_t)address, (uintmax_t)trunc_address); +} + +static inline void +assert_address_value(mach_vm_address_t address, mach_vm_address_t marker) +{ + /* this assert is used so frequently so that we simply judge on + * its own instead of leaving this to LD macro for efficiency + */ + if (MACH_VM_ADDRESS_T(address) != marker) { + T_ASSERT_FAIL("Address 0x%jx unexpectedly has value 0x%jx, " + "instead of 0x%jx.", (uintmax_t)address, + (uintmax_t)MACH_VM_ADDRESS_T(address), (uintmax_t)marker); + } +} + +void +assert_allocate_return(mach_vm_address_t * address, mach_vm_size_t size, int address_flag, kern_return_t expected_kr) +{ + assert_mach_return(get_allocator()(mach_task_self(), address, size, address_flag), expected_kr, "Allocator"); +} + +void +assert_allocate_success(mach_vm_address_t * address, mach_vm_size_t size, int address_flag) +{ + assert_allocate_return(address, size, address_flag, KERN_SUCCESS); +} + +void +assert_deallocate_return(mach_vm_address_t address, mach_vm_size_t size, kern_return_t expected_kr) +{ + assert_mach_return(mach_vm_deallocate(mach_task_self(), address, size), expected_kr, "mach_vm_deallocate()"); +} + +void +assert_deallocate_success(mach_vm_address_t address, mach_vm_size_t size) +{ + assert_deallocate_return(address, size, KERN_SUCCESS); +} + +void +assert_read_return(mach_vm_address_t address, + mach_vm_size_t size, + vm_offset_t * data, + mach_msg_type_number_t * data_size, + kern_return_t expected_kr) +{ + assert_mach_return(mach_vm_read(mach_task_self(), address, size, data, data_size), expected_kr, "mach_vm_read()"); +} + +void +assert_read_success(mach_vm_address_t address, mach_vm_size_t size, vm_offset_t * data, mach_msg_type_number_t * data_size) +{ + assert_read_return(address, size, data, data_size, KERN_SUCCESS); + T_QUIET; T_ASSERT_EQ(*data_size, size, + "Returned buffer size 0x%jx " + "(%ju) is unexpectedly different from source size 0x%jx " + "(%ju).", + (uintmax_t)*data_size, (uintmax_t)*data_size, (uintmax_t)size, (uintmax_t)size); +} + +void +assert_write_return(mach_vm_address_t address, vm_offset_t data, mach_msg_type_number_t data_size, kern_return_t expected_kr) +{ + assert_mach_return(mach_vm_write(mach_task_self(), address, data, data_size), expected_kr, "mach_vm_write()"); +} + +void +assert_write_success(mach_vm_address_t address, vm_offset_t data, mach_msg_type_number_t data_size) +{ + assert_write_return(address, data, data_size, KERN_SUCCESS); +} + +void +assert_allocate_copy_return(mach_vm_address_t source, mach_vm_size_t size, mach_vm_address_t * dest, kern_return_t expected_kr) +{ + assert_allocate_success(dest, size, VM_FLAGS_ANYWHERE); + assert_mach_return(mach_vm_copy(mach_task_self(), source, size, *dest), expected_kr, "mach_vm_copy()"); +} +void +assert_allocate_copy_success(mach_vm_address_t source, mach_vm_size_t size, mach_vm_address_t * dest) +{ + assert_allocate_copy_return(source, size, dest, KERN_SUCCESS); +} + +void +assert_copy_return(mach_vm_address_t source, mach_vm_size_t size, mach_vm_address_t dest, kern_return_t expected_kr) +{ + assert_mach_return(mach_vm_copy(mach_task_self(), source, size, dest), expected_kr, "mach_vm_copy()"); +} + +void +assert_copy_success(mach_vm_address_t source, mach_vm_size_t size, mach_vm_address_t dest) +{ + 
assert_copy_return(source, size, dest, KERN_SUCCESS); +} + +/*******************/ +/* Memory patterns */ +/*******************/ + +typedef boolean_t (*address_filter_t)(mach_vm_address_t); +typedef void (*address_action_t)(mach_vm_address_t, mach_vm_address_t); + +/* Map over a memory region pattern and its complement, through a + * (possibly reversed) boolean filter and a starting value. */ +void +filter_addresses_do_else(address_filter_t filter, + boolean_t reversed, + mach_vm_address_t address, + mach_vm_size_t size, + address_action_t if_action, + address_action_t else_action, + mach_vm_address_t start_value) +{ + mach_vm_address_t i; + for (i = 0; i + vm_address_size < size; i += vm_address_size) { + if (filter(address + i) != reversed) { + if_action(address + i, start_value + i); + } else { + else_action(address + i, start_value + i); + } + } +} + +/* Various pattern actions. */ +void +no_action(mach_vm_address_t i, mach_vm_address_t value) +{ +} + +void +read_zero(mach_vm_address_t i, mach_vm_address_t value) +{ + assert_address_value(i, 0); +} + +void +verify_address(mach_vm_address_t i, mach_vm_address_t value) +{ + assert_address_value(i, value); +} + +void +write_address(mach_vm_address_t i, mach_vm_address_t value) +{ + MACH_VM_ADDRESS_T(i) = value; +} + +/* Various patterns. */ +boolean_t +empty(mach_vm_address_t i) +{ + return FALSE; +} + +boolean_t +checkerboard(mach_vm_address_t i) +{ + return !((i / vm_address_size) & 0x1); +} + +boolean_t +page_ends(mach_vm_address_t i) +{ + mach_vm_address_t residue = i % vm_page_size; + + return residue == 0 || residue == vm_page_size - vm_address_size; +} + +/*************************************/ +/* Global variables set up functions */ +/*************************************/ + +void +set_up_allocator() +{ + T_QUIET; T_ASSERT_TRUE(allocators_idx >= 0 && allocators_idx < numofallocators, "Invalid allocators[] index: %d.", allocators_idx); + set_allocator(allocators[allocators_idx].allocate); +} + +/* Find a fixed allocatable address by retrieving the address + * populated by mach_vm_allocate() with VM_FLAGS_ANYWHERE. */ +mach_vm_address_t +get_fixed_address(mach_vm_size_t size) +{ + /* mach_vm_map() starts looking for an address at 0x0. */ + mach_vm_address_t address = 0x0; + + /* + * The tests seem to have some funky off by one allocations. To avoid problems, we'll bump anything + * non-zero to have at least an extra couple pages. + */ + if (size != 0) { + size = round_page_kernel(size + 2 * vm_page_size); + } + + assert_allocate_success(&address, size, VM_FLAGS_ANYWHERE); + + /* + * Keep the memory allocated, otherwise the logv()/printf() activity sprinkled in these tests can + * cause malloc() to use the desired range and tests will randomly fail. The allocate routines will + * do the delayed vm_deallocate() to free the fixed memory just before allocation testing in the wrapper. + */ + T_QUIET; T_ASSERT_EQ(fixed_vm_address, 0, "previous fixed address not used"); + T_QUIET; T_ASSERT_EQ(fixed_vm_size, 0, "previous fixed size not used"); + fixed_vm_address = address; + fixed_vm_size = size; + + assert_aligned_address(address); + return address; +} + +/* If needed, find an address at which a region of the specified size + * can be allocated. Otherwise, set the address to 0x0. 
*/ +void +set_up_vm_address(mach_vm_size_t size) +{ + T_QUIET; T_ASSERT_TRUE(flags_idx >= 0 && flags_idx < numofflags, "Invalid address_flags[] index: %d.", flags_idx); + T_QUIET; T_ASSERT_TRUE(alignments_idx >= 0 && alignments_idx < numofalignments, "Invalid address_alignments[] index: %d.", alignments_idx); + set_address_flag(address_flags[flags_idx].flag); + set_address_alignment(address_alignments[alignments_idx].alignment); + + if (!(get_address_flag() & VM_FLAGS_ANYWHERE)) { + boolean_t aligned = get_address_alignment(); + logv( + "Looking for fixed %saligned address for allocation " + "of 0x%jx (%ju) byte%s...", + aligned ? "" : "un", (uintmax_t)size, (uintmax_t)size, (size == 1) ? "" : "s"); + mach_vm_address_t address = get_fixed_address(size); + if (!aligned) { + address++; + } + set_vm_address(address); + logv("Found %saligned fixed address 0x%jx.", aligned ? "" : "un", (uintmax_t)address); + } else { + /* mach_vm_map() with VM_FLAGS_ANYWHERE starts looking for + * an address at the one supplied and goes up, without + * wrapping around. */ + set_vm_address(0x0); + } +} + +void +set_up_vm_size() +{ + T_QUIET; T_ASSERT_TRUE(sizes_idx >= 0 && sizes_idx < numofsizes, "Invalid vm_sizes[] index: %d.", sizes_idx); + set_vm_size(vm_sizes[sizes_idx].size); +} + +void +set_up_buffer_size() +{ + T_QUIET; T_ASSERT_TRUE(buffer_sizes_idx >= 0 && buffer_sizes_idx < numofsizes, "Invalid vm_sizes[] index: %d.", buffer_sizes_idx); + set_buffer_size(vm_sizes[buffer_sizes_idx].size); +} + +void +set_up_buffer_offset() +{ + T_QUIET; T_ASSERT_TRUE(offsets_idx >= 0 && offsets_idx < numofoffsets, "Invalid buffer_offsets[] index: %d.", offsets_idx); + set_buffer_offset(buffer_offsets[offsets_idx].offset); +} + +void +set_up_vmcopy_action() +{ + T_QUIET; T_ASSERT_TRUE(vmcopy_action_idx >= 0 && vmcopy_action_idx < numofvmcopyactions, "Invalid vmcopy_actions[] index: %d.", + vmcopy_action_idx); + set_vmcopy_post_action(vmcopy_actions[vmcopy_action_idx].action); +} + +void +set_up_allocator_and_vm_size() +{ + set_up_allocator(); + set_up_vm_size(); +} + +void +set_up_vm_variables() +{ + set_up_vm_size(); + set_up_vm_address(get_vm_size()); +} + +void +set_up_allocator_and_vm_variables() +{ + set_up_allocator(); + set_up_vm_variables(); +} + +void +set_up_buffer_variables() +{ + set_up_buffer_size(); + set_up_buffer_offset(); +} + +void +set_up_copy_shared_mode_variables() +{ + set_up_vmcopy_action(); +} + +/*******************************/ +/* Allocation set up functions */ +/*******************************/ + +/* Allocate VM region of given size. */ +void +allocate(mach_vm_size_t size) +{ + mach_vm_address_t address = get_vm_address(); + int flag = get_address_flag(); + + logv("Allocating 0x%jx (%ju) byte%s", (uintmax_t)size, (uintmax_t)size, (size == 1) ? "" : "s"); + if (!(flag & VM_FLAGS_ANYWHERE)) { + logv(" at address 0x%jx", (uintmax_t)address); + } + logv("..."); + assert_allocate_success(&address, size, flag); + logv( + "Memory of rounded size 0x%jx (%ju) allocated at " + "address 0x%jx.", + (uintmax_t)round_page_kernel(size), (uintmax_t)round_page_kernel(size), (uintmax_t)address); + /* Fixed allocation address is truncated to the allocator + * boundary. 
*/ + if (!(flag & VM_FLAGS_ANYWHERE)) { + mach_vm_address_t old_address = get_vm_address(); + assert_trunc_address(old_address, address); + logv( + "Address 0x%jx is correctly truncated to allocated " + "address 0x%jx.", + (uintmax_t)old_address, (uintmax_t)address); + } + set_vm_address(address); +} + +void +allocate_buffer(mach_vm_size_t buffer_size) +{ + mach_vm_address_t data = 0x0; + + logv("Allocating 0x%jx (%ju) byte%s...", (uintmax_t)buffer_size, (uintmax_t)buffer_size, (buffer_size == 1) ? "" : "s"); + assert_allocate_success(&data, buffer_size, VM_FLAGS_ANYWHERE); + logv( + "Memory of rounded size 0x%jx (%ju) allocated at " + "address 0x%jx.", + (uintmax_t)round_page_kernel(buffer_size), (uintmax_t)round_page_kernel(buffer_size), (uintmax_t)data); + data += get_buffer_offset(); + T_QUIET; T_ASSERT_EQ((vm_offset_t)data, data, + "Address 0x%jx " + "unexpectedly overflows to 0x%jx when cast as " + "vm_offset_t type.", + (uintmax_t)data, (uintmax_t)(vm_offset_t)data); + set_buffer_address(data); +} + +/****************************************************/ +/* Global variables and allocation set up functions */ +/****************************************************/ + +void +set_up_vm_variables_and_allocate() +{ + set_up_vm_variables(); + allocate(get_vm_size()); +} + +void +set_up_allocator_and_vm_variables_and_allocate() +{ + set_up_allocator(); + set_up_vm_variables_and_allocate(); +} + +void +set_up_vm_variables_and_allocate_extra_page() +{ + set_up_vm_size(); + /* Increment the size to insure we get an extra allocated page + * for unaligned start addresses. */ + mach_vm_size_t allocation_size = get_vm_size() + 1; + set_up_vm_address(allocation_size); + + allocate(allocation_size); + /* In the fixed unaligned address case, restore the returned + * (truncated) allocation address to its unaligned value. */ + if (!get_address_alignment()) { + set_vm_address(get_vm_address() + 1); + } +} + +void +set_up_buffer_variables_and_allocate_extra_page() +{ + set_up_buffer_variables(); + /* Increment the size to insure we get an extra allocated page + * for unaligned start addresses. */ + allocate_buffer(get_buffer_size() + get_buffer_offset()); +} + +/* Allocate some destination and buffer memory for subsequent + * writing, including extra pages for non-aligned start addresses. */ +void +set_up_vm_and_buffer_variables_allocate_for_writing() +{ + set_up_vm_variables_and_allocate_extra_page(); + set_up_buffer_variables_and_allocate_extra_page(); +} + +/* Allocate some destination and source regions for subsequent + * copying, including extra pages for non-aligned start addresses. */ +void +set_up_vm_and_buffer_variables_allocate_for_copying() +{ + set_up_vm_and_buffer_variables_allocate_for_writing(); +} + +/************************************/ +/* Deallocation tear down functions */ +/************************************/ + +void +deallocate_range(mach_vm_address_t address, mach_vm_size_t size) +{ + logv("Deallocating 0x%jx (%ju) byte%s at address 0x%jx...", (uintmax_t)size, (uintmax_t)size, (size == 1) ? "" : "s", + (uintmax_t)address); + assert_deallocate_success(address, size); +} + +void +deallocate() +{ + deallocate_range(get_vm_address(), get_vm_size()); +} + +/* Deallocate source memory, including the extra page for unaligned + * start addresses. */ +void +deallocate_extra_page() +{ + /* Set the address and size to their original allocation + * values. 
*/ + deallocate_range(mach_vm_trunc_page(get_vm_address()), get_vm_size() + 1); +} + +/* Deallocate buffer and destination memory for mach_vm_write(), + * including the extra page for unaligned start addresses. */ +void +deallocate_vm_and_buffer() +{ + deallocate_range(mach_vm_trunc_page(get_vm_address()), get_vm_size() + 1); + deallocate_range(mach_vm_trunc_page(get_buffer_address()), get_buffer_size() + get_buffer_offset()); +} + +/***********************************/ +/* mach_vm_read() set up functions */ +/***********************************/ + +/* Read the source memory into a buffer, deallocate the source, set + * the global address and size from the buffer's. */ +void +read_deallocate() +{ + mach_vm_size_t size = get_vm_size(); + mach_vm_address_t address = get_vm_address(); + vm_offset_t read_address; + mach_msg_type_number_t read_size; + + logv("Reading 0x%jx (%ju) byte%s at address 0x%jx...", (uintmax_t)size, (uintmax_t)size, (size == 1) ? "" : "s", + (uintmax_t)address); + assert_read_success(address, size, &read_address, &read_size); + logv( + "Memory of size 0x%jx (%ju) read into buffer of " + "address 0x%jx.", + (uintmax_t)read_size, (uintmax_t)read_size, (uintmax_t)read_address); + /* Deallocate the originally allocated memory, including the + * extra allocated page in + * set_up_vm_variables_and_allocate_extra_page(). */ + deallocate_range(mach_vm_trunc_page(address), size + 1); + + /* Promoting to mach_vm types after checking for overflow, and + * setting the global address from the buffer's. */ + T_QUIET; T_ASSERT_EQ((mach_vm_address_t)read_address, read_address, + "Address 0x%jx unexpectedly overflows to 0x%jx when cast " + "as mach_vm_address_t type.", + (uintmax_t)read_address, (uintmax_t)(mach_vm_address_t)read_address); + T_QUIET; T_ASSERT_EQ((mach_vm_size_t)read_size, read_size, + "Size 0x%jx (%ju) unexpectedly overflows to 0x%jx (%ju) " + "when cast as mach_vm_size_t type.", + (uintmax_t)read_size, (uintmax_t)read_size, (uintmax_t)(mach_vm_size_t)read_size, (uintmax_t)(mach_vm_size_t)read_size); + set_vm_address((mach_vm_address_t)read_address); + set_vm_size((mach_vm_size_t)read_size); +} + +/* Allocate some source memory, read it into a buffer, deallocate the + * source, set the global address and size from the buffer's. */ +void +set_up_vm_variables_allocate_read_deallocate() +{ + set_up_vm_variables_and_allocate_extra_page(); + read_deallocate(); +} + +/************************************/ +/* mach_vm_write() set up functions */ +/************************************/ + +/* Write the buffer into the destination memory. */ +void +write_buffer() +{ + mach_vm_address_t address = get_vm_address(); + vm_offset_t data = (vm_offset_t)get_buffer_address(); + mach_msg_type_number_t buffer_size = (mach_msg_type_number_t)get_buffer_size(); + + logv( + "Writing buffer of address 0x%jx and size 0x%jx (%ju), on " + "memory at address 0x%jx...", + (uintmax_t)data, (uintmax_t)buffer_size, (uintmax_t)buffer_size, (uintmax_t)address); + assert_write_success(address, data, buffer_size); + logv("Buffer written."); +} + +/* Allocate some destination and buffer memory, and write the buffer + * into the destination memory. 
*/ +void +set_up_vm_and_buffer_variables_allocate_write() +{ + set_up_vm_and_buffer_variables_allocate_for_writing(); + write_buffer(); +} + +/***********************************/ +/* mach_vm_copy() set up functions */ +/***********************************/ + +void +copy_deallocate(void) +{ + mach_vm_size_t size = get_vm_size(); + mach_vm_address_t source = get_vm_address(); + mach_vm_address_t dest = 0; + + logv("Copying 0x%jx (%ju) byte%s at address 0x%jx...", (uintmax_t)size, (uintmax_t)size, (size == 1) ? "" : "s", + (uintmax_t)source); + assert_allocate_copy_success(source, size, &dest); + logv( + "Memory of size 0x%jx (%ju) copy into region of " + "address 0x%jx.", + (uintmax_t)size, (uintmax_t)size, (uintmax_t)dest); + /* Deallocate the originally allocated memory, including the + * extra allocated page in + * set_up_vm_variables_and_allocate_extra_page(). */ + deallocate_range(mach_vm_trunc_page(source), size + 1); + /* Promoting to mach_vm types after checking for overflow, and + * setting the global address from the buffer's. */ + T_QUIET; T_ASSERT_EQ((vm_offset_t)dest, dest, + "Address 0x%jx unexpectedly overflows to 0x%jx when cast " + "as mach_vm_address_t type.", + (uintmax_t)dest, (uintmax_t)(vm_offset_t)dest); + set_vm_address(dest); + set_vm_size(size); +} + +/* Copy the source region into the destination region. */ +void +copy_region() +{ + mach_vm_address_t source = get_vm_address(); + mach_vm_address_t dest = get_buffer_address(); + mach_msg_type_number_t size = (mach_msg_type_number_t)get_buffer_size(); + + logv( + "Copying memory region of address 0x%jx and size 0x%jx (%ju), on " + "memory at address 0x%jx...", + (uintmax_t)source, (uintmax_t)size, (uintmax_t)size, (uintmax_t)dest); + assert_copy_success(source, size, dest); + logv("Buffer written."); +} + +/* Allocate some source memory, copy it to another region, deallocate the +* source, set the global address and size from the designation region. */ +void +set_up_vm_variables_allocate_copy_deallocate() +{ + set_up_vm_variables_and_allocate_extra_page(); + copy_deallocate(); +} + +/* Allocate some destination and source memory, and copy the source + * into the destination memory. */ +void +set_up_source_and_dest_variables_allocate_copy() +{ + set_up_vm_and_buffer_variables_allocate_for_copying(); + copy_region(); +} + +/**************************************/ +/* mach_vm_protect() set up functions */ +/**************************************/ + +void +set_up_vm_variables_allocate_protect(vm_prot_t protection, const char * protection_name) +{ + set_up_vm_variables_and_allocate_extra_page(); + mach_vm_size_t size = get_vm_size(); + mach_vm_address_t address = get_vm_address(); + + logv( + "Setting %s-protection on 0x%jx (%ju) byte%s at address " + "0x%jx...", + protection_name, (uintmax_t)size, (uintmax_t)size, (size == 1) ? "" : "s", (uintmax_t)address); + T_QUIET; T_ASSERT_MACH_SUCCESS(mach_vm_protect(mach_task_self(), address, size, FALSE, protection), "mach_vm_protect()"); + logv("Region %s-protected.", protection_name); +} + +void +set_up_vm_variables_allocate_readprotect() +{ + set_up_vm_variables_allocate_protect(VM_PROT_WRITE, "read"); +} + +void +set_up_vm_variables_allocate_writeprotect() +{ + set_up_vm_variables_allocate_protect(VM_PROT_READ, "write"); +} + +/*****************/ +/* Address tests */ +/*****************/ + +/* Allocated address is nonzero iff size is nonzero. 
*/ +void +test_nonzero_address_iff_nonzero_size() +{ + mach_vm_address_t address = get_vm_address(); + mach_vm_size_t size = get_vm_size(); + + T_QUIET; T_ASSERT_TRUE((address && size) || (!address && !size), "Address 0x%jx is unexpectedly %szero.", (uintmax_t)address, + address ? "non" : ""); + logv("Address 0x%jx is %szero as expected.", (uintmax_t)address, size ? "non" : ""); +} + +/* Allocated address is aligned. */ +void +test_aligned_address() +{ + mach_vm_address_t address = get_vm_address(); + + assert_aligned_address(address); + logv("Address 0x%jx is aligned.", (uintmax_t)address); +} + +/************************/ +/* Read and write tests */ +/************************/ + +void +verify_pattern( + address_filter_t filter, boolean_t reversed, mach_vm_address_t address, mach_vm_size_t size, const char * pattern_name) +{ + logv( + "Verifying %s pattern on region of address 0x%jx " + "and size 0x%jx (%ju)...", + pattern_name, (uintmax_t)address, (uintmax_t)size, (uintmax_t)size); + filter_addresses_do_else(filter, reversed, address, size, verify_address, read_zero, address); + logv("Pattern verified."); +} + +void +write_pattern( + address_filter_t filter, boolean_t reversed, mach_vm_address_t address, mach_vm_size_t size, const char * pattern_name) +{ + logv( + "Writing %s pattern on region of address 0x%jx " + "and size 0x%jx (%ju)...", + pattern_name, (uintmax_t)address, (uintmax_t)size, (uintmax_t)size); + filter_addresses_do_else(filter, reversed, address, size, write_address, no_action, address); + logv("Pattern writen."); +} + +void +write_and_verify_pattern( + address_filter_t filter, boolean_t reversed, mach_vm_address_t address, mach_vm_size_t size, const char * pattern_name) +{ + logv( + "Writing and verifying %s pattern on region of " + "address 0x%jx and size 0x%jx (%ju)...", + pattern_name, (uintmax_t)address, (uintmax_t)size, (uintmax_t)size); + filter_addresses_do_else(filter, reversed, address, size, write_address, no_action, address); + filter_addresses_do_else(filter, reversed, address, size, verify_address, read_zero, address); + logv("Pattern written and verified."); +} + +/* Verify that the smallest aligned region containing the + * given range is zero-filled. */ +void +test_zero_filled() +{ + verify_pattern(empty, FALSE, mach_vm_trunc_page(get_vm_address()), aligned_size(get_vm_address(), get_vm_size()), + "zero-filled"); +} + +void +test_write_address_filled() +{ + write_and_verify_pattern(empty, TRUE, get_vm_address(), round_page_kernel(get_vm_size()), "address-filled"); +} + +void +test_write_checkerboard() +{ + write_and_verify_pattern(checkerboard, FALSE, get_vm_address(), round_page_kernel(get_vm_size()), "checkerboard"); +} + +void +test_write_reverse_checkerboard() +{ + write_and_verify_pattern(checkerboard, TRUE, get_vm_address(), round_page_kernel(get_vm_size()), "reverse checkerboard"); +} + +void +test_write_page_ends() +{ + write_and_verify_pattern(page_ends, FALSE, get_vm_address(), round_page_kernel(get_vm_size()), "page ends"); +} + +void +test_write_page_interiors() +{ + write_and_verify_pattern(page_ends, TRUE, get_vm_address(), round_page_kernel(get_vm_size()), "page interiors"); +} + +/*********************************/ +/* Allocation error return tests */ +/*********************************/ + +/* Reallocating a page in the smallest aligned region containing the + * given allocated range fails. 
*/ +void +test_reallocate_pages() +{ + allocate_fn_t allocator = get_allocator(); + vm_map_t this_task = mach_task_self(); + mach_vm_address_t address = mach_vm_trunc_page(get_vm_address()); + mach_vm_size_t size = aligned_size(get_vm_address(), get_vm_size()); + mach_vm_address_t i; + kern_return_t kr; + + logv( + "Reallocating pages in allocated region of address 0x%jx " + "and size 0x%jx (%ju)...", + (uintmax_t)address, (uintmax_t)size, (uintmax_t)size); + for (i = address; i < address + size; i += vm_page_size) { + kr = allocator(this_task, &i, vm_page_size, VM_FLAGS_FIXED); + T_QUIET; T_ASSERT_EQ(kr, KERN_NO_SPACE, + "Allocator " + "at address 0x%jx unexpectedly returned: %s.\n" + "Should have returned: %s.", + (uintmax_t)address, mach_error_string(kr), mach_error_string(KERN_NO_SPACE)); + } + logv("Returned expected error at each page: %s.", mach_error_string(KERN_NO_SPACE)); +} + +/* Allocating in VM_MAP_NULL fails. */ +void +test_allocate_in_null_map() +{ + mach_vm_address_t address = get_vm_address(); + mach_vm_size_t size = get_vm_size(); + int flag = get_address_flag(); + + logv("Allocating 0x%jx (%ju) byte%s", (uintmax_t)size, (uintmax_t)size, (size == 1) ? "" : "s"); + if (!(flag & VM_FLAGS_ANYWHERE)) { + logv(" at address 0x%jx", (uintmax_t)address); + } + logv(" in NULL VM map..."); + assert_mach_return(get_allocator()(VM_MAP_NULL, &address, size, flag), MACH_SEND_INVALID_DEST, "Allocator"); + logv("Returned expected error: %s.", mach_error_string(MACH_SEND_INVALID_DEST)); +} + +/* Allocating with non-user flags fails. */ +void +test_allocate_with_kernel_flags() +{ + allocate_fn_t allocator = get_allocator(); + vm_map_t this_task = mach_task_self(); + mach_vm_address_t address = get_vm_address(); + mach_vm_size_t size = get_vm_size(); + int flag = get_address_flag(); + int bad_flag, i; + kern_return_t kr; + int kernel_flags[] = {0x100, 0x200, 0x400, 0x800, 0x1000, 0x2000, 0x8000, INT_MAX}; + int numofflags = sizeof(kernel_flags) / sizeof(kernel_flags[0]); + + logv("Allocating 0x%jx (%ju) byte%s", (uintmax_t)size, (uintmax_t)size, (size == 1) ? "" : "s"); + if (!(flag & VM_FLAGS_ANYWHERE)) { + logv(" at address 0x%jx", (uintmax_t)address); + } + logv(" with various kernel flags..."); + for (i = 0; i < numofflags; i++) { + bad_flag = kernel_flags[i] | flag; + kr = allocator(this_task, &address, size, bad_flag); + T_QUIET; T_ASSERT_EQ(kr, KERN_INVALID_ARGUMENT, + "Allocator " + "with kernel flag 0x%x unexpectedly returned: %s.\n" + "Should have returned: %s.", + bad_flag, mach_error_string(kr), mach_error_string(KERN_INVALID_ARGUMENT)); + } + logv("Returned expected error with each kernel flag: %s.", mach_error_string(KERN_INVALID_ARGUMENT)); +} + +/*****************************/ +/* mach_vm_map() error tests */ +/*****************************/ + +/* mach_vm_map() fails with invalid protection or inheritance + * arguments. */ +void +test_mach_vm_map_protection_inheritance_error() +{ + kern_return_t kr; + vm_map_t my_task = mach_task_self(); + mach_vm_address_t address = get_vm_address(); + mach_vm_size_t size = get_vm_size(); + vm_map_offset_t mask = (get_allocator() == wrapper_mach_vm_map || get_allocator() == wrapper_mach_vm_map_named_entry) + ? (mach_vm_offset_t)0 + : (mach_vm_offset_t)get_mask(); + int flag = get_address_flag(); + mach_port_t object_handle = (get_allocator() == wrapper_mach_vm_map_named_entry) ? 
memory_entry(&size) : MACH_PORT_NULL; + vm_prot_t cur_protections[] = {VM_PROT_DEFAULT, VM_PROT_ALL + 1, ~VM_PROT_IS_MASK, INT_MAX}; + vm_prot_t max_protections[] = {VM_PROT_ALL, VM_PROT_ALL + 1, ~VM_PROT_IS_MASK, INT_MAX}; + vm_inherit_t inheritances[] = {VM_INHERIT_DEFAULT, VM_INHERIT_LAST_VALID + 1, UINT_MAX}; + int i, j, k; + + logv("Allocating 0x%jx (%ju) byte%s", (uintmax_t)size, (uintmax_t)size, (size == 1) ? "" : "s"); + if (!(flag & VM_FLAGS_ANYWHERE)) { + logv(" at address 0x%jx", (uintmax_t)address); + } + logv( + " with various invalid protection/inheritance " + "arguments..."); + + for (i = 0; i < 4; i++) { + for (j = 0; j < 4; j++) { + for (k = 0; k < 3; k++) { + /* Skip the case with all valid arguments. */ + if (i == (j == (k == 0))) { + continue; + } + kr = mach_vm_map(my_task, &address, size, mask, flag, object_handle, (memory_object_offset_t)0, FALSE, + cur_protections[i], max_protections[j], inheritances[k]); + T_QUIET; T_ASSERT_EQ(kr, KERN_INVALID_ARGUMENT, + "mach_vm_map() " + "with cur_protection 0x%x, max_protection 0x%x, " + "inheritance 0x%x unexpectedly returned: %s.\n" + "Should have returned: %s.", + cur_protections[i], max_protections[j], inheritances[k], mach_error_string(kr), + mach_error_string(KERN_INVALID_ARGUMENT)); + } + } + } + logv("Returned expected error in each case: %s.", mach_error_string(KERN_INVALID_ARGUMENT)); +} + +/* mach_vm_map() with unspecified address fails if the starting + * address overflows when rounded up to a boundary value. */ +void +test_mach_vm_map_large_mask_overflow_error() +{ + mach_vm_address_t address = 0x1; + mach_vm_size_t size = get_vm_size(); + mach_vm_offset_t mask = (mach_vm_offset_t)UINTMAX_MAX; + /* mach_vm_map() cannot allocate 0 bytes at an unspecified + * address, see 8003930. */ + kern_return_t kr_expected = size ? KERN_NO_SPACE : KERN_INVALID_ARGUMENT; + + logv( + "Allocating 0x%jx (%ju) byte%s at an unspecified address " + "starting at 0x%jx with mask 0x%jx...", + (uintmax_t)size, (uintmax_t)size, (size == 1) ? "" : "s", (uintmax_t)address, (uintmax_t)mask); + assert_mach_return(mach_vm_map(mach_task_self(), &address, size, mask, VM_FLAGS_ANYWHERE, MACH_PORT_NULL, + (memory_object_offset_t)0, FALSE, VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT), + kr_expected, "mach_vm_map()"); + logv("Returned expected error: %s.", mach_error_string(kr_expected)); +} + +/************************/ +/* Size edge case tests */ +/************************/ + +void +allocate_edge_size(mach_vm_address_t * address, mach_vm_size_t size, kern_return_t expected_kr) +{ + logv("Allocating 0x%jx (%ju) bytes...", (uintmax_t)size, (uintmax_t)size); + assert_allocate_return(address, size, VM_FLAGS_ANYWHERE, expected_kr); + logv("Returned expected value: %s.", mach_error_string(expected_kr)); +} + +void +test_allocate_zero_size() +{ + mach_vm_address_t address = 0x0; + /* mach_vm_map() cannot allocate 0 bytes at an unspecified + * address, see 8003930. Other allocators succeed. */ + kern_return_t kr_expected = (get_allocator() != wrapper_mach_vm_allocate) ? KERN_INVALID_ARGUMENT : KERN_SUCCESS; + + allocate_edge_size(&address, 0, kr_expected); + if (kr_expected == KERN_SUCCESS) { + deallocate_range(address, 0); + } +} + +/* Testing the allocation of the largest size that does not overflow + * when rounded up to a page-aligned value. 
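 * (Editor's note: an illustrative example follows this comment.)
 */

/*
 * Editorial illustration, not part of the xnu diff. A minimal sketch of a
 * mach_vm_map() call using the one protection/inheritance combination the
 * error test above treats as fully valid. The example_*() name is
 * hypothetical; the mach_vm_map() signature and flags are real Mach API.
 */
#include <mach/mach.h>
#include <mach/mach_vm.h>

static kern_return_t
example_map_anonymous(mach_vm_address_t *out_addr, mach_vm_size_t size)
{
    *out_addr = 0;
    return mach_vm_map(mach_task_self(), out_addr, size,
        (mach_vm_offset_t)0,     /* no alignment mask */
        VM_FLAGS_ANYWHERE,       /* kernel picks the address */
        MACH_PORT_NULL,          /* anonymous: no named entry backing it */
        0,                       /* offset into the backing object */
        FALSE,                   /* no copy semantics */
        VM_PROT_DEFAULT,         /* current protection */
        VM_PROT_ALL,             /* maximum protection */
        VM_INHERIT_DEFAULT);
}

/* End of editorial sketch; the patch resumes below.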
*/ +void +test_allocate_invalid_large_size() +{ + mach_vm_size_t size = (mach_vm_size_t)UINTMAX_MAX - vm_page_size + 1; + if (get_allocator() != wrapper_mach_vm_map_named_entry) { + mach_vm_address_t address = 0x0; + allocate_edge_size(&address, size, KERN_NO_SPACE); + } else { + /* Named entries cannot currently be bigger than 4 GB + * - 4 kb. */ + mach_port_t object_handle = MACH_PORT_NULL; + logv("Creating named entry of 0x%jx (%ju) bytes...", (uintmax_t)size, (uintmax_t)size); + assert_mach_return(mach_make_memory_entry_64(mach_task_self(), &size, (memory_object_offset_t)0, + (MAP_MEM_NAMED_CREATE | VM_PROT_ALL), &object_handle, 0), + KERN_FAILURE, "mach_make_memory_entry_64()"); + logv("Returned expected error: %s.", mach_error_string(KERN_FAILURE)); + } +} + +/* A UINTMAX_MAX VM size will overflow to 0 when rounded up to a + * page-aligned value. */ +void +test_allocate_overflowing_size() +{ + mach_vm_address_t address = 0x0; + + allocate_edge_size(&address, (mach_vm_size_t)UINTMAX_MAX, KERN_INVALID_ARGUMENT); +} + +/****************************/ +/* Address allocation tests */ +/****************************/ + +/* Allocation at address zero fails iff size is nonzero. */ +void +test_allocate_at_zero() +{ + mach_vm_address_t address = 0x0; + mach_vm_size_t size = get_vm_size(); + + kern_return_t kr_expected = + size ? KERN_INVALID_ADDRESS : (get_allocator() != wrapper_mach_vm_allocate) ? KERN_INVALID_ARGUMENT : KERN_SUCCESS; + + logv("Allocating 0x%jx (%ju) byte%s at address 0x0...", (uintmax_t)size, (uintmax_t)size, (size == 1) ? "" : "s"); + assert_allocate_return(&address, size, VM_FLAGS_FIXED, kr_expected); + logv("Returned expected value: %s.", mach_error_string(kr_expected)); + if (kr_expected == KERN_SUCCESS) { + T_QUIET; T_ASSERT_EQ(address, 0, + "Address 0x%jx is unexpectedly " + "nonzero.\n", + (uintmax_t)address); + logv("Allocated address 0x%jx is zero.", (uintmax_t)address); + deallocate_range(address, size); + } +} + +/* Allocation at page-aligned but 2 MB boundary-unaligned address + * fails with KERN_NO_SPACE. */ +void +test_allocate_2MB_boundary_unaligned_page_aligned_address() +{ + mach_vm_size_t size = get_vm_size(); + + mach_vm_address_t address = get_fixed_address(size + vm_page_size) + vm_page_size; + logv( + "Found 2 MB boundary-unaligned, page aligned address " + "0x%jx.", + (uintmax_t)address); + + /* mach_vm_allocate() cannot allocate 0 bytes, and fails with a + * fixed boundary-unaligned truncated address. */ + kern_return_t kr_expected = (!size && get_allocator() != wrapper_mach_vm_allocate) + ? KERN_INVALID_ARGUMENT + : (get_allocator() == wrapper_mach_vm_map_2MB) ? KERN_NO_SPACE : KERN_SUCCESS; + logv("Allocating 0x%jx (%ju) byte%s at address 0x%jx...", (uintmax_t)size, (uintmax_t)size, (size == 1) ? "" : "s", + (uintmax_t)address); + assert_allocate_return(&address, size, VM_FLAGS_FIXED, kr_expected); + logv("Returned expected value: %s.", mach_error_string(kr_expected)); + if (kr_expected == KERN_SUCCESS) { + deallocate_range(address, size); + } +} + +/* With VM_FLAGS_ANYWHERE set, mach_vm_allocate() starts looking for + * an allocation address at 0x0, while mach_vm_map() starts at the + * supplied address and does not wrap around. See 8016663. */ +void +test_allocate_page_with_highest_address_hint() +{ + /* Highest valid page-aligned address. 
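 * (Editor's note: an illustrative example follows this comment.)
 */

/*
 * Editorial illustration, not part of the xnu diff. A minimal sketch of the
 * VM_FLAGS_FIXED versus VM_FLAGS_ANYWHERE distinction the address tests
 * above depend on: FIXED treats the supplied address as a requirement, while
 * ANYWHERE treats it only as a starting hint. The example_*() name is
 * hypothetical.
 */
#include <mach/mach.h>
#include <mach/mach_vm.h>

static kern_return_t
example_fixed_then_anywhere(mach_vm_address_t preferred)
{
    vm_map_t          task = mach_task_self();
    mach_vm_address_t addr = preferred;
    kern_return_t     kr;

    kr = mach_vm_allocate(task, &addr, vm_page_size, VM_FLAGS_FIXED);
    if (kr != KERN_SUCCESS) {
        /* The exact address was unavailable; retry with it as a hint only. */
        addr = preferred;
        kr = mach_vm_allocate(task, &addr, vm_page_size, VM_FLAGS_ANYWHERE);
    }
    if (kr == KERN_SUCCESS) {
        (void)mach_vm_deallocate(task, addr, vm_page_size);
    }
    return kr;
}

/* End of editorial sketch; the patch resumes below.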
*/ + mach_vm_address_t address = (mach_vm_size_t)UINTMAX_MAX - vm_page_size + 1; + + logv( + "Allocating one page with unspecified address, but hint at " + "0x%jx...", + (uintmax_t)address); + if (get_allocator() == wrapper_mach_vm_allocate) { + /* mach_vm_allocate() starts from 0x0 and succeeds. */ + assert_allocate_success(&address, vm_page_size, VM_FLAGS_ANYWHERE); + logv("Memory allocated at address 0x%jx.", (uintmax_t)address); + assert_aligned_address(address); + deallocate_range(address, vm_page_size); + } else { + /* mach_vm_map() starts from the supplied address, and fails + * with KERN_NO_SPACE, see 8016663. */ + assert_allocate_return(&address, vm_page_size, VM_FLAGS_ANYWHERE, KERN_NO_SPACE); + logv("Returned expected error: %s.", mach_error_string(KERN_NO_SPACE)); + } +} + +/* Allocators find an allocation address with a first fit strategy. */ +void +test_allocate_first_fit_pages() +{ + allocate_fn_t allocator = get_allocator(); + mach_vm_address_t address1 = 0x0; + mach_vm_address_t i; + kern_return_t kr; + vm_map_t this_task = mach_task_self(); + + logv( + "Looking for first fit address for allocating one " + "page..."); + assert_allocate_success(&address1, vm_page_size, VM_FLAGS_ANYWHERE); + logv("Found address 0x%jx.", (uintmax_t)address1); + assert_aligned_address(address1); + mach_vm_address_t address2 = address1; + logv( + "Looking for next higher first fit address for allocating " + "one page..."); + assert_allocate_success(&address2, vm_page_size, VM_FLAGS_ANYWHERE); + logv("Found address 0x%jx.", (uintmax_t)address2); + assert_aligned_address(address2); + T_QUIET; T_ASSERT_GT(address2, address1, + "Second address 0x%jx is " + "unexpectedly not higher than first address 0x%jx.", + (uintmax_t)address2, (uintmax_t)address1); + + logv("Allocating pages between 0x%jx and 0x%jx...", (uintmax_t)address1, (uintmax_t)address2); + for (i = address1; i <= address2; i += vm_page_size) { + kr = allocator(this_task, &i, vm_page_size, VM_FLAGS_FIXED); + T_QUIET; T_ASSERT_NE(kr, KERN_SUCCESS, + "Allocator at address 0x%jx " + "unexpectedly succeeded.", + (uintmax_t)i); + } + logv("Expectedly returned error at each page."); + deallocate_range(address1, vm_page_size); + deallocate_range(address2, vm_page_size); +} + +/*******************************/ +/* Deallocation segfault tests */ +/*******************************/ + +/* mach_vm_deallocate() deallocates the smallest aligned region + * (integral number of pages) containing the given range. */ + +/* Addresses in deallocated range are inaccessible. */ +void +access_deallocated_range_address(mach_vm_address_t address, const char * position) +{ + logv("Will deallocate and read from %s 0x%jx of deallocated range...", position, (uintmax_t)address); + deallocate(); + mach_vm_address_t bad_value = MACH_VM_ADDRESS_T(address); + T_ASSERT_FAIL("Unexpectedly read value 0x%jx at address 0x%jx.\n" + "Should have died with signal SIGSEGV.", + (uintmax_t)bad_value, (uintmax_t)address); +} + +/* Start of deallocated range is inaccessible. */ +void +test_access_deallocated_range_start() +{ + access_deallocated_range_address(get_vm_address(), "start"); +} + +/* Middle of deallocated range is inaccessible. */ +void +test_access_deallocated_range_middle() +{ + access_deallocated_range_address(get_vm_address() + (round_page_kernel(get_vm_size()) >> 1), "middle"); +} + +/* End of deallocated range is inaccessible. 
*/ +void +test_access_deallocated_range_end() +{ + access_deallocated_range_address(round_page_kernel(get_vm_size()) - vm_address_size + get_vm_address(), "end"); +} + +/* Deallocating almost the whole address space causes a SIGSEGV or SIGBUS. We + * deallocate the largest valid aligned size to avoid overflowing when + * rounding up. */ +void +test_deallocate_suicide() +{ + mach_vm_address_t address = 0x0; + mach_vm_size_t size = (mach_vm_size_t)UINTMAX_MAX - vm_page_size + 1; + + logv("Deallocating 0x%jx (%ju) bytes at address 0x%jx...", (uintmax_t)size, (uintmax_t)size, (uintmax_t)address); + kern_return_t kr = mach_vm_deallocate(mach_task_self(), address, size); + T_ASSERT_FAIL("mach_vm_deallocate() with address 0x%jx and " + "size 0x%jx (%ju) unexpectedly returned: %s.\n" + "Should have died with signal SIGSEGV or SIGBUS.", + (uintmax_t)address, (uintmax_t)size, (uintmax_t)size, mach_error_string(kr)); +} + +/***************************************/ +/* Deallocation and reallocation tests */ +/***************************************/ + +/* Deallocating memory twice succeeds. */ +void +test_deallocate_twice() +{ + deallocate(); + deallocate(); +} + +/* Deallocated and reallocated memory is zero-filled. Deallocated + * memory is inaccessible since it can be reallocated. */ +void +test_write_pattern_deallocate_reallocate_zero_filled() +{ + mach_vm_address_t address = get_vm_address(); + mach_vm_size_t size = get_vm_size(); + + write_pattern(page_ends, FALSE, address, size, "page ends"); + logv("Deallocating, then Allocating 0x%jx (%ju) byte%s at address 0x%jx...", (uintmax_t)size, (uintmax_t)size, (size == 1) ? "" : "s", + (uintmax_t)address); + deallocate(); + assert_allocate_success(&address, size, VM_FLAGS_FIXED); + logv("Memory allocated."); + verify_pattern(empty, FALSE, address, size, "zero-filled"); + deallocate(); +} + +/********************************/ +/* Deallocation edge case tests */ +/********************************/ + +/* Zero size deallocation always succeeds. */ +void +test_deallocate_zero_size_ranges() +{ + int i; + kern_return_t kr; + vm_map_t this_task = mach_task_self(); + mach_vm_address_t addresses[] = {0x0, + 0x1, + vm_page_size - 1, + vm_page_size, + vm_page_size + 1, + (mach_vm_address_t)UINT_MAX - vm_page_size + 1, + (mach_vm_address_t)UINT_MAX, + (mach_vm_address_t)UINTMAX_MAX - vm_page_size + 1, + (mach_vm_address_t)UINTMAX_MAX}; + int numofaddresses = sizeof(addresses) / sizeof(addresses[0]); + + logv("Deallocating 0x0 (0) bytes at various addresses..."); + for (i = 0; i < numofaddresses; i++) { + kr = mach_vm_deallocate(this_task, addresses[i], 0); + T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "mach_vm_deallocate() at " + "address 0x%jx unexpectedly failed: %s.", + (uintmax_t)addresses[i], mach_error_string(kr)); + } + logv("Deallocations successful."); +} + +/* Deallocation succeeds if the end of the range rounds to 0x0. 
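 * (Editor's note: an illustrative example follows this comment.)
 */

/*
 * Editorial illustration, not part of the xnu diff. A minimal sketch of the
 * page-rounding behaviour assumed by the deallocation tests above:
 * mach_vm_deallocate() unmaps every page overlapping the given range, and
 * deallocating an already-unmapped range still succeeds. The example_*()
 * name is hypothetical.
 */
#include <mach/mach.h>
#include <mach/mach_vm.h>

static kern_return_t
example_deallocate_rounds_to_whole_pages(void)
{
    vm_map_t          task = mach_task_self();
    mach_vm_address_t addr = 0;
    kern_return_t     kr;

    kr = mach_vm_allocate(task, &addr, vm_page_size, VM_FLAGS_ANYWHERE);
    if (kr != KERN_SUCCESS) {
        return kr;
    }
    kr = mach_vm_deallocate(task, addr + 1, 1);  /* unmaps the whole page */
    if (kr == KERN_SUCCESS) {
        /* Deallocating the now-empty range again still returns KERN_SUCCESS. */
        kr = mach_vm_deallocate(task, addr, vm_page_size);
    }
    return kr;
}

/* End of editorial sketch; the patch resumes below.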
*/ +void +test_deallocate_rounded_zero_end_ranges() +{ + int i; + kern_return_t kr; + vm_map_t this_task = mach_task_self(); + struct { + mach_vm_address_t address; + mach_vm_size_t size; + } ranges[] = { + {0x0, (mach_vm_size_t)UINTMAX_MAX}, + {0x0, (mach_vm_size_t)UINTMAX_MAX - vm_page_size + 2}, + {0x1, (mach_vm_size_t)UINTMAX_MAX - 1}, + {0x1, (mach_vm_size_t)UINTMAX_MAX - vm_page_size + 1}, + {0x2, (mach_vm_size_t)UINTMAX_MAX - 2}, + {0x2, (mach_vm_size_t)UINTMAX_MAX - vm_page_size}, + {(mach_vm_address_t)UINTMAX_MAX - vm_page_size + 1, vm_page_size - 1}, + {(mach_vm_address_t)UINTMAX_MAX - vm_page_size + 1, 1}, + {(mach_vm_address_t)UINTMAX_MAX - 1, 1}, + }; + int numofranges = sizeof(ranges) / sizeof(ranges[0]); + + logv( + "Deallocating various memory ranges whose end rounds to " + "0x0..."); + for (i = 0; i < numofranges; i++) { + kr = mach_vm_deallocate(this_task, ranges[i].address, ranges[i].size); + T_QUIET; T_ASSERT_MACH_SUCCESS(kr, + "mach_vm_deallocate() with address 0x%jx and size " + "0x%jx (%ju) unexpectedly returned: %s.\n" + "Should have succeeded.", + (uintmax_t)ranges[i].address, (uintmax_t)ranges[i].size, (uintmax_t)ranges[i].size, mach_error_string(kr)); + } + logv("Deallocations successful."); +} + +/* Deallocating a range wrapped around the address space fails. */ +void +test_deallocate_wrapped_around_ranges() +{ + int i; + kern_return_t kr; + vm_map_t this_task = mach_task_self(); + struct { + mach_vm_address_t address; + mach_vm_size_t size; + } ranges[] = { + {0x1, (mach_vm_size_t)UINTMAX_MAX}, + {vm_page_size, (mach_vm_size_t)UINTMAX_MAX - vm_page_size + 1}, + {(mach_vm_address_t)UINTMAX_MAX - vm_page_size + 1, vm_page_size}, + {(mach_vm_address_t)UINTMAX_MAX, 1}, + }; + int numofranges = sizeof(ranges) / sizeof(ranges[0]); + + logv( + "Deallocating various memory ranges wrapping around the " + "address space..."); + for (i = 0; i < numofranges; i++) { + kr = mach_vm_deallocate(this_task, ranges[i].address, ranges[i].size); + T_QUIET; T_ASSERT_EQ(kr, KERN_INVALID_ARGUMENT, + "mach_vm_deallocate() with address 0x%jx and size " + "0x%jx (%ju) unexpectedly returned: %s.\n" + "Should have returned: %s.", + (uintmax_t)ranges[i].address, (uintmax_t)ranges[i].size, (uintmax_t)ranges[i].size, mach_error_string(kr), + mach_error_string(KERN_INVALID_ARGUMENT)); + } + logv("Returned expected error on each range: %s.", mach_error_string(KERN_INVALID_ARGUMENT)); +} + +/* Deallocating in VM_MAP_NULL fails. */ +void +test_deallocate_in_null_map() +{ + mach_vm_address_t address = get_vm_address(); + mach_vm_size_t size = get_vm_size(); + int flag = get_address_flag(); + + logv("Deallocating 0x%jx (%ju) byte%s", (uintmax_t)size, (uintmax_t)size, (size == 1) ? "" : "s"); + if (!(flag & VM_FLAGS_ANYWHERE)) { + logv(" at address 0x%jx", (uintmax_t)address); + } + logv(" in NULL VM map..."); + assert_mach_return(mach_vm_deallocate(VM_MAP_NULL, address, size), MACH_SEND_INVALID_DEST, "mach_vm_deallocate()"); + logv("Returned expected error: %s.", mach_error_string(MACH_SEND_INVALID_DEST)); +} + +/*****************************/ +/* mach_vm_read() main tests */ +/*****************************/ + +/* Read memory of size less than a page has aligned starting + * address. Otherwise, the destination buffer's starting address has + * the same boundary offset as the source region's. 
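 * (Editor's note: an illustrative example follows this comment.)
 */

/*
 * Editorial illustration, not part of the xnu diff. A minimal sketch of the
 * mach_vm_read() calling convention the tests below exercise: the kernel
 * returns a newly mapped out-of-line copy that the caller owns and must
 * deallocate. The example_*() name is hypothetical.
 */
#include <mach/mach.h>
#include <mach/mach_vm.h>

static kern_return_t
example_read_and_release(mach_vm_address_t src, mach_vm_size_t size)
{
    vm_offset_t            data  = 0;
    mach_msg_type_number_t count = 0;
    kern_return_t          kr;

    kr = mach_vm_read(mach_task_self(), src, size, &data, &count);
    if (kr != KERN_SUCCESS) {
        return kr;
    }
    /* ... use the `count` bytes of copied data at `data` ... */
    (void)mach_vm_deallocate(mach_task_self(), (mach_vm_address_t)data, count);
    return KERN_SUCCESS;
}

/* End of editorial sketch; the patch resumes below.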
*/ +void +test_read_address_offset() +{ + mach_vm_address_t address = get_vm_address(); + mach_vm_size_t size = get_vm_size(); + + if (size < vm_page_size * 2 || get_address_alignment()) { + assert_aligned_address(address); + logv("Buffer address 0x%jx is aligned as expected.", (uintmax_t)address); + } else { + T_QUIET; T_ASSERT_EQ(((address - 1) & (vm_page_size - 1)), 0, + "Buffer " + "address 0x%jx does not have the expected boundary " + "offset of 1.", + (uintmax_t)address); + logv( + "Buffer address 0x%jx has the expected boundary " + "offset of 1.", + (uintmax_t)address); + } +} + +/* Reading from VM_MAP_NULL fails. */ +void +test_read_null_map() +{ + mach_vm_address_t address = get_vm_address(); + mach_vm_size_t size = get_vm_size(); + vm_offset_t read_address; + mach_msg_type_number_t read_size; + + logv( + "Reading 0x%jx (%ju) byte%s at address 0x%jx in NULL VM " + "map...", + (uintmax_t)size, (uintmax_t)size, (size == 1) ? "" : "s", (uintmax_t)address); + assert_mach_return(mach_vm_read(VM_MAP_NULL, address, size, &read_address, &read_size), MACH_SEND_INVALID_DEST, + "mach_vm_read()"); + logv("Returned expected error: %s.", mach_error_string(MACH_SEND_INVALID_DEST)); +} + +/* Reading partially deallocated memory fails. */ +void +test_read_partially_deallocated_range() +{ + mach_vm_address_t address = get_vm_address(); + mach_vm_size_t size = get_vm_size(); + mach_vm_address_t mid_point = mach_vm_trunc_page(address + size / 2); + vm_offset_t read_address; + mach_msg_type_number_t read_size; + + logv("Deallocating a mid-range page at address 0x%jx...", (uintmax_t)mid_point); + assert_deallocate_success(mid_point, vm_page_size); + logv("Page deallocated."); + + logv("Reading 0x%jx (%ju) byte%s at address 0x%jx...", (uintmax_t)size, (uintmax_t)size, (size == 1) ? "" : "s", + (uintmax_t)address); + assert_read_return(address, size, &read_address, &read_size, KERN_INVALID_ADDRESS); + logv("Returned expected error: %s.", mach_error_string(KERN_INVALID_ADDRESS)); +} + +/* Reading partially read-protected memory fails. */ +void +test_read_partially_unreadable_range() +{ + mach_vm_address_t address = get_vm_address(); + mach_vm_size_t size = get_vm_size(); + mach_vm_address_t mid_point = mach_vm_trunc_page(address + size / 2); + vm_offset_t read_address; + mach_msg_type_number_t read_size; + + /* For sizes < msg_ool_size_small, vm_map_copyin_common() uses + * vm_map_copyin_kernel_buffer() to read in the memory, + * returning different errors, see 8182239. */ + kern_return_t kr_expected = (size < vm_page_size * 2) ? KERN_INVALID_ADDRESS : KERN_PROTECTION_FAILURE; + + logv("Read-protecting a mid-range page at address 0x%jx...", (uintmax_t)mid_point); + T_QUIET; T_ASSERT_MACH_SUCCESS(mach_vm_protect(mach_task_self(), mid_point, vm_page_size, FALSE, VM_PROT_WRITE), "mach_vm_protect()"); + logv("Page read-protected."); + + logv("Reading 0x%jx (%ju) byte%s at address 0x%jx...", (uintmax_t)size, (uintmax_t)size, (size == 1) ? 
"" : "s", + (uintmax_t)address); + assert_read_return(address, size, &read_address, &read_size, kr_expected); + logv("Returned expected error: %s.", mach_error_string(kr_expected)); +} + +/**********************************/ +/* mach_vm_read() edge case tests */ +/**********************************/ + +void +read_edge_size(mach_vm_size_t size, kern_return_t expected_kr) +{ + int i; + kern_return_t kr; + vm_map_t this_task = mach_task_self(); + mach_vm_address_t addresses[] = {vm_page_size - 1, + vm_page_size, + vm_page_size + 1, + (mach_vm_address_t)UINT_MAX - vm_page_size + 1, + (mach_vm_address_t)UINT_MAX, + (mach_vm_address_t)UINTMAX_MAX - vm_page_size + 1, + (mach_vm_address_t)UINTMAX_MAX}; + int numofaddresses = sizeof(addresses) / sizeof(addresses[0]); + vm_offset_t read_address; + mach_msg_type_number_t read_size; + + logv("Reading 0x%jx (%ju) bytes at various addresses...", (uintmax_t)size, (uintmax_t)size); + for (i = 0; i < numofaddresses; i++) { + kr = mach_vm_read(this_task, addresses[i], size, &read_address, &read_size); + T_QUIET; T_ASSERT_EQ(kr, expected_kr, + "mach_vm_read() at " + "address 0x%jx unexpectedly returned: %s.\n" + "Should have returned: %s.", + (uintmax_t)addresses[i], mach_error_string(kr), mach_error_string(expected_kr)); + } + logv( + "mach_vm_read() returned expected value in each case: " + "%s.", + mach_error_string(expected_kr)); +} + +/* Reading 0 bytes always succeeds. */ +void +test_read_zero_size() +{ + read_edge_size(0, KERN_SUCCESS); +} + +/* Reading 4GB or higher always fails. */ +void +test_read_invalid_large_size() +{ + read_edge_size((mach_vm_size_t)UINT_MAX + 1, KERN_INVALID_ARGUMENT); +} + +/* Reading a range wrapped around the address space fails. */ +void +test_read_wrapped_around_ranges() +{ + int i; + kern_return_t kr; + vm_map_t this_task = mach_task_self(); + struct { + mach_vm_address_t address; + mach_vm_size_t size; + } ranges[] = { + {(mach_vm_address_t)(UINTMAX_MAX - UINT_MAX + 1), (mach_vm_size_t)UINT_MAX}, + {(mach_vm_address_t)(UINTMAX_MAX - UINT_MAX + vm_page_size), (mach_vm_size_t)(UINT_MAX - vm_page_size + 1)}, + {(mach_vm_address_t)UINTMAX_MAX - vm_page_size + 1, vm_page_size}, + {(mach_vm_address_t)UINTMAX_MAX, 1}, + }; + int numofranges = sizeof(ranges) / sizeof(ranges[0]); + vm_offset_t read_address; + mach_msg_type_number_t read_size; + + logv( + "Reading various memory ranges wrapping around the " + "address space..."); + for (i = 0; i < numofranges; i++) { + kr = mach_vm_read(this_task, ranges[i].address, ranges[i].size, &read_address, &read_size); + T_QUIET; T_ASSERT_EQ(kr, KERN_INVALID_ADDRESS, + "mach_vm_read() at address 0x%jx with size " + "0x%jx (%ju) unexpectedly returned: %s.\n" + "Should have returned: %s.", + (uintmax_t)ranges[i].address, (uintmax_t)ranges[i].size, (uintmax_t)ranges[i].size, mach_error_string(kr), + mach_error_string(KERN_INVALID_ADDRESS)); + } + logv("Returned expected error on each range: %s.", mach_error_string(KERN_INVALID_ADDRESS)); +} + +/********************************/ +/* mach_vm_read() pattern tests */ +/********************************/ + +/* Write a pattern on pre-allocated memory, read into a buffer and + * verify the pattern on the buffer. */ +void +write_read_verify_pattern(address_filter_t filter, boolean_t reversed, const char * pattern_name) +{ + mach_vm_address_t address = get_vm_address(); + + write_pattern(filter, reversed, address, get_vm_size(), pattern_name); + read_deallocate(); + /* Getting the address and size of the read buffer. 
*/ + mach_vm_address_t read_address = get_vm_address(); + mach_vm_size_t read_size = get_vm_size(); + logv( + "Verifying %s pattern on buffer of " + "address 0x%jx and size 0x%jx (%ju)...", + pattern_name, (uintmax_t)read_address, (uintmax_t)read_size, (uintmax_t)read_size); + filter_addresses_do_else(filter, reversed, read_address, read_size, verify_address, read_zero, address); + logv("Pattern verified on destination buffer."); +} + +void +test_read_address_filled() +{ + write_read_verify_pattern(empty, TRUE, "address-filled"); +} + +void +test_read_checkerboard() +{ + write_read_verify_pattern(checkerboard, FALSE, "checkerboard"); +} + +void +test_read_reverse_checkerboard() +{ + write_read_verify_pattern(checkerboard, TRUE, "reverse checkerboard"); +} + +/***********************************/ +/* mach_vm_write() edge case tests */ +/***********************************/ + +/* Writing in VM_MAP_NULL fails. */ +void +test_write_null_map() +{ + mach_vm_address_t address = get_vm_address(); + vm_offset_t data = (vm_offset_t)get_buffer_address(); + mach_msg_type_number_t buffer_size = (mach_msg_type_number_t)get_buffer_size(); + + logv( + "Writing buffer of address 0x%jx and size 0x%jx (%ju), on " + "memory at address 0x%jx in NULL VM MAP...", + (uintmax_t)data, (uintmax_t)buffer_size, (uintmax_t)buffer_size, (uintmax_t)address); + assert_mach_return(mach_vm_write(VM_MAP_NULL, address, data, buffer_size), MACH_SEND_INVALID_DEST, "mach_vm_write()"); + logv("Returned expected error: %s.", mach_error_string(MACH_SEND_INVALID_DEST)); +} + +/* Writing 0 bytes always succeeds. */ +void +test_write_zero_size() +{ + set_buffer_size(0); + write_buffer(); +} + +/*****************************************/ +/* mach_vm_write() inaccessibility tests */ +/*****************************************/ + +/* Writing a partially deallocated buffer fails. */ +void +test_write_partially_deallocated_buffer() +{ + mach_vm_address_t address = get_vm_address(); + vm_offset_t data = (vm_offset_t)get_buffer_address(); + mach_msg_type_number_t buffer_size = (mach_msg_type_number_t)get_buffer_size(); + mach_vm_address_t buffer_mid_point = (mach_vm_address_t)mach_vm_trunc_page(data + buffer_size / 2); + + logv( + "Deallocating a mid-range buffer page at address " + "0x%jx...", + (uintmax_t)buffer_mid_point); + assert_deallocate_success(buffer_mid_point, vm_page_size); + logv("Page deallocated."); + + logv( + "Writing buffer of address 0x%jx and size 0x%jx (%ju), on " + "memory at address 0x%jx...", + (uintmax_t)data, (uintmax_t)buffer_size, (uintmax_t)buffer_size, (uintmax_t)address); + assert_write_return(address, data, buffer_size, MACH_SEND_INVALID_MEMORY); + logv("Returned expected error: %s.", mach_error_string(MACH_SEND_INVALID_MEMORY)); +} + +/* Writing a partially read-protected buffer fails. 
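 * (Editor's note: an illustrative example follows this comment.)
 */

/*
 * Editorial illustration, not part of the xnu diff. A minimal sketch of the
 * mach_vm_write() calling convention used by the surrounding tests: the call
 * overwrites memory that is already mapped and writable at the destination,
 * copying from a caller-supplied buffer; it allocates nothing. The
 * example_*() name is hypothetical.
 */
#include <mach/mach.h>
#include <mach/mach_vm.h>
#include <stdint.h>
#include <string.h>

static kern_return_t
example_overwrite_mapped_memory(mach_vm_address_t dst)
{
    unsigned char buffer[64];

    memset(buffer, 0xA5, sizeof(buffer));
    return mach_vm_write(mach_task_self(), dst,
        (vm_offset_t)(uintptr_t)buffer,
        (mach_msg_type_number_t)sizeof(buffer));
}

/* End of editorial sketch; the patch resumes below.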
*/ +void +test_write_partially_unreadable_buffer() +{ + mach_vm_address_t address = get_vm_address(); + vm_offset_t data = (vm_offset_t)get_buffer_address(); + mach_msg_type_number_t buffer_size = (mach_msg_type_number_t)get_buffer_size(); + mach_vm_address_t buffer_mid_point = (mach_vm_address_t)mach_vm_trunc_page(data + buffer_size / 2); + + logv( + "Read-protecting a mid-range buffer page at address " + "0x%jx...", + (uintmax_t)buffer_mid_point); + T_QUIET; T_ASSERT_MACH_SUCCESS(mach_vm_protect(mach_task_self(), buffer_mid_point, vm_page_size, FALSE, VM_PROT_WRITE), + "mach_vm_protect()"); + logv("Page read-protected."); + + logv( + "Writing buffer of address 0x%jx and size 0x%jx (%ju), on " + "memory at address 0x%jx...", + (uintmax_t)data, (uintmax_t)buffer_size, (uintmax_t)buffer_size, (uintmax_t)address); + assert_write_return(address, data, buffer_size, MACH_SEND_INVALID_MEMORY); + logv("Returned expected error: %s.", mach_error_string(MACH_SEND_INVALID_MEMORY)); +} + +/* Writing on partially deallocated memory fails. */ +void +test_write_on_partially_deallocated_range() +{ + mach_vm_address_t address = get_vm_address(); + mach_vm_address_t start = mach_vm_trunc_page(address); + vm_offset_t data = (vm_offset_t)get_buffer_address(); + mach_msg_type_number_t buffer_size = (mach_msg_type_number_t)get_buffer_size(); + + logv( + "Deallocating the first destination page at address " + "0x%jx...", + (uintmax_t)start); + assert_deallocate_success(start, vm_page_size); + logv("Page deallocated."); + + logv( + "Writing buffer of address 0x%jx and size 0x%jx (%ju), on " + "memory at address 0x%jx...", + (uintmax_t)data, (uintmax_t)buffer_size, (uintmax_t)buffer_size, (uintmax_t)address); + assert_write_return(address, data, buffer_size, KERN_INVALID_ADDRESS); + logv("Returned expected error: %s.", mach_error_string(KERN_INVALID_ADDRESS)); +} + +/* Writing on partially unwritable memory fails. */ +void +test_write_on_partially_unwritable_range() +{ + mach_vm_address_t address = get_vm_address(); + mach_vm_address_t start = mach_vm_trunc_page(address); + vm_offset_t data = (vm_offset_t)get_buffer_address(); + mach_msg_type_number_t buffer_size = (mach_msg_type_number_t)get_buffer_size(); + + /* For sizes < msg_ool_size_small, + * vm_map_copy_overwrite_nested() uses + * vm_map_copyout_kernel_buffer() to read in the memory, + * returning different errors, see 8217123. */ + kern_return_t kr_expected = (buffer_size < vm_page_size * 2) ? KERN_INVALID_ADDRESS : KERN_PROTECTION_FAILURE; + + logv( + "Write-protecting the first destination page at address " + "0x%jx...", + (uintmax_t)start); + T_QUIET; T_ASSERT_MACH_SUCCESS(mach_vm_protect(mach_task_self(), start, vm_page_size, FALSE, VM_PROT_READ), "mach_vm_protect()"); + logv("Page write-protected."); + + logv( + "Writing buffer of address 0x%jx and size 0x%jx (%ju), on " + "memory at address 0x%jx...", + (uintmax_t)data, (uintmax_t)buffer_size, (uintmax_t)buffer_size, (uintmax_t)address); + assert_write_return(address, data, buffer_size, kr_expected); + logv("Returned expected error: %s.", mach_error_string(kr_expected)); +} + +/*********************************/ +/* mach_vm_write() pattern tests */ +/*********************************/ + +/* Verify that a zero-filled buffer and destination memory are still + * zero-filled after writing. 
*/ +void +test_zero_filled_write() +{ + verify_pattern(empty, FALSE, mach_vm_trunc_page(get_vm_address()), round_page_kernel(get_vm_size() + 1), "zero-filled"); + verify_pattern(empty, FALSE, mach_vm_trunc_page(get_buffer_address()), + round_page_kernel(get_buffer_size() + get_buffer_offset()), "zero-filled"); +} + +/* Write a pattern on a buffer, write the buffer into some destination + * memory, and verify the pattern on both buffer and destination. */ +void +pattern_write(address_filter_t filter, boolean_t reversed, const char * pattern_name) +{ + mach_vm_address_t address = get_vm_address(); + mach_vm_size_t size = get_vm_size(); + mach_vm_address_t buffer_address = get_buffer_address(); + mach_vm_size_t buffer_size = get_buffer_size(); + + write_pattern(filter, reversed, buffer_address, buffer_size, pattern_name); + write_buffer(); + verify_pattern(filter, reversed, buffer_address, buffer_size, pattern_name); + logv( + "Verifying %s pattern on destination of " + "address 0x%jx and size 0x%jx (%ju)...", + pattern_name, (uintmax_t)address, (uintmax_t)buffer_size, (uintmax_t)size); + filter_addresses_do_else(filter, reversed, address, buffer_size, verify_address, read_zero, buffer_address); + logv("Pattern verified on destination."); +} + +void +test_address_filled_write() +{ + pattern_write(empty, TRUE, "address-filled"); +} + +void +test_checkerboard_write() +{ + pattern_write(checkerboard, FALSE, "checkerboard"); +} + +void +test_reverse_checkerboard_write() +{ + pattern_write(checkerboard, TRUE, "reverse checkerboard"); +} + +/**********************************/ +/* mach_vm_copy() edge case tests */ +/**********************************/ + +/* Copying in VM_MAP_NULL fails. */ +void +test_copy_null_map() +{ + mach_vm_address_t source = get_vm_address(); + mach_vm_address_t dest = get_buffer_address(); + mach_msg_type_number_t size = (mach_msg_type_number_t)get_buffer_size(); + + logv( + "Copying buffer of address 0x%jx and size 0x%jx (%ju), on " + "memory at address 0x%jx in NULL VM MAP...", + (uintmax_t)dest, (uintmax_t)size, (uintmax_t)size, (uintmax_t)source); + assert_mach_return(mach_vm_copy(VM_MAP_NULL, source, size, dest), MACH_SEND_INVALID_DEST, "mach_vm_copy()"); + logv("Returned expected error: %s.", mach_error_string(MACH_SEND_INVALID_DEST)); +} + +void +copy_edge_size(mach_vm_size_t size, kern_return_t expected_kr) +{ + int i; + kern_return_t kr; + vm_map_t this_task = mach_task_self(); + mach_vm_address_t addresses[] = {0x0, + 0x1, + vm_page_size - 1, + vm_page_size, + vm_page_size + 1, + (mach_vm_address_t)UINT_MAX - vm_page_size + 1, + (mach_vm_address_t)UINT_MAX, + (mach_vm_address_t)UINTMAX_MAX - vm_page_size + 1, + (mach_vm_address_t)UINTMAX_MAX}; + int numofaddresses = sizeof(addresses) / sizeof(addresses[0]); + mach_vm_address_t dest = 0; + + logv("Allocating 0x%jx (%ju) byte%s...", (uintmax_t)size, (uintmax_t)size, (size == 1) ? 
"" : "s"); + assert_allocate_success(&dest, 4096, VM_FLAGS_ANYWHERE); + logv("Copying 0x%jx (%ju) bytes at various addresses...", (uintmax_t)size, (uintmax_t)size); + for (i = 0; i < numofaddresses; i++) { + kr = mach_vm_copy(this_task, addresses[i], size, dest); + T_QUIET; T_ASSERT_EQ(kr, expected_kr, + "mach_vm_copy() at " + "address 0x%jx unexpectedly returned: %s.\n" + "Should have returned: %s.", + (uintmax_t)addresses[i], mach_error_string(kr), mach_error_string(expected_kr)); + } + logv( + "mach_vm_copy() returned expected value in each case: " + "%s.", + mach_error_string(expected_kr)); + + deallocate_range(dest, 4096); +} + +/* Copying 0 bytes always succeeds. */ +void +test_copy_zero_size() +{ + copy_edge_size(0, KERN_SUCCESS); +} + +/* Copying 4GB or higher always fails. */ +void +test_copy_invalid_large_size() +{ + copy_edge_size((mach_vm_size_t)UINT_MAX - 1, KERN_INVALID_ADDRESS); +} + +/* Reading a range wrapped around the address space fails. */ +void +test_copy_wrapped_around_ranges() +{ + int i; + kern_return_t kr; + vm_map_t this_task = mach_task_self(); + struct { + mach_vm_address_t address; + mach_vm_size_t size; + } ranges[] = { + {(mach_vm_address_t)(UINTMAX_MAX - UINT_MAX + 1), (mach_vm_size_t)UINT_MAX}, + {(mach_vm_address_t)(UINTMAX_MAX - UINT_MAX + vm_page_size), (mach_vm_size_t)(UINT_MAX - vm_page_size + 1)}, + {(mach_vm_address_t)UINTMAX_MAX - vm_page_size + 1, vm_page_size}, + {(mach_vm_address_t)UINTMAX_MAX, 1}, + }; + int numofranges = sizeof(ranges) / sizeof(ranges[0]); + mach_vm_address_t dest = 0; + + logv("Allocating 0x1000 (4096) bytes..."); + assert_allocate_success(&dest, 4096, VM_FLAGS_ANYWHERE); + + logv( + "Copying various memory ranges wrapping around the " + "address space..."); + for (i = 0; i < numofranges; i++) { + kr = mach_vm_copy(this_task, ranges[i].address, ranges[i].size, dest); + T_QUIET; T_ASSERT_EQ(kr, KERN_INVALID_ADDRESS, + "mach_vm_copy() at address 0x%jx with size " + "0x%jx (%ju) unexpectedly returned: %s.\n" + "Should have returned: %s.", + (uintmax_t)ranges[i].address, (uintmax_t)ranges[i].size, (uintmax_t)ranges[i].size, mach_error_string(kr), + mach_error_string(KERN_INVALID_ADDRESS)); + } + logv("Returned expected error on each range: %s.", mach_error_string(KERN_INVALID_ADDRESS)); + + deallocate_range(dest, 4096); +} + +/********************************/ +/* mach_vm_copy() pattern tests */ +/********************************/ + +/* Write a pattern on pre-allocated region, copy into another region + * and verify the pattern in the region. 
*/ +void +write_copy_verify_pattern(address_filter_t filter, boolean_t reversed, const char * pattern_name) +{ + mach_vm_address_t source = get_vm_address(); + mach_vm_size_t src_size = get_vm_size(); + write_pattern(filter, reversed, source, src_size, pattern_name); + /* Getting the address and size of the dest region */ + mach_vm_address_t dest = get_buffer_address(); + mach_vm_size_t dst_size = get_buffer_size(); + + logv( + "Copying memory region of address 0x%jx and size 0x%jx (%ju), on " + "memory at address 0x%jx...", + (uintmax_t)source, (uintmax_t)dst_size, (uintmax_t)dst_size, (uintmax_t)dest); + assert_copy_success(source, dst_size, dest); + logv( + "Verifying %s pattern in region of " + "address 0x%jx and size 0x%jx (%ju)...", + pattern_name, (uintmax_t)dest, (uintmax_t)dst_size, (uintmax_t)dst_size); + filter_addresses_do_else(filter, reversed, dest, dst_size, verify_address, read_zero, source); + logv("Pattern verified on destination region."); +} + +void +test_copy_address_filled() +{ + write_copy_verify_pattern(empty, TRUE, "address-filled"); +} + +void +test_copy_checkerboard() +{ + write_copy_verify_pattern(checkerboard, FALSE, "checkerboard"); +} + +void +test_copy_reverse_checkerboard() +{ + write_copy_verify_pattern(checkerboard, TRUE, "reverse checkerboard"); +} + +/* Verify that a zero-filled source and destination memory are still + * zero-filled after writing. */ +void +test_zero_filled_copy_dest() +{ + verify_pattern(empty, FALSE, mach_vm_trunc_page(get_vm_address()), round_page_kernel(get_vm_size() + 1), "zero-filled"); + verify_pattern(empty, FALSE, mach_vm_trunc_page(get_buffer_address()), + round_page_kernel(get_buffer_size() + get_buffer_offset()), "zero-filled"); +} + +/****************************************/ +/* mach_vm_copy() inaccessibility tests */ +/****************************************/ + +/* Copying partially deallocated memory fails. */ +void +test_copy_partially_deallocated_range() +{ + mach_vm_address_t source = get_vm_address(); + mach_vm_size_t size = get_vm_size(); + mach_vm_address_t mid_point = mach_vm_trunc_page(source + size / 2); + mach_vm_address_t dest = 0; + + logv("Deallocating a mid-range page at address 0x%jx...", (uintmax_t)mid_point); + assert_deallocate_success(mid_point, vm_page_size); + logv("Page deallocated."); + + logv("Copying 0x%jx (%ju) byte%s at address 0x%jx...", (uintmax_t)size, (uintmax_t)size, (size == 1) ? "" : "s", + (uintmax_t)source); + + assert_allocate_copy_return(source, size, &dest, KERN_INVALID_ADDRESS); + + logv("Returned expected error: %s.", mach_error_string(KERN_INVALID_ADDRESS)); + + deallocate_range(dest, size); +} + +/* Copy partially read-protected memory fails. */ +void +test_copy_partially_unreadable_range() +{ + mach_vm_address_t source = get_vm_address(); + mach_vm_size_t size = get_vm_size(); + mach_vm_address_t mid_point = mach_vm_trunc_page(source + size / 2); + mach_vm_address_t dest = 0; + + /* For sizes < 1 page, vm_map_copyin_common() uses + * vm_map_copyin_kernel_buffer() to read in the memory, + * returning different errors, see 8182239. */ + kern_return_t kr_expected = (size < vm_page_size) ? 
KERN_INVALID_ADDRESS : KERN_PROTECTION_FAILURE; + + logv("Read-protecting a mid-range page at address 0x%jx...", (uintmax_t)mid_point); + T_QUIET; T_ASSERT_MACH_SUCCESS(mach_vm_protect(mach_task_self(), mid_point, vm_page_size, FALSE, VM_PROT_WRITE), "mach_vm_protect()"); + logv("Page read-protected."); + + logv("Copying 0x%jx (%ju) byte%s at address 0x%jx...", (uintmax_t)size, (uintmax_t)size, (size == 1) ? "" : "s", + (uintmax_t)source); + assert_allocate_copy_return(source, size, &dest, kr_expected); + logv("Returned expected error: %s.", mach_error_string(kr_expected)); + + deallocate_range(dest, size); +} + +/* Copying to a partially deallocated region fails. */ +void +test_copy_dest_partially_deallocated_region() +{ + mach_vm_address_t dest = get_vm_address(); + mach_vm_address_t source = get_buffer_address(); + mach_msg_type_number_t size = (mach_msg_type_number_t)get_buffer_size(); + mach_vm_address_t source_mid_point = (mach_vm_address_t)mach_vm_trunc_page(dest + size / 2); +#if __MAC_OX_X_VERSION_MIN_REQUIRED > 1080 + logv( + "Deallocating a mid-range source page at address " + "0x%jx...", + (uintmax_t)source_mid_point); + assert_deallocate_success(source_mid_point, vm_page_size); + logv("Page deallocated."); + + logv( + "Copying region of address 0x%jx and size 0x%jx (%ju), on " + "memory at address 0x%jx...", + (uintmax_t)source, (uintmax_t)size, (uintmax_t)size, (uintmax_t)dest); + assert_copy_return(source, size, dest, KERN_INVALID_ADDRESS); + logv("Returned expected error: %s.", mach_error_string(KERN_INVALID_ADDRESS)); +#else + logv( + "Bypassing partially deallocated region test " + "(See )"); +#endif /* __MAC_OX_X_VERSION_MIN_REQUIRED > 1080 */ +} + +/* Copying from a partially deallocated region fails. */ +void +test_copy_source_partially_deallocated_region() +{ + mach_vm_address_t source = get_vm_address(); + mach_vm_address_t dest = get_buffer_address(); + mach_msg_type_number_t size = (mach_msg_type_number_t)get_buffer_size(); + mach_vm_address_t source_mid_point = (mach_vm_address_t)mach_vm_trunc_page(source + size / 2); + + logv( + "Deallocating a mid-range source page at address " + "0x%jx...", + (uintmax_t)source_mid_point); + assert_deallocate_success(source_mid_point, vm_page_size); + logv("Page deallocated."); + + logv( + "Copying region of address 0x%jx and size 0x%jx (%ju), on " + "memory at address 0x%jx...", + (uintmax_t)source, (uintmax_t)size, (uintmax_t)size, (uintmax_t)dest); + assert_copy_return(source, size, dest, KERN_INVALID_ADDRESS); + logv("Returned expected error: %s.", mach_error_string(KERN_INVALID_ADDRESS)); +} + +/* Copying from a partially read-protected region fails. */ +void +test_copy_source_partially_unreadable_region() +{ + mach_vm_address_t source = get_vm_address(); + mach_vm_address_t dest = get_buffer_address(); + mach_msg_type_number_t size = (mach_msg_type_number_t)get_buffer_size(); + mach_vm_address_t mid_point = (mach_vm_address_t)mach_vm_trunc_page(source + size / 2); + kern_return_t kr = (size < vm_page_size * 2) ? 
KERN_INVALID_ADDRESS : KERN_PROTECTION_FAILURE; + + logv( + "Read-protecting a mid-range buffer page at address " + "0x%jx...", + (uintmax_t)mid_point); + T_QUIET; T_ASSERT_MACH_SUCCESS(mach_vm_protect(mach_task_self(), mid_point, vm_page_size, FALSE, VM_PROT_WRITE), "mach_vm_protect()"); + logv("Page read-protected."); + + logv( + "Copying region at address 0x%jx and size 0x%jx (%ju), on " + "memory at address 0x%jx...", + (uintmax_t)source, (uintmax_t)size, (uintmax_t)size, (uintmax_t)dest); + + assert_copy_return(source, size, dest, kr); + logv("Returned expected error: %s.", mach_error_string(kr)); +} + +/* Copying to a partially write-protected region fails. */ +void +test_copy_dest_partially_unwriteable_region() +{ + kern_return_t kr; + mach_vm_address_t dest = get_vm_address(); + mach_vm_address_t source = get_buffer_address(); + mach_msg_type_number_t size = (mach_msg_type_number_t)get_buffer_size(); + mach_vm_address_t mid_point = (mach_vm_address_t)mach_vm_trunc_page(dest + size / 2); + +#if __MAC_OX_X_VERSION_MIN_REQUIRED > 1080 + logv( + "Read-protecting a mid-range buffer page at address " + "0x%jx...", + (uintmax_t)mid_point); + T_QUIET; T_ASSERT_MACH_SUCCESS(mach_vm_protect(mach_task_self(), mid_point, vm_page_size, FALSE, VM_PROT_READ), "mach_vm_protect()"); + logv("Page read-protected."); + logv( + "Copying region at address 0x%jx and size 0x%jx (%ju), on " + "memory at address 0x%jx...", + (uintmax_t)source, (uintmax_t)size, (uintmax_t)size, (uintmax_t)dest); + if (size >= vm_page_size) { + kr = KERN_PROTECTION_FAILURE; + } else { + kr = KERN_INVALID_ADDRESS; + } + assert_copy_return(source, size, dest, kr); + logv("Returned expected error: %s.", mach_error_string(kr)); +#else + logv( + "Bypassing partially unwriteable region test " + "(See )"); +#endif /* __MAC_OX_X_VERSION_MIN_REQUIRED > 1080 */ +} + +/* Copying on partially deallocated memory fails. */ +void +test_copy_source_on_partially_deallocated_range() +{ + mach_vm_address_t source = get_vm_address(); + mach_vm_address_t dest = get_buffer_address(); + mach_vm_address_t start = mach_vm_trunc_page(source); + mach_msg_type_number_t size = (mach_msg_type_number_t)get_buffer_size(); + + logv( + "Deallocating the first source page at address " + "0x%jx...", + (uintmax_t)start); + assert_deallocate_success(start, vm_page_size); + logv("Page deallocated."); + + logv( + "Writing buffer of address 0x%jx and size 0x%jx (%ju), on " + "memory at address 0x%jx...", + (uintmax_t)dest, (uintmax_t)size, (uintmax_t)size, (uintmax_t)source); + assert_copy_return(source, size, dest, KERN_INVALID_ADDRESS); + logv("Returned expected error: %s.", mach_error_string(KERN_INVALID_ADDRESS)); +} + +/* Copying on partially deallocated memory fails. 
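 * (Editor's note: an illustrative example follows this comment.)
 */

/*
 * Editorial illustration, not part of the xnu diff. A minimal sketch of the
 * mach_vm_copy() usage pattern behind the tests in this section: both the
 * source and the destination must already be mapped, and the kernel performs
 * the copy between them. The example_*() name is hypothetical.
 */
#include <mach/mach.h>
#include <mach/mach_vm.h>

static kern_return_t
example_duplicate_range(mach_vm_address_t src, mach_vm_size_t size,
    mach_vm_address_t *out_dst)
{
    vm_map_t      task = mach_task_self();
    kern_return_t kr;

    *out_dst = 0;
    kr = mach_vm_allocate(task, out_dst, size, VM_FLAGS_ANYWHERE);
    if (kr != KERN_SUCCESS) {
        return kr;
    }
    kr = mach_vm_copy(task, src, size, *out_dst);
    if (kr != KERN_SUCCESS) {
        (void)mach_vm_deallocate(task, *out_dst, size);
        *out_dst = 0;
    }
    return kr;
}

/* End of editorial sketch; the patch resumes below.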
*/ +void +test_copy_dest_on_partially_deallocated_range() +{ + mach_vm_address_t source = get_vm_address(); + mach_vm_address_t dest = get_buffer_address(); + mach_vm_address_t start = mach_vm_trunc_page(dest); + mach_msg_type_number_t size = (mach_msg_type_number_t)get_buffer_size(); + + logv( + "Deallocating the first destination page at address " + "0x%jx...", + (uintmax_t)start); + assert_deallocate_success(start, vm_page_size); + logv("Page deallocated."); + + logv( + "Writing buffer of address 0x%jx and size 0x%jx (%ju), on " + "memory at address 0x%jx...", + (uintmax_t)dest, (uintmax_t)size, (uintmax_t)size, (uintmax_t)source); + assert_copy_return(source, size, dest, KERN_INVALID_ADDRESS); + logv("Returned expected error: %s.", mach_error_string(KERN_INVALID_ADDRESS)); +} + +/* Copying on partially unwritable memory fails. */ +void +test_copy_dest_on_partially_unwritable_range() +{ + mach_vm_address_t source = get_vm_address(); + mach_vm_address_t dest = get_buffer_address(); + mach_vm_address_t start = mach_vm_trunc_page(dest); + mach_msg_type_number_t size = (mach_msg_type_number_t)get_buffer_size(); + + /* For sizes < msg_ool_size_small, + * vm_map_copy_overwrite_nested() uses + * vm_map_copyout_kernel_buffer() to read in the memory, + * returning different errors, see 8217123. */ + kern_return_t kr_expected = (size < vm_page_size * 2) ? KERN_INVALID_ADDRESS : KERN_PROTECTION_FAILURE; + + logv( + "Write-protecting the first destination page at address " + "0x%jx...", + (uintmax_t)start); + T_QUIET; T_ASSERT_MACH_SUCCESS(mach_vm_protect(mach_task_self(), start, vm_page_size, FALSE, VM_PROT_READ), "mach_vm_protect()"); + logv("Page write-protected."); + + logv( + "Writing buffer of address 0x%jx and size 0x%jx (%ju), on " + "memory at address 0x%jx...", + (uintmax_t)dest, (uintmax_t)size, (uintmax_t)size, (uintmax_t)source); + assert_copy_return(source, size, dest, kr_expected); + logv("Returned expected error: %s.", mach_error_string(kr_expected)); +} + +/* Copying on partially unreadable memory fails. */ +void +test_copy_source_on_partially_unreadable_range() +{ + mach_vm_address_t source = get_vm_address(); + mach_vm_address_t dest = get_buffer_address(); + mach_vm_address_t start = mach_vm_trunc_page(source); + mach_msg_type_number_t size = (mach_msg_type_number_t)get_buffer_size(); + + /* For sizes < msg_ool_size_small, + * vm_map_copy_overwrite_nested() uses + * vm_map_copyout_kernel_buffer() to read in the memory, + * returning different errors, see 8217123. */ + kern_return_t kr_expected = (size < vm_page_size * 2) ? 
KERN_INVALID_ADDRESS : KERN_PROTECTION_FAILURE; + + logv( + "Read-protecting the first destination page at address " + "0x%jx...", + (uintmax_t)start); + T_QUIET; T_ASSERT_MACH_SUCCESS(mach_vm_protect(mach_task_self(), start, vm_page_size, FALSE, VM_PROT_WRITE), "mach_vm_protect()"); + logv("Page read-protected."); + + logv( + "Writing buffer of address 0x%jx and size 0x%jx (%ju), on " + "memory at address 0x%jx...", + (uintmax_t)dest, (uintmax_t)size, (uintmax_t)size, (uintmax_t)source); + assert_copy_return(source, size, dest, kr_expected); + logv("Returned expected error: %s.", mach_error_string(kr_expected)); +} + +/********************************/ +/* mach_vm_protect() main tests */ +/********************************/ + +void +test_zero_filled_extended() +{ + verify_pattern(empty, FALSE, mach_vm_trunc_page(get_vm_address()), round_page_kernel(get_vm_size() + 1), "zero-filled"); +} + +/* Allocated region is still zero-filled after read-protecting it and + * then restoring read-access. */ +void +test_zero_filled_readprotect() +{ + mach_vm_address_t address = get_vm_address(); + mach_vm_size_t size = get_vm_size(); + + logv("Setting read access on 0x%jx (%ju) byte%s at address 0x%jx...", (uintmax_t)size, (uintmax_t)size, + (size == 1) ? "" : "s", (uintmax_t)address); + T_QUIET; T_ASSERT_MACH_SUCCESS(mach_vm_protect(mach_task_self(), address, size, FALSE, VM_PROT_DEFAULT), "mach_vm_protect()"); + logv("Region has read access."); + test_zero_filled_extended(); +} + +void +verify_protection(vm_prot_t protection, const char * protection_name) +{ + mach_vm_address_t address = get_vm_address(); + mach_vm_size_t size = get_vm_size(); + mach_vm_size_t original_size = size; + vm_region_basic_info_data_64_t info; + mach_msg_type_number_t count = VM_REGION_BASIC_INFO_COUNT_64; + mach_port_t unused; + + logv( + "Verifying %s-protection on region of address 0x%jx and " + "size 0x%jx (%ju) with mach_vm_region()...", + protection_name, (uintmax_t)address, (uintmax_t)size, (uintmax_t)size); + T_QUIET; T_ASSERT_MACH_SUCCESS( + mach_vm_region(mach_task_self(), &address, &size, VM_REGION_BASIC_INFO_64, (vm_region_info_t)&info, &count, &unused), + "mach_vm_region()"); + if (original_size) { + T_QUIET; T_ASSERT_EQ((info.protection & protection), 0, + "Region " + "is unexpectedly %s-unprotected.", + protection_name); + logv("Region is %s-protected as expected.", protection_name); + } else { + T_QUIET; T_ASSERT_NE(info.protection & protection, 0, + "Region is " + "unexpectedly %s-protected.", + protection_name); + logv("Region is %s-unprotected as expected.", protection_name); + } +} + +void +test_verify_readprotection() +{ + verify_protection(VM_PROT_READ, "read"); +} + +void +test_verify_writeprotection() +{ + verify_protection(VM_PROT_WRITE, "write"); +} + +/******************************/ +/* Protection bus error tests */ +/******************************/ + +/* mach_vm_protect() affects the smallest aligned region (integral + * number of pages) containing the given range. */ + +/* Addresses in read-protected range are inaccessible. */ +void +access_readprotected_range_address(mach_vm_address_t address, const char * position) +{ + logv("Reading from %s 0x%jx of read-protected range...", position, (uintmax_t)address); + mach_vm_address_t bad_value = MACH_VM_ADDRESS_T(address); + T_ASSERT_FAIL("Unexpectedly read value 0x%jx at address 0x%jx." + "Should have died with signal SIGBUS.", + (uintmax_t)bad_value, (uintmax_t)address); +} + +/* Start of read-protected range is inaccessible. 
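 * (Editor's note: an illustrative example follows this comment.)
 */

/*
 * Editorial illustration, not part of the xnu diff. A minimal sketch of the
 * mach_vm_region() query used by verify_protection() above to read back a
 * region's current protection bits. The example_*() name is hypothetical.
 */
#include <mach/mach.h>
#include <mach/mach_vm.h>
#include <mach/vm_region.h>

static kern_return_t
example_query_protection(mach_vm_address_t where, vm_prot_t *out_prot)
{
    mach_vm_address_t              addr  = where;
    mach_vm_size_t                 size  = 0;
    vm_region_basic_info_data_64_t info;
    mach_msg_type_number_t         count = VM_REGION_BASIC_INFO_COUNT_64;
    mach_port_t                    object_name = MACH_PORT_NULL;
    kern_return_t                  kr;

    kr = mach_vm_region(mach_task_self(), &addr, &size,
        VM_REGION_BASIC_INFO_64, (vm_region_info_t)&info, &count,
        &object_name);
    if (kr == KERN_SUCCESS) {
        *out_prot = info.protection;
    }
    return kr;
}

/* End of editorial sketch; the patch resumes below.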
*/ +void +test_access_readprotected_range_start() +{ + access_readprotected_range_address(mach_vm_trunc_page(get_vm_address()), "start"); +} + +/* Middle of read-protected range is inaccessible. */ +void +test_access_readprotected_range_middle() +{ + mach_vm_address_t address = get_vm_address(); + access_readprotected_range_address(mach_vm_trunc_page(address) + (aligned_size(address, get_vm_size()) >> 1), "middle"); +} + +/* End of read-protected range is inaccessible. */ +void +test_access_readprotected_range_end() +{ + access_readprotected_range_address(round_page_kernel(get_vm_address() + get_vm_size()) - vm_address_size, "end"); +} + +/* Addresses in write-protected range are unwritable. */ +void +write_writeprotected_range_address(mach_vm_address_t address, const char * position) +{ + logv("Writing on %s 0x%jx of write-protected range...", position, (uintmax_t)address); + MACH_VM_ADDRESS_T(address) = 0x0; + T_ASSERT_FAIL("Unexpectedly wrote value 0x0 value at address 0x%jx." + "Should have died with signal SIGBUS.", + (uintmax_t)address); +} + +/* Start of write-protected range is unwritable. */ +void +test_write_writeprotected_range_start() +{ + write_writeprotected_range_address(mach_vm_trunc_page(get_vm_address()), "start"); +} + +/* Middle of write-protected range is unwritable. */ +void +test_write_writeprotected_range_middle() +{ + mach_vm_address_t address = get_vm_address(); + write_writeprotected_range_address(mach_vm_trunc_page(address) + (aligned_size(address, get_vm_size()) >> 1), "middle"); +} + +/* End of write-protected range is unwritable. */ +void +test_write_writeprotected_range_end() +{ + write_writeprotected_range_address(round_page_kernel(get_vm_address() + get_vm_size()) - vm_address_size, "end"); +} + +/*************************************/ +/* mach_vm_protect() edge case tests */ +/*************************************/ + +void +protect_zero_size(vm_prot_t protection, const char * protection_name) +{ + int i; + kern_return_t kr; + vm_map_t this_task = mach_task_self(); + mach_vm_address_t addresses[] = {0x0, + 0x1, + vm_page_size - 1, + vm_page_size, + vm_page_size + 1, + (mach_vm_address_t)UINT_MAX - vm_page_size + 1, + (mach_vm_address_t)UINT_MAX, + (mach_vm_address_t)UINTMAX_MAX - vm_page_size + 1, + (mach_vm_address_t)UINTMAX_MAX}; + int numofaddresses = sizeof(addresses) / sizeof(addresses[0]); + + logv("%s-protecting 0x0 (0) bytes at various addresses...", protection_name); + for (i = 0; i < numofaddresses; i++) { + kr = mach_vm_protect(this_task, addresses[i], 0, FALSE, protection); + T_QUIET; T_ASSERT_MACH_SUCCESS(kr, + "mach_vm_protect() at " + "address 0x%jx unexpectedly failed: %s.", + (uintmax_t)addresses[i], mach_error_string(kr)); + } + logv("Protection successful."); +} + +void +test_readprotect_zero_size() +{ + protect_zero_size(VM_PROT_READ, "Read"); +} + +void +test_writeprotect_zero_size() +{ + protect_zero_size(VM_PROT_WRITE, "Write"); +} + +/* Protecting a range wrapped around the address space fails. 
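 * (Editor's note: an illustrative example follows this comment.)
 */

/*
 * Editorial illustration, not part of the xnu diff. A minimal sketch of the
 * mach_vm_protect() pattern these tests rely on: with set_maximum == FALSE
 * only the current protection changes, so write access can be dropped and
 * later restored (within the region's maximum protection). The example_*()
 * name is hypothetical.
 */
#include <mach/mach.h>
#include <mach/mach_vm.h>

static kern_return_t
example_toggle_write_access(mach_vm_address_t page)
{
    vm_map_t      task = mach_task_self();
    kern_return_t kr;

    kr = mach_vm_protect(task, page, vm_page_size, FALSE, VM_PROT_READ);
    if (kr != KERN_SUCCESS) {
        return kr;
    }
    /* ... any store to the page would now raise SIGBUS ... */
    return mach_vm_protect(task, page, vm_page_size, FALSE, VM_PROT_DEFAULT);
}

/* End of editorial sketch; the patch resumes below.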
*/ +void +protect_wrapped_around_ranges(vm_prot_t protection, const char * protection_name) +{ + int i; + kern_return_t kr; + vm_map_t this_task = mach_task_self(); + struct { + mach_vm_address_t address; + mach_vm_size_t size; + } ranges[] = { + {0x1, (mach_vm_size_t)UINTMAX_MAX}, + {vm_page_size, (mach_vm_size_t)UINTMAX_MAX - vm_page_size + 1}, + {(mach_vm_address_t)UINTMAX_MAX - vm_page_size + 1, vm_page_size}, + {(mach_vm_address_t)UINTMAX_MAX, 1}, + }; + int numofranges = sizeof(ranges) / sizeof(ranges[0]); + + logv( + "%s-protecting various memory ranges wrapping around the " + "address space...", + protection_name); + for (i = 0; i < numofranges; i++) { + kr = mach_vm_protect(this_task, ranges[i].address, ranges[i].size, FALSE, protection); + T_QUIET; T_ASSERT_EQ(kr, KERN_INVALID_ARGUMENT, + "mach_vm_protect() with address 0x%jx and size " + "0x%jx (%ju) unexpectedly returned: %s.\n" + "Should have returned: %s.", + (uintmax_t)ranges[i].address, (uintmax_t)ranges[i].size, (uintmax_t)ranges[i].size, mach_error_string(kr), + mach_error_string(KERN_INVALID_ARGUMENT)); + } + logv("Returned expected error on each range: %s.", mach_error_string(KERN_INVALID_ARGUMENT)); +} + +void +test_readprotect_wrapped_around_ranges() +{ + protect_wrapped_around_ranges(VM_PROT_READ, "Read"); +} + +void +test_writeprotect_wrapped_around_ranges() +{ + protect_wrapped_around_ranges(VM_PROT_WRITE, "Write"); +} + +/*******************/ +/* vm_copy() tests */ +/*******************/ + +/* Verify the address space is being shared. */ +void +assert_share_mode(mach_vm_address_t address, unsigned share_mode, const char * share_mode_name) +{ + mach_vm_size_t size = get_vm_size(); + vm_region_extended_info_data_t info; + mach_msg_type_number_t count = VM_REGION_EXTENDED_INFO_COUNT; + mach_port_t unused; + +/* + * XXX Fails on UVM kernel. See + */ +#if notyet /* __MAC_OS_X_VERSION_MIN_REQUIRED < 1090 */ + logv( + "Verifying %s share mode on region of address 0x%jx and " + "size 0x%jx (%ju)...", + share_mode_name, (uintmax_t)address, (uintmax_t)size, (uintmax_t)size); + T_QUIET; T_ASSERT_MACH_SUCCESS( + mach_vm_region(mach_task_self(), &address, &size, VM_REGION_EXTENDED_INFO, (vm_region_info_t)&info, &count, &unused), + "mach_vm_region()"); + T_QUIET; T_ASSERT_EQ(info.share_mode, share_mode, + "Region's share mode " + " unexpectedly is not %s but %d.", + share_mode_name, info.share_mode); + logv("Region has a share mode of %s as expected.", share_mode_name); +#else + logv("Bypassing share_mode verification (See )"); +#endif /* __MAC_OS_X_VERSION_MIN_REQUIRED < 1090 */ +} + +/* Do the vm_copy() and verify its success. 
*/
+void
+assert_vmcopy_success(vm_address_t src, vm_address_t dst, const char * source_name)
+{
+ kern_return_t kr;
+ mach_vm_size_t size = get_vm_size();
+
+ logv("Copying (using mach_vm_copy()) from a %s source...", source_name);
+ kr = mach_vm_copy(mach_task_self(), src, size, dst);
+ T_QUIET; T_ASSERT_MACH_SUCCESS(kr,
+ "mach_vm_copy() with the source address "
+ "0x%jx, destination address 0x%jx, and size 0x%jx (%ju) unexpectedly "
+ "returned %s.\n Should have returned: %s.",
+ (uintmax_t)src, (uintmax_t)dst, (uintmax_t)size, (uintmax_t)size, mach_error_string(kr),
+ mach_error_string(KERN_SUCCESS));
+ logv("Copy (mach_vm_copy()) was successful as expected.");
+}
+
+void
+write_region(mach_vm_address_t address, mach_vm_size_t start)
+{
+ mach_vm_size_t size = get_vm_size();
+
+ filter_addresses_do_else(empty, FALSE, address, size, write_address, write_address, start);
+}
+
+void
+verify_region(mach_vm_address_t address, mach_vm_address_t start)
+{
+ mach_vm_size_t size = get_vm_size();
+
+ filter_addresses_do_else(empty, FALSE, address, size, verify_address, verify_address, start);
+}
+
+/* Perform the post vm_copy() action and verify its results. */
+void
+modify_one_and_verify_all_regions(vm_address_t src, vm_address_t dst, vm_address_t shared_copied, boolean_t shared)
+{
+ mach_vm_size_t size = get_vm_size();
+ int action = get_vmcopy_post_action();
+
+ /* Do the post vm_copy() action. */
+ switch (action) {
+ case VMCOPY_MODIFY_SRC:
+ logv("Modifying: source%s...", shared ? " (shared with other region)" : "");
+ write_region(src, 1);
+ break;
+
+ case VMCOPY_MODIFY_DST:
+ logv("Modifying: destination...");
+ write_region(dst, 1);
+ break;
+
+ case VMCOPY_MODIFY_SHARED_COPIED:
+ /* If no shared_copied then no need to verify (nothing changed). */
+ if (!shared_copied) {
+ return;
+ }
+ logv("Modifying: shared/copied%s...", shared ? " (shared with source region)" : "");
+ write_region(shared_copied, 1);
+ break;
+
+ default:
+ T_ASSERT_FAIL("Unknown post vm_copy() action (%d)", action);
+ }
+ logv("Modification was successful as expected.");
+
+ /* Verify all the regions with what is expected. */
+ logv("Verifying: source... ");
+ verify_region(src, (VMCOPY_MODIFY_SRC == action || (shared && VMCOPY_MODIFY_SHARED_COPIED == action)) ? 1 : 0);
+ logv("destination... ");
+ verify_region(dst, (VMCOPY_MODIFY_DST == action) ? 1 : 0);
+ if (shared_copied) {
+ logv("shared/copied... ");
+ verify_region(shared_copied, (VMCOPY_MODIFY_SHARED_COPIED == action || (shared && VMCOPY_MODIFY_SRC == action)) ? 1 : 0);
+ }
+ logv("Verification was successful as expected.");
+}
+
+/* Test source being a simple fresh region. */
+void
+test_vmcopy_fresh_source()
+{
+ mach_vm_size_t size = get_vm_size();
+ mach_vm_address_t src, dst;
+
+ if (get_vmcopy_post_action() == VMCOPY_MODIFY_SHARED_COPIED) {
+ /* No shared/copied region to modify so just return. */
+ logv("No shared/copied region as expected.");
+ return;
+ }
+
+ assert_allocate_success(&src, size, TRUE);
+
+ assert_share_mode(src, SM_EMPTY, "SM_EMPTY");
+
+ write_region(src, 0);
+
+ assert_allocate_success(&dst, size, TRUE);
+
+ assert_vmcopy_success(src, dst, "freshly allocated");
+
+ modify_one_and_verify_all_regions(src, dst, 0, FALSE);
+
+ assert_deallocate_success(src, size);
+ assert_deallocate_success(dst, size);
+}
+
+/* Test source copied from a shared region.
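 * (Editor's note: an illustrative example follows this comment.)
 */

/*
 * Editorial illustration, not part of the xnu diff. A minimal sketch of how
 * the shared-source test below sets up its region: marking a range
 * VM_INHERIT_SHARE before fork() makes parent and child share the same pages
 * instead of receiving copy-on-write duplicates. The example_*() name is
 * hypothetical.
 */
#include <mach/mach.h>
#include <mach/mach_vm.h>

static kern_return_t
example_prepare_shared_with_child(mach_vm_address_t *out_addr,
    mach_vm_size_t size)
{
    vm_map_t      task = mach_task_self();
    kern_return_t kr;

    *out_addr = 0;
    kr = mach_vm_allocate(task, out_addr, size, VM_FLAGS_ANYWHERE);
    if (kr != KERN_SUCCESS) {
        return kr;
    }
    kr = mach_vm_inherit(task, *out_addr, size, VM_INHERIT_SHARE);
    if (kr != KERN_SUCCESS) {
        (void)mach_vm_deallocate(task, *out_addr, size);
        *out_addr = 0;
    }
    return kr;  /* a subsequent fork() now shares this range */
}

/* End of editorial sketch; the patch resumes below.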
*/ +void +test_vmcopy_shared_source() +{ + mach_vm_size_t size = get_vm_size(); + mach_vm_address_t src, dst, shared; + int action = get_vmcopy_post_action(); + int pid, status; + + assert_allocate_success(&src, size, TRUE); + + T_QUIET; T_ASSERT_MACH_SUCCESS(mach_vm_inherit(mach_task_self(), src, size, VM_INHERIT_SHARE), "mach_vm_inherit()"); + + write_region(src, 0); + + pid = fork(); + if (pid == 0) { + /* Verify that the child's 'src' is shared with the + * parent's src */ + assert_share_mode(src, SM_SHARED, "SM_SHARED"); + assert_allocate_success(&dst, size, TRUE); + assert_vmcopy_success(src, dst, "shared"); + if (VMCOPY_MODIFY_SHARED_COPIED == action) { + logv("Modifying: shared..."); + write_region(src, 1); + logv("Modification was successful as expected."); + logv("Verifying: source... "); + verify_region(src, 1); + logv("destination..."); + verify_region(dst, (VMCOPY_MODIFY_DST == action) ? 1 : 0); + logv("Verification was successful as expected."); + } else { + modify_one_and_verify_all_regions(src, dst, 0, TRUE); + } + assert_deallocate_success(dst, size); + exit(0); + } else if (pid > 0) { + /* In the parent the src becomes the shared */ + shared = src; + wait(&status); + if (WEXITSTATUS(status) != 0) { + exit(status); + } + /* verify shared (shared with child's src) */ + logv("Verifying: shared..."); + verify_region(shared, (VMCOPY_MODIFY_SHARED_COPIED == action || VMCOPY_MODIFY_SRC == action) ? 1 : 0); + logv("Verification was successful as expected."); + } else { + T_WITH_ERRNO; T_ASSERT_FAIL("fork failed"); + } + + assert_deallocate_success(src, size); +} + +/* Test source copied from another mapping. */ +void +test_vmcopy_copied_from_source() +{ + mach_vm_size_t size = get_vm_size(); + mach_vm_address_t src, dst, copied; + + assert_allocate_success(&copied, size, TRUE); + write_region(copied, 0); + + assert_allocate_success(&src, size, TRUE); + + T_QUIET; T_ASSERT_MACH_SUCCESS(mach_vm_copy(mach_task_self(), copied, size, src), "mach_vm_copy()"); + + assert_share_mode(src, SM_COW, "SM_COW"); + + assert_allocate_success(&dst, size, TRUE); + + assert_vmcopy_success(src, dst, "copied from"); + + modify_one_and_verify_all_regions(src, dst, copied, FALSE); + + assert_deallocate_success(src, size); + assert_deallocate_success(dst, size); + assert_deallocate_success(copied, size); +} + +/* Test source copied to another mapping. */ +void +test_vmcopy_copied_to_source() +{ + mach_vm_size_t size = get_vm_size(); + mach_vm_address_t src, dst, copied; + + assert_allocate_success(&src, size, TRUE); + write_region(src, 0); + + assert_allocate_success(&copied, size, TRUE); + + T_QUIET; T_ASSERT_MACH_SUCCESS(mach_vm_copy(mach_task_self(), src, size, copied), "mach_vm_copy()"); + + assert_share_mode(src, SM_COW, "SM_COW"); + + assert_allocate_success(&dst, size, TRUE); + + assert_vmcopy_success(src, dst, "copied to"); + + modify_one_and_verify_all_regions(src, dst, copied, FALSE); + + assert_deallocate_success(src, size); + assert_deallocate_success(dst, size); + assert_deallocate_success(copied, size); +} + +/* Test a true shared source copied. 
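+ * SM_TRUESHARED is obtained by wrapping an existing allocation in a named
+ * memory entry and mapping that entry back into the same task, roughly:
+ *
+ *     mach_make_memory_entry_64(mach_task_self(), &size, shared, prot, &mem_obj, MACH_PORT_NULL);
+ *     mach_vm_map(mach_task_self(), &src, size, 0, TRUE, mem_obj, 0, FALSE, prot, prot, VM_INHERIT_NONE);
+ *
+ * (prot stands in for the cur/max protections used below.)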
*/ +void +test_vmcopy_trueshared_source() +{ + mach_vm_size_t size = get_vm_size(); + mach_vm_address_t src = 0x0, dst, shared; + vm_prot_t cur_protect = (VM_PROT_READ | VM_PROT_WRITE); + vm_prot_t max_protect = (VM_PROT_READ | VM_PROT_WRITE); + mem_entry_name_port_t mem_obj; + + assert_allocate_success(&shared, size, TRUE); + write_region(shared, 0); + + T_QUIET; T_ASSERT_MACH_SUCCESS(mach_make_memory_entry_64(mach_task_self(), &size, (memory_object_offset_t)shared, cur_protect, &mem_obj, + (mem_entry_name_port_t)NULL), + "mach_make_memory_entry_64()"); + T_QUIET; T_ASSERT_MACH_SUCCESS( + mach_vm_map(mach_task_self(), &src, size, 0, TRUE, mem_obj, 0, FALSE, cur_protect, max_protect, VM_INHERIT_NONE), + "mach_vm_map()"); + + assert_share_mode(src, SM_TRUESHARED, "SM_TRUESHARED"); + + assert_allocate_success(&dst, size, TRUE); + + assert_vmcopy_success(src, dst, "true shared"); + + modify_one_and_verify_all_regions(src, dst, shared, TRUE); + + assert_deallocate_success(src, size); + assert_deallocate_success(dst, size); + assert_deallocate_success(shared, size); +} + +/* Test a private aliased source copied. */ +void +test_vmcopy_private_aliased_source() +{ + mach_vm_size_t size = get_vm_size(); + mach_vm_address_t src = 0x0, dst, shared; + vm_prot_t cur_protect = (VM_PROT_READ | VM_PROT_WRITE); + vm_prot_t max_protect = (VM_PROT_READ | VM_PROT_WRITE); + + assert_allocate_success(&shared, size, TRUE); + write_region(shared, 0); + + T_QUIET; T_ASSERT_MACH_SUCCESS(mach_vm_remap(mach_task_self(), &src, size, 0, TRUE, mach_task_self(), shared, FALSE, &cur_protect, + &max_protect, VM_INHERIT_NONE), + "mach_vm_remap()"); + + assert_share_mode(src, SM_PRIVATE_ALIASED, "SM_PRIVATE_ALIASED"); + + assert_allocate_success(&dst, size, TRUE); + + assert_vmcopy_success(src, dst, "private aliased"); + + modify_one_and_verify_all_regions(src, dst, shared, TRUE); + + assert_deallocate_success(src, size); + assert_deallocate_success(dst, size); + assert_deallocate_success(shared, size); +} + +/*************/ +/* VM Suites */ +/*************/ + +void +run_allocate_test_suites() +{ + /* CoreOSZin 12Z30: VMUnitTest fails: + * error finding xnu major version number. 
*/ + /* unsigned int xnu_version = xnu_major_version(); */ + + UnitTests allocate_main_tests = { + {"Allocated address is nonzero iff size is nonzero", test_nonzero_address_iff_nonzero_size}, + {"Allocated address is page-aligned", test_aligned_address}, + {"Allocated memory is zero-filled", test_zero_filled}, + {"Write and verify address-filled pattern", test_write_address_filled}, + {"Write and verify checkerboard pattern", test_write_checkerboard}, + {"Write and verify reverse checkerboard pattern", test_write_reverse_checkerboard}, + {"Write and verify page ends pattern", test_write_page_ends}, + {"Write and verify page interiors pattern", test_write_page_interiors}, + {"Reallocate allocated pages", test_reallocate_pages}, + }; + UnitTests allocate_address_error_tests = { + {"Allocate at address zero", test_allocate_at_zero}, + {"Allocate at a 2 MB boundary-unaligned, page-aligned " + "address", + test_allocate_2MB_boundary_unaligned_page_aligned_address}, + }; + UnitTests allocate_argument_error_tests = { + {"Allocate in NULL VM map", test_allocate_in_null_map}, {"Allocate with kernel flags", test_allocate_with_kernel_flags}, + }; + UnitTests allocate_fixed_size_tests = { + {"Allocate zero size", test_allocate_zero_size}, + {"Allocate overflowing size", test_allocate_overflowing_size}, + {"Allocate a page with highest address hint", test_allocate_page_with_highest_address_hint}, + {"Allocate two pages and verify first fit strategy", test_allocate_first_fit_pages}, + }; + UnitTests allocate_invalid_large_size_test = { + {"Allocate invalid large size", test_allocate_invalid_large_size}, + }; + UnitTests mach_vm_map_protection_inheritance_error_test = { + {"mach_vm_map() with invalid protection/inheritance " + "arguments", + test_mach_vm_map_protection_inheritance_error}, + }; + UnitTests mach_vm_map_large_mask_overflow_error_test = { + {"mach_vm_map() with large address mask", test_mach_vm_map_large_mask_overflow_error}, + }; + + /* Run the test suites with various allocators and VM sizes, and + * unspecified or fixed (page-aligned or page-unaligned), + * addresses. */ + for (allocators_idx = 0; allocators_idx < numofallocators; allocators_idx++) { + for (sizes_idx = 0; sizes_idx < numofsizes; sizes_idx++) { + for (flags_idx = 0; flags_idx < numofflags; flags_idx++) { + for (alignments_idx = 0; alignments_idx < numofalignments; alignments_idx++) { + /* An allocated address will be page-aligned. */ + /* Only run the zero size mach_vm_map() error tests in the + * unspecified address case, since we won't be able to retrieve a + * fixed address for allocation. See 8003930. */ + if ((flags_idx == ANYWHERE && alignments_idx == UNALIGNED) || + (allocators_idx != MACH_VM_ALLOCATE && sizes_idx == ZERO_BYTES && flags_idx == FIXED)) { + continue; + } + run_suite(set_up_allocator_and_vm_variables, allocate_argument_error_tests, do_nothing, + "%s argument error tests, %s%s address, " + "%s size: 0x%jx (%ju)", + allocators[allocators_idx].description, address_flags[flags_idx].description, + (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description, + vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size, + (uintmax_t)vm_sizes[sizes_idx].size); + /* mach_vm_map() only protection and inheritance error + * tests. 
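+ * (These exercise mach_vm_map() with invalid protection/inheritance
+ * arguments and expect the call to be rejected rather than create a
+ * mapping.)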
*/ + if (allocators_idx != MACH_VM_ALLOCATE) { + run_suite(set_up_allocator_and_vm_variables, mach_vm_map_protection_inheritance_error_test, do_nothing, + "%s protection and inheritance " + "error test, %s%s address, %s size: 0x%jx " + "(%ju)", + allocators[allocators_idx].description, address_flags[flags_idx].description, + (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description, + vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size, + (uintmax_t)vm_sizes[sizes_idx].size); + } + /* mach_vm_map() cannot allocate 0 bytes, see 8003930. */ + if (allocators_idx == MACH_VM_ALLOCATE || sizes_idx != ZERO_BYTES) { + run_suite(set_up_allocator_and_vm_variables_and_allocate, allocate_main_tests, deallocate, + "%s main " + "allocation tests, %s%s address, %s size: 0x%jx " + "(%ju)", + allocators[allocators_idx].description, address_flags[flags_idx].description, + (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description, + vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size, + (uintmax_t)vm_sizes[sizes_idx].size); + } + } + } + run_suite(set_up_allocator_and_vm_size, allocate_address_error_tests, do_nothing, + "%s address " + "error allocation tests, %s size: 0x%jx (%ju)", + allocators[allocators_idx].description, vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size, + (uintmax_t)vm_sizes[sizes_idx].size); + } + run_suite(set_up_allocator, allocate_fixed_size_tests, do_nothing, "%s fixed size allocation tests", + allocators[allocators_idx].description); + /* CoreOSZin 12Z30: VMUnitTest fails: + * error finding xnu major version number. */ + /* mach_vm_map() with a named entry triggers a panic with this test + * unless under xnu-1598 or later, see 8048580. */ + /* if (allocators_idx != MACH_VM_MAP_NAMED_ENTRY + || xnu_version >= 1598) { */ + if (allocators_idx != MACH_VM_MAP_NAMED_ENTRY) { + run_suite(set_up_allocator, allocate_invalid_large_size_test, do_nothing, "%s invalid large size allocation test", + allocators[allocators_idx].description); + } + } + /* mach_vm_map() only large mask overflow tests. 
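+ * (An alignment mask spanning nearly the whole address width cannot be
+ * satisfied by any candidate address, so mach_vm_map() is expected to
+ * fail instead of wrapping around.)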
*/ + for (sizes_idx = 0; sizes_idx < numofsizes; sizes_idx++) { + run_suite(set_up_vm_size, mach_vm_map_large_mask_overflow_error_test, do_nothing, + "mach_vm_map() large mask overflow " + "error test, size: 0x%jx (%ju)", + (uintmax_t)vm_sizes[sizes_idx].size, (uintmax_t)vm_sizes[sizes_idx].size); + } +} + +void +run_deallocate_test_suites() +{ + UnitTests access_deallocated_memory_tests = { + {"Read start of deallocated range", test_access_deallocated_range_start}, + {"Read middle of deallocated range", test_access_deallocated_range_middle}, + {"Read end of deallocated range", test_access_deallocated_range_end}, + }; + UnitTests deallocate_reallocate_tests = { + {"Deallocate twice", test_deallocate_twice}, + {"Write pattern, deallocate, reallocate (deallocated " + "memory is inaccessible), and verify memory is " + "zero-filled", + test_write_pattern_deallocate_reallocate_zero_filled}, + }; + UnitTests deallocate_null_map_test = { + {"Deallocate in NULL VM map", test_deallocate_in_null_map}, + }; + UnitTests deallocate_edge_case_tests = { + {"Deallocate zero size ranges", test_deallocate_zero_size_ranges}, + {"Deallocate memory ranges whose end rounds to 0x0", test_deallocate_rounded_zero_end_ranges}, + {"Deallocate wrapped around memory ranges", test_deallocate_wrapped_around_ranges}, + }; + UnitTests deallocate_suicide_test = { + {"Deallocate whole address space", test_deallocate_suicide}, + }; + + /* All allocations done with mach_vm_allocate(). */ + set_allocator(wrapper_mach_vm_allocate); + + /* Run the test suites with various VM sizes, and unspecified or + * fixed (page-aligned or page-unaligned), addresses. */ + for (sizes_idx = 0; sizes_idx < numofsizes; sizes_idx++) { + for (flags_idx = 0; flags_idx < numofflags; flags_idx++) { + for (alignments_idx = 0; alignments_idx < numofalignments; alignments_idx++) { + /* An allocated address will be page-aligned. */ + if (flags_idx == ANYWHERE && alignments_idx == UNALIGNED) { + continue; + } + /* Accessing deallocated memory should cause a segmentation + * fault. */ + /* Nothing gets deallocated if size is zero. */ + if (sizes_idx != ZERO_BYTES) { + set_expected_signal(SIGSEGV); + run_suite(set_up_vm_variables_and_allocate, access_deallocated_memory_tests, do_nothing, + "Deallocated memory access tests, " + "%s%s address, %s size: 0x%jx (%ju)", + address_flags[flags_idx].description, + (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description, + vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size, + (uintmax_t)vm_sizes[sizes_idx].size); + set_expected_signal(0); + } + run_suite(set_up_vm_variables_and_allocate, deallocate_reallocate_tests, do_nothing, + "Deallocation and reallocation tests, %s%s " + "address, %s size: 0x%jx (%ju)", + address_flags[flags_idx].description, + (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description, + vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size, + (uintmax_t)vm_sizes[sizes_idx].size); + run_suite(set_up_vm_variables, deallocate_null_map_test, do_nothing, + "mach_vm_deallocate() null map test, " + "%s%s address, %s size: 0x%jx (%ju)", + address_flags[flags_idx].description, + (flags_idx == ANYWHERE) ? 
"" : address_alignments[alignments_idx].description, + vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size, + (uintmax_t)vm_sizes[sizes_idx].size); + } + } + } + run_suite(do_nothing, deallocate_edge_case_tests, do_nothing, "Edge case deallocation tests"); + + set_expected_signal(-1); /* SIGSEGV or SIGBUS */ + run_suite(do_nothing, deallocate_suicide_test, do_nothing, "Whole address space deallocation test"); + set_expected_signal(0); +} + +void +run_read_test_suites() +{ + UnitTests read_main_tests = { + {"Read address is nonzero iff size is nonzero", test_nonzero_address_iff_nonzero_size}, + {"Read address has the correct boundary offset", test_read_address_offset}, + {"Reallocate read pages", test_reallocate_pages}, + {"Read and verify zero-filled memory", test_zero_filled}, + }; + UnitTests read_pattern_tests = { + {"Read address-filled pattern", test_read_address_filled}, + {"Read checkerboard pattern", test_read_checkerboard}, + {"Read reverse checkerboard pattern", test_read_reverse_checkerboard}, + }; + UnitTests read_null_map_test = { + {"Read from NULL VM map", test_read_null_map}, + }; + UnitTests read_edge_case_tests = { + {"Read zero size", test_read_zero_size}, + {"Read invalid large size", test_read_invalid_large_size}, + {"Read wrapped around memory ranges", test_read_wrapped_around_ranges}, + }; + UnitTests read_inaccessible_tests = { + {"Read partially decallocated memory", test_read_partially_deallocated_range}, + {"Read partially read-protected memory", test_read_partially_unreadable_range}, + }; + + /* All allocations done with mach_vm_allocate(). */ + set_allocator(wrapper_mach_vm_allocate); + + /* Run the test suites with various VM sizes, and unspecified or + * fixed (page-aligned or page-unaligned) addresses. */ + for (sizes_idx = 0; sizes_idx < numofsizes; sizes_idx++) { + for (flags_idx = 0; flags_idx < numofflags; flags_idx++) { + for (alignments_idx = 0; alignments_idx < numofalignments; alignments_idx++) { + /* An allocated address will be page-aligned. */ + if (flags_idx == ANYWHERE && alignments_idx == UNALIGNED) { + continue; + } + run_suite(set_up_vm_variables_allocate_read_deallocate, read_main_tests, deallocate, + "mach_vm_read() " + "main tests, %s%s address, %s size: 0x%jx (%ju)", + address_flags[flags_idx].description, + (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description, + vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size, + (uintmax_t)vm_sizes[sizes_idx].size); + run_suite(set_up_vm_variables_and_allocate_extra_page, read_pattern_tests, deallocate, + "mach_vm_read() pattern tests, %s%s address, %s " + "size: 0x%jx (%ju)", + address_flags[flags_idx].description, + (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description, + vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size, + (uintmax_t)vm_sizes[sizes_idx].size); + run_suite(set_up_vm_variables_and_allocate_extra_page, read_null_map_test, deallocate_extra_page, + "mach_vm_read() null map test, " + "%s%s address, %s size: 0x%jx (%ju)", + address_flags[flags_idx].description, + (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description, + vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size, + (uintmax_t)vm_sizes[sizes_idx].size); + /* A zero size range is always accessible. 
*/ + if (sizes_idx != ZERO_BYTES) { + run_suite(set_up_vm_variables_and_allocate_extra_page, read_inaccessible_tests, deallocate_extra_page, + "mach_vm_read() inaccessibility tests, %s%s " + "address, %s size: 0x%jx (%ju)", + address_flags[flags_idx].description, + (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description, + vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size, + (uintmax_t)vm_sizes[sizes_idx].size); + } + } + } + } + run_suite(do_nothing, read_edge_case_tests, do_nothing, "mach_vm_read() fixed size tests"); +} + +void +run_write_test_suites() +{ + UnitTests write_main_tests = { + {"Write and verify zero-filled memory", test_zero_filled_write}, + }; + UnitTests write_pattern_tests = { + {"Write address-filled pattern", test_address_filled_write}, + {"Write checkerboard pattern", test_checkerboard_write}, + {"Write reverse checkerboard pattern", test_reverse_checkerboard_write}, + }; + UnitTests write_edge_case_tests = { + {"Write into NULL VM map", test_write_null_map}, {"Write zero size", test_write_zero_size}, + }; + UnitTests write_inaccessible_tests = { + {"Write partially decallocated buffer", test_write_partially_deallocated_buffer}, + {"Write partially read-protected buffer", test_write_partially_unreadable_buffer}, + {"Write on partially deallocated range", test_write_on_partially_deallocated_range}, + {"Write on partially write-protected range", test_write_on_partially_unwritable_range}, + }; + + /* All allocations done with mach_vm_allocate(). */ + set_allocator(wrapper_mach_vm_allocate); + + /* Run the test suites with various destination sizes and + * unspecified or fixed (page-aligned or page-unaligned) + * addresses, and various buffer sizes and boundary offsets. */ + for (sizes_idx = 0; sizes_idx < numofsizes; sizes_idx++) { + for (flags_idx = 0; flags_idx < numofflags; flags_idx++) { + for (alignments_idx = 0; alignments_idx < numofalignments; alignments_idx++) { + for (buffer_sizes_idx = 0; buffer_sizes_idx < numofsizes; buffer_sizes_idx++) { + for (offsets_idx = 0; offsets_idx < numofoffsets; offsets_idx++) { + /* An allocated address will be page-aligned. */ + if ((flags_idx == ANYWHERE && alignments_idx == UNALIGNED)) { + continue; + } + run_suite(set_up_vm_and_buffer_variables_allocate_for_writing, write_edge_case_tests, + deallocate_vm_and_buffer, + "mach_vm_write() edge case tests, %s%s address, %s " + "size: 0x%jx (%ju), buffer %s size: 0x%jx (%ju), " + "buffer boundary offset: %d", + address_flags[flags_idx].description, + (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description, + vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size, + (uintmax_t)vm_sizes[sizes_idx].size, vm_sizes[buffer_sizes_idx].description, + (uintmax_t)vm_sizes[buffer_sizes_idx].size, (uintmax_t)vm_sizes[buffer_sizes_idx].size, + buffer_offsets[offsets_idx].offset); + /* A zero size buffer is always accessible. */ + if (buffer_sizes_idx != ZERO_BYTES) { + run_suite(set_up_vm_and_buffer_variables_allocate_for_writing, write_inaccessible_tests, + deallocate_vm_and_buffer, + "mach_vm_write() inaccessibility tests, " + "%s%s address, %s size: 0x%jx (%ju), buffer " + "%s size: 0x%jx (%ju), buffer boundary " + "offset: %d", + address_flags[flags_idx].description, + (flags_idx == ANYWHERE) ? 
"" : address_alignments[alignments_idx].description, + vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size, + (uintmax_t)vm_sizes[sizes_idx].size, vm_sizes[buffer_sizes_idx].description, + (uintmax_t)vm_sizes[buffer_sizes_idx].size, (uintmax_t)vm_sizes[buffer_sizes_idx].size, + buffer_offsets[offsets_idx].offset); + } + /* The buffer cannot be larger than the destination. */ + if (vm_sizes[sizes_idx].size < vm_sizes[buffer_sizes_idx].size) { + continue; + } + run_suite(set_up_vm_and_buffer_variables_allocate_write, write_main_tests, deallocate_vm_and_buffer, + "mach_vm_write() main tests, %s%s address, %s " + "size: 0x%jx (%ju), buffer %s size: 0x%jx (%ju), " + "buffer boundary offset: %d", + address_flags[flags_idx].description, + (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description, + vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size, + (uintmax_t)vm_sizes[sizes_idx].size, vm_sizes[buffer_sizes_idx].description, + (uintmax_t)vm_sizes[buffer_sizes_idx].size, (uintmax_t)vm_sizes[buffer_sizes_idx].size, + buffer_offsets[offsets_idx].offset); + run_suite(set_up_vm_and_buffer_variables_allocate_for_writing, write_pattern_tests, + deallocate_vm_and_buffer, + "mach_vm_write() pattern tests, %s%s address, %s " + "size: 0x%jx (%ju), buffer %s size: 0x%jx (%ju), " + "buffer boundary offset: %d", + address_flags[flags_idx].description, + (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description, + vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size, + (uintmax_t)vm_sizes[sizes_idx].size, vm_sizes[buffer_sizes_idx].description, + (uintmax_t)vm_sizes[buffer_sizes_idx].size, (uintmax_t)vm_sizes[buffer_sizes_idx].size, + buffer_offsets[offsets_idx].offset); + } + } + } + } + } +} + +void +run_protect_test_suites() +{ + UnitTests readprotection_main_tests = { + {"Read-protect, read-allow and verify zero-filled memory", test_zero_filled_readprotect}, + {"Verify that region is read-protected iff size is " + "nonzero", + test_verify_readprotection}, + }; + UnitTests access_readprotected_memory_tests = { + {"Read start of read-protected range", test_access_readprotected_range_start}, + {"Read middle of read-protected range", test_access_readprotected_range_middle}, + {"Read end of read-protected range", test_access_readprotected_range_end}, + }; + UnitTests writeprotection_main_tests = { + {"Write-protect and verify zero-filled memory", test_zero_filled_extended}, + {"Verify that region is write-protected iff size is " + "nonzero", + test_verify_writeprotection}, + }; + UnitTests write_writeprotected_memory_tests = { + {"Write at start of write-protected range", test_write_writeprotected_range_start}, + {"Write in middle of write-protected range", test_write_writeprotected_range_middle}, + {"Write at end of write-protected range", test_write_writeprotected_range_end}, + }; + UnitTests protect_edge_case_tests = { + {"Read-protect zero size ranges", test_readprotect_zero_size}, + {"Write-protect zero size ranges", test_writeprotect_zero_size}, + {"Read-protect wrapped around memory ranges", test_readprotect_wrapped_around_ranges}, + {"Write-protect wrapped around memory ranges", test_writeprotect_wrapped_around_ranges}, + }; + + /* All allocations done with mach_vm_allocate(). */ + set_allocator(wrapper_mach_vm_allocate); + + /* Run the test suites with various VM sizes, and unspecified or + * fixed (page-aligned or page-unaligned), addresses. 
*/ + for (sizes_idx = 0; sizes_idx < numofsizes; sizes_idx++) { + for (flags_idx = 0; flags_idx < numofflags; flags_idx++) { + for (alignments_idx = 0; alignments_idx < numofalignments; alignments_idx++) { + /* An allocated address will be page-aligned. */ + if (flags_idx == ANYWHERE && alignments_idx == UNALIGNED) { + continue; + } + run_suite(set_up_vm_variables_allocate_readprotect, readprotection_main_tests, deallocate_extra_page, + "Main read-protection tests, %s%s address, %s " + "size: 0x%jx (%ju)", + address_flags[flags_idx].description, + (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description, + vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size, + (uintmax_t)vm_sizes[sizes_idx].size); + run_suite(set_up_vm_variables_allocate_writeprotect, writeprotection_main_tests, deallocate_extra_page, + "Main write-protection tests, %s%s address, %s " + "size: 0x%jx (%ju)", + address_flags[flags_idx].description, + (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description, + vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size, + (uintmax_t)vm_sizes[sizes_idx].size); + /* Nothing gets protected if size is zero. */ + if (sizes_idx != ZERO_BYTES) { + set_expected_signal(SIGBUS); + /* Accessing read-protected memory should cause a bus + * error. */ + run_suite(set_up_vm_variables_allocate_readprotect, access_readprotected_memory_tests, deallocate_extra_page, + "Read-protected memory access tests, %s%s " + "address, %s size: 0x%jx (%ju)", + address_flags[flags_idx].description, + (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description, + vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size, + (uintmax_t)vm_sizes[sizes_idx].size); + /* Writing on write-protected memory should cause a bus + * error. */ + run_suite(set_up_vm_variables_allocate_writeprotect, write_writeprotected_memory_tests, deallocate_extra_page, + "Write-protected memory writing tests, %s%s " + "address, %s size: 0x%jx (%ju)", + address_flags[flags_idx].description, + (flags_idx == ANYWHERE) ? 
"" : address_alignments[alignments_idx].description, + vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size, + (uintmax_t)vm_sizes[sizes_idx].size); + set_expected_signal(0); + } + } + } + } + run_suite(do_nothing, protect_edge_case_tests, do_nothing, "Edge case protection tests"); +} + +void +run_copy_test_suites() +{ + /* Copy tests */ + UnitTests copy_main_tests = { + {"Copy and verify zero-filled memory", test_zero_filled_copy_dest}, + }; + UnitTests copy_pattern_tests = { + {"Copy address-filled pattern", test_copy_address_filled}, + {"Copy checkerboard pattern", test_copy_checkerboard}, + {"Copy reverse checkerboard pattern", test_copy_reverse_checkerboard}, + }; + UnitTests copy_edge_case_tests = { + {"Copy with NULL VM map", test_copy_null_map}, + {"Copy zero size", test_copy_zero_size}, + {"Copy invalid large size", test_copy_invalid_large_size}, + {"Read wrapped around memory ranges", test_copy_wrapped_around_ranges}, + }; + UnitTests copy_inaccessible_tests = { + {"Copy source partially decallocated region", test_copy_source_partially_deallocated_region}, + /* XXX */ + {"Copy destination partially decallocated region", test_copy_dest_partially_deallocated_region}, + {"Copy source partially read-protected region", test_copy_source_partially_unreadable_region}, + /* XXX */ + {"Copy destination partially write-protected region", test_copy_dest_partially_unwriteable_region}, + {"Copy source on partially deallocated range", test_copy_source_on_partially_deallocated_range}, + {"Copy destination on partially deallocated range", test_copy_dest_on_partially_deallocated_range}, + {"Copy source on partially read-protected range", test_copy_source_on_partially_unreadable_range}, + {"Copy destination on partially write-protected range", test_copy_dest_on_partially_unwritable_range}, + }; + + UnitTests copy_shared_mode_tests = { + {"Copy using freshly allocated source", test_vmcopy_fresh_source}, + {"Copy using shared source", test_vmcopy_shared_source}, + {"Copy using a \'copied from\' source", test_vmcopy_copied_from_source}, + {"Copy using a \'copied to\' source", test_vmcopy_copied_to_source}, + {"Copy using a true shared source", test_vmcopy_trueshared_source}, + {"Copy using a private aliased source", test_vmcopy_private_aliased_source}, + }; + + /* All allocations done with mach_vm_allocate(). */ + set_allocator(wrapper_mach_vm_allocate); + + /* All the tests are done with page size regions. */ + set_vm_size(vm_page_size); + + /* Run the test suites with various shared modes for source */ + for (vmcopy_action_idx = 0; vmcopy_action_idx < numofvmcopyactions; vmcopy_action_idx++) { + run_suite(set_up_copy_shared_mode_variables, copy_shared_mode_tests, do_nothing, "Copy shared mode tests, %s", + vmcopy_actions[vmcopy_action_idx].description); + } + + for (sizes_idx = 0; sizes_idx < numofsizes; sizes_idx++) { + for (flags_idx = 0; flags_idx < numofflags; flags_idx++) { + for (alignments_idx = 0; alignments_idx < numofalignments; alignments_idx++) { + for (buffer_sizes_idx = 0; buffer_sizes_idx < numofsizes; buffer_sizes_idx++) { + for (offsets_idx = 0; offsets_idx < numofoffsets; offsets_idx++) { + /* An allocated address will be page-aligned. 
*/ + if ((flags_idx == ANYWHERE && alignments_idx == UNALIGNED)) { + continue; + } + run_suite(set_up_vm_and_buffer_variables_allocate_for_copying, copy_edge_case_tests, + deallocate_vm_and_buffer, + "mach_vm_copy() edge case tests, %s%s address, %s " + "size: 0x%jx (%ju), buffer %s size: 0x%jx (%ju), " + "buffer boundary offset: %d", + address_flags[flags_idx].description, + (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description, + vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size, + (uintmax_t)vm_sizes[sizes_idx].size, vm_sizes[buffer_sizes_idx].description, + (uintmax_t)vm_sizes[buffer_sizes_idx].size, (uintmax_t)vm_sizes[buffer_sizes_idx].size, + buffer_offsets[offsets_idx].offset); + /* The buffer cannot be larger than the destination. */ + if (vm_sizes[sizes_idx].size < vm_sizes[buffer_sizes_idx].size) { + continue; + } + + /* A zero size buffer is always accessible. */ + if (buffer_sizes_idx != ZERO_BYTES) { + run_suite(set_up_vm_and_buffer_variables_allocate_for_copying, copy_inaccessible_tests, + deallocate_vm_and_buffer, + "mach_vm_copy() inaccessibility tests, " + "%s%s address, %s size: 0x%jx (%ju), buffer " + "%s size: 0x%jx (%ju), buffer boundary " + "offset: %d", + address_flags[flags_idx].description, + (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description, + vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size, + (uintmax_t)vm_sizes[sizes_idx].size, vm_sizes[buffer_sizes_idx].description, + (uintmax_t)vm_sizes[buffer_sizes_idx].size, (uintmax_t)vm_sizes[buffer_sizes_idx].size, + buffer_offsets[offsets_idx].offset); + } + run_suite(set_up_source_and_dest_variables_allocate_copy, copy_main_tests, deallocate_vm_and_buffer, + "mach_vm_copy() main tests, %s%s address, %s " + "size: 0x%jx (%ju), destination %s size: 0x%jx (%ju), " + "destination boundary offset: %d", + address_flags[flags_idx].description, + (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description, + vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size, + (uintmax_t)vm_sizes[sizes_idx].size, vm_sizes[buffer_sizes_idx].description, + (uintmax_t)vm_sizes[buffer_sizes_idx].size, (uintmax_t)vm_sizes[buffer_sizes_idx].size, + buffer_offsets[offsets_idx].offset); + run_suite(set_up_source_and_dest_variables_allocate_copy, copy_pattern_tests, deallocate_vm_and_buffer, + "mach_vm_copy() pattern tests, %s%s address, %s " + "size: 0x%jx (%ju) destination %s size: 0x%jx (%ju), " + "destination boundary offset: %d", + address_flags[flags_idx].description, + (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description, + vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size, + (uintmax_t)vm_sizes[sizes_idx].size, vm_sizes[buffer_sizes_idx].description, + (uintmax_t)vm_sizes[buffer_sizes_idx].size, (uintmax_t)vm_sizes[buffer_sizes_idx].size, + buffer_offsets[offsets_idx].offset); + } + } + } + } + } +} + +void +perform_test_with_options(test_option_t options) +{ + process_options(options); + + /* CoreOSZin 12Z30: VMUnitTest fails: + * error finding xnu major version number. 
*/ + /* printf("xnu version is %s.\n\n", xnu_version_string()); */ + + if (flag_run_allocate_test) { + run_allocate_test_suites(); + } + + if (flag_run_deallocate_test) { + run_deallocate_test_suites(); + } + + if (flag_run_read_test) { + run_read_test_suites(); + } + + if (flag_run_write_test) { + run_write_test_suites(); + } + + if (flag_run_protect_test) { + run_protect_test_suites(); + } + + if (flag_run_copy_test) { + run_copy_test_suites(); + } + + log_aggregated_results(); +} + +T_DECL(vm_test_allocate, "Allocate VM unit test") +{ + test_options.to_flags = VM_TEST_ALLOCATE; + test_options.to_vmsize = 0; + test_options.to_quietness = ERROR_ONLY_QUIETNESS; + + perform_test_with_options(test_options); +} + +T_DECL(vm_test_deallocate, "Deallocate VM unit test", + T_META_IGNORECRASHES(".*vm_allocation.*")) +{ + test_options.to_flags = VM_TEST_DEALLOCATE; + test_options.to_vmsize = 0; + test_options.to_quietness = ERROR_ONLY_QUIETNESS; + + perform_test_with_options(test_options); +} + +T_DECL(vm_test_read, "Read VM unit test") +{ + test_options.to_flags = VM_TEST_READ; + test_options.to_vmsize = 0; + test_options.to_quietness = ERROR_ONLY_QUIETNESS; + + perform_test_with_options(test_options); +} + +T_DECL(vm_test_write, "Write VM unit test") +{ + test_options.to_flags = VM_TEST_WRITE; + test_options.to_vmsize = 0; + test_options.to_quietness = ERROR_ONLY_QUIETNESS; + + perform_test_with_options(test_options); +} + +T_DECL(vm_test_protect, "Protect VM unit test", + T_META_IGNORECRASHES(".*vm_allocation.*")) +{ + test_options.to_flags = VM_TEST_PROTECT; + test_options.to_vmsize = 0; + test_options.to_quietness = ERROR_ONLY_QUIETNESS; + + perform_test_with_options(test_options); +} + +T_DECL(vm_test_copy, "Copy VM unit test") +{ + test_options.to_flags = VM_TEST_COPY; + test_options.to_vmsize = 0; + test_options.to_quietness = ERROR_ONLY_QUIETNESS; + + perform_test_with_options(test_options); +} diff --git a/tests/vm/zone_gc_replenish_test.c b/tests/vm/zone_gc_replenish_test.c new file mode 100644 index 000000000..c979e6d7d --- /dev/null +++ b/tests/vm/zone_gc_replenish_test.c @@ -0,0 +1,78 @@ +#include +#include + +#include +#include + + +static void * +gc_thread_func(__unused void *arg) +{ + int err; + unsigned int count = 1; + size_t s = sizeof(count); + time_t start = time(NULL); + time_t end = time(NULL); + + /* + * Keep kicking the test for 15 seconds to see if we can panic() the kernel + */ + while (time(&end) < start + 15) { + err = sysctlbyname("kern.zone_gc_replenish_test", &count, &s, &count, s); + + /* If the sysctl isn't supported, test succeeds */ + if (err != 0) { + T_SKIP("sysctl kern.zone_gc_replenish_test not found, skipping test"); + break; + } + } + return NULL; +} + +static void * +alloc_thread_func(__unused void *arg) +{ + int err; + unsigned int count = 1; + size_t s = sizeof(count); + time_t start = time(NULL); + time_t end = time(NULL); + + /* + * Keep kicking the test for 15 seconds to see if we can panic() the kernel + */ + while (time(&end) < start + 15) { + err = sysctlbyname("kern.zone_alloc_replenish_test", &count, &s, &count, s); + + /* If the sysctl isn't supported, test succeeds */ + if (err != 0) { + T_SKIP("sysctl kern.zone_alloc_replenish_test not found, skipping test"); + break; + } + } + return NULL; +} + +T_DECL(zone_gc_replenish_test, + "Test zone garbage collection, exhaustion and replenishment", + T_META_NAMESPACE("xnu.vm"), + T_META_CHECK_LEAKS(false)) +{ + pthread_attr_t attr; + pthread_t gc_thread; + pthread_t alloc_thread; + int ret; + + ret = 
pthread_attr_init(&attr); + T_QUIET; T_ASSERT_MACH_SUCCESS(ret, "pthread_attr_init"); + + ret = pthread_create(&gc_thread, &attr, gc_thread_func, NULL); + T_QUIET; T_ASSERT_POSIX_ZERO(ret, "gc pthread_create"); + + ret = pthread_create(&alloc_thread, &attr, alloc_thread_func, NULL); + T_QUIET; T_ASSERT_POSIX_ZERO(ret, "alloc pthread_create"); + + T_ASSERT_POSIX_ZERO(pthread_join(gc_thread, NULL), NULL); + T_ASSERT_POSIX_ZERO(pthread_join(alloc_thread, NULL), NULL); + T_PASS("Ran 15 seconds with no panic"); +} diff --git a/tests/vm_kern_count_wired_kernelcache.c b/tests/vm_kern_count_wired_kernelcache.c new file mode 100644 index 000000000..21b381fec --- /dev/null +++ b/tests/vm_kern_count_wired_kernelcache.c @@ -0,0 +1,73 @@ +#include +#include + +#include +#include +#include + +#include + +/* + * Ensure that mach_memory_info includes a counter for the kernelcache size. + */ + +T_GLOBAL_META(T_META_NAMESPACE("xnu.vm")); + +T_DECL(vm_kern_count_wired_kernelcache, + "mach_memory_info returns a counter for for kernelcache", + T_META_ASROOT(true)) +{ + kern_return_t kr; + uint64_t i; + mach_zone_name_t *name = NULL; + unsigned int nameCnt = 0; + mach_zone_info_t *info = NULL; + unsigned int infoCnt = 0; + mach_memory_info_t *wiredInfo = NULL; + unsigned int wiredInfoCnt = 0; + + kr = mach_memory_info(mach_host_self(), &name, &nameCnt, &info, &infoCnt, + &wiredInfo, &wiredInfoCnt); + T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "mach_memory_info"); + + bool found_kernelcache_counter = false; + uint64_t static_kernelcache_size = 0; + uint64_t wired_memory_boot = 0; + for (i = 0; i < wiredInfoCnt; i++) { + const mach_memory_info_t *curr = &wiredInfo[i]; + uint32_t type = curr->flags & VM_KERN_SITE_TYPE; + if (type == VM_KERN_SITE_COUNTER) { + if (curr->site == VM_KERN_COUNT_WIRED_STATIC_KERNELCACHE) { + found_kernelcache_counter = true; + static_kernelcache_size = curr->size; + } else if (curr->site == VM_KERN_COUNT_WIRED_BOOT) { + wired_memory_boot = curr->size; + } + } + } + T_QUIET; T_ASSERT_TRUE(found_kernelcache_counter, "mach_memory_info returned kernelcache counter."); + // Sanity check that the counter isn't 0. + T_QUIET; T_ASSERT_GT(static_kernelcache_size, 0ULL, "kernelcache counter > 0"); + // Sanity check that the counter is less than the amount of wired memory + // at boot. 
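+ // The static kernelcache is wired as part of the boot-time footprint,
+ // so its counter should never exceed VM_KERN_COUNT_WIRED_BOOT.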
+ T_QUIET; T_ASSERT_LE(static_kernelcache_size, wired_memory_boot, "kernelcache counter <= VM_KERN_COUNT_WIRED_BOOT"); + + // Cleanup + if ((name != NULL) && (nameCnt != 0)) { + kr = vm_deallocate(mach_task_self(), (vm_address_t) name, + (vm_size_t) (nameCnt * sizeof *name)); + T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_deallocate name"); + } + + if ((info != NULL) && (infoCnt != 0)) { + kr = vm_deallocate(mach_task_self(), (vm_address_t) info, + (vm_size_t) (infoCnt * sizeof *info)); + T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_deallocate info"); + } + + if ((wiredInfo != NULL) && (wiredInfoCnt != 0)) { + kr = vm_deallocate(mach_task_self(), (vm_address_t) wiredInfo, + (vm_size_t) (wiredInfoCnt * sizeof *wiredInfo)); + T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_deallocate wiredInfo"); + } +} diff --git a/tests/vm_memory_tests_src/common.c b/tests/vm_memory_tests_src/common.c new file mode 100644 index 000000000..b6d65b0a4 --- /dev/null +++ b/tests/vm_memory_tests_src/common.c @@ -0,0 +1,173 @@ +#include "mach_vm_tests.h" +#include "unistd.h" + +#define TEST_TXT_FILE "/tmp/xnu.vm.sharing.test.txt" + +static const char * lorem_text = "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. \ +Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate\ +velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum."; + + +static int fd = 0; +static struct stat sb; +mach_port_t persistentReplyPort; + +int +mach_server_data_setup(void **buffer) +{ + if (0 != access(TEST_TXT_FILE, F_OK)) { + /* create a test file */ + const size_t lorem_text_length = strlen(lorem_text); + int w_fd = open(TEST_TXT_FILE, O_WRONLY | O_CREAT | O_TRUNC, (mode_t)0666); + size_t required_length = 450783; + assert(w_fd >= 0); + size_t bytes_written = 0; + while (bytes_written < required_length) { + bytes_written += (size_t)write(w_fd, &lorem_text[0], (size_t)(lorem_text_length - 1)); + if ((bytes_written + lorem_text_length) > required_length) { + bytes_written += (size_t)write(w_fd, &lorem_text[0], (size_t)(required_length - bytes_written)); + break; + } + } + close(w_fd); + } + + /* Sample data set needs to be mapped in our space */ + fd = open(TEST_TXT_FILE, O_RDONLY | O_EXCL, 0666); + + if (fd < 0) { + printf("mach_server_data_setup: cannot open file %s - %d (%s).\n", TEST_TXT_FILE, errno, strerror(errno)); + return errno; + } + + if (fstat(fd, &sb) < 0) { + printf("mach_server_data_setup: cannot stat file %s - %d (%s).\n", TEST_TXT_FILE, errno, strerror(errno)); + return errno; + } + +#if MMAP_PATH + *buffer = mmap(NULL, sb.st_size, PROT_READ, MAP_FILE | MAP_PRIVATE, fd, 0); + + if (*buffer == MAP_FAILED) { + printf("mach_server_remap: mmap failed - %d (%s).\n", errno, strerror(errno)); + *buffer = NULL; + } +#else + kern_return_t kr = KERN_SUCCESS; + kr = mach_vm_allocate(mach_task_self(), (mach_vm_address_t *)buffer, (mach_vm_size_t)sb.st_size, VM_FLAGS_ANYWHERE); + assert(kr == KERN_SUCCESS); +#endif + return 0; +} + +void +mach_server_data_cleanup(void *buffer, mach_vm_address_t src, mach_vm_size_t size) +{ +#if MMAP_PATH + if (buffer) { + munmap(buffer, sb.st_size); + } +#else + mach_vm_deallocate(mach_task_self(), (mach_vm_address_t)buffer, (mach_vm_size_t)sb.st_size); +#endif + + if (src) { + mach_vm_deallocate(mach_task_self(), src, size); + } + + if (fd 
> 2) { + close(fd); + } +} + +void +mach_server_construct_header(ipc_message_t *message, mach_port_t replyPort) +{ + bzero(message, sizeof(*message)); + message->header.msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND, MACH_MSG_TYPE_MAKE_SEND_ONCE) | MACH_MSGH_BITS_COMPLEX; + message->header.msgh_remote_port = persistentReplyPort;//serverPort; + message->header.msgh_local_port = replyPort; + message->header.msgh_size = sizeof(*message); + message->header.msgh_id = 1; +} + +void +mach_server_contruct_payload(ipc_message_t *message, + mach_vm_address_t src, + mach_port_t port, + mach_vm_size_t size, + mach_vm_offset_t misoffset, + boolean_t copy, + int vm_op) +{ + if (port == MACH_PORT_NULL) { + message->address = src;//LD TODO: (src + 8193); + } else { + message->body.msgh_descriptor_count = 1; + message->port_descriptor.name = port; + message->port_descriptor.disposition = MACH_MSG_TYPE_COPY_SEND; + message->port_descriptor.type = MACH_MSG_PORT_DESCRIPTOR; + } + + message->pid = (uint64_t)getpid(); + message->size = size; + message->vm_op = vm_op; + message->copy = copy; + message->misoffset = misoffset; +} + +void +mach_server_create_allocation(mach_vm_address_t *src, mach_vm_size_t size, void *buffer) +{ + kern_return_t kr = KERN_SUCCESS; + mach_vm_size_t chunk_size = 0; + unsigned int chunk_count = 0; + mach_vm_address_t localsrc = 0; + + kr = mach_vm_allocate(mach_task_self(), &localsrc, size, VM_FLAGS_ANYWHERE); + assert(KERN_SUCCESS == kr); + + chunk_size = MIN(size, (mach_vm_size_t)sb.st_size); + + if (chunk_size == 0) { + printf("mach_server_remap: Input size is 0\n"); + exit(0); + } + + chunk_count = (unsigned int)(size / (mach_vm_size_t)sb.st_size); + + if (debug && 0) { + printf("Chunks of size: 0x%llx and count: %d\n", chunk_size, chunk_count); + } + + for (unsigned int i = 0; i < chunk_count; i++) { + memcpy((void*)(localsrc + (i * chunk_size)), buffer, chunk_size); + } + + *src = localsrc; +} + +void +server_error_out(mach_port_t port) +{ + /* All done here...*/ + kern_return_t ret; + + mach_msg_size_t messageSize = sizeof(ipc_message_t) + sizeof(mach_msg_trailer_t) + 64; + ipc_message_t *message = (ipc_message_t *)calloc(1, messageSize); + + message->header.msgh_bits = MACH_MSGH_BITS_ZERO; + message->header.msgh_size = messageSize; + message->header.msgh_remote_port = MACH_PORT_NULL; + message->header.msgh_local_port = port; + + mach_server_construct_header(message, port); + message->vm_op = VM_OP_EXIT_ERROR; + ret = mach_msg(&message->header, MACH_SEND_MSG, message->header.msgh_size, 0, MACH_PORT_NULL, MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL); + if (ret != KERN_SUCCESS) { + T_LOG("ERROR: Failed to send message to client: (%d) %s\n", ret, mach_error_string(ret)); + exit(1); + } + T_LOG("server_error_out. 
abort()\n"); + abort(); +} diff --git a/tests/vm_memory_tests_src/mach_vm_tests.h b/tests/vm_memory_tests_src/mach_vm_tests.h new file mode 100644 index 000000000..49bc8af65 --- /dev/null +++ b/tests/vm_memory_tests_src/mach_vm_tests.h @@ -0,0 +1,63 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#define VM_OP_NONE (0) +#define VM_OP_UNMAP (1) +#define VM_OP_EXIT (2) +#define VM_OP_COPY (3) +#define VM_OP_READ (4) +#define VM_OP_MEMENTRY (5) +#define VM_OP_REMAP (6) +#define VM_OP_READ_OVERWRITE (7) +#define VM_OP_WRITE (8) +#define VM_OP_EXIT_ERROR (9) + +extern mach_port_t serverPort; +extern mach_port_t persistentReplyPort; +extern boolean_t debug; + +struct ipc_message { + mach_msg_header_t header; + mach_msg_body_t body; + mach_msg_port_descriptor_t port_descriptor; + boolean_t copy; + int vm_op; + uint64_t address; + uint64_t pid; + uint64_t size; + uint64_t misoffset; + char value[64]; +}; +typedef struct ipc_message ipc_message_t; + +void mach_vm_client(mach_port_t); +void mach_server_remap(mach_port_t); +void mach_server_read(mach_port_t, int); +void mach_server_make_memory_entry(mach_port_t); + +int mach_server_data_setup(void **); +void mach_server_data_cleanup(void *, mach_vm_address_t, mach_vm_size_t); +void mach_server_construct_header(ipc_message_t *, mach_port_t); +void mach_server_create_allocation(mach_vm_address_t *, mach_vm_size_t, void *); +void mach_server_contruct_payload(ipc_message_t *, mach_vm_address_t, mach_port_t, mach_vm_size_t, mach_vm_offset_t, boolean_t, int); +void server_error_out(mach_port_t); + + +#define MACH_VM_TEST_SERVICE_NAME "com.apple.test.xnu.vm.machVMTest" +#define VM_SPAWN_TOOL "/AppleInternal/Tests/xnu/darwintests/tools/vm_spawn_tool" diff --git a/tests/vm_memory_tests_src/main.c b/tests/vm_memory_tests_src/main.c new file mode 100644 index 000000000..8e987d901 --- /dev/null +++ b/tests/vm_memory_tests_src/main.c @@ -0,0 +1,155 @@ +#include "mach_vm_tests.h" + +T_GLOBAL_META(T_META_NAMESPACE("xnu.vm")); + +extern char **environ; + +static void +spawn_process(char *action, char *serviceName, char *extraArg, + mach_port_t *server_Port, pid_t *serverPid, boolean_t use4k); + +static void mach_client(void); + +mach_port_t serverPort; +static pid_t serverPid; + +boolean_t debug = TRUE; + +void +spawn_process(char *action, char *serviceName, char *extraArg, + mach_port_t *server_Port, pid_t *server_Pid, boolean_t use4k) +{ + char buffer[PATH_MAX]; + char *argv[10] = {0}; + int arg_index = 0; + pid_t pid = -1; + int r = proc_pidpath(getpid(), buffer, sizeof(buffer)); + T_ASSERT_NE(r, -1, "proc_pidpath"); + r = (int)strlcat(buffer, "_server", sizeof(buffer)); + T_ASSERT_LT(r, (int)sizeof(buffer), "strlcat"); + + if (use4k) { + int supported = 0; + size_t supported_size = sizeof(supported); + + r = sysctlbyname("debug.vm_mixed_pagesize_supported", &supported, &supported_size, NULL, 0); + if (r == 0 && supported) { + T_LOG("Using %s to spawn process with 4k", VM_SPAWN_TOOL); + argv[arg_index++] = VM_SPAWN_TOOL; + } else { + /* + * We didnt find debug.vm.mixed_page.supported OR its set to 0. + * Skip the test. 
+ */ + T_SKIP("Hardware doesn't support 4K pages, skipping test..."); + exit(0); + } + } + argv[arg_index++] = (char *)&buffer[0]; + argv[arg_index++] = (char *)action; + argv[arg_index++] = (char *)serviceName; + argv[arg_index++] = (char *)extraArg; + argv[arg_index++] = NULL; + + printf("posix_spawn with argv: "); + for (r = 0; r <= arg_index; r++) { + printf("%s ", argv[r]); + } + printf("\n"); + + T_LOG("Spawning %s process(%s) with service name %s at %s\n", action, buffer, serviceName, buffer); + + + posix_spawn_file_actions_t actions; + posix_spawn_file_actions_init(&actions); + + T_ASSERT_POSIX_ZERO(posix_spawn(&pid, buffer, &actions, NULL, argv, environ), "spawn %s", serviceName); + posix_spawn_file_actions_destroy(&actions); + + kern_return_t ret; + mach_port_t servicePort; + int attempts = 0; + const int kMaxAttempts = 10; + do { + sleep(1); + ret = bootstrap_look_up(bootstrap_port, serviceName, &servicePort); + attempts++; + } while (ret == BOOTSTRAP_UNKNOWN_SERVICE && attempts < kMaxAttempts); + + if (ret != KERN_SUCCESS) { + printf("ERROR: Failed bootstrap lookup for process with mach service name '%s': (%d) %s\n", serviceName, ret, mach_error_string(ret)); + if (pid > 0) { + kill(pid, SIGKILL); + } + T_FAIL("Failed bootstrap lookup for process with mach service"); + } + + *server_Port = servicePort; + *server_Pid = pid; + T_LOG("Server pid=%d port 0x%x", pid, servicePort); +} + + + + +void +mach_client() +{ + mach_port_t replyPort; + T_ASSERT_POSIX_ZERO(mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &replyPort), "create recieve port"); + T_ASSERT_POSIX_ZERO(mach_port_insert_right(mach_task_self(), replyPort, replyPort, MACH_MSG_TYPE_MAKE_SEND), "insert send port"); + + ipc_message_t message; + bzero(&message, sizeof(message)); + message.header.msgh_id = 1; + + message.header.msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND, MACH_MSG_TYPE_COPY_SEND); + message.header.msgh_remote_port = serverPort; + message.header.msgh_local_port = replyPort; + message.header.msgh_size = sizeof(message); + + /* reply creation is not necessary in this case. 
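+ * (mach_vm_client() allocates its own buffer for the request/reply loop
+ * that follows, so no separate reply buffer is needed here.)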
+ * mach_msg_size_t replySize = sizeof(ipc_message_t) + sizeof(mach_msg_trailer_t) + 64; + * ipc_message_t *reply = calloc(1, replySize); + */ + T_LOG("sending message to %d of size %u", message.header.msgh_remote_port, message.header.msgh_size); + kern_return_t ret = mach_msg(&message.header, MACH_SEND_MSG, message.header.msgh_size, 0, MACH_PORT_NULL, MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL); + T_ASSERT_MACH_SUCCESS(ret, "mach_msg to serverProcess"); + mach_vm_client(replyPort); + T_LOG("Sending SIGKILL to server(%d)", serverPid); + kill(serverPid, SIGKILL); +} + +T_DECL(memory_share_tests, + "test vm memory sharing between client and server process with different process PAGE_SIZE", + T_META_ASROOT(true)) +{ + boolean_t use4k = FALSE; + char serviceName[64]; + + struct sigaction action = { + .sa_handler = SIG_IGN, + .sa_flags = SA_NOCLDWAIT + }; + sigaction(SIGCHLD, &action, NULL); + + if (getenv("USE4K")) { + use4k = TRUE; + } + + if (getenv("QUIET")) { + debug = FALSE; + } + + T_LOG("running with use4k=%d debug=%d", use4k, (int)debug); + + strcpy(serviceName, MACH_VM_TEST_SERVICE_NAME); + + spawn_process("machserver", serviceName, NULL, &serverPort, &serverPid, use4k); + mach_client(); +} + +T_DECL_REF(memory_share_tests_4k, memory_share_tests, "vm memory sharing with 4k processes", + T_META_ENVVAR("USE4K=YES"), + T_META_ASROOT(true) + ); diff --git a/tests/vm_memory_tests_src/server.c b/tests/vm_memory_tests_src/server.c new file mode 100644 index 000000000..703e0a735 --- /dev/null +++ b/tests/vm_memory_tests_src/server.c @@ -0,0 +1,67 @@ +#include "mach_vm_tests.h" +boolean_t debug = TRUE; + +int +main() +{ + dispatch_source_t parentSource = dispatch_source_create(DISPATCH_SOURCE_TYPE_PROC, (uintptr_t)getppid(), DISPATCH_PROC_EXIT, NULL); + dispatch_source_set_event_handler(parentSource, ^{ + T_LOG("Event handler got invoked. Parent process died. 
Exiting"); + exit(1); + }); + dispatch_activate(parentSource); + + const char *serviceName = MACH_VM_TEST_SERVICE_NAME; + + kern_return_t ret; + mach_port_t bootstrap; + task_get_bootstrap_port(mach_task_self(), &bootstrap); + + mach_port_t port; + mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &port); + +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wdeprecated" + ret = bootstrap_register2(bootstrap, (char *)serviceName, port, BOOTSTRAP_ALLOW_LOOKUP); +#pragma clang diagnostic pop + + mach_msg_size_t messageSize = sizeof(ipc_message_t) + sizeof(mach_msg_trailer_t) + 64; + ipc_message_t *message = (ipc_message_t *)calloc(1, messageSize); + + message->header.msgh_bits = MACH_MSGH_BITS_ZERO; + message->header.msgh_size = messageSize; + message->header.msgh_remote_port = MACH_PORT_NULL; + message->header.msgh_local_port = port; + + ret = mach_msg(&message->header, MACH_RCV_MSG, 0, messageSize, port, MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL); + if (ret == KERN_SUCCESS) { + if (MACH_MSGH_BITS_REMOTE(message->header.msgh_bits) == MACH_MSG_TYPE_PORT_SEND) { + persistentReplyPort = message->header.msgh_remote_port; + mach_port_mod_refs(mach_task_self(), persistentReplyPort, MACH_PORT_RIGHT_SEND, 1); + } + } + + mach_server_make_memory_entry(port); + mach_server_remap(port); + mach_server_read(port, VM_OP_READ); + //mach_server_read(port, VM_OP_WRITE); + mach_server_read(port, VM_OP_READ_OVERWRITE); + + + message->header.msgh_bits = MACH_MSGH_BITS_ZERO; + message->header.msgh_size = messageSize; + message->header.msgh_remote_port = MACH_PORT_NULL; + message->header.msgh_local_port = port; + + mach_server_construct_header(message, port); + message->vm_op = VM_OP_EXIT; + ret = mach_msg(&message->header, MACH_SEND_MSG, message->header.msgh_size, 0, MACH_PORT_NULL, MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL); + if (ret != KERN_SUCCESS) { + T_LOG("ERROR: Failed to send message to client: (%d) %s\n", ret, mach_error_string(ret)); + return 1; + } + + (void)parentSource; + + return 0; +} diff --git a/tests/vm_memory_tests_src/vm_tests.c b/tests/vm_memory_tests_src/vm_tests.c new file mode 100644 index 000000000..47e23000b --- /dev/null +++ b/tests/vm_memory_tests_src/vm_tests.c @@ -0,0 +1,685 @@ +// +// vmremaptest.c +// +// Created by Lionel Desai on 9/16/19. +// Copyright © 2019 Apple. All rights reserved. +// + +#include "mach_vm_tests.h" +#include + + +#define TESTSZ (140 * 1024 * 1024ULL) + +void +mach_vm_client(mach_port_t port) +{ + mach_port_t memport = MACH_PORT_NULL; + mach_vm_address_t src = 0, dest = 0, tmp = 0; + mach_vm_size_t size = 0; + vm_prot_t cur_prot, max_prot; + mach_port_name_t lport = 0; + kern_return_t ret = 0; + boolean_t copy = FALSE; + mach_vm_offset_t misoffset = 0; + + mach_msg_type_number_t countp; + mach_msg_size_t messageSize = 0; + ipc_message_t *message = NULL; + + char buffer[PATH_MAX]; + ret = proc_pidpath(getpid(), buffer, sizeof(buffer)); + assert(ret != -1); + + messageSize = sizeof(ipc_message_t) + sizeof(mach_msg_trailer_t) + 64; + message = (ipc_message_t *)calloc(1, messageSize); + + message->header.msgh_bits = MACH_MSGH_BITS_ZERO; + message->header.msgh_size = messageSize; + message->header.msgh_remote_port = MACH_PORT_NULL; + message->header.msgh_local_port = port; + + while (1) { + /* Awaiting the pid/src. 
addr/size from the server so we know what to remap from where */ + ret = mach_msg(&message->header, MACH_RCV_MSG | MACH_RCV_LARGE, 0, messageSize, port, MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL); + if (ret == KERN_SUCCESS) { + if (debug) { + T_LOG("CLIENT: received info from server... 0x%llx, %lld, 0x%llx, %d - %d\n", message->address, message->size, message->misoffset, message->vm_op, message->copy); + } + + switch (message->vm_op) { + case VM_OP_REMAP: + ret = task_for_pid(mach_task_self(), (pid_t) message->pid, &lport); + T_QUIET; T_ASSERT_MACH_SUCCESS(ret, "task_for_pid"); + + copy = message->copy; + size = message->size; + src = message->address; + misoffset = 0; + + ret = mach_vm_allocate(mach_task_self(), &tmp, size + 16384, VM_FLAGS_ANYWHERE); + T_QUIET; T_ASSERT_MACH_SUCCESS(ret, "mach_vm_allocate"); + mach_vm_deallocate(mach_task_self(), tmp, size + 16384); + + dest = tmp + 4096; + + ret = mach_vm_remap(mach_task_self(), &dest, size, 0, VM_FLAGS_ANYWHERE | VM_FLAGS_RETURN_DATA_ADDR, + lport, src, copy, + &cur_prot, + &max_prot, + VM_INHERIT_NONE); + + if (ret) { + char dstval[64]; + memcpy(dstval, (void*) dest, 64); + T_LOG("CLIENT: mach_vm_remap FAILED: %s -- src 0x%llx, dest 0x%llx (%s)\n", mach_error_string(ret), src, dest, dstval); + T_FAIL("CLIENT: mach_vm_remap FAILED"); + } + + memcpy(message->value, (void*)dest, 64); + break; + + case VM_OP_READ_OVERWRITE: + ret = task_for_pid(mach_task_self(), (pid_t) message->pid, &lport); + T_QUIET; T_ASSERT_MACH_SUCCESS(ret, "task_for_pid"); + + size = message->size; + src = message->address; + misoffset = 0; + + mach_vm_size_t dest_size = 0; + ret = mach_vm_allocate(mach_task_self(), &tmp, size + 16384, VM_FLAGS_ANYWHERE); + assert(KERN_SUCCESS == ret); + + dest = tmp + 4096; + + ret = mach_vm_read_overwrite(lport, src, size, dest, &dest_size); + + if (ret) { + char dstval[64]; + memcpy(dstval, (void*) dest, 64); + T_LOG("CLIENT: mach_vm_read_overwrite FAILED: %s -- src 0x%llx, dest 0x%llx (%s)\n", mach_error_string(ret), src, dest, dstval); + T_FAIL("CLIENT: mach_vm_read_overwrite FAILED"); + } + + memcpy(message->value, (void*)dest, 64); + break; + + case VM_OP_READ: + ret = task_for_pid(mach_task_self(), (pid_t) message->pid, &lport); + T_QUIET; T_ASSERT_MACH_SUCCESS(ret, "task_for_pid"); + + size = message->size; + src = message->address; + misoffset = 0; + + ret = mach_vm_read(lport, src, size, (vm_offset_t*)&dest, &countp); + if (ret) { + char dstval[64]; + memcpy(dstval, (void*) dest, 64); + T_LOG("CLIENT: mach_vm_read FAILED: %s -- src 0x%llx, dest 0x%llx (%s)\n", mach_error_string(ret), src, dest, dstval); + T_FAIL("CLIENT: mach_vm_read FAILED"); + exit(1); + } + + memcpy(message->value, (void*)dest, 64); + break; + +#if 0 + case VM_OP_WRITE: + ret = task_for_pid(mach_task_self(), (pid_t) message->pid, &lport); + T_QUIET; T_ASSERT_MACH_SUCCESS(ret, "task_for_pid"); + + size = message->size; + src = message->address; + misoffset = 0; + + ret = mach_vm_write(lport, src, dest, countp); + if (ret) { + char dstval[64]; + memcpy(dstval, (void*) dest, 64); + T_LOG("CLIENT: mach_vm_write FAILED: %s -- src 0x%llx, dest 0x%llx (%s)\n", mach_error_string(ret), src, dest, dstval); + T_FAIL("CLIENT: mach_vm_write FAILED"); + } + + memcpy(message->value, (void*)dest, 64); + break; +#endif + case VM_OP_MEMENTRY: + assert(message->body.msgh_descriptor_count == 1); + dest = 0; + size = message->size; + memport = message->port_descriptor.name; + copy = message->copy; + if (copy) { + misoffset = 0; + } else { + misoffset = message->misoffset; + } 
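+ /*
+ * The port in the descriptor is a memory entry the server created with
+ * mach_make_memory_entry_64(); mapping it read-only below gives this
+ * process a view of the server's buffer, and misoffset, when nonzero,
+ * is the sample data's offset from the start of the mapping.
+ */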
+
+            cur_prot = max_prot = VM_PROT_READ;
+#if 0
+            /* This + VM_FLAGS_FIXED in mach_vm_map() is expected to return KERN_INVALID_ARG */
+            ret = mach_vm_allocate(mach_task_self(), &tmp, size + 16384, VM_FLAGS_ANYWHERE);
+            dest = tmp + 4095;
+            mach_vm_deallocate(mach_task_self(), tmp, size + 16384);
+#endif
+            ret = mach_vm_map(mach_task_self(), &dest, size, 0 /*mask*/,
+                VM_FLAGS_ANYWHERE | VM_FLAGS_RETURN_DATA_ADDR,
+                memport, 0 /*offset*/, copy, cur_prot, max_prot, VM_INHERIT_NONE);
+
+            if (ret) {
+                T_LOG("CLIENT: mach_vm_map FAILED: %s -- port 0x%x\n", mach_error_string(ret), memport);
+                T_FAIL("CLIENT: mach_vm_map FAILED");
+            }
+
+            memcpy(message->value, (void*)(dest + misoffset), 64);
+            break;
+
+        case VM_OP_UNMAP:
+            assert(dest);
+            ret = mach_vm_deallocate(mach_task_self(), dest, size);
+            if (ret) {
+                T_LOG("CLIENT: mach_vm_deallocate FAILED: %s -- dest 0x%llx, size 0x%llx\n", mach_error_string(ret), dest, size);
+                T_FAIL("CLIENT: mach_vm_deallocate FAILED");
+            }
+            /* No message to send here */
+            continue;
+
+        case VM_OP_NONE:
+            memcpy(message->value, (void*) (dest + misoffset), 64);
+            break;
+
+        case VM_OP_EXIT:
+            if (debug) {
+                T_LOG("CLIENT EXITING ****** \n");
+            }
+            return;
+
+        case VM_OP_EXIT_ERROR:
+            if (debug) {
+                T_LOG("CLIENT EXITING WITH ERROR****** \n");
+                T_FAIL("Received VM_OP_EXIT_ERROR");
+            }
+            return;
+        default:
+            break;
+        }
+
+        char dstval[64];
+        memcpy(dstval, (void*) message->value, 64);
+        dstval[63] = '\0';
+
+        if (debug) {
+            T_LOG("CLIENT: dest 0x%llx -> 0x%llx (0x%llx), *dest %s\n", dest, dest + misoffset, misoffset, dstval);
+            /*memcpy(dstval, (void*) (dest + misoffset), 64);
+             * dstval[63]='\0';
+             * T_LOG("*dest %s\n", dstval);*/
+        }
+
+        message->header.msgh_local_port = MACH_PORT_NULL;
+
+        ret = mach_msg(&message->header, MACH_SEND_MSG, message->header.msgh_size, 0, MACH_PORT_NULL, MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
+        T_QUIET; T_ASSERT_MACH_SUCCESS(ret, "CLIENT: mach_msg_send FAILED");
+    } else {
+        T_QUIET; T_ASSERT_MACH_SUCCESS(ret, "CLIENT: mach_msg_rcv FAILED");
+    }
+    }
+}
+
+void
+mach_server_make_memory_entry(mach_port_t replyPort)
+{
+    mach_vm_address_t src = 0, lsrc = 0;
+    mach_vm_size_t size = TESTSZ;
+    memory_object_size_t memsz = 0;
+    kern_return_t kr;
+    boolean_t modified_in_server = FALSE, perm_changed = FALSE;
+    ipc_message_t message;
+    ipc_message_t *reply = NULL;
+    char src_val[64], dst_val[64];
+    mach_msg_size_t replySize = 0;
+    void *buffer = NULL;
+    int flags = 0;
+    mach_port_t memport = 0;
+    int mementry_pass_idx = 0;
+    mach_vm_offset_t misoffset = 0;
+
+    if (debug) {
+        T_LOG("\n*************** make_memory_entry_test START ***************\n");
+    }
+
+    if (mach_server_data_setup(&buffer) != 0) {
+        server_error_out(replyPort);
+    }
+
+    if (buffer == NULL) {
+        mach_server_data_cleanup(NULL, 0, 0);
+        exit(0);
+    }
+
+    replySize = sizeof(ipc_message_t) + sizeof(mach_msg_trailer_t) + 64;
+    reply = calloc(1, replySize);
+
+test_different_mementry_mode:
+    /* create message to send over rights/address/pid/size */
+    mach_server_construct_header(&message, replyPort);
+
+    /* allocation that we plan to remap in the client */
+    mach_server_create_allocation(&src, size, buffer);
+
+    memsz = 8191;
+    lsrc = src + 94095;
+    int pgmask = (getpagesize() - 1);
+    misoffset = 94095 - (94095 & ~pgmask);
+
+    if (mementry_pass_idx < 2) {
+        if (mementry_pass_idx == 0) {
+            flags = VM_PROT_DEFAULT | MAP_MEM_VM_COPY | MAP_MEM_USE_DATA_ADDR;
+            T_LOG("mach_make_memory_entry VM_COPY | USE_DATA_ADDR test...");
+        } else {
+            flags = VM_PROT_READ | MAP_MEM_VM_SHARE;
+
T_LOG("mach_make_memory_entry VM_SHARE test..."); + } + kr = mach_vm_protect(mach_task_self(), (mach_vm_address_t) lsrc, (mach_vm_size_t)getpagesize(), FALSE, VM_PROT_READ); + assert(kr == KERN_SUCCESS); + perm_changed = TRUE; + } else { + flags = VM_PROT_DEFAULT; + perm_changed = FALSE; + T_LOG("mach_make_memory_entry DEFAULT test..."); + } + + kr = mach_make_memory_entry_64(mach_task_self(), &memsz, lsrc, flags, &memport, MACH_PORT_NULL); + if (kr != KERN_SUCCESS) { + T_LOG("ERROR: mach_make_memory_entry_64 try (%d) failed in Client: (%d) %s\n", + mementry_pass_idx + 1, kr, mach_error_string(kr)); + server_error_out(replyPort); + } + + mach_server_contruct_payload(&message, lsrc, memport, memsz, misoffset, ((flags & MAP_MEM_VM_COPY) == MAP_MEM_VM_COPY) /*copy*/, VM_OP_MEMENTRY); + + memcpy(src_val, (void*) lsrc, 64); + src_val[63] = '\0'; + +check_again: + /* Sending over pid/src address/size */ + kr = mach_msg(&message.header, MACH_SEND_MSG, message.header.msgh_size, 0, MACH_PORT_NULL, MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL); + + if (kr != KERN_SUCCESS) { + T_LOG("ERROR: Failed to send message to client: (%d) %s\n", kr, mach_error_string(kr)); + server_error_out(replyPort); + } + + /* Ack from client that it worked */ + + bzero(reply, replySize); + + kr = mach_msg(&reply->header, MACH_RCV_MSG, 0, replySize, replyPort, MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL); + if (kr != KERN_SUCCESS) { + T_LOG("ERROR: Failed to get reply from client: (%d) %s\n", kr, mach_error_string(kr)); + server_error_out(replyPort); + } + + memcpy(dst_val, &reply->value, 64); + dst_val[63] = '\0'; + + if (modified_in_server == FALSE) { + if (strncmp(src_val, dst_val, 64)) { + T_LOG("FAILED\n"); + T_LOG("(%d) Pre modification mach_make_memory_entry() FAILED: copy(%d) src_val: %s dest_val: %s\n", mementry_pass_idx + 1, message.copy, src_val, dst_val); + server_error_out(replyPort); + } + } else { + if (message.copy == TRUE) { + if (strncmp(src_val, dst_val, 64) == 0) { + T_LOG("FAILED\n"); + T_LOG("(%d) Data mismatch with Copy: %d src_val: %s dest_val: %s\n", + mementry_pass_idx + 1, message.copy, src_val, dst_val); + server_error_out(replyPort); + } + } else { + if (strncmp(src_val, dst_val, 64)) { + T_LOG("FAILED\n"); + T_LOG("(%d) Data mismatch with Copy: %d src_val: %s dest_val: %s\n", + mementry_pass_idx + 1, message.copy, src_val, dst_val); + server_error_out(replyPort); + } + } + } + + if (modified_in_server == FALSE) { + /* Now we change our data that has been mapped elsewhere */ + if (perm_changed) { + kr = mach_vm_protect(mach_task_self(), (mach_vm_address_t) lsrc, (mach_vm_size_t)getpagesize(), FALSE, VM_PROT_READ | VM_PROT_WRITE); + assert(kr == KERN_SUCCESS); + } + + memcpy((void*) lsrc, "THIS IS DIFFERENT -- BUT WE DON'T know if that's expecTED", 64); + + if (perm_changed) { + kr = mach_vm_protect(mach_task_self(), (mach_vm_address_t) lsrc, (mach_vm_size_t)getpagesize(), FALSE, VM_PROT_READ); + assert(kr == KERN_SUCCESS); + } + + memcpy(src_val, (void*) lsrc, 64); + src_val[63] = '\0'; + modified_in_server = TRUE; + message.vm_op = VM_OP_NONE; + + /* Check to see if the data in the other process is as expected */ + goto check_again; + } + + if (mementry_pass_idx < 2) { + /* Next remap mode...so ask the other process to unmap the older mapping. 
*/ + message.vm_op = VM_OP_UNMAP; + kr = mach_msg(&message.header, MACH_SEND_MSG, message.header.msgh_size, 0, MACH_PORT_NULL, MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL); + if (kr != KERN_SUCCESS) { + T_LOG("ERROR: Failed to send message to client: (%d) %s\n", kr, mach_error_string(kr)); + server_error_out(replyPort); + } + + mach_port_deallocate(mach_task_self(), memport); + memport = MACH_PORT_NULL; + mach_vm_deallocate(mach_task_self(), src, size); + + T_LOG("PASSED\n"); + + mementry_pass_idx++; + modified_in_server = FALSE; + + goto test_different_mementry_mode; + } + + T_LOG("PASSED\n"); + + /* Unmap old mapping in the other process. */ + message.vm_op = VM_OP_UNMAP; + kr = mach_msg(&message.header, MACH_SEND_MSG, message.header.msgh_size, 0, MACH_PORT_NULL, MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL); + if (kr != KERN_SUCCESS) { + T_LOG("ERROR: Failed to send message to client: (%d) %s\n", kr, mach_error_string(kr)); + server_error_out(replyPort); + } + + free(reply); + reply = NULL; + + mach_port_deallocate(mach_task_self(), memport); + memport = MACH_PORT_NULL; + + mach_server_data_cleanup(buffer, src, size); + buffer = NULL; + + if (debug) { + T_LOG("*************** mach_make_memory_entry_test END ***************\n"); + } +} + +void +mach_server_read(mach_port_t replyPort, int op) +{ + mach_vm_address_t src; + mach_vm_size_t size = TESTSZ; + kern_return_t kr; + boolean_t modified_in_server = FALSE; + ipc_message_t message; + char src_val[64], dst_val[64]; + mach_msg_size_t replySize = 0; + ipc_message_t *reply = NULL; + void *buffer = NULL; + + if (debug) { + T_LOG("\n*************** vm_read / write / overwrite_test START ***************\n"); + } + + { + char opname[16]; + if (op == VM_OP_READ) { + strlcpy(opname, "read", 5); + } + if (op == VM_OP_WRITE) { + strlcpy(opname, "write", 6); + } + if (op == VM_OP_READ_OVERWRITE) { + strlcpy(opname, "read_overwrite", 15); + } + + T_LOG("vm_%s test...", opname); + } + + if (mach_server_data_setup(&buffer) != 0) { + server_error_out(replyPort); + } + + if (buffer == NULL) { + mach_server_data_cleanup(NULL, 0, 0); + exit(0); + } + + replySize = sizeof(ipc_message_t) + sizeof(mach_msg_trailer_t) + 64; + reply = calloc(1, replySize); + + /* create message to send over rights/address/pid/size */ + mach_server_construct_header(&message, replyPort); + + /* allocation that we plan to remap in the client */ + mach_server_create_allocation(&src, size, buffer); + + mach_server_contruct_payload(&message, src, MACH_PORT_NULL /* port */, size, 0, TRUE /*copy*/, op); + if (debug) { + T_LOG("server COPY: Sending 0x%llx, %d, 0x%llx\n", message.address, getpid(), message.size); + } + memcpy(src_val, (void*)message.address, 64); + +check_again: + /* Sending over pid/src address/size */ + kr = mach_msg(&message.header, MACH_SEND_MSG, message.header.msgh_size, 0, MACH_PORT_NULL, MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL); + if (kr != KERN_SUCCESS) { + T_LOG("ERROR: Failed to send message to client: (%d) %s\n", kr, mach_error_string(kr)); + server_error_out(replyPort); + } + + /* Ack from client that it worked */ + + bzero(reply, replySize); + + kr = mach_msg(&reply->header, MACH_RCV_MSG, 0, replySize, replyPort, MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL); + if (kr != KERN_SUCCESS) { + T_LOG("ERROR: Failed to get reply from client: (%d) %s\n", kr, mach_error_string(kr)); + server_error_out(replyPort); + } + + memcpy(dst_val, &reply->value, 64); + + if (modified_in_server == FALSE) { + if (strncmp(src_val, dst_val, 64)) { + T_LOG("Pre modification (op: %d) FAILED: src_val: %s 
dest_val: %s\n", op, src_val, dst_val); + server_error_out(replyPort); + } + } else { + if (strncmp(src_val, dst_val, 64) == 0) { + T_LOG("Data mismatch (op:%d) with Copy: %d src_val: %s dest_val: %s\n", op, message.copy, src_val, dst_val); + server_error_out(replyPort); + } + } + + if (modified_in_server == FALSE) { + /* Now we change our data that has been mapped elsewhere */ + memcpy((void*)message.address, "THIS IS DIFFERENT -- BUT WE DON'T know if that's expecTED", 64); + memcpy(src_val, (void*)message.address, 64); + modified_in_server = TRUE; + message.vm_op = VM_OP_NONE; + + /* Check to see if the data in the other process is as expected */ + goto check_again; + } + + /* Unmap old mapping in the other process. */ + message.vm_op = VM_OP_UNMAP; + kr = mach_msg(&message.header, MACH_SEND_MSG, message.header.msgh_size, 0, MACH_PORT_NULL, MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL); + if (kr != KERN_SUCCESS) { + T_LOG("ERROR: Failed to send message to client: (%d) %s\n", kr, mach_error_string(kr)); + server_error_out(replyPort); + } + + free(reply); + reply = NULL; + + mach_server_data_cleanup(buffer, src, size); + buffer = NULL; + + if (debug) { + T_LOG("*************** vm_read_test END ***************\n"); + } + + T_LOG("PASSED\n"); +} + +void +mach_server_remap(mach_port_t replyPort) +{ + mach_vm_address_t src = 0, lsrc = 0; + mach_vm_size_t size = TESTSZ; + kern_return_t kr; + int remap_copy_pass_idx = 0; + boolean_t modified_in_server = FALSE; + void *buffer; + ipc_message_t message; + char src_val[64], dst_val[64]; + mach_msg_size_t replySize = 0; + ipc_message_t *reply = NULL; + + if (debug) { + T_LOG("\n*************** vm_remap_test START ***************\n"); + } + + if (mach_server_data_setup(&buffer) != 0) { + server_error_out(replyPort); + } + + if (buffer == NULL) { + mach_server_data_cleanup(NULL, 0, 0); + exit(0); + } + + replySize = sizeof(ipc_message_t) + sizeof(mach_msg_trailer_t) + 64; + reply = calloc(1, replySize); + +remap_again: + + T_LOG("vm_remap (copy = %s) test...", ((remap_copy_pass_idx == 0) ? 
"FALSE" : "TRUE")); + + /* create message to send over rights/address/pid/size */ + mach_server_construct_header(&message, replyPort); + + /* server allocation that we plan to remap in the client */ + mach_server_create_allocation(&src, size, buffer); + + lsrc = src + 8193; + + mach_server_contruct_payload(&message, lsrc, MACH_PORT_NULL /* port */, size - 9000, 0, remap_copy_pass_idx /*copy*/, VM_OP_REMAP); + if (debug) { + T_LOG("server COPY: Sending 0x%llx, %d, 0x%llx\n", message.address, getpid(), message.size); + } + + memcpy(src_val, (void*)lsrc, 64); + src_val[63] = '\0'; + +check_again: + /* Sending over pid/src address/size */ + kr = mach_msg(&message.header, MACH_SEND_MSG, message.header.msgh_size, 0, MACH_PORT_NULL, MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL); + if (kr != KERN_SUCCESS) { + T_LOG("ERROR: Failed to send message to client: (%d) %s\n", kr, mach_error_string(kr)); + server_error_out(replyPort); + } + + /* Ack from client that it worked */ + + bzero(reply, replySize); + + kr = mach_msg(&reply->header, MACH_RCV_MSG, 0, replySize, replyPort, MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL); + if (kr != KERN_SUCCESS) { + T_LOG("ERROR: Failed to get reply from client: (%d) %s\n", kr, mach_error_string(kr)); + server_error_out(replyPort); + } + + memcpy(dst_val, &reply->value, 64); + dst_val[63] = '\0'; + + + if (modified_in_server == FALSE) { + if (strncmp(src_val, dst_val, 64)) { + T_LOG("Pre modification remap() FAILED: copy(%d) src_val: %s dest_val: %s\n", + message.copy, src_val, dst_val); + server_error_out(replyPort); + } + } else { + if (message.copy == TRUE) { + if (strcmp(src_val, dst_val) == 0) { + T_LOG("Data mismatch with Copy: %d src_val: %s dest_val: %s\n", + message.copy, src_val, dst_val); + server_error_out(replyPort); + } + } else { + if (strcmp(src_val, dst_val)) { + T_LOG("Data mismatch with Copy: %d src_val: %s dest_val: %s\n", + message.copy, src_val, dst_val); + server_error_out(replyPort); + } + } + } + + if (modified_in_server == FALSE) { + /* Now we change our data that has been mapped elsewhere */ + memcpy((void*)message.address, "THIS IS DIFFERENT -- BUT WE DON'T know if that's expecTED", 64); + memcpy(src_val, (void*)message.address, 64); + src_val[63] = '\0'; + + modified_in_server = TRUE; + message.vm_op = VM_OP_NONE; + + /* Check to see if the data in the other process is as expected */ + goto check_again; + } + + if (remap_copy_pass_idx == 0) { + /* Next remap mode...so ask the other process to unmap the older mapping. */ + message.vm_op = VM_OP_UNMAP; + kr = mach_msg(&message.header, MACH_SEND_MSG, message.header.msgh_size, 0, MACH_PORT_NULL, MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL); + if (kr != KERN_SUCCESS) { + T_LOG("ERROR: Failed to send message to client: (%d) %s\n", kr, mach_error_string(kr)); + server_error_out(replyPort); + } + + mach_vm_deallocate(mach_task_self(), src, size); + + T_LOG("PASSED\n"); + + remap_copy_pass_idx++; + modified_in_server = FALSE; + + /* Next remap pass to test (copy == TRUE). Send data out again to the other process to remap. */ + goto remap_again; + } + + T_LOG("PASSED\n"); + + /* Unmap old mapping in the other process. 
 */
+    message.vm_op = VM_OP_UNMAP;
+    kr = mach_msg(&message.header, MACH_SEND_MSG, message.header.msgh_size, 0, MACH_PORT_NULL, MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
+    if (kr != KERN_SUCCESS) {
+        T_LOG("ERROR: Failed to send message to client: (%d) %s\n", kr, mach_error_string(kr));
+        server_error_out(replyPort);
+    }
+
+    free(reply);
+    reply = NULL;
+
+    mach_server_data_cleanup(buffer, src, size);
+    buffer = NULL;
+
+    if (debug) {
+        T_LOG("*************** vm_remap_test END ***************\n");
+    }
+}
diff --git a/tests/vm_set_max_addr_test.c b/tests/vm_set_max_addr_test.c
index e1c06259f..2e802e429 100644
--- a/tests/vm_set_max_addr_test.c
+++ b/tests/vm_set_max_addr_test.c
@@ -17,7 +17,7 @@ T_DECL(set_max_addr,
     T_META_NAMESPACE("xnu.vm"),
     T_META_CHECK_LEAKS(false))
 {
-#if (defined(__arm64__) && defined(__LP64__))
+#if (!defined(TARGET_OS_MAC) && defined(__arm64__) && defined(__LP64__))
     int result = 0;
     int code = 0;
     int child_pid = 0;
diff --git a/tests/vm_spawn_tool.c b/tests/vm_spawn_tool.c
new file mode 100644
index 000000000..595592f88
--- /dev/null
+++ b/tests/vm_spawn_tool.c
@@ -0,0 +1,62 @@
+#include <spawn.h>
+#include <stdio.h>
+#include <sys/sysctl.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+extern char **environ;
+
+#ifndef _POSIX_SPAWN_FORCE_4K_PAGES
+#define _POSIX_SPAWN_FORCE_4K_PAGES 0x1000
+#endif /* _POSIX_SPAWN_FORCE_4K_PAGES */
+
+int
+main(int argc, char *argv[])
+{
+    if (argc < 2) {
+        fprintf(stderr, "Usage: %s <program> [<args> ..]\n", argv[0]);
+        return 1;
+    }
+
+    char * prog_path = argv[1];
+    if (0 != access(prog_path, X_OK)) {
+        fprintf(stderr, "%s is not an executable\n", prog_path);
+        return 1;
+    }
+
+    pid_t newpid = 0;
+    posix_spawn_file_actions_t fileactions;
+    posix_spawnattr_t spawnattrs;
+    if (posix_spawnattr_init(&spawnattrs)) {
+        perror("posix_spawnattr_init");
+        return 1;
+    }
+    if (posix_spawn_file_actions_init(&fileactions)) {
+        perror("posix_spawn_file_actions_init");
+        return 1;
+    }
+    short sp_flags = POSIX_SPAWN_SETEXEC;
+
+    /* Need to set special flags */
+    int supported = 0;
+    size_t supported_size = sizeof(supported);
+
+    int r = sysctlbyname("debug.vm_mixed_pagesize_supported", &supported, &supported_size, NULL, 0);
+    if (r == 0 && supported) {
+        sp_flags |= _POSIX_SPAWN_FORCE_4K_PAGES;
+    } else {
+        /*
+         * We didn't find debug.vm_mixed_pagesize_supported OR it's set to 0.
+         * Skip the test.
+ */ + printf("Hardware doesn't support 4K pages, skipping test..."); + return 0; + } + + posix_spawnattr_setflags(&spawnattrs, sp_flags); + posix_spawn(&newpid, prog_path, &fileactions, &spawnattrs, &argv[1], environ); + + /* Should not have reached here */ + fprintf(stderr, "should not have reached here"); + return 1; +} diff --git a/tests/vm_test_code_signing_helper.c b/tests/vm_test_code_signing_helper.c new file mode 100644 index 000000000..0c429d725 --- /dev/null +++ b/tests/vm_test_code_signing_helper.c @@ -0,0 +1,152 @@ +#include +#include +#include +#include +#include + +#if __has_include() +#include +#endif + +#include +#include + +char *cmdname; + +int +main( + int argc, + char *argv[]) +{ + uint32_t page_size; + void *page; + int ch; + int opt_interactive; + + cmdname = argv[0]; + + opt_interactive = 0; + while ((ch = getopt(argc, argv, "i")) != -1) { + switch (ch) { + case 'i': + opt_interactive = 1; + break; + case '?': + default: + fprintf(stdout, + "Usage: %s [-i]\n" + "\t-i: interactive\n", + cmdname); + exit(1); + } + } + + page_size = getpagesize(); + page = mmap(NULL, page_size, PROT_READ | PROT_EXEC, MAP_ANON | MAP_SHARED, -1, 0); + if (!page) { + fprintf(stderr, "%s:%d mmap() error %d (%s)\n", + cmdname, __LINE__, + errno, strerror(errno)); + exit(1); + } + if (opt_interactive) { + fprintf(stdout, "allocated page at %p\n", + page); + } + + if (mprotect(page, page_size, PROT_READ | PROT_WRITE) != 0) { + fprintf(stderr, "%s:%d mprotect(RW) error %d (%s)\n", + cmdname, __LINE__, + errno, strerror(errno)); + exit(1); + } + +#if __arm64__ + // arm64 chdir() syscall + char chdir_code[] = { + 0x90, 0x01, 0x80, 0xd2, // movz x16, #0xc + 0x01, 0x10, 0x00, 0xd4, // svc #0x80 + 0xc0, 0x03, 0x5f, 0xd6, // ret + }; +#elif __arm__ + // armv7 chdir() syscall + char chdir_code[] = { + 0x0c, 0xc0, 0xa0, 0xe3, // mov r12 #0xc + 0x80, 0x00, 0x00, 0xef, // svc #0x80 + 0x1e, 0xff, 0x2f, 0xe1, // bx lr + }; +#elif __x86_64__ + // x86_64 chdir() syscall + char chdir_code[] = { + 0xb8, 0x0c, 0x00, 0x00, 0x02, // movl $0x200000c, %eax + 0x49, 0x89, 0xca, // movq %rcx, %r10 + 0x0f, 0x05, // syscall + 0xc3, // retq + }; +#elif __i386__ + // i386 chdir() syscall + char chdir_code[] = { + 0x90, // nop + 0xc3, // retq + }; +#endif + memcpy(page, chdir_code, sizeof chdir_code); + + if (opt_interactive) { + fprintf(stdout, + "changed page protection to r/w and copied code at %p\n", + page); + fprintf(stdout, "pausing...\n"); + fflush(stdout); + getchar(); + } + + if (mprotect(page, page_size, PROT_READ | PROT_EXEC) != 0) { + fprintf(stderr, "%s:%d mprotect(RX) error %d (%s)\n", + cmdname, __LINE__, + errno, strerror(errno)); + exit(1); + } + + if (opt_interactive) { + fprintf(stdout, + "changed page protection to r/x at %p\n", + page); + fprintf(stdout, "pausing...\n"); + fflush(stdout); + getchar(); + } + + char origdir[PATH_MAX]; + getcwd(origdir, sizeof(origdir) - 1); + + chdir("/"); + if (opt_interactive) { + fprintf(stdout, "cwd before = %s\n", getwd(NULL)); + } + + void (*mychdir)(char *) = page; +#if __has_feature(ptrauth_calls) + mychdir = ptrauth_sign_unauthenticated(mychdir, ptrauth_key_function_pointer, 0); +#endif + mychdir(getenv("HOME")); + if (opt_interactive) { + fprintf(stdout, "cwd after = %s\n", getwd(NULL)); + fprintf(stdout, "pausing...\n"); + fflush(stdout); + getchar(); + } + + fprintf(stdout, "%s: WARNING: unsigned code was executed\n", + cmdname); + +#if CONFIG_EMBEDDED + /* fail: unsigned code was executed */ + fprintf(stdout, "%s: FAIL\n", cmdname); + exit(1); +#else /* 
CONFIG_EMBEDDED */ + /* no fail: unsigned code is only prohibited on embedded platforms */ + fprintf(stdout, "%s: SUCCESS\n", cmdname); + exit(0); +#endif /* CONFIG_EMBEDDED */ +} diff --git a/tests/vm_test_mach_map.c b/tests/vm_test_mach_map.c new file mode 100644 index 000000000..2ab86744f --- /dev/null +++ b/tests/vm_test_mach_map.c @@ -0,0 +1,1083 @@ +/* Mach vm map miscellaneous unit tests + * + * This test program serves to be a regression test suite for legacy + * vm issues, ideally each test will be linked to a radar number and + * perform a set of certain validations. + * + */ +#include + +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +T_GLOBAL_META(T_META_NAMESPACE("xnu.vm"), + T_META_RUN_CONCURRENTLY(true)); + +static void +test_memory_entry_tagging(int override_tag) +{ + int pass; + int do_copy; + kern_return_t kr; + mach_vm_address_t vmaddr_orig, vmaddr_shared, vmaddr_copied; + mach_vm_size_t vmsize_orig, vmsize_shared, vmsize_copied; + mach_vm_address_t *vmaddr_ptr; + mach_vm_size_t *vmsize_ptr; + mach_vm_address_t vmaddr_chunk; + mach_vm_size_t vmsize_chunk; + mach_vm_offset_t vmoff; + mach_port_t mem_entry_copied, mem_entry_shared; + mach_port_t *mem_entry_ptr; + int i; + vm_region_submap_short_info_data_64_t ri; + mach_msg_type_number_t ri_count; + unsigned int depth; + int vm_flags; + int expected_tag; + + vmaddr_copied = 0; + vmaddr_shared = 0; + vmsize_copied = 0; + vmsize_shared = 0; + vmaddr_chunk = 0; + vmsize_chunk = 16 * 1024; + vmaddr_orig = 0; + vmsize_orig = 3 * vmsize_chunk; + mem_entry_copied = MACH_PORT_NULL; + mem_entry_shared = MACH_PORT_NULL; + pass = 0; + + vmaddr_orig = 0; + kr = mach_vm_allocate(mach_task_self(), + &vmaddr_orig, + vmsize_orig, + VM_FLAGS_ANYWHERE); + T_QUIET; + T_EXPECT_MACH_SUCCESS(kr, "[override_tag:%d] vm_allocate(%lld)", + override_tag, vmsize_orig); + if (T_RESULT == T_RESULT_FAIL) { + goto done; + } + + for (i = 0; i < vmsize_orig / vmsize_chunk; i++) { + vmaddr_chunk = vmaddr_orig + (i * vmsize_chunk); + kr = mach_vm_allocate(mach_task_self(), + &vmaddr_chunk, + vmsize_chunk, + (VM_FLAGS_FIXED | + VM_FLAGS_OVERWRITE | + VM_MAKE_TAG(100 + i))); + T_QUIET; + T_EXPECT_MACH_SUCCESS(kr, "[override_tag:%d] vm_allocate(%lld)", + override_tag, vmsize_chunk); + if (T_RESULT == T_RESULT_FAIL) { + goto done; + } + } + + for (vmoff = 0; + vmoff < vmsize_orig; + vmoff += PAGE_SIZE) { + *((unsigned char *)(uintptr_t)(vmaddr_orig + vmoff)) = 'x'; + } + + do_copy = time(NULL) & 1; +again: + *((unsigned char *)(uintptr_t)vmaddr_orig) = 'x'; + if (do_copy) { + mem_entry_ptr = &mem_entry_copied; + vmsize_copied = vmsize_orig; + vmsize_ptr = &vmsize_copied; + vmaddr_copied = 0; + vmaddr_ptr = &vmaddr_copied; + vm_flags = MAP_MEM_VM_COPY; + } else { + mem_entry_ptr = &mem_entry_shared; + vmsize_shared = vmsize_orig; + vmsize_ptr = &vmsize_shared; + vmaddr_shared = 0; + vmaddr_ptr = &vmaddr_shared; + vm_flags = MAP_MEM_VM_SHARE; + } + kr = mach_make_memory_entry_64(mach_task_self(), + vmsize_ptr, + vmaddr_orig, /* offset */ + (vm_flags | + VM_PROT_READ | VM_PROT_WRITE), + mem_entry_ptr, + MACH_PORT_NULL); + T_QUIET; + T_EXPECT_MACH_SUCCESS(kr, "[override_tag:%d][do_copy:%d] mach_make_memory_entry()", + override_tag, do_copy); + if (T_RESULT == T_RESULT_FAIL) { + goto done; + } + T_QUIET; + T_EXPECT_EQ(*vmsize_ptr, vmsize_orig, "[override_tag:%d][do_copy:%d] vmsize (0x%llx) != vmsize_orig (0x%llx)", + override_tag, do_copy, (uint64_t) 
*vmsize_ptr, (uint64_t) vmsize_orig); + if (T_RESULT == T_RESULT_FAIL) { + goto done; + } + T_QUIET; + T_EXPECT_NOTNULL(*mem_entry_ptr, "[override_tag:%d][do_copy:%d] mem_entry == 0x%x", + override_tag, do_copy, *mem_entry_ptr); + if (T_RESULT == T_RESULT_FAIL) { + goto done; + } + + *vmaddr_ptr = 0; + if (override_tag) { + vm_flags = VM_MAKE_TAG(200); + } else { + vm_flags = 0; + } + kr = mach_vm_map(mach_task_self(), + vmaddr_ptr, + vmsize_orig, + 0, /* mask */ + vm_flags | VM_FLAGS_ANYWHERE, + *mem_entry_ptr, + 0, /* offset */ + FALSE, /* copy */ + VM_PROT_READ | VM_PROT_WRITE, + VM_PROT_READ | VM_PROT_WRITE, + VM_INHERIT_DEFAULT); + T_QUIET; + T_EXPECT_MACH_SUCCESS(kr, "[override_tag:%d][do_copy:%d] mach_vm_map()", + override_tag, do_copy); + if (T_RESULT == T_RESULT_FAIL) { + goto done; + } + + *((unsigned char *)(uintptr_t)vmaddr_orig) = 'X'; + if (*(unsigned char *)(uintptr_t)*vmaddr_ptr == 'X') { + T_QUIET; + T_EXPECT_EQ(do_copy, 0, "[override_tag:%d][do_copy:%d] memory shared instead of copied", + override_tag, do_copy); + if (T_RESULT == T_RESULT_FAIL) { + goto done; + } + } else { + T_QUIET; + T_EXPECT_NE(do_copy, 0, "[override_tag:%d][do_copy:%d] memory copied instead of shared", + override_tag, do_copy); + if (T_RESULT == T_RESULT_FAIL) { + goto done; + } + } + + for (i = 0; i < vmsize_orig / vmsize_chunk; i++) { + mach_vm_address_t vmaddr_info; + mach_vm_size_t vmsize_info; + + vmaddr_info = *vmaddr_ptr + (i * vmsize_chunk); + vmsize_info = 0; + depth = 1; + ri_count = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64; + kr = mach_vm_region_recurse(mach_task_self(), + &vmaddr_info, + &vmsize_info, + &depth, + (vm_region_recurse_info_t) &ri, + &ri_count); + T_QUIET; + T_EXPECT_MACH_SUCCESS(kr, "[override_tag:%d][do_copy:%d] mach_vm_region_recurse(0x%llx+0x%llx)", + override_tag, do_copy, *vmaddr_ptr, i * vmsize_chunk); + if (T_RESULT == T_RESULT_FAIL) { + goto done; + } + T_QUIET; + T_EXPECT_EQ(vmaddr_info, *vmaddr_ptr + (i * vmsize_chunk), "[override_tag:%d][do_copy:%d] mach_vm_region_recurse(0x%llx+0x%llx) returned addr 0x%llx", + override_tag, do_copy, *vmaddr_ptr, i * vmsize_chunk, vmaddr_info); + if (T_RESULT == T_RESULT_FAIL) { + goto done; + } + T_QUIET; + T_EXPECT_EQ(vmsize_info, vmsize_chunk, "[override_tag:%d][do_copy:%d] mach_vm_region_recurse(0x%llx+0x%llx) returned size 0x%llx expected 0x%llx", + override_tag, do_copy, *vmaddr_ptr, i * vmsize_chunk, vmsize_info, vmsize_chunk); + if (T_RESULT == T_RESULT_FAIL) { + goto done; + } + if (override_tag) { + expected_tag = 200; + } else { + expected_tag = 100 + i; + } + T_QUIET; + T_EXPECT_EQ(ri.user_tag, expected_tag, "[override_tag:%d][do_copy:%d] i=%d tag=%d expected %d", + override_tag, do_copy, i, ri.user_tag, expected_tag); + if (T_RESULT == T_RESULT_FAIL) { + goto done; + } + } + + if (++pass < 2) { + do_copy = !do_copy; + goto again; + } + +done: + if (vmaddr_orig != 0) { + mach_vm_deallocate(mach_task_self(), + vmaddr_orig, + vmsize_orig); + vmaddr_orig = 0; + vmsize_orig = 0; + } + if (vmaddr_copied != 0) { + mach_vm_deallocate(mach_task_self(), + vmaddr_copied, + vmsize_copied); + vmaddr_copied = 0; + vmsize_copied = 0; + } + if (vmaddr_shared != 0) { + mach_vm_deallocate(mach_task_self(), + vmaddr_shared, + vmsize_shared); + vmaddr_shared = 0; + vmsize_shared = 0; + } + if (mem_entry_copied != MACH_PORT_NULL) { + mach_port_deallocate(mach_task_self(), mem_entry_copied); + mem_entry_copied = MACH_PORT_NULL; + } + if (mem_entry_shared != MACH_PORT_NULL) { + mach_port_deallocate(mach_task_self(), mem_entry_shared); + 
mem_entry_shared = MACH_PORT_NULL; + } + + return; +} + +static void +test_map_memory_entry(void) +{ + kern_return_t kr; + mach_vm_address_t vmaddr1, vmaddr2; + mach_vm_size_t vmsize1, vmsize2; + mach_port_t mem_entry; + unsigned char *cp1, *cp2; + + vmaddr1 = 0; + vmsize1 = 0; + vmaddr2 = 0; + vmsize2 = 0; + mem_entry = MACH_PORT_NULL; + + vmsize1 = 1; + vmaddr1 = 0; + kr = mach_vm_allocate(mach_task_self(), + &vmaddr1, + vmsize1, + VM_FLAGS_ANYWHERE); + T_QUIET; + T_EXPECT_MACH_SUCCESS(kr, "vm_allocate(%lld)", vmsize1); + if (T_RESULT == T_RESULT_FAIL) { + goto done; + } + + cp1 = (unsigned char *)(uintptr_t)vmaddr1; + *cp1 = '1'; + + vmsize2 = 1; + mem_entry = MACH_PORT_NULL; + kr = mach_make_memory_entry_64(mach_task_self(), + &vmsize2, + vmaddr1, /* offset */ + (MAP_MEM_VM_COPY | + VM_PROT_READ | VM_PROT_WRITE), + &mem_entry, + MACH_PORT_NULL); + T_QUIET; + T_EXPECT_MACH_SUCCESS(kr, "mach_make_memory_entry()"); + if (T_RESULT == T_RESULT_FAIL) { + goto done; + } + T_QUIET; + T_EXPECT_GE(vmsize2, vmsize1, "vmsize2 (0x%llx) < vmsize1 (0x%llx)", + (uint64_t) vmsize2, (uint64_t) vmsize1); + if (T_RESULT == T_RESULT_FAIL) { + goto done; + } + T_QUIET; + T_EXPECT_NOTNULL(mem_entry, "mem_entry == 0x%x", mem_entry); + if (T_RESULT == T_RESULT_FAIL) { + goto done; + } + + vmaddr2 = 0; + kr = mach_vm_map(mach_task_self(), + &vmaddr2, + vmsize2, + 0, /* mask */ + VM_FLAGS_ANYWHERE, + mem_entry, + 0, /* offset */ + TRUE, /* copy */ + VM_PROT_READ | VM_PROT_WRITE, + VM_PROT_READ | VM_PROT_WRITE, + VM_INHERIT_DEFAULT); + T_QUIET; + T_EXPECT_MACH_SUCCESS(kr, "mach_vm_map()"); + if (T_RESULT == T_RESULT_FAIL) { + goto done; + } + + cp2 = (unsigned char *)(uintptr_t)vmaddr2; + T_QUIET; + T_EXPECT_TRUE(((*cp1 == '1') && (*cp2 == '1')), "*cp1/*cp2 0x%x/0x%x expected 0x%x/0x%x", + *cp1, *cp2, '1', '1'); + if (T_RESULT == T_RESULT_FAIL) { + goto done; + } + + *cp2 = '2'; + T_QUIET; + T_EXPECT_TRUE(((*cp1 == '1') && (*cp2 == '2')), "*cp1/*cp2 0x%x/0x%x expected 0x%x/0x%x", + *cp1, *cp2, '1', '2'); + if (T_RESULT == T_RESULT_FAIL) { + goto done; + } + +done: + if (vmaddr1 != 0) { + mach_vm_deallocate(mach_task_self(), vmaddr1, vmsize1); + vmaddr1 = 0; + vmsize1 = 0; + } + if (vmaddr2 != 0) { + mach_vm_deallocate(mach_task_self(), vmaddr2, vmsize2); + vmaddr2 = 0; + vmsize2 = 0; + } + if (mem_entry != MACH_PORT_NULL) { + mach_port_deallocate(mach_task_self(), mem_entry); + mem_entry = MACH_PORT_NULL; + } + + return; +} + +T_DECL(memory_entry_tagging, "test mem entry tag for rdar://problem/23334087 \ + VM memory tags should be propagated through memory entries", + T_META_ALL_VALID_ARCHS(true)) +{ + test_memory_entry_tagging(0); + test_memory_entry_tagging(1); +} + +T_DECL(map_memory_entry, "test mapping mem entry for rdar://problem/22611816 \ + mach_make_memory_entry(MAP_MEM_VM_COPY) should never use a KERNEL_BUFFER \ + copy", T_META_ALL_VALID_ARCHS(true)) +{ + test_map_memory_entry(); +} + +static char *vm_purgable_state[4] = { "NONVOLATILE", "VOLATILE", "EMPTY", "DENY" }; + +static uint64_t +task_footprint(void) +{ + task_vm_info_data_t ti; + kern_return_t kr; + mach_msg_type_number_t count; + + count = TASK_VM_INFO_COUNT; + kr = task_info(mach_task_self(), + TASK_VM_INFO, + (task_info_t) &ti, + &count); + T_QUIET; + T_ASSERT_MACH_SUCCESS(kr, "task_info()"); +#if defined(__arm64__) || defined(__arm__) + T_QUIET; + T_ASSERT_EQ(count, TASK_VM_INFO_COUNT, "task_info() count = %d (expected %d)", + count, TASK_VM_INFO_COUNT); +#endif /* defined(__arm64__) || defined(__arm__) */ + return ti.phys_footprint; +} + 
+T_DECL(purgeable_empty_to_volatile, "test task physical footprint when \ + emptying, volatilizing purgeable vm") +{ + kern_return_t kr; + mach_vm_address_t vm_addr; + mach_vm_size_t vm_size; + char *cp; + int ret; + vm_purgable_t state; + uint64_t footprint[8]; + + vm_addr = 0; + vm_size = 1 * 1024 * 1024; + T_LOG("--> allocate %llu bytes", vm_size); + kr = mach_vm_allocate(mach_task_self(), + &vm_addr, + vm_size, + VM_FLAGS_ANYWHERE | VM_FLAGS_PURGABLE); + T_ASSERT_MACH_SUCCESS(kr, "vm_allocate()"); + + /* footprint0 */ + footprint[0] = task_footprint(); + T_LOG(" footprint[0] = %llu", footprint[0]); + + T_LOG("--> access %llu bytes", vm_size); + for (cp = (char *) vm_addr; + cp < (char *) (vm_addr + vm_size); + cp += vm_kernel_page_size) { + *cp = 'x'; + } + /* footprint1 == footprint0 + vm_size */ + footprint[1] = task_footprint(); + T_LOG(" footprint[1] = %llu", footprint[1]); + if (footprint[1] != footprint[0] + vm_size) { + T_LOG("WARN: footprint[1] != footprint[0] + vm_size"); + } + + T_LOG("--> wire %llu bytes", vm_size / 2); + ret = mlock((char *)vm_addr, (size_t) (vm_size / 2)); + T_ASSERT_POSIX_SUCCESS(ret, "mlock()"); + + /* footprint2 == footprint1 */ + footprint[2] = task_footprint(); + T_LOG(" footprint[2] = %llu", footprint[2]); + if (footprint[2] != footprint[1]) { + T_LOG("WARN: footprint[2] != footprint[1]"); + } + + T_LOG("--> VOLATILE"); + state = VM_PURGABLE_VOLATILE; + kr = mach_vm_purgable_control(mach_task_self(), + vm_addr, + VM_PURGABLE_SET_STATE, + &state); + T_ASSERT_MACH_SUCCESS(kr, "vm_purgable_control(VOLATILE)"); + T_ASSERT_EQ(state, VM_PURGABLE_NONVOLATILE, "NONVOLATILE->VOLATILE: state was %s", + vm_purgable_state[state]); + /* footprint3 == footprint2 - (vm_size / 2) */ + footprint[3] = task_footprint(); + T_LOG(" footprint[3] = %llu", footprint[3]); + if (footprint[3] != footprint[2] - (vm_size / 2)) { + T_LOG("WARN: footprint[3] != footprint[2] - (vm_size / 2)"); + } + + T_LOG("--> EMPTY"); + state = VM_PURGABLE_EMPTY; + kr = mach_vm_purgable_control(mach_task_self(), + vm_addr, + VM_PURGABLE_SET_STATE, + &state); + T_ASSERT_MACH_SUCCESS(kr, "vm_purgable_control(EMPTY)"); + if (state != VM_PURGABLE_VOLATILE && + state != VM_PURGABLE_EMPTY) { + T_ASSERT_FAIL("VOLATILE->EMPTY: state was %s", + vm_purgable_state[state]); + } + /* footprint4 == footprint3 */ + footprint[4] = task_footprint(); + T_LOG(" footprint[4] = %llu", footprint[4]); + if (footprint[4] != footprint[3]) { + T_LOG("WARN: footprint[4] != footprint[3]"); + } + + T_LOG("--> unwire %llu bytes", vm_size / 2); + ret = munlock((char *)vm_addr, (size_t) (vm_size / 2)); + T_ASSERT_POSIX_SUCCESS(ret, "munlock()"); + + /* footprint5 == footprint4 - (vm_size/2) (unless memory pressure) */ + /* footprint5 == footprint0 */ + footprint[5] = task_footprint(); + T_LOG(" footprint[5] = %llu", footprint[5]); + if (footprint[5] != footprint[4] - (vm_size / 2)) { + T_LOG("WARN: footprint[5] != footprint[4] - (vm_size/2)"); + } + if (footprint[5] != footprint[0]) { + T_LOG("WARN: footprint[5] != footprint[0]"); + } + + T_LOG("--> VOLATILE"); + state = VM_PURGABLE_VOLATILE; + kr = mach_vm_purgable_control(mach_task_self(), + vm_addr, + VM_PURGABLE_SET_STATE, + &state); + T_ASSERT_MACH_SUCCESS(kr, "vm_purgable_control(VOLATILE)"); + T_ASSERT_EQ(state, VM_PURGABLE_EMPTY, "EMPTY->VOLATILE: state == %s", + vm_purgable_state[state]); + /* footprint6 == footprint5 */ + /* footprint6 == footprint0 */ + footprint[6] = task_footprint(); + T_LOG(" footprint[6] = %llu", footprint[6]); + if (footprint[6] != 
footprint[5]) { + T_LOG("WARN: footprint[6] != footprint[5]"); + } + if (footprint[6] != footprint[0]) { + T_LOG("WARN: footprint[6] != footprint[0]"); + } + + T_LOG("--> NONVOLATILE"); + state = VM_PURGABLE_NONVOLATILE; + kr = mach_vm_purgable_control(mach_task_self(), + vm_addr, + VM_PURGABLE_SET_STATE, + &state); + T_ASSERT_MACH_SUCCESS(kr, "vm_purgable_control(NONVOLATILE)"); + T_ASSERT_EQ(state, VM_PURGABLE_EMPTY, "EMPTY->NONVOLATILE: state == %s", + vm_purgable_state[state]); + /* footprint7 == footprint6 */ + /* footprint7 == footprint0 */ + footprint[7] = task_footprint(); + T_LOG(" footprint[7] = %llu", footprint[7]); + if (footprint[7] != footprint[6]) { + T_LOG("WARN: footprint[7] != footprint[6]"); + } + if (footprint[7] != footprint[0]) { + T_LOG("WARN: footprint[7] != footprint[0]"); + } +} + +T_DECL(madvise_shared, "test madvise shared for rdar://problem/2295713 logging \ + rethink needs madvise(MADV_FREE_HARDER)", + T_META_ALL_VALID_ARCHS(true)) +{ + vm_address_t vmaddr = 0, vmaddr2 = 0; + vm_size_t vmsize; + kern_return_t kr; + char *cp; + vm_prot_t curprot, maxprot; + int ret; + task_vm_info_data_t ti; + mach_msg_type_number_t ti_count; + + vmsize = 10 * 1024 * 1024; /* 10MB */ + kr = vm_allocate(mach_task_self(), + &vmaddr, + vmsize, + VM_FLAGS_ANYWHERE); + T_QUIET; + T_EXPECT_MACH_SUCCESS(kr, "vm_allocate()"); + if (T_RESULT == T_RESULT_FAIL) { + goto done; + } + + for (cp = (char *)(uintptr_t)vmaddr; + cp < (char *)(uintptr_t)(vmaddr + vmsize); + cp++) { + *cp = 'x'; + } + + kr = vm_remap(mach_task_self(), + &vmaddr2, + vmsize, + 0, /* mask */ + VM_FLAGS_ANYWHERE, + mach_task_self(), + vmaddr, + FALSE, /* copy */ + &curprot, + &maxprot, + VM_INHERIT_DEFAULT); + T_QUIET; + T_EXPECT_MACH_SUCCESS(kr, "vm_remap()"); + if (T_RESULT == T_RESULT_FAIL) { + goto done; + } + + for (cp = (char *)(uintptr_t)vmaddr2; + cp < (char *)(uintptr_t)(vmaddr2 + vmsize); + cp++) { + T_QUIET; + T_EXPECT_EQ(*cp, 'x', "vmaddr=%p vmaddr2=%p %p:0x%x", + (void *)(uintptr_t)vmaddr, + (void *)(uintptr_t)vmaddr2, + (void *)cp, + (unsigned char)*cp); + if (T_RESULT == T_RESULT_FAIL) { + goto done; + } + } + cp = (char *)(uintptr_t)vmaddr; + *cp = 'X'; + cp = (char *)(uintptr_t)vmaddr2; + T_QUIET; + T_EXPECT_EQ(*cp, 'X', "memory was not properly shared"); + if (T_RESULT == T_RESULT_FAIL) { + goto done; + } + +#if defined(__x86_64__) || defined(__i386__) + if (*((uint64_t *)_COMM_PAGE_CPU_CAPABILITIES64) & kIsTranslated) { + T_LOG("Skipping madvise reusable tests because we're running under translation."); + goto done; + } +#endif /* defined(__x86_64__) || defined(__i386__) */ + ret = madvise((char *)(uintptr_t)vmaddr, + vmsize, + MADV_FREE_REUSABLE); + T_QUIET; + T_EXPECT_POSIX_SUCCESS(ret, "madvise()"); + if (T_RESULT == T_RESULT_FAIL) { + goto done; + } + + ti_count = TASK_VM_INFO_COUNT; + kr = task_info(mach_task_self(), + TASK_VM_INFO, + (task_info_t) &ti, + &ti_count); + T_QUIET; + T_EXPECT_MACH_SUCCESS(kr, "task_info()"); + if (T_RESULT == T_RESULT_FAIL) { + goto done; + } + + T_QUIET; + T_EXPECT_EQ(ti.reusable, 2ULL * vmsize, "ti.reusable=%lld expected %lld", + ti.reusable, (uint64_t)(2 * vmsize)); + if (T_RESULT == T_RESULT_FAIL) { + goto done; + } + +done: + if (vmaddr != 0) { + vm_deallocate(mach_task_self(), vmaddr, vmsize); + vmaddr = 0; + } + if (vmaddr2 != 0) { + vm_deallocate(mach_task_self(), vmaddr2, vmsize); + vmaddr2 = 0; + } +} + +T_DECL(madvise_purgeable_can_reuse, "test madvise purgeable can reuse for \ + rdar://problem/37476183 Preview Footprint memory regressions ~100MB \ + [ 
purgeable_malloc became eligible for reuse ]", + T_META_ALL_VALID_ARCHS(true)) +{ +#if defined(__x86_64__) || defined(__i386__) + if (*((uint64_t *)_COMM_PAGE_CPU_CAPABILITIES64) & kIsTranslated) { + T_SKIP("madvise reusable is not supported under Rosetta translation. Skipping.)"); + } +#endif /* defined(__x86_64__) || defined(__i386__) */ + vm_address_t vmaddr = 0; + vm_size_t vmsize; + kern_return_t kr; + char *cp; + int ret; + + vmsize = 10 * 1024 * 1024; /* 10MB */ + kr = vm_allocate(mach_task_self(), + &vmaddr, + vmsize, + (VM_FLAGS_ANYWHERE | + VM_FLAGS_PURGABLE | + VM_MAKE_TAG(VM_MEMORY_MALLOC))); + T_QUIET; + T_EXPECT_MACH_SUCCESS(kr, "vm_allocate()"); + if (T_RESULT == T_RESULT_FAIL) { + goto done; + } + + for (cp = (char *)(uintptr_t)vmaddr; + cp < (char *)(uintptr_t)(vmaddr + vmsize); + cp++) { + *cp = 'x'; + } + + ret = madvise((char *)(uintptr_t)vmaddr, + vmsize, + MADV_CAN_REUSE); + T_QUIET; + T_EXPECT_TRUE(((ret == -1) && (errno == EINVAL)), "madvise(): purgeable vm can't be adviced to reuse"); + if (T_RESULT == T_RESULT_FAIL) { + goto done; + } + +done: + if (vmaddr != 0) { + vm_deallocate(mach_task_self(), vmaddr, vmsize); + vmaddr = 0; + } +} + +#define DEST_PATTERN 0xFEDCBA98 + +T_DECL(map_read_overwrite, "test overwriting vm map from other map - \ + rdar://31075370", + T_META_ALL_VALID_ARCHS(true)) +{ + kern_return_t kr; + mach_vm_address_t vmaddr1, vmaddr2; + mach_vm_size_t vmsize1, vmsize2; + int *ip; + int i; + + vmaddr1 = 0; + vmsize1 = 4 * 4096; + kr = mach_vm_allocate(mach_task_self(), + &vmaddr1, + vmsize1, + VM_FLAGS_ANYWHERE); + T_ASSERT_MACH_SUCCESS(kr, "vm_allocate()"); + + ip = (int *)(uintptr_t)vmaddr1; + for (i = 0; i < vmsize1 / sizeof(*ip); i++) { + ip[i] = i; + } + + vmaddr2 = 0; + kr = mach_vm_allocate(mach_task_self(), + &vmaddr2, + vmsize1, + VM_FLAGS_ANYWHERE); + T_ASSERT_MACH_SUCCESS(kr, "vm_allocate()"); + + ip = (int *)(uintptr_t)vmaddr2; + for (i = 0; i < vmsize1 / sizeof(*ip); i++) { + ip[i] = DEST_PATTERN; + } + + vmsize2 = vmsize1 - 2 * (sizeof(*ip)); + kr = mach_vm_read_overwrite(mach_task_self(), + vmaddr1 + sizeof(*ip), + vmsize2, + vmaddr2 + sizeof(*ip), + &vmsize2); + T_ASSERT_MACH_SUCCESS(kr, "vm_read_overwrite()"); + + ip = (int *)(uintptr_t)vmaddr2; + for (i = 0; i < 1; i++) { + T_QUIET; + T_ASSERT_EQ(ip[i], DEST_PATTERN, "vmaddr2[%d] = 0x%x instead of 0x%x", + i, ip[i], DEST_PATTERN); + } + for (; i < (vmsize1 - 2) / sizeof(*ip); i++) { + T_QUIET; + T_ASSERT_EQ(ip[i], i, "vmaddr2[%d] = 0x%x instead of 0x%x", + i, ip[i], i); + } + for (; i < vmsize1 / sizeof(*ip); i++) { + T_QUIET; + T_ASSERT_EQ(ip[i], DEST_PATTERN, "vmaddr2[%d] = 0x%x instead of 0x%x", + i, ip[i], DEST_PATTERN); + } +} + +T_DECL(copy_none_use_pmap, "test copy-on-write remapping of COPY_NONE vm \ + objects - rdar://35610377", + T_META_ALL_VALID_ARCHS(true)) +{ + kern_return_t kr; + mach_vm_address_t vmaddr1, vmaddr2, vmaddr3; + mach_vm_size_t vmsize; + vm_prot_t curprot, maxprot; + + vmsize = 32 * 1024 * 1024; + + vmaddr1 = 0; + kr = mach_vm_allocate(mach_task_self(), + &vmaddr1, + vmsize, + VM_FLAGS_ANYWHERE | VM_FLAGS_PURGABLE); + T_ASSERT_MACH_SUCCESS(kr, "vm_allocate()"); + + memset((void *)(uintptr_t)vmaddr1, 'x', vmsize); + + vmaddr2 = 0; + kr = mach_vm_remap(mach_task_self(), + &vmaddr2, + vmsize, + 0, /* mask */ + VM_FLAGS_ANYWHERE, + mach_task_self(), + vmaddr1, + TRUE, /* copy */ + &curprot, + &maxprot, + VM_INHERIT_DEFAULT); + T_ASSERT_MACH_SUCCESS(kr, "vm_remap() #1"); + + vmaddr3 = 0; + kr = mach_vm_remap(mach_task_self(), + &vmaddr3, + vmsize, + 0, /* 
mask */ + VM_FLAGS_ANYWHERE, + mach_task_self(), + vmaddr2, + TRUE, /* copy */ + &curprot, + &maxprot, + VM_INHERIT_DEFAULT); + T_ASSERT_MACH_SUCCESS(kr, "vm_remap() #2"); +} + +T_DECL(purgable_deny, "test purgeable memory is not allowed to be converted to \ + non-purgeable - rdar://31990033", + T_META_ALL_VALID_ARCHS(true)) +{ + kern_return_t kr; + vm_address_t vmaddr; + vm_purgable_t state; + + vmaddr = 0; + kr = vm_allocate(mach_task_self(), &vmaddr, 1, + VM_FLAGS_ANYWHERE | VM_FLAGS_PURGABLE); + T_ASSERT_MACH_SUCCESS(kr, "vm_allocate()"); + + state = VM_PURGABLE_DENY; + kr = vm_purgable_control(mach_task_self(), vmaddr, + VM_PURGABLE_SET_STATE, &state); + T_ASSERT_EQ(kr, KERN_INVALID_ARGUMENT, + "vm_purgable_control(VM_PURGABLE_DENY) -> 0x%x (%s)", + kr, mach_error_string(kr)); + + kr = vm_deallocate(mach_task_self(), vmaddr, 1); + T_ASSERT_MACH_SUCCESS(kr, "vm_deallocate()"); +} + +#define VMSIZE 0x10000 + +T_DECL(vm_remap_zero, "test vm map of zero size - rdar://33114981", + T_META_ALL_VALID_ARCHS(true)) +{ + kern_return_t kr; + mach_vm_address_t vmaddr1, vmaddr2; + mach_vm_size_t vmsize; + vm_prot_t curprot, maxprot; + + vmaddr1 = 0; + vmsize = VMSIZE; + kr = mach_vm_allocate(mach_task_self(), + &vmaddr1, + vmsize, + VM_FLAGS_ANYWHERE); + T_ASSERT_MACH_SUCCESS(kr, "vm_allocate()"); + + vmaddr2 = 0; + vmsize = 0; + kr = mach_vm_remap(mach_task_self(), + &vmaddr2, + vmsize, + 0, + VM_FLAGS_ANYWHERE, + mach_task_self(), + vmaddr1, + FALSE, + &curprot, + &maxprot, + VM_INHERIT_DEFAULT); + T_ASSERT_EQ(kr, KERN_INVALID_ARGUMENT, "vm_remap(size=0x%llx) 0x%x (%s)", + vmsize, kr, mach_error_string(kr)); + + vmaddr2 = 0; + vmsize = (mach_vm_size_t)-2; + kr = mach_vm_remap(mach_task_self(), + &vmaddr2, + vmsize, + 0, + VM_FLAGS_ANYWHERE, + mach_task_self(), + vmaddr1, + FALSE, + &curprot, + &maxprot, + VM_INHERIT_DEFAULT); + T_ASSERT_EQ(kr, KERN_INVALID_ARGUMENT, "vm_remap(size=0x%llx) 0x%x (%s)", + vmsize, kr, mach_error_string(kr)); +} + +extern int __shared_region_check_np(uint64_t *); + +T_DECL(nested_pmap_trigger, "nested pmap should only be triggered from kernel \ + - rdar://problem/41481703", + T_META_ALL_VALID_ARCHS(true)) +{ + int ret; + kern_return_t kr; + mach_vm_address_t sr_start; + mach_vm_size_t vmsize; + mach_vm_address_t vmaddr; + mach_port_t mem_entry; + + ret = __shared_region_check_np(&sr_start); + if (ret != 0) { + int saved_errno; + saved_errno = errno; + + T_ASSERT_EQ(saved_errno, ENOMEM, "__shared_region_check_np() %d (%s)", + saved_errno, strerror(saved_errno)); + T_END; + } + + vmsize = PAGE_SIZE; + kr = mach_make_memory_entry_64(mach_task_self(), + &vmsize, + sr_start, + MAP_MEM_VM_SHARE | VM_PROT_READ, + &mem_entry, + MACH_PORT_NULL); + T_ASSERT_MACH_SUCCESS(kr, "make_memory_entry(0x%llx)", sr_start); + + vmaddr = 0; + kr = mach_vm_map(mach_task_self(), + &vmaddr, + vmsize, + 0, + VM_FLAGS_ANYWHERE, + mem_entry, + 0, + FALSE, + VM_PROT_READ, + VM_PROT_READ, + VM_INHERIT_DEFAULT); + T_ASSERT_MACH_SUCCESS(kr, "vm_map()"); +} + +T_DECL(copyoverwrite_submap_protection, "test copywrite vm region submap \ + protection", T_META_ALL_VALID_ARCHS(true)) +{ + kern_return_t kr; + mach_vm_address_t vmaddr; + mach_vm_size_t vmsize; + natural_t depth; + vm_region_submap_short_info_data_64_t region_info; + mach_msg_type_number_t region_info_count; + + for (vmaddr = SHARED_REGION_BASE; + vmaddr < SHARED_REGION_BASE + SHARED_REGION_SIZE; + vmaddr += vmsize) { + depth = 99; + region_info_count = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64; + kr = mach_vm_region_recurse(mach_task_self(), + 
&vmaddr,
+            &vmsize,
+            &depth,
+            (vm_region_info_t) &region_info,
+            &region_info_count);
+        if (kr == KERN_INVALID_ADDRESS) {
+            break;
+        }
+        T_ASSERT_MACH_SUCCESS(kr, "vm_region_recurse(0x%llx)", vmaddr);
+        T_ASSERT_EQ(region_info_count,
+            VM_REGION_SUBMAP_SHORT_INFO_COUNT_64,
+            "vm_region_recurse(0x%llx) count = %d expected %d",
+            vmaddr, region_info_count,
+            VM_REGION_SUBMAP_SHORT_INFO_COUNT_64);
+
+        T_LOG("--> region: vmaddr 0x%llx depth %d prot 0x%x/0x%x",
+            vmaddr, depth, region_info.protection,
+            region_info.max_protection);
+        if (depth == 0) {
+            /* not a submap mapping: next mapping */
+            continue;
+        }
+        if (vmaddr >= SHARED_REGION_BASE + SHARED_REGION_SIZE) {
+            break;
+        }
+        kr = mach_vm_copy(mach_task_self(),
+            vmaddr,
+            vmsize,
+            vmaddr);
+        if (kr == KERN_PROTECTION_FAILURE) {
+            T_PASS("vm_copy(0x%llx,0x%llx) expected prot error 0x%x (%s)",
+                vmaddr, vmsize, kr, mach_error_string(kr));
+            continue;
+        }
+        T_ASSERT_MACH_SUCCESS(kr, "vm_copy(0x%llx,0x%llx) prot 0x%x",
+            vmaddr, vmsize, region_info.protection);
+        depth = 0;
+        region_info_count = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64;
+        kr = mach_vm_region_recurse(mach_task_self(),
+            &vmaddr,
+            &vmsize,
+            &depth,
+            (vm_region_info_t) &region_info,
+            &region_info_count);
+        T_ASSERT_MACH_SUCCESS(kr, "vm_region_recurse(0x%llx)", vmaddr);
+        T_ASSERT_EQ(region_info_count,
+            VM_REGION_SUBMAP_SHORT_INFO_COUNT_64,
+            "vm_region_recurse() count = %d expected %d",
+            region_info_count, VM_REGION_SUBMAP_SHORT_INFO_COUNT_64);
+
+        T_ASSERT_EQ(depth, 0, "vm_region_recurse(0x%llx): depth = %d expected 0",
+            vmaddr, depth);
+        T_ASSERT_EQ((region_info.protection & VM_PROT_EXECUTE),
+            0, "vm_region_recurse(0x%llx): prot 0x%x",
+            vmaddr, region_info.protection);
+    }
+}
+
+T_DECL(wire_text, "test wired text for rdar://problem/16783546 Wiring code in \
+    the shared region triggers code-signing violations",
+    T_META_ALL_VALID_ARCHS(true))
+{
+    char *addr;
+    int retval;
+    int saved_errno;
+    kern_return_t kr;
+    vm_address_t map_addr, remap_addr;
+    vm_prot_t curprot, maxprot;
+
+    addr = (char *)&printf;
+#if __has_feature(ptrauth_calls)
+    map_addr = (vm_address_t)(uintptr_t)ptrauth_strip(addr, ptrauth_key_function_pointer);
+#else /* __has_feature(ptrauth_calls) */
+    map_addr = (vm_address_t)(uintptr_t)addr;
+#endif /* __has_feature(ptrauth_calls) */
+    remap_addr = 0;
+    kr = vm_remap(mach_task_self(), &remap_addr, 4096,
+        0, /* mask */
+        VM_FLAGS_ANYWHERE,
+        mach_task_self(), map_addr,
+        FALSE, /* copy */
+        &curprot, &maxprot,
+        VM_INHERIT_DEFAULT);
+    T_ASSERT_EQ(kr, KERN_SUCCESS, "vm_remap error 0x%x (%s)",
+        kr, mach_error_string(kr));
+    retval = mlock(addr, 4096);
+    if (retval != 0) {
+        saved_errno = errno;
+        T_ASSERT_EQ(saved_errno, EACCES, "wire shared text error %d (%s), expected: %d",
+            saved_errno, strerror(saved_errno), EACCES);
+    } else {
+        T_PASS("wire shared text");
+    }
+
+    addr = (char *) &fprintf;
+    retval = mlock(addr, 4096);
+    if (retval != 0) {
+        saved_errno = errno;
+        T_ASSERT_EQ(saved_errno, EACCES, "wire shared text error %d (%s), expected: %d",
+            saved_errno, strerror(saved_errno), EACCES);
+    } else {
+        T_PASS("wire shared text");
+    }
+
+    addr = (char *) &testmain_wire_text;
+    retval = mlock(addr, 4096);
+    if (retval != 0) {
+        saved_errno = errno;
+        T_ASSERT_EQ(saved_errno, EACCES, "wire text error return error %d (%s)",
+            saved_errno, strerror(saved_errno));
+    } else {
+        T_PASS("wire text");
+    }
+}
diff --git a/tests/voucher_traps.c b/tests/voucher_traps.c
index 6731d3bb1..837255f3a 100644
--- a/tests/voucher_traps.c
+++ b/tests/voucher_traps.c
@@ -23,18
+23,31 @@ T_GLOBAL_META(T_META_RUN_CONCURRENTLY(true)); static mach_port_t -get_atm_voucher(void) +get_user_data_port(mach_msg_type_number_t *size) { - mach_voucher_attr_recipe_data_t r = { - .key = MACH_VOUCHER_ATTR_KEY_ATM, - .command = MACH_VOUCHER_ATTR_ATM_CREATE +#define DATA "Hello World!" + struct { + mach_voucher_attr_recipe_data_t recipe; + char data[sizeof(DATA)]; + } buf = { + .recipe = { + .key = MACH_VOUCHER_ATTR_KEY_USER_DATA, + .command = MACH_VOUCHER_ATTR_USER_DATA_STORE, + .content_size = sizeof(DATA), + }, + .data = DATA, }; + mach_port_t port = MACH_PORT_NULL; kern_return_t kr = host_create_mach_voucher(mach_host_self(), - (mach_voucher_attr_raw_recipe_array_t)&r, - sizeof(r), &port); - T_ASSERT_MACH_SUCCESS(kr, "Create ATM voucher: 0x%x", (unsigned int)port); + (mach_voucher_attr_raw_recipe_array_t)&buf, + sizeof(buf), &port); + T_ASSERT_MACH_SUCCESS(kr, "Create USER_DATA voucher: 0x%x", + (unsigned int)port); + if (size) { + *size = sizeof(buf); + } return port; } @@ -45,6 +58,7 @@ T_DECL(voucher_extract_attr_recipe, "voucher_extract_attr_recipe") mach_vm_size_t alloc_sz; mach_port_t port; mach_vm_address_t alloc_addr; + mach_msg_type_number_t expected_size; /* map at least a page of memory at some arbitrary location */ alloc_sz = (mach_vm_size_t)round_page(MACH_VOUCHER_TRAP_STACK_LIMIT + 1); @@ -84,15 +98,16 @@ T_DECL(voucher_extract_attr_recipe, "voucher_extract_attr_recipe") void *recipe = malloc(size); memset(recipe, 0x41, size); - port = get_atm_voucher(); + port = get_user_data_port(&expected_size); /* - * This should try to extract the ATM attribute using a buffer on the + * This should try to extract the USER_DATA attribute using a buffer on the * kernel heap (probably zone memory). */ - kr = mach_voucher_extract_attr_recipe_trap(port, MACH_VOUCHER_ATTR_KEY_ATM, - recipe, recipe_size); + kr = mach_voucher_extract_attr_recipe_trap(port, + MACH_VOUCHER_ATTR_KEY_USER_DATA, recipe, recipe_size); T_ASSERT_MACH_SUCCESS(kr, "Extract attribute data with recipe: heap"); + T_ASSERT_EQ(*recipe_size, expected_size, "size should match"); /* reset the recipe memory */ memset(recipe, 0x41, size); @@ -100,12 +115,13 @@ T_DECL(voucher_extract_attr_recipe, "voucher_extract_attr_recipe") *recipe_size = MACH_VOUCHER_TRAP_STACK_LIMIT - 1; /* - * This should try to extract the ATM attribute using a buffer on the + * This should try to extract the USER_DATA attribute using a buffer on the * kernel stack. */ - kr = mach_voucher_extract_attr_recipe_trap(port, MACH_VOUCHER_ATTR_KEY_ATM, - recipe, recipe_size); + kr = mach_voucher_extract_attr_recipe_trap(port, + MACH_VOUCHER_ATTR_KEY_USER_DATA, recipe, recipe_size); T_ASSERT_MACH_SUCCESS(kr, "Extract attribute data with recipe: stack"); + T_ASSERT_EQ(*recipe_size, expected_size, "size should match"); /* cleanup */ diff --git a/tests/vsock.c b/tests/vsock.c new file mode 100644 index 000000000..8593386a7 --- /dev/null +++ b/tests/vsock.c @@ -0,0 +1,838 @@ +/* + * Copyright (c) 2020 Apple Inc. All rights reserved. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. 
The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. + * + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ + */ + +#include +#include +#include +#include +#include +#include + +#include +#include + +#define COUNT_ELEMS(array) (sizeof (array) / sizeof (array[0])) + +T_GLOBAL_META( + T_META_RUN_CONCURRENTLY(true), + T_META_NAMESPACE("xnu.vsock") + ); + +static int +vsock_new_socket(void) +{ + int sock = socket(AF_VSOCK, SOCK_STREAM, 0); + if (sock < 0 && errno == ENODEV) { + T_SKIP("no vsock transport available"); + } + T_ASSERT_GT(sock, 0, "create new vsock socket"); + return sock; +} + +static uint32_t +vsock_get_local_cid(int socket) +{ + uint32_t cid = 0; + int result = ioctl(socket, IOCTL_VM_SOCKETS_GET_LOCAL_CID, &cid); + T_ASSERT_POSIX_SUCCESS(result, "vsock ioctl cid successful"); + T_ASSERT_GT(cid, VMADDR_CID_HOST, "cid is set"); + T_ASSERT_NE(cid, VMADDR_CID_ANY, "cid is valid"); + + return cid; +} + +static int +vsock_bind(uint32_t cid, uint32_t port, struct sockaddr_vm * addr, int *socket) +{ + *socket = vsock_new_socket(); + + bzero(addr, sizeof(*addr)); + addr->svm_port = port; + addr->svm_cid = cid; + + return bind(*socket, (struct sockaddr *) addr, sizeof(*addr)); +} + +static int +vsock_listen(uint32_t cid, uint32_t port, struct sockaddr_vm * addr, int backlog, int *socket) +{ + int result = vsock_bind(cid, port, addr, socket); + T_ASSERT_POSIX_SUCCESS(result, "vsock bind"); + return listen(*socket, backlog); +} + +static int +vsock_connect(uint32_t cid, uint32_t port, int *socket) +{ + *socket = vsock_new_socket(); + struct sockaddr_vm addr = (struct sockaddr_vm) { + .svm_cid = cid, + .svm_port = port, + }; + return connect(*socket, (struct sockaddr *)&addr, sizeof(addr)); +} + +static struct sockaddr_vm +vsock_getsockname(int socket) +{ + struct sockaddr_vm addr; + socklen_t length = sizeof(addr); + int result = getsockname(socket, (struct sockaddr *)&addr, &length); + T_ASSERT_POSIX_SUCCESS(result, "vsock getsockname"); + T_ASSERT_EQ_INT((int) sizeof(addr), length, "correct address length"); + T_ASSERT_GT(addr.svm_port, 0, "bound to non-zero local port"); + return addr; +} + +static void +vsock_close(int socket) +{ + int result = close(socket); + T_ASSERT_POSIX_SUCCESS(result, "vsock close"); +} + +static void +vsock_connect_peers(uint32_t cid, uint32_t port, int backlog, int *socketA, int *socketB) +{ + // Listen. + struct sockaddr_vm addr; + int listen_socket; + int result = vsock_listen(cid, port, &addr, backlog, &listen_socket); + T_ASSERT_POSIX_SUCCESS(result, "vsock listen"); + + const uint32_t connection_cid = vsock_get_local_cid(listen_socket); + + // Connect. 
+ int connect_socket; + result = vsock_connect(connection_cid, addr.svm_port, &connect_socket); + T_ASSERT_POSIX_SUCCESS(result, "vsock connect"); + + // Accept. + struct sockaddr_vm accepted_addr; + socklen_t addrlen = sizeof(accepted_addr); + int accepted_socket = accept(listen_socket, (struct sockaddr *)&accepted_addr, &addrlen); + T_ASSERT_GT(accepted_socket, 0, "accepted socket"); + T_ASSERT_EQ_INT((int) sizeof(accepted_addr), addrlen, "correct address length"); + T_ASSERT_EQ_INT(connection_cid, accepted_addr.svm_cid, "same cid"); + T_ASSERT_NE_INT(VMADDR_CID_ANY, accepted_addr.svm_port, "some valid port"); + T_ASSERT_NE_INT(0, accepted_addr.svm_port, "some non-zero port"); + + *socketA = connect_socket; + *socketB = accepted_socket; +} + +static void +vsock_send(int socket, char *msg) +{ + T_ASSERT_NOTNULL(msg, "send message is not null"); + ssize_t sent_bytes = send(socket, msg, strlen(msg), 0); + T_ASSERT_EQ_LONG(strlen(msg), (unsigned long)sent_bytes, "sent all bytes"); +} + +static void +vsock_disable_sigpipe(int socket) +{ + int on = 1; + int result = setsockopt(socket, SOL_SOCKET, SO_NOSIGPIPE, &on, sizeof(on)); + T_ASSERT_POSIX_SUCCESS(result, "vsock disable SIGPIPE"); +} + +static bool +vsock_address_exists(struct xvsockpgen *buffer, struct sockaddr_vm addr) +{ + struct xvsockpgen *xvg = buffer; + struct xvsockpgen *oxvg = buffer; + + bool found = false; + for (xvg = (struct xvsockpgen *)((char *)xvg + xvg->xvg_len); + xvg->xvg_len > sizeof(struct xvsockpgen); + xvg = (struct xvsockpgen *)((char *)xvg + xvg->xvg_len)) { + struct xvsockpcb *xpcb = (struct xvsockpcb *)xvg; + + /* Ignore PCBs which were freed during copyout. */ + if (xpcb->xvp_gencnt > oxvg->xvg_gen) { + continue; + } + + if (xpcb->xvp_local_cid == addr.svm_cid && xpcb->xvp_remote_cid == VMADDR_CID_ANY && + xpcb->xvp_local_port == addr.svm_port && xpcb->xvp_remote_port == VMADDR_PORT_ANY) { + found = true; + break; + } + } + + T_ASSERT_NE(xvg, oxvg, "first and last xvsockpgen were returned"); + + return found; +} + +/* New Socket */ + +T_DECL(new_socket_getsockname, "vsock new - getsockname") +{ + int socket = vsock_new_socket(); + + struct sockaddr_vm addr; + socklen_t length = sizeof(struct sockaddr_vm); + int result = getsockname(socket, (struct sockaddr *)&addr, &length); + T_ASSERT_POSIX_SUCCESS(result, "vsock getsockname"); + T_ASSERT_EQ_INT(addr.svm_port, VMADDR_PORT_ANY, "name is any port"); + T_ASSERT_EQ_INT(addr.svm_cid, VMADDR_CID_ANY, "name is any cid"); +} + +T_DECL(new_socket_getpeername, "vsock new - getpeername") +{ + int socket = vsock_new_socket(); + + struct sockaddr_vm addr; + socklen_t length = sizeof(struct sockaddr_vm); + int result = getpeername(socket, (struct sockaddr *)&addr, &length); + T_ASSERT_POSIX_FAILURE(result, ENOTCONN, "vsock getpeername"); +} + +/* Ioctl */ + +T_DECL(ioctl_cid, "vsock ioctl cid") +{ + int socket = vsock_new_socket(); + vsock_get_local_cid(socket); +} + +/* Socketpair */ + +T_DECL(socketpair, "vsock socketpair") +{ + int pair[2]; + int error = socketpair(AF_VSOCK, SOCK_STREAM, 0, pair); + if (error < 0 && errno == ENODEV) { + T_SKIP("no vsock transport available"); + } + T_ASSERT_POSIX_FAILURE(error, EOPNOTSUPP, "vsock socketpair not supported"); +} + +/* Bind */ + +T_DECL(bind, "vsock bind to specific port") +{ + int socket; + struct sockaddr_vm addr; + int result = vsock_bind(VMADDR_CID_ANY, 8888, &addr, &socket); + T_ASSERT_POSIX_SUCCESS(result, "vsock bind to specific port"); +} + +T_DECL(bind_any, "vsock bind to any port") +{ + int socket; + struct 
sockaddr_vm addr; + int result = vsock_bind(VMADDR_CID_ANY, VMADDR_PORT_ANY, &addr, &socket); + T_ASSERT_POSIX_SUCCESS(result, "vsock bind to any port"); +} + +T_DECL(bind_getsockname, "vsock bind - getsockname") +{ + int socket; + struct sockaddr_vm addr; + const uint32_t port = VMADDR_PORT_ANY; + const uint32_t cid = VMADDR_CID_ANY; + int result = vsock_bind(cid, port, &addr, &socket); + T_ASSERT_POSIX_SUCCESS(result, "vsock bind to any port"); + + struct sockaddr_vm bound_addr = vsock_getsockname(socket); + T_ASSERT_NE_INT(bound_addr.svm_port, port, "bound to unique local port"); + T_ASSERT_EQ_INT(bound_addr.svm_cid, cid, "bound to any cid"); +} + +T_DECL(bind_hypervisor, "vsock do not bind to hypervisor cid") +{ + int socket; + struct sockaddr_vm addr; + int result = vsock_bind(VMADDR_CID_HYPERVISOR, VMADDR_PORT_ANY, &addr, &socket); + T_ASSERT_POSIX_FAILURE(result, EADDRNOTAVAIL, "vsock do not bind to hypervisor cid"); +} + +T_DECL(bind_reserved, "vsock do not bind to reserved cid") +{ + int socket; + struct sockaddr_vm addr; + int result = vsock_bind(VMADDR_CID_RESERVED, VMADDR_PORT_ANY, &addr, &socket); + T_ASSERT_POSIX_FAILURE(result, EADDRNOTAVAIL, "vsock do not bind to reserved cid"); +} + +T_DECL(bind_host, "vsock do not bind to host cid") +{ + int socket; + struct sockaddr_vm addr; + int result = vsock_bind(VMADDR_CID_HOST, VMADDR_PORT_ANY, &addr, &socket); + T_ASSERT_POSIX_FAILURE(result, EADDRNOTAVAIL, "vsock do not bind to host cid"); +} + +T_DECL(bind_zero, "vsock bind to port zero", T_META_ASROOT(true)) +{ + const uint32_t port = 0; + + int socket; + struct sockaddr_vm addr; + int result = vsock_bind(VMADDR_CID_ANY, port, &addr, &socket); + T_ASSERT_POSIX_SUCCESS(result, "vsock bind to port zero"); + + struct sockaddr_vm bound_addr; + socklen_t length = sizeof(struct sockaddr_vm); + result = getsockname(socket, (struct sockaddr *)&bound_addr, &length); + T_ASSERT_POSIX_SUCCESS(result, "vsock getsockname"); + T_ASSERT_EQ_INT((int) sizeof(bound_addr), length, "correct address length"); + T_ASSERT_EQ_UINT(bound_addr.svm_port, port, "bound to local port zero"); +} + +T_DECL(bind_double, "vsock double bind") +{ + const uint32_t cid = VMADDR_CID_ANY; + const uint32_t port = 8899; + + int socket; + struct sockaddr_vm addr; + int result = vsock_bind(cid, port, &addr, &socket); + T_ASSERT_POSIX_SUCCESS(result, "vsock bind to a port"); + + result = bind(socket, (struct sockaddr *) &addr, sizeof(addr)); + T_ASSERT_POSIX_FAILURE(result, EINVAL, "vsock bind to same port"); +} + +T_DECL(bind_same, "vsock bind same address and port") +{ + const uint32_t cid = VMADDR_CID_ANY; + const uint32_t port = 3399; + + int socket; + struct sockaddr_vm addr; + int result = vsock_bind(cid, port, &addr, &socket); + T_ASSERT_POSIX_SUCCESS(result, "vsock bind to a port"); + + result = vsock_bind(cid, port, &addr, &socket); + T_ASSERT_POSIX_FAILURE(result, EADDRINUSE, "vsock bind to same address and port"); +} + +T_DECL(bind_port_reuse, "vsock bind port reuse") +{ + const uint32_t cid = VMADDR_CID_ANY; + const uint32_t port = 9111; + + int socket; + struct sockaddr_vm addr; + int result = vsock_bind(cid, port, &addr, &socket); + T_ASSERT_POSIX_SUCCESS(result, "vsock bind to a port"); + + vsock_close(socket); + + result = vsock_bind(cid, port, &addr, &socket); + T_ASSERT_POSIX_SUCCESS(result, "vsock bind to a port"); +} + +T_DECL(bind_privileged_non_root, "vsock bind on privileged port - non-root", T_META_ASROOT(false)) +{ + if (geteuid() == 0) { + T_SKIP("test requires non-root privileges to run."); + } 
+ struct sockaddr_vm addr; + int socket; + int result = vsock_bind(VMADDR_CID_ANY, 5, &addr, &socket); + T_ASSERT_POSIX_FAILURE(result, EACCES, "vsock bind privileged as non-root"); +} + +T_DECL(bind_privileged_root, "vsock bind on privileged port - root", T_META_ASROOT(true)) +{ + if (geteuid() != 0) { + T_SKIP("test requires root privileges to run."); + } + struct sockaddr_vm addr; + int socket; + int result = vsock_bind(VMADDR_CID_ANY, 6, &addr, &socket); + T_ASSERT_POSIX_SUCCESS(result, "vsock bind privileged as root"); +} + +T_DECL(bind_no_family, "vsock bind with unspecified family") +{ + int socket = vsock_new_socket(); + + struct sockaddr_vm addr = (struct sockaddr_vm) { + .svm_family = AF_UNSPEC, + .svm_cid = VMADDR_CID_ANY, + .svm_port = 7321, + }; + + int result = bind(socket, (struct sockaddr *) &addr, sizeof(addr)); + T_ASSERT_POSIX_SUCCESS(result, "vsock bind with unspecified family"); +} + +T_DECL(bind_vsock_family, "vsock bind with vsock family") +{ + int socket = vsock_new_socket(); + + struct sockaddr_vm addr = (struct sockaddr_vm) { + .svm_family = AF_VSOCK, + .svm_cid = VMADDR_CID_ANY, + .svm_port = 7322, + }; + + int result = bind(socket, (struct sockaddr *) &addr, sizeof(addr)); + T_ASSERT_POSIX_SUCCESS(result, "vsock bind with vsock family"); +} + +T_DECL(bind_wrong_family, "vsock bind with wrong family") +{ + int socket = vsock_new_socket(); + + struct sockaddr_vm addr = (struct sockaddr_vm) { + .svm_family = AF_INET, + .svm_cid = VMADDR_CID_ANY, + .svm_port = 7323, + }; + + int result = bind(socket, (struct sockaddr *) &addr, sizeof(addr)); + T_ASSERT_POSIX_FAILURE(result, EAFNOSUPPORT, "vsock bind with wrong family"); +} + +/* Listen */ + +T_DECL(listen, "vsock listen on specific port") +{ + struct sockaddr_vm addr; + int socket; + int result = vsock_listen(VMADDR_CID_ANY, 8889, &addr, 10, &socket); + T_ASSERT_POSIX_SUCCESS(result, "vsock listen"); +} + +T_DECL(listen_any, "vsock listen on any port") +{ + struct sockaddr_vm addr; + int socket; + int result = vsock_listen(VMADDR_CID_ANY, VMADDR_PORT_ANY, &addr, 10, &socket); + T_ASSERT_POSIX_SUCCESS(result, "vsock listen"); +} + +/* Connect */ + +T_DECL(connect_non_hypervisor, "vsock connect to remote other than hypervisor") +{ + int socket; + int result = vsock_connect(5555, 1234, &socket); + T_ASSERT_POSIX_FAILURE(result, EFAULT, "vsock connect non-hypervisor"); +} + +T_DECL(connect_non_listening_host, "vsock connect to non-listening host port") +{ + int socket; + int result = vsock_connect(VMADDR_CID_HOST, 7777, &socket); + T_ASSERT_POSIX_FAILURE(result, EAGAIN, "vsock connect non-listening host port"); +} + +T_DECL(connect_non_listening_hypervisor, "vsock connect to non-listening hypervisor port") +{ + int socket; + int result = vsock_connect(VMADDR_CID_HYPERVISOR, 4444, &socket); + T_ASSERT_POSIX_FAILURE(result, EAGAIN, "vsock connect non-listening hypervisor port"); +} + +T_DECL(connect_getsockname, "vsock connect - getsockname") +{ + int socket; + int result = vsock_connect(VMADDR_CID_HOST, 9999, &socket); + T_ASSERT_POSIX_FAILURE(result, EAGAIN, "vsock connect non-listening"); + + vsock_getsockname(socket); +} + +T_DECL(connect_timeout, "vsock connect with timeout") +{ + int socket = vsock_new_socket(); + + struct timeval timeout = (struct timeval) { + .tv_sec = 0, + .tv_usec = 1, + }; + int result = setsockopt(socket, SOL_SOCKET, SO_SNDTIMEO, &timeout, sizeof(timeout)); + T_ASSERT_POSIX_SUCCESS(result, "vsock set socket timeout"); + + struct sockaddr_vm addr = (struct sockaddr_vm) { + .svm_cid = 
VMADDR_CID_HOST, + .svm_port = 4321, + }; + result = connect(socket, (struct sockaddr *)&addr, sizeof(addr)); + T_ASSERT_POSIX_FAILURE(result, ETIMEDOUT, "vsock connect timeout"); +} + +T_DECL(connect_non_blocking, "vsock connect non-blocking") +{ + int socket = vsock_new_socket(); + + const uint32_t port = 4321; + const uint32_t cid = vsock_get_local_cid(socket); + + // Listen. + struct sockaddr_vm listen_addr; + int listen_socket; + long result = vsock_listen(cid, port, &listen_addr, 10, &listen_socket); + T_ASSERT_POSIX_SUCCESS(result, "vsock listen"); + + // Set non-blocking. + long arg = fcntl(socket, F_GETFL, NULL); + T_ASSERT_GT(arg, -1L, "vsock get args"); + arg |= O_NONBLOCK; + result = fcntl(socket, F_SETFL, arg); + T_ASSERT_GT(result, -1L, "vsock set args"); + + // Connect. + struct sockaddr_vm addr = (struct sockaddr_vm) { + .svm_cid = cid, + .svm_port = port, + }; + result = connect(socket, (struct sockaddr *)&addr, sizeof(addr)); + if (result != 0 && errno != EINPROGRESS) { + T_ASSERT_FAIL("vsock connect should succeed or return EINPROGRESS. errno: %u", errno); + } + + vsock_close(socket); + vsock_close(listen_socket); +} + +/* Shutdown */ + +T_DECL(shutdown_not_connected, "vsock shutdown - not connected") +{ + int how[] = {SHUT_RD, SHUT_WR, SHUT_RDWR}; + for (unsigned long i = 0; i < COUNT_ELEMS(how); i++) { + int socket = vsock_new_socket(); + int result = shutdown(socket, how[i]); + T_ASSERT_POSIX_FAILURE(result, ENOTCONN, "vsock cannot shutdown"); + } +} + +T_DECL(shutdown_reads, "vsock shutdown - reads") +{ + int socketA, socketB; + vsock_connect_peers(VMADDR_CID_ANY, 8989, 10, &socketA, &socketB); + + char *msg = "This is test message.\n"; + + // 'A' sends a message. + vsock_send(socketA, msg); + + // 'B' shuts down reads. + int result = shutdown(socketB, SHUT_RD); + T_ASSERT_POSIX_SUCCESS(result, "vsock shutdown reads"); + + // 'B' reads nothing. + char buffer[1024] = {0}; + ssize_t read_bytes = read(socketB, buffer, 1024); + T_ASSERT_EQ_LONG(0L, read_bytes, "read zero bytes"); + + // 'B' can still send. + vsock_send(socketB, msg); + + vsock_close(socketA); + vsock_close(socketB); +} + +T_DECL(shutdown_writes, "vsock shutdown - writes") +{ + int socketA, socketB; + vsock_connect_peers(VMADDR_CID_ANY, 8787, 10, &socketA, &socketB); + + char *msg = "This is test message.\n"; + + // 'A' sends a message. + vsock_send(socketA, msg); + + // 'B' sends a message. + vsock_send(socketB, msg); + + // send() hits us with a SIGPIPE if the peer closes. Ignore this and catch the error code. + vsock_disable_sigpipe(socketB); + + // 'B' shuts down writes. + int result = shutdown(socketB, SHUT_WR); + T_ASSERT_POSIX_SUCCESS(result, "vsock shutdown writes"); + + // 'B' fails to write. + ssize_t sent_bytes = send(socketB, msg, strlen(msg), 0); + T_ASSERT_POSIX_FAILURE(sent_bytes, EPIPE, "vsock cannot write"); + + // 'B' can still read. + char buffer[1024] = {0}; + ssize_t read_bytes = read(socketB, buffer, 1024); + T_ASSERT_EQ_LONG(strlen(msg), (unsigned long)read_bytes, "read all bytes"); + + vsock_close(socketA); + vsock_close(socketB); +} + +T_DECL(shutdown_both, "vsock shutdown - both") +{ + int socketA, socketB; + vsock_connect_peers(VMADDR_CID_ANY, 8686, 10, &socketA, &socketB); + + char *msg = "This is test message.\n"; + char buffer[1024] = {0}; + + // 'A' sends a message. + vsock_send(socketA, msg); + + // 'B' sends a message. + vsock_send(socketB, msg); + + // 'B' reads a message.
+ ssize_t read_bytes = read(socketB, buffer, 1024); + T_ASSERT_EQ_LONG(strlen(msg), (unsigned long)read_bytes, "read all bytes"); + T_ASSERT_EQ_STR(msg, buffer, "same message"); + + // 'A' sends a message. + vsock_send(socketA, msg); + + // send() hits us with a SIGPIPE if the peer closes. Ignore this and catch the error code. + vsock_disable_sigpipe(socketB); + + // 'B' shuts down reads and writes. + int result = shutdown(socketB, SHUT_RDWR); + T_ASSERT_POSIX_SUCCESS(result, "vsock shutdown reads and writes"); + + // 'B' fails to write. + ssize_t sent_bytes = send(socketB, msg, strlen(msg), 0); + T_ASSERT_POSIX_FAILURE(sent_bytes, EPIPE, "vsock cannot write"); + + // 'B' reads nothing. + read_bytes = read(socketB, buffer, 1024); + T_ASSERT_EQ_LONG(0L, read_bytes, "read zero bytes"); + + vsock_close(socketA); + vsock_close(socketB); +} + +/* Communication */ + +T_DECL(talk_self, "vsock talk to self") +{ + int socketA, socketB; + vsock_connect_peers(VMADDR_CID_ANY, 4545, 10, &socketA, &socketB); + + char buffer[1024] = {0}; + + for (int i = 0; i < 64; i++) { + // Send a message. + char *msg = (char*)malloc(64 * sizeof(char)); + sprintf(msg, "This is test message %d\n", i); + vsock_send(socketA, msg); + + // Receive a message. + ssize_t read_bytes = read(socketB, buffer, 1024); + T_ASSERT_EQ_LONG(strlen(msg), (unsigned long)read_bytes, "read all bytes"); + T_ASSERT_EQ_STR(msg, buffer, "same message"); + free(msg); + } + + vsock_close(socketA); + vsock_close(socketB); +} + +T_DECL(talk_self_double, "vsock talk to self - double sends") +{ + int socketA, socketB; + vsock_connect_peers(VMADDR_CID_ANY, 4646, 10, &socketA, &socketB); + + char buffer[1024] = {0}; + + for (int i = 0; i < 64; i++) { + // Send a message. + char *msg = (char*)malloc(64 * sizeof(char)); + sprintf(msg, "This is test message %d\n", i); + vsock_send(socketA, msg); + + // Send the same message. + vsock_send(socketA, msg); + + // Receive a message. + ssize_t read_bytes = read(socketB, buffer, 1024); + T_ASSERT_EQ_LONG(strlen(msg) * 2, (unsigned long)read_bytes, "read all bytes"); + char *expected_msg = (char*)malloc(64 * sizeof(char)); + sprintf(expected_msg, "%s%s", msg, msg); + T_ASSERT_EQ_STR(expected_msg, buffer, "same message"); + free(msg); + free(expected_msg); + } + + vsock_close(socketA); + vsock_close(socketB); +} + +T_DECL(talk_self_early_close, "vsock talk to self - peer closes early") +{ + int socketA, socketB; + vsock_connect_peers(VMADDR_CID_ANY, 4646, 10, &socketA, &socketB); + + char *msg = "This is a message."; + vsock_send(socketA, msg); + + // send() hits us with a SIGPIPE if the peer closes. Ignore this and catch the error code. + vsock_disable_sigpipe(socketA); + + vsock_close(socketB); + + ssize_t result = send(socketA, msg, strlen(msg), 0); + T_ASSERT_POSIX_FAILURE(result, EPIPE, "vsock peer closed"); + + vsock_close(socketA); +} + +T_DECL(talk_self_connections, "vsock talk to self - too many connections") +{ + const uint32_t port = 4747; + const int backlog = 1; + + struct sockaddr_vm listen_addr; + int listen_socket; + int result = vsock_listen(VMADDR_CID_ANY, port, &listen_addr, backlog, &listen_socket); + T_ASSERT_POSIX_SUCCESS(result, "vsock listen"); + + const uint32_t connection_cid = vsock_get_local_cid(listen_socket); + + // One backlog.
+ int connected_socket = vsock_new_socket(); + struct sockaddr_vm addr = (struct sockaddr_vm) { + .svm_cid = connection_cid, + .svm_port = port, + }; + result = connect(connected_socket, (struct sockaddr *)&addr, sizeof(addr)); + T_ASSERT_POSIX_SUCCESS(result, "vsock connection successful"); + + int bad_socket = vsock_new_socket(); + result = connect(bad_socket, (struct sockaddr *)&addr, sizeof(addr)); + T_ASSERT_POSIX_FAILURE(result, ECONNREFUSED, "vsock connection refused"); + + vsock_close(connected_socket); + vsock_close(listen_socket); +} + +/* Sysctl */ + +static const char* pcblist = "net.vsock.pcblist"; + +T_DECL(vsock_pcblist_simple, "vsock pcblist sysctl - simple") +{ + // Create some socket to discover in the pcblist. + struct sockaddr_vm addr; + int socket; + int result = vsock_listen(VMADDR_CID_ANY, 88899, &addr, 10, &socket); + T_ASSERT_POSIX_SUCCESS(result, "vsock listen on a port"); + + // Get the buffer length for the pcblist. + size_t length = 0; + result = sysctlbyname(pcblist, 0, &length, 0, 0); + if (result == ENOENT) { + T_SKIP("%s missing", pcblist); + } + T_ASSERT_POSIX_SUCCESS(result, "vsock pcblist get buffer size (result %d)", result); + + // Allocate the buffer. + struct xvsockpgen *buffer = malloc(length); + T_ASSERT_NOTNULL(buffer, "allocated buffer is not null"); + + // Populate the buffer with the pcblist. + result = sysctlbyname(pcblist, buffer, &length, 0, 0); + T_ASSERT_POSIX_SUCCESS(result, "vsock pcblist populate buffer"); + + // The socket should exist in the list. + bool exists = vsock_address_exists(buffer, addr); + T_ASSERT_TRUE(exists, "vsock pcblist contains the specified socket"); + + vsock_close(socket); + free(buffer); +} + +T_DECL(vsock_pcblist_added, "vsock pcblist sysctl - socket added") +{ + // Get the buffer length for the pcblist. + size_t length = 0; + int result = sysctlbyname(pcblist, 0, &length, 0, 0); + if (result == ENOENT) { + T_SKIP("%s missing", pcblist); + } + T_ASSERT_POSIX_SUCCESS(result, "vsock pcblist get buffer size (result %d)", result); + + // Create some socket to discover in the pcblist after making the first sysctl. + struct sockaddr_vm addr; + int socket; + result = vsock_listen(VMADDR_CID_ANY, 77799, &addr, 10, &socket); + T_ASSERT_POSIX_SUCCESS(result, "vsock listen on a port"); + + // Allocate the buffer. + struct xvsockpgen *buffer = malloc(length); + T_ASSERT_NOTNULL(buffer, "allocated buffer is not null"); + + // Populate the buffer with the pcblist. + result = sysctlbyname(pcblist, buffer, &length, 0, 0); + T_ASSERT_POSIX_SUCCESS(result, "vsock pcblist populate buffer"); + + // The socket was created after the buffer and cannot fit. + bool exists = vsock_address_exists(buffer, addr); + T_ASSERT_FALSE(exists, "vsock pcblist should not contain the new socket"); + + vsock_close(socket); + free(buffer); +} + +T_DECL(vsock_pcblist_removed, "vsock pcblist sysctl - socket removed") +{ + // Create some socket to be removed after making the first sysctl. + struct sockaddr_vm addr; + int socket; + int result = vsock_listen(VMADDR_CID_ANY, 66699, &addr, 10, &socket); + T_ASSERT_POSIX_SUCCESS(result, "vsock listen on a port"); + + // Get the buffer length for the pcblist. + size_t length = 0; + result = sysctlbyname(pcblist, 0, &length, 0, 0); + if (result == ENOENT) { + T_SKIP("%s missing", pcblist); + } + T_ASSERT_POSIX_SUCCESS(result, "vsock pcblist get buffer size (result %d)", result); + + // Close the socket early. + vsock_close(socket); + + // Allocate the buffer. 
+ struct xvsockpgen *buffer = malloc(length); + T_ASSERT_NOTNULL(buffer, "allocated buffer is not null"); + + // Populate the buffer with the pcblist. + result = sysctlbyname(pcblist, buffer, &length, 0, 0); + T_ASSERT_POSIX_SUCCESS(result, "vsock pcblist populate buffer"); + + // The socket was destroyed before populating the list and should not exist. + bool exists = vsock_address_exists(buffer, addr); + T_ASSERT_FALSE(exists, "vsock pcblist should not contain the deleted socket"); + + free(buffer); +} diff --git a/tests/wired_mem_bench.c b/tests/wired_mem_bench.c deleted file mode 100644 index 0e6f2bda5..000000000 --- a/tests/wired_mem_bench.c +++ /dev/null @@ -1,100 +0,0 @@ -/* - * Copyright (c) 2015-2018 Apple Inc. All rights reserved. - * - * @APPLE_LICENSE_HEADER_START@ - * - * This file contains Original Code and/or Modifications of Original Code - * as defined in and that are subject to the Apple Public Source License - * Version 2.0 (the 'License'). You may not use this file except in - * compliance with the License. Please obtain a copy of the License at - * http://www.opensource.apple.com/apsl/ and read it before using this - * file. - * - * The Original Code and all software distributed under the License are - * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER - * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, - * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. - * Please see the License for the specific language governing rights and - * limitations under the License. - * - * @APPLE_LICENSE_HEADER_END@ - */ - -#include - -#include -#include -#include -#include -#include -#include - -#define WIRED_MEM_THRESHOLD_PERCENTAGE 30 - -T_DECL(wired_mem_bench, - "report the amount of wired memory consumed by the booted OS; guard against egregious or unexpected regressions", - T_META_CHECK_LEAKS(false), - T_META_ASROOT(true), - T_META_REQUIRES_REBOOT(true)) // Help reduce noise by asking for a clean boot -// T_META_TAG_PERF) -{ - vm_statistics64_data_t stat; - uint64_t memsize; - vm_size_t page_size = 0; - unsigned int count = HOST_VM_INFO64_COUNT; - kern_return_t ret; - int wired_mem_pct; - struct utsname uname_vers; - - T_SETUPBEGIN; - ret = uname(&uname_vers); - T_QUIET; - T_ASSERT_POSIX_SUCCESS(ret, "uname()"); - - if (strnstr(uname_vers.version, "KASAN", sizeof(uname_vers.version)) != NULL) { - T_SKIP("wired memory metrics are not meaningful on KASAN kernels."); - } - - ret = host_statistics64(mach_host_self(), HOST_VM_INFO64, (host_info64_t)&stat, &count); - T_QUIET; - T_ASSERT_MACH_SUCCESS(ret, "wired memory query via host_statistics64()"); - - size_t s = sizeof(memsize); - ret = sysctlbyname("hw.memsize", &memsize, &s, NULL, 0); - T_QUIET; - T_ASSERT_POSIX_SUCCESS(ret, "sysctlbyname(\"hw.memsize\")"); - - T_QUIET; - T_EXPECT_NE(memsize, 0ULL, "hw.memsize sysctl failed to provide device DRAM size"); - - ret = host_page_size(mach_host_self(), &page_size); - T_QUIET; - T_ASSERT_MACH_SUCCESS(ret, "page size query via host_page_size()"); - - T_SETUPEND; - - T_PERF("wired_memory", (double)(stat.wire_count * (mach_vm_size_t)vm_kernel_page_size >> 10), "kB", - "Wired memory at boot"); - - T_LOG("\nwired memory: %llu kB (%llu MB)\n", stat.wire_count * (mach_vm_size_t)vm_kernel_page_size >> 10, - stat.wire_count * (mach_vm_size_t)vm_kernel_page_size >> 20); - -#if TARGET_OS_IOS || TARGET_OS_OSX - // zprint is not mastered onto other platforms. 
- int r; - if ((r = system("zprint")) != 0) { - T_FAIL("couldn't run zprint: %d", r); - } -#endif - /* - * Poor-man's wired memory regression test: validate that wired memory consumes - * no more than some outrageously high fixed percentage of total device memory. - */ - wired_mem_pct = (int)((stat.wire_count * page_size * 100ULL) / memsize); - T_PERF("wired_memory_percentage", wired_mem_pct, "%", "Wired memory as percentage of device DRAM size"); - - T_ASSERT_LT(wired_mem_pct, WIRED_MEM_THRESHOLD_PERCENTAGE, - "Wired memory percentage is below allowable threshold (%llu bytes / %u pages / %llu total device memory)", - (uint64_t)stat.wire_count * page_size, stat.wire_count, memsize); -} diff --git a/tests/work_interval_test.c b/tests/work_interval_test.c index c46de4069..c38031d73 100644 --- a/tests/work_interval_test.c +++ b/tests/work_interval_test.c @@ -118,3 +118,120 @@ T_DECL(work_interval, "work interval interface") ret = work_interval_destroy(handle); T_ASSERT_POSIX_SUCCESS(ret, "work_interval_destroy"); } + +static mach_timebase_info_data_t timebase_info; + +static uint64_t +nanos_to_abs(uint64_t nanos) +{ + mach_timebase_info(&timebase_info); + return nanos * timebase_info.denom / timebase_info.numer; +} + +static void +set_realtime(pthread_t thread) +{ + kern_return_t kr; + thread_time_constraint_policy_data_t pol; + + mach_port_t target_thread = pthread_mach_thread_np(thread); + T_ASSERT_NOTNULL(target_thread, "pthread_mach_thread_np"); + + /* 1s 100ms 10ms */ + pol.period = (uint32_t)nanos_to_abs(1000000000); + pol.constraint = (uint32_t)nanos_to_abs(100000000); + pol.computation = (uint32_t)nanos_to_abs(10000000); + + pol.preemptible = 0; /* Ignored by OS */ + kr = thread_policy_set(target_thread, THREAD_TIME_CONSTRAINT_POLICY, (thread_policy_t) &pol, + THREAD_TIME_CONSTRAINT_POLICY_COUNT); + T_ASSERT_MACH_SUCCESS(kr, "thread_policy_set(THREAD_TIME_CONSTRAINT_POLICY)"); +} + +static void +set_nonrealtime(pthread_t thread) +{ + kern_return_t kr; + thread_standard_policy_data_t pol = {0}; + + mach_port_t target_thread = pthread_mach_thread_np(thread); + T_ASSERT_NOTNULL(target_thread, "pthread_mach_thread_np"); + + kr = thread_policy_set(target_thread, THREAD_STANDARD_POLICY, (thread_policy_t) &pol, + THREAD_STANDARD_POLICY_COUNT); + T_ASSERT_MACH_SUCCESS(kr, "thread_policy_set(THREAD_STANDARD_POLICY)"); +} + +T_DECL(work_interval_audio_realtime_only, "joining RT threads to audio work interval", T_META_ASROOT(YES)) +{ + int ret = 0; + work_interval_t handle = NULL; + kern_return_t kr = KERN_SUCCESS; + + uint32_t flags = WORK_INTERVAL_FLAG_GROUP | WORK_INTERVAL_FLAG_JOINABLE | WORK_INTERVAL_TYPE_COREAUDIO; + + ret = work_interval_create(&handle, flags); + T_ASSERT_POSIX_SUCCESS(ret, "work_interval_create, joinable"); + + ret = work_interval_copy_port(handle, &port); + T_ASSERT_POSIX_SUCCESS(ret, "work_interval_copy_port, joinable"); + + ret = work_interval_join_port(port); + T_EXPECT_POSIX_FAILURE(ret, EINVAL, "work_interval_join_port for audio on non-RT thread"); + + set_realtime(pthread_self()); + + ret = work_interval_join_port(port); + T_ASSERT_POSIX_SUCCESS(ret, "work_interval_join_port for audio on RT thread"); + + ret = work_interval_leave(); + T_ASSERT_POSIX_SUCCESS(ret, "work_interval_leave"); + + ret = work_interval_destroy(handle); + T_ASSERT_POSIX_SUCCESS(ret, "work_interval_destroy"); + + kr = mach_port_deallocate(mach_task_self(), port); + T_ASSERT_MACH_SUCCESS(kr, "mach_port_deallocate of port"); + + set_nonrealtime(pthread_self()); +} + 
+T_DECL(work_interval_get_flags, "querying a port for create flags") +{ + int ret = 0; + work_interval_t handle = NULL; + uint32_t flags = WORK_INTERVAL_FLAG_JOINABLE | WORK_INTERVAL_FLAG_GROUP | WORK_INTERVAL_TYPE_COREAUDIO; + + ret = work_interval_create(&handle, flags); + T_ASSERT_POSIX_SUCCESS(ret, "work_interval_create(AUDIO)"); + + ret = work_interval_copy_port(handle, &port); + T_ASSERT_POSIX_SUCCESS(ret, "work_interval_copy_port"); + T_ASSERT_TRUE(MACH_PORT_VALID(port), "port from copy port is a valid port"); + + uint32_t expected_flags = 0; + + ret = work_interval_get_flags_from_port(port, &expected_flags); + T_ASSERT_EQ(ret, 0, "work_interval_get_flags_from_port"); + + T_ASSERT_EQ(expected_flags, flags, "Flags match with what work interval was created with"); + + mach_port_mod_refs(mach_task_self(), port, MACH_PORT_RIGHT_SEND, -1); + work_interval_destroy(handle); + + // Negative test + + mach_port_t fake_port = MACH_PORT_NULL; + ret = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &fake_port); + T_ASSERT_EQ(ret, 0, "successfully allocated a port"); + T_ASSERT_TRUE(MACH_PORT_VALID(fake_port), "allocated port is valid"); + + ret = mach_port_insert_right(mach_task_self(), fake_port, fake_port, MACH_MSG_TYPE_MAKE_SEND); + T_ASSERT_EQ(ret, 0, "successfully inserted a send right"); + + ret = work_interval_get_flags_from_port(fake_port, &expected_flags); + T_ASSERT_EQ(ret, -1, "query port failed as expected"); + + mach_port_mod_refs(mach_task_self(), fake_port, MACH_PORT_RIGHT_SEND, -1); + mach_port_mod_refs(mach_task_self(), fake_port, MACH_PORT_RIGHT_RECEIVE, -1); +} diff --git a/tests/work_interval_test_unentitled.c b/tests/work_interval_test_unentitled.c new file mode 100644 index 000000000..4041ebcba --- /dev/null +++ b/tests/work_interval_test_unentitled.c @@ -0,0 +1,207 @@ +/* test that the header doesn't implicitly depend on others */ +#include + +#include +#include +#include +#include +#include +#include +#include + +#include + +#include + +T_GLOBAL_META(T_META_NAMESPACE("xnu.scheduler"), + T_META_RUN_CONCURRENTLY(true)); + +static mach_port_t port = MACH_PORT_NULL; + +static mach_timebase_info_data_t timebase_info; + +static uint64_t +nanos_to_abs(uint64_t nanos) +{ + mach_timebase_info(&timebase_info); + return nanos * timebase_info.denom / timebase_info.numer; +} + +static void +set_realtime(pthread_t thread) +{ + kern_return_t kr; + thread_time_constraint_policy_data_t pol; + + mach_port_t target_thread = pthread_mach_thread_np(thread); + T_EXPECT_NE(target_thread, MACH_PORT_NULL, "pthread_mach_thread_np"); + + /* 1s 100ms 10ms */ + pol.period = (uint32_t)nanos_to_abs(1000000000); + pol.constraint = (uint32_t)nanos_to_abs(100000000); + pol.computation = (uint32_t)nanos_to_abs(10000000); + + pol.preemptible = 0; /* Ignored by OS */ + kr = thread_policy_set(target_thread, THREAD_TIME_CONSTRAINT_POLICY, (thread_policy_t) &pol, + THREAD_TIME_CONSTRAINT_POLICY_COUNT); + T_ASSERT_MACH_SUCCESS(kr, "thread_policy_set(THREAD_TIME_CONSTRAINT_POLICY)"); +} + +static void +set_nonrealtime(pthread_t thread) +{ + kern_return_t kr; + thread_standard_policy_data_t pol = {0}; + + mach_port_t target_thread = pthread_mach_thread_np(thread); + T_EXPECT_NE(target_thread, MACH_PORT_NULL, "pthread_mach_thread_np"); + + kr = thread_policy_set(target_thread, THREAD_STANDARD_POLICY, (thread_policy_t) &pol, + THREAD_STANDARD_POLICY_COUNT); + T_ASSERT_MACH_SUCCESS(kr, "thread_policy_set(THREAD_STANDARD_POLICY)"); +} + +T_DECL(unentitled_work_intervals, "work interval interface for 
unentitled types") +{ + int ret = 0; + work_interval_t handle = NULL; + uint64_t now = mach_absolute_time(); + kern_return_t kr = KERN_SUCCESS; + + ret = work_interval_create(NULL, 0); + T_ASSERT_EQ(errno, EINVAL, "create with null errno EINVAL"); + T_ASSERT_EQ(ret, -1, "create with null returns -1"); + + ret = work_interval_create(&handle, WORK_INTERVAL_FLAG_GROUP | WORK_INTERVAL_FLAG_UNRESTRICTED | WORK_INTERVAL_TYPE_DEFAULT); + T_ASSERT_POSIX_SUCCESS(ret, "work_interval_create, public flags"); + + ret = work_interval_copy_port(handle, &port); + T_ASSERT_EQ(errno, EINVAL, "work_interval_copy_port on non-joinable interval errno EINVAL"); + T_ASSERT_EQ(ret, -1, "work_interval_copy_port on non-joinable interval returns -1"); + + ret = work_interval_notify(handle, now - 1000, now, now + 1000, now + 2000, 0); + T_ASSERT_POSIX_SUCCESS(ret, "work_interval_notify, no flags"); + + ret = work_interval_destroy(handle); + T_ASSERT_POSIX_SUCCESS(ret, "work_interval_destroy, no flags"); + + uint32_t flags[] = { + WORK_INTERVAL_FLAG_JOINABLE | WORK_INTERVAL_FLAG_GROUP | WORK_INTERVAL_FLAG_UNRESTRICTED | WORK_INTERVAL_TYPE_DEFAULT, + }; + + for (uint32_t i = 0; i < sizeof(flags) / sizeof(flags[0]); i++) { + ret = work_interval_create(&handle, flags[i]); + T_ASSERT_POSIX_SUCCESS(ret, "work_interval_create, joinable"); + + ret = work_interval_copy_port(handle, &port); + T_ASSERT_POSIX_SUCCESS(ret, "work_interval_copy_port, joinable"); + + ret = work_interval_notify(handle, now - 1000, now, now + 1000, now + 2000, 0); + T_ASSERT_EQ(ret, -1, "work_interval_notify on non-joined thread returns -1"); + T_ASSERT_EQ(errno, EINVAL, "work_interval_copy_port on non-joined thread errno EINVAL"); + + ret = work_interval_join_port(port); + T_ASSERT_POSIX_SUCCESS(ret, "work_interval_join_port, joinable"); + + ret = work_interval_notify(handle, now - 1000, now, now + 1000, now + 2000, 0); + T_ASSERT_POSIX_SUCCESS(ret, "work_interval_notify, on joined thread"); + + ret = work_interval_join_port(port); + T_ASSERT_POSIX_SUCCESS(ret, "work_interval_join_port, join the same interval after destroy"); + + kr = mach_port_deallocate(mach_task_self(), port); + T_ASSERT_MACH_SUCCESS(kr, "mach_port_deallocate of port"); + + ret = work_interval_notify(handle, now - 1000, now, now + 1000, now + 2000, 0); + T_ASSERT_POSIX_SUCCESS(ret, "work_interval_notify, on joined thread after destroy"); + + ret = work_interval_destroy(handle); + T_ASSERT_POSIX_SUCCESS(ret, "work_interval_destroy, joinable, on joined thread"); + + ret = work_interval_leave(); + T_ASSERT_POSIX_SUCCESS(ret, "work_interval_leave, on destroyed work interval"); + } +} + +T_DECL(unentitled_work_interval_create_while_joined, "work interval interface: create while joined") +{ + int ret = 0; + work_interval_t handle = NULL; + work_interval_t handle2 = NULL; + mach_port_t port1 = MACH_PORT_NULL; + mach_port_t port2 = MACH_PORT_NULL; + kern_return_t kr = KERN_SUCCESS; + + uint32_t flags = WORK_INTERVAL_FLAG_JOINABLE | WORK_INTERVAL_FLAG_GROUP | WORK_INTERVAL_FLAG_UNRESTRICTED | WORK_INTERVAL_TYPE_DEFAULT; + + ret = work_interval_create(&handle, flags); + T_ASSERT_POSIX_SUCCESS(ret, "work_interval_create, joinable"); + + ret = work_interval_copy_port(handle, &port1); + T_ASSERT_POSIX_SUCCESS(ret, "work_interval_copy_port, joinable"); + + ret = work_interval_join_port(port1); + T_ASSERT_POSIX_SUCCESS(ret, "work_interval_join_port, joinable"); + + + ret = work_interval_create(&handle2, flags); + T_ASSERT_POSIX_SUCCESS(ret, "work_interval_create, joinable, while already joined"); 
+ + ret = work_interval_copy_port(handle2, &port2); + T_ASSERT_POSIX_SUCCESS(ret, "work_interval_copy_port, joinable, while already joined"); + + ret = work_interval_join_port(port2); + T_ASSERT_POSIX_SUCCESS(ret, "work_interval_join_port, joinable, while already joined"); + + ret = work_interval_leave(); + T_ASSERT_POSIX_SUCCESS(ret, "work_interval_leave, first time"); + + ret = work_interval_leave(); + T_ASSERT_POSIX_SUCCESS(ret, "work_interval_leave, again"); + + ret = work_interval_destroy(handle); + T_ASSERT_POSIX_SUCCESS(ret, "work_interval_destroy"); + + ret = work_interval_destroy(handle2); + T_ASSERT_POSIX_SUCCESS(ret, "work_interval_destroy"); + + kr = mach_port_deallocate(mach_task_self(), port1); + T_ASSERT_MACH_SUCCESS(kr, "mach_port_deallocate of port"); + + kr = mach_port_deallocate(mach_task_self(), port2); + T_ASSERT_MACH_SUCCESS(kr, "mach_port_deallocate of port"); +} + +T_DECL(work_interval_audio_unentitled, "unentitled work interval for audio") +{ + int ret = 0; + work_interval_t handle = NULL; + kern_return_t kr = KERN_SUCCESS; + + uint32_t flags = WORK_INTERVAL_FLAG_GROUP | WORK_INTERVAL_FLAG_JOINABLE | WORK_INTERVAL_TYPE_COREAUDIO | WORK_INTERVAL_FLAG_UNRESTRICTED; + + ret = work_interval_create(&handle, flags); + T_ASSERT_POSIX_SUCCESS(ret, "work_interval_create, joinable"); + + ret = work_interval_copy_port(handle, &port); + T_ASSERT_POSIX_SUCCESS(ret, "work_interval_copy_port, joinable"); + + ret = work_interval_join_port(port); + T_EXPECT_POSIX_FAILURE(ret, EINVAL, "work_interval_join_port for audio on non-RT thread"); + + set_realtime(pthread_self()); + + ret = work_interval_join_port(port); + T_ASSERT_POSIX_SUCCESS(ret, "work_interval_join_port for audio on RT thread"); + + ret = work_interval_leave(); + T_ASSERT_POSIX_SUCCESS(ret, "work_interval_leave"); + + ret = work_interval_destroy(handle); + T_ASSERT_POSIX_SUCCESS(ret, "work_interval_destroy"); + + kr = mach_port_deallocate(mach_task_self(), port); + T_ASSERT_MACH_SUCCESS(kr, "mach_port_deallocate of port"); + + set_nonrealtime(pthread_self()); +} diff --git a/tests/workq_sigprof.c b/tests/workq_sigprof.c index deb7d3792..f6cdf3444 100644 --- a/tests/workq_sigprof.c +++ b/tests/workq_sigprof.c @@ -10,7 +10,7 @@ #include -#if !TARGET_OS_IPHONE +#if !defined(__arm__) T_GLOBAL_META(T_META_RUN_CONCURRENTLY(true)); @@ -66,12 +66,12 @@ T_DECL(workq_sigprof, "test that workqueue threads can receive sigprof") dispatch_main(); } -#else //!TARGET_OS_IPHONE +#else //!defined(__arm__) T_DECL(workq_sigprof, "test that workqueue threads can receive sigprof") { T_EXPECTFAIL; - T_FAIL(" setitimer/sigprof doesn't seem to be delivered on embeded platforms"); + T_FAIL(" setitimer/sigprof not supported on 32bit arm platforms"); } -#endif //!TARGET_OS_IPHONE +#endif //!defined(__arm__) diff --git a/tests/x18.c b/tests/x18.c new file mode 100644 index 000000000..f8a8d8b1d --- /dev/null +++ b/tests/x18.c @@ -0,0 +1,68 @@ +/* + * Copyright (c) 2019 Apple Computer, Inc. All rights reserved. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. 
The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. + * + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ + */ + +#include +#include +#include +#include + +T_GLOBAL_META( + T_META_NAMESPACE("xnu.arm"), + T_META_RUN_CONCURRENTLY(true)); + +T_DECL(x18_preserved, + "Test that x18 is preserved on hardware that supports it.") +{ +#ifndef __arm64__ + T_SKIP("Running on non-arm64 target, skipping..."); +#else + int arm_kernel_protect = 0; + size_t arm_kernel_protect_size = sizeof(arm_kernel_protect); + bool preserved = true; + + int err = sysctlbyname("hw.optional.arm_kernel_protect", &arm_kernel_protect, &arm_kernel_protect_size, NULL, 0); + if (err) { + T_SKIP("Could not determine state of __ARM_KERNEL_PROTECT__, skipping..."); + } + if (arm_kernel_protect) { + preserved = false; + } + + uint64_t x18_val; + for (uint64_t i = 0xFEEDB0B000000000ULL; i < 0xFEEDB0B000000000ULL + 10000; ++i) { + asm volatile ("mov x18, %0" : : "r"(i)); + sched_yield(); + asm volatile ("mov %0, x18" : "=r"(x18_val)); + if (preserved) { + T_QUIET; T_ASSERT_EQ(x18_val, i, "check that x18 reads back correctly after yield"); + } else { + T_QUIET; T_ASSERT_EQ(x18_val, 0ULL, "check that x18 is cleared after yield"); + } + } +#endif +} diff --git a/tests/zalloc.c b/tests/zalloc.c new file mode 100644 index 000000000..b875c3419 --- /dev/null +++ b/tests/zalloc.c @@ -0,0 +1,15 @@ +#include +#include +#include + +T_DECL(basic_zone_test, "General zalloc test", + T_META_NAMESPACE("xnu.vm"), + T_META_CHECK_LEAKS(false)) +{ + unsigned int count = 1; + size_t s = sizeof(count); + int rc; + + rc = sysctlbyname("kern.run_zone_test", &count, &s, &count, s); + T_ASSERT_POSIX_SUCCESS(rc, "run_zone_test"); +} diff --git a/tools/cred_dump_creds.c b/tools/cred_dump_creds.c index 8d417fe69..c534e2820 100644 --- a/tools/cred_dump_creds.c +++ b/tools/cred_dump_creds.c @@ -24,7 +24,7 @@ struct debug_ucred { uid_t cr_uid; /* effective user id */ uid_t cr_ruid; /* real user id */ uid_t cr_svuid; /* saved user id */ - short cr_ngroups; /* number of groups in advisory list */ + u_short cr_ngroups; /* number of groups in advisory list */ gid_t cr_groups[NGROUPS]; /* advisory group list */ gid_t cr_rgid; /* real group id */ gid_t cr_svgid; /* saved group id */ diff --git a/tools/lldbmacros/Makefile b/tools/lldbmacros/Makefile index 17d6b10ee..e4e0aec7d 100644 --- a/tools/lldbmacros/Makefile +++ b/tools/lldbmacros/Makefile @@ -39,7 +39,6 @@ LLDBMACROS_PYTHON_FILES = $(LLDBMACROS_USERDEBUG_FILES) \ plugins/__init__.py \ plugins/zprint_perf_log.py \ sysregdoc/AArch64-esr_el1.xml \ - atm.py \ bank.py \ turnstile.py \ kevent.py \ diff --git a/tools/lldbmacros/atm.py b/tools/lldbmacros/atm.py 
deleted file mode 100755 index 451e03ab0..000000000 --- a/tools/lldbmacros/atm.py +++ /dev/null @@ -1,96 +0,0 @@ -from xnu import * -from utils import * - - -@lldb_type_summary(['atm_value', 'atm_value_t']) -@header("{0: <20s} {1: <16s} {2: <20s} {3: <16s}".format("atm_value", "aid", "voucher_value", "sync")) -def GetATMValueSummary(atm_value): - """ Summarizes the atm_value - params: atm_value = value object of type atm_value_t - returns: string with the summary of the type. - """ - format_str = "{0: <#020x} {1: <16d} {2: <#020x} {3: <16d}" - out_string = format_str.format(atm_value, unsigned(atm_value.aid), atm_value, atm_value.sync) - return out_string - - -@lldb_type_summary(['atm_task_descriptor', 'atm_task_descriptor_t']) -@header("{0: <20s} {1: <20s} {2: <16s} {3: <16s} {4: <10s}".format("task_descriptor", "trace_buffer", "buffer_size", "refcount", "flags")) -def GetATMTaskDescriptorSummary(descriptor): - """ Summarizes atm_task_descriptor object - params: descriptor - value object of type atm_task_descriptor_t - returns: string - containing the description. - """ - format_str = "{0: <#020x} {1: <#020x} {2: <#016x} {3: <16d} {4: <10s}" - flags_str = "" - if unsigned(descriptor.flags) & 0x1: - flags_str = "DEAD" - out_string = format_str.format(descriptor, descriptor.trace_buffer, descriptor.trace_buffer_size, descriptor.reference_count, flags_str) - - #if DEVELOPMENT - if hasattr(descriptor, 'task'): - out_string += " " + GetTaskSummary(descriptor.task) + " " + GetProcNameForTask(descriptor.task) - #endif - - return out_string - -# Macro: showatmvaluelisteners -@lldb_command('showatmvaluelisteners') -def ShowATMValueListeners(cmd_args=None, cmd_options={}): - """ show a list of listeners for an atm_value object. - Usage: (lldb)showatmvaluelisteners - """ - if not cmd_args: - raise ArgumentError("Please provide arguments") - - atm_val = kern.GetValueFromAddress(cmd_args[0], 'atm_value_t') - print GetATMValueSummary.header - print GetATMValueSummary(atm_val) - header_str = "{0: <20s} ".format("#guard") + GetATMTaskDescriptorSummary.header - #if DEVELOPMENT - header_str += " " + GetTaskSummary.header + " procname" - #endif - print header_str - for listener in IterateQueue(atm_val.listeners, 'atm_link_object_t', 'listeners_element'): - listener_summary = "{0: <#020x}".format(listener.guard) - listener_summary += " " + GetATMTaskDescriptorSummary(listener.descriptor) - print listener_summary - return -# EndMacro: showatmvaluelisteners - - -#if DEVELOPMENT - -# Macro: showallatmallocatedvalueslist -@lldb_command('showallatmallocatedvalueslist') -def ShowAllATMAllocatedValuesList(cmd_args=None, cmd_options={}): - """ A DEVELOPMENT macro that walks the list of all allocated atm_value objects - and prints them. - usage: (lldb) showallatmallocatedvalueslist - """ - if not hasattr(kern.globals, 'atm_values_list'): - print "It seems you are running a build of kernel that does not have the list of all atm_values_list." - return False - print GetATMValueSummary.header - for v in IterateQueue(kern.globals.atm_values_list, 'atm_value_t', 'value_elt'): - print GetATMValueSummary(v) - return True -# EndMacro: showallatmallocatedvalueslist - -# Macro: showallatmdescriptors -@lldb_command('showallatmdescriptors') -def ShowAllATMDescriptors(cmd_args=None, cmd_options={}): - """ A DEVELOPMENT macro that walks the list of all atm_descriptors_list - and prints the summary for each. 
- usage: (lldb) showallatmdescriptors - """ - if not hasattr(kern.globals, 'atm_descriptors_list'): - print "It seems you are running a build of kernel that does not have the list of all atm_descriptors_list." - return False - - print GetATMTaskDescriptorSummary.header - for d in IterateQueue(kern.globals.atm_descriptors_list, 'atm_task_descriptor_t', 'descriptor_elt'): - print GetATMTaskDescriptorSummary(d) - return True -# EndMacro -#endif diff --git a/tools/lldbmacros/core/cvalue.py b/tools/lldbmacros/core/cvalue.py index bcebeb495..84641c9b9 100755 --- a/tools/lldbmacros/core/cvalue.py +++ b/tools/lldbmacros/core/cvalue.py @@ -1,9 +1,9 @@ """ Defines a class value which encapsulates the basic lldb Scripting Bridge APIs. This provides an easy -wrapper to extract information from C based constructs. +wrapper to extract information from C based constructs. |------- core.value------------| | |--lldb Scripting Bridge--| | - | | |--lldb core--| | | + | | |--lldb core--| | | | |-------------------------| | |------------------------------| Use the member function GetSBValue() to access the base Scripting Bridge value. @@ -19,7 +19,7 @@ class value(object): can be used as a variable would be in code. So if you have a Point structure variable in your code in the current frame named "pt", you can initialize an instance of this class with it: - + pt = lldb.value(lldb.frame.FindVariable("pt")) print pt print pt.x @@ -29,7 +29,7 @@ class value(object): print rectangle_array[12] print rectangle_array[5].origin.x''' def __init__(self, sbvalue): - #_sbval19k84obscure747 is specifically chosen to be obscure. + #_sbval19k84obscure747 is specifically chosen to be obscure. #This avoids conflicts when attributes could mean any field value in code self._sbval19k84obscure747 = sbvalue self._sbval19k84obscure747_type = sbvalue.GetType() @@ -41,7 +41,7 @@ class value(object): def __repr__(self): return self._sbval19k84obscure747.__str__() - + def __cmp__(self, other): if type(other) is int or type(other) is long: me = int(self) @@ -54,7 +54,7 @@ class value(object): except TypeError: # Try promoting to long return long(self).__cmp__(long(other)) raise TypeError("Cannot compare value with type {}".format(type(other))) - + def __str__(self): global _cstring_rex type_name = self._sbval19k84obscure747_type.GetName() @@ -92,152 +92,152 @@ class value(object): def __add__(self, other): return int(self) + int(other) - + def __radd__(self, other): return int(self) + int(other) - + def __sub__(self, other): return int(self) - int(other) - + def __rsub__(self, other): return int(other) - int(self) - + def __mul__(self, other): return int(self) * int(other) - + def __rmul__(self, other): return int(self) * int(other) - + def __floordiv__(self, other): return int(self) // int(other) - + def __mod__(self, other): return int(self) % int(other) - + def __rmod__(self, other): return int(other) % int(self) - + def __divmod__(self, other): return int(self) % int(other) - + def __rdivmod__(self, other): return int(other) % int(self) - + def __pow__(self, other): return int(self) ** int(other) - + def __lshift__(self, other): return int(self) << int(other) - + def __rshift__(self, other): return int(self) >> int(other) - + def __and__(self, other): return int(self) & int(other) - + def __rand(self, other): return int(self) & int(other) - + def __xor__(self, other): return int(self) ^ int(other) - + def __or__(self, other): return int(self) | int(other) - + def __div__(self, other): return int(self) / int(other) - + def 
__rdiv__(self, other): return int(other)/int(self) - + def __truediv__(self, other): return int(self) / int(other) - + def __iadd__(self, other): result = self.__add__(other) self._sbval19k84obscure747.SetValueFromCString (str(result)) return result - + def __isub__(self, other): result = self.__sub__(other) self._sbval19k84obscure747.SetValueFromCString (str(result)) return result - + def __imul__(self, other): result = self.__mul__(other) self._sbval19k84obscure747.SetValueFromCString (str(result)) return result - + def __idiv__(self, other): result = self.__div__(other) self._sbval19k84obscure747.SetValueFromCString (str(result)) return result - + def __itruediv__(self, other): result = self.__truediv__(other) self._sbval19k84obscure747.SetValueFromCString (str(result)) return result - + def __ifloordiv__(self, other): result = self.__floordiv__(self, other) self._sbval19k84obscure747.SetValueFromCString (str(result)) return result - + def __imod__(self, other): result = self.__and__(self, other) self._sbval19k84obscure747.SetValueFromCString (str(result)) return result - + def __ipow__(self, other): result = self.__pow__(self, other) self._sbval19k84obscure747.SetValueFromCString (str(result)) return result - + def __ipow__(self, other, modulo): result = self.__pow__(self, other, modulo) self._sbval19k84obscure747.SetValueFromCString (str(result)) return result - + def __ilshift__(self, other): result = self.__lshift__(other) self._sbval19k84obscure747.SetValueFromCString (str(result)) return result - + def __irshift__(self, other): result = self.__rshift__(other) self._sbval19k84obscure747.SetValueFromCString (str(result)) return result - + def __iand__(self, other): result = self.__and__(self, other) self._sbval19k84obscure747.SetValueFromCString (str(result)) return result - + def __ixor__(self, other): result = self.__xor__(self, other) self._sbval19k84obscure747.SetValueFromCString (str(result)) return result - + def __ior__(self, other): result = self.__ior__(self, other) self._sbval19k84obscure747.SetValueFromCString (str(result)) return result - + def __neg__(self): return -int(self) - + def __pos__(self): return +int(self) - + def __abs__(self): return abs(int(self)) - + def __invert__(self): return ~int(self) - + def __complex__(self): return complex (int(self)) - + def __int__(self): if self._sbval19k84obscure747_is_ptr: return self._GetValueAsUnsigned() @@ -249,49 +249,43 @@ class value(object): if (retval & 0x80000000): retval = retval - 0x100000000 return retval - + def __long__(self): return self._sbval19k84obscure747.GetValueAsSigned() - + def __float__(self): return float (self._sbval19k84obscure747.GetValueAsSigned()) - + def __oct__(self): return '0%o' % self._GetValueAsUnsigned() - + def __hex__(self): return '0x%x' % self._GetValueAsUnsigned() def __eq__(self, other): - self_err = lldb.SBError() - other_err = lldb.SBError() - self_val = self._sbval19k84obscure747.GetValueAsUnsigned(self_err) - if self_err.fail: - raise ValueError("unable to extract value of self") + self_val = self._GetValueAsUnsigned() if type(other) is value: - other_val = other._sbval19k84obscure747.GetValueAsUnsigned(other_err) - if other_err.fail: - raise ValueError("unable to extract value of other") + other_val = other._GetValueAsUnsigned() return self_val == other_val if type(other) is int: return int(self) == other raise TypeError("Equality operation is not defined for this type.") - + def __neq__(self, other): return not self.__eq__(other) - + def GetSBValue(self): return 
self._sbval19k84obscure747 - + def __getstate__(self): err = lldb.SBError() if self._sbval19k84obscure747_is_ptr: - addr = self._sbval19k84obscure747.GetValueAsUnsigned() + addr = self._sbval19k84obscure747.GetValueAsAddress() size = self._sbval19k84obscure747_type.GetPointeeType().GetByteSize() else: - addr = self._sbval19k84obscure747.AddressOf().GetValueAsUnsigned() + addr = self._sbval19k84obscure747.AddressOf().GetValueAsAddress() size = self._sbval19k84obscure747_type.GetByteSize() - + content = LazyTarget.GetProcess().ReadMemory(addr, size, err) if err.fail: content = '' @@ -324,7 +318,7 @@ class value(object): if serr.success: return retval raise ValueError("Failed to read unsigned data. "+ str(self._sbval19k84obscure747) +"(type =" + str(self._sbval19k84obscure747_type) + ") Error description: " + serr.GetCString()) - + def _GetValueAsString(self, offset = 0, maxlen = 1024): serr = lldb.SBError() sbdata = None @@ -332,7 +326,7 @@ class value(object): sbdata = self._sbval19k84obscure747.GetPointeeData(offset, maxlen) else: sbdata = self._sbval19k84obscure747.GetData() - + retval = '' bytesize = sbdata.GetByteSize() if bytesize == 0 : @@ -346,8 +340,8 @@ class value(object): if ch == '\0': break retval += ch - return retval - + return retval + def __format__(self, format_spec): ret_format = "{0:"+format_spec+"}" # typechar is last char. see http://www.python.org/dev/peps/pep-3101/ @@ -362,14 +356,14 @@ class value(object): return ret_format.format(int(oct(self))) if type_spec == 'c': return ret_format.format(int(self)) - + return "unknown format " + format_spec + str(self) - - + + def unsigned(val): """ Helper function to get unsigned value from core.value params: val - value (see value class above) representation of an integer type - returns: int which is unsigned. + returns: int which is unsigned. raises : ValueError if the type cannot be represented as unsigned int. """ if type(val) is value: @@ -377,18 +371,18 @@ def unsigned(val): return int(val) def sizeof(t): - """ Find the byte size of a type. + """ Find the byte size of a type. params: t - str : ex 'time_spec' returns equivalent of sizeof(time_spec) in C t - value: ex a value object. returns size of the object - returns: int - byte size length + returns: int - byte size length """ if type(t) is value : return t.GetSBValue().GetByteSize() if type(t) is str: return gettype(t).GetByteSize() raise ValueError("Cannot get sizeof. Invalid argument") - - + + def dereference(val): """ Get a dereferenced obj for a pointer type obj params: val - value object representing a pointer type C construct in lldb @@ -401,12 +395,12 @@ def dereference(val): if type(val) is value and val._sbval19k84obscure747_is_ptr: return value(val.GetSBValue().Dereference()) raise TypeError('Cannot dereference this type.') - + def addressof(val): - """ Get address of a core.value object. + """ Get address of a core.value object. params: val - value object representing a C construct in lldb returns: value - value object referring to 'type(val) *' type - ex. addr = addressof(hello_obj) #python + ex. addr = addressof(hello_obj) #python is same as uintptr_t addr = (uintptr_t)&hello_obj #C """ @@ -430,7 +424,7 @@ def cast(obj, target_type): if type(obj) is value: return obj._GetValueAsCast(dest_type) elif type(obj) is int: - print "ERROR: You cannot cast an 'int' to %s, please use kern.GetValueFromAddress() for such purposes." % str(target_type) + print "ERROR: You cannot cast an 'int' to %s, please use kern.GetValueFromAddress() for such purposes." 
% str(target_type) raise TypeError("object of type %s cannot be casted to %s" % (str(type(obj)), str(target_type))) def containerof(obj, target_type, field_name): @@ -549,10 +543,20 @@ def islong(x): def readmemory(val): """ Returns a string of hex data that is referenced by the value. - params: val - a value object. - return: str - string of hex bytes. + params: val - a value object. + return: str - string of hex bytes. raises: TypeError if val is not a valid type """ if not type(val) is value: raise TypeError('%s is not of type value' % str(type(val))) return val.__getstate__() + +def getOSPtr(cpp_obj): + """ Returns a core.value created from an intrusive_shared_ptr or itself, cpp_obj + params: cpp_obj - core.value object representing a C construct in lldb + return: core.value - newly created core.value or cpp_obj + """ + child = cpp_obj._sbval19k84obscure747.GetChildAtIndex(0) + if 'intrusive_shared_ptr' in str(child): + return value(child.GetChildMemberWithName('ptr_')) + return cpp_obj diff --git a/tools/lldbmacros/core/kernelcore.py b/tools/lldbmacros/core/kernelcore.py index 01067a75d..d21b5c912 100755 --- a/tools/lldbmacros/core/kernelcore.py +++ b/tools/lldbmacros/core/kernelcore.py @@ -247,7 +247,7 @@ def IterateRBTreeEntry(element, element_type, field_name): elt = cast(elt, element_type) -def IteratePriorityQueue(root, element_type, field_name): +def IterateSchedPriorityQueue(root, element_type, field_name): """ iterate over a priority queue as defined with struct priority_queue from osfmk/kern/priority_queue.h root - value : Value object for the priority queue element_type - str : Type of the link element @@ -257,9 +257,9 @@ def IteratePriorityQueue(root, element_type, field_name): value : an object thats of type (element_type). Always a pointer object """ def _make_pqe(addr): - return value(root.GetSBValue().CreateValueFromExpression(None,'(struct priority_queue_entry *)'+str(addr))) + return value(root.GetSBValue().CreateValueFromExpression(None,'(struct priority_queue_entry_sched *)'+str(addr))) - queue = [unsigned(root.pq_root_packed) & ~3] + queue = [unsigned(root.pq_root)] while len(queue): elt = _make_pqe(queue.pop()) @@ -270,6 +270,20 @@ def IteratePriorityQueue(root, element_type, field_name): if addr: queue.append(addr) elt = elt.next +def SchedPriorityStableQueueRootPri(root, element_type, field_name): + """ Return the root level priority of a priority queue as defined with struct priority_queue from osfmk/kern/priority_queue.h + root - value : Value object for the priority queue + element_type - str : Type of the link element + field_name - str : Name of the field in link element's structure + returns: + The sched pri of the root element. 
+ """ + def _make_pqe(addr): + return value(root.GetSBValue().CreateValueFromExpression(None,'(struct priority_queue_entry_stable *)'+str(addr))) + + elt = _make_pqe(unsigned(root.pq_root)) + return (elt.key >> 8); + def IterateMPSCQueue(root, element_type, field_name): """ iterate over an MPSC queue as defined with struct mpsc_queue_head from osfmk/kern/mpsc_queue.h root - value : Value object for the mpsc queue diff --git a/tools/lldbmacros/core/operating_system.py b/tools/lldbmacros/core/operating_system.py index 2e7e21847..67e280cd6 100755 --- a/tools/lldbmacros/core/operating_system.py +++ b/tools/lldbmacros/core/operating_system.py @@ -156,47 +156,28 @@ class Armv8_RegisterSet(object): return self def ReadRegisterDataFromKernelStack(self, kstack_saved_state_addr, kernel_version): - saved_state = kernel_version.CreateValueFromExpression(None, '(struct arm_saved_state64 *) '+ str(kstack_saved_state_addr)) + saved_state = kernel_version.CreateValueFromExpression(None, '(arm_kernel_saved_state_t *) '+ str(kstack_saved_state_addr)) saved_state = saved_state.Dereference() saved_state = PluginValue(saved_state) self.ResetRegisterValues() - self.x0 = saved_state.GetChildMemberWithName('x').GetChildAtIndex(0).GetValueAsUnsigned() - self.x1 = saved_state.GetChildMemberWithName('x').GetChildAtIndex(1).GetValueAsUnsigned() - self.x2 = saved_state.GetChildMemberWithName('x').GetChildAtIndex(2).GetValueAsUnsigned() - self.x3 = saved_state.GetChildMemberWithName('x').GetChildAtIndex(3).GetValueAsUnsigned() - self.x4 = saved_state.GetChildMemberWithName('x').GetChildAtIndex(4).GetValueAsUnsigned() - self.x5 = saved_state.GetChildMemberWithName('x').GetChildAtIndex(5).GetValueAsUnsigned() - self.x6 = saved_state.GetChildMemberWithName('x').GetChildAtIndex(6).GetValueAsUnsigned() - self.x7 = saved_state.GetChildMemberWithName('x').GetChildAtIndex(7).GetValueAsUnsigned() - self.x8 = saved_state.GetChildMemberWithName('x').GetChildAtIndex(8).GetValueAsUnsigned() - self.x9 = saved_state.GetChildMemberWithName('x').GetChildAtIndex(9).GetValueAsUnsigned() - self.x10 = saved_state.GetChildMemberWithName('x').GetChildAtIndex(10).GetValueAsUnsigned() - self.x11 = saved_state.GetChildMemberWithName('x').GetChildAtIndex(11).GetValueAsUnsigned() - self.x12 = saved_state.GetChildMemberWithName('x').GetChildAtIndex(12).GetValueAsUnsigned() - self.x13 = saved_state.GetChildMemberWithName('x').GetChildAtIndex(13).GetValueAsUnsigned() - self.x14 = saved_state.GetChildMemberWithName('x').GetChildAtIndex(14).GetValueAsUnsigned() - self.x15 = saved_state.GetChildMemberWithName('x').GetChildAtIndex(15).GetValueAsUnsigned() - self.x16 = saved_state.GetChildMemberWithName('x').GetChildAtIndex(16).GetValueAsUnsigned() - self.x17 = saved_state.GetChildMemberWithName('x').GetChildAtIndex(17).GetValueAsUnsigned() - self.x18 = saved_state.GetChildMemberWithName('x').GetChildAtIndex(18).GetValueAsUnsigned() - self.x19 = saved_state.GetChildMemberWithName('x').GetChildAtIndex(19).GetValueAsUnsigned() - self.x20 = saved_state.GetChildMemberWithName('x').GetChildAtIndex(20).GetValueAsUnsigned() - self.x21 = saved_state.GetChildMemberWithName('x').GetChildAtIndex(21).GetValueAsUnsigned() - self.x22 = saved_state.GetChildMemberWithName('x').GetChildAtIndex(22).GetValueAsUnsigned() - self.x23 = saved_state.GetChildMemberWithName('x').GetChildAtIndex(23).GetValueAsUnsigned() - self.x24 = saved_state.GetChildMemberWithName('x').GetChildAtIndex(24).GetValueAsUnsigned() - self.x25 = 
saved_state.GetChildMemberWithName('x').GetChildAtIndex(25).GetValueAsUnsigned() - self.x26 = saved_state.GetChildMemberWithName('x').GetChildAtIndex(26).GetValueAsUnsigned() - self.x27 = saved_state.GetChildMemberWithName('x').GetChildAtIndex(27).GetValueAsUnsigned() - self.x28 = saved_state.GetChildMemberWithName('x').GetChildAtIndex(28).GetValueAsUnsigned() + self.x16 = saved_state.GetChildMemberWithName('x').GetChildAtIndex(0).GetValueAsUnsigned() + self.x17 = saved_state.GetChildMemberWithName('x').GetChildAtIndex(1).GetValueAsUnsigned() + self.x19 = saved_state.GetChildMemberWithName('x').GetChildAtIndex(2).GetValueAsUnsigned() + self.x20 = saved_state.GetChildMemberWithName('x').GetChildAtIndex(3).GetValueAsUnsigned() + self.x21 = saved_state.GetChildMemberWithName('x').GetChildAtIndex(4).GetValueAsUnsigned() + self.x22 = saved_state.GetChildMemberWithName('x').GetChildAtIndex(5).GetValueAsUnsigned() + self.x23 = saved_state.GetChildMemberWithName('x').GetChildAtIndex(6).GetValueAsUnsigned() + self.x24 = saved_state.GetChildMemberWithName('x').GetChildAtIndex(7).GetValueAsUnsigned() + self.x25 = saved_state.GetChildMemberWithName('x').GetChildAtIndex(8).GetValueAsUnsigned() + self.x26 = saved_state.GetChildMemberWithName('x').GetChildAtIndex(9).GetValueAsUnsigned() + self.x27 = saved_state.GetChildMemberWithName('x').GetChildAtIndex(10).GetValueAsUnsigned() + self.x28 = saved_state.GetChildMemberWithName('x').GetChildAtIndex(11).GetValueAsUnsigned() self.fp = saved_state.GetChildMemberWithName('fp').GetValueAsUnsigned() self.lr = saved_state.GetChildMemberWithName('lr').GetValueAsUnsigned() self.sp = saved_state.GetChildMemberWithName('sp').GetValueAsUnsigned() # pc for a blocked thread is treated to be the next instruction it would run after thread switch. 
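# The operating_system.py hunk above switches ReadRegisterDataFromKernelStack()
# from the full arm_saved_state64 layout to the slimmer arm_kernel_saved_state,
# whose x[] array carries only the registers the kernel preserves across a
# context switch.  A minimal sketch of the index mapping implied by the hunk
# (x[0..11] -> x16, x17, x19..x28); the underlying C struct is not shown in
# this patch, so treat the layout as an assumption read off the diff.
KERNEL_SAVED_X_REGS = ['x16', 'x17'] + ['x%d' % n for n in range(19, 29)]

def map_kernel_saved_x(saved_x_values):
    # Pair each slot of the 12-entry x[] array with its register name.
    saved_x_values = list(saved_x_values)
    assert len(saved_x_values) == len(KERNEL_SAVED_X_REGS)
    return dict(zip(KERNEL_SAVED_X_REGS, saved_x_values))

print(map_kernel_saved_x(range(12))['x19'])   # slot 2 is x19, not x2 -> prints 2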
self.pc = self.switch_context_address - self.far = saved_state.GetChildMemberWithName('far').GetValueAsUnsigned() self.cpsr = saved_state.GetChildMemberWithName('cpsr').GetValueAsUnsigned() - self.esr = saved_state.GetChildMemberWithName('esr').GetValueAsUnsigned() return self def ReadRegisterDataFromContinuation(self, continuation_ptr): @@ -883,12 +864,12 @@ class OperatingSystemPlugIn(object): return regs.GetPackedRegisterState() elif self.target_arch.startswith(archARMv8) and int(PluginValue(thobj).GetChildMemberWithName('machine').GetChildMemberWithName('kstackptr').GetValueAsUnsigned()) != 0: saved_state_addr = PluginValue(thobj).GetChildMemberWithName('machine').GetChildMemberWithName('kstackptr').GetValueAsUnsigned() - arm_ctx = PluginValue(self.version.CreateValueFromExpression(None, '(struct arm_context *) ' + str(saved_state_addr))) - ss_64_addr = arm_ctx.GetChildMemberWithName('ss').GetChildMemberWithName('uss').GetChildMemberWithName('ss_64').GetLoadAddress() - regs.ReadRegisterDataFromKernelStack(ss_64_addr, self.version) + arm_ctx = PluginValue(self.version.CreateValueFromExpression(None, '(struct arm_kernel_context *) ' + str(saved_state_addr))) + arm_ss_addr = arm_ctx.GetChildMemberWithName('ss').GetLoadAddress() + regs.ReadRegisterDataFromKernelStack(arm_ss_addr, self.version) return regs.GetPackedRegisterState() elif self.target_arch == archX86_64 or self.target_arch.startswith(archARMv7) or self.target_arch.startswith(archARMv8): - regs.ReadRegisterDataFromContinuation( PluginValue(thobj).GetChildMemberWithName('continuation').GetValueAsUnsigned()) + regs.ReadRegisterDataFromContinuation( PluginValue(thobj).GetChildMemberWithName('continuation').GetValueAsAddress()) return regs.GetPackedRegisterState() #incase we failed very miserably except KeyboardInterrupt, ke: diff --git a/tools/lldbmacros/ioreg.py b/tools/lldbmacros/ioreg.py index 19fd28799..0aa3355b1 100755 --- a/tools/lldbmacros/ioreg.py +++ b/tools/lldbmacros/ioreg.py @@ -3,6 +3,7 @@ from utils import * from kdp import * from core import caching import sys +import lldb from collections import deque ###################################### @@ -24,6 +25,22 @@ def CastIOKitClass(obj, target_type): v.GetSBValue().SetPreferDynamicValue(lldb.eNoDynamicValues) return v +##################################### +# Classes. +##################################### +class PreoslogHeader(object): + """ + Represents preoslog buffer header. There's no symbol in the kernel for it. 
+ """ + valid_magic = "POSL" + def __init__(self): + self.magic = "" + self.offset = 0 + self.size = 0 + self.source = 0 + self.wrapped = 0 + self.data = None + ###################################### # Type Summaries ###################################### @@ -513,6 +530,75 @@ def ShowIOServicePM(cmd_args=None): print out_string +@lldb_type_summary(['IOPMWorkQueue *']) +@header("") +def GetIOPMWorkQueueSummary(wq): + out_str = "" + ioservicepm_header = "{:<20s}{:<4s}{:<4s}{:<4s}{:<4s}\n" + iopmrequest_indent = " " + iopmrequest_header = iopmrequest_indent + "{:<20s}{:<6s}{:<20s}{:<20s}{:<12s}{:<12s}{:<20s}{:<20s}{:<20s}\n" + + for next in IterateQueue(wq.fWorkQueue, 'IOServicePM *', 'WorkChain'): + out_str += ioservicepm_header.format("IOService", "ps", "ms", "wr", "name") + out_str += "0x{:<16x} {:<2d} {:<2d} {:<2d} {: + """ + if not cmd_args: + print "Please specify the address of the IOService" + print ShowIOPMInterest.__doc__ + return + + obj = kern.GetValueFromAddress(cmd_args[0], 'IOService *') + print GetIOPMInterest(obj) @lldb_command("showinterruptvectors") def ShowInterruptVectorInfo(cmd_args=None): @@ -622,8 +708,6 @@ def ShowIOKitClassHierarchy(cmd_args=None): print("{}[ {} ] {}".format(indent, str(mc.className()), str(mc.data()))) - - ###################################### # Helper routines ###################################### @@ -749,6 +833,25 @@ def FindRegistryObjectRecurse(entry, search_name): return registry_object return None +def CompareStringToOSSymbol(string, os_sym): + """ + Lexicographically compare python string to OSSymbol + Params: + string - python string + os_sym - OSSymbol + + Returns: + 0 if string == os_sym + 1 if string > os_sym + -1 if string < os_sym + """ + os_sym_str = GetString(os_sym) + if string > os_sym_str: + return 1 + elif string < os_sym_str: + return -1 + else: + return 0 class IOKitMetaClass(object): """ @@ -1221,7 +1324,7 @@ def SearchInterruptControllerDrivers(): yield ic -def LookupKeyInOSDict(osdict, key): +def LookupKeyInOSDict(osdict, key, comparer = None): """ Returns the value corresponding to a given key in a OSDictionary Returns None if the key was not found """ @@ -1230,8 +1333,16 @@ def LookupKeyInOSDict(osdict, key): count = unsigned(osdict.count) result = None idx = 0 + + if not comparer: + # When comparer is specified, "key" argument can be of any type as "comparer" knows how to compare "key" to a key from "osdict". + # When comparer is not specified, key is of cpp_obj type. 
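# Usage sketch for the new 'comparer' hook on LookupKeyInOSDict() (assumes the
# xnu lldbmacros environment: kern plus the ioreg.py helpers in this hunk).
# Passing CompareStringToOSSymbol lets a plain Python string be matched against
# OSSymbol keys; GetRegistryPlane() later in this patch makes exactly this call.
# lookup_plane_by_name is a hypothetical wrapper, not part of the patch.
def lookup_plane_by_name(plane_name):
    return LookupKeyInOSDict(kern.globals.gIORegistryPlanes, plane_name,
                             CompareStringToOSSymbol)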
+ key = getOSPtr(key) while idx < count and result is None: - if key == osdict.dictionary[idx].key: + if comparer is not None: + if comparer(key, osdict.dictionary[idx].key) == 0: + result = osdict.dictionary[idx].value + elif key == osdict.dictionary[idx].key: result = osdict.dictionary[idx].value idx += 1 return result @@ -1270,7 +1381,7 @@ def GetRegDictionary(osdict, prefix): def GetString(string): """ Returns the python string representation of a given OSString """ - out_string = "\"{0:s}\"".format(CastIOKitClass(string, 'OSString *').string) + out_string = "{0:s}".format(CastIOKitClass(string, 'OSString *').string) return out_string def GetNumber(num): @@ -1311,25 +1422,24 @@ def GetArray(arr): def GetDictionary(d): """ Returns a string containing info about a given OSDictionary """ - out_string = "{" + if d is None: + return "" + out_string = "{\n" idx = 0 count = unsigned(d.count) while idx < count: - obj = d.dictionary[idx].key - out_string += GetObjectSummary(obj) + "=" - obj = d.dictionary[idx].value + key = d.dictionary[idx].key + value = d.dictionary[idx].value + out_string += " \"{}\" = {}\n".format(GetString(key), GetObjectSummary(value)) idx += 1 - out_string += GetObjectSummary(obj) - if idx < count: - out_string += "," out_string += "}" return out_string def GetSet(se): """ Returns a string containing info about a given OSSet """ - out_string += "[" + GetArray(se.members) + "]" + out_string = "[" + GetArray(se.members) + "]" return out_string def ReadIOPortInt(addr, numbytes, lcpu): @@ -1536,3 +1646,120 @@ def showinterruptstats(cmd_args=None): return True +def GetRegistryPlane(plane_name): + """ + Given plane_name, returns IORegistryPlane * object or None if there's no such registry plane + """ + return LookupKeyInOSDict(kern.globals.gIORegistryPlanes, plane_name, CompareStringToOSSymbol) + +def DecodePreoslogSource(source): + """ + Given preoslog source, return a matching string representation + """ + source_to_str = {0 : "iboot"} + if source in source_to_str: + return source_to_str[source] + return "UNKNOWN" + +def GetPreoslogHeader(): + """ + Scan IODeviceTree for preoslog and return a python representation of it + """ + edt_plane = GetRegistryPlane("IODeviceTree") + if edt_plane is None: + print "Couldn't obtain a pointer to IODeviceTree" + return None + + # Registry API functions operate on "plane" global variable + global plane + prev_plane = plane + plane = edt_plane + chosen = FindRegistryObjectRecurse(kern.globals.gRegistryRoot, "chosen") + if chosen is None: + print "Couldn't obtain /chosen IORegistryEntry" + return None + + memory_map = FindRegistryObjectRecurse(chosen, "memory-map") + if memory_map is None: + print "Couldn't obtain memory-map from /chosen" + return None + + plane = prev_plane + + mm_preoslog = LookupKeyInOSDict(memory_map.fPropertyTable, "preoslog", CompareStringToOSSymbol) + if mm_preoslog is None: + print "Couldn't find preoslog entry in memory-map" + return None + + if mm_preoslog.length != 16: + print "preoslog entry in memory-map is malformed, expected len is 16, given len is {}".format(preoslog.length) + return None + + data = cast(mm_preoslog.data, "dtptr_t *") + preoslog_paddr = unsigned(data[0]) + preoslog_vaddr = kern.PhysToKernelVirt(preoslog_paddr) + preoslog_size = unsigned(data[1]) + + preoslog_header = PreoslogHeader() + + # This structure defnition doesn't exist in xnu + """ + typedef struct __attribute__((packed)) { + char magic[4]; + uint32_t size; + uint32_t offset; + uint8_t source; + uint8_t wrapped; + char data[]; + } 
preoslog_header_t; + """ + preoslog_header_ptr = kern.GetValueFromAddress(preoslog_vaddr, "uint8_t *") + preoslog_header.magic = preoslog_header_ptr[0:4] + preoslog_header.source = DecodePreoslogSource(unsigned(preoslog_header_ptr[12])) + preoslog_header.wrapped = unsigned(preoslog_header_ptr[13]) + preoslog_header_ptr = kern.GetValueFromAddress(preoslog_vaddr, "uint32_t *") + preoslog_header.size = unsigned(preoslog_header_ptr[1]) + preoslog_header.offset = unsigned(preoslog_header_ptr[2]) + + for i in xrange(len(preoslog_header.valid_magic)): + c = chr(unsigned(preoslog_header.magic[i])) + if c != preoslog_header.valid_magic[i]: + string = "Error: magic doesn't match, expected {:.4s}, given {:.4s}" + print string.format(preoslog_header.valid_magic, preoslog_header.magic) + return None + + if preoslog_header.size != preoslog_size: + string = "Error: size mismatch preoslog_header.size ({}) != preoslog_size ({})" + print string.format(preoslog_header.size, preoslog_size) + return None + + preoslog_data_ptr = kern.GetValueFromAddress(preoslog_vaddr + 14, "char *") + preoslog_header.data = preoslog_data_ptr.sbvalue.GetPointeeData(0, preoslog_size) + return preoslog_header + +@lldb_command("showpreoslog") +def showpreoslog(cmd_args=None): + """ Display preoslog buffer """ + + preoslog = GetPreoslogHeader() + if preoslog is None: + print "Error: couldn't obtain preoslog header" + return False + + header = "".join([ + "----preoslog log header-----\n", + "size - {} bytes\n", + "write offset - {:#x}\n", + "wrapped - {}\n", + "source - {}\n", + "----preoslog log start------" + ]) + + print header.format(preoslog.size, preoslog.offset, preoslog.wrapped, preoslog.source) + + err = lldb.SBError() + if preoslog.wrapped > 0: + print preoslog.data.GetString(err, preoslog.offset + 1) + print preoslog.data.GetString(err, 0) + print "-----preoslog log end-------" + return True diff --git a/tools/lldbmacros/ipc.py b/tools/lldbmacros/ipc.py index a1b02fced..4ae6086e6 100755 --- a/tools/lldbmacros/ipc.py +++ b/tools/lldbmacros/ipc.py @@ -5,13 +5,12 @@ from xnu import * import sys, shlex from utils import * from process import * -from atm import * from bank import * from waitq import * from ioreg import * import xnudefines -@header("{0: <20s} {1: <6s} {2: <6s} {3: <10s} {4: <20s}".format("task", "pid", '#acts', "tablesize", "command")) +@header("{0: <20s} {1: <6s} {2: <6s} {3: <10s} {4: <32s}".format("task", "pid", '#acts', "tablesize", "command")) def GetTaskIPCSummary(task, show_busy = False): """ Display a task's ipc summary. 
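# A minimal, self-contained sketch of the packed preoslog header laid out in
# the typedef comment above (magic[4], uint32 size, uint32 offset, uint8
# source, uint8 wrapped, then data[]); GetPreoslogHeader() reads the same
# 14 bytes field by field.  Little-endian byte order is an assumption here.
import struct

PREOSLOG_HDR = struct.Struct('<4sIIBB')          # 14-byte header

def parse_preoslog_header(raw):
    magic, size, offset, source, wrapped = PREOSLOG_HDR.unpack_from(raw)
    return {'magic': magic, 'size': size, 'offset': offset,
            'source': source, 'wrapped': wrapped,
            'data': raw[PREOSLOG_HDR.size:]}

sample = struct.pack('<4sIIBB', b'POSL', 32, 7, 0, 1) + b'x' * 32
assert parse_preoslog_header(sample)['magic'] == b'POSL'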
params: @@ -20,7 +19,7 @@ def GetTaskIPCSummary(task, show_busy = False): str - string of ipc info for the task """ out_string = '' - format_string = "{0: <#020x} {1: <6d} {2: <6d} {3: <10d} {4: <20s}" + format_string = "{0: <#020x} {1: <6d} {2: <6d} {3: <10d} {4: <32s}" busy_format = " {0: <10d} {1: <6d}" proc_name = '' if not task.active: @@ -29,7 +28,7 @@ def GetTaskIPCSummary(task, show_busy = False): proc_name += 'halting: ' pval = Cast(task.bsd_info, 'proc *') if int(pval) != 0: - proc_name += str(pval.p_comm) + proc_name += GetProcName(pval) elif int(task.task_imp_base) != 0 and hasattr(task.task_imp_base, 'iit_procname'): proc_name += str(task.task_imp_base.iit_procname) table_size = int(task.itk_space.is_table_size) @@ -40,7 +39,7 @@ def GetTaskIPCSummary(task, show_busy = False): return (out_string, table_size, nbusy, nmsgs) return (out_string, table_size) -@header("{0: <20s} {1: <6s} {2: <6s} {3: <10s} {4: <20s} {5: <10s} {6: <6s}".format("task", "pid", '#acts', "tablesize", "command", "#busyports", "#kmsgs")) +@header("{0: <20s} {1: <6s} {2: <6s} {3: <10s} {4: <32s} {5: <10s} {6: <6s}".format("task", "pid", '#acts', "tablesize", "command", "#busyports", "#kmsgs")) def GetTaskBusyIPCSummary(task): return GetTaskIPCSummary(task, True) @@ -113,7 +112,7 @@ def GetPortDestProc(portp): if tsk.itk_space == spacep: if tsk.bsd_info: destprocp = Cast(tsk.bsd_info, 'struct proc *') - out_str = "{0:s}({1: 0: - func(t, space, ctx, taskports_idx, 0, t.itk_sself, 17) + if unsigned(t.itk_settable_self) > 0: + func(t, space, ctx, taskports_idx, 0, t.itk_settable_self, 17) if unsigned(t.itk_host) > 0: func(t, space, ctx, taskports_idx, 0, t.itk_host, 17) if unsigned(t.itk_bootstrap) > 0: @@ -1216,10 +1242,14 @@ def IterateAllPorts(tasklist, func, ctx, include_psets, follow_busyports, should func(t, space, ctx, taskports_idx, 0, t.itk_debug_control, 17) if unsigned(t.itk_task_access) > 0: func(t, space, ctx, taskports_idx, 0, t.itk_task_access, 17) + if unsigned(t.itk_self[1]) > 0: ## task read port + func(t, space, ctx, taskports_idx, 0, t.itk_self[1], 17) + if unsigned(t.itk_self[2]) > 0: ## task inspect port + func(t, space, ctx, taskports_idx, 0, t.itk_self[2], 17) - ## Task name port (not a send right, just a naked ref) - if unsigned(t.itk_nself) > 0: - func(t, space, ctx, taskports_idx, 0,t.itk_nself, 0) + ## Task name port (not a send right, just a naked ref); TASK_FLAVOR_NAME = 3 + if unsigned(t.itk_self[3]) > 0: + func(t, space, ctx, taskports_idx, 0, t.itk_self[3], 0) ## task resume port is a receive right to resume the task if unsigned(t.itk_resume) > 0: @@ -1257,8 +1287,8 @@ def IterateAllPorts(tasklist, func, ctx, include_psets, follow_busyports, should ## XXX: look at block reason to see if it's in mach_msg_receive - then look at saved state / message ## Thread port (send right) - if unsigned(thval.ith_sself) > 0: - thport = thval.ith_sself + if unsigned(thval.ith_settable_self) > 0: + thport = thval.ith_settable_self func(t, space, ctx, thports_idx, 0, thport, 17) ## see: osfmk/mach/message.h ## Thread special reply port (send-once right) if unsigned(thval.ith_special_reply_port) > 0: @@ -1386,7 +1416,7 @@ def CountPortsCallback(task, space, ctx, entry_idx, ipc_entry, ipc_port, port_di p_intransit.add(unsigned(ipc_port)) if task.active or (task.halting and not task.active): - pname = str(Cast(task.bsd_info, 'proc *').p_name) + pname = GetProcName(Cast(task.bsd_info, 'proc *')) if not pname in p_bytask.keys(): p_bytask[pname] = { 'transit':0, 'table':0, 'other':0 } if entry_idx == 
intransit_idx: @@ -1610,7 +1640,7 @@ def ShowAllIITs(cmd_args=[], cmd_options={}): print GetIPCImportantTaskSummary(iit) return -@header("{: <18s} {: <3s} {: <18s} {: <20s} {: <18s} {: <8s}".format("ipc_imp_inherit", "don", "to_task", "proc_name", "from_elem", "depth")) +@header("{: <18s} {: <3s} {: <18s} {: <32s} {: <18s} {: <8s}".format("ipc_imp_inherit", "don", "to_task", "proc_name", "from_elem", "depth")) @lldb_type_summary(['ipc_importance_inherit *', 'ipc_importance_inherit_t']) def GetIPCImportanceInheritSummary(iii): """ describes iii object of type ipc_importance_inherit_t * """ @@ -1666,12 +1696,12 @@ def GetIPCImportanceElemSummary(iie): return out_str -@header("{: <18s} {: <18s} {: <20s}".format("iit", "task", "name")) +@header("{: <18s} {: <18s} {: <32}".format("iit", "task", "name")) @lldb_type_summary(['ipc_importance_task *']) def GetIPCImportantTaskSummary(iit): """ iit is a ipc_importance_task value object. """ - fmt = "{: <#018x} {: <#018x} {: <20s}" + fmt = "{: <#018x} {: <#018x} {: <32}" out_str='' pname = GetProcNameForTask(iit.iit_task) if hasattr(iit, 'iit_bsd_pid'): @@ -1818,8 +1848,7 @@ def GetATMHandleSummary(handle_ptr): params: handle_ptr - uint64 number stored in handle of voucher returns: str - summary of atm value """ - elem = kern.GetValueFromAddress(handle_ptr, 'atm_value *') - return GetATMValueSummary(elem) + return "???" def GetBankHandleSummary(handle_ptr): """ converts a handle value inside a voucher attribute table to bank element and returns appropriate summary. @@ -1847,8 +1876,7 @@ def GetBagofBitsHandleSummary(handle_ptr): @static_var('attr_managers',{1: GetATMHandleSummary, 2: GetIPCHandleSummary, 3: GetBankHandleSummary, 7: GetBagofBitsHandleSummary}) def GetHandleSummaryForKey(handle_ptr, key_num): """ Get a summary of handle pointer from the voucher attribute manager. - For example key 1 -> ATM and it puts atm_value_t in the handle. So summary of it would be atm value and refs etc. - key 2 -> ipc and it puts either ipc_importance_inherit_t or ipc_important_task_t. + For example key 2 -> ipc and it puts either ipc_importance_inherit_t or ipc_important_task_t. key 3 -> Bank and it puts either bank_task_t or bank_account_t. key 7 -> Bag of Bits and it puts user_data_element_t in handle. So summary of it would be Bag of Bits content and refs etc. """ @@ -2084,7 +2112,7 @@ def ShowTaskSuspenders(cmd_args=[], cmd_options={}): task = kern.GetValueFromAddress(cmd_args[0], 'task_t') if task.suspend_count == 0: - print "task {:#x} ({:s}) is not suspended".format(unsigned(task), Cast(task.bsd_info, 'proc_t').p_name) + print "task {:#x} ({:s}) is not suspended".format(unsigned(task), GetProcName(Cast(task.bsd_info, 'proc_t'))) return # If the task has been suspended by the kernel (potentially by @@ -2094,7 +2122,7 @@ def ShowTaskSuspenders(cmd_args=[], cmd_options={}): # which task did the suspension. 
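# The IterateAllPorts() hunk above walks the new itk_self[] array instead of
# the old itk_sself/itk_nself fields.  Sketch of the flavor-to-index mapping
# implied by its comments (index 3 is explicitly TASK_FLAVOR_NAME); treating
# index 0 as the control port is an assumption the diff does not spell out.
TASK_PORT_FLAVORS = {
    0: 'control (assumed)',
    1: 'read',
    2: 'inspect',
    3: 'name',
}

def describe_task_port_slot(idx):
    return TASK_PORT_FLAVORS.get(idx, 'unknown flavor %d' % idx)

print(describe_task_port_slot(3))   # -> name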
port = task.itk_resume if not port: - print "task {:#x} ({:s}) is suspended but no resume port exists".format(unsigned(task), Cast(task.bsd_info, 'proc_t').p_name) + print "task {:#x} ({:s}) is suspended but no resume port exists".format(unsigned(task), GetProcName(Cast(task.bsd_info, 'proc_t'))) return return FindPortRights(cmd_args=[unsigned(port)], cmd_options={'-R':'S'}) diff --git a/tools/lldbmacros/kasan.py b/tools/lldbmacros/kasan.py index d924521fe..94f133ccd 100755 --- a/tools/lldbmacros/kasan.py +++ b/tools/lldbmacros/kasan.py @@ -77,7 +77,7 @@ def print_alloc_free_entry(addr, orig_ptr): if h.zone: zone = h.zone - if str(zone.zone_name).startswith("fakestack"): + if str(zone.z_name).startswith("fakestack"): alloc_type = "fakestack" leftrz = 16 else: @@ -98,7 +98,7 @@ def print_alloc_free_entry(addr, orig_ptr): print "Offset: {} bytes".format(orig_ptr - addr - leftrz) print "Redzone: {} / {} bytes".format(leftrz, rightrz) if h.zone: - print "Zone: 0x{:x} <{:s}>".format(unsigned(zone), zone.zone_name) + print "Zone: 0x{:x} <{:s}>".format(unsigned(zone), zone.z_name) btframes = unsigned(h.frames) if btframes > 0: diff --git a/tools/lldbmacros/kcdata.py b/tools/lldbmacros/kcdata.py index dff373630..a17eec8b2 100755 --- a/tools/lldbmacros/kcdata.py +++ b/tools/lldbmacros/kcdata.py @@ -14,6 +14,7 @@ import subprocess import logging import contextlib import base64 +import zlib class Globals(object): pass @@ -98,6 +99,11 @@ kcdata_type_def = { 'STACKSHOT_KCTYPE_SYS_SHAREDCACHE_LAYOUT' : 0x927, 'STACKSHOT_KCTYPE_THREAD_DISPATCH_QUEUE_LABEL' : 0x928, 'STACKSHOT_KCTYPE_THREAD_TURNSTILEINFO' : 0x929, + 'STACKSHOT_KCTYPE_TASK_CPU_ARCHITECTURE' : 0x92a, + 'STACKSHOT_KCTYPE_LATENCY_INFO' : 0x92b, + 'STACKSHOT_KCTYPE_LATENCY_INFO_TASK' : 0x92c, + 'STACKSHOT_KCTYPE_LATENCY_INFO_THREAD' : 0x92d, + 'STACKSHOT_KCTYPE_LOADINFO64_TEXT_EXEC' : 0x92e, 'STACKSHOT_KCTYPE_TASK_DELTA_SNAPSHOT': 0x940, 'STACKSHOT_KCTYPE_THREAD_DELTA_SNAPSHOT': 0x941, @@ -146,6 +152,7 @@ kcdata_type_def = { 'KCDATA_BUFFER_BEGIN_CRASHINFO': 0xDEADF157, 'KCDATA_BUFFER_BEGIN_DELTA_STACKSHOT': 0xDE17A59A, 'KCDATA_BUFFER_BEGIN_STACKSHOT': 0x59a25807, + 'KCDATA_BUFFER_BEGIN_COMPRESSED': 0x434f4d50, 'KCDATA_BUFFER_BEGIN_OS_REASON': 0x53A20900, 'KCDATA_BUFFER_BEGIN_XNUPOST_CONFIG': 0x1E21C09F } @@ -421,8 +428,9 @@ class KCObject(object): if self.i_type == GetTypeForName('KCDATA_TYPE_CONTAINER_BEGIN'): self.__class__ = KCContainerObject - - if self.i_type in KNOWN_TOPLEVEL_CONTAINER_TYPES: + elif self.i_type == GetTypeForName('KCDATA_BUFFER_BEGIN_COMPRESSED'): + self.__class__ = KCCompressedBufferObject + elif self.i_type in KNOWN_TOPLEVEL_CONTAINER_TYPES: self.__class__ = KCBufferObject self.InitAfterParse() @@ -660,6 +668,40 @@ class KCBufferObject(KCContainerObject): no_end_message = "could not find buffer end marker" +class KCCompressedBufferObject(KCContainerObject): + + def ReadItems(self, iterator): + self.header = dict() + with INDENT.indent(): + for i in iterator: + o = KCObject.FromKCItem(i) + if self.IsEndMarker(o): + self.compressed_type = o.i_type + self.blob_start = o.offset + 16 + break + o.ParseData() + self.header[o.i_name] = o.obj + + def IsEndMarker(self, o): + return o.i_type in KNOWN_TOPLEVEL_CONTAINER_TYPES + + def GetCompressedBlob(self, data): + if self.header['kcd_c_type'] != 1: + raise NotImplementedError + blob = data[self.blob_start:self.blob_start+self.header['kcd_c_totalout']] + if len(blob) != self.header['kcd_c_totalout']: + raise ValueError + return blob + + def Decompress(self, data): + start_marker = 
struct.pack('] + + Caveats: + * 32-bit kernels are unsupported. + * The trace file will be missing machine and config chunks, which might + prevent tools from analyzing it. + """ + + if kern.arch not in ['x86_64', 'x86_64h', 'arm64', 'arm64e']: + print('32-bit kernels are unsupported') + return + + if len(cmd_args) != 1: + raise ArgumentError('path to trace file is required') + + nevents = unsigned(kern.globals.nkdbufs) + if nevents == 0: + print('kdebug buffers are not set up') + return + + limit_nevents = nevents + if '-N' in cmd_options: + limit_nevents = unsigned(cmd_options['-N']) + if limit_nevents > nevents: + limit_nevents = nevents + verbose = config['verbosity'] > vHUMAN + + file_offset = 0 + with open(cmd_args[0], 'w+b') as f: + FILE_MAGIC = 0x55aa0300 + EVENTS_TAG = 0x00001e00 + SSHOT_TAG = 0x8002 + CHUNKHDR_PACK = 'IHHQ' + FILEHDR_PACK = CHUNKHDR_PACK + 'IIQQIIII' + FILEHDR_SIZE = 40 + FUTURE_SIZE = 8 + + numer, denom = GetTimebaseInfo() + + # XXX The kernel doesn't have a solid concept of the wall time. + wall_abstime = 0 + wall_secs = 0 + wall_usecs = 0 + + # XXX 32-bit is NYI + k64 = True + event_size = unsigned(64) + + file_hdr = struct.pack( + FILEHDR_PACK, FILE_MAGIC, 0, 0, FILEHDR_SIZE, + numer, denom, wall_abstime, wall_secs, wall_usecs, 0, 0, + 0x1 if k64 else 0) + f.write(file_hdr) + file_offset += 16 + FILEHDR_SIZE # chunk header plus file header + + skip_nevents = nevents - limit_nevents if limit_nevents else 0 + if skip_nevents > 0: + print('omitting {} events from the beginning'.format(skip_nevents)) + + events_hdr = struct.pack( + CHUNKHDR_PACK, EVENTS_TAG, 0, 0, 0) # size will be filled in later + f.write(events_hdr) + file_offset += 16 # header size + event_size_offset = file_offset - FUTURE_SIZE + # Future events timestamp -- doesn't need to be set for merged events. + f.write(struct.pack('Q', 0)) + file_offset += FUTURE_SIZE + + if verbose: + print('events start at offset {}'.format(file_offset)) + + process = LazyTarget().GetProcess() + error = lldb.SBError() + + written_nevents = 0 + seen_nevents = 0 + for event in IterateKdebugEvents(): + seen_nevents += 1 + if skip_nevents >= seen_nevents: + if seen_nevents % 1000 == 0: + sys.stderr.write('skipped {}/{} ({:4.2f}%) events'.format( + seen_nevents, skip_nevents, + float(seen_nevents) / skip_nevents * 100.0)) + sys.stderr.write('\r') + + continue + + event = process.ReadMemory( + unsigned(addressof(event)), event_size, error) + file_offset += event_size + f.write(event) + written_nevents += 1 + # Periodically update the CLI with progress. + if written_nevents % 1000 == 0: + sys.stderr.write('wrote {}/{} ({:4.2f}%) events'.format( + written_nevents, limit_nevents, + float(written_nevents) / nevents * 100.0)) + sys.stderr.write('\r') + sys.stderr.write('\n') + print('wrote {} events'.format(written_nevents)) + if verbose: + print('events end at offset {}'.format(file_offset)) + + # Normally, the chunk would need to be padded to 8, but events are + # already aligned. 
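# Sketch of the ktrace chunk header the trace-saving command above emits:
# struct.pack with 'IHHQ' yields a 16-byte header of (u32 tag, u16, u16,
# u64 length).  Reading the two u16 fields as version/sub-tag values is an
# assumption; the macro itself just passes 0 (or 1 for the stackshot chunk).
import struct

CHUNKHDR_PACK = 'IHHQ'
EVENTS_TAG = 0x00001e00

def make_chunk_header(tag, length, a=0, b=0):
    hdr = struct.pack(CHUNKHDR_PACK, tag, a, b, length)
    assert len(hdr) == 16
    return hdr

events_hdr = make_chunk_header(EVENTS_TAG, 0)   # length patched in afterwards, as above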
+ + kcdata = kern.globals.kc_panic_data + kcdata_addr = unsigned(kcdata.kcd_addr_begin) + kcdata_length = unsigned(kcdata.kcd_length) + if kcdata_addr != 0 and kcdata_length != 0: + print('writing stackshot') + f.write(struct.pack(CHUNKHDR_PACK, SSHOT_TAG, 1, 0, kcdata_length)) + file_offset += 16 + if verbose: + print('stackshot is {} bytes long'.format(kcdata_length)) + print('stackshot starts at offset {}'.format(file_offset)) + ssdata = process.ReadMemory(kcdata_addr, kcdata_length, error) + f.write(ssdata) + file_offset += kcdata_length + if verbose: + print('stackshot ends at offset {}'.format(file_offset)) + else: + print('stackshot is not available, trace file may not be usable!') + + # After the number of events is known, fix up the events chunk size. + events_data_size = unsigned(written_nevents * event_size) + FUTURE_SIZE + f.seek(event_size_offset) + f.write(struct.pack('Q', events_data_size)) + + return diff --git a/tools/lldbmacros/memory.py b/tools/lldbmacros/memory.py index 963db7bcc..f64b5a0aa 100755 --- a/tools/lldbmacros/memory.py +++ b/tools/lldbmacros/memory.py @@ -9,28 +9,124 @@ from utils import * import xnudefines from process import * import macho +import json +from ctypes import c_int64 + +def vm_unpack_pointer(packed, params, type_str = 'void *'): + """ Unpack a pointer packed with VM_PACK_POINTER() + params: + packed - value : The packed pointer value + params - value : The packing parameters of type vm_packing_params_t + type_str - str : The type to cast the unpacked pointer into + returns: + The unpacked pointer + """ + if params.vmpp_base_relative: + addr = unsigned(packed) << int(params.vmpp_shift) + if addr: addr += int(params.vmpp_base) + else: + bits = int(params.vmpp_bits) + shift = int(params.vmpp_shift) + addr = c_int64(unsigned(packed) << (64 - bits)).value + addr >>= 64 - bits - shift + return kern.GetValueFromAddress(addr, type_str) + +def IterateZPerCPU(root, element_type): + """ Iterates over a percpu variable + params: + root - value : Value object for per-cpu variable + element_type - str : Type of element + returns: + one slot + """ + pagesize = kern.globals.page_size + mangle = 1 << (8 * kern.ptrsize - 1) + for i in range(0, kern.globals.zpercpu_early_count): + yield kern.GetValueFromAddress((int(root) | mangle) + i * pagesize, element_type) + +@lldb_command('showzpcpu', "S") +def ShowZPerCPU(cmd_args=None, cmd_options={}): + """ Routine to show per-cpu zone allocated variables + + Usage: showzpcpu [-S] expression [field] + -S : sum the values instead of printing them + """ + if not cmd_args: + raise ArgumentError("No arguments passed") + pagesize = kern.globals.page_size + mangle = 1 << (8 * kern.ptrsize - 1) + sbv = pagesize.GetSBValue() + v = sbv.CreateValueFromExpression(None, cmd_args[0]) + e = value(v) + acc = 0 + for i in range(0, kern.globals.zpercpu_early_count): + if len(cmd_args) == 1: + t = sbv.CreateValueFromExpression(None, '(%s)%d' % (v.GetTypeName(), (int(e) | mangle) + i * pagesize)).Dereference() + else: + t = sbv.CreateValueFromExpression(None, '((%s)%d)->%s' % (v.GetTypeName(), (int(e) | mangle) + i * pagesize, cmd_args[1])) + if "-S" in cmd_options: + acc += value(t) + else: + print value(t) + + if "-S" in cmd_options: + print acc + +def ZoneName(zone): + """ Formats the name for a given zone + params: + zone - value : A pointer to a zone + returns: + the formated name for the zone + """ + names = [ "", "default.", "data.", "kext."] + return "{:s}{:s}".format(names[int(zone.kalloc_heap)], zone.z_name) + +def 
PrettyPrintDictionary(d): + """ Internal function to pretty print a dictionary with string or integer values + params: The dictionary to print + """ + for key, value in d.items(): + key += ":" + if isinstance(value, int): + print "{:<30s} {: >10d}".format(key, value) + else: + print "{:<30s} {: >10s}".format(key, value) # Macro: memstats -@lldb_command('memstats') -def Memstats(cmd_args=None): +@lldb_command('memstats', 'J') +def Memstats(cmd_args=None, cmd_options={}): """ Prints out a summary of various memory statistics. In particular vm_page_wire_count should be greater than 2K or you are under memory pressure. + usage: memstats -J + Output json """ + print_json = False + if "-J" in cmd_options: + print_json = True + + memstats = {} try: - print "memorystatus_level: {: >10d}".format(kern.globals.memorystatus_level) - print "memorystatus_available_pages: {: >10d}".format(kern.globals.memorystatus_available_pages) - print "inuse_ptepages_count: {: >10d}".format(kern.globals.inuse_ptepages_count) + memstats["memorystatus_level"] = int(kern.globals.memorystatus_level) + memstats["memorystatus_available_pages"] = int(kern.globals.memorystatus_available_pages) + memstats["inuse_ptepages_count"] = int(kern.globals.inuse_ptepages_count) except ValueError: pass - print "vm_page_throttled_count: {: >10d}".format(kern.globals.vm_page_throttled_count) - print "vm_page_active_count: {: >10d}".format(kern.globals.vm_page_active_count) - print "vm_page_inactive_count: {: >10d}".format(kern.globals.vm_page_inactive_count) - print "vm_page_wire_count: {: >10d}".format(kern.globals.vm_page_wire_count) - print "vm_page_free_count: {: >10d}".format(kern.globals.vm_page_free_count) - print "vm_page_purgeable_count: {: >10d}".format(kern.globals.vm_page_purgeable_count) - print "vm_page_inactive_target: {: >10d}".format(kern.globals.vm_page_inactive_target) - print "vm_page_free_target: {: >10d}".format(kern.globals.vm_page_free_target) - - print "vm_page_free_reserved: {: >10d}".format(kern.globals.vm_page_free_reserved) + if hasattr(kern.globals, 'compressor_object'): + memstats["compressor_page_count"] = int(kern.globals.compressor_object.resident_page_count) + memstats["vm_page_throttled_count"] = int(kern.globals.vm_page_throttled_count) + memstats["vm_page_active_count"] = int(kern.globals.vm_page_active_count) + memstats["vm_page_inactive_count"] = int(kern.globals.vm_page_inactive_count) + memstats["vm_page_wire_count"] = int(kern.globals.vm_page_wire_count) + memstats["vm_page_free_count"] = int(kern.globals.vm_page_free_count) + memstats["vm_page_purgeable_count"] = int(kern.globals.vm_page_purgeable_count) + memstats["vm_page_inactive_target"] = int(kern.globals.vm_page_inactive_target) + memstats["vm_page_free_target"] = int(kern.globals.vm_page_free_target) + memstats["vm_page_free_reserved"] = int(kern.globals.vm_page_free_reserved) + + if print_json: + print json.dumps(memstats) + else: + PrettyPrintDictionary(memstats) @xnudebug_test('test_memstats') def TestMemstats(kernel_target, config, lldb_obj, isConnected ): @@ -64,7 +160,7 @@ def CalculateLedgerPeak(phys_footprint_entry): ledger_peak = long(phys_footprint_entry._le._le_max.le_interval_max) return ledger_peak -@header("{: >8s} {: >12s} {: >12s} {: >10s} {: >10s} {: >12s} {: >14s} {: >10s} {: >12s} {: >10s} {: >10s} {: >10s} {: <20s}\n".format( +@header("{: >8s} {: >12s} {: >12s} {: >10s} {: >10s} {: >12s} {: >14s} {: >10s} {: >12s} {: >10s} {: >10s} {: >10s} {: <32s}\n".format( 'pid', 'effective', 'requested', 'state', 'relaunch', 
'user_data', 'physical', 'iokit', 'footprint', 'recent peak', 'lifemax', 'limit', 'command')) def GetMemoryStatusNode(proc_val): @@ -99,7 +195,7 @@ def GetMemoryStatusNode(proc_val): out_str += "{: >12s}".format('-') out_str += "{: >10d} ".format(phys_footprint_lifetime_max) - out_str += "{: >10d} {: <20s}\n".format(phys_footprint_limit, proc_val.p_comm) + out_str += "{: >10d} {: <32s}\n".format(phys_footprint_limit, GetProcName(proc_val)) return out_str @lldb_command('showmemorystatus') @@ -125,95 +221,195 @@ def ShowMemoryStatus(cmd_args=None): # EndMacro: showmemorystatus -def GetRealMetadata(meta): - """ Get real metadata for a given metadata pointer +class ZoneMeta(object): """ - try: - if unsigned(meta.zindex) != 0x03FF: - return meta - else: - return kern.GetValueFromAddress(unsigned(meta) - unsigned(meta.real_metadata_offset), "struct zone_page_metadata *") - except: - return 0 - -def GetFreeList(meta): - """ Get the free list pointer for a given metadata pointer + Helper class that helpers walking metadata """ - global kern - zone_map_min_address = kern.GetGlobalVariable('zone_map_min_address') - zone_map_max_address = kern.GetGlobalVariable('zone_map_max_address') - try: - if unsigned(meta.freelist_offset) == unsigned(0xffffffff): - return 0 + + @classmethod + def _looksForeign(cls, addr): + if addr & (kern.globals.page_size - 1): + return False + try: + meta = kern.GetValueFromAddress(addr, "struct zone_page_metadata *") + return meta.zm_foreign_cookie[0] == 0x123456789abcdef + except: + return False + + def __init__(self, addr, isPageIndex = False): + global kern + pagesize = kern.globals.page_size + zone_info = kern.GetGlobalVariable('zone_info') + + self.zone_map_min = unsigned(zone_info.zi_map_range.min_address) + self.zone_map_max = unsigned(zone_info.zi_map_range.max_address) + self.zone_meta_min = unsigned(zone_info.zi_meta_range.min_address) + self.zone_meta_max = unsigned(zone_info.zi_meta_range.max_address) + + addr = unsigned(addr) + if isPageIndex: + # sign extend + addr = value(pagesize.GetSBValue().CreateValueFromExpression(None, + '(long)(int)%d * %d' %(addr, pagesize))) + addr = unsigned(addr) + + self.address = addr + + if self.zone_meta_min <= addr and addr < self.zone_meta_max: + self.kind = 'Metadata' + addr -= (addr - self.zone_meta_min) % sizeof('struct zone_page_metadata') + self.meta_addr = addr + self.meta = kern.GetValueFromAddress(addr, "struct zone_page_metadata *") + + self.page_addr = self.zone_map_min + ((addr - self.zone_meta_min) / sizeof('struct zone_page_metadata') * pagesize) + self.first_offset = 0 + elif self.zone_map_min <= addr and addr < self.zone_map_max: + addr &= ~(pagesize - 1) + page_idx = (addr - self.zone_map_min) / pagesize + + self.kind = 'Element' + self.page_addr = addr + self.meta_addr = self.zone_meta_min + page_idx * sizeof('struct zone_page_metadata') + self.meta = kern.GetValueFromAddress(self.meta_addr, "struct zone_page_metadata *") + self.first_offset = 0 + elif ZoneMeta._looksForeign(addr): + self.kind = 'Element (F)' + addr &= ~(pagesize - 1) + self.page_addr = addr + self.meta_addr = addr + self.meta = kern.GetValueFromAddress(addr, "struct zone_page_metadata *") + self.first_offset = 32 # ZONE_FOREIGN_PAGE_FIRST_OFFSET in zalloc.c else: - if (unsigned(meta) >= unsigned(zone_map_min_address)) and (unsigned(meta) < unsigned(zone_map_max_address)): - page_index = ((unsigned(meta) - unsigned(kern.GetGlobalVariable('zone_metadata_region_min'))) / sizeof('struct zone_page_metadata')) - return 
(unsigned(zone_map_min_address) + (kern.globals.page_size * (page_index))) + meta.freelist_offset - else: - return (unsigned(meta) + meta.freelist_offset) - except: + self.kind = 'Unknown' + self.meta = None + self.page_addr = 0 + self.meta_addr = 0 + self.first_offset = 0 + + def isSecondaryPage(self): + return self.meta and self.meta.zm_secondary_page + + def getPageCount(self): + return self.meta and self.meta.zm_page_count or 0 + + def getAllocCount(self): + return self.meta and self.meta.zm_alloc_count or 0 + + def getReal(self): + if self.isSecondaryPage(): + return ZoneMeta(self.meta - self.meta.zm_page_count) + + return self + + def getFreeList(self): + if self.meta and self.meta.zm_freelist_offs != unsigned(0xffff): + return kern.GetValueFromAddress(self.page_addr + self.meta.zm_freelist_offs, 'vm_offset_t *') return 0 + def iterateFreeList(self): + cur = self.getFreeList() + while cur: + yield cur + + cur = dereference(cast(cur, 'vm_offset_t *')) + cur = unsigned(cur) ^ unsigned(kern.globals.zp_nopoison_cookie) + cur = kern.GetValueFromAddress(cur, 'vm_offset_t *') + + def iterateElements(self): + if self.meta is None: + return + esize = self.getZone().z_elem_size + offs = self.first_offset + end = kern.globals.page_size + if not self.meta.zm_percpu: + end *= self.meta.zm_page_count + + while offs + esize <= end: + yield kern.GetValueFromAddress(self.page_addr + offs, 'void *') + offs += esize + + def getZone(self): + if self.meta: + return kern.globals.zone_array[self.meta.zm_index] + return None + @lldb_type_summary(['zone_page_metadata']) -@header("{:<18s} {:<18s} {:>8s} {:>8s} {:<18s} {:<20s}".format('ZONE_METADATA', 'FREELIST', 'PG_CNT', 'FREE_CNT', 'ZONE', 'NAME')) +@header("{:<18s} {:<18s} {:>8s} {:>8s} {:<18s} {:<20s}".format('ZONE_METADATA', 'FREELIST', 'PG_CNT', 'ALLOC_CNT', 'ZONE', 'NAME')) def GetZoneMetadataSummary(meta): """ Summarize a zone metadata object params: meta - obj representing zone metadata in the kernel returns: str - summary of the zone metadata """ - out_str = "" - global kern - zinfo = 0 - try: - out_str += 'Metadata Description:\n' + GetZoneMetadataSummary.header + '\n' - meta = kern.GetValueFromAddress(meta, "struct zone_page_metadata *") - if unsigned(meta.zindex) == 255: - out_str += "{:#018x} {:#018x} {:8d} {:8d} {:#018x} {:s}\n".format(meta, 0, 0, 0, 0, '(fake multipage meta)') - meta = GetRealMetadata(meta) - if meta == 0: - return "" - zinfo = kern.globals.zone_array[unsigned(meta.zindex)] - out_str += "{:#018x} {:#018x} {:8d} {:8d} {:#018x} {:s}".format(meta, GetFreeList(meta), meta.page_count, meta.free_count, addressof(zinfo), zinfo.zone_name) - return out_str - except: - out_str = "" - return out_str -@header("{:<18s} {:>18s} {:>18s} {:<18s}".format('ADDRESS', 'TYPE', 'OFFSET_IN_PG', 'METADATA')) + if type(meta) != ZoneMeta: + meta = ZoneMeta(meta) + + out_str = 'Metadata Description:\n' + GetZoneMetadataSummary.header + '\n' + if meta.isSecondaryPage(): + out_str += "{:#018x} {:#018x} {:8d} {:8d} {:#018x} {:s}\n".format( + meta.meta_addr, 0, 0, 0, 0, '(fake multipage meta)') + meta = meta.getReal() + zinfo = meta.getZone() + out_str += "{:#018x} {:#018x} {:8d} {:8d} {:#018x} {:s}".format( + meta.meta_addr, meta.getFreeList(), meta.getPageCount(), meta.getAllocCount(), + addressof(zinfo), ZoneName(zinfo)) + return out_str + +@header("{:<18s} {:>10s} {:>18s} {:>18s} {:<10s}".format( + 'ADDRESS', 'TYPE', 'METADATA', 'PAGE_ADDR', 'OFFSET')) def WhatIs(addr): """ Information about kernel pointer """ - out_str = "" global kern - pagesize = 
kern.globals.page_size - zone_map_min_address = kern.GetGlobalVariable('zone_map_min_address') - zone_map_max_address = kern.GetGlobalVariable('zone_map_max_address') - if (unsigned(addr) >= unsigned(zone_map_min_address)) and (unsigned(addr) < unsigned(zone_map_max_address)): - zone_metadata_region_min = kern.GetGlobalVariable('zone_metadata_region_min') - zone_metadata_region_max = kern.GetGlobalVariable('zone_metadata_region_max') - if (unsigned(addr) >= unsigned(zone_metadata_region_min)) and (unsigned(addr) < unsigned(zone_metadata_region_max)): - metadata_offset = (unsigned(addr) - unsigned(zone_metadata_region_min)) % sizeof('struct zone_page_metadata') - page_offset_str = "{:d}/{:d}".format((unsigned(addr) - (unsigned(addr) & ~(pagesize - 1))), pagesize) - out_str += WhatIs.header + '\n' - out_str += "{:#018x} {:>18s} {:>18s} {:#018x}\n\n".format(unsigned(addr), "Metadata", page_offset_str, unsigned(addr) - metadata_offset) - out_str += GetZoneMetadataSummary((unsigned(addr) - metadata_offset)) + '\n\n' - else: - page_index = ((unsigned(addr) & ~(pagesize - 1)) - unsigned(zone_map_min_address)) / pagesize - meta = unsigned(zone_metadata_region_min) + (page_index * sizeof('struct zone_page_metadata')) - meta = kern.GetValueFromAddress(meta, "struct zone_page_metadata *") - page_meta = GetRealMetadata(meta) - if page_meta != 0: - zinfo = kern.globals.zone_array[unsigned(page_meta.zindex)] - page_offset_str = "{:d}/{:d}".format((unsigned(addr) - (unsigned(addr) & ~(pagesize - 1))), pagesize) - out_str += WhatIs.header + '\n' - out_str += "{:#018x} {:>18s} {:>18s} {:#018x}\n\n".format(unsigned(addr), "Element", page_offset_str, page_meta) - out_str += GetZoneMetadataSummary(unsigned(page_meta)) + '\n\n' - else: - out_str += "Unmapped address within the zone_map ({:#018x}-{:#018x})".format(zone_map_min_address, zone_map_max_address) + + meta = ZoneMeta(addr) + + if meta.meta is None: + out_str = "Address {:#018x} is outside of any zone map ({:#018x}-{:#018x})\n".format( + addr, meta.zone_map_min, meta.zone_map_max) else: - out_str += "Address {:#018x} is outside the zone_map ({:#018x}-{:#018x})\n".format(addr, zone_map_min_address, zone_map_max_address) + if meta.kind[0] == 'E': # element + page_offset_str = "{:d}/{:d}K".format( + addr - meta.page_addr, kern.globals.page_size / 1024) + else: + page_offset_str = "-" + out_str = WhatIs.header + '\n' + out_str += "{meta.address:#018x} {meta.kind:>10s} {meta.meta_addr:#018x} {meta.page_addr:#018x} {:<10s}\n\n".format( + page_offset_str, meta=meta) + out_str += GetZoneMetadataSummary(meta) + '\n\n' + print out_str - return + + if meta.kind[0] == 'E': + print "Hexdump:\n" + + meta = meta.getReal() + esize = meta.getZone().z_elem_size + start = meta.page_addr + + estart = addr - (start - meta.first_offset) + estart = start + estart - (estart % esize) + + try: + if estart > start: + data_array = kern.GetValueFromAddress(estart - 16, "uint8_t *") + print_hex_data(data_array[0:16], estart - 16, "") + print "------------------------------------------------------------------" + except: + pass + + try: + data_array = kern.GetValueFromAddress(estart, "uint8_t *") + print_hex_data(data_array[0:esize], estart, "") + except: + pass + + try: + print "------------------------------------------------------------------" + data_array = kern.GetValueFromAddress(estart + esize, "uint8_t *") + print_hex_data(data_array[0:16], estart + esize, "") + except: + pass @lldb_command('whatis') def WhatIsHelper(cmd_args=None): @@ -222,194 +418,228 @@ def 
WhatIsHelper(cmd_args=None): """ if not cmd_args: raise ArgumentError("No arguments passed") - addr = kern.GetValueFromAddress(cmd_args[0], 'void *') - WhatIs(addr) - print "Hexdump:\n" - try: - data_array = kern.GetValueFromAddress(unsigned(addr) - 16, "uint8_t *") - print_hex_data(data_array[0:48], unsigned(addr) - 16, "") - except: - pass - return + WhatIs(kern.GetValueFromAddress(cmd_args[0], 'void *')) # Macro: showzcache @lldb_type_summary(['zone','zone_t']) -@header("{:^18s} {:<40s} {:>10s} {:>10s} {:>10s} {:>10s}".format( -'ZONE', 'NAME', 'CACHE_ELTS', 'DEP_VALID', 'DEP_EMPTY','DEP_FULL')) +@header("{:<18s} {:>5s} {:>10s} {:>12s} {:>12s} {:>9s} {:>9s} {:>9s} {:>9s} {:>9s} {:<20s}".format( +'ZONE', 'ELTS', 'D FULL/EMPTY', 'ALLOCS', 'FREES', 'D_SWAP', 'D_FILL', 'D_DRAIN', 'D_GC', 'D_FAIL', 'NAME')) -def GetZoneCacheSummary(zone): +def GetZoneCacheSummary(zone, O): """ Summarize a zone's cache with important information. params: zone: value - obj representing a zone in kernel returns: str - summary of the zone's cache contents """ - out_string = "" - format_string = '{:#018x} {:<40s} {:>10d} {:>10s} {:>10d} {:>10d}' - cache_elem_count = 0 + format_string = '{:#018x} {:>5d} {:>4d} / {:>4d} {:>12,d} {:>12,d} {:>9,d} {:>9,d} {:>9,d} {:>9,d} {:>9,d} {:<20s}' mag_capacity = kern.GetGlobalVariable('magazine_element_count') depot_capacity = kern.GetGlobalVariable('depot_element_count') + cache_elem_count = 0 + allocs = 0 + frees = 0 if zone.__getattr__('cpu_cache_enabled') : - for i in range(0, kern.globals.machine_info.physical_cpu): - cache = zone.zcache[0].zcc_per_cpu_caches[i] + for cache in IterateZPerCPU(zone.zcache.zcc_pcpu, 'struct zcc_per_cpu_cache *'): cache_elem_count += cache.current.zcc_magazine_index cache_elem_count += cache.previous.zcc_magazine_index - - if zone.zcache[0].zcc_depot_index != -1: - cache_elem_count += zone.zcache[0].zcc_depot_index * mag_capacity - out_string += format_string.format(zone, zone.zone_name, cache_elem_count, "Y", depot_capacity - zone.zcache[0].zcc_depot_index, zone.zcache[0].zcc_depot_index) - else: - out_string += format_string.format(zone, zone.zone_name, cache_elem_count, "N", 0, 0) - - return out_string - -@lldb_command('showzcache') -def ZcachePrint(cmd_args=None): + allocs += cache.zcc_allocs + frees += cache.zcc_frees + + depot = zone.zcache.zcc_depot + cache_elem_count += depot.zcc_depot_index * mag_capacity + print O.format(format_string, zone, cache_elem_count, + depot.zcc_depot_index, depot_capacity - depot.zcc_depot_index, + allocs, frees, depot.zcc_swap, depot.zcc_fill, depot.zcc_drain, + depot.zcc_gc, depot.zcc_fail, ZoneName(zone)) + +@lldb_command('showzcache', fancy=True) +def ZcachePrint(cmd_args=None, cmd_options={}, O=None): """ Routine to print a summary listing of all the kernel zones cache contents All columns are printed in decimal """ global kern - print GetZoneCacheSummary.header - for zval in kern.zones: - if zval.__getattr__('cpu_cache_enabled') : - print GetZoneCacheSummary(zval) + with O.table(GetZoneCacheSummary.header): + for zval in kern.zones: + if zval.__getattr__('cpu_cache_enabled') : + GetZoneCacheSummary(zval, O) # EndMacro: showzcache # Macro: showzcachecpu @lldb_type_summary(['zone','zone_t']) -@header("{:^18s} {:40s} {:>10s} {:>10s}".format( +@header("{:18s} {:32s} {:<10s} {:<10s}".format( 'ZONE', 'NAME', 'CACHE_ELTS', 'CPU_INFO')) -def GetZoneCacheCPUSummary(zone): +def GetZoneCacheCPUSummary(zone, O): """ Summarize a zone's cache broken up per cpu params: zone: value - obj representing a zone in kernel 
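# Sketch of the per-CPU slot arithmetic behind IterateZPerCPU(), which the
# zone-cache summaries above iterate: slot i of a zpercpu variable sits one
# page past slot i-1, and OR-ing the top bit back in appears to restore the
# sign-extended kernel address (that reading is an assumption; the arithmetic
# mirrors the macro).  64-bit pointers and a 16K page are assumed below.
PTR_SIZE, PAGE_SIZE = 8, 16384
MANGLE = 1 << (8 * PTR_SIZE - 1)

def zpercpu_slot(base, cpu):
    return (base | MANGLE) + cpu * PAGE_SIZE

print('{:#x}'.format(zpercpu_slot(0x0000001234000000, 2)))  # -> 0x8000001234008000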
returns: str - summary of the zone's per CPU cache contents """ - out_string = "" - format_string = '{:#018x} {:40s} {:10d} {cpuinfo:s}' + format_string = '{:#018x} {:32s} {:10d} {cpuinfo:s}' cache_elem_count = 0 cpu_info = "" per_cpu_count = 0 mag_capacity = kern.GetGlobalVariable('magazine_element_count') depot_capacity = kern.GetGlobalVariable('depot_element_count') - if zone.__getattr__('cpu_cache_enabled') : - for i in range(0, kern.globals.machine_info.physical_cpu): - if i != 0: + i = 0 + for cache in IterateZPerCPU(zone.zcache.zcc_pcpu, 'struct zcc_per_cpu_cache *'): + if i is not 0: cpu_info += ", " - cache = zone.zcache[0].zcc_per_cpu_caches[i] per_cpu_count = cache.current.zcc_magazine_index per_cpu_count += cache.previous.zcc_magazine_index cache_elem_count += per_cpu_count cpu_info += "CPU {:d}: {:5}".format(i,per_cpu_count) - if zone.zcache[0].zcc_depot_index != -1: - cache_elem_count += zone.zcache[0].zcc_depot_index * mag_capacity - - out_string += format_string.format(zone, zone.zone_name, cache_elem_count,cpuinfo = cpu_info) + i += 1 + cache_elem_count += zone.zcache.zcc_depot.zcc_depot_index * mag_capacity - return out_string + print O.format(format_string, zone, ZoneName(zone), cache_elem_count,cpuinfo = cpu_info) -@lldb_command('showzcachecpu') -def ZcacheCPUPrint(cmd_args=None): +@lldb_command('showzcachecpu', fancy=True) +def ZcacheCPUPrint(cmd_args=None, cmd_options={}, O=None): """ Routine to print a summary listing of all the kernel zones cache contents All columns are printed in decimal """ global kern - print GetZoneCacheCPUSummary.header - for zval in kern.zones: - if zval.__getattr__('cpu_cache_enabled') : - print GetZoneCacheCPUSummary(zval) + with O.table(GetZoneCacheCPUSummary.header): + for zval in kern.zones: + if zval.__getattr__('cpu_cache_enabled'): + GetZoneCacheCPUSummary(zval, O) # EndMacro: showzcachecpu # Macro: zprint +def GetZone(zone_val, marks): + """ Internal function which gets a phython dictionary containing important zone information. 
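# Sketch of the cached-element accounting used by the zone cache summaries
# above (and reused by GetZone() in the zprint hunk): every CPU contributes
# the fill levels of its current and previous magazines, and the depot
# contributes whole magazines of magazine_element_count elements each.
def cached_elements(per_cpu_levels, depot_magazines, magazine_element_count):
    # per_cpu_levels: list of (current_index, previous_index) pairs, one per CPU
    return (sum(cur + prev for cur, prev in per_cpu_levels)
            + depot_magazines * magazine_element_count)

print(cached_elements([(3, 8), (0, 8)], 2, 8))   # -> 35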
+ params: + zone_val: value - obj representing a zone in kernel + returns: + zone - python dictionary with zone stats + """ + pagesize = kern.globals.page_size + zone = {} + zone["free_size"] = zone_val.countfree * zone_val.pcpu_elem_size + mag_capacity = kern.GetGlobalVariable('magazine_element_count') + zone["page_count"] = unsigned(zone_val.page_count) + zone["allfree_page_count"] = unsigned(zone_val.allfree_page_count) + + zone["size"] = zone_val.page_count * pagesize + zone["used_size"] = zone["size"] - zone["free_size"] + zone["element_count"] = zone_val.countavail - zone_val.countfree + + if zone_val.percpu: + zone["allocation_size"] = unsigned(pagesize) + zone["allocation_ncpu"] = unsigned(zone_val.alloc_pages) + else: + zone["allocation_size"] = unsigned(zone_val.alloc_pages * pagesize) + zone["allocation_ncpu"] = 1 + zone["allocation_count"] = zone["allocation_size"] / zone_val.z_elem_size + zone["allocation_waste"] = (zone["allocation_size"] % zone_val.z_elem_size) * zone["allocation_ncpu"] + + if not zone_val.__getattr__("z_self") : + zone["destroyed"] = True + else: + zone["destroyed"] = False + + for mark in marks: + if zone_val.__getattr__(mark[0]): + zone[mark[0]] = True + else: + zone[mark[0]] = False + + cache_elem_count = 0 + if zone_val.__getattr__('cpu_cache_enabled') : + for cache in IterateZPerCPU(zone_val.zcache.zcc_pcpu, 'struct zcc_per_cpu_cache *'): + cache_elem_count += cache.current.zcc_magazine_index + cache_elem_count += cache.previous.zcc_magazine_index + cache_elem_count += zone_val.zcache.zcc_depot.zcc_depot_index * mag_capacity + zone["cache_element_count"] = cache_elem_count + zone["name"] = ZoneName(zone_val) + if zone_val.exhaustible: + zone["exhaustible"] = True + else: + zone["exhaustible"] = False + + zone["sequester_page_count"] = unsigned(zone_val.sequester_page_count) + zone["page_count_max"] = unsigned(zone_val.page_count_max) + + return zone + + @lldb_type_summary(['zone','zone_t']) -@header(("{:<18s} {:_^23s} {:_^24s} {:_^13s} {:_^31s}\n"+ -"{:<18s} {:>11s} {:>11s} {:>8s} {:>7s} {:>7s} {:>6s} {:>6s} {:>7s} {:>5s} {:>3s} {:>5s} {:>7s} {:<15s} {:<20s}").format( +@header(("{:<18s} {:_^35s} {:_^24s} {:_^13s} {:_^28s}\n"+ +"{:<18s} {:>11s} {:>11s} {:>11s} {:>8s} {:>7s} {:>7s} {:>6s} {:>6s} {:>8s} {:>6s} {:>5s} {:>7s} {:<18s} {:<20s}").format( '', 'SIZE (bytes)', 'ELEMENTS (#)', 'PAGES', 'ALLOC CHUNK CONFIG', -'ZONE', 'ALLOC', 'FREE', 'ALLOC', 'FREE', 'CACHE', 'COUNT', 'FREE', 'SIZE', 'ELTS', 'PGS', 'WASTE', 'ELT_SZ', 'FLAGS', 'NAME')) -def GetZoneSummary(zone): +'ZONE', 'TOTAL', 'ALLOC', 'FREE', 'ALLOC', 'FREE', 'CACHE', 'COUNT', 'FREE', 'SIZE (P)', 'ELTS', 'WASTE', 'ELT_SZ', 'FLAGS', 'NAME')) +def GetZoneSummary(zone_val, marks, stats): """ Summarize a zone with important information. 
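# Worked example of the size accounting GetZone() above derives; the formulas
# are read straight off the function, the concrete numbers are made up.
def zone_sizes(page_count, page_size, countfree, countavail, pcpu_elem_size):
    size = page_count * page_size
    free_size = countfree * pcpu_elem_size
    return {'size': size, 'free_size': free_size,
            'used_size': size - free_size,
            'element_count': countavail - countfree}

sizes = zone_sizes(4, 16384, 10, 100, 128)
print('{size} {used_size} {free_size} {element_count}'.format(**sizes))
# -> 65536 64256 1280 90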
See help zprint for description of each field params: - zone: value - obj representing a zone in kernel + zone_val: value - obj representing a zone in kernel returns: str - summary of the zone """ - out_string = "" - format_string = '{zone:#018x} {zone.cur_size:11,d} {free_size:11,d} {zone.count:8,d} {zone.countfree:7,d} {cache_elem_count:7,d} {zone.page_count:6,d} {zone.count_all_free_pages:6,d} {zone.alloc_size:7,d} {alloc_count:5,d} {alloc_pages:3,d} {alloc_waste:5,d} {zone.elem_size:7,d} {markings:<15s} {zone.zone_name:<20s} ' pagesize = kern.globals.page_size + out_string = "" + zone = GetZone(zone_val, marks) - free_size = zone.countfree * zone.elem_size - mag_capacity = kern.GetGlobalVariable('magazine_element_count') - - alloc_pages = zone.alloc_size / pagesize - alloc_count = zone.alloc_size / zone.elem_size - alloc_waste = zone.alloc_size % zone.elem_size - - marks = [ - ["collectable", "C"], - ["expandable", "X"], - ["noencrypt", "$"], - ["caller_acct", "@"], - ["exhaustible", "H"], - ["allows_foreign", "F"], - ["async_prio_refill", "R"], - ["no_callout", "O"], - ["zleak_on", "L"], - ["doing_alloc_without_vm_priv", "A"], - ["doing_alloc_with_vm_priv", "S"], - ["waiting", "W"], - ["cpu_cache_enabled", "E"] - ] - if kern.arch == 'x86_64': - marks.append(["gzalloc_exempt", "M"]) - marks.append(["alignment_required", "N"]) + format_string = '{zone:#018x} {cur_size:11,d} {used_size:11,d} {free_size:11,d} ' + format_string += '{count_elts:8,d} {zone.countfree:7,d} {cache_elem_count:7,d} ' + format_string += '{zone.page_count:6,d} {zone.allfree_page_count:6,d} ' + format_string += '{alloc_size_kb:3,d}K ({zone.alloc_pages:d}) {alloc_count:6,d} {alloc_waste:5,d} {zone.pcpu_elem_size:7,d} ' + format_string += '{markings:<18s} {zone_name:<20s}' markings="" - if not zone.__getattr__("zone_valid") : + if zone["destroyed"]: markings+="I" + for mark in marks: - if zone.__getattr__(mark[0]) : - markings+=mark[1] + if zone[mark[0]]: + markings += mark[1] else: markings+=" " - cache_elem_count = 0 - if zone.__getattr__('cpu_cache_enabled') : - for i in range(0, kern.globals.machine_info.physical_cpu): - cache = zone.zcache[0].zcc_per_cpu_caches[i] - cache_elem_count += cache.current.zcc_magazine_index - cache_elem_count += cache.previous.zcc_magazine_index - if zone.zcache[0].zcc_depot_index != -1: - cache_elem_count += zone.zcache[0].zcc_depot_index * mag_capacity - out_string += format_string.format(zone=zone, free_size=free_size, alloc_count=alloc_count, - alloc_pages=alloc_pages, alloc_waste=alloc_waste, cache_elem_count=cache_elem_count, markings=markings) + alloc_size_kb = zone["allocation_size"] / 1024 + out_string += format_string.format(zone=zone_val, free_size=zone["free_size"], used_size=zone["used_size"], + cur_size=zone["size"], count_elts=zone["element_count"], cache_elem_count=zone["cache_element_count"], + alloc_count=zone["allocation_count"], alloc_size_kb=alloc_size_kb, alloc_waste=zone["allocation_waste"], + markings=markings, zone_name=zone["name"]) + + if zone["exhaustible"] : + out_string += " (max: {:d})".format(zone["page_count_max"] * pagesize) - if zone.exhaustible : - out_string += "(max: {:d})".format(zone.max_size) + if zone["sequester_page_count"] != 0 : + out_string += " (sequester: {:d})".format(zone["sequester_page_count"]) + + stats["cur_size"] += zone["size"] + stats["used_size"] += zone["used_size"] + stats["free_size"] += zone["free_size"] + stats["cur_pages"] += zone["page_count"] + stats["free_pages"] += zone["allfree_page_count"] + stats["seq_pages"] += 
zone["sequester_page_count"] return out_string -@lldb_command('zprint', fancy=True) +@lldb_command('zprint', "J", fancy=True) def Zprint(cmd_args=None, cmd_options={}, O=None): """ Routine to print a summary listing of all the kernel zones + usage: zprint -J + Output json All columns are printed in decimal Legend: C - collectable + D - destructible X - expandable $ - not encrypted during hibernation - @ - allocs and frees are accounted to caller process for KPRVT H - exhaustible - F - allows foreign memory (memory not allocated from zone_map) + F - allows foreign memory (memory not allocated from any zone map) M - gzalloc will avoid monitoring this zone R - will be refilled when below low water mark O - does not allow refill callout to fill zone on noblock allocation @@ -423,9 +653,53 @@ def Zprint(cmd_args=None, cmd_options={}, O=None): I - zone was destroyed and is no longer valid """ global kern - with O.table(GetZoneSummary.header): + + marks = [ + ["collectable", "C"], + ["destructible", "D"], + ["expandable", "X"], + ["noencrypt", "$"], + ["exhaustible", "H"], + ["allows_foreign", "F"], + ["prio_refill_count", "R"], + ["no_callout", "O"], + ["zleak_on", "L"], + ["expanding_no_vm_priv", "A"], + ["expanding_vm_priv", "S"], + ["waiting", "W"], + ["cpu_cache_enabled", "E"], + ["gzalloc_exempt", "M"], + ["alignment_required", "N"], + ["va_sequester", "!"] + ] + stats = { + "cur_size": 0, "used_size": 0, "free_size": 0, + "cur_pages": 0, "free_pages": 0, "seq_pages": 0 + } + + print_json = False + if "-J" in cmd_options: + print_json = True + + if print_json: + zones = [] for zval in kern.zones: - print GetZoneSummary(zval) + if zval.z_self: + zones.append(GetZone(zval, marks)) + + print json.dumps(zones) + else: + with O.table(GetZoneSummary.header): + for zval in kern.zones: + if zval.z_self: + print GetZoneSummary(zval, marks, stats) + + format_string = '{VT.Bold}{name:19s} {stats[cur_size]:11,d} {stats[used_size]:11,d} {stats[free_size]:11,d} ' + format_string += ' ' + format_string += '{stats[cur_pages]:6,d} {stats[free_pages]:6,d}{VT.EndBold} ' + format_string += '(sequester: {VT.Bold}{stats[seq_pages]:,d}{VT.EndBold})' + print O.format(format_string, name="TOTALS", filler="", stats=stats) + @xnudebug_test('test_zprint') def TestZprint(kernel_target, config, lldb_obj, isConnected ): @@ -460,12 +734,12 @@ def ShowZfreeListHeader(zone): """ scaled_factor = (unsigned(kern.globals.zp_factor) + - (unsigned(zone.elem_size) >> unsigned(kern.globals.zp_scale))) + (unsigned(zone.z_elem_size) >> unsigned(kern.globals.zp_scale))) out_str = "" out_str += "{0: <9s} {1: <12s} {2: <18s} {3: <18s} {4: <6s}\n".format('ELEM_SIZE', 'COUNT', 'NCOOKIE', 'PCOOKIE', 'FACTOR') out_str += "{0: <9d} {1: <12d} 0x{2:0>16x} 0x{3:0>16x} {4: <2d}/{5: <2d}\n\n".format( - zone.elem_size, zone.count, kern.globals.zp_nopoison_cookie, kern.globals.zp_poisoned_cookie, zone.zp_count, scaled_factor) + zone.z_elem_size, zone.countavail - zone.countfree, kern.globals.zp_nopoison_cookie, kern.globals.zp_poisoned_cookie, zone.zp_count, scaled_factor) out_str += "{0: <7s} {1: <18s} {2: <18s} {3: <18s} {4: <18s} {5: <18s} {6: <14s}\n".format( 'NUM', 'ELEM', 'NEXT', 'BACKUP', '^ NCOOKIE', '^ PCOOKIE', 'POISON (PREV)') print out_str @@ -485,7 +759,7 @@ def ShowZfreeListChain(zone, zfirst, zlimit): znext = dereference(Cast(current, 'vm_offset_t *')) znext = (unsigned(znext) ^ unsigned(kern.globals.zp_nopoison_cookie)) znext = kern.GetValueFromAddress(znext, 'vm_offset_t *') - backup_ptr = 
kern.GetValueFromAddress((unsigned(Cast(current, 'vm_offset_t')) + unsigned(zone.elem_size) - sizeof('vm_offset_t')), 'vm_offset_t *') + backup_ptr = kern.GetValueFromAddress((unsigned(Cast(current, 'vm_offset_t')) + unsigned(zone.z_elem_size) - sizeof('vm_offset_t')), 'vm_offset_t *') backup_val = dereference(backup_ptr) n_unobfuscated = (unsigned(backup_val) ^ unsigned(kern.globals.zp_nopoison_cookie)) p_unobfuscated = (unsigned(backup_val) ^ unsigned(kern.globals.zp_poisoned_cookie)) @@ -497,11 +771,18 @@ def ShowZfreeListChain(zone, zfirst, zlimit): if n_unobfuscated != unsigned(znext): poison_str = "INVALID" print "{0: <7d} 0x{1:0>16x} 0x{2:0>16x} 0x{3:0>16x} 0x{4:0>16x} 0x{5:0>16x} {6: <14s}\n".format( - ShowZfreeList.elts_found, unsigned(current), unsigned(znext), unsigned(backup_val), n_unobfuscated, p_unobfuscated, poison_str) + ShowZfreeList.elts_found, unsigned(current), unsigned(znext), + unsigned(backup_val), n_unobfuscated, p_unobfuscated, poison_str) if unsigned(znext) == 0: break current = Cast(znext, 'void *') +def ZoneIteratePageQueue(page): + while page.packed_address: + meta = ZoneMeta(page.packed_address, isPageIndex=True) + yield meta + page = meta.meta.zm_page_next + @static_var('elts_found',0) @static_var('last_poisoned',0) @lldb_command('showzfreelist') @@ -523,25 +804,13 @@ def ShowZfreeList(cmd_args=None): zlimit = ArgumentStringToInt(cmd_args[1]) ShowZfreeListHeader(zone) - if unsigned(zone.allows_foreign) == 1: - for free_page_meta in IterateQueue(zone.pages.any_free_foreign, 'struct zone_page_metadata *', 'pages'): + for head in [zone.pages_any_free_foreign, zone.pages_intermediate, zone.pages_all_free]: + for free_page_meta in ZoneIteratePageQueue(head): if ShowZfreeList.elts_found == zlimit: break - zfirst = kern.GetValueFromAddress(GetFreeList(free_page_meta), 'void *') - if unsigned(zfirst) != 0: + zfirst = free_page_meta.getFreeList() + if zfirst != 0: ShowZfreeListChain(zone, zfirst, zlimit) - for free_page_meta in IterateQueue(zone.pages.intermediate, 'struct zone_page_metadata *', 'pages'): - if ShowZfreeList.elts_found == zlimit: - break - zfirst = kern.GetValueFromAddress(GetFreeList(free_page_meta), 'void *') - if unsigned(zfirst) != 0: - ShowZfreeListChain(zone, zfirst, zlimit) - for free_page_meta in IterateQueue(zone.pages.all_free, 'struct zone_page_metadata *', 'pages'): - if ShowZfreeList.elts_found == zlimit: - break - zfirst = kern.GetValueFromAddress(GetFreeList(free_page_meta), 'void *') - if unsigned(zfirst) != 0: - ShowZfreeListChain(zone, zfirst, zlimit) if ShowZfreeList.elts_found == zlimit: print "Stopped at {0: ] [-V] + + Use -N to only dump the value for a given CPU number + Use -V to dump the values of the variables after their addresses + """ + + if not cmd_args: + raise ArgumentError("No arguments passed") + + cpu = None + ncpu = kern.globals.zpercpu_early_count + pcpu_base = kern.globals.percpu_base + + if "-N" in cmd_options: + cpu = unsigned(int(cmd_options["-N"])) + if cpu >= unsigned(ncpu): + raise ArgumentError("Invalid cpu {d}".format(cpu)) + + var = addressof(kern.GetGlobalVariable('percpu_slot_' + cmd_args[0])) + ty = var.GetSBValue().GetTypeName() + + r = range(0, ncpu) + if cpu: + r = range(cpu, cpu + 1) + + def PCPUSlot(pcpu_var, i): + if i == 0: + return pcpu_var + addr = unsigned(pcpu_var) + unsigned(pcpu_base.start) + (i - 1) * unsigned(pcpu_base.size) + return kern.GetValueFromAddress(addr, pcpu_var) + + with O.table("{:<4s} {:<20s}".format("CPU", "address")): + for i in r: + print O.format("{:<4d} ({:s}){:#x}", 
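# Toy model (hypothetical types, not the patch's ZoneMeta) of why the
# ZoneIteratePageQueue generator above simplifies ShowZfreeList: the page
# queues can be walked with one uniform loop instead of three copies of it.
class Meta(object):
    def __init__(self, first_free, next_packed):
        self.first_free = first_free
        self.next_packed = next_packed

PAGES = {1: Meta(0x1000, 2), 2: Meta(0x2000, 0)}   # packed index -> metadata

def iterate_page_queue(packed):
    while packed:
        meta = PAGES[packed]          # stands in for ZoneMeta(packed, isPageIndex=True)
        yield meta
        packed = meta.next_packed

for head in (1, 0):                   # two queue heads; 0 means "empty"
    print [hex(m.first_free) for m in iterate_page_queue(head)]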
i, ty, PCPUSlot(var, i)) + + if not "-V" in cmd_options: + return + + for i in r: + with O.table("CPU {:d}".format(i)): + print dereference(PCPUSlot(var, i)) + +#EndMacro: showpcpu + def GetBtlogBacktrace(depth, zstack_record): """ Helper routine for getting a BT Log record backtrace stack. params: @@ -1229,7 +1547,7 @@ def ShowAllVMStats(cmd_args=None): hdr_format = "{:>6s} {:>10s} {:>10s} {:>10s} {:>10s} {:>10s} {:>10s} {:>10s} {:>10s} {:>10s} {:>10s} {:>10s} {:>10s} {:<20s} {:1s}" print hdr_format.format('#ents', 'wired', 'vsize', 'rsize', 'NEW RSIZE', 'max rsize', 'internal', 'external', 'reusable', 'compressed', 'compressed', 'compressed', 'pid', 'command', '') print hdr_format.format('', '(pages)', '(pages)', '(pages)', '(pages)', '(pages)', '(pages)', '(pages)', '(pages)', '(current)', '(peak)', '(lifetime)', '', '', '') - entry_format = "{m.hdr.nentries: >6d} {s.wired_count: >10d} {vsize: >10d} {s.resident_count: >10d} {s.new_resident_count: >10d} {s.resident_max: >10d} {s.internal: >10d} {s.external: >10d} {s.reusable: >10d} {s.compressed: >10d} {s.compressed_peak: >10d} {s.compressed_lifetime: >10d} {p.p_pid: >10d} {p.p_comm: <20s} {s.error}" + entry_format = "{m.hdr.nentries: >6d} {s.wired_count: >10d} {vsize: >10d} {s.resident_count: >10d} {s.new_resident_count: >10d} {s.resident_max: >10d} {s.internal: >10d} {s.external: >10d} {s.reusable: >10d} {s.compressed: >10d} {s.compressed_peak: >10d} {s.compressed_lifetime: >10d} {p.p_pid: >10d} {0: <32s} {s.error}" for task in kern.tasks: proc = Cast(task.bsd_info, 'proc *') @@ -1261,7 +1579,7 @@ def ShowAllVMStats(cmd_args=None): if vmstats.new_resident_count +vmstats.reusable != vmstats.resident_count: vmstats.error += '*' - print entry_format.format(p=proc, m=vmmap, vsize=(unsigned(vmmap.size) / page_size), t=task, s=vmstats) + print entry_format.format(GetProcName(proc), p=proc, m=vmmap, vsize=(unsigned(vmmap.size) / page_size), t=task, s=vmstats) def ShowTaskVMEntries(task, show_pager_info, show_all_shadows): @@ -1317,17 +1635,17 @@ def ShowMapVME(cmd_args=None): return None @lldb_type_summary(['_vm_map *', 'vm_map_t']) -@header("{0: <20s} {1: <20s} {2: <20s} {3: >5s} {4: >5s} {5: <20s} {6: <20s}".format("vm_map", "pmap", "vm_size", "#ents", "rpage", "hint", "first_free")) +@header("{0: <20s} {1: <20s} {2: <20s} {3: >5s} {4: >5s} {5: <20s} {6: <20s} {7: <7s}".format("vm_map", "pmap", "vm_size", "#ents", "rpage", "hint", "first_free", "pgshift")) def GetVMMapSummary(vmmap): """ Display interesting bits from vm_map struct """ out_string = "" - format_string = "{0: <#020x} {1: <#020x} {2: <#020x} {3: >5d} {4: >5d} {5: <#020x} {6: <#020x}" + format_string = "{0: <#020x} {1: <#020x} {2: <#020x} {3: >5d} {4: >5d} {5: <#020x} {6: <#020x} {7: >7d}" vm_size = uint64_t(vmmap.size).value resident_pages = 0 if vmmap.pmap != 0: resident_pages = int(vmmap.pmap.stats.resident_count) first_free = 0 if int(vmmap.holelistenabled) == 0: first_free = vmmap.f_s._first_free - out_string += format_string.format(vmmap, vmmap.pmap, vm_size, vmmap.hdr.nentries, resident_pages, vmmap.hint, first_free) + out_string += format_string.format(vmmap, vmmap.pmap, vm_size, vmmap.hdr.nentries, resident_pages, vmmap.hint, first_free, vmmap.hdr.page_shift) return out_string @lldb_type_summary(['vm_map_entry']) @@ -2063,7 +2381,7 @@ def ShowProcLocks(cmd_args=None): seen = 0 while count <= fd_lastfile: if fd_ofiles[count]: - fglob = fd_ofiles[count].f_fglob + fglob = fd_ofiles[count].fp_glob fo_type = fglob.fg_ops.fo_type if fo_type == 1: fg_data = fglob.fg_data @@ 
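# Worked example (made-up base and stride) of the PCPUSlot() math used by
# showpcpu above: CPU 0 reads the static slot directly, and CPU n (n > 0)
# reads at slot + percpu_base.start + (n - 1) * percpu_base.size.
PERCPU_BASE_START = 0x100000    # hypothetical percpu_base.start
PERCPU_BASE_SIZE  = 0x20000     # hypothetical percpu_base.size

def pcpu_slot_addr(slot_addr, cpu):
    if cpu == 0:
        return slot_addr
    return slot_addr + PERCPU_BASE_START + (cpu - 1) * PERCPU_BASE_SIZE

for cpu in range(3):
    print "CPU {:d}: {:#x}".format(cpu, pcpu_slot_addr(0xfffffe0007654320, cpu))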
-2087,13 +2405,14 @@ def ShowProcLocks(cmd_args=None): # EndMacro: showproclocks @lldb_type_summary(['vnode_t', 'vnode *']) -@header("{0: <20s} {1: >8s} {2: >8s} {3: <20s} {4: <6s} {5: <20s} {6: <6s} {7: <6s} {8: <35s}".format('vnode', 'usecount', 'iocount', 'v_data', 'vtype', 'parent', 'mapped', 'cs_version', 'name')) +@header("{0: <20s} {1: >8s} {2: >9s} {3: >8s} {4: <20s} {5: <6s} {6: <20s} {7: <6s} {8: <6s} {9: <35s}".format('vnode', 'usecount', 'kusecount', 'iocount', 'v_data', 'vtype', 'parent', 'mapped', 'cs_version', 'name')) def GetVnodeSummary(vnode): """ Get a summary of important information out of vnode """ out_str = '' - format_string = "{0: <#020x} {1: >8d} {2: >8d} {3: <#020x} {4: <6s} {5: <#020x} {6: <6s} {7: <6s} {8: <35s}" + format_string = "{0: <#020x} {1: >8d} {2: >8d} {3: >8d} {4: <#020x} {5: <6s} {6: <#020x} {7: <6s} {8: <6s} {9: <35s}" usecount = int(vnode.v_usecount) + kusecount = int(vnode.v_kusecount) iocount = int(vnode.v_iocount) v_data_ptr = int(hex(vnode.v_data), 16) vtype = int(vnode.v_type) @@ -2118,7 +2437,7 @@ def GetVnodeSummary(vnode): mapped = '1' else: mapped = '0' - out_str += format_string.format(vnode, usecount, iocount, v_data_ptr, vtype_str, parent_ptr, mapped, csblob_version, name) + out_str += format_string.format(vnode, usecount, kusecount, iocount, v_data_ptr, vtype_str, parent_ptr, mapped, csblob_version, name) return out_str @lldb_command('showallvnodes') @@ -2242,7 +2561,7 @@ def ShowProcVnodes(cmd_args=None): fpp = dereference(fpptr) fproc = kern.GetValueFromAddress(int(fpp), 'fileproc *') if int(fproc) != 0: - fglob = dereference(fproc).f_fglob + fglob = dereference(fproc).fp_glob flags = "" if (int(fglob) != 0) and (int(fglob.fg_ops.fo_type) == 1): if (fdptr.fd_ofileflags[count] & 1): flags += 'E' @@ -2811,6 +3130,45 @@ def ShowMapVME(cmd_args=None, cmd_options={}): map = kern.GetValueFromAddress(cmd_args[0], 'vm_map_t') showmapvme(map, start_vaddr, end_vaddr, show_pager_info, show_all_shadows, reverse_order, show_rb_tree) +@lldb_command("showmapcopyvme", "A:B:F:PRST") +def ShowMapCopyVME(cmd_args=None, cmd_options={}): + """Routine to print out info about the specified vm_map_copy and its vm entries + usage: showmapcopyvme [-A start] [-B end] [-S] [-P] + Use -A flag to start at virtual address + Use -B flag to end at virtual address + Use -F flag to find just the VME containing the given VA + Use -S flag to show VM object shadow chains + Use -P flag to show pager info (mapped file, compressed pages, ...) 
+ Use -R flag to reverse order + Use -T to show red-black tree pointers + """ + if cmd_args == None or len(cmd_args) < 1: + print "Invalid argument.", ShowMapVME.__doc__ + return + show_pager_info = False + show_all_shadows = False + show_rb_tree = False + start_vaddr = 0 + end_vaddr = 0 + reverse_order = False + if "-A" in cmd_options: + start_vaddr = unsigned(int(cmd_options['-A'], 16)) + if "-B" in cmd_options: + end_vaddr = unsigned(int(cmd_options['-B'], 16)) + if "-F" in cmd_options: + start_vaddr = unsigned(int(cmd_options['-F'], 16)) + end_vaddr = start_vaddr + if "-P" in cmd_options: + show_pager_info = True + if "-S" in cmd_options: + show_all_shadows = True + if "-R" in cmd_options: + reverse_order = True + if "-T" in cmd_options: + show_rb_tree = True + map = kern.GetValueFromAddress(cmd_args[0], 'vm_map_copy_t') + showmapcopyvme(map, start_vaddr, end_vaddr, show_pager_info, show_all_shadows, reverse_order, show_rb_tree) + @lldb_command("showvmobject", "A:B:PRST") def ShowVMObject(cmd_args=None, cmd_options={}): """Routine to print out a VM object and its shadow chain @@ -2878,13 +3236,13 @@ def showmapvme(map, start_vaddr, end_vaddr, show_pager_info, show_all_shadows, r rsize = 0 if map.pmap != 0: rsize = int(map.pmap.stats.resident_count) - print "{:<18s} {:<18s} {:<18s} {:>10s} {:>18s} {:>18s}:{:<18s}".format("vm_map","pmap","size","#ents","rsize","start","end") - print "{: <#018x} {: <#018x} {:#018x} {:>10d} {:>18d} {:#018x}:{:#018x}".format(map,map.pmap,unsigned(map.size),map.hdr.nentries,rsize,map.hdr.links.start,map.hdr.links.end) + print "{:<18s} {:<18s} {:<18s} {:>10s} {:>18s} {:>18s}:{:<18s} {:<7s}".format("vm_map","pmap","size","#ents","rsize","start","end","pgshift") + print "{: <#018x} {: <#018x} {:#018x} {:>10d} {:>18d} {:#018x}:{:#018x} {:>7d}".format(map,map.pmap,unsigned(map.size),map.hdr.nentries,rsize,map.hdr.links.start,map.hdr.links.end,map.hdr.page_shift) showmaphdrvme(map.hdr, map.pmap, start_vaddr, end_vaddr, show_pager_info, show_all_shadows, reverse_order, show_rb_tree) def showmapcopyvme(mapcopy, start_vaddr=0, end_vaddr=0, show_pager_info=True, show_all_shadows=True, reverse_order=False, show_rb_tree=False): - print "{:<18s} {:<18s} {:<18s} {:>10s} {:>18s} {:>18s}:{:<18s}".format("vm_map_copy","pmap","size","#ents","rsize","start","end") - print "{: <#018x} {:#018x} {:#018x} {:>10d} {:>18d} {:#018x}:{:#018x}".format(mapcopy,0,0,mapcopy.c_u.hdr.nentries,0,mapcopy.c_u.hdr.links.start,mapcopy.c_u.hdr.links.end) + print "{:<18s} {:<18s} {:<18s} {:>10s} {:>18s} {:>18s}:{:<18s} {:<7s}".format("vm_map_copy","offset","size","#ents","rsize","start","end","pgshift") + print "{: <#018x} {:#018x} {:#018x} {:>10d} {:>18d} {:#018x}:{:#018x} {:>7d}".format(mapcopy,mapcopy.offset,mapcopy.size,mapcopy.c_u.hdr.nentries,0,mapcopy.c_u.hdr.links.start,mapcopy.c_u.hdr.links.end,mapcopy.c_u.hdr.page_shift) showmaphdrvme(mapcopy.c_u.hdr, 0, start_vaddr, end_vaddr, show_pager_info, show_all_shadows, reverse_order, show_rb_tree) def showmaphdrvme(maphdr, pmap, start_vaddr, end_vaddr, show_pager_info, show_all_shadows, reverse_order, show_rb_tree): @@ -2931,8 +3289,6 @@ def showmaphdrvme(maphdr, pmap, start_vaddr, end_vaddr, show_pager_info, show_al object_str = "IPC_KERNEL_COPY_MAP" elif object == kern.globals.kalloc_map: object_str = "KALLOC_MAP" - elif object == kern.globals.zone_map: - object_str = "ZONE_MAP" elif hasattr(kern.globals, 'compressor_map') and object == kern.globals.compressor_map: object_str = "COMPRESSOR_MAP" elif hasattr(kern.globals, 'gzalloc_map') and 
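# Small sketch of the address-range option handling ShowMapCopyVME uses above:
# -A/-B bound the scanned range and -F collapses it to the single VME that
# contains one virtual address. cmd_options is the usual lldbmacro dict.
def parse_range(cmd_options):
    start = int(cmd_options.get("-A", "0"), 16)
    end = int(cmd_options.get("-B", "0"), 16)
    if "-F" in cmd_options:
        start = int(cmd_options["-F"], 16)
        end = start
    return start, end

print map(hex, parse_range({"-A": "0x1000", "-B": "0x9000"}))   # ['0x1000', '0x9000']
print map(hex, parse_range({"-F": "0x5000"}))                   # ['0x5000', '0x5000']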
object == kern.globals.gzalloc_map: @@ -3081,6 +3437,7 @@ FixedTags = { 25: "VM_KERN_MEMORY_REASON", 26: "VM_KERN_MEMORY_SKYWALK", 27: "VM_KERN_MEMORY_LTABLE", + 28: "VM_KERN_MEMORY_HV", 255:"VM_KERN_MEMORY_ANY", } @@ -3104,7 +3461,7 @@ def GetVMKernName(tag): return (kern.Symbolicate(site), "") return ("", "") -@lldb_command("showvmtags", "AS") +@lldb_command("showvmtags", "ASJ") def showvmtags(cmd_args=None, cmd_options={}): """Routine to print out info about kernel wired page allocations usage: showvmtags @@ -3113,13 +3470,19 @@ def showvmtags(cmd_args=None, cmd_options={}): also iterates kernel object pages individually - slow. usage: showvmtags -A show all tags, even tags that have no wired count + usage: showvmtags -J + Output json """ slow = False + print_json = False if "-S" in cmd_options: slow = True all_tags = False if "-A" in cmd_options: all_tags = True + if "-J" in cmd_options: + print_json = True + page_size = unsigned(kern.globals.page_size) nsites = unsigned(kern.globals.vm_allocation_tag_highest) + 1 tagcounts = [0] * nsites @@ -3143,29 +3506,55 @@ def showvmtags(cmd_args=None, cmd_options={}): total = 0 totalmapped = 0 - print " vm_allocation_tag_highest: {:<7d} ".format(nsites - 1) - print " {:<7s} {:>7s} {:>7s} {:>7s} {:<50s}".format("tag.kmod", "peak", "size", "mapped", "name") + tags = [] for tag in range(nsites): if all_tags or tagcounts[tag] or tagmapped[tag]: + current = {} total += tagcounts[tag] totalmapped += tagmapped[tag] (sitestr, tagstr) = GetVMKernName(tag) - site = kern.globals.vm_allocation_sites[tag] - print " {:>3d}{:<4s} {:>7d}K {:>7d}K {:>7d}K {:<50s}".format(tag, tagstr, tagpeaks[tag] / 1024, tagcounts[tag] / 1024, tagmapped[tag] / 1024, sitestr) + current["name"] = sitestr + current["size"] = tagcounts[tag] + current["mapped"] = tagmapped[tag] + current["peak"] = tagpeaks[tag] + current["tag"] = tag + current["tagstr"] = tagstr + current["subtotals"] = [] + site = kern.globals.vm_allocation_sites[tag] for sub in range(site.subtotalscount): alloctag = unsigned(site.subtotals[sub].tag) amount = unsigned(site.subtotals[sub].total) subsite = kern.globals.vm_allocation_sites[alloctag] if alloctag and subsite: - if ((subsite.flags & 0x007f) == 0): - kind_str = "named" - else: - kind_str = "from" (sitestr, tagstr) = GetVMKernName(alloctag) - print " {:>7s} {:>7s} {:>7s} {:>7d}K {:s} {:>3d}{:<4s} {:<50s}".format(" ", " ", " ", amount / 1024, kind_str, alloctag, tagstr, sitestr) + current["subtotals"].append({ + "amount": amount, + "flags": int(subsite.flags), + "tag": alloctag, + "tagstr": tagstr, + "sitestr": sitestr, + }) + tags.append(current) + + if print_json: + print json.dumps(tags) + else: + print " vm_allocation_tag_highest: {:<7d} ".format(nsites - 1) + print " {:<7s} {:>7s} {:>7s} {:>7s} {:<50s}".format("tag.kmod", "peak", "size", "mapped", "name") + for tag in tags: + if not tagstr: + tagstr = "" + print " {:>3d}{:<4s} {:>7d}K {:>7d}K {:>7d}K {:<50s}".format(tag["tag"], tag["tagstr"], tag["peak"] / 1024, tag["size"] / 1024, tag["mapped"] / 1024, tag["name"]) + for sub in tag["subtotals"]: + if ((sub["flags"] & 0x007f) == 0): + kind_str = "named" + else: + kind_str = "from" - print "Total: {:>7d}K {:>7d}K".format(total / 1024, totalmapped / 1024) + print " {:>7s} {:>7s} {:>7s} {:>7d}K {:s} {:>3d}{:<4s} {:<50s}".format(" ", " ", " ", sub["amount"] / 1024, kind_str, sub["tag"], sub["tagstr"], sub["sitestr"]) + + print "Total: {:>7d}K {:>7d}K".format(total / 1024, totalmapped / 1024) return None @@ -3305,19 +3694,21 @@ def 
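# Hedged sketch of the record shape showvmtags -J emits above: one dict per
# tag with a nested "subtotals" list, which json.dumps turns into machine-
# readable output while the non-JSON path renders the same dicts as the table.
# All values below are illustrative, not real allocation-site data.
import json

tags = [{
    "tag": 6, "tagstr": "", "name": "example-allocation-site",
    "size": 262144, "mapped": 262144, "peak": 524288,
    "subtotals": [{"tag": 13, "tagstr": ".1", "sitestr": "com.example.kext",
                   "amount": 65536, "flags": 0}],
}]
print json.dumps(tags)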
_vm_page_unpack_ptr(page): if page == 0 : return page - min_addr = kern.globals.vm_min_kernel_and_kext_address - ptr_shift = kern.globals.vm_packed_pointer_shift + params = kern.globals.vm_page_packing_params + ptr_shift = params.vmpp_shift ptr_mask = kern.globals.vm_packed_from_vm_pages_array_mask - #INTEL - min_addr = 0xffffff7f80000000 - #ARM - min_addr = 0x80000000 - #ARM64 - min_addr = 0xffffff8000000000 - if unsigned(page) & unsigned(ptr_mask) : + + # when no mask and shift on 64bit systems, we're working with real/non-packed pointers + if ptr_shift == 0 and ptr_mask == 0: + return page + + if unsigned(page) & unsigned(ptr_mask): masked_page = (unsigned(page) & ~ptr_mask) # can't use addressof(kern.globals.vm_pages[masked_page]) due to 32 bit limitation in SB bridge vm_pages_addr = unsigned(addressof(kern.globals.vm_pages[0])) element_size = unsigned(addressof(kern.globals.vm_pages[1])) - vm_pages_addr return (vm_pages_addr + masked_page * element_size) - return ((unsigned(page) << unsigned(ptr_shift)) + unsigned(min_addr)) + return vm_unpack_pointer(page, params) @lldb_command('calcvmpagehash') def CalcVMPageHash(cmd_args=None): @@ -3347,64 +3738,6 @@ def _calc_vm_page_hash(obj, off): return hash_id -def AddressIsFromZoneMap(addr): - zone_map_min_address = kern.GetGlobalVariable('zone_map_min_address') - zone_map_max_address = kern.GetGlobalVariable('zone_map_max_address') - if (unsigned(addr) >= unsigned(zone_map_min_address)) and (unsigned(addr) < unsigned(zone_map_max_address)): - return 1 - else: - return 0 - -def ElementOffsetInForeignPage(): - zone_element_alignment = 32 # defined in zalloc.c - zone_page_metadata_size = sizeof('struct zone_page_metadata') - if zone_page_metadata_size % zone_element_alignment == 0: - offset = zone_page_metadata_size - else: - offset = zone_page_metadata_size + (zone_element_alignment - (zone_page_metadata_size % zone_element_alignment)) - return unsigned(offset) - -def ElementStartAddrFromZonePageMetadata(page_metadata): - zone_metadata_region_min = kern.GetGlobalVariable('zone_metadata_region_min') - zone_map_min_address = kern.GetGlobalVariable('zone_map_min_address') - page_size = kern.GetGlobalVariable('page_size') - if AddressIsFromZoneMap(page_metadata): - page_index = (unsigned(page_metadata) - unsigned(zone_metadata_region_min)) / sizeof('struct zone_page_metadata') - element_start_addr = unsigned(zone_map_min_address) + unsigned(page_index * page_size) - else: - element_start_addr = unsigned(page_metadata) + unsigned(ElementOffsetInForeignPage()) - - return element_start_addr - -def ZonePageStartAddrFromZonePageMetadata(page_metadata): - zone_metadata_region_min = kern.GetGlobalVariable('zone_metadata_region_min') - zone_map_min_address = kern.GetGlobalVariable('zone_map_min_address') - page_size = kern.GetGlobalVariable('page_size') - - if AddressIsFromZoneMap(page_metadata): - page_index = (unsigned(page_metadata) - unsigned(zone_metadata_region_min)) / sizeof('struct zone_page_metadata') - zone_page_addr = unsigned(zone_map_min_address) + unsigned(page_index * page_size) - else: - zone_page_addr = unsigned(page_metadata) - - return unsigned(zone_page_addr) - -def CreateFreeElementsList(zone, first_free): - free_elements = [] - if unsigned(first_free) == 0: - return free_elements - current = first_free - while True: - free_elements.append(unsigned(current)) - next = dereference(Cast(current, 'vm_offset_t *')) - next = (unsigned(next) ^ unsigned(kern.globals.zp_nopoison_cookie)) - next = kern.GetValueFromAddress(next, 'vm_offset_t *') 
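# Sketch of the packed vm_page pointer decoding that _vm_page_unpack_ptr now
# drives from vm_page_packing_params (every constant below is made up): a value
# with the array-mask bit set indexes the vm_pages array, anything else is a
# shifted offset that vm_unpack_pointer() rebases to a kernel virtual address.
VM_PAGES_ADDR = 0xffffff8000400000   # hypothetical &vm_pages[0]
VM_PAGE_SIZE  = 0x60                 # hypothetical element size
ARRAY_MASK    = 1 << 31              # hypothetical vm_packed_from_vm_pages_array_mask
PTR_SHIFT     = 6                    # hypothetical vmpp_shift
PTR_BASE      = 0xffffff8000000000   # hypothetical unpack base

def unpack_vm_page(packed):
    if packed == 0:
        return 0
    if packed & ARRAY_MASK:
        return VM_PAGES_ADDR + (packed & ~ARRAY_MASK) * VM_PAGE_SIZE
    return PTR_BASE + (packed << PTR_SHIFT)

print hex(unpack_vm_page(ARRAY_MASK | 3))   # fourth entry of the vm_pages array
print hex(unpack_vm_page(0x1000))           # rebased, shifted pointer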
- if unsigned(next) == 0: - break; - current = Cast(next, 'void *') - - return free_elements - #Macro: showallocatedzoneelement @lldb_command('showallocatedzoneelement') def ShowAllocatedElementsInZone(cmd_args=None, cmd_options={}): @@ -3424,43 +3757,25 @@ def ShowAllocatedElementsInZone(cmd_args=None, cmd_options={}): #EndMacro: showallocatedzoneelement def FindAllocatedElementsInZone(zone): - page_size = kern.GetGlobalVariable('page_size') elements = [] - page_queues = ["any_free_foreign", "intermediate", "all_used"] - found_total = 0 - for queue in page_queues: - found_in_queue = 0 - if queue == "any_free_foreign" and unsigned(zone.allows_foreign) != 1: - continue + if not zone.z_self or zone.permanent: + return elements - for zone_page_metadata in IterateQueue(zone.pages.__getattr__(queue), 'struct zone_page_metadata *', 'pages'): - free_elements = [] - first_free_element = kern.GetValueFromAddress(GetFreeList(zone_page_metadata)) - free_elements = CreateFreeElementsList(zone, first_free_element) - - chunk_page_count = zone_page_metadata.page_count - element_addr_start = ElementStartAddrFromZonePageMetadata(zone_page_metadata) - zone_page_start = ZonePageStartAddrFromZonePageMetadata(zone_page_metadata) - next_page = zone_page_start + page_size - element_addr_end = zone_page_start + (chunk_page_count * page_size) - elem = unsigned(element_addr_start) - while elem < element_addr_end: - if elem not in free_elements: - elements.append(elem) - found_in_queue += 1 - elem += zone.elem_size + for head in [zone.pages_any_free_foreign, zone.pages_all_used_foreign, + zone.pages_intermediate, zone.pages_all_used]: - if queue == "any_free_foreign": - if (elem + zone.elem_size) >= next_page: - zone_page_start = unsigned((elem + page_size) & ~(page_size - 1)) - next_page = zone_page_start + page_size - elem = zone_page_start + unsigned(ElementOffsetInForeignPage()) + for meta in ZoneIteratePageQueue(head): + free_elements = set(meta.iterateFreeList()) - found_total += found_in_queue -# print "Found {0: " -def vm_min_kernel_and_kext_address(cmd_args=None): - if hasattr(kern.globals, 'vm_min_kernel_and_kext_address'): - return unsigned(kern.globals.vm_min_kernel_and_kext_address) - elif kern.arch == 'x86_64': - return unsigned(0xffffff7f80000000) - elif kern.arch == 'arm64': - return unsigned(0xffffff8000000000) - elif kern.arch == 'arm': - return unsigned(0x80000000) - else: - print "vm_min_kernel_and_kext_address(): unknown arch '{:s}'".format(kern.arch) - return unsigned(0) - def print_hex_data(data, begin_offset=0, desc=""): """ print on stdout "hexdump -C < data" like output params: @@ -4265,7 +4571,8 @@ def print_hex_data(data, begin_offset=0, desc=""): print "{:08x} {: <50s} |{: <16s}|".format(begin_offset + index - 16, hex_buf, char_buf) hex_buf = "" char_buf = "" - print "{:08x} {: <50s} |{: <16s}|".format(begin_offset + index - 16, hex_buf, char_buf) + if index % 16 != 0: + print "{:08x} {: <50s} |{: <16s}|".format(begin_offset + index - 16, hex_buf, char_buf) return @lldb_command('vm_scan_all_pages') @@ -4381,7 +4688,7 @@ def ShowAllVMNamedEntries(cmd_args=None): print 'vm_named_entry_list:{: <#018x} vm_named_entry_count:{:d}\n'.format(kern.GetLoadAddressForSymbol('vm_named_entry_list'),queue_len) - print '{:>6s} {:<6s} {:18s} {:1s} {:>6s} {:>16s} {:>10s} {:>10s} {:>10s} {:>3s} {:18s} {:>6s} {:<20s}\n'.format("#","#","object","P","refcnt","size (pages)","resid","wired","compressed","tag","owner","pid","process") +# print '{:>6s} {:<6s} {:18s} {:1s} {:>6s} {:>16s} {:>10s} {:>10s} {:>10s} 
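# Toy version (not the patch code) of the allocated-element scan logic in
# FindAllocatedElementsInZone above: every element slot in a chunk that is not
# on the chunk's free list is reported as allocated.
def allocated_elements(chunk_start, elem_size, elem_count, free_list):
    free = set(free_list)
    return [chunk_start + i * elem_size
            for i in range(elem_count)
            if chunk_start + i * elem_size not in free]

print map(hex, allocated_elements(0x1000, 0x40, 4, [0x1040]))
# -> ['0x1000', '0x1080', '0x10c0']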
{:>3s} {:18s} {:>6s} {:<20s}\n'.format("#","#","object","P","refcnt","size (pages)","resid","wired","compressed","tag","owner","pid","process") idx = 0 for entry in IterateQueue(queue_head, 'struct vm_named_entry *', 'named_entry_list'): idx += 1 @@ -4412,8 +4719,10 @@ def showmemoryentry(entry, idx=0, queue_len=0): backing += "SUBMAP" if entry.is_copy == 1: backing += "COPY" - if entry.is_sub_map == 0 and entry.is_copy == 0: + if entry.is_object == 1: backing += "OBJECT" + if entry.is_sub_map == 0 and entry.is_copy == 0 and entry.is_object == 0: + backing += "***?***" prot="" if entry.protection & 0x1: prot += "r" @@ -4432,13 +4741,16 @@ def showmemoryentry(entry, idx=0, queue_len=0): extra_str += " alias={:d}".format(entry.named_entry_alias) if hasattr(entry, 'named_entry_port'): extra_str += " port={:#016x}".format(entry.named_entry_port) - print "{:>6d}/{:<6d} {: <#018x} ref={:d} prot={:d}/{:s} type={:s} backing={: <#018x} offset={:#016x} dataoffset={:#016x} size={:#016x}{:s}\n".format(idx,queue_len,entry,entry.ref_count,entry.protection,prot,backing,entry.backing.object,entry.offset,entry.data_offset,entry.size,extra_str) + print "{:d}/{:d} {: <#018x} ref={:d} prot={:d}/{:s} type={:s} backing={: <#018x} offset={:#016x} dataoffset={:#016x} size={:#016x}{:s}\n".format(idx,queue_len,entry,entry.ref_count,entry.protection,prot,backing,entry.backing.copy,entry.offset,entry.data_offset,entry.size,extra_str) if entry.is_sub_map == 1: showmapvme(entry.backing.map, 0, 0, show_pager_info, show_all_shadows) - if entry.is_copy == 1: + elif entry.is_copy == 1: showmapcopyvme(entry.backing.copy, 0, 0, show_pager_info, show_all_shadows, 0) - if entry.is_sub_map == 0 and entry.is_copy == 0: - showvmobject(entry.backing.object, entry.offset, entry.size, show_pager_info, show_all_shadows) + elif entry.is_object == 1: + showmapcopyvme(entry.backing.copy, 0, 0, show_pager_info, show_all_shadows, 0) + else: + print "***** UNKNOWN TYPE *****" + print " \n" def IterateRBTreeEntry2(element, element_type, field_name1, field_name2): @@ -4543,6 +4855,28 @@ def ShowTaskOwnedObjects(cmd_args=None, cmd_options={}): task = kern.GetValueFromAddress(cmd_args[0], 'task *') ShowTaskOwnedVmObjects(task, showonlytagged) +@lldb_command('showdeviceinfo', 'J') +def ShowDeviceInfo(cmd_args=None, cmd_options={}): + """ Routine to show basic device information (model, build, ncpus, etc...) 
+ Usage: memstats [-J] + -J : Output json + """ + print_json = False + if "-J" in cmd_options: + print_json = True + device_info = {} + device_info["build"] = str(kern.globals.osversion) + device_info["memoryConfig"] = int(kern.globals.max_mem_actual) + device_info["ncpu"] = int(kern.globals.ncpu) + device_info["pagesize"] = int(kern.globals.page_size) + device_info["mlockLimit"] = long(kern.globals.vm_global_user_wire_limit) + + + if print_json: + print json.dumps(device_info) + else: + PrettyPrintDictionary(device_info) + def ShowTaskOwnedVmObjects(task, showonlytagged=False): """ Routine to print out a summary listing of all the entries in a vm_map params: @@ -4620,11 +4954,13 @@ def GetProcNameForObjectOwner(owner): def GetDescForNamedEntry(mem_entry): out_str = "\n" - out_str += "\t\tmem_entry {:#08x} ref:{:d} offset:{:#08x} size:{:#08x} prot{:d} backing {:#08x}".format(mem_entry, mem_entry.ref_count, mem_entry.offset, mem_entry.size, mem_entry.protection, mem_entry.backing.object) + out_str += "\t\tmem_entry {:#08x} ref:{:d} offset:{:#08x} size:{:#08x} prot{:d} backing {:#08x}".format(mem_entry, mem_entry.ref_count, mem_entry.offset, mem_entry.size, mem_entry.protection, mem_entry.backing.copy) if mem_entry.is_sub_map: out_str += " is_sub_map" elif mem_entry.is_copy: out_str += " is_copy" - else: + elif mem_entry.is_object: out_str += " is_object" + else: + out_str += " ???" return out_str diff --git a/tools/lldbmacros/misc.py b/tools/lldbmacros/misc.py index 414a4e11d..5a0ef1128 100755 --- a/tools/lldbmacros/misc.py +++ b/tools/lldbmacros/misc.py @@ -81,42 +81,81 @@ def showMCAstate(cmd_args=None): print lldb_run_command('p/x *(x86_saved_state_t *) ' + hex(reg)) cpu = cpu + 1 -def dumpTimerList(anchor): +def dumpTimerList(mpqueue): """ Utility function to dump the timer entries in list (anchor). + anchor is a struct mpqueue_head. 
""" - entry = Cast(anchor.head, 'queue_t') - if entry == addressof(anchor): + + if mpqueue.count == 0: print '(empty)' return - thdr = ' {:<22s}{:<17s}{:<16s} {:<14s} {:<18s}' - print thdr.format('entry:','deadline','soft_deadline','to go','(*func)(param0,param1') - while entry != addressof(anchor): - timer_call = Cast(entry, 'timer_call_t') - call_entry = Cast(entry, 'struct call_entry *') + thdr = ' {:<24s}{:<17s}{:<16s} {:<14s} {:<18s} count: {:d} ' + tval = ' {:#018x}: {:16d} {:16d} {:s}{:3d}.{:09d} ({:#018x})({:#018x}, {:#018x}) ({:s}) {:s}' + + print thdr.format('Entry', 'Deadline', 'soft_deadline', 'Secs To Go', '(*func)(param0, param1)', mpqueue.count) + + for timer_call in ParanoidIterateLinkageChain(mpqueue.head, 'struct timer_call *', 'tc_qlink'): recent_timestamp = GetRecentTimestamp() - if (recent_timestamp < call_entry.deadline): + if (recent_timestamp < timer_call.tc_pqlink.deadline): delta_sign = ' ' - timer_fire = call_entry.deadline - recent_timestamp + timer_fire = timer_call.tc_pqlink.deadline - recent_timestamp else: delta_sign = '-' - timer_fire = recent_timestamp - call_entry.deadline + timer_fire = recent_timestamp - timer_call.tc_pqlink.deadline + + func_name = kern.Symbolicate(timer_call.tc_func) + + extra_string = "" + + strip_func = kern.StripKernelPAC(unsigned(timer_call.tc_func)) + + func_syms = kern.SymbolicateFromAddress(strip_func) + # returns an array of SBSymbol + + if func_syms and func_syms[0] : + func_sym = func_syms[0] + func_name = func_sym.GetName() + try : + + if "thread_call_delayed_timer" in func_name : + group = Cast(timer_call.tc_param0, 'struct thread_call_group *') + flavor = Cast(timer_call.tc_param1, 'thread_call_flavor_t') - func_name = kern.Symbolicate(call_entry.func) + # There's got to be a better way to stringify the enum + flavorname = str(flavor).partition(" = ")[2] - tval = ' {:#018x}: {:16d} {:16d} {:s}{:3d}.{:09d} ({:#018x})({:#018x},{:#018x}) ({:s})' - print tval.format(entry, - call_entry.deadline, - timer_call.soft_deadline, + extra_string += "{:s} {:s}".format(group.tcg_name, flavorname) + + if "thread_timer_expire" in func_name : + thread = Cast(timer_call.tc_param0, 'thread_t') + + tid = thread.thread_id + name = GetThreadName(thread) + pid = GetProcPIDForTask(thread.task) + procname = GetProcNameForTask(thread.task) + + extra_string += "thread: 0x{:x} {:s} task:{:s}[{:d}]".format( + tid, name, procname, pid) + except: + print "exception generating extra_string for call: {:#018x}".format(timer_call) + if dumpTimerList.enable_debug : + raise + + tval = ' {:#018x}: {:16d} {:16d} {:s}{:3d}.{:09d} ({:#018x})({:#018x},{:#018x}) ({:s}) {:s}' + print tval.format(timer_call, + timer_call.tc_pqlink.deadline, + timer_call.tc_soft_deadline, delta_sign, timer_fire/1000000000, timer_fire%1000000000, - call_entry.func, - call_entry.param0, - call_entry.param1, - func_name) - entry = entry.next + timer_call.tc_func, + timer_call.tc_param0, + timer_call.tc_param1, + func_name, extra_string) + +dumpTimerList.enable_debug = False def GetCpuDataForCpuID(cpu_id): """ @@ -141,8 +180,8 @@ def longtermTimers(cmd_args=None): lt = kern.globals.timer_longterm ltt = lt.threshold - EndofAllTime = -1 - if ltt.interval == EndofAllTime: + EndofAllTime = long(-1) + if long(ltt.interval) == EndofAllTime: print "Longterm timers disabled" return @@ -158,17 +197,17 @@ def longtermTimers(cmd_args=None): print ' enqueues/escalates : {:d}' .format(ratio) print ' threshold.interval : {:d}' .format(ltt.interval) print ' threshold.margin : {:d}' .format(ltt.margin) 
- print ' scan_time : {:d}' .format(lt.scan_time) - if ltt.preempted == EndofAllTime: + print ' scan_time : {:#018x} ({:d})'.format(lt.scan_time, lt.scan_time) + if long(ltt.preempted) == EndofAllTime: print ' threshold.preempted : None' else: - print ' threshold.preempted : {:d}' .format(ltt.preempted) - if ltt.deadline == EndofAllTime: + print ' threshold.preempted : {:#018x} ({:d})'.format(ltt.preempted, ltt.preempted) + if long(ltt.deadline) == EndofAllTime: print ' threshold.deadline : None' else: - print ' threshold.deadline : {:d}' .format(ltt.deadline) + print ' threshold.deadline : {:#018x} ({:d})'.format(ltt.deadline, ltt.deadline) print ' threshold.call : {:#018x}'.format(ltt.call) - print ' actual deadline set : {:d}' .format(ltt.deadline_set) + print ' actual deadline set : {:#018x} ({:d})'.format(ltt.deadline_set, ltt.deadline_set) print ' threshold.scans : {:d}' .format(ltt.scans) print ' threshold.preempts : {:d}' .format(ltt.preempts) print ' threshold.latency : {:d}' .format(ltt.latency) @@ -183,26 +222,32 @@ def processorTimers(cmd_args=None): Print details of processor timers, noting anything suspicious Also include long-term timer details """ - hdr = '{:<32s}{:<18s} {:<18s} {:<18s}' - print hdr.format('Processor','Last dispatch','Next deadline','difference') + hdr = '{:15s}{:<18s} {:<18s} {:<18s} {:<18s}' + print hdr.format('Processor #', 'Processor pointer', 'Last dispatch', 'Next deadline', 'Difference') + print "=" * 82 p = kern.globals.processor_list + EndOfAllTime = long(-1) while p: cpu = p.cpu_id cpu_data = GetCpuDataForCpuID(cpu) rt_timer = cpu_data.rtclock_timer - diff = p.last_dispatch - rt_timer.deadline - tmr = 'Processor {:d}: {:#018x} {:#018x} {:#018x} {:#018x} {:s}' + diff = long(rt_timer.deadline) - long(p.last_dispatch) + valid_deadline = long(rt_timer.deadline) != EndOfAllTime + tmr = 'Processor {:<3d}: {:#018x} {:#018x} {:18s} {:18s} {:s}' print tmr.format(cpu, p, p.last_dispatch, - rt_timer.deadline, - diff, - ['probably BAD', '(ok)'][int(diff < 0)]) - if kern.arch == 'x86_64': - print 'Next deadline set at: {:#018x}. Timer call list:'.format(rt_timer.when_set) - dumpTimerList(rt_timer.queue) + "{:#018x}".format(rt_timer.deadline) if valid_deadline else "None", + "{:#018x}".format(diff) if valid_deadline else "N/A", + ['(PAST DEADLINE)', '(ok)'][int(diff > 0)] if valid_deadline else "") + if valid_deadline: + if kern.arch == 'x86_64': + print 'Next deadline set at: {:#018x}. Timer call list:'.format(rt_timer.when_set) + dumpTimerList(rt_timer.queue) p = p.processor_list + print "-" * 82 longtermTimers() + ShowRunningTimers() @lldb_command('showtimerwakeupstats') @@ -217,7 +262,7 @@ def showTimerWakeupStats(cmd_args=None): print dereference(task) print '{:d}({:s}), terminated thread timer wakeups: {:d} {:d} 2ms: {:d} 5ms: {:d} UT: {:d} ST: {:d}'.format( proc.p_pid, - proc.p_comm, + GetProcName(proc), # Commented-out references below to be addressed by rdar://13009660. 0, #task.task_interrupt_wakeups, 0, #task.task_platform_idle_wakeups, @@ -247,6 +292,27 @@ def showTimerWakeupStats(cmd_args=None): print 'Task total wakeups: {:d} {:d}'.format( tot_wakes, tot_platform_wakes) +@lldb_command('showrunningtimers') +def ShowRunningTimers(cmd_args=None): + """ + Print the state of all running timers. 
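# Sketch of the deadline sanity check processorTimers now performs above: a
# deadline of all ones ("end of all time") means the per-CPU timer is idle,
# otherwise a deadline at or before the last dispatch is flagged as past due.
END_OF_ALL_TIME = long(-1) & ((1 << 64) - 1)

def deadline_status(deadline, last_dispatch):
    if deadline == END_OF_ALL_TIME:
        return "None / N/A"
    diff = deadline - last_dispatch
    return "(ok)" if diff > 0 else "(PAST DEADLINE)"

print deadline_status(END_OF_ALL_TIME, 0x1000)
print deadline_status(0x2000, 0x1000)
print deadline_status(0x0800, 0x1000)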
+ + Usage: showrunningtimers + """ + pset = addressof(kern.globals.pset0) + processor_array = kern.globals.processor_array + + i = 0 + while processor_array[i] != 0: + processor = processor_array[i] + print('{}: {}'.format( + i, 'on' if processor.running_timers_active else 'off')) + print('\tquantum: {}'.format( + unsigned(processor.running_timers[0].tc_pqlink.deadline))) + print('\tkperf: {}'.format( + unsigned(processor.running_timers[1].tc_pqlink.deadline))) + i += 1 + def DoReadMsr64(msr_address, lcpu): """ Read a 64-bit MSR from the specified CPU Params: @@ -389,45 +455,6 @@ def WriteMsr64(cmd_args=None): if not DoWriteMsr64(msr_address, lcpu, write_val): print "writemsr64 FAILED" -def GetEVFlags(debug_arg): - """ Return the EV Flags for the given kernel debug arg value - params: - debug_arg - value from arg member of kernel debug buffer entry - returns: - str - string representing the EV Flag for given input arg value - """ - out_str = "" - if debug_arg & 1: - out_str += "EV_RE " - if debug_arg & 2: - out_str += "EV_WR " - if debug_arg & 4: - out_str += "EV_EX " - if debug_arg & 8: - out_str += "EV_RM " - if debug_arg & 0x00100: - out_str += "EV_RBYTES " - if debug_arg & 0x00200: - out_str += "EV_WBYTES " - if debug_arg & 0x00400: - out_str += "EV_RCLOSED " - if debug_arg & 0x00800: - out_str += "EV_RCONN " - if debug_arg & 0x01000: - out_str += "EV_WCLOSED " - if debug_arg & 0x02000: - out_str += "EV_WCONN " - if debug_arg & 0x04000: - out_str += "EV_OOB " - if debug_arg & 0x08000: - out_str += "EV_FIN " - if debug_arg & 0x10000: - out_str += "EV_RESET " - if debug_arg & 0x20000: - out_str += "EV_TIMEOUT " - - return out_str - def GetKernelDebugBufferEntry(kdbg_entry): """ Extract the information from given kernel debug buffer entry and return the summary params: @@ -545,89 +572,11 @@ def GetKernelDebugBufferEntry(kdbg_entry): out_str += " {:>#5x} {:>8d} ".format(kdebug_subclass, kdebug_code) # space for debugid-specific processing - # EVPROC from bsd/kern/sys_generic.c - # MISCDBG_CODE(DBG_EVENT,DBG_WAIT) - if debugid == 0x14100048: - code_info_str += "waitevent " - if kdebug_arg1 == 1: - code_info_str += "before sleep" - elif kdebug_arg1 == 2: - code_info_str += "after sleep" - else: - code_info_str += "????????????" 
- code_info_str += " chan={:#08x} ".format(kdebug_arg2) - elif debugid == 0x14100049: - # MISCDBG_CODE(DBG_EVENT,DBG_WAIT|DBG_FUNC_START) - code_info_str += "waitevent " - elif debugid == 0x1410004a: - # MISCDBG_CODE(DBG_EVENT,DBG_WAIT|DBG_FUNC_END) - code_info_str += "waitevent error={:d} ".format(kdebug_arg1) - code_info_str += "eqp={:#08x} ".format(kdebug_arg4) - code_info_str += GetEVFlags(kdebug_arg3) - code_info_str += "er_handle={:d} ".format(kdebug_arg2) - elif debugid == 0x14100059: - # MISCDBG_CODE(DBG_EVENT,DBG_DEQUEUE|DBG_FUNC_START) - code_info_str += "evprocdeque proc={:#08x} ".format(kdebug_arg1) - if kdebug_arg2 == 0: - code_info_str += "remove first " - else: - code_info_str += "remove {:#08x} ".format(kdebug_arg2) - elif debugid == 0x1410005a: - # MISCDBG_CODE(DBG_EVENT,DBG_DEQUEUE|DBG_FUNC_END) - code_info_str += "evprocdeque " - if kdebug_arg1 == 0: - code_info_str += "result=NULL " - else: - code_info_str += "result={:#08x} ".format(kdebug_arg1) - elif debugid == 0x14100041: - # MISCDBG_CODE(DBG_EVENT,DBG_POST|DBG_FUNC_START) - code_info_str += "postevent " - code_info_str += GetEVFlags(kdebug_arg1) - elif debugid == 0x14100040: - # MISCDBG_CODE(DBG_EVENT,DBG_POST) - code_info_str += "postevent " - code_info_str += "evq={:#08x} ".format(kdebug_arg1) - code_info_str += "er_eventbits=" - code_info_str += GetEVFlags(kdebug_arg2) - code_info_str +="mask=" - code_info_str += GetEVFlags(kdebug_arg3) - elif debugid == 0x14100042: - # MISCDBG_CODE(DBG_EVENT,DBG_POST|DBG_FUNC_END) - code_info_str += "postevent " - elif debugid == 0x14100055: - # MISCDBG_CODE(DBG_EVENT,DBG_ENQUEUE|DBG_FUNC_START) - code_info_str += "evprocenque eqp={:#08x} ".format(kdebug_arg1) - if kdebug_arg2 & 1: - code_info_str += "EV_QUEUED " - code_info_str += GetEVFlags(kdebug_arg3) - elif debugid == 0x14100050: - # MISCDBG_CODE(DBG_EVENT,DBG_EWAKEUP) - code_info_str += "evprocenque before wakeup eqp={:#08x} ".format(kdebug_arg4) - elif debugid == 0x14100056: - # MISCDBG_CODE(DBG_EVENT,DBG_ENQUEUE|DBG_FUNC_END) - code_info_str += "evprocenque " - elif debugid == 0x1410004d: - # MISCDBG_CODE(DBG_EVENT,DBG_MOD|DBG_FUNC_START) - code_info_str += "modwatch " - elif debugid == 0x1410004c: - # MISCDBG_CODE(DBG_EVENT,DBG_MOD) - code_info_str += "modwatch er_handle={:d} ".format(kdebug_arg1) - code_info_str += GetEVFlags(kdebug_arg2) - code_info_str += "evq={:#08x} ", kdebug_arg3 - elif debugid == 0x1410004e: - # MISCDBG_CODE(DBG_EVENT,DBG_MOD|DBG_FUNC_END) - code_info_str += "modwatch er_handle={:d} ".format(kdebug_arg1) - code_info_str += "ee_eventmask=" - code_info_str += GetEVFlags(kdebug_arg2) - code_info_str += "sp={:#08x} ".format(kdebug_arg3) - code_info_str += "flag=" - code_info_str += GetEVFlags(kdebug_arg4) - else: - code_info_str += "arg1={:#010x} ".format(kdebug_arg1) - code_info_str += "arg2={:#010x} ".format(kdebug_arg2) - code_info_str += "arg3={:#010x} ".format(kdebug_arg3) - code_info_str += "arg4={:#010x} ".format(kdebug_arg4) - + code_info_str += "arg1={:#010x} ".format(kdebug_arg1) + code_info_str += "arg2={:#010x} ".format(kdebug_arg2) + code_info_str += "arg3={:#010x} ".format(kdebug_arg3) + code_info_str += "arg4={:#010x} ".format(kdebug_arg4) + # finish up out_str += "{:<25s}\n".format(code_info_str) return out_str @@ -1006,6 +955,19 @@ def DumpRawTraceFile(cmd_args=[], cmd_options={}): return +def GetTimebaseInfo(): + try: + tb = kern.GetValueFromAddress( + 'RTClockData', '_rtclock_data_').rtc_timebase_const + numer = tb.numer + denom = tb.denom + except NameError: + # Intel -- use the 
1-1 timebase. + numer = 1 + denom = 1 + return numer, denom + + def PrintIteratedElem(i, elem, elem_type, do_summary, summary, regex): try: if do_summary and summary: diff --git a/tools/lldbmacros/net.py b/tools/lldbmacros/net.py index c7777f86b..1fc105bdf 100755 --- a/tools/lldbmacros/net.py +++ b/tools/lldbmacros/net.py @@ -54,8 +54,8 @@ def ShowIfConfiguration(ifnet): out_string += "\n\t(struct ifnet *)" + hex(ifnet) if iface.if_snd.ifcq_len : out_string += "\n\t" + str(iface.if_snd.ifcq_len) - if dlifnet.dl_if_inpstorage.rcvq_pkts.qlen : - out_string += "\n\t" + str(dlifnet.dl_if_inpstorage.rcvq_pkts.qlen) + if dlifnet.dl_if_inpstorage.dlth_pkts.qlen : + out_string += "\n\t" + str(dlifnet.dl_if_inpstorage.dlth_pkts.qlen) print out_string def GetIfConfiguration(ifname): @@ -325,7 +325,7 @@ def ShowDlilIfnetConfiguration(dlil_ifnet, show_all) : if show_all : out_string += GetIfaddrs(iface) out_string += "\n" - print out_string + print out_string # Macro: showifnets @lldb_command('showifnets') @@ -526,6 +526,17 @@ def GetUnixDomainSocketAsString(sock) : out_string += "unp_addr: " + GetSocketAddrAsStringUnix(pcb.unp_addr) return out_string +def GetVsockSocketAsString(sock) : + out_string = "" + pcb = Cast(sock.so_pcb, 'vsockpcb *') + if (pcb == 0): + out_string += "vsockpcb: (null) " + else: + out_string += "vsockpcb: " + hex(pcb) + " " + out_string += str(pcb.local_address) + " " + out_string += str(pcb.remote_address) + return out_string + def GetSocket(socket) : """ Show the contents of a socket """ @@ -543,6 +554,8 @@ def GetSocket(socket) : out_string += GetIPv4SocketAsString(so) if (domain.dom_family == 30): out_string += GetIPv6SocketAsString(so) + if (domain.dom_family == 40): + out_string += GetVsockSocketAsString(so) out_string += " s=" + str(int(so.so_snd.sb_cc)) + " r=" + str(int(so.so_rcv.sb_cc)) + " usecnt=" + str(int(so.so_usecount)) + "] " else: out_string += "(null)" @@ -572,6 +585,8 @@ def ShowSocket(cmd_args=None) : out_string += GetIPv4SocketAsString(so) if (domain.dom_family == 30): out_string += GetIPv6SocketAsString(so) + if (domain.dom_family == 40): + out_string += GetVsockSocketAsString(so) print out_string else: print "Unknown value passed as argument." 
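# Worked example of how the numer/denom pair from GetTimebaseInfo (above) is
# typically applied: mach absolute time ticks scale to nanoseconds as
# ticks * numer / denom, and the Intel fallback of 1/1 leaves values unchanged.
def abs_to_ns(abstime, numer, denom):
    return abstime * numer / denom

print abs_to_ns(24000000, 125, 3)   # 24 MHz-style timebase -> 1000000000 ns
print abs_to_ns(24000000, 1, 1)     # Intel 1-1 timebase -> unchanged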
@@ -598,8 +613,8 @@ def GetProcSockets(proc, total_snd_cc, total_rcv_cc): proc_lastfile = unsigned(proc_filedesc.fd_lastfile) if proc_filedesc.fd_nfiles != 0: while count <= proc_lastfile: - if (unsigned(proc_ofiles[count]) != 0 and proc_ofiles[count].f_fglob != 0): - fg = proc_ofiles[count].f_fglob + if (unsigned(proc_ofiles[count]) != 0 and proc_ofiles[count].fp_glob != 0): + fg = proc_ofiles[count].fp_glob if (int(fg.fg_ops.fo_type) == 2): if (proc_filedesc.fd_ofileflags[count] & 4): out_string += "U: " @@ -1785,6 +1800,29 @@ def Getntohs(port): p |= ((port & 0x000000ff) << 8) return str(p) + +# Macro: mbuf_list_usage_summary +@lldb_command('mbuf_list_usage_summary') +def ShowMbufListUsageSummary(cmd_args=None): + """ Print mbuf list usage summary + """ + out_string = "" + pkt_cnt = [0] + buf_byte_cnt = [0] * (Mbuf_Type.MT_LAST + 1) + mbuf_cnt = [0] + mbuf_cluster_cnt = [0] + + mpkt = kern.GetValueFromAddress(cmd_args[0], 'struct mbuf *') + CalcMbufInList(mpkt, pkt_cnt, buf_byte_cnt, mbuf_cnt, mbuf_cluster_cnt) + + out_string += "Total packet count is " + str(int(pkt_cnt[0])) + "\n" + for x in range(Mbuf_Type.MT_LAST): + if (buf_byte_cnt[x] != 0): + out_string += "Total buf bytes of type " + Mbuf_Type.reverse_mapping[x] + " : " + str(int(buf_byte_cnt[x])) + "\n" + out_string += "Total mbuf count " + str(int(mbuf_cnt[0])) + "\n" + out_string += "Total mbuf cluster count " + str(int(mbuf_cluster_cnt[0])) + "\n" + print out_string + # Macro: show_kern_event_pcbinfo def GetKernEventPcbInfo(kev_pcb_head): out_string = "" diff --git a/tools/lldbmacros/pmap.py b/tools/lldbmacros/pmap.py index 8bb7134fc..38f230ca7 100755 --- a/tools/lldbmacros/pmap.py +++ b/tools/lldbmacros/pmap.py @@ -748,191 +748,140 @@ ARM64_TTE_SIZE = 8 ARM64_TTE_SHIFT = 3 ARM64_VMADDR_BITS = 48 -def PmapBlockOffsetMaskARM64(level): - assert level >= 1 and level <= 3 - page_size = kern.globals.arm_hardware_page_size +def PmapBlockOffsetMaskARM64(page_size, level): + assert level >= 0 and level <= 3 ttentries = (page_size / ARM64_TTE_SIZE) return page_size * (ttentries ** (3 - level)) - 1 -def PmapBlockBaseMaskARM64(level): - assert level >= 1 and level <= 3 - page_size = kern.globals.arm_hardware_page_size - return ((1 << ARM64_VMADDR_BITS) - 1) & ~PmapBlockOffsetMaskARM64(level) +def PmapBlockBaseMaskARM64(page_size, level): + assert level >= 0 and level <= 3 + return ((1 << ARM64_VMADDR_BITS) - 1) & ~PmapBlockOffsetMaskARM64(page_size, level) -def PmapIndexMaskARM64(level): - assert level >= 1 and level <= 3 - page_size = kern.globals.arm_hardware_page_size - ttentries = (page_size / ARM64_TTE_SIZE) - return page_size * (ttentries ** (3 - level) * (ttentries - 1)) - -def PmapIndexDivideARM64(level): - assert level >= 1 and level <= 3 - page_size = kern.globals.arm_hardware_page_size - ttentries = (page_size / ARM64_TTE_SIZE) - return page_size * (ttentries ** (3 - level)) - -def PmapTTnIndexARM64(vaddr, level): - assert(type(vaddr) in (long, int)) - assert_64bit(vaddr) - - return (vaddr & PmapIndexMaskARM64(level)) // PmapIndexDivideARM64(level) - -def PmapDecodeTTEARM64(tte, level): +def PmapDecodeTTEARM64(tte, level, stage2 = False): """ Display the bits of an ARM64 translation table or page table entry in human-readable form. tte: integer value of the TTE/PTE level: translation table level. Valid values are 1, 2, or 3. """ - assert(type(tte) == long) assert(type(level) == int) assert_64bit(tte) - if tte & 0x1 == 0x1: - if (tte & 0x2 == 0x2) and (level != 0x3): - print "Type = Table pointer." 
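# Worked example of the page-size-parameterized PmapBlockOffsetMaskARM64 above:
# with 8-byte TTEs a table page holds page_size/8 entries, so a level-L entry
# spans page_size * (entries ** (3 - L)) bytes of virtual address space.
ARM64_TTE_SIZE = 8

def block_span(page_size, level):
    ttentries = page_size / ARM64_TTE_SIZE
    return page_size * (ttentries ** (3 - level))

for page_size in (4096, 16384):
    print [hex(block_span(page_size, level)) for level in (1, 2, 3)]
# 4K granule:  L1 = 1 GiB,  L2 = 2 MiB,  L3 = 4 KiB
# 16K granule: L1 = 64 GiB, L2 = 32 MiB, L3 = 16 KiB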
- print "Table addr = {:#x}.".format(tte & 0xfffffffff000) + if tte & 0x1 == 0x0: + print("Invalid.") + return + + if (tte & 0x2 == 0x2) and (level != 0x3): + print "Type = Table pointer." + print "Table addr = {:#x}.".format(tte & 0xfffffffff000) + + if not stage2: print "PXN = {:#x}.".format((tte >> 59) & 0x1) print "XN = {:#x}.".format((tte >> 60) & 0x1) print "AP = {:#x}.".format((tte >> 61) & 0x3) - print "NS = {:#x}".format(tte >> 63) + print "NS = {:#x}.".format(tte >> 63) + else: + print "Type = Block." + + if stage2: + print "S2 MemAttr = {:#x}.".format((tte >> 2) & 0xf) else: - print "Type = Block." print "AttrIdx = {:#x}.".format((tte >> 2) & 0x7) print "NS = {:#x}.".format((tte >> 5) & 0x1) + + if stage2: + print "S2AP = {:#x}.".format((tte >> 6) & 0x3) + else: print "AP = {:#x}.".format((tte >> 6) & 0x3) - print "SH = {:#x}.".format((tte >> 8) & 0x3) - print "AF = {:#x}.".format((tte >> 10) & 0x1) + + print "SH = {:#x}.".format((tte >> 8) & 0x3) + print "AF = {:#x}.".format((tte >> 10) & 0x1) + + if not stage2: print "nG = {:#x}.".format((tte >> 11) & 0x1) - print "HINT = {:#x}.".format((tte >> 52) & 0x1) + + print "HINT = {:#x}.".format((tte >> 52) & 0x1) + + if stage2: + print "S2XN = {:#x}.".format((tte >> 53) & 0x3) + else: print "PXN = {:#x}.".format((tte >> 53) & 0x1) print "XN = {:#x}.".format((tte >> 54) & 0x1) - print "SW Use = {:#x}.".format((tte >> 55) & 0xf) - else: - print "Invalid." + + print "SW Use = {:#x}.".format((tte >> 55) & 0xf) return -def PmapWalkARM64(pmap, vaddr, verbose_level = vHUMAN): - assert(type(pmap) == core.cvalue.value) +def PmapTTnIndexARM64(vaddr, pmap_pt_attr): + pta_max_level = unsigned(pmap_pt_attr.pta_max_level) + + tt_index = [] + for i in range(pta_max_level + 1): + tt_index.append((vaddr & unsigned(pmap_pt_attr.pta_level_info[i].index_mask)) \ + >> unsigned(pmap_pt_attr.pta_level_info[i].shift)) + + return tt_index + +def PmapWalkARM64(pmap_pt_attr, root_tte, vaddr, verbose_level = vHUMAN): assert(type(vaddr) in (long, int)) - page_size = kern.globals.arm_hardware_page_size + assert_64bit(vaddr) + assert_64bit(root_tte) + + # Obtain pmap attributes + page_size = pmap_pt_attr.pta_page_size page_offset_mask = (page_size - 1) page_base_mask = ((1 << ARM64_VMADDR_BITS) - 1) & (~page_offset_mask) + tt_index = PmapTTnIndexARM64(vaddr, pmap_pt_attr) + stage2 = bool(pmap_pt_attr.stage2 if hasattr(pmap_pt_attr, 'stage2') else False) - assert_64bit(vaddr) + # The pmap starts at a page table level that is defined by register + # values; the root level can be obtained from the attributes structure + level = unsigned(pmap_pt_attr.pta_root_level) + + root_tt_index = tt_index[level] + root_pgtable_num_ttes = (unsigned(pmap_pt_attr.pta_level_info[level].index_mask) >> \ + unsigned(pmap_pt_attr.pta_level_info[level].shift)) + 1 + tte = long(unsigned(root_tte[root_tt_index])) + + # Walk the page tables paddr = -1 + max_level = unsigned(pmap_pt_attr.pta_max_level) - tt0_index = 0 - tt1_index = PmapTTnIndexARM64(vaddr, 1) - tt2_index = PmapTTnIndexARM64(vaddr, 2) - tt3_index = PmapTTnIndexARM64(vaddr, 3) + while (level <= max_level): + if verbose_level >= vSCRIPT: + print "L{} entry: {:#x}".format(level, tte) + if verbose_level >= vDETAIL: + PmapDecodeTTEARM64(tte, level, stage2) - # The pmap starts at a page tabel level that is defined by register - # values; the kernel exports the root level for LLDB - level = kern.globals.arm64_root_pgtable_level - assert(level <= 3) + if tte & 0x1 == 0x0: + if verbose_level >= vHUMAN: + print "L{} entry invalid: 
{:#x}\n".format(level, tte) + break - if level == 0: - root_tt_index = tt0_index - elif level == 1: - root_tt_index = tt1_index - elif level == 2: - root_tt_index = tt2_index - elif level == 3: - root_tt_index = tt3_index + # Handle leaf entry + if tte & 0x2 == 0x0 or level == max_level: + base_mask = page_base_mask if level == max_level else PmapBlockBaseMaskARM64(page_size, level) + offset_mask = page_offset_mask if level == max_level else PmapBlockOffsetMaskARM64(page_size, level) + paddr = tte & base_mask + paddr = paddr | (vaddr & offset_mask) - # If the root of the page table is not a full page, we need to - # truncate the index - root_tt_index = root_tt_index % unsigned(kern.globals.arm64_root_pgtable_num_ttes) + if level != max_level: + print "phys: {:#x}".format(paddr) - tte = long(unsigned(pmap.tte[root_tt_index])) - assert(type(tte) == long) - assert_64bit(tte) + break + else: + # Handle page table entry + next_phys = (tte & page_base_mask) + (ARM64_TTE_SIZE * tt_index[level + 1]) + assert(type(next_phys) == long) - while (True): - if (level == 0): - # L0 - # This is unsupported at the moment, as no kernel configurations use L0 - assert(False) + next_virt = kern.PhysToKernelVirt(next_phys) + assert(type(next_virt) == long) - elif (level == 1): - # L1 - if verbose_level >= vSCRIPT: - print "L1 entry: {:#x}".format(tte) - if verbose_level >= vDETAIL: - PmapDecodeTTEARM64(tte, 1) - - if tte & 0x1 == 0x1: - # Check for L1 block entry - if tte & 0x2 == 0x0: - # Handle L1 block entry - paddr = tte & PmapBlockBaseMaskARM64(1) - paddr = paddr | (vaddr & PmapBlockOffsetMaskARM64(1)) - print "phys: {:#x}".format(paddr) - break - else: - # Handle L1 table entry - l2_phys = (tte & page_base_mask) + (ARM64_TTE_SIZE * tt2_index) - assert(type(l2_phys) == long) - - l2_virt = kern.PhysToKernelVirt(l2_phys) - assert(type(l2_virt) == long) - - if verbose_level >= vDETAIL: - print "L2 physical address: {:#x}. L2 virtual address: {:#x}".format(l2_phys, l2_virt) - - ttep = kern.GetValueFromAddress(l2_virt, "tt_entry_t*") - tte = long(unsigned(dereference(ttep))) - assert(type(tte) == long) - elif verbose_level >= vHUMAN: - print "L1 entry invalid: {:#x}\n".format(tte) - - elif (level == 2): - # L2 - if verbose_level >= vSCRIPT: - print "L2 entry: {:#0x}".format(tte) if verbose_level >= vDETAIL: - PmapDecodeTTEARM64(tte, 2) - - if tte & 0x1 == 0x1: - # Check for L2 block entry - if tte & 0x2 == 0x0: - # Handle L2 block entry - paddr = tte & PmapBlockBaseMaskARM64(2) - paddr = paddr | (vaddr & PmapBlockOffsetMaskARM64(2)) - break - else: - # Handle L2 table entry - l3_phys = (tte & page_base_mask) + (ARM64_TTE_SIZE * tt3_index) - assert(type(l3_phys) == long) - - l3_virt = kern.PhysToKernelVirt(l3_phys) - assert(type(l3_virt) == long) - - if verbose_level >= vDETAIL: - print "L3 physical address: {:#x}. L3 virtual address: {:#x}".format(l3_phys, l3_virt) - - ttep = kern.GetValueFromAddress(l3_virt, "tt_entry_t*") - tte = long(unsigned(dereference(ttep))) - assert(type(tte) == long) - elif verbose_level >= vHUMAN: # tte & 0x1 == 0x1 - print "L2 entry invalid: {:#x}\n".format(tte) - - elif (level == 3): - # L3 - if verbose_level >= vSCRIPT: - print "L3 entry: {:#0x}".format(tte) - if verbose_level >= vDETAIL: - PmapDecodeTTEARM64(tte, 3) - - if tte & 0x3 == 0x3: - paddr = tte & page_base_mask - paddr = paddr | (vaddr & page_offset_mask) - elif verbose_level >= vHUMAN: - print "L3 entry invalid: {:#x}\n".format(tte) + print "L{} physical address: {:#x}. 
L{} virtual address: {:#x}".format(level + 1, next_phys, level + 1, next_virt) - # This was the leaf page table page for this request; we're done - break + ttep = kern.GetValueFromAddress(next_virt, "tt_entry_t*") + tte = long(unsigned(dereference(ttep))) + assert(type(tte) == long) # We've parsed one level, so go to the next level assert(level <= 3) @@ -952,7 +901,9 @@ def PmapWalk(pmap, vaddr, verbose_level = vHUMAN): elif kern.arch == 'arm': return PmapWalkARM(pmap, vaddr, verbose_level) elif kern.arch.startswith('arm64'): - return PmapWalkARM64(pmap, vaddr, verbose_level) + # Obtain pmap attributes from pmap structure + pmap_pt_attr = pmap.pmap_pt_attr if hasattr(pmap, 'pmap_pt_attr') else kern.globals.native_pt_attr + return PmapWalkARM64(pmap_pt_attr, pmap.tte, vaddr, verbose_level) else: raise NotImplementedError("PmapWalk does not support {0}".format(kern.arch)) @@ -966,21 +917,58 @@ def PmapWalkHelper(cmd_args=None): raise ArgumentError("Too few arguments to pmap_walk.") pmap = kern.GetValueAsType(cmd_args[0], 'pmap_t') - addr = unsigned(kern.GetValueFromAddress(cmd_args[1], 'void *')) + addr = ArgumentStringToInt(cmd_args[1]) PmapWalk(pmap, addr, config['verbosity']) return +def GetMemoryAttributesFromUser(requested_type): + pmap_attr_dict = { + '4k' : kern.globals.pmap_pt_attr_4k, + '16k' : kern.globals.pmap_pt_attr_16k, + '16k_s2' : kern.globals.pmap_pt_attr_16k_stage2 if hasattr(kern.globals, 'pmap_pt_attr_16k_stage2') else None, + } + + requested_type = requested_type.lower() + if requested_type not in pmap_attr_dict: + return None + + return pmap_attr_dict[requested_type] + +@lldb_command('ttep_walk') +def TTEPWalkPHelper(cmd_args=None): + """ Perform a page-table walk in for . + Syntax: (lldb) ttep_walk [4k|16k|16k_s2] [-v] [-e] + Multiple -v's can be specified for increased verbosity + """ + if cmd_args == None or len(cmd_args) < 2: + raise ArgumentError("Too few arguments to ttep_walk.") + + if not kern.arch.startswith('arm64'): + raise NotImplementedError("ttep_walk does not support {0}".format(kern.arch)) + + tte = kern.GetValueFromAddress(kern.PhysToKernelVirt(ArgumentStringToInt(cmd_args[0])), 'unsigned long *') + addr = ArgumentStringToInt(cmd_args[1]) + + pmap_pt_attr = kern.globals.native_pt_attr if len(cmd_args) < 3 else GetMemoryAttributesFromUser(cmd_args[2]) + if pmap_pt_attr is None: + raise ArgumentError("Invalid translation attribute type.") + + return PmapWalkARM64(pmap_pt_attr, tte, addr, config['verbosity']) + @lldb_command('decode_tte') def DecodeTTE(cmd_args=None): - """ Decode the bits in the TTE/PTE value specified for translation level - Syntax: (lldb) decode_tte + """ Decode the bits in the TTE/PTE value specified for translation level and stage [s1|s2] + Syntax: (lldb) decode_tte [s1|s2] """ if cmd_args == None or len(cmd_args) < 2: raise ArgumentError("Too few arguments to decode_tte.") + if len(cmd_args) > 2 and cmd_args[2] not in ["s1", "s2"]: + raise ArgumentError("{} is not a valid stage of translation.".format(cmd_args[2])) if kern.arch == 'arm': PmapDecodeTTEARM(kern.GetValueFromAddress(cmd_args[0], "unsigned long"), ArgumentStringToInt(cmd_args[1]), vSCRIPT) elif kern.arch.startswith('arm64'): - PmapDecodeTTEARM64(long(kern.GetValueFromAddress(cmd_args[0], "unsigned long")), ArgumentStringToInt(cmd_args[1])) + stage2 = True if len(cmd_args) > 2 and cmd_args[2] == "s2" else False + PmapDecodeTTEARM64(ArgumentStringToInt(cmd_args[0]), ArgumentStringToInt(cmd_args[1]), stage2) else: raise NotImplementedError("decode_tte does not support 
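# Minimal sketch (not the patch's walker) of the descriptor checks that both
# PmapWalkARM64 and PmapDecodeTTEARM64 key off above: bit 0 selects valid,
# bit 1 selects table vs block below the leaf level, and bits [47:12] carry the
# next-level or output address in this illustrative 4K-granule case.
ADDR_MASK = 0x0000fffffffff000

def classify_tte(tte, level, max_level=3):
    if tte & 0x1 == 0:
        return "invalid"
    if (tte & 0x2) and level != max_level:
        return "table -> {:#x}".format(tte & ADDR_MASK)
    return "block/page -> {:#x}".format(tte & ADDR_MASK)

print classify_tte(0x0000000812345003, 1)   # table -> 0x812345000
print classify_tte(0x00400000c0000701, 2)   # block/page -> 0xc0000000
print classify_tte(0x0000000000000000, 3)   # invalid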
{0}".format(kern.arch)) @@ -1046,7 +1034,10 @@ def PVWalkARM(pa): else: pve_str = ' (IOMMU state), descriptor' ptep = ptep | iommu_table_flag - print "PVE {:#x}, PTE {:#x}{:s}: {:#x}".format(current_pvep, ptep, pve_str, dereference(kern.GetValueFromAddress(ptep, 'pt_entry_t *'))) + try: + print "PVE {:#x}, PTE {:#x}{:s}: {:#x}".format(current_pvep, ptep, pve_str, dereference(kern.GetValueFromAddress(ptep, 'pt_entry_t *'))) + except: + print "PVE {:#x}, PTE {:#x}{:s}: ".format(current_pvep, ptep, pve_str) @lldb_command('pv_walk') def PVWalk(cmd_args=None): @@ -1090,12 +1081,9 @@ def KVToPhysARM(addr): return (addr - long(unsigned(ptov_table[i].va)) + long(unsigned(ptov_table[i].pa))) return (addr - unsigned(kern.globals.gVirtBase) + unsigned(kern.globals.gPhysBase)) -def ShowPTEARM(pte): - """ Display vital information about an ARM page table entry - pte: kernel virtual address of the PTE. Should be L3 PTE. May also work with L2 TTEs for certain devices. - """ - page_size = kern.globals.arm_hardware_page_size - pn = (KVToPhysARM(pte) - unsigned(kern.globals.vm_first_phys)) / page_size + +def GetPtDesc(paddr): + pn = (paddr - unsigned(kern.globals.vm_first_phys)) / kern.globals.page_size pvh = unsigned(kern.globals.pv_head_table[pn]) if kern.arch.startswith('arm64'): pvh = pvh | PVH_HIGH_FLAGS_ARM64 @@ -1105,6 +1093,13 @@ def ShowPTEARM(pte): if pvh_type != 0x3: raise ValueError("PV head {:#x} does not correspond to a page-table descriptor".format(pvh)) ptd = kern.GetValueFromAddress(pvh & ~0x3, 'pt_desc_t *') + return ptd + +def ShowPTEARM(pte, page_size, stage2 = False): + """ Display vital information about an ARM page table entry + pte: kernel virtual address of the PTE. Should be L3 PTE. May also work with L2 TTEs for certain devices. + """ + ptd = GetPtDesc(KVToPhysARM(pte)) print "descriptor: {:#x}".format(ptd) print "pmap: {:#x}".format(ptd.pmap) pt_index = (pte % kern.globals.page_size) / page_size @@ -1121,27 +1116,36 @@ def ShowPTEARM(pte): else: level = 3 granule = page_size - print "maps VA: {:#x}".format(long(unsigned(ptd.ptd_info[pt_index].va)) + (pte_pgoff * granule)) + print "maps {}: {:#x}".format("IPA" if stage2 else "VA", long(unsigned(ptd.ptd_info[pt_index].va)) + (pte_pgoff * granule)) pteval = long(unsigned(dereference(kern.GetValueFromAddress(unsigned(pte), 'pt_entry_t *')))) print "value: {:#x}".format(pteval) if kern.arch.startswith('arm64'): print "level: {:d}".format(level) - PmapDecodeTTEARM64(pteval, level) + PmapDecodeTTEARM64(pteval, level, stage2) elif kern.arch == 'arm': PmapDecodeTTEARM(pteval, 2, vSCRIPT) @lldb_command('showpte') def ShowPTE(cmd_args=None): """ Display vital information about the page table entry at VA - Syntax: (lldb) showpte + Syntax: (lldb) showpte [4k|16k|16k_s2] """ if cmd_args == None or len(cmd_args) < 1: raise ArgumentError("Too few arguments to showpte.") - if not kern.arch.startswith('arm'): + + if kern.arch == 'arm': + ShowPTEARM(kern.GetValueFromAddress(cmd_args[0], 'unsigned long'), kern.globals.page_size) + elif kern.arch.startswith('arm64'): + pmap_pt_attr = kern.globals.native_pt_attr if len(cmd_args) < 2 else GetMemoryAttributesFromUser(cmd_args[1]) + if pmap_pt_attr is None: + raise ArgumentError("Invalid translation attribute type.") + + stage2 = bool(pmap_pt_attr.stage2 if hasattr(pmap_pt_attr, 'stage2') else False) + ShowPTEARM(kern.GetValueFromAddress(cmd_args[0], 'unsigned long'), pmap_pt_attr.pta_page_size, stage2) + else: raise NotImplementedError("showpte does not support {0}".format(kern.arch)) - 
ShowPTEARM(kern.GetValueFromAddress(cmd_args[0], 'unsigned long')) -def FindMappingAtLevelARM(pmap, tt, nttes, level, action): +def FindMappingAtLevelARM(pmap, tt, nttes, level, va, action): """ Perform the specified action for all valid mappings in an ARM translation table pmap: owner of the translation table tt: translation table or page table @@ -1152,10 +1156,12 @@ def FindMappingAtLevelARM(pmap, tt, nttes, level, action): for i in range(nttes): try: tte = tt[i] + va_size = None if level == 1: if tte & 0x3 == 0x1: type = 'table' granule = 1024 + va_size = kern.globals.page_size * 256 paddr = tte & 0xFFFFFC00 elif tte & 0x3 == 0x2: type = 'block' @@ -1177,14 +1183,17 @@ def FindMappingAtLevelARM(pmap, tt, nttes, level, action): paddr = tte & 0xFFFFF000 else: continue - action(pmap, level, type, addressof(tt[i]), paddr, granule) - if level == 1 and (tte & 0x3) == 0x1: - tt_next = kern.GetValueFromAddress(kern.PhysToKernelVirt(paddr), 'tt_entry_t *') - FindMappingAtLevelARM(pmap, tt_next, granule / 4, level + 1, action) + if va_size is None: + va_size = granule + mapped_va = va + (va_size * i) + if action(pmap, level, type, addressof(tt[i]), paddr, mapped_va, granule): + if level == 1 and (tte & 0x3) == 0x1: + tt_next = kern.GetValueFromAddress(kern.PhysToKernelVirt(paddr), 'tt_entry_t *') + FindMappingAtLevelARM(pmap, tt_next, granule / 4, level + 1, mapped_va, action) except Exception as exc: print "Unable to access tte {:#x}".format(unsigned(addressof(tt[i]))) -def FindMappingAtLevelARM64(pmap, tt, nttes, level, action): +def FindMappingAtLevelARM64(pmap, tt, nttes, level, va, action): """ Perform the specified action for all valid mappings in an ARM64 translation table pmap: owner of the translation table tt: translation table or page table @@ -1192,30 +1201,37 @@ def FindMappingAtLevelARM64(pmap, tt, nttes, level, action): level: translation table level, 1 2 or 3 action: callback for each valid TTE """ - page_size = kern.globals.arm_hardware_page_size + # Obtain pmap attributes + pmap_pt_attr = pmap.pmap_pt_attr if hasattr(pmap, 'pmap_pt_attr') else kern.globals.native_pt_attr + page_size = pmap_pt_attr.pta_page_size page_offset_mask = (page_size - 1) page_base_mask = ((1 << ARM64_VMADDR_BITS) - 1) & (~page_offset_mask) + max_level = unsigned(pmap_pt_attr.pta_max_level) + for i in range(nttes): try: tte = tt[i] - if tte & 0x1 == 0x1: - if tte & 0x2 == 0x2: - if level < 3: - type = 'table' - else: - type = 'entry' - granule = page_size - paddr = tte & page_base_mask - elif level < 3: - type = 'block' - granule = PmapBlockOffsetMaskARM64(level) + 1 - paddr = tte & PmapBlockBaseMaskARM64(level) - else: - continue - action(pmap, level, type, addressof(tt[i]), paddr, granule) - if level < 3 and (tte & 0x2 == 0x2): - tt_next = kern.GetValueFromAddress(kern.PhysToKernelVirt(paddr), 'tt_entry_t *') - FindMappingAtLevelARM64(pmap, tt_next, granule / ARM64_TTE_SIZE, level + 1, action) + if tte & 0x1 == 0x0: + continue + + tt_next = None + paddr = unsigned(tte) & unsigned(page_base_mask) + + # Handle leaf entry + if tte & 0x2 == 0x0 or level == max_level: + type = 'block' if level < max_level else 'entry' + granule = PmapBlockOffsetMaskARM64(page_size, level) + 1 + else: + # Handle page table entry + type = 'table' + granule = page_size + tt_next = kern.GetValueFromAddress(kern.PhysToKernelVirt(paddr), 'tt_entry_t *') + + mapped_va = long(unsigned(va)) + ((PmapBlockOffsetMaskARM64(page_size, level) + 1) * i) + if action(pmap, level, type, addressof(tt[i]), paddr, mapped_va, granule): + if 
tt_next is not None: + FindMappingAtLevelARM64(pmap, tt_next, granule / ARM64_TTE_SIZE, level + 1, mapped_va, action) + except Exception as exc: print "Unable to access tte {:#x}".format(unsigned(addressof(tt[i]))) @@ -1227,14 +1243,20 @@ def ScanPageTables(action, targetPmap=None): print "Scanning all available translation tables. This may take a long time..." def ScanPmap(pmap, action): if kern.arch.startswith('arm64'): - granule = kern.globals.arm64_root_pgtable_num_ttes * 8 + # Obtain pmap attributes + pmap_pt_attr = pmap.pmap_pt_attr if hasattr(pmap, 'pmap_pt_attr') else kern.globals.native_pt_attr + granule = pmap_pt_attr.pta_page_size + level = unsigned(pmap_pt_attr.pta_root_level) + root_pgtable_num_ttes = (unsigned(pmap_pt_attr.pta_level_info[level].index_mask) >> \ + unsigned(pmap_pt_attr.pta_level_info[level].shift)) + 1 elif kern.arch == 'arm': granule = pmap.tte_index_max * 4 - action(pmap, 1, 'root', pmap.tte, unsigned(pmap.ttep), granule) - if kern.arch.startswith('arm64'): - FindMappingAtLevelARM64(pmap, pmap.tte, kern.globals.arm64_root_pgtable_num_ttes, kern.globals.arm64_root_pgtable_level, action) - elif kern.arch == 'arm': - FindMappingAtLevelARM(pmap, pmap.tte, pmap.tte_index_max, 1, action) + + if action(pmap, pmap_pt_attr.pta_root_level, 'root', pmap.tte, unsigned(pmap.ttep), pmap.min, granule): + if kern.arch.startswith('arm64'): + FindMappingAtLevelARM64(pmap, pmap.tte, root_pgtable_num_ttes, level, pmap.min, action) + elif kern.arch == 'arm': + FindMappingAtLevelARM(pmap, pmap.tte, pmap.tte_index_max, 1, pmap.min, action) if targetPmap is not None: ScanPmap(kern.GetValueFromAddress(targetPmap, 'pmap_t'), action) @@ -1258,12 +1280,54 @@ def ShowAllMappings(cmd_args=None): targetPmap = None if len(cmd_args) > 1: targetPmap = cmd_args[1] - def printMatchedMapping(pmap, level, type, tte, paddr, granule): + def printMatchedMapping(pmap, level, type, tte, paddr, va, granule): if paddr <= pa < (paddr + granule): - print "pmap: {:#x}: L{:d} {:s} at {:#x}: [{:#x}, {:#x})".format(pmap, level, type, unsigned(tte), paddr, paddr + granule) + print "pmap: {:#x}: L{:d} {:s} at {:#x}: [{:#x}, {:#x}), maps va {:#x}".format(pmap, level, type, unsigned(tte), paddr, paddr + granule, va) + return True ScanPageTables(printMatchedMapping, targetPmap) -def checkPVList(pmap, level, type, tte, paddr, granule): +@lldb_command('showptusage') +def ShowPTUsage(cmd_args=None): + """ Display a summary of pagetable allocations for a given pmap. + Syntax: (lldb) showptusage [] + WARNING: this macro can take a long time (> 1hr) to complete! 
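# Illustrative sketch (editorial, not part of the patch): the per-entry VA stride used by
# FindMappingAtLevelARM64 comes from PmapBlockOffsetMaskARM64()/pta_page_size. Assuming the
# standard ARMv8 granule geometry (each table level resolves log2(page_size/8) bits of VA),
# the span of one TTE at a given level can be derived as below; the helper name and the
# max_level default are assumptions for illustration only.
def tte_span(page_size, level, max_level=3):
    """Bytes of virtual address space covered by one TTE at the given level."""
    page_shift = page_size.bit_length() - 1     # 12 for 4K pages, 14 for 16K pages
    bits_per_level = page_shift - 3             # 512 or 2048 entries per table page
    return 1 << (page_shift + (max_level - level) * bits_per_level)
# e.g. tte_span(4096, 2) == 0x200000 (2MB L2 block); tte_span(16384, 2) == 0x2000000 (32MB),
# so the mapped VA for entry i at a level is simply va + i * tte_span(page_size, level).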
+ """ + if not kern.arch.startswith('arm'): + raise NotImplementedError("showptusage does not support {0}".format(kern.arch)) + targetPmap = None + if len(cmd_args) > 0: + targetPmap = cmd_args[0] + lastPmap = [None] + numTables = [0] + numUnnested = [0] + numPmaps = [0] + def printValidTTE(pmap, level, type, tte, paddr, va, granule): + unnested = "" + nested_region_addr = long(unsigned(pmap.nested_region_addr)) + nested_region_end = nested_region_addr + long(unsigned(pmap.nested_region_size)) + if lastPmap[0] is None or (pmap != lastPmap[0]): + lastPmap[0] = pmap + numPmaps[0] = numPmaps[0] + 1 + print ("pmap {:#x}:".format(pmap)) + if type == 'root': + return True + if (level == 2) and (va >= nested_region_addr) and (va < nested_region_end): + ptd = GetPtDesc(paddr) + if ptd.pmap != pmap: + return False + else: + numUnnested[0] = numUnnested[0] + 1 + unnested = " (likely unnested)" + numTables[0] = numTables[0] + 1 + print (" " * 4 * int(level)) + "L{:d} entry at {:#x}, maps {:#x}".format(level, unsigned(tte), va) + unnested + if level == 2: + return False + else: + return True + ScanPageTables(printValidTTE, targetPmap) + print("{:d} table(s), {:d} of them likely unnested, in {:d} pmap(s)".format(numTables[0], numUnnested[0], numPmaps[0])) + +def checkPVList(pmap, level, type, tte, paddr, va, granule): """ Checks an ARM physical-to-virtual mapping list for consistency errors. pmap: owner of the translation table level: translation table level. PV lists will only be checked for L2 (arm32) or L3 (arm64) tables. @@ -1287,7 +1351,7 @@ def checkPVList(pmap, level, type, tte, paddr, granule): max_level = 2 pvh_set_bits = PVH_HIGH_FLAGS_ARM32 if level < max_level or paddr < vm_first_phys or paddr >= vm_last_phys: - return + return True pn = (paddr - vm_first_phys) / page_size pvh = unsigned(kern.globals.pv_head_table[pn]) | pvh_set_bits pvh_type = pvh & 0x3 @@ -1328,6 +1392,7 @@ def checkPVList(pmap, level, type, tte, paddr, granule): print "{:s}{:s}Unable to read PVE {:#x}".format(pmap_str, tte_str, ptep) if tte is not None and not tte_match: print "{:s}{:s}not found in PV list".format(pmap_str, tte_str, paddr) + return True @lldb_command('pv_check', 'P') def PVCheck(cmd_args=None, cmd_options={}): @@ -1342,14 +1407,14 @@ def PVCheck(cmd_args=None, cmd_options={}): elif kern.arch.startswith('arm64'): level = 3 else: - raise NotImplementedError("showallmappings does not support {0}".format(kern.arch)) + raise NotImplementedError("pv_check does not support {0}".format(kern.arch)) if "-P" in cmd_options: pte = None pa = long(unsigned(kern.GetValueFromAddress(cmd_args[0], "unsigned long"))) else: pte = kern.GetValueFromAddress(cmd_args[0], 'pt_entry_t *') pa = long(unsigned(dereference(pte))) - checkPVList(None, level, None, pte, pa, None) + checkPVList(None, level, None, pte, pa, 0, None) @lldb_command('check_pmaps') def CheckPmapIntegrity(cmd_args=None): @@ -1362,7 +1427,7 @@ def CheckPmapIntegrity(cmd_args=None): Use of this macro without the [] argument is heavily discouraged. 
""" if not kern.arch.startswith('arm'): - raise NotImplementedError("showallmappings does not support {0}".format(kern.arch)) + raise NotImplementedError("check_pmaps does not support {0}".format(kern.arch)) targetPmap = None if len(cmd_args) > 0: targetPmap = cmd_args[0] @@ -1381,3 +1446,4 @@ def PmapsForLedger(cmd_args=None): for pmap in IterateQueue(kern.globals.map_pmap_list, 'pmap_t', 'pmaps'): if pmap.ledger == ledger: print "pmap: {:#x}".format(pmap) + diff --git a/tools/lldbmacros/process.py b/tools/lldbmacros/process.py index 3a107da8b..c7d5f493c 100755 --- a/tools/lldbmacros/process.py +++ b/tools/lldbmacros/process.py @@ -10,6 +10,20 @@ from core.lazytarget import * import time import xnudefines import memory +import json + +def GetProcName(proc): + """ returns a string name of the process. Longer variant is preffered if provided. + params: + proc: value object representing a proc in the kernel. + returns: + str: a string name of the process linked to the task. + """ + name = str(proc.p_name) + if name != '': + return name + else: + return str(proc.p_comm) def GetProcNameForTask(task): """ returns a string name of the process. if proc is not valid "unknown" is returned @@ -21,7 +35,8 @@ def GetProcNameForTask(task): if not task or not unsigned(task.bsd_info): return "unknown" p = Cast(task.bsd_info, 'proc *') - return str(p.p_comm) + + return GetProcName(p) def GetProcPIDForTask(task): """ returns a int pid of the process. if the proc is not valid, val[5] from audit_token is returned. @@ -33,7 +48,7 @@ def GetProcPIDForTask(task): if task and unsigned(task.bsd_info): p = Cast(task.bsd_info, 'proc *') return unsigned(p.p_pid) - + if task : return unsigned(task.audit_token.val[5]) @@ -47,9 +62,9 @@ def GetProcInfo(proc): str : A string describing various information for process. """ out_string = "" - out_string += ("Process {p: <#020x}\n\tname {p.p_comm: <20s}\n\tpid:{p.p_pid: <6d} " + + out_string += ("Process {p: <#020x}\n\tname {0: <32s}\n\tpid:{p.p_pid: <6d} " + "task:{p.task: <#020x} p_stat:{p.p_stat: <6d} parent pid: {p.p_ppid: <6d}\n" - ).format(p=proc) + ).format(GetProcName(proc), p=proc) #print the Creds ucred = proc.p_ucred if ucred: @@ -86,7 +101,7 @@ def GetProcNameForPid(pid): """ for p in kern.procs: if int(p.p_pid) == int(pid): - return str(p.p_comm) + return GetProcName(p) return "Unknown" def GetProcForPid(search_pid): @@ -606,7 +621,7 @@ def ShowTaskCoalitions(cmd_args=None, cmd_options={}): # EndMacro: showtaskcoalitions @lldb_type_summary(['proc', 'proc *']) -@header("{0: >6s} {1: <18s} {2: >11s} {3: ^10s} {4: <20s}".format("pid", "process", "io_policy", "wq_state", "command")) +@header("{0: >6s} {1: <18s} {2: >11s} {3: ^10s} {4: <32s}".format("pid", "process", "io_policy", "wq_state", "command")) def GetProcSummary(proc): """ Summarize the process data. params: @@ -615,7 +630,7 @@ def GetProcSummary(proc): str - string summary of the process. 
""" out_string = "" - format_string= "{0: >6d} {1: <#018x} {2: >11s} {3: >2d} {4: >2d} {5: >2d} {6: <20s}" + format_string= "{0: >6d} {1: <#018x} {2: >11s} {3: >2d} {4: >2d} {5: >2d} {6: <32s}" pval = proc.GetSBValue() #code.interact(local=locals()) if str(pval.GetType()) != str(gettype('proc *')) : @@ -665,7 +680,7 @@ def GetProcSummary(proc): wq_num_threads = -1 wq_idle_threads = -1 wq_req_threads = -1 - process_name = str(proc.p_comm) + process_name = GetProcName(proc) if process_name == 'xpcproxy': for thread in IterateQueue(task.threads, 'thread *', 'task_threads'): thread_name = GetThreadName(thread) @@ -798,8 +813,8 @@ def ShowProcFiles(cmd_args=None): while count <= proc_lastfile: if unsigned(proc_ofiles[count]) != 0: out_str = '' - proc_fd_flags = proc_ofiles[count].f_flags - proc_fd_fglob = proc_ofiles[count].f_fglob + proc_fd_flags = proc_ofiles[count].fp_flags + proc_fd_fglob = proc_ofiles[count].fp_glob out_str += "{0: <5d} ".format(count) out_str += "{0: <#18x} ".format(unsigned(proc_fd_fglob)) out_str += "0x{0:0>8x} ".format(unsigned(proc_fd_flags)) @@ -966,7 +981,7 @@ def DumpCallQueue(cmd_args=None): def ShowAllTaskIOStats(cmd_args=None): """ Commad to print I/O stats for all tasks """ - print "{0: <20s} {1: <20s} {2: <20s} {3: <20s} {4: <20s} {5: <20s} {6: <20s} {7: <20s} {8: <20s} {9: <20s}".format("task", "Immediate Writes", "Deferred Writes", "Invalidated Writes", "Metadata Writes", "Immediate Writes to External", "Deferred Writes to External", "Invalidated Writes to External", "Metadata Writes to External", "name") + print "{0: <20s} {1: <20s} {2: <20s} {3: <20s} {4: <20s} {5: <20s} {6: <20s} {7: <20s} {8: <20s} {9: <32}".format("task", "Immediate Writes", "Deferred Writes", "Invalidated Writes", "Metadata Writes", "Immediate Writes to External", "Deferred Writes to External", "Invalidated Writes to External", "Metadata Writes to External", "name") for t in kern.tasks: pval = Cast(t.bsd_info, 'proc *') print "{0: <#18x} {1: >20d} {2: >20d} {3: >20d} {4: >20d} {5: <20s} {6: <20s} {7: <20s} {8: <20s} {9: <20s}".format(t, @@ -978,7 +993,7 @@ def ShowAllTaskIOStats(cmd_args=None): t.task_writes_counters_external.task_deferred_writes, t.task_writes_counters_external.task_invalidated_writes, t.task_writes_counters_external.task_metadata_writes, - str(pval.p_comm)) + GetProcName(pval)) @lldb_command('showalltasks','C', fancy=True) @@ -1018,7 +1033,7 @@ def TaskForPmap(cmd_args=None): print GetTaskSummary.header + " " + GetProcSummary.header for tasklist in [kern.tasks, kern.terminated_tasks]: for t in tasklist: - if t.map.pmap == pmap: + if kern.GetValueFromAddress(unsigned(t.map.pmap), 'pmap_t') == pmap: pval = Cast(t.bsd_info, 'proc *') out_str = GetTaskSummary(t) + " " + GetProcSummary(pval) print out_str @@ -1070,7 +1085,7 @@ def FindTasksByName(searchstr, ignore_case=True): retval = [] for t in kern.tasks: pval = Cast(t.bsd_info, "proc *") - process_name = "{:s}".format(pval.p_comm) + process_name = "{:s}".format(GetProcName(pval)) if search_regex.search(process_name): retval.append(t) return retval @@ -1418,8 +1433,8 @@ def ShowProcTree(cmd_args=None): out_string = hdr_format.format("PID", "PROCESS", "POINTER") out_string += hdr_format.format('='*3, '='*7, '='*7) proc = GetProcForPid(search_pid) - out_string += "{0: <6d} {1: <14s} [ {2: #019x} ]\n".format(proc.p_ppid, proc.p_pptr.p_comm, unsigned(proc.p_pptr)) - out_string += "|--{0: <6d} {1: <16s} [ {2: #019x} ]\n".format(proc.p_pid, proc.p_comm, unsigned(proc)) + out_string += "{0: <6d} {1: <32s} [ {2: #019x} 
]\n".format(proc.p_ppid, GetProcName(proc.p_pptr), unsigned(proc.p_pptr)) + out_string += "|--{0: <6d} {1: <32s} [ {2: #019x} ]\n".format(proc.p_pid, GetProcName(proc), unsigned(proc)) print out_string ShowProcTreeRecurse(proc, "| ") @@ -1436,7 +1451,7 @@ def ShowProcTreeRecurse(proc, prefix=""): head_ptr = proc.p_children.lh_first for p in IterateListEntry(proc.p_children, 'struct proc *', 'p_sibling'): - print prefix + "|--{0: <6d} {1: <16s} [ {2: #019x} ]\n".format(p.p_pid, p.p_comm, unsigned(p)) + print prefix + "|--{0: <6d} {1: <32s} [ {2: #019x} ]\n".format(p.p_pid, GetProcName(p), unsigned(p)) ShowProcTreeRecurse(p, prefix + "| ") @lldb_command('showthreadfortid') @@ -1507,115 +1522,203 @@ def GetProcessorSummary(processor): preemption_disable_str) return out_str -def GetLedgerEntrySummary(ledger_template, ledger, i, show_footprint_interval_max=False): +def GetLedgerEntry(ledger_template, ledger, i): """ Internal function to get internals of a ledger entry (*not* a ledger itself) params: ledger_template - value representing struct ledger_template_t for the task or thread ledger - value representing struct ledger_entry * - return: str - formatted output information of ledger entries + return: entry - entry dictionary """ ledger_limit_infinity = (uint64_t(0x1).value << 63) - 1 lf_refill_scheduled = 0x0400 lf_tracking_max = 0x4000 - out_str = '' now = unsigned(kern.globals.sched_tick) / 20 lim_pct = 0 - out_str += "{: >32s} {:<2d}:".format(ledger_template.lt_entries[i].et_key, i) - out_str += "{: >15d} ".format(unsigned(ledger.le_credit) - unsigned(ledger.le_debit)) + entry = {} + + entry["key"] = str(ledger_template.lt_entries[i].et_key) + entry["credit"] = unsigned(ledger.le_credit) + entry["debit"] = unsigned(ledger.le_debit) + entry["balance"] = entry["credit"] - entry["debit"] if (ledger.le_flags & lf_tracking_max): - if (show_footprint_interval_max): - out_str += "{:12d} ".format(ledger._le._le_max.le_interval_max) - out_str += "{:14d} ".format(ledger._le._le_max.le_lifetime_max) - else: - if (show_footprint_interval_max): + entry["interval_max"] = unsigned(ledger._le._le_max.le_interval_max) + entry["lifetime_max"] = unsigned(ledger._le._le_max.le_lifetime_max) + + if (unsigned(ledger.le_limit) != ledger_limit_infinity): + entry["limit"] = unsigned(ledger.le_limit) + + if (ledger.le_flags & lf_refill_scheduled): + entry["refill_period"] = unsigned (ledger._le.le_refill.le_refill_period) + + if (unsigned(ledger.le_warn_percent) < 65535): + entry["warn_percent"] = unsigned (ledger.le_warn_percent * 100 / 65536) + entry["flags"] = int(ledger.le_flags) + + return entry + +def FormatLedgerEntrySummary(entry, i, show_footprint_interval_max=False): + """ internal function to format a ledger entry into a string + params: entry - A python dictionary containing the ledger entry + return: str - formatted output information of ledger entries + """ + out_str = '' + out_str += "{: >32s} {:<2d}:".format(entry["key"], i) + out_str += "{: >15d} ".format(entry["balance"]) + + if (show_footprint_interval_max): + if entry.has_key("interval_max"): + out_str += "{:12d} ".format(entry["interval_max"]) + else: out_str += " - " + + if entry.has_key("lifetime_max"): + out_str += "{:14d} ".format(entry["lifetime_max"]) + else: out_str += " - " - out_str += "{:12d} {:12d} ".format(unsigned(ledger.le_credit), unsigned(ledger.le_debit)) - if (unsigned(ledger.le_limit) != ledger_limit_infinity): - out_str += "{:12d} ".format(unsigned(ledger.le_limit)) + + out_str += "{:12d} {:12d} ".format(entry["credit"], 
entry["debit"]) + if entry.has_key("limit"): + out_str += "{:12d} ".format(unsigned(entry["limit"])) else: out_str += " - " - if (ledger.le_flags & lf_refill_scheduled): - out_str += "{:15d} ".format(ledger._le.le_refill.le_refill_period) + if entry.has_key("refill_period"): + out_str += "{:15d} ".format(entry["refill_period"]) + out_str += "{:9d} ".format((entry["limit"] * 100) / entry["refill_period"]) else: out_str += " - " - - if (ledger.le_flags & lf_refill_scheduled): - out_str += "{:9d} ".format((unsigned(ledger.le_limit) * 100) / ledger._le.le_refill.le_refill_period) - else: out_str += " - " - if (unsigned(ledger.le_warn_percent) < 65535): - out_str += "{:9d} ".format(unsigned(ledger.le_warn_percent * 100. / 65536)) + if entry.has_key("warn_percent"): + out_str += "{:9d} ".format(entry["warn_percent"]) else: out_str += " - " - if ((unsigned(ledger.le_credit) - unsigned(ledger.le_debit)) > unsigned(ledger.le_limit)): - out_str += " X " + if entry.has_key("limit"): + if entry["balance"] > entry["limit"]: + out_str += " X " + else: + out_str += " " else: out_str += " " - out_str += "{:#8x}\n".format(ledger.le_flags) + out_str += "{:#8x}\n".format(entry["flags"]) return out_str -def GetThreadLedgerSummary(thread_val): +def GetLedgerEntrySummary(ledger_template, ledger, i, show_footprint_interval_max=False): + """ internal function to get internals of a ledger entry (*not* a ledger itself) + params: ledger_template - value representing struct ledger_template_t for the task or thread + ledger - value representing struct ledger_entry * + return: str - formatted output information of ledger entries + """ + entry = GetLedgerEntry(ledger_template, ledger, i) + return FormatLedgerEntrySummary(entry, i) + + +def GetThreadLedgers(thread_val): """ Internal function to get a summary of ledger entries for the given thread - params: thread - value representing struct thread * - return: str - formatted output information for ledger entries of the input thread + params: thread_val - value representing struct thread * + return: thread - python dictionary containing threads's ledger entries. This can + be printed directly with FormatThreadLedgerSummmary or outputted as json. """ - out_str = " [{:#08x}]\n".format(thread_val) + thread = {} + thread["address"] = unsigned(thread_val) ledgerp = thread_val.t_threadledger + thread["entries"] = [] if ledgerp: i = 0 while i != ledgerp.l_template.lt_cnt: - out_str += GetLedgerEntrySummary(kern.globals.thread_ledger_template, - ledgerp.l_entries[i], i) + thread["entries"].append(GetLedgerEntry(kern.globals.thread_ledger_template, + ledgerp.l_entries[i], i)) i = i + 1 + return thread + +def FormatThreadLedgerSummary(thread): + """ Internal function to print a thread's ledger entries + params: thread - python dictionary containing thread's ledger entries + return: str - formatted output information for ledger entries of the input thread + """ + out_str = " [{:#08x}]\n".format(thread["address"]) + entries = thread["entries"] + for i, entry in enumerate(entries): + out_str += FormatLedgerEntrySummary(entry, i) return out_str -def GetTaskLedgers(task_val, show_footprint_interval_max=False): +def GetTaskLedgers(task_val): """ Internal function to get summary of ledger entries from the task and its threads params: task_val - value representing struct task * - return: str - formatted output information for ledger entries of the input task + return: task - python dictionary containing tasks's ledger entries. 
This can + be printed directly with FormatTaskLedgerSummary or outputted as json. """ - out_str = '' task_ledgerp = task_val.ledger i = 0 - out_str += "{: #08x} ".format(task_val) + tasks = [] + task = {} + task["address"] = unsigned(task_val) + pval = Cast(task_val.bsd_info, 'proc *') if pval: - out_str += "{: <5s}:\n".format(pval.p_comm) - else: - out_str += "Invalid process:\n" + task["name"] = GetProcName(pval) + task["pid"] = int(pval.p_pid) + + task["entries"] = [] while i != task_ledgerp.l_template.lt_cnt: - out_str += GetLedgerEntrySummary(kern.globals.task_ledger_template, task_ledgerp.l_entries[i], i, show_footprint_interval_max) + task["entries"].append(GetLedgerEntry(kern.globals.task_ledger_template, task_ledgerp.l_entries[i], i)) i = i + 1 # Now walk threads + task["threads"] = [] for thval in IterateQueue(task_val.threads, 'thread *', 'task_threads'): - out_str += GetThreadLedgerSummary(thval) + task["threads"].append(GetThreadLedgers(thval)) + + return task +@header("{0: <15s} {1: >16s} {2: <2s} {3: >15s} {4: >14s} {5: >12s} {6: >12s} {7: >12s} {8: <15s} {9: <8s} {10: <9s} {11: <6s} {12: >6s}".format( + "task [thread]", "entry", "#", "balance", "lifetime_max", "credit", + "debit", "limit", "refill period", "lim pct", "warn pct", "over?", "flags")) +def FormatTaskLedgerSummary(task, show_footprint_interval_max=False): + """ Internal function to get summary of ledger entries from the task and its threads + params: task_val - value representing struct task * + return: str - formatted output information for ledger entries of the input task + """ + out_str = '' + out_str += "{: #08x} ".format(task["address"]) + if task.has_key("name"): + out_str += "{: <5s}:\n".format(task["name"]) + else: + out_str += "Invalid process\n" + + for i, entry in enumerate(task["entries"]): + out_str += FormatLedgerEntrySummary(entry, i, show_footprint_interval_max) + + for thread in task["threads"]: + out_str += FormatThreadLedgerSummary(thread) return out_str + # Macro: showtaskledgers -@lldb_command('showtaskledgers', 'F:I') +@lldb_command('showtaskledgers', 'JF:I') def ShowTaskLedgers(cmd_args=None, cmd_options={}): """ Routine to print a summary of ledger entries for the task and all of its threads - or : showtaskledgers [ -I ] [ -F ] + or : showtaskledgers [ -I ] [-J] [ -F ] options: -I: show footprint interval max (DEV/DEBUG only) -F: specify task via name instead of address + -J: output json - """ + print_json = False if "-F" in cmd_options: task_list = FindTasksByName(cmd_options["-F"]) for tval in task_list: - print GetTaskLedgers.header - print GetTaskLedgers(tval) + print FormatTaskLedgerSummary.header + ledgers = GetTaskLedgers(tval) + print FormatTaskLedgerSummary(ledgers) return + if "-J" in cmd_options: + print_json = True if not cmd_args: raise ArgumentError("No arguments passed.") @@ -1625,28 +1728,40 @@ def ShowTaskLedgers(cmd_args=None, cmd_options={}): tval = kern.GetValueFromAddress(cmd_args[0], 'task *') if not tval: raise ArgumentError("unknown arguments: %r" %cmd_args) - if (show_footprint_interval_max): - print "{0: <15s} {1: >16s} {2: <2s} {3: >15s} {4: >12s} {5: >14s} {6: >12s} {7: >12s} {8: >12s} {9: <15s} {10: <8s} {11: <9s} {12: <6s} {13: >6s}".format( - "task [thread]", "entry", "#", "balance", "intrvl_max", "lifetime_max", "credit", - "debit", "limit", "refill period", "lim pct", "warn pct", "over?", "flags") + ledgers = GetTaskLedgers(tval) + if print_json: + print json.dumps(ledgers) else: - print "{0: <15s} {1: >16s} {2: <2s} {3: >15s} {4: >14s} {5: >12s} {6: >12s} 
{7: >12s} {8: <15s} {9: <8s} {10: <9s} {11: <6s} {12: >6s}".format( - "task [thread]", "entry", "#", "balance", "lifetime_max", "credit", - "debit", "limit", "refill period", "lim pct", "warn pct", "over?", "flags") - print GetTaskLedgers(tval, show_footprint_interval_max) + if (show_footprint_interval_max): + print "{0: <15s} {1: >16s} {2: <2s} {3: >15s} {4: >12s} {5: >14s} {6: >12s} {7: >12s} {8: >12s} {9: <15s} {10: <8s} {11: <9s} {12: <6s} {13: >6s}".format( + "task [thread]", "entry", "#", "balance", "intrvl_max", "lifetime_max", "credit", + "debit", "limit", "refill period", "lim pct", "warn pct", "over?", "flags") + else: + print FormatTaskLedgerSummary.header + print FormatTaskLedgerSummary(ledgers, show_footprint_interval_max) # EndMacro: showtaskledgers # Macro: showalltaskledgers -@lldb_command('showalltaskledgers') +@lldb_command('showalltaskledgers', "J") def ShowAllTaskLedgers(cmd_args=None, cmd_options={}): """ Routine to print a summary of ledger entries for all tasks and respective threads - Usage: showalltaskledgers + Usage: showalltaskledgers [-J] + -J : Output json """ + print_json = False + if "-J" in cmd_options: + print_json = True + tasks = [] for t in kern.tasks: task_val = unsigned(t) - ShowTaskLedgers([task_val], cmd_options=cmd_options) + if not print_json: + ShowTaskLedgers([task_val], cmd_options=cmd_options) + else: + tasks.append(GetTaskLedgers(t)) + if print_json: + print json.dumps(tasks) # EndMacro: showalltaskledgers @@ -1832,7 +1947,7 @@ def ShowAllPte(cmd_args=None): procp = Cast(taskp.bsd_info, 'proc *') out_str = "task = {:#x} pte = {:#x}\t".format(taskp, taskp.map.pmap.ttep) if procp != 0: - out_str += "{:s}\n".format(procp.p_comm) + out_str += "{:s}\n".format(GetProcName(procp)) else: out_str += "\n" print out_str @@ -1920,7 +2035,7 @@ def ShowProcFilesSummary(cmd_args=None): if unsigned(proc_ofiles[count]) != 0: proc_file_count += 1 count += 1 - print "{0: <#020x} {1: <20s} {2: >10d}".format(proc, proc.p_comm, proc_file_count) + print "{0: <#020x} {1: <32s} {2: >10d}".format(proc, GetProcName(proc), proc_file_count) #EndMacro: showprocfilessummary @@ -2080,7 +2195,7 @@ def Showstackafterthread(cmd_args = None): if(thval==threadval): pval = Cast(t.bsd_info, 'proc *') - process_name = "{:s}".format(pval.p_comm) + process_name = "{:s}".format(GetProcName(pval)) print "\n\n" print " *** Continuing to dump the thread stacks from the process *** :" + " " + process_name print "\n\n" diff --git a/tools/lldbmacros/scheduler.py b/tools/lldbmacros/scheduler.py index 983a027e1..a521b0d22 100755 --- a/tools/lldbmacros/scheduler.py +++ b/tools/lldbmacros/scheduler.py @@ -3,6 +3,7 @@ from utils import * from process import * from misc import * from memory import * +from ipc import * # TODO: write scheduler related macros here @@ -164,21 +165,28 @@ def ShowCurremtAbsTime(cmd_args=None): bucketStr = ["FIXPRI (>UI)", "TIMESHARE_FG", "TIMESHARE_IN", "TIMESHARE_DF", "TIMESHARE_UT", "TIMESHARE_BG"] -@header(" {:>18s} | {:>20s} | {:>20s} | {:>10s} | {:>10s}".format('Thread Group', 'Interactivity Score', 'Last Timeshare Tick', 'pri_shift', 'highq')) +@header("{:<18s} | {:>20s} | {:>20s} | {:>10s} | {:>10s}".format('Thread Group', 'Pending (us)', 'Interactivity Score', 'TG Boost', 'Highest Thread Pri')) def GetSchedClutchBucketSummary(clutch_bucket): - return " 0x{:>16x} | {:>20d} | {:>20d} | {:>10d} | {:>10d}".format(clutch_bucket.scb_clutch.sc_tg, clutch_bucket.scb_interactivity_score, clutch_bucket.scb_timeshare_tick, clutch_bucket.scb_pri_shift, 
clutch_bucket.scb_runq.highq) + tg_boost = kern.globals.sched_clutch_bucket_group_pri_boost[clutch_bucket.scb_group.scbg_clutch.sc_tg_priority] + pending_delta = kern.GetNanotimeFromAbstime(GetRecentTimestamp() - clutch_bucket.scb_group.scbg_pending_data.scct_timestamp) / 1000 + if (int)(clutch_bucket.scb_group.scbg_pending_data.scct_timestamp) == 18446744073709551615: + pending_delta = 0 + return "0x{:<16x} | {:>20d} | {:>20d} | {:>10d} | {:>10d}".format(clutch_bucket.scb_group.scbg_clutch.sc_tg, pending_delta, clutch_bucket.scb_group.scbg_interactivity_data.scct_count, tg_boost, SchedPriorityStableQueueRootPri(clutch_bucket.scb_thread_runq, 'struct thread', 'th_clutch_runq_link')) def ShowSchedClutchForPset(pset): root_clutch = pset.pset_clutch_root print "\n{:s} : {:d}\n\n".format("Current Timestamp", GetRecentTimestamp()) - print "{:>10s} | {:>20s} | {:>30s} | {:>18s} | {:>10s} | {:>10s} | {:>30s} | {:>30s} | {:>15s} | ".format("Root", "Root Buckets", "Clutch Buckets", "Address", "Priority", "Count", "CPU Usage (MATUs)", "CPU Blocked (MATUs)", "Deadline (abs)") + GetSchedClutchBucketSummary.header + print "{:>10s} | {:>20s} | {:>30s} | {:>25s} | {:<18s} | {:>10s} | {:>10s} | {:>15s} | ".format("Root", "Root Buckets", "Clutch Buckets", "Threads", "Address", "Pri (Base)", "Count", "Deadline (us)") + GetSchedClutchBucketSummary.header print "=" * 300 - print "{:>10s} | {:>20s} | {:>30s} | 0x{:16x} | {:>10d} | {:>10d} | {:>30s} | {:>30s} | {:>15s} | ".format("Root", "*", "*", addressof(root_clutch), root_clutch.scr_priority, root_clutch.scr_thr_count, "*", "*", "*") + print "{:>10s} | {:>20s} | {:>30s} | {:>25s} | 0x{:<16x} | {:>10d} | {:>10d} | {:>15s} | ".format("Root", "*", "*", "*", addressof(root_clutch), (root_clutch.scr_priority if root_clutch.scr_thr_count > 0 else -1), root_clutch.scr_thr_count, "*") print "-" * 300 for i in range(0, 6): - root_bucket = root_clutch.scr_buckets[i] - print "{:>10s} | {:>20s} | {:>30s} | 0x{:16x} | {:>10s} | {:>10s} | {:>30s} | {:>30s} | {:>15d} | ".format("*", bucketStr[i], "*", addressof(root_bucket), "*", "*", "*", "*", root_bucket.scrb_deadline) + root_bucket = root_clutch.scr_unbound_buckets[i] + root_bucket_deadline = 0 + if root_bucket.scrb_clutch_buckets.scbrq_count != 0 and i != 0: + root_bucket_deadline = kern.GetNanotimeFromAbstime(root_bucket.scrb_pqlink.deadline - GetRecentTimestamp()) / 1000 + print "{:>10s} | {:>20s} | {:>30s} | {:>25s} | 0x{:<16x} | {:>10s} | {:>10s} | {:>15d} | ".format("*", bucketStr[int(root_bucket.scrb_bucket)], "*", "*", addressof(root_bucket), "*", "*", root_bucket_deadline) clutch_bucket_runq = root_bucket.scrb_clutch_buckets clutch_bucket_list = [] for pri in range(0,128): @@ -188,9 +196,31 @@ def ShowSchedClutchForPset(pset): if len(clutch_bucket_list) > 0: clutch_bucket_list.sort(key=lambda x: x.scb_priority, reverse=True) for clutch_bucket in clutch_bucket_list: - cpu_used = clutch_bucket.scb_cpu_data.cpu_data.scbcd_cpu_used - cpu_blocked = clutch_bucket.scb_cpu_data.cpu_data.scbcd_cpu_blocked - print "{:>10s} | {:>20s} | {:>30s} | 0x{:16x} | {:>10d} | {:>10d} | {:>30d} | {:>30d} | {:>15s} | ".format("*", "*", clutch_bucket.scb_clutch.sc_tg.tg_name, clutch_bucket, clutch_bucket.scb_priority, clutch_bucket.scb_thr_count, cpu_used, cpu_blocked, "*") + GetSchedClutchBucketSummary(clutch_bucket) + print "{:>10s} | {:>20s} | {:>30s} | {:>25s} | {:<18s} | {:>10s} | {:>10s} | {:>15s} | ".format("", "", "", "", "", "", "", "") + print "{:>10s} | {:>20s} | {:>30s} | {:>25s} | 0x{:<16x} | {:>10d} | {:>10d} | {:>15s} 
| ".format("*", "*", clutch_bucket.scb_group.scbg_clutch.sc_tg.tg_name, "*", clutch_bucket, clutch_bucket.scb_priority, clutch_bucket.scb_thr_count, "*") + GetSchedClutchBucketSummary(clutch_bucket) + runq = clutch_bucket.scb_clutchpri_prioq + for thread in IterateSchedPriorityQueue(runq, 'struct thread', 'th_clutch_pri_link'): + thread_name = GetThreadName(thread)[-24:] + if len(thread_name) == 0: + thread_name = "" + print "{:>10s} | {:>20s} | {:>30s} | {:<25s} | 0x{:<16x} | {:>10d} | {:>10s} | {:>15s} | ".format("*", "*", "*", thread_name, thread, thread.base_pri, "*", "*") + print "-" * 300 + root_bucket = root_clutch.scr_bound_buckets[i] + root_bucket_deadline = 0 + if root_bucket.scrb_bound_thread_runq.count != 0: + root_bucket_deadline = kern.GetNanotimeFromAbstime(root_bucket.scrb_pqlink.deadline - GetRecentTimestamp()) / 1000 + print "{:>10s} | {:>20s} | {:>30s} | {:>25s} | 0x{:<16x} | {:>10s} | {:>10d} | {:>15d} | ".format("*", bucketStr[int(root_bucket.scrb_bucket)] + " [Bound]", "*", "*", addressof(root_bucket), "*", root_bucket.scrb_bound_thread_runq.count, root_bucket_deadline) + if root_bucket.scrb_bound_thread_runq.count == 0: + print "-" * 300 + continue + thread_runq = root_bucket.scrb_bound_thread_runq + for pri in range(0, 128): + thread_circleq = thread_runq.queues[pri] + for thread in IterateCircleQueue(thread_circleq, 'struct thread', 'runq_links'): + thread_name = GetThreadName(thread)[-24:] + if len(thread_name) == 0: + thread_name = "" + print "{:>10s} | {:>20s} | {:>30s} | {:<25s} | 0x{:<16x} | {:>10d} | {:>10s} | {:>15s} | ".format("*", "*", "*", thread_name, thread, thread.base_pri, "*", "*") print "-" * 300 @lldb_command('showschedclutch') @@ -214,9 +244,9 @@ def ShowSchedClutchRoot(cmd_args=[]): if not root: print "unknown arguments:", str(cmd_args) return False - print "{:>30s} : 0x{:16x}".format("Root", root) - print "{:>30s} : 0x{:16x}".format("Pset", root.scr_pset) - print "{:>30s} : {:d}".format("Priority", root.scr_priority) + print "{:>30s} : 0x{:<16x}".format("Root", root) + print "{:>30s} : 0x{:<16x}".format("Pset", root.scr_pset) + print "{:>30s} : {:d}".format("Priority", (root.scr_priority if root.scr_thr_count > 0 else -1)) print "{:>30s} : {:d}".format("Urgency", root.scr_urgency) print "{:>30s} : {:d}".format("Threads", root.scr_thr_count) print "{:>30s} : {:d}".format("Current Timestamp", GetRecentTimestamp()) @@ -233,9 +263,9 @@ def ShowSchedClutchRootBucket(cmd_args=[]): if not root_bucket: print "unknown arguments:", str(cmd_args) return False - print "{:<30s} : 0x{:16x}".format("Root Bucket", root_bucket) + print "{:<30s} : 0x{:<16x}".format("Root Bucket", root_bucket) print "{:<30s} : {:s}".format("Bucket Name", bucketStr[int(root_bucket.scrb_bucket)]) - print "{:<30s} : {:d}".format("Deadline", root_bucket.scrb_deadline) + print "{:<30s} : {:d}".format("Deadline", (root_bucket.scrb_pqlink.deadline if root_bucket.scrb_clutch_buckets.scbrq_count != 0 else 0)) print "{:<30s} : {:d}".format("Current Timestamp", GetRecentTimestamp()) print "\n" clutch_bucket_runq = root_bucket.scrb_clutch_buckets @@ -250,35 +280,23 @@ def ShowSchedClutchRootBucket(cmd_args=[]): print "=" * 240 clutch_bucket_list.sort(key=lambda x: x.scb_priority, reverse=True) for clutch_bucket in clutch_bucket_list: - print "{:>30s} | 0x{:16x} | {:>20d} | {:>20d} | ".format(clutch_bucket.scb_clutch.sc_tg.tg_name, clutch_bucket, clutch_bucket.scb_priority, clutch_bucket.scb_thr_count) + GetSchedClutchBucketSummary(clutch_bucket) + print "{:>30s} | 0x{:<16x} | {:>20d} | 
{:>20d} | ".format(clutch_bucket.scb_group.scbg_clutch.sc_tg.tg_name, clutch_bucket, clutch_bucket.scb_priority, clutch_bucket.scb_thr_count) + GetSchedClutchBucketSummary(clutch_bucket) -@lldb_command('showschedclutchbucket') -def ShowSchedClutchBucket(cmd_args=[]): - """ show information about a clutch bucket in the sched clutch hierarchy - Usage: showschedclutchbucket - """ - if not cmd_args: - raise ArgumentError("Invalid argument") - clutch_bucket = kern.GetValueFromAddress(cmd_args[0], "struct sched_clutch_bucket *") - if not clutch_bucket: - print "unknown arguments:", str(cmd_args) - return False - print "{:<30s} : 0x{:16x}".format("Clutch Bucket", clutch_bucket) - print "{:<30s} : {:s}".format("TG Name", clutch_bucket.scb_clutch.sc_tg.tg_name) +def SchedClutchBucketDetails(clutch_bucket): + print "{:<30s} : 0x{:<16x}".format("Clutch Bucket", clutch_bucket) + print "{:<30s} : {:s}".format("Scheduling Bucket", bucketStr[(int)(clutch_bucket.scb_bucket)]) + print "{:<30s} : 0x{:<16x}".format("Clutch Bucket Group", clutch_bucket.scb_group) + print "{:<30s} : {:s}".format("TG Name", clutch_bucket.scb_group.scbg_clutch.sc_tg.tg_name) print "{:<30s} : {:d}".format("Priority", clutch_bucket.scb_priority) print "{:<30s} : {:d}".format("Thread Count", clutch_bucket.scb_thr_count) - print "{:<30s} : 0x{:16x}".format("Thread Group", clutch_bucket.scb_clutch.sc_tg) - cpu_used = clutch_bucket.scb_cpu_data.cpu_data.scbcd_cpu_used - cpu_blocked = clutch_bucket.scb_cpu_data.cpu_data.scbcd_cpu_blocked - print "{:<30s} : {:d}".format("CPU Used (MATUs)", cpu_used) - print "{:<30s} : {:d}".format("CPU Blocked (MATUs)", cpu_blocked) - print "{:<30s} : {:d}".format("Interactivity Score", clutch_bucket.scb_interactivity_score) - print "{:<30s} : {:d}".format("Last Timeshare Update Tick", clutch_bucket.scb_timeshare_tick) - print "{:<30s} : {:d}".format("Priority Shift", clutch_bucket.scb_pri_shift) + print "{:<30s} : 0x{:<16x}".format("Thread Group", clutch_bucket.scb_group.scbg_clutch.sc_tg) + print "{:<30s} : {:6d} (inherited from clutch bucket group)".format("Interactivity Score", clutch_bucket.scb_group.scbg_interactivity_data.scct_count) + print "{:<30s} : {:6d} (inherited from clutch bucket group)".format("Last Timeshare Update Tick", clutch_bucket.scb_group.scbg_timeshare_tick) + print "{:<30s} : {:6d} (inherited from clutch bucket group)".format("Priority Shift", clutch_bucket.scb_group.scbg_pri_shift) print "\n" runq = clutch_bucket.scb_clutchpri_prioq thread_list = [] - for thread in IteratePriorityQueue(runq, 'struct thread', 'sched_clutchpri_link'): + for thread in IterateSchedPriorityQueue(runq, 'struct thread', 'th_clutch_pri_link'): thread_list.append(thread) if len(thread_list) > 0: print "=" * 240 @@ -286,7 +304,20 @@ def ShowSchedClutchBucket(cmd_args=[]): print "=" * 240 for thread in thread_list: proc = Cast(thread.task.bsd_info, 'proc *') - print GetThreadSummary(thread) + "{:s}".format(str(proc.p_comm)) + print GetThreadSummary(thread) + "{:s}".format(GetProcName(proc)) + +@lldb_command('showschedclutchbucket') +def ShowSchedClutchBucket(cmd_args=[]): + """ show information about a clutch bucket in the sched clutch hierarchy + Usage: showschedclutchbucket + """ + if not cmd_args: + raise ArgumentError("Invalid argument") + clutch_bucket = kern.GetValueFromAddress(cmd_args[0], "struct sched_clutch_bucket *") + if not clutch_bucket: + print "unknown arguments:", str(cmd_args) + return False + SchedClutchBucketDetails(clutch_bucket) @lldb_command('abs2nano') def 
ShowAbstimeToNanoTime(cmd_args=[]): @@ -318,7 +349,11 @@ def GetRecentTimestamp(): TODO: on x86, if not in the debugger, then look at the scheduler """ if kern.arch == 'x86_64': - return kern.globals.debugger_entry_time + most_recent_dispatch = GetSchedMostRecentDispatch(False) + if most_recent_dispatch > kern.globals.debugger_entry_time : + return most_recent_dispatch + else : + return kern.globals.debugger_entry_time else : return GetSchedMostRecentDispatch(False) @@ -336,7 +371,7 @@ def GetSchedMostRecentDispatch(show_processor_details=False): if unsigned(active_thread) != 0 : task_val = active_thread.task proc_val = Cast(task_val.bsd_info, 'proc *') - proc_name = "" if unsigned(proc_val) == 0 else str(proc_val.p_name) + proc_name = "" if unsigned(proc_val) == 0 else GetProcName(proc_val) last_dispatch = unsigned(current_processor.last_dispatch) @@ -381,7 +416,7 @@ def ShowThreadSchedHistory(thread, most_recent_dispatch): task_name = "unknown" if task and unsigned(task.bsd_info): p = Cast(task.bsd_info, 'proc *') - task_name = str(p.p_name) + task_name = GetProcName(p) sched_mode = "" @@ -661,6 +696,7 @@ def ShowScheduler(cmd_args=None): show_priority_pset_runq = 0 show_group_pset_runq = 0 show_clutch = 0 + show_edge = 0 sched_string = str(kern.globals.sched_string) if sched_string == "traditional": @@ -680,12 +716,14 @@ def ShowScheduler(cmd_args=None): show_priority_runq = 1 elif sched_string == "clutch": show_clutch = 1 + elif sched_string == "edge": + show_edge = 1 else : print "Unknown sched_string {:s}".format(sched_string) print "Scheduler: {:s}\n".format(sched_string) - if show_clutch == 0: + if show_clutch == 0 and show_edge == 0: run_buckets = kern.globals.sched_run_buckets run_count = run_buckets[GetEnumValue('sched_bucket_t::TH_BUCKET_RUN')] fixpri_count = run_buckets[GetEnumValue('sched_bucket_t::TH_BUCKET_FIXPRI')] @@ -804,8 +842,13 @@ def ShowScheduler(cmd_args=None): ShowRunQSummary(processor.runq) print " \n" - if show_clutch: - print "=== Clutch Scheduler Hierarchy ===\n\n" + if show_clutch or show_edge: + cluster_type = "SMP" + if pset.pset_type == 1: + cluster_type = "E" + elif pset.pset_type == 2: + cluster_type = "P" + print "=== Clutch Scheduler Hierarchy Pset{:d} (Type: {:s}) ] ===\n\n".format(pset.pset_cluster_id, cluster_type) ShowSchedClutchForPset(pset) pset = pset.pset_list @@ -948,6 +991,19 @@ def ParanoidIterateLinkageChain(queue_head, element_type, field_name, field_ofst ParanoidIterateLinkageChain.enable_paranoia = True ParanoidIterateLinkageChain.enable_debug = False +def LinkageChainEmpty(queue_head): + if not queue_head.GetSBValue().GetType().IsPointerType() : + queue_head = addressof(queue_head) + + # Mosh the value into a brand new value, to really get rid of its old cvalue history + # avoid using GetValueFromAddress + queue_head = value(queue_head.GetSBValue().CreateValueFromExpression(None,'(void *)'+str(unsigned(queue_head)))) + queue_head = cast(queue_head, 'struct queue_entry *') + + link = queue_head.next + + return unsigned(queue_head) == unsigned(link) + def bit_first(bitmap): return bitmap.bit_length() - 1 @@ -976,76 +1032,287 @@ def IterateBitmap(bitmap): # Macro: showallcallouts -def ShowThreadCall(prefix, call): +from kevent import GetKnoteKqueue + +def ShowThreadCall(prefix, call, recent_timestamp, is_pending=False): """ Print a description of a thread_call_t and its relationship to its expected fire time """ - func = call.tc_call.func - param0 = call.tc_call.param0 - param1 = call.tc_call.param1 - - iotes_desc = "" - iotes_callout = 
kern.GetLoadAddressForSymbol("_ZN18IOTimerEventSource17timeoutAndReleaseEPvS0_") - iotes_callout2 = kern.GetLoadAddressForSymbol("_ZN18IOTimerEventSource15timeoutSignaledEPvS0_") + func = call.tc_func + param0 = call.tc_param0 + param1 = call.tc_param1 - if (unsigned(func) == unsigned(iotes_callout) or - unsigned(func) == unsigned(iotes_callout2)) : - iotes = Cast(call.tc_call.param0, 'IOTimerEventSource*') - func = iotes.action - param0 = iotes.owner - param1 = unsigned(iotes) + is_iotes = False func_name = kern.Symbolicate(func) - if (func_name == "") : - func_name = FindKmodNameForAddr(func) - call_entry = call.tc_call + extra_string = "" - recent_timestamp = GetRecentTimestamp() + strip_func = kern.StripKernelPAC(unsigned(func)) + + func_syms = kern.SymbolicateFromAddress(strip_func) + # returns an array of SBSymbol + + if func_syms and func_syms[0] : + func_name = func_syms[0].GetName() + + try : + if ("IOTimerEventSource::timeoutAndRelease" in func_name or + "IOTimerEventSource::timeoutSignaled" in func_name) : + iotes = Cast(call.tc_param0, 'IOTimerEventSource*') + try: + func = iotes.action + param0 = iotes.owner + param1 = unsigned(iotes) + except AttributeError: + # This is horrible, horrible, horrible. But it works. Needed because IOEventSource hides the action member in an + # anonymous union when XNU_PRIVATE_SOURCE is set. To grab it, we work backwards from the enabled member. + func = dereference(kern.GetValueFromAddress(addressof(iotes.enabled) - sizeof('IOEventSource::Action'), 'uint64_t *')) + param0 = iotes.owner + param1 = unsigned(iotes) + + workloop = iotes.workLoop + thread = workloop.workThread + + is_iotes = True - # THREAD_CALL_CONTINUOUS 0x100 - kern.globals.mach_absolutetime_asleep - if (call.tc_flags & 0x100) : - timer_fire = call_entry.deadline - (recent_timestamp + kern.globals.mach_absolutetime_asleep) + # re-symbolicate the func we found inside the IOTES + strip_func = kern.StripKernelPAC(unsigned(func)) + func_syms = kern.SymbolicateFromAddress(strip_func) + if func_syms and func_syms[0] : + func_name = func_syms[0].GetName() + else : + func_name = str(FindKmodNameForAddr(func)) + + # cast from IOThread to thread_t, because IOThread is sometimes opaque + thread = Cast(thread, 'thread_t') + thread_id = thread.thread_id + thread_name = GetThreadName(thread) + + extra_string += "workloop thread: {:#x} ({:#x}) {:s}".format(thread, thread_id, thread_name) + + if "filt_timerexpire" in func_name : + knote = Cast(call.tc_param0, 'struct knote *') + kqueue = GetKnoteKqueue(knote) + proc = kqueue.kq_p + proc_name = GetProcName(proc) + proc_pid = proc.p_pid + + extra_string += "kq: {:#018x} {:s}[{:d}]".format(kqueue, proc_name, proc_pid) + + if "mk_timer_expire" in func_name : + timer = Cast(call.tc_param0, 'struct mk_timer *') + port = timer.port + + extra_string += "port: {:#018x} {:s}".format(port, GetPortDestinationSummary(port)) + + if "workq_kill_old_threads_call" in func_name : + workq = Cast(call.tc_param0, 'struct workqueue *') + proc = workq.wq_proc + proc_name = GetProcName(proc) + proc_pid = proc.p_pid + + extra_string += "{:s}[{:d}]".format(proc_name, proc_pid) + + if ("workq_add_new_threads_call" in func_name or + "realitexpire" in func_name): + proc = Cast(call.tc_param0, 'struct proc *') + proc_name = GetProcName(proc) + proc_pid = proc.p_pid + + extra_string += "{:s}[{:d}]".format(proc_name, proc_pid) + + except: + print "exception generating extra_string for call: {:#018x}".format(call) + if ShowThreadCall.enable_debug : + raise + + if (func_name == 
"") : + func_name = FindKmodNameForAddr(func) + + if (call.tc_flags & GetEnumValue('thread_call_flags_t::THREAD_CALL_FLAG_CONTINUOUS')) : + timer_fire = call.tc_pqlink.deadline - (recent_timestamp + kern.globals.mach_absolutetime_asleep) else : - timer_fire = call_entry.deadline - recent_timestamp + timer_fire = call.tc_pqlink.deadline - recent_timestamp timer_fire_s = kern.GetNanotimeFromAbstime(timer_fire) / 1000000000.0 ttd_s = kern.GetNanotimeFromAbstime(call.tc_ttd) / 1000000000.0 - print "{:s}{:#018x}: {:18d} {:18d} {:03.06f} {:03.06f} {:#018x}({:#018x},{:#018x}) ({:s})".format(prefix, - unsigned(call), call_entry.deadline, call.tc_soft_deadline, ttd_s, timer_fire_s, - func, param0, param1, func_name) + if (is_pending) : + pending_time = call.tc_pending_timestamp - recent_timestamp + pending_time = kern.GetNanotimeFromAbstime(pending_time) / 1000000000.0 + + flags = int(call.tc_flags) + # TODO: extract this out of the thread_call_flags_t enum + thread_call_flags = {0x0:'', 0x1:'A', 0x2:'W', 0x4:'D', 0x8:'R', 0x10:'S', 0x20:'O', + 0x40:'P', 0x80:'L', 0x100:'C'} + + flags_str = '' + mask = 0x1 + while mask <= 0x100 : + flags_str += thread_call_flags[int(flags & mask)] + mask = mask << 1 + + if is_iotes : + flags_str += 'I' + + if (is_pending) : + print ("{:s}{:#018x}: {:18d} {:18d} {:16.06f} {:16.06f} {:16.06f} {:9s} " + + "{:#018x} ({:#018x}, {:#018x}) ({:s}) {:s}").format(prefix, + unsigned(call), call.tc_pqlink.deadline, call.tc_soft_deadline, ttd_s, + timer_fire_s, pending_time, flags_str, + func, param0, param1, func_name, extra_string) + else : + print ("{:s}{:#018x}: {:18d} {:18d} {:16.06f} {:16.06f} {:9s} " + + "{:#018x} ({:#018x}, {:#018x}) ({:s}) {:s}").format(prefix, + unsigned(call), call.tc_pqlink.deadline, call.tc_soft_deadline, ttd_s, + timer_fire_s, flags_str, + func, param0, param1, func_name, extra_string) + +ShowThreadCall.enable_debug = False + +@header("{:>18s} {:>18s} {:>18s} {:>16s} {:>16s} {:9s} {:>18s}".format( + "entry", "deadline", "soft_deadline", + "duration (s)", "to go (s)", "flags", "(*func) (param0, param1)")) +def PrintThreadGroup(group): + header = PrintThreadGroup.header + pending_header = "{:>18s} {:>18s} {:>18s} {:>16s} {:>16s} {:>16s} {:9s} {:>18s}".format( + "entry", "deadline", "soft_deadline", + "duration (s)", "to go (s)", "pending", "flags", "(*func) (param0, param1)") + + recent_timestamp = GetRecentTimestamp() + + idle_timestamp_distance = group.idle_timestamp - recent_timestamp + idle_timestamp_distance_s = kern.GetNanotimeFromAbstime(idle_timestamp_distance) / 1000000000.0 + + is_parallel = "" + + if (group.tcg_flags & GetEnumValue('thread_call_group_flags_t::TCG_PARALLEL')) : + is_parallel = " (parallel)" + + print "Group: {g.tcg_name:s} ({:#18x}){:s}".format(unsigned(group), is_parallel, g=group) + print "\t" +"Thread Priority: {g.tcg_thread_pri:d}\n".format(g=group) + print ("\t" +"Active: {g.active_count:<3d} Idle: {g.idle_count:<3d}" + + "Blocked: {g.blocked_count:<3d} Pending: {g.pending_count:<3d}" + + "Target: {g.target_thread_count:<3d}\n").format(g=group) + + if unsigned(group.idle_timestamp) is not 0 : + print "\t" +"Idle Timestamp: {g.idle_timestamp:d} ({:03.06f})\n".format(idle_timestamp_distance_s, + g=group) + + print "\t" +"Pending Queue: ({:>#18x})\n".format(addressof(group.pending_queue)) + if not LinkageChainEmpty(group.pending_queue) : + print "\t\t" + pending_header + for call in ParanoidIterateLinkageChain(group.pending_queue, "thread_call_t", "tc_qlink"): + ShowThreadCall("\t\t", call, recent_timestamp, 
is_pending=True) + + print "\t" +"Delayed Queue (Absolute Time): ({:>#18x}) timer: ({:>#18x})\n".format( + addressof(group.delayed_queues[0]), addressof(group.delayed_timers[0])) + if not LinkageChainEmpty(group.delayed_queues[0]) : + print "\t\t" + header + for call in ParanoidIterateLinkageChain(group.delayed_queues[0], "thread_call_t", "tc_qlink"): + ShowThreadCall("\t\t", call, recent_timestamp) + + print "\t" +"Delayed Queue (Continuous Time): ({:>#18x}) timer: ({:>#18x})\n".format( + addressof(group.delayed_queues[1]), addressof(group.delayed_timers[1])) + if not LinkageChainEmpty(group.delayed_queues[1]) : + print "\t\t" + header + for call in ParanoidIterateLinkageChain(group.delayed_queues[1], "thread_call_t", "tc_qlink"): + ShowThreadCall("\t\t", call, recent_timestamp) + +def PrintThreadCallThreads() : + callout_flag = GetEnumValue('thread_tag_t::THREAD_TAG_CALLOUT') + recent_timestamp = GetRecentTimestamp() + + for thread in IterateQueue(kern.globals.kernel_task.threads, 'thread *', 'task_threads'): + if (thread.thread_tag & callout_flag) : + print " {:#20x} {:#12x} {:s}".format(thread, thread.thread_id, GetThreadName(thread)) + state = thread.thc_state + if state and state.thc_call : + print "\t" + PrintThreadGroup.header + ShowThreadCall("\t", state.thc_call, recent_timestamp) + soft_deadline = state.thc_call_soft_deadline + slop_time = state.thc_call_hard_deadline - soft_deadline + slop_time = kern.GetNanotimeFromAbstime(slop_time) / 1000000000.0 + print "\t original soft deadline {:d}, hard deadline {:d} (leeway {:.06f}s)".format( + soft_deadline, state.thc_call_hard_deadline, slop_time) + enqueue_time = state.thc_call_pending_timestamp - soft_deadline + enqueue_time = kern.GetNanotimeFromAbstime(enqueue_time) / 1000000000.0 + print "\t time to enqueue after deadline: {:.06f}s (enqueued at: {:d})".format( + enqueue_time, state.thc_call_pending_timestamp) + wait_time = state.thc_call_start - state.thc_call_pending_timestamp + wait_time = kern.GetNanotimeFromAbstime(wait_time) / 1000000000.0 + print "\t time to start executing after enqueue: {:.06f}s (executing at: {:d})".format( + wait_time, state.thc_call_start) + + if (state.thc_IOTES_invocation_timestamp) : + iotes_acquire_time = state.thc_IOTES_invocation_timestamp - state.thc_call_start + iotes_acquire_time = kern.GetNanotimeFromAbstime(iotes_acquire_time) / 1000000000.0 + print "\t IOTES acquire time: {:.06f}s (acquired at: {:d})".format( + iotes_acquire_time, state.thc_IOTES_invocation_timestamp) + + +@lldb_command('showcalloutgroup') +def ShowCalloutGroup(cmd_args=None): + """ Prints out the pending and delayed thread calls for a specific group + + Pass 'threads' to show the thread call threads themselves. 
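# Illustrative sketch (editorial, not part of the patch): how the flags column printed by
# ShowThreadCall decodes, using the same bit-to-letter table built above. The function
# name and the sample value are assumptions for illustration only.
def decode_callout_flags(flags):
    letters = {0x1: 'A', 0x2: 'W', 0x4: 'D', 0x8: 'R', 0x10: 'S',
               0x20: 'O', 0x40: 'P', 0x80: 'L', 0x100: 'C'}
    out = ''
    mask = 0x1
    while mask <= 0x100:
        if flags & mask:
            out += letters[mask]
        mask <<= 1
    return out
# e.g. a delayed, running, continuous-time callout (0x4 | 0x8 | 0x100 = 0x10c)
# renders as decode_callout_flags(0x10c) == 'DRC'.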
+ + Callout flags: + + A - Allocated memory owned by thread_call.c + W - Wait - thread waiting for call to finish running + D - Delayed - deadline based + R - Running - currently executing on a thread + S - Signal - call from timer interrupt instead of thread + O - Once - pend the enqueue if re-armed while running + P - Reschedule pending - enqueue is pending due to re-arm while running + L - Rate-limited - (App Nap) + C - Continuous time - Timeout is in mach_continuous_time + I - Callout is an IOTimerEventSource + """ + if not cmd_args: + print "No arguments passed" + print ShowCalloutGroup.__doc__ + return False + + if "threads" in cmd_args[0] : + PrintThreadCallThreads() + return + + group = kern.GetValueFromAddress(cmd_args[0], 'struct thread_call_group *') + if not group: + print "unknown arguments:", str(cmd_args) + return False + + PrintThreadGroup(group) @lldb_command('showallcallouts') def ShowAllCallouts(cmd_args=None): """ Prints out the pending and delayed thread calls for the thread call groups - """ + Callout flags: + + A - Allocated memory owned by thread_call.c + W - Wait - thread waiting for call to finish running + D - Delayed - deadline based + R - Running - currently executing on a thread + S - Signal - call from timer interrupt instead of thread + O - Once - pend the enqueue if re-armed while running + P - Reschedule pending - enqueue is pending due to re-arm while running + L - Rate-limited - (App Nap) + C - Continuous time - Timeout is in mach_continuous_time + I - Callout is an IOTimerEventSource + """ index_max = GetEnumValue('thread_call_index_t::THREAD_CALL_INDEX_MAX') for i in range (0, index_max) : - group = kern.globals.thread_call_groups[i] - - print "Group {i:d}: {g.tcg_name:s} ({:>#18x})".format(addressof(group), i=i, g=group) - print "\t" +"Active: {g.active_count:d} Idle: {g.idle_count:d}\n".format(g=group) - print "\t" +"Blocked: {g.blocked_count:d} Pending: {g.pending_count:d}\n".format(g=group) - print "\t" +"Target: {g.target_thread_count:d}\n".format(g=group) - - print "\t" +"Pending Queue: ({:>#18x})\n".format(addressof(group.pending_queue)) - for call in ParanoidIterateLinkageChain(group.pending_queue, "thread_call_t", "tc_call.q_link"): - ShowThreadCall("\t\t", call) - - print "\t" +"Delayed Queue (Absolute Time): ({:>#18x}) timer: ({:>#18x})\n".format( - addressof(group.delayed_queues[0]), addressof(group.delayed_timers[0])) - for call in ParanoidIterateLinkageChain(group.delayed_queues[0], "thread_call_t", "tc_call.q_link"): - ShowThreadCall("\t\t", call) - - print "\t" +"Delayed Queue (Continuous Time): ({:>#18x}) timer: ({:>#18x})\n".format( - addressof(group.delayed_queues[1]), addressof(group.delayed_timers[1])) - for call in ParanoidIterateLinkageChain(group.delayed_queues[1], "thread_call_t", "tc_call.q_link"): - ShowThreadCall("\t\t", call) + group = addressof(kern.globals.thread_call_groups[i]) + PrintThreadGroup(group) + + print "Thread Call Threads:" + PrintThreadCallThreads() # EndMacro: showallcallouts diff --git a/tools/lldbmacros/skywalk.py b/tools/lldbmacros/skywalk.py index f0cbae8fa..1017a0547 100755 --- a/tools/lldbmacros/skywalk.py +++ b/tools/lldbmacros/skywalk.py @@ -26,7 +26,7 @@ def IterateProcChannels(proc): count = 0 while count <= proc_lastfile: if unsigned(proc_ofiles[count]) != 0: - proc_fd_fglob = proc_ofiles[count].f_fglob + proc_fd_fglob = proc_ofiles[count].fp_glob if (unsigned(proc_fd_fglob.fg_ops.fo_type) == 10): yield Cast(proc_fd_fglob.fg_data, 'kern_channel *') count += 1 @@ -75,8 +75,8 @@ def 
GetKernChannelSummary(kc): u=GetUUIDSummary(kc.ch_info.cinfo_ch_id)) @lldb_type_summary(['__kern_channel_ring *']) -@header('{:<20s} {:<65s} {:>10s} | {:<5s} {:<5s} | {:<5s} {:<5s} {:<5s} | {:<5s} {:<5s} {:<5s}'.format( - 'kernchannelring', 'name', 'flags', 'kc', 'kt', 'rc', 'rh', 'rt', 'c', 'h', 't')) +@header('{:<20s} {:<65s} {:>10s} | {:<5s} {:<5s} | {:<5s} {:<5s} | {:<5s} {:<5s}'.format( + 'kernchannelring', 'name', 'flags', 'kh', 'kt', 'rh', 'rt', 'h', 't')) def GetKernChannelRingSummary(kring): """ Summarizes a __kern_channel_ring and related information @@ -456,7 +456,7 @@ def IterateProcNECP(proc): count = 0 while count <= proc_lastfile: if unsigned(proc_ofiles[count]) != 0: - proc_fd_fglob = proc_ofiles[count].f_fglob + proc_fd_fglob = proc_ofiles[count].fp_glob if (unsigned(proc_fd_fglob.fg_ops.fo_type) == 9): yield Cast(proc_fd_fglob.fg_data, 'necp_fd_data *') count += 1 @@ -607,43 +607,33 @@ def ShowNexuses(cmd_args=None): for nx_str in nexus_summaries: print "{0:s}".format(nx_str) -def GetSockAddr4(sin): - return GetInAddrAsString(sin.sin_addr) +def GetSockAddr4(in_addr): + return inet_ntoa(struct.pack("!I", in_addr.sin_addr)) -def GetSockAddr6(sin6): - addr = sin6.sin6_addr.__u6_addr.__u6_addr8 +def GetSockAddr6(in6_addr): + addr = in6_addr.__u6_addr.__u6_addr8 addr_raw_string = ":".join(["{0:02x}{0:02x}".format(unsigned(addr[i]), unsigned(addr[i+1])) for i in range(0, 16, 2)]) return inet_ntop(AF_INET6, inet_pton(AF_INET6, addr_raw_string)) -def GetSockAddr46(sockaddr46): - if sockaddr46 is None : - raise ArgumentError('sockaddr is None') - if (sockaddr46.sa.sa_family == 2): - return GetSockAddr4(sockaddr46.sin) - elif (sockaddr46.sa.sa_family == 30): - return GetSockAddr6(sockaddr46.sin6) +def FlowKeyStr(fk): + if fk.fk_ipver == 0x4: + src_str = GetSockAddr4(fk.fk_src._v4) + dst_str = GetSockAddr4(fk.fk_dst._v4) + elif fk.fk_ipver == 0x60: + src_str = GetSockAddr6(fk.fk_src._v6) + dst_str = GetSockAddr6(fk.fk_dst._v6) else: - raise ArgumentError('invalid sockaddr_in_4_6 address family') - -def GetSockPort46(sockaddr46): - if sockaddr46 is None : - raise ArgumentError('sockaddr is None') - if (sockaddr46.sa.sa_family == 2): - return ntohs(sockaddr46.sin.sin_port) - elif (sockaddr46.sa.sa_family == 30): - return ntohs(sockaddr46.sin6.sin6_port) - else: - raise ArgumentError('invalid sockaddr_in_4_6 address family') + return "unkown ipver" + + return "src={},dst={},proto={},sport={},dport={}".format(src_str, dst_str, + unsigned(fk.fk_proto), ntohs(fk.fk_sport), ntohs(fk.fk_dport)) def FlowEntryStr(fe): - return "(struct flow_entry*){} src={},dst={},proto={},sport={},dport={} ".format( - hex(fe), GetSockAddr46(fe.fe_laddr), GetSockAddr46(fe.fe_faddr), - unsigned(fe.fe_key.fk_proto), GetSockPort46(fe.fe_laddr), - GetSockPort46(fe.fe_faddr), fe.fe_owner_name) + return "(struct flow_entry*){} {} ".format(hex(fe), FlowKeyStr(fe.fe_key)) def GetFlowEntryPid(fe): - return fe.fe_owner_pid + return fe.fe_pid def GetFlowswitchFlowEntries(fsw): fm = kern.GetValueFromAddress(unsigned(fsw.fsw_flow_mgr), 'struct flow_mgr *') diff --git a/tools/lldbmacros/structanalyze.py b/tools/lldbmacros/structanalyze.py index ba262e329..595e41299 100755 --- a/tools/lldbmacros/structanalyze.py +++ b/tools/lldbmacros/structanalyze.py @@ -3,13 +3,55 @@ from xnu import * _UnionStructClass = [ lldb.eTypeClassStruct, lldb.eTypeClassClass, lldb.eTypeClassUnion ] -def _showStructPacking(O, symbol, begin_offset=0, symsize=0, typedef=None, outerSize=0, memberName=None): +def _get_offset_formatter(ctx, fmt_hex, 
fmt_dec): + """ Returns a formatter of struct member offsets and sizes. + + params: + ctx - configuration context + fmt_hex - hexadecimal format + fmt_dec - decimal format + returns: + offset formatter """ - recursively parse the field members of structure. - params : O the output formatter (standard.py) - symbol (lldb.SBType) reference to symbol in binary - returns: string containing lines of output. + O = ctx[0] + use_hex = ctx[1] + if use_hex: + fmt = fmt_hex + else: + fmt = fmt_dec + return lambda o, s: O.format(fmt, o, s) + +def _get_num_formatter(ctx, fmt_hex, fmt_dec): + """ Returns a number formatter. + + params: + ctx - configuration context + fmt_hex - hexadecimal format + fmt_dec - decimal format + returns: + number formatter + """ + O = ctx[0] + use_hex = ctx[1] + if use_hex: + fmt = fmt_hex + else: + fmt = fmt_dec + return lambda n: O.format(fmt, n) + +def _showStructPacking(ctx, symbol, begin_offset=0, symsize=0, typedef=None, outerSize=0, memberName=None): + """ Recursively parse the field members of structure. + + params : + ctx - context containing configuration settings and the output formatter (standard.py) symbol (lldb.SBType) reference to symbol in binary + returns: + string containing lines of output. """ + + O = ctx[0] + format_offset = _get_offset_formatter(ctx, "{:#06x},[{:#6x}]", "{:04d},[{:4d}]") + format_num = _get_num_formatter(ctx, "{:#04x}", "{:2d}") + ctype = "unknown type" is_union = False is_class = False @@ -27,12 +69,13 @@ def _showStructPacking(O, symbol, begin_offset=0, symsize=0, typedef=None, outer is_class = True if not outerSize or outerSize == sym_size: - outstr = O.format("{:04d},[{:4d}]", begin_offset, sym_size) + outstr = format_offset(begin_offset, sym_size) elif outerSize < sym_size: # happens with c++ inheritance - outstr = O.format("{:04d},[{:4d}]", begin_offset, outerSize) + outstr = format_offset(begin_offset, outerSize) else: - outstr = O.format("{:04d},[{:4d}]{VT.DarkRed}{{{:+d}}}{VT.Default}", - begin_offset, sym_size, outerSize - sym_size) + outstr = O.format("{:s}{VT.DarkRed}{{{:s}}}{VT.Default}", + format_offset(begin_offset, sym_size), + format_num(outerSize - sym_size)) if typedef: outstr += O.format(" {0}", typedef) @@ -73,7 +116,7 @@ def _showStructPacking(O, symbol, begin_offset=0, symsize=0, typedef=None, outer _previous_size = m_size _packed_bit_offset = member.GetOffsetInBits() + m_size_bits - _showStructPacking(O, m_type, m_offset, str(m_type), outerSize=m_size, memberName=m_name) + _showStructPacking(ctx, m_type, m_offset, str(m_type), outerSize=m_size, memberName=m_name) for i in range(_nfields): member = symbol.GetFieldAtIndex(i) @@ -95,11 +138,12 @@ def _showStructPacking(O, symbol, begin_offset=0, symsize=0, typedef=None, outer m_previous_offset = begin_offset + _packed_bit_offset / 8 m_hole_bits = m_offset_bits - _packed_bit_offset if _packed_bit_offset % 8 == 0: - print O.format("{:04d},[{:4d}] ({VT.DarkRed}*** padding ***{VT.Default})", - m_previous_offset, m_hole_bits / 8) + print O.format("{:s} ({VT.DarkRed}*** padding ***{VT.Default})", + format_offset(m_previous_offset, m_hole_bits / 8)) else: - print O.format("{:04d},[{:4d}] ({VT.Brown}*** padding : {:d} ***{VT.Default})", - m_previous_offset, _previous_size, m_hole_bits) + print O.format("{:s} ({VT.Brown}*** padding : {:s} ***{VT.Default})", + format_offset(m_previous_offset, _previous_size), + format_num(m_hole_bits)) _previous_size = m_size _packed_bit_offset = m_offset_bits + m_size_bits @@ -109,17 +153,19 @@ def _showStructPacking(O, symbol, 
begin_offset=0, symsize=0, typedef=None, outer _canonical_type_class = m_type.GetCanonicalType().GetTypeClass() if _type_class == lldb.eTypeClassTypedef and _canonical_type_class in _UnionStructClass: - _showStructPacking(O, _canonical_type, m_offset, str(m_type), outerSize=union_size, memberName=m_name) + _showStructPacking(ctx, _canonical_type, m_offset, str(m_type), outerSize=union_size, memberName=m_name) elif _type_class in _UnionStructClass: - _showStructPacking(O, m_type, m_offset, outerSize=union_size, memberName=m_name) + _showStructPacking(ctx, m_type, m_offset, outerSize=union_size, memberName=m_name) else: - outstr = O.format("{:04d},[{:4d}]", m_offset, m_size) + outstr = format_offset(m_offset, m_size) if is_union and union_size != m_size_bits / 8: - outstr += O.format("{VT.DarkRed}{{{:+d}}}{VT.Default}", - union_size - m_size_bits / 8) + outstr += O.format("{VT.DarkRed}{{{:s}}}{VT.Default}", + format_num(union_size - m_size_bits / 8)) if m_is_bitfield: - outstr += O.format(" ({VT.DarkGreen}{:s} : {:d}{VT.Default}) {:s}", - m_type.GetName(), m_size_bits, m_name) + outstr += O.format(" ({VT.DarkGreen}{:s} : {:s}{VT.Default}) {:s}", + m_type.GetName(), + format_num(m_size_bits), + m_name) else: outstr += O.format(" ({VT.DarkGreen}{:s}{VT.Default}) {:s}", m_type.GetName(), m_name) @@ -129,30 +175,33 @@ def _showStructPacking(O, symbol, begin_offset=0, symsize=0, typedef=None, outer if not is_union and _packed_bit_offset < referenceSize * 8: m_previous_offset = begin_offset + _packed_bit_offset / 8 m_hole_bits = referenceSize * 8 - _packed_bit_offset - offset = _packed_bit_offset / 8 + begin_offset if _packed_bit_offset % 8 == 0: - print O.format("{:04d},[{:4d}] ({VT.DarkRed}*** padding ***{VT.Default})", - m_previous_offset, m_hole_bits / 8) + print O.format("{:s} ({VT.DarkRed}*** padding ***{VT.Default})", + format_offset(m_previous_offset, m_hole_bits / 8)) else: - print O.format("{:04d},[{:4d}] ({VT.Brown}padding : {:d}{VT.Default})\n", - m_previous_offset, _previous_size, m_hole_bits) + print O.format("{:s} ({VT.Brown}padding : {:s}{VT.Default})\n", + format_offset(m_previous_offset, _previous_size), + format_num(m_hole_bits)) print "}" -@lldb_command('showstructpacking', fancy=True) +@lldb_command('showstructpacking', "X" , fancy=True) def showStructInfo(cmd_args=None, cmd_options={}, O=None): - """Show how a structure is packed in the binary. The format is - , [] () - - For example: - (lldb) showstructpacking pollfd - 0,[ 8] struct pollfd { - 0,[ 4] (int) fd - 4,[ 2] (short) events - 6,[ 2] (short) revents - } - - syntax: showstructpacking task + """ Show how a structure is packed in the binary. 
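+
+      Fields are walked recursively; padding holes are flagged inline (see the
+      "*** padding ***" markers in the output).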
+ + Usage: showstructpacking [-X] + -X : prints struct members offsets and sizes in a hexadecimal format (decimal is default) + + The format is: + , [] () + + Example: + (lldb) showstructpacking pollfd + 0,[ 8] struct pollfd { + 0,[ 4] (int) fd + 4,[ 2] (short) events + 6,[ 2] (short) revents + } """ if not cmd_args: raise ArgumentError("Please provide a type name.") @@ -169,6 +218,8 @@ def showStructInfo(cmd_args=None, cmd_options={}, O=None): if sym.GetTypeClass() not in _UnionStructClass: return O.error("{0} is not a structure/union/class type", ty_name) - _showStructPacking(O, sym, 0) + ctx = (O, "-X" in cmd_options) + + _showStructPacking(ctx, sym, 0) # EndMacro: showstructinto diff --git a/tools/lldbmacros/turnstile.py b/tools/lldbmacros/turnstile.py index 1f7c731d6..8135529bc 100755 --- a/tools/lldbmacros/turnstile.py +++ b/tools/lldbmacros/turnstile.py @@ -158,7 +158,7 @@ def ShowThreadInheritorBase(cmd_args=None, cmd_options={}, O=None): thread = kern.GetValueFromAddress(cmd_args[0], "thread_t") with O.table(GetTurnstileSummary.header): - for turnstile in IteratePriorityQueue(thread.base_inheritor_queue, 'struct turnstile', 'ts_inheritor_links'): + for turnstile in IterateSchedPriorityQueue(thread.base_inheritor_queue, 'struct turnstile', 'ts_inheritor_links'): PrintTurnstile(turnstile) @lldb_command('showthreadschedturnstiles', fancy=True) @@ -172,6 +172,6 @@ def ShowThreadInheritorSched(cmd_args=None, cmd_options={}, O=None): thread = kern.GetValueFromAddress(cmd_args[0], "thread_t") with O.table(GetTurnstileSummary.header): - for turnstile in IteratePriorityQueue(thread.sched_inheritor_queue, 'struct turnstile', 'ts_inheritor_links'): + for turnstile in IterateSchedPriorityQueue(thread.sched_inheritor_queue, 'struct turnstile', 'ts_inheritor_links'): PrintTurnstile(turnstile) #endif diff --git a/tools/lldbmacros/userspace.py b/tools/lldbmacros/userspace.py index f8844b3dc..329ccaa5b 100755 --- a/tools/lldbmacros/userspace.py +++ b/tools/lldbmacros/userspace.py @@ -106,7 +106,8 @@ def ShowARMUserStack(thread, user_lib_info = None): def ShowARM64UserStack(thread, user_lib_info = None): SAVED_STATE_FLAVOR_ARM=20 SAVED_STATE_FLAVOR_ARM64=21 - upcb = thread.machine.upcb + upcb_addr = kern.StripKernelPAC(thread.machine.upcb) + upcb = kern.GetValueFromAddress(upcb_addr, 'arm_saved_state_t *') flavor = upcb.ash.flavor frameformat = "{0:>2d} FP: 0x{1:x} PC: 0x{2:x}" if flavor == SAVED_STATE_FLAVOR_ARM64: @@ -296,8 +297,8 @@ Synthetic crash log generated from Kernel userstacks osversion += " ({:s})".format(kern.globals.osversion) if pval: pid = pval.p_pid - pname = pval.p_comm - path = pval.p_comm + pname = GetProcName(pval) + path = GetProcName(pval) ppid = pval.p_ppid else: pid = 0 diff --git a/tools/lldbmacros/usertaskdebugging/gdbserver.py b/tools/lldbmacros/usertaskdebugging/gdbserver.py index 53f788b14..ec58e079c 100755 --- a/tools/lldbmacros/usertaskdebugging/gdbserver.py +++ b/tools/lldbmacros/usertaskdebugging/gdbserver.py @@ -150,7 +150,7 @@ class GDBServer(object): def getRegisterData(self, query): if query[0] == 'g': - #TODO should implement thissometime. Considering getThreadRegistersInfo is there + #TODO should implement this sometime. Considering getThreadRegistersInfo is there #we wont need this one. 
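# Returning UnSupportedMessage here makes the client fall back to per-register
# reads, which getThreadRegistersInfo already serves (see the TODO above).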
return rsprotocol.UnSupportedMessage @@ -171,11 +171,11 @@ class GDBServer(object): def getRegisterInfo(self, query): bytes = '' try: - query = query.replace('qRegisterInfo', '') - regnum = int(query, 16) + query_index = query.replace('qRegisterInfo', '') + regnum = int(query_index, 16) bytes = self.process.getRegisterInfo(regnum) except Exception, e: - logging.error("Failed to get register information error: %s" % e.message) + logging.error("Non-fatal: Failed to get register information: query: %s error: %s" % (query, e.message)) return rsprotocol.Message(bytes) def getMemory(self, query): diff --git a/tools/lldbmacros/usertaskdebugging/interface.py b/tools/lldbmacros/usertaskdebugging/interface.py index 590541f4b..900bc18aa 100755 --- a/tools/lldbmacros/usertaskdebugging/interface.py +++ b/tools/lldbmacros/usertaskdebugging/interface.py @@ -24,14 +24,14 @@ class Interface(object): ra,wa,ea = select.select([self.socket], [], [], 30) if not ra: num_retries -= 1 - logging.error("select returned empty list") + logging.warning("timeout: select returned empty list. retrying..") continue self.connection, addr = self.socket.accept() logging.info("Connected to client from %s" % str(addr)) return True logging.error("Failed to connect. Exiting after multiple attempts.") return False - + def read(self): if self.isblocking: #BUG TODO make this unblocking soon diff --git a/tools/lldbmacros/usertaskdebugging/userprocess.py b/tools/lldbmacros/usertaskdebugging/userprocess.py index a4a9a61b2..d7ff218cc 100755 --- a/tools/lldbmacros/usertaskdebugging/userprocess.py +++ b/tools/lldbmacros/usertaskdebugging/userprocess.py @@ -4,6 +4,7 @@ import struct from xnu import * from core.operating_system import Armv8_RegisterSet, Armv7_RegisterSet, I386_RegisterSet, X86_64RegisterSet +from core import caching """ these defines should come from an authoritative header file """ CPU_TYPE_I386 = 0x00000007 @@ -23,7 +24,7 @@ def GetRegisterSetForCPU(cputype, subtype): retval = I386_RegisterSet elif cputype == CPU_TYPE_X86_64: retval = X86_64RegisterSet - + """ crash if unknown cputype """ return retval.register_info['registers'] @@ -113,16 +114,25 @@ class UserProcess(target.Process): self.cpusubtype = unsigned(self.proc.p_cpusubtype) super(UserProcess, self).__init__(self.cputype, self.cpusubtype, ptrsize) - - self.hinfo['ostype'] = 'macosx' - if self.cputype != CPU_TYPE_X86_64 and self.cputype != CPU_TYPE_I386: + dbg_message = "process:%s is64bit:%d ptrsize:%d cputype:0x%x cpusubtype:0x%x" % (hex(self.proc), int(dataregisters64bit), ptrsize, self.cputype, self.cpusubtype) + self.proc_platform = int(self.proc.p_platform) + if self.proc_platform == xnudefines.P_PLATFORM_MACOS: + self.hinfo['ostype'] = 'macosx' + elif self.proc_platform == xnudefines.P_PLATFORM_WATCHOS: + self.hinfo['ostype'] = 'watchos' + elif self.proc_platform == xnudefines.P_PLATFORM_TVOS: + self.hinfo['ostype'] = 'tvos' + else: self.hinfo['ostype'] = 'ios' + dbg_message += " ostype:%s" % self.hinfo['ostype'] + + if is_kern_64bit and str(kern.arch).lower().startswith('arm'): + addressing_bits = 64 - int(kern.globals.gT1Sz) + self.hinfo['addressing_bits'] = addressing_bits + dbg_message += " addressing_bits:%d" % addressing_bits self.registerset = GetRegisterSetForCPU(self.cputype, self.cpusubtype) - logging.debug("process %s is64bit: %d ptrsize: %d cputype: %d cpusubtype:%d", - hex(self.proc), int(dataregisters64bit), ptrsize, - self.cputype, self.cpusubtype - ) + logging.info(dbg_message) self.threads = {} self.threads_ids_list = [] 
logging.debug("iterating over threads in process") @@ -165,7 +175,7 @@ class UserProcess(target.Process): def getRegisterInfo(self, regnum): #something similar to #"name:x1;bitsize:64;offset:8;encoding:uint;format:hex;gcc:1;dwarf:1;set:General Purpose Registers;" - if regnum > len(self.registerset): + if regnum >= len(self.registerset): logging.debug("No register_info for number %d." % regnum) return 'E45' @@ -200,9 +210,15 @@ class UserProcess(target.Process): return retval def readMemory(self, address, size): + cache_key = "{}-{}-{}".format(hex(self.task), hex(address), size) + cache_data = caching.GetDynamicCacheData(cache_key) + if cache_data: + return self.encodeByteString(cache_data) data = GetUserDataAsString(self.task, address, size) if not data: logging.error("Failed to read memory task:{: <#018x} {: <#018x} {:d}".format(self.task, address, size)) + else: + caching.SaveDynamicCacheData(cache_key, data) return self.encodeByteString(data) def getSharedLibInfoAddress(self): diff --git a/tools/lldbmacros/usertaskgdbserver.py b/tools/lldbmacros/usertaskgdbserver.py index 29bdc5b29..dbe525580 100755 --- a/tools/lldbmacros/usertaskgdbserver.py +++ b/tools/lldbmacros/usertaskgdbserver.py @@ -21,24 +21,24 @@ def DoUserTaskDebuggingServer(cmd_args = [], cmd_options ={}): """ starts a gdb protocol server that is backed by in kernel debugging session. Usage: (lldb) beginusertaskdebugging options: -D for debug level logging - -W for warning level logging. + -W for warning level logging. default is error level logging """ if not _usertaskdebugging_availabe: print "You do not have the usertask debugging files available. " return - log_level = logging.ERROR + log_level = logging.INFO if '-D' in cmd_options: log_level = logging.DEBUG elif '-W' in cmd_options: - log_level = logging.WARNING + log_level = logging.WARNING setupLogging(debug_level=log_level) if not cmd_args: raise ArgumentError("Please provide valid task argument.") t = kern.GetValueFromAddress(cmd_args[0], 'task_t') - + up = userprocess.UserProcess(t) gbs = gdbserver.GDBServer(up) print "Starting debug session for %s at localhost:%d." 
% (GetProcNameForTask(t), gbs.portnum) diff --git a/tools/lldbmacros/workqueue.py b/tools/lldbmacros/workqueue.py index 26bb400f7..a3b91bf26 100755 --- a/tools/lldbmacros/workqueue.py +++ b/tools/lldbmacros/workqueue.py @@ -100,7 +100,7 @@ def GetWorkqueueThreadRequestSummary(proc, req): if req.tr_kq_wakeup: state += "*" thread = 0 - if int(req.tr_state) in [3, 4]: + if int(req.tr_state) in [4, 5]: # BINDING or BOUND thread = req.tr_thread qos = int(req.tr_qos) @@ -159,11 +159,11 @@ def ShowProcWorkqueue(cmd_args=None, cmd_options={}, O=None): print "" if wq.wq_event_manager_threadreq: print GetWorkqueueThreadRequestSummary(proc, wq.wq_event_manager_threadreq) - for req in IteratePriorityQueue(wq.wq_overcommit_queue, 'struct workq_threadreq_s', 'tr_entry'): + for req in IterateSchedPriorityQueue(wq.wq_overcommit_queue, 'struct workq_threadreq_s', 'tr_entry'): print GetWorkqueueThreadRequestSummary(proc, req) - for req in IteratePriorityQueue(wq.wq_constrained_queue, 'struct workq_threadreq_s', 'tr_entry'): + for req in IterateSchedPriorityQueue(wq.wq_constrained_queue, 'struct workq_threadreq_s', 'tr_entry'): print GetWorkqueueThreadRequestSummary(proc, req) - for req in IteratePriorityQueue(wq.wq_special_queue, 'struct workq_threadreq_s', 'tr_entry'): + for req in IterateSchedPriorityQueue(wq.wq_special_queue, 'struct workq_threadreq_s', 'tr_entry'): print GetWorkqueueThreadRequestSummary(proc, req) with O.table(GetWQThreadSummary.header, indent=True): diff --git a/tools/lldbmacros/xnu.py b/tools/lldbmacros/xnu.py index 6e58e3209..d935362ab 100755 --- a/tools/lldbmacros/xnu.py +++ b/tools/lldbmacros/xnu.py @@ -669,8 +669,15 @@ def ParseMacOSPanicLog(panic_header, cmd_options={}): if other_log_begin_offset != 0 and (other_log_len == 0 or other_log_len < (cur_debug_buf_ptr_offset - other_log_begin_offset)): other_log_len = cur_debug_buf_ptr_offset - other_log_begin_offset expected_panic_magic = xnudefines.MACOS_PANIC_MAGIC - panic_stackshot_addr = unsigned(panic_header) + unsigned(panic_header.mph_stackshot_offset) - panic_stackshot_len = unsigned(panic_header.mph_stackshot_len) + + # use the global if it's available (on an x86 corefile), otherwise refer to the header + if hasattr(kern.globals, "panic_stackshot_buf"): + panic_stackshot_addr = unsigned(kern.globals.panic_stackshot_buf) + panic_stackshot_len = unsigned(kern.globals.panic_stackshot_len) + else: + panic_stackshot_addr = unsigned(panic_header) + unsigned(panic_header.mph_stackshot_offset) + panic_stackshot_len = unsigned(panic_header.mph_stackshot_len) + panic_header_flags = unsigned(panic_header.mph_panic_flags) warn_str = "" @@ -756,7 +763,7 @@ def ParseAURRPanicLog(panic_header, cmd_options={}): # Adjust panic log string length (cap to maximum supported values) if panic_log_version == xnudefines.AURR_PANIC_VERSION: - max_string_len = panic_log_reset_log_len and min(panic_log_reset_log_len, xnudefines.AURR_PANIC_STRING_LEN) or 0 + max_string_len = panic_log_reset_log_len elif panic_log_version == xnudefines.AURR_CRASHLOG_PANIC_VERSION: max_string_len = xnudefines.CRASHLOG_PANIC_STRING_LEN @@ -1216,7 +1223,6 @@ from pci import * from misc import * from apic import * from scheduler import * -from atm import * from structanalyze import * from ipcimportancedetail import * from bank import * diff --git a/tools/lldbmacros/xnudefines.py b/tools/lldbmacros/xnudefines.py index a91d0831d..af1d64d20 100755 --- a/tools/lldbmacros/xnudefines.py +++ b/tools/lldbmacros/xnudefines.py @@ -173,10 +173,10 @@ proc_flag_explain_strings = 
["!0x00000004 - process is 32 bit", #only exception ] #File: xnu/osfmk/kern/ipc_kobject.h # string representations for Kobject types -kobject_types = ['', 'THREAD', 'TASK', 'HOST', 'HOST_PRIV', 'PROCESSOR', 'PSET', 'PSET_NAME', 'TIMER', 'PAGER_REQ', 'DEVICE', 'XMM_OBJECT', 'XMM_PAGER', 'XMM_KERNEL', 'XMM_REPLY', +kobject_types = ['', 'THREAD_CONTROL', 'TASK_CONTROL', 'HOST', 'HOST_PRIV', 'PROCESSOR', 'PSET', 'PSET_NAME', 'TIMER', 'PAGER_REQ', 'DEVICE', 'XMM_OBJECT', 'XMM_PAGER', 'XMM_KERNEL', 'XMM_REPLY', 'NOTDEF 15', 'NOTDEF 16', 'HOST_SEC', 'LEDGER', 'MASTER_DEV', 'TASK_NAME', 'SUBSYTEM', 'IO_DONE_QUE', 'SEMAPHORE', 'LOCK_SET', 'CLOCK', 'CLOCK_CTRL' , 'IOKIT_SPARE', 'NAMED_MEM', 'IOKIT_CON', 'IOKIT_OBJ', 'UPL', 'MEM_OBJ_CONTROL', 'AU_SESSIONPORT', 'FILEPORT', 'LABELH', 'TASK_RESUME', 'VOUCHER', 'VOUCHER_ATTR_CONTROL', 'WORK_INTERVAL', - 'UX_HANDLER'] + 'UX_HANDLER', 'UEXT_OBJECT', 'ARCADE_REG', 'TASK_INSPECT', 'TASK_READ', 'THREAD_INSPECT', 'THREAD_READ'] def populate_kobject_types(xnu_dir_path): """ Function to read data from header file xnu/osfmk/kern/ipc_kobject.h @@ -210,12 +210,24 @@ MACOS_PANIC_MAGIC = 0x44454544 MACOS_PANIC_STACKSHOT_SUCCEEDED_FLAG = 0x04 AURR_PANIC_MAGIC = 0x41555252 -AURR_PANIC_STRING_LEN = 112 AURR_PANIC_VERSION = 1 CRASHLOG_PANIC_STRING_LEN = 32 AURR_CRASHLOG_PANIC_VERSION = 2 +# File:EXTERNAL_HEADER/mach-o/loader.h +# (struct proc *)->p_platform +P_PLATFORM_MACOS = 1 +P_PLATFORM_IOS = 2 +P_PLATFORM_TVOS = 3 +P_PLATFORM_WATCHOS = 4 +P_PLATFORM_BRIDGEOS = 5 +P_PLATFORM_MACCATALYST = 6 +P_PLATFORM_IOSSIMULATOR = 7 +P_PLATFORM_TVOSSIMULATOR = 8 +P_PLATFORM_WATCHOSSIMULATOR = 9 +P_PLATFORM_DRIVERKIT = 10 + if __name__ == "__main__": populate_kobject_types("../../") diff --git a/tools/lldbmacros/zonetriage.py b/tools/lldbmacros/zonetriage.py index fe58c09c6..2249533d7 100755 --- a/tools/lldbmacros/zonetriage.py +++ b/tools/lldbmacros/zonetriage.py @@ -82,7 +82,7 @@ def ZoneTriageMemoryLeak(cmd_args=None): print "(lldb) zstack_showzonesbeinglogged\n%s\n" % lldb_run_command("zstack_showzonesbeinglogged") for zval in kern.zones: if zval.zlog_btlog: - print '%s:' % zval.zone_name + print '%s:' % zval.z_name print "(lldb) zstack_findtop -N 5 0x%lx" % zval.zlog_btlog print lldb_run_command("zstack_findtop -N 5 0x%lx" % zval.zlog_btlog) print "(lldb) zstack_findleak 0x%lx" % zval.zlog_btlog @@ -118,7 +118,7 @@ def FindZoneBTLog(zone): global kern for zval in kern.zones: if zval.zlog_btlog: - if zone == "%s" % zval.zone_name: + if zone == "%s" % zval.z_name: return "0x%lx" % zval.zlog_btlog return None -# EndMacro: zonetriage, zonetriage_freedelement, zonetriage_memoryleak \ No newline at end of file +# EndMacro: zonetriage, zonetriage_freedelement, zonetriage_memoryleak diff --git a/tools/tests/MPMMTest/Makefile b/tools/tests/MPMMTest/Makefile index 369fbcace..bf24d9aa4 100644 --- a/tools/tests/MPMMTest/Makefile +++ b/tools/tests/MPMMTest/Makefile @@ -6,58 +6,40 @@ SYMROOT?=$(shell /bin/pwd) CFLAGS := -g -O2 -isysroot $(SDKROOT) -I$(SDKROOT)/System/Library/Frameworks/System.framework/PrivateHeaders CFLAGS += -Wno-deprecated-declarations +CFLAGS += -Wl,-sectcreate,__INFO_FILTER,__disable,/dev/null -ifdef RC_ARCHS - ARCHS:=$(RC_ARCHS) - else - ifeq "$(Embedded)" "YES" - ARCHS:=armv7 armv7s arm64 - else - ARCHS:=x86_64 i386 - endif -endif - -# These are convenience functions for filtering based on substrings, as the -# normal filter functions only accept one wildcard. 
-FILTER_OUT_SUBSTRING=$(strip $(foreach string,$(2),$(if $(findstring $(1),$(string)),,$(string)))) -FILTER_SUBSTRING=$(strip $(foreach string,$(2),$(if $(findstring $(1),$(string)),$(string),))) - -ARCH_32:=$(call FILTER_OUT_SUBSTRING,64,$(ARCHS)) -ARCH_64:=$(call FILTER_SUBSTRING,64,$(ARCHS)) - -ARCH_32_FLAGS := $(patsubst %, -arch %, $(ARCH_32)) -ARCH_64_FLAGS := $(patsubst %, -arch %, $(ARCH_64)) +# $(ARCH_CONFIGS) is set by Makefile.common DSTROOT?=$(shell /bin/pwd) ARCH_32_TARGETS := MPMMtest KQMPMMtest KQMPMMtestD ARCH_64_TARGETS := MPMMtest_64 KQMPMMtest_64 KQMPMMtest_64D -TARGETS := MPMMtest_perf.sh $(if $(ARCH_64), $(ARCH_64_TARGETS)) $(if $(ARCH_32), $(ARCH_32_TARGETS)) +TARGETS := MPMMtest_perf.sh $(if $(ARCH_CONFIGS_64), $(ARCH_64_TARGETS)) $(if $(ARCH_CONFIGS_32), $(ARCH_32_TARGETS)) all: $(addprefix $(DSTROOT)/, $(TARGETS)) $(DSTROOT)/MPMMtest: MPMMtest.c - ${CC} ${CFLAGS} ${ARCH_32_FLAGS} -o $(SYMROOT)/$(notdir $@) $? + ${CC} ${CFLAGS} ${ARCH_FLAGS_32} -o $(SYMROOT)/$(notdir $@) $? if [ ! -e $@ ]; then ditto $(SYMROOT)/$(notdir $@) $@; fi $(DSTROOT)/MPMMtest_64: MPMMtest.c - ${CC} ${CFLAGS} ${ARCH_64_FLAGS} -o $(SYMROOT)/$(notdir $@) $? + ${CC} ${CFLAGS} ${ARCH_FLAGS_64} -o $(SYMROOT)/$(notdir $@) $? if [ ! -e $@ ]; then ditto $(SYMROOT)/$(notdir $@) $@; fi $(DSTROOT)/KQMPMMtest: KQMPMMtest.c - ${CC} ${CFLAGS} ${ARCH_32_FLAGS} -o $(SYMROOT)/$(notdir $@) $? + ${CC} ${CFLAGS} ${ARCH_FLAGS_32} -o $(SYMROOT)/$(notdir $@) $? if [ ! -e $@ ]; then ditto $(SYMROOT)/$(notdir $@) $@; fi $(DSTROOT)/KQMPMMtest_64: KQMPMMtest.c - ${CC} ${CFLAGS} ${ARCH_64_FLAGS} -o $(SYMROOT)/$(notdir $@) $? + ${CC} ${CFLAGS} ${ARCH_FLAGS_64} -o $(SYMROOT)/$(notdir $@) $? if [ ! -e $@ ]; then ditto $(SYMROOT)/$(notdir $@) $@; fi $(DSTROOT)/KQMPMMtestD: KQMPMMtest.c - ${CC} ${CFLAGS} ${ARCH_32_FLAGS} -DDIRECT_MSG_RCV=1 -o $(SYMROOT)/$(notdir $@) $? + ${CC} ${CFLAGS} ${ARCH_FLAGS_32} -DDIRECT_MSG_RCV=1 -o $(SYMROOT)/$(notdir $@) $? if [ ! -e $@ ]; then ditto $(SYMROOT)/$(notdir $@) $@; fi $(DSTROOT)/KQMPMMtest_64D: KQMPMMtest.c - ${CC} ${CFLAGS} ${ARCH_64_FLAGS} -DDIRECT_MSG_RCV=1 -o $(SYMROOT)/$(notdir $@) $? + ${CC} ${CFLAGS} ${ARCH_FLAGS_64} -DDIRECT_MSG_RCV=1 -o $(SYMROOT)/$(notdir $@) $? if [ ! 
-e $@ ]; then ditto $(SYMROOT)/$(notdir $@) $@; fi $(DSTROOT)/MPMMtest_perf.sh: MPMMtest_run.sh diff --git a/tools/tests/Makefile b/tools/tests/Makefile index 09d13dfc4..2c929f93f 100644 --- a/tools/tests/Makefile +++ b/tools/tests/Makefile @@ -21,6 +21,8 @@ DSTSUBPATH = $(DSTROOT) BATS_CONFIG_PATH = $(DSTROOT) endif +DSTSUBPATH_KEXTS = $(DSTROOT)/System/AppleInternal/Library/AuxiliaryExtensions + COMMON_TARGETS = unit_tests \ MPMMTest \ packetdrill \ @@ -31,7 +33,8 @@ COMMON_TARGETS = unit_tests \ perf_index \ personas \ unixconf \ - testkext/pgokext.kext + +KEXT_TARGETS = pgokext.kext IPHONE_TARGETS = @@ -40,9 +43,9 @@ MAC_TARGETS = BATS_TARGET = $(BATS_CONFIG_PATH)/BATS ifeq "$(Embedded)" "YES" -TARGETS = $(addprefix $(DSTSUBPATH)/, $(COMMON_TARGETS) $(IPHONE_TARGETS)) +TARGETS = $(addprefix $(DSTSUBPATH)/, $(COMMON_TARGETS) $(IPHONE_TARGETS)) $(addprefix $(DSTSUBPATH_KEXTS)/, $(KEXT_TARGETS)) else -TARGETS = $(addprefix $(DSTSUBPATH)/, $(COMMON_TARGETS) $(MAC_TARGETS)) +TARGETS = $(addprefix $(DSTSUBPATH)/, $(COMMON_TARGETS) $(MAC_TARGETS)) $(addprefix $(DSTSUBPATH_KEXTS)/, $(KEXT_TARGETS)) endif all: $(BATS_TARGET) $(TARGETS) @@ -54,8 +57,8 @@ always: # NOTE: RC_ARCHES nonsense is here because code signing will fail on kexts if we include a i386 slice # CoreOSLobo: xnu_tests-4026 fails to codesign -$(DSTSUBPATH)/testkext/pgokext.kext: always - xcodebuild -configuration Debug -project $(SRCROOT)/testkext/testkext.xcodeproj -target pgokext DSTROOT=$(DSTSUBPATH)/testkext SYMROOT=$(SYMROOT)/testkext SRCROOT=$(SRCROOT)/testkext OBJROOT=$(OBJROOT)/testkext SDKROOT=$(SDKROOT) RC_ARCHS="$(subst i386,,$(RC_ARCHS))" install +$(DSTSUBPATH_KEXTS)/pgokext.kext: always + xcodebuild -configuration Debug -project $(SRCROOT)/testkext/testkext.xcodeproj -target pgokext DSTROOT=$(DSTSUBPATH_KEXTS) SYMROOT=$(SYMROOT)/testkext SRCROOT=$(SRCROOT)/testkext OBJROOT=$(OBJROOT)/testkext SDKROOT=$(SDKROOT) RC_ARCHS="$(subst i386,,$(RC_ARCHS))" install $(DSTSUBPATH)/%: always $(_v)echo Building $@ diff --git a/tools/tests/Makefile.common b/tools/tests/Makefile.common index 1d80fb11e..1f693ffe1 100644 --- a/tools/tests/Makefile.common +++ b/tools/tests/Makefile.common @@ -56,3 +56,28 @@ _v = else _v = @ endif + +# These are convenience functions for filtering based on substrings, as the +# normal filter functions only accept one wildcard. 
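+# FILTER_SUBSTRING keeps the words of $(2) that contain the substring $(1);
+# FILTER_OUT_SUBSTRING keeps the words that do not.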
+FILTER_OUT_SUBSTRING=$(strip $(foreach string,$(2),$(if $(findstring $(1),$(string)),,$(string)))) +FILTER_SUBSTRING=$(strip $(foreach string,$(2),$(if $(findstring $(1),$(string)),$(string),))) + +#arch configs if not provided +ifdef RC_ARCHS +ARCH_CONFIGS:=$(RC_ARCHS) +endif + +ifeq ($(ARCH_CONFIGS),) +PLATFORM_LOWERCASE:=$(shell echo "$(PLATFORM)" | tr A-Z a-z) +ARCH_CONFIGS:=$(shell /usr/bin/plutil -extract SupportedTargets.$(PLATFORM_LOWERCASE).Archs json -o - $(SDKROOT)/SDKSettings.plist | tr '",[]' ' ') +PLATFORM_LOWERCASE:= +endif + +ARCH_CONFIGS_32:=$(call FILTER_OUT_SUBSTRING,64,$(ARCH_CONFIGS)) +ARCH_CONFIGS_64:=$(call FILTER_SUBSTRING,64,$(ARCH_CONFIGS)) +ARCH_CONFIGS_x86:=$(call FILTER_SUBSTRING,x86_64,$(ARCH_CONFIGS)) + +ARCH_FLAGS:=$(foreach argarch,$(ARCH_CONFIGS), -arch $(argarch) ) +ARCH_FLAGS_32:=$(foreach argarch,$(ARCH_CONFIGS_32), -arch $(argarch) ) +ARCH_FLAGS_64:=$(foreach argarch,$(ARCH_CONFIGS_64), -arch $(argarch) ) +ARCH_FLAGS_x86:=$(foreach argarch,$(ARCH_CONFIGS_x86), -arch $(argarch) ) diff --git a/tools/tests/TLBcoherency/Makefile b/tools/tests/TLBcoherency/Makefile index 00bbf15d6..031e44d89 100644 --- a/tools/tests/TLBcoherency/Makefile +++ b/tools/tests/TLBcoherency/Makefile @@ -2,17 +2,8 @@ include ../Makefile.common CC:=$(shell xcrun -sdk "$(SDKROOT)" -find cc) -ifdef RC_ARCHS - ARCHS:=$(RC_ARCHS) - else - ifeq "$(Embedded)" "YES" - ARCHS:=armv7 armv7s arm64 armv7k - else - ARCHS:=x86_64 - endif -endif - -CFLAGS := -g $(patsubst %, -arch %, $(ARCHS)) -isysroot $(SDKROOT) -isystem $(SDKROOT)/System/Library/Frameworks/System.framework/PrivateHeaders +CFLAGS := -g $(ARCH_FLAGS) -isysroot $(SDKROOT) -isystem $(SDKROOT)/System/Library/Frameworks/System.framework/PrivateHeaders +CFLAGS += -Wl,-sectcreate,__INFO_FILTER,__disable,/dev/null DSTROOT?=$(shell /bin/pwd) SYMROOT?=$(shell /bin/pwd) diff --git a/tools/tests/affinity/Makefile b/tools/tests/affinity/Makefile index 5f45973ab..7096bdc5b 100644 --- a/tools/tests/affinity/Makefile +++ b/tools/tests/affinity/Makefile @@ -4,28 +4,8 @@ CC:=$(shell xcrun -sdk "$(SDKROOT)" -find cc) SYMROOT?=$(shell /bin/pwd) -ifdef RC_ARCHS - ARCHS:=$(RC_ARCHS) - else - ifeq "$(Embedded)" "YES" - ARCHS:=armv7 armv7s arm64 - else - ARCHS:=x86_64 i386 - endif -endif - -# These are convenience functions for filtering based on substrings, as the -# normal filter functions only accept one wildcard. 
-FILTER_OUT_SUBSTRING=$(strip $(foreach string,$(2),$(if $(findstring $(1),$(string)),,$(string)))) -FILTER_SUBSTRING=$(strip $(foreach string,$(2),$(if $(findstring $(1),$(string)),$(string),))) - -ARCH_32:=$(call FILTER_OUT_SUBSTRING,64,$(ARCHS)) -ARCH_64:=$(call FILTER_SUBSTRING,64,$(ARCHS)) - -ARCH_32_FLAGS := $(patsubst %, -arch %, $(ARCH_32)) -ARCH_64_FLAGS := $(patsubst %, -arch %, $(ARCH_64)) - CFLAGS :=-g -isysroot $(SDKROOT) -I$(SDKROOT)/System/Library/Frameworks/System.framework/PrivateHeaders +CFLAGS += -Wl,-sectcreate,__INFO_FILTER,__disable,/dev/null DSTROOT?=$(shell /bin/pwd) SRCROOT?=$(shell /bin/pwd) @@ -34,18 +14,18 @@ SOURCES := sets.c pool.c tags.c ARCH_32_TARGETS := $(addprefix $(DSTROOT)/, sets pool tags) ARCH_64_TARGETS := $(addprefix $(DSTROOT)/, sets64 pool64 tags64) -TARGETS := $(if $(ARCH_64), $(ARCH_64_TARGETS)) $(if $(ARCH_32), $(ARCH_32_TARGETS)) +TARGETS := $(if $(ARCH_CONFIGS_64), $(ARCH_64_TARGETS)) $(if $(ARCH_CONFIGS_32), $(ARCH_32_TARGETS)) all: $(TARGETS) $(ARCH_32_TARGETS): $(DSTROOT)/%: $(SRCROOT)/%.c - $(CC) $(CFLAGS) $(ARCH_32_FLAGS) $< -o $(SYMROOT)/$(notdir $@) # 32-bit fat + $(CC) $(CFLAGS) $(ARCH_FLAGS_32) $< -o $(SYMROOT)/$(notdir $@) # 32-bit fat if [ ! -e $@ ]; then ditto $(SYMROOT)/$(notdir $@) $@; fi .SECONDEXPANSION: $(ARCH_64_TARGETS): $(DSTROOT)/%: $(SRCROOT)/$$(subst 64,,%).c - $(CC) $(CFLAGS) $(ARCH_64_FLAGS) $< -o $(SYMROOT)/$(notdir $@) # 64-bit fat + $(CC) $(CFLAGS) $(ARCH_FLAGS_64) $< -o $(SYMROOT)/$(notdir $@) # 64-bit fat if [ ! -e $@ ]; then ditto $(SYMROOT)/$(notdir $@) $@; fi clean: diff --git a/tools/tests/execperf/Makefile b/tools/tests/execperf/Makefile index e67fe1313..4e7b6919a 100644 --- a/tools/tests/execperf/Makefile +++ b/tools/tests/execperf/Makefile @@ -1,23 +1,14 @@ include ../Makefile.common -ifdef RC_ARCHS - ARCHS:=$(RC_ARCHS) - else - ifeq "$(Embedded)" "YES" - ARCHS:=armv7 armv7s arm64 - else - ARCHS:=x86_64 i386 - endif -endif - CC = $(shell xcrun -sdk "$(SDKROOT)" -find cc) STRIP = $(shell xcrun -sdk "$(SDKROOT)" -find strip) CODESIGN = $(shell xcrun -sdk "$(SDKROOT)" -find codesign) export CODESIGN_ALLOCATE := $(shell xcrun -sdk "$(SDKROOT)" -find codesign_allocate) DSYMUTIL = $(shell xcrun -sdk "$(SDKROOT)" -find dsymutil) -CFLAGS = -O0 -g -isysroot $(SDKROOT) $(patsubst %, -arch %,$(ARCHS)) +CFLAGS = -O0 -g -isysroot $(SDKROOT) $(ARCH_FLAGS) +CFLAGS += -Wl,-sectcreate,__INFO_FILTER,__disable,/dev/null LDFLAGS = -dead_strip \ - -isysroot $(SDKROOT) $(patsubst %, -arch %,$(ARCHS)) + -isysroot $(SDKROOT) $(ARCH_FLAGS) OBJROOT?=$(shell /bin/pwd)/BUILD/obj SYMROOT?=$(shell /bin/pwd)/BUILD/sym diff --git a/tools/tests/jitter/Makefile b/tools/tests/jitter/Makefile index 901814c6e..b5c086e2f 100644 --- a/tools/tests/jitter/Makefile +++ b/tools/tests/jitter/Makefile @@ -6,18 +6,8 @@ OBJROOT?=$(shell /bin/pwd) CC:=$(shell xcrun -sdk "$(SDKROOT)" -find cc) -ifdef RC_ARCHS - ARCHS:=$(RC_ARCHS) - else - ifeq "$(Embedded)" "YES" - ARCHS:=armv7 armv7s arm64 - else - ARCHS:=x86_64 i386 - endif -endif - - -CFLAGS:=$(patsubst %, -arch %,$(ARCHS)) -g -Wall -Os -isysroot $(SDKROOT) -I$(SDKROOT)/System/Library/Frameworks/System.framework/PrivateHeaders +CFLAGS:=$(ARCH_FLAGS) -g -Wall -Os -isysroot $(SDKROOT) -I$(SDKROOT)/System/Library/Frameworks/System.framework/PrivateHeaders +CFLAGS += -Wl,-sectcreate,__INFO_FILTER,__disable,/dev/null all: $(DSTROOT)/jitter diff --git a/tools/tests/mktimer/Makefile b/tools/tests/mktimer/Makefile index c26b6ae8d..ce0d1efc3 100644 --- a/tools/tests/mktimer/Makefile +++ 
b/tools/tests/mktimer/Makefile @@ -4,19 +4,10 @@ OBJROOT?=$(shell /bin/pwd) CC:=$(shell xcrun -sdk "$(SDKROOT)" -find cc) -ifdef RC_ARCHS - ARCHS:=$(RC_ARCHS) - else - ifeq "$(Embedded)" "YES" - ARCHS:=armv7 armv7s arm64 - else - ARCHS:=x86_64 i386 - endif -endif - DSTROOT?=$(shell /bin/pwd) -CFLAGS:=$(patsubst %, -arch %,$(ARCHS)) -g -Wall -Os -isysroot $(SDKROOT) -framework CoreFoundation +CFLAGS:=$(ARCH_FLAGS) -g -Wall -Os -isysroot $(SDKROOT) -framework CoreFoundation +CFLAGS += -Wl,-sectcreate,__INFO_FILTER,__disable,/dev/null all: $(DSTROOT)/mktimer_test diff --git a/tools/tests/perf_index/Makefile b/tools/tests/perf_index/Makefile index 49aaa5c96..0c0823fae 100644 --- a/tools/tests/perf_index/Makefile +++ b/tools/tests/perf_index/Makefile @@ -1,25 +1,15 @@ include ../Makefile.common CC:=$(shell xcrun -sdk "$(SDKROOT)" -find cc) -CFLAGS:=-c -Wall -pedantic -Os -isysroot $(SDKROOT) -LDFLAGS:= +CFLAGS:=-c -Wall -pedantic -Os -isysroot $(SDKROOT) $(ARCH_FLAGS) +CFLAGS += -Wl,-sectcreate,__INFO_FILTER,__disable,/dev/null +LDFLAGS:= $(ARCH_FLAGS) -isysroot $(SDKROOT) SRCROOT?=$(shell /bin/pwd) DSTROOT?=$(shell /bin/pwd)/BUILD/dst OBJROOT?=$(shell /bin/pwd)/BUILD/obj SYMROOT?=$(shell /bin/pwd)/BUILD/sym -ifdef RC_ARCHS - ARCHS:=$(RC_ARCHS) -else - ifeq ($(ARCHS),) - ifeq "$(Embedded)" "YES" - ARCHS:=armv7 armv7s arm64 -else - ARCHS:=x86_64 i386 -endif -endif -endif ifeq "$(Embedded)" "YES" TARGET_NAME:=PerfIndex.bundle-ios @@ -27,9 +17,6 @@ else TARGET_NAME:=PerfIndex.bundle-osx endif -CFLAGS += $(patsubst %, -arch %, $(ARCHS)) -LDFLAGS += $(patsubst %, -arch %, $(ARCHS)) - all: $(DSTROOT) $(OBJROOT) $(SYMROOT) \ $(DSTROOT)/perf_index \ $(DSTROOT)/perfindex-cpu.dylib \ diff --git a/tools/tests/perf_index/test_controller.py b/tools/tests/perf_index/test_controller.py index 18493c991..73d2a1b34 100755 --- a/tools/tests/perf_index/test_controller.py +++ b/tools/tests/perf_index/test_controller.py @@ -1,5 +1,5 @@ #!/usr/bin/python - +from __future__ import absolute_import, print_function import socket import time import select @@ -27,7 +27,7 @@ def main(num_clients, test_type, num_threads, job_size, args): msg = "\0".join(["%s\0%d\0%d" % (test_type, num_threads, job_size)] + args) + "\0\0" client_socket.send(msg) client_sockets.append(client_socket) - + control_socket.close() waitformsgs(client_sockets, "Ready") @@ -47,7 +47,7 @@ def main(num_clients, test_type, num_threads, job_size, args): return end_time - start_time def usage(): - sys.stderr.write("usage: start_tests.py num_clients type threads size\n") + sys.stderr.write("usage: start_tests.py num_clients type threads size\n") exit(1) if __name__ == "__main__": @@ -62,4 +62,4 @@ if __name__ == "__main__": except ValueError: usage() - print main(num_clients, test_type, num_threads, job_size, args) + print(main(num_clients, test_type, num_threads, job_size, args)) diff --git a/tools/tests/personas/Makefile b/tools/tests/personas/Makefile index ba66220c8..14211dc70 100644 --- a/tools/tests/personas/Makefile +++ b/tools/tests/personas/Makefile @@ -7,28 +7,10 @@ CODESIGN_ALLOCATE:=$(shell xcrun -sdk "$(SDKROOT)" -find codesign_allocate) SYMROOT?=$(shell /bin/pwd) CFLAGS := -g -O2 -isysroot $(SDKROOT) -I$(SDKROOT)/System/Library/Frameworks/System.framework/PrivateHeaders +CFLAGS += -Wl,-sectcreate,__INFO_FILTER,__disable,/dev/null -ifdef RC_ARCHS - ARCHS:=$(RC_ARCHS) - else - ifeq "$(Embedded)" "YES" - ARCHS:=armv7 armv7s arm64 - else - ARCHS:=x86_64 i386 - endif -endif - -# These are convenience functions for filtering based on substrings, as 
the -# normal filter functions only accept one wildcard. -FILTER_OUT_SUBSTRING=$(strip $(foreach string,$(2),$(if $(findstring $(1),$(string)),,$(string)))) -FILTER_SUBSTRING=$(strip $(foreach string,$(2),$(if $(findstring $(1),$(string)),$(string),))) - -ARCH_32:=$(call FILTER_OUT_SUBSTRING,64,$(ARCHS)) -ARCH_64:=$(call FILTER_SUBSTRING,64,$(ARCHS)) - -ARCH_32_FLAGS := $(patsubst %, -arch %, $(ARCH_32)) -ARCH_64_FLAGS := $(patsubst %, -arch %, $(ARCH_64)) -ARCH_FLAGS := $(if $(ARCH_64), $(ARCH_64_FLAGS)) $(if $(ARCH_32), $(ARCH_32_FLAGS)) +# $(ARCH_CONFIGS) is set my Makefile.common +ARCH_FLAGS := $(if $(ARCH_CONFIGS_64), $(ARCH_FLAGS_64)) $(if $(ARCH_CONFIGS_32), $(ARCH_FLAGS_32)) DSTROOT?=$(shell /bin/pwd) diff --git a/tools/tests/superpages/Makefile b/tools/tests/superpages/Makefile index 4772a03e5..4409c61ba 100644 --- a/tools/tests/superpages/Makefile +++ b/tools/tests/superpages/Makefile @@ -4,17 +4,8 @@ DSTROOT?=$(shell /bin/pwd) TARGETS := $(addprefix $(DSTROOT)/, measure_tlbs testsp) CC:=$(shell xcrun -sdk "$(SDKROOT)" -find cc) -ifdef RC_ARCHS - ARCHS:=$(RC_ARCHS) - else - ifeq "$(Embedded)" "YES" - ARCHS:=armv7 armv7s arm64 - else - ARCHS:=x86_64 i386 - endif -endif - -CFLAGS += $(patsubst %, -arch %, $(ARCHS)) -isysroot $(SDKROOT) +CFLAGS += $(ARCH_FLAGS) -isysroot $(SDKROOT) +CFLAGS += -Wl,-sectcreate,__INFO_FILTER,__disable,/dev/null all: $(TARGETS) diff --git a/tools/tests/testkext/testkext.xcodeproj/project.pbxproj b/tools/tests/testkext/testkext.xcodeproj/project.pbxproj index a89640683..b9ca44fbf 100644 --- a/tools/tests/testkext/testkext.xcodeproj/project.pbxproj +++ b/tools/tests/testkext/testkext.xcodeproj/project.pbxproj @@ -285,7 +285,6 @@ GCC_WARN_UNUSED_FUNCTION = YES; INFOPLIST_FILE = pgokext/Info.plist; INSTALL_PATH = /; - MACOSX_DEPLOYMENT_TARGET = 10.13; MODULE_NAME = com.apple.pgokext; MODULE_START = pgokext_start; MODULE_STOP = pgokext_stop; @@ -333,7 +332,6 @@ GCC_WARN_UNUSED_FUNCTION = YES; INFOPLIST_FILE = pgokext/Info.plist; INSTALL_PATH = /; - MACOSX_DEPLOYMENT_TARGET = 10.13; MODULE_NAME = com.apple.pgokext; MODULE_START = pgokext_start; MODULE_STOP = pgokext_stop; diff --git a/tools/tests/zero-to-n/Makefile b/tools/tests/zero-to-n/Makefile index 63e5484c9..a02474b47 100644 --- a/tools/tests/zero-to-n/Makefile +++ b/tools/tests/zero-to-n/Makefile @@ -2,17 +2,8 @@ include ../Makefile.common CC:=$(shell xcrun -sdk "$(SDKROOT)" -find cc) -ifdef RC_ARCHS - ARCHS:=$(RC_ARCHS) - else - ifeq "$(Embedded)" "YES" - ARCHS:=armv7 armv7s arm64 armv7k - else - ARCHS:=x86_64 i386 - endif -endif - -CFLAGS := -Os -g $(patsubst %, -arch %, $(ARCHS)) -isysroot $(SDKROOT) -isystem $(SDKROOT)/System/Library/Frameworks/System.framework/PrivateHeaders +CFLAGS := -Os -g $(ARCH_FLAGS) -isysroot $(SDKROOT) -isystem $(SDKROOT)/System/Library/Frameworks/System.framework/PrivateHeaders +CFLAGS += -Wl,-sectcreate,__INFO_FILTER,__disable,/dev/null DSTROOT?=$(shell /bin/pwd) SYMROOT?=$(shell /bin/pwd) diff --git a/tools/tests/zero-to-n/zero-to-n.c b/tools/tests/zero-to-n/zero-to-n.c index db9ed81c5..e834ccdd0 100644 --- a/tools/tests/zero-to-n/zero-to-n.c +++ b/tools/tests/zero-to-n/zero-to-n.c @@ -56,6 +56,7 @@ #include #include +#include typedef enum wake_type { WAKE_BROADCAST_ONESEM, WAKE_BROADCAST_PERTHREAD, WAKE_CHAIN, WAKE_HOP } wake_type_t; typedef enum my_policy_type { MY_POLICY_REALTIME, MY_POLICY_TIMESHARE, MY_POLICY_FIXEDPRI } my_policy_type_t; @@ -66,6 +67,8 @@ typedef enum my_policy_type { MY_POLICY_REALTIME, MY_POLICY_TIMESHARE, MY_POLICY #define CONSTRAINT_NANOS 
(20000000ll) /* 20 ms */ #define COMPUTATION_NANOS (10000000ll) /* 10 ms */ +#define LL_CONSTRAINT_NANOS ( 2000000ll) /* 2 ms */ +#define LL_COMPUTATION_NANOS ( 1000000ll) /* 1 ms */ #define RT_CHURN_COMP_NANOS ( 1000000ll) /* 1 ms */ #define TRACEWORTHY_NANOS (10000000ll) /* 10 ms */ #define TRACEWORTHY_NANOS_TEST ( 2000000ll) /* 2 ms */ @@ -111,6 +114,9 @@ static uint32_t g_rt_churn_count = 0; static pthread_t* g_churn_threads = NULL; static pthread_t* g_rt_churn_threads = NULL; +/* should we skip test if run on non-intel */ +static boolean_t g_run_on_intel_only = FALSE; + /* Threshold for dropping a 'bad run' tracepoint */ static uint64_t g_traceworthy_latency_ns = TRACEWORTHY_NANOS; @@ -126,6 +132,9 @@ static boolean_t g_do_all_spin = FALSE; /* Every thread backgrounds temporarily before parking */ static boolean_t g_drop_priority = FALSE; +/* Use low-latency (sub 4ms deadline) realtime threads */ +static boolean_t g_rt_ll = FALSE; + /* Test whether realtime threads are scheduled on the separate CPUs */ static boolean_t g_test_rt = FALSE; @@ -464,8 +473,13 @@ thread_setup(uint32_t my_id) case MY_POLICY_REALTIME: /* Hard-coded realtime parameters (similar to what Digi uses) */ pol.period = 100000; - pol.constraint = (uint32_t) nanos_to_abs(CONSTRAINT_NANOS); - pol.computation = (uint32_t) nanos_to_abs(COMPUTATION_NANOS); + if (g_rt_ll) { + pol.constraint = (uint32_t) nanos_to_abs(LL_CONSTRAINT_NANOS); + pol.computation = (uint32_t) nanos_to_abs(LL_COMPUTATION_NANOS); + } else { + pol.constraint = (uint32_t) nanos_to_abs(CONSTRAINT_NANOS); + pol.computation = (uint32_t) nanos_to_abs(COMPUTATION_NANOS); + } pol.preemptible = 0; /* Ignored by OS */ kr = thread_policy_set(mach_thread_self(), THREAD_TIME_CONSTRAINT_POLICY, @@ -790,6 +804,19 @@ main(int argc, char **argv) mach_timebase_info(&g_mti); +#if TARGET_OS_OSX + /* SKIP test if running on arm platform */ + if (g_run_on_intel_only) { + int is_arm = 0; + size_t is_arm_size = sizeof(is_arm); + ret = sysctlbyname("hw.optional.arm64", &is_arm, &is_arm_size, NULL, 0); + if (ret == 0 && is_arm) { + printf("Unsupported platform. 
Skipping test.\n"); + exit(0); + } + } +#endif /* TARGET_OS_OSX */ + size_t ncpu_size = sizeof(g_numcpus); ret = sysctlbyname("hw.ncpu", &g_numcpus, &ncpu_size, NULL, 0); if (ret) { @@ -1223,7 +1250,8 @@ usage() " \n\t\t" "[--trace ] " "[--verbose] [--spin-one] [--spin-all] [--spin-time ] [--affinity]\n\t\t" - "[--no-sleep] [--drop-priority] [--churn-pri ] [--churn-count ]", + "[--no-sleep] [--drop-priority] [--churn-pri ] [--churn-count ]\n\t\t" + "[--rt-churn] [--rt-churn-count ] [--rt-ll] [--test-rt] [--test-rt-smt] [--test-rt-avoid0]", getprogname()); } @@ -1269,6 +1297,7 @@ parse_args(int argc, char *argv[]) { "rt-churn-count", required_argument, NULL, OPT_RT_CHURN_COUNT }, { "switched_apptype", no_argument, (int*)&g_seen_apptype, TRUE }, { "spin-one", no_argument, (int*)&g_do_one_long_spin, TRUE }, + { "intel-only", no_argument, (int*)&g_run_on_intel_only, TRUE }, { "spin-all", no_argument, (int*)&g_do_all_spin, TRUE }, { "affinity", no_argument, (int*)&g_do_affinity, TRUE }, { "no-sleep", no_argument, (int*)&g_do_sleep, FALSE }, @@ -1277,6 +1306,7 @@ parse_args(int argc, char *argv[]) { "test-rt-smt", no_argument, (int*)&g_test_rt_smt, TRUE }, { "test-rt-avoid0", no_argument, (int*)&g_test_rt_avoid0, TRUE }, { "rt-churn", no_argument, (int*)&g_rt_churn, TRUE }, + { "rt-ll", no_argument, (int*)&g_rt_ll, TRUE }, { "histogram", no_argument, (int*)&g_histogram, TRUE }, { "verbose", no_argument, (int*)&g_verbose, TRUE }, { "help", no_argument, NULL, 'h' }, -- 2.45.2